diff --git a/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/INSTALLER b/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/INSTALLER
new file mode 100644
index 00000000..a1b589e3
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/LICENSE.txt b/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/LICENSE.txt
new file mode 100644
index 00000000..78a5fe85
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/LICENSE.txt
@@ -0,0 +1,11 @@
+Copyright (c) 2020, Apple Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder(s) nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/METADATA b/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/METADATA
new file mode 100644
index 00000000..7a2179f8
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/METADATA
@@ -0,0 +1,60 @@
+Metadata-Version: 2.1
+Name: coremltools
+Version: 6.3.0
+Summary: Community Tools for Core ML
+Home-page: https://github.com/apple/coremltools
+Author: Apple Inc.
+Author-email: coremltools@apple.com
+License: BSD
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Operating System :: POSIX :: Linux
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Topic :: Scientific/Engineering
+Classifier: Topic :: Software Development
+License-File: LICENSE.txt
+Requires-Dist: numpy (>=1.14.5)
+Requires-Dist: protobuf (<=4.0.0,>=3.1.0)
+Requires-Dist: sympy
+Requires-Dist: tqdm
+Requires-Dist: packaging
+
+coremltools
+===========
+
+`Core ML <https://developer.apple.com/documentation/coreml>`_
+is an Apple framework that allows developers to easily integrate
+machine learning (ML) models into apps. Core ML is available on iOS, iPadOS,
+watchOS, macOS, and tvOS. Core ML introduces a public file format (.mlmodel)
+for a broad set of ML methods including deep neural networks (convolutional
+and recurrent), tree ensembles (boosted trees, random forest, decision trees),
+and generalized linear models. Core ML models can be directly integrated into
+apps within Xcode.
+
+:code:`coremltools` is a Python package for creating, examining, and testing models in
+the .mlmodel format. In particular, it can be used to:
+
+- Convert trained models from popular machine learning tools into Core ML format
+  (.mlmodel).
+- Write models to Core ML format with a simple API.
+- Make predictions using the Core ML framework (on select platforms) to
+  verify conversion.
+
+More Information
+----------------
+
+- `coremltools user guide and examples <https://coremltools.readme.io/docs>`_
+- `Core ML framework documentation <https://developer.apple.com/documentation/coreml>`_
+- `Machine learning at Apple <https://www.apple.com/machine-learning>`_
+
+License
+-------
+Copyright (c) 2020, Apple Inc. All rights reserved.
+
+Use of this source code is governed by the
+`3-Clause BSD License <https://opensource.org/licenses/BSD-3-Clause>`_
+that can be found in the LICENSE.txt file.
diff --git a/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/RECORD b/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/RECORD
new file mode 100644
index 00000000..cdf94764
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/RECORD
@@ -0,0 +1,801 @@
+coremltools-6.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+coremltools-6.3.0.dist-info/LICENSE.txt,sha256=66FzSUqZcxfHi3zmtVLo5EM-Me_29eqQk0GFJC63H90,1488
+coremltools-6.3.0.dist-info/METADATA,sha256=oTLMWokJJCBSYGmkl4bqAe5pF84UcZLGJQ8hpf-Sycs,2303
+coremltools-6.3.0.dist-info/RECORD,,
+coremltools-6.3.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+coremltools-6.3.0.dist-info/WHEEL,sha256=bqKuSnIUrnP2Cm43W1aVJYmhiN5AXQEeSa7Zd-R-YnM,108
+coremltools-6.3.0.dist-info/top_level.txt,sha256=LpjwPWmxFPfhcovVTFV_nHNcZNnb4lN3DytAZ7KLgL8,12
+coremltools/__init__.py,sha256=MjUWXzLcsNS5WLZsNzIzBjOrIf2w_74jYYf_gXUUSD8,4561
+coremltools/__pycache__/__init__.cpython-310.pyc,,
+coremltools/__pycache__/version.cpython-310.pyc,,
+coremltools/_deps/__init__.py,sha256=rMyK1D_RfWjb2SYRr8Wm4ajG4UaOvhWK-5Zz2T0dDFI,5563
+coremltools/_deps/__pycache__/__init__.cpython-310.pyc,,
+coremltools/converters/__init__.py,sha256=VUt_9erzq60Md56GOZYEry8JyHjcMTskJsg80-LTj4Y,490
+coremltools/converters/__pycache__/__init__.cpython-310.pyc,,
+coremltools/converters/__pycache__/_converters_entry.cpython-310.pyc,,
+coremltools/converters/__pycache__/_profile_utils.cpython-310.pyc,,
+coremltools/converters/_converters_entry.py,sha256=kcryVpWGwEDgSFSdMphtj9tkhOOccfXHbYGHDaHLZpk,38179
+coremltools/converters/_profile_utils.py,sha256=j_N-n3H5E-ncyXmWqMw2TCXgebrv4Hi3VyLkxfcPxbQ,2415
+coremltools/converters/libsvm/__init__.py,sha256=5d5hKszt6hW6MmMDuRDS3o1qmdsRMLn6gBzD4vluU28,3399
+coremltools/converters/libsvm/__pycache__/__init__.cpython-310.pyc,,
+coremltools/converters/libsvm/__pycache__/_libsvm_converter.cpython-310.pyc,,
+coremltools/converters/libsvm/__pycache__/_libsvm_util.cpython-310.pyc,,
+coremltools/converters/libsvm/_libsvm_converter.py,sha256=qSYudH0qCayM0OVld9z9AEDT9JM2RB4hwPU23RwuHQM,7202
+coremltools/converters/libsvm/_libsvm_util.py,sha256=Pprr6pb1BjMuICUsRBzDz-izxFpdRLWMVWrHpn1i7ak,971
+coremltools/converters/mil/__init__.py,sha256=VK_pHIuhb5HeMbYKMpNo2v62n0JplyFUPeJdLJEyxQ0,894
+coremltools/converters/mil/__pycache__/__init__.cpython-310.pyc,,
+coremltools/converters/mil/__pycache__/_deployment_compatibility.cpython-310.pyc,, +coremltools/converters/mil/__pycache__/conftest.cpython-310.pyc,, +coremltools/converters/mil/__pycache__/converter.cpython-310.pyc,, +coremltools/converters/mil/__pycache__/debugging_utils.cpython-310.pyc,, +coremltools/converters/mil/__pycache__/input_types.cpython-310.pyc,, +coremltools/converters/mil/__pycache__/test_flexible_shape_inputs.cpython-310.pyc,, +coremltools/converters/mil/__pycache__/testing_reqs.cpython-310.pyc,, +coremltools/converters/mil/__pycache__/testing_utils.cpython-310.pyc,, +coremltools/converters/mil/_deployment_compatibility.py,sha256=CGT3AkXVyQaBHor7l5y41pUch-UvYFN9yX6i0jCHi90,5944 +coremltools/converters/mil/backend/__init__.py,sha256=OeQ1kZJJmCaKdUkg2uZ6SB5YJki1wtmJe50FQJ2jf6o,218 +coremltools/converters/mil/backend/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/backend/__pycache__/backend_helper.cpython-310.pyc,, +coremltools/converters/mil/backend/backend_helper.py,sha256=hfDxGw9QwTWZHhKu2n6zJDsnJP8S8X6HkrP6zDd1ZBE,3868 +coremltools/converters/mil/backend/mil/__init__.py,sha256=OeQ1kZJJmCaKdUkg2uZ6SB5YJki1wtmJe50FQJ2jf6o,218 +coremltools/converters/mil/backend/mil/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/backend/mil/__pycache__/helper.cpython-310.pyc,, +coremltools/converters/mil/backend/mil/__pycache__/load.cpython-310.pyc,, +coremltools/converters/mil/backend/mil/__pycache__/test_helper.cpython-310.pyc,, +coremltools/converters/mil/backend/mil/__pycache__/test_model_input_params.cpython-310.pyc,, +coremltools/converters/mil/backend/mil/helper.py,sha256=eASB_rm9LoFzjqR073Yxkzh5J-CkJFd3vGx4XtvNaIg,12647 +coremltools/converters/mil/backend/mil/load.py,sha256=sfiSQyyY3ygM84MG9fTbFapOF7CWgJqweyzmH5Ws0oM,23014 +coremltools/converters/mil/backend/mil/passes/__init__.py,sha256=KHEqbZx-4q-53RegXVIDZfxonilAe0-361yeVMo_p-E,355 +coremltools/converters/mil/backend/mil/passes/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/backend/mil/passes/__pycache__/adjust_io_to_supported_types.cpython-310.pyc,, +coremltools/converters/mil/backend/mil/passes/__pycache__/fuse_activation_silu.cpython-310.pyc,, +coremltools/converters/mil/backend/mil/passes/__pycache__/insert_image_preprocessing_op.cpython-310.pyc,, +coremltools/converters/mil/backend/mil/passes/__pycache__/sanitize_name_strings.cpython-310.pyc,, +coremltools/converters/mil/backend/mil/passes/__pycache__/test_passes.cpython-310.pyc,, +coremltools/converters/mil/backend/mil/passes/adjust_io_to_supported_types.py,sha256=KzYYhYlx95GhpPI_DfxPP5KBc1q7D4kPMdoWxi5aLsw,10346 +coremltools/converters/mil/backend/mil/passes/fuse_activation_silu.py,sha256=Phgi9Ci8db4tJyM6QbVLEKD-CqGIoDMOnD0JZGr3E-Q,2744 +coremltools/converters/mil/backend/mil/passes/insert_image_preprocessing_op.py,sha256=amH7rQjeLlTXfAw3ccK8fS3SgzJW-tcsbe7plLpIzww,3434 +coremltools/converters/mil/backend/mil/passes/sanitize_name_strings.py,sha256=QGqKu7Eh69ebgIJosT6WeNMf9FrZKQB09R8SWRJWOzs,1014 +coremltools/converters/mil/backend/mil/passes/test_passes.py,sha256=VrJ-kIyVhT_YGBW66KTSiO2oLIZuCYEepp4nQrpGoIA,31333 +coremltools/converters/mil/backend/mil/test_helper.py,sha256=I_g44hmrxPTYDmtwQS735peHrAfzPS9WmkMr0uCtxt4,1265 +coremltools/converters/mil/backend/mil/test_model_input_params.py,sha256=8Q1IPsOUTbV1rl3P7nXD9ZeHZhZlEn2oDzz34ZXle-c,14116 +coremltools/converters/mil/backend/nn/__init__.py,sha256=OeQ1kZJJmCaKdUkg2uZ6SB5YJki1wtmJe50FQJ2jf6o,218 
+coremltools/converters/mil/backend/nn/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/backend/nn/__pycache__/load.cpython-310.pyc,, +coremltools/converters/mil/backend/nn/__pycache__/mil_to_nn_mapping_registry.cpython-310.pyc,, +coremltools/converters/mil/backend/nn/__pycache__/op_mapping.cpython-310.pyc,, +coremltools/converters/mil/backend/nn/load.py,sha256=eBP4i2OnCkrXcEMWzM302c1kGD-4vKDcOvFJQI2ER9c,13539 +coremltools/converters/mil/backend/nn/mil_to_nn_mapping_registry.py,sha256=1Zn-vZtr94KghOsMg6wbvYd4hf9HS0eBjy7-CnwRigw,739 +coremltools/converters/mil/backend/nn/op_mapping.py,sha256=7iQEpxeh-PxOYc46AhJ-mQV9CI1sd1i_PN0rxe04--E,129681 +coremltools/converters/mil/backend/nn/passes/__init__.py,sha256=5v0mIELxvFz0asqZRg5vC3loZrPIwbRlciRMKbICIVU,432 +coremltools/converters/mil/backend/nn/passes/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/backend/nn/passes/__pycache__/alert_return_type_cast.cpython-310.pyc,, +coremltools/converters/mil/backend/nn/passes/__pycache__/commingle_loop_vars.cpython-310.pyc,, +coremltools/converters/mil/backend/nn/passes/__pycache__/conv1d_decomposition.cpython-310.pyc,, +coremltools/converters/mil/backend/nn/passes/__pycache__/handle_return_inputs_as_outputs.cpython-310.pyc,, +coremltools/converters/mil/backend/nn/passes/__pycache__/handle_return_unused_inputs.cpython-310.pyc,, +coremltools/converters/mil/backend/nn/passes/__pycache__/handle_unused_inputs.cpython-310.pyc,, +coremltools/converters/mil/backend/nn/passes/__pycache__/mlmodel_passes.cpython-310.pyc,, +coremltools/converters/mil/backend/nn/passes/__pycache__/test_mlmodel_passes.cpython-310.pyc,, +coremltools/converters/mil/backend/nn/passes/__pycache__/test_passes.cpython-310.pyc,, +coremltools/converters/mil/backend/nn/passes/alert_return_type_cast.py,sha256=r_ESDoZI1cTSFgB4nmmqPyhpsX7V6Rxg3FOQ9hd7pks,1698 +coremltools/converters/mil/backend/nn/passes/commingle_loop_vars.py,sha256=xFtEETz312GRoOj9G4MF88qR9Seg2weE0aIcEMWFNq0,2528 +coremltools/converters/mil/backend/nn/passes/conv1d_decomposition.py,sha256=TonBSpywmGI7nj18VAAWJ2Mjk7mIEI59TKBWBK0Cxx0,3705 +coremltools/converters/mil/backend/nn/passes/handle_return_inputs_as_outputs.py,sha256=2rU5JAoP82u4lNIFjEUKbZgTATrH3Px52zud5Ql4CV4,2153 +coremltools/converters/mil/backend/nn/passes/handle_return_unused_inputs.py,sha256=pVvJDU6d5gTDnApVXofs1vSLK6xDY7Q_8HiOueFHaTk,2086 +coremltools/converters/mil/backend/nn/passes/handle_unused_inputs.py,sha256=JMEQBCGCCwspXXOrQVDO6zSU3z7ey8AcPqnesPT7bKc,1641 +coremltools/converters/mil/backend/nn/passes/mlmodel_passes.py,sha256=MMkar87LfR2RqzJq1Gr7IjB1zS-cW6hYcYTGZz0kukQ,18489 +coremltools/converters/mil/backend/nn/passes/test_mlmodel_passes.py,sha256=1YiFX-wL0vYxZECBLOd1bx0x2dAPdKQeWN4hwpiVveI,37205 +coremltools/converters/mil/backend/nn/passes/test_passes.py,sha256=-QTwAhNVRnR5nemZv8ck7jpQuKFmkgKiuHrIBMt_zpI,7734 +coremltools/converters/mil/conftest.py,sha256=b3s_rk8_X7Qef5qPqjQvD4Jh-0dvKogwCBza3__fLEg,460 +coremltools/converters/mil/converter.py,sha256=fLAKgvyjmRSnVcWvXrsbzT8zM6_CWkOl0hYd0_LWtF8,12340 +coremltools/converters/mil/debugging_utils.py,sha256=wseDDiowencLrucDa4fndMVzKhDmLK6HgfeWHvq0xhI,6991 +coremltools/converters/mil/experimental/__init__.py,sha256=FctNmlyIxwYyQaMKDFsO_9yRk-3zSd4DvigWLjq1i3A,218 +coremltools/converters/mil/experimental/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/experimental/passes/README.md,sha256=Rl-HDN3V9SOeUpQb7pnE2ol7VNnLk6rVcIlOL_aO4_k,31488 
+coremltools/converters/mil/experimental/passes/__init__.py,sha256=FctNmlyIxwYyQaMKDFsO_9yRk-3zSd4DvigWLjq1i3A,218 +coremltools/converters/mil/experimental/passes/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/experimental/passes/__pycache__/generic_conv_batchnorm_fusion.cpython-310.pyc,, +coremltools/converters/mil/experimental/passes/__pycache__/generic_conv_bias_fusion.cpython-310.pyc,, +coremltools/converters/mil/experimental/passes/__pycache__/generic_conv_scale_fusion.cpython-310.pyc,, +coremltools/converters/mil/experimental/passes/__pycache__/generic_layernorm_instancenorm_pattern_fusion.cpython-310.pyc,, +coremltools/converters/mil/experimental/passes/__pycache__/generic_linear_bias_fusion.cpython-310.pyc,, +coremltools/converters/mil/experimental/passes/__pycache__/generic_pass_infrastructure.cpython-310.pyc,, +coremltools/converters/mil/experimental/passes/generic_conv_batchnorm_fusion.py,sha256=spiMvPiZIm6c95hRzLQ0QJV0m4byezGuJ7HqTUYMhkM,6064 +coremltools/converters/mil/experimental/passes/generic_conv_bias_fusion.py,sha256=kw2uEqdvwaWhAtXMDTw8_r24lb98GAlioWTpinMrX1U,12523 +coremltools/converters/mil/experimental/passes/generic_conv_scale_fusion.py,sha256=06IAMF3pmxC1x86Q09AZ-fAefyL1Cc89W7EpT4zPpJ0,8575 +coremltools/converters/mil/experimental/passes/generic_layernorm_instancenorm_pattern_fusion.py,sha256=76ER1S9R5c_xLr-Nk40NTCmerH880AezFD0oPqY-sG4,21031 +coremltools/converters/mil/experimental/passes/generic_linear_bias_fusion.py,sha256=MzvMgbjshh_Q1qpzdAXJdxvcjsBOfP2i02_tv_BpQZQ,4994 +coremltools/converters/mil/experimental/passes/generic_pass_infrastructure.py,sha256=zEsWPj8wIcqQ8WlGhNZTRSiz4QBjhN_Bvotknrx96GM,9865 +coremltools/converters/mil/frontend/__init__.py,sha256=RNrekscuLoD1IlRtDtknffbXW-HpBgRUPeQvJZ36xCE,264 +coremltools/converters/mil/frontend/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/frontend/__pycache__/_utils.cpython-310.pyc,, +coremltools/converters/mil/frontend/_utils.py,sha256=makq6wWuKNEuu6bmzSULCXfrLRZde3eREmELjNO3LIw,15826 +coremltools/converters/mil/frontend/milproto/__init__.py,sha256=KmeSYtj2APlznOBRP6nMj3dWsZYytRNl3usX3h9TFrA,239 +coremltools/converters/mil/frontend/milproto/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/frontend/milproto/__pycache__/helper.cpython-310.pyc,, +coremltools/converters/mil/frontend/milproto/__pycache__/load.cpython-310.pyc,, +coremltools/converters/mil/frontend/milproto/__pycache__/test_load.cpython-310.pyc,, +coremltools/converters/mil/frontend/milproto/helper.py,sha256=0PfRznC9qaThNNiky0GVc3Bxj71HZYoQrFE-xzWsyGQ,2434 +coremltools/converters/mil/frontend/milproto/load.py,sha256=xoH-n-oPIqwoQia0nLG0px76jzdSzGC_QuFxRMKEe68,17069 +coremltools/converters/mil/frontend/milproto/test_load.py,sha256=Enlsku2bIL7sAyhdiyubUAd96P9JDbud1U6Jnk_yrk4,8796 +coremltools/converters/mil/frontend/tensorflow/__init__.py,sha256=zk4a9ntbs4MmR810UzPz1ME0j3cFIFQ_8R6NnOU1icM,762 +coremltools/converters/mil/frontend/tensorflow/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/__pycache__/basic_graph_ops.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/__pycache__/convert_utils.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/__pycache__/converter.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/__pycache__/dialect_ops.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/__pycache__/dot_visitor.cpython-310.pyc,, 
+coremltools/converters/mil/frontend/tensorflow/__pycache__/load.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/__pycache__/naming_utils.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/__pycache__/ops.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/__pycache__/parse.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/__pycache__/parsed_tf_node.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/__pycache__/tf_op_registry.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/__pycache__/tfssa.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/basic_graph_ops.py,sha256=DkqK2JZ3hBhZhM8-ADL3ai8e4zounF0hwY9d_7dXiS8,10999 +coremltools/converters/mil/frontend/tensorflow/convert_utils.py,sha256=Z6fsFcif-E5kz0zR1CM301fOV9UQARXNrY5y5L72pBk,7497 +coremltools/converters/mil/frontend/tensorflow/converter.py,sha256=8JdirgUo4woQZsBh0hIaLDlDyzAXAjLDQ6T79dFHWjw,21705 +coremltools/converters/mil/frontend/tensorflow/dialect_ops.py,sha256=mIEP6BkcTA6-ssQOl1A4I5PC0f1FWUDlUznSvD7Avjc,6464 +coremltools/converters/mil/frontend/tensorflow/dot_visitor.py,sha256=5Tg5P4jIHLfBLeyxKetUoXrH2ajvhStZzvWmLowWzow,4544 +coremltools/converters/mil/frontend/tensorflow/load.py,sha256=r68_hpdeAUtTWaIEg1RvO2gKFJCCw-jUEVmzklGFBKI,12848 +coremltools/converters/mil/frontend/tensorflow/naming_utils.py,sha256=8YpEU53uohT-JxZJBeIIUWBhbfW7lmdh9jP9ybQSCfg,993 +coremltools/converters/mil/frontend/tensorflow/ops.py,sha256=ewDgy2sAum-VjwTuFkJBB1phu9pl6cti5IHKSp_0QLM,122775 +coremltools/converters/mil/frontend/tensorflow/parse.py,sha256=r-DsewTvFfRKb78VZD5iPjkHEVyxT-TOoK1yBEz7C0M,4082 +coremltools/converters/mil/frontend/tensorflow/parsed_tf_node.py,sha256=qVeJqKXTkD4_P0SCpwcI1fnaeK7u40MMRFMKJArvnjg,3234 +coremltools/converters/mil/frontend/tensorflow/ssa_passes/__init__.py,sha256=YXqZ3yZ0uLdeDQolnlqAxtXu6QXijrNygfAktKyLeao,300 +coremltools/converters/mil/frontend/tensorflow/ssa_passes/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/ssa_passes/__pycache__/backfill_make_list_elem_type.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/ssa_passes/__pycache__/expand_tf_lstm.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/ssa_passes/__pycache__/test_passes.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/ssa_passes/__pycache__/tf_lstm_to_core_lstm.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/ssa_passes/backfill_make_list_elem_type.py,sha256=VvsUVaSRRC5cJwKUNgkvfZDnX56PdxwbJ8WTCRyjme8,4788 +coremltools/converters/mil/frontend/tensorflow/ssa_passes/expand_tf_lstm.py,sha256=Rral6EZC_Fjnsbr6Mk_VyRS2LwzoKxovot3Vq9e0XQk,7617 +coremltools/converters/mil/frontend/tensorflow/ssa_passes/test_passes.py,sha256=eiL-O4Ln5mydvomONP42C-mTo2oK4FHB1lhH4B6TPVM,2071 +coremltools/converters/mil/frontend/tensorflow/ssa_passes/tf_lstm_to_core_lstm.py,sha256=m02EXj12T_aIIa9b9kSz4im5oL1gdh-FT-qIulADeMM,12437 +coremltools/converters/mil/frontend/tensorflow/test/__init__.py,sha256=OeQ1kZJJmCaKdUkg2uZ6SB5YJki1wtmJe50FQJ2jf6o,218 +coremltools/converters/mil/frontend/tensorflow/test/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/test/__pycache__/test_composite_ops.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/test/__pycache__/test_custom_ops.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/test/__pycache__/test_graphs.cpython-310.pyc,, 
+coremltools/converters/mil/frontend/tensorflow/test/__pycache__/test_load.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/test/__pycache__/test_ops.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/test/__pycache__/test_parse.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/test/__pycache__/test_parsed_tf_node.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/test/__pycache__/test_tf_conversion_api.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/test/__pycache__/testing_utils.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/test/test_composite_ops.py,sha256=oyUgbGcs1KD5rR7qGsJHLO_Dylj0No70IqhR-v7pGAE,2415 +coremltools/converters/mil/frontend/tensorflow/test/test_custom_ops.py,sha256=hDQsXdH0lQnwz-bF6G13T8H5JyY_H2Qpxc5PUTTL1Tc,11068 +coremltools/converters/mil/frontend/tensorflow/test/test_graphs.py,sha256=G_o4pMUduHq-bO_aoTi3NtJqUa9_r3wIjk9APdZuS-A,1463 +coremltools/converters/mil/frontend/tensorflow/test/test_load.py,sha256=M_m7DsGbRC09xFS_ABZrqdbvm6aM5z1MnXAiV8w7sSI,16127 +coremltools/converters/mil/frontend/tensorflow/test/test_ops.py,sha256=P991du_fGn2urOVRyOktaT03OPRQfoHruLvHtqJIwWA,251335 +coremltools/converters/mil/frontend/tensorflow/test/test_parse.py,sha256=dyxsv6vDOmD-u9KGD3VnCA14zLN03NsIq5xG0poacVo,5013 +coremltools/converters/mil/frontend/tensorflow/test/test_parsed_tf_node.py,sha256=-0-rwXYlsUKqRLHdtJXDszTJWZBisWpwkG5yU4eV4U0,2187 +coremltools/converters/mil/frontend/tensorflow/test/test_tf_conversion_api.py,sha256=PHNQrzTSaaGlDXMSOYM4ektUc3_Cr1q4N_nqwZvTGBo,38934 +coremltools/converters/mil/frontend/tensorflow/test/testing_utils.py,sha256=1ojjkIsJBgkmxO-YWkIVvFPQ4yQU6-Qwm6ZR9Cs9dZE,14271 +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/__init__.py,sha256=034IrrS2tps3c8Fk78KUZUXz1jUGI8dFylgNqKAUbCQ,847 +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/__pycache__/cond_to_where.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/__pycache__/constant_propagation.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/__pycache__/delete_asserts.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/__pycache__/delete_constant.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/__pycache__/delete_disconnected_nodes.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/__pycache__/functionalize_loops.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/__pycache__/fuse_dilation_conv.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/__pycache__/insert_get_tuple.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/__pycache__/quantization_pass.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/__pycache__/tensor_array_transform.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/__pycache__/variable_node_transform.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/__pycache__/visitors.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/cond_to_where.py,sha256=kBG2Og-V3RNMQLiXOAyDhtqB7brCBC7-cLot8Cqjpa4,4377 
+coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/constant_propagation.py,sha256=SdKMbyaW67C2LGjWZ5VfJijnMXm7GASIMG_xuo0JzTo,6880 +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/delete_asserts.py,sha256=1EBB-arega95PY7Lb7P2sSRq5T7RAK60bjg5sj-Kwhw,2386 +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/delete_constant.py,sha256=a9E5QPSdlsccpAhUcwuYJSAfyeyst6SsXFMetOjYTKs,3059 +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/delete_disconnected_nodes.py,sha256=eV7xam2sY2swOXkscTVQyO7VAJnUJD5UZr6uXi--QQQ,669 +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/functionalize_loops.py,sha256=qwMTLvVVHbFLqwx8u9lZn1ETmVl70Ft_WJAxQ7LTMTk,19047 +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/fuse_dilation_conv.py,sha256=YzBKuHHd894T4PVek2HrWn34Lj4mleXlklu0_WubRd0,7675 +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/insert_get_tuple.py,sha256=9KlQQGhZodQTAh6Zn8DD4q110a_pkN-WDgLazQjt0IU,3398 +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/quantization_pass.py,sha256=cRa_rYajqS9q74XPkF8fwgwmE5PQdvQpU2AFRLmowuM,2966 +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/tensor_array_transform.py,sha256=K05rgqEm8vhfRKkrjukFZYpYwGanWMQhnW33Qa0kSuE,3649 +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/variable_node_transform.py,sha256=-9SfcPttBV7jk8WHxPTL5a6iwwKoq3mVObfDJstzTM4,2898 +coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/visitors.py,sha256=GcYQEti8-Pbkb9yx7gvYqrZtfNEFf_E49ZN69UzhXPQ,6501 +coremltools/converters/mil/frontend/tensorflow/tf_op_registry.py,sha256=kVaXMUKnzA7mntR4C_Y7mrY9vg0dMUkdNFQg7C_j3xk,1769 +coremltools/converters/mil/frontend/tensorflow/tfssa.py,sha256=Cs607FC4dk1CjQ2Qr3jh2jmdd-dOlOjyjbk-dK5zo8M,21046 +coremltools/converters/mil/frontend/tensorflow2/__init__.py,sha256=egD5DxwrT6jNiUGCiyhrCKiF7alfE8-dSBcUtGn7djY,455 +coremltools/converters/mil/frontend/tensorflow2/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow2/__pycache__/converter.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow2/__pycache__/load.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow2/__pycache__/ops.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow2/converter.py,sha256=CWj1lr2ee4DdiLU_rZYRcsMTD8eNj0IEFYyo6imSKnE,1540 +coremltools/converters/mil/frontend/tensorflow2/load.py,sha256=SsnyT62Cm_oT0AyNdHr1BD1JnqLUQo2gpzwLrczAGF4,14784 +coremltools/converters/mil/frontend/tensorflow2/ops.py,sha256=uxaNzzNYfGqZSNDr8lTP4h8LQ1mcViEDfvNY-kK5YtU,8460 +coremltools/converters/mil/frontend/tensorflow2/ssa_passes/__init__.py,sha256=w3I7aJMf6J9tFjXVBkU6LL-xBLMryvNyug7z2elobvs,253 +coremltools/converters/mil/frontend/tensorflow2/ssa_passes/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow2/ssa_passes/__pycache__/remove_vacuous_cond.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow2/ssa_passes/__pycache__/test_v2_passes.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow2/ssa_passes/remove_vacuous_cond.py,sha256=XjbWSKCvtaVtqcFkkUtFrvY0LP7PxFiYiQ-lb3C_fI8,4645 +coremltools/converters/mil/frontend/tensorflow2/ssa_passes/test_v2_passes.py,sha256=0PccXazU4ifnlPDbXPvO3y7SCN5EGud4t39zYP1IRWM,1850 +coremltools/converters/mil/frontend/tensorflow2/test/__init__.py,sha256=OeQ1kZJJmCaKdUkg2uZ6SB5YJki1wtmJe50FQJ2jf6o,218 +coremltools/converters/mil/frontend/tensorflow2/test/__pycache__/__init__.cpython-310.pyc,, 
+coremltools/converters/mil/frontend/tensorflow2/test/__pycache__/test_tf2_conversion_api.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow2/test/__pycache__/test_v2_load.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow2/test/__pycache__/test_v2_ops.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow2/test/__pycache__/test_v2_ops_tf_keras.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow2/test/__pycache__/testing_utils.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow2/test/test_tf2_conversion_api.py,sha256=09bLYLJJPWa9KlMy6fWW23Gp_yhEhqwbM2Qbg4s7lVM,17555 +coremltools/converters/mil/frontend/tensorflow2/test/test_v2_load.py,sha256=gcYEMjyZ0NotJrH2Nbyam6AbCfvc1FNt7LVeyfvw2Y4,8379 +coremltools/converters/mil/frontend/tensorflow2/test/test_v2_ops.py,sha256=9KZIamhFRA0rPnLo0aIV8N3RxjqQUGm6j6ijLQO9AJI,25555 +coremltools/converters/mil/frontend/tensorflow2/test/test_v2_ops_tf_keras.py,sha256=OrUIgqhuQyj1-3TSb-DCZ6i726xZqH5bTpoQRJKLqM4,59928 +coremltools/converters/mil/frontend/tensorflow2/test/testing_utils.py,sha256=u3MJhV-abwCNAxW3uwqcaq7gdFkoEQ2M4SdzdKxNk4w,10465 +coremltools/converters/mil/frontend/tensorflow2/tf_graph_pass/__init__.py,sha256=iDhveVfdgf0DQZiiGKbrv2AqqXhxxIH02lpAAW7ysoU,371 +coremltools/converters/mil/frontend/tensorflow2/tf_graph_pass/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow2/tf_graph_pass/__pycache__/rewrite_control_flow_functions.cpython-310.pyc,, +coremltools/converters/mil/frontend/tensorflow2/tf_graph_pass/rewrite_control_flow_functions.py,sha256=sS5Eu_vctUn9aUJJ7GrzPZKz9XBKxez_csb8LQtv89Q,20086 +coremltools/converters/mil/frontend/torch/__init__.py,sha256=Pb2LcDa-lprF5s4A9aui9gV1Wd9Hubh1r9pKgbviDwA,494 +coremltools/converters/mil/frontend/torch/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/__pycache__/converter.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/__pycache__/dialect_ops.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/__pycache__/internal_graph.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/__pycache__/load.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/__pycache__/ops.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/__pycache__/torch_op_registry.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/__pycache__/torchir_passes.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/converter.py,sha256=a6NhfSjvjisr6DwW9v4EQD05QbtXDocvNhrncdYQZIk,20820 +coremltools/converters/mil/frontend/torch/dialect_ops.py,sha256=hufPXwH_ciov7fhTGc-bMnwIw7CQxI662vOZQda_988,8408 +coremltools/converters/mil/frontend/torch/internal_graph.py,sha256=aW5A5A1KspQ_uy8vwotGfEDM_-1hvjnI08K4Ngto9rc,12949 +coremltools/converters/mil/frontend/torch/load.py,sha256=xwbY8KbZoNGOByXNtOCJE9B_fW9Vaj2NB9c5NCCPz_0,4629 +coremltools/converters/mil/frontend/torch/ops.py,sha256=yp4S2smZu81rvxhm-onZhDVDHEywNrYcTHuwpp-aptQ,194098 +coremltools/converters/mil/frontend/torch/ssa_passes/__init__.py,sha256=VKaxWdiSJkPcA59djF6299M2Zebt7YrchEallFmusr4,294 +coremltools/converters/mil/frontend/torch/ssa_passes/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/ssa_passes/__pycache__/torch_tensor_assign_to_core.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/ssa_passes/__pycache__/torch_upsample_to_core_upsample.cpython-310.pyc,, 
+coremltools/converters/mil/frontend/torch/ssa_passes/torch_tensor_assign_to_core.py,sha256=348dCWHvgqbRsuNKSvxi--2X93UaBqMRQCvBDbOM8lw,2582 +coremltools/converters/mil/frontend/torch/ssa_passes/torch_upsample_to_core_upsample.py,sha256=NdL3cinpWzxerHVtKe0Vc46S6knPQZAmls_ddsTGhxQ,4525 +coremltools/converters/mil/frontend/torch/test/__init__.py,sha256=OeQ1kZJJmCaKdUkg2uZ6SB5YJki1wtmJe50FQJ2jf6o,218 +coremltools/converters/mil/frontend/torch/test/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/test/__pycache__/test_api.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/test/__pycache__/test_custom_ops.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/test/__pycache__/test_examples.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/test/__pycache__/test_internal_graph.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/test/__pycache__/test_passes.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/test/__pycache__/test_torch_conversion_api.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/test/__pycache__/test_torch_ops.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/test/__pycache__/testing_utils.cpython-310.pyc,, +coremltools/converters/mil/frontend/torch/test/test_api.py,sha256=WMS4nlkduCpIEj0HFFIxQThgDfocYlBD1AdSnehK-BQ,1753 +coremltools/converters/mil/frontend/torch/test/test_custom_ops.py,sha256=ibMiwNeQkMcoEY1wWLfo6LKPm1RiYsDS3WbD_U_ZGnU,5626 +coremltools/converters/mil/frontend/torch/test/test_examples.py,sha256=AkdE7NNX3vbMKm6GGk4vqK0pZ60LJqKcmsKjAz8suSU,1880 +coremltools/converters/mil/frontend/torch/test/test_internal_graph.py,sha256=Bo2RTg0gzvAFFbtFmDgJ-jUh_VWBATZytsAzzbE58so,66729 +coremltools/converters/mil/frontend/torch/test/test_passes.py,sha256=WZKBvSOzDfB-bGSV1Z-A-XWnFx3MJG-1q3zeKTfAUD0,12355 +coremltools/converters/mil/frontend/torch/test/test_torch_conversion_api.py,sha256=JXLgrTfCBG5PFgBZTIvlDUUz0a4DlehYcUmpeyTSiVA,65369 +coremltools/converters/mil/frontend/torch/test/test_torch_ops.py,sha256=C_T4gHQgMp_6w0sVS4848cUpV_RaYKokVo-wYvHtBaw,269720 +coremltools/converters/mil/frontend/torch/test/testing_utils.py,sha256=1PdmCOLXH99hrGwWvVMk2eEzmjHiqO3QyKBG7gkZ-KQ,9054 +coremltools/converters/mil/frontend/torch/torch_op_registry.py,sha256=OH4O2Wc74U7HiD8P4o2Yg6H6Fodhlm_RnMOLPzrXt6M,2276 +coremltools/converters/mil/frontend/torch/torchir_passes.py,sha256=UfllOFdJd4b-U4CvRlvETzQ7ZJJw7NQxVihzWafi4x8,12932 +coremltools/converters/mil/input_types.py,sha256=j78OHR-vDJQqMPSwT4BbrHz6xa1fshOLHCsXCOtq4Rw,18307 +coremltools/converters/mil/mil/__init__.py,sha256=5pSgAVXTX2o5s7uT2hi1goKhU1V5CAXWVoqMLL9HO7Q,833 +coremltools/converters/mil/mil/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/mil/__pycache__/block.cpython-310.pyc,, +coremltools/converters/mil/mil/__pycache__/builder.cpython-310.pyc,, +coremltools/converters/mil/mil/__pycache__/input_type.cpython-310.pyc,, +coremltools/converters/mil/mil/__pycache__/operation.cpython-310.pyc,, +coremltools/converters/mil/mil/__pycache__/program.cpython-310.pyc,, +coremltools/converters/mil/mil/__pycache__/var.cpython-310.pyc,, +coremltools/converters/mil/mil/block.py,sha256=_tRC6YJPHAlXlzBsTydpCjZFyJkxPhYNOy-zKR9baDM,32383 +coremltools/converters/mil/mil/builder.py,sha256=D0ERESJiDeWVLP-em8Jmpmka19MhCieS2ZSneq4x9Mg,8906 +coremltools/converters/mil/mil/input_type.py,sha256=D4U9sQXKVlZPUt--fazMy85rNpomqHG1fxqUH_A_LgA,11733 +coremltools/converters/mil/mil/operation.py,sha256=7MdTAjxP9bvHCBiJhQQ4CVGEaGijZFCxwfIJN7ngysc,22407 
+coremltools/converters/mil/mil/ops/__init__.py,sha256=OeQ1kZJJmCaKdUkg2uZ6SB5YJki1wtmJe50FQJ2jf6o,218 +coremltools/converters/mil/mil/ops/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/__pycache__/helper.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/__pycache__/registry.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/__init__.py,sha256=H9x2ZQ0aQ3iJbD6GcQ3atZ6JkyCYny0wq3zYL3VeaaM,267 +coremltools/converters/mil/mil/ops/defs/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/__pycache__/_op_reqs.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/__pycache__/_utils.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/__pycache__/complex_dialect_ops.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/_op_reqs.py,sha256=zVg-z94rkUCBkGyxBD3dMXkk0jIDW1zHIQnc0XcQjzg,354 +coremltools/converters/mil/mil/ops/defs/_utils.py,sha256=DFsyGosV0c143ueKl_Ziiq6q9Ma6mNyW2AEf1YDZluI,21615 +coremltools/converters/mil/mil/ops/defs/complex_dialect_ops.py,sha256=gySvCLcaf3Ama_KjPllN0KCBE5XBP1hWS8A3TE0xQ4k,26614 +coremltools/converters/mil/mil/ops/defs/iOS15/__init__.py,sha256=gZaKXRdzDh3fqDI-6auzI5-9Ij46ot2MuMAKandHRQw,3183 +coremltools/converters/mil/mil/ops/defs/iOS15/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS15/__pycache__/activation.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS15/__pycache__/classify.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS15/__pycache__/control_flow.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS15/__pycache__/conv.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS15/__pycache__/elementwise_binary.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS15/__pycache__/elementwise_unary.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS15/__pycache__/image_resizing.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS15/__pycache__/linear.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS15/__pycache__/normalization.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS15/__pycache__/pool.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS15/__pycache__/random.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS15/__pycache__/recurrent.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS15/__pycache__/reduction.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS15/__pycache__/scatter_gather.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS15/__pycache__/tensor_operation.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS15/__pycache__/tensor_transformation.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS15/activation.py,sha256=iUo74qPL7T3ppffKcR9JhI4YGnHyXrITUpAPRnCEuA4,15088 +coremltools/converters/mil/mil/ops/defs/iOS15/classify.py,sha256=eejXnPeTeD3mWdUb-2NwqaryZixIrgQDg1COQMi7fyc,3283 +coremltools/converters/mil/mil/ops/defs/iOS15/control_flow.py,sha256=K_KMZL1drvIdPj5N9JtQuhGHHHwHuTpwJxgVoJvrwm0,29194 +coremltools/converters/mil/mil/ops/defs/iOS15/conv.py,sha256=BYfxie-T11e43TS_0E-wFjShtVLs9i9-lkkAQFZlY60,16798 +coremltools/converters/mil/mil/ops/defs/iOS15/elementwise_binary.py,sha256=jHxUuo1XPNQ-NprycwiBUwnAMrw55QfP5pEXq9VFqnk,15713 +coremltools/converters/mil/mil/ops/defs/iOS15/elementwise_unary.py,sha256=r0yoTfmDkQcmqMZADu8OFYT6EIvxzU7akUV6PUSkSAg,20155 
+coremltools/converters/mil/mil/ops/defs/iOS15/image_resizing.py,sha256=ccdIxevYBva5zL5ORuLyenC4PzVJBMt0Xy4okuA85Hg,33876 +coremltools/converters/mil/mil/ops/defs/iOS15/linear.py,sha256=WX3L7tG7dShjCtDdhNimpfSL8PWLPreAVh6p6q567aA,12377 +coremltools/converters/mil/mil/ops/defs/iOS15/normalization.py,sha256=193vZ0cm-909K4OKK3aHdHoIeYMTbyf9ddsrjgAknWY,12595 +coremltools/converters/mil/mil/ops/defs/iOS15/pool.py,sha256=2aub91zsj4qYs1F-fTicavq1JLQmWncO2LbvP4qs8SY,9122 +coremltools/converters/mil/mil/ops/defs/iOS15/random.py,sha256=R1yCaFuKJuX1cUsdYwr5Qdi-lcAVZZHyObkQfCq8koE,9058 +coremltools/converters/mil/mil/ops/defs/iOS15/recurrent.py,sha256=TBuFBK3cd16W6D7H2OWmVW1TapCorke5SzvoZ-3eNEE,20532 +coremltools/converters/mil/mil/ops/defs/iOS15/reduction.py,sha256=JBUxB9XtMfWQq66Ul2-SSGevnQTopTrNVToebiEKi-I,15176 +coremltools/converters/mil/mil/ops/defs/iOS15/scatter_gather.py,sha256=nuBoZvW5FHkoLETaPmTzohjyGMp-sNqTLSc105SuVoo,16508 +coremltools/converters/mil/mil/ops/defs/iOS15/tensor_operation.py,sha256=rh2ifcTFw3E0Qi95qNbrLhaWQ_Ir2ACH_hGgWKt1s4I,41580 +coremltools/converters/mil/mil/ops/defs/iOS15/tensor_transformation.py,sha256=FTQxh-5JyX1VCCgvlkfEObnhzifPEEmYGRI_O0uTzRI,35196 +coremltools/converters/mil/mil/ops/defs/iOS16/__init__.py,sha256=9-uGmEr7gANNDX_ae5h-84NqCWwlQcARfWH6UsuNWdM,724 +coremltools/converters/mil/mil/ops/defs/iOS16/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS16/__pycache__/constexpr_ops.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS16/__pycache__/image_resizing.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS16/__pycache__/scatter_gather.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS16/__pycache__/tensor_operation.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS16/__pycache__/tensor_transformation.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/defs/iOS16/constexpr_ops.py,sha256=QGDuDisrRlebkhKsWgSfAm96v11UuAwpfAzJc0sW3WE,14272 +coremltools/converters/mil/mil/ops/defs/iOS16/image_resizing.py,sha256=K93EldkBrWTeIdkZxrRBcKMMIPu_DUPzuuZkiARDKnw,3335 +coremltools/converters/mil/mil/ops/defs/iOS16/scatter_gather.py,sha256=QRm9vh7dik47WZ1MaGyB0kHyTtNptvh5gM2IFeKw3Ec,5863 +coremltools/converters/mil/mil/ops/defs/iOS16/tensor_operation.py,sha256=d7QOIZ4spvY0ywvAFDXppOwenu5q9FAmxYHcL8vyuIQ,3930 +coremltools/converters/mil/mil/ops/defs/iOS16/tensor_transformation.py,sha256=j_cAR2A8r2MRmtK7PiBX58ojp3QTxtDjaPXRUhtPwLE,6920 +coremltools/converters/mil/mil/ops/helper.py,sha256=X26CylbEZhqvmcE_BKUyBK3S9SM95eFKq_jEq1z82jY,1245 +coremltools/converters/mil/mil/ops/registry.py,sha256=5WPQk19fyFrjxzupmpkth-T5SxHZ8UbcNj_6qW2Ihfs,8910 +coremltools/converters/mil/mil/ops/tests/__init__.py,sha256=OeQ1kZJJmCaKdUkg2uZ6SB5YJki1wtmJe50FQJ2jf6o,218 +coremltools/converters/mil/mil/ops/tests/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/test_activation.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/test_const.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/test_constexpr_ops.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/test_control_flow.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/test_conv.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/test_elementwise_binary.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/test_elementwise_unary.cpython-310.pyc,, 
+coremltools/converters/mil/mil/ops/tests/__pycache__/test_image_resizing.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/test_linear.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/test_normalization.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/test_pool.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/test_random.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/test_recurrent.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/test_reduction.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/test_scatter_gather.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/test_slice.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/test_tensor_operation.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/test_tensor_transformation.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/test_utils.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/__pycache__/testing_utils.cpython-310.pyc,, +coremltools/converters/mil/mil/ops/tests/test_activation.py,sha256=DXmTvhPuZkgel-XgTPEr0t_sKrFda69NCz5g2qSaBNs,35469 +coremltools/converters/mil/mil/ops/tests/test_const.py,sha256=RppHpqP6HuT4mrneuHfwHGU-KPRBX5EpmT3NsSHUvQQ,1851 +coremltools/converters/mil/mil/ops/tests/test_constexpr_ops.py,sha256=ZO5uCtJNKOQ4ofPyHY0rb7tSmLdWfiCemINms64cAJo,22658 +coremltools/converters/mil/mil/ops/tests/test_control_flow.py,sha256=C-9mLUesd4KQKCid-SQpK2-iAr2fgWAXkf72Op6ojh8,13602 +coremltools/converters/mil/mil/ops/tests/test_conv.py,sha256=_KbdkJY5_cFkKZUPzdPHAvIgVYuf-zQDe3rsO1likKI,30760 +coremltools/converters/mil/mil/ops/tests/test_elementwise_binary.py,sha256=TY5-4Gqhc8ZiQULDwS_QJRBqxiwuqSGBDC0_Roojj5g,21099 +coremltools/converters/mil/mil/ops/tests/test_elementwise_unary.py,sha256=qA5KE4ASEGyJtL4jPI1ur4d7ZesFTS8aPM6WMcXCvVI,24212 +coremltools/converters/mil/mil/ops/tests/test_image_resizing.py,sha256=mIXh4VF6nqPbX8-9d_IirlTnyTIXwVS7Wc79-NbfdPA,32525 +coremltools/converters/mil/mil/ops/tests/test_linear.py,sha256=IWQ6wiHlQXa2MT8hZQ1KYB9qokeMSZfSUJPrWhF96xE,12430 +coremltools/converters/mil/mil/ops/tests/test_normalization.py,sha256=e0d7pj2xI2PG06eomk9RDgHwg590VpMnfrfCxq0sSlY,25998 +coremltools/converters/mil/mil/ops/tests/test_pool.py,sha256=NGywf4NzVyHzR2FMMmUXxszPCS9U9Y1ESPkz2VP0lPE,17261 +coremltools/converters/mil/mil/ops/tests/test_random.py,sha256=NmnQSa52nreeSC49VqUVMv-quvP_6hqcPoNlM7x0zrI,14861 +coremltools/converters/mil/mil/ops/tests/test_recurrent.py,sha256=V318_xADa0fwaynlMPl3KUHcXhS2P2TMO1GrS8K95hA,25212 +coremltools/converters/mil/mil/ops/tests/test_reduction.py,sha256=eDx42BYueU4m5zZ9PQTUR2UK0pNxJ0dodw0bCYW9pd4,13720 +coremltools/converters/mil/mil/ops/tests/test_scatter_gather.py,sha256=AAmHXx8vO_8raXPpUivr2w6iUA2GEayxuf-KYlZLjqY,26958 +coremltools/converters/mil/mil/ops/tests/test_slice.py,sha256=I6VuQBpcETSUtroEIb1hQKa3NTwhQGO7LfNaUAFO2ik,13981 +coremltools/converters/mil/mil/ops/tests/test_tensor_operation.py,sha256=46gWhtx980Z3U9rMphLDq6_wJv9iBZDbqb3z4E0wOaE,54152 +coremltools/converters/mil/mil/ops/tests/test_tensor_transformation.py,sha256=qLBZb0dzWI_OzPyQCcYVp9jyRW8oSCM-kRV4HKWPwVw,46203 +coremltools/converters/mil/mil/ops/tests/test_utils.py,sha256=FApzl9gGXGxJ5QUHR5E9IOtVgP7gzN8g-uc8dKemPnM,8055 +coremltools/converters/mil/mil/ops/tests/testing_utils.py,sha256=OGIFAcwN4bDaD0TmB1aKxXpnYkuSDOx8aV6rznn2qFc,5734 
+coremltools/converters/mil/mil/passes/__init__.py,sha256=3UdPk7iQnrIx-JULKELn6gY1ySWUULIniwe55ZDV9RY,1415 +coremltools/converters/mil/mil/passes/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/__pycache__/graph_pass.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/__pycache__/helper.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/__pycache__/pass_pipeline.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/__pycache__/pass_registry.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/__init__.py,sha256=L_PMcjeb4I6OmDYQGt8pMhoce2THRcdxyat78rql4to,218 +coremltools/converters/mil/mil/passes/defs/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/__pycache__/lower_complex_dialect_ops.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/__pycache__/optimize_activation.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/__pycache__/optimize_conv.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/__pycache__/optimize_elementwise_binary.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/__pycache__/optimize_linear.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/__pycache__/optimize_normalization.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/__pycache__/optimize_repeat_ops.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/__pycache__/optimize_tensor_operation.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/__pycache__/preprocess.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/__pycache__/quantization.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/cleanup/__init__.py,sha256=wzYdDR8g9nsgNt9JUKNk29qzEXHQc2ixuoeYDlq_k1g,714 +coremltools/converters/mil/mil/passes/defs/cleanup/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/cleanup/__pycache__/const_elimination.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/cleanup/__pycache__/dead_code_elimination.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/cleanup/__pycache__/dedup_op_and_var_names.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/cleanup/__pycache__/fuse_reduce_mean.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/cleanup/__pycache__/loop_invariant_elimination.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/cleanup/__pycache__/noop_elimination.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/cleanup/__pycache__/remove_redundant_ops.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/cleanup/__pycache__/remove_symbolic_reshape.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/cleanup/__pycache__/topological_reorder.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/defs/cleanup/const_elimination.py,sha256=o5C3nhcC-eMO0sdth9DyJSHf-epH1MEoVnpqgwtT8f0,3996 +coremltools/converters/mil/mil/passes/defs/cleanup/dead_code_elimination.py,sha256=SRMP4yoJcF9Sdu3tggXzMSzIIjZSbeIHyNPs4HuqdaY,2902 +coremltools/converters/mil/mil/passes/defs/cleanup/dedup_op_and_var_names.py,sha256=rDNSVWUe5QYdS7idJqxffi14fKSUO1_dsmZhg5yBhBw,3833 +coremltools/converters/mil/mil/passes/defs/cleanup/fuse_reduce_mean.py,sha256=Aqxv_72E9uHOiIRqRa_BG78mNC5FB9V6o0un2zzAfXE,4434 +coremltools/converters/mil/mil/passes/defs/cleanup/loop_invariant_elimination.py,sha256=hRJU-ZwcGhkohY-tNTenp3cCm15LqN1MYy3MQq4BzzA,6930 
+coremltools/converters/mil/mil/passes/defs/cleanup/noop_elimination.py,sha256=v6MtGmmL7jCpkMZW9wqgTP0XHBccqyhQYgVRJH8VQ7E,7962 +coremltools/converters/mil/mil/passes/defs/cleanup/remove_redundant_ops.py,sha256=kMkM2xIKGXnfQ2SX-f6cndUQTFuSC7q6L4zlwBFobdE,8399 +coremltools/converters/mil/mil/passes/defs/cleanup/remove_symbolic_reshape.py,sha256=j0DssL2n-WNOJZrYujbc6ePje-lmMV7AR0P0hsauRsA,3848 +coremltools/converters/mil/mil/passes/defs/cleanup/topological_reorder.py,sha256=Ea-SSm0xD-mY4_57bVSd9dSWOT4PqX4jN7D9XuXEx78,7620 +coremltools/converters/mil/mil/passes/defs/lower_complex_dialect_ops.py,sha256=oeM3BoqpQfRK9GHSUaiPS16Xl26qCMcwalx1c67IjwM,21096 +coremltools/converters/mil/mil/passes/defs/optimize_activation.py,sha256=1SqRyWwYgZKShx2gzLyIA5zIW3VUIyaXkPAYfFZ1WV8,24562 +coremltools/converters/mil/mil/passes/defs/optimize_conv.py,sha256=QoLhYc0L54Wuz11Mup9i-RrAMvZY0RjTgMnZViott6U,41827 +coremltools/converters/mil/mil/passes/defs/optimize_elementwise_binary.py,sha256=m95s6dplNvoMywA_fgynCuC8_HNk54e_YU3KpjuAD7A,11714 +coremltools/converters/mil/mil/passes/defs/optimize_linear.py,sha256=KKymLQzMwrLu-QvQ1VR3VXUlDFYlAxUgyDeKtWH4YyE,11330 +coremltools/converters/mil/mil/passes/defs/optimize_normalization.py,sha256=dTPHtbYtCS-SqZYwSugE5N8-EJb-W_SKRWnw68eX4co,34386 +coremltools/converters/mil/mil/passes/defs/optimize_repeat_ops.py,sha256=0pk9H4mVZ5wd-FuuC4LeGq_sBoxlRM76UBV4r-6J6II,72694 +coremltools/converters/mil/mil/passes/defs/optimize_tensor_operation.py,sha256=f_V2gGTJ_YhIbtbKqtiiHxaUJ7rWnXwcFdsV2hk3kRA,29777 +coremltools/converters/mil/mil/passes/defs/preprocess.py,sha256=eBrN5Q1Y-FN-zivwnmuGCPaM8TQc6OWHvvcTTB3ydG0,15254 +coremltools/converters/mil/mil/passes/defs/quantization.py,sha256=KK0eq3nMUOFazWTIQUSTLIVIhnDambQsofMA0OXQIDk,32105 +coremltools/converters/mil/mil/passes/graph_pass.py,sha256=tyrIY8pXk7FsF4m-9ML0qIdy8DnUaZs7hqKbdLyrdms,2605 +coremltools/converters/mil/mil/passes/helper.py,sha256=u-8DCYAbEkrhQwcs2D6PRMv18wqKtTahKgHxJkvJKGI,5761 +coremltools/converters/mil/mil/passes/pass_pipeline.py,sha256=bpOZyFG3CYjvn-aJUzd1Hujs3qPbvPlaZg9Fxiey-A4,15680 +coremltools/converters/mil/mil/passes/pass_registry.py,sha256=Hh8_orCulG-GmKWN4STn0UW8PZdhJdNbsoaHZovb0KQ,2275 +coremltools/converters/mil/mil/passes/tests/__init__.py,sha256=L_PMcjeb4I6OmDYQGt8pMhoce2THRcdxyat78rql4to,218 +coremltools/converters/mil/mil/passes/tests/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/tests/__pycache__/test_lower_complex_dialect_ops.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/tests/__pycache__/test_pass_pipeline.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/tests/__pycache__/test_passes.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/tests/__pycache__/test_reduce_transposes_pass.cpython-310.pyc,, +coremltools/converters/mil/mil/passes/tests/test_lower_complex_dialect_ops.py,sha256=jqdSY6_DVaAEl1M9v_LtGEt_jaCU0zypqVcmqe94HZY,2082 +coremltools/converters/mil/mil/passes/tests/test_pass_pipeline.py,sha256=GmURas3N7XDTM-seKSF3KP_rWgoc7xaFQP6MBSIvWdk,5077 +coremltools/converters/mil/mil/passes/tests/test_passes.py,sha256=VV25MCOhes0y_ZJDZQDPd9ed-iFgMOKQiQoYOMXM4zI,271534 +coremltools/converters/mil/mil/passes/tests/test_reduce_transposes_pass.py,sha256=jvBaMlpNMh8qp-3oOiIufEZ75sO-xLi5IyIA5kyGJHw,78845 +coremltools/converters/mil/mil/program.py,sha256=Nc_floSXfmC7f6gO8rmMZm7ey53F3UQaHG8MkMCW1A4,10745 +coremltools/converters/mil/mil/tests/__init__.py,sha256=hDZrITJ4jV2RaNfWRvkN7mI2mF4QCA5OiQ4MmCp6Gns,217 
+coremltools/converters/mil/mil/tests/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/mil/tests/__pycache__/test_block.cpython-310.pyc,, +coremltools/converters/mil/mil/tests/__pycache__/test_debug.cpython-310.pyc,, +coremltools/converters/mil/mil/tests/__pycache__/test_programs.cpython-310.pyc,, +coremltools/converters/mil/mil/tests/__pycache__/test_types.cpython-310.pyc,, +coremltools/converters/mil/mil/tests/test_block.py,sha256=EPqbFOzCKf7-fSle65jHtMiUYyIyoVH8UVdhgYlNfdc,15736 +coremltools/converters/mil/mil/tests/test_debug.py,sha256=vqlRzqx88YHzgGM9F-FZw-5ZRE1chMFsZumS-Jwm6kI,10605 +coremltools/converters/mil/mil/tests/test_programs.py,sha256=oEb6no5KWuk9e4P4ja3Em0FB1GyzDUcsBaxFmKmlogI,13429 +coremltools/converters/mil/mil/tests/test_types.py,sha256=phOYAyvqsKoM5BQTdHCsQZ4XXUugyEsZIjdmLFhbhCc,1085 +coremltools/converters/mil/mil/types/__init__.py,sha256=Fqy2ofjkzm1iZ5viV9XGU9CjVhgFxeL459yId7XIKxY,1703 +coremltools/converters/mil/mil/types/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/mil/types/__pycache__/annotate.cpython-310.pyc,, +coremltools/converters/mil/mil/types/__pycache__/get_type_info.cpython-310.pyc,, +coremltools/converters/mil/mil/types/__pycache__/global_methods.cpython-310.pyc,, +coremltools/converters/mil/mil/types/__pycache__/symbolic.cpython-310.pyc,, +coremltools/converters/mil/mil/types/__pycache__/type_bool.cpython-310.pyc,, +coremltools/converters/mil/mil/types/__pycache__/type_complex.cpython-310.pyc,, +coremltools/converters/mil/mil/types/__pycache__/type_dict.cpython-310.pyc,, +coremltools/converters/mil/mil/types/__pycache__/type_double.cpython-310.pyc,, +coremltools/converters/mil/mil/types/__pycache__/type_globals_pseudo_type.cpython-310.pyc,, +coremltools/converters/mil/mil/types/__pycache__/type_int.cpython-310.pyc,, +coremltools/converters/mil/mil/types/__pycache__/type_list.cpython-310.pyc,, +coremltools/converters/mil/mil/types/__pycache__/type_mapping.cpython-310.pyc,, +coremltools/converters/mil/mil/types/__pycache__/type_spec.cpython-310.pyc,, +coremltools/converters/mil/mil/types/__pycache__/type_str.cpython-310.pyc,, +coremltools/converters/mil/mil/types/__pycache__/type_tensor.cpython-310.pyc,, +coremltools/converters/mil/mil/types/__pycache__/type_tuple.cpython-310.pyc,, +coremltools/converters/mil/mil/types/__pycache__/type_unknown.cpython-310.pyc,, +coremltools/converters/mil/mil/types/__pycache__/type_void.cpython-310.pyc,, +coremltools/converters/mil/mil/types/annotate.py,sha256=3ZqR-mCpr2l1stnQlvGCDnY6H81o_th-hToFuNhZiu0,3411 +coremltools/converters/mil/mil/types/get_type_info.py,sha256=fM_y6Y5qvjUKthPkfE4HNdBaLHpI9ZJbIYwTgu2pHLY,2123 +coremltools/converters/mil/mil/types/global_methods.py,sha256=LgvWtHE26K6pNzDhVwWqxgTUuoLU_NBw1fwEgnNgxcM,1468 +coremltools/converters/mil/mil/types/symbolic.py,sha256=do8ZZ0tjmaxwXGDoiN98msijsdI9rnlqsr7oZ5Isr0A,2160 +coremltools/converters/mil/mil/types/type_bool.py,sha256=ddP0dQqFuq3gGBr8lcXqd0uji4KUnNEV1eidKfxe7Ls,1230 +coremltools/converters/mil/mil/types/type_complex.py,sha256=rFu8FATeQr2pjkQtIdq067BxnXhkobDvRPixnQeL5aU,5705 +coremltools/converters/mil/mil/types/type_dict.py,sha256=kQZ9m_sP3K1XXza94XghSKgjTQKIhjw_zJZXo0ysycU,1665 +coremltools/converters/mil/mil/types/type_double.py,sha256=_avobC6xf1Z7GIVZ0CEbTscGpUbYBbI41Yq7gbmU6qg,5119 +coremltools/converters/mil/mil/types/type_globals_pseudo_type.py,sha256=jEiaHuzPOMjmwuQWC0IeKRZSyqR-wG2LwPjU9VLGa_c,370 +coremltools/converters/mil/mil/types/type_int.py,sha256=W16HHH0u2UU5rzdRDUpZNDHEcDcqzYITWVQxgriVBOk,5439 
+coremltools/converters/mil/mil/types/type_list.py,sha256=m32goMJcjMDgfuMgaP719tEO58hdXOGoFIv7vusI308,1974 +coremltools/converters/mil/mil/types/type_mapping.py,sha256=4528XrAWkxcR9jL1ylSniYINCEWo0xeTjJqjXO-brZY,12886 +coremltools/converters/mil/mil/types/type_spec.py,sha256=_zm-Uf3g0z4VRr9o2bm7D1ND2uCJETtHLg0pJiumQHQ,2994 +coremltools/converters/mil/mil/types/type_str.py,sha256=ximDBJgjn35kVqeGm65IM3PSS6yjr98_Xu4udpSlXVQ,641 +coremltools/converters/mil/mil/types/type_tensor.py,sha256=gq6dycXjqYj85HzdrWwNnBdSuCQOEWKSEojuAqFrbvg,7524 +coremltools/converters/mil/mil/types/type_tuple.py,sha256=4Ma5Q6caZL7jigqPe9menbCU09nPC0RjKOIdLlAUBT0,1263 +coremltools/converters/mil/mil/types/type_unknown.py,sha256=Z8Ky3qY6oG_3AF1iw2Ec6AlWJXcETrSEM9LYkaHvLJs,468 +coremltools/converters/mil/mil/types/type_void.py,sha256=fOaI7s928HwZVqYIV_dmYIZBYv9DbYsD6hZEwVddzII,352 +coremltools/converters/mil/mil/var.py,sha256=EiEu9sjvJL8TqqzhDyww54UdDIoBIqSdGquZMOK33ek,12468 +coremltools/converters/mil/mil/visitors/__init__.py,sha256=OeQ1kZJJmCaKdUkg2uZ6SB5YJki1wtmJe50FQJ2jf6o,218 +coremltools/converters/mil/mil/visitors/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/mil/mil/visitors/__pycache__/dot_visitor.cpython-310.pyc,, +coremltools/converters/mil/mil/visitors/dot_visitor.py,sha256=zGPglQ2Aj5G07URoAf_uyvQuB-eOaxRAvg0eMs2JHtk,6100 +coremltools/converters/mil/test_flexible_shape_inputs.py,sha256=cNwy4Omd3iW87ONSxkvmeolkGc70JrynGFbov57Cmv8,6883 +coremltools/converters/mil/testing_reqs.py,sha256=xyXbk1lHscCKREjrjf0Y6wIMbWIvpfyVgLZS2K6hvKE,1839 +coremltools/converters/mil/testing_utils.py,sha256=f5vTHD-jGAedmDqWTVEWakKHq18LKRhGcd_XoCCODSA,20514 +coremltools/converters/sklearn/_LinearSVC.py,sha256=lCbB1yW0H8QBo1VT1HXzewdsSh0JJgXhA42UXize1NM,1500 +coremltools/converters/sklearn/_LinearSVR.py,sha256=DYLkoDJUy_KWPcTu0Zyz3zpwy1AYmvV1ocsI9rLD8jE,1405 +coremltools/converters/sklearn/_NuSVC.py,sha256=L5mj0xhEECZcYRs_Uc4z4d28Bf8o9k3Hl81iuWiu0KQ,1882 +coremltools/converters/sklearn/_NuSVR.py,sha256=Kuf2g6yI8txkwdwXZW5UNi1ZEpTmM6JUbSyYhSmtrh4,1458 +coremltools/converters/sklearn/_SVC.py,sha256=2EfcCNErT3F5ItFQvldQCKL6OeXBjhtS5jS26MaQri0,4021 +coremltools/converters/sklearn/_SVR.py,sha256=Nfgb5X2u5Q3im0UQ-_2MQD_rRO0TNKx18baI3crqdWI,2372 +coremltools/converters/sklearn/__init__.py,sha256=Pz9DffozoGDaptuA-3lFdNNXtbEB9DvmFs-y4ryRJ_g,294 +coremltools/converters/sklearn/__pycache__/_LinearSVC.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_LinearSVR.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_NuSVC.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_NuSVR.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_SVC.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_SVR.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_converter.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_converter_internal.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_decision_tree_classifier.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_decision_tree_regressor.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_dict_vectorizer.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_gradient_boosting_classifier.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_gradient_boosting_regressor.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_imputer.cpython-310.pyc,, 
+coremltools/converters/sklearn/__pycache__/_k_neighbors_classifier.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_linear_regression.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_logistic_regression.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_normalizer.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_one_hot_encoder.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_random_forest_classifier.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_random_forest_regressor.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_ridge_regression.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_sklearn_util.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_standard_scaler.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_svm_common.cpython-310.pyc,, +coremltools/converters/sklearn/__pycache__/_tree_ensemble.cpython-310.pyc,, +coremltools/converters/sklearn/_converter.py,sha256=m8smydsWCW3Wn2j1kbhS-_cFqgF54rlqfq2eZOuBLlE,5912 +coremltools/converters/sklearn/_converter_internal.py,sha256=U1ysD14cmi4upr8EKaF1W5BBzhPMlsv6Si5utQOaOc4,12902 +coremltools/converters/sklearn/_decision_tree_classifier.py,sha256=BWsZDwFYTb6MC94uDuLPOUMIsuAt3VMcEEM-chc3Ois,1684 +coremltools/converters/sklearn/_decision_tree_regressor.py,sha256=Lzca6wEghmuvZgDhYttAHuH22P88CgtxV-NSV0hugCk,1442 +coremltools/converters/sklearn/_dict_vectorizer.py,sha256=RjrXEHrwJAAx3d7VPUOVslwkW5P7d1tdUBGEh5FTric,3581 +coremltools/converters/sklearn/_gradient_boosting_classifier.py,sha256=EbJF4ZnGazU826RGS3c5n6bUhbD0o1CaVlyk7IyGIMo,3369 +coremltools/converters/sklearn/_gradient_boosting_regressor.py,sha256=a4h6Zg2YE_vsFL1vZshICljxpst1s8WnufRRstT7NYU,2246 +coremltools/converters/sklearn/_imputer.py,sha256=pIniBOLcEafe4x5P-cedYjrnn7kVqu0AJ6XYfBQiDK8,3417 +coremltools/converters/sklearn/_k_neighbors_classifier.py,sha256=H5paYfKvFWd5npSbqNKBXh0Pbc2id7dwiAcle57qRGc,9525 +coremltools/converters/sklearn/_linear_regression.py,sha256=_jmLLDSOQPa_hsgAWRRwPDXxs9vR-QPy_ojWm6Bjsvw,2376 +coremltools/converters/sklearn/_logistic_regression.py,sha256=o1Vd04GAuEMYlk8iDikGNNY5KuLy1C4wz_v1K3V7maI,3120 +coremltools/converters/sklearn/_normalizer.py,sha256=vb9ms0eOIaA4rGj5BNUtEIMGbccp_S64CRy4s2UH_wU,2315 +coremltools/converters/sklearn/_one_hot_encoder.py,sha256=I5VouIP_lho_Xi-ZhjYUrQLtNO1dGEgd5t__3xoGcvE,9933 +coremltools/converters/sklearn/_random_forest_classifier.py,sha256=0XyMd4VOTeO1oul8MghEfLhEUdRAizFmGE6rrscw6pY,1916 +coremltools/converters/sklearn/_random_forest_regressor.py,sha256=TZ0QFwtmswyKK9yJ2ikA45CQJ8bPqkGabAdGDXIx4f4,1710 +coremltools/converters/sklearn/_ridge_regression.py,sha256=gzAvke90jYfT269or8dS5eQPLC31-nT1H3W0UQgoHn8,1422 +coremltools/converters/sklearn/_sklearn_util.py,sha256=LhOC-eDE7dZpB0wTFDRt_V2HQjc7KgIV-eEQ7kWP_fw,1032 +coremltools/converters/sklearn/_standard_scaler.py,sha256=WOjXmHAxgPYkGV3QuygqV0-LLgoTeTqIA8nvJB8Ed2k,2626 +coremltools/converters/sklearn/_svm_common.py,sha256=CXvMzz6o2aiT4D5H5_XZiiPdkCIzVfQ4q4OeYAOYGRM,1210 +coremltools/converters/sklearn/_tree_ensemble.py,sha256=DhTs5J_8TbsG2LO_URrsJbefB_mqt6DreE6ks5UAUTY,7783 +coremltools/converters/xgboost/__init__.py,sha256=b_yMPWvR26-dnC7cJZlavGS1gOmBkmJL2lBAYvjLvaM,243 +coremltools/converters/xgboost/__pycache__/__init__.cpython-310.pyc,, +coremltools/converters/xgboost/__pycache__/_tree.cpython-310.pyc,, +coremltools/converters/xgboost/__pycache__/_tree_ensemble.cpython-310.pyc,, 
+coremltools/converters/xgboost/_tree.py,sha256=8hWCttuSP_Ig1s36YE_KqCSVrowtWsxBiTxV0Ed39Mg,2760 +coremltools/converters/xgboost/_tree_ensemble.py,sha256=wRMABFYEwWRKvpkW4GUsoHVXj6NRwvKSi1Rkrm-CjDU,9537 +coremltools/libcoremlpython.so,sha256=d4nonEZiMH9jMAVlzzqAzRYd4YU16Q19eZYMjJbMuKs,1626896 +coremltools/libmilstoragepython.so,sha256=d2ZMpv2iF6EVcWOT0_DW-fmuvzEuu4E5tTbbfLwvNQY,213608 +coremltools/libmodelpackage.so,sha256=5doC_sP61YJ_TuAtV34x-bm3bmMqPDl4HHosj4Ej3Tc,305376 +coremltools/models/__init__.py,sha256=VdXZQNtZlSSF4gSe8OfXvvvfEpPnE_6VuF0Mo_-_5QY,1049 +coremltools/models/__pycache__/__init__.cpython-310.pyc,, +coremltools/models/__pycache__/_deprecation.cpython-310.pyc,, +coremltools/models/__pycache__/_feature_management.cpython-310.pyc,, +coremltools/models/__pycache__/_interface_management.cpython-310.pyc,, +coremltools/models/__pycache__/array_feature_extractor.cpython-310.pyc,, +coremltools/models/__pycache__/datatypes.cpython-310.pyc,, +coremltools/models/__pycache__/feature_vectorizer.cpython-310.pyc,, +coremltools/models/__pycache__/model.cpython-310.pyc,, +coremltools/models/__pycache__/pipeline.cpython-310.pyc,, +coremltools/models/__pycache__/tree_ensemble.cpython-310.pyc,, +coremltools/models/__pycache__/utils.cpython-310.pyc,, +coremltools/models/_deprecation.py,sha256=t3tie2O6lyTUpfmB6ZTMRItmAT74yxIQ_3tO_R4zsws,1131 +coremltools/models/_feature_management.py,sha256=R-HmJX4i7R7Kr4KjlAzT4S35XP69T_gAZg6YMbB7Xd0,11762 +coremltools/models/_interface_management.py,sha256=OevbCPfC5x07eUiMbOmOjO2jrIRdZHbKTOWVMVCVSaI,7068 +coremltools/models/array_feature_extractor.py,sha256=IZDVYlt-3xPfAqsagb5alr5BLXgRxYL7wPB3jw4kfB8,2018 +coremltools/models/datatypes.py,sha256=SyZo_AiQr_DFpHivsjXL4Tw_5uVKwSHQOwf-wPQ9Pto,6761 +coremltools/models/feature_vectorizer.py,sha256=TahnTSreaUUdfnJ3Us28QR1HChPUlWubdNEa0YiBWow,3718 +coremltools/models/ml_program/__init__.py,sha256=oxlPVGZ3j0hBRHK01gavhSG5buxJepofMsluRjJ9ZCk,247 +coremltools/models/ml_program/__pycache__/__init__.cpython-310.pyc,, +coremltools/models/ml_program/__pycache__/compression_utils.cpython-310.pyc,, +coremltools/models/ml_program/compression_utils.py,sha256=ZfKUJgh9xcA9cnPqnKdd_HzlmrJInSpujSXdiPBGr4k,25126 +coremltools/models/model.py,sha256=IYSNOWemf29N_x10Mf6L33vFeTFnG_k1ssBQGqKj5zw,26036 +coremltools/models/nearest_neighbors/__init__.py,sha256=gdGUD03yQ8QSXbiI5lQzQzCuOFSrenjX0Awszx89cSc,272 +coremltools/models/nearest_neighbors/__pycache__/__init__.cpython-310.pyc,, +coremltools/models/nearest_neighbors/__pycache__/builder.cpython-310.pyc,, +coremltools/models/nearest_neighbors/builder.py,sha256=8d5naMN2BB7SsIKpCweqZhQitQYQIE-Cp1BFEO2U2_I,21314 +coremltools/models/neural_network/__init__.py,sha256=Pi2j7iOWOOv3tb1oMwry3iIuYI9Yar0XfVW4jpAzPrk,486 +coremltools/models/neural_network/__pycache__/__init__.cpython-310.pyc,, +coremltools/models/neural_network/__pycache__/builder.cpython-310.pyc,, +coremltools/models/neural_network/__pycache__/flexible_shape_utils.cpython-310.pyc,, +coremltools/models/neural_network/__pycache__/optimization_utils.cpython-310.pyc,, +coremltools/models/neural_network/__pycache__/printer.cpython-310.pyc,, +coremltools/models/neural_network/__pycache__/quantization_utils.cpython-310.pyc,, +coremltools/models/neural_network/__pycache__/spec_inspection_utils.cpython-310.pyc,, +coremltools/models/neural_network/__pycache__/update_optimizer_utils.cpython-310.pyc,, +coremltools/models/neural_network/__pycache__/utils.cpython-310.pyc,, 
+coremltools/models/neural_network/builder.py,sha256=-Dlsymtrb_Q6TpShAmFbxpquT-TInG9YDNVY7zdLkao,337600 +coremltools/models/neural_network/flexible_shape_utils.py,sha256=B46ZaWbxBfNm4ZVLdJWsq7QqFi1aP7UyE3FyKoqGBZc,27323 +coremltools/models/neural_network/optimization_utils.py,sha256=KIRE1_EKzw5xgOZxuAZIWWYqitGgbgvO4PgpW-t1rhA,8194 +coremltools/models/neural_network/printer.py,sha256=TOfIgBgDp7xWUb6cA6udVEg03-by85C_w9Zwc0TIy3c,3748 +coremltools/models/neural_network/quantization_utils.py,sha256=YVYKpABtmom2LsN9COx6mDfem2dHNh8iLLzeOuGznY4,57685 +coremltools/models/neural_network/spec_inspection_utils.py,sha256=aEeoulN-oBEZvzfAXkM2vmPTJDobAEqXajKwR5vrcgA,10768 +coremltools/models/neural_network/update_optimizer_utils.py,sha256=qQZM0SjaHLZdVbth6kNcJOH5PUdIqiekMPKhish6nao,4775 +coremltools/models/neural_network/utils.py,sha256=Mq7lH7HLNTSu39aVAYuLj4VD7XqB76TzVdFWbWEGUk8,3967 +coremltools/models/pipeline.py,sha256=lLbm6IsZ7Lx1lpLoLdSGH2jwJJZ0MKfZwlNpv7rApKY,10916 +coremltools/models/tree_ensemble.py,sha256=fnnMaZBTUN79AQ27Y9MqUQo8DtaiBsHP3msYbKpqvzc,15764 +coremltools/models/utils.py,sha256=StUvsWc7thkPaMx8GPQ47zM4ArozER6NNEsOE5M8T4E,33597 +coremltools/proto/ArrayFeatureExtractor_pb2.py,sha256=n9QZfMjC8We5P-og9vRpYUMx2JlQXUrwxqODj61ikoo,2269 +coremltools/proto/AudioFeaturePrint_pb2.py,sha256=AveFTuEtE6soQiO0qKYVboVcU6sz2uEP4-1vtmscTXk,5197 +coremltools/proto/BayesianProbitRegressor_pb2.py,sha256=3862NhATie3vVRxR69S4SUoOH80UJrCHj2XfJc4uDVw,13073 +coremltools/proto/CategoricalMapping_pb2.py,sha256=v88lYib_p1kEM6kvJURtyxHQy89Ab6wcciw7TnLRWfU,5556 +coremltools/proto/ClassConfidenceThresholding_pb2.py,sha256=UeF1poavnXbfV4Sp6wcFGRzcvsqUkvNYxXZXua74SXI,2940 +coremltools/proto/CustomModel_pb2.py,sha256=XX4EwZBMIE-7HlFBIKYBuLRKa9H07uzsIyaqCk6femI,10557 +coremltools/proto/DataStructures_pb2.py,sha256=gy7-fsP8XCISGXQMbjHfWwSN3DiwmnYT6sxD9c8Mo1A,25856 +coremltools/proto/DictVectorizer_pb2.py,sha256=2ZnPXKbxdWy9PXpcuN8xnIkEsbeAOhqJPBsmz1EbyLs,3817 +coremltools/proto/FeatureTypes_pb2.py,sha256=GMIa8rmmyfm3tKS1tQQh807--FvjvYKV3WUmchBS0_g,38649 +coremltools/proto/FeatureVectorizer_pb2.py,sha256=T103AMfqJHZNCpNTKYW4O7CEGUSyNLRZOEXPcqN41og,4119 +coremltools/proto/GLMClassifier_pb2.py,sha256=VZ8IUgxc16YV-ixYBATdIW1pFhCOsMFahK99GN7eTVA,8780 +coremltools/proto/GLMRegressor_pb2.py,sha256=kHNL_QhJ1c0b9W9qtaVvzgNAhVeQ7gWn5HlzSaOKru8,5431 +coremltools/proto/Gazetteer_pb2.py,sha256=GEzYcai4VywOzKhyggMRgTcTULi6Kv7RTizbxTcaxlo,4337 +coremltools/proto/Identity_pb2.py,sha256=yLT0mbD-jLfiREQoZwqQWQr5R7vRXBC8lpcUgTx1S6s,1655 +coremltools/proto/Imputer_pb2.py,sha256=ud3XL7ea1C0_jePPNExH6X-ONtrrK-QoaMU3VeG3MHE,9310 +coremltools/proto/ItemSimilarityRecommender_pb2.py,sha256=oT_hrCPYTuI4-iCRLCD64JKvTuqkaLeik5dSBiCEQKE,11274 +coremltools/proto/LinkedModel_pb2.py,sha256=fh9o623qI2PT7rCqs9GZtWKG2Q96gmDiqDrJRtwkZ9A,5073 +coremltools/proto/MIL_pb2.py,sha256=P8Ykuz3H2-yteKySyjAbuxKIc5zr5QsZiIG4B8ShS48,83751 +coremltools/proto/Model_pb2.py,sha256=4zm7zLuL1thqU7zHwlCLGT_-ogsBdt5VtfaF6S_lDeQ,59302 +coremltools/proto/NamedParameters_pb2.py,sha256=L9sRHazQzNUsSg9E9AijBBsjCt0XryWus-xOVhPU9os,14471 +coremltools/proto/NearestNeighbors_pb2.py,sha256=0I7hB7wUBnKkGvpkdKH4N4LBBwWfDW59FB9qx-VPAPg,18991 +coremltools/proto/NeuralNetwork_pb2.py,sha256=e1SOqycUT7XWd-MJ13hhMKtTsI9m9oBFFvRxvEbCbvM,552750 +coremltools/proto/NonMaximumSuppression_pb2.py,sha256=QbK3qJpn_jD3bzM___wXzMBgdGwuNYiQw9WW5gBnsgs,10172 +coremltools/proto/Normalizer_pb2.py,sha256=c9ZjsaESTvzzfHB1rIokhwIYL7V5FjteAx9xZiVEPf0,3009 
+coremltools/proto/OneHotEncoder_pb2.py,sha256=EBuLvjKlGQV-8RvW92o5XiiJ87I625rsW3df5UdcJoc,5613 +coremltools/proto/Parameters_pb2.py,sha256=M4e4VNbF6qwft7kzXT4-qIwds9FwGpcM0GjimCpVEiw,8731 +coremltools/proto/SVM_pb2.py,sha256=eP1JHJzwAqNG4ZTsjydL0DGYLNyn0O20FfJXSkqy-Ik,29440 +coremltools/proto/Scaler_pb2.py,sha256=gl16Hx5vsmELjijDI3N0uB6AbfHrIv0jCUn8DhAvJO8,2397 +coremltools/proto/SoundAnalysisPreprocessing_pb2.py,sha256=qy-rjnqqCOZjen2QVUBn9PmVGXqs8Ls8uE_DSnLSbdQ,4090 +coremltools/proto/TextClassifier_pb2.py,sha256=YcsTT4e4UnxaAwNBsnlk4bNnLlH7EdB4RmEsKk_5Qe8,4464 +coremltools/proto/TreeEnsemble_pb2.py,sha256=TO3_Ix8-O8MMTkX_yUgvWoSPwkIyuiwD9Jmmnq1t_-8,20559 +coremltools/proto/VisionFeaturePrint_pb2.py,sha256=02WZADtAql6rvzXifW-6O6g72EtrU96VrYBZxzxnCdA,9188 +coremltools/proto/WordEmbedding_pb2.py,sha256=dCK32bm8W7Pn5UuTu7lBzCDjdysxPQLMQ0shWdA0cgo,3417 +coremltools/proto/WordTagger_pb2.py,sha256=hgRJyhmrGo9Bmm74uxqjRGCmoufpviBs0Y-DDJczH-g,6177 +coremltools/proto/__init__.py,sha256=Qp3qzHiz2yBHkv3ovvGcwliSoAsiRrL4GtR0TygN5k0,44 +coremltools/proto/__pycache__/ArrayFeatureExtractor_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/AudioFeaturePrint_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/BayesianProbitRegressor_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/CategoricalMapping_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/ClassConfidenceThresholding_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/CustomModel_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/DataStructures_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/DictVectorizer_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/FeatureTypes_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/FeatureVectorizer_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/GLMClassifier_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/GLMRegressor_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/Gazetteer_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/Identity_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/Imputer_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/ItemSimilarityRecommender_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/LinkedModel_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/MIL_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/Model_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/NamedParameters_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/NearestNeighbors_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/NeuralNetwork_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/NonMaximumSuppression_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/Normalizer_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/OneHotEncoder_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/Parameters_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/SVM_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/Scaler_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/SoundAnalysisPreprocessing_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/TextClassifier_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/TreeEnsemble_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/VisionFeaturePrint_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/WordEmbedding_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/WordTagger_pb2.cpython-310.pyc,, +coremltools/proto/__pycache__/__init__.cpython-310.pyc,, +coremltools/test/__init__.py,sha256=OeQ1kZJJmCaKdUkg2uZ6SB5YJki1wtmJe50FQJ2jf6o,218 +coremltools/test/__pycache__/__init__.cpython-310.pyc,, 
+coremltools/test/api/__init__.py,sha256=0v8QveTcIon5tuJ8MxBg0nR0YXo1dr1847H4W1-d62A,225 +coremltools/test/api/__pycache__/__init__.cpython-310.pyc,, +coremltools/test/api/__pycache__/test_api_examples.cpython-310.pyc,, +coremltools/test/api/__pycache__/test_api_visibilities.cpython-310.pyc,, +coremltools/test/api/test_api_examples.py,sha256=_tFg2G4wvwOwi650eEdNB1PjXt79sPcC3gnAWhwgq0w,19747 +coremltools/test/api/test_api_visibilities.py,sha256=4hC_SmW5PnimDZ5khBU7X4jQRDkYQC-wFEsAYkeBltE,6967 +coremltools/test/blob/__init__.py,sha256=lnJqw4kPK2WsLMwTuKg6W3KQ7fsE9KDz8VkbdYwZGB0,225 +coremltools/test/blob/__pycache__/__init__.cpython-310.pyc,, +coremltools/test/blob/__pycache__/test_weights.cpython-310.pyc,, +coremltools/test/blob/test_weights.py,sha256=JtynAC6byI2-CvCCUJzHkTN1kGQ2AGg2A3mwxbcOkSM,2604 +coremltools/test/ml_program/__init__.py,sha256=W3YJH2hWGWLcQ48vEYy5o2GcJgzS6rn6RogIBGMZ_Eo,214 +coremltools/test/ml_program/__pycache__/__init__.cpython-310.pyc,, +coremltools/test/ml_program/__pycache__/test_compression.cpython-310.pyc,, +coremltools/test/ml_program/test_compression.py,sha256=dgHmBwgR0Gln9qCV9RkzATVa_0PtM-nw4rDwxEC3KNM,20484 +coremltools/test/modelpackage/__init__.py,sha256=lnJqw4kPK2WsLMwTuKg6W3KQ7fsE9KDz8VkbdYwZGB0,225 +coremltools/test/modelpackage/__pycache__/__init__.cpython-310.pyc,, +coremltools/test/modelpackage/__pycache__/test_mlmodel.cpython-310.pyc,, +coremltools/test/modelpackage/__pycache__/test_modelpackage.cpython-310.pyc,, +coremltools/test/modelpackage/test_mlmodel.py,sha256=u9ITWH3xfE4npIgqPi6pIbpQAYiT3NF_IaXAItOWppQ,2137 +coremltools/test/modelpackage/test_modelpackage.py,sha256=z6fRzU_G9sFhWk3Uh1s_PxFWpMZzKQXdUuYA2m_EOiI,20790 +coremltools/test/neural_network/__init__.py,sha256=fOD8NncsWGGNnPwqckAPfPZqCASMyfaSrkFWrjoY8C0,215 +coremltools/test/neural_network/__pycache__/__init__.cpython-310.pyc,, +coremltools/test/neural_network/__pycache__/test_custom_neural_nets.cpython-310.pyc,, +coremltools/test/neural_network/__pycache__/test_model.cpython-310.pyc,, +coremltools/test/neural_network/__pycache__/test_neural_networks.cpython-310.pyc,, +coremltools/test/neural_network/__pycache__/test_nn_builder.cpython-310.pyc,, +coremltools/test/neural_network/__pycache__/test_numpy_nn_layers.cpython-310.pyc,, +coremltools/test/neural_network/__pycache__/test_quantization.cpython-310.pyc,, +coremltools/test/neural_network/__pycache__/test_simple_nn_inference.cpython-310.pyc,, +coremltools/test/neural_network/__pycache__/test_tf_numeric.cpython-310.pyc,, +coremltools/test/neural_network/test_custom_neural_nets.py,sha256=OUACUnpr2-hzG9dHx-IZMEXAGw_4GTShO8xjS1cR8Rw,3321 +coremltools/test/neural_network/test_model.py,sha256=llY4nqxEmp9uLDTr7QymV9orpQm-meJXwzANX4jpjlo,22507 +coremltools/test/neural_network/test_neural_networks.py,sha256=cIrFyvfhVzJao1zZMvaNk8POr-M4ugyvbPemDUuqIkU,2061 +coremltools/test/neural_network/test_nn_builder.py,sha256=TrtngRAH6m3WGl4h1TU6-HP4GIteEhXInW0BE2AwJ6A,24047 +coremltools/test/neural_network/test_numpy_nn_layers.py,sha256=GoNLC9mDNVyRk8vWYo3kGS-9_ALr_jr0NOzfYUlV0js,280978 +coremltools/test/neural_network/test_quantization.py,sha256=JQaKJYO7QFz8hbP4gjl9IrBLthTXR-XY65GiQrGzV2Q,18696 +coremltools/test/neural_network/test_simple_nn_inference.py,sha256=e5mjXp1n9gtmNOYnkhFrCcBtNqK7jBWiYlWAYGkEEMI,1780 +coremltools/test/neural_network/test_tf_numeric.py,sha256=L9nl7z6tnXO_l2X7y_ttqqE3zUrpkoO5fQz3Eu0oF6c,19367 +coremltools/test/pipeline/__init__.py,sha256=fOD8NncsWGGNnPwqckAPfPZqCASMyfaSrkFWrjoY8C0,215 
+coremltools/test/pipeline/__pycache__/__init__.cpython-310.pyc,, +coremltools/test/pipeline/__pycache__/test_model_updatable.cpython-310.pyc,, +coremltools/test/pipeline/__pycache__/test_pipeline.cpython-310.pyc,, +coremltools/test/pipeline/test_model_updatable.py,sha256=eN_932HI1ngSZYKCVth4nrgXwCtffKra36sBHr_qSMg,28313 +coremltools/test/pipeline/test_pipeline.py,sha256=ldrHNNoDHTutYsbvewjt1-2dOSim1O6GcTNxfiIa5f0,9810 +coremltools/test/sklearn_tests/__init__.py,sha256=fOD8NncsWGGNnPwqckAPfPZqCASMyfaSrkFWrjoY8C0,215 +coremltools/test/sklearn_tests/__pycache__/__init__.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_NuSVC.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_NuSVR.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_SVC.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_SVR.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_categorical_imputer.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_composite_pipelines.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_dict_vectorizer.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_feature_names.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_glm_classifier.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_imputer.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_io_types.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_k_neighbors_classifier.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_linear_regression.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_nearest_neighbors_builder.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_normalizer.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_one_hot_encoder.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_random_forest_classifier.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_random_forest_classifier_numeric.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_random_forest_regression.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_random_forest_regression_numeric.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_ridge_regression.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_standard_scalar.cpython-310.pyc,, +coremltools/test/sklearn_tests/__pycache__/test_utils.cpython-310.pyc,, +coremltools/test/sklearn_tests/test_NuSVC.py,sha256=Zf0QJcofmfHB9X8NdovCUcJKexPr9fGHKnZ0m5HNLok,11837 +coremltools/test/sklearn_tests/test_NuSVR.py,sha256=S58cT0v5_hfLrCCs7fAQqaxGnhBBeoOg3RpeUXq-Dww,7381 +coremltools/test/sklearn_tests/test_SVC.py,sha256=MYe9JUUkxRg8rvGbUHnB38b2rc3bT5YMGf2uzcD9aT0,14243 +coremltools/test/sklearn_tests/test_SVR.py,sha256=60n2wRXaX7tT-EI3qsfcfUVJJxsDoQoSk6k3ZKgymbQ,8747 +coremltools/test/sklearn_tests/test_categorical_imputer.py,sha256=_nVEHUKsMhL6Q8PG8cgl9_i7EzpknUFfOWRydXDiVt4,2599 +coremltools/test/sklearn_tests/test_composite_pipelines.py,sha256=QXfE_bwdPKnNffJdbFsR-YhEFh5HpT59DOIyQXaVV_k,3064 +coremltools/test/sklearn_tests/test_dict_vectorizer.py,sha256=b6x0sWfpsnesADVYkQYc1bRSHFYsUXLN0LdBiIsgm6E,3358 +coremltools/test/sklearn_tests/test_feature_names.py,sha256=IX-cWvErkBoaJiPHMZnQx1711gpbfs8MgCRMRLZ8nvM,1031 +coremltools/test/sklearn_tests/test_glm_classifier.py,sha256=Rv6n2zD6hyeFd0Sqk5xPHdKhWfhgx2DNOzbwTrRB27M,4400 
+coremltools/test/sklearn_tests/test_imputer.py,sha256=kokwW3tLOnRCa_PjJ9B3q_Z5WBOW5tgCzJ1YLeZpwoA,2545 +coremltools/test/sklearn_tests/test_io_types.py,sha256=qHq_6Zuz4hYMTVk0Oq_PCGg0k8YFg9YikMDLx_0dhC8,14446 +coremltools/test/sklearn_tests/test_k_neighbors_classifier.py,sha256=kUQPpEal6jnaTiLjgS5JIjQdf97IWdynpKdh78fytb8,11433 +coremltools/test/sklearn_tests/test_linear_regression.py,sha256=_dlpqZmOEOIf_VFLSO1OCtw-c5nGt5cRBAgwbnesUeg,5048 +coremltools/test/sklearn_tests/test_nearest_neighbors_builder.py,sha256=oAuhQIvWSKhrJLxRfsswiSFm0EdmhNKjVHFJLn_oNyA,16089 +coremltools/test/sklearn_tests/test_normalizer.py,sha256=ufzr-6vOvOH9GJA7468MtwlsbvxvSril4hztuLWFZOk,1915 +coremltools/test/sklearn_tests/test_one_hot_encoder.py,sha256=kTZT3u0h3uf6ulHbvl8AW47m1YU249PUJBMqQlbcmbg,10354 +coremltools/test/sklearn_tests/test_random_forest_classifier.py,sha256=Ms_wdG4hFEt2CLE8AV3xP309TKZH6iwMRJLSCxvcaIw,6468 +coremltools/test/sklearn_tests/test_random_forest_classifier_numeric.py,sha256=_9c3KXkB9Zv_2rNrrOMN0NDSBSJZQFsynPxO_C_p33E,5055 +coremltools/test/sklearn_tests/test_random_forest_regression.py,sha256=Rqv51iL4lXjOM9NewQnMTcTekXO73LhlLLKKodMYcSE,3348 +coremltools/test/sklearn_tests/test_random_forest_regression_numeric.py,sha256=7koorV7ORWMtYArqaNIoPFUJrGJ9v27Nq7TF5t-ARyA,3690 +coremltools/test/sklearn_tests/test_ridge_regression.py,sha256=o-dCWR7XEYLhUgBb2EshGp47CPkn7p4oa4fjZ4oXaQE,3884 +coremltools/test/sklearn_tests/test_standard_scalar.py,sha256=I0SMgMNiIdXqElNlzJiBArRKeXxW4w9hwJ7stHN7j-4,1960 +coremltools/test/sklearn_tests/test_utils.py,sha256=ju2vg0aN2SKn2Fm5EvmfVWR2GExqFIoI19-2SIe2NhI,1876 +coremltools/test/xgboost_tests/__init__.py,sha256=fOD8NncsWGGNnPwqckAPfPZqCASMyfaSrkFWrjoY8C0,215 +coremltools/test/xgboost_tests/__pycache__/__init__.cpython-310.pyc,, +coremltools/test/xgboost_tests/__pycache__/test_boosted_trees_classifier.cpython-310.pyc,, +coremltools/test/xgboost_tests/__pycache__/test_boosted_trees_classifier_numeric.cpython-310.pyc,, +coremltools/test/xgboost_tests/__pycache__/test_boosted_trees_regression.cpython-310.pyc,, +coremltools/test/xgboost_tests/__pycache__/test_boosted_trees_regression_numeric.cpython-310.pyc,, +coremltools/test/xgboost_tests/__pycache__/test_decision_tree_classifier.cpython-310.pyc,, +coremltools/test/xgboost_tests/__pycache__/test_decision_tree_classifier_numeric.cpython-310.pyc,, +coremltools/test/xgboost_tests/__pycache__/test_decision_tree_regression.cpython-310.pyc,, +coremltools/test/xgboost_tests/__pycache__/test_decision_tree_regression_numeric.cpython-310.pyc,, +coremltools/test/xgboost_tests/test_boosted_trees_classifier.py,sha256=eA7nO3AcZ_UAjnmM-l9oWwsFfHh4yJ7QhSb1tDpNaFw,12175 +coremltools/test/xgboost_tests/test_boosted_trees_classifier_numeric.py,sha256=kV8UPK5e7wmSftO1xE9D5pGvZaMXMKYet3lY1R65rYw,9737 +coremltools/test/xgboost_tests/test_boosted_trees_regression.py,sha256=MIxeTPUtKPpxRvQmmBIKAHmn3z69vGTVq7vOhiInbvI,7876 +coremltools/test/xgboost_tests/test_boosted_trees_regression_numeric.py,sha256=xg7dqQtKBMFKXh4mGY6xh9d6x1sUwD4QDK0dxbNxxng,10997 +coremltools/test/xgboost_tests/test_decision_tree_classifier.py,sha256=IsP3qauvPvN62Nkw17Vv1it343lTJV3MwmLmG_spOpc,5295 +coremltools/test/xgboost_tests/test_decision_tree_classifier_numeric.py,sha256=trzSdPcBuvS4RsWGbBis0W_VOXPM-AEm-_A0C6egAnc,5041 +coremltools/test/xgboost_tests/test_decision_tree_regression.py,sha256=r8vkwfJiMlNXP2HRh_tn9XVSeMQeWu7nzkB6l-RJvps,2999 +coremltools/test/xgboost_tests/test_decision_tree_regression_numeric.py,sha256=tJucU4K6vlGpdYp594JXjtd4ugSd7h2u9LyKmijpkSY,3755 
+coremltools/version.py,sha256=X-VlAgFopx9zcYo1V8MTQQ0OFrG68cMdesrft92v8lQ,257 diff --git a/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/REQUESTED b/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/REQUESTED new file mode 100644 index 00000000..e69de29b diff --git a/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/WHEEL b/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/WHEEL new file mode 100644 index 00000000..69da415f --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.38.4) +Root-Is-Purelib: true +Tag: cp310-none-macosx_11_0_arm64 + diff --git a/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/top_level.txt b/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/top_level.txt new file mode 100644 index 00000000..42075f79 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools-6.3.0.dist-info/top_level.txt @@ -0,0 +1 @@ +coremltools diff --git a/__packaged__/coreml/.python_dependencies/coremltools/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/__init__.py new file mode 100644 index 00000000..d741975e --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/__init__.py @@ -0,0 +1,114 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +""" +Core ML is an Apple framework which allows developers to simply and easily integrate machine +learning (ML) models into apps running on Apple devices (including iOS, watchOS, macOS, and +tvOS). Core ML introduces a public file format (.mlmodel) for a broad set of ML methods +including deep neural networks (both convolutional and recurrent), tree ensembles with boosting, +and generalized linear models. Models in this format can be directly integrated into apps +through Xcode. + +Coremltools is a python package for creating, examining, and testing models in the .mlpackage +and .mlmodel formats. In particular, it can be used to: + +* Convert existing models to .mlpackage or .mlmodel formats from popular machine learning tools including: + PyTorch, TensorFlow, scikit-learn, XGBoost and libsvm. +* Express models in .mlpackage and .mlmodel formats through a simple API. +* Make predictions with .mlpackage and .mlmodel files (on macOS). + +For more information: http://developer.apple.com/documentation/coreml +""" +from enum import Enum as _Enum +from logging import getLogger as _getLogger + +from .version import __version__ + +_logger = _getLogger(__name__) + +# This is the basic Core ML specification format understood by iOS 11.0 +SPECIFICATION_VERSION = 1 + +# New versions for iOS 11.2 features. Models which use these features should have these +# versions, but models created from this coremltools which do not use the features can +# still have the basic version. +_MINIMUM_CUSTOM_LAYER_SPEC_VERSION = 2 +_MINIMUM_FP16_SPEC_VERSION = 2 + +# New versions for iOS 12.0 features. Models which use these features should have these +# versions, but models created from this coremltools which do not use the features can +# still have the basic version. +_MINIMUM_CUSTOM_MODEL_SPEC_VERSION = 3 +_MINIMUM_QUANTIZED_MODEL_SPEC_VERSION = 3 +_MINIMUM_FLEXIBLE_SHAPES_SPEC_VERSION = 3 + +# New versions for iOS 13.0. 
+_MINIMUM_NDARRAY_SPEC_VERSION = 4
+_MINIMUM_NEAREST_NEIGHBORS_SPEC_VERSION = 4
+_MINIMUM_LINKED_MODELS_SPEC_VERSION = 4
+_MINIMUM_UPDATABLE_SPEC_VERSION = 4
+_SPECIFICATION_VERSION_IOS_13 = 4
+
+# New versions for iOS 14.0
+_SPECIFICATION_VERSION_IOS_14 = 5
+
+# New versions for iOS 15.0
+_SPECIFICATION_VERSION_IOS_15 = 6
+
+# New versions for iOS 16.0
+_SPECIFICATION_VERSION_IOS_16 = 7
+
+class ComputeUnit(_Enum):
+    '''
+    The set of processing-unit configurations the model can use to make predictions.
+    '''
+    ALL = 1          # Allows the model to use all compute units available, including the neural engine
+    CPU_AND_GPU = 2  # Allows the model to use both the CPU and GPU, but not the neural engine
+    CPU_ONLY = 3     # Limits the model to only use the CPU
+    CPU_AND_NE = 4   # Allows the model to use both the CPU and neural engine, but not the GPU.
+                     # Only available on macOS >= 13.0
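+
+# For example, a model can be restricted to the CPU at load time (an
+# illustrative editorial sketch, not upstream code -- it assumes an existing
+# "model.mlpackage" on disk):
+#
+#     import coremltools as ct
+#
+#     mlmodel = ct.models.MLModel("model.mlpackage",
+#                                 compute_units=ct.ComputeUnit.CPU_ONLY)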
+
+# A dictionary that maps the CoreML model specification version to the MLProgram/MIL opset string
+_OPSET = {
+    _SPECIFICATION_VERSION_IOS_13: "CoreML3",
+    _SPECIFICATION_VERSION_IOS_14: "CoreML4",
+    _SPECIFICATION_VERSION_IOS_15: "CoreML5",
+    _SPECIFICATION_VERSION_IOS_16: "CoreML6",
+}
+
+# Default specification version for each backend
+_LOWEST_ALLOWED_SPECIFICATION_VERSION_FOR_NEURALNETWORK = _SPECIFICATION_VERSION_IOS_13
+_LOWEST_ALLOWED_SPECIFICATION_VERSION_FOR_MILPROGRAM = _SPECIFICATION_VERSION_IOS_15
+
+
+# expose sub packages as directories
+from . import converters, models, proto
+
+# expose unified converter in coremltools package level
+from .converters import ClassifierConfig
+from .converters import ColorLayout as colorlayout
+from .converters import EnumeratedShapes, ImageType, RangeDim, Shape, TensorType, convert
+from .converters.mil._deployment_compatibility import AvailableTarget as target
+from .converters.mil.mil.passes.defs import quantization as transform
+from .converters.mil.mil.passes.pass_pipeline import PassPipeline
+from .converters.mil.mil.passes.defs.quantization import ComputePrecision as precision
+from .models import utils
+from .models.ml_program import compression_utils
+
+try:
+    from . import libcoremlpython
+except:
+    pass
+
+# Time profiling for functions in coremltools package, decorated with @profile
+import os as _os
+import sys as _sys
+
+from .converters._profile_utils import _profiler
+
+_ENABLE_PROFILING = _os.environ.get("ENABLE_PROFILING", False)
+
+if _ENABLE_PROFILING:
+    _sys.setprofile(_profiler)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/_deps/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/_deps/__init__.py
new file mode 100644
index 00000000..9d59acbe
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/_deps/__init__.py
@@ -0,0 +1,179 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+"""
+List of all external dependencies for this package. Imported as
+optional includes.
+"""
+import platform as _platform
+import re as _re
+import sys as _sys
+from distutils.version import StrictVersion as _StrictVersion
+
+from packaging import version
+
+from coremltools import _logger as logger
+
+
+def _get_version(version):
+    # matches versions like 1.6.1, 1.6.1rc, 1.6.1.dev
+    version_regex = r"^\d+\.\d+\.\d+"
+    version = _re.search(version_regex, str(version)).group(0)
+    return _StrictVersion(version)
+
+
+def _warn_if_above_max_supported_version(package_name, package_version, max_supported_version):
+    if _get_version(package_version) > _StrictVersion(max_supported_version):
+        logger.warning(
+            "%s version %s has not been tested with coremltools. You may run into unexpected errors. "
+            "%s %s is the most recent version that has been tested."
+            % (package_name, package_version, package_name, max_supported_version)
+        )
+
+
+# ---------------------------------------------------------------------------------------
+
+_IS_MACOS = _sys.platform == "darwin"
+_MACOS_VERSION = ()
+
+if _IS_MACOS:
+    ver_str = _platform.mac_ver()[0]
+    _MACOS_VERSION = tuple([int(v) for v in ver_str.split(".")])
+
+MSG_ONLY_MACOS = "Only supported on macOS"
+
+# ---------------------------------------------------------------------------------------
+_HAS_SKLEARN = True
+_SKLEARN_VERSION = None
+_SKLEARN_MIN_VERSION = "0.17"
+_SKLEARN_MAX_VERSION = "1.1.2"
+
+
+def __get_sklearn_version(version):
+    # matches versions like 0.15b, 0.16bf, etc.
+    version_regex = r"^\d+\.\d+"
+    version = _re.search(version_regex, str(version)).group(0)
+    return _StrictVersion(version)
+
+
+try:
+    import sklearn
+
+    _SKLEARN_VERSION = __get_sklearn_version(sklearn.__version__)
+    if _SKLEARN_VERSION < _StrictVersion(
+        _SKLEARN_MIN_VERSION
+    ) or _SKLEARN_VERSION > _StrictVersion(_SKLEARN_MAX_VERSION):
+        _HAS_SKLEARN = False
+        logger.warning(
+            (
+                "scikit-learn version %s is not supported. Minimum required version: %s. "
+                "Maximum supported version: %s. "
+                "Disabling scikit-learn conversion API."
+            )
+            % (sklearn.__version__, _SKLEARN_MIN_VERSION, _SKLEARN_MAX_VERSION)
+        )
+except:
+    _HAS_SKLEARN = False
+MSG_SKLEARN_NOT_FOUND = "Sklearn not found."
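+
+# For instance, downstream converter modules gate on these flags before touching
+# the optional dependency (an illustrative editorial sketch, not upstream code --
+# it mirrors the pattern used by the converters in this package; `_HAS_SKLEARN`
+# and `MSG_SKLEARN_NOT_FOUND` are defined above):
+#
+#     from coremltools._deps import _HAS_SKLEARN, MSG_SKLEARN_NOT_FOUND
+#
+#     if not _HAS_SKLEARN:
+#         raise RuntimeError(MSG_SKLEARN_NOT_FOUND)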
+
+# ---------------------------------------------------------------------------------------
+_HAS_LIBSVM = True
+try:
+    from libsvm import svm
+except:
+    _HAS_LIBSVM = False
+MSG_LIBSVM_NOT_FOUND = "Libsvm not found."
+
+# ---------------------------------------------------------------------------------------
+_HAS_XGBOOST = True
+_XGBOOST_MAX_VERSION = "1.4.2"
+try:
+    import xgboost
+    _warn_if_above_max_supported_version("XGBoost", xgboost.__version__, _XGBOOST_MAX_VERSION)
+except:
+    _HAS_XGBOOST = False
+
+# ---------------------------------------------------------------------------------------
+_HAS_TF = True
+_HAS_TF_1 = False
+_HAS_TF_2 = False
+_TF_1_MIN_VERSION = "1.12.0"
+_TF_1_MAX_VERSION = "1.15.4"
+_TF_2_MIN_VERSION = "2.1.0"
+_TF_2_MAX_VERSION = "2.12.0"
+
+try:
+    import tensorflow
+
+    tf_ver = _get_version(tensorflow.__version__)
+
+    # TensorFlow
+    if tf_ver < _StrictVersion("2.0.0"):
+        _HAS_TF_1 = True
+
+    if tf_ver >= _StrictVersion("2.0.0"):
+        _HAS_TF_2 = True
+
+    if _HAS_TF_1:
+        if tf_ver < _StrictVersion(_TF_1_MIN_VERSION):
+            logger.warning(
+                (
+                    "TensorFlow version %s is not supported. Minimum required version: %s. "
+                    "TensorFlow conversion will be disabled."
+                )
+                % (tensorflow.__version__, _TF_1_MIN_VERSION)
+            )
+        _warn_if_above_max_supported_version("TensorFlow", tensorflow.__version__, _TF_1_MAX_VERSION)
+    elif _HAS_TF_2:
+        if tf_ver < _StrictVersion(_TF_2_MIN_VERSION):
+            logger.warning(
+                (
+                    "TensorFlow version %s is not supported. Minimum required version: %s. "
+                    "TensorFlow conversion will be disabled."
+                )
+                % (tensorflow.__version__, _TF_2_MIN_VERSION)
+            )
+        _warn_if_above_max_supported_version("TensorFlow", tensorflow.__version__, _TF_2_MAX_VERSION)
+
+except:
+    _HAS_TF = False
+    _HAS_TF_1 = False
+    _HAS_TF_2 = False
+
+MSG_TF1_NOT_FOUND = "TensorFlow 1.x not found."
+MSG_TF2_NOT_FOUND = "TensorFlow 2.x not found."
+
+# ---------------------------------------------------------------------------------------
+_HAS_TORCH = True
+_TORCH_MAX_VERSION = "2.0.0"
+try:
+    import torch
+    _warn_if_above_max_supported_version("Torch", torch.__version__, _TORCH_MAX_VERSION)
+except:
+    _HAS_TORCH = False
+MSG_TORCH_NOT_FOUND = "PyTorch not found."
+
+
+# ---------------------------------------------------------------------------------------
+try:
+    import scipy
+except:
+    _HAS_SCIPY = False
+else:
+    _HAS_SCIPY = True
+
+# General utils
+def version_ge(module, target_version):
+    """
+    Example usage:
+
+    >>> import torch # v1.5.0
+    >>> version_ge(torch, '1.6.0') # False
+    """
+    return version.parse(module.__version__) >= version.parse(target_version)
+
+def version_lt(module, target_version):
+    """See version_ge"""
+    return version.parse(module.__version__) < version.parse(target_version)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/__init__.py
new file mode 100644
index 00000000..bca49bbb
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/__init__.py
@@ -0,0 +1,19 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+# expose directories as imports
+from . import libsvm
+from . import sklearn
+from . import xgboost
+from ._converters_entry import convert
+from .mil import (
+    ClassifierConfig,
+    ColorLayout,
+    TensorType,
+    ImageType,
+    RangeDim,
+    Shape,
+    EnumeratedShapes,
+)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/_converters_entry.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/_converters_entry.py
new file mode 100644
index 00000000..bc588de1
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/_converters_entry.py
@@ -0,0 +1,896 @@
+# Copyright (c) 2021, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import collections +import gc +import os +from typing import Optional, Text, Union + +from coremltools import ( + _LOWEST_ALLOWED_SPECIFICATION_VERSION_FOR_MILPROGRAM, + _LOWEST_ALLOWED_SPECIFICATION_VERSION_FOR_NEURALNETWORK, +) +from coremltools import ComputeUnit as _ComputeUnit +from coremltools import __version__ as _ct_version +from coremltools._deps import _HAS_TF_1, _HAS_TF_2, _HAS_TORCH +from coremltools.converters._profile_utils import _profile +from coremltools.converters.mil._deployment_compatibility import ( + AvailableTarget, + check_deployment_compatibility, +) +from coremltools.converters.mil.converter import mil_convert +from coremltools.converters.mil.input_types import ( + ClassifierConfig, + ImageType, + InputType, + TensorType, +) +from coremltools.converters.mil.mil import Program, types +from coremltools.converters.mil.mil.passes.defs.quantization import ComputePrecision as precision +from coremltools.converters.mil.mil.passes.defs.quantization import FP16ComputePrecision +from coremltools.converters.mil.mil.passes.graph_pass import PassOption as _PassOption +from coremltools.converters.mil.mil.passes.pass_pipeline import PassPipeline +from coremltools.models import _METADATA_SOURCE, _METADATA_VERSION +from coremltools.models.utils import _MLPACKAGE_EXTENSION + +if _HAS_TF_1: + import tensorflow as tf + + from coremltools.converters.mil.frontend.tensorflow.load import TF1Loader +if _HAS_TF_2: + import tensorflow as tf + + from coremltools.converters.mil.frontend.tensorflow2.load import TF2Loader + +if _HAS_TORCH: + import torch + + from coremltools.converters.mil.frontend.torch.load import \ + _torchscript_from_model as pytorch_load + + +@_profile +def convert( + model, + source="auto", + inputs=None, + outputs=None, + classifier_config=None, + minimum_deployment_target=None, + convert_to=None, + compute_precision=None, + skip_model_load=False, + compute_units=_ComputeUnit.ALL, + package_dir=None, + debug=False, + pass_pipeline: Optional[PassPipeline] = None, +): + """ + Convert a TensorFlow or PyTorch model to the Core ML model format as either + a neural network or an `ML program `_. + Some parameters and requirements differ for TensorFlow and PyTorch + conversions. + + Parameters + ---------- + + model : + TensorFlow 1, TensorFlow 2, or PyTorch model in one of the following + formats: + + * TensorFlow versions 1.x + + - Frozen `tf.Graph `_ + - Frozen graph (``.pb``) file path + - `tf.keras.Model `_ + - `HDF5 `_ file path (``.h5``) + - `SavedModel `_ directory path + + * TensorFlow versions 2.x + + - `tf.keras.Model `_ + - `HDF5 file path `_ (``.h5``) + - `SavedModel `_ directory path + - A `concrete function `_ + - A `GraphDef `_ + + * PyTorch + + - A `TorchScript `_ object + - Path to a ``.pt`` file + + source : str (optional) + + One of [``auto``, ``tensorflow``, ``pytorch``, ``milinternal``]. ``auto`` + determines the framework automatically for most cases. Raises + ``ValueError`` if it fails to determine the source framework. + + inputs : list of ``TensorType`` or ``ImageType`` + + * If you specify ``dtype`` with ``TensorType`` or ``ImageType``, it will + be applied to the input of the converted model. For example, the + following code snippet will produce a Core ML model with float 16 typed + inputs. + + .. 
sourcecode:: python
+
+            import coremltools as ct
+
+            mlmodel = ct.convert(
+                keras_model,
+                inputs=[ct.TensorType(dtype=np.float16)],
+                minimum_deployment_target=ct.target.macOS13,
+            )
+
+        * The following code snippet will produce a Core ML model with the
+          ``GRAYSCALE_FLOAT16`` input image type:
+
+          .. sourcecode:: python
+
+            import coremltools as ct
+
+            # H: image height, W: image width
+            mlmodel = ct.convert(
+                torch_model,
+                inputs=[
+                    ct.ImageType(shape=(1, 1, H, W), color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)
+                ],
+                minimum_deployment_target=ct.target.macOS13,
+            )
+
+        * TensorFlow 1 and 2 (including tf.keras):
+
+            - The ``inputs`` parameter is optional. If not provided, the inputs
+              are placeholder nodes in the model (if the model is a frozen graph)
+              or function inputs (if the model is a ``tf.function``).
+            - If ``inputs`` is provided, it must be a flat list.
+            - The ``inputs`` must correspond to all or some of the placeholder nodes
+              in the TF model.
+            - If ``name`` is specified with ``TensorType`` and ``ImageType``, it
+              must correspond to a placeholder op in the TF graph. The input names
+              in the converted Core ML model can later be modified using the
+              ``ct.utils.rename_feature`` API.
+            - If ``dtype`` is not specified, it defaults to the ``dtype`` of the
+              inputs in the TF model.
+
+        * PyTorch:
+
+            - The ``inputs`` parameter is required.
+            - Number of elements in ``inputs`` must match the number of inputs
+              of the PyTorch model.
+            - ``inputs`` may be a nested list or tuple.
+            - ``TensorType`` and ``ImageType`` must have the ``shape`` specified.
+            - If the ``name`` argument is specified with ``TensorType`` or
+              ``ImageType``, the converted Core ML model will have inputs with
+              the same name.
+            - If ``dtype`` is missing, it defaults to float 32.
+
+    outputs : list of ``TensorType`` or ``ImageType`` (optional)
+
+        * If you specify ``dtype`` with ``TensorType`` or ``ImageType``,
+          it will be applied to the output of the converted model. For example,
+          to produce float 16 typed inputs and outputs:
+
+          .. sourcecode:: python
+
+            import coremltools as ct
+
+            mlmodel = ct.convert(
+                keras_model,
+                inputs=[ct.TensorType(dtype=np.float16)],
+                outputs=[ct.TensorType(dtype=np.float16)],
+                minimum_deployment_target=ct.target.macOS13,
+            )
+
+        * To produce image inputs and outputs:
+
+          .. sourcecode:: python
+
+            import coremltools as ct
+
+            # H: image height, W: image width
+            mlmodel = ct.convert(
+                torch_model,
+                inputs=[ct.ImageType(shape=(1, 3, H, W), color_layout=ct.colorlayout.RGB)],
+                outputs=[ct.ImageType(color_layout=ct.colorlayout.RGB)],
+                minimum_deployment_target=ct.target.macOS13,
+            )
+
+        * TensorFlow 1 and 2 (including tf.keras):
+
+            - If ``outputs`` is not specified, the converter infers outputs from
+              the sink nodes in the graph.
+            - If specified, the ``name`` with ``TensorType`` or ``ImageType``
+              must correspond to a node in the TF graph. In this case, the model
+              will be converted up to that node.
+
+        * PyTorch:
+
+            - If specified, the length of the list must match the number of
+              outputs returned by the PyTorch model.
+            - If ``name`` is specified, it is applied to the output names of the
+              converted Core ML model.
+
+    classifier_config : ClassifierConfig class (optional)
+        The configuration if the MLModel is intended to be a classifier.
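+
+        For example (an illustrative sketch; ``traced_model`` stands in for any
+        traced Torch classifier, and ``labels.txt`` is assumed to contain one
+        class name per line):
+
+        .. sourcecode:: python
+
+            import coremltools as ct
+
+            mlmodel = ct.convert(
+                traced_model,
+                inputs=[ct.TensorType(shape=(1, 3, 224, 224))],
+                classifier_config=ct.ClassifierConfig("labels.txt"),
+            )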
+
+    minimum_deployment_target : coremltools.target enumeration (optional)
+        A member of the ``coremltools.target`` enum.
+        The value of this parameter determines the type of the model
+        representation produced by the converter. To learn about the differences
+        between neural networks and ML programs, see
+        `ML Programs `_.
+
+        - The converter produces a neural network (``neuralnetwork``) if:
+          ::
+             minimum_deployment_target <= coremltools.target.iOS14/
+                                          coremltools.target.macOS11/
+                                          coremltools.target.watchOS7/
+                                          coremltools.target.tvOS14:
+
+        - The converter produces an ML program (``mlprogram``) if:
+          ::
+             minimum_deployment_target >= coremltools.target.iOS15/
+                                          coremltools.target.macOS12/
+                                          coremltools.target.watchOS8/
+                                          coremltools.target.tvOS15:
+
+        - If neither the ``minimum_deployment_target`` nor the ``convert_to``
+          parameter is specified, the converter produces the neural network
+          model type with the lowest possible deployment target.
+        - If this parameter is specified and ``convert_to`` is also specified,
+          they must be compatible. The following are examples of invalid values:
+          ::
+            # Invalid:
+            convert_to="neuralnetwork", minimum_deployment_target=coremltools.target.iOS15
+            # Invalid:
+            convert_to="mlprogram", minimum_deployment_target=coremltools.target.iOS14
+
+    convert_to : str (optional)
+        Must be one of [``'neuralnetwork'``, ``'mlprogram'``, ``'milinternal'``].
+        The value of this parameter determines the type of the model
+        representation produced by the converter. To learn about the
+        differences between neural networks and ML programs, see
+        `ML Programs `_.
+
+        - ``'neuralnetwork'``: Returns an MLModel (``coremltools.models.MLModel``)
+          containing a NeuralNetwork proto, which is the original Core ML format.
+          The model saved from this returned object is executable either on
+          iOS13/macOS10.15/watchOS6/tvOS13 and newer, or on
+          iOS14/macOS11/watchOS7/tvOS14 and newer, depending on the layers used
+          in the model.
+        - ``'mlprogram'`` : Returns an MLModel (``coremltools.models.MLModel``)
+          containing a MILSpec.Program proto, which is the Core ML program format.
+          The model saved from this returned object is executable on iOS15,
+          macOS12, watchOS8, and tvOS15.
+        - ``'milinternal'``: Returns an MIL program object
+          (``coremltools.converters.mil.Program``). An MIL program is primarily
+          used for debugging and inspection. It can be converted to an MLModel for
+          execution by using one of the following:
+          ::
+             ct.convert(mil_program, convert_to="neuralnetwork")
+             ct.convert(mil_program, convert_to="mlprogram")
+
+        - If neither the ``minimum_deployment_target`` nor the ``convert_to``
+          parameter is specified, the converter produces the neural network
+          model type with the lowest possible deployment target.
+
+    compute_precision : coremltools.precision enumeration or ct.transform.FP16ComputePrecision() (optional)
+
+        Use this argument to control the storage precision of the tensors in the
+        ML program. Must be one of the following.
+
+        - ``coremltools.precision.FLOAT16`` enum: The following transform is
+          applied to produce a float 16 program; that is, a program in which all
+          the intermediate float tensors are of type float 16 (for ops that
+          support that type).
+          ::
+             coremltools.transform.FP16ComputePrecision(op_selector=
+                                                        lambda op:True)
+
+          The above transform iterates through all the ops, looking at each op's
+          inputs and outputs. If they are of type float 32, ``cast``
+          ops are injected to convert those tensors (also known as `vars`) to
+          type float 16.
+
+        - ``coremltools.precision.FLOAT32`` enum: No transform is applied.
+
+          The original float32 tensor dtype in the source model is preserved.
+          Opt into this option if the default converted model exhibits
+          numerical precision issues.
+
+        - ``coremltools.transform.FP16ComputePrecision(op_selector=...)``
+
+          Use this option to control which tensors are cast to float 16.
+          Before casting the inputs/outputs of any op from float32 to float 16,
+          the op_selector function is invoked on the op object. This function
+          must return a boolean value. By default it returns ``True`` for every op,
+          but you can customize this.
+
+          For example:
+          ::
+             coremltools.transform.FP16ComputePrecision(op_selector=
+                                                        lambda op: op.op_type != "linear")
+
+          The above casts all the float32 tensors to be float 16, except
+          the input/output tensors to any ``linear`` op. See more examples
+          below.
+
+        - ``None``: The default
+
+          - When ``convert_to="mlprogram"``, the ``compute_precision`` parameter
+            defaults to ``coremltools.precision.FLOAT16``.
+          - When ``convert_to="neuralnetwork"``, the ``compute_precision`` parameter
+            needs to be ``None`` and has no meaning.
+          - For example, you can customize the float 16 precision transform to prevent
+            casting all the ``real_div`` ops in the program to float 16
+            precision:
+
+            .. sourcecode:: python
+
+                def skip_real_div_ops(op):
+                    if op.op_type == "real_div":
+                        return False
+                    return True
+
+
+                model = ct.convert(
+                    source_model,
+                    compute_precision=ct.transform.FP16ComputePrecision(op_selector=skip_real_div_ops),
+                    minimum_deployment_target=ct.target.iOS15,
+                )
+
+    skip_model_load : bool
+        Set to ``True`` to prevent coremltools from calling into the Core ML framework
+        to compile and load the model, post-conversion. In that case, the returned
+        model object cannot be used to make a prediction, but can be used to save
+        with ``model.save()``. This flag may be used to convert to a newer model type
+        on an older Mac, which may raise a runtime warning if done without
+        turning this flag on.
+
+        Example: Use this flag to suppress a runtime warning when converting to an
+        ML program model on macOS 11, since an ML program can only be compiled and
+        loaded from macOS12+.
+
+        Defaults to ``False``.
+
+    compute_units: coremltools.ComputeUnit
+
+        An enum with the following possible values.
+
+        - ``coremltools.ComputeUnit.ALL``: Use all compute units available, including the
+          neural engine.
+        - ``coremltools.ComputeUnit.CPU_ONLY``: Limit the model to only use the CPU.
+        - ``coremltools.ComputeUnit.CPU_AND_GPU``: Use both the CPU and GPU, but not the
+          neural engine.
+        - ``coremltools.ComputeUnit.CPU_AND_NE``: Use both the CPU and neural engine, but
+          not the GPU. Available only for macOS >= 13.0.
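+
+        For example (an illustrative sketch; ``source_model`` stands in for any
+        supported input model):
+
+        .. sourcecode:: python
+
+            import coremltools as ct
+
+            mlmodel = ct.convert(
+                source_model,
+                compute_units=ct.ComputeUnit.CPU_ONLY,
+            )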
+
+    package_dir : str
+        Post conversion, the model is saved at a temporary location and
+        loaded to form the MLModel object ready for prediction.
+
+        * If ``package_dir`` is provided, the model will be saved at this location
+          rather than in a temporary directory.
+        * If not ``None``, this must be a path to a directory with the extension
+          ``.mlpackage``.
+
+    debug : bool
+        This flag should generally be ``False`` except for debugging purposes.
+        Setting this flag to ``True`` produces the following behavior:
+
+        - For Torch conversion, it will print the list of supported and
+          unsupported ops found in the model if conversion fails due to an
+          unsupported op.
+        - For TensorFlow conversion, it will display extra logging
+          and visualizations.
+
+    pass_pipeline : PassPipeline
+        Manage graph passes. You can control which graph passes to run and the
+        order of the graph passes. You can also specify options for each pass.
See the details in the docstring of + PassPipeline (``coremltools/converters/mil/mil/passes/pass_pipeline.py``). + + * To avoid fusing the ``conv`` and ``batchnorm`` ops, skip the corresponding pass + as shown in the following example: + + .. sourcecode:: python + + pipeline = ct.PassPipeline() + pipeline.remove_passes({"common::fuse_conv_batchnorm"}) + ct.convert(model, pass_pipeline=pipeline) + + * To avoid folding too-large ``const`` ops that lead to a large model, set pass option + as shown in the following example: + + .. sourcecode:: python + + pipeline = ct.PassPipeline() + pipeline.set_options("common::const_elimination", {"skip_const_by_size": "1e6"}) + ct.convert(model, pass_pipeline=pipeline) + + Returns + ------- + + model : ``coremltools.models.MLModel`` or ``coremltools.converters.mil.Program`` + A Core ML MLModel object or MIL program object (see ``convert_to``). + + Examples + -------- + + TensorFlow 1, 2 (``model`` is a frozen graph): + + >>> with tf.Graph().as_default() as graph: + >>> x = tf.placeholder(tf.float32, shape=(1, 2, 3), name="input") + >>> y = tf.nn.relu(x, name="output") + + Automatically infer inputs and outputs: + + >>> mlmodel = ct.convert(graph) + >>> test_input = np.random.rand(1, 2, 3) - 0.5 + >>> results = mlmodel.predict({"input": test_input}) + >>> print(results['output']) + + TensorFlow 2 (``model`` is a tf.Keras model path): + + >>> x = tf.keras.Input(shape=(32,), name='input') + >>> y = tf.keras.layers.Dense(16, activation='softmax')(x) + >>> keras_model = tf.keras.Model(x, y) + + >>> keras_model.save(h5_path) + >>> mlmodel = ct.convert(h5_path) + + >>> test_input = np.random.rand(2, 32) + >>> results = mlmodel.predict({'input': test_input}) + >>> print(results['Identity']) + + PyTorch: + + >>> model = torchvision.models.mobilenet_v2() + >>> model.eval() + >>> example_input = torch.rand(1, 3, 256, 256) + >>> traced_model = torch.jit.trace(model, example_input) + + >>> input = ct.TensorType(name='input_name', shape=(1, 3, 256, 256)) + >>> mlmodel = ct.convert(traced_model, inputs=[input]) + >>> results = mlmodel.predict({"input": example_input.numpy()}) + >>> print(results['1651']) # 1651 is the node name given by PyTorch's JIT + + See `Conversion Options `_ for + more advanced options. + """ + _check_deployment_target(minimum_deployment_target) + outputs_as_strings, outputs_as_tensor_or_image_types = _validate_outputs_argument(outputs) + exact_source = _determine_source(model, source, + outputs_as_strings, + outputs_as_tensor_or_image_types, + outputs) + exact_target = _determine_target(convert_to, minimum_deployment_target) + _validate_conversion_arguments(model, exact_source, inputs, outputs_as_tensor_or_image_types, + classifier_config, compute_precision, + exact_target, minimum_deployment_target) + + if pass_pipeline is None: + pass_pipeline = PassPipeline() + if not _need_fp16_cast_pass(compute_precision, exact_target): + pass_pipeline.remove_passes({"common::add_fp16_cast"}) + if isinstance(compute_precision, FP16ComputePrecision): + # For backward compatibility with the `op_selector` param in FP16ComputePrecision. 
+ pass_pipeline._pass_options["common::add_fp16_cast"] = [ + _PassOption(option_name="op_selector", option_val=compute_precision.op_selector) + ] + + if package_dir is not None: + _, ext = os.path.splitext(package_dir) + if ext != _MLPACKAGE_EXTENSION: + raise ValueError( + f"`package_dir` must have extension {_MLPACKAGE_EXTENSION} (not {ext})" + ) + + specification_version = minimum_deployment_target.value if minimum_deployment_target is not None else None + + if specification_version is None: + specification_version = _set_default_specification_version(exact_target) + + mlmodel = mil_convert( + model, + convert_from=exact_source, + convert_to=exact_target, + inputs=inputs, + outputs=outputs_as_tensor_or_image_types, # None or list[ct.ImageType/ct.TensorType] + classifier_config=classifier_config, + skip_model_load=skip_model_load, + compute_units=compute_units, + package_dir=package_dir, + debug=debug, + specification_version=specification_version, + main_pipeline=pass_pipeline, + ) + + if exact_target == 'milinternal': + return mlmodel # Returns the MIL program + + if minimum_deployment_target is not None: + check_deployment_compatibility( + spec=mlmodel.get_spec(), + representation=exact_target, + deployment_target=minimum_deployment_target, + ) + + gc.collect() + + mlmodel = _record_build_metadata(mlmodel, exact_source) + + return mlmodel + + +def _need_fp16_cast_pass( + compute_precision: Optional[Union[precision, FP16ComputePrecision]], convert_to: Text +) -> bool: + if convert_to not in ("mlprogram", "neuralnetwork", "milinternal", "milpython"): + raise NotImplementedError(f"Backend converter {convert_to} not implemented") + + if compute_precision is None: + return convert_to != "neuralnetwork" + elif compute_precision == precision.FLOAT32: + return False + elif compute_precision == precision.FLOAT16 or isinstance( + compute_precision, FP16ComputePrecision + ): + return True + else: + raise ValueError(f"Invalid value of the argument 'compute_precision': {compute_precision}") + + +def _set_default_specification_version(target): + if target == "neuralnetwork": + return _LOWEST_ALLOWED_SPECIFICATION_VERSION_FOR_NEURALNETWORK + elif target == "mlprogram": + return _LOWEST_ALLOWED_SPECIFICATION_VERSION_FOR_MILPROGRAM + elif target in ("milinternal", "milpython"): + return None + else: + raise NotImplementedError("Backend converter {} not implemented".format(target)) + + +def _check_deployment_target(minimum_deployment_target): + if minimum_deployment_target is not None and not isinstance( + minimum_deployment_target, AvailableTarget + ): + msg = ( + "Unrecognized value of argument 'minimum_deployment_target': {}. " + "It needs to be a member of 'coremltools.target' enumeration. " + "For example, coremltools.target.iOS13" + ) + raise TypeError(msg.format(minimum_deployment_target)) + + +def _validate_outputs_argument(outputs): + """ + - validate properties that the "outputs" argument must satisfy, for instance, it should either be a list + of ct.ImageType/ct.TensorType or a list of strings, etc. 
+    - return : tuple
+        - (outputs_as_strings, outputs_as_tensor_or_image_types)
+        - outputs_as_strings: list[str]
+        - outputs_as_tensor_or_image_types : list[ct.ImageType] or list[ct.TensorType]
+    """
+    if outputs is None:
+        return None, None
+    else:
+        if not isinstance(outputs, list):
+            raise ValueError('"outputs" must be of type list')
+        if len(outputs) == 0:
+            return None, None
+        if not all(map(lambda t: isinstance(t, (ImageType, str, TensorType)), outputs)):
+            raise ValueError('Elements in "outputs" must be ct.TensorType or ct.ImageType or str')
+
+        msg_inconsistent_types = 'all elements of "outputs" must either be of type str ' \
+                                 'or of types ct.ImageType/ct.TensorType'
+        if isinstance(outputs[0], str):
+            # if one of the elements is a string, all elements must be strings
+            if not all([isinstance(t, str) for t in outputs]):
+                raise ValueError(msg_inconsistent_types)
+            return outputs, [TensorType(name=name) for name in outputs]
+
+        if isinstance(outputs[0], InputType):
+            if not all([isinstance(t, TensorType) or isinstance(t, ImageType) for t in outputs]):
+                raise ValueError(msg_inconsistent_types)
+            if any([t.shape is not None for t in outputs]):
+                msg = "The 'shape' argument must not be specified for the outputs, since it is " \
+                      "automatically inferred from the input shapes and the ops in the model"
+                raise ValueError(msg)
+            for out_ in outputs:
+                if isinstance(out_, TensorType):
+                    if out_.default_value is not None:
+                        raise ValueError(
+                            "The 'default_value' argument must not be specified for the outputs"
+                        )
+                if isinstance(out_, ImageType):
+                    if out_.scale != 1.0:
+                        raise ValueError("'scale' must be 1.0 for an output of ImageType")
+                    if not (out_.bias is None or out_.bias == 0.0 or out_.bias == [0.0, 0.0, 0.0]):
+                        raise ValueError("'bias' must be None or 0 for an output of ImageType")
+                    if out_.channel_first is not None:
+                        raise ValueError("'channel_first' must be None for an output of ImageType")
+            output_names = [t.name for t in outputs]
+            # verify that either all of the entries in output_names are None, or none of them is
+            msg_consistent_names = 'Either none or all the outputs must have the "name" argument specified'
+            if output_names[0] is None and not all([name is None for name in output_names]):
+                raise ValueError(msg_consistent_names)
+            if output_names[0] is not None and not all([name is not None for name in output_names]):
+                raise ValueError(msg_consistent_names)
+            if output_names[0] is not None:
+                if len(set(output_names)) != len(output_names):
+                    raise ValueError("Duplicate names provided in 'outputs'")
+            if output_names[0] is None:
+                return None, outputs
+            else:
+                return output_names, outputs
+
+
+def _validate_conversion_arguments(model,
+                                   exact_source,
+                                   inputs,
+                                   outputs,
+                                   classifier_config,
+                                   compute_precision,
+                                   convert_to,
+                                   minimum_deployment_target,
+                                   ):
+    """
+    Validate and process model, inputs, classifier_config based on
+    `exact_source` (which cannot be `auto`)
+    """
+
+    def raise_if_duplicated(input_list):
+        # Detect duplicated inputs
+        input_names = [t.name for t in input_list if t.name is not None]
+        dups = [
+            item
+            for item, count in collections.Counter(input_names).items()
+            if count > 1
+        ]
+        if len(dups) > 0:
+            raise ValueError("Duplicated inputs: {}".format(dups))
+
+    def _flatten_list(_inputs):
+        ret = []
+        for _input in _inputs:
+            if isinstance(_input, (list, tuple)):
+                ret.extend(_flatten_list(_input))
+            elif isinstance(_input, InputType):
+                ret.append(_input)
+            else:
+                raise ValueError(
+                    "Unknown type {} for flattening into 
InputType.".format( + type(_input) + ) + ) + return ret + + flat_inputs = None + if inputs is not None: + if not isinstance(inputs, list): + raise ValueError("`inputs` must be of type list") + + # get flattened inputs + flat_inputs = _flatten_list(inputs) + for t in flat_inputs: + if not isinstance(t, InputType): + raise ValueError("inputs must be a list of type ct.TensorType or ct.ImageType") + if t.dtype == types.fp16: + if not ( + minimum_deployment_target is not None + and minimum_deployment_target >= AvailableTarget.iOS16 + ): + raise TypeError( + "float16 dtype for inputs is only supported for deployment " + "target >= iOS16/macOS13/watchOS9/tvOS16" + ) + + if outputs is not None: + for t in outputs: + if t.dtype == types.fp16: + if not ( + minimum_deployment_target is not None + and minimum_deployment_target >= AvailableTarget.iOS16 + ): + raise TypeError( + "float16 dtype for outputs is only supported for deployment " + "target >= iOS16/macOS13/watchOS9/tvOS16" + ) + + if classifier_config is not None: + if not isinstance(classifier_config, ClassifierConfig): + raise ValueError("`classifier_config` must be of type ClassifierConfig") + + if convert_to.lower() == "neuralnetwork" and compute_precision is not None: + raise ValueError( + "compute_precision is only supported for mlprogram target and must be " + "None if target=='neuralnetwork'. Note that target may be implicitly set " + "depending on the minimum_deployment_target. See " + "minimum_deployment_target for more details." + ) + + if compute_precision is not None: + if compute_precision not in [precision.FLOAT32, precision.FLOAT16]: + if not isinstance(compute_precision, FP16ComputePrecision): + raise ValueError( + "'compute_precision' must be either coremltools.precision.FLOAT32 " + "or coremltools.precision.FLOAT16 or of type " + "coremltools.transform.FP16ComputePrecision()" + ) + + if exact_source in {"tensorflow", "tensorflow2"}: + if exact_source == "tensorflow" and not _HAS_TF_1: + raise ValueError( + 'Converter was called with source="tensorflow", but missing ' "tensorflow package" + ) + + if inputs is not None: + raise_if_duplicated(inputs) + + if inputs is not None and not all([isinstance(_input, InputType) for _input in inputs]): + raise ValueError("Input should be a list of TensorType or ImageType") + + elif exact_source == "pytorch": + if inputs is None: + raise ValueError('Expected argument for pytorch "inputs" not provided') + + raise_if_duplicated(flat_inputs) + if inputs is not None and not all( + [isinstance(_input, InputType) for _input in flat_inputs] + ): + raise ValueError( + "Input should be a list/tuple (or nested lists/tuples) of TensorType or ImageType" + ) + + elif exact_source == "milinternal": + if not isinstance(model, Program): + raise ValueError( + "Converter was asked to convert MIL input, but input is not a MIL " "program!" + ) + + +def _determine_source(model, source, + output_names, + outputs_as_tensor_or_image_types, + output_argument_as_specified_by_user): + """ + Infer source (which can be auto) to the precise framework. + """ + source = source.lower() + if source not in {"auto", "tensorflow", "pytorch", "milinternal"}: + raise ValueError( + f'Unrecognized value of argument "source": {source}. It must be one of ["auto", "tensorflow", "pytorch"].' 
+ ) + + # Determine tensorflow version + if source == "tensorflow" and _HAS_TF_2: + return "tensorflow2" + + if source != 'auto': + return source + + # Determine `auto` source + if source == "auto" and _HAS_TF_1: + try: + loader = TF1Loader(model, outputs=outputs_as_tensor_or_image_types) + loader._graph_def_from_model(output_names=output_names) + return "tensorflow" + except: + pass + + if source == "auto" and _HAS_TF_2: + try: + loader = TF2Loader(model, outputs=outputs_as_tensor_or_image_types) + loader._graph_def_from_model(output_names=output_names) + return "tensorflow2" + except: + pass + + if source == "auto" and _HAS_TORCH: + is_torch_load_successful = False + try: + pytorch_load(model) + is_torch_load_successful = True + except: + pass + if is_torch_load_successful: + # validate that the outputs passed by the user are of type ImageType/TensorType + if output_argument_as_specified_by_user is not None and not all( + [ + isinstance(t, TensorType) or isinstance(t, ImageType) + for t in output_argument_as_specified_by_user + ] + ): + raise ValueError( + '"outputs" must be a list of type ct.TensorType or ct.ImageType ' + "for pytorch conversion" + ) + return "pytorch" + + if source == "auto" and isinstance(model, Program): + return "milinternal" + + msg = ( + "Unable to determine the type of the model, i.e. the source framework. " + 'Please provide the value of argument "source", from one of ' + '["tensorflow", "pytorch", "milinternal"]. Note that model conversion requires the ' + "source package that generates the model. Please make sure you have " + "the appropriate version of source package installed. E.g., if you're " + "converting model originally trained with TensorFlow 1.14, make sure " + "you have `tensorflow==1.14` installed." + ) + raise ValueError(msg) + + +def _determine_target(convert_to, minimum_deployment_target): + """ + Infer the precise backend target, which could be one of ``milinternal``, ``neuralnetwork`` or ``mlprogram`` + """ + if minimum_deployment_target is not None: + if convert_to == "mlprogram" and minimum_deployment_target < AvailableTarget.iOS15: + raise ValueError( + f"When 'convert_to' is {convert_to}, the minimum deployment target " + f"must be at least iOS15/macOS12/watchOS8/tvOS15" + ) + + if convert_to == "neuralnetwork" and minimum_deployment_target >= AvailableTarget.iOS15: + raise ValueError( + f"If minimum deployment target is iOS15/macOS12/watchOS8/tvOS15 or " + f"higher, then 'convert_to' cannot be {convert_to}. 
It must be " + f"'mlprogram'" + ) + + if convert_to is not None: + return convert_to + else: + if minimum_deployment_target is None: + return "neuralnetwork" + elif minimum_deployment_target <= AvailableTarget.iOS14: + return "neuralnetwork" + else: + return "mlprogram" + + +def _get_metadata_from_mlmodel(mlmodel): + # Copy from source mlmodel if metadata info exists + src_pkg_version = mlmodel.user_defined_metadata[_METADATA_SOURCE] + coremltools_version = mlmodel.user_defined_metadata[_METADATA_VERSION] + + src_pkg_version_list = src_pkg_version.split("==") + if len(src_pkg_version_list) == 0: + src_pkg, pkg_ver = None, None + elif len(src_pkg_version_list) == 1: + src_pkg, pkg_ver = src_pkg_version_list[0], "" + elif len(src_pkg_version_list) == 2: + src_pkg, pkg_ver = src_pkg_version_list + else: + raise AssertionError("Unable to parse src_pkg_version") + + build_info = { + "coremltools-version": _ct_version if not coremltools_version else coremltools_version + } + if src_pkg is not None and pkg_ver is not None: + build_info['coremltools-component-' + src_pkg] = str(pkg_ver) + + return build_info + + +def _record_build_metadata(mlmodel, exact_source): + # recording metadata: coremltools version, source framework and version + if exact_source in {"tensorflow", "tensorflow2"} and (_HAS_TF_1 or _HAS_TF_2): + src_pkg_version = "tensorflow=={0}".format(tf.__version__) + elif exact_source == "pytorch" and _HAS_TORCH: + src_pkg_version = "torch=={0}".format(torch.__version__) + elif exact_source == 'milinternal': + src_pkg_version = "milinternal" + else: + raise ValueError('Unsupported source {}'.format(exact_source)) + + mlmodel.user_defined_metadata[_METADATA_SOURCE] = src_pkg_version + mlmodel.user_defined_metadata[_METADATA_VERSION] = _ct_version + + build_info = _get_metadata_from_mlmodel(mlmodel) + + mlmodel._set_build_info_mil_attributes(build_info) + + return mlmodel diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/_profile_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/_profile_utils.py new file mode 100644 index 00000000..1f59c4a2 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/_profile_utils.py @@ -0,0 +1,80 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os +import time + +_FUNCTION_PROFILE_REGISTRY = {} # str -> list (function name to time stack) +_ENABLE_PROFILING = os.environ.get("ENABLE_PROFILING", False) + + +def _profile(_f=None): + def func_wrapper(func): + f_name = func.__module__ + "." + func.__name__ + if f_name in _FUNCTION_PROFILE_REGISTRY: + raise ValueError( + "Function {} is already registered for profiling.".format(f_name) + ) + + _FUNCTION_PROFILE_REGISTRY[f_name] = [] + return func + + if _f is None: + return func_wrapper + return func_wrapper(_f) + + +_INITIAL_CALL = True + + +def _pr_color(skk, color="94m", end="\n"): + print("\033[{} {}\033[00m".format(color, skk), end=end) + + +def _profiler(frame, event, arg, indent=[0]): + if frame.f_globals.get("__name__", None) is None: + return + + package_name = __name__.split(".")[0] + + function_name = frame.f_globals["__name__"] + "." 
+ frame.f_code.co_name
+
+    profile_function = (
+        package_name in str(frame) and function_name in _FUNCTION_PROFILE_REGISTRY
+    )
+
+    if event == "call" and profile_function:
+        global _INITIAL_CALL
+        if _INITIAL_CALL:
+            _INITIAL_CALL = False
+            print("\n" * 2)
+
+        indent[0] += 3
+        _pr_color(
+            "{} call {} {}".format(
+                "=" * indent[0] + ">",
+                function_name.split(".")[-1],
+                " (" + ".".join(function_name.split(".")[2:-1]) + ")",
+            )
+        )
+        # time.clock() was removed in Python 3.8; use a monotonic high-resolution timer.
+        start_time = time.perf_counter()
+        _FUNCTION_PROFILE_REGISTRY[function_name].append(start_time)
+
+    elif event == "return" and profile_function:
+        duration = time.perf_counter() - _FUNCTION_PROFILE_REGISTRY[function_name][-1]
+        duration = round(duration, 4)
+        _pr_color(
+            "{} exit {} {} ".format(
+                "<" + "=" * indent[0],
+                function_name.split(".")[-1],
+                " (" + ".".join(function_name.split(".")[2:-1]) + ")",
+            ),
+            end="",
+        )
+        _pr_color(": Time spent {} seconds ".format(duration), color="91m")
+        indent[0] -= 3
+        _FUNCTION_PROFILE_REGISTRY[function_name].pop()
+
+    return _profiler
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/libsvm/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/libsvm/__init__.py
new file mode 100644
index 00000000..3278bfce
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/libsvm/__init__.py
@@ -0,0 +1,108 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+
+from ..._deps import _HAS_LIBSVM
+from . import _libsvm_converter, _libsvm_util
+
+if _HAS_LIBSVM:
+    from libsvm import svmutil as _svmutil
+
+
+def convert(
+    model,
+    input_names="input",
+    target_name="target",
+    probability="classProbability",
+    input_length="auto",
+):
+    """
+    Convert a LIBSVM model to Core ML format.
+
+    Parameters
+    ----------
+
+    model: a libsvm model (C-SVC, nu-SVC, epsilon-SVR, or nu-SVR)
+        or string path to a saved model.
+
+    input_names: str | [str]
+        Name of the input column(s).
+        If a single string is used (the default), the input will be an array. The
+        length of the array will be inferred from the model; this can be overridden
+        using the 'input_length' parameter.
+
+    target_name: str
+        Name of the output column.
+
+    probability: str
+        Name of the output class probability column.
+        Only used for C-SVC and nu-SVC models that have been trained with probability
+        estimates enabled.
+
+    input_length: int
+        Set the length of the input array.
+        This parameter should only be used when the input is an array (i.e. when
+        'input_names' is a single string).
+
+    Returns
+    -------
+    model: MLModel
+        Model in Core ML format.
+
+    Examples
+    --------
+    .. sourcecode:: python
+
+        # Make a LIBSVM model
+        >>> import svmutil
+        >>> problem = svmutil.svm_problem([0, 0, 1, 1], [[0, 1], [1, 1], [8, 9], [7, 7]])
+        >>> libsvm_model = svmutil.svm_train(problem, svmutil.svm_parameter())
+
+        # Convert using default input and output names
+        >>> import coremltools
+        >>> coreml_model = coremltools.converters.libsvm.convert(libsvm_model)
+
+        # Save the Core ML model to a file.
+        >>> coreml_model.save('./my_model.mlmodel')
+
+        # Convert using user-specified input names
+        >>> coreml_model = coremltools.converters.libsvm.convert(libsvm_model, input_names=['x', 'y'])
+    """
+    if not _HAS_LIBSVM:
+        raise RuntimeError("libsvm not found. libsvm conversion API is disabled.")
+
+    if isinstance(model, str):
+        libsvm_model = _libsvm_util.load_model(model)
+    else:
+        libsvm_model = model
+    if not isinstance(libsvm_model, _svmutil.svm_model):
+        raise TypeError(
+            "Expected 'model' of type '%s' (got %s)"
+            % (_svmutil.svm_model, type(libsvm_model))
+        )
+
+    if not isinstance(target_name, str):
+        # Report the type of the offending argument, not the model.
+        raise TypeError(
+            "Expected 'target_name' of type str (got %s)" % type(target_name)
+        )
+
+    if input_length != "auto" and not isinstance(input_length, int):
+        raise TypeError(
+            "Expected 'input_length' of type int (got %s)" % type(input_length)
+        )
+
+    if input_length != "auto" and not isinstance(input_names, str):
+        raise ValueError(
+            "'input_length' should not be used unless the input is a single array."
+        )
+
+    if not isinstance(probability, str):
+        raise TypeError(
+            "Expected 'probability' of type str (got %s)" % type(probability)
+        )
+
+    return _libsvm_converter.convert(
+        libsvm_model, input_names, target_name, input_length, probability
+    )
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/libsvm/_libsvm_converter.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/libsvm/_libsvm_converter.py
new file mode 100644
index 00000000..8f476adc
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/libsvm/_libsvm_converter.py
@@ -0,0 +1,199 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from coremltools import __version__ as ct_version
+from coremltools.models import _METADATA_SOURCE, _METADATA_VERSION
+
+from ... import SPECIFICATION_VERSION
+from ..._deps import _HAS_LIBSVM
+
+
+def _infer_min_num_features(model):
+    # find the largest index of all the support vectors
+    max_index = 0
+    for i in range(model.l):
+        j = 0
+        while model.SV[i][j].index != -1:
+            cur_last_index = model.SV[i][j].index
+            j += 1
+        if cur_last_index > max_index:
+            max_index = cur_last_index
+    return max_index
+
+
+def convert(libsvm_model, feature_names, target, input_length, probability):
+    """
+    Convert a support vector machine (SVM) model to the protobuf spec.
+
+    Supports:
+      * C-SVC
+      * nu-SVC
+      * Epsilon-SVR
+      * nu-SVR
+
+    Parameters
+    ----------
+    libsvm_model: libsvm_model
+        Libsvm representation of the model.
+
+    feature_names : [str] | str
+        Names of each of the features.
+
+    target: str
+        Name of the predicted class column.
+
+    probability: str
+        Name of the class probability column. Only used for C-SVC and nu-SVC.
+
+    Returns
+    -------
+    model_spec: An object of type Model_pb.
+        Protobuf representation of the model
+    """
+    if not _HAS_LIBSVM:
+        raise RuntimeError("libsvm not found. libsvm conversion API is disabled.")
+
+    from libsvm import svm as _svm
+
+    from ...models import MLModel
+    from ...proto import Model_pb2
+
+    svm_type_enum = libsvm_model.param.svm_type
+
+    # Create the spec
+    export_spec = Model_pb2.Model()
+    export_spec.specificationVersion = SPECIFICATION_VERSION
+
+    if svm_type_enum == _svm.EPSILON_SVR or svm_type_enum == _svm.NU_SVR:
+        svm = export_spec.supportVectorRegressor
+    else:
+        svm = export_spec.supportVectorClassifier
+
+    # Set the feature names
+    inferred_length = _infer_min_num_features(libsvm_model)
+    if isinstance(feature_names, str):
+        # input will be a single array
+        if input_length == "auto":
+            print(
+                "[WARNING] Inferring an input length of %d. If this is not correct,"
+                " use the 'input_length' parameter." % inferred_length
+            )
+            input_length = inferred_length
+        elif inferred_length > input_length:
+            raise ValueError(
+                "An input length of %d was given, but the model requires an"
+                " input of at least %d." % (input_length, inferred_length)
+            )
+
+        input = export_spec.description.input.add()
+        input.name = feature_names
+        input.type.multiArrayType.shape.append(input_length)
+        input.type.multiArrayType.dataType = Model_pb2.ArrayFeatureType.DOUBLE
+
+    else:
+        # input will be a series of doubles
+        if inferred_length > len(feature_names):
+            raise ValueError(
+                "%d feature names were given, but the model requires at"
+                " least %d features." % (len(feature_names), inferred_length)
+            )
+        for cur_input_name in feature_names:
+            input = export_spec.description.input.add()
+            input.name = cur_input_name
+            input.type.doubleType.MergeFromString(b"")
+
+    # Set target
+    output = export_spec.description.output.add()
+    output.name = target
+
+    # Set the interface types
+    if svm_type_enum == _svm.EPSILON_SVR or svm_type_enum == _svm.NU_SVR:
+        export_spec.description.predictedFeatureName = target
+        output.type.doubleType.MergeFromString(b"")
+        nr_class = 2
+
+    elif svm_type_enum == _svm.C_SVC or svm_type_enum == _svm.NU_SVC:
+        export_spec.description.predictedFeatureName = target
+        output.type.int64Type.MergeFromString(b"")
+
+        nr_class = len(libsvm_model.get_labels())
+
+        for i in range(nr_class):
+            svm.numberOfSupportVectorsPerClass.append(libsvm_model.nSV[i])
+            svm.int64ClassLabels.vector.append(libsvm_model.label[i])
+
+        if probability and bool(libsvm_model.probA):
+            output = export_spec.description.output.add()
+            output.name = probability
+            output.type.dictionaryType.MergeFromString(b"")
+            output.type.dictionaryType.int64KeyType.MergeFromString(b"")
+            export_spec.description.predictedProbabilitiesName = probability
+
+    else:
+        raise ValueError(
+            "Only the following SVM types are supported: C_SVC, NU_SVC, EPSILON_SVR, NU_SVR"
+        )
+
+    if libsvm_model.param.kernel_type == _svm.LINEAR:
+        svm.kernel.linearKernel.MergeFromString(
+            b""
+        )  # Hack to set kernel to an empty type
+    elif libsvm_model.param.kernel_type == _svm.RBF:
+        svm.kernel.rbfKernel.gamma = libsvm_model.param.gamma
+    elif libsvm_model.param.kernel_type == _svm.POLY:
+        svm.kernel.polyKernel.degree = libsvm_model.param.degree
+        svm.kernel.polyKernel.c = libsvm_model.param.coef0
+        svm.kernel.polyKernel.gamma = libsvm_model.param.gamma
+    elif libsvm_model.param.kernel_type == _svm.SIGMOID:
+        svm.kernel.sigmoidKernel.c = libsvm_model.param.coef0
+        svm.kernel.sigmoidKernel.gamma = libsvm_model.param.gamma
+    else:
+        raise ValueError(
+            "Unsupported kernel. The following kernels are supported: linear, RBF, polynomial, and sigmoid."
+ ) + + # set rho + # also set probA/ProbB only for SVC + if svm_type_enum == _svm.C_SVC or svm_type_enum == _svm.NU_SVC: + num_class_pairs = nr_class * (nr_class - 1) // 2 + for i in range(num_class_pairs): + svm.rho.append(libsvm_model.rho[i]) + if bool(libsvm_model.probA) and bool(libsvm_model.probB): + for i in range(num_class_pairs): + svm.probA.append(libsvm_model.probA[i]) + svm.probB.append(libsvm_model.probB[i]) + else: + svm.rho = libsvm_model.rho[0] + + # set coefficents + if svm_type_enum == _svm.C_SVC or svm_type_enum == _svm.NU_SVC: + for _ in range(nr_class - 1): + svm.coefficients.add() + for i in range(libsvm_model.l): + for j in range(nr_class - 1): + svm.coefficients[j].alpha.append(libsvm_model.sv_coef[j][i]) + else: + for i in range(libsvm_model.l): + svm.coefficients.alpha.append(libsvm_model.sv_coef[0][i]) + + # set support vectors + for i in range(libsvm_model.l): + j = 0 + cur_support_vector = svm.sparseSupportVectors.vectors.add() + while libsvm_model.SV[i][j].index != -1: + cur_node = cur_support_vector.nodes.add() + cur_node.index = libsvm_model.SV[i][j].index + cur_node.value = libsvm_model.SV[i][j].value + j += 1 + + model = MLModel(export_spec) + + from libsvm import __version__ as libsvm_version + + libsvm_version = "libsvm=={0}".format(libsvm_version) + model.user_defined_metadata[_METADATA_VERSION] = ct_version + model.user_defined_metadata[_METADATA_SOURCE] = libsvm_version + + return model diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/libsvm/_libsvm_util.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/libsvm/_libsvm_util.py new file mode 100644 index 00000000..fb75c05e --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/libsvm/_libsvm_util.py @@ -0,0 +1,37 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from ..._deps import _HAS_LIBSVM + + +def load_model(model_path): + """Load a libsvm model from a path on disk. + + This currently supports: + * C-SVC + * NU-SVC + * Epsilon-SVR + * NU-SVR + + Parameters + ---------- + model_path: str + Path on disk where the libsvm model representation is. + + Returns + ------- + model: libsvm_model + A model of the libsvm format. + """ + if not (_HAS_LIBSVM): + raise RuntimeError("libsvm not found. libsvm conversion API is disabled.") + + import os + + from svmutil import svm_load_model # From libsvm + + if not os.path.exists(model_path): + raise IOError("Expected a valid file path. %s does not exist" % model_path) + return svm_load_model(model_path) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/__init__.py new file mode 100644 index 00000000..64a17d12 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + + +from .mil import (SPACES, SUPPORT_FLOAT_TYPES, SUPPORT_INT_TYPES, Block, + Builder, DefaultInputs, Function, InputSpec, InternalVar, + ListInputType, ListVar, Operation, Placeholder, Program, + Symbol, TupleInputType, Var, builder, curr_block, + get_existing_symbol, get_new_symbol, get_new_variadic_symbol, + mil_list, register_op) +from .input_types import (ClassifierConfig, ColorLayout, EnumeratedShapes, + ImageType, InputType, RangeDim, Shape, TensorType) +from .frontend.tensorflow.tf_op_registry import register_tf_op +from .frontend.torch import register_torch_op diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/_deployment_compatibility.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/_deployment_compatibility.py new file mode 100644 index 00000000..e3a8f498 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/_deployment_compatibility.py @@ -0,0 +1,165 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from enum import IntEnum + +from coremltools import (_SPECIFICATION_VERSION_IOS_13, + _SPECIFICATION_VERSION_IOS_14, + _SPECIFICATION_VERSION_IOS_15, + _SPECIFICATION_VERSION_IOS_16) + + +class AvailableTarget(IntEnum): + # iOS versions + iOS13 = _SPECIFICATION_VERSION_IOS_13 + iOS14 = _SPECIFICATION_VERSION_IOS_14 + iOS15 = _SPECIFICATION_VERSION_IOS_15 + iOS16 = _SPECIFICATION_VERSION_IOS_16 + + # macOS versions (aliases of iOS versions) + macOS15 = _SPECIFICATION_VERSION_IOS_13 + macOS16 = _SPECIFICATION_VERSION_IOS_14 + macOS10_15 = _SPECIFICATION_VERSION_IOS_13 + macOS10_16 = _SPECIFICATION_VERSION_IOS_14 + macOS11 = _SPECIFICATION_VERSION_IOS_14 + macOS12 = _SPECIFICATION_VERSION_IOS_15 + macOS13 = _SPECIFICATION_VERSION_IOS_16 + + # watchOS versions (aliases of iOS versions) + watchOS6 = _SPECIFICATION_VERSION_IOS_13 + watchOS7 = _SPECIFICATION_VERSION_IOS_14 + watchOS8 = _SPECIFICATION_VERSION_IOS_15 + watchOS9 = _SPECIFICATION_VERSION_IOS_16 + + # tvOS versions (aliases of iOS versions) + tvOS13 = _SPECIFICATION_VERSION_IOS_13 + tvOS14 = _SPECIFICATION_VERSION_IOS_14 + tvOS15 = _SPECIFICATION_VERSION_IOS_15 + tvOS16 = _SPECIFICATION_VERSION_IOS_16 + + # customized __str__ + def __str__(self): + original_str = super().__str__() + new_str = original_str.replace(type(self).__name__, "coremltools.target") + return new_str + + +_get_features_associated_with = {} + + +def register_with(name): + def decorator(func): + if name not in _get_features_associated_with: + _get_features_associated_with[name] = func + else: + raise ValueError("Function is already registered with {}".format(name)) + return func + + return decorator + + +@register_with(AvailableTarget.iOS14) +def iOS14Features(spec): + features_list = [] + + if spec.WhichOneof("Type") == "neuralNetwork": + nn_spec = spec.neuralNetwork + elif spec.WhichOneof("Type") in "neuralNetworkClassifier": + nn_spec = spec.neuralNetworkClassifier + elif spec.WhichOneof("Type") in "neuralNetworkRegressor": + nn_spec = spec.neuralNetworkRegressor + else: + raise ValueError("Invalid neural network specification for the model") + + # Non-zero default optional values + for idx, input in enumerate(spec.description.input): 
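+        # A non-zero float/double/int default value on an optional input is an
+        # iOS14-only capability, so taking the max of the three default fields
+        # below detects whether any such default was set.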
+        value = 0
+        if input.type.isOptional:
+            value = max(value, input.type.multiArrayType.floatDefaultValue)
+            value = max(value, input.type.multiArrayType.doubleDefaultValue)
+            value = max(value, input.type.multiArrayType.intDefaultValue)
+
+        if value != 0:
+            msg = "Support of non-zero default optional values for inputs."
+            features_list.append(msg)
+            break
+
+    # Layers or modifications introduced in iOS14
+    new_layers = [
+        "oneHot",
+        "cumSum",
+        "clampedReLU",
+        "argSort",
+        "pooling3d",
+        "convolution3d",
+        "globalPooling3d",
+    ]
+    for layer in nn_spec.layers:
+        layer_type = layer.WhichOneof("layer")
+
+        msg = ""
+
+        if layer_type in new_layers:
+            msg = "{} {}".format(layer_type.capitalize(), "operation")
+
+        if layer_type == "tile" and len(layer.input) == 2:
+            msg = "Dynamic Tile operation"
+
+        if layer_type == "upsample" and layer.upsample.linearUpsampleMode in [1, 2]:
+            msg = "Upsample operation with Align Corners mode"
+
+        if layer_type == "reorganizeData" and layer.reorganizeData.mode == 2:
+            msg = "Pixel Shuffle operation"
+
+        if layer_type == "sliceDynamic" and layer.sliceDynamic.squeezeMasks:
+            msg = "Squeeze mask for dynamic slice operation"
+
+        if layer_type == "sliceStatic" and layer.sliceStatic.squeezeMasks:
+            msg = "Squeeze mask for static slice operation"
+
+        if layer_type == "concatND" and layer.concatND.interleave:
+            msg = "Concat layer with interleave operation"
+
+        if msg != "" and (msg not in features_list):
+            features_list.append(msg)
+
+    return features_list
+
+
+def check_deployment_compatibility(spec, representation, deployment_target):
+
+    if not isinstance(deployment_target, AvailableTarget):
+        raise TypeError(
+            "Argument for deployment_target must be an enumeration from Enum class AvailableTarget"
+        )
+
+    for any_target in AvailableTarget:
+
+        if any_target > deployment_target and any_target in _get_features_associated_with:
+            missing_features = _get_features_associated_with[any_target](spec)
+
+            if missing_features:
+                msg = (
+                    "Provided minimum deployment target requires model to be of version {} but converted model "
+                    "uses the following features, which are only available from version {} onwards. Please use "
+                    "a higher minimum deployment target to convert. \n ".format(
+                        deployment_target.value, any_target.value
+                    )
+                )
+
+                for i, feature in enumerate(missing_features):
+                    msg += " {}. {}\n".format(i + 1, feature)
+
+                raise ValueError(msg)
+
+    # Default exception thrown if unable to find the reason behind the spec version bump
+    if spec.specificationVersion > deployment_target.value:
+        msg = (
+            "Provided deployment target requires model to be of version {} but the converted model "
+            "has version {}, which is only suitable for later releases".format(
+                deployment_target.value, spec.specificationVersion,
+            )
+        )
+        raise ValueError(msg)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/__init__.py
new file mode 100644
index 00000000..61aafff4
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/__init__.py
@@ -0,0 +1,4 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/backend_helper.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/backend_helper.py new file mode 100644 index 00000000..8a75c904 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/backend_helper.py @@ -0,0 +1,74 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools.converters.mil.input_types import ColorLayout +from coremltools.converters.mil.mil.passes.defs.preprocess import NameSanitizer +from coremltools.proto import FeatureTypes_pb2 as ft + + +def _get_probability_var_for_classifier(prog, classifier_config): + ''' + Return the var which will be used to construct the dictionary for the classifier. + :param prog: mil program + :param classifier_config: an instance of coremltools.ClassifierConfig class + :return: var + ''' + block = prog.functions["main"] + probability_var = None + if classifier_config.predicted_probabilities_output is None \ + or classifier_config.predicted_probabilities_output == "": + # user has not indicated which tensor in the program to use as probabilities + # (i.e which tensor to link to the classifier output) + # in this case, attach the last non const op to the classify op + for op in reversed(block.operations): + if op.op_type != 'const' and len(op.outputs) == 1: + probability_var = op.outputs[0] + break + if probability_var is None: + raise ValueError("Unable to determine the tensor in the graph " + "that corresponds to the probabilities for the classifier output") + else: + # user has indicated which tensor in the program to use as probabilities + # (i.e which tensor to link to the classifier output) + # Verify that it corresponds to a var produced in the program + predicted_probabilities_output = NameSanitizer().sanitize_name(classifier_config.predicted_probabilities_output) + for op in block.operations: + for out in op.outputs: + if out.name == predicted_probabilities_output: + probability_var = out + break + if probability_var is None: + msg = "'predicted_probabilities_output', '{}', provided in 'ClassifierConfig', does not exist in the MIL program." + raise ValueError(msg.format(predicted_probabilities_output)) + return probability_var + + +def _get_colorspace_enum(color_layout): + if color_layout == ColorLayout.GRAYSCALE: + return ft.ImageFeatureType.ColorSpace.GRAYSCALE + elif color_layout == ColorLayout.GRAYSCALE_FLOAT16: + return ft.ImageFeatureType.ColorSpace.GRAYSCALE_FLOAT16 + elif color_layout == ColorLayout.BGR: + return ft.ImageFeatureType.ColorSpace.BGR + else: + return ft.ImageFeatureType.ColorSpace.RGB + +def _validate_image_input_output_shapes(color_layout, shape, name, is_input=True): + io_str = "input" if is_input else "output" + if len(shape) != 4: + raise ValueError("Image {}, '{}', must have rank 4. Instead it has rank {}". + format(io_str, name, len(shape))) + if color_layout in (ColorLayout.BGR, ColorLayout.RGB): + if shape[1] != 3 or shape[0] != 1: + raise ValueError("Shape of the RGB/BGR image {}, '{}', must be of kind (1, 3, H, W), " + "i.e., first two dimensions must be (1, 3), instead they are: {}". 
+ format(io_str, name, shape[:2])) + elif color_layout in (ColorLayout.GRAYSCALE, ColorLayout.GRAYSCALE_FLOAT16): + if shape[1] != 1 or shape[0] != 1: + raise ValueError("Shape of the Grayscale image {}, '{}', must be of kind (1, 1, H, W), " + "i.e., first two dimensions must be (1, 1), instead they are: {}". + format(io_str, name, shape[:2])) + else: + raise KeyError("Unrecognized color_layout {}".format(color_layout)) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/__init__.py new file mode 100644 index 00000000..61aafff4 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/helper.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/helper.py new file mode 100644 index 00000000..b03708cd --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/helper.py @@ -0,0 +1,329 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os + +import numpy as np + +import coremltools.proto.FeatureTypes_pb2 as ft +import coremltools.proto.MIL_pb2 as pm +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.types import (builtin_to_proto_types, + builtin_to_string, + numpy_type_to_builtin_type, + type_to_builtin_type) +from coremltools.converters.mil.mil.types.type_mapping import np_val_to_py_type +from coremltools.models.utils import _WEIGHTS_DIR_NAME, _WEIGHTS_FILE_NAME + + +def create_valuetype_scalar(data_type): + """ + Return pm.ValueType with DataType set + """ + v_type = pm.ValueType() + update_tensortype(v_type.tensorType, (), data_type) + return v_type + + +def update_listtype(l_type, length, elem_shape, dtype): + """ + Update in-place of l_type (ListType) to length and type. + """ + + elem_type = create_valuetype_tensor(elem_shape, dtype) + l_type.type.CopyFrom(elem_type) + + l_dim = l_type.length + set_proto_dim(l_dim, length) + +def create_valuetype_list(length, elem_shape, dtype): + """ + Return pm.ValueType with List (ListType) set. + length: length of list (int) + """ + v_type = pm.ValueType() + update_listtype(v_type.listType, length, elem_shape, dtype) + return v_type + +def create_valuetype_dict(key_type, value_type): + """ + Return pm.ValueType with dict (dictionaryType) set + """ + v_type = pm.ValueType() + v_type.dictionaryType.keyType.CopyFrom(types_to_proto(key_type)) + v_type.dictionaryType.valueType.CopyFrom(types_to_proto(value_type)) + return v_type + + +def create_valuetype_tensor(shape, data_type): + """ + Return pm.ValueType with tensor (TensorType) set. 
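+    For example (illustrative): shape=(2, 3) with a float data_type yields a
+    ValueType whose tensorType has rank 2 and dimensions [2, 3].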
+ shape: list of ints + """ + v_type = pm.ValueType() + update_tensortype(v_type.tensorType, shape, data_type) + return v_type + + +def set_proto_dim(proto_dim, dim): + if isinstance(dim, (int, np.integer)): + proto_dim.constant.size = dim + else: + dim_str = str(dim) + if len(dim_str) > 0: + if dim_str[0] == "*" or (len(dim_str) >= 3 and dim_str[0:3] == "..."): + proto_dim.unknown.variadic = True + return + proto_dim.unknown.variadic = False + + +def update_tensortype(t_type, shape, data_type): + """ + Update in-place of t_type (TensorType) to shape and data_type. + """ + t_type.dataType = data_type + t_type.rank = len(shape) + t_type.ClearField("dimensions") + for s in shape: + t_dim = t_type.dimensions.add() + set_proto_dim(t_dim, s) + +def _tensor_field_by_type(tensor_val, builtin_type): + if builtin_type == types.bool: + return tensor_val.bools.values + elif types.is_int(builtin_type): + if (builtin_type == types.int64 or builtin_type == types.uint64): + return tensor_val.longInts.values + if builtin_type in (types.int8, types.uint8, types.uint32): + return tensor_val.bytes.values + return tensor_val.ints.values + elif types.is_float(builtin_type): + if (builtin_type == types.fp64): + return tensor_val.doubles.values + elif (builtin_type == types.fp32): + return tensor_val.floats.values + elif (builtin_type == types.fp16): + return tensor_val.bytes.values + else: + raise TypeError( + "Unsupported float dtype for MIL proto serialization: {}".format(builtin_to_string(builtin_type))) + elif builtin_type == types.str: + return tensor_val.strings.values + else: + raise NotImplementedError("Unimplemented tensor type for: " + str(builtin_type)) + +def _set_empty_tensor_field_by_type(tensor_val, builtin_type): + if builtin_type == types.bool: + tensor_val.bools.SetInParent() + elif types.is_int(builtin_type): + if (builtin_type == types.int64 or builtin_type == types.uint64): + tensor_val.longInts.SetInParent() + elif builtin_type in (types.int8, types.uint8, types.uint32): + tensor_val.bytes.SetInParent() + else: + tensor_val.ints.SetInParent() + elif types.is_float(builtin_type): + if (builtin_type == types.fp64): + tensor_val.doubles.SetInParent() + elif (builtin_type == types.fp32): + tensor_val.floats.SetInParent() + elif (builtin_type == types.fp16): + tensor_val.bytes.SetInParent() + else: + raise TypeError("Unsupported float dtype for MIL proto serialization: {}".format(builtin_to_string(builtin_type))) + elif builtin_type == types.str: + tensor_val.strings.SetInParent() + else: + raise NotImplementedError("Unimplemented tensor type for: " + str(builtin_type)) + +def create_tensor_value(np_tensor): + """ + Return TensorValue. 
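+    For example (illustrative): np.array([1.0, 2.0], dtype=np.float32) maps to a
+    Value whose immediateValue.tensor.floats.values holds [1.0, 2.0].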
+ """ + builtin_type = numpy_type_to_builtin_type(np_tensor.dtype) + + value_type = create_valuetype_tensor(np_tensor.shape, types_to_proto_primitive(builtin_type)) + val = pm.Value(type=value_type) + t_val = val.immediateValue.tensor + + # Copy the tensor values from the input tensor + t_field = _tensor_field_by_type(t_val, builtin_type) + + if 0 not in np_tensor.shape: + if builtin_type == types.str: + for x in np.nditer(np_tensor): + t_field.append(x.encode("utf-8")) + elif builtin_type in (types.fp16, types.int8, types.uint8, types.uint32): + val.immediateValue.tensor.bytes.values = np_val_to_py_type(np_tensor) + else: + for x in np_tensor.flatten(): + t_field.append(np_val_to_py_type(x)) + else: # This is an "empty" tensor (tensor with a dimension being size 0) + _set_empty_tensor_field_by_type(t_val, builtin_type) + return val + + +def create_scalar_value(py_scalar): + """ + Return TensorValue (since there's no ScalarValue) + """ + # Create the "scalar" (rank 0) tensor + builtin_type = type_to_builtin_type(type(py_scalar)) + value_type = create_valuetype_scalar(types_to_proto_primitive(builtin_type)) + val = pm.Value(type=value_type) + t_val = val.immediateValue.tensor + + # Set the tensor value + t_field = _tensor_field_by_type(t_val, builtin_type) + if builtin_type in (types.fp16, types.int8, types.uint8, types.uint32): + val.immediateValue.tensor.bytes.values = np_val_to_py_type(py_scalar) + else: + if builtin_type == types.str: + py_scalar = py_scalar.encode("utf-8") + t_field.append(np_val_to_py_type(py_scalar)) + + return val + + +def create_tuple_value(py_tuple): + """ + Return type of Tuple + """ + tp_val = pm.TupleValue() + for t in py_tuple: + item_val = tp_val.values.add() + item_type = item_val.type # ValueType + if isinstance(t, int): + v = create_scalar_value(t) + item_val.immediateValue.i = t + item_type = v.type + elif isinstance(t, np.ndarray): + v = create_tensor_value(t) + item_val.immediateValue.tensor.CopyFrom(v.immediateValue.tensor) + item_type.tensorType.CopyFrom(v.type.tensorType) + else: + raise NotImplementedError() + return tp_val + +def create_list_scalarvalue(py_list, np_type): + """ + Return a Value of type List, which holds scalar values + """ + builtin_type = numpy_type_to_builtin_type(np_type) + value_type = create_valuetype_list(length=len(py_list), + elem_shape=(), + dtype=types_to_proto_primitive(builtin_type)) + val = pm.Value(type=value_type) + + list_val = val.immediateValue.list + for v in py_list: + item_val = list_val.values.add() + item_val.CopyFrom(create_scalar_value(v)) + + return val + +def create_file_value_tensor(file_name, offset, dim, data_type): + """ + Create a Value Type to store File Value + """ + val = pm.Value( + blobFileValue=pm.Value.BlobFileValue(fileName=file_name, offset=offset), + type=create_valuetype_tensor(dim, data_type), + ) + return val + + +def types_to_proto_primitive(valuetype): + if valuetype not in builtin_to_proto_types: + additional_error_msg = "" + if valuetype in (types.complex64, types.complex128): + additional_error_msg = ( + "(MIL doesn't support complex data as model's output, please extract real and " + "imaginary parts explicitly.) " + ) + raise ValueError( + f"Unknown map from SSA type {valuetype} to Proto type. 
{additional_error_msg}" + ) + return builtin_to_proto_types[valuetype] + + +def types_to_proto(valuetype): + if types.is_tensor(valuetype): + primitive = types_to_proto_primitive(valuetype.get_primitive()) + return create_valuetype_tensor(valuetype.get_shape(), primitive) + elif types.is_tuple(valuetype): + v_type = pm.ValueType() + t_type = v_type.tupleType + for t in valuetype.T: + new_v_type = t_type.types.add() + new_v_type.CopyFrom(types_to_proto(t)) + return v_type + elif types.is_list(valuetype): + elem = valuetype.T[0] + length = valuetype.T[1] + if types.is_tensor(elem): + dtype = types_to_proto_primitive(elem.get_primitive()) + elem_shape = elem.get_shape() + elif types.is_scalar(elem): + dtype = types_to_proto_primitive(valuetype) + elem_shape = () + elif types.is_str(elem): + dtype = types_to_proto_primitive(elem) + elem_shape = () + else: + raise NotImplementedError("Only list of either tensors or scalars supported. " + "Got element of type {}".format(elem.__type_info__())) + return create_valuetype_list(length=length, elem_shape=elem_shape, dtype=dtype) + elif types.is_dict(valuetype): + return create_valuetype_dict(valuetype.T[0], valuetype.T[1]) + else: + return create_valuetype_scalar(types_to_proto_primitive(valuetype)) + + +def create_file_value(output_var, blob_writer): + if output_var.val.dtype.kind == 'f' and output_var.val.dtype.itemsize == 4: + offset = blob_writer.write_float_data(output_var.val.flatten()) + elif output_var.val.dtype.kind == 'f' and output_var.val.dtype.itemsize == 2: + output_var_fp16_to_bytes_to_uint16 = np.frombuffer(output_var.val.flatten().tobytes(), np.uint16) + offset = blob_writer.write_fp16_data(output_var_fp16_to_bytes_to_uint16) + elif output_var.val.dtype.kind == "u" and output_var.val.dtype.itemsize == 1: + offset = blob_writer.write_uint8_data(output_var.val.flatten()) + elif output_var.val.dtype.kind == "i" and output_var.val.dtype.itemsize == 1: + offset = blob_writer.write_int8_data(output_var.val.flatten()) + else: + raise TypeError("Unsupported type, {}, for net buffer serialization.".format(output_var.val.dtype)) + + return create_file_value_tensor( + file_name=os.path.join(os.path.join('@model_path', _WEIGHTS_DIR_NAME), _WEIGHTS_FILE_NAME), + offset=offset, + dim=output_var.val.shape, + data_type=types_to_proto_primitive(output_var.sym_type.get_primitive()), + ) + +def create_immediate_value(var): + if types.is_tensor(var.sym_type): + return create_tensor_value(var.val) + elif types.is_list(var.sym_type): + if var.elem_type == types.str: + return create_list_scalarvalue(var.val, str) + elif var.elem_type == types.int64: + return create_list_scalarvalue(var.val, np.int64) + else: + raise NotImplementedError("List element type, {}, not supported yet.".format(var.sym_type.__type_info__())) + else: + return create_scalar_value(var.val) + +def cast_to_framework_io_dtype(var, is_output): + if var.dtype == types.fp32: + return ft.ArrayFeatureType.ArrayDataType.FLOAT32 + elif var.dtype == types.int32: + return ft.ArrayFeatureType.ArrayDataType.INT32 + elif var.dtype == types.fp16: + return ft.ArrayFeatureType.ArrayDataType.FLOAT16 + else: + ioname = "Output " if is_output else "Input " + ioname2 = "outputs" if is_output else "inputs" + raise NotImplementedError(ioname + var.name + " has data type " + builtin_to_string(var.dtype) + \ + ". 
ML Program models only support fp32 and int32 " + ioname2 + ".") diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/load.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/load.py new file mode 100644 index 00000000..e682e871 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/load.py @@ -0,0 +1,535 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os + +import numpy as np + +from coremltools import _OPSET, _SPECIFICATION_VERSION_IOS_15 +from coremltools import _logger as logger +from coremltools.converters.mil.backend.backend_helper import _get_probability_var_for_classifier +from coremltools.converters.mil.backend.mil.helper import ( + cast_to_framework_io_dtype, + create_file_value, + create_immediate_value, + create_list_scalarvalue, + create_scalar_value, + types_to_proto, +) +from coremltools.converters.mil.backend.nn.load import _set_optional_inputs +from coremltools.converters.mil.input_types import EnumeratedShapes, ImageType, RangeDim, TensorType +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import Function, mil_list, types +from coremltools.converters.mil.mil.ops.registry import SSAOpRegistry +from coremltools.converters.mil.mil.types.symbolic import any_symbolic, any_variadic, is_symbolic +from coremltools.models.neural_network.flexible_shape_utils import ( + NeuralNetworkImageSize, + NeuralNetworkImageSizeRange, + add_enumerated_image_sizes, + add_multiarray_ndshape_enumeration, + set_multiarray_ndshape_range, + update_image_size_range, +) +from coremltools.models.utils import _WEIGHTS_FILE_NAME +from coremltools.proto import FeatureTypes_pb2 as ft +from coremltools.proto import MIL_pb2 as pm +from coremltools.proto import Model_pb2 as ml + +from ..backend_helper import _get_colorspace_enum, _validate_image_input_output_shapes + +try: + from coremltools.libmilstoragepython import _BlobStorageWriter as BlobWriter +except: + BlobWriter = None + + +def should_use_weight_file(val): + return ( + val is not None + and isinstance(val, (np.ndarray, np.generic)) + and val.size >= 10 + and val.dtype in ['float16', 'float32', 'uint8', 'int8'] + ) + + +def translate_const(op, blob_writer): + output_var = op.outputs[0] + + if should_use_weight_file(output_var.val): + value = create_file_value(output_var, blob_writer) + else: + value = create_immediate_value(output_var) + + return pm.Operation( + type="const", + attributes={"name": create_scalar_value(op.name), "val": value}, + outputs=[ + pm.NamedValueType( + name=output_var.name, type=types_to_proto(output_var.sym_type) + ) + ], + ) + + +def translate_constexpr(op, blob_writer): + + def get_value(var): + if should_use_weight_file(var.val): + value = create_file_value(var, blob_writer) + else: + value = create_immediate_value(var) + + return value + + output_var = op.outputs[0] + + attributes = {"name": create_scalar_value(op.name)} + attributes.update({k: get_value(v) for k, v in op.inputs.items()}) + + return pm.Operation( + type=op.op_type, + attributes=attributes, + outputs=[ + pm.NamedValueType( + name=output_var.name, type=types_to_proto(output_var.sym_type) + ) + ], + ) + + +def translate_generic_op(op, parameters, blob_writer, literal_params=[]): + inputs = {} + for param_name, vars 
in op.inputs.items(): + if param_name.startswith("_"): + continue + if not isinstance(vars, (list, tuple)): + vars = [vars] + + arguments = [] + for _var in vars: + binding = pm.Argument.Binding() + # use const value literals if requested + if param_name in literal_params: + binding.value.CopyFrom(create_immediate_value(_var)) + else: + binding.name = _var.name + arguments.append(binding) + + args = pm.Argument() + args.arguments.extend(arguments) + inputs[param_name] = args + + outputs = [ + pm.NamedValueType(name=v.name, type=types_to_proto(v.sym_type)) + for v in op.outputs + ] + blocks = None + if len(op.blocks) > 0: + blocks = [create_block(b, parameters, blob_writer) for b in op.blocks] + + op_type = op.op_type + attr_dict = {} + if op.op_type in SSAOpRegistry.custom_ops: + op_type = "custom_layer" + class_name = op.bindings.get("class_name", op.name) + input_order = op.bindings.get("input_order", []) + parameters = op.bindings.get("parameters", []) + weights = op.bindings.get("weights", []) + description = op.bindings.get("description", "") + + attr_dict["name"] = create_scalar_value(op.name) + attr_dict["class_name"] = create_scalar_value(class_name) + attr_dict["input_order"] = create_list_scalarvalue(input_order, str) + attr_dict["parameters"] = create_list_scalarvalue(parameters, str) + attr_dict["weights"] = create_list_scalarvalue(weights, str) + attr_dict["description"] = create_scalar_value(description) + + return pm.Operation( + type=op_type, + blocks=blocks, + inputs=inputs, + attributes=attr_dict, + outputs=outputs, + ) + +def create_block(block, parameters, blob_writer): + + def feeds_to_only_constexprs(op): + return (op.op_type == 'const') \ + and len(op.outputs[0].child_ops) > 0 \ + and all((child_op.op_type.startswith("constexpr_")) for child_op in op.outputs[0].child_ops) + + proto_ops = [] + + # Find the const op that generates classify's "label" / "class" string vec. + classify_const_classes_op = None + if len(block.operations) > 0: + # Classify is always the last operation in the block. + op = block.operations[-1] + op_cls_name = type(op).__name__ + if (op_cls_name == "classify"): + classes_var = op.inputs["classes"] + classify_const_classes_op = classes_var.op + if (len(classes_var.child_ops) != 1): + raise ValueError("Classify's labels/classes should be input to only 1 op (classify).") + + for op in block.operations: + op_cls_name = type(op).__name__ + if op_cls_name == "const": + if feeds_to_only_constexprs(op): + continue + # Do not serialize the const op that creates the var bound to the classifier's "classes" param. + # The variable's value will be bound directly to classify's "classes" param instead. + if op != classify_const_classes_op: + proto_ops.append(translate_const(op, blob_writer)) + elif op_cls_name.startswith("constexpr_"): + proto_ops.append(translate_constexpr(op, blob_writer)) + elif op_cls_name == "classify": + # Classify's "classes" param should be serialized as a value literal bound + # directly to the param, rather than as a const-generated variable. 
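+                # (e.g. a classifier's ["cat", "dog"] label vector ends up embedded
+                # in the classify op itself; the const op feeding "classes" was
+                # already skipped above via classify_const_classes_op.)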
+                proto_ops.append(translate_generic_op(op, parameters, blob_writer, ["classes"]))
+            elif op_cls_name == "reshape_like":
+                # reshape_like should also be able to take its value from a const op.
+                # This is a workaround solution.
+                # rdar://98689808 (Reshape_like should also accept const value from non literal input)
+                literal_params = ["begins", "ends", "end_masks"]
+                proto_ops.append(translate_generic_op(op, parameters, blob_writer, literal_params))
+            else:
+                proto_ops.append(translate_generic_op(op, parameters, blob_writer))
+
+    inputs = []
+    if not isinstance(block, Function):
+        # Function is a subclass of Block, but a function's block has no inputs,
+        # hence we skip reading the block inputs.
+        for var in block.inputs:
+            proto_type = types_to_proto(var.sym_type)
+            inputs.append(pm.NamedValueType(name=var.name, type=proto_type))
+    output_names = [v.name for v in block.outputs]
+    return pm.Block(inputs=inputs, outputs=output_names, operations=proto_ops)
+
+
+def convert_function(function, parameters, blob_writer, opset):
+    block = create_block(function, parameters, blob_writer)
+
+    inputs = []
+    for name, var in function.inputs.items():
+        proto_type = types_to_proto(var.sym_type)
+        inputs.append(pm.NamedValueType(name=name, type=proto_type))
+
+    return pm.Function(inputs=inputs, opset=opset, block_specializations={opset: block})
+
+# Add a classify op to the output.
+# Replaces the original probabilities output (in the containing MIL block)
+# with the outputs of the classifier op. Returns the name of the original
+# probabilities output variable.
+def _add_classify_op(prog, classifier_config):
+    '''
+    Add a "classify" op to the program, at the end of the main block.
+    '''
+    def remove_output(block, prob_var):
+        for i in range(len(block.outputs)):
+            if block.outputs[i] is prob_var:
+                block.outputs.pop(i)
+                break
+
+    block = prog.functions["main"]
+
+    message = "Class labels must be a list of integers / strings or a file path"
+    classes_in = classifier_config.class_labels
+    if isinstance(classes_in, str):
+        import os
+
+        if not os.path.isfile(classes_in):
+            raise ValueError("Path to class labels (%s) does not exist." 
% classes_in)
+ with open(classes_in, "r") as f:
+ classes = f.read()
+ classes = classes.splitlines()
+ elif isinstance(classes_in, list): # list[int or str]
+ classes = classes_in
+ assert all([isinstance(x, (int, str)) for x in classes]), message
+ else:
+ raise ValueError(message)
+
+ probability_var = _get_probability_var_for_classifier(prog, classifier_config)
+
+ # add the classify op now
+ with block:
+ # cast int labels to np.int64
+ if isinstance(classes[0], int):
+ classes = [np.int64(x) for x in classes]
+ classes_var = mb.const(val=mil_list(classes))
+ if probability_var.dtype != types.fp32:
+ remove_output(block, probability_var)
+ probability_var = mb.cast(x=probability_var, dtype="fp32", name=probability_var.name + "_cast_to_fp32")
+ out = mb.classify(probabilities=probability_var,
+ classes=classes_var
+ )
+
+ predicted_feature_name = "classLabel" if classifier_config.predicted_feature_name is None \
+ else classifier_config.predicted_feature_name
+ out[0].name = predicted_feature_name
+ out[1].name = predicted_feature_name + "_probs"
+
+ # Remove probabilities from block outputs, replace with classify's outputs
+ remove_output(block, probability_var)
+ block.outputs[:0] = out
+ return out[0].name, out[1].name
+
+
+def load(prog, weights_dir, resume_on_errors=False, specification_version=_SPECIFICATION_VERSION_IOS_15, **kwargs):
+ if BlobWriter is None:
+ raise RuntimeError("BlobWriter not loaded")
+ if "main" not in prog.functions:
+ raise ValueError("main function not found in program")
+
+ # If the user has specified a "ClassifierConfig", add the "classify" op to the prog.
+ classifier_config = kwargs.get("classifier_config", None)
+ predicted_feature_name = None
+ predicted_probabilities_name = None
+ if classifier_config is not None:
+ predicted_feature_name, predicted_probabilities_name = _add_classify_op(prog, classifier_config)
+
+ input_types = prog.main_input_types
+ output_types = prog.main_output_types
+ weight_path = os.path.join(weights_dir, _WEIGHTS_FILE_NAME)
+ blob_writer = BlobWriter(weight_path)
+
+ opset = _OPSET[specification_version]
+
+ function_protos = {}
+ for func_name, func in prog.functions.items():
+ function_protos[func_name] = convert_function(func, prog.parameters, blob_writer, opset)
+
+ proto = pm.Program(
+ version=1,
+ functions=function_protos,
+ )
+
+ desc = kwargs.get("model_description", None)
+ if desc and not isinstance(desc, ml.ModelDescription):
+ raise ValueError("Invalid model description")
+
+ if desc:
+ if classifier_config is not None:
+ raise AssertionError("model_description and classifier_config cannot both be provided")
+ model = ml.Model(description=desc, specificationVersion=specification_version)
+ model.mlProgram.CopyFrom(proto)
+ return model
+
+ input_features = []
+ output_features = []
+ symbolic_inputs = []
+ image_input_names = {} # these are the model inputs marked as image by the user
+ input_shape_map = {}
+
+ for input_type in input_types:
+ if isinstance(input_type, ImageType):
+ image_input_names[input_type.name] = input_type
+ # error checking for input(s) marked as images
+ if input_type.name not in list(prog.functions["main"].inputs.keys()):
+ msg = "Provided image input '{}' is not one of the inputs of the MIL program"
+ raise ValueError(msg.format(input_type.name))
+ input_shape_map[input_type.name] = input_type
+
+ for name, var in prog.functions["main"].inputs.items():
+ input_feature_type = ft.FeatureType()
+
+ # error checking for input(s) marked as images
+ # an image input must be of type 
tensor in program proto
+ # (since an image type does not exist in MIL program)
+ if name in image_input_names and \
+ not types.is_tensor(var.sym_type):
+ raise ValueError("For the image input, '{}', its type in the MIL program must be tensor. "
+ "Instead it is {}.".format(name, var.sym_type.__type_info__()))
+
+ if types.is_tensor(var.sym_type):
+ shape = var.sym_type.get_shape()
+ if any_variadic(shape):
+ raise ValueError("Variable rank model inputs are not supported!")
+ if any_symbolic(shape):
+ symbolic_inputs.append(name)
+ # We extract the default input shape given by the user first
+ if name in input_shape_map:
+ shape = input_shape_map[name].shape.default
+ else:
+ logger.warning("Input shape not fully specified by enumerated shapes or range dims; unspecified dimensions will be set to 1.")
+ # If no input shape is provided (e.g. auto conversion of -1 in TensorFlow)
+ shape = [1 if is_symbolic(d) else d for d in shape]
+
+ if name not in image_input_names:
+ # make a feature type of Type "multiArrayType"
+ array_type = ft.ArrayFeatureType(shape=shape, dataType=cast_to_framework_io_dtype(var, False))
+ input_feature_type.multiArrayType.CopyFrom(array_type)
+ else:
+ # make a feature type of Type "imageType"
+ input_type = image_input_names[name]
+ _validate_image_input_output_shapes(input_type.color_layout, shape, name, is_input=True)
+ if not input_type.channel_first:
+ raise ValueError("Image input, '{}', must be in the channel_first format".
+ format(name))
+ clr_space = _get_colorspace_enum(input_type.color_layout)
+ image_type = ft.ImageFeatureType(width=shape[-1],
+ height=shape[-2],
+ colorSpace=clr_space)
+ input_feature_type.imageType.CopyFrom(image_type)
+
+ input_features.append(
+ ml.FeatureDescription(name=name, type=input_feature_type)
+ )
+ elif types.is_scalar(var.sym_type):
+ array_type = ft.ArrayFeatureType(shape=[1], dataType=cast_to_framework_io_dtype(var, False))
+ input_feature_type.multiArrayType.CopyFrom(array_type)
+ input_features.append(ml.FeatureDescription(name=var.name, type=input_feature_type))
+ else:
+ raise NotImplementedError()
+
+ if output_types is not None and classifier_config is None:
+ assert len(output_types) == len(prog.functions["main"].outputs), \
+ "number of MIL program outputs does not match the number of outputs provided by the user"
+
+ for i, var in enumerate(prog.functions["main"].outputs):
+ output_feature_type = ft.FeatureType()
+ if types.is_tensor(var.sym_type) or types.is_primitive(var.sym_type):
+ if output_types is not None and isinstance(output_types[i], ImageType):
+ if not types.is_tensor(var.sym_type):
+ raise ValueError("Image output, '{}', is a scalar, but it should be a tensor of rank 4".format(
+ var.name))
+ shape = var.sym_type.get_shape()
+ if any_variadic(shape):
+ raise ValueError("Variable rank model outputs that are ImageTypes are not supported")
+ if any([is_symbolic(d) for d in shape]):
+ raise NotImplementedError("Image output '{}' has symbolic dimensions in its shape".
+ format(var.name)) + _validate_image_input_output_shapes(output_types[i].color_layout, shape, var.name, is_input=False) + clr_space = _get_colorspace_enum(output_types[i].color_layout) + image_type = ft.ImageFeatureType(width=shape[-1], + height=shape[-2], + colorSpace=clr_space) + output_feature_type.imageType.CopyFrom(image_type) + output_features.append( + ml.FeatureDescription(name=var.name, type=output_feature_type) + ) + else: + dataType = None + if classifier_config is None or var.name != predicted_feature_name: + # Not a classifier output, make sure model output type matches with ML Program type. + dataType = cast_to_framework_io_dtype(var, True) + else: + # Classifier outputs are set up separately, so default to fp32 for now. + dataType = ft.ArrayFeatureType.ArrayDataType.FLOAT32 + + array_type = ft.ArrayFeatureType(shape=None, dataType=dataType) + output_feature_type.multiArrayType.CopyFrom(array_type) + output_features.append(ml.FeatureDescription(name=var.name, type=output_feature_type)) + elif (types.is_dict(var.sym_type)): + output_feature_type.dictionaryType.MergeFromString(b"") + keytype, valtype = var.sym_type.T + if types.is_str(keytype): + output_feature_type.dictionaryType.stringKeyType.MergeFromString(b"") + elif (keytype == types.int64): + output_feature_type.dictionaryType.int64KeyType.MergeFromString(b"") + else: + raise ValueError("Dictionary key type not supported.") + output_features.append(ml.FeatureDescription(name=var.name, type=output_feature_type)) + else: + raise NotImplementedError() + + # Model description + desc = ml.ModelDescription(input=input_features, output=output_features) + if classifier_config is not None: + desc.predictedFeatureName = predicted_feature_name + desc.predictedProbabilitiesName = predicted_probabilities_name + + # Manually edit output type of predictedFeatureName. + # It doesn't use MLMultiArray and really uses a "primitive" type. 
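+ # String labels become a stringType output; integer labels become an
+ # int64Type output, matching the 64 bit integers the CoreML API uses for
+ # classifier labels (see the loop below).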
+ for output in desc.output: + if output.name == predicted_feature_name: + if type(classifier_config.class_labels[0]) == int: + output.type.int64Type.MergeFromString(b"") + else: + output.type.stringType.MergeFromString(b"") + break + + # Create ML Model + model = ml.Model(description=desc, specificationVersion=specification_version) + model.mlProgram.CopyFrom(proto) + + # Set symbolic shapes + for input_name in symbolic_inputs: + input_type = input_shape_map.get(input_name, None) + + if isinstance(input_type, ImageType): + if isinstance(input_type.shape, EnumeratedShapes): + enumerated_shapes = [] + for s in input_type.shape.shapes: + enumerated_shapes.append( + NeuralNetworkImageSize( + height=s.shape[-2], width=s.shape[-1] + ) + ) + add_enumerated_image_sizes( + model, input_name, sizes=enumerated_shapes + ) + else: + img_range = NeuralNetworkImageSizeRange() + H = input_type.shape.shape[-2] + W = input_type.shape.shape[-1] + + if isinstance(H, RangeDim): + img_range.add_height_range((H.lower_bound, H.upper_bound)) + elif is_symbolic(H): + img_range.add_height_range((1, -1)) + else: + img_range.add_height_range((H, H)) + if isinstance(W, RangeDim): + img_range.add_width_range((W.lower_bound, W.upper_bound)) + elif is_symbolic(W): + img_range.add_width_range((1, -1)) + else: + img_range.add_width_range((W, W)) + + update_image_size_range( + model, input_name, img_range + ) + elif isinstance(input_type, TensorType): + if isinstance(input_type.shape, EnumeratedShapes): + add_multiarray_ndshape_enumeration( + model, input_name, [tuple(s.shape) for s in input_type.shape.shapes] + ) + else: + lb = [] + ub = [] + for s in input_type.shape.shape: + if isinstance(s, RangeDim): + lb.append(s.lower_bound) + ub.append(s.upper_bound) + elif is_symbolic(s): + lb.append(1) + ub.append(-1) + else: + lb.append(s) + ub.append(s) + set_multiarray_ndshape_range( + model, input_name, lower_bounds=lb, upper_bounds=ub + ) + elif input_type is None: + sym_type = prog.functions["main"].inputs[input_name].sym_type + lb = [] + ub = [] + for s in sym_type.get_shape(): + if is_symbolic(s): + lb.append(1) + ub.append(-1) + else: + lb.append(s) + ub.append(s) + set_multiarray_ndshape_range( + model, input_name, lower_bounds=lb, upper_bounds=ub + ) + + # Set optional inputs + _set_optional_inputs(model, input_types) + + return model diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/passes/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/passes/__init__.py new file mode 100644 index 00000000..21f3261a --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/passes/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from . 
import (adjust_io_to_supported_types, fuse_activation_silu, + insert_image_preprocessing_op, sanitize_name_strings) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/passes/adjust_io_to_supported_types.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/passes/adjust_io_to_supported_types.py new file mode 100644 index 00000000..dd77dfed --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/passes/adjust_io_to_supported_types.py @@ -0,0 +1,204 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools import _logger as logger +from coremltools.converters.mil._deployment_compatibility import \ + AvailableTarget as target +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import types as types +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.helper import block_context_manager +from coremltools.converters.mil.mil.passes.pass_registry import register_pass + + +@register_pass(namespace="mil_backend") +class adjust_io_to_supported_types(AbstractGraphPass): + """ + Converts all dtypes to types that are supported by the CoreML runtime. + The runtime supports only fp16, fp32, int32, str, and bool variables. + + General rules: + * Integer vars that are not 32 bit are replaced with int32 types. + * All other types not in the list of runtime supported types are replaced with the fp32 dtype. + No casts are inserted; the previous type is replaced. The assumption is that all remaining + types are numerical and can be reasonably replaced with 32 bit float types. + + The "main" function has additional rules since its I/O is mapped to CoreML model I/O: + * if function.opset_version < coremltools.target.iOS16, then: + * Fp16 I/O is replaced with fp32 I/O. + Casts (fp32 input -> fp16) are inserted at the beginning of the program to preserve 16 bit inputs. + Casts (fp16 -> fp32 output) are inserted at the end of the program to preserve 16 bit computations. + + * All non-integer I/O that is not fp32 is replaced with fp32 I/O. + A cast (prev input type -> fp32) is inserted at the beginning of the program to preserve non-fp32 inputs. + A cast (prev type -> fp32 out) is inserted at the end of the program to preserve non-fp32 computations. + The assumption is that all remaining types are numerical and it is valid to cast them to/from fp32. + + * The only exception: Int64 outputs are allowed for the classifier op. This is to keep consistency with + the CoreML API, which uses 64 bit integers to represent classifier labels. + + ------ + + func main(bool x, int32 y, fp32 z) { + bool out = logical_not(x) + } -> (out, y, z) + + becomes + + func main(fp32 x, int32 y, fp32 z) { + bool x_casted = cast(x) + bool out__pre__output__fp32__cast = logical_not(x_casted) + fp32 out = cast(out__pre__output__fp32__cast) + } -> (out, y, z) + + ------ + + func not_main(bool x, int32 y, fp32 z) { + bool out = logical_not(x) + } -> (out, y, z) + + is unchanged. 
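+
+ Illustrative standalone invocation (a sketch mirroring the PASS_REGISTRY
+ usage in test_passes.py; "prog" is assumed to be an existing MIL Program):
+
+ from coremltools.converters.mil.mil.passes.pass_registry import PASS_REGISTRY
+ PASS_REGISTRY["mil_backend::adjust_io_to_supported_types"](prog)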
+ """ + + def apply(self, prog): + for name, func in prog.functions.items(): + is_main_funtion = name == "main" + _adjust_io_to_supported_types(func, is_main_funtion) + +__RUNTIME_SUPPORTED_TYPES = [types.fp16, types.fp32, types.int32, types.str, types.bool] + +##### +# Main Function +##### +def _adjust_var_dtype_helper(var, dtype): + if (types.is_scalar(var.sym_type)): + var._sym_type = dtype + else: + var._sym_type = types.tensor(dtype, var.sym_type.get_shape()) + +@block_context_manager +def _adjust_main_inputs(func): + first_op = func.operations[0] if len(func.operations) > 0 else None + for input_name, input_var in func.inputs.items(): + if (types.is_tensor(input_var.sym_type) or types.is_scalar(input_var.sym_type)) \ + and input_var.dtype != types.fp32 \ + and input_var.dtype != types.int32: + input_dtype_str = types.builtin_to_string(input_var.dtype) + if types.is_int(input_var.dtype): + # Replace non-int32 input type with int32. + logger.warning("Input" + input_var.name + " is of dtype " + input_dtype_str +\ + ". Only integer variables of bit width 32 are supported by the CoreML runtime. " +\ + "This input will be assigned a dtype of int32. " +\ + "No cast will be inserted; the previous dtype will be replaced.") + _adjust_var_dtype_helper(input_var, types.int32) + elif input_var.dtype == types.fp64: + # Replace float64 input type with fp32. + logger.warning("Input '" + input_var.name + "' is of dtype fp64. 64 bit float inputs are " +\ + "not supported by ML program models. This input will be assigned a dtype " +\ + "of fp32. No cast will be inserted; the previous dtype will be replaced.") + _adjust_var_dtype_helper(input_var, types.fp32) + elif input_var.dtype == types.fp16 \ + and func.opset_version >= target.iOS16: + pass # do nothing, since fp16 is a valid input type for CoreML + else: + # This is some other dtype. Change the type to fp32 and add a cast. + # This is only a limitation of main--other functions do not represent CoreML model inputs + # and do not have the same limitation on input types. + supported_dtypes = "{int32, fp32}" if func.opset_version < target.iOS16 else \ + "{int32, fp16, fp32}" + msg = "\nInput '{}' is of dtype {}. The " +\ + "CoreML runtime does not support inputs with this dtype " +\ + "(supported dtypes are: {}). This input will be assigned a dtype of " +\ + "fp32. 
A cast will be inserted at the beginning of the program to " +\
+ "convert the input to the originally defined dtype.\n"
+ if input_var.dtype == types.fp16:
+ msg += "fp16 dtype input is supported if function.opset_version is chosen to be at least " \
+ "iOS16/macOS13.\n"
+ logger.warning(msg.format(
+ input_var.name,
+ input_dtype_str,
+ supported_dtypes))
+
+ casted_input_var = mb.cast(x=input_var, dtype=input_dtype_str, before_op=first_op)
+ func.replace_uses_of_var_after_op(anchor_op=casted_input_var.op, old_var=input_var, new_var=casted_input_var)
+ _adjust_var_dtype_helper(input_var, types.fp32)
+
+@block_context_manager
+def _adjust_main_outputs(func):
+ new_outputs = []
+ for output_var in func.outputs:
+ output_type = output_var.sym_type
+ if (types.is_tensor(output_type) or types.is_scalar(output_type)) \
+ and output_var.dtype != types.fp32 \
+ and output_var.dtype != types.int32 \
+ and (func.opset_version < target.iOS16 or output_var.dtype != types.fp16):
+ # Since fp16 is a valid output type for Core ML from the iOS16 spec onwards, no cast is needed there.
+ output_dtype_str = types.builtin_to_string(output_var.dtype)
+ supported_dtypes = "{int32, fp32}" if func.opset_version < target.iOS16 else \
+ "{int32, fp16, fp32}"
+ msg = "\nOutput '{}' is of dtype {}. The " +\
+ "CoreML runtime does not support outputs with this dtype " +\
+ "(supported dtypes are: {}). This output will be assigned a dtype " +\
+ "of fp32. A cast will be inserted at the end of the program to convert " +\
+ "the original output dtype to the dtype supported by the CoreML runtime.\n"
+ if output_var.dtype == types.fp16:
+ msg += "fp16 dtype output is supported if function.opset_version is chosen to be at least " \
+ "iOS16/macOS13.\n"
+ logger.warning(msg.format(
+ output_var.name,
+ output_dtype_str,
+ supported_dtypes,
+ ))
+
+ output_var_name = output_var.name
+ output_var.set_name(output_var_name + "__pre__output__fp32__cast")
+ # Convert the output to fp32 by inserting a cast, then restore the original name.
+ output_var = mb.cast(x=output_var, dtype="fp32")
+ output_var.set_name(output_var_name)
+ new_outputs.append(output_var)
+ func.set_outputs(new_outputs)
+
+
+#####
+# General Functions and Blocks
+#####
+def _adjust_var(var):
+ """
+ Changes the dtype of the provided variable according
+ to the rules outlined in the top level pass comment
+ (see adjust_io_to_supported_types).
+ """
+ if (types.is_tensor(var.sym_type) or types.is_scalar(var.sym_type)) \
+ and var.dtype not in __RUNTIME_SUPPORTED_TYPES:
+ dtype_str = types.builtin_to_string(var.dtype)
+ if types.is_int(var.dtype):
+ # Replace non-int32 input type with int32.
+ logger.warning("Input '" + var.name + "' is of dtype " + dtype_str +\
+ ". Only integer variables of bit width 32 are supported by the CoreML runtime. " +\
+ "This input will be assigned a dtype of int32. " +\
+ "No cast will be inserted; the previous dtype will be replaced.")
+ _adjust_var_dtype_helper(var, types.int32)
+ else:
+ # This is some other unsupported dtype. Change the input type to fp32.
+ logger.warning("Var " + var.name + " is of dtype " + dtype_str + ". The CoreML runtime " +\
+ "does not support this dtype (only fp16, fp32, bool, and int32 are supported). " +\
+ "This input will be assigned a dtype of fp32. 
No cast will be inserted; " +\ + "the previous dtype will be replaced.") + _adjust_var_dtype_helper(var, types.fp32) + + +def _adjust_func_inputs(func): + for input_name, input_var in func.inputs.items(): + _adjust_var(input_var) + +##### +# The Pass +##### +def _adjust_io_to_supported_types(func, is_main): + if is_main: + _adjust_main_inputs(func) + _adjust_main_outputs(func) + else: + _adjust_func_inputs(func) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/passes/fuse_activation_silu.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/passes/fuse_activation_silu.py new file mode 100644 index 00000000..5f9270df --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/passes/fuse_activation_silu.py @@ -0,0 +1,82 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.helper import block_context_manager +from coremltools.converters.mil.mil.passes.pass_registry import register_pass + + +def _match_pattern(op): + if op.op_type == "sigmoid": + # abort fusion if op output is also a block output + if op.outputs[0] in op.enclosing_block.outputs: + return None + # find following op + child_ops = op.outputs[0].child_ops + if len(child_ops) == 1: + mul_op_candidate = list(child_ops)[0] + if mul_op_candidate.op_type != "mul": + return None + mul_inputs_actual = {mul_op_candidate.x.name, mul_op_candidate.y.name} + mul_inputs_expect = {op.x.name, op.outputs[0].name} + if mul_inputs_actual != mul_inputs_expect: + return None + return mul_op_candidate + + return None + + +def _try_to_transform(sigmoid_op, mul_op, block): + out_name = mul_op.outputs[0].name + # create a new silu op + x = mb.silu(x=sigmoid_op.x, name=out_name, before_op=sigmoid_op) + mul_op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=mul_op, old_var=mul_op.outputs[0], new_var=x + ) + # Remove all the ops at once + block.remove_ops([sigmoid_op, mul_op]) + return True + + +@block_context_manager +def _fuse_activation_silu_block(block): + fusion_status = False + for op in list(block.operations): + for b in op.blocks: + block_changed = True + while block_changed: + block_changed = _fuse_activation_silu_block(b) + if len(op.blocks) > 0: + continue + + mul_op = _match_pattern(op) + if mul_op is not None: + fusion_status = _try_to_transform(op, mul_op, block) + # has to break as the downstream iterator is affected. + if fusion_status: + return fusion_status + return fusion_status + + +@register_pass(namespace="mil_backend") +class fuse_activation_silu(AbstractGraphPass): + """ + Fold x * sigmoid(x) into silu(x) + + Given: + %1 = sigmoid(x=%0) + %2 = mul(x=%0, y=%1) or mul(x=%1, y=%0) + ... + + Result: + %3 = silu(%0) + ... 
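+
+ Like the other backend passes, this can be invoked standalone through the
+ pass registry (a sketch, following the PASS_REGISTRY pattern used in
+ test_passes.py; "prog" is assumed to be an existing MIL Program):
+
+ from coremltools.converters.mil.mil.passes.pass_registry import PASS_REGISTRY
+ PASS_REGISTRY["mil_backend::fuse_activation_silu"](prog)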
+ """ + def apply(self, prog): + for f in prog.functions.values(): + block_changed = True + while block_changed: + block_changed = _fuse_activation_silu_block(f) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/passes/insert_image_preprocessing_op.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/passes/insert_image_preprocessing_op.py new file mode 100644 index 00000000..b83a2b43 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/passes/insert_image_preprocessing_op.py @@ -0,0 +1,67 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as np + +from coremltools.converters.mil.input_types import ColorLayout, ImageType +from coremltools.converters.mil.mil import Builder as mb +# import mil internal ops to add it to the builder +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.helper import block_context_manager +from coremltools.converters.mil.mil.passes.pass_registry import register_pass +from coremltools.converters.mil.mil.types import nptype_from_builtin + + +@register_pass(namespace="mil_backend") +class insert_image_preprocessing_ops(AbstractGraphPass): + """ + Insert preprocessing ops, right after the input if its of type Image + """ + def apply(self, prog): + for f_name, f in prog.functions.items(): + if f_name == 'main': + _insert_image_preprocessing_ops(f, prog) + +@block_context_manager +def _insert_image_preprocessing_ops(block, prog): + input_types = list(prog.main_input_types) + + for input_type in input_types: + if isinstance(input_type, ImageType): + if input_type.name not in block.inputs: + continue + + input_var = block.inputs[input_type.name] + placeholder_op = block.placeholder_inputs[input_type.name] + first_op = block.operations[0] + old_var = placeholder_op.outputs[0] + has_bias = np.any(np.array(input_type.bias) != 0) + last_output = input_var + input_nptype = nptype_from_builtin(type(last_output.dtype())) + if input_type.scale != 1: + last_output = mb.mul(x=last_output, + y=np.array(input_type.scale, dtype=input_nptype), + before_op=first_op, name=input_var.name + "__scaled__") + if has_bias: + if input_type.color_layout in (ColorLayout.GRAYSCALE, ColorLayout.GRAYSCALE_FLOAT16): + last_output = mb.add(x=last_output, + y=np.array(input_type.bias, dtype=input_nptype), + before_op=first_op, name=input_var.name + "__biased__") + else: + if len(last_output.shape) == 3: + last_output = mb.add(x=last_output, + y=np.array(input_type.bias, dtype=input_nptype).reshape([3, 1, 1]), + before_op=first_op, name=input_var.name + "__biased__") + elif len(last_output.shape) == 4: + last_output = mb.add(x=last_output, + y=np.array(input_type.bias, dtype=input_nptype).reshape([1, 3, 1, 1]), + before_op=first_op, name=input_var.name + "__biased__") + else: + raise TypeError("Unsupported rank for image input type.") + + if last_output != input_var: + block.replace_uses_of_var_after_op(anchor_op=last_output.op, + old_var=old_var, + new_var=last_output) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/passes/sanitize_name_strings.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/passes/sanitize_name_strings.py new file mode 100644 
index 00000000..9ec89909 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/passes/sanitize_name_strings.py @@ -0,0 +1,22 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools.converters.mil.mil.passes.defs.preprocess import NameSanitizer +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.pass_registry import register_pass + + +@register_pass(namespace="mil_backend") +class sanitize_name_strings(AbstractGraphPass): + """ + Sanitize the names of vars and ops to make sure + that they are of the format as described in the NameSanitizer class, i.e. + of the format [a-zA-Z_][a-zA-Z0-9_]* + """ + def apply(self, prog): + for f in prog.functions.values(): + sanitizer_vars = NameSanitizer(prefix="var_") + sanitizer_ops = NameSanitizer(prefix="op_") + NameSanitizer.sanitize_block(f, sanitizer_vars, sanitizer_ops, prog.main_input_types) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/passes/test_passes.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/passes/test_passes.py new file mode 100644 index 00000000..6d82ff38 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/passes/test_passes.py @@ -0,0 +1,888 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import copy +import itertools + +import numpy as np +import pytest + +# import mil internal ops to add it to the builder +import coremltools as ct +# Set the testing backend +from coremltools.converters.mil._deployment_compatibility import \ + AvailableTarget as target +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.passes.pass_registry import PASS_REGISTRY +from coremltools.converters.mil.testing_utils import ( + apply_pass_and_basic_check, assert_model_is_valid, get_op_types_in_program) + + +class TestAdjustToSupportedTypes: + + def test_basic(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 1, 1), dtype=types.bool), + mb.TensorSpec(shape=(1, 1, 1, 1), dtype=types.int32), + mb.TensorSpec(shape=(1, 1, 1, 1), dtype=types.fp32)]) + def prog(x, y, z): + out = mb.logical_not(x=x) + return (out, y, z) + prog.functions['not_main'] = copy.deepcopy(prog.functions['main']) + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "mil_backend::adjust_io_to_supported_types" + ) + + """ + Input graph: + + func main(bool x, int32 y, fp32 z) { + bool out = logical_not(x) + } -> (out, y, z) + + becomes + + func main(fp32 x, int32 y, fp32 z) { + bool x_casted = cast(x) + bool out__pre__output__fp32__cast = logical_not(x_casted) + fp32 out = cast(out__pre__output__fp32__cast) + } -> (out, y, z) + """ + assert get_op_types_in_program(prev_prog) == ['logical_not'] + assert get_op_types_in_program(prog) == ['cast', 'logical_not', 'cast'] + + prev_inputs = list(prev_prog.functions['main'].inputs.items()) + inputs = list(prog.functions['main'].inputs.items()) + assert prev_inputs[0][1].name == inputs[0][1].name + assert inputs[0][1].dtype == 
types.fp32 + for i in range(1, len(inputs)): + assert prev_inputs[i][1].name == inputs[i][1].name + assert prev_inputs[i][1].dtype == inputs[i][1].dtype + + prev_outputs = prev_prog.functions['main'].outputs + outputs = prog.functions['main'].outputs + assert prev_outputs[0].name == outputs[0].name + assert outputs[0].dtype == types.fp32 + for i in range(1, len(outputs)): + assert prev_outputs[i].name == outputs[i].name + assert prev_outputs[i].dtype == outputs[i].dtype + + """ + Input graph: + + func not_main(bool x, int32 y, fp32 z) { + bool out = logical_not(x) + } -> (out, y, z) + + is identical after the pass. + """ + assert get_op_types_in_program(prev_prog, 'not_main') == ['logical_not'] + assert get_op_types_in_program(prog, 'not_main') == ['logical_not'] + + prev_inputs = list(prev_prog.functions['not_main'].inputs.items()) + inputs = list(prog.functions['not_main'].inputs.items()) + for i in range(0, len(inputs)): + assert prev_inputs[i][1].name == inputs[i][1].name + assert prev_inputs[i][1].dtype == inputs[i][1].dtype + + prev_outputs = prev_prog.functions['not_main'].outputs + outputs = prog.functions['not_main'].outputs + for i in range(0, len(outputs)): + assert prev_outputs[i].name == outputs[i].name + assert prev_outputs[i].dtype == outputs[i].dtype + + def test_int64_input(self): + """ + Input graph: + + func main(int64 x) { + } -> (x) + + becomes + + func main(int32 x) { + } -> (x) + """ + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 1, 1), dtype=types.int64)]) + def prog(x): + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "mil_backend::adjust_io_to_supported_types" + ) + + prev_inputs = list(prev_prog.functions['main'].inputs.items()) + inputs = list(prog.functions['main'].inputs.items()) + assert prev_inputs[0][1].name == inputs[0][1].name + assert inputs[0][1].dtype == types.int32 + + def test_float64_input(self): + """ + Input graph: + + func main(float64 x) { + } -> (x) + + becomes + + func main(float32 x) { + } -> (x) + """ + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 1, 1), dtype=types.fp64)]) + def prog(x): + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "mil_backend::adjust_io_to_supported_types" + ) + + prev_inputs = list(prev_prog.functions['main'].inputs.items()) + inputs = list(prog.functions['main'].inputs.items()) + assert prev_inputs[0][1].name == inputs[0][1].name + assert inputs[0][1].dtype == types.fp32 + + + @pytest.mark.parametrize( + "opset_version", + [None, target.iOS13, target.iOS16], + ) + def test_float16_input_output(self, opset_version): + """ + Input graph: + + main(%x: (1, 1, 1, 1, fp16)(Tensor)) { + block0() { + %relu_0: (1, 1, 1, 1, fp16)(Tensor) = relu(x=%x, name="relu_0") + } -> (%relu_0) + } + + Output graph (if opset_version < ios16): + + main(%x: (1, 1, 1, 1, fp32)(Tensor)) { + block0() { + %cast_0: (1, 1, 1, 1, fp16)(Tensor) = cast(x=%x, dtype="fp16", name="cast_0") + %relu_0__pre__output__fp32__cast: (1, 1, 1, 1, fp16)(Tensor) = relu(x=%cast_0, name="relu_0") + %relu_0: (1, 1, 1, 1, fp32)(Tensor) = cast(x=%relu_0__pre__output__fp32__cast, dtype="fp32", name="cast_1") + } -> (%relu_0) + } + + Output graph (if opset_version >= ios16): same as the input graph + """ + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 1, 1), dtype=types.fp16)], opset_version=opset_version) + def prog(x): + return mb.relu(x=x) + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "mil_backend::adjust_io_to_supported_types" + ) + + prev_inputs = 
list(prev_block.inputs.items()) + inputs = list(block.inputs.items()) + prev_outputs = prev_block.outputs + outputs = block.outputs + assert prev_inputs[0][1].name == inputs[0][1].name + assert outputs[0].name == prev_outputs[0].name + if opset_version is None or opset_version < target.iOS16: + assert get_op_types_in_program(prog) == ['cast', 'relu', 'cast'] + assert inputs[0][1].dtype == types.fp32 + assert outputs[0].dtype == types.fp32 + else: + assert get_op_types_in_program(prog) == ['relu'] + assert inputs[0][1].dtype == types.fp16 + assert block.outputs[0].dtype == types.fp16 + + def test_float16_input_output_with_opset_version_inference(self): + """ + Input graph: + + main(%x: (1, 1, 4, 4, fp16)(Tensor)) { + block0() { + %pixel_unshuffle_0: (1, 4, 2, 2, fp16)(Tensor) = pixel_unshuffle(x=%x, downscale_factor=2, name="pixel_unshuffle_0") + } -> (%pixel_unshuffle_0) + } + + This function would be inferred as an iOS16 function, and the graph pass should behave properly + """ + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 4, 4), dtype=types.fp16)]) + def prog(x): + x = mb.pixel_unshuffle(x=x, downscale_factor=np.uint32(2)) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "mil_backend::adjust_io_to_supported_types" + ) + + prev_inputs = list(prev_block.inputs.items()) + inputs = list(block.inputs.items()) + prev_outputs = prev_block.outputs + outputs = block.outputs + assert prev_inputs[0][1].name == inputs[0][1].name + assert outputs[0].name == prev_outputs[0].name + assert get_op_types_in_program(prog) == ['pixel_unshuffle'] + assert inputs[0][1].dtype == types.fp16 + assert block.outputs[0].dtype == types.fp16 + + def test_int8_input(self): + """ + Input graph: + + func main(int8 x) { + } -> (x) + + becomes + + func main(int32 x) { + } -> (x) + """ + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 1, 1), dtype=types.int8)]) + def prog(x): + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "mil_backend::adjust_io_to_supported_types" + ) + + prev_inputs = list(prev_prog.functions['main'].inputs.items()) + inputs = list(prog.functions['main'].inputs.items()) + assert prev_inputs[0][1].name == inputs[0][1].name + assert inputs[0][1].dtype == types.int32 + + def test_subblock(self): + """ + Input graph: + + func main(float64 a, float32 b) { + float64 out_0, float32 out_1 = while_loop(a, b, + (float64 a, float32 b) { + bool cond = less(a, b) + } -> (cond) + (float64 a, float32 b) { + float64 temp = const(1) + float64 out = add(a, b) + } -> (out, b) + ); + } -> (out_0, out_1) + + becomes + + func main(float32 a, float32 b) { + float32 out_0, float32 out_1 = while_loop(a, b, + (float32 a, float32 b) { + bool cond = less(a, b) + } -> (cond) + (float32 a, float32 b) { + float32 temp = const(1) + float32 out = add(a, b) + } -> (out, b) + ); + } -> (out_0, out_1) + """ + pytest.xfail("fp64 dtype not supported in MIL") + def body(a, b): + return mb.add(x=a, y=np.float64(1)), b + + def cond(a, b): + return mb.less(x=a, y=b) + + @mb.program(input_specs=[mb.TensorSpec(shape=(1,), dtype=types.fp64), + mb.TensorSpec(shape=(1,), dtype=types.fp32)]) + def prog(a, b): + return mb.while_loop(_cond=cond, _body=body, loop_vars=(a, b)) + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "mil_backend::adjust_io_to_supported_types" + ) + + prev_inputs = list(prev_prog.functions['main'].inputs.items()) + inputs = list(prog.functions['main'].inputs.items()) + for i in range(0, len(prev_inputs)): + assert 
prev_inputs[i][1].name == inputs[i][1].name
+ assert inputs[i][1].dtype == types.fp32
+
+ assert get_op_types_in_program(prev_prog) == ['while_loop']
+ assert get_op_types_in_program(prog) == ['while_loop']
+
+ def assert_block_inputs(prev_inputs, inputs):
+ for i in range(0, len(prev_inputs)):
+ assert prev_inputs[i].name == inputs[i].name
+ assert inputs[i].dtype == types.fp32
+
+ subblocks = prog.functions['main'].operations[0].blocks
+ prev_subblocks = prev_prog.functions['main'].operations[0].blocks
+ for i in range(0, len(subblocks)):
+ assert_block_inputs(prev_subblocks[i].inputs, subblocks[i].inputs)
+
+ def test_adjust_cast(self):
+ """
+ Input graph:
+
+ func main(int32 x) {
+ fp64 y = cast(x=x, dtype="fp64")
+ } -> (y)
+
+ becomes
+
+ func main(int32 x) {
+ fp32 y = cast(x=x, dtype="fp32")
+ } -> (y)
+ """
+ pytest.xfail("cast operation does not support casting to fp64")
+ @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 1, 1), dtype=types.int32)])
+ def prog(x):
+ y = mb.cast(x=x, dtype="fp64")
+ return y
+
+ prev_prog, prev_block, block = apply_pass_and_basic_check(
+ prog, "mil_backend::adjust_io_to_supported_types"
+ )
+
+ assert get_op_types_in_program(prev_prog) == ['cast']
+ assert get_op_types_in_program(prog) == ['cast']
+
+ prev_cast = prev_prog.functions['main'].operations[1]
+ cast = prog.functions['main'].operations[2]
+
+ assert prev_cast.dtype.val == "fp64"
+ assert prev_cast.outputs[0].dtype == types.fp64
+
+ assert cast.dtype.val == "fp32"
+ assert cast.outputs[0].dtype == types.fp32
+
+ def test_adjust_redundant_cast(self):
+ """
+ Input graph:
+
+ func main(int32 x) {
+ int64 y = cast(x=x, dtype="int64")
+ } -> (y)
+
+ becomes
+
+ func main(int32 x) {
+ } -> (x)
+ """
+ pytest.xfail("cast does not support dtype=`int64`")
+ @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 1, 1), dtype=types.int32)])
+ def prog(x):
+ y = mb.cast(x=x, dtype="int64")
+ return y
+
+ prev_prog, prev_block, block = apply_pass_and_basic_check(
+ prog, "mil_backend::adjust_io_to_supported_types"
+ )
+
+ assert get_op_types_in_program(prev_prog) == ['cast']
+ assert get_op_types_in_program(prog) == []
+
+class TestImagePreprocessingPass:
+
+ def test_program_grayscale(self):
+ """
+ Input graph:
+
+ main(x: ImageType(color_layout="G", channel_first=True)) {
+ y1 = relu(x)
+ y2 = relu(x)
+ output = add(y1, y2)
+ } [output]
+
+ Output graph:
+
+ main(x: ImageType(channel_first=True)) {
+ y1 = relu(x)
+ y2 = relu(x)
+ output = add(y1, y2)
+ } [output]
+ """
+
+ @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 20, 20))])
+ def prog(x):
+ y1 = mb.relu(x=x)
+ y2 = mb.relu(x=x)
+ z = mb.add(x=y1, y=y2)
+ return z
+
+ prog.main_input_types = (ct.ImageType(name='x',
+ shape=[1, 1, 20, 20],
+ color_layout="G",
+ channel_first=True),)
+
+ prev_prog, prev_block, block = apply_pass_and_basic_check(
+ prog, "mil_backend::insert_image_preprocessing_ops"
+ )
+ assert get_op_types_in_program(prev_prog) == ["relu", "relu", "add"]
+ assert get_op_types_in_program(prog) == ["relu", "relu", "add"]
+
+ def test_program_grayscale_with_scale(self):
+ """
+ Input graph:
+
+ main(x: ImageType(scale=2.0, color_layout="G", channel_first=True)) {
+ y1 = relu(x)
+ y2 = relu(x)
+ output = add(y1, y2)
+ } [output]
+
+ Output graph:
+
+ main(x: ImageType(channel_first=True)) {
+ y = mul(x, 2)
+ y1 = relu(y)
+ y2 = relu(y)
+ output = add(y1, y2)
+ } [output]
+ """
+
+ @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 20, 20))])
+ def prog(x):
+ y1 = mb.relu(x=x)
+ y2 = mb.relu(x=x)
+ z = mb.add(x=y1, y=y2)
+ 
return z + + prog.main_input_types = (ct.ImageType(name='x', + shape=[1, 1, 20, 20], + scale=2.0, + color_layout="G", + channel_first=True),) + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "mil_backend::insert_image_preprocessing_ops" + ) + assert get_op_types_in_program(prev_prog) == ["relu", "relu", "add"] + assert get_op_types_in_program(prog) == ["mul", "relu", "relu", "add"] + scale_op = prog.find_ops(op_type="mul", exactly_one=True)[0] + assert scale_op.y.val == 2.0 + + def test_program_grayscale_with_bias(self): + """ + Input graph: + + main(x: ImageType(bias=2.0, color_layout="G", channel_first=True)) { + y1 = relu(x) + y2 = relu(x) + output = add(y1, y2) + } [output] + + Output graph: + + main(x: ImageType(channel_first=True)) { + y = add(x, 2) + y1 = relu(y) + y2 = relu(y) + output = add(y1, y2) + } [output] + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 20, 20))]) + def prog(x): + y1 = mb.relu(x=x) + y2 = mb.relu(x=x) + z = mb.add(x=y1, y=y2) + return z + + prog.main_input_types = (ct.ImageType(name='x', + shape=[1, 1, 20, 20], + bias=2.0, + color_layout="G", + channel_first=True),) + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "mil_backend::insert_image_preprocessing_ops" + ) + assert get_op_types_in_program(prev_prog) == ["relu", "relu", "add"] + assert get_op_types_in_program(prog) == ["add", "relu", "relu", "add"] + add_op = prog.find_ops(op_type="add", exactly_one=False)[0] + assert add_op.y.val == 2.0 + + def test_program_grayscale_with_scale_bias(self): + """ + Input graph: + + main(x: ImageType(scale=2.0, bias=2.0, color_layout="G", channel_first=True)) { + y1 = relu(x) + y2 = relu(x) + output = add(y1, y2) + } [output] + + Output graph: + + main(x: ImageType(channel_first=True)) { + y_scaled = mul(x, 2) + y = add(y_scaled, 2) + y1 = relu(y) + y2 = relu(y) + output = add(y1, y2) + } [output] + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 20, 20))]) + def prog(x): + y1 = mb.relu(x=x) + y2 = mb.relu(x=x) + z = mb.add(x=y1, y=y2) + return z + + prog.main_input_types = (ct.ImageType(name='x', + shape=[1, 1, 20, 20], + scale=2.0, + bias=2.0, + color_layout="G", + channel_first=True),) + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "mil_backend::insert_image_preprocessing_ops" + ) + assert get_op_types_in_program(prev_prog) == ["relu", "relu", "add"] + assert get_op_types_in_program(prog) == ["mul", "add", "relu", "relu", "add"] + scale_op = prog.find_ops(op_type="mul", exactly_one=True)[0] + assert scale_op.y.val == 2.0 + add_op = prog.find_ops(op_type="add", exactly_one=False)[0] + assert add_op.y.val == 2.0 + + def test_program_rgb(self): + """ + Input graph: + + main(x: ImageType(color_layout="RGB", channel_first=True)) { + y1 = relu(x) + y2 = relu(x) + output = add(y1, y2) + } [output] + + Output graph: + + main(x: ImageType(channel_first=True)) { + y1 = relu(x) + y2 = relu(x) + output = add(y1, y2) + } [output] + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 3, 20, 20))]) + def prog(x): + y1 = mb.relu(x=x) + y2 = mb.relu(x=x) + z = mb.add(x=y1, y=y2) + return z + + prog.main_input_types = (ct.ImageType(name='x', + shape=[1, 3, 20, 20], + color_layout="RGB", + channel_first=True),) + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "mil_backend::insert_image_preprocessing_ops" + ) + assert get_op_types_in_program(prev_prog) == ["relu", "relu", "add"] + assert get_op_types_in_program(prog) == ["relu", "relu", "add"] + + def 
test_program_rgb_scale_bias(self): + """ + Input graph: + + main(x: ImageType(color_layout="RGB", scale=2.0, bias=[1.0, 2.0, 3.0], channel_first=True)) { + y1 = relu(x) + y2 = relu(x) + output = add(y1, y2) + } [output] + + Output graph: + + main(x: ImageType(channel_first=True)) { + y = mul(x, scale) + y_bias = add(y, bias) + y1 = relu(y_bias) + y2 = relu(y_bias) + output = add(y1, y2) + } [output] + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 3, 20, 20))]) + def prog(x): + y1 = mb.relu(x=x) + y2 = mb.relu(x=x) + z = mb.add(x=y1, y=y2) + return z + + prog.main_input_types = (ct.ImageType(name='x', + shape=[1, 3, 20, 20], + scale=2.0, + bias=[1.0, 2.0, 3.0], + color_layout="RGB", + channel_first=True),) + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "mil_backend::insert_image_preprocessing_ops" + ) + assert get_op_types_in_program(prev_prog) == ["relu", "relu", "add"] + assert get_op_types_in_program(prog) == ["mul", "add", "relu", "relu", "add"] + scale_op = prog.find_ops(op_type="mul", exactly_one=True)[0] + assert scale_op.y.val == 2.0 + add_op = prog.find_ops(op_type="add", exactly_one=False)[0] + assert np.all(add_op.y.val == np.array([1.0, 2.0, 3.0]).reshape([1, 3, 1, 1])) + + def test_program_bgr(self): + """ + Input graph: + + main(x: ImageType(color_layout="BGR", channel_first=True)) { + y1 = relu(x) + y2 = relu(x) + output = add(y1, y2) + } [output] + + Output graph: + + main(x: ImageType(channel_first=True)) { + y1 = relu(x) + y2 = relu(x) + output = add(y1, y2) + } [output] + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 3, 20, 20))]) + def prog(x): + y1 = mb.relu(x=x) + y2 = mb.relu(x=x) + z = mb.add(x=y1, y=y2) + return z + + prog.main_input_types = (ct.ImageType(name='x', + shape=[1, 3, 20, 20], + color_layout="BGR", + channel_first=True),) + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "mil_backend::insert_image_preprocessing_ops" + ) + assert get_op_types_in_program(prev_prog) == ["relu", "relu", "add"] + assert get_op_types_in_program(prog) == ["relu", "relu", "add"] + + def test_program_bgr_scale_bias(self): + """ + Input graph: + + main(x: ImageType(color_layout="BGR", scale=2.0, bias=[1.0, 2.0, 3.0], channel_first=True)) { + y1 = relu(x) + y2 = relu(x) + output = add(y1, y2) + } [output] + + Output graph: + + main(x: ImageType(channel_first=True)) { + y = mul(x, scale) + y_bias = add(y, bias) + y1 = relu(y_bias) + y2 = relu(y_bias) + output = add(y1, y2) + } [output] + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 3, 20, 20))]) + def prog(x): + y1 = mb.relu(x=x) + y2 = mb.relu(x=x) + z = mb.add(x=y1, y=y2) + return z + + prog.main_input_types = (ct.ImageType(name='x', + shape=[1, 3, 20, 20], + scale=2.0, + bias=[1.0, 2.0, 3.0], + color_layout="BGR", + channel_first=True),) + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "mil_backend::insert_image_preprocessing_ops" + ) + assert get_op_types_in_program(prev_prog) == ["relu", "relu", "add"] + assert get_op_types_in_program(prog) == ["mul", "add", "relu", "relu", "add"] + scale_op = prog.find_ops(op_type="mul", exactly_one=True)[0] + assert scale_op.y.val == 2.0 + add_op = prog.find_ops(op_type="add", exactly_one=False)[0] + assert np.all(add_op.y.val == np.array([1.0, 2.0, 3.0]).reshape([1, 3, 1, 1])) + + @pytest.mark.parametrize( + "scale_type, bias_type", itertools.product([np.float32, np.int32], [np.float32, np.int32]) + ) + def test_scale_bias_types(self, scale_type, bias_type): + """ + Input graph: + + main(x: 
ImageType(color_layout="RGB", scale=2.0, bias=[1.0, 2.0, 3.0], channel_first=True)) { + y1 = relu(x) + y2 = relu(x) + output = add(y1, y2) + } [output] + + Output graph: + + main(x: ImageType(channel_first=True)) { + y = mul(x, scale) + y_bias = add(y, bias) + y1 = relu(y_bias) + y2 = relu(y_bias) + output = add(y1, y2) + } [output] + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 3, 20, 20))]) + def prog(x): + y1 = mb.relu(x=x) + y2 = mb.relu(x=x) + z = mb.add(x=y1, y=y2) + return z + + prog.main_input_types = (ct.ImageType(name='x', + shape=[1, 3, 20, 20], + scale=scale_type(2.0), + bias=np.array([1, 2, 3]).astype(bias_type), + color_layout="RGB", + channel_first=True),) + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "mil_backend::insert_image_preprocessing_ops" + ) + assert get_op_types_in_program(prev_prog) == ["relu", "relu", "add"] + assert get_op_types_in_program(prog) == ["mul", "add", "relu", "relu", "add"] + scale_op = prog.find_ops(op_type="mul", exactly_one=True)[0] + assert scale_op.y.dtype() == prog.functions["main"].inputs["x"].dtype() + add_op = prog.find_ops(op_type="add", exactly_one=False)[0] + assert add_op.y.dtype() == prog.functions["main"].inputs["x"].dtype() + +class TestSanitizerPass: + + def test_sanitize_numeric_var_names(self): + """ + Input: + main(%x: (1, 3, 20, fp32)(Tensor)) { + block0() { + %var_1!: (1, 3, 20, fp32)(Tensor) = relu(x=%x, name="var_1!") + %1: (1, 3, 20, fp32)(Tensor) = relu(x=%x, name="1") + %3: (1, 3, 20, fp32)(Tensor) = add(x=%Var_1!, y=%1, name="3") + } -> (%3) + } + + Output: + main(%x: (1, 3, 20, fp32)(Tensor)) { + block0() { + %var_1_: (1, 3, 20, fp32)(Tensor) = relu(x=%x, name="var_1_") + %var_1: (1, 3, 20, fp32)(Tensor) = relu(x=%x, name="op_1") + %var_3: (1, 3, 20, fp32)(Tensor) = add(x=%var_1_, y=%var_1, name="op_3") + } -> (%var_3) + } + + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 3, 20))]) + def prog(x): + y1 = mb.relu(x=x, name = "var_1!") + y2 = mb.relu(x=x, name = "1") + z = mb.add(x=y1, y=y2, name = "3") + return z + + PASS_REGISTRY["mil_backend::sanitize_name_strings"](prog) + block = prog.functions["main"] + assert block.find_ops(op_type="relu")[0].outputs[0].name == "var_1_" + assert block.find_ops(op_type="relu")[1].outputs[0].name == "var_1" + assert prog["main"].outputs[0].name == "var_3" + assert block.find_ops(op_type="relu")[0].name == "var_1_" + assert block.find_ops(op_type="relu")[1].name == "op_1" + assert block.find_ops(op_type="add")[0].name == "op_3" + + def test_sanitize_var_names_with_two_functions(self): + """ + Input: + main(%x: (1, 3, 20, fp32)(Tensor)) { + block0() { + %var_1!: (1, 3, 20, fp32)(Tensor) = relu(x=%x, name="var_1!") + } -> (%var_1!) + } + + main_2(%x: (1, 3, 20, fp32)(Tensor)) { + block0() { + %var_1!: (1, 3, 20, fp32)(Tensor) = relu(x=%x, name="var_1!") + } -> (%var_1!) 
+ } + + + Output: + main(%x: (1, 3, 20, fp32)(Tensor)) { + block0() { + %var_1!: (1, 3, 20, fp32)(Tensor) = relu(x=%x, name="var_1_") + } -> (%var_1_) + } + + main_2(%x: (1, 3, 20, fp32)(Tensor)) { + block0() { + %var_1!: (1, 3, 20, fp32)(Tensor) = relu(x=%x, name="var_1_") + } -> (%var_1_) + } + + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 3, 20))]) + def prog(x): + z = mb.relu(x=x, name = "var_1!") + return z + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 3, 20))]) + def prog_2(x): + z = mb.relu(x=x, name = "var_1!") + return z + + prog.add_function("main_2", prog_2.functions["main"]) + PASS_REGISTRY["mil_backend::sanitize_name_strings"](prog) + block = prog.functions["main"] + assert block.find_ops(op_type="relu")[0].outputs[0].name == "var_1_" + assert prog["main"].outputs[0].name == "var_1_" + assert block.find_ops(op_type="relu")[0].name == "var_1_" + block = prog.functions["main_2"] + assert block.find_ops(op_type="relu")[0].outputs[0].name == "var_1_" + assert prog["main"].outputs[0].name == "var_1_" + assert block.find_ops(op_type="relu")[0].name == "var_1_" + + +class TestPassFuseActivationSiLU: + """ + Input graph: + input --> sigmoid --> mul --> output + Output graph: + input --> silu --> output + """ + + @pytest.mark.skipif(ct.utils._macos_version() < (12, 0), reason="mlprogram predict available only on macOS12+") + @pytest.mark.parametrize( + "reverse_order", itertools.product([True, False]), + ) + def test_0(self, reverse_order): + x_shape = tuple(np.random.randint(low=1, high=4, size=5)) + + @mb.program(input_specs=[mb.TensorSpec(shape=x_shape)]) + def program(x): + sigmoid_x = mb.sigmoid(x=x) + if not reverse_order: + x = mb.mul(x=x, y=sigmoid_x) + else: + x = mb.mul(x=sigmoid_x, y=x) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + program, "mil_backend::fuse_activation_silu" + ) + + assert get_op_types_in_program(prev_prog) == ["sigmoid", "mul"] + assert get_op_types_in_program(program) == ["silu"] + + assert_model_is_valid( + program=program, + inputs={"x": x_shape}, + backend=("mlprogram", "fp32"), + expected_output_shapes={block.outputs[0].name: tuple(x_shape)}, + ) + diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/test_helper.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/test_helper.py new file mode 100644 index 00000000..4a07c00b --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/test_helper.py @@ -0,0 +1,27 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools.converters.mil.mil.passes.defs.preprocess import NameSanitizer as _NameSanitizer + + +class TestNameSanitizer: + + def test_name_sanitizer(self): + input_and_expected_strings = [("1", "_1"), + ("abc", "abc"), + ("*asdf", "_asdf"), + ("*asd*f", "_asd_f"), + ("0abc2", "_0abc2"), + ("is8174 + 16", "is8174___16"), + ("a:abc", "a_abc"), + ("a.abc", "a_abc"), + ("dense_2_1/BiasAdd", "dense_2_1_BiasAdd"), + ("dense_2_1-BiasAdd", "dense_2_1_BiasAdd"), + ("key:0", "key_0"), + ] + + for i, in_and_out_str in enumerate(input_and_expected_strings): + out = _NameSanitizer().sanitize_name(in_and_out_str[0]) + assert out == in_and_out_str[1] diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/test_model_input_params.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/test_model_input_params.py new file mode 100644 index 00000000..5847e172 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/mil/test_model_input_params.py @@ -0,0 +1,195 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause +import numpy as np + +import coremltools as ct +from coremltools.converters.mil.mil.builder import Builder as mb +from coremltools.converters.mil.mil.program import Symbol +from coremltools.models.utils import _macos_version + + +class TestMILFlexibleShapes: + + @mb.program( + input_specs = [ + mb.TensorSpec(shape=[1, 3, Symbol("H"), Symbol("W")]) + ]) + def basic_network(x): + return mb.relu(x=x) + + def test_mil_enumerated_multiarray(self): + enumerated_shapes = tuple([(1, 3, 10, 10), (1, 3, 10, 20), (1, 3, 10, 30)]) + input_shape = [ct.TensorType(name="x", shape=ct.EnumeratedShapes(shapes=enumerated_shapes))] + mlmodel = ct.convert(self.basic_network, source="milinternal", convert_to="mlprogram", inputs=input_shape) + input_spec = mlmodel.get_spec().description.input + assert len(input_spec) == 1, "1 input expected, got {} instead".format(len(input_spec)) + assert input_spec[0].name == "x", "input name in MLModel is {}, 'x' is expected".format(input_spec[0].name) + assert input_spec[0].type.WhichOneof("Type") == "multiArrayType", "Expected multiArrayType, got {}".format(input_spec[0].type.WhichOneof("Type")) + assert input_spec[0].type.multiArrayType.WhichOneof("ShapeFlexibility") == "enumeratedShapes", "Expected enumeratedShapes in ShapeFlexibility" + + spec_default_shape = [s for s in input_spec[0].type.multiArrayType.shape] + spec_enumerated_shapes = set() + for enumerated in input_spec[0].type.multiArrayType.enumeratedShapes.shapes: + spec_enumerated_shapes.add(tuple([s for s in enumerated.shape])) + assert spec_default_shape == [1, 3, 10, 10], "Expected default shape to be [1, 3, 10, 10], got {} instead".format(str(spec_default_shape)) + assert spec_enumerated_shapes == set(enumerated_shapes), "Enumerated shape mismatch" + + def test_mil_enumerated_multiarray_with_default(self): + enumerated_shapes = tuple([(1, 3, 10, 10), (1, 3, 10, 20), (1, 3, 10, 30)]) + input_shape = [ct.TensorType(name="x", shape=ct.EnumeratedShapes(shapes=enumerated_shapes, default=(1, 3, 10, 30)))] + mlmodel = ct.convert(self.basic_network, source="milinternal", 
convert_to="mlprogram", inputs=input_shape)
+        input_spec = mlmodel.get_spec().description.input
+        assert len(input_spec) == 1, "1 input expected, got {} instead".format(len(input_spec))
+        assert input_spec[0].name == "x", "input name in MLModel is {}, 'x' is expected".format(input_spec[0].name)
+        assert input_spec[0].type.WhichOneof("Type") == "multiArrayType", "Expected multiArrayType, got {}".format(input_spec[0].type.WhichOneof("Type"))
+        assert input_spec[0].type.multiArrayType.WhichOneof("ShapeFlexibility") == "enumeratedShapes", "Expected enumeratedShapes in ShapeFlexibility"
+
+        spec_default_shape = [s for s in input_spec[0].type.multiArrayType.shape]
+        spec_enumerated_shapes = set()
+        for enumerated in input_spec[0].type.multiArrayType.enumeratedShapes.shapes:
+            spec_enumerated_shapes.add(tuple([s for s in enumerated.shape]))
+        assert spec_default_shape == [1, 3, 10, 30], "Expected default shape to be [1, 3, 10, 30], got {} instead".format(str(spec_default_shape))
+        assert spec_enumerated_shapes == set(enumerated_shapes), "Enumerated shape mismatch"
+
+    def test_mil_enumerated_image(self):
+        enumerated_shapes = tuple([(1, 3, 10, 10), (1, 3, 10, 20), (1, 3, 10, 30)])
+        input_shape = [ct.ImageType(name="x", shape=ct.EnumeratedShapes(shapes=enumerated_shapes))]
+        mlmodel = ct.convert(self.basic_network, source="milinternal", convert_to="mlprogram", inputs=input_shape)
+        input_spec = mlmodel.get_spec().description.input
+        assert len(input_spec) == 1, "1 input expected, got {} instead".format(len(input_spec))
+        assert input_spec[0].name == "x", "input name in MLModel is {}, 'x' is expected".format(input_spec[0].name)
+        assert input_spec[0].type.WhichOneof("Type") == "imageType", "Expected imageType, got {}".format(input_spec[0].type.WhichOneof("Type"))
+        assert input_spec[0].type.imageType.WhichOneof("SizeFlexibility") == "enumeratedSizes", "Expected enumeratedSizes in SizeFlexibility"
+
+        spec_H = input_spec[0].type.imageType.height
+        spec_W = input_spec[0].type.imageType.width
+        assert spec_H == 10 and spec_W == 10, "expected [H, W] == [10, 10], got [{}, {}] instead".format(spec_H, spec_W)
+
+        spec_enumerated_shapes = set()
+        for enumerated in input_spec[0].type.imageType.enumeratedSizes.sizes:
+            spec_enumerated_shapes.add(tuple([1, 3, enumerated.height, enumerated.width]))
+        assert spec_enumerated_shapes == set(enumerated_shapes), "Enumerated shape mismatch"
+
+    def test_mil_enumerated_image_with_default(self):
+        enumerated_shapes = tuple([(1, 3, 10, 10), (1, 3, 10, 20), (1, 3, 10, 30)])
+        input_shape = [ct.ImageType(name="x", shape=ct.EnumeratedShapes(shapes=enumerated_shapes, default=(1, 3, 10, 30)))]
+        mlmodel = ct.convert(self.basic_network, source="milinternal", convert_to="mlprogram", inputs=input_shape)
+        input_spec = mlmodel.get_spec().description.input
+        assert len(input_spec) == 1, "1 input expected, got {} instead".format(len(input_spec))
+        assert input_spec[0].name == "x", "input name in MLModel is {}, 'x' is expected".format(input_spec[0].name)
+        assert input_spec[0].type.WhichOneof("Type") == "imageType", "Expected imageType, got {}".format(input_spec[0].type.WhichOneof("Type"))
+        assert input_spec[0].type.imageType.WhichOneof("SizeFlexibility") == "enumeratedSizes", "Expected enumeratedSizes in SizeFlexibility"
+
+        spec_H = input_spec[0].type.imageType.height
+        spec_W = input_spec[0].type.imageType.width
+        assert spec_H == 10 and spec_W == 30, "expected [H, W] == [10, 30], got [{}, {}] instead".format(spec_H, spec_W)
+
+        spec_enumerated_shapes = set()
+        for
enumerated in input_spec[0].type.imageType.enumeratedSizes.sizes: + spec_enumerated_shapes.add(tuple([1, 3, enumerated.height, enumerated.width])) + assert spec_enumerated_shapes == set(enumerated_shapes), "Enumerated shape mismatch" + + def test_mil_ranged_multiarray(self): + input_shape = [ct.TensorType(name="x", shape=(1, 3, 10, ct.RangeDim(10, 30)))] + mlmodel = ct.convert(self.basic_network, source="milinternal", convert_to="mlprogram", inputs=input_shape) + input_spec = mlmodel.get_spec().description.input + assert len(input_spec) == 1, "1 input expected, got {} instead".format(len(input_spec)) + assert input_spec[0].name == "x", "input name in MLModel is {}, 'x' is expected".format(input_spec[0].name) + assert input_spec[0].type.WhichOneof("Type") == "multiArrayType", "Expected multiArrayType, got {}".format(input_spec[0].type.WhichOneof("Type")) + assert input_spec[0].type.multiArrayType.WhichOneof("ShapeFlexibility") == "shapeRange", "Expected shapeRange in ShapeFlexibility" + + spec_default_shape = [s for s in input_spec[0].type.multiArrayType.shape] + ranged_shapes = [(1, 1), (3, 3), (10, 10), (10, 30)] + spec_ranged_shapes = [] + for range_dim in input_spec[0].type.multiArrayType.shapeRange.sizeRanges: + spec_ranged_shapes.append(tuple([range_dim.lowerBound, range_dim.upperBound])) + assert spec_default_shape == [1, 3, 10, 10], "Expected default shape to be [1, 3, 10, 10], got {} instead".format(str(spec_default_shape)) + assert spec_ranged_shapes == ranged_shapes, "Enumerated shape mismatch" + + def test_mil_ranged_multiarray_with_default(self): + input_shape = [ct.TensorType(name="x", shape=(1, 3, 10, ct.RangeDim(10, 30, default=20)))] + mlmodel = ct.convert(self.basic_network, source="milinternal", convert_to="mlprogram", inputs=input_shape) + input_spec = mlmodel.get_spec().description.input + assert len(input_spec) == 1, "1 input expected, got {} instead".format(len(input_spec)) + assert input_spec[0].name == "x", "input name in MLModel is {}, 'x' is expected".format(input_spec[0].name) + assert input_spec[0].type.WhichOneof("Type") == "multiArrayType", "Expected multiArrayType, got {}".format(input_spec[0].type.WhichOneof("Type")) + assert input_spec[0].type.multiArrayType.WhichOneof("ShapeFlexibility") == "shapeRange", "Expected shapeRange in ShapeFlexibility" + + spec_default_shape = [s for s in input_spec[0].type.multiArrayType.shape] + ranged_shapes = [(1, 1), (3, 3), (10, 10), (10, 30)] + spec_ranged_shapes = [] + for range_dim in input_spec[0].type.multiArrayType.shapeRange.sizeRanges: + spec_ranged_shapes.append(tuple([range_dim.lowerBound, range_dim.upperBound])) + assert spec_default_shape == [1, 3, 10, 20], "Expected default shape to be [1, 3, 10, 20], got {} instead".format(str(spec_default_shape)) + assert spec_ranged_shapes == ranged_shapes, "Enumerated shape mismatch" + + def test_mil_ranged_image(self): + input_shape = [ct.ImageType(name="x", shape=(1, 3, 10, ct.RangeDim(10, 30)))] + mlmodel = ct.convert(self.basic_network, source="milinternal", convert_to="mlprogram", inputs=input_shape) + input_spec = mlmodel.get_spec().description.input + assert len(input_spec) == 1, "1 input expected, got {} instead".format(len(input_spec)) + assert input_spec[0].name == "x", "input name in MLModel is {}, 'x' is expected".format(input_spec[0].name) + assert input_spec[0].type.WhichOneof("Type") == "imageType", "Expected imageType, got {}".format(input_spec[0].type.WhichOneof("Type")) + assert input_spec[0].type.imageType.WhichOneof("SizeFlexibility") == 
"imageSizeRange", "Expected imageSizeRange in ShapeFlexibility" + + spec_H = input_spec[0].type.imageType.height + spec_W = input_spec[0].type.imageType.width + assert spec_H == 10 and spec_W == 10, "expected [H, W] == [10, 10], got [{}, {}] instead".format(spec_H, spec_W) + + spec_H_range = [input_spec[0].type.imageType.imageSizeRange.heightRange.lowerBound, input_spec[0].type.imageType.imageSizeRange.heightRange.upperBound] + spec_W_range = [input_spec[0].type.imageType.imageSizeRange.widthRange.lowerBound, input_spec[0].type.imageType.imageSizeRange.widthRange.upperBound] + assert spec_H_range == [10, 10], "Ranged height mismatch" + assert spec_W_range == [10, 30], "Ranged width mismatch" + + def test_mil_ranged_image_with_default(self): + input_shape = [ct.ImageType(name="x", shape=(1, 3, 10, ct.RangeDim(10, 30, default=20)))] + mlmodel = ct.convert(self.basic_network, source="milinternal", convert_to="mlprogram", inputs=input_shape) + input_spec = mlmodel.get_spec().description.input + assert len(input_spec) == 1, "1 input expected, got {} instead".format(len(input_spec)) + assert input_spec[0].name == "x", "input name in MLModel is {}, 'x' is expected".format(input_spec[0].name) + assert input_spec[0].type.WhichOneof("Type") == "imageType", "Expected imageType, got {}".format(input_spec[0].type.WhichOneof("Type")) + assert input_spec[0].type.imageType.WhichOneof("SizeFlexibility") == "imageSizeRange", "Expected imageSizeRange in ShapeFlexibility" + + spec_H = input_spec[0].type.imageType.height + spec_W = input_spec[0].type.imageType.width + assert spec_H == 10 and spec_W == 20, "expected [H, W] == [10, 20], got [{}, {}] instead".format(spec_H, spec_W) + + spec_H_range = [input_spec[0].type.imageType.imageSizeRange.heightRange.lowerBound, input_spec[0].type.imageType.imageSizeRange.heightRange.upperBound] + spec_W_range = [input_spec[0].type.imageType.imageSizeRange.widthRange.lowerBound, input_spec[0].type.imageType.imageSizeRange.widthRange.upperBound] + assert spec_H_range == [10, 10], "Ranged height mismatch" + assert spec_W_range == [10, 30], "Ranged width mismatch" + +class TestMILDefaultValues: + @mb.program( + input_specs = [ + mb.TensorSpec(shape=[1]), + mb.TensorSpec(shape=[1]) + ]) + def basic_network(x, y): + return mb.add(x=x, y=y, name="output") + + def test_mil_default_value_to_proto(self): + program_input_spec = [ct.TensorType(name="x", shape=[1], default_value=np.array([1.0]).astype(np.float32)), ct.TensorType(name="y", shape=[1])] + mlmodel = ct.convert(self.basic_network, convert_to="mlprogram", inputs=program_input_spec) + input_spec = mlmodel.get_spec().description.input + assert len(input_spec) == 2, "2 input expected, got {} instead".format(len(input_spec)) + assert input_spec[0].name == "x", "input name in MLModel is {}, 'x' is expected".format(input_spec[0].name) + assert input_spec[0].type.WhichOneof("Type") == "multiArrayType", "Expected multiArrayType, got {}".format(input_spec[0].type.WhichOneof("Type")) + assert input_spec[0].type.multiArrayType.WhichOneof("defaultOptionalValue") == "floatDefaultValue", "Expected floatDefaultValue, got {} instead".format(input_spec[0].type.multiArrayType.WhichOneof("defaultOptionalValue")) + assert input_spec[0].type.multiArrayType.floatDefaultValue == 1.0 + + def test_mil_default_value_runtime(self): + program_input_spec = [ct.TensorType(name="x", shape=[1], default_value=np.array([1.0]).astype(np.float32)), ct.TensorType(name="y", shape=[1])] + mlmodel = ct.convert(self.basic_network, convert_to="mlprogram", 
inputs=program_input_spec) + + if _macos_version() < (12, 0): + # Can only get predictions for ml program on macOS 12+ + return + + res = mlmodel.predict({"x": np.array([3.]), "y": np.array([2.])}) + assert res["output"][0] == 5.0 + + res = mlmodel.predict({"y": np.array([2.])}) + assert res["output"][0] == 3.0 diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/__init__.py new file mode 100644 index 00000000..61aafff4 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/load.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/load.py new file mode 100644 index 00000000..a2447782 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/load.py @@ -0,0 +1,313 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import coremltools as ct +from coremltools.converters._profile_utils import _profile +from coremltools.converters.mil.backend.backend_helper import _get_probability_var_for_classifier +from coremltools.converters.mil.input_types import ( + ColorLayout, + EnumeratedShapes, + ImageType, + RangeDim, + Shape, +) +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.types.symbolic import any_symbolic, any_variadic, is_symbolic +from coremltools.models import MLModel +from coremltools.models import neural_network as neural_network +from coremltools.models.datatypes import Array +from coremltools.models.neural_network import flexible_shape_utils +from coremltools.models.neural_network.flexible_shape_utils import ( + add_enumerated_image_sizes, + add_multiarray_ndshape_enumeration, + set_multiarray_ndshape_range, +) + +from ..backend_helper import _get_colorspace_enum, _validate_image_input_output_shapes +from .op_mapping import convert_ops + + +def _convert_to_image_input(proto, inputs, skip_model_load=False): + tmp_model = MLModel(proto, skip_model_load=skip_model_load) + for input_type in inputs: + if isinstance(input_type, ImageType): + if input_type.color_layout in (ColorLayout.GRAYSCALE, ColorLayout.GRAYSCALE_FLOAT16): + gray_bias = input_type.bias + red_bias, green_bias, blue_bias = 0.0, 0.0, 0.0 + elif input_type.color_layout == ColorLayout.RGB: + gray_bias = 0.0 + red_bias, green_bias, blue_bias = input_type.bias + elif input_type.color_layout == ColorLayout.BGR: + gray_bias = 0.0 + blue_bias, green_bias, red_bias = input_type.bias + tmp_model = neural_network.utils.make_image_input( + tmp_model, + input_type.name, + is_bgr=input_type.color_layout == ColorLayout.BGR, + image_format="NCHW" if input_type.channel_first else "NHWC", + red_bias=red_bias, + green_bias=green_bias, + blue_bias=blue_bias, + gray_bias=gray_bias, + scale=input_type.scale, + ) + return tmp_model.get_spec() + + +def _convert_to_classifier(proto, classifier_config, skip_model_load=False): + tmp_model = MLModel(proto, 
skip_model_load=skip_model_load) + tmp_model = neural_network.utils.make_nn_classifier( + tmp_model, + classifier_config.class_labels, + classifier_config.predicted_feature_name, + classifier_config.predicted_probabilities_output, + ) + return tmp_model.get_spec() + + +def _set_user_inputs(proto, inputs): + for input_type in inputs: + shape = input_type.shape + if isinstance(shape, EnumeratedShapes): + if isinstance(input_type, ImageType): + default_height, default_width = 0, 0 + for inp in proto.description.input: + if inp.name == input_type.name: + default_height = inp.type.imageType.height + default_width = inp.type.imageType.width + break + image_sizes = [] + if input_type.channel_first: + for s in shape.shapes: + if s.shape[-2] == default_height and s.shape[-1] == default_width: + continue + image_sizes.append( + flexible_shape_utils.NeuralNetworkImageSize( + height=s.shape[-2], width=s.shape[-1] + ) + ) + else: + for s in shape.shapes: + if s.shape[-3] == default_height and s.shape[-2] == default_width: + continue + image_sizes.append( + flexible_shape_utils.NeuralNetworkImageSize( + height=s.shape[-3], width=s.shape[-2] + ) + ) + add_enumerated_image_sizes( + proto, input_type.name, sizes=image_sizes + ) + else: + add_multiarray_ndshape_enumeration( + proto, input_type.name, [tuple(s.shape) for s in shape.shapes] + ) + elif isinstance(shape, Shape): + shape = shape.shape # This is shape in Shape + if all( + [ + not isinstance(s, RangeDim) and not is_symbolic(s) and s > 0 + for s in shape + ] + ): + continue + if isinstance(input_type, ImageType): + img_range = flexible_shape_utils.NeuralNetworkImageSizeRange() + if input_type.channel_first: + H = shape[-2] + W = shape[-1] + else: + H = shape[-3] + W = shape[-2] + + if isinstance(H, RangeDim): + img_range.add_height_range((H.lower_bound, H.upper_bound)) + elif is_symbolic(H): + img_range.add_height_range((1, -1)) + else: + img_range.add_height_range((H, H)) + if isinstance(W, RangeDim): + img_range.add_width_range((W.lower_bound, W.upper_bound)) + elif is_symbolic(W): + img_range.add_width_range((1, -1)) + else: + img_range.add_width_range((W, W)) + + flexible_shape_utils.update_image_size_range( + proto, input_type.name, img_range + ) + else: + lb = [] + ub = [] + for s in shape: + if isinstance(s, RangeDim): + lb.append(s.lower_bound) + ub.append(s.upper_bound) + elif is_symbolic(s): + lb.append(1) + ub.append(-1) + else: + lb.append(s) + ub.append(s) + set_multiarray_ndshape_range( + proto, input_type.name, lower_bounds=lb, upper_bounds=ub + ) + + +def _set_symbolic_inputs(proto, symbolic_inputs): + # Set symbolic input shapes by -1 infered from graph + for input_name, shape in symbolic_inputs.items(): + lb = [1 if is_symbolic(d) else d for d in shape] + ub = [-1 if is_symbolic(d) else d for d in shape] + set_multiarray_ndshape_range( + proto, input_name, lower_bounds=lb, upper_bounds=ub + ) + +def _set_optional_inputs(proto, input_types): + # Set default values for optional input_types + default_map = {} + for input_type in input_types: + if isinstance(input_type, ImageType): + continue + if input_type.default_value is not None: + default_map[input_type.name] = input_type.default_value + + for idx, input in enumerate(proto.description.input): + name = proto.description.input[idx].name + if name in default_map: + default_value = default_map[name] + proto.description.input[idx].type.isOptional = True + array_t = proto.description.input[idx].type.multiArrayType + default_fill_val = default_value.flatten()[0] + 
array_t.floatDefaultValue = default_fill_val + if default_fill_val != 0 or list(default_value.shape) != \ + array_t.shape: + # promote spec version to 5 and set the default value + proto.specificationVersion = max(proto.specificationVersion, + ct._SPECIFICATION_VERSION_IOS_14) + # array_t.shape is not empty. + array_t.ClearField('shape') + array_t.shape.extend(list(default_value.shape)) + + +@_profile +def load(prog, **kwargs): + if "main" not in prog.functions: + msg = "main function not found in program {}" + raise ValueError(msg.format(prog)) + if len(prog.functions) != 1: + msg = ( + "Program must have exactly one `main` function to " + "convert to NN. Program: {}" + ) + raise ValueError(msg.format(prog)) + + input_types = prog.main_input_types + output_types = prog.main_output_types + + v1_inputs = [] + symbolic_inputs = {} + for name, var in prog.functions["main"].inputs.items(): + if types.is_tensor(var.sym_type): + sym_shape = var.sym_type.get_shape() + if any_variadic(sym_shape): + raise NotImplementedError("Variadic rank is not supported") + if any_symbolic(sym_shape): + user_specified = False + for input_type in input_types: + if name == input_type.name: + sym_shape = input_type.shape.default + user_specified = True + break + # Use dummy static shape, and will set it later. + shape = [1 if is_symbolic(d) else d for d in sym_shape] + if not user_specified: + symbolic_inputs[name] = sym_shape + else: + shape = sym_shape + v1_inputs.append((name, Array(*shape))) + elif types.is_scalar(var.sym_type): + v1_inputs.append((name, Array(1))) + else: + raise NotImplementedError() + + v1_outputs = [] + for var in prog.functions["main"].outputs: + if types.is_tensor(var.sym_type) or types.is_primitive(var.sym_type): + # Disregard the output types + v1_outputs.append((var.name, None)) + else: + raise NotImplementedError() + + # create neural network builder + builder = neural_network.NeuralNetworkBuilder( + v1_inputs, + v1_outputs, + disable_rank5_shape_mapping=True, + use_float_arraytype=True, + ) + + # const in V2 are added lazily to V1 by each op whenever needed. + # `const_context` stores the const names we've added so far and avoid + # adding a const more than once. + # const_context: list[set of str] (const name for v1 & v2 + # (the same)). Note that in NN in outer layer is visible from the inner + # layer, so the const_context is simply a stack of set. 
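+    # Illustration (editor's note): after an outer const "w" is added,
+    # const_context == [{"w"}]; a nested block pushes a fresh set, giving
+    # [{"w"}, set()], so "w" stays visible through the outer set and is not
+    # re-emitted; popping the set on block exit restores [{"w"}].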
+    const_context = []
+    # Iterate through ops and add to builder
+    convert_ops(
+        const_context,
+        builder,
+        prog.functions["main"].operations,
+        prog.functions["main"].outputs,
+    )
+
+    proto = builder.spec
+    # image input
+    has_image_input = any([isinstance(s, ImageType) for s in input_types])
+    if has_image_input:
+        proto = _convert_to_image_input(proto, input_types,
+                                        skip_model_load=kwargs.get("skip_model_load", False))
+
+    # image output
+    if output_types is not None:
+        assert len(output_types) == len(prog.functions["main"].outputs), \
+            "number of mil program outputs does not match the number of outputs provided by the user"
+        for i, output_proto_desc in enumerate(proto.description.output):
+            output_var = prog.functions["main"].outputs[i]
+            if isinstance(output_types[i], ImageType):
+                if not types.is_tensor(output_var.sym_type):
+                    raise ValueError("Image output, '{}', is a scalar, but it should be a tensor of rank 4".format(
+                        output_var.name))
+                shape = output_var.sym_type.get_shape()
+                if any_variadic(shape):
+                    raise ValueError("Variable rank model outputs, that are ImageTypes, are not supported")
+                if any([is_symbolic(d) for d in shape]):
+                    raise NotImplementedError("Image output '{}' has symbolic dimensions in its shape".
+                                              format(output_var.name))
+                _validate_image_input_output_shapes(output_types[i].color_layout, shape, output_var.name, is_input=False)
+                clr_space = _get_colorspace_enum(output_types[i].color_layout)
+                output_proto_desc.type.imageType.colorSpace = clr_space
+                output_proto_desc.type.imageType.width = shape[-1]
+                output_proto_desc.type.imageType.height = shape[-2]
+
+    # classifier flag
+    classifier_config = kwargs.get("classifier_config", None)
+    if classifier_config is not None:
+        # Verify classifier_config.predicted_probabilities_output, if it exists;
+        # if it is empty/None, fill it with the last non-const op's output.
+        # This is done in _get_probability_var_for_classifier().
+        probability_var = _get_probability_var_for_classifier(prog, classifier_config)
+        if classifier_config.predicted_probabilities_output != probability_var.name:
+            classifier_config.predicted_probabilities_output = probability_var.name
+        # add classifier related fields to the proto spec
+        proto = _convert_to_classifier(proto, classifier_config,
+                                       skip_model_load=kwargs.get("skip_model_load", False))
+
+    _set_user_inputs(proto, input_types)
+    _set_symbolic_inputs(proto, symbolic_inputs)
+    _set_optional_inputs(proto, input_types)
+
+    return proto
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/mil_to_nn_mapping_registry.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/mil_to_nn_mapping_registry.py
new file mode 100644
index 00000000..f892583c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/mil_to_nn_mapping_registry.py
@@ -0,0 +1,19 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +MIL_TO_NN_MAPPING_REGISTRY = {} + +def register_mil_to_nn_mapping(func=None, override=False): + def func_wrapper(_func): + f_name = _func.__name__ + if not override and f_name in MIL_TO_NN_MAPPING_REGISTRY: + raise ValueError("MIL to NN mapping for MIL op {} is already registered.".format(f_name)) + MIL_TO_NN_MAPPING_REGISTRY[f_name] = _func + return _func + + if func is None: + # decorator called without argument + return func_wrapper + return func_wrapper(func) \ No newline at end of file diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/op_mapping.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/op_mapping.py new file mode 100644 index 00000000..f2778f73 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/op_mapping.py @@ -0,0 +1,3837 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as _np +from tqdm import tqdm as _tqdm + +from coremltools import _logger as logger +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.ops.registry import SSAOpRegistry +from coremltools.converters.mil.mil.types.symbolic import (any_symbolic, + is_symbolic, + is_variadic) +from coremltools.converters.mil.mil.types.type_mapping import np_val_to_py_type +from coremltools.models import neural_network as neural_network +from coremltools.models.neural_network.quantization_utils import \ + _convert_array_to_nbit_quantized_bytes +from coremltools.proto import NeuralNetwork_pb2 + +from .mil_to_nn_mapping_registry import (MIL_TO_NN_MAPPING_REGISTRY, + register_mil_to_nn_mapping) + + +def convert_ops(const_context, builder, ops, outputs): + """ + const_context: list[set of str]: const name for v1 & v2 (the same) + builder: neural_network.NeuralNetworkBuilder + ops: list[Operation], usually from Block.operations. + outputs: list[Var]. block outputs + """ + + const_context.append(set()) + custom_ops = SSAOpRegistry.custom_ops + for op in _tqdm(ops, desc="Translating MIL ==> NeuralNetwork Ops", unit=" ops"): + if op.op_type in custom_ops: + mapper = MIL_TO_NN_MAPPING_REGISTRY["custom_op"] + elif op.op_type in MIL_TO_NN_MAPPING_REGISTRY: + mapper = MIL_TO_NN_MAPPING_REGISTRY[op.op_type] + else: + msg = ("Op {} is used in the source model. This op is not supported " + "by the NeuralNetwork (compatibility with MacOS < 12, iOS < 15) model " + "type. To successfully convert this model, convert to the ML Program " + "model type (minimum target MacOS 12, iOS 15 and later).\n" + "Use coremltools.convert(..., convert_to=\"mlprogram\") to convert to ML Program.\n" + "block: {}") + raise NotImplementedError(msg.format(op.op_type, op.enclosing_block)) + # const is globally shared in nn. + mapper(const_context, builder, op) + + for ov in outputs: + # If block return value is a const, we need to add it. + if ov.op is None: + continue # placeholder + if ov.op.op_type == "const": + add_const(const_context, builder, ov.name, ov.val) + const_context.pop() + + +def make_input(const_context, builder, variables): + """ + Ensure that variables, if const, are added to builder. + + variables: list[Var] or Var or str. 
Inputs for an nn layer. + + Returns: + list[str] or str: variables' names. + """ + if isinstance(variables, (list, tuple)): + return [make_input(const_context, builder, v) for v in variables] + if isinstance(variables, str): + return variables + + v = variables # variables is Var + if v.op is not None and v.op.op_type == "const" and v.name not in const_context[-1]: + add_const(const_context, builder, v.name, v.val) + return v.name + + +def _convert_pool(const_context, builder, op, mode, exclude_padding_from_average=True): + num_spatial_dimensions = len(op.kernel_sizes.val) + op_pad = op.pad.val if op.pad_type.val == 'custom' \ + else [0] * num_spatial_dimensions * 2 + + padding_type = op.pad_type.val.upper() + same_padding_asymmetry_mode = "BOTTOM_RIGHT_HEAVY" + if padding_type == "SAME_LOWER": + if num_spatial_dimensions == 3: + msg = "For the neuralnetwork backend, padding_mode ``same_lower`` is not supported for 3d pooling." + raise ValueError(msg) + padding_type = "SAME" + same_padding_asymmetry_mode = "TOP_LEFT_HEAVY" + + if num_spatial_dimensions == 1: + builder.add_expand_dims( + name=op.name + "_expanded", + input_name=op.x.name, + output_name=op.name + "_expanded", + axes=[-2], + ) + # nn's add_pool function does not support CUSTOM padding, + # but VALID padding supports user-defined padding amounts. + # Therefore we map CUSTOM padding to VALID padding. + padding_type = "VALID" if padding_type == "CUSTOM" else padding_type + builder.add_pooling( + name=op.name, + height=1, + width=op.kernel_sizes.val[-1], + stride_height=1, + stride_width=op.strides.val[-1], + layer_type=mode.upper(), + padding_type="INCLUDE_LAST_PIXEL" if op.ceil_mode.val else padding_type, + input_name=make_input(const_context, builder, op.name + "_expanded"), + output_name=op.name + "_pool", + exclude_pad_area=exclude_padding_from_average, + padding_top=0, + padding_bottom=0, + padding_left=op_pad[0], + padding_right=op_pad[1], + is_global=False, + same_padding_asymmetry_mode=same_padding_asymmetry_mode, + ) + builder.add_squeeze( + name=op.name + "_squeeze", + input_name=op.name + "_pool", + output_name=op.outputs[0].name, + axes=[-2], + ) + elif num_spatial_dimensions == 2: + # nn's add_pool function does not support CUSTOM padding, + # but VALID padding supports user-defined padding amounts. + # Therefore we map CUSTOM padding to VALID padding. 
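+        # (The custom amounts are still honored: they are forwarded through
+        # the explicit padding_top/bottom/left/right arguments of
+        # add_pooling below.)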
+ padding_type = "VALID" if padding_type == "CUSTOM" else padding_type + builder.add_pooling( + name=op.name, + height=op.kernel_sizes.val[-2], + width=op.kernel_sizes.val[-1], + stride_height=op.strides.val[-2], + stride_width=op.strides.val[-1], + layer_type=mode.upper(), + padding_type="INCLUDE_LAST_PIXEL" if op.ceil_mode.val else padding_type, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + exclude_pad_area=exclude_padding_from_average, + padding_top=op_pad[0], + padding_bottom=op_pad[1], + padding_left=op_pad[2], + padding_right=op_pad[3], + is_global=False, + same_padding_asymmetry_mode=same_padding_asymmetry_mode, + ) + elif num_spatial_dimensions == 3: + builder.add_pooling3d( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + pooling_type=mode.upper(), + kernel_depth=op.kernel_sizes.val[-3], + kernel_height=op.kernel_sizes.val[-2], + kernel_width=op.kernel_sizes.val[-1], + stride_depth=op.strides.val[-3], + stride_height=op.strides.val[-2], + stride_width=op.strides.val[-1], + padding_mode=op.pad_type.val, + custom_padding_front=op_pad[0], + custom_padding_back=op_pad[1], + custom_padding_top=op_pad[2], + custom_padding_bottom=op_pad[3], + custom_padding_left=op_pad[4], + custom_padding_right=op_pad[5], + average_pooling_count_excludes_padding=exclude_padding_from_average, + ) + else: + raise ValueError( + "Unsupported number of spatial dimensions. Maximum is 3, but got %s" + % num_spatial_dimensions + ) + + +def _try_convert_global_pool(const_context, builder, op, mode): + """ + Optional performance optimization pass that tries to lower spatial + reduce_mean / reduce_max to global_avg_pool / global_max_pool. + Return True if the lowering happened, otherwise return False to + continue as normal reduction op. + """ + rank = op.x.rank + if is_variadic(rank) or rank not in {4, 5}: + return False + keep_dims = op.keep_dims.val + if keep_dims is False: + return False + + axes = None + if op.axes is not None and op.axes.val is not None: + axes = op.axes.val + else: + axes = list(range(rank)) + + if tuple(op.outputs[0].shape[:-2]) != tuple(op.inputs["x"].shape[:-2]): + return False + if not all([s == 1 for s in op.outputs[0].shape[-2:]]): + return False + + builder.add_pooling( + name=op.name, + height=0, + width=0, + stride_height=0, + stride_width=0, + layer_type=mode.upper(), + padding_type="valid".upper(), + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + is_global=True, + ) + return True + + +def add_const(const_context, builder, name, val): + """ + const_context (list of set of str): const names added to v1 builder. Const names are + identical between v2 and v1 + + name (str): name of const. Should be the same for v1 and v2. + val: np.ndarray + + No return values as `name` is the name of const in v1. + + Comment: we don't need to add scalar const as they are just fields in + layer proto message in NN. + If we really need a const scalar, we upcast it to rank-1. + + """ + for const_set in const_context: + if name in const_set: + logger.warning("Const {} was already added.".format(name)) + return + if not isinstance(val, (_np.ndarray, _np.generic)): + val = _np.array([val]) + if val.dtype != float: + # nn proto only supports float32 activation. 
(e.g., pred in cond op + # needs to be converted to float) + val = val.astype(float) + rank = len(val.shape) + if rank == 0: + builder.add_load_constant_nd( + name=name, output_name=name, constant_value=val.reshape([1]), shape=[1] + ) + else: + builder.add_load_constant_nd( + name=name, output_name=name, constant_value=val, shape=val.shape + ) + const_context[-1].add(name) + logger.info("added const {} for builder {}".format(name, builder)) + + +# Helper routines for recurrent layers +def _expand_dim(builder, node_name, input_name, axes): + builder.add_expand_dims( + name=node_name, input_name=input_name, output_name=node_name, axes=axes + ) + + +def _squeeze(builder, node_name, input_name, axes): + builder.add_squeeze( + name=node_name, input_name=input_name, output_name=node_name, axes=axes + ) + + +def _split(x, sections, axis=0): + if x is None: + return None + if x.shape[axis] % sections != 0: + raise ValueError( + "Cannot split axis {} into {} sections for input of shape {}".format( + axis, sections, x.shape + ) + ) + return _np.split(x, sections, axis=axis) + + +@register_mil_to_nn_mapping +def avg_pool(const_context, builder, op): + _convert_pool( + const_context=const_context, + builder=builder, + op=op, + mode="average", + exclude_padding_from_average=op.exclude_padding_from_average.val, + ) + + +@register_mil_to_nn_mapping +def band_part(const_context, builder, op): + builder.add_matrix_band_part( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + num_lower=op.lower.val, + num_upper=op.upper.val, + ) + + +@register_mil_to_nn_mapping +def batch_norm(const_context, builder, op): + channels = op.x.shape[1] + gamma = _np.array([1.0] * channels) if op.gamma is None else op.gamma.val + beta = _np.array([0.0] * channels) if op.beta is None else op.beta.val + + x_name = make_input(const_context, builder, op.x) + out_name = op.outputs[0].name + + is_batchnorm_1d = op.x.rank == 3 + is_batchnorm_2d = op.x.rank == 4 + is_batchnorm_3d = op.x.rank == 5 + + if is_batchnorm_1d: + x_name = op.name + "_expanded" + builder.add_expand_dims( + name=x_name, input_name=op.x.name, output_name=x_name, axes=[-2], + ) + out_name += "_batch_norm" + + if is_batchnorm_1d or is_batchnorm_2d: + # batch norm 1d / 2d + builder.add_batchnorm( + name=op.name, + channels=channels, + gamma=gamma, + beta=beta, + mean=op.mean.val, + variance=op.variance.val, + input_name=x_name, + output_name=out_name, + compute_mean_var=False, + instance_normalization=False, + epsilon=op.epsilon.val, + ) + elif is_batchnorm_3d: + # batch norm 3d + batch_size, channel, height, width, depth = op.x.shape + assert not is_symbolic(channel), "Channel dimension must be known for batchnorm layer." 
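+        # Editor's note: the branch below depends on how many input dims are
+        # symbolic. With at most one symbolic dim, the 5-D input can be
+        # reshaped to a static 4-D shape (folding two trailing dims together)
+        # and run through a regular batchnorm layer; with more than one, no
+        # such static reshape exists, so the op is decomposed elementwise as
+        # (x - mean) * (gamma / sqrt(variance + epsilon)) + beta.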
+        symbolic_num = sum([is_symbolic(x) for x in op.x.shape])
+
+        if symbolic_num > 1:
+            gamma_expand = _np.expand_dims(gamma, axis=(0, 2, 3, 4))
+            beta_expand = _np.expand_dims(beta, axis=(0, 2, 3, 4))
+            mean_expand = _np.expand_dims(op.mean.val, axis=(0, 2, 3, 4))
+            var_expand = _np.expand_dims(op.variance.val, axis=(0, 2, 3, 4))
+
+            # compute batch norm 3d by decomposing it into elementwise operations
+            negative_mean_name = op.name + "_negative_mean"
+            add_const(const_context, builder, negative_mean_name, -mean_expand)
+
+            numerator_name = op.name + "_numerator"
+            builder.add_add_broadcastable(
+                name=numerator_name,
+                input_names=[x_name, negative_mean_name],
+                output_name=numerator_name,
+            )
+
+            var_expand = var_expand + op.epsilon.val
+            denominator = _np.sqrt(var_expand)
+            gamma_expand = gamma_expand / denominator
+            gamma_name = op.name + "_gamma"
+            add_const(const_context, builder, gamma_name, gamma_expand)
+
+            mul_name = op.name + "_mul"
+            builder.add_multiply_broadcastable(
+                name=mul_name,
+                input_names=[numerator_name, gamma_name],
+                output_name=mul_name,
+            )
+
+            beta_name = op.name + "_beta"
+            add_const(const_context, builder, beta_name, beta_expand)
+
+            builder.add_add_broadcastable(
+                name=out_name,
+                input_names=[mul_name, beta_name],
+                output_name=out_name,
+            )
+        else:
+            is_batch_symbolic = is_symbolic(batch_size)
+            is_height_symbolic = is_symbolic(height)
+            is_width_symbolic = is_symbolic(width)
+            is_depth_symbolic = is_symbolic(depth)
+
+            if is_batch_symbolic:
+                shape1 = [-1, channel, height * width, depth]
+                shape2 = [-1, channel, height, width, depth]
+            elif is_height_symbolic:
+                shape1 = [batch_size, channel, -1, width * depth]
+                shape2 = [batch_size, channel, -1, width, depth]
+            elif is_width_symbolic:
+                shape1 = [batch_size, channel, -1, height * depth]
+                shape2 = [batch_size, channel, height, -1, depth]
+            elif is_depth_symbolic:
+                shape1 = [batch_size, channel, height * width, -1]
+                shape2 = [batch_size, channel, height, width, -1]
+            else:
+                shape1 = [batch_size, channel, height * width, depth]
+                shape2 = [batch_size, channel, height, width, depth]
+
+            reshape_4d_name = op.name + "_reshape_4d"
+            builder.add_reshape_static(
+                name=reshape_4d_name,
+                input_name=x_name,
+                output_name=reshape_4d_name,
+                output_shape=shape1,
+            )
+
+            batchnorm_name = op.name + "_batchnorm_4d"
+            builder.add_batchnorm(
+                name=batchnorm_name,
+                channels=channels,
+                gamma=gamma,
+                beta=beta,
+                mean=op.mean.val,
+                variance=op.variance.val,
+                input_name=reshape_4d_name,
+                output_name=batchnorm_name,
+                compute_mean_var=False,
+                instance_normalization=False,
+                epsilon=op.epsilon.val,
+            )
+
+            builder.add_reshape_static(
+                name=out_name,
+                input_name=batchnorm_name,
+                output_name=out_name,
+                output_shape=shape2,
+            )
+
+    # Squeeze added `Width` dimension for 1d case
+    if is_batchnorm_1d:
+        x_name = op.name + "_squeeze"
+        builder.add_squeeze(
+            name=x_name,
+            input_name=out_name,
+            output_name=op.outputs[0].name,
+            axes=[-2],
+        )
+
+
+@register_mil_to_nn_mapping
+def const(const_context, builder, op):
+    # const in V2 are added to V1 lazily.
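+    # (Nothing to emit here: make_input()/add_const() above materialize a
+    # const the first time a consuming layer asks for it.)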
+ pass + + +def conv_helper(const_context, builder, op): + # v2 x: (n, C_in/groups, spatial_dims) + x_name = make_input(const_context, builder, op.x) + out_name = op.outputs[0].name + + is_conv1d = op.x.rank == 3 + is_conv2d = op.x.rank == 4 + is_conv3d = op.x.rank == 5 + if not (is_conv1d or is_conv2d or is_conv3d): + raise ValueError( + "Input tensor rank '{}' is not one of '{}'.".format(op.x.rank, (3, 4, 5),) + ) + if is_conv1d: + x_name = op.name + "_expand_dim" + out_name += "_expanded" + builder.add_expand_dims( + name=x_name, input_name=op.x.name, output_name=x_name, axes=[-2], + ) + # `x_name` is guaranteed to be (n, C_in/groups, spatial_dims) for 1D and 2D convolution + # W_v1 wil be np.ndarray (if W is const at compile time) or None + # (if W is not known at compile time). + weights = None + input_names = [x_name] + if op.weight.val is not None: + # v2 convolution (conv3d) expects weights to have shape (C_out, C_in/groups, spatial_dims) + # v1 convolution expects (H, W, C_in/groups, C_out) or (D, H, W, C_in/groups, C_out) + weights = op.weight.val + if is_conv1d: + weights = _np.expand_dims(op.weight.val, -2) + if is_conv1d or is_conv2d: + weights = _np.transpose(weights, [2, 3, 1, 0]) + else: + # op.weight is not const at compile time. + # When weight is dynamic, v1 convolution expects weight to be + # (C_out, C_in/groups, H, W) + # TODO 3D convolution doesn't support dynamic weights: + if is_conv3d: + raise ValueError("3D Convolution doesn't support dynamic weights.") + weights_name = op.weight.name + if is_conv1d: + weights_name += "_expand_dim" + builder.add_expand_dims( + name=weights_name, + input_name=op.weight.name, + output_name=weights_name, + axes=[-2], + ) + input_names.append(weights_name) + + # padding + padding_mode = op.pad_type.val + pad = {} + if padding_mode == "custom": + if is_conv1d: + padding_mode = "valid" + pad["padding_top"] = 0 + pad["padding_bottom"] = 0 + pad["padding_left"] = op.pad.val[0] + pad["padding_right"] = op.pad.val[1] + elif is_conv2d: + padding_mode = "valid" + pad["padding_top"] = op.pad.val[0] + pad["padding_bottom"] = op.pad.val[1] + pad["padding_left"] = op.pad.val[2] + pad["padding_right"] = op.pad.val[3] + else: + pad["padding_front"] = op.pad.val[0] + pad["padding_back"] = op.pad.val[1] + pad["padding_top"] = op.pad.val[2] + pad["padding_bottom"] = op.pad.val[3] + pad["padding_left"] = op.pad.val[4] + pad["padding_right"] = op.pad.val[5] + + same_padding_asymmetry_mode = "BOTTOM_RIGHT_HEAVY" + if padding_mode == "same_lower": + if is_conv3d: + msg = "For the neuralnetwork backend, padding_mode ``same_lower`` is not supported for conv 3d." + raise ValueError(msg) + padding_mode = "same" + same_padding_asymmetry_mode = "TOP_LEFT_HEAVY" + + has_bias = op.bias is not None + groups = op.groups.val + + strides = op.strides.val.tolist() + dilations = op.dilations.val.tolist() + if is_conv1d: + dilations = dilations[:-1] + [1] + dilations[-1:] + strides = strides[:-1] + [1] + strides[-1:] + + if weights is not None and op.op_type == "conv_quantized": + nbits = op.nbits.val + weights = _convert_array_to_nbit_quantized_bytes(weights.flatten(), nbits).tobytes() + quantization_type = op.quantization_type.val + quant_bias = op.quant_bias.val + quant_scale = op.quant_scale.val + else: + quantization_type = None + nbits = None + quant_bias = None + quant_scale = None + + if is_conv1d or is_conv2d: + if weights is None and has_bias: + # weights are dyanmic. 
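+            # (i.e. op.weight is fed to the layer as a runtime input rather
+            # than a compile-time constant).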
+ # In this case, bias, if present, cannot be part of the conv op + # it needs to be added separately via an add op + out_name += "_without_bias" + + if weights is None and groups > 1: + raise NotImplementedError("Convolution with dynamic weights and groups > 1 is not supported on the " + "neuralnetwork backend. Please use the mlprogram backend " + "(convert_to=\"mlprogram\")") + + builder.add_convolution( + name=out_name, + kernel_channels=op.weight.shape[1], + output_channels=op.weight.shape[0], + height= 1 if is_conv1d else op.weight.shape[2], + width= op.weight.shape[2] if is_conv1d else op.weight.shape[3], + stride_height=strides[0], + stride_width=strides[1], + border_mode=padding_mode, + same_padding_asymmetry_mode=same_padding_asymmetry_mode, + groups=groups, + W=weights, + b=op.bias.val if has_bias and weights is not None else None, + has_bias=has_bias if weights is not None else False, + is_deconv=False, + input_name=input_names, + output_name=out_name, + dilation_factors=dilations, + quantization_type=quantization_type, + nbits=nbits, + quant_bias=quant_bias, + quant_scale=quant_scale, + **pad # Python 2.7.16 will fail with a syntax error if a comma is included after `**pad` + ) + + # add bias if weights are dynamic + if weights is None and has_bias: + Cout = op.weight.shape[0] + assert op.bias.val.size == Cout, \ + "size of bias for convolution must be same as the number of output channels" + builder.add_load_constant_nd( + name=op.name + '_constant_bias', output_name=op.name + "_constant_bias", + constant_value=op.bias.val.reshape((Cout, 1, 1)), shape=(Cout, 1, 1) + ) + add_op_output_name = op.name + "_with_bias" if is_conv1d else op.outputs[0].name + builder.add_add_broadcastable( + name=add_op_output_name, + input_names=[out_name, op.name + "_constant_bias"], + output_name=add_op_output_name, + ) + if is_conv1d: + out_name = add_op_output_name + + # Squeeze added `Width` dimension for 1d case + if is_conv1d: + x_name = op.name + "expand_dim" + builder.add_squeeze( + name=op.name, + input_name=out_name, + output_name=op.outputs[0].name, + axes=[-2], + ) + + if is_conv3d: + builder.add_convolution3d( + name=op.name, + input_channels=op.weight.shape[1] * groups, + output_channels=op.weight.shape[0], + depth=op.weight.shape[2], + height=op.weight.shape[3], + width=op.weight.shape[4], + W=op.weight.val, + b=op.bias.val if has_bias else None, + has_bias=has_bias, + groups=groups, + stride_depth=strides[0], + stride_height=strides[1], + stride_width=strides[2], + dilation_depth=dilations[0], + dilation_height=dilations[1], + dilation_width=dilations[2], + padding_mode=padding_mode, + is_deconv=False, + output_shape=None, + input_name=input_names, + output_name=out_name, + **pad # Python 2.7.16 will fail with a syntax error if a comma is included after `**pad` + ) + +@register_mil_to_nn_mapping +def conv(const_context, builder, op): + conv_helper(const_context, builder, op) + + +@register_mil_to_nn_mapping() +def conv_quantized(const_context, builder, op): + conv_helper(const_context, builder, op) + + +@register_mil_to_nn_mapping +def cumsum(const_context, builder, op): + input_names = make_input(const_context, builder, [op.x]) + builder.add_cumsum( + name=op.name, + input_names=input_names, + output_name=op.outputs[0].name, + axis=op.axis.val, + reverse=op.reverse.val, + exclusive=op.exclusive.val, + ) + + +def _add_elementwise_unary( + const_context, builder, op, mode, output_name=None, **kwargs +): + output_name = output_name if output_name else op.outputs[0].name + name = 
output_name if output_name else op.name + if mode in ["sqrt", "rsqrt", "inverse", "power", "exp", "log", "abs", "threshold"]: + builder.add_unary( + name=name, + input_name=make_input(const_context, builder, op.x), + output_name=output_name, + mode=mode, + **kwargs + ) + else: + add_func = getattr(builder, "add_" + mode, None) + if add_func is None: + logger.error( + "Elementwise unary method {} not found in builder.".format(mode) + ) + add_func( + name=name, + input_name=make_input(const_context, builder, op.x), + output_name=output_name, + **kwargs + ) + + +def _add_elementwise_binary( + const_context, builder, op, mode, output_name=None, **kwargs +): + output_name = output_name if output_name else op.outputs[0].name + name = output_name if output_name else op.name + if mode in ["add", "multiply"]: + params = {"name": name, "output_name": output_name, "mode": mode.upper()} + if op.x.val is not None and op.x.rank == 0 and _np.isfinite(op.x.val): + params["input_names"] = make_input(const_context, builder, [op.y]) + val = op.x.val if not isinstance(op.x.val, _np.float16) else op.x.val.astype(_np.float32) + params["alpha"] = np_val_to_py_type(val) + builder.add_elementwise(**params) + return + elif op.y.val is not None and op.y.rank == 0 and _np.isfinite(op.y.val): + params["input_names"] = make_input(const_context, builder, [op.x]) + val = op.y.val if not isinstance(op.y.val, _np.float16) else op.y.val.astype(_np.float32) + params["alpha"] = np_val_to_py_type(val) + builder.add_elementwise(**params) + return + elif mode in ["equal", "not_equal"]: + add_func = getattr(builder, "add_" + mode, None) + params = {"name": name, "output_name": output_name} + if op.x.val is not None and op.x.rank == 0 and _np.isfinite(op.x.val): + params["input_names"] = make_input(const_context, builder, [op.y]) + val = op.x.val if not isinstance(op.x.val, _np.float16) else op.x.val.astype(_np.float32) + params["alpha"] = np_val_to_py_type(val) + add_func(**params) + return + elif op.y.val is not None and op.y.rank == 0 and _np.isfinite(op.y.val): + params["input_names"] = make_input(const_context, builder, [op.x]) + val = op.y.val if not isinstance(op.y.val, _np.float16) else op.y.val.astype(_np.float32) + params["alpha"] = np_val_to_py_type(val) + add_func(**params) + return + elif mode in ["greater_than", "greater_equal", "less_than", "less_equal"]: + params = {"name": name, "output_name": output_name} + if op.x.val is not None and op.x.rank == 0 and _np.isfinite(op.x.val): + params["input_names"] = make_input(const_context, builder, [op.y]) + val = op.x.val if not isinstance(op.x.val, _np.float16) else op.x.val.astype(_np.float32) + params["alpha"] = np_val_to_py_type(val) + if "less" in mode: + params["use_greater_than_equal"] = mode.endswith("_equal") + builder.add_greater_than(**params) + elif "greater" in mode: + params["use_less_than_equal"] = mode.endswith("_equal") + builder.add_less_than(**params) + return + elif op.y.val is not None and op.y.rank == 0 and _np.isfinite(op.y.val): + params["input_names"] = make_input(const_context, builder, [op.x]) + val = op.y.val if not isinstance(op.y.val, _np.float16) else op.y.val.astype(_np.float32) + params["alpha"] = np_val_to_py_type(val) + if "greater" in mode: + params["use_greater_than_equal"] = mode.endswith("_equal") + builder.add_greater_than(**params) + elif "less" in mode: + params["use_less_than_equal"] = mode.endswith("_equal") + builder.add_less_than(**params) + return + + if op.x.can_be_folded_to_const(): + add_const(const_context, builder, 
op.x.name, op.x.val) + if op.y.can_be_folded_to_const(): + if mode == "pow": + _add_elementwise_unary( + const_context, + builder, + op, + "power", + output_name=output_name, + alpha=op.y.val, + ) + return + add_const(const_context, builder, op.y.name, op.y.val) + + if mode in {"add", "multiply", "max", "min"} and op.x.shape == op.y.shape: + builder.add_elementwise( + name=name, + input_names=make_input(const_context, builder, [op.x, op.y]), + output_name=output_name, + mode=mode.upper(), + ) + return + + # the broadcast feature in the elementwise layer is hardcoded to 4D or less + # for the 5d tensor, we need to use broadcasable layers instead. + if mode in {"add", "multiply", "subtract"} and op.x.rank < 5 and op.y.rank < 5: + shape_x = _np.array([1] * (5 - op.x.rank) + list(op.x.shape)) + shape_y = _np.array([1] * (5 - op.y.rank) + list(op.y.shape)) + + internal_x = internal_y = None + if all(shape_x == 1): + internal_y = op.x + internal_x = op.y + elif all(shape_y == 1): + internal_x = op.x + internal_y = op.y + + for indices in ([1], [2], [3, 4], [2, 3, 4], [1, 2, 3, 4]): + if indices == [1, 2, 3, 4] and mode == "multiply": + # INTERNAL_MUL_XYKN not implemented + continue + if all(shape_x[indices] == shape_y[indices]): + if all([True if i in indices else s == 1 for i, s in enumerate(shape_x)]): + internal_y = op.x + internal_x = op.y + break + if all([True if i in indices else s == 1 for i, s in enumerate(shape_y)]): + internal_x = op.x + internal_y = op.y + break + + if internal_x is not None: + if mode in {"add", "multiply"}: + builder.add_elementwise( + name=name, + input_names=make_input(const_context, builder, [internal_x, internal_y]), + output_name=output_name, + mode=mode.upper(), + ) + elif mode == "subtract": + builder.add_activation( + name="_neg_y_" + name, + input_name=make_input(const_context, builder, op.y), + output_name="_neg_y_" + output_name, + non_linearity="LINEAR", + params=[-1, 0]) + if op.x == internal_y: + internal_x = "_neg_y_" + output_name + else: + internal_y = "_neg_y_" + output_name + builder.add_elementwise( + name=name, + input_names=make_input(const_context, builder, [internal_x, internal_y]), + output_name=output_name, + mode="ADD", + ) + return + + if mode in {"add", "multiply", "max", "min"}: + add_func = getattr(builder, "add_" + mode + "_broadcastable", None) + + if add_func is None: + msg = "Element-wise binary method {} not found in builder." + raise ValueError(msg.format(mode)) + + add_func( + name=name, + input_names=make_input(const_context, builder, [op.x, op.y]), + output_name=output_name, + **kwargs + ) + else: + if mode in ["divide", "floor_div", "mod", "pow", "subtract"]: + add_func = getattr(builder, "add_" + mode + "_broadcastable", None) + elif mode == "less_equal": + add_func = builder.add_less_than + kwargs["use_less_than_equal"] = True + elif mode == "greater_equal": + add_func = builder.add_greater_than + kwargs["use_greater_than_equal"] = True + else: + add_func = getattr(builder, "add_" + mode, None) + + if add_func is None: + msg = "Element-wise binary method {} not found in builder." 
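+            # (Reached only if a mode in {"add", "multiply", "max", "min"} has
+            # no matching add_<mode>_broadcastable method on the NN builder.)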
+ raise ValueError(msg.format(mode)) + + add_func( + name=name, + input_names=make_input(const_context, builder, [op.x, op.y]), + output_name=output_name, + **kwargs + ) + + +def _add_logical(const_context, builder, op, mode): + input_names = [] + input_names.append(make_input(const_context, builder, op.x)) + if mode != "NOT": + input_names.append(make_input(const_context, builder, op.y)) + + builder.add_logical( + name=op.name, input_names=input_names, output_name=op.outputs[0].name, mode=mode + ) + + +@register_mil_to_nn_mapping +def abs(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "abs") + + +@register_mil_to_nn_mapping +def acos(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "acos") + + +@register_mil_to_nn_mapping +def add(const_context, builder, op): + _add_elementwise_binary(const_context, builder, op, "add") + + +@register_mil_to_nn_mapping +def asin(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "asin") + + +@register_mil_to_nn_mapping +def atan(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "atan") + + +@register_mil_to_nn_mapping +def atanh(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "atanh") + + +@register_mil_to_nn_mapping +def cast(const_context, builder, op): + if op.dtype.val in ["int32", "int64"]: + _add_elementwise_unary( + const_context, builder, op, "floor", output_name=op.name + "_floor" + ) + _add_elementwise_unary( + const_context, builder, op, "ceil", output_name=op.name + "_ceil" + ) + + builder.add_greater_than( + name=op.name + "_cond", + input_names=[make_input(const_context, builder, op.x)], + output_name=op.name + "_cond", + alpha=0.0, + ) + + builder.add_where_broadcastable( + name=op.name, + input_names=[op.name + i for i in ["_cond", "_floor", "_ceil"]], + output_name=op.outputs[0].name, + ) + elif op.dtype.val in ["fp16", "fp32", "fp64"]: + builder.add_activation( + name=op.name, + non_linearity="LINEAR", + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + params=[1.0, 0.0], + ) + elif op.dtype.val == "bool": + builder.add_not_equal( + name=op.name, + input_names=op.x.name, + output_name=op.outputs[0].name, + alpha=0.0, + ) + else: + raise NotImplementedError( + "Parameter dtype of the cast operation can be one of the {}. 
" + "Provided {}".format(["int32", "int64", "fp16", "fp32", "fp64"], op.dtype.val) + ) + + +@register_mil_to_nn_mapping +def ceil(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "ceil") + + +@register_mil_to_nn_mapping +def clip(const_context, builder, op): + _add_elementwise_unary( + const_context, + builder, + op, + "clip", + min_value=op.alpha.val, + max_value=op.beta.val, + ) + + +@register_mil_to_nn_mapping +def cos(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "cos") + + +@register_mil_to_nn_mapping +def cosh(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "cosh") + +@register_mil_to_nn_mapping +def einsum(const_context, builder, op): + ''' + MIL einsum is either + - (B,C,H,W1) * (B,W1,H,W2) = (B,C,H,W2) + or + - (C,H,W1) * (W1,H,W2) = (C,H,W2) + + Hence to support it, first transpose the 2 inputs, so that the matrices + to be multiplied are on the last 2 axes, + then call bmm, and finally transpose the result again + ''' + rank = op.values[0].rank + perm = [0, 2, 1, 3] if rank == 4 else [1, 0, 2] + input_names = make_input(const_context, builder, op.values) + + output_name_1 = op.name + "_transpose_1" + output_name_2 = op.name + "_transpose_2" + builder.add_transpose(name=op.name + "_transpose_x", + axes=perm, + input_name=input_names[0], + output_name=output_name_1 + ) + builder.add_transpose(name=op.name + "_transpose_y", + axes=perm, + input_name=input_names[1], + output_name=output_name_2 + ) + builder.add_batched_mat_mul( + name=op.name + "_batch_matmul", + input_names=[output_name_1, output_name_2], + output_name=op.outputs[0].name + "_pre_transpose" + ) + builder.add_transpose(name=op.name, + axes=perm, + input_name=op.outputs[0].name + "_pre_transpose", + output_name=op.outputs[0].name + ) + + +@register_mil_to_nn_mapping +def equal(const_context, builder, op): + _add_elementwise_binary(const_context, builder, op, "equal") + + +@register_mil_to_nn_mapping +def exp(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "exp") + + +@register_mil_to_nn_mapping +def exp2(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "exp2") + + +@register_mil_to_nn_mapping +def floor(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "floor") + + +@register_mil_to_nn_mapping +def floor_div(const_context, builder, op): + _add_elementwise_binary(const_context, builder, op, "floor_div") + + +@register_mil_to_nn_mapping +def greater(const_context, builder, op): + _add_elementwise_binary(const_context, builder, op, "greater_than") + + +@register_mil_to_nn_mapping +def greater_equal(const_context, builder, op): + _add_elementwise_binary(const_context, builder, op, "greater_equal") + + +@register_mil_to_nn_mapping +def inverse(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "inverse", epsilon=op.epsilon.val) + + +@register_mil_to_nn_mapping +def less(const_context, builder, op): + _add_elementwise_binary(const_context, builder, op, "less_than") + + +@register_mil_to_nn_mapping +def less_equal(const_context, builder, op): + _add_elementwise_binary(const_context, builder, op, "less_equal") + + +@register_mil_to_nn_mapping +def log(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "log", epsilon=op.epsilon.val) + + +@register_mil_to_nn_mapping +def logical_and(const_context, builder, op): + _add_logical(const_context, builder, op, "AND") + + 
+@register_mil_to_nn_mapping +def logical_not(const_context, builder, op): + _add_logical(const_context, builder, op, "NOT") + + +@register_mil_to_nn_mapping +def logical_or(const_context, builder, op): + _add_logical(const_context, builder, op, "OR") + + +@register_mil_to_nn_mapping +def logical_xor(const_context, builder, op): + _add_logical(const_context, builder, op, "XOR") + + +@register_mil_to_nn_mapping +def maximum(const_context, builder, op): + _add_elementwise_binary(const_context, builder, op, "max") + + +@register_mil_to_nn_mapping +def minimum(const_context, builder, op): + _add_elementwise_binary(const_context, builder, op, "min") + + +@register_mil_to_nn_mapping +def mod(const_context, builder, op): + _add_elementwise_binary(const_context, builder, op, "mod") + + +@register_mil_to_nn_mapping +def mul(const_context, builder, op): + _add_elementwise_binary(const_context, builder, op, "multiply") + + +@register_mil_to_nn_mapping +def not_equal(const_context, builder, op): + _add_elementwise_binary(const_context, builder, op, "not_equal") + + +@register_mil_to_nn_mapping +def pow(const_context, builder, op): + _add_elementwise_binary(const_context, builder, op, "pow") + + +@register_mil_to_nn_mapping +def real_div(const_context, builder, op): + _add_elementwise_binary(const_context, builder, op, "divide") + + +@register_mil_to_nn_mapping +def round(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "round") + + +@register_mil_to_nn_mapping +def rsqrt(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "rsqrt", epsilon=op.epsilon.val) + + +@register_mil_to_nn_mapping +def sign(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "sign") + + +@register_mil_to_nn_mapping +def sin(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "sin") + + +@register_mil_to_nn_mapping +def sinh(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "sinh") + + +@register_mil_to_nn_mapping +def slice_by_index(const_context, builder, op): + rank = op.x.rank + stride = [1] * rank if op.stride is None else op.stride.val + begin_mask = [False] * rank if op.begin_mask is None else op.begin_mask.val + end_mask = [False] * rank if op.end_mask is None else op.end_mask.val + squeeze_mask = [False] * rank if op.squeeze_mask is None else op.squeeze_mask.val + + if op.begin.val is not None and op.end.val is not None: + + # If only one dimension is sliced, we should use the slice layer instead of static_slice or dynamic_slice + # In general, slice has a better performance. 
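+        # Hedged example with assumed shapes: for x of shape (2, 3, 8) sliced
+        # as x[:, :, 1:4] (stride 1, no masks), only axis 2 deviates from a
+        # full slice, so slice_dim == [2] and the single-axis "slice" layer
+        # with axis="width" is emitted below.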
+ begin = op.begin.val + end = op.end.val + slice_dim = [] + + for i in range(rank): + if (not begin_mask[i] and begin[i] != 0) or \ + (not end_mask[i] and end[i] != op.x.shape[i]) or \ + stride[i] != 1: + slice_dim.append(i) + + if len(slice_dim) == 1 and not any(squeeze_mask): + dim = slice_dim[0] - rank + if dim in [-3, -2, -1]: + # get the axis, only channel, width, and depth dimension are supported + axis = None + if dim == -1: + axis = "width" + elif dim == -2: + axis = "height" + elif dim == -3: + axis = "channel" + + start_index = 0 if begin_mask[dim] else begin[dim] + end_index = op.x.shape[dim] if end_mask[dim] else end[dim] + shape = op.x.shape + + if not is_symbolic(shape[dim]): + if start_index < 0: + start_index += shape[dim] + + if not is_symbolic(end_index) and start_index >= 0 and stride[dim] >= 1: + builder.add_slice( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + axis=axis, + start_index=start_index, + end_index=end_index, + stride=stride[dim], + ) + return + + # use add_slice_static + builder.add_slice_static( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + begin_ids=op.begin.val, + end_ids=op.end.val, + strides=np_val_to_py_type(stride), + begin_masks=np_val_to_py_type(begin_mask), + end_masks=np_val_to_py_type(end_mask), + squeeze_masks=np_val_to_py_type(squeeze_mask), + ) + else: + builder.add_slice_dynamic( + name=op.name, + input_names=make_input(const_context, builder, [op.x, op.begin, op.end]), + output_name=op.outputs[0].name, + strides=np_val_to_py_type(stride), + begin_masks=np_val_to_py_type(begin_mask), + end_masks=np_val_to_py_type(end_mask), + squeeze_masks=np_val_to_py_type(squeeze_mask), + ) + + +@register_mil_to_nn_mapping +def slice_by_size(const_context, builder, op): + """ + If the inputs satisfy + 1. op.x has static input shape for those dimension whose size is not -1 + 2. op.begin and op.size are both known during compile time + we use add_slice_static directly + + Otherwise, build a block of ops achieving slice_by_size with dynamic input x and size. 
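+
+    Illustrative example (assumed values): x.shape = (2, 10), begin = [0, 1],
+    size = [-1, 4]. Then end = [2, 5], nothing is symbolic, and the static
+    path emits add_slice_static equivalent to x[0:2, 1:5].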
+ """ + + # The static case + if op.begin.val is not None and op.size.val is not None: + begin = op.begin.val + size = op.size.val + rank = op.x.rank + end = [] + + for i in range(rank): + if size[i] == -1: + end.append(op.x.shape[i]) + else: + end.append(begin[i] + size[i]) + + if not any_symbolic(end): + builder.add_slice_static( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + begin_ids=begin, + end_ids=end, + strides=[1] * rank, + begin_masks=[False] * rank, + end_masks=[False] * rank, + squeeze_masks=[False] * rank, + ) + return + + # The dynamic case + # get the end_index of input x + # for instance, x with shape [2,3,4] results in [2,3,4] + end_index_name = op.name + "_end_index" + builder.add_get_shape( + name=end_index_name, + input_name=make_input(const_context, builder, op.x), + output_name=end_index_name, + ) + + # get the mask where size = -1 + # for instance, size = [-1,1,2] results in [1,0,0] + const_name = op.name + "_const_name" + add_const(const_context, builder, const_name, _np.array([-1] * op.x.rank)) + + is_end_mask_name = op.name + "_is_end_mask" + builder.add_equal( + name=is_end_mask_name, + input_names=make_input(const_context, builder, [const_name, op.size]), + output_name=is_end_mask_name, + ) + + # get the mask where size != -1 + # for instance, size = [-1,1,2] results in [0,1,1] + is_not_end_mask_name = op.name + "_is_not_end_mask" + builder.add_not_equal( + name=is_not_end_mask_name, + input_names=make_input(const_context, builder, [const_name, op.size]), + output_name=is_not_end_mask_name, + ) + + # get the end index for dimensions i where size[i] = -1 + # for size[i] != -1, just make it 0 + # for instance, x with shape [2,3,4] and size = [-1,1,2] + # results in [2,0,0] + end_index_with_mask_name = op.name + "_end_index_with_mask" + builder.add_elementwise( + name=end_index_with_mask_name, + input_names=[end_index_name, is_end_mask_name], + output_name=end_index_with_mask_name, + mode="MULTIPLY", + ) + + # get the end index for dimension i where size[i] != -1 + # for size[i] = 1, just make it 0 + # for instance, x with shape [2,3,4], size = [-1,1,2], + # begin = [0,1,1] results in [0,2,3] + end_ids = op.name + "_end_ids" + builder.add_elementwise( + name=end_ids, + input_names=make_input(const_context, builder, [op.begin, op.size]), + output_name=end_ids, + mode="ADD", + ) + + end_index_without_mask_name = op.name + "_end_index_without_mask" + builder.add_elementwise( + name=end_index_without_mask_name, + input_names=make_input(const_context, builder, [is_not_end_mask_name, end_ids]), + output_name=end_index_without_mask_name, + mode="MULTIPLY", + ) + + # add two end index array together to get the final index + final_end_index_name = op.name + "_final_index" + builder.add_elementwise( + name=final_end_index_name, + input_names=make_input( + const_context, + builder, + [end_index_with_mask_name, end_index_without_mask_name], + ), + output_name=final_end_index_name, + mode="ADD", + ) + + input_names = make_input( + const_context, builder, [op.x, op.begin, final_end_index_name] + ) + builder.add_slice_dynamic( + name=op.name, input_names=input_names, output_name=op.outputs[0].name + ) + + +@register_mil_to_nn_mapping +def sqrt(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "sqrt") + + +@register_mil_to_nn_mapping +def square(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "power", alpha=2.0) + + +@register_mil_to_nn_mapping +def 
sub(const_context, builder, op): + _add_elementwise_binary(const_context, builder, op, "subtract") + + +@register_mil_to_nn_mapping +def tan(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "tan") + + +@register_mil_to_nn_mapping +def threshold(const_context, builder, op): + _add_elementwise_unary(const_context, builder, op, "threshold", alpha=op.alpha.val) + + +@register_mil_to_nn_mapping +def depth_to_space(const_context, builder, op): + builder.add_reorganize_data( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + mode="DEPTH_TO_SPACE", + block_size=op.block_size.val, + ) + + +@register_mil_to_nn_mapping +def expand_dims(const_context, builder, op): + builder.add_expand_dims( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + axes=op.axes.val, + ) + + + +@register_mil_to_nn_mapping +def fill(const_context, builder, op): + if op.shape.val is None: + builder.add_fill_dynamic( + name=op.name, + input_name=make_input(const_context, builder, op.shape), + output_name=op.outputs[0].name, + value=op.value.val, + ) + else: + builder.add_fill_static( + name=op.name, + output_name=op.outputs[0].name, + output_shape=op.shape.val, + value=op.value.val, + ) + + +@register_mil_to_nn_mapping +def random_bernoulli(const_context, builder, op): + if op.shape.val is None: + builder.add_random_bernoulli_dynamic( + name=op.name, + input_names=make_input(const_context, builder, [op.shape]), + output_name=op.outputs[0].name, + prob=op.prob.val, + seed=op.seed.val, + ) + else: + builder.add_random_bernoulli_static( + name=op.name, + output_name=op.outputs[0].name, + output_shape=op.shape.val, + prob=op.prob.val, + seed=op.seed.val, + ) + + +@register_mil_to_nn_mapping +def random_categorical(const_context, builder, op): + builder.add_categorical_distribution( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + num_samples=op.size.val, + is_logits=(op.mode.val == "logits"), + seed=op.seed.val, + ) + + +@register_mil_to_nn_mapping +def random_normal(const_context, builder, op): + if op.shape.val is None: + builder.add_random_normal_dynamic( + name=op.name, + input_names=make_input(const_context, builder, [op.shape]), + output_name=op.outputs[0].name, + mean=op.mean.val, + stddev=op.stddev.val, + seed=op.seed.val, + ) + else: + builder.add_random_normal_static( + name=op.name, + output_name=op.outputs[0].name, + output_shape=op.shape.val, + mean=op.mean.val, + stddev=op.stddev.val, + seed=op.seed.val, + ) + + +@register_mil_to_nn_mapping +def random_uniform(const_context, builder, op): + if op.shape.val is None: + builder.add_random_uniform_dynamic( + name=op.name, + input_names=make_input(const_context, builder, [op.shape]), + output_name=op.outputs[0].name, + minval=op.low.val, + maxval=op.high.val, + seed=op.seed.val, + ) + else: + builder.add_random_uniform_static( + name=op.name, + output_name=op.outputs[0].name, + output_shape=op.shape.val, + minval=op.low.val, + maxval=op.high.val, + seed=op.seed.val, + ) + + +@register_mil_to_nn_mapping +def gru(const_context, builder, op): + make_input(const_context, builder, [op.x, op.initial_h]) + # Input shape: [b, s, I] + input_name = op.x.name + # Shape: [b, H] + initial_h = op.initial_h.name + + weight_ih = op.weight_ih.val + weight_hh = op.weight_hh.val + b = op.bias.val if op.bias is not None else None + direction = op.direction.val + output_sequence = 
op.output_sequence.val + + # Add expand dims for input, in + _expand_dim(builder, input_name + "_expanded", input_name, [3, 4]) + input_name += "_expanded" + + if direction not in {"forward", "reverse"}: + raise ValueError( + "Unknown direction {} for GRU layer. Supported are forward, reverse".format( + direction + ) + ) + + # Expand initial_h + _expand_dim(builder, initial_h + "_expanded", initial_h, [0, 3, 4]) + initial_h += "_expanded" + + def roz_to_zro(x): + if x is None: + return None + r, o, z = _split(x, sections=3, axis=0) + return [z, r, o] + + # w_x: [H*I, H*I, H*I] + # w_h: [H*H, H*H, H*H] + # where, format is [Z, R, O] + # Z: Update gate, R: Reset gate, O: Output gate + w_x = roz_to_zro(weight_ih) + w_h = roz_to_zro(weight_hh) + # bias format: [3*H] + b = roz_to_zro(b) + + input_size = w_x[0].shape[1] + hidden_size = w_x[0].shape[0] + + # 2 outputs + # Y : [s/1, b, h, 1, 1] + # Y_h: [ 1, b, h, 1, 1] + output_names = [_output.name + "_5d" for _output in op.outputs] + builder.add_gru( + name=op.name, + W_h=w_h, + W_x=w_x, + b=b, + hidden_size=hidden_size, + input_size=input_size, + input_names=[input_name, initial_h], + output_names=output_names, + inner_activation=op.recurrent_activation.val, + activation=op.activation.val, + output_all=output_sequence, + reverse_input=(direction == "reverse"), + ) + + # Squeeze Output + # to output shape of [Seq Len or 1, Batch Size, Hidden Size] + _squeeze(builder, op.outputs[0].name, output_names[0], axes=[3, 4]) + # Squeeze Output H and Output C + # to output shape of [Batch Size, Hidden Size] + _squeeze(builder, op.outputs[1].name, output_names[1], axes=[0, 3, 4]) + + +@register_mil_to_nn_mapping +def squeeze(const_context, builder, op): + axes = op.axes.val if op.axes is not None else None + builder.add_squeeze( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + axes=axes, + squeeze_all=axes is None, + ) + + +@register_mil_to_nn_mapping +def topk(const_context, builder, op): + builder.add_topk( + name=op.name, + input_names=make_input(const_context, builder, [op.x]), + output_names=[output.name for output in op.outputs], + k=op.k.val, + axis=op.axis.val, + use_bottom_k=op.ascending.val, + ) + + +@register_mil_to_nn_mapping +def l2_pool(const_context, builder, op): + _convert_pool(const_context=const_context, builder=builder, op=op, mode="l2") + + +@register_mil_to_nn_mapping +def linear(const_context, builder, op): + out_channels, in_channels = op.weight.shape + if op.x.rank and op.x.rank <= 3 and op.x.rank > 0: + has_bias = op.bias is not None and op.bias.val is not None + builder.add_inner_product( + name=op.name, + W=op.weight.val, + b=op.bias.val if has_bias else None, + input_channels=in_channels, + output_channels=out_channels, + has_bias=has_bias, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + ) + else: + builder.add_batched_mat_mul( + name=op.name, + input_names=make_input(const_context, builder, [op.x]), + output_name=op.outputs[0].name, + W=op.weight.val.T, + bias=op.bias.val, + weight_matrix_rows=in_channels, + weight_matrix_columns=out_channels, + ) + +@register_mil_to_nn_mapping +def matmul(const_context, builder, op): + weight = None + rows, columns = 0, 0 + + if ( + op.y.val is not None + and op.y.rank == 2 + and len(op.y.child_ops) == 1 + and len(op.y.consuming_blocks) == 0 + ): + + weight = op.y.val + if op.transpose_y.val: + weight = weight.transpose((1, 0)) + + rows, columns = weight.shape + input_names = 
make_input(const_context, builder, [op.x]) + + if op.transpose_x.val: + perm = [i for i in range(op.x.rank)] + perm[-1], perm[-2] = perm[-2], perm[-1] + name = op.name + "_x_transpose" + builder.add_transpose( + name=name, axes=perm, input_name=input_names[0], output_name=name + ) + input_names = [name] + + else: + input_names = make_input(const_context, builder, [op.x, op.y]) + + builder.add_batched_mat_mul( + name=op.name, + input_names=input_names, + output_name=op.outputs[0].name, + transpose_a=op.transpose_x.val, + transpose_b=op.transpose_y.val, + W=weight, + weight_matrix_rows=rows, + weight_matrix_columns=columns, + ) + + +@register_mil_to_nn_mapping +def max_pool(const_context, builder, op): + _convert_pool(const_context=const_context, builder=builder, op=op, mode="max") + + +@register_mil_to_nn_mapping +def non_zero(const_context, builder, op): + builder.add_where_nonzero( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + ) + + +@register_mil_to_nn_mapping +def lstm(const_context, builder, op): + make_input(const_context, builder, [op.x, op.initial_h, op.initial_c]) + # Input shape [b, s, I] + input_name = op.x.name + # Shape: [b, DIRECTION*H] + initial_h = op.initial_h.name + initial_c = op.initial_c.name + + wt_ih = op.weight_ih.val + wt_hh = op.weight_hh.val + b = op.bias.val if op.bias is not None else None + direction = op.direction.val + output_sequence = op.output_sequence.val + peephole = op.peephole.val if op.peephole is not None else None + # High enough clip value to be ineffective! + clip = 500.0 if op.clip is None else op.clip.val + + # Add expand dims for input, in + _expand_dim(builder, input_name + "_expanded", input_name, [3, 4]) + input_name += "_expanded" + + if direction in {"forward", "reverse"}: + # Expand initial_h and initial_c, + # from shape (B, H) to shape (1, Batch, H, 1, 1) + _expand_dim(builder, initial_h + "_expanded", initial_h, [0, 3, 4]) + initial_h += "_expanded" + # initial_h may have the same name as initial_c (e.g., same Var). 
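+        # Output blob names in the NN spec must be unique, so the two expanded
+        # states cannot both be registered under the "_expanded" suffix.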
+ # Append a different string to avoid conflict + _expand_dim(builder, initial_c + "_expanded2", initial_c, [0, 3, 4]) + initial_c += "_expanded2" + + # w_x: [H*I, H*I, H*I, H*I] + # w_h: [H*H, H*H, H*H, H*H] + # where format is, [input gate, forget gate, output gate, cell gate] + w_x = _split(wt_ih, sections=4) + w_h = _split(wt_hh, sections=4) + # bias format: [4*H] + b = _split(b, sections=4) # ifoz layout + # peephole format: [3*H] + # where format is, [input gate, forget gate, output gate] + peephole = _split(peephole, sections=3) + + input_size = w_x[0].shape[1] + hidden_size = w_h[0].shape[1] + + # 3 outputs + # Y : [s/1, b, h, 1, 1] + # Y_h: [ 1, b, h, 1, 1] + # Y_c: [ 1, b, h, 1, 1] + output_names = [_output.name + "_5d" for _output in op.outputs] + builder.add_unilstm( + name=op.name, + W_h=w_h, + W_x=w_x, + b=b, + hidden_size=hidden_size, + input_size=input_size, + input_names=[input_name, initial_h, initial_c], + output_names=output_names, + inner_activation=op.recurrent_activation.val, + cell_state_update_activation=op.cell_activation.val, + output_activation=op.activation.val, + peep=peephole, + output_all=output_sequence, + cell_clip_threshold=clip, + reverse_input=(direction == "reverse"), + ) + + # Squeeze Output + # to output shape of [Seq Len or 1, Batch Size, Hidden Size] + _squeeze(builder, op.outputs[0].name, output_names[0], axes=[3, 4]) + # Squeeze Output H and Output C + # to output shape of [Batch Size, Hidden Size] + _squeeze(builder, op.outputs[1].name, output_names[1], axes=[0, 3, 4]) + _squeeze(builder, op.outputs[2].name, output_names[2], axes=[0, 3, 4]) + + elif direction == "bidirectional": + # Expand initial_h and initial_c + # Issue #810 + num_layer = len(builder.layers) + initial_h_expand = initial_h + "_expanded" + "_" + str(num_layer) + # from shape (B, 2*H) to shape (1, Batch, 2*H, 1, 1) + if not (initial_h_expand in set(builder.layers)): + _expand_dim(builder, initial_h_expand, initial_h, [0, 3, 4]) + initial_h = initial_h_expand + + # initial_h may have the same name as initial_c (e.g., same Var) + initial_c_expand = initial_c + "_expanded2" + "_" + str(num_layer) + if not (initial_c_expand in set(builder.layers)): + _expand_dim(builder, initial_c_expand, initial_c, [0, 3, 4]) + initial_c = initial_c_expand + + initial_h_f = initial_h + "_forward" + initial_h_r = initial_h + "_reverse" + initial_c_f = initial_c + "_forward" + initial_c_r = initial_c + "_reverse" + + # split input_h and input_c into two parts + builder.add_split_nd( + name=op.name + "_split_h", + input_name=initial_h, + output_names=[initial_h_f, initial_h_r], + axis=2, + ) + builder.add_split_nd( + name=op.name + "_split_c", + input_name=initial_c, + output_names=[initial_c_f, initial_c_r], + axis=2, + ) + + wt_ih_back = op.weight_ih_back.val + wt_hh_back = op.weight_hh_back.val + # Get weights here + # weight format: [I+H, 2*4*H] -> [I+H, 4*H (forward):4*H (backward)] + hidden_size = wt_hh.shape[1] + input_size = wt_ih.shape[1] + + # f_w_x and r_w_x: [H*I, H*I, H*I, H*I] + # f_w_h and r_w_h: [H*H, H*H, H*H, H*H] + # where format is, [input gate, forget gate, output gate, cell gate] + w_x = _split(wt_ih, sections=4) + w_h = _split(wt_hh, sections=4) + r_w_x = _split(wt_ih_back, sections=4) + r_w_h = _split(wt_hh_back, sections=4) + + # f_b and r_b format: [4*H] + b_back = op.bias_back.val if op.bias_back is not None else None + f_b, r_b = None, None + if b is not None: + f_b = _split(b, sections=4) + if b_back is not None: + r_b = _split(b_back, sections=4) + + # peephole format: 
[2*3*H] -> [3*H (forward) : 3*H (backward)] + peephole_back = op.peephole_back.val if op.peephole_back is not None else None + f_peephole, r_peephole = None, None + if peephole is not None: + f_peephole = _split(peephole, sections=3) + if peephole_back is not None: + r_peephole = _split(peephole_back, sections=3) + + output_names = [ + op.outputs[0].name + "_5d", # Output Y [s/1, b, 2*h, 1, 1] + op.outputs[1].name + "_5d_foward", # Output Y_h [ 1, b, h, 1, 1] + op.outputs[2].name + + "_5d_forward", # Output Y_c [ 1, b, h, 1, 1] + op.outputs[1].name + + "_5d_reverse", # Output Y_h_reverse [ 1, b, h, 1, 1] + op.outputs[2].name + "_5d_reverse", + ] # Output Y_c_reverse [ 1, b, h, 1, 1] + + builder.add_bidirlstm( + name=op.name, + W_h=w_h, + W_x=w_x, + b=f_b, + W_h_back=r_w_h, + W_x_back=r_w_x, + b_back=r_b, + hidden_size=hidden_size, + input_size=input_size, + input_names=[ + input_name, + initial_h_f, + initial_c_f, + initial_h_r, + initial_c_r, + ], + output_names=output_names, + inner_activation=op.recurrent_activation.val, + cell_state_update_activation=op.cell_activation.val, + output_activation=op.activation.val, + peep=f_peephole, + peep_back=r_peephole, + output_all=output_sequence, + cell_clip_threshold=clip, + ) + + # Squeeze Output + # to output shape of [Seq Len or 1, Batch Size, 2*Hidden Size] + _squeeze(builder, op.outputs[0].name, output_names[0], axes=[3, 4]) + + # Output H is of format + # 1, Batch_Size, Hidden_Size, 1, 1 + # Concat to make it + # 1, Batch_Size, 2*Hidden_Size, 1, 1 + builder.add_elementwise( + name=op.outputs[1].name + "_5d", + input_names=[output_names[1], output_names[3]], + output_name=op.outputs[1].name + "_5d", + mode="CONCAT", + ) + # Output C is of format + # 1, Batch_Size, Hidden_Size, 1, 1 + builder.add_elementwise( + name=op.outputs[2].name + "_5d", + input_names=[output_names[2], output_names[4]], + output_name=op.outputs[2].name + "_5d", + mode="CONCAT", + ) + + # Squeeze Output H and Output C + # to output shape of [Batch Size, 2*Hidden Size] + _squeeze( + builder, op.outputs[1].name, op.outputs[1].name + "_5d", axes=[0, 3, 4] + ) + _squeeze( + builder, op.outputs[2].name, op.outputs[2].name + "_5d", axes=[0, 3, 4] + ) + else: + raise ValueError( + "Unknown direction {} for LSTM layer. Supported are forward, reverse or bidirectional".format( + direction + ) + ) + + +@register_mil_to_nn_mapping +def reshape(const_context, builder, op): + if op.shape.val is None: + builder.add_reshape_dynamic( + name=op.name, + input_names=make_input(const_context, builder, [op.x, op.shape]), + output_name=op.outputs[0].name, + ) + elif -1 in op.shape.val and len(op.shape.val) == op.x.rank: + # Support 0 in shape. + builder.add_rank_preserving_reshape( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + output_shape=op.shape.val, + ) + else: + if 0 in op.shape.val: + # Does not support 0 in shape + msg = "Use 0 in shape only if len(shape) == x.rank. Report bug." 
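+            # (add_reshape_static below cannot treat 0 as "keep this
+            # dimension"; only the add_rank_preserving_reshape branch above
+            # supports 0, which is why len(shape) == x.rank is required.)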
+ raise ValueError(msg) + output_shape = (1,) if len(op.shape.val) == 0 or 0 in op.shape.shape else op.shape.val + builder.add_reshape_static( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + output_shape=output_shape, + ) + + +@register_mil_to_nn_mapping +def reduce_argmax(const_context, builder, op): + builder.add_argmax( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + axis=op.axis.val, + keepdims=op.keep_dims.val, + ) + + +@register_mil_to_nn_mapping +def reduce_argmin(const_context, builder, op): + builder.add_argmin( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + axis=op.axis.val, + keepdims=op.keep_dims.val, + ) + + +def _reduce_axes(const_context, builder, builder_op, op): + axes = op.axes.val if op.axes is not None else op.axes + builder_op( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + axes=axes, + keepdims=op.keep_dims.val, + reduce_all=axes is None, + ) + + +@register_mil_to_nn_mapping +def reduce_l1_norm(const_context, builder, op): + _reduce_axes(const_context, builder, builder.add_reduce_l1, op) + + +@register_mil_to_nn_mapping +def reduce_l2_norm(const_context, builder, op): + _reduce_axes(const_context, builder, builder.add_reduce_l2, op) + + +@register_mil_to_nn_mapping +def reduce_log_sum(const_context, builder, op): + _reduce_axes(const_context, builder, builder.add_reduce_logsum, op) + + +@register_mil_to_nn_mapping +def reduce_log_sum_exp(const_context, builder, op): + _reduce_axes(const_context, builder, builder.add_reduce_logsumexp, op) + + +@register_mil_to_nn_mapping +def reduce_max(const_context, builder, op): + if not _try_convert_global_pool(const_context, builder, op, mode="max"): + _reduce_axes(const_context, builder, builder.add_reduce_max, op) + + +@register_mil_to_nn_mapping +def reduce_mean(const_context, builder, op): + if not _try_convert_global_pool(const_context, builder, op, mode="average"): + _reduce_axes(const_context, builder, builder.add_reduce_mean, op) + + +@register_mil_to_nn_mapping +def reduce_min(const_context, builder, op): + _reduce_axes(const_context, builder, builder.add_reduce_min, op) + + +@register_mil_to_nn_mapping +def reduce_prod(const_context, builder, op): + _reduce_axes(const_context, builder, builder.add_reduce_prod, op) + + +@register_mil_to_nn_mapping +def reduce_sum(const_context, builder, op): + _reduce_axes(const_context, builder, builder.add_reduce_sum, op) + + +@register_mil_to_nn_mapping +def reduce_sum_square(const_context, builder, op): + _reduce_axes(const_context, builder, builder.add_reduce_sumsquare, op) + + +@register_mil_to_nn_mapping +def reverse(const_context, builder, op): + reverse_dim = [False] * op.x.rank + if op.axes is None: + reverse_dim = [True] * op.x.rank + else: + for axis in op.axes.val: + reverse_dim[axis] = True + builder.add_reverse( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + reverse_dim=reverse_dim, + ) + + +@register_mil_to_nn_mapping +def reverse_sequence(const_context, builder, op): + builder.add_reverse_sequence( + name=op.name, + input_names=make_input(const_context, builder, [op.x, op.lengths]), + output_name=op.outputs[0].name, + batch_axis=op.batch_axis.val, + seq_axis=op.seq_axis.val, + ) + + +@register_mil_to_nn_mapping +def rnn(const_context, builder, op): + input_name = 
make_input(const_context, builder, op.x) # [b, s, I] + initial_h = make_input(const_context, builder, op.initial_h) # [b, H] + + w_ih = op.weight_ih.val + w_hh = op.weight_hh.val + b = op.bias.val if op.bias is not None else None + direction = op.direction.val + output_sequence = op.output_sequence.val + activation = op.activation.val + + # Add expand dims for input, in + _expand_dim(builder, input_name + "_expanded", input_name, [3, 4]) + input_name += "_expanded" + + if direction not in {"forward", "reverse"}: + raise ValueError( + "Unknown direction {} for RNN layer. Supported are forward and reverse".format( + direction + ) + ) + + # Expand initial_h and initial_c + _expand_dim(builder, initial_h + "_expanded", initial_h, [2, 3, 4]) + initial_h += "_expanded" + + # w_x: (H, I) + # w_h: (H, H) + hidden_size = w_hh.shape[0] + input_size = w_ih.shape[-1] + + # 3 outputs + # Y : [s/1, b, h, 1, 1] + # Y_h: [ 1, b, h, 1, 1] + output_names = [_output.name + "_5d" for _output in op.outputs] + builder.add_simple_rnn( + name=op.name, + W_h=w_hh, + W_x=w_ih, + b=b, + hidden_size=hidden_size, + input_size=input_size, + input_names=[input_name, initial_h], + output_names=output_names, + activation=activation, + output_all=output_sequence, + reverse_input=(direction == "reverse"), + ) + + # Squeeze Output + # to output shape of [Seq Len or 1, Batch Size, Hidden Size] + _squeeze(builder, op.outputs[0].name, output_names[0], [3, 4]) + # Squeeze Output H and Output C + # to output shape of [Batch Size, Hidden Size] + _squeeze(builder, op.outputs[1].name, output_names[1], [0, 3, 4]) + + +@register_mil_to_nn_mapping +def select(const_context, builder, op): + builder.add_where_broadcastable( + name=op.name, + input_names=make_input(const_context, builder, [op.cond, op.a, op.b]), + output_name=op.outputs[0].name, + ) + + +@register_mil_to_nn_mapping +def space_to_depth(const_context, builder, op): + builder.add_reorganize_data( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + mode="SPACE_TO_DEPTH", + block_size=op.block_size.val, + ) + + +@register_mil_to_nn_mapping +def batch_to_space(const_context, builder, op): + block_size = op.block_shape.val + if block_size[0] != block_size[1]: + raise ValueError("batch_to_space non-equal block shape is not supported in 'neuralnetwork' backend! Please change the convert_to to 'mlprogram'.") + block_size = block_size[0] + if block_size == 1: + raise ValueError("batch_to_space block shape == 1 not supported in 'neuralnetwork' backend! 
Please change the convert_to to 'mlprogram'.") + + transpose_1_name = op.name + "_transpose_1" + builder.add_transpose( + name=transpose_1_name, + input_name=make_input(const_context, builder, op.x), + axes=[1, 0, 2, 3], + output_name=transpose_1_name, + ) + depth_to_space_name = op.name + "_depth_to_space" + builder.add_reorganize_data( + name=depth_to_space_name, + input_name=transpose_1_name, + output_name=depth_to_space_name, + mode="DEPTH_TO_SPACE", + block_size=block_size, + ) + crop_name = op.name + "_crop" + crops = op.crops.val + builder.add_crop( + name=crop_name, + input_names=[depth_to_space_name], + output_name=crop_name, + offset=0, + top=crops[0][0], + bottom=crops[0][1], + left=crops[1][0], + right=crops[1][1], + ) + transpose_2_name = op.name + "_transpose_2" + builder.add_transpose( + name=transpose_2_name, + input_name=crop_name, + axes=[1, 0, 2, 3], + output_name=op.outputs[0].name, + ) + + +@register_mil_to_nn_mapping +def space_to_batch(const_context, builder, op): + block_size = op.block_shape.val + if block_size[0] != block_size[1]: + raise ValueError("space_to_batch non-equal block shape is not supported in 'neuralnetwork' backend! Please change the convert_to to 'mlprogram'.") + block_size = block_size[0] + if block_size == 1: + raise ValueError("space_to_batch block shape == 1 not supported in 'neuralnetwork' backend! Please change the convert_to to 'mlprogram'.") + + pad = op.paddings.val.flatten() + left, right = pad[2], pad[3] + top, bottom = pad[0], pad[1] + + pad_name = op.name + "_pad" + builder.add_padding( + name=pad_name, + left=left, + right=right, + top=top, + bottom=bottom, + input_name=make_input(const_context, builder, op.x), + output_name=pad_name, + padding_type="constant", + value=0., + ) + + transpose_1_name = op.name + "_transpose_1" + builder.add_transpose( + name=transpose_1_name, + input_name=pad_name, + axes=[1, 0, 2, 3], + output_name=transpose_1_name, + ) + space_to_depth_name = op.name + "_space_to_depth" + builder.add_reorganize_data( + name=space_to_depth_name, + input_name=transpose_1_name, + output_name=space_to_depth_name, + mode="SPACE_TO_DEPTH", + block_size=block_size, + ) + transpose_2_name = op.name + "_transpose_2" + builder.add_transpose( + name=transpose_2_name, + input_name=space_to_depth_name, + axes=[1, 0, 2, 3], + output_name=op.outputs[0].name, + ) + + +@register_mil_to_nn_mapping +def transpose(const_context, builder, op): + builder.add_transpose( + name=op.name, + axes=op.perm.val, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + ) + + +@register_mil_to_nn_mapping +def gather(const_context, builder, op): + is_embedding = False + + if op.x.val is not None: + W = op.x.val + if len(W.shape) == 2: + if op.axis.val == 0 or op.axis.val == -2: + if len(op.x.child_ops) == 1: + # the constant feeding into the gather doesn't go to any other op + is_embedding = True + + if is_embedding: + """" + The following: + %3 = gather(%1, %2, axis=0) # %1 is a constant matrix of shape (vocab_size, embedding_size) + can be mapped to: + %2_e = expand_dims(%2, axis=-1) + %3 = embeddingND(%2_e, weight=%1) + """ + builder.add_expand_dims( + name=op.name + "_expand_dims", + input_name=make_input(const_context, builder, op.indices), + output_name=op.name + "_expand_dims", + axes=[-1], + ) + + builder.add_embedding_nd( + name=op.name, + input_name=op.name + "_expand_dims", + output_name=op.outputs[0].name, + vocab_size=W.shape[0], + embedding_size=W.shape[1], + W=_np.transpose(W), + ) + + else: + 
builder.add_gather( + name=op.name, + input_names=make_input(const_context, builder, [op.x, op.indices]), + output_name=op.outputs[0].name, + axis=op.axis.val, + ) + + +@register_mil_to_nn_mapping +def scatter(const_context, builder, op): + builder.add_scatter( + name=op.name, + input_names=make_input( + const_context, builder, [op.data, op.indices, op.updates] + ), + output_name=op.outputs[0].name, + axis=op.axis.val, + mode=op.mode.val.upper(), + ) + + +@register_mil_to_nn_mapping +def gather_along_axis(const_context, builder, op): + builder.add_gather_along_axis( + name=op.name, + input_names=make_input(const_context, builder, [op.x, op.indices]), + output_name=op.outputs[0].name, + axis=op.axis.val, + ) + + +@register_mil_to_nn_mapping +def scatter_along_axis(const_context, builder, op): + builder.add_scatter_along_axis( + name=op.name, + input_names=make_input( + const_context, builder, [op.data, op.indices, op.updates] + ), + output_name=op.outputs[0].name, + axis=op.axis.val, + mode=op.mode.val.upper(), + ) + + +@register_mil_to_nn_mapping +def gather_nd(const_context, builder, op): + builder.add_gather_nd( + name=op.name, + input_names=make_input( + const_context, builder, [op.x, op.indices] + ), + output_name=op.outputs[0].name, + ) + + +@register_mil_to_nn_mapping +def scatter_nd(const_context, builder, op): + builder.add_scatter_nd( + name=op.name, + input_names=make_input( + const_context, builder, [op.data, op.indices, op.updates], + ), + output_name=op.outputs[0].name, + mode=op.mode.val.upper(), + ) + +@register_mil_to_nn_mapping +def silu(const_context, builder, op): + ''' + silu is: + y = x * sigmoid(x) + ''' + inp = make_input(const_context, builder, op.x) + builder.add_activation( + name=op.name + "__silu_sigmoid__", + non_linearity="SIGMOID", + input_name=inp, + output_name=op.name + "__silu_sigmoid__", + ) + builder.add_elementwise( + name=op.name, + input_names=[inp, op.name + "__silu_sigmoid__"], + output_name=op.outputs[0].name, + mode='MULTIPLY', + ) + + +@register_mil_to_nn_mapping +def tile(const_context, builder, op): + inputs = [make_input(const_context, builder, op.x)] + if op.reps.val is None: + inputs.append(op.reps.name) + builder.add_tile( + name=op.name, + reps=op.reps.val, + input_name=inputs, + output_name=op.outputs[0].name, + ) + + +@register_mil_to_nn_mapping +def tanh(const_context, builder, op): + builder.add_activation( + name=op.name, + non_linearity="TANH", + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + ) + + +@register_mil_to_nn_mapping +def scaled_tanh(const_context, builder, op): + builder.add_activation( + name=op.name, + non_linearity="SCALED_TANH", + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + params=[op.alpha.val, op.beta.val], + ) + + +@register_mil_to_nn_mapping +def sigmoid(const_context, builder, op): + builder.add_activation( + name=op.name, + non_linearity="SIGMOID", + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + ) + + +@register_mil_to_nn_mapping +def sigmoid_hard(const_context, builder, op): + builder.add_activation( + name=op.name, + non_linearity="SIGMOID_HARD", + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + params=[op.alpha.val, op.beta.val], + ) + + +@register_mil_to_nn_mapping +def erf(const_context, builder, op): + builder.add_erf( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + ) + + 
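+# A forward note on the relu6 lowering further below (illustrative values
+# only): relu6 is composed from four layers as min(relu(x), 6), using two
+# LINEAR negations around a "threshold" unary (which computes max(x, alpha)).
+# For x = 10: relu -> 10, negate -> -10, threshold at -6 -> -6, negate -> 6.
+# For x = 3:  relu -> 3,  negate -> -3,  threshold at -6 -> -3, negate -> 3.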
+@register_mil_to_nn_mapping +def thresholded_relu(const_context, builder, op): + builder.add_activation( + name=op.name, + non_linearity="THRESHOLDEDRELU", + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + params=op.alpha.val, + ) + + +@register_mil_to_nn_mapping +def elu(const_context, builder, op): + builder.add_activation( + name=op.name, + non_linearity="ELU", + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + params=op.alpha.val, + ) + + +@register_mil_to_nn_mapping +def leaky_relu(const_context, builder, op): + builder.add_activation( + name=op.name, + non_linearity="LEAKYRELU", + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + params=[op.alpha.val], + ) + + +@register_mil_to_nn_mapping +def gelu(const_context, builder, op): + builder.add_gelu( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + mode=op.mode.val, + ) + + +@register_mil_to_nn_mapping +def softplus(const_context, builder, op): + builder.add_activation( + name=op.name, + non_linearity="SOFTPLUS", + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + ) + + +@register_mil_to_nn_mapping +def softmax(const_context, builder, op): + rank = op.x.rank + if op.axis.val == -3 or op.axis.val > 0 and op.axis.val == rank - 3: + builder.add_softmax( + name=op.name, input_name=op.x.name, output_name=op.outputs[0].name, + ) + else: + builder.add_softmax_nd( + name=op.name, + input_name=op.x.name, + output_name=op.outputs[0].name, + axis=op.axis.val, + ) + + +@register_mil_to_nn_mapping +def softplus_parametric(const_context, builder, op): + builder.add_activation( + name=op.name, + non_linearity="PARAMETRICSOFTPLUS", + input_name=make_input(const_context, builder, op.x), + input_shape=op.x.shape, + input_rank=op.x.rank, + output_name=op.outputs[0].name, + params=[op.alpha.val, op.beta.val], + ) + + +@register_mil_to_nn_mapping +def softsign(const_context, builder, op): + builder.add_activation( + name=op.name, + non_linearity="SOFTSIGN", + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + ) + + +@register_mil_to_nn_mapping +def linear_activation(const_context, builder, op): + builder.add_activation( + name=op.name, + non_linearity="LINEAR", + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + params=[op.alpha.val, op.beta.val], + ) + + +@register_mil_to_nn_mapping +def relu(const_context, builder, op): + builder.add_activation( + name=op.name, + non_linearity="RELU", + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + ) + + +@register_mil_to_nn_mapping +def clamped_relu(const_context, builder, op): + builder.add_clamped_relu( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + alpha=op.alpha.val, + beta=op.beta.val, + ) + + +@register_mil_to_nn_mapping +def relu6(const_context, builder, op): + builder.add_activation( + name=op.name + "__relu6_relu__", + input_name=make_input(const_context, builder, op.x), + output_name=op.name + "__relu6_relu__", + non_linearity="RELU", + ) + builder.add_activation( + name=op.name + "__relu6_neg__", + input_name=op.name + "__relu6_relu__", + output_name=op.name + "__relu6_neg__", + non_linearity="LINEAR", + params=[-1, 0], + ) + builder.add_unary( + name=op.name + "__relu6_threshold6__", + input_name=op.name + 
"__relu6_neg__", + output_name=op.name + "__relu6_threshold6__", + mode="threshold", + alpha=-6, + ) + builder.add_activation( + name=op.name, + input_name=op.name + "__relu6_threshold6__", + output_name=op.outputs[0].name, + non_linearity="LINEAR", + params=[-1, 0], + ) + + +@register_mil_to_nn_mapping +def prelu(const_context, builder, op): + builder.add_activation( + name=op.name, + non_linearity="PRELU", + input_name=make_input(const_context, builder, op.x), + input_shape=op.x.shape, + input_rank=op.x.rank, + output_name=op.outputs[0].name, + params=op.alpha.val, + ) + + +@register_mil_to_nn_mapping +def pad(const_context, builder, op): + if len(op.pad.shape) != 1: + raise ValueError("Pad should be a 1D tensor.") + + pad = op.pad.val + mode = op.mode.val + constant_val = op.constant_val.val + + nn_mode_mapping = {"reflect": "reflection", "replicate": "replication"} + mode = nn_mode_mapping.get(mode, mode) + + if pad is not None: + missing_dims = op.x.rank - len(pad) // 2 + pad = [0, 0] * missing_dims + list(pad) + + + if pad is not None and op.x.rank > 1 and all(i == 0 for i in pad[:-4]): + pad = pad[-4:] + left, right = pad[2], pad[3] + top, bottom = pad[0], pad[1] + builder.add_padding( + name=op.name, + left=left, + right=right, + top=top, + bottom=bottom, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + padding_type=mode, + value=constant_val, + ) + elif mode == "constant": + if pad is None: + builder.add_constant_pad( + name=op.name, + input_names=make_input(const_context, builder, [op.x, op.pad]), + output_name=op.outputs[0].name, + value=constant_val + ) + else: + builder.add_constant_pad( + name=op.name, + input_names=make_input(const_context, builder, [op.x]), + output_name=op.outputs[0].name, + value=constant_val, + pad_amounts=pad, + ) + else: + raise ValueError("Unsupported mode for Pad layer! 
{}".format(mode)) + + +@register_mil_to_nn_mapping +def instance_norm(const_context, builder, op): + channels = op.x.shape[1] + gamma = _np.array([1.0] * channels) if op.gamma is None else op.gamma.val + beta = _np.array([0.0] * channels) if op.beta is None else op.beta.val + + x_name = make_input(const_context, builder, op.x) + out_name = op.outputs[0].name + + if op.x.rank == 3: + x_name = op.name + "_expanded" + builder.add_expand_dims( + name=x_name, input_name=op.x.name, output_name=x_name, axes=[-2], + ) + out_name += "_instance_norm" + + builder.add_batchnorm( + name=op.name, + channels=channels, + gamma=gamma, + beta=beta, + input_name=x_name, + output_name=out_name, + compute_mean_var=True, + instance_normalization=True, + epsilon=op.epsilon.val, + ) + + # Squeeze added `Height` dimension for 1d case + if op.x.rank == 3: + x_name = op.name + "_squeeze" + builder.add_squeeze( + name=x_name, + input_name=out_name, + output_name=op.outputs[0].name, + axes=[-2], + ) + + + + +@register_mil_to_nn_mapping +def l2_norm(const_context, builder, op): + builder.add_l2_normalize( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + epsilon=op.epsilon.val, + ) + + +@register_mil_to_nn_mapping +def layer_norm(const_context, builder, op): + + rank = op.x.rank + input_shape = [-1 if is_symbolic(dim) else dim for dim in list(op.x.shape)] + axes = list(range(op.x.rank)) if op.axes.val is None else op.axes.val + axes = [axis+rank if axis < 0 else axis for axis in op.axes.val] + epsilon = op.epsilon.val + + # if input shape = (X1, X2) or (X0, X1, X2), axes = [-1], X1 and X2 are known + # then the following operations are performed + # - reshape to (X1, 1, X2) / (X0, X1, 1, X2) + # - apply MVN layer, which normalizes across last 2 dims + # - apply scale layer + # - reshape back to (X1, X2) / (X0, X1, X2) + # Otherwise, we express the layer_norm as primitive operations + if rank in [2, 3] and len(axes) == 1 and axes[0] == rank - 1 and input_shape.count(-1) < 2 \ + and input_shape[-1] != -1 and input_shape[-2] != -1: + + reshaped_shape = input_shape[:] + # Insert a singleton dimension in the 'height' position + reshaped_shape.insert(-1, 1) + + # Scale layer can't take parameters of size [W], but can take [1, H, W], and H=1 in this case + gamma = _np.ones((1, 1, reshaped_shape[-1])) if op.gamma is None else _np.expand_dims(op.gamma.val, axis=(0, 1)) + beta = _np.zeros((1, 1, reshaped_shape[-1])) if op.beta is None else _np.expand_dims(op.beta.val, axis=(0, 1)) + + builder.add_reshape_static( + name=op.name + "_reshape", + input_name=make_input(const_context, builder, op.x), + output_name=op.name + "_reshape", + output_shape=reshaped_shape, + ) + + builder.add_mvn( + name=op.name + "_mvn", + input_name=op.name + "_reshape", + output_name=op.name + "_mvn", + across_channels=False, + normalize_variance=True, + epsilon=epsilon, + ) + + builder.add_scale( + name=op.name + "_scale", + input_name=op.name + "_mvn", + output_name=op.name + "_scale", + W=gamma, + b=beta, + has_bias=True, + shape_scale=_np.shape(gamma), + shape_bias=_np.shape(beta), + ) + + builder.add_reshape_static( + name=op.name, + input_name=op.name + "_scale", + output_name=op.outputs[0].name, + output_shape=input_shape, + ) + + else: # We don't meet the conditions for an MVN layer, so we use primitives + mean_name = op.name + "_mean" + builder.add_reduce_mean( + name=mean_name, + input_name=make_input(const_context, builder, op.x), + output_name=mean_name, + axes=axes, + keepdims=True, + 
reduce_all=False, + ) + + sub_mean_name = op.name + "_sub_mean" + builder.add_subtract_broadcastable( + name=sub_mean_name, + input_names=[op.x.name, mean_name], + output_name=sub_mean_name, + ) + + square_name = op.name + '_square' + builder.add_unary( + name=square_name, + input_name=sub_mean_name, + output_name=square_name, + mode="power", + alpha=2.0, + ) + + square_sum_name = op.name + '_square_sum' + builder.add_reduce_sum( + name=square_sum_name, + input_name=square_name, + output_name=square_sum_name, + axes=axes, + keepdims=True, + reduce_all=False, + ) + + normalized_shape = [op.x.shape[i] if i in axes else 1 for i in range(rank)] + if not any_symbolic(normalized_shape): + div_prod_name = op.name + '_div_constant' + add_const(const_context, builder, div_prod_name, _np.prod(normalized_shape)) + else: + raise NotImplementedError("dynamic shape input nor supported for layer_norm") + + div_square_sum_name = op.name + '_div_square_sum' + builder.add_divide_broadcastable( + name=div_square_sum_name, + input_names=[square_sum_name, div_prod_name], + output_name=div_square_sum_name + ) + + epsilon_const_name = op.name + '_epsilon' + add_const(const_context, builder, epsilon_const_name, epsilon) + add_epsilon_name = op.name + '_add_epsilon' + builder.add_elementwise( + name=add_epsilon_name, + input_names=[div_square_sum_name, epsilon_const_name], + output_name=add_epsilon_name, + mode="ADD", + ) + + sqrt_name = op.name + '_sqrt' + builder.add_unary( + name=sqrt_name, + input_name=add_epsilon_name, + output_name=sqrt_name, + mode="sqrt", + ) + + div_name = op.name + '_divide' + builder.add_divide_broadcastable( + name=div_name, + input_names=[sub_mean_name, sqrt_name], + output_name=div_name + ) + + gamma = _np.ones(normalized_shape) if op.gamma is None else _np.reshape(op.gamma.val, normalized_shape) + beta = _np.zeros(normalized_shape) if op.beta is None else _np.reshape(op.beta.val, normalized_shape) + + gamma_name = op.name + '_gamma' + beta_name = op.name + '_beta' + add_const(const_context, builder, gamma_name, gamma) + add_const(const_context, builder, beta_name, beta) + + mul_name = op.name + '_mul' + builder.add_multiply_broadcastable( + name=mul_name, + input_names=[div_name, gamma_name], + output_name=mul_name, + ) + + builder.add_add_broadcastable( + name=op.name, + input_names=[mul_name, beta_name], + output_name=op.outputs[0].name, + ) + + +@register_mil_to_nn_mapping +def local_response_norm(const_context, builder, op): + builder.add_lrn( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + alpha=op.alpha.val, + beta=op.beta.val, + local_size=op.size.val, + k=op.k.val, + ) + + +@register_mil_to_nn_mapping +def conv_transpose(const_context, builder, op): + x_name = make_input(const_context, builder, op.x) + out_name = op.outputs[0].name + + # Special handling for 1d conv transpose + is_conv_transpose_1d = op.x.rank == 3 + is_conv_transpose_2d = op.x.rank == 4 + is_conv_transpose_3d = op.x.rank == 5 + + if is_conv_transpose_1d: + x_name = op.name + "_expand_dim" + out_name = op.name + "_expanded" + builder.add_expand_dims( + name=x_name, input_name=op.x.name, output_name=x_name, axes=[-2] + ) + + # Input names to be used + input_names = [x_name] + + # Kernel shape: [C_in, C_out, D, H, W] + weight = op.weight.val + kernel_channels = weight.shape[0] + output_channels = weight.shape[1] * op.groups.val + + if is_conv_transpose_1d: + weight = _np.expand_dims(weight, -2) + + # pyMIL Deconvolution format: [C_in, C_out / groups, 
spatial_dims] + # NN DeConvolution3D expects weights to have shape (C_out / groups, C_in, spatial_dims) + # NN DeConvolution2D/1D expects (spatial_dims, C_in, C_out/groups) + if is_conv_transpose_3d: + weight = _np.transpose(weight, [1, 0, 2, 3, 4]) + else: + weight = _np.transpose(weight, [2, 3, 0, 1]) + + strides = op.strides.val.tolist() + dilations = op.dilations.val.tolist() + + output_spatial_dims = list(op.outputs[0].shape[2:]) + if is_conv_transpose_1d: + dilations = dilations[:-1] + [1] + dilations[-1:] + strides = strides[:-1] + [1] + strides[-1:] + # Must be at least 2D + output_spatial_dims = output_spatial_dims[:-1] + [1] + output_spatial_dims[-1:] + + if any_symbolic(output_spatial_dims): + output_spatial_dims = None + + # padding + padding_mode = op.pad_type.val + pad = {} + if padding_mode == "custom": + if is_conv_transpose_1d: + padding_mode = "valid" + pad["padding_top"] = 0 + pad["padding_bottom"] = 0 + pad["padding_left"] = op.pad.val[0] # Left + pad["padding_right"] = op.pad.val[1] # Right + elif is_conv_transpose_2d: + padding_mode = "valid" + pad["padding_top"] = op.pad.val[0] # Top + pad["padding_bottom"] = op.pad.val[1] # Bottom + pad["padding_left"] = op.pad.val[2] # Left + pad["padding_right"] = op.pad.val[3] # Right + else: + pad["padding_front"] = op.pad.val[0] # Front + pad["padding_back"] = op.pad.val[1] # Back + pad["padding_top"] = op.pad.val[2] # Top + pad["padding_bottom"] = op.pad.val[3] # Bottom + pad["padding_left"] = op.pad.val[4] # Left + pad["padding_right"] = op.pad.val[5] # Right + + groups = op.groups.val + has_bias = op.bias is not None + + if is_conv_transpose_3d: + builder.add_convolution3d( + name=op.name, + input_channels=kernel_channels, + output_channels=output_channels, + depth=weight.shape[-3], + height=weight.shape[-2], + width=weight.shape[-1], + W=weight, + b=op.bias.val if has_bias else None, + has_bias=has_bias, + groups=groups, + stride_depth=strides[0], + stride_height=strides[1], + stride_width=strides[2], + dilation_depth=dilations[0], + dilation_height=dilations[1], + dilation_width=dilations[2], + padding_mode=padding_mode, + is_deconv=True, + output_shape=output_spatial_dims, + input_name=input_names, + output_name=out_name, + **pad + ) + else: + builder.add_convolution( + name=out_name, + kernel_channels=kernel_channels, + output_channels=output_channels, + height=weight.shape[0], + width=weight.shape[1], + stride_height=strides[0], + stride_width=strides[1], + border_mode=padding_mode, + groups=groups, + W=weight, + b=op.bias.val if has_bias else None, + has_bias=has_bias, + is_deconv=True, + output_shape=output_spatial_dims, + input_name=input_names, + output_name=out_name, + dilation_factors=dilations, + **pad + ) + + # Squeeze added `Height` dimension for 1d case + if is_conv_transpose_1d: + builder.add_squeeze( + name=op.name, + input_name=out_name, + output_name=op.outputs[0].name, + axes=[-2], + ) + + +@register_mil_to_nn_mapping +def range_1d(const_context, builder, op): + if op.start.val is not None and op.step.val is not None: + inputs = [op.end] + elif op.start.val is None and op.step.val is not None: + inputs = [op.end, op.start] + elif op.start.val is not None and op.step.val is None: + inputs = [op.end, op.start, op.step] + else: + inputs = [op.end, op.start, op.step] + + builder.add_range_dynamic( + name=op.name, + output_name=op.outputs[0].name, + input_names=make_input(const_context, builder, inputs), + start=op.start.val if op.start.val is not None else 0, + step=op.step.val if op.step.val is not None else 
1, + ) + + +@register_mil_to_nn_mapping +def one_hot(const_context, builder, op): + if op.one_hot_vector_size.val is not None: + inputs = [op.indices] + else: + inputs = [op.indices, op.one_hot_vector_size] + + builder.add_one_hot( + name=op.name, + input_names=make_input(const_context, builder, inputs), + output_name=op.outputs[0].name, + one_hot_vector_size=op.one_hot_vector_size.val, + axis=op.axis.val, + on_value=op.on_value.val, + off_value=op.off_value.val, + ) + + +@register_mil_to_nn_mapping +def non_maximum_suppression(const_context, builder, op): + builder.add_nms( + name=op.name, + input_names=make_input(const_context, builder, [op.boxes, op.scores]), + output_names=[op.outputs[i].name for i in range(4)], + iou_threshold=op.iou_threshold.val, + score_threshold=op.score_threshold.val, + max_boxes=op.max_boxes.val, + per_class_suppression=op.per_class_suppression.val, + ) + + +@register_mil_to_nn_mapping +def flatten2d(const_context, builder, op): + builder.add_flatten_to_2d( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + axis=op.axis.val, + ) + + +@register_mil_to_nn_mapping +def shape(const_context, builder, op): + builder.add_get_shape( + name=op.name, + input_name=make_input(const_context, builder, op.x), + output_name=op.outputs[0].name, + ) + + +def add_upsample_nn(const_context, builder, op, scale_factor_h, scale_factor_w): + mode = "NN" + linear_upsample_mode = "DEFAULT" + if _np.abs(_np.round(scale_factor_h) - scale_factor_h) < 1e-4 and scale_factor_h >= 1 - 1e-4: + scale_factor_h = int(scale_factor_h) + else: + logger.warning( + f"Unsupported float type 'scale_factor_height' ({scale_factor_h}) for neuralnetwork. " + "Falling back to bilinear interpolation." + ) + mode = "BILINEAR" + linear_upsample_mode = "ALIGN_CORNERS_TRUE" + if _np.abs(_np.round(scale_factor_w) - scale_factor_w) < 1e-4 and scale_factor_w >= 1 - 1e-4: + scale_factor_w = int(scale_factor_w) + else: + logger.warning( + f"Unsupported float type 'scale_factor_width' ({scale_factor_w}) for neuralnetwork. " + "Falling back to bilinear interpolation." 
+    )
+    mode = "BILINEAR"
+    linear_upsample_mode = "ALIGN_CORNERS_TRUE"
+
+    builder.add_upsample(
+        name=op.name,
+        scaling_factor_h=scale_factor_h,
+        scaling_factor_w=scale_factor_w,
+        input_name=make_input(const_context, builder, op.x),
+        output_name=op.outputs[0].name,
+        mode=mode,
+        linear_upsample_mode=linear_upsample_mode,
+    )
+
+
+@register_mil_to_nn_mapping
+def resize_nearest_neighbor(const_context, builder, op):
+    Hout, Wout = op.target_size_height.val, op.target_size_width.val
+    x_shape = op.x.shape
+    Hin, Win = x_shape[-2], x_shape[-1]
+
+    scale_factor_h = Hout / Hin if Hout % Hin == 0 else (Hout + 1e-4) / Hin
+    scale_factor_w = Wout / Win if Wout % Win == 0 else (Wout + 1e-4) / Win
+
+    add_upsample_nn(const_context, builder, op, scale_factor_h, scale_factor_w)
+
+
+@register_mil_to_nn_mapping
+def upsample_nearest_neighbor(const_context, builder, op):
+    scale_factor_h = op.scale_factor_height.val
+    scale_factor_w = op.scale_factor_width.val
+
+    add_upsample_nn(const_context, builder, op, scale_factor_h, scale_factor_w)
+
+
+@register_mil_to_nn_mapping
+def upsample_bilinear(const_context, builder, op):
+    builder.add_upsample(
+        name=op.name,
+        scaling_factor_h=op.scale_factor_height.val,
+        scaling_factor_w=op.scale_factor_width.val,
+        input_name=make_input(const_context, builder, op.x),
+        output_name=op.outputs[0].name,
+        mode="BILINEAR",
+        linear_upsample_mode="ALIGN_CORNERS_TRUE" if op.align_corners.val else "ALIGN_CORNERS_FALSE",
+    )
+
+
+@register_mil_to_nn_mapping
+def resize_bilinear(const_context, builder, op):
+    grid_sampling_mode_map = {
+        "STRICT_ALIGN_CORNERS": "STRICT_ALIGN_ENDPOINTS_MODE",
+        "ALIGN_CORNERS": "ALIGN_ENDPOINTS_MODE",
+        "DEFAULT": "UPSAMPLE_MODE",
+        "OFFSET_CORNERS": "ROI_ALIGN_MODE"
+    }
+
+    if op.sampling_mode.val not in grid_sampling_mode_map:
+        raise NotImplementedError(
+            "Unsupported 'sampling_mode' ('{}') in neuralnetwork backend".format(
+                op.sampling_mode.val
+            )
+        )
+
+    builder.add_resize_bilinear(
+        name=op.name,
+        input_name=make_input(const_context, builder, op.x),
+        output_name=op.outputs[0].name,
+        target_height=op.target_size_height.val,
+        target_width=op.target_size_width.val,
+        mode=grid_sampling_mode_map[op.sampling_mode.val],
+    )
+
+
+@register_mil_to_nn_mapping
+def cond(const_context, builder, op):
+    true_block = op.blocks[0]
+    false_block = op.blocks[1]
+
+    branch_layer = builder.add_branch(
+        name=op.name, input_name=make_input(const_context, builder, op.pred),
+    )
+    true_builder = neural_network.NeuralNetworkBuilder(
+        nn_spec=branch_layer.branch.ifBranch,
+        disable_rank5_shape_mapping=True,
+        use_float_arraytype=True,
+    )
+    convert_ops(const_context, true_builder, true_block.operations, true_block.outputs)
+
+    # Copy block output to cond op output.
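+    # The copies below rename each branch's block outputs onto the cond op's
+    # output blobs, so downstream layers read a single name regardless of
+    # which branch executed.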
+    for block_out, op_out in zip(true_block.outputs, op.outputs):
+        true_builder.add_copy(
+            name=block_out.name + "_ret_copy",
+            # No need to make_input for block_out which is guaranteed
+            # to be a node
+            input_name=block_out.name,
+            output_name=op_out.name,
+        )
+
+    false_builder = neural_network.NeuralNetworkBuilder(
+        nn_spec=branch_layer.branch.elseBranch,
+        disable_rank5_shape_mapping=True,
+        use_float_arraytype=True,
+    )
+    convert_ops(
+        const_context, false_builder, false_block.operations, false_block.outputs
+    )
+
+    for block_out, op_out in zip(false_block.outputs, op.outputs):
+        false_builder.add_copy(
+            name=block_out.name + "_ret_copy",
+            input_name=block_out.name,
+            output_name=op_out.name,
+        )
+
+
+@register_mil_to_nn_mapping
+def while_loop(const_context, builder, op):
+    cond_block = op.blocks[0]
+    body_block = op.blocks[1]
+
+    # Assume that all loop vars aren't loop invariant (invariant loop vars
+    # should've been optimized away in graph passes).
+    for v_in, vx_in in zip(op.loop_vars, cond_block.inputs):
+        assert v_in.name != vx_in.name, "Loop invariant detected in {}".format(op)
+        builder.add_copy(
+            name=vx_in.name + "_input_copy",
+            input_name=make_input(const_context, builder, v_in),
+            output_name=vx_in.name,
+        )
+
+    loop_layer = builder.add_loop(
+        name=op.name,
+        # max_iterations=0 to use condition network.
+        max_iterations=0,
+    )
+
+    # Construct while_loop condition
+    cond_builder = neural_network.NeuralNetworkBuilder(
+        nn_spec=loop_layer.loop.conditionNetwork,
+        disable_rank5_shape_mapping=True,
+        use_float_arraytype=True,
+    )
+    cond_builder.rank_dict = {k.name: builder.rank_dict[k.name] for k in cond_block.inputs}
+    convert_ops(
+        const_context,
+        cond_builder,
+        cond_block.operations,
+        cond_block.outputs,
+    )
+
+    loop_layer.loop.conditionVar = cond_block.outputs[0].name
+
+    # while_loop body produces loop_vars
+    body_builder = neural_network.NeuralNetworkBuilder(
+        nn_spec=loop_layer.loop.bodyNetwork,
+        disable_rank5_shape_mapping=True,
+        use_float_arraytype=True,
+    )
+    body_builder.rank_dict = {k.name: builder.rank_dict[k.name] for k in body_block.inputs}
+    convert_ops(
+        const_context,
+        body_builder,
+        body_block.operations,
+        body_block.outputs,
+    )
+
+    # Also assume all outputs are different from loop inputs (i.e., no loop
+    # invariants).
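+    # Copy each body output back onto the corresponding loop-input blob, so
+    # the next iteration (and the condition network) reads the updated value.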
+    for vx_in, vx_out in zip(body_block.inputs, body_block.outputs):
+        if vx_in.name == vx_out.name:
+            msg = "Loop invariant var {} detected in block {}"
+            logger.warning(msg.format(vx_in.name, body_block.name))
+            continue
+        body_builder.add_copy(
+            name=vx_in.name + "_ret_copy",
+            input_name=make_input(const_context, builder, vx_out),
+            output_name=vx_in.name,
+        )
+
+
+@register_mil_to_nn_mapping
+def identity(const_context, builder, op):
+    builder.add_copy(
+        name=op.name,
+        input_name=make_input(const_context, builder, op.x),
+        output_name=op.outputs[0].name,
+    )
+
+
+@register_mil_to_nn_mapping
+def concat(const_context, builder, op):
+    # filter out input tensors with size 0
+    values = []
+    for v in op.values:
+        if len(v.shape) > 0 and v.shape[op.axis.val] == 0:
+            continue
+        values.append(v)
+
+    if len(values) == 0:
+        raise NotImplementedError('0 size tensor unsupported.')
+
+    if len(values) >= 2:
+        rank = values[0].rank
+        if op.interleave.val:
+            builder.add_concat_nd(
+                name=op.name,
+                input_names=make_input(const_context, builder, values),
+                output_name=op.outputs[0].name,
+                axis=op.axis.val,
+                interleave=True)
+        elif rank >= 4 and (op.axis.val == -3 or op.axis.val > 0 and op.axis.val == rank - 3):
+            builder.add_elementwise(
+                name=op.name,
+                input_names=make_input(const_context, builder, values),
+                output_name=op.outputs[0].name,
+                mode="CONCAT",
+            )
+        else:
+            builder.add_concat_nd(
+                name=op.name,
+                input_names=make_input(const_context, builder, values),
+                output_name=op.outputs[0].name,
+                axis=op.axis.val)
+    else:
+        builder.add_copy(
+            name=op.name,
+            input_name=make_input(const_context, builder, values[0]),
+            output_name=op.outputs[0].name)
+
+
+@register_mil_to_nn_mapping
+def stack(const_context, builder, op):
+    builder.add_stack(
+        name=op.name,
+        input_names=make_input(const_context, builder, op.values),
+        output_name=op.outputs[0].name,
+        axis=op.axis.val,
+    )
+
+
+@register_mil_to_nn_mapping
+def split(const_context, builder, op):
+    split = op.sizes
+    split = [size for size in split if size != 0]
+    has_equal_splits = all([size == split[0] for size in split])
+    num_splits = len(split)
+    output_names = [op.outputs[i].name for i in range(len(op.sizes)) if op.sizes[i] != 0]
+
+    if has_equal_splits:
+        builder.add_split_nd(
+            name=op.name,
+            input_name=make_input(const_context, builder, op.x),
+            output_names=output_names,
+            axis=op.axis.val,
+            num_splits=num_splits)
+    else:
+        builder.add_split_nd(
+            name=op.name,
+            input_name=make_input(const_context, builder, op.x),
+            output_names=output_names,
+            axis=op.axis.val,
+            split_sizes=list(split))
+
+
+@register_mil_to_nn_mapping
+def argsort(const_context, builder, op):
+    axis = op.x.rank + op.axis.val if op.axis.val < 0 else op.axis.val
+    builder.add_argsort(
+        name=op.name,
+        input_name=make_input(const_context, builder, op.x),
+        output_name=op.outputs[0].name,
+        axis=axis,
+        descending=(not op.ascending.val),
+    )
+
+
+@register_mil_to_nn_mapping
+def pixel_shuffle(const_context, builder, op):
+    builder.add_reorganize_data(
+        name=op.name,
+        input_name=make_input(const_context, builder, op.x),
+        output_name=op.outputs[0].name,
+        mode="PIXEL_SHUFFLE",
+        block_size=op.upscale_factor.val,
+    )
+
+
+@register_mil_to_nn_mapping
+def sliding_windows(const_context, builder, op):
+    builder.add_sliding_windows(
+        name=op.name,
+        input_name=make_input(const_context, builder, op.x),
+        output_name=op.outputs[0].name,
+        axis=op.axis.val,
+        window_size=op.size.val,
+        step=op.stride.val,
+    )
+
+
+@register_mil_to_nn_mapping
+def crop(const_context, builder, op):
+    builder.add_crop(
+        name=op.name,
+        input_names=[op.x.name],
+        output_name=op.outputs[0].name,
+        offset=0,
+        left=op.crop_width.val[0],
+        right=op.crop_width.val[1],
+        top=op.crop_height.val[0],
+        bottom=op.crop_height.val[1],
+    )
+
+
+@register_mil_to_nn_mapping
+def crop_resize(const_context, builder, op):
+    grid_sampling_mode_map = {
+        "STRICT_ALIGN_CORNERS": "STRICT_ALIGN_ENDPOINTS_MODE",
+        "ALIGN_CORNERS": "ALIGN_ENDPOINTS_MODE",
+        "DEFAULT": "UPSAMPLE_MODE",
+        "OFFSET_CORNERS": "ROI_ALIGN_MODE",
+    }
+
+    if op.sampling_mode.val not in grid_sampling_mode_map:
+        raise NotImplementedError(
+            "Unsupported 'sampling_mode' ('{}') in neuralnetwork backend".format(
+                op.sampling_mode.val
+            )
+        )
+
+    mode = grid_sampling_mode_map[op.sampling_mode.val]
+
+    input_expanded = op.name + "_x_expand"
+    builder.add_expand_dims(
+        name=input_expanded,
+        input_name=make_input(const_context, builder, op.x),
+        output_name=input_expanded,
+        axes=[0],
+    )
+    builder.add_crop_resize(
+        name=op.name,
+        input_names=make_input(const_context, builder, [input_expanded, op.roi]),
+        output_name=op.outputs[0].name,
+        target_height=op.target_height.val,
+        target_width=op.target_width.val,
+        mode=mode,
+        normalized_roi=op.normalized_coordinates.val,
+        box_indices_mode=op.box_coordinate_mode.val,
+        spatial_scale=op.spatial_scale.val,
+    )
+
+
+@register_mil_to_nn_mapping
+def custom_op(const_context, builder, op):
+    class_name = op.bindings.get("class_name", op.name)
+    input_order = op.bindings.get("input_order", [])
+    parameters = op.bindings.get("parameters", [])
+    weights = op.bindings.get("weights", [])
+    description = op.bindings.get("description", "")
+
+    if len(input_order) == 0:
+        raise ValueError("Inputs not provided for Custom Layer: {}".format(op.name))
+
+    # Get input names
+    inputs = [op.inputs[_name] for _name in input_order]
+
+    # Get output names
+    output_names = [_output.name for _output in op.outputs]
+
+    # Load custom params
+    params = NeuralNetwork_pb2.CustomLayerParams()
+    params.className = class_name
+    params.description = description
+
+    # Load parameters
+    for _param in parameters:
+        param = op.inputs[_param]
+        param_val = param.val
+        if types.is_bool(param.dtype):
+            params.parameters[_param].boolValue = param_val
+        elif types.is_int(param.dtype):
+            params.parameters[_param].intValue = param_val
+        elif types.is_float(param.dtype):
+            params.parameters[_param].doubleValue = param_val
+        elif types.is_str(param.dtype):
+            params.parameters[_param].stringValue = param_val
+        else:
+            raise ValueError(
+                "Unknown parameter type for custom layer - "
+                "Op: {}, Parameter: {}, Type: {}".format(op.name, _param, param.dtype)
+            )
+
+    # Load weights
+    for _weight in weights:
+        wt = params.weights.add()
+        wt.floatValue.extend(map(float, _weight))
+
+    # Add a custom layer
+    builder.add_custom(
+        name=op.name,
+        input_names=make_input(const_context, builder, inputs),
+        output_names=output_names,
+        custom_proto_spec=params,
+    )
+
+
+@register_mil_to_nn_mapping
+def make_list(const_context, builder, op):
+    # Set an initial size
+    size = op.init_length.val
+
+    # set the dynamic dimensions to 1 for initialization
+    # Ex: op.elem_shape = [i0, 128] will result in [1, 128]
+    elem_shape = [1 if isinstance(dim_var.val, str) else
+                  dim_var.val for dim_var in op.elem_shape]
+
+    if size is not None:
+        array_size = size if size > 0 else 1
+        array_shape = [array_size] + elem_shape
+        add_const(
+            const_context,
+            builder,
+            op.outputs[0].name,
+            val=_np.zeros(array_shape, dtype="float"),
+        )
+    else:
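+        # init_length is symbolic here, so the array shape must be assembled
+        # at runtime by concatenating the dynamic length with the element shape.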
+        if len(elem_shape) > 0:
+            node_es_name = op.name + "_element_shape"
+            add_const(
+                const_context,
+                builder,
+                node_es_name,
+                val=_np.array(elem_shape, dtype="float"),
+            )
+
+            # Concatenate the list length (the input, which should be a
+            # constant vector of size 1) with the element shape
+            node_arr_shape_name = op.name + "_arr_shape"
+            builder.add_concat_nd(
+                name=node_arr_shape_name,
+                input_names=[op.init_length.name, node_es_name],
+                output_name=node_arr_shape_name,
+                axis=0,
+            )
+        else:
+            raise ValueError("elem_shape should have length > 0.")
+
+        builder.add_fill_dynamic(
+            name=op.name, input_name=node_arr_shape_name, output_name=op.outputs[0].name
+        )
+
+
+def _realloc_list(const_context, builder, ls_var, index_var, value_var, mode):
+    # we do two things in this helper function
+    # (1)
+    # check if we need to re-initialize the tensorarray:
+    # it happens when the elem_shape is runtime-determined and the runtime shape is not equal to
+    # the default shape. Ex: elem_shape = [i0, 10] (initialized with [1, 10]) and at runtime we get [2, 10].
+
+    # (2)
+    # If index_var >= len(ls_var), reallocate the array and copy over existing
+    # contents
+
+    # index_var: str or Var
+    # ls_var: Var
+
+    # check if elem_shape is runtime-determined
+    elem_shape = tuple(value_var.shape)
+    has_dynamic_shape = any([is_symbolic(i) for i in elem_shape])
+
+    # get the fill shape of the tensor array
+    # [length, elem_dim1, elem_dim2, ...]
+    full_shape_name = ls_var.name + "_full_shape"
+    builder.add_get_shape(
+        name=full_shape_name,
+        input_name=ls_var.name,  # no need to make_input
+        output_name=full_shape_name,
+    )
+
+    # slice shape [length, elem_dim1, elem_dim2, ...] to get current length
+    curr_len_name = ls_var.name + "_length"
+    builder.add_slice_static(
+        name=curr_len_name,
+        input_name=full_shape_name,
+        output_name=curr_len_name,
+        begin_ids=[0],
+        end_ids=[1],
+        begin_masks=[False],
+        end_masks=[False],
+        strides=[1],
+    )
+
+    value_elem_shape_name = ls_var.name + '_value_elem_shape'
+    if has_dynamic_shape:
+        # get elem_shape from value if it is runtime-determined
+        # this is similar to what the backfill_make_list_elem_type tf graph pass does;
+        # if mode == "list_write", elem_shape is equal to value.shape,
+        # if mode == "list_scatter", elem_shape is equal to value.shape[1:]
+        if mode == "list_write":
+            builder.add_get_shape(
+                name=value_elem_shape_name,
+                input_name=make_input(const_context, builder, value_var),
+                output_name=value_elem_shape_name,
+            )
+        elif mode == "list_scatter":
+            raw_value_elem_shape_name = ls_var.name + '_raw_value_elem_shape'
+            builder.add_get_shape(
+                name=raw_value_elem_shape_name,
+                input_name=make_input(const_context, builder, value_var),
+                output_name=raw_value_elem_shape_name,
+            )
+
+            builder.add_slice_static(
+                name=value_elem_shape_name,
+                input_name=raw_value_elem_shape_name,
+                output_name=value_elem_shape_name,
+                begin_ids=[1],
+                end_ids=[-1],
+                begin_masks=[False],
+                end_masks=[True],
+                strides=[1],
+            )
+    else:
+        add_const(const_context, builder, value_elem_shape_name, _np.array(elem_shape))
+
+    # if elem_shape is runtime-determined, check if we need to re-initialize the array
+
+    if has_dynamic_shape:
+        # slice shape [length, elem_dim1, elem_dim2, ...] to get the list elem_shape
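+        # (begin_ids=[1] with end_masks=[True] takes full_shape[1:], i.e. the
+        # per-element dimensions without the leading length entry.)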
+        curr_elem_shape_name = ls_var.name + "_ls_elem_shape"
+        builder.add_slice_static(
+            name=curr_elem_shape_name,
+            input_name=full_shape_name,
+            output_name=curr_elem_shape_name,
+            begin_ids=[1],
+            end_ids=[-1],
+            begin_masks=[False],
+            end_masks=[True],
+            strides=[1],
+        )
+
+        # test if the runtime elem_shape from the list and value are equal
+        not_equal_name = ls_var.name + '_elem_shape_not_equal'
+        builder.add_not_equal(
+            name=not_equal_name,
+            input_names=[curr_elem_shape_name, value_elem_shape_name],
+            output_name=not_equal_name,
+        )
+
+        reduce_any_name = ls_var.name + '_reduce_any'
+        builder.add_reduce_sum(
+            name=reduce_any_name,
+            input_name=not_equal_name,
+            output_name=reduce_any_name,
+            axes=[0],
+            keepdims=False,
+            reduce_all=True,
+        )
+
+        # if the two elem_shapes are different, re-initialize the list with the elem_shape from the value
+        re_initialize_condition_name = ls_var.name + "_condition_re_initialize"
+        layer = builder.add_branch(name=re_initialize_condition_name, input_name=reduce_any_name)
+        true_builder = neural_network.NeuralNetworkBuilder(
+            nn_spec=layer.branch.ifBranch,
+            disable_rank5_shape_mapping=True,
+            use_float_arraytype=True,
+        )
+
+        re_initialize_shape_name = ls_var.name + "_re_initialize_shape"
+        true_builder.add_concat_nd(
+            name=re_initialize_shape_name,
+            input_names=[curr_len_name, value_elem_shape_name],
+            output_name=re_initialize_shape_name,
+            axis=0,
+        )
+
+        re_initialize_name = ls_var.name + "_re_initialize"
+        true_builder.add_fill_dynamic(
+            name=re_initialize_name,
+            input_name=re_initialize_shape_name,
+            output_name=re_initialize_name,
+            value=0.0,
+        )
+
+        true_builder.add_copy(
+            name=ls_var.name + "_re_initialize_assign",
+            input_name=re_initialize_name,
+            output_name=ls_var.name
+        )
+
+    # after re-initializing the list, we now check if we need to reallocate it:
+    # check if the index > curr_length
+    is_growing_name = ls_var.name + "_is_growing"
+    builder.add_greater_than(
+        name=is_growing_name,
+        input_names=make_input(const_context, builder, [index_var, curr_len_name]),
+        output_name=is_growing_name,
+        use_greater_than_equal=True,
+    )
+
+    condition_name = ls_var.name + "_condition"
+    layer = builder.add_branch(name=condition_name, input_name=is_growing_name)
+
+    true_builder = neural_network.NeuralNetworkBuilder(
+        nn_spec=layer.branch.ifBranch,
+        disable_rank5_shape_mapping=True,
+        use_float_arraytype=True,
+    )
+
+    # alloc_length_name0 = index - list_length
+    alloc_length_name0 = ls_var.name + "_extra_length0"
+    true_builder.add_subtract_broadcastable(
+        name=alloc_length_name0,
+        input_names=make_input(const_context, builder, [index_var, curr_len_name]),
+        output_name=alloc_length_name0,
+    )
+
+    # alloc_length_name1 = index - list_length + 1
+    alloc_length_name1 = ls_var.name + "_extra_length1"
+    true_builder.add_elementwise(
+        name=alloc_length_name1,
+        input_names=[alloc_length_name0],
+        mode="ADD",
+        output_name=alloc_length_name1,
+        alpha=1,
+    )
+
+    # alloc_shape_name = [alloc_length] + elem_shape
+    alloc_shape_name = ls_var.name + "_alloc_shape"
+    true_builder.add_concat_nd(
+        name=alloc_shape_name,
+        input_names=[alloc_length_name1, value_elem_shape_name],
+        output_name=alloc_shape_name,
+        axis=0,
+    )
+
+    # new_alloc_name is np.zeros([alloc_length] + elem_shape)
+    new_alloc_name = ls_var.name + "_alloc"
+    true_builder.add_fill_dynamic(
+        name=new_alloc_name,
+        input_name=alloc_shape_name,
+        output_name=new_alloc_name,
+        value=0.0,
+    )
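+    # Note: only the if-branch of this add_branch layer is populated; when the
+    # index already fits within the current length, the list is left untouched.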
+    # new_list_name is np.concat([old_list, new_alloc])
+    new_list_name = ls_var.name + "_new"
+    true_builder.add_concat_nd(
+        name=new_list_name,
+        input_names=[ls_var.name, new_alloc_name],
+        output_name=new_list_name,
+        axis=0,
+    )
+
+    # Copy new_list_name to ls_var.name
+    true_builder.add_copy(
+        name=ls_var.name + "_assign", input_name=new_list_name, output_name=ls_var.name
+    )
+
+
+@register_mil_to_nn_mapping
+def list_write(const_context, builder, op):
+    _realloc_list(const_context, builder, op.ls, op.index, op.value, "list_write")
+
+    # expanded_value_name is [1, op.value]
+    expanded_value_name = op.ls.name + '_' + op.value.name + "_expanded"
+    builder.add_expand_dims(
+        name=expanded_value_name,
+        input_name=make_input(const_context, builder, op.value),
+        output_name=expanded_value_name,
+        axes=[0],
+    )
+
+    builder.add_scatter(
+        name=op.name,
+        input_names=make_input(
+            const_context, builder, [op.ls, op.index, expanded_value_name]
+        ),
+        output_name=op.outputs[0].name,
+    )
+
+
+@register_mil_to_nn_mapping
+def list_gather(const_context, builder, op):
+    builder.add_gather(
+        name=op.name,
+        input_names=make_input(const_context, builder, [op.ls, op.indices]),
+        output_name=op.outputs[0].name,
+        axis=0,
+    )
+
+
+@register_mil_to_nn_mapping
+def list_scatter(const_context, builder, op):
+    max_idx_name = op.indices.name + "_max"
+    builder.add_reduce_max(
+        name=max_idx_name,
+        axes=[0],
+        keepdims=False,
+        input_name=make_input(const_context, builder, op.indices),
+        output_name=max_idx_name,
+    )
+    _realloc_list(const_context, builder, op.ls, max_idx_name, op.value, "list_scatter")
+    builder.add_scatter(
+        name=op.name,
+        input_names=make_input(const_context, builder, [op.ls, op.indices, op.value]),
+        output_name=op.outputs[0].name,
+    )
+
+
+@register_mil_to_nn_mapping
+def list_read(const_context, builder, op):
+    # gathered_name has shape [1] + elem_shape
+    gathered_name = op.name + "_gathered"
+    builder.add_gather(
+        name=op.name,
+        input_names=make_input(const_context, builder, [op.ls, op.index]),
+        output_name=gathered_name,
+        axis=0,
+    )
+
+    # squeezed_name has shape elem_shape
+    squeezed_name = op.name + "_squeezed"
+    builder.add_squeeze(
+        name=squeezed_name,
+        input_name=gathered_name,
+        output_name=op.outputs[0].name,
+        axes=[0],
+    )
+
+
+@register_mil_to_nn_mapping
+def list_length(const_context, builder, op):
+    # list_shape_name == [list_length] + elem_shape
+    list_shape_name = op.ls.name + "_shape"
+    builder.add_get_shape(
+        name=list_shape_name,
+        input_name=make_input(const_context, builder, op.ls),
+        output_name=list_shape_name,
+    )
+
+    # slice to get list_length
+    builder.add_slice_static(
+        name=op.name,
+        input_name=list_shape_name,
+        output_name=op.outputs[0].name,
+        begin_ids=[0],
+        end_ids=[1],
+        begin_masks=[False],
+        end_masks=[False],
+        strides=[1],
+    )
+
+@register_mil_to_nn_mapping
+def _const_symbolic(const_context, builder, op):
+    # do nothing
+    pass
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/__init__.py
new file mode 100644
index 00000000..d7ee0008
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/__init__.py
@@ -0,0 +1,14 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
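+# NN-backend-specific graph passes; each module below registers its pass in
+# the "nn_backend" namespace via @register_pass.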
+from . import (
+    alert_return_type_cast,
+    commingle_loop_vars,
+    conv1d_decomposition,
+    handle_return_inputs_as_outputs,
+    handle_return_unused_inputs,
+    handle_unused_inputs,
+    mlmodel_passes,
+)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/alert_return_type_cast.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/alert_return_type_cast.py
new file mode 100644
index 00000000..dc2fe7e2
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/alert_return_type_cast.py
@@ -0,0 +1,48 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from coremltools import _logger as logger
+from coremltools.converters.mil.mil import Var, types
+from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
+from coremltools.converters.mil.mil.passes.pass_registry import register_pass
+
+
+@register_pass(namespace="nn_backend")
+class alert_return_type_cast(AbstractGraphPass):
+    """
+    prog: Program
+
+    # NN always implicitly casts return types to fp32. Detect any return
+    # types that are not builtin.fp32 and alert the user of the implicit
+    # casting. This pass must be at the end. Example:
+    #
+    # Given:
+    #
+    #    main(%x: (2, 3, fp32)) {
+    #      block0() {
+    #        %shape_0: (2,i32)* = const(val=[4, 7])
+    #      } -> (%shape_0)
+    #    }
+    #
+    # (Notice that %shape_0 is i32, not fp32)
+    #
+    # Result:
+    #
+    # The same program.
+    #
+    # Alert messages about %shape_0 being implicitly cast from i32 to fp32.
+    #
+    # Comment: This pass should do more proper casting as the backend supports more types.
+    """
+    def apply(self, prog):
+        for f_name, f in prog.functions.items():
+            for v in f.outputs:
+                if isinstance(v, Var) and v.dtype != types.fp32:
+                    msg = (
+                        "Output var {} of type {} in function {} is "
+                        "cast to type fp32"
+                    )
+                    logger.warning(
+                        msg.format(v.name, types.builtin_to_string(v.dtype), f_name)
+                    )
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/commingle_loop_vars.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/commingle_loop_vars.py
new file mode 100644
index 00000000..7105ea09
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/commingle_loop_vars.py
@@ -0,0 +1,75 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+
+from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
+from coremltools.converters.mil.mil.passes.pass_registry import register_pass
+
+
+def _commingle_loop_vars_block(block):
+    for op in list(block.operations):
+        for b in op.blocks:
+            _commingle_loop_vars_block(b)
+
+        if op.op_type != "while_loop":
+            continue
+
+        for block in op.blocks:
+            for v_out, vx_in in zip(op.outputs, block.inputs):
+                # Disable check as v_out is not visible in block.
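+                # Rewriting every use of the block input to the op output makes
+                # the loop body read and write the same blob names, which is
+                # the convention the NN loop layer expects (see the docstring
+                # below).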
+                block.replace_uses_of_var_after_op(
+                    anchor_op=None,
+                    old_var=vx_in,
+                    new_var=v_out,
+                    no_check_var_visibility=True,
+                )
+
+        # replace block inputs
+        block._block_inputs = op.outputs
+
+
+@register_pass(namespace="nn_backend")
+class commingle_loop_vars(AbstractGraphPass):
+    """
+    prog: Program
+
+    # NN backend expects output vars as loop vars. Example:
+    #
+    # Given:
+    #    main(%a: (1, 2, fp32),
+    #         %b: (1, 2, fp32)) {
+    #      block0() {
+    #        %loop:0: (1, 2, fp32), %loop:1: (1, 2, fp32) = \
+    #        while_loop(loop_vars=(%a, %b))
+    #          loop_cond(%a.x, %b.x) {
+    #            %cond_var: (bool) = some_op(x=%a.x, y=%b.x)
+    #          } -> (%cond_var)
+    #          loop_body(%a.x, %b.x) {
+    #            %add_0: (1, 2, fp32) = add(x=%a.x, y=%b.x)
+    #          } -> (%add_0, %b.x)
+    #      } -> (%loop:0, %loop:1)
+    #    }
+    #
+    # Result:
+    #    main(%a: (1, 2, fp32),
+    #         %b: (1, 2, fp32)) {
+    #      block0() {
+    #        %loop:0: (1, 2, fp32), %loop:1: (1, 2, fp32) = \
+    #        while_loop(loop_vars=(%a, %b))
+    #          loop_cond(%loop:0, %loop:1) {
+    #            %cond_var: (bool) = some_op(x=%loop:0, y=%loop:1)
+    #          } -> (%cond_var)
+    #          loop_body(%loop:0, %loop:1) {
+    #            %add_0: (1, 2, fp32) = add(x=%loop:0, y=%loop:1)
+    #          } -> (%add_0, %loop:1)
+    #      } -> (%loop:0, %loop:1)
+    #    }
+    #
+    # Comment: The resulting program is no longer SSA (multiple assignments on
+    # %loop:0).
+    """
+    def apply(self, prog):
+        for f in prog.functions.values():
+            _commingle_loop_vars_block(f)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/conv1d_decomposition.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/conv1d_decomposition.py
new file mode 100644
index 00000000..48c207c5
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/conv1d_decomposition.py
@@ -0,0 +1,101 @@
+# Copyright (c) 2023, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from coremltools.converters.mil.mil import Block
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil import Operation
+from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
+from coremltools.converters.mil.mil.passes.helper import block_context_manager
+from coremltools.converters.mil.mil.passes.pass_registry import register_pass
+
+
+@register_pass(namespace="nn_backend")
+class decompose_conv1d(AbstractGraphPass):
+    """
+    NeuralNetwork does not support conv1d natively; instead, conv1d is
+    decomposed into expand_dims -> conv2d -> squeeze.
+
+    We decompose conv1d for NN here, so we have a chance to optimize the
+    resulting expand_dims -> conv2d -> squeeze pattern.
+
+    Given:
+        %2 = conv(%1), %1.rank = 3
+        ...
+
+    Result:
+        %3 = expand_dims(%1, axes=-2)
+        %4 = conv(%3)
+        %2 = squeeze(%4, axes=-2)
+        ...
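+
+    (The expand_dims/squeeze pair adds and then removes a dummy height
+    dimension of size 1, so the 2-D convolution reproduces the original
+    1-D convolution.)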
+
+    """
+
+    def apply(self, prog):
+        for f in prog.functions.values():
+            self._decompose_conv1d_block(f)
+
+    @block_context_manager
+    def _decompose_conv1d_block(self, block: Block):
+        def help_decompose_conv1d_block(block: Block) -> bool:
+            for op in list(block.operations):
+                for b in op.blocks:
+                    block_changed = True
+                    while block_changed:
+                        block_changed = help_decompose_conv1d_block(b)
+
+                # must be conv1d
+                if op.op_type != "conv" or op.x.rank != 3:
+                    continue
+
+                if self._try_apply_transform(op, block):
+                    # has to break as the downstream iterator is affected
+                    return True
+
+            return False
+
+        block_changed = True
+        while block_changed:
+            block_changed = help_decompose_conv1d_block(block)
+
+    @staticmethod
+    def _try_apply_transform(conv_op: Operation, block: Block) -> bool:
+        # create `expand_dims`
+        expand_out = mb.expand_dims(x=conv_op.x, axes=(-2,), before_op=conv_op)
+
+        # prepare `conv2d`
+        conv_kwargs = {"x": expand_out, "before_op": conv_op}
+
+        # inherit `pad_type`, `groups`, `bias` from `conv1d`
+        conv_kwargs["pad_type"] = conv_op.inputs["pad_type"].val
+        conv_kwargs["groups"] = conv_op.inputs["groups"].val
+        bias = conv_op.inputs.get("bias", None)
+        if bias is not None:
+            conv_kwargs["bias"] = bias
+
+        # expand `weight`, `strides`, `pad`, `dilations` from `conv1d`
+        conv_kwargs["weight"] = mb.expand_dims(
+            x=conv_op.inputs["weight"], axes=(-2,), before_op=conv_op
+        )
+        conv_kwargs["strides"] = (1, conv_op.inputs["strides"].val[-1])
+        conv_kwargs["pad"] = (0, 0, conv_op.inputs["pad"].val[-2], conv_op.inputs["pad"].val[-1])
+        conv_kwargs["dilations"] = (1, conv_op.inputs["dilations"].val[-1])
+
+        # compose `conv2d`
+        conv_out = mb.conv(**conv_kwargs)
+
+        # create `squeeze`
+        squeeze_out = mb.squeeze(
+            x=conv_out, axes=(-2,), name=conv_op.outputs[0].name, before_op=conv_op
+        )
+
+        # try replacing `conv1d` output
+        # with the new `expand_dims` -> `conv2d` -> `squeeze` output
+        if conv_op.enclosing_block.try_replace_uses_of_var_after_op(
+            anchor_op=conv_op, old_var=conv_op.outputs[0], new_var=squeeze_out
+        ):
+            # remove `conv1d`
+            block.remove_ops([conv_op])
+            return True
+        return False
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/handle_return_inputs_as_outputs.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/handle_return_inputs_as_outputs.py
new file mode 100644
index 00000000..1a5f42a5
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/handle_return_inputs_as_outputs.py
@@ -0,0 +1,62 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
+from coremltools.converters.mil.mil.passes.pass_registry import register_pass
+
+
+def _handle_return_inputs_as_outputs_func(f):
+    returned_inputs = []
+    for v_name, v in f.inputs.items():
+        if v not in f.outputs:
+            continue
+        returned_inputs.append(v)
+
+    with f:
+        for v in returned_inputs:
+            # copy twice since NN layer cannot have input name == output name
+            v_tmp = mb.identity(x=v, name=v.name + "_tmp")
+            res = mb.identity(x=v_tmp, name=v.name)
+            res.op.enclosing_block.replace_uses_of_var_after_op(
+                anchor_op=res.op, old_var=v, new_var=res
+            )
+
+@register_pass(namespace="nn_backend")
+class handle_return_inputs_as_outputs(AbstractGraphPass):
+    """
+    prog: Program
+
+    # NN cannot handle returning input as output. Insert an identity op for
+    # those cases. Example:
+    #
+    # Given:
+    #    main(%a: (1, 2, fp32),
+    #         %b: (1, 2, fp32)) {
+    #      block0() {
+    #        %mul_0_y_0: (i32)* = const(val=2)
+    #        %mul_0: (1, 2, fp64) = mul(x=%a, y=%mul_0_y_0)
+    #      } -> (%mul_0, %b)
+    #    }
+    #
+    # (Notice that %b is returned from input. This causes an error in NN)
+    #
+    # Result:
+    #    main(%a: (1, 2, fp32),
+    #         %b: (1, 2, fp32)) {
+    #      block0() {
+    #        %mul_0_y_0: (i32)* = const(val=2)
+    #        %mul_0: (1, 2, fp64) = mul(x=%a, y=%mul_0_y_0)
+    #        %b_tmp: (1, 2, fp32) = identity(x=%b)
+    #        %b: (1, 2, fp32) = identity(x=%b_tmp)
+    #      } -> (%mul_0, %b)
+    #    }
+    #
+    # where identity is applied twice since NN layer cannot have
+    # input name == output name
+    """
+    def apply(self, prog):
+        for f in prog.functions.values():
+            _handle_return_inputs_as_outputs_func(f)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/handle_return_unused_inputs.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/handle_return_unused_inputs.py
new file mode 100644
index 00000000..3f8e2b9e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/handle_return_unused_inputs.py
@@ -0,0 +1,59 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
+from coremltools.converters.mil.mil.passes.pass_registry import register_pass
+
+
+def _handle_return_unused_inputs_func(f):
+
+    returned_unused_inputs = filter(lambda x: x in f.outputs, list(f.inputs.values()))
+
+    with f:
+        for v in returned_unused_inputs:
+            # copy twice since NN layer cannot have input name == output name
+            v_tmp = mb.identity(x=v, name=v.name + "_tmp")
+            res = mb.identity(x=v_tmp, name=v.name)
+            res.op.enclosing_block.replace_uses_of_var_after_op(
+                anchor_op=res.op, old_var=v, new_var=res
+            )
+
+@register_pass(namespace="nn_backend")
+class handle_return_unused_inputs(AbstractGraphPass):
+    """
+    prog: Program
+
+    # NN cannot handle returning input as output. Insert an identity op for
+    # those cases. Example:
+    #
+    # Given:
+    #    main(%a: (1, 2, fp32),
+    #         %b: (1, 2, fp32)) {
+    #      block0() {
+    #        %mul_0_y_0: (i32)* = const(val=2)
+    #        %mul_0: (1, 2, fp64) = mul(x=%a, y=%mul_0_y_0)
+    #      } -> (%mul_0, %b)
+    #    }
+    #
+    # (Notice that %b is returned from input. This causes an error in NN)
+    #
+    # Result:
+    #    main(%a: (1, 2, fp32),
+    #         %b: (1, 2, fp32)) {
+    #      block0() {
+    #        %mul_0_y_0: (i32)* = const(val=2)
+    #        %mul_0: (1, 2, fp64) = mul(x=%a, y=%mul_0_y_0)
+    #        %b_tmp: (1, 2, fp32) = identity(x=%b)
+    #        %b: (1, 2, fp32) = identity(x=%b_tmp)
+    #      } -> (%mul_0, %b)
+    #    }
+    #
+    # where identity is applied twice since NN layer cannot have
+    # input name == output name
+    """
+    def apply(self, prog):
+        for f in prog.functions.values():
+            _handle_return_unused_inputs_func(f)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/handle_unused_inputs.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/handle_unused_inputs.py
new file mode 100644
index 00000000..2effac4f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/handle_unused_inputs.py
@@ -0,0 +1,50 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
+from coremltools.converters.mil.mil.passes.pass_registry import register_pass
+
+
+def _handle_unused_inputs_func(f):
+    unused_inputs = [v for v_name, v in f.inputs.items() if len(v.child_ops) == 0]
+
+    with f:
+        for v in unused_inputs:
+            # copy the input
+            v_tmp = mb.identity(x=v, name=v.name + "_tmp")
+
+
+@register_pass(namespace="nn_backend")
+class handle_unused_inputs(AbstractGraphPass):
+    """
+    prog: Program
+
+    # NN doesn't allow unused inputs. Insert an identity op to consume
+    # inputs (though its outputs are not used). This pass must come after
+    # dead code elimination as all inserted code is "dead code". Example:
+    #
+    # Given:
+    #
+    #    main(%x: (2, 3, fp32)) {
+    #      block0() {
+    #        %shape_0_const: (2,i32)* = const(val=[4, 7])
+    #      } -> (%shape_0_const)
+    #    }
+    #
+    # (Notice that input %x is not consumed. This causes an error in NN.)
+    #
+    # Result:
+    #
+    #    main(%x: (2, 3, fp32)) {
+    #      block0() {
+    #        %unused_var: (2, 3, fp32) = identity(x=%x)
+    #        %shape_0_const: (2,i32)* = const(val=[4, 7])
+    #      } -> (%shape_0_const)
+    #    }
+    """
+    def apply(self, prog):
+        for f in prog.functions.values():
+            _handle_unused_inputs_func(f)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/mlmodel_passes.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/mlmodel_passes.py
new file mode 100644
index 00000000..9ab855e9
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/mlmodel_passes.py
@@ -0,0 +1,467 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+
+def _get_nn_spec(spec):
+    if spec.WhichOneof("Type") == "neuralNetwork":
+        nn_spec = spec.neuralNetwork
+    elif spec.WhichOneof("Type") == "neuralNetworkClassifier":
+        nn_spec = spec.neuralNetworkClassifier
+    elif spec.WhichOneof("Type") == "neuralNetworkRegressor":
+        nn_spec = spec.neuralNetworkRegressor
+    else:
+        raise ValueError("Specification must contain a neural network")
+    return nn_spec
+
+
+def _get_blob_out_degree(spec):
+    """
+    Computes the use count of every tensor/node in the NN graph,
+    i.e. how many layers use it as an input
+
+    :param spec : NeuralNetworkSpecification
+    :returns use_count_dict : str -> int, a dictionary with node name as key and its use count as value
+    """
+
+    def _get_blob_out_degree_rec(nn_spec, out_degree):
+        nn_layers = nn_spec.layers
+        for layer in nn_layers:
+            layer_type = layer.WhichOneof("layer")
+            for inp in layer.input:
+                out_degree[inp] = out_degree.get(inp, 0) + 1
+            if layer_type == "loop":
+                out_degree[layer.loop.conditionVar] = (
+                    out_degree.get(layer.loop.conditionVar, 0) + 1
+                )
+                _get_blob_out_degree_rec(layer.loop.conditionNetwork, out_degree)
+                _get_blob_out_degree_rec(layer.loop.bodyNetwork, out_degree)
+            elif layer_type == "branch":
+                _get_blob_out_degree_rec(layer.branch.ifBranch, out_degree)
+                _get_blob_out_degree_rec(layer.branch.elseBranch, out_degree)
+
+    use_count_dict = {}
+    # Collect variable use count recursively
+    nn_spec = _get_nn_spec(spec)
+    _get_blob_out_degree_rec(nn_spec, use_count_dict)
+
+    # Network outputs count as uses
+    network_outputs = _get_network_output(spec)
+    for _output in network_outputs:
+        use_count_dict[_output] = use_count_dict.get(_output, 0) + 1
+    return use_count_dict
+
+
+def _is_layer(nn_layer, layer_type):
+    """
+    :param nn_layer : NN layer proto message
+    :param layer_type : str Layer type to check against
+    :returns True if nn_layer is of type `layer_type` otherwise False
+    """
+    return nn_layer.WhichOneof("layer") == layer_type
+
+
+def _get_input(layer, index=0):
+    """
+    :param layer : NN Layer Proto message
+    :param index : Layer input index (Default 0)
+    :returns name of input at provided index if present, otherwise None
+    """
+    if len(layer.input) <= index:
+        return None
+    return layer.input[index]
+
+
+def _get_output(layer, index=0):
+    """
+    :param layer : NN Layer Proto message
+    :param index : Layer output index (Default 0)
+    :returns name of output at provided index if present, otherwise None
+    """
+    if len(layer.output) <= index:
+        return None
+    return layer.output[index]
+
+
+def _get_network_output(spec):
+    """
+    :param spec : CoreML Specification
+    :returns network output names
+    """
+    network_output_names = []
+    for _out in spec.description.output:
+        network_output_names.append(_out.name)
+    return network_output_names
+
+
+def transform_conv_crop(spec):
+    """
+    Transforms Conv -> Crop -> BN (if present) -> Activation (if present) into
+    Conv -> BN (if present) -> Activation (if present) -> Crop
+    This transformation will allow Conv -> BN -> Activation fusion by changing
+    the position of the crop layer, which does not affect the computation
+    """
+    # Collect metadata
+    out_degree = _get_blob_out_degree(spec)
+    network_output_names = _get_network_output(spec)
+
+    nn_spec = _get_nn_spec(spec)
+    nn_layers = nn_spec.layers
+    for i in range(0, len(nn_layers) - 2):
+
+        # If the Convolution output is being used as a network output or by
+        # more than one layer, that's acceptable
+        if not _is_layer(nn_layers[i], "convolution"):
+            continue
+
+        # Output of Crop layer must not be a network output or used by more than one layer
+        if not (
+            _is_layer(nn_layers[i + 1], "crop")
+            and _get_input(nn_layers[i + 1]) not in network_output_names
+            and out_degree[_get_output(nn_layers[i + 1])] == 1
+        ):
+            continue
+
+        layer_to_shuffle_with = -1
+
+        # Output of Batchnorm layer must not be a network output or used by more than one layer
+        if (
+            _is_layer(nn_layers[i + 2], "batchnorm")
+            and out_degree[_get_output(nn_layers[i + 2])] == 1
+        ):
+            layer_to_shuffle_with = i + 2
+
+        # Output of Activation layer must not be a network output or used by more than one layer
+        if (
+            i + 3 < len(nn_layers)
+            and _is_layer(nn_layers[i + 3], "activation")
+            and out_degree[_get_output(nn_layers[i + 3])] == 1
+        ):
+            layer_to_shuffle_with = i + 3
+
+        if layer_to_shuffle_with == -1:
+            continue
+        # restructure crop layer
+        # Conv ---> Crop ---> BN ---> Activation ---> Layer1
+        # In following three steps
+        #  1. Conv --------------> BN ---> Activation ---> Layer1
+        #        \            /
+        #         ---> Crop --
+        nn_layers[i].output[0] = nn_layers[i + 1].output[0]
+        #  2. Conv ---> BN ---> Activation ---> Layer1
+        #        \                          /
+        #         -----------------Crop ----
+        nn_layers[i + 1].output[0] = nn_layers[layer_to_shuffle_with].output[0]
+        #  3. Conv ---> BN ---> Activation ---> Crop ---> Layer1
+        nn_layers[layer_to_shuffle_with].output[0] = nn_layers[i + 1].input[0]
+
+        # Add Crop layer at new position and remove from current position
+        crop_layer = nn_layers[i + 1]
+        nn_layers.remove(crop_layer)
+        nn_layers.insert(layer_to_shuffle_with, crop_layer)
+
+
+def remove_disconnected_layers(spec):
+    """
+    Removes layers from the model specification if their output is not
+    connected or on a path to the network output.
+    """
+
+    def _remove_layers_from_spec(nn_spec, layers_to_delete):
+        nn_layers = nn_spec.layers
+        for _layer in layers_to_delete:
+            nn_layers.remove(_layer)
+
+    def _get_disconnected_layers_rec(nn_spec):
+        """
+        - Iterates over layers in bottom-up fashion
+        - Collects layers whose output is not being used (marks and does lazy deletion)
+        - Recursively iterates over NN Spec if layer is Loop or Branch
+        """
+
+        def _decrease_input_degree(layer):
+            """
+            Helper routine to decrement the out-degree of the given layer's input nodes
+            """
+            for _input in layer.input:
+                out_degree[_input] -= 1
+                if out_degree[_input] == 0:
+                    del out_degree[_input]
+
+        nn_layers = nn_spec.layers
+        layers_to_delete = []
+        for _layer in reversed(nn_layers):
+            layer_type = _layer.WhichOneof("layer")
+            if layer_type == "loop":
+                condition_net_layers_to_delete = _get_disconnected_layers_rec(
+                    _layer.loop.conditionNetwork
+                )
+                body_net_layers_to_delete = _get_disconnected_layers_rec(
+                    _layer.loop.bodyNetwork
+                )
+                _remove_layers_from_spec(
+                    _layer.loop.conditionNetwork, condition_net_layers_to_delete
+                )
+                _remove_layers_from_spec(
+                    _layer.loop.bodyNetwork, body_net_layers_to_delete
+                )
+
+                # NOTE: Debatable?
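+                # (An empty condition or body network means every layer in it
+                # was dead, so the loop itself cannot contribute to a live
+                # output and is removed as well.)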
+                # If the condition network or bodyNetwork is empty, delete the loop layer
+                if (
+                    len(_layer.loop.conditionNetwork.layers) == 0
+                    or len(_layer.loop.bodyNetwork.layers) == 0
+                ):
+                    layers_to_delete.append(_layer)
+                    _decrease_input_degree(_layer)
+                continue
+
+            if layer_type == "branch":
+                if_layers_to_delete = _get_disconnected_layers_rec(
+                    _layer.branch.ifBranch
+                )
+                else_layers_to_delete = _get_disconnected_layers_rec(
+                    _layer.branch.elseBranch
+                )
+
+                total_if_layers = len(_layer.branch.ifBranch.layers)
+                total_else_layers = len(_layer.branch.elseBranch.layers)
+
+                if (
+                    len(if_layers_to_delete) != total_if_layers
+                    and len(else_layers_to_delete) != total_else_layers
+                ):
+                    # If both branches are non-empty after dead-layer elimination,
+                    # remove the respective layers
+                    _remove_layers_from_spec(
+                        _layer.branch.ifBranch, if_layers_to_delete
+                    )
+                    _remove_layers_from_spec(
+                        _layer.branch.elseBranch, else_layers_to_delete
+                    )
+                elif (
+                    len(if_layers_to_delete) == total_if_layers
+                    and len(else_layers_to_delete) == total_else_layers
+                ):
+                    # If both branches are empty after dead-layer elimination,
+                    # remove the branch layer altogether
+                    layers_to_delete.append(_layer)
+                    _decrease_input_degree(_layer)
+                continue
+
+            output_is_used = False
+            for _output in _layer.output:
+                # If output is used, cannot remove current layer
+                if _output in out_degree:
+                    output_is_used = True
+                    break
+
+            # If no output from the current node is used,
+            # remove the layer and decrement the use count for all its inputs
+            if not output_is_used:
+                layers_to_delete.append(_layer)
+                _decrease_input_degree(_layer)
+
+        return layers_to_delete
+
+    def _remove_disconnected_layers_rec(nn_spec):
+        """
+        Entry point for removing disconnected layers
+        """
+        layers_to_delete = _get_disconnected_layers_rec(nn_spec)
+        # delete layers to be removed
+        _remove_layers_from_spec(nn_spec, layers_to_delete)
+
+    # Get the use count of each layer
+    out_degree = _get_blob_out_degree(spec)
+    nn_spec = _get_nn_spec(spec)
+    # Initiate removal from the high-level Neural Network spec
+    _remove_disconnected_layers_rec(nn_spec)
+
+
+def remove_redundant_transposes(spec):
+    """
+    Removes layers from the model specification that are back-to-back
+    transposes that compose to the identity.
+    """
+
+    def blob_name_to_layers(nn_layers):
+        """
+        output_to_layers: {str: layer_proto_message} : {blob name: layers that it feeds into}
+        input_to_parent_layers: {str: layer_proto_message} : {blob name: parent layer that produces it}
+        """
+        output_to_layers = {}
+        for layer in nn_layers:
+            for input in layer.input:
+                if not input in output_to_layers:
+                    output_to_layers[input] = [layer]
+                else:
+                    output_to_layers[input].append(layer)
+
+        input_to_parent_layers = {}
+        for layer in nn_layers:
+            for output in layer.output:
+                if not layer.WhichOneof("layer") == "copy":
+                    assert output not in input_to_parent_layers, \
+                        "'{}' blob is generated by more than 1 layer".format(output)
+                    input_to_parent_layers[output] = layer
+
+        return input_to_parent_layers, output_to_layers
+
+    def _delete_layers(nn_spec, layers_to_delete):
+        """
+        Given a neural network spec and pairs of transposes to remove, rewire
+        the network to bypass those transposes and remove them from the spec.
+        """
+        nn_layers = nn_spec.layers
+        _, output_to_layers = blob_name_to_layers(nn_layers)
+
+        # First pass: rewire layers to bypass those that will be deleted.
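+        # Each entry in layers_to_delete is a consecutive run of transposes
+        # whose net permutation is the identity, so consumers of the last
+        # transpose can read directly from the first transpose's input.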
+        for layers in layers_to_delete:
+            start_layer = layers[0]
+            end_layer = layers[-1]
+
+            # Replace the children's input with start_layer's input
+            children = output_to_layers[end_layer.output[0]]
+            for child in children:
+                idx = [
+                    i
+                    for i, input in enumerate(child.input)
+                    if input == end_layer.output[0]
+                ]
+                assert len(idx) == 1
+                idx = idx[0]
+                child.input[idx] = start_layer.input[0]
+
+        # Second pass: delete the layers.
+        for layers in layers_to_delete:
+            for layer in layers:
+                nn_layers.remove(layer)
+
+    def _find_redundant_transposes(nn_spec):
+        """
+        Search the neural network spec for sequences of transposes that
+        together form the identity, and return a list of those sequences.
+        """
+        nn_layers = nn_spec.layers
+        layers_to_delete = []
+
+        input_to_parent_layers, output_to_layers = blob_name_to_layers(nn_layers)
+
+        for layer in nn_layers:
+            # Only start with the last element of the transpose layers sequence
+            if not layer.WhichOneof("layer") == "transpose":
+                continue
+            if (
+                layer.output[0] in output_to_layers
+                and len(output_to_layers[layer.output[0]]) == 1
+                and output_to_layers[layer.output[0]][0].WhichOneof("layer")
+                == "transpose"
+            ):
+                continue
+
+            # Get the transpose layers sequence
+            layers = []
+            cursor = layer
+            while True:
+                if cursor.output[0] in output_to_layers:
+                    layers.append(cursor)
+                if not cursor.input[0] in input_to_parent_layers:
+                    break
+                cursor = input_to_parent_layers[cursor.input[0]]
+                if cursor.WhichOneof("layer") != "transpose":
+                    break
+                if len(output_to_layers[cursor.output[0]]) != 1:
+                    break
+            layers = layers[::-1]
+
+            if len(layers) == 0:
+                continue
+
+            # Optimize for the number of layers which can be merged using dynamic programming
+            def solve_dp(layers):
+                """
+                The resulting dp[i] means the maximum length of a transpose sequence resulting
+                in identity starting at index i.
+                For example, dp[0] = 0 means there is no sequence starting at 0 that results in identity;
+                dp[10] = 5 means the longest identity sequence starting at 10 has length 5,
+                so [layers[10],layer[11],..,layer[14]] is the longest identity sequence starting at 10.
+
+                # dic: {tuple:int}
+                # key is the net transpose axes pattern starting from the first layer
+                # value is the highest id of the layer which has this pattern
+                # e.g. if dic[(1,2,0)] = 34, it means that starting from the 1st layer,
+                # the net transpose pattern `(1,2,0)` is last seen at layer id 34. No layer after the 34-th
+                # layer will result in the net pattern `(1,2,0)`
+                """
+                dim = len(layers[0].transpose.axes)
+                dp = [0] * len(layers)
+                dic = {}
+                axes = list(range(dim))
+                dic[tuple(axes)] = 0
+                for i in range(len(layers)):
+                    axes = [axes[k] for k in layers[i].transpose.axes]
+                    key = tuple(axes)
+                    if key in dic:
+                        dp[dic[key]] = i - dic[key] + 1
+                    dic[key] = i + 1
+                for i in range(len(layers) - 1, -1, -1):
+                    j = i + dp[i]
+                    if j < len(layers):
+                        dp[i] = dp[i] + dp[j]
+                return dp
+
+            dp = solve_dp(layers)
+
+            """
+            Once we know the maximum identity sequence starting at each index, we solve
+            for the maximum total number of nodes we can remove.
+            I think there must be lots of different solutions to this, but I use DP again.
+            sol_num[i] keeps track of the maximum number of nodes that can be removed after index i.
+            For example, if sol_num[10] = 5, this means after index 10 we can remove at most 5 nodes.
+            sol_bt[i] keeps the first starting point of the identity sequence which results in the
+            optimal solution after index i.
+            For example, if sol_bt[10] = 12, it means that in order to get rid of the maximum number of
+            nodes after 10, the first starting point is index 12.
+            After constructing sol_num and sol_bt by dynamic programming, we backtrack for the optimal
+            solution using sol_bt.
+            """
+            sol_num = [0] * len(dp)
+            sol_bt = [None] * len(dp)
+            if dp[-1] != 0:
+                sol_num[-1] = dp[-1]
+                sol_bt[-1] = len(dp) - 1
+            for i in range(len(sol_num) - 2, -1, -1):
+                if dp[i] == 0:
+                    sol_num[i] = sol_num[i + 1]
+                    sol_bt[i] = sol_bt[i + 1]
+                else:
+                    num = dp[i]
+                    j = i + dp[i]
+                    if j < len(sol_num):
+                        num += sol_num[j]
+                    if num > sol_num[i + 1]:
+                        sol_num[i] = num
+                        sol_bt[i] = i
+                    else:
+                        sol_num[i] = sol_num[i + 1]
+                        sol_bt[i] = sol_bt[i + 1]
+
+            # Get layers to delete using sol_bt
+            cursor = 0
+            while cursor < len(dp):
+                if sol_bt[cursor] is None:
+                    break
+                cursor = sol_bt[cursor]
+                tmp = [layers[i] for i in range(cursor, cursor + dp[cursor])]
+                layers_to_delete.append(tmp)
+                cursor += dp[cursor]
+
+        return layers_to_delete
+
+    nn_spec = _get_nn_spec(spec)
+    layers_to_delete = _find_redundant_transposes(nn_spec)
+    if len(layers_to_delete) > 0:
+        _delete_layers(nn_spec, layers_to_delete)
+        print("{} transpose pairs deleted".format(len(layers_to_delete)))
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/test_mlmodel_passes.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/test_mlmodel_passes.py
new file mode 100644
index 00000000..841460a9
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/test_mlmodel_passes.py
@@ -0,0 +1,1052 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import copy
+import unittest
+from sys import platform
+
+import numpy as np
+
+import coremltools.models.datatypes as datatypes
+from coremltools import ComputeUnit
+from coremltools._deps import _IS_MACOS
+from coremltools.converters.mil.backend.nn.passes.mlmodel_passes import (
+    remove_disconnected_layers, remove_redundant_transposes,
+    transform_conv_crop)
+from coremltools.models import MLModel
+from coremltools.models import neural_network as neural_network
+from coremltools.models.neural_network.printer import print_network_spec
+from coremltools.models.utils import _macos_version
+
+DEBUG = False
+np.random.seed(10)
+
+
+class MLModelPassesTest(unittest.TestCase):
+    def test_load_constant_remove(self):
+        input_features = [("data", datatypes.Array(*(3, 4)))]
+        output_features = [("out", None)]
+        builder = neural_network.NeuralNetworkBuilder(
+            input_features, output_features, disable_rank5_shape_mapping=True
+        )
+        builder.add_activation("relu1", "RELU", "data", "relu1")
+        builder.add_load_constant_nd(
+            "const1", "c1", constant_value=np.ones((5,)), shape=(5,)
+        )
+        builder.add_activation("relu2", "RELU", "relu1", "out")
+        builder.add_load_constant_nd(
+            "const2", "c2", constant_value=np.ones((5,)), shape=(5,)
+        )
+        builder.add_load_constant_nd(
+            "const3", "c3", constant_value=np.ones((5,)), shape=(5,)
+        )
+        spec = builder.spec
+        np.testing.assert_equal(5, len(spec.neuralNetwork.layers))
+        remove_disconnected_layers(spec)
+        np.testing.assert_equal(2, len(spec.neuralNetwork.layers))
+
+    def test_dead_layer_remove(self):
+        input_features = [("data", datatypes.Array(*(3, 4)))]
+        output_features = [("out", None)]
+        builder = neural_network.NeuralNetworkBuilder(
+            input_features, output_features, disable_rank5_shape_mapping=True
+        )
+        builder.add_activation("relu1", "RELU", "data", "relu1")
+        builder.add_load_constant_nd(
+            "const1", "c1", constant_value=np.ones((5,)), shape=(5,)
+        )
+        builder.add_load_constant_nd(
+            "const2", "c2", constant_value=np.ones((5,)), shape=(5,)
+        )
+        builder.add_split_nd(
+            "splitnd1", "const2", ["s1", "s2", "s3"], axis=0, num_splits=3
+        )
+        builder.add_squeeze("squeeze", "s1", "squeeze_out")
+        builder.add_activation("relu4", "RELU", "s2", "relu4")
+        builder.add_activation("relu5", "RELU", "relu4", "relu5")
+        builder.add_load_constant_nd(
+            "const3", "c3", constant_value=np.ones((5,)), shape=(5,)
+        )
+        builder.add_activation("relu2", "RELU", "relu1", "out")
+        spec = builder.spec
+        np.testing.assert_equal(9, len(spec.neuralNetwork.layers))
+        remove_disconnected_layers(spec)
+        np.testing.assert_equal(2, len(spec.neuralNetwork.layers))
+
+    def test_dead_layer_remove_branch(self):
+        convergence_tolerance = 1e-8
+
+        input_features = [("input", datatypes.Array(*(2,)))]
+        output_features = [("out", None)]
+
+        builder = neural_network.NeuralNetworkBuilder(
+            input_features, output_features, disable_rank5_shape_mapping=True
+        )
+        # add a condition to break from the loop if the convergence criterion is met
+        builder.add_less_than("cond", ["input"], "cond", alpha=convergence_tolerance)
+        branch_layer = builder.add_branch("branch_layer", "cond")
+        builder_ifbranch = neural_network.NeuralNetworkBuilder(
+            nn_spec=branch_layer.branch.ifBranch
+        )
+        builder_ifbranch.add_activation("relu1", "RELU", "input", "relu1_out")
+        builder_ifbranch.add_activation("relu2_out", "RELU", "relu1_out", "relu2_out")
+        builder_elsebranch = neural_network.NeuralNetworkBuilder(
+            nn_spec=branch_layer.branch.elseBranch
+        )
+        builder_elsebranch.add_activation("linear1", "LINEAR", "input", "linear1_out")
+        builder_elsebranch.add_activation(
+            "linear2", "LINEAR", "linear1_out", "relu2_out"
+        )
+        builder.add_squeeze("out", "input", "out", squeeze_all=True)
+
+        mlmodel = MLModel(builder.spec, compute_units=ComputeUnit.CPU_ONLY)
+        data = np.random.rand(2,)
+        data_dict = {"input": data}
+        if _IS_MACOS:
+            before_pass_out = mlmodel.predict(data_dict)["out"]
+            if DEBUG:
+                print(
+                    "\n mlmodel description before remove disconnected layers pass: \n"
+                )
+                print_network_spec(builder.spec, style="coding")
+            remove_disconnected_layers(builder.spec)
+            if DEBUG:
+                print(
+                    "\n mlmodel description after remove disconnected layers pass: \n"
+                )
+                print_network_spec(builder.spec, style="coding")
+            mlmodel = MLModel(builder.spec, compute_units=ComputeUnit.CPU_ONLY)
+            after_pass_out = mlmodel.predict(data_dict)["out"]
+
+            np.testing.assert_almost_equal(before_pass_out, after_pass_out, decimal=2)
+            np.testing.assert_equal(len(builder.spec.neuralNetwork.layers), 1)
+
+    def test_dead_layer_partial_branch(self):
+        convergence_tolerance = 1e-8
+
+        input_features = [("input", datatypes.Array(*(2,)))]
+        output_features = [("out", None)]
+
+        builder = neural_network.NeuralNetworkBuilder(
+            input_features, output_features, disable_rank5_shape_mapping=True
+        )
+        # add a condition to break from the loop if the convergence criterion is met
+        builder.add_less_than("cond", ["input"], "cond", alpha=convergence_tolerance)
+        branch_layer = builder.add_branch("branch_layer", "cond")
+        builder_ifbranch = neural_network.NeuralNetworkBuilder(
+            nn_spec=branch_layer.branch.ifBranch
+        )
+        builder_ifbranch.add_activation("relu1", "RELU", "input", "relu1_out")
+        builder_ifbranch.add_activation("relu2_out", "RELU", "relu1_out", "relu2_out")
+        builder_elsebranch = neural_network.NeuralNetworkBuilder(
+            nn_spec=branch_layer.branch.elseBranch
+        )
+        builder_elsebranch.add_activation("linear1", "LINEAR", "input", "linear1_out")
+        builder_elsebranch.add_activation(
+            "linear_red_1", "LINEAR", "input", "linear_red1_out"
+        )
+        builder_elsebranch.add_activation(
+            "linear_red_2", "LINEAR", "linear_red1_out", "linear_red2_out"
+        )
+        builder_elsebranch.add_activation(
+            "linear2", "LINEAR", "linear1_out", "relu2_out"
+        )
+        builder.add_squeeze("out", "relu2_out", "out", squeeze_all=True)
+
+        mlmodel = MLModel(builder.spec, compute_units=ComputeUnit.CPU_ONLY)
+
+        if not _IS_MACOS:
+            # Can not get predictions unless on macOS.
+            return
+
+        data = np.random.rand(2,)
+        data_dict = {"input": data}
+        before_pass_out = mlmodel.predict(data_dict)["out"]
+        if DEBUG:
+            print("\n mlmodel description before remove disconnected layers pass: \n")
+            print_network_spec(builder.spec, style="coding")
+        old_spec = copy.copy(builder.spec)
+        remove_disconnected_layers(builder.spec)
+        if DEBUG:
+            print("\n mlmodel description after remove disconnected layers pass: \n")
+            print_network_spec(builder.spec, style="coding")
+        mlmodel = MLModel(builder.spec, compute_units=ComputeUnit.CPU_ONLY)
+        after_pass_out = mlmodel.predict(data_dict)["out"]
+
+        np.testing.assert_almost_equal(before_pass_out, after_pass_out, decimal=2)
+        np.testing.assert_equal(
+            len(old_spec.neuralNetwork.layers[1].branch.ifBranch.layers),
+            len(builder.spec.neuralNetwork.layers[1].branch.ifBranch.layers),
+        )
+        np.testing.assert_equal(
+            len(builder.spec.neuralNetwork.layers[1].branch.elseBranch.layers), 2
+        )
+
+    def test_conv_crop_bn_to_conv_bn_crop(self):
+        input_features = [("data", datatypes.Array(1, 10, 10))]
+        output_features = [("out", None)]
+        builder = neural_network.NeuralNetworkBuilder(input_features, output_features)
+        W = np.ones((1, 2, 2, 2), dtype=np.float32)
+        builder.add_convolution(
+            name="conv",
+            kernel_channels=1,
+            output_channels=2,
+            height=2,
+            width=2,
+            stride_height=1,
+            stride_width=1,
+            border_mode="valid",
+            groups=1,
+            W=W,
+            b=None,
+            has_bias=False,
+            input_name="data",
+            output_name="conv_out",
+        )
+        builder.add_crop(
+            name="crop",
+            left=1,
+            right=1,
+            top=1,
+            bottom=1,
+            offset=0,
+            input_names=["conv_out"],
+            output_name="crop_out",
+        )
+        builder.add_batchnorm(
+            name="bn",
+            channels=2,
+            gamma=np.ones(2,).astype(np.float32),
+            beta=np.ones(2,).astype(np.float32),
+            mean=np.ones(2,).astype(np.float32),
+            variance=np.ones(2,).astype(np.float32),
+            input_name="crop_out",
+            output_name="out",
+        )
+        # Conv -> Crop -> BN
+        spec = builder.spec.neuralNetwork
+        np.testing.assert_equal("crop", spec.layers[1].WhichOneof("layer"))
+        np.testing.assert_equal("batchnorm", spec.layers[2].WhichOneof("layer"))
+
+        # Predict
+        if _IS_MACOS:
+            mlmodel = MLModel(builder.spec, compute_units=ComputeUnit.CPU_ONLY)
+            data = np.random.rand(1, 10, 10)
+            data_dict = {"data": data}
+            before_pass_out = mlmodel.predict(data_dict)["out"]
+
+        # transform the pattern
+        transform_conv_crop(builder.spec)
+        # Conv -> BN -> Crop
+        np.testing.assert_equal("batchnorm", spec.layers[1].WhichOneof("layer"))
+        np.testing.assert_equal("crop", spec.layers[2].WhichOneof("layer"))
+
+        if _IS_MACOS:
+            # Predict
+            mlmodel = MLModel(builder.spec, compute_units=ComputeUnit.CPU_ONLY)
+            after_pass_out = mlmodel.predict(data_dict)["out"]
+            np.testing.assert_almost_equal(before_pass_out, after_pass_out, decimal=3)
+
def test_conv_crop_bn_relu_to_conv_bn_relu_crop(self): + input_features = [("data", datatypes.Array(1, 10, 10))] + output_features = [("out", None)] + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + + W = np.ones((1, 2, 2, 2), dtype=np.float32) + builder.add_convolution( + name="conv", + kernel_channels=1, + output_channels=2, + height=2, + width=2, + stride_height=1, + stride_width=1, + border_mode="valid", + groups=1, + W=W, + b=None, + has_bias=False, + input_name="data", + output_name="conv_out", + ) + builder.add_crop( + name="crop", + left=1, + right=1, + top=1, + bottom=1, + offset=0, + input_names=["conv_out"], + output_name="crop_out", + ) + builder.add_batchnorm( + name="bn", + channels=2, + gamma=np.ones(2,).astype(np.float32), + beta=np.ones(2,).astype(np.float32), + mean=np.ones(2,).astype(np.float32), + variance=np.ones(2,).astype(np.float32), + input_name="crop_out", + output_name="bn_out", + ) + builder.add_activation( + name="relu", non_linearity="RELU", input_name="bn_out", output_name="out" + ) + # Conv -> Crop -> BN -> ReLU + spec = builder.spec.neuralNetwork + np.testing.assert_equal("crop", spec.layers[1].WhichOneof("layer")) + np.testing.assert_equal("batchnorm", spec.layers[2].WhichOneof("layer")) + np.testing.assert_equal("activation", spec.layers[3].WhichOneof("layer")) + + # Predict + if _IS_MACOS: + mlmodel = MLModel(builder.spec, compute_units=ComputeUnit.CPU_ONLY) + data = np.random.rand(1, 10, 10) + data_dict = {"data": data} + before_pass_out = mlmodel.predict(data_dict)["out"] + + # transform the pattern + transform_conv_crop(builder.spec) + # Conv -> BN -> ReLU -> Crop + np.testing.assert_equal("batchnorm", spec.layers[1].WhichOneof("layer")) + np.testing.assert_equal("activation", spec.layers[2].WhichOneof("layer")) + np.testing.assert_equal("crop", spec.layers[3].WhichOneof("layer")) + + # Predict + mlmodel = MLModel(builder.spec, compute_units=ComputeUnit.CPU_ONLY) + if _IS_MACOS: + after_pass_out = mlmodel.predict(data_dict)["out"] + np.testing.assert_almost_equal(before_pass_out, after_pass_out, decimal=3) + + +@unittest.skipIf( + platform != "darwin" or _macos_version() < (10, 15), "Requires MacOS 10.15 or later" +) +class Redundant_Transposees_Test(unittest.TestCase): + def _test_builder(self, builder, input_shape, expected_layer_num=None): + + data = np.random.rand(*input_shape) + + # Mlmodel before + mlmodel = MLModel(builder.spec, compute_units=ComputeUnit.CPU_ONLY) + output_before = mlmodel.predict({"data": data})["out"] + num_layers_before = len(builder.spec.neuralNetwork.layers) + + remove_redundant_transposes(builder.spec) + + layers = builder.spec.neuralNetwork.layers + if expected_layer_num is None: + self.assertTrue(len(layers) < num_layers_before) + else: + self.assertEqual(len(layers), expected_layer_num) + + # Mlmodel after + mlmodel = MLModel(builder.spec, compute_units=ComputeUnit.CPU_ONLY) + output_after = mlmodel.predict({"data": data})["out"] + + np.testing.assert_almost_equal(output_before, output_after, decimal=3) + + def test_output_edge_case(self): + + # For now for safety purpose, the node which are output should't be merged + input_shape = (1, 10, 5) + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("out", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_transpose( + name="first_transpose", + axes=[2, 0, 1], + input_name="data", + output_name="first_transpose_out", + ) 
+ builder.add_transpose( + name="second_transpose", + axes=[1, 2, 0], + input_name="first_transpose_out", + output_name="out", + ) + + self._test_builder(builder, input_shape, 2) + + def test_output_edge_case_2(self): + + # For now for safety purpose, the node which are output should't be merged + input_shape = (1, 10, 5) + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("out", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_transpose( + name="ranspose", axes=[1, 2, 0], input_name="data", output_name="out" + ) + + self._test_builder(builder, input_shape, 1) + + def test_remove_single_identity_transpose(self): + + # A single identity transpose (like 0,1,2) should also be removed + input_shape = (1, 10, 5) + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("out", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_transpose( + name="uselss_transpose", + axes=[0, 1, 2], + input_name="data", + output_name="useless_transpose_out", + ) + builder.add_activation( + name="relu", + non_linearity="RELU", + input_name="useless_transpose_out", + output_name="out", + ) + + self._test_builder(builder, input_shape, 1) + + def test_remove_three_transpose(self): + + # Three transpose layer which can be removed + input_shape = (1, 10, 5) + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("out", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + transpose = [[2, 1, 0], [1, 0, 2], [2, 0, 1]] + input_name = "data" + for i, axes in enumerate(transpose): + name = "transpose_" + str(i) + output_name = name + "_out" + builder.add_transpose( + name=name, axes=axes, input_name=input_name, output_name=output_name + ) + input_name = output_name + + builder.add_activation( + name="relu", non_linearity="RELU", input_name=input_name, output_name="out" + ) + + self._test_builder(builder, input_shape, 1) + + def test_remove_thousands_identity_transpose(self): + + """ + INPUT + | + v + [t1] + | + v + [t2] + | + v + . + . + . + | + v + [t1000] + | + v + RELU + tk are all identity + Remove a sequence of 1000 identity transpose + """ + input_shape = (1, 10, 5) + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("out", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + num_layers = 1000 + input_name = "data" + for i in range(num_layers): + output_name = "layer_" + str(i) + "_output" + name = "layer_" + str(i) + builder.add_transpose( + name=name, + axes=[0, 1, 2], + input_name=input_name, + output_name=output_name, + ) + input_name = output_name + + builder.add_activation( + name="relu", non_linearity="RELU", input_name=input_name, output_name="out" + ) + + self._test_builder(builder, input_shape, 1) + + def test_remove_thousands_identity_transpose_with_activation_between(self): + """ + INPUT + | + v + [t1] + | + v + . + . + . + [t500] + | + v + RELU_1 + | + v + . + . + . 
+           |
+           v
+        [t1000]
+           |
+           v
+         RELU_2
+        tk are all identity
+        Remove a sequence of 1000 identity transposes, but with a RELU in the
+        middle; the final output should be
+         INPUT
+           |
+           v
+         RELU_1
+           |
+           v
+         RELU_2
+
+        """
+        input_shape = (1, 10, 5)
+        input_features = [("data", datatypes.Array(*input_shape))]
+        output_features = [("out", None)]
+        builder = neural_network.NeuralNetworkBuilder(
+            input_features, output_features, disable_rank5_shape_mapping=True
+        )
+
+        num_layers = 1000
+        input_name = "data"
+        for i in range(num_layers):
+            output_name = "layer_" + str(i) + "_output"
+            name = "layer_" + str(i)
+            builder.add_transpose(
+                name=name,
+                axes=[0, 1, 2],
+                input_name=input_name,
+                output_name=output_name,
+            )
+            input_name = output_name
+            if i == num_layers / 2:
+                builder.add_activation(
+                    name="relu_inter",
+                    non_linearity="RELU",
+                    input_name=input_name,
+                    output_name="relu_out",
+                )
+                input_name = "relu_out"
+        builder.add_activation(
+            name="relu", non_linearity="RELU", input_name=input_name, output_name="out"
+        )
+        self._test_builder(builder, input_shape, 2)
+
+    def test_remove_thousands_random_transpose_layers(self):
+        """
+         INPUT
+           |
+           v
+         [t_0]
+           |
+           v
+         [t_1]
+           |
+           v
+           .
+           .
+           .
+           |
+           v
+        [t_999]
+           |
+           v
+         RELU
+        tk are randomly generated;
+        with this particular seed, the result should be
+         INPUT
+           |
+           v
+         [t_0]
+           |
+           v
+         [t_1]
+           |
+           v
+         RELU
+        """
+
+        import random
+        from itertools import permutations
+
+        random.seed(1000)
+        input_shape = (3, 10, 5)
+        input_features = [("data", datatypes.Array(*input_shape))]
+        output_features = [("out", None)]
+        builder = neural_network.NeuralNetworkBuilder(
+            input_features, output_features, disable_rank5_shape_mapping=True
+        )
+
+        num_layers = 1000
+        dim = 3
+        input_name = "data"
+        debug = []
+        for i in range(num_layers):
+            axes = list(permutations(range(dim)))
+            random.shuffle(axes)
+            output_name = "layer_" + str(i) + "_output"
+            name = "layer_" + str(i)
+            debug.append(axes[0])
+            builder.add_transpose(
+                name=name, axes=axes[0], input_name=input_name, output_name=output_name
+            )
+            input_name = output_name
+        builder.add_activation(
+            name="relu", non_linearity="RELU", input_name=input_name, output_name="out"
+        )
+        self._test_builder(builder, input_shape, None)
+
+    def test_remove_thousands_random_transpose_layers_case_2(self):
+        """
+        Same test as the previous one, but with more layers and dimensions.
+        """
+        import random
+        from itertools import permutations
+
+        random.seed(0)
+        input_shape = (3, 10, 5, 2, 4)
+        input_features = [("data", datatypes.Array(*input_shape))]
+        output_features = [("out", None)]
+        builder = neural_network.NeuralNetworkBuilder(
+            input_features, output_features, disable_rank5_shape_mapping=True
+        )
+
+        num_layers = 5000
+        dim = 5
+        input_name = "data"
+        for i in range(num_layers):
+            axes = list(permutations(range(dim)))
+            random.shuffle(axes)
+            output_name = "layer_" + str(i) + "_output"
+            name = "layer_" + str(i)
+            builder.add_transpose(
+                name=name, axes=axes[0], input_name=input_name, output_name=output_name
+            )
+            input_name = output_name
+        builder.add_activation(
+            name="relu", non_linearity="RELU", input_name=input_name, output_name="out"
+        )
+        self._test_builder(builder, input_shape, None)
+
+    def test_branch_structure(self):
+        """
+         INPUT
+           |
+           v
+         [t_0]
+           |
+           v
+         [t_1]
+           |
+           v
+         [t_3] --.
+           |     |
+           v     v
+         [t_4] RELU_1
+           |
+           v
+         [t_5]
+           |
+           v
+         RELU_2
+        t_0, t_1, t_3 can be merged.
+        t_4, t_5 can be merged.
+        The output should be
+         INPUT
+           |
+        .------.
+ | | + v v + RELU_2 RELU_1 + + """ + input_shape = (1, 10, 5) + input_features = [("data", datatypes.Array(1, 10, 5))] + output_features = [("out", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + transpose = [[2, 1, 0], [2, 1, 0], [0, 1, 2], [2, 0, 1], [1, 2, 0]] + input_name = "data" + for i, axes in enumerate(transpose): + name = "transpose_" + str(i) + output_name = name + "_out" + builder.add_transpose( + name=name, axes=axes, input_name=input_name, output_name=output_name + ) + input_name = output_name + + builder.add_activation( + name="relu", non_linearity="RELU", input_name=input_name, output_name="out" + ) + builder.add_activation( + name="dumpy", + non_linearity="RELU", + input_name="transpose_2_out", + output_name="dumpy", + ) + self._test_builder(builder, input_shape, 2) + + def test_branch_case_2(self): + """ + INPUT + | + v + [t_0] --. + | | + v v + [t_1] RELU_1 + | + v + RELU_2 + Even though t_0, t_1 can be merged, but there is a branch from t_0, + so we shouldn't remove anything here. + + """ + input_shape = (1, 10, 5) + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("out", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + transpose = [[2, 1, 0], [2, 1, 0]] + input_name = "data" + for i, axes in enumerate(transpose): + name = "transpose_" + str(i) + output_name = name + "_out" + builder.add_transpose( + name=name, axes=axes, input_name=input_name, output_name=output_name + ) + input_name = output_name + + builder.add_activation( + name="relu", non_linearity="RELU", input_name=input_name, output_name="out" + ) + builder.add_activation( + name="dumpy", + non_linearity="RELU", + input_name="transpose_0_out", + output_name="dumpy", + ) + self._test_builder(builder, input_shape, 4) + + def test_fork_structure_case_3(self): + """ + INPUT + | + v + [t_0] + | + v + [t_1]--. + | | + | v + | RELU_1 + | + v + [t_2]--. + | | + | v + | RELU_2 + [t_3] + | + v + [t_4]--. + | | + | v + | RELU_3 + v + RELU_4 + + Even though t_0, t_1 can be merged, t_2 is identity, t_3, t_4 can be merge, + The final output should be + INPUT + | + .------------.----------. + | | | | + v v v v + RELU_1 RELU_2 RELU_3 RELU_4 + + """ + input_shape = (1, 10, 5) + input_features = [("data", datatypes.Array(1, 10, 5))] + output_features = [("out", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + transpose = [[2, 1, 0], [2, 1, 0], [0, 1, 2], [2, 1, 0], [2, 1, 0]] + input_name = "data" + for i, axes in enumerate(transpose): + name = "transpose_" + str(i) + output_name = name + "_out" + builder.add_transpose( + name=name, axes=axes, input_name=input_name, output_name=output_name + ) + input_name = output_name + + builder.add_activation( + name="relu", non_linearity="RELU", input_name=input_name, output_name="out" + ) + builder.add_activation( + name="dumpy_1", + non_linearity="RELU", + input_name="transpose_1_out", + output_name="dumpy_1", + ) + builder.add_activation( + name="dumpy_2", + non_linearity="RELU", + input_name="transpose_2_out", + output_name="dumpy_2", + ) + builder.add_activation( + name="dumpy_4", + non_linearity="RELU", + input_name="transpose_4_out", + output_name="dumpy_4", + ) + + self._test_builder(builder, input_shape, 4) + + def test_fork(self): + """ + INPUT + | + .------.------. 
+ | | + v v + [t_1] [t_3] + | | + v v + [t_2] [t_4] + | | + v v + RELU_1 RELU_2 + + t_1,t_2 can be merged and t_3,t_4 can be merged. + The result output would be + + INPUT + | + .------.------. + | | + v v + RELU_1 RELU_2 + + """ + input_shape = (1, 10, 5) + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("out", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + transpose = [[2, 1, 0], [2, 1, 0]] + input_name = "data" + for i, axes in enumerate(transpose): + name = "transpose_" + str(i) + output_name = name + "_out" + builder.add_transpose( + name=name, axes=axes, input_name=input_name, output_name=output_name + ) + input_name = output_name + + builder.add_activation( + name="relu", non_linearity="RELU", input_name=input_name, output_name="out" + ) + + input_name = "data" + for i, axes in enumerate(transpose): + name = "transpose_branch_2_" + str(i) + output_name = name + "_out" + builder.add_transpose( + name=name, axes=axes, input_name=input_name, output_name=output_name + ) + input_name = output_name + + builder.add_activation( + name="relu_branch_2", + non_linearity="RELU", + input_name=input_name, + output_name="out_branch_2", + ) + self._test_builder(builder, input_shape, 2) + + def test_fork_and_add(self): + """ + INPUT + | + .------.------. + | | + v v + [t_1] [t_3] + | | + v v + [t_2] [t_4] + | | + .-----. .-----. + | | + v v + Add + + t_1,t_2 can be merged and t_3,t_4 can be merged. + The result output would be + + INPUT + | + .------.------. + | | + .-----. .-----. + | | + v v + Add + + """ + input_shape = (1, 10, 5) + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("out", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + transpose = [[2, 1, 0], [2, 1, 0]] + input_name = "data" + for i, axes in enumerate(transpose): + name = "transpose_" + str(i) + output_name = name + "_out" + builder.add_transpose( + name=name, axes=axes, input_name=input_name, output_name=output_name + ) + input_name = output_name + + input_1 = input_name + + input_name = "data" + for i, axes in enumerate(transpose): + name = "transpose_branch_2_" + str(i) + output_name = name + "_out" + builder.add_transpose( + name=name, axes=axes, input_name=input_name, output_name=output_name + ) + input_name = output_name + + input_2 = input_name + + builder.add_add_broadcastable( + name="add", input_names=[input_1, input_2], output_name="out" + ) + self._test_builder(builder, input_shape, 1) + + def test_transpose(self): + def _build_and_test_network(input_size, transpose_layers, expected_layers): + """ + Helper function for testing transpose removal. + + Args: + input_size: Size of the input network tensor. + transpose_layers: Array of transpose axes definitions. + expected_layers: Array of indices into transpose_layers indicating + which of the transpose layers should be present after the + graph pass. 
+ """ + input_features = [("data", datatypes.Array(*input_size))] + output_features = [("out", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features + ) + spec = builder.spec.neuralNetwork.layers + + last_layer = "data" + for idx, axes in enumerate(transpose_layers): + name = "t{}".format(idx) + if idx == len(transpose_layers) - 1: + output_name = "out" + else: + output_name = name + "_out" + builder.add_transpose( + name=name, axes=axes, input_name=last_layer, output_name=output_name + ) + last_layer = output_name + + spec = builder.spec.neuralNetwork + # Check the network before the graph pass. + for idx in range(len(transpose_layers)): + np.testing.assert_equal( + "transpose", spec.layers[idx].WhichOneof("layer") + ) + # Run the removal pass. + remove_redundant_transposes(builder.spec) + # Verify only the expected layers remain. + np.testing.assert_equal(len(spec.layers), len(expected_layers)) + for output_layer_idx, input_layer_idx in enumerate(expected_layers): + np.testing.assert_equal( + "transpose", spec.layers[output_layer_idx].WhichOneof("layer") + ) + np.testing.assert_array_equal( + transpose_layers[input_layer_idx], + spec.layers[output_layer_idx].transpose.axes, + ) + + _build_and_test_network( + input_size=[1, 10, 10], + # These transposes are not inverses. + transpose_layers=[[2, 0, 1], [2, 0, 1]], + expected_layers=[0, 1], + ) + + _build_and_test_network( + input_size=[1, 1, 10, 10, 3], + # First two are the identity, then an extra. + transpose_layers=[[2, 4, 1, 0, 3], [3, 2, 0, 4, 1], [1, 0, 2, 3, 4]], + expected_layers=[2], + ) + + # A slightly more complicated test case where there are two transposes + # in topological order, but are actually in parallel in the graph. + builder = neural_network.NeuralNetworkBuilder( + [("data", datatypes.Array(2, 4, 8))], [("out", None)] + ) + builder.add_transpose( + name="t1", axes=[0, 2, 1], input_name="data", output_name="t1" + ) + builder.add_transpose( + name="t2", axes=[0, 2, 1], input_name="data", output_name="t2" + ) + builder.add_stack(name="stack", input_names=["t1", "t2"], output_name="out") + spec = builder.spec.neuralNetwork + # Run the removal pass. + remove_redundant_transposes(builder.spec) + # Verify nothing was removed. + np.testing.assert_equal(len(spec.layers), 3) + + +if __name__ == "__main__": + RUN_ALL_TESTS = True + if RUN_ALL_TESTS: + unittest.main() + else: + suite = unittest.TestSuite() + suite.addTest(MLModelPassesTest("test_load_constant_remove")) + unittest.TextTestRunner().run(suite) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/test_passes.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/test_passes.py new file mode 100644 index 00000000..5f85c24c --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/backend/nn/passes/test_passes.py @@ -0,0 +1,227 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import copy +import itertools + +import numpy as np +import pytest + +from coremltools.converters.mil import testing_reqs +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil.passes.pass_registry import PASS_REGISTRY +from coremltools.converters.mil.testing_utils import ( + apply_pass_and_basic_check, + assert_model_is_valid, + assert_same_output_names, + get_op_types_in_program, +) + +backends = testing_reqs.backends + + +class TestConv1dDeompositionPasses: + @pytest.mark.parametrize( + "backend, has_strides, pad_type, has_pad, has_dilations, has_bias", + itertools.product( + backends, + (True, False), + ("valid", "custom", "same"), + (True, False), + (True, False), + (True, False), + ), + ) + def test_conv1d_decomposition( + self, backend, has_strides, pad_type, has_pad, has_dilations, has_bias + ): + """ + Input graph: + input -> expand_dims -> conv2d -> squeeze -> out + + Output graph: + input -> conv1d -> out + """ + N, L = 2, 8 + C_in, C_out = 3, 4 + K = 3 + + conv_kwargs = {"weight": np.random.rand(C_out, C_in, K), "pad_type": pad_type} + if has_strides: + conv_kwargs["strides"] = (2,) + if has_pad: + conv_kwargs["pad"] = (1, 1) + if has_dilations: + conv_kwargs["dilations"] = (2,) + if has_bias: + conv_kwargs["bias"] = np.random.rand(C_out) + + @mb.program(input_specs=[mb.TensorSpec(shape=(N, C_in, L))]) + def prog(x): + y = mb.conv(x=x, **conv_kwargs) + return y + + assert get_op_types_in_program(prog) == ["conv"] + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "nn_backend::decompose_conv1d" + ) + assert get_op_types_in_program(prog) == ["expand_dims", "expand_dims", "conv", "squeeze"] + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::const_elimination") + assert get_op_types_in_program(prog) == ["expand_dims", "conv", "squeeze"] + + # infer output shape + strides = conv_kwargs["strides"] if has_strides else (1,) + pad = conv_kwargs["pad"] if has_pad else (0, 0) + dilations = conv_kwargs["dilations"] if has_dilations else (1,) + L_out = None + if pad_type == "valid": + L_out = (L - dilations[-1] * (K - 1) - 1) // strides[-1] + 1 + elif pad_type == "custom": + L_out = (L + pad[-2] + pad[-1] - dilations[-1] * (K - 1) - 1) // strides[-1] + 1 + elif pad_type == "same": + L_out = np.ceil(L / strides[-1]) + else: + raise Exception("unsupported pad type") + output_shape = (N, C_out, L_out) + + assert_model_is_valid( + prog, + {"x": (N, C_in, L)}, + expected_output_shapes={block.outputs[0].name: output_shape}, + backend=backend, + ) + + @pytest.mark.parametrize("backend", backends) + def test_conv1d_decomposition_dynamic_weight(self, backend): + """ + Input graph: + input -> expand_dims -> conv2d -> squeeze -> out + + Output graph: + input -> conv1d -> out + """ + N, L = 2, 9 + C_in, C_out = 4, 3 + K = 4 + + strides = (2,) + pad = (1, 1) + # MIL convolution with dynamic weights does not support dilations != 1 + # see coremltools/coremltools/converters/mil/mil/ops/defs/iOS15/conv.py + dilations = (1,) + + # infer L_out with pad_type fixed to custom + L_out = (L + pad[-2] + pad[-1] - dilations[-1] * (K - 1) - 1) // strides[-1] + 1 + + conv_kwargs = { + "strides": strides, + "pad_type": "custom", + "pad": pad, + "dilations": dilations, + } + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(N, C_in, L)), + mb.TensorSpec(shape=(C_out, 
C_in, K)), + ] + ) + def prog(x, weight): + y = mb.conv(x=x, weight=weight, **conv_kwargs) + return y + + assert get_op_types_in_program(prog) == ["conv"] + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "nn_backend::decompose_conv1d" + ) + assert get_op_types_in_program(prog) == ["expand_dims", "expand_dims", "conv", "squeeze"] + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::const_elimination") + assert get_op_types_in_program(prog) == ["expand_dims", "expand_dims", "conv", "squeeze"] + + output_shape = (N, C_out, L_out) + assert_model_is_valid( + prog, + {"x": (N, C_in, L), "weight": (C_out, C_in, K)}, + expected_output_shapes={block.outputs[0].name: output_shape}, + backend=backend, + ) + + +def test_commingle_loop_vars(): + def body(a, b): + # b is a loop invariant + return mb.add(x=a, y=b), b + + def cond(a, b): + a_mean = mb.reduce_mean(x=a, axes=[0, 1]) + b_mean = mb.reduce_mean(x=b, axes=[0, 1]) + return mb.less(x=a_mean, y=b_mean) + + @mb.program( + input_specs=[mb.TensorSpec(shape=(1, 2)), mb.TensorSpec(shape=(1, 2)),] + ) + def prog(a, b): + return mb.while_loop(_cond=cond, _body=body, loop_vars=(a, b)) + + while_op = prog.find_ops(op_type="while_loop", exactly_one=True)[0] + assert while_op.blocks[0].inputs[0].name == "a_x0" + assert while_op.blocks[0].inputs[1].name == "b_x0" + + prev_prog = copy.deepcopy(prog) + PASS_REGISTRY["nn_backend::commingle_loop_vars"](prog) + assert_same_output_names(prev_prog, prog) + + while_op = prog.find_ops(op_type="while_loop", exactly_one=True)[0] + assert while_op.blocks[0].inputs[0].name == while_op.outputs[0].name + assert while_op.blocks[0].inputs[1].name == while_op.outputs[1].name + + prog.validate() + + # The program is not ssa and thus cannot be converted + + +def test_handle_return_inputs_as_outputs(): + @mb.program( + input_specs=[mb.TensorSpec(shape=(1, 2)), mb.TensorSpec(shape=(1, 2)),] + ) + def prog(a, b): + return mb.mul(x=a, y=2.), b + + prev_main_output_names = [o.name for o in prog["main"].outputs] + assert prog["main"].outputs[1].op is None # output comes from input + + prev_prog = copy.deepcopy(prog) + PASS_REGISTRY["nn_backend::handle_return_inputs_as_outputs"](prog) + assert_same_output_names(prev_prog, prog) + + assert prog["main"].outputs[1].op is not None # output comes from an op + assert prog["main"].outputs[1].op.op_type == "identity" + + with pytest.raises(ValueError, match='used both as function\'s input and output'): + # prog has input and output names 'b' that refer to different vars + # This program can pass if we disable 'dedup_op_and_var_names' pass + assert_model_is_valid(prog, {"a": (1, 2), "b": (1, 2)}) + + +def test_handle_unused_inputs(): + @mb.program( + input_specs=[mb.TensorSpec(shape=(1, 2)),] + ) + def prog(unused_input): + return mb.const(val=[3, 2]) + + prev_prog = copy.deepcopy(prog) + PASS_REGISTRY["nn_backend::handle_unused_inputs"](prog) + assert_same_output_names(prev_prog, prog) + + id_op = prog.find_ops(op_type="identity", exactly_one=True)[0] + # Assert that input var is consumed by an identity op. 
+ assert id_op in prog["main"].inputs["unused_input"].child_ops + + assert_model_is_valid(prog, {"unused_input": (1, 2)}) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/conftest.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/conftest.py new file mode 100644 index 00000000..236ca03f --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/conftest.py @@ -0,0 +1,12 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + + +def pytest_make_parametrize_id(config, val, argname): + ''' + This function is a hook into pytest. It generates a user friendly string + representation of the parameterized values. + ''' + return "{}={}".format(argname, str(val)) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/converter.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/converter.py new file mode 100644 index 00000000..6642e200 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/converter.py @@ -0,0 +1,341 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import tempfile as _tempfile +import warnings as _warnings +from typing import Optional, Text, Tuple + +from coremltools.converters._profile_utils import _profile +from coremltools.converters.mil import Program +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil.types.symbolic import k_num_internal_syms, k_used_symbols +from coremltools.models import MLModel +from coremltools.models.model import _create_mlpackage + +from . import ImageType, InputType +from .mil.passes.pass_pipeline import PassPipeline, PipelineManager + + +class ConverterRegistry: + frontends = {} + backends = {} + backend_alias_names = {} + + @staticmethod + def frontend(converter): + ConverterRegistry.frontends[converter.name] = converter + return converter + + @staticmethod + def backend(converter): + ConverterRegistry.backends[converter.name] = converter + if 'alias_names' in converter.__dict__: + for name in converter.alias_names: + ConverterRegistry.backend_alias_names[name] = converter.name + return converter + + +@ConverterRegistry.frontend +class MILFrontend: + name = "milinternal" + + def __call__(self, model, *args, **kwargs): + specification_version = kwargs.get("specification_version", None) + if specification_version is not None: + max_opset_version, op = model._get_max_opset_version_and_op() + if max_opset_version > specification_version: + msg = ( + "Please update the minimum_deployment_target to {!s}," + " since op {} is only available in opset {!s} or newer." 
+ ).format(max_opset_version, op.op_type, max_opset_version) + raise ValueError(msg) + + if "inputs" in kwargs and kwargs["inputs"] is not None: + inputs = kwargs["inputs"] + if not isinstance(inputs, (list, tuple)): + raise ValueError( + "Type of inputs should be list or tuple, got {} instead.".format( + type(inputs) + ) + ) + if not all([isinstance(i, InputType) for i in inputs]): + raise ValueError( + "Type of inputs should be list or tuple of TensorType or ImageType, got {} instead.".format( + [type(i) for i in inputs] + ) + ) + + for idx, inp in enumerate(inputs): + # We set the default image format in MIL as NCHW, since only NCHW is + # natively supported by MIL ops (ex. Conv/Pool/etc.) + if isinstance(inp, ImageType) and inputs[idx].channel_first is None: + inputs[idx].channel_first = True + model.set_main_input_types(tuple(inputs)) + return model + + +@ConverterRegistry.frontend +class TensorFlowFrontend: + name = "tensorflow" + + def __call__(self, *args, **kwargs): + from .frontend.tensorflow.load import TF1Loader + + tf1_loader = TF1Loader(*args, **kwargs) + return tf1_loader.load() + + +@ConverterRegistry.frontend +class TensorFlow2Frontend: + name = "tensorflow2" + + def __call__(self, *args, **kwargs): + from .frontend.tensorflow2.load import TF2Loader + + tf2_loader = TF2Loader(*args, **kwargs) + return tf2_loader.load() + + +@ConverterRegistry.frontend +class TorchFrontend: + name = "pytorch" + + def __call__(self, *args, **kwargs): + from .frontend.torch.load import load + + return load(*args, **kwargs) + + +@ConverterRegistry.backend +class NNProtoBackend: + name = "neuralnetwork" + alias_names = [] + + def __call__(self, *args, **kwargs): + from .backend.nn.load import load + + return load(*args, **kwargs) + + +@ConverterRegistry.backend +class MILProtoBackend: + name = "mlprogram" + alias_names = [] + + def __call__(self, *args, **kwargs): + from .backend.mil.load import load as backend_load + + return backend_load(*args, **kwargs) + + +def _reset_conversion_state(): + ''' + Reset any stateful properties/variables that are populated during conversion. + ''' + + # Clear the "name_count" dict, + # which is used to generate unique op names in the mil builder class. + mb.name_count.clear() + + # Clear "k_used_symbols" dict, and the int counter "k_num_internal_syms" that are used to track symbolic names + global k_used_symbols + global k_num_internal_syms + k_used_symbols.clear() + k_num_internal_syms = 0 + + +@_profile +def mil_convert( + model, + convert_from, + convert_to, + compute_units, + **kwargs +): + """ + Convert model from a specified frontend `convert_from` to a specified + converter backend `convert_to`. + + Parameters + ---------- + model: TF, PyTorch, or `coremltools.converters.mil.Program`. + See `coremltools.converters.convert` + + convert_from: str + The value must be one of ['tensorflow', 'tensorflow2', + 'pytorch', 'milinternal'] (aka name of a `ConverterRegistry.frontend`). + + compute_units: coremltools.ComputeUnit + A enum with three possible values: + - coremltools.ComputeUnit.ALL - use all compute units available, including the + neural engine. + - coremltools.ComputeUnit.CPU_ONLY - limit the model to only use the CPU. + - coremltools.ComputeUnit.CPU_AND_GPU - use both the CPU and GPU, but not the + neural engine. 
+ + convert_to: str + Value must be one of ['neuralnetwork', 'mlprogram', 'milinternal'] + See `coremltools.converters.convert` + + Returns + ------- + model: `coremltools.models.MLModel` or + `coremltools.converters.mil.Program` + See `coremltools.converters.convert` + """ + return _mil_convert(model, convert_from, convert_to, ConverterRegistry, MLModel, compute_units, **kwargs) + + +def _mil_convert( + model, + convert_from, + convert_to, + registry, + modelClass, + compute_units, + **kwargs +): + + # Map "convert_to" values that correspond to the alias_names, to the actual supported registries + if convert_to in registry.backend_alias_names: + msg = "Please use '{}' instead of '{}' with the 'convert_to' argument. The latter will be removed in the future." + _warnings.warn(msg.format(registry.backend_alias_names[convert_to], convert_to)) + convert_to = registry.backend_alias_names[convert_to] + + if convert_to == 'mlprogram': + # mil_convert_to_proto places weight files inside the weights_dir + weights_dir = _tempfile.TemporaryDirectory() + kwargs["weights_dir"] = weights_dir.name + + proto, mil_program = mil_convert_to_proto( + model, + convert_from, + convert_to, + registry, + **kwargs + ) + + _reset_conversion_state() + + if convert_to == 'milinternal': + return mil_program # mil program + elif convert_to == 'milpython': + return proto # internal mil data structure + + elif convert_to == "mlprogram": + package_path = _create_mlpackage( + proto, kwargs.get("weights_dir"), kwargs.get("package_dir") + ) + return modelClass( + package_path, + is_temp_package=not kwargs.get("package_dir"), + mil_program=mil_program, + skip_model_load=kwargs.get("skip_model_load", False), + compute_units=compute_units, + ) + + return modelClass(proto, + mil_program=mil_program, + skip_model_load=kwargs.get('skip_model_load', False), + compute_units=compute_units) + + +def mil_convert_to_proto( + model, convert_from, convert_to, converter_registry, main_pipeline=None, **kwargs +) -> Tuple[Optional[MLModel], Program]: + """ + Convert model to proto object. + + Parameters + ---------- + model: See `mil_convert` + + convert_from: See `mil_convert` + + convert_to: See `mil_convert` + + converter_registry: `ConverterRegistry` + Available frontend and backend converters + + main_pipeline: `PassPipeline` + The main pipeline with options set by users. + """ + frontend_converter_type = converter_registry.frontends.get(convert_from.lower()) + if not frontend_converter_type: + raise NotImplementedError( + f'Frontend converter "{convert_from}" not implemented, must be ' + f"one of: {list(converter_registry.frontends.keys())}" + ) + + kwargs.setdefault("convert_to", convert_to) + + if main_pipeline is None: + # If the client calls `mil_convert` directly, the `pass_pipeline` is None. To keep the + # behaviour same as before, the quantization pass is removed in this situation. + # TODO: rdar://106111553 ([Infra] Quantization Pass is skipped when `mil_convert` is called directly.) 
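+        # For reference, conversions that go through the public `ct.convert` API
+        # get the default pipeline (including the fp16 quantization pass), and
+        # callers can tune it via `pass_pipeline`. A hedged sketch of that usage
+        # (assumes the public `ct.PassPipeline` interface; `source_model` is a
+        # placeholder name, not defined in this module):
+        #
+        #   import coremltools as ct
+        #   pipeline = ct.PassPipeline()
+        #   pipeline.remove_passes({"common::add_fp16_cast"})
+        #   mlmodel = ct.convert(source_model, pass_pipeline=pipeline)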
+        main_pipeline = PassPipeline()
+        main_pipeline.remove_passes({"common::add_fp16_cast"})
+    frontend_pipeline, backend_pipeline = _construct_other_pipelines(
+        main_pipeline, convert_from, convert_to
+    )
+
+    frontend_converter = frontend_converter_type()
+    prog = frontend_converter(model, **kwargs)
+    PipelineManager.apply_pipeline(prog, frontend_pipeline)
+
+    PipelineManager.apply_pipeline(prog, main_pipeline)
+
+    prog._check_invalid_tensor_rank()
+
+    if convert_to == 'milinternal':
+        return None, prog
+
+    PipelineManager.apply_pipeline(prog, backend_pipeline)
+    backend_converter_type = converter_registry.backends.get(convert_to.lower())
+    if not backend_converter_type:
+        raise NotImplementedError(
+            f'Backend converter "{convert_to}" not implemented, must be '
+            f"one of: {list(converter_registry.backends.keys())}"
+        )
+    backend_converter = backend_converter_type()
+    out = backend_converter(prog, **kwargs)
+
+    return out, prog
+
+
+def _construct_other_pipelines(
+    main_pipeline: PassPipeline, convert_from: Text, convert_to: Text
+) -> Tuple[PassPipeline, PassPipeline]:
+    """
+    Construct other pipelines based on the main pipeline. It includes:
+    - The frontend pipeline which will run in the frontend converter
+    - The backend pipeline which will run in the backend converter
+    As the main pipeline could have passes which also exist in the frontend/backend pipelines,
+    we need to make sure the pass options are set properly in all pipelines.
+    For example, if users set options to skip some vars in the `const_elimination` pass, we want
+    to make sure those vars are skipped not only in the main pipeline, but also in every other
+    pipeline where the `const_elimination` pass runs.
+
+    TODO: rdar://106046237 ([Infra] Expose Backend and Frontend Pipeline to External Users)
+    Currently users can only control the passes in the main pipeline, by passing the
+    `pass_pipeline` param. There are two reasons why we don't expose the frontend/backend
+    pipelines at the current stage:
+    - The frontend and backend specific passes need to be well documented.
+    - The interface needs a more careful design, as we don't want to overwhelm users with too
+      many params, such as
+      ct.convert(..., frontend_pipeline=xxx, backend_pipeline=xxx, main_pipeline=xxx).
+    """
+    # Set the main pipeline options specified by the user in the frontend/backend pipelines.
+    frontend_pipeline = PassPipeline.get_pipeline(f"frontend_{convert_from.lower()}")
+    frontend_pipeline.set_options_by_another_pipeline(main_pipeline)
+    backend_pipeline = PassPipeline.get_pipeline(f"backend_{convert_to.lower()}")
+    backend_pipeline.set_options_by_another_pipeline(main_pipeline)
+
+    # If a pass is skipped in the main pipeline, we also skip it in the frontend/backend pipelines.
+    default_main_pipeline = PassPipeline.get_pipeline("default")
+    passes_skipped_in_main = set(default_main_pipeline.passes) - set(main_pipeline.passes)
+    frontend_pipeline.remove_passes(passes_skipped_in_main)
+    backend_pipeline.remove_passes(passes_skipped_in_main)
+
+    return frontend_pipeline, backend_pipeline
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/debugging_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/debugging_utils.py
new file mode 100644
index 00000000..a2a59d8f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/debugging_utils.py
@@ -0,0 +1,175 @@
+# Copyright (c) 2023, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import copy
+from collections import OrderedDict
+from typing import List, Optional
+
+import coremltools as ct
+from coremltools.models import MLModel
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil.passes.helper import block_context_manager
+from coremltools.converters.mil.mil.passes.pass_registry import PASS_REGISTRY
+from coremltools.converters.mil.frontend.milproto.load import \
+    load as milproto_to_pymil
+
+def extract_submodel(
+        model: MLModel,
+        outputs: List[str],
+        inputs: Optional[List[str]] = None,
+        function_name: str = "main"
+    ) -> MLModel:
+    """
+    This utility function allows the user to extract a submodel from a Core ML model.
+
+    For a NeuralNetwork model, only an in-memory Core ML model can be extracted; that is,
+    this function should only be called on a model that comes directly from `ct.convert`.
+    It is not allowed to load the model from disk and then call this API.
+
+    For an ML program model, both cases (in memory / from disk) are supported.
+
+    Parameters
+    ----------
+    model: MLModel
+        The Core ML model from which the submodel is extracted.
+
+    outputs: list[str]
+        A list of names of Vars, which are the outputs of the extracted submodel.
+
+    inputs: list[str] (Optional)
+        A list of names of Vars, which are the inputs of the extracted submodel.
+        If not provided, we use the inputs from the original model.
+
+    function_name: str (Optional)
+        Name of the function where the subgraph is extracted. Default "main".
+
+    Examples
+    --------
+
+    NeuralNetwork:
+
+    >>> from coremltools.converters.mil.debugging_utils import extract_submodel
+    >>> mlmodel = ct.convert(model, convert_to="neuralnetwork")
+    >>> outputs = ["output_0", "output_1"]
+    >>> submodel = extract_submodel(mlmodel, outputs)
+
+    ML Program:
+    >>> from coremltools.converters.mil.debugging_utils import extract_submodel
+    >>> mlmodel = ct.convert(model, convert_to="mlprogram")
+    >>> outputs = ["output_0", "output_1"]
+    >>>
+    >>> # Directly extract model in memory
+    >>> submodel = extract_submodel(mlmodel, outputs)
+    >>>
+    >>> # Extract model loaded from disk
+    >>> mlmodel.save("model.mlpackage")
+    >>> mlmodel = coremltools.models.MLModel("model.mlpackage")
+    >>> submodel = extract_submodel(mlmodel, outputs)
+
+    """
+    def validate_inputs(func, input_vars):
+        reachable_vars = set(input_vars)
+        for op in func.operations:
+            if op.op_type == "const":
+                reachable_vars.add(op.outputs[0])
+
+        for op in func.operations:
+            if all([x in reachable_vars for x in op.inputs.values()]):
+                reachable_vars.update(op.outputs)
+
+        for out in func.outputs:
+            if out not in reachable_vars:
+                raise ValueError(f"output {out.name} not reachable from inputs")
+
+    @block_context_manager
+    def replace_inputs(func, input_vars):
+        func_inputs = {}
+        for input in input_vars:
+            name = input.name
+            func_inputs[name] = mb.placeholder(input.shape, dtype=input.dtype)
+            func.replace_uses_of_var_after_op(
+                anchor_op=input.op,
+                old_var=input,
+                new_var=func_inputs[name].outputs[0],
+                no_check_var_visibility=True,
+            )
+        func._input_dict = OrderedDict()
+        for k, v in func_inputs.items():
+            v.set_name(k)
+            func._input_dict[k] = v.outputs[0]
+
+    if not isinstance(outputs, (list, tuple)):
+        raise ValueError(f"outputs must be of type list/tuple. Got {type(outputs)}.")
+
+    for output in outputs:
+        if not isinstance(output, str):
+            raise ValueError(f"outputs must be a list of str. Got element {output} with type {type(output)}.")
+        if outputs.count(output) > 1:
+            raise ValueError(f"outputs must be a list of unique elements. '{output}' occurs {outputs.count(output)} times.")
+
+    model_spec = model.get_spec()
+    backend = "mlprogram" if model_spec.WhichOneof("Type") == "mlProgram" else "neuralnetwork"
+    if backend == "neuralnetwork":
+        if model._mil_program is None:
+            raise ValueError("NeuralNetwork model loaded from the disk is not supported by the extract_submodel util.")
+        program = model._mil_program
+    else:
+        assert backend == "mlprogram"
+        if model._mil_program is None:
+            program = milproto_to_pymil(
+                model_spec=model_spec,
+                specification_version=model_spec.specificationVersion,
+                file_weights_dir=model.weights_dir,
+            )
+        else:
+            program = model._mil_program
+
+    # extract subgraph
+    prog = copy.deepcopy(program)
+    func = prog.functions[function_name]
+    vars = {}
+    new_outputs = []
+    for op in func.operations:
+        for o in op.outputs:
+            if o.name in outputs:
+                new_outputs.append(o)
+                vars[o.name] = o
+
+    if len(outputs) != len(new_outputs):
+        new_outputs_names = [o.name for o in new_outputs]
+        outputs_not_found = [name for name in outputs if name not in new_outputs_names]
+        raise ValueError(f"outputs {outputs_not_found} not found in the function.")
+
+    func.set_outputs(new_outputs)
+
+    # Clean up the graph
+    PASS_REGISTRY["common::dead_code_elimination"](prog)
+
+    # If the inputs are provided, we extract the subgraph starting from them
+    if inputs is not None:
+        if not isinstance(inputs, (list, tuple)):
+            raise ValueError(f"inputs must be of type list/tuple. Got {type(inputs)}.")
+
+        input_vars = []
+        for input in inputs:
+            if not isinstance(input, str):
+                raise ValueError(f"inputs must be a list of str. Got element {input} with type {type(input)}.")
+            if inputs.count(input) > 1:
+                raise ValueError(f"inputs must be a list of unique elements. '{input}' occurs {inputs.count(input)} times.")
+            if input not in vars and input not in func.inputs:
+                raise ValueError(f"input {input} not found in the function.")
+            if input in vars:
+                input_vars.append(vars[input])
+            if input in func.inputs:
+                input_vars.append(func.inputs[input])
+
+        validate_inputs(func, input_vars)
+        replace_inputs(func, input_vars)
+        PASS_REGISTRY["common::dead_code_elimination"](prog)
+
+    prog.skip_all_passes = True
+    submodel = ct.convert(prog, convert_to=backend, compute_units=model.compute_unit)
+
+    return submodel
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/__init__.py
new file mode 100644
index 00000000..545ac7e5
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/__init__.py
@@ -0,0 +1,4 @@
+# Copyright (c) 2021, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/README.md b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/README.md
new file mode 100644
index 00000000..aff706f8
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/README.md
@@ -0,0 +1,587 @@
+# Generic Pattern Matching Infrastructure Documentation
+
+## _**Introduction**_
+
+This document contains the **motivation**, **user flow**, **documentation**, and **instructions** for adding/running a pass for Arjun Singla’s Generic Pattern Matching Infrastructure.
+
+## _**What We Know**_
+
+* Existing TensorFlow and Pytorch models are converted to intermediate representations, GraphDef and TorchScript respectively, by the frameworks themselves when they are compiled. These intermediate representations are “verbose” - each operation is expanded into the combination of its most basic operations.
+* Then, our Apple infrastructure performs a one to one mapping, taking these intermediate representations and converting them into a MIL (Model Intermediate Language) representation. As this mapping is one to one, the MIL representation is “verbose” as well.
+* Now, the goal becomes to take these “verbose” MIL representations and make them compact again - taking sets of simple operations and consolidating them into their more complicated cousins - the same ones that the user defined in the original TensorFlow and Pytorch models. These transformations are executed when we convert the MIL representation into the final CoreML one.
+* The project
+    * My project is working on a very specific subproblem of this larger issue. The goal is to take these “verbose” MIL representations, detect **any** sequence of operations, and replace it with **any** other sequence of operations.
+
+## _**The User Flow: Documentation**_
+
+* We are assuming that the user has a very high understanding of PyMil. So, we will have the user define a PyMil program, which will be the pattern to detect in the larger machine learning model. Attached is a code snippet, taken from the PyMil docs, on how to define a program:
+
+```
+#import builder
+from coremltools.converters.mil import Builder as mb
+
+# Input to MIL program is a list of tensors. Here we have one input with
+# shape (1, 100, 100, 3) and implicit dtype == fp32
+
+@mb.program(input_specs=[mb.TensorSpec(shape=(1, 100, 100, 3)),])
+def prog(x):
+
+    # MIL operation takes named inputs (instead of positional inputs).
+    # Here name argument is optional.
+
+    x = mb.relu(x=x, name='relu')
+    x = mb.transpose(x=x, perm=[0, 3, 1, 2], name='transpose')
+    x = mb.reduce_mean(x=x, axes=[2, 3], keep_dims=False, name='reduce')
+    x = mb.log(x=x, name='log')
+    return x
+```
+
+* It is important that the user follows these constraints when writing their MIL program:
+    * **This program must only have one root variable**
+    * **This program has exactly one proper last operation topologically.**
+    * **Each operation in the program must have a UNIQUE NAME!!!**
+    ```
+    # Read from left to right, this pattern has two "last" operations,
+    # and is not permitted
+
+    --> op1 --- op2 --- op3 -->
+         |
+         | ---- op4 -->
+
+    # Read from left to right, this pattern has one "last" operation,
+    # and is permitted. The only thing that must be
+    # singular here is the last operation (and, of course, the root var)
+
+    --> op1 --- op2 --- op3 --- op5 -->
+         |               |
+         | ---- op4 -----|
+    ```
+
+* The second function the user needs to define is the following:
+`def var_constraints(pattern):`
+    * Parameters
+        * a `Pattern` object
+            * What is a pattern object, you may ask? Excellent question!
+            * A `Pattern` object stores the captured operations in the larger machine learning model. So, let’s say that the user defined a pattern ` return mb.relu(x=x, name='mycoolrelu') `. Then, `pattern.mycoolrelu` would return the **captured** relu operation in the larger machine learning model!
+            * The pattern also has the following additional attributes:
+                * `pattern.root_var`, which is the root variable of the first operation of the captured pattern (and corresponds to the user defined pattern’s root variable)
+                * `pattern.final_op`, the operation in the larger machine learning model that corresponds to the last operation in the user defined pattern.
+                * `pattern.block`, the block in the larger machine learning model where the pattern was found
+                * `pattern.op_set`, a set of all the operations captured from the larger machine learning model. The user should call `pattern.op_list()` to return a list version of the set (without duplicates)
+            * Note: The user can add additional attributes to the pattern object using this method if they choose:
+            `pattern.add_attribute("attr_name", attribute)`
+    * Returns `True` if the pattern satisfies certain constraints (i.e. constant input values, rank, etc). Basically, anything beyond its topological order with respect to operation types, which is already identical to that of the user defined pattern. Returns `False` otherwise.
+
+* The third function the user needs to define is the following:
+`def transform_pattern(pattern):`
+    * Parameters
+        * a `Pattern` object
+    * This function needs to replace the captured operations (stored in the pattern object) with whatever you want! Feel free to define another MIL program and replace the pattern with that second program.
+
+* The last thing the user needs to do is **call** the following function:
+`register_generic_pass(ops_arrangement, var_constraints, transform_pattern, pass_name, namespace)`
+    * Parameters
+        * `ops_arrangement`: the user defined pattern
+        * `var_constraints`: the user defined function (see above)
+        * `transform_pattern`: the user defined function (see above)
+        * `pass_name`: a string representing the name of the pass.
+        * `namespace`: a string representing the namespace of the pass (i.e. `"common"`)
+    * Calling this function will register the pass with the given `pass_name` and `namespace`, so that it will be called when the passes are run.
+    * If you have multiple patterns to detect for a single pass, just call this function multiple times with the respective `ops_arrangement`, `var_constraints`, and `transform_pattern`, but have the `pass_name` and `namespace` be the same. That way, all of these “mini passes” will be registered under the same pass!
+
+## Gelu Example - Everything the User Does
+
+```
+# Full source @ coreml/coremltools/coremltools/converters/mil/experimental/passes/generic_gelu_tanh_approximation_fusion.py
+# This is a simple function defined by the user
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os + +from coremltools.converters.mil import Builder as mb +from coremltools.converters.mil.mil.passes.helper import _check_var_scalar_value +from coremltools.converters.mil.experimental.passes.generic_pass_infrastructure import register_generic_pass + +# This is the user defined pattern to detect +@mb.program(input_specs=[mb.TensorSpec(shape=([1, 1024, 4096])), ]) +def gelu_to_detect_1(x): + # MIL operation takes named inputs (instead of positional inputs). + # Here `name` argument is MANDATORY. + pow = mb.pow(x=x, y=3.0, name="pow") + mul_1 = mb.mul(x=0.044714998453855515, y=pow, name="mul_1") + add = mb.add(x=x, y=mul_1, name="add") + mul_2 = mb.mul(x=0.7978845834732056, y=add, name="mul_2") + tanh = mb.tanh(x=mul_2, name="tanh") + add_1 = mb.add(x=1.0, y=tanh, name="add_1") + mul = mb.mul(x=0.5, y=add_1, name="mul") + mul_3 = mb.mul(x=mul, y=x, name="mul_3") + return mul_3 +""" +y = x * (0.5 * (tanh(((.0447)x^3 + x ) * sqrt(2/pi)) + 1)) + + +[...] -----> pow (3) ----> mul (.044715) ---> add -----> mul (sqrt(2/pi)) ---> tanh ----> add (1) ----> mul (0.5) -----> mul ---> [...] + | ^ ^ + | | | + |------------------------------------------------------------------------------------------------------------------------ + +""" + +# This is another user defined pattern to detect +# In this pattern, 0.5 is first multiplied with the input which is then multiplied with the tanh term. +# In pattern1, 0.5 is first multiplied with the tanh term, and then multiplied with input +@mb.program(input_specs=[mb.TensorSpec(shape=([1, 1024, 4096])), ]) +def gelu_to_detect_2(x): + pow = mb.pow(x=x, y=3.0, name ="pow") + mul_1 = mb.mul(x=0.044714998453855515, y=pow, name="mul_1") + add = mb.add(x=x, y=mul_1, name="add") + mul_2 = mb.mul(x=0.7978845834732056, y=add, name="mul_2") + tanh = mb.tanh(x=mul_2, name="tanh") + add_1 = mb.add(x=1.0, y=tanh, name="add_1") + mul = mb.mul(x = 0.5, y=x, name="mul") + mul_3 = mb.mul(x=mul, y=add_1, name="mul_3") + return mul_3 + +""" +y = (0.5 * x) * (tanh(((.0447)x^3 + x ) * sqrt(2/pi)) + 1) + + --------------------------------------------------------------------------------------------------------- + ^ | + | V + [...] -----> mul(0.5) pow (3) ----> mul (.044715) ---> add -----> mul (sqrt(2/pi)) ---> tanh ----> add (1) -----> mul ---> [...] 
+          |                                                                       ^                    ^
+          |                                                                       |                    |
+          |-----------------------------------------------------------------------
+"""
+
+# Constraint enforcement
+def var_constraints(pattern):
+    passed = True
+
+    passed = passed and (_check_var_scalar_value(pattern.mul.y, 0.5) or _check_var_scalar_value(pattern.mul.x, 0.5))
+    passed = passed and _check_var_scalar_value(pattern.pow.y, 3.0)
+
+    passed = passed and (
+        _check_var_scalar_value(pattern.mul_1.y, 0.044715) or
+        _check_var_scalar_value(pattern.mul_1.x, 0.044715)
+    )
+
+    passed = passed and (
+        _check_var_scalar_value(pattern.mul_2.y, 0.79788) or
+        _check_var_scalar_value(pattern.mul_2.x, 0.79788)
+    )
+
+    passed = passed and (
+        _check_var_scalar_value(pattern.add_1.y, 1) or
+        _check_var_scalar_value(pattern.add_1.x, 1)
+    )
+
+    return passed
+
+# Transformation Logic
+def transform_pattern(pattern):
+    # remove all the ops, and replace with a gelu op
+    out_name = pattern.mul_3.outputs[0].name
+    x = mb.gelu(x=pattern.root_var, mode="TANH_APPROXIMATION", name=out_name, before_op=pattern.mul)
+
+    pattern.mul_3.enclosing_block.replace_uses_of_var_after_op(
+        anchor_op=pattern.mul_3, old_var=pattern.mul_3.outputs[0], new_var=x
+    )
+
+    # Remove all the ops at once
+    pattern.block.remove_ops(pattern.op_list())
+
+# Registering the Pass
+register_generic_pass(ops_arrangement=gelu_to_detect_1, var_constraints=var_constraints,
+                      transform_pattern=transform_pattern, pass_name="fuse_gelu_tanh_approximation", namespace="common")
+
+register_generic_pass(ops_arrangement=gelu_to_detect_2, var_constraints=var_constraints,
+                      transform_pattern=transform_pattern, pass_name="fuse_gelu_tanh_approximation", namespace="common")
+```
+
+## Linear Bias Example - Everything the User Does
+
+```
+# Full source @ coreml/coremltools/coremltools/converters/mil/mil/passes/linear_bias_fusion.py
+arbitrary_shape = (get_new_symbol(), get_new_symbol())
+np.random.seed()
+arbitrary_weight = np.random.rand(4,3)
+arbitrary_bias = np.random.rand(4)
+
+@mb.program(input_specs=[mb.TensorSpec(shape=arbitrary_shape)])
+def pattern_add(x):
+    """
+    Original:
+        %4 = linear(x=%1, weight=%2, bias=%3) # %2 is a rank-2 const tensor (weight)
+                                              # %3 is a rank-1 const tensor (bias)
+        ...
+        %6 = add(x=%4, y=%5) # %5 is a const tensor with same shape as %3
+
+    Result:
+        %8 = linear(x=%1, weight=%2, bias=%7) # where %7 is a new const tensor with value
+                                              # %7 = %3 + %6
+    """
+    linear = mb.linear(x=x, weight=arbitrary_weight, bias=arbitrary_bias, name="linear")
+    add_or_sub = mb.add(x=linear, y=arbitrary_bias, name="add_or_sub")
+    return add_or_sub
+
+@mb.program(input_specs=[mb.TensorSpec(shape=arbitrary_shape)])
+def pattern_sub(x):
+    """
+    Original:
+        %4 = linear(x=%1, weight=%2, bias=%3) # %2 is a rank-2 const tensor (weight)
+                                              # %3 is a rank-1 const tensor (bias)
+        ...
+        %6 = sub(x=%5, y=%4) # %5 is a const tensor with a broadcastable shape with %3,
+                               i.e. if %3 has shape (Dout), %5 could be (1, Dout).
## Linear Bias Example - Everything the User Does

```
# Full source @ coreml/coremltools/coremltools/converters/mil/mil/passes/linear_bias_fusion.py
import numpy as np

from coremltools.converters.mil import Builder as mb
from coremltools.converters.mil.mil import get_new_symbol
from coremltools.converters.mil.experimental.passes.generic_pass_infrastructure import register_generic_pass

arbitrary_shape = (get_new_symbol(), get_new_symbol())
np.random.seed()
arbitrary_weight = np.random.rand(4, 3)
arbitrary_bias = np.random.rand(4)

@mb.program(input_specs=[mb.TensorSpec(shape=arbitrary_shape)])
def pattern_add(x):
    """
    Original:
        % 4 = linear(x= % 1, weight = % 2, bias = % 3)  # %2 is a rank-2 const tensor (weight)
                                                        # %3 is a rank-1 const tensor (bias)
        ...
        % 6 = add(x= % 4, y = % 5)  # %5 is a const tensor with same shape as %3

    Result:
        % 8 = linear(x= % 1, weight = % 2, bias = % 7)  # where %7 is a new const tensor with value
                                                        # %7 = %3 + %6
    """
    linear = mb.linear(x=x, weight=arbitrary_weight, bias=arbitrary_bias, name="linear")
    add_or_sub = mb.add(x=linear, y=arbitrary_bias, name="add_or_sub")
    return add_or_sub

@mb.program(input_specs=[mb.TensorSpec(shape=arbitrary_shape)])
def pattern_sub(x):
    """
    Original:
        %4 = linear(x=%1, weight=%2, bias=%3)  # %2 is a rank-2 const tensor (weight)
                                               # %3 is a rank-1 const tensor (bias)
        ...
        %6 = sub(x=%5, y=%4)  # %5 is a const tensor with a broadcastable shape with %3,
                              # i.e. if %3 has shape (Dout), %5 could be (1, Dout).

    Result:
        %9 = linear(x=%1, weight=%7, bias=%8)  # where %7 is a new const tensor with value %7 = -%2
                                               # %8 = %5 - %3
    """
    linear = mb.linear(x=x, weight=arbitrary_weight, bias=arbitrary_bias, name="linear")
    add_or_sub = mb.sub(x=linear, y=arbitrary_bias, name="add_or_sub")
    return add_or_sub


def var_constraints(pattern):
    passed = True
    passed = passed and (pattern.add_or_sub.x.val is not None or pattern.add_or_sub.y.val is not None)

    is_sub, is_first_input = _get_is_sub_and_is_first_input(pattern)
    linear_bias, bias, Dout = _get_linear_bias_bias_Dout(pattern, is_first_input)

    # check if the shape is broadcastable
    passed = passed and np.prod(linear_bias.shape) == np.prod(bias.shape)
    passed = passed and bias.shape[-1] == Dout
    return passed


def _get_is_sub_and_is_first_input(pattern):
    is_sub = pattern.add_or_sub.op_type == "sub"
    is_first_input = pattern.add_or_sub.x == pattern.linear.outputs[0]
    return is_sub, is_first_input


def _get_linear_bias_bias_Dout(pattern, is_first_input):
    linear_bias = pattern.linear.bias.val
    bias = pattern.add_or_sub.y.val if is_first_input else pattern.add_or_sub.x.val
    Dout = linear_bias.shape[0]
    return linear_bias, bias, Dout


def transform_pattern(pattern):
    is_sub, is_first_input = _get_is_sub_and_is_first_input(pattern)
    linear_bias, bias, Dout = _get_linear_bias_bias_Dout(pattern, is_first_input)
    bias = np.reshape(bias, (Dout,))

    if is_sub and is_first_input:
        bias = -bias
    if is_sub and not is_first_input:
        linear_bias = -linear_bias

    new_bias = linear_bias + bias

    # compute the new weight
    if is_sub and not is_first_input:
        new_weight = -pattern.linear.weight.val
    else:
        new_weight = pattern.linear.weight.val

    # create a new linear op with the new weight, bias value, copying rest of the attributes
    out_name = pattern.add_or_sub.outputs[0].name
    linear_kargs = {"weight": new_weight, "bias": new_bias, "name": out_name, "before_op": pattern.linear}

    linear_kargs.update({k: v for k, v in pattern.linear.inputs.items() if k not in ["weight", "bias"]})

    x = mb.linear(**linear_kargs)

    pattern.add_or_sub.enclosing_block.replace_uses_of_var_after_op(
        anchor_op=pattern.add_or_sub, old_var=pattern.add_or_sub.outputs[0], new_var=x
    )
    # Remove all the ops at once
    pattern.block.remove_ops(pattern.op_list())


register_generic_pass(
    ops_arrangement=pattern_add,
    var_constraints=var_constraints,
    transform_pattern=transform_pattern,
    pass_name="fuse_linear_bias",
    namespace="common",
)

register_generic_pass(
    ops_arrangement=pattern_sub,
    var_constraints=var_constraints,
    transform_pattern=transform_pattern,
    pass_name="fuse_linear_bias",
    namespace="common",
)
```
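The algebra this pass relies on is easy to spot-check with plain numpy (a standalone sketch, shapes arbitrary):

```
import numpy as np

W, b = np.random.rand(4, 3), np.random.rand(4)
c, x = np.random.rand(4), np.random.rand(3)

# pattern_add: a linear followed by an add folds the constant into the bias
assert np.allclose((W @ x + b) + c, W @ x + (b + c))

# pattern_sub with the linear output as the second input also negates the weight
assert np.allclose(c - (W @ x + b), (-W) @ x + (c - b))
```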
## Layernorm/Instancenorm Fusion - Everything the User Does (for one of the patterns)

```
# Full source @ coreml/coremltools/coremltools/converters/mil/experimental/passes/generic_layernorm_instancenorm_pattern_fusion.py
@mb.program(input_specs=[mb.TensorSpec(shape=(1, 100, 100, 3)),])
def layernorm(x):

    # MIL operation takes named inputs (instead of positional inputs).

    y = mb.reduce_mean(x=x, keep_dims=True, name="reduce_mean")
    sub = mb.sub(x=x, y=y, name="sub")
    ...
    x = mb.add(x=x, y=sub, name="last_add")
    return x

# User defined helper function
def _check_no_output_connection(block: Block, ops: List[Operation]) -> bool:
    """
    Check that none of the ops in this pattern is connected to the output
    (except the last add op).

    :param block: Block
    :param ops: List of operations to check on.
    """
    for op in ops[:-1]:
        for out in op.outputs:
            if out in block.outputs:
                return False
    return True

# User defined helper function
def _check_reduce_op(reduce_op: Operation, mode: str = "reduce_mean") -> bool:
    """
    Check whether or not the reduction op satisfies the following conditions:
    - Mode is expected.
    - Does not change rank (keep_dims is True).
    - Axes are known at compile time.

    :param reduce_op: reduce op to check on
    :param mode: reduce mode
    """
    if reduce_op is None:
        return False
    if reduce_op.op_type != mode:
        return False
    if reduce_op.keep_dims is None or reduce_op.keep_dims.val is None:
        return False
    if reduce_op.keep_dims.val is False:
        return False
    if reduce_op.axes is None or reduce_op.axes.val is None:
        return False
    return True


def var_constraints(pattern) -> bool:

    root_var = pattern.reduce_op.x
    epsilon_var = pattern.add_op1.y if pattern.add_op1.x == pattern.reduce_op2.outputs[0] else pattern.add_op1.x
    gamma_var = pattern.mul_op1.y if pattern.mul_op1.x == pattern.rsqrt_op.outputs[0] else pattern.mul_op1.x
    beta_var = pattern.sub_op2.x
    rank = len(root_var.shape)

    passed = True

    passed = passed and _check_no_output_connection(pattern.block, pattern.op_list())

    passed = passed and root_var.shape is not None
    passed = passed and rank == 4
    passed = passed and _check_reduce_op(pattern.reduce_op)
    passed = passed and not (epsilon_var.val is None or len(epsilon_var.val.shape) != 0)
    passed = passed and gamma_var.val is not None
    passed = passed and beta_var.val is not None

    pattern.add_attribute('epsilon_var', epsilon_var)
    pattern.add_attribute('gamma_var', gamma_var)
    pattern.add_attribute('beta_var', beta_var)

    return passed


def transform_pattern(pattern):

    # Insert instance_norm / layer_norm and delete all ops.

    axes = pattern.reduce_op.axes.val
    rank = len(pattern.reduce_op.x.shape)

    # check whether the pattern is instance_norm or layer_norm
    is_layernorm = False
    is_instancenorm = False
    is_require_rank4_transpose = False

    negative_axes = [a - rank if a >= 0 else a for a in axes]
    negative_axes.sort()

    if len(pattern.gamma_var.val.shape) == len(axes) and len(pattern.beta_var.val.shape) == len(axes):
        # axes for layer_norm must be [-1] or [-1, -2] or [-1, -2, -3] and so on
        if negative_axes == list(range(-len(negative_axes), 0)):
            is_layernorm = True

    if rank == 4 and (negative_axes == [-2, -1] or negative_axes == [-3, -2]):
        if (
            len(np.squeeze(pattern.gamma_var.val).shape) == 1
            and len(np.squeeze(pattern.beta_var.val).shape) == 1
        ):
            is_instancenorm = True
        if negative_axes == [-3, -2]:
            is_require_rank4_transpose = True

    if not (is_instancenorm or is_layernorm):
        return False

    # remove all the ops, and replace with a layer_norm or instance_norm op
    out_name = pattern.end_op.outputs[0].name

    if is_require_rank4_transpose:
        x = mb.transpose(
            x=pattern.reduce_op.x,
            perm=[0, 3, 1, 2],
            name=out_name + "_transpose_nhwc_nchw",
            before_op=pattern.end_op,
        )
    if is_instancenorm:
        x = mb.instance_norm(
            x=x if is_require_rank4_transpose else pattern.reduce_op.x,
            gamma=np.squeeze(pattern.gamma_var.val),
            beta=np.squeeze(pattern.beta_var.val),
            epsilon=pattern.epsilon_var,
            name=out_name + "_instancenorm" if is_require_rank4_transpose else out_name,
            before_op=pattern.end_op,
        )
    else:  # is_layernorm
        x = mb.layer_norm(
            x=x if is_require_rank4_transpose else pattern.reduce_op.x,
            axes=axes,
            gamma=pattern.gamma_var,
            beta=pattern.beta_var,
            epsilon=pattern.epsilon_var,
            name=out_name + "_layernorm" if is_require_rank4_transpose else out_name,
            before_op=pattern.end_op,
        )
    if is_require_rank4_transpose:
        x = mb.transpose(
            x=x,
            perm=[0, 2, 3, 1],
            name=out_name + "_transpose_nchw_nhwc",
            before_op=pattern.end_op,
        )

    pattern.end_op.enclosing_block.replace_uses_of_var_after_op(
        anchor_op=pattern.end_op, old_var=pattern.end_op.outputs[0], new_var=x
    )
    # Remove all the ops at once
    pattern.block.remove_ops(pattern.op_list())
    return True


register_generic_pass(ops_arrangement=layernorm, var_constraints=var_constraints,
                      transform_pattern=transform_pattern,
                      pass_name="layernorm_pass", namespace="common")
```
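The axes bookkeeping in `transform_pattern` above is easier to see with concrete values; this standalone snippet mirrors the normalization step:

```
axes = [2, 3]                      # reduce axes captured from the pattern
rank = 4                           # rank of the root var, e.g. (B, C, H, W)
negative_axes = sorted(a - rank if a >= 0 else a for a in axes)

assert negative_axes == [-2, -1]   # reduction over H, W => instance_norm candidate
```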
## _**Understanding the Infrastructure: Implementation Details**_

* This is a list of all the internal functions in my infrastructure, and what they each do. Remember, the goal is to detect a small user-defined MIL program inside a larger machine learning model (also a MIL program). Most of these functions are in the `coreml/coremltools/coremltools/converters/mil/experimental/passes/generic_pass_infrastructure.py` file.
* The first (highest level) function:
`register_generic_pass(ops_arrangement, var_constraints, transform_pattern, pass_name, namespace)`
    * Parameters
        * `ops_arrangement` : The user defined MIL program we are trying to detect
        * `var_constraints` : The user defined function that takes in a `Pattern` object (which stores captured operations in the larger machine learning model) as a parameter, and returns whether the captured operations in that object satisfy certain constraints
        * `transform_pattern` : The user defined function that takes in a `Pattern` object (which stores captured operations in the larger machine learning model) as a parameter, and replaces those captured operations with the desired operations in the larger machine learning model
        * `pass_name` : A string that is the name of the pass
        * `namespace` : A string that is the namespace where the pass is registered
    * Results
        * This function registers a pass with the given parameters
* The second function, called by the one above:
`fuse_all_blocks(ops_arrangement, var_constraints, transform_pattern, prog)`
    * Parameters
        * `ops_arrangement` : The user defined MIL program we are trying to detect
        * `var_constraints` : The user defined function that takes in a `Pattern` object (which stores captured operations in the larger machine learning model) as a parameter, and returns whether the captured operations in that object satisfy certain constraints
        * `transform_pattern` : The user defined function that takes in a `Pattern` object (which stores captured operations in the larger machine learning model) as a parameter, and replaces those captured operations with the desired operations in the larger machine learning model
        * `prog` : The large machine learning model (represented in MIL) in which we are trying to detect `ops_arrangement`
    * Results
        * This function replaces all instances of `ops_arrangement` in `prog` with the desired replacement code in `transform_pattern`
* The third function, called by the one above:
`fuse_one_block(block, ops_arrangement, var_constraints, transform_pattern)`
    * Parameters
        * `block` : The block in the main machine learning model that we are looking into right now
        * `ops_arrangement` : The user defined MIL program we are trying to detect
        * `var_constraints` : The user defined function that takes in a `Pattern` object (which stores captured operations in the larger machine learning model) as a parameter, and returns whether the captured operations in that object satisfy certain constraints
        * `transform_pattern` : The user defined function that takes in a `Pattern` object (which stores captured operations in the larger machine learning model) as a parameter, and replaces those captured operations with the desired operations in the larger machine learning model
    * Results
        * This function replaces the first instance of `ops_arrangement` in `block` with the desired replacement code in `transform_pattern`
* The fourth function, called by the one above:
`detect_pattern(program_op, ops_arrangement_root_var, block)`
    * Parameters
        * `program_op` : A single operation in the main machine learning model
        * `ops_arrangement_root_var` : The root variable for the user defined MIL program we are trying to detect. **Assumption: this program has only one root variable**
        * `block` : The block in the main machine learning model that we are looking into right now
    * Results
        * This function does the following:
            * Creates a `Pattern` object to capture operations and other relevant details from the main machine learning model
            * Sets the `Pattern` object's `block` and `root_var` attributes. `root_var` is set to the **single** input variable of `program_op` that corresponds to the `ops_arrangement_root_var`. In other words, if you remember your SAT prep from high school, `ops_arrangement_root_var` is to `ops_arrangement` as `pattern.root_var` is to the main machine learning model. **Since we are assuming that the user defined pattern has only 1 root variable, if `program_op` has more than 1 input variable, we loop through these inputs to find the one that corresponds to the one in the user defined pattern. Here, "corresponds" is defined as recursively having the same number and type of child operations, in the same topological order.**
            * Sets the `Pattern` object's operation attributes. Each of these attributes' names corresponds to the name given to the operation in the user defined pattern.
            * Sets the `Pattern` object's `final_op` attribute. This is the operation in the main machine learning model that corresponds to the last operation in the user defined pattern. For this last operation, we always only verify that the operation types are the same (we don't care about child operations).
                * We also check that this is the only operation in the captured pattern that has an output that is also in the block's outputs. If it is not, we return `False, None`
                * **Assumption: Here, we are assuming that the user defined pattern has exactly one proper last operation. If the user defined pattern has multiple "last operations" (i.e., operations with 0 child operations), then `final_op` will be set to only one of these last operations, and the check mentioned above will fail - therefore, not capturing the pattern.**
            * Returns `True, pattern` if the user defined pattern is found in the main machine learning model starting at `program_op`'s root variable, and `False, None` otherwise
* The fifth function, called by the one above:
`pattern_detected(pattern, program_op, pattern_op, program_root_var, pattern_root_var, block)`
    * Parameters
        * `pattern` : A `Pattern` object
        * `program_op` : The current operation in the main machine learning model that we are looking at
        * `pattern_op` : The current operation in the user defined pattern that we are looking at
        * `program_root_var` : The variable in the main machine learning model that is analogous to the root variable in the user defined pattern
        * `pattern_root_var` : The root variable in the user defined pattern
        * `block` : The block in the main machine learning model that we are looking into right now
    * Results
        * This function recursively looks at operations and their children in the main machine learning model, and returns true if the following conditions are met, and false otherwise:
            * Every operation in the user defined pattern has the same operation type and number of outputs as its counterpart in the main machine learning model
            * Every operation in the user defined pattern has the same number and type of child operations as its counterpart in the main machine learning model (recursive call). This constraint is not enforced if the operation in the user defined pattern has 0 children.
        * **Assumption: If a `program_op` and a `pattern_op` have the same number of outputs, we are assuming that, if there is a match, those outputs are stored in the same order. Child operations do not have to be ordered.**
* The sixth function, called by the one above:
`lists_op_equality(oplist1, oplist2)`
    * Parameters
        * `oplist1` : A list of operations
        * `oplist2` : A list of operations
    * Results
        * Returns True if the operations in `oplist1` are in the same order and have the same operation type as the operations in `oplist2`, and False otherwise.
* The `Pattern` class
    * Stores the captured operations and, in addition, has `root_var`, `block`, `op_set`, and `final_op` attributes. The user can, of course, add more attributes to the pattern in their functions if they wish, using `pattern.add_attribute(attribute_name, attribute)`
    * `pattern.op_list()` returns a list of all unique operations stored in the pattern
* The `PassContainer` class
    * In the new infrastructure, each new pattern that the user wants to detect needs to be defined and registered separately. If the user wants to group each of these "subpasses" together, they can register them with the same name and namespace, and all the "subpasses" will be stored in a `PassContainer` instance, where they will eventually all be executed (see the sketch after this list).
    * `PassContainer(pass_name)` : makes a new `PassContainer` object with a single pass name (String)
    * `passContainer.add(pass_func)` adds a pass function to the `PassContainer`'s list of pass functions. A pass function is a function that takes in a machine learning model as a parameter and transforms it into the compressed, transformed machine learning model. This is a partial function of `fuse_all_blocks` defined above.
    * `PassContainer.__call__(prog)` : Executes all `pass_functions` stored in this `PassContainer` object with respect to the given machine learning model
## _**How to Add/Run a Pass**_

* Write the pass, and save it in a file in the `coreml/coremltools/coremltools/converters/mil/experimental/passes` folder
* Add an import line to the `coreml/coremltools/coremltools/converters/mil/mil/passes/__init__.py` file
* Run the experimental (generic) passes by setting the `ENABLE_EXPERIMENTAL_PASSES` environment variable to 1, which will override the regular passes with the same name

diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/__init__.py
new file mode 100644
index 00000000..545ac7e5
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/__init__.py
@@ -0,0 +1,4 @@
+# Copyright (c) 2021, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/generic_conv_batchnorm_fusion.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/generic_conv_batchnorm_fusion.py
new file mode 100644
index 00000000..498b0810
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/generic_conv_batchnorm_fusion.py
@@ -0,0 +1,169 @@
+# Copyright (c) 2021, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os + +import numpy as np + +from coremltools.converters.mil import Builder as mb +from coremltools.converters.mil.experimental.passes.generic_pass_infrastructure import \ + register_generic_pass + +""" +Fuse the following batch_norm layer into conv and conv_transpose +That is, convert conv + batch_norm to conv, by modifying the weight and bias in the conv layer +Given: + %2 = conv(%1) + ... + %3 = batch_norm(%2) + ... + +Result: + %3 = conv(%1) + ... +""" + +arbitrary_cin = 5 +arbitrary_cout = 8 +np.random.seed() +arbitrary_input = (3, arbitrary_cin, 224, 224) +arbitrary_weight = np.random.rand(arbitrary_cout, arbitrary_cin, 10, 10) +arbitrary_mean= np.random.rand(arbitrary_cout) +arbitrary_variance = np.random.rand(arbitrary_cout) + +if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1": + @mb.program(input_specs=[mb.TensorSpec(shape=arbitrary_input)]) + def conv_batchnorm(x): + conv = mb.conv(x=x, weight=arbitrary_weight, pad_type="valid", name="conv") + batch_norm = mb.batch_norm(x=conv, mean=arbitrary_mean, variance=arbitrary_variance, name="batchnorm") + return batch_norm + +if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1": + @mb.program(input_specs=[mb.TensorSpec(shape=arbitrary_input)]) + def conv_transpose_batchorm(x): + conv = mb.conv_transpose(x=x, weight=arbitrary_weight, pad_type="valid", name="conv") + batch_norm = mb.batch_norm(x=conv, mean=arbitrary_mean, variance=arbitrary_variance, name="batchnorm") + return batch_norm + + +def var_constraints(pattern): + return pattern.conv.weight.val is not None + + +def transform_pattern(pattern): + # get parameters from batch_norm layer + gamma = pattern.batchnorm.gamma.val + beta = pattern.batchnorm.beta.val + mean = pattern.batchnorm.mean.val + variance = pattern.batchnorm.variance.val + epsilon = pattern.batchnorm.epsilon.val + # get weight, bias and groups from conv layer + + conv_weight = pattern.conv.weight.val + conv_bias = pattern.conv.bias + groups = pattern.conv.groups.val + + # get type of the conv layer + is_deconv = pattern.conv.op_type == 'conv_transpose' + is_conv_1d = len(conv_weight.shape) == 3 + + # D_in denotes the spatial dimensions for conv kernel weight + # for conv_transpose, conv_weight has shape [Cin, Cout / groups, *D_in] + # for conv, conv_weight has shape [Cout, Cin / groups, *D_in] + if is_deconv: + Cout = conv_weight.shape[1] * groups + Cin = conv_weight.shape[0] + else: + Cout = conv_weight.shape[0] + Cin = conv_weight.shape[1] * groups + + # get the type of the conv weight + conv_weight_type = conv_weight.dtype + + # create bias for conv if not exist + if conv_bias is None: + conv_bias = np.zeros(Cout) + else: + conv_bias = conv_bias.val + conv_bias = conv_bias.astype(conv_weight_type) + + # get the original shape of weight and bias + origin_weight_shape = conv_weight.shape + origin_bias_shape = conv_bias.shape + + # update the weight for conv layer + new_conv_weight = [] + new_conv_bias = [] + + if is_deconv: + conv_weight = np.transpose(conv_weight, [1, 0, 2] if is_conv_1d else [1, 0, 2, 3]) + conv_weight = np.reshape(conv_weight, [Cout, Cin // groups] + list(conv_weight.shape[2:])) + + for i in range(Cout): + # get batch norm parameters for each channel + _gamma = gamma[i] + _beta = beta[i] + _mean = mean[i] + _variance = variance[i] + _scale = _gamma / np.sqrt(_variance + epsilon) + + # get conv weight and bias for each channel + 
_conv_weight = conv_weight[i] + _conv_bias = conv_bias[i] + + # update the conv weight and bias + _conv_weight = _conv_weight * _scale + _conv_bias = _scale * (_conv_bias - _mean) + _beta + new_conv_weight.append(_conv_weight) + new_conv_bias.append(_conv_bias) + + new_conv_weight = np.array(new_conv_weight).astype(conv_weight_type) + new_conv_bias = np.array(new_conv_bias).astype(conv_weight_type) + + if is_deconv: + new_conv_weight = np.reshape(new_conv_weight, [Cout // groups, Cin] + list(new_conv_weight.shape[2:])) + new_conv_weight = np.transpose(new_conv_weight, [1, 0, 2] if is_conv_1d else [1, 0, 2, 3]) + + # make sure the updated weight and bias have the same shape as the original ones + assert new_conv_weight.shape == origin_weight_shape, "conv weight should have the same shape before and after the fuse_conv_batchnorm pass." + assert new_conv_bias.shape == origin_bias_shape, "conv bias should have the same shape before and after the fuse_conv_batchnorm pass." + + # create a new conv op with the new bias value, copying rest of the attributes + out_name = pattern.batchnorm.outputs[0].name + conv_kargs = {"weight": new_conv_weight, "bias": new_conv_bias, "name": out_name, "before_op": pattern.conv} + + for k, v in pattern.conv.inputs.items(): + if k in ["weight", "bias"]: + continue + conv_kargs[k] = v + + if is_deconv: + x = mb.conv_transpose(**conv_kargs) + else: + x = mb.conv(**conv_kargs) + + pattern.batchnorm.enclosing_block.replace_uses_of_var_after_op( + anchor_op=pattern.batchnorm, old_var=pattern.batchnorm.outputs[0], new_var=x + ) + # Remove all the ops at once + pattern.block.remove_ops(pattern.op_list()) + + +if os.getenv('ENABLE_EXPERIMENTAL_PASSES') == '1': + register_generic_pass( + ops_arrangement=conv_batchnorm, + var_constraints=var_constraints, + transform_pattern=transform_pattern, + pass_name="fuse_conv_batchnorm", + namespace="common", + ) + + register_generic_pass( + ops_arrangement=conv_transpose_batchorm, + var_constraints=var_constraints, + transform_pattern=transform_pattern, + pass_name="fuse_conv_batchnorm", + namespace="common", + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/generic_conv_bias_fusion.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/generic_conv_bias_fusion.py new file mode 100644 index 00000000..ca8977c4 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/generic_conv_bias_fusion.py @@ -0,0 +1,367 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os + +import numpy as np + +from coremltools import _logger as logger +from coremltools.converters.mil import Builder as mb +from coremltools.converters.mil.experimental.passes.generic_pass_infrastructure import \ + register_generic_pass +from coremltools.converters.mil.mil import types + +""" +Fold add/sub into bias of conv and conv_transpose +That is, convert conv + add/sub to conv, when add/sub is adding a constant + +There are two main patterns supported now. The first one is: + +Pattern 1: +Given: + %2 = conv(%1) + ... + %3 = add(%2, constant) # where constant has shape (1,C,1)/(C,1) for 1d conv, (1,C,1,1)/(C,1,1) for 2d conv etc + ... + +Result: + %3 = conv(%1) + ... + +The second one is: + +Pattern 2: + Given: + %2 = conv(%1) + %3 = transpose(%2) + ... 
+ %4 = add(%3, constant) # where constant has a broacasable shape + ... + + Result: + %2 = conv(%1) + %4 = transpose(%2) + ... + +When taking all of the conv/conv_tranpose, transpose/no transpose, and add/sub into account, +We end up with a total of 8 patterns (2^3). These patterns are paramaterized by the pattern_to_detect +function below. +""" + +arbitrary_cin = 5 +arbitrary_cout = 8 +arbitrary_scalar = 5 +np.random.seed() +arbitrary_perm = [0,1,2,3] +arbitrary_input = (3, arbitrary_cin, 224, 224) +arbitrary_weight = np.random.rand(arbitrary_cout, arbitrary_cin, 10, 10) + + +def pattern_to_detect(conv_transpose, transpose, sub): + """ + Wrapper to create 8 patterns to detect for conciseness. + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=arbitrary_input)]) + def conv_bias_pattern(x): + if not conv_transpose: + conv = mb.conv(x=x, weight=arbitrary_weight, pad_type="valid", name="conv") + else: + conv = mb.conv_transpose(x=x, weight=arbitrary_weight, pad_type="valid", name="conv") + + if transpose: + transpose_layer = mb.transpose(x=conv, perm=arbitrary_perm, name="transpose") + + if sub: + add_or_sub = mb.sub(x=transpose_layer if transpose else conv, y=arbitrary_scalar, name="add_or_sub") + else: + add_or_sub = mb.add(x=transpose_layer if transpose else conv, y=arbitrary_scalar, name="add_or_sub") + return add_or_sub + + return conv_bias_pattern + + +def var_constraints(pattern): + bias_value = _get_bias_var(pattern).val + rank = pattern.conv.x.rank + is_bias_scalar = True if not isinstance(bias_value, np.ndarray) else False + old_bias = pattern.conv.inputs.get("bias", None) + old_bias_value = old_bias.val if old_bias is not None and old_bias.val is not None else None + + passed = True + passed = passed and isinstance(bias_value, (np.ndarray, np.generic)) + passed = passed and rank is not None + passed = passed and (rank == 3 or rank == 4 or rank == 5) + + # check compatibility of bias value with the rank of the conv op + # either bias value should be a scalar or: + # rank=3 ==> (B,C,D), which means bias must be (1,C,1) or (C,1) + # rank=4 ==> (B,C,D1,D2), which means bias must be (1,C,1,1) or (C,1,1) + # rank=5 ==> (B,C,D1,D2,D3), which means bias must be (1,C,1,1,1) or (C,1,1,1) + if not is_bias_scalar: + # check that there is at most one dimension in the shape that is not 1 + passed = passed and len(np.squeeze(bias_value).shape) <= 1 + # check that addition is not happening on the batch dimension + passed = passed and (len(bias_value) != rank or bias_value.shape[0] == 1) + # check that last rank-2 entries in the shape vector are all 1s + passed = passed and np.prod(bias_value.shape[-(rank - 2):]) == 1 + + bias_value = np.array([bias_value]) if is_bias_scalar else np.squeeze(bias_value) + + passed = passed and ( + old_bias is not None + or np.prod(bias_value.shape) != 1 + or pattern.conv.weight.val is not None + ) + + if old_bias is not None: + try: + new_bias_value = old_bias_value + bias_value + except: + return False + + return passed + + +def var_constraints_tranpose(pattern): + bias = pattern.add_or_sub.x.val if pattern.add_or_sub.x.val is not None else pattern.add_or_sub.y.val + Cout = pattern.conv.outputs[0].shape[1] + + passed = True + passed = passed and pattern.add_or_sub.x.val is not None or pattern.add_or_sub.y.val is not None + passed = passed and _bias_mod_and_validity(bias, Cout, pattern) is not None + return passed + +def transform_pattern(pattern): + bias_value = _get_bias_var(pattern).val + + is_conv_op = (pattern.conv.op_type == "conv") + + is_bias_scalar = 
False + if not isinstance(bias_value, np.ndarray): + is_bias_scalar = True + + bias_value = np.array([bias_value]) if is_bias_scalar else np.squeeze(bias_value) + + if pattern.add_or_sub.op_type == "sub": + bias_value *= -1 + + # everything looks good, now find the new updated bias + old_bias = pattern.conv.inputs.get("bias", None) + old_bias_value = None + if old_bias is not None and old_bias.val is not None: + old_bias_value = old_bias.val + if old_bias is None: + # need to create a fresh numpy array for bias + if np.prod(bias_value.shape) == 1: + # its a scalar bias + # need to find the value of Cout to form a new bias + # conv_transpose has weight format [K, C_out, spatial dims] + # conv has weight format [C_out, K, spatial dims] + Cout = pattern.conv.weight.val.shape[0 if is_conv_op else 1] + new_bias_value = np.broadcast_to(bias_value, (Cout,)) + else: + new_bias_value = bias_value + else: + # just need to update the existing bias array + new_bias_value = old_bias_value + bias_value + + # create a new conv op with the new bias value, copying rest of the attributes + out_name = pattern.add_or_sub.outputs[0].name + if new_bias_value.dtype != np.float32 and new_bias_value.dtype != np.float16: + # cast the bias to match the weight type + weight_np_type = types.nptype_from_builtin(pattern.conv.inputs["weight"].sym_type.get_primitive()) + logger.warning("conv_bias_fusion pass: casting bias " + "from {} to {} to match the dtype of the weight of the conv layer".format( + new_bias_value.dtype, weight_np_type + ) + ) + new_bias_value = new_bias_value.astype(weight_np_type) + new_bias_var = mb.const(val=new_bias_value, before_op=pattern.conv) + + conv_kargs = {"bias": new_bias_var, "name": out_name, "before_op": pattern.conv} + + for k, v in pattern.conv.inputs.items(): + if k == "bias": + continue + conv_kargs[k] = v + + if is_conv_op: + x = mb.conv(**conv_kargs) + else: + x = mb.conv_transpose(**conv_kargs) + + pattern.add_or_sub.enclosing_block.replace_uses_of_var_after_op( + anchor_op=pattern.add_or_sub, old_var=pattern.add_or_sub.outputs[0], new_var=x + ) + # Remove all the ops at once + pattern.block.remove_ops(pattern.op_list()) + + +def transform_transpose_pattern(pattern): + is_deconv = pattern.conv.op_type == "conv_transpose" + + # get the bias + bias = pattern.add_or_sub.x.val if pattern.add_or_sub.x.val is not None else pattern.add_or_sub.y.val + is_first_input = pattern.add_or_sub.y.val is not None + is_sub = pattern.add_or_sub.op_type == "sub" + + # get the conv bias/weight + conv_shape = pattern.conv.outputs[0].shape + Cout = conv_shape[1] + conv_weight = pattern.conv.weight.val + conv_weight_type = conv_weight.dtype + conv_bias = np.zeros(Cout).astype(conv_weight_type) if pattern.conv.bias is None else pattern.conv.bias.val + + bias = _bias_mod_and_validity(bias, Cout, pattern) + + # compute the new bias + if is_sub: + if is_first_input: + bias = -bias + else: + conv_bias = -conv_bias + + new_bias = conv_bias + bias + + # compute the new weight + if is_sub and not is_first_input: + new_weight = -conv_weight + else: + new_weight = conv_weight + + # create a new conv op with the new weight, bias value, copying rest of the attributes + conv_kargs = {"weight": new_weight, "bias": new_bias, "before_op": pattern.conv} + + for k, v in pattern.conv.inputs.items(): + if k in ["weight", "bias"]: + continue + conv_kargs[k] = v + + if is_deconv: + x = mb.conv_transpose(**conv_kargs) + else: + x = mb.conv(**conv_kargs) + + # create a new transpose op + out_name = 
pattern.add_or_sub.outputs[0].name + tranpose_kargs = {"x": x, "name": out_name, "before_op": pattern.transpose} + for k, v in pattern.transpose.inputs.items(): + if k == "x": + continue + tranpose_kargs[k] = v + x = mb.transpose(**tranpose_kargs) + + pattern.add_or_sub.enclosing_block.replace_uses_of_var_after_op( + anchor_op=pattern.add_or_sub, old_var=pattern.add_or_sub.outputs[0], new_var=x + ) + + # Remove all the ops at once + pattern.block.remove_ops(pattern.op_list()) + +def _bias_mod_and_validity(bias, Cout, pattern): + # check if the bias is compatible for fusion + is_bias_scalar = True + if isinstance(bias, np.ndarray): + if bias.shape == (): + bias = bias.tolist() + elif np.prod(bias.shape) == 1: + bias = np.squeeze(bias).tolist() + else: + is_bias_scalar = False + + if not is_bias_scalar: + if np.prod(bias.shape) != Cout: + return None + rank = pattern.transpose.outputs[0].rank + cout_dim = pattern.transpose.perm.val.tolist().index(1) - rank + if bias.shape[cout_dim] != Cout: + return None + bias = np.reshape(bias, (Cout)) + + return bias + +def _get_bias_var(pattern): + if pattern.add_or_sub.op_type == "sub": + bias_var = pattern.add_or_sub.y + else: + bias_var = pattern.add_or_sub.x if pattern.add_or_sub.x.val is not None else pattern.add_or_sub.y + + return bias_var + + +if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1": + + # conv -> add + register_generic_pass( + ops_arrangement=pattern_to_detect(False, False, False), + var_constraints=var_constraints, + transform_pattern=transform_pattern, + pass_name="fuse_conv_bias", + namespace="common", + ) + + # conv -> sub + register_generic_pass( + ops_arrangement=pattern_to_detect(False, False, True), + var_constraints=var_constraints, + transform_pattern=transform_pattern, + pass_name="fuse_conv_bias", + namespace="common", + ) + + # conv_transpose -> add + register_generic_pass( + ops_arrangement=pattern_to_detect(True, False, False), + var_constraints=var_constraints, + transform_pattern=transform_pattern, + pass_name="fuse_conv_bias", + namespace="common", + ) + + # conv_transpose -> sub + register_generic_pass( + ops_arrangement=pattern_to_detect(True, False, True), + var_constraints=var_constraints, + transform_pattern=transform_pattern, + pass_name="fuse_conv_bias", + namespace="common", + ) + + # conv -> transpose -> add + register_generic_pass( + ops_arrangement=pattern_to_detect(False, True, False), + var_constraints=var_constraints_tranpose, + transform_pattern=transform_transpose_pattern, + pass_name="fuse_conv_bias", + namespace="common", + ) + + # conv -> transpse -> sub + register_generic_pass( + ops_arrangement=pattern_to_detect(False, True, True), + var_constraints=var_constraints_tranpose, + transform_pattern=transform_transpose_pattern, + pass_name="fuse_conv_bias", + namespace="common", + ) + + # conv_transpose -> transpose -> add + register_generic_pass( + ops_arrangement=pattern_to_detect(True, True, False), + var_constraints=var_constraints_tranpose, + transform_pattern=transform_transpose_pattern, + pass_name="fuse_conv_bias", + namespace="common", + ) + + # conv_transpose -> transpose -> sub + register_generic_pass( + ops_arrangement=pattern_to_detect(True, True, True), + var_constraints=var_constraints_tranpose, + transform_pattern=transform_transpose_pattern, + pass_name="fuse_conv_bias", + namespace="common", + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/generic_conv_scale_fusion.py 
b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/generic_conv_scale_fusion.py new file mode 100644 index 00000000..744e2d2f --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/generic_conv_scale_fusion.py @@ -0,0 +1,244 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os + +import numpy as np + +from coremltools.converters.mil import Builder as mb +from coremltools.converters.mil.experimental.passes.generic_pass_infrastructure import \ + register_generic_pass + +""" +Fold mul/div into conv/conv_transpose by updating the weight/bias of the convolution layers. + +The scale const can be a single number (scalar) or a vector with a broacasable shape, +for instance, if the output of the conv/deconv layer is (B, Cout, H, W), +const of shape (Cout, 1, 1) and (1, Cout, 1, 1) are allowed. + +Given: + %2 = conv(%1) + ... + %3 = mul(%2, constant) # where constant is the scale constant + ... + +Result: + %3 = conv(%1) + ... +""" + +arbitrary_cin = 5 +arbitrary_cout = 8 +arbitrary_scalar = 5 +np.random.seed() +arbitrary_input = (3, arbitrary_cin, 224, 224) +arbitrary_weight = np.random.rand(arbitrary_cout, arbitrary_cin, 10, 10) + +if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1": + @mb.program(input_specs=[mb.TensorSpec(shape=arbitrary_input)]) + def conv_scale_mul(x): + conv = mb.conv(x=x, weight=arbitrary_weight, pad_type="valid", name="conv") + mul = mb.mul(x=conv, y=arbitrary_scalar, name="scale") + return mul + +if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1": + @mb.program(input_specs=[mb.TensorSpec(shape=arbitrary_input)]) + def conv_transpose_scale_mul(x): + conv = mb.conv_transpose(x=x, weight=arbitrary_weight, pad_type="valid", name="conv") + mul = mb.mul(x=conv, y=arbitrary_scalar, name="scale") + return mul + +if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1": + @mb.program(input_specs=[mb.TensorSpec(shape=arbitrary_input)]) + def conv_scale_div(x): + conv = mb.conv(x=x, weight=arbitrary_weight, pad_type="valid", name="conv") + real_div = mb.real_div(x=conv, y=arbitrary_scalar, name="scale") + return real_div + +if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1": + @mb.program(input_specs=[mb.TensorSpec(shape=arbitrary_input)]) + def conv_transpose_scale_div(x): + conv = mb.conv_transpose(x=x, weight=arbitrary_weight, pad_type="valid", name="conv") + real_div = mb.real_div(x=conv, y=arbitrary_scalar, name="scale") + return real_div + + +def _cin_cout(pattern): + # D_in denotes the spatial dimensions for conv kernel weight + # for conv_transpose, conv_weight has shape [Cin, Cout / groups, *D_in] + # for conv, conv_weight has shape [Cout, Cin / groups, *D_in] + is_deconv = pattern.conv.op_type == "conv_transpose" + groups = pattern.conv.groups.val + conv_weight = pattern.conv.weight.val + if is_deconv: + Cout = conv_weight.shape[1] * groups + Cin = conv_weight.shape[0] + else: + Cout = conv_weight.shape[0] + Cin = conv_weight.shape[1] * groups + + return Cin, Cout + + +def _is_scalar(pattern): + # for the scalar case, the scalar can be either + # 1. a python int/float + # 2. a 0d numpy array + # 3. 
a 1d numpy array with shape (1,) + scale_var = pattern.scale.x if pattern.scale.x.val is not None else pattern.scale.y + scale = scale_var.val + is_scalar = True + if isinstance(scale, np.ndarray): + if scale.shape == (): + scale = scale.tolist() + elif scale.shape == (1) or scale.shape == (1,): + scale = scale[0] + else: + is_scalar = False + + return is_scalar + + +def var_constraints(pattern): + passed = True + passed = passed and pattern.scale.x.val is not None or pattern.scale.y.val is not None + passed = passed and pattern.conv.weight.val is not None + + is_scalar = _is_scalar(pattern) + Cin, Cout = _cin_cout(pattern) + scale_var = pattern.scale.x if pattern.scale.x.val is not None else pattern.scale.y + scale = scale_var.val + + # for the vector scale case, check if the shape is broacastable + if not is_scalar: + conv_weight = pattern.conv.weight.val + passed = passed and ( + np.product(scale.shape) == Cout + or (len(scale.shape) == len(conv_weight.shape) and scale.shape[1] == Cout) + or (len(scale.shape) == len(conv_weight.shape) - 1 and scale.shape[0] == Cout) + ) + + return passed + + +def transform_pattern(pattern): + # get the scale + scale_var = pattern.scale.x if pattern.scale.x.val is not None else pattern.scale.y + scale = scale_var.val + is_scalar = _is_scalar(pattern) + + # get weight and bias and groups from conv layer + conv_weight = pattern.conv.weight.val + conv_bias = pattern.conv.bias + groups = pattern.conv.groups.val + + # get type of the conv layer + is_deconv = pattern.conv.op_type == "conv_transpose" + is_conv_1d = len(conv_weight.shape) == 3 + + Cin, Cout = _cin_cout(pattern) + + # transform the scale to 1./scale for the real_div case + if pattern.scale.op_type == "real_div": + scale = 1.0 / scale + + # get the type of the conv weight + conv_weight_type = conv_weight.dtype + + # create bias for conv if not exist + if conv_bias is None: + conv_bias = np.zeros(Cout) + else: + conv_bias = conv_bias.val + conv_bias = conv_bias.astype(conv_weight_type) + + # get the original shape of weight and bias + origin_weight_shape = conv_weight.shape + origin_bias_shape = conv_bias.shape + + # update the weight/bias for conv layer + if is_scalar: + new_conv_bias = np.array(conv_bias * scale).astype(conv_weight_type) + new_conv_weight = np.array(conv_weight * scale).astype(conv_weight_type) + + else: + scale = np.reshape(scale, (Cout)) + new_conv_bias = np.array(conv_bias * scale).astype(conv_weight_type) + new_conv_weight = [] + if is_deconv: + conv_weight = np.transpose(conv_weight, [1, 0, 2] if is_conv_1d else [1, 0, 2, 3]) + conv_weight = np.reshape(conv_weight, [Cout, Cin // groups] + list(conv_weight.shape[2:])) + + for i in range(Cout): + _conv_weight = conv_weight[i] * scale[i] + new_conv_weight.append(_conv_weight) + new_conv_weight = np.array(new_conv_weight).astype(conv_weight_type) + + if is_deconv: + new_conv_weight = np.reshape(new_conv_weight, [Cout // groups, Cin] + list(new_conv_weight.shape[2:])) + new_conv_weight = np.transpose(new_conv_weight, [1, 0, 2] if is_conv_1d else [1, 0, 2, 3]) + + # make sure the updated weight and bias have the same shape as the original ones + assert new_conv_weight.shape == origin_weight_shape, "conv weight should have the same shape before and after the fuse_conv_scale pass." + assert new_conv_bias.shape == origin_bias_shape, "conv bias should have the same shape before and after the fuse_conv_scale pass." 
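+    # (Descriptive note: the folding above is valid because conv is linear in
+    # its weight: mul(conv(x, W), s) == conv(x, s * W), and the bias scales
+    # per output channel as s * b.)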
+ + # create a new conv op with the new weight, bias value, copying rest of the attributes + out_name = pattern.scale.outputs[0].name + conv_kargs = { + "weight": new_conv_weight, + "bias": new_conv_bias, + "name": out_name, + "before_op": pattern.conv, + } + + for k, v in pattern.conv.inputs.items(): + if k in ["weight", "bias"]: + continue + conv_kargs[k] = v + + if is_deconv: + x = mb.conv_transpose(**conv_kargs) + else: + x = mb.conv(**conv_kargs) + + pattern.scale.enclosing_block.replace_uses_of_var_after_op( + anchor_op=pattern.scale, old_var=pattern.scale.outputs[0], new_var=x + ) + # Remove all the ops at once + pattern.block.remove_ops(pattern.op_list()) + + +if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1": + register_generic_pass( + ops_arrangement=conv_scale_mul, + var_constraints=var_constraints, + transform_pattern=transform_pattern, + pass_name="fuse_conv_scale", + namespace="common", + ) + + register_generic_pass( + ops_arrangement=conv_transpose_scale_mul, + var_constraints=var_constraints, + transform_pattern=transform_pattern, + pass_name="fuse_conv_scale", + namespace="common", + ) + + register_generic_pass( + ops_arrangement=conv_scale_div, + var_constraints=var_constraints, + transform_pattern=transform_pattern, + pass_name="fuse_conv_scale", + namespace="common", + ) + + register_generic_pass( + ops_arrangement=conv_transpose_scale_div, + var_constraints=var_constraints, + transform_pattern=transform_pattern, + pass_name="fuse_conv_scale", + namespace="common", + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/generic_layernorm_instancenorm_pattern_fusion.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/generic_layernorm_instancenorm_pattern_fusion.py new file mode 100644 index 00000000..890dacf4 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/generic_layernorm_instancenorm_pattern_fusion.py @@ -0,0 +1,457 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os + +import numpy as np + +from coremltools.converters.mil import Builder as mb +from coremltools.converters.mil.experimental.passes.generic_pass_infrastructure import \ + register_generic_pass +from coremltools.converters.mil.mil import get_new_symbol + +if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1": + shape = (get_new_symbol(), get_new_symbol(), get_new_symbol(), get_new_symbol()) + +def _check_reduce_op(reduce_op, mode="reduce_mean") -> bool: + """ + Check whether or not the reduction op satisfy following conditions: + - Mode is expected. + - Does not change rank (keep_dims is True). + - Axes are known at compile time. 
+ + :param reduce_op: reduce op to check on + :param mode: reduce mode + """ + if reduce_op is None: + return False + if reduce_op.op_type != mode: + return False + if reduce_op.keep_dims.val is False: + return False + if reduce_op.axes is None or reduce_op.axes.val is None: + return False + return True + +if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1": + @mb.program(input_specs=[mb.TensorSpec(shape=shape)]) + def instancenorm_or_layernorm(x): + """ + Identify the pattern: + + y = gamma * (x - mean) / sqrt(variance + epsilon) + beta + + y = x * [gamma * rsqrt(variance + eps)] + (beta - mean * [gamma * rsqrt(variance + eps)]) + + x --> main_reduce --> sub --> square --> reduce_mean_2 --> add(epsilon) --> rsqrt + | | ^ | + | | | V + |----------------------- mul (gamma) + | | | + | | --------|--------- + | | | | + | | | V + | |------------------------------------------------------------------> mul_3 + | | | + | V | + |----------------------------------------------------------------> mul_2 | + | V + | sub (beta) --> add_2 --> [...] + | ^ + |------------------------------- + + This pattern corresponds to either layer_norm or instance_norm. + + It is instance_norm if all of the following are true: + - input is rank 4 + - axes of reduce_mean is [-2, -1] or [-3, -2] + (when [-3, -2], a channel first to channel last transpose would be inserted) + - gamma and beta are rank 1, after squeeze + + It is layer_norm if all of the following are true: + - axes is either [-1] or [-1, -2] or [-1, -2, -3] and so on + - rank of gamma and beta is equal to the length of the axes + """ + main_reduce = mb.reduce_mean(x=x, axes=[2, 3], keep_dims=True, name="main_reduce") + sub = mb.sub(x=x, y=main_reduce, name="sub") + square = mb.square(x=sub, name="square") + reduce_mean_2 = mb.reduce_mean(x=square, axes=[2, 3], keep_dims=True, name="reduce_mean_2") + add_epsilon = mb.add(x=reduce_mean_2, y=1e-5, name="add_epsilon") + rsqrt = mb.rsqrt(x=add_epsilon, epsilon=1e-12, name="rsqrt") + mul_gamma = mb.mul(x=rsqrt, y=np.random.rand(1, 5, 1, 1), name="mul_gamma") + mul_2 = mb.mul(x=x, y=mul_gamma, name="mul_2") + mul_3 = mb.mul(x=main_reduce, y=mul_gamma, name="mul_3") + sub_beta = mb.sub(x=np.random.rand(1, 5, 1, 1), y=mul_3, name="sub_beta") + add_2 = mb.add(x=sub_beta, y=mul_2, name="add_2") + return add_2 + +if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1": + @mb.program(input_specs=[mb.TensorSpec(shape=shape)]) + def instancenorm_2(x): + """ + Identify the pattern: + y = (x - mean) / pow(variance + epsilon) * gamma + beta + + This pattern corresponds to, should be fused as instance_norm. + All of the following must be satisty: + 1) Input is rank 4 tensor + 2) Reduce operates on spatial dimensions axes=[-2, -1], or axes=[-3, -2] (a + channel first to channel last transpose would be inserted in such case) + 3) Gamma and beta are both shape (C,) after squeeze, where C is number of channels + + + |----> sub0 ----------| const (0.5) + | ^ | | + | | V V + x ---> main_reduce square --> mean1 --> add_eps ---> pow const_gamma const_beta + | | | | | + | V V V V + |----> sub1 --------------------------------------> real_div --> mul_gamma --> add_beta --> ... 
+ """ + + main_reduce = mb.reduce_mean(x=x, axes=[2, 3], keep_dims=True, name="main_reduce") + sub0 = mb.sub(x=x, y=main_reduce, name="sub0") + sub1 = mb.sub(x=x, y=main_reduce, name="sub1") + square = mb.square(x=sub0, name="square") + mean1 = mb.reduce_mean(x=square, axes=[2, 3], keep_dims=True, name="mean1") + add_epsilon = mb.add(x=mean1, y=1e-5, name="add_epsilon") + pow = mb.pow(x=add_epsilon, y=0.5, name="pow") + real_div = mb.real_div(x=sub1, y=pow, name="real_div") + mul_gamma = mb.mul(x=np.random.rand(1, 5, 1, 1), y=real_div, name="mul_gamma") + add_beta = mb.add(x=np.random.rand(1, 5, 1, 1), y=mul_gamma, name="add_beta") + return add_beta + + +if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1": + @mb.program(input_specs=[mb.TensorSpec(shape=shape)]) + def instancenorm_3(x): + """ + Detect InstanceNorm pattern in TensorFlow-Addons. + + This pattern corresponds to, should be fused as instance_norm. + All of the following must be satisty: + 1) Input is rank 4 tensor + 2) Reduce operates on spatial dimensions axes=[-2, -1], or axes=[-3, -2] (a + channel first to channel last transpose would be inserted in such case) + 3) Gamma and beta are absent. Default values for gamma and beta would be used. + + |-------------------------------------------------------| + | | + | V + x --> main_reduce square --> mean1 --> add_eps --> rsqrt --> mul2 --> mul_sub + | | ^ | | + | V | | | + | --> sub -----------| | | + | V V + |--------------------------------------------------> mul1 -------------> add --> ... + """ + + main_reduce = mb.reduce_mean(x=x, axes=[2, 3], keep_dims=True, name="main_reduce") + sub = mb.sub(x=x, y=main_reduce, name="sub") + square = mb.square(x=sub, name="square") + mean1 = mb.reduce_mean(x=square, axes=[2, 3], keep_dims=True, name="mean1") + add_epsilon = mb.add(x=mean1, y=1e-5, name="add_epsilon") # epsilon + rsqrt = mb.rsqrt(x=add_epsilon, name="rsqrt") + mul1 = mb.mul(x=rsqrt, y=x, name="mul1") + mul2 = mb.mul(x=main_reduce, y=rsqrt, name="mul2") + mul_sub = mb.mul(x=mul2, y=-1, name="mul_sub") + add = mb.add(x=mul1, y=mul_sub, name="add") + return add + + +if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1": + @mb.program(input_specs=[mb.TensorSpec(shape=shape)]) + def instancenorm_4(x): + """ + Identify the pattern: + y = x * [gamma * rsqrt(variance + eps)] + (beta - mean * [gamma * rsqrt(variance + eps)]) + + This pattern corresponds to, should be fused as instance_norm. + All of the following must be satisty: + 1) Input is rank 4 tensor + 2) Reduce operates on spatial dimensions axes=[-2, -1], or axes=[-3, -2] (a + channel first to channel last transpose would be inserted in such case) + 3) Gamma and beta are both shape (C,) after squeeze, where C is number of channels + + |-----------| + | V + |------> mul_square1 -------------> sum1 -----> mul_mean1 + | | + | V + x --> main_reduce --> mul_mean ==> mul_square --> sub_variance --> add_eps --> rsqrt + | | | + | | V + | | mul_gamma + | | | + | | |----------------| + | | | V + | |--------------------------------------------+-------------> mul2 + | V | + |------------------------------------------------------------------> mul1 | + | V + | sub_beta --> add --> [...] 
+ | ^ + |---------------------------| + """ + mul_square1 = mb.mul(x=x, y=x, name="mul_square1") + main_reduce = mb.reduce_sum(x=x, axes=[2, 3], keep_dims=True, name="main_reduce") + mul_mean = mb.mul(x=main_reduce, y=3.3333334e-05, name="mul_mean") # dummy value here + mul_square = mb.mul(x=mul_mean, y=mul_mean, name="mul_square") + sum1 = mb.reduce_sum(x=mul_square1, axes=[2, 3], keep_dims=True, name="sum1") + mul_mean1 = mb.mul(x=sum1, y=8.333333e-06, name="mul_mean1") # dummy value here + sub_variance = mb.sub(x=mul_mean1, y=mul_square, name="sub_variance") + add_epsilon = mb.add(x=sub_variance, y=1e-5, name="add_epsilon") # epsilon + rsqrt = mb.rsqrt(x=add_epsilon, name="rsqrt") + mul_gamma = mb.mul(x=rsqrt, y=np.random.rand(1, 5, 1, 1), name="mul_gamma") + mul1 = mb.mul(x=mul_gamma, y=x, name="mul1") + mul2 = mb.mul(x=mul_mean, y=mul_gamma, name="mul2") + sub_beta = mb.sub(x=np.random.rand(1, 5, 1, 1), y=mul2, name="sub_beta") + add = mb.add(x=mul1, y=sub_beta, name="add") + return add + +def instancenorm_1_constraints(pattern): + passed = True + passed = passed and _common_pattern1_constraints(pattern) + passed = passed and _instancenorm_constraints(pattern) + return passed + + +def layernorm_1_constraints(pattern): + passed = True + passed = passed and _common_pattern1_constraints(pattern) + passed = passed and _layernorm_constraints(pattern) + return passed + + +def instancenorm_2_constraints(pattern): + epsilon_var = _get_var(pattern.add_epsilon, pattern.mean1) + gamma_var = _get_var(pattern.mul_gamma, pattern.real_div) + beta_var = _get_var(pattern.add_beta, pattern.mul_gamma) + + passed = True + passed = passed and _check_reduce_op(pattern.main_reduce) + passed = passed and pattern.sub0.x == pattern.root_var and pattern.sub0.y == pattern.main_reduce.outputs[0] + passed = passed and pattern.sub1.x == pattern.root_var and pattern.sub1.y == pattern.main_reduce.outputs[0] + passed = passed and _check_reduce_op(pattern.mean1) + passed = passed and pattern.pow.y.val is not None and np.isclose(pattern.pow.y.val, 0.5) + passed = passed and pattern.real_div.x == pattern.sub1.outputs[0] and pattern.real_div.y == pattern.pow.outputs[0] + + passed = passed and _general_constraints(pattern, epsilon_var, gamma_var, beta_var) + passed = passed and _instancenorm_constraints(pattern) + + return passed + + +def instancenorm_3_constraints(pattern): + epsilon_var = _get_var(pattern.add_epsilon, pattern.mean1) + + gamma_var = mb.const( + val=np.ones(shape=(1, pattern.root_var.shape[1], 1, 1)), name="gamma_var" + ) + beta_var = mb.const( + val=np.zeros(shape=(1, pattern.root_var.shape[1], 1, 1)), + name="_fuse_layernorm_or_instancenorm_beta", + ) + passed = True + passed = passed and _check_reduce_op(pattern.main_reduce) + passed = passed and pattern.sub.x == pattern.root_var and pattern.sub.y == pattern.main_reduce.outputs[0] + passed = passed and _check_reduce_op(pattern.mean1) + passed = passed and pattern.mul_sub.y.val is not None and pattern.mul_sub.y.val == -1 + + passed = passed and _general_constraints(pattern, epsilon_var, gamma_var, beta_var) + passed = passed and _instancenorm_constraints(pattern) + + return passed + + +def instancenorm_4_constraints(pattern): + epsilon_var = _get_var(pattern.add_epsilon, pattern.sub_variance) + gamma_var = _get_var(pattern.mul_gamma, pattern.rsqrt) + beta_var = pattern.sub_beta.x + + passed = True + passed = passed and _check_reduce_op(pattern.main_reduce, mode="reduce_sum") + passed = passed and pattern.mul_mean.y.shape == () + passed = passed and 
_check_reduce_op(pattern.sum1, "reduce_sum") + passed = passed and pattern.mul_mean1.y.shape == () + passed = passed and pattern.sub_variance.y == pattern.mul_square.outputs[0] + passed = passed and pattern.sub_beta.y == pattern.mul2.outputs[0] + + passed = passed and _general_constraints(pattern, epsilon_var, gamma_var, beta_var) + passed = passed and _instancenorm_constraints(pattern) + + return passed + + +def _general_constraints(pattern, epsilon_var, gamma_var, beta_var): + passed = True + passed = passed and pattern.root_var.shape is not None + passed = passed and epsilon_var.val is not None and len(epsilon_var.val.shape) == 0 + passed = passed and gamma_var.val is not None + passed = passed and beta_var.val is not None + + pattern.add_attribute("epsilon_var", epsilon_var) + pattern.add_attribute("gamma_var", gamma_var) + pattern.add_attribute("beta_var", beta_var) + return passed + + +def _common_pattern1_constraints(pattern): + epsilon_var = _get_var(pattern.add_epsilon, pattern.reduce_mean_2) + gamma_var = _get_var(pattern.mul_gamma, pattern.rsqrt) + beta_var = pattern.sub_beta.x + + passed = True + passed = passed and _check_reduce_op(pattern.main_reduce) + passed = passed and _check_reduce_op(pattern.reduce_mean_2) + passed = passed and pattern.sub.x == pattern.root_var and pattern.sub.y == pattern.main_reduce.outputs[0] + passed = passed and pattern.sub_beta.y == pattern.mul_3.outputs[0] + + passed = passed and _general_constraints(pattern, epsilon_var, gamma_var, beta_var) + + return passed + +def _layernorm_constraints(pattern): + rank, axes, negative_axes = _rank_and_axes(pattern) + + passed = True + passed = passed and len(pattern.gamma_var.val.shape) == len(axes) + passed = passed and len(pattern.beta_var.val.shape) == len(axes) + passed = passed and negative_axes == list(range(-len(negative_axes), 0)) + requires_rank4_transpose = False + + if rank == 4 and negative_axes == [-3, -2]: + requires_rank4_transpose = True + + pattern.add_attribute("requires_rank4_transpose", requires_rank4_transpose) + pattern.add_attribute("is_instancenorm", False) + return passed + + +def _instancenorm_constraints(pattern): + rank, axes, negative_axes = _rank_and_axes(pattern) + + passed = True + passed = passed and rank == 4 + passed = passed and _check_axes_and_var_shape(negative_axes, pattern.gamma_var.shape) + passed = passed and _check_axes_and_var_shape(negative_axes, pattern.beta_var.shape) + + requires_rank4_transpose = False + if negative_axes == [-3, -2]: + requires_rank4_transpose = True + pattern.add_attribute("requires_rank4_transpose", requires_rank4_transpose) + pattern.add_attribute("is_instancenorm", True) + return passed + + +def _rank_and_axes(pattern): + rank = len(pattern.root_var.shape) + axes = pattern.main_reduce.axes.val + negative_axes = [a - rank if a >= 0 else a for a in axes] + negative_axes.sort() + return rank, axes, negative_axes + + +def _get_var(operation1, operation2): + return operation1.y if operation1.x == operation2.outputs[0] else operation1.x + +def _check_axes_and_var_shape(negative_axes, shape): + if len(shape) == 1: + return True + if negative_axes == [-2, -1]: + return shape[0] == 1 and shape[2] == 1 and shape[3] == 1 + if negative_axes == [-3, -2]: + return shape[0] == 1 and shape[1] == 1 and shape[2] == 1 + return False + +def transform_pattern(pattern): + """ + Insert instance_norm / layer_norm and delete all ops. + :param pattern: A pattern object that contains all relevant information. 
+ """ + out_name = pattern.final_op.outputs[0].name + axes = pattern.main_reduce.axes.val + + if pattern.requires_rank4_transpose: + x = mb.transpose( + x=pattern.main_reduce.x, + perm=[0, 3, 1, 2], + name=out_name + "_transpose_nhwc_nchw", + before_op=pattern.final_op, + ) + if pattern.is_instancenorm: + x = mb.instance_norm( + x=x if pattern.requires_rank4_transpose else pattern.main_reduce.x, + gamma=np.squeeze(pattern.gamma_var.val), + beta=np.squeeze(pattern.beta_var.val), + epsilon=pattern.epsilon_var, + name=out_name + "_instancenorm" if pattern.requires_rank4_transpose else out_name, + before_op=pattern.final_op, + ) + else: # is_layernorm + x = mb.layer_norm( + x=x if pattern.requires_rank4_transpose else pattern.main_reduce.x, + axes=axes, + gamma=pattern.gamma_var, + beta=pattern.beta_var, + epsilon=pattern.epsilon_var, + name=out_name + "_layernorm" if pattern.requires_rank4_transpose else out_name, + before_op=pattern.final_op, + ) + if pattern.requires_rank4_transpose: + x = mb.transpose( + x=x, + perm=[0, 2, 3, 1], + name=out_name + "_transpose_nchw_nhwc", + before_op=pattern.final_op, + ) + + pattern.final_op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=pattern.final_op, old_var=pattern.final_op.outputs[0], new_var=x + ) + # Remove all the ops at once + pattern.block.remove_ops(pattern.op_list()) + + +if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1": + register_generic_pass( + ops_arrangement=instancenorm_or_layernorm, + var_constraints=layernorm_1_constraints, + transform_pattern=transform_pattern, + pass_name="fuse_layernorm_or_instancenorm", + namespace="common", + ) + + register_generic_pass( + ops_arrangement=instancenorm_or_layernorm, + var_constraints=instancenorm_1_constraints, + transform_pattern=transform_pattern, + pass_name="fuse_layernorm_or_instancenorm", + namespace="common", + ) + + register_generic_pass( + ops_arrangement=instancenorm_2, + var_constraints=instancenorm_2_constraints, + transform_pattern=transform_pattern, + pass_name="fuse_layernorm_or_instancenorm", + namespace="common", + ) + + register_generic_pass( + ops_arrangement=instancenorm_3, + var_constraints=instancenorm_3_constraints, + transform_pattern=transform_pattern, + pass_name="fuse_layernorm_or_instancenorm", + namespace="common", + ) + + register_generic_pass( + ops_arrangement=instancenorm_4, + var_constraints=instancenorm_4_constraints, + transform_pattern=transform_pattern, + pass_name="fuse_layernorm_or_instancenorm", + namespace="common", + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/generic_linear_bias_fusion.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/generic_linear_bias_fusion.py new file mode 100644 index 00000000..3b12b5e6 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/generic_linear_bias_fusion.py @@ -0,0 +1,133 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. 
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import os
+
+import numpy as np
+
+from coremltools.converters.mil import Builder as mb
+from coremltools.converters.mil.experimental.passes.generic_pass_infrastructure import \
+    register_generic_pass
+from coremltools.converters.mil.mil import get_new_symbol
+
+if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1":
+    arbitrary_shape = (get_new_symbol(), get_new_symbol())
+    np.random.seed()
+    arbitrary_weight = np.random.rand(4, 3)
+    arbitrary_bias = np.random.rand(4)
+
+if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1":
+    @mb.program(input_specs=[mb.TensorSpec(shape=arbitrary_shape)])
+    def pattern_add(x):
+        """
+        Original:
+            %4 = linear(x=%1, weight=%2, bias=%3) # %2 is a rank-2 const tensor (weight)
+                                                  # %3 is a rank-1 const tensor (bias)
+            ...
+            %6 = add(x=%4, y=%5) # %5 is a const tensor with the same shape as %3
+
+        Result:
+            %8 = linear(x=%1, weight=%2, bias=%7) # where %7 is a new const tensor with value
+                                                  # %7 = %3 + %5
+        """
+        linear = mb.linear(x=x, weight=arbitrary_weight, bias=arbitrary_bias, name="linear")
+        add_or_sub = mb.add(x=linear, y=arbitrary_bias, name="add_or_sub")
+        return add_or_sub
+
+if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1":
+    @mb.program(input_specs=[mb.TensorSpec(shape=arbitrary_shape)])
+    def pattern_sub(x):
+        """
+        Original:
+            %4 = linear(x=%1, weight=%2, bias=%3) # %2 is a rank-2 const tensor (weight)
+                                                  # %3 is a rank-1 const tensor (bias)
+            ...
+            %6 = sub(x=%5, y=%4) # %5 is a const tensor with a shape broadcastable with %3,
+                                 # i.e. if %3 has shape (Dout), %5 could be (1, Dout).
+
+        Result:
+            %9 = linear(x=%1, weight=%7, bias=%8) # where %7 is a new const tensor with value %7 = -%2
+                                                  # %8 = %5 - %3
+        """
+        linear = mb.linear(x=x, weight=arbitrary_weight, bias=arbitrary_bias, name="linear")
+        add_or_sub = mb.sub(x=linear, y=arbitrary_bias, name="add_or_sub")
+        return add_or_sub
+
+
+def var_constraints(pattern):
+    passed = True
+    passed = passed and (pattern.add_or_sub.x.val is not None or pattern.add_or_sub.y.val is not None)
+
+    is_sub, is_first_input = _get_is_sub_and_is_first_input(pattern)
+    linear_bias, bias, Dout = _get_linear_bias_bias_Dout(pattern, is_first_input)
+
+    # check that the bias shape is broadcastable to the linear bias shape
+    passed = passed and np.prod(linear_bias.shape) == np.prod(bias.shape)
+    passed = passed and bias.shape[-1] == Dout
+    return passed
+
+
+def _get_is_sub_and_is_first_input(pattern):
+    is_sub = pattern.add_or_sub.op_type == "sub"
+    is_first_input = pattern.add_or_sub.x == pattern.linear.outputs[0]
+    return is_sub, is_first_input
+
+
+def _get_linear_bias_bias_Dout(pattern, is_first_input):
+    linear_bias = pattern.linear.bias.val
+    bias = pattern.add_or_sub.y.val if is_first_input else pattern.add_or_sub.x.val
+    Dout = linear_bias.shape[0]
+    return linear_bias, bias, Dout
+
+
+def transform_pattern(pattern):
+    is_sub, is_first_input = _get_is_sub_and_is_first_input(pattern)
+    linear_bias, bias, Dout = _get_linear_bias_bias_Dout(pattern, is_first_input)
+    bias = np.reshape(bias, (Dout,))
+
+    if is_sub and is_first_input:
+        bias = -bias
+    if is_sub and not is_first_input:
+        linear_bias = -linear_bias
+
+    new_bias = linear_bias + bias
+
+    # compute the new weight
+    if is_sub and not is_first_input:
+        new_weight = -pattern.linear.weight.val
+    else:
+        new_weight = pattern.linear.weight.val
+
+    # create a new linear op with the new weight and bias values, copying the rest of the attributes
+    out_name = pattern.add_or_sub.outputs[0].name
+    linear_kwargs = {"weight": new_weight, "bias": new_bias, "name": out_name, "before_op": pattern.linear}
+
+    linear_kwargs.update({k: v for k, v in pattern.linear.inputs.items() if k not in ["weight", "bias"]})
+
+    x = mb.linear(**linear_kwargs)
+
+    pattern.add_or_sub.enclosing_block.replace_uses_of_var_after_op(
+        anchor_op=pattern.add_or_sub, old_var=pattern.add_or_sub.outputs[0], new_var=x
+    )
+    # Remove all the ops at once
+    pattern.block.remove_ops(pattern.op_list())
+
+
+if os.getenv("ENABLE_EXPERIMENTAL_PASSES") == "1":
+    register_generic_pass(
+        ops_arrangement=pattern_add,
+        var_constraints=var_constraints,
+        transform_pattern=transform_pattern,
+        pass_name="fuse_linear_bias",
+        namespace="common",
+    )
+
+    register_generic_pass(
+        ops_arrangement=pattern_sub,
+        var_constraints=var_constraints,
+        transform_pattern=transform_pattern,
+        pass_name="fuse_linear_bias",
+        namespace="common",
+    )
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/generic_pass_infrastructure.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/generic_pass_infrastructure.py
new file mode 100644
index 00000000..9ebb1b2d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/experimental/passes/generic_pass_infrastructure.py
@@ -0,0 +1,221 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import itertools
+from functools import partial
+
+from coremltools.converters.mil.mil.passes.helper import block_context_manager
+
+from ...mil.passes import pass_registry
+
+# IMPORTANT: List of assumptions we are making about the problem
+# 1) The user-defined pattern has exactly one root variable and one final output operation. As such, we will be searching for a singular
+#    root variable in the larger program, and using that root variable as a starting point for our pattern matching.
+#    And, we will only match one of the final operations of the larger program.
+# 2) The root variable in the larger program, where we start off the pattern matching, must have the same number of child ops as the
+#    root variable in the user-defined program.
+# 3) The outputs of an operation are stored in an identical, predictable order. The child operations of an operation are stored in a random order.
+
+
+class Pattern:
+
+    """This class will have references to all the ops that we have captured in the main, larger program.
+    Each captured op will be an attribute of this class. The attribute name will be the same name
+    that the user defined in their pattern. So, if the user defines a pattern add(name='add_1') -> sub(name='sub_1'),
+    the pattern object will have the fields pattern.add_1, pattern.sub_1, which are references to the corresponding operations
+    in the larger program.
+
+
+    Minimum Attributes:
+    root_var: the root variable of the first operation of the captured pattern (corresponds to the user-defined pattern's root variable)
+    final_op: the operation in the larger machine learning model that corresponds to the last operation in the user defined pattern.
+    block: the block in the larger machine learning model where the pattern was found
+    op_set: a set of all the operations captured from the larger machine learning model
+    attribute_set: used for enforcing naming (i.e., so the user doesn't overwrite any of the variables mentioned above)
+
+    Setters
+    set_root_var(root_var): sets the root_var attribute of the Pattern with the given root_var
+    set_block(block): sets the block attribute of the Pattern with the given block
+    set_final_op(op_name, final_op): adds the operation in question to the pattern and also sets it as the final_op
+
+    Other Methods
+    add_attribute(attribute_name, attribute): Adds an attribute to the pattern object. Can be useful for the user.
+                                              Verifies the name using the attribute set mentioned above
+    add_op(op_name, op): Adds an operation to the pattern, as an attribute which can be accessed and as part of the op_set
+    op_list(): converts the op_set to a list and returns it, to make it easier for the user
+
+    """
+
+    def __init__(self):
+        self.root_var = None
+        self.block = None
+        self.final_op = None
+        self.op_set = set()
+        self.attribute_set = set(["root_var", "block", "final_op", "op_set", "attribute_set"])
+
+    def set_root_var(self, root_var):
+        self.root_var = root_var
+
+    def set_block(self, block):
+        self.block = block
+
+    def set_final_op(self, op_name, final_op):
+        self.add_op(op_name, final_op)
+        self.final_op = final_op
+
+    def add_attribute(self, attribute_name, attribute):
+        if attribute_name in self.attribute_set:
+            raise NameError("Pattern attribute " + attribute_name + " is being overwritten. "
+                            "Make sure every operation in your MIL pattern to detect "
+                            "has a unique name, and that no operation in it or an attribute you are setting is named "
+                            "root_var, block, final_op, op_set, or attribute_set.")
+        setattr(self, attribute_name, attribute)
+
+    def add_op(self, op_name, op):
+        self.add_attribute(op_name, op)
+        self.op_set.add(op)
+
+    def op_list(self):
+        return list(self.op_set)
+
+def _lists_op_equality(oplist1, oplist2):
+    if len(oplist1) != len(oplist2):
+        return False
+
+    for i in range(len(oplist1)):
+        if oplist1[i].op_type != oplist2[i].op_type:
+            return False
+
+    return True
+
+def _pattern_detected(pattern, program_op, pattern_op, program_root_var, pattern_root_var, block):
+    # If pattern_op is None, we are dealing with root_var checking (a root_var doesn't have an op_type or outputs)
+    if pattern_op is not None and program_op.op_type != pattern_op.op_type:
+        return False
+
+    if pattern_op is not None and len(program_op.outputs) != len(pattern_op.outputs):
+        return False
+
+    for i in range(len(program_op.outputs) if pattern_op is not None else 1):
+        output_same = False
+
+        # ASSUMPTION: Assuming that the outputs of an operation are ordered in a particular way,
+        # so two identical operations will have the same ordering of outputs.
+ program_child_op_list = list(program_op.outputs[i].child_ops) if pattern_op is not None else program_root_var.child_ops + pattern_child_op_list = list(pattern_op.outputs[i].child_ops) if pattern_op is not None else pattern_root_var.child_ops + + # Last op in the pattern + if len(pattern_child_op_list) == 0: + if pattern.final_op is not None and pattern.final_op != program_op: + raise ValueError("User defined pattern has more than one final operation") + pattern.set_final_op(pattern_op.name, program_op) + return True + + if len(program_child_op_list) != len(pattern_child_op_list): + return False + + # Permuting the program child operations so that at least one of the permutations will be in + # the exact same order as the pattern child operations + op_combos = list(itertools.permutations(pattern_child_op_list)) + + for combo in op_combos: + if _lists_op_equality(combo, program_child_op_list): + truly_equal = True + + for i in range(len(combo)): + truly_equal = truly_equal and _pattern_detected(pattern, program_child_op_list[i], combo[i], program_root_var, pattern_root_var, block) + + if truly_equal: + # The operations in this sequence match perfectly with the pattern + output_same = True + break + + if output_same is False: + return False + + if pattern_op is not None: + pattern.add_op(pattern_op.name, program_op) + return True + + +# This function finds the root_variable in the program that matches with the root_variable in the pattern, +# And then kicks off the pattern matching from there +def _detect_pattern(program_op, ops_arrangement_root_var, block): + # The goal of this function is to find the root variable of both operations + program_op_inputs = program_op.get_flattened_inputs() + + for potential_program_root_variable in program_op_inputs: + pattern = Pattern() + pattern.set_block(block) + + if _pattern_detected(pattern, program_op, ops_arrangement_root_var.op, potential_program_root_variable, ops_arrangement_root_var, block): + pattern.set_root_var(potential_program_root_variable) + + # check that none of the ops in this pattern is connected to the output + # (except the last one) + for op in pattern.op_list(): + if op is not pattern.final_op: + for out in op.outputs: + if out in pattern.block.outputs: + return False, None + + return True, pattern + + return False, None + +@block_context_manager +def _fuse_one_block(block, ops_arrangement, var_constraints, transform_pattern): + fusion_status = False + for op in list(block.operations): + for b in op.blocks: + block_changed = True + while block_changed: + block_changed = _fuse_one_block(b, ops_arrangement, var_constraints, transform_pattern) + + ops_arrangement_root_var = list(ops_arrangement.functions.values())[0].function_inputs[0] + fusion_status, pattern = _detect_pattern(op, ops_arrangement_root_var, block) + + if fusion_status: + fusion_status &= var_constraints(pattern) + + if fusion_status: + transform_pattern(pattern) + return fusion_status + + return fusion_status + + +def fuse_all_blocks(ops_arrangement, var_constraints, transform_pattern, prog): + for f in prog.functions.values(): + block_changed = True + while block_changed: + block_changed = _fuse_one_block(f, ops_arrangement, var_constraints, transform_pattern) + + +class PassContainer(): + def __init__(self, pass_name): + self.pass_name = pass_name + self.passes = [] + + def __call__(self, prog): + if len(self.passes) == 0: + raise ValueError("no pass functions associated with " + self.pass_name) + + for one_pass in self.passes: + one_pass(prog) + prog.validate() + 
+ def add(self, pass_function): + self.passes.append(pass_function) + +def register_generic_pass(ops_arrangement, var_constraints, transform_pattern, pass_name, namespace): + pass_function = partial(fuse_all_blocks, ops_arrangement, var_constraints, transform_pattern) + + pass_id = namespace + "::" + pass_name + if pass_id not in pass_registry.PASS_REGISTRY or not isinstance(pass_registry.PASS_REGISTRY[pass_id], PassContainer): + pass_registry.PASS_REGISTRY.passes[pass_id] = PassContainer(pass_name) + + pass_registry.PASS_REGISTRY[pass_id].add(pass_function) + diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/__init__.py new file mode 100644 index 00000000..ee7e9ea0 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from . import tensorflow, tensorflow2, torch diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/_utils.py new file mode 100644 index 00000000..8e39fbe5 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/_utils.py @@ -0,0 +1,410 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from typing import List, Optional + +from coremltools.converters.mil.input_types import InputType +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import Var, types +from coremltools.converters.mil.mil.ops.defs._utils import parse_einsum_equation +from coremltools.converters.mil.mil.types.symbolic import any_symbolic, is_symbolic + + +def value_at(x: Var, idx: int, name=None): + """ + input x: 1D tensor (vector). + return value at index idx. x[idx]. + Could specify the name of the returned MIL scalar tensor as well. 
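+    e.g. for a rank-1 tensor x, value_at(x, 2) builds a slice_by_index op whose
+    output is the scalar x[2].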
+ """ + assert x.rank == 1 + args = { + "x": x, + "begin": [idx], + "end": [0], + "squeeze_mask": [True], + } + if name is not None: + args["name"] = name + return mb.slice_by_index(**args) + + +def _reverse_input_einsum_eq(equation: str) -> str: + """ + Reverse the input order of the einsum eqaution + e.g.: + input : "nchw,nwhu->nchu" + returns : "nwhu,nchw->nchu" + """ + input_output_strings = equation.split('->') + assert len(input_output_strings) == 2, "invalid equation" + input_strings = input_output_strings[0].split(',') + assert len(input_strings) == 2, "invalid equation" + equation = input_strings[1] + ',' + input_strings[0] + '->' + input_output_strings[1] + return equation + + +def build_einsum_mil(a_var: Var, b_var: Var, equation: str, name: str) -> Var: + """ + Get MIL variables as input and build a variable using MIL builder, that + contains the output of the einsum equation + + :param a_var: + - var + - first input variable + :param b_var: + - var + - second input variable + :param equation: + - str + - the einsum equation + :param name: + - str + - name tp be assigned to the output var + + :return: + - var + - output var that contains the einsum result + """ + + ## TODO: rdar://73851694 (Update einsum op translation to support generic cases) + equation = equation.replace(" ", "") + parsed_vectors = parse_einsum_equation(equation) + equation_rev = _reverse_input_einsum_eq(equation) + parsed_vectors_rev = parse_einsum_equation(equation_rev) + + def _swap(a, b): + return b, a + + is_dynamic = any_symbolic(a_var.shape) or any_symbolic(b_var.shape) + # list of equations supported for explicit mil translations + vec_bnqd_bnkd_bnqk = ( + [0, 1, 2, 3], + [0, 1, 4, 3], + [0, 1, 2, 4], + ) # equation == "bnqd,bnkd->bnqk" + vec_bhcq_bhck_bhqk = ( + [0, 1, 2, 3], + [0, 1, 2, 4], + [0, 1, 3, 4], + ) # equation == "bhcq,bhck->bhqk" + vec_abc_cd_abd = ([0, 1, 2], [2, 3], [0, 1, 3]) # equation == "abc,cd->abd" + vec_abc_cde_abde = ( + [0, 1, 2], + [2, 3, 4], + [0, 1, 3, 4], + ) # equation == "abc,cde->abde" + vec_btnh_bfnh_bnft = ( + [0, 1, 2, 3], + [0, 4, 2, 3], + [0, 2, 4, 1], + ) # equation == "btnh,bfnh->bnft" + vec_bnft_btnh_bfnh = ( + [0, 1, 2, 3], + [0, 3, 1, 4], + [0, 2, 1, 4], + ) # equation == "bnft,btnh->bfnh" + vec_abcd_cde_abe = ( + [0, 1, 2, 3], + [2, 3, 4], + [0, 1, 4], + ) # equation == "abcd,cde->abe" + vec_nchw_nwhu_nchu = ( + [0, 1, 2, 3], + [0, 3, 2, 4], + [0, 1, 2, 4], + ) # equation == "nchw,nwhu->nchu" + vec_chw_whu_chu = ([0, 1, 2], [2, 1, 3], [0, 1, 3]) # equation == "chw,whu->chu" + + # add the op(s) corresponding to the equation + if vec_bnqd_bnkd_bnqk in [parsed_vectors, parsed_vectors_rev]: + if parsed_vectors_rev == vec_bnqd_bnkd_bnqk: + a_var, b_var = _swap(a_var, b_var) + x = mb.matmul(x=a_var, y=b_var, transpose_x=False, transpose_y=True, name=name) + elif vec_bhcq_bhck_bhqk in [parsed_vectors, parsed_vectors_rev]: + if parsed_vectors_rev == vec_bhcq_bhck_bhqk: + a_var, b_var = _swap(a_var, b_var) + x = mb.matmul(x=a_var, y=b_var, transpose_x=True, transpose_y=False, name=name) + elif vec_abc_cd_abd in [parsed_vectors, parsed_vectors_rev]: + if parsed_vectors_rev == vec_abc_cd_abd: + a_var, b_var = _swap(a_var, b_var) + x = mb.matmul(x=a_var, y=b_var, transpose_x=False, transpose_y=False, name=name) + elif vec_abc_cde_abde in [parsed_vectors, parsed_vectors_rev] and not is_dynamic: + if parsed_vectors_rev == vec_abc_cde_abde: + a_var, b_var = _swap(a_var, b_var) + x_1 = mb.reshape(x=a_var, shape=[a_var.shape[0] * a_var.shape[1], a_var.shape[2]]) + x_2 = 
mb.reshape(x=b_var, shape=[b_var.shape[0], b_var.shape[1] * b_var.shape[2]]) + x = mb.matmul(x=x_1, y=x_2, transpose_x=False, transpose_y=False) + x = mb.reshape( + x=x, shape=[a_var.shape[0], a_var.shape[1], b_var.shape[1], b_var.shape[2]], name=name + ) + elif vec_btnh_bfnh_bnft in [parsed_vectors, parsed_vectors_rev]: + if parsed_vectors_rev == vec_btnh_bfnh_bnft: + a_var, b_var = _swap(a_var, b_var) + x_1 = mb.transpose(x=a_var, perm=[0, 2, 1, 3]) + x_2 = mb.transpose(x=b_var, perm=[0, 2, 1, 3]) + x = mb.matmul(x=x_2, y=x_1, transpose_x=False, transpose_y=True, name=name) + elif vec_bnft_btnh_bfnh in [parsed_vectors, parsed_vectors_rev]: + if parsed_vectors_rev == vec_bnft_btnh_bfnh: + a_var, b_var = _swap(a_var, b_var) + b_var = mb.transpose(x=b_var, perm=[0, 2, 1, 3]) + x = mb.matmul(x=a_var, y=b_var, transpose_x=False, transpose_y=False) + x = mb.transpose(x=x, perm=[0, 2, 1, 3], name=name) + elif vec_abcd_cde_abe in [parsed_vectors, parsed_vectors_rev] and not is_dynamic: + if parsed_vectors_rev == vec_abcd_cde_abe: + a_var, b_var = _swap(a_var, b_var) + x_1 = mb.reshape(x=a_var, shape=[a_var.shape[0], a_var.shape[1], a_var.shape[2] * a_var.shape[3]]) + x_2 = mb.reshape(x=b_var, shape=[b_var.shape[0] * b_var.shape[1], b_var.shape[2]]) + x = mb.matmul(x=x_1, y=x_2, transpose_x=False, transpose_y=False, name=name) + elif vec_nchw_nwhu_nchu in [parsed_vectors, parsed_vectors_rev]: + if parsed_vectors == vec_nchw_nwhu_nchu: + x = mb.einsum(values=(a_var, b_var), equation=equation, name=name) + else: + x = mb.einsum(values=(b_var, a_var), equation=equation_rev, name=name) + elif vec_chw_whu_chu in [parsed_vectors, parsed_vectors_rev]: + if parsed_vectors == vec_chw_whu_chu: + x = mb.einsum(values=(a_var, b_var), equation=equation, name=name) + else: + x = mb.einsum(values=(b_var, a_var), equation=equation_rev, name=name) + else: + x = solve_generic_einsum(parsed_vectors, a_var, b_var, name) + + return x + + +def is_symbolic_dim_in_prog(prog): + ''' + Takes in a MIL program object, checks if any of the tensors in it contain a symbolic dimension. + Returns true if it does. 
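+    Symbolic dimensions typically come from flexible (symbolic) input shapes,
+    e.g. inputs converted with a RangeDim, or from ops whose output shape is
+    only known at runtime.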
+ + :param prog: coremltools.converters.mil.Program + :return: bool + ''' + def _does_block_contain_symbolic_shape(block): + for op in block.operations: + for b in op.blocks: + if _does_block_contain_symbolic_shape(b): + return True + for out in op.outputs: + if types.is_tensor(out.sym_type): + shape = out.sym_type.get_shape() + if any_symbolic(shape): + return True + elif types.is_scalar(out.sym_type) or types.is_str(out.sym_type): + if is_symbolic(out.val): + return True + elif types.is_list(out.sym_type): + if types.is_tensor(out.elem_type): + if any_symbolic(out.elem_type.get_shape()): + return True + else: + raise NotImplementedError("\'{}\' type in a list not handled".format(out.elem_type)) + else: + raise NotImplementedError("\'{}\' type is not handled".format(out.sym_type)) + return False + + for f in prog.functions.values(): + if _does_block_contain_symbolic_shape(f): + return True + return False + + +def get_output_names(outputs) -> Optional[List[str]]: + """ + :param: list[ct.TensorType/ct.ImageType] + :return: list[str] or None + """ + output_names = None + if outputs is not None: + assert all([isinstance(t, InputType) for t in outputs]), \ + "outputs must be a list of ct.ImageType or ct.TensorType" + output_names = [t.name for t in outputs] + if all([name is None for name in output_names]): + output_names = None + return output_names + + +def solve_diagonal_einsum(parsed_vectors, vars): + def solve_diagonal_einsum_one_step(parsed_vector, x): + for i in range(len(parsed_vector)): + for j in range(i + 1, len(parsed_vector)): + if parsed_vector[i] != parsed_vector[j]: + continue + + perm = list(range(len(parsed_vector))) + duplicated_indices = [j for j in range(len(parsed_vector)) if parsed_vector[j] == parsed_vector[i]] + for i, j in enumerate(duplicated_indices): + perm[i], perm[j] = perm[j], perm[i] + parsed_vector[i], parsed_vector[j] = parsed_vector[j], parsed_vector[i] + + dims = mb.shape(x=x) + dim_length = value_at(dims, duplicated_indices[0]) + + indices = mb.range_1d(end=dim_length, start=0, step=1) + indices = mb.stack(values=[indices] * len(duplicated_indices), axis=1) + x = mb.transpose(x=x, perm=perm) + x = mb.gather_nd(x=x, indices=indices) + ret_parsed_vector = [parsed_vector[0]] + parsed_vector[len(duplicated_indices):] + return ret_parsed_vector, x + + parsed_vectors = list(parsed_vectors) + for i in range(len(vars)): + while len(parsed_vectors[i]) != len(set(parsed_vectors[i])): + parsed_vector, var = solve_diagonal_einsum_one_step(parsed_vectors[i], vars[i]) + parsed_vectors[i] = parsed_vector + vars[i] = var + return tuple(parsed_vectors), vars + + +def solve_sum_einsum(parsed_vectors, vars): + """ + Apply reduce_sum for axes before binary einsum calculation if enable. + + e.g.: + input : "abce,acd->ae" + returns : "ace,ac->ae" + + In this example, since each of those axes is only used by one var and does not appear in the output, + axes `b` and `d` can be reduced before binary einsum. 
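+
+    The reduction itself is applied with mb.reduce_sum over the axes that appear
+    in neither the other operand nor the output.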
+ """ + + def solve_sum_einsum_one_step(src_axes, used_by_other_axes, x): + dst_axes = [] + for axis in src_axes: + if axis not in used_by_other_axes: + continue + dst_axes.append(axis) + summed_axis_indices = [i for i in range(len(src_axes)) if src_axes[i] not in dst_axes] + if summed_axis_indices: + x = mb.reduce_sum(x=x, axes=summed_axis_indices) + return dst_axes, x + + ret_parsed_vectors = [] + parsed_vectors = list(parsed_vectors) + for i, var in enumerate(vars): + used_by_other_axes = [] + for j, parsed_vector in enumerate(parsed_vectors): + if i != j: + used_by_other_axes += parsed_vector + dst_axes, var = solve_sum_einsum_one_step(parsed_vectors[i], used_by_other_axes, vars[i]) + ret_parsed_vectors.append(dst_axes) + vars[i] = var + ret_parsed_vectors.append(parsed_vectors[-1]) + return ret_parsed_vectors, vars + + +def solve_generic_einsum(parsed_vectors, a_var, b_var, name): + """ + :param parsed_vectors: list[list[int]] + :param a_var: + - var + - first input variable + :param b_var: + - var + - second input variable + :param name: + - str + - name to be assigned to the output var + + :return: + - var + - output var that contains the einsum result + """ + + def _get_perm(src_axes, dst_axes): + """ + :param src_axes: list[int] + :param dst_axes: list[int] + :return: list[int] + """ + return [src_axes.index(s) for s in dst_axes] + + def _concat_dims(dims, none_if_empty=False): + if len(dims) == 0: + if none_if_empty: + return None + else: + return 1 + return mb.concat(values=dims, axis=0) + + parsed_vectors, vars = solve_diagonal_einsum(parsed_vectors, [a_var, b_var]) + parsed_vectors, vars = solve_sum_einsum(parsed_vectors, vars) + a_var, b_var = vars + a_axes, b_axes, out_axes = parsed_vectors + + a_dims = mb.shape(x=a_var) + b_dims = mb.shape(x=b_var) + + batched_axes = [] + reduced_axes = [] + a_unique_axes = [] + b_unique_axes = [] + + batch_dims = [] + reduce_dims = [] + a_unique_dims = [] + b_unique_dims = [] + + for i, a_axis in enumerate(a_axes): + a_dim = value_at(a_dims, i) + if a_axis in b_axes: + if a_axis in out_axes: + batched_axes.append(a_axis) + batch_dims.append(a_dim) + else: + reduced_axes.append(a_axis) + reduce_dims.append(a_dim) + else: + a_unique_axes.append(a_axis) + a_unique_dims.append(a_dim) + concat_batch_dims = _concat_dims(batch_dims, True) + # if there is no dim to reduce, then add a dummy dim, + # so mb.matmul will reduce the dummy dim to achieve outer product + concat_reduce_dims = _concat_dims(reduce_dims) + # if there is no dim of `a` remains, then add a dummy dim for `a` as a matrix dim, + # otherwise mb.matmul may mistake the batch dim of `a` as the matrix dim + concat_a_unique_dims = _concat_dims(a_unique_dims) + + for i, b_axis in enumerate(b_axes): + b_dim = value_at(b_dims, i) + if b_axis not in a_axes: + b_unique_axes.append(b_axis) + b_unique_dims.append(b_dim) + # if there is no dim of `b` remains, then add a dummy dim for `b`, + # otherwise mb.matmul may mistake the batch dim of `b` as a matrix dim + concat_b_unique_dims = _concat_dims(b_unique_dims) + + a_transpose_axes = batched_axes + a_unique_axes + reduced_axes + a = mb.transpose(x=a_var, perm=_get_perm(a_axes, a_transpose_axes)) + a_reshape_dims = _concat_dims( + [mb.reduce_prod(x=x) for x in [concat_batch_dims, concat_a_unique_dims, concat_reduce_dims] if x is not None]) + a = mb.reshape(x=a, shape=a_reshape_dims) + + b_transpose_axes = batched_axes + reduced_axes + b_unique_axes + b = mb.transpose(x=b_var, perm=_get_perm(b_axes, b_transpose_axes)) + b_reshape_dims = 
_concat_dims( + [mb.reduce_prod(x=x) for x in [concat_batch_dims, concat_reduce_dims, concat_b_unique_dims] if x is not None]) + b = mb.reshape(x=b, shape=b_reshape_dims) + + ab = mb.matmul(x=a, y=b) + concat_batch_dims = _concat_dims(batch_dims, True) + concat_a_unique_dims = _concat_dims(a_unique_dims, True) + concat_b_unique_dims = _concat_dims(b_unique_dims, True) + ab_reshaped_dims = _concat_dims( + [ + x + for x in [concat_batch_dims, concat_a_unique_dims, concat_b_unique_dims] + if x is not None + ], + True, + ) + # Removes excessive dimensions for scalar output + if ab_reshaped_dims is None: + return mb.squeeze(x=ab, name=name) + # Reshape tensor output to specified output shape + else: + ab = mb.reshape(x=ab, shape=ab_reshaped_dims) + ab_reshaped_axes = batched_axes + a_unique_axes + b_unique_axes + ab = mb.transpose(x=ab, perm=_get_perm(ab_reshaped_axes, out_axes), name=name) + return ab diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/milproto/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/milproto/__init__.py new file mode 100644 index 00000000..34ab79f0 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/milproto/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) 2022, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from .load import load diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/milproto/helper.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/milproto/helper.py new file mode 100644 index 00000000..b1fe7e6a --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/milproto/helper.py @@ -0,0 +1,65 @@ +# Copyright (c) 2022, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + + +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.program import get_new_symbol + + +def get_proto_dim(dim): + if dim.WhichOneof("dimension") == "constant": + return dim.constant.size + else: + if not dim.unknown.variadic: + return get_new_symbol() + raise NotImplementedError("Variadic dimensions not yet implemented.") + + +def proto_to_types(valuetype): + """ + A helper function that maps the proto value type to PyMIL types. 
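+
+    e.g. (a sketch) a rank-2 float tensorType maps to types.tensor(types.fp32, shape),
+    while a rank-0 tensorType maps back to the scalar dtype itself.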
+ """ + if valuetype.WhichOneof("type") == "tensorType": + tensortype = valuetype.tensorType + dtype = types.proto_to_builtin_types[tensortype.dataType] + + if tensortype.rank < 0: + raise ValueError("Negative or Dynamic ranks not supported") + if tensortype.rank != len(tensortype.dimensions): + raise ValueError("Rank doesn't match the number of dimensions") + if tensortype.attributes != {}: + raise ValueError("Attributes on tensorType not supported") + + shape = [] + for i in range(tensortype.rank): + shape.append(get_proto_dim(tensortype.dimensions[i])) + + # For the zero rank tensor, we always convert it back to scalar in PyMIL first + if tensortype.rank == 0: + return dtype + + return types.tensor(dtype, shape) + + elif valuetype.WhichOneof("type") == "listType": + listtype = valuetype.listType + elem_type = proto_to_types(listtype.type) + + if listtype.length.unknown: + init_length = None + else: + init_length = listtype.length.constant.size + + # In the MIL proto, there is no such thing of "dynamic_length", hence we set it to True when + # converting back to PyMIL + return types.list(elem_type, init_length, dynamic_length=True) + + elif valuetype.WhichOneof("type") == "dictionaryType": + dicttype = valuetype.dictionaryType + keytype = proto_to_types(dicttype.keyType) + valuetype = proto_to_types(dicttype.valueType) + + return types.dict(keytype, valuetype) + else: + raise NotImplementedError("Types {} not yet implemented".format(valuetype.WhichOneof("type"))) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/milproto/load.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/milproto/load.py new file mode 100644 index 00000000..1761e31b --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/milproto/load.py @@ -0,0 +1,429 @@ +# Copyright (c) 2022, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os + +import numpy as np + +from coremltools import _logger as logger +from coremltools.converters.mil._deployment_compatibility import \ + AvailableTarget as _target +from coremltools.converters.mil.mil import Block +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import (Function, ListVar, Placeholder, + Program, TupleInputType, Var, + mil_list, types) +from coremltools.converters.mil.mil.block import curr_block +from coremltools.converters.mil.mil.ops.registry import \ + SSAOpRegistry as _SSAOpRegistry +from coremltools.proto import MIL_pb2 as pm +from coremltools.proto import Model_pb2 as ml + +from .helper import proto_to_types + +try: + from coremltools.libmilstoragepython import _BlobStorageReader as BlobReader +except: + BlobReader = None + + +class TranscriptionContext: + """ + Holds shared variables needed for transcription. + """ + + def __init__(self, weights_dir=""): + self.name_to_var = {} # mapping from name -> var object + self.blob_reader_from_filename = ( + {} + ) # mapping from filename -> BlobReader object + self.weights_dir = weights_dir + + def register_var_with_name(self, name, var): + var.name = name + if name in self.name_to_var: + # Overriding allow us to translate control flow blocks + msg = "Var %s is added again. 
Overriding previous value" + logger.info(msg % name) + self.name_to_var[name] = var + + def get_var_from_name(self, name): + if name not in self.name_to_var: + raise KeyError("Var {} not found".format(name)) + return self.name_to_var[name] + + +def _load_tensorvalue(tensorvalue_spec): + if not isinstance(tensorvalue_spec, pm.TensorValue): + raise TypeError("Invalid TensorValue spec object") + + if tensorvalue_spec.WhichOneof("value") == "floats": + return tensorvalue_spec.floats.values + elif tensorvalue_spec.WhichOneof("value") == "ints": + return tensorvalue_spec.ints.values + elif tensorvalue_spec.WhichOneof("value") == "bools": + return tensorvalue_spec.bools.values + elif tensorvalue_spec.WhichOneof("value") == "strings": + return tensorvalue_spec.strings.values + elif tensorvalue_spec.WhichOneof("value") == "longInts": + return tensorvalue_spec.longInts.values + elif tensorvalue_spec.WhichOneof("value") == "doubles": + return tensorvalue_spec.doubles.values + elif tensorvalue_spec.WhichOneof("value") == "bytes": + return tensorvalue_spec.bytes.values + else: + raise ValueError("Invalid dtype for TensorValue type") + + +def _load_immediate_value(immediatevalue_spec): + if not isinstance(immediatevalue_spec, pm.Value.ImmediateValue): + raise TypeError("Invalid ImmedidateValue spec object") + + if immediatevalue_spec.WhichOneof("value") == "tensor": + return _load_tensorvalue(immediatevalue_spec.tensor) + elif immediatevalue_spec.WhichOneof("value") == "list": + return immediatevalue_spec.list.values + else: + raise NotImplementedError( + "Immediate value type not supported yet." + ) + + +def _load_file_value(context, filevalue_spec, dtype): + if BlobReader is None: + raise RuntimeError("BlobReader not loaded") + if not isinstance(filevalue_spec, pm.Value.BlobFileValue): + raise TypeError("Invalid BlobFileValue spec object") + + filename = os.path.join(context.weights_dir, filevalue_spec.fileName.split("/")[-1]) + offset = filevalue_spec.offset + + if filename in context.blob_reader_from_filename: + blob_reader = context.blob_reader_from_filename[filename] + else: + blob_reader = BlobReader(filename) + context.blob_reader_from_filename[filename] = blob_reader + + if dtype == types.uint8: + np_value = np.array(blob_reader.read_uint8_data(offset), np.uint8) + elif dtype == types.int8: + np_value = np.array(blob_reader.read_int8_data(offset), np.int8) + elif dtype == types.fp16: + np_value_uint16 = np.array(blob_reader.read_fp16_data(offset), np.uint16) + np_value = np.frombuffer(np_value_uint16.tobytes(), np.float16) + elif dtype == types.fp32: + np_value = np.array(blob_reader.read_float_data(offset), np.float32) + else: + raise ValueError("Invalid dtype for blob file value type") + + return np_value + + +def _load_value(context, value_spec): + if not isinstance(value_spec, pm.Value): + raise TypeError("Invalid Value spec object") + + if value_spec.docString: + raise ValueError("Docstring would get lost in the process.") + + if value_spec.type.WhichOneof("type") == "tensorType": + valuetype = proto_to_types(value_spec.type) + + is_tensor = types.is_tensor(valuetype) + + dtype = valuetype if not is_tensor else valuetype.get_primitive() + shape = () if not is_tensor else valuetype.get_shape() + + if value_spec.WhichOneof("value") == "immediateValue": + value = _load_immediate_value(value_spec.immediateValue) + else: + value = _load_file_value(context, value_spec.blobFileValue, dtype) + + if dtype in (types.fp16, types.int8, types.uint8, types.uint32): + value = np.frombuffer(value, 
types.nptype_from_builtin(dtype)).reshape(
+                shape
+            )
+        elif dtype == types.str and shape == ():
+            value = str(value[0])
+        elif dtype in (types.fp32, types.str, types.bool, types.int32, types.int64):
+            value = (
+                np.array(value).astype(types.nptype_from_builtin(dtype)).reshape(shape)
+            )
+        else:
+            raise ValueError("Invalid dtype for tensor value")
+    else:
+        raise NotImplementedError("Only value of tensorType implemented yet")
+
+    if not is_tensor and not isinstance(value, str):
+        value = types.nptype_from_builtin(dtype)(value.item())
+
+    return value
+
+
+def _create_var_from_spec(spec):
+    """
+    This helper function is used for creating a PyMIL Var/ListVar from the proto spec.
+    Mainly used for the construction of the control flow ops.
+    """
+    assert isinstance(spec, pm.NamedValueType)
+    sym_type = proto_to_types(spec.type)
+    name = spec.name
+    if types.is_list(sym_type):
+        var = ListVar(
+            name,
+            elem_type=sym_type.T[0],
+            init_length=sym_type.T[1],
+            dynamic_length=sym_type.T[2])
+    else:
+        var = Var(name, sym_type, None, op=None, op_output_idx=None)
+    return var

+def _set_outer_op_for_nested_blocks(blocks, op):
+    """
+    A utility function that sets the outer_op of the blocks for control flow ops.
+    """
+    for block in blocks:
+        block.outer_op = op

+def _create_nested_blocks(context, op_spec):
+    """
+    A utility function that creates nested blocks for control flow ops.
+    """
+    if not op_spec.blocks:
+        return []
+
+    blocks = []
+
+    for block_spec in op_spec.blocks:
+        input_vars = [_create_var_from_spec(input) for input in block_spec.inputs]
+
+        # add block input vars to the context
+        for v in input_vars:
+            context.register_var_with_name(v.name, v)
+
+        # In pymil, the outer_op for a block can only be None if the block is a Function.
+        # As a result, we use a dummy outer_op here for block creation, and set it to
+        # the legit op later on in _set_outer_op_for_nested_blocks
+        dummy = mb.const(val=0.)
+        with Block(block_inputs=input_vars, outer_op=dummy._op,
+                   name=Block._get_new_name()) as block:
+            _load_block(context, block_spec)
+
+        blocks.append(block)
+
+    return blocks

+def _set_inputs_for_control_flow_op(inputs, blocks, op_type):
+    """
+    A utility function that sets the dummy functional inputs and block inputs for
+    control flow ops.
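+
+    e.g. for op_type == "while_loop", dummy _cond / _body callables are installed
+    and the pre-built blocks are passed via "_existing_blocks".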
+ """ + if op_type == "while_loop": + def _dummy_cond(*loop_vars): + return None + + def _dummy_body(*loop_vars): + return None + + inputs["_existing_blocks"] = blocks + inputs["_cond"] = _dummy_cond + inputs["_body"] = _dummy_body + + elif op_type == "cond": + def _dummy_true_fn(*loop_vars): + return None + def _dummy_false_fn(*loop_vars): + return None + + inputs["_existing_blocks"] = blocks + inputs["_true_fn"] = _dummy_true_fn + inputs["_false_fn"] = _dummy_false_fn + + +def _load_operation(context, op_spec): + if not isinstance(op_spec, pm.Operation): + raise TypeError("Invalid Operation spec object") + + op_type = op_spec.type + if op_type == "const" or op_type.startswith("constexpr_"): + if op_spec.blocks: + raise ValueError("const / constexpr operation can't have any block") + if op_spec.inputs: + raise ValueError("const / constexpr operation can't have any input") + + inputs = {k: _load_value(context, v) for k, v in op_spec.attributes.items()} + pymil_var = getattr(mb, op_type)(**inputs) + context.register_var_with_name(op_spec.outputs[0].name, pymil_var) + + else: + if op_type == "custom_layer": + raise NotImplementedError( + "Loading Custom Layer operation not yet implemented" + ) + + if op_spec.attributes: + raise ValueError("Attributes on operation not supported") + + # The conversion steps of an operation proto -> PyMIL operation are as following: + + # (i) Convert the input arguments: + # In most of the cases, the input variable is already created beforehand, hence we can + # directly access and get them through the TranscriptionContext. + # There are cases, though, the inputs are literal value. This could happens in the classify op spec. + # For that case, we directly create a constant variable. + + # (ii) Create nested blocks for control flow operations: + # The Python functinoal input arguments for control flow ops cannot be recovered from milproto -> pymil conversion, + # for instance, the _body, _cond for mb.while_loop and _true_fn, _false_fn for mb.cond are not invertible + # Hence, here we directly create the nested blocks from the proto, and set them to mb.while_loop.blocks / mb.cond.blocks. + # Note that, when creating a block, PyMIL required an outer_op, which should be the control flow operation itself. However, + # in this approach we take, the outer_op hasn't been created at the time when the blocks produced. Here, we make a "dummy outer_op", + # which could pass the check in PyMIL, also it could provide enough information (such as visible variables in the blocks etc.) + # for the creation of the block. + + # (iii) Create PyMIL operation using inputs / blocks + # Note that for the control flow cases, we create dummy functional inputs, and use the exisiting block to create the op. 
+ + # (iv) Set the outer_op for control flow + # Once the operation is created, we replace the dummy outer_op with the legit one, to make it a valid PyMIL program + + inputs = {} + for param_name, argument in op_spec.inputs.items(): + vars = [] + for binding in argument.arguments: + binding_type = binding.WhichOneof("binding") + if binding_type == "name": + vars.append(context.get_var_from_name(binding.name)) + elif binding_type == "value": + # We only support the list value for now (for the classifier use case) + value_spec = binding.value + assert value_spec.WhichOneof("value") == "immediateValue" + assert value_spec.immediateValue.WhichOneof("value") == "list" + list_value = _load_immediate_value(value_spec.immediateValue) + values = [] + for value_spec in list_value: + values.append(_load_value(context, value_spec)) + var = mb.const(val=mil_list(values)) + vars.append(var) + else: + raise NotImplementedError("Binding {} not yet implemented".format(binding_type)) + op_cls = _SSAOpRegistry._get_core_op_cls(op_type) + if len(vars) == 1 and not isinstance( + op_cls.input_spec.input_types[param_name], TupleInputType + ): + inputs[param_name] = vars[0] + else: + inputs[param_name] = vars + + blocks = _create_nested_blocks(context, op_spec) + _set_inputs_for_control_flow_op(inputs, blocks, op_type) + + output_var = getattr(mb, op_type)(**inputs) + if not isinstance(output_var, (tuple, list)): + output_var = [output_var] + + if len(output_var) != len(op_spec.outputs): + raise AssertionError( + "Mismatch between number of outputs in operation specification vs PyMIL outputs" + ) + + for spec, var in zip(op_spec.outputs, output_var): + context.register_var_with_name(spec.name, var) + + pymil_type = var.sym_type + proto_type = proto_to_types(spec.type) + if not types.is_compatible_type(pymil_type, proto_type): + # We allow a corner case where the pymil has an 0 rank tensor and the spec produces a scalar + if types.is_tensor(pymil_type) and types.is_scalar(proto_type): + if pymil_type.get_primitive() == proto_type: + continue + raise AssertionError( + "Mismatch between var types in specification vs PyMIL" + ) + + _set_outer_op_for_nested_blocks(blocks, output_var[0].op) + + +def _load_block(context, block_spec): + if not isinstance(block_spec, pm.Block): + raise TypeError("Invalid Block spec object") + + if block_spec.attributes: + raise ValueError("Attributes on block not supported") + + block_outputs = block_spec.outputs + output_vars = [] + for op_spec in block_spec.operations: + _load_operation(context, op_spec) + + for proto_output_name in block_outputs: + output_vars.append(context.get_var_from_name(proto_output_name)) + + pymil_block = curr_block() + pymil_block.set_outputs(output_vars) + return pymil_block + + +def _load_function(context, func_spec, spec_version): + if not isinstance(func_spec, pm.Function): + raise TypeError("Invalid Function spec object") + + if func_spec.attributes: + raise ValueError("Attributes on functions not supported") + + func_inputs = {} + for named_value_type in func_spec.inputs: + name = named_value_type.name + valuetype = proto_to_types(named_value_type.type) + + if not types.is_tensor(valuetype): + raise ValueError("Functions inputs can only be tensors") + func_inputs[name] = Placeholder( + sym_shape=valuetype.get_shape(), dtype=valuetype.get_primitive(), name=name + ) + context.register_var_with_name(name, func_inputs[name].outputs[0]) + + opset = func_spec.opset + if opset not in func_spec.block_specializations: + raise ValueError("Missing block 
specialization for opset {}".format(opset)) + + with Function(func_inputs, opset_version=_target(spec_version)) as pymil_func: + _load_block(context, func_spec.block_specializations[opset]) + + return pymil_func + + +def load(model_spec, specification_version, file_weights_dir="", **kwargs): + if not isinstance(model_spec, ml.Model): + raise TypeError("Invalid Model sepc object") + + if specification_version < model_spec.specificationVersion: + raise ValueError("specification_version must be greater or equal to the input model spec version") + + if model_spec.WhichOneof("Type") != "mlProgram": + raise ValueError("Only MIL proto based mlmodels can be loaded") + + program_spec = model_spec.mlProgram + if not isinstance(program_spec, pm.Program): + raise TypeError("Invalid Program spec object") + + if program_spec.docString: + raise NotImplementedError("Docstring would be lost in the process") + + if program_spec.version != 1: + raise ValueError("Invalid program version") + + context = TranscriptionContext(file_weights_dir) + pymil_program = Program() + for func_name, func_spec in program_spec.functions.items(): + pymil_program.add_function( + func_name, _load_function(context, func_spec, specification_version) + ) + + for attr_name, attr_spec in program_spec.attributes.items(): + if attr_name not in ("buildInfo",): + raise ValueError("Invalid attribute for program") + + return pymil_program diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/milproto/test_load.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/milproto/test_load.py new file mode 100644 index 00000000..cb45d13b --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/milproto/test_load.py @@ -0,0 +1,199 @@ +# Copyright (c) 2022, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as np +import pytest + +import coremltools as ct +from coremltools import ComputeUnit +from coremltools._deps import _HAS_TF_2, _HAS_TORCH +from coremltools.converters._converters_entry import _get_metadata_from_mlmodel +from coremltools.converters.mil import Builder as mb +from coremltools.converters.mil.converter import mil_convert +from coremltools.converters.mil.frontend.milproto.load import \ + load as milproto_to_pymil +from coremltools.converters.mil.frontend.tensorflow.test.test_ops import \ + TestTensorArray +from coremltools.converters.mil.frontend.tensorflow.test.testing_utils import \ + run_compare_tf +from coremltools.converters.mil.mil.ops.tests.testing_utils import \ + compare_backend +from coremltools.converters.mil.testing_utils import get_op_types_in_program + +if _HAS_TORCH: + import torch + from coremltools.converters.mil.frontend.torch.test.test_torch_ops import \ + TestScriptedModels + + +def get_pymil_prog_from_mlmodel(mlmodel): + model_spec = mlmodel.get_spec() + return milproto_to_pymil( + model_spec=model_spec, + specification_version=model_spec.specificationVersion, + file_weights_dir=mlmodel.weights_dir, + ) + +def get_roundtrip_mlmodel(mlmodel): + """ + This utility function does the following roundtrip conversion: + + mlprogram proto -> pymil program -> mlprogram model + """ + pymil_prog = get_pymil_prog_from_mlmodel(mlmodel) + + # convert the pymil program to mlmodel + model_spec = mlmodel.get_spec() + roundtripped_mlmodel = mil_convert( + pymil_prog, + convert_to="mlprogram", + convert_from="milinternal", + compute_units=mlmodel.compute_unit, + model_description=model_spec.description, + specification_version=model_spec.specificationVersion, + ) + + # set MIL program attributes + build_info = _get_metadata_from_mlmodel(mlmodel) + roundtripped_mlmodel._set_build_info_mil_attributes(build_info) + return roundtripped_mlmodel + +def roundtrip_and_compare_mlmodel(mlmodel, input_dict): + roundtripped_mlmodel = get_roundtrip_mlmodel(mlmodel) + expected_outputs = mlmodel.predict(input_dict) + compare_backend(roundtripped_mlmodel, input_dict, expected_outputs) + + +class TestLoadAPIUsage: + def test_mil_proto_to_pymil(self): + # Define a PyMIL program + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 3, 100, 100)), ]) + def prog(x): + # MIL operation takes named inputs (instead of positional inputs). + # Here `name` argument is optional. 
+ x = mb.relu(x=x, name='relu') + x = mb.conv(x=x, weight=np.random.rand(10, 3, 2, 2), name="conv") + x = mb.transpose(x=x, perm=[0, 3, 1, 2], name='transpose') + x = mb.reduce_mean(x=x, axes=[2, 3], keep_dims=False, name='reduce') + x = mb.log(x=x, name='log') + return x + + # Convert it to MIL proto backed MLModel + mlmodel = ct.convert(prog, convert_to="mlprogram") + + # Load MLModel back to PyMIL + loaded_pymil_prog = get_pymil_prog_from_mlmodel(mlmodel) + + # Assert that loaded PyMIL prog matches with defined PyMIL prog + if get_op_types_in_program(loaded_pymil_prog) != get_op_types_in_program(prog): + raise AssertionError("Mismatch between defined PyMIL prog and loaded PyMIL prog") + + def test_mil_proto_to_pymil_with_version_handling(self): + # This test makes sure the correct version of the op is picked up during mil_proto -> pymil conversion + + # iOS15 version program with iOS13 version topk + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 4, 4))], opset_version=ct.target.iOS15) + def prog(x): + x = mb.topk(x=x, k=1, axis=-1, ascending=True) + return x + + iOS15_mlmodel = ct.convert(prog, convert_to="mlprogram", minimum_deployment_target=ct.target.iOS15) + iOS15_pymil_prog = get_pymil_prog_from_mlmodel(iOS15_mlmodel) + topk_op = iOS15_pymil_prog.functions["main"].find_ops(op_type="topk")[0] + assert not hasattr(topk_op, "sort") + + # iOS16 version program with iOS16 version topk + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 4, 4))], opset_version=ct.target.iOS16) + def prog(x): + x = mb.topk(x=x, k=1, axis=-1, ascending=True) + return x + + iOS16_mlmodel = ct.convert(prog, convert_to="mlprogram", minimum_deployment_target=ct.target.iOS16) + iOS16_pymil_prog = get_pymil_prog_from_mlmodel(iOS16_mlmodel) + topk_op = iOS16_pymil_prog.functions["main"].find_ops(op_type="topk")[0] + assert hasattr(topk_op, "sort") + +@pytest.mark.skipif(ct.utils._macos_version() < (12, 0), reason="mlprogram predict available only on macOS12+") +class TestE2ENumericalCorrectness: + @pytest.mark.skipif(not _HAS_TORCH, reason="requires torch") + def test_elu(self): + inputs = [ct.TensorType(name="data", shape=(2, 3, 1))] + input_data = [torch.rand(*i.shape.to_list()) for i in inputs] + torchmodel = torch.jit.trace(torch.nn.ELU(inplace=False), input_data) + + mlmodel = ct.convert(torchmodel, inputs=inputs, convert_to="mlprogram", + compute_units=ComputeUnit.CPU_ONLY) + input_values = { + i.name: val.detach().numpy() for i, val in zip(inputs, input_data) + } + roundtrip_and_compare_mlmodel(mlmodel, input_values) + + @pytest.mark.skipif(not _HAS_TORCH, reason="requires torch") + def test_linear(self): + inputs = [ct.TensorType(name="data", shape=(10, 2))] + input_data = [torch.rand(*i.shape.to_list()) for i in inputs] + torchmodel = torch.jit.trace( + torch.nn.Linear(in_features=2, out_features=3, bias=True), input_data + ) + + mlmodel = ct.convert(torchmodel, inputs=inputs, convert_to="mlprogram", + compute_units=ComputeUnit.CPU_ONLY) + input_values = { + i.name: val.detach().numpy() for i, val in zip(inputs, input_data) + } + roundtrip_and_compare_mlmodel(mlmodel, input_values) + + @pytest.mark.skipif(not _HAS_TORCH, reason="requires torch") + def test_conv(self): + inputs = [ct.TensorType(name="data", shape=(5, 10, 4, 4))] + input_data = [torch.rand(*i.shape.to_list()) for i in inputs] + torchmodel = torch.jit.trace( + torch.nn.Conv2d(in_channels=10, out_channels=20, kernel_size=4), input_data + ) + + mlmodel = ct.convert(torchmodel, inputs=inputs, convert_to="mlprogram", + 
compute_units=ComputeUnit.CPU_ONLY) + input_values = { + i.name: val.detach().numpy() for i, val in zip(inputs, input_data) + } + roundtrip_and_compare_mlmodel(mlmodel, input_values) + + @pytest.mark.skipif(not _HAS_TORCH, reason="requires torch") + def test_while_loop(self): + model = TestScriptedModels.get_while_loop_model() + model_spec = torch.jit.script(model) + mlmodel = ct.convert(model_spec, + inputs=[ct.TensorType(name="data", shape=model.input_size, dtype=np.float32)], + convert_to="mlprogram", + compute_units=ComputeUnit.CPU_ONLY + ) + input_values = {"data": np.array([10.])} + roundtrip_and_compare_mlmodel(mlmodel, input_values) + + @pytest.mark.skipif(not _HAS_TORCH, reason="requires torch") + def test_cond(self): + model = TestScriptedModels.get_cond_model() + model_spec = torch.jit.script(model) + mlmodel = ct.convert(model_spec, + inputs=[ct.TensorType(name="data", shape=(1,), dtype=np.float32)], + convert_to="mlprogram", + compute_units=ComputeUnit.CPU_ONLY + ) + roundtrip_and_compare_mlmodel(mlmodel, {"data": np.array([1.])}) + roundtrip_and_compare_mlmodel(mlmodel, {"data": np.array([11.])}) + + @pytest.mark.skipif(_HAS_TF_2, reason="Fix and re-enable this test: rdar://76293949 (TF2 unit test InvalidArgumentError)") + def test_list(self): + model, inputs, outputs = TestTensorArray.get_dynamic_elem_shape_model() + input_values = [np.random.rand(2, 3)] + input_dict = dict(zip(inputs, input_values)) + _, mlmodel, _, _ = run_compare_tf( + model, + input_dict, + outputs, + compute_unit=ct.ComputeUnit.CPU_ONLY, + backend=("mlprogram", "fp16") + ) + roundtrip_and_compare_mlmodel(mlmodel, {"Placeholder": input_values[0]}) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/__init__.py new file mode 100644 index 00000000..92a4ecf4 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/__init__.py @@ -0,0 +1,23 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import logging +# suppress TensorFlow stdout prints +import os + +from coremltools._deps import _HAS_TF + +if os.getenv("TF_SUPPRESS_LOGS", "1") == "1": + os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" # FATAL + logging.getLogger("tensorflow").setLevel(logging.FATAL) + +register_tf_op = None + +if _HAS_TF: + # Importing these causes them to register their ops + from . import ops + from .dialect_ops import (TfLSTMBase, tf_lstm_block, tf_lstm_block_cell, + tf_make_list) + from .tf_op_registry import register_tf_op diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/basic_graph_ops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/basic_graph_ops.py new file mode 100644 index 00000000..81d2f72e --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/basic_graph_ops.py @@ -0,0 +1,356 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+
+def connect_edge(g, source, dest):
+    if isinstance(source, str):
+        source = g[source]
+    if isinstance(dest, str):
+        dest = g[dest]
+    source.outputs.append(dest.name)
+    dest.inputs.append(source.name)
+
+
+def connect_edge_at_index(g, source, dest, idx):
+    if isinstance(source, str):
+        source = g[source]
+    if isinstance(dest, str):
+        dest = g[dest]
+    source.outputs.insert(idx, dest.name)
+    dest.inputs.insert(idx, source.name)
+
+
+def replace_source(g, source, dest, new_source):
+    if isinstance(source, str):
+        source = g[source]
+    if isinstance(dest, str):
+        dest = g[dest]
+    if isinstance(new_source, str):
+        new_source = g[new_source]
+    dest_inputs = []
+    for inp in dest.inputs:
+        if inp == source.name:
+            dest_inputs.append(new_source.name)
+            g[new_source.name].outputs.append(dest.name)
+        else:
+            dest_inputs.append(inp)
+    dest.inputs = dest_inputs
+    source.outputs = [i for i in g[source.name].outputs if i != dest.name]
+
+
+def replace_control_source(g, source, dest, new_source):
+    if isinstance(source, str):
+        source = g[source]
+    if isinstance(dest, str):
+        dest = g[dest]
+    if isinstance(new_source, str):
+        new_source = g[new_source]
+    dest_inputs = []
+    for inp in dest.control_inputs:
+        if inp == source.name:
+            dest_inputs.append(new_source.name)
+            g[new_source.name].control_outputs.append(dest.name)
+        else:
+            dest_inputs.append(inp)
+    dest.control_inputs = dest_inputs
+    # Mirror replace_source, but on the control edges: filter the old
+    # source's control_outputs (not its data outputs).
+    source.control_outputs = [
+        i for i in g[source.name].control_outputs if i != dest.name
+    ]
+
+
+def replace_dest(g, source, dest, new_dest):
+    if isinstance(source, str):
+        source = g[source]
+    if isinstance(dest, str):
+        dest = g[dest]
+    if isinstance(new_dest, str):
+        new_dest = g[new_dest]
+    for idx, d in enumerate(source.outputs):
+        if d == dest.name:
+            source.outputs[idx] = new_dest.name
+            new_dest.inputs = new_dest.inputs[:] + [source.name]
+
+    dest.inputs = [i for i in dest.inputs if i != source.name]
+
+
+def replace_control_dest(g, source, dest, new_dest):
+    if isinstance(source, str):
+        source = g[source]
+    if isinstance(dest, str):
+        dest = g[dest]
+    if isinstance(new_dest, str):
+        new_dest = g[new_dest]
+    for idx, d in enumerate(source.control_outputs):
+        if d == dest.name:
+            source.control_outputs[idx] = new_dest.name
+            new_dest.control_inputs = new_dest.control_inputs[:] + [source.name]
+
+    dest.control_inputs = [i for i in dest.control_inputs if i != source.name]
+
+
+def connect_dests(g, source, dests):
+    for i in dests:
+        connect_edge(g, source, i)
+
+
+def connect_sources(g, sources, dest):
+    for i in sources:
+        connect_edge(g, i, dest)
+
+
+def disconnect_edge(g, source, dest):
+    if isinstance(source, str):
+        source = g[source]
+    if isinstance(dest, str):
+        dest = g[dest]
+    source.outputs = [i for i in source.outputs if i != dest.name]
+
+    dest.inputs = [i for i in dest.inputs if i != source.name]
+
+
+def disconnect_control_edge(g, source, dest):
+    if isinstance(source, str):
+        source = g[source]
+    if isinstance(dest, str):
+        dest = g[dest]
+    source.control_outputs = [i for i in source.control_outputs if i != dest.name]
+
+    dest.control_inputs = [i for i in dest.control_inputs if i != source.name]
+
+
+def disconnect_vertex_outs(g, source):
+    if isinstance(source, str):
+        source = g[source]
+    for out in source.outputs:
+        g[out].inputs = [i for i in g[out].inputs if i != source.name]
+    source.outputs = []
+
+
+def disconnect_vertex_ins(g, dest):
+    if isinstance(dest, str):
+        dest = g[dest]
+    for inp in dest.inputs:
+        if isinstance(inp, str):
+            innode = g[inp]
+        else:
+            innode = inp
+        innode.outputs = [i for i in innode.outputs if i != dest.name]
+    dest.inputs = []
+
+
+def disconnect_vertex_control_ins(g, dest):
+    if isinstance(dest, str):
+        dest = g[dest]
+    for inp in dest.control_inputs:
+        if isinstance(inp, str):
+            innode = g[inp]
+        else:
+            innode = inp
+        innode.control_outputs = [i for i in innode.control_outputs if i != dest.name]
+    dest.control_inputs = []
+
+
+def disconnect_vertex_control_outs(g, source):
+    if isinstance(source, str):
+        source = g[source]
+    for out in source.control_outputs:
+        g[out].control_inputs = [i for i in g[out].control_inputs if i != source.name]
+    source.control_outputs = []
+
+
+def delete_node(g, node):
+    if not isinstance(node, str):
+        node = node.name
+    disconnect_vertex_ins(g, node)
+    disconnect_vertex_outs(g, node)
+    disconnect_vertex_control_ins(g, node)
+    disconnect_vertex_control_outs(g, node)
+    del g[node]
+
+
+def replace_node(g, original_node, new_node):
+    if isinstance(new_node, str):
+        new_node = g[new_node]
+    if not isinstance(original_node, str):
+        original_node = original_node.name
+
+    for o in list(g[original_node].control_outputs):
+        replace_control_source(g, original_node, o, new_node)
+    for o in list(g[original_node].outputs):
+        replace_source(g, original_node, o, new_node)
+    for i in list(g[original_node].control_inputs):
+        replace_control_dest(g, i, original_node, new_node)
+    for i in list(g[original_node].inputs):
+        replace_dest(g, i, original_node, new_node)
+
+
+def fill_outputs(gd):
+    """
+    Fills the output lists of a graph of ParsedNodes.
+
+    Takes a graph in "dict{str, ParsedNode}" form and returns the same
+    graph with its output lists filled in.
+    """
+    # fill outputs
+    for k, v in gd.items():
+        for i in v.inputs:
+            gd[i].outputs.append(v.name)
+        for i in v.control_inputs:
+            gd[i].control_outputs.append(v.name)
+    get_tuple_ops = ["Split", "SplitV", "LSTMBlock", "NonMaxSuppressionV5"]
+    for k, v in gd.items():
+        if v.op in get_tuple_ops:
+            outputs = [[out, int(gd[out].attr["index"])] for out in v.outputs]
+            outputs.sort(key=lambda x: x[1])
+            gd[k].outputs = [out for [out, _] in outputs]
+
+    return gd
+
+
+def check_connections(gd):
+    """
+    Given a graph, checks that
+    - inputs/outputs are symmetric
+    - control_inputs/control_outputs are symmetric
+    - the graph does not reference vertices outside of the graph
+
+    Takes a graph in "dict{str, ParsedNode}" form. Returns nothing;
+    asserts on failure.
+    """
+    # check that inputs and outputs line up
+    for k, v in gd.items():
+        for i in v.inputs:
+            if isinstance(i, str):
+                assert k in gd[i].outputs
+            else:
+                assert k in gd[i.name].outputs
+        for i in v.outputs:
+            inputs = [
+                inp if isinstance(inp, str) else inp.name
+                for inp in gd[i].inputs
+            ]
+            assert k in inputs
+        for i in v.control_inputs:
+            if isinstance(i, str):
+                assert k in gd[i].control_outputs
+            else:
+                assert k in gd[i.name].control_outputs
+        for i in v.control_outputs:
+            control_inputs = [
+                inp if isinstance(inp, str) else inp.name
+                for inp in gd[i].control_inputs
+            ]
+            assert k in control_inputs
+
+
+def const_determined_nodes(gd, assume_variable_nodes=None):
+    """
+    Given a graph, extract all nodes that depend only on const nodes.
+
+    # TODO: extract nodes that depend on the "const part" of placeholders.
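+
+    Illustrative example (hypothetical node names): for a graph with
+    w = Const, y = Mul(w, w), z = Add(y, Placeholder), this returns
+    ["w", "y"]; "z" is excluded because it depends on a Placeholder.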
+ """ + if assume_variable_nodes is None: + assume_variable_nodes = [] + vis = {} + + def visit(node): + # make sure node is a ParsedNode + if isinstance(node, str): + node = gd[node] + if node.name in vis: + return + + if "Const" in node.op: + vis[node.name] = True + elif "Variable" in node.op: + vis[node.name] = False + elif "Placeholder" in node.op: + vis[node.name] = False + # TF1 uses TensorArray* while TF2 uses TensorList* ops + elif "TensorArray" in node.op or "TensorList" in node.op: + vis[node.name] = False + elif "function" in node.op: + vis[node.name] = False + elif "global" in node.op: + vis[node.name] = False + elif "FakeQuant" in node.op: + vis[node.name] = False + elif node.name in assume_variable_nodes: + vis[node.name] = False + else: + ret = True + vis[node.name] = False + for innode in node.inputs: + if isinstance(innode, str): + inname = innode + else: + inname = innode.name + if inname not in vis: + visit(innode) + if not vis[inname]: + ret = False + break + vis[node.name] = ret + + for k, v in gd.items(): + if k in vis: + continue + visit(k) + + ret = [] + for k, v in vis.items(): + if v: + ret.append(k) + return ret + + +def topsort(graph): + if len(graph) == 0: + return [] + inedge_count = {k: len(v.inputs) + len(v.control_inputs) for k, v in graph.items()} + ret = [] + curboundary = [k for k, v in inedge_count.items() if v == 0] + nextboundary = [] + if len(curboundary) == 0: + raise ValueError("Graph is not a DAG!") + + while len(curboundary) > 0: + ret.extend(curboundary) + for b in curboundary: + for o in graph[b].outputs + graph[b].control_outputs: + inedge_count[o] -= 1 + if inedge_count[o] == 0: + nextboundary.append(o) + curboundary = nextboundary + nextboundary = [] + if len(ret) != len(graph): + raise ValueError("Graph is not a DAG!") + return ret + + +def simple_topsort(inputs): + if len(inputs) == 0: + return [] + outputs = {k: [] for k in inputs} + for k in inputs: + for o in inputs[k]: + outputs[o].append(k) + + inedge_count = {k: len(v) for k, v in inputs.items()} + ret = [] + curboundary = [k for k, v in inedge_count.items() if v == 0] + nextboundary = [] + if len(curboundary) == 0: + raise ValueError("Graph is not a DAG!") + + while len(curboundary) > 0: + ret.extend(curboundary) + for b in curboundary: + for o in outputs[b]: + inedge_count[o] -= 1 + if inedge_count[o] == 0: + nextboundary.append(o) + curboundary = nextboundary + nextboundary = [] + if len(ret) != len(inputs): + raise ValueError("Graph is not a DAG!") + return ret diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/convert_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/convert_utils.py new file mode 100644 index 00000000..5e83f867 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/convert_utils.py @@ -0,0 +1,211 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from collections import defaultdict
+
+from tqdm import tqdm as _tqdm
+
+from coremltools import _logger as logger
+from coremltools.converters.mil.mil import types
+from coremltools.converters.mil.mil.types.symbolic import (any_variadic,
+                                                           is_symbolic)
+from coremltools.converters.mil.mil.var import ListVar
+
+from .basic_graph_ops import topsort
+from .tf_op_registry import _TF_OPS_REGISTRY
+
+
+def compatible_shapes(tf_shape, inf_shape):
+    def compare_elem(dt, ds):
+        if dt is None or dt < 0:
+            return True
+        elif dt == ds:
+            return True
+        elif is_symbolic(ds):
+            if is_symbolic(dt) and dt != ds:
+                logger.warning("Symbolic dim {} and {}".format(ds, dt) +\
+                    " assumed to be equal")
+            return True
+        else:
+            return False
+
+    if tf_shape is None or any_variadic(inf_shape):
+        return True
+    else:
+        return all(compare_elem(dt, ds) for dt, ds in zip(tf_shape, inf_shape))
+
+
+def check_output_shapes(x, node):
+    """
+    x: list[Var] or tuple[Var]
+    node: ParsedTFNode
+    """
+    if isinstance(x, ListVar):
+        # No check on list.
+        return
+    if not isinstance(x, (list, tuple)):
+        x = [x]
+    tf_shapes = node.attr.get("_output_shapes", None)
+    if tf_shapes is None:
+        return
+    inf_shapes = []
+    for y in x:
+        if y is None:
+            msg = "TF convert returns None type in TF node {}"
+            raise TypeError(msg.format(node.name))
+        if types.is_tensor(y.sym_type):
+            inf_shapes.append(list(y.shape))
+        elif types.is_scalar(y.sym_type):
+            inf_shapes.append([])
+        else:
+            msg = "Output type {} not understood"
+            raise ValueError(msg.format(y))
+
+    for t, s in zip(tf_shapes, inf_shapes):
+        if not compatible_shapes(t, s):
+            msg = (
+                "Op {} ({}) type inference ({}) and TF output shape "
+                "({}) mismatch"
+            )
+            raise ValueError(msg.format(node.name, node.op, s, t))
+
+
+def connect_global_initializer(graph):
+    # In TF1, variable initialization (from a frozen graph) is done by a
+    # DAG in the main function that is disconnected from the rest of the main
+    # function. For example:
+    #
+    # Initialization DAG (disconnected from Main DAG):
+    #     Const -> set_global(variable='v1')
+    #
+    # Main DAG:
+    #     Placeholder ---
+    #                   |
+    #     get_global(variable='v1') ----> some_output
+    #
+    # (Note that in this example there's no loop or other function.)
+    #
+    # If the variable does not cross a block boundary, we can always represent
+    # `get_global` by the input to `set_global`, which may or may not be
+    # Const, following the control dependency.
+    #
+    # Note that this is incorrect if the global variable crosses, say, a
+    # while_loop block boundary, which needs a more complex resource inference
+    # to support and is not supported in this function.
+    #
+    # Due to the lack of control dependency between the two DAGs, we could be
+    # converting `set_global` after `get_global`, which makes it impossible to
+    # perform eager type inference, as type information (e.g., tensor shape)
+    # is only provided by `set_global` (whether setting it to a const or a
+    # non-const).
+    #
+    # Here we remedy the simpler case: when `set_global` takes in a Const,
+    # we assume it's initialization and thus it must
+    # run before get_global, i.e. all get_global(variable='v1') must be a
+    # control_output of set_global(variable='v1') where set_global's input is
+    # Const (with control_inputs and control_outputs set symmetrically). Note
+    # that multiple `get_global(variable='v1')` might have dependencies among
+    # themselves, but they should all take the constant
+    # `set_global(variable='v1')` as a control dependency.
+
+    # Phase 1: Collect get_global nodes for each variable.
+    # variable name to list[ParsedTFNode]
+    var_to_get_global_nodes = defaultdict(list)
+    for node in graph.values():
+        if node.op == "get_global":
+            variable_name = node.attr["variable"]
+            var_to_get_global_nodes[variable_name].append(node)
+
+    # Phase 2: Find set_global with compile time values
+    for node_name, node in graph.items():
+        if node.op != "set_global":
+            continue
+        input_name = node.inputs[0]
+        input_node = graph[input_name]
+        if input_node.op != "Const":
+            continue
+        variable_name = node.attr["variable"]
+        for get_node in var_to_get_global_nodes[variable_name]:
+            logger.info(
+                "adding {} as a control input of {}".format(node_name, get_node.name)
+            )
+            get_node.control_inputs.append(node_name)
+            node.control_outputs.append(get_node.name)
+
+
+def convert_graph(context, graph, outputs=None):
+    """
+    Construct Core ML ops corresponding to `graph`.
+
+    Inputs:
+
+    - context (TranscriptionContext)
+
+    - graph (dict of str -> ParsedTFNode): op name --> ParsedTFNode
+
+    - outputs (list[str]): List of output names. If outputs is None, the last
+      node of the graph (after topsort) must have op type 'return'.
+
+    Returns:
+
+    list[Var]: the output Vars of the constructed Block.
+    """
+    connect_global_initializer(graph)
+    nodes = topsort(graph)
+
+    if outputs is None:
+        # infer outputs from return
+        last_node = graph[nodes[-1]]
+        if last_node.op != "return":
+            msg = "Expect the last node in graph to be 'return'; got {}"
+            raise ValueError(msg.format(last_node.op))
+        second_last_node = graph[last_node.inputs[0]]
+        if second_last_node.op == "make_tuple":
+            outputs = second_last_node.inputs
+        else:
+            # single output function
+            outputs = second_last_node.name
+
+    # Translate the non-placeholder ops.
+    num_nodes = len(nodes)
+    for i, node_name in enumerate(
+        _tqdm(nodes, desc="Converting TF Frontend ==> MIL Ops", unit=" ops")
+    ):
+        node = graph[node_name]
+        if node.op == "return":
+            continue
+        logger.info(
+            "[{}/{}] Converting {} op '{}'".format(i + 1, num_nodes, node.op, node.name)
+        )
+
+        if node.op in ("NoOp", "Assert"):
+            continue
+
+        add_op = _TF_OPS_REGISTRY.get(node.op, None)
+        if add_op is None:
+            msg = "Conversion for TF op '{0}' not implemented.\n \n{1}".format(
+                node.op, node.original_node
+            )
+            raise NotImplementedError(msg)
+        add_op(context, node)
+
+        if len(node.outputs) > 0:
+            # set_global / get_global / NoOp have no direct consumers / outputs
+            x = context[node.name]
+            check_output_shapes(x, node)
+
+    output_is_list = isinstance(outputs, (tuple, list))
+    if not output_is_list:
+        outputs = [outputs]
+
+    output_vars = []
+    for output in outputs:
+        x = context[output.split(":")[0]]
+        if isinstance(x, (tuple, list)):
+            idx = int(output.split(":")[1])
+            output_vars.append(x[idx])
+        else:
+            output_vars.append(x)
+
+    return output_vars if output_is_list else output_vars[0]
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/converter.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/converter.py
new file mode 100644
index 00000000..98dd468c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/converter.py
@@ -0,0 +1,466 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from coremltools import _logger as logger
+from coremltools.converters._profile_utils import _profile
+from coremltools.converters.mil._deployment_compatibility import AvailableTarget as _target
+from coremltools.converters.mil.input_types import ImageType, InputType, RangeDim
+from coremltools.converters.mil.input_types import Shape as InputShape
+from coremltools.converters.mil.input_types import TensorType, _get_shaping_class
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil import Function, Program, get_new_symbol, types
+from coremltools.converters.mil.mil.types.symbolic import is_symbolic
+from coremltools.converters.mil.mil.var import Var
+
+from .._utils import get_output_names
+from .basic_graph_ops import simple_topsort
+from .convert_utils import convert_graph
+
+
+# TranscriptionContext maintains a map of tf_node.name --> ssa_var available
+# to the current TF --> tfssa transcription.
+class TranscriptionContext:
+    def __init__(self, name=None):
+        self.name = name if name is not None else ""
+        self.context = {}
+        self.graphs = {}
+
+        # TF loops are represented as functions, so nested loops become
+        # stacked functions. Stacked functions are translated to nested
+        # blocks in Program, like
+        #
+        # while_loop(loop_vars=(%a, %b))
+        #   cond_block1(%a.x, %b.x) {
+        #     ...some ops
+        #   } -> (%bool_var1)
+        #   body_block1(%a.x, %b.x) {
+        #     %ret_axx = while_loop(loop_vars=(%a.x,))
+        #       cond_block2(%a.x.x) {
+        #         ...some ops
+        #       } -> (%bool_var2)
+        #       body_block2(%a.x.x) {
+        #         ...some ops
+        #       } -> (%new_a.x.x)
+        #     } -> (%ret_axx)
+        #     ....some ops using %ret_axx
+        #   } -> (%ret_ax, %ret_bx)
+        #
+        # During the translation of cond_block2, we'd have func_input_stack
+        #
+        # (%a.x.x,)
+        # (%a.x, %b.x)
+        #
+        # where (%a.x.x,) would be unstacked once cond_block2 is done.
+        self.func_input_stack = []  # list of tuple[Var]
+
+    def add(self, tf_name, ssa_vars, is_new_var=True):
+        """
+        ssa_vars: list[Var] / tuple[Var] (multiple outputs) or
+                  Var (single_output)
+        is_new_var: True if ssa_vars are newly created for tf_name.
+        """
+        if tf_name in self.context:
+            # Overriding allows us to translate the while_loop body twice
+            # (which is needed to figure out shape changes across iterations).
+            msg = "TF var %s is added again. Overriding previous value"
+            logger.info(msg % tf_name)
+        if is_new_var and isinstance(ssa_vars, Var) and tf_name != ssa_vars.name:
+            msg = (
+                "MIL op's name ({}) does not match TensorFlow's node name ({})."
+                " A node added to the context must have the same name as the name passed to the context."
+ ) + raise ValueError(msg.format(tf_name, ssa_vars.name)) + self.context[tf_name] = ssa_vars + + def add_graph(self, graph_name, graph): + self.graphs[graph_name] = graph + + def get_graph(self, graph_name): + if graph_name not in self.graphs: + msg = "Graph '{}' not found in: {}" + raise KeyError(msg.format(graph_name, list(self.graphs.keys()))) + return self.graphs[graph_name] + + def stack_func_inputs(self, inputs): + self.func_input_stack.append(inputs) + + def unstack_func_inputs(self): + if len(self.func_input_stack) == 0: + raise ValueError("No func input available") + self.func_input_stack.pop() + + def get_func_inputs(self): + if len(self.func_input_stack) == 0: + raise ValueError("No func input available") + return self.func_input_stack[-1] + + def __getitem__(self, tf_name): + if tf_name not in self.context: + msg = "TF var {} not found in context {}" + raise KeyError(msg.format(tf_name, self.name)) + return self.context[tf_name] + + def __contains__(self, tf_name): + return tf_name in self.context + + +class TFConverter: + def __init__(self, tfssa, inputs=None, outputs=None, opset_version=None): + """ + tfssa: TensorFlow IR. + inputs: list of TensorType or ImageType, optional, defaults to None. + outputs: list[ct.InputType] or None + list of either ct.TensorTypes or ct.ImageTypes (both of which are child classes of InputType) + This is the value of the "outputs" argument, passed on by the user in "coremltools.convert" API. + """ + self.tfssa = tfssa + self.global_type = {} + self.inputs = None + self.main_output_types = outputs + self.opset_version = _target(opset_version) if opset_version is not None else None + output_names = get_output_names(outputs) + + main_func = tfssa.functions["main"] + graph = main_func.graph + + # Filter the inputs to only Placeholder names + tf_placeholder_names = [n for n in graph if graph[n].op == "Placeholder"] + placeholder_names = [] + if inputs is not None: + # Check inputs format + if not isinstance(inputs, (list, tuple)): + raise ValueError( + "Type of inputs should be list or tuple, got {} instead.".format( + type(inputs) + ) + ) + if not all([isinstance(i, InputType) for i in inputs]): + raise ValueError( + "Type of inputs should be list or tuple of TensorType or ImageType, got {} instead.".format( + [type(i) for i in inputs] + ) + ) + + # Special case: if there's only 1 input and 1 placeholder, we match them. + if len(tf_placeholder_names) == 1 and len(inputs) == 1: + if inputs[0].name is None: + inputs[0].name = tf_placeholder_names[0] + + # We fill in shapes for user-specified input that doesn't have shape + for inp in inputs: + # Check inputs existence + if inp.name is None: + raise ValueError( + "Multiple inputs are found in graph, but no input name was provided" + ) + if inp.name not in tf_placeholder_names: + raise ValueError( + "Input ({}) provided is not found in given tensorflow graph. Placeholders in graph are: {}".format( + inp.name, tf_placeholder_names + ) + ) + if inp.shape is None: + shape = self._get_placeholder_shape_from_tf_graph(tfgraph=graph, name=inp.name) + # _get_shaping_class does not accept -1 or None dimension. + shape = [get_new_symbol() if s is None or s == -1 else s \ + for s in shape] + inp.shape = _get_shaping_class(shape) + + # Extract placeholders that users didn't specify. 
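+            # (e.g., if the graph has Placeholders "x" and "y" but the user
+            # only passed TensorType(name="x", ...), then "y" is collected
+            # below and added as a model input, with its shape/dtype read
+            # from the TF graph; an illustrative sketch of the behavior.)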
+ user_input_names = [inp.name for inp in inputs] + for name in tf_placeholder_names: + if name not in user_input_names: + placeholder_names.append(name) + else: + inputs = [] + placeholder_names = tf_placeholder_names + + # name -> (shape, mil_type) mapping. shape has type list[int] + added_inputs = {} + for inp in main_func.inputs: + if inp not in placeholder_names: + continue + node = graph[inp] + dtype = node.attr['dtype'] + shape = self._get_placeholder_shape_from_tf_graph(tfgraph=graph, name=inp) + shape = [get_new_symbol() if s is None or s == -1 else s \ + for s in shape] + inputs.append(TensorType(name=inp, shape=shape, dtype=dtype)) + added_inputs[inp] = (shape, dtype) + + if len(added_inputs) > 0: + logger.info( + "Adding Input not specified by users: '{}'".format( + added_inputs) + ) + + for idx, inp in enumerate(inputs): + # We set the default image format in TF as NHWC, since NHWC is used + # for TF unless GPU is specified as device. + if isinstance(inp, ImageType) and inputs[idx].channel_first is None: + inputs[idx].channel_first = False + self.inputs = tuple(inputs) + + for inputtype in self.inputs: + if not isinstance(inputtype.shape, InputShape): + continue + if any([isinstance(s, RangeDim) for s in inputtype.shape.shape]): + continue + if inputtype.name not in graph: + raise ValueError( + f"The input {inputtype.name} provided is not in graph." + ) + node = graph[inputtype.name] + shape = [-1 if is_symbolic(s) else s for s in inputtype.shape.shape] + node.attr["_output_shapes"] = [shape] # list of length 1 + + # infer outputs if not provided + self._validate_outputs(tfssa, output_names) + output_names = main_func.outputs if output_names is None else output_names + output_names = output_names if isinstance(output_names, (tuple, list)) else [output_names] + output_names = [x if isinstance(x, str) else x.name for x in output_names] + self.output_names = output_names + + # We would like a stack so that we run conversion sequentially. + self.graph_stack = self._get_stack(tfssa, root="main") + self.context = TranscriptionContext() + + def _get_placeholder_shape_from_tf_graph(self, tfgraph, name): + + error_message = "Unable to determine the shape of input: {}." \ + " Please provide its shape during conversion, using \n" \ + "'ct.convert(..., inputs=[ct.TensorType(name='{}', shape=(_FILL_ME_) ),])".format(name, name) + + if tfgraph[name].attr.get("shape", None) is not None: + shape = tfgraph[name].attr["shape"] + + elif tfgraph[name].attr.get("_output_shapes", None) is not None: + shape = tfgraph[name].attr["_output_shapes"][0] + if shape is None: + raise ValueError(error_message) + else: + raise ValueError(error_message) + + return shape + + def _get_stack(self, tfssa, root="main"): + # We're trying to get a order of how to loop through the graphs. + # This is NOT necessarily a DAG. 
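+        # Sketch (illustrative names): if "main" contains a while op with
+        # body_function="body_f" and cond_function="cond_f", then
+        # dep == {"main": [], "body_f": ["main"], "cond_f": ["main"]},
+        # and simple_topsort(dep) places "main" first.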
+        dep = {x: [] for x in tfssa.functions}
+        for fname in tfssa.functions:
+            for node in tfssa.functions[fname].graph.values():
+                func_x, func_y = None, None
+
+                if node.op == "while":
+                    func_x = node.attr["body_function"]
+                    func_y = node.attr["cond_function"]
+
+                if func_x and fname not in dep[func_x]:
+                    dep[func_x].append(fname)
+                if func_y and fname not in dep[func_y]:
+                    dep[func_y].append(fname)
+
+        assert len(dep[root]) == 0
+        graph_stack = simple_topsort(dep)
+
+        return graph_stack
+
+    @staticmethod
+    def _get_tensor_name(tensor):
+        ret = None
+        if isinstance(tensor, str):
+            ret = tensor
+        else:
+            ret = tensor.name
+        return ret.split(":")[0]
+
+    def _validate_outputs(self, tfssa, outputs):
+        if outputs is None:
+            return
+        outputs = outputs if isinstance(outputs, (tuple, list)) else [outputs]
+        output_nodes = []
+        for f in tfssa.functions.values():
+            output_nodes += list(f.outputs)
+        all_nodes = []
+        for f in tfssa.functions.values():
+            all_nodes += list(f.graph.keys())
+        for n in outputs:
+            if self._get_tensor_name(n) not in output_nodes + all_nodes:
+                raise KeyError('Output node name "{}" does not exist.'.format(n))
+
+    def _validate_and_update_main_output_types(self, prog):
+        assert isinstance(self.main_output_types, list)
+        assert len(self.main_output_types) > 0
+        output_vars = prog.functions["main"].outputs
+        output_vars_names = set([var.name for var in output_vars])
+
+        # validation
+        if get_output_names(self.main_output_types) is None:
+            # This is the case where the user did not provide names for the outputs.
+            # In this case, the outputs were inferred from the TF graph automatically.
+            # There are two scenarios here: the number of inferred outputs is either 1 or greater than 1.
+            if len(output_vars) == 1:
+                if len(self.main_output_types) > 1:
+                    msg = "The list of ct.TensorType()/ct.ImageType() provided in the 'outputs' argument does not " \
+                          "have names. When more than 1 output is provided for tensorflow conversion, " \
+                          "each entry in the outputs list must have the name specified as well, " \
+                          "via the 'name' argument in ct.TensorType/ct.ImageType"
+                    raise ValueError(msg)
+            else:  # len(output_vars) > 1
+                # if there is more than 1 sink node (i.e. inferred output), the user must provide names
+                # so that the output types can be correctly mapped.
+                msg = "The list of ct.TensorType()/ct.ImageType() provided in the 'outputs' argument does not " \
+                      "have names. When names are not provided, the outputs are automatically inferred " \
+                      "from the TF graph. There are {} outputs detected, which is more than 1. " \
+                      "In this case, to map the output types correctly, " \
+                      "please provide names for each of the " \
+                      "outputs. The output names inferred from the TF graph are: {} "
+                raise ValueError(msg.format(
+                    len(output_vars),
+                    output_vars_names,
+                ))
+        else:
+            # The user provided output names. In this case, the appropriate tensors must have
+            # been selected from the TF graph based on the output names.
+            # Verify that the names present in self.main_output_types match output_vars_names
+            # (they should match). Also, reconstruct the self.main_output_types list in the
+            # same order as the outputs in output_vars_names.
+            assert len(output_vars) == len(self.main_output_types), \
+                "this should match if the outputs were picked correctly from the TF graph"
+            for out in self.main_output_types:
+                if out.name not in output_vars_names:
+                    msg = "output name, '{}', not found in the TensorFlow graph. 
Available output names are: {}" + raise KeyError(msg.format(out.name, output_vars_names)) + name_to_input_type_map = {} + for out in self.main_output_types: + name_to_input_type_map[out.name] = out + main_output_types = [] + for out_var in output_vars: + main_output_types.append(name_to_input_type_map[out_var.name]) + self.main_output_types = main_output_types + + def check_placeholder_output(self, prog, outputs_name): + """ + Handle the cases where placeholder is output. + There is a case where the program is like + main(%Placeholder: (5,fp32)) { + block3() { + } -> (%Placeholder) + } + But self.output_names = ["Placeholder:0"] + We need to change the block output to Placeholder:0 by inserting an identity + """ + block = prog["main"] + input_name = [x.name for x in list(block.inputs.values())] + with block: + new_outputs = [] + for output, output_name in zip(block.outputs, outputs_name): + if output.name not in input_name or output.name == output_name: + new_output = output + else: + new_output = mb.identity(x=output, name=output_name) + new_outputs.append(new_output) + block.set_outputs(new_outputs) + + def convert_main_graph(self, prog, graph): + func_inputs = {} + for input_type in self.inputs: + func_inputs[input_type.name] = mb.placeholder( + input_type.shape.symbolic_shape, dtype=input_type.dtype) + prog.set_main_input_types(self.inputs) + + with Function(func_inputs, opset_version=self.opset_version) as ssa_func: + # Get the input Var + for name in func_inputs.keys(): + input_var = ssa_func.inputs[name] + if (types.is_tensor(input_var.sym_type) or types.is_scalar(input_var.sym_type)) \ + and (input_var.dtype == types.fp16 or input_var.dtype == types.fp64): + # cast the input var to float32 + # We need to do this because the type inference is very buggy when started from + # float16/float64 typed inputs. Until that is fixed in the following radar + # we cast all inputs of type float16/float64 to float32 as the first step. + # These casts will later get removed, if compute_precision=Float16 is + # provided, which will cause the FP16ComputePrecision pass to run. + # TODO: remove this when this radar is fixed: rdar://93731970 + input_var = mb.cast(x=input_var, dtype="fp32", name=name) + self.context.add(name, input_var) + outputs = convert_graph(self.context, graph, self.output_names) + ssa_func.set_outputs(outputs) + prog.add_function("main", ssa_func) + # check duplicate output + # Note: sometimes two outputs are pointing to the same Var, we should + # create mb.identity for those cases + block = prog["main"] + with block: + name_counts = {} + new_outputs = [output for output in block.outputs] + for i, v_o in enumerate(block.outputs): + if v_o.name not in name_counts: + name_counts[v_o.name] = 1 + else: + name_counts[v_o.name] += 1 + new_name = v_o.name + "_duplicate_" + str(name_counts[v_o.name]) + x = mb.identity(x=v_o, name=new_name) + new_outputs[i] = x + block.set_outputs(new_outputs) + + # Rename outputs to TF's name. This is needed when the last op doesn't + # generate a new Var (e.g., get_tuple, Identity etc.), and thus the + # last Var would have a different name than the last TF op's name. 
+ # + # Example: + # + # TF code: + # x = tf.placeholder(tf.float32, shape=(1,)) + # y = tf.placeholder(tf.float32, shape=(1,)) + # c = lambda i, j: \ + # tf.less(tf.math.reduce_mean(i), tf.math.reduce_mean(j)) + # b = lambda i, j: (tf.add(i, 1), j) + # res = tf.while_loop(c, b, [x, y]) + # + # Resulting nodes (excluding the nodes in while loop cond & body): + # + # node name: Placeholder op type: Placeholder inputs: [] + # node name: Placeholder_1 op type: Placeholder inputs: [] + # node name: make_input_0 op type: make_tuple inputs: ['Placeholder', + # 'Placeholder_1'] + # node name: while_0 op type: while inputs: ['make_input_0'] + # node name: while/Exit op type: get_tuple inputs: ['while_0'] + # node name: while/Exit_1 op type: get_tuple inputs: ['while_0'] + # + # Observe that return node `while/Exit` is an output from get_tuple, + # which in our translation simply unpack a python tuple of Vars + # ('while_0:0', 'while_0:1') returned from while_0 SSA op. We need to + # rename `while_0:0` to `while/Exit` in order for users to find the + # output. + # Note: only rename the output if the output is not Placeholder. + + input_names = [x.name for x in self.inputs] + for v_o, out_name in zip(prog["main"].outputs, self.output_names): + if v_o.name != out_name and v_o.name not in input_names: + logger.info( + "Renaming output var: '{}' -> '{}'".format(v_o.name, out_name) + ) + v_o.name = out_name + self.check_placeholder_output(prog, self.output_names) + + # verify that if model output dtypes / names are provided by the user, they are valid + if self.main_output_types is not None: + self._validate_and_update_main_output_types(prog) + prog.set_main_output_types(self.main_output_types) + + @_profile + def convert(self): + prog = Program() + if len(self.graph_stack) == 0: + raise ValueError("At least one TF function must be present") + if self.graph_stack[0] != "main": + msg = "TF root graph must be named 'main'. Got {}" + raise ValueError(msg.format(self.graph_stack[0])) + graph = self.tfssa.functions["main"].graph + for g_name in self.graph_stack[1:]: + self.context.add_graph(g_name, self.tfssa.functions[g_name].graph) + self.convert_main_graph(prog, graph) + return prog diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/dialect_ops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/dialect_ops.py new file mode 100644 index 00000000..1bc96bb9 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/dialect_ops.py @@ -0,0 +1,173 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools.converters.mil.mil import Operation, types +from coremltools.converters.mil.mil.input_type import (DefaultInputs, + InputSpec, + TensorInputType) +from coremltools.converters.mil.mil.ops.registry import SSAOpRegistry + +register_op = SSAOpRegistry.register_op + + +# This file contains the TF dialect of SSA. Briefly, these ops are only +# understandable in the TF frontend and not acceptable in the standard op set. +# No backend would support any of the op here. These ops exist to facilitate +# frontend SSA passes, but must be replaced with standard ops during SSA +# passes. + +# All tf op must start with 'tf_' prefix. +# +# tf_make_list allows elem_shape to be unspecified. 
core op make_list does +# not allow that. +@register_op(namespace="tf") +class tf_make_list(Operation): + input_spec = InputSpec( + init_length=TensorInputType(optional=True, type_domain=types.int32), + dynamic_length=TensorInputType(optional=True, type_domain=types.bool), + elem_shape=TensorInputType(const=True, optional=True, type_domain=types.int32), + dtype=TensorInputType(const=True, optional=True, type_domain=types.str), + ) + + def default_inputs(self): + return DefaultInputs( + init_length=1, + dynamic_length=True, + dtype="fp32", + ) + + def type_inference(self): + init_length = self.init_length.val + if self.elem_shape is None or self.elem_shape.sym_val is None: + return types.list( + types.unknown, + init_length=init_length, + dynamic_length=self.dynamic_length.val, + ) + builtin_dtype = types.string_to_builtin(self.dtype.val) + if builtin_dtype is None: + raise ValueError("Unsupported dtype {}".format(self.dtype.val)) + elem_type = types.tensor(builtin_dtype, self.elem_shape.sym_val) + return types.list( + elem_type, init_length=init_length, dynamic_length=self.dynamic_length.val + ) + + +class TfLSTMBase(Operation): + """ + Common LSTM inputs for BlockLSTMCell and BlockLSTM. + """ + + input_spec = InputSpec( + c_prev=TensorInputType(type_domain="T"), # [batch, hidden_dim] + h_prev=TensorInputType(type_domain="T"), # [batch, hidden_dim] + # weight: [input_dim + hidden_dim, 4*hidden_dim] (icfo layout) + weight=TensorInputType(const=True, type_domain="T"), + forget_bias=TensorInputType(const=True, optional=True, type_domain="T"), + # cell_clip == None implies not using cell clip + cell_clip=TensorInputType(const=True, optional=True, type_domain="T"), + # If use_peephole == False, weight_peep_* is ignored + use_peephole=TensorInputType(const=True, optional=True, type_domain=types.bool), + weight_peep_i=TensorInputType(const=True, optional=True, type_domain="T"), # [hidden_dim,] + weight_peep_f=TensorInputType(const=True, optional=True, type_domain="T"), # [hidden_dim,] + weight_peep_o=TensorInputType(const=True, optional=True, type_domain="T"), # [hidden_dim,] + bias=TensorInputType(const=True, type_domain="T"), # [4*hidden_dim] (icfo layout) + ) + + type_domains = { + "T": (types.fp16, types.fp32), + } + + def default_inputs(self): + return DefaultInputs( + forget_bias=1., + use_peephole=False, + ) + + def _check_peephole_weights(self): + # Check weight_peep_* + if self.use_peephole.val: + if ( + self.weight_peep_i is None + or self.weight_peep_f is None + or self.weight_peep_o is None + ): + raise ValueError( + "weight_peep_* cannot be None when use_peephole is True" + ) + + +@register_op(namespace="tf") +class tf_lstm_block_cell(TfLSTMBase): + """ + xh = [x, h_prev] + [i, ci, f, o] = xh * w + b + f = f + forget_bias + + if not use_peephole: + wci = wcf = wco = 0 + i = sigmoid(cs_prev .* wci + i) + f = sigmoid(cs_prev .* wcf + f) + ci = tanh(ci) + cs = ci .* i + cs_prev .* f + cs = clip(cs, cell_clip) + o = sigmoid(cs * wco + o) + co = tanh(cs) + h = co .* o + """ + input_spec = ( + InputSpec(x=TensorInputType(type_domain="T"),) + TfLSTMBase.input_spec # [batch, input_dim] + ) + + def __init__(self, **kwargs): + super(tf_lstm_block_cell, self).__init__(**kwargs) + + def type_inference(self): + self._check_peephole_weights() + # all return shapes are [batch, hidden_dim] + ret_shape = self.c_prev.shape + dtype = self.x.dtype + # See + # https://www.tensorflow.org/api_docs/python/tf/raw_ops/LSTMBlockCell + # All returned shapes are [batch, hidden_dim] + return ( + 
types.tensor(dtype, ret_shape), # i + types.tensor(dtype, ret_shape), # cs + types.tensor(dtype, ret_shape), # f + types.tensor(dtype, ret_shape), # o + types.tensor(dtype, ret_shape), # ci + types.tensor(dtype, ret_shape), # co + types.tensor(dtype, ret_shape), + ) # h + + +@register_op(namespace="tf") +class tf_lstm_block(TfLSTMBase): + """ + Apply LSTM to an input sequence + """ + input_spec = ( + InputSpec( + seq_len=TensorInputType(type_domain=types.int32), # int + x=TensorInputType(type_domain="T"), # [padded_len, batch, input_dim] + ) + + TfLSTMBase.input_spec + ) + + def type_inference(self): + self._check_peephole_weights() + padded_len = self.x.shape[0] + ret_shape = [padded_len] + list(self.c_prev.shape) + dtype = self.x.dtype + # All returned shapes are [padded_len, b, hidden_dim] + return ( + types.tensor(dtype, ret_shape), # i + types.tensor(dtype, ret_shape), # cs + types.tensor(dtype, ret_shape), # f + types.tensor(dtype, ret_shape), # o + types.tensor(dtype, ret_shape), # ci + types.tensor(dtype, ret_shape), # co + types.tensor(dtype, ret_shape), + ) # h diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/dot_visitor.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/dot_visitor.py new file mode 100644 index 00000000..2e4ba504 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/dot_visitor.py @@ -0,0 +1,149 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools.converters.mil.mil import types + + +class DotVisitor: + """ + Generates a dot description of a graph in dictionary form. 
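+
+    Typical use (illustrative):
+
+        dot_str = DotVisitor().visit_all(graph).get_result("digraph", "main_graph")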
+ """ + + def __init__(self, annotation=None): + self.result = [] + self.visited_memo = {} + self.highlights = {} + self.alternate_labeller = None + self.annotation = annotation + + def labeller(self, labeller): + self.alternate_labeller = labeller + return self + + def highlight_nodes(self, nodeset, color="yellow"): + for i in nodeset: + self.highlights[i] = color + return self + + def visit(self, graph, node, nodename_prefix=""): + if node.name in self.visited_memo: + return self + + # For printing datatype, breaks type + if node.attr.get("symbolic_datatype", None) is not None: + dtype = str(types.get_type_info(node.attr["symbolic_datatype"])) + elif node.datatype is not None: + dtype = str(types.get_type_info(node.datatype)) + else: + dtype = "Unknown" + + label = "" + if self.alternate_labeller is not None: + label = self.alternate_labeller(node) + else: + if len(node.outputs) == 0: + label = "\\n{" + node.name + "}" + if "Placeholder" in node.op: + label = "\\n{" + node.name + "}" + if node.op == "while": + label = ( + "\\n{body: " + + node.attr["body_function"] + + " cond:" + + node.attr["cond_function"] + + "}" + ) + if node.op == "function": + label = "\\n{body: " + node.attr["function_name"] + "}" + if node.op == "function_entry": + label = "\\n{" + node.name + "}" + label = node.op + ":" + dtype + label + + if node.name in self.highlights: + self.result.append( + '"' + + nodename_prefix + + node.name + + '"' + + '[label="' + + label + + '",fillcolor=%s,style=filled,fontcolor=%s]' + % ( + self.highlights[node.name], + "violetred" if node.attr.get(self.annotation, False) else "black", + ) + ) + else: + self.result.append( + '"' + + nodename_prefix + + node.name + + '"' + + '[label="' + + label + + '",fontcolor=%s]' + % ("violetred" if node.attr.get(self.annotation, False) else "black") + ) + + for i in node.inputs: + input_name = i + edge = ( + '"' + + nodename_prefix + + input_name + + '"' + + " -> " + + '"' + + nodename_prefix + + node.name + + '"' + ) + self.result.append(edge) + + for i in node.control_inputs: + input_name = i + edge = ( + '"' + + nodename_prefix + + input_name + + '"' + + " -> " + + '"' + + nodename_prefix + + node.name + + '"' + ) + edge = edge + " [style=dotted]" + self.result.append(edge) + + self.visited_memo[node.name] = 1 + + for i in node.inputs: + input_name = i + if input_name[0] == "^": + input_name = input_name[1:] + assert input_name in graph + self.visit(graph, graph[input_name], nodename_prefix) + return self + + def visit_all(self, graph, nodename_prefix=""): + for i in graph: + self.visit(graph, graph[i], nodename_prefix) + return self + + def get_result(self, graphtype="digraph", graph_name="g"): + return ( + graphtype + + " " + + graph_name + + " {\n\t" + + "\n\t".join(str(i) for i in self.result) + + ';\n\tlabel="' + + graph_name[8:] + + '";\n\tfontsize=96;\n}' + ) + + def __str__(self): + return self.get_result() diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/load.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/load.py new file mode 100644 index 00000000..bd77337b --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/load.py @@ -0,0 +1,316 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import gc +import os +from distutils.version import StrictVersion as _StrictVersion +from tempfile import mktemp + +import tensorflow as tf +from tqdm import tqdm as _tqdm + +from coremltools import _logger as logger +from coremltools._deps import _get_version +from coremltools.converters._profile_utils import _profile + +from .._utils import get_output_names +from .basic_graph_ops import fill_outputs +from .converter import TFConverter +from .parsed_tf_node import ParsedTFNode +from .tf_graph_pass import (cond_to_where, constant_propagation, + delete_asserts, delete_disconnected_nodes, + delete_unnecessary_constant_nodes, + functionalize_loops, fuse_dilation_conv, + insert_get_tuple, quantization_pass, + remove_variable_nodes, + tensor_array_resource_removal) +from .tfssa import NetworkEnsemble, SSAFunction + + +class TFLoader: + """Abstract class for TensorFlow model loader.""" + + def __init__(self, model, debug=False, **kwargs): + """ + TensorFlow model loader. + + Parameters + ---------- + model: TensorFlow model + Model generated using TensorFlow. + debug: bool, optional, defaults to False + If true, display verbose logging and visualizations. + kwargs: dict(str, Any), optional, defaults to None + Dictionary of additional arguments. + """ + self.model = model + self.debug = debug + self.kwargs = kwargs + self._graph_def = None + self._tf_ssa = None + + @_profile + def load(self): + """Load TensorFlow model into MIL program.""" + + logger.info("Loading TensorFlow model '{}'".format(self.model)) + outputs = self.kwargs.get("outputs", None) + output_names = get_output_names(outputs) + self._graph_def = self._graph_def_from_model(output_names) + + if self._graph_def is not None and len(self._graph_def.node) == 0: + msg = "tf.Graph should have at least 1 node, Got empty graph." + raise ValueError(msg) + + self._tf_ssa = self._tf_ssa_from_graph_def() + + del self._graph_def + gc.collect() + + if self.debug: + import graphviz + + dot_string = self._tf_ssa.get_dot_string( + annotation=True, name_and_op_style=True, highlight_debug_nodes=[] + ) + graphviz.Source(dot_string).view( + filename="/tmp/ssa_before_tf_passes", cleanup=True + ) + + program = self._program_from_tf_ssa() + logger.debug("program:\n{}".format(program)) + return program + + # @abstractmethod + def _graph_def_from_model(self, output_names=None): + """Load TensorFlow model into GraphDef. 
Overwrite for different TF versions."""
+        pass
+
+    # @abstractmethod
+    def _tf_ssa_from_graph_def(self, fn_name="main"):
+        """Load GraphDef and parse into NetworkEnsemble (TFSSA)."""
+        pass
+
+    # @abstractmethod
+    def _program_from_tf_ssa(self):
+        """Load NetworkEnsemble (TFSSA) and parse into MIL program."""
+        pass
+
+    @staticmethod
+    def extract_sub_graph(graph_def, outputs=None):
+        """Extract sub-graph based on user-provided outputs."""
+        if outputs is None or len(outputs) == 0:
+            return graph_def
+        msg = "Extracting sub-graph based on outputs '{}' from the full model"
+        logger.debug(msg.format(outputs))
+        outputs = outputs if isinstance(outputs, list) else [outputs]
+        outputs = [i.split(":")[0] for i in outputs]
+        if _get_version(tf.__version__) < _StrictVersion("1.13.1"):
+            return tf.graph_util.extract_sub_graph(graph_def, outputs)
+        else:
+            return tf.compat.v1.graph_util.extract_sub_graph(graph_def, outputs)
+
+
+class TF1Loader(TFLoader):
+    def __init__(self, model, debug=False, **kwargs):
+        """
+        TensorFlow 1.x model loader.
+
+        Parameters
+        ----------
+        model: Model created with TensorFlow 1.x
+            One of the following model formats:
+            - TensorFlow tf.Graph object or frozen graph (.pb) file path
+            - TensorFlow tf.keras.Model object or HDF5 (.h5) file path
+            - TensorFlow SavedModel directory path
+        debug: bool, optional. Defaults to False.
+            This flag should generally be False except when diagnosing
+            conversion errors. Setting this flag to True will cause graph
+            pass errors to be ignored, forcefully returning a
+            NetworkEnsemble object.
+        kwargs: dict(str, Any), optional
+            Dictionary of additional arguments.
+        """
+        TFLoader.__init__(self, model, debug, **kwargs)
+
+    def _graph_def_from_model(self, output_names=None):
+        """Overwrites TFLoader._graph_def_from_model()"""
+        msg = "Expected model format: [tf.Graph | .pb | SavedModel | tf.keras.Model | .h5], got {}"
+        if isinstance(self.model, tf.Graph) and hasattr(self.model, "as_graph_def"):
+            graph_def = self.model.as_graph_def(add_shapes=True)
+            return self.extract_sub_graph(graph_def, output_names)
+        elif isinstance(self.model, tf.keras.Model):
+            graph_def = self._from_tf_keras_model(self.model)
+            return self.extract_sub_graph(graph_def, output_names)
+        elif isinstance(self.model, str):
+            if not os.path.exists(str(self.model)):
+                raise ValueError('Input model "{}" does not exist'.format(self.model))
+            elif os.path.isfile(str(self.model)) and self.model.endswith(".pb"):
+                if _get_version(tf.__version__) < _StrictVersion("1.13.1"):
+                    with open(self.model, "rb") as f:
+                        gd = tf.GraphDef()
+                        gd.ParseFromString(f.read())
+                    with tf.Graph().as_default() as graph:
+                        tf.import_graph_def(gd, name="")
+                else:
+                    with tf.io.gfile.GFile(self.model, "rb") as f:
+                        gd = tf.compat.v1.GraphDef()
+                        gd.ParseFromString(f.read())
+                    with tf.Graph().as_default() as graph:
+                        tf.graph_util.import_graph_def(gd, name="")
+                graph_def = graph.as_graph_def(add_shapes=True)
+                return self.extract_sub_graph(graph_def, output_names)
+            elif os.path.isfile(str(self.model)) and self.model.endswith(".h5"):
+                graph_def = self._from_tf_keras_model(self.model)
+                return self.extract_sub_graph(graph_def, output_names)
+            elif os.path.isdir(str(self.model)):
+                graph_def = self._from_saved_model(self.model)
+                return self.extract_sub_graph(graph_def, output_names)
+            else:
+                raise NotImplementedError(msg.format(self.model))
+        else:
+            raise NotImplementedError(msg.format(self.model))
+
+    def _tf_ssa_from_graph_def(self, fn_name="main"):
+        """Overwrites TFLoader._tf_ssa_from_graph_def()"""
+        graph_dict = {}
+        for node in self._graph_def.node:
+            graph_dict[node.name] = ParsedTFNode(node)
+
+        tensor_array_resource_removal(graph_dict)
+        graph = insert_get_tuple(graph_dict)
+        graph = fill_outputs(graph)
+        delete_disconnected_nodes(graph)
+
+        tf_ssa = NetworkEnsemble()
+        tf_ssa.functions[fn_name] = SSAFunction(graph)
+        return tf_ssa
+
+    def _program_from_tf_ssa(self):
+        """Overwrites TFLoader._program_from_tf_ssa()"""
+        # Apply frontend passes on the TFSSA. Note that these are different
+        # from the passes applied to MIL in the TF frontend.
+        tf_passes = [
+            delete_asserts,
+            functionalize_loops,
+            constant_propagation,
+            delete_unnecessary_constant_nodes,  # must come after constant_propagation
+            quantization_pass,
+            cond_to_where,
+            remove_variable_nodes,
+            fuse_dilation_conv,
+        ]
+
+        if self.debug:
+            for tf_pass in _tqdm(
+                tf_passes, desc="Running TensorFlow Graph Passes", unit=" passes"
+            ):
+                try:
+                    tf_pass(self._tf_ssa)
+                except Exception as e:
+                    logger.exception('Exception in pass "{}": {}'.format(tf_pass, e))
+                    logger.info("Ignoring exception and continuing to next pass")
+        else:
+            for tf_pass in _tqdm(
+                tf_passes, desc="Running TensorFlow Graph Passes", unit=" passes"
+            ):
+                tf_pass(self._tf_ssa)
+
+        if self.debug:
+            import graphviz
+
+            dot_string = self._tf_ssa.get_dot_string(
+                annotation=True, name_and_op_style=True, highlight_debug_nodes=[]
+            )
+            graphviz.Source(dot_string).view(
+                filename="/tmp/ssa_after_tf_passes", cleanup=True
+            )
+
+        converter = TFConverter(
+            tfssa=self._tf_ssa,
+            inputs=self.kwargs["inputs"],
+            outputs=self.kwargs["outputs"],
+            opset_version=self.kwargs["specification_version"],
+        )
+        return converter.convert()
+
+    @staticmethod
+    def _from_saved_model(saved_model_dir):
+        # must import here as tf.contrib is only available on TF 1.x
+        from tensorflow.contrib.saved_model.python.saved_model import reader
+        from tensorflow.python.tools import freeze_graph
+
+        saved_model_tags = reader.get_saved_model_tag_sets(saved_model_dir)[0]
+        if not saved_model_tags:
+            msg = "Unsupported SavedModel directory format: no tag_sets available"
+            raise NotImplementedError(msg)
+
+        # get model outputs
+        output_node_names = []
+        if _get_version(tf.__version__) < _StrictVersion("1.13.1"):
+            sess = tf.Session()
+        else:
+            sess = tf.compat.v1.Session()
+        metagraph = tf.saved_model.loader.load(
+            sess, saved_model_tags, saved_model_dir
+        )
+        for sd in metagraph.signature_def.values():
+            output_node_names += [o.name.split(":")[0] for o in sd.outputs.values()]
+
+        sess.close()
+
+        # get frozen graph
+        output_graph = mktemp()
+        tf.compat.v1.reset_default_graph() if _get_version(tf.__version__) >= _StrictVersion("1.13.1") else tf.reset_default_graph()
+        freeze_graph.freeze_graph(
+            input_graph=None,
+            input_saver=None,
+            input_binary=None,
+            input_checkpoint=None,
+            output_node_names=",".join(output_node_names),
+            restore_op_name=None,
+            filename_tensor_name=None,
+            output_graph=output_graph,
+            clear_devices=True,
+            initializer_nodes="",
+            variable_names_whitelist="",
+            variable_names_blacklist="",
+            input_meta_graph=None,
+            input_saved_model_dir=saved_model_dir,
+            saved_model_tags=",".join(saved_model_tags),
+        )
+
+        if _get_version(tf.__version__) < _StrictVersion("1.13.1"):
+            graph_def = tf.GraphDef()
+            with open(output_graph, "rb") as f:
+                graph_def.ParseFromString(f.read())
+            graph_def = tf.graph_util.remove_training_nodes(graph_def)
+        else:
+            graph_def = tf.compat.v1.GraphDef()
+            with open(output_graph, "rb") as f:
+                graph_def.ParseFromString(f.read())
+            graph_def = tf.compat.v1.graph_util.remove_training_nodes(graph_def)
+        with tf.Graph().as_default() as graph:
+            tf.graph_util.import_graph_def(graph_def, name="")
+        return graph.as_graph_def(add_shapes=True)
+
+    @staticmethod
+    def _from_tf_keras_model(keras_model):
+        from tensorflow.python.framework.convert_to_constants import \
+            convert_variables_to_constants_v2
+        from tensorflow.python.keras.saving import saving_utils
+
+        if not isinstance(keras_model, tf.keras.Model):
+            keras_model = tf.keras.models.load_model(keras_model, None)
+
+        tf.keras.backend.clear_session()
+        tf.keras.backend.set_learning_phase(False)
+        fn = saving_utils.trace_model_call(keras_model)
+        cf = fn.get_concrete_function()
+        try:
+            frozen_fn = convert_variables_to_constants_v2(cf)
+            return frozen_fn.graph.as_graph_def(add_shapes=True)
+        except Exception:
+            raise NotImplementedError("Unhandled tf.keras model format")
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/naming_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/naming_utils.py
new file mode 100644
index 00000000..ebb94bc3
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/naming_utils.py
@@ -0,0 +1,34 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+
+_varname_charset = set(
+    [chr(i) for i in range(ord("A"), ord("Z") + 1)]
+    + [chr(i) for i in range(ord("a"), ord("z") + 1)]
+    + [chr(i) for i in range(ord("0"), ord("9") + 1)]
+    + ["_"]
+)
+
+
+def escape_name(name):
+    ret = "".join([i if i in _varname_charset else "_" for i in name])
+    if ret.endswith("_"):
+        return ret
+    else:
+        return ret + "_"
+
+
+def escape_fn_name(name):
+    ret = escape_name(name)
+    if ret.startswith("f_"):
+        return ret
+    else:
+        return "f_" + ret
+
+
+def normalize_names(names):
+    if isinstance(names, str):
+        return names.replace(":", "__").replace("/", "__")
+    return [i.replace(":", "__").replace("/", "__") for i in names]
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/ops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/ops.py
new file mode 100644
index 00000000..856ade4f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/ops.py
@@ -0,0 +1,3546 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as _np + +from coremltools import _logger as logger +from coremltools.converters.mil._deployment_compatibility import \ + AvailableTarget as target +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.block import \ + is_current_opset_version_compatible_with +from coremltools.converters.mil.mil.ops.defs._utils import ( + broadcast_shapes, promote_input_dtypes) +from coremltools.converters.mil.mil.types import builtin_to_string +from coremltools.converters.mil.mil.types.symbolic import (any_symbolic, + is_symbolic) + +from .._utils import build_einsum_mil +from .convert_utils import convert_graph +from .tf_op_registry import register_tf_op + + +def _adjust_min_max(min, max, num_bits=8): + if (min <= max) and (max <= 0): + min = (min - max) * 1.0 + max = 0.0 + elif (min >= 0) and (max >= min): + max = (max - min) * 1.0 + min = 0.0 + else: + scale = (max - min) / (2 ** num_bits - 1) + min_adj = scale * round(min / scale) + max_adj = max + min_adj - min + min = min_adj + max = max_adj + return min, max + + +def _is_scalar(type_): + if type_ is None: + return False + result = types.is_int(type_) or types.is_float(type_) or types.is_bool(type_) + if types.is_tensor(type_) and (len(type_.get_shape()) == 0): + result = True + return result + + +def _transpose_NHWC_to_NCHW(x): + return mb.transpose(x=x, perm=[0, 3, 1, 2]) + + +def _transpose_NCHW_to_NHWC(x, node_name): + return mb.transpose(x=x, perm=[0, 2, 3, 1], name=node_name) + + +def _transpose_NDHWC_to_NCDHW(x): + return mb.transpose(x=x, perm=[0, 4, 1, 2, 3]) + + +def _transpose_NCDHW_to_NDHWC(x, node_name): + return mb.transpose(x=x, perm=[0, 2, 3, 4, 1], name=node_name) + + +def _check_axes_type(x): + if x is None or x.val is None: + return None + if isinstance(x.val, _np.int32): + return _np.array([x.val]) + return x.val + + +def _value_at(x, idx): + """ + input x: 1D tensor (vector). + return value at index idx. x[idx]. 
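+    e.g. _value_at(input_shape, 0) returns the batch size as a scalar when
+    input_shape is a batch-first shape vector (as in BatchToSpaceND below).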
+ """ + assert x.rank == 1 + return mb.slice_by_index(x=x, begin=[idx], end=[0], squeeze_mask=[True]) + + +def _freq_to_mel(freq): + return 1127.0 * _np.log(1 + freq / 700.0) + + +def _get_MFCC_constants(spectrogram_N, + sample_rate, + upper_frequency_limit, + lower_frequency_limit, + filterbank_channel_count, + dct_coefficient_count): + + """ + params: + spectrogram_N : int + sample_rate: int + upper_frequency_limit : int + filterbank_channel_count : int + dct_coefficient_count : int + + returns: + array(shape: (spectrogram_N,)) + array(shape: (spectrogram_N, filterbank_channel_count)) + array(shape: (spectrogram_N, filterbank_channel_count)) + array(shape: (filterbank_channel_count, dct_coefficient_count)) + + reference: + https://github.com/tensorflow/tensorflow/blob/dec8e0b11f4f87693b67e125e67dfbc68d26c205/tensorflow/core/kernels/mfcc_mel_filterbank.cc + """ + + center_frequencies = _np.zeros((filterbank_channel_count + 1)) + mel_low = _freq_to_mel(lower_frequency_limit) + mel_hi = _freq_to_mel(upper_frequency_limit) + mel_span = mel_hi - mel_low + mel_spacing = mel_span / (filterbank_channel_count + 1) + for i in range(filterbank_channel_count + 1): + center_frequencies[i] = mel_low + (mel_spacing * (i + 1)) + + hz_per_sbin = 0.5 * sample_rate / (spectrogram_N - 1) + start_index = int(1.5 + (lower_frequency_limit / hz_per_sbin)) + end_index = int(upper_frequency_limit / hz_per_sbin) + + band_mapper = _np.zeros((spectrogram_N)) + channel = 0 + for i in range(spectrogram_N): + melf = _freq_to_mel(i * hz_per_sbin) + if (i < start_index) or (i > end_index): + band_mapper[i] = -2 + else: + while channel < filterbank_channel_count and center_frequencies[channel] < melf: + channel += 1 + band_mapper[i] = channel - 1 # Can be == -1 + + weights = _np.zeros((spectrogram_N)) + for i in range(spectrogram_N): + channel = int(band_mapper[i]) + if (i < start_index) or (i > end_index): + weights[i] = 0 + else: + if channel >= 0: + weights[i] = (center_frequencies[channel + 1] - _freq_to_mel(i * hz_per_sbin)) / ( + center_frequencies[channel + 1] - center_frequencies[channel]) + else: + weights[i] = (center_frequencies[0] - _freq_to_mel(i * hz_per_sbin)) / (center_frequencies[0] - mel_low) + + mat_spec_val = _np.zeros((spectrogram_N, filterbank_channel_count)) + mat_weighted = _np.zeros((spectrogram_N, filterbank_channel_count)) + for i in range(start_index, end_index + 1): # For each FFT bin + channel = int(band_mapper[i]) + if channel >= 0: + mat_weighted[i, channel] = 1 # Right side of triangle, downward slope + channel += 1 + if channel < filterbank_channel_count: + mat_weighted[i, channel] = -1 # Left side of triangle + mat_spec_val[i, channel] = 1 # Left side of triangle + + # compute the dct matrix + cosines = _np.zeros((filterbank_channel_count, dct_coefficient_count)) + fnorm = _np.sqrt(2.0 / filterbank_channel_count) + arg = _np.pi / filterbank_channel_count + for i in range(filterbank_channel_count): + for j in range(dct_coefficient_count): + cosines[i, j] = fnorm * _np.cos(j * arg * (i + 0.5)) + + return weights, mat_weighted, mat_spec_val, cosines + + +def _reshape_remaining_dimensions_to_canonical_shape(x, remaining_rank): + # An utility function that reshape a tensor with shape [batch, spatial_dims, remaining_dim_1, ..., remaining_dim_N] + # to [batch, spatial_dims, remaining_dim_1 * ... 
* remaining_dim_N]
+    # For the special case where there are no remaining dimensions, we expand the last axis
+    assert remaining_rank != 1
+    if remaining_rank == 0:
+        return mb.expand_dims(x=x, axes=[-1])
+    else:
+        x_shape = mb.shape(x=x)
+        batch_and_spatial_shape = mb.slice_by_size(x=x_shape, begin=[0], size=[x.rank-remaining_rank])
+        reshape_shape = mb.concat(values=[batch_and_spatial_shape, [-1]], axis=0)
+        return mb.reshape(x=x, shape=reshape_shape)
+
+
+def _reshape_remaining_dimension_to_original_shape(x, original_shape, remaining_rank):
+    # A utility function that reshapes a tensor with shape [batch_new, spatial_dims_new, remaining_dims] back to the
+    # original form, which is [batch_new, spatial_dims_new, remaining_dim_1, ..., remaining_dim_N]
+    assert remaining_rank != 1
+    if remaining_rank == 0:
+        return mb.squeeze(x=x, axes=[-1])
+    else:
+        x_shape = mb.shape(x=x)
+        spatial_rank = original_shape.shape[0] - remaining_rank - 1
+        batch_and_spatial_shape = mb.slice_by_size(x=x_shape, begin=[0], size=[1+spatial_rank])
+        remaining_shape = mb.slice_by_size(x=original_shape, begin=[1+spatial_rank], size=[-1])
+        reshape_shape = mb.concat(values=[batch_and_spatial_shape, remaining_shape], axis=0)
+        return mb.reshape(x=x, shape=reshape_shape)
+
+
+@register_tf_op(tf_alias=["BiasAdd", "AddV2"])
+def Add(context, node):
+    x = context[node.inputs[0]]
+    y = context[node.inputs[1]]
+    x, y = promote_input_dtypes([x, y])
+
+    if "data_format" in node.attr and node.attr["data_format"] == "NCHW":
+        if x.rank != 1 and y.rank != 1:
+            raise AssertionError("Bias needs to have rank equal to 1")
+
+        bias, data = (y, x) if y.rank == 1 else (x, y)
+
+        if not data.rank >= 3:
+            raise AssertionError("Data needs to have rank at least 3")
+
+        axes = [-(i + 1) for i in range(data.rank - 2)]
+
+        x = data
+        y = mb.expand_dims(x=bias, axes=axes, name=node.name + "_expanded_bias")
+
+    x = mb.add(x=x, y=y, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def AddN(context, node):
+    values = [context[name] for name in node.inputs]
+    if len(values) == 1:
+        Identity(context, node)
+        return
+    prev_var = values[0]
+    for idx, var in enumerate(values[1:]):
+        if var == values[-1]:
+            x = mb.add(x=prev_var, y=var, name=node.name)
+        else:
+            prev_var = mb.add(x=prev_var, y=var, name=node.name + "_tmpAddN_" + str(idx))
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Abs(context, node):
+    x = context[node.inputs[0]]
+    x = mb.abs(x=x, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Acos(context, node):
+    x = context[node.inputs[0]]
+    x = mb.acos(x=x, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def All(context, node):
+    x = context[node.inputs[0]]
+    axes = _check_axes_type(context[node.inputs[1]])
+    keep_dims = node.attr.get("keep_dims", False)
+    x = mb.cast(x=x, dtype="int32")
+    x = mb.reduce_prod(x=x, axes=axes, keep_dims=keep_dims)
+    x = mb.cast(x=x, dtype="bool", name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Any(context, node):
+    x = context[node.inputs[0]]
+    axes = _check_axes_type(context[node.inputs[1]])
+    keep_dims = node.attr.get("keep_dims", False)
+    x = mb.cast(x=x, dtype="int32")
+    x = mb.reduce_sum(x=x, axes=axes, keep_dims=keep_dims)
+    x = mb.cast(x=x, dtype="bool", name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def ArgMax(context, node):
+    x = context[node.inputs[0]]
+    axis = context[node.inputs[1]]
+    x = mb.reduce_argmax(x=x, axis=axis, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def ArgMin(context, node):
+    x = context[node.inputs[0]]
+    axis = context[node.inputs[1]]
+    x = mb.reduce_argmin(x=x, axis=axis, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Asin(context, node):
+    x = context[node.inputs[0]]
+    x = mb.asin(x=x, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Atan(context, node):
+    x = context[node.inputs[0]]
+    x = mb.atan(x=x, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Atanh(context, node):
+    x = context[node.inputs[0]]
+    x = mb.atanh(x=x, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def AvgPool(context, node):
+    x = context[node.inputs[0]]
+    in_shape = x.sym_type.get_shape()
+    d_rank = len(in_shape) - 2
+    data_format = node.attr.get("data_format", "NHWC")
+    ksize = node.attr.get("ksize", None)
+    kernel_sizes = _pool_pads_or_strides(ksize, data_format, d_rank)
+    strides = node.attr.get("strides", None)
+    if strides is not None:
+        strides = _pool_pads_or_strides(strides, data_format, d_rank)
+    pad_type = node.attr["padding"].lower()
+    if data_format == "NHWC":
+        x = _transpose_NHWC_to_NCHW(x)
+        x = mb.avg_pool(
+            x=x,
+            kernel_sizes=kernel_sizes,
+            strides=strides,
+            pad_type=pad_type,
+            exclude_padding_from_average=True,
+        )
+        x = _transpose_NCHW_to_NHWC(x, node.name)
+    else:
+        x = mb.avg_pool(
+            x=x,
+            kernel_sizes=kernel_sizes,
+            strides=strides,
+            pad_type=pad_type,
+            exclude_padding_from_average=True,
+            name=node.name,
+        )
+    context.add(node.name, x)
+
+
+@register_tf_op
+def AvgPool3D(context, node):
+    x = context[node.inputs[0]]
+    d_rank = x.rank - 2
+    data_format = node.attr.get("data_format", "NDHWC")
+    ksize = node.attr.get("ksize", None)
+    kernel_sizes = _pool_pads_or_strides(ksize, data_format, d_rank)
+    strides = node.attr.get("strides", None)
+    if strides is not None:
+        strides = _pool_pads_or_strides(strides, data_format, d_rank)
+    pad_type = node.attr["padding"].lower()
+    if data_format == "NDHWC":
+        x = _transpose_NDHWC_to_NCDHW(x)
+        x = mb.avg_pool(
+            x=x,
+            kernel_sizes=kernel_sizes,
+            strides=strides,
+            pad_type=pad_type,
+            exclude_padding_from_average=True,
+        )
+        x = _transpose_NCDHW_to_NDHWC(x, node.name)
+    else:
+        x = mb.avg_pool(
+            x=x,
+            kernel_sizes=kernel_sizes,
+            strides=strides,
+            pad_type=pad_type,
+            exclude_padding_from_average=True,
+            name=node.name,
+        )
+
+    context.add(node.name, x)
+
+
+@register_tf_op
+def BatchToSpaceND(context, node):
+    # In tensorflow, the input tensor has the shape of (batch,) + spatial_shape + remaining_shape.
+    # The shape is treated as a combination of 3 components:
+    # 1. A single batch dimension
+    # 2. Spatial dimensions, with a length spatial_rank, which could be either 1 or 2. Also, spatial_rank
+    #    is equal to the length of block_shape
+    # 3. Remaining dimensions, with a length remaining_rank
+
+    # The logic of translating this op is as follows:
+    # 1. We first reshape the input to a canonical shape (rolling the remaining shape dimensions into a
+    #    single dimension): (batch,) + spatial_shape + (R), where R = remaining_dim_1 * ... * remaining_dim_n
+    # 2. We support rank 1 and rank 2 spatial shape:
+    #    (i) rank 1: We decompose the BatchToSpace into small basic ops.
+    #    (ii) rank 2: We directly use the built in batch_to_space op.
+    #    The output would have shape (batch_new,) + spatial_shape_new + (R)
+    # 3. We transform the tensor back, by unrolling the remaining shape: (B_new,) + spatial_shape_new + remaining_shape
+
+    x = context[node.inputs[0]]
+    block_shape = context[node.inputs[1]].val
+    crops = context[node.inputs[2]].val
+    original_shape = mb.shape(x=x)
+
+    input_rank = x.rank
+    spatial_rank = len(block_shape)
+    remaining_rank = x.rank - 1 - spatial_rank
+    has_non_unity_remaining_dims = remaining_rank != 1
+
+    if block_shape is None or crops is None:
+        raise NotImplementedError(
+            "Dynamic block_shape and crops are not supported for BatchToSpaceND!"
+        )
+
+    if has_non_unity_remaining_dims:
+        # Reshape the input tensor to shape [batch, spatial_shape, remaining_dim_1 * ... * remaining_dim_N]
+        x = _reshape_remaining_dimensions_to_canonical_shape(x, remaining_rank)
+
+    if spatial_rank >= 3:
+        raise NotImplementedError("Rank of spatial shape > 2 is not supported.")
+
+    if spatial_rank == 2:
+        # Tensor has shape [B, H, W, C], we can directly use the batch_to_space op by doing
+        # [B, H, W, C] -> transpose -> [B, C, H, W] -> batch_to_space -> [B_new, C, H_new, W_new] ->
+        # transpose -> [B_new, H_new, W_new, C]
+        x = mb.transpose(x=x, perm=[0, 3, 1, 2])
+        x = mb.batch_to_space(x=x, block_shape=block_shape, crops=_np.zeros((2, 2), _np.int32), name=node.name)
+        if tuple(crops[0]) != (0, 0) or tuple(crops[1]) != (0, 0):
+            x = mb.crop(x=x, crop_height=crops[0], crop_width=crops[1])
+        x = mb.transpose(x=x, perm=[0, 2, 3, 1])
+
+    if spatial_rank == 1:
+        # In this case, we decompose batch_to_space into small basic ops
+        # [B, H, C] -> decomposed ops -> [B_new, H_new, C]
+
+        # reshape input to [block_shape, B/block_shape, H, C]
+        input_shape = mb.shape(x=x)
+        block_shape = block_shape[0]
+        batch_size = _value_at(input_shape, 0)
+        spatial_size = _value_at(input_shape, 1)
+        channel_size = _value_at(input_shape, 2)
+        new_batch_size = mb.cast(x=mb.real_div(x=batch_size, y=block_shape), dtype="int32")
+        reshape_values = [block_shape, new_batch_size, spatial_size, channel_size]
+        reshape_shape = mb.concat(values=reshape_values, axis=0)
+        x = mb.reshape(x=x, shape=reshape_shape, name=node.name)
+
+        # permute the tensor to [B/block_shape, H, block_shape, C]
+        x = mb.transpose(x=x, perm=[1, 2, 0, 3])
+
+        # reshape the tensor to [B/block_shape, H*block_shape, C]
+        new_spatial_size = mb.cast(x=mb.mul(x=spatial_size, y=block_shape), dtype="int32")
+        reshape_values = [new_batch_size, new_spatial_size, channel_size]
+        reshape_shape = mb.concat(values=reshape_values, axis=0)
+        x = mb.reshape(x=x, shape=reshape_shape)
+
+        # crop the tensor to [B/block_shape, H - crops[0][0] - crops[0][1], C]
+        x = mb.crop(x=x, crop_height=crops[0], crop_width=[0, 0])
+
+    if has_non_unity_remaining_dims:
+        # Reshape the tensor from shape [batch_new, spatial_shape_new, remaining_dim_1 * ...
* remaining_dim_N] back to + # shape [batch_new, spatial_shape_new, remaining_shape] + x = _reshape_remaining_dimension_to_original_shape(x, original_shape, remaining_rank) + + context.add(node.name, mb.identity(x=x, name=node.name)) + + +@register_tf_op +def Ceil(context, node): + x = context[node.inputs[0]] + x = mb.ceil(x=x, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Const(context, node): + if node.value is None: + raise ValueError("Const node '{}' cannot have no value".format(node.name)) + x = mb.const(val=node.value.val, name=node.name) + context.add(node.name, x) + + +def _conv2d3d_strides_or_dilations(name, value, data_format, default_value=1): + """Compute strides or dilation values for 2D and 3D convolutions.""" + if value is None: + value = default_value + if not isinstance(value, (int, list)): + raise ValueError("{} must be an int or list".format(name)) + + # Parse number of spatial dimensions from `data_format`, assuming N (batch) and C + # (input channels) are present + n_dims = len(data_format) - 2 + + if isinstance(value, int): + return [value] * n_dims + + if len(value) == 1: + return value * n_dims + if len(value) == n_dims: + return value + if len(value) != n_dims + 2: + raise ValueError( + "{} must have length 1, {}, or {}".format(name, n_dims, n_dims + 2) + ) + + if data_format == "NHWC": + # Only support stride/dilation along N, C == 1 + if not (value[0] == value[3] == 1): + raise ValueError( + "{} along N and C other than 1 not implemented".format(name) + ) + return value[1:3] + elif data_format == "NCHW" or data_format == "NCDHW": + if not (value[0] == value[1] == 1): + raise ValueError( + "{} along N and C other than 1 not implemented".format(name) + ) + return value[2:] + # "NDHWC" + if not (value[0] == value[4] == 1): + raise ValueError("{} along N and C other than 1 not implemented".format(name)) + return value[1:4] + + +@register_tf_op +def Cos(context, node): + x = context[node.inputs[0]] + x = mb.cos(x=x, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Cosh(context, node): + x = context[node.inputs[0]] + x = mb.cosh(x=x, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Cross(context, node): + x = context[node.inputs[0]] + y = context[node.inputs[1]] + # last dim must be 3; other dims must match + assert x.shape[1:] == y.shape[1:] + assert x.shape[-1] == 3 + x1 = mb.gather(x=x, indices=[1, 2, 0], axis=-1) + x2 = mb.gather(x=x, indices=[2, 0, 1], axis=-1) + y1 = mb.gather(x=y, indices=[1, 2, 0], axis=-1) + y2 = mb.gather(x=y, indices=[2, 0, 1], axis=-1) + z = mb.sub(x=mb.mul(x=x1, y=y2), y=mb.mul(x=x2, y=y1), name=node.name) + context.add(node.name, z) + + +@register_tf_op +def Einsum(context, node): + equation = node.attr["equation"] + a = context[node.inputs[0]] + b = context[node.inputs[1]] + x = build_einsum_mil(a, b, equation, node.name) + context.add(node.name, x) + + +@register_tf_op +def Equal(context, node): + x = context[node.inputs[0]] + y = context[node.inputs[1]] + x = mb.equal(x=x, y=y, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def ExtractImagePatches(context, node): + x = context[node.inputs[0]] + sizes = node.attr.get("ksizes") + strides = node.attr.get("strides") + rates = node.attr.get("rates") + padding = node.attr.get("padding") + if x.rank != 4: + raise ValueError("input for ExtractImagePatches should be a 4D tensor.") + if not all([rate == 1 for rate in rates]): + raise NotImplementedError( + "only rates with all 1s is implemented for 
ExtractImagePatches." + ) + if len(sizes) != 4 or sizes[0] != 1 or sizes[3] != 1: + raise ValueError( + "ExtractImagePatches only supports sizes (4D tensor) with 1s for batch and channel dimensions." + ) + if len(sizes) != 4 or strides[0] != 1 or strides[3] != 1: + raise ValueError( + "ExtractImagePatches only supports strides (4D tensor) with 1s for batch and channel dimensions." + ) + if not padding in ["VALID", "SAME"]: + raise ValueError("non-supported padding for ExtractImagePatches.") + h, w = x.shape[1], x.shape[2] + + # padding for SAME mode + if padding == "SAME": + delta_h = h % strides[1] if h % strides[1] != 0 else strides[1] + delta_w = w % strides[2] if w % strides[2] != 0 else strides[2] + last_h = h - delta_h + 1 + last_w = w - delta_w + 1 + pad_h = max(0, last_h + sizes[1] - 1 - h) + pad_w = max(0, last_w + sizes[2] - 1 - w) + pad_h = [pad_h // 2, pad_h // 2 if pad_h % 2 == 0 else pad_h // 2 + 1] + pad_w = [pad_w // 2, pad_w // 2 if pad_w % 2 == 0 else pad_w // 2 + 1] + pad = _np.array([[0, 0], pad_h, pad_w, [0, 0]]).astype(_np.int32) + pad = pad.reshape(-1) + if not all(pad == 0): + x = mb.pad(x=x, pad=pad, mode="constant", constant_val=0.0) + h, w = x.shape[1], x.shape[2] + + # compute boxes + batch = x.shape[0] + boxes = [] + h_index = list(range(0, h - sizes[1] + 1, strides[1])) + w_index = list(range(0, w - sizes[2] + 1, strides[2])) + for hi in h_index: + for wi in w_index: + boxes.append((hi, wi, hi + sizes[1] - 1, wi + sizes[2] - 1)) + + boxes = _np.array(boxes, dtype=_np.float32) + box_indices = _np.arange(batch) + box_indices = _np.tile(box_indices, (len(boxes), 1)) + box_indices = _np.transpose(box_indices) + box_indices = box_indices.reshape(-1, 1) + boxes = _np.tile(boxes, (batch, 1)) + boxes = _np.concatenate([box_indices, boxes], axis=1) + boxes = boxes.reshape(boxes.shape[0], 1, boxes.shape[1], 1, 1) + + # use crop_and_resize + x = _transpose_NHWC_to_NCHW(x) + x = mb.crop_resize( + x=x, + roi=boxes, + target_height=sizes[1], + target_width=sizes[2], + normalized_coordinates=False, + spatial_scale=1.0, + box_coordinate_mode="CORNERS_HEIGHT_FIRST", + sampling_mode="ALIGN_CORNERS", + ) + x = mb.squeeze(x=x, axes=[1]) + x = _transpose_NCHW_to_NHWC(x, node_name=node.name + "_transpose_to_nhwc") + x = mb.reshape(x=x, shape=(batch, len(h_index), len(w_index), -1), name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Exp(context, node): + x = context[node.inputs[0]] + x = mb.exp(x=x, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Floor(context, node): + x = context[node.inputs[0]] + x = mb.floor(x=x, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def FloorDiv(context, node): + x = context[node.inputs[0]] + y = context[node.inputs[1]] + x = mb.floor_div(x=x, y=y, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Greater(context, node): + x = context[node.inputs[0]] + y = context[node.inputs[1]] + x = mb.greater(x=x, y=y, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def GreaterEqual(context, node): + x = context[node.inputs[0]] + y = context[node.inputs[1]] + x = mb.greater_equal(x=x, y=y, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Less(context, node): + x = context[node.inputs[0]] + y = context[node.inputs[1]] + x = mb.less(x=x, y=y, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def LessEqual(context, node): + x = context[node.inputs[0]] + y = context[node.inputs[1]] + x = mb.less_equal(x=x, y=y, name=node.name) + 
context.add(node.name, x) + + +@register_tf_op +def Log(context, node): + x = context[node.inputs[0]] + x = mb.log(x=x, name=node.name) + context.add(node.name, x) + +@register_tf_op +def Log1p(context, node): + x = context[node.inputs[0]] + x = mb.log(x=x, epsilon=1., name=node.name) + context.add(node.name, x) + +@register_tf_op +def LogicalAnd(context, node): + x = context[node.inputs[0]] + y = context[node.inputs[1]] + x = mb.logical_and(x=x, y=y, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def LogicalNot(context, node): + x = context[node.inputs[0]] + x = mb.logical_not(x=x, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def LogicalOr(context, node): + x = context[node.inputs[0]] + y = context[node.inputs[1]] + x = mb.logical_or(x=x, y=y, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def LogicalXor(context, node): + x = context[node.inputs[0]] + y = context[node.inputs[1]] + x = mb.logical_xor(x=x, y=y, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def LRN(context, node): + x = context[node.inputs[0]] + depth_radius = node.attr.get("depth_radius") + size = (depth_radius * 2) + 1 + alpha = node.attr.get("alpha") * size + beta = node.attr.get("beta") + bias = node.attr.get("bias") + x = _transpose_NHWC_to_NCHW(x) + x = mb.local_response_norm(x=x, size=size, alpha=alpha, beta=beta, k=bias) + x = _transpose_NCHW_to_NHWC(x, node.name) + context.add(node.name, x) + + +@register_tf_op +def Maximum(context, node): + x = context[node.inputs[0]] + y = context[node.inputs[1]] + x = mb.maximum(x=x, y=y, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Minimum(context, node): + x = context[node.inputs[0]] + y = context[node.inputs[1]] + x = mb.minimum(x=x, y=y, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def FloorMod(context, node): + x = context[node.inputs[0]] + y = context[node.inputs[1]] + floor = mb.floor_div(x=x, y=y, name=node.name + "_floor_div") + floor_mutiply = mb.mul(x=floor, y=y, name=node.name + "_multiply") + x = mb.sub(x=x, y=floor_mutiply, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Mul(context, node): + x = context[node.inputs[0]] + y = context[node.inputs[1]] + x = mb.mul(x=x, y=y, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Neg(context, node): + x = context[node.inputs[0]] + x, y = promote_input_dtypes([x, -1]) + x = mb.mul(x=x, y=y, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def NotEqual(context, node): + x = context[node.inputs[0]] + y = context[node.inputs[1]] + x = mb.not_equal(x=x, y=y, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Pow(context, node): + x = context[node.inputs[0]] + y = context[node.inputs[1]] + x = mb.pow(x=x, y=y, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def DepthwiseConv2dNative(context, node): + # [kH, kW, C_in, multiplier] + W_hwim = context[node.inputs[1]] # m = multiplier + # [kH, kW, 1, C_in * multipler] + shape_hw1o = list(W_hwim.shape[:2]) + [1, W_hwim.shape[2] * W_hwim.shape[3]] + W_hw1o = mb.reshape(x=W_hwim, shape=shape_hw1o) + # [C_in * multipler, 1, kH, kW]. Note that C_in * multiplier = C_out in + # MIL. C_in / groups = 1 in depthwise conv. 
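+    # For example (shapes chosen here purely for illustration): with kH = kW = 3,
+    # C_in = 8 and multiplier = 2, the weight goes
+    # [3, 3, 8, 2] -> reshape -> [3, 3, 1, 16] -> transpose -> [16, 1, 3, 3],
+    # which is consumed below as a grouped conv with groups = C_in = 8.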
+ W_o1hw = mb.transpose(x=W_hw1o, perm=[3, 2, 0, 1]) + data_format = node.attr.get("data_format", "NHWC") + HW_dilations = _conv2d3d_strides_or_dilations( + "dilations", node.attr.get("dilations"), data_format + ) + HW_strides = _conv2d3d_strides_or_dilations( + "strides", node.attr.get("strides"), data_format + ) + + pad_type = node.attr.get("padding") + if pad_type not in ["VALID", "SAME"]: + raise ValueError("Invalid padding type for tf.nn.depthwise_conv2d") + + pad_type = pad_type.lower() + x = context[node.inputs[0]] + C_in = x.shape[-1] + if data_format == "NHWC": + x = _transpose_NHWC_to_NCHW(x) + # Only the last op should have the same name as node.name + conv_name = node.name + "x" if data_format == "NHWC" else node.name + x = mb.conv( + x=x, + weight=W_o1hw, + pad_type=pad_type, + strides=HW_strides, + dilations=HW_dilations, + groups=C_in, + name=conv_name, + ) + if data_format == "NHWC": + x = _transpose_NCHW_to_NHWC(x, node.name) + context.add(node.name, x) + + +@register_tf_op +def FakeQuantWithMinMaxVars(context, node): + w = context[node.inputs[0]] + min = context[node.inputs[1]].val + max = context[node.inputs[2]].val + num_bits = node.attr['num_bits'] + narrow_range = node.attr['narrow_range'] + + min, max = _adjust_min_max(min, max, num_bits) + + if narrow_range: + scale = (max-min) / (2 ** (num_bits) - 2) + bias = min - scale + else: + scale = (max-min) / (2 ** (num_bits) - 1) + bias = min + + w = mb.clip(x=w, alpha=min, beta=max) + w = mb.sub(x=w, y=bias) + x = mb.real_div(x=w, y=scale) + x = mb.round(x=x) + x = mb.mul(x=x, y=scale) + x = mb.add(x=x, y=bias, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Conv2D(context, node): + if "quantize" in node.attr: + quantization_type = "linear" + min = node.attr['quantize_min'] + max = node.attr['quantize_max'] + nbits = node.attr['num_bits'] + narrow_range = node.attr['narrow_range'] + + w = context[node.inputs[1]].sym_val + + min, max = _adjust_min_max(min, max, nbits) + + if narrow_range: + quant_scale = (max - min) / (2 ** (nbits) - 2) + quant_bias = (min-quant_scale) + else: + quant_scale = (max - min) / (2 ** (nbits) - 1) + quant_bias = (min) + + w_clip = _np.clip(w, min, max) + w_round = _np.round((w_clip-quant_bias)/quant_scale) + W_hwio = w_round.astype(_np.uint8) + + if not isinstance(quant_scale, list) and not isinstance(quant_scale, tuple): + quant_bias = [quant_bias] + quant_scale = [quant_scale] + else: + quantization_type = None + nbits = None + quant_scale = None + quant_bias = None + W_hwio = context[node.inputs[1]] + + if quantization_type is not None: + W_oihw = _np.transpose(W_hwio, axes=[3, 2, 0, 1]) + else: + W_oihw = mb.transpose(x=W_hwio, perm=[3, 2, 0, 1]) + + data_format = node.attr.get("data_format", "NHWC") + HW_dilations = _conv2d3d_strides_or_dilations( + "dilations", node.attr.get("dilations"), data_format + ) + HW_strides = _conv2d3d_strides_or_dilations( + "strides", node.attr.get("strides"), data_format + ) + + pad_type = node.attr.get("padding") + pad_type = pad_type.lower() + pad_type = "custom" if pad_type == "explicit" else pad_type + assert pad_type in {"same", "valid", "custom"} + x = context[node.inputs[0]] + if data_format == "NHWC": + x = _transpose_NHWC_to_NCHW(x) + if pad_type == "custom": + pad_val = node.attr["explicit_paddings"] + pad_val = pad_val[2:-2] + elif data_format == "NCHW" and pad_type == "custom": + pad_val = node.attr["explicit_paddings"] + pad_val = pad_val[4:] + # Only the last op should have the same name as node.name + conv_name = node.name 
+ "x" if data_format == "NHWC" else node.name + + # get the groups from the weighs shape and the input shape + _, in_channel, _, _ = x.shape + _, weight_in_channel, _, _ = W_oihw.shape + if in_channel % weight_in_channel != 0: + raise ValueError("input channel should be divided by the weight channel.") + groups = int(in_channel / weight_in_channel) + + if quantization_type is not None: + x = mb.conv_quantized( + x=x, + weight=W_oihw, + pad_type=pad_type, + strides=HW_strides, + dilations=HW_dilations, + name=conv_name, + quantization_type=quantization_type, + nbits=nbits, + quant_scale=quant_scale, + quant_bias=quant_bias, + groups=groups, + ) + elif pad_type == "custom": + x = mb.conv( + x=x, + weight=W_oihw, + pad_type=pad_type, + strides=HW_strides, + dilations=HW_dilations, + pad=pad_val, + groups=groups, + name=conv_name, + ) + else: + x = mb.conv( + x=x, + weight=W_oihw, + pad_type=pad_type, + strides=HW_strides, + dilations=HW_dilations, + groups=groups, + name=conv_name, + ) + if data_format == "NHWC": + x = _transpose_NCHW_to_NHWC(x, node.name) + context.add(node.name, x) + + +@register_tf_op +def Conv3D(context, node): + W_dhwio = context[node.inputs[1]] + W_oidhw = mb.transpose(x=W_dhwio, perm=[4, 3, 0, 1, 2]) + data_format = node.attr.get("data_format", "NDHWC") + DHW_dilations = _conv2d3d_strides_or_dilations( + "dilations", node.attr.get("dilations"), data_format + ) + DHW_strides = _conv2d3d_strides_or_dilations( + "strides", node.attr.get("strides"), data_format + ) + + pad_type = node.attr.get("padding") + if not isinstance(pad_type, str): + pad_type = "custom" + raise NotImplementedError("Custom padding not implemented for TF") + pad_type = pad_type.lower() + x = context[node.inputs[0]] + if data_format == "NDHWC": + # Convert input to NCDHW + x = _transpose_NDHWC_to_NCDHW(x) + # Only the last op should have the same name as node.name + conv_name = node.name + "x" if data_format == "NDHWC" else node.name + _, in_channel, _, _, _ = x.shape + _, weight_in_channel, _, _, _ = W_oidhw.shape + if in_channel % weight_in_channel != 0: + raise ValueError("input channel should be divided by the weight channel.") + groups = int(in_channel / weight_in_channel) + + x = mb.conv( + x=x, + weight=W_oidhw, + pad_type=pad_type, + strides=DHW_strides, + dilations=DHW_dilations, + groups=groups, + name=conv_name, + ) + if data_format == "NDHWC": + # Convert input back to NDHWC (from NCDHW) + x = _transpose_NCDHW_to_NDHWC(x, node.name) + context.add(node.name, x) + + +@register_tf_op +def Conv3DBackpropInputV2(context, node): + # Output shape: [N, D_out, H_out, W_out, C_out] + output_shape = context[node.inputs[0]].val + # Weight shape: [D, H, W, C_out, C_in] + W_dhwoi = context[node.inputs[1]] + W_iodhw = mb.transpose(x=W_dhwoi, perm=[4, 3, 0, 1, 2]) + # Input shape: [N, D_in, H_in, W_in, C_in] + x = context[node.inputs[2]] + + data_format = node.attr.get("data_format", "NDHWC") + DHW_dilations = _conv2d3d_strides_or_dilations( + "dilations", node.attr.get("dilations"), data_format + ) + DHW_strides = _conv2d3d_strides_or_dilations( + "strides", node.attr.get("strides"), data_format + ) + pad_type = node.attr.get("padding", None) + + if pad_type is None: + raise ValueError("Padding type not specified for op: {}".format(node.name)) + + if not isinstance(pad_type, str): + pad_type = "custom" + raise NotImplementedError("Custom padding not implemented for TF") + pad_type = pad_type.lower() + + if data_format == "NDHWC": + # Convert input to NCDHW + x = _transpose_NDHWC_to_NCDHW(x) + if 
output_shape is not None: + output_shape = [output_shape[0], output_shape[4], + output_shape[1], output_shape[2], output_shape[3]] + + # Only the last op should have the same name as node.name + conv_name = node.name + "_x" if data_format == "NDHWC" else node.name + # Pass output shape provided above + x = mb.conv_transpose( + x=x, + weight=W_iodhw, + pad_type=pad_type, + strides=DHW_strides, + output_shape=output_shape, + dilations=DHW_dilations, + name=conv_name, + ) + if data_format == "NDHWC": + # Convert input back to NDHWC (from NCDHW) + x = _transpose_NCDHW_to_NDHWC(x, node.name) + context.add(node.name, x) + + +@register_tf_op +def DepthToSpace(context, node): + x = context[node.inputs[0]] + block_size = node.attr.get("block_size") + data_format = node.attr.get("data_format", "NHWC") + if data_format == "NHWC": + x = _transpose_NHWC_to_NCHW(x) + x = mb.depth_to_space(x=x, block_size=block_size) + x = _transpose_NCHW_to_NHWC(x, node.name) + else: + x = mb.depth_to_space(x=x, block_size=block_size, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def EuclideanNorm(context, node): + x = context[node.inputs[0]] + axes = _check_axes_type(context[node.inputs[1]]) + keep_dims = node.attr.get("keep_dims", False) + x = mb.reduce_l2_norm(x=x, axes=axes, keep_dims=keep_dims, name=node.name) + context.add(node.name, x) + +@register_tf_op +def IdentityN(context, node): + res = [mb.identity(x=context[x]) for x in node.inputs] + context.add(node.name, res) + + +@register_tf_op +def ExpandDims(context, node): + x = context[node.inputs[0]] + axis = context[node.inputs[1]] + if axis.op.op_type == "const" and (axis.val is not None and axis.val.size == 1): + axis = axis.val[0] if axis.shape == (1,) else axis.val + else: + raise ValueError("Expand Dims: Invalid value for parameter axis") + x = mb.expand_dims(x=x, axes=[axis], name=node.name) + context.add(node.name, x) + + +@register_tf_op(tf_alias=["FusedBatchNormV2", "FusedBatchNormV3"]) +def FusedBatchNorm(context, node): + # Get attributes + data_format = node.attr.get("data_format", "NHWC") + epsilon = node.attr.get("epsilon", None) + + # Get inputs + x = context[node.inputs[0]] + scale = context[node.inputs[1]] + offset = context[node.inputs[2]] + mean = context[node.inputs[3]] + variance = context[node.inputs[4]] + if data_format == "NHWC": + # TF's FusedBatchNorm is only for 4D inputs + x = _transpose_NHWC_to_NCHW(x) + x = mb.batch_norm( + x=x, mean=mean, variance=variance, gamma=scale, beta=offset, epsilon=epsilon + ) + x = _transpose_NCHW_to_NHWC(x, node.name + ":0") + else: + x = mb.batch_norm( + x=x, + mean=mean, + variance=variance, + gamma=scale, + beta=offset, + epsilon=epsilon, + name=node.name + ":0", + ) + # Inference only batch norm does not have meaningful outputs for + # batch_mean, batch_variance etc. 
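+    # We nevertheless register the 3-tuple [x, mean, variance] so that any
+    # downstream get_tuple op indexing outputs 1 and 2 resolves to the
+    # (passthrough) moving mean and variance.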
+ context.add(node.name, [x, mean, variance]) + + +@register_tf_op +def Fill(context, node): + shape = context[node.inputs[0]] + value = context[node.inputs[1]] + x = mb.fill(shape=shape, value=value, name=node.name) + context.add(node.name, x) + + +@register_tf_op(tf_alias=["ImageProjectiveTransformV3"]) +def ImageProjectiveTransformV2(context, node): + # Data shape format: [batch, height, width, channels] + x = context[node.inputs[0]] + # Transforms shape format: [batch, 8] or [1, 8] matrix, [a0, a1, a2, b0, b1, b2, c0, c1] + transforms = context[node.inputs[1]] + # 1-D Tensor [new_height, new_width] + output_shape = context[node.inputs[2]] + + # For V3, there is an additional fill_value input + if len(node.inputs) == 4: + fill_value = context[node.inputs[3]].val + if fill_value != 0.0: + msg = ("fill_value {} not supported for tf ImageProjectiveTransformV2/V3 op {}. " + "Only fill_value = 0.0 is supported.").format(fill_value, node.name) + raise ValueError(msg) + + interpolation = node.attr.get("interpolation") + if interpolation != "BILINEAR": + msg = ("interpolation {} not supported for tf ImageProjectiveTransformV2/V3 op {}. " + "Only interpolation = BILINEAR is supported.").format(interpolation, node.name) + raise ValueError(msg) + + fill_mode = node.attr.get("fill_mode") + if fill_mode != "CONSTANT": + msg = ("fill_mode {} not supported for tf ImageProjectiveTransformV2/V3 op {}. " + "Only fill_mode = CONSTANT is supported.").format(fill_mode, node.name) + raise ValueError(msg) + + h_out = output_shape.val[0] + w_out = output_shape.val[1] + h_in = x.shape[1] + w_in = x.shape[2] + + # Don't allow non-zero c0 or c1, check for each batch + n_batch = transforms.val.shape[0] + transform_matrix = [] + for batch in range(n_batch): + c0 = transforms.val[batch][6] + c1 = transforms.val[batch][7] + if not (c0 == c1 == 0.0): + raise NotImplementedError( + "'affine' op with 'transforms' contains non-zero " + + "c0 or c1 is not supported, Got: {}".format( + transforms + ) + ) + # In the tensorflow affine transform function, the coordinate is in the original image size range, + # i.e., for the input image, x is in range [0, W_in), and y is in range [0, H_in) + # For the output image, x is in range [0, W_out), and y is in range [0, H_out) + # However, the MIL affine op is in the normalized coordinate, in which x and y are both in range [-1, 1] + # So we need to update the affine transformation matrix. 
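+        # (The mb.affine call below uses align_corners=True, under which
+        # normalized -1 and +1 map to the centers of the first and last pixels.)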
+        # We have the following four equations:
+        # (1) x_original_in  = (x_normalized_in + 1) * (W_in - 1) / 2
+        # (2) y_original_in  = (y_normalized_in + 1) * (H_in - 1) / 2
+        # (3) x_original_out = (x_normalized_out + 1) * (W_out - 1) / 2
+        # (4) y_original_out = (y_normalized_out + 1) * (H_out - 1) / 2
+        # The original transforms matrix is in the original coordinate:
+        # (i)  x_original_in = a * x_original_out + b * y_original_out + c
+        # (ii) y_original_in = d * x_original_out + e * y_original_out + f
+        # After plugging (1) - (4) into (i) and (ii), we obtain the new
+        # transformation matrix in the normalized coordinates
+        a, b, c, d, e, f = transforms.val[batch].tolist()[:6]
+        new_a = a * (w_out - 1) / (w_in - 1)
+        new_b = b * (h_out - 1) / (w_in - 1)
+        new_c = (2 * c + a * (w_out - 1) + b * (h_out - 1)) / (w_in - 1) - 1
+        new_d = d * (w_out - 1) / (h_in - 1)
+        new_e = e * (h_out - 1) / (h_in - 1)
+        new_f = (2 * f + d * (w_out - 1) + e * (h_out - 1)) / (h_in - 1) - 1
+        transform_matrix.append([new_a, new_b, new_c, new_d, new_e, new_f])
+
+    transform_matrix = _np.array(transform_matrix)
+
+    x = _transpose_NHWC_to_NCHW(x)
+    x = mb.affine(
+        x=x,
+        transform_matrix=transform_matrix,
+        output_height=output_shape.val[0],
+        output_width=output_shape.val[1],
+        sampling_mode="bilinear",
+        padding_mode="constant",
+        padding_value=0.0,
+        coordinates_mode="normalized_minus_one_to_one",
+        align_corners=True,
+        name=node.name + "_affine",
+    )
+    x = _transpose_NCHW_to_NHWC(x, node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op(tf_alias=["DivNoNan"])
+def RealDiv(context, node):
+    x = mb.cast(x=context[node.inputs[0]], dtype="fp32")
+    y = mb.cast(x=context[node.inputs[1]], dtype="fp32")
+    x = mb.real_div(x=x, y=y, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op(tf_alias=["Addons>Resampler"])
+def Resampler(context, node):
+    # Data shape format: (Batch, Hin, Win, C)
+    x = context[node.inputs[0]]
+    # Warp shape format: (Batch, Hout, Wout, 2)
+    warp = context[node.inputs[1]]
+
+    # Handle rank-3 warp tensor
+    is_rank3_warp = warp.rank == 3
+    if is_rank3_warp:  # expand spatial dimension
+        warp = mb.expand_dims(x=warp, axes=[1], name=warp.name + "_expand_dims")
+
+    x = _transpose_NHWC_to_NCHW(x)
+    x = mb.resample(
+        x=x,
+        coordinates=warp,
+        sampling_mode="bilinear",
+        padding_mode="constant",
+        padding_value=0.0,
+        coordinates_mode="unnormalized",
+        align_corners=True,
+        name=node.name + "_resample",
+    )
+    x = _transpose_NCHW_to_NHWC(
+        x, node.name + "_transpose" if is_rank3_warp else node.name
+    )
+    if is_rank3_warp:  # squeeze spatial dimension
+        x = mb.squeeze(x=x, axes=[1], name=node.name)
+
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Rsqrt(context, node):
+    x = context[node.inputs[0]]
+    x = mb.rsqrt(x=x, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Sub(context, node):
+    x = context[node.inputs[0]]
+    y = context[node.inputs[1]]
+    x = mb.sub(x=x, y=y, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def StopGradient(context, node):
+    Identity(context, node)
+
+
+@register_tf_op
+def Identity(context, node):
+    x = context[node.inputs[0]]
+    # In many cases we can skip and just make downstream ops reference the
+    # pre-identity op. However, when identity is an output or pre-identity
+    # is a placeholder, an identity op, or mb.mul(x, 1.0) is required.
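+    # Concretely: reuse the upstream var when this Identity has downstream
+    # consumers (len(node.outputs) != 0) or its input is produced by an op
+    # (x.op is not None); only an output fed directly by a placeholder
+    # materializes the mul(x, 1.0) below.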
+ if len(node.outputs) != 0 or x.op is not None: + context.add(node.name, x, is_new_var=False) + else: + x = mb.mul(x=x, y=1.0, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Print(context, node): + Identity(context, node) + + +@register_tf_op +def Placeholder(context, node): + # no-op as we add Placeholder separately. + pass + + +def _pool_pads_or_strides(tf_spec, data_format, d_rank): + if tf_spec is None: + d_spec = [1] * d_rank + elif not isinstance(tf_spec, list): + d_spec = [tf_spec] * d_rank + elif len(tf_spec) == 2: + d_spec = tf_spec + elif len(tf_spec) == 4: + if data_format == "NHWC": + d_spec = tf_spec[1:3] + else: + d_spec = tf_spec[2:] + elif len(tf_spec) == 5: + if data_format == "NDHWC": + d_spec = tf_spec[1:4] + else: + # NCDHW + d_spec = tf_spec[2:] + else: + raise ValueError("Unsupported tf_spec: %s" % tf_spec) + return d_spec + + +@register_tf_op(tf_alias=["BatchMatMul", "BatchMatMulV2"]) +def MatMul(context, node): + a = context[node.inputs[0]] + b = context[node.inputs[1]] + transpose_a = node.attr.get("adj_x", False) or node.attr.get("transpose_a", False) + transpose_b = node.attr.get("adj_y", False) or node.attr.get("transpose_b", False) + a, b = promote_input_dtypes([a, b]) + x = mb.matmul( + x=a, y=b, transpose_x=transpose_a, transpose_y=transpose_b, name=node.name + ) + context.add(node.name, x) + + +@register_tf_op +def MaxPool(context, node): + x = context[node.inputs[0]] + in_shape = x.sym_type.get_shape() + d_rank = len(in_shape) - 2 + data_format = node.attr.get("data_format", "NHWC") + ksize = node.attr.get("ksize", None) + kernel_sizes = _pool_pads_or_strides(ksize, data_format, d_rank) + strides = node.attr.get("strides", None) + if strides is not None: + strides = _pool_pads_or_strides(strides, data_format, d_rank) + pad_type = node.attr["padding"].lower() + if data_format == "NHWC": + x = _transpose_NHWC_to_NCHW(x) + x = mb.max_pool( + x=x, kernel_sizes=kernel_sizes, strides=strides, pad_type=pad_type + ) + x = _transpose_NCHW_to_NHWC(x, node.name) + else: + x = mb.max_pool( + x=x, + kernel_sizes=kernel_sizes, + strides=strides, + pad_type=pad_type, + name=node.name, + ) + context.add(node.name, x) + + +@register_tf_op +def MaxPool3D(context, node): + x = context[node.inputs[0]] + d_rank = x.rank - 2 + data_format = node.attr.get("data_format", "NDHWC") + ksize = node.attr.get("ksize", None) + kernel_sizes = _pool_pads_or_strides(ksize, data_format, d_rank) + strides = node.attr.get("strides", None) + if strides is not None: + strides = _pool_pads_or_strides(strides, data_format, d_rank) + pad_type = node.attr["padding"].lower() + if data_format == "NDHWC": + x = _transpose_NDHWC_to_NCDHW(x) + x = mb.max_pool( + x=x, kernel_sizes=kernel_sizes, strides=strides, pad_type=pad_type + ) + x = _transpose_NCDHW_to_NDHWC(x, node.name) + else: + x = mb.max_pool( + x=x, + kernel_sizes=kernel_sizes, + strides=strides, + pad_type=pad_type, + name=node.name, + ) + + context.add(node.name, x) + + +@register_tf_op +def MatrixBandPart(context, node): + x = context[node.inputs[0]] + lower = context[node.inputs[1]] + upper = context[node.inputs[2]] + x = mb.band_part(x=x, lower=lower, upper=upper, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Max(context, node): + x = context[node.inputs[0]] + axes = _check_axes_type(context[node.inputs[1]]) + keep_dims = node.attr.get("keep_dims", False) + x = mb.reduce_max(x=x, axes=axes, keep_dims=keep_dims, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def 
Min(context, node): + x = context[node.inputs[0]] + axes = _check_axes_type(context[node.inputs[1]]) + keep_dims = node.attr.get("keep_dims", False) + x = mb.reduce_min(x=x, axes=axes, keep_dims=keep_dims, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Prod(context, node): + x = context[node.inputs[0]] + axes = _check_axes_type(context[node.inputs[1]]) + keep_dims = node.attr.get("keep_dims", False) + x = mb.reduce_prod(x=x, axes=axes, keep_dims=keep_dims, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Cast(context, node): + type_map = { + types.fp16: "fp16", + types.float: "fp32", + types.double: "fp32", + types.int32: "int32", + types.int64: "int32", + } + if node.attr["DstT"] not in type_map.keys(): + raise NotImplementedError( + "Cast: Provided destination type {} not " + "supported.".format(types.get_type_info(node.attr["DstT"])) + ) + x = context[node.inputs[0]] + dtype = type_map[node.attr["DstT"]] + x = mb.cast(x=x, dtype=dtype, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Round(context, node): + x = context[node.inputs[0]] + x = mb.round(x=x, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Sign(context, node): + x = context[node.inputs[0]] + x = mb.sign(x=x, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Sin(context, node): + x = context[node.inputs[0]] + x = mb.sin(x=x, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Sinh(context, node): + x = context[node.inputs[0]] + x = mb.sinh(x=x, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Slice(context, node): + x = context[node.inputs[0]] + begin = context[node.inputs[1]] + size = context[node.inputs[2]] + res = mb.slice_by_size(x=x, begin=begin, size=size, name=node.name) + context.add(node.name, res) + + +@register_tf_op +def Sqrt(context, node): + x = context[node.inputs[0]] + x = mb.sqrt(x=x, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Square(context, node): + x = context[node.inputs[0]] + x = mb.mul(x=x, y=x, name=node.name) + context.add(node.name, x) + + +def _softmax_cross_entropy_with_logits(feats, labels, name): + # compute the log softmax + y = mb.reduce_log_sum_exp(x=feats, axes=[-1], keep_dims=True) + log_softmax = mb.sub(x=feats, y=y) + loss = mb.mul(x=labels, y=log_softmax) + loss = mb.mul(x=loss, y=-1.) 
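+    # i.e. the translated op computes
+    # loss[n] = -sum_c labels[n, c] * log_softmax(feats)[n, c]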
+    loss = mb.reduce_sum(x=loss, axes=[-1], name=name)
+    return loss
+
+
+@register_tf_op
+def SparseSoftmaxCrossEntropyWithLogits(context, node):
+    feats = context[node.inputs[0]]
+    labels = context[node.inputs[1]]
+    class_nums = feats.shape[1]
+    labels = mb.one_hot(
+        indices=labels,
+        one_hot_vector_size=class_nums,
+    )
+    labels = mb.cast(x=labels, dtype="fp32")
+    loss = _softmax_cross_entropy_with_logits(feats, labels, node.name)
+    context.add(node.name, loss)
+
+
+@register_tf_op
+def SoftmaxCrossEntropyWithLogits(context, node):
+    feats = context[node.inputs[0]]
+    labels = context[node.inputs[1]]
+    loss = _softmax_cross_entropy_with_logits(feats, labels, node.name)
+    context.add(node.name, loss)
+
+
+@register_tf_op
+def StridedSlice(context, node):
+    x = context[node.inputs[0]]
+    begin = context[node.inputs[1]]
+    end = context[node.inputs[2]]
+    stride = context[node.inputs[3]]
+
+    def bitmask_to_array(bit):
+        if bit < 0:
+            arr = _np.binary_repr(bit, width=8)[::-1]
+            arr = [bool(int(x)) for x in list(arr)]
+            if node.attr.get("ellipsis_mask", 0) != 0:
+                # In case of non-zero ellipsis_mask, we compute the output rank to be the
+                # max rank of all the masks. This doesn't work if we computed a mask of constant
+                # width 8 here (since the max rank is then taken to be 8 wrongly).
+                raise ValueError("Cannot figure out slice rank with negative mask values and " \
+                                 "non-zero ellipsis_mask")
+        else:
+            # This method prevents unnecessary padding of the bitmask when it is not negative.
+            # It can be padded with any extra False values later, based on output rank.
+            arr = []
+            while bit > 0:
+                if bit & 1:
+                    arr.append(True)
+                else:
+                    arr.append(False)
+                bit >>= 1
+
+        return arr
+
+    begin_mask = bitmask_to_array(node.attr.get("begin_mask", 0))
+    end_mask = bitmask_to_array(node.attr.get("end_mask", 0))
+    squeeze_mask = bitmask_to_array(node.attr.get("shrink_axis_mask", 0))
+    ellipsis_mask = bitmask_to_array(node.attr.get("ellipsis_mask", 0))
+    new_axis_mask = bitmask_to_array(node.attr.get("new_axis_mask", 0))
+
+    def _pad_mask(
+        x,
+        begin,
+        end,
+        stride,
+        begin_mask,
+        end_mask,
+        squeeze_mask,
+        ellipsis_mask,
+        new_axis_mask,
+    ):
+        # This function pads the masks, stride, begin and end to the same rank as the input tensor.
+        if begin.rank != 1:
+            raise ValueError(
+                "begin should be 1-D tensor, got {}-D tensor instead".format(begin.rank)
+            )
+        if end.rank != 1:
+            raise ValueError(
+                "end should be 1-D tensor, got {}-D tensor instead".format(end.rank)
+            )
+
+        # check if inputs can be determined
+        begin_cache = begin
+        end_cache = end
+        begin = [] if begin.val is None else begin.val.tolist()
+        end = [] if end.val is None else end.val.tolist()
+        stride = [] if stride is None else stride.val.tolist()
+
+        # compute the rank everything should be padded to, accounting for new axes
+        new_dims = sum(i is True for i in new_axis_mask)
+        if new_dims > 0:
+            x_rank = x.rank + new_dims
+        else:
+            x_rank = x.rank
+
+        def pad_array(arr, max_rank, idx, default_value):
+            """
+            This function pads arr to x_rank with default_value.
+            idx is the index where ellipsis_mask = True.
+            max_rank is the maximum rank of the masks, stride, begin and end.
+            """
+            mask = arr[:]
+            mask += [default_value] * (x_rank - len(mask))
+            new_mask = []
+
+            for i in range(max_rank):
+                num = 1 if i != idx else x_rank - max_rank + 1
+                new_mask += [mask[i]] * num
+            return new_mask
+
+        mask_list = [
+            begin_mask,
+            end_mask,
+            squeeze_mask,
+            ellipsis_mask,
+            new_axis_mask,
+            stride,
+            begin,
+            end,
+        ]
+        max_rank = max([len(arr) for arr in mask_list])
+
+        # If ellipsis_mask is given, the last element of it would be True
+        # Otherwise, we simply pad each mask by appending default value
+        if ellipsis_mask != []:
+            rank = max_rank
+            idx = len(ellipsis_mask) - 1
+        else:
+            rank = x_rank
+            idx = -1
+
+        begin_mask = pad_array(begin_mask, rank, idx, False)
+        end_mask = pad_array(end_mask, rank, idx, False)
+        squeeze_mask = pad_array(squeeze_mask, rank, idx, False)
+        ellipsis_mask = pad_array(ellipsis_mask, rank, idx, False)
+        new_axis_mask = pad_array(new_axis_mask, rank, idx, False)
+        stride = pad_array(stride, rank, idx, 1)
+
+        # pad begin and end if they are determined during compile time
+        if begin != []:
+            begin = pad_array(begin, rank, idx, 0)
+        if end != []:
+            end = pad_array(end, rank, idx, 0)
+
+        # make sure begin_mask, end_mask, and stride are consistent with ellipsis mask
+        # begin_mask and end_mask should be True, and stride should be 1.
+        for i, mask in enumerate(ellipsis_mask):
+            if mask:
+                begin_mask[i] = True
+                end_mask[i] = True
+                stride[i] = 1
+
+        # make sure begin_mask, end_mask, and stride are consistent with new axis mask
+        # begin_mask and end_mask should be True, and stride should be 1.
+        for i, mask in enumerate(new_axis_mask):
+            if mask:
+                begin_mask[i] = True
+                end_mask[i] = True
+                stride[i] = 1
+
+        # convert begin and end back to cache value if they are run-time determined
+        if begin == []:
+            begin = begin_cache
+
+        if end == []:
+            end = end_cache
+
+        # set begin_mask and end_mask for any axes our default padding added.
+        # This happens when the given index is less than the tensor rank;
+        # for instance, indexing a 3D tensor A with A[:1, :1] is equivalent to
+        # A[:1, :1, :]. In this case we should append True to begin_mask and end_mask
+        if ellipsis_mask == [False] * x_rank:
+            for i in range(max_rank, x_rank):
+                begin_mask[i] = True
+                end_mask[i] = True
+
+        return begin, end, stride, begin_mask, end_mask, squeeze_mask, new_axis_mask
+
+    begin, end, stride, begin_mask, end_mask, squeeze_mask, new_axis_mask = _pad_mask(
+        x,
+        begin,
+        end,
+        stride,
+        begin_mask,
+        end_mask,
+        squeeze_mask,
+        ellipsis_mask,
+        new_axis_mask,
+    )
+
+    if sum(i is True for i in new_axis_mask) > 0:
+        axes = [i for i, val in enumerate(new_axis_mask) if val is True]
+        x = mb.expand_dims(x=x, axes=axes, name=node.name + "_new_axes")
+
+    x = mb.slice_by_index(
+        x=x,
+        name=node.name,
+        begin=begin,
+        end=end,
+        stride=stride,
+        begin_mask=begin_mask,
+        end_mask=end_mask,
+        squeeze_mask=squeeze_mask,
+    )
+
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Sum(context, node):
+    x = context[node.inputs[0]]
+    axes = _check_axes_type(context[node.inputs[1]])
+    keep_dims = node.attr.get("keep_dims", False)
+    input_type = x.sym_type
+    if _is_scalar(input_type):
+        context.add(node.name, x, is_new_var=False)
+    else:
+        x = mb.reduce_sum(x=x, axes=axes, keep_dims=keep_dims, name=node.name)
+        context.add(node.name, x)
+
+
+@register_tf_op
+def Tan(context, node):
+    x = context[node.inputs[0]]
+    x = mb.tan(x=x, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def get_tuple(context, node):
+    x = context[node.inputs[0]]
+    if not isinstance(x, (list, tuple)):
+        # In some rare cases, the upstream op produces a single output
+        x = [x]
+    idx = node.attr["index"]
+    if idx >= len(x):
+        msg = "Index {} out of range, op '{}' only has {} outputs: {}"
+        raise IndexError(msg.format(idx, node.inputs[0], len(x), [v.name for v in x]))
+    context.add(node.name, x[idx], is_new_var=False)
+
+
+@register_tf_op
+def Mean(context, node):
+    x = context[node.inputs[0]]
+    axes = _check_axes_type(context[node.inputs[1]])
+    keep_dims = node.attr.get("keep_dims", False)
+    x = mb.reduce_mean(x=x, axes=axes, keep_dims=keep_dims, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def MatrixDiag(context, node):
+    x = context[node.inputs[0]]
+    if x.rank != 1:
+        raise NotImplementedError('Only support MatrixDiag op with input rank = 1.')
+    length = mb.shape(x=x)
+    x = mb.expand_dims(x=x, axes=[0])
+    reps = mb.concat(values=[length, [1]], axis=0)
+    x = mb.tile(x=x, reps=reps)
+    x = mb.band_part(x=x, lower=0, upper=0, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def MirrorPad(context, node):
+    x = context[node.inputs[0]]
+    pad = context[node.inputs[1]]
+    constant_val = node.attr.get("constant_val", 0.0)
+
+    if pad is None:
+        raise ValueError("TF `paddings` in Pad op must be const.")
+
+    mode = node.attr.get("mode", "reflect").lower()
+    if mode == "symmetric":
+        mode = "reflect"
+    in_rank = len(x.sym_type.get_shape())
+
+    if in_rank > 5 or in_rank < 2:
+        raise ValueError(
+            "Unsupported Pad configuration with input rank {}!".format(str(in_rank))
+        )
+
+    if pad.val.shape != (in_rank, 2):
+        raise ValueError("Padding must have shape (input rank, 2).")
+
+    pad = pad.val
+
+    # get the axes with non-zero padding
+    non_zero_axis = []
+    for i in range(len(pad)):
+        if not all(pad[i] == 0):
+            non_zero_axis.append(i)
+
+    if len(non_zero_axis) > 2:
+        raise ValueError("Unsupported configuration for Pad layer!")
+
+    # pad along exactly two axes when fewer than two have non-zero padding
+    if len(non_zero_axis) == 0:
+        non_zero_axis = [0, 1]
+
+    if len(non_zero_axis) == 1:
+        if non_zero_axis[0] != len(pad) - 1:
+ non_zero_axis.append(len(pad) - 1) + else: + non_zero_axis = [0, non_zero_axis[0]] + + # transpose the input such that the padding dim is the last two + perm = [i for i in range(in_rank) if i not in non_zero_axis] + non_zero_axis + x = mb.transpose(x=x, perm=perm, name=node.name + "_transpose_1") + pad = pad[non_zero_axis, :] + pad = pad.reshape(-1) + x = mb.pad( + x=x, pad=pad, name=node.name + "_pad", constant_val=constant_val, mode=mode + ) + inverse_perm = [-1] * len(perm) + for i, index in enumerate(perm): + inverse_perm[index] = i + x = mb.transpose(x=x, perm=inverse_perm, name=node.name) + + context.add(node.name, x) + + +@register_tf_op +def Pad(context, node): + x = context[node.inputs[0]] + pad = context[node.inputs[1]] + input_dtype = x.dtype + + mode = node.attr.get("mode", "constant").lower() + if mode == "symmetric": + mode = "reflect" + constant_val = node.attr.get("constant_val", 0.0) + constant_val = mb.const(val=constant_val) + in_rank = len(x.sym_type.get_shape()) + + if in_rank > 5: + raise ValueError("Unsupported Pad configuration!") + + if pad.val is None: + pad = mb.reshape(x=pad, shape=[-1]) + else: + pad = pad.val.reshape(-1) + + x = mb.cast(x=x, dtype=builtin_to_string(constant_val.dtype)) + x = mb.pad(x=x, pad=pad, mode=mode, constant_val=constant_val) + x = mb.cast(x=x, dtype=builtin_to_string(input_dtype), name=node.name) + + context.add(node.name, x) + + +@register_tf_op +def PadV2(context, node): + # compared to tf.raw_ops.Pad, tf.raw_ops.PadV2 allow constant values rather than 0. + x = context[node.inputs[0]] + pad = context[node.inputs[1]] + constant_val = context[node.inputs[2]] + + if constant_val.shape != (): + raise NotImplementedError( + "TF `constant_values` in PadV2 op must be const scalar." + ) + in_rank = x.rank + if in_rank > 5: + raise ValueError("Unsupported Pad configuration!") + + if pad.val is None: + pad = mb.reshape(x=pad, shape=[-1]) + else: + pad = pad.val.reshape(-1) + + constant_val = constant_val.val + if constant_val == -_np.inf: + INT_MIN = -_np.iinfo(_np.int64).max - 1 + constant_val = float(INT_MIN) + + if constant_val == _np.inf: + INT_MAX = _np.iinfo(_np.int64).max + constant_val = float(INT_MAX) + + x = mb.pad(x=x, pad=pad, name=node.name, mode="constant", constant_val=constant_val) + context.add(node.name, x) + + +@register_tf_op +def Relu(context, node): + x = context[node.inputs[0]] + x = mb.relu(x=x, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Reciprocal(context, node): + x = context[node.inputs[0]] + x = mb.inverse(x=x, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Relu6(context, node): + x = context[node.inputs[0]] + x = mb.relu6(x=x, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Reshape(context, node): + x = context[node.inputs[0]] + new_shape = context[node.inputs[1]] + x = mb.reshape(x=x, shape=new_shape, name=node.name) + context.add(node.name, x) + + +@register_tf_op(tf_alias=["ReverseV2"]) +def Reverse(context, node): + x = context[node.inputs[0]] + axes = context[node.inputs[1]] + x = mb.reverse(x=x, axes=axes, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def ReverseSequence(context, node): + x = context[node.inputs[0]] + lengths = context[node.inputs[1]] + seq_axis = node.attr.get("seq_dim") + batch_axis = node.attr.get("batch_dim") + x = mb.reverse_sequence( + x=x, lengths=lengths, seq_axis=seq_axis, batch_axis=batch_axis, name=node.name + ) + context.add(node.name, x) + + +@register_tf_op +def Transpose(context, 
node):
+    x = context[node.inputs[0]]
+    perm = context[node.inputs[1]]
+    x = mb.transpose(x=x, perm=perm, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Squeeze(context, node):
+    x = context[node.inputs[0]]
+    axes = node.attr.get("squeeze_dims", [])
+    if axes == []:
+        axes = None
+    x = mb.squeeze(x=x, axes=axes, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Multinomial(context, node):
+    x = context[node.inputs[0]]
+    size = context[node.inputs[1]]
+    x = mb.random_categorical(x=x, size=size, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op(tf_alias=["Elu"])
+def ELU(context, node):
+    x = context[node.inputs[0]]
+    x = mb.elu(x=x, alpha=1.0, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op(tf_alias=["Erf"])
+def ERF(context, node):
+    x = context[node.inputs[0]]
+    x = mb.erf(x=x, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op(tf_alias=["LeakyRelu"])
+def LeakyReLU(context, node):
+    x = context[node.inputs[0]]
+    alpha = node.attr["alpha"]
+    x = mb.leaky_relu(x=x, alpha=alpha, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Selu(context, node):
+    x = context[node.inputs[0]]
+    x = mb.elu(x=x, alpha=1.6732632423543772)
+    x = mb.mul(x=x, y=1.0507009873554805, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op(tf_alias=["SelectV2"])
+def Select(context, node):
+    cond = context[node.inputs[0]]
+    a = context[node.inputs[1]]
+    b = context[node.inputs[2]]
+
+    # broadcast a vector-typed cond
+    rank_cond = cond.rank
+    rank_a = a.rank
+    if rank_cond == 1 and rank_a > 1:
+        axes = [-i - 1 for i in range(rank_a - rank_cond)]
+        cond = mb.expand_dims(x=cond, axes=axes)
+
+    if not types.is_bool(cond.dtype):
+        # cond must be of bool type
+        cond = mb.cast(x=cond, dtype="bool")
+
+    x = mb.select(cond=cond, a=a, b=b, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Sigmoid(context, node):
+    x = context[node.inputs[0]]
+    x = mb.sigmoid(x=x, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Softplus(context, node):
+    x = context[node.inputs[0]]
+    x = mb.softplus(x=x, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Softsign(context, node):
+    x = context[node.inputs[0]]
+    x = mb.softsign(x=x, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Softmax(context, node):
+    logit = context[node.inputs[0]]
+    axis = node.attr.get("axis")
+    x = mb.softmax(x=logit, axis=axis, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def SpaceToBatchND(context, node):
+    # In TensorFlow, the input tensor has shape (batch,) + spatial_shape + remaining_shape.
+    # The shape is treated as a combination of 3 components:
+    # 1. A single batch dimension
+    # 2. Spatial dimensions, of length spatial_rank, which is not necessarily 1 or 2. Also, spatial_rank
+    #    is equal to the length of block_shape
+    # 3. Remaining dimensions, of length remaining_rank
+
+    # The logic of translating this op is as follows:
+    # 1. We first reshape the input to a canonical shape (rolling the remaining shape dimensions into a
+    #    single dimension): (batch,) + spatial_shape + (R,), where R = remaining_dim_1 * ... * remaining_dim_n
+    # 2. We support rank 1 and rank 2 spatial shapes:
+    #    (i) rank 1: We decompose the SpaceToBatch into small basic ops.
+    #    (ii) rank 2: We directly use the built-in space_to_batch op.
+    #    The output would have shape (batch_new,) + spatial_shape_new + (R,)
+    # 3. We transform the tensor back by unrolling the remaining shape: (B_new,) + spatial_shape_new + remaining_shape
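+    #
+    # Illustrative shape walk-through (hypothetical sizes, not taken from the
+    # code): an input x of shape (2, 9, 5, 7) with block_shape = [3] has
+    # spatial_rank 1 and remaining_rank 2, so it is first reshaped to
+    # (2, 9, 35); the rank-1 decomposition below turns that into (6, 3, 35),
+    # which is finally unrolled back to (6, 3, 5, 7).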
+
+    x = context[node.inputs[0]]
+    block_shape = context[node.inputs[1]].val
+    paddings = context[node.inputs[2]].val
+    original_shape = mb.shape(x=x)
+
+    if block_shape is None or paddings is None:
+        raise NotImplementedError(
+            "Dynamic block_shape and paddings are not supported for SpaceToBatchND!"
+        )
+
+    input_rank = x.rank
+    spatial_rank = len(block_shape)
+    remaining_rank = x.rank - 1 - spatial_rank
+    has_non_unity_remaining_dims = remaining_rank != 1
+
+    if has_non_unity_remaining_dims:
+        # Reshape the input tensor to shape [batch, spatial_shape, remaining_dim_1 * ... * remaining_dim_N]
+        x = _reshape_remaining_dimensions_to_canonical_shape(x, remaining_rank)
+
+    if spatial_rank >= 3:
+        raise NotImplementedError("Rank of spatial shape > 2 is not supported.")
+
+    if spatial_rank == 2:
+        # The tensor has shape [B, H, W, C], so we can directly use the space_to_batch op:
+        # [B, H, W, C] -> transpose -> [B, C, H, W] -> space_to_batch -> [B_new, C, H_new, W_new] ->
+        # transpose -> [B_new, H_new, W_new, C]
+        x = mb.transpose(x=x, perm=[0, 3, 1, 2])
+        if tuple(paddings[0]) != (0, 0) or tuple(paddings[1]) != (0, 0):
+            x = mb.pad(x=x, pad=paddings.flatten(), mode="constant")
+        x = mb.space_to_batch(x=x, block_shape=block_shape, paddings=_np.zeros((2, 2), _np.int32))
+        x = mb.transpose(x=x, perm=[0, 2, 3, 1])
+
+    if spatial_rank == 1:
+        # In this case, we decompose space_to_batch into small basic ops:
+        # [B, H, C] -> decomposed basic ops -> [B_new, H_new, C]
+
+        # expand padding to shape [3, 2]
+        new_paddings = _np.zeros(shape=(3, 2), dtype=_np.int32)
+        new_paddings[1] = paddings
+        paddings = new_paddings
+        needs_paddings = any(paddings.flatten())
+        if needs_paddings:
+            padded = mb.pad(x=x, pad=paddings.flatten(), mode="constant")
+        else:
+            padded = x
+
+        # padded_shape = [B, H_padded, C]
+        padded_shape = mb.shape(x=padded)
+
+        # reshape to [B, H_padded/block_shape, block_shape, C]
+        block_shape = block_shape[0]
+        batch_size = _value_at(padded_shape, 0)
+        spatial_dim = mb.real_div(x=_value_at(padded_shape, 1), y=block_shape)
+        spatial_dim = mb.cast(x=spatial_dim, dtype="int32")
+        remain_dim = _value_at(padded_shape, 2)
+        reshape_shape = mb.concat(values=[batch_size, spatial_dim, block_shape, remain_dim], axis=0)
+        reshaped_padded = mb.reshape(x=padded, shape=reshape_shape)
+
+        # permute the shape to: [block_shape, B, H_padded/block_shape, C]
+        permuted_reshaped_padded = mb.transpose(x=reshaped_padded, perm=[2, 0, 1, 3])
+
+        # reshape the tensor to [block_shape * B, H_padded/block_shape, C]
+        final_reshape_values = [mb.mul(x=batch_size, y=block_shape), spatial_dim, remain_dim]
+        final_shape = mb.concat(values=final_reshape_values, axis=0)
+        x = mb.reshape(x=permuted_reshaped_padded, shape=final_shape)
+
+    if has_non_unity_remaining_dims:
+        # Reshape the tensor from shape [batch_new, spatial_shape_new, remaining_dim_1 * ... * remaining_dim_N]
+        # back to shape [batch_new, spatial_shape_new, remaining_shape]
+        x = _reshape_remaining_dimension_to_original_shape(x, original_shape, remaining_rank)
+
+    context.add(node.name, mb.identity(x=x, name=node.name))
+
+
+@register_tf_op
+def SpaceToDepth(context, node):
+    x = context[node.inputs[0]]
+    block_size = node.attr.get("block_size")
+    data_format = node.attr.get("data_format", "NHWC")
+    if data_format == "NHWC":
+        x = _transpose_NHWC_to_NCHW(x)
+        x = mb.space_to_depth(x=x, block_size=block_size)
+        x = _transpose_NCHW_to_NHWC(x, node.name)
+    else:
+        x = mb.space_to_depth(x=x, block_size=block_size, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Tanh(context, node):
+    x = context[node.inputs[0]]
+    x = mb.tanh(x=x, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op(tf_alias=["TopKV2"])
+def TopK(context, node):
+    x = context[node.inputs[0]]
+    k = context[node.inputs[1]].val
+    sort = node.attr["sorted"]
+
+    kwargs = {
+        "x": x,
+        "k": k,
+        "axis": -1,
+        "name": node.name
+    }
+
+    if is_current_opset_version_compatible_with(target.iOS16):
+        kwargs["sort"] = sort
+    elif not sort:
+        raise ValueError("For opset < iOS16, only sorted=True is supported for topk")
+
+    context.add(node.name, mb.topk(**kwargs))
+
+@register_tf_op(tf_alias=["InTopKV2"])
+def InTopK(context, node):
+    x = context[node.inputs[0]]
+    target = context[node.inputs[1]]
+    k = context[node.inputs[2]].val
+
+    _, class_num = x.shape
+    if not is_symbolic(class_num):
+        k = min(k, class_num)
+
+    _, indices = mb.topk(x=x, k=k, axis=-1)
+    target = mb.expand_dims(x=target, axes=[-1])
+    x = mb.equal(x=target, y=indices)
+    x = mb.cast(x=x, dtype="fp32")
+    x = mb.reduce_sum(x=x, axes=[-1], keep_dims=False)
+    x = mb.cast(x=x, dtype="bool", name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Cumsum(context, node):
+    x = context[node.inputs[0]]
+    axis = context[node.inputs[1]]
+    exclusive = node.attr.get("exclusive", False)
+    reverse = node.attr.get("reverse", False)
+    x = mb.cumsum(x=x, axis=axis, exclusive=exclusive, reverse=reverse, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Gather(context, node):
+    x = context[node.inputs[0]]
+    indices = context[node.inputs[1]]
+    axis = 0
+    x = mb.gather(x=x, indices=indices, axis=axis, name=node.name)
+    context.add(node.name, x)
+
+def _perform_gather_with_batch_dims(x, indices, batch_dims, gather_func, func_args, name):
+    """
+    A utility function to compute gather and gather_nd with batch_dims.
+    """
+    # (Step 1)
+    # Reshape x, indices with shape
+    # x: [batch_1, ..., batch_n, *remaining_x_shape]
+    # indices: [batch_1, ..., batch_n, *remaining_indices_shape]
+    # into shape
+    # x_reshape: [prod(batch_1, ..., batch_n), *remaining_x_shape]
+    # indices_reshape: [prod(batch_1, ..., batch_n), *remaining_indices_shape]
+    msg = ("The implementation of gather/gather_nd for iOS15 and older is not efficient. It is highly recommended"
+           " to set minimum_deployment_target=coremltools.target.iOS16 in the coremltools.convert() function."
+           )
+    logger.warning(msg)
+    x_shape = mb.shape(x=x)
+    indices_shape = mb.shape(x=indices)
+    batch_shape = mb.gather(x=x_shape, indices=_np.array(range(batch_dims)), axis=0)
+    batch_prod = mb.reduce_prod(x=batch_shape, axes=[0], keep_dims=True)
+    x_remaining_shape = mb.gather(x=x_shape, indices=_np.array(range(batch_dims, x.rank)), axis=0)
+    indices_remaining_shape = mb.gather(x=indices_shape, indices=_np.array(range(batch_dims, indices.rank)), axis=0)
+    new_x_shape = mb.concat(values=[batch_prod, x_remaining_shape], axis=0)
+    new_indices_shape = mb.concat(values=[batch_prod, indices_remaining_shape], axis=0)
+    x_reshape = mb.reshape(x=x, shape=new_x_shape)
+    indices_reshape = mb.reshape(x=indices, shape=new_indices_shape)
+
+    # (Step 2)
+    # We iterate through the batch dimension and compute the gather individually for each batch.
+    # All results are stacked into a tensor with shape [prod(batch_1, ..., batch_n), *remaining_result_shape]
+    res = []
+    if batch_prod.val is None:
+        raise ValueError("batch dimension must be known at compile time")
+    for i in range(batch_prod.val[0]):
+        temp_x = mb.gather(x=x_reshape, indices=[i], axis=0)
+        temp_indices = mb.gather(x=indices_reshape, indices=[i], axis=0)
+        temp_x = mb.squeeze(x=temp_x, axes=[0])
+        temp_indices = mb.squeeze(x=temp_indices, axes=[0])
+        func_args.update({"x": temp_x, "indices": temp_indices})
+        temp = gather_func(**func_args)
+        res.append(temp)
+    res = mb.stack(values=res, axis=0)
+
+    # (Step 3)
+    # Finally, we reshape the result to shape [batch_1, ..., batch_n, *remaining_result_shape]
+    res_shape = mb.shape(x=res)
+    res_remaining_shape = mb.gather(x=res_shape, indices=_np.array(range(1, res_shape.shape[0])), axis=0)
+    res_new_shape = mb.concat(values=[batch_shape, res_remaining_shape], axis=0)
+    return mb.reshape(x=res, shape=res_new_shape, name=name)
+
+
+@register_tf_op
+def GatherV2(context, node):
+    x = context[node.inputs[0]]
+    indices = context[node.inputs[1]]
+    axis = context[node.inputs[2]].val
+    batch_dims = node.attr.get("batch_dims", 0)
+    if is_current_opset_version_compatible_with(target.iOS16):
+        # For iOS16 and above, we can directly use the batch_dims argument
+        x = mb.gather(x=x, indices=indices, axis=axis, batch_dims=batch_dims, name=node.name)
+    else:
+        # For iOS15 or below, we have to compute it manually
+        if batch_dims == 0:
+            x = mb.gather(x=x, indices=indices, axis=axis, name=node.name)
+        else:
+            func_args = {"axis": axis - batch_dims}
+            x = _perform_gather_with_batch_dims(x, indices, batch_dims, mb.gather, func_args, node.name)
+
+    context.add(node.name, x)
+
+
+@register_tf_op
+def GatherNd(context, node):
+    x = context[node.inputs[0]]
+    indices = context[node.inputs[1]]
+    batch_dims = node.attr.get("batch_dims", 0)
+    if is_current_opset_version_compatible_with(target.iOS16):
+        # For iOS16 and above, we can directly use the batch_dims argument
+        x = mb.gather_nd(x=x, indices=indices, batch_dims=batch_dims, name=node.name)
+    else:
+        if batch_dims == 0:
+            x = mb.gather_nd(x=x, indices=indices, name=node.name)
+        else:
+            x = _perform_gather_with_batch_dims(x, indices, batch_dims, mb.gather_nd, {}, node.name)
+
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Tile(context, node):
+    x = context[node.inputs[0]]
+    reps = context[node.inputs[1]]
+    x = mb.tile(x=x, reps=reps, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def Where(context, node):
+    if len(node.inputs) > 1:
+        raise NotImplementedError('tf.where with x,y will be supported by '
+                                  'MIL::select in the future')
+    x = 
context[node.inputs[0]] + x = mb.non_zero(x=x, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def SquaredDifference(context, node): + x = context[node.inputs[0]] + y = context[node.inputs[1]] + x = mb.sub(x=x, y=y, name=node.name + '_sub') + x = mb.square(x=x, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Conv2DBackpropInput(context, node): + # Output shape: [N, H_out, W_out, C_out] + output_shape = context[node.inputs[0]].val + # Weight shape: [H, W, C_out, C_in] + W_hwoi = context[node.inputs[1]] + W_iohw = mb.transpose(x=W_hwoi, perm=[3, 2, 0, 1]) + # Input shape: [N, H_in, W_in, C_in] + x = context[node.inputs[2]] + + data_format = node.attr.get("data_format", "NHWC") + HW_dilations = _conv2d3d_strides_or_dilations( + "dilations", node.attr.get("dilations"), data_format + ) + HW_strides = _conv2d3d_strides_or_dilations( + "strides", node.attr.get("strides"), data_format + ) + pad_type = node.attr.get("padding") + + if not isinstance(pad_type, str): + pad_type = "custom" + raise NotImplementedError("Custom padding not implemented for TF") + + pad_type = pad_type.lower() + # CoreML expects input to be in NCHW format + # Transpose input to NCHW format + if data_format == "NHWC": + x = _transpose_NHWC_to_NCHW(x) + if output_shape is not None: + output_shape = [output_shape[0], output_shape[3], + output_shape[1], output_shape[2]] + + # Only the last op should have the same name as node.name + conv_name = node.name + "x" if data_format == "NHWC" else node.name + # Pass output shape provided above + x = mb.conv_transpose( + x=x, + weight=W_iohw, + pad_type=pad_type, + output_shape=output_shape, + strides=HW_strides, + dilations=HW_dilations, + name=conv_name, + ) + + # Convert NCHW output back to NHWC format + if data_format == "NHWC": + x = _transpose_NCHW_to_NHWC(x, node.name) + context.add(node.name, x) + + +@register_tf_op +def Range(context, node): + start = context[node.inputs[0]] + end = context[node.inputs[1]] + step = context[node.inputs[2]] + x = mb.range_1d(start=start, end=end, step=step, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def RandomUniform(context, node): + shape = context[node.inputs[0]] + seed = node.attr["seed"] + x = mb.random_uniform(shape=shape, seed=seed, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def RandomStandardNormal(context, node): + shape = context[node.inputs[0]] + seed = node.attr["seed"] + x = mb.random_normal(shape=shape, seed=seed, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def OneHot(context, node): + indices = context[node.inputs[0]] + depth = context[node.inputs[1]] + on_value = context[node.inputs[2]] + off_value = context[node.inputs[3]] + axis = node.attr.get("axis", -1) + x = mb.one_hot( + indices=indices, + one_hot_vector_size=depth, + axis=axis, + on_value=on_value, + off_value=off_value, + name=node.name, + ) + context.add(node.name, x) + + +def _get_non_maximum_supression(context, node, iou_threshold_override=None, score_threshold_override=None): + """ + The helper function returns the outputs from mb.non_maximum_suppression, + along with the number of boxes and the maximum number of boxes. 
+    """
+    boxes = context[node.inputs[0]]
+    scores = context[node.inputs[1]]
+    max_boxes = context[node.inputs[2]]
+    iou_threshold = iou_threshold_override or context[node.inputs[3]]
+    score_threshold = score_threshold_override or context[node.inputs[4]]
+
+    # The boxes' coordinates in TensorFlow are (y1, x1, y2, x2), where (y1, x1) and (y2, x2) are the
+    # coordinates of a diagonal pair of box corners. However, MIL NMS expects CENTER_SIZE_WIDTH_FIRST
+    # format, which is (x, y, width, height) where (x, y) is the center coordinate.
+    y1, x1, y2, x2 = mb.split(x=boxes, num_splits=4, axis=-1)
+    # As the input coordinates could be any diagonal pair of box corners, it's not guaranteed that
+    # x2 > x1 nor y2 > y1. So we need to use abs to get width/height, and (x1+x2)/2 to get the center.
+    width = mb.abs(x=mb.sub(x=x2, y=x1))
+    height = mb.abs(x=mb.sub(x=y2, y=y1))
+    center_x = mb.real_div(x=mb.add(x=x1, y=x2), y=2.0)
+    center_y = mb.real_div(x=mb.add(x=y1, y=y2), y=2.0)
+    boxes = mb.concat(values=[center_x, center_y, width, height], axis=-1)
+
+    if score_threshold.val == float("-inf"):
+        # TensorFlow's default value for score_threshold; Core ML does not
+        # support float('-inf'), so we convert it to the minimum float32 instead
+        score_threshold = -3.4e38
+
+    boxes = mb.expand_dims(x=boxes, axes=[0])
+    scores = mb.expand_dims(x=scores, axes=[0, -1])
+    coordinates, scores, indices, valid_outputs = mb.non_maximum_suppression(
+        boxes=boxes,
+        scores=scores,
+        max_boxes=max_boxes,
+        iou_threshold=iou_threshold,
+        score_threshold=score_threshold,
+    )
+
+    # The results from the MIL NMS op are padded to max_boxes. We need to extract the valid part for TF.
+    # Notice that the batch dim and class num dim also need to be squeezed.
+    valid_outputs = mb.squeeze(x=valid_outputs, axes=[0])
+    valid_range = mb.range_1d(end=valid_outputs, start=0, step=1)
+    coordinates = mb.squeeze(x=coordinates, axes=[0])
+    valid_coordinates = mb.gather(x=coordinates, indices=valid_range, axis=0)
+    scores = mb.squeeze(x=scores, axes=[0, -1])
+    valid_scores = mb.gather(x=scores, indices=valid_range, axis=0)
+    indices = mb.squeeze(x=indices, axes=[0])
+    valid_indices = mb.cast(
+        x=mb.gather(x=mb.cast(x=indices, dtype="fp32"), indices=valid_range, axis=0),
+        dtype="int32",
+        name=node.name,
+    )
+
+    return valid_coordinates, valid_scores, valid_indices, valid_outputs
+
+
+@register_tf_op(tf_alias=["NonMaxSuppressionV3"])
+def NonMaxSuppression(context, node):
+    _, _, valid_indices, valid_outputs = _get_non_maximum_supression(context, node)
+    context.add(node.name, valid_indices)
+
+
+@register_tf_op
+def NonMaxSuppressionV5(context, node):
+    """
+    Different from NonMaxSuppression/NonMaxSuppressionV3, which only returns the indices of the selected boxes,
+    NonMaxSuppressionV5 returns the indices, scores, and number of the selected boxes.
+    """
+    soft_nms_sigma = context[node.inputs[5]].val
+    iou_threshold_override = None
+    score_threshold_override = None
+    if soft_nms_sigma != 0:
+        # fall back to "hard" NMS with sensible defaults
+        iou_threshold_override = types.fp32(0.5)
+        score_threshold_override = types.fp32(float("-inf"))
+        logger.warning("NonMaxSuppressionV5 with soft_nms_sigma != 0 not supported. "
+                       "Setting soft_nms_sigma to zero.")
+
+    _, valid_scores, valid_indices, valid_outputs = _get_non_maximum_supression(
+        context, node, iou_threshold_override=iou_threshold_override, score_threshold_override=score_threshold_override
+    )
+    res = [valid_indices, valid_scores, valid_outputs]
+    context.add(node.name, res)
+
+
+@register_tf_op
+def Shape(context, node):
+    x = context[node.inputs[0]]
+    if types.is_complex(x.dtype):
+        x = mb.complex_shape(x=x, name=node.name)
+    else:
+        x = mb.shape(x=x, name=node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def ResizeNearestNeighbor(context, node):
+    # The TF "ResizeNearestNeighbor" op is always in channel-last mode.
+    # Instead of an upsample factor, it uses the output size, which is the second input.
+    x = context[node.inputs[0]]
+
+    input_shape = x.shape  # (N,Hin,Win,C)
+    if len(input_shape) != 4:
+        raise ValueError('"ResizeNearestNeighbor" op: input rank is not 4')
+
+    if len(context[node.inputs[1]].shape) != 1:
+        raise ValueError('"ResizeNearestNeighbor" op: the second input must have rank 1')
+
+    if context[node.inputs[1]].shape[0] != 2:
+        raise ValueError(
+            '"ResizeNearestNeighbor" op: the second input, which is the output size, must have 2 elements'
+        )
+    Hout, Wout = None, None
+    if context[node.inputs[1]].val is None:
+        # for the dynamic input shape case,
+        # context[node.inputs[1]] is a mul(x=input_shape, y=scaling_factor) op.
+        scaling_factor_h = context[node.inputs[1]].op.y.val[0]
+        scaling_factor_w = context[node.inputs[1]].op.y.val[1]
+    else:
+        Hin, Win = input_shape[1], input_shape[2]
+        Hout, Wout = context[node.inputs[1]].val
+        scaling_factor_h = Hout / Hin if Hout % Hin == 0 else (Hout + 1e-4) / Hin
+        scaling_factor_w = Wout / Win if Wout % Win == 0 else (Wout + 1e-4) / Win
+
+    if scaling_factor_h < 1 and scaling_factor_w < 1:
+        ResizeBilinear(context, node)
+        return
+
+    # first transpose from channel-last to channel-first format for Core ML
+    x = _transpose_NHWC_to_NCHW(x)
+
+    align_corners = node.attr.get("align_corners", False)
+    half_pixel_centers = node.attr.get("half_pixel_centers", False)
+
+    # add either the resize or the upsample layer
+    if align_corners is False and half_pixel_centers is False:
+        x = mb.upsample_nearest_neighbor(
+            x=x,
+            scale_factor_height=scaling_factor_h,
+            scale_factor_width=scaling_factor_w,
+            name=node.name + "_channel_first_upsample",
+        )
+    elif align_corners is False and half_pixel_centers is True:
+        # if the output size can be determined at compile time,
+        # we call the core op resize_nearest_neighbor,
+        # otherwise we use upsample_nearest_neighbor for approximation.
+        # rdar://75204549 (resize_nearest_neighbor needs to support dynamic input shape)
+        if Hout is not None and Wout is not None:
+            x = mb.resize_nearest_neighbor(
+                x=x,
+                target_size_height=Hout,
+                target_size_width=Wout,
+                name=node.name + "_channel_first_resize",
+            )
+        else:
+            logger.warning('Using upsample_nearest_neighbor to approximate resize_nearest_neighbor.')
+            x = mb.upsample_nearest_neighbor(
+                x=x,
+                scale_factor_height=scaling_factor_h,
+                scale_factor_width=scaling_factor_w,
+                name=node.name + "_channel_first_upsample",
+            )
+
+    else:
+        raise NotImplementedError(
+            "ResizeNearestNeighbor op with align_corners={} and half_pixel_centers={} not supported".format(
+                align_corners, half_pixel_centers
+            )
+        )
+
+    # transpose again
+    x = _transpose_NCHW_to_NHWC(x, node.name)
+    context.add(node.name, x)
+
+
+@register_tf_op
+def ResizeBilinear(context, node):
+    # The TF "ResizeBilinear" op is always in channel-last mode.
+    # The second input is the output size.
+
+    x = context[node.inputs[0]]
+    input_shape = x.shape  # (N,Hin,Win,C)
+    if len(input_shape) != 4:
+        raise ValueError('"ResizeBilinear" op: input rank is not 4')
+
+    if len(context[node.inputs[1]].shape) != 1:
+        raise ValueError('"ResizeBilinear" op: the second input must have rank 1')
+
+    if context[node.inputs[1]].shape[0] != 2:
+        raise ValueError(
+            '"ResizeBilinear" op: the second input, which is the output size, must have 2 elements'
+        )
+
+    align_corners = node.attr.get("align_corners", False)
+    half_pixel_centers = node.attr.get("half_pixel_centers", False)
+
+    if align_corners and half_pixel_centers:
+        # we should not come here since TF does not support align_corners=True and half_pixel_centers=True
+        raise ValueError(
+            '"ResizeBilinear" op: "align_corners" and "half_pixel_centers" are both True and this mode is not supported'
+        )
+
+    # In iOS16, we can support dynamic shape + any combination of align_corners and half_pixel_centers,
+    # if the output_shape comes from a pattern of input_shape * (h_scale, w_scale)
+    if is_current_opset_version_compatible_with(target.iOS16) and context[node.inputs[1]].val is None:
+        output_shape = context[node.inputs[1]]
+        if output_shape.op.op_type == "mul":
+            scale_factor_height = context[node.inputs[1]].op.y.val[0]
+            scale_factor_width = context[node.inputs[1]].op.y.val[1]
+            x = _transpose_NHWC_to_NCHW(x)
+            x = mb.upsample_bilinear(
+                x=x,
+                scale_factor_height=scale_factor_height,
+                scale_factor_width=scale_factor_width,
+                align_corners=align_corners,
+                half_pixel_centers=half_pixel_centers,
+            )
+            x = _transpose_NCHW_to_NHWC(x, node.name)
+            context.add(node.name, x)
+            return
+
+    if (align_corners and not half_pixel_centers) or \
+        (not align_corners and not half_pixel_centers):
+        # the output shape needs to be known at compile time
+        if context[node.inputs[1]].val is None:
+            raise ValueError(
+                '"ResizeBilinear" op: the second input, which is the output size, must be known statically'
+            )
+
+        Hout, Wout = context[node.inputs[1]].val
+
+        if not (isinstance(Hout, (_np.int32, _np.int64)) and isinstance(Wout, (_np.int32, _np.int64))):
+            raise ValueError(
+                '"ResizeBilinear" op: the second input, which is the output size, must have elements of type int32 or int64'
+            )
+
+    # first transpose from channel-last to channel-first format for Core ML
+    x = _transpose_NHWC_to_NCHW(x)
+
+    # add either the resize_bilinear layer or the upsample layer
+
+    # [align_corners = True, half_pixel_centers = False]
+    if align_corners and not half_pixel_centers:
+        x = mb.resize_bilinear(
+            x=x,
+            
target_size_height=Hout, + target_size_width=Wout, + sampling_mode="STRICT_ALIGN_CORNERS", + name=node.name + "_channel_first_resize_bilinear", + ) + + # [align_corners = False, half_pixel_centers = False] + elif not align_corners and not half_pixel_centers: + x = mb.resize_bilinear( + x=x, + target_size_height=Hout, + target_size_width=Wout, + sampling_mode="DEFAULT", + name=node.name + "_channel_first_resize_bilinear", + ) + + # [align_corners = False, half_pixel_centers = True] + elif not align_corners and half_pixel_centers: + if context[node.inputs[1]].val is None: + # for the dynamic input shape case, + # context[node.inputs[1]] is a mul(x=input_shape, y=scaling_factor) op. + if context[node.inputs[1]].op.op_type != "mul": + raise NotImplementedError("Cannot determine the scale factor for the bilinear resize layer.") + scale_factor_height = context[node.inputs[1]].op.y.val[0] + scale_factor_width = context[node.inputs[1]].op.y.val[1] + else: + Hin, Win = input_shape[1], input_shape[2] + Hout, Wout = context[node.inputs[1]].val + # check if the output size divide the input size, + # if not, then cast the scale factor to float type. + scale_factor_height = Hout / Hin if Hout % Hin == 0 else (Hout + 1e-4) / Hin + scale_factor_width = Wout / Win if Wout % Win == 0 else (Wout + 1e-4) / Win + + x = mb.upsample_bilinear( + x=x, + scale_factor_height=scale_factor_height, + scale_factor_width=scale_factor_width, + align_corners=False, + name=node.name + "_channel_first_upsample_bilinear", + ) + + # transpose again + x = _transpose_NCHW_to_NHWC(x, node.name) + context.add(node.name, x) + + +@register_tf_op +def make_tuple(context, node): + res = tuple([context[in_name] for in_name in node.inputs]) + context.add(node.name, res) + + +@register_tf_op +def function_entry(context, node): + if context.get_func_inputs() is None: + msg = ( + "function_entry requires function inputs stored in " + + "context.curr_func_inputs" + ) + raise ValueError(msg) + context.add(node.name, context.get_func_inputs()) + + +@register_tf_op(tf_alias=["while"]) +def While(context, node): + # TF while will never have break statement, because break can always be + # transformed into while and condition. Example: + # + # while pred: + # a = op1(...) + # if a == 0: + # break + # b = op2(...) + # + # is equivalent to + # + # while pred and not break_a: + # a = op1(...) + # break_a = a == 0 + # if not break_a: + # b = op2(...) + + # node.inputs[0] == 'make_tuple_X' (always a make_tuple) + loop_vars = context[node.inputs[0]] # python tuple of Vars + cond_graph = context.get_graph(node.attr["cond_function"]) + body_graph = context.get_graph(node.attr["body_function"]) + + def cond(*loop_vars): + context.stack_func_inputs(loop_vars) + + # convert_graph uses context to convert cond_graph. During conversion + # it constructs operations (mb.some_op). Note that cond(*loop_vars) is + # only evaluated inside while_loop's type_inference(), not here. In + # other words, we use python's deferred function evaluation to defer + # the SSA block construction until inside while_loop Operation. 
+ res = convert_graph(context, cond_graph) + # Done with translating the function + context.unstack_func_inputs() + return res + + def body(*loop_vars): + context.stack_func_inputs(loop_vars) + res = convert_graph(context, body_graph) + # Done with translating the function + context.unstack_func_inputs() + return res + + x = mb.while_loop(_cond=cond, _body=body, loop_vars=loop_vars, name=node.name) + # wraps x as tuple for get_tuple that always follow the while node. + if not isinstance(x, (tuple, list)): + x = (x,) + context.add(node.name, x) + + +@register_tf_op +def iff(context, node): + pred = context[node.inputs[0]] + + # this is always a tensor, as TF uses one iff op for each returned value. + # + # Example TF program: + # + # x = tf.placeholder(tf.float32, shape=(1,)) + # y = tf.placeholder(tf.float32, shape=(1,)) + # z = tf.multiply(x, y) + # pred = tf.less(tf.math.reduce_mean(x), tf.math.reduce_mean(y)) + # def true_fn(): return tf.add(x, z), x + # def false_fn(): return tf.square(y), z + # res = tf.cond(pred, true_fn, false_fn) + # + # There will be 2 iffs: + # + # iff('cond/pred_id', 'cond/Add', 'cond/Square') + # iff('cond/pred_id', 'cond/Add/Switch', 'cond/Switch_1') + # + # where + # 'cond/pred_id': pred + # 'cond/Add': tf.add(x, z) + # 'cond/Square': tf.square(y) + # 'cond/Add/Switch': x + # 'cond/Switch_1': z + # + # And both branches are executed, and one of the results will be + # discarded at iff nodes. + # + # Note that the above program would translate to two cond ops, each with + # two blocks. + true_output_var = context[node.inputs[1]] + false_output_var = context[node.inputs[2]] + + def true_fn(): + return mb.identity(x=true_output_var) + + def false_fn(): + return mb.identity(x=false_output_var) + + x = mb.cond(pred=pred, _true_fn=true_fn, _false_fn=false_fn, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Concat(context, node): + values = [context[input] for input in node.inputs[1:]] + axis = context[node.inputs[0]] + x = mb.concat(values=values, axis=axis, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def ConcatV2(context, node): + values = [context[input] for input in node.inputs[:-1]] + axis = context[node.inputs[-1]] + x = mb.concat(values=values, axis=axis, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Pack(context, node): + values = [context[name] for name in node.inputs] + axis = node.attr["axis"] + if axis < 0: + # TF axis = -1 creates new dim at the end + axis += values[0].rank + 1 + if len(values) == 1: + # for example: + # y = tf.raw_ops.Pack(values=[2], axis=0). 
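+        #    (illustrative note: this scalar case takes the mb.mul branch below,
+        #     which promotes the scalar 2 to the rank-1 tensor [2])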
+ # or y = tf.raw_ops.Pack(values=[tf.constant([1,2])], axis=0) + input_type = values[0].sym_type + if _is_scalar(input_type): + x = mb.mul(x=_np.array([1], dtype=_np.int32), y=values[0], name=node.name) + else: + x = mb.expand_dims(x=values[0], axes=[axis], name=node.name) + else: + if all([_is_scalar(input.sym_type) for input in values]): + x = mb.concat(values=values, axis=axis, name=node.name) + else: + x = mb.stack(values=values, axis=axis, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def Unpack(context, node): + x = context[node.inputs[0]] + axis = int(node.attr["axis"]) + num_splits = node.attr.get("num", None) + if num_splits is None: + num_splits = x.shape[axis] + if num_splits == 1: + y = [x] + else: + y = mb.split(x=x, num_splits=num_splits, axis=axis, name=node.name + "_unsqueezed") + output_vars = [] + for i in range(num_splits): + output_vars.append( + mb.squeeze(x=y[i], axes=[axis], name=node.name + ":{}".format(i)) + ) + + context.add(node.name, output_vars) + + +@register_tf_op +def Split(context, node): + axis = context[node.inputs[0]] + x = context[node.inputs[1]] + if "num_split" not in node.attr: + raise ValueError("num_splits not found in TF op {}".format(node.name)) + num_splits = node.attr["num_split"] + if num_splits == 1: + if len(node.outputs) == 0: + x = mb.mul(x=x, y=1.0, name=node.name) + context.add(node.name, x) + else: + # Don't change tfssa. Just make downstream ops reference the pre-identity op. + context.add(node.name, [x], is_new_var=False) + else: + x = mb.split(x=x, num_splits=num_splits, axis=axis, name=node.name) + context.add(node.name, x) + # TODO : If tf.split output is returned, there's no + # get_tuple nodes. Some graph pass is needed. Example: + # + # x = tf.placeholder(tf.float32, shape=input_shape1) + # res = tf.split(x, 3, axis=0) + # + # res are ['split:0', 'split:1', 'split'] + # + # but node.outputs == ['gto_1', 'gto_2', 'gto_3'] + + +@register_tf_op +def SplitV(context, node): + x = context[node.inputs[0]] + split_sizes = context[node.inputs[1]] + axis = context[node.inputs[2]] + if "num_split" not in node.attr: + raise ValueError("num_splits not found in TF op {}".format(node.name)) + num_splits = node.attr["num_split"] + if num_splits == 1: + Identity(context, node) + else: + x = mb.split( + x=x, + num_splits=num_splits, + split_sizes=split_sizes, + axis=axis, + name=node.name, + ) + context.add(node.name, x) + + +@register_tf_op +def ScatterNd(context, node): + indices = context[node.inputs[0]] + updates = context[node.inputs[1]] + shape = context[node.inputs[2]] + x = mb.fill(shape=shape, value=types.nptype_from_builtin(updates.dtype)(0)) + x = mb.scatter_nd(data=x, indices=indices, updates=updates, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def TensorScatterAdd(context, node): + tensor, indices, updates, = [context[name] for name in node.inputs] + output = mb.scatter_nd(data=tensor, indices=indices, updates=updates, mode="add", name=node.name) + context.add(node.name, output) + + +@register_tf_op +def ZerosLike(context, node): + x = context[node.inputs[0]] + if x.rank == 0: + np_type = types.nptype_from_builtin(x.sym_type) + x = mb.const(val=np_type(0), name=node.name) + else: + np_type = types.nptype_from_builtin(x.sym_type.get_primitive()) + x = mb.fill(shape=mb.shape(x=x), value=np_type(0), name=node.name) + context.add(node.name, x) + + +@register_tf_op +def IsFinite(context, node): + x = context[node.inputs[0]] + if any_symbolic(x.shape): + x_shape = mb.shape(x=x) + else: + x_shape = 
[1] if x.shape == () else x.shape + max_tensor = mb.fill(shape=x_shape, value=_np.finfo(_np.float32).max) + min_tensor = mb.fill(shape=x_shape, value=_np.finfo(_np.float32).min) + less_then = mb.less_equal(x=x, y=max_tensor) + greater_than = mb.greater_equal(x=x, y=min_tensor) + x = mb.logical_and(x=less_then, y=greater_than, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def CropAndResize(context, node): + x = context[node.inputs[0]] + input_shape = x.shape # (B, h_in, w_in, C) + if len(input_shape) != 4: + raise ValueError( + '"CropResize" op: expected input rank 4, got {}'.format(x.rank) + ) + Hin, Win = input_shape[1:3] + + const_box_info = True + if context[node.inputs[1]].val is None or context[node.inputs[2]].val is None: + const_box_info = False + + crop_size = context[node.inputs[3]].val + method = node.attr.get("method", "bilinear") + pad_value = node.attr.get("extrapolation_value", 0.0) + + # CoreML index information along with boxes + if const_box_info: + boxes = context[node.inputs[1]].val + box_indices = context[node.inputs[2]].val + box_indices = _np.expand_dims(box_indices, axis=1) + boxes = _np.concatenate([box_indices, boxes], axis=1) + # CoreML expects boxes/ROI in + # [N, 1, 5, 1, 1] format + boxes = boxes.reshape(boxes.shape[0], 1, boxes.shape[1], 1, 1) + else: + box_indices = context[node.inputs[2]] + boxes = context[node.inputs[1]] + box_indices = mb.expand_dims(x=box_indices, axes=[1]) + if box_indices.dtype != boxes.dtype: + box_indices = mb.cast(x=box_indices, dtype=types.builtin_to_string(boxes.dtype)) + boxes = mb.concat(values=(box_indices, boxes), axis=1) + # TODO: Dynamic rank: Use GetShape and select indices dynamically + boxes = mb.reshape(x=boxes, shape=[boxes.shape[0], 1, boxes.shape[1], 1, 1]) + + # Get Height and Width of crop + h_out, w_out = crop_size[0], crop_size[1] + + # TF `nearest` mode not supported + method_map = {"bilinear": "ALIGN_CORNERS"} + if method not in method_map: + raise ValueError( + "CropResize op: Unsupported method {}. Supports {}".format( + method, method_map.keys() + ) + ) + method = method_map[method] + + # TF input format: [B, h_in, w_in, C] + # CoreML input format: [B, C, h_in, w_in] + x = _transpose_NHWC_to_NCHW(x) + + # Crop Resize + args = { + "x": x, + "roi": boxes, + "target_height": h_out, + "target_width": w_out, + "normalized_coordinates": True, + "spatial_scale": 1.0, + "box_coordinate_mode": "CORNERS_HEIGHT_FIRST", + "sampling_mode": method, + } + if is_current_opset_version_compatible_with(target.iOS16): + args["pad_value"] = pad_value + else: + if pad_value != 0.0: + msg = ( + "For iOS15 or older, only extrapolation_value=0.0 is supported or the tf CropAndResize op. " + "Got {}" + ).format(pad_value) + raise ValueError(msg) + x = mb.crop_resize(**args) + + # CoreML output format: [N, 1, C, h_out, w_out] + # TF output format: [N, h_out, w_out, C] + x = mb.squeeze(x=x, axes=[1]) + x = _transpose_NCHW_to_NHWC(x, node.name) + context.add(node.name, x) + + +@register_tf_op +def TensorArrayV3(context, node): + if "infer_shape" in node.attr: + if not node.attr["infer_shape"]: + raise ValueError("Only fixed size TensorArray is supported") + + dynamic_length = node.attr.get("dynamic_size", True) + elem_shape = node.attr.get("element_shape", None) + size = node.attr.get("size", None) + if size is None: + size = context[node.inputs[0]] + + if size.val is None: + init_length = size + else: + init_length = size.val + if init_length == 0: + # Dynamic list. 
Use 1 as init_length + init_length = 1 + + builtin_dtype = node.attr["dtype"] + dtype_str = types.builtin_to_string(builtin_dtype) + if elem_shape is not None and not -1 in elem_shape: + ls = mb.make_list( + init_length=init_length, + dtype=dtype_str, + elem_shape=elem_shape, + dynamic_length=dynamic_length, + name=node.name, + ) + else: + ls = mb.tf_make_list( + init_length=init_length, + dtype=dtype_str, + dynamic_length=dynamic_length, + name=node.name, + ) + context.add(node.name, ls) + + +@register_tf_op +def TensorArrayWriteV3(context, node): + index = context[node.inputs[0]] + new_val = context[node.inputs[1]] + ls = context[node.inputs[2]] + new_list = mb.list_write(ls=ls, index=index, value=new_val, name=node.name) + context.add(node.name, new_list) + + +@register_tf_op +def TensorArraySizeV3(context, node): + ls = context[node.inputs[0]] + length = mb.list_length(ls=ls, name=node.name) + context.add(node.name, length) + + +@register_tf_op +def TensorArrayGatherV3(context, node): + indices = context[node.inputs[0]] + ls = context[node.inputs[1]] + tensor = mb.list_gather(ls=ls, indices=indices, name=node.name) + context.add(node.name, tensor) + + +@register_tf_op +def TensorArrayReadV3(context, node): + idx = context[node.inputs[0]] + ls = context[node.inputs[1]] + ls = mb.list_read(ls=ls, index=idx, name=node.name) + context.add(node.name, ls) + + +@register_tf_op +def TensorArrayScatterV3(context, node): + indices = context[node.inputs[0]] + value = context[node.inputs[1]] + ls = context[node.inputs[2]] + ls = mb.list_scatter(ls=ls, indices=indices, value=value, name=node.name) + context.add(node.name, ls) + + +@register_tf_op +def BroadcastTo(context, node): + x = context[node.inputs[0]] + shape = context[node.inputs[1]] + if shape.val is None: # dynamic shape + raise NotImplementedError("dynamic shape not yet supported") + else: # static shape + target_shape = tuple(shape.val) + broadcast_shape = broadcast_shapes(x.shape, target_shape) + if target_shape != broadcast_shape: + msg = "shapes are not broadcastable: {} vs. {}" + raise ValueError(msg.format(x.shape, target_shape)) + target_rank = len(target_shape) + if x.rank != target_rank: + axes = [i for i in range(target_rank - x.rank)] + x = mb.expand_dims(x=x, axes=axes) + reps = [1] * target_rank + for i in range(target_rank): + reps[i] = target_shape[i] // x.shape[i] + + x = mb.tile(x=x, reps=reps, name=node.name) + context.add(node.name, x) + + +@register_tf_op +def get_global(context, node): + # Design comment: This is only works if variable doesn't cross block + # boundary (e.g. 
while_loop, cond, function) + variable_name = node.attr["variable"] + x = context[variable_name] # This must've been set by set_global + context.add(node.name, x, is_new_var=False) + + +@register_tf_op +def set_global(context, node): + x = context[node.inputs[0]] + variable_name = node.attr["variable"] + context.add(variable_name, x, is_new_var=False) + + +def _get_const_or_raise(variable): + if variable.val is None: + raise ValueError("Var {} must be const".format(variable.name)) + return variable.val + + +@register_tf_op +def LSTMBlockCell(context, node): + x = context[node.inputs[0]] # [batch, input_dim] + c_prev = context[node.inputs[1]] # [b, hidden_dim] + h_prev = context[node.inputs[2]] # [b, hidden_dim] + # W layout is ifco + W = context[node.inputs[3]] # [input_dim + hidden_dim, 4*hidden_dim] + + kwargs = {} + use_peephole = node.attr["use_peephole"] + if use_peephole: + peep_i = context[node.inputs[4]] # [hidden_dim,] + peep_f = context[node.inputs[5]] # [hidden_dim,] + peep_o = context[node.inputs[6]] # [hidden_dim,] + kwargs["weight_peep_i"] = peep_i + kwargs["weight_peep_f"] = peep_f + kwargs["weight_peep_o"] = peep_o + + bias = context[node.inputs[7]] # [4*hidden_dim,] + + forget_bias = node.attr["forget_bias"] + cell_clip = None + if node.attr["cell_clip"] is not None and node.attr["cell_clip"] > 0: + cell_clip = node.attr["cell_clip"] + + res = mb.tf_lstm_block_cell( + x=x, + c_prev=c_prev, + h_prev=h_prev, + weight=W, + bias=bias, + forget_bias=forget_bias, + cell_clip=cell_clip, + use_peephole=use_peephole, + name=node.name, + **kwargs + ) + context.add(node.name, res) + +@register_tf_op(tf_alias=["BlockLSTMV2"]) +def BlockLSTM(context, node): + # BlockLSTM: https://www.tensorflow.org/api_docs/python/tf/raw_ops/BlockLSTM + # BlockLSTMV2: https://www.tensorflow.org/api_docs/python/tf/raw_ops/BlockLSTMV2 + seq_len = context[node.inputs[0]] # int + x = context[node.inputs[1]] # [padded_len, batch, input_dim] + init_c = context[node.inputs[2]] # [1, hidden_dim] + init_h = context[node.inputs[3]] # [1, hidden_dim] + # BlockLSTM: icfo format, BlockLSTMV2: ifco format + weight = context[node.inputs[4]] # [input_dim + hidden_dim, 4*hidden_dim] + + kwargs = {} + use_peephole = node.attr["use_peephole"] + if use_peephole: + peep_i = context[node.inputs[5]] # [hidden_dim,] + peep_f = context[node.inputs[6]] # [hidden_dim,] + peep_o = context[node.inputs[7]] # [hidden_dim,] + kwargs["weight_peep_i"] = peep_i + kwargs["weight_peep_f"] = peep_f + kwargs["weight_peep_o"] = peep_o + + # BlockLSTM: icfo format, BlockLSTMV2: ifco format + bias = context[node.inputs[8]] # [4*hidden_dim,] + + # forget bias is always 0 for BlockLSTMV2 + forget_bias = 0.0 if node.op == "BlockLSTMV2" else node.attr["forget_bias"] + cell_clip = None + if node.attr["cell_clip"] is not None and node.attr["cell_clip"] > 0: + cell_clip = node.attr["cell_clip"] + + if node.op == "BlockLSTMV2": + # mb.tf_lstm_block takes weights and bias in icfo format + # BlockLSTMV2's weights and bias are in ifco format + # convert from ifco to icfo format + w_i, w_f, w_c, w_o = mb.split(x=weight, num_splits=4, axis=-1) + weight = mb.concat(values=(w_i, w_c, w_f, w_o), axis=1, name=weight.name) + b_i, b_f, b_c, b_o = mb.split(x=bias, num_splits=4, axis=-1) + bias = mb.concat(values=(b_i, b_c, b_f, b_o), axis=0, name=bias.name) + + res = mb.tf_lstm_block( + seq_len=seq_len, + x=x, + c_prev=init_c, + h_prev=init_h, + weight=weight, + bias=bias, + forget_bias=forget_bias, + cell_clip=cell_clip, + use_peephole=use_peephole, + 
name=node.name, + **kwargs + ) + context.add(node.name, res) + +@register_tf_op +def ClipByValue(context, node): + x = context[node.inputs[0]] + min_value = context[node.inputs[1]] + max_value = context[node.inputs[2]] + if min_value.val < max_value.val: + x = mb.clip(x=x, alpha=min_value, beta=max_value, name=node.name) + else: + # When min >= max, TensorFlow sets all values to min. + x = mb.fill(shape=mb.shape(x=x), value=min_value, name=node.name) + context.add(node.name, x) + +@register_tf_op +def Size(context, node): + x = context[node.inputs[0]] + x = mb.shape(x=x) + x = mb.reduce_prod(x=x, axes=[0], name=node.name) + context.add(node.name, x) + +@register_tf_op +def LogSoftmax(context, node): + x = context[node.inputs[0]] + axis = node.attr.get('axis', -1) + x_max = mb.reduce_max(x=x, axes=[axis], keep_dims=True) + x_off = mb.sub(x=x, y=x_max) + y = mb.reduce_log_sum_exp(x=x_off, axes=[axis], keep_dims=True) + res = mb.sub(x=x_off, y=y, name=node.name) + context.add(node.name, res) + +@register_tf_op +def AudioSpectrogram(context, node): + """ + input shape: (Tin, channels) + attributes: stride (int), window_size (int), magnitude_squared (bool) + + output shape : (channels, Tout, fout) + where, + Tout = floor((Tin - window_size)/stride + 1) + fout = N / 2 + 1 + where N = next_power_of_2(window_size) = 2 ^ ceil(log2(window_size)) + + reference: + https://github.com/tensorflow/tensorflow/blob/dec8e0b11f4f87693b67e125e67dfbc68d26c205/tensorflow/core/kernels/spectrogram_op.cc + """ + + x = context[node.inputs[0]] # (Tin, channels) + if x.rank != 2: + raise NotImplementedError("AudioSpectrogram op: rank of the input must be 2") + + if "magnitude_squared" not in node.attr: + raise ValueError("AudioSpectrogram op: missing attribute: 'magnitude_squared'") + if "stride" not in node.attr: + raise ValueError("AudioSpectrogram op: missing attribute: 'stride'") + if "window_size" not in node.attr: + raise ValueError("AudioSpectrogram op: missing attribute: 'window_size'") + + magnitude_squared = node.attr["magnitude_squared"] + stride = node.attr["stride"] + window_size = node.attr["window_size"] + + N = 2 ** _np.ceil(_np.log2(window_size)) + N = N.astype(_np.int32) + fout = N / 2 + 1 + fout = fout.astype(_np.int32) + + # construct constant for hann window tensor, of shape (window_size,) + h = _np.arange(window_size) * ((2 * _np.pi) / window_size) + h = 0.5 - 0.5 * _np.cos(h) + + # construct the constant DFT matrices + k = _np.arange(fout).reshape(1, fout) # (1, fout) + n = _np.arange(N).reshape(N, 1) # (N, 1) + kn = _np.matmul(n, k) * (2 * _np.pi / N) # (N, fout) + Re_DFT_matrix_const = _np.cos(kn) # (N, fout) + Im_DFT_matrix_const = -_np.sin(kn) # (N, fout) + + # transpose input + x = mb.transpose(x=x, perm=[1,0]) # (channels, Tin) + # extract slices from the input + x = mb.sliding_windows(x=x, axis=1, size=window_size, stride=stride) # (channels, Tout, window_size) + # multiply with hann window + x = mb.mul(x=x, y=h) + # pad the last dimension to size N + x = mb.pad(x=x, pad=[0,0,0,0,0,N - window_size], mode="constant", constant_val=0.0) # (channels, Tout, N) + # multiply by DFT matrices + re = mb.matmul(x=x, y=Re_DFT_matrix_const) # (channels, Tout, fout) + im = mb.matmul(x=x, y=Im_DFT_matrix_const) # (channels, Tout, fout) + + # compute spectrogram + re = mb.mul(x=re, y=re) + im = mb.mul(x=im, y=im) + if not magnitude_squared: + y = mb.add(x=re, y=im) + y = mb.sqrt(x=y, name=node.name) + else: + y = mb.add(x=re, y=im, name=node.name) + context.add(node.name, y) + +@register_tf_op 
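+# Worked shape example linking AudioSpectrogram (above) to Mfcc (below), with
+# hypothetical values for illustration only: window_size=400 and stride=160 on
+# a 1 s, 16 kHz mono clip (Tin=16000, channels=1) give
+# N = 2^ceil(log2(400)) = 512, fout = 512/2 + 1 = 257, and
+# Tout = floor((16000 - 400)/160 + 1) = 98, so AudioSpectrogram emits a
+# (1, 98, 257) tensor, which is the (channels, T, N) layout that Mfcc consumes.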
+def Mfcc(context, node):
+    """
+    inputs:
+    - x : (channels, T, N)
+    - sampling rate: int
+
+    attributes:
+    - upper_frequency_limit : int
+    - lower_frequency_limit : int
+    - filterbank_channel_count : int
+    - dct_coefficient_count : int
+
+    output shape: (channels, T, dct_coefficient_count)
+    """
+    x = context[node.inputs[0]]  # (channels, T, F)
+    if x.rank != 3:
+        raise NotImplementedError("Mfcc op: rank of the input must be 3")
+    sampling_rate_var = context[node.inputs[1]]
+    if sampling_rate_var.val is None:
+        raise NotImplementedError("Mfcc op: dynamic sampling rate not supported")
+    sample_rate = sampling_rate_var.val
+    if is_symbolic(x.shape[2]):
+        raise NotImplementedError("Mfcc op: the last dimension of the input, i.e. the spectrogram size, must be known")
+
+    spectrogram_N = x.shape[2]
+    upper_frequency_limit = node.attr.get("upper_frequency_limit", 4000)
+    lower_frequency_limit = node.attr.get("lower_frequency_limit", 20)
+    filterbank_channel_count = node.attr.get("filterbank_channel_count", 40)
+    dct_coefficient_count = node.attr.get("dct_coefficient_count", 13)
+
+    # get the constant weights and matrices for the MFCC filterbank and for the DCT
+    # weights: (N,)
+    # mat_weighted, mat_spec_val : (N, filterbank_channel_count)
+    # cosines : (filterbank_channel_count, dct_coefficient_count)
+    weights, mat_weighted, mat_spec_val, cosines = _get_MFCC_constants(spectrogram_N,
+                                                                       sample_rate,
+                                                                       upper_frequency_limit,
+                                                                       lower_frequency_limit,
+                                                                       filterbank_channel_count,
+                                                                       dct_coefficient_count)
+
+    spectrogram_value = mb.sqrt(x=x)  # (channels, T, N)
+    weighted_spectrogram_value = mb.mul(x=spectrogram_value, y=weights)  # (channels, T, N)
+    x1 = mb.matmul(x=weighted_spectrogram_value, y=mat_weighted)  # (channels, T, filterbank_channel_count)
+    x2 = mb.matmul(x=spectrogram_value, y=mat_spec_val)  # (channels, T, filterbank_channel_count)
+    y = mb.add(x=x1, y=x2)  # (channels, T, filterbank_channel_count)
+    y = mb.log(x=y, epsilon=1e-12)
+    y = mb.matmul(x=y, y=cosines, name=node.name)  # (channels, T, dct_coefficient_count)
+    context.add(node.name, y)
+
+
+@register_tf_op
+def Complex(context, node):
+    real_part = context[node.inputs[0]]
+    imag_part = context[node.inputs[1]]
+    result = mb.complex(real_data=real_part, imag_data=imag_part, name=node.name)
+    context.add(node.name, result)
+
+
+@register_tf_op
+def Real(context, node):
+    input_data = context[node.inputs[0]]
+    if types.is_complex(input_data.dtype):
+        real_part = mb.complex_real(data=input_data, name=node.name)
+    else:
+        real_part = input_data
+    context.add(node.name, real_part)
+
+
+@register_tf_op
+def Imag(context, node):
+    input_data = context[node.inputs[0]]
+    if types.is_complex(input_data.dtype):
+        imag_part = mb.complex_imag(data=input_data, name=node.name)
+    else:
+        # According to the doc of tf.math.imag, it returns a tensor of all zeros if the input is real.
+        np_type = types.nptype_from_builtin(input_data.sym_type.get_primitive())
+        imag_part = mb.fill(
+            shape=mb.shape(x=input_data), value=np_type(0), name=node.name
+        )
+    context.add(node.name, imag_part)
+
+
+@register_tf_op
+def FFT(context, node):
+    input_data = context[node.inputs[0]]
+    fft_res = mb.complex_fft(data=input_data, name=node.name)
+    context.add(node.name, fft_res)
+
+
+@register_tf_op
+def RFFT(context, node):
+    input_data = context[node.inputs[0]]
+    fft_length = context[node.inputs[1]]
+    # The fft_length is an int32 tensor of shape [1] instead of an integer. To make it compatible
+    # with complex_rfft (which uses PyTorch's params as reference), we extract the value from the tensor.
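+    # For instance (illustrative only): a tf.signal.rfft call with fft_length=[16]
+    # reaches here with fft_length.val being the length-1 int32 array [16]; the
+    # scalar 16 is forwarded below as n=16, mirroring the torch.fft.rfft(input, n)
+    # convention that complex_rfft follows.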
+ rfft_res = mb.complex_rfft( + data=input_data, n=mb.const(val=fft_length.val[0]), name=node.name + ) + context.add(node.name, rfft_res) + + +@register_tf_op +def IFFT(context, node): + input_data = context[node.inputs[0]] + ifft_res = mb.complex_ifft(data=input_data, name=node.name) + context.add(node.name, ifft_res) + + +@register_tf_op +def IRFFT(context, node): + input_data = context[node.inputs[0]] + fft_length = context[node.inputs[1]] + # The fft_length is an int32 tensor of shape [1] instead of an integer. To make it compatible + # to complex_rfft (which use PyTorch's params as reference), we extract the value from tensor. + irfft_res = mb.complex_irfft( + data=input_data, n=mb.const(val=fft_length.val[0]), name=node.name + ) + context.add(node.name, irfft_res) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/parse.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/parse.py new file mode 100644 index 00000000..9247885b --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/parse.py @@ -0,0 +1,138 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as _np +from tensorflow.core.framework.types_pb2 import DataType +from tensorflow.python.framework.dtypes import _TF_TO_NP + +from coremltools import _logger as logger +from coremltools.converters.mil.mil import types + + +def parse_type(t): + mapping = { + # bool + DataType.DT_BOOL: types.bool, + # floating point + DataType.DT_HALF: types.fp16, + DataType.DT_FLOAT: types.float, + DataType.DT_DOUBLE: types.double, + # int + DataType.DT_INT8: types.int8, + DataType.DT_INT16: types.int16, + DataType.DT_INT32: types.int32, + DataType.DT_INT64: types.int32, + + # unsigned int + DataType.DT_UINT8: types.uint8, + DataType.DT_UINT16: types.uint16, + DataType.DT_UINT32: types.uint32, + DataType.DT_UINT64: types.uint64, + # string + DataType.DT_STRING: types.str, + } + t = int(t) + if t in mapping: + return mapping[t] + else: + logger.info("Type %d cannot be mapped", t) + return None + + +def parse_shape(t): + if t.unknown_rank: + return None + ret = [d.size for d in t.dim] + return ret + + +def parse_tensor(t): + typ = parse_type(t.dtype) + shape = parse_shape(t.tensor_shape) + + retval = None + if len(t.half_val) > 0: + retval = _np.array(t.half_val, dtype=_TF_TO_NP[t.dtype]) + elif len(t.float_val) > 0: + retval = _np.array(t.float_val, dtype=_TF_TO_NP[t.dtype]) + elif len(t.double_val) > 0: + retval = _np.array(t.double_val, dtype=_TF_TO_NP[t.dtype]) + elif len(t.int_val) > 0: + retval = _np.array(t.int_val, dtype=_TF_TO_NP[t.dtype]) + elif len(t.int64_val) > 0: + retval = _np.array(t.int64_val, dtype=_TF_TO_NP[t.dtype]) + elif len(t.bool_val) > 0: + retval = _np.array(t.bool_val, dtype=_TF_TO_NP[t.dtype]) + elif hasattr(t, "uint32_val") and len(t.uint32_val) > 0: + retval = _np.array(t.uint32_val, dtype=_TF_TO_NP[t.dtype]) + elif hasattr(t, "uint64_val") and len(t.uint64_val) > 0: + retval = _np.array(t.uint64_val, dtype=_TF_TO_NP[t.dtype]) + + if not t.tensor_shape.unknown_rank and len(shape) == 0: + retobj = typ() + if retval is not None: + retobj.val = retval[0] + else: + rettype = types.tensor(typ, tuple(shape)) + retobj = rettype() + retobj.shape = shape + if retval is not None: + retobj.val = retval + + 
+    return retobj
+
+
+def parse_string(s):
+    if isinstance(s, bytes):
+        return s.decode("utf-8", errors="ignore")
+    else:
+        return s
+
+
+def parse_list(t):
+    if len(t.s) > 0:
+        return list(parse_string(s) for s in t.s)
+    elif len(t.i) > 0:
+        return list(t.i)
+    elif len(t.f) > 0:
+        return list(t.f)
+    elif len(t.b) > 0:
+        return list(t.b)
+    elif len(t.type) > 0:
+        return list(parse_type(z) for z in t.type)
+    elif len(t.shape) > 0:
+        return list(parse_shape(z) for z in t.shape)
+    elif len(t.tensor) > 0:
+        return list(parse_tensor(z) for z in t.tensor)
+    else:
+        return []
+
+
+def parse_func(f):
+    return f.name
+
+
+def parse_attr(attr):
+    if attr.HasField("s"):
+        return parse_string(attr.s)
+    elif attr.HasField("i"):
+        return attr.i
+    elif attr.HasField("f"):
+        return attr.f
+    elif attr.HasField("b"):
+        return attr.b
+    elif attr.HasField("type"):
+        return parse_type(attr.type)
+    elif attr.HasField("shape"):
+        return parse_shape(attr.shape)
+    elif attr.HasField("tensor"):
+        return parse_tensor(attr.tensor)
+    elif attr.HasField("list"):
+        return parse_list(attr.list)
+    elif attr.HasField("func"):
+        return parse_func(attr.func)
+    elif attr.HasField("placeholder"):
+        raise NotImplementedError("placeholder not yet implemented")
+    raise ValueError("unintelligible TFNode attributes")
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/parsed_tf_node.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/parsed_tf_node.py
new file mode 100644
index 00000000..10253d89
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/parsed_tf_node.py
@@ -0,0 +1,80 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from coremltools.converters.mil.mil import types
+
+from .tfssa import ParsedNode
+
+
+class ParsedTFNode(ParsedNode):
+    """
+    A parsed TensorFlow Node.
+
+    name: The name of the node (str)
+    op: The operation represented by the node (str)
+    datatype: The type of the node. (type)
+    value: The value of the node if available
+    inputs: The list of nodes which are inputs to this node (list[str])
+    control_inputs: The list of nodes which have to be executed before this node (list[str])
+    attr: The attributes of the node
+    outputs: The list of nodes which consume the result of this node (list[str])
+    control_outputs: The list of nodes which have to be executed after this node (list[str])
+    """
+
+    def __init__(self, tfnode=None):
+        super(ParsedTFNode, self).__init__()
+        self.original_node = tfnode
+
+        if tfnode is not None:
+            from .parse import parse_attr
+
+            self.name = tfnode.name
+            if tfnode.op == "PlaceholderWithDefault":
+                self.op = "Placeholder"
+            else:
+                self.op = tfnode.op
+            self.inputs = [x for x in tfnode.input if not x.startswith("^")]
+            self.control_inputs = [x[1:] for x in tfnode.input if x.startswith("^")]
+            self.attr = {k: parse_attr(v) for k, v in tfnode.attr.items()}
+
+    def parse_from_attr(self):
+        if "value" in self.attr:
+            self.datatype = self.attr["value"].__class__
+        elif "_output_shapes" in self.attr:
+            output_shapes = self.attr["_output_shapes"]
+            if output_shapes[0] is not None and len(output_shapes[0]) > 0:
+                if "dtype" in self.attr:
+                    rettype = types.tensor(self.attr["dtype"], tuple(output_shapes[0]))
+                elif "T" in self.attr:
+                    rettype = types.tensor(self.attr["T"], tuple(output_shapes[0]))
+                elif "Tparams" in self.attr:
+                    rettype = types.tensor(
+                        self.attr["Tparams"], tuple(output_shapes[0])
+                    )
+                else:
+                    raise NotImplementedError(
+                        "Op-(%s) %s not implemented\nWith attribute: %s"
+                        % (self.op, self.name, str(self.attr))
+                    )
+                self.datatype = rettype
+        elif "dtype" in self.attr:
+            self.datatype = self.attr["dtype"]
+        elif "shape" in self.attr:
+            shape = self.attr["shape"]
+            assert "dtype" in self.attr
+            if len(shape) == 0:
+                self.datatype = self.attr["dtype"]
+            else:
+                self.datatype = types.tensor(self.attr["dtype"], shape)
+
+    def _copy_impl(self, dest):
+        dest = super(ParsedTFNode, self)._copy_impl(dest)
+        dest.original_node = self.original_node
+        return dest
+
+    def __copy__(self):
+        return self._copy_impl(ParsedTFNode())
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/ssa_passes/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/ssa_passes/__init__.py
new file mode 100644
index 00000000..2dc81438
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/ssa_passes/__init__.py
@@ -0,0 +1,6 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from . import backfill_make_list_elem_type, expand_tf_lstm, tf_lstm_to_core_lstm
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/ssa_passes/backfill_make_list_elem_type.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/ssa_passes/backfill_make_list_elem_type.py
new file mode 100644
index 00000000..81d8423e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/ssa_passes/backfill_make_list_elem_type.py
@@ -0,0 +1,121 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil import types
+from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
+from coremltools.converters.mil.mil.passes.helper import block_context_manager
+from coremltools.converters.mil.mil.passes.pass_registry import register_pass
+from coremltools.converters.mil.mil.types.symbolic import is_symbolic
+from coremltools.converters.mil.mil.var import ListVar
+
+
+@register_pass(namespace="tensorflow")
+class backfill_make_list_elem_type(AbstractGraphPass):
+    """
+    TF's TensorArrayV3 (represented as make_list in mil) doesn't necessarily
+    contain the elem shape/type, which only becomes known when a write is
+    performed. We backfill the elem type info to make_list.
+
+    Inputs:
+
+    prog: Program
+    """
+    def apply(self, prog):
+        for f in prog.functions.values():
+            _backfill_make_list_elem_type_block(f)
+
+@block_context_manager
+def _backfill_make_list_elem_type_block(block):
+    # shallow copy hides changes on f.operations during the loop
+    for op in block.operations:
+        for b in op.blocks:
+            _backfill_make_list_elem_type_block(b)
+
+        if op.op_type != "tf_make_list":
+            continue
+
+        if op.outputs[0].elem_type != types.unknown:
+            # elem_type of the list is known
+            continue
+
+        list_var = op.outputs[0]
+        elem_type = _infer_elem_type(list_var)  # types.tensor
+        if elem_type is None:
+            msg = (
+                "No list_write or list_scatter op to infer make_list "
+                + "'{}' element type. Block:\n{}"
+            )
+            raise ValueError(msg.format(op.name, op.enclosing_block))
+
+        # elem_shape can be runtime-determined, which cannot be inferred at this point,
+        # so we add an internal _const_symbolic node to cover both the static and dynamic cases.
+        elem_shape = [dim.name if is_symbolic(dim) else dim for dim in elem_type.get_shape()]
+        new_list = mb.make_list(
+            init_length=op.init_length,
+            dynamic_length=op.dynamic_length,
+            elem_shape=tuple(elem_shape),
+            dtype=op.inputs["dtype"],
+            before_op=op,
+            name=op.name,
+        )
+
+        block.replace_uses_of_var_after_op(
+            anchor_op=op, old_var=op.outputs[0], new_var=new_list
+        )
+        block.remove_ops([op])
+
+
+def _infer_elem_type(list_var):
+    """
+    Returns types.tensor, or None if the element type cannot be inferred.
+    Example:
+
+    Given:
+
+    main(%update: (2,fp32)) {
+      block0() {
+        %list: List[unknown] = tf_make_list(...) # unknown elem type
+        %while_loop_0:0: (i32), %while_loop_0:1: List[(2,fp32)] = while_loop(loop_vars=(...))
+          while_loop_0_body(...) {
+            %list_write_0: List[(2,fp32)] = list_write(index=..., ls=%list, value=%update)
+          } -> (%add_0, %list_write_0)
+
+    Result:
+
+    main(%update: (2,fp32)) {
+      block0() {
+        %list: List[(2,fp32)] = tf_make_list(...) # Get the elem type from list_write
+        %while_loop_0:0: (i32), %while_loop_0:1: List[(2,fp32)] = while_loop(loop_vars=(...))
+          while_loop_0_body(...) {
+            %list_write_0: List[(2,fp32)] = list_write(index=..., ls=%list, value=%update)
+          } -> (%add_0, %list_write_0)
+    """
+    # Search for child ops that have informative element types
+    for o in list_var.child_ops:
+        if o.op_type in ["list_write", "list_scatter"]:
+            return o.outputs[0].elem_type
+        if o.op_type == "while_loop":
+            idx = list(o.loop_vars).index(list_var)
+            block = o.blocks[0]
+            # the corresponding Var in the body block
+            block_var = block.inputs[idx]
+            elem_type = _infer_elem_type(block_var)
+            if elem_type is not None:
+
+                def _set_types_for_block_inputs(block):
+                    block_var = block.inputs[idx]
+                    new_block_var = ListVar(name=block_var.name, elem_type=elem_type,
+                                            init_length=block_var.sym_type.T[1],
+                                            dynamic_length=block_var.sym_type.T[2])
+                    block._replace_var(block_var, new_block_var)
+
+                _set_types_for_block_inputs(o.blocks[0])  # condition block
+                _set_types_for_block_inputs(o.blocks[1])  # body block
+
+                return elem_type
+            # otherwise continue to the other block_var (a list_var can be
+            # passed into while_loop twice).
+    return None
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/ssa_passes/expand_tf_lstm.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/ssa_passes/expand_tf_lstm.py
new file mode 100644
index 00000000..1f28bfad
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/ssa_passes/expand_tf_lstm.py
@@ -0,0 +1,225 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import numpy as np
+
+from coremltools import _logger as logger
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
+from coremltools.converters.mil.mil.passes.pass_registry import register_pass
+
+
+@register_pass(namespace="tensorflow")
+class expand_tf_lstm(AbstractGraphPass):
+    """
+    Expand tf_lstm_block_cell to fine-grained SSA ops following:
+
+    xh = [x, h_prev]
+    [i, ci, f, o] = xh * w + b
+    f = f + forget_bias
+    if not use_peephole:
+      wci = wcf = wco = 0
+    i = sigmoid(cs_prev .* wci + i)
+    f = sigmoid(cs_prev .* wcf + f)
+    ci = tanh(ci)
+    cs = ci .* i + cs_prev .* f
+    cs = clip(cs, cell_clip)
+    o = sigmoid(cs * wco + o)
+    co = tanh(cs)
+    h = co .* o
+
+    Inputs:
+
+    prog: Program
+    """
+    def apply(self, prog):
+        for f in prog.functions.values():
+            _expand_tf_lstm_helper(f)
+
+
+def _expand_tf_lstm_helper(block):
+    # shallow copy hides changes on f.operations during the loop
+    for op in block.operations[:]:
+        for b in op.blocks:
+            _expand_tf_lstm_helper(b)
+
+        if op.op_type == "tf_lstm_block_cell":
+            _expand_tf_lstm_block_cell(op)
+            logger.info("Expanding {} (op_type: {})".format(op.name, op.op_type))
+
+        if op.op_type == "tf_lstm_block":
+            # only cs and h are supported for now; this can be extended to other outputs at a performance cost.
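+            # The five auxiliary outputs (i, f, o, ci, co) must be dead (no child ops
+            # and no consuming blocks) for the expansion below to preserve semantics.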
+            i, cs, f, o, ci, co, h = op.outputs
+            if all(
+                map(lambda x: len(x.child_ops) == 0 and len(x.consuming_blocks) == 0,
+                    (i, f, o, ci, co)
+                )
+            ):
+                _expand_tf_lstm_block(op)
+                logger.info("Expanding {} (op_type: {})".format(op.name, op.op_type))
+
+
+def _lstm_cell_builder(op, x, h_prev, cs_prev, before_op=None):
+    b = op.bias  # [4*hidden_dim]
+    forget_bias = op.forget_bias.val  # python:float
+
+    # xh = [x, h_prev]
+    # xh shape: [b, input_dim+hidden_dim]
+    xh = mb.concat(values=[x, h_prev], axis=-1, before_op=before_op)
+
+    # w: [4*hidden_dim, input_dim + hidden_dim] (icfo layout)
+    w = np.transpose(op.weight.val)
+    # [i, ci, f, o] = xh * w + b. Shape is [b, 4*hidden_dim]
+    icfo = mb.linear(x=xh, weight=w, bias=b, before_op=before_op)
+
+    # i, ci, f, o shape: [b, hidden_dim]
+    i, ci, f, o = mb.split(x=icfo, num_splits=4, axis=-1, before_op=before_op)
+    if op.forget_bias.val != 0:
+        f = mb.add(x=f, y=forget_bias, before_op=before_op)
+
+    # note that .* means Hadamard product
+    # i = sigmoid(cs_prev .* wci + i)
+    # f = sigmoid(cs_prev .* wcf + f)
+    if op.use_peephole.val:
+        wci = op.weight_peep_i.val  # [hidden_dim]
+        wcf = op.weight_peep_f.val  # [hidden_dim]
+
+        x = mb.mul(x=cs_prev, y=wci, before_op=before_op)
+        pre_i = mb.add(x=x, y=i, before_op=before_op)
+
+        x = mb.mul(x=cs_prev, y=wcf, before_op=before_op)
+        pre_f = mb.add(x=x, y=f, before_op=before_op)
+    else:
+        pre_i = i
+        pre_f = f
+
+    i = mb.sigmoid(x=pre_i, before_op=before_op)
+    f = mb.sigmoid(x=pre_f, before_op=before_op)
+    ci = mb.tanh(x=ci, before_op=before_op)
+
+    # cs = ci .* i + cs_prev .* f
+    x = mb.mul(x=ci, y=i, before_op=before_op)
+    y = mb.mul(x=cs_prev, y=f, before_op=before_op)
+    cs = mb.add(x=x, y=y, before_op=before_op)
+
+    # cs = clip(cs, cell_clip)
+    if op.cell_clip is not None:
+        clip_val = op.cell_clip.val
+        cs = mb.clip(x=cs, alpha=-clip_val, beta=clip_val, before_op=before_op)
+
+    # o = sigmoid(cs * wco + o)
+    if op.use_peephole.val:
+        wco = op.weight_peep_o.val
+        x = mb.mul(x=cs, y=wco, before_op=before_op)
+        pre_o = mb.add(x=x, y=o, before_op=before_op)
+    else:
+        pre_o = o
+    o = mb.sigmoid(x=pre_o, before_op=before_op)
+    co = mb.tanh(x=cs, before_op=before_op)
+
+    # h = co .* o
+    h = mb.mul(x=co, y=o, before_op=before_op)
+
+    return [i, cs, f, o, ci, co, h]
+
+
+def _expand_tf_lstm_block_cell(op):
+    if op.op_type != "tf_lstm_block_cell":
+        raise ValueError()
+
+    with op.enclosing_block as block:
+        x = op.x  # [b, input_dim]
+        h_prev = op.h_prev  # [b, hidden_dim]
+        cs_prev = op.c_prev  # [b, hidden_dim]
+
+        i, cs, f, o, ci, co, h = _lstm_cell_builder(
+            op, x, h_prev, cs_prev, before_op=op
+        )
+
+        # Replace all outputs
+        new_outputs = [i, cs, f, o, ci, co, h]
+        for old_v, new_v in zip(op.outputs, new_outputs):
+            block.replace_uses_of_var_after_op(
+                anchor_op=op, old_var=old_v, new_var=new_v
+            )
+        block.remove_ops([op])
+
+
+def _expand_tf_lstm_block(op):
+    if op.op_type != "tf_lstm_block":
+        raise ValueError()
+
+    with op.enclosing_block as block:
+        x = op.x  # [s, b, input_dim]
+        h_prev = op.h_prev  # [b, hidden_dim]
+        cs_prev = op.c_prev  # [b, hidden_dim]
+
+        # cond and body functions for the while_loop
+        def cond(i, cs_list, h_list):
+            return mb.less(x=i, y=length)
+
+        def body(i, cs_list, h_list):
+            xi = mb.gather(x=x, indices=i, axis=0)
+            h_prev = mb.gather(x=h_list, indices=i, axis=0)
+            cs_prev = mb.gather(x=cs_list, indices=i, axis=0)
+
+            ig, cs, fg, og, ci, co, h = _lstm_cell_builder(op, xi, h_prev, cs_prev)
+
+            counter = mb.add(x=i, y=1)
+
+            return (
+                counter,
+                mb.scatter(data=cs_list, indices=counter, updates=cs),
+                mb.scatter(data=h_list, indices=counter, updates=h),
+            )
+
+        # Allocate two lists: cs & h
+        x_shape = mb.shape(x=x, before_op=op)
+        length = mb.slice_by_index(x=x_shape, begin=[0], end=[1], before_op=op)
+        h_shape = mb.shape(x=h_prev, before_op=op)
+        list_shape = mb.concat(values=[length, h_shape], axis=0, before_op=op)
+        cs_list = mb.fill(shape=list_shape, before_op=op)
+        h_list = mb.fill(shape=list_shape, before_op=op)
+
+        # append the initial state at index 0
+        cs_prev = mb.expand_dims(x=cs_prev, axes=[0], before_op=op)
+        cs_list = mb.concat(values=[cs_prev, cs_list], axis=0, before_op=op)
+        h_prev = mb.expand_dims(x=h_prev, axes=[0], before_op=op)
+        h_list = mb.concat(values=[h_prev, h_list], axis=0, before_op=op)
+
+        _, cs_list, h_list = mb.while_loop(
+            _cond=cond, _body=body, loop_vars=([0], cs_list, h_list), before_op=op
+        )
+
+        # strip the initial state, i.e. the element at index 0
+        begin, end = [1, 0, 0], [0, 0, 0]
+        begin_mask = [False, True, True]
+        end_mask = [True, True, True]
+        cs = mb.slice_by_index(
+            x=cs_list,
+            begin=begin,
+            end=end,
+            begin_mask=begin_mask,
+            end_mask=end_mask,
+            before_op=op,
+        )
+        h = mb.slice_by_index(
+            x=h_list,
+            begin=begin,
+            end=end,
+            begin_mask=begin_mask,
+            end_mask=end_mask,
+            before_op=op,
+        )
+
+        # Replace all outputs
+        new_outputs = [cs, h]
+        for old_v, new_v in zip(
+            [ov for index, ov in enumerate(op.outputs) if index in [1, 6]], new_outputs
+        ):
+            block.replace_uses_of_var_after_op(
+                anchor_op=op, old_var=old_v, new_var=new_v
+            )
+        block.remove_ops([op])
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/ssa_passes/test_passes.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/ssa_passes/test_passes.py
new file mode 100644
index 00000000..5b5d53cc
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/ssa_passes/test_passes.py
@@ -0,0 +1,56 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import copy
+
+import pytest
+
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil import types
+from coremltools.converters.mil.mil.passes.pass_registry import PASS_REGISTRY
+from coremltools.converters.mil.testing_utils import (assert_model_is_valid,
+                                                      assert_same_output_names)
+
+pytest.importorskip("tensorflow", minversion="1.15.0")
+
+
+def test_backfill_make_list_elem_type():
+    # The while_loop writes `update` to `ls` at index i for each iteration
+    # i = 0, ..., num_iters-1.
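+    # Illustrative trace with num_iters == 3: indices 0, 1, and 2 each receive
+    # `update` (shape (2,)), so the final list_gather returns a (3, 2) tensor.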
+
+    elem_shape = (2,)
+
+    @mb.program(
+        input_specs=[mb.TensorSpec(shape=elem_shape),]
+    )
+    def prog(update):
+        def body(i, ls):
+            return mb.add(x=i, y=1), mb.list_write(ls=ls, index=i, value=update)
+
+        def cond(i, ls):
+            return mb.less(x=i, y=num_iters)
+
+        i = 0
+        ls = mb.tf_make_list(init_length=1)
+        num_iters = 3
+        _, final_tensor_list = mb.while_loop(_cond=cond, _body=body, loop_vars=(i, ls))
+        list_len = mb.list_length(ls=final_tensor_list)
+        indices = mb.range_1d(start=0, end=list_len, step=1)
+        return mb.list_gather(ls=final_tensor_list, indices=indices)
+
+    # tf_make_list has no elem_type info
+    make_list_op = prog.find_ops(op_type="tf_make_list", exactly_one=True)[0]
+    assert make_list_op.outputs[0].elem_type == types.unknown
+
+    prev_prog = copy.deepcopy(prog)
+    PASS_REGISTRY["tensorflow::backfill_make_list_elem_type"](prog)
+    assert_same_output_names(prev_prog, prog)
+    prog.validate()
+
+    # tf_make_list is replaced with make_list and should have elem_type now
+    make_list_op = prog.find_ops(op_type="make_list", exactly_one=True)[0]
+    assert make_list_op.outputs[0].elem_type.get_shape() == elem_shape
+
+    assert_model_is_valid(prog, {"update": elem_shape})
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/ssa_passes/tf_lstm_to_core_lstm.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/ssa_passes/tf_lstm_to_core_lstm.py
new file mode 100644
index 00000000..18819629
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/ssa_passes/tf_lstm_to_core_lstm.py
@@ -0,0 +1,308 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import numpy as np
+
+from coremltools import _logger as logger
+from coremltools.converters.mil.mil import (
+    Block,
+    Builder as mb,
+    Operation,
+    Var,
+)
+from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
+from coremltools.converters.mil.mil.passes.helper import block_context_manager
+from coremltools.converters.mil.mil.passes.pass_registry import register_pass
+from coremltools.converters.mil.mil.types.symbolic import is_symbolic
+
+SUPPORTED_TF_LSTM_OPS = ["tf_lstm_block_cell", "tf_lstm_block"]
+
+@register_pass(namespace="tensorflow")
+class tf_lstm_to_core_lstm(AbstractGraphPass):
+    """
+    Try to map the TF dialect ops `tf_lstm_block` and `tf_lstm_block_cell` to
+    `lstm` in the core op set if compatible. They are compatible if all of the
+    following are satisfied:
+
+    - If tf_lstm_block: only the h output is consumed. tf_lstm_block has 7
+      sequence outputs: [i, cs, f, o, ci, co, h]. Each of them (e.g., i) has
+      shape [seq_len, batch, hidden_dim] (see the tf_lstm_block op doc string).
+      core lstm only supports a sequence output for the hidden state h, and thus
+      if any output other than `h` is consumed, we cannot convert to lstm in the
+      core op set.
+
+    - If tf_lstm_block_cell: only the cs and h outputs (outputs[1], outputs[6])
+      are consumed. Similar to above.
+
+    Inputs:
+
+    prog: Program
+    """
+    def apply(self, prog):
+        for f in prog.functions.values():
+            _tf_lstm_to_core_lstm_block(f)
+
+@block_context_manager
+def _tf_lstm_to_core_lstm_block(block: Block):
+    # shallow copy hides changes on f.operations during the loop
+    for op in block.operations:
+        for b in op.blocks:
+            _tf_lstm_to_core_lstm_block(b)
+
+        if op.op_type in SUPPORTED_TF_LSTM_OPS:
+            if _try_replace_with_core_lstm(op):
+                logger.info("Successfully map {} to lstm".format(op.op_type))
+            else:
+                logger.info("Unable to map {} to lstm".format(op.op_type))
+
+def _try_get_last_cell_state_in_tf_lstm_block(op: Operation) -> Var:
+    """
+    Parameters
+    ----------
+    op: Operation
+        Must have op type "tf_lstm_block"
+
+    Returns
+    -------
+    Var, a var representing the last cell state in the lstm. None if the check fails.
+
+    One of the outputs of the op "tf_lstm_block" is the cell state (cs), which has shape [seq_len, batch, feat].
+    That is, it is the cell state tensor of the lstm, which includes all the time steps.
+    Normally, this cannot be mapped to the MIL lstm op's cell state output, since that op only
+    returns the last time step of the cell state, which is a tensor of shape [batch, feat].
+    However, if the cell state output of "tf_lstm_block" is sliced, before being used anywhere else,
+    and sliced in such a way that it extracts just the last time step of the seq dimension, then
+    it can indeed be mapped to MIL's lstm op.
+    This utility function detects this condition. If true, it returns the var
+    that corresponds to the rank 2 sliced cell state.
+
+    In particular, the following pattern is detected:
+
+    Input pattern:
+    ..., cs, ... = tf_lstm_block(...) # [seq_len, batch, feat]
+    extracted_cell_state = slice_by_index(x=cs, ...) # [batch, feat] or [1, batch, feat], such that the seq dim. is sliced at the last time step
+    out = op(extracted_cell_state)
+
+    The "cs" var can feed into multiple "slice_by_index" ops, some of which slice it into [batch, feat] and
+    some into [1, batch, feat] shaped tensors. This scenario is handled in the following manner:
+
+    step 1: verify that the output "cs" only feeds into slice_by_index ops
+    step 2: add a slice_by_index op to the graph, which slices the last time step and creates a
+            tensor, "last_cs", of shape [batch, feat]
+    step 3: add an expand_dims op to the graph which takes in "last_cs" and expands it to create
+            a tensor, "expanded_last_cs", of shape [1, batch, feat]
+    step 4: now, iterate over all the child ops of "cs". Each one of these will be of type "slice_by_index".
+            Verify that they are slicing only the last time step. If not, exit out of the function by returning None.
+            Once verified, replace its output var with either "last_cs" or "expanded_last_cs", depending on its shape.
+    step 5: remove all the child ops of "cs". Return "last_cs"
+    """
+    if op.op_type != "tf_lstm_block":
+        raise ValueError("op must have type 'tf_lstm_block'. Got {}".format(op.op_type))
+
+    cs = op.outputs[1]
+    if len(cs.child_ops) == 0 and len(cs.consuming_blocks) == 0:
+        return cs
+    if len(cs.consuming_blocks) > 1:
+        return None
+    if not all([child_op.op_type == "slice_by_index" for child_op in cs.child_ops]):
+        return None
+    child_ops = cs.child_ops[:]
+    block = op.enclosing_block
+
+    # extract the last time step of the cell states
+    last_cs = mb.slice_by_index(
+        x=cs,
+        begin=[-1, 0, 0],
+        end=[-1, 0, 0],
+        begin_mask=[False, True, True],
+        end_mask=[False, True, True],
+        squeeze_mask=[True, False, False],
+        before_op=child_ops[0],
+    )  # this is of shape [batch, feat]
+    expanded_last_cs = mb.expand_dims(
+        x=last_cs, axes=[0], before_op=child_ops[0]
+    )  # shape: [1, batch, feat]
+
+    # for each child op, which is a "slice_by_index" op, verify the following conditions:
+    # - the input is a rank 3 tensor, of shape [seq_len, batch, feat]
+    # - the output is either a rank 2 tensor of shape [batch, feat] or rank 3 of shape [1, batch, feat]
+    # - the first dimension is sliced at the last index,
+    #   so if it is positive it should be seq_len - 1, and if negative, it should be -1
+    for slice_op in child_ops:
+        # if any of the input arguments of the slice op is not compile-time known, the check fails early
+        for input in slice_op.inputs.values():
+            if input == slice_op.x:
+                continue
+            if input is None or input.val is None:
+                return None
+
+        x = slice_op.x
+        out = slice_op.outputs[0]
+        # check input rank
+        if x.rank != 3:
+            return None
+        # check output rank and shape
+        if out.rank not in (2, 3):
+            return None
+        if out.shape[-2:] != x.shape[-2:]:
+            return None
+        if out.rank == 3 and out.shape[0] != 1:
+            return None
+
+        # check that only the last time step is being extracted
+        begin = slice_op.begin.val.tolist()
+        end = slice_op.end.val.tolist()
+        stride = slice_op.stride.val.tolist()
+        begin_mask = slice_op.begin_mask.val.tolist()
+        end_mask = slice_op.end_mask.val.tolist()
+        squeeze_mask = slice_op.squeeze_mask.val.tolist()
+
+        # the stride for the first dimension must be 1
+        if stride[0] != 1:
+            return None
+
+        # check if the first dimension is sliced exactly for the last time step
+        if is_symbolic(x.shape[0]):
+            """
+            When the first dimension is symbolic, we check for the following condition to be true:
+            - begin[0] == -1 and begin_mask[0] == False
+            If this condition is not met, we return None and exit
+            """
+            if begin[0] != -1 or begin_mask[0]:
+                return None
+        else:
+            time = x.shape[0]
+            begin = [i + time if i < 0 else i for i in begin]
+            end = [i + time if i < 0 else i for i in end]
+
+            begin_time = 0 if begin_mask[0] else begin[0]
+            end_time = time if end_mask[0] else end[0]
+            if squeeze_mask[0]:
+                if begin_time != time - 1:
+                    return None
+            else:
+                if end_time - begin_time != 1:
+                    return None
+                if begin_time != time - 1:
+                    return None
+
+        block.replace_uses_of_var_after_op(
+            anchor_op=slice_op,
+            old_var=slice_op.outputs[0],
+            new_var=last_cs if len(out.shape) == 2 else expanded_last_cs,
+        )
+
+    block.remove_ops(child_ops)
+    return last_cs
+
+
+def _try_replace_with_core_lstm(op: Operation) -> bool:
+    """
+    Inputs:
+
+    op (Operation): op.op_type must be 'tf_lstm_block_cell' or `tf_lstm_block`
+
+    Returns:
+
+    True if the op can be represented by the mb.lstm op in SSA; False otherwise.
+    """
+    def _check_unsupported_outputs(unsupported_outputs):
+        for ov in unsupported_outputs:
+            if len(ov.child_ops) > 0 or len(ov.consuming_blocks) > 0:
+                return False
+        return True
+
+    # Check for an unsupported configuration: when peephole is present
+    if op.use_peephole.val:
+        return False
+
+    # Check if the tf lstm op can be replaced with the coreml lstm op.
+    # We check the following two conditions:
+    # (1) The outputs (i, f, o, ci, co) must not be consumed, since there are no corresponding outputs on the Core ML LSTM
+    # (2) For the tf_lstm_block op, only the last time step of the cell state can be used
+    # Here is an example of a valid supported configuration:
+    #     _, cell_states, _, _, _, _, _ = tf_lstm_block.outputs
+    #     output = cell_states[-1, 1:2, :]
+    # And here is an example that coreml cannot handle currently:
+    #     _, cell_states, _, _, _, _, _ = tf_lstm_block.outputs
+    #     output = cell_states[:2, :, :]
+    i, cs, f, o, ci, co, h = op.outputs
+    unsupported_outputs = [i, f, o, ci, co]
+    if not _check_unsupported_outputs(unsupported_outputs):
+        return False
+
+    if op.op_type == "tf_lstm_block":
+        cs = _try_get_last_cell_state_in_tf_lstm_block(op)
+        if cs is None:
+            return False
+
+    # op is compatible with lstm
+
+    mb_peep = None
+    if op.use_peephole.val:
+        mb_peep = np.stack(
+            [op.weight_peep_i.val, op.weight_peep_f.val, op.weight_peep_o.val]
+        )
+
+    # Set weights. The layout of the weight in TF1 is icfo (input, cell, forget, output gate).
+    # Need to convert it to ifoc for coreml
+    tf_w = op.weight.val  # [input_dim+hidden_dim, 4*hidden_dim] in icfo layout
+    tf_w_i, tf_w_c, tf_w_f, tf_w_o = np.split(tf_w, 4, axis=1)
+    w = np.concatenate([tf_w_i, tf_w_f, tf_w_o, tf_w_c], axis=1)
+    w = np.transpose(w, [1, 0])
+    hidden_dim = w.shape[0] // 4
+    input_dim = w.shape[1] - hidden_dim
+    # Split input and hidden weights
+    w_ih, w_hh = np.split(w, [input_dim], axis=1)
+
+    # Bias is icfo. Convert it to the ssa LSTM's ifoc layout
+    tf_b = op.bias.val
+    tf_b_i, tf_b_c, tf_b_f, tf_b_o = np.split(tf_b, 4, axis=0)
+    tf_b_f += op.forget_bias.val  # add forget bias to bias
+    bias = np.concatenate([tf_b_i, tf_b_f, tf_b_o, tf_b_c], axis=0)
+
+    cell_clip = None if op.cell_clip is None else op.cell_clip.val
+
+    output_sequence = op.op_type == "tf_lstm_block"
+
+    block = op.enclosing_block
+    # x: [seq_len, batch, input_dim]
+    if op.op_type == "tf_lstm_block_cell":
+        x = mb.expand_dims(x=op.x, axes=[0], before_op=op)
+    elif op.op_type == "tf_lstm_block":
+        x = op.x
+    else:
+        raise ValueError("tf lstm op {} not supported. Only {} supported".format(op.op_type, SUPPORTED_TF_LSTM_OPS))
+
+    new_h_all, new_h, new_cs = mb.lstm(
+        x=x,
+        initial_c=op.c_prev,
+        initial_h=op.h_prev,
+        weight_ih=w_ih,
+        weight_hh=w_hh,
+        bias=bias,
+        recurrent_activation="sigmoid",
+        cell_activation="tanh",
+        activation="tanh",
+        peephole=mb_peep,
+        clip=cell_clip,
+        output_sequence=output_sequence,
+        name=op.name,
+        before_op=op,
+    )
+
+    ops_to_remove = [op]
+    block.replace_uses_of_var_after_op(anchor_op=op, old_var=cs, new_var=new_cs)
+    if op.op_type == "tf_lstm_block_cell":
+        block.replace_uses_of_var_after_op(anchor_op=op, old_var=h, new_var=new_h)
+    elif op.op_type == "tf_lstm_block":
+        block.replace_uses_of_var_after_op(anchor_op=op, old_var=h, new_var=new_h_all)
+        if cs.op != op:
+            ops_to_remove.append(cs.op)
+    else:
+        raise ValueError("tf lstm op {} not supported. Only {} supported".format(op.op_type, SUPPORTED_TF_LSTM_OPS))
+
+    block.remove_ops(ops_to_remove)
+    return True
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/__init__.py
new file mode 100644
index 00000000..61aafff4
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/__init__.py
@@ -0,0 +1,4 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_composite_ops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_composite_ops.py
new file mode 100644
index 00000000..6d813d6f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_composite_ops.py
@@ -0,0 +1,69 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import itertools
+
+import numpy as np
+import pytest
+
+from coremltools.converters.mil.frontend.tensorflow.test.testing_utils import (
+    TensorFlowBaseTest, make_tf_graph)
+# Importing _TF_OPS_REGISTRY to ensure that overriding an existing TF op does not
+# break testing of the default op.
+# pytest imports all the tests, so the override would otherwise invoke the custom
+# op where the default one is expected.
+# In real use cases, importing the following is not recommended!
+from coremltools.converters.mil.frontend.tensorflow.tf_op_registry import (
+    _TF_OPS_REGISTRY, register_tf_op)
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.testing_reqs import backends, compute_units
+from coremltools.converters.mil.testing_utils import random_gen
+
+tf = pytest.importorskip("tensorflow")
+
+class TestCompositeOp(TensorFlowBaseTest):
+    @pytest.fixture(scope="class")
+    def create_custom_selu(self):
+        default_selu = _TF_OPS_REGISTRY.get("Selu", None)
+
+        @register_tf_op(tf_alias=[], override=True)
+        def Selu(context, node):
+            x = context[node.inputs[0]]
+            alpha = 1.6732631921768188
+            lamda = 1.0507010221481323  # SELU scale ('lambda' is a reserved word in Python)
+            out_elu = mb.elu(x=x, alpha=alpha)
+            out = mb.mul(x=out_elu, y=lamda, name=node.name)
+            context.add(node.name, out)
+
+        yield
+
+        _TF_OPS_REGISTRY["Selu"] = default_selu
+
+    @pytest.mark.parametrize(
+        "compute_unit, backend, rank",
+        itertools.product(
+            compute_units,
+            backends,
+            list(range(1, 5))
+        ),
+    )
+    @pytest.mark.usefixtures("create_custom_selu")
+    def test_selu(self, compute_unit, backend, rank):
+        input_shape = np.random.randint(low=1, high=6, size=rank)
+
+        @make_tf_graph([input_shape])
+        def build_model(x):
+            return tf.keras.activations.selu(x)
+
+        model, inputs, outputs = build_model
+
+        input_values = [random_gen(input_shape, -10.0, 10.0)]
+        input_dict = dict(zip(inputs, input_values))
+        TensorFlowBaseTest.run_compare_tf(
+            model,
+            input_dict,
+            outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_custom_ops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_custom_ops.py
new file mode 100644
index 00000000..6edcec0e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_custom_ops.py
@@ -0,0 +1,288 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import itertools
+
+import numpy as np
+import pytest
+
+from coremltools.converters.mil.frontend.tensorflow.test.testing_utils import (
+    TensorFlowBaseTest, make_tf_graph)
+# Importing _TF_OPS_REGISTRY to ensure that overriding an existing TF op does not
+# break testing of the default op.
+# pytest imports all the tests, so the override would otherwise invoke the custom
+# op where the default one is expected.
+# In real use cases, importing the following is not recommended!
+from coremltools.converters.mil.frontend.tensorflow.tf_op_registry import (
+    _TF_OPS_REGISTRY, register_tf_op)
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil import Operation, types
+from coremltools.converters.mil.mil.input_type import (DefaultInputs,
+                                                       InputSpec,
+                                                       TensorInputType)
+from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op
+from coremltools.converters.mil.mil.types.symbolic import is_symbolic
+from coremltools.converters.mil.testing_reqs import backends, compute_units
+from coremltools.converters.mil.testing_utils import random_gen
+
+tf = pytest.importorskip("tensorflow")
+
+
+class TestCustomMatMul:
+    # Define an SSA custom op for sparse matmul.
+    # This will map to `custom_op` in SSA with binding information
+    # that binds the input spec to the custom implementation.
+    @register_op(is_custom_op=True)
+    class custom_sparse_matmul(Operation):
+        # Define the input spec for the current op
+        input_spec = InputSpec(
+            x=TensorInputType(type_domain="T"),
+            y=TensorInputType(type_domain="T"),
+            transpose_x=TensorInputType(const=True, optional=True, type_domain=types.bool),
+            transpose_y=TensorInputType(const=True, optional=True, type_domain=types.bool),
+            x_is_sparse=TensorInputType(const=True, optional=True, type_domain=types.bool),
+            y_is_sparse=TensorInputType(const=True, optional=True, type_domain=types.bool),
+        )
+
+        type_domains = {
+            "T": (types.fp16, types.fp32),
+        }
+
+        # Specify bindings for the custom op: the inputs and parameters
+        # required for the custom op to be synced with the Swift API
+        bindings = {
+            "class_name": "SparseMatMul",
+            "input_order": ["x", "y"],
+            "parameters": ["transpose_x", "transpose_y", "x_is_sparse", "y_is_sparse"],
+            "description": "Custom Sparse MatMul Layer",
+        }
+
+        def default_inputs(self):
+            return DefaultInputs(
+                transpose_x=False,
+                transpose_y=False,
+                x_is_sparse=False,
+                y_is_sparse=False,
+            )
+
+        def type_inference(self):
+            x_type = self.x.dtype
+            x_shape = self.x.shape
+            y_shape = self.y.shape
+            # For illustration purposes, assume the given shapes are valid.
+            # Ideally, the transpose_? and ?_is_sparse parameters should be taken
+            # into consideration when computing the output shape.
+            return types.tensor(x_type, [x_shape[0], y_shape[1]])
+
+    # TensorFlow Sparse MatMul Op
+    @register_tf_op
+    def SparseMatMul(context, node):
+        a = context[node.inputs[0]]
+        b = context[node.inputs[1]]
+        transpose_a = node.attr.get("transpose_a", False)
+        transpose_b = node.attr.get("transpose_b", False)
+        a_is_sparse = node.attr.get("a_is_sparse", False)
+        b_is_sparse = node.attr.get("b_is_sparse", False)
+
+        x = mb.custom_sparse_matmul(
+            x=a,
+            y=b,
+            transpose_x=transpose_a,
+            transpose_y=transpose_b,
+            x_is_sparse=a_is_sparse,
+            y_is_sparse=b_is_sparse,
+            name=node.name,
+        )
+        context.add(node.name, x)
+
+
+    @pytest.mark.parametrize(
+        "compute_unit, backend, transpose_a, transpose_b," "a_is_sparse, b_is_sparse, b_is_const",
+        itertools.product(
+            compute_units,
+            backends,
+            [True, False],
+            [True, False],
+            [True, False],
+            [True, False],
+            [True, False],
+        ),
+    )
+    def test_tf(
+        self, compute_unit, backend, transpose_a, transpose_b, a_is_sparse, b_is_sparse, b_is_const,
+    ):
+        if backend[0] == 'mlprogram':
+            pytest.skip("Custom layer not supported with ML Program backend")
+
+        rank = 2
+        input_shape = list(np.random.randint(low=3, high=100, size=1)) * rank
+        if b_is_const:
+            @make_tf_graph([input_shape])
+            def build_model(x):
+                ref = tf.compat.v1.sparse_matmul(
+                    x,
+                    random_gen(input_shape),
+                    transpose_a=transpose_a,
+                    transpose_b=transpose_b,
+                    a_is_sparse=a_is_sparse,
+                    b_is_sparse=b_is_sparse,
+                )
+                return ref
+            input_values = [random_gen(input_shape, -1.0, 1.0)]
+        else:
+            @make_tf_graph([input_shape, input_shape])
+            def build_model(x, y):
+                ref = tf.compat.v1.sparse_matmul(
+                    x,
+                    y,
+                    transpose_a=transpose_a,
+                    transpose_b=transpose_b,
+                    a_is_sparse=a_is_sparse,
+                    b_is_sparse=b_is_sparse,
+                )
+                return ref
+            input_values = [random_gen(input_shape, -1.0, 1.0), random_gen(input_shape, -1.0, 1.0)]
+
+        model, inputs, outputs = build_model
+        input_dict = dict(zip(inputs, input_values))
+        spec, _, _, _, _, _ = TensorFlowBaseTest.run_compare_tf(
+            model,
+            input_dict,
+            outputs,
+            compute_unit=compute_unit,
+            frontend_only=True,
+            backend=backend,
+        )
+
+        layers = spec.neuralNetwork.layers
+        assert layers[-1].custom is not None, "Expecting a custom layer"
+        assert (
+            "SparseMatMul" == layers[-1].custom.className
+        ), "Custom Layer class name mismatch"
+        assert (
+            transpose_a == layers[-1].custom.parameters["transpose_x"].boolValue
+        ), "Incorrect parameter value for transpose_x"
+        assert (
+            transpose_b == layers[-1].custom.parameters["transpose_y"].boolValue
+        ), "Incorrect parameter value for transpose_y"
+        assert (
+            a_is_sparse == layers[-1].custom.parameters["x_is_sparse"].boolValue
+        ), "Incorrect parameter value for x_is_sparse"
+        assert (
+            b_is_sparse == layers[-1].custom.parameters["y_is_sparse"].boolValue
+        ), "Incorrect parameter value for y_is_sparse"
+
+        assert len(layers) == (2 if b_is_const else 1)
+
+
+class TestCustomTopK:
+    @pytest.fixture(scope="class")
+    def create_custom_TopK(self):
+        # Define the SSA TopK op
+        @register_op(is_custom_op=True)
+        class custom_topk(Operation):
+            input_spec = InputSpec(
+                x=TensorInputType(type_domain="T"),
+                k=TensorInputType(const=True, optional=True, type_domain=types.int32),
+                axis=TensorInputType(const=True, optional=True, type_domain=types.int32),
+                sorted=TensorInputType(const=True, optional=True, type_domain=types.bool),
+            )
+
+            type_domains = {
+                "T": (types.fp16, types.fp32),
+            }
+
+            bindings = {
+                "class_name": "TopK",
+                "input_order": ["x"],
+                "parameters": ["k", "axis", "sorted"],
+                "description": "Top K Custom layer",
+            }
+
+            def default_inputs(self):
+                return DefaultInputs(
+                    k=1,
+                    axis=-1,
+                    sorted=False,
+                )
+
+            def __init__(self, **kwargs):
+                super(custom_topk, self).__init__(**kwargs)
+
+            def type_inference(self):
+                x_type = self.x.dtype
+                x_shape = self.x.shape
+                k = self.k.val
+                axis = self.axis.val
+
+                if not is_symbolic(x_shape[axis]) and k > x_shape[axis]:
+                    msg = "K={} is greater than size of the given axis={}"
+                    raise ValueError(msg.format(k, axis))
+
+                ret_shape = list(x_shape)
+                ret_shape[axis] = k
+                return types.tensor(x_type, ret_shape), types.tensor(types.int32, ret_shape)
+
+        # The following is to ensure the TopK implemented in the tf converter is
+        # tested on the default path with the appropriate conversion function.
+        # Save the default tf TopK so it can be restored after the test.
+        default_tf_topk = _TF_OPS_REGISTRY.get("TopKV2", None)
+
+        # Override the TopK op with the override=True flag
+        @register_tf_op(tf_alias=["TopKV2"], override=True)
+        def CustomTopK(context, node):
+            x = context[node.inputs[0]]
+            k = context[node.inputs[1]]
+            sorted = node.attr.get("sorted", False)
+            x = mb.custom_topk(x=x, k=k.val, axis=-1, sorted=sorted, name=node.name)
+            context.add(node.name, x)
+
+        yield
+
+        _TF_OPS_REGISTRY["TopKV2"] = default_tf_topk
+
+    @pytest.mark.parametrize(
+        "compute_unit, backend, rank, k",
+        itertools.product(
+            compute_units,
+            backends,
+            [rank for rank in range(1, 4)],
+            [1, 2],
+        ),
+    )
+    @pytest.mark.usefixtures("create_custom_TopK")
+    def test_tf(self, compute_unit, backend, rank, k):
+        if backend[0] == 'mlprogram':
+            pytest.skip("Custom layer not supported with ML Program backend")
+
+        input_shape = np.random.randint(low=3, high=6, size=rank)
+
+        @make_tf_graph([input_shape])
+        def build_model(x):
+            ref = tf.math.top_k(x, k=k, sorted=True)
+            return ref[1], ref[0]
+
+        model, inputs, outputs = build_model
+        input_values = [random_gen(input_shape, -1.0, 1.0)]
+        input_dict = dict(zip(inputs, input_values))
+        spec, _, _, _, _, _ = TensorFlowBaseTest.run_compare_tf(
+            model,
+            input_dict,
+            outputs,
+            compute_unit=compute_unit,
+            frontend_only=True,
+            backend=backend,
+        )
+
+        layers = spec.neuralNetwork.layers
+        assert layers[-1].custom is not None, "Expecting a custom layer"
+        assert (
+            "TopK" == layers[-1].custom.className
+        ), "Custom Layer class name mismatch"
+        assert (
+            k == layers[-1].custom.parameters["k"].intValue
+        ), "Incorrect parameter value for k"
+        assert (
+            layers[-1].custom.parameters["sorted"].boolValue is True
+        ), "Incorrect parameter value for sorted"
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_graphs.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_graphs.py
new file mode 100644
index 00000000..381c2d62
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_graphs.py
@@ -0,0 +1,45 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import itertools
+
+import numpy as np
+import pytest
+
+from coremltools.converters.mil.frontend.tensorflow.test.testing_utils import (
+    TensorFlowBaseTest, make_tf_graph)
+from coremltools.converters.mil.testing_reqs import backends, compute_units
+
+tf = pytest.importorskip("tensorflow")
+
+
+# TODO (rdar://103050703): Move it to test_ops because it only tests tf ops instead of graphs.
+class TestTFGraphs(TensorFlowBaseTest):
+    @pytest.mark.parametrize(
+        "compute_unit, backend", itertools.product(compute_units, backends)
+    )
+    def test_masked_input(self, compute_unit, backend):
+
+        input_shape = [4, 10, 8]
+        val = np.random.rand(*input_shape).astype(np.float32)
+
+        @make_tf_graph([input_shape])
+        def build_model(input):
+            sliced_input = input[..., 4]
+            mask = tf.where(sliced_input > 0)
+            masked_input = tf.gather_nd(input, mask)
+            return masked_input
+
+        model, inputs, outputs = build_model
+
+        input_values = [val]
+        input_dict = dict(zip(inputs, input_values))
+        TensorFlowBaseTest.run_compare_tf(
+            model,
+            input_dict,
+            outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_load.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_load.py
new file mode 100644
index 00000000..175aaac0
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_load.py
@@ -0,0 +1,435 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import os
+import shutil
+import tempfile
+
+import numpy as np
+import pytest
+
+import coremltools as ct
+import coremltools.converters as converter
+import coremltools.proto.FeatureTypes_pb2 as ft
+from coremltools import EnumeratedShapes, ImageType, RangeDim, TensorType
+from coremltools._deps import _HAS_TF_1, _IS_MACOS, MSG_TF1_NOT_FOUND
+from coremltools.converters.mil.frontend.tensorflow.converter import \
+    TFConverter
+from coremltools.converters.mil.frontend.tensorflow.test.testing_utils import (
+    TensorFlowBaseTest, get_tf_keras_io_names, make_tf_graph)
+from coremltools.converters.mil.testing_utils import random_gen
+
+tf = pytest.importorskip("tensorflow")
+frontend = "tensorflow"
+
+class TestTfModelInputsOutputs(TensorFlowBaseTest):
+    def setup(self):
+        self.saved_model_dir = tempfile.mkdtemp()
+        _, self.model_path_h5 = tempfile.mkstemp(
+            suffix=".h5", prefix=self.saved_model_dir
+        )
+        _, self.model_path_pb = tempfile.mkstemp(
+            suffix=".pb", prefix=self.saved_model_dir
+        )
+
+    def teardown(self):
+        if os.path.exists(self.saved_model_dir):
+            shutil.rmtree(self.saved_model_dir)
+
+    def test_infer_inputs(self):
+        x_shape = (3, 4, 5)
+
+        @make_tf_graph([x_shape])
+        def build_model(x):
+            return tf.nn.relu(x)
+
+        model, inputs, outputs = build_model
+        if not isinstance(outputs, (tuple, list)):
+            outputs = [outputs]
+
+        output_names = [
+            j if isinstance(j, str) else j.op.name for j in outputs
+        ]
+        mlmodel = converter.convert(model, outputs=output_names)
+        assert mlmodel is not None
+
+        input_values = [random_gen(x_shape, -10.0, 10.0)]
+        input_dict = dict(zip(inputs, input_values))
+        TensorFlowBaseTest.run_compare_tf(model, input_dict, outputs)
+
+    def test_infer_outputs(self):
+        x_shape = (3, 4, 5)
+
+        @make_tf_graph([x_shape])
+        def build_model(x):
+            return tf.nn.relu(x)
+
+        model, inputs, outputs = build_model
+        input_name = (
+            inputs[0] if isinstance(inputs[0], str) else inputs[0].op.name
+        )
+        mlmodel = converter.convert(model, inputs=[TensorType(input_name, (3, 4, 5))])
+        assert mlmodel is not None
+
+        input_values = [random_gen(x_shape, -10.0, 10.0)]
+        input_dict = dict(zip(inputs, input_values))
+        TensorFlowBaseTest.run_compare_tf(model, input_dict, outputs)
+
+    def test_infer_inputs_and_outputs(self):
+        x_shape = (3, 4, 5)
+
+        @make_tf_graph([x_shape])
+        def build_model(x):
+            return tf.nn.relu(x)
+
+        model, inputs, outputs = build_model
+        mlmodel = converter.convert(model)
+        assert mlmodel is not None
+
+        input_values = [random_gen(x_shape, -10.0, 10.0)]
+        input_dict = dict(zip(inputs, input_values))
+        TensorFlowBaseTest.run_compare_tf(model, input_dict, outputs)
+
+    def test_extract_sub_model(self):
+        x_shape = (3, 4, 5)
+        y_shape = (3, 4, 5)
+
+        @make_tf_graph([x_shape, y_shape])
+        def build_model(x, y):
+            return tf.nn.relu(x), tf.math.add(x, y)
+
+        model, inputs, outputs = build_model
+        if isinstance(outputs[0], str):
+            first_output_name = outputs[0]
+        else:
+            first_output_name = outputs[0].name.split(":")[0]
+        mlmodel = converter.convert(model, outputs=[first_output_name])
+        assert mlmodel is not None
+
+    def test_auto_image_nhwc_input_names(self):
+        x_shape = (4, 5, 3)
+
+        @make_tf_graph([x_shape])
+        def build_model(x):
+            return tf.nn.relu(x)
+
+        model, inputs, outputs = build_model
+
+        mlmodel = converter.convert(model, inputs=[ImageType()])
+        assert mlmodel is not None
+
+    def test_auto_image_nchw_input_names(self):
+        x_shape = (3, 4, 5)
+
+        @make_tf_graph([x_shape])
+        def build_model(x):
+            return tf.nn.relu(x)
+
+        model, inputs, outputs = build_model
+
+        mlmodel = converter.convert(model, inputs=[ImageType(channel_first=True)])
+        assert mlmodel is not None
+
+    @pytest.mark.parametrize(
+        "target",
+        [ct.target.iOS13, ct.target.macOS15, ct.target.watchOS6, ct.target.tvOS13],
+    )
+    def test_invalid_deployment_target_cumsum(self, target):
+        x_shape = (3, 4, 5)
+
+        @make_tf_graph([x_shape])
+        def build_model(x):
+            return tf.math.cumsum(x, axis=-1, reverse=False, exclusive=False)
+
+        model, inputs, outputs = build_model
+
+        with pytest.raises(ValueError) as e:
+            converter.convert(model, minimum_deployment_target=target)
+        e.match(
+            r"Provided minimum deployment target requires model to be of version 4 but converted model "
+            r"uses following features which are available from version 5 onwards. "
+            r"Please use a higher minimum deployment target to convert. \n 1. Cumsum operation\n"
+        )
+
+    @pytest.mark.parametrize(
+        "target",
+        [ct.target.iOS14, ct.target.macOS16, ct.target.watchOS7, ct.target.tvOS14],
+    )
+    def test_valid_deployment_target_cumsum(self, target):
+        x_shape = (3, 4, 5)
+
+        @make_tf_graph([x_shape])
+        def build_model(x):
+            return tf.math.cumsum(x, axis=-1, reverse=False, exclusive=False)
+
+        model, inputs, outputs = build_model
+
+        # successful conversion
+        converter.convert(model, minimum_deployment_target=target)
+
+    def test_invalid_output_names(self):
+        x_shape = (3, 4, 5)
+
+        @make_tf_graph([x_shape])
+        def build_model(x):
+            return tf.nn.relu(x)
+
+        model, inputs, outputs = build_model
+        with pytest.raises(AssertionError) as e:
+            converter.convert(model, source=frontend, outputs=["invalid_name"])
+        e.match(r".* is not in graph")
+
+    def test_missing_placeholder_shape(self):
+        x_shape = None  # Missing Placeholder shape
+
+        @make_tf_graph([x_shape])
+        def build_model(x):
+            return tf.nn.relu(x)
+
+        model, inputs, outputs = build_model
+        with pytest.raises(ValueError) as e:
+            converter.convert(model, source=frontend)
+        e.match(r"Unable to determine the shape of input .*")
+
+        mlmodel = converter.convert(model, source=frontend,
+                                    inputs=[ct.TensorType(shape=(1,))])
+        assert mlmodel is not None
+
+    @pytest.mark.skip(reason="Rank-0 input is not supported")
+    def test_scalar_placeholder_shape(self):
+        x_shape = ()  # Scalar Placeholder Shape
+
+        @make_tf_graph([x_shape])
+        def build_model(x):
+            return tf.nn.relu(x)
+
+        model, inputs, outputs = build_model
+        mlmodel = converter.convert(model, source=frontend)
+        assert mlmodel is not None
+
+        input_values = [random_gen(x_shape, -10.0, 10.0)]
+        input_dict = dict(zip(inputs, input_values))
+        TensorFlowBaseTest.run_compare_tf(model, input_dict, outputs)
+
+    def test_shaping_utils(self):
+        @make_tf_graph([(None, 4, 5)])
+        def build_flexible_model(x):
+            return tf.nn.relu(x)
+
+        model, inputs, outputs = build_flexible_model
+        input_name = TFConverter._get_tensor_name(inputs[0])
+        output_name = TFConverter._get_tensor_name(outputs[0])
+
+        # static flexible shape
+        mlmodel = converter.convert(
+            model,
+            inputs=[
+                # Use TF's input shapes (None, 4, 5)
+                TensorType(name=input_name)
+            ],
+            outputs=[output_name]
+        )
+        assert mlmodel is not None
+        input_values = [random_gen((3, 4, 5), -10.0, 10.0)]
+        input_dict = {input_name: input_values[0]}
+        if _IS_MACOS:
+            ret = mlmodel.predict(input_dict)
+            assert np.allclose(ret[output_name], np.maximum(input_values[0], 0.0))
+
+        # Enumerated shapes
+        inputs_shape = [
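+            # EnumeratedShapes restricts the compiled model to a finite set of input
+            # shapes; predicting with any other shape (e.g. (5, 4, 5) below) is expected to raise.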
+            TensorType(input_name, EnumeratedShapes(shapes=[(3, 4, 5), (4, 4, 5)]))
+        ]
+        mlmodel = converter.convert(model, inputs=inputs_shape, outputs=[output_name])
+        assert mlmodel is not None
+        input_values = [random_gen((3, 4, 5), -10.0, 10.0)]
+        input_dict = {input_name: input_values[0]}
+        if _IS_MACOS:
+            ret = mlmodel.predict(input_dict)
+            assert np.allclose(ret[output_name], np.maximum(input_values[0], 0.0))
+
+        input_values = [random_gen((4, 4, 5), -10.0, 10.0)]
+        input_dict = {input_name: input_values[0]}
+        if _IS_MACOS:
+            ret = mlmodel.predict(input_dict)
+            assert np.allclose(ret[output_name], np.maximum(input_values[0], 0.0))
+
+        if _IS_MACOS:
+            with pytest.raises(RuntimeError):
+                input_values = [random_gen((5, 4, 5), -10.0, 10.0)]
+                input_dict = {input_name: input_values[0]}
+                ret = mlmodel.predict(input_dict)
+
+        # Ranged shape
+        inputs_shape = [TensorType(input_name, [RangeDim(3, 5), 4, 5])]
+        mlmodel = converter.convert(model, inputs=inputs_shape, outputs=[output_name])
+        assert mlmodel is not None
+        input_values = [random_gen((3, 4, 5), -10.0, 10.0)]
+        input_dict = {input_name: input_values[0]}
+        if _IS_MACOS:
+            ret = mlmodel.predict(input_dict)
+            assert np.allclose(ret[output_name], np.maximum(input_values[0], 0.0))
+
+        input_values = [random_gen((4, 4, 5), -10.0, 10.0)]
+        input_dict = {input_name: input_values[0]}
+        if _IS_MACOS:
+            ret = mlmodel.predict(input_dict)
+            assert np.allclose(ret[output_name], np.maximum(input_values[0], 0.0))
+
+        if _IS_MACOS:
+            with pytest.raises(RuntimeError):
+                input_values = [random_gen((2, 4, 5), -10.0, 10.0)]
+                input_dict = {input_name: input_values[0]}
+                ret = mlmodel.predict(input_dict)
+
+    def test_default_data_types(self):
+        @make_tf_graph([(2, 2)])
+        def build_model(x):
+            return tf.nn.relu(x)
+
+        model, inputs, outputs = build_model
+        mlmodel = converter.convert(model)
+        assert mlmodel is not None
+        spec = mlmodel.get_spec()
+
+        # Defaults should be FLOAT32 instead of DOUBLE
+        it = spec.description.input[0].type.multiArrayType.dataType
+        assert it == ft.ArrayFeatureType.ArrayDataType.Value("FLOAT32")
+        ot = spec.description.output[0].type.multiArrayType.dataType
+        assert ot == ft.ArrayFeatureType.ArrayDataType.Value("FLOAT32")
+
+
+@pytest.mark.skipif(not _HAS_TF_1, reason=MSG_TF1_NOT_FOUND)
+class TestTf1ModelFormats:
+    def setup(self):
+        self.saved_model_dir = tempfile.mkdtemp()
+        _, self.model_path_h5 = tempfile.mkstemp(
+            suffix=".h5", prefix=self.saved_model_dir
+        )
+        _, self.model_path_pb = tempfile.mkstemp(
+            suffix=".pb", prefix=self.saved_model_dir
+        )
+
+    def teardown(self):
+        if os.path.exists(self.saved_model_dir):
+            shutil.rmtree(self.saved_model_dir)
+
+    def test_graph_def(self):
+        with tf.Graph().as_default() as graph:
+            x = tf.placeholder(tf.float32, shape=(3, 4, 5))
+            out = tf.nn.relu(x)
+        mlmodel = converter.convert(
+            graph, inputs=[TensorType(x.op.name, (3, 4, 5))], outputs=[out.op.name]
+        )
+        assert mlmodel is not None
+
+    def test_graph_def_file(self):
+        with tf.Graph().as_default() as graph:
+            x = tf.placeholder(tf.float32, shape=(3, 4, 5))
+            out = tf.nn.relu(x)
+        tf.io.write_graph(
+            graph, self.saved_model_dir, self.model_path_pb, as_text=False
+        )
+        mlmodel = converter.convert(
+            self.model_path_pb,
+            inputs=[TensorType(x.op.name, (3, 4, 5))],
+            outputs=[out.op.name],
+        )
+        assert mlmodel is not None
+
+    def test_saved_model_from_simple_save(self):
+        with tf.compat.v1.Session() as sess:
+            x = tf.placeholder(shape=(1, 3, 5), dtype=tf.float32)
+            y = tf.nn.relu(x)
+            inputs = {"x": x}
+            outputs = {"y": y}
+            tf.compat.v1.saved_model.simple_save(
+                sess, self.saved_model_dir, inputs, outputs
+            )
+        mlmodel = converter.convert(self.saved_model_dir)
+        assert mlmodel is not None
+
+    def test_tf_keras(self):
+        keras_model = tf.keras.Sequential(
+            [tf.keras.layers.ReLU(input_shape=(4, 5), batch_size=3)]
+        )
+        input_names, output_names = get_tf_keras_io_names(keras_model)
+        mlmodel = converter.convert(
+            keras_model,
+            inputs=[TensorType(input_names[0], (3, 4, 5))],
+            outputs=["Identity"],
+            source=frontend,
+        )
+        assert mlmodel is not None
+
+    def test_tf_keras_hdf5_file(self):
+        keras_model = tf.keras.Sequential(
+            [tf.keras.layers.ReLU(input_shape=(4, 5), batch_size=3)]
+        )
+        keras_model.save(self.model_path_h5)
+        input_names, output_names = get_tf_keras_io_names(keras_model)
+        mlmodel = converter.convert(
+            self.model_path_h5,
+            inputs=[TensorType(input_names[0], (3, 4, 5))],
+            outputs=["Identity"],
+            source=frontend,
+        )
+        assert mlmodel is not None
+
+    def test_model_metadata(self):
+        with tf.Graph().as_default() as graph:
+            x = tf.placeholder(tf.float32, shape=(3, 4, 5))
+            out = tf.nn.relu(x)
+        mlmodel = converter.convert(
+            graph, inputs=[TensorType(x.op.name, (3, 4, 5))], outputs=[out.op.name]
+        )
+        metadata_keys = mlmodel.get_spec().description.metadata.userDefined
+        assert "com.github.apple.coremltools.version" in metadata_keys
+        assert "com.github.apple.coremltools.source" in metadata_keys
+        assert "tensorflow==1." in metadata_keys["com.github.apple.coremltools.source"]
+
+    def test_invalid_format_none(self):
+        with pytest.raises(NotImplementedError) as e:
+            converter.convert(None, source="tensorflow")
+        e.match(r"Expected model format: .* .pb")
+
+    def test_invalid_format_invalid_extension(self):
+        _, invalid_filename = tempfile.mkstemp(
+            suffix=".invalid", prefix=self.saved_model_dir
+        )
+        with pytest.raises(NotImplementedError) as e:
+            converter.convert(invalid_filename, source="tensorflow")
+        e.match(r"Expected model format: .* .pb")
+
+    def test_invalid_converter_source(self):
+        with pytest.raises(ValueError) as e:
+            converter.convert(None, source="invalid")
+        expected_msg = r'Unrecognized value of argument "source": .*'
+        e.match(expected_msg)
+
+    def test_invalid_converter_minimum_deployment_flag(self):
+        with pytest.raises(TypeError) as e:
+            converter.convert(
+                None, source="tensorflow", minimum_deployment_target="iOs14"
+            )
+        expected_msg = (
+            "Unrecognized value of argument 'minimum_deployment_target': iOs14. "
+            "It needs to be a member of 'coremltools.target' enumeration"
+        )
+
+        e.match(expected_msg)
+
+    def test_invalid_converter_target(self):
+        with tf.Graph().as_default() as graph:
+            x = tf.placeholder(tf.float32, shape=(3, 4, 5))
+        with pytest.raises(NotImplementedError) as e:
+            converter.convert(graph, convert_to="invalid", source="tensorflow")
+        e.match(r"Backend converter .* not implemented")
+
+    def test_invalid_format_non_exist(self):
+        non_exist_filename = self.model_path_pb.replace(".pb", "_non_exist.pb")
+        with pytest.raises(ValueError) as e:
+            converter.convert(non_exist_filename, source="tensorflow")
+        e.match(r"Input model .* does not exist")
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_ops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_ops.py
new file mode 100644
index 00000000..1cdcfbdd
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_ops.py
@@ -0,0 +1,7370 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import itertools
+import math
+import os
+import shutil
+import tempfile
+from distutils.version import StrictVersion
+
+import numpy as np
+import pytest
+
+import coremltools as ct
+from coremltools import RangeDim, TensorType
+from coremltools._deps import _HAS_TF_1, _HAS_TF_2, MSG_TF1_NOT_FOUND, _get_version
+from coremltools.converters.mil.frontend.tensorflow.test.testing_utils import (
+ TensorFlowBaseTest,
+ freeze_g,
+ layer_counts,
+ load_tf_pb,
+ make_tf_graph,
+)
+from coremltools.converters.mil.testing_reqs import backends, compute_units
+from coremltools.converters.mil.testing_utils import (
+ einsum_equations,
+ gen_input_shapes_einsum,
+ random_gen,
+)
+from coremltools.models.utils import _is_macos, _macos_version
+
+tf = pytest.importorskip("tensorflow")
+
+PREBUILT_TF1_WHEEL_VERSION = "1.15.5"
+
+@pytest.mark.skipif(not _HAS_TF_1, reason=MSG_TF1_NOT_FOUND)
+class TestContribResampler(TensorFlowBaseTest):
+ @pytest.mark.parametrize(
+ "compute_unit, backend, data_warp_shapes",
+ itertools.product(
+ compute_units,
+ backends,
+ [
+ # Data shape format: (Batch, Hin, Win, C)
+ # Warp shape format: (Batch, Hout, Wout, 2)
+ [(1, 3, 3, 1), (1, 3, 3, 2)], # no size change
+ [(2, 5, 5, 3), (2, 3, 3, 2)], # down-sampling
+ [(3, 6, 6, 1), (3, 8, 8, 2)], # up-sampling
+ [(1, 3, 9, 1), (1, 19, 2)], # rank-3 warp tensor
+ ],
+ ),
+ )
+ def test(
+ self, compute_unit, backend, data_warp_shapes,
+ ):
+ if backend[0] == "neuralnetwork":
+ pytest.skip("nn backend not supported")
+
+ data_shape, warp_shape = data_warp_shapes
+
+ @make_tf_graph([data_shape, warp_shape])
+ def build_model(x, warp):
+ return tf.contrib.resampler.resampler(data=x, warp=warp)
+
+ model, inputs, outputs = build_model
+ # warp exceeding input sizes in order to test more padding modes
+ input_values = [
+ random_gen(data_shape, -100, 100),
+ random_gen(warp_shape, -15, 15),
+ ]
+ input_dict = dict(zip(inputs, input_values))
+ self.run_compare_tf(
+ model,
+ input_dict,
+ outputs,
+ compute_unit=compute_unit,
+ backend=backend,
+ )
+
+
+class TestDebugging(TensorFlowBaseTest):
+ """
+ The TF converter does not handle debugging nodes; they are
+ expected to be deleted before op conversion by the Grappler
+ graph pass: debug_stripper.
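+ For example, the tf.debugging.Assert, tf.debugging.check_numerics, and
+ tf.raw_ops.Print nodes built in the tests below should be stripped away,
+ leaving only the relu op in the converted program.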
+ """ + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product(compute_units, backends), + ) + def test_assert(self, compute_unit, backend): + input_shape = (1,) + + @make_tf_graph([input_shape]) + def build_model(x): + tf.debugging.Assert(True, [x]) + return tf.nn.relu(x) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape, 0, 1)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product(compute_units, backends), + ) + def test_check_numerics(self, compute_unit, backend): + input_shape = (1,) + + @make_tf_graph([input_shape]) + def build_model(x): + tf.debugging.check_numerics(x, 'check') + return tf.nn.relu(x) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape, 0, 1)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product(compute_units, backends), + ) + def test_print(self, compute_unit, backend): + input_shape = (1,) + + @make_tf_graph([input_shape]) + def build_model(x): + tf.raw_ops.Print(input=x, data=[x], message='[x]') + return tf.nn.relu(x) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape, 0, 1)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestPlaceholderAsOutput(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [rank for rank in range(6)] + ), + ) + def test(self, compute_unit, backend, rank): + if rank == 0: + pytest.skip('Rank 0 not supported by CoreML runtime') + input_shape = np.random.randint(low=1, high=4, size=rank) + + @make_tf_graph([input_shape, input_shape]) + def build_model(x, y): + return x, y, x + 1, x + y + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape, -1, 1), random_gen(input_shape, -1, 1)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestDuplicateOutputs(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [rank for rank in range(6)] + ), + ) + def test(self, compute_unit, backend, rank): + if rank == 0: + pytest.skip('Rank 0 not supported by CoreML runtime') + input_shape = np.random.randint(low=1, high=4, size=rank) + + @make_tf_graph([input_shape]) + def build_model(x): + b = tf.identity(x) + c = tf.identity(x) + d = b + c + return b, c, d + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape, -1, 1), random_gen(input_shape, -1, 1)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestIdentity(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [rank for rank in range(6)] + ), + ) + def test(self, compute_unit, backend, rank): + if rank == 0: + pytest.skip('Rank 0 not 
supported by CoreML runtime') + + input_shape = np.random.randint(low=1, high=4, size=rank) + + @make_tf_graph([input_shape]) + def build_model(x): + return x + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape, -1, 1)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestActivationElu(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)] + ), + ) + def test(self, compute_unit, backend, rank): + input_shape = np.random.randint(low=1, high=4, size=rank) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.nn.elu(x) + + model, inputs, outputs = build_model + + input_values = [random_gen(input_shape, -1, 1)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestAddN(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, num_inputs", + itertools.product( + compute_units, + backends, + list(range(6)), + [1, 3, 9], + ), + ) + def test(self, compute_unit, backend, rank, num_inputs): + if rank == 0: + pytest.skip('Rank 0 not supported by CoreML runtime') + + input_shape = np.random.randint(low=1, high=4, size=rank) + input_shapes = [input_shape[:] for _ in range(num_inputs)] + + @make_tf_graph(input_shapes) + def build_model(*inputs): + return tf.raw_ops.AddN(inputs=inputs) + + model, inputs, outputs = build_model + input_values = [random_gen(shape, -1, 1) for shape in input_shapes] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestAddOrdering(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product(compute_units, backends), + ) + def test(self, compute_unit, backend): + @make_tf_graph([(2, 3, 4), (2, 3, 4)]) + def build_model(x, y): + return tf.math.add(x, y) + + model, inputs, outputs = build_model + input_values = [random_gen((2, 3, 4), -1, 1)] * 2 + input_dict = dict(zip(inputs, input_values)) + + spec, _, _, _, _, _ = TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + if backend[0] == "neuralnetwork": + nn_spec = spec.neuralNetwork + if _HAS_TF_1: + input_names = ["Placeholder", "Placeholder_1"] + elif _HAS_TF_2: + input_names = ["args_0", "args_1"] + + assert nn_spec.layers[0].input[0] == input_names[0] + assert nn_spec.layers[0].input[1] == input_names[1] + + +class TestActivationLeakyReLU(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)] + ), + ) + def test(self, compute_unit, backend, rank): + input_shape = np.random.randint(low=1, high=4, size=rank) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.nn.leaky_relu(x, 0.2) + + model, inputs, outputs = build_model + + input_values = [random_gen(input_shape, -1, 1)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestActivationReLU(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, 
backend, rank", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)] + ), + ) + def test(self, compute_unit, backend, rank): + input_shape = np.random.randint(low=1, high=4, size=rank) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.nn.relu(x) + + model, inputs, outputs = build_model + + input_values = [random_gen(input_shape, -10.0, 10)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestActivationReLU6(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)] + ), + ) + def test(self, compute_unit, backend, rank): + input_shape = np.random.randint(low=1, high=4, size=rank) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.nn.relu6(x) + + model, inputs, outputs = build_model + + input_values = [random_gen(input_shape, -1, 1)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestGelu(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, mode", + itertools.product( + compute_units, + backends, + [rank for rank in range(2, 3)], + ("tanh_approx", "exact_1", "exact_2", "exact_3") + ), + ) + def test(self, compute_unit, backend, rank, mode): + input_shape = np.random.randint(low=1, high=4, size=rank) + + @make_tf_graph([input_shape]) + def build_model_tanh_approx(x): + a = 0.5 * ( + 1.0 + tf.tanh((math.sqrt(2 / math.pi) * (x + 0.044715 * tf.pow(x, 3)))) + ) + return a * x + + @make_tf_graph([input_shape]) + def build_model_exact_1(x): + return x * (0.5 * (1.0 + tf.math.erf(x / tf.math.sqrt(2.0)))) + + @make_tf_graph([input_shape]) + def build_model_exact_2(x): + return 0.5 * (x * (1.0 + tf.math.erf(x / tf.math.sqrt(2.0)))) + + @make_tf_graph([input_shape]) + def build_model_exact_3(x): + return (x * 0.5) * (1.0 + tf.math.erf(x / tf.math.sqrt(2.0))) + + if mode == "tanh_approx": + build_model = build_model_tanh_approx + elif mode == "exact_1": + build_model = build_model_exact_1 + elif mode == "exact_2": + build_model = build_model_exact_2 + elif mode == "exact_3": + build_model = build_model_exact_3 + else: + raise ValueError("Unexpected mode for Gelu layer") + + model, inputs, outputs = build_model + + input_values = [random_gen(input_shape, -5, 5)] + input_dict = dict(zip(inputs, input_values)) + spec, mlmodel, _, _, _, _ = TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + assert TestGelu._op_count_in_mil_program(mlmodel, "gelu") == 1 + assert TestGelu._op_count_in_mil_program(mlmodel, "erf") == 0 + assert TestGelu._op_count_in_mil_program(mlmodel, "pow") == 0 + assert TestGelu._op_count_in_mil_program(mlmodel, "tanh") == 0 + + +class TestActivationSigmoid(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)] + ), + ) + def test(self, compute_unit, backend, rank): + input_shape = np.random.randint(low=1, high=4, size=rank) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.math.sigmoid(x) + + model, inputs, outputs = build_model + + input_values = [random_gen(input_shape, -1, 1)] + input_dict = dict(zip(inputs, input_values)) + 
TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestActivationSoftPlus(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)] + ), + ) + def test(self, compute_unit, backend, rank): + input_shape = np.random.randint(low=1, high=4, size=rank) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.math.softplus(x) + + model, inputs, outputs = build_model + + input_values = [random_gen(input_shape, -1, 1)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestActivationSoftmax(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank_and_axes", + itertools.product( + compute_units, + backends, + [(rank, axis) for rank in range(1, 6) for axis in range(-1, rank)], + ), + ) + def test(self, compute_unit, backend, rank_and_axes): + rank, axis = rank_and_axes + input_shape = np.random.randint(low=1, high=4, size=rank) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.nn.softmax(x, axis=axis) + + model, inputs, outputs = build_model + + input_values = [random_gen(input_shape, -1, 1)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestActivationSoftSign(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)] + ), + ) + def test(self, compute_unit, backend, rank): + input_shape = np.random.randint(low=1, high=4, size=rank) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.math.softsign(x) + + model, inputs, outputs = build_model + + input_values = [random_gen(input_shape, -1, 1)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestActivationSelu(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)] + ), + ) + def test(self, compute_unit, backend, rank): + input_shape = np.random.randint(low=1, high=4, size=rank) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.nn.selu(x) + + model, inputs, outputs = build_model + + input_values = [random_gen(input_shape, -1.0, 1.0)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class Testlog1p(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [1, 3, 5] + ), + ) + def test(self, compute_unit, backend, rank): + input_shape = np.random.randint(low=1, high=4, size=rank) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.math.log1p(x) + + model, inputs, outputs = build_model + + input_values = [random_gen(input_shape, 0.0, 2.0)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestSelect(TensorFlowBaseTest): + 
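"""
+ tf.raw_ops.Select picks elements from x where the condition is True and
+ from y otherwise; a 1-D condition of length shape[0] broadcasts along
+ the leading axis, selecting whole rows. E.g. cond=[True, False] with
+ x=[[1, 2], [3, 4]] and y=[[9, 8], [7, 6]] gives [[1, 2], [7, 6]].
+ """ +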
@pytest.mark.parametrize( + "compute_unit, backend, rank, broadcast, dynamic", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)], + [True, False], + [True, False], + ), + ) + def test_select(self, compute_unit, backend, rank, broadcast, dynamic): + shape = np.random.randint(low=1, high=4, size=rank) + cond_shape = np.array([shape[0]]) if broadcast else shape + + cond_val = np.random.randint(low=0, high=2, size=cond_shape).astype(bool) + a_val = random_gen(shape=shape, rand_min=-1962.0, rand_max=0.0) + b_val = random_gen(shape=shape, rand_min=0.0, rand_max=1964.0) + + if dynamic: + cond_shape = [None] * len(cond_shape) + [tf.bool] + a_shape = [None] * len(shape) + [tf.float32] + b_shape = [None] * len(shape) + [tf.float32] + else: + cond_shape = cond_shape.tolist() + [tf.bool] + a_shape = shape.tolist() + [tf.float32] + b_shape = shape.tolist() + [tf.float32] + + @make_tf_graph([cond_shape, a_shape, b_shape]) + def build_model_select(cond, a, b): + return tf.raw_ops.Select(condition=cond, x=a, y=b) + + model, inputs, outputs = build_model_select + inputs_dic = dict(zip(inputs, [cond_val, a_val, b_val])) + TensorFlowBaseTest.run_compare_tf( + model, + inputs_dic, + outputs, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestWhere(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)] + ), + ) + def test_where_1_input(self, compute_unit, backend, rank): + shape = np.random.randint(low=1, high=4, size=rank) + cond_val = np.random.randint(low=-1, high=2, size=shape).astype(np.float32) + + @make_tf_graph([shape]) + def build_model(condition): + return tf.where(condition=condition) + + model, inputs, outputs = build_model + inputs_dic = dict(zip(inputs, [cond_val])) + TensorFlowBaseTest.run_compare_tf( + model, + inputs_dic, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)] + ), + ) + def test_where(self, compute_unit, backend, rank): + shape = np.random.randint(low=1, high=4, size=rank) + cond_val = np.random.randint(low=0, high=2, size=shape).astype(bool) + x_val = random_gen(shape=shape, rand_min=-1962.0, rand_max=0.0) + y_val = random_gen(shape=shape, rand_min=0.0, rand_max=1964.0) + + @make_tf_graph([[*shape, tf.bool], shape, shape]) + def build_model(condition, x, y): + return tf.where(condition=condition, x=x, y=y) + + model, inputs, outputs = build_model + inputs_dic = dict(zip(inputs, [cond_val, x_val, y_val])) + TensorFlowBaseTest.run_compare_tf( + model, + inputs_dic, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestCast(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, dtype", + itertools.product( + compute_units, + backends, + list(range(1, 6)), + ['int32', 'float64'] + ), + ) + def test(self, compute_unit, backend, rank, dtype): + shape = np.random.randint(low=1, high=3, size=rank) + + if backend[0] == "mlprogram" and dtype == "int32": + pytest.xfail("rdar://78630549") + + @make_tf_graph([shape]) + def build_model(x): + y = tf.cast(x, dtype=dtype) + y = tf.square(y) + return y + + model, inputs, outputs = build_model + min_range, max_range = -100, 100 + input_values = [random_gen(shape, min_range, max_range)] + + # When using GPU with neuralnetwork backend, that uses FP16 precision, we make sure that + # the 
input is not too close to its ceiling / floor, + # for instance, 24.993 or -13.985 will not be allowed. + if compute_unit != ct.ComputeUnit.CPU_ONLY and dtype == "int32": + TOR_THRESHOLD = 0.03 + value = input_values[0].flatten() + for i, v in enumerate(value): + while abs(math.ceil(v) - v) < TOR_THRESHOLD or abs(math.floor(v) - v) < TOR_THRESHOLD: + v = random_gen((1,), min_range, max_range)[0] + value[i] = v + value = np.reshape(value, shape) + input_values = [value] + + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + +class TestCond(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_cond_naive(self, compute_unit, backend): + if (backend[0] == "mlprogram" and backend[1] == "fp16"): + pytest.xfail("rdar://96627246 (ConsTest unittest is failing)") + @make_tf_graph([(1,), (1,)]) + def build_model(x, y): + return tf.cond(tf.constant(True), lambda: x + y, lambda: x * y) + + model, inputs, outputs = build_model + input_values = [ + np.array([1], dtype=np.float32), + np.array([6], dtype=np.float32), + ] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_cond(self, compute_unit, backend): + @make_tf_graph([(1,), (1,)]) + def build_model(x, y): + z = tf.multiply(x, y) + pred = tf.less(tf.math.reduce_mean(x), tf.math.reduce_mean(y)) + return tf.cond(pred, lambda: tf.add(x, z), lambda: tf.square(y)) + + model, inputs, outputs = build_model + input_values = [ + np.array([1], dtype=np.float32), + np.array([2], dtype=np.float32), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_cond_multi_returns(self, compute_unit, backend): + @make_tf_graph([(1,), (1,)]) + def build_model(x, y): + z = tf.multiply(x, y) + pred = tf.less(tf.math.reduce_mean(x), tf.math.reduce_mean(y)) + + def true_fn(): + return tf.add(x, z), tf.math.multiply(x, z) + + def false_fn(): + return tf.square(y), tf.sqrt(z) + + return tf.cond(pred, true_fn, false_fn) + + model, inputs, outputs = build_model + input_values = [ + np.array([1], dtype=np.float32), + np.array([2], dtype=np.float32), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_cond_with_identity(self, compute_unit, backend): + @make_tf_graph([(1,), (1,)]) + def build_model(x, y): + z = tf.multiply(x, y) + pred = tf.less(tf.math.reduce_mean(x), tf.math.reduce_mean(y)) + return tf.cond(pred, lambda: z, lambda: tf.square(y)) + + model, inputs, outputs = build_model + input_values = [ + np.array([1], dtype=np.float32), + np.array([2], dtype=np.float32), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, 
backend", itertools.product(compute_units, backends,) + ) + def test_cond_multi_returns_with_identity(self, compute_unit, backend): + @make_tf_graph([(1,), (1,)]) + def build_model(x, y): + z = tf.multiply(x, y) + pred = tf.less(tf.math.reduce_mean(x), tf.math.reduce_mean(y)) + + def true_fn(): + return tf.add(x, z), x + + def false_fn(): + return tf.square(y), z + + return tf.cond(pred, true_fn, false_fn) + + model, inputs, outputs = build_model + input_values = [ + np.array([1], dtype=np.float32), + np.array([2], dtype=np.float32), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_cond_nested_0(self, compute_unit, backend): + if backend == ("mlprogram", "fp16"): + pytest.xfail("rdar://80660074 (Cond mlprogram FP16 tests falling in TF1 converter with numerical errors)") + + @make_tf_graph([(1,), (1,)]) + def build_model(x, y): + z = tf.multiply(x, y) + t = tf.less(tf.math.reduce_mean(x), tf.math.reduce_mean(y)) + f = tf.less(tf.math.reduce_mean(z), tf.math.reduce_mean(y)) + inner_cond = tf.cond( + f, lambda: tf.pow(x, y), lambda: tf.math.subtract(x, y) + ) + return tf.cond(t, lambda: inner_cond, lambda: tf.square(y)) + + model, inputs, outputs = build_model + + input_values = [ + np.array([2], dtype=np.float32), + np.array([3], dtype=np.float32), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_cond_nested_1(self, compute_unit, backend): + if backend == ("mlprogram", "fp16"): + pytest.xfail("rdar://80660074 (Cond mlprogram FP16 tests falling in TF1 converter with numerical errors)") + + @make_tf_graph([(1,), (1,)]) + def build_model(x, y): + z = tf.multiply(x, y) + t = tf.less(tf.math.reduce_mean(x), tf.math.reduce_mean(y)) + f = tf.less(tf.math.reduce_mean(z), tf.math.reduce_mean(y)) + cond_1 = tf.cond(f, lambda: tf.pow(x, y), lambda: tf.math.subtract(x, y)) + cond_2 = tf.cond(t, lambda: tf.multiply(x, y), lambda: tf.math.mod(x, y)) + cond_3 = tf.cond(f, lambda: tf.math.divide(x, y), lambda: cond_2) + return tf.cond(t, lambda: cond_1, lambda: cond_3) + + model, inputs, outputs = build_model + + input_values = [ + np.array([2], dtype=np.float32), + np.array([3], dtype=np.float32), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestWhileLoop(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_while_loop_with_changing_shape(self, compute_unit, backend): + @make_tf_graph([(2, 1), (2, 1)]) + def build_model(x, y): + c = lambda i, j: tf.less(tf.shape(j)[1], 5) + b = lambda i, j: (i, tf.concat([i, j], axis=1)) + return tf.while_loop(c, b, [x, y], shape_invariants=[x.get_shape(), tf.TensorShape([2, None])]) + + model, inputs, outputs = build_model + input_values = [np.array([[1], [2]], dtype=np.float32), np.array([[1], [2]], dtype=np.float32)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) 
+ + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_while_loop_no_entry(self, compute_unit, backend): + @make_tf_graph([(1,)]) + def build_model(x): + c = lambda i: tf.greater(tf.math.reduce_mean(i), 5) + b = lambda i: i - 1 + return tf.while_loop(c, b, [x]) + + model, inputs, outputs = build_model + input_values = [np.array([5], dtype=np.float32)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_while_loop_0(self, compute_unit, backend): + @make_tf_graph([(1,)]) + def build_model(x): + c = lambda i: tf.greater(tf.math.reduce_mean(i), 5) + b = lambda i: i - 1 + return tf.while_loop(c, b, [x]) + + model, inputs, outputs = build_model + input_values = [np.array([10], dtype=np.float32)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_while_loop_1(self, compute_unit, backend): + @make_tf_graph([(1,), (1,)]) + def build_model(x, y): + c = lambda i, j: tf.greater(tf.math.reduce_mean(i), tf.math.reduce_mean(j)) + b = lambda i, j: (tf.add(i, 1), tf.square(j)) + return tf.while_loop(c, b, [x, y]) + + model, inputs, outputs = build_model + input_values = [ + np.array([1], dtype=np.float32), + np.array([2], dtype=np.float32), + ] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_while_loop_2(self, compute_unit, backend): + @make_tf_graph([(1,), (1, 2)]) + def build_model(x, y): + c = lambda i, j: tf.greater(tf.math.reduce_mean(i), 5) + b = lambda i, j: (i - 3, j * 2) + return tf.while_loop(c, b, [x, y]) + + model, inputs, outputs = build_model + input_values = [ + np.array([10], dtype=np.float32), + np.array([[2, 3]], dtype=np.float32), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_while_loop_3(self, compute_unit, backend): + @make_tf_graph([(1,), (1, 2), (1,)]) + def build_model(x, y, z): + c = lambda i, j, k: tf.greater( + tf.math.reduce_mean(i), tf.math.reduce_mean(j) + ) + b = lambda i, j, k: (i / 3, j ** 2, k - 2) + return tf.while_loop(c, b, [x, y, z]) + + model, inputs, outputs = build_model + input_values = [ + np.array([10], dtype=np.float32), + np.array([[2, 3]], dtype=np.float32), + np.array([5], dtype=np.float32), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_while_loop_4(self, compute_unit, backend): + @make_tf_graph([(1,), (1, 2), (1,), (2, 1)]) + def build_model(x, y, z, m): + c = lambda i, j, k, l: tf.greater( + tf.math.reduce_mean(i), 
tf.math.reduce_mean(j) + ) + b = lambda i, j, k, l: (i / 3, j ** 2, k - 2, l % 2) + return tf.while_loop(c, b, [x, y, z, m]) + + model, inputs, outputs = build_model + input_values = [ + np.array([10], dtype=np.float32), + np.array([[2, 3]], dtype=np.float32), + np.array([5], dtype=np.float32), + np.array([[2], [3]], dtype=np.float32), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.skipif(_HAS_TF_2, reason="tf.function() error in TF2") + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_nested_while_body(self, compute_unit, backend): + @make_tf_graph([(1,), (1,)]) + def build_model(x, y): + # The following while loop: + # + # i, j = 0, 10 + # while i < j: + # while 2*i < i+2: + # i += 1 + # i += 2 + + def cond2(i): + return tf.less(2 * tf.math.reduce_mean(i), tf.math.reduce_mean(i + 2)) + + def body2(i): + return i + 1 + + def cond1(i, j): + return tf.less(tf.math.reduce_mean(i), tf.math.reduce_mean(j)) + + def body1(i, j): + new_i = tf.while_loop(cond2, body2, [i]) + return new_i + 2, j + + return tf.while_loop(cond1, body1, [x, y]) + + model, inputs, outputs = build_model + input_values = [ + np.array([0], dtype=np.float32), + np.array([10], dtype=np.float32), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_nested_while_cond(self, compute_unit, backend): + @make_tf_graph([(1,), (1,)]) + def build_model(x, y): + # The following while loop: + # + # def cond(i, j): + # while 2*i < i+2: + # i += 1 + # return i < j + # + # i, j = 0, 10 + # while cond(i, j): + # i += 2 + # j += 1 + + def cond2(i): + return tf.less(2 * tf.math.reduce_mean(i), tf.math.reduce_mean(i + 2)) + + def body2(i): + return i + 1 + + def cond1(i, j): + new_i = tf.while_loop(cond2, body2, [i]) + return tf.less(tf.squeeze(new_i), tf.squeeze(j)) + + def body1(i, j): + return i + 2, j + 1 + + return tf.while_loop(cond1, body1, [x, y]) + + model, inputs, outputs = build_model + input_values = [ + np.array([0], dtype=np.float32), + np.array([10], dtype=np.float32), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + +class TestConv(TensorFlowBaseTest): + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "conv_dim", # 1d or 2d conv + "padding", + "data_format", + "HWkHkW", + "strides", + "dilations", + "dynamic_weights", + "batch_size", + ] + ), + itertools.product( + compute_units, + backends, + ["conv1d", "conv2d"], + ["SAME", "VALID", [[2, 3], [3, 2]]], + ["NHWC"], # NCHW not supported by TF. 
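+ # HWkHkW = (H, W, kH, kW): input spatial size followed by kernel size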
+ [(11, 12, 3, 2), (12, 11, 2, 3)], + [(1, 1), (2, 3)], + [(1, 1), (2, 3)], + [True, False], + [1, 3], + ), + ) + def test( + self, + compute_unit, + backend, + conv_dim, + padding, + data_format, + HWkHkW, + strides, + dilations, + dynamic_weights, + batch_size, + ): + H, W, kH, kW = HWkHkW + N, C_in, C_out = batch_size, 2, 3 + if data_format == "NHWC": + input_shape = (N, W, C_in) if conv_dim == "conv1d" else (N, H, W, C_in) + if isinstance(padding, list): + padding = [[0, 0]] + padding + [[0, 0]] + if conv_dim == "conv1d": + data_format = "NWC" + if isinstance(padding, list): + # No explicit padding for conv1d in TF + return + else: # 'NCHW' + input_shape = (N, C_in, W) if conv_dim == "conv1d" else (N, C_in, H, W) + if isinstance(padding, list): + padding = [[0, 0], [0, 0]] + padding + if conv_dim == "conv1d": + data_format = "NCW" + if isinstance(padding, list): + # No explicit padding for conv1d in TF + return + W_shape = (kW, C_in, C_out) if conv_dim == "conv1d" else (kH, kW, C_in, C_out) + dilations = dilations[1] if conv_dim == "conv1d" else dilations + strides = strides[1] if conv_dim == "conv1d" else strides + + # We do not support dynamic weight when dilations != 1. + if dynamic_weights and dilations == (1, 1): + + @make_tf_graph([input_shape, W_shape]) + def build_model_dynamic_weights(x, W): + if conv_dim == "conv1d": + conv = tf.nn.conv1d( + x, + W, + stride=strides, + padding=padding, + dilations=dilations, + data_format=data_format, + ) + else: + conv = tf.nn.conv2d( + x, + W, + strides=strides, + padding=padding, + dilations=dilations, + data_format=data_format, + ) + return conv + + model, inputs, outputs = build_model_dynamic_weights + input_values = [ + random_gen(input_shape, -10.0, 10.0), + random_gen(W_shape, -1.0, 1.0), + ] + input_dict = dict(zip(inputs, input_values)) + + else: + + @make_tf_graph([input_shape]) + def build_model_static_weights(x): + W = tf.constant(np.random.rand(*W_shape), tf.float32) + if conv_dim == "conv1d": + conv = tf.nn.conv1d( + x, + W, + stride=strides, + padding=padding, + dilations=dilations, + data_format=data_format, + ) + else: + conv = tf.nn.conv2d( + x, + W, + strides=strides, + padding=padding, + dilations=dilations, + data_format=data_format, + ) + return conv + + model, inputs, outputs = build_model_static_weights + input_values = [random_gen(input_shape, -10.0, 10.0)] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestConv3d(TensorFlowBaseTest): + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "data_format", + "input_size", + "kernel_size", + "strides", + "dilations", + "padding_type", + "batch_size", + ] + ), + itertools.product( + compute_units, # compute_unit + backends, + ["NDHWC"], # NCDHW not supported by TF. 
+ [(7, 11, 13), (32, 16, 8)], # input_size + [(1, 1, 1), (3, 3, 3), (1, 2, 3)], # kernel_size + [(1, 1, 1), (2, 2, 2), (3, 2, 1)], # strides + [ + (1, 1, 1) + ], # , (2, 2, 2), (2, 3, 1)], # dilations: dilations greater than 1 not supported on CPU + ["SAME", "VALID"], # padding_type + [1, 3], # batch_size + ), + ) + def test_tf( + self, + compute_unit, + backend, + data_format, + input_size, + kernel_size, + strides, + dilations, + padding_type, + batch_size, + ): + C_in = np.random.randint(low=1, high=4) + C_out = np.random.randint(low=1, high=(C_in + 1)) + input_shape = [batch_size] + list(input_size) + [C_in] + weights_shape = list(kernel_size) + [C_in, C_out] + # TF1 and TF2 tf.nn.conv3d require dilations and strides to have length 5 or greater, with values of 1 for + # indices 0 and 4 (batch and channel in NDHWC format) + tf_strides = [1] + list(strides) + [1] + tf_dilations = [1] + list(dilations) + [1] + + @make_tf_graph([input_shape]) + def build_model_static_weights(x): + W = tf.constant(np.random.rand(*weights_shape), tf.float32) + return tf.nn.conv3d( + x, + W, + strides=tf_strides, + padding=padding_type, + data_format=data_format, + dilations=tf_dilations, + ) + + model, inputs, outputs = build_model_static_weights + input_values = [random_gen(input_shape, -10.0, 10.0)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + atol=1e-03, # default 1e-04 + rtol=2e-03, # default 1e-05 + ) + + +class TestDepthwiseConv(TensorFlowBaseTest): + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "padding", + "HWkHkW", + "strides", + "dilations", + "dynamic_weights", + "batch_size", + ] + ), + itertools.product( + compute_units, + backends, + ["SAME", "VALID"], + [(11, 12, 3, 2), (12, 11, 2, 3)], + # TF doesn't support non-square strides for depthwise + # https://github.com/tensorflow/tensorflow/issues/33005 + [(1, 1, 1, 1), (1, 2, 2, 1)], + [ + (1, 1), + (2, 2), + ], + [True, False], + [1, 3], + ), + ) + def test_depthwise_conv( + self, + compute_unit, + backend, + padding, + HWkHkW, + strides, + dilations, + dynamic_weights, + batch_size, + ): + if backend[0] == "mlprogram" and dilations == (1,1) and dynamic_weights and compute_unit != ct.ComputeUnit.CPU_ONLY: + # in this case, there is a numerical mismatch on the GPU MIL backend. The GPU runtime tests are + # tracked seprately. 
+ return
+
+ if np.sum(strides) != len(strides) and np.sum(dilations) != len(dilations):
+ # TF doesn't compute correct output for non-one stride+dilation
+ return
+
+ H, W, kH, kW = HWkHkW
+ N, C_in, C_out = batch_size, 2, 6
+ input_shape = (N, H, W, C_in)
+ data_format = "NHWC"
+ assert C_out % C_in == 0
+ multiplier = int(C_out / C_in)
+ W_shape = (kH, kW, C_in, multiplier)
+
+ def test_static_W():
+ W = np.random.rand(*W_shape).astype(np.float32)
+
+ @make_tf_graph([input_shape])
+ def build_model_static_weights(x):
+ return tf.nn.depthwise_conv2d(
+ x,
+ W,
+ strides=strides,
+ padding=padding,
+ dilations=dilations,
+ data_format=data_format,
+ )
+
+ model, inputs, outputs = build_model_static_weights
+
+ input_values = [(np.random.rand(*input_shape).astype(np.float32))]
+ input_dict = dict(zip(inputs, input_values))
+
+ proto, _, _, _, _, _ = TensorFlowBaseTest.run_compare_tf(
+ model,
+ input_dict,
+ outputs,
+ compute_unit=compute_unit,
+ backend=backend,
+ )
+
+ if backend[0] == 'neuralnetwork':
+ assert layer_counts(proto, "reorganizeData") == 0
+
+ def test_dynamic_W():
+ @make_tf_graph([input_shape, W_shape])
+ def build_model_dynamic_weights(x, W):
+ return tf.nn.depthwise_conv2d(
+ x,
+ W,
+ strides=strides,
+ padding=padding,
+ dilations=dilations,
+ data_format=data_format,
+ )
+
+ model, inputs, outputs = build_model_dynamic_weights
+
+ input_values = [
+ (np.random.rand(*input_shape).astype(np.float32)),
+ (np.random.rand(*W_shape).astype(np.float32)),
+ ]
+ input_dict = dict(zip(inputs, input_values))
+
+ TensorFlowBaseTest.run_compare_tf(
+ model,
+ input_dict,
+ outputs,
+ compute_unit=compute_unit,
+ backend=backend,
+ )
+
+ if backend[0] == "neuralnetwork" and dynamic_weights:
+ pytest.skip("dynamic conv with groups > 1 is not supported on the neuralnetwork backend")
+
+ # We do not support dynamic weight when dilations != 1.
+ test_dynamic_W() if dynamic_weights and dilations == (1, 1) else test_static_W()
+
+
+class TestSeparableConv(TensorFlowBaseTest):
+ @pytest.mark.parametrize(
+ ",".join(
+ [
+ "compute_unit",
+ "backend",
+ "padding",
+ "HWkHkW",
+ "strides",
+ "dilations",
+ "dynamic_weights",
+ "batch_size",
+ ]
+ ),
+ itertools.product(
+ compute_units,
+ backends,
+ ["SAME", "VALID"],
+ [(11, 12, 3, 2), (12, 11, 2, 3)],
+ [(1, 1, 1, 1), (1, 2, 2, 1)],
+ [(1, 1), (2, 2)],
+ [True, False],
+ [1, 3],
+ ),
+ )
+ def test_separable_conv(
+ self,
+ compute_unit,
+ backend,
+ padding,
+ HWkHkW,
+ strides,
+ dilations,
+ dynamic_weights,
+ batch_size,
+ ):
+ if backend[0] == "mlprogram" and dilations == (1,1) and compute_unit != ct.ComputeUnit.CPU_ONLY:
+ msg = "In this case, there is a numerical mismatch on the GPU MIL backend. The GPU runtime tests are tracked separately."
+ pytest.skip(msg)
+
+ H, W, kH, kW = HWkHkW
+ N, C_in, C_out = batch_size, 2, 6
+ input_shape = (N, H, W, C_in)
+ data_format = "NHWC"
+ assert C_out % C_in == 0
+ multiplier = int(C_out / C_in)
+ depthwise_filter_shape = (kH, kW, C_in, multiplier)
+ pointwise_filter_shape = [1, 1, multiplier * C_in, C_out]
+ if dilations != (1, 1):
+ strides = (1, 1, 1, 1)
+
+ def test_dynamic_W():
+ @make_tf_graph(
+ [input_shape, depthwise_filter_shape, pointwise_filter_shape]
+ )
+ def build_model_dynamic_weights(x, depthwise_filter, pointwise_filter):
+ return tf.nn.separable_conv2d(
+ x,
+ depthwise_filter,
+ pointwise_filter,
+ strides=strides,
+ padding=padding,
+ dilations=dilations,
+ data_format=data_format,
+ )
+
+ model, inputs, outputs = build_model_dynamic_weights
+
+ input_values = [
+ (np.random.rand(*input_shape).astype(np.float32)),
+ (np.random.rand(*depthwise_filter_shape).astype(np.float32)),
+ (np.random.rand(*pointwise_filter_shape).astype(np.float32)),
+ ]
+ input_dict = dict(zip(inputs, input_values))
+
+ TensorFlowBaseTest.run_compare_tf(
+ model,
+ input_dict,
+ outputs,
+ compute_unit=compute_unit,
+ backend=backend,
+ )
+
+ def test_static_W():
+ depthwise_filter = np.random.rand(*depthwise_filter_shape).astype(
+ np.float32
+ )
+ pointwise_filter = np.random.rand(*pointwise_filter_shape).astype(
+ np.float32
+ )
+
+ @make_tf_graph([input_shape])
+ def build_model_static_weights(x):
+ return tf.nn.separable_conv2d(
+ x,
+ depthwise_filter,
+ pointwise_filter,
+ strides=strides,
+ padding=padding,
+ dilations=dilations,
+ data_format=data_format,
+ )
+
+ model, inputs, outputs = build_model_static_weights
+
+ input_values = [(np.random.rand(*input_shape).astype(np.float32))]
+ input_dict = dict(zip(inputs, input_values))
+
+ TensorFlowBaseTest.run_compare_tf(
+ model,
+ input_dict,
+ outputs,
+ compute_unit=compute_unit,
+ backend=backend,
+ )
+
+ test_static_W()
+ if all(d == 1 for d in dilations):
+ if backend[0] == "neuralnetwork":
+ pytest.skip("dynamic conv with groups > 1 is not supported on the neuralnetwork backend")
+ test_dynamic_W()
+
+class TestConvTranspose(TensorFlowBaseTest):
+ @pytest.mark.parametrize(
+ ",".join(
+ [
+ "compute_unit",
+ "backend",
+ "conv_dim", # 1d or 2d conv
+ "padding",
+ "data_format",
+ "HWkHkW",
+ "strides",
+ "dilations",
+ "dynamic",
+ ]
+ ),
+ itertools.product(
+ compute_units,
+ backends,
+ ["conv1d", "conv2d"],
+ ["SAME", "VALID"],
+ ["NHWC"], # NCHW not supported by TF
+ [(12, 10, 2, 2), (4, 2, 2, 3), (7, 5, 3, 3)],
+ [(1, 1), (1, 2)],
+ [(1, 1)], # Dilation > 1 not supported by TF
+ [True, False],
+ ),
+ )
+ def test_conv_transpose(
+ self,
+ compute_unit,
+ backend,
+ conv_dim,
+ padding,
+ data_format,
+ HWkHkW,
+ strides,
+ dilations,
+ dynamic,
+ ):
+ H, W, kH, kW = HWkHkW
+ N, C_in, C_out = 1, 1, 2
+
+ if data_format == "NHWC":
+ input_shape = (N, W, C_in) if conv_dim == "conv1d" else (N, H, W, C_in)
+ if conv_dim == "conv1d":
+ data_format = "NWC"
+ else: # 'NCHW'
+ pass
+
+ w_shape = (kW, C_out, C_in) if conv_dim == "conv1d" else (kH, kW, C_out, C_in)
+
+ # dynamic input shape
+ tf_input_shape = list(input_shape)
+ if dynamic:
+ if data_format == "NHWC":
+ tf_input_shape[1] = None
+ tf_input_shape[2] = None
+ elif data_format == "NWC":
+ tf_input_shape[1] = None
+
+ @make_tf_graph([tf_input_shape])
+ def build_model(x):
+ Weight = tf.constant(np.random.rand(*w_shape), tf.float32)
+
+ # get the dynamic height and width
+ if dynamic:
+ shape = tf.shape(x)
+ if data_format
== "NHWC": + H, W = shape[1], shape[2] + elif data_format == "NWC": + W = shape[1] + else: + H, W = HWkHkW[:2] + + kH, kW = HWkHkW[2:] + + is_conv_2d = conv_dim == "conv2d" + + # compute the output shape, in both static / dynamic cases + if padding == "SAME": + oW = W * strides[1] + if is_conv_2d: + oH = H * strides[0] + elif padding == "VALID": + oW = (W - 1) * strides[1] + (kW - 1) * dilations[1] + 1 + if is_conv_2d: + oH = (H - 1) * strides[0] + (kH - 1) * dilations[0] + 1 + + if data_format == "NHWC": + output_shape = [N, oH, oW, C_out] + elif data_format == "NWC": + output_shape = [N, oW, C_out] + + if conv_dim == "conv1d": + return tf.nn.conv1d_transpose( + x, + Weight, + output_shape=output_shape, + strides=strides[1], + padding=padding, + dilations=dilations[1], + data_format=data_format, + ) + elif conv_dim == "conv2d": + return tf.nn.conv2d_transpose( + x, + Weight, + output_shape=output_shape, + strides=strides, + padding=padding, + dilations=dilations, + data_format=data_format, + ) + model, inputs, outputs = build_model + + input_values = [(np.random.rand(*input_shape).astype(np.float32))] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "padding", + "data_format", + "DHWkDkHkW", + "strides", + "dilations", + "dynamic", + ] + ), + itertools.product( + compute_units, + backends, + [ + "SAME", "VALID" + ], + ["NDHWC"], + [ + (10, 12, 14, 2, 3, 5), + (4, 6, 8, 2, 3, 1), + (6, 8, 10, 3, 3, 3), + (5, 7, 9, 2, 4, 2), + ], + [(1, 1, 1), (1, 2, 3)], + [(1, 1, 1)], # Dilation > 1 not supported by TF + [True, False], + ), + ) + def test_conv3d_transpose( + self, compute_unit, backend, padding, data_format, DHWkDkHkW, strides, dilations, dynamic, + ): + if _macos_version() < (12, 0) and strides == (1, 2, 3) and padding == "VALID": + # Behavior changed in macOS 12 + return + + D, H, W, kD, kH, kW = DHWkDkHkW + N, C_in, C_out = 2, 1, 2 + + if data_format == "NDHWC": + input_shape = (N, D, H, W, C_in) + else: # 'NCDHW' + pass + + tf_input_shape = list(input_shape) + if dynamic: + if data_format == "NDHWC": + tf_input_shape[1] = None + tf_input_shape[2] = None + tf_input_shape[3] = None + else: + pass + + w_shape = (kD, kH, kW, C_out, C_in) + + @make_tf_graph([tf_input_shape]) + def build_model(x): + weight = tf.constant(np.random.rand(*w_shape), tf.float32) + + # get the depth, height and width + if dynamic: + shape = tf.shape(x) + if data_format == "NDHWC": + D, H, W = shape[1], shape[2], shape[3] + else: + pass + else: + D, H, W = DHWkDkHkW[:3] + + kD, kH, kW = DHWkDkHkW[3:] + + # compute the output shape + if padding == "SAME": + oD = D * strides[0] + oH = H * strides[1] + oW = W * strides[2] + else: + oD = (D - 1) * strides[0] + (kD - 1) * dilations[0] + 1 + oH = (H - 1) * strides[1] + (kH - 1) * dilations[1] + 1 + oW = (W - 1) * strides[2] + (kW - 1) * dilations[2] + 1 + + if data_format == "NDHWC": + output_shape = [N, oD, oH, oW, C_out] + else: + pass + + return tf.nn.conv3d_transpose( + x, + weight, + output_shape=output_shape, + strides=strides, + padding=padding, + dilations=dilations, + data_format=data_format, + ) + + model, inputs, outputs = build_model + + input_values = [(np.random.rand(*input_shape).astype(np.float32))] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + 
backend=backend,
+ )
+
+
+class TestElementWiseBinary(TensorFlowBaseTest):
+ @pytest.mark.parametrize(
+ "compute_unit, backend, rank, tf_op, broadcast_case",
+ itertools.product(
+ compute_units,
+ backends,
+ [0, 1, 2, 3, 4],
+ [
+ tf.math.add,
+ tf.math.floordiv,
+ tf.math.floormod,
+ tf.math.maximum,
+ tf.math.minimum,
+ tf.math.mod,
+ tf.math.multiply,
+ tf.math.pow,
+ tf.math.truediv,
+ tf.math.subtract,
+ tf.math.squared_difference,
+ ],
+ [0, 1, 2, 3]
+ ),
+ )
+ def test_binary_math(self, compute_unit, backend, rank, tf_op,
+ broadcast_case):
+ if rank == 0 or broadcast_case == 0:
+ pytest.skip("Rank-0 input is not supported")
+
+ x_shape = y_shape = list(np.random.randint(low=2, high=4, size=rank))
+
+ # test broadcasting:
+ # 0 -> broadcast where one of the inputs is a 0-D tensor (scalar)
+ # 1 -> broadcast with the same rank, where some dimensions are of size 1
+ # 2 -> broadcast with a different rank, via an extra leading dimension of size 1
+ # 3 -> no broadcast, both inputs have the same shape
+ if broadcast_case == 0:
+ y_shape = []
+ elif broadcast_case == 1:
+ y_shape = [1 if np.random.randint(2) == 0 else d for d in y_shape]
+ elif broadcast_case == 2:
+ y_shape = [1] + y_shape
+
+ # randomly swap x and y
+ if np.random.randint(2) == 0:
+ x_shape, y_shape = y_shape, x_shape
+
+ # lower precision input data for non-CPU tests
+ dtype = np.float32 if compute_unit == ct.ComputeUnit.CPU_ONLY else np.float16
+
+ if tf_op in {tf.math.add, tf.math.subtract, tf.math.multiply}:
+ x_val = random_gen(x_shape, -100, 100, dtype=dtype).astype(np.float32)
+ y_val = random_gen(y_shape, -100, 100, dtype=dtype).astype(np.float32)
+ elif tf_op in {tf.math.truediv, tf.math.floordiv, tf.math.floormod, tf.math.mod}:
+ x_val = random_gen(x_shape, -100, 100, dtype=dtype).astype(np.float32)
+ y_val = random_gen(y_shape, 1, 20, dtype=dtype).astype(np.float32)
+ elif tf_op in {tf.math.maximum, tf.math.minimum}:
+ x_val = random_gen(x_shape, -10, 10, dtype=dtype).astype(np.float32)
+ y_val = random_gen(y_shape, -10, 10, dtype=dtype).astype(np.float32)
+ elif tf_op in {tf.math.pow, tf.math.squared_difference}:
+ x_val = random_gen(x_shape, -5, 5, dtype=np.int32).astype(np.float32)
+ y_val = random_gen(y_shape, -5, 5, dtype=np.int32).astype(np.float32)
+ else:
+ raise NotImplementedError("input values need to be defined")
+
+ @make_tf_graph([x_shape, y_shape])
+ def build_model(x, y):
+ return tf_op(x, y)
+
+ model, inputs, outputs = build_model
+ input_values = [x_val, y_val]
+ input_dict = dict(zip(inputs, input_values))
+ TensorFlowBaseTest.run_compare_tf(
+ model,
+ input_dict,
+ outputs,
+ compute_unit=compute_unit,
+ backend=backend
+ )
+
+ @pytest.mark.parametrize(
+ "compute_unit, backend, rank, tf_op, broadcast_case",
+ itertools.product(
+ compute_units,
+ backends,
+ [0, 1, 2, 3, 4],
+ [
+ tf.equal,
+ tf.not_equal,
+ tf.greater,
+ tf.greater_equal,
+ tf.less,
+ tf.less_equal,
+ ],
+ [0, 1, 2, 3],
+ ),
+ )
+ def test_binary_compare(self, compute_unit, backend, rank, tf_op,
+ broadcast_case):
+ if rank == 0 or broadcast_case == 0:
+ pytest.skip("Rank-0 input is not supported")
+
+ x_shape = y_shape = list(np.random.randint(low=2, high=4, size=rank))
+
+ # test broadcasting:
+ # 0 -> broadcast where one of the inputs is a 0-D tensor (scalar)
+ # 1 -> broadcast with the same rank, where some dimensions are of size 1
+ # 2 -> broadcast with a different rank, via an extra leading dimension of size 1
+ # 3 -> no broadcast, both inputs have the same shape
+ if broadcast_case == 0:
+ y_shape = []
+ elif broadcast_case == 1:
+ y_shape = [1 if np.random.randint(2) == 0 else d for d in y_shape]
+ elif broadcast_case == 2:
+ y_shape = [1] + y_shape
+
+ # randomly swap x and y
+ if np.random.randint(2) == 0:
+ x_shape, y_shape = y_shape, x_shape
+
+ # lower precision input data for non-CPU tests
+ dtype = np.float32 if compute_unit == ct.ComputeUnit.CPU_ONLY else np.float16
+
+ @make_tf_graph([x_shape, y_shape])
+ def build_model(x, y):
+ return tf_op(x, y)
+
+ model, inputs, outputs = build_model
+ input_values = [
+ random_gen(x_shape, -5, 3, dtype=dtype).astype(np.float32),
+ random_gen(y_shape, -5, 3, dtype=dtype).astype(np.float32),
+ ]
+ input_dict = dict(zip(inputs, input_values))
+ TensorFlowBaseTest.run_compare_tf(
+ model,
+ input_dict,
+ outputs,
+ compute_unit=compute_unit,
+ backend=backend,
+ )
+
+ @pytest.mark.parametrize(
+ "compute_unit, backend, rank, tf_op, broadcast_case",
+ itertools.product(
+ compute_units,
+ backends,
+ [0, 1, 2, 3, 4],
+ [
+ tf.math.logical_and,
+ tf.math.logical_or,
+ tf.math.logical_xor,
+ ],
+ [0, 1, 2, 3],
+ ),
+ )
+ def test_binary_logical(self, compute_unit, backend, rank, tf_op,
+ broadcast_case):
+ if rank == 0 or broadcast_case == 0:
+ pytest.skip("Rank-0 input is not supported")
+
+ x_shape = y_shape = list(np.random.randint(low=2, high=4, size=rank))
+
+ # test broadcasting:
+ # 0 -> broadcast where one of the inputs is a 0-D tensor (scalar)
+ # 1 -> broadcast with the same rank, where some dimensions are of size 1
+ # 2 -> broadcast with a different rank, via an extra leading dimension of size 1
+ # 3 -> no broadcast, both inputs have the same shape
+ if broadcast_case == 0:
+ y_shape = []
+ elif broadcast_case == 1:
+ y_shape = [1 if np.random.randint(2) == 0 else d for d in y_shape]
+ elif broadcast_case == 2:
+ y_shape = [1] + y_shape
+
+ # randomly swap x and y
+ if np.random.randint(2) == 0:
+ x_shape, y_shape = y_shape, x_shape
+
+ @make_tf_graph([x_shape + [tf.bool], y_shape + [tf.bool]])
+ def build_model(x, y):
+ return tf_op(x, y)
+
+ model, inputs, outputs = build_model
+ input_values = [
+ random_gen(x_shape, 0, 2, dtype=np.int32).astype(bool),
+ random_gen(y_shape, 0, 2, dtype=np.int32).astype(bool),
+ ]
+ input_dict = dict(zip(inputs, input_values))
+ TensorFlowBaseTest.run_compare_tf(
+ model,
+ input_dict,
+ outputs,
+ compute_unit=compute_unit,
+ backend=backend,
+ )
+
+
+class TestCross(TensorFlowBaseTest):
+ @pytest.mark.parametrize(
+ "compute_unit, backend, rank",
+ itertools.product(
+ compute_units,
+ backends,
+ [2, 3, 4],
+ )
+ )
+ def test(self, compute_unit, backend, rank):
+ input_shape = list(np.random.randint(low=2, high=4, size=rank)) + [3]
+ input_shapes = [input_shape, input_shape]
+
+ @make_tf_graph(input_shapes)
+ def build_model(x, y):
+ return tf.linalg.cross(x, y)
+
+ model, inputs, outputs = build_model
+
+ input_values = [random_gen(shape, -1, 1) for shape in input_shapes]
+
+ input_dict = dict(zip(inputs, input_values))
+ TensorFlowBaseTest.run_compare_tf(
+ model,
+ input_dict,
+ outputs,
+ compute_unit=compute_unit,
+ backend=backend,
+ )
+
+
+class TestEinsum(TensorFlowBaseTest):
+ @pytest.mark.parametrize(
+ "compute_unit, backend, equation, reverse_input_order",
+ itertools.product(
+ compute_units,
+ backends,
+ einsum_equations,
+ [False, True],
+ )
+ )
+ def test(self, compute_unit, backend, equation, reverse_input_order):
+ input_shapes, _ = gen_input_shapes_einsum(equation, False)
+ if _HAS_TF_1:
+ if len(set(input_shapes[0])) < len(input_shapes[0]) or len(set(input_shapes[1])) < len(input_shapes[1]):
+ pytest.skip("tf1 does not support diagonal cases")
+
+ if reverse_input_order:
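+ # e.g. "ab,bc->ac" becomes "bc,ab->ac", with the operand shapes swapped to match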
+ input_output_strings = equation.split('->')
+ input_strings = input_output_strings[0].split(',')
+ equation = input_strings[1] + ',' + input_strings[0] + '->' + input_output_strings[1]
+ input_shapes = [input_shapes[1], input_shapes[0]]
+
+ @make_tf_graph(input_shapes)
+ def build_model(x, y):
+ return tf.einsum(equation, x, y)
+
+ model, inputs, outputs = build_model
+
+ input_values = [
+ random_gen(input_shapes[0], -1, 1),
+ random_gen(input_shapes[1], -1, 1),
+ ]
+
+ input_dict = dict(zip(inputs, input_values))
+ TensorFlowBaseTest.run_compare_tf(
+ model,
+ input_dict,
+ outputs,
+ compute_unit=compute_unit,
+ backend=backend,
+ )
+
+
+class TestElementWiseUnary(TensorFlowBaseTest):
+ _FP16_UNSUPPORTED = {'acos', 'asin', 'atan', 'atanh', 'cosh', 'sinh'}
+
+ @pytest.mark.parametrize(
+ "compute_unit, backend, rank, mode",
+ itertools.product(
+ compute_units,
+ backends,
+ [1, 2, 5],
+ [
+ "abs",
+ "acos",
+ "asin",
+ "atan",
+ "atanh",
+ "cast",
+ "ceil",
+ "clip",
+ "cos",
+ "cosh",
+ "erf",
+ "exp",
+ "floor",
+ "inverse",
+ "log",
+ "negative",
+ "round",
+ "rsqrt",
+ "sign",
+ "sin",
+ "sinh",
+ "sqrt",
+ "square",
+ "tan",
+ "tanh",
+ ],
+ ),
+ )
+ def test_unary(self, compute_unit, backend, rank, mode):
+ _PREBUILD_WHEEL_SEGFAULTING_MODE = ["acos", "asin", "atan", "atanh", "cosh", "sinh"]
+
+ if compute_unit != ct.ComputeUnit.CPU_ONLY and mode in self._FP16_UNSUPPORTED:
+ return
+
+ if _get_version(tf.__version__) == StrictVersion(PREBUILT_TF1_WHEEL_VERSION):
+ if mode in _PREBUILD_WHEEL_SEGFAULTING_MODE:
+ # we should re-enable these tests after this radar rdar://100735561 ([CI] Build a more stable TF1 Rosetta wheel for the lightning CI) is fixed
+ pytest.skip("Prebuilt wheel segfaulting on several functions.")
+
+ if _macos_version() < (13, 0):
+ if backend == ("mlprogram", "fp16") and _is_macos():
+ pytest.skip("Requires macOS13 or greater")
+ elif compute_unit != ct.ComputeUnit.CPU_ONLY:
+ pytest.skip("GPU issue fixed in iOS16/macOS13")
+ else:
+ dtype = np.float32
+ tf_dtype = tf.float32
+
+ atol, rtol = 1e-4, 1e-5
+ input_shape = np.random.randint(low=2, high=4, size=rank)
+
+ if backend == ("mlprogram", "fp16") and mode != "clip":
+ # For the clip mode with tf.float16 as input, it seems like the tf graph is producing wrong results.
+ # It looks like a tensorflow bug, tracked by this radar:
+ # rdar://96850184 (Tensor clip_by_value is producing wrong numerical outputs with tf.float16 type input)
+ dtype = np.float16
+ tf_dtype = tf.float16
+ else:
+ dtype = np.float32
+ tf_dtype = tf.float32
+
+ def cast_func(x):
+ return tf.cast(x, dtype=tf.int32)
+
+ def clip_func(x):
+ return tf.clip_by_value(x, clip_value_min=0.0, clip_value_max=5.0)
+
+ def _get_test(test_mode):
+ if test_mode == "abs":
+ res = tf.abs
+ val = random_gen(input_shape, rand_min=-1, rand_max=1)
+ elif test_mode == "acos":
+ res = tf.acos
+ val = random_gen(input_shape, rand_min=-1, rand_max=1)
+ elif test_mode == "asin":
+ res = tf.asin
+ val = random_gen(input_shape, rand_min=-1, rand_max=1)
+ elif test_mode == "atan":
+ res = tf.atan
+ val = random_gen(input_shape, rand_min=-100, rand_max=100)
+ elif test_mode == "atanh":
+ res = tf.atanh
+ val = random_gen(input_shape, rand_min=-0.9, rand_max=0.9)
+ elif test_mode == "cast":
+ eps_from_int = 0.0
+ if compute_unit != ct.ComputeUnit.CPU_ONLY:
+ eps_from_int = 0.1
+ res = cast_func
+ val = random_gen(
+ input_shape,
+ rand_min=-10,
+ rand_max=10,
+ eps_from_int=eps_from_int,
+ dtype=dtype,
+ )
+ elif test_mode == "ceil":
+ res = tf.math.ceil
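+ # keep GPU inputs away from exact integers so that fp16 rounding
+ # cannot flip the ceil result (same rationale as the cast branch above) +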
eps_from_int = 0.0 + if compute_unit != ct.ComputeUnit.CPU_ONLY: + eps_from_int = 0.1 + val = random_gen( + input_shape, + rand_min=-100, + rand_max=100, + eps_from_int=eps_from_int, + dtype=dtype, + ) + elif test_mode == "clip": + if compute_unit != ct.ComputeUnit.CPU_ONLY: + return None, None # clip does not support float16 + res = clip_func + val = random_gen(input_shape, rand_min=-5, rand_max=10) + elif test_mode == "cos": + res = tf.cos + rand_range = 1000 + if compute_unit != ct.ComputeUnit.CPU_ONLY: + rand_range = 10 + val = random_gen(input_shape, rand_min=-rand_range, rand_max=rand_range) + elif test_mode == "cosh": + res = tf.cosh + val = random_gen(input_shape, rand_min=-4, rand_max=4) + elif test_mode == "erf": + res = tf.math.erf + val = random_gen(input_shape, rand_min=1, rand_max=6) + elif test_mode == "exp": + if compute_unit != ct.ComputeUnit.CPU_ONLY: + # We skip GPU here, since exp(1) already differs in backend. + return None, None + res = tf.exp + val = random_gen(input_shape, rand_min=-4, rand_max=4) + elif test_mode == "floor": + res = tf.floor + eps_from_int = 0.0 + if compute_unit != ct.ComputeUnit.CPU_ONLY: + eps_from_int = 0.1 + val = random_gen( + input_shape, + rand_min=-100, + rand_max=100, + eps_from_int=eps_from_int, + dtype=dtype, + ) + elif test_mode == "inverse": + res = tf.math.reciprocal + val = random_gen(input_shape, rand_min=0.1, rand_max=10) + elif test_mode == "log": + res = tf.math.log + val = random_gen(input_shape, rand_min=0.2, rand_max=1000) + elif test_mode == "negative": + res = tf.math.negative + val = random_gen(input_shape, rand_min=-100.0, rand_max=100.0) + elif test_mode == "round": + res = tf.round + val = random_gen( + input_shape, rand_min=-1000, rand_max=1000, dtype=dtype + ) + elif test_mode == "rsqrt": + res = tf.math.rsqrt + val = random_gen(input_shape, rand_min=0.5, rand_max=1000) + elif test_mode == "sign": + res = tf.sign + val = random_gen(input_shape, rand_min=-5, rand_max=5) + elif test_mode == "sin": + res = tf.sin + rand_range = 1000 + if compute_unit != ct.ComputeUnit.CPU_ONLY: + rand_range = 10 + val = random_gen(input_shape, rand_min=-rand_range, rand_max=rand_range) + elif test_mode == "sinh": + res = tf.sinh + val = random_gen(input_shape, rand_min=-10, rand_max=10) + elif test_mode == "sqrt": + res = tf.sqrt + val = random_gen(input_shape, rand_min=0.5, rand_max=1000) + elif test_mode == "square": + res = tf.math.square + val = random_gen(input_shape, rand_min=-5, rand_max=5) + elif test_mode == "tan": + res = tf.tan + val = random_gen(input_shape, rand_min=-1000, rand_max=1000) + elif test_mode == "tanh": + res = tf.tanh + val = random_gen(input_shape, rand_min=-1000, rand_max=1000) + + return res, val + + func, input_val = _get_test(mode) + if func is None: + return + + input_type = list(input_shape) + [tf_dtype] + @make_tf_graph([input_type]) + def build_model(x): + return func(x) + + model, inputs, outputs = build_model + + input_dict = dict(zip(inputs, [input_val.astype(dtype)])) + + if mode == "inverse" or mode == "rsqrt": + atol, rtol = 1e-2, 1e-3 + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + atol=atol, + rtol=rtol, + minimum_deployment_target=ct.target.iOS16 if backend == ("mlprogram", "fp16") else None, + ) + + +class TestImageResizing(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_shape, target_shape, align_corners, half_pixel_centers", + itertools.product( + compute_units, + backends, + [(1, 10, 
20, 1), (2, 5, 1, 3)], + [(25, 30), (2, 20)], + [True, False], + [True, False], + ), + ) + def test_resize_bilinear( + self, + compute_unit, + backend, + input_shape, + target_shape, + align_corners, + half_pixel_centers, + ): + if half_pixel_centers and align_corners: + return + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.raw_ops.ResizeBilinear( + images=x, + size=target_shape, + half_pixel_centers=half_pixel_centers, + align_corners=align_corners, + ) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape, -100, 100)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, input_shape, scale_factor, align_corners, half_pixel_centers", + itertools.product( + compute_units, + backends, + [(1, 10, 20, 1), (2, 5, 2, 3)], + [(2, 3),], + [True, False], + [True, False], + ), + ) + def test_resize_bilinear_dynamic_shape( + self, + compute_unit, + backend, + input_shape, + scale_factor, + align_corners, + half_pixel_centers, + ): + if backend[0] == "neuralnetwork" or ct.utils._macos_version() < (13, 0): + pytest.skip("half_pixel_centers only support for iOS16 upsample_bilinear layer") + + if half_pixel_centers and align_corners: + pytest.skip("half_pixel_centers and align_corners cannot be both True") + + batch_dim, _, _, channel = input_shape + h_factor, w_factor = scale_factor + + @make_tf_graph([(batch_dim, None, None, channel, tf.float32)]) + def build_model(x): + input_shape = tf.shape(x) + target_shape = tf.math.multiply(input_shape[1:3], (h_factor, w_factor)) + return tf.raw_ops.ResizeBilinear( + images=x, + size=target_shape, + half_pixel_centers=half_pixel_centers, + align_corners=align_corners, + ) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape, -1, 1)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, input_shape, upsample_factor, data_format", + itertools.product( + compute_units, + backends, + [(1, 1, 1, 3), (1, 10, 5, 3)], + [(1, 2), (4, 3)], + ["channels_last", "channels_first"], + ), + ) + def test_upsampling_2d( + self, compute_unit, backend, input_shape, upsample_factor, data_format + ): + if data_format == "channels_last": + input_shape = ( + input_shape[0], + input_shape[2], + input_shape[3], + input_shape[1], + ) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.keras.layers.UpSampling2D( + size=upsample_factor, data_format=data_format, interpolation="nearest" + )(x) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape, -100, 100)] + input_dict = dict(zip(inputs, input_values)) + spec = TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + )[0] + # also check if the scale factor are integers + if backend[0] == 'neuralnetwork': + for layer in spec.neuralNetwork.layers: + if layer.WhichOneof('layer') == "upsample": + assert len(layer.upsample.fractionalScalingFactor) == 0 + + @pytest.mark.parametrize( + "compute_unit, backend, input_shape, num_of_crops, crop_size, method, dynamic, extrapolation_value", + itertools.product( + compute_units, + backends, + [(1, 64, 64, 1)], + [1, 3, 5], + [(2, 2), (1, 1), 
(4, 4), (128, 128)], + ["bilinear"], + [False, True], + [0.0, 1.0], + ), + ) + def test_crop_and_resize( + self, + compute_unit, + backend, + input_shape, + num_of_crops, + crop_size, + method, + dynamic, + extrapolation_value, + ): + if backend[0] == "mlprogram" and compute_unit != ct.ComputeUnit.CPU_ONLY and crop_size == (1, 1): + # In this case, there is a numerical mismatch on the GPU MIL backend. The GPU runtime tests are + # tracked separately. + return + + if extrapolation_value != 0.0: + if backend[0] == "neuralnetwork": + pytest.xfail("pad_value not available in the neuralnetwork backend.") + if ct.utils._macos_version() < (13, 0): + pytest.skip("pad_value not supported on macOS 12 or older.") + minimum_deployment_target = ct.target.iOS16 + else: + minimum_deployment_target = None + + # rdar://98749492 (crop_resize is unstable for cropping out of bound setting in fp16) + if backend[0] == "mlprogram": + backend = ("mlprogram", "fp32") + + # TODO(rdar://98749492): Once resolved, set crop_bias = 0.5 in order to test the crop outside the image + crop_bias = 0.0 + + input = np.random.randn(*input_shape).astype(np.float32) + boxes = np.random.uniform(size=(num_of_crops, 4)).astype(np.float32) + crop_bias + box_indices = np.random.randint( + size=(num_of_crops,), low=0, high=input_shape[0] + ).astype(np.int32) + + def test_static(): + @make_tf_graph([input_shape]) + def build_model(x): + return tf.raw_ops.CropAndResize( + image=x, + boxes=boxes, + box_ind=box_indices, + crop_size=crop_size, + method=method, + extrapolation_value=extrapolation_value, + ) + + model, inputs, outputs = build_model + input_values = [input] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=minimum_deployment_target, + ) + + def test_dynamic(): + @make_tf_graph([input_shape, boxes.shape, list(box_indices.shape) + [tf.int32]]) + def build_model(x, boxes_pl, box_indices_pl): + return tf.raw_ops.CropAndResize( + image=x, + boxes=boxes_pl, + box_ind=box_indices_pl, + crop_size=crop_size, + method=method, + extrapolation_value=extrapolation_value + ) + model, inputs, outputs = build_model + input_values = [input, boxes, box_indices] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=minimum_deployment_target, + ) + + test_dynamic() if dynamic else test_static() + + @pytest.mark.parametrize( + "compute_unit, backend, width, height, strides, sizes, padding,", + itertools.product( + compute_units, + backends, + [1, 3, 5], + [2, 7, 12], + [(1, 1), (2, 1), (3, 5)], + [(1, 1), (1, 2), (5, 4)], + ["VALID", "SAME"], + ), + ) + def test_extract_patches( + self, compute_unit, backend, width, height, strides, sizes, padding + ): + # TODO: theoretically, the current extract_patches code can handle batch sizes greater than 1, + # but there seems to be a bug in crop_resize when using GPU and batch_size > 1. + # We should test batch_size > 1 after the issue is fixed.
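+ # Illustrative shape example: with padding="VALID", height=7, width=5, sizes=(5, 4) + # and strides=(2, 1), each 5x4 patch is flattened into the channel axis, giving an + # output of shape [1, 2, 2, 5 * 4 * 128].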
+ # + input = np.random.rand(1, height, width, 128).astype(np.float32) + if padding == "VALID": + size_h = min(sizes[0], height) + size_w = min(sizes[1], width) + else: + size_h = sizes[0] + size_w = sizes[1] + + @make_tf_graph([input.shape]) + def build_model(x): + return tf.compat.v1.image.extract_image_patches( + images=x, + ksizes=[1, size_h, size_w, 1], + strides=[1, strides[0], strides[1], 1], + rates=[1, 1, 1, 1], + padding=padding, + ) + model, inputs, outputs = build_model + input_values = [input] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestLinear(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, dim, transpose_a, transpose_b, use_constant", + itertools.product( + compute_units, + backends, + [2, 4, 8], + [True, False], + [True, False], + [True, False], + ), + ) + def test_matmul( + self, compute_unit, backend, dim, transpose_a, transpose_b, use_constant + ): + shape_x = np.array([dim, dim * 2, dim * 4]) + shape_y = np.array([dim * 4, dim * 2]) + + flip = (not transpose_a and transpose_b) or (transpose_a and not transpose_b) + shape_y = np.flip(shape_y) if flip else shape_y + + if not use_constant: + + @make_tf_graph([shape_x, shape_y]) + def build_model(x, y): + return tf.linalg.matmul( + x, y, transpose_a=transpose_a, transpose_b=transpose_b + ) + + input_values = [ + random_gen(shape=shape_x, rand_min=-100, rand_max=100), + random_gen(shape=shape_y, rand_min=-1.0, rand_max=1.0), + ] + else: + y = random_gen(shape=shape_y, rand_min=-1.0, rand_max=1.0) + + @make_tf_graph([shape_x]) + def build_model(x): + return tf.linalg.matmul( + x, y, transpose_a=transpose_a, transpose_b=transpose_b + ) + + input_values = [random_gen(shape=shape_x, rand_min=-100, rand_max=100)] + + model, inputs, outputs = build_model + + input_dict = dict(zip(inputs, input_values)) + + proto, _, _, _, _, _ = TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + for layer in proto.neuralNetwork.layers: + if layer.WhichOneof("layer") == "batchedMatmul": + wp = layer.batchedMatmul.weights + if use_constant: + assert len(wp.floatValue) != 0 + else: + assert len(wp.floatValue) == 0 + + +class TestBatchNormalization(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, shape_mode, epsilon", + itertools.product( + compute_units, + backends, + [rank for rank in range(3, 6)], + [True, False], + [1e-1, 1e-10], + ), + ) + def test_batch_norm(self, compute_unit, backend, rank, shape_mode, epsilon): + input_shape = np.random.randint(low=1, high=4, size=rank) + if shape_mode: + # same shape with 1 for being normalized over + attr_shape = list(input_shape) + attr_shape[1] = 1 + attr_shape[2] = 1 + else: + # 1D tensor of the same size as channel dimension + attr_shape = [list(input_shape)[-1]] + + @make_tf_graph([input_shape, attr_shape, attr_shape, attr_shape, attr_shape]) + def build_model(x, m, v, o, s): + return tf.nn.batch_normalization( + x, mean=m, variance=v, offset=o, scale=s, variance_epsilon=epsilon + ) + + model, inputs, outputs = build_model + + input_values = [ + random_gen(shape=input_shape, rand_min=-100.0, rand_max=100.0), + random_gen(shape=attr_shape, rand_min=-1.0, rand_max=1.0), + random_gen(shape=attr_shape, rand_min=0.0, rand_max=10.0), + random_gen(shape=attr_shape, rand_min=1.0, rand_max=10.0), + random_gen(shape=attr_shape, rand_min=-1.0, 
rand_max=1.0), + ] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + atol=.2, + rtol=1e-4, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank, shape_mode, epsilon, scale_after_normalization", + itertools.product( + compute_units, + backends, + [rank for rank in range(3, 6)], + [True, False], + [1e-1, 1e-10], + [True, False], + ), + ) + def test_batch_norm_with_global_normalization( + self, + compute_unit, + backend, + rank, + shape_mode, + epsilon, + scale_after_normalization, + ): + input_shape = np.random.randint(low=1, high=4, size=rank) + if shape_mode: + # same shape with 1 for being normalized over + attr_shape = list(input_shape) + attr_shape[1] = 1 + attr_shape[2] = 1 + else: + # 1D tensor of the same size as channel dimension + attr_shape = [list(input_shape)[-1]] + + if scale_after_normalization: + + @make_tf_graph( + [input_shape, attr_shape, attr_shape, attr_shape, attr_shape] + ) + def build_model(x, m, v, b, g): + return tf.nn.batch_norm_with_global_normalization( + x, + mean=m, + variance=v, + beta=b, + gamma=g, + variance_epsilon=epsilon, + scale_after_normalization=scale_after_normalization, + ) + + else: + + @make_tf_graph([input_shape, attr_shape, attr_shape, attr_shape]) + def build_model(x, m, v, b): + return tf.nn.batch_norm_with_global_normalization( + x, + mean=m, + variance=v, + beta=b, + gamma=None, + variance_epsilon=epsilon, + scale_after_normalization=scale_after_normalization, + ) + + model, inputs, outputs = build_model + + input_values = [ + random_gen(shape=input_shape, rand_min=-100.0, rand_max=100.0), + random_gen(shape=attr_shape, rand_min=-1.0, rand_max=1.0), + random_gen(shape=attr_shape, rand_min=0.0, rand_max=10.0), + random_gen(shape=attr_shape, rand_min=1.0, rand_max=10.0), + ] + if scale_after_normalization: + input_values.append( + random_gen(shape=attr_shape, rand_min=-1.0, rand_max=1.0) + ) + + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + atol=0.2, + rtol=1e-4, + ) + + +class TestNormalization(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, epsilon", + itertools.product( + compute_units, + backends, + [1e-1, 1e-10] + ), + ) + def test_fused_batch_norm(self, compute_unit, backend, epsilon): + # TensorFlow's FusedBatchNorm is only for 4D inputs + input_shape = np.random.randint(low=1, high=4, size=4) + attr_shape = [list(input_shape)[-1]] + + m = random_gen(shape=attr_shape, rand_min=-1.0, rand_max=1.0) + v = random_gen(shape=attr_shape, rand_min=0.0, rand_max=10.0) + o = random_gen(shape=attr_shape, rand_min=1.0, rand_max=10.0) + s = random_gen(shape=attr_shape, rand_min=-1.0, rand_max=1.0) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.compat.v1.nn.fused_batch_norm( + x, + mean=m, + variance=v, + offset=o, + scale=s, + epsilon=epsilon, + is_training=False, + )[0] + + model, inputs, outputs = build_model + + input_values = [random_gen(shape=input_shape, rand_min=-100.0, rand_max=100.0)] + + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + atol=1e-2, + rtol=1e-3, + ) + +class TestL2Normalization(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, axes, epsilon", + itertools.product( + compute_units, + 
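# The axes below cover a single trailing axis, a single interior axis, and a multi-axis case. +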
backends, + [rank for rank in range(3, 6)], + [(-1,), (-2,), (0, 1)], + [1e-5, 1e-10], + ), + ) + def test_l2_normalize(self, compute_unit, backend, rank, axes, epsilon): + input_shape = np.random.randint(low=1, high=4, size=rank) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.math.l2_normalize(x, axis=axes, epsilon=epsilon) + + model, inputs, outputs = build_model + + input_values = [random_gen(input_shape, rand_min=-10, rand_max=10)] + + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + atol=0.05, + rtol=1e-4, + ) + +class TestLocalResponseNormalization(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, size, alpha, beta, k", + itertools.product( + compute_units, + backends, + [1, 2, 3], + [0.0001, 0.01], + [0.75, 1.0], + [1.0, 2.0], + ), + ) + def test_local_response_normalization( + self, compute_unit, backend, size, alpha, beta, k + ): + # TensorFlow's local_response_normalization only supports rank 4 + input_shape = np.random.randint(low=3, high=4, size=4) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.nn.local_response_normalization( + x, depth_radius=size, bias=k, alpha=alpha, beta=beta + ) + + model, inputs, outputs = build_model + + input_values = [random_gen(shape=input_shape, rand_min=-100, rand_max=100)] + + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + atol=1e-2, + rtol=1e-3, + ) + + +class TestPool1d(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, kernel_sizes, strides, pad_type", + itertools.product( + compute_units, + backends, + [(1,)], + [(1,), (2,)], + ["same", "valid"], + ), + ) + def test_avg_pool_1d(self, compute_unit, backend, kernel_sizes, strides, pad_type): + input_shape = np.random.randint(low=2, high=4, size=3) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.nn.avg_pool1d( + x, ksize=kernel_sizes[:], strides=strides[:], padding=pad_type.upper() + ) + + model, inputs, outputs = build_model + input_values = [random_gen(shape=input_shape, rand_min=-100, rand_max=100)] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend, kernel_sizes, strides, pad_type", + itertools.product( + compute_units, + backends, + [(1,)], + [(1,), (2,)], + ["same", "valid"], + ), + ) + def test_max_pool_1d(self, compute_unit, backend, kernel_sizes, strides, pad_type): + input_shape = np.random.randint(low=2, high=4, size=3) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.nn.max_pool1d( + x, ksize=kernel_sizes[:], strides=strides[:], padding=pad_type.upper() + ) + + model, inputs, outputs = build_model + input_values = [random_gen(shape=input_shape, rand_min=-100, rand_max=100)] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + +class TestPool2d(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, kernel_sizes, strides, pad_type", + itertools.product( + compute_units, + backends, + [(1,), (2,), (1, 1), (1, 2), (2, 2)], + [(1,), (2,), (1, 1), (1, 2), (2, 2)], + ["same", "valid"], + ), + ) + def 
test_avg_pool_2d(self, compute_unit, backend, kernel_sizes, strides, pad_type): + input_shape = np.random.randint(low=2, high=4, size=4) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.nn.avg_pool( + x, ksize=kernel_sizes[:], strides=strides[:], padding=pad_type.upper() + ) + + model, inputs, outputs = build_model + input_values = [random_gen(shape=input_shape, rand_min=-100, rand_max=100)] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend, kernel_sizes, strides, pad_type", + itertools.product( + compute_units, + backends, + [(1,), (2,), (1, 1), (1, 2), (2, 2)], + [(1,), (2,), (1, 1), (1, 2), (2, 2)], + ["same", "valid"], + ), + ) + def test_max_pool_2d(self, compute_unit, backend, kernel_sizes, strides, pad_type): + input_shape = np.random.randint(low=2, high=4, size=4) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.nn.max_pool( + x, ksize=kernel_sizes[:], strides=strides[:], padding=pad_type.upper() + ) + + model, inputs, outputs = build_model + input_values = [random_gen(shape=input_shape, rand_min=-100, rand_max=100)] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + +class TestPool3d(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, kernel_sizes, strides, pad_type", + itertools.product( + compute_units, + backends, + [(1,), (2,), (1, 1, 1), (1, 2, 3), (2, 2, 3), (3, 3, 3)], + [(1,), (2,), (1, 1, 1), (1, 2, 3), (2, 2, 3), (3, 3, 3)], + ["same", "valid"], + ), + ) + def test_avg_pool_3d(self, compute_unit, backend, kernel_sizes, strides, pad_type): + input_shape = np.random.randint(low=3, high=4, size=5) + + if kernel_sizes[0] == 1 and pad_type == "same": + pytest.xfail("rdar://81630684 (Pool3d with pad type == same fails from TF2.5 onwards)") + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.nn.avg_pool3d( + x, ksize=kernel_sizes[:], strides=strides[:], padding=pad_type.upper() + ) + + model, inputs, outputs = build_model + input_values = [random_gen(shape=input_shape, rand_min=-100, rand_max=100)] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend, kernel_sizes, strides, pad_type", + itertools.product( + compute_units, + backends, + [(1,), (2,), (1, 1, 1), (1, 2, 3), (2, 2, 3), (3, 3, 3)], + [(1,), (2,), (1, 1, 1), (1, 2, 3), (2, 2, 3), (3, 3, 3)], + ["same", "valid"], + ), + ) + def test_max_pool_3d(self, compute_unit, backend, kernel_sizes, strides, pad_type): + input_shape = np.random.randint(low=3, high=4, size=5) + + if kernel_sizes[0] == 1 and pad_type == "same": + pytest.xfail("rdar://81630684 (Pool3d with pad type == same fails from TF2.5 onwards)") + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.nn.max_pool3d( + x, ksize=kernel_sizes[:], strides=strides[:], padding=pad_type.upper() + ) + + model, inputs, outputs = build_model + input_values = [random_gen(shape=input_shape, rand_min=-100, rand_max=100)] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + +class 
TestPrint(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [size for size in range(1, 5)], + ), + ) + def test_print(self, compute_unit, backend, rank): + shape = np.random.randint(low=1, high=4, size=rank).astype(np.int32) + + @make_tf_graph([shape]) + def build_model(x): + print_layer = tf.raw_ops.Print(input=x, data=[]) + res = print_layer + 1 + return res + + model, inputs, outputs = build_model + input_value = [random_gen(shape=shape, rand_min=-100, rand_max=100)] + input_dict = dict(zip(inputs, input_value)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + +class TestRandom(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, size, rank, constant", + itertools.product( + compute_units, + backends, + [1, 4], + [1, 5], + [True, False], + ), + ) + def test_random_binomial(self, compute_unit, backend, size, rank, constant): + if not constant and backend[0] != "neuralnetwork": + return # dynamic input is only support in neuralnetwork backend + + shape = np.random.randint(low=1, high=4, size=rank).astype(np.int32) + @make_tf_graph([shape]) + def build_model(x): + if constant: + ref = tf.add(x, tf.keras.backend.random_binomial(shape=shape, p=1.0)) + else: + ref = tf.add( + x, + tf.keras.backend.random_binomial( + shape=tf.raw_ops.Shape(input=x), p=1.0 + ), + ) + return ref + + model, inputs, outputs = build_model + input_value = [random_gen(shape=shape)] + input_dict = dict(zip(inputs, input_value)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + + @pytest.mark.parametrize( + "compute_unit, backend, size", + itertools.product( + compute_units, + backends, + [1, 4] + ), + ) + def test_random_categorical(self, compute_unit, backend, size): + # TensorFlow's input is 2-D tensor with shape [batch_size, num_classes]. 
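+ # tf.random.categorical returns samples of shape [batch_size, size]; the logits and + # the multiplier y below are all zeros, so the compared output is deterministically + # zero regardless of which classes are sampled.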
+ shape = np.random.randint(low=1, high=4, size=2) + y_shape = (1,) + @make_tf_graph([shape, y_shape]) + def build_model(x, y): + x = tf.random.categorical(x, size) + x = tf.cast(x, dtype=tf.float32) + return x * y + + model, inputs, outputs = build_model + input_value = [np.zeros(shape).astype(np.float32), np.zeros(y_shape).astype(np.float32)] + input_dict = dict(zip(inputs, input_value)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, mean, rank, constant", + itertools.product( + compute_units, + backends, + [0.0], + [1, 5], + [True, False], + ), + ) + def test_random_normal(self, compute_unit, backend, mean, rank, constant): + if not constant and backend[0] != "neuralnetwork": + return # dynamic input is only support in neuralnetwork backend + + shape = np.random.randint(low=1, high=4, size=rank).astype(np.int32) + @make_tf_graph([shape]) + def build_model(x): + if constant: + ref = tf.add(x, tf.random.normal(shape=shape, mean=mean, stddev=0.0)) + else: + ref = tf.add( + x, + tf.random.normal( + shape=tf.raw_ops.Shape(input=x), mean=mean, stddev=0.0 + ), + ) + return ref + + model, inputs, outputs = build_model + input_value = [random_gen(shape=shape)] + input_dict = dict(zip(inputs, input_value)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, mean, rank, constant", + itertools.product( + compute_units, + backends, + [0.0], + [1, 5], + [True, False], + ), + ) + def test_keras_random_normal(self, compute_unit, backend, mean, rank, constant): + if not constant and backend[0] != "neuralnetwork": + return # dynamic input is only support in neuralnetwork backend + + shape = np.random.randint(low=1, high=4, size=rank).astype(np.int32) + @make_tf_graph([shape]) + def build_model(x): + if constant: + ref = tf.add(x, tf.keras.backend.random_normal(shape=shape, mean=mean, stddev=0.0)) + else: + ref = tf.add( + x, + tf.keras.backend.random_normal( + shape=tf.raw_ops.Shape(input=x), mean=mean, stddev=0.0 + ), + ) + return ref + + model, inputs, outputs = build_model + input_value = [random_gen(shape=shape)] + input_dict = dict(zip(inputs, input_value)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, low, high, rank, constant", + itertools.product( + compute_units, + backends, + [0.0], + [0.0], + [1], + [True, False], + ), + ) + def test_random_uniform(self, compute_unit, backend, low, high, rank, constant): + if not constant and backend[0] != "neuralnetwork": + return # dynamic input is only support in neuralnetwork backend + + shape = np.random.randint(low=1, high=4, size=rank).astype(np.int32) + @make_tf_graph([shape]) + def build_model(x): + if constant: + ref = tf.add(x, tf.random.uniform(shape=shape, minval=low, maxval=high)) + else: + ref = tf.add( + x, + tf.random.uniform( + shape=tf.raw_ops.Shape(input=x), minval=low, maxval=high + ), + ) + return ref + + model, inputs, outputs = build_model + input_value = [random_gen(shape=shape)] + input_dict = dict(zip(inputs, input_value)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, low, high, rank, constant", + 
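# With minval == maxval the uniform distribution is degenerate, so the random output + # is deterministic and safe to compare across frameworks. +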
itertools.product( + compute_units, + backends, + [1.0], + [1.0], + [rank for rank in range(1, 6)], + [True, False], + ), + ) + def test_keras_random_uniform( + self, compute_unit, backend, low, high, rank, constant + ): + if not constant and backend[0] != "neuralnetwork": + return # dynamic input is only support in neuralnetwork backend + shape = np.random.randint(low=1, high=4, size=rank).astype(np.int32) + + @make_tf_graph([shape]) + def build_model(x): + if constant: + ref = tf.add(x, tf.keras.backend.random_uniform(shape=shape, minval=low, maxval=high)) + else: + ref = tf.add( + x, + tf.keras.backend.random_uniform( + shape=tf.raw_ops.Shape(input=x), minval=low, maxval=high + ), + ) + return ref + + model, inputs, outputs = build_model + input_value = [random_gen(shape=shape)] + input_dict = dict(zip(inputs, input_value)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +@pytest.mark.skipif(_macos_version() < (10, 16), + reason="This only works for 'neuralnetwork' on macOS 11") +class TestReduction(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank_and_axes, keep_dims, tf_op", + itertools.product( + compute_units, + backends, + [ + (1, (-1,)), + (2, (0,)), + (2, (-1, 0)), + (3, (1, -3)), + (3, (-2,)), + (3, (-3, -2, -1)), + (4, (0, 1, 2)), + (4, (-2, -1, 0)), + (4, (1, -2)), + (5, (-3, -1)), + (5, (-2, -1)), + (5, (-3, -2, -1)), + (5, (0, -1, 1, -2)), + (3, None), + (5, None), + (3, 1), + ], + [True, False], + [ + tf.reduce_all, + tf.math.reduce_euclidean_norm, + tf.reduce_max, + tf.reduce_mean, + tf.reduce_min, + tf.reduce_prod, + tf.reduce_sum, + tf.reduce_any, + tf.reduce_logsumexp, + tf.math.argmax, + tf.math.argmin, + ], + ), + ) + def test_reduction(self, compute_unit, backend, rank_and_axes, keep_dims, tf_op): + rank, axes = rank_and_axes + shape = np.random.randint(low=1, high=3, size=rank) + + def parse_axes(axes): + if axes is None: + axes = 0 + elif isinstance(axes, (tuple, list)): + axes = axes[0] + return axes + + def test_tf_argmax(): + @make_tf_graph([shape]) + def build_model(x): + return tf.math.argmax(x, axis=parse_axes(axes)) + + model, inputs, outputs = build_model + input_values = [random_gen(shape, rand_min=-5.0, rand_max=5.0)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + def test_tf_argmin(): + @make_tf_graph([shape]) + def build_model(x): + return tf.math.argmin(x, axis=parse_axes(axes)) + + model, inputs, outputs = build_model + input_values = [random_gen(shape, rand_min=-5.0, rand_max=5.0)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + def test_tf_reduction(): + if isinstance(axes, list) and axes and len(axes) == rank and not keep_dims: + return + + input_type = list(shape) + x_val = random_gen(shape=shape, rand_min=-5.0, rand_max=5.0) + if tf_op in {tf.reduce_all, tf.reduce_any}: + input_type += [tf.bool] + x_val = np.random.randint(low=0, high=2, size=shape).astype(bool) + elif tf_op in {tf.math.reduce_euclidean_norm}: + x_val = random_gen(shape=shape, rand_min=0.0, rand_max=10.0) + elif tf_op in {tf.reduce_prod}: + x_val = random_gen(shape=shape, rand_min=1.0, rand_max=1.3) + elif tf_op in {tf.reduce_logsumexp}: + x_val = random_gen(shape=shape, rand_min=-5, rand_max=5) + + 
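# The value ranges above are op-specific: reduce_prod stays near 1.0 to avoid + # overflow, reduce_all/reduce_any take boolean inputs, and reduce_euclidean_norm + # uses non-negative values. +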
@make_tf_graph([input_type]) + def build_model(x): + ref = tf_op(x, axis=axes, keepdims=keep_dims) + if tf_op == tf.reduce_any: + ref = tf.cast(ref, tf.float32) + return ref + + model, inputs, outputs = build_model + input_dict = dict(zip(inputs, [x_val])) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + if tf_op in {tf.math.argmax}: + test_tf_argmax() + elif tf_op in {tf.math.argmin}: + test_tf_argmin() + else: + test_tf_reduction() + +class TestGather(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rankX_rankIndices_axis, mode", + itertools.product( + compute_units, + backends, + [ + (1, 2, -1), + (2, 1, 0), + (3, 2, -2), + (2, 3, 1), + (2, 2, 1), + (1, 1, 0), + (3, 3, -2), + (3, 3, 2), + (3, 3, 0), + (1, 3, -1), + (3, 1, 2), + (3, 1, -1), + ], + ["Gather", "GatherV2", "gather"], + ), + ) + def test_gather_function(self, compute_unit, backend, rankX_rankIndices_axis, mode): + x_rank, indices_rank, axis = rankX_rankIndices_axis + x_shape = np.random.randint(low=2, high=4, size=x_rank) + indices_shape = np.random.randint(low=2, high=4, size=indices_rank) + + @make_tf_graph([x_shape, list(indices_shape) + [tf.int32]]) + def build_model(x, indices): + if mode == "Gather": + res = tf.raw_ops.Gather(params=x, indices=indices) + elif mode == "GatherV2": + res = tf.raw_ops.GatherV2(params=x, indices=indices, axis=axis) + elif mode == "gather": + res = tf.gather(x, indices, axis=axis) + + return res + + model, inputs, outputs = build_model + + axis = 0 if mode == "Gather" else axis + input_dict = {inputs[0]: np.random.rand(*x_shape).astype(np.float32), + inputs[1]: np.random.randint(0, x_shape[axis], size=indices_shape, dtype=np.int32)} + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rankX_rankIndices_axis_batchdims, mode", + itertools.product( + compute_units, + backends, + [ + (2, 2, 1, 0), + (3, 2, 1, 1), + (3, 3, 2, 0), + (3, 3, 2, 1), + (3, 3, 2, 2), + ], + ["GatherV2", "gather"], + ), + ) + def test_gather_with_batch_dims(self, compute_unit, backend, rankX_rankIndices_axis_batchdims, mode): + if _macos_version() < (13, 0) and backend[0] == 'mlprogram': + pytest.skip("Requires macOS 13 or higher") + + x_rank, indices_rank, axis, batch_dims = rankX_rankIndices_axis_batchdims + x_shape = np.random.randint(low=2, high=4, size=x_rank) + indices_shape = np.random.randint(low=2, high=4, size=indices_rank) + indices_shape[:batch_dims] = x_shape[:batch_dims] + + @make_tf_graph([x_shape, list(indices_shape) + [tf.int32]]) + def build_model(x, indices): + if mode == "GatherV2": + res = tf.raw_ops.GatherV2(params=x, indices=indices, axis=axis, batch_dims=batch_dims) + elif mode == "gather": + res = tf.gather(x, indices, axis=axis, batch_dims=batch_dims) + else: + raise ValueError("Unsupported tf op {}".format(mode)) + return res + + model, inputs, outputs = build_model + + axis = 0 if mode == "Gather" else axis + input_dict = {inputs[0]: np.random.rand(*x_shape).astype(np.float32), + inputs[1]: np.random.randint(0, x_shape[axis], size=indices_shape, dtype=np.int32)} + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16 if backend[0] == "mlprogram" else None + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rankX_rankIndices", + itertools.product( + 
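# The pairs below are (x_rank, indices_rank); indices_shape[-1] is drawn from + # [1, x_rank] in the test body. +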
compute_units, + backends, + [ + (1, 2), + (2, 2), + (3, 2), + (2, 3), + (1, 4), + (5, 2), + (2, 5), + (4, 3), + (3, 4), + (2, 4), + (4, 2), + (1, 5), + ], + ), + ) + def test_gather_nd(self, compute_unit, backend, rankX_rankIndices): + x_rank, indices_rank = rankX_rankIndices + x_shape = np.random.randint(low=2, high=4, size=x_rank) + indices_shape = np.random.randint(low=2, high=4, size=indices_rank) + indices_shape[-1] = np.random.randint(low=1, high=x_rank + 1) + + @make_tf_graph([x_shape, list(indices_shape) +[tf.int32]]) + def build_model(x, indices): + return tf.gather_nd(x, indices) + + model, inputs, outputs = build_model + + a = np.random.rand(*x_shape).astype(np.float32) + indices_list = [] + for i in range(indices_shape[-1]): + indices_list.append( + np.random.randint(0, x_shape[i], size=indices_shape[:-1]) + ) + + input_dict = { + inputs[0]: a, + inputs[1]: np.stack(indices_list, axis=-1).astype(np.int32), + } + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rankX_rankIndices_batchdims", + itertools.product( + compute_units, + backends, + [ + (1, 2, 0), + (2, 2, 1), + (3, 5, 2), + (5, 5, 3), + ], + ), + ) + def test_gather_nd_with_batch_dims(self, compute_unit, backend, rankX_rankIndices_batchdims): + if _macos_version() < (13, 0) and backend[0] == 'mlprogram': + pytest.skip("Requires macOS 13 or higher") + + x_rank, indices_rank, batch_dims = rankX_rankIndices_batchdims + x_shape = np.random.randint(low=2, high=4, size=x_rank) + indices_shape = np.random.randint(low=2, high=4, size=indices_rank) + x_shape[:batch_dims] = indices_shape[:batch_dims] + indices_shape[-1] = np.random.randint(low=1, high=x_rank + 1 - batch_dims) + + @make_tf_graph([x_shape, list(indices_shape) +[tf.int32]]) + def build_model(x, indices): + return tf.gather_nd(x, indices, batch_dims=batch_dims) + + model, inputs, outputs = build_model + + a = np.random.rand(*x_shape).astype(np.float32) + indices_list = [] + for i in range(indices_shape[-1]): + indices_list.append( + np.random.randint(0, x_shape[i+batch_dims], size=indices_shape[:-1]) + ) + + input_dict = { + inputs[0]: a, + inputs[1]: np.stack(indices_list, axis=-1).astype(np.int32), + } + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16 if backend[0] == "mlprogram" else None + ) + + +class TestScatter(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, data_rank, indices_rank", + itertools.product( + compute_units, + backends, + list(range(1, 4)), + list(range(2, 4)), + ), + ) + def test_scatter_nd_with_zeros( + self, compute_unit, backend, data_rank, indices_rank + ): + + shape = np.random.randint(low=2, high=4, size=data_rank).astype(np.int32) + indices_shape = np.random.randint(low=2, high=4, size=indices_rank) + indices_shape[-1] = np.random.randint(low=1, high=data_rank + 1) + updates_shape = list(indices_shape[:-1]) + list(shape[indices_shape[-1] :]) + + updates = np.random.rand(*updates_shape).astype(np.int32) + indices_list = [] + for i in range(indices_shape[-1]): + indices_list.append(np.random.randint(0, shape[i], size=indices_shape[:-1])) + + indices = np.stack(indices_list, axis=-1).astype(np.int32) + + @make_tf_graph( + [list(indices.shape) + [tf.int32], updates_shape + [tf.int32], [data_rank, tf.int32]] + ) + def build_model(indices, updates, shape): + return 
tf.raw_ops.ScatterNd(indices=indices, updates=updates, shape=shape) + + model, inputs, outputs = build_model + input_values = [indices, updates, shape] + + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestTensorScatterAdd(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, tensor_rank, indices_rank", + itertools.product( + compute_units, + backends, + # updates_rank = indices_rank - 1 + tensor_rank - indices_shape[-1] <= tensor_rank + indices_rank - 2 + # and Core ML only supports updates_rank < 6, + # so we constrain tensor_rank + indices_rank - 2 < 6 + [tensor_rank for tensor_rank in range(1, 5)], + [indices_rank for indices_rank in range(2, 4)] + ), + ) + def test(self, compute_unit, backend, tensor_rank, indices_rank): + # To avoid indexing out of bounds: + # tensor size for each dimension >= MIN_TENSOR_SIZE + # index for each dimension < MIN_TENSOR_SIZE + MIN_TENSOR_SIZE = 3 + + tensor_shape = np.random.randint(low=MIN_TENSOR_SIZE, high=9, size=tensor_rank) + # indices shape constraint: 0 < indices_shape[-1] <= tensor_rank + indices_shape = np.random.randint(low=1, high=tensor_rank + 1, size=indices_rank) + + # The updates rank and shape are inferred from the tensor and indices. + # Reference: https://www.tensorflow.org/api_docs/python/tf/compat/v1/scatter_nd_add + updates_rank = indices_rank - 1 + tensor_rank - indices_shape[-1] + updates_shape = [] + for i in range(indices_rank - 1): + updates_shape.append(indices_shape[i]) + for i in range(indices_shape[-1], tensor_rank): + updates_shape.append(tensor_shape[i]) + updates_shape = np.array(updates_shape) + + @make_tf_graph([tensor_shape, list(indices_shape) + [tf.int32], updates_shape]) + def build_model(tensor, indices, updates): + return tf.tensor_scatter_nd_add(tensor, indices, updates) + + model, inputs, outputs = build_model + input_values = [ + random_gen(tensor_shape, rand_min=-1.0, rand_max=1.0), + random_gen(indices_shape, rand_min=0, rand_max=MIN_TENSOR_SIZE, dtype=np.int32), + random_gen(updates_shape, rand_min=-1.0, rand_max=1.0), + ] + + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestSliceByIndex(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, masking_type", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 5)], + ["none", "positive_mask", "negative_mask"] + ), + ) + def test_slice_by_index_simple(self, compute_unit, backend, rank, masking_type): + input_shape = np.random.randint(low=2, high=4, size=rank) + begin_val = np.array( + [ + np.random.randint(low=-input_shape[i], high=input_shape[i]) + for i in range(rank) + ] + ).astype(np.int32) + end_val = np.array( + [ + np.random.randint(low=-input_shape[i], high=input_shape[i]) + for i in range(rank) + ] + ).astype(np.int32) + stride_val = np.array( + [ + np.random.randint(low=-input_shape[i], high=input_shape[i]) + for i in range(rank) + ] + ).astype(np.int32) + if masking_type == "none": + begin_mask = [False] * rank + end_mask = [False] * rank + squeeze_mask = [False] * rank + else: + begin_mask = np.array( + [np.random.choice([True, False, False]) for i in range(rank)] + ).astype(bool) + end_mask = np.array( + [np.random.choice([True, False, False]) for i in range(rank)] + ).astype(bool) + squeeze_flag = True + # We do not squeeze
to scalar in nn + while squeeze_flag: + squeeze_mask = np.array( + [np.random.choice([True, False]) for i in range(rank)] + ).astype(bool) + for i in range(rank): + if begin_mask[i] or end_mask[i]: + squeeze_mask[i] = False + for s in squeeze_mask: + if not s: + squeeze_flag = False + + for i in range(rank): + if begin_mask[i] or end_mask[i]: + stride = 0 + while stride == 0: + stride = np.random.randint(low=-input_shape[i], high=input_shape[i]) + stride_val[i] = stride + + if not end_mask[i]: + while True: + end = np.random.randint( + low=-input_shape[i], high=input_shape[i] + ) + normalized_end = input_shape[i] + end if end < 0 else end + if normalized_end == 0 and stride_val[i] > 0: + continue + elif normalized_end == input_shape[i] - 1 and stride_val[i] < 0: + continue + else: + end_val[i] = end + break + continue + if squeeze_mask[i]: + stride_val[i] = 1 + while True: + end = np.random.randint(low=-input_shape[i], high=input_shape[i]) + normalized_end = input_shape[i] + end if end < 0 else end + normalized_begin = ( + input_shape[i] + begin_val[i] if begin_val[i] < 0 else begin_val[i] + ) + if normalized_end == normalized_begin: + continue + if begin_mask[i] or end_mask[i] or squeeze_mask[i]: + stride = 1 + elif normalized_end < normalized_begin: + stride = -np.random.randint(low=1, high=input_shape[i]) + else: + stride = np.random.randint(low=1, high=input_shape[i]) + end_val[i] = end + stride_val[i] = stride + break + + def _mask_to_bit(mask): + ret = 0 + for x in mask[::-1]: + ret <<= 1 + if x: + ret += 1 + if ret > 0 and masking_type == "negative_mask": + ret = ret - 2**rank + return ret + + @make_tf_graph( + [ + input_shape, + list(begin_val.shape) + [tf.int32], + list(end_val.shape) + [tf.int32], + ] + ) + def build_model(x, begin, end): + return tf.strided_slice( + x, + begin, + end, + stride_val, + begin_mask=_mask_to_bit(begin_mask), + end_mask=_mask_to_bit(end_mask), + shrink_axis_mask=_mask_to_bit(squeeze_mask), + ) + + model, inputs, outputs = build_model + + input_values = [ + np.array(list(range(np.prod(input_shape)))) + .reshape(input_shape) + .astype(np.float32), + begin_val, + end_val, + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, testcase", + itertools.product( + compute_units, + backends, + # Change to slice representation for allowing iteration with a non-constant input + [ + ( + slice(1, 2), + slice(1, 2), + slice(1, 2), + ), # equivalent to [1:2, 1:2, 1:2] + (slice(-3, -2), slice(-4, -3), slice(-5, -4)), + (slice(0, -2), slice(0, -1), slice(-3, -2)), + (slice(-1, 0, -2), slice(-1, 1, -1), slice(-1, -3, -3)), + (slice(1, 2), slice(1, 3), slice(1, 4, 2)), + (slice(None, 2), slice(1, 3), slice(None, 4, 2)), + ( + slice(None), + slice(1, None), + slice(None, 4, 2), + ), # equivalent to [:,1:,:4:2] + (slice(1, None, 1), 1, slice(None, 3, 2)), + (slice(None), slice(None), slice(None)), + (slice(1, 2), slice(1, 2), 1), + (slice(1, 2), slice(None), slice(None)), + (slice(None), slice(None), slice(None)), + (slice(1, 2), slice(None), slice(1, 2)), + (slice(None), slice(None), 1), + (0, 0, slice(None)), + (slice(1, 2)), + (slice(1, 2), slice(1, 2)), + (1), + (slice(0, 3)), + (slice(None)), + (slice(None), slice(None), slice(None, None, -1)), + ], + ), + ) + def test_slice_by_index_from_scratch(self, compute_unit, backend, testcase): + input_shape = np.array([3, 4, 5]) + + 
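# The input below is arange-valued (see input_values), so every element is unique + # and any indexing error shows up as a value mismatch rather than a silent pass. +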
@make_tf_graph([input_shape]) + def build_model(x): + return x[testcase] + + model, inputs, outputs = build_model + + input_values = [ + np.array(list(range(np.prod(input_shape)))) + .reshape(input_shape) + .astype(np.float32) + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape_and_slice", + itertools.product( + compute_units, + backends, + [ + [[3], (slice(1, 2))], + [[2,10], (slice(0, 2), slice(None, 8, 2))], + [[2,3,4,5], (slice(None), slice(1, None, 3), slice(None), slice(0, 5))], + [[2,3,4,5], (slice(0, None), slice(None), slice(2, None, 1), slice(None))], + ], + ), + ) + def test_slice_by_index_one_dimension(self, compute_unit, backend, shape_and_slice): + input_shape, testcase = shape_and_slice + + @make_tf_graph([input_shape]) + def build_model(x): + return x[testcase] + + model, inputs, outputs = build_model + + input_values = [ + np.array(list(range(np.prod(input_shape)))) + .reshape(input_shape) + .astype(np.float32) + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_slice_by_index_smoke(self, compute_unit, backend): + input_shape = [1, 64, 2] + x_val = np.random.rand(*input_shape).astype(np.float32) + y_val = np.random.rand(*input_shape).astype(np.float32) + + @make_tf_graph([input_shape, input_shape]) + def build_model(x, y): + x_slice = x[:, :, 0] + y_slice = y[:, :, 0] + return (x_slice, y_slice) + + model, inputs, outputs = build_model + + input_values = [x_val, y_val] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.xfail(reason="ExpandDims exist mismatch", run=False) + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_slice_by_index_with_new_axes(self, compute_unit, backend): + input_shape = [4, 5, 64] + val = np.random.rand(*input_shape).astype(np.float32) + num_cases = 8 + + @make_tf_graph([input_shape] * num_cases) + def build_model(*args): + a, b, c, d, e, f, g, h = args + slice_0 = a[:1, tf.newaxis, :3, :] + slice_1 = b[:, tf.newaxis] + slice_2 = c[..., tf.newaxis] + slice_3 = d[..., tf.newaxis, :, 10] + slice_4 = e[:, 2, tf.newaxis, ...] + slice_5 = f[2, ..., :, tf.newaxis] + slice_6 = g[tf.newaxis, ..., tf.newaxis] + slice_7 = h[tf.newaxis, 2, tf.newaxis, ...] 
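+ # Each slice above places tf.newaxis at a different position (leading, trailing, + # around Ellipsis, mixed with integer indexing) to exercise ExpandDims placement.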
+ + return ( + slice_0, + slice_1, + slice_2, + slice_3, + slice_4, + slice_5, + slice_6, + slice_7, + ) + + model, inputs, outputs = build_model + + input_values = [val] * num_cases + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestSliceBySize(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, single_size, dynamic_size", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 5)], + [True, False], + [True, False], + ), + ) + def test_dynamic_slice_by_size( + self, compute_unit, backend, rank, single_size, dynamic_size + ): + # Test for when either begin or size is determined at runtime. + input_shape = np.random.randint(low=2, high=4, size=rank) + begin_val = np.array( + [np.random.randint(input_shape[i]) for i in range(rank)] + ).astype(np.int32) + size_val = np.array( + [np.random.randint(input_shape[i] - begin_val[i]) + 1 for i in range(rank)] + ) + if single_size: + for r in range(rank): + size_val_r = np.array( + [s if i == r else -1 for i, s in enumerate(size_val)] + ).astype(np.int32) + + @make_tf_graph([input_shape, list(begin_val.shape) + [tf.int32]]) + def build_model(x, begin): + return tf.slice(x, begin, size_val_r) + + @make_tf_graph( + [ + input_shape, + list(begin_val.shape) + [tf.int32], + list(size_val_r.shape) + [tf.int32], + ] + ) + def build_model_dynamic_size(x, begin, size): + return tf.slice(x, begin, size) + + if dynamic_size: + model, inputs, outputs = build_model_dynamic_size + input_values = [ + random_gen(input_shape, rand_min=-100, rand_max=100), + begin_val, + size_val_r, + ] + else: + model, inputs, outputs = build_model + input_values = [ + random_gen(input_shape, rand_min=-100, rand_max=100), + begin_val, + ] + + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + else: + size_val = np.array( + [s if np.random.randint(2) == 0 else -1 for s in size_val] + ).astype(np.int32) + + @make_tf_graph([input_shape, list(begin_val.shape) + [tf.int32]]) + def build_model(x, begin): + return tf.slice(x, begin, size_val) + + @make_tf_graph( + [ + input_shape, + list(begin_val.shape) + [tf.int32], + list(size_val.shape) + [tf.int32], + ] + ) + def build_model_dynamic_size(x, begin, size): + return tf.slice(x, begin, size) + + if dynamic_size: + model, inputs, outputs = build_model_dynamic_size + input_values = [ + random_gen(input_shape, rand_min=-100, rand_max=100), + begin_val, + size_val, + ] + else: + model, inputs, outputs = build_model + input_values = [ + random_gen(input_shape, rand_min=-100, rand_max=100), + begin_val, + ] + + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, begin_size", + itertools.product( + compute_units, + backends, + [ + [[0, 1, 2], [1, 1, 1]], + [[0, 0, 0], [-1, -1, -1]], + [[0, 0, 1], [1, 2, -1]], + [[0, 1, 2], [-1, -1, -1]], + ] + ), + ) + def test_static_slice_by_size( + self, compute_unit, backend, begin_size + ): + # Test for when begin and size are both constant. + input_shape = [1, 2, 3] + begin, size = begin_size + tf_input_shape = input_shape.copy() + + for i in range(3): + if np.random.randint(2) == 0: + tf_input_shape[i] = None + # We set the begin to 0 for the
symbolic dimension, + # since the default input shape will be 1 in this case, + # we need to make sure that begin = 0 and size = 1 (unless size == -1) + begin[i] = 0 + if size[i] != -1: + size[i] = 1 + + @make_tf_graph([tf_input_shape]) + def build_model(x): + return tf.slice(x, begin, size) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape, rand_min=-2, rand_max=2)] + + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestMatrixBandPart(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, lower_and_upper", + itertools.product( + compute_units, + backends, + [rank for rank in range(2, 6)], + [(0, -1), (-1, 0), (0, 0)], + ), + ) + def test_matrix_band_part(self, compute_unit, backend, rank, lower_and_upper): + + lower, upper = lower_and_upper + shape = np.random.randint(low=3, high=4, size=rank) + + @make_tf_graph([shape]) + def build_model(x): + return tf.raw_ops.MatrixBandPart(input=x, num_lower=lower, num_upper=upper) + + model, inputs, outputs = build_model + TensorFlowBaseTest.run_compare_tf( + model, + {inputs[0]: random_gen(shape, rand_min=-100, rand_max=100)}, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestCumSum(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, reverse, exclusive", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)], + [True, False], + [True, False], + ), + ) + def test_cumsum(self, compute_unit, backend, rank, reverse, exclusive): + input_shape = np.random.randint(low=1, high=4, size=rank) + for axis in range(-1, rank, 3): + @make_tf_graph([input_shape]) + def build_model(x): + return tf.math.cumsum(x, axis=axis, reverse=reverse, exclusive=exclusive) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape, rand_min=-10, rand_max=10)] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf(model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend) + +@pytest.mark.skipif(not _HAS_TF_1, reason=MSG_TF1_NOT_FOUND) +class TestFakeQuant(TensorFlowBaseTest): + @pytest.mark.parametrize( + "num_bits, weight_boundaries, compute_unit, backend", + itertools.product( + [2, 8], # TensorFlow does not support 1-bit quantization + [(0, 10), (-0.01, 0.02), (-101, 100)], + compute_units, + backends, + ), + ) + def test_fake_quant_weight_quantization_with_conv(self, num_bits, weight_boundaries, compute_unit, backend): + if backend[0] == 'mlprogram': + pytest.skip("Not supported with ML Program backend") + + tf.reset_default_graph() + filter_width = 1 + filter_height = 1 + spatial_size = 2 + input_channels = 3 + output_channels = 1 + input_tensor = tf.placeholder(tf.float32, [1, spatial_size, spatial_size, input_channels], name='input') + output_tensor = tf.placeholder(tf.float32, [1, spatial_size, spatial_size, output_channels], name='output') + kernel_in = random_gen((filter_width, filter_height), weight_boundaries[0], weight_boundaries[1]) + init = tf.constant_initializer(kernel_in) + + def model(x): + with tf.compat.v1.variable_scope('quantized_model'): + x = tf.layers.conv2d(x, filters=3, kernel_size=1, strides=1, kernel_initializer=init) + return x + + with tf.compat.v1.variable_scope('quantize'): + output = model(x=input_tensor) + tf.contrib.quantize.experimental_create_training_graph(quant_delay=0, weight_bits=num_bits, + 
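# experimental_create_training_graph rewrites the graph in place, inserting FakeQuant + # ops whose min/max ranges are populated by the short training run below. +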
activation_bits=num_bits) + loss = tf.losses.mean_squared_error(labels=input_tensor, predictions=output) + saver = tf.train.Saver() + + update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) + with tf.control_dependencies(update_ops): + optimizer = tf.train.AdamOptimizer().minimize(loss) + + checkpoint_dir = tempfile.mkdtemp() + # Run training pass to retrieve the correct min and max in FakeQuant op (to avoid using default values) and + # save dummy checkpoint. + with tf.Session() as sess: + tf.global_variables_initializer().run() + for iter in range(1): + image = np.random.rand(spatial_size, spatial_size, input_channels).astype(np.float32) * 255 + label = np.random.rand(spatial_size, spatial_size, output_channels).astype(np.float32) * 255 + training_loss, _ = sess.run([loss, optimizer], feed_dict={input_tensor: image[None, ...], + output_tensor: label[None, ...]}) + + saver.save(sess=sess, save_path=os.path.join(checkpoint_dir, 'quantization')) + + with tf.Graph().as_default() as g: + input_tensor = tf.placeholder(tf.float32, [1, spatial_size, spatial_size, input_channels], name='input') + with tf.variable_scope('quantize'): + output = model(x=input_tensor) + + # define eval graph, by quantizing the weights of the model with learned min/max values for each layer + tf.contrib.quantize.experimental_create_eval_graph(input_graph=g, weight_bits=num_bits, + activation_bits=num_bits) + tmpdir = tempfile.mkdtemp() + tf_graph_path = os.path.join(str(tmpdir), "tf_graph.pb") + tf_graph_path_quantized = os.path.join(str(tmpdir), "frozen_graph_quantized.pb") + with open(tf_graph_path, 'wb') as f: + f.write(g.as_graph_def().SerializeToString()) + freeze_g(input_graph=tf_graph_path, + input_saver="", + input_binary=True, + input_checkpoint=os.path.join(checkpoint_dir, 'quantization'), + output_node_names="quantize/quantized_model/conv2d/Conv2D", + restore_op_name="save/restore_all", + filename_tensor_name="save/Const:0", + output_graph=tf_graph_path_quantized, + clear_devices=True, + initializer_nodes="") + shutil.rmtree(checkpoint_dir) + + graph = load_tf_pb(tf_graph_path_quantized) + + tf.reset_default_graph() + graphdef = tf.GraphDef() + input_dict = {} + with open(tf_graph_path_quantized, "rb") as f: + graphdef.ParseFromString(f.read()) + shutil.rmtree(tmpdir) + + with tf.Graph().as_default(), tf.Session(config=None) as sess: + tf.graph_util.import_graph_def(graphdef, name='') + input_dict[sess.graph.get_tensor_by_name('input:0')] = (np.random.rand(1, spatial_size, spatial_size, + input_channels).astype(np.float32)) + outputs = [] + outputs.append(sess.graph.get_tensor_by_name('quantize/quantized_model/conv2d/Conv2D:0')) + tf_outs = sess.run(outputs, feed_dict=input_dict) + + TensorFlowBaseTest.run_compare_tf( + graph, + input_dict, + ["quantize/quantized_model/conv2d/Conv2D"], + compute_unit=compute_unit, + backend=backend, + tf_outputs=tf_outs, + rtol=0.005, + ) + + +class TestFill(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, value", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)], + [-19.0, 0.0, 37.0], + ), + ) + def test_fill(self, compute_unit, backend, rank, value): + def test_tf_static(): + shape = np.random.randint(low=1, high=3, size=rank) + + @make_tf_graph([shape]) + def build_model(x): + return tf.add( + x, tf.fill(dims=np.array(shape, dtype=np.float32), value=value) + ) + + model, inputs, outputs = build_model + input_values = [np.random.rand(*shape).astype(np.float32)] + input_dict = dict(zip(inputs, 
input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + def test_tf_dynamic(): + shape = np.random.randint(low=1, high=3, size=rank) + @make_tf_graph([(len(shape), tf.int32)]) + def build_model(x): + return tf.fill(dims=x, value=value) + + model, inputs, outputs = build_model + input_values = [np.array(shape, dtype=np.int32)] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + test_tf_static() + test_tf_dynamic() + + +class TestNonMaximumSuppression(TensorFlowBaseTest): + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "num_boxes", + "max_boxes", + "iou_threshold", + "score_threshold", + "use_V5", + ] + ), + itertools.product( + compute_units, + backends, + [1, 5, 20, 1000], + [1, 8, 100], + [0.2, 0.8], + [float("-inf"), -200.0, 200.0], + [True, False], + ), + ) + def test_non_max_suppression( + self, + compute_unit, + backend, + num_boxes, + max_boxes, + iou_threshold, + score_threshold, + use_V5, + ): + if score_threshold > 100.0: + pytest.xfail( + "When score threshold is too high, TF will return empty result, while MIL " + "will still keep the highest score box." + ) + if num_boxes >= 1000 and backend == ("mlprogram", "fp16"): + pytest.xfail( + "rdar://103891349 ([TensorFlow] [PyTorch] NMS discrepancy in Fp16 when " + "number of boxes is large)" + ) + + boxes_val = random_gen(shape=(num_boxes, 4), rand_min=0, rand_max=32) + # When the input score is too close, the returned index order is not guaranteed. + # So instead of generating random scores by rand, use shuffle. + scores_val = np.arange(num_boxes).astype(np.float32) + np.random.shuffle(scores_val) + + @make_tf_graph([boxes_val.shape, scores_val.shape]) + def build_model(boxes, scores): + if use_V5: + ret = tf.raw_ops.NonMaxSuppressionV5( + boxes=boxes, + scores=scores, + max_output_size=max_boxes, + iou_threshold=iou_threshold, + score_threshold=score_threshold, + soft_nms_sigma=0., + ) + else: + ret = tf.image.non_max_suppression( + boxes=boxes, + scores=scores, + max_output_size=max_boxes, + iou_threshold=iou_threshold, + score_threshold=score_threshold, + ) + return ret + + model, inputs, outputs = build_model + input_dict = dict(zip(inputs, [boxes_val, scores_val])) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestOneHot(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank_and_axis, dynamic", + itertools.product( + compute_units, + backends, + [ + (2, 0), + (2, -1), + (3, 3), + (3, 0), + (3, -2), + (4, -4), + (4, 1), + (4, -1), + (4, -2), + (4, 3), + ], + [True, False], + ), + ) + def test_one_hot(self, compute_unit, backend, rank_and_axis, dynamic): + rank, axis = rank_and_axis + depth, on_value, off_value = 30, 28.0, -4.0 + x_shape = np.random.randint(low=2, high=4, size=rank) + axis = (axis if axis >= -1 else axis + rank + 1) + + if not dynamic: + @make_tf_graph([list(x_shape)+[tf.int32]]) + def build_model(x): + return tf.one_hot(x, axis=axis, depth=depth, on_value=on_value, off_value=off_value) + + model, inputs, outputs = build_model + input_values = [np.random.randint(0, depth, size=x_shape).astype(np.int32)] + input_dict = dict(zip(inputs, input_values)) + + else: # Dynamic Case with depth being an input + @make_tf_graph([list(x_shape)+[tf.int32], [1, tf.int32]]) + def 
build_model(x, depth_input): + # tf.squeeze since CoreML input has to be rank 1~5. + return tf.one_hot(x, axis=axis, depth=tf.squeeze(depth_input), + on_value=on_value, off_value=off_value) + + model, inputs, outputs = build_model + input_values = [np.random.randint(0, depth, size=x_shape).astype(np.int32), + np.array([depth]).astype(np.int32)] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + +class TestSoftmaxCrossEntropyWithLogits(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, class_num", + itertools.product( + compute_units, + backends, + [1, 3], + ), + ) + def test_sparse_softmax_cross_entropy_with_logits(self, compute_unit, backend, class_num): + batch_size = 2 + feature_shape = [batch_size, class_num] + label_shape = [batch_size, tf.int32] + + @make_tf_graph([feature_shape, label_shape]) + def build_model(feat, label): + return tf.raw_ops.SparseSoftmaxCrossEntropyWithLogits(features=feat, labels=label)[0] + + model, inputs, outputs = build_model + features = random_gen(feature_shape, rand_min=0, rand_max=1) + labels = np.random.randint(low=0, high=class_num, size=(batch_size,), dtype=np.int32) + input_values = [features, labels] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend, class_num", + itertools.product( + compute_units, + backends, + [1, 3], + ), + ) + def test_softmax_cross_entropy_with_logits(self, compute_unit, backend, class_num): + batch_size = 2 + feature_shape = [batch_size, class_num] + label_shape = [batch_size, class_num] + + @make_tf_graph([feature_shape, label_shape]) + def build_model(feat, label): + return tf.raw_ops.SoftmaxCrossEntropyWithLogits(features=feat, labels=label)[0] + + model, inputs, outputs = build_model + input_values = [ + random_gen(feature_shape, rand_min=0, rand_max=1), + random_gen(label_shape, rand_min=0, rand_max=1), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + +class TestIdentityN(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product(compute_units, backends), + ) + def test_identity_n(self, compute_unit, backend): + shape_1 = [1,] + shape_2 = [3, 4] + shape_3 = [5, 6, 7] + + @make_tf_graph([shape_1, shape_2, shape_3]) + def build_model(x, y ,z): + return tf.raw_ops.IdentityN(input=[x, y, z]) + + model, inputs, outputs = build_model + input_values = [ + random_gen(shape_1, rand_min=0, rand_max=1), + random_gen(shape_2, rand_min=0, rand_max=1), + random_gen(shape_3, rand_min=0, rand_max=1), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product(compute_units, backends), + ) + def test_identity_n_with_downstream_op(self, compute_unit, backend): + shape = [3, 4] + + @make_tf_graph([shape]) + def build_model(x): + x = tf.identity_n(input=[x, x]) + return tf.reduce_max(x, 1) + + model, inputs, outputs = build_model + input_values = [np.random.rand(*shape).astype(np.float32)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + 
model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + +class TestPad(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, mode, dynamic, trial", + itertools.product( + compute_units, + backends, + [2, 3, 4], + ['constant', 'reflect'], + [True, False], + list(range(10)), + ), + ) + def test(self, compute_unit, backend, rank, mode, dynamic, trial): + input_shape = np.random.randint(low=2, high=10, size=rank) + min_input_dim_size = input_shape.min() + padding_val = np.random.randint(low=0, high=min_input_dim_size, size=(rank, 2), dtype=np.int32) + + # Only constant mode supports padding across all dimensions + # All other padding modes are only applied on two dimensions. + perm = list(range(rank)) + import random + random.shuffle(perm) + if mode != "constant": + padding_val[perm[:-2]] = 0 + tf_mode = mode.upper() + + if dynamic: + if mode != "constant": + return + padding_shape = padding_val.shape + @make_tf_graph([input_shape, list(padding_shape)+[tf.int32]]) + def build_model(x, paddings): + return tf.pad(x, paddings=paddings, mode=tf_mode) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape, rand_min=0.2, rand_max=1000), padding_val] + input_dict = dict(zip(inputs, input_values)) + + else: + @make_tf_graph([input_shape]) + def build_model(x): + return tf.pad(x, paddings=padding_val, mode=tf_mode) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape, rand_min=0.2, rand_max=1000)] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + +class TestPadV2(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, constant_values, dynamic, trial", + itertools.product( + compute_units, + backends, + list(range(1, 6)), + [0., 10, -1], + [True], + list(range(10)) + ), + ) + def test(self, compute_unit, backend, rank, constant_values, dynamic, trial): + input_shape = np.random.randint(low=2, high=10, size=rank) + paddings = np.random.randint(low=2, high=5, size=2*rank).astype(np.int32) + padding_val = paddings.reshape(-1,2) + if dynamic: + padding_shape = padding_val.shape + @make_tf_graph([input_shape, list(padding_shape)+[tf.int32]]) + def build_model(x, paddings): + return tf.raw_ops.PadV2(input=x, paddings=paddings, constant_values=constant_values) + + model, inputs, outputs = build_model + + input_values = [random_gen(input_shape, rand_min=0.2, rand_max=1000), padding_val] + input_dict = dict(zip(inputs, input_values)) + + else: + @make_tf_graph([input_shape]) + def build_model(x): + return tf.raw_ops.PadV2(input=x, paddings=padding_val, constant_values=constant_values) + + model, inputs, outputs = build_model + + input_values = [random_gen(input_shape, rand_min=0.2, rand_max=1000)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + +class TestRange(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, params", + itertools.product( + compute_units, + backends, + [ + (-10.4, 23, 12.2), + (0, 10, 1), + (50.5, 90.5, 1.5), + (5, 8, 2), + (5, 8, 98), + (5, 8, 1.5), + (10, 5, -0.6), + (24, -65, -2), + ], + ), + ) + def test_range(self, compute_unit, backend, params): + start, end, step = np.array(params).astype(np.float32) + + # CoreML requires rank-1~5 input. 
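+ # A scalar can't be fed directly, so `limit` is declared as a shape-[1]
+ # tensor and squeezed back to a scalar inside the graph before tf.range.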
+ @make_tf_graph([[1, tf.float32]]) + def build_model(limit): + return tf.range(start=start, limit=tf.squeeze(limit), delta=step) + + model, inputs, outputs = build_model + input_values = [np.array([end])] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + # CoreML requires rank-1~5 input. + @make_tf_graph([[1, tf.float32]]) + def build_model(delta): + return tf.range(start=start, limit=end, delta=tf.squeeze(delta)) + + model, inputs, outputs = build_model + input_values = [np.array([step])] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + # CoreML requires rank-1~5 input. + @make_tf_graph([[1, tf.float32]]) + def build_model(begin): + return tf.range(start=tf.squeeze(begin), limit=end, delta=step) + + model, inputs, outputs = build_model + input_values = [np.array([start])] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestTile(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank_and_reps", + itertools.product( + compute_units, + backends, + [ + (1, (2,)), + (2, (1, 2)), + (2, (2, 2)), + (3, (3, 2, 1)), + (3, (2, 1, 3)), + (3, (2, 1, 1)), + (4, (1, 3, 2, 1)), + (4, (2, 1, 1, 2)), + (5, (2, 1, 1, 3, 2)), + (5, (1, 1, 2, 3, 2)), + ], + ), + ) + def test_tile(self, compute_unit, backend, rank_and_reps): + rank, reps = rank_and_reps + x_shape = np.random.randint(low=2, high=4, size=rank) + + @make_tf_graph([x_shape]) + def build_model(x): + return tf.tile(x, multiples=reps) + + model, inputs, outputs = build_model + input_values = [random_gen(x_shape)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + +class TestDynamicTile(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product(compute_units, backends, [1, 2, 3, 4, 5]), + ) + def test_tile(self, compute_unit, backend, rank): + x_shape = np.random.randint(low=2, high=4, size=rank) + reps_val = np.random.randint(low=1, high=3, size=rank).astype(np.int32) + + @make_tf_graph([x_shape, [*reps_val.shape, tf.int32]]) + def build_model(x, reps): + return tf.tile(input=x, multiples=reps) + + model, inputs, outputs = build_model + input_values = [random_gen(x_shape), reps_val] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + +class TestTopK(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, k, sort", + itertools.product( + compute_units, + backends, + [1, 3, 5], + [1, 3], + [True, False], + ), + ) + def test_top_k(self, compute_unit, backend, rank, k, sort): + if not sort and backend[0] == "neuralnetwork": + pytest.skip("iOS16 version topk needed for sort = False") + if not sort and _macos_version() < (13, 0): + pytest.skip("New functionality in macOS13/iOS16") + + # TensorFlow only supports last dimension (axis = -1). 
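+ # With sorted=False the order of the returned values/indices is not
+ # guaranteed, so both outputs are sorted below before comparison.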
+ shape = np.random.randint(low=3, high=4, size=rank) + + @make_tf_graph([shape]) + def build_model(x): + ref = tf.math.top_k(x, k=k, sorted=sort) + if not sort: + ref = (tf.sort(ref[0]), tf.sort(ref[1])) + return ref + + model, inputs, outputs = build_model + input_values = [random_gen(shape, rand_min=-100, rand_max=100)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16 if not sort else None, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape, k", + itertools.product( + compute_units, + backends, + [(1, 3), (1, 10), (3, 50)], + [1, 3, 20], + ), + ) + def test_in_top_k(self, compute_unit, backend, shape, k): + # TensorFlow only supports last dimension (axis = -1). + batch_size, class_num = shape + + @make_tf_graph([shape, (batch_size, tf.int32)]) + def build_model(predictions, targets): + return tf.math.in_top_k(predictions=predictions, targets=targets, k=k) + + model, inputs, outputs = build_model + pred_values = random_gen(shape, rand_min=-2, rand_max=2) + target_values = np.random.randint(class_num, size=batch_size).astype(np.int32) + input_values = [pred_values, target_values] + + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + +class TestConcat(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, op_version, rank, num_inputs", + itertools.product( + compute_units, + backends, + ['v1', 'v2'], + list(range(6)), + list(range(1, 4)), + ), + ) + def test_concat(self, compute_unit, backend, op_version, rank, num_inputs): + + import random + for axis in range(-rank, rank): + input_shape = np.random.randint(low=1, high=4, size=rank) + input_shapes = [input_shape.copy() for _ in range(num_inputs)] + concat_axis_value = np.random.randint(low=1, high=3, size=num_inputs) + for i, v in enumerate(concat_axis_value): + input_shapes[i][axis] = concat_axis_value[i] + + @make_tf_graph(input_shapes) + def build_model(*inputs): + # add 3 additional tensor contains dimension size of 0 + zero_shape = input_shape.copy() + zero_shape[axis] = 0 + const = [tf.constant([], shape=zero_shape) for _ in range(3)] + values = inputs + tuple(const) + values = list(values) + random.shuffle(values) + values = tuple(values) + if op_version == 'v1': + # Seems like now the tf functions are using concatV2, so create as raw_ops here + res = tf.raw_ops.Concat(concat_dim=axis, values=values) + elif op_version == 'v2': + res = tf.raw_ops.ConcatV2(values=values, axis=axis) + return res + + model, inputs, outputs = build_model + input_values = [random_gen(shape) for shape in input_shapes] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + +class TestSplit(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, dynamic", + itertools.product( + compute_units, + backends, + [1, 2, 3, 4], + [True, False] + ), + ) + def test_split(self, compute_unit, backend, rank, dynamic): + if backend[0] == "mlprogram" and compute_unit != ct.ComputeUnit.CPU_ONLY and dynamic: + pytest.xfail("rdar://97398133 (TestSplit::test_split is failing on mlprogram + GPU + dynamic combination)") + if _macos_version() < (13, 0) and (dynamic or (backend[0] == "mlprogram" and compute_unit != 
ct.ComputeUnit.CPU_ONLY)): + pytest.skip("Issue fixed in iOS16/macOS13") + + input_shape1 = np.random.randint(low=1, high=3, size=rank) + for axis in range(-rank, rank, 2): + for split_num in range(2, input_shape1[axis] + 1, 2): + if input_shape1[axis] % split_num != 0: + continue + tf_input_shape = list(input_shape1) + if dynamic: + axis1 = np.random.randint(low=0, high=rank) + tf_input_shape[axis1] = None + + @make_tf_graph([tf_input_shape]) + def build_model(x): + res = tf.split(x, split_num, axis=axis) + # Comment: If tf.split output is returned, there's no + # get_tuple nodes. Some graph pass is needed. Example: + # + # x = tf.placeholder(tf.float32, shape=input_shape1) + # res = tf.split(x, 3, axis=0) + # + # res are ['split:0', 'split:1', 'split'] + # + # but node.outputs == ['gto_1', 'gto_2', 'gto_3'] + import random + + random.shuffle(res) + return tuple(res) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape1)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, sizes", + itertools.product( + compute_units, + backends, + [[1, 1, 2], [0, 2, 2], [1, 0, 3], [2, 0, 1, 1, 0]] + ), + ) + def test_split_with_sizes(self, compute_unit, backend, sizes): + input_shape = (4, 2) + + @make_tf_graph([input_shape]) + def build_model(x): + res = tf.split(x, sizes, axis=0) + # split sizes can contain 0s, and we skip those in outputs + return tuple([res[i] for i in range(len(sizes)) if sizes[i] != 0]) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_splitv(self, compute_unit, backend): + input_shape = [3, 2, 1] + + @make_tf_graph([input_shape]) + def build_model(x): + res = tf.split(x, [1, 2], axis=0) + return res[0], res[1] + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestStack(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends, ) + ) + def test_stack(self, compute_unit, backend): + input_shape1 = [3, 1, 1] + input_shape2 = [3, 1, 1] + + @make_tf_graph([input_shape1, input_shape2]) + def build_model(x, y): + return [tf.stack((x, y), axis=0), tf.stack((y, x), axis=-1)] + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape1), random_gen(input_shape2)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + +class TestUnstack(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [[3, 1], [4, 3]] + ), + ) + def test_unstack(self, compute_unit, backend, shape): + @make_tf_graph([shape]) + def build_model(x): + return tf.unstack(x, axis=1) + + model, inputs, outputs = build_model + input_values = [random_gen(shape)] + input_dict = dict(zip(inputs, input_values)) + 
TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [[3, 1], [4, 3]] + ), + ) + def test_unstack_and_stack(self, compute_unit, backend, shape): + @make_tf_graph([shape]) + def build_model(x): + x = tf.unstack(x, axis=1) + return tf.stack(x) + + model, inputs, outputs = build_model + input_values = [random_gen(shape)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestPack(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, num_inputs", + itertools.product(compute_units, backends, list(range(5)), list(range(1, 5))), + ) + def test_pack(self, compute_unit, backend, rank, num_inputs): + if rank == 0: + pytest.skip('Rank 0 not supported by CoreML runtime') + + shape = np.random.randint(low=1, high=4, size=rank) + input_shapes = [shape[:] for _ in range(num_inputs)] + + @make_tf_graph(input_shapes) + def build_model(*inputs): + return tf.raw_ops.Pack(values=inputs, axis=0) + + model, inputs, outputs = build_model + input_values = [ + random_gen(shape, rand_min=-1, rand_max=1) for shape in input_shapes + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestArgSort(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, axis, direction", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)], + [-1, 0], + ["ascending", "descending"], + ), + ) + def test_argsort(self, compute_unit, backend, rank, axis, direction): + shape = np.random.randint(low=1, high=4, size=rank) + dtype = np.float32 + tf_dtype = tf.float32 + + @make_tf_graph([list(shape) + [tf_dtype]]) + def build_model(x): + return tf.argsort(x, axis=axis, direction=direction.upper()) + + model, inputs, outputs = build_model + input_values = np.arange(np.prod(shape)) + np.random.shuffle(input_values) + input_values = [np.reshape(input_values, shape).astype(dtype)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + +class TestDepthToSpace(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_shape, block_size", + itertools.product( + compute_units, + backends, + [(1, 1, 1, 16), (1, 1, 1, 32), (1, 3, 3, 16)], + [2, 4], + ), + ) + def test_depth_to_space(self, compute_unit, backend, input_shape, block_size): + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.nn.depth_to_space(x, block_size) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape)] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestExpandDims(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank_and_axis", + itertools.product( + compute_units, + backends, + [ + (rank, axis) + for rank in range(1, 5) + for axis in range(-rank - 1, rank + 1) + ], + ), + ) + def test_expand_dims(self, compute_unit, backend, rank_and_axis): + rank, axis = rank_and_axis + input_shape = np.random.randint(low=2, high=4, size=rank) 
+ + @make_tf_graph([input_shape]) + def build_model(x): + return tf.expand_dims(x, axis=axis) + + model, inputs, outputs = build_model + + input_values = [np.random.rand(*input_shape).astype(np.float32)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + +class TestReshape(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_flatten(self, compute_unit, backend): + shapes = [[2, 2], [3, 2, 1, 2], [2, 1, 4, 3]] + + for input_shape in shapes: + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.keras.backend.flatten(x) + + model, inputs, outputs = build_model + + input_values = [np.random.rand(*input_shape).astype(np.float32)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, input_shape", + itertools.product( + compute_units, + backends, + [ + ([10, 10], [5, 20]), + ([3, 4, 5, 6], [4, 5, 3, 6]), + ([4, 4, 5, 6], [2, 2, -1]), + ], + ), + ) + def test_reshape_static(self, compute_unit, backend, input_shape): + @make_tf_graph([input_shape[0]]) + def build_model(x): + return tf.reshape(x, shape=input_shape[1]) + + model, inputs, outputs = build_model + + input_values = [np.random.rand(*input_shape[0]).astype(np.float32)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, input_shape", + itertools.product( + compute_units, + backends, + [ + ([10, 10], [5, 20]), + ([3, 4, 5, 6], [4, 5, 3, 6]), + ([4, 4, 5, 6], [2, 2, -1]), + ([2, 3, 5, 3], [2, -1]), + ], + ), + ) + def test_reshape_dynamic(self, compute_unit, backend, input_shape): + @make_tf_graph([input_shape[0], (len(input_shape[1]), tf.int32)]) + def build_model(x, y): + return tf.reshape(x, shape=y) + + model, inputs, outputs = build_model + + input_values = [ + np.random.rand(*input_shape[0]).astype(np.float32), + np.array(input_shape[1], dtype=np.int32), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [[1], [1, 1], [1, 1, -1], []], + ), + ) + def test_reshape_scalar(self, compute_unit, backend, shape): + pytest.skip('Rank 0 not supported by CoreML runtime') + + input_shape = () + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.raw_ops.Reshape(tensor=x, shape=shape) + + model, inputs, outputs = build_model + + input_values = [np.random.rand(*input_shape)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + +class TestShape(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)], + ), + ) + def test_shape(self, compute_unit, backend, rank): + shape = np.random.randint(low=3, high=4, size=rank) + shape_holder = [None] * rank + + @make_tf_graph([shape_holder]) + def build_model(x): + return tf.shape(x) + + 
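+ # shape_holder is fully dynamic ([None] * rank), so tf.shape cannot be
+ # folded into a constant during conversion.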
model, inputs, outputs = build_model + + input_values = [random_gen(shape, rand_min=-100, rand_max=100)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + +class TestMatrixDiag(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, length, dynamic", + itertools.product( + compute_units, + backends, + [length for length in range(1, 5)], + [True, False] + ), + ) + def test(self, compute_unit, backend, length, dynamic): + + if dynamic: + input_shape = np.random.randint(low=1, high=4, size=length) + a, b = np.prod(input_shape[:2]), np.prod(input_shape[2:]) + size = np.array([a,b]).astype(np.int32) + reshape_shape = [2] + + @make_tf_graph([input_shape, reshape_shape+[tf.int32]]) + def build_model(x, reshape): + x = tf.reshape(x, reshape) + x = tf.reshape(x, [-1]) + return tf.raw_ops.MatrixDiag(diagonal=x) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape, -1, 1), size] + else: + input_shape = [length] + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.raw_ops.MatrixDiag(diagonal=x) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape, -1, 1)] + + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + +class TestReverse(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank_and_axes", + itertools.product( + compute_units, + backends, + [ + (1, (-1,)), + (2, (0,)), + (2, (-1, 0)), + (3, (1, -3)), + (3, (-2,)), + (3, (0, 1, 2)), + (4, (-2, -1, 0)), + (4, (-1, -2)), + (4, []), + (5, (-3, -1, 3)), + (5, (0, -1, 1, -2)), + ], + ), + ) + def test_reverse(self, compute_unit, backend, rank_and_axes): + rank, axes = rank_and_axes + shape = np.random.randint(low=1, high=4, size=rank) + + @make_tf_graph([shape]) + def build_model(x): + return tf.reverse(x, axis=axes) + + model, inputs, outputs = build_model + input_values = [random_gen(shape)] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestReverseSequence(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [rank for rank in range(2, 6)] + ), + ) + def test_reverse_sequence(self, compute_unit, backend, rank): + shape = np.random.randint(low=1, high=4, size=rank) + seq_axis = np.random.randint(low=1, high=rank) + batch_axis = np.random.randint(low=0, high=seq_axis) + lengths = np.random.randint(low=0, high=shape[seq_axis], size=shape[batch_axis]) + + @make_tf_graph([shape]) + def build_model(x): + return tf.reverse_sequence( + x, seq_lengths=lengths, seq_axis=seq_axis, batch_axis=batch_axis + ) + + model, inputs, outputs = build_model + input_values = [random_gen(shape)] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestSpaceToDepth(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_shape, block_size", + itertools.product( + compute_units, + backends, + [(1, 6, 6, 1), (1, 12, 12, 1), (1, 6, 6, 3)], + [2, 3], + ), + ) + def test_space_to_depth(self, compute_unit, backend, input_shape, block_size): + 
@make_tf_graph([input_shape]) + def build_model(x): + return tf.nn.space_to_depth(x, block_size) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape)] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestSqueeze(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank_and_axes", + itertools.product( + compute_units, + backends, + [ + (2, (1,)), + (2, (0,)), + (3, (1,)), + (3, (0, -1)), + (3, []), + (4, (-1, 2, 1)), + (4, (0, 1)), + (5, (3, 1, 2)), + (5, (-1,)), + ], + ), + ) + def test_squeeze(self, compute_unit, backend, rank_and_axes): + rank, axes = rank_and_axes + x_shape = np.random.randint(low=2, high=4, size=rank) + for axis in axes: + x_shape[axis] = 1 + + @make_tf_graph([x_shape]) + def build_model(x): + return tf.squeeze(x, axis=axes) + + model, inputs, outputs = build_model + + input_values = [np.random.rand(*x_shape).astype(np.float32)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestTranspose(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank_and_perm", + itertools.product( + compute_units, + backends, + [ + (1, (0,)), + (2, (1, 0)), + (2, (0, 1)), + (3, (0, 2, 1)), + (3, (2, 1, 0)), + (3, (2, 0, 1)), + (4, (0, 3, 2, 1)), + (4, (3, 0, 1, 2)), + (5, (2, 3, 1, 0, 4)), + (5, (3, 1, 0, 4, 2)), + ], + ), + ) + def test_transpose_1(self, compute_unit, backend, rank_and_perm): + + rank, perm = rank_and_perm + x_shape = np.random.randint(low=1, high=4, size=rank) + + @make_tf_graph([x_shape]) + def build_model(x): + return tf.transpose(x, perm=perm) + + model, inputs, outputs = build_model + input_values = [random_gen(x_shape)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [1, 2, 3, 4], + ), + ) + def test_transpose_2(self, compute_unit, backend, rank): + + input_shape = np.random.randint(low=1, high=4, size=rank) + perm = np.random.permutation(rank) + + def static_perm(): + @make_tf_graph([input_shape]) + def build_model(x): + return tf.transpose(x, perm=perm) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + def dynamic_perm(): + @make_tf_graph([input_shape, list(perm.shape) + [tf.int32]]) + def build_model(x, tf_perm): + return tf.transpose(x, perm=tf_perm) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape), perm.astype(np.int32)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + static_perm() + # Note that TF supports dynamic perm in tf.transpose. 
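+ # Core ML, however, requires perm to be a compile-time constant, so the
+ # dynamic-perm variant is expected to fail conversion: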
+ with pytest.raises(ValueError, match=r".*must be const at compile time.*"): + dynamic_perm() + + @pytest.mark.parametrize( + "compute_unit, backend, rank_and_perm", + itertools.product( + compute_units, + backends, + [ + (2, (0, 1)), + (3, (0, 2, 1)), + ], + ), + ) + def test_transpose_after_another_op(self, compute_unit, backend, rank_and_perm): + + rank, perm = rank_and_perm + x_shape = np.random.randint(low=1, high=4, size=rank) + + @make_tf_graph([x_shape]) + def build_model(x): + # Test transpose operations after another operation that may return symbolic value + # in value_inference implementation (e.g. concat) - see issue #1556 + x = tf.concat([x, x], axis=-1) + return tf.transpose(x, perm=perm) + + model, inputs, outputs = build_model + input_values = [random_gen(x_shape)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [1, 3], + ), + ) + def test_redundant_transpose(self, compute_unit, backend, rank): + import random + random.seed(10) + input_shape = np.random.randint(low=1, high=4, size=rank) + num_layers = 30 + perms = [] + for _ in range(num_layers): + perm = list(range(rank)) + random.shuffle(perm) + perms.append(perm) + + @make_tf_graph([input_shape]) + def build_model(x): + net = x + for perm in perms: + net = tf.transpose(net, perm=perm) + return net + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestSpaceToBatchND(TensorFlowBaseTest): + # No direct mil smoke test since it's a TF op which is a composite of several ops. 
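+ # Roughly: SpaceToBatchND pads the spatial dims, divides them by
+ # block_shape, and multiplies the batch dim by prod(block_shape), e.g. an
+ # input of shape (1, 4, 4, 1) with block_shape=[2, 2] and zero paddings
+ # yields shape (4, 2, 2, 1).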
+ @pytest.mark.parametrize( + "compute_unit, backend, input_shape, block_shape, paddings", + itertools.product( + compute_units, + backends, + [(1, 4, 4, 1), (1, 4, 4, 3), (2, 4, 6, 1)], + [[2, 2]], + [[[0, 0], [0, 0]], [[1, 1], [0, 2]], [[4, 2], [4, 2]]], + ), + ) + def test_smoke(self, compute_unit, backend, input_shape, block_shape, paddings): + @make_tf_graph([input_shape]) + def build_model(x): + return tf.raw_ops.SpaceToBatchND( + input=x, block_shape=block_shape, paddings=paddings + ) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape_block_paddings, dynamic", + itertools.product( + compute_units, + backends, + [ + [(1, 4, 6, 2, 2), [2, 3], [[2, 0], [3, 6]]], + [(2, 4, 6, 1), [1, 2], [[2, 1], [3, 3]]], + [(2, 4, 6, 1, 2), [2, 1], [[0, 0],[0, 0]]], + [(2, 4, 6, 1, 2), [2], [[0, 0]]], + ], + [True, False], + ), + ) + def test_smoke_new_op(self, compute_unit, backend, shape_block_paddings, dynamic): + input_shape, block_shape, paddings = shape_block_paddings + + # The neuralnetwork backend doesn't support these tests + if backend[0] == "neuralnetwork": + return + + tf_input_shape = input_shape if not dynamic else [None] * len(input_shape) + @make_tf_graph([tf_input_shape]) + def build_model(x): + return tf.raw_ops.SpaceToBatchND( + input=x, block_shape=block_shape, paddings=paddings + ) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, input_block_rank, dynamic", + itertools.product( + compute_units, + backends, + [(3, 1), (3, 2), (4, 1)], + [True, False], + ), + ) + def test_programmatic( + self, compute_unit, backend, input_block_rank, dynamic + ): + + input_rank, block_rank = input_block_rank + + # generate data + input_shape = np.random.randint(low=1, high=4, size=input_rank) + block_shape = np.random.randint(low=1, high=3, size=block_rank) + + if backend[0] == "neuralnetwork": + if block_rank == 2 and block_shape[0] != block_shape[1]: + pytest.skip("neuralnetwork backend doesn't support unequal block shape.") + if block_shape[0] == 1: + pytest.skip("neuralnetwork backend doesn't support unity block shape.") + + paddings = [] + for i in range(block_rank): + while True: + temp = np.random.randint(low=0, high=10, size=2) + if (np.sum(temp) + input_shape[i + 1]) % block_shape[i] == 0: + paddings.append(temp) + break + paddings = np.array(paddings) + + if not dynamic: + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.raw_ops.SpaceToBatchND( + input=x, block_shape=block_shape, paddings=paddings + ) + + else: + + @make_tf_graph([[None] * input_rank]) + def build_model(x): + return tf.raw_ops.SpaceToBatchND( + input=x, block_shape=block_shape, paddings=paddings + ) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestBatchToSpaceND(TensorFlowBaseTest): + # No direct mil smoke test since it's a TF op which is a composite of several ops. 
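+ # Roughly the inverse of SpaceToBatchND: the batch dim is divided by
+ # prod(block_shape), the spatial dims are multiplied by block_shape and
+ # then cropped, e.g. (4, 4, 4, 1) with block_shape=[2, 2] and zero crops
+ # yields shape (1, 8, 8, 1).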
+ @pytest.mark.parametrize( + "compute_unit, backend, input_shape, block_size, crops", + itertools.product( + compute_units, + backends, + [(4, 4, 4, 1), (4, 4, 4, 3), (4, 4, 6, 1)], + [[2, 2]], + [[[0, 0], [0, 0]], [[1, 1], [0, 2]], [[4, 2], [4, 2]]], + ), + ) + def test_smoke(self, compute_unit, backend, input_shape, block_size, crops): + @make_tf_graph([input_shape]) + def build_model(x): + return tf.raw_ops.BatchToSpaceND( + input=x, block_shape=block_size, crops=crops + ) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, input_block_rank, dynamic", + itertools.product( + compute_units, + backends, + [(3, 1), (3, 2), (4, 1)], + [True, False] + ), + ) + def test_programmatic( + self, compute_unit, backend, input_block_rank, dynamic): + + input_rank, block_rank = input_block_rank + + # generate data + input_shape = np.random.randint(low=1, high=4, size=input_rank) + block_shape = np.random.randint(low=1, high=3, size=block_rank) + + if backend[0] == "neuralnetwork": + if block_rank == 2 and block_shape[0] != block_shape[1]: + pytest.skip("neuralnetwork backend doesn't support unequal block shape.") + if block_shape[0] == 1: + pytest.skip("neuralnetwork backend doesn't support unity block shape.") + + input_shape[0] = input_shape[0] * np.prod(block_shape) + crops = [] + for i in range(block_rank): + while True: + temp = np.random.randint(low=0, high=4, size=2) + if np.sum(temp) < input_shape[i + 1] * block_shape[i]: + crops.append(temp) + break + crops = np.array(crops) + + if not dynamic: + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.raw_ops.BatchToSpaceND( + input=x, block_shape=block_shape, crops=crops + ) + + else: + + @make_tf_graph([[None] * input_rank]) + def build_model(x): + return tf.raw_ops.BatchToSpaceND( + input=x, block_shape=block_shape, crops=crops + ) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape)] + input_dict = dict(zip(inputs, input_values)) + + # Before rdar://93071454 (batch_to_space is error out in espresso for dynamic inputs cormel model) is fixed, + # we need to specify the default shape for the dynamic model by setting inputs_for_conversion + if dynamic: + shape = tuple([RangeDim(default=dim) for dim in input_shape]) + inputs_for_conversion = [TensorType(shape=shape, dtype=np.float32)] + else: + inputs_for_conversion = None + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + inputs_for_conversion=inputs_for_conversion, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape_block_crops, dynamic", + itertools.product( + compute_units, + backends, + [ + [(6, 4, 6, 2, 2), [2, 3], [[2, 0], [3, 6]]], + [(4, 4, 6, 1), [1, 2], [[2, 1], [3, 3]]], + [(4, 4, 6, 1, 2), [2, 1], [[0, 0],[0, 0]]], + [(4, 4, 6, 1, 2), [2], [[0, 0]]], + ], + [True, False], + ), + ) + def test_smoke_new_op(self, compute_unit, backend, shape_block_crops, dynamic): + input_shape, block_shape, crops = shape_block_crops + + # The neuralnetwork backend doesn't support these tests + if backend[0] == "neuralnetwork": + return + + tf_input_shape = input_shape if not dynamic else [None] * len(input_shape) + @make_tf_graph([tf_input_shape]) + def build_model(x): + return tf.raw_ops.BatchToSpaceND( + input=x, 
block_shape=block_shape, crops=crops + ) + + # Before rdar://93071454 (batch_to_space is error out in espresso for dynamic inputs cormel model) is fixed, + # we need to specify the default shape for the dynamic model by setting inputs_for_conversion + if dynamic: + shape = tuple([RangeDim(default=dim) for dim in input_shape]) + inputs_for_conversion = [TensorType(shape=shape, dtype=np.float32)] + else: + inputs_for_conversion = None + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + inputs_for_conversion=inputs_for_conversion, + backend=backend, + ) + +@pytest.mark.skipif(_HAS_TF_2, reason="Fix and re-enable this test: rdar://76293949 (TF2 unit test InvalidArgumentError)") +class TestTensorArray(TensorFlowBaseTest): + @staticmethod + def get_dynamic_elem_shape_model(): + elem_shape = (None, None) + @make_tf_graph([elem_shape]) + def build_model(x): + ta = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True) + ta = ta.write(10, x) + ta = ta.write(9, x) + ta = ta.scatter([3], tf.expand_dims(x, 0)) + ta = ta.scatter([8], tf.expand_dims(x, 0)) + + return ta.stack() + return build_model + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_tf_basic(self, compute_unit, backend): + # TF1: TensorArrayV3, TensorArrayWriteV3, TensorArrayScatterV3, + # TensorArraySizeV3, TensorArrayGatherV3 + # TF2: TensorListReserve, TensorListLength, TensorListSetItem, + # TensorListScatterIntoExistingList, TensorListStack, + # TensorListResize + + elem_shape = (3, 2) + + @make_tf_graph([elem_shape]) + def build_model(x): + ta = tf.TensorArray(dtype=tf.float32, size=1, dynamic_size=True) + + ta = ta.write(2, x) + + # TensorArray has write-once semantics, and thus we write to a new + # index + # (https://www.tensorflow.org/api_docs/python/tf/TensorArray) + # writing to out of bound index + ta = ta.scatter([3], tf.expand_dims(x, 0)) + + # writing to in-bound index + ta = ta.scatter([0], tf.expand_dims(x, 0)) + + return ta.stack() + + model, inputs, outputs = build_model + input_values = [random_gen(elem_shape)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_tf_dynamic_elem_shape(self, compute_unit, backend): + + # TF1: TensorArrayV3, TensorArrayWriteV3, TensorArrayScatterV3, + # TensorArraySizeV3, TensorArrayGatherV3 + # TF2: TensorListReserve, TensorListLength, TensorListSetItem, + # TensorListScatterIntoExistingList, TensorListStack, + # TensorListResize + model, inputs, outputs = TestTensorArray.get_dynamic_elem_shape_model() + input_values = [random_gen((2, 3))] + input_dict = dict(zip(inputs, input_values)) + _, mlmodel, _, _, _, _ = TensorFlowBaseTest.run_compare_tf( + model, + input_dict, outputs, + compute_unit=compute_unit, + backend=backend) + + # Once rdar://76293949 (TF2 unit test InvalidArgumentError) is fixed, the following milproto frontend tests should be removed + from coremltools.converters.mil.frontend.milproto.test_load import \ + roundtrip_and_compare_mlmodel + if backend[0] != "mlprogram": + pytest.skip("milproto front end only supported in mlprogram") + roundtrip_and_compare_mlmodel(mlmodel, {"Placeholder": 
input_values[0]}) + + @pytest.mark.skip( + reason="[NNv2 TensorArray scatter returns wrong result](rdar://63345281)" + ) + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_tf_while_loop(self, compute_unit, backend): + @make_tf_graph([(3, 2)]) + def build_model(x): + def body(i, num_iters, array, update): + return i + 1, num_iters, array.write(i, update), update + + def cond(i, num_iters, array, update): + return i < num_iters + + i = 0 + max_iters = 3 + ta = tf.TensorArray(dtype=tf.float32, size=1, dynamic_size=True) + _, _, new_ta, _ = tf.while_loop(cond, body, [i, max_iters, ta, x]) + new_ta = new_ta.scatter([max_iters], tf.expand_dims(x, 0)) + + return new_ta.stack() + + model, inputs, outputs = build_model + input_values = [random_gen(shape=(3, 2))] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + +class TestBroadcastTo(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shapes, is_dynamic", + itertools.product( + compute_units, + backends, + [ + ((2,), (2,)), + ((1,), (10,)), + ((3,), (3, 3)), + ((1, 1), (1, 4)), + ((1, 1, 5), (3, 4, 4, 4, 5)), + ((3,), (1, 3, 2, 1, 3)), + ((3, 5), (2, 3, 5)), + ((1, 2), (2, 3, 1, 2)), + ((1, 3, 1, 4), (8, 3, 32, 4)), + ((2, 16), (3, 1, 4, 2, 16)), + ], + [False], + ), + ) + def test(self, compute_unit, backend, shapes, is_dynamic): + input_shape, output_shape = shapes + + if is_dynamic is False: + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.broadcast_to(x, output_shape) + + else: # output / target shape is an input (placeholder) + + @make_tf_graph([input_shape, (len(output_shape), tf.int32)]) + def build_model(x, shape): + return tf.broadcast_to(x, shape) + + model, inputs, outputs = build_model + if is_dynamic is False: + input_values = [random_gen(input_shape)] + else: + input_values = [ + random_gen(input_shape), + np.array(output_shape, dtype=np.int32), + ] + + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + +@pytest.mark.skipif(not _HAS_TF_1, reason=MSG_TF1_NOT_FOUND) +class TestContribLSTMBlockCell(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, batch, return_hc_only, has_peephole, has_clip", + itertools.product( + compute_units, + backends, + [1, 2], + [True, False], + [True, False], + [True, False], + ), + ) + def test_tf_no_variable( + self, compute_unit, batch, backend, return_hc_only, has_peephole, has_clip + ): + """ + If return_hc_only == True, the op can be mapped to mb.lstm. + Otherwise it has to be expanded. 
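+ (_lstm_block_cell returns the tuple (i, cs, f, o, ci, co, h); only cs and
+ h, i.e. res[1] and res[6], map onto mb.lstm outputs.)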
+ """ + # _lstm_block_cell allows fine-grained control of W, peephole etc + from tensorflow.contrib.rnn.python.ops.lstm_ops import _lstm_block_cell + + input_dim, hidden_dim = 2, 3 + x_shape = (batch, input_dim) + init_h = np.random.rand(batch, hidden_dim).astype(np.float32) + init_c = np.random.rand(batch, hidden_dim).astype(np.float32) + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.float32, shape=x_shape) + res = _lstm_block_cell( + x, + tf.constant(init_c), + tf.constant(init_h), + w=tf.constant( + np.random.rand(input_dim + hidden_dim, 4 * hidden_dim).astype( + np.float32 + ) + ), + b=tf.constant(np.random.rand(4 * hidden_dim).astype(np.float32)), + use_peephole=has_peephole, + wci=tf.constant(np.random.rand(hidden_dim).astype(np.float32)), + wcf=tf.constant(np.random.rand(hidden_dim).astype(np.float32)), + wco=tf.constant(np.random.rand(hidden_dim).astype(np.float32)), + forget_bias=np.random.rand(), + cell_clip=np.random.rand() if has_clip else -1, + ) + if return_hc_only: + # All other outputs aren't supported by mb.lstm. + res = res[1], res[6] + + TensorFlowBaseTest.run_compare_tf( + graph, + {x: np.random.rand(*x_shape).astype(np.float32),}, + res, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, batch", + itertools.product(compute_units, backends, [1, 2],), + ) + def test_tf_lstm_block_cell(self, compute_unit, backend, batch): + # tf.contrib.rnn.LSTMBlockCell runs a single step of an LSTM. It needs to be wrapped + # inside a for loop to handle inputs with sequence length more than 1. In that case, use + # tf.contrib.rnn.LSTMBlockFusedCell + input_dim, hidden_dim = 2, 3 + x_shape = (batch, input_dim) + init_h = np.random.rand(batch, hidden_dim).astype(np.float32) + init_c = np.random.rand(batch, hidden_dim).astype(np.float32) + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.float32, shape=x_shape) + rnn_cell = tf.contrib.rnn.LSTMBlockCell( + hidden_dim, use_peephole=True, forget_bias=np.random.rand() + ) + res = rnn_cell(x, (init_h, init_c)) + cs_new, h_new = res[1][0], res[1][1] + res = [h_new, cs_new] # shape of h_new, cs_new: (batch_dim, hidden_dim) + + TensorFlowBaseTest.run_compare_tf( + graph, + {x: np.random.rand(*x_shape).astype(np.float32),}, + res, + compute_unit=compute_unit, + backend=backend, + # variable needs to be frozen + freeze_graph=True, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, batch_size", + itertools.product(compute_units, backends, [1, 2],), + ) + def test_tf_lstm_block_fused_cell(self, compute_unit, backend, batch_size): + # tf.contrib.rnn.LSTMBlockFusedCell runs an LSTM over a sequence of inputs + input_dim, hidden_dim = 4, 3 + seq_length = 5 + init_h = np.zeros((batch_size, hidden_dim)).astype(np.float32) + init_c = np.zeros((batch_size, hidden_dim)).astype(np.float32) + x_shape = (seq_length, batch_size, input_dim) + with tf.Graph().as_default() as graph: + lstm_cell = tf.contrib.rnn.LSTMBlockFusedCell( + num_units=hidden_dim, + forget_bias=2.0, + cell_clip=None, + use_peephole=False, + ) + + x = tf.placeholder(tf.float32, shape=x_shape) + # shape of output: (seq_length, batch_size, hidden_dim) + # shape of output_state: Tuple of shape ((batch_size, hidden_dim), (batch_size, hidden_dim)) + output, output_state = lstm_cell( + inputs=x, + initial_state=(init_c, init_h), + ) + output = tf.nn.relu(output) + + res = TensorFlowBaseTest.run_compare_tf( + graph, + {x: np.random.rand(*x_shape).astype(np.float32),}, + output, + compute_unit=compute_unit, 
+ backend=backend, + # variable needs to be frozen + freeze_graph=True, + ) + + # check that the resulting program has the LSTM block as a fused op + coreml_model = res[1] + mil_prog = coreml_model._get_mil_internal() + assert len(mil_prog.find_ops(op_type="lstm")) == 1 + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,), + ) + def test_tf_multiple_lstm_block_fused_cell(self, compute_unit, backend): + ''' + Define a network with a stack of fused LSTM ops: + + %input (shape: (Seq, Batch, idim) == (5, 2, 4)) + %x1 = LSTM(h=10) (%input) # shape = (5, 2, 10) + %x2 = LSTM(h=20) (%x1) # shape = (5, 2, 20) + %x3 = slice()(%x2) # shape = (1, 2, 20), to get the final seq value + %x4 = reshape((1, -1)) (%x3) # shape = (1, 40) + %x5 = Dense(h=3)(%x4) # shape = (1, 3) + ''' + input_dim = 4 + seq_length = 5 + batch_size = 2 + x_shape = (seq_length, batch_size, input_dim) + + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.float32, shape=x_shape) # shape = (5, 2, 4) + + lstm_cell_1 = tf.contrib.rnn.LSTMBlockFusedCell(num_units=10) + x1, _ = lstm_cell_1(x, dtype=tf.float32) # shape = (5, 2, 10) + lstm_cell_2 = tf.contrib.rnn.LSTMBlockFusedCell(num_units=20) + x2 , _ = lstm_cell_2(x1, dtype=tf.float32) # shape = (5, 2, 20) + x3 = tf.slice(x2, begin=[4, 0, 0], size=[1, 2, 20]) # shape = [1, 2, 20] + x4 = tf.reshape(x3, shape=(1, -1)) # shape = [1, 40] + x5 = tf.linalg.matmul(x4, tf.constant(np.arange(1, 40*3, dtype=np.float32), shape=[40, 3])) # shape: [1, 3] + + res = TensorFlowBaseTest.run_compare_tf( + graph, + {x: np.random.rand(*x_shape).astype(np.float32),}, + x5, + compute_unit=compute_unit, + backend=backend, + # variable needs to be frozen + freeze_graph=True, + ) + + # check that the resulting program has the LSTM block ops as fused ops + coreml_model = res[1] + mil_prog = coreml_model._get_mil_internal() + assert len(mil_prog.find_ops(op_type="lstm")) == 2 + +@pytest.mark.skipif(not _HAS_TF_1, reason=MSG_TF1_NOT_FOUND) +class TestVariable(TensorFlowBaseTest): + @pytest.mark.xfail(reason="Investigate get_global ", run=False) + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_tf_no_variable(self, compute_unit, backend): + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.float32, shape=[1,], name="input") + y = tf.Variable([1.0], dtype=tf.float32, name="y") + + # We set our assign op + assign_op = tf.assign(y, y + 10) + + with tf.control_dependencies([assign_op]): + res = tf.multiply(x, y, name="output") + + TensorFlowBaseTest.run_compare_tf( + graph, + {x: np.random.rand(1).astype(np.float32),}, + res, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestZerosLike(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, dynamic", + itertools.product( + compute_units, + backends, + [rank for rank in range(5)], + [True, False], + ), + ) + def test(self, compute_unit, backend, rank, dynamic): + if rank == 0: + pytest.skip('Rank 0 not supported by CoreML runtime') + input_shape = np.random.randint(low=2, high=4, size=rank) + input_value = random_gen(input_shape, rand_min=-1, rand_max=1) + if dynamic: + a, b = np.prod(input_shape[:2]), np.prod(input_shape[2:]) + reshape_vals = np.array([a, b], dtype=np.int32) + reshape_input_shape = np.array([2], dtype=np.int32) + + @make_tf_graph([input_shape, list(reshape_input_shape) + [tf.int32]]) + def build_model(x, reshape): + x = tf.reshape(x, shape=reshape) + return 
tf.raw_ops.ZerosLike(x=x) + + model, inputs, outputs = build_model + input_values = [input_value, reshape_vals] + else: + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.raw_ops.ZerosLike(x=x) + + model, inputs, outputs = build_model + input_values = [input_value] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestIsFinite(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, dynamic", + itertools.product( + compute_units, + backends, + [rank for rank in range(5)], + [True, False] + ), + ) + def test(self, compute_unit, backend, rank, dynamic): + if rank == 0: + pytest.skip('Rank 0 not supported by CoreML runtime') + + def _generate_num_with_inf(input_shape): + res = random_gen(input_shape, rand_min=-1, rand_max=1) + random_map = np.random.choice([np.inf, -np.inf, 0], size=input_shape) + if len(input_shape) == 0: + return random_map.astype(np.float32) + res[np.where(random_map == np.inf)] = np.inf + res[np.where(random_map == -np.inf)] = -np.inf + return res.astype(np.float32) + + input_shape = np.random.randint(low=2, high=4, size=rank) + input_value = _generate_num_with_inf(input_shape) + if dynamic: + reshape_shape = [2, tf.int32] + + if len(input_shape) == 0: + reshape_value = np.array([1, 1], dtype=np.int32) + else: + reshape_value = np.array( + [input_shape[0], np.prod(input_shape[1:])], dtype=np.int32 + ) + + @make_tf_graph([input_shape, reshape_shape]) + def build_model(x, reshape): + x = tf.reshape(x, reshape) + x = tf.raw_ops.IsFinite(x=x) + return tf.raw_ops.Cast(x=x, DstT=tf.float32) + + model, inputs, outputs = build_model + input_values = [input_value, reshape_value] + + else: + + @make_tf_graph([input_shape]) + def build_model(x): + x = tf.raw_ops.IsFinite(x=x) + return tf.raw_ops.Cast(x=x, DstT=tf.float32) + + model, inputs, outputs = build_model + input_values = [input_value] + + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + backend=backend, + compute_unit=compute_unit, + ) + +class TestLogSoftMax(TensorFlowBaseTest): + @pytest.mark.parametrize( + 'compute_unit, backend', + itertools.product( + compute_units, + backends, + ), + ) + def test(self, compute_unit, backend): + input_shape = (5, 20) + input_value = random_gen(input_shape, rand_min=-1, rand_max=1) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.math.log_softmax(x) + + model, inputs, outputs = build_model + input_values = [input_value] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + 'compute_unit, backend', + itertools.product( + compute_units, + backends, + ), + ) + def test_numerical_stability(self, compute_unit, backend): + input_shape = (4,) + input_value = np.array([10, 2, 10000, 4], dtype=np.float32) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.math.log_softmax(x) + + model, inputs, outputs = build_model + input_values = [input_value] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + +class TestClipByValue(TensorFlowBaseTest): + @pytest.mark.parametrize( + 'compute_unit, backend, rank, min_and_max', + itertools.product( + compute_units, + backends, 
+ [rank for rank in range(5)], + [(-1, 1), (-1, -1), (1, 2), (-3, -2)], + ), + ) + def test(self, compute_unit, backend, rank, min_and_max): + if rank == 0: + pytest.skip('Rank 0 not supported by CoreML runtime') + + input_shape = np.random.randint(low=2, high=4, size=rank) + min_val, max_val = min_and_max + input_value = random_gen(input_shape, rand_min=min_val-1, rand_max=max_val+1) + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.raw_ops.ClipByValue(t=x, clip_value_min=min_val, clip_value_max=max_val) + + model, inputs, outputs = build_model + input_values = [input_value] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + +class TestSize(TensorFlowBaseTest): + @pytest.mark.parametrize( + 'compute_unit, backend, rank, dynamic', + itertools.product( + compute_units, + backends, + [rank for rank in range(5)], + [True, False], + ), + ) + def test(self, compute_unit, backend, rank, dynamic): + if rank == 0: + pytest.skip('Rank 0 not supported by CoreML runtime') + + input_shape = np.random.randint(low=2, high=4, size=rank) + input_value = random_gen(input_shape, rand_min=-1, rand_max=1) + if dynamic: + a, b = np.prod(input_shape[:2]), np.prod(input_shape[2:]) + reshape_vals = np.array([a,b], dtype=np.int32) + reshape_input_shape = np.array([2], dtype=np.int32) + + @make_tf_graph([input_shape, list(reshape_input_shape)+[tf.int32]]) + def build_model(x, reshape): + x = tf.reshape(x, shape=reshape) + return tf.raw_ops.Size(input=x) + + model, inputs, outputs = build_model + input_values = [input_value, reshape_vals] + else: + @make_tf_graph([input_shape]) + def build_model(x): + return tf.raw_ops.Size(input=x) + + model, inputs, outputs = build_model + input_values = [input_value] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + +class TestAudioSpectrogram(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, params, magnitude_squared", + itertools.product( + compute_units, + backends, + [ + ((100, 2), 5, 10), + ((50, 1), 18, 2), + ((512, 1), 512, 320), + ], + [True, False], + ), + ) + def test_audio_spectrogram(self, compute_unit, backend, params, magnitude_squared): + input_shape = params[0] + window_size = params[1] + stride = params[2] + + @make_tf_graph([input_shape]) + def build_model(x): + y = tf.raw_ops.AudioSpectrogram(input=x, + window_size=window_size, + stride=stride, + magnitude_squared=magnitude_squared) + return y + + model, inputs, outputs = build_model + + input_values = [(2 * np.random.rand(*input_shape) - 1).astype(np.float32)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + +class TestMfcc(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, params", + itertools.product( + compute_units, + backends, + [ + ((100, 2), 5, 10, 8000, (40, 4000), 20, 13), + ((50, 1), 18, 2, 4000, (20, 1500), 40, 26), + ((512, 1), 512, 320, 16000, (20, 8000), 40, 26), + ], + ), + ) + def test_mfcc(self, compute_unit, backend, params): + if backend == ("mlprogram", "fp16"): + pytest.xfail("rdar://80660411 (MFCC FP16 unit tests failing in TF1 converter with numerical errors)") + + input_shape = params[0] + window_size = params[1] + stride = params[2] + sample_rate = params[3] + 
lower_frequency_limit, upper_frequency_limit = params[4] + filterbank_channel_count = params[5] + dct_coefficient_count = params[6] + + @make_tf_graph([input_shape]) + def build_model(x): + y = tf.raw_ops.AudioSpectrogram(input=x, + window_size=window_size, + stride=stride, + magnitude_squared=True) + y_out = tf.raw_ops.Mfcc(spectrogram=y, + sample_rate=sample_rate, + upper_frequency_limit=upper_frequency_limit, + lower_frequency_limit=lower_frequency_limit, + filterbank_channel_count=filterbank_channel_count, + dct_coefficient_count=dct_coefficient_count) + return y_out + + model, inputs, outputs = build_model + + input_values = [(2 * np.random.rand(*input_shape) - 1).astype(np.float32)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestComplex(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_shape", + # Placeholder doesn't support rank-0 input, so we don't use empty shape here. + itertools.product(compute_units, backends, [[1], [2, 3], [4, 1, 5]]), + ) + def test_complex_basic(self, compute_unit, backend, input_shape): + x_shape = input_shape + y_shape = input_shape + + @make_tf_graph([x_shape, y_shape]) + def build_model(x, y): + complex_data = tf.complex(x, y) + return tf.stack([tf.math.real(complex_data), tf.math.imag(complex_data)]) + + model, inputs, outputs = build_model + + input_values = [ + np.random.rand(*x_shape).astype(np.float32), + np.random.rand(*y_shape).astype(np.float32), + ] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, input_dict, outputs, compute_unit=compute_unit, backend=backend + ) + + +class TestReal(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_shape", + itertools.product(compute_units, backends, [[1], [2, 3], [4, 1, 5]]), + ) + def test_real_real_input(self, compute_unit, backend, input_shape): + @make_tf_graph([input_shape]) + def build_model(x): + return tf.math.real(x) + + model, inputs, outputs = build_model + + input_values = [np.random.rand(*input_shape).astype(np.float32)] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, input_dict, outputs, compute_unit=compute_unit, backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend, input_shape", + itertools.product(compute_units, backends, [[1], [2, 3], [4, 1, 5]]), + ) + def test_real_complex_input(self, compute_unit, backend, input_shape): + x_shape = input_shape + y_shape = input_shape + + @make_tf_graph([x_shape, y_shape]) + def build_model(x, y): + return tf.math.real(tf.complex(x, y)) + + model, inputs, outputs = build_model + + input_values = [ + np.random.rand(*x_shape).astype(np.float32), + np.random.rand(*y_shape).astype(np.float32), + ] + + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, input_dict, outputs, compute_unit=compute_unit, backend=backend + ) + + +class TestImag(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_shape", + itertools.product(compute_units, backends, [[1], [2, 3], [4, 1, 5]]), + ) + def test_imag_real_input(self, compute_unit, backend, input_shape): + @make_tf_graph([input_shape]) + def build_model(x): + return x + tf.math.imag(x) + + model, inputs, outputs = build_model + + input_values = [np.random.rand(*input_shape).astype(np.float32)] + input_dict = dict(zip(inputs, input_values)) + + 
TensorFlowBaseTest.run_compare_tf( + model, input_dict, outputs, compute_unit=compute_unit, backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend, input_shape", + itertools.product(compute_units, backends, [[1], [2, 3], [4, 1, 5]]), + ) + def test_imag_complex_input(self, compute_unit, backend, input_shape): + x_shape = input_shape + y_shape = input_shape + + @make_tf_graph([x_shape, y_shape]) + def build_model(x, y): + return tf.math.imag(tf.complex(x, y)) + + model, inputs, outputs = build_model + + input_values = [ + np.random.rand(*x_shape).astype(np.float32), + np.random.rand(*y_shape).astype(np.float32), + ] + + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, input_dict, outputs, compute_unit=compute_unit, backend=backend + ) + + +class TestFft(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_shape", + itertools.product(compute_units, backends, [[1], [2, 3], [4, 1, 5]]), + ) + def test_fft_basic(self, compute_unit, backend, input_shape): + # No need to test other parameter combinations because tf.signal.fft doesn't provide API to + # control more fine-grained params such as "n,dim,norm" in PyTorch. + x_shape = input_shape + y_shape = input_shape + + @make_tf_graph([x_shape, y_shape]) + def build_model(x, y): + complex_data = tf.complex(x, y) + fft_res = tf.signal.fft(complex_data) + return tf.stack([tf.math.real(fft_res), tf.math.imag(fft_res)]) + + model, inputs, outputs = build_model + + input_values = [ + np.random.rand(*x_shape).astype(np.float32), + np.random.rand(*y_shape).astype(np.float32), + ] + + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, input_dict, outputs, compute_unit=compute_unit, backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product(compute_units, backends), + ) + def test_fft_directly_output_error(self, compute_unit, backend): + x_shape = [2, 3] + y_shape = [2, 3] + + @make_tf_graph([x_shape, y_shape]) + def build_model(x, y): + complex_data = tf.complex(x, y) + return tf.signal.fft(complex_data) + + model, inputs, outputs = build_model + input_values = [ + np.random.rand(*x_shape).astype(np.float32), + np.random.rand(*y_shape).astype(np.float32), + ] + input_dict = dict(zip(inputs, input_values)) + + with pytest.raises( + ValueError, match="MIL doesn't support complex data as model's output" + ): + TensorFlowBaseTest.run_compare_tf( + model, input_dict, outputs, compute_unit=compute_unit, backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend, input_shape", + itertools.product(compute_units, backends, [[1], [2, 3], [4, 1, 5]]), + ) + def test_fft_nested(self, compute_unit, backend, input_shape): + x_shape = input_shape + y_shape = input_shape + + @make_tf_graph([x_shape, y_shape]) + def build_model(x, y): + complex_data = tf.complex(x, y) + fft_res1 = tf.signal.fft(complex_data) + fft_res2 = tf.signal.fft(fft_res1) + fft_res3 = tf.signal.fft(fft_res2) + return tf.stack([tf.math.real(fft_res3), tf.math.imag(fft_res3)]) + + model, inputs, outputs = build_model + + input_values = [ + np.random.rand(*x_shape).astype(np.float32), + np.random.rand(*y_shape).astype(np.float32), + ] + + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, input_dict, outputs, compute_unit=compute_unit, backend=backend + ) + + +class TestRfft(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, fft_length, input_shape", + # 
TF requires fft_length be an int32 tensor of shape [1] instead of an integer. + itertools.product( + compute_units, backends, [None, [1], [3], [5]], [[1], [2, 3], [4, 1, 5]] + ), + ) + def test_rfft_basic(self, compute_unit, backend, fft_length, input_shape): + @make_tf_graph([input_shape]) + def build_model(x): + rfft_res = tf.signal.rfft(x, fft_length=fft_length) + return tf.stack([tf.math.real(rfft_res), tf.math.imag(rfft_res)]) + + model, inputs, outputs = build_model + + input_values = [ + np.random.rand(*input_shape).astype(np.float32), + ] + + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, input_dict, outputs, compute_unit=compute_unit, backend=backend + ) + + +class TestIfft(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_shape", + itertools.product(compute_units, backends, [[1], [2, 3], [4, 1, 5]]), + ) + def test_ifft_basic(self, compute_unit, backend, input_shape): + x_shape = input_shape + y_shape = input_shape + + @make_tf_graph([x_shape, y_shape]) + def build_model(x, y): + complex_input = tf.complex(x, y) + ifft_res = tf.signal.ifft(complex_input) + return tf.stack([tf.math.real(ifft_res), tf.math.imag(ifft_res)]) + + model, inputs, outputs = build_model + + input_values = [ + np.random.rand(*x_shape).astype(np.float32), + np.random.rand(*y_shape).astype(np.float32), + ] + + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, input_dict, outputs, compute_unit=compute_unit, backend=backend + ) + + +class TestIrfft(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, fft_length, input_shape", + # TF requires fft_length be an int32 tensor of shape [1] instead of an integer. + itertools.product( + compute_units, backends, [None, [1], [3], [5]], [[6], [2, 3], [4, 1, 5]] + ), + ) + def test_irfft_basic(self, compute_unit, backend, fft_length, input_shape): + x_shape = input_shape + y_shape = input_shape + + @make_tf_graph([x_shape, y_shape]) + def build_model(x, y): + complex_input = tf.complex(x, y) + return tf.signal.irfft(complex_input, fft_length=fft_length) + + model, inputs, outputs = build_model + + input_values = [ + np.random.rand(*x_shape).astype(np.float32), + np.random.rand(*y_shape).astype(np.float32), + ] + + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, input_dict, outputs, compute_unit=compute_unit, backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend, input_shape", + itertools.product(compute_units, backends, [[6], [2, 3], [4, 1, 5]]), + ) + def test_fft_length_specify_by_shape(self, compute_unit, backend, input_shape): + x_shape = input_shape + y_shape = input_shape + + @make_tf_graph([x_shape, y_shape]) + def build_model(x, y): + complex_input = tf.complex(x, y) + return tf.signal.irfft(complex_input, fft_length=[complex_input.shape[-1]]) + + model, inputs, outputs = build_model + + input_values = [ + np.random.rand(*x_shape).astype(np.float32), + np.random.rand(*y_shape).astype(np.float32), + ] + + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, input_dict, outputs, compute_unit=compute_unit, backend=backend + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_parse.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_parse.py new file mode 100644 index 00000000..286c2670 --- /dev/null +++ 
b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_parse.py @@ -0,0 +1,124 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import unittest + +import pytest + +pytest.importorskip("tensorflow", minversion="1.15.0") +from tensorflow.core.framework import attr_value_pb2 as attr_value +from tensorflow.core.framework import tensor_shape_pb2 as tensor_shape +from tensorflow.core.framework import types_pb2 as types + +import coremltools.converters.mil.frontend.tensorflow.parse as parse +from coremltools.converters.mil.mil import types as mil_types + + +class TestParse(unittest.TestCase): + def test_parse_list(self): + def compare(expected, lst, field_name): + attr = attr_value.AttrValue() + field = getattr(attr.list, field_name) + field.extend(lst) + + actual = parse.parse_attr(attr) + self.assertEqual(expected, actual) + + compare([1, 2, 3], [1, 2, 3], "i") + compare(["foo", "bar"], [b"foo", b"bar"], "s") + + def test_parse_scalar(self): + def compare(expected, val, field_name): + a = attr_value.AttrValue() + setattr(a, field_name, val) + actual = parse.parse_attr(a) + self.assertEqual(expected, actual) + + compare("a String", b"a String", "s") + compare(55, 55, "i") + compare(True, True, "b") + + attr = attr_value.AttrValue() + attr.f = 12.3 + self.assertAlmostEqual(12.3, parse.parse_attr(attr), places=2) + + @staticmethod + def _attr_with_shape(dims, unknown_rank=0): + attr = attr_value.AttrValue() + for (dim_size, dim_name) in dims: + tf_dim = tensor_shape.TensorShapeProto.Dim() + tf_dim.size = dim_size + tf_dim.name = dim_name + attr.shape.dim.append(tf_dim) + attr.shape.unknown_rank = unknown_rank + return attr + + def test_parse_shape(self): + def compare(expected, dims, unknown_rank=0): + attr = self._attr_with_shape(dims, unknown_rank) + actual = parse.parse_attr(attr) + self.assertEqual(expected, actual) + + compare(None, [], 5) + compare([100], [(100, "outer")]) + compare([1, 2, 3], [(1, "outer"), (2, "middle"), (3, "inner")]) + + def test_parse_tensor(self): + # Zero-rank tensor + attr = attr_value.AttrValue() + attr.tensor.version_number = 1 + attr.tensor.dtype = types.DataType.DT_INT32 + t = parse.parse_attr(attr) + self.assertTrue(isinstance(t, mil_types.int32)) + self.assertEqual(0, t.val) + + # Non-zero rank + attr = attr_value.AttrValue() + attr.tensor.version_number = 1 + attr.tensor.dtype = types.DataType.DT_INT32 + shaped_attr = self._attr_with_shape([(1, "outer"), (2, "middle"), (3, "inner")]) + attr.tensor.tensor_shape.dim.extend(shaped_attr.shape.dim) + attr.tensor.int_val.extend([55, 56, 57]) + + t = parse.parse_attr(attr) + self.assertEqual([55, 56, 57], t.val.tolist()) + self.assertEqual("tensor", mil_types.get_type_info(t).name) + + # Note that the result of t.get_primitive() is a function that returns a type + # rather than an instance of that type as it is when the tensor has rank zero. 
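+        # Hence the extra pair of parentheses below: t.get_primitive() yields
+        # the mil_types.int32 class, while t.get_primitive()() yields an
+        # instance of it that isinstance() can check.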
+        self.assertTrue(isinstance(t.get_primitive()(), mil_types.int32))
+        self.assertEqual((1, 2, 3), t.get_shape())
+
+    def test_parse_type(self):
+        def compare(expected, tf_type):
+            attr = attr_value.AttrValue()
+            attr.type = tf_type
+            self.assertEqual(expected, parse.parse_attr(attr))
+
+        compare(None, types.DataType.DT_INVALID)
+        compare(mil_types.float, types.DataType.DT_FLOAT)
+        compare(mil_types.double, types.DataType.DT_DOUBLE)
+        compare(mil_types.int32, types.DataType.DT_INT32)
+        compare(mil_types.uint8, types.DataType.DT_UINT8)
+        compare(mil_types.int16, types.DataType.DT_INT16)
+        compare(mil_types.int8, types.DataType.DT_INT8)
+        compare(mil_types.str, types.DataType.DT_STRING)
+        compare(None, types.DataType.DT_COMPLEX64)
+        compare(mil_types.int32, types.DataType.DT_INT64)
+        compare(mil_types.bool, types.DataType.DT_BOOL)
+        compare(None, types.DataType.DT_QINT8)
+        compare(None, types.DataType.DT_QUINT8)
+        compare(None, types.DataType.DT_QINT32)
+        compare(None, types.DataType.DT_BFLOAT16)
+        compare(None, types.DataType.DT_QINT16)
+        compare(None, types.DataType.DT_QUINT16)
+        compare(mil_types.uint16, types.DataType.DT_UINT16)
+        compare(None, types.DataType.DT_COMPLEX128)
+        compare(mil_types.fp16, types.DataType.DT_HALF)
+        compare(None, types.DataType.DT_RESOURCE)
+        compare(None, types.DataType.DT_VARIANT)
+        compare(mil_types.uint32, types.DataType.DT_UINT32)
+        compare(mil_types.uint64, types.DataType.DT_UINT64)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_parsed_tf_node.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_parsed_tf_node.py
new file mode 100644
index 00000000..d39bb861
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_parsed_tf_node.py
@@ -0,0 +1,65 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import unittest + +import pytest + +pytest.importorskip("tensorflow", minversion="1.15.0") +from tensorflow.core.framework import node_def_pb2 as node_def +from tensorflow.core.framework import tensor_shape_pb2 as tensor_shape +from tensorflow.core.framework import types_pb2 as types + +from coremltools.converters.mil.frontend.tensorflow.parsed_tf_node import \ + ParsedTFNode + + +def _mock_tf_node(): + tfnode = node_def.NodeDef() + tfnode.name = "aNode" + tfnode.op = "PlaceholderWithDefault" + tfnode.input.extend(["anInput", "^aControlInput"]) + tfnode.attr["dtype"].type = types.DataType.DT_INT32 + dims = [(1, "outer"), (2, "middle"), (3, "inner")] + for (dim_size, dim_name) in dims: + tf_dim = tensor_shape.TensorShapeProto.Dim() + tf_dim.size = dim_size + tf_dim.name = dim_name + tfnode.attr["shape"].shape.dim.append(tf_dim) + return tfnode + + +class TestParsedTFNode(unittest.TestCase): + def test_init(self): + parsed_node = ParsedTFNode(_mock_tf_node()) + parsed_node.parse_from_attr() + self.assertEqual("aNode", parsed_node.name) + self.assertEqual("Placeholder", parsed_node.op) + self.assertEqual(["anInput"], parsed_node.inputs) + self.assertEqual(["aControlInput"], parsed_node.control_inputs) + + def test_copy(self): + parsed_node = ParsedTFNode(_mock_tf_node()) + parsed_node.parse_from_attr() + copy = parsed_node.copy() + self.assertTrue(isinstance(copy, type(parsed_node))) + props = [ + "name", + "op", + "datatype", + "value", + "inputs", + "control_inputs", + "outputs", + "control_outputs", + "attr", + "original_node", + ] + for prop in props: + self.assertEqual( + getattr(parsed_node, prop), + getattr(copy, prop), + "Mismatch in property {}".format(prop), + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_tf_conversion_api.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_tf_conversion_api.py new file mode 100644 index 00000000..3c820eb2 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/test_tf_conversion_api.py @@ -0,0 +1,766 @@ +# Copyright (c) 2022, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os +import tempfile + +import numpy as np +import pytest + +import coremltools as ct +from coremltools._deps import _HAS_TF_1, _HAS_TF_2, MSG_TF1_NOT_FOUND +from coremltools.converters.mil.testing_utils import ( + assert_cast_ops_count, assert_input_dtype, assert_ops_in_mil_program, + assert_output_dtype, assert_prog_input_type, assert_prog_output_type, + assert_spec_input_image_type, assert_spec_output_image_type, + get_op_types_in_program, verify_prediction) +from coremltools.proto import FeatureTypes_pb2 as ft +from coremltools.test.api.test_api_examples import TestInputs as _TestInputs + +tf = pytest.importorskip("tensorflow") + +################################################################################# +# Note: all tests are also used as examples in https://coremltools.readme.io/docs +# as a reference. 
+# Whenever any of the following tests fails, we should update the API documentation
+#################################################################################
+
+
+@pytest.mark.skipif(not _HAS_TF_1, reason=MSG_TF1_NOT_FOUND)
+@pytest.mark.skipif(ct.utils._macos_version() < (10, 15), reason='Model produces specification 4.')
+class TestTensorFlow1ConverterExamples:
+    @staticmethod
+    def test_convert_from_frozen_graph(tmpdir):
+        with tf.Graph().as_default() as graph:
+            x = tf.placeholder(tf.float32, shape=(1, 2, 3), name="input")
+            y = tf.nn.relu(x, name="output")
+
+        mlmodel = ct.convert(graph, compute_units=ct.ComputeUnit.CPU_ONLY)
+
+        test_input = np.random.rand(1, 2, 3) - 0.5
+        with tf.compat.v1.Session(graph=graph) as sess:
+            expected_val = sess.run(y, feed_dict={x: test_input})
+        results = mlmodel.predict({"input": test_input})
+        np.testing.assert_allclose(results["output"], expected_val)
+
+    @staticmethod
+    def test_convert_from_frozen_graph_file(tmpdir):
+        # create the model to convert
+
+        # write a toy frozen graph
+        # Note that we usually need to run freeze_graph() on a tf.Graph();
+        # it is skipped here as this toy model does not contain any variables
+        with tf.Graph().as_default() as graph:
+            x = tf.placeholder(tf.float32, shape=(1, 2, 3), name="input")
+            y = tf.nn.relu(x, name="output")
+
+        save_path = str(tmpdir)
+        tf.io.write_graph(graph, save_path, "frozen_graph.pb", as_text=False)
+
+        # Create a test sample
+        # -0.5 to have some negative values
+        test_input = np.random.rand(1, 2, 3) - 0.5
+        with tf.compat.v1.Session(graph=graph) as sess:
+            expected_val = sess.run(y, feed_dict={x: test_input})
+
+        # The input `.pb` file is in the frozen graph format, usually
+        # generated by TensorFlow's utility function `freeze_graph()`
+        pb_path = os.path.join(save_path, "frozen_graph.pb")
+
+        # 3 ways to specify inputs:
+        # (1) Fully specify inputs
+        mlmodel = ct.convert(
+            pb_path,
+            # We specify inputs with a name matching the placeholder name.
+            inputs=[ct.TensorType(name="input", shape=(1, 2, 3))],
+            outputs=["output"],
+        )
+
+        # (2) Specify the input TensorType without a name (when there's only
+        # one input)
+        mlmodel = ct.convert(
+            pb_path,
+            # The TensorType name is optional when there's only one input.
+            inputs=[ct.TensorType(shape=(1, 2, 3))],
+            outputs=["output"],
+        )
+
+        # (3) Do not specify inputs at all. `inputs` is optional for TF. When
+        # inputs is not specified, convert() infers inputs from Placeholder
+        # nodes.
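+        # As an illustrative sketch, the bare call
+        #     mlmodel = ct.convert(pb_path)
+        # would also be accepted, since `outputs` is likewise optional for TF;
+        # the SavedModel test below relies on the same behavior.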
+ mlmodel = ct.convert(pb_path, outputs=["output"], compute_units=ct.ComputeUnit.CPU_ONLY) + + results = mlmodel.predict({"input": test_input}) + np.testing.assert_allclose(results["output"], expected_val) + mlmodel_path = os.path.join(save_path, "model.mlmodel") + # Save the converted model + mlmodel.save(mlmodel_path) + + results = mlmodel.predict({"input": test_input}) + np.testing.assert_allclose(results["output"], expected_val, atol=1e-3) + + @staticmethod + def test_convert_from_saved_model_dir(tmpdir): + # Sample input + test_input = np.random.rand(1, 3, 5) - 0.5 + + # create the model to convert + with tf.compat.v1.Session() as sess: + x = tf.placeholder(shape=(1, 3, 5), dtype=tf.float32) + y = tf.nn.relu(x) + + expected_val = sess.run(y, feed_dict={x: test_input}) + + # Save model as SavedModel + inputs = {"x": x} + outputs = {"y": y} + save_path = str(tmpdir) + tf.compat.v1.saved_model.simple_save(sess, save_path, inputs, outputs) + + # SavedModel directory generated by TensorFlow 1.x + # when converting from SavedModel dir, inputs / outputs are optional + mlmodel = ct.convert(save_path, compute_units=ct.ComputeUnit.CPU_ONLY) + + # Need input output names to call mlmodel + # x.name == 'Placeholder:0'. Strip out ':0' + input_name = x.name.split(":")[0] + results = mlmodel.predict({input_name: test_input}) + # y.name == 'Relu:0'. output_name == 'Relu' + output_name = y.name.split(":")[0] + np.testing.assert_allclose(results[output_name], expected_val) + + + @staticmethod + def test_freeze_and_convert_matmul_graph(): + # testing : https://coremltools.readme.io/docs/tensorflow-1#export-as-frozen-graph-and-convert + graph = tf.Graph() + with graph.as_default(): + x = tf.placeholder(tf.float32, shape=[None, 20], name="input") + W = tf.Variable(tf.truncated_normal([20, 10], stddev=0.1)) + b = tf.Variable(tf.ones([10])) + y = tf.matmul(x, W) + b + output_names = [y.op.name] + + from tensorflow.python.tools.freeze_graph import freeze_graph + + model_dir = tempfile.TemporaryDirectory() + graph_def_file = os.path.join(model_dir.name, "tf_graph.pb") + checkpoint_file = os.path.join(model_dir.name, "tf_model.ckpt") + frozen_graph_file = os.path.join(model_dir.name, "tf_frozen.pb") + + with tf.Session(graph=graph) as sess: + # initialize variables + sess.run(tf.global_variables_initializer()) + # save graph definition somewhere + tf.train.write_graph( + sess.graph, model_dir.name, graph_def_file, as_text=False + ) + # save the weights + saver = tf.train.Saver() + saver.save(sess, checkpoint_file) + + # take the graph definition and weights + # and freeze into a single .pb frozen graph file + freeze_graph(input_graph=graph_def_file, + input_saver="", + input_binary=True, + input_checkpoint=checkpoint_file, + output_node_names=",".join(output_names), + restore_op_name="save/restore_all", + filename_tensor_name="save/Const:0", + output_graph=frozen_graph_file, + clear_devices=True, + initializer_nodes="") + print("Tensorflow frozen graph saved at {}".format(frozen_graph_file)) + ct.convert(frozen_graph_file) + + @staticmethod + def test_convert_tf1_frozen_graph_to_milinternal(tmpdir): + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.float32, shape=(1, 2, 3), name="input") + y = tf.nn.relu(x, name="output") + + model = ct.convert(graph, convert_to='milinternal') + assert isinstance(model, ct.converters.mil.Program) + + @staticmethod + def test_mil_op_names_consistency(tmpdir): + ''' + Test to make sure that when the same model is converted to MIL program, + in the same session, it 
gives the same program, with the same op names.
+        '''
+        with tf.Graph().as_default() as graph:
+            x = tf.placeholder(tf.float32, shape=(1, 5, 5, 3), name="input")
+            conv = tf.nn.conv2d(
+                x,
+                filter=tf.constant(np.random.rand(1, 1, 3, 5), tf.float32),
+                padding="VALID",
+            )
+            y = tf.nn.relu(conv, name="output")
+
+        mil_prog1 = ct.convert(graph, convert_to='milinternal')
+        # convert the same model again
+        mil_prog2 = ct.convert(graph, convert_to='milinternal')
+
+        # compare the op types of the two programs
+        np.testing.assert_array_equal(get_op_types_in_program(mil_prog1), get_op_types_in_program(mil_prog2))
+
+###############################################################################
+# Note: Stress tests for TF1 input / output types
+###############################################################################
+@pytest.mark.skipif(ct.utils._macos_version() < (10, 15), reason='Model produces specification 4.')
+@pytest.mark.skipif(not _HAS_TF_1, reason=MSG_TF1_NOT_FOUND)
+class TestTf1Inputs(_TestInputs):
+    @staticmethod
+    def test_input_noname():
+        with tf.Graph().as_default() as graph:
+            x = tf.placeholder(tf.float32, shape=(1, 2, 3), name="input")
+            x1 = tf.placeholder(tf.float32, shape=(1, 2, 3), name="input_1")
+            y = tf.nn.relu(x, name="output")
+            y1 = tf.nn.relu(x1, name="output_1")
+
+        with pytest.raises(ValueError) as e:
+            model = ct.convert(
+                graph,
+                inputs=[ct.TensorType(shape=(1, 2, 3))]
+            )
+        expected_error = "Multiple inputs are found in graph, but no input name was provided"
+        assert expected_error == str(e.value)
+
+    @staticmethod
+    def test_input_wrongname():
+        with tf.Graph().as_default() as graph:
+            x = tf.placeholder(tf.float32, shape=(1, 2, 3), name="input")
+            x1 = tf.placeholder(tf.float32, shape=(1, 2, 3), name="input_1")
+            y = tf.nn.relu(x, name="output")
+            y1 = tf.nn.relu(x1, name="output_1")
+
+        with pytest.raises(ValueError) as e:
+            model = ct.convert(
+                graph,
+                inputs=[ct.TensorType(shape=(1, 2, 3), name="wrong_input")]
+            )
+        expected_error = "Input ({}) provided is not found in given tensorflow graph.
Placeholders in graph are: {}".format("wrong_input", ["input", "input_1"]) + assert expected_error == str(e.value) + + @staticmethod + @pytest.mark.skipif(not ct.utils._is_macos(), reason="test needs predictions") + def test_tf_predict_input(): + TestTf1Inputs._test_variant_input_type_prediction(tf.convert_to_tensor) + +@pytest.fixture +def int32_input_model(): + if not _HAS_TF_1: + pytest.skip(MSG_TF1_NOT_FOUND) + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.int32, shape=[10, 20], name="input") + out = tf.add(x, tf.constant(5, dtype=tf.int32), name="output") + return graph + +@pytest.fixture +def float32_input_model_add_op(): + if not _HAS_TF_1: + pytest.skip(MSG_TF1_NOT_FOUND) + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.float32, shape=[10, 20], name="input") + out = tf.add(x, tf.constant(5.5, dtype=tf.float32), name="output") + return graph + +@pytest.fixture +def float32_input_model_relu_ops(): + if not _HAS_TF_1: + pytest.skip(MSG_TF1_NOT_FOUND) + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.float32, shape=[10, 20], name="input") + x1 = tf.nn.relu(x) + out = tf.nn.relu(x1, name="output") + return graph + +@pytest.fixture +def int64_input_model(): + if not _HAS_TF_1: + pytest.skip(MSG_TF1_NOT_FOUND) + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.int64, shape=[10, 20], name="input") + out = tf.add(x, tf.constant(5, dtype=tf.int64), name="output") + return graph + +@pytest.fixture +def float32_two_input_model(): + if not _HAS_TF_1: + pytest.skip(MSG_TF1_NOT_FOUND) + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.float32, shape=[10, 20], name="input1") + y = tf.placeholder(tf.float32, shape=[10, 20], name="input2") + out = tf.add(x, y, name="output") + return graph + +@pytest.fixture +def float32_two_output_model(): + if not _HAS_TF_1: + pytest.skip(MSG_TF1_NOT_FOUND) + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.float32, shape=[10, 20], name="input") + y = tf.nn.relu(x) + out2 = tf.nn.relu6(x, name="output2") + out1 = tf.nn.relu(y, name="output1") + return graph + +@pytest.fixture +def rank3_input_model(): + if not _HAS_TF_1: + pytest.skip(MSG_TF1_NOT_FOUND) + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.float32, shape=[1, 10, 20], name="input") + out = tf.add(x, tf.constant(5, dtype=tf.float32), name="output") + return graph + +@pytest.fixture +def rank4_input_model(): + if not _HAS_TF_1: + pytest.skip(MSG_TF1_NOT_FOUND) + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.float32, shape=[1, 10, 20, 3], name="input") + out = tf.add(x, tf.constant(5, dtype=tf.float32), name="output") + return graph + +@pytest.fixture +def rank4_input_model_with_channel_first_output(): + if not _HAS_TF_1: + pytest.skip(MSG_TF1_NOT_FOUND) + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.float32, shape=[1, 10, 20, 3], name="input") + y = tf.add(x, tf.constant(5, dtype=tf.float32)) + out = tf.transpose(y, perm=[0, 3, 1, 2], name="output") + return graph + +@pytest.fixture +def rank4_grayscale_input_model(): + if not _HAS_TF_1: + pytest.skip(MSG_TF1_NOT_FOUND) + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.float32, shape=[1, 10, 20, 1], name="input") + out = tf.add(x, tf.constant(5, dtype=tf.float32), name="output") + return graph + +@pytest.fixture +def rank4_grayscale_input_model_with_channel_first_output(): + if not _HAS_TF_1: + pytest.skip(MSG_TF1_NOT_FOUND) + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.float32, 
shape=[1, 10, 20, 1], name="input") + y = tf.add(x, tf.constant(5, dtype=tf.float32)) + out = tf.transpose(y, perm=[0, 3, 1, 2], name="output") + return graph + +@pytest.fixture +def linear_model(): + if not _HAS_TF_1: + pytest.skip(MSG_TF1_NOT_FOUND) + # this model will test the fuse_matmul_weight_bias pass + with tf.Graph().as_default() as graph: + x = tf.placeholder(tf.float32, shape=[1, 2], name="input") + y = tf.matmul(x, tf.constant([1, 2], shape=(2, 4), dtype=tf.float32)) + y = tf.add(y, tf.constant([1, 2, 3, 4], shape=(4,), dtype=tf.float32)) + out = tf.nn.relu(y) + return graph + + +@pytest.mark.skipif(ct.utils._macos_version() < (13, 0), reason='Tests are for deployment target ios16/macos13') +class TestInputOutputConversionAPI: + + def test_input_dtype_inferred(self, int32_input_model): + # test that the input dtype is picked up from TF correctly + mlmodel = ct.convert(int32_input_model, + minimum_deployment_target=ct.target.macOS12) + assert_input_dtype(mlmodel, expected_type_str="int32") + verify_prediction(mlmodel) + + def test_unsupported_input_dtype_in_tf_graph(self, int64_input_model): + # test that no error is raised when no dtype is provided by the user, + # and the TF graph's input dtype is not supported. + # In this case, it will be mapped to the closest supported dtype + mlmodel = ct.convert(int64_input_model, + minimum_deployment_target=ct.target.macOS12) + assert_input_dtype(mlmodel, expected_type_str="int32") + verify_prediction(mlmodel) + + def test_input_dtype_user_provided(self, int32_input_model): + # test that provided dtype in the api overrides the input dtype in the TF model + mlmodel = ct.convert(int32_input_model, + inputs=[ct.TensorType(dtype=np.float32)], + minimum_deployment_target=ct.target.macOS12) + assert_input_dtype(mlmodel, expected_type_str="fp32") + assert_output_dtype(mlmodel, expected_type_str="fp32") + verify_prediction(mlmodel) + + def test_invalid_input_dtype(self, int32_input_model): + # error should be raised if a dtype is provided by the user that is not supported + with pytest.raises(TypeError, + match="is unsupported for inputs/outputs of the model" + ): + mlmodel = ct.convert(int32_input_model, + inputs=[ct.TensorType(dtype=np.int16)], + minimum_deployment_target=ct.target.macOS12) + + with pytest.raises(TypeError, + match="float16 dtype for inputs is only supported for deployment target >= iOS16/macOS13" + ): + mlmodel = ct.convert(int32_input_model, + inputs=[ct.TensorType(dtype=np.float16)], + minimum_deployment_target=ct.target.macOS12) + + def test_fp16_input_dtype(self, float32_input_model_add_op, float32_input_model_relu_ops, int32_input_model): + """ + Test that providing fp16 input dtype works with macOS13. + """ + mlmodel = ct.convert(float32_input_model_add_op, + inputs=[ct.TensorType(dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13 + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["add", "cast"]) + assert_input_dtype(mlmodel, expected_type_str="fp16") + assert_output_dtype(mlmodel, expected_type_str="fp32") + verify_prediction(mlmodel) + + mlmodel = ct.convert(float32_input_model_relu_ops, + inputs=[ct.TensorType(dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13 + ) + # Two consecutive relus are merged in the `merge_consecutive_relus` pass. 
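+        # Hence the expectation below: a single relu survives the merge, plus
+        # the cast that returns the fp16 compute result as the fp32 output.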
+ assert_ops_in_mil_program(mlmodel, expected_op_list=["relu", "cast"]) + assert_input_dtype(mlmodel, expected_type_str="fp16") + assert_output_dtype(mlmodel, expected_type_str="fp32") + verify_prediction(mlmodel) + + mlmodel = ct.convert(int32_input_model, + inputs=[ct.TensorType(dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["add", "cast"]) + assert_input_dtype(mlmodel, expected_type_str="fp16") + assert_output_dtype(mlmodel, expected_type_str="fp32") + verify_prediction(mlmodel) + + def test_fp16_input_dtype_fp32_precision(self, float32_input_model_add_op, float32_input_model_relu_ops, + int32_input_model): + """ + Same test as test_fp16_input_dtype, but with Float32 precision + """ + mlmodel = ct.convert(float32_input_model_add_op, + inputs=[ct.TensorType(dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13, + compute_precision=ct.precision.FLOAT32, + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "add"]) + assert_input_dtype(mlmodel, expected_type_str="fp16") + assert_output_dtype(mlmodel, expected_type_str="fp32") + verify_prediction(mlmodel) + + mlmodel = ct.convert(float32_input_model_relu_ops, + inputs=[ct.TensorType(dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13, + compute_precision=ct.precision.FLOAT32, + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "relu"]) + assert_input_dtype(mlmodel, expected_type_str="fp16") + assert_output_dtype(mlmodel, expected_type_str="fp32") + + def test_two_input_model(self, float32_two_input_model): + # test forcing input type of "input1" to be int32 + mlmodel = ct.convert(float32_two_input_model, + inputs=[ct.TensorType(name="input1", dtype=np.int32)], + minimum_deployment_target=ct.target.macOS12) + assert_input_dtype(mlmodel, expected_type_str="int32", expected_name="input1") + assert_input_dtype(mlmodel, expected_type_str="fp32", expected_name="input2") + assert_output_dtype(mlmodel, expected_type_str="fp32") + + # test forcing both inputs to be int32 + mlmodel = ct.convert(float32_two_input_model, + inputs=[ct.TensorType(name="input1", dtype=np.int32), + ct.TensorType(name="input2", dtype=np.int32), + ], + minimum_deployment_target=ct.target.macOS12) + assert_input_dtype(mlmodel, expected_type_str="int32", expected_name="input1") + assert_input_dtype(mlmodel, expected_type_str="int32", expected_name="input2") + assert_output_dtype(mlmodel, expected_type_str="int32") + + # if names are not provided an error should be raised + with pytest.raises(ValueError): + mlmodel = ct.convert(float32_two_input_model, + inputs=[ct.TensorType(dtype=np.int32), + ct.TensorType(dtype=np.int32), + ], + minimum_deployment_target=ct.target.macOS12) + + # test forcing both inputs to be float16 + mlmodel = ct.convert(float32_two_input_model, + inputs=[ct.TensorType(name="input1", dtype=np.float16), + ct.TensorType(name="input2", dtype=np.float16), + ], + minimum_deployment_target=ct.target.macOS13) + assert_input_dtype(mlmodel, expected_type_str="fp16", expected_name="input1") + assert_input_dtype(mlmodel, expected_type_str="fp16", expected_name="input2") + assert_output_dtype(mlmodel, expected_type_str="fp32") + assert_cast_ops_count(mlmodel, expected_count=1) + verify_prediction(mlmodel) + + def test_single_output_model(self, int32_input_model, float32_input_model_relu_ops): + # test output type + mlmodel = ct.convert(int32_input_model, + minimum_deployment_target=ct.target.macOS12) + assert_ops_in_mil_program(mlmodel, 
expected_op_list=["add"]) + assert_output_dtype(mlmodel, expected_type_str="int32") + + # test that error is raised when an output of unknown name is provided + with pytest.raises(Exception): + # output name does not exist in the model + mlmodel = ct.convert(int32_input_model, + outputs=["z"], + minimum_deployment_target=ct.target.macOS12) + + # test that error is raised when two outputs are provided without names + with pytest.raises(ValueError, match=", does not have names"): + mlmodel = ct.convert(int32_input_model, + outputs=[ct.TensorType(dtype=np.float32), ct.TensorType(dtype=np.float32)], + minimum_deployment_target=ct.target.macOS12) + + # test that an error is raised when shape is provided for the output + with pytest.raises(ValueError): + mlmodel = ct.convert(int32_input_model, + outputs=[ct.TensorType(dtype=np.float32, shape=(10, 20))], + minimum_deployment_target=ct.target.macOS12) + + # test that the output dtype provided by the user is applied during conversion + mlmodel = ct.convert(int32_input_model, + outputs=[ct.TensorType(dtype=np.float32)], + minimum_deployment_target=ct.target.macOS12) + assert_output_dtype(mlmodel, expected_type_str="fp32", expected_name="Identity" if _HAS_TF_2 else "output") + assert_ops_in_mil_program(mlmodel, expected_op_list=["add", "cast"]) + + # test that output dtype of float16 is rejected when deployment target is low + with pytest.raises(TypeError, + match="float16 dtype for outputs is only supported for deployment target >= iOS16/macOS13" + ): + ct.convert(float32_input_model_relu_ops, + outputs=[ct.TensorType(dtype=np.float16)], + minimum_deployment_target=ct.target.macOS12, + ) + + # test that output type float16 is applied correctly + mlmodel = ct.convert(float32_input_model_relu_ops, + outputs=[ct.TensorType(dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_output_dtype(mlmodel, expected_type_str="fp16", expected_name="Identity" if _HAS_TF_2 else "output") + assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "relu"]) + + # test that input and output types float16 are applied correctly + mlmodel = ct.convert(float32_input_model_relu_ops, + inputs=[ct.TensorType(dtype=np.float16)], + outputs=[ct.TensorType(dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_input_dtype(mlmodel, expected_type_str="fp16") + assert_output_dtype(mlmodel, expected_type_str="fp16", expected_name="Identity" if _HAS_TF_2 else "output") + assert_ops_in_mil_program(mlmodel, expected_op_list=["relu"]) + verify_prediction(mlmodel) + + def test_multi_output_model(self, float32_two_output_model): + # check that error is raised when only 1 output provided + with pytest.raises(ValueError, match="please provide names for each of the outputs"): + mlmodel = ct.convert(float32_two_output_model, + outputs=[ct.TensorType(dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13, + ) + + # check that error is raised when multiple outputs are provided without names + with pytest.raises(ValueError, match="please provide names for each of the outputs"): + mlmodel = ct.convert(float32_two_output_model, + outputs=[ct.TensorType(dtype=np.float16), ct.TensorType(dtype=np.float32)], + minimum_deployment_target=ct.target.macOS13, + ) + + # set 1 output to float16 and the other to float32 + output1_name = "Identity" if _HAS_TF_2 else "output1" + output2_name = "Identity_1" if _HAS_TF_2 else "output2" + mlmodel = ct.convert(float32_two_output_model, + inputs=[ct.TensorType(dtype=np.float16)], + 
outputs=[ct.TensorType(name=output2_name, dtype=np.float16), + ct.TensorType(name=output1_name, dtype=np.float32)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_cast_ops_count(mlmodel, expected_count=1) + assert_output_dtype(mlmodel, expected_type_str="fp16", expected_name=output2_name, index=0) + assert_output_dtype(mlmodel, expected_type_str="fp32", expected_name=output1_name, index=1) + assert_input_dtype(mlmodel, expected_type_str="fp16") + verify_prediction(mlmodel) + + # in this case only the single output will be selected + mlmodel = ct.convert(float32_two_output_model, + inputs=[ct.TensorType(dtype=np.float16)], + outputs=[ct.TensorType(name=output2_name, dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_cast_ops_count(mlmodel, expected_count=0) + assert_output_dtype(mlmodel, expected_type_str="fp16", expected_name=output2_name, index=0) + assert_input_dtype(mlmodel, expected_type_str="fp16") + verify_prediction(mlmodel) + + def test_color_input(self, rank4_input_model, rank3_input_model): + mlmodel = ct.convert(rank4_input_model, + inputs=[ct.ImageType(color_layout=ct.colorlayout.RGB)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "transpose", "add", "cast"]) + assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.RGB) + assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp32") + assert_prog_output_type(mlmodel._mil_program, expected_dtype_str="fp32") + verify_prediction(mlmodel) + + with pytest.raises(ValueError, match="must have rank 4"): + mlmodel = ct.convert(rank3_input_model, + inputs=[ct.ImageType(color_layout=ct.colorlayout.RGB)], + minimum_deployment_target=ct.target.macOS12, + ) + + def test_grayscale_input(self, rank4_input_model, rank3_input_model, rank4_grayscale_input_model): + with pytest.raises(ValueError, match="must have rank 4"): + mlmodel = ct.convert(rank3_input_model, + inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)], + minimum_deployment_target=ct.target.macOS13, + ) + + # invalid shape + with pytest.raises(ValueError): + mlmodel = ct.convert(rank4_input_model, + inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)], + minimum_deployment_target=ct.target.macOS13, + ) + + mlmodel = ct.convert(rank4_grayscale_input_model, + inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "transpose", "add", "cast"]) + assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE) + assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp32") + assert_prog_output_type(mlmodel._mil_program, expected_dtype_str="fp32") + verify_prediction(mlmodel) + + with pytest.raises(TypeError, match="float16 dtype for inputs is only supported for deployment target >= iOS16/macOS13"): + mlmodel = ct.convert(rank4_grayscale_input_model, + inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)], + minimum_deployment_target=ct.target.macOS12, + ) + + # test that grayscale_16 raises error when used with neural network + with pytest.raises(TypeError, match="float16 dtype for inputs is only supported for deployment target >= iOS16/macOS13"): + mlmodel = ct.convert(rank4_grayscale_input_model, + inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)], + ) + + mlmodel = ct.convert(rank4_grayscale_input_model, + 
inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)], + outputs=[ct.TensorType(dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["transpose", "add"]) + assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE_FLOAT16) + assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp16") + assert_output_dtype(mlmodel, expected_type_str="fp16") + verify_prediction(mlmodel) + + def test_color_output(self, rank4_input_model, rank4_input_model_with_channel_first_output): + # check that an error is raised if the output shape is not of form (1, 3, H, W) + with pytest.raises(ValueError, match="Shape of the RGB/BGR image output,"): + mlmodel = ct.convert(rank4_input_model, + inputs=[ct.ImageType(color_layout=ct.colorlayout.RGB)], + outputs=[ct.ImageType(color_layout=ct.colorlayout.RGB)], + minimum_deployment_target=ct.target.macOS13, + ) + + mlmodel = ct.convert(rank4_input_model_with_channel_first_output, + inputs=[ct.ImageType(color_layout=ct.colorlayout.BGR)], + outputs=[ct.ImageType(color_layout=ct.colorlayout.RGB)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "add", "cast"]) + assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.BGR) + assert_spec_output_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.RGB) + assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp32") + assert_prog_output_type(mlmodel._mil_program, expected_dtype_str="fp32") + verify_prediction(mlmodel) + + # check neural network conversion + mlmodel = ct.convert(rank4_input_model_with_channel_first_output, + inputs=[ct.ImageType(color_layout=ct.colorlayout.RGB)], + outputs=[ct.ImageType(color_layout=ct.colorlayout.BGR)], + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["add"]) + assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.RGB) + assert_spec_output_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.BGR) + verify_prediction(mlmodel) + + def test_grayscale_output(self, rank4_grayscale_input_model, rank4_grayscale_input_model_with_channel_first_output): + # check that an error is raised if the output shape is not of form (1, 1, H, W) + with pytest.raises(ValueError, match="Shape of the Grayscale image output,"): + mlmodel = ct.convert(rank4_grayscale_input_model, + inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)], + outputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)], + ) + + with pytest.raises(TypeError, match="float16 dtype for outputs is only supported for deployment target >= iOS16/macOS13"): + mlmodel = ct.convert(rank4_grayscale_input_model_with_channel_first_output, + outputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)], + minimum_deployment_target=ct.target.macOS12, + ) + + mlmodel = ct.convert(rank4_grayscale_input_model_with_channel_first_output, + inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)], + outputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)], + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["add"]) + assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE) + assert_spec_output_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE) + verify_prediction(mlmodel) + + mlmodel = ct.convert(rank4_grayscale_input_model_with_channel_first_output, + 
inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)],
+                             outputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)],
+                             minimum_deployment_target=ct.target.macOS13,
+                             )
+        assert_cast_ops_count(mlmodel, expected_count=0)
+        assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE_FLOAT16)
+        assert_spec_output_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE_FLOAT16)
+        assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp16")
+        assert_prog_output_type(mlmodel._mil_program, expected_dtype_str="fp16")
+        verify_prediction(mlmodel)
+
+        mlmodel = ct.convert(rank4_grayscale_input_model_with_channel_first_output,
+                             inputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)],
+                             outputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)],
+                             minimum_deployment_target=ct.target.macOS13,
+                             )
+        assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "add"])
+        assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE)
+        assert_spec_output_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE_FLOAT16)
+        assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp32")
+        assert_prog_output_type(mlmodel._mil_program, expected_dtype_str="fp16")
+        verify_prediction(mlmodel)
+
+
+    def test_linear_model(self, linear_model):
+        # this tests the fuse_matmul_weight_bias pass when the inputs are of type float16
+        mlmodel = ct.convert(linear_model,
+                             inputs=[ct.TensorType(dtype=np.float16)],
+                             outputs=[ct.TensorType(dtype=np.float16)],
+                             minimum_deployment_target=ct.target.macOS13,
+                             )
+        assert_input_dtype(mlmodel, expected_type_str="fp16")
+        assert_output_dtype(mlmodel, expected_type_str="fp16")
+        assert_ops_in_mil_program(mlmodel, ["linear", "relu"])
+        verify_prediction(mlmodel)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/testing_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/testing_utils.py
new file mode 100644
index 00000000..08dc9d9b
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/test/testing_utils.py
@@ -0,0 +1,373 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import os
+import tempfile
+
+import numpy as np
+import pytest
+
+import coremltools.models.utils as coremltoolsutils
+from coremltools._deps import _HAS_TF_2
+from coremltools.converters.mil.testing_reqs import ct
+from coremltools.converters.mil.testing_utils import (compare_backend,
+                                                      ct_convert)
+
+tf = pytest.importorskip("tensorflow", minversion="1.15.0")
+
+from tensorflow.python.framework import dtypes
+from tensorflow.python.keras.saving import saving_utils as _saving_utils
+from tensorflow.python.tools.freeze_graph import freeze_graph as freeze_g
+
+
+def make_tf_graph(input_types):
+    """
+    Decorator to help construct a TensorFlow 1.x model.
+
+    Parameters
+    ----------
+    input_types: list of tuple or list of list
+        List of input types. E.g. [(3, 224, 224, tf.int32)] represents one
+        input with shape (3, 224, 224) and expected data type tf.int32. The
+        dtype is optional; if it is missing, tf.float32 is used.
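+
+    For example (an illustrative sketch; the op body is arbitrary), the
+    decorated name is bound to the (graph, inputs, outputs) tuple::
+
+        @make_tf_graph([(2, 3)])
+        def build_model(x):
+            return tf.nn.relu(x)
+
+        model, inputs, outputs = build_model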
+
+    Returns
+    -------
+    tf.Graph, list of str, list of str
+    """
+
+    def wrapper(ops):
+        with tf.Graph().as_default() as model:
+            inputs = []
+            for input_type in input_types:
+                input_type = tuple(input_type) if input_type is not None else None
+                if input_type is not None and len(input_type) > 0 and isinstance(input_type[-1], dtypes.DType):
+                    shape, dtype = input_type[:-1], input_type[-1]
+                else:
+                    shape, dtype = input_type, tf.float32
+                inputs.append(tf.placeholder(shape=shape, dtype=dtype))
+
+            outputs = ops(*inputs)
+        return model, inputs, outputs
+
+    return wrapper
+
+
+def get_tf_keras_io_names(model):
+    """
+    Utility function to get tf.keras input/output names from a tf.keras model.
+
+    Parameters
+    ----------
+    model: tf.keras.Model
+    """
+    input_names, output_names = [], []
+    try:
+        # The order of outputs in conc_func.structured_outputs is the same order
+        # that Keras predicts in, which can be different from model.outputs
+        input_signature = _saving_utils.model_input_signature(
+            model, keep_original_batch_size=True
+        )
+        fn = _saving_utils.trace_model_call(model, input_signature)
+        conc_func = fn.get_concrete_function()
+        for key in conc_func.structured_outputs:
+            output_names.append(conc_func.structured_outputs[key].name.split(":")[0])
+    except:
+        for o in model.outputs:
+            output_names.append(o.name.split(":")[0].split("/")[-1])
+    for name in model.input_names:
+        input_names.append(name.split(":")[0])
+    return input_names, output_names
+
+
+def get_tf_node_names(tf_nodes, mode="inputs"):
+    """
+    Inputs:
+    - tf_nodes: list[str]. Names of target placeholders or output variables.
+    - mode: str. When mode == 'inputs', strip the tensor suffix from the input
+      names; for instance 'placeholder:0' becomes 'placeholder'.
+      When mode == 'outputs', keep the original suffix number, so
+      'bn:0' stays 'bn:0'.
+    Return a list of names from the given list of TensorFlow nodes. A tensor
+    name's postfix is eliminated if there is no ambiguity; otherwise, the
+    postfix is kept.
+    """
+    if not isinstance(tf_nodes, list):
+        tf_nodes = [tf_nodes]
+    names = list()
+    for n in tf_nodes:
+        tensor_name = n if isinstance(n, str) else n.name
+        if mode == "outputs":
+            names.append(tensor_name)
+            continue
+        name = tensor_name.split(":")[0]
+        if name in names:
+            # keep postfix notation for multiple inputs/outputs
+            names[names.index(name)] = name + ":" + str(names.count(name) - 1)
+            names.append(tensor_name)
+        else:
+            names.append(name)
+    return names
+
+
+def tf_graph_to_mlmodel(
+    graph, feed_dict, output_nodes, frontend="tensorflow",
+    backend=("neuralnetwork", "fp32"), compute_unit=ct.ComputeUnit.CPU_ONLY,
+    inputs_for_conversion=None, minimum_deployment_target=None,
+):
+    """
+    Parameters
+    ----------
+    graph: tf.Graph
+        TensorFlow 1.x model in tf.Graph format.
+    feed_dict: dict of {tf.placeholder -> np.array or python primitive}
+        Dict of placeholder and value pairs representing inputs.
+    output_nodes: tf.node or list[tf.node]
+        List of names representing outputs.
+    frontend: str
+        Frontend to convert from.
+    backend: str
+        Backend to convert to.
+    compute_unit: Enum[ct.ComputeUnit].
+        Compute unit for the Core ML model.
+    inputs_for_conversion: list of coremltools.TensorType() or coremltools.ImageType() objects
+        Defaults to None. It is passed as is to the "inputs" argument of the converter.
+    minimum_deployment_target : coremltools.target enumeration
+        It sets the minimum_deployment_target argument in the coremltools.convert function.
+
+    Returns
+    -------
+    MLModel, input values, output names, output nodes
+    """
+    if isinstance(output_nodes, tuple):
+        output_nodes = list(output_nodes)
+    if not isinstance(output_nodes, list):
+        output_nodes = [output_nodes]
+
+    # Convert TF graph.
+    input_names = get_tf_node_names(list(feed_dict.keys()), mode="inputs")
+    output_names = get_tf_node_names(output_nodes, mode="outputs")
+    input_values = {name: val for name, val in zip(input_names, feed_dict.values())}
+
+    inputs = inputs_for_conversion if inputs_for_conversion is not None else None
+
+    mlmodel = ct_convert(
+        graph, inputs=inputs, outputs=output_names, source=frontend, convert_to=backend,
+        compute_units=compute_unit,
+        minimum_deployment_target=minimum_deployment_target,
+    )
+
+    return mlmodel, input_values, output_names, output_nodes
+
+
+def load_tf_pb(pb_file):
+    """
+    Loads a pb file to tf.Graph
+    """
+    # We load the protobuf file from the disk and parse it to retrieve the
+    # deserialized graph_def
+    with tf.io.gfile.GFile(pb_file, "rb") as f:
+        graph_def = tf.compat.v1.GraphDef()
+        graph_def.ParseFromString(f.read())
+
+    # Then, we import the graph_def into a new Graph and return it
+    with tf.Graph().as_default() as graph:
+        # The name var will prefix every op/node in the graph.
+        # Since we load everything in a new graph, this is not needed.
+        tf.import_graph_def(graph_def, name="")
+    return graph
+
+
+def run_compare_tf(
+    graph,
+    feed_dict,
+    output_nodes,
+    inputs_for_conversion=None,
+    compute_unit=ct.ComputeUnit.CPU_ONLY,
+    frontend_only=False,
+    frontend="tensorflow",
+    backend=("neuralnetwork", "fp32"),
+    atol=1e-04,
+    rtol=1e-05,
+    freeze_graph=False,
+    tf_outputs=None,
+    minimum_deployment_target=None,
+):
+    """
+    Utility function to convert and compare a given TensorFlow 1.x model.
+
+    Parameters
+    ----------
+    graph: tf.Graph
+        TensorFlow 1.x model in tf.Graph format.
+    feed_dict: dict of (tf.placeholder, np.array)
+        Dict of placeholder and value pairs representing inputs.
+    output_nodes: tf.node or list[tf.node]
+        List of names representing outputs.
+    inputs_for_conversion: list of coremltools.TensorType() or coremltools.ImageType() objects
+        Defaults to None. It is passed as is to the "inputs" argument of the converter.
+    compute_unit: Enum[ct.ComputeUnit].
+        Compute unit for the Core ML model.
+    frontend_only: bool
+        If True, skip the prediction call and only validate the conversion.
+    frontend: str
+        Frontend to convert from.
+    backend: str
+        Backend to convert to.
+    atol: float
+        The absolute tolerance parameter.
+    rtol: float
+        The relative tolerance parameter.
+    freeze_graph: bool
+        If True, use the "tensorflow.python.tools.freeze_graph" function
+        to freeze the TF graph prior to conversion. This will ensure that
+        all the variables in the graph have been converted to constants.
+    tf_outputs: float or list[float]
+        If present, use it as the TensorFlow predictions.
+    minimum_deployment_target : coremltools.target enumeration
+        It sets the minimum_deployment_target argument in the coremltools.convert function.
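+
+    Examples
+    --------
+    A minimal sketch of typical usage (names, shapes and the op are
+    illustrative, not prescriptive)::
+
+        @make_tf_graph([(1, 4)])
+        def build_model(x):
+            return tf.nn.relu(x)
+
+        model, inputs, outputs = build_model
+        input_values = [np.random.rand(1, 4).astype(np.float32)]
+        input_dict = dict(zip(inputs, input_values))
+        run_compare_tf(model, input_dict, outputs)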
+
+    Returns
+    -------
+    Proto, mlmodel, input dictionary, prediction (if possible)
+    """
+    if not isinstance(output_nodes, (tuple, list)):
+        output_nodes = [output_nodes]
+
+    if freeze_graph:
+        with tempfile.TemporaryDirectory() as model_dir:
+            graph_def_file = os.path.join(model_dir, "tf_graph.pb")
+            checkpoint_file = os.path.join(model_dir, "tf_model.ckpt")
+            static_model_file = os.path.join(model_dir, "tf_static.pb")
+
+            with tf.Session(graph=graph) as sess:
+                sess.run(tf.global_variables_initializer())
+                if tf_outputs is None:
+                    tf_outputs = sess.run(output_nodes, feed_dict=feed_dict)
+                tf.train.write_graph(sess.graph, model_dir, graph_def_file, as_text=False)
+                saver = tf.train.Saver()
+                saver.save(sess, checkpoint_file)
+                output_node_names = get_tf_node_names(output_nodes, mode="outputs")
+                output_node_names = [name.split(":")[0] for name in output_node_names]
+                output_op_names = ",".join(output_node_names)
+                freeze_g(
+                    input_graph=graph_def_file,
+                    input_saver="",
+                    input_binary=True,
+                    input_checkpoint=checkpoint_file,
+                    output_node_names=output_op_names,
+                    restore_op_name="save/restore_all",
+                    filename_tensor_name="save/Const:0",
+                    output_graph=static_model_file,
+                    clear_devices=True,
+                    initializer_nodes="",
+                )
+            graph = load_tf_pb(static_model_file)
+
+    mlmodel, input_key_values, output_names, output_nodes = tf_graph_to_mlmodel(
+        graph, feed_dict, output_nodes, frontend, backend,
+        compute_unit=compute_unit,
+        inputs_for_conversion=inputs_for_conversion,
+        minimum_deployment_target=minimum_deployment_target
+    )
+
+    if frontend_only or coremltoolsutils._macos_version() < (10, 13) \
+       or (mlmodel.is_package and coremltoolsutils._macos_version() < (12, 0)):
+        return mlmodel._spec, mlmodel, input_key_values, None
+
+    if tf_outputs is None:
+        with tf.Session(graph=graph) as sess:
+            sess.run(tf.global_variables_initializer())
+            tf_outputs = sess.run(output_nodes, feed_dict=feed_dict)
+
+    expected_outputs = {name: val for name, val in zip(output_names, tf_outputs)}
+
+    for k, v in input_key_values.items():
+        if isinstance(v, np.ndarray) and issubclass(v.dtype.type, np.integer):
+            input_key_values[k] = v.astype(float)  # Core ML only accepts floats
+
+    pred = None
+    if not coremltoolsutils._has_custom_layer(mlmodel._spec):
+        pred = compare_backend(
+            mlmodel,
+            input_key_values,
+            expected_outputs,
+            atol=atol,
+            rtol=rtol,
+            also_compare_shapes=True,
+            dtype=backend[1],
+        )
+    else:
+        print('Skipping model prediction as it has a custom nn layer!')
+    return mlmodel._spec, mlmodel, input_key_values, pred
+
+
+def layer_counts(spec, layer_type):
+    spec_type_map = {
+        "neuralNetworkClassifier": spec.neuralNetworkClassifier,
+        "neuralNetwork": spec.neuralNetwork,
+        "neuralNetworkRegressor": spec.neuralNetworkRegressor,
+    }
+    nn_spec = spec_type_map.get(spec.WhichOneof("Type"))
+    if nn_spec is None:
+        raise ValueError("MLModel must have a neural network")
+
+    n = 0
+    for layer in nn_spec.layers:
+        if layer.WhichOneof("layer") == layer_type:
+            n += 1
+    return n
+
+
+class TensorFlowBaseTest:
+    testclassname = ''
+    testmodelname = ''
+
+    @pytest.fixture(autouse=True)
+    def store_testname_with_args(self, request):
+        TensorFlowBaseTest.testclassname = type(self).__name__
+        TensorFlowBaseTest.testmodelname = request.node.name
+
+    @staticmethod
+    def run_compare_tf(graph, feed_dict, output_nodes,
+                       inputs_for_conversion=None,
+                       compute_unit=ct.ComputeUnit.CPU_ONLY,
+                       frontend_only=False, frontend="tensorflow",
+                       backend=("neuralnetwork", "fp32"), atol=1e-04, rtol=1e-05,
+                       freeze_graph=False, tf_outputs=None,
+
minimum_deployment_target=None): + + res = run_compare_tf(graph, + feed_dict, + output_nodes, + inputs_for_conversion=inputs_for_conversion, + compute_unit=compute_unit, + frontend_only=frontend_only, + frontend=frontend, + backend=backend, atol=atol, + rtol=rtol, + freeze_graph=freeze_graph, + tf_outputs=tf_outputs, + minimum_deployment_target=minimum_deployment_target + ) + + alist = [] + if res is not None: + alist = list(res) + alist.append(TensorFlowBaseTest.testclassname) + alist.append(TensorFlowBaseTest.testmodelname) + + return tuple(alist) + + @staticmethod + def _op_count_in_mil_program(mlmodel, op_type): + prog = mlmodel._mil_program + return len(prog.find_ops(op_type=op_type)) + + +if _HAS_TF_2: + from coremltools.converters.mil.frontend.tensorflow2.test.testing_utils import ( + TensorFlow2BaseTest, make_tf2_graph) + from coremltools.converters.mil.frontend.tensorflow.test.testing_utils import \ + TensorFlowBaseTest + TensorFlowBaseTest.run_compare_tf = TensorFlow2BaseTest.run_compare_tf2 + make_tf_graph = make_tf2_graph + diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/__init__.py new file mode 100644 index 00000000..67dd1921 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/__init__.py @@ -0,0 +1,19 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + + +from .cond_to_where import cond_to_where +from .constant_propagation import constant_propagation +# graph passes +from .delete_asserts import delete_asserts +from .delete_constant import delete_unnecessary_constant_nodes +# graphdef to tfssa +from .delete_disconnected_nodes import delete_disconnected_nodes +from .functionalize_loops import functionalize_loops +from .fuse_dilation_conv import fuse_dilation_conv +from .insert_get_tuple import insert_get_tuple +from .quantization_pass import quantization_pass +from .tensor_array_transform import tensor_array_resource_removal +from .variable_node_transform import remove_variable_nodes diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/cond_to_where.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/cond_to_where.py new file mode 100644 index 00000000..678cd585 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/cond_to_where.py @@ -0,0 +1,130 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools import _logger as logger +from coremltools._deps import _HAS_TF_2 + +from ..basic_graph_ops import delete_node, disconnect_edge +from .visitors import FindAllUpstreamTerminals + + +def compute_max_rank(graph): + # highly inefficient way to calculate the rank of every node + ret = {} + # begin at max rank + for v in graph.keys(): + if len(graph[v].inputs) == 0: + ret[v] = 0 + else: + ret[v] = len(graph) + + changes = True + while changes: + changes = False + for v in graph.keys(): + if len(graph[v].inputs) > 0: + rank = max(ret[i] for i in graph[v].inputs) + 1 + if ret[v] != rank: + changes = True + ret[v] = rank + return ret + + +class CondToWhere: + @staticmethod + def _search(g, node_name): + """ + Find the nearest Switch nodes upstream of node_name. + """ + node = g[node_name] + + switches = ( + FindAllUpstreamTerminals(lambda x: x.op == "Switch") + .visit(g, node.name) + .get_result() + ) + if len(switches) == 0: + switches = ( + FindAllUpstreamTerminals( + lambda x: x.op == "Switch" or x.attr.get("was_switch") is not None + ) + .visit(g, node.name) + .get_result() + ) + return switches + + @staticmethod + def _fix_found_cond(g, merge, switches): + """ + Convert a Merge's Switch nodes to Identity ops and the Merge to iff. + """ + if g[switches[0]].op == "Switch": + condition_input = g[switches[0]].inputs[1] + else: + condition_input = g[switches[0]].attr["was_switch"] + + # convert the merge to a select + # TensorFlow seems to ensure the condition that the first + # merge input is the True branch and the second merge input + # is the false branch. + + # we convert switches to identity, detaching to switch condition + for s in switches: + if g[s].op == "Switch": + g[s].op = "Identity" + g[s].attr["was_switch"] = g[s].inputs[1] + # detach input 1: the switch condition + if g[s].inputs[0] == g[s].inputs[1]: + g[s].inputs.pop() + g[g[s].inputs[0]].outputs.pop() + else: + disconnect_edge(g, g[s].inputs[1], s) + + # build the final select + g[merge].op = "iff" + if not _HAS_TF_2: + # swap true branch with false branch to get the right semantics for IFF + g[merge].inputs[0], g[merge].inputs[1] = ( + g[merge].inputs[1], + g[merge].inputs[0], + ) + + g[merge].inputs = [condition_input] + g[merge].inputs + g[condition_input].outputs.append(merge) + + def cond_to_where(self, graph): + stuff_done = False + g = graph + ranks = compute_max_rank(graph) + merges = [a for a in g if g[a].op == "Merge"] + merges = sorted(merges, key=lambda k: ranks[k]) + if len(merges) == 0: + return False + for m in merges: + logger.debug("Fixing cond at merge location: %s", m) + switches = self._search(g, m) + self._fix_found_cond(g, m, switches) + stuff_done = True + + # delete the extra switches that seem to just lead to identities + # which then lead nowhere but into control dependencies + extra_switches = [a for a in g if g[a].op == "Switch"] + for s in extra_switches: + if all( + [g[o].op == "Identity" and len(g[o].outputs) == 0 for o in g[s].outputs] + ): + nodes_to_delete = g[s].outputs + [s] + for d in nodes_to_delete: + delete_node(g, d) + stuff_done = True + return stuff_done + + +def cond_to_where(tfssa): + for k, v in tfssa.functions.items(): + while True: + stuff_done = CondToWhere().cond_to_where(v.graph) + if not stuff_done: + break diff --git 
a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/constant_propagation.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/constant_propagation.py new file mode 100644 index 00000000..82f42d98 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/constant_propagation.py @@ -0,0 +1,163 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import gc +from distutils.version import StrictVersion as _StrictVersion + +import tensorflow as tf + +from coremltools import _logger as logger +from coremltools._deps import _get_version +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.types.type_mapping import \ + numpy_val_to_builtin_val + +from ..basic_graph_ops import const_determined_nodes + + +def _get_const_nodes(fn): + from tensorflow.core.framework import graph_pb2, node_def_pb2 + + new_graph = graph_pb2.GraphDef() + constant_nodes = set() + constant_node_num_outputs = {} + generated_nodes = [k for k, v in fn.graph.items() if v.original_node is None] + const_nodes_in_this_graph = const_determined_nodes(fn.graph, set(generated_nodes)) + # we can only run TF on nodes with outputs since we must evaluate + # tensors and not ops + const_nodes_in_this_graph = [ + i for i in const_nodes_in_this_graph if fn.graph[i].op != "NoOp" + ] + constant_nodes = constant_nodes.union(set(const_nodes_in_this_graph)) + + # topological sort const nodes + topsort = [] + topsort_set = set() + while len(const_nodes_in_this_graph) > 0: + for n in const_nodes_in_this_graph: + input_names = fn.graph[n].inputs + if len(set(input_names).difference(topsort_set)) == 0: + topsort.append(n) + topsort_set.add(n) + + const_nodes_in_this_graph = set(const_nodes_in_this_graph).difference( + topsort_set + ) + + for node in topsort: + new_node = node_def_pb2.NodeDef() + new_node.CopyFrom(fn.graph[node].original_node) + if "_class" in new_node.attr: + del new_node.attr["_class"] + del new_node.input[:] + new_node.input.extend(fn.graph[node].inputs) + if "_output_shapes" in fn.graph[node].attr: + constant_node_num_outputs[node] = len(fn.graph[node].attr["_output_shapes"]) + else: + constant_node_num_outputs[node] = 1 + new_graph.node.extend([new_node]) + del new_node + gc.collect() + return new_graph, list(constant_nodes), constant_node_num_outputs + + +def _constant_propagation(fn, new_graph, constant_nodes, constant_node_num_outputs): + try: + if len(constant_nodes) > 0: + with tf.Graph().as_default() as graph: + tf.import_graph_def(new_graph, name="") + + # We're only making one call to `sess.run()` in order to compute constant values. + # In this context, the default optimization settings make everything dramatically + # slower and more memory-intensive. 
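+                # So below we pin the optimizer level to L0; on TF >= 1.13.1 we go
+                # through the tf.compat.v1 aliases and additionally disable the
+                # Grappler meta-optimizer.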
+ if _get_version(tf.__version__) < _StrictVersion("1.13.1"): + session_config = tf.ConfigProto() + session_config.graph_options.optimizer_options.opt_level = ( + tf.OptimizerOptions.L0 + ) + sess = tf.Session(graph=graph, config=session_config) + else: + session_config = tf.compat.v1.ConfigProto() + session_config.graph_options.optimizer_options.opt_level = ( + tf.compat.v1.OptimizerOptions.L0 + ) + session_config.graph_options.rewrite_options.disable_meta_optimizer = ( + True + ) + sess = tf.compat.v1.Session(graph=graph, config=session_config) + + query_list = list() + control_flow_ops = list() + for c in constant_nodes: + for j in range(constant_node_num_outputs[c]): + query = c + ":" + str(j) + lower_query = query.lower() + if "switch" in lower_query or "cond" in lower_query: + control_flow_ops.append(query) + else: + query_list.append(query) + result_list = sess.run(query_list) + result = { + query_list[i]: result_list[i] for i in range(len(query_list)) + } + # propagate switch one by one + for op in control_flow_ops: + try: + res = sess.run([op]) + result.update({op: res[0]}) + except: + logger.warning( + '[Constant Propagation] Skip "dead" tensor: {}'.format( + op + ) + ) + result.update({op: None}) + + sess.close() + + for k, v in fn.graph.items(): + if k in constant_node_num_outputs: + if constant_node_num_outputs[k] == 1: + result_entry = k + ":0" + try: + v.value, v.datatype = numpy_val_to_builtin_val( + result[result_entry] + ) + except: + logger.error(result_entry) + logger.error(result[result_entry]) + else: + values = [ + result[k + ":" + str(i)] + for i in range(constant_node_num_outputs[k]) + ] + try: + npval = [numpy_val_to_builtin_val(i) for i in values] + v.datatype = types.tuple(tuple([val[1] for val in npval])) + v.value = v.datatype() + for idx, val in enumerate(npval): + v.value.val[idx] = val[0] + except: + logger.error(values) + for k, v in fn.graph.items(): + if v.op == "get_tuple": + inp = fn.graph[v.inputs[0]] + idx = v.attr["index"] + if inp.value is not None: + v.value = inp.value.val[idx] + v.datatype = inp.datatype.T[idx] + + except Exception as e: + logger.exception("Constant Propagation pass failed: {}".format(e)) + + +def constant_propagation(tfssa): + # we are going to rely on the TensorFlow graph to perform constant + # propagation. For each graph, we construct a new graph comprising + # only a subset of nodes that are constant nodes. + + for f in tfssa.functions.values(): + const_nodes_info = _get_const_nodes(f) + _constant_propagation(f, *const_nodes_info) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/delete_asserts.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/delete_asserts.py new file mode 100644 index 00000000..15fe20ec --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/delete_asserts.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import sys +from coremltools import _logger as logger + +from ..basic_graph_ops import delete_node + + +sys.setrecursionlimit(5000) # increase recursion limit to support convert large models + + +def _all_assert_leaves(gdict, nodename, memo): + """ + Does the given node lead to only assertions? 
+
+    Args:
+        gdict (dict): The node's graph.
+        nodename (str): The name of the node to test.
+        memo (dict): Storage for memoization.
+    """
+    work = [nodename]
+    while True:
+        assert len(work) <= len(gdict)  # if this fails, the algorithm is broken
+        node = gdict[work.pop()]
+
+        # Entries in memo have one of the following values for a given node:
+        #   None: the node is in the stack; this node is downstream.
+        #   True: the node is an assertion or leads only to assertions.
+        #   False: the node does not lead only to assertions.
+        if not isinstance(memo.get(node.name), bool):
+            memo[node.name] = None
+            outputs = node.outputs
+            if len(outputs) == 0:
+                # Leaf node: stack shrinks
+                memo[node.name] = node.op in ("Assert", "CheckNumerics")
+            else:
+                outputs_to_process = [n for n in outputs if n not in memo]
+                if len(outputs_to_process) == 0:
+                    # Non-leaf node with fully processed outputs: stack shrinks
+                    memo[node.name] = all(memo[n] for n in outputs)
+                else:
+                    # Non-leaf node with unprocessed outputs: stack grows
+                    work.append(node.name)
+                    work.extend(outputs_to_process)
+        if len(work) == 0:
+            return memo[node.name]
+
+
+def delete_asserts(tfssa):
+    """
+    Delete all nodes that lead only to assertions.
+    """
+    delete_count = 0
+    for f in tfssa.functions.values():
+        memo = {}
+        for n in f.graph:
+            _all_assert_leaves(f.graph, n, memo)
+        for m in memo:
+            if memo[m]:
+                delete_count += 1
+                delete_node(f.graph, m)
+    logger.debug("%d assert nodes deleted", delete_count)
+    return delete_count
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/delete_constant.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/delete_constant.py
new file mode 100644
index 00000000..9cc7ffbd
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/delete_constant.py
@@ -0,0 +1,82 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools import _logger as logger + +from ..basic_graph_ops import check_connections, delete_node, disconnect_edge + + +def convert_constant_nodes_to_const_ops(tfssa): + """ + Convert nodes with known constant value to Const nodes + """ + for fn_key in list(tfssa.functions.keys()): + f = tfssa.functions[fn_key] + for k in list(f.graph.keys()): + v = f.graph.get(k, None) + if v is None: + continue + if v.value is not None: + v.op = "Const" + # delete all upstream edges now that this is constant + inv = v.inputs[:] + for i in inv: + curnode = i + nextnode = v.name + disconnect_edge(f.graph, curnode, nextnode) + + # keep deleting upwards as long as it is a chain + while curnode is not None: + prevnode = None + if len(f.graph[curnode].outputs) == 0: + if len(f.graph[curnode].inputs) == 1: + prevnode = f.graph[curnode].inputs[0] + delete_node(f.graph, curnode) + curnode = prevnode + + +def delete_nodes_with_only_constant_descendents(tfssa): + # look for nodes whose value is known AND downstream values are known + # and delete them + delete_count = 0 + for fn_key in list(tfssa.functions.keys()): + f = tfssa.functions[fn_key] + keys = list(f.graph.keys()) + for k in keys: + if k not in f.graph: + continue + to_delete = (f.graph[k].value is not None) and (k not in f.outputs) + if to_delete: + # check the outputs + for o in f.graph[k].outputs: + if f.graph[o].value is None: + to_delete = False + else: + disconnect_edge(f.graph, k, o) + if to_delete: + delete_count += 1 + delete_node(f.graph, k) + # also delete all Const nodes with no descendants + keys = list(f.graph.keys()) + for k in keys: + if k not in f.graph: + continue + if ( + f.graph[k].op == "Const" + and len(f.graph[k].outputs) == 0 + and (k not in f.outputs) + ): + delete_count += 1 + delete_node(f.graph, k) + return delete_count + + +def delete_unnecessary_constant_nodes(tfssa): + delete_count = delete_nodes_with_only_constant_descendents(tfssa) + for f in list(tfssa.functions.values()): + check_connections(f.graph) + convert_constant_nodes_to_const_ops(tfssa) + logger.debug("%s nodes deleted", delete_count) + return delete_count diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/delete_disconnected_nodes.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/delete_disconnected_nodes.py new file mode 100644 index 00000000..9a83956a --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/delete_disconnected_nodes.py @@ -0,0 +1,21 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+
+def delete_disconnected_nodes(gd):
+    # delete all nodes with no inputs, outputs, or control edges
+    # (Placeholders are kept)
+    empty_nodes = []
+    for k, v in gd.items():
+        if (
+            len(gd[k].inputs) == 0
+            and len(gd[k].outputs) == 0
+            and len(gd[k].control_inputs) == 0
+            and len(gd[k].control_outputs) == 0
+            and gd[k].op != "Placeholder"
+        ):
+            empty_nodes.append(k)
+
+    for k in empty_nodes:
+        del gd[k]
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/functionalize_loops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/functionalize_loops.py
new file mode 100644
index 00000000..f3fb006a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/functionalize_loops.py
@@ -0,0 +1,469 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from coremltools import _logger as logger
+
+from ..basic_graph_ops import (connect_dests, connect_edge, connect_sources,
+                               delete_node, disconnect_edge, replace_dest,
+                               replace_source)
+from ..parsed_tf_node import ParsedTFNode
+from ..tfssa import SSAFunction
+from .visitors import (FindAllReachableNodes, FindImmediateDownstreamNodes,
+                       FindImmediateUpstreamNodes, FindSubgraph)
+
+
+class FunctionalizeLoops:
+    """
+    Turns while loops in a TensorFlow dataflow graph into the functional form:
+    while(cond_function, body_function)
+
+    Usage:
+    Given a graph in tfssa (the NetworkEnsemble defined in network.py) form,
+    this functionalizes *ONE* loop in the main function:
+
+        f = FunctionalizeLoops()
+        ret = f.functionalize_loops(tfssa, "main")
+
+    If ret is True, one loop has been functionalized and the new functions
+    added to tfssa. If it is False, there is no loop to functionalize.
+
+    Generally, repeated calls to this are necessary to catch all loops;
+    prefer the module-level functionalize_loops() below, which does this.
+ """ + + def __init__(self): + self.exits = None + self.merges = None + self.enters = None + self.constant_enters = None + self.switches = None + self.subgraph = None + self.loopcond = None + self.is_constant = None + self.next_iterations = None + self.cond = None + self.body = None + + def _search(self, g, node): + if not isinstance(node, ParsedTFNode): + node = g[node] + + # we look for NextIteration nodes + assert node.op == "Enter" + + frame_name = node.attr["frame_name"] + logger.debug("Fixing frame name: %s", frame_name) + # find all the enter args + # this is basically the enter frame + # functionalize_control_flow.cc:FunctionalizeControlFlow (1160-1196) + self.enters = [ + k for k, v in g.items() if v.attr.get("frame_name", "") == frame_name + ] + self.is_constant = [ + bool(g[n].attr.get("is_constant", False)) for n in self.enters + ] + self.merges = ( + FindImmediateDownstreamNodes(lambda x: x.op == "Merge") + .visit_many(g, self.enters) + .get_result() + ) + self.next_iterations = ( + FindImmediateUpstreamNodes(lambda x: x.op == "NextIteration") + .visit_many(g, self.merges) + .get_result() + ) + self.switches = ( + FindImmediateDownstreamNodes(lambda x: x.op == "Switch") + .visit_many(g, self.merges) + .get_result() + ) + self.exits = ( + FindImmediateDownstreamNodes(lambda x: x.op == "Exit") + .visit_many(g, self.switches) + .get_result() + ) + self.loopcond = list( + set( + FindImmediateUpstreamNodes(lambda x: x.op == "LoopCond") + .visit_many(g, self.switches) + .get_result() + ) + ) + + self.subgraph = FindSubgraph(self.exits).visit_many(g, self.enters).get_result() + self.cond = FindSubgraph(self.switches).visit_many(g, self.merges).get_result() + self.body = ( + FindSubgraph([node.name] + self.exits) + .visit_many(g, self.switches) + .get_result() + ) + # drop merges and switches from cond and body + self.cond = [ + i for i in self.cond if i not in (self.merges + self.switches + self.enters) + ] + self.body = ( + [i for i in self.body if i not in ([node.name] + self.switches)] + + [node.name] + + self.switches + + self.merges + + self.enters + ) + + # ok. we can now rebuild. 
+
+    def _fix_graph_invariants(self, g):
+        import copy
+
+        check = lambda x: x is not None and len(x) > 0
+        assert check(self.exits)
+        assert check(self.merges)
+        assert check(self.enters)
+        assert check(self.switches)
+        assert check(self.subgraph)
+        assert check(self.cond)
+        assert check(self.loopcond)
+        assert len(self.loopcond) == 1
+        # maintain the invariant of a unique Enter node per argument
+        # functionalize_control_flow.cc:FunctionalizeLoop (295)
+        for i in copy.copy(self.enters):
+            node = g[i]
+            assert len(node.outputs) > 0
+            assert len(node.inputs) == 1
+            assert len(node.control_inputs) == 0
+            assert len(node.control_outputs) == 0
+            if len(node.outputs) == 1:
+                continue
+            node_output_copy = copy.copy(node.outputs)
+            for j in range(1, len(node_output_copy)):
+                # make a new enter node for each
+                new_enter_node = copy.deepcopy(node)
+                new_enter_node.inputs = []
+                new_enter_node.outputs = []
+                new_enter_node.name = node.name + "/trsplit%d" % (j)
+                g[new_enter_node.name] = new_enter_node
+                logger.debug("splitting %s", node.name)
+                # connect the new node
+                enter_output = node_output_copy[j]
+                disconnect_edge(g, node.name, enter_output)
+                connect_edge(g, new_enter_node.name, enter_output)
+                connect_sources(g, node.inputs, new_enter_node.name)
+                # insert into graph
+                self.enters.append(new_enter_node.name)
+
+    def functionalize_loops(self, tfssa, function_to_functionalize):
+        g = tfssa.functions[function_to_functionalize].graph
+        loopni = [a for a in g if g[a].op == "Enter"]
+        if len(loopni) == 0:
+            return False
+        self._search(g, loopni[0])
+
+        self.constant_enters = [
+            self.enters[i] for i in range(len(self.enters)) if self.is_constant[i]
+        ]
+        self.enters = [
+            self.enters[i] for i in range(len(self.enters)) if not self.is_constant[i]
+        ]
+        self._fix_graph_invariants(g)
+        # for each enter node, find the corresponding downstream merge node
+        enter_corresponding_merge = [
+            FindImmediateDownstreamNodes(lambda x: x.op == "Merge")
+            .visit(g, enter)
+            .get_result()[0]
+            for enter in self.enters
+        ]
+        merge_corresponding_ni = [
+            FindImmediateUpstreamNodes(lambda x: x.op == "NextIteration")
+            .visit(g, merge)
+            .get_result()[0]
+            for merge in enter_corresponding_merge
+        ]
+        switch_corresponding_merge = []
+        for merge in enter_corresponding_merge:
+            switch_after_merge = (
+                FindImmediateDownstreamNodes(lambda x: x.op == "Switch")
+                .visit(g, merge)
+                .get_result()
+            )
+            if len(switch_after_merge) > 0:
+                switch_corresponding_merge.append(switch_after_merge[0])
+            else:
+                # There are some situations where there is no Switch for a given
+                # merge. While odd... it's ok.
we construct one + # In this situation there is no Exit either, but it can be + # constructed later on + new_switch_node = ParsedTFNode() + new_switch_node.op = "Switch" + new_switch_node.name = tfssa._find_free_name("fake_switch_") + g[new_switch_node.name] = new_switch_node + connect_edge(g, merge, new_switch_node.name) + connect_edge(g, self.loopcond[0], new_switch_node.name) + switch_corresponding_merge.append(new_switch_node.name) + + exit_corresponding_switch = [] + for switch in switch_corresponding_merge: + res = ( + FindImmediateDownstreamNodes(lambda x: x.op == "Exit") + .visit(g, switch) + .get_result() + ) + if len(res) > 0: + exit_corresponding_switch.append(res[0]) + else: + new_exit_node = ParsedTFNode() + new_exit_node.op = "Exit" + new_exit_node.name = tfssa._find_free_name("fake_exit_") + g[new_exit_node.name] = new_exit_node + connect_edge(g, switch, new_exit_node.name) + exit_corresponding_switch.append(new_exit_node.name) + + while_loop = ParsedTFNode() + while_loop.op = "while" + while_loop.name = tfssa._find_free_name("while_") + g[while_loop.name] = while_loop + + # Build the Loop Condition + + # replace all enters with a single make_tuple + # we replace merge with get_tuple and turn it into a function call + # terminated with LoopCond + make_inputs = ParsedTFNode() + make_inputs.op = "make_tuple" + make_inputs.name = tfssa._find_free_name("make_input_") + g[make_inputs.name] = make_inputs + for enter in self.enters: + replace_dest(g, g[enter].inputs[0], enter, make_inputs.name) + constant_base_index = len(make_inputs.inputs) + for enter in self.constant_enters: + replace_dest(g, g[enter].inputs[0], enter, make_inputs.name) + + connect_edge(g, make_inputs.name, while_loop.name) + connect_dests(g, while_loop.name, exit_corresponding_switch) + + # build the cond function + cond_body = ParsedTFNode() + cond_body.op = "function_entry" + cond_body.name = tfssa._find_free_name("cond_function_") + cond_body.inputs = [] + g[cond_body.name] = cond_body + for merge_idx in range(len(enter_corresponding_merge)): + merge = enter_corresponding_merge[merge_idx] + switch = switch_corresponding_merge[merge_idx] + enter_node = g[self.enters[merge_idx]] + merge_node = g[merge] + if switch is not None: + switch_node = g[switch] + else: + switch_node = None + merge_node.op = "get_tuple" + merge_node.attr = {"index": merge_idx} + # disconnect merge from switch + # disconnect loopcond from switch + disconnect_edge(g, enter_node.name, merge_node.name) + if switch_node is not None: + disconnect_edge(g, merge_node.name, switch_node.name) + disconnect_edge(g, self.loopcond[0], switch_node.name) + for i in merge_node.inputs[:]: + disconnect_edge(g, i, merge_node.name) + connect_edge(g, cond_body.name, merge_node.name) + # delete get_tuple if it does nothing + if len(merge_node.outputs) == 0: + delete_node(g, merge) + + g[self.loopcond[0]].op = "return" + + # build the body function + body = ParsedTFNode() + body.op = "function_entry" + body.name = tfssa._find_free_name("body_function_") + body.inputs = [] + g[body.name] = body + for switch_idx in range(len(switch_corresponding_merge)): + switch = switch_corresponding_merge[switch_idx] + exit = exit_corresponding_switch[switch_idx] + disconnect_edge(g, switch, exit) + + # replace switch with a get_tuple + switch_node = g[switch] + switch_node.op = "get_tuple" + switch_node.attr = {"index": switch_idx} + connect_edge(g, body.name, switch_node.name) + # delete get_tuple if it does nothing + if len(switch_node.outputs) == 0: + delete_node(g, 
switch) + + # replace all next_iteration with a single make_tuple + # we replace merge with get_tuple and turn it into a function call + # terminated with LoopCond + make_outputs = ParsedTFNode() + make_outputs.op = "make_tuple" + make_outputs.name = tfssa._find_free_name("make_output_") + g[make_outputs.name] = make_outputs + for ni in merge_corresponding_ni: + connect_edge(g, g[ni].inputs[0], make_outputs.name) + + # connect constant enters to come from function + # connect constant enters to exit + for idx, enter in enumerate(self.constant_enters): + for output in list(g[enter].outputs): + if output not in self.cond and output not in self.body: + cond_intersection = ( + FindSubgraph(self.cond).visit(g, output).get_result() + ) + body_intersection = ( + FindSubgraph(self.body).visit(g, output).get_result() + ) + if len(cond_intersection) > 0: + cond_intersection.append(output) + self.cond += cond_intersection + if len(body_intersection) > 0: + body_intersection.append(output) + self.body += body_intersection + get_tuple = ParsedTFNode() + get_tuple.op = "get_tuple" + get_tuple.name = tfssa._find_free_name("get_tuple_const_") + get_tuple.attr = {"index": idx + constant_base_index} + g[get_tuple.name] = get_tuple + + if output in self.cond: + connect_edge(g, cond_body.name, get_tuple.name) + elif output in self.body: + connect_edge(g, body.name, get_tuple.name) + replace_source(g, enter, output, get_tuple.name) + + # body must accept and return everything + get_tuple = ParsedTFNode() + get_tuple.op = "get_tuple" + get_tuple.name = tfssa._find_free_name("get_tuple_const_") + get_tuple.attr = {"index": idx + constant_base_index} + g[get_tuple.name] = get_tuple + connect_edge(g, body.name, get_tuple.name) + connect_edge(g, get_tuple.name, make_outputs.name) + + assert len(g[make_outputs.name].inputs) == len(g[make_inputs.name].inputs) + + output_return = ParsedTFNode() + output_return.op = "return" + output_return.name = tfssa._find_free_name("body_return_") + g[output_return.name] = output_return + connect_edge(g, make_outputs.name, output_return.name) + while_loop.attr["cond_function"] = cond_body.name + while_loop.attr["body_function"] = body.name + for i in self.enters: + delete_node(g, i) + for i in self.next_iterations: + delete_node(g, i) + for i in self.constant_enters: + delete_node(g, i) + + for i in range(len(exit_corresponding_switch)): + exit_node = exit_corresponding_switch[i] + g[exit_node].op = "get_tuple" + g[exit_node].attr = {"index": i} + cond_function = ( + FindSubgraph(self.loopcond[0]).visit(g, cond_body.name).get_result() + ) + cond_function = set(cond_function + [self.loopcond[0], cond_body.name]) + body_function = ( + FindSubgraph(output_return.name).visit(g, body.name).get_result() + ) + body_function = set(body_function + [body.name, output_return.name]) + + # trace input constants associated with the cond_graph + # and the body_graph. These constants can only have one consumer + # for now. Any more and we will either need to associate + # it as an argument, or split the constant. 
+ cond_constants = ( + FindImmediateUpstreamNodes(lambda x: x.op == "Const") + .visit_many(g, cond_function) + .get_result() + ) + body_constants = ( + FindImmediateUpstreamNodes(lambda x: x.op == "Const") + .visit_many(g, body_function) + .get_result() + ) + # for const_node in cond_constants + body_constants: + # assert(len(g[const_node].outputs) == 1) + + cond_function = cond_function.union(set(cond_constants)) + body_function = body_function.union(set(body_constants)) + + downstream_cond = ( + FindAllReachableNodes(lambda x: True) + .visit_many(g, cond_function) + .get_result() + ) + downstream_cond = set(downstream_cond) - cond_function + if len(downstream_cond) > 0: + logger.debug( + "Disconnecting unused variables in condition function %s", + downstream_cond, + ) + for i in downstream_cond: + delete_node(g, i) + + downstream_body = ( + FindAllReachableNodes(lambda x: True) + .visit_many(g, body_function) + .get_result() + ) + downstream_body = set(downstream_body) - body_function + if len(downstream_body) > 0: + logger.debug( + "Disconnecting unused variables in body function %s", downstream_body + ) + for i in downstream_body: + delete_node(g, i) + + cond_graph = {k: v for k, v in g.items() if k in cond_function} + body_graph = {k: v for k, v in g.items() if k in body_function} + g = { + k: v + for k, v in g.items() + if k not in cond_function and k not in body_function + } + # localize control dependencies + # In the main graph, reattach the control dependency to the while op + for k, v in g.items(): + for idx in range(len(v.control_inputs)): + if v.control_inputs[idx] not in g: + v.control_inputs[idx] = while_loop.name + while_loop.control_outputs.append(k) + for idx in range(len(v.control_outputs)): + if v.control_outputs[idx] not in g: + v.control_outputs[idx] = while_loop.name + while_loop.control_inputs.append(k) + + # in the cond and body graphs, drop non-local control dependencies + # entirely + for graph in [cond_graph, body_graph]: + for k, v in graph.items(): + for idx in range(len(v.control_inputs) - 1, -1, -1): + if v.control_inputs[idx] not in graph: + v.control_inputs.pop(idx) + + for idx in range(len(v.control_outputs) - 1, -1, -1): + if v.control_outputs[idx] not in graph: + v.control_outputs.pop(idx) + tfssa.functions[function_to_functionalize] = SSAFunction(g) + tfssa.add_function(cond_body.name, SSAFunction(cond_graph)) + tfssa.add_function(body.name, SSAFunction(body_graph)) + return True + + +def functionalize_loops(tfssa): + """ + Functionalize all loops in an tfssa + """ + done = False + while not done: + done = True + for f in list(tfssa.functions.keys()): + functionalize = FunctionalizeLoops() + ret = functionalize.functionalize_loops(tfssa, f) + if ret: + done = False diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/fuse_dilation_conv.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/fuse_dilation_conv.py new file mode 100644 index 00000000..ddf3b7af --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/fuse_dilation_conv.py @@ -0,0 +1,215 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as np + +from ..basic_graph_ops import delete_node, replace_source + + +def _try_same(input_h, input_w, W_h, W_w, dilation_factor, padding, crop): + base_paddings = [0] * 4 + + dilated_W_h = dilation_factor[0] * (W_h - 1) + 1 + dilated_W_w = dilation_factor[1] * (W_w - 1) + 1 + + base_paddings[0] = (dilated_W_h - 1) // 2 + base_paddings[1] = dilated_W_h - 1 - (dilated_W_h - 1) // 2 + base_paddings[2] = (dilated_W_w - 1) // 2 + base_paddings[3] = dilated_W_w - 1 - (dilated_W_w - 1) // 2 + + pad_start_h = base_paddings[0] + pad_start_w = base_paddings[2] + orig_pad_end_h = base_paddings[1] + orig_pad_end_w = base_paddings[3] + full_input_h = input_h + pad_start_h + orig_pad_end_h + full_input_w = input_w + pad_start_w + orig_pad_end_w + pad_end_extra_h = ( + dilation_factor[0] - full_input_h % dilation_factor[0] + ) % dilation_factor[0] + pad_end_extra_w = ( + dilation_factor[1] - full_input_w % dilation_factor[1] + ) % dilation_factor[1] + pad_end_h = orig_pad_end_h + pad_end_extra_h + pad_end_w = orig_pad_end_w + pad_end_extra_w + + return ( + padding[0] == pad_start_h + and padding[1] == pad_end_h + and padding[2] == pad_start_w + and padding[3] == pad_end_w + and crop[0] == 0 + and crop[1] == pad_end_extra_h + and crop[2] == 0 + and crop[3] == pad_end_extra_w + ) + + +def _pattern_match_and_rewrite(gddict, conv_op): + node = gddict[conv_op] + channel_first = node.attr["data_format"].startswith("NC") + + if len(node.inputs) == 0 or len(node.outputs) == 0: + return + + prev_node = gddict[node.inputs[0]] + next_node = gddict[node.outputs[0]] + + expand_node = None + squeeze_node = None + # Check for Conv1D cases + if prev_node.op == "ExpandDims": + # All Conv1D has ExpandDims and Squeeze as pairs. + if next_node.op != "Squeeze": + return + + expand_node = prev_node + squeeze_node = next_node + + if len(prev_node.inputs) == 0 or len(next_node.outputs) == 0: + return + prev_node = gddict[prev_node.inputs[0]] + next_node = gddict[next_node.outputs[0]] + + # Check if Conv1D/Conv2D is surrounded by SpaceToBatchND and BatchToSpaceND + if prev_node.op != "SpaceToBatchND" or next_node.op != "BatchToSpaceND": + return + else: + stb_node = prev_node + bts_node = next_node + + dilation_node = gddict[stb_node.inputs[1]] + if dilation_node.value is None: + return + dilation_factor = dilation_node.value.val + if gddict[bts_node.inputs[1]].value is None or np.any( + dilation_factor != gddict[bts_node.inputs[1]].value.val + ): + # If SpaceToBatchND and BatchToSpaceND doesn't match, we do not fuse. + return + + padding_node = gddict[stb_node.inputs[2]] + if padding_node.value is None: + return + padding_val = padding_node.value.val.flatten() + + crop_node = gddict[bts_node.inputs[2]] + if crop_node.value is None: + return + crop_val = crop_node.value.val.flatten() + + if expand_node: + dilation_factor = [1] + list(dilation_factor) + padding_val = [0, 0] + list(padding_val) + crop_val = [0, 0] + list(crop_val) + # Trying to inverse the logic of TF generating padding/cropping values for + # SpaceToBatchND and BatchToSpaceND with different padding values in Conv2D. 
+ # Logic extracted from TF's builder at: + # tensorflow/python/ops/nn_ops.py and tensorflow/python/ops/array_ops.py + is_same = False + if np.any(padding_val != 0): + input_shape = gddict[stb_node.inputs[0]].attr.get("_output_shapes", None) + if input_shape is None: + input_shape = gddict[stb_node.inputs[0]].attr.get("shape", None) + else: + input_shape = input_shape[0] + W_node = gddict[node.inputs[1]] + W_shape = None if W_node.op != "Const" else W_node.datatype.get_shape() + if input_shape is None or W_shape is None: + return + W_h, W_w = W_shape[0], W_shape[1] + HW = input_shape[2:] if channel_first else input_shape[1:-1] + if expand_node: + HW = [1] + list(HW) + is_same = _try_same( + HW[0], HW[1], W_h, W_w, dilation_factor, padding_val, crop_val + ) + + # Re-wiring the nodes to skip SpaceToBatchND. + # We change BatchToSpaceND to Identity since it might be a terminate op. + deleted_nodes = set() + if expand_node: + replace_source(gddict, stb_node, expand_node, stb_node.inputs[0]) + else: + replace_source(gddict, stb_node, node, stb_node.inputs[0]) + + bts_node.op = "Identity" + bts_node.attr = {} + + deleted_nodes.update(stb_node.inputs[1:]) + deleted_nodes.update([stb_node.name]) + deleted_nodes.update(bts_node.inputs[1:]) + + # Rewrite dilation attribute for (Depthwise)Conv2D + dilation_val = ( + [1, 1] + list(dilation_factor) + if node.attr["data_format"] == "NCHW" + else [1] + list(dilation_factor) + [1] + ) + node.attr["dilations"] = dilation_val + # Rewrite padding attribute for (Depthwise)Conv2D + # This is due to, TF always plug in VALID padding for Conv2D after + # SpaceToBatchND. If, the original Conv2D is SAME padding, TF would + # automatically insert padding, therefore, we set it as SAME over here. + if is_same: + node.attr["padding"] = "SAME" + + # Removing stale attributes for nodes. + if expand_node and "_output_shapes" in expand_node.attr: + del expand_node.attr["_output_shapes"] + if squeeze_node and "_output_shapes" in squeeze_node.attr: + del squeeze_node.attr["_output_shapes"] + if "_output_shapes" in node.attr: + del node.attr["_output_shapes"] + if expand_node and "shape" in expand_node.attr: + del expand_node.attr["shape"] + if squeeze_node and "shape" in squeeze_node.attr: + del squeeze_node.attr["shape"] + if "shape" in node.attr: + del node.attr["shape"] + + for d in deleted_nodes: + delete_node(gddict, d) + + +def _fuse_dilation_conv(gddict): + """ + A dilated convolution in older tensorflow versions might not be fused in the + Conv2D or DepthwiseConv2D op, but represented with the following format: + + SpaceToBatchND -> (Depthwise)Conv2D -> BatchToSpaceND + + We try to fuse it back into (Depthwise)Conv2D with the dilation parameter + set in attribute. + There are several patterns that exist in tensorflow for breaking up dilation + convolutions. We detect the following patterns: + + SpaceToBatchND -> ExpandDims -> Conv2D -> Squeeze -> BatchToSpaceND + + SpaceToBatchND -> Conv2D -> BatchToSpaceND + + The first case appears when Conv1D is used, TF expands/squeeze the inputs to + conform Conv2D pattern. + The second case is a basic Conv2D pattern. + + """ + for name in list(gddict.keys()): + if name not in gddict: + # Node might have been removed from graph during fusion. 
+            continue
+        node = gddict[name]
+        if node.op in {"Conv2D", "DepthwiseConv2dNative"}:
+            _pattern_match_and_rewrite(gddict, name)
+
+
+def fuse_dilation_conv(tfssa):
+    """
+    TensorFlow decomposes a depthwise convolution with dilation into:
+
+    SpaceToBatchND ---> Conv2D/DepthwiseConv2D ---> BatchToSpaceND
+
+    We identify such a pattern and use Conv2D/DepthwiseConv2D with the dilation
+    attribute to represent it.
+    """
+    for f in tfssa.functions.keys():
+        _fuse_dilation_conv(tfssa.functions[f].graph)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/insert_get_tuple.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/insert_get_tuple.py
new file mode 100644
index 00000000..bca2a4b4
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/insert_get_tuple.py
@@ -0,0 +1,111 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import copy
+
+from ..parsed_tf_node import ParsedTFNode
+
+
+def insert_get_tuple(gddict):
+    """
+    TensorFlow uses the input name "nodename:i" to denote "get tuple i" from
+    "nodename". Here we split it so that:
+
+    node1:i -> node2
+
+    gets transformed into
+
+    node1 -> get_tuple(i) --> node2
+
+    Takes a graph in "dict{str, ParsedTFNode}" form, and returns a new graph.
+
+    We do not do this for control flow nodes (Switch, Enter, Exit, Merge,
+    LoopCond, NextIteration). For these nodes, we just convert
+
+    node1:i -> node2
+
+    to
+
+    node1 -> node2
+    """
+    retdict = {}
+    get_tuple_op_var_index = 1
+
+    inserted_ops = {}
+
+    def make_op(input_node, index, new_node_name, gto_make_op_cache):
+        cache_key = (
+            input_node,
+            index,
+        )
+        if cache_key in gto_make_op_cache:
+            return gto_make_op_cache[cache_key]
+
+        inserted_op_name = new_node_name
+        inserted_op = ParsedTFNode()
+        inserted_op.name = inserted_op_name
+        inserted_op.op = "get_tuple"
+        inserted_op.inputs = [input_node]
+        inserted_op.attr["index"] = index
+        inserted_ops[inserted_op_name] = inserted_op
+        gto_make_op_cache[cache_key] = inserted_op
+        return inserted_op
+
+    exclusions = [
+        "Switch",
+        "Enter",
+        "Exit",
+        "Merge",
+        "LoopCond",
+        "NextIteration",
+        "TensorArrayV3",
+        "Const",
+    ]
+    inclusions = ["IdentityN", "Split", "SplitV", "LSTMBlockCell", "TopK", "TopKV2", "Unpack", "BlockLSTM", "BlockLSTMV2", "NonMaxSuppressionV5"]
+    gto_make_op_cache = {}
+    for name in list(gddict.keys()):
+        new_node = ParsedTFNode()
+        new_node = copy.deepcopy(gddict[name])
+        new_inputs = []
+        for idx in range(len(new_node.inputs)):
+            if ":" in new_node.inputs[idx]:
+                input_node, input_index = new_node.inputs[idx].split(":")
+            else:
+                input_node = new_node.inputs[idx]
+                input_index = 0
+
+            if (
+                "_output_shapes" in gddict[input_node].attr
+                and len(gddict[input_node].attr["_output_shapes"]) > 1
+                and gddict[input_node].op not in exclusions
+            ) or (gddict[input_node].op in inclusions):
+                get_tuple_node_name = "gto_%s" % (get_tuple_op_var_index)
+                new_inputs.append(
+                    make_op(
+                        input_node,
+                        int(input_index),
+                        get_tuple_node_name,
+                        gto_make_op_cache,
+                    ).name
+                )
+                get_tuple_op_var_index += 1
+            else:
+                new_inputs.append(new_node.inputs[idx])
+        new_node.inputs = new_inputs
+
+        retdict[name] = new_node
+
+    for k, v in inserted_ops.items():
+        retdict[k] = v
+
+    # Force fix up the remaining node names by dropping the :
+    #
+
for k, v in retdict.items(): + for idx in range(len(v.inputs)): + if ":" in v.inputs[idx]: + nodename, nodeindex = v.inputs[idx].split(":") + v.inputs[idx] = nodename + + return retdict diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/quantization_pass.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/quantization_pass.py new file mode 100644 index 00000000..ca06494c --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/quantization_pass.py @@ -0,0 +1,63 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from ..basic_graph_ops import delete_node + + +def delete_fakequant_node_and_repair_graph(g, node): + inputs = node.inputs + # Delete const inputs of the fakequant op + for i in inputs: + if g[i].op == 'Const': + delete_node(g, i) + else: + non_const_input = i + outputs = node.outputs + # Append FakeQuant Op's outputs to its input node's outputs + g[non_const_input].outputs = [i for i in g[non_const_input].outputs if i != node.name] + g[non_const_input].outputs.extend(outputs) + # Modify the FakeQuant op's outputs to set FakeQuant op's parent node as the new input. + for i in outputs: + for j in range(len(g[i].inputs)): + if g[i].inputs[j] == node.name: + g[i].inputs[j] = non_const_input + delete_node(g, node) + +def quantization_pass_impl(fn): + all_quantization_ops = [i for i in fn.graph.values() if "FakeQuant" in i.op] + for node in all_quantization_ops: + is_const_input = True + for input in node.inputs: + if fn.graph[input].op != 'Const': + is_const_input = False + if not is_const_input and ('weights_quant' not in input): + # If activation quantization - + # Delete the FakeQuant op and its const inputs, + # Append FakeQuant Op's outputs to its input node's outputs, + # Modify the FakeQuant op's outputs to reflect the 'new' input node. + delete_fakequant_node_and_repair_graph(fn.graph, node) + else: + # If weight quantization - + # Add attributes of the FakeQuant op to its output's attr dict + for output in node.outputs: + output_node = fn.graph[output] + output_node.attr['quantize'] = True + output_node.attr['num_bits'] = node.attr['num_bits'] + output_node.attr['narrow_range'] = node.attr['narrow_range'] + output_node.attr['quantize_min'] = fn.graph[node.inputs[1]].value.val + output_node.attr['quantize_max'] = fn.graph[node.inputs[2]].value.val + +def quantization_pass(tfssa): + """ + Delete activation quantization ops and repair TF graph: + If the FakeQuant op is not connected to constant inputs (which means that the op performs activation + quantization) then delete that FakeQuant op and repair the graph. + Edit weight quantization ops: + If the FakeQuant op is connected to constant inputs then add its attributes to its output op so that parameters + min, max, narrow_range, num_bits are available (in addition to weights) to downstream ops for denoting and + supporting weight quantization. 
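+
+    For illustration only (the graph below is hypothetical; the attribute
+    names follow the code above): given the weight-quantization sub-graph
+
+        weights, weights_quant/min, weights_quant/max -> FakeQuantWithMinMaxVars -> Conv2D
+
+    the FakeQuant op is kept, and the consuming Conv2D node is annotated with
+
+        conv.attr['quantize'] = True
+        conv.attr['num_bits'] = fakequant.attr['num_bits']
+        conv.attr['narrow_range'] = fakequant.attr['narrow_range']
+        conv.attr['quantize_min'] = value of the min const input
+        conv.attr['quantize_max'] = value of the max const input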
+    """
+    for v in tfssa.functions.values():
+        quantization_pass_impl(v)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/tensor_array_transform.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/tensor_array_transform.py
new file mode 100644
index 00000000..27be50b6
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/tensor_array_transform.py
@@ -0,0 +1,78 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+
+# A TensorArray is essentially a runtime vector with
+#
+# - an optional requirement "infer_shape" (True by default) that all Tensors
+#   stored within the vector have the same size/shape (inferred from the
+#   first element stored into the tensor)
+# - an optional "element_shape" which requires all elements to have this
+#   exact shape
+# - an optional "clear_after_read" (True by default) where a read of an index
+#   is destructive. (It doesn't *really* destroy, but just enables a particular
+#   optimization where the tensor memory can be reused.)
+# - an optional "dynamic_size" (False by default) where the vector is resized
+#   automatically at runtime
+#
+# The way it works is rather odd. To enforce "control dependency" constraints,
+# a single float (flow) variable is passed between operations that write/read
+# the TensorArray. Additionally, a "Resource" variable is also passed along
+# which contains the actual handle to the TensorArray.
+#
+# The TensorArray can therefore also be passed around as an argument to while
+# loops. Thus, unlike a global "Variable", this really is better thought of as
+# an additional type, a list[tensor].
+#
+# See:
+#
+# https://github.com/tensorflow/tensorflow/blob/r1.6/tensorflow/python/ops/tensor_array_ops.py
+# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/tensor_array.h
+# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/tensor_array.cc
+# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/ops/data_flow_ops.cc
+# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/tensor_array_ops.cc
+#
+# The way we transform it is to introduce a new type: list[tensor].
+# The flow variable becomes the list[tensor], since that is consistently
+# passed through every operation.
+# The 'resource' edges then get passed as void.
+#
+# We would like to delete the resource edges, but once too many graph passes
+# have been performed, this becomes very difficult (since tuple shapes have to
+# be updated). Ideally, the resource edge deletion is performed *BEFORE* any
+# additional graph transformations.
+# The conversion of the flow variable to list[tensor] can be performed during
+# type inference.
+#
+#
+# After this pass:
+# All nodes which take a TensorArray resource input will have that resource
+# input edge deleted.
+#
+# The TensorArrayV3 op will only have 1 output, a flow variable.
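+#
+# As an illustrative sketch (node names are made up), a write node such as
+#
+#   TensorArrayWriteV3(inputs=["ta", "index", "value", "ta:1"])
+#
+# loses its leading resource input, and its read of the flow output "ta:1" is
+# shifted to output 0 of the TensorArrayV3 node:
+#
+#   TensorArrayWriteV3(inputs=["index", "value", "ta"])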
+
+
+def tensor_array_resource_removal(gd):
+    # This should be called *BEFORE* the introduction of tuples,
+    # and before output edges are added (for simplicity).
+    for k, node in gd.items():
+        if node.op.startswith("TensorArray") and node.op != "TensorArrayV3":
+            # Generally the resource edge is the first edge:
+            # input is resource, indices, flow;
+            # output is generally flow.
+            node.inputs = node.inputs[1:]
+
+        # The TensorArrayV3 node outputs resource and flow;
+        # shift all flow reads from TensorArray to output 0 of TensorArray.
+        for i in range(len(node.inputs)):
+            if ":" in node.inputs[i]:
+                input_node, input_index = node.inputs[i].split(":")
+                input_index = int(input_index)
+            else:
+                input_node = node.inputs[i]
+                input_index = 0
+            if gd[input_node].op == "TensorArrayV3":
+                if input_index == 1:
+                    node.inputs[i] = input_node
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/variable_node_transform.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/variable_node_transform.py
new file mode 100644
index 00000000..9c977a9e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/variable_node_transform.py
@@ -0,0 +1,85 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from ..basic_graph_ops import delete_node, disconnect_vertex_ins
+
+
+# Variable nodes are not horribly complicated.
+#
+# There are Variable nodes, which don't really do much on their own.
+#
+# To initialize, there is an additional Assign op dangling off to one side,
+# which assigns from "Variable/initial_value".
+#
+# [Variable] --> Assign <-- Const (VariableName/initial_value)
+#     |
+#     |   ... rest of graph ...
+#     v
+#    ... Assign <---- New Values
+#    ... etc
+#
+# Reads of the variable go through an Identity node with the name
+# VariableName/read, which has the attribute _class:loc:@VariableName.
+#
+# Writes to the variable go through Assign nodes, which take one Variable and
+# one value as input and have the attribute _class:loc:@VariableName.
+# Assign also returns the new value of the variable.
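+#
+# As a preview of the rewrite described below (node names illustrative), a
+# read pattern
+#
+#   VariableV2 ("acc") --> Identity ("acc/read")
+#
+# turns "acc/read" into a get_global node with attr variable="acc", while an
+# Assign writing to "acc" becomes a set_global node with the same attribute.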
+# +# +# +# - We transform Variable to a function attribute +# - We transform Assign ops to just "set_global" with attribute variable:VariableName +# - We transform Read ops to just "get_global" with attribute variable:VariableName +def remove_variable_node_impl(fn, tfssa): + variables = [var for var in fn.graph.values() if var.op == "VariableV2"] + assigns = [assign for assign in fn.graph.values() if assign.op == "Assign"] + reads = [ + read + for read in fn.graph.values() + if read.op == "Identity" + and len(read.inputs) == 1 + and fn.graph[read.inputs[0]].op == "VariableV2" + ] + + # find the variable initial values + variable_values = {} + additional_nodes_to_delete = [] + for v in variables: + v.parse_from_attr() + variable_values[v.name] = v.datatype() + for node in fn.graph.values(): + if ( + node.op == "Assign" + and node.inputs[0] == v.name + and node.inputs[1] == v.name + "/initial_value" + ): + variable_values[v.name] = fn.graph[node.inputs[1]].value + additional_nodes_to_delete += [node.name, node.inputs[1]] + for r in reads: + r.op = "get_global" + r.attr["variable"] = r.inputs[0] + disconnect_vertex_ins(fn.graph, r.name) + + # transform writes to set_global + for r in assigns: + r.op = "set_global" + r.attr["variable"] = r.inputs[0] + + for var in variables: + delete_node(fn.graph, var.name) + + for node in additional_nodes_to_delete: + delete_node(fn.graph, node) + + for k, v in variable_values.items(): + tfssa.variables[k] = v + + +def remove_variable_nodes(tfssa): + """ + This should be performed after constant propagation pass. + """ + for v in tfssa.functions.values(): + remove_variable_node_impl(v, tfssa) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/visitors.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/visitors.py new file mode 100644 index 00000000..516963b6 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/visitors.py @@ -0,0 +1,233 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from ..parsed_tf_node import ParsedTFNode + + +class FindAllDownstreamTerminals: + # Find all nodes matching a particular function + # which is downstream reachable from a set of nodes. 
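+    #
+    # A sketch of intended usage (the predicate and graph are hypothetical):
+    #
+    #   terminals = (FindAllDownstreamTerminals(lambda n: n.op == "Relu")
+    #                .visit_many(g, start_node_names)
+    #                .get_result())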
+    def __init__(self, fn):
+        self.result = []
+        self.fn = fn
+        self.memo = {}
+
+    def visit(self, g, node):
+        if not isinstance(node, ParsedTFNode):
+            node = g[node]
+
+        if node.name in self.memo:
+            return self
+        self.memo[node.name] = 1
+
+        if self.fn(node):
+            self.result.append(node.name)
+            return self
+
+        for i in node.outputs:
+            self.visit(g, g[i])
+
+        return self
+
+    def visit_many(self, g, nodes):
+        for i in nodes:
+            self.visit(g, i)
+        return self
+
+    def get_result(self):
+        return self.result
+
+
+class FindAllReachableNodes:
+    # Find all nodes reachable from a set of nodes which satisfy a criterion.
+    def __init__(self, fn):
+        self.result = []
+        self.fn = fn
+        self.memo = {}
+
+    def visit(self, g, node):
+        if not isinstance(node, ParsedTFNode):
+            node = g[node]
+
+        if node.name in self.memo:
+            return self
+        self.memo[node.name] = 1
+
+        if self.fn(node):
+            self.result.append(node.name)
+
+        for i in node.outputs:
+            self.visit(g, g[i])
+
+        for i in node.inputs:
+            self.visit(g, g[i])
+
+        return self
+
+    def visit_many(self, g, nodes):
+        for i in nodes:
+            self.visit(g, i)
+        return self
+
+    def get_result(self):
+        return self.result
+
+
+class FindImmediateUpstreamNodes:
+    # Find all nodes matching a particular predicate which are immediately above a set of nodes.
+    def __init__(self, fn):
+        self.result = []
+        self.fn = fn
+
+    def visit(self, g, node):
+        if not isinstance(node, ParsedTFNode):
+            node = g[node]
+
+        for i in node.inputs:
+            if self.fn(g[i]):
+                self.result.append(i)
+
+        return self
+
+    def visit_many(self, g, nodes):
+        for i in nodes:
+            self.visit(g, i)
+        return self
+
+    def get_result(self):
+        return self.result
+
+
+class FindImmediateDownstreamNodes:
+    # Find all nodes matching a particular predicate which are immediately below a set of nodes.
+    def __init__(self, fn):
+        self.result = []
+        self.fn = fn
+
+    def visit(self, g, node):
+        if not isinstance(node, ParsedTFNode):
+            node = g[node]
+
+        for i in node.outputs:
+            if self.fn(g[i]):
+                self.result.append(i)
+
+        return self
+
+    def visit_many(self, g, nodes):
+        for i in nodes:
+            self.visit(g, i)
+        self.result = list(set(self.result))
+        return self
+
+    def get_result(self):
+        return self.result
+
+
+class FindAllUpstreamTerminals:
+    # Find the "upstream frontier" of nodes passing some predicate.
+    # In other words, perform a pre-order traversal of a node and its inputs, collecting all nodes
+    # passing a given predicate as we go along. Terminate the search along a given branch as soon
+    # as a node is collected.
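+    #
+    # A sketch of intended usage (the predicate and graph are hypothetical):
+    #
+    #   consts = (FindAllUpstreamTerminals(lambda n: n.op == "Const")
+    #             .visit(g, output_node_name)
+    #             .get_result())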
+ def __init__(self, fn, control_dependencies=False): + self.result = [] + self.fn = fn + self.control_dependencies = control_dependencies + self.memo = {} + + def visit(self, g, node): + if not isinstance(node, ParsedTFNode): + node = g[node] + + if node.name in self.memo: + return self + self.memo[node.name] = 1 + + if self.fn(node): + self.result.append(node.name) + return self + + for i in node.inputs: + self.visit(g, g[i]) + if self.control_dependencies: + for i in node.control_inputs: + self.visit(g, g[i]) + return self + + def visit_many(self, g, nodes): + for i in nodes: + self.visit(g, i) + self.result = list(set(self.result)) + return self + + def get_result(self): + return self.result + + +class FindSubgraph: + # Find all nodes between a set of sources and a set of terminals + # Sources are not returned, but reached terminals are returned + def __init__(self, terminal_nodes): + self.memo = {} + self.terminal = terminal_nodes + + def visit_impl(self, g, node): + if not isinstance(node, ParsedTFNode): + node = g[node] + + if node.name in self.terminal: + self.memo[node.name] = True + return True + + if node.name in self.memo: + return self.memo[node.name] + + # add self to memo first otherwise cycles will not terminate + self.memo[node.name] = None + reachable = None + all_unreachable = True + for i in node.outputs + node.control_outputs: + visit_result = self.visit_impl(g, g[i]) + if visit_result == True: # pylint: disable=singleton-comparison + reachable = True + if visit_result != False: # pylint: disable=singleton-comparison + all_unreachable = False + + if reachable: + self.memo[node.name] = reachable + elif all_unreachable: + self.memo[node.name] = False + else: + self.memo[node.name] = None + + return reachable + + def visit(self, g, node): + self.visit_impl(g, node) + while True: + if None in iter(self.memo.values()): + revisit = [k for k, v in self.memo.items() if v is None] + self.memo = {k: v for k, v in self.memo.items() if v is not None} + for n in revisit: + self.visit_impl(g, n) + else: + break + return self + + def visit_many(self, g, nodes): + for node in nodes: + self.visit_impl(g, node) + while True: + if None in iter(self.memo.values()): + revisit = [k for k, v in self.memo.items() if v is None] + self.memo = {k: v for k, v in self.memo.items() if v is not None} + for n in revisit: + self.visit_impl(g, n) + else: + break + return self + + def get_result(self): + return [k for k, v in self.memo.items() if v] diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_op_registry.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_op_registry.py new file mode 100644 index 00000000..9b5d48a7 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tf_op_registry.py @@ -0,0 +1,47 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +_TF_OPS_REGISTRY = {} + + +def register_tf_op(_func=None, tf_alias=None, override=False): + """ + Registration routine for TensorFlow operators + _func: (TF conversion function) [Default=None] + TF conversion function to register + + tf_alias: (List of string) [Default=None] + All other TF operators that should also be mapped to + current conversion routine. + e.g. 
Sort aliased with SortV1, SortV2 + All provided alias operators must not be registered previously. + + override: (Boolean) [Default=False] + If True, overrides earlier registration i.e. specified + operator and alias will start pointing to current conversion + function. + Otherwise, duplicate registration will error out. + """ + + def func_wrapper(func): + f_name = func.__name__ + + if not override and f_name in _TF_OPS_REGISTRY: + raise ValueError("TF op {} already registered.".format(f_name)) + _TF_OPS_REGISTRY[f_name] = func + # If tf_alias is provided, then all the functions mentioned as aliased + # are mapped to current function + if tf_alias is not None: + for name in tf_alias: + if not override and name in _TF_OPS_REGISTRY: + msg = "TF op alias {} already registered." + raise ValueError(msg.format(name)) + _TF_OPS_REGISTRY[name] = func + return func + + if _func is None: + # decorator called without argument + return func_wrapper + return func_wrapper(_func) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tfssa.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tfssa.py new file mode 100644 index 00000000..44abe88b --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow/tfssa.py @@ -0,0 +1,549 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import copy + +from coremltools import _logger as logger +from coremltools.converters.mil.mil import types + +from .basic_graph_ops import check_connections, const_determined_nodes +from .dot_visitor import DotVisitor +from .naming_utils import escape_fn_name + + +class ParsedNode: + """ + Node class for the tfssa graph. + + name: The name of the node (str) + op: The operation represented by the node (str) + datatype: The type of the node. 
(type) + value: The value of the node if available + inputs: The list of nodes which are inputs to this node (list[str]) + control_inputs: The list of nodes which have to be executed before this node (list[str]) + attr: The attributes of the node + outputs: The list of nodes which consume the result of this node (list[str]) + control_outputs: The list of nodes which have to be executed after this node (list[str]) + """ + + __slots__ = [ + "name", + "op", + "datatype", + "value", + "inputs", + "control_inputs", + "outputs", + "control_outputs", + "attr", + ] + + def __init__(self): + self.name = None + self.op = None + self.datatype = None + self.value = None + self.inputs = [] + self.outputs = [] + self.control_inputs = [] + self.control_outputs = [] + self.attr = {} + + def __copy__(self): + return self._copy_impl(ParsedNode()) + + def _copy_impl(self, dest): + dest.name = self.name + dest.op = self.op + dest.datatype = self.datatype + dest.value = copy.deepcopy(self.value) + dest.inputs = self.inputs[:] + dest.control_inputs = self.control_inputs[:] + dest.outputs = self.outputs[:] + dest.control_outputs = self.control_outputs[:] + dest.attr = {k: copy.deepcopy(v) for k, v in self.attr.items()} + return dest + + def copy(self): + return self.__copy__() + + +class SSAFunction: + __slots__ = ["graph", "inputs", "input_types", "outputs", "output_types", "ret"] + + def __init__(self, gdict=None, inputs=None, outputs=None, ret=None): + if gdict is None: + gdict = {} + self.graph = gdict + self.inputs = [] if inputs is None else inputs + self.outputs = [] if outputs is None else outputs + self.input_types = [] + self.output_types = [] + + # ret is a mapping from the output arg names from `signature` to the + # outputs from `node_def` that should be returned by the function. + # Only used in TF2 for getting indices when generating get_tuple ops + # for control flow ops. Because the sub-graph's outputs and control + # flow node's outputs mapping is defined in `ret` dict. See usages in + # tf_graph_pass: rewrite_control_flow_functions for details. + # https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/function.proto + self.ret = [] if ret is None else ret + + check_connections(gdict) + + # respect TF inputs/outputs if given, otherwise, infer from the graph + # in currently implementation: TF1 will always infer from graph. TF2, + # on the other hand, respect the inputs/outputs provided. 
+ if len(self.inputs) == 0 or len(self.outputs) == 0: + self.find_inputs_and_outputs() + else: + self.inputs, self.outputs = inputs, outputs + self.filter_inputs_and_outputs() + + def find_inputs_and_outputs(self): + # solve for input and output vars + sorted_keys = sorted(self.graph.keys()) + + # we use function entry and exit points if available + # otherwise we find graph entry and exit points + enters = [ + n.name for n in self.graph.values() if ("entry" in n.op or "Entry" in n.op) + ] + exits = [n.name for n in self.graph.values() if n.op in ("Return", "return")] + if len(enters) > 0 or len(exits) > 0: + assert len(enters) > 0 + assert len(exits) > 0 + self.inputs = enters + self.input_types = [self.graph[v].datatype for v in self.inputs] + self.outputs = exits + self.output_types = [self.graph[v].datatype for v in self.outputs] + else: + for k in sorted_keys: + v = self.graph[k] + if len(v.inputs) == 0 and v.op not in ["Const", "get_global", "NoOp"]: + self.inputs.append(k) + self.input_types.append(v.datatype) + elif len(v.inputs) != 0 and v.op == "Placeholder": + assert len(v.inputs) == 1, "This is not a PlaceholderWithDefault!" + self.inputs.append(k) + self.input_types.append(v.datatype) + if ( + len(v.outputs) == 0 + and len(v.control_outputs) == 0 + and v.op != "set_global" + ): + self.outputs.append(k) + self.output_types.append(v.datatype) + + def filter_inputs_and_outputs(self): + """ + Eliminate invalid input/output nodes in the given list. Should only be + invoked if the self.inputs and self.outputs are both provided and we + want to respect those when adding SSAFunctions. Only needed for TF2 for + now because of the needs to parse multiple functions in graph. TF1 only + has one "main" function. + """ + filtered_inputs = [] + filtered_outputs = [] + for k in self.inputs: + if k not in self.graph.keys(): + continue + v = self.graph[k] + if len(v.inputs) == 0 and v.op not in {"Const", "get_global", "NoOp"}: + filtered_inputs.append(k) + self.input_types.append(v.datatype) + elif len(v.inputs) != 0 and v.op == "Placeholder": + assert len(v.inputs) == 1, "This is not a PlaceholderWithDefault!" 
+ filtered_inputs.append(k) + self.input_types.append(v.datatype) + for k in self.outputs: + if k not in self.graph.keys(): + continue + v = self.graph[k] + filtered_outputs.append(k) + self.output_types.append(v.datatype) + self.inputs, self.outputs = filtered_inputs, filtered_outputs + + def __copy__(self): + ret = SSAFunction() + ret.inputs = self.inputs[:] + ret.input_types = self.input_types[:] + ret.outputs = self.outputs[:] + ret.output_types = self.output_types[:] + ret.graph = {k: copy.deepcopy(v) for k, v in self.graph.items()} + + return ret + + def copy(self): + return self.__copy__() + + +class NetworkEnsemble: + __slots__ = ["functions", "variables", "global_resource"] + + def __init__(self, instance=None): + self.functions = {} + self.variables = {} + self.global_resource = {} + + if isinstance(instance, NetworkEnsemble): + self.functions = instance.functions + self.variables = instance.variables + self.global_resource = instance.global_resource + elif instance is not None: + raise ValueError( + "Instance type {} not compatible with NetworkEnsemble".format( + type(instance) + ) + ) + + def rename_function(self, src_func, tgt_func): + """ + Renames the function with function name (src_func) to (tgt_func) + """ + if src_func not in self.functions: + logger.warning("Couldn't find function name (%s).", src_func) + return + if tgt_func in self.functions: + logger.warning("(%s) already exists in some function name.", tgt_func) + return + + self.functions[tgt_func] = self.functions.pop(src_func) + logger.debug( + "Successfully changed function name from (%s) to (%s)", src_func, tgt_func + ) + + def rename_node(self, src_node, tgt_node): + """ + Rename the node with node name (src_node) to (tgt_node). + Note that the name (tgt_node) cannot appear in the whole network, + not only the function it lies in. + """ + in_ssa = False + success = None + for func, tfssa in self.functions.items(): + if src_node in tfssa.graph: + in_ssa = True + if tgt_node in tfssa.graph: + logger.warning( + "(%s) already exists in function (%s).", tgt_node, func + ) + break + success = func + tfssa.graph[tgt_node] = tfssa.graph.pop(src_node) + # Replace other nodes' output dependency + for inp in tfssa.graph[tgt_node].inputs: + for idx, out in enumerate(tfssa.graph[inp].outputs): + if out == src_node: + tfssa.graph[inp].outputs[idx] = tgt_node + break + # Replace other nodes' control output dependency + for c_inp in tfssa.graph[tgt_node].control_inputs: + for idx, c_out in enumerate(tfssa.graph[c_inp].control_outputs): + if c_out == src_node: + tfssa.graph[c_inp].control_outputs[idx] = tgt_node + break + # Replace other nodes' input dependency + for out in tfssa.graph[tgt_node].outputs: + for idx, inp in enumerate(tfssa.graph[out].inputs): + if inp == src_node: + tfssa.graph[out].inputs[idx] = tgt_node + break + # Replace other nodes' control input dependency + for c_out in tfssa.graph[tgt_node].control_outputs: + for idx, c_inp in enumerate(tfssa.graph[c_out].control_inputs): + if c_inp == src_node: + tfssa.graph[c_out].control_inputs[idx] = tgt_node + break + break + + if not in_ssa: + logger.warning("Couldn't find (%s) in any functions", src_node) + if success is not None: + logger.debug( + "Changed (%s) to (%s) in function (%s)", src_node, tgt_node, success + ) + + def extract_subgraph(self, outputs, target_inputs=None, name=""): + """Add a new SSAFunction to the current NetworkEnsemble to produce the given outputs. + + Args: + outputs: The outputs the new function must produce. 
+ target_inputs: + name: The name of the new function to create. If unspecified, a name will be generated + by joining output names. + Returns: + The name of the new function. + """ + if not isinstance(outputs, list): + raise TypeError("Expected a list of output names for subgraph extraction") + + if name == "": + outputs.sort() + name = escape_fn_name("_".join(outputs)) + + if target_inputs is None: + target_inputs = [] + + def DFS_inputs(graph, node, vis): + vis.add(node) + if node in target_inputs: + return [node] + if ( + len(graph[node].inputs) == 0 + and len(graph[node].control_inputs) == 0 + and graph[node].op != "Const" + ): + return [node] + inputs = [] + for i in graph[node].inputs + graph[node].control_inputs: + if i in vis: + continue + inputs += DFS_inputs(graph, i, vis) + return inputs + + def DFS_set_globals(graph, node, vis): + vis.add(node) + set_globals = [] + if graph[node].op == "set_global": + set_globals.append(node) + for i in graph[node].outputs + graph[node].control_outputs: + if i in vis: + continue + set_globals += DFS_set_globals(graph, i, vis) + return set_globals + + for k in list(self.functions.keys()): + v = self.functions[k] + extract = [] + for output in outputs: + if output in v.graph: + extract.append(output) + + if len(extract) == 0: + continue + incl_nodes = set() + gdict = copy.deepcopy(v.graph) + inputs = [] + set_globals = [] + for output in extract: + inputs += DFS_inputs(gdict, output, incl_nodes) + vis_nodes = set() + for inp in inputs: + set_globals += DFS_set_globals(gdict, inp, vis_nodes) + for node in set_globals: + inputs += DFS_inputs(gdict, node, incl_nodes) + + for new_k, new_v in v.graph.items(): + if new_k not in incl_nodes: + del gdict[new_k] + continue + if new_k in target_inputs: + gdict[new_k].op = "Placeholder" + gdict[new_k].inputs = [inp for inp in new_v.inputs if inp in incl_nodes] + gdict[new_k].outputs = [ + out for out in new_v.outputs if out in incl_nodes + ] + gdict[new_k].control_inputs = [ + inp for inp in new_v.control_inputs if inp in incl_nodes + ] + gdict[new_k].control_outputs = [ + out for out in new_v.control_outputs if out in incl_nodes + ] + + for output in extract: + old_name = "preIdentity_" + output + output_node = copy.deepcopy(gdict[output]) + output_node.op = "Identity" + output_node.inputs = [old_name] + output_node.control_inputs = [] + output_node.outputs = [] + output_node.control_outputs = [] + + for inp in gdict[output].inputs: + for idx, out in enumerate(gdict[inp].outputs): + if out == output: + gdict[inp].outputs[idx] = old_name + for inp in gdict[output].control_inputs: + for idx, out in enumerate(gdict[inp].control_outputs): + if out == output: + gdict[inp].control_outputs[idx] = old_name + for out in gdict[output].outputs: + for idx, inp in enumerate(gdict[out].inputs): + if inp == output: + gdict[out].inputs[idx] = old_name + for out in gdict[output].control_outputs: + for idx, inp in enumerate(gdict[out].control_inputs): + if inp == output: + gdict[out].control_inputs[idx] = old_name + gdict[output].outputs.append(output) + gdict[output].name = old_name + gdict[old_name] = gdict[output] + gdict[output] = output_node + + self.functions[name] = SSAFunction(gdict) + return name + + def delete_subgraph(self, name): + """ + Delete the SSAfunction with function_name. 
+ """ + if name not in self.functions: + logger.warning("(%s) not in NetworkEnsemble", name) + return + del self.functions[name] + + def __repr__(self): + return str(self) + + def __str__(self): + ret = "" + for func, v in self.functions.items(): + if func.startswith("body_function_") or func.startswith("f_body_function_"): + continue + elif func.startswith("cond_function_") or func.startswith( + "f_cond_function_" + ): + continue + + ret += "Input Function Name: %s\n" % (func) + ret += " Inputs:\n" + for inp in v.inputs: + ret += " %s\n" % (inp) + ret += " Outputs:\n" + for out in v.outputs: + if out.startswith("fake_exit_"): + continue + ret += " %s\n" % (out) + return ret + + def get_dot_string( + self, name_and_op_style=False, annotation=False, highlight_debug_nodes=None + ): + """ + Return the dot string that can be used to show the whole graph + with dot. By default, the graph contains op and type. If + name_and_op_style is set, the graph will contain the name of the node + and the op instead. + + * Input nodes : yellow + * constant nodes : azure + * output nodes : goldenrod2 + * nodes with variable shaped tensors : cyan + * node names or op types that user wants to highlight: green + + Parameters + ---------- + name_and_op_style: bool + If set, graph contains only the name and the op. + + annotation: bool + Examples + -------- + >>> import graphviz + >>> graphviz.Source(network.get_dot_string()).view() + + """ + if highlight_debug_nodes is None: + highlight_debug_nodes = [] + function_names = sorted(self.functions.keys()) + + dotstring = "digraph g {\n" + "\tcompound=true;\n" + # find all tensor nodes with unknown sizes + ctr = 0 + for k in function_names: + const_nodes = const_determined_nodes(self.functions[k].graph) + unknown_sized_tensor_ops = [] + for v, n in self.functions[k].graph.items(): + if n.datatype is None or ( + n.datatype is not None + and types.is_tensor(n.datatype) + and ( + len(n.datatype.get_shape()) == 0 or -1 in n.datatype.get_shape() + ) + ): + unknown_sized_tensor_ops.append(v) + if n.op in highlight_debug_nodes: + highlight_debug_nodes.append(v) + + v = self.functions[k] + vis = DotVisitor(annotation) + vis.highlight_nodes(v.inputs, "yellow").highlight_nodes( + const_nodes, "azure2" + ).highlight_nodes(v.outputs, "goldenrod2").highlight_nodes( + unknown_sized_tensor_ops, "cyan2" + ) + if len(highlight_debug_nodes) > 0: + vis.highlight_nodes(highlight_debug_nodes, "green") + if name_and_op_style: + vis.labeller(lambda n: n.name + " (" + n.op + ")") + + res = vis.visit_all(v.graph, nodename_prefix=str(ctr)).get_result( + "subgraph", "cluster_" + k.replace("/", "_") + ) + dotstring += "\n".join("\t" + r for r in res.split("\n")) + "\n" + ctr += 1 + dotstring += "}" + return dotstring + + def add_function_with_prefix(self, fprefix, tfssa): + assert isinstance(tfssa, SSAFunction) + s = 0 + while fprefix + str(s) in self.functions: + s += 1 + self.functions[fprefix + str(s)] = tfssa + + def add_function(self, f, tfssa): + self.functions[f] = tfssa + + def __copy__(self): + ret = self.__class__() + ret.functions = self.functions + ret.variables = self.variables + ret.global_resource = self.global_resource + return ret + + def __deepcopy__(self, memo): + ret = self.__class__() + ret.functions = {k: copy.copy(v) for k, v in self.functions.items()} + ret.variables = {k: copy.copy(v) for k, v in self.variables.items()} + ret.global_resource = {k: copy.copy(v) for k, v in self.global_resource.items()} + return ret + + def copy(self): + return self.__copy__() + + def 
_find_free_name(self, prefix): + idx = 0 + while True: + name = prefix + str(idx) + found = False + for v in self.functions.values(): + if name in v.graph: + found = True + break + if found: + idx += 1 + else: + return name + + def get_image_format(self): + """ + Iterates over graph and returns input format (`NCHW` or `NHWC`) + if input is of type Image, otherwise `None` + """ + for fn_key in list(self.functions.keys()): + graph = self.functions[fn_key].graph + + for name in graph: + node = graph[name] + if ( + node.attr.get("data_format", None) == "NHWC" + or node.attr.get("data_format") == "NHWC_format_inserted" + ): + return "NHWC" + elif node.attr.get("data_format", None) == "NCHW": + return "NCHW" + return None diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/__init__.py new file mode 100644 index 00000000..fc51ca1e --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from ....._deps import _HAS_TF_2 + +if _HAS_TF_2: + # importing these causes all its imports to be registered + from coremltools.converters.mil.frontend.tensorflow.tf_op_registry import \ + register_tf_op + + from . import ops diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/converter.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/converter.py new file mode 100644 index 00000000..b4876c18 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/converter.py @@ -0,0 +1,40 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools.converters.mil.frontend.tensorflow.basic_graph_ops import \ + simple_topsort +from coremltools.converters.mil.frontend.tensorflow.converter import \ + TFConverter + + +class TF2Converter(TFConverter): + def _get_stack(self, tfssa, root="main"): + """ + Overwrite TFConverter._get_stack() as TF2 generates different sub-graphs. + """ + + # We're trying to get a order of how to loop through the graphs. + # This is NOT necessarily a DAG. 
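+        # For example (function names hypothetical): if "main" contains a
+        # While op with body "f_body_0" and cond "f_cond_0", we record
+        #   dep = {"main": [], "f_body_0": ["main"], "f_cond_0": ["main"]}
+        # and simple_topsort(dep) yields an order in which "main" precedes
+        # the sub-graphs it calls.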
+ dep = {x: [] for x in tfssa.functions} + for fname in tfssa.functions: + for node in tfssa.functions[fname].graph.values(): + func_x, func_y = None, None + + if node.op in {"StatelessIf", "If"}: + func_x = node.attr.get("then_branch") + func_y = node.attr.get("else_branch") + elif node.op in {"StatelessWhile", "While"}: + func_x = node.attr.get("body") + func_y = node.attr.get("cond") + + if func_x and fname not in dep[func_x]: + dep[func_x].append(fname) + if func_y and fname not in dep[func_y]: + dep[func_y].append(fname) + + assert len(dep[root]) == 0 + graph_stack = simple_topsort(dep) + + return graph_stack diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/load.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/load.py new file mode 100644 index 00000000..e7f2504b --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/load.py @@ -0,0 +1,346 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os.path as _os_path +from distutils.version import StrictVersion as _StrictVersion + +import tensorflow as _tf +from tensorflow.lite.python.util import \ + get_grappler_config as _get_grappler_config +from tensorflow.lite.python.util import \ + run_graph_optimizations as _run_graph_optimizations +from tensorflow.python.eager import context +from tensorflow.python.framework import dtypes as _dtypes +from tensorflow.python.framework.convert_to_constants import \ + convert_variables_to_constants_v2 as _convert_variables_to_constants_v2 +from tensorflow.python.framework.function_def_to_graph import \ + function_def_to_graph as _function_def_to_graph +from tensorflow.python.keras.saving import saving_utils as _saving_utils +from tqdm import tqdm as _tqdm + +from coremltools import _logger as logger +from coremltools._deps import _get_version +from coremltools.converters.mil.frontend.tensorflow2.tf_graph_pass import ( + flatten_sub_graph_namespaces, rewrite_control_flow_functions) +from coremltools.converters.mil.frontend.tensorflow.basic_graph_ops import \ + fill_outputs +from coremltools.converters.mil.frontend.tensorflow.load import TFLoader +from coremltools.converters.mil.frontend.tensorflow.parsed_tf_node import \ + ParsedTFNode +from coremltools.converters.mil.frontend.tensorflow.tf_graph_pass import ( + constant_propagation, delete_disconnected_nodes, + delete_unnecessary_constant_nodes, fuse_dilation_conv, insert_get_tuple, + remove_variable_nodes, tensor_array_resource_removal) +from coremltools.converters.mil.frontend.tensorflow.tfssa import ( + NetworkEnsemble, SSAFunction) +from coremltools.converters.mil.input_types import TensorType + +from .converter import TF2Converter + + +class TF2Loader(TFLoader): + """ + There are the steps how the TF2Loader loads and converts the TF2 model + 1. Get the concrete functions from the Keras model (only 1 concrete function is supported now) + 2. Get the tensorflow graphdef from the concrete function by doing + (a) calling tensorflow's convert_variables_to_constants_v2 API to freeze variables into constants + (b) run grappler optimizations on the graphdef ("constfold", "dependency", "debug_stripper") + 3. Extract sub graph based on "outputs" + 4. Construct tfssa IR from graphdef + 5. Run tfssa graph passes + 6. 
Convert tfssa to program by TF2Converter + """ + def __init__(self, model, debug=False, **kwargs): + """ + TensorFlow 2.x model loader. + + Parameters + ---------- + model: Model created with TensorFlow 2.x + One of the following model format: + - TensorFlow tf.keras.Model object or HDF5 (.h5 or .hdf5) file path + - TensorFlow SavedModel directory path + - TensorFlow list of concrete functions(s) + debug: bool, optional. Defaults to False. + This flag should generally be False except for debugging purposes + for diagnosing conversion errors. Setting this flag to True will + cause graph pass errors to be ignored, forcefully returning a + NetworkEnsemble object. + kwargs: dict(str, Any), optional + Dictionary of additional arguments. + """ + TFLoader.__init__(self, model, debug, **kwargs) + + """ + tf_ssa graph passes + Notes: + - "flatten_while_loop_namespaces" should be after "constant_propagation" + as it changes node names which constant propagation pass is relying on + to perform session.run(), renamed nodes are not understandable for TF. + """ + self.tfssa_passes = [ + constant_propagation, + delete_unnecessary_constant_nodes, # delete_unnecessary_constant_nodes must come right after constant_propagation + rewrite_control_flow_functions, + flatten_sub_graph_namespaces, + remove_variable_nodes, + fuse_dilation_conv, + ] + + def _get_concrete_functions_and_graph_def(self): + msg = ( + "Expected model format: [SavedModel | [concrete_function] | " + "tf.keras.Model | .h5 | GraphDef], got {}" + ) + if ( + isinstance(self.model, list) + or isinstance(self.model, _tf.keras.Model) + or isinstance(self.model, str) + or isinstance(self.model, _tf.compat.v1.GraphDef) + ): + cfs = [] + if isinstance(self.model, list): + cfs = self.model + if isinstance(self.model, _tf.keras.Model): + cfs = self._concrete_fn_from_tf_keras_or_h5(self.model) + elif isinstance(self.model, _tf.compat.v1.GraphDef): + return None, self.model + elif isinstance(self.model, str): + if not _os_path.exists(self.model): + raise ValueError( + 'Input model "{}" does not exist'.format(self.model) + ) + elif _os_path.isfile(self.model) \ + and (self.model.endswith(".h5") or self.model.endswith(".hdf5")): + cfs = self._concrete_fn_from_tf_keras_or_h5(self.model) + elif _os_path.isdir(self.model): + saved_model = _tf.saved_model.load(self.model) + sv = saved_model.signatures.values() + cfs = sv if isinstance(sv, list) else list(sv) + else: + raise NotImplementedError(msg.format(self.model)) + else: + raise NotImplementedError(msg.format(self.model)) + + graph_def = self._graph_def_from_concrete_fn(cfs) + + return cfs, graph_def + + def _graph_def_from_model(self, output_names=None): + """Overwrites TFLoader._graph_def_from_model()""" + cfs, graph_def = self._get_concrete_functions_and_graph_def() + if isinstance(self.model, _tf.keras.Model) and self.kwargs.get("outputs", None) is None: + # For the keras model, check if the outputs is provided by the user. 
+ # If not, we make sure the coreml model outputs order is the same as + # the original keras model + cf = cfs[0] + output_names = [] + for key in cf.structured_outputs: + output_names.append(cf.structured_outputs[key].name.split(":")[0]) + self.kwargs["outputs"] = [TensorType(name=name) for name in output_names] + return self.extract_sub_graph(graph_def, output_names) + + def _tf_ssa_from_graph_def(self, fn_name="main"): + """Overwrites TFLoader._tf_ssa_from_graph_def()""" + with _tf.Graph().as_default() as tf_graph: + _tf.graph_util.import_graph_def(self._graph_def, name="") + + # sub-graphs' input shapes are required for extracting sub-graphs + sg_input_shapes = self._populate_sub_graph_input_shapes( + tf_graph, tf_graph._functions + ) + + # get graph_dict and sub-graphs' inputs / outputs + graph_dict, inputs, outputs, ret = self._dict_from_graph_def( + tf_graph, fn_name, sg_input_shapes + ) + + tf_ssa = NetworkEnsemble() + for name, graph in graph_dict.items(): + tensor_array_resource_removal(graph) + graph = insert_get_tuple(graph) + graph = fill_outputs(graph) + if name == "main": # skip for sub-graphs as input can be also output + delete_disconnected_nodes(graph) + tf_ssa.functions[name] = SSAFunction( + graph, inputs=inputs[name], outputs=outputs[name], ret=ret[name] + ) + + return tf_ssa + + def _run_tf_ssa_passes(self): + tf_passes = self.tfssa_passes + + if self.debug: + for tf_pass in _tqdm( + tf_passes, desc="Running TensorFlow Graph Passes", unit=" passes" + ): + try: + tf_pass(self._tf_ssa) + except Exception as e: + logger.exception('Exception in pass "{}": {}'.format(tf_pass, e)) + logger.info("Ignoring exception and continuing to next pass") + + else: + for tf_pass in _tqdm( + tf_passes, desc="Running TensorFlow Graph Passes", unit=" passes" + ): + tf_pass(self._tf_ssa) + + if self.debug: + import graphviz + + dot_string = self._tf_ssa.get_dot_string( + annotation=True, name_and_op_style=True, highlight_debug_nodes=[] + ) + graphviz.Source(dot_string).view( + filename="/tmp/ssa_after_tf_passes", cleanup=True + ) + + def _program_from_tf_ssa(self): + self._run_tf_ssa_passes() + converter = TF2Converter( + tfssa=self._tf_ssa, + inputs=self.kwargs["inputs"], + outputs=self.kwargs["outputs"], + opset_version=self.kwargs["specification_version"], + ) + return converter.convert() + + def _populate_sub_graph_input_shapes(self, graph, graph_fns): + """ + Populate function (sub-graph) input shapes from control flow op's inputs + Note that the functions (sub-graphs) are not nested but the control flow + ops are nested. The input shapes are used to extract sub-graphs from the + parent graph (as the input of function_def_to_graph). + + Parameter + --------- + graph: tf.Graph + TensorFlow graph. + graph_fns: list of graph functions. + List of TensorFlow graph functions. + + Returns + ------- + sg_input_shapes: dict(str: list) + Dictionary of function (sub-graph) name and input shape pairs. 
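+            For illustration (function names hypothetical):
+            {"cond_fn_0": [TensorShape([1, 3]), TensorShape([1, 3])]}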
+ """ + sg_input_shapes = {} + sub_graphs = [] + for op in graph.get_operations(): + if op.type not in {"StatelessIf", "If", "StatelessWhile", "While"}: + continue + + sg1, sg2 = None, None + if op.type in {"StatelessIf", "If"}: + sg1 = op.get_attr("then_branch").name + sg2 = op.get_attr("else_branch").name + if op.type in {"StatelessWhile", "While"}: + sg1 = op.get_attr("cond").name + sg2 = op.get_attr("body").name + + # memorize input shapes for sub-graph conversions + op_input_shapes = [i.get_shape() for i in op.inputs] + sg_input_shapes.update({sg1: op_input_shapes, sg2: op_input_shapes}) + sub_graphs += [sg1, sg2] + + for name in sub_graphs: + sg = graph_fns.get(name) + fn_def = context.get_function_def(name) + op_input_shapes = sg_input_shapes[name] + op_input_shapes = op_input_shapes[-len(fn_def.signature.input_arg) :] + fn_graph = _function_def_to_graph(fn_def, input_shapes=op_input_shapes) + sg_input_shapes.update( + self._populate_sub_graph_input_shapes(fn_graph, graph_fns) + ) + + return sg_input_shapes + + @staticmethod + def _dict_from_graph_def(graph, fn_name="main", sg_input_shapes=None): + """ + Loads a tf.Graph and transform it into dictionary of ParsedTFNodes. + Potentially contains multiple functions, in such case, recursively + resolve functions (sub-graphs). + + Parameters + ---------- + graph: tf.Graph + TensorFlow graph. + fn_name: str, optional, defaults to 'main' + Function name of the graph. + sg_input_shapes: dict(str: list) + Dictionary of name and input shapes for functions / sub-graphs. + + Returns + ------- + dict(str: dict(str: ParsedTFNode)) + Dictionary of function name and dictionary of node name and + ParsedTFNode object. + """ + graph_dict = {fn_name: {}} + graph_inputs = {fn_name: []} + graph_outputs = {fn_name: []} + graph_ret = {fn_name: {}} + + for op in graph.get_operations(): + graph_dict[fn_name].update({op.name: ParsedTFNode(op.node_def)}) + + for name, sg in graph._functions.items(): + sg_def = context.get_function_def(name) + if name in sg_input_shapes: + input_shapes = sg_input_shapes[name] + input_shapes = input_shapes[-len(sg_def.signature.input_arg):] + fn_graph = _function_def_to_graph(sg_def, input_shapes=input_shapes) + + graph_dict.update( + TF2Loader._dict_from_graph_def(fn_graph, name, sg_input_shapes)[0] + ) + graph_inputs.update({name: [t.name.split(":")[0] for t in fn_graph.inputs]}) + graph_outputs.update( + {name: [t.name.split(":")[0] for t in fn_graph.outputs]} + ) + + # ret is a mapping from the output arg names from `signature` to the + # outputs from `node_def` that should be returned by the function. 
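+            # For illustration, a typical entry looks like
+            # {"identity": "Identity:output:0"} (names are examples only).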
+ graph_ret.update({name: sg_def.ret}) + + return graph_dict, graph_inputs, graph_outputs, graph_ret + + @staticmethod + def _concrete_fn_from_tf_keras_or_h5(keras_model): + if not isinstance(keras_model, _tf.keras.Model): + keras_model = _tf.keras.models.load_model(keras_model) + input_signature = _saving_utils.model_input_signature( + keras_model, keep_original_batch_size=True + ) + fn = _saving_utils.trace_model_call(keras_model, input_signature) + return [fn.get_concrete_function()] + + def _graph_def_from_concrete_fn(self, cfs): + if len(cfs) != 1: + raise NotImplementedError("Only a single concrete function is supported.") + + if _get_version(_tf.__version__) >= _StrictVersion("2.2.0"): + frozen_fn = _convert_variables_to_constants_v2(cfs[0], lower_control_flow=False, aggressive_inlining=True) + else: + frozen_fn = _convert_variables_to_constants_v2(cfs[0], lower_control_flow=False) + graph_def = frozen_fn.graph.as_graph_def(add_shapes=True) + + # run a Grappler's constant folding pass. + fn_inputs = [t for t in frozen_fn.inputs if t.dtype != _dtypes.resource] + grappler_optimizers_list = self._get_grappler_optimizers_list() + graph_def = _run_graph_optimizations( + graph_def, + fn_inputs, + frozen_fn.outputs, + config=_get_grappler_config(grappler_optimizers_list), + graph=frozen_fn.graph, + ) + return graph_def + + def _get_grappler_optimizers_list(self): + return ["constfold", "dependency", "debug_stripper"] diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/ops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/ops.py new file mode 100644 index 00000000..225a2b0b --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/ops.py @@ -0,0 +1,235 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as _np + +# TF 2.x now imports and registers all TF 1.x op against the new registry +# (separated from TF 1.x registry). 
Overrides might be needed in case the op
+# semantics are different between TF 1.x and TF 2.x.
+from coremltools.converters.mil.frontend.tensorflow.convert_utils import \
+    convert_graph
+from coremltools.converters.mil.frontend.tensorflow.ops import (
+    _transpose_NCDHW_to_NDHWC, _transpose_NCHW_to_NHWC,
+    _transpose_NDHWC_to_NCDHW, _transpose_NHWC_to_NCHW)
+from coremltools.converters.mil.frontend.tensorflow.tf_op_registry import \
+    register_tf_op
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil.types import builtin_to_string
+from coremltools.converters.mil.mil.types.symbolic import any_symbolic
+
+
+@register_tf_op(override=True, tf_alias=["FusedBatchNorm"])
+def FusedBatchNormV3(context, node):
+
+    # Helper function that adds the batch norm layer.
+    def _add_batch_norm(x, mean, variance, scale, offset, epsilon, name):
+
+        if mean.shape[0] != 0 and variance.shape[0] != 0:
+            # In this case, we can use mb.batch_norm directly.
+            x = mb.batch_norm(
+                x=x, mean=mean, variance=variance, gamma=scale, beta=offset, epsilon=epsilon, name=name
+            )
+        else:
+            # In this case, we need to compute the batch norm manually.
+            axes = [axis for axis in range(x.rank) if axis != 1]
+            mean = mb.reduce_mean(x=x, axes=axes, keep_dims=True)
+            num = mb.sub(x=x, y=mean)
+            square = mb.mul(x=num, y=num)
+            variance = mb.reduce_mean(x=square, axes=axes, keep_dims=True)
+            variance_add_epsilon = mb.add(x=variance, y=epsilon)
+            sqrt = mb.sqrt(x=variance_add_epsilon)
+            x = mb.real_div(x=num, y=sqrt)
+
+            shape = [1] * x.rank
+            shape[1] = -1 if any_symbolic(scale.shape) else scale.shape[0]
+            scale_reshape = mb.reshape(x=scale, shape=shape)
+            offset_reshape = mb.reshape(x=offset, shape=shape)
+
+            x = mb.mul(x=x, y=scale_reshape)
+            x = mb.add(x=x, y=offset_reshape, name=name)
+
+        return x
+
+    # Get attributes.
+    data_format = node.attr.get("data_format", "NHWC")
+    epsilon = node.attr.get("epsilon", None)
+
+    # Get inputs.
+    x = context[node.inputs[0]]
+    scale = context[node.inputs[1]]
+    offset = context[node.inputs[2]]
+    mean = context[node.inputs[3]]
+    variance = context[node.inputs[4]]
+    input_dtype = x.dtype
+
+    batch_norm_name = node.name + "_nchw" if data_format == "NHWC" else node.name
+
+    if data_format == "NHWC":
+        x = _transpose_NHWC_to_NCHW(x)
+    elif data_format == "NDHWC":
+        x = _transpose_NDHWC_to_NCDHW(x)
+
+    x = mb.cast(x=x, dtype=builtin_to_string(mean.dtype))
+
+    x = _add_batch_norm(x, mean, variance, scale, offset, epsilon, batch_norm_name)
+
+    if data_format == "NHWC":
+        x = _transpose_NCHW_to_NHWC(x, node.name + "_to_NHWC")
+    elif data_format == "NDHWC":
+        x = _transpose_NCDHW_to_NDHWC(x, node.name + "_to_NDHWC")
+
+    x = mb.cast(x=x, dtype=builtin_to_string(input_dtype), name=node.name)
+
+    # Inference-only batch norm does not have meaningful outputs for
+    # batch_mean, batch_variance etc.
+ context.add(node.name, x) + + +@register_tf_op(tf_alias=["If"], override=True) +def StatelessIf(context, node): + pred = context[node.inputs[0]][0] + then_graph = context.get_graph(node.attr.get("then_branch")) + else_graph = context.get_graph(node.attr.get("else_branch")) + + def then_fn(): + context.stack_func_inputs(context[node.inputs[0]]) + then_output_var = convert_graph(context, then_graph) + context.unstack_func_inputs() + return then_output_var + + def else_fn(): + context.stack_func_inputs(context[node.inputs[0]]) + else_output_var = convert_graph(context, else_graph) + context.unstack_func_inputs() + return else_output_var + + x = mb.cond(pred=pred, _true_fn=then_fn, _false_fn=else_fn, name=node.name) + + # wraps x as tuple for get_tuple that always follow the cond node. + x = (x,) if not isinstance(x, (tuple, list)) else x + + context.add(node.name, x) + + +@register_tf_op(tf_alias=["While"], override=True) +def StatelessWhile(context, node): + # inputs are loop_counter, max_iterations, [loop_vars] + loop_vars = context[node.inputs[0]][2:] + + cond_graph = context.get_graph(node.attr.get("cond")) + body_graph = context.get_graph(node.attr.get("body")) + + def cond(*loop_vars): + context.stack_func_inputs(loop_vars) + cond_output_vars = convert_graph(context, cond_graph) + context.unstack_func_inputs() + return cond_output_vars + + def body(*loop_vars): + context.stack_func_inputs(loop_vars) + body_output_vars = convert_graph(context, body_graph) + context.unstack_func_inputs() + return body_output_vars + + x = mb.while_loop(_cond=cond, _body=body, loop_vars=loop_vars, name=node.name) + + # wraps x as tuple for get_tuple that always follow the while node. + x = (x,) if not isinstance(x, (tuple, list)) else x + + context.add(node.name, x) + + +@register_tf_op +def TensorListFromTensor(context, node): + value = context[node.inputs[0]] + element_shape = context[node.inputs[1]] + element_dtype = node.attr.get("element_dtype") + dtype_str = builtin_to_string(element_dtype) + + length = mb.shape(x=value) + length = mb.slice_by_index(x=length, begin=[0], end=[1], squeeze_mask=[True]) + + if element_shape is not None and all(_np.atleast_1d(element_shape.val) != -1): + ls = mb.make_list(init_length=length, + elem_shape=tuple(element_shape.val.tolist()), dtype=dtype_str) + else: + ls = mb.tf_make_list(init_length=length, dtype=dtype_str) + + indices = mb.range_1d(end=length, start=0, step=1) + ls = mb.list_scatter(ls=ls, indices=indices, value=value, name=node.name) + context.add(node.name, ls) + + +@register_tf_op +def TensorListGather(context, node): + ls = context[node.inputs[0]] + indices = context[node.inputs[1]] + tensor = mb.list_gather(ls=ls, indices=indices, name=node.name) + context.add(node.name, tensor) + + +@register_tf_op +def TensorListGetItem(context, node): + ls = context[node.inputs[0]] + index = context[node.inputs[1]] + new_ls = mb.list_read(ls=ls, index=index, name=node.name) + context.add(node.name, new_ls) + + +@register_tf_op +def TensorListLength(context, node): + ls = context[node.inputs[0]] + length = mb.list_length(ls=ls, name=node.name) + context.add(node.name, length) + + +@register_tf_op +def TensorListReserve(context, node): + element_shape = context[node.inputs[0]] + num_elements = context[node.inputs[1]] + element_dtype = node.attr.get("element_dtype") + dtype = builtin_to_string(element_dtype) + + if element_shape is not None and all(_np.atleast_1d(element_shape.val) != -1): + ls = mb.make_list( + init_length=num_elements, + 
elem_shape=tuple(element_shape.val.tolist()), + dynamic_length=num_elements.val is None, + dtype=dtype, + name=node.name, + ) + else: + ls = mb.tf_make_list(init_length=num_elements, + dtype=dtype, + dynamic_length=num_elements.val is None, + name=node.name) + context.add(node.name, ls) + + +@register_tf_op +def TensorListScatterIntoExistingList(context, node): + ls = context[node.inputs[0]] + value = context[node.inputs[1]] + indices = context[node.inputs[2]] + ls = mb.list_scatter(ls=ls, indices=indices, value=value, name=node.name) + context.add(node.name, ls) + + +@register_tf_op +def TensorListSetItem(context, node): + ls = context[node.inputs[0]] + index = context[node.inputs[1]] + value = context[node.inputs[2]] + new_ls = mb.list_write(ls=ls, index=index, value=value, name=node.name) + context.add(node.name, new_ls) + + +@register_tf_op +def TensorListStack(context, node): + ls = context[node.inputs[0]] + length = mb.list_length(ls=ls) + indices = mb.range_1d(end=length, start=0, step=1) + x = mb.list_gather(ls=ls, indices=indices, name=node.name) + context.add(node.name, x) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/ssa_passes/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/ssa_passes/__init__.py new file mode 100644 index 00000000..91ca84e4 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/ssa_passes/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from . import remove_vacuous_cond diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/ssa_passes/remove_vacuous_cond.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/ssa_passes/remove_vacuous_cond.py new file mode 100644 index 00000000..65815792 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/ssa_passes/remove_vacuous_cond.py @@ -0,0 +1,118 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools import _logger as logger +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.helper import block_context_manager +from coremltools.converters.mil.mil.passes.pass_registry import register_pass + + +@block_context_manager +def _remove_vacuous_cond_block(block): + num_changes = 0 + for op in list(block.operations): + for b in op.blocks: + num_changes += _remove_vacuous_cond_block(b) + + if op.op_type != "cond": + continue + + then_ops = op.blocks[0].operations + else_ops = op.blocks[1].operations + + if len(then_ops) > 1 or len(else_ops) > 1: + continue + + # Pattern 1: dynamic length TensorList generates this pattern. See + # conversion functions of TensorList* ops for details. TF2's graph + # contains a tf.cond op with 2 sub-graphs. The condition is either + # `less_equal` or `greater_equal` op. 
One sub-graph contains only an
+        # identity op forwarding the original TensorList, while the other
+        # sub-graph contains a TensorListResize op to generate a new
+        # TensorList. But in the backend, list length is handled dynamically
+        # in list_write/scatter, and thus the entire tf.cond and its
+        # sub-graphs can be removed.
+        if len(then_ops) == 0 and len(else_ops) == 0:
+            if op.pred.op.op_type not in {"less_equal", "greater_equal"}:
+                continue
+
+            # cond op must have pred
+            pred_x = op.pred.op.x.op
+            pred_y = op.pred.op.y.op
+
+            if pred_x is None and pred_y is None:
+                continue
+
+            if op.pred.op.op_type == "less_equal":
+                if pred_x.op_type != "list_length":
+                    continue
+                new_var = pred_x.ls
+
+            else:  # op.pred.op.op_type == "greater_equal"
+                if pred_y.op_type != "list_length":
+                    continue
+                new_var = pred_y.ls
+
+            op.enclosing_block.replace_uses_of_var_after_op(
+                anchor_op=op, old_var=op.outputs[0], new_var=new_var
+            )
+            block.remove_ops([op])  # rely on DCE to remove extra cond inputs
+            num_changes += 1
+
+        # Pattern 2: both the then and else branches contain exactly 1 identity op
+        if len(then_ops) == 1 and len(else_ops) == 1:
+            if then_ops[0].op_type != "identity" or else_ops[0].op_type != "identity":
+                continue
+            if then_ops[0].x != else_ops[0].x:
+                continue
+
+            new_var = mb.identity(x=then_ops[0].x, before_op=op, name=op.name)
+            op.enclosing_block.replace_uses_of_var_after_op(
+                anchor_op=op, old_var=op.outputs[0], new_var=new_var
+            )
+            block.remove_ops([op])  # rely on DCE to remove extra cond inputs
+            num_changes += 1
+
+    return num_changes
+
+@register_pass(namespace="tensorflow2")
+class remove_vacuous_cond(AbstractGraphPass):
+    """
+    Remove a cond op and its sub-graphs when both the then and else branches
+    produce an identity. One example use case is the TensorListReverse op: in
+    Core ML, we dynamically resize in write operations, and thus both branches
+    of the cond op will be a skip (identity) op.
+
+    Given:
+
+    main(%a: (1, bool),
+         %b: (2, 3, fp32)) {
+      block0() {
+        %squeeze_0: (bool) = squeeze(x=%a, name="squeeze_0")
+        %cond_0: (2, 3, fp32) = cond(pred=%squeeze_0, name="cond_0")
+          cond_0_true() {
+            %identity_0: (2, 3, fp32) = identity(x=%b, name="identity_0")
+          } -> (%identity_0)
+          cond_0_false() {
+            %identity_1: (2, 3, fp32) = identity(x=%b, name="identity_1")
+          } -> (%identity_1)
+      } -> (%cond_0)
+    }
+
+    Result:
+
+    main(%a: (1, bool),
+         %b: (2, 3, fp32)) {
+      block0() {
+        %squeeze_0: (bool) = squeeze(x=%a, name="squeeze_0")
+        %cond_0: (2, 3, fp32) = identity(x=%b, name="cond_0")
+      } -> (%cond_0)
+    }
+    """
+    def apply(self, prog):
+        for f_name, f in prog.functions.items():
+            num_changes = _remove_vacuous_cond_block(f)
+            msg = "remove_vacuous_cond: changed {} ops in function '{}'"
+            logger.info(msg.format(num_changes, f_name))
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/ssa_passes/test_v2_passes.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/ssa_passes/test_v2_passes.py
new file mode 100644
index 00000000..8b9ec829
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/ssa_passes/test_v2_passes.py
@@ -0,0 +1,54 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
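+#
+# Usage note: a graph pass registered under the "tensorflow2" namespace, such
+# as remove_vacuous_cond above, is looked up and applied to a MIL program by
+# name through the pass registry. A minimal sketch, assuming `prog` is an
+# existing MIL program (the test below uses exactly this invocation):
+#
+#     from coremltools.converters.mil.mil.passes.pass_registry import PASS_REGISTRY
+#     PASS_REGISTRY["tensorflow2::remove_vacuous_cond"](prog)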
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import copy + +import numpy as np + +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.passes.pass_registry import PASS_REGISTRY +from coremltools.converters.mil.testing_utils import (assert_model_is_valid, + assert_same_output_names) + +np.random.seed(1984) +validate_model = True + + +def test_remove_vacuous_cond(): + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(1,), dtype=types.bool), + mb.TensorSpec(shape=(2, 3)), + ] + ) + def prog(a, b): + def then_branch(): + return mb.identity(x=b) + + def else_branch(): + return mb.identity(x=b) + + pred = mb.squeeze(x=a) + return mb.cond(pred=pred, _true_fn=then_branch, _false_fn=else_branch) + + cond_op = prog.find_ops(op_type="cond", exactly_one=True)[0] + original_cond_op_name = cond_op.name + assert len(cond_op.blocks[0].operations) == 1 + assert len(cond_op.blocks[1].operations) == 1 + assert cond_op.blocks[0].operations[0].op_type == "identity" + assert cond_op.blocks[1].operations[0].op_type == "identity" + + prev_prog = copy.deepcopy(prog) + PASS_REGISTRY["tensorflow2::remove_vacuous_cond"](prog) + assert_same_output_names(prev_prog, prog) + + cond_op = prog.find_ops(op_type="cond") + assert len(cond_op) == 0 + identity_op = prog.find_ops(prefix=original_cond_op_name, exactly_one=True)[0] + assert identity_op.op_type == "identity" + + if validate_model: + assert_model_is_valid(prog, {"a": (1,), "b": (2, 3)}) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/test/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/test/__init__.py new file mode 100644 index 00000000..61aafff4 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/test/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/test/test_tf2_conversion_api.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/test/test_tf2_conversion_api.py new file mode 100644 index 00000000..a004ed89 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/test/test_tf2_conversion_api.py @@ -0,0 +1,437 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
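+#
+# The fixtures and tests below all funnel through the same public entry
+# point, coremltools.convert. A minimal sketch of the shared pattern (the
+# toy Dense model here is a stand-in; the real models come from the
+# fixtures below):
+#
+#     import tensorflow as tf
+#     import coremltools as ct
+#
+#     keras_model = tf.keras.Sequential(
+#         [tf.keras.layers.Dense(8, input_shape=(4,))]
+#     )
+#     mlmodel = ct.convert(keras_model)  # TF2 source is auto-detected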
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os +import platform +import urllib +from io import BytesIO +from os import chdir, getcwd +from shutil import rmtree +from tempfile import mkdtemp + +import numpy as np +import pytest +import requests +from PIL import Image + +import coremltools as ct +from coremltools.converters.mil.mil import types + +tf = pytest.importorskip("tensorflow", minversion="2.1.0") + +import tensorflow as tf +from tensorflow import keras +from tensorflow.keras import layers + + +@pytest.fixture +def int32_input_model(): + x = tf.keras.Input(batch_input_shape=(10, 20), name="input", dtype=tf.int32) + out = tf.add(x, tf.constant(5, dtype=tf.int32), name="output") + return tf.keras.Model(inputs=x, outputs=out) + +@pytest.fixture +def float32_input_model_add_op(): + x = tf.keras.Input(batch_input_shape=(10, 20), name="input", dtype=tf.float32) + out = tf.add(x, tf.constant(5.5, dtype=tf.float32), name="output") + return tf.keras.Model(inputs=x, outputs=out) + +@pytest.fixture +def float32_input_model_relu_ops(): + x = tf.keras.Input(batch_input_shape=(10, 20), name="input", dtype=tf.float32) + x1 = tf.keras.layers.ReLU()(x) + out = tf.keras.layers.ReLU(name="output")(x1) + return tf.keras.Model(inputs=x, outputs=out) + +@pytest.fixture +def int64_input_model(): + x = tf.keras.Input(batch_input_shape=(10, 20), name="input", dtype=tf.int64) + out = tf.add(x, tf.constant(5, dtype=tf.int64), name="output") + return tf.keras.Model(inputs=x, outputs=out) + +@pytest.fixture +def float32_two_input_model(): + x = tf.keras.Input(batch_input_shape=(10, 20), name="input1", dtype=tf.float32) + y = tf.keras.Input(batch_input_shape=(10, 20), name="input2", dtype=tf.float32) + out = tf.add(x, y, name="output") + return tf.keras.Model(inputs=[x, y], outputs=out) + +@pytest.fixture +def float32_two_output_model(): + x = tf.keras.Input(batch_input_shape=(10, 20), name="input", dtype=tf.float32) + y = tf.nn.relu(x) + out2 = tf.nn.relu6(x, name="output2") + out1 = tf.nn.relu(y, name="output1") + return tf.keras.Model(inputs=x, outputs=[out1, out2]) + +@pytest.fixture +def rank3_input_model(): + x = tf.keras.Input(batch_input_shape=(1, 10, 20), name="input", dtype=tf.float32) + out = tf.add(x, tf.constant(5, dtype=tf.float32), name="output") + return tf.keras.Model(inputs=x, outputs=out) + +@pytest.fixture +def rank4_input_model(): + x = tf.keras.Input(batch_input_shape=(1, 10, 20, 3), name="input", dtype=tf.float32) + out = tf.add(x, tf.constant(5, dtype=tf.float32), name="output") + return tf.keras.Model(inputs=x, outputs=out) + +@pytest.fixture +def rank4_input_model_with_channel_first_output(): + x = tf.keras.Input(batch_input_shape=(1, 10, 20, 3), name="input", dtype=tf.float32) + y = tf.add(x, tf.constant(5, dtype=tf.float32)) + out = tf.transpose(y, perm=[0, 3, 1, 2], name="output") + return tf.keras.Model(inputs=x, outputs=out) + +@pytest.fixture +def rank4_grayscale_input_model(): + x = tf.keras.Input(batch_input_shape=(1, 10, 20, 1), name="input", dtype=tf.float32) + out = tf.add(x, tf.constant(5, dtype=tf.float32), name="output") + return tf.keras.Model(inputs=x, outputs=out) + +@pytest.fixture +def rank4_grayscale_input_model_with_channel_first_output(): + x = tf.keras.Input(batch_input_shape=(1, 10, 20, 1), name="input", dtype=tf.float32) + y = tf.add(x, tf.constant(5, dtype=tf.float32)) + out = tf.transpose(y, perm=[0, 3, 1, 2], name="output") + return 
tf.keras.Model(inputs=x, outputs=out)
+
+@pytest.fixture
+def linear_model():
+    # this model will test the fuse_matmul_weight_bias pass
+    x = tf.keras.Input(batch_input_shape=(1, 10), name="input", dtype=tf.float32)
+    y = tf.keras.layers.Dense(4)(x)
+    y = tf.add(y, tf.constant([1, 2, 3, 4], shape=(4,), dtype=tf.float32))
+    out = tf.nn.relu(y)
+    return tf.keras.Model(inputs=x, outputs=out)
+
+
+
+#################################################################################
+# Note: all tests are also used as examples in https://coremltools.readme.io/docs
+# as a reference.
+# Whenever any of the following tests fails, we should update the API
+# documentation.
+#################################################################################
+
+class TestTensorFlow2ConverterExamples:
+    def setup_class(self):
+        self._cwd = getcwd()
+        self._temp_dir = mkdtemp()
+        # step into the temp directory as the working directory
+        # to make the user-facing examples cleaner
+        chdir(self._temp_dir)
+
+        # create toy models for conversion examples
+        # write a toy tf.keras HDF5 model
+        tf_keras_model = tf.keras.Sequential(
+            [
+                tf.keras.layers.Flatten(input_shape=(28, 28)),
+                tf.keras.layers.Dense(128, activation=tf.nn.relu),
+                tf.keras.layers.Dense(10, activation=tf.nn.softmax),
+            ]
+        )
+        tf_keras_model.save("./tf_keras_model.h5")
+
+        # write a toy SavedModel directory
+        tf_keras_model.save("./saved_model", save_format="tf")
+
+    def teardown_class(self):
+        chdir(self._cwd)
+        if os.path.exists(self._temp_dir):
+            rmtree(self._temp_dir)
+
+    @staticmethod
+    def test_convert_tf_keras_h5_file():
+        if platform.machine() == "arm64":
+            pytest.xfail("rdar://101162740 ([CI] [TF] The tf_keras_h5_file API testing is failing on M1 with new OS)")
+
+        for file_extension in ("h5", "hdf5"):
+            x = tf.keras.Input(shape=(32,), name="input")
+            y = tf.keras.layers.Dense(16, activation="softmax")(x)
+            keras_model = tf.keras.Model(x, y)
+            temp_dir = mkdtemp()
+            save_dir = str(temp_dir)
+            path = os.path.join(save_dir, "tf_keras_model."
+ file_extension) + keras_model.save(path) + mlmodel = ct.convert(path) + + test_input = np.random.rand(2, 32) + expected_val = keras_model(test_input) + results = mlmodel.predict({"input": test_input}) + np.testing.assert_allclose(results["Identity"], expected_val, rtol=1e-4) + + @staticmethod + def test_convert_tf_keras_model(): + x = tf.keras.Input(shape=(32,), name="input") + y = tf.keras.layers.Dense(16, activation="softmax")(x) + keras_model = tf.keras.Model(x, y) + + mlmodel = ct.convert(keras_model) + + test_input = np.random.rand(2, 32) + expected_val = keras_model(test_input) + results = mlmodel.predict({"input": test_input}) + np.testing.assert_allclose(results["Identity"], expected_val, rtol=1e-4) + + @staticmethod + @pytest.mark.parametrize( + "dtype", ['default', 'mil_type', 'np type']) + def test_convert_tf_keras_applications_model(dtype): + tf_keras_model = tf.keras.applications.MobileNet( + weights="imagenet", input_shape=(224, 224, 3) + ) + + # inputs / outputs are optional, we can get from tf.keras model + # this can be extremely helpful when we want to extract sub-graphs + input_name = tf_keras_model.inputs[0].name.split(":")[0] + + if dtype == 'default': + dtype = None + elif dtype == 'mil_type': + dtype = types.fp32 + else: + dtype = np.float32 + + mlmodel = ct.convert( + tf_keras_model, + inputs=[ct.TensorType(shape=(1, 224, 224, 3), dtype=dtype)], + ) + mlmodel.save("./mobilenet.mlmodel") + + @staticmethod + def test_convert_from_saved_model_dir(): + # SavedModel directory generated by TensorFlow 2.x + mlmodel = ct.convert("./saved_model") + mlmodel.save("./model.mlmodel") + + + @staticmethod + def test_keras_custom_layer_model(): + # testing : https://coremltools.readme.io/docs/tensorflow-2#conversion-from-user-defined-models + + class CustomDense(layers.Layer): + def __init__(self, units=32): + super(CustomDense, self).__init__() + self.units = units + + def build(self, input_shape): + self.w = self.add_weight( + shape=(input_shape[-1], self.units), + initializer="random_normal", + trainable=True, + ) + self.b = self.add_weight( + shape=(self.units,), initializer="random_normal", trainable=True + ) + + def call(self, inputs): + return tf.matmul(inputs, self.w) + self.b + + inputs = keras.Input((4,)) + outputs = CustomDense(10)(inputs) + model = keras.Model(inputs, outputs) + ct.convert(model) + + @staticmethod + def test_concrete_function_conversion(): + # testing : https://coremltools.readme.io/docs/tensorflow-2#conversion-from-user-defined-models + + @tf.function(input_signature=[tf.TensorSpec(shape=(6,), dtype=tf.float32)]) + def gelu_tanh_activation(x): + a = (np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))) + y = 0.5 * (1.0 + tf.tanh(a)) + return x * y + + conc_func = gelu_tanh_activation.get_concrete_function() + mlmodel = ct.convert([conc_func]) + + @staticmethod + def test_convert_tf2_keras(): + x = tf.keras.Input(shape=(32,), name="input") + y = tf.keras.layers.Dense(16, activation="softmax")(x) + keras_model = tf.keras.Model(x, y) + model = ct.convert(keras_model, convert_to='milinternal') + assert isinstance(model, ct.converters.mil.Program) + + +class TestTF2FlexibleInput: + # Test examples in https://coremltools.readme.io/docs/flexible-inputs + @staticmethod + @pytest.mark.parametrize("use_symbol", [True, False]) + def test_tf2keras_shared_range_dim(use_symbol): + input_dim = 3 + # None denotes seq_len dimension + x1 = tf.keras.Input(shape=(None,input_dim), name="seq1") + x2 = tf.keras.Input(shape=(None,input_dim), name="seq2") + y = x1 + x2 + 
keras_model = tf.keras.Model(inputs=[x1, x2], outputs=[y]) + + # One RangeDim shared by two inputs + if use_symbol: + seq_len_dim = ct.RangeDim(symbol='seq_len') + else: + # symbol is optional + seq_len_dim = ct.RangeDim() + seq1_input = ct.TensorType(name="seq1", shape=(1, seq_len_dim, input_dim)) + seq2_input = ct.TensorType(name="seq2", shape=(1, seq_len_dim, input_dim)) + mlmodel = ct.convert(keras_model, + inputs=[seq1_input, seq2_input]) + + batch = 1 + seq_len = 5 + test_input_x1 = np.random.rand(batch, seq_len, input_dim).astype(np.float32) + test_input_x2 = np.random.rand(batch, seq_len, input_dim).astype(np.float32) + expected_val = keras_model([test_input_x1, test_input_x2]) + if ct.utils._is_macos(): + results = mlmodel.predict({ + "seq1": test_input_x1, + "seq2": test_input_x2}) + np.testing.assert_allclose(results["Identity"], expected_val, + rtol=1e-2, atol=1e-2) + + + @staticmethod + def test_tf2keras_incorrect_range_dim(): + input_dim = 3 + # None denotes seq_len dimension + x1 = tf.keras.Input(shape=(None,input_dim), name="seq1") + y = x1 + 1 + keras_model = tf.keras.Model(inputs=[x1], outputs=[y]) + + # Incorrectly using -1 instead of ct.RangeDim + # One RangeDim shared by two inputs + with pytest.raises(ValueError, + match=r"Can\'t convert to CoreML shaping"): + seq1_input = ct.TensorType(name="seq1", shape=(1, -1, input_dim)) + mlmodel = ct.convert(keras_model, inputs=[seq1_input]) + + @staticmethod + @pytest.mark.parametrize("use_symbol", [True, False]) + def test_tf2keras_outofbound_range_dim(use_symbol): + input_dim = 3 + # None denotes seq_len dimension + x = tf.keras.Input(shape=(None,input_dim), name="seq") + y = x * 2 + keras_model = tf.keras.Model(inputs=[x], outputs=[y]) + + if use_symbol: + seq_len_dim = ct.RangeDim(symbol='sequence_len', lower_bound=3, + upper_bound=5) + else: + seq_len_dim = ct.RangeDim(lower_bound=3, upper_bound=5) + seq_input = ct.TensorType(name="seq", shape=(1, seq_len_dim, input_dim)) + mlmodel = ct.convert(keras_model, inputs=[seq_input]) + + # seq_len is within bound + batch = 1 + seq_len = 3 + test_input_x = np.random.rand(batch, seq_len, input_dim).astype(np.float32) + expected_val = keras_model([test_input_x]) + if ct.utils._is_macos(): + results = mlmodel.predict({"seq": test_input_x}) + np.testing.assert_allclose(results["Identity"], expected_val, + rtol=1e-4, atol=1e-3) + + # seq_len below/above lower_bound/upper_bound + with pytest.raises(RuntimeError, + match=r"Size \(2\) of dimension \(1\) is not in allowed range \(3\.\.5\)"): + seq_len = 2 + test_input_x = np.random.rand(batch, seq_len, + input_dim).astype(np.float32) + results = mlmodel.predict({"seq": test_input_x}) + + with pytest.raises(RuntimeError, + match=r"Size \(6\) of dimension \(1\) is not in allowed range \(3\.\.5\)"): + seq_len = 6 + test_input_x = np.random.rand(batch, seq_len, + input_dim).astype(np.float32) + results = mlmodel.predict({"seq": test_input_x}) + + @staticmethod + def test_tf2_image_enumerated_shapes(): + keras_model = tf.keras.applications.MobileNetV2( + input_shape=(None, None, 3,), + classes=1000, + include_top=False, + ) + input_shapes = ct.EnumeratedShapes(shapes=[(1, 192, 192, 3), (1, 224, 224, 3)]) + image_input = ct.ImageType(shape=input_shapes, + bias=[-1,-1,-1], scale=1/127) + model = ct.convert(keras_model, inputs=[image_input]) + assert model is not None + spec = model.get_spec() + assert len(spec.description.input[0].type.imageType.enumeratedSizes.sizes) == 2 + + @staticmethod + def test_tf2keras_enumerated_shapes(): + input_shape = 
(28, 28, 3) + # None denotes seq_len dimension + x = tf.keras.Input(shape=input_shape, name="input") + C_out = 2 + kHkW = 3 + y = tf.keras.layers.Conv2D(C_out, kHkW, activation='relu', + input_shape=input_shape)(x) + keras_model = tf.keras.Model(inputs=[x], outputs=[y]) + + # One RangeDim shared by two inputs + shapes = [(1, 28, 28, 3), (1, 56, 56, 3)] + enumerated_shapes = ct.EnumeratedShapes(shapes=shapes) + tensor_input = ct.TensorType(name="input", shape=enumerated_shapes) + mlmodel = ct.convert(keras_model, inputs=[tensor_input]) + + # Test (1, 28, 28, 3) shape + test_input_x = np.random.rand(*shapes[0]).astype(np.float32) + expected_val = keras_model([test_input_x]) + if ct.utils._is_macos(): + results = mlmodel.predict({ + "input": test_input_x}) + # rdar://101303143 ([CI] test_tf2keras_enumerated_shapes is getting some stochastic numerical issues on intel machines) + # The tolerance is set a little bit big here. Need to investigate this issue if possible and lower the threshold down. + np.testing.assert_allclose(results["Identity"], + expected_val, atol=1e-2, rtol=3) + + # Test (1, 56, 56, 3) shape (can't verify numerical parity with Keras + # which doesn't support enumerated shape) + test_input_x = np.random.rand(*shapes[1]).astype(np.float32) + results = mlmodel.predict({ + "input": test_input_x}) + + # Test with a wrong shape + with pytest.raises(RuntimeError, + match=r"MultiArray Shape \(1 x 29 x 29 x 3\) was not in enumerated set of allowed shapes"): + test_input_x = np.random.rand(1, 29, 29, 3).astype(np.float32) + results = mlmodel.predict({ + "input": test_input_x}) + + @staticmethod + def test_tf2keras_optional_input(): + input_dim = 3 + # None denotes seq_len dimension + x1 = tf.keras.Input(shape=(None,input_dim), name="optional_input") + x2 = tf.keras.Input(shape=(None,input_dim), name="required_input") + y = x1 + x2 + keras_model = tf.keras.Model(inputs=[x1, x2], outputs=[y]) + + seq_len_dim = ct.RangeDim() + default_value = np.ones((1, 2, input_dim)).astype(np.float32) + optional_input = ct.TensorType( + name="optional_input", + shape=(1, seq_len_dim, input_dim), + default_value=default_value, + ) + required_input = ct.TensorType( + name="required_input", + shape=(1, seq_len_dim, input_dim), + ) + mlmodel = ct.convert(keras_model, + inputs=[optional_input, required_input]) + + batch = 1 + seq_len = 2 + test_input_x2 = np.random.rand(batch, seq_len, input_dim).astype(np.float32) + expected_val = keras_model([default_value, test_input_x2]) + if ct.utils._is_macos(): + results = mlmodel.predict({"required_input": test_input_x2}) + np.testing.assert_allclose(results["Identity"], expected_val, rtol=1e-2) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/test/test_v2_load.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/test/test_v2_load.py new file mode 100644 index 00000000..7e05b4ca --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/test/test_v2_load.py @@ -0,0 +1,224 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
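+#
+# The cases in this file feed each supported TF2 model format -- an in-memory
+# tf.keras model, a SavedModel directory, an .h5/.hdf5 file, a list holding a
+# single concrete function, and a frozen GraphDef -- through the same
+# converter entry point. A minimal sketch of the SavedModel path (the
+# directory name is a placeholder):
+#
+#     import coremltools.converters as converter
+#     mlmodel = converter.convert("./saved_model", outputs=["Identity"],
+#                                 source="tensorflow")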
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os +import shutil +import tempfile + +import pytest + +import coremltools.converters as converter +from coremltools.converters.mil.frontend.tensorflow.test.test_load import \ + frontend +from coremltools.converters.mil.frontend.tensorflow.test.testing_utils import \ + get_tf_keras_io_names +from coremltools.converters.mil.input_types import TensorType + + +tf = pytest.importorskip("tensorflow", minversion="2.1.0") + + +class TestTf2ModelFormats: + def setup(self): + self.saved_model_dir = tempfile.mkdtemp() + _, self.model_path_h5 = tempfile.mkstemp( + suffix=".h5", prefix=self.saved_model_dir + ) + + def teardown(self): + if os.path.exists(self.saved_model_dir): + shutil.rmtree(self.saved_model_dir) + + def test_keras_model(self): + keras_model = tf.keras.Sequential( + [tf.keras.layers.ReLU(input_shape=(4, 5), batch_size=3)] + ) + input_names, output_names = get_tf_keras_io_names(keras_model) + mlmodel = converter.convert( + keras_model, + inputs=[TensorType(input_names[0], (3, 4, 5))], + outputs=["Identity"], + source=frontend, + ) + assert mlmodel is not None + + def test_keras_saved_model_file(self): + keras_model = tf.keras.Sequential( + [ + tf.keras.layers.Flatten(input_shape=(28, 28), batch_size=1), + tf.keras.layers.Dense(10, activation=tf.nn.relu), + ] + ) + keras_model.save(self.saved_model_dir, save_format="tf") + mlmodel = converter.convert( + self.saved_model_dir, outputs=["Identity"], source=frontend + ) + assert mlmodel is not None + + def test_keras_h5_file(self): + keras_model = tf.keras.Sequential( + [tf.keras.layers.ReLU(input_shape=(4, 5), batch_size=3)] + ) + input_names, output_names = get_tf_keras_io_names(keras_model) + keras_model.save(self.model_path_h5, save_format="h5") + mlmodel = converter.convert( + self.model_path_h5, + inputs=[TensorType(input_names[0], (3, 4, 5))], + outputs=["Identity"], + source=frontend, + ) + assert mlmodel is not None + + def test_keras_hdf5_file(self): + keras_model = tf.keras.Sequential( + [tf.keras.layers.ReLU(input_shape=(4, 5), batch_size=3)] + ) + input_names, output_names = get_tf_keras_io_names(keras_model) + keras_model.save(self.model_path_h5, save_format="h5") + mlmodel = converter.convert( + self.model_path_h5, + inputs=[TensorType(input_names[0], (3, 4, 5))], + outputs=["Identity"], + source=frontend, + ) + assert mlmodel is not None + + def test_concrete_function_list_from_tf_low_level_api(self): + root = tf.train.Checkpoint() + root.v1 = tf.Variable(3.0) + root.v2 = tf.Variable(2.0) + root.f = tf.function(lambda x: root.v1 * root.v2 * x) + + input_data = tf.constant(1.0, shape=[1, 1]) + to_save = root.f.get_concrete_function(input_data) + tf.saved_model.save(root, self.saved_model_dir, to_save) + + tf_model = tf.saved_model.load(self.saved_model_dir) + concrete_func = tf_model.signatures[ + tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY + ] + mlmodel = converter.convert( + [concrete_func], outputs=["Identity"], source=frontend + ) + assert mlmodel is not None + + def test_saved_model_list_from_tf_function(self): + class build_model(tf.Module): + @tf.function( + input_signature=[tf.TensorSpec(shape=[3, 4, 5], dtype=tf.float32)] + ) + def __call__(self, x): + return tf.nn.relu(x) + + model = build_model() + tf.saved_model.save(model, self.saved_model_dir) + mlmodel = converter.convert( + self.saved_model_dir, outputs=["Identity"], source=frontend + 
) + assert mlmodel is not None + + def test_concrete_function_list_from_tf_function(self): + class build_model(tf.Module): + @tf.function( + input_signature=[tf.TensorSpec(shape=[3, 4, 5], dtype=tf.float32)] + ) + def __call__(self, x): + return tf.nn.relu(x) + + model = build_model() + concrete_func = model.__call__.get_concrete_function() + mlmodel = converter.convert( + [concrete_func], outputs=["Identity"], source=frontend + ) + assert mlmodel is not None + + def test_graphdef_from_tf_function(self): + class build_model(tf.Module): + def __init__(self): + self.dense = tf.keras.layers.Dense(256, activation="relu") + + input_signature = [ + tf.TensorSpec(name="input", shape=( + 128, 128), dtype=tf.float32), + ] + + @tf.function(input_signature=input_signature) + def call(self, x): + x = self.dense(x) + return x + + model = build_model() + + from tensorflow.python.framework.convert_to_constants import \ + convert_variables_to_constants_v2 + frozen_graph_func = convert_variables_to_constants_v2( + model.call.get_concrete_function()) + frozen_graph_def = frozen_graph_func.graph.as_graph_def() + + mlmodel = converter.convert(frozen_graph_def) + assert mlmodel is not None + + def test_model_metadata(self): + keras_model = tf.keras.Sequential( + [tf.keras.layers.ReLU(input_shape=(4, 5), batch_size=3)] + ) + input_names, output_names = get_tf_keras_io_names(keras_model) + mlmodel = converter.convert( + keras_model, + inputs=[TensorType(input_names[0], (3, 4, 5))], + outputs=["Identity"], + source=frontend, + ) + metadata_keys = mlmodel.get_spec().description.metadata.userDefined + assert "com.github.apple.coremltools.version" in metadata_keys + assert "com.github.apple.coremltools.source" in metadata_keys + assert "tensorflow==2." in metadata_keys["com.github.apple.coremltools.source"] + + def test_invalid_format_none(self): + with pytest.raises(NotImplementedError) as e: + converter.convert(None, source=frontend) + e.match(r"Expected model format: .* .h5") + + def test_invalid_format_invalid_extension(self): + _, invalid_filename = tempfile.mkstemp( + suffix=".invalid", prefix=self.saved_model_dir + ) + with pytest.raises(NotImplementedError) as e: + converter.convert(invalid_filename, source=frontend) + e.match(r"Expected model format: .* .h5") + + def test_invalid_format_multiple_concrete_functions(self): + class build_model(tf.Module): + @tf.function( + input_signature=[tf.TensorSpec(shape=[3, 4, 5], dtype=tf.float32)] + ) + def __call__(self, x): + return tf.nn.relu(x) + + model = build_model() + cf = model.__call__.get_concrete_function() + with pytest.raises(NotImplementedError) as e: + converter.convert([cf, cf, cf], source=frontend) + e.match(r"Only a single concrete function is supported") + + def test_invalid_converter_type(self): + keras_model = tf.keras.Sequential( + [tf.keras.layers.ReLU(input_shape=(4, 5), batch_size=3)] + ) + with pytest.raises(ValueError) as e: + converter.convert(keras_model, source="invalid") + + expected_msg = r'Unrecognized value of argument "source": .*' + e.match(expected_msg) + + with pytest.raises(NotImplementedError) as e: + converter.convert(keras_model, convert_to="invalid", source=frontend) + e.match(r"Backend converter .* not implemented") + + def test_invalid_format_non_exist(self): + non_exist_filename = self.model_path_h5.replace(".h5", "_non_exist.h5") + with pytest.raises(ValueError) as e: + converter.convert(non_exist_filename, source=frontend) + e.match(r"Input model .* does not exist") diff --git 
a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/test/test_v2_ops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/test/test_v2_ops.py new file mode 100644 index 00000000..5713ae60 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/test/test_v2_ops.py @@ -0,0 +1,792 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools + +import numpy as np +import pytest + +import coremltools as ct +from coremltools.converters.mil import testing_reqs +from coremltools.converters.mil.frontend.tensorflow.test.testing_utils import \ + TensorFlowBaseTest +from coremltools.converters.mil.frontend.tensorflow2.test.testing_utils import \ + TensorFlow2BaseTest +from coremltools.converters.mil.frontend.tensorflow2.test.testing_utils import \ + make_tf2_graph as make_tf_graph +from coremltools.converters.mil.testing_utils import random_gen + +TensorFlowBaseTest.run_compare_tf = TensorFlow2BaseTest.run_compare_tf2 + +tf = pytest.importorskip("tensorflow", minversion="2.1.0") + +backends = testing_reqs.backends +compute_units = testing_reqs.compute_units + +class TestImageResample(TensorFlowBaseTest): + @pytest.mark.skip( + "TODO: rdar://100812753 ([TF] [Infra] TensorFlow Addons dylib issues in TF 2.10.0)" + ) + @pytest.mark.parametrize( + "compute_unit, backend, data_warp_shapes", + itertools.product( + compute_units, + backends, + [ + # Data shape format: (Batch, Hin, Win, C) + # Warp shape format: (Batch, Hout, Wout, 2) + [(1, 3, 3, 1), (1, 3, 3, 2)], # no size change + [(2, 5, 5, 3), (2, 3, 3, 2)], # down-sampling + [(3, 6, 6, 1), (3, 8, 8, 2)], # up-sampling + ], + ), + ) + def test_resample( + self, compute_unit, backend, data_warp_shapes, + ): + if backend[0] == "neuralnetwork": + pytest.skip("nn backend not supported") + + tfa = pytest.importorskip("tensorflow_addons") + + data_shape, warp_shape = data_warp_shapes + + @make_tf_graph([data_shape, warp_shape]) + def build_model(x, warp): + return tfa.image.resampler(data=x, warp=warp) + + model, inputs, outputs = build_model + # warp exceeding input sizes in order to test more padding modes + input_values = [ + random_gen(data_shape, -100, 100), + random_gen(warp_shape, -15, 15), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestImageTransform(TensorFlowBaseTest): + @pytest.mark.skip( + "TODO: rdar://73165549 (Add other mode in 'affine' to coremltools when backend is ready)" + ) + @pytest.mark.parametrize( + "compute_unit, backend, transforms, interpolation, shapes", + itertools.product( + [True], + backends, + [ + [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0], + [1.0, 1.0, -250, 0.0, 1.0, 0.0, 0.0, 0.0], + [1.25, -1.75, 25.0, -25.0, 1.5, -1.5, 0.0, 0.0], + ], + ["BILINEAR"], + [ + ((1, 2, 2, 1), None), + ((2, 2, 2, 1), (2, 3)), + ((3, 5, 5, 2), (4, 4)), + ((1, 3, 3, 2), (6, 6)), + ((3, 50, 50, 2), (20, 20)), + ], + ), + ) + def test(self, compute_unit, backend, transforms, interpolation, shapes): + x_shape, output_shape = shapes + if backend[0] == "neuralnetwork": + pytest.skip("nn backend not supported") + + tfa = pytest.importorskip("tensorflow_addons") + + @make_tf_graph([x_shape]) + def 
build_model(x): + return tfa.image.transform( + x, + transforms=transforms, + interpolation=interpolation, + output_shape=output_shape, + ) + + model, inputs, outputs = build_model + input_values = [ + random_gen(x_shape, -100, 100), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, input_dict, outputs, compute_unit=compute_unit, backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, InputShape_OutputShape, op", + itertools.product( + compute_units, + backends, + [ + [(2, 5, 15, 3), (2, 5, 15, 3)], + [(2, 4, 8, 5), (2, 2, 4, 5)], + [(2, 4, 8, 3), (2, 9, 13, 3)], + ], + ["V2", "V3"], + ), + ) + def test_affine_transform(self, compute_unit, backend, InputShape_OutputShape, op): + if backend[0] == "neuralnetwork": + pytest.skip("Affine op not available in the neuralnetwork backend") + + input_shape, output_shape = InputShape_OutputShape + batch_size = input_shape[0] + transforms = np.random.rand(batch_size, 8) - 0.05 + transforms[:, 6:8] = 0 + + @make_tf_graph([input_shape]) + def build_model(x): + if op == "V2": + return tf.raw_ops.ImageProjectiveTransformV2( + images=x, + transforms=transforms, + fill_mode="CONSTANT", + output_shape=(output_shape[0], output_shape[1]), + interpolation="BILINEAR", + ) + elif op == "V3": + return tf.raw_ops.ImageProjectiveTransformV3( + images=x, + transforms=transforms, + fill_mode="CONSTANT", + output_shape=(output_shape[0], output_shape[1]), + interpolation="BILINEAR", + fill_value=0.0, + ) + else: + raise ValueError("tensorflow op {} not supported".format(op)) + + model, inputs, outputs = build_model + input_values = [np.random.rand(*input_shape).astype(np.float32)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestActivationSiLU(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, tf_op", + itertools.product( + compute_units, + backends, + list(range(1, 6)), + [ + tf.nn.swish, # TODO(yuduo): in TF 2.4.0+, it's renamed to tf.nn.silu, + tf.keras.activations.swish, + ], + ), + ) + def test(self, compute_unit, backend, rank, tf_op): + if backend[0] == "neuralnetwork": + pytest.skip("nn backend not supported") + + x_shape = tuple(np.random.randint(low=1, high=4, size=rank)) + + @make_tf_graph([x_shape]) + def build_model(x): + return tf_op(x) + + model, inputs, outputs = build_model + input_values = [ + random_gen(x_shape, -100, 100), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestResizeNearestNeighbor(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_shape, target_shape, align_corners, half_pixel_centers", + itertools.product( + compute_units, + backends, + [(1, 10, 20, 1), (2, 5, 1, 3)], + [(25, 30), (2, 20)], + [False], + [True, False], + ), + ) + def test_raw_ops( + self, + compute_unit, + backend, + input_shape, + target_shape, + align_corners, + half_pixel_centers, + ): + if align_corners is True and half_pixel_centers is True: + return + + if backend[0] == "neuralnetwork": + # neural network backend does not support fractional scale factors for nearest neighbor upsample op + if target_shape[-1] % input_shape[-1] != 0: + return + if target_shape[-2] % input_shape[-2] != 0: + return + + if backend[0] == "mlprogram" and compute_unit != ct.ComputeUnit.CPU_ONLY 
and not half_pixel_centers: + pytest.xfail("rdar://97399545 (TestResizeNearestNeighbor failing on mlprogram + GPU + half_pixel_centers=False)") + + @make_tf_graph([input_shape]) + def build_model(x): + return tf.raw_ops.ResizeNearestNeighbor( + images=x, + size=target_shape, + align_corners=align_corners, + half_pixel_centers=half_pixel_centers, + ) + + model, inputs, outputs = build_model + input_values = [random_gen(input_shape, -100, 100)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, size", + itertools.product(compute_units, backends, [(1, 1), (2, 3), (4, 1)]), + ) + def test_keras_layer(self, compute_unit, backend, size): + if backend[0] == "neuralnetwork": + pytest.skip("nn backend not supported") + + x_shape = tuple(np.random.randint(low=1, high=4, size=4)) + + @make_tf_graph([x_shape]) + def build_model(x): + return tf.keras.layers.UpSampling2D( + size=size, interpolation="nearest", + )(x) + + model, inputs, outputs = build_model + input_values = [random_gen(x_shape, -100, 100)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, size, method", + itertools.product( + compute_units, + backends, + [(1, 1), (2, 3)], + [tf.image.ResizeMethod.NEAREST_NEIGHBOR], + ), + ) + def test_tf_image_resize(self, compute_unit, backend, size, method): + if backend[0] == "mlprogram" and size == (1, 1): + pytest.xfail("rdar://79699954 (Nearest neighbor resize numerical mismatch when output size is (1,1))") + + if backend[0] == "neuralnetwork": + pytest.skip("nn backend not supported") + + x_shape = tuple(np.random.randint(low=1, high=3, size=4)) + + @make_tf_graph([x_shape]) + def build_model(x): + return tf.image.resize(x, size=size, method=method) + + model, inputs, outputs = build_model + input_values = [ + random_gen(x_shape, -100, 100), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestNormalizationTF2(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, func, backend, epsilon", + itertools.product( + compute_units, + [tf.raw_ops.FusedBatchNorm, tf.raw_ops.FusedBatchNormV3], + backends, + [1e-1, 1e-10] + ), + ) + def test_fused_batch_norm(self, compute_unit, func, backend, epsilon): + input_shape = np.random.randint(low=1, high=4, size=4) + attr_shape = [list(input_shape)[-1]] + + m = random_gen(shape=attr_shape, rand_min=-1.0, rand_max=1.0) + v = random_gen(shape=attr_shape, rand_min=0.0, rand_max=10.0) + o = random_gen(shape=attr_shape, rand_min=1.0, rand_max=10.0) + s = random_gen(shape=attr_shape, rand_min=-1.0, rand_max=1.0) + + @make_tf_graph([input_shape]) + def build_model(x): + return func( + x=x, + scale=s, + offset=o, + mean=m, + variance=v, + epsilon=epsilon, + is_training=False, + )[0] + + model, inputs, outputs = build_model + input_values = [random_gen(shape=input_shape)] + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend, + atol=1e-2, + rtol=1e-3, + ) + + +class TestElementWiseBinaryTF2(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + 
itertools.product(compute_units, backends, [rank for rank in range(1, 4)]), # False + ) + def test_add_v2(self, compute_unit, backend, rank): + x_shape = list(np.random.randint(low=2, high=5, size=rank)) + y_shape = x_shape[:] + for i in range(rank): + if np.random.randint(4) == 0: + y_shape[i] = 1 + if np.random.randint(2) == 0: + y_shape = [1] + y_shape + + @make_tf_graph([x_shape, y_shape]) + def build_model(x, y): + return tf.raw_ops.AddV2(x=x, y=y) + + model, inputs, outputs = build_model + + input_values = [ + np.random.randint(low=-1, high=1, size=x_shape).astype(np.float32), + np.random.randint(low=-1, high=1, size=y_shape).astype(np.float32), + ] + + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + +class TestControlFlowFromAutoGraph(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_if_unary_const(self, compute_unit, backend): + @make_tf_graph([(1,)]) + def build_model(x): + if x > 0.5: + y = x - 0.5 + else: + y = x + 0.5 + return y + + model, inputs, outputs = build_model + input_values = [np.array([0.7], dtype=np.float32)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_if_unary_double_if_positive_else_square(self, compute_unit, backend): + @make_tf_graph([(1,)]) + def build_model(x): + if x >= 0: + out = x + x + else: + out = x * x + return out + + model, inputs, outputs = build_model + input_values = [np.array([2], dtype=np.float32)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_if_binary_add_if_else_mul(self, compute_unit, backend): + @make_tf_graph([(1,), (1,)]) + def build_model(x, y): + if x > y: + out = x + x + else: + out = x * x + return out + + model, inputs, outputs = build_model + input_values = [ + np.array([3], dtype=np.float32), + np.array([7], dtype=np.float32), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_while_loop_square(self, compute_unit, backend): + @make_tf_graph([(1,)]) + def build_model(x): + i = 0 + while i < 10: + x *= 2 + i += 1 + return x + + model, inputs, outputs = build_model + input_values = [np.array([2.0], dtype=np.float32)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_while_loop_power(self, compute_unit, backend): + @make_tf_graph([(1,)]) + def build_model(x): + i = 0 + while i < 3: + x *= x + i += 1 + return x + + model, inputs, outputs = build_model + input_values = [np.array([2.0], dtype=np.float32)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + 
input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_while_loop_nested_body(self, compute_unit, backend): + @make_tf_graph([(1,)]) + def build_model(x): + i, j = 0, 10 + while i < j: + while 2 * i < i + 2: + i += 1 + x -= 1 + i += 2 + x *= 2 + return x + + model, inputs, outputs = build_model + input_values = [np.array([9.0], dtype=np.float32)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + +@pytest.mark.xfail(reason="rdar://76293949 (TF2 unit test InvalidArgumentError)", run=False) +class TestTensorList(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, size_dynamic_shape", + itertools.product( + compute_units, + backends, + [ + (1, True, None), + (1, True, (1,)), + (2, False, (1,)) + ], + ), + ) + def test_write_read_and_stack(self, compute_unit, backend, size_dynamic_shape): + size, dynamic_size, element_shape = size_dynamic_shape + + @make_tf_graph([(1,), (1,)]) + def build_model(x, y): + ta = tf.TensorArray( + tf.float32, + size=size, + dynamic_size=dynamic_size, + element_shape=element_shape, + ) + ta = ta.write(0, x) + ta = ta.write(1, y) + return ta.read(0), ta.read(1), ta.stack() + + model, inputs, outputs = build_model + input_values = [ + np.array([3.14], dtype=np.float32), + np.array([6.17], dtype=np.float32), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend, size_dynamic_shape", + itertools.product( + compute_units, + backends, + [ + (0, True, None), + (1, True, (1,)), + (3, False, (1,)) + ], + ), + ) + def test_unstack_and_read(self, compute_unit, backend, size_dynamic_shape): + size, dynamic_size, element_shape = size_dynamic_shape + + @make_tf_graph([(3, 1)]) + def build_model(x): + ta = tf.TensorArray( + tf.float32, + size=size, + dynamic_size=dynamic_size, + element_shape=element_shape, + ) + ta = ta.unstack(x) + return ta.read(0), ta.read(1), ta.read(2) + + model, inputs, outputs = build_model + input_values = [np.array([[3.14], [6.17], [12.14]], dtype=np.float32)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend, size_dynamic_shape", + itertools.product( + compute_units, + backends, + [ + (2, True, None), + (1, True, (1,)), + (3, False, (1,)) + ], + ), + ) + def test_write_and_gather(self, compute_unit, backend, size_dynamic_shape): + size, dynamic_size, element_shape = size_dynamic_shape + + @make_tf_graph([(1,), (1,)]) + def build_model(x, y): + ta = tf.TensorArray( + tf.float32, + size=size, + dynamic_size=dynamic_size, + element_shape=element_shape, + ) + ta = ta.write(0, x) + ta = ta.write(1, y) + return ta.gather(indices=[0, 1]) + + model, inputs, outputs = build_model + input_values = [ + np.array([3.14], dtype=np.float32), + np.array([6.17], dtype=np.float32), + ] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend, size_dynamic_shape", + itertools.product( + 
compute_units, + backends, + [ + (2, True, None), + (1, True, (1,)), + (3, False, (1,)) + ], + ), + ) + def test_scatter_and_read(self, compute_unit, backend, size_dynamic_shape): + size, dynamic_size, element_shape = size_dynamic_shape + + @make_tf_graph([(3, 1)]) + def build_model(x): + ta = tf.TensorArray( + tf.float32, + size=size, + dynamic_size=dynamic_size, + element_shape=element_shape, + ) + ta = ta.scatter(indices=[0, 1, 2], value=x) + return ta.read(0), ta.read(1), ta.read(2) + + model, inputs, outputs = build_model + input_values = [np.array([[3.14], [6.17], [12.14]], dtype=np.float32)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + @pytest.mark.parametrize( + "compute_unit, backend, size_dynamic_shape", + itertools.product(compute_units, backends, [(2, False, (None, 8))]), + ) + def test_partial_element_shape(self, compute_unit, backend, size_dynamic_shape): + size, dynamic_size, element_shape = size_dynamic_shape + + @make_tf_graph([(3, 1, 8)]) + def build_model(x): + ta = tf.TensorArray( + tf.float32, + size=size, + dynamic_size=dynamic_size, + element_shape=element_shape, + ) + ta = ta.scatter(indices=[0, 1, 2], value=x) + return ta.read(0), ta.read(1), ta.read(2) + + model, inputs, outputs = build_model + input_values = [np.random.rand(3, 1, 8).astype(np.float32)] + input_dict = dict(zip(inputs, input_values)) + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) + + +class TestPartitionedCall(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product(compute_units, backends), + ) + def test_partitioned_call_optimized_to_add_op(self, compute_unit, backend): + """ + The PartitionedCall will be optimized to V2Add op in TF's internal optimization pass (see + `_run_inline_graph_optimization`), so this test passes even when we haven't implemented + the `PartitionedCall` op). + """ + x_shape = [2, 3] + y_shape = [2, 3] + + @tf.function + def simple_func(*args): + output = [args[0] + args[1]] + return output + + @make_tf_graph([x_shape, y_shape]) + def build_model(x, y): + return tf.raw_ops.PartitionedCall( + args=[x, y], + f=simple_func.get_concrete_function(tf.zeros(x_shape), tf.zeros(y_shape)), + Tout=[tf.float32] + ) + + model, inputs, outputs = build_model + + input_values = [ + np.zeros(x_shape).astype(np.float32), + np.zeros(y_shape).astype(np.float32), + ] + + input_dict = dict(zip(inputs, input_values)) + + TensorFlowBaseTest.run_compare_tf( + model, + input_dict, + outputs, + compute_unit=compute_unit, + backend=backend + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/test/test_v2_ops_tf_keras.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/test/test_v2_ops_tf_keras.py new file mode 100644 index 00000000..7dd167eb --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/test/test_v2_ops_tf_keras.py @@ -0,0 +1,1739 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
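+#
+# Every test in this file follows one recipe: build a small tf.keras model,
+# convert it, and compare Core ML predictions against TensorFlow through
+# run_compare_tf_keras. A minimal sketch of the shared pattern (the shape and
+# layer are examples only; compute_unit and backend come from the
+# parametrized test arguments):
+#
+#     shape = (2, 3)
+#     model = tf.keras.Sequential(
+#         [tf.keras.layers.ReLU(batch_input_shape=shape)]
+#     )
+#     TensorFlowBaseTest.run_compare_tf_keras(
+#         model,
+#         [random_gen(shape, -10, 10)],
+#         compute_unit=compute_unit,
+#         backend=backend,
+#     )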
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools +import platform +import random +from distutils.version import StrictVersion as _StrictVersion + +import numpy as np +import pytest + +import coremltools as ct +from coremltools._deps import _get_version +from coremltools.converters.mil import testing_reqs +from coremltools.converters.mil.frontend._utils import is_symbolic_dim_in_prog +from coremltools.converters.mil.frontend.tensorflow.test.testing_utils import ( + TensorFlowBaseTest, +) +from coremltools.converters.mil.frontend.tensorflow2.test.testing_utils import ( + TensorFlow2BaseTest, +) +from coremltools.converters.mil.testing_utils import get_op_types_in_program, random_gen +from coremltools.models.utils import _macos_version + +TensorFlowBaseTest.run_compare_tf_keras = TensorFlow2BaseTest.run_compare_tf_keras +backends = testing_reqs.backends +compute_units = testing_reqs.compute_units + +tf = pytest.importorskip("tensorflow", minversion="2.1.0") + +import tensorflow as _tf # should be after pytest.importorskip checks +from tensorflow.keras import Input +from tensorflow.keras.layers import Conv2D, GlobalMaxPooling2D +from tensorflow.keras.models import Model + + +class TestActivation(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, op", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)], + [ + tf.keras.layers.ELU, + tf.keras.layers.LeakyReLU, + tf.keras.layers.ReLU, + tf.keras.layers.PReLU, + tf.keras.layers.Softmax, + tf.keras.layers.ThresholdedReLU, + ], + ), + ) + def test_layer(self, compute_unit, backend, rank, op): + shape = np.random.randint(low=2, high=4, size=rank) + model = tf.keras.Sequential([op(batch_input_shape=shape)]) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, -10, 10)], + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank, op", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)], + [ + tf.keras.activations.elu, + tf.keras.activations.exponential, + tf.keras.activations.hard_sigmoid, + tf.keras.activations.linear, + tf.keras.activations.relu, + tf.keras.activations.selu, + tf.keras.activations.sigmoid, + tf.keras.activations.softmax, + tf.keras.activations.softplus, + tf.keras.activations.softsign, + tf.keras.activations.tanh, + ], + ), + ) + def test_activation(self, compute_unit, backend, rank, op): + kwargs = ( + {"atol": 1e-3, "rtol": 1e-4} + if op == tf.keras.activations.exponential and compute_unit != ct.ComputeUnit.CPU_ONLY + else {} + ) + if op == tf.keras.activations.softmax and rank == 1: + return # skip apply softmax to a tensor that is 1D + shape = np.random.randint(low=2, high=4, size=rank) + model = tf.keras.Sequential( + [tf.keras.layers.Activation(op, batch_input_shape=shape)] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, -10, 10)], + compute_unit=compute_unit, + backend=backend, + **kwargs + ) + + @pytest.mark.parametrize("backend", backends) + def test_conv2d_prelu_fusion(self, backend): + x_shape = (1, 10, 10, 32) + x = tf.keras.Input(batch_input_shape=x_shape) # (B, H, W, C) + x1 = tf.keras.layers.Conv2D(16, kernel_size=1)(x) + x1 = tf.keras.layers.PReLU(alpha_initializer='glorot_uniform', shared_axes=[1, 2])(x1) + x1 = tf.keras.layers.Conv2D(16, kernel_size=1)(x1) + x1 = 
tf.keras.layers.PReLU(alpha_initializer='glorot_uniform', shared_axes=[1, 2])(x1) + keras_model = tf.keras.Model(inputs=x, outputs=x1) + + res = TensorFlowBaseTest.run_compare_tf_keras( + keras_model, + [random_gen(x_shape, -1, 1)], + compute_unit=ct.ComputeUnit.CPU_ONLY, + backend=backend, + ) + coreml_model = res[1] + mil_prog = coreml_model._get_mil_internal() + # assert that "prelu" ops are present in the mil program, + # which should be if "fuse_prelu" pass worked correctly + assert len(mil_prog.find_ops(op_type="prelu")) == 2 + assert "relu" not in get_op_types_in_program(mil_prog) + + +class TestBinary(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, op", + itertools.product( + compute_units, + backends, + [rank for rank in range(2, 6)], + [ + tf.keras.layers.Add, + tf.keras.layers.Average, + tf.keras.layers.Subtract, + tf.keras.layers.Maximum, + tf.keras.layers.Minimum, + ], + ), + ) + def test(self, compute_unit, backend, rank, op): + shape = np.random.randint(low=1, high=4, size=rank) + input_x = tf.keras.layers.Input(batch_input_shape=tuple(shape)) + input_y = tf.keras.layers.Input(batch_input_shape=tuple(shape)) + out = op()([input_x, input_y]) + model = tf.keras.Model(inputs=[input_x, input_y], outputs=out) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, -10, 10), random_gen(shape, -10, 10)], + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank, axes, normalize", + itertools.product( + compute_units, + backends, + [rank for rank in range(2, 3)], + [-1,], + [True, False], + ), + ) + def test_dot(self, compute_unit, rank, backend, axes, normalize): + shape = np.random.randint(low=2, high=4, size=rank) + input_x = tf.keras.layers.Input(batch_input_shape=tuple(shape)) + input_y = tf.keras.layers.Input(batch_input_shape=tuple(shape)) + out = tf.keras.layers.Dot(axes=axes, normalize=normalize)([input_x, input_y]) + model = tf.keras.Model(inputs=[input_x, input_y], outputs=out) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, -10, 10), random_gen(shape, -10, 10)], + compute_unit=compute_unit, + backend=backend, + ) + + +class TestConcatenate(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, axis", + itertools.product( + compute_units, backends, [rank for rank in range(5, 6)], [-1, -2], + ), + ) + def test(self, compute_unit, backend, rank, axis): + shape = np.random.randint(low=2, high=4, size=rank) + inputs = [] + for _ in range(2): + inputs.append(tf.keras.layers.Input(batch_input_shape=tuple(shape))) + out = tf.keras.layers.Concatenate(axis=axis)(inputs) + model = tf.keras.Model(inputs=inputs, outputs=out) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape), random_gen(shape)], + compute_unit=compute_unit, + backend=backend, + ) + + +class TestConvolution(TensorFlowBaseTest): + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "op", + "padding", + "data_format", + "spatial_dim_and_ks", + "strides", + "dilations", + "batch_size", + "groups", + ] + ), + itertools.product( + compute_units, + backends, + [ + tf.keras.layers.Conv1D, + tf.keras.layers.Conv2D, + tf.keras.layers.Conv3D, + ], + ["same", "valid"], + ["channels_last"], + [ + (2, 4, 4, 2, 2, 2), + (3, 7, 5, 1, 3, 2) + ], + [ + (1, 1, 1), + (1, 2, 3), + (1, 3, 2) + ], + [ + (1, 1, 1), (2, 2, 2), + ], + [1, 3], + [1, 2], + ), + ) + def test_conv( + self, + compute_unit, + backend, + op, + padding, + data_format, + 
spatial_dim_and_ks, + strides, + dilations, + batch_size, + groups, + ): + if _get_version(_tf.__version__) < _StrictVersion("2.5.0") and groups != 1: + pytest.skip("TF supports groupwise convolution only for version > tf.2.5.0-rc3") + + if _get_version(_tf.__version__) > _StrictVersion("2.8.0") and groups != 1: + pytest.xfail("rdar://100814590 ([TF] [Infra] TF 2.10.0 Uses Unimplemented " + "PartitionedCall op for Groupwise Convolution)") + + if op == tf.keras.layers.Conv3D and groups != 1: + pytest.xfail("rdar://81629932 (Conv3d with group > 1 tests failing in TF2.0 converter)") + + for i, stride in enumerate(strides): + if stride > 1 and dilations[i] > 1: + pytest.skip("TF does not support strides > 1 in conjunction with dilation_rate > 1") + + for d in dilations: + if d > 1 and op == tf.keras.layers.Conv3D: + pytest.skip("Dilations with Conv3D not supported yet, since SpaceToBatchND is " + "only supported for ranks 3 or 4") + + s1, s2, s3, k1, k2, k3 = spatial_dim_and_ks + c_in, c_out = 2, 4 + input_shape = None + kernel_size = None + if op == tf.keras.layers.Conv1D: + input_shape = (batch_size, s3, c_in) + kernel_size = k3 + strides = strides[2] + dilations = dilations[2] + elif op == tf.keras.layers.Conv2D: + input_shape = (batch_size, s2, s3, c_in) + kernel_size = (k2, k3) + strides = (strides[1], strides[2]) + dilations = dilations[1:] + elif op == tf.keras.layers.Conv3D: + input_shape = (batch_size, s1, s2, s3, c_in) + kernel_size = (k1, k2, k3) + + model = tf.keras.Sequential( + [ + op( + batch_input_shape=input_shape, + filters=c_out, + kernel_size=kernel_size, + strides=strides, + padding=padding.upper(), + data_format=data_format, + dilation_rate=dilations, + groups=groups, + ) + ] + ) + + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(input_shape, rand_min=-10, rand_max=10)], + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "op", + "padding", + "data_format", + "spatial_dim_and_ks", + "strides", + "dilations", + "batch_size", + ] + ), + itertools.product( + compute_units, + backends, + [ + tf.keras.layers.LocallyConnected1D, + tf.keras.layers.LocallyConnected2D, + ], + ["same", "valid"], + ["channels_last"], + [ + (2, 4, 4, 2, 2, 2), + (3, 7, 5, 1, 3, 2) + ], + [ + (1, 1, 1), + (1, 2, 3), + (1, 3, 2) + ], + [ + (1, 1, 1), (2, 2, 2), + ], + [1, 3], + ), + ) + def test_conv_locally_connected( + self, + compute_unit, + backend, + op, + padding, + data_format, + spatial_dim_and_ks, + strides, + dilations, + batch_size, + ): + s1, s2, s3, k1, k2, k3 = spatial_dim_and_ks + c_in, c_out = 2, 3 + input_shape = None + kernel_size = None + if op in {tf.keras.layers.Conv1D, tf.keras.layers.LocallyConnected1D}: + input_shape = (batch_size, s3, c_in) + kernel_size = k3 + strides = strides[2] + dilations = dilations[2] + elif op in {tf.keras.layers.Conv2D, tf.keras.layers.LocallyConnected2D}: + input_shape = (batch_size, s2, s3, c_in) + kernel_size = (k2, k3) + strides = (strides[1], strides[2]) + dilations = dilations[1:] + elif op == tf.keras.layers.Conv3D: + input_shape = (batch_size, s1, s2, s3, c_in) + kernel_size = (k1, k2, k3) + + if op in { + tf.keras.layers.LocallyConnected1D, + tf.keras.layers.LocallyConnected2D, + }: + if padding != "valid": + return # tf.keras only supports "valid" + model = tf.keras.Sequential( + [ + op( + batch_input_shape=input_shape, + filters=c_out, + kernel_size=kernel_size, + strides=strides, + padding=padding.upper(), + data_format=data_format, + ) + ] + ) + 
else:
+            model = tf.keras.Sequential(
+                [
+                    op(
+                        batch_input_shape=input_shape,
+                        filters=c_out,
+                        kernel_size=kernel_size,
+                        strides=strides,
+                        padding=padding.upper(),
+                        data_format=data_format,
+                        dilation_rate=dilations,
+                    )
+                ]
+            )
+
+        TensorFlowBaseTest.run_compare_tf_keras(
+            model,
+            [random_gen(input_shape, rand_min=-10, rand_max=10)],
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+    @pytest.mark.parametrize(
+        ",".join(
+            [
+                "compute_unit",
+                "backend",
+                "op",
+                "padding",
+                "data_format",
+                "spatial_dim_and_ks",
+                "strides",
+                "dilations",
+                "batch_size",
+            ]
+        ),
+        itertools.product(
+            compute_units,
+            backends,
+            [tf.keras.layers.DepthwiseConv2D],
+            ["same", "valid"],
+            ["channels_last"],
+            [(11, 12, 3, 2), (12, 11, 2, 3)],
+            [(1, 1), (2, 2)],
+            [(1, 1), (2, 2)],
+            [1, 3],
+        ),
+    )
+    def test_depth_wise_conv(
+        self,
+        compute_unit,
+        backend,
+        op,
+        padding,
+        data_format,
+        spatial_dim_and_ks,
+        strides,
+        dilations,
+        batch_size,
+    ):
+        s1, s2, k1, k2 = spatial_dim_and_ks
+        c_in = 2
+
+        if len(strides) != np.sum(strides) and len(dilations) != np.sum(dilations):
+            # TF produces incorrect output when strides and dilations are both non-one
+            return
+
+        input_shape = (batch_size, s1, s2, c_in)
+        model = tf.keras.Sequential(
+            [
+                op(
+                    batch_input_shape=input_shape,
+                    kernel_size=(k1, k2),
+                    strides=strides,
+                    padding=padding.upper(),
+                    data_format=data_format,
+                    dilation_rate=dilations,
+                )
+            ]
+        )
+
+        TensorFlowBaseTest.run_compare_tf_keras(
+            model,
+            [random_gen(input_shape, rand_min=-10, rand_max=10)],
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+    @pytest.mark.parametrize(
+        ",".join(
+            [
+                "compute_unit",
+                "backend",
+                "padding",
+            ]
+        ),
+        itertools.product(
+            compute_units,
+            backends,
+            ["same", "valid"],
+        ),
+    )
+    def test_conv2d_padding_dynamic_input(
+        self,
+        compute_unit,
+        backend,
+        padding,
+    ):
+        if backend[0] == "mlprogram" and _macos_version() < (13, 0):
+            pytest.skip("Error in declaring network.")
+
+        # Test the given padding mode with dynamic (None) spatial input dimensions
+        input_layer = Input(batch_size=1, shape=(None, None, 1))
+        layer = Conv2D(
+            filters=16,
+            kernel_size=(3, 3),
+            padding=padding,
+            activation="relu"
+        )(input_layer)
+        output_layer = GlobalMaxPooling2D()(layer)
+        model = Model(inputs=[input_layer], outputs=[output_layer])
+        TensorFlowBaseTest.run_compare_tf_keras(
+            model,
+            [random_gen((1, 80, 40, 1), rand_min=-10, rand_max=10)],
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+
+    @pytest.mark.parametrize(
+        ",".join(
+            [
+                "compute_unit",
+                "backend",
+                "op",
+                "padding",
+                "data_format",
+                "spatial_dim_and_ks",
+                "strides",
+                "dilations",
+                "batch_size",
+            ]
+        ),
+        itertools.product(
+            compute_units,
+            backends,
+            [tf.keras.layers.SeparableConv1D, tf.keras.layers.SeparableConv2D],
+            ["same", "valid"],
+            ["channels_last"],
+            [
+                (14, 14, 2, 2),
+                (11, 9, 3, 2),
+                (12, 11, 2, 3)
+            ],
+            [
+                (1, 1), (2, 2), (3, 3)
+            ],
+            [(1, 1)],
+            [1, 3],
+        ),
+    )
+    def test_separable_conv(
+        self,
+        compute_unit,
+        backend,
+        op,
+        padding,
+        data_format,
+        spatial_dim_and_ks,
+        strides,
+        dilations,
+        batch_size,
+    ):
+        s1, s2, k1, k2 = spatial_dim_and_ks
+        c_in, c_out = 2, 3
+        input_shape = None
+        kernel_size = None
+        if op == tf.keras.layers.SeparableConv1D:
+            input_shape = (batch_size, s2, c_in)
+            kernel_size = k2
+            strides = strides[1]
+            dilations = dilations[1]
+        elif op == tf.keras.layers.SeparableConv2D:
+            input_shape = (batch_size, s1, s2, c_in)
+            kernel_size = (k1, k2)
+
+        model = tf.keras.Sequential(
+            [
+                op(
+                    batch_input_shape=input_shape,
+                    filters=c_out,
kernel_size=kernel_size, + strides=strides, + padding=padding.upper(), + data_format=data_format, + dilation_rate=dilations, + ) + ] + ) + + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(input_shape, rand_min=-10, rand_max=10)], + compute_unit=compute_unit, + backend=backend, + ) + +class TestConvTranspose(TensorFlowBaseTest): + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "op", + "padding", + "data_format", + "spatial_dim_and_ks", + "output_padding", + "strides", + "dilations", + "batch_size", + ] + ), + itertools.product( + compute_units, + backends, + [tf.keras.layers.Conv2DTranspose, tf.keras.layers.Conv3DTranspose], + ["same", "valid"], + ["channels_last"], + [(7, 11, 12, 1, 2, 2), (9, 5, 7, 3, 3, 3)], + [(1, 1, 1)], + [(2, 2, 2), (2, 3, 3)], + [(1, 1, 1)], # Dilation > 1 not supported by TF + [1, 3], + ), + ) + def test_conv_transpose( + self, + compute_unit, + backend, + op, + padding, + data_format, + spatial_dim_and_ks, + output_padding, + strides, + dilations, + batch_size, + ): + if ( + platform.machine() == "arm64" + and backend == ("mlprogram", "fp16") + and op == tf.keras.layers.Conv3DTranspose + and padding == "valid" + and spatial_dim_and_ks == (7, 11, 12, 1, 2, 2) + and strides == (2, 3, 3) + and batch_size == 3 + ): + pytest.xfail("rdar://98015195 ([M1 native tests] Some MIL unittests are failing M1 native)") + + s1, s2, s3, k1, k2, k3 = spatial_dim_and_ks + c_in, c_out = 2, 3 + input_shape = None + kernel_size = None + if op == tf.keras.layers.Conv2DTranspose: + input_shape = (batch_size, s2, s3, c_in) + kernel_size = (k2, k3) + strides = (strides[1], strides[2]) + dilations = dilations[1:] + output_padding = (output_padding[1], output_padding[2]) + elif op == tf.keras.layers.Conv3DTranspose: + input_shape = (batch_size, s1, s2, s3, c_in) + kernel_size = (k1, k2, k3) + + model = tf.keras.Sequential( + [ + op( + batch_input_shape=input_shape, + filters=c_out, + kernel_size=kernel_size, + strides=strides, + padding=padding.upper(), + output_padding=output_padding, + data_format=data_format, + dilation_rate=dilations, + ) + ] + ) + + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(input_shape, rand_min=-10, rand_max=10)], + compute_unit=compute_unit, + backend=backend, + ) + +class TestCropping(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, begin_end", + itertools.product( + compute_units, backends, [(0, 0), (1, 1), (1, 2), (2, 1), (2, 4), (3, 2)], + ), + ) + def test_cropping_1d(self, compute_unit, backend, begin_end): + shape = (1, 10, 3) + model = tf.keras.Sequential( + [tf.keras.layers.Cropping1D(batch_input_shape=shape, cropping=begin_end)] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-1, rand_max=1)], + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, begin_end1, begin_end2", + itertools.product( + compute_units, + backends, + [(0, 0), (1, 1), (2, 1)], + [(0, 0), (1, 2), (4, 2)], + ), + ) + def test_cropping_2d(self, compute_unit, backend, begin_end1, begin_end2): + shape = (1, 10, 10, 3) + model = tf.keras.Sequential( + [ + tf.keras.layers.Cropping2D( + batch_input_shape=shape, cropping=(begin_end1, begin_end2) + ) + ] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-1, rand_max=1)], + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, begin_end1, begin_end2, begin_end3", + 
itertools.product( + compute_units, + backends, + [(0, 0), (1, 2), (2, 1)], + [(1, 1), (1, 2), (4, 2)], + [(0, 0), (1, 1), (2, 4)], + ), + ) + def test_cropping_3d( + self, compute_unit, backend, begin_end1, begin_end2, begin_end3 + ): + shape = (1, 10, 10, 10, 3) + model = tf.keras.Sequential( + [ + tf.keras.layers.Cropping3D( + batch_input_shape=shape, + cropping=(begin_end1, begin_end2, begin_end3), + ) + ] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-1, rand_max=1)], + compute_unit=compute_unit, + backend=backend, + ) + + +class TestDense(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, units, activation, use_bias", + itertools.product( + compute_units, + backends, + [rank for rank in range(2, 6)], + [2, 4, 8], + [tf.nn.relu, tf.nn.softmax, tf.nn.swish], + [True, False], + ), + ) + def test(self, compute_unit, backend, rank, units, activation, use_bias): + shape = np.random.randint(low=2, high=4, size=rank) + model = tf.keras.Sequential( + [ + tf.keras.layers.Dense( + batch_input_shape=shape, + units=units, + activation=activation, + use_bias=use_bias, + ) + ] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-10, rand_max=10)], + compute_unit=compute_unit, + backend=backend, + ) + + +class TestEmbedding(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, dims, batch_size, input_length", + itertools.product( + compute_units, + backends, + [(4, 1), (8, 3), (16, 5), (32, 7), (64, 9)], + [1, 3, 5], + [2, 4, 10], + ), + ) + def test(self, compute_unit, backend, dims, batch_size, input_length): + # input shape: 2D tensor (batch_size, input_length) + # output shape: 3D tensor (batch_size, input_length, output_dim) + shape = (batch_size, input_length) + model = tf.keras.Sequential( + [ + tf.keras.layers.Embedding( + batch_input_shape=shape, + input_dim=dims[0], + output_dim=dims[1], + input_length=input_length, + ) + ] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=0, rand_max=dims[0])], + compute_unit=compute_unit, + backend=backend, + atol=1e-3, + rtol=1e-4, + ) + + +class TestFlatten(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, data_format", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)], + ["channels_last", "channels_first"], + ), + ) + def test(self, compute_unit, backend, rank, data_format): + shape = np.random.randint(low=2, high=4, size=rank) + model = tf.keras.Sequential( + [tf.keras.layers.Flatten(batch_input_shape=shape, data_format=data_format,)] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-10, rand_max=10)], + compute_unit=compute_unit, + backend=backend, + ) + + +class TestLambda(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, function", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)], + [ + lambda x: x + x, + lambda x: x * 3.14 - 1.0, + lambda x: np.sqrt(4) + x, + lambda x: tf.math.abs(x), + ], + ), + ) + def test_unary(self, compute_unit, backend, rank, function): + shape = np.random.randint(low=2, high=4, size=rank) + model = tf.keras.Sequential( + [tf.keras.layers.Lambda(batch_input_shape=shape, function=function,)] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-5, rand_max=5)], + compute_unit=compute_unit, + backend=backend, + ) + +class TestBatchNormalization(TensorFlowBaseTest): + 
@pytest.mark.parametrize( + "compute_unit, backend, rank, axis, momentum, epsilon, mixed_precision", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)], + [0, -1], + [0.99, 0.85], + [1e-2, 1e-5], + [True, False], + ), + ) + def test_batch_normalization( + self, compute_unit, backend, rank, axis, momentum, epsilon, mixed_precision + ): + if backend[0] != "mlprogram" and mixed_precision: + pytest.skip("neuralnetwork backend doesn't support fp16 computation.") + + if mixed_precision: + tf.keras.mixed_precision.set_global_policy('mixed_float16') + + shape = np.random.randint(low=2, high=4, size=rank) + model = tf.keras.Sequential( + [ + tf.keras.layers.BatchNormalization( + batch_input_shape=shape, + axis=axis, + momentum=momentum, + epsilon=epsilon, + ) + ] + ) + random_weights = np.random.rand(4, shape[axis]) + model.layers[0].set_weights(random_weights) + + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-10, rand_max=10)], + compute_unit=compute_unit, + backend=backend, + ) + + if mixed_precision: + tf.keras.mixed_precision.set_global_policy(tf.keras.backend.floatx()) + + @pytest.mark.parametrize( + "compute_unit, backend, rank_and_axis, momentum, epsilon, mixed_precision", + itertools.product( + compute_units, + backends, + [(4, 1), (4, -3)], + [0.99, 0.85], + [1e-2, 1e-5], + [True, False], + ), + ) + def test_fused_batch_norm_v3( + self, compute_unit, backend, rank_and_axis, momentum, epsilon, mixed_precision + ): + if backend[0] != "mlprogram" and mixed_precision: + pytest.skip("neuralnetwork backend doesn't support fp16 computation.") + + if mixed_precision: + tf.keras.mixed_precision.set_global_policy('mixed_float16') + + rank, axis = rank_and_axis + shape = np.random.randint(low=2, high=4, size=rank) + model = tf.keras.Sequential( + [ + tf.keras.layers.BatchNormalization( + batch_input_shape=shape, + axis=axis, + momentum=momentum, + epsilon=epsilon, + ) + ] + ) + random_weights = np.random.rand(4, shape[axis]) + model.layers[0].set_weights(random_weights) + + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-10, rand_max=10)], + compute_unit=compute_unit, + backend=backend, + ) + + if mixed_precision: + tf.keras.mixed_precision.set_global_policy(tf.keras.backend.floatx()) + + +class TestInstanceNormalization(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, axis, epsilon, center, scale", + itertools.product( + compute_units, + backends, + [rank for rank in range(4, 5)], + [-1], + [1e-3, 1e-5], + [True, False], + [True, False], + ), + ) + def test_instance_normalization( + self, compute_unit, backend, rank, axis, epsilon, center, scale + ): + tensorflow_addons = pytest.importorskip("tensorflow_addons") + from tensorflow_addons.layers import InstanceNormalization + + shape = np.random.randint(low=2, high=4, size=rank) + model = tf.keras.Sequential( + [ + InstanceNormalization( + batch_input_shape=shape, + axis=axis, + epsilon=epsilon, + center=center, + scale=scale, + ) + ] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-1, rand_max=1)], + compute_unit=compute_unit, + backend=backend, + atol=1e-2, + rtol=1e-3, + ) + + +class TestNormalization(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, axis, epsilon, dynamic", + itertools.product( + compute_units, + backends, + [rank for rank in range(3, 4)], + [-1,], + [1e-2, 1e-10], + [True, False], + ), + ) + def test_layer_normalization(self, 
compute_unit, backend, rank, axis, epsilon, dynamic):
+        shape = np.random.randint(low=2, high=4, size=rank)
+        keras_shape = shape.tolist()
+
+        if dynamic:
+            keras_shape[0] = None
+
+        model = tf.keras.Sequential(
+            [
+                tf.keras.layers.LayerNormalization(
+                    batch_input_shape=keras_shape, axis=axis, epsilon=epsilon, trainable=False
+                )
+            ]
+        )
+        TensorFlowBaseTest.run_compare_tf_keras(
+            model,
+            [random_gen(shape, rand_min=-100, rand_max=100)],
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+
+    @pytest.mark.parametrize(
+        "compute_unit, backend, rank, groups, axis, epsilon, center, scale",
+        itertools.product(
+            compute_units,
+            backends,
+            [rank for rank in range(4, 5)],
+            [1, 2, 3],
+            [-1],
+            [1e-3, 1e-5],
+            [True, False],
+            [True, False],
+        ),
+    )
+    def test_group_normalization(
+        self, compute_unit, backend, rank, groups, axis, epsilon, center, scale
+    ):
+        tensorflow_addons = pytest.importorskip("tensorflow_addons")
+        from tensorflow_addons.layers import GroupNormalization
+
+        shape = np.random.randint(low=2, high=4, size=rank)
+        shape[-1] = shape[-1] * groups  # channels must be divisible by groups
+        model = tf.keras.Sequential(
+            [
+                GroupNormalization(
+                    batch_input_shape=shape,
+                    groups=groups,
+                    axis=axis,
+                    epsilon=epsilon,
+                    center=center,
+                    scale=scale,
+                )
+            ]
+        )
+        TensorFlowBaseTest.run_compare_tf_keras(
+            model,
+            [random_gen(shape, rand_min=-1, rand_max=1)],
+            compute_unit=compute_unit,
+            backend=backend,
+            atol=1e-3,
+            rtol=1e-4,
+        )
+
+
+class TestPadding(TensorFlowBaseTest):
+    @pytest.mark.parametrize(
+        "compute_unit, backend, op, data_format, padding, mixed_precision",
+        itertools.product(
+            compute_units,
+            backends,
+            [
+                tf.keras.layers.ZeroPadding1D,
+                tf.keras.layers.ZeroPadding2D,
+                tf.keras.layers.ZeroPadding3D,
+            ],
+            ["channels_first", "channels_last"],
+            [(1, 1, 1), (2, 2, 2), (3, 3, 3), (1, 3, 4), (2, 3, 5)],
+            [True, False],
+        ),
+    )
+    def test(self, compute_unit, backend, op, data_format, padding, mixed_precision):
+        if backend[0] != "mlprogram" and mixed_precision:
+            pytest.skip("neuralnetwork backend doesn't support fp16 computation.")
+
+        if mixed_precision:
+            tf.keras.mixed_precision.set_global_policy("mixed_float16")
+
+        shape = None
+        kwargs = {}
+        if op == tf.keras.layers.ZeroPadding1D:
+            padding = padding[-1]
+            shape = np.random.randint(low=2, high=4, size=3)
+        elif op == tf.keras.layers.ZeroPadding2D:
+            padding = padding[1:]
+            kwargs = {"data_format": data_format}
+            shape = np.random.randint(low=2, high=4, size=4)
+        elif op == tf.keras.layers.ZeroPadding3D:
+            kwargs = {"data_format": data_format}
+            shape = np.random.randint(low=2, high=4, size=5)
+        model = tf.keras.Sequential(
+            [op(batch_input_shape=shape, padding=padding, **kwargs)]
+        )
+        TensorFlowBaseTest.run_compare_tf_keras(
+            model,
+            [random_gen(shape, rand_min=-10, rand_max=10)],
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+        if mixed_precision:
+            tf.keras.mixed_precision.set_global_policy(tf.keras.backend.floatx())
+
+
+class TestPermute(TensorFlowBaseTest):
+    @pytest.mark.parametrize(
+        "compute_unit, backend, rank_and_perm",
+        itertools.product(
+            compute_units,
+            backends,
+            [
+                (rank, perm)
+                for rank in range(3, 6)
+                for perm in list(itertools.permutations(range(rank)[1:]))
+            ],
+        ),
+    )
+    def test(self, compute_unit, backend, rank_and_perm):
+        rank, perm = rank_and_perm
+        shape = np.random.randint(low=2, high=4, size=rank)
+        model = tf.keras.Sequential(
+            [tf.keras.layers.Permute(batch_input_shape=shape, dims=perm)]
+        )
+        TensorFlowBaseTest.run_compare_tf_keras(
model, + [random_gen(shape, rand_min=-10, rand_max=10)], + compute_unit=compute_unit, + backend=backend, + ) + + +class TestGlobalPooling(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, op, data_format", + itertools.product( + compute_units, + backends, + [ + tf.keras.layers.GlobalAveragePooling1D, + tf.keras.layers.GlobalAveragePooling2D, + tf.keras.layers.GlobalAveragePooling3D, + tf.keras.layers.GlobalMaxPool1D, + tf.keras.layers.GlobalMaxPool2D, + tf.keras.layers.GlobalMaxPool3D, + ], + ["channels_first", "channels_last"], + ), + ) + def test_global_pooling(self, compute_unit, backend, op, data_format): + shape = None + if op in { + tf.keras.layers.GlobalAveragePooling1D, + tf.keras.layers.GlobalMaxPool1D, + }: + shape = np.random.randint(low=2, high=4, size=3) + elif op in { + tf.keras.layers.GlobalAveragePooling2D, + tf.keras.layers.GlobalMaxPool2D, + }: + shape = np.random.randint(low=2, high=4, size=4) + elif op in { + tf.keras.layers.GlobalAveragePooling3D, + tf.keras.layers.GlobalMaxPool3D, + }: + shape = np.random.randint(low=2, high=4, size=5) + model = tf.keras.Sequential( + [op(batch_input_shape=shape, data_format=data_format)] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-10, rand_max=10)], + compute_unit=compute_unit, + backend=backend, + ) + + +class TestPooling(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, op, data_format, pool_size", + itertools.product( + compute_units, + backends, + [ + tf.keras.layers.AveragePooling1D, + tf.keras.layers.AveragePooling2D, + tf.keras.layers.AveragePooling3D, + tf.keras.layers.MaxPool1D, + tf.keras.layers.MaxPool2D, + tf.keras.layers.MaxPool3D, + ], + ["channels_first", "channels_last"], + [(2, 2, 1), (2, 3, 2), (1, 2, 3)], + ), + ) + def test_pooling(self, compute_unit, backend, op, data_format, pool_size): + shape = None + if op in {tf.keras.layers.AveragePooling1D, tf.keras.layers.MaxPool1D}: + shape = np.random.randint(low=3, high=9, size=3) + pool_size = pool_size[2] + elif op in {tf.keras.layers.AveragePooling2D, tf.keras.layers.MaxPool2D}: + if data_format == "channels_first": + return # AvgPoolingOp only supports NHWC on CPU + shape = np.random.randint(low=3, high=9, size=4) + pool_size = pool_size[1:] + elif op in {tf.keras.layers.AveragePooling3D, tf.keras.layers.MaxPool3D}: + shape = np.random.randint(low=3, high=9, size=5) + model = tf.keras.Sequential( + [op(batch_input_shape=shape, pool_size=pool_size, data_format=data_format)] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-10, rand_max=10)], + compute_unit=compute_unit, + backend=backend, + ) + + +class TestRecurrent(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, units, activation, " + "recurrent_activation, use_bias, return_sequences", + itertools.product( + compute_units, + backends, + [rank for rank in range(3, 4)], + [1, 3], + [None, tf.nn.tanh], + [None, tf.nn.relu], + [True, False], + [True, False], + ), + ) + def test_lstm( + self, + compute_unit, + backend, + rank, + units, + activation, + recurrent_activation, + use_bias, + return_sequences, + ): + shape = np.random.randint(low=1, high=4, size=rank) + model = tf.keras.Sequential( + [ + tf.keras.layers.LSTM( + batch_input_shape=shape, + units=units, + activation=activation, + recurrent_activation=recurrent_activation, + use_bias=use_bias, + return_sequences=return_sequences, + ), + ] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + 
[random_gen(shape, rand_min=-1, rand_max=1)], + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_lstmcell(self, compute_unit, backend): + shape = np.random.randint(low=1, high=4, size=3) + model = tf.keras.Sequential( + [ + tf.keras.layers.RNN( + batch_input_shape=shape, cell=tf.keras.layers.LSTMCell(units=3) + ) + ] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-1, rand_max=1)], + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_lstm_time_distributed_dense(self, compute_unit, backend): + shape = list(np.random.randint(low=1, high=4, size=3)) + k_in = tf.keras.layers.Input(batch_size=shape[0], shape=shape[1:]) + lstm = tf.keras.layers.LSTM(units=32, return_sequences=True)(k_in) + k_out = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(1))(lstm) + model = tf.keras.Model(inputs=k_in, outputs=k_out) + + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-1, rand_max=1)], + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_lstm_dynamic_batch(self, compute_unit, backend): + input_shape = (1, 1280) + inp = tf.keras.layers.Input(shape=input_shape) + out, hn, cn = tf.keras.layers.LSTM(512, + return_sequences=True, + return_state=True, + recurrent_activation='sigmoid')(inp) + model = tf.keras.models.Model(inputs=[inp], outputs=[out, hn, cn]) + batch_size = 2 + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen((batch_size, 1, 1280), -1, 1),], + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_lstm_conversion_static_shapes(self, compute_unit, backend): + ''' + Test that intermediate tensor shapes are populated correctly by the converter. + That is, there are no symbolic dimensions in the shapes, when conversion is + performed with a fixed input shape, irrespective of the shape used in the source model definition. 
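+
+        For example, converting with a fixed ct.TensorType(shape=(1, 32, 10))
+        should produce a program with no symbolic dimensions even when the
+        Keras model was defined with shape (None, None, 10), as the cases
+        below exercise.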
+ ''' + def _get_keras_simple_lstm_model(input_shape): + input = tf.keras.Input(batch_input_shape=input_shape) + output = tf.keras.layers.LSTM(5)(input) + keras_model = tf.keras.Model(inputs=input, outputs=output) + return keras_model + + def _test_for_symbolic_shapes(keras_input_shape, input_shape_for_conversion, are_symbols_expected): + keras_model = _get_keras_simple_lstm_model(keras_input_shape) + res = TensorFlowBaseTest.run_compare_tf_keras( + keras_model, + [random_gen((1, 32, 10), -1, 1)], + inputs_for_conversion=[ct.TensorType(shape=input_shape_for_conversion)], + compute_unit=compute_unit, + backend=backend, + ) + coreml_model = res[1] + mil_prog = coreml_model._get_mil_internal() + assert is_symbolic_dim_in_prog(mil_prog) == are_symbols_expected + + _test_for_symbolic_shapes(keras_input_shape=(1, 32, 10), + input_shape_for_conversion=(1, 32, 10), + are_symbols_expected=False) + + _test_for_symbolic_shapes(keras_input_shape=(None, 32, 10), + input_shape_for_conversion=(1, 32, 10), + are_symbols_expected=False) + + _test_for_symbolic_shapes(keras_input_shape=(None, None, 10), + input_shape_for_conversion=(1, 32, 10), + are_symbols_expected=False) + + _test_for_symbolic_shapes(keras_input_shape=(None, 32, 10), + input_shape_for_conversion=(ct.RangeDim(1, 10), 32, 10), + are_symbols_expected=True) + + if backend[0] != "mlprogram": + # FIX ME: model load fails if backend is "mlprogram". rdar://84862138 + _test_for_symbolic_shapes(keras_input_shape=(None, None, 10), + input_shape_for_conversion=(ct.RangeDim(1, 10), ct.RangeDim(16, 64), 10), + are_symbols_expected=True) + + @pytest.mark.parametrize( + "compute_unit, tf_raw_lstm_op, is_flexible_input, batch_size, backend", + itertools.product( + compute_units, + [ + tf.raw_ops.BlockLSTMV2, + tf.raw_ops.BlockLSTM, + ], + [False, True], + [1, 2], + backends, + ), + ) + def test_lstm_block_fused_op( + self, compute_unit, tf_raw_lstm_op, is_flexible_input, batch_size, backend + ): + """ + Define a model with custom LSTM ops that uses tf.raw_ops.BlockLSTM / tf.raw_ops.BlockLSTMV2 + and verify that it converts to a fused lstm op. 
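+
+        Each tf.raw_ops.BlockLSTM / BlockLSTMV2 call is expected to lower to a
+        single fused MIL "lstm" op (the assertion at the end of this test
+        checks that exactly two such ops are present). The model, in MIL-like
+        pseudocode: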
+ + %x (shape: (Seq, Batch, idim) == (seq_len, batch, 4)) + %x1 = LSTM(h=10) (%input) # shape = (seq_len, batch, 10) + %x2 = LSTM(h=20) (%x1) # shape = (seq_len, batch, 20) + %x3 = slice()(%x2) # shape = (1, batch, 20), to get the final seq value + %x4 = reshape((1, -1)) (%x3) # shape = (1, batch * 20) + %x5 = Dense(h=3)(%x4) # shape = (1, 3) + """ + + class CustomLSTM(tf.keras.layers.Layer): + def __init__(self, num_units, max_seq_length, batch_size): + super(CustomLSTM, self).__init__() + self.hidden_dim = num_units + self.seq_length = max_seq_length + self.batch_size = batch_size + + def build(self, input_shape): + input_dim = input_shape[-1] + self.w = self.add_weight( + shape=(input_dim + self.hidden_dim, 4 * self.hidden_dim), + initializer="random_normal", + trainable=True, + ) + self.b = self.add_weight(shape=(4 * self.hidden_dim,), initializer="random_normal", trainable=True) + self.init_h = tf.constant(np.zeros((self.batch_size, self.hidden_dim)).astype(np.float32)) + self.init_c = tf.constant(np.zeros((self.batch_size, self.hidden_dim)).astype(np.float32)) + + def call(self, inputs): + _, output_state, _, _, _, _, output = tf_raw_lstm_op( + seq_len_max=self.seq_length, + x=inputs, + cs_prev=self.init_c, + h_prev=self.init_h, + w=self.w, + wci=tf.constant(np.zeros((self.hidden_dim)).astype(np.float32)), + wcf=tf.constant(np.zeros((self.hidden_dim)).astype(np.float32)), + wco=tf.constant(np.zeros((self.hidden_dim)).astype(np.float32)), + b=self.b, + ) + return output, output_state + + input_dim = 4 + seq_length = 5 + batch_size = batch_size + x_shape = (seq_length, batch_size, input_dim) + hidden_dim_1 = 10 + hidden_dim_2 = 20 + + x = tf.keras.Input(batch_input_shape=x_shape) # (seq_len, batch, 4) + x1, output_states_1 = CustomLSTM(num_units=hidden_dim_1, max_seq_length=seq_length, batch_size=batch_size)(x) # (seq_len, batch, 10), (seq_len, batch, 10) + x2, output_states_2 = CustomLSTM(num_units=hidden_dim_2, max_seq_length=seq_length, batch_size=batch_size)(x1) # (seq_len, batch, 20), (seq_len, batch 10) + x3 = tf.slice(x2, begin=[4, 0, 0], size=[1, batch_size, 20]) # (1, batch, 20) + x4 = tf.reshape(x3, shape=(1, -1)) # (1, batch * 20) + x5 = tf.keras.layers.Dense(3)(x4) # (1, 3) + + # Test that we can fuse the lstm op if we have an output that only extract the information from the last cell state + x6 = tf.keras.layers.ReLU()(output_states_1[4, :, :]) + x7 = output_states_2[4:5, :, :] + x8 = output_states_1[-1, :, :] + x9 = tf.keras.layers.ReLU()(output_states_2[-1:, :, :]) + outputs = [x5, x8, x9] if is_flexible_input else [x5, x6, x7, x8, x9] + + keras_model = tf.keras.Model(inputs=x, outputs=outputs) + + inputs = None + if is_flexible_input: + inputs = [ + ct.TensorType( + shape=(ct.RangeDim(seq_length, 20), batch_size, input_dim) + ) + ] + + res = TensorFlowBaseTest.run_compare_tf_keras( + keras_model, + [random_gen(x_shape, -1, 1)], + compute_unit=compute_unit, + backend=backend, + inputs_for_conversion=inputs, + ) + coreml_model = res[1] + mil_prog = coreml_model._get_mil_internal() + # assert that "lstm" ops are present in the mil program + assert len(mil_prog.find_ops(op_type="lstm")) == 2 + + +class TestRepeatVector(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, n", + itertools.product( + compute_units, + backends, + [2, 3, 5, 7], + ), + ) + def test(self, compute_unit, backend, n): + # input shape 2D tensor (batch size, features) + # output shape 3D tensor (batch size, n, features) + shape = np.random.randint(low=1, high=4, size=2) + model = 
tf.keras.Sequential( + [tf.keras.layers.RepeatVector(batch_input_shape=shape, n=n)] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-10, rand_max=10)], + compute_unit=compute_unit, + backend=backend, + ) + + +class TestReshape(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, infer_shape", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)], + [True, False], + ), + ) + def test(self, compute_unit, backend, rank, infer_shape): + shape = np.random.randint(low=2, high=4, size=rank) + # target shape does not include the batch dimension + target_shape = random.sample(list(shape[1:]), len(shape[1:])) + if len(target_shape) > 0 and infer_shape: + target_shape[-1] = -1 + model = tf.keras.Sequential( + [ + tf.keras.layers.Reshape( + batch_input_shape=shape, target_shape=target_shape + ) + ] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-10, rand_max=10)], + compute_unit=compute_unit, + backend=backend, + ) + + +class TestSkips(TensorFlowBaseTest): + # ops in this class should be ignored / pass-through during conversion + + @pytest.mark.parametrize( + "compute_unit, backend, skip_op", + itertools.product( + compute_units, + backends, + [ + tf.keras.layers.Dropout, + tf.keras.layers.AlphaDropout, + tf.keras.layers.GaussianDropout, + tf.keras.layers.SpatialDropout1D, + tf.keras.layers.SpatialDropout2D, + tf.keras.layers.SpatialDropout3D, + ], + ), + ) + def test_skip_dropout(self, compute_unit, backend, skip_op): + shape = np.random.randint(low=1, high=4, size=5) + if skip_op == tf.keras.layers.SpatialDropout1D: + shape = shape[:3] + elif skip_op == tf.keras.layers.SpatialDropout2D: + shape = shape[:4] + model = tf.keras.Sequential([skip_op(batch_input_shape=shape, rate=0.5)]) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-10, rand_max=10)], + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_skip_noise(self, compute_unit, backend): + shape = np.random.randint(low=1, high=4, size=5) + model = tf.keras.Sequential( + [ + # GaussianNoise should do nothing in inference mode + tf.keras.layers.GaussianNoise(batch_input_shape=shape, stddev=0.5) + ] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-10, rand_max=10)], + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank, l1, l2", + itertools.product( + compute_units, + backends, + [rank for rank in range(5, 6)], + [0.0, 0.5, 1.0], + [0.0, 0.5, 1.0], + ), + ) + def test_skip_regularization(self, compute_unit, backend, rank, l1, l2): + shape = np.random.randint(low=2, high=4, size=rank) + model = tf.keras.Sequential( + [ + tf.keras.layers.ActivityRegularization( + batch_input_shape=shape, l1=l1, l2=l2 + ) + ] + ) + TensorFlowBaseTest.run_compare_tf_keras( + model, + [random_gen(shape, rand_min=-10, rand_max=10)], + compute_unit=compute_unit, + backend=backend, + ) + + +class TestUpSampling(TensorFlowBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, op, upsample_factor, data_format, interpolation, dynamic", + itertools.product( + compute_units, + backends, + [ + tf.keras.layers.UpSampling1D, + tf.keras.layers.UpSampling2D, + tf.keras.layers.UpSampling3D, + ], + [(2, 2, 1), (4, 3, 2), (1, 2, 3)], + ["channels_first", "channels_last"], + ["nearest", "bilinear"], + 
[True, False],
+        ),
+    )
+    def test(
+        self, compute_unit, backend, op, upsample_factor, data_format, interpolation, dynamic
+    ):
+        kwargs = {}
+        shape = None
+        keras_shape = None
+
+        if op == tf.keras.layers.UpSampling1D:
+            shape = np.random.randint(low=2, high=4, size=3)
+            keras_shape = np.copy(shape).tolist()
+            if dynamic:
+                keras_shape[1] = None
+            upsample_factor = upsample_factor[2]
+        elif op == tf.keras.layers.UpSampling2D:
+            kwargs = {"data_format": data_format, "interpolation": interpolation}
+            shape = np.random.randint(low=2, high=4, size=4)
+            keras_shape = np.copy(shape).tolist()
+            if dynamic:
+                keras_shape[1] = keras_shape[2] = None
+            upsample_factor = (upsample_factor[1], upsample_factor[2])
+        elif op == tf.keras.layers.UpSampling3D:
+            kwargs = {"data_format": data_format}
+            shape = np.random.randint(low=2, high=4, size=5)
+            keras_shape = np.copy(shape).tolist()
+            # UpSampling3D with a dynamic input shape is not supported, since
+            # 6D tensors would be produced in that case
+            if dynamic:
+                return
+
+        model = tf.keras.Sequential(
+            [op(batch_input_shape=keras_shape, size=upsample_factor, **kwargs)]
+        )
+        spec = TensorFlowBaseTest.run_compare_tf_keras(
+            model,
+            [random_gen(shape, rand_min=-10, rand_max=10)],
+            compute_unit=compute_unit,
+            backend=backend,
+        )[0]
+        # also check that the scale factors are integers
+        if backend[0] == 'neuralnetwork':
+            for layer in spec.neuralNetwork.layers:
+                if layer.WhichOneof('layer') == "upsample":
+                    assert len(layer.upsample.fractionalScalingFactor) == 0
+
+class TestGelu(TensorFlowBaseTest):
+    @pytest.mark.skipif(
+        _get_version(_tf.__version__) < _StrictVersion("2.4.0"),
+        reason="Gelu is a new layer for tf 2.4.0 and above."
+    )
+    @pytest.mark.parametrize(
+        "compute_unit, backend, rank, approximate",
+        itertools.product(
+            compute_units,
+            backends,
+            [rank for rank in range(1, 6)],
+            [True, False],
+        ),
+    )
+    def test(
+        self, compute_unit, backend, rank, approximate
+    ):
+        shape = np.random.randint(low=2, high=4, size=rank)
+        input = tf.keras.layers.Input(batch_input_shape=tuple(shape))
+        out = tf.keras.activations.gelu(input, approximate=approximate)
+        model = tf.keras.Model(inputs=[input], outputs=out)
+
+        TensorFlowBaseTest.run_compare_tf_keras(
+            model,
+            [random_gen(shape, -10, 10)],
+            compute_unit=compute_unit,
+            backend=backend,
+        )
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/test/testing_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/test/testing_utils.py
new file mode 100644
index 00000000..b80e5df1
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/test/testing_utils.py
@@ -0,0 +1,290 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+
+import numpy as np
+import pytest
+
+tf = pytest.importorskip("tensorflow", minversion="2.1.0")
+from tensorflow.python.framework import dtypes
+
+import coremltools as ct
+import coremltools.models.utils as coremltoolsutils
+from coremltools.converters.mil.frontend.tensorflow.test.testing_utils import (
+    TensorFlowBaseTest, get_tf_node_names)
+from coremltools.converters.mil.input_types import RangeDim, TensorType
+from coremltools.converters.mil.testing_utils import (compare_backend,
+                                                      ct_convert)
+from coremltools.models.utils import _macos_version
+
+
+def make_tf2_graph(input_types):
+    """
+    Decorator to help construct a TensorFlow 2.x model.
+
+    Parameters
+    ----------
+    input_types: list of tuple or list of list
+        List of input types. E.g. [(3, 224, 224, tf.int32)] represents one
+        input with shape (3, 224, 224) and expected data type tf.int32. The
+        dtype is optional; if it is missing, tf.float32 is used.
+
+    Returns
+    -------
+    list of ConcreteFunction, list of str, list of str
+    """
+
+    def wrapper(ops):
+        input_signature = []
+        for input_type in input_types:
+            if input_type is not None and len(input_type) > 0 and isinstance(input_type[-1], dtypes.DType):
+                shape, dtype = input_type[:-1], input_type[-1]
+            else:
+                shape, dtype = input_type, tf.float32
+            input_signature.append(tf.TensorSpec(shape=shape, dtype=dtype))
+
+        @tf.function(input_signature=input_signature)
+        def tf2_model(*args):
+            return ops(*args)
+
+        concrete_func = tf2_model.get_concrete_function()
+        inputs = get_tf_node_names(
+            [t.name for t in concrete_func.inputs if t.dtype != dtypes.resource],
+            mode="input",
+        )
+        outputs = get_tf_node_names(
+            [t.name for t in concrete_func.outputs], mode="output"
+        )
+        return [concrete_func], inputs, outputs
+
+    return wrapper
+
+
+def run_compare_tf2(
+    model,
+    input_dict,
+    output_names,
+    inputs_for_conversion=None,
+    compute_unit=ct.ComputeUnit.CPU_ONLY,
+    frontend_only=False,
+    frontend="tensorflow",
+    backend=("neuralnetwork", "fp32"),
+    debug=False,
+    atol=1e-04,
+    rtol=1e-05,
+    minimum_deployment_target=None,
+):
+    """
+    Parameters
+    ----------
+    model: list of tf.ConcreteFunction
+        List of TensorFlow 2.x concrete functions.
+    input_dict: dict of (str, np.array)
+        Dict of name and value pairs representing inputs.
+    output_names: list of str
+        List of output node names.
+    inputs_for_conversion: list of coremltools.TensorType() or coremltools.ImageType() objects
+        Defaults to None. It is passed as is to the "inputs" argument of the converter.
+    compute_unit: Enum[ct.ComputeUnit]
+        Compute unit for the Core ML model.
+    frontend_only: bool
+        If True, skip the prediction call, only validate conversion.
+    frontend: str
+        Frontend to convert from.
+    backend: tuple of str
+        Backend to convert to, e.g. ("neuralnetwork", "fp32").
+    debug: bool
+        If True, print verbose information and plot intermediate graphs.
+    atol: float
+        The absolute tolerance parameter.
+    rtol: float
+        The relative tolerance parameter.
+ minimum_deployment_target: coremltools.target enumeration + The spec version for the mlmodel + """ + inputs = [] + if inputs_for_conversion is None: + cf_inputs = [t for t in model[0].inputs if t.dtype != dtypes.resource] + for t in cf_inputs: + name = get_tf_node_names(t.name)[0] + shape = [RangeDim() if s is None or s == -1 else s \ + for s in list(t.get_shape())] + inputs.append(TensorType(name=name, shape=shape, + dtype=t.dtype.as_numpy_dtype)) + else: + inputs = inputs_for_conversion + + outputs = [] + for t in output_names: + name = get_tf_node_names(t)[0] + outputs.append(name) + + # get TensorFlow 2.x output as reference and run comparison + tf_input_values = [tf.constant(t) for t in input_dict.values()] + tf_outputs = model[0](*tf_input_values) + if isinstance(tf_outputs, (tuple, list)): + ref = [t.numpy() for t in tf_outputs] + else: + ref = [tf_outputs.numpy()] + expected_outputs = {n: v for n, v in zip(outputs, ref)} + + mlmodel = ct_convert( + model, + source=frontend, + inputs=inputs, + outputs=outputs, + convert_to=backend, + debug=debug, + compute_units=compute_unit, + minimum_deployment_target=minimum_deployment_target, + ) + + for k,v in input_dict.items(): + if isinstance(v, np.ndarray) and issubclass(v.dtype.type, np.integer): + input_dict[k] = v.astype(float) # Core ML only accepts floats + + if frontend_only or _macos_version() < (10, 13) \ + or (mlmodel.is_package and _macos_version() < (12, 0)): + return mlmodel._spec, mlmodel, input_dict, None + + pred = None + if not coremltoolsutils._has_custom_layer(mlmodel._spec): + pred = compare_backend( + mlmodel, + input_dict, + expected_outputs, + atol=atol, + rtol=rtol, + also_compare_shapes=True, + dtype=backend[1], + ) + else: + print('Skipping model prediction as it has a custom nn layer!') + return mlmodel._spec, mlmodel, input_dict, pred + + +def run_compare_tf_keras( + model, + input_values, + inputs_for_conversion=None, + compute_unit=ct.ComputeUnit.CPU_ONLY, + frontend_only=False, + frontend="tensorflow", + backend=("neuralnetwork", "fp32"), + atol=1e-04, + rtol=1e-05, +): + """ + Parameters + ---------- + model: TensorFlow 2.x model + TensorFlow 2.x model annotated with @tf.function. + input_values: list of np.array + List of input values in the same order as the input signature. + inputs_for_conversion: list of coremltools.TensorType() or coremltools.ImageType() objects + Defaults to None. It is passed as is to the "inputs" argument of the converter. + compute_unit: Enum[ct.ComputeUnit] + Compute unit for the coreml model + frontend_only: bool + If True, skip the prediction call, only validate conversion. + frontend: str + Frontend to convert from. + backend: str + Backend to convert to. + atol: float + The absolute tolerance parameter. + rtol: float + The relative tolerance parameter. 
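+
+    Returns
+    -------
+    tuple
+        (proto, mlmodel, input_key_values, prediction); prediction is None
+        when the prediction call is skipped (e.g. frontend_only=True, or on
+        a macOS version that cannot run the model).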
+    """
+    mlmodel = ct_convert(model, inputs=inputs_for_conversion, source=frontend, convert_to=backend,
+                         compute_units=compute_unit)
+
+    # assumes conversion preserves the i/o names
+    proto = mlmodel._spec
+    inputs = [i.name.split(":")[0].strip() for i in model.inputs]
+    outputs = [str(o.name) for o in proto.description.output]
+
+    # get tf.keras model output as reference and run comparison
+    keras_outputs = model(input_values)
+    if not isinstance(keras_outputs, list):
+        keras_outputs = [keras_outputs]
+    ref = [output.numpy() for output in keras_outputs]
+    expected_outputs = {n: v for n, v in zip(outputs, ref)}
+    input_key_values = {n: v for n, v in zip(inputs, input_values)}
+
+    if frontend_only or _macos_version() < (10, 13) \
+       or (mlmodel.is_package and _macos_version() < (12, 0)):
+        return proto, mlmodel, input_key_values, None
+
+    pred = None
+    if not coremltoolsutils._has_custom_layer(proto):
+        pred = compare_backend(
+            mlmodel,
+            input_key_values,
+            expected_outputs,
+            atol=atol,
+            rtol=rtol,
+            also_compare_shapes=True,
+            dtype=backend[1]
+        )
+    else:
+        print('Skipping model prediction as it has a custom nn layer!')
+    return proto, mlmodel, input_key_values, pred
+
+
+class TensorFlow2BaseTest(TensorFlowBaseTest):
+
+    @staticmethod
+    def run_compare_tf2(model,
+                        input_dict,
+                        output_names,
+                        inputs_for_conversion=None,
+                        compute_unit=ct.ComputeUnit.CPU_ONLY,
+                        frontend_only=False,
+                        frontend="tensorflow",
+                        backend=("neuralnetwork", "fp32"),
+                        debug=False,
+                        atol=1e-04,
+                        rtol=1e-05,
+                        minimum_deployment_target=None,):
+        res = run_compare_tf2(model,
+                              input_dict,
+                              output_names,
+                              inputs_for_conversion=inputs_for_conversion,
+                              compute_unit=compute_unit,
+                              frontend_only=frontend_only,
+                              frontend=frontend,
+                              backend=backend,
+                              debug=debug,
+                              atol=atol,
+                              rtol=rtol,
+                              minimum_deployment_target=minimum_deployment_target,)
+        alist = list(res)
+        alist.append(TensorFlow2BaseTest.testclassname)
+        alist.append(TensorFlow2BaseTest.testmodelname)
+        return tuple(alist)
+
+    @staticmethod
+    def run_compare_tf_keras(
+        model,
+        input_values,
+        inputs_for_conversion=None,
+        compute_unit=ct.ComputeUnit.CPU_ONLY,
+        frontend_only=False,
+        frontend="tensorflow",
+        backend=("neuralnetwork", "fp32"),
+        atol=1e-04,
+        rtol=1e-05
+    ):
+        res = run_compare_tf_keras(model, input_values,
+                                   inputs_for_conversion=inputs_for_conversion,
+                                   compute_unit=compute_unit,
+                                   frontend_only=frontend_only,
+                                   frontend=frontend,
+                                   backend=backend, atol=atol, rtol=rtol)
+        alist = list(res)
+        alist.append(TensorFlow2BaseTest.testclassname)
+        alist.append(TensorFlow2BaseTest.testmodelname)
+        return tuple(alist)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/tf_graph_pass/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/tf_graph_pass/__init__.py
new file mode 100644
index 00000000..5f18ff00
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/tf_graph_pass/__init__.py
@@ -0,0 +1,7 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from .rewrite_control_flow_functions import (flatten_sub_graph_namespaces,
+                                             rewrite_control_flow_functions)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/tf_graph_pass/rewrite_control_flow_functions.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/tf_graph_pass/rewrite_control_flow_functions.py
new file mode 100644
index 00000000..68468055
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/tensorflow2/tf_graph_pass/rewrite_control_flow_functions.py
@@ -0,0 +1,561 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+
+from coremltools import _logger as logger
+from coremltools.converters.mil.frontend.tensorflow.basic_graph_ops import (
+    connect_edge, connect_edge_at_index, delete_node, disconnect_edge,
+    replace_dest, replace_node)
+from coremltools.converters.mil.frontend.tensorflow.parsed_tf_node import \
+    ParsedTFNode
+
+
+def _rename_node_in_fn(node, new_name, fn):
+    """
+    Rename a node and all of its connections.
+
+    Parameters
+    ----------
+    node: ParsedTFNode
+        Node to rename.
+    new_name: str
+        New name of the node.
+    fn: SSAFunction
+        Function that contains the graph to operate on.
+    """
+    old_name = node.name
+    node.name = new_name
+    for i in node.inputs:
+        idx = fn.graph[i].outputs.index(old_name)
+        fn.graph[i].outputs[idx] = new_name
+        if old_name in fn.graph[i].control_outputs:
+            idx = fn.graph[i].control_outputs.index(old_name)
+            fn.graph[i].control_outputs[idx] = new_name
+
+    for o in node.outputs:
+        idx = fn.graph[o].inputs.index(old_name)
+        fn.graph[o].inputs[idx] = new_name
+        if old_name in fn.graph[o].control_inputs:
+            idx = fn.graph[o].control_inputs.index(old_name)
+            fn.graph[o].control_inputs[idx] = new_name
+
+    for i in node.control_inputs:
+        if old_name in fn.graph[i].control_outputs:
+            idx = fn.graph[i].control_outputs.index(old_name)
+            fn.graph[i].control_outputs[idx] = new_name
+
+    for o in node.control_outputs:
+        if old_name in fn.graph[o].control_inputs:
+            idx = fn.graph[o].control_inputs.index(old_name)
+            fn.graph[o].control_inputs[idx] = new_name
+
+    fn.graph[new_name] = fn.graph.pop(old_name)
+
+
+def _flatten_sub_graph_namespaces(tf_ssa, fn_name):
+    """
+    A pass to flatten namespaces for the sub-graphs of the control flow
+    while_loop op. For example, a while_loop op has two sub-graphs, "cond"
+    and "body"; every node in those sub-graphs is prefixed with the
+    sub-graph's name. This pass is required for converting control flow v2
+    ops (enabled by default in TensorFlow 2.0+), as the original sub-graphs
+    would otherwise contain duplicated node names.
+
+    Parameters
+    ----------
+    tf_ssa: NetworkEnsemble
+        An object that contains multiple functions / sub-graphs.
+    fn_name: str
+        Name of the function / sub-graph to operate on.
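+
+    For example, given a hypothetical While node named "my_while" whose "cond"
+    sub-graph is the function "while_cond_0", a node "Greater" inside that
+    sub-graph is renamed to "my_while/while_cond_0/Greater".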
+ """ + count = 0 + fn = tf_ssa.functions.get(fn_name) + for name, node in fn.graph.copy().items(): + if node.op not in {"StatelessWhile", "While", "StatelessIf", "If"}: + continue + + if node.op in {"StatelessWhile", "While"}: + sub_fn_names = [node.attr.get("cond"), node.attr.get("body")] + else: + sub_fn_names = [node.attr.get("then_branch"), node.attr.get("else_branch")] + + for sf_name in sub_fn_names: + sf = tf_ssa.functions.get(sf_name) + prefix = "{}/{}".format(node.name, sf_name) + + for old_name, n in sf.graph.copy().items(): + _rename_node_in_fn(n, "{}/{}".format(prefix, old_name), sf) + count += 1 + + ios = set(sf.inputs + sf.outputs) + io_name_mappings = {n: "{}/{}".format(prefix, n) for n in ios} + sf.inputs = [io_name_mappings[n] for n in sf.inputs] + sf.outputs = [io_name_mappings[n] for n in sf.outputs] + _flatten_sub_graph_namespaces(tf_ssa, sf_name) + + msg = "flatten_sub_graph_namespaces: {} nodes renamed in '{}'" + logger.info(msg.format(count, sf_name)) + + +def _insert_op(fn, op, name, attr=None): + """ + Create a node with given attributes, then insert to the target graph in + given function. + + Parameters + ---------- + fn: SSAFunction + Function that contains graph to operate on. + op: str + Type of the operation for the new node. + name: str + Name of the new node. + attr: dict or None (optional) + Attributes of the new node. + + Returns + ------- + node: ParsedTFNode + New node object. + """ + node = ParsedTFNode() + node.op = op + node.name = name + if attr is not None: + node.attr = attr + fn.graph[node.name] = node + return node + + +def _insert_function_entry(fn): + return _insert_op(fn=fn, op="function_entry", name="entry") + + +def _insert_return(fn): + return _insert_op(fn=fn, op="return", name="return") + + +def _insert_make_tuple(fn, name=None): + name = "make_tuple" if name is None else name + return _insert_op(fn=fn, op="make_tuple", name=name) + + +def _insert_get_tuple(fn, name, idx): + return _insert_op(fn=fn, op="get_tuple", name=name, attr={"index": idx}) + + +def _rewrite_cond_functions(tf_ssa, fn): + r""" + Rewrite tf.cond's sub-graphs with get_tuple, make_tuple, function_entry and + return ops. This rewrite is required in order to convert functional form + control flow v2 nodes 'StatelessIf' and 'If'. + + Parameters + ---------- + tf_ssa: NetworkEnsemble + An object that contains multiple functions / sub-graphs. + fn: SSAFunction + Function that contains graph to operate on. 
+ + Examples + -------- + + Input: + + Before pass "main" graph: + + [const/greater/y] ---------\ + [placeholder/args_0] -> [greater] -> [if] -> [identity] + \------------------/ \--> [identity] + [placeholder/args_1] ----------------/ + + Before pass "then" graph: + + [const/sub/y] ---------------\ + [placeholder/sub_args_0] -> [sub] + [placeholder/sub_args_1] -> [identity] + + Before pass "else" graph: + + [const/add/y] ---------------\ + [placeholder/add_args_0] -> [add] + + [const/mul/y] ---------------\ + [placeholder/add_args_1] -> [mul] + + Output: + + After pass "main" graph: + + [const/greater/y] ---------\ + [placeholder/args_0] -> [greater] -> [make_tuple] -> [if] -> [get_tuple] -> [identity] + \---------------------/ \--> [get_tuple] -> [identity] + [placeholder/args_1] -------------------/ + + After pass "then" graph: + + [const/sub/y] ---------------\ + [entry] -> [get_tuple] -> [placeholder/sub_args_0] -> [sub] -> [make_tuple] -> [return] + -> [get_tuple] -> [placeholder/sub_args_1] -----------------/ + + After pass "else" graph: + + [const/add/y] ---------------\ + [entry] -> [get_tuple] -> [placeholder/add_args_0] -> [add] -> [make_tuple] -> [return] + -> [get_tuple] -> [placeholder/add_args_1] -> [mul] --------/ + [const/mul/y] ---------------/ + + """ + for cond_name, cond_node in fn.graph.copy().items(): + if cond_node.op not in {"StatelessIf", "If"}: + continue + + then_fn_name = cond_node.attr.get("then_branch") + else_fn_name = cond_node.attr.get("else_branch") + + msg = "Rewriting '{}' ({}) sub-graphs: then '{}', else '{}'" + logger.info( + msg.format(cond_node.name, cond_node.op, then_fn_name, else_fn_name) + ) + + then_fn = tf_ssa.functions.get(then_fn_name) + else_fn = tf_ssa.functions.get(else_fn_name) + + # insert function entry nodes + then_entry = _insert_function_entry(then_fn) + else_entry = _insert_function_entry(else_fn) + + # pack node inputs to a single tuple + cond_input = _insert_make_tuple(fn, "make_tuple/{}".format(cond_name)) + for ci in cond_node.inputs: + disconnect_edge(fn.graph, ci, cond_node.name) + connect_edge(fn.graph, ci, cond_input) + connect_edge(fn.graph, cond_input, cond_node.name) + + # unpack node outputs to multiple get_tuples + for i, co in enumerate(cond_node.outputs): + # utilize FunctionDef's ret to make sure function outputs and + # node outputs order matches when multiple outputs are there. + # Fallback to use original cond_node.outputs order if fails. 
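+            # Illustration with assumed names (not taken from the source): if
+            # this output consumes "cond_0:1", look up the FunctionDef ret key
+            # ending in "identity_1", map it back to the node that produces it
+            # in the then/else branch, and use that node's position among the
+            # branch outputs as the get_tuple index.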
+ o_original = fn.graph[co].original_node + if o_original: + c_input = [n for n in o_original.input if str(n).startswith(cond_name)][ + 0 + ] + if ":" in c_input: + identity_postfix = "identity_{}".format(c_input.split(":")[-1]) + else: # access identity "0" + identity_postfix = "identity" + + identity_keys = [t for t in then_fn.ret.keys() if t.endswith(identity_postfix)] + if len(identity_keys) != 1: + raise NotImplementedError("Branch not found.") + + mapped_name = then_fn.ret[identity_keys[0]].split(":")[0] + + if mapped_name in then_fn.outputs: + idx = then_fn.outputs.index(mapped_name) + else: # in else_fn.outputs + idx = else_fn.outputs.index(mapped_name) + else: + idx = i + + cond_output = _insert_get_tuple( + fn, "get_tuple/{}/{}".format(idx, cond_name), idx + ) + edge_idx = fn.graph[co].inputs.index(cond_node.name) + replace_dest(fn.graph, cond_node, co, cond_output) + connect_edge_at_index(fn.graph, cond_output, co, edge_idx) + + # fetch inputs using get_tuple for then branch + for i, ti in enumerate(then_fn.inputs): + then_input = _insert_get_tuple( + then_fn, "get_tuple/{}/{}".format(i, ti), i + 1 + ) + connect_edge(then_fn.graph, then_entry, then_input) + replace_node(then_fn.graph, ti, then_input) + delete_node(then_fn.graph, ti) + + # fetch inputs using get_tuple for else branch + for i, ei in enumerate(else_fn.inputs): + else_input = _insert_get_tuple( + else_fn, "get_tuple/{}/{}".format(i, ei), i + 1 + ) + connect_edge(else_fn.graph, else_entry, else_input) + replace_node(else_fn.graph, ei, else_input) + delete_node(else_fn.graph, ei) + + # returns a tuple of value(s) as output for then branch + then_output = _insert_make_tuple(then_fn) + for to in then_fn.outputs: + if to not in then_fn.graph.keys(): + # from identity, map back to get_tuple node + to = "get_tuple/{}/{}".format(then_fn.inputs.index(to), to) + connect_edge(then_fn.graph, to, then_output.name) + + then_return = _insert_return(then_fn) + connect_edge(then_fn.graph, then_output.name, then_return.name) + + # returns a tuple of value(s) as output for else branch + else_output = _insert_make_tuple(else_fn) + for eo in else_fn.outputs: + if eo not in else_fn.graph.keys(): + # from identity, map back to get_tuple node + eo = "get_tuple/{}/{}".format(else_fn.inputs.index(eo), eo) + connect_edge(else_fn.graph, eo, else_output.name) + + else_return = _insert_return(else_fn) + connect_edge(else_fn.graph, else_output.name, else_return.name) + + +def _eliminate_loop_cond_nodes(tf_ssa, fn): + """ + Eliminate loop condition nodes, such as loop_counters, max_iterations from + the cond sub-graph and body sub-graph of tf.while_loop. + + Parameters + ---------- + tf_ssa: NetworkEnsemble + An object that contains multiple functions / sub-graphs. + fn: SSAFunction + Function that contains graph to operate on. 
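+
+    Note
+    ----
+    This relies on the TensorFlow control flow v2 convention that the loop
+    counter and maximum-iterations values are the first two inputs of both
+    the "cond" and "body" sub-graphs, which is why the implementation below
+    pops ``inputs[0]`` twice from each.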
+ + Examples + -------- + + Input: + + Before pass "main" graph: + + [while/maximum_iterations] -----\ + [while/loop_counter] -------> [while] --> [identity] + [placeholder/args_0] ----------/ + + Before pass "cond" graph: + + [const/mean] -------\ + [placeholder] --> [mean] --> [greater] + [const/greater/y] --------------/ + + [while_maximum_iterations], [while_loop_counter] (not connected) + + Before pass "body" graph: + + [const/sub/y] ------\ + [placeholder] ---> [sub] + + [const/add/y] ------------\ + [while_loop_counter] --> [add] + + [while_maximum_iterations] (not connected) + + Output: + + After pass "main" graph: + + [placeholder/args_0] --> [while] --> [identity] + + After pass "cond" graph: + + [const/mean] -------\ + [placeholder] --> [mean] --> [greater] + [const/greater/y] --------------/ + + After pass "body" graph: + + [const/sub/y] ------\ + [placeholder] ---> [sub] + """ + for name, node in fn.graph.copy().items(): + if node.op not in {"StatelessWhile", "While"}: + continue + + cond_fn = tf_ssa.functions.get(node.attr.get("cond")) + body_fn = tf_ssa.functions.get(node.attr.get("body")) + + cond_lc_nodes = {cond_fn.inputs.pop(0), cond_fn.inputs.pop(0)} + logger.info("Removing {} from cond graph".format(cond_lc_nodes)) + for n in cond_lc_nodes: + delete_node(cond_fn.graph, n) + + body_lc_nodes = {body_fn.inputs.pop(0), body_fn.inputs.pop(0)} + q = list(body_lc_nodes) + + # delete entire sub-fn + while len(q) > 0: + n = body_fn.graph[q.pop(0)] + for o in n.outputs: + if o not in body_lc_nodes: + q.append(o) + body_lc_nodes.add(o) + for i in body_fn.graph[o].inputs: + if i not in body_lc_nodes: + q.append(i) + body_lc_nodes.add(i) + + # remove if in outputs + for n in body_lc_nodes: + if n in body_fn.outputs: + msg = "Removing '{}' ({}) from body fn outputs" + logger.info(msg.format(n, body_fn.graph[n].op)) + body_fn.outputs.remove(n) + + logger.info("Removing {} from body graph".format(body_lc_nodes)) + for n in body_lc_nodes: + delete_node(body_fn.graph, n) + + +def _rewrite_while_loop_functions(tf_ssa, fn): + """ + Rewrite tf.while_loop's sub-graphs with get_tuple, make_tuple, + function_entry and return ops. This rewrite is required in order to convert + functional form control flow v2 nodes 'StatelessWhile' and 'While'. + + Parameters + ---------- + tf_ssa: NetworkEnsemble + An object that contains multiple functions / sub-graphs. + fn: SSAFunction + Function that contains graph to operate on. 
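+
+    Note
+    ----
+    ``rewrite_control_flow_functions`` runs ``_eliminate_loop_cond_nodes``
+    before this rewrite, so the loop-counter bookkeeping inputs are expected
+    to be gone from the sub-graphs by the time this function executes.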
+ + Example + ------- + + Input: + + Before pass "main" graph: + + [placeholder/args_0] --> [while] --> [identity] + + Before pass "cond" graph: + + [const/mean] -------\ + [placeholder] --> [mean] --> [greater] + [const/greater/y] --------------/ + + Before pass "body" graph: + + [const/sub/y] ------\ + [placeholder] ---> [sub] + + Output: + + After pass "main" graph: + + [placeholder/args_0] --> [make_tuple] --> [while] --> [get_tuple] --> [identity] + + After pass "cond" graph: + + [const/mean] ------\ + [entry] -> [get_tuple] -> [placeholder] -> [mean] -> [greater] -> [make_tuple] -> [return] + [const/greater/y] ------------/ + + After pass "body" graph: + + [const/sub/y] ----\ + [entry] -> [get_tuple] -> [placeholder] -> [sub] -> [make_tuple] -> [return] + """ + for while_name, while_node in fn.graph.copy().items(): + if while_node.op not in {"StatelessWhile", "While"}: + continue + + cond_fn_name = while_node.attr.get("cond") + body_fn_name = while_node.attr.get("body") + + msg = "Rewriting '{}' ({}) sub-graphs: cond '{}', body '{}'" + logger.info( + msg.format(while_node.name, while_node.op, cond_fn_name, body_fn_name) + ) + + cond_fn = tf_ssa.functions.get(cond_fn_name) + body_fn = tf_ssa.functions.get(body_fn_name) + + # insert function entry nodes + cond_entry = _insert_function_entry(cond_fn) + body_entry = _insert_function_entry(body_fn) + + # pack node inputs to a single tuple + while_input_tuple = _insert_make_tuple(fn, "make_tuple/{}".format(while_name)) + for wi in while_node.inputs: + disconnect_edge(fn.graph, wi, while_node.name) + connect_edge(fn.graph, wi, while_input_tuple) + connect_edge(fn.graph, while_input_tuple, while_node.name) + + # unpack node outputs to multiple get_tuples + for i, wo in enumerate(while_node.outputs): + # utilize FunctionDef's ret to make sure function outputs and + # node outputs order matches when multiple outputs are there. 
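+            # Unlike the cond rewrite above, there is no index fallback here:
+            # each while output is assumed to trace back to a "name:index"
+            # input on its consuming node.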
+ o_original = fn.graph[wo].original_node + while_input = [ + n for n in o_original.input if str(n).startswith(while_name) + ][0] + while_index = while_input.split(":")[-1] + if while_index != 0: + identity_postfix = "identity_{}".format(while_index) + else: # access identity "0" + identity_postfix = "identity" + + identity_keys = [t for t in body_fn.ret.keys() if t.endswith(identity_postfix)] + if len(identity_keys) != 1: + raise NotImplementedError("Branch not found.") + + mapped_name = body_fn.ret[identity_keys[0]].split(":")[0] + idx = body_fn.outputs.index(mapped_name) + + loop_output = _insert_get_tuple( + fn, "get_tuple/{}/{}".format(idx, while_input), idx + ) + + edge_idx = fn.graph[wo].inputs.index(while_node.name) + replace_dest(fn.graph, while_node, wo, loop_output) + connect_edge_at_index(fn.graph, loop_output, wo, edge_idx) + + # fetch inputs using get_tuple for cond fn + for i, ci in enumerate(cond_fn.inputs): + cond_input = _insert_get_tuple(cond_fn, "get_tuple/{}/{}".format(i, ci), i) + connect_edge(cond_fn.graph, cond_entry, cond_input) + replace_node(cond_fn.graph, ci, cond_input) + delete_node(cond_fn.graph, ci) + + # fetch inputs using get_tuple for body fn + for i, bi in enumerate(body_fn.inputs): + new_name = "get_tuple/{}/{}".format(i, bi) + + if bi in body_fn.outputs: # input is also an output + body_fn.outputs[body_fn.outputs.index(bi)] = new_name + + body_input = _insert_get_tuple(body_fn, new_name, i) + + connect_edge(body_fn.graph, body_entry, body_input) + replace_node(body_fn.graph, bi, body_input) + delete_node(body_fn.graph, bi) + + # returns a tuple of value(s) as output for cond fn + cond_output = _insert_make_tuple(cond_fn) + for co in cond_fn.outputs: + connect_edge(cond_fn.graph, co, cond_output.name) + + cond_return = _insert_return(cond_fn) + connect_edge(cond_fn.graph, cond_output.name, cond_return.name) + + # returns a tuple of value(s) as output for body branch + body_output = _insert_make_tuple(body_fn) + + for bo in body_fn.outputs: + connect_edge(body_fn.graph, bo, body_output.name) + + body_return = _insert_return(body_fn) + connect_edge(body_fn.graph, body_output.name, body_return.name) + + +def rewrite_control_flow_functions(tf_ssa): + for fn_name, fn in tf_ssa.functions.items(): + _rewrite_cond_functions(tf_ssa, fn) + for fn_name, fn in tf_ssa.functions.items(): + _eliminate_loop_cond_nodes(tf_ssa, fn) + _rewrite_while_loop_functions(tf_ssa, fn) + + +def flatten_sub_graph_namespaces(tf_ssa): + _flatten_sub_graph_namespaces(tf_ssa, fn_name="main") diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/__init__.py new file mode 100644 index 00000000..521d2e46 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools._deps import _HAS_TORCH + +register_torch_op = None + +if _HAS_TORCH: + from .dialect_ops import (torch_tensor_assign, torch_upsample_bilinear, + torch_upsample_nearest_neighbor) + from .torch_op_registry import register_torch_op diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/converter.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/converter.py new file mode 100644 index 00000000..a7cdff80 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/converter.py @@ -0,0 +1,495 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from collections import OrderedDict + +import numpy as np +import torch as torch + +from coremltools import _logger as logger +from coremltools._deps import version_lt +from coremltools.converters.mil._deployment_compatibility import AvailableTarget as _target +from coremltools.converters.mil.input_types import ImageType +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import Function, Program, types + +from .._utils import get_output_names +from .internal_graph import InternalTorchIRGraph, InternalTorchIRNode +from .ops import convert_nodes +from .torch_op_registry import _TORCH_OPS_REGISTRY +from .torchir_passes import ( + flatten_graph_input_values, + flatten_graph_output_values, + generate_tensor_assignment_ops, + remove_getattr_nodes, + transform_inplace_ops, +) + +torch_to_mil_types = { + torch.bool: types.bool, + torch.float16: types.fp16, + torch.float32: types.fp32, + torch.float64: types.fp32, + torch.int32: types.int32, + torch.int64: types.int32, +} + + +mil_to_torch_types = {v: k for k, v in torch_to_mil_types.items()} + + +class TranscriptionContext: + """ + Maintains a map from torch operations to their MIL values + while building the graph. Can be used to process subgraphs recursively + by pushing new context when stepping into a subgraph and popping that + context when stepping out. + """ + + def __init__(self, name=None): + self.name = name if name else "" + self._current_graph = [{}] + + def prepare_for_conversion(self, node: InternalTorchIRNode): + """ + Perform any preparation necessary before node-specific frontend conversion + is invoked. + """ + pass + + def add(self, ssa_var, torch_name=None): + """ + Arguments: + ssa_var: Variable to add to the graph being constructed. + torch_name: Optional unique string identifier of the operation. If + omitted, it will use @ssa_var.name. + """ + if torch_name is None: + torch_name = ssa_var.name + if torch_name in self._current_graph[-1]: + print("Torch var {} is added again.".format(torch_name)) + return + self._current_graph[-1][torch_name] = ssa_var + + def __getitem__(self, torch_name): + """ + Lookup a name in the context. Note that since nested blocks must be + able to access anything that was defined before them, we have to + search all contexts for a name, starting with the most local scope. 
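+
+        For example (hypothetical names): with an outer frame defining "x"
+        and an inner frame defining "y", looking up "x" from within the inner
+        frame still succeeds, because every frame is searched starting from
+        the innermost one.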
+        """
+        for idx in reversed(range(len(self._current_graph))):
+            current_graph = self._current_graph[idx]
+            if torch_name in current_graph:
+                return self._current_graph[idx][torch_name]
+        raise ValueError(
+            "Torch var {} not found in context {}".format(torch_name, self.name)
+        )
+
+    def __contains__(self, torch_name):
+        """Returns whether or not the torch var exists in the context."""
+        return torch_name in self._current_graph[-1]
+
+    def push(self, inputs=None):
+        """
+        Add another frame to the context. Optionally provide a tuple of
+        (name list, Var list) to populate the new context frame.
+        """
+        self._current_graph.append({})
+
+        if inputs is not None:
+            if len(inputs[0]) != len(inputs[1]):
+                raise ValueError("name list and Var list must be the same length")
+            for name, var in zip(inputs[0], inputs[1]):
+                self.add(var, torch_name=name)
+
+    def pop(self):
+        """
+        Remove and discard the top context frame.
+        """
+        self._current_graph = self._current_graph[:-1]
+
+    def __str__(self):
+        _str = ""
+        for current_graph in reversed(self._current_graph):
+            __str = ""
+            for k, v in current_graph.items():
+                if hasattr(v, "shape_str"):
+                    shape_str = v.shape_str()
+                elif hasattr(v, "sym_shape"):
+                    shape_str = v.sym_shape()
+                else:
+                    shape_str = "None"
+                __str += "%{} : {}\n".format(k, shape_str)
+            _str += __str + "\n"
+        return _str
+
+    def __repr__(self):
+        return str(self)
+
+
+class TorchConverter:
+    """
+    Class that handles conversion of pytorch models represented in TorchScript
+    format to the MIL format.
+
+    Models passed to the @TorchConverter go from:
+    TorchScript -> Expanded/Optimized Torch IR -> Internal Graph -> CoreML SSA
+    The internal graph representation was added to make testing easier.
+    """
+
+    def __init__(
+        self,
+        torchscript,
+        inputs,
+        outputs=None,
+        cut_at_symbols=None,
+        opset_version=None,
+    ):
+        """
+        Arguments:
+            torchscript: torch.jit.ScriptModule object representing the model to convert.
+            inputs: Input values and optional names. See kwarg in load.py for full description.
+            outputs: List of outputs as ct.InputType. See kwarg in load.py for full description.
+            cut_at_symbols: A list of internal symbol name strings. Graph conversion will
+                terminate once these symbols have been generated. For debugging use
+                only. See kwarg in load.py.
+            opset_version: An int representing the Core ML opset version.
+        """
+        assert isinstance(torchscript, torch.jit.ScriptModule)
+
+        self.inputs = inputs
+        for idx, inp in enumerate(self.inputs):
+            if isinstance(inp, ImageType) and self.inputs[idx].channel_first is None:
+                self.inputs[idx].channel_first = True
+
+        self.torchscript = torchscript
+        self.outputs = outputs
+        self.output_names = get_output_names(self.outputs)
+        self.opset_version = _target(opset_version) if opset_version is not None else None
+        self.context = TranscriptionContext()
+        raw_graph, params_dict = self._expand_and_optimize_ir(self.torchscript)
+        self.params_dict = params_dict
+        self.graph = InternalTorchIRGraph(
+            raw_graph, params_dict, self.inputs, cut_at_symbols
+        )
+
+        # TODO (rdar://106161395): Register Torch IR passes and unify them into the pass pipeline.
+ # Apply Torch IR passes + passes = [ + transform_inplace_ops, + flatten_graph_input_values, + flatten_graph_output_values, + remove_getattr_nodes, + generate_tensor_assignment_ops, + ] + for p in passes: + p(self.graph) + + self.inputs = list(self.graph.inputs.values()) + self._prog = Program() + + @staticmethod + def _check_ops(graph): + """ + Returns the set of ops in @graph that are implemented, and the set + for which no conversion function is registered. @graph can be + either InternalTorchIRGraph or InternalTorchIRBlock. + """ + implemented_ops = set() + missing_ops = set() + for node in graph.nodes: + _add_op = _TORCH_OPS_REGISTRY.get(node.kind, None) + if _add_op is None: + missing_ops.add(node.kind) + else: + implemented_ops.add(node.kind) + for block in node.blocks: + _impl, _miss = TorchConverter._check_ops(block) + implemented_ops.update(_impl) + missing_ops.update(_miss) + return implemented_ops, missing_ops + + @staticmethod + def _create_placeholder(_input): + """ + Converts an InputType into a Placeholder. + + _input: TensorType + """ + shape = _input.shape.symbolic_shape + dtype = _input.dtype + return mb.placeholder(shape, dtype=dtype) + + def check_ops(self): + """ + Returns the set of ops in @self.graph that are implemented, and + the set for which no conversion function is registered. + """ + return TorchConverter._check_ops(self.graph) + + def convert_const(self): + for name, val in self.graph.params.items(): + if not isinstance(val, np.ndarray): + raise ValueError("unsupported class for {} in PyTorch graph: {}".format(name, type(val))) + if val.dtype == np.uint8: + val = val.astype(np.int32) + const = mb.const(val=val, name=name) + self.context.add(const) + + def convert(self): + logger.info("Converting graph.") + + # This will hold the converted model. + prog = self._prog + + # Construct placeholder for input to SSA function + # This is where input renaming occurs + ssa_func_inputs = OrderedDict() + for index, (name, spec) in enumerate(self.graph.inputs.items()): + placeholder = self._create_placeholder(spec) + # Set SSA function input name to user defined name if provided. + if spec.name is not None: + name = spec.name + self.inputs[index].name = name + ssa_func_inputs[name] = placeholder + prog.set_main_input_types(tuple(self.inputs)) + + # Initialize the SSA for conversion + with Function(ssa_func_inputs, opset_version=self.opset_version) as ssa_func: + + # Map internal @self.graph.inputs to user specified @ssa_func_inputs + # If @self.graph.inputs == @ssa_func_inputs this just adds the inputs + # to the context. + for internal_name, users_name in zip( + self.graph.inputs.keys(), ssa_func_inputs.keys() + ): + input_var = ssa_func.inputs[users_name] + if (types.is_tensor(input_var.sym_type) or types.is_scalar(input_var.sym_type)) \ + and (input_var.dtype == types.fp16 or input_var.dtype == types.fp64): + # cast the input var to float32 + # We need to do this because the type inference is very buggy when started from + # float16/float64 typed inputs. Until that is fixed in the following radar + # we cast all inputs of type float16/float64 to float32 as the first step. + # These casts will later get removed, if compute_precision=Float16 is + # provided, which will cause the FP16ComputePrecision pass to run. 
+                    # TODO: remove this when this radar is fixed: rdar://93731970
+                    input_var = mb.cast(x=input_var, dtype="fp32")
+                self.context.add(input_var, torch_name=internal_name)
+
+            self.convert_const()
+
+            # Add the rest of the operations
+            convert_nodes(self.context, self.graph)
+
+            graph_outputs = [self.context[name] for name in self.graph.outputs]
+
+            # An output can be None when it's a None constant, which happens
+            # in Fairseq MT.
+            for g in graph_outputs:
+                if g is None:
+                    msg = "Dropping output {} which is None"
+                    logger.warning(msg.format(g))
+            graph_outputs = [g for g in graph_outputs if g is not None]
+
+            # Output renaming occurs
+            if self.outputs is not None:
+                if len(self.outputs) != len(graph_outputs):
+                    msg = "Number of outputs provided, {}, does not match the number of outputs detected in the model, {}."
+                    raise ValueError(msg.format(
+                        len(self.outputs),
+                        len(graph_outputs),
+                    ))
+            if self.output_names:
+                for index, var in enumerate(graph_outputs):
+                    if self.output_names[index] is not None:
+                        output_rename = self.output_names[index]
+                        var.name = output_rename
+
+            ssa_func.set_outputs(graph_outputs)
+            prog.add_function("main", ssa_func)
+            if self.outputs is not None:
+                prog.set_main_output_types(self.outputs)
+        return prog
+
+    @staticmethod
+    def _jit_pass_lower_graph(graph, torchscript):
+        """
+        This graph pass does a similar thing as torch._C._jit_pass_lower_graph does.
+        It does two things:
+        1. Rename getattr nodes which produce a torch tensor to match the keys in the torch model's state_dict
+        2. Construct the params_dict, with keys similar to the state_dict
+
+        To be more specific, this graph pass traces down a series of GetAttr ops and renames the
+        final node to match the torch model state_dict. It also replaces the node inputs with the
+        first created tensor node of the same name.
+
+        Example:
+        Input graph:
+        graph(%self.1 : __torch__.torch.nn.modules.Sequential, %input.1 : Tensor):
+        %2 : prim::GetAttr[name="linear"](%self.1)
+        %3 : prim::GetAttr[name="weight"](%2)
+        %4 : prim::GetAttr[name="bias"](%2)
+        %5 : prim::GetAttr[name="bias"](%2) # duplicated node
+        %6 : conv(%input.1, %3, %4)
+        %7 : add(%input.1, %5)
+        return (%6, %7)
+
+        Output graph:
+        graph(%self.1 : __torch__.torch.nn.modules.Sequential, %input.1 : Tensor):
+        %2 : prim::GetAttr[name="linear"](%self.1)
+        %linear.weight : prim::GetAttr[name="weight"](%2)
+        %linear.bias : prim::GetAttr[name="bias"](%2)
+        %5 : prim::GetAttr[name="bias"](%2) # duplicated node, it is not used now
+        %6 : conv(%input.1, %linear.weight, %linear.bias)
+        %7 : add(%input.1, %linear.bias) # the second input is replaced
+        return (%6, %7)
+
+        And a dictionary {"linear.weight": ..., "linear.bias": ...} is returned, to record the parameter values.
+        Note that those GetAttr nodes are still in the torch IR graph, but they will be removed in a later
+        graph pass on the coremltools torch internal graph.
+
+        """
+
+        """
+        Each getattr node corresponds to a torch object in the torch IR,
+        it could be either:
+        1. torch.nn.modules: submodule in a torch model. For instance, a linear layer in a MLP network.
+        2. torch.Tensor: torch model parameters. For instance, weight for a conv layer.
+        3. torch._C.ScriptObject: quantized torch model parameters.
+        For example, in the graph above, %2 is pointing to the __torch__.torch.nn.modules.Sequential.linear torch submodule.
+        node_to_module_map tracks these mappings.
+
+        node_to_prefix_map tracks the name for each module;
+        for example, %2 has the prefix name linear and %3 is linear.weight.
+        These names are also keys in the state_dict.
+        """
+        node_to_module_map = {}
+        node_to_prefix_map = {}
+        first_node_with_prefix = {}
+        replace_input = {}
+
+        base_module_node = list(graph.inputs())[0]
+        node_to_module_map[base_module_node] = torchscript
+        node_to_prefix_map[base_module_node] = ""
+
+        """
+        params_dict will be constructed in this graph pass. It contains all const tensors needed for the graph computation.
+        And the value is validated against the state_dict if the key is present in both dictionaries.
+        In some rare cases, state_dict lacks parameters / buffers, so we still need to go through the whole graph ourselves.
+        """
+        params_dict = {}
+        state_dict = torchscript.state_dict(keep_vars=True)
+
+        def _check_is_tensor(node, module):
+            if not isinstance(module, torch.Tensor):
+                return False
+            if str(node.output().type()) not in ("Tensor", "Optional[Tensor]"):
+                raise TypeError("Type \"{}\" not supported".format(node.output().type()))
+            return True
+
+        def _check_is_quantized_tensor(node, module):
+            if not isinstance(module, torch._C.ScriptObject):
+                return False
+            # We only support ScriptObjects that correspond to quantized packed params.
+            assert "PackedParams" in node.output().type().name()
+            return True
+
+        def _lower_graph_block(graph):
+            for node in list(graph.nodes()):
+
+                for block in node.blocks():
+                    _lower_graph_block(block)
+
+                for idx, _input in enumerate(list(node.inputs())):
+                    if _input in replace_input:
+                        node.replaceInput(idx, replace_input[_input])
+
+                kind = node.kind().split("::")[1].lower()
+                if kind != "getattr":
+                    continue
+
+                _input = node.input()
+                _output = node.output()
+                attr_name = getattr(node, node.kindOf("name"))("name")
+
+                module = getattr(node_to_module_map[_input], attr_name)
+                node_to_module_map[_output] = module
+
+                input_prefix = node_to_prefix_map[_input]
+                prefix = input_prefix + '.' + attr_name if input_prefix != "" else attr_name
+                node_to_prefix_map[_output] = prefix
+
+                is_tensor = _check_is_tensor(node, module)
+                is_quantized_tensor = _check_is_quantized_tensor(node, module)
+
+                if is_tensor or is_quantized_tensor:
+                    if is_tensor and prefix in state_dict:
+                        assert torch.equal(
+                            module, state_dict[prefix]
+                        ), "tensor value not consistent between torch ir and state_dict"
+                    if prefix in params_dict:
+                        assert torch.equal(module, params_dict[prefix])
+                        replace_input[_output] = first_node_with_prefix[prefix]
+                    else:
+                        params_dict[prefix] = module
+                        first_node_with_prefix[prefix] = _output
+                        _output.setDebugName(prefix)
+
+        _lower_graph_block(graph)
+
+        return graph, params_dict
+
+    @staticmethod
+    def _expand_and_optimize_ir(torchscript):
+        """
+        Given a torch.jit.ScriptModule, convert it to an optimized
+        torch._C.Graph and a dict mapping model parameter names to tensors.
+        """
+        graph = torchscript.forward.graph
+
+        # From PyTorch code: Inline function and method calls.
+        torch._C._jit_pass_inline(graph)
+        # From PyTorch code: This inlines the forked section in the fork()
+        # callsite and replaces uses of the result of wait() calls with the
+        # values produced from the (now-inlined) forked section.
+        torch._C._jit_pass_inline_fork_wait(graph)
+        # Starting from the return node, marks all nodes that feed into the
+        # output, as well as nodes with side effects. Any nodes not marked are
+        # eliminated.
+        torch._C._jit_pass_dce(graph)
+        # From PyTorch code: checks well-formedness and invariants of graph.
+        torch._C._jit_pass_lint(graph)
+        # Replaces a couple specific ops patterns (add, sub, mul, div, chunk).
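+        # (That pass was renamed in torch 1.6.0, hence the version check and
+        # the differing call in the else branch below.)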
+ if version_lt(torch, "1.6.0"): + torch._C._jit_pass_canonicalize_ops(graph) + torch._C._jit_pass_lint(graph) + + # From PyTorch code: This pass catches all of the small, easy to catch + # peephole optimizations you might be interested in doing. + # Eliminate no-op 'expand' nodes + # Simplify x.t().t() to x + # pass disabled for v1.6.0 and onwards, wrongly captures the shape of dummy inputs during tracing. + torch._C._jit_pass_peephole(graph, addmm_fusion_enabled=False) + else: + # v1.6.0 pass renamed + torch._C._jit_pass_canonicalize_graph_fuser_ops(graph) + torch._C._jit_pass_lint(graph) + + # From PyTorch docs: Renumber the graph so that all structurally + # equivalent graphs have same numbers. + graph = torch._C._jit_pass_canonicalize(graph) + torch._C._jit_pass_lint(graph) + if version_lt(torch, "1.6.0"): + # v1.6.0 JIT changes disallows pulling list values out of + # prim::Constant. We can only pull scalar values. constant + # propagation removes `listConstruct` and results in list values. + # We disallow constant prop pass to keep them as scalars, and rely + # on our own constant prop to interpret `listConstruct`. + torch._C._jit_pass_constant_propagation(graph) + # NOTE: Don't need another DCE, it's included in constant propagation. + torch._C._jit_pass_lint(graph) + + # Get the params_dict and rename the getattr nodes in the graph + graph, params_dict = TorchConverter._jit_pass_lower_graph(graph, torchscript) + + return graph, params_dict diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/dialect_ops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/dialect_ops.py new file mode 100644 index 00000000..101144c6 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/dialect_ops.py @@ -0,0 +1,219 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools.converters.mil.mil import Operation, get_new_symbol, types +from coremltools.converters.mil.mil.input_type import (DefaultInputs, + InputSpec, + TensorInputType) +from coremltools.converters.mil.mil.ops.defs._utils import \ + solve_slice_by_index_shape +from coremltools.converters.mil.mil.ops.registry import SSAOpRegistry +from coremltools.converters.mil.mil.types.symbolic import \ + is_compatible_symbolic_vector + +register_op = SSAOpRegistry.register_op + + +# This file contains the Torch dialect of SSA. Briefly, these ops are only +# understandable in the Torch frontend and not acceptable in the standard op set. +# No backend would support any of the op here. These ops exist to facilitate +# frontend SSA passes, but must be replaced with standard ops during SSA +# passes. + +# All torch op must start with 'torch_' prefix. + +# torch_upsample_nearest_neighbor is dealing with upsample layer which has flexible input shape, +# and recompute_scale_factor is set to True in the original torch layer. +@register_op(namespace="torch") +class torch_upsample_nearest_neighbor(Operation): + """ + Upsample the spatial dimensions (last two dimensions) of the input by + scale factors using nearest-neighbor interpolation. + It corresponds to `torch.nn.functional.interpolate` function with `mode=nearest`, + `recompute_scale_factor=True`, and input with flexible shape. 
+    source: https://pytorch.org/docs/stable/_modules/torch/nn/functional.html#interpolate
+
+    Parameters
+    ----------
+    x: tensor<[b, C, H1, W1],T> (Required)
+        * Must be rank ``4``.
+    output_height: i32
+        * Output height for the height dimension.
+    output_width: i32
+        * Output width for the width dimension.
+
+    Returns
+    -------
+    tensor<[b, C, H2, W2],T>
+        * Tensor with same type as the input.
+        * ``H2`` = output_height
+        * ``W2`` = output_width
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        output_height=TensorInputType(type_domain=types.int32),
+        output_width=TensorInputType(type_domain=types.int32),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def type_inference(self):
+        if self.x.rank != 4:
+            raise ValueError(
+                'input to the "torch_upsample_nearest_neighbor" op must have rank 4'
+            )
+        ret_shape = list(self.x.shape)
+        ret_shape[2] = get_new_symbol()
+        ret_shape[3] = get_new_symbol()
+        return types.tensor(self.x.dtype, ret_shape)
+
+# torch_upsample_bilinear handles upsample layers that have a flexible input shape,
+# with recompute_scale_factor set to True in the original torch layer.
+@register_op(namespace="torch")
+class torch_upsample_bilinear(Operation):
+    """
+    Upsample the spatial dimensions (last two dimensions) of the input by
+    scale factors using bilinear interpolation.
+    It corresponds to `torch.nn.functional.interpolate` function with `mode=bilinear`,
+    `recompute_scale_factor=True`, and input with flexible shape.
+    source: https://pytorch.org/docs/stable/_modules/torch/nn/functional.html#interpolate
+
+    Parameters
+    ----------
+    x: tensor<[b, C, H1, W1], T> (Required)
+        * Must be rank ``4``.
+    output_height: i32
+        * Output height for the height dimension.
+    output_width: i32
+        * Output width for the width dimension.
+    align_corners: const (Optional)
+        * The `align_corners` parameter of the original torch op.
+
+    Returns
+    -------
+    tensor<[b, C, H2, W2], T>
+        * Tensor with same type as the input.
+        * ``H2`` = output_height
+        * ``W2`` = output_width
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        output_height=TensorInputType(type_domain=types.int32),
+        output_width=TensorInputType(type_domain=types.int32),
+        align_corners=TensorInputType(const=True, optional=True, type_domain=types.bool),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            align_corners=True,
+        )
+
+    def type_inference(self):
+        if self.x.rank != 4:
+            raise ValueError(
+                'input to the "torch_upsample_bilinear" op must have rank 4'
+            )
+        ret_shape = list(self.x.shape)
+        ret_shape[2] = get_new_symbol()
+        ret_shape[3] = get_new_symbol()
+        return types.tensor(self.x.dtype, ret_shape)
+
+# torch_tensor_assign handles the tensor assignment operation
+@register_op(namespace="torch")
+class torch_tensor_assign(Operation):
+    """
+    Method for tensor value assignment via indexing and slicing.
+    Suppose we have a tensor ``x``, this method achieves:
+    ``x[begin[0]: end[0]: stride[0], begin[1]: end[1]: stride[1], ...] = value``
+
+    Parameters
+    ----------
+    data: tensor<*?, T> (Required)
+        * Input tensor.
+    updates: tensor<\*K, T> (Required)
+        * Value tensor to be inserted.
+        * The shape of the updates tensor must match the slicing result of the input data.
+    begin: tensor<[rank(x)], i32> (Required)
+        * Starting index for the dimension of slicing.
+ end: tensor<[rank(x)], i32> (Required) + * Ending index for the dimension of slicing. + stride: tensor<[rank(x)], i32> (Optional) + * Default as all ``1``s. + * Stride for the dimension of slicing. + begin_mask: tensor<[rank(x)], bool> (Optional) + * Default to all ``False``. + * If ``begin_mask[i]==True``, neglect ``begin[i]``, and set ``begin[i]`` to ``0``. + end_mask: tensor<[rank(x)], bool> (Optional) + * Default to all ``False``. + * If ``end_mask[i]==True``, neglect ``end[i]``, and set ``end[i]`` to ``x.shape[i]``. + squeeze_mask: tensor<[rank(x)], bool> (Optional) + * Default to all ``False``. + * If ``squeeze_mask[i]==true``, neglect ``end[i]``, and do the pure index at ``begin[i]``. + + Returns + ------- + tensor<*?, T> + - Scalar or tensor. + + Attributes + ---------- + T: fp16, fp32, i32 + """ + + input_spec = InputSpec( + data=TensorInputType(type_domain="T"), + updates=TensorInputType(type_domain="T"), + begin=TensorInputType(type_domain=types.int32), + end=TensorInputType(type_domain=types.int32), + stride=TensorInputType(const=True, optional=True, type_domain=types.int32), + begin_mask=TensorInputType(const=True, optional=True, type_domain=types.bool), + end_mask=TensorInputType(const=True, optional=True, type_domain=types.bool), + squeeze_mask=TensorInputType(const=True, optional=True, type_domain=types.bool), + ) + + type_domains = { + "T": (types.fp16, types.fp32, types.int32), + } + + def default_inputs(self): + return DefaultInputs( + stride=None, + begin_mask=None, + end_mask=None, + squeeze_mask=None, + ) + + def type_inference(self): + # Verify the updates and the data slicing have the same shape + begin = self.begin.val + end = self.end.val + data_rank = self.data.rank + stride = self.stride.val if self.stride is not None else [1] * data_rank + begin_mask = ( + self.begin_mask.val if self.begin_mask is not None else [False] * data_rank + ) + end_mask = self.end_mask.val if self.end_mask is not None else [False] * data_rank + squeeze_mask = ( + self.squeeze_mask.val if self.squeeze_mask is not None else [False] * data_rank + ) + data_shape = self.data.shape + expected_updates_shape = tuple(solve_slice_by_index_shape(data_shape, begin, end, stride, begin_mask, end_mask, squeeze_mask)) + if not is_compatible_symbolic_vector(expected_updates_shape, self.updates.shape): + raise ValueError("The updates tensor should have shape {}. Got {}".format(expected_updates_shape, self.updates.shape)) + return self.data.sym_type diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/internal_graph.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/internal_graph.py new file mode 100644 index 00000000..76633f87 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/internal_graph.py @@ -0,0 +1,336 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import torch + +from collections import OrderedDict +from itertools import islice + +_DEFAULT_OP_NAMESPACES = set(["aten", "prim"]) + + +def _make_ssa_name(name): + """ + Converts a symbol name (string) into an SSA name, by prepending '%'. + Only used for pretty printing the graph. 
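+    For example, "input.1" becomes "%input.1".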
+ """ + if name is None: + return "None" + return "%" + name + + +def _ssa_name_list(names): + """ + Take a list of symbol names (strings) and return them as SSA names. Only + used for pretty printing the graph. + """ + return [_make_ssa_name(x) for x in names] + + +def _find_new_name(old_name, node_names): + """ + Disambiguate a node's name from a list of existing node names by adding + successively larger integers. + """ + count = 0 + new_name = old_name + "." + str(count) if count != 0 else old_name + while new_name in node_names: + count += 1 + new_name = old_name + "." + str(count) + return new_name + + +def _replace_in_list(ls, old_val, new_val): + """Helper function to replace a value in a list.""" + try: + idx = ls.index(old_val) + except ValueError: + pass + else: + ls[idx] = new_val + + +class InternalTorchIRBlock: + """ + coremltools internal representation of a torch IR block. + """ + + def __init__(self, raw_block=None, parent=None, nodes=None, inputs=None, outputs=None): + """" + Arguments: + raw_block: The torch._C.Block to convert, or None. + parent: The InternalTorchIRNode this block belongs to. + nodes: If @raw_block is None, the list of InternalTorchIRNodes in the block + inputs: If @raw_block is None, the list of input symbols. + outputs: If @raw_block is None, the list of output symbols. + """ + + self.nodes = [] + node_names = set() + self.inputs = [] + self.outputs = [] + self.parent = parent + + if raw_block: + # Add nodes + for raw_node in raw_block.nodes(): + new_node = InternalTorchIRNode(raw_node, parent=self) + if new_node.name == new_node.kind: + new_node.name = _find_new_name(new_node.name, node_names) + self.nodes.append(new_node) + node_names.add(new_node.name) + + # Add inputs + for inp in raw_block.inputs(): + self.inputs.append(inp.debugName()) + + # Add outputs + for outp in raw_block.outputs(): + self.outputs.append(outp.debugName()) + else: + self.nodes = nodes + self.inputs = inputs + self.outputs = outputs + + def __str__(self, indent=2): + indent_str = " " * indent + graph_str = "{}block({}):\n".format( + indent_str, ", ".join(_ssa_name_list(self.inputs)) + ) + graph_str += "{}\n".format(indent_str).join( + [x.__str__(indent=indent + 2) for x in self.nodes] + ) + graph_str += "\n{}return ({})".format( + indent_str, ", ".join(_ssa_name_list(self.outputs)) + ) + return graph_str + + def __repr__(self): + return str(self) + + def replace_name(self, old_name, new_name): + """Replaces all instances of @old_name with @new_name in @self.""" + + # Replace graph inputs/outputs + _replace_in_list(self.inputs, old_name, new_name) + _replace_in_list(self.outputs, old_name, new_name) + + for node in self.nodes: + node.replace_name(old_name, new_name) + + +class InternalTorchIRNode: + """ + coremltools internal representation of a torch IR node. + Can construct itself from a provided torchIR node or manually constructed with + args for testing. + + See InternalTorchIRGraph for the motivation behind this structure. + """ + + def __init__( + self, node=None, parent=None, attr=None, inputs=None, outputs=None, kind=None, blocks=None, + ): + """ + Arguments: + node: The torch._C.Node to convert, or None. + parent: The InternalTorchIRGraph/Block this node belongs to. + attr: If @node is not specified, the dict of named attributes. + inputs: If @node is not specified, the list of input symbols. + outputs: If @node is not specified, the list of output symbols. + kind: If @node is not specified, the kind (op) of the node. 
+            blocks: If @node is not specified, the list of InternalTorchIRBlock.
+        """
+
+        self.parent = parent
+        if node is not None:
+            self.inputs = [_input.debugName() for _input in node.inputs()]
+            self.outputs = [output.debugName() for output in node.outputs()]
+            namespace = node.kind().split("::")[0].lower()
+            if namespace in _DEFAULT_OP_NAMESPACES:
+                # We conventionally skip the aten/prim namespaces in our naming.
+                self.kind = node.kind().split("::")[-1].lower()
+            else:
+                self.kind = node.kind().lower()
+            self.blocks = [InternalTorchIRBlock(raw_block=b, parent=self) for b in node.blocks()]
+            self.attr = {
+                name: getattr(node, node.kindOf(name))(name)
+                for name in node.attributeNames()
+            }
+            if "value" not in self.attr:
+                self.attr["value"] = None
+            # If the output is boolean, explicitly cast it so type inference
+            # will work correctly.
+            if len(self.outputs) == 1 and next(node.outputs()).type().str() == "bool":
+                self.attr["value"] = bool(self.attr["value"])
+        else:
+            self.inputs = inputs
+            self.outputs = outputs
+            self.kind = kind
+            self.blocks = blocks if blocks is not None else []
+            self.attr = attr if attr is not None else {"value": None}
+        # On rare occasions, a node has no outputs. In that case, the node's
+        # name will be its kind. However, this no longer guarantees the node's
+        # name is unique. It will be up to the graph constructing the node to
+        # make sure names are unique.
+        self.name = self.outputs[0] if len(self.outputs) > 0 else self.kind
+
+    def __str__(self, indent=2):
+        node_str = " " * indent + "{} = {}".format(
+            ", ".join(_ssa_name_list(self.outputs)), self.kind
+        )
+        node_str += "[{}]".format(
+            ", ".join(
+                ["{}={}".format(n, v) for n, v in self.attr.items() if v is not None]
+            )
+        )
+        node_str += "({})".format(", ".join(_ssa_name_list(self.inputs)))
+        for b in self.blocks:
+            node_str += "\n" + b.__str__(indent=indent + 2)
+        return node_str
+
+    def __repr__(self):
+        return str(self)
+
+    def replace_name(self, old_name, new_name):
+        """Replaces all instances of @old_name with @new_name in @self."""
+
+        _replace_in_list(self.inputs, old_name, new_name)
+        _replace_in_list(self.outputs, old_name, new_name)
+
+        if self.name == old_name:
+            self.name = new_name
+        for block in self.blocks:
+            block.replace_name(old_name, new_name)
+
+
+class InternalTorchIRGraph:
+    """
+    CoreML internal representation of a torch IR graph. A torch._C.Graph
+    object is not an ideal structure to use in converting to CoreML. Conversion
+    to an InternalTorchIRGraph is inserted between the original graph and the
+    final CoreML model to address several issues:
+        1. A torch._C.graph is hard to work with. For example, its .inputs()
+            and .outputs() functions return iterators, so the only way to
+            determine the number of inputs/outputs is by counting to the end.
+            There are other examples of why the torch structure is hard to work
+            with, and this structure alleviates those issues.
+        2. torch._C.graph is an internal API and so we can't count on its
+            stability. By inserting a layer in between, we can handle any changes
+            to torch._C.graph here and isolate the ops code that processes the
+            graph.
+        3. torch._C.graph does not expose a Python constructor. This makes
+            it impossible to write unit tests that isolate specific ops since
+            they have to come from actually converting a PyTorch graph. With an
+            internal structure, we can directly build the test cases we need for
+            unit testing.
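+
+    As a sketch of point 3 (hypothetical values): a graph for a unit test can
+    be built directly, e.g.
+    ``InternalTorchIRGraph(nodes=[...], params={}, inputs=OrderedDict(...),
+    outputs=["out"])``, without converting any PyTorch model.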
+    """
+
+    def __init__(
+        self, raw_graph=None, params_dict=None, input_values=None, cut_at_symbols=None,
+        nodes=None, params=None, inputs=None, outputs=None,
+    ):
+        """
+        Arguments:
+            raw_graph: The torch._C.Graph to convert, or None.
+            params_dict: A dictionary mapping graph parameter names to tensors.
+                Must be given if @raw_graph is not None.
+            input_values: A list of inputs to the graph. Must be given if
+                @raw_graph is not None.
+            cut_at_symbols: The list of desired outputs from the graph. Symbols
+                must be present in the graph. For debugging use only. Can only
+                be given if @raw_graph is not None.
+            nodes: If @raw_graph is None, the list of InternalTorchIRNodes in
+                the graph.
+            params: If @raw_graph is None, the dict mapping parameter names to
+                their numpy value.
+            inputs: If @raw_graph is None, the OrderedDict mapping input names
+                to their example values.
+            outputs: list[str]. If @raw_graph is None, the list of outputs from the graph.
+        """
+
+        self.nodes = []
+        node_names = set()
+        self.params = {}
+        self.inputs = OrderedDict()
+        self.outputs = []
+
+        if raw_graph is not None:
+            # Add nodes
+            for raw_node in raw_graph.nodes():
+                new_node = InternalTorchIRNode(raw_node, parent=self)
+                if new_node.name == new_node.kind:
+                    new_node.name = _find_new_name(new_node.name, node_names)
+                self.nodes.append(new_node)
+                node_names.add(new_node.name)
+
+            # Add params
+            for name, param in params_dict.items():
+                if isinstance(param, torch.Tensor):
+                    value = param.detach().cpu().numpy()
+                else:
+                    value = param
+                self.params[name] = value
+
+            # Add inputs
+            # The first element of the raw_graph.inputs() is the 'self' of the module, which is not used.
+            graph_inputs = list(raw_graph.inputs())[1:]
+            for index, _input in enumerate(islice(graph_inputs, len(input_values))):
+                name = _input.debugName()
+                value = input_values[index]
+                self.inputs[name] = value
+
+            # Add outputs, cutting if @cut_at_symbols is set
+            output_names = cut_at_symbols
+            if output_names is None:
+                output_names = [x.debugName() for x in raw_graph.outputs()]
+            for output in output_names:
+                self.outputs.append(output)
+        else:
+            self.nodes = nodes
+            self.params = params
+            self.inputs = inputs
+            self.outputs = outputs
+
+    def __str__(self):
+        graph_str = "graph(\n"
+        graph_str += self._format_inputs(self.inputs, unpack=True)
+        graph_str += self._format_inputs(self.params)
+        graph_str += "):\n"
+        graph_str += "\n".join([str(x) for x in self.nodes]) + "\n"
+        graph_str += "return ({})".format(", ".join(_ssa_name_list(self.outputs)))
+        return graph_str
+
+    def _format_inputs(self, inputs, unpack=False):
+        def tensor_str(x):
+            try:
+                return "Tensor{}".format(
+                    tuple(list(x.shape.shape if unpack else x.shape) + [str(x.dtype)])
+                )
+            except Exception:
+                return "Custom Params({})".format(type(x))
+
+        inp_str = ""
+        for k, v in inputs.items():
+            if isinstance(v, (tuple, list)):
+                shape_str = "({})".format(", ".join([tensor_str(x) for x in v]))
+            else:
+                shape_str = tensor_str(v)
+            inp_str += "    {} : {},\n".format(_make_ssa_name(k), shape_str)
+        return inp_str
+
+    def __repr__(self):
+        return str(self)
+
+    def replace_name(self, old_name, new_name):
+        """Replaces all instances of @old_name with @new_name in @self."""
+
+        # Replace graph inputs/outputs
+        _replace_in_list(self.inputs, old_name, new_name)
+        _replace_in_list(self.outputs, old_name, new_name)
+
+        for node in self.nodes:
+            node.replace_name(old_name, new_name)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/load.py
b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/load.py new file mode 100644 index 00000000..c95f2777 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/load.py @@ -0,0 +1,112 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os.path as _os_path + +import torch as _torch + +from coremltools import _logger as logger +from coremltools.converters.mil.input_types import InputType, TensorType + +from .converter import TorchConverter, torch_to_mil_types + + +def load(model_spec, inputs, specification_version, + debug=False, outputs=None, cut_at_symbols=None, + **kwargs): + """ + Convert PyTorch model to mil CoreML format. + + Parameters + ---------- + model_spec: String path to .pt file, or a TorchScript object representing + the model to convert. + inputs: Can be a singular element or list of elements of the following form + 1. Any subclass of InputType + 2. torch.Tensor (only shape and dtype will be used) + 3. list of (1. or 2.) + Inputs are parsed in the flattened order that the model accepts them. + If names are not specified: input keys for calling predict on the converted model + will be internal symbols of the input to the graph. + User can specify a subset of names. + debug: bool, optional. Defaults to False. + This flag should generally be False except for debugging purposes + for diagnosing conversion errors. Setting this flag to True will + print the list of supported and unsupported ops found in the model + if conversion fails due to an unsupported op. + outputs (optional): list[ct.InputType] or None + list of either ct.TensorTypes or ct.ImageTypes (both of which are child classes of InputType) + This is the value of the "outputs" argument, passed on by the user in "coremltools.convert" API. + cut_at_symbols (optional): List of internal symbol name strings. Graph conversion will + terminate once these symbols have been generated. For debugging use + only. + """ + torchscript = _torchscript_from_model(model_spec) + + if hasattr(torchscript, 'training') and torchscript.training: + logger.warning("Model is not in eval mode. " + "Consider calling '.eval()' on your model prior to conversion") + if type(torchscript) == _torch.jit._script.RecursiveScriptModule: + logger.warning("Support for converting Torch Script Models is experimental. 
" + "If possible you should use a traced model for conversion.") + + inputs = _convert_to_torch_inputtype(inputs) + converter = TorchConverter( + torchscript, + inputs, + outputs, + cut_at_symbols, + specification_version, + ) + return _perform_torch_convert(converter, debug) + + +def _torchscript_from_model(model_spec): + if isinstance(model_spec, str) and (model_spec.endswith(".pt") or model_spec.endswith(".pth")): + filename = _os_path.abspath(model_spec) + return _torch.jit.load(filename) + elif isinstance(model_spec, _torch.jit.ScriptModule): + return model_spec + else: + raise TypeError( + "@model must either be a PyTorch .pt or .pth file or a TorchScript object, received: {}".format( + type(model_spec) + ) + ) + +def _convert_to_torch_inputtype(inputs): + input_type = [] + for _input in inputs: + if isinstance(_input, (list, tuple)): + input_type.append(_convert_to_torch_inputtype(_input)) + elif isinstance(_input, InputType): + if _input.shape is None: + raise ValueError("'shape' must be provided in the 'inputs' argument for pytorch conversion") + input_type.append(_input) + elif isinstance(_input, _torch.Tensor): + input_type.append( + TensorType( + shape=_input.shape, dtype=torch_to_mil_types[_input.dtype] + ) + ) + else: + raise ValueError( + "Unknown type {} for conversion to InputType.".format(type(_input)) + ) + return input_type + +def _perform_torch_convert(converter, debug): + try: + prog = converter.convert() + except RuntimeError as e: + if debug and "convert function" in str(e): + implemented, missing = converter.check_ops() + print("the following model ops are IMPLEMENTED:") + print("\n".join([" " + str(x) for x in sorted(implemented)])) + print("the following model ops are MISSING:") + print("\n".join([" " + str(x) for x in sorted(missing)])) + raise e + + return prog diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/ops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/ops.py new file mode 100644 index 00000000..b882b234 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/ops.py @@ -0,0 +1,5734 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import builtins +import math as _math +import numbers +from collections.abc import Iterable +from typing import List, Optional + +import numpy as _np +import torch +from tqdm import tqdm as _tqdm + +from coremltools import _logger as logger +from coremltools.converters.mil._deployment_compatibility import ( + AvailableTarget as target, +) +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import Symbol, types +from coremltools.converters.mil.mil.block import ( + is_current_opset_version_compatible_with, +) +from coremltools.converters.mil.mil.ops.defs._utils import ( + MAX_SIZE_CONSTANT_FOLDING, promote_input_dtypes, + solve_slice_by_index_shape) +from coremltools.converters.mil.mil.types import is_bool, nptype_from_builtin +from coremltools.converters.mil.mil.types.symbolic import ( + any_symbolic, + is_symbolic, +) +from coremltools.converters.mil.mil.var import ListVar, Var + +from .._utils import value_at, build_einsum_mil +from .torch_op_registry import _TORCH_OPS_REGISTRY, register_torch_op + +# The pytorch args for many of the below ops were sourced from +# https://github.com/pytorch/pytorch/blob/d971007c291c0ead1003d12cd553d18ddb582207/torch/csrc/jit/mobile/register_mobile_ops.cpp#L216 + + +# Max int64 value. Used as a default value in many PyTorch functions. +PYTORCH_DEFAULT_VALUE = 2**63 - 1 + +VALUE_CLOSE_TO_INFINITY = 1e+38 + + +def _all_outputs_present(context, graph): + """ + Returns true if all the symbols in the graph's output list are + present in context. + """ + for outp in graph.outputs: + try: + context[outp] + except ValueError: + return False + return True + + +def convert_nodes(context, graph): + """ + Iterate over the nodes of a graph or block and convert to MIL. + + Arguments: + context: A TranscriptionContext object to pull node inputs and + assign node outputs. + graph: An InternalTorchIRGraph or InternalTorchIRBlock object. + """ + for node in _tqdm(graph.nodes, desc="Converting PyTorch Frontend ==> MIL Ops", unit=" ops"): + op_lookup = node.kind + if op_lookup.startswith("__") and op_lookup.endswith("__"): + # Some ops may have double underscore, such as `__and__`. + op_lookup = op_lookup[2:-2] + elif op_lookup.endswith("_"): + # This is an "in place" op. + # Look up the standard op instead by removing underscore. + op_lookup = op_lookup[:-1] + add_op = _TORCH_OPS_REGISTRY.get(op_lookup, None) + + logger.info("Converting op {} : {}".format(node.name, node.kind)) + if add_op is None: + raise RuntimeError( + "PyTorch convert function for op '{}' not implemented.".format(node.kind) + ) + + context.prepare_for_conversion(node) + add_op(context, node) + + # We've generated all the outputs the graph needs, terminate conversion. + if _all_outputs_present(context, graph): + break + + +def convert_block(context, block, inputs): + """Convert a block (sub-graph) to MIL. Conversion happens within a new + context frame. + + Arguments: + context: A TranscriptionContext object to pull node inputs and + assign node outputs. + block: An InternalTorchIRBlock object. + inputs: List of Vars from the outer context that map to the block's + expected inputs. The number of inputs provided must match the + number expected by the block. + """ + + assert len(block.inputs) == len(inputs) + + # Start a new context frame. 
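+    # @block.inputs holds the block's input symbol names and @inputs the
+    # corresponding Vars from the outer frame; push() zips them into the new
+    # innermost context frame.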
+ context.push((block.inputs, inputs)) + + # Add the block ops. + convert_nodes(context, block) + + # Collect the block outputs. + outputs = [context[outp] for outp in block.outputs] + + # Return to the previous context frame. + context.pop() + return outputs + + +# Some ops will receive a dtype input as an integer +# which maps to a torch dtype. The below mapping was found by +# converting test models with different dtypes passed to ones. +NUM_TO_TORCH_DTYPE = { + 0: torch.uint8, + 1: torch.int8, + 2: torch.int16, + 3: torch.int32, + 4: torch.int32, + 5: torch.float16, + 6: torch.float32, + 7: torch.float32, + 11: torch.bool, + 12: torch.qint8, + 13: torch.quint8, +} + +NUMPY_DTYPE_TO_TORCH_NUM = { + _np.uint8: 0, + _np.int8: 1, + _np.int16: 2, + _np.int32: 3, + _np.int64: 4, + _np.float16: 5, + _np.float32: 6, + _np.float64: 7, + bool: 11, +} + +NUM_TO_NUMPY_DTYPE = { + 0: _np.uint8, + 1: _np.int8, + 2: _np.int16, + 3: _np.int32, + 4: _np.int32, + 5: _np.float16, + 6: _np.float32, + 7: _np.float32, + 11: bool, +} + +NUM_TO_DTYPE_STRING = { + 3: "int32", + 4: "int32", + 5: "fp16", + 6: "fp32", + 7: "fp32", + 11: "bool", +} + +TYPE_TO_DTYPE_STRING = { + types.bool: "bool", + types.fp16: "fp16", + types.fp32: "fp32", + types.int32: "int32", +} + + +def _get_inputs(context, node, expected=None, min_expected=None) -> List[Var]: + """ + Look up a node's inputs in @context and return them as a list. If + @expected is not None, also verifies the number of inputs matches the + value of @expected. + """ + inputs = [context[name] for name in node.inputs] + if expected is not None: + expected = [expected] if not isinstance(expected, (list, tuple)) else expected + + if len(inputs) not in expected: + raise ValueError( + "node {} ({}) got {} input(s), expected {}".format( + node.name, node.kind, len(inputs), expected + ) + ) + if min_expected is not None: + if len(inputs) < min_expected: + raise ValueError( + "node {} ({}) got {} input(s), expected minimum {} inputs".format( + node.name, node.kind, len(inputs), min_expected + ) + ) + + return inputs + + +def _list_select(shape_var, index): + """ + Sometimes we need to select a specific item from a list. If that item + is known at compile time, extract it as a const. Otherwise, if it's + symbolic, use gather. + """ + if shape_var.can_be_folded_to_const(): + res = mb.const(val=shape_var.val[index]) + else: + res = mb.gather(x=shape_var, indices=index) + return res + + +def _construct_constant(val, name): + # Converter cannot handle torch tensors. + if isinstance(val, torch.Tensor): + val = val.cpu().numpy() + + # MIL casts ints to int32, which can't represent PyTorch's default value. + # So we instead represent it with None, and any ops that might get the + # value will check for None instead. + if isinstance(val, int) and val == PYTORCH_DEFAULT_VALUE: + val = None + + # Pytorch uses inf + if val is not None and isinstance(val, numbers.Number) and _np.isinf(val): + if val < 0: # neg inf + # most negative number in fp32 + val = -3.4e+38 + else: # positive inf + val = 3.4e+38 + if val is None: + return None + else: + return mb.const(val=val, name=name) + + +@register_torch_op +def affine_grid_generator(context, node): + # rdar://73165386 (Improve error handling of coremltools "affine" op PyTorch conversion.) 
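+    # No MIL op is emitted here: PyTorch pairs affine_grid with grid_sample,
+    # so this op only stashes its theta/size/align_corners inputs as consts
+    # keyed by this node's name; `grid_sampler` below folds them into mb.affine.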
+ + affine_op_name = node.name + theta, size, align_corners = _get_inputs(context, node, expected=3) + + # note: only add consts here as PyTorch uses affine_grid + grid_sampler together + is_theta_const = theta.val is not None + if is_theta_const: + context.add(mb.const(val=theta.val, name="{}_theta".format(affine_op_name))) + else: # theta is dynamic input, keep track of it's name + context.add(mb.const(val=theta.name, name="{}_theta".format(affine_op_name))) + + context.add(mb.const(val=size.val, name="{}_size".format(affine_op_name))) + context.add(mb.const(val=align_corners.val, name="{}_align_corners".format(affine_op_name))) + + +@register_torch_op +def grid_sampler(context, node): + affine_op_name = node.inputs[1] + # https://github.com/pytorch/pytorch/blob/00d432a1ed179eff52a9d86a0630f623bf20a37a/aten/src/ATen/native/GridSampler.h#L10-L11 + m_mode = {0: "bilinear", 1: "nearest"} + m_padding_mode = {0: "constant", 1: "border", 2: "reflection"} + + # add `resample` if grid/coordinates is in input, otherwise, + # add `affine` to generate grid from `affine_grid_generator`. + if affine_op_name in context: # add `resample` op + inputs = _get_inputs(context, node, expected=5) + sampling_mode = m_mode[inputs[2].val] + padding_mode = m_padding_mode[inputs[3].val] + align_corners = inputs[4].val + + # When align_corners=False, padding_mode is corresponding to Core ML's symmetric + if padding_mode == "reflection" and align_corners is False: + padding_mode = "symmetric" + + x = mb.resample( + x=inputs[0], + coordinates=inputs[1], + sampling_mode=sampling_mode, + padding_mode=padding_mode, + padding_value=0.0, + coordinates_mode="normalized_minus_one_to_one", + align_corners=align_corners, + name=node.name, + ) + context.add(x) + else: # add `affine` op instead + x = context[node.inputs[0]] + # inputs from `affine_grid_generator` + affine_theta = context["{}_theta".format(affine_op_name)] + affine_size = context["{}_size".format(affine_op_name)] + affine_align_corners = context["{}_align_corners".format(affine_op_name)] + + # affine_theta.val is either name string (dynamic input) or np.ndarray (static values) + # see `affine_grid_generator` for details. + is_theta_const = not isinstance(affine_theta.val, str) + if is_theta_const: + transform_matrix = _np.reshape(affine_theta.val, (affine_theta.shape[0], 6)) + else: # theta is dynamic input, add `reshape` op to PyMIL + transform_matrix = mb.reshape( + x=context[affine_theta.val], + shape=(-1, 6), + name=node.name + "_theta_reshape", + ) + + # inputs from `grid_sampler` + sampling_mode = m_mode[context[node.inputs[2]].val] + padding_mode = m_padding_mode[context[node.inputs[3]].val] + align_corners = context[node.inputs[4]].val + + if sampling_mode != "bilinear": + raise NotImplementedError("'sampling_mode' not supported.") + + if padding_mode != "constant": + raise NotImplementedError("'padding_mode' not supported.") + + if affine_align_corners.val != align_corners: + raise ValueError( + "Op 'affine_grid_generator' and 'grid_sampler' must agree on 'align_corners'." 
+ ) + + x = mb.affine( + x=x, + transform_matrix=transform_matrix, + output_height=affine_size.val[2], + output_width=affine_size.val[3], + sampling_mode=sampling_mode, + padding_mode=padding_mode, + padding_value=0.0, + coordinates_mode="normalized_minus_one_to_one", + align_corners=align_corners, + name=node.name, + ) + context.add(x) + + +@register_torch_op +def silu(context, node): + inputs = _get_inputs(context, node, expected=1) + x = mb.silu(x=inputs[0], name=node.name) + context.add(x) + + +@register_torch_op +def constant(context, node): + assert len(node.inputs) == 0 + assert len(node.outputs) == 1 + + name = node.name + val = node.attr["value"] + + const = _construct_constant(val, name) + context.add(const, torch_name=name) + + +@register_torch_op +def cosine_similarity(context, node): + inputs = _get_inputs(context, node, expected=4) + dim = inputs[-2].val + eps = inputs[-1].val + xy = mb.mul(x=inputs[0], y=inputs[1]) + sum_xy = mb.reduce_sum(x=xy, axes=[dim]) + + xx = mb.mul(x=inputs[0], y=inputs[0]) + sum_xx = mb.reduce_sum(x=xx, axes=[dim]) + yy = mb.mul(x=inputs[1], y=inputs[1]) + sum_yy = mb.reduce_sum(x=yy, axes=[dim]) + + mul_sum_xy = mb.mul(x=sum_xx, y=sum_yy) + div_12 = mb.maximum(x=mul_sum_xy, y=eps * eps) + div_sqrt = mb.sqrt(x=div_12) + + cs = mb.real_div(x=sum_xy, y=div_sqrt, name=node.name) + context.add(cs) + + +@register_torch_op +def selu(context, node): + ALPHA = 1.6732632423543772 + SCALE = 1.0507009873554805 + + x = _get_inputs(context, node, expected=1)[0] + x = mb.elu(x=x, alpha=ALPHA) + x = mb.mul(x=x, y=SCALE, name=node.name) + context.add(x) + + +@register_torch_op +def dot(context, node): + inputs = _get_inputs(context, node, expected=2) + xy = mb.mul(x=inputs[0], y=inputs[1]) + sum_xy = mb.reduce_sum(x=xy, axes=[0]) + context.add(sum_xy, node.name) + + +@register_torch_op +def mv(context, node): + inputs = _get_inputs(context, node, expected=2) + expand = mb.expand_dims(x=inputs[1], axes=[-1], name=node.name + "_expanded") + mv = mb.matmul(x=inputs[0], y=expand, name=node.name + "_mv") + res = mb.squeeze(x=mv, axes=[-1], name=node.name) + context.add(res) + + +@register_torch_op +def outer(context, node): + inputs = _get_inputs(context, node, expected=2) + x = mb.reshape(x=inputs[0], shape=[-1, 1]) + y = mb.reshape(x=inputs[1], shape=[1, -1]) + res = mb.matmul(x=x, y=y, name=node.name) + context.add(res) + + +@register_torch_op +def cross(context, node): + inputs = _get_inputs(context, node, expected=3) + x = inputs[0] + y = inputs[1] + dim = inputs[2] + + x1 = mb.gather(x=x, indices=[1, 2, 0], axis=dim, name="x1") + x2 = mb.gather(x=x, indices=[2, 0, 1], axis=dim, name="x2") + y1 = mb.gather(x=y, indices=[1, 2, 0], axis=dim, name="y1") + y2 = mb.gather(x=y, indices=[2, 0, 1], axis=dim, name="y2") + m1 = mb.mul(x=x1, y=y2) + m2 = mb.mul(x=x2, y=y1) + z = mb.sub(x=m1, y=m2, name=node.name) + context.add(z) + + +@register_torch_op +def frobenius_norm(context, node): + x, dim, keep_dims = _get_inputs(context, node, expected=3) + result = mb.reduce_l2_norm(x=x, axes=dim, keep_dims=keep_dims, name=node.name) + context.add(result) + + +@register_torch_op +def norm(context, node): + x, num, dim, keep_dims = _get_inputs(context, node, expected=4) + assert x is not None and keep_dims is not None and num is not None and dim is not None + temp = _vector_norm(x=x, order=num, dim=dim, keep_dims=keep_dims, name=node.name) + context.add(temp) + + +def _vector_norm(x, order, dim, keep_dims, name): + if order.val == 0: + # sum(x!=0) + x = mb.cast(x=x, dtype="fp32") + 
temp = mb.not_equal(x=x, y=0.)
+        temp = mb.cast(x=temp, dtype='int32')
+        temp = mb.reduce_sum(x=temp, axes=dim, keep_dims=keep_dims, name=name)
+    elif order.val > VALUE_CLOSE_TO_INFINITY:
+        # max(abs(x))
+        temp = mb.abs(x=x)
+        temp = mb.reduce_max(x=temp, axes=dim, keep_dims=keep_dims, name=name)
+    elif order.val < -VALUE_CLOSE_TO_INFINITY:
+        # min(abs(x))
+        temp = mb.abs(x=x)
+        temp = mb.reduce_min(x=temp, axes=dim, keep_dims=keep_dims, name=name)
+    else:
+        # sum(abs(x)^{order})^{(1 / order)}
+        temp = mb.abs(x=x)
+        x, y = promote_input_dtypes([temp, order.val])
+        temp = mb.pow(x=x, y=y)
+        temp = mb.reduce_sum(x=temp, axes=dim, keep_dims=keep_dims)
+        temp = mb.pow(x=temp, y=1.0 / order.val, name=name)
+    return temp
+
+
+@register_torch_op
+def _weight_norm(context, node):
+    v, g, dim = _get_inputs(context, node, expected=3)
+
+    # Determine axes for L2 norm
+    if dim.val == -1:
+        axes = None
+    else:
+        axes = list(range(v.rank))
+        dim = dim.val
+        if dim >= 0:
+            axes.remove(dim)
+        else:
+            axes.remove(v.rank + dim)
+
+    # Calculate L2 norm of v
+    temp = mb.pow(x=v, y=2.)
+    temp = mb.reduce_sum(x=temp, axes=axes, keep_dims=True)
+    norm = mb.pow(x=temp, y=1./2)
+
+    inverse_norm = mb.inverse(x=norm)
+    direction = mb.mul(x=v, y=inverse_norm)
+    result = mb.mul(x=g, y=direction, name=node.name)
+    context.add(result)
+
+
+def _matrix_norm(x, order, dim, keep_dims, name):
+    if order.val == 1:
+        # max(sum(abs(x), dim=0))
+        temp = mb.abs(x=x)
+        temp = mb.reduce_sum(x=temp, axes=[dim[0]], keep_dims=True)
+        temp = mb.reduce_max(x=temp, axes=dim, keep_dims=keep_dims, name=name)
+    elif order.val == -1:
+        # min(sum(abs(x), dim=0))
+        temp = mb.abs(x=x)
+        temp = mb.reduce_sum(x=temp, axes=[dim[0]], keep_dims=True)
+        temp = mb.reduce_min(x=temp, axes=dim, keep_dims=keep_dims, name=name)
+    elif order.val == "fro":
+        # sum(x**2)**(1/2)
+        temp = mb.reduce_l2_norm(x=x, axes=dim, keep_dims=keep_dims, name=name)
+    elif order.val > VALUE_CLOSE_TO_INFINITY:
+        # max(sum(abs(x), dim=1))
+        temp = mb.abs(x=x)
+        temp = mb.reduce_sum(x=temp, axes=[dim[1]], keep_dims=True)
+        temp = mb.reduce_max(x=temp, axes=dim, keep_dims=keep_dims, name=name)
+    elif order.val < -VALUE_CLOSE_TO_INFINITY:
+        # min(sum(abs(x), dim=1))
+        temp = mb.abs(x=x)
+        temp = mb.reduce_sum(x=temp, axes=[dim[1]], keep_dims=True)
+        temp = mb.reduce_min(x=temp, axes=dim, keep_dims=keep_dims, name=name)
+    else:
+        raise RuntimeError("Matrix norm is not defined for the current inputs")
+    return temp
+
+
+@register_torch_op
+def linalg_vector_norm(context, node):
+    x, order, dim, keep_dims, _ = _get_inputs(context, node, expected=5)
+    assert x is not None and keep_dims is not None and order is not None
+    temp = _vector_norm(x=x, order=order, dim=dim, keep_dims=keep_dims, name=node.name)
+    context.add(temp)
+
+
+@register_torch_op
+def linalg_matrix_norm(context, node):
+    x, order, dim, keep_dims, _ = _get_inputs(context, node, expected=5)
+    assert x is not None and keep_dims is not None and order is not None and dim is not None
+    assert len(dim.val) == 2
+    temp = _matrix_norm(x=x, order=order, dim=dim.val, keep_dims=keep_dims, name=node.name)
+    context.add(temp)
+
+
+@register_torch_op
+def linalg_norm(context, node):
+    x, order, dim, keep_dims, _ = _get_inputs(context, node, expected=5)
+    assert x is not None and keep_dims is not None
+    if dim is None:
+        dim = _np.arange(x.rank)
+    else:
+        dim = dim.val
+    if order is None:
+        temp = mb.reduce_l2_norm(x=x, axes=dim, keep_dims=keep_dims, name=node.name)
+    elif len(dim) == 2:
+        temp = _matrix_norm(
+            x=x, order=order,
dim=dim, keep_dims=keep_dims, name=node.name + ) + else: + temp = _vector_norm(x=x, order=order, dim=dim, keep_dims=keep_dims, name=node.name) + context.add(temp) + + +@register_torch_op +def hardswish(context, node): + inputs = _get_inputs(context, node, expected=1) + x = inputs[0] + + w = mb.thresholded_relu(x=x, alpha=-3.0) + y = mb.sigmoid_hard( + x=w, alpha=1.0 / 6, beta=0.5 + ) # ``y = min(max(alpha * x + beta, -1), 1) + result = mb.mul(x=w, y=y, name=node.name) + + context.add(result) + + +@register_torch_op +def reshape_as(context, node): + inputs = _get_inputs(context, node, expected=2) + x = inputs[0] + ref = inputs[1] + shape = mb.shape(x=ref) + result = mb.reshape(x=x, shape=shape, name=node.name) + context.add(result) + + +def _array_construct(context, node, array_type): + assert len(node.outputs) == 1 + inputs = _get_inputs(context, node) + scalar_inputs = [ + inp + for inp in inputs + if isinstance(inp, Var) and inp.can_be_folded_to_const() and len(inp.shape) == 0 + ] + + if len(scalar_inputs) == len(inputs): + # All the list items are compile-time scalar constants, so let's create + # a new const that concatenates them. + val = array_type([inp.val for inp in inputs]) + const = mb.const(val=val, name=node.name) + context.add(const) + else: + # If at least one input to the construct op is non-const, collect + # the inputs and add them directly to the context. Ops that use this + # node's output will take the list directly as input. + context.add(array_type(inputs), node.name) + + +@register_torch_op +def tupleconstruct(context, node): + _array_construct(context, node, array_type=tuple) + + +@register_torch_op +def listconstruct(context, node): + _array_construct(context, node, array_type=list) + + +@register_torch_op +def eq(context, node): + inputs = _get_inputs(context, node, expected=2) + x = inputs[0] + y = inputs[1] + if is_bool(x.dtype): + x = mb.cast(x=x, dtype='int32') + if is_bool(y.dtype): + y = mb.cast(x=y, dtype='int32') + x, y = promote_input_dtypes([x, y]) + equal_to = mb.equal(x=x, y=y, name=node.name) + context.add(equal_to) + + +@register_torch_op +def ne(context, node): + inputs = _get_inputs(context, node, expected=2) + x = inputs[0] + y = inputs[1] + if is_bool(x.dtype): + x = mb.cast(x=x, dtype='int32') + if is_bool(y.dtype): + y = mb.cast(x=y, dtype='int32') + x, y = promote_input_dtypes([x, y]) + equal_to = mb.not_equal(x=x, y=y, name=node.name) + context.add(equal_to) + + +@register_torch_op +def le(context, node): + inputs = _get_inputs(context, node, expected=2) + x, y = promote_input_dtypes(inputs) + less_equal = mb.less_equal(x=x, y=y, name=node.name) + context.add(less_equal) + + +@register_torch_op +def lt(context, node): + inputs = _get_inputs(context, node, expected=2) + x, y = promote_input_dtypes(inputs) + less = mb.less(x=x, y=y, name=node.name) + context.add(less) + + +@register_torch_op +def ge(context, node): + inputs = _get_inputs(context, node, expected=2) + x, y = promote_input_dtypes(inputs) + greater_equal = mb.greater_equal(x=x, y=y, name=node.name) + context.add(greater_equal) + + +@register_torch_op +def gt(context, node): + inputs = _get_inputs(context, node, expected=2) + x, y = promote_input_dtypes(inputs[:2]) + greater = mb.greater(x=x, y=y, name=node.name) + context.add(greater) + + +@register_torch_op(torch_alias=["t"]) +def transpose(context, node): + assert len(node.outputs) == 1 + inputs = _get_inputs(context, node) + x = inputs[0] + + if len(node.inputs) == 1: + # PyTorch has several transpose ops that can be emitted. 
This one is only
+        # emitted when .t() is called on a tensor, which means it can only be
+        # called on a matrix.
+        if len(x.shape) > 2:
+            raise ValueError("transpose without dims for rank > 2 is unsupported")
+        res = mb.transpose(x=x, perm=[1, 0], name=node.name)
+    else:
+        assert len(inputs) == 3
+        ax0 = inputs[1].val
+        ax1 = inputs[2].val
+
+        perm = list(range(len(x.shape)))
+        perm[ax0] = ax1
+        perm[ax1] = ax0
+
+        res = mb.transpose(x=x, perm=perm, name=node.name)
+    context.add(res)
+
+
+@register_torch_op
+def permute(context, node):
+    inputs = _get_inputs(context, node, expected=2)
+    perm = mb.transpose(x=inputs[0], perm=inputs[1], name=node.name)
+    context.add(perm)
+
+
+@register_torch_op
+def frac(context, node):
+    # Frac(x) = x - floor(abs(x)) * sign(x)
+
+    x = _get_inputs(context, node, expected=1)[0]
+    floor_abs = mb.floor(x=mb.abs(x=x))
+    sign_abs_floor = mb.mul(x=floor_abs, y=mb.sign(x=x))
+    res = mb.sub(x=x, y=sign_abs_floor)
+    context.add(res, torch_name=node.name)
+
+
+@register_torch_op
+def pixel_shuffle(context, node):
+    inputs = _get_inputs(context, node, expected=2)
+    perm = mb.pixel_shuffle(x=inputs[0], upscale_factor=inputs[1], name=node.name)
+    context.add(perm)
+
+
+@register_torch_op
+def pixel_unshuffle(context, node):
+    inputs = _get_inputs(context, node, expected=2)
+    downscale_factor = _np.uint32(inputs[1].val)
+    perm = mb.pixel_unshuffle(x=inputs[0], downscale_factor=downscale_factor, name=node.name)
+    context.add(perm)
+
+
+@register_torch_op(torch_alias=["bmm"])
+def matmul(context, node):
+    inputs = _get_inputs(context, node, expected=2)
+    if inputs[1].val is not None and \
+            len(inputs[1].shape) == 2 and len(inputs[0].shape) <= 3:
+        res = mb.linear(x=inputs[0], weight=_np.transpose(inputs[1].val), name=node.name)
+    else:
+        res = mb.matmul(x=inputs[0], y=inputs[1], name=node.name)
+    context.add(res)
+
+
+@register_torch_op
+def add(context, node):
+    add_inputs = _get_inputs(context, node)
+    assert len(node.outputs) == 1
+
+    # TODO (sberardi): 3rd param to aten::add is a scale factor, need to handle that.
+    # out=input+alpha x other
+    # rdar://60175736
+    if len(add_inputs) > 2 and add_inputs[2].val != 1:
+        raise ValueError("ADD does not support scale factor param")
+    x, y = promote_input_dtypes(add_inputs[:2])
+    add_node = mb.add(x=x, y=y, name=node.name)
+    context.add(add_node)
+
+
+@register_torch_op
+def cumsum(context, node):
+    inputs = _get_inputs(context, node, expected=3)
+    x = inputs[0]
+    if is_bool(x.dtype):
+        x = mb.cast(x=x, dtype='int32')
+    res = mb.cumsum(x=x, axis=inputs[1], name=node.name)
+    context.add(res)
+
+
+@register_torch_op
+def addmm(context, node):
+    # addmm(Tensor input, Tensor mat1, Tensor mat2, Scalar beta=1, Scalar alpha=1)
+    # output = beta * input + alpha * mat1 * mat2
+
+    assert len(node.outputs) == 1
+    inputs = _get_inputs(context, node, expected=5)
+    bias = inputs[0]
+    mat1 = inputs[1]
+    mat2 = inputs[2]
+    beta = inputs[3]
+    alpha = inputs[4]
+
+    if beta.val != 1.0:
+        # Apply scaling factor beta to the bias.
+        bias = mb.mul(x=beta, y=bias, name=bias.name + "_scaled")
+        context.add(bias)
+
+    if alpha.val != 1.0:
+        # Apply scaling factor alpha to the input.
+        mat1 = mb.mul(x=alpha, y=mat1, name=mat1.name + "_scaled")
+        context.add(mat1)
+
+    # MIL linear will transpose mat2, but addmm expects that mat1 and mat2
+    # can multiply as is. So we add a transpose.
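+    # For example, with mat1 of shape (M, K) and mat2 of shape (K, N):
+    # mb.linear computes x @ weight.T, so passing mat2.T of shape (N, K) as
+    # the weight reproduces mat1 @ mat2.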
+ mat2 = mb.transpose(x=mat2, perm=[1, 0], name=mat2.name + "_transposed") + context.add(mat2) + + addmm_node = mb.linear(x=mat1, weight=mat2, bias=bias, name=node.name) + context.add(addmm_node) + + +@register_torch_op +def linear(context, node): + inputs = _get_inputs(context, node, expected=[2, 3]) + x = inputs[0] + W = inputs[1] + bias = inputs[2] if len(node.inputs) == 3 else None + res = mb.linear(x=x, weight=W, bias=bias, name=node.name) + context.add(res) + + +@register_torch_op(torch_alias=["conv2d"]) +def _convolution(context, node): + inputs = _get_inputs(context, node) + + x = inputs[0] + # PyTorch and MIL has same weight layout + # Conv: [Cout, Cin, *D] + # ConvTranspose: [Cin, Cout, *D] + weight = inputs[1] + bias = inputs[2] + strides = inputs[3] + + x, weight = promote_input_dtypes([x, weight]) + + # Expand padding. Torch accepts either an int (for all dimensions) or an n-tuple of ints (one per dimension), but + # we require a (2 * n)-tuple, where n is the number of spatial dimensions, start and end for each spatial dimension + pad = inputs[4].val + + if len(weight.shape) in (3, 4): + # 1D and 2D: Need to explicitly state L-R, T-B pad + pad = _np.repeat(pad, 2) + elif len(weight.shape) == 5: + # 3D: Need to explicitly state F-Bk, L-R, T-B pad + if type(pad) == int: + pad = _np.repeat(pad, 6) + elif len(pad) == 3: + pad = _np.repeat(pad, 2) + else: + raise ValueError( + "Invalid weight dimension. Must be 3, 4, or 5 for 1D, 2D, or 3D convolution, respectively." + ) + + dilations = inputs[5] + out_pad = None + if len(inputs) >= 12: + transposed = inputs[6].val + out_pad = inputs[7].val + group = inputs[8] + elif len(inputs) == 7: + transposed = False + group = inputs[6] + else: + raise ValueError( + "unexpected number of inputs for node {} ({}): {}".format( + node.name, node.kind, len(inputs) + ) + ) + + kwargs = { + "x": x, + "weight": weight, + "strides": strides, + "pad_type": "custom", + "pad": pad, + "dilations": dilations, + "groups": group, + "name": node.name, + } + # Bias is optional in PyTorch's convolution. + if bias is not None: + kwargs["bias"] = bias + + if transposed is True: + # Transposed convolution + # Handle output_padding using pre-pad or post-crop + pre_pad = [0] * len(pad) + post_crop = [0] * len(pad) + + if out_pad is not None and any(out_pad): + output_padding = [0] * len(pad) + # output padding adds additional padding on one of the side of dimension + # i.e. bottom from top-bottom, + # right from left-right + # back from front-back + # Core ML padding structure is similar [top, bottom, left, right] + # mapping output_padding to simplify further processing! + # + # For ConvTranspose2d: [bottom, right] -> [0, b, 0, r] + output_padding = [ + 0 if i % 2 == 0 else out_pad[i // 2] for i in range(len(pad)) + ] + if sum(pad) == 0 and any(output_padding): + raise ValueError( + "ConvTranspose configuration of padding=0 and output_padding > 0 not supported!" 
+ ) + post_crop = pad.copy() + pad *= 0 + for i in range(0, len(pad)): + if post_crop[i] >= output_padding[i]: + post_crop[i] -= output_padding[i] + else: + pre_pad[i] = output_padding[i] - post_crop[i] + kwargs["pad"] = pre_pad + if any(pre_pad): + # Constant pad requires pad to be of length 2*input_rank + pre_pad = [0] * 2 * (len(x.shape) - 2) + pre_pad + x = mb.pad(x=x, pad=pre_pad) + kwargs["x"] = x + if any(post_crop): + del kwargs["name"] + + conv = mb.conv_transpose(**kwargs) + if any(post_crop): + # TODO: rdar://65575826 (PyTorch converter: output_padding mapping to slice + # instead of crop layer for 1 and 3D ConvTranspose) + if len(post_crop) == 2 and conv.rank == 3: + # Number of elements to crop from right = post_crop[-1]. + # Since slicing supports negative indexing, end_id = -1 * post_crop[-1] + conv = mb.slice_by_index( + x=conv, + begin=[0, 0, post_crop[0]], + end=[0, 0, -1 * post_crop[-1]], + begin_mask=[True, True, False], + end_mask=[True, True, False], + name=node.name, + ) + elif len(post_crop) == 4 and conv.rank == 4: + conv = mb.crop( + x=conv, + crop_height=post_crop[:2], + crop_width=post_crop[2:4], + name=node.name, + ) + else: + raise ValueError( + "output_padding is supported only for ConvTranspose1D or ConvTranspose2D!" + ) + else: + # Normal convolution + conv = mb.conv(**kwargs) + context.add(conv) + + +# Convolution with "same, valid" padding +@register_torch_op +def _convolution_mode(context, node): + inputs = _get_inputs(context, node, expected=7) + mode = inputs[4].val + + context.add( + mb.conv( + x=inputs[0], + weight=inputs[1], + bias=inputs[2], + strides=inputs[3], + pad_type=mode, + dilations=inputs[5], + groups=inputs[6], + name=node.name, + ) + ) + + +@register_torch_op +def softmax(context, node): + inputs = _get_inputs(context, node) + + x = inputs[0] + axis = inputs[1] + res = mb.softmax(x=x, axis=axis, name=node.name) + context.add(res) + + +@register_torch_op +def flatten(context, node): + inputs = _get_inputs(context, node) + + x = inputs[0] + dims = list(x.shape) + start_val = inputs[1].val + end_val = inputs[2].val + + start = len(dims) + start_val if start_val < 0 else start_val + end = len(dims) + end_val if end_val < 0 else end_val + + if start > len(dims) or end > len(dims) or start < 0 or end < 0: + raise ValueError( + "Invalid start and end. (start, end) == ({}, {})".format(start, end_val) + ) + if start > end: + raise ValueError( + "Start must be before end. 
(start, end) == ({}, {})".format(start, end_val) + ) + x_shape = mb.shape(x=x) + + shape1 = mb.slice_by_index(x=x_shape, begin=[0], end=[start]) + shape2 = mb.slice_by_index(x=x_shape, begin=[end + 1], end=[len(dims)]) + + flatten_dim = -1 + if not any_symbolic(x.shape): + flatten_dim = 1 + for dim in dims[start: end + 1]: + flatten_dim *= dim + + shape = mb.concat(values=(shape1, [flatten_dim], shape2), axis=0) + shape = mb.cast(x=shape, dtype="int32") + reshape = mb.reshape(x=x, shape=shape, name=node.name) + context.add(reshape) + + +@register_torch_op +def _reshape_from_tensor(context, node): + inputs = _get_inputs(context, node, expected=2) + + reshape = mb.reshape(x=inputs[0], shape=inputs[1], name=node.name) + context.add(reshape) + + +@register_torch_op +def softsign(context, node): + inputs = _get_inputs(context, node, expected=1) + + res = mb.softsign(x=inputs[0], name=node.name) + context.add(res) + + +@register_torch_op +def relu(context, node): + inputs = _get_inputs(context, node, expected=1) + + res = mb.relu(x=inputs[0], name=node.name) + context.add(res) + + +@register_torch_op +def prelu(context, node): + inputs = _get_inputs(context, node, expected=2) + x = inputs[0] + alpha = inputs[1] + # In the MIL backend, it assumes that the inputs of prelu should have + # at least rank 3, i.e. [batch, channel, spatial_dims*]. + if x.rank >= 2: + alpha = alpha.val + alpha = _np.ones((x.shape[1],)) * alpha + + if x.rank <= 2: + axes = [1, 2] if x.rank == 1 else [2] + x = mb.expand_dims(x=x, axes=axes) + x = mb.prelu(x=x, alpha=alpha) + res = mb.squeeze(x=x, axes=axes, name=node.name) + else: + res = mb.prelu(x=x, alpha=alpha, name=node.name) + + context.add(res) + + +@register_torch_op +def linspace(context, node): + inputs = _get_inputs(context, node, min_expected=3) + + start = inputs[0] + end = inputs[1] + nums = inputs[2] + start = mb.cast(x=start, dtype="fp32") + end = mb.cast(x=end, dtype="fp32") + + if start.can_be_folded_to_const() and end.can_be_folded_to_const() and nums.can_be_folded_to_const(): + start_val = start.val + end_val = end.val + nums_val = nums.val + if nums_val < MAX_SIZE_CONSTANT_FOLDING: + res = mb.const(val=_np.linspace(start_val, end_val, nums_val), name=node.name) + context.add(res) + return + + if nums.val is None: + msg = "Dynamic steps input for torch.linspace is not supported. Please use torch.arange instead" + raise NotImplementedError(msg) + else: + if nums.val == 1: + res = mb.expand_dims(x=start, axes=[0], name=node.name) + else: + # step = (end - start) / (nums - 1) + x = mb.sub(x=end, y=start) + y = mb.sub(x=nums, y=1) + x = mb.cast(x=x, dtype="fp32") + y = mb.cast(x=y, dtype="fp32") + step = mb.real_div(x=x, y=y) + + # Note that the range_1d op excluded the end point, + # so we have to add the end back to the resulting array. 
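+            # e.g. linspace(0., 1., 5): step = 0.25, range_1d yields
+            # [0., 0.25, 0.5, 0.75], and concatenating `end` gives the final
+            # [0., 0.25, 0.5, 0.75, 1.].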
+ arange = mb.range_1d(end=end, start=start, step=step) + new_end = mb.expand_dims(x=end, axes=[0]) + res = mb.concat(values=[arange, new_end], axis=0, name=node.name) + context.add(res) + + +@register_torch_op +def relu6(context, node): + inputs = _get_inputs(context, node, expected=1) + + res = mb.relu6(x=inputs[0], name=node.name) + context.add(res) + + +@register_torch_op +def einsum(context, node): + a = context[node.inputs[1]][0] + b = context[node.inputs[1]][1] + equation = context[node.inputs[0]].val + x = build_einsum_mil(a, b, equation, node.name) + context.add(x) + + +@register_torch_op +def eye(context, node): + # TODO: rdar://104400568 ([PyTorch] Use MIL ops to construct the eye matrix in order to avoid directly folding the input into a const) + inputs = _get_inputs(context, node, expected=[5, 6]) + if len(inputs) == 5: + eye = _np.eye(inputs[0].val) + if len(inputs) == 6: + eye = _np.eye(inputs[0].val, inputs[1].val) + eye = mb.const(val=eye, name=node.name) + context.add(eye) + + +@register_torch_op +def elu(context, node): + ## Torch port to ATen adds scale and input_scale which is set to 1 + inputs = _get_inputs(context, node, expected=4) + + res = mb.elu(x=inputs[0], alpha=inputs[1], name=node.name) + context.add(res) + + +@register_torch_op +def leaky_relu(context, node): + inputs = _get_inputs(context, node, expected=2) + + res = mb.leaky_relu(x=inputs[0], alpha=inputs[1], name=node.name) + context.add(res) + + +@register_torch_op +def rrelu(context, node): + inputs = _get_inputs(context, node, expected=5) + + # Alpha in evaluation mode is just the average between upper and lower. + lower_alpha = inputs[1] + upper_alpha = inputs[2] + alpha = (lower_alpha.val + upper_alpha.val) / 2 + + res = mb.leaky_relu(x=inputs[0], alpha=alpha, name=node.name) + context.add(res) + + +@register_torch_op +def softplus(context, node): + inputs = _get_inputs(context, node, expected=3) + x = inputs[0] + beta_ = inputs[1].val + C = x.shape[1] + alpha_br = _np.repeat(1.0 / beta_, C).astype('float32') + beta_br = _np.repeat(beta_, C).astype('float32') + + res = mb.softplus_parametric(x=x, alpha=alpha_br, beta=beta_br, name=node.name) + context.add(res) + + +@register_torch_op +def mish(context, node): + inputs = _get_inputs(context, node, expected=1) + x = inputs[0] + + softplus = mb.softplus(x=x) + tanh = mb.tanh(x=softplus) + res = mb.mul(x=x, y=tanh, name=node.name) + context.add(res) + + +def _adjust_pad_for_ceil_mode(input_shape, kernel_size, stride_sizes, pad_sizes): + """ Given an input tensor and pooling parameters, add the extra input + padding needed to replicate ceil_mode. + MIL 3D pooling does not support ceil_mode natively, but we can + workaround by padding the input appropriately. 
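+    The extra right-padding computed below is exactly enough to make the
+    ceil_mode=False output size formula match the ceil_mode=True one.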
+
+    PyTorch output size formula for pooling:
+    (reference: https://github.com/pytorch/pytorch/blob/375c30a7177442fb9d6de7516a9ae4031ae324c4/aten/src/ATen/native/Pool.h#L28)
+
+    When ceil mode is True:
+        out_dim = floor((in_dim + pad_l + pad_r - kernel_size + (stride-1)) / stride) + 1
+        if (out_dim-1) * stride >= in_dim + pad_l and (pad_l > 0 or pad_r > 0):
+            out_dim = out_dim - 1
+    When ceil mode is False:
+        out_dim = floor((in_dim + pad_l + pad_r - kernel_size) / stride) + 1
+
+    # follow the approach here to calculate padding:
+    # https://github.com/pytorch/pytorch/blob/edf751ca2fededecdd9366874c761431c0f61f01/aten/src/ATen/native/mkldnn/Pooling.cpp#L121
+    # which keeps increasing the pad_r value until the output size without ceil mode matches that with ceil mode
+    """
+
+    def _calculate_pool_output_size(in_dim, kernel, stride, pad_l, pad_r, ceil_mode):
+        if ceil_mode:
+            out_dim = _math.floor((in_dim + pad_r + pad_l - kernel + stride - 1) / stride) + 1
+            if (out_dim - 1) * stride >= in_dim + pad_l and (pad_l > 0 or pad_r > 0):
+                out_dim = out_dim - 1
+        else:
+            out_dim = _math.floor((in_dim + pad_r + pad_l - kernel) / stride) + 1
+        return out_dim
+
+    new_pad = pad_sizes.copy()
+    for idx in range(len(input_shape)):
+        if is_symbolic(input_shape[idx]):
+            logger.warning(
+                "pooling padding adjusted to support ceil_mode=True for a symbolic dimension. "
+                "Output shape of the pool op may be wrong for certain input shapes."
+            )
+            new_pad[2 * idx + 1] += stride_sizes[idx] - 1
+        else:
+            out_dim_with_ceil_mode = _calculate_pool_output_size(
+                input_shape[idx],
+                kernel_size[idx],
+                stride_sizes[idx],
+                pad_sizes[2 * idx],
+                pad_sizes[2 * idx + 1],
+                True,
+            )
+            is_equal = False
+            while not is_equal:
+                out_dim_without_ceil_mode = _calculate_pool_output_size(
+                    input_shape[idx],
+                    kernel_size[idx],
+                    stride_sizes[idx],
+                    new_pad[2 * idx],
+                    new_pad[2 * idx + 1],
+                    False,
+                )
+                is_equal = True
+                if out_dim_without_ceil_mode < out_dim_with_ceil_mode:
+                    new_pad[2 * idx + 1] += 1
+                    is_equal = False
+
+    return new_pad
+
+
+def _max_pool(context, node, inputs):
+    x = inputs[0]
+    kernel_sizes = inputs[1]
+    strides = inputs[2]
+    if strides.op.op_type == "const" and (not list(strides.val)):
+        strides = mb.const(val=kernel_sizes.val, name=strides.name)
+
+    pad_type = "custom"
+    # Need to explicitly state L-R, T-B pad
+    pad = inputs[3]
+    pad = _np.repeat(pad.val, 2)
+    dilation = inputs[4].val
+    ceil_mode = inputs[5].val
+    if _np.any(dilation > 1):
+        # See: rdar://60633736 (Implement dilation for mil op max_pool)
+        raise ValueError("@max_pool does not support dilation > 1")
+    spatial_rank = len(pad) // 2
+    if spatial_rank > 2 and ceil_mode is True and list(strides.val) != [1] * len(strides.val):
+        # MIL does not support ceil_mode for 3D pool, so adjust the padding values
+        # instead; ceil_mode only makes a difference when the strides are not all 1.
+        x_spatial_dimensions = x.shape[-spatial_rank:]
+        pad = _adjust_pad_for_ceil_mode(x_spatial_dimensions, kernel_sizes.val, strides.val, pad)
+
+    pool = mb.max_pool(
+        x=x,
+        kernel_sizes=kernel_sizes,
+        strides=strides,
+        pad_type=pad_type,
+        pad=pad,
+        name=node.name,
+        ceil_mode=ceil_mode if spatial_rank <= 2 else False,
+    )
+    context.add(pool)
+
+
+@register_torch_op
+def max_pool1d(context, node):
+    inputs = _get_inputs(context, node, expected=6)
+    _max_pool(context, node, inputs)
+
+
+@register_torch_op
+def max_pool2d(context, node):
+    inputs = _get_inputs(context, node, expected=6)
+    _max_pool(context, node,
inputs) + + +@register_torch_op +def max_pool3d(context, node): + inputs = _get_inputs(context, node, expected=6) + _max_pool(context, node, inputs) + + +@register_torch_op +def minimum(context, node): + inputs = _get_inputs(context, node, expected=2) + assert len(node.outputs) == 1 + x = context[node.inputs[0]] + y = context[node.inputs[1]] + out = mb.minimum(x=x, y=y, name=node.name) + context.add(out) + + +@register_torch_op +def clamp_min(context, node): + x = _get_inputs(context, node, expected=2) + x = mb.clip(x=x[0], alpha=x[1], beta=_np.inf, name=node.name) + context.add(x) + + +@register_torch_op +def maximum(context, node): + inputs = _get_inputs(context, node, expected=2) + assert len(node.outputs) == 1 + x = context[node.inputs[0]] + y = context[node.inputs[1]] + out = mb.maximum(x=x, y=y, name=node.name) + context.add(out) + + +@register_torch_op +def div(context, node): + inputs = _get_inputs(context, node, expected=[2, 3]) + + if len(inputs) > 2 and inputs[2] is not None: + rounding_mode = inputs[2].val + if rounding_mode == "floor": + # round towards negative infinity + # e.g.: + # values before floor: [2.6, -3.4, -3.6] + # values after floor: [2, -4, -4] + res = mb.floor_div(x=inputs[0], y=inputs[1], name=node.name) + elif rounding_mode == "trunc": + # round towards 0 + # e.g.: + # values before trunc: [2.6, -3.4, -3.6] + # values after trunc: [2, -3, -3] + x = mb.cast(x=inputs[0], dtype="fp32") + y = mb.cast(x=inputs[1], dtype="fp32") + z = mb.real_div(x=x, y=y) + s = mb.sign(x=z) + all_positive = mb.mul(x=z, y=s) + all_positive_floor = mb.floor(x=all_positive) + res = mb.mul(x=all_positive_floor, y=s, name=node.name) + else: + raise NotImplementedError( + 'rounding mode "{}" not supported in the "div" op'.format(rounding_mode) + ) + else: + x = mb.cast(x=inputs[0], dtype="fp32") + y = mb.cast(x=inputs[1], dtype="fp32") + res = mb.real_div(x=x, y=y, name=node.name) + context.add(res) + + +@register_torch_op(torch_alias=["floordiv"]) +def floor_divide(context, node): + inputs = _get_inputs(context, node, expected=2) + inputs = promote_input_dtypes(inputs) + div_res = mb.floor_div(x=inputs[0], y=inputs[1]) + # Pytorch's floor_divide always returns fp32, even if the inputs are int + res = mb.cast(x=div_res, dtype='fp32', name=node.name) + context.add(res) + + +@register_torch_op +def true_divide(context, node): + inputs = _get_inputs(context, node, expected=2) + res = mb.real_div(x=inputs[0], y=inputs[1], name=node.name) + context.add(res) + + +@register_torch_op +def mul(context, node): + inputs = _get_inputs(context, node, expected=2) + x, y = promote_input_dtypes(inputs) + res = mb.mul(x=x, y=y, name=node.name) + context.add(res) + + +@register_torch_op +def pow(context, node): + inputs = _get_inputs(context, node, expected=2) + x, y = promote_input_dtypes(inputs) + res = mb.pow(x=x, y=y, name=node.name) + context.add(res) + + +@register_torch_op(torch_alias=["rsub"]) +def sub(context, node): + inputs = _get_inputs(context, node, expected=[2, 3]) + assert len(node.outputs) == 1 + + if node.kind == "rsub": + # rsub reverses the order of arguments + y = inputs[0] + x = inputs[1] + else: + x = inputs[0] + y = inputs[1] + + if len(inputs) > 2: + alpha = inputs[2].val + + # TODO (sberardi): 3rd param to aten::sub is a scale factor, need to handle that. 
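+        # (only alpha == 1 is supported today; any other value raises below)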
+ # out=input-alpha x other + # rdar://60175736 + if alpha != 1: + raise ValueError("SUB does not support scale factor param") + + x, y = promote_input_dtypes([x, y]) + res = mb.sub(x=x, y=y, name=node.name) + context.add(res) + + +@register_torch_op( + torch_alias=[ + "sum", + "logsumexp", + ]) +def mean(context, node): + inputs = _get_inputs(context, node) + + x = inputs[0] + if types.is_bool(x.dtype): + # TODO: In the future when MIL op supports bool, we need to use curr_opset_version to decide + # if we want to cast or not. + x = mb.cast(x=x, dtype="fp32") + kwargs = {"x": x, "name": node.name} + + # @axes is optional, so omit if None. + axes = inputs[1] + if axes is not None: + # @axes needs to be a list, but if only one axis was specified in the + # model, it will be constructed as an int. Construct a new constant as a + # list. + if not isinstance(axes.val, _np.ndarray): + axes = mb.const(val=[axes.val], name=axes.name + "_list") + context.add(axes) + kwargs["axes"] = axes + + # @keep_dims is optional. + if len(inputs) >= 3: + keep_dims = inputs[2] + kwargs["keep_dims"] = keep_dims + + # Last input to mean is an optional output tensor. We always expect this to + # be None or absent. + assert len(inputs) <= 3 or inputs[3] is None + if node.kind == "sum": + res = mb.reduce_sum(**kwargs) + elif node.kind == "logsumexp": + res = mb.reduce_log_sum_exp(**kwargs) + else: + res = mb.reduce_mean(**kwargs) + context.add(res) + + +@register_torch_op +def squeeze(context, node): + inputs = _get_inputs(context, node) + if len(inputs) == 1: + res = mb.squeeze(x=inputs[0], name=node.name) + elif len(inputs) == 2: + squeeze_dim = inputs[1].val + res = mb.squeeze(x=inputs[0], axes=(squeeze_dim,), name=node.name) + context.add(res) + + +@register_torch_op +def unsqueeze(context, node): + inputs = _get_inputs(context, node, expected=2) + unsqueeze = mb.expand_dims(x=inputs[0], axes=[inputs[1].val], name=node.name) + context.add(unsqueeze) + + +@register_torch_op +def size(context, node): + inputs = _get_inputs(context, node, expected=[1, 2]) + x = inputs[0] + + # Get the shape of the tensor. + if types.is_complex(x.dtype): + size_node = mb.complex_shape(x=inputs[0], name=node.name + "_shape") + else: + size_node = mb.shape(x=inputs[0], name=node.name + "_shape") + + # Get the size of the tensor along the input dimension. + if len(node.inputs) == 2: + dim = inputs[1].val + size_node = _list_select(size_node, dim) + context.add(size_node, node.name) + + +@register_torch_op +def _shape_as_tensor(context, node): + inputs = _get_inputs(context, node, expected=1) + + # Get the shape of the tensor. 
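+    # mb.shape already yields the shape as a 1-D tensor, which is the same
+    # contract as torch._shape_as_tensor, so this maps one-to-one.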
+ shape_node = mb.shape(x=inputs[0], name=node.name) + context.add(shape_node, node.name) + + +@register_torch_op(torch_alias=["reshape"]) +def view(context, node): + inputs = _get_inputs(context, node, expected=2) + x = inputs[0] + shape = inputs[1] + + if isinstance(shape, ListVar): + length = mb.list_length(ls=shape) + indices = mb.range_1d(start=0, end=length, step=1) + shape = mb.list_gather(ls=shape, indices=indices) + + if ( + isinstance(shape, list) + and all([isinstance(dim, Var) and len(dim.shape) == 0 for dim in shape]) + and any([dim.val is None for dim in shape]) + ): + shape = mb.concat(values=shape, axis=0) + + shape = mb.cast(x=shape, dtype="int32") + view = mb.reshape(x=x, shape=shape, name=node.name) + context.add(view) + + +@register_torch_op(torch_alias=['constant_pad_nd']) +def pad(context, node): + inputs = _get_inputs(context, node) + x = inputs[0] + + pad = inputs[1] + if pad.val is not None: + pad = pad.val.reshape((-1, 2))[::-1].reshape(-1).tolist() + missing_dims = x.rank - (len(pad) // 2) + pad = [0, 0] * missing_dims + pad + + if len(inputs) == 4: + mode = inputs[2].val + assert mode in ('constant', 'reflect', 'replicate') + val_index = 3 + else: + mode = 'constant' + val_index = 2 + + scalar_val = inputs[val_index] if inputs[val_index] else 0.0 + if inputs[val_index] and inputs[val_index].op.op_type == "const": + scalar_val = float(scalar_val.val) + + res = mb.pad(x=x, pad=pad, mode=mode, constant_val=scalar_val, name=node.name) + context.add(res) + + +@register_torch_op +def adaptive_avg_pool2d(context, node): + _adaptive_pool2d(context, node, mb.avg_pool, mb.reduce_mean) + + +@register_torch_op +def adaptive_max_pool2d(context, node): + _adaptive_pool2d(context, node, mb.max_pool, mb.reduce_max) + + +def _adaptive_pool2d_non_fixed_kernel_size_and_stride(x, output_shape, name, reduce_op): + ''' + If the input dimension is not evenly divisible by the output dimension, then the + stride and kernel size used by PyTorch is not fixed. This is true for both the + height and width dimension. + ''' + + def get_kernel_indexes_1d(in_dimension, out_dimension): + results = [] + for i in range(out_dimension): + start = _math.floor(i * in_dimension / out_dimension) + end = _math.ceil((i + 1) * in_dimension / out_dimension) + results.append((start, end)) + + return results + + pool_results = [] + + for s2, e2 in get_kernel_indexes_1d(x.shape[2], output_shape[0]): + for s3, e3 in get_kernel_indexes_1d(x.shape[3], output_shape[1]): + cur_kernel = mb.slice_by_index( + x=x, + begin=[0, 0, s2, s3], + end=[x.shape[0], x.shape[1], e2, e3], + ) + cur_result = reduce_op( + x=cur_kernel, + axes=[-2, -1], + keep_dims=True + ) + pool_results.append(cur_result) + + return mb.reshape( + x=mb.concat(values=pool_results, axis=-1), + shape=[x.shape[0], x.shape[1], output_shape[0], output_shape[1]], + name=name, + ) + + +def _adaptive_pool2d(context, node, pool_op, reduce_op): + # Get input tensor and output shape + inputs = _get_inputs(context, node, expected=2) + x = inputs[0] + output_shape = inputs[1].val + assert isinstance(output_shape, _np.ndarray) and len(output_shape) == 2 + output_shape = tuple(output_shape) + + if output_shape == (1, 1): + # Represent (1,1) output size with global reduce op + result = reduce_op(x=x, axes=[-2, -1], keep_dims=True, name=node.name) + elif x.shape is None or any_symbolic(x.shape): + raise ValueError( + "Adaptive pooling is only supported when input tensor size is known or output size == (1,1). 
" + "Received: input size == {}, output size == {}".format( + x.shape_str(), output_shape, + ) + ) + elif x.shape[-2] % output_shape[-2] == 0 and x.shape[-1] % output_shape[-1] == 0: + # Stride and and kernel size is fixed + strides = [ind // outd for ind, outd in zip(x.shape[-2:], output_shape)] + kernel_sizes = [ + ind - s * (outd - 1) + for ind, outd, s in zip(x.shape[-2:], output_shape, strides) + ] + result = pool_op( + x=x, + kernel_sizes=kernel_sizes, + strides=strides, + pad_type="valid", + name=node.name, + ) + else: + result = _adaptive_pool2d_non_fixed_kernel_size_and_stride( + x, output_shape, node.name, reduce_op + ) + + context.add(result) + + +@register_torch_op +def batch_norm(context, node): + inputs = _get_inputs(context, node, expected=9) + # inputs skipped: + # float momentum (6) + # bool cudnn_enabled (8) + input_rank = inputs[0].rank + if input_rank < 2 or input_rank > 5: + raise ValueError( + "BatchNorm: Encountered invalid input rank during translation in torch frontend." + ) + + _input = inputs[0] + weight = inputs[1] + bias = inputs[2] + running_mean = inputs[3] + running_var = inputs[4] + training = inputs[5].val + eps = inputs[7] + + # If training = True, the mean and variance of the current batch of data are used to normalize the input data. + # If training = False, data statistics running_mean and running_var are used instead. + # Note that, even in the evaluation mode (after calling model.eval()), the training parameter can still be true + # and it just refers to a different computation as mentioned above. + + # helper functions for different type of batch norm + def _add_batch_norm_dynamic(): + x = _input + + if training or (running_mean is None) or (running_var is None): + axes = [axis for axis in range(x.rank) if axis != 1] + mean = mb.reduce_mean(x=x, axes=axes, keep_dims=True) + num = mb.sub(x=x, y=mean) + square = mb.mul(x=num, y=num) + variance = mb.reduce_mean(x=square, axes=axes, keep_dims=True) + shape = mb.shape(x=variance) + else: + shape = [1] * x.rank + shape[1] = -1 if any_symbolic(running_mean.shape) else running_mean.shape[0] + mean = mb.reshape(x=running_mean, shape=shape) + num = mb.sub(x=x, y=mean) + variance = mb.reshape(x=running_var, shape=shape) + + variance_add_epsilon = mb.add(x=variance, y=eps) + sqrt = mb.sqrt(x=variance_add_epsilon) + + name = node.name if weight is None and bias is None else node.name + "_div" + x = mb.real_div(x=num, y=sqrt, name=name) + + if weight is not None: + weight_reshape = mb.reshape(x=weight, shape=shape) + name = node.name if bias is None else node.name + "_mul" + x = mb.mul(x=x, y=weight_reshape, name=name) + + if bias is not None: + bias_reshape = mb.reshape(x=bias, shape=shape) + x = mb.add(x=x, y=bias_reshape, name=node.name) + + context.add(x) + + def _add_batch_norm_1d(): + # first expand the 3d tensor to 4d, and call the standard mb.batch_norm + x = mb.expand_dims(x=_input, axes=[-1], name=node.name + "_rank2_expansion") + bn = mb.batch_norm( + x=x, + mean=running_mean, + variance=running_var, + gamma=weight, + beta=bias, + epsilon=eps, + name=node.name + "_batch_norm_1d", + ) + bn = mb.squeeze(x=bn, name=node.name, axes=[-1]) + context.add(bn) + + def _add_batch_norm(): + bn = mb.batch_norm( + x=_input, + mean=running_mean, + variance=running_var, + gamma=weight, + beta=bias, + epsilon=eps, + name=node.name, + ) + context.add(bn) + + is_batch_norm_1d_rank_2 = input_rank == 2 + + if training or running_mean.val is None or running_var.val is None or weight is None or bias is None: + 
_add_batch_norm_dynamic() + elif is_batch_norm_1d_rank_2: + _add_batch_norm_1d() + else: + _add_batch_norm() + + +@register_torch_op +def instance_norm(context, node): + inputs = _get_inputs(context, node, expected=9) + x = inputs[0] + weight = inputs[1] + bias = inputs[2] + eps = inputs[7] + x = mb.instance_norm( + x=x, + gamma=weight, + beta=bias, + epsilon=eps, + name=node.name, + ) + context.add(x) + + +@register_torch_op +def group_norm(context, node): + inputs = _get_inputs(context, node, expected=6) + x = inputs[0] + num_groups = inputs[1].val + weight = inputs[2] + bias = inputs[3] + eps = inputs[4] + n,c = x.shape[0],x.shape[1] # at minimum (N, C) required + input_shape = [*x.shape] # n, c, * + num_groups = builtins.min(num_groups,c) + new_shape = [n, num_groups, c//num_groups] + new_shape += [*x.shape[2:]] # adds remaining dims + num_extra_axes = len(x.shape[2:]) + axes_ = [int(i) for i in range(2, 2 + num_extra_axes + 1)] + weight_shape, bias_shape = [1,c], [1,c] + weight_shape += [1 for _ in range(num_extra_axes)] + bias_shape += [1 for _ in range(num_extra_axes)] + + x = mb.reshape(x=x, shape=new_shape) + mean = mb.reduce_mean(x=x, axes=axes_, keep_dims=True) + var = _std(x,axes_,True,False,eps.val) + x = mb.sub(x=x,y=mean) + x = mb.real_div(x=x,y=var) + x = mb.reshape(x=x, shape=input_shape) + if weight is not None: + weight = mb.reshape(x=weight, shape=weight_shape) + x = mb.mul(x=x,y=weight) + if bias is not None: + bias = mb.reshape(x=bias, shape=bias_shape) + x = mb.add(x=x,y=bias) + context.add(x,node.name) + + +@register_torch_op +def embedding(context, node): + inputs = _get_inputs(context, node) + _input = inputs[0] + indices = inputs[1] + + padding_idx = -1 + scale_grad_by_freq = False + sparse = False + if len(inputs) >= 3: + padding_idx = inputs[2].val + if len(inputs) >= 4: + scale_grad_by_freq = inputs[3].val + if len(inputs) >= 5: + sparse = inputs[4].val + + if padding_idx != -1 or scale_grad_by_freq or sparse: + logger.warning( + "Core ML embedding (gather) layer does not support any " + "inputs besides the weights and indices. Those given " + "will be ignored." + ) + + indices = mb.cast(x=indices, dtype="int32") + + # Changing the axis from 0 is not an option in torch, so we don't expose it + gather = mb.gather(x=_input, indices=indices, name=node.name) + context.add(gather) + + +@register_torch_op +def hardtanh(context, node): + inputs = _get_inputs(context, node, expected=3) + _input = inputs[0] + min_val = inputs[1].val + max_val = inputs[2].val + + res = mb.clip(x=_input, alpha=min_val, beta=max_val, name=node.name) + context.add(res) + + +@register_torch_op(torch_alias=['concat']) +def cat(context, node): + inputs = _get_inputs(context, node) + axis = 0 if len(inputs) == 1 else inputs[1] + concat = mb.concat( + values=promote_input_dtypes(inputs[0]), axis=axis, name=node.name + ) + context.add(concat) + + +@register_torch_op +def stack(context, node): + inputs = _get_inputs(context, node) + + values = inputs[0] + + if len(inputs) < 2: + axis = 0 + else: + axis = inputs[1] + + if len(values) == 1: + res = mb.expand_dims(x=values[0], axes=[axis.val], name=node.name) + else: + res = mb.stack(values=values, axis=axis, name=node.name) + context.add(res) + + +@register_torch_op +def item(context, node): + inputs = _get_inputs(context, node, expected=1) + + if inputs[0].shape == (): + # MIL ops that reduce already output a scalar, so no need to do + # anything. 
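+        # (a 0-D Var is already scalar-valued, so it can be passed through as is)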
+ res = inputs[0] + elif _np.all([d == 1 for d in inputs[0].shape]): + # Item only makes sense when called on a length 1 tensor. We use + # reduce_max as a workaround for not having a way to extract a scalar + # from a symbolic tensor. + res = mb.reduce_max(x=inputs[0], name=node.name) + else: + raise ValueError("expected input to be a scalar or a length 1 tensor") + context.add(res, node.name) + + +def _cast(context, node, dtype, dtype_name): + inputs = _get_inputs(context, node, expected=1) + x = inputs[0] + # Input must either be a scalar or a (1 x 1 x ... x 1) tensor + if not (len(x.shape) == 0 or _np.all([d == 1 for d in x.shape])): + raise ValueError("input to cast must be either a scalar or a length 1 tensor") + + if x.can_be_folded_to_const(): + # If x is a compile-time constant, directly cast it to @dtype if it's + # not one already. + if not isinstance(x.val, dtype): + res = mb.const(val=dtype(x.val), name=node.name) + else: + res = x + elif x.shape == (1,): + x = mb.squeeze(x=x, name=node.name + "_item") + res = mb.cast(x=x, dtype=dtype_name, name=node.name) + else: + if len(x.shape) > 0: + # TODO: There's no MIL op to extract a value from a symbolic tensor, + # so as a workaround we use reduce_max to convert it to a scalar. + x = mb.reduce_max(x=x, name=node.name + "_item") + res = mb.cast(x=x, dtype=dtype_name, name=node.name) + context.add(res, node.name) + + +@register_torch_op(torch_alias=["bool"]) +def _bool(context, node): + _cast(context, node, bool, "bool") + + +@register_torch_op(torch_alias=["int"]) +def _int(context, node): + _cast(context, node, int, "int32") + + +@register_torch_op +def layer_norm(context, node): + inputs = _get_inputs(context, node, expected=6) + _input = inputs[0] + normalized_shape = inputs[1] + weight = inputs[2] + bias = inputs[3] + eps = inputs[4] + # cudnn_enable = inputs[5] unused + + layer_norm = mb.layer_norm( + x=_input, + axes=list(range(-len(normalized_shape.val), 0)), + gamma=weight, + beta=bias, + epsilon=eps, + name=node.name, + ) + context.add(layer_norm) + + +@register_torch_op +def numtotensor(context, node): + inputs = _get_inputs(context, node, expected=1) + x = inputs[0] + if x.shape != (): + raise ValueError( + "numtotensor expected scalar input, got tensor with shape {}".format( + x.shape + ) + ) + + if x.can_be_folded_to_const(): + res = mb.const(val=[x.val], name=node.name) + context.add(res) + else: + context.add(x, node.name) + + +def _ifzo_to_ifoz(weights, name): + """ + i, f, z, o -> i, f, o, z + where weights_split[0] == i, etc. + Used to transform lstm weights from pytorch + to Core ML format + """ + split_size = weights.shape[0] // 4 + weights_split = mb.split(x=weights, split_sizes=_np.array([split_size] * 4), axis=0) + return mb.concat( + values=[weights_split[0], weights_split[1], weights_split[3], weights_split[2]], + axis=0, + ) + + +def _pytorch_hidden_to_coreml_milops(x, name): + """ + Used to transform lstm state values (hn, cn) + from pytorch to Core ML format. + """ + split_size = x.shape[0] // 2 + x_split = mb.split(x=x, split_sizes=_np.array([split_size] * 2), axis=0) + x_concat = mb.concat( + values=[x_split[0], x_split[1]], + axis=2, + ) + # (4.) See docstring to @lstm + return mb.squeeze(x=x_concat, axes=_np.array([0]), name=name) + + +def _add_gru_layer(_input, h0, wi, wh, bi, bh, h_list_name, h_name): + """ + Add a single GRU layer. + Please note that the Core ML GRU has different definition from Torch, + so we cannot use mb.gru, and need to implement it with while loop. 
+ To be more specific, in Core ML: + + o_t = activation(W_{io} x_t + r_t * W_{ho} h_(t−1) + b_{o}) + + while torch has + o_t = activation(W_{io} x_t + b_{io} + r_t * (W_{ho} h_(t−1) + b_{ho})) + + Inputs: + _input : (seq_len, batch_size, input_dim) + h0 : (1, batch_size, hidden_dim) + wi : (3*hidden_dim, input_dim) for the first layer, else (3*hidden_dim, hidden_dim) + wh : (3*hidden_dim, hidden_dim) + bi : (3*hidden_dim) + bh : (3*hidden_dim) + + Return: + h_list : the list contains all hidden states for each time step + with shape (seq_len, batch_size, hidden_dim) + h : the last hidden state, with shape (1, batch_size, hidden_dim + """ + + # split the weights and bias + w_ir, w_iz, w_in = _np.split(wi, 3) + w_hr, w_hz, w_hn = _np.split(wh, 3) + b_ir, b_iz, b_in = _np.split(bi, 3) + b_hr, b_hz, b_hn = _np.split(bh, 3) + + # allocate hlist + # hlist : (seq_len, batch_size, hidden_dim) + x_shape = mb.shape(x=_input) + seq_len = mb.slice_by_index(x=x_shape, begin=[0], end=[1]) + h_shape = mb.shape(x=h0) + h_shape = mb.slice_by_index(x=h_shape, begin=[1], end=[3]) + h_list_shape = mb.concat(values=[seq_len, h_shape], axis=0) + h_list = mb.fill(shape=h_list_shape) + + # concate h0 to h_list + # h_list: (seq_len + 1, batch_size, hidden_dim) + h_list = mb.concat(values=[h0, h_list], axis=0) + + def cond(i, h_list): + return mb.less(x=i, y=seq_len) + + def body(i, h_list): + # slice for the x and state for time step i + # the resulting shape: + # xt : (batch_size, input_dim) + # h_prev : (batch_size, hidden_dim) + + xt = mb.gather(x=_input, indices=i, axis=0) + h_prev = mb.gather(x=h_list, indices=i, axis=0) + + xt = mb.squeeze(x=xt, axes=[0]) + h_prev = mb.squeeze(x=h_prev, axes=[0]) + + # rt = sigmoid(wir * xt + whr * h_prev + bir + bhr) + # rt : (batch_size, hidden_dim) + rt_1 = mb.linear(x=xt, weight=w_ir, bias=b_ir) + rt_2 = mb.linear(x=h_prev, weight=w_hr, bias=b_hr) + rt = mb.add(x=rt_1, y=rt_2) + rt = mb.sigmoid(x=rt) + + # zt = sigmoid(wiz * xt + whz * h_prev + biz + bhz) + # zt : (batch_size, hidden_dim) + zt_1 = mb.linear(x=xt, weight=w_iz, bias=b_iz) + zt_2 = mb.linear(x=h_prev, weight=w_hz, bias=b_hz) + zt = mb.add(x=zt_1, y=zt_2) + zt = mb.sigmoid(x=zt) + + # nt = tanh(win * xt + bin + rt(whn * h_prev + bhn)) + # nt : (batch_size, hidden_dim) + nt_1 = mb.linear(x=xt, weight=w_in, bias=b_in) + nt_2 = mb.linear(x=h_prev, weight=w_hn, bias=b_hn) + nt_2 = mb.mul(x=rt, y=nt_2) + nt = mb.add(x=nt_1, y=nt_2) + nt = mb.tanh(x=nt) + + # h = (1-zt) * nt + zt* h_prev + # h : (batch_size, hidden_dim) + h_1 = mb.sub(x=1., y=zt) + h_1 = mb.mul(x=h_1, y=nt) + h_2 = mb.mul(x=zt, y=h_prev) + h = mb.add(x=h_1, y=h_2) + + # update counter + counter = mb.add(x=i, y=1) + + # update h and h_list + h = mb.expand_dims(x=h, axes=[0]) + h_list = mb.scatter(data=h_list, indices=counter, updates=h) + + return ( + counter, + h_list, + ) + + _, h_list = mb.while_loop( + _cond=cond, _body=body, loop_vars=([0], h_list), + ) + + # slice h0 out of h_list + h_list = mb.slice_by_index( + x=h_list, + begin=[1, 0, 0], + end=[0, 0, 0], + begin_mask=[False, True, True], + end_mask=[True, True, True], + name=h_list_name, + ) + + # get the last state of h_list + if seq_len.val is None or seq_len.val > 1: + h = mb.slice_by_index( + x=h_list, + begin=[-1, 0, 0], + end=[-2, 0, 0], + begin_mask=[False, True, True], + end_mask=[False, True, True], + stride=[-1, 1, 1], + name=h_name, + ) + else: + h = h_list + + return h_list, h + + +@register_torch_op +def gru(context, node): + inputs = _get_inputs(context, node, expected=9) + + 
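+    # torch's gru packs its arguments as [input, h0, flat weights, has_biases,
+    # num_layers, dropout, train, bidirectional, batch_first]; index 6 (the
+    # train flag, presumably) is unused for inference conversion.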
+    _input = inputs[0]
+    h0 = inputs[1]
+    weights_list = inputs[2]
+    has_bias = inputs[3].val
+    num_layers = inputs[4].val
+    dropout = inputs[5]
+    bidirectional = inputs[7].val
+    batch_first = inputs[8].val
+
+    # For each layer of GRU, the layout of the weights list is [Wi, Wh, bi, bh] with has_bias == True,
+    # and [Wi, Wh] with has_bias == False.
+    # If bidirectional == True, the list is doubled, with the extra entries covering the backward direction.
+    expected_num_weights = 2 * num_layers * (int(has_bias) + 1) * (int(bidirectional) + 1)
+    if len(weights_list) != expected_num_weights:
+        raise ValueError(
+            "Incorrect weights shape for gru layer: Expected: {}. Received {}".format(
+                expected_num_weights, len(weights_list)
+            )
+        )
+
+    # Transpose the input data to (seq_len, batch_size, input_dim) if batch_first == True
+    if batch_first:
+        _input = mb.transpose(x=_input, perm=[1, 0, 2])
+
+    # iterate through all the layers
+    x = _input
+    state_out_list = []
+
+    def _get_weights_and_bias(weights_list, index, num_layers, has_bias, bidirectional, mode):
+        num_weights_per_layer = len(weights_list) // num_layers
+        weights = weights_list[
+            num_weights_per_layer * index : num_weights_per_layer * (index + 1)
+        ]
+
+        if bidirectional:
+            weights_f, weights_r = (
+                weights[: num_weights_per_layer // 2],
+                weights[num_weights_per_layer // 2 :],
+            )
+            assert len(weights_f) == len(weights_r)
+        else:
+            weights_f, weights_r = weights, []
+
+        if mode == "forward":
+            weights = weights_f
+        elif mode == "reverse":
+            weights = weights_r
+
+        wi, wh = weights[0].val, weights[1].val
+
+        if has_bias:
+            bi, bh = weights[2].val, weights[3].val
+        else:
+            hidden_dim = wh.shape[1]
+            bi, bh = _np.zeros(3 * hidden_dim), _np.zeros(3 * hidden_dim)
+
+        return wi, wh, bi, bh
+
+    def _get_initial_state(h0, i, bidirectional, mode):
+
+        if mode == "forward":
+            return mb.slice_by_index(
+                x=h0,
+                begin=[(1 + int(bidirectional)) * i, 0, 0],
+                end=[(1 + int(bidirectional)) * i + 1, 0, 0],
+                begin_mask=[False, True, True],
+                end_mask=[False, True, True],
+            )
+        if mode == "reverse":
+            assert bidirectional
+            return mb.slice_by_index(
+                x=h0,
+                begin=[2 * i + 1, 0, 0],
+                end=[2 * (i + 1), 0, 0],
+                begin_mask=[False, True, True],
+                end_mask=[False, True, True],
+            )
+
+    seq_output_name = node.outputs[0]  # output sequence name
+    state_output_name = node.outputs[1]  # output state name
+
+    for i in range(num_layers):
+        # get layer names
+        x_name = seq_output_name + "_layer_" + str(i) if i < num_layers - 1 else seq_output_name
+        h_name = state_output_name + '_layer_' + str(i) if num_layers > 1 else state_output_name
+
+        if batch_first:
+            x_name += "_tmp"
+
+        if bidirectional:
+            x_f_name = x_name + '_forward'
+            h_f_name = h_name + '_forward'
+            x_r_name = x_name + '_backward'
+            h_r_name = h_name + '_backward'
+        else:
+            x_f_name = x_name
+            h_f_name = h_name
+
+        # forward direction
+        x_f = x
+        wi_f, wh_f, bi_f, bh_f = _get_weights_and_bias(
+            weights_list, i, num_layers, has_bias, bidirectional, "forward"
+        )
+        initial_h_f = _get_initial_state(h0, i, bidirectional, "forward")
+        x_f, h_f = _add_gru_layer(x_f, initial_h_f, wi_f, wh_f, bi_f, bh_f, x_f_name, h_f_name)
+
+        # reverse direction
+        if bidirectional:
+            x_r = mb.reverse(x=x, axes=[0])
+            wi_r, wh_r, bi_r, bh_r = _get_weights_and_bias(
+                weights_list, i, num_layers, has_bias, bidirectional, "reverse"
+            )
+            initial_h_r = _get_initial_state(h0, i, bidirectional, "reverse")
+            x_r, h_r = _add_gru_layer(
+                x_r,
+                initial_h_r,
+                wi_r,
+                wh_r,
+                bi_r,
+                bh_r,
+                x_r_name + "_reverse",
+                h_r_name,
+            )
+            x_r = mb.reverse(x=x_r, axes=[0], name=x_r_name)
+
+            # concatenate the outputs from the forward and reverse directions
+            x = mb.concat(values=[x_f, x_r], axis=2, name=x_name)
+            h = mb.concat(values=[h_f, h_r], axis=0, name=h_name)
+        else:
+            x = x_f
+            h = h_f
+
+        state_out_list.append(h)
+
+    # rnn output
+    if batch_first:
+        x = mb.transpose(x=x, perm=[1, 0, 2], name=seq_output_name)
+    context.add(x, seq_output_name)
+
+    # state output
+    if len(state_out_list) > 1:
+        h = mb.concat(values=state_out_list, axis=0, name=state_output_name)
+    context.add(h, state_output_name)
+
+
+def _add_simple_rnn(context, node, activation):
+    inputs = _get_inputs(context, node, expected=9)
+
+    '''
+    Batch size: B
+    Sequence length: S
+    Input dimension: C
+    Hidden dimension: H
+
+    (1) _input : (B, S, C) if batch_first == True, else (S, B, C)
+    (2) h0 : (num_layers, B, H)
+    '''
+    _input = inputs[0]
+    h0 = inputs[1]
+    weights_list = inputs[2]
+    has_bias = inputs[3].val
+    num_layers = inputs[4].val
+    dropout = inputs[5]
+    bidirectional = inputs[7].val
+    batch_first = inputs[8].val
+
+    # We only support unidirectional simple RNN for now
+    if bidirectional:
+        raise NotImplementedError("Bidirectional simple RNN not supported.")
+
+    expected_num_weights = 2 * num_layers * (int(has_bias) + 1)
+    if len(weights_list) != expected_num_weights:
+        raise ValueError(
+            "Incorrect weights shape for simple RNN layer: Expected: {}. Received {}".format(
+                expected_num_weights, len(weights_list)
+            )
+        )
+
+    # Transpose the input data to (S, B, C) if batch_first == True
+    if batch_first:
+        _input = mb.transpose(x=_input, perm=[1, 0, 2])
+
+    state_out_list = []
+    out = _input
+
+    for i in range(num_layers):
+        if has_bias:
+            weight_ih = weights_list[4 * i]
+            weight_hh = weights_list[4 * i + 1]
+            bias = mb.add(x=weights_list[4 * i + 2], y=weights_list[4 * i + 3])
+        else:
+            weight_ih = weights_list[2 * i]
+            weight_hh = weights_list[2 * i + 1]
+            bias = None
+
+        # get the initial state
+        initial_h = mb.slice_by_index(
+            x=h0,
+            begin=[i, 0, 0],
+            end=[0, 0, 0],
+            stride=[1, 1, 1],
+            begin_mask=[False, True, True],
+            end_mask=[False, True, True],
+            squeeze_mask=[True, False, False],
+        )
+
+        # get the RNN output for each unit
+        out, state = mb.rnn(
+            x=out,
+            initial_h=initial_h,
+            weight_ih=weight_ih,
+            weight_hh=weight_hh,
+            bias=bias,
+            output_sequence=True,
+            activation=activation,
+        )
+
+        # append the state to the list, which will be stacked later
+        state_out_list.append(state)
+
+    # rnn output
+    output_name = node.outputs[0]
+    if batch_first:
+        out = mb.transpose(x=out, perm=[1, 0, 2], name=output_name)
+    else:
+        out = mb.identity(x=out, name=output_name)
+    context.add(out, output_name)
+
+    # stack the states into a single tensor
+    state_output_name = node.outputs[1]
+    if num_layers == 1:
+        state = mb.expand_dims(x=state_out_list[0], axes=[0], name=state_output_name)
+    else:
+        state = mb.stack(values=state_out_list, axis=0, name=state_output_name)
+    context.add(state, state_output_name)
+
+
+@register_torch_op
+def rnn_tanh(context, node):
+    _add_simple_rnn(context, node, "tanh")
+
+
+@register_torch_op
+def rnn_relu(context, node):
+    _add_simple_rnn(context, node, "relu")
+
+
+def _add_mil_lstm(input, initial_h, initial_c, weights, has_bias, bidirectional, name):
+    """
+    Most of this code transforms the tensors into
+    a shape acceptable by the Core ML implementation of LSTM.
+
+    For the weights and biases, per direction, PyTorch uses two tensors:
+    (ii, if, ig, io) stacked on top of each other for each layer (tensor 1)
+    and (hi, hf, hg, ho) stacked on top of each other for each layer (tensor 2).
+    That is, (W_ii|W_if|W_ig|W_io), of shape (4*hidden_size, input_size) and
+    (W_hi|W_hf|W_hg|W_ho), of shape (4*hidden_size, hidden_size).
+
+
+    The Core ML LSTM op expects two tensors, weight and bias. So
+    the tensors for weight and bias are separated from PyTorch's @weights list (1.).
+    For the bias tensor, the Core ML LSTM op expects the form ii, if, io, ig and hi, hf, ho, hg,
+    requiring the _ifzo_to_ifoz function; the input and hidden biases are then added into one tensor (2.).
+    Similarly to the bias, the input and hidden weights require a different layout (3.).
+
+    initial_h and initial_c are lists of "num_layers" tensors, each of shape [n_directions, B, H],
+    where n_directions = 1 or 2,
+    whereas the shapes of the initial states to MIL's LSTM and BiLSTM must be [B, H] and [B, 2*H] respectively.
+    This means we need to do the following transformations:
+    - if it's an LSTM (n_directions=1):
+        squeeze the first dimension of initial_h/initial_c, before feeding it to MIL's LSTM
+    - if it's a BiLSTM (n_directions=2):
+        - split the input, shape=(2, B, H), to get (1, B, H) and (1, B, H)
+        - concatenate to get (1, B, 2*H)
+        - squeeze to get (B, 2*H)
+    """
+
+    if bidirectional:
+        if has_bias:
+            # (1.)
+            biases = weights[2:4] + weights[6:8]
+            weights = weights[0:2] + weights[4:6]
+
+            # (2.)
+            assert len(biases) == 4
+            for index in range(len(biases)):
+                biases[index] = _ifzo_to_ifoz(
+                    biases[index],
+                    name="{}_lstm_bias_reshape_{}".format(name, index),
+                )
+            f_b = mb.add(x=biases[0], y=biases[1], )
+            r_b = mb.add(x=biases[2], y=biases[3], )
+
+        # (3.)
+        f_ih_w = _ifzo_to_ifoz(
+            weights[0], name=name + "_lstm_forward_ih_weights_ifoz_to_ifzo",
+        )
+        f_hh_w = _ifzo_to_ifoz(
+            weights[1], name=name + "_lstm_forward_hh_weights_ifoz_to_ifzo",
+        )
+        r_ih_w = _ifzo_to_ifoz(
+            weights[2], name=name + "_lstm_reverse_ih_weights_ifoz_to_ifzo",
+        )
+        r_hh_w = _ifzo_to_ifoz(
+            weights[3], name=name + "_lstm_reverse_hh_weights_ifoz_to_ifzo",
+        )
+
+        h = _pytorch_hidden_to_coreml_milops(initial_h, name=name + "_lstm_h0_reshaped")
+        c = _pytorch_hidden_to_coreml_milops(initial_c, name=name + "_lstm_c0_reshaped")
+        return mb.lstm(x=input,
+                       initial_h=h,
+                       initial_c=c,
+                       weight_ih=f_ih_w,
+                       weight_hh=f_hh_w,
+                       weight_ih_back=r_ih_w,
+                       weight_hh_back=r_hh_w,
+                       bias=(f_b if has_bias else None),
+                       bias_back=(r_b if has_bias else None),
+                       direction="bidirectional",
+                       output_sequence=True,
+                       name=name)
+    else:
+        if has_bias:
+            # (1.)
+            biases = weights[len(weights) // 2:]
+            weights = weights[: len(weights) // 2]
+            # (2.)
+            b = mb.add(x=biases[0], y=biases[1], )
+            b = _ifzo_to_ifoz(
+                b, name=name + "_lstm_bias_transformed",
+            )
+        # (3.)
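+        # A sketch of what _ifzo_to_ifoz does here (its "z" corresponds to
+        # torch's g gate): torch stacks the gate blocks as (i, f, g, o), while
+        # the Core ML LSTM op expects (i, f, o, g), so a (4*H, *) tensor laid
+        # out as [W_i | W_f | W_g | W_o] is reordered to [W_i | W_f | W_o | W_g].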
+        f_ih_w = _ifzo_to_ifoz(
+            weights[0], name=name + "_lstm_ih_weights_ifoz_to_ifzo",
+        )
+        f_hh_w = _ifzo_to_ifoz(
+            weights[1], name=name + "_lstm_hh_weights_ifoz_to_ifzo",
+        )
+
+        h = mb.squeeze(x=initial_h, axes=_np.array([0]), name=name + "_lstm_h0_squeeze")
+        c = mb.squeeze(x=initial_c, axes=_np.array([0]), name=name + "_lstm_c0_squeeze")
+
+        return mb.lstm(x=input,
+                       initial_h=h,
+                       initial_c=c,
+                       weight_ih=f_ih_w,
+                       weight_hh=f_hh_w,
+                       bias=(b if has_bias else None),
+                       direction="forward",
+                       output_sequence=True,
+                       name=name)
+
+
+@register_torch_op
+def lstm(context, node):
+    inputs = _get_inputs(context, node, expected=9)
+
+    _input = inputs[0]
+
+    # there are two cases here:
+    # (1) the input tensor is a PackedSequence object,
+    #     in which case the second input of the lstm layer is the batch_size (MIL Var).
+    # (2) the input tensor is a normal tensor,
+    #     in which case the second input is an array.
+    # As a result, we can use the second input to identify which case we are handling.
+
+    has_batch_sizes = not isinstance(inputs[1], Iterable)
+    if has_batch_sizes:
+        batch_sizes = inputs[1]
+        h0, c0 = inputs[2]
+        weights_list = inputs[3]
+        has_bias = inputs[4].val
+        num_layers = inputs[5].val
+        dropout = inputs[6]
+        bidirectional = inputs[8].val
+        # the output of _pack_padded_sequence is always in the batch-first layout
+        batch_first = True
+    else:
+        h0, c0 = inputs[1]
+        weights_list = inputs[2]
+        has_bias = inputs[3].val
+        num_layers = inputs[4].val
+        dropout = inputs[5]
+        bidirectional = inputs[7].val
+        batch_first = inputs[8].val
+
+    '''
+    Torch LSTM layer's input shapes:
+
+    (1) first input
+        (Seq, B, C) : if batch_first = False
+        (B, Seq, C) : if batch_first = True
+
+    (2) & (3) initialization states
+        (num_layers, B, H) : if bidirectional = False
+        (num_layers * 2, B, H) : if bidirectional = True
+
+
+    For the MIL LSTM layer, these are the input shapes:
+
+    (1) first input: (Seq, B, C)
+        this means, if batch_first=True, we need to insert a transpose op first
+
+    (2) & (3) initialization states
+        MIL's LSTM layer does not natively support the "num_layers" parameter.
+        So, when num_layers > 1, we add multiple MIL LSTM ops in a sequence.
+        Each of these LSTM ops will take in initialization states in the following shape:
+        (B, H) if bidirectional = False
+        (B, 2*H) if bidirectional = True
+    '''
+
+    if batch_first:
+        _input = mb.transpose(x=_input, perm=[1, 0, 2], name=_input.name + "_batch_first_transpose")
+
+    expected_num_weights = 2 * num_layers * (int(bidirectional) + 1) * (int(has_bias) + 1)
+    if len(weights_list) != expected_num_weights:
+        raise ValueError(
+            "Incorrect weights shape for lstm layer: Expected: {}. Received {}".format(
+                expected_num_weights, len(weights_list)
+            )
+        )
+
+    # shapes of h0 and c0 are (num_layers * n_directions, B, H)
+    if num_layers == 1:
+        all_initial_h = [h0]  # [(n_directions, B, H)]
+        all_initial_c = [c0]  # [(n_directions, B, H)]
+    else:
+        all_initial_h = mb.split(
+            x=h0, num_splits=num_layers, axis=0
+        )  # [(n_directions, B, H)]
+        all_initial_c = mb.split(
+            x=c0, num_splits=num_layers, axis=0
+        )  # [(n_directions, B, H)]
+
+    n_weights_per_layer = int(len(weights_list) / num_layers)
+    x = _input
+    h_out_list = []
+    c_out_list = []
+    for i in range(num_layers):
+        if i < num_layers - 1:
+            op_name = node.name + "_lstm_layer_{}".format(i)
+        else:
+            if batch_first:
+                op_name = node.name + "_batch_first"
+            else:
+                op_name = node.name
+
+        lstm_out = _add_mil_lstm(
+            input=x,
+            initial_h=all_initial_h[i],
+            initial_c=all_initial_c[i],
+            weights=weights_list[
+                i * n_weights_per_layer : (i + 1) * n_weights_per_layer
+            ],
+            has_bias=has_bias,
+            bidirectional=bidirectional,
+            name=op_name,
+        )
+        # shape of lstm_out[0] == (S, B, H) if bidirectional = False else (S, B, 2*H)
+        x = lstm_out[0]
+        # shape of lstm_out[1] == (B, H) if bidirectional = False else (B, 2*H)
+        h_out_list.append(lstm_out[1])
+        # shape of lstm_out[2] == (B, H) if bidirectional = False else (B, 2*H)
+        c_out_list.append(lstm_out[2])
+
+    '''
+    For torch, these are the dimensions of the 3 output tensors:
+    (1) output[0] :
+        (Seq, B, H) if batch_first = False, bidirectional = False
+        (Seq, B, 2*H) if batch_first = False, bidirectional = True
+        (B, Seq, H) if batch_first = True, bidirectional = False
+        (B, Seq, 2*H) if batch_first = True, bidirectional = True
+
+    (2) & (3) these are the state outputs:
+        (num_layers, B, H) if bidirectional = False
+        (num_layers * 2, B, H) if bidirectional = True
+
+    MIL lstm layer's output shapes:
+    (1) output[0]:
+        (Seq, B, H) if bidirectional = False
+        (Seq, B, 2*H) if bidirectional = True
+        This means we need a transpose op if batch_first is True
+
+    (2) & (3) shapes of the state outputs:
+        each MIL LSTM op will produce final state tensors with the following shape:
+        (B, H) if bidirectional = False
+        (B, 2*H) if bidirectional = True
+
+        stack/expand the final state tensors to match the Torch output
+    '''
+    for index, (name, output) in enumerate(zip(node.outputs, lstm_out)):
+        if index > 0:
+            # index > 0 ===> it's one of the state outputs (h or c)
+            if bidirectional:
+                if num_layers == 1:
+                    out1, out2 = mb.split(
+                        x=output, num_splits=2, axis=1
+                    )  # each output of shape [B, H] after the split
+                    final_out = mb.stack(
+                        values=[out1, out2], axis=0, name=name
+                    )  # [2, B, H]
+                    context.add(final_out, name)
+                else:
+                    out_state_tensors_list = (
+                        h_out_list if index == 1 else c_out_list
+                    )  # each tensor in the list is of shape (B, 2*H)
+                    list_of_tensors_to_stack = []
+                    for i in range(num_layers):
+                        out1, out2 = mb.split(
+                            x=out_state_tensors_list[i], num_splits=2, axis=1
+                        )  # each output of shape [B, H] after the split
+                        out = mb.stack(values=[out1, out2], axis=0)  # [2, B, H]
+                        list_of_tensors_to_stack.append(out)
+                    final_out = mb.concat(
+                        values=list_of_tensors_to_stack, axis=0, name=name
+                    )  # output of shape (num_layers * 2, B, H)
+                    context.add(final_out, name)
+            else:
+                if num_layers == 1:
+                    unsqueeze = mb.expand_dims(x=output, axes=[0], name=name)
+                    context.add(unsqueeze, name)
+                else:
+                    out = mb.stack(
+                        values=h_out_list if index == 1 else c_out_list,
+                        axis=0,
+                        name=name,
+                    )
+                    context.add(out, name)
+        else:
+            if batch_first:
+                output = mb.transpose(x=output, perm=[1, 0, 2], name=name)
+            context.add(output, name)
+
+
+def _get_scales_from_output_size(output_size, input_shape):
+    scales = []
+    if output_size is not None:
+        # output_size will be either
+        # (1) A list of Var, where each Var indicates the output size for that dimension
+        # (2) A single Var which indicates the whole output size
+        # (3) A numpy array
+
+        if isinstance(output_size, list):
+            output_size = [x.val for x in output_size]
+        if isinstance(output_size, Var):
+            output_size = [x for x in output_size.val]
+        if isinstance(output_size, _np.ndarray):
+            output_size = output_size.tolist()
+
+        # The output size is computed using the formula floor(scale * input_size) in Core ML (and PyTorch).
+        # Thus, when computing the scales from the output size, we add a small positive constant to the output size
+        # to make sure that the floor formula results in the correct output size and not 1 unit smaller.
+        # For instance, if output size = 5 and input size = 2, then the scale will be 2.5, which can get
+        # represented as 2.49999 due to float precision issues, and this might result in an output size of 4
+        # instead of 5, without the epsilon correction.
+
+        if len(output_size) == 1:
+            # 1d upsampling
+            Hout = output_size[0]
+            Hin = input_shape[-1]
+            scales_h = Hout / Hin if Hout % Hin == 0 else (Hout + 1e-4) / Hin
+            scales = scales_h
+        elif len(output_size) == 2:
+            # 2d upsampling
+            Hout, Wout = output_size[0], output_size[1]
+            Hin, Win = input_shape[-2], input_shape[-1]
+            scales_h = Hout / Hin if Hout % Hin == 0 else (Hout + 1e-4) / Hin
+            scales_w = Wout / Win if Wout % Win == 0 else (Wout + 1e-4) / Win
+            scales = [scales_h, scales_w]
+        else:
+            msg = "Only 1d and 2d upsampling are supported."
+            raise NotImplementedError(msg)
+
+    return scales
+
+
+def _is_float_value(x, threshold=0.001):
+    return x - _math.floor(x) > threshold
+
+
+@register_torch_op
+def upsample_linear1d(context, node):
+    inputs = _get_inputs(context, node)
+    x = inputs[0]
+    output_size = inputs[1]
+    align_corners = bool(inputs[2].val)
+    scale = inputs[3]
+
+    scale_factor = None
+
+    if scale is not None and scale.val is not None and scale.shape == (1,):
+        # Get the scale factor from the provided inputs
+        # This happens when recompute_scale_factor = False
+        scale_factor = scale.val[0]
+
+        # Currently, we are not supporting recompute_scale_factor = False, align_corners = False with float output size
+        _, _, h = x.shape
+        if not is_symbolic(h):
+            # For a static input shape, we can compute the output size beforehand and check whether it is a float value
+            output_size = h * scale_factor
+            is_float = _is_float_value(output_size)
+        else:
+            # For a dynamic input shape, we check whether the scale factor itself is float
+            is_float = _is_float_value(scale_factor)
+
+        if is_float and not align_corners:
+            msg = (
+                "recompute_scale_factor = False, align_corners = False with float output size is "
+                + "not supported for the upsample op {}".format(node.name)
+            )
+            raise NotImplementedError(msg)
+
+    elif isinstance(output_size, list):
+        # When the input shape is dynamic and recompute_scale_factor = True,
+        # we need to trace the graph to find the scale factor.
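+        # (mb.torch_upsample_bilinear is a torch-frontend placeholder op; the
+        # const scale factor is resolved later from the traced graph, as noted
+        # in upsample_bilinear2d below.)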
+ x = mb.expand_dims(x=x, axes=[3]) + x = mb.torch_upsample_bilinear( + x=x, + output_height=output_size[0], + output_width=1, + align_corners=align_corners, + ) + x = mb.squeeze(x=x, axes=[3], name=node.name) + context.add(x) + return + + elif output_size.val is not None: + # Infer the scale factor from the provided output size + scale_factor = _get_scales_from_output_size(output_size, x.shape) + + # Expand the input to a 4d tensor, and use MIL's upsample_bilinear op + x = mb.expand_dims(x=x, axes=[3]) + x = mb.upsample_bilinear( + x=x, + scale_factor_height=scale_factor, + scale_factor_width=1., + align_corners=align_corners, + ) + x = mb.squeeze(x=x, axes=[3], name=node.name) + context.add(x) + + +@register_torch_op +def upsample_bilinear2d(context, node): + inputs = _get_inputs(context, node) + _input = inputs[0] + output_size = inputs[1] + align_corners = bool(inputs[2].val) + scale_factors = inputs[3] + + scales_h, scales_w = None, None + + if ( + scale_factors is not None + and scale_factors.val is not None + and scale_factors.rank == 1 + and scale_factors.shape[0] == 2 + ): + # get scale factors from provided inputs + # this happens when recompute_scale_factor = False + scale_factors = scale_factors.val + scales_h = scale_factors[0] + scales_w = scale_factors[1] + + # currently, we are not supporting recompute_scale_factor = False, align_corners = False with float output size + _, _, h, w = _input.shape + if not is_symbolic(h) and not is_symbolic(w): + # For the static input shape, we can compute the output size beforehand + output_h = h * scales_h + output_w = w * scales_w + is_h_float = _is_float_value(output_h) + is_w_float = _is_float_value(output_w) + + else: + # For the dynamic input shape, we check if the scale factor itself is float + is_h_float = _is_float_value(scales_h) + is_w_float = _is_float_value(scales_w) + + if (is_h_float or is_w_float) and not align_corners: + msg = ( + "recompute_scale_factor = False, align_corners = False with float output size is " + + "not supported for the upsample op {}".format(node.name) + ) + raise NotImplementedError(msg) + + elif ( + isinstance(output_size, list) + and output_size[0].val is None + and output_size[1].val is None + ): + # the input shape is dynamic and recompute_scale_factor = True + # need to trace the graph to find the scale factor + # we define a torch front end op mb.torch_upsample_bilinear to resolve the const scaling factor + torch_upsample_bilinear = mb.torch_upsample_bilinear( + x=_input, + output_height=output_size[0], + output_width=output_size[1], + align_corners=align_corners, + name=node.name, + ) + context.add(torch_upsample_bilinear) + return + else: + # infer scale factors from output sizes + # This happens when recompute_scale_factor = True or the output_size is specified + scales = _get_scales_from_output_size(output_size, _input.shape) + if scales: + scales_h, scales_w = scales + + if scales_h is None or scales_w is None: + if len(inputs) == 5: + # For torch==1.5.0, upsample_bilinear2d has 5 inputs. 
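+            # (inputs[3] and inputs[4] then carry the height and width scale
+            # factors separately, which is what is unpacked below.)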
+            scales_h = inputs[3]
+            scales_w = inputs[4]
+        else:
+            raise ValueError("Failed to infer scale factors from inputs.")
+
+    upsample_bilinear = mb.upsample_bilinear(
+        x=_input,
+        scale_factor_height=scales_h,
+        scale_factor_width=scales_w,
+        align_corners=align_corners,
+        name=node.name,
+    )
+    context.add(upsample_bilinear)
+
+
+@register_torch_op
+def upsample_nearest1d(context, node):
+    inputs = _get_inputs(context, node)
+    x = inputs[0]
+    output_size = inputs[1]
+    scale = inputs[2]
+
+    scale_factor = None
+
+    if scale is not None and scale.val is not None and scale.shape == (1,):
+        # Get the scale factor from the provided inputs
+        # This happens when recompute_scale_factor = False
+        scale_factor = scale.val[0]
+
+    elif isinstance(output_size, list):
+        # When the input shape is dynamic and recompute_scale_factor = True,
+        # we need to trace the graph to find the scale factor.
+        x = mb.expand_dims(x=x, axes=[3])
+        x = mb.torch_upsample_nearest_neighbor(
+            x=x,
+            output_height=output_size[0],
+            output_width=1,
+        )
+        x = mb.squeeze(x=x, axes=[3], name=node.name)
+        context.add(x)
+        return
+    else:
+        # Infer the scale factor from the provided output size
+        scale_factor = _get_scales_from_output_size(output_size, x.shape)
+
+    x = mb.expand_dims(x=x, axes=[3])
+    x = mb.upsample_nearest_neighbor(
+        x=x,
+        scale_factor_height=scale_factor,
+        scale_factor_width=1.,
+    )
+    x = mb.squeeze(x=x, axes=[3], name=node.name)
+    context.add(x)
+
+
+@register_torch_op
+def upsample_nearest2d(context, node):
+    inputs = _get_inputs(context, node)
+    _input = inputs[0]
+    scales_h, scales_w = None, None
+
+    output_size = inputs[1]
+    scale_factors = inputs[2]
+
+    if (
+        scale_factors is not None
+        and scale_factors.val is not None
+        and scale_factors.rank == 1
+        and scale_factors.shape[0] == 2
+    ):
+        # get the scale factors from the provided inputs
+        scale_factors = scale_factors.val
+        scales_h = scale_factors[0]
+        scales_w = scale_factors[1]
+    elif (
+        isinstance(output_size, list)
+        and output_size[0].val is None
+        and output_size[1].val is None
+    ):
+        # the input shape is dynamic and recompute_scale_factor = True
+        # need to trace the graph to find the scale factor
+        # we define a torch front end op mb.torch_upsample_nearest_neighbor to resolve the const scaling factor
+        torch_upsample_nearest2d = mb.torch_upsample_nearest_neighbor(
+            x=_input,
+            output_height=output_size[0],
+            output_width=output_size[1],
+            name=node.name,
+        )
+        context.add(torch_upsample_nearest2d)
+        return
+    else:
+        # infer the scale factors from the output sizes
+        scales = _get_scales_from_output_size(output_size, _input.shape)
+        if scales:
+            scales_h, scales_w = scales
+
+    if scales_h is None or scales_w is None:
+        if len(inputs) == 5:
+            # For torch==1.5.0, upsample_nearest2d has 5 inputs.
+            scales_h = inputs[3]
+            scales_w = inputs[4]
+        else:
+            raise ValueError("Failed to infer scale factors from inputs.")
+
+    upsample_nearest2d = mb.upsample_nearest_neighbor(
+        x=_input,
+        scale_factor_height=scales_h,
+        scale_factor_width=scales_w,
+        name=node.name,
+    )
+    context.add(upsample_nearest2d)
+
+
+@register_torch_op(torch_alias=["listunpack"])
+def tupleunpack(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+    values = inputs[0]
+
+    # Node input could have been turned into a constant array in @tupleconstruct
+    if not isinstance(values, (tuple, list)):
+        if values.val is not None:
+            values = values.val
+        else:
+            # The `values` could be a single Var with symbolic val.
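+            # (One plausible example: a shape tensor whose entries are only
+            # known at runtime. Wrapping it in a list keeps the zip over
+            # node.outputs below well-formed.)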
+            values = [values]
+
+    if len(values) != len(node.outputs):
+        raise ValueError(f"unpack node expected {len(node.outputs)} outputs, got {len(values)}")
+
+    # @value is either a numpy primitive or a Var object
+    for value, output in zip(values, node.outputs):
+        if not isinstance(value, Var):
+            value = _construct_constant(value, name=output)
+        assert isinstance(value, Var)
+        context.add(value, output)
+
+
+@register_torch_op
+def loop(context, node):
+    """ In TorchIR, a loop looks like:
+            %y_1, ..., %y_r = prim::Loop(%max_trip_count, %initial_condition, %x_1, ..., %x_r)
+            block0(%i, %a_1, ..., %a_r):
+                %b_1, ..., %b_m = some::node(%a_value_from_outer_block, %a_1)
+                %iter_condition = some::other_node(%a_2)
+                -> (%iter_condition, %b_1, ..., %b_r)
+
+        This translates to pseudo code as:
+            y_1, ..., y_r = x_1, ..., x_r
+            condition = initial_condition
+            i = 0
+            while condition and i < max_trip_count:
+                a_1, ..., a_r = y_1, ..., y_r
+
+                ############################################################
+                # Actual body of the loop
+                b_1, ..., b_m = some::node(a_value_from_outside_of_the_loop, a_1)
+                iter_condition = some::node(a_2)
+                ############################################################
+
+                y_1, ..., y_r = b_1, ..., b_r
+                condition = iter_condition
+                i += 1
+
+        Which further translates to MIL while_loop as:
+            loop_vars = (0, initial_condition, x_1, ..., x_r)
+            _cond = {
+                return (loop_vars[1] and loop_vars[0] < max_trip_count)
+            }
+            _body = {
+                a_1, ..., a_r = loop_vars[2], ..., loop_vars[-1]
+                b_1, ..., b_m = some::node(a_value_from_outside_of_the_loop, a_1)
+                iter_condition = some::node(a_2)
+                return (loop_vars[0] + 1, iter_condition, b_1, ..., b_r)
+            }
+
+        For loops pass True for %initial_condition and %iter_condition.
+        While loops set %max_trip_count to INT_MAX, and %i is unused.
+    """
+    name = node.name
+    # inputs[0]: max iter count
+    # inputs[1]: initial condition
+    # inputs[2]: block input 0
+    # ...
+    # inputs[N+2]: block input N
+    inputs = _get_inputs(context, node)
+    max_iter_count = inputs[0]
+
+    # A magic default signals that this is a while-only loop, so no iteration
+    # count is needed.
+    has_iter_count = max_iter_count is not None
+
+    # Create an iteration count. This will only be used if this is a for loop.
+    iter_count = mb.const(val=0, name=node.name + "_iter")
+    # @loop_vars is tuple(iter_count, cond, inputs...)
+    loop_vars = tuple([iter_count] + inputs[1:])
+
+    def _loop_cond(*loop_vars):
+        cond = loop_vars[1]
+
+        # Check the iteration count if we're keeping track.
+        if has_iter_count:
+            iter_count = loop_vars[0]
+            iter_cond = mb.less(
+                x=iter_count, y=max_iter_count, name=node.name + "_cond"
+            )
+            return mb.logical_and(x=cond, y=iter_cond)
+        else:
+            return mb.identity(x=cond)
+
+    def _shapes_are_equivalent(shape1, shape2):
+        """ Compares two sets of tensor shapes and returns True if they are
+            equivalent. That is, they are the same rank, and each dimension
+            is the same or symbolic.
+        """
+        if len(shape1) != len(shape2):
+            return False
+
+        # Each dimension must have the same integer length, or else be
+        # symbolic.
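+        # For example, (2, 3) vs (2, 3) and (2, s0) vs (2, s1) are treated as
+        # equivalent here, while (2, 3) vs (2, 4) is not.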
+ all_equivalent = [ + s1 == s2 or (isinstance(s1, Symbol) and isinstance(s2, Symbol)) + for s1, s2 in zip(shape1, shape2) + ] + return all_equivalent + + def _loop_body(*loop_vars): + block = node.blocks[0] + iter_var = loop_vars[0] + inputs = (iter_var,) + loop_vars[2:] + res = convert_block(context, block, inputs) + + for input_var, output_var in zip(loop_vars[2:], res[1:]): + if not _shapes_are_equivalent(input_var.shape, output_var.shape): + logger.warning( + "detected change in shape of loop variable. this could lead to incorrect inference results!" + ) + logger.warning( + "{}:{} -> {}:{}".format( + input_var.name, + input_var.shape, + output_var.name, + output_var.shape, + ) + ) + + # Update the iteration count if we're keeping track. + if has_iter_count: + iter_var = mb.add(x=iter_var, y=1, name=iter_var.name + "_inc") + else: + iter_var = mb.identity(x=iter_var) + + # Must return tuple with same length and types as @loop_vars. + return tuple( + [ + iter_var, + ] + + res + ) + + loop = mb.while_loop( + _cond=_loop_cond, _body=_loop_body, loop_vars=loop_vars, name=name + ) + + # Make sure the loop returned the expected number of outputs. Note that the + # first two loop outputs are the iteration count and condition. + assert len(loop) - 2 == len(node.outputs) + for output_name, output_var in zip(node.outputs, loop[2:]): + context.add(output_var, torch_name=output_name) + + +@register_torch_op(torch_alias=["if"]) +def _if(context, node): + """ In TorchIR, a conditional looks like: + %y_1, ..., %y_r = prim::If(%condition) + block0(): # TRUE BRANCH, never takes arguments, has to return r outputs + %t_1, ..., %t_k = some::node(%a_value_from_outer_block) + -> (%t_1, ..., %t_r) + block1(): # FALSE BRANCH, never takes arguments, has to return r outputs + %f_1, ..., %f_m = some::node(%a_value_from_outer_block) + -> (%f_1, ..., %f_r) + + This translates to pseudo code as: + if (condition): + t_1, ..., t_k = some::node(a_value_from_outer_block) + y_1, ..., y_r = t_1, ..., t_r + else: + f_1, ..., f_m = some::node(a_value_from_outer_block) + y_1, ..., y_r = f_1, ..., f_r + + Which further translates to MIL cond as: + _true = { + t_1, ..., t_k = some::node(a_value_from_outer_block) + return (t_1, ..., t_r) + } + _false = { + f_1, ..., f_m = some::node(a_value_from_outer_block) + return (f_1, ..., f_m) + } + """ + name = node.name + # inputs[0]: condition + inputs = _get_inputs(context, node, expected=1) + condition = inputs[0] + + assert len(node.blocks) == 2 + true_block = node.blocks[0] + false_block = node.blocks[1] + + def _true_path(): + res = convert_block(context, true_block, []) + return tuple(res) + + def _false_path(): + res = convert_block(context, false_block, []) + return tuple(res) + + cond = mb.cond( + pred=condition, _true_fn=_true_path, _false_fn=_false_path, name=name + ) + # If the condition only returns one item, wrap it in a tuple. + if not isinstance(cond, (tuple, list)): + cond = (cond,) + + # Make sure the condition returned the expected number of outputs. 
+ assert len(cond) == len(node.outputs) + for output_name, output_var in zip(node.outputs, cond): + context.add(output_var, torch_name=output_name) + + +@register_torch_op +def select(context, node): + inputs = _get_inputs(context, node, expected=3) + _input = inputs[0] + dim = inputs[1].val + index = inputs[2].val + + assert dim.shape == () + assert index.shape == () + + # NOTE: + # Each index in @begin_array/@end_array corresponds to a dimension of @_input + # Each val of those arrays corresponds to the start/end index to slice in that dimension + rank = _input.rank + begin_array = [0] * rank + begin_array[dim] = index + end_array = [s if isinstance(s, int) else 0 for s in _input.shape] + end_mask = [True] * rank + squeeze_mask = [False] * rank + squeeze_mask[dim] = True + + if index != -1: + end_array[dim] = index + 1 + end_mask[dim] = False + + slice_by_index = mb.slice_by_index( + x=_input, + begin=begin_array, + end=end_array, + end_mask=end_mask, + squeeze_mask=squeeze_mask, + name=node.name, + ) + context.add(slice_by_index) + + +@register_torch_op +def type_as(context, node): + inputs = _get_inputs(context, node, expected=2) + + if inputs[0].dtype == inputs[1].dtype: + x = mb.identity(x=inputs[0], name=node.name) + else: + x = inputs[0] + if inputs[1].dtype not in TYPE_TO_DTYPE_STRING: + raise NotImplementedError( + "Tensor type {} cast is not supported.".format(inputs[1].dtype) + ) + x = mb.cast(x=x, dtype=TYPE_TO_DTYPE_STRING[inputs[1].dtype], name=node.name) + + context.add(x) + + +@register_torch_op +def nonzero(context, node): + inputs = _get_inputs(context, node, expected=1) + x = inputs[0] + nonzero = mb.non_zero(x=x, name=node.name) + context.add(nonzero) + + +def _get_slice_params(context, data, inputs): + rank = data.rank + begin = [0] * rank + end = [0] * rank + stride = [1] * rank + begin_mask = [False] * rank + end_mask = [False] * rank + squeeze_mask = [False] * rank + + num_of_slice_set = len(inputs) // 3 + + for i in range(num_of_slice_set): + if inputs[3 * i + 1] is None: + # This is pure index select + idx = context[inputs[3 * i]].val + begin[i] = idx + squeeze_mask[i] = True + else: + # This is a slice + begin_var = context[inputs[3 * i]] + end_var = context[inputs[3 * i + 1]] + stride_var = context[inputs[3 * i + 2]] + + if begin_var is None: + begin_mask[i] = True + else: + begin[i] = begin_var + + if end_var is None: + end_mask[i] = True + else: + end[i] = end_var + + if stride_var is None: + stride[i] = 1 + else: + stride[i] = stride_var.val + + for i in range(num_of_slice_set, rank): + begin_mask[i] = True + end_mask[i] = True + + begin = mb.concat(values=begin, axis=0) + end = mb.concat(values=end, axis=0) + + return begin, end, stride, begin_mask, end_mask, squeeze_mask + + +@register_torch_op +def _internal_op_tensor_inplace_copy(context, node): + data = context[node.inputs[0]] + updates = context[node.inputs[1]] + begin, end, stride, begin_mask, end_mask, squeeze_mask = _get_slice_params( + context, data, node.inputs[2:] + ) + + data, updates = promote_input_dtypes([data, updates]) + updated_x = mb.torch_tensor_assign( + data=data, + updates=updates, + begin=begin, + end=end, + stride=stride, + begin_mask=begin_mask, + end_mask=end_mask, + squeeze_mask=squeeze_mask, + name=node.name, + ) + context.add(updated_x) + + +@register_torch_op +def _internal_op_tensor_inplace_fill(context, node): + data = context[node.inputs[0]] + fill_scalar = context[node.inputs[1]] + + begin, end, stride, begin_mask, end_mask, squeeze_mask = _get_slice_params( + context, 
        data, node.inputs[2:]
+    )
+    if begin.val is None or end.val is None:
+        raise ValueError("_internal_op_tensor_inplace_fill does not support dynamic index")
+
+    fill_shape = solve_slice_by_index_shape(
+        data.shape, begin.val, end.val, stride, begin_mask, end_mask, squeeze_mask
+    )
+    update_values = _np.full(fill_shape, fill_scalar.val)
+
+    data, update_values = promote_input_dtypes([data, update_values])
+    updated_x = mb.torch_tensor_assign(
+        data=data,
+        updates=update_values,
+        begin=begin,
+        end=end,
+        stride=stride,
+        begin_mask=begin_mask,
+        end_mask=end_mask,
+        squeeze_mask=squeeze_mask,
+        name=node.name,
+    )
+    context.add(updated_x)
+
+
+@register_torch_op
+def index_put(context, node):
+    inputs = _get_inputs(context, node, expected=4)
+    x = inputs[0]
+    indices = inputs[1]
+    values = inputs[2]
+    accumulate = inputs[3].val
+    rank = x.rank
+    mode = "add" if accumulate else "update"
+
+    indices_type = indices[0].sym_type.get_primitive()
+
+    if types.is_bool(indices_type):
+        assert len(indices) == 1, "Unsupported index_put_ usage."
+        indices = indices[0]
+        assert indices.shape == x.shape, "indices shape must equal the input shape for the index_put operation."
+        indices = mb.cast(x=indices, dtype="int32")
+        indices = mb.non_zero(x=indices)
+
+    if types.is_int(indices_type):
+        if len(indices) > 1:
+            indices = mb.stack(values=indices, axis=rank - 1)
+        else:
+            indices = mb.expand_dims(x=indices[0], axes=[-1])
+
+    if len(values.shape) == 0:
+        values = mb.expand_dims(x=values, axes=[0])
+
+    if values.rank == 1 and values.shape[0] == 1:
+        reps = value_at(mb.shape(x=indices), 0)
+        reps = mb.expand_dims(x=reps, axes=[0])
+        values = mb.tile(x=values, reps=reps)
+
+    result = mb.scatter_nd(data=x, indices=indices, updates=values, mode=mode, name=node.name)
+    context.add(result)
+
+
+@register_torch_op
+def index(context, node):
+    inputs = _get_inputs(context, node, expected=2)
+    x = inputs[0]
+    indices = inputs[1]
+    rank = x.rank
+
+    """
+    Case 1: A single boolean index selection
+    Ex:
+        a = torch.rand(2, 3, 4)
+        b = torch.rand(3, 4)
+        index = b > 0.1
+        c = a[:, b]
+
+    For this case, the only non-None tensor has dtype bool.
+    The True values indicate which elements should be selected among the masked axes.
+    The output c is a tensor with shape (2, N), where N is the number of elements of b satisfying the condition > 0.1
+    """
+    boolean_indices_axis = []
+    for i, index in enumerate(indices):
+        if index is not None and types.is_bool(index.dtype):
+            boolean_indices_axis.append(i)
+    if len(boolean_indices_axis) == 1:
+        # get the True element indices
+        axis = boolean_indices_axis[0]
+        axes = list(range(axis, axis + index.rank))
+        index = indices[axis]
+        index = mb.non_zero(x=index)
+
+        # transpose the masked axes to the beginning
+        perm = axes + [i for i in range(rank) if i not in axes]
+        x = mb.transpose(x=x, perm=perm)
+        x = mb.gather_nd(x=x, indices=index)
+
+        # transpose the tensor back
+        perm_back = list(range(1, x.rank))
+        perm_back.insert(axis, 0)
+        res = mb.transpose(x=x, perm=perm_back, name=node.name)
+        context.add(res)
+        return
+
+    """
+    Case 2: Pure index selection
+    Ex # 1 [Single dimension selection]:
+        a = torch.rand(1,2,3,4)
+        index = torch.tensor([0, 1])
+        b = a[:,:,:,index]
+
+        In this case, indices is a list [None, None, None, [0, 1]]. A None element
+        means no selection is applied along the corresponding dimension.
+
+        b has shape (1,2,3,2).
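+        (the last axis shrinks from 4 to 2 because the index [0, 1] selects
+        two entries along it)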
+
+    Ex # 2 [Multiple disconnected dimensions selection]:
+        a = torch.rand(1,2,3,4)
+        index = torch.tensor([0, 1])
+        b = a[:,index,:,index]
+
+        In this case, indices is a list [None, [0,1], None, [0,1]]
+
+        b has shape (2,1,3),
+        where b[0,:,:] = a[:,0,:,0] and b[1,:,:] = a[:,1,:,1]
+
+    Ex # 3 [Multiple connected dimensions selection]:
+        a = torch.rand(1,2,3,4)
+        index_1 = torch.tensor([0, 1])
+        index_2 = torch.tensor([0, 1])
+        b = a[:,index_1,index_2,:]
+
+        indices is a list [None, [0, 1], [0, 1], None]
+
+        b has shape (1,2,4),
+        where b[:,0,:] = a[:,0,0,:] and b[:,1,:] = a[:,1,1,:]
+
+    Ex # 4 [Selection with boolean masks]:
+        a = torch.rand(4,5)
+        index_1 = [True, True, False, False]
+        index_2 = [False, True, True, False, False]
+        b = a[index_1, index_2]
+
+        indices is a list [[True, True, False, False], [False, True, True, False, False]]
+
+        In this case, index_1 and index_2 are interpreted as masks, keeping the indices of the True values:
+        index_1 -> [0, 1]
+        index_2 -> [1, 2]
+
+        b has shape (2,),
+        where b[0] = a[0, 1] and b[1] = a[1, 2]
+
+    Ex # 5 [Broadcast selection]:
+        a = torch.rand(1,2,3,4)
+        index_1 = torch.tensor([0, 1])
+        index_2 = torch.tensor([0])
+        b = a[:,index_1,index_2,:]
+
+        indices is a list [None, [0, 1], [0], None]
+
+        In this case, index_2 is going to be broadcast to [0, 0]
+
+        b has shape (1,2,4),
+        where b[:,0,:] = a[:,0,0,:] and b[:,1,:] = a[:,1,0,:]
+    """
+
+    # get the index axes
+    indices = indices + [None] * (x.rank - len(indices))
+    indices_axes = []
+    valid_indices = []
+    for i, index in enumerate(indices):
+        if index is not None:
+            indices_axes.append(i)
+            valid_indices.append(index)
+
+    # If all elements in indices are None, simply return the original tensor.
+    if len(indices_axes) == 0:
+        x = mb.identity(x=x, name=node.name)
+        context.add(x)
+        return
+
+    # convert all indices to int type
+    for i, indice in enumerate(valid_indices):
+        if indice is not None and types.is_bool(indice.dtype):
+            indice = mb.non_zero(x=indice)
+            indice = mb.squeeze(x=indice, axes=[1])
+            valid_indices[i] = indice
+
+    # For the single index axis case, we can use mb.gather directly
+    if len(indices_axes) == 1:
+        axis = indices_axes[0]
+        x = mb.gather(x=x, indices=valid_indices[0], axis=axis, name=node.name)
+        context.add(x)
+        return
+
+    # For the multiple index axes case, we delegate the broadcast to numpy if there is no dynamic shape.
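+    # For example, in Ex # 5 above the index shapes (2,) and (1,) broadcast to
+    # (2,), so the length-1 index is materialized as [0, 0] before stacking.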
+    if all(not any_symbolic(idx.shape) for idx in valid_indices):
+        broadcasted_shape = _np.broadcast_shapes(*[idx.shape for idx in valid_indices])
+        for i, index in enumerate(valid_indices):
+            if (index.shape != broadcasted_shape) and index.val is not None:
+                new_val = _np.broadcast_to(index.val, broadcasted_shape)
+                valid_indices[i] = mb.const(
+                    val=new_val, name=index.name + "_broadcasted"
+                )
+    valid_indices = [mb.cast(x=index, dtype="int32") for index in valid_indices]
+
+    # First, stack the indices together
+    indices_rank = valid_indices[0].rank
+    indices = mb.stack(values=valid_indices, axis=indices_rank)
+
+    # transpose the input tensor to bring the indexed axes to the front
+    is_connected = True
+    for i in range(1, len(indices_axes)):
+        if indices_axes[i] != indices_axes[i - 1] + 1:
+            is_connected = False
+            break
+
+    name = node.name + "_transpose" if is_connected else node.name
+    perm = indices_axes + [axis for axis in range(x.rank) if axis not in indices_axes]
+    x = mb.transpose(x=x, perm=perm)
+    x = mb.gather_nd(x=x, indices=indices, name=name)
+
+    # if the index axes are connected, we need to transpose the result back
+    if is_connected:
+        new_dimensions = list(range(indices_axes[0], indices_axes[0] + indices_rank))
+        new_perm = new_dimensions + [
+            axis
+            for axis in range(rank + indices_rank - len(indices_axes))
+            if axis not in new_dimensions
+        ]
+        perm_back = [new_perm.index(axis) for axis in range(len(new_perm))]
+        x = mb.transpose(x=x, perm=perm_back, name=node.name)
+    context.add(x)
+
+
+@register_torch_op
+def ones(context, node):
+    inputs = _get_inputs(context, node, expected=[5, 6])
+    size = inputs[0]
+    # dtype = NUM_TO_TORCH_DTYPE[inputs[1].val] unused
+    # layout = inputs[2] unused
+    # device = inputs[3] unused
+    # requires_grad = inputs[4] unused
+    # out = inputs[5] unused
+    if isinstance(size, list):
+        size = mb.concat(values=size, axis=0)
+    fill = mb.fill(shape=size, value=1.0, name=node.name)
+    context.add(fill)
+
+
+@register_torch_op
+def ones_like(context, node):
+    inputs = _get_inputs(context, node, expected=6)
+    x = inputs[0]
+    if is_current_opset_version_compatible_with(target.iOS16):
+        fill = mb.fill_like(ref_tensor=x, value=1.0, name=node.name)
+    else:
+        size = mb.shape(x=x)
+        # dtype = NUM_TO_TORCH_DTYPE[inputs[1].val] unused
+        # layout = inputs[2] unused
+        # device = inputs[3] unused
+        # requires_grad = inputs[4] unused
+        # out = inputs[5] unused
+        fill = mb.fill(shape=size, value=1.0, name=node.name)
+    context.add(fill)
+
+
+def _make_fill_op(size, val, name):
+    assert val is not None
+    if isinstance(size, list):
+        size = mb.concat(values=size, axis=0)
+    fill = mb.fill(shape=size, value=val, name=name)
+    return fill
+
+
+@register_torch_op
+def full(context, node):
+    inputs = _get_inputs(context, node)
+    size = inputs[0]
+    val = inputs[1].val
+    result = _make_fill_op(size, val, node.name)
+    context.add(result)
+
+
+@register_torch_op
+def full_like(context, node):
+    inputs = _get_inputs(context, node, expected=7)
+    x = inputs[0]
+    val = inputs[1].val
+    if is_current_opset_version_compatible_with(target.iOS16):
+        result = mb.fill_like(ref_tensor=x, value=val, name=node.name)
+    else:
+        size = mb.shape(x=inputs[0])
+        result = _make_fill_op(size, val, node.name)
+    context.add(result)
+
+
+@register_torch_op
+def new_full(context, node):
+    # The difference between "new_full" and "full" is that "new_full" is called on
+    # an existing tensor: tensor.new_full(size, fill_value), while "full" is called
+    # from the torch API: torch.full(size, fill_value).
+    # Otherwise, they do essentially the same thing.
+    inputs = _get_inputs(context, node)
+    size = inputs[1]
+    val = inputs[2].val
+    result = _make_fill_op(size, val, node.name)
+    context.add(result)
+
+
+@register_torch_op
+def randint(context, node):
+    inputs = _get_inputs(context, node, expected=8)
+    low = mb.cast(x=inputs[0], dtype="fp32")
+    high = mb.cast(x=inputs[1], dtype="fp32")
+    shape = inputs[2]
+    rand_uniform = mb.random_uniform(shape=shape, low=low, high=high)
+    rand_int = mb.cast(x=rand_uniform, dtype="int32", name=node.name)
+    context.add(rand_int)
+
+
+@register_torch_op
+def bitwise_not(context, node):
+    inputs = _get_inputs(context, node)
+    x = inputs[0]
+    dtype = x.dtype
+    if types.is_int(dtype):
+        x = mb.add(x=x, y=1)
+        x = mb.mul(x=x, y=-1, name=node.name)
+    elif types.is_bool(dtype):
+        x = mb.logical_not(x=x, name=node.name)
+    else:
+        raise ValueError("Unsupported type {} found for 'bitwise_not' op".format(dtype))
+    context.add(x)
+
+
+@register_torch_op(torch_alias=["and"])
+def bitwise_and(context, node):
+    inputs = _get_inputs(context, node)
+
+    input_dtypes = [i.dtype for i in inputs]
+    if all(types.is_bool(input_dtype) for input_dtype in input_dtypes):
+        logical_and(context, node)
+    else:
+        raise NotImplementedError(
+            f"The `bitwise_and` op only supports boolean inputs, but got {input_dtypes}."
+        )
+
+
+def _avg_pool(context, node, inputs):
+    x = inputs[0]
+    kernel_sizes = inputs[1]
+    strides = inputs[2]
+    if strides.op.op_type == "const" and (not list(strides.val)):
+        strides = mb.const(val=kernel_sizes.val, name=strides.name)
+    pad_type = "custom"
+    # Need to explicitly state the L-R, T-B pad
+    pad = inputs[3]
+    pad = _np.repeat(pad.val, 2)
+    ceil_mode = inputs[4].val
+    include_pad = inputs[5].val
+
+    spatial_rank = len(pad) // 2
+    if spatial_rank > 2 and ceil_mode is True and list(strides.val) != [1] * len(strides.val):
+        # since MIL does not support ceil_mode for 3D pool,
+        # we need to adjust the padding values if ceil_mode is True;
+        # ceil_mode only makes a difference when the strides are not all 1
+        x_spatial_dimensions = x.shape[-spatial_rank:]
+        new_pad = _adjust_pad_for_ceil_mode(
+            x_spatial_dimensions, kernel_sizes.val, strides.val, pad
+        )
+        if _np.sum(_np.abs(new_pad - pad)) > 1e-3:
+            if include_pad:
+                raise ValueError('pool3d with ceil_mode=True and include_pad=True is not supported')
+            pad = new_pad
+
+    pool = mb.avg_pool(
+        x=x,
+        kernel_sizes=kernel_sizes,
+        strides=strides,
+        pad_type=pad_type,
+        pad=pad,
+        name=node.name,
+        exclude_padding_from_average=not include_pad,
+        ceil_mode=ceil_mode if spatial_rank <= 2 else False,
+    )
+    context.add(pool)
+
+
+@register_torch_op
+def avg_pool1d(context, node):
+    inputs = _get_inputs(context, node, expected=6)
+    _avg_pool(context, node, inputs)
+
+
+@register_torch_op
+def avg_pool2d(context, node):
+    inputs = _get_inputs(context, node, expected=7)
+    divisor_override = inputs[6]
+    if divisor_override is not None:
+        raise ValueError("divisor_override is not supported for avg_pool2d")
+    _avg_pool(context, node, inputs)
+
+
+@register_torch_op
+def avg_pool3d(context, node):
+    inputs = _get_inputs(context, node, expected=7)
+    divisor_override = inputs[6]
+    if divisor_override is not None:
+        raise ValueError("divisor_override is not supported for avg_pool3d")
+    _avg_pool(context, node, inputs)
+
+
+@register_torch_op
+def log_softmax(context, node):
+    inputs = _get_inputs(context, node)
+
+    x = inputs[0]
+    axis = inputs[1]
+    out = inputs[2]  # Ignored.
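+    # Mathematically, log_softmax(x)_i = x_i - log(sum_j exp(x_j)); it is
+    # composed below as log(softmax(x)), which is the same quantity.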
+ assert out is None + res = mb.softmax(x=x, axis=axis, name=node.name + "_softmax") + res = mb.log(x=res, name=node.name) + context.add(res) + + +@register_torch_op(torch_alias=["nll_loss_nd"]) +def nll_loss(context, node): + inputs = _get_inputs(context, node, expected=5) + + x = inputs[0] + target = inputs[1] + weight = inputs[2] + reduction = inputs[3] + ignore_index = inputs[4] + + # mapping for reduction + reduction_mapping = {0: "none", 1: "mean", 2: "sum"} + reduction = reduction_mapping[reduction.val] + + # compute the weights loss + batch_size = x.shape[0] + + # only support weight and ignore_index both None + if weight is not None: + raise NotImplementedError("Only unity weight is supported for NLLLoss.") + if ignore_index.val != -100: + raise NotImplementedError("ignore index not supported for NLLLoss.") + + x = mb.cast(x=x, dtype="fp32") + x = mb.mul(x=x, y=-1.) + range_indices = mb.range_1d(end=batch_size, start=0, step=1) + total_indices = mb.stack(values=[range_indices, target], axis=1) + loss = mb.gather_nd(x=x, indices=total_indices) + + # reduction type + if reduction == "none": + out = mb.identity(x=loss, name=node.name) + elif reduction == "sum": + out = mb.reduce_sum(x=loss, axes=[0], keep_dims=False, name=node.name) + elif reduction == "mean": + out = mb.real_div(x=loss, y=_np.float32(batch_size)) + out = mb.reduce_sum(x=out, axes=[0], keep_dims=False, name=node.name) + else: + raise NotImplementedError("Unsupported reduction type for NLLLoss.") + + context.add(out) + + +@register_torch_op +def sigmoid(context, node): + inputs = _get_inputs(context, node, expected=1) + + res = mb.sigmoid(x=inputs[0], name=node.name) + context.add(res) + + +@register_torch_op +def hardsigmoid(context, node): + inputs = _get_inputs(context, node, expected=1) + + res = mb.sigmoid_hard(x=inputs[0], alpha=1.0 / 6, beta=0.5, name=node.name) + context.add(res) + + +@register_torch_op +def gelu(context, node): + inputs = _get_inputs(context, node) + assert len(inputs) in (1, 2) + if len(inputs) == 2: + approximate = inputs[1].val + assert approximate == 'none' + res = mb.gelu(x=inputs[0], name=node.name) + context.add(res) + + +@register_torch_op(torch_alias=["slice"]) +def _slice(context, node): + inputs = _get_inputs(context, node, expected=5) + x = inputs[0] + dim = inputs[1].val + + if inputs[2] and inputs[2].val is not None: + start = inputs[2].val + elif isinstance(inputs[2], Var): + start = inputs[2] + else: + start = 0 + + if inputs[3] and inputs[3].val is not None: + end = inputs[3].val + elif isinstance(inputs[3], Var): + end = inputs[3] + else: + end = None + + step = inputs[4].val + + if start == 0 and end is None and step == 1: + # Handling x[:], just pass through the tensor. 
+ context.add(x, node.name) + return + + begin_array = [0] * len(x.shape) + begin_array[dim] = start + end_array = [s if isinstance(s, int) else 0 for s in x.shape] + end_mask = [True] * len(x.shape) + if end is not None: + end_array[dim] = end + end_mask[dim] = False + + if isinstance(start, Var): + begin_array = mb.concat(values=begin_array, axis=0) + + if isinstance(end, Var): + end_array = mb.concat(values=end_array, axis=0) + + kwargs = { + "x": x, + "begin": begin_array, + "end": end_array, + "end_mask": end_mask, + "name": node.name, + } + + if step != 1: + stride_array = _np.array([1] * len(x.shape)) + stride_array[dim] = step + kwargs["stride"] = stride_array + + res = mb.slice_by_index(**kwargs) + context.add(res) + + +@register_torch_op(torch_alias=["split_with_sizes"]) +def split(context, node): + inputs = _get_inputs(context, node, expected=3) + x = inputs[0] + split_sizes = inputs[1] + dim = inputs[2].val + + if not isinstance(split_sizes.val, _np.ndarray): + shape = mb.shape(x=x) + dim_size = _list_select(shape, dim) + # MIL split op needs the size of each split to be given explicitly. + num_whole_splits = mb.floor_div(x=dim_size, y=split_sizes) + remainder = mb.mod(x=dim_size, y=split_sizes) + + # MIL doesn't have a way of turning a scalar into a tensor (list write + # only supports tensors). As a workaround, we create a constant [1] + # tensor and multiply it by the scalar value, thus creating a tensor + # with the scalar value in it. + tmp = mb.const(val=[1]) + whole_sizes = mb.mul(x=tmp, y=split_sizes) + reps = mb.mul(x=tmp, y=num_whole_splits) + whole_sizes = mb.tile(x=whole_sizes, reps=reps) + if remainder.val == 0: + split_sizes = whole_sizes + else: + partial_size = mb.mul(x=tmp, y=remainder) + split_sizes = mb.concat(values=[whole_sizes, partial_size], axis=0) + res = mb.split(x=x, split_sizes=split_sizes, axis=dim, name=node.name) + context.add(res, torch_name=node.name) + + +@register_torch_op +def unbind(context, node): + inputs = _get_inputs(context, node, expected=2) + x = inputs[0] + dim = inputs[1].val + split_sizes = [1] * x.shape[dim] + if len(split_sizes) == 1: + res = [mb.squeeze(x=x, axes=[dim])] + else: + res = mb.split(x=x, split_sizes=split_sizes, axis=dim, name=node.name) + res = [mb.squeeze(x=x, axes=[dim]) for x in res] + context.add(res, torch_name=node.name) + + +@register_torch_op +def to(context, node): + inputs = _get_inputs(context, node) + + # There are a lot of variants of `to` op. + # - When len(inputs) is 7 or 8, we only care about the first two params (input and dtype). + # - When len(inputs) == 6, the parameter is (input, _, dtype, non_blocking, copy, memory_format) + # - When len(inputs) == 5, the parameter is (input, dtype, non_blocking, copy, memory_format) + # - When len(inputs) == 4, the parameter is (input, dtype, non_blocking, copy) + # - When len(inputs) == 3, the parameter is (input, non_blocking, copy) + # We only use `input` and `dtype`, and `non_blocking` and `copy` are unused. + _input = inputs[0] + target_dtype: Optional[Var] + inputs_len = len(inputs) + if inputs_len in (4, 5, 7, 8): + target_dtype = inputs[1] + elif inputs_len == 6: + target_dtype = inputs[2] + elif inputs_len == 3: + target_dtype = None + else: + raise ValueError( + "Received invalid arguments for PyTorch conversion of op {}".format(node) + ) + + if target_dtype is None: + # When target_dtype is None, it means the input's dtype is already the target dtype. 
+        context.add(_input, torch_name=node.name)
+        return
+    elif types.is_scalar(target_dtype.sym_type) and target_dtype.val is not None:
+        dtype = target_dtype.val
+    else:
+        # When the val of dtype is not available, bridge from the np dtype.
+        np_type = nptype_from_builtin(target_dtype.dtype)
+        dtype = NUMPY_DTYPE_TO_TORCH_NUM[np_type]
+
+    torch_dtype = NUM_TO_TORCH_DTYPE[dtype]
+    if isinstance(_input, Var) and _input.can_be_folded_to_const():
+        # numpy -> torch -> torch cast -> numpy
+        # This path is needed to use the mapping of passed-in dtypes to torch dtypes.
+        casted_input = torch.tensor(_input.val).type(torch_dtype).cpu().numpy()
+        res = mb.const(val=casted_input, name=node.name)
+    else:
+        if dtype in NUM_TO_DTYPE_STRING:
+            res = mb.cast(x=_input, dtype=NUM_TO_DTYPE_STRING[dtype], name=node.name)
+        else:
+            # For a dtype that is not supported by mb.cast, we make a best-effort
+            # attempt to cast it to int or float based on the dtype.
+            np_dtype = NUM_TO_NUMPY_DTYPE[dtype]
+            if _np.issubdtype(np_dtype, _np.integer):
+                res = mb.cast(x=_input, dtype="int32", name=node.name)
+            elif _np.issubdtype(np_dtype, _np.floating):
+                res = mb.cast(x=_input, dtype="fp32", name=node.name)
+            else:
+                raise ValueError(f"Unsupported op {node} with target dtype {np_dtype}")
+    context.add(res)
+
+
+@register_torch_op
+def erf(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+    _input = inputs[0]
+    erf = mb.erf(x=_input, name=node.name)
+    context.add(erf)
+
+
+@register_torch_op(torch_alias=["scalarimplicit"])
+def implicittensortonum(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+    _input = inputs[0]
+
+    if _input.shape == ():  # already a scalar
+        context.add(_input, node.name)
+    else:
+        assert _input.shape == (1,)
+        # shape: (1,) -> ()
+        squeeze = mb.squeeze(x=_input, name=node.name)
+        context.add(squeeze)
+
+
+@register_torch_op
+def constantchunk(context, node):
+    inputs = _get_inputs(context, node, expected=1)
+    x = inputs[0]
+    # ConstantChunk gets its parameters as attributes of the node.
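+    # A worked example of the arithmetic below: with total = 10 and chunks = 4,
+    # size = ceil(10 / 4) = 3, giving floor(10 / 3) = 3 whole splits [3, 3, 3]
+    # plus a remainder of 1, i.e. split_sizes = [3, 3, 3, 1].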
+ chunks = node.attr["chunks"] + dim = node.attr["dim"] + + total = x.shape[dim] + size = int(_math.ceil(float(total) / float(chunks))) + split_sizes = [size] * int(_math.floor(total / size)) + remainder = total - sum(split_sizes) + if remainder > 0: + split_sizes.append(remainder) + + res = mb.split(x=x, split_sizes=split_sizes, axis=dim, name=node.name) + for val, name in zip(res, node.outputs): + context.add(val, name) + + +def _broadcast(name, tensor, shape): + if len(shape) > tensor.rank: + new_dims = len(shape) - tensor.rank + tensor = mb.expand_dims(x=tensor, axes=list(range(new_dims))) + + reps = [] + for ts, ds in zip(tensor.shape, shape): + if not is_symbolic(ts) and not is_symbolic(ds) and ds > 0 and ts == 1: + reps.append(ds) + else: + reps.append(1) + + res = mb.tile(x=tensor, reps=reps, name=name) + return res + + +@register_torch_op +def expand(context, node): + def _broadcast_dynamic(name, tensor, shape): + # Add any extra dimensions + if len(shape) > tensor.rank: + new_dims = len(shape) - tensor.rank + tensor = mb.expand_dims(x=tensor, axes=list(range(new_dims))) + + tensor_shape = mb.shape(x=tensor) + shape = mb.concat(values=shape, axis=0) + reps = mb.real_div(x=shape, y=tensor_shape) + reps = mb.cast(x=reps, dtype="int32") + res = mb.tile(x=tensor, reps=reps, name=name) + return res + + + # PyTorch 1.6+ has 3 inputs while older version has 2 + inputs = _get_inputs(context, node, expected=[2, 3]) + + x = inputs[0] + shape = inputs[1] + + if isinstance(shape, list): + res = _broadcast_dynamic(node.name, x, shape) + else: + res = _broadcast(node.name, x, shape.val) + context.add(res) + + +@register_torch_op +def expand_as(context, node): + # PyTorch 1.6+ has 3 inputs while older version has 2 + inputs = _get_inputs(context, node, expected=[2, 3]) + x = inputs[0] + other = inputs[1] + + res = _broadcast(node.name, x, other.shape) + context.add(res) + + +@register_torch_op +def arange(context, node): + inputs = _get_inputs(context, node) + # dtype = inputs[-4] + # layout = inputs[-3] + # device = inputs[-2] + # pin_memory = inputs[-1] + if len(inputs) == 5: + # inputs are [end, dtype, layout, device, pin_memory] + start = 0 + end = inputs[0] + step = 1 + elif len(inputs) == 6: + # inputs are [start, end, dtype, layout, device, pin_memory] + start = inputs[0] + end = inputs[1] + step = 1 + elif len(inputs) == 7: + # inputs are [start, end, step, dtype, layout, device, pin_memory] + start = inputs[0] + end = inputs[1] + step = inputs[2] + else: + raise ValueError( + "arange must have exactly 5, 6, or 7 inputs, got {}".format(len(inputs)) + ) + # If start, end, and step don't have the same dtype, we cast them to fp32 + int_start = isinstance(start, int) or types.is_int(start.dtype) + int_end = isinstance(end, int) or types.is_int(end.dtype) + int_step = isinstance(step, int) or types.is_int(step.dtype) + + if int_start != int_end or int_start != int_step: + start = mb.cast(x=start, dtype="fp32") + end = mb.cast(x=end, dtype="fp32") + step = mb.cast(x=step, dtype="fp32") + res = mb.range_1d(start=start, end=end, step=step, name=node.name) + context.add(res) + + +@register_torch_op +def masked_fill(context, node): + inputs = _get_inputs(context, node, expected=3) + x = inputs[0] + mask = inputs[1] + value = inputs[2] + # @mb.select does not properly broadcast scalar input, so as a workaround + # we create a full sized tensor. + + if types.is_int(value.dtype): + # @mb.fill cannot handle value with dtype integer + # so we cast the value. 
+ value = mb.cast(x=value, dtype="fp32")
+
+ if not types.is_bool(mask.dtype):
+ # cond must be bool type
+ mask = mb.cast(x=mask, dtype="bool")
+
+ shape = mb.shape(x=x, name=node.name + "_shape")
+ value = mb.fill(shape=shape, value=value, name=node.name + "_value")
+ res = mb.select(cond=mask, a=value, b=x, name=node.name)
+ context.add(res)
+
+
+@register_torch_op
+def meshgrid(context, node):
+ """
+ For N input tensors, a meshgrid is constructed by viewing each tensor as an N-dimension tensor
+ with values in the dimension corresponding to its order in the args. (a.)
+ Then, it is expanded along dimensions corresponding to the dimensions of each
+ 1d tensor in the order that they were passed in. (b.)
+
+ Each output tensor is put into a tuple that is returned. These tuples form
+ N, N-dimensional grids, where the ith grid is defined as expanding the ith input over
+ dimensions defined by the other inputs.
+ """
+ supported_indexing_modes = ("ij", "xy")
+ indexing = "ij"
+ inputs = _get_inputs(context, node, expected=[1, 2])
+
+ if len(inputs) == 2:
+ indexing = inputs[1].val
+ if indexing not in supported_indexing_modes:
+ raise ValueError("indexing mode {} not supported".format(indexing))
+
+ tensor_inputs = inputs[0]
+ assert isinstance(tensor_inputs, (list, tuple))
+ if len(tensor_inputs) < 2:
+ raise ValueError("Requires >= 2 tensor inputs.")
+
+ if any([len(tensor_var.shape) > 1 for tensor_var in tensor_inputs]):
+ raise ValueError("meshgrid received non-1d tensor.")
+
+ dim_tuple = tuple(tensor_var.shape[0] for tensor_var in tensor_inputs)
+
+ grids = []
+ size = len(tensor_inputs)
+ for i in range(size):
+ view_shape = [1] * size
+ view_shape[i] = -1
+ view_shape = tuple(view_shape)
+ # (a.) in docstring
+ view = mb.reshape(
+ x=tensor_inputs[i], shape=view_shape, name=node.name + "_view_" + str(i)
+ )
+
+ # (b.) in docstring
+ reps = [
+ ds if ds > 0 and ts == 1 else 1 for ts, ds in zip(view.shape, dim_tuple)
+ ]
+ res = mb.tile(x=view, reps=reps, name=node.name + "_expand_" + str(i))
+
+ # transpose the first two dimensions for "xy" indexing
+ if indexing == "xy":
+ perm = [1, 0] + list(range(2, size))
+ res = mb.transpose(x=res, perm=perm, name=node.name + "_transpose_" + str(i))
+
+ grids.append(res)
+
+ context.add(tuple(grids), node.name)
+
+
+# Defines all the nodes that are no-ops
+@register_torch_op(
+ torch_alias=[
+ "dropout",
+ "dropout_",
+ "feature_dropout",
+ "contiguous",
+ "device",
+ "detach",
+ "clone",
+ ]
+)
+def noop(context, node):
+ logger.info("Setting pytorch op: {} to no-op.".format(node))
+ inputs = _get_inputs(context, node)
+ _input = inputs[0]
+ context.add(_input, torch_name=node.name)
+
+
+@register_torch_op
+def argmax(context, node):
+ inputs = _get_inputs(context, node)
+ x = inputs[0]
+ axis = inputs[1]
+ keep_dims = inputs[2]
+ if types.is_int(x.dtype) and x.dtype._width == 64:
+ # MIL reduce_argmax doesn't support int64.
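+ # For example (illustrative, assuming the values fit in int32): a PyTorch
+ # int64 input is downcast to int32 here before reduce_argmax runs.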
+ x = mb.cast(x=x, dtype="int32")
+ res = mb.reduce_argmax(x=x, axis=axis, keep_dims=keep_dims, name=node.name)
+ context.add(res)
+
+
+@register_torch_op(torch_alias=["empty_like"])
+def zeros_like(context, node):
+ inputs = _get_inputs(context, node, expected=6)
+ x = inputs[0]
+ dtype = inputs[1].val
+ shape = mb.shape(x=x)
+ np_type = NUM_TO_NUMPY_DTYPE[dtype]
+
+ if shape.can_be_folded_to_const():
+ shape = shape.val
+ zeros = _np.zeros(shape).astype(np_type)
+ zeros_like = mb.const(val=zeros, name=node.name)
+ else:
+ value = np_type(0)
+ if is_current_opset_version_compatible_with(target.iOS16):
+ zeros_like = mb.fill_like(ref_tensor=x, value=value, name=node.name)
+ else:
+ zeros_like = mb.fill(shape=shape, value=value, name=node.name)
+
+ context.add(zeros_like)
+
+
+@register_torch_op(torch_alias=["empty"])
+def zeros(context, node):
+ inputs = _get_inputs(context, node)
+ size = inputs[0]
+ if inputs[1] is not None:
+ dtype = inputs[1].val
+ else:
+ dtype = torch.get_default_dtype()
+ assert dtype in (torch.float32, torch.float64)
+ dtype = 6
+
+ if isinstance(size, list) or not size.can_be_folded_to_const():
+ # the size is dynamic or this zeros op cannot be folded into const.
+ size = mb.concat(values=size, axis=0) if isinstance(size, list) else size
+ np_type = NUM_TO_NUMPY_DTYPE[dtype]
+ zeros = mb.fill(shape=size, value=np_type(0), name=node.name)
+ else:
+ # the size is static and this zeros op can be folded into const.
+ size = size.val
+ # layout = inputs[2] unused
+ # device = inputs[3] unused
+ # pin_memory = inputs[4] unused
+ torch_dtype = NUM_TO_TORCH_DTYPE[dtype]
+ zeros_array = torch.zeros(tuple(size)).type(torch_dtype).numpy()
+ zeros = mb.const(val=zeros_array, name=node.name)
+
+ context.add(zeros)
+
+
+@register_torch_op(torch_alias=["new_empty"])
+def new_zeros(context, node):
+ inputs = _get_inputs(context, node)
+ shape = inputs[1]
+ if isinstance(shape, list):
+ # when the size is dynamic, it is a list of pymil scalars,
+ # so we need to concat them first to get a shape.
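+ # Illustrative example (not from the original source): shape may arrive as
+ # [Var(2), Var(n)] with n dynamic; concatenating along axis 0 yields a single
+ # rank-1 shape tensor that mb.fill accepts.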
+ shape = mb.concat(values=shape, axis=0) + context.add(mb.fill(shape=shape, value=0., name=node.name)) + + +@register_torch_op +def dim(context, node): + inputs = _get_inputs(context, node) + shape = mb.shape(x=inputs[0]) + rank = mb.shape(x=shape) + context.add(value_at(rank, 0, node.name)) + + +@register_torch_op +def min(context, node): + inputs = _get_inputs(context, node, expected=[1, 2, 3]) + + # mimic functionality from https://pytorch.org/docs/stable/generated/torch.min.html + if len(inputs) == 1: + value = mb.reduce_min(x=inputs[0], axes=None, name=node.name) + context.add(value) + elif len(inputs) == 2: + value = mb.minimum(x=inputs[0], y=inputs[1], name=node.name) + context.add(value) + elif len(inputs) == 3: + _input = inputs[0] + dim = inputs[1].val + keepdim = inputs[2].val + + values = mb.reduce_min(x=_input, axes=[dim], keep_dims=keepdim) + indices = mb.reduce_argmin(x=_input, axis=dim, keep_dims=keepdim) + assert len(node.outputs) == 2 + values_name = node.outputs[0] + indices_name = node.outputs[1] + context.add(values, torch_name=values_name) + context.add(indices, torch_name=indices_name) + + +@register_torch_op +def max(context, node): + inputs = _get_inputs(context, node, expected=[1, 2, 3]) + + # mimic functionality from https://pytorch.org/docs/stable/generated/torch.max.html + if len(inputs) == 1: + value = mb.reduce_max(x=inputs[0], axes=None, name=node.name) + context.add(value) + elif len(inputs) == 2: + value = mb.maximum(x=inputs[0], y=inputs[1], name=node.name) + context.add(value) + elif len(inputs) == 3: + _input = inputs[0] + dim = inputs[1].val + keepdim = inputs[2].val + + values = mb.reduce_max(x=_input, axes=[dim], keep_dims=keepdim) + indices = mb.reduce_argmax(x=_input, axis=dim, keep_dims=keepdim) + assert len(node.outputs) == 2 + values_name = node.outputs[0] + indices_name = node.outputs[1] + context.add(values, torch_name=values_name) + context.add(indices, torch_name=indices_name) + +def _add_amax_amin(context, node, reduce_op): + # mimic functionality from https://pytorch.org/docs/stable/generated/torch.amax.html + # mimic functionality from https://pytorch.org/docs/stable/generated/torch.amin.html + assert len(node.outputs) == 1 + + all_inputs = _get_inputs(context, node, expected=[2, 3]) + _input = all_inputs[0] + dim = [all_inputs[1].val] if type(all_inputs[1].val) == int else [x for x in all_inputs[1].val] + keepdim = all_inputs[2] if len(all_inputs) == 3 else False + + context.add(reduce_op(x=_input, axes=dim, keep_dims=keepdim), torch_name=node.outputs[0]) + +@register_torch_op +def amax(context, node): + _add_amax_amin(context, node, mb.reduce_max) + +@register_torch_op +def amin(context, node): + _add_amax_amin(context, node, mb.reduce_min) + + +@register_torch_op +def argsort(context, node): + inputs = _get_inputs(context, node, expected=3) + ascending = mb.logical_not(x=inputs[2]) + argsort = mb.argsort(x=inputs[0], axis=inputs[1], ascending=ascending, name=node.name) + context.add(argsort) + + +@register_torch_op +def sort(context, node): + inputs = _get_inputs(context, node) + _input = inputs[0] + axis = inputs[1].val + ascending = not inputs[2].val + indices_name = node.outputs[1] + values_name = node.outputs[0] + indices = mb.argsort(x=_input, axis=axis, ascending=ascending, name=indices_name) + values = mb.gather_along_axis(x=_input, indices=indices, axis=axis, name=values_name) + context.add(values, torch_name=values_name) + context.add(indices, torch_name=indices_name) + + +@register_torch_op +def append(context, node): + # 
Note: by applying torchir_passes.transform_inplace_ops the meaning of
+ # this op is changed from the original TorchIR. This op expects a python
+ # list or MIL List as its first input. If an MIL List, the second input
+ # must be a tensor of whatever shape the List expects. If not an MIL List,
+ # the second input can be anything. The result will be the second input
+ # joined to the first input, either by list_write if an MIL list, or
+ # append if a python list.
+ inputs = _get_inputs(context, node, expected=2)
+ ls = inputs[0]
+ value = inputs[1]
+
+ if isinstance(ls, list):
+ context.add(ls + [value], node.name)
+ elif isinstance(ls, ListVar):
+ index = mb.list_length(ls=ls, name=node.name + "_index")
+ res = mb.list_write(ls=ls, index=index, value=value, name=node.name)
+ context.add(res)
+ else:
+ raise ValueError(
+ "can only append to Python list or MIL ListVar, got {}.".format(
+ type(inputs[0])
+ )
+ )
+
+
+@register_torch_op
+def gather(context, node):
+ inputs = _get_inputs(context, node)
+ res = mb.gather_along_axis(x=inputs[0], indices=inputs[2], axis=inputs[1], name=node.name)
+ context.add(res)
+
+
+@register_torch_op
+def index_select(context, node):
+ x = context[node.inputs[0]]
+ axis = context[node.inputs[1]]
+ indices = context[node.inputs[2]]
+ context.add(mb.gather(x=x, indices=indices, axis=axis, name=node.name))
+
+
+@register_torch_op(torch_alias=["abs"])
+def _abs(context, node):
+ inputs = _get_inputs(context, node, expected=1)
+ context.add(mb.abs(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def repeat(context, node):
+ x = context[node.inputs[0]]
+ reps = context[node.inputs[1]]
+ if isinstance(reps, list):
+ reps = mb.concat(values=reps, axis=0)
+
+ if reps.shape[0] > len(x.shape):
+ x = mb.expand_dims(x=x, axes=list(range(reps.shape[0] - x.rank)))
+ context.add(mb.tile(x=x, reps=reps, name=node.name))
+
+
+@register_torch_op
+def acos(context, node):
+ inputs = _get_inputs(context, node, expected=1)
+ context.add(mb.acos(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def acosh(context, node):
+ inputs = _get_inputs(context, node, expected=1)
+ context.add(mb.acosh(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def asin(context, node):
+ inputs = _get_inputs(context, node, expected=1)
+ context.add(mb.asin(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def atan(context, node):
+ inputs = _get_inputs(context, node, expected=1)
+ context.add(mb.atan(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def atan2(context, node):
+ """
+ atan2(Tensor y, Tensor x)
+ Element-wise arctangent of y / x with consideration of the quadrant
+ Returns a new tensor with the signed angles in radians between vector (x, y) and vector (1, 0)
+
+ On a high level:
+ 1. atan(y / x) to get the angle in [-pi / 2, pi / 2]
+ 2. 
analyze quadrant to determine the angle in [-pi, pi] + + Reference PyTorch code https://gist.github.com/nikola-j/b5bb6b141b8d9920318677e1bba70466 + def my_atan2(y, x): + pi = torch.from_numpy(np.array([np.pi])).to(y.device, y.dtype) + ans = torch.atan(y / x) + ans += ((y > 0) & (x < 0)) * pi + ans -= ((y < 0) & (x < 0)) * pi + ans *= (1 - ((y > 0) & (x == 0)) * 1.0) + ans += ((y > 0) & (x == 0)) * (pi / 2) + ans *= (1 - ((y < 0) & (x == 0)) * 1.0) + ans += ((y < 0) & (x == 0)) * (-pi / 2) + return ans + """ + inputs = _get_inputs(context, node, expected=2) + y = inputs[0] + x = inputs[1] + if not types.is_float(y.dtype): + y = mb.cast(x=y, dtype="fp32") + if not types.is_float(x.dtype): + x = mb.cast(x=x, dtype="fp32") + + # basic logical expressions + y_less_0 = mb.less(x=y, y=0.0) + y_greater_0 = mb.greater(x=y, y=0.0) + x_less_0 = mb.less(x=x, y=0.0) + x_equal_0 = mb.equal(x=x, y=0.0) + + # combined logical expressions + ygreater0_and_xless0 = mb.logical_and(x=y_greater_0, y=x_less_0) + yless0_and_xless0 = mb.logical_and(x=y_less_0, y=x_less_0) + ygreater0_and_xequal0 = mb.logical_and(x=y_greater_0, y=x_equal_0) + yless0_and_xequal0 = mb.logical_and(x=y_less_0, y=x_equal_0) + + # bool -> fp32 for numeric operation + ygreater0_and_xless0_numeric = mb.cast(x=ygreater0_and_xless0, dtype="fp32") + yless0_and_xless0_numeric = mb.cast(x=yless0_and_xless0, dtype="fp32") + ygreater0_and_xequal0_numeric = mb.cast(x=ygreater0_and_xequal0, dtype="fp32") + yless0_and_xequal0_numeric = mb.cast(x=yless0_and_xequal0, dtype="fp32") + + # quadrant modification coefficients + coeff1 = mb.mul(x=ygreater0_and_xless0_numeric, y=_np.pi) + coeff2 = mb.mul(x=yless0_and_xless0_numeric, y=_np.pi) + coeff3 = mb.sub(x=1.0, y=ygreater0_and_xequal0_numeric) + coeff4 = mb.mul(x=ygreater0_and_xequal0_numeric, y=_np.pi / 2.0) + coeff5 = mb.sub(x=1.0, y=yless0_and_xequal0_numeric) + coeff6 = mb.mul(x=yless0_and_xequal0_numeric, y=-_np.pi / 2.0) + + # if -1e-8 < x < 1e-8, x += 2e-8 to avoid y / 0 + # this shift makes atan2(0, 0) = 0, which is consistent with PyTorch torch.atan2 + x0left = mb.greater(x=x, y=-1e-8) + x0right = mb.less(x=x, y=1e-8) + x0 = mb.logical_and(x=x0left, y=x0right) + x0numeric = mb.cast(x=x0, dtype="fp32") + safe_shift = mb.mul(x=x0numeric, y=2e-8) + x_safe = mb.add(x=x, y=safe_shift) + + # compute atan(y / x) + ydx = mb.real_div(x=y, y=x_safe) + atan2_1 = mb.atan(x=ydx) + + # analyze quadrant + atan2_2 = mb.add(x=atan2_1, y=coeff1) + atan2_3 = mb.sub(x=atan2_2, y=coeff2) + atan2_4 = mb.mul(x=atan2_3, y=coeff3) + atan2_5 = mb.add(x=atan2_4, y=coeff4) + atan2_6 = mb.mul(x=atan2_5, y=coeff5) + context.add(mb.add(x=atan2_6, y=coeff6, name=node.name)) + + +@register_torch_op +def atanh(context, node): + inputs = _get_inputs(context, node, expected=1) + context.add(mb.atanh(x=inputs[0], name=node.name)) + + +@register_torch_op +def ceil(context, node): + inputs = _get_inputs(context, node, expected=1) + context.add(mb.ceil(x=inputs[0], name=node.name)) + + +@register_torch_op +def clamp(context, node): + inputs = _get_inputs(context, node, expected=3) + x = inputs[0] + min_val = inputs[1] if inputs[1] else _np.finfo(_np.float32).min + max_val = inputs[2] if inputs[2] else _np.finfo(_np.float32).max + + if isinstance(min_val, Var) and isinstance(max_val, Var) and min_val.val >= max_val.val: + # When min >= max, PyTorch sets all values to max. 
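+ # For example (illustrative): torch.clamp(x, min=5, max=3) returns a tensor
+ # filled with 3 regardless of x, which the fill below reproduces.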
+ context.add(mb.fill(shape=mb.shape(x=x), value=max_val.val, name=node.name))
+ return
+
+ is_input_int = types.is_int(x.dtype)
+ if not types.is_float(x.dtype):
+ # The `mb.clip` op requires parameters from type domain ['fp16', 'fp32'].
+ x = mb.cast(x=x, dtype="fp32")
+ x, min_val, max_val = promote_input_dtypes([x, min_val, max_val])
+ if is_input_int:
+ clip_res = mb.clip(x=x, alpha=min_val, beta=max_val)
+ context.add(mb.cast(x=clip_res, dtype="int32", name=node.name))
+ else:
+ context.add(mb.clip(x=x, alpha=min_val, beta=max_val, name=node.name))
+
+
+@register_torch_op
+def triu(context, node):
+ inputs = _get_inputs(context, node, expected=2)
+ x = inputs[0]
+ diagonal = inputs[1]
+ diagonal = 0 if diagonal is None else diagonal.val
+ if diagonal <= 0:
+ res = mb.band_part(x=x, lower=-diagonal, upper=-1, name=node.name)
+ else:
+ y = mb.band_part(x=x, lower=-1, upper=diagonal - 1)
+ res = mb.sub(x=x, y=y, name=node.name)
+ context.add(res)
+
+
+@register_torch_op
+def tril(context, node):
+ inputs = _get_inputs(context, node, expected=2)
+ x = inputs[0]
+ diagonal = inputs[1]
+ diagonal = 0 if diagonal is None else diagonal.val
+ if diagonal >= 0:
+ res = mb.band_part(x=x, lower=-1, upper=diagonal, name=node.name)
+ else:
+ y = mb.band_part(x=x, lower=-diagonal - 1, upper=-1)
+ res = mb.sub(x=x, y=y, name=node.name)
+ context.add(res)
+
+
+@register_torch_op
+def cos(context, node):
+ inputs = _get_inputs(context, node, expected=1)
+ context.add(mb.cos(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def cosh(context, node):
+ inputs = _get_inputs(context, node, expected=1)
+ context.add(mb.cosh(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def exp(context, node):
+ inputs = _get_inputs(context, node, expected=1)
+ context.add(mb.exp(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def exp2(context, node):
+ inputs = _get_inputs(context, node, expected=1)
+ context.add(mb.exp2(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def floor(context, node):
+ inputs = _get_inputs(context, node, expected=1)
+ context.add(mb.floor(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def reciprocal(context, node):
+ inputs = _get_inputs(context, node, expected=1)
+ context.add(mb.inverse(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def log(context, node):
+ inputs = _get_inputs(context, node, expected=1)
+ context.add(mb.log(x=inputs[0], name=node.name))
+
+
+@register_torch_op(torch_alias=["round"])
+def _round(context, node):
+ inputs = _get_inputs(context, node, expected=1)
+ context.add(mb.round(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def rsqrt(context, node):
+ inputs = _get_inputs(context, node, expected=1)
+ context.add(mb.rsqrt(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def sin(context, node):
+ inputs = _get_inputs(context, node, expected=1)
+ context.add(mb.sin(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def sinh(context, node):
+ inputs = _get_inputs(context, node, expected=1)
+ context.add(mb.sinh(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def asinh(context, node):
+ inputs = _get_inputs(context, node, expected=1)
+ context.add(mb.asinh(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def sqrt(context, node):
+ inputs = _get_inputs(context, node, expected=1)
+ context.add(mb.sqrt(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def square(context, node):
+ inputs = _get_inputs(context, node, expected=1)
+ # mb.square is not supported in some backends
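+ # so we lower square(x) as x * x, which is numerically identical for real
+ # inputs (illustrative note, not from the original source).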
+ context.add(mb.mul(x=inputs[0], y=inputs[0], name=node.name))
+
+
+@register_torch_op
+def tan(context, node):
+ inputs = _get_inputs(context, node, expected=1)
+ context.add(mb.tan(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def tanh(context, node):
+ inputs = _get_inputs(context, node, expected=1)
+ context.add(mb.tanh(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def threshold(context, node):
+ inputs = _get_inputs(context, node, expected=3)
+ x = inputs[0]
+ alpha = inputs[1]
+ threshold_val = inputs[2]
+
+ # Simple case (threshold_val == alpha)
+ if alpha.val == threshold_val.val:
+ threshold_node = mb.threshold(x=x, alpha=alpha, name=node.name)
+ context.add(threshold_node)
+ return
+
+ # Complex case (threshold_val != alpha)
+ threshold_node = mb.threshold(x=x, alpha=alpha, name=node.name + '_threshold')
+ context.add(threshold_node)
+
+ gt_node = mb.greater_equal(x=alpha, y=x, name=node.name + '_ge')
+ context.add(gt_node)
+ gt_node_32 = mb.cast(x=gt_node, dtype="fp32", name=node.name + '_ge32')
+
+ mul_node = mb.linear_activation(
+ x=gt_node_32,
+ alpha=float(threshold_val.val - alpha.val),
+ beta=0.,
+ name=node.name + '_mul'
+ )
+ context.add(mul_node)
+
+ final_node = mb.add(x=mul_node, y=threshold_node, name=node.name)
+ context.add(final_node)
+
+
+@register_torch_op
+def sign(context, node):
+ inputs = _get_inputs(context, node, expected=1)
+ context.add(mb.sign(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def is_floating_point(context, node):
+ inputs = _get_inputs(context, node, expected=1)
+ is_float = types.is_float(inputs[0].dtype)
+ context.add(mb.const(val=is_float, name=node.name))
+
+
+@register_torch_op
+def logical_and(context, node):
+ inputs = _get_inputs(context, node, expected=2)
+ x, y = inputs
+ x = mb.cast(x=x, dtype="bool")
+ y = mb.cast(x=y, dtype="bool")
+ context.add(mb.logical_and(x=x, y=y, name=node.name))
+
+@register_torch_op
+def logical_or(context, node):
+ inputs = _get_inputs(context, node, expected=2)
+ x, y = inputs
+ x = mb.cast(x=x, dtype="bool")
+ y = mb.cast(x=y, dtype="bool")
+ context.add(mb.logical_or(x=x, y=y, name=node.name))
+
+
+@register_torch_op
+def logical_xor(context, node):
+ inputs = _get_inputs(context, node, expected=2)
+ x, y = inputs
+ x = mb.cast(x=x, dtype="bool")
+ y = mb.cast(x=y, dtype="bool")
+ context.add(mb.logical_xor(x=x, y=y, name=node.name))
+
+
+def _nonzero_as_tuple(context, node, x):
+ '''
+ Calculates the non-zero elements of x then slices results by each inner index.
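+
+ For example (illustrative): x = [[1, 0], [0, 3]] has non-zero indices
+ [[0, 0], [1, 1]]; slicing out each column yields the tuple ([0, 1], [0, 1]),
+ matching torch.nonzero(x, as_tuple=True).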
+ ''' + non_zero = mb.non_zero(x=x) + + result = [] + for i in range(x.rank): + result.append( + mb.slice_by_index( + x=non_zero, + begin=[0, i], + end=[-1, -1], # Ignored, but required + end_mask=[True, False], + squeeze_mask=[False, True] + ) + ) + + context.add(result, node.name) + + +@register_torch_op +def where(context, node): + inputs = _get_inputs(context, node) + + if len(inputs) == 1: + _nonzero_as_tuple(context, node, inputs[0]) + return + + assert len(inputs) == 3 + cond = inputs[0] + if not types.is_bool(cond.dtype): + # cond must be bool type + cond = mb.cast(x=cond, dtype="bool") + if not any([any_symbolic(x.shape) for x in inputs[:3]]): + # broadcast all tensors to the same shape + broadcast_inputs = _broadcast_tensors([cond, inputs[1], inputs[2]]) + result = mb.select( + cond=broadcast_inputs[0], + a=broadcast_inputs[1], + b=broadcast_inputs[2], + name=node.name, + ) + else: + result = mb.select(cond=cond, a=inputs[1], b=inputs[2], name=node.name) + context.add(result) + + +@register_torch_op +def nonzero_numpy(context, node): + inputs = _get_inputs(context, node, expected=1) + _nonzero_as_tuple(context, node, inputs[0]) + + +@register_torch_op +def neg(context, node): + inputs = _get_inputs(context, node, expected=1) + x, y = promote_input_dtypes([inputs[0], -1]) + context.add(mb.mul(x=x, y=y, name=node.name)) + +@register_torch_op +def topk(context, node): + def dynamic_topk(x, k, axis, ascending): + assert k.val is None, "Please use mb.topk directly if k is compile time known" + indices = mb.argsort(x=x, axis=axis, ascending=ascending) + values = mb.gather_along_axis(x=x, indices=indices, axis=axis) + + k_indices = mb.range_1d(end=k, start=0, step=1) + values = mb.gather(x=values, indices=k_indices, axis=axis) + indices = mb.gather(x=indices, indices=k_indices, axis=axis) + + return values, indices + + inputs = _get_inputs(context, node) + kwargs = {"name": node.name, "x": inputs[0], "k": inputs[1]} + + if len(inputs) > 6: + raise Exception("Number of inputs to topk exceeds 6") + # optional: @axis + if len(inputs) > 2: + if inputs[2] is not None: + kwargs["axis"] = inputs[2].val + + # optional: @ascending + if len(inputs) > 3: + largest = inputs[3].val + kwargs["ascending"] = not largest + + # last inputs to topk are optional - sorted and out. + sort = True + if len(inputs) > 4: + if inputs[4].val is False and not is_current_opset_version_compatible_with(target.iOS16): + raise Exception("For opset <= iOS16, only sorted=True supported for the topk") + sort = inputs[4].val + + if len(inputs) > 5: + if inputs[5] is not None: + raise Exception( + "Unsupported value for argument 'out' in topk. Supported values: None, but input " + "is {}".format(inputs[5].val) + ) + + if is_current_opset_version_compatible_with(target.iOS16): + kwargs["sort"] = sort + + if kwargs["k"].val is None: + res = dynamic_topk( + x=kwargs["x"], + k=kwargs["k"], + axis=kwargs["axis"], + ascending=kwargs["ascending"] + ) + else: + res = mb.topk(**kwargs) + + values_name = node.outputs[0] + indices_name = node.outputs[1] + context.add(res[0], torch_name=values_name) + context.add(res[1], torch_name=indices_name) + + +def _std(x, axes, keep_dim, unbiased, eps): + need_rescale = False + if unbiased: + # If "unbiased" is True, + # then we need to divide by "N-1" (instead of "N") to compute the mean of (x-E[x])^2 + # for an unbiased estimate of the variance / standard deviation. 
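+ # (Illustrative: with N = 4 the correction multiplies the biased std by
+ # sqrt(4 / 3) ~= 1.155, which is exactly the rescale_factor computed below.)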
+ # In the sequence of MIL ops added below, we first compute the mean using "N", and only if it is
+ # unbiased do we rescale the final result later.
+ # We ignore the "unbiased" flag if any of the dimensions involved in this operation are dynamic
+ # (we could have still handled that case by using "get_shape" etc ops, but we don't do that here,
+ # trading performance for numerical accuracy)
+ if axes is None:
+ if not any_symbolic(x.shape) and _np.prod(x.shape) > 1:
+ N = _np.prod(x.shape)
+ need_rescale = True
+ else:
+ dims = []
+ # collect dimensions corresponding to "axes"
+ for axis in axes:
+ dims.append(x.shape[axis])
+ if all([not is_symbolic(s) for s in dims]):
+ N = _np.prod(dims)
+ if N > 1:
+ need_rescale = True
+ if need_rescale:
+ rescale_factor = _np.sqrt(N / float(N - 1))
+
+ x_mean = mb.reduce_mean(x=x, axes=axes, keep_dims=True)
+ x_demeaned = mb.sub(x=x, y=x_mean)
+ x_demeaned_square = mb.square(x=x_demeaned)
+ x_demeaned_square_mean = mb.reduce_mean(x=x_demeaned_square, axes=axes, keep_dims=keep_dim)
+ if eps > 0:
+ x_demeaned_square_mean = mb.add(x=x_demeaned_square_mean, y=eps)
+ if need_rescale:
+ y_before_scale = mb.sqrt(x=x_demeaned_square_mean)
+ y = mb.mul(x=y_before_scale, y=rescale_factor)
+ else:
+ y = mb.sqrt(x=x_demeaned_square_mean)
+ return y
+
+@register_torch_op
+def numel(context, node):
+ inputs = _get_inputs(context, node, expected=1)
+ x = inputs[0]
+ x = mb.shape(x=x)
+ x = mb.reduce_prod(x=x, axes=[0], name=node.name)
+ context.add(x)
+
+@register_torch_op
+def std(context, node):
+ inputs = _get_inputs(context, node)
+ x = inputs[0]
+ if not (len(inputs) == 2 or len(inputs) == 4):
+ raise ValueError("Number of inputs to the 'std' op must be "
+ "2 or 4")
+
+ keep_dim = False
+ axes = None
+ if len(inputs) == 2:
+ unbiased = inputs[1].val
+ if len(inputs) == 4:
+ axes = inputs[1].val
+ if isinstance(axes, int):
+ axes = [axes]
+ unbiased = inputs[2].val
+ keep_dim = inputs[3].val
+
+ y = _std(x, axes, keep_dim, unbiased, 0)
+ context.add(y, node.name)
+
+
+@register_torch_op
+def copy(context, node):
+ inputs = _get_inputs(context, node, expected=[2, 3])
+ context.add(mb.identity(x=inputs[0], name=node.name))
+
+
+@register_torch_op
+def dtype(context, node):
+ inputs = _get_inputs(context, node, expected=1)
+ dtype_str = inputs[0].dtype.__name__
+ context.add(mb.const(val=dtype_str, name=node.name))
+
+
+@register_torch_op
+def tensor(context, node):
+ def _make_tensor(list_of_tensor, name, rank):
+ if rank == 6:
+ raise NotImplementedError("Core ML only supports tensor rank <= 5.")
+ if not isinstance(list_of_tensor, list):
+ return list_of_tensor
+ values = [
+ _make_tensor(x, name + "_r_" + str(i), rank + 1)
+ for i, x in enumerate(list_of_tensor)
+ ]
+ if len(values) == 1:
+ return mb.expand_dims(x=values[0], axes=[0], name=name)
+ return mb.stack(values=values, axis=0, name=name)
+
+ inputs = _get_inputs(context, node, expected=4)
+
+ # Case 1: Using torch.tensor to create a const tensor
+ # For example:
+ # torch.tensor([[[0, 0], [0, 10], [5, 10], [5, 0]]], dtype=torch.float32)
+ val = inputs[0]
+ if isinstance(val, list):
+ context.add(_make_tensor(val, node.name, 1))
+ return
+
+ if inputs[2] is None:
+ context.add(mb.identity(x=val, name=node.name))
+ return
+
+ # Case 2: Create a tensor filled with a single value
+ val = val.val # element val to fill
+ msg_prefix = 'torch::tensor {} '.format(node.name)
+ if val is None:
+ raise ValueError(msg_prefix + 'val is None')
+ dtype_str = inputs[1].val
+ if dtype_str != "fp32":
+ raise NotImplementedError(
+ msg_prefix + "Unsupported dtype: {}".format(dtype_str)
+ )
+ # inputs[3] is a bool (not sure what it is)
+ shape = mb.shape(x=inputs[2], name=node.name + "_shape")
+ context.add(mb.fill(shape=shape, value=val, name=node.name))
+
+
+"""
+Pack and unpack ops in pytorch.
+The typical pattern is as follows:
+
+>>> seq = torch.tensor([[1,2,0], [3,0,0], [4,5,6]])
+>>> lens = [2, 1, 3]
+>>> packed = pack_padded_sequence(seq, lens, batch_first=True, enforce_sorted=False)
+>>> packed
+PackedSequence(data=tensor([4, 1, 3, 5, 2, 6]), batch_sizes=tensor([3, 2, 1]),
+ sorted_indices=tensor([2, 0, 1]), unsorted_indices=tensor([1, 2, 0]))
+>>> seq_unpacked, lens_unpacked = pad_packed_sequence(packed, batch_first=True)
+>>> seq_unpacked
+tensor([[1, 2, 0],
+ [3, 0, 0],
+ [4, 5, 6]])
+>>> lens_unpacked
+tensor([2, 1, 3])
+
+Source: https://pytorch.org/docs/stable/generated/torch.nn.utils.rnn.pad_packed_sequence.html
+"""
+
+
+@register_torch_op
+def _pack_padded_sequence(context, node):
+ # The implementation of this op is not efficient. Raise a warning.
+ logger.warning(
+ "Encountered a _pack_padded_sequence layer. The implementation of translating pack/unpack op\
+ in pytorch is not efficient due to the current limitation of Core ML. Removing the pack-unpack logic \
+ and using a fixed batch size model is recommended."
+ )
+
+ inputs = _get_inputs(context, node, expected=3)
+ tensor_name, batch_sizes_name = node.outputs
+ tensor_input = inputs[0]
+ batch_sizes = inputs[1]
+ batch_first = inputs[2].val
+
+ # By assuming that the output of this op is always fed into an LSTM layer,
+ # we enforce the layout to be Batch * seq_length * Feature.
+ if not batch_first:
+ tensor_input = mb.transpose(x=tensor_input, perm=[1, 0, 2])
+ context.add(mb.identity(x=tensor_input, name=tensor_name))
+
+ # add the batch_sizes in the context, so that _pad_packed_sequence can
+ # find it later.
+ context.add(mb.identity(x=batch_sizes, name=batch_sizes_name))
+
+
+@register_torch_op
+def _pad_packed_sequence(context, node):
+ # The implementation of this op is not efficient. Raise a warning.
+ logger.warning(
+ "Encountered a _pad_packed_sequence layer. The implementation of translating pack/unpack op\
+ in pytorch is not efficient due to the current limitation of Core ML. Removing the pack-unpack logic \
+ and using a fixed batch size model is recommended."
+ )
+ inputs = _get_inputs(context, node)
+
+ # seq_lengths denotes the actual sequence length for each batch.
+ # pad denotes the padding value for data with shorter length.
+ input_tensor = inputs[0]
+ seq_lengths = inputs[1]
+ batch_first = inputs[2].val
+ pad = inputs[3].val
+
+ # we only support pack and unpack translation for static tensor shape,
+ # i.e., the three dimensions are all known during compile time.
+ if any([is_symbolic(x) for x in input_tensor.shape]):
+ raise NotImplementedError("Only static shape of PackedSequence object is supported.")
+
+ # the input always has batch first layout.
+ # padded_seq_len denotes the maximum sequence length across batches.
+ batch, padded_seq_len, input_dim = input_tensor.shape
+ assert seq_lengths.rank == 1
+ assert batch == seq_lengths.shape[0]
+
+ # we iterate through the batch, pad each sequence, and concatenate them into a single tensor at the end,
+ # which is the total_tensor here.
+ # Say the input_tensor has shape [batch, padded_seq_len, input_dim],
+ # and the seq_lengths = [len_1, len_2, len_3].
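+ # (Illustrative: with padded_seq_len = 3 and seq_lengths = [2, 1, 3], the
+ # first batch entry keeps rows 0-1 and gets 2 rows of `pad` appended, one of
+ # which is the dummy row trimmed again below.)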
+ # Note that in pytorch, the seq_lengths must be decreasing in order, len_1 >= len_2 >= len_3.
+ total_tensor = []
+
+ for i in range(batch):
+ # slice out each batch entry
+ # x has shape [padded_seq_len, input_dim]
+ x = mb.slice_by_index(
+ x=input_tensor,
+ begin=[i, 0, 0],
+ end=[0, 0, 0],
+ stride=[1, 1, 1],
+ begin_mask=[False, True, True],
+ end_mask=[False, True, True],
+ squeeze_mask=[True, False, False],
+ )
+
+ # get the unpadded sequence,
+ # if the unpadded sequence has length seq_length,
+ # x would have shape [seq_length, input_dim].
+ # For example, the first data would result in a [len_1, input_dim] tensor.
+ seq_length = mb.cast(x=value_at(seq_lengths, i), dtype="int32")
+ concate_values = [seq_length, input_dim]
+ end_index = mb.concat(values=concate_values, axis=0)
+ x = mb.slice_by_index(
+ x=x,
+ begin=[0, 0],
+ end=end_index,
+ stride=[1, 1],
+ begin_mask=[True, True],
+ end_mask=[False, True],
+ )
+
+ # get the padding part of the data
+ # Note that we always add one dummy padding in the end with shape [padded_seq_len - seq_length + 1, input_dim].
+ # The reason is that for the case when seq_length = padded_seq_len,
+ # coreml cannot handle the empty tensor.
+ pad_length = mb.sub(x=padded_seq_len + 1, y=seq_length)
+ concate_values = [pad_length, input_dim]
+ shape = mb.concat(values=concate_values, axis=0)
+ pad_values = mb.fill(shape=shape, value=pad)
+
+ # concatenate the unpadded sequence and the padding data
+ # the resulting tensor would have shape [padded_seq_len + 1, input_dim]
+ x, pad_values = promote_input_dtypes([x, pad_values])
+ concate_values = [x, pad_values]
+ add_values = mb.concat(values=concate_values, axis=0)
+
+ # trim the dummy padding tensor
+ # the output would have shape [padded_seq_len, input_dim]
+ x = mb.slice_by_index(
+ x=add_values,
+ begin=[0, 0],
+ end=[padded_seq_len, 0],
+ stride=[1, 1],
+ begin_mask=[True, True],
+ end_mask=[False, True],
+ )
+
+ # add it to total tensor
+ total_tensor.append(x)
+
+ # transpose the tensor if batch_first = False
+ if not batch_first:
+ x = mb.stack(values=total_tensor, axis=0)
+ x = mb.transpose(x=x, perm=[1, 0, 2], name=node.name)
+ else:
+ x = mb.stack(values=total_tensor, axis=0, name=node.name)
+
+ context.add(x)
+
+
+@register_torch_op
+def log10(context, node):
+ inputs = _get_inputs(context, node)
+ x = inputs[0]
+ log_x = mb.log(x=x)
+ context.add(mb.mul(x=log_x, y=1 / _np.log(10.0)), node.name)
+
+
+@register_torch_op
+def log2(context, node):
+ inputs = _get_inputs(context, node)
+ x = inputs[0]
+ log_x = mb.log(x=x)
+ context.add(mb.mul(x=log_x, y=1 / _np.log(2.0)), node.name)
+
+
+@register_torch_op
+def flip(context, node):
+ inputs = _get_inputs(context, node, expected=2)
+ x = mb.reverse(x=inputs[0], axes=inputs[1], name=node.name)
+ context.add(x, node.name)
+
+
+@register_torch_op(torch_alias=["reflection_pad1d"])
+def reflection_pad2d(context, node):
+ inputs = _get_inputs(context, node)
+ x = inputs[0]
+ torch_pad = inputs[1].val
+ pad_flipped = torch_pad.reshape((-1, 2))[::-1].ravel()
+ pad = _np.pad(pad_flipped, (len(x.shape) * 2 - len(pad_flipped), 0))
+ context.add(mb.pad(x=x, pad=pad, mode='reflect'), node.name)
+
+
+@register_torch_op(torch_alias=["replication_pad1d"])
+def replication_pad2d(context, node):
+ inputs = _get_inputs(context, node)
+ x = inputs[0]
+ torch_pad = inputs[1].val
+ pad_flipped = torch_pad.reshape((-1, 2))[::-1].ravel()
+ pad = _np.pad(pad_flipped, (len(x.shape) * 2 - len(pad_flipped), 0))
+ context.add(mb.pad(x=x, pad=pad, mode='replicate'), node.name)
+
+
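+# Illustrative note on the two pad converters above (not from the original
+# source): PyTorch pad tuples are ordered (left, right, top, bottom, ...) from
+# the last axis outward, while mb.pad expects (before, after) pairs from the
+# first axis inward. For a rank-4 input with torch_pad = [1, 2, 3, 4],
+# pad_flipped = [3, 4, 1, 2] and the final pad = [0, 0, 0, 0, 3, 4, 1, 2].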
+def _broadcast_tensors(tensors): + def _solve_broadcast_shape(shapes): + rank = _np.max([len(shape) for shape in shapes]) + shapes = [[1] * (rank - len(shape)) + shape for shape in shapes] + result_shape = [] + for i in range(rank): + dims = [shapes[j][i] for j in range(len(tensors))] + if any_symbolic(dims): + # rdar://85559497 (Handle dynamic shapes inputs broadcast for pytorch) + raise NotImplementedError( + "Only static shaped inputs are supported for torch.broadcast_tensors conversion." + ) + result_shape.append(_np.max(dims)) + return result_shape + + if len(tensors) == 1: + return tensors + + # solve the broadcast shape + input_shapes = [list(x.shape) for x in tensors] + broadcast_shape = _solve_broadcast_shape(input_shapes) + + # do the broadcasting + results = [] + for tensor in tensors: + name = tensor.name + "_after_broadcast" + results.append(_broadcast(name, tensor, broadcast_shape)) + return results + + +@register_torch_op +def broadcast_tensors(context, node): + inputs = _get_inputs(context, node) + context.add(_broadcast_tensors(inputs[0]), node.name) + + +def _scatter(context, inputs, mode, name): + data = inputs[0] + axis = inputs[1].val + indices = inputs[2] + updates = inputs[3] + if types.is_scalar(updates.sym_type): + updates = mb.fill(shape=indices.shape, value=updates.val, name=name) + result = mb.scatter_along_axis(data=data, indices=indices, updates=updates, + axis=axis, mode=mode, name=name) + context.add(result) + + +@register_torch_op +def scatter(context, node): + inputs = _get_inputs(context, node) + assert len(inputs) in (4, 5) + + # Determine reduce/mode parameter + if len(inputs) == 5: + mode = inputs[4].val + if mode == 'multiply': + mode = 'mul' + else: + assert mode == 'add' + else: + mode = 'update' + + _scatter(context, inputs, mode, node.name) + + +@register_torch_op +def scatter_add(context, node): + inputs = _get_inputs(context, node) + _scatter(context, inputs, 'add', node.name) + + +@register_torch_op +def baddbmm(context, node): + """ + baddbmm(Tensor input, Tensor batch1, Tensor batch2, Scalar beta=1, Scalar alpha=1) + output = beta * input + alpha * batch1 * batch2 + + Notice that batch1 and batch2 must be 3-D tensors each containing the same number of matrices. + If batch1 is a (b×n×m) tensor, batch2 is a (b×m×p) tensor, then input must be broadcastable with a (b×n×p) tensor + and out will be a (b×n×p) tensor. + """ + assert len(node.outputs) == 1 + inputs = _get_inputs(context, node, expected=5) + bias, batch1, batch2, beta, alpha = inputs + + if beta.val != 1.0: + # Apply scaling factor beta to the bias. + bias = mb.mul(x=beta, y=bias, name=bias.name + "_scaled") + context.add(bias) + + if alpha.val != 1.0: + # Apply scaling factor alpha to the input. + batch1 = mb.mul(x=alpha, y=batch1, name=batch1.name + "_scaled") + context.add(batch1) + + bmm_node = mb.matmul(x=batch1, y=batch2, name=node.name + "_bmm") + context.add(bmm_node) + + baddbmm_node = mb.add(x=bias, y=bmm_node, name=node.name) + context.add(baddbmm_node) + + +@register_torch_op +def glu(context, node): + """ + glu(Tensor input, Scalar dim=-1) + Applies the gated linear unit function GLU(a,b)=a⊗σ(b) where a is the first half of the input matrices and b is the + second half. 
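+
+ For example (illustrative): an input of shape (2, 6) split along dim=-1
+ yields halves a and b of shape (2, 3), and the output a * sigmoid(b) also
+ has shape (2, 3).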
+ """ + assert len(node.outputs) == 1 + inputs = _get_inputs(context, node, expected=2) + input, axis = inputs + + first_half, second_half = mb.split(x=input, num_splits=2, axis=axis.val, name=node.name + "_split") + context.add(first_half) + context.add(second_half) + + sigmoid_second_half = mb.sigmoid(x=second_half, name=second_half.name + "_sigmoid") + context.add(sigmoid_second_half) + + glu_node = mb.mul(x=first_half, y=sigmoid_second_half, name=node.name) + context.add(glu_node) + + +@register_torch_op +def hstack(context, node): + """ + hstack(List[Tensor] tensors, Optional[Tensor] out) + Stack tensors in sequence horizontally (column wise). This is equivalent to concatenation along the first axis for + 1-D tensors, and along the second axis for all other tensors. + """ + inputs = _get_inputs(context, node) + tensors = inputs[0] + input_shapes = [list(x.shape) for x in tensors] + # Concatenates along the first axis for 1-D tensors, and along the second axis for all other tensors. + axis = 0 if len(input_shapes[0]) == 1 else 1 + hstack_node = mb.concat(values=tensors, axis=axis, name=node.name) + context.add(hstack_node) + + +@register_torch_op +def remainder(context, node): + """ + remainder(Tensor dividend, Tensor divisor, Optional[Tensor] out) + Computes Python’s modulus operation entrywise. The result has the same sign as the divisor and its absolute value + is less than that of divisor. It may also be defined in terms of torch.div() as: + remainder(a, b) == a - a.div(b, rounding_mode="floor") * b + """ + # Don't specify `expected` because the parameter `out` is optional. + inputs = _get_inputs(context, node) + dividend, divisor = promote_input_dtypes([inputs[0], inputs[1]]) + div_node = mb.floor_div(x=dividend, y=divisor, name=node.name + "_div") + context.add(div_node) + scaled_div = mb.mul(x=div_node, y=divisor, name=div_node.name + "_scaled") + context.add(scaled_div) + remainder_node = mb.sub(x=dividend, y=scaled_div, name=node.name) + context.add(remainder_node) + + +@register_torch_op +def hann_window(context, node): + inputs = _get_inputs(context, node, expected=[5, 6]) + if inputs[0].val is None: + raise NotImplementedError("variable 'window_length' not supported.") + + periodic = True + if len(inputs) == 6: + if inputs[1].val is None: + raise NotImplementedError("variable 'periodic' not supported.") + if not inputs[1].val: + periodic = False + + size = (inputs[0].val,) + if inputs[0].val <= 1: + one = mb.fill(shape=size, value=1.0, name=node.name) + context.add(one) + return + + ones = mb.fill(shape=size, value=1.0) + cum = mb.cumsum(x=ones, axis=0) + seq = mb.sub(x=cum, y=ones) + pi = mb.fill(shape=size, value=_math.pi) + window_length_float = mb.cast(x=inputs[0], dtype="fp32") + if not periodic: + window_length_float = mb.sub(x=window_length_float, y=ones) + denominator = mb.fill(shape=size, value=window_length_float) + numerator = mb.mul(x=seq, y=pi) + frac = mb.real_div(x=numerator, y=denominator) + sin = mb.sin(x=frac) + sin_sq = mb.mul(x=sin, y=sin, name=node.name) + context.add(sin_sq) + +@register_torch_op +def mse_loss(context, node): + inputs = _get_inputs(context, node, expected=3) + x = inputs[0] + y = inputs[1] + reduction = inputs[2].val + + diff = mb.sub(x=x, y=y) + + if reduction == 0: + # reduction is "none" + res = mb.mul(x=diff, y=diff, name=node.name) + context.add(res) + return + + square = mb.mul(x=diff, y=diff) + if reduction == 1: + # reduction is "mean" + res = mb.reduce_mean(x=square, axes=None, name=node.name) + + elif reduction == 2: + # 
reduction is "sum"
+ res = mb.reduce_sum(x=square, axes=None, name=node.name)
+ else:
+ raise ValueError("Reduction is not supported")
+
+ context.add(res)
+
+@register_torch_op
+def trace(context, node):
+ inputs = _get_inputs(context, node, expected=1)
+ x = inputs[0]
+ dims = mb.shape(x=x)
+ dim0 = value_at(dims, 0)
+ dim1 = value_at(dims, 1)
+ min_dim = mb.minimum(x=dim0, y=dim1)
+ indices = mb.range_1d(end=min_dim, start=0, step=1)
+ indices = mb.stack(values=[indices, indices], axis=1)
+ diagonal = mb.gather_nd(x=x, indices=indices)
+ trace = mb.reduce_sum(x=diagonal, name=node.name)
+ context.add(trace)
+
+@register_torch_op
+def roll(context, node):
+ inputs = _get_inputs(context, node, expected=3)
+ x = inputs[0]
+ shift = inputs[1].val
+ dims = inputs[2].val
+ origin_shape = mb.shape(x=x)
+
+ need_flatten = len(dims) == 0
+
+ if need_flatten:
+ # The tensor is flattened before rolling
+ x = mb.reshape(x=x, shape=[-1])
+ dims = [0]
+
+ shape = mb.shape(x=x)
+
+ for s, i in zip(shift, dims):
+ dim = value_at(shape, i)
+ s = mb.mod(x=s, y=dim)
+ start_idx = mb.sub(x=dim, y=s)
+ indices0 = mb.range_1d(end=dim, start=start_idx, step=1)
+ indices1 = mb.range_1d(end=start_idx, start=0, step=1)
+ indices = mb.concat(values=[indices0, indices1], axis=0)
+ x = mb.gather(x=x, indices=indices, axis=i)
+
+ if need_flatten:
+ x = mb.reshape(x=x, shape=origin_shape)
+
+ context.add(x, node.name)
+
+
+@register_torch_op
+def im2col(context, node):
+ """
+ Extract sliding local blocks from a batched input tensor (rank=4).
+
+ torch.nn.functional.unfold aims to be the general version: im2col is the rank=4 case of unfold.
+ PyTorch currently only supports rank=4 input: torch.nn.functional.unfold redispatches to at::im2col,
+ which is why coremltools needs im2col to convert torch.nn.functional.unfold.
+
+ We currently only support rank=4 input (consistent with PyTorch) and dilation set to 1.
+ More flexible dilation support will be added in the future.
+
+ Reference https://pytorch.org/docs/stable/generated/torch.nn.Unfold.html
+ """
+ inputs = _get_inputs(context, node, expected=5)
+ x = inputs[0]
+ kernel_size = inputs[1].val
+ dilation = inputs[2].val
+ padding = inputs[3].val
+ stride = inputs[4].val
+
+ if x.rank != 4:
+ raise ValueError("Only supports rank=4 input data for im2col (unfold).")
+ if not (dilation[0] == 1 and dilation[1] == 1):
+ raise ValueError("Only supports dilation=1 for im2col (unfold).")
+
+ # for simplicity, we explicitly pad; TODO: implicit padding would be more efficient
+ # torch.unfold padding has different semantics
+ # * for torch.unfold
+ # x.shape[i + x.rank - padding.rank] = padding[i] + x.shape[i + x.rank - padding.rank] + padding[i]
+ # taking x.rank = 4 and padding.rank = 2 as an example:
+ # x.shape[0 + 4 - 2] = padding[0] + x.shape[0 + 4 - 2] + padding[0]
+ # x.shape[1 + 4 - 2] = padding[1] + x.shape[1 + 4 - 2] + padding[1]
+ # * for mb.pad(x=x, pad=pad, mode="constant")
+ # x.shape[i] = pad[2 * i] + x.shape[i] + pad[2 * i + 1]
+ # * for torch.nn.functional.pad
+ # x.shape[-1] = padding[0] + x.shape[-1] + padding[1]
+ # x.shape[-2] = padding[2] + x.shape[-2] + padding[3]
+ # ...
+ # x.shape[-i] = padding[2 * i - 2] + x.shape[-i] + padding[2 * i - 1]
+ # so we need to convert torch.unfold padding to mb.pad(mode="constant") pad
+ missing_dims = x.rank - len(padding)
+ pad = [0, 0] * missing_dims + _np.array(padding).repeat(2).tolist()
+ x = mb.pad(x=x, pad=pad, mode="constant")
+
+ N, C, H, W = x.shape
+
+ # Get total number of blocks. It follows the formula in the torch.nn.Unfold documentation.
+ spatial_size = (H, W)
+ block_count = 1
+ for i in range(2):
+ block_count *= (
+ _np.floor(
+ # the original formula is
+ # (spatial_size[i] + 2 * padding[i] - dilation[i] * (kernel_size[i] - 1) - 1) / stride[i]
+ # since we have explicitly padded, we no longer add 2 * padding[i] to spatial_size[i]
+ (spatial_size[i] - dilation[i] * (kernel_size[i] - 1) - 1) / stride[i]
+ ).astype(_np.int32)
+ + 1
+ )
+
+ """
+ The implementation below assumes x to be contiguous
+ """
+
+ # Get batch block indices.
+ batch_idx = _np.arange(N)[:, None, None] * C * H * W
+
+ # Get starting block indices.
+ start_idx = _np.arange(kernel_size[0])[None, :, None] * W + _np.arange(
+ kernel_size[1]
+ )
+
+ # Generate depth indices.
+ channel_index = H * W * _np.arange(C)
+ start_idx = (channel_index[None, :, None] + _np.ravel(start_idx)).reshape(
+ (-1, kernel_size[0], kernel_size[1])
+ )
+
+ # Get offset indices across the height and width of input array.
+ row_extent = H - kernel_size[0] + 1
+ col_extent = W - kernel_size[1] + 1
+ offset_idx = _np.arange(0, row_extent, stride[0])[None, :, None] * W + _np.arange(0, col_extent, stride[1])
+ indices = _np.ravel(start_idx)[:, None] + _np.ravel(offset_idx)
+
+ # Gather batches together.
+ indices = batch_idx + indices
+ x = mb.reshape(x=x, shape=[-1])
+ gathered_data = mb.gather_along_axis(x=x, indices=indices.reshape(-1), axis=0)
+ block_size = C * kernel_size[0] * kernel_size[1]
+ output = mb.reshape(
+ x=gathered_data, shape=(N, block_size, block_count), name=node.name
+ )
+
+ context.add(output)
+
+
+@register_torch_op
+def complex(context, node):
+ real_part, imag_part = _get_inputs(context, node, expected=2)
+ result = mb.complex(real_data=real_part, imag_data=imag_part)
+ context.add(result, node.name)
+
+
+@register_torch_op
+def real(context, node):
+ input_data = _get_inputs(context, node, expected=1)[0]
+ if types.is_complex(input_data.dtype):
+ real_part = mb.complex_real(data=input_data)
+ context.add(real_part, node.name)
+ else:
+ context.add(input_data, node.name)
+
+
+@register_torch_op
+def imag(context, node):
+ input_data = _get_inputs(context, node, expected=1)[0]
+ if not types.is_complex(input_data.dtype):
+ # Keep consistent with PyTorch.
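+ # (torch.imag raises a similar error for non-complex dtypes -- illustrative
+ # paraphrase, not an exact quote of the PyTorch message.)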
+ raise ValueError("The `imag` op only supports complex input.")
+ imag_part = mb.complex_imag(data=input_data)
+ context.add(imag_part, node.name)
+
+
+@register_torch_op
+def fft_fft(context, node):
+ """Lowers torch.fft.fft by the dialect op `complex_fft` from complex_dialect_ops.py."""
+ input_data, n, dim, norm = _get_inputs(context, node, expected=[4])
+ fft_res = mb.complex_fft(data=input_data, n=n, dim=dim, norm=norm)
+ context.add(fft_res, node.name)
+
+
+@register_torch_op
+def fft_fftn(context, node):
+ """Lowers torch.fft.fftn by the dialect op `complex_fftn` from complex_dialect_ops.py."""
+ input_data, shapes, dims, norm = _get_inputs(context, node, expected=[4])
+ fft_res = mb.complex_fftn(data=input_data, shapes=shapes, dims=dims, norm=norm)
+ context.add(fft_res, node.name)
+
+
+@register_torch_op
+def fft_rfft(context, node):
+ """Lowers torch.fft.rfft by the dialect op `complex_rfft` from complex_dialect_ops.py."""
+ input_data, n, dim, norm = _get_inputs(context, node, expected=[4])
+ rfft_res = mb.complex_rfft(data=input_data, n=n, dim=dim, norm=norm)
+ context.add(rfft_res, node.name)
+
+
+@register_torch_op
+def fft_rfftn(context, node):
+ """Lowers torch.fft.rfftn by the dialect op `complex_rfftn` from complex_dialect_ops.py."""
+ input_data, shapes, dims, norm = _get_inputs(context, node, expected=[4])
+ rfft_res = mb.complex_rfftn(data=input_data, shapes=shapes, dims=dims, norm=norm)
+ context.add(rfft_res, node.name)
+
+
+@register_torch_op
+def fft_ifft(context, node):
+ """Lowers torch.fft.ifft by the dialect op `complex_ifft` from complex_dialect_ops.py."""
+ input_data, n, dim, norm = _get_inputs(context, node, expected=[4])
+ ifft_res = mb.complex_ifft(data=input_data, n=n, dim=dim, norm=norm)
+ context.add(ifft_res, node.name)
+
+
+@register_torch_op
+def fft_ifftn(context, node):
+ """Lowers torch.fft.ifftn by the dialect op `complex_ifftn` from complex_dialect_ops.py."""
+ input_data, shapes, dims, norm = _get_inputs(context, node, expected=[4])
+ ifftn_res = mb.complex_ifftn(data=input_data, shapes=shapes, dims=dims, norm=norm)
+ context.add(ifftn_res, node.name)
+
+
+@register_torch_op
+def fft_irfft(context, node):
+ """Lowers torch.fft.irfft by the dialect op `complex_irfft` from complex_dialect_ops.py."""
+ input_data, n, dim, norm = _get_inputs(context, node, expected=[4])
+ irfft_res = mb.complex_irfft(data=input_data, n=n, dim=dim, norm=norm)
+ context.add(irfft_res, node.name)
+
+
+@register_torch_op
+def fft_irfftn(context, node):
+ """Lowers torch.fft.irfftn by the dialect op `complex_irfftn` from complex_dialect_ops.py."""
+ input_data, shapes, dims, norm = _get_inputs(context, node, expected=[4])
+ irfftn_res = mb.complex_irfftn(data=input_data, shapes=shapes, dims=dims, norm=norm)
+ context.add(irfftn_res, node.name)
+
+
+@register_torch_op(torch_alias=["torchvision::nms"])
+def torchvision_nms(context, node):
+ inputs = _get_inputs(context, node, expected=3)
+ boxes, scores = promote_input_dtypes([inputs[0], inputs[1]])
+ iou_threshold = inputs[2].val
+ # Use float min to avoid boxes being pruned by scores in MIL NMS op.
+ score_threshold = (
+ _np.finfo(_np.float16).min
+ if boxes.dtype._width == 16
+ else _np.finfo(_np.float32).min
+ )
+
+ box_num = boxes.shape[0]
+ if is_symbolic(box_num):
+ # When the number of boxes is unknown at compile time, use a large number to avoid valid
+ # boxes getting pruned. We don't use _np.iinfo(_np.int32).max here because it triggers a
+ # segmentation fault in the MIL NMS op.
+ box_num = 10000
+
+ # The boxes' coordinates from the PyTorch input are in (x1, y1, x2, y2) format with 0 <= x1 < x2 and
+ # 0 <= y1 < y2. However, the MIL NMS op expects CENTER_SIZE_WIDTH_FIRST format, which is
+ # (x, y, width, height) where (x, y) is the center coordinate.
+ x1, y1, x2, y2 = mb.split(x=boxes, num_splits=4, axis=-1)
+ # For numerical stability, use x1+(x2-x1)/2 instead of (x1+x2)/2 to calculate center coordinate.
+ width = mb.sub(x=x2, y=x1)
+ height = mb.sub(x=y2, y=y1)
+ center_x = mb.add(x=x1, y=mb.real_div(x=width, y=2.0))
+ center_y = mb.add(x=y1, y=mb.real_div(x=height, y=2.0))
+ boxes = mb.concat(values=[center_x, center_y, width, height], axis=-1)
+
+ # Expand dims to construct the batch dim and score class dim expected by MIL NMS op.
+ boxes = mb.expand_dims(x=boxes, axes=[0])
+ scores = mb.expand_dims(x=scores, axes=[0, -1])
+
+ _, _, indices, valid_outputs = mb.non_maximum_suppression(
+ boxes=boxes,
+ scores=scores,
+ max_boxes=box_num,
+ iou_threshold=iou_threshold,
+ score_threshold=score_threshold,
+ )
+
+ indices = mb.squeeze(x=indices, axes=[0])
+ valid_outputs = mb.squeeze(x=valid_outputs, axes=[0])
+ range = mb.range_1d(end=valid_outputs, start=0, step=1)
+ indices = mb.cast(x=indices, dtype="fp32")
+ valid_indices = mb.gather(x=indices, indices=range, axis=0)
+ valid_indices = mb.cast(x=valid_indices, dtype="int32", name=node.name)
+ context.add(valid_indices)
+
+
+@register_torch_op
+def tupleindex(context, node):
+ tuple_input, index_input = _get_inputs(context, node, expected=2)
+ context.add(tuple_input[index_input.val], node.name)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/ssa_passes/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/ssa_passes/__init__.py
new file mode 100644
index 00000000..2dac14c2
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/ssa_passes/__init__.py
@@ -0,0 +1,6 @@
+# Copyright (c) 2021, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from . import torch_tensor_assign_to_core, torch_upsample_to_core_upsample
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/ssa_passes/torch_tensor_assign_to_core.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/ssa_passes/torch_tensor_assign_to_core.py
new file mode 100644
index 00000000..a24a31c3
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/ssa_passes/torch_tensor_assign_to_core.py
@@ -0,0 +1,64 @@
+# Copyright (c) 2021, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
+from coremltools.converters.mil.mil.passes.helper import block_context_manager
+from coremltools.converters.mil.mil.passes.pass_registry import register_pass
+
+
+@register_pass(namespace="torch")
+class torch_tensor_assign_to_core(AbstractGraphPass):
+ """
+ Map Torch dialect ops `torch_tensor_assign` into core opset.
+
+ Currently, we transform the torch_tensor_assign op using mb.scatter.
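+
+ Sketch of the lowering (illustrative, not from the original source): for
+ data[1:3] = updates on a rank-1 tensor, we build flat reference indices
+ [0, 1, 2, ...], slice them with the op's begin/end/stride to get [1, 2],
+ scatter the flattened updates into the flattened data at those positions,
+ and reshape back to the original shape.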
+ """ + def apply(self, prog): + for f in prog.functions.values(): + _torch_tensor_assign_to_core_block(f) + +@block_context_manager +def _torch_tensor_assign_to_core_block(block): + for op in block.operations[:]: + for b in op.blocks: + _torch_tensor_assign_to_core_block(b) + + if op.op_type in ["torch_tensor_assign"]: + _transform_tensor_assign(op, block) + + +def _transform_tensor_assign(op, block): + shape = mb.shape(x=op.data, before_op=op) + dim_prod = mb.reduce_prod(x=shape, before_op=op) + ref_indices = mb.range_1d(end=dim_prod, start=0, step=1, before_op=op) + ref_indices = mb.reshape(x=ref_indices, shape=shape, before_op=op) + ref_sliced_indices = mb.slice_by_index( + x=ref_indices, + begin=op.begin, + end=op.end, + stride=op.stride, + begin_mask=op.begin_mask, + end_mask=op.end_mask, + squeeze_mask=op.squeeze_mask, + before_op=op, + ) + flatten_indices = mb.reshape(x=ref_sliced_indices, shape=[-1], before_op=op) + flatten_updates = mb.reshape(x=op.updates, shape=[-1], before_op=op) + flatten_data = mb.reshape(x=op.data, shape=[-1], before_op=op) + new_data = mb.scatter( + data=flatten_data, + indices=flatten_indices, + updates=flatten_updates, + mode="update", + before_op=op + ) + new_data = mb.reshape(x=new_data, shape=shape, before_op=op) + + op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=op, old_var=op.outputs[0], new_var=new_data + ) + # Remove all the ops at once + block.remove_ops([op]) \ No newline at end of file diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/ssa_passes/torch_upsample_to_core_upsample.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/ssa_passes/torch_upsample_to_core_upsample.py new file mode 100644 index 00000000..d8864f80 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/ssa_passes/torch_upsample_to_core_upsample.py @@ -0,0 +1,135 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as np + +from coremltools import _logger as logger +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.helper import block_context_manager +from coremltools.converters.mil.mil.passes.pass_registry import register_pass + + +target_ops = [ + "torch_upsample_nearest_neighbor", + "torch_upsample_bilinear", +] + + +@register_pass(namespace="torch") +class torch_upsample_to_core_upsample(AbstractGraphPass): + """ + Try to map Torch dialect ops + 1. `torch_upsample_nearest_neighbor` + 2. `torch_upsample_bilinear` + to `upsample_nearest_neighbor` or `upsample_bilinear` in the core op set if compatible. 
+
+    Inputs:
+
+        prog: Program
+    """
+    def apply(self, prog):
+        for f in prog.functions.values():
+            _torch_upsample_to_core_upsample_block(f)
+
+@block_context_manager
+def _torch_upsample_to_core_upsample_block(block):
+    for op in block.operations[:]:
+        for b in op.blocks:
+            _torch_upsample_to_core_upsample_block(b)
+
+        if op.op_type in target_ops:
+            if _try_replace_with_core_upsample(op):
+                logger.info("Successfully mapped {} to core upsample".format(op.op_type))
+            else:
+                raise ValueError("Unable to map {} to core upsample".format(op.op_type))
+
+
+def _try_get_upsample_factor(output_size):
+    op = output_size
+    # If the output has a value, then the upsample op itself is derived from the upsample_1d op,
+    # so we can just return scale factor 1 for that case.
+    if op.outputs[0].val is not None:
+        assert op.outputs[0].val == 1.
+        return 1.
+
+    # output_size = [
+    #     (torch.floor((input.size(i + 2).float() * torch.tensor(scale_factors[i], dtype=torch.float32)).float()))
+    #     for i in range(dim)
+    # ]
+    # Source: https://pytorch.org/docs/stable/_modules/torch/nn/functional.html#interpolate
+    # We validate that we can trace all the way back to the original scale_factor.
+    # The whole sequence is mul(input_size, scale_factor) -> cast(fp32) -> floor() -> cast(int32)
+
+    # 1. check if the output_size is type 'cast' with dtype 'int32'
+    if op.op_type != "cast" or op.dtype.val != "int32":
+        return None
+
+    # 2. check if the op is type 'floor'
+    op = op.x.op
+    if op.op_type != "floor":
+        return None
+
+    # 3. check if the op is type 'cast' with dtype 'fp32'
+    op = op.x.op
+    if op.op_type != 'cast' or op.dtype.val != "fp32":
+        return None
+
+    # 4. check if the op is type mul
+    op = op.x.op
+    if op.op_type != 'mul':
+        return None
+
+    # We successfully traced back to the original scale factor.
+    return np.float32(op.y.val)
+
+
+def _try_replace_with_core_upsample(op):
+    """
+    Inputs:
+
+    op (Operation): op.op_type must be either
+        1. `torch_upsample_nearest_neighbor`
+        2. `torch_upsample_bilinear`
+
+    Returns:
+
+    True if op can be represented by the mb.upsample_nearest_neighbor or mb.upsample_bilinear op in SSA.
+    False otherwise
+    """
+    assert op.op_type in target_ops
+
+    # 2d upsampling
+    if op.op_type in ["torch_upsample_nearest_neighbor", "torch_upsample_bilinear"]:
+        scales_h = _try_get_upsample_factor(op.output_height.op)
+        scales_w = _try_get_upsample_factor(op.output_width.op)
+
+        if scales_h is None or scales_w is None:
+            return False
+
+        old_upsample = op.outputs[0]
+        block = op.enclosing_block
+
+        if op.op_type == "torch_upsample_nearest_neighbor":
+            new_upsample = mb.upsample_nearest_neighbor(
+                x=op.x,
+                scale_factor_height=scales_h,
+                scale_factor_width=scales_w,
+                name=op.name,
+                before_op=op,
+            )
+        elif op.op_type == "torch_upsample_bilinear":
+            new_upsample = mb.upsample_bilinear(
+                x=op.x,
+                scale_factor_height=scales_h,
+                scale_factor_width=scales_w,
+                align_corners=op.align_corners,
+                name=op.name,
+                before_op=op,
+            )
+        block.replace_uses_of_var_after_op(anchor_op=op, old_var=old_upsample, new_var=new_upsample)
+        block.remove_ops([op])
+
+    return True
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/__init__.py
new file mode 100644
index 00000000..61aafff4
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/__init__.py
@@ -0,0 +1,4 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_api.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_api.py
new file mode 100644
index 00000000..f52eb32f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_api.py
@@ -0,0 +1,62 @@
+# Copyright (c) 2021, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import os
+
+import pytest
+
+import coremltools as ct
+from coremltools._deps import _HAS_TORCH, MSG_TORCH_NOT_FOUND
+
+# torch and torchvision are imported only under the _HAS_TORCH guard so that
+# the skipif below can take effect when torch is not installed.
+if _HAS_TORCH:
+    import torch
+    import torchvision
+
+
+@pytest.mark.skipif(not _HAS_TORCH, reason=MSG_TORCH_NOT_FOUND)
+class TestPyTorchConverter:
+    @staticmethod
+    def test_no_inputs():
+        model = torchvision.models.mobilenet_v2()
+        model.eval()
+
+        example_input = torch.rand(1, 3, 256, 256)
+
+        traced_model = torch.jit.trace(model, example_input)
+
+        with pytest.raises(ValueError) as e:
+            ct.convert(traced_model)
+        e.match(r'Expected argument for pytorch "inputs" not provided')
+
+
+    @staticmethod
+    def test_pth_extension(tmpdir):
+        # test for issue: https://github.com/apple/coremltools/issues/917
+        class TestModule(torch.nn.Module):
+            def __init__(self):
+                super(TestModule, self).__init__()
+                self.linear = torch.nn.Linear(10, 20)
+
+            def forward(self, x):
+                return self.linear(x)
+
+        model = TestModule()
+        model.eval()
+        example_input = torch.rand(1, 10)
+        traced_model = torch.jit.trace(model, example_input)
+        model_path = os.path.join(str(tmpdir), "torch_model.pth")
+        traced_model.save(model_path)
+
+        ct.convert(
+            model_path,
+            source='pytorch',
+            inputs=[
+                ct.TensorType(
+                    shape=example_input.shape,
+                )
+            ],
+        )
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_custom_ops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_custom_ops.py
new file mode 100644
index 00000000..d8e266c9
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_custom_ops.py
@@ -0,0 +1,144 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import pytest
+import torch
+import torch.nn as nn
+
+from coremltools.converters.mil.frontend.torch.ops import _get_inputs
+from coremltools.converters.mil.frontend.torch.ops import \
+    cosine_similarity as cosine_similarity_main
+from coremltools.converters.mil.frontend.torch.torch_op_registry import \
+    _TORCH_OPS_REGISTRY as _TORCH_OPS_REG
+from coremltools.converters.mil.frontend.torch.torch_op_registry import \
+    register_torch_op
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil import Operation, types
+from coremltools.converters.mil.mil.input_type import (DefaultInputs,
+                                                       InputSpec,
+                                                       TensorInputType)
+from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op
+
+from .testing_utils import TorchBaseTest, convert_to_mlmodel
+
+# Custom layer imports
+
+
+
+# Record the converter's built-in cosine_similarity conversion function
+default_cosine_similarity = _TORCH_OPS_REG.get("cosine_similarity", None)
+
+
+@register_torch_op(override=True)
+def cosine_similarity(context, node):
+    cosine_similarity_main(context, node)
+
+
+# Record the custom cosine_similarity conversion function
+custom_cosine_similarity = _TORCH_OPS_REG["cosine_similarity"]
+
+
+def _set_torch_reg_op(op_type, op_func):
+    _TORCH_OPS_REG[op_type] = op_func
+
+
+class TestCompositeOp(TorchBaseTest):
+
+    @pytest.mark.parametrize("input_shape", [(100, 180), (56, 123)])
+    def test_composite_op(self, input_shape):
+        _set_torch_reg_op("cosine_similarity", custom_cosine_similarity)
+        model = nn.CosineSimilarity(dim=1, eps=1e-6)
+        self.run_compare_torch([input_shape, input_shape], model)
+        _set_torch_reg_op("cosine_similarity", default_cosine_similarity)
+
+
+class TestCustomOp:
+    # Define an SSA custom op for sparse matmul.
+    # This will map to `custom_op` in SSA with binding information
+    # to bind the input spec to the custom implementation.
+    @register_op(is_custom_op=True)
+    class custom_torch_sparse_matmul(Operation):
+        # Define the input spec for the op
+        input_spec = InputSpec(
+            x=TensorInputType(type_domain="T"),
+            y=TensorInputType(type_domain="T"),
+            transpose_x=TensorInputType(const=True, optional=True, type_domain=types.bool),
+            transpose_y=TensorInputType(const=True, optional=True, type_domain=types.bool),
+            x_is_sparse=TensorInputType(const=True, optional=True, type_domain=types.bool),
+            y_is_sparse=TensorInputType(const=True, optional=True, type_domain=types.bool),
+        )
+
+        type_domains = {
+            "T": (types.fp16, types.fp32),
+        }
+
+        def default_inputs(self):
+            return DefaultInputs(
+                transpose_x=False,
+                transpose_y=False,
+                x_is_sparse=False,
+                y_is_sparse=False,
+            )
+
+        # Binding information for the custom op, specifying the inputs and
+        # parameters required so the custom op can be synced with the Swift API
+        bindings = {
+            "class_name": "SparseMatMul",
+            "input_order": ["x", "y"],
+            "parameters": ["transpose_x", "transpose_y", "x_is_sparse", "y_is_sparse"],
+            "description": "Custom Sparse MatMul Layer",
+        }
+
+        def type_inference(self):
+            x_type = self.x.dtype
+            x_shape = self.x.shape
+            y_shape = self.y.shape
+            # For illustration purposes, assume the shapes are valid.
+            # Ideally, the transpose_* and *_is_sparse parameters should be
+            # taken into account when computing the output shape.
+            return types.tensor(x_type, [x_shape[0], y_shape[1]])
+
+    @register_torch_op()
+    def _sparse_mm(context, node):
+        inputs =
_get_inputs(context, node, expected=2)
+        x = mb.custom_torch_sparse_matmul(
+            x=inputs[0], y=inputs[1], x_is_sparse=True, y_is_sparse=True, name=node.name
+        )
+        context.add(x)
+
+    def test_custom_sparse_mm_op(self, input_shape=(4, 4)):
+        class TestLayer(nn.Module):
+            def __init__(self):
+                super(TestLayer, self).__init__()
+
+            def forward(self, x, y):
+                x = torch.sparse.mm(x, y)
+                return x
+
+        model = TestLayer()
+        input_data_x = torch.ones(input_shape)
+        input_data_y = torch.ones(input_shape)
+        input_data = [input_data_x, input_data_y]
+        model.eval()
+        torch_model = torch.jit.trace(model, (input_data_x, input_data_y))
+        mlmodel = convert_to_mlmodel(torch_model, input_data)
+
+        layers = mlmodel.get_spec().neuralNetwork.layers
+        assert layers[-1].custom is not None, "Expecting a custom layer"
+        assert (
+            "SparseMatMul" == layers[-1].custom.className
+        ), "Custom layer class name mismatch"
+        assert (
+            not layers[-1].custom.parameters["transpose_x"].boolValue
+        ), "Incorrect value for parameter transpose_x"
+        assert (
+            not layers[-1].custom.parameters["transpose_y"].boolValue
+        ), "Incorrect value for parameter transpose_y"
+        assert (
+            layers[-1].custom.parameters["x_is_sparse"].boolValue
+        ), "Incorrect value for parameter x_is_sparse"
+        assert (
+            layers[-1].custom.parameters["y_is_sparse"].boolValue
+        ), "Incorrect value for parameter y_is_sparse"
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_examples.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_examples.py
new file mode 100644
index 00000000..10a99a5a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_examples.py
@@ -0,0 +1,64 @@
+# Copyright (c) 2022, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import pytest
+
+import coremltools
+from coremltools._deps import (
+    _HAS_TORCH,
+    MSG_TORCH_NOT_FOUND,
+)
+
+if _HAS_TORCH:
+    import torch
+    from torch import nn
+    import torch.nn.functional as F
+
+
+@pytest.mark.skipif(not _HAS_TORCH, reason=MSG_TORCH_NOT_FOUND)
+class TestModelScripting:
+    @staticmethod
+    def test():
+        # Example code from https://coremltools.readme.io/docs/model-scripting
+
+        class _LoopBody(nn.Module):
+            def __init__(self, channels):
+                super(_LoopBody, self).__init__()
+                conv = nn.Conv2d(
+                    in_channels=channels,
+                    out_channels=channels,
+                    kernel_size=3,
+                    padding=1,
+                )
+                self.conv = conv
+
+            def forward(self, x):
+                x = self.conv(x)
+                x = F.relu(x)
+                return x
+
+
+        class ControlFlowNet(nn.Module):
+            def __init__(self, num_channels: int):
+                super(ControlFlowNet, self).__init__()
+                self.loop_body = _LoopBody(num_channels)
+
+            def forward(self, x):
+                avg = torch.mean(x)
+                if avg.item() < 0:
+                    loop_count = 2
+                else:
+                    loop_count = 1
+                for _ in range(loop_count):
+                    x = self.loop_body(x)
+                return x
+
+        model = ControlFlowNet(num_channels=3)
+        scripted_model = torch.jit.script(model)
+
+        mlmodel = coremltools.converters.convert(
+            scripted_model,
+            inputs=[coremltools.TensorType(shape=(1, 3, 64, 64))],
+        )
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_internal_graph.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_internal_graph.py
new file mode 100644
index 00000000..d2c3fdb8
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_internal_graph.py
@@ -0,0 +1,1804 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import itertools
+
+import numpy as np
+import pytest
+
+torch = pytest.importorskip("torch")
+
+import torch.nn as nn
+import torch.nn.functional as F
+
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil import Function, get_new_symbol, types
+from coremltools.converters.mil.testing_utils import random_gen
+
+from .. import ops
+from ..converter import TranscriptionContext
+from ..internal_graph import InternalTorchIRNode
+
+
+class TestTorchOps:
+    """Class containing tests for converting TorchIR -> CoreML ops.
+
+    These tests interface with only the InternalTorchIRGraph and do not
+    build a torch module. Thus, they are much faster than the numerical tests.
+    However, for some ops it is necessary to use the torch module to verify
+    numerical output, so those are placed in the numerical tests.
+
+    NOTE: Confused where @context is coming from? It's from the pytest fixture defined below.
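+
+    A typical test here (a sketch; x and y stand for numpy test inputs) builds
+    constant nodes for the op's inputs, wraps the op under test in an
+    InternalTorchIRNode, runs the conversion function, and checks the SSA value:
+
+        constants, inputs, out = self._gen_constants(2, [x, y])
+        node = InternalTorchIRNode(kind="Mul", inputs=inputs, outputs=[out])
+        ssa = self._construct_test_graph(context, ops.mul, node, out,
+                                         constants=constants)
+        np.testing.assert_allclose(x * y, ssa.val)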
+    """
+
+    @pytest.fixture
+    def context(self):
+        return TranscriptionContext()
+
+    @pytest.fixture
+    def set_random_seeds(self):
+        torch.manual_seed(1)
+        np.random.seed(1)
+
+    @pytest.mark.parametrize("dtype", [torch.bool, torch.float, torch.int])
+    def test_constant(self, context, dtype):
+        test_data = torch.ones(1, dtype=dtype)
+        node = InternalTorchIRNode(
+            attr={"value": test_data}, kind="constant", inputs=[], outputs=["1"]
+        )
+        ssa = self._construct_test_graph(context, ops.constant, node, "1")
+        assert np.allclose(test_data, ssa.val)
+        assert test_data.shape == ssa.shape
+
+    def test_constant_magic(self, context):
+        test_val = ops.PYTORCH_DEFAULT_VALUE
+        node = InternalTorchIRNode(
+            attr={"value": test_val}, kind="constant", inputs=[], outputs=["1"]
+        )
+        ssa = self._construct_test_graph(context, ops.constant, node, "1")
+        # We expect the magic default to get converted to None
+        assert ssa is None
+
+    @staticmethod
+    def _gen_constants(size, vals):
+        """Helper function. Generates a list of internal constant nodes.
+
+        Arguments:
+            size: number of constants to generate
+            vals: Either a list of values for each constant or one value used for all constants."""
+        is_list = isinstance(vals, list)
+        if is_list:
+            if len(vals) != size:
+                raise ValueError("len(@vals): {} != size: {}".format(len(vals), size))
+        constants = []
+        for index in range(size):
+            if is_list:
+                val = vals[index]
+            else:
+                val = vals
+            constants.append(
+                InternalTorchIRNode(
+                    attr={"value": val},
+                    kind="constant",
+                    inputs=[],
+                    outputs=[str(index)],
+                )
+            )
+        input_list = [str(i) for i in range(size)]
+        output_name = str(len(input_list))
+        return constants, input_list, output_name
+
+    @staticmethod
+    def _construct_test_graph(
+        context, test_op, test_node, output_name=None, graph_inputs=None, constants=None
+    ):
+        """Construct a Function for the given @graph_inputs, @constants,
+        and @test_node. Returns the output of the graph, which is the ssa
+        Var of the given @output_name.
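+
+        Arguments (inferred from how this helper is used below):
+            context: the TranscriptionContext the converted ops are added to.
+            test_op: the conversion function under test, e.g. ops.mul.
+            test_node: the InternalTorchIRNode describing the op.
+            output_name: name of the Var to look up in @context and return.
+            graph_inputs: dict mapping names to mb.placeholder dynamic inputs.
+            constants: constant nodes converted before running @test_node.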
+ """ + if graph_inputs is None: + graph_inputs = {} + if constants is None: + constants = [] + + with Function(inputs=graph_inputs) as ssa_func: + for name in ssa_func.inputs.keys(): + context.add(ssa_func.inputs[name]) + for node in constants: + ops.constant(context, node) + test_op(context, test_node) + + ssa = None + if output_name: + ssa = context[output_name] + return ssa + + def _test_elementwise_binary( + self, context, op_name, op, test_input, num_constants, expected_result + ): + """Helper function, runs op on test input and compares against expected result""" + constants, input_list, output_name = self._gen_constants( + num_constants, test_input + ) + eb_node = InternalTorchIRNode( + kind=op_name, inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, op, eb_node, output_name, constants=constants + ) + np.testing.assert_allclose(expected_result, ssa.val, atol=1e-6) + + def _test_cast(self, context, test_val, op_kind, op_func, python_type): + constants, input_list, output_name = self._gen_constants(1, [test_val]) + node = InternalTorchIRNode( + kind=op_kind, inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, op_func, node, output_name, constants=constants + ) + assert ssa.val == python_type(test_val) + + def test_add(self, context): + test_input_1 = np.random.rand(2, 3) + test_input_2 = np.random.rand(2, 3) + scale_factor = 1 + self._test_elementwise_binary( + context, + "Add", + ops.add, + [test_input_1, test_input_2, scale_factor], + 3, + test_input_1 + test_input_2, + ) + + def test_add_no_scale_factor(self, context): + test_input_1 = np.random.rand(2, 3) + test_input_2 = np.random.rand(2, 3) + self._test_elementwise_binary( + context, + "Add", + ops.add, + [test_input_1, test_input_2], + 2, + test_input_1 + test_input_2, + ) + + @pytest.mark.parametrize( + "test_input_1, test_input_2", + [(np.random.rand(3, 2), np.random.rand(3, 2)), (np.random.rand(3, 2), 5), ], + ) + def test_sub(self, context, test_input_1, test_input_2): + scale_factor = 1 + self._test_elementwise_binary( + context, + "Sub", + ops.sub, + [test_input_1, test_input_2, scale_factor], + 3, + test_input_1 - test_input_2, + ) + + @pytest.mark.parametrize( + "test_input_1, test_input_2", + [(np.random.rand(3, 2), np.random.rand(3, 2)), (np.random.rand(3, 2), 5), ], + ) + def test_rsub(self, context, test_input_1, test_input_2): + scale_factor = 1 + self._test_elementwise_binary( + context, + "rsub", + ops.sub, + [test_input_1, test_input_2, scale_factor], + 3, + # Note the reversal of arg ordering relative to 'sub' + test_input_2 - test_input_1, + ) + + def test_mul(self, context): + test_input_1 = np.random.rand(3, 2) + test_input_2 = np.random.rand(3, 2) + self._test_elementwise_binary( + context, + "Mul", + ops.mul, + [test_input_1, test_input_2], + 2, + test_input_1 * test_input_2, + ) + + def test_div(self, context): + test_input_1 = np.random.rand(3, 2) + test_input_2 = np.random.rand(3, 2) + self._test_elementwise_binary( + context, + "Div", + ops.div, + [test_input_1, test_input_2], + 2, + np.divide(test_input_1, test_input_2), + ) + + def test_floor_divide(self, context): + test_input_1 = np.random.randint(low=1, high=100, size=(3, 2)) + test_input_2 = np.random.randint(low=1, high=100, size=(3, 2)) + self._test_elementwise_binary( + context, + "floor_divide", + ops.floor_divide, + [test_input_1, test_input_2], + 2, + np.floor_divide(test_input_1, test_input_2), + ) + + def test_pow(self, context): + test_input_1 = 
np.random.rand(3, 2) + test_input_2 = np.random.rand(3, 2) + self._test_elementwise_binary( + context, + "Pow", + ops.pow, + [test_input_1, test_input_2], + 2, + np.power(test_input_1, test_input_2), + ) + + def test_eq(self, context): + test_input_1 = torch.zeros([2, 3, 4, 5, 6]).float() + test_input_2 = torch.ones([2, 3, 4, 5, 6]).float() + test_input_2[0][0][0][0][0] = 0 + expected_output = (test_input_1 == test_input_2).float() + + self._test_elementwise_binary( + context, "Eq", ops.eq, [test_input_1, test_input_2], 2, expected_output + ) + + def test_ne(self, context): + test_input_1 = torch.zeros([2, 3, 4, 5, 6]).float() + test_input_2 = torch.ones([2, 3, 4, 5, 6]).float() + test_input_2[0][0][0][0][0] = 0 + expected_output = (test_input_1 != test_input_2).float() + + self._test_elementwise_binary( + context, "ne", ops.ne, [test_input_1, test_input_2], 2, expected_output + ) + + def test_le(self, context): + test_input_1 = torch.zeros([2, 3, 4, 5, 6]).float() + test_input_2 = torch.ones([2, 3, 4, 5, 6]).float() + test_input_2[0][0][0][0][0] = 0 + expected_output = (test_input_1 <= test_input_2).float() + + self._test_elementwise_binary( + context, "Le", ops.le, [test_input_1, test_input_2], 2, expected_output + ) + + def test_lt(self, context): + test_input_1 = torch.zeros([2, 3, 4, 5, 6]).float() + test_input_2 = torch.ones([2, 3, 4, 5, 6]).float() + test_input_2[0][0][0][0][0] = 0 + expected_output = (test_input_1 < test_input_2).float() + + self._test_elementwise_binary( + context, "Lt", ops.lt, [test_input_1, test_input_2], 2, expected_output + ) + + def test_ge(self, context): + test_input_1 = torch.zeros([2, 3, 4, 5, 6]).float() + test_input_2 = torch.ones([2, 3, 4, 5, 6]).float() + test_input_2[0][0][0][0][0] = 0 + expected_output = (test_input_1 >= test_input_2).float() + + self._test_elementwise_binary( + context, "Ge", ops.ge, [test_input_1, test_input_2], 2, expected_output + ) + + def test_gt(self, context): + test_input_1 = torch.zeros([2, 3, 4, 5, 6]).float() + test_input_2 = torch.ones([2, 3, 4, 5, 6]).float() + test_input_2[0][0][0][0][0] = 0 + expected_output = (test_input_1 > test_input_2).float() + + self._test_elementwise_binary( + context, "Gt", ops.gt, [test_input_1, test_input_2], 2, expected_output + ) + + @pytest.mark.parametrize( + "size, array_type", + itertools.product( + [1, 5, 7], + [ + ("ListConstruct", ops.listconstruct), + ("TupleConstruct", ops.tupleconstruct), + ], + ), + ) + def test_arrayconstruct_scalars(self, context, size, array_type): + constant_vals = list(range(size)) + array_kind = array_type[0] + array_op = array_type[1] + constants, input_list, output_name = self._gen_constants(size, constant_vals) + ac_node = InternalTorchIRNode( + kind=array_kind, inputs=input_list, outputs=[output_name], + ) + ssa = self._construct_test_graph( + context, array_op, ac_node, output_name, constants=constants + ) + expected_val = np.arange(size) + np.testing.assert_equal(ssa.shape, (size,)) + np.testing.assert_array_equal(ssa.val, expected_val) + + @pytest.mark.parametrize( + "shape1, shape2, array_type", + itertools.product( + [(1, 2), (3, 4, 5), (2,)], + [(2, 1), (1, 4, 5), (3,)], + [ + ("ListConstruct", ops.listconstruct), + ("TupleConstruct", ops.tupleconstruct), + ], + ), + ) + def test_arrayconstruct_nonscalar(self, context, shape1, shape2, array_type): + tensor1 = torch.rand(shape1) + tensor2 = torch.rand(shape2) + array_kind = array_type[0] + array_op = array_type[1] + constants, input_list, output_name = self._gen_constants(2, [tensor1, tensor2]) 
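+        # Non-scalar inputs cannot be folded into a single array, so the
+        # construct op is expected to yield a tuple of two vars, checked below.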
+ ac_node = InternalTorchIRNode( + kind=array_kind, inputs=input_list, outputs=[output_name], + ) + ssa = self._construct_test_graph( + context, array_op, ac_node, output_name, constants=constants + ) + expected_val = (tensor1.numpy(), tensor2.numpy()) + np.testing.assert_equal(len(ssa), 2) + for x, y in zip(ssa, expected_val): + np.testing.assert_allclose(x.val, y) + + @pytest.mark.parametrize( + "input_shape, dim0, dim1", + [ + x + for x in itertools.product( + [(1, 2, 3), (1, 2, 3, 4), (1, 2, 3, 4, 5)], [0, 1, -1], [0, 2, -2], + ) + ] + + [((1, 2), None, None)], + ) + def test_transpose(self, context, input_shape, dim0, dim1): + test_input = torch.rand(input_shape) + + constant_list = [test_input] + if len(input_shape) > 2: + constant_list += [dim0, dim1] + kind = "transpose" + expected_result = torch.transpose(test_input, dim0, dim1) + else: + kind = "t" + expected_result = test_input.t() + + constants, input_list, output_name = self._gen_constants( + len(constant_list), constant_list + ) + transpose_node = InternalTorchIRNode( + kind=kind, inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.transpose, transpose_node, output_name, constants=constants, + ) + np.testing.assert_array_equal(expected_result.shape, ssa.shape) + np.testing.assert_allclose(expected_result, ssa.val) + + @pytest.mark.parametrize( + "dim1, dim2, dim3", itertools.product([1, 2, 5], [2, 5, 10], [1, 2, 5]), + ) + def test_matmul(self, context, dim1, dim2, dim3): + mat1 = torch.rand((dim1, dim2)) + mat2 = torch.rand((dim2, dim3)) + constant_vals = [ + mat1, + mat2, + ] + constants, input_list, output_name = self._gen_constants(2, constant_vals) + + matmul_node = InternalTorchIRNode( + kind="matmul", inputs=input_list, outputs=[output_name], + ) + + ssa = self._construct_test_graph( + context, ops.matmul, matmul_node, output_name, constants=constants + ) + expected_result = torch.matmul(mat1, mat2).detach().numpy() + assert np.allclose(expected_result, ssa.val) + + @pytest.mark.parametrize( + "input_shape, axis, expected_shape", + [ + ((1, 2), None, (2,)), + ((1, 2), 0, (2,)), + ((1, 2, 1), None, (2,)), + ((1, 2, 1, 1), None, (2,)), + ((1, 2, 1, 1), 2, (1, 2, 1)), + ((1, 2, 1, 1, 1), None, (2,)), + ], + ) + def test_squeeze(self, context, input_shape, axis, expected_shape): + test_data = torch.rand(input_shape) + if axis is None: + constants, input_list, output_name = self._gen_constants(1, test_data) + else: + constants, input_list, output_name = self._gen_constants( + 2, [test_data, axis] + ) + squeeze_node = InternalTorchIRNode( + kind="Squeeze", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.squeeze, squeeze_node, output_name, constants=constants + ) + if axis is None: + expected_result = torch.squeeze(test_data) + else: + expected_result = torch.squeeze(test_data, axis) + assert np.allclose(expected_result, ssa.val) + assert expected_result.size() == torch.Size(expected_shape) + + @pytest.mark.parametrize( + "input_shape, axis, expected_shape", + [ + ((2,), 0, (1, 2)), + ((2,), 1, (2, 1)), + ((2,), -1, (2, 1)), + ((2, 3), 1, (2, 1, 3)), + ], + ) + def test_unsqueeze(self, context, input_shape, axis, expected_shape): + test_data = torch.rand(input_shape) + constants, input_list, output_name = self._gen_constants(2, [test_data, axis]) + unsqueeze_node = InternalTorchIRNode( + kind="Unsqueeze", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.unsqueeze, unsqueeze_node, 
output_name, constants=constants + ) + expected_result = torch.unsqueeze(test_data, axis) + assert np.allclose(expected_result, ssa.val) + assert expected_result.size() == torch.Size(expected_shape) + + @pytest.mark.parametrize( + "input_shape, start, end", + [ + ((2, 1, 1, 2), 1, 3), + ((2, 2, 1, 1), 1, -2), + ((1, 1, 1), 0, 2), + ((1, 2), 0, 1), + ((1, 2), 1, 1), + ((1, 1), 1, -1), + ((1,), 0, 0), + ], + ) + def test_flatten(self, context, input_shape, start, end): + test_data = torch.rand(input_shape) + constants, input_list, output_name = self._gen_constants( + 3, [test_data, start, end] + ) + flatten_node = InternalTorchIRNode( + kind="Flatten", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.flatten, flatten_node, output_name, constants=constants + ) + expected_result = torch.flatten(test_data, start, end) + assert np.allclose(expected_result, ssa.val) + + @pytest.mark.parametrize( + "start, end", [(0, -5), (100, 2), (2, 100), (-3, -4),], + ) + def test_flatten_exception(self, context, start, end): + test_data = torch.rand(1, 1, 1, 1) + constants, input_list, output_name = self._gen_constants( + 3, [test_data, start, end] + ) + flatten_node = InternalTorchIRNode( + kind="Flatten", inputs=input_list, outputs=[output_name] + ) + with pytest.raises(ValueError): + self._construct_test_graph( + context, ops.flatten, flatten_node, output_name, constants=constants, + ) + + @pytest.mark.parametrize( + "input_shape", [(2, 3), (2, 3, 4), (2, 3, 4, 5), (2, 3, 4, 5, 6),], + ) + def test_permute(self, context, input_shape): + test_data = torch.rand(*input_shape) + permutation = list(range(len(input_shape))) + np.random.shuffle(permutation) + constants, input_list, output_name = self._gen_constants( + 2, [test_data, permutation] + ) + permute_node = InternalTorchIRNode( + kind="Permute", inputs=input_list, outputs=[output_name], + ) + ssa = self._construct_test_graph( + context, ops.permute, permute_node, output_name, constants=constants + ) + expected_result = test_data.permute(*permutation) + assert expected_result.shape == ssa.shape + + @pytest.mark.parametrize( + "in_features, out_features, scaling", + itertools.product([10, 25, 100], [3, 6], [1.0, 0.5]), + ) + def test_addmm(self, context, in_features, out_features, scaling): + input_data = torch.rand((1, in_features)) + weight_data = torch.rand((in_features, out_features)) + bias_data = torch.rand((out_features)) + constant_vals = [ + scaling, + input_data, + weight_data, + bias_data, + ] + constants, _, output_name = self._gen_constants(4, constant_vals) + + addmm_node = InternalTorchIRNode( + kind="addmm", inputs=["3", "1", "2", "0", "0"], outputs=[output_name], + ) + + ssa = self._construct_test_graph( + context, ops.addmm, addmm_node, output_name, constants=constants + ) + torch_linear = nn.Linear(in_features=in_features, out_features=out_features,) + expected_shape = tuple(torch_linear(input_data).shape) + assert expected_shape == ssa.shape + + @pytest.mark.parametrize( + "height, width, kernel_size, stride, padding, dilation", + itertools.product([5, 6], [5, 7], [1, 3], [1, 3], [1, 3], [1, 3]), + ) + def test_convolution2d( + self, + context, + height, + width, + kernel_size, + stride, + padding, + dilation, + groups=1, + in_channels=1, + out_channels=2, + ): + test_input = torch.rand(1, in_channels, height, width) + constant_vals = [ + 1, # None argument + test_input, + np.random.rand( + out_channels, in_channels, kernel_size, kernel_size + ), # weights + np.random.rand(out_channels), # 
bias + np.array([stride, stride]), + np.array([padding, padding]), + np.array([dilation, dilation]), + False, # transposed + np.array([0, 0]), # output_pad + groups, + ] + constants, _, output_name = self._gen_constants( + len(constant_vals), constant_vals + ) + # For reference, the values for `kind` and `inputs` indices are determined from the definition for Torch's + # `at::_convolution` used for all convolutions. The link below is approximately correct at the time of writing. + # https://github.com/pytorch/pytorch/blob/bd604mb5b7ae4f6388aca461891d620b0d485fbb/aten/src/ATen/native/Convolution.cpp#L544 + conv_node = InternalTorchIRNode( + kind="_convolution", + inputs=["1", "2", "3", "4", "5", "6", "7", "8", "9", "0", "0", "0"], + outputs=[output_name], + ) + + ssa = self._construct_test_graph( + context, ops._convolution, conv_node, output_name, constants=constants + ) + torch_conv = nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + ) + expected_shape = tuple(torch_conv(test_input).shape) + assert ssa.val is None + assert expected_shape == ssa.shape + + @pytest.mark.parametrize( + "depth, height, width, kernel_size, stride, padding, dilation, groups", + itertools.product( + [5, 5], + [5, 6], + [5, 7], + [1, 3], + [(1, 1, 1), (3, 2, 1)], + [(1, 1, 1), (1, 3, 2)], + [(1, 1, 1), (1, 2, 3)], + [ + 1, + -1, + ], # -1 groups indicates it should be set to the number of input channels for depthwise convolution + ), + ) + def test_convolution3d( + self, + context, + depth, + height, + width, + kernel_size, + stride, + padding, + dilation, + groups, + in_channels=2, + out_channels=4, + ): + if groups == -1: + groups = in_channels + test_input = torch.rand(1, in_channels, depth, height, width) + constant_vals = [ + 1, # None argument + test_input, + np.random.rand( + out_channels, + in_channels // groups, + kernel_size, + kernel_size, + kernel_size, + ), # weights + np.random.rand(out_channels), # bias + # PyTorch's Conv3d accepts either an int (for all dimensions) or a 3-tuple of ints (one per dimension) + np.array([stride[0], stride[1], stride[2]]), + np.array([padding[0], padding[1], padding[2]]), + np.array([dilation[0], dilation[1], dilation[2]]), + False, # transposed + np.array([0, 0, 0]), # out_pad + groups, + ] + constants, _, output_name = self._gen_constants( + len(constant_vals), constant_vals + ) + # For reference, the values for `kind` and `inputs` indices are determined from the definition for Torch's + # `at::_convolution` used for all convolutions. The link below is approximately correct at the time of writing. 
+ # https://github.com/pytorch/pytorch/blob/bd604mb5b7ae4f6388aca461891d620b0d485fbb/aten/src/ATen/native/Convolution.cpp#L544 + conv_node = InternalTorchIRNode( + kind="_convolution", + inputs=["1", "2", "3", "4", "5", "6", "7", "8", "9", "0", "0", "0"], + outputs=[output_name], + ) + + ssa = self._construct_test_graph( + context, ops._convolution, conv_node, output_name, constants=constants + ) + torch_conv = nn.Conv3d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + ) + expected_result = torch_conv(test_input) + expected_shape = tuple(expected_result.shape) + assert ssa.val is None + assert expected_shape == ssa.shape + + @pytest.mark.parametrize( + "height, width, kernel_size, stride, padding, dilation", + itertools.product([5, 6], [5, 7], [1, 3], [2, 3], [0, 1], [1, 3]), + ) + def test_convolution_transpose2d( + self, + context, + height, + width, + kernel_size, + stride, + padding, + dilation, + groups=1, + in_channels=1, + out_channels=2, + ): + test_input = torch.rand(1, in_channels, height, width) + + constant_vals = [ + np.random.rand( + in_channels, out_channels, kernel_size, kernel_size + ), # weights + np.random.rand(out_channels), # bias + np.array([stride, stride]), + np.array([padding, padding]), + np.array([dilation, dilation]), + True, # transposed, + np.array([0, 0]), # output_pad + groups, + False, + False, + False, + ] + graph_inputs = {"input": mb.placeholder(test_input.shape, dtype=types.float)} + + constants, input_list, output_name = self._gen_constants( + len(constant_vals), constant_vals + ) + conv_node = InternalTorchIRNode( + kind="_convolution", inputs=["input"] + input_list, outputs=[output_name], + ) + + ssa = self._construct_test_graph( + context, + ops._convolution, + conv_node, + output_name, + constants=constants, + graph_inputs=graph_inputs, + ) + torch_conv = nn.ConvTranspose2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + ) + expected_shape = tuple(torch_conv(test_input).shape) + assert ssa.val is None + assert expected_shape == ssa.shape + + @pytest.mark.parametrize( + "input_shape, dim, keepdim", + itertools.product([(3, 20, 20), (1, 50, 50)], [0, 1, 2, [0, 2]], [True, False]), + ) + def test_mean(self, context, input_shape, dim, keepdim): + test_input = torch.rand(*input_shape) + + constants, input_list, output_name = self._gen_constants( + 4, [test_input, dim, keepdim, None] + ) + mean_node = InternalTorchIRNode( + kind="mean", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.mean, mean_node, output_name, constants=constants + ) + expected_result = torch.mean(test_input, dim, keepdim) + assert np.allclose(expected_result, ssa.val) + + def test_mean_no_dims(self, context): + test_input = torch.rand((3, 20, 20)) + + constants, input_list, output_name = self._gen_constants(2, [test_input, None]) + mean_node = InternalTorchIRNode( + kind="mean", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.mean, mean_node, output_name, constants=constants + ) + expected_result = torch.mean(test_input) + assert np.allclose(expected_result, ssa.val) + + def test_embedding(self, context): + EMBEDDING_DIMENSION = 10 + NUM_EMBEDDINGS = 20 + input_shape = (NUM_EMBEDDINGS, EMBEDDING_DIMENSION) + # size is arbitrary for indices + indices = 
np.random.randint(NUM_EMBEDDINGS, size=100) + test_input = torch.rand(input_shape) + constants, input_list, output_name = self._gen_constants( + 2, [test_input, indices] + ) + gather_node = InternalTorchIRNode( + kind="embedding", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.embedding, gather_node, output_name, constants=constants + ) + torch_embedding = nn.Embedding.from_pretrained(test_input) + expected_result = torch_embedding(torch.LongTensor(indices)) + assert np.allclose(expected_result, ssa.val) + + @pytest.mark.parametrize( + "dim", [0, 1, 2, 3, 4], + ) + def test_size(self, context, dim): + test_input = torch.rand(1, 2, 3, 4, 5) + + graph_inputs = {"input": mb.placeholder(test_input.shape, dtype=types.float)} + constants, input_list, output_name = self._gen_constants(1, [dim]) + size_node = InternalTorchIRNode( + kind="size", inputs=["input"] + input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, + ops.size, + size_node, + output_name, + constants=constants, + graph_inputs=graph_inputs, + ) + expected_result = test_input.shape[dim] + assert expected_result == ssa.val + + @pytest.mark.parametrize( + "dim", [0, 1], + ) + def test_size_symbolic(self, context, dim): + test_shape = (3, get_new_symbol()) + graph_inputs = {"input": mb.placeholder(shape=test_shape, dtype=types.float)} + constants, input_list, output_name = self._gen_constants(1, [dim]) + size_node = InternalTorchIRNode( + kind="size", inputs=["input"] + input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, + ops.size, + size_node, + output_name, + constants=constants, + graph_inputs=graph_inputs, + ) + expected_result = test_shape[dim] + assert expected_result == ssa.sym_val + + @pytest.mark.parametrize( + "input_size, shape", + itertools.product([(5, 12), (1, 4, 15), (3, 5, 4)], [(3, 20), (-1, 6), (60,)],), + ) + def test_view(self, context, input_size, shape): + test_input = torch.rand(input_size) + + constants, input_list, output_name = self._gen_constants(2, [test_input, shape]) + view_node = InternalTorchIRNode( + kind="view", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.view, view_node, output_name, constants=constants + ) + expected_result = test_input.view(shape) + assert np.allclose(expected_result, ssa.val) + + @pytest.mark.parametrize( + "input_shape, output_shape", + itertools.product( + [(1, 3, 15, 15), (1, 1, 2, 2), (1, 3, 10, 10)], [(1, 1), (2, 2), (2, 1)], + ), + ) + def test_adaptive_avg_pool2d(self, context, input_shape, output_shape): + test_input = torch.rand(input_shape) + + constants, input_list, output_name = self._gen_constants( + 2, [test_input, output_shape] + ) + + adaptive_avg_pool2d_node = InternalTorchIRNode( + kind="adaptive_avg_pool2d", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, + ops.adaptive_avg_pool2d, + adaptive_avg_pool2d_node, + output_name, + constants=constants, + ) + expected_result = torch._adaptive_avg_pool2d(test_input, output_shape) + expected_shape = tuple(expected_result.shape) + assert expected_shape == ssa.shape + # We only expect numerical output when reducing to global average. + if output_shape == (1, 1): + assert np.allclose(expected_result, ssa.val) + + def test_adaptive_avg_pool2d_exception(self, context): + # For this test, the input tensor HW channels are dynamic. 
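+        # (The H and W spatial dimensions are symbolic, so a fixed non-global
+        # output size such as (2, 1) cannot be computed and should raise.)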
+ input_shape = [1, 3, get_new_symbol(), get_new_symbol()] + graph_inputs = {"input": mb.placeholder(input_shape, dtype=types.float)} + constants, input_list, output_name = self._gen_constants(1, [(2, 1)]) + adaptive_avg_pool2d_node = InternalTorchIRNode( + kind="adaptive_avg_pool2d", + inputs=["input"] + input_list, + outputs=[output_name], + ) + with pytest.raises(ValueError): + self._construct_test_graph( + context, + ops.adaptive_avg_pool2d, + adaptive_avg_pool2d_node, + output_name, + constants=constants, + graph_inputs=graph_inputs, + ) + + @pytest.mark.parametrize("input_shape", [(1, 3, 15, 15), (1, 1, 1, 1)]) + def test_batch_norm(self, context, input_shape): + test_input = torch.rand(input_shape) + channels = input_shape[1] + constants, input_list, output_name = self._gen_constants( + 9, + [ + torch.rand(input_shape), # input + torch.rand(channels), # weight + torch.rand(channels), # bias + torch.rand(channels), # running mean + torch.rand(channels), # running var + 0, # training + 0.1, # momentum + 1e-6, # eps + 1, # cudnn_enabled + ], + ) + + batch_norm_node = InternalTorchIRNode( + kind="batch_norm", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.batch_norm, batch_norm_node, output_name, constants=constants + ) + assert ssa.val is None + assert ssa.shape == tuple(test_input.shape) + + @pytest.mark.parametrize("input_shape", [(1, 3, 15, 15), (1, 1, 1, 1)]) + def test_instance_norm(self, context, input_shape): + test_input = torch.rand(input_shape) + channels = input_shape[1] + constants, input_list, output_name = self._gen_constants( + 9, + [ + torch.rand(input_shape), # input + torch.rand(channels), # weight + torch.rand(channels), # bias + torch.rand(channels), # running mean + torch.rand(channels), # running var + 0, # training + 0.1, # momentum + 1e-6, # eps + 1, # cudnn_enabled + ], + ) + + instant_norm_node = InternalTorchIRNode( + kind="instance_norm", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.instance_norm, instant_norm_node, output_name, constants=constants + ) + assert ssa.val is None + assert ssa.shape == tuple(test_input.shape) + + @pytest.mark.parametrize("axis", [1, 2, 3]) + def test_cat(self, context, axis): + input_shape = (1, 3, 240, 320) + + test_input1 = torch.rand(input_shape) + test_input2 = torch.rand(input_shape) + const_input = torch.rand(input_shape) + + graph_inputs = { + "input1": mb.placeholder(input_shape, dtype=types.float), + "input2": mb.placeholder(input_shape, dtype=types.float), + } + dim_node = InternalTorchIRNode( + attr={"value": axis}, kind="constant", inputs=[], outputs=["0"], + ) + const_tensor_node = InternalTorchIRNode( + attr={"value": const_input.numpy()}, + kind="constant", + inputs=[], + outputs=["1"], + ) + listconstruct_node = InternalTorchIRNode( + kind="listconstruct", inputs=["1", "input1", "input2"], outputs=["2"] + ) + cat_node = InternalTorchIRNode( + kind="cat", inputs=["2", "0"], outputs=["output"] + ) + + with Function(inputs=graph_inputs) as ssa_func: + context.add(ssa_func.inputs["input1"]) + context.add(ssa_func.inputs["input2"]) + ops.constant(context, dim_node) + ops.constant(context, const_tensor_node) + ops.listconstruct(context, listconstruct_node) + ops.cat(context, cat_node) + + ssa = context["output"] + expected_result = torch.cat( + (const_input, test_input1, test_input2), dim=axis + ).numpy() + assert np.allclose(expected_result.shape, ssa.shape) + + @pytest.mark.parametrize("axis", [0, 1, 2, 3, 4]) + def 
test_stack(self, context, axis): + input_shape = (1, 3, 240, 320) + + test_input1 = torch.rand(input_shape) + test_input2 = torch.rand(input_shape) + const_input = torch.rand(input_shape) + + graph_inputs = { + "input1": mb.placeholder(input_shape, dtype=types.float), + "input2": mb.placeholder(input_shape, dtype=types.float), + } + dim_node = InternalTorchIRNode( + attr={"value": axis}, kind="constant", inputs=[], outputs=["0"], + ) + const_tensor_node = InternalTorchIRNode( + attr={"value": const_input.numpy()}, + kind="constant", + inputs=[], + outputs=["1"], + ) + listconstruct_node = InternalTorchIRNode( + kind="listconstruct", inputs=["1", "input1", "input2"], outputs=["2"] + ) + stack_node = InternalTorchIRNode( + kind="stack", inputs=["2", "0"], outputs=["output"] + ) + + with Function(inputs=graph_inputs) as ssa_func: + context.add(ssa_func.inputs["input1"]) + context.add(ssa_func.inputs["input2"]) + ops.constant(context, dim_node) + ops.constant(context, const_tensor_node) + ops.listconstruct(context, listconstruct_node) + ops.stack(context, stack_node) + + ssa = context["output"] + expected_result = np.stack((const_input, test_input1, test_input2), axis=axis) + assert np.allclose(expected_result.shape, ssa.shape) + + def test_item(self, context): + const_val = 0 + constants, input_list, output_name = self._gen_constants(1, [const_val]) + item_node = InternalTorchIRNode( + kind="item", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.item, item_node, output_name, constants=constants + ) + assert ssa.val == const_val + + def test_item_exception(self, context): + const_val = [0, 1] + constants, input_list, output_name = self._gen_constants(1, [const_val]) + item_node = InternalTorchIRNode( + kind="item", inputs=input_list, outputs=[output_name] + ) + with pytest.raises(ValueError): + self._construct_test_graph( + context, ops.item, item_node, output_name, constants=constants, + ) + + @pytest.mark.parametrize("test_val", [1, 1.5, False]) + def test_bool(self, context, test_val): + self._test_cast(context, test_val, "bool", ops._bool, bool) + + @pytest.mark.parametrize("test_val", [1, 1.5, -0.3]) + def test_int(self, context, test_val): + self._test_cast(context, test_val, "int", ops._int, int) + + @pytest.mark.parametrize("input_shape", [(1, 3, 15, 15), (1, 1, 1, 1)]) + def test_layer_norm(self, context, input_shape): + graph_inputs = {"input": mb.placeholder(input_shape, dtype=types.float)} + constants, input_list, output_name = self._gen_constants( + 5, + [ + input_shape, # normalized shape + torch.rand(*input_shape), # weight + torch.rand(*input_shape), # running bias + 1e-6, + 1, # cudnn enabled + ], + ) + + layer_norm_node = InternalTorchIRNode( + kind="layer_norm", inputs=["input"] + input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, + ops.layer_norm, + layer_norm_node, + output_name, + graph_inputs=graph_inputs, + constants=constants, + ) + assert ssa.val is None + assert ssa.shape == input_shape + + @pytest.mark.parametrize("shape", [(1, 2), (2, 3, 4, 5), (3, 4, 5),]) + def test_ones(self, context, shape): + constants, constant_input_list, output_name = self._gen_constants( + 6, [shape, 1, 1, 1, 1, 1] + ) + ones_node = InternalTorchIRNode( + kind="ones", inputs=constant_input_list, outputs=[output_name], + ) + ssa = self._construct_test_graph( + context, ops.ones, ones_node, output_name, constants=constants, + ) + assert ssa.shape == shape + + @pytest.mark.parametrize("input_shape", [(1, 2), (2, 
3, 4, 5), (3, 4, 5),])
+    def test_ones_like(self, context, input_shape):
+        graph_inputs = {"input": mb.placeholder(input_shape, dtype=types.float)}
+        constants, constant_input_list, output_name = self._gen_constants(5, 1)
+        ones_node = InternalTorchIRNode(
+            kind="ones_like",
+            inputs=["input"] + constant_input_list,
+            outputs=[output_name],
+        )
+        ssa = self._construct_test_graph(
+            context,
+            ops.ones_like,
+            ones_node,
+            output_name,
+            graph_inputs=graph_inputs,
+            constants=constants,
+        )
+        assert ssa.shape == input_shape
+
+    @pytest.mark.parametrize(
+        "input_size, dim, index",
+        itertools.product(
+            [(13, 43, 10), (39, 14, 11, 9)], [0, 1, 2], [0, 1, 3, 8, -1],
+        ),
+    )
+    def test_select(self, context, input_size, dim, index):
+        graph_inputs = {"input1": mb.placeholder(input_size, dtype=types.float)}
+        constants, constant_input_list, output_name = self._gen_constants(
+            2, [dim, index]
+        )
+        select_node = InternalTorchIRNode(
+            kind="select",
+            inputs=["input1"] + constant_input_list,
+            outputs=[output_name],
+        )
+        ssa = self._construct_test_graph(
+            context,
+            ops.select,
+            select_node,
+            output_name,
+            graph_inputs=graph_inputs,
+            constants=constants,
+        )
+        select_index = index
+        if index < 0:
+            select_index += input_size[dim]
+        expected_shape = tuple(
+            torch.rand(input_size)
+            .index_select(dim, torch.tensor([select_index]))
+            .squeeze(dim)
+            .shape
+        )
+        assert np.allclose(ssa.shape, expected_shape)
+
+    @pytest.mark.parametrize(
+        "dynamic, test_tuple", itertools.product([True, False], [True, False])
+    )
+    def test_tuple_and_list_unpack(self, context, dynamic, test_tuple):
+        """
+        If @dynamic is True, a dynamic graph input is packed alongside the constants.
+        If @test_tuple is True, tests tupleUnpack; otherwise tests listUnpack.
+        """
+        if test_tuple:
+            construct_op = ops.tupleconstruct
+            construct_name = "TupleConstruct"
+            unpack_name = "TupleUnpack"
+        else:
+            construct_op = ops.listconstruct
+            construct_name = "ListConstruct"
+            unpack_name = "ListUnpack"
+
+        input_shape = (1, 2, 3)
+        constant_vals = [str(i) for i in range(1, 6)]
+        constants_unpacked = [str(i) for i in range(6, 11)]
+        constants, input_list, _ = self._gen_constants(5, constant_vals)
+        output_list = constants_unpacked[:]
+        graph_inputs = {}
+        if dynamic:
+            graph_input_name = "input1"
+            graph_inputs = {
+                graph_input_name: mb.placeholder(input_shape, dtype=types.float)
+            }
+            input_list += [graph_input_name]
+            output_list += [graph_input_name + "_out"]
+
+        construct_node = InternalTorchIRNode(
+            kind=construct_name, inputs=input_list, outputs=["construct"],
+        )
+        unpack_node = InternalTorchIRNode(
+            kind=unpack_name, inputs=["construct"], outputs=output_list
+        )
+        with Function(inputs=graph_inputs) as ssa_func:
+            if dynamic:
+                context.add(ssa_func.inputs["input1"])
+            for node in constants:
+                ops.constant(context, node)
+            construct_op(context, construct_node)
+            ops.tupleunpack(context, unpack_node)
+
+        ssa_constants = []
+        for name in constants_unpacked:
+            ssa_constants.append(context[name].val)
+        assert ssa_constants == constant_vals
+
+        if dynamic:
+            ssa_dynamic = context[graph_input_name + "_out"]
+            assert ssa_dynamic.val is None
+            assert ssa_dynamic.shape == input_shape
+
+    def _test_pool(
+        self, context, test_input, param_list, op_kind, op_func, expected_result
+    ):
+        constants, input_list, output_name = self._gen_constants(
+            len(param_list) + 1, [test_input] + param_list,
+        )
+
+        pool_node = InternalTorchIRNode(
+            kind=op_kind, inputs=input_list, outputs=[output_name]
+        )
+        ssa = self._construct_test_graph(
+            context, op_func, pool_node,
output_name, constants=constants, + ) + expected_shape = tuple(expected_result.shape) + assert expected_shape == ssa.shape + + @pytest.mark.parametrize( + "input_shape, kernel_size, stride, pad, include_pad, ceil_mode", + itertools.product( + [(1, 3, 15), (1, 1, 7), (1, 3, 10)], + [1, 3], + [1, 2], + [0, 1], + [True, False], + [False, True], + ), + ) + def test_avg_pool1d( + self, context, input_shape, kernel_size, stride, pad, include_pad, ceil_mode, + ): + if pad > kernel_size / 2: + return + + if ceil_mode: + if kernel_size == 1 and stride == 2 and pad == 0 and input_shape[-1] == 10: + pytest.xfail("Torch ceil_mode does not match exactly with CoreML's ceil_mode. rdar://80050546") + + test_input = torch.rand(input_shape) + expected_result = F.avg_pool1d( + test_input, + kernel_size=kernel_size, + stride=stride, + padding=pad, + ceil_mode=ceil_mode, + count_include_pad=include_pad, + ) + self._test_pool( + context, + test_input, + [[kernel_size], [stride], [pad], ceil_mode, not include_pad], + "avg_pool1d", + ops.avg_pool1d, + expected_result, + ) + + @pytest.mark.parametrize( + "input_shape, kernel_size, stride, pad, include_pad, ceil_mode", + itertools.product( + [(1, 3, 15, 15), (1, 1, 7, 7), (1, 3, 10, 10)], + [1, 3], + [1, 2], + [0, 1], + [True, False], + [False, True], + ), + ) + def test_avg_pool2d( + self, context, input_shape, kernel_size, stride, pad, include_pad, ceil_mode, + ): + if pad > kernel_size / 2: + return + + if ceil_mode: + if kernel_size == 1 and stride == 2 and pad == 0 and input_shape[-1] == 10: + pytest.xfail("Torch ceil_mode does not match exactly with CoreML's ceil_mode. rdar://80050546") + + test_input = torch.rand(input_shape) + expected_result = F.avg_pool2d( + test_input, + kernel_size=kernel_size, + stride=stride, + padding=pad, + ceil_mode=ceil_mode, + count_include_pad=include_pad, + ) + self._test_pool( + context, + test_input, + [ + [kernel_size, kernel_size], + [stride, stride], + [pad, pad], + ceil_mode, + not include_pad, + None, + ], + "avg_pool2d", + ops.avg_pool2d, + expected_result, + ) + + @pytest.mark.parametrize( + "input_shape, kernel_size, stride, pad, ceil_mode", + itertools.product( + [(1, 3, 15), (1, 1, 7), (1, 3, 10)], [1, 3], [1, 2], [0, 1], [False, True] + ), + ) + def test_max_pool1d( + self, context, input_shape, kernel_size, stride, pad, ceil_mode + ): + if pad > kernel_size / 2: + return + + if ceil_mode: + if kernel_size == 1 and stride == 2 and pad == 0 and input_shape[-1] == 10: + pytest.xfail("Torch ceil_mode does not match exactly with CoreML's ceil_mode. rdar://80050546") + + test_input = torch.rand(input_shape) + expected_result = F.max_pool1d( + test_input, + kernel_size=kernel_size, + stride=stride, + padding=pad, + ceil_mode=ceil_mode, + ) + self._test_pool( + context, + test_input, + [[kernel_size], [stride], [pad], [1], ceil_mode], + "max_pool1d", + ops.max_pool1d, + expected_result, + ) + + @pytest.mark.parametrize( + "input_shape, kernel_size, stride, pad, ceil_mode", + itertools.product( + [(1, 3, 15, 15), (1, 1, 7, 7), (1, 3, 10, 10)], + [1, 3], + [1, 2], + [0, 1], + [False, True], + ), + ) + def test_max_pool2d( + self, context, input_shape, kernel_size, stride, pad, ceil_mode, + ): + if pad > kernel_size / 2: + return + + if ceil_mode: + if kernel_size == 1 and stride == 2 and pad == 0 and input_shape[-1] == 10: + pytest.xfail("Torch ceil_mode does not match exactly with CoreML's ceil_mode. 
rdar://80050546") + + test_input = torch.rand(input_shape) + expected_result = F.max_pool2d( + test_input, + kernel_size=kernel_size, + stride=stride, + padding=pad, + ceil_mode=ceil_mode, + ) + self._test_pool( + context, + test_input, + [ + [kernel_size, kernel_size], + [stride, stride], + [pad, pad], + [1, 1,], # dilation + ceil_mode, + ], + "max_pool2d", + ops.max_pool2d, + expected_result, + ) + + @pytest.mark.parametrize( + "dim, start, end, step", + itertools.product([0, 1, 2], [0, 1, 2], [3, 4, 5, None], [1, 2]), + ) + def test_slice(self, context, dim, start, end, step): + test_input = torch.rand(5, 5, 5) + constants, input_list, output_name = self._gen_constants( + 5, [test_input, dim, start, end, step] + ) + node = InternalTorchIRNode( + kind="slice", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops._slice, node, output_name, constants=constants + ) + if end is None: + end = test_input.shape[dim] + expected_result = test_input.index_select( + dim, torch.LongTensor(range(start, end, step)) + ) + np.testing.assert_allclose(expected_result, ssa.val) + + @pytest.mark.parametrize( + "split_sizes, dim, make_explicit", + itertools.product([2, 3], [0, 1, 2], [True, False]), + ) + def test_split(self, context, split_sizes, dim, make_explicit): + test_input = torch.rand(3, 4, 5) + if make_explicit: + # Explicitly provide the size of each split. This will be two + # splits, the given size and the remainder. + split_sizes = [split_sizes, test_input.shape[dim] - split_sizes] + constants, input_list, output_name = self._gen_constants( + 3, [test_input, split_sizes, dim] + ) + node = InternalTorchIRNode( + kind="split", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.split, node, output_name, constants=constants + ) + expected_result = torch.split(test_input, split_sizes, dim) + if not isinstance(ssa, list): + ssa = [ssa] + + for ex_res, ssa_res in zip(expected_result, ssa): + np.testing.assert_allclose(ex_res.numpy(), ssa_res.val, atol=1e-6) + + def test_floor(self, context): + test_input = torch.rand(1, 2, 3) * 10 + constants, input_list, output_name = self._gen_constants(1, test_input) + floor_node = InternalTorchIRNode( + kind="floor", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.floor, floor_node, output_name, constants=constants, + ) + expected_result = test_input.floor() + assert np.allclose(expected_result, ssa.val) + + def test_erf(self, context): + test_input = torch.rand(1, 2, 3, 4) + constants, input_list, output_name = self._gen_constants(1, test_input) + node = InternalTorchIRNode(kind="erf", inputs=input_list, outputs=[output_name]) + ssa = self._construct_test_graph( + context, ops.erf, node, output_name, constants=constants + ) + expected_result = test_input.erf() + assert np.allclose(expected_result, ssa.val) + + def test_implicittensortonum(self, context): + input_shape = (1,) + graph_input_name = "input1" + graph_inputs = { + graph_input_name: mb.placeholder(input_shape, dtype=types.float) + } + output_name = "1" + node = InternalTorchIRNode( + kind="implicittensortonum", inputs=["input1"], outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, + ops.implicittensortonum, + node, + output_name, + graph_inputs=graph_inputs, + ) + assert ssa.shape == () + + @pytest.mark.parametrize( + "chunks, dim", itertools.product([2, 3, 5], [0, 1, 2, 3]), + ) + def test_constantchunk(self, context, chunks, dim): + test_input = 
torch.rand(5, 8, 9, 11) + expected_result = test_input.chunk(chunks, dim=dim) + constants, input_list, first_output = self._gen_constants(1, [test_input]) + outputs = [str(int(first_output) + i) for i in range(len(expected_result))] + node = InternalTorchIRNode( + attr={"chunks": chunks, "dim": dim}, + kind="constantchunk", + inputs=input_list, + outputs=outputs, + ) + self._construct_test_graph( + context, ops.constantchunk, node, first_output, constants=constants + ) + actual_result = [context[name] for name in outputs] + + np.testing.assert_equal(len(expected_result), len(actual_result)) + for ex_res, ssa_res in zip(expected_result, actual_result): + np.testing.assert_allclose(ex_res.numpy(), ssa_res.val, atol=1e-6) + + @pytest.mark.parametrize( + "input_shape, shape", + [ + ((3, 1), (3, 4)), + ((3, 1), (-1, 4)), + ((3, 1, 1), (3, 4, 1)), + ((3, 1, 1), (3, -1, 5)), + ((3, 1, 1), (3, 4, 5)), + ((1, 3, 1, 1), (2, 3, -1, 1)), + ((1, 3, 4, 1), (2, 3, -1, 5)), + ], + ) + def test_expand(self, context, input_shape, shape): + test_input = torch.rand(input_shape) + constants, input_list, output_name = self._gen_constants(2, [test_input, shape]) + node = InternalTorchIRNode( + kind="expand", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.expand, node, output_name, constants=constants + ) + expected_result = test_input.expand(shape) + np.testing.assert_allclose(expected_result, ssa.val) + + @pytest.mark.parametrize( + "input_shape, other_shape", + [ + ((3, 1), (3, 4)), + ((3, 1, 1), (3, 4, 1)), + ((3, 1, 1), (3, 4, 5)), + ((1, 3, 1, 1), (2, 3, 4, 1)), + ((1, 3, 4, 1), (2, 3, 4, 5)), + ((1, 3, 4, 1), (1, 3, 4, 5)), + ], + ) + def test_expand_as(self, context, input_shape, other_shape): + test_input = torch.rand(input_shape) + other = torch.rand(other_shape) + constants, input_list, output_name = self._gen_constants(2, [test_input, other]) + node = InternalTorchIRNode( + kind="expand_as", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.expand_as, node, output_name, constants=constants + ) + expected_result = test_input.expand_as(other) + np.testing.assert_allclose(expected_result, ssa.val) + + @pytest.mark.parametrize( + "start, end, step", + [x for x in itertools.product((None, 0, 2), (5, 10), (None,),)] + + [x for x in itertools.product((0, 2), (5, 10), (1, 2))], + ) + def test_arange(self, context, start, end, step): + # Arange can get [end], [start, end], or [start, end, step] + args = [x for x in [start, end, step] if x is not None] + args += [0, 0, 0, False] # Extra args needed but ignored by arange + constants, input_list, output_name = self._gen_constants(len(args), args) + node = InternalTorchIRNode( + kind="arange", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.arange, node, output_name, constants=constants + ) + kwargs = {"end": end} + if start is not None: + kwargs["start"] = start + if step is not None: + kwargs["step"] = step + expected_result = torch.arange(**kwargs) + np.testing.assert_allclose(expected_result, ssa.val) + + @pytest.mark.parametrize( + "input_shape, axis", + [((2, 3), 0), ((2, 3, 4), 1), ((2, 3, 4, 5), 0), ((2, 3, 4, 5), 2),], + ) + def test_masked_fill(self, context, input_shape, axis): + mask_shape = list(input_shape) + mask_shape[axis] = 1 + mask = torch.randint(0, 1, mask_shape, dtype=torch.bool) + input_data = torch.rand(input_shape) + value = -1.0 + constants, input_list, output_name = self._gen_constants( + 3, 
[input_data, mask, value] + ) + node = InternalTorchIRNode( + kind="masked_fill", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.masked_fill, node, output_name, constants=constants + ) + expected_result = input_data.masked_fill(mask, value) + np.testing.assert_allclose(expected_result, ssa.val) + + @pytest.mark.parametrize( + "noop_kind", + ["dropout", "dropout_", "feature_dropout", "contiguous", "device", "detach"], + ) + def test_noops(self, context, noop_kind): + test_input = torch.rand(3, 4, 5) + constants, input_list, output_name = self._gen_constants( + 3, [test_input, "test", "test"] + ) + node = InternalTorchIRNode( + kind=noop_kind, inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.noop, node, output_name, constants=constants + ) + assert np.allclose(test_input.numpy(), ssa.val) + + def test_tanh(self, context): + test_input = torch.rand(3, 4, 5) + constants, input_list, output_name = self._gen_constants(1, [test_input]) + node = InternalTorchIRNode( + kind="tanh", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.tanh, node, output_name, constants=constants + ) + expected_result = torch.tanh(test_input) + assert np.allclose(expected_result.numpy(), ssa.val) + + @pytest.mark.parametrize( + "input_shape, dim, keepdim", + itertools.product([(3, 20, 20), (1, 50, 50)], [0, 1, 2], [True, False]), + ) + def test_argmax(self, context, input_shape, dim, keepdim): + test_input = torch.rand(*input_shape) + + constants, input_list, output_name = self._gen_constants( + 4, [test_input, dim, keepdim, None] + ) + node = InternalTorchIRNode( + kind="argmax", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.argmax, node, output_name, constants=constants + ) + expected_result = torch.argmax(test_input, dim, keepdim) + np.testing.assert_allclose(expected_result, ssa.val) + + @pytest.mark.parametrize( + "size, dtype", itertools.product([(1, 2, 3, 4), (1,)], [11, 0, 1, 6]), + ) + def test_zeros(self, context, size, dtype): + layout = 0 # unused + device = 0 # unused + pin_memory = 0 # unused + constants, input_list, output_name = self._gen_constants( + 5, [size, dtype, layout, device, pin_memory] + ) + node = InternalTorchIRNode( + kind="zeros", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.zeros, node, output_name, constants=constants + ) + expected_result = torch.zeros(size, dtype=ops.NUM_TO_TORCH_DTYPE[dtype]) + np.testing.assert_allclose(expected_result, ssa.val) + + @pytest.mark.parametrize("input_size", [(1, 2, 3, 4), (1,)]) + def test_exp(self, context, input_size): + test_input = torch.rand(input_size) + constants, input_list, output_name = self._gen_constants(1, test_input) + node = InternalTorchIRNode(kind="exp", inputs=input_list, outputs=[output_name]) + ssa = self._construct_test_graph( + context, ops.exp, node, output_name, constants=constants + ) + expected_result = torch.exp(test_input) + np.testing.assert_allclose(expected_result, ssa.val, rtol=1e-06) + + @pytest.mark.parametrize( + "input_size, dim, keepdim", + itertools.product([(1, 2, 3, 4)], [0, 1, 2], [True, False]), + ) + def test_max(self, context, input_size, dim, keepdim): + test_input = torch.rand(input_size) + constants, input_list, _ = self._gen_constants(3, [test_input, dim, keepdim]) + node = InternalTorchIRNode( + kind="max", inputs=input_list, outputs=["out1", "out2"], + ) + 
self._construct_test_graph(context, ops.max, node, constants=constants) + torch.max(test_input, dim=dim, keepdim=keepdim) + + @pytest.mark.parametrize( + "input_size, dim, descending", + itertools.product([(2, 3, 4), (1, 2, 3, 4)], [0, 1, 2], [True, False]), + ) + def test_sort(self, context, input_size, dim, descending): + test_input = torch.rand(input_size) + constants, input_list, output_name = self._gen_constants( + 3, [test_input, dim, descending] + ) + node = InternalTorchIRNode( + kind="sort", inputs=input_list, outputs=["out1", "out2"], + ) + self._construct_test_graph(context, ops.sort, node, constants=constants) + expected_sort, expected_index = torch.sort( + test_input, dim=dim, descending=descending + ) + sort_result = context["out1"].val + index_result = context["out2"].val + np.testing.assert_allclose(expected_sort, sort_result) + np.testing.assert_allclose(expected_index, index_result) + + @pytest.mark.parametrize( + "input_shape, dim, keepdim", + itertools.product( + [(3, 20, 20), (1, 50, 50)], + [[0], [1], [2], [0, 2]], + [True, False]), + ) + def test_sum(self, context, input_shape, dim, keepdim): + test_input = torch.rand(*input_shape) + + constants, input_list, output_name = self._gen_constants( + 4, [test_input, dim, keepdim, None] + ) + sum_node = InternalTorchIRNode( + kind="sum", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.mean, sum_node, output_name, constants=constants + ) + expected_result = torch.sum(test_input, dim, keepdim) + assert np.allclose(expected_result, ssa.val) + + def test_sum_no_dims(self, context): + test_input = torch.rand((3, 20, 20)) + + constants, input_list, output_name = self._gen_constants(2, [test_input, None]) + sum_node = InternalTorchIRNode( + kind="sum", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.mean, sum_node, output_name, constants=constants + ) + expected_result = torch.sum(test_input) + assert np.allclose(expected_result, ssa.val) + + def test_neg(self, context): + test_input = torch.rand(3, 4, 5) + constants, input_list, output_name = self._gen_constants(1, [test_input]) + node = InternalTorchIRNode( + kind="neg", inputs=input_list, outputs=[output_name] + ) + ssa = self._construct_test_graph( + context, ops.neg, node, output_name, constants=constants + ) + expected_result = torch.neg(test_input) + assert np.allclose(expected_result.numpy(), ssa.val) + + @pytest.mark.parametrize( + "input_shape, k, dim, largest", + itertools.product([(5, 10, 10), (10, 5, 5)], [0, 3, 5], [0, 1, 2], [True, False]), + ) + def test_topk(self, context, input_shape, k, dim, largest): + test_input = torch.tensor(random_gen(input_shape, allow_duplicate=False)) + + constants, input_list, output_name = self._gen_constants( + 6, [test_input, k, dim, largest, True, None] + ) + topk_node = InternalTorchIRNode( + kind="topk", inputs=input_list, outputs=["out1", "out2"] + ) + self._construct_test_graph( + context, ops.topk, topk_node, constants=constants + ) + topk_result = context["out1"].val + index_result = context["out2"].val + + expected_max, expected_indices = torch.topk(test_input, k, dim, largest) + np.testing.assert_allclose(expected_max.numpy(), topk_result) + np.testing.assert_allclose(expected_indices.numpy(), index_result) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_passes.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_passes.py new 
file mode 100644 index 00000000..423cedb9 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_passes.py @@ -0,0 +1,371 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause +from collections import OrderedDict + +import numpy as np +import pytest +import torch + +from ..internal_graph import ( + InternalTorchIRBlock, + InternalTorchIRGraph, + InternalTorchIRNode, +) +from ..torchir_passes import ( + flatten_graph_input_values, + flatten_graph_output_values, + transform_inplace_ops, +) + + +def _build_flattening_test_graph(): + # This test graph is: + # graph( + # %1 : (Tensor[1, 1], (Tensor[1, 2], Tensor[1, 3])) + # ): + # %2, %3 = tupleunpack[](%1) + # %4, %5 = tupleunpack[](%3) + # %6 = tupleconstruct[](%2, %4) + # %7 = tupleconstruct[](%6, %5) + # return (%7) + # + # And if you were to run the graph it would turn + # (a, (b, c)) + # into + # ((a, b), c) + + graph_params = {} + graph_inputs = OrderedDict() + graph_inputs["1"] = ( + torch.rand(1, 1), + ( + torch.rand(1, 2), + torch.rand(1, 3), + ), + ) + graph_nodes = [ + InternalTorchIRNode( + inputs=["1"], + outputs=["2", "3"], + kind="tupleunpack", + ), + InternalTorchIRNode( + inputs=["3"], + outputs=["4", "5"], + kind="tupleunpack", + ), + InternalTorchIRNode( + inputs=["2", "4"], + outputs=["6"], + kind="tupleconstruct", + ), + InternalTorchIRNode( + inputs=["6", "5"], + outputs=["7"], + kind="tupleconstruct", + ), + ] + graph_outputs = ["7"] + + return InternalTorchIRGraph( + nodes=graph_nodes, + params=graph_params, + inputs=graph_inputs, + outputs=graph_outputs, + ) + + +class TestTorchPasses: + """Class containing tests for InternalTorchIR optimization passes. + """ + + @pytest.fixture + def set_random_seeds(self): + torch.manual_seed(1) + np.random.seed(1) + + def test_flatten_input_values(self): + graph = _build_flattening_test_graph() + + flatten_graph_input_values(graph) + + # The graph input tuple should have been flattened. + np.testing.assert_equal(len(graph.inputs.keys()), 3) + # Tuple flattening should introduce two new ops. + np.testing.assert_equal(len(graph.nodes), 6) + # The new ops at the beginning of the graph should be a tupleconstruct. + np.testing.assert_equal(graph.nodes[0].kind, "tupleconstruct") + np.testing.assert_equal(graph.nodes[1].kind, "tupleconstruct") + # The inputs to the tupleconstructs should be the new flattened inputs. + input_names = [k for k in graph.inputs.keys()] + np.testing.assert_equal(input_names[1:], graph.nodes[0].inputs) + np.testing.assert_equal(input_names[0], graph.nodes[1].inputs[0]) + np.testing.assert_equal(graph.nodes[0].outputs[0], graph.nodes[1].inputs[1]) + # The last inserted tuple construct should produce the input for the + # next op. + np.testing.assert_equal(graph.nodes[1].outputs[0], graph.nodes[2].inputs[0]) + + def test_flatten_output_values(self): + graph = _build_flattening_test_graph() + + flatten_graph_output_values(graph) + + # The graph output tuple should have been flattened. + np.testing.assert_equal(len(graph.outputs), 3) + # The outputs of the graph should come from intermediate ops. 
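+        # Concretely, flattening ((a, b), c) should expose the leaf values
+        # %2, %4, and %5 (produced by the two tupleunpack nodes) as the
+        # graph outputs, which the checks below verify.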
+ np.testing.assert_equal(graph.outputs[0], graph.nodes[0].outputs[0]) + np.testing.assert_equal(graph.outputs[1], graph.nodes[1].outputs[0]) + np.testing.assert_equal(graph.outputs[2], graph.nodes[1].outputs[1]) + + def test_transform_inplace_ops_graph(self): + # The test graph is: + # graph( + # %x : Tensor[1], + # ): + # %1 = constant[value=0]() + # %2 = constant[value=10]() + # %3 = listconstruct[](%1) + # %4 = append[](%3, %2) + # return (%3) + graph_params = {} + graph_inputs = OrderedDict() + graph_inputs["x"] = torch.rand(1) + graph_nodes = [ + InternalTorchIRNode( + inputs=[], + attr={"value": 0}, + outputs=["1"], + kind="constant", + ), + InternalTorchIRNode( + inputs=[], + attr={"value": 10}, + outputs=["2"], + kind="constant", + ), + InternalTorchIRNode( + inputs=["1"], + outputs=["3"], + kind="listconstruct", + ), + InternalTorchIRNode( + inputs=["3", "2"], + outputs=["4"], + kind="append", + ), + ] + graph_outputs = ["3"] + graph = InternalTorchIRGraph( + nodes=graph_nodes, + params=graph_params, + inputs=graph_inputs, + outputs=graph_outputs, + ) + for node in graph.nodes: + node.parent = graph + + transform_inplace_ops(graph) + + np.testing.assert_equal(len(graph.outputs), 1) + np.testing.assert_equal(graph.outputs[0], graph.nodes[-1].outputs[0]) + + def test_transform_inplace_ops_loop(self): + # The test graph is: + # graph( + # %x : Tensor[1], + # ): + # %1 = constant[value=True]() + # %2 = constant[value=-1]() + # %3 = constant[value=10]() + # %4 = listconstruct[](%2) + # = loop[](%3, %1) + # block(%i.1): + # %6 = append[](%4, %i.1) + # return (%1) + # return (%4) + graph_params = {} + graph_inputs = OrderedDict() + graph_inputs["x"] = torch.rand(1) + loop_block = InternalTorchIRBlock( + inputs=["i.1"], + outputs=["1"], + nodes=[ + InternalTorchIRNode( + inputs=["4", "i.1"], + outputs=["6"], + kind="append", + ), + ], + ) + loop_block.nodes[0].parent = loop_block + loop_node = InternalTorchIRNode( + inputs=["3", "1"], + outputs=[], + kind="loop", + blocks=[loop_block], + ) + loop_block.parent = loop_node + graph_nodes = [ + InternalTorchIRNode( + inputs=[], + attr={"value": True}, + outputs=["1"], + kind="constant", + ), + InternalTorchIRNode( + inputs=[], + attr={"value": -1}, + outputs=["2"], + kind="constant", + ), + InternalTorchIRNode( + inputs=[], + attr={"value": 10}, + outputs=["3"], + kind="constant", + ), + InternalTorchIRNode( + inputs=["2"], + outputs=["4"], + kind="listconstruct", + ), + loop_node, + ] + graph_outputs = ["4"] + graph = InternalTorchIRGraph( + nodes=graph_nodes, + params=graph_params, + inputs=graph_inputs, + outputs=graph_outputs, + ) + for node in graph.nodes: + node.parent = graph + + transform_inplace_ops(graph) + + # There should be an additional input to the loop. + np.testing.assert_equal(len(loop_node.inputs), 3) + # That input should be the output of the previous op. + np.testing.assert_equal(loop_node.inputs[2], graph.nodes[3].outputs[0]) + # The loop block should have an additional input. + np.testing.assert_equal(len(loop_block.inputs), 2) + # The loop block's new input should be the input to append. + np.testing.assert_equal(loop_block.inputs[1], loop_block.nodes[0].inputs[0]) + # The loop block should have an additional output. + np.testing.assert_equal(len(loop_block.outputs), 2) + # Append's output should be returned from the loop block. + np.testing.assert_equal(loop_block.outputs[1], loop_block.nodes[0].outputs[0]) + # The loop should now have an output. 
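+        # (transform_inplace_ops threads the mutated list through the loop
+        # as an explicit input/output pair, so the loop gains both.)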
+ np.testing.assert_equal(len(loop_node.outputs), 1) + # The loop's name should now be the name of its output. + np.testing.assert_equal(loop_node.name, loop_node.outputs[0]) + # That graph output should now be the output of the graph. + np.testing.assert_equal(loop_node.outputs[0], graph.outputs[0]) + + @pytest.mark.xfail(reason="rdar://64235006") + def test_transform_inplace_ops_if(self): + # The test graph is: + # graph( + # %x : Tensor[1], + # ): + # %1 = constant[value=True]() + # %2 = constant[value=0]() + # %3 = constant[value=1]() + # %4 = listconstruct[](%2) + # = if[](%1) + # block0(): + # %5 = append[](%4, %3) + # return () + # block1(): + # %6 = append[](%4, %2) + # return () + # return (%4) + graph_params = {} + graph_inputs = OrderedDict() + graph_inputs["x"] = torch.rand(1) + if_true_block = InternalTorchIRBlock( + inputs=[], + outputs=[], + nodes=[ + InternalTorchIRNode( + inputs=["4", "3"], + outputs=["5"], + kind="append", + ), + ], + ) + if_true_block.nodes[0].parent = if_true_block + if_false_block = InternalTorchIRBlock( + inputs=[], + outputs=[], + nodes=[ + InternalTorchIRNode( + inputs=["4", "2"], + outputs=["6"], + kind="append", + ), + ], + ) + if_false_block.nodes[0].parent = if_false_block + if_node = InternalTorchIRNode( + inputs=["1"], + outputs=[], + kind="if", + blocks=[if_true_block, if_false_block], + ) + if_true_block.parent = if_node + if_false_block.parent = if_node + graph_nodes = [ + InternalTorchIRNode( + inputs=[], + attr={"value": True}, + outputs=["1"], + kind="constant", + ), + InternalTorchIRNode( + inputs=[], + attr={"value": 0}, + outputs=["2"], + kind="constant", + ), + InternalTorchIRNode( + inputs=[], + attr={"value": 1}, + outputs=["3"], + kind="constant", + ), + InternalTorchIRNode( + inputs=["2"], + outputs=["4"], + kind="listconstruct", + ), + if_node, + ] + graph_outputs = ["4"] + graph = InternalTorchIRGraph( + nodes=graph_nodes, + params=graph_params, + inputs=graph_inputs, + outputs=graph_outputs, + ) + for node in graph.nodes: + node.parent = graph + + transform_inplace_ops(graph) + + # The true block should now have an output. + np.testing.assert_equal(len(if_true_block.outputs), 1) + # The true block should output the result of the append op. + np.testing.assert_equal(if_true_block.outputs[0], if_true_block.nodes[0].outputs[0]) + # The false block should now have an output. + np.testing.assert_equal(len(if_false_block.outputs), 1) + # The false block should output the result of the append op. + np.testing.assert_equal(if_false_block.outputs[0], if_false_block.nodes[0].outputs[0]) + # The if op should have an additional output. + np.testing.assert_equal(len(if_node.outputs), 1) + # The if's name should now be the name of its output. + np.testing.assert_equal(if_node.name, if_node.outputs[0]) + # The graph output should be the if op output. + np.testing.assert_equal(if_node.outputs[0], graph.outputs[0]) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_torch_conversion_api.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_torch_conversion_api.py new file mode 100644 index 00000000..bd7d1ce1 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_torch_conversion_api.py @@ -0,0 +1,1401 @@ +# Copyright (c) 2022, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools +import os + +import numpy as np +import pytest +from PIL import Image + +import coremltools as ct +from coremltools._deps import _HAS_TORCH, MSG_TORCH_NOT_FOUND +from coremltools.converters.mil.frontend.torch.test.testing_utils import \ + _copy_input_data +from coremltools.converters.mil.testing_utils import ( + assert_cast_ops_count, assert_input_dtype, assert_ops_in_mil_program, + assert_output_dtype, assert_prog_input_type, assert_prog_output_type, + assert_spec_input_image_type, assert_spec_output_image_type, + verify_prediction) +from coremltools.proto import FeatureTypes_pb2 as ft +from coremltools.test.api.test_api_examples import TestInputs as _TestInputs + +if _HAS_TORCH: + import torch + import torchvision + +################################################################################# +# Note: all tests are also used as examples in https://coremltools.readme.io/docs +# as a reference. +# Whenever any of the following tests fails, we should update the API documentation. +################################################################################# + +@pytest.mark.skipif(not _HAS_TORCH, reason=MSG_TORCH_NOT_FOUND) +class TestPyTorchConverterExamples: + @staticmethod + def test_convert_torch_vision_mobilenet_v2(tmpdir): + """ + In this example, we'll instantiate a PyTorch classification model and convert + it to Core ML. + """ + + """ + Here we instantiate our model. In a real use case this would be your trained + model. + """ + model = torchvision.models.mobilenet_v2() + + """ + The next thing we need to do is generate TorchScript for the model. The easiest + way to do this is by tracing it. + """ + + """ + It's important that a model be in evaluation mode (not training mode) when it's + traced. This makes sure things like dropout are disabled. + """ + model.eval() + + """ + Tracing takes an example input and traces its flow through the model. Here we + are creating an example image input. + + The rank and shape of the tensor will depend on your model use case. If your + model expects a fixed-size input, use that size here. If it can accept a + variety of input sizes, it's generally best to keep the example input small to + shorten how long it takes to run a forward pass of your model. In all cases, + the rank of the tensor must be fixed. + """ + example_input = torch.rand(1, 3, 256, 256) + + """ + Now we actually trace the model. This will produce the TorchScript that the + Core ML converter needs. + """ + traced_model = torch.jit.trace(model, example_input) + + """ + Now, with a TorchScript representation of the model, we can call the Core ML + converter. The converter also needs a description of the input to the model, + where we can give it a convenient name. + """ + mlmodel = ct.convert( + traced_model, + inputs=[ct.TensorType(name="input", shape=example_input.shape)], + ) + + """ + Now, with the conversion complete, we can save the MLModel and run inference. + """ + save_path = os.path.join(str(tmpdir), "mobilenet_v2.mlmodel") + mlmodel.save(save_path) + + """ + Running predict() is only supported on macOS. 
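+        That is why the predict() call below is guarded with
+        ct.utils._is_macos().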
+ """ + if ct.utils._is_macos(): + results = mlmodel.predict({"input": example_input.numpy()}) + assert isinstance(results, dict) + + @staticmethod + def test_convert_torch_traced_model_to_milinternal(tmpdir): + from torch import nn + class Network(nn.Module): + def __init__(self): + super(Network, self).__init__() + self.hidden = nn.Linear(100, 10) + self.output = nn.Linear(10, 2) + self.sigmoid = nn.Sigmoid() + self.softmax = nn.Softmax(dim=1) + + def forward(self, x): + x = self.hidden(x) + x = self.sigmoid(x) + x = self.output(x) + x = self.softmax(x) + return x + + torch_model = Network() + torch_model.eval() + example_input = torch.rand(1, 100) + traced_model = torch.jit.trace(torch_model, example_input) + model = ct.convert( + traced_model, + inputs=[ct.TensorType(name="input", shape=example_input.shape)], + convert_to='milinternal' + ) + assert isinstance(model, ct.converters.mil.Program) + + @staticmethod + def test_torch_classifier(): + class Net(torch.nn.Module): + def __init__(self): + super(Net, self).__init__() + self.linear1 = torch.nn.Linear(28 * 28, 100) + self.linear2 = torch.nn.Linear(100, 50) + self.final = torch.nn.Linear(50, 10) + self.relu = torch.nn.ReLU() + + def forward(self, img): # convert + flatten + x = img.view(-1, 28 * 28) + x = self.relu(self.linear1(x)) + x = self.relu(self.linear2(x)) + x = self.final(x) + return x + model = Net() + model.eval() + example_input = torch.rand(1, 28 * 28, 1) + traced_model = torch.jit.trace(model, example_input) + traced_model.eval() + + def _test_classifier(traced_model, example_input, class_type, backend): + label = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + if class_type == "str": + label = list(map(lambda x: str(x), label)) + classifier_config = ct.ClassifierConfig(label) + mlmodel = ct.convert( + traced_model, + source='pytorch', + convert_to=backend, + inputs=[ + ct.TensorType( + name="input", + shape=example_input.shape, + dtype=example_input.numpy().dtype, + ) + ], + classifier_config=classifier_config + ) + if ct.utils._is_macos(): + coreml_out = mlmodel.predict({"input": example_input.detach().numpy()}) + assert "classLabel" in coreml_out + key_type = str if class_type == "str" else int + assert isinstance(coreml_out["classLabel"], key_type) + + for class_type in ("str", "int"): + _test_classifier(traced_model, example_input, class_type, "neuralnetwork") + if ct.utils._macos_version() >= (12, 0): + _test_classifier(traced_model, example_input, class_type, "mlprogram") + + @staticmethod + @pytest.mark.parametrize("convert_to", ['neuralnetwork', 'mlprogram']) + def test_convert_to_argument_with_torch_model(tmpdir, convert_to): + class Network(torch.nn.Module): + def __init__(self): + super(Network, self).__init__() + self.hidden = torch.nn.Linear(30, 5) + self.relu = torch.nn.ReLU() + + def forward(self, x): + x = self.hidden(x) + return self.relu(x) + + torch_model = Network() + torch_model.eval() + example_input = torch.rand(1, 30) + traced_model = torch.jit.trace(torch_model, example_input) + model = ct.convert( + traced_model, + inputs=[ct.TensorType(name="input", shape=example_input.shape)], + convert_to=convert_to + ) + assert isinstance(model, ct.models.MLModel) + spec = model.get_spec() + if convert_to == "mlprogram": + assert spec.WhichOneof('Type') == 'mlProgram' + else: + assert spec.WhichOneof('Type') == 'neuralNetwork' + + @staticmethod + def test_deployment_target_argument_with_torch_model(): + class Network(torch.nn.Module): + def __init__(self): + super(Network, self).__init__() + self.hidden = 
torch.nn.Linear(30, 5) + self.relu = torch.nn.ReLU() + + def forward(self, x): + x = self.hidden(x) + return self.relu(x) + + torch_model = Network() + torch_model.eval() + example_input = torch.rand(1, 30) + traced_model = torch.jit.trace(torch_model, example_input) + + # convert to 'neuralnetwork' by specifying an iOS13 target + model = ct.convert( + traced_model, + inputs=[ct.TensorType(name="input", shape=example_input.shape)], + minimum_deployment_target=ct.target.iOS13, + ) + assert isinstance(model, ct.models.MLModel) + assert model.get_spec().WhichOneof('Type') == 'neuralNetwork' + + # convert to 'mlprogram' by specifying an iOS15 target + model = ct.convert( + traced_model, + inputs=[ct.TensorType(name="input", shape=example_input.shape)], + minimum_deployment_target=ct.target.iOS15, + ) + assert isinstance(model, ct.models.MLModel) + assert model.get_spec().WhichOneof('Type') == 'mlProgram' + + # verify an error is raised when convert_to="neuralnetwork" and target is iOS15 + with pytest.raises(ValueError) as e: + model = ct.convert( + traced_model, + inputs=[ct.TensorType(name="input", shape=example_input.shape)], + convert_to="neuralnetwork", + minimum_deployment_target=ct.target.iOS15, + ) + expected_error = "If minimum deployment target is iOS15/macOS12/watchOS8/tvOS15 or higher, " \ + "then 'convert_to' cannot be neuralnetwork. It must be 'mlprogram'" + assert expected_error == str(e.value) + + # verify an error is raised when convert_to="mlprogram" and target is less than iOS15 + with pytest.raises(ValueError) as e: + model = ct.convert( + traced_model, + inputs=[ct.TensorType(name="input", shape=example_input.shape)], + convert_to="mlprogram", + minimum_deployment_target=ct.target.iOS14, + ) + expected_error = "When 'convert_to' is mlprogram, the minimum deployment target " \ + "must be at least iOS15/macOS12/watchOS8/tvOS15" + assert expected_error == str(e.value) + + @staticmethod + def test_get_milprogram_method_with_torch_model(): + class Network(torch.nn.Module): + def __init__(self): + super(Network, self).__init__() + self.hidden = torch.nn.Linear(100, 10) + self.relu = torch.nn.ReLU() + + def forward(self, x): + x = self.hidden(x) + x = self.relu(x) + return x + + torch_model = Network() + torch_model.eval() + example_input = torch.rand(1, 100) + traced_model = torch.jit.trace(torch_model, example_input) + model = ct.convert( + traced_model, + inputs=[ct.TensorType(shape=example_input.shape)], + convert_to='mlprogram' + ) + assert isinstance(model._get_mil_internal(), ct.converters.mil.Program) + + @staticmethod + @pytest.mark.skipif(ct.utils._macos_version() < (12, 0), reason='Model produces specification 6.') + @pytest.mark.parametrize( + "convert_to, provide_prob_output_argument", + itertools.product( + ["neuralnetwork", "mlprogram"], + [False, True], + ) + ) + def test_classifier_from_torch_model(convert_to, provide_prob_output_argument): + torch_model = torch.nn.ReLU().eval() + traced_model = torch.jit.trace(torch_model, torch.rand(3,)) + variable_name = "var_2" + class_label_name = "class_label" + classifier_config = ct.ClassifierConfig( + class_labels=['a', 'b', 'c'], + predicted_feature_name=class_label_name, + predicted_probabilities_output=variable_name if provide_prob_output_argument else None, + ) + + model = ct.convert( + traced_model, + inputs=[ct.TensorType(shape=(3,))], + classifier_config = classifier_config, + convert_to=convert_to, + ) + spec = model.get_spec() + input_name = spec.description.input[0].name + out_dict = model.predict({input_name : 
np.array([1.0, 2.0, 3.0])}) + + assert class_label_name in out_dict + assert out_dict[class_label_name] == 'c' + if convert_to == "neuralnetwork": + assert variable_name in out_dict + assert isinstance(out_dict[variable_name], dict) + else: + output_dict_feature_name = class_label_name + "_probs" + assert output_dict_feature_name in out_dict + assert isinstance(out_dict[output_dict_feature_name], dict) + +############################################################################### +# Note: Stress tests for PyTorch input / output types +############################################################################### + +@pytest.mark.skipif(ct.utils._macos_version() < (10, 15), reason='Model produces specification 4.') +@pytest.mark.skipif(not _HAS_TORCH, reason=MSG_TORCH_NOT_FOUND) +class TestTorchInputs(_TestInputs): + @staticmethod + @pytest.mark.skipif(not ct.utils._is_macos(), reason="test needs predictions") + def test_torch_predict_input(): + TestTorchInputs._test_variant_input_type_prediction(torch.tensor) + + @staticmethod + def test_int64_inputs(): + + num_tokens = 3 + embedding_size = 5 + + class TestModule(torch.nn.Module): + def __init__(self): + super(TestModule, self).__init__() + self.embedding = torch.nn.Embedding(num_tokens, + embedding_size) + + def forward(self, x): + return self.embedding(x) + + model = TestModule() + model.eval() + + example_input = torch.randint(high=num_tokens, size=(2,), dtype=torch.int64) + traced_model = torch.jit.trace(model, example_input) + mlmodel = ct.convert( + traced_model, + inputs=[ + ct.TensorType( + name="input", + shape=example_input.shape, + dtype=example_input.numpy().dtype, + ) + ], + ) + + # running predict() is supported on macOS + if ct.utils._is_macos(): + result = mlmodel.predict( + {"input": example_input.detach().numpy().astype(np.float32)} + ) + + # Verify outputs + expected = model(example_input) + name = list(result.keys())[0] + np.testing.assert_allclose(result[name], expected.detach().numpy()) + + # Duplicated inputs are invalid + with pytest.raises(ValueError, match=r"Duplicated inputs"): + mlmodel = ct.convert( + traced_model, + inputs=[ + ct.TensorType( + name="input", + shape=example_input.shape, + dtype=example_input.numpy().dtype, + ), + ct.TensorType( + name="input", + shape=example_input.shape, + dtype=example_input.numpy().dtype, + ), + ], + ) + + # Outputs must be of type ct.ImageType or ct.TensorType + with pytest.raises(ValueError, match=r"must be a list of type ct.TensorType or ct.ImageType"): + mlmodel = ct.convert( + traced_model, + inputs=[ + ct.TensorType( + name="input", + shape=example_input.shape, + dtype=example_input.numpy().dtype, + ), + ], + outputs=["output"], + ) + + @staticmethod + def test_fully_dynamic_inputs(): + """ + All dims of the inputs are dynamic, and write to slice to one of the + inputs. 
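+        (The model zeroes one column of x in place, exercising a slice
+        assignment when every dimension is runtime-determined.)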
+ """ + + class Model(torch.nn.Module): + def __init__(self, index): + super(Model, self).__init__() + self.index = index + + def forward(self, x, y): + x[:, int(self.index.item())] = 0.0 + y = y.unsqueeze(0) + return y, x + + model = Model(torch.tensor(3)) + scripted_model = torch.jit.script(model) + + mlmodel = ct.convert( + scripted_model, + inputs=[ + ct.TensorType("x", shape=(ct.RangeDim(), ct.RangeDim())), + ct.TensorType("y", shape=(ct.RangeDim(), ct.RangeDim())) + ], + ) + + # running predict() is supported on macOS + if ct.utils._is_macos(): + x, y = torch.rand(2, 4), torch.rand(1, 2) + torch_input = _copy_input_data([x, y]) + torch_res = model(*torch_input) + results = mlmodel.predict({"x": x.cpu().detach().numpy(), + "y": y.cpu().detach().numpy()}) + for i, name in enumerate(mlmodel.output_description): + np.testing.assert_allclose(torch_res[i], results[name]) + + x, y = torch.rand(1, 6), torch.rand(2, 3) + torch_input = _copy_input_data([x, y]) + torch_res = model(*torch_input) + results = mlmodel.predict({"x": x.cpu().detach().numpy(), + "y": y.cpu().detach().numpy()}) + for i, name in enumerate(mlmodel.output_description): + np.testing.assert_allclose(torch_res[i], results[name]) + + @staticmethod + def test_rank0_inputs_torch(): + """Similar to TestPyTorchConverterExamples::test_int64_inputs but + using rank-0 int input. + """ + + num_tokens = 3 + embedding_size = 5 + + class TestModule(torch.nn.Module): + def __init__(self): + super(TestModule, self).__init__() + self.embedding = torch.nn.Embedding(num_tokens, + embedding_size) + + def forward(self, x): + return self.embedding(x) + + model = TestModule() + model.eval() + + example_input = torch.tensor(1) + traced_model = torch.jit.trace(model, example_input) + with pytest.raises(ValueError, match=r"Rank-0"): + mlmodel = ct.convert( + traced_model, + inputs=[ + ct.TensorType( + name="input", + shape=example_input.shape, + dtype=example_input.numpy().dtype, + ) + ], + ) + + @staticmethod + @pytest.mark.parametrize("variable_length", [True, False]) + def test_torch_range_dim_lstm(variable_length): + """ + This example shows how to run LSTM with previous hidden / cell states + """ + + input_size = 3 + hidden_size = 2 + + class TestNet(torch.nn.Module): + def __init__(self): + super(TestNet, self).__init__() + self.lstm = torch.nn.LSTM(input_size, hidden_size, 1) + + def forward(self, x, hidden_state, cell_state): + # LSTM takes in previous hidden and cell states. The first + # invokation usually have zero vectors as initial states. + output, (new_hidden_state, new_cell_state) = \ + self.lstm(x, (hidden_state, cell_state)) + # LSTM hidden / cell states are returned to be managed by the + # caller (and is fed in as inputs in the next call). + return output, new_hidden_state, new_cell_state + + model = TestNet() + model.eval() + + seq_len = 2 # we'll make seq_len dynamic later + batch = 1 + input_shape = (seq_len, batch, input_size) + rand_input = torch.rand(*input_shape) + h_shape = (1, batch, hidden_size) + rand_h0 = torch.rand(*h_shape) + rand_c0 = torch.rand(*h_shape) + + traced_model = torch.jit.trace(model, (rand_input, rand_h0, rand_c0)) + + # ct.RangeDim() tells coremltools that this dimension can change for + # each inference example (aka "runtime-determined"). If the sequence + # length is always the same (e.g., 2 step LSTM would have seq_len == 2) + # Note that fixed-length models usually run slightly faster + # than variable length models. 
+ ct_seq_len = ct.RangeDim() if variable_length else seq_len + seq_input = ct.TensorType(shape=(ct_seq_len, batch, input_size), + name="seq_input") + h_input = ct.TensorType(shape=h_shape, name="h_input") + c_input = ct.TensorType(shape=h_shape, name="c_input") + + mlmodel = ct.convert( + traced_model, + inputs=[seq_input, h_input, c_input], + ) + + if ct.utils._is_macos(): + result = mlmodel.predict( + {"seq_input": rand_input.detach().numpy().astype(np.float32), + "h_input": rand_h0.detach().numpy().astype(np.float32), + "c_input": rand_c0.detach().numpy().astype(np.float32), + } + ) + + # Verify outputs + expected = model(rand_input, rand_h0, rand_c0) + names = list(result.keys()) + names.sort() + np.testing.assert_allclose(result[names[0]], + expected[0].detach().numpy(), atol=1e-4) + np.testing.assert_allclose(result[names[1]], + expected[1].detach().numpy(), atol=1e-4) + np.testing.assert_allclose(result[names[2]], + expected[2].detach().numpy(), atol=1e-4) + + # Try example of different length + if variable_length: + seq_len = 10 + input_shape = (seq_len, batch, input_size) + rand_input = torch.rand(*input_shape) + + result = mlmodel.predict( + {"seq_input": rand_input.detach().numpy().astype(np.float32), + "h_input": rand_h0.detach().numpy().astype(np.float32), + "c_input": rand_c0.detach().numpy().astype(np.float32), + } + ) + expected = model(rand_input, rand_h0, rand_c0) + names = list(result.keys()) + names.sort() + np.testing.assert_allclose(result[names[0]], + expected[0].detach().numpy(), atol=1e-4) + np.testing.assert_allclose(result[names[1]], + expected[1].detach().numpy(), atol=1e-4) + np.testing.assert_allclose(result[names[2]], + expected[2].detach().numpy(), atol=1e-4) + + @staticmethod + @pytest.mark.parametrize("use_symbol", [True, False]) + def test_torch_outofbound_range_dim(use_symbol): + + num_tokens = 3 + embedding_size = 5 + + class TestModule(torch.nn.Module): + def __init__(self): + super(TestModule, self).__init__() + self.embedding = torch.nn.Embedding(num_tokens, embedding_size) + + def forward(self, x): + return self.embedding(x) + + model = TestModule() + model.eval() + + example_input = torch.randint(high=num_tokens, size=(3,), + dtype=torch.int64) + traced_model = torch.jit.trace(model, example_input) + + if use_symbol: + seq_len_dim = ct.RangeDim(symbol='len', lower_bound=3, + upper_bound=5) + else: + # symbol is optional + seq_len_dim = ct.RangeDim(lower_bound=3, upper_bound=5) + seq_input = ct.TensorType(name="input", shape=(seq_len_dim,), + dtype=np.int64) + mlmodel = ct.convert( + traced_model, + inputs=[seq_input], + ) + + if ct.utils._is_macos(): + result = mlmodel.predict( + {"input": example_input.detach().numpy().astype(np.float32)} + ) + + # Verify outputs + expected = model(example_input) + name = list(result.keys())[0] + np.testing.assert_allclose(result[name], expected.detach().numpy()) + + # seq_len below/above lower_bound/upper_bound + with pytest.raises(RuntimeError, + match=r"Size \(99\) of dimension \(0\) is not in allowed range \(3\.\.5\)"): + example_input2 = torch.randint(high=num_tokens, size=(99,), + dtype=torch.int64) + result = mlmodel.predict( + {"input": example_input2.detach().numpy().astype(np.float32)} + ) + + with pytest.raises(RuntimeError, + match=r"Size \(2\) of dimension \(0\) is not in allowed range \(3\.\.5\)"): + example_input2 = torch.randint(high=num_tokens, size=(2,), + dtype=torch.int64) + result = mlmodel.predict( + {"input": example_input2.detach().numpy().astype(np.float32)} + ) + + @staticmethod + def 
test_torch_enumerated_shapes(): + + in_channels = 3 + out_channels = 2 + kernel_size = 3 + + class TestModule(torch.nn.Module): + def __init__(self): + super(TestModule, self).__init__() + self.conv = torch.nn.Conv2d(in_channels, out_channels, + kernel_size) + + def forward(self, x): + return self.conv(x) + + model = TestModule() + model.eval() + + example_input = torch.randn(1, 3, 28, 28) + traced_model = torch.jit.trace(model, example_input) + + shapes = [(1, 3, 28, 28), (1, 3, 56, 56)] + enumerated_shapes = ct.EnumeratedShapes(shapes=shapes) + tensor_input = ct.TensorType(name="input", shape=enumerated_shapes) + + mlmodel = ct.convert( + traced_model, + inputs=[tensor_input], + compute_units=ct.ComputeUnit.CPU_ONLY + ) + + if ct.utils._is_macos(): + result = mlmodel.predict( + {"input": example_input.detach().numpy().astype(np.float32)}, + ) + + # Verify outputs + expected = model(example_input) + name = list(result.keys())[0] + np.testing.assert_allclose(result[name], expected.detach().numpy(), + rtol=1e-3, atol=1e-4) + + # Test (1, 3, 56, 56) shape (can't verify numerical parity with Torch + # which doesn't support enumerated shape) + test_input_x = np.random.rand(*shapes[1]).astype(np.float32) + mlmodel.predict({"input": test_input_x}) + + # Test with a wrong shape + with pytest.raises(RuntimeError, + match=r"MultiArray Shape \(1 x 3 x 29 x 29\) was not in enumerated set of allowed shapes"): + test_input_x = np.random.rand(1, 3, 29, 29).astype(np.float32) + mlmodel.predict({"input": test_input_x}) + + @staticmethod + def test_torch_image_enumerated_shapes(): + import torchvision + torch_model = torchvision.models.mobilenet_v2().features + torch_model.eval() + example_input = torch.rand(1, 3, 256, 256) + traced_model = torch.jit.trace(torch_model, example_input) + input_shapes = ct.EnumeratedShapes(shapes=[(1, 3, 256, 256), (1, 3, 224, 224)]) + image_input = ct.ImageType(shape=input_shapes, + bias=[-1, -1, -1], scale=1 / 127) + model = ct.convert(traced_model, inputs=[image_input]) + assert model is not None + spec = model.get_spec() + assert len(spec.description.input[0].type.imageType.enumeratedSizes.sizes) == 2 + + @staticmethod + def test_torch_optional_input(): + + num_tokens = 3 + embedding_size = 5 + + class TestModule(torch.nn.Module): + def __init__(self): + super(TestModule, self).__init__() + self.embedding = torch.nn.Embedding(num_tokens, embedding_size) + + def forward(self, x, y): + return self.embedding(x) + y + + model = TestModule() + model.eval() + + example_input = [ + torch.randint(high=num_tokens, size=(2,), dtype=torch.int64), + torch.rand(1), + ] + traced_model = torch.jit.trace(model, example_input) + + required_input = ct.TensorType( + name="required_input", shape=(ct.RangeDim(),), dtype=np.int64) + default_value = np.array([3]).astype(np.float32) + optional_input = ct.TensorType(name="optional_input", shape=(1,), + default_value=default_value) + + for compute_units in ct.ComputeUnit: + if compute_units == ct.ComputeUnit.CPU_AND_NE and ct.utils._macos_version() < (13, 0): + continue + + mlmodel = ct.convert( + traced_model, + inputs=[required_input, optional_input], + compute_units=compute_units, + ) + + assert(mlmodel.compute_unit == compute_units) + + if ct.utils._is_macos(): + result = mlmodel.predict( + {"required_input": + example_input[0].detach().numpy().astype(np.float32)} + ) + + # Verify outputs + torch_default_value = torch.tensor([3]) + expected = model(example_input[0].detach(), torch_default_value) + name = list(result.keys())[0] + 
np.testing.assert_allclose(result[name], expected.detach().numpy()) + + +@pytest.fixture +def int32_input_model(): + class Model(torch.nn.Module): + def forward(self, x): + return x + 5 + example_input = torch.randint(0, 100, (10, 20), dtype=torch.int32) + return torch.jit.trace(Model().eval(), example_input) + +@pytest.fixture +def int64_input_model(): + class Model(torch.nn.Module): + def forward(self, x): + return x + 5 + example_input = torch.randint(0, 100, (10, 20), dtype=torch.int64) + return torch.jit.trace(Model().eval(), example_input) + +@pytest.fixture +def float32_input_model_add_op(): + class Model(torch.nn.Module): + def forward(self, x): + return x + 5.5 + example_input = torch.randint(0, 100, (10, 20), dtype=torch.float32) + return torch.jit.trace(Model().eval(), example_input) + +@pytest.fixture +def float32_input_model_relu_ops(): + class Model(torch.nn.Module): + def forward(self, x): + x = torch.nn.ReLU()(x) + return torch.nn.ReLU()(x) + example_input = torch.randint(0, 100, (10, 20), dtype=torch.float32) + return torch.jit.trace(Model().eval(), example_input) + +@pytest.fixture +def float32_two_input_model(): + class Model(torch.nn.Module): + def forward(self, x, y): + return x + y + example_input = torch.randint(0, 100, (10, 20), dtype=torch.float32) + return torch.jit.trace(Model().eval(), [example_input, example_input]) + +@pytest.fixture +def float32_two_output_model(): + class Model(torch.nn.Module): + def forward(self, x): + y = torch.nn.ReLU()(x) + out1 = torch.nn.ReLU()(y) + out2 = torch.nn.ReLU6()(x) + return out1, out2 + example_input = torch.randint(0, 100, (10, 20), dtype=torch.float32) + return torch.jit.trace(Model().eval(), example_input) + +@pytest.fixture +def rank3_input_model(): + class Model(torch.nn.Module): + def forward(self, x): + return x + 5.5 + example_input = torch.randint(0, 100, (1, 10, 20), dtype=torch.float32) + return torch.jit.trace(Model().eval(), example_input) + +@pytest.fixture +def rank4_input_model(): + class Model(torch.nn.Module): + def forward(self, x): + return x + 5.5 + example_input = torch.randint(0, 100, (1, 3, 10, 20), dtype=torch.float32) + return torch.jit.trace(Model().eval(), example_input) + +@pytest.fixture +def rank4_grayscale_input_model(): + class Model(torch.nn.Module): + def forward(self, x): + return x + 10 + example_input = torch.randint(0, 100, (1, 1, 10, 20), dtype=torch.float32) + return torch.jit.trace(Model().eval(), example_input) + +@pytest.fixture +def linear_model(): + # this model will test the fuse_linear_bias pass + class Model(torch.nn.Module): + def __init__(self): + super().__init__() + self.linear = torch.nn.Linear(10, 15, bias=False) + self.constant_tensor = torch.ones((15,), dtype=torch.float32) + + def forward(self, x): + x = self.linear(x) + x = x - self.constant_tensor + x = torch.nn.ReLU()(x) + return x + example_input = torch.randint(0, 10, (1, 10), dtype=torch.float32) + return torch.jit.trace(Model().eval(), example_input) + + +@pytest.mark.skipif(ct.utils._macos_version() < (13, 0), reason='Tests are for deployment target ios16/macos13') +class TestInputOutputConversionAPI: + + def test_input_dtype_default(self, int32_input_model): + #if dtype is not provided it defaults to float32 + mlmodel = ct.convert(int32_input_model, + inputs=[ct.TensorType(shape=(10, 20))], + minimum_deployment_target=ct.target.macOS12) + assert_input_dtype(mlmodel, expected_type_str="fp32") + verify_prediction(mlmodel) + + def test_input_shape_missing_error(self, float32_input_model_add_op): + with 
pytest.raises(ValueError, + match="'shape' must be provided in the 'inputs' argument for pytorch conversion"): + mlmodel = ct.convert(float32_input_model_add_op, + inputs=[ct.TensorType(dtype=np.int32)], + minimum_deployment_target=ct.target.macOS12) + + def test_unsupported_input_dtype_in_torch_model(self, int64_input_model): + # test that no error is raised when no dtype is provided by the user, + # and the Torch model's input dtype is not supported. + # In this case, it will be mapped to the default dtype which is float32 + mlmodel = ct.convert(int64_input_model, + inputs=[ct.TensorType(shape=(10, 20))], + minimum_deployment_target=ct.target.macOS12) + assert_input_dtype(mlmodel, expected_type_str="fp32") + verify_prediction(mlmodel) + + def test_input_dtype_user_provided(self, float32_input_model_add_op): + # test that provided dtype in the api is applied + mlmodel = ct.convert(float32_input_model_add_op, + inputs=[ct.TensorType(shape=(10, 20), dtype=np.int32)], + minimum_deployment_target=ct.target.macOS12) + assert_input_dtype(mlmodel, expected_type_str="int32") + assert_output_dtype(mlmodel, expected_type_str="fp32") + verify_prediction(mlmodel) + + def test_invalid_input_dtype(self, int32_input_model): + with pytest.raises(TypeError, + match="is unsupported for inputs/outputs of the model" + ): + mlmodel = ct.convert(int32_input_model, + inputs=[ct.TensorType(dtype=np.int16)], + minimum_deployment_target=ct.target.macOS12) + + with pytest.raises(TypeError, + match="float16 dtype for inputs is only supported for deployment target >= iOS16/macOS13" + ): + mlmodel = ct.convert(int32_input_model, + inputs=[ct.TensorType(dtype=np.float16)], + minimum_deployment_target=ct.target.macOS12) + + def test_fp16_input_dtype(self, float32_input_model_add_op, float32_input_model_relu_ops, int32_input_model): + """ + Test that providing fp16 input dtype works with macOS13. + """ + mlmodel = ct.convert(float32_input_model_add_op, + inputs=[ct.TensorType(shape=(10, 20), dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13 + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["add", "cast"]) + assert_input_dtype(mlmodel, expected_type_str="fp16") + assert_output_dtype(mlmodel, expected_type_str="fp32") + verify_prediction(mlmodel) + + mlmodel = ct.convert(float32_input_model_relu_ops, + inputs=[ct.TensorType(shape=(10, 20), dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13 + ) + # Two consecutive relus are merged in the `merge_consecutive_relus` pass. 
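+        # Hence a single "relu" op is expected, plus the cast that produces
+        # the fp32 output from the fp16 compute path.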
+ assert_ops_in_mil_program(mlmodel, expected_op_list=["relu", "cast"]) + assert_input_dtype(mlmodel, expected_type_str="fp16") + assert_output_dtype(mlmodel, expected_type_str="fp32") + verify_prediction(mlmodel) + + mlmodel = ct.convert(int32_input_model, + inputs=[ct.TensorType(shape=(10, 20), dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["add", "cast"]) + assert_input_dtype(mlmodel, expected_type_str="fp16") + assert_output_dtype(mlmodel, expected_type_str="fp32") + verify_prediction(mlmodel) + + def test_fp16_input_dtype_fp32_precision(self, float32_input_model_add_op, float32_input_model_relu_ops, + int32_input_model): + """ + Same test as test_fp16_input_dtype, but with Float32 precision + """ + mlmodel = ct.convert(float32_input_model_add_op, + inputs=[ct.TensorType(shape=(10, 20), dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13, + compute_precision=ct.precision.FLOAT32, + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "add"]) + assert_input_dtype(mlmodel, expected_type_str="fp16") + assert_output_dtype(mlmodel, expected_type_str="fp32") + verify_prediction(mlmodel) + + """ + Although no FP16ComputePrecision is applied, the float16 input propagates through the network + """ + mlmodel = ct.convert(float32_input_model_relu_ops, + inputs=[ct.TensorType(shape=(10, 20), dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13, + compute_precision=ct.precision.FLOAT32, + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "relu"]) + assert_input_dtype(mlmodel, expected_type_str="fp16") + assert_output_dtype(mlmodel, expected_type_str="fp32") + + def test_input_name_specified_by_user(self, float32_input_model_relu_ops, + float32_two_input_model): + mlmodel = ct.convert(float32_input_model_relu_ops, + inputs=[ct.TensorType(shape=(10, 20), name="my_custom_input_name")], + minimum_deployment_target=ct.target.macOS12) + assert_input_dtype(mlmodel, expected_type_str="fp32", expected_name="my_custom_input_name") + + mlmodel = ct.convert(float32_two_input_model, + inputs=[ct.TensorType(shape=(10, 20), name="user_provided_name_1"), + ct.TensorType(shape=(10, 20), name="user_provided_name_2")], + minimum_deployment_target=ct.target.macOS12) + assert_input_dtype(mlmodel, expected_type_str="fp32", expected_name="user_provided_name_1", index=0) + assert_input_dtype(mlmodel, expected_type_str="fp32", expected_name="user_provided_name_2", index=1) + + def test_two_input_model(self, float32_two_input_model): + # test that error is raised if only 1 input is provided + with pytest.raises(ValueError): + ct.convert(float32_two_input_model, + inputs=[ct.TensorType(shape=(10, 20), dtype=np.int32)], + minimum_deployment_target=ct.target.macOS12) + + + # test forcing 1st input to type int32 + mlmodel = ct.convert(float32_two_input_model, + inputs=[ct.TensorType(shape=(10, 20), dtype=np.int32), + ct.TensorType(shape=(10, 20))], + minimum_deployment_target=ct.target.macOS12) + assert_input_dtype(mlmodel, expected_type_str="int32", index=0) + assert_input_dtype(mlmodel, expected_type_str="fp32", index=1) + assert_output_dtype(mlmodel, expected_type_str="fp32") + + # test forcing both inputs to be int32 + mlmodel = ct.convert(float32_two_input_model, + inputs=[ct.TensorType(shape=(10, 20), dtype=np.int32), + ct.TensorType(shape=(10, 20), dtype=np.int32), + ], + minimum_deployment_target=ct.target.macOS12) + assert_input_dtype(mlmodel, expected_type_str="int32", index=0) + 
assert_input_dtype(mlmodel, expected_type_str="int32", index=1) + assert_output_dtype(mlmodel, expected_type_str="int32") + + # test forcing both inputs to be float16 + mlmodel = ct.convert(float32_two_input_model, + inputs=[ct.TensorType(shape=(10, 20), dtype=np.float16), + ct.TensorType(shape=(10, 20), dtype=np.float16), + ], + minimum_deployment_target=ct.target.macOS13) + assert_ops_in_mil_program(mlmodel, expected_op_list=["add", "cast"]) + assert_input_dtype(mlmodel, expected_type_str="fp16", index=0) + assert_input_dtype(mlmodel, expected_type_str="fp16", index=1) + assert_output_dtype(mlmodel, expected_type_str="fp32") + verify_prediction(mlmodel) + + def test_output_name_specified_by_user(self, float32_input_model_relu_ops, float32_two_output_model): + mlmodel = ct.convert(float32_input_model_relu_ops, + inputs=[ct.TensorType(shape=(10, 20), name="custom_input_name")], + outputs=[ct.TensorType(name="custom_output_name")], + minimum_deployment_target=ct.target.macOS12) + assert_input_dtype(mlmodel, expected_type_str="fp32", expected_name="custom_input_name") + assert_output_dtype(mlmodel, expected_type_str="fp32", expected_name="custom_output_name") + + mlmodel = ct.convert(float32_two_output_model, + inputs=[ct.TensorType(shape=(10, 20), name="custom_input_name")], + outputs=[ct.TensorType(name="custom_output1_name"), + ct.TensorType(name="custom_output2_name")], + minimum_deployment_target=ct.target.macOS12) + assert_input_dtype(mlmodel, expected_type_str="fp32", expected_name="custom_input_name") + assert_output_dtype(mlmodel, expected_type_str="fp32", expected_name="custom_output1_name", index=0) + assert_output_dtype(mlmodel, expected_type_str="fp32", expected_name="custom_output2_name", index=1) + + def test_single_output_model(self, int32_input_model, float32_input_model_relu_ops): + # test output type: if not provided, it should be the default which is float32 + mlmodel = ct.convert(int32_input_model, + inputs=[ct.TensorType(shape=(10, 20))], + minimum_deployment_target=ct.target.macOS12) + assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "add", "cast"]) + assert_input_dtype(mlmodel, expected_type_str="fp32") + assert_output_dtype(mlmodel, expected_type_str="fp32") + + # test that the output dtype provided by the user is applied during conversion + mlmodel = ct.convert(float32_input_model_relu_ops, + inputs=[ct.TensorType(shape=(10, 20))], + outputs=[ct.TensorType(dtype=np.int32)], + minimum_deployment_target=ct.target.macOS12) + assert_input_dtype(mlmodel, expected_type_str="fp32") + assert_output_dtype(mlmodel, expected_type_str="int32") + assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "relu", "cast", "cast"]) + + # test that an error is raised when shape is provided for the output + with pytest.raises(ValueError): + mlmodel = ct.convert(int32_input_model, + inputs=[ct.TensorType(shape=(10, 20))], + outputs=[ct.TensorType(dtype=np.float32, shape=(10, 20))], + minimum_deployment_target=ct.target.macOS12) + + # test that output dtype of float16 is rejected when deployment target is low + with pytest.raises(TypeError, + match="float16 dtype for outputs is only supported for deployment target >= iOS16/macOS13" + ): + ct.convert(float32_input_model_relu_ops, + inputs=[ct.TensorType(shape=(10, 20))], + outputs=[ct.TensorType(dtype=np.float16)], + minimum_deployment_target=ct.target.macOS12, + ) + + # test that output type float16 is applied correctly + mlmodel = ct.convert(float32_input_model_relu_ops, + inputs=[ct.TensorType(shape=(10, 20))], + 
outputs=[ct.TensorType(dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_output_dtype(mlmodel, expected_type_str="fp16") + assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "relu"]) + + # test that input and output types float16 are applied correctly + mlmodel = ct.convert(float32_input_model_relu_ops, + inputs=[ct.TensorType(shape=(10, 20), dtype=np.float16)], + outputs=[ct.TensorType(dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_input_dtype(mlmodel, expected_type_str="fp16") + assert_output_dtype(mlmodel, expected_type_str="fp16") + assert_ops_in_mil_program(mlmodel, expected_op_list=["relu"]) + verify_prediction(mlmodel) + + def test_multi_output_model(self, float32_two_output_model): + # check that error is raised when only 1 output provided + with pytest.raises(ValueError, match="Number of outputs provided, 1, " + "do not match the number of outputs detected in the model, 2"): + ct.convert(float32_two_output_model, + inputs=[ct.TensorType(shape=(10, 20))], + outputs=[ct.TensorType()], + minimum_deployment_target=ct.target.macOS12) + + # set 1 output to float16 and the other to float32 + mlmodel = ct.convert(float32_two_output_model, + inputs=[ct.TensorType(shape=(10, 20), dtype=np.float16)], + outputs=[ct.TensorType(name="out1", dtype=np.float16), + ct.TensorType(name="out2", dtype=np.float32)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_cast_ops_count(mlmodel, expected_count=1) + assert_input_dtype(mlmodel, expected_type_str="fp16") + assert_output_dtype(mlmodel, expected_type_str="fp16", expected_name="out1" ,index=0) + assert_output_dtype(mlmodel, expected_type_str="fp32", expected_name="out2", index=1) + verify_prediction(mlmodel) + + def test_color_input(self, rank4_input_model, rank3_input_model): + mlmodel = ct.convert(rank4_input_model, + inputs=[ct.ImageType(shape=(1, 3, 10, 20), color_layout=ct.colorlayout.RGB)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "add", "cast"]) + assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.RGB) + assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp32") + assert_prog_output_type(mlmodel._mil_program, expected_dtype_str="fp32") + verify_prediction(mlmodel) + + with pytest.raises(ValueError, match="must have rank 4"): + mlmodel = ct.convert(rank3_input_model, + inputs=[ct.ImageType(shape=(1, 10, 20), color_layout=ct.colorlayout.RGB)], + minimum_deployment_target=ct.target.macOS12, + ) + + def test_grayscale_input(self, rank4_input_model, rank3_input_model, rank4_grayscale_input_model): + with pytest.raises(ValueError, match="must have rank 4"): + ct.convert(rank3_input_model, + inputs=[ct.ImageType(shape=(1, 10, 20), color_layout=ct.colorlayout.GRAYSCALE)], + minimum_deployment_target=ct.target.macOS13, + ) + + # invalid shape + with pytest.raises(ValueError): + ct.convert(rank4_input_model, + inputs=[ct.ImageType(shape=(1, 3, 10, 20), color_layout=ct.colorlayout.GRAYSCALE)], + minimum_deployment_target=ct.target.macOS13, + ) + + mlmodel = ct.convert(rank4_grayscale_input_model, + inputs=[ct.ImageType(shape=(1, 1, 10, 20), color_layout=ct.colorlayout.GRAYSCALE)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "add", "cast"]) + assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE) + assert_prog_input_type(mlmodel._mil_program, 
expected_dtype_str="fp32") + assert_prog_output_type(mlmodel._mil_program, expected_dtype_str="fp32") + verify_prediction(mlmodel) + + with pytest.raises(TypeError, match="float16 dtype for inputs is only supported for deployment target >= iOS16/macOS13"): + ct.convert(rank4_grayscale_input_model, + inputs=[ct.ImageType(shape=(1, 1, 10, 20), + color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)], + minimum_deployment_target=ct.target.macOS12, + ) + + # test that grayscale_16 raises error when used with neural network + with pytest.raises(TypeError, match="float16 dtype for inputs is only supported for deployment target >= iOS16/macOS13"): + ct.convert(rank4_grayscale_input_model, + inputs=[ct.ImageType(shape=(1, 1, 10, 20), + color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)], + ) + + mlmodel = ct.convert(rank4_grayscale_input_model, + inputs=[ct.ImageType(shape=(1, 1, 10, 20), + color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)], + outputs=[ct.TensorType(dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["add"]) + assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE_FLOAT16) + assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp16") + assert_output_dtype(mlmodel, expected_type_str="fp16") + verify_prediction(mlmodel) + + def test_color_output(self, rank4_input_model, float32_input_model_add_op): + # check that an error is raised if the output shape is not of form (1, 3, H, W) + with pytest.raises(ValueError, match="must have rank 4. Instead it has rank 2"): + ct.convert(float32_input_model_add_op, + inputs=[ct.TensorType(shape=(10, 20))], + outputs=[ct.ImageType(color_layout=ct.colorlayout.RGB)], + minimum_deployment_target=ct.target.macOS13) + + mlmodel = ct.convert(rank4_input_model, + inputs=[ct.ImageType(shape=(1, 3, 10, 20), + color_layout=ct.colorlayout.BGR)], + outputs=[ct.ImageType(color_layout=ct.colorlayout.RGB)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "add", "cast"]) + assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.BGR) + assert_spec_output_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.RGB) + assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp32") + assert_prog_output_type(mlmodel._mil_program, expected_dtype_str="fp32") + verify_prediction(mlmodel) + + # check neural network conversion + mlmodel = ct.convert(rank4_input_model, + inputs=[ct.ImageType(shape=(1, 3, 10, 20), + color_layout=ct.colorlayout.RGB)], + outputs=[ct.ImageType(color_layout=ct.colorlayout.BGR)], + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["add"]) + assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.RGB) + assert_spec_output_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.BGR) + verify_prediction(mlmodel) + + def test_grayscale_output(self, rank4_grayscale_input_model): + with pytest.raises(TypeError, match="float16 dtype for outputs is only supported for deployment target >= iOS16/macOS13"): + ct.convert(rank4_grayscale_input_model, + inputs=[ct.TensorType(shape=(1, 1, 10, 20))], + outputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)], + minimum_deployment_target=ct.target.macOS12, + ) + + mlmodel = ct.convert(rank4_grayscale_input_model, + inputs=[ct.ImageType(shape=(1, 1, 10, 20), + color_layout=ct.colorlayout.GRAYSCALE)], + 
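# no minimum deployment target here, so this converts via the default neuralnetwork path +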
outputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE)], + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["add"]) + assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE) + assert_spec_output_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE) + verify_prediction(mlmodel) + + mlmodel = ct.convert(rank4_grayscale_input_model, + inputs=[ct.ImageType(shape=(1, 1, 10, 20), + color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)], + outputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["add"]) + assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE_FLOAT16) + assert_spec_output_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE_FLOAT16) + assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp16") + assert_prog_output_type(mlmodel._mil_program, expected_dtype_str="fp16") + verify_prediction(mlmodel) + + mlmodel = ct.convert(rank4_grayscale_input_model, + inputs=[ct.ImageType(shape=(1, 1, 10, 20), + color_layout=ct.colorlayout.GRAYSCALE)], + outputs=[ct.ImageType(color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_ops_in_mil_program(mlmodel, expected_op_list=["cast", "add"]) + assert_spec_input_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE) + assert_spec_output_image_type(mlmodel._spec, expected_feature_type=ft.ImageFeatureType.GRAYSCALE_FLOAT16) + assert_prog_input_type(mlmodel._mil_program, expected_dtype_str="fp32") + assert_prog_output_type(mlmodel._mil_program, expected_dtype_str="fp16") + verify_prediction(mlmodel) + + def test_linear_model(self, linear_model): + # this will test the fuse_linear_bias pass, when the inputs are of type float16 + mlmodel = ct.convert(linear_model, + inputs=[ct.TensorType(shape=(1, 10), dtype=np.float16)], + outputs=[ct.TensorType(dtype=np.float16)], + minimum_deployment_target=ct.target.macOS13, + ) + assert_input_dtype(mlmodel, expected_type_str="fp16") + assert_output_dtype(mlmodel, expected_type_str="fp16") + assert_ops_in_mil_program(mlmodel, ["linear", "relu"]) + verify_prediction(mlmodel) + + + def test_classifier(self): + torch_model = torch.nn.ReLU().eval() + traced_model = torch.jit.trace(torch_model, torch.rand(3,)) + model = ct.convert( + traced_model, + inputs=[ct.TensorType(shape=(3,), dtype=np.float16)], + outputs=[ct.TensorType(dtype=np.float16)], + classifier_config = ct.ClassifierConfig(['a', 'b', 'c']), + convert_to='mlprogram', + minimum_deployment_target=ct.target.macOS13, + ) + assert_input_dtype(model, expected_type_str="fp16") + assert_ops_in_mil_program(model, ["relu", "cast", "classify"]) + spec = model.get_spec() + input_name = spec.description.input[0].name + out_dict = model.predict({input_name : np.array([1.0, 2.0, 3.0])}) + assert 'classLabel' in out_dict + assert out_dict['classLabel'] == 'c' + assert len(spec.description.output) == 2 + assert "classLabel_probs" in out_dict + assert isinstance(out_dict["classLabel_probs"], dict) + + def test_prediction_with_fp16_io(self): + torch_model = torch.nn.Linear(30, 5).eval() + traced_model = torch.jit.trace(torch_model, torch.rand(1, 30)) + mlmodel = ct.convert(traced_model, + inputs=[ct.TensorType(name="input", shape=(1, 30), dtype=np.float32)], + outputs=[ct.TensorType(dtype=np.float32)], + 
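# I/O is pinned to fp32 while the mlprogram itself runs at the default fp16 compute precision +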
minimum_deployment_target=ct.target.macOS13, + compute_units=ct.ComputeUnit.CPU_ONLY, + ) + # test prediction + sample_input = np.random.rand(1, 30).astype(np.float32) * 10 + model_output = mlmodel.predict({"input": sample_input})[mlmodel._spec.description.output[0].name] + reference_output = traced_model(torch.from_numpy(sample_input)).detach().numpy() + np.testing.assert_allclose(reference_output, model_output, rtol=1e-2, atol=1e-2) + + +@pytest.mark.skipif(ct.utils._macos_version() < (13, 0), reason='Tests are for deployment target ios16/macos13') +class TestGrayscaleImagePredictions: + + def test_grayscale_input_image(self, rank4_grayscale_input_model): + mlmodel = ct.convert(rank4_grayscale_input_model, + inputs=[ct.ImageType(name="input_image", + shape=(1, 1, 10, 20), + color_layout=ct.colorlayout.GRAYSCALE)], + outputs=[ct.TensorType(name="output")], + minimum_deployment_target=ct.target.macOS13, + ) + sample_input = np.random.randint(low=0, high=246, size=(1, 1, 10, 20)) + img_input = Image.fromarray(sample_input[0, 0, :, :].astype(np.uint8), 'L') + model_output = mlmodel.predict({"input_image": img_input})['output'] + reference_output = rank4_grayscale_input_model(torch.from_numpy(sample_input.astype(np.float32))).detach().numpy() + np.testing.assert_allclose(reference_output, model_output, rtol=1e-2, atol=1e-2) + + def test_grayscale_fp16_input_image(self, rank4_grayscale_input_model): + mlmodel = ct.convert(rank4_grayscale_input_model, + inputs=[ct.ImageType(name="input_image", + shape=(1, 1, 10, 20), + color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)], + outputs=[ct.TensorType(name="output")], + minimum_deployment_target=ct.target.macOS13, + ) + + # incorrect way to do prediction + with pytest.raises(TypeError, + match="must be of type PIL.Image.Image with mode=='F'", + ): + sample_input = np.random.randint(low=0, high=246, size=(1, 1, 10, 20)) + img_input = Image.fromarray(sample_input[0, 0, :, :].astype(np.uint8), 'L') + mlmodel.predict({"input_image": img_input}) + + # correct way to do prediction + sample_input = np.random.rand(1, 1, 10, 20) # in between [0, 1] + img_input = Image.fromarray(sample_input[0, 0, :, :].astype(np.float32), 'F') + model_output = mlmodel.predict({"input_image": img_input})['output'] + reference_output = rank4_grayscale_input_model(torch.from_numpy(sample_input.astype(np.float32))).detach().numpy() + np.testing.assert_allclose(reference_output, model_output, rtol=1e-2, atol=1e-2) + + def test_grayscale_output_image(self, rank4_grayscale_input_model): + mlmodel = ct.convert(rank4_grayscale_input_model, + inputs=[ct.TensorType(name="input", + shape=(1, 1, 10, 20))], + outputs=[ct.ImageType(name="output_image", + color_layout=ct.colorlayout.GRAYSCALE)], + minimum_deployment_target=ct.target.macOS13, + compute_precision=ct.precision.FLOAT32, + ) + sample_input = np.random.randint(low=0, high=200, size=(1, 1, 10, 20)).astype(np.float32) + model_output_pil_image = mlmodel.predict({"input": sample_input})['output_image'] + assert isinstance(model_output_pil_image, Image.Image) + assert model_output_pil_image.mode == "L" + model_output_as_numpy = np.array(model_output_pil_image) + reference_output = rank4_grayscale_input_model(torch.from_numpy(sample_input)).detach().numpy() + reference_output = np.squeeze(reference_output) + np.testing.assert_allclose(reference_output, model_output_as_numpy, rtol=1e-2, atol=1e-2) + + def test_grayscale_fp16_output_image(self, rank4_grayscale_input_model): + mlmodel = ct.convert(rank4_grayscale_input_model, + 
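# a GRAYSCALE image input expects a PIL image in mode 'L' at prediction time +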
inputs=[ct.TensorType(name="input", + shape=(1, 1, 10, 20))], + outputs=[ct.ImageType(name="output_image", + color_layout=ct.colorlayout.GRAYSCALE_FLOAT16)], + minimum_deployment_target=ct.target.macOS13, + compute_precision=ct.precision.FLOAT32, + ) + sample_input = np.random.randint(low=0, high=200, size=(1, 1, 10, 20)).astype(np.float32) + model_output_pil_image = mlmodel.predict({"input": sample_input})['output_image'] + assert isinstance(model_output_pil_image, Image.Image) + assert model_output_pil_image.mode == "F" + model_output_as_numpy = np.array(model_output_pil_image) + reference_output = rank4_grayscale_input_model(torch.from_numpy(sample_input)).detach().numpy() + reference_output = np.squeeze(reference_output) + np.testing.assert_allclose(reference_output, model_output_as_numpy, rtol=1e-2, atol=1e-2) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_torch_ops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_torch_ops.py new file mode 100644 index 00000000..d7416387 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/test_torch_ops.py @@ -0,0 +1,8442 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools +import platform +from typing import List, Tuple +from unittest.mock import patch + +import numpy as np +import pytest +import torch.nn as nn +import torchvision + +import coremltools as ct +from coremltools import RangeDim, Shape, TensorType +from coremltools._deps import version_lt +from coremltools.converters.mil import testing_reqs +from coremltools.converters.mil.mil.var import Var +from coremltools.converters.mil.testing_utils import einsum_equations, gen_input_shapes_einsum +from coremltools.models.utils import _macos_version, _python_version + +from .testing_utils import ModuleWrapper, TorchBaseTest, contains_op, generate_input_data + +backends = testing_reqs.backends +compute_units = testing_reqs.compute_units + +torch = pytest.importorskip("torch") +torch.manual_seed(30) +np.random.seed(30) + +# Set of common shapes for testing. 
Not all layers support 1D, so these two +# set of shapes are kept separate +COMMON_SHAPES = [(1, 10), (1, 5, 6), (1, 3, 5, 6), (1, 3, 4, 5, 6)] +COMMON_SHAPES_ALL = [(1,)] + COMMON_SHAPES + + +class TestScriptedModels(TorchBaseTest): + + @staticmethod + def get_while_loop_model(): + class TestLayer(nn.Module): + def forward(self, x): + x = 0.5 * x + return x + + class TestNet(nn.Module): + input_size = (1,) + + def __init__(self): + super(TestNet, self).__init__() + layer = TestLayer() + self.layer = torch.jit.trace(layer, torch.rand(self.input_size)) + + def forward(self, x): + while x > 0.01: + x = self.layer(x) + return x + + return TestNet().eval() + + @staticmethod + def get_cond_model(): + class TestNet(nn.Module): + def forward(self, x): + if torch.squeeze(x) < 10.0: + return x * 10.0 + else: + return x * 2.0 + + return TestNet().eval() + + @pytest.mark.parametrize("compute_unit, backend", itertools.product(compute_units, backends)) + def test_while_loop(self, compute_unit, backend): + model = TestScriptedModels.get_while_loop_model() + self.run_compare_torch( + model.input_size, + model, + backend=backend, + compute_unit=compute_unit, + use_scripting=True + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_cond(self, compute_unit, backend): + torch_model = TestScriptedModels.get_cond_model() + + self.run_compare_torch( + torch.tensor([1.]), + torch_model, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + use_scripting=True + ) + + self.run_compare_torch( + torch.tensor([11.]), + torch_model, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + use_scripting=True + ) + + @pytest.mark.parametrize("compute_unit, backend", itertools.product(compute_units, backends)) + def test_for_loop(self, compute_unit, backend): + class TestLayer(nn.Module): + def forward(self, x): + x = 2.0 * x + return x + + class TestNet(nn.Module): + input_size = (64,) + + def __init__(self): + super(TestNet, self).__init__() + layer = TestLayer() + self.layer = torch.jit.trace(layer, torch.rand(self.input_size)) + + def forward(self, x): + for _ in range(7): + x = self.layer(x) + return x + + model = TestNet().eval() + + self.run_compare_torch( + model.input_size, + model, + backend=backend, + compute_unit=compute_unit, + use_scripting=True + ) + + @pytest.mark.parametrize("compute_unit, backend", itertools.product(compute_units, backends)) + def test_if(self, compute_unit, backend): + class TestLayer(nn.Module): + def forward(self, x): + x = torch.mean(x) + return x + + class TestNet(nn.Module): + input_size = (64,) + + def __init__(self): + super(TestNet, self).__init__() + layer = TestLayer() + self.layer = torch.jit.trace(layer, torch.rand(self.input_size)) + + def forward(self, x): + m = self.layer(x) + if m < 0: + scale = -2.0 + else: + scale = 2.0 + x = scale * x + return x + + model = TestNet().eval() + + self.run_compare_torch( + model.input_size, + model, + backend=backend, + compute_unit=compute_unit, + use_scripting=True + ) + + @pytest.mark.parametrize("compute_unit, backend", itertools.product(compute_units, backends)) + def test_linear(self, compute_unit, backend): + class Model(torch.nn.Module): + def __init__(self): + super(Model, self).__init__() + self.linear = torch.nn.Linear(2, 2) + + def forward(self, x): + return self.linear(x) + + model = Model().eval() + + self.run_compare_torch( + torch.tensor([[1.0, 2.0]]), + model, + input_as_shape=False, + backend=backend, + 
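# use_scripting=True converts via torch.jit.script rather than tracing +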
compute_unit=compute_unit, + use_scripting=True, + ) + + @pytest.mark.parametrize("compute_unit, backend", itertools.product(compute_units, backends)) + def test_conv(self, compute_unit, backend): + pytest.xfail( + "rdar://88194776 ([Converter] coremltools is not working with scripted torch convolution model)" + ) + model = torch.nn.Conv2d( + in_channels=2, + out_channels=3, + kernel_size=1, + padding="same", + stride=1, + dilation=1, + groups=1, + bias=False, + ) + self.run_compare_torch( + (1, 2, 4, 5), + model, + backend=backend, + compute_unit=compute_unit, + use_scripting=True, + ) + + +class TestMean(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_with_flexible_shape(self, compute_unit, backend): + if backend[0] == "mlprogram" and _macos_version() < (13, 0): + pytest.xfail( + "Issue fixed in iOS16/macOS13: https://github.com/apple/coremltools/issues/1420" + ) + + class Model(nn.Module): + def forward(self, x): + return torch.mean(x, dim=(2, 3), keepdim=True) + + model = Model() + shape = (1, 3, 256, 256) + converter_input_type = [ + TensorType(shape=Shape(shape=[1, 3, RangeDim(), RangeDim()], default=shape)) + ] + + self.run_compare_torch( + shape, + model, + backend=backend, + compute_unit=compute_unit, + converter_input_type=converter_input_type, + ) + + @staticmethod + @pytest.mark.skipif( + ct.utils._macos_version() < (13, 0), reason="Bug fixed in macOS13/iOS16" + ) + def test_flexible_shape_with_default_value(): + # test for bug reported in https://github.com/apple/coremltools/issues/1420 + class Network(torch.nn.Module): + def forward(self, x): + return torch.mean(x, dim=(2, 3), keepdim=True) + + model = Network() + x = torch.rand(1, 3, 256, 256) + traced_model = torch.jit.trace(model, x) + input_x = ct.TensorType( + shape=(1, 3, ct.RangeDim(default=256), ct.RangeDim(default=256)), + name="input", + ) + cml = ct.convert( + traced_model, + inputs=[input_x], + outputs=[ct.TensorType(name="out")], + convert_to="mlprogram", + compute_units=ct.ComputeUnit.CPU_ONLY, + ) + + input_dict = {"input": np.random.rand(1, 3, 112, 112)} + + if ct.utils._is_macos(): + out = cml.predict(input_dict)["out"] + assert out.shape == (1, 3, 1, 1) + + +class TestAffineGrid(TorchBaseTest): + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "x_shape_and_target_size", + "sampling_mode", + "padding_mode", + "align_corners", + ] + ), + itertools.product( + compute_units, + backends, + [ + # shape format: (Batch, Channel, Height, Width) + [(1, 1, 3, 3), (1, 1, 3, 3)], # no size change + [(2, 3, 5, 5), (2, 3, 3, 2)], # down-sampling + [(3, 1, 6, 6), (3, 1, 8, 8)], # up-sampling + ], + ["bilinear"], + ["zeros"], + [True], + ), + ) + def test( + self, + compute_unit, + backend, + x_shape_and_target_size, + sampling_mode, + padding_mode, + align_corners, + ): + if backend[0] == "neuralnetwork": + pytest.skip("nn backend not supported") + + x_shape, target_size = x_shape_and_target_size + theta = torch.rand((x_shape[0], 2, 3)) + + class TestModule(torch.nn.Module): + def __init__(self): + super(TestModule, self).__init__() + self.affine_grid = torch.nn.functional.affine_grid + self.grid_sample = torch.nn.functional.grid_sample + + def forward(self, x): + grid = self.affine_grid( + theta=theta, + size=target_size, + align_corners=align_corners, + ) + x = self.grid_sample( + x, + grid=grid, + mode=sampling_mode, + padding_mode=padding_mode, + align_corners=align_corners, + ) + return x + + 
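# affine_grid builds a sampling grid from theta; grid_sample then warps x with that grid +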
model = TestModule() + self.run_compare_torch( + x_shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestGridSample(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, data_grid_shapes, mode, padding_mode, align_corners", + itertools.product( + compute_units, + backends, + [ + # Input shape format: (Batch, C, Hin, Win) + # Grid shape format: (Batch, Hout, Wout, 2) + [(1, 1, 3, 3), (1, 3, 3, 2)], # no size change + [(2, 3, 5, 5), (2, 3, 3, 2)], # down-sampling + [(3, 1, 6, 6), (3, 8, 8, 2)], # up-sampling + ], + ["bilinear", "nearest"], + ["zeros", "border", "reflection"], + [True, False], + ), + ) + def test( + self, + compute_unit, + backend, + data_grid_shapes, + mode, + padding_mode, + align_corners, + ): + if backend[0] == "neuralnetwork": + pytest.skip("nn backend not supported") + + params = { + "mode": mode, + "padding_mode": padding_mode, + "align_corners": align_corners, + } + model = ModuleWrapper(function=torch.nn.functional.grid_sample, kwargs=params) + self.run_compare_torch( + data_grid_shapes, + model, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestFrac(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + COMMON_SHAPES, + ), + ) + def test_frac(self, compute_unit, backend, shape): + model = ModuleWrapper(function=torch.frac) + TorchBaseTest.run_compare_torch( + shape, + model, + backend=backend, + compute_unit=compute_unit, + rand_range=(-10.0, 10.0), + ) + + +class TestNLLLoss(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, reduction", + itertools.product( + compute_units, + backends, + ["none", "sum", "mean"], + ), + ) + def test_nllloss( + self, + compute_unit, + backend, + reduction, + ): + class NLLLossModel(nn.Module): + def __init__(self): + super(NLLLossModel, self).__init__() + self.loss = nn.NLLLoss(reduction=reduction) + + def forward(self, x, target): + loss = self.loss(x, target) + return loss + + x = torch.randn(3, 5) + target = torch.tensor([1, 0, 4]) + inputs = (x, target) + + model = NLLLossModel() + expected_results = model(*inputs) + + self.run_compare_torch( + inputs, + model, + expected_results, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestArgSort(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape, axis, descending", + itertools.product( + compute_units, + backends, + COMMON_SHAPES, + [-1, 0], + [True, False], + ), + ) + def test_argsort(self, compute_unit, backend, shape, axis, descending): + model = ModuleWrapper( + function=torch.argsort, kwargs={"dim": axis, "descending": descending} + ) + TorchBaseTest.run_compare_torch( + shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestSort(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape, axis, descending", + itertools.product( + compute_units, + backends, + COMMON_SHAPES, + [-1, 0], + [True, False], + ), + ) + def test_sort(self, compute_unit, backend, shape, axis, descending): + model = ModuleWrapper( + function=torch.sort, kwargs={"dim": axis, "descending": descending} + ) + TorchBaseTest.run_compare_torch( + shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestSelu(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, inplace", + itertools.product( + compute_units, + backends, + [True, False], + ), + ) + def test_selu(self, compute_unit, backend, inplace): + x = 
torch.tensor([-6.0, -4.0, -2.0, 0.0, 2.0, 4.0, 6.0]) + model = torch.nn.SELU(inplace=inplace) + TorchBaseTest.run_compare_torch( + x, + model, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestMv(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, matrix_shape", + itertools.product(compute_units, backends, [(2, 3), (10, 12), (10, 1), (1, 5)]), + ) + def test_mv(self, compute_unit, backend, matrix_shape): + model = ModuleWrapper(function=torch.mv) + + matrix = generate_input_data(matrix_shape) + vector_length = matrix_shape[-1] + vector = generate_input_data((vector_length,)) + + TorchBaseTest.run_compare_torch( + (matrix, vector), + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + +@pytest.mark.skip( + reason="rdar://100332029 ([PyTorch] cos_similarity unittest is failing stochastically)" +) +class TestCosineSimilarity(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, dim, eps, shape", + itertools.product( + compute_units, + backends, + [0, -1], + [0.1, 1e-5, 1e-8], + COMMON_SHAPES, + ), + ) + def test_cosine_similarity(self, compute_unit, backend, dim, eps, shape): + class CosineSimilarity(nn.Module): + def __init__(self, dim, eps): + super(CosineSimilarity, self).__init__() + self.cossim = torch.nn.CosineSimilarity(dim=dim, eps=eps) + + def forward(self, x, y): + out = self.cossim(x, y) + return out + + model = CosineSimilarity(dim, eps) + input1 = generate_input_data(shape) + input2 = generate_input_data(shape) + + TorchBaseTest.run_compare_torch( + [input1, input2], + model, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestDot(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, vector_length", + itertools.product(compute_units, backends, [1, 5, 11]), + ) + def test_dot(self, compute_unit, backend, vector_length): + model = ModuleWrapper(function=torch.dot) + + vector1 = generate_input_data((vector_length,)) + vector2 = generate_input_data((vector_length,)) + + TorchBaseTest.run_compare_torch( + (vector1, vector2), + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + +class TestOuter(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, x_vector_length, y_vector_length", + itertools.product( + compute_units, + backends, + [1, 5], + [1, 3], + ), + ) + def test_outer(self, compute_unit, backend, x_vector_length, y_vector_length): + model = ModuleWrapper(function=torch.outer) + + vector1 = generate_input_data((x_vector_length,)) + vector2 = generate_input_data((y_vector_length,)) + + TorchBaseTest.run_compare_torch( + (vector1, vector2), + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + +class TestCross(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape_dim", + itertools.product(compute_units, backends, [((3,), 0), ((4, 3, 2), 1)]), + ) + def test_cross(self, compute_unit, backend, shape_dim): + shape = shape_dim[0] + dim = shape_dim[1] + + class CrossModel(nn.Module): + def forward(self, x, y): + return torch.cross(x, y, dim) + + x = generate_input_data(shape) + y = generate_input_data(shape) + model = CrossModel().eval() + torch_out = model(x, y) + self.run_compare_torch( + (x, y), + model, + expected_results=torch_out, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestNormalize(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape", + 
itertools.product( + compute_units, + backends, + COMMON_SHAPES, + ), + ) + def test_normalize(self, compute_unit, backend, shape): + model = ModuleWrapper(function=nn.functional.normalize) + TorchBaseTest.run_compare_torch( + shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestNorms(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape, keepdim", + itertools.product(compute_units, backends, COMMON_SHAPES, [True, False]), + ) + def test_frobenius_norm(self, compute_unit, backend, shape, keepdim): + num_dims = len(shape) + for dim in range(-num_dims, num_dims): + model = ModuleWrapper( + function=torch.norm, kwargs={"keepdim": keepdim, "dim": dim} + ) + TorchBaseTest.run_compare_torch( + shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape, p, keepdim", + itertools.product( + compute_units, + backends, + COMMON_SHAPES, + [-1, 0, 1, 2, 3, np.inf, -np.inf], + [True, False], + ), + ) + def test_number_norm(self, compute_unit, backend, shape, p, keepdim): + for dim in (-1, 0, 1): + model = ModuleWrapper( + function=torch.norm, kwargs={"p": p, "keepdim": keepdim, "dim": dim} + ) + TorchBaseTest.run_compare_torch( + shape, + model, + backend=backend, + compute_unit=compute_unit, + atol=1e-2, + ) + + +class TestWeightNorm(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, in_out_features", + itertools.product( + compute_units, + backends, + [(1, 1), (2, 10), (20, 10)], + ), + ) + def test_linear(self, compute_unit, backend, in_out_features): + in_features, out_features = in_out_features + + for dim in (None, -2, -1, 0, 1): + model = nn.utils.weight_norm(nn.Linear(in_features, out_features), dim=dim) + TorchBaseTest.run_compare_torch( + (in_features,), + model, + backend=backend, + compute_unit=compute_unit, + atol=1e-3, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_conv2d(self, compute_unit, backend): + x = torch.randn(20, 16, 50, 100) + + for dim in (None,) + tuple(range(-4, 4)): + model = nn.utils.weight_norm(nn.Conv2d(16, 33, 3), dim=dim) + TorchBaseTest.run_compare_torch( + x, + model, + input_as_shape=False, + atol=1e-3, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_conv3d(self, compute_unit, backend): + x = torch.randn(20, 16, 5, 50, 100) + + for dim in (None,) + tuple(range(-5, 5)): + model = nn.utils.weight_norm(nn.Conv3d(16, 33, 3), dim=dim) + TorchBaseTest.run_compare_torch( + x, + model, + input_as_shape=False, + atol=1e-3, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestLinAlgNorms(TorchBaseTest): + def _is_valid_config(self, shape, order, dim): + if isinstance(dim, tuple): + if isinstance(order, int) and (order == 0 or order > 2): + return False + elif isinstance(dim, int): + if order == "fro": + return False + elif dim is None: + if order is not None: + if len(shape) > 2: + return False + elif ( + len(shape) == 2 + and not isinstance(order, str) + and (order == 0 or order > 2) + ): + return False + elif len(shape) == 1 and isinstance(order, str): + return False + return True + + @pytest.mark.parametrize( + "compute_unit, backend, shape, order, keepdim, dim", + itertools.product( + compute_units, + backends, + COMMON_SHAPES, + [-2, -1, 0, 1, 2, 3, np.inf, -np.inf, "fro", None], + [True, False], + [-1, 0, 
1, (0, 1), (0, -1), None], + ), + ) + def test_norm(self, compute_unit, backend, shape, order, keepdim, dim): + if not self._is_valid_config(shape, order, dim): + pytest.skip() + if ( + isinstance(order, int) + and abs(order) == 2 + and ((dim is None and len(shape) == 2) or isinstance(dim, tuple)) + ): + pytest.xfail("Matrix norm for order 2 and -2 is not implemented") + model = ModuleWrapper( + function=torch.linalg.norm, + kwargs={"ord": order, "keepdim": keepdim, "dim": dim}, + ) + TorchBaseTest.run_compare_torch( + shape, + model, + backend=backend, + compute_unit=compute_unit, + atol=1e-2, + ) + + +class TestLinAlgMatrixNorms(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape, order, keepdim, dim", + itertools.product( + compute_units, + backends, + COMMON_SHAPES, + [-2, -1, 1, 2, np.inf, -np.inf, "fro", "nuc"], + [True, False], + [(0, 1), (0, -1), (1, 2), (0, 2), (2, 3)], + ), + ) + def test_norm(self, compute_unit, backend, shape, order, keepdim, dim): + if dim[-1] > len(shape) - 1: + pytest.skip() + if order == "nuc" or (type(order) != str and abs(order) == 2): + pytest.xfail("Matrix norm for order 2, -2 and nuc is not implemented") + model = ModuleWrapper( + function=torch.linalg.matrix_norm, + kwargs={"ord": order, "keepdim": keepdim, "dim": dim}, + ) + TorchBaseTest.run_compare_torch( + shape, model, backend=backend, compute_unit=compute_unit, atol=1e-2 + ) + + +class TestLinAlgVectorNorms(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape, order, keepdim, dim", + itertools.product( + compute_units, + backends, + COMMON_SHAPES, + [-2, -1, 0, 1, 2, np.inf, -np.inf], + [True, False], + [-1, 0, 1, (0, 1), (0, -1), None], + ), + ) + def test_norm(self, compute_unit, backend, shape, order, keepdim, dim): + model = ModuleWrapper( + function=torch.linalg.vector_norm, + kwargs={"ord": order, "keepdim": keepdim, "dim": dim}, + ) + TorchBaseTest.run_compare_torch( + shape, + model, + backend=backend, + compute_unit=compute_unit, + atol=1e-2, + ) + + +class TestHardswish(TorchBaseTest): + class HardswishModel(nn.Module): + def __init__(self, inplace=False): + super(TestHardswish.HardswishModel, self).__init__() + self.activation = nn.Hardswish(inplace=inplace) + + def forward(self, x): + return self.activation(x) + + def test_longer_range_input_element_values(self): + x = torch.tensor([-6.0, -4.0, -2.0, 0.0, 2.0, 4.0, 6.0]) + + model = TestHardswish.HardswishModel() + TorchBaseTest.run_compare_torch(x, model, input_as_shape=False) + + model = TestHardswish.HardswishModel(inplace=True) + TorchBaseTest.run_compare_torch(x, model, input_as_shape=False) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + COMMON_SHAPES, + ), + ) + def test_additional_shapes_and_backends(self, compute_unit, backend, shape): + model = TestHardswish.HardswishModel() + TorchBaseTest.run_compare_torch( + shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestBatchNorm(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, num_features, eps, affine", + itertools.product( + compute_units, backends, [5, 3, 1], [0.1, 1e-05], [True, False] + ), + ) + def test_batchnorm(self, compute_unit, backend, num_features, eps, affine): + model = nn.BatchNorm2d(num_features, eps, affine=affine) + self.run_compare_torch( + (6, num_features, 5, 5), + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, affine", + 
itertools.product(compute_units, backends, [True, False]), + ) + def test_batchnorm_2d_with_conv(self, compute_unit, backend, affine): + class CRNNBase(nn.Module): + def __init__(self, ch_in, ch_out, kernel_size=3): + super(CRNNBase, self).__init__() + self.conv = nn.Conv2d(ch_in, ch_out, kernel_size=kernel_size) + self.norm = nn.BatchNorm2d(ch_out, affine=affine) + + def forward(self, x): + x = self.conv(x) + x = self.norm(x) + return x + + model = CRNNBase(ch_in=6, ch_out=16) + self.run_compare_torch( + (1, 6, 15, 30), + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, num_features, eps, affine, dynamic_input", + itertools.product( + [ct.ComputeUnit.CPU_ONLY], + backends, + [5, 1], + [0.1, 1e-05], + [True, False], + ["None", "Batch", "Height", "Width", "Depth", "All"], + ), + ) + def test_batchnorm_3d( + self, compute_unit, backend, num_features, eps, affine, dynamic_input + ): + model = nn.BatchNorm3d(num_features, eps, affine=affine) + input_shape = (6, num_features, 2, 3, 4) + if dynamic_input == "None": + self.run_compare_torch( + input_shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + else: + if dynamic_input == "Batch": + converter_input_type = [ + TensorType( + shape=(RangeDim(1, 10), num_features, 2, 3, 4), dtype=np.float32 + ) + ] + elif dynamic_input == "Height": + converter_input_type = [ + TensorType( + shape=(6, num_features, RangeDim(1, 10), 3, 4), dtype=np.float32 + ) + ] + elif dynamic_input == "Width": + converter_input_type = [ + TensorType( + shape=(6, num_features, 2, RangeDim(1, 10), 4), dtype=np.float32 + ) + ] + elif dynamic_input == "Depth": + converter_input_type = [ + TensorType( + shape=(6, num_features, 2, 3, RangeDim(1, 10)), dtype=np.float32 + ) + ] + elif dynamic_input == "All": + converter_input_type = [ + TensorType( + shape=( + RangeDim(1, 10), + num_features, + RangeDim(1, 10), + RangeDim(1, 10), + RangeDim(1, 10), + ), + dtype=np.float32, + ) + ] + self.run_compare_torch( + input_shape, + model, + backend=backend, + compute_unit=compute_unit, + converter_input_type=converter_input_type, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank, num_features, eps, training", + itertools.product( + [ct.ComputeUnit.CPU_ONLY], + backends, + [3, 4, 5], + [5, 1], + [0.1, 1e-05], + [True, False], + ), + ) + def test_batchnorm_dynamic( + self, compute_unit, backend, rank, num_features, eps, training + ): + model = ModuleWrapper( + nn.functional.batch_norm, + { + "training": training, + "eps": eps, + }, + ) + input_shape = [6, num_features, 3, 4, 5] + input_shape = input_shape[:rank] + _input = torch.randn(*input_shape) + _mean = torch.randn(num_features) + _var = torch.randn(num_features) + + inputs = (_input, _mean, _var) + expected_results = model(*inputs) + + self.run_compare_torch( + inputs, + model, + expected_results, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, has_weight, has_bias, has_running_mean, has_running_var", + itertools.product( + compute_units, + backends, + [True, False], + [True, False], + [True, False], + [True, False], + ), + ) + def test_batchnorm_dynamic_stress( + self, + compute_unit, + backend, + has_weight, + has_bias, + has_running_mean, + has_running_var, + ): + num_features = 5 + input_shape = (3, num_features, 2) + + weight = torch.randn(num_features) if 
has_weight else None + bias = torch.randn(num_features) if has_bias else None + running_mean = torch.randn(num_features) if has_running_mean else None + running_var = torch.randn(num_features) if has_running_var else None + + class Model(torch.nn.Module): + def forward(self, x): + res = torch.nn.functional.batch_norm( + input=x, + running_mean=running_mean, + running_var=running_var, + weight=weight, + bias=bias, + training=True, + momentum=0.0, + eps=1e-05, + ) + return res + + self.run_compare_torch( + input_shape, + Model(), + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, affine", + itertools.product(compute_units, backends, [True, False]), + ) + def test_batchnorm_1d_with_conv(self, compute_unit, backend, affine): + class CRNNBase(nn.Module): + def __init__(self, ch_in, ch_out, kernel_size=3): + super(CRNNBase, self).__init__() + self.conv = nn.Conv1d(ch_in, ch_out, kernel_size=kernel_size) + self.norm = nn.BatchNorm1d(ch_out, affine=affine) + + def forward(self, x): + x = self.conv(x) + x = self.norm(x) + return x + + model = CRNNBase(ch_in=6, ch_out=16) + self.run_compare_torch( + (1, 6, 15), + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape, eps, affine", + itertools.product( + compute_units, + backends, + [(1, 10), (4, 6), (10, 1)], + [0.1, 1e-05], + [True, False], + ), + ) + def test_batchnorm1d_rank2(self, compute_unit, backend, shape, eps, affine): + N, C = shape + batchnorm = nn.BatchNorm1d(C, eps=eps, affine=affine).eval() + self.run_compare_torch( + (N, C), + batchnorm, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape, eps, affine", + itertools.product( + compute_units, + backends, + [(4, 8, 2), (1, 5, 3), (5, 10, 1), (6, 1, 4)], + [0.1, 1e-05], + [True, False], + ), + ) + def test_batchnorm1d_rank3(self, compute_unit, backend, shape, eps, affine): + N, C, L = shape + batchnorm = nn.BatchNorm1d(C, eps=eps, affine=affine).eval() + self.run_compare_torch( + (N, C, L), + batchnorm, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestInstanceNorm(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, num_features, eps", + itertools.product(compute_units, backends, [5, 2, 1], [0.1, 1e-05]), + ) + def test_instancenorm(self, compute_unit, backend, num_features, eps): + model = nn.InstanceNorm2d(num_features, eps) + self.run_compare_torch( + (6, num_features, 5, 5), + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, num_features", + itertools.product(compute_units, backends, [5, 2, 1]), + ) + def test_instancenorm_1d(self, compute_unit, backend, num_features): + model = nn.InstanceNorm1d(num_features) + self.run_compare_torch( + (6, num_features, 10), + model, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestGroupNorm(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, group_features, eps, affine", + itertools.product( + compute_units, backends, [(16, 32), (1, 1)], [0.1, 1e-05], [True, False] + ), + ) + def test_groupnorm(self, compute_unit, backend, group_features, eps, affine): + model = nn.GroupNorm( + group_features[0], group_features[1], eps=eps, affine=affine + ) + self.run_compare_torch( + (6, group_features[1], 5, 5), + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, group_features, 
eps, affine", + itertools.product( + compute_units, backends, [(16, 32), (1, 1)], [0.1, 1e-05], [True, False] + ), + ) + def test_groupnorm_rank3_input( + self, compute_unit, backend, group_features, eps, affine + ): + model = nn.GroupNorm( + group_features[0], group_features[1], eps=eps, affine=affine + ) + self.run_compare_torch( + (6, group_features[1], 5), + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, group_features, eps, affine", + itertools.product( + compute_units, backends, [(16, 32), (1, 1)], [0.1, 1e-05], [True, False] + ), + ) + def test_groupnorm_rank2_input( + self, compute_unit, backend, group_features, eps, affine + ): + model = nn.GroupNorm( + group_features[0], group_features[1], eps=eps, affine=affine + ) + self.run_compare_torch( + (4, group_features[1]), + model, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestLinear(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, in_features, out_features, bias", + itertools.product( + compute_units, + backends, + [5], + [10], + [True, False], + ), + ) + def test_linear_rank1_input( + self, compute_unit, backend, in_features, out_features, bias + ): + model = nn.Linear(in_features, out_features, bias=bias) + self.run_compare_torch( + (in_features,), + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, in_features, out_features, bias", + itertools.product(compute_units, backends, [10, 25], [3, 6], [True, False]), + ) + def test_linear_rank2_input( + self, compute_unit, backend, in_features, out_features, bias + ): + model = nn.Linear(in_features, out_features, bias=bias) + self.run_compare_torch( + (1, in_features), + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, in_features, out_features, bias", + itertools.product(compute_units, backends, [10], [6], [True, False]), + ) + def test_linear_rank3_input( + self, compute_unit, backend, in_features, out_features, bias + ): + model = nn.Linear(in_features, out_features, bias=bias) + self.run_compare_torch( + (1, 3, in_features), + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, in_features, out_features, bias", + itertools.product(compute_units, backends, [10], [6], [True, False]), + ) + def test_linear_rank4_input( + self, compute_unit, backend, in_features, out_features, bias + ): + model = nn.Linear(in_features, out_features, bias=bias) + self.run_compare_torch((1, 5, 3, in_features), model, backend=backend) + + +class TestConv(TorchBaseTest): + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "padding", + "stride", + "length", + "in_channels", + "out_channels", + "kernel_size", + "dilation", + "bias", + ] + ), + [ + (compute_unit, backend, padding, stride, *param) + for compute_unit, backend, padding, stride, param in itertools.product( + [ct.ComputeUnit.CPU_ONLY], + backends, + ["same", "valid", 0, 1], + [1, 2, 3], + [ + (5, 1, 1, 1, 1, True), + (3, 1, 1, 1, 3, False), + (4, 3, 3, 2, 1, True), + (7, 3, 3, 1, 1, False), + (5, 3, 3, 1, 1, True), + (3, 3, 3, 1, 1, False), + (3, 3, 3, 1, 3, True), + (7, 3, 3, 2, 3, False), + ], + ) + ], + ) + def test_convolution1d( + self, + compute_unit, + backend, + padding, + stride, + length, + in_channels, + out_channels, + kernel_size, + dilation, + bias, + groups=1, + ): + if padding == "same" and stride != 1: + # 
configuration not supported + return + model = nn.Conv1d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + bias=bias, + ) + self.run_compare_torch( + (1, in_channels, length), + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "padding", + "stride", + "height", + "width", + "in_channels", + "out_channels", + "kernel_size", + "dilation", + "bias", + ] + ), + [ + (compute_unit, backend, padding, stride, *param) + for compute_unit, backend, padding, stride, param in itertools.product( + [ct.ComputeUnit.CPU_ONLY], + backends, + ["same", "valid", 1, 0], + [1, 2, 3], + [ + (5, 3, 1, 1, 1, 1, True), + (3, 3, 1, 1, 1, 3, False), + (4, 3, 3, 3, 2, 1, True), + (7, 3, 3, 3, 1, 1, False), + (5, 5, 3, 3, 1, 1, True), + (3, 5, 3, 3, 1, 1, False), + (3, 5, 3, 3, 1, 3, True), + (7, 5, 3, 3, 2, 3, False), + ], + ) + ], + ) + def test_convolution2d( + self, + compute_unit, + backend, + padding, + stride, + height, + width, + in_channels, + out_channels, + kernel_size, + dilation, + bias, + groups=1, + ): + if padding == "same" and stride != 1: + return + model = nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + bias=bias, + ) + self.run_compare_torch( + (1, in_channels, height, width), + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "padding", + "stride", + "depth", + "height", + "width", + "in_channels", + "out_channels", + "kernel_size", + "dilation", + "bias", + ] + ), + [ + (compute_unit, backend, padding, stride, *param) + for compute_unit, backend, padding, stride, param in itertools.product( + [ct.ComputeUnit.CPU_ONLY], + backends, + ["same", "valid", 1, 0], + [1, 2, 3], + [ + (5, 3, 2, 1, 1, 1, 1, True), + (3, 3, 1, 1, 1, 1, 3, False), + (4, 3, 3, 3, 3, 2, 1, True), + (7, 3, 4, 3, 3, 1, 1, False), + (5, 5, 3, 3, 3, 1, 1, True), + (3, 5, 1, 3, 3, 1, 1, False), + (3, 5, 4, 3, 3, 1, 3, True), + (7, 5, 6, 3, 3, 2, 3, False), + ], + ) + ], + ) + def test_convolution3d( + self, + compute_unit, + backend, + padding, + stride, + depth, + height, + width, + in_channels, + out_channels, + kernel_size, + dilation, + bias, + groups=1, + ): + if padding == "same" and stride != 1: + return + model = nn.Conv3d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + bias=bias, + ) + self.run_compare_torch( + (1, in_channels, depth, height, width), + model, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestDynamicConv(TorchBaseTest): + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "width", + "in_channels", + "out_channels", + "kernel_size", + "stride", + "padding", + ] + ), + [ + (compute_unit, backend, *param) + for compute_unit, backend, param in itertools.product( + compute_units, + backends, + [ + (5, 1, 1, 1, 2, 1), + (3, 1, 1, 1, 2, 3), + (4, 3, 3, 1, 2, 1), + (7, 3, 3, 1, 3, 1), + (5, 3, 3, 2, 2, 1), + (3, 3, 3, 1, 3, 1), + (3, 3, 3, 1, 3, 3), + (7, 3, 3, 3, 1, 3), + ], + ) + ], + ) + def test_convolution1d( + self, + compute_unit, + backend, + width, + in_channels, + out_channels, + kernel_size, + stride, + padding, + groups=1, + ): + class DynamicConv(nn.Module): + def forward(self, input_data, weights): + 
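# the convolution weights arrive as a second runtime input rather than a fixed parameter +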
return nn.functional.conv1d( + input_data, weights, stride=stride, padding=padding + ) + + model = DynamicConv() + input_shape = [ + (1, in_channels, width), + (out_channels, int(in_channels / groups), kernel_size), + ] + self.run_compare_torch( + input_shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "height", + "width", + "in_channels", + "out_channels", + "kernel_size", + "stride", + "padding", + "dilation", + ] + ), + [ + (compute_unit, backend, *param) + for compute_unit, backend, param in itertools.product( + compute_units, + backends, + [ + (5, 3, 1, 1, 1, 2, 0, 1), + (3, 3, 1, 1, 1, 2, 1, 3), + (4, 3, 3, 3, 1, 2, 0, 1), + (7, 3, 3, 3, 1, 3, 0, 1), + (5, 5, 3, 3, 2, 1, 0, 1), + (3, 5, 3, 3, 1, 3, 0, 1), + (3, 5, 3, 3, 1, 3, 1, 3), + (7, 5, 3, 3, 2, 3, 1, 3), + ], + ) + ], + ) + def test_convolution2d( + self, + compute_unit, + backend, + height, + width, + in_channels, + out_channels, + kernel_size, + stride, + padding, + dilation, + groups=1, + ): + class DynamicConv(nn.Module): + def forward(self, input_data, weights): + return nn.functional.conv2d( + input_data, weights, stride=stride, padding=padding + ) + + model = DynamicConv() + + input_shape = [ + (1, in_channels, height, width), + (out_channels, int(in_channels / groups), kernel_size, kernel_size), + ] + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestConvTranspose(TorchBaseTest): + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "width", + "in_channels", + "out_channels", + "kernel_size", + "stride", + "padding", + "dilation", + ] + ), + [ + (compute_unit, backend, *param) + for compute_unit, backend, param in itertools.product( + compute_units, + backends, + [ + (3, 1, 1, 1, 2, 0, 1), + (3, 1, 1, 1, 2, 1, 3), + (3, 3, 3, 1, 2, 0, 1), + (3, 3, 3, 1, 3, 0, 1), + (5, 3, 3, 1, 3, 0, 1), + (5, 3, 3, 1, 3, 0, 1), + (5, 3, 3, 1, 3, 1, 3), + (5, 3, 3, 1, 3, 1, 3), + ], + ) + ], + ) + def test_convolution_transpose1d( + self, + compute_unit, + backend, + width, + in_channels, + out_channels, + kernel_size, + stride, + padding, + dilation, + groups=1, + ): + model = nn.ConvTranspose1d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + ) + self.run_compare_torch( + (1, in_channels, width), + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "height", + "width", + "in_channels", + "out_channels", + "kernel_size", + "stride", + "padding", + "dilation", + ] + ), + [ + (compute_unit, backend, *param) + for compute_unit, backend, param in itertools.product( + compute_units, + backends, + [ + (5, 5, 1, 1, 1, 2, 0, 1), + (5, 5, 1, 1, 1, 2, 1, 3), + (5, 5, 3, 3, 1, 2, 0, 1), + (5, 5, 3, 3, 1, 3, 0, 1), + (6, 5, 3, 3, 1, 3, 0, 1), + (6, 5, 3, 3, 1, 3, 0, 1), + (6, 5, 3, 3, 1, 3, 1, 3), + (6, 5, 3, 3, 1, 3, 1, 3), + ], + ) + ], + ) + def test_convolution_transpose2d( + self, + compute_unit, + backend, + height, + width, + in_channels, + out_channels, + kernel_size, + stride, + padding, + dilation, + groups=1, + ): + model = nn.ConvTranspose2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + ) + self.run_compare_torch( + (1, in_channels, height, width), + model, + 
backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, dynamic_input", + itertools.product( + compute_units, + backends, + [True, False], + ), + ) + def test_convolution_transpose2d_dynamic_input( + self, + compute_unit, + backend, + dynamic_input, + ): + in_channels = 5 + model = nn.ConvTranspose2d( + in_channels=in_channels, + out_channels=10, + kernel_size=3, + stride=2, + padding=1, + dilation=3, + ) + in_height = 256 + in_width = 512 + input_shape = (1, in_channels, in_height, in_width) + + if dynamic_input: + converter_input_type = [ + TensorType( + shape=(1, in_channels, RangeDim(256, -1), RangeDim(256, -1)), + dtype=np.float32, + ) + ] + self.run_compare_torch( + input_shape, + model, + backend=backend, + compute_unit=compute_unit, + converter_input_type=converter_input_type, + ) + else: + self.run_compare_torch( + input_shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "height", + "width", + "in_channels", + "out_channels", + "kernel_size", + "stride", + "padding", + "dilation", + "output_padding", + ] + ), + [ + (compute_unit, backend, *param) + for compute_unit, backend, param in itertools.product( + compute_units, + backends, + [ + (5, 5, 1, 1, 1, 2, 1, 1, 1), + (5, 5, 1, 1, 1, 2, 2, 3, 2), + (5, 5, 3, 3, 1, 2, 0, 1, 0), + (5, 5, 3, 3, 1, 3, 1, 1, 1), + (6, 5, 3, 3, 1, 3, 2, 1, 2), + (6, 5, 3, 3, 1, 3, 1, 1, 1), + (6, 5, 3, 3, 1, 3, 2, 3, 2), + (6, 5, 3, 3, 1, 3, 3, 3, 3), + ], + ) + ], + ) + def test_convolution_transpose2d_output_padding( + self, + compute_unit, + backend, + height, + width, + in_channels, + out_channels, + kernel_size, + stride, + padding, + dilation, + output_padding, + groups=1, + ): + + # Output padding must be less than either stride or dilation, + # so skip the invalid combinations + if isinstance(output_padding, int): + if output_padding >= stride and output_padding >= dilation: + return + elif isinstance(output_padding, tuple): + for _output_padding in output_padding: + if _output_padding >= stride and _output_padding >= dilation: + return + + model = nn.ConvTranspose2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + output_padding=output_padding, + ) + self.run_compare_torch((1, in_channels, height, width), model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "depth", + "height", + "width", + "in_channels", + "out_channels", + "kernel_size", + "stride", + "padding", + "dilation", + ] + ), + [ + (compute_unit, backend, *param) + for compute_unit, backend, param in itertools.product( + compute_units, + backends, + [ + (3, 5, 5, 1, 1, 1, 2, 0, 1), + (3, 5, 5, 1, 1, 1, 2, 1, 3), + (3, 5, 5, 3, 3, 1, 2, 0, 1), + (3, 5, 5, 3, 3, 1, 1, 0, 2), + (4, 6, 5, 3, 3, 1, 3, 0, 1), + (4, 6, 5, 3, 3, 1, 3, 1, 2), + (4, 6, 5, 3, 3, 1, 3, 1, 3), + ], + ) + ] + + [ + pytest.param( + ct.ComputeUnit.CPU_ONLY, + "neuralnetwork", + 5, + 5, + 1, + 1, + 3, + 4, + 1, + 1, + 2, + marks=pytest.mark.xfail, + ), + pytest.param( + ct.ComputeUnit.CPU_ONLY, + "neuralnetwork", + 5, + 5, + 1, + 1, + 3, + 2, + 1, + 3, + 2, + marks=pytest.mark.xfail, + ), + ], + ) + def test_convolution_transpose3d( + self, + compute_unit, + backend, + depth, + height, + width, + in_channels, + out_channels, + kernel_size, + stride, + padding, + dilation, + ): + model = nn.ConvTranspose3d( + in_channels=in_channels, + 
+            out_channels=out_channels,
+            kernel_size=kernel_size,
+            stride=stride,
+            padding=padding,
+            dilation=dilation,
+        )
+        self.run_compare_torch(
+            (1, in_channels, depth, height, width),
+            model,
+            backend=backend,
+            compute_unit=compute_unit,
+        )
+
+
+def _is_float_value(x, threshold=0.001):
+    return x - np.floor(x) > threshold
+
+
+class TestUpsample(TorchBaseTest):
+    @pytest.mark.parametrize(
+        "compute_unit, backend, output_size, align_corners",
+        itertools.product(
+            compute_units,
+            backends,
+            [1, 3, 10, 190],
+            [True, False],
+        ),
+    )
+    def test_upsample_linear1d_with_output_size(
+        self, compute_unit, backend, output_size, align_corners
+    ):
+        input_shape = (1, 3, 10)
+        model = ModuleWrapper(
+            nn.functional.interpolate,
+            {
+                "size": output_size,
+                "mode": "linear",
+                "align_corners": align_corners,
+            },
+        )
+        self.run_compare_torch(
+            input_shape, model, backend=backend, compute_unit=compute_unit
+        )
+
+    @pytest.mark.parametrize(
+        "compute_unit, backend, scale, align_corners, recompute_scale_factor",
+        itertools.product(
+            compute_units, backends, [2, 0.5, 5.3], [True, False], [True, False]
+        ),
+    )
+    def test_upsample_linear1d_with_scales(
+        self, compute_unit, backend, scale, align_corners, recompute_scale_factor
+    ):
+        Height = 8
+        input_shape = (1, 3, Height)
+        output_h = Height * scale
+        is_h_float = _is_float_value(output_h)
+
+        if is_h_float and not align_corners and not recompute_scale_factor:
+            pytest.xfail("rdar://81124053 (Support recompute_scale_factor)")
+
+        model = ModuleWrapper(
+            nn.functional.interpolate,
+            {
+                "scale_factor": scale,
+                "mode": "linear",
+                "align_corners": align_corners,
+                "recompute_scale_factor": recompute_scale_factor,
+            },
+        )
+        self.run_compare_torch(
+            input_shape, model, backend=backend, compute_unit=compute_unit
+        )
+
+    @pytest.mark.parametrize(
+        "compute_unit, backend, scales, align_corners, recompute_scale_factor",
+        itertools.product(
+            compute_units, backends, [2, 0.7, 3.6], [True, False], [True, False]
+        ),
+    )
+    def test_upsample_linear1d_with_scales_dynamic(
+        self, compute_unit, backend, scales, align_corners, recompute_scale_factor
+    ):
+        is_float = _is_float_value(scales)
+        input_shape = (1, 3, 22)
+
+        if is_float and not align_corners and not recompute_scale_factor:
+            pytest.xfail("rdar://81124053 (Support recompute_scale_factor)")
+
+        model = ModuleWrapper(
+            nn.functional.interpolate,
+            {
+                "scale_factor": scales,
+                "mode": "linear",
+                "align_corners": align_corners,
+                "recompute_scale_factor": recompute_scale_factor,
+            },
+        )
+        converter_input_type = [
+            TensorType(shape=(1, 3, RangeDim(default=22)), dtype=np.float32)
+        ]
+        mlmodel = self.run_compare_torch(
+            input_shape,
+            model,
+            backend=backend,
+            compute_unit=compute_unit,
+            converter_input_type=converter_input_type,
+        )[1]
+
+        # Also check that the scale factors are integers.
+        if backend[0] == "neuralnetwork" and not is_float:
+            for layer in mlmodel._spec.neuralNetwork.layers:
+                if layer.WhichOneof("layer") == "upsample":
+                    assert len(layer.upsample.fractionalScalingFactor) == 0
+
+    @pytest.mark.parametrize(
+        "compute_unit, backend, output_size",
+        itertools.product(
+            compute_units,
+            backends,
+            [10, 170],
+        ),
+    )
+    def test_upsample_nearest1d_with_output_size(
+        self, compute_unit, backend, output_size
+    ):
+        input_shape = (1, 3, 10)
+        model = ModuleWrapper(
+            nn.functional.interpolate,
+            {"size": output_size, "mode": "nearest"},
+        )
+        self.run_compare_torch(
+            input_shape,
+            model,
+            backend=backend,
+            compute_unit=compute_unit,
+        )
+
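+    # Editorial note: the "*_dynamic" tests in this class all use the same
+    # recipe for flexible input shapes. A minimal sketch of that recipe
+    # outside the test harness (illustrative only; run_compare_torch wraps
+    # the equivalent converter call for these tests):
+    #
+    #     import coremltools as ct
+    #     import torch
+    #
+    #     traced = torch.jit.trace(
+    #         torch.nn.Upsample(scale_factor=2, mode="nearest"),
+    #         torch.rand(1, 3, 10, 10),
+    #     )
+    #     mlmodel = ct.convert(
+    #         traced,
+    #         inputs=[ct.TensorType(shape=(1, 3, ct.RangeDim(), ct.RangeDim()))],
+    #     )
+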
@pytest.mark.parametrize( + "compute_unit, backend, scales", + itertools.product(compute_units, backends, [2, 3, 4.5]), + ) + def test_upsample_nearest1d_with_scales(self, compute_unit, backend, scales): + if backend[0] == "neuralnetwork": + if isinstance(scales, float): + return # Skip fractional scale factors tests for neuralnetwork + + input_shape = (1, 3, 10) + model = ModuleWrapper( + nn.functional.interpolate, + {"scale_factor": scales, "mode": "nearest"}, + ) + self.run_compare_torch( + input_shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, scales", + itertools.product(compute_units, backends, [2, 3]), + ) + def test_upsample_nearest1d_with_scales_dynamic( + self, compute_unit, backend, scales + ): + input_shape = (1, 3, 10) + model = ModuleWrapper( + nn.functional.interpolate, + { + "scale_factor": scales, + "mode": "nearest", + "recompute_scale_factor": True, + }, + ) + converter_input_type = [TensorType(shape=(1, 3, RangeDim()), dtype=np.float32)] + mlmodel = self.run_compare_torch( + input_shape, + model, + backend=backend, + compute_unit=compute_unit, + converter_input_type=converter_input_type, + )[1] + + # also check if the scale factor are integers + if backend[0] == "neuralnetwork": + for layer in mlmodel._spec.neuralNetwork.layers: + if layer.WhichOneof("layer") == "upsample": + assert len(layer.upsample.fractionalScalingFactor) == 0 + + @pytest.mark.parametrize( + "compute_unit, backend, output_size, align_corners", + itertools.product( + compute_units, + backends, + [ + (10, 10), + # PyTorch has a bug for the following parameter: + # (1, 1), + # See: https://github.com/pytorch/pytorch/issues/71188 + (2, 3), + (190, 170), + ], + [True, False], + ), + ) + def test_upsample_bilinear2d_with_output_size( + self, compute_unit, backend, output_size, align_corners + ): + input_shape = (1, 3, 10, 10) + model = ModuleWrapper( + nn.functional.interpolate, + { + "size": output_size, + "mode": "bilinear", + "align_corners": align_corners, + }, + ) + self.run_compare_torch( + input_shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, scales_h, scales_w, align_corners, recompute_scale_factor", + itertools.product( + compute_units, + backends, + [2, 0.5, 4.1], + [3, 0.5, 5.3], + [True, False], + [True, False], + ), + ) + def test_upsample_bilinear2d_with_scales( + self, + compute_unit, + backend, + scales_h, + scales_w, + align_corners, + recompute_scale_factor, + ): + + Height = 8 + Width = 22 + input_shape = (1, 3, Height, Width) + output_h = Height * scales_h + output_w = Width * scales_w + is_h_float = _is_float_value(output_h) + is_w_float = _is_float_value(output_w) + + if ( + (is_h_float or is_w_float) + and not align_corners + and not recompute_scale_factor + ): + pytest.xfail("rdar://81124053 (Support recompute_scale_factor)") + + model = ModuleWrapper( + nn.functional.interpolate, + { + "scale_factor": (scales_h, scales_w), + "mode": "bilinear", + "align_corners": align_corners, + "recompute_scale_factor": recompute_scale_factor, + }, + ) + self.run_compare_torch( + input_shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, output_size", + itertools.product( + compute_units, + backends, + [(10, 10), (190, 170)], + ), + ) + def test_upsample_nearest2d_with_output_size( + self, compute_unit, backend, output_size + ): + input_shape = (1, 3, 10, 10) + model = 
ModuleWrapper( + nn.functional.interpolate, + {"size": output_size, "mode": "nearest"}, + ) + self.run_compare_torch( + input_shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, scales_h, scales_w", + itertools.product(compute_units, backends, [2, 3, 4.5], [4, 5, 5.5]), + ) + def test_upsample_nearest2d_with_scales( + self, compute_unit, backend, scales_h, scales_w + ): + if backend[0] == "neuralnetwork": + if isinstance(scales_h, float) or isinstance(scales_w, float): + return # Skip fractional scale factors tests for neuralnetwork + + input_shape = (1, 3, 10, 10) + model = ModuleWrapper( + nn.functional.interpolate, + {"scale_factor": (scales_h, scales_w), "mode": "nearest"}, + ) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, scales_h, scales_w", + itertools.product(compute_units, backends, [2, 3], [4, 5]), + ) + def test_upsample_nearest2d_with_scales_dynamic( + self, compute_unit, backend, scales_h, scales_w + ): + input_shape = (1, 3, 10, 10) + model = ModuleWrapper( + nn.functional.interpolate, + { + "scale_factor": (scales_h, scales_w), + "mode": "nearest", + "recompute_scale_factor": True, + }, + ) + converter_input_type = [ + TensorType(shape=(1, 3, RangeDim(), RangeDim()), dtype=np.float32) + ] + mlmodel = self.run_compare_torch( + input_shape, + model, + backend=backend, + compute_unit=compute_unit, + converter_input_type=converter_input_type, + )[1] + + # also check if the scale factor are integers + if backend[0] == "neuralnetwork": + for layer in mlmodel._spec.neuralNetwork.layers: + if layer.WhichOneof("layer") == "upsample": + assert len(layer.upsample.fractionalScalingFactor) == 0 + + @pytest.mark.parametrize( + "compute_unit, backend, scales_h, scales_w, align_corners, recompute_scale_factor", + itertools.product( + compute_units, + backends, + [2, 3.6], + [4, 0.7], + [True, False], + [True, False], + ), + ) + def test_upsample_bilinear2d_with_scales_dynamic( + self, + compute_unit, + backend, + scales_h, + scales_w, + align_corners, + recompute_scale_factor, + ): + + is_h_float = _is_float_value(scales_h) + is_w_float = _is_float_value(scales_w) + input_shape = (1, 3, 9, 22) + + if ( + (is_h_float or is_w_float) + and not align_corners + and not recompute_scale_factor + ): + pytest.xfail("rdar://81124053 (Support recompute_scale_factor)") + + model = ModuleWrapper( + nn.functional.interpolate, + { + "scale_factor": (scales_h, scales_w), + "mode": "bilinear", + "align_corners": align_corners, + "recompute_scale_factor": recompute_scale_factor, + }, + ) + converter_input_type = [ + TensorType( + shape=(1, 3, RangeDim(default=9), RangeDim(default=22)), + dtype=np.float32, + ) + ] + mlmodel = self.run_compare_torch( + input_shape, + model, + backend=backend, + compute_unit=compute_unit, + converter_input_type=converter_input_type, + )[1] + + # also check if the scale factor are integers + if backend[0] == "neuralnetwork" and not is_h_float and not is_w_float: + for layer in mlmodel._spec.neuralNetwork.layers: + if layer.WhichOneof("layer") == "upsample": + assert len(layer.upsample.fractionalScalingFactor) == 0 + + +class TestEmpty(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + COMMON_SHAPES, + ), + ) + def test_empty_like(self, compute_unit, backend, shape): + class TestModel(nn.Module): + def forward(self, x): + y = 
torch.empty_like(x) + # Value of y is Nondeterministic, so return length + return torch.Tensor([len(y)]) + + self.run_compare_torch( + shape, TestModel(), backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + COMMON_SHAPES, + ), + ) + def test_new_empty(self, compute_unit, backend, shape): + class TestModel(nn.Module): + def forward(self, _): + tensor = torch.ones(()) + y = tensor.new_empty(shape) + # Value of y is Nondeterministic, so return length + return torch.Tensor([len(y)]) + + self.run_compare_torch( + shape, + TestModel(), + backend=backend, + compute_unit=compute_unit, + ) + + +class TestAvgPool(TorchBaseTest): + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "input_shape", + "kernel_size", + "stride", + "padding", + "ceil_mode", + "include_pad", + ] + ), + [ + (compute_unit, backend, *param) + for compute_unit, backend, param in itertools.product( + compute_units, + backends, + [ + ((1, 3, 5), 1, 1, 0, True, True), + ((1, 3, 5), 3, 1, 0, False, True), + ((1, 3, 5), 1, 2, 1, False, False), + ((1, 3, 5), 3, 2, 1, False, True), + ((1, 3, 5), 1, 2, 0, False, True), + ((1, 3, 10), 1, 1, 1, False, False), + ((1, 3, 10), 3, 1, 0, False, False), + ((1, 3, 10), 1, 2, 1, True, True), + ((1, 3, 10), 3, 2, 0, True, False), + ((1, 3, 10), 1, 1, 1, True, True), + ], + ) + ], + ) + def test_avg_pool1d( + self, + compute_unit, + backend, + input_shape, + kernel_size, + stride, + padding, + ceil_mode, + include_pad, + ): + if padding > kernel_size / 2: + return + + model = nn.AvgPool1d( + kernel_size, + stride, + padding, + ceil_mode=ceil_mode, + count_include_pad=include_pad, + ) + self.run_compare_torch( + input_shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "input_shape", + "kernel_size", + "stride", + "padding", + "ceil_mode", + "include_pad", + ] + ), + [ + (compute_unit, backend, *param) + for compute_unit, backend, param in itertools.product( + compute_units, + backends, + [ + ((1, 3, 5, 5), 1, 1, 0, True, True), + ((1, 3, 5, 5), 3, 1, 0, False, True), + ((1, 3, 5, 5), 1, 2, 1, False, False), + ((1, 3, 5, 5), 3, 2, 1, False, True), + ((1, 3, 5, 5), 1, 2, 0, False, True), + ((1, 3, 10, 10), 1, 1, 1, False, False), + ((1, 3, 10, 10), 3, 1, 0, False, False), + ((1, 3, 10, 10), 1, 2, 1, True, True), + ((1, 3, 10, 10), 3, 2, 0, True, False), + ((1, 3, 10, 10), 1, 1, 1, True, True), + ], + ) + ], + ) + def test_avg_pool2d( + self, + compute_unit, + backend, + input_shape, + kernel_size, + stride, + padding, + ceil_mode, + include_pad, + ): + if padding > kernel_size / 2: + return + + model = nn.AvgPool2d( + kernel_size, + stride, + padding, + ceil_mode=ceil_mode, + count_include_pad=include_pad, + ) + self.run_compare_torch( + input_shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "input_shape", + "kernel_size", + "stride", + "padding", + "ceil_mode", + "include_pad", + ] + ), + [ + (compute_unit, backend, *param) + for compute_unit, backend, param in itertools.product( + compute_units, + backends, + [ + ((1, 3, 11, 5, 5), 1, 1, 0, True, True), + ((1, 3, 11, 5, 5), 3, 1, 0, False, True), + ((1, 3, 11, 5, 5), 1, 2, 1, False, False), + ((1, 3, 11, 5, 5), 3, 2, 1, False, True), + ((1, 3, 11, 5, 5), 1, 2, 0, False, True), + ((1, 3, 6, 10, 10), 1, 1, 1, False, False), + 
((1, 3, 6, 10, 10), 3, 1, 0, False, False), + ((1, 3, 6, 10, 10), 1, 2, 1, True, True), + ((1, 3, 6, 10, 10), 3, 2, 0, True, False), + ((1, 3, 6, 10, 10), 1, 1, 1, True, True), + ], + ) + ], + ) + def test_avg_pool3d( + self, + compute_unit, + backend, + input_shape, + kernel_size, + stride, + padding, + ceil_mode, + include_pad, + ): + if padding > kernel_size / 2: + return + + if include_pad and ceil_mode and stride > 1: + # skip: MIL/CoreML does not support this configuration + pytest.xfail( + "rdar://73723194 (Support 3D Avg pooling with ceil_mode=True and include_pad = True, in MIL)" + ) + model = nn.AvgPool3d( + kernel_size, + stride, + padding, + ceil_mode=ceil_mode, + count_include_pad=include_pad, + ) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestAdaptiveMaxPool(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, output_size, magnification, delta, depth, n", + itertools.product( + compute_units, + backends, + [(1, 1), (3, 2)], + [1, 2, 7], + [0, 11], + [1, 2, 3], + [1, 2], + ), + ) + def test_adaptive_max_pool2d( + self, compute_unit, backend, output_size, magnification, delta, depth, n + ): + # input_size = output_size * magnification + delta + input_size = ( + delta + magnification * output_size[0], + delta + magnification * output_size[1], + ) + in_shape = (n, depth) + input_size + model = nn.AdaptiveMaxPool2d(output_size) + self.run_compare_torch( + in_shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestAdaptiveAvgPool(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, output_size, magnification, delta, depth, n", + itertools.product( + compute_units, + backends, + [(1, 1), (3, 2)], + [1, 2, 7], + [0, 11], + [1, 2, 3], + [1, 2], + ), + ) + def test_adaptive_avg_pool2d( + self, compute_unit, backend, output_size, magnification, delta, depth, n + ): + # input_size = output_size * magnification + delta + input_size = ( + delta + magnification * output_size[0], + delta + magnification * output_size[1], + ) + in_shape = (n, depth) + input_size + model = nn.AdaptiveAvgPool2d(output_size) + self.run_compare_torch( + in_shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestMaxPool(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_shape, kernel_size, stride, padding, ceil_mode", + itertools.product( + compute_units, + backends, + [(1, 3, 15), (1, 1, 7)], + [1, 3], + [1, 2], + [0, 1], + [True, False], + ), + ) + def test_max_pool1d( + self, + compute_unit, + backend, + input_shape, + kernel_size, + stride, + padding, + ceil_mode, + ): + if padding > kernel_size / 2: + return + if ceil_mode > 0 and padding == 0 and kernel_size == 1 and stride == 2: + if input_shape[-1] % 2 == 0: + # TODO: is this a valid case? 
+ # in this case, torch adds "-inf" values at the border, post max pool operation + return + + model = nn.MaxPool1d( + kernel_size, + stride, + padding, + dilation=1, + return_indices=False, + ceil_mode=ceil_mode, + ) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, input_shape, kernel_size, stride, padding, ceil_mode", + itertools.product( + compute_units, + backends, + [(1, 3, 15, 15), (1, 1, 7, 7)], + [1, 3], + [1, 2], + [0, 1], + [True, False], + ), + ) + def test_max_pool2d( + self, + compute_unit, + backend, + input_shape, + kernel_size, + stride, + padding, + ceil_mode, + ): + if padding > kernel_size / 2: + return + if ceil_mode > 0 and padding == 0 and kernel_size == 1 and stride == 2: + for r in range(2, 4): + if input_shape[r] % 2 == 0: + # TODO: is this a valid case? + # in this case, torch adds "-inf" values at the border, post max pool operation + return + + model = nn.MaxPool2d( + kernel_size, + stride, + padding, + dilation=1, + return_indices=False, + ceil_mode=ceil_mode, + ) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, input_shape, kernel_size, stride, padding, ceil_mode", + itertools.product( + compute_units, + backends, + [(1, 3, 11, 3, 11), (1, 1, 7, 4, 7)], + [1, 3], + [1, 2], + [0, 1], + [True, False], + ), + ) + def test_max_pool3d( + self, + compute_unit, + backend, + input_shape, + kernel_size, + stride, + padding, + ceil_mode, + ): + if padding > kernel_size / 2: + return + if ceil_mode > 0 and padding == 0 and kernel_size == 1 and stride == 2: + for r in range(2, 5): + if input_shape[r] % 2 == 0: + # TODO: is this a valid case? 
+ # in this case, torch adds "-inf" values at the border, post max pool operation + return + + model = nn.MaxPool3d( + kernel_size, + stride, + padding, + dilation=1, + return_indices=False, + ceil_mode=ceil_mode, + ) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestMaximumMinimum(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_shapes, mode", + itertools.product( + compute_units, + backends, + [ + [(2, 5, 7, 3), (2, 5, 7, 3)], + [(3, 2, 9), (3, 2, 9)], + [(1, 2, 3), (1,)], + [(1,), (2, 5, 6, 7)], + [(1, 2, 1), (3, 4, 2, 5)], + ], + ["minimum", "maximum"], + ), + ) + def test_minimum_maximum(self, compute_unit, backend, input_shapes, mode): + class TestModel(torch.nn.Module): + def forward(self, x, y): + if mode == "minimum": + return torch.minimum(x, y) + elif mode == "maximum": + return torch.maximum(x, y) + else: + raise ValueError("Unsupported mode: {mode}".format(mode=mode)) + + model = TestModel() + self.run_compare_torch( + input_shapes, model, backend=backend, compute_unit=compute_unit + ) + +class TestAMaxAMin(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_shapes, mode, reduce_dim, keepdim", + itertools.product( + compute_units, + backends, + [ + [(2, 5, 7, 3)], + [(3, 2, 9)], + [(1,)], + ], + ["minimum", "maximum"], + [0, 1, 2, 3, [0, 1], [0, 1, 2], [0, 1, 2, 3]], + [True, False], + ), + ) + def test_minimum_maximum(self, compute_unit, backend, input_shapes, mode, reduce_dim, keepdim): + class TestModel(torch.nn.Module): + def forward(self, input): + if type(reduce_dim) == int: + reduce_dim_clamped = min(input.dim() - 1, reduce_dim) + else: + reduce_dim_clamped = reduce_dim[:input.dim()] + if mode == "minimum": + return torch.amin(input, reduce_dim_clamped, keepdim) + elif mode == "maximum": + return torch.amax(input, reduce_dim_clamped, keepdim) + else: + raise ValueError("Unsupported mode: {mode}".format(mode=mode)) + + model = TestModel() + self.run_compare_torch( + input_shapes, model, backend=backend, compute_unit=compute_unit + ) + + +class TestPoolSymbolicInput(TorchBaseTest): + def test_max_pool(self): + model = nn.MaxPool2d( + kernel_size=1, + stride=2, + padding=0, + dilation=1, + ceil_mode=True, + ) + input_shape = (1, 1, 11, 11) + converter_input_type = [ + TensorType(shape=(1, 1, RangeDim(), RangeDim()), dtype=np.float32) + ] + self.run_compare_torch( + input_shape, + model, + backend=backends[0], + converter_input_type=converter_input_type, + ) + + def test_avg_pool(self): + model = nn.AvgPool2d( + kernel_size=2, + stride=2, + padding=1, + count_include_pad=True, + ceil_mode=True, + ) + input_shape = (1, 2, 15, 15) + converter_input_type = [ + TensorType(shape=(1, 2, RangeDim(), RangeDim()), dtype=np.float32) + ] + self.run_compare_torch( + input_shape, + model, + backend=backends[0], + converter_input_type=converter_input_type, + ) + + +class TestLSTM(TorchBaseTest): + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "input_size", + "hidden_size", + "num_layers", + "bias", + "batch_first", + "dropout", + "bidirectional", + ] + ), + [ + (compute_unit, backend, *param) + for compute_unit, backend, param in itertools.product( + compute_units, + backends, + [ + (1, 1, 1, True, True, 0.3, True), + (1, 1, 1, False, True, 0.3, False), + (1, 1, 1, False, True, 0.3, True), + (3, 1, 5, True, False, 0.3, False), + (3, 1, 5, True, True, 0.3, True), + (3, 7, 5, True, False, 0.3, False), + (3, 7, 5, False, True, 0.3, True), + (3, 7, 5, 
False, True, 0.3, False), + ], + ) + ], + ) + def test_lstm( + self, + compute_unit, + backend, + input_size, + hidden_size, + num_layers, + bias, + batch_first, + dropout, + bidirectional, + ): + model = nn.LSTM( + input_size=input_size, + hidden_size=hidden_size, + num_layers=num_layers, + bias=bias, + batch_first=batch_first, + dropout=dropout, + bidirectional=bidirectional, + ) + SEQUENCE_LENGTH = 3 + BATCH_SIZE = 2 + model.eval() + + num_directions = int(bidirectional) + 1 + + if batch_first: + _input = torch.randn(BATCH_SIZE, SEQUENCE_LENGTH, input_size) + else: + _input = torch.randn(SEQUENCE_LENGTH, BATCH_SIZE, input_size) + + h0 = torch.randn(num_layers * num_directions, BATCH_SIZE, hidden_size) + c0 = torch.randn(num_layers * num_directions, BATCH_SIZE, hidden_size) + + inputs = (_input, (h0, c0)) + expected_results = model(*inputs) + self.run_compare_torch( + inputs, + model, + expected_results, + input_as_shape=False, + backend=backend, + ) + + +class TestRNN(TorchBaseTest): + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "input_size", + "hidden_size", + "num_layers", + "bias", + "batch_first", + "dropout", + "activation", + ] + ), + [ + (compute_unit, backend, *param) + for compute_unit, backend, param in itertools.product( + compute_units, + backends, + [ + (1, 1, 1, True, True, 0.3, "tanh"), + (1, 1, 1, False, True, 0.3, "relu"), + (1, 1, 1, False, True, 0.3, "tanh"), + (3, 1, 5, True, False, 0.3, "relu"), + (3, 1, 5, True, True, 0.3, "tanh"), + (3, 7, 5, True, False, 0.3, "relu"), + (3, 7, 5, False, True, 0.3, "relu"), + (3, 7, 5, False, True, 0.3, "tanh"), + ], + ) + ], + ) + def test_rnn( + self, + compute_unit, + backend, + input_size, + hidden_size, + num_layers, + bias, + batch_first, + dropout, + activation, + ): + SEQUENCE_LENGTH = 10 + BATCH_SIZE = 3 + model = nn.RNN( + input_size=input_size, + hidden_size=hidden_size, + num_layers=num_layers, + bias=bias, + batch_first=batch_first, + dropout=dropout, + nonlinearity=activation, + bidirectional=False, # bi-directional simple RNN not supported + ) + model.eval() + num_directions = 1 + + if batch_first: + _input = torch.randn(BATCH_SIZE, SEQUENCE_LENGTH, input_size) + else: + _input = torch.randn(SEQUENCE_LENGTH, BATCH_SIZE, input_size) + + h0 = torch.randn(num_layers * num_directions, BATCH_SIZE, hidden_size) + inputs = (_input, h0) + expected_results = model(*inputs) + + self.run_compare_torch( + inputs, + model, + expected_results, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestGRU(TorchBaseTest): + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "input_size", + "hidden_size", + "num_layers", + "bias", + "batch_first", + "sequence_length", + "bidirectional", + ] + ), + [ + (compute_unit, backend, *param) + for compute_unit, backend, param in itertools.product( + compute_units, + backends, + [ + (1, 1, 1, True, True, 10, True), + (1, 1, 1, False, True, 10, True), + (1, 1, 1, False, True, 1, False), + (3, 1, 5, True, False, 10, False), + (3, 1, 5, True, True, 10, True), + (3, 7, 5, True, True, 10, False), + (3, 7, 5, False, True, 10, True), + (3, 7, 5, False, True, 1, True), + ], + ) + ], + ) + def test_gru( + self, + compute_unit, + backend, + input_size, + hidden_size, + num_layers, + bias, + batch_first, + sequence_length, + bidirectional, + ): + DROPOUT = 0.3 + BATCH_SIZE = 3 + model = nn.GRU( + input_size=input_size, + hidden_size=hidden_size, + num_layers=num_layers, + bias=bias, + batch_first=batch_first, + 
dropout=DROPOUT, + bidirectional=bidirectional, + ) + model.eval() + num_directions = int(bidirectional) + 1 + + if batch_first: + _input = torch.randn(BATCH_SIZE, sequence_length, input_size) + else: + _input = torch.randn(sequence_length, BATCH_SIZE, input_size) + + h0 = torch.randn(num_layers * num_directions, BATCH_SIZE, hidden_size) + + inputs = (_input, h0) + expected_results = model(*inputs) + + self.run_compare_torch( + inputs, + model, + expected_results, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestLSTMWithPackedSequence(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, pack_batch_first, pad_batch_first, LSTM_batch_first, pad_value", + itertools.product( + compute_units, + backends, + [True, False], + [True, False], + [True, False], + [-1, 0], + ), + ) + def test_lstm( + self, + compute_unit, + backend, + pack_batch_first, + pad_batch_first, + LSTM_batch_first, + pad_value, + ): + from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence + + input_size = 4 + hidden_size = 6 + num_layers = 1 + + class Encoder(torch.nn.Module): + def __init__(self): + super().__init__() + self.lstm = torch.nn.LSTM( + input_size=input_size, + hidden_size=hidden_size, + num_layers=num_layers, + batch_first=LSTM_batch_first, + bidirectional=False, + dropout=0.0, + ) + + def forward(self, batch_in, seq_lengths): + packed_input = pack_padded_sequence( + batch_in, seq_lengths, batch_first=pack_batch_first + ) + output_packed, (hidden, _) = self.lstm(packed_input) + output, _ = pad_packed_sequence( + output_packed, padding_value=pad_value, batch_first=pad_batch_first + ) + return output + + SEQUENCE_LENGTH = 10 + BATCH_SIZE = 3 + model = Encoder() + model.eval() + + if pack_batch_first: + _input = torch.randn(BATCH_SIZE, SEQUENCE_LENGTH, input_size) + else: + _input = torch.randn(SEQUENCE_LENGTH, BATCH_SIZE, input_size) + + seq_lengths = torch.tensor([10, 5, 1], dtype=torch.int32) + + inputs = (_input, seq_lengths) + expected_results = model(*inputs) + self.run_compare_torch( + inputs, + model, + expected_results, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + +# Workaround for GitHub Issue #824 +# i.e. the return h_n/c_n for a converted BLSTM are mangled. +# Therefore, just look at output 'y' (for now) which is correct. 
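+# For context: nn.LSTM returns a pair (output, (h_n, c_n)), while nn.Sequential
+# can only thread a single value between stages, so the adapter below keeps
+# just the output tensor. Illustrative only:
+#
+#     out, (h_n, c_n) = nn.LSTM(7, 5, batch_first=True)(torch.rand(2, 3, 7))
+#     out.shape  # torch.Size([2, 3, 5])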
+class StripCellAndHidden(nn.Module): + def __init__(self, flagReturnTuple_): + super(StripCellAndHidden, self).__init__() + self.flagReturnTuple = flagReturnTuple_ + + def forward(self, x): + # Pass tuple, not tensor, to avoid issue in coremltools/converters/mil/frontend/torch/test/testing_utils.py on "if not expected_results:" + # Pass tensor when we need input for LSTM #2 as part of nn.Sequential() + return tuple(x[0]) if self.flagReturnTuple else x[0] + + +# Check GitHub Issue #810, assume num_layers == 2 and bidirectional == True +class TestStackedBLSTM(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_size, hidden_size, num_layers, bias, batch_first, dropout, bidirectional", + itertools.product( + compute_units, + backends, + [7], + [5], + [2], + [True, False], + [True, False], + [0.3], + [True], + ), + ) + def test_lstm( + self, + compute_unit, + backend, + input_size, + hidden_size, + num_layers, + bias, + batch_first, + dropout, + bidirectional, + ): + model = nn.Sequential( + nn.LSTM( + input_size=input_size, + hidden_size=hidden_size, + num_layers=1, + bias=bias, + batch_first=batch_first, + dropout=dropout, + bidirectional=True, + ), + StripCellAndHidden(False), + nn.LSTM( + input_size=2 * hidden_size, + hidden_size=hidden_size, + num_layers=1, + bias=bias, + batch_first=batch_first, + dropout=dropout, + bidirectional=True, + ), + StripCellAndHidden(True), + ) + + SEQUENCE_LENGTH = 3 + BATCH_SIZE = 2 + + # (seq_len, batch, input_size) + if batch_first: + _input = torch.rand(BATCH_SIZE, SEQUENCE_LENGTH, input_size) + else: + _input = torch.randn(SEQUENCE_LENGTH, BATCH_SIZE, input_size) + + # Do not use h_0/c_0 input and do not check h_n/c_n output, GitHub Issue #824 + expected_results = model(_input) + + self.run_compare_torch( + _input, + model, + expected_results, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestConcat(TorchBaseTest): + @pytest.mark.parametrize("compute_unit, backend", itertools.product(compute_units, backends)) + def test_cat_basic(self, compute_unit, backend): + class TestNet(nn.Module): + def forward(self, x): + x = torch.cat((x, x), axis=1) + return x + + model = TestNet() + self.run_compare_torch( + (1, 2, 3), + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_cat_input_types_promotion(self, compute_unit, backend): + class TestNet(nn.Module): + def forward(self, x, y): + return torch.cat((x, y), axis=1) + + input_data_x = torch.randint(low=0, high=10, size=(2, 3), dtype=torch.int32) + input_data_y = torch.rand(2, 3) + self.run_compare_torch( + [input_data_x, input_data_y], + TestNet(), + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + # This tests an edge case where the list of tensors to concatenate only + # has one item. NN throws an error for this case, hence why we have to + # run through the full conversion process to test it. 
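+    # (torch.cat((x,), axis=1) with a single tensor is simply the identity on
+    # x, which is what the converted model is compared against here.)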
+ @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_cat_single_input(self, compute_unit, backend): + class TestNet(nn.Module): + def forward(self, x): + x = torch.cat((x,), axis=1) + return x + + model = TestNet() + self.run_compare_torch( + (1, 3, 16, 16), + model, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize("compute_unit, backend", itertools.product(compute_units, backends)) + def test_cat_const_fold(self, compute_unit, backend): + class TestNet(nn.Module): + def forward(self, x): + x = torch.tensor([[[1, 2], [2, 3], [3, 4]]]) + return torch.cat((x, x), axis=1) + + model = TestNet() + mlmodel = self.run_compare_torch( + (1, 2, 3), + model, + backend=backend, + compute_unit=compute_unit, + ) + prog = mlmodel[1]._mil_program + # The `listconstruct` is folded into a single const. + assert len(prog.find_ops(op_type="const")) == 1 + + with patch.object(Var, '_is_nonreplaceable_var') as mocked_is_nonreplaceable_var: + # Mock that the input with shape [1, 3, 2] const is non-replaceable. + mocked_is_nonreplaceable_var.side_effect = ( + lambda var: var.op and var.op.op_type == "const" and var.rank == 3 + ) + mlmodel = self.run_compare_torch( + [(1, 2, 3)], + model, + backend=backend, + compute_unit=compute_unit + ) + prog = mlmodel[1]._mil_program + # The `listconstruct` is not folded so there are 3 const ops. + assert len(prog.find_ops(op_type="const")) == 3 + + @pytest.mark.parametrize("compute_unit, backend", itertools.product(compute_units, backends)) + def test_concat_alias(self, compute_unit, backend): + class Outer(torch.nn.Module): + def __init__(self, net): + super(Outer, self).__init__() + self.net = net + + def forward(self, x): + x = self.net(x) + return x + + class TestNet(nn.Module): + def forward(self, x): + x = torch.concat((x, x), axis=1) + return x + + # test passes without adding alias if `Outer` is not used + model = Outer(TestNet()) + self.run_compare_torch( + (1, 3, 16, 16), + model, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestBitwiseNot(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_type", + itertools.product( + compute_units, + backends, + ["int", "bool"], + ), + ) + def test_bitwise_not(self, compute_unit, backend, input_type): + class TestNet(nn.Module): + def forward(self, x): + return torch.bitwise_not(x) + + model = TestNet() + if input_type == "int": + torch_in = torch.tensor([1, 2, 3, -5, 0], dtype=torch.int32) + elif input_type == "bool": + torch_in = torch.tensor([True, False, True, False]) + self.run_compare_torch( + torch_in, + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + +class TestFull(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [1, 3], + ), + ) + def test_full_dynamic(self, compute_unit, backend, rank): + class FullDynamicModel(nn.Module): + def forward(self, x): + if rank == 1: + h = x[0] + x = torch.zeros(h) + elif rank == 3: + h, w, d = x[0], x[1], x[2] + x = torch.zeros(h, w, d) + return torch.full(x.shape, fill_value=3.14) + + input_shape = np.random.randint(low=2, high=6, size=rank) + torch_in = torch.tensor(input_shape, dtype=torch.int32) + model = FullDynamicModel().eval() + torch_out = model(torch_in) + self.run_compare_torch( + torch_in, + model, + expected_results=torch_out, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + 
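+    # Editorial note: in test_full_dynamic above, the *values* of the int32
+    # input select the output shape, e.g. feeding torch.tensor([2, 3, 4]) for
+    # rank 3 yields a (2, 3, 4) tensor filled with 3.14, so torch.full is
+    # exercised over a shape known only at run time.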
@pytest.mark.parametrize( + "compute_unit, backend, shape_val", + itertools.product( + compute_units, + backends, + [ + [(1,), 0.0], + [(2, 3), 3.1415], + [(1, 1, 2, 5, 1), -2.0], + ], + ), + ) + def test_full_static(self, compute_unit, backend, shape_val): + shape, val = shape_val + + class FullStaticModel(nn.Module): + def forward(self, x): + return torch.full(x.shape, fill_value=val) + + self.run_compare_torch( + shape, FullStaticModel().eval(), backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape_val", + itertools.product( + compute_units, + [ + ["neuralnetwork", "fp32", ct.target.iOS14], + ["mlprogram", "fp16", ct.target.iOS15], + ["mlprogram", "fp32", ct.target.iOS15], + ["mlprogram", "fp16", ct.target.iOS16], + ["mlprogram", "fp32", ct.target.iOS16], + ], + [ + [(1,), 0.0], + [(2, 3), 3.1415], + [(1, 1, 2, 5, 1), -2.0], + ], + ), + ) + def test_full_like(self, compute_unit, backend, shape_val): + if _macos_version() < (13, 0) and backend[2] == ct.target.iOS16: + pytest.skip("iOS16 target not available on macOS 13") + shape, val = shape_val + + class FullLikeModel(nn.Module): + def forward(self, x): + return torch.full_like(x, fill_value=val) + + self.run_compare_torch( + shape, + FullLikeModel().eval(), + backend=backend[:2], + compute_unit=compute_unit, + minimum_deployment_target=backend[2], + ) + + +class TestDim(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [ + (1,), + (2, 3), + (1, 1, 2, 5, 1), + ], + ), + ) + def test_dim(self, compute_unit, backend, shape): + class DimModel(nn.Module): + def forward(self, x): + return torch.tensor([x.dim()]) + + self.run_compare_torch( + shape, DimModel().eval(), backend=backend, compute_unit=compute_unit + ) + + +class TestNewZeros(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [1, 3], + ), + ) + def test_new_zeros_dynamic(self, compute_unit, backend, rank): + class ZerosDynamicModel(nn.Module): + def forward(self, x): + if rank == 1: + h = x[0] + x = torch.zeros(h) + elif rank == 3: + h, w, d = x[0], x[1], x[2] + x = torch.zeros(h, w, d) + return x.new_zeros(x.shape) + + input_shape = np.random.randint(low=2, high=6, size=rank) + torch_in = torch.tensor(input_shape, dtype=torch.int32) + model = ZerosDynamicModel().eval() + torch_out = model(torch_in) + self.run_compare_torch( + torch_in, + model, + expected_results=torch_out, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [ + (1,), + (2, 3), + (1, 1, 2, 5, 1), + ], + ), + ) + def test_new_zeros_static(self, compute_unit, backend, shape): + class ZerosStaticModel(nn.Module): + def __init__(self): + super(ZerosStaticModel, self).__init__() + + def forward(self, x): + return x.new_zeros(x.shape) + + self.run_compare_torch( + shape, ZerosStaticModel().eval(), backend=backend, compute_unit=compute_unit + ) + + +class TestNewFull(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [1, 3], + ), + ) + def test_new_full_dynamic(self, compute_unit, backend, rank): + class FullDynamicModel(nn.Module): + def forward(self, x): + if rank == 1: + h = x[0] + x = torch.zeros(h) + elif rank == 3: + h, w, d = x[0], x[1], x[2] + x = torch.zeros(h, w, d) + return 
x.new_full(x.shape, fill_value=3.14) + + input_shape = np.random.randint(low=2, high=6, size=rank) + torch_in = torch.tensor(input_shape, dtype=torch.int32) + model = FullDynamicModel().eval() + torch_out = model(torch_in) + self.run_compare_torch( + torch_in, + model, + expected_results=torch_out, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape_val", + itertools.product( + compute_units, + backends, + [ + [(1,), 0.0], + [(2, 3), 3.1415], + [(1, 1, 2, 5, 1), -2.0], + ], + ), + ) + def test_new_full_static(self, compute_unit, backend, shape_val): + shape, val = shape_val + + class FullStaticModel(nn.Module): + def forward(self, x): + return x.new_full(x.shape, fill_value=val) + + self.run_compare_torch( + shape, FullStaticModel().eval(), backend=backend, compute_unit=compute_unit + ) + + +class TestEye(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, eye_type", + itertools.product( + compute_units, + backends, + ["single", "double"], + ), + ) + def test(self, compute_unit, backend, eye_type): + class Model(nn.Module): + def forward(self, x): + if eye_type == "single": + eye = torch.eye(3) + return x + eye + elif eye_type == "double": + eye = torch.eye(2, 3) + return x + eye + + input_shape = (3, 3) if eye_type == "single" else (2, 3) + model = Model().eval() + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestOnes(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [1, 3], + ), + ) + def test_ones_dynamic(self, compute_unit, backend, rank): + class OnesDynamicModel(nn.Module): + def forward(self, x): + if rank == 1: + h = x[0] + x = torch.zeros(h) + elif rank == 3: + h, w, d = x[0], x[1], x[2] + x = torch.zeros(h, w, d) + return torch.ones(x.shape) + + input_shape = np.random.randint(low=2, high=6, size=rank) + torch_in = torch.tensor(input_shape, dtype=torch.int32) + model = OnesDynamicModel().eval() + torch_out = model(torch_in) + self.run_compare_torch( + torch_in, + model, + expected_results=torch_out, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [(1,), (2, 3), (1, 1, 2, 5, 1)], + ), + ) + def test_ones_static(self, compute_unit, backend, shape): + class OnesStaticModel(nn.Module): + def forward(self, x): + return torch.ones(x.shape) + + self.run_compare_torch( + shape, OnesStaticModel().eval(), backend=backend, compute_unit=compute_unit + ) + + +class TestRandint(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape, low, high", + itertools.product( + compute_units, + backends, + [(1,), (2, 3)], + [-1, 2], + [3, 5], + ), + ) + def test_randint(self, compute_unit, backend, shape, low, high): + class TestModel(nn.Module): + def forward(self, x): + y = torch.randint(low, high, x.shape) + return torch.Tensor([len(y)]) + + self.run_compare_torch( + shape, TestModel(), backend=backend, compute_unit=compute_unit + ) + + +class TestTypeAs(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, type", + itertools.product(compute_units, backends, ["int32", "float32", "bool"]), + ) + def test_type_as(self, compute_unit, backend, type): + class TestNet(nn.Module): + def forward(self, x, y): + return x.type_as(y) + + model = TestNet() + type_map = { + "int32": torch.int32, + 
"float16": torch.float16, + "float32": torch.float32, + "bool": torch.bool, + } + input = [ + torch.Tensor([0, 1, 2, 3]).to(torch.float32), + torch.Tensor([2, 3]).to(type_map[type]), + ] + self.run_compare_torch( + input, + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + +class TestReduction(TorchBaseTest): + class TestModel(nn.Module): + def __init__(self, mode, dim=None, keepdim=None): + super().__init__() + args = {"dim": dim, "keepdim": keepdim} + self.op_args = {k: v for k, v in args.items() if v is not None} + + if mode == "min": + self.op = torch.min + elif mode == "max": + self.op = torch.max + else: + raise ValueError("Unsupported mode: {mode}".format(mode=mode)) + + def forward(self, x, y=None): + if y is not None: + return self.op(x, y) + return self.op(x, **self.op_args) + + @pytest.mark.parametrize( + "compute_unit, backend, input_shape, dim, keepdim, mode", + itertools.product( + compute_units, + backends, + [(2, 2), (1, 1)], + [0, 1, None], + [True, False, None], + ["min", "max"], + ), + ) + def test_min_max(self, compute_unit, backend, input_shape, dim, keepdim, mode): + if dim is None and keepdim is not None: + pytest.skip("invalid torch.min configuration") + + input_data = torch.rand(input_shape) + model = self.TestModel(mode, dim=dim, keepdim=keepdim) + + self.run_compare_torch( + input_data, + model, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, input_shape, mode", + itertools.product(compute_units, backends, [(2, 2), (1, 1)], ["min", "max"]), + ) + def test_min_max_with_no_arguments(self, compute_unit, backend, input_shape, mode): + self.run_compare_torch( + input_shape, + self.TestModel(mode), + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, input_shape, dim, mode", + itertools.product( + compute_units, backends, [(2, 2), (1, 1)], [0, 1], ["min", "max"] + ), + ) + def test_min_max_no_keepdim(self, compute_unit, backend, input_shape, dim, mode): + input_data = torch.rand(input_shape) + model = self.TestModel(mode, dim=dim) + expected_results = model(input_data) + + self.run_compare_torch( + input_data, + model, + expected_results=expected_results, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, input_shape, mode", + itertools.product(compute_units, backends, [(2, 2), (1, 1)], ["min", "max"]), + ) + def test_min_max_two_tensors(self, compute_unit, backend, input_shape, mode): + model = self.TestModel(mode) + self.run_compare_torch( + [input_shape] * 2, model, backend=backend, compute_unit=compute_unit + ) + + +class TestLayerNorm(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_shape, eps", + itertools.product( + [ct.ComputeUnit.CPU_ONLY], + backends, + [(1, 3, 15, 15), (1, 1, 1, 1)], + [1e-5, 1e-7], + ), + ) + def test_layer_norm(self, compute_unit, backend, input_shape, eps): + model = nn.LayerNorm(input_shape, eps=eps) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestPixelShuffle(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, batch_size, CHW, r", + itertools.product( + compute_units, backends, [1, 3], [(1, 4, 4), (3, 2, 3)], [2, 4] + ), + ) + def test_pixel_shuffle(self, compute_unit, backend, batch_size, CHW, r): + C, H, W = CHW + input_shape = (batch_size, C * r * r, H, W) + model = 
nn.PixelShuffle(upscale_factor=r) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +@pytest.mark.skipif( + _macos_version() < (13, 0), reason="New functionality in macOS13/iOS16" +) +class TestPixelUnshuffle(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, batch_size, CHW, r", + itertools.product( + compute_units, backends, [1, 3], [(1, 4, 4), (3, 2, 3)], [2, 4] + ), + ) + def test_pixel_shuffle(self, compute_unit, backend, batch_size, CHW, r): + if backend[0] == "neuralnetwork": + pytest.skip("pixel_unshuffle only supported in mlprogram backend.") + + C, H, W = CHW + input_shape = (batch_size, C, H * r, W * r) + model = nn.PixelUnshuffle(downscale_factor=r) + self.run_compare_torch( + input_shape, + model, + backend=backend, + compute_unit=compute_unit, + minimum_deployment_target=ct.target.iOS16, + ) + + +class TestExpand(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shapes", + itertools.product( + compute_units, + backends, + [ + [(2, 1), (2, 2)], + [(3, 1), (-1, 4)], + [(1, 3, 4, 4), (3, 3, 4, 4)], + [(4,), (3, 4)], + [(3, 2), (1, 2, -1, 2)], + ], + ), + ) + def test_expand(self, compute_unit, backend, shapes): + input_shape, output_shape = shapes + + class TestModel(torch.nn.Module): + def forward(self, x): + return x.expand(*output_shape) + + model = TestModel() + + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_expand_dynamic_shape0(self, compute_unit, backend): + class TestModel(nn.Module): + def forward(self, x): + return x.expand(x.shape[1], x.shape[1]) + + self.run_compare_torch( + torch.arange(20).reshape((1, 20)), + TestModel(), + input_as_shape=False, + converter_input_type=[TensorType(shape=[1, ct.RangeDim()])], + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_expand_dynamic_shape1(self, compute_unit, backend): + class TestModel(nn.Module): + def forward(self, x): + return x.expand(x.shape[0], 1, x.shape[-1], x.shape[-1]) + + self.run_compare_torch( + torch.arange(20).reshape((1, 20)), + TestModel(), + input_as_shape=False, + converter_input_type=[TensorType(shape=[ct.RangeDim(), ct.RangeDim()])], + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_expand_dynamic_shape2(self, compute_unit, backend): + class TestModel(nn.Module): + def forward(self, x): + return x.expand(x.shape[-1], 1, x.shape[-1], x.shape[-1]) + + self.run_compare_torch( + torch.arange(20).reshape((1, 20)), + TestModel(), + input_as_shape=False, + converter_input_type=[TensorType(shape=[1, ct.RangeDim()])], + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_expand_dynamic_shape3(self, compute_unit, backend): + class TestModel(nn.Module): + def forward(self, x): + return x.expand(x.shape[0], 10) + + self.run_compare_torch( + torch.arange(20).reshape((20, 1)), + TestModel(), + input_as_shape=False, + converter_input_type=[TensorType(shape=[ct.RangeDim(), ct.RangeDim()])], + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, 
backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_expand_dynamic_shape_from_another_input(self, compute_unit, backend): + class TestModel(nn.Module): + def forward(self, x, y): + return x.expand(int(y[0]), int(y[1])) + + self.run_compare_torch( + [torch.arange(20).reshape((20, 1)), torch.Tensor([20, 20])], + TestModel(), + input_as_shape=False, + converter_input_type=[TensorType(shape=[ct.RangeDim(), 1])], + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, input_shapes", + itertools.product( + compute_units, + backends, + [ + [(2, 1), (2, 2)], + [(3, 1), (3, 4)], + [(1, 3, 4, 4), (3, 3, 4, 4)], + [(4,), (1, 3, 4)], + ], + ), + ) + def test_expand_as(self, compute_unit, backend, input_shapes): + class TestModel(torch.nn.Module): + def forward(self, x, y): + return x.expand_as(y) + + model = TestModel() + + self.run_compare_torch( + input_shapes, model, backend=backend, compute_unit=compute_unit + ) + + +class TestExpandDims(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank_and_axis", + itertools.product( + compute_units, + backends, + [ + (rank, axis) + for rank in range(1, 5) + for axis in range(-rank - 1, rank + 1) + ], + ), + ) + def test_unsqueeze(self, compute_unit, backend, rank_and_axis): + rank, axis = rank_and_axis + input_shape = tuple(np.random.randint(low=2, high=10, size=rank)) + model = ModuleWrapper(function=torch.unsqueeze, kwargs={"dim": axis}) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestLinspace(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, start_end, steps", + itertools.product( + compute_units, + backends, + [(-0.1, -0.7), (1, 10)], + [1, 3], + ), + ) + def test_linspace_static(self, compute_unit, backend, start_end, steps): + input_shape = tuple([steps]) + start, end = start_end + + class Model(nn.Module): + def forward(self, x): + return torch.linspace(start, end, steps) + + model = Model() + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_linspace_static_large(self, compute_unit, backend): + input_shape = tuple([1]) + + class Model(nn.Module): + def forward(self, x): + return torch.linspace(1, 2_000_000, 2_000_000) + + model = Model() + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, start_end, steps", + itertools.product( + compute_units, + backends, + [(-0.1, -0.7), (1, 10)], + [1, 2, 100], + ), + ) + def test_linspace_dynamic(self, compute_unit, backend, start_end, steps): + start, end = start_end + + class Model(nn.Module): + def forward(self, x): + return torch.linspace(x[0], x[1], steps) + + model = Model() + inputs = [torch.Tensor([start, end])] + self.run_compare_torch( + inputs, + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_linspace_static_not_fold(self, compute_unit, backend): + class Model(nn.Module): + def forward(self, x): + return torch.linspace(0, 1, 100) + + model = Model() + mlmodel = self.run_compare_torch( + [(1, 2, 3)], + model, + backend=backend, + compute_unit=compute_unit + ) + prog = mlmodel[1]._mil_program + # The linspace op 
is folded to const, so there is no range_1d op. + assert len(prog.find_ops(op_type="const")) == 1 + assert len(prog.find_ops(op_type="range_1d")) == 0 + + with patch.object(Var, '_is_nonreplaceable_var') as mocked_is_nonreplaceable_var: + # Mock that the first param to linspace is non-replaceable. + mocked_is_nonreplaceable_var.side_effect = ( + lambda var: var.op + and var.op.op_type == "const" + and var.rank == 0 + and var.val == 0 + ) + mlmodel = self.run_compare_torch( + [(1, 2, 3)], + model, + backend=backend, + compute_unit=compute_unit + ) + prog = mlmodel[1]._mil_program + # The linspace op is not folded to const, but translated to range_1d instead. + assert len(prog.find_ops(op_type="range_1d")) == 1 + + +class TestArange(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, start_end_step", + itertools.product( + compute_units, + backends, + [ + (-0.1, -0.7, -0.07), + (3, 10, 0.3), + (1, 10, 100), + (1, 300000, 1), + (1, 10, 1e-6), + ], + ), + ) + def test_arange_static(self, compute_unit, backend, start_end_step): + if start_end_step == (1, 10, 1e-6): + pytest.xfail( + "rdar://88998831 (range_1d has numerical issue when the step is small)" + ) + input_shape = tuple( + [ + 1, + ] + ) + start, end, step = start_end_step + + class Model(nn.Module): + def forward(self, x): + return torch.arange(start, end, step) + + model = Model() + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, start_end_step", + itertools.product( + compute_units, + backends, + [ + (-0.1, -0.7, -0.07), + (3, 10, 0.3), + (1, 10, 100), + (1, 300000, 1), + ], + ), + ) + def test_arange_dynamic(self, compute_unit, backend, start_end_step): + start, end, step = start_end_step + + class Model(nn.Module): + def forward(self, x): + return torch.arange(x[0], x[1], x[2]) + + model = Model() + inputs = [torch.tensor([start, end, step])] + self.run_compare_torch( + inputs, + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + +class TestEinsum(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, equation, reverse_input_order, dynamic", + itertools.product( + compute_units, + backends, + einsum_equations, + [False, True], + [False, True], + ), + ) + def test_einsum(self, compute_unit, backend, equation, reverse_input_order, dynamic): + class TestEinsum(nn.Module): + def forward(self, x, y): + return torch.einsum(equation, x, y) + if backend == ("mlprogram", "fp16"): + if equation in [ + "abc,cde->abde", + "abcd,cde->abe", + "iji,ji->j", + "jii,ijk->jk", + "ija,la->ijal", + "ia,ia->a", + "ai,ia->a", + "abi,abi->ab", + "iab,iab->ab", + "abi,bai->ba", + "ij,j->i", + "i,ij->j", + "ai,ija->aj", + "aibj,bi->jba", + "ij,jk->ik", + "abij,abjk->abik", + "aijb,bajk->abik", + "aij,aij->a", + "ija,ija->a", + "ija,jia->a", + "aijb,ajbi->ab", + "aibj,cdij->cadb", + "ijk,lmj->iklm", + "ijak,akl->aijl", + ] and dynamic: + pytest.xfail("rdar://106631543 ([Infra]Re-enable the unittests for torch einsum ops)") + + input_shapes, converter_input_type = gen_input_shapes_einsum(equation, dynamic) + + if reverse_input_order: + input_output_strings = equation.split("->") + input_strings = input_output_strings[0].split(",") + equation = ( + input_strings[1] + + "," + + input_strings[0] + + "->" + + input_output_strings[1] + ) + input_shapes = [input_shapes[1], input_shapes[0]] + if converter_input_type is not None: + converter_input_type = [converter_input_type[1], 
converter_input_type[0]] + + model = TestEinsum() + self.run_compare_torch( + input_shapes, + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=True, + converter_input_type=converter_input_type + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_einsum_with_same_input(self, compute_unit, backend): + class Einsum(nn.Module): + def forward(self, m1, m2, m3): + y1 = torch.einsum("bnhd,bdhm->bnhm", m1, m2) + y2 = torch.einsum("bnhd,bdhm->bnhm", m1, m3) + return y1, y2 + + m1 = torch.rand(1, 8, 8, 64) + m3 = torch.rand(1, 8, 128, 64).transpose(1, 3).transpose(2, 3) + m2 = m3.clone() + model = Einsum() + out = model(m1, m2, m3) + + self.run_compare_torch( + [m1, m2, m3], + Einsum(), + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + expected_results=out, + ) + + +class TestSqueeze(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank_and_axis", + itertools.product( + compute_units, + backends, + [ + (2, 1), + (2, 0), + (3, 1), + (3, None), + (4, None), + (4, 2), + (5, None), + (5, -1), + ], + ), + ) + def test_squeeze(self, compute_unit, backend, rank_and_axis): + rank, axis = rank_and_axis + input_shape = list(np.random.randint(low=2, high=10, size=rank)) + if axis is not None: + input_shape[axis] = 1 + else: + input_shape[0] = 1 + input_shape = tuple(input_shape) + model = ModuleWrapper( + function=torch.squeeze, kwargs={"dim": axis} if axis else {} + ) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestCumSum(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, axis", + itertools.product( + compute_units, + backends, + [-1, 0, 1, 2, 3], + ), + ) + def test_cumsum(self, compute_unit, backend, axis): + input_shape = list(np.random.randint(low=2, high=10, size=4)) + input_shape = tuple(input_shape) + model = ModuleWrapper(function=torch.cumsum, kwargs={"dim": axis}) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestReshape(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, output_shape", + itertools.product( + compute_units, + backends, + [ + (3, 2), + (2, -1), + (2, 1, 1, 3), + ], + ), + ) + def test_reshape(self, compute_unit, backend, output_shape): + input_shape = (2, 3) + model = ModuleWrapper(function=torch.reshape, kwargs={"shape": output_shape}) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestReshapeAs(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_output_shape", + itertools.product( + compute_units, + backends, + [ + ((6, 1, 1), (3, 2)), + ((8,), (2, 1, 1, 2, 2)), + ], + ), + ) + def test_reshape(self, compute_unit, backend, input_output_shape): + class Model(nn.Module): + def forward(self, x, ref): + return x.reshape_as(ref) + + model = Model() + input_shape, output_shape = input_output_shape + self.run_compare_torch( + [input_shape, output_shape], + model, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestFlatten(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, start_dim, end_dim, is_dynamic", + itertools.product(compute_units, backends, [2, -2, 0], [3, -1], [False, True]), + ) + def test_flatten(self, compute_unit, backend, start_dim, end_dim, is_dynamic): + input_shape = (2, 3, 4, 5) + converter_input_type = None + if is_dynamic: + converter_input_type = [ + 
TensorType( + shape=(2, 3, RangeDim(default=4), RangeDim(default=5)), + dtype=np.float32, + ) + ] + model = ModuleWrapper( + function=torch.flatten, kwargs={"start_dim": start_dim, "end_dim": end_dim} + ) + self.run_compare_torch( + input_shape, + model, + backend=backend, + compute_unit=compute_unit, + converter_input_type=converter_input_type, + ) + + +class TestGather(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank_and_axis", + itertools.product( + compute_units, backends, [(i, j) for i in range(1, 6) for j in range(0, i)] + ), + ) + def test_gather_along_axis(self, compute_unit, backend, rank_and_axis): + rank, axis = rank_and_axis + params_shape = np.random.randint(low=2, high=5, size=rank) + indices_shape = np.copy(params_shape) + indices_shape[axis] = np.random.randint(low=1, high=8) + indices = np.random.randint(0, params_shape[axis], size=indices_shape) + params_shape, indices_shape = tuple(params_shape), tuple(indices_shape) + model = ModuleWrapper( + function=torch.gather, + kwargs={"dim": axis, "index": torch.from_numpy(indices)}, + ) + self.run_compare_torch( + [params_shape], model, backend=backend, compute_unit=compute_unit + ) + + +class TestActivation(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product(compute_units, backends, COMMON_SHAPES_ALL), + ) + def test_relu(self, compute_unit, backend, shape): + model = nn.ReLU().eval() + self.run_compare_torch( + shape, + model, + backend=backend, + ) + + model = ModuleWrapper(nn.functional.relu_) + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product(compute_units, backends, COMMON_SHAPES_ALL), + ) + def test_relu6(self, compute_unit, backend, shape): + model = nn.ReLU6().eval() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, alpha, shape, single_alpha", + itertools.product( + compute_units, + backends, + [0.25, 2.0], + [(3,), (2, 6), (2, 3, 4), (2, 5, 6, 7), (2, 3, 4, 5, 6)], + [True, False], + ), + ) + def test_prelu(self, compute_unit, backend, alpha, shape, single_alpha): + if backend[0] == "mlprogram" and backend[1] == "fp16" or (len(shape) == 5): + pytest.xfail( + "rdar://92175249 ([MIL] TestActivation::test_prelu[backend=(mlprogram, fp16)] CI failure)" + ) + input_shape = shape + num_parameters = input_shape[1] if len(input_shape) >= 2 else 1 + if single_alpha: + num_parameters = 1 + model = nn.PReLU(num_parameters, alpha).eval() + mlmodel = self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + prog = mlmodel[1]._mil_program + # Unfortunately since all these tests result in a prelu with a common leakage factor, the + # prelu_to_lrelu pass optimizes them to contain leaky_relu instead. 
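+ # (With a single shared alpha the rewrite is lossless: prelu(x) = max(0, x) + alpha * min(0, x), + # which is exactly leaky_relu(x, negative_slope=alpha), hence the assertions below.)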
+ assert len(prog.find_ops(op_type="leaky_relu")) == 1 + assert len(prog.find_ops(op_type="prelu")) == 0 + + @pytest.mark.parametrize( + "compute_unit, backend, shape, alpha", + itertools.product(compute_units, backends, COMMON_SHAPES_ALL, [0.1, 2.0, 1.4]), + ) + def test_leaky_relu(self, compute_unit, backend, shape, alpha): + model = nn.LeakyReLU(negative_slope=alpha).eval() + self.run_compare_torch( + shape, + model, + backend=backend, + ) + + model = ModuleWrapper(nn.functional.leaky_relu_, {"negative_slope": alpha}) + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + COMMON_SHAPES_ALL, + ), + ) + def test_randomized_leaky_relu(self, compute_unit, backend, shape): + model = nn.RReLU(lower=0.01, upper=0.9).eval() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product(compute_units, backends, COMMON_SHAPES_ALL), + ) + def test_softmax(self, compute_unit, backend, shape): + model = nn.Softmax().eval() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, range_val", + itertools.product( + compute_units, backends, [(-1.0, 1.0), (0.0, 0.1), (1.0, 3.0), (-1.0, 6.0)] + ), + ) + def test_hardtanh(self, compute_unit, backend, range_val): + input_shape = (1, 10, 4, 5) + model = nn.Hardtanh(range_val[0], range_val[1]).eval() + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + model = ModuleWrapper( + nn.functional.hardtanh_, {"min_val": range_val[0], "max_val": range_val[1]} + ) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape, alpha", + itertools.product(compute_units, backends, COMMON_SHAPES_ALL, [0.1, 2.0, 1.4]), + ) + def test_elu(self, compute_unit, backend, shape, alpha): + model = nn.ELU(alpha).eval() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product(compute_units, backends, COMMON_SHAPES_ALL), + ) + def test_gelu(self, compute_unit, backend, shape): + model = nn.GELU().eval() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product(compute_units, backends, COMMON_SHAPES_ALL), + ) + def test_erf(self, compute_unit, backend, shape): + class ERFActivation(nn.Module): + def forward(self, x): + return torch.erf(x) + + model = ERFActivation().eval() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, backends, [(1, 10), (1, 3, 5), (1, 5, 6, 7), (1, 3, 4, 5, 6)] + ), + ) + def test_sigmoid(self, compute_unit, backend, shape): + model = nn.Sigmoid().eval() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product(compute_units, backends, COMMON_SHAPES_ALL), + ) + def test_sigmoid_hard(self, compute_unit, backend, shape): + model = nn.Hardsigmoid().eval() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + 
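# Note: nn.Softplus(beta, threshold) computes (1 / beta) * log(1 + exp(beta * x)) and, for + # numerical stability, reverts to the linear function once beta * x exceeds `threshold`. + 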
@pytest.mark.parametrize( + "compute_unit, backend, beta, threshold", + itertools.product(compute_units, backends, [1, 2, 5], [5, 10, 20]), + ) + @pytest.mark.skipif( + _macos_version() <= (10, 15), + reason="Parametric SoftPlus segfaults on macOS 10.15 and below.", + ) + def test_softplus(self, compute_unit, backend, beta, threshold): + input_shape = (1, 10, 5, 15) + model = nn.Softplus(beta, threshold).eval() + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + COMMON_SHAPES_ALL + ), + ) + def test_mish(self, compute_unit, backend, shape): + model = nn.Mish().eval() + self.run_compare_torch( + shape, + model, + backend=backend, + compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product(compute_units, backends, COMMON_SHAPES_ALL), + ) + def test_softsign(self, compute_unit, backend, shape): + model = nn.Softsign().eval() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.skipif( + condition=version_lt(torch, "1.7.0"), + reason="torch.nn.SiLU available only in PyTorch 1.7.0+", + ) + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product(compute_units, backends, [(1, 10), (1, 3, 4), (1, 4, 5, 6)]), + ) + def test_silu(self, compute_unit, backend, shape): + model = ModuleWrapper(function=torch.nn.functional.silu) + self.run_compare_torch([shape], model, backend=backend) + + @pytest.mark.parametrize( + "compute_unit, backend, rounding_mode", + itertools.product(compute_units, backends, [None, "floor", "trunc"]), + ) + def test_div(self, compute_unit, backend, rounding_mode): + model = ModuleWrapper( + function=torch.div, kwargs={"rounding_mode": rounding_mode} + ) + x1 = torch.from_numpy(np.array([2.3, 2.6, -3.6, -3.2], dtype=np.float32)) + x2 = torch.from_numpy(np.array([1.0, 1.0, 1.0, 1.0], dtype=np.float32)) + out = torch.div(x1, x2, rounding_mode=rounding_mode) + self.run_compare_torch( + [x1, x2], + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + expected_results=out, + ) + + +class TestElementWiseUnary(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape, op_string", + itertools.product( + compute_units, + backends, + [(1, 3, 5, 8)], + [ + "abs", + "acos", + "asin", + "atan", + "ceil", + "cos", + "cosh", + "exp", + "floor", + "round", + "sin", + "sinh", + "sqrt", + "square", + "tan", + "tanh", + "sign", + ], + ), + ) + def test_elementwise_no_params(self, compute_unit, backend, shape, op_string): + if not contains_op(torch, op_string): + return + if op_string == "sqrt" and compute_unit != ct.ComputeUnit.CPU_ONLY: + pytest.skip("sqrt on GPU producing nan.") + + op_func = getattr(torch, op_string) + model = ModuleWrapper(function=op_func) + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape, clamp_range", + itertools.product( + compute_units, + backends, + [(1, 3, 5, 8)], + [ + (0.0, 1.0), + (-1.0, 0.5), + (0.2, 0.7), + (None, 4.0), + (-3.0, None), + (1, 2), + (1, 3.5), + (1, -1), + ], + ), + ) + def test_clamp(self, compute_unit, backend, shape, clamp_range): + params_dict = {} + if clamp_range[0] is not None: + params_dict["min"] = clamp_range[0] + if clamp_range[1] is not None: + params_dict["max"] = clamp_range[1] + + model = ModuleWrapper(torch.clamp, 
params_dict) + self.run_compare_torch( + shape, model, backend=backend, compute_unit=compute_unit, rand_range=(-5, 5) + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_clamp_int_input(self, compute_unit, backend): + params_dict = {"min": -2, "max": 2} + input_data = torch.randint(low=-5, high=5, size=(2, 3, 4)) + model = ModuleWrapper(torch.clamp, params_dict) + self.run_compare_torch( + input_data, + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + converter_input_type=[TensorType(shape=input_data.shape, dtype=np.int32)], + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape, threshold", + itertools.product( + compute_units, + backends, + [(1, 3, 5, 8)], + [(0.0, 0.0), (0.5, 0.5), (0.5, 10), (0.9, 0.0)], + ), + ) + def test_threshold(self, compute_unit, backend, shape, threshold): + model = torch.nn.Threshold(threshold[0], threshold[1]).eval() + input_value = torch.rand(np.prod(shape)) + # make sure the values are not too close to the threshold + for i in range(len(input_value)): + if abs(input_value[i] - threshold[0]) < 0.005: + input_value[i] += 0.05 + input_value = torch.reshape(input_value, shape) + self.run_compare_torch( + input_value, + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape, op_string", + itertools.product( + compute_units, + backends, + [(1, 3, 5, 8)], + [ + "log", + "rsqrt", + "reciprocal", + ], + ), + ) + def test_elementwise_numerically_stable( + self, compute_unit, backend, shape, op_string + ): + op_func = getattr(torch, op_string) + model = ModuleWrapper(function=op_func) + self.run_compare_torch( + shape, + model, + backend=backend, + compute_unit=compute_unit, + rand_range=(20, 100), + ) + + +class TestAtan2(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product(compute_units, backends, range(1, 6)), + ) + def test_atan2(self, compute_unit, backend, rank): + model = ModuleWrapper(function=torch.atan2) + input_shape = tuple(np.random.randint(low=1, high=10, size=rank)) + TorchBaseTest.run_compare_torch( + [input_shape, input_shape], + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=True, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product(compute_units, backends, range(1, 6)), + ) + def test_atan2_x0(self, compute_unit, backend, rank): + model = ModuleWrapper(function=torch.atan2) + input_shape = tuple(np.random.randint(low=1, high=10, size=rank)) + y = generate_input_data(input_shape, rand_range=(-1.0, 1.0)) + x = torch.zeros(input_shape) + TorchBaseTest.run_compare_torch( + (y, x), + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product(compute_units, backends, range(1, 6)), + ) + def test_atan2_y0x0(self, compute_unit, backend, rank): + model = ModuleWrapper(function=torch.atan2) + input_shape = tuple(np.random.randint(low=1, high=10, size=rank)) + y = torch.zeros(input_shape) + x = torch.zeros(input_shape) + TorchBaseTest.run_compare_torch( + (y, x), + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product(compute_units, backends, range(1, 6)), + ) + def test_atan2_broadcast(self, compute_unit, backend, rank): + model = 
ModuleWrapper(function=torch.atan2) + input_shape = tuple(np.random.randint(low=1, high=10, size=rank)) + truncated_shape = list(input_shape) + while len(truncated_shape) > 1: + truncated_shape.pop(0) + TorchBaseTest.run_compare_torch( + [input_shape, truncated_shape], + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=True, + ) + TorchBaseTest.run_compare_torch( + [truncated_shape, input_shape], + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=True, + ) + + +class TestTriu(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape, diagonal", + itertools.product( + compute_units, + backends, + [(5, 5), (3, 4), (5, 1)], + [None, -1, 0, 2], + ), + ) + def test_triu(self, compute_unit, backend, shape, diagonal): + params_dict = {} + if diagonal is not None: + params_dict["diagonal"] = diagonal + model = ModuleWrapper(torch.triu, params_dict) + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + +class TestTril(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape, diagonal", + itertools.product( + compute_units, + backends, + [(5, 5), (3, 4), (5, 1)], + [None, -1, 0, 2], + ), + ) + def test_tril(self, compute_unit, backend, shape, diagonal): + params_dict = {} + if diagonal is not None: + params_dict["diagonal"] = diagonal + model = ModuleWrapper(torch.tril, params_dict) + self.run_compare_torch( + shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestMatMul(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_bmm(self, compute_unit, backend): + shape_x, shape_y = (3, 4, 5), (3, 5, 6) + model = ModuleWrapper(function=torch.bmm) + self.run_compare_torch( + [shape_x, shape_y], model, backend=backend, compute_unit=compute_unit + ) + + +class TestNumel(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_shape", + itertools.product( + compute_units, + backends, + [(1,), (2, 3)], + ), + ) + def test_numel(self, compute_unit, backend, input_shape): + class TestModel(torch.nn.Module): + def forward(self, x): + res = torch.numel(x) + return x + res + + model = TestModel() + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestSplit(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, split_size_or_sections, dim", + itertools.product(compute_units, backends, [1, 2, [1, 4]], [0, -2]), + ) + def test_split(self, compute_unit, backend, split_size_or_sections, dim): + input_shape = (5, 2) + model = ModuleWrapper( + function=torch.split, + kwargs={"split_size_or_sections": split_size_or_sections, "dim": dim}, + ) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, split_sizes, dim", + itertools.product(compute_units, backends, [[1, 4], [3, 2]], [-1, -2]), + ) + def test_split_with_sizes(self, compute_unit, backend, split_sizes, dim): + input_shape = (5, 5) + model = ModuleWrapper( + function=torch.split_with_sizes, + kwargs={"split_sizes": split_sizes, "dim": dim}, + ) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestUnbind(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, dim", + itertools.product(compute_units, backends, [0, 1, 2]), + ) + def test_unbind(self, compute_unit, backend, dim): + 
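# torch.unbind removes the given dimension and returns a tuple of slices taken along it, + # e.g. a (3, 3, 4) input unbound along dim=0 yields three tensors of shape (3, 4). + 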
input_shape = (3, 3, 4) + model = ModuleWrapper(function=torch.unbind, kwargs={"dim": dim}) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_unbind_one_dim_shape(self, compute_unit, backend): + input_shape = (1,) + dim = 0 + model = ModuleWrapper(function=torch.unbind, kwargs={"dim": dim}) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestTranspose(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape, dims", + itertools.product( + compute_units, backends, COMMON_SHAPES, [(0, 1), (-2, -1), (1, 0), (-1, -2)] + ), + ) + def test(self, compute_unit, backend, shape, dims): + model = ModuleWrapper( + function=torch.transpose, kwargs={"dim0": dims[0], "dim1": dims[1]} + ) + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + +class TestTo(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_cast_bug(self, compute_unit, backend): + if _macos_version() < (13, 0) and backend[0] == "mlprogram": + pytest.xfail("Issue fixed in iOS16/macOS13") + + class TestModel(torch.nn.Module): + def forward(self, spans, embedding): + spans = spans.float().relu().int() + + max1, _ = torch.max(spans, dim=1, keepdim=False) + max1, _ = torch.max(max1, dim=1, keepdim=False) + max2, _ = torch.max(embedding, dim=1, keepdim=False) + max2, _ = torch.max(max2, dim=1, keepdim=False) + sigmoided_scores = max1 + max2 + return sigmoided_scores + + if ( + platform.machine() == "arm64" + and compute_unit != ct.ComputeUnit.CPU_ONLY + and backend[0] == "neuralnetwork" + ): + pytest.xfail( + "rdar://98015195 ([M1 native tests] Some MIL unittests are failing on M1 native)" + ) + model = TestModel() + self.run_compare_torch( + [(1, 4, 2), (1, 6, 3)], model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_to_uint8(self, compute_unit, backend): + class TestModel(torch.nn.Module): + def forward(self, input_data): + input_data = input_data + input_data + return input_data.to(torch.uint8) + + inputs = [TensorType(name="input_data", shape=(1, 2, 3), dtype=np.int32)] + self.run_compare_torch( + inputs, TestModel(), backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, input_type", + itertools.product( + compute_units, + backends, + [np.float32, np.float16, np.int32], + ), + ) + def test_to_no_param(self, compute_unit, backend: Tuple[str], input_type): + if input_type == np.float16 and backend[0] == "neuralnetwork": + pytest.skip( + "Input float16 needs target >= iOS16, which doesn't support neuralnetwork." + ) + if input_type == np.float16 and _macos_version() < (13, 0): + pytest.skip("Input float16 needs target >= iOS16, which is not available until macOS 13.") + + class TestModel(torch.nn.Module): + def forward(self, input_data): + return input_data.to() + + inputs = [TensorType(name="input_data", shape=(1, 2, 3), dtype=input_type)] + # The float16 dtype for inputs is only supported for deployment target >= iOS16/macOS13. 
+ minimum_deployment_target = ( + ct.target.iOS16 if input_type == np.float16 else None + ) + self.run_compare_torch( + inputs, + TestModel(), + backend=backend, + compute_unit=compute_unit, + minimum_deployment_target=minimum_deployment_target, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ) + ) + def test_fold_const(self, compute_unit: ct.ComputeUnit, backend: Tuple[str, str]): + class TestModel(torch.nn.Module): + def forward(self, x): + return torch.arange(0, 3).float() + + model = TestModel() + + mlmodel = self.run_compare_torch( + [(1, 2, 3)], + model, + backend=backend, + compute_unit=compute_unit + ) + prog = mlmodel[1]._mil_program + # The range_1d op translated from `torch.arange` is folded to const. + assert len(prog.find_ops(op_type="range_1d")) == 0 + + with patch.object(Var, '_is_nonreplaceable_var') as mocked_is_nonreplaceable_var: + # Mock that only the range_1d op is not replaceable. + mocked_is_nonreplaceable_var.side_effect = ( + lambda var: var.op and "range_1d" in var.op.op_type + ) + mlmodel = self.run_compare_torch( + [(1, 2, 3)], + model, + backend=backend, + compute_unit=compute_unit + ) + prog = mlmodel[1]._mil_program + # The range_1d op translated from `torch.arange` shouldn't be folded. + assert len(prog.find_ops(op_type="range_1d")) == 1 + + +class TestSlice(TorchBaseTest): + @pytest.mark.skipif(_python_version() < (3, 6), reason="requires python 3.6") + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_dynamic_slice(self, compute_unit, backend): + class DynamicSlicer(torch.nn.Module): + def forward(self, x, context_length): + return x[context_length:, :, :] + + class Model(torch.nn.Module): + def __init__(self): + super(Model, self).__init__() + self.tokens_embedding = torch.nn.Embedding(10, 10, 0) + self.context_embedding = torch.nn.Embedding(10, 10, 0) + self.dynamic_slicer = DynamicSlicer() + + def forward(self, tokens, context, context_length): + # Core ML requires inputs of rank 1 to 5, so we use a rank-1 + # tensor for context_length. + tokens_embeddings = self.tokens_embedding(tokens) + context_embeddings = self.context_embedding(context) + embeddings = torch.cat((context_embeddings, tokens_embeddings), dim=0) + embeddings = self.dynamic_slicer( + embeddings, torch.squeeze(context_length) + ) + + return embeddings + + model = Model() + batch_size = 5 + inputs = [ + TensorType(name="tokens", shape=(10, batch_size), dtype=np.int64), + TensorType(name="context", shape=(3, batch_size), dtype=np.int64), + TensorType(name="context_length", shape=(1,), dtype=np.int32), + ] + self.run_compare_torch( + inputs, model, rand_range=(0, 8), backend=backend, compute_unit=compute_unit + ) + + +class TestRepeat(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product(compute_units, backends, range(1, 6)), + ) + def test_repeat(self, compute_unit, backend, rank): + input_shape = np.random.randint(low=2, high=6, size=rank) + repeats = np.random.randint(low=2, high=4, size=rank) + input_shape = tuple(input_shape) + + model = ModuleWrapper(function=lambda x: x.repeat(*repeats)) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product(compute_units, backends, (1, 2)), + ) + def test_repeats_with_extra_dimensions(self, compute_unit, backend, rank): + input_shape = np.random.randint(low=2, 
high=6, size=rank) + + for num_extra_dims in (1, 2): + repeats = np.random.randint(low=2, high=4, size=rank + num_extra_dims) + model = ModuleWrapper(function=lambda x: x.repeat(*repeats)) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_repeats_with_enumerated_shape_case1(self, compute_unit, backend): + class Model(nn.Module): + def forward(self, x, y): + reps = x.size(0) + return y.repeat(reps) + + enumerated_shapes = ct.EnumeratedShapes(shapes=[(1, 1), (2, 1)]) + module = Model() + inputs = [torch.tensor([[1]]), torch.tensor([2])] + + self.run_compare_torch( + inputs, + module, + input_as_shape=False, + converter_input_type=[ + ct.TensorType(shape=enumerated_shapes), + ct.TensorType(shape=(1,)), + ], + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_repeats_with_enumerated_shape_case2(self, compute_unit, backend): + class Model(nn.Module): + def forward(self, x, y): + return y.repeat(x.size(0), x.size(1)) + + enumerated_shapes = ct.EnumeratedShapes(shapes=[(1, 1), (2, 1)]) + module = Model() + inputs = [torch.tensor([[1], [2]]), torch.tensor([2])] + self.run_compare_torch( + inputs, + module, + input_as_shape=False, + converter_input_type=[ + ct.TensorType(shape=enumerated_shapes), + ct.TensorType(shape=(1,)), + ], + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_repeats_with_symbolic_shape(self, compute_unit, backend): + class Model(nn.Module): + def forward(self, x, y): + return y.repeat([x.shape[-1], 1, x.shape[0]]) + + module = Model() + inputs = [torch.tensor([[1], [2]]), torch.tensor([2])] + self.run_compare_torch( + inputs, + module, + input_as_shape=False, + converter_input_type=[ + ct.TensorType(shape=(ct.RangeDim(), ct.RangeDim())), + ct.TensorType(shape=(1,)), + ], + backend=backend, + compute_unit=compute_unit, + ) + + +class TestStd(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, unbiased", + itertools.product(compute_units, backends, [True, False]), + ) + def test_std_2_inputs(self, compute_unit, backend, unbiased): + model = ModuleWrapper(function=torch.std, kwargs={"unbiased": unbiased}) + x = torch.randn(1, 5, 10) * 3 + out = torch.std(x, unbiased=unbiased).unsqueeze(0) + self.run_compare_torch( + x, + model, + expected_results=out, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, unbiased, dim, keepdim", + itertools.product( + compute_units, backends, [True, False], [[0, 2], [1], [2]], [True, False] + ), + ) + def test_std_4_inputs(self, compute_unit, backend, unbiased, dim, keepdim): + model = ModuleWrapper( + function=torch.std, + kwargs={"unbiased": unbiased, "dim": dim, "keepdim": keepdim}, + ) + input_shape = (2, 5, 10) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestOnesLike(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [1, 3], + ), + ) + def test_ones_like_static(self, compute_unit, backend, rank): + class OnesLikeStaticModel(nn.Module): + def forward(self, x): + return torch.ones_like(x) + + 
input_shape = np.random.randint(low=2, high=6, size=rank) + input_shape = tuple(input_shape) + model = OnesLikeStaticModel() + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + [ + ["neuralnetwork", "fp32", ct.target.iOS14], + ["mlprogram", "fp16", ct.target.iOS15], + ["mlprogram", "fp32", ct.target.iOS15], + ["mlprogram", "fp16", ct.target.iOS16], + ["mlprogram", "fp32", ct.target.iOS16], + ], + [1, 3], + ), + ) + def test_ones_like_dynamic(self, compute_unit, backend, rank): + if _macos_version() < (13, 0) and backend[2] == ct.target.iOS16: + pytest.skip("iOS16 target not available before macOS 13") + + class OnesLikeDynamicModel(nn.Module): + def forward(self, x): + if rank == 1: + h = x[0] + x = torch.zeros(h) + elif rank == 3: + h, w, d = x[0], x[1], x[2] + x = torch.zeros(h, w, d) + return torch.ones_like(x) + + input_shape = np.random.randint(low=2, high=6, size=rank) + torch_in = torch.tensor(input_shape) + model = OnesLikeDynamicModel() + torch_out = model(torch_in) + self.run_compare_torch( + torch_in, + model, + expected_results=torch_out, + input_as_shape=False, + backend=backend[:2], + compute_unit=compute_unit, + minimum_deployment_target=backend[2], + ) + + +class TestZeros(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [1, 3], + ), + ) + def test_zeros_like_static(self, compute_unit, backend, rank): + class ZerosLikeStaticModel(nn.Module): + def forward(self, x): + return torch.zeros_like(x) + + input_shape = np.random.randint(low=2, high=6, size=rank) + input_shape = tuple(input_shape) + model = ZerosLikeStaticModel() + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + [ + ["neuralnetwork", "fp32", ct.target.iOS14], + ["mlprogram", "fp16", ct.target.iOS15], + ["mlprogram", "fp32", ct.target.iOS15], + ["mlprogram", "fp16", ct.target.iOS16], + ["mlprogram", "fp32", ct.target.iOS16], + ], + [1, 3], + ), + ) + def test_zeros_like_dynamic(self, compute_unit, backend, rank): + if _macos_version() < (13, 0) and backend[2] == ct.target.iOS16: + pytest.skip("iOS16 target not available before macOS 13") + + class ZerosLikeDynamicModel(nn.Module): + def forward(self, x): + if rank == 1: + h = x[0] + x = torch.zeros(h) + elif rank == 3: + h, w, d = x[0], x[1], x[2] + x = torch.zeros(h, w, d) + return torch.zeros_like(x) + + input_shape = np.random.randint(low=2, high=6, size=rank) + torch_in = torch.tensor(input_shape, dtype=torch.int32) + model = ZerosLikeDynamicModel() + torch_out = model(torch_in) + self.run_compare_torch( + torch_in, + model, + expected_results=torch_out, + input_as_shape=False, + backend=backend[:2], + compute_unit=compute_unit, + minimum_deployment_target=backend[2], + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ) + ) + def test_zeros_like_static_fold_to_const(self, compute_unit, backend): + class TestModel(nn.Module): + def forward(self, x): + x = torch.arange(0, 3) + return torch.zeros_like(x) + + model = TestModel() + mlmodel = self.run_compare_torch( + [(1, 2, 3)], + model, + backend=backend, + compute_unit=compute_unit + ) + prog = mlmodel[1]._mil_program + # The zeros_like op is folded to const, so there is no fill nor fill_like 
op. + assert len(prog.find_ops(op_type="fill")) + len(prog.find_ops(op_type="fill_like")) == 0 + + with patch.object(Var, '_is_nonreplaceable_var') as mocked_is_nonreplaceable_var: + # Mock that only shape op is not replaceable. + mocked_is_nonreplaceable_var.side_effect = ( + lambda var: var.op and var.op.op_type == "shape" + ) + mlmodel = self.run_compare_torch( + [(1, 2, 3)], + model, + backend=backend, + compute_unit=compute_unit + ) + prog = mlmodel[1]._mil_program + # The shape op is not folded to const. + assert len(prog.find_ops(op_type="fill")) + len(prog.find_ops(op_type="fill_like")) == 1 + + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [1, 3], + ), + ) + def test_zeros_static(self, compute_unit, backend, rank): + class ZerosStaticModel(nn.Module): + def forward(self, x): + if rank == 1: + return torch.zeros(1) + elif rank == 3: + return torch.zeros(2, 3, 5) + + input_shape = np.random.randint(low=2, high=6, size=rank) + input_shape = tuple(input_shape) + model = ZerosStaticModel() + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [1, 3], + ), + ) + def test_zeros_dynamic(self, compute_unit, backend, rank): + class ZerosDynamicModel(nn.Module): + def forward(self, x): + if rank == 1: + h = x[0] + x = torch.zeros(h) + elif rank == 3: + h, w, d = x[0], x[1], x[2] + x = torch.zeros(h, w, d) + return x + + input_shape = np.random.randint(low=2, high=6, size=rank) + torch_in = torch.tensor(input_shape, dtype=torch.int32) + model = ZerosDynamicModel() + torch_out = model(torch_in) + self.run_compare_torch( + torch_in, + model, + expected_results=torch_out, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ) + ) + def test_zeros_static_fold_to_const(self, compute_unit, backend): + class TestModel(nn.Module): + def forward(self, x): + return torch.zeros(2, 3, 5) + + model = TestModel() + mlmodel = self.run_compare_torch( + [(1, 2, 3)], + model, + backend=backend, + compute_unit=compute_unit + ) + prog = mlmodel[1]._mil_program + # The zeros op is folded to const. + assert len(prog.find_ops(op_type="fill")) == 0 + + with patch.object(Var, '_is_nonreplaceable_var') as mocked_is_nonreplaceable_var: + # Mock that the size parameter to torch.zeros is non-replaceable. + mocked_is_nonreplaceable_var.side_effect = ( + lambda var: var.op and var.rank == 1 and np.all(var.val == [2, 3, 5]) + ) + mlmodel = self.run_compare_torch( + [(1, 2, 3)], + model, + backend=backend, + compute_unit=compute_unit + ) + prog = mlmodel[1]._mil_program + # The zeros op is not folded to const. 
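+ # (Marking the size const [2, 3, 5] non-replaceable blocks const folding, so the fill op survives.)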
+ assert len(prog.find_ops(op_type="fill")) == 1 + + +class TestTopk(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, largest, sort, dynamic, shape_dim_k", + itertools.product( + compute_units, + backends, + [True, False], + [True, False], + [True, False], + [((4, 6, 7, 3), -1, 2), ((10, 3, 4), 2, 2), ((5,), 0, 2)], + ), + ) + def test_topk(self, compute_unit, backend, largest, sort, shape_dim_k, dynamic): + if not sort and backend[0] == "neuralnetwork": + pytest.xfail("iOS16 version topk needed for sort = False") + if not sort and _macos_version() < (13, 0): + pytest.skip("New functionality in macOS13/iOS16") + + input_shape = shape_dim_k[0] + dim = shape_dim_k[1] + k = shape_dim_k[2] + + class TopkModel(nn.Module): + def forward(self, x, y): + if dynamic: + nonlocal k + k = torch.min(y) + topk = torch.topk(x, k, dim=dim, largest=largest, sorted=sort) + values, indices = topk.values, topk.indices + if not sort: + values, _ = torch.sort(values, dim=dim) + indices, _ = torch.sort(indices, dim=dim) + return values, indices, y + 1 + + input_data = torch.rand(input_shape) + k_list = torch.tensor([k + 1, k, k + 2]) + + model = TopkModel() + expected_results = model(input_data, k_list) + self.run_compare_torch( + [input_data, k_list], + model, + expected_results=expected_results, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + minimum_deployment_target=ct.target.iOS16 if not sort else None, + ) + + +class TestLog10(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product(compute_units, backends, range(1, 6)), + ) + def test_log10(self, compute_unit, backend, rank): + class Log10Model(nn.Module): + def forward(self, x): + return torch.log10(x) + + input_shape = tuple(np.random.randint(low=1, high=10, size=rank)) + model = Log10Model() + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestLog2(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product(compute_units, backends, range(1, 6)), + ) + def test_log2(self, compute_unit, backend, rank): + class Log2Model(nn.Module): + def __init__(self): + super(Log2Model, self).__init__() + + def forward(self, x): + return torch.log2(x) + + input_shape = tuple(np.random.randint(low=1, high=10, size=rank)) + model = Log2Model() + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestFlip(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank_dim", + itertools.product( + compute_units, + backends, + [(1, [0]), (2, [0, 1]), (3, [1]), (4, [0, 1, 2, 3])], + ), + ) + def test_flip(self, compute_unit, backend, rank_dim): + rank, dim = rank_dim + + class FlipModel(nn.Module): + def forward(self, x): + return torch.flip(x, dim) + + input_shape = tuple(np.random.randint(low=1, high=10, size=rank)) + model = FlipModel() + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestBitWiseLogical(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, x_y, op_string", + itertools.product( + compute_units, + backends, + [ + ([True, False, True, False], [True, True, False, False]), + ([[True, False], [True, False]], [[True, True], [False, False]]), + ([[True, False], [True, False]], [[1, 0], [2, 1]]), + ([-1.5, 0.0, 1.0, 0.0], [0.1, 2.5, 0.0, 0.0]), + ([2, 0, -1, 0, 5], [1, 1, 0, 0, -5]), + ], + [ + "eq", + "ne", + ], + ), + ) + def 
test_bitwise_logical(self, compute_unit, backend, x_y, op_string): + if not contains_op(torch, op_string): + return + op_func = getattr(torch, op_string) + model = ModuleWrapper(function=op_func) + x = torch.tensor(x_y[0]) + y = torch.tensor(x_y[1]) + self.run_compare_torch( + [x, y], + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + +class TestLogicalAnd(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, x_y", + itertools.product( + compute_units, + backends, + [ + ([True, False, True, False], [True, True, False, False]), + ([[True, False], [True, False]], [[True, True], [False, False]]), + ([-1.5, 0.0, 1.0, 0.0], [0.1, 2.5, 0.0, 0.0]), + ([2, 0, -1, 0, 5], [1, 1, 0, 0, -5]), + ], + ), + ) + def test_logical_and(self, compute_unit, backend, x_y): + class TestNet(nn.Module): + def forward(self, x, y): + return torch.logical_and(x, y) + + model = TestNet() + x = torch.tensor(x_y[0]) + y = torch.tensor(x_y[1]) + self.run_compare_torch( + [x, y], + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + +class TestLogicalOr(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, x_y", + itertools.product( + compute_units, + backends, + [ + ([True, False, True, False], [True, True, False, False]), + ([[True, False], [True, False]], [[True, True], [False, False]]), + ([-1.5, 0.0, 1.0, 0.0], [0.1, 2.5, 0.0, 0.0]), + ([2, 0, -1, 0, 5], [1, 1, 0, 0, -5]), + ], + ), + ) + def test_logical_or(self, compute_unit, backend, x_y): + class TestNet(nn.Module): + def forward(self, x, y): + return torch.logical_or(x, y) + + model = TestNet() + x = torch.tensor(x_y[0]) + y = torch.tensor(x_y[1]) + self.run_compare_torch( + [x, y], + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + +class TestLogicalXor(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, x_y", + itertools.product( + compute_units, + backends, + [ + ([True, False, True, False], [True, True, False, False]), + ([[True, False], [True, False]], [[True, True], [False, False]]), + ([-1.5, 0.0, 1.0, 0.0], [0.1, 2.5, 0.0, 0.0]), + ([2, 0, -1, 0, 5], [1, 1, 0, 0, -5]), + ], + ), + ) + def test_logical_xor(self, compute_unit, backend, x_y): + class TestNet(nn.Module): + def forward(self, x, y): + return torch.logical_xor(x, y) + + model = TestNet() + x = torch.tensor(x_y[0]) + y = torch.tensor(x_y[1]) + self.run_compare_torch( + [x, y], + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + +class TestWhere(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product(compute_units, backends, [(2, 6), (3, 4, 5)]), + ) + def test_where_test1(self, compute_unit, backend, shape): + class WhereModel(nn.Module): + def forward(self, x, y): + return torch.where(x > 0.5, x, y) + + input_shape = [shape, shape] + model = WhereModel() + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product(compute_units, backends, [(2, 6), (3, 4, 5)]), + ) + def test_where_test2(self, compute_unit, backend, shape): + class WhereModel(nn.Module): + def forward(self, cond, x, y): + return torch.where(cond, x, y) + + cond = torch.rand(*shape) > 0.5 + inputs = [cond, torch.rand(*shape), torch.rand(*shape)] + model = WhereModel() + expected_results = model(*inputs) + self.run_compare_torch( + inputs, + model, + backend=backend, + 
compute_unit=compute_unit, + expected_results=expected_results, + input_as_shape=False, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shapes", + itertools.product( + compute_units, + backends, + [ + [(1, 2), (1, 2), (1, 1)], + [(1, 2, 3), (1, 1, 1), (1, 1, 3)], + ], + ), + ) + def test_where_test3(self, compute_unit, backend, shapes): + class WhereModel(nn.Module): + def forward(self, cond, x, y): + return torch.where(cond, x, y) + + cond_shape, x_shape, y_shape = shapes + cond = torch.rand(*cond_shape) > 0.5 + inputs = [cond, torch.rand(*x_shape), torch.rand(*y_shape)] + model = WhereModel() + expected_results = model(*inputs) + self.run_compare_torch( + inputs, + model, + backend=backend, + compute_unit=compute_unit, + expected_results=expected_results, + input_as_shape=False, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product(compute_units, backends, COMMON_SHAPES + [(10,)]), + ) + def test_where_single_param(self, compute_unit, backend, shape): + class WhereModelSingleParam(nn.Module): + def forward(self, x): + return torch.where(x) + + # Create a tensor of given shape with ~90% zero entries + x = np.zeros(shape) + all_indices = list(zip(*np.where(x == 0))) + num_indices = len(all_indices) + random_picks = np.random.choice( + np.arange(num_indices), size=num_indices // 10, replace=False + ) + for i in random_picks: + x[all_indices[i]] = np.random.choice([-1, 12, 100]) + x = torch.Tensor(x) + + self.run_compare_torch( + x, + WhereModelSingleParam(), + backend=backend, + input_as_shape=False, + compute_unit=compute_unit, + ) + + +class TestSelect(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, dim_index", + itertools.product( + compute_units, + backends, + [ + [0, 0], + [1, 1], + [-1, -1], + ], + ), + ) + def test_select(self, compute_unit, backend, dim_index): + dim, index = dim_index + + class SelectModel(nn.Module): + def forward(self, x): + return x.select(dim, index) + + input_shape = (1, 2, 3) + model = SelectModel() + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestNonZero(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, as_tuple", + itertools.product( + compute_units, + backends, + [1, 3], + [False, True], + ), + ) + def test_non_zero(self, compute_unit, backend, rank, as_tuple): + + if rank == 1: + input_shape = 10 + zeros_indices = np.array([1, 4, 7, 9]) + elif rank == 3: + input_shape = (2, 7, 3) + zeros_indices = np.array([1, 12, 33, 40]) + + input = np.arange(np.prod(input_shape)).astype(np.float32) + input[zeros_indices] = 0 + input = np.reshape(input, input_shape) + input = torch.tensor(input) + + model = ModuleWrapper( + torch.nonzero, + {"as_tuple": as_tuple}, + ) + + self.run_compare_torch( + input, + model, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestTorchTensor(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [0, 1, 2, 3, 4, 5], + ), + ) + def test_torch_tensor(self, compute_unit, backend, rank): + class Model(nn.Module): + def __init__(self, rank): + super(Model, self).__init__() + self.rank = rank + + def forward(self, x): + with torch.no_grad(): + if self.rank == 0: + res = self.generate_tensor_rank_0(x) + return torch.unsqueeze(res, 0) + if self.rank == 1: + return self.generate_tensor_rank_1(x) + if self.rank == 2: + return self.generate_tensor_rank_2(x) + if self.rank == 3: + 
return self.generate_tensor_rank_3(x) + if self.rank == 4: + return self.generate_tensor_rank_4(x) + if self.rank == 5: + return self.generate_tensor_rank_5(x) + + @torch.jit.script + def generate_tensor_rank_0(x): + _, _, _, w = x.shape + return torch.tensor(w, dtype=torch.int32) + + @torch.jit.script + def generate_tensor_rank_1(x): + _, _, h, w = x.shape + return torch.tensor([h, w, 0, 1], dtype=torch.int32) + + @torch.jit.script + def generate_tensor_rank_2(x): + _, _, h, w = x.shape + return torch.tensor([[0, h], [h, w], [w, w]], dtype=torch.float32) + + @torch.jit.script + def generate_tensor_rank_3(x): + _, _, h, w = x.shape + return torch.tensor([[[h, 1]], [[3, w]]], dtype=torch.int32) + + @torch.jit.script + def generate_tensor_rank_4(x): + _, _, h, w = x.shape + return torch.tensor( + [ + [[[h, h], [h, w]], [[w, w], [w, 1]]], + [[[0, 0], [1, 1]], [[0, h], [h, w]]], + ], + dtype=torch.float32, + ) + + @torch.jit.script + def generate_tensor_rank_5(x): + _, _, h, w = x.shape + return torch.tensor( + [[[[[h, w], [w, w]], [[1, 1], [0, h]]]]], dtype=torch.float32 + ) + + shape = (1, 1, 3, 4) + model = Model(rank) + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, torch_op", + itertools.product( + compute_units, + backends, + [ + torch.abs, + torch.acos, + torch.asin, + torch.atan, + torch.atanh, + torch.ceil, + torch.cos, + torch.cosh, + torch.exp, + torch.exp2, + torch.floor, + torch.round, + torch.rsqrt, + torch.sign, + torch.sin, + torch.sinh, + torch.sqrt, + torch.square, + torch.tan, + torch.tanh, + ], + ), + ) + def test_torch_rank0_tensor(self, compute_unit, backend, torch_op): + class Model(nn.Module): + def forward(self, x: torch.Tensor) -> torch.Tensor: + return torch_op(torch.tensor(0.1)) + + model = Model() + self.run_compare_torch( + torch.tensor([1.0, 2.0, 3.0]), + model, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestTensorAssign(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_tensor_assign_case_1(self, compute_unit, backend): + # single dimension assignment for a 1D tensor + class TensorAssignModel(torch.nn.Module): + def forward(self, x): + x[0] = 0 + x[1] = 1 + y = x + 1 + x[1] = 2 * y[1] + return x, y + + shape = (5,) + model = TensorAssignModel() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_tensor_assign_case_2(self, compute_unit, backend): + # single dimension assignment for two 1D tensors + class TensorAssignModel(torch.nn.Module): + def forward(self, x, y): + x[0] = 0 + y[1] = 2 + y = x + y + x = 2 * y + y[3] = x[1] + 5 + y[0] = x[0] * 10 + z = x + y + return z, x, y + + shape = (5,) + model = TensorAssignModel() + self.run_compare_torch( + [shape, shape], model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [ + (5, 4), + (5, 4, 3), + ], + ), + ) + def test_tensor_assign_case_3(self, compute_unit, backend, shape): + # broadcast assignment for two n-D tensors + class TensorAssignModel(torch.nn.Module): + def __init__(self): + super(TensorAssignModel, self).__init__() + + def forward(self, x, y): + x[0] = 0 + x[3] = 1 + y[2] = 2 + return x + + model = 
TensorAssignModel() + self.run_compare_torch( + [shape, shape], model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_tensor_assign_case_4(self, compute_unit, backend): + # single dimension assignment for two n-D tensors + class TensorAssignModel(torch.nn.Module): + def forward(self, x, y): + x[0] = torch.tensor([1.0, 2.0, 3.0, 4.0]) + x[3] = 1 + y[0] = x[0] + return x, y + + shape = (5, 4) + model = TensorAssignModel() + self.run_compare_torch( + [shape, shape], model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_tensor_assign_case_5(self, compute_unit, backend): + # slice dimension assignment + class TensorAssignModel(torch.nn.Module): + def forward(self, x): + x[:, 1] = torch.tensor([1.0, 2.0]) + return x + + shape = (2, 10) + model = TensorAssignModel() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_tensor_assign_case_6(self, compute_unit, backend): + # a more complicated slice dimension assignment + class TensorAssignModel(torch.nn.Module): + def forward(self, x): + x[:, 1, :] = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).view(2, 3) + return x + + shape = (2, 10, 3) + model = TensorAssignModel() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, dynamic", + itertools.product( + compute_units, + backends, + [True, False], + ), + ) + def test_tensor_assign_case_7(self, compute_unit, backend, dynamic): + # general case + class TensorAssignModel(torch.nn.Module): + def forward(self, x): + x[:1, 1, :1] = torch.tensor([1.0]).view(1, 1) + x[0, 1, 2] = 6. 
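+ # The strided ranges below exercise begin/end/step handling in slice assignment; each assigned + # tensor is viewed to match the extents of the slice it fills.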
+ x[:2, 2:8:2, 1:2] = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).view(2, 3, 1) + x[:, 1:10:8, 1:3] = torch.tensor([1.0, 2.0, 3.0, 4.0]).view(2, 1, 2) + return x + + shape = (2, 10, 3) + model = TensorAssignModel() + if dynamic: + converter_input_type = [ct.TensorType(shape=(ct.RangeDim(), ct.RangeDim(), ct.RangeDim()))] + else: + converter_input_type = None + self.run_compare_torch( + shape, + model, + converter_input_type=converter_input_type, + backend=backend, + compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, dynamic", + itertools.product( + compute_units, + backends, + [True, False], + ), + ) + def test_tensor_assign_case_8(self, compute_unit, backend, dynamic): + # general case with dynamic begin and end + class TensorAssignModel(torch.nn.Module): + def forward(self, x, begin_0, begin_1, end_1): + x[:1, begin_0:begin_0+5:2, 2] = torch.tensor([1.0, 2.0, 3.0]).view(1, 3) + x[:, 4, begin_1:end_1] = torch.tensor([1.0]).view(1, 1) + return x + + shape = (2, 10, 3) + model = TensorAssignModel() + if dynamic: + converter_input_type = [ + ct.TensorType(shape=(ct.RangeDim(), ct.RangeDim(), ct.RangeDim())), + ct.TensorType(shape=(1,), dtype=np.int32), + ct.TensorType(shape=(1,), dtype=np.int32), + ct.TensorType(shape=(1,), dtype=np.int32), + ] + else: + converter_input_type = None + + inputs = [ + torch.rand(*shape), + torch.as_tensor([1], dtype=torch.int32), + torch.as_tensor([1], dtype=torch.int32), + torch.as_tensor([2], dtype=torch.int32), + ] + + torch_inputs = [torch.clone(x) for x in inputs] + expected_results = model(*torch_inputs) + + self.run_compare_torch( + inputs, + model, + expected_results=expected_results, + input_as_shape=False, + converter_input_type=converter_input_type, + backend=backend, + compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_tensor_assign_type_compatibility(self, compute_unit, backend): + class TensorAssignModel(torch.nn.Module): + def forward(self, x): + x[:, 1] = torch.tensor([1, 2], dtype=torch.int32) + return x + + shape = (2, 3) + model = TensorAssignModel() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + +class TestIndexPut(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_index_put_case_1(self, compute_unit, backend): + class IndexPutModel(torch.nn.Module): + def forward(self, x, y): + y = x + 1 + mask = torch.tensor([True, False, False, False, True, True]).view(3, 2) + x[mask] = y[mask] + return x + + shape = (3, 2) + model = IndexPutModel() + self.run_compare_torch( + [shape, shape], model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product( + compute_units, + backends, + [0, 1], + ), + ) + def test_index_put_case_2(self, compute_unit, backend, rank): + class IndexPutModel(torch.nn.Module): + def forward(self, x): + mask = torch.tensor([True, False, False, False, True, True]).view(3, 2) + if rank == 0: + x[mask] = 0.0 + if rank == 1: + x[mask] = torch.tensor([1.0]) + return x + + shape = (3, 2) + model = IndexPutModel() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_index_put_case_3(self, compute_unit, backend): + if _macos_version() < (13, 
0): + pytest.skip("Issue fixed in iOS16/macOS13") + + class IndexPutModel(torch.nn.Module): + def forward(self, x, y): + mask = y > 1 + x[mask] = 0.0 + return x + + inputs = [ + torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6]), + torch.Tensor([0.0, 0.0, 0.0, 0.0, 0.0, 0.0]), + ] + model = IndexPutModel() + self.run_compare_torch( + inputs, + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank, accumulate", + itertools.product(compute_units, backends, [1, 2], [True, False]), + ) + def test_index_put_case_4(self, compute_unit, backend, rank, accumulate): + class IndexPutModel(torch.nn.Module): + def forward(self, x, indices, values): + x.index_put_(tuple(indices.t()), values, accumulate=accumulate) + return x + + if rank == 1: + inputs = [ + torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6]), + torch.LongTensor([[0], [4]]), + torch.Tensor([3.0, 7.0]), + ] + elif rank == 2: + inputs = [ + torch.ones([3, 4]), + torch.LongTensor([[0, 1], [1, 2], [2, 2]]), + torch.Tensor([1.0, 5.0, 8.0]), + ] + + model = IndexPutModel() + self.run_compare_torch( + inputs, + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + +class TestIndex(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [ + (10,), + (3, 4, 5, 6), + ], + ), + ) + def test_index_bool_indices(self, compute_unit, backend, shape): + rank = len(shape) + + class IndexModel(torch.nn.Module): + def __init__(self, axis): + super().__init__() + self.axis = axis + + def forward(self, x, y): + index = y > 0.5 + if self.axis == 0: + return x[index] + elif self.axis == 1: + return x[:, index] + elif self.axis == 2: + return x[:, :, index] + else: + assert self.axis == 3 + return x[:, :, :, index] + + for index_rank in range(1, rank + 1): + for axis in range(rank + 1 - index_rank): + input_data = torch.randn(*shape, dtype=torch.float32) + ref_data_shape = shape[axis:axis+index_rank] + ref_data = torch.rand(ref_data_shape) + # We set the first element to 0.6, so that we can make sure at least one element is selected, + # and ensure no empty tensors are produced. 
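+ # (forward computes index = y > 0.5, so ref_data[0] = 0.6 guarantees at least one True entry.)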
+ ref_data[0] = 0.6 + + model = IndexModel(axis=axis) + self.run_compare_torch( + [input_data, ref_data], + model, + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [ + (1, 2), + (3, 4, 5, 6), + ], + ), + ) + def test_index_int_index_case_1(self, compute_unit, backend, shape): + # all elements are selected + class IndexModel(torch.nn.Module): + def forward(self, x): + if len(shape) == 2: + return x[:, :] + elif len(shape) == 4: + return x[:] + + model = IndexModel() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [ + (1, 2), + (3, 4, 5, 6), + ], + ), + ) + def test_index_int_index_case_2(self, compute_unit, backend, shape): + # only one axis is sliced + class IndexModel(torch.nn.Module): + def forward(self, x): + if len(shape) == 2: + index = torch.tensor([0]) + return x[index, :] + elif len(shape) == 4: + index = torch.tensor([1, 2]) + return x[:, :, index] + + model = IndexModel() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [ + (1, 2, 3), + (2, 3, 4, 5), + ], + ), + ) + def test_index_int_index_case_3(self, compute_unit, backend, shape): + # only two axes are sliced, and connected + class IndexModel(torch.nn.Module): + def forward(self, x): + if len(shape) == 3: + index_1 = torch.tensor([0]) + index_2 = torch.tensor([1]) + return x[index_1, index_2, :] + + elif len(shape) == 4: + index_1 = torch.tensor([0, 1, 1]) + index_2 = torch.tensor([2, 1, 0]) + return x[:, index_1, index_2, :] + + model = IndexModel() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [ + (1, 2, 3), + (2, 3, 4, 5), + ], + ), + ) + def test_index_int_index_case_4(self, compute_unit, backend, shape): + # only two axes are sliced, and not connected + class IndexModel(torch.nn.Module): + def forward(self, x): + if len(shape) == 3: + index_1 = torch.tensor([0]) + index_2 = torch.tensor([1]) + return x[index_1, :, index_2] + + elif len(shape) == 4: + index_1 = torch.tensor([0, 1, 1]) + index_2 = torch.tensor([3, 3, 4]) + return x[index_1, :, :, index_2] + + model = IndexModel() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [ + (1, 2, 3), + (2, 3, 4, 5), + ], + ), + ) + def test_index_int_index_case_5(self, compute_unit, backend, shape): + # all axes are sliced + class IndexModel(torch.nn.Module): + def forward(self, x): + if len(shape) == 3: + index_1 = torch.tensor([0]) + index_2 = torch.tensor([1]) + index_3 = torch.tensor([2]) + return x[index_1, index_2, index_3] + + elif len(shape) == 4: + index_1 = torch.tensor([0, 1, 1, 0, 0]) + index_2 = torch.tensor([1, 2, 0, 0, 0]) + index_3 = torch.tensor([0, 1, 2, 3, 3]) + index_4 = torch.tensor([2, 1, 0, 4, 4]) + return x[index_1, index_2, index_3, index_4] + + model = IndexModel() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + 
itertools.product( + compute_units, + backends, + [ + (1, 2), + (3, 4, 5, 6), + ], + ), + ) + def test_index_int_index_case_6(self, compute_unit, backend, shape): + # only one axis is sliced + nd mode + class IndexModel(torch.nn.Module): + def forward(self, x): + if len(shape) == 2: + index = torch.tensor([0, 0, 0, 0, 0, 0]) + index = index.view(2, 3) + return x[index, :] + elif len(shape) == 4: + index = torch.tensor([0, 1, 2, 3, 0, 1]) + index = index.view(3, 2) + return x[:, index] + + model = IndexModel() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [ + (1, 2, 3), + (2, 3, 4, 5), + ], + ), + ) + def test_index_int_index_case_7(self, compute_unit, backend, shape): + # two axes are sliced, and connected + nd mode + class IndexModel(torch.nn.Module): + def forward(self, x): + if len(shape) == 3: + index_1 = torch.tensor([0, 0, 0, 0, 0, 0, 0, 0]).view(4, 2) + index_2 = torch.tensor([1, 0, 0, 0, 1, 1, 1, 1]).view(4, 2) + return x[index_1, index_2, :] + + elif len(shape) == 4: + index_1 = torch.tensor([0, 0, 2, 2, 1, 1, 2, 0]).view(2, 4) + index_2 = torch.tensor([0, 1, 2, 3, 0, 1, 2, 3]).view(2, 4) + return x[:, index_1, index_2, :] + + model = IndexModel() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [ + (1, 2, 3), + (2, 3, 4, 5), + ], + ), + ) + def test_index_int_index_case_8(self, compute_unit, backend, shape): + # two axes are sliced, and not connected + nd mode + class IndexModel(torch.nn.Module): + def forward(self, x): + if len(shape) == 3: + index_1 = torch.tensor([0, 0, 0, 0, 0, 0, 0, 0]).view(2, 4) + index_2 = torch.tensor([1, 0, 0, 2, 2, 1, 1, 1]).view(2, 4) + return x[index_1, :, index_2] + + elif len(shape) == 4: + index_1 = torch.tensor([0, 1, 1, 1, 1, 1, 0, 0]).view(4, 2) + index_2 = torch.tensor([0, 1, 2, 3, 4, 0, 1, 2]).view(4, 2) + return x[index_1, :, :, index_2] + + model = IndexModel() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [ + (1, 2, 3), + (2, 3, 4, 5), + ], + ), + ) + def test_index_int_index_case_9(self, compute_unit, backend, shape): + # one axis is sliced through bool mask + class IndexModel(torch.nn.Module): + def forward(self, x): + if len(shape) == 3: + return x[:, [True, False], :] + + elif len(shape) == 4: + return x[[True, False], :, :, :] + + model = IndexModel() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [ + (1, 2, 3), + (2, 3, 4, 5), + ], + ), + ) + def test_index_int_index_case_10(self, compute_unit, backend, shape): + # multiple axes are sliced through bool masks with possible broadcasting + class IndexModel(torch.nn.Module): + def forward(self, x): + if len(shape) == 3: + return x[[True], [True, False], [False, True, False]] + + else: + assert len(shape) == 4 + # This is an non-broadcasable case, where the number of `True` for each dimension is the same + output_1 = x[ + [True, True], + :, + [True, True, False, False], + [True, False, False, True, False], + ] + # This is a broadcasable case + output_2 = x[ + [True, True], + :, + [False, 
False, True, False], + [True, False, False, True, False], + ] + return output_1, output_2 + + model = IndexModel() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [ + (3, 4), + (3, 4, 5, 6) + ], + ), + ) + def test_index_int_index_case_11(self, compute_unit, backend, shape): + # broadcasable indices + class IndexModel(torch.nn.Module): + def forward(self, x): + if len(shape) == 2: + index_1 = torch.tensor([0, 1]) + index_2 = torch.tensor([0]) + return x[index_1, index_2] + else: + assert len(shape) == 4 + index_1 = torch.tensor([0, 1, 1, 1, 1, 1, 0, 0]).view(4, 2) + index_2 = torch.tensor([0, 1, 2, 3]).view(4, 1) + index_3 = torch.tensor([2]).view(1,) + return x[index_1, :, index_3, index_2] + + model = IndexModel() + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [ + (1, 2, 3), + (2, 3, 4, 5), + ], + ), + ) + def test_index_int_index_case_12(self, compute_unit, backend, shape): + # Another broadcastable indices test case + class IndexModel(torch.nn.Module): + def forward(self, x): + index_1 = torch.tensor([0, 1]) + index_2 = torch.tensor([0]) + return ( + x[:, index_1, index_2] + if len(shape) == 3 + else x[:, index_1, index_2, :] + ) + + self.run_compare_torch( + shape, IndexModel(), backend=backend, compute_unit=compute_unit + ) + +class TestLoss(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, reduction", + itertools.product( + compute_units, backends, range(1, 4), ["none", "mean", "sum"] + ), + ) + def test_mse_loss(self, compute_unit, backend, rank: int, reduction: str): + input_shape = tuple(np.random.randint(low=1, high=5, size=rank)) + class Model(torch.nn.Module): + def __init__(self): + super().__init__() + self.loss = nn.MSELoss(reduction=reduction) + + def forward(self, x, y): + return self.loss(x, y) + + input_shapes = [input_shape, input_shape] + + self.run_compare_torch( + input_shapes, Model(), backend=backend, compute_unit=compute_unit + ) + + +class TestPad(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, mode", + itertools.product( + compute_units, backends, range(3, 5), ["reflect", "replicate"] + ), + ) + def test_pad_reflect_replicate(self, compute_unit, backend, rank: int, mode: str): + if rank == 3: + pad_len = 2 + input_shape = (5, 10, 10) + elif rank == 4: + pad_len = 4 + input_shape = (10, 5, 5, 10) + else: + raise NotImplementedError( + "Only 3D, 4D padding with non-constant padding are supported for now" + ) + max_pad = min(input_shape[-1], input_shape[-2]) + pad = list(np.random.randint(low=0, high=max_pad, size=pad_len)) + model = ModuleWrapper( + function=torch.nn.functional.pad, kwargs={"pad": pad, "mode": mode} + ) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product(compute_units, backends, range(1, 6)), + ) + def test_pad_constant(self, compute_unit, backend, rank: int): + if rank > 5: + raise NotImplementedError("Only supports < 6D constant padding") + val = float(np.random.random(1)) + input_shape = tuple(np.random.randint(low=1, high=10, size=rank)) + pad_dims = np.random.randint(low=1, high=rank + 1) + pad = list(np.random.randint(low=0, high=10, size=pad_dims * 2)) + model = 
ModuleWrapper( + function=torch.nn.functional.pad, + kwargs={"pad": pad, "mode": "constant", "value": val}, + ) + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_constant_pad_1d(self, compute_unit, backend): + input_shape = (3, 4, 5) + model = torch.nn.ConstantPad1d((5, 6), 3.5).eval() + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_constant_pad_2d(self, compute_unit, backend): + input_shape = (3, 4, 5, 6) + model = torch.nn.ConstantPad2d((5, 6, 3, 8), 3.5).eval() + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_constant_pad_3d(self, compute_unit, backend): + input_shape = (3, 4, 5, 6, 2) + model = torch.nn.ConstantPad3d((5, 6, 3, 8, 2, 4), 3.5).eval() + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestMeshgrid(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, x, y, z, dtype, inp_mode, indexing", + itertools.product( + compute_units, + backends, + [1, 2], + [3, 4], + [5, 6], + [torch.int, torch.float], + ["norm", "list"], + [None, "ij", "xy"], + ), + ) + def test_meshgrid( + self, + compute_unit, + backend, + x, + y, + z, + dtype, + inp_mode, + indexing, + ): + class TestModel(nn.Module): + def forward(self, x, y, z): + if inp_mode == "norm": + return torch.meshgrid(x, y, z, indexing=indexing) + elif inp_mode == "list": + return torch.meshgrid([x, y, z], indexing=indexing) + else: + raise ValueError("Unsupported mode: {mode}".format(mode=inp_mode)) + + inputs = ( + torch.arange(start=0, end=x, step=1, dtype=dtype), + torch.arange(start=0, end=y, step=1, dtype=dtype), + torch.arange(start=0, end=z, step=1, dtype=dtype), + ) + model = TestModel().eval() + expected_results = model(*inputs) + self.run_compare_torch( + inputs, + model, + expected_results, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestScatter(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shapes_dims", + itertools.product( + compute_units, + backends, + [ + [(10,), (0, -1)], + [(2, 3), (1, -1)], + [(2, 3, 4, 5), (0, -2)], + ], + ), + ) + def test_scatter(self, compute_unit, backend, shapes_dims): + class TestModel(nn.Module): + def __init__(self, dim, shapes): + super(TestModel, self).__init__() + self.dim = dim + self.source = torch.rand(*(shapes)) + self.index = torch.randint(0, shapes[dim], size=shapes) + + def forward(self, x): + return x.scatter_(self.dim, self.index, self.source) + + shapes, dims = shapes_dims + for dim in dims: + m = TestModel(0, shapes) + self.run_compare_torch( + shapes, m, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shapes_dims", + itertools.product( + compute_units, + backends, + [ + [(10,), (0, -1)], + [(2, 3), (1, -1)], + [(2, 3, 4, 5), (0, -2)], + ], + ), + ) + def test_scatter_with_scalar_source(self, compute_unit, backend, shapes_dims): + class TestModel(nn.Module): + def __init__(self, dim, shapes): + super(TestModel, self).__init__() + self.dim = dim + self.source = 1.0 + self.index 
= torch.randint(0, shapes[dim], size=shapes) + + def forward(self, x): + return x.scatter_(self.dim, self.index, self.source) + + shapes, dims = shapes_dims + for dim in dims: + m = TestModel(0, shapes) + self.run_compare_torch( + shapes, m, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shapes_dims, mode", + itertools.product( + compute_units, + backends, + [ + [(10,), (0, -1)], + [(2, 3), (1, -1)], + [(2, 3, 4, 5), (0, -2)], + ], + ["add", "multiply"], + ), + ) + def test_scatter_with_reduce(self, compute_unit, backend, shapes_dims, mode): + class TestModel(nn.Module): + def __init__(self, dim, shapes, mode): + super(TestModel, self).__init__() + self.dim = dim + self.mode = mode + self.source = torch.rand(*(shapes)) + self.index = torch.randint(0, shapes[dim], size=shapes) + + def forward(self, x): + return x.scatter_(self.dim, self.index, self.source, reduce=self.mode) + + shapes, dims = shapes_dims + for dim in dims: + m = TestModel(0, shapes, mode) + self.run_compare_torch( + shapes, m, backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shapes_dims", + itertools.product( + compute_units, + backends, + [ + [(10,), (0, -1)], + [(2, 3), (1, -1)], + [(2, 3, 4, 5), (0, -2)], + ], + ), + ) + def test_scatter_add(self, compute_unit, backend, shapes_dims): + class TestModel(nn.Module): + def __init__(self, dim, shapes): + super(TestModel, self).__init__() + self.dim = dim + self.source = torch.rand(*(shapes)) + self.index = torch.randint(0, shapes[dim], size=shapes) + + def forward(self, x): + return x.scatter_add_(self.dim, self.index, self.source) + + shapes, dims = shapes_dims + for dim in dims: + m = TestModel(dim, shapes) + self.run_compare_torch( + shapes, m, backend=backend, compute_unit=compute_unit + ) + + +class TestBroadcastTensors(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shapes", + itertools.product( + compute_units, + backends, + [(1,), (1, 2)], + ), + ) + def test_one_tensor(self, compute_unit, backend, shapes): + class TestModel(nn.Module): + def forward(self, a): + return torch.broadcast_tensors(a) + + self.run_compare_torch( + shapes, TestModel().eval(), backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shapes", + itertools.product( + compute_units, + backends, + [ + [(2, 1), (1, 3)], + [(5, 1, 4, 1), (3, 1, 1)], + [(1,), (3, 1, 7)], + [(2, 1), (4, 3, 2, 1)], + ], + ), + ) + def test_two_tensors(self, compute_unit, backend, shapes): + class TestModel(nn.Module): + def forward(self, a, b): + return torch.broadcast_tensors(a, b) + + self.run_compare_torch( + shapes, TestModel().eval(), backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shapes", + itertools.product( + compute_units, + backends, + [ + [(2, 1), (1, 3), (1,), (1, 1)], + [(5, 1, 4, 1), (3, 1, 1), (1,), (4, 8)], + [(1,), (2, 1), (3, 2, 1), (5, 4, 3, 2, 1)], + ], + ), + ) + def test_four_tensors(self, compute_unit, backend, shapes): + class TestModel(nn.Module): + def forward(self, a, b, c, d): + return torch.broadcast_tensors(a, b, c, d) + + self.run_compare_torch( + shapes, TestModel().eval(), backend=backend, compute_unit=compute_unit + ) + + +class TestEmbedding(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_dtype", + itertools.product( + compute_units, + backends, + [np.int32, np.float32], + ), + ) + def test_embedding(self, compute_unit, 
backend, input_dtype): + num_embeddings = 4 + embedding_size = 10 + B = 2 + dim = 5 + converter_input_type = [TensorType(shape=(B, dim), dtype=input_dtype)] + + # input shape: (B, dim) + # output shape : (B, dim, embedding_size) + # shape of weights : (num_embeddings, embedding_size) + class EmbeddingModel(nn.Module): + def __init__(self): + super(EmbeddingModel, self).__init__() + self.embedding = torch.nn.Embedding(num_embeddings, embedding_size) + + def forward(self, x): + return self.embedding(x) + + input_data = np.random.randint(low=0, high=num_embeddings, size=(B, dim)) + input_data = torch.from_numpy(input_data) + model = EmbeddingModel() + expected_results = model(input_data) + self.run_compare_torch( + input_data, + model, + expected_results=expected_results, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + converter_input_type=converter_input_type, + ) + + +class TestDuplicateOutputTensors(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_dtype", + itertools.product( + compute_units, + backends, + [np.int32, np.float32], + ), + ) + # Test case for rdar://100138064 (Duplicate output tensors trigger ops removal errors). + def test_duplicate_output_not_raise_errors( + self, compute_unit, backend, input_dtype + ): + if backend[0] == "neuralnetwork": + pytest.skip( + "rdar://100243127 ([PyTorch] Duplicate Output Tensor Doesn't work for neuralnetwork)" + ) + + class DuplicateTensorsModel(torch.nn.Module): + def forward(self, x): + return x, x + + input_data = torch.rand(2, 2, 1, 1) + converter_input_type = [ct.TensorType(shape=input_data.shape)] + model = DuplicateTensorsModel() + expected_results = model(input_data) + self.run_compare_torch( + input_data, + model, + expected_results=expected_results, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + converter_input_type=converter_input_type, + ) + + +class TestBaddbmm(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shapes", + itertools.product( + compute_units, + backends, + [(2, 4, 6, 8), (4, 12, 6, 16)], + ), + ) + def test_baddbmm(self, compute_unit, backend, shapes): + B, N, M, P = shapes + + # input shape: any shape broadcastable to (B, N, P) + # batch1 shape: (B, N, M) + # batch2 shape: (B, M, P) + # output shape : (B, N, P) + class BaddbmmModel(nn.Module): + def __init__(self): + super(BaddbmmModel, self).__init__() + self.batch1 = torch.randn(B, N, M) + self.batch2 = torch.randn(B, M, P) + + def forward(self, x): + return torch.baddbmm(x, self.batch1, self.batch2) + + model = BaddbmmModel() + # Makes it broadcastable to (B, N, P). + for input_shape in [(1, N, P), (B, 1, P), (1, P)]: + self.run_compare_torch( + input_shape, model, backend=backend, compute_unit=compute_unit + ) + + +class TestGlu(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shapes", + itertools.product( + compute_units, + backends, + [(2, 4, 6, 8), (6, 2, 10)], + ), + ) + def test_glu(self, compute_unit, backend, shapes): + # The dim specified for GLU shouldn't exceed the max dim in input. + glu_dim_list = [-1] + [i for i in range(len(shapes))] + for glu_dim in glu_dim_list: + model = torch.nn.GLU(glu_dim) + self.run_compare_torch( + shapes, model, backend=backend, compute_unit=compute_unit + ) + + +class TestHstack(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shapes", + itertools.product( + compute_units, + backends, + [ + [(2, 4, 6), (2, 4, 6)], + [(1, 4, 5), (1, 2, 5)], + [(1,), (3,)], + ], # Test 1-D tensors. 
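+            # For reference: torch.hstack concatenates along dim 1 for tensors
+            # of rank >= 2 and along dim 0 for 1-D tensors, so here
+            # (1, 4, 5) + (1, 2, 5) -> (1, 6, 5) and (1,) + (3,) -> (4,).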
+ ), + ) + def test_hstack(self, compute_unit, backend, shapes): + class HstackModel(nn.Module): + def forward(self, *tensors): + return torch.hstack(tensors) + + self.run_compare_torch( + shapes, HstackModel(), backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shapes", + itertools.product( + compute_units, + backends, + [[(2, 4, 6), (2, 4, 6)]], + ), + ) + def test_hstack_with_parameter_out(self, compute_unit, backend, shapes): + class HstackModel(nn.Module): + def forward(self, *tensors): + output_tensor = torch.tensor([]) + torch.hstack(tensors, out=output_tensor) + return output_tensor + + self.run_compare_torch( + shapes, HstackModel(), backend=backend, compute_unit=compute_unit + ) + + +class TestRemainder(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shapes", + itertools.product( + compute_units, + backends, + [ + [(2, 4, 6), (2, 4, 6)], + [(2, 4, 6), (4, 6)], # broadcastable tensors + [(2, 4, 6), (2, 1, 6)], + ], + ), + ) + def test_remainder(self, compute_unit, backend, shapes): + class RemainderModel(nn.Module): + def forward(self, dividend, divisor): + return torch.remainder(dividend, divisor) + + self.run_compare_torch( + shapes, RemainderModel(), backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shapes", + itertools.product( + compute_units, + backends, + [[(2, 4, 6), (2, 4, 6)]], + ), + ) + def test_remainder_with_parameter_out(self, compute_unit, backend, shapes): + class RemainderModel(nn.Module): + def forward(self, dividend, divisor): + output_tensor = torch.tensor([]) + torch.remainder(dividend, divisor, out=output_tensor) + return output_tensor + + self.run_compare_torch( + shapes, RemainderModel(), backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_remainder_input_types_promotion(self, compute_unit, backend): + class RemainderModel(nn.Module): + def forward(self, dividend, divisor): + return torch.remainder(dividend, divisor) + + input_dividend = torch.randint(low=0, high=10, size=(2, 3), dtype=torch.int32) + input_divisor = torch.rand(2, 3) + self.run_compare_torch( + [input_dividend, input_divisor], + RemainderModel(), + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + +class TestSum(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_dtype", + itertools.product( + compute_units, backends, [torch.int32, torch.float32, torch.bool] + ), + ) + def test_sum(self, compute_unit, backend, input_dtype): + model = ModuleWrapper(function=torch.sum) + + input_data = torch.zeros(2, 3).to(input_dtype) + expected_results = model(input_data) + + TorchBaseTest.run_compare_torch( + input_data, + model, + expected_results=expected_results, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestLogsumexp(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape, dim", + itertools.product( + compute_units, + backends, + COMMON_SHAPES, + [0, -1], + ), + ) + def test_logsumexp(self, compute_unit, backend, shape, dim): + params = {"dim": dim} + model = ModuleWrapper( + function=torch.logsumexp, + kwargs=params, + ) + TorchBaseTest.run_compare_torch( + shape, + model, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestHannWindow(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, window_length, 
periodic", + itertools.product( + compute_units, + backends, + [1, 3, 6, 10, 12], + [True, False], + ), + ) + def test_hann_window(self, compute_unit, backend, window_length, periodic): + class HannWindowModel(nn.Module): + def forward(self, x): + return torch.hann_window(window_length, periodic) + + input_shape = np.random.randint(low=1, high=10, size=(window_length,)) + torch_in = torch.tensor(input_shape, dtype=torch.int32) + model = HannWindowModel().eval() + torch_out = model(torch_in) + self.run_compare_torch( + torch_in, + model, + expected_results=torch_out, + input_as_shape=False, + backend=backend, + compute_unit=compute_unit, + ) + + +class TestTrace(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape", + itertools.product( + compute_units, + backends, + [(1, 1), (2, 4), (4, 3), (5, 5)], + ), + ) + def test_trace(self, compute_unit, backend, shape): + model = ModuleWrapper(torch.trace) + self.run_compare_torch(shape, model, backend=backend, compute_unit=compute_unit) + + +class TestRoll(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape, shifts", + itertools.product( + compute_units, + backends, + [(5,), (2, 4), (4, 2, 3)], + [0, 1, 3], + ), + ) + def test_roll(self, compute_unit, backend, shape, shifts): + model = ModuleWrapper(torch.roll, kwargs={"shifts": shifts}) + self.run_compare_torch( + shape, + model, + backend=backend, + compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape, shifts_dims", + itertools.product( + compute_units, + backends, + [(4, 2, 3)], + [ + [0, 0], + [4, 0], + [9, 0], + [[0, 1], [0, 1]], + # Shifts exceeeds dimension + [[89, 93, 102], [0, 1, 2]], + # Negative shifts + [[-9, -1], [1, 2]], + # Duplicate dims + [[8, 10, -8], [0, 1, 0]] + ], + ), + ) + def test_roll_with_dims(self, compute_unit, backend, shape, shifts_dims): + shifts, dims = shifts_dims + model = ModuleWrapper(torch.roll, kwargs={"shifts": shifts, "dims": dims}) + self.run_compare_torch( + shape, + model, + backend=backend, + compute_unit=compute_unit + ) + +class TestArgmax(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, shape, axis, input_dtype", + itertools.product( + compute_units, + backends, + COMMON_SHAPES, + [-1, 0], + [np.float32, np.int32, np.int64], + ), + ) + def test_argmax( + self, + compute_unit, + backend: Tuple[str, str], + shape: Tuple[int], + axis: int, + input_dtype: np.dtype, + ): + input_data = ( + torch.rand(*shape) + if input_dtype == np.float32 + else torch.randint(10, shape) + ) + converter_input_type = [ + ct.TensorType(shape=input_data.shape, dtype=input_dtype) + ] + model = ModuleWrapper(function=torch.argmax, kwargs={"dim": axis}) + expected_results = model(input_data) + TorchBaseTest.run_compare_torch( + input_data, + model, + expected_results=expected_results, + input_as_shape=False, + backend=backend, + converter_input_type=converter_input_type, + compute_unit=compute_unit, + ) + + +class TestStack(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, rank, num", + itertools.product( + compute_units, + backends, + [1, 3], + [1, 3], + ), + ) + def test_stack(self, compute_unit, backend, rank, num): + input_shape = np.random.randint(low=1, high=6, size=rank) + for dim in [None] + list(range(rank + 1)): + print("dim", dim) + + class StackModel(torch.nn.Module): + def forward(self, *inputs): + if dim is None: + return torch.stack(inputs) + else: + return torch.stack(inputs, dim=dim) + + TorchBaseTest.run_compare_torch( + [input_shape] * 
num, + StackModel(), + backend=backend, + compute_unit=compute_unit, + ) + + +class TestComplex(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_complex(self, compute_unit: ct.ComputeUnit, backend): + class ComplexModel(torch.nn.Module): + def forward(self, x): + real_part = x + 1 + imag_part = -x + complex_data = torch.complex(real_part, imag_part) + return torch.stack([complex_data.real, complex_data.imag], dim=1) + + TorchBaseTest.run_compare_torch( + (2, 3, 4), ComplexModel(), backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_complex_real_imag_same_input(self, compute_unit: ct.ComputeUnit, backend): + class ComplexModel(torch.nn.Module): + def forward(self, x): + return torch.complex(x, x).real + + TorchBaseTest.run_compare_torch( + (2, 3, 4), ComplexModel(), backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_complex_input_error(self, compute_unit: ct.ComputeUnit, backend): + class ComplexModel(torch.nn.Module): + def forward(self, x): + return torch.complex(x.real, x.imag) + + input_data = torch.tensor([1 + 0j, 2 + 3j], dtype=torch.complex64) + with pytest.raises( + TypeError, + match="dtype= is unsupported for inputs/outputs of the model", + ): + converter_input_type = [ + ct.TensorType(shape=input_data.shape, dtype=np.complex64) + ] + TorchBaseTest.run_compare_torch( + input_data, + ComplexModel(), + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + converter_input_type=converter_input_type, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_complex_output_error(self, compute_unit: ct.ComputeUnit, backend): + class ComplexModel(torch.nn.Module): + def forward(self, x): + return torch.complex(x, x) + + with pytest.raises( + ValueError, match="MIL doesn't support complex data as model's output" + ): + TorchBaseTest.run_compare_torch( + (2, 3, 4), ComplexModel(), backend=backend, compute_unit=compute_unit + ) + + +class TestReal(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_real_real_input(self, compute_unit: ct.ComputeUnit, backend): + class RealModel(torch.nn.Module): + def forward(self, x): + return torch.real(x) + + TorchBaseTest.run_compare_torch( + (2, 3, 4), RealModel(), backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_real_complex_input(self, compute_unit: ct.ComputeUnit, backend): + class RealModel(torch.nn.Module): + def forward(self, x): + return torch.real(torch.complex(x, x)) + + TorchBaseTest.run_compare_torch( + (2, 3, 4), RealModel(), backend=backend, compute_unit=compute_unit + ) + + +class TestImag(TorchBaseTest): + # torch.imag only support complex input, so we don't need to test real number input. 
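+    # A quick sketch of the identity the test below relies on: for real x,
+    # torch.complex(x, x) has real part x and imaginary part x, so
+    # torch.imag(torch.complex(x, x)) == x elementwise.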
+ @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_imag_complex_input(self, compute_unit: ct.ComputeUnit, backend): + class ImagModel(torch.nn.Module): + def forward(self, x): + return torch.imag(torch.complex(x, x)) + + TorchBaseTest.run_compare_torch( + (2, 3, 4), ImagModel(), backend=backend, compute_unit=compute_unit + ) + + +class TestFft(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_directly_use_fft_complex_output_error( + self, compute_unit: ct.ComputeUnit, backend + ): + class FftModel(torch.nn.Module): + def forward(self, x): + return torch.fft.fft(x) + + with pytest.raises( + ValueError, match="MIL doesn't support complex data as model's output" + ): + TorchBaseTest.run_compare_torch( + (2, 3, 4), FftModel(), backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, input_shape, fft_variant", + itertools.product( + compute_units, + backends, + [(1,), (2, 3), (3, 1, 2)], + ["fft", "rfft", "ifft", "irfft"], + ), + ) + def test_fft_basic_no_param( + self, compute_unit: ct.ComputeUnit, backend, input_shape, fft_variant + ): + if input_shape == (1,) and fft_variant == "irfft": + pytest.skip("PyTorch doesn't support length-1 input (1,) for irfft.") + + class FftModel(torch.nn.Module): + def forward(self, x): + if fft_variant == "fft": + return torch.fft.fft(x).real + elif fft_variant == "rfft": + return torch.fft.rfft(x).real + elif fft_variant == "ifft": + x = torch.complex(x, x) + return torch.fft.ifft(x).real + elif fft_variant == "irfft": + x = torch.complex(x, x) + return torch.fft.irfft(x) + else: + raise ValueError(f"Invalid fft_variant {fft_variant}.") + + TorchBaseTest.run_compare_torch( + input_shape, FftModel(), backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, fft_variant, n, dim, norm", + itertools.product( + compute_units, + backends, + ["fft", "rfft", "ifft", "irfft"], + [None, 1, 5], + [0, 1, -1], + [None, "forward", "backward", "ortho"], + ), + ) + def test_fft_basic( + self, compute_unit: ct.ComputeUnit, backend, fft_variant, n, dim, norm + ): + class FftModel(torch.nn.Module): + def forward(self, x): + if fft_variant == "fft": + fft_res = torch.fft.fft(x, n=n, dim=dim, norm=norm) + elif fft_variant == "rfft": + fft_res = torch.fft.rfft(x, n=n, dim=dim, norm=norm) + elif fft_variant == "ifft": + x = torch.complex(x, x) + fft_res = torch.fft.ifft(x, n=n, dim=dim, norm=norm) + elif fft_variant == "irfft": + x = torch.complex(x, x) + return torch.fft.irfft(x, n=n, dim=dim, norm=norm) + else: + raise ValueError(f"Invalid fft_variant {fft_variant}.") + return torch.stack([fft_res.real, fft_res.imag], dim=0) + + TorchBaseTest.run_compare_torch( + (2, 3, 4), FftModel(), backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_fft_nested(self, compute_unit: ct.ComputeUnit, backend): + class FftModel(torch.nn.Module): + def forward(self, x): + fft_1 = torch.fft.fft(x, dim=2, norm="forward") + fft_2 = torch.fft.fft(fft_1, dim=0, norm="backward") + fft_3 = torch.fft.fft(fft_2, dim=1, norm="ortho") + return torch.real(fft_3) + + TorchBaseTest.run_compare_torch( + (2, 3, 4), FftModel(), backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend, fftn_variant, 
shapes_and_dims, norm", + itertools.product( + compute_units, + backends, + ["fftn", "rfftn", "ifftn", "irfftn"], + [ + (None, None), + (None, [1, 0]), + ([2], None), + ([5], [0]), + ([1, 4], [1, 2]), + ([1, 3, 5], [1, -1, 0]), + ], + [None, "forward", "backward", "ortho"], + ), + ) + def test_fftn( + self, compute_unit: ct.ComputeUnit, backend, fftn_variant, shapes_and_dims, norm + ): + shapes, dims = shapes_and_dims + + class FftnModel(torch.nn.Module): + def forward(self, x): + if fftn_variant == "fftn": + fftn_res = torch.fft.fftn(x, s=shapes, dim=dims, norm=norm) + elif fftn_variant == "rfftn": + fftn_res = torch.fft.rfftn(x, s=shapes, dim=dims, norm=norm) + elif fftn_variant == "ifftn": + x = torch.complex(x, x) + fftn_res = torch.fft.ifftn(x, s=shapes, dim=dims, norm=norm) + elif fftn_variant == "irfftn": + x = torch.complex(x, x) + return torch.fft.irfftn(x, s=shapes, dim=dims, norm=norm) + else: + raise ValueError(f"Invalid fftn_variant {fftn_variant}.") + return torch.stack([torch.real(fftn_res), torch.imag(fftn_res)], dim=0) + + TorchBaseTest.run_compare_torch( + (2, 3, 4), FftnModel(), backend=backend, compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_dims_specify_by_shapes(self, compute_unit: ct.ComputeUnit, backend): + class FftnModel(torch.nn.Module): + def forward(self, x): + x = torch.complex(x, x) + return torch.fft.irfftn(x, s=x.shape[-3:], dim=(-3, -2, -1)) + + TorchBaseTest.run_compare_torch( + (2, 3, 4), FftnModel(), backend=backend, compute_unit=compute_unit + ) + + +class TestNms(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, box_num, iou_threshold, dynamic_input", + itertools.product( + compute_units, + backends, + [1, 5, 20, 1000], + [0.0, 0.2, 0.8], + [True, False], + ), + ) + def test_nms( + self, + compute_unit, + backend: Tuple[str, str], + box_num: int, + iou_threshold: float, + dynamic_input: bool, + ): + if box_num >= 1000 and backend == ("mlprogram", "fp16"): + pytest.xfail( + "rdar://103891349 ([TensorFlow] [PyTorch] NMS discrepancy in Fp16 when " + "number of boxes is large)" + ) + + class NmsModel(torch.nn.Module): + def forward(self, boxes, scores): + return torchvision.ops.nms(boxes, scores, iou_threshold=iou_threshold) + + input_boxes = torch.randint( + low=0, high=box_num, size=(box_num, 4), dtype=torch.float32 + ) + # When two boxes have IOU exactly equal to iou_threshold (>0.0), it will hit the corner case as shown in + # `test_nms_corner_case`, which has a discrepancy between CoreML and PyTorch. To avoid this situation, we keep + # regenerating the input boxes at most _MAX_REGEN times until there is no corner case in the generated boxes. + _MAX_REGEN = 3 + regen_count = 0 + while regen_count < _MAX_REGEN and iou_threshold > 0.0 and iou_threshold in torchvision.ops.box_iou( + input_boxes, input_boxes): + input_boxes = torch.randint( + low=0, high=box_num, size=(box_num, 4), dtype=torch.float32 + ) + regen_count += 1 + + # When the input score is too close, the returned index order is not guaranteed (same + # behaviour as PyTorch). So instead of generating random scores by torch.rand, use shuffle. 
+ input_scores = np.arange(box_num) + np.random.shuffle(input_scores) + input_scores = torch.tensor(input_scores, dtype=torch.float32) + + if dynamic_input: + converter_input_type = [ + ct.TensorType(shape=(RangeDim(1, -1), 4)), + ct.TensorType(shape=(RangeDim(1, -1),)), + ] + else: + converter_input_type = [ + ct.TensorType(shape=input_boxes.shape), + ct.TensorType(shape=input_scores.shape), + ] + + nms_model = NmsModel() + nms_model.eval() + expected_results = nms_model(input_boxes, input_scores) + TorchBaseTest.run_compare_torch( + [input_boxes, input_scores], + nms_model, + expected_results=expected_results, + input_as_shape=False, + backend=backend, + converter_input_type=converter_input_type, + compute_unit=compute_unit, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_nms_corner_case_iou_equal_threshold( + self, + compute_unit, + backend: Tuple[str, str], + ): + class NmsModel(torch.nn.Module): + def forward(self, boxes, scores): + return torchvision.ops.nms(boxes, scores, iou_threshold=0.2) + + input_boxes = torch.tensor([[3., 2., 3., 0.], + [0., 0., 2., 2.], + [1., 3., 2., 1.], + [0., 2., 1., 3.], + [1., 1., 2., 3.]], dtype=torch.float32) + input_scores = torch.tensor([3., 2., 0., 1., 4.], dtype=torch.float32) + converter_input_type = [ + ct.TensorType(shape=input_boxes.shape), + ct.TensorType(shape=input_scores.shape), + ] + + nms_model = NmsModel() + nms_model.eval() + expected_results = nms_model(input_boxes, input_scores) + with pytest.raises(AssertionError, match="Items are not equal"): + # TODO: rdar://104966206 ([PyTorch] Re-enable NMS Corner Case Tests After PyTorch Fixes Bugs). + # This is because the IOU between the last box ([1., 1., 2., 3.]) and the second box ([0., 0., 2., 2.]) is + # exactly 0.2 (IOU threshold), which leads to a corner case that PyTorch will remove the second box while + # CoreML keeps it. According to PyTorch's doc, only boxes with `greater than iou_threshold` should be + # removed, so it's a bug in PyTorch's side. + TorchBaseTest.run_compare_torch( + [input_boxes, input_scores], + nms_model, + expected_results=expected_results, + input_as_shape=False, + backend=backend, + converter_input_type=converter_input_type, + compute_unit=compute_unit, + ) + + # Change the last input box to make IOU slightly larger than 0.2, the output of CoreML will match PyTorch. + input_boxes[-1][-1] = 2.999 + expected_results = nms_model(input_boxes, input_scores) + TorchBaseTest.run_compare_torch( + [input_boxes, input_scores], + nms_model, + expected_results=expected_results, + input_as_shape=False, + backend=backend, + converter_input_type=converter_input_type, + compute_unit=compute_unit, + ) + + # Change the last input box to make IOU slightly smaller than 0.2, the output of CoreML will match PyTorch. 
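+        # Worked numbers for this perturbation (derived from the boxes above,
+        # with boxes in (x1, y1, x2, y2) form): the last box becomes
+        # [1., 1., 2., 3.0001] with area 1 * 2.0001; its intersection with
+        # [0., 0., 2., 2.] is [1., 1., 2., 2.] with area 1, so
+        # IOU = 1 / (4 + 2.0001 - 1) ~= 0.19999 < 0.2 and the box survives
+        # NMS in both frameworks.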
+ input_boxes[-1][-1] = 3.0001 + expected_results = nms_model(input_boxes, input_scores) + TorchBaseTest.run_compare_torch( + [input_boxes, input_scores], + nms_model, + expected_results=expected_results, + input_as_shape=False, + backend=backend, + converter_input_type=converter_input_type, + compute_unit=compute_unit, + ) + + +class TestTensorSize(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ) + ) + def test_tensor_size(self, compute_unit: ct.ComputeUnit.CPU_ONLY, backend: List[Tuple[str]]): + class TestModel(torch.nn.Module): + def forward(self, x): + return x.size() + + self.run_compare_torch( + [(1, 2, 3)], + TestModel(), + backend=backend, + compute_unit=compute_unit + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + [('mlprogram', "fp16")], + ) + ) + def test_tensor_size_with_dim(self, compute_unit: ct.ComputeUnit.CPU_ONLY, + backend: List[Tuple[str]]): + class TestModel(torch.nn.Module): + def forward(self, x): + return x.size(dim=-1) + + model = TestModel() + + mlmodel = self.run_compare_torch( + [(1, 2, 3)], + model, + backend=backend, + compute_unit=compute_unit + ) + prog = mlmodel[1]._mil_program + # The shape op is folded to const. + assert len(prog.find_ops(op_type="shape")) == 0 + + with patch.object(Var, '_is_nonreplaceable_var') as mocked_is_nonreplaceable_var: + # Mock that shape op is non-replaceable. + mocked_is_nonreplaceable_var.side_effect = ( + lambda var: var.op and "shape" in var.op.op_type + ) + mlmodel = self.run_compare_torch( + [(1, 2, 3)], + model, + backend=backend, + compute_unit=compute_unit + ) + prog = mlmodel[1]._mil_program + # The shape op is not folded to const. + assert len(prog.find_ops(op_type="shape")) == 1 + + +class TestBitwiseAnd(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_bitwise_and( + self, compute_unit: ct.ComputeUnit.CPU_ONLY, backend: List[Tuple[str]] + ): + class TestModel(torch.nn.Module): + def forward(self, x, y): + return torch.bitwise_and(x, y) + + input_shape = (2, 3) + input_data_x = torch.rand(*input_shape) > 0.2 + input_data_y = torch.rand(*input_shape) < 0.8 + self.run_compare_torch( + [input_data_x, input_data_y], + TestModel(), + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_bitwise_and_unsupport_input( + self, compute_unit: ct.ComputeUnit.CPU_ONLY, backend: List[Tuple[str]] + ): + class TestModel(torch.nn.Module): + def forward(self, x, y): + return torch.bitwise_and(x, y) + + input_shape = (2, 3) + input_data_x = torch.randint( + low=0, high=10, size=input_shape, dtype=torch.int32 + ) + input_data_y = torch.randint( + low=0, high=10, size=input_shape, dtype=torch.int32 + ) + with pytest.raises( + NotImplementedError, + match="The `bitwise_and` op only supports boolean input", + ): + self.run_compare_torch( + [input_data_x, input_data_y], + TestModel(), + backend=backend, + compute_unit=compute_unit, + input_as_shape=False, + ) + + +class TestUnfold(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend, input_shape, kernel_size, padding, stride", + itertools.product( + compute_units, + backends, + [(1, 1, 10, 11), (5, 3, 12, 13)], + [(2, 3)], + [0, 1, 8, (1, 3), (2, 6), (0, 5)], + [1, 2, 7, (2, 3), (5, 4)], + ), + ) + def 
test_unfold(self, compute_unit, backend, input_shape, kernel_size, padding, stride): + class UnfoldModel(nn.Module): + def forward(self, x): + return torch.nn.functional.unfold( + input=x, kernel_size=kernel_size, padding=padding, stride=stride + ) + + self.run_compare_torch( + input_shape, UnfoldModel(), backend=backend, compute_unit=compute_unit + ) + + +class TestTupleUnpack(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_tuple_unpack(self, compute_unit, backend): + class ReturnTupleModel(nn.Module): + def forward(self, x): + return x * 3, x * 4, x * 5 + + class TestModel(nn.Module): + def __init__(self): + super().__init__() + self.return_tuple_layer = ReturnTupleModel() + + def forward(self, x): + out1, out2, out3 = self.return_tuple_layer(x) + return out1.relu(), out2.sigmoid(), out3.softmax(1) + + self.run_compare_torch((1, 2, 3), TestModel(), backend=backend, compute_unit=compute_unit) + + +class TestTupleIndex(TorchBaseTest): + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product(compute_units, backends,), + ) + def test_tuple_index(self, compute_unit, backend): + class InnerModel(nn.Module): + def forward(self,x): + return (torch.tensor([0]), torch.tensor([1])) + + class OuterModel(nn.Module): + def __init__(self): + super().__init__() + self.innermodel = torch.jit.trace(InnerModel().eval(), x) + + def forward(self, x): + inner = self.innermodel(x) + return inner[0] + + x = torch.rand(1, 3, 640, 640) + self.run_compare_torch(x, OuterModel(), + input_as_shape=False, use_scripting=True, + backend=backend, compute_unit=compute_unit) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/testing_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/testing_utils.py new file mode 100644 index 00000000..f50a587d --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/test/testing_utils.py @@ -0,0 +1,259 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as np +import pytest +import torch +import torch.nn as nn + +import coremltools as ct +import coremltools.models.utils as coremltoolsutils +from coremltools import RangeDim, TensorType +from coremltools._deps import _IS_MACOS +from coremltools.converters.mil.mil.types.type_mapping import \ + nptype_from_builtin +from coremltools.converters.mil.testing_utils import ct_convert + +from ..converter import torch_to_mil_types + + +class ModuleWrapper(nn.Module): + """ + Helper class to transform torch function into torch nn module. + This helps to keep the testing interface same for torch functional api. 
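+
+    For example (an illustrative sketch; the wrapped function just needs to
+    accept the forward args plus the given kwargs):
+
+        model = ModuleWrapper(function=torch.logsumexp, kwargs={"dim": 0})
+        # model(x) is then equivalent to torch.logsumexp(x, dim=0)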
+ """ + def __init__(self, function, kwargs=None): + super(ModuleWrapper, self).__init__() + self.function = function + self.kwargs = kwargs if kwargs else {} + + def forward(self, *args): + return self.function(*args, **self.kwargs) + + +np.random.seed(1984) + + +def _flatten(objects): + flattened_list = [] + for item in objects: + if isinstance(item, (list, tuple)): + flattened_list.extend(_flatten(item)) + else: + flattened_list.append(item) + return flattened_list + + +def _copy_input_data(input_data): + if isinstance(input_data, (list, tuple)): + return [_copy_input_data(x) for x in input_data] + return input_data.clone().detach() + + +def contains_op(torch, op_string): + return hasattr(torch, op_string) + + +def convert_to_coreml_inputs(input_description, inputs): + """ + Convenience function to combine a CoreML model's input description and + set of raw inputs into the format expected by the model's predict function. + """ + flattened_inputs = _flatten(inputs) + coreml_inputs = { + str(x): inp.numpy().astype(np.float32) for x, inp in zip(input_description, flattened_inputs) + } + + for k, v in coreml_inputs.items(): + if isinstance(v, np.ndarray) and v.ndim == 0: + coreml_inputs[k] = np.expand_dims(v, axis=-1) + + return coreml_inputs + + +def convert_to_mlmodel(model_spec, tensor_inputs, backend=("neuralnetwork", "fp32"), + converter_input_type=None, compute_unit=ct.ComputeUnit.CPU_ONLY, + minimum_deployment_target=None): + def _convert_to_inputtype(inputs): + if isinstance(inputs, list): + return [_convert_to_inputtype(x) for x in inputs] + elif isinstance(inputs, tuple): + return tuple([_convert_to_inputtype(x) for x in inputs]) + elif isinstance(inputs, TensorType): + return inputs + elif isinstance(inputs, torch.Tensor): + return TensorType(shape=inputs.shape, dtype=torch_to_mil_types[inputs.dtype]) + else: + raise ValueError( + "Unable to parse type {} into InputType.".format(type(inputs)) + ) + + if converter_input_type is None: + inputs = list(_convert_to_inputtype(tensor_inputs)) + else: + inputs = converter_input_type + + return ct_convert(model_spec, inputs=inputs, convert_to=backend, + source="pytorch", compute_units=compute_unit, + minimum_deployment_target=minimum_deployment_target) + + +def generate_input_data(input_size, rand_range=(0, 1)): + r1, r2 = rand_range + + def random_data(spec): + if isinstance(spec, TensorType): + spec_shape = spec.shape.shape + dtype = nptype_from_builtin(spec.dtype) + else: + spec_shape = spec + dtype = np.float32 + + static_shape = tuple([np.random.randint(dim.lower_bound, dim.upper_bound if dim.upper_bound > 0 else 10) + if isinstance(dim, RangeDim) else dim for dim in spec_shape]) + + data = np.random.rand(*static_shape) if static_shape != () else np.random.rand() + data = (r1 - r2) * data + r2 + return torch.from_numpy(np.array(data).astype(dtype)) + + if isinstance(input_size, list): + return [random_data(size) for size in input_size] + else: + return random_data(input_size) + + +def trace_model(model, input_data): + model.eval() + if isinstance(input_data, list): + input_data = tuple(input_data) + torch_model = torch.jit.trace(model, input_data) + return torch_model + + +def flatten_and_detach_torch_results(torch_results): + if isinstance(torch_results, (list, tuple)): + return [x.detach().numpy() for x in _flatten(torch_results) if x is not None] + # Do not need to flatten + return [torch_results.detach().numpy()] + + +def convert_and_compare( + input_data, + model_spec, + expected_results=None, + atol=1e-4, + rtol=1e-05, + 
backend=("neuralnetwork", "fp32"), + converter_input_type=None, + compute_unit=ct.ComputeUnit.CPU_ONLY, + minimum_deployment_target=None +): + """ + If expected results is not set, it will by default + be set to the flattened output of the torch model. + + Inputs: + + - input_data: torch.tensor or list[torch.tensor] + """ + if isinstance(model_spec, str): + torch_model = torch.jit.load(model_spec) + else: + torch_model = model_spec + + if not isinstance(input_data, (list, tuple)): + input_data = [input_data] + + if expected_results is None: + torch_input = _copy_input_data(input_data) + expected_results = torch_model(*torch_input) + expected_results = flatten_and_detach_torch_results(expected_results) + mlmodel = convert_to_mlmodel(model_spec, input_data, backend=backend, + converter_input_type=converter_input_type, + compute_unit=compute_unit, + minimum_deployment_target=minimum_deployment_target,) + + coreml_inputs = convert_to_coreml_inputs(mlmodel.input_description, input_data) + + if not _IS_MACOS or (mlmodel.is_package and coremltoolsutils._macos_version() < (12, 0)): + return model_spec, mlmodel, coreml_inputs, None + + _, dtype = backend + if mlmodel.compute_unit != ct.ComputeUnit.CPU_ONLY or (dtype == "fp16"): + atol = max(atol * 100.0, 5e-1) + rtol = max(rtol * 100.0, 5e-2) + + if not coremltoolsutils._has_custom_layer(mlmodel._spec): + coreml_preds = mlmodel.predict(coreml_inputs) + coreml_outputs = mlmodel._spec.description.output + coreml_results = [ + coreml_preds[output.name] for output in coreml_outputs + ] + for torch_result, coreml_result in zip(expected_results, + coreml_results): + + if torch_result.shape == (): + torch_result = np.array([torch_result]) + np.testing.assert_equal(coreml_result.shape, torch_result.shape) + np.testing.assert_allclose(coreml_result, torch_result, atol=atol, rtol=rtol) + return model_spec, mlmodel, coreml_inputs, coreml_preds + + +class TorchBaseTest: + testclassname = '' + testmodelname = '' + + @pytest.fixture(autouse=True) + def store_testname_with_args(self, request): + TorchBaseTest.testclassname = type(self).__name__ + TorchBaseTest.testmodelname = request.node.name + + @staticmethod + def run_compare_torch( + input_data, + model, + expected_results=None, + atol=1e-04, + rtol=1e-05, + input_as_shape=True, + backend=("neuralnetwork", "fp32"), + rand_range=(-1.0, 1.0), + use_scripting=False, + converter_input_type=None, + compute_unit=ct.ComputeUnit.CPU_ONLY, + minimum_deployment_target=None, + ): + """ + Traces a model and runs a numerical test. + Args: + input_as_shape : If true generates random input data with shape. + expected_results : Expected result from running pytorch model. + converter_input_type: If not None, then pass it to the "inputs" argument to the + ct.convert() call. 
+ """ + model.eval() + if input_as_shape: + input_data = generate_input_data(input_data, rand_range) + + if use_scripting: + model_spec = torch.jit.script(model) + else: + model_spec = trace_model(model, _copy_input_data(input_data)) + + model_spec, mlmodel, coreml_inputs, coreml_results = \ + convert_and_compare( + input_data, + model_spec, + expected_results=expected_results, + atol=atol, + rtol=rtol, + backend=backend, + converter_input_type=converter_input_type, + compute_unit=compute_unit, + minimum_deployment_target=minimum_deployment_target, + ) + + return model_spec, mlmodel, coreml_inputs, coreml_results, \ + TorchBaseTest.testclassname, TorchBaseTest.testmodelname diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/torch_op_registry.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/torch_op_registry.py new file mode 100644 index 00000000..128fdd5a --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/torch_op_registry.py @@ -0,0 +1,58 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +_TORCH_OPS_REGISTRY = {} + + +def register_torch_op(_func=None, torch_alias=None, override=False): + """ + Registration routine for PyTorch operators + _func: (PyTorch conversion function) [Default=None] + PyTorch conversion function to register + + torch_alias: (List of string) [Default=None] + All other PyTorch operators that should also be mapped to + current conversion routine. + e.g. Sort aliased with SortV1, SortV2 + All provided alias operators must not be registered previously. + + "In place" alias are looked up automatically and do not need to + be registered. PyTorch uses an underscore suffix to denote the + in place version, e.g. "sum_" is the in place version of "sum". + + override: (Boolean) [Default=False] + If True, overrides earlier registration i.e. specified + operator and alias will start pointing to current conversion + function. + Otherwise, duplicate registration will error out. + """ + + def func_wrapper(func): + f_name = func.__name__ + + if f_name.endswith("_"): + raise Exception( + "Attempting to register \"{}\" op. Do not register inplace ops. (inplace torch ops" + " end in a \"_\"). Instead register the normal op version: \"{}\". The inplace" + " version will be supported automatically.".format(f_name, f_name[:-1]) + ) + if not override and f_name in _TORCH_OPS_REGISTRY: + raise ValueError("Torch op {} already registered.".format(f_name)) + + _TORCH_OPS_REGISTRY[f_name] = func + + if torch_alias is not None: + for name in torch_alias: + if not override and name in _TORCH_OPS_REGISTRY: + msg = "Torch op alias {} already registered." + raise ValueError(msg.format(name)) + _TORCH_OPS_REGISTRY[name] = func + + return func + + if _func is None: + # decorator called without argument + return func_wrapper + return func_wrapper(_func) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/torchir_passes.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/torchir_passes.py new file mode 100644 index 00000000..c3f4298b --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/frontend/torch/torchir_passes.py @@ -0,0 +1,322 @@ +# Copyright (c) 2021, Apple Inc. 
All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause +from collections import OrderedDict, defaultdict + +from coremltools import _logger as logger + +from .internal_graph import InternalTorchIRGraph, InternalTorchIRNode + + +def generate_tensor_assignment_ops(graph): + """ + This graph pass handles inplace tensor assignements, specifically it handles: + `torch.Tensor.copy_` and `torch.Tensor.fill_`. There are many other inplace tensor + assignments which are currently not handled. + + for instance: + + def forward(self, x): # x a tensor with shape [4,10] + x[:2, 4] = [[1],[3]] + return x + + In Pytorch, this is represented by a sequence of slice / select ops followed by a copy op: + + input -> %x + %1 = slice(%x, dim=0, begin=0, end=2, stride=1) # the slice for dimension 0 + %2 = select(%1, dim=1, index=4) # the select for dimension 1 + %3 = copy_(%2, value=[[1], [3]]) + output -> %x + + This graph pass fuses the sequences into a single InternalTorchIRNode of a new kind, which is defined as `_internal_op_tensor_inplace_copy`. + + input -> %x + %nodes_to_fuse = [slice(%x, begin=0, end=2, stride=1), select(%1, dim=1, index=4)] + %x_internal_tensor_assign_1 = _internal_op_tensor_inplace_copy(%x, value=[[1],[3]], nodes_to_fuse=nodes_to_fuse) + output -> x_internal_tensor_assign_1 + + The _internal_tensor_value_assign op takes an additional internal data member nodes_to_fuse, + which is a list of select / slice InternalTorchIRNodes that need to be fused. + Here is a more complicated example: + + def forward(self, x): # x a tensor with shape [4,10] + x[0, 0] = 1 + x[1:2, 1:2] = [[0]] + return x + + Input graph: + input -> %x + %1 = select(%x, dim=0, index=0) + %2 = select(%1, dim=0, index=0) + %3 = copy_(%2, value=1) + %4 = slice(%x, dim=0, begin=1, end=2, stride=1) + %5 = slice(%4, dim=1, begin=1, end=2, stride=1) + %6 = copy_(%5, value=[[0]]) + output -> %x + + Output graph: + input -> %x + %nodes_to_fuse_1 = [select(%x, dim=0, index=0), select(%1, dim=0, index=0)] + %x_internal_tensor_assign_1 = _internal_op_tensor_inplace_copy(%x, value=1, nodes_to_fuse=nodes_to_fuse_1) + %nodes_to_fuse_2 = [slice(%x, dim=0, begin=1, end=2, stride=1), slice(%4, dim=1, begin=1, end=2, stride=1)] + %x_internal_tensor_assign_2 = _internal_op_tensor_inplace_copy(%x_internal_tensor_assign_1, value=[[0]], nodes_to_fuse=nodes_to_fuse_2) + output -> x_internal_tensor_assign_2 + + torch.Tensor.fill_ works in a similar way, except the InternalTorchIRNodes is defined by `_internal_op_tensor_inplace_fill`. 
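+
+    For the fill_ forward pass shown below, the fused graph takes an analogous
+    form (a sketch of the expected shape of the output graph, not verbatim
+    converter output):
+
+    input -> %x
+    %nodes_to_fuse = [select(%x, dim=0, index=2)]
+    %x_internal_tensor_assign_1 = _internal_op_tensor_inplace_fill(%x, value=9, nodes_to_fuse=nodes_to_fuse)
+    output -> x_internal_tensor_assign_1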
+ + A fill_ operator is generated from the following forward pass: + + def forward(self, x): # x a tensor with shape [5, 4] + x[2] = 9 + return x + """ + + TENSOR_ASSIGMENT_PREFIX = "_internal_tensor_assign_" + + def _get_updated_name(name, updated_tensor_count): + if name in updated_tensor_count: + return name + TENSOR_ASSIGMENT_PREFIX + str(updated_tensor_count[name]) + return name + + def _construct_nodes_to_fuse_inputs(nodes_to_fuse): + inputs = [] + for node in nodes_to_fuse: + if node.kind == "select": + inputs += [node.inputs[2], None, None] + if node.kind == "slice": + inputs += [node.inputs[2], node.inputs[3], node.inputs[4]] + return inputs + + tensor_to_node_sequence_mapping = {} + updated_tensor_count = defaultdict(lambda: 0) + + for i in range(len(graph.nodes)): + node = graph.nodes[i] + + for idx in range(len(node.inputs)): + input_name = node.inputs[idx] + node.inputs[idx] = _get_updated_name(input_name, updated_tensor_count) + + if node.kind in ("empty", "select", "slice"): + node_input = node.inputs[0] + node_output = node.outputs[0] + node_sequence = tensor_to_node_sequence_mapping.get(node_input, []) + if len(node_sequence) > 0: + tensor_to_node_sequence_mapping.pop(node_input) + node_sequence.append(node) + tensor_to_node_sequence_mapping[node_output] = node_sequence + + if node.kind in ("copy_", "fill_"): + node_input = node.inputs[0] + if node_input not in tensor_to_node_sequence_mapping: + raise ValueError("No matching select or slice.") + + if node.kind == "copy_": + kind = "_internal_op_tensor_inplace_copy" + else: + kind = "_internal_op_tensor_inplace_fill" + + nodes_to_fuse = tensor_to_node_sequence_mapping[node_input] + source_tensor = nodes_to_fuse[0].inputs[0] + origin_name = source_tensor.split(TENSOR_ASSIGMENT_PREFIX)[0] + + updated_tensor_count[origin_name] += 1 + + outputs = [_get_updated_name(origin_name, updated_tensor_count)] + + update_value = node.inputs[1] + nodes_to_fuse_inputs = _construct_nodes_to_fuse_inputs(nodes_to_fuse) + tensor_assign_node = InternalTorchIRNode( + node=None, + inputs=[source_tensor, update_value] + nodes_to_fuse_inputs, + outputs=outputs, + kind=kind, + blocks=[], + ) + graph.nodes[i] = tensor_assign_node + + # modify the graph outputs if it is effected by this graph pass + for idx in range(len(graph.outputs)): + output = graph.outputs[idx] + if output in updated_tensor_count: + graph.outputs[idx] = _get_updated_name(output, updated_tensor_count) + + +def remove_getattr_nodes(graph): + """ + Remove the getattr nodes in the graph + """ + + getattr_nodes = [] + new_nodes = [] + + for node in graph.nodes: + + for block in node.blocks: + remove_getattr_nodes(block) + + if node.kind == "getattr": + getattr_nodes.append(node) + else: + new_nodes.append(node) + + # check the getattr nodes not in the outputs + for node in getattr_nodes: + if node.name in graph.outputs: + raise RuntimeError("{} should not be in the graph outputs.".format(node.name)) + + # remove the getattr nodes + graph.nodes = new_nodes + + +def transform_inplace_ops(graph, name_remap_dict=None): + + # As we modify ops, we'll need to remap symbols. + if name_remap_dict is None: + name_remap_dict = {} + + for node in graph.nodes: + for k, v in name_remap_dict.items(): + node.replace_name(k, v) + + if node.kind == "append": + if isinstance(node.parent, InternalTorchIRGraph): + # If append appears in a graph (outer block), replace + # subsequent uses of its input symbol with its output symbol. 
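+                # e.g. (hypothetical symbol names): given "%y = append(%xs, %v)"
+                # at the top level, later references to %xs are rewritten to %y,
+                # since %y names the list as it exists after the mutation.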
+ name_remap_dict[node.inputs[0]] = node.outputs[0] + elif node.parent.parent.kind == "loop": + # If append appears in a loop block, add its inputs to the block + # inputs and loop inputs, and its outputs to the block outputs + # and loop outputs. + + # This is the global input to append. We need to add it to the + # loop's input list, and replace any uses after the node with + # @global_output below. + global_input = node.inputs[0] + # This will be the name of the input to append within the + # block. We need to add it to the block inputs. + local_input = node.parent.parent.name + ".0" + # This is the output of append. We need to add it to the list + # of block outputs. + local_output = node.outputs[0] + # This is the name of the new output from the loop. It should + # replace any uses of @global_input after the loop op. + global_output = local_output + ".out" + name_remap_dict[global_input] = global_output + + node.parent.parent.inputs.append(global_input) + node.parent.inputs.append(local_input) + node.replace_name(global_input, local_input) + node.parent.outputs.append(local_output) + node.parent.parent.outputs.append(global_output) + node.parent.parent.name = node.parent.parent.outputs[0] + elif node.parent.parent.kind == "if": + # If append appears in an if/else block, add its outputs to the + # block outputs and loop outputs. + # Note that we can't assume the append appears in both blocks. + raise NotImplementedError( + "inplace_ops pass doesn't yet support append op inside conditional" + ) + + for block in node.blocks: + transform_inplace_ops(block, name_remap_dict) + + # Replace names in graph outputs + for k, v in name_remap_dict.items(): + try: + idx = graph.outputs.index(k) + except ValueError: + pass + else: + graph.outputs[idx] = v + + +def flatten_graph_input_values(graph): + """ CoreML can't handle nested iterables of tensors, so we flatten the + inputs of any graph that expects them. + """ + new_graph_inputs = graph.inputs + all_new_nodes = [] + changed = True + notified = False + + while changed: + old_graph_inputs = new_graph_inputs + new_graph_inputs = OrderedDict() + new_nodes = [] + changed = False + for _input_name, _input_val in old_graph_inputs.items(): + if isinstance(_input_val, (tuple, list)): + changed = True + if not notified: + notified = True + logger.warning( + "Tuple detected at graph input. This will be flattened in the converted model." + ) + # If this input to the graph is a tuple, we want to replace it + # with a flattened version and add an op to construct the tuple. + node_inputs = [] + for idx, item in enumerate(_input_val): + name = _input_name + "_{}".format(idx) + new_graph_inputs[name] = item + node_inputs.append(name) + new_nodes.append( + InternalTorchIRNode( + inputs=node_inputs, + outputs=[_input_name], + kind="tupleconstruct", + ) + ) + else: + # This input isn't a tuple, keep it as is. + new_graph_inputs[_input_name] = _input_val + all_new_nodes = new_nodes + all_new_nodes + graph.inputs = new_graph_inputs + graph.nodes = all_new_nodes + graph.nodes + + +def flatten_graph_output_values(graph): + """ + CoreML can't handle nested iterables of tensors, so we flatten the + outputs of any graph that produces them. + """ + node_names = [node.name for node in graph.nodes] + new_graph_outputs = graph.outputs + changed = True + notified = False + + while changed: + old_graph_outputs = new_graph_outputs + new_graph_outputs = [] + changed = False + for outp in old_graph_outputs: + # Find the node that generates this output var. 
# It is possible to not find the output var in the list of node
+            # names since nodes are named after their first output. In that
+            # case, it means the output var comes from a node that returns
+            # multiple outputs, which means that node cannot be a construct op.
+            try:
+                node_idx = node_names.index(outp)
+            except ValueError:
+                # @outp doesn't come from a construct op
+                new_graph_outputs.append(outp)
+                continue
+            if graph.nodes[node_idx].kind in [
+                "tupleconstruct",
+                "listconstruct",
+            ]:
+                # Since this output came from a construct op, we can replace it
+                # with the inputs to the op.
+                new_graph_outputs.extend(graph.nodes[node_idx].inputs)
+                changed = True
+                if not notified:
+                    notified = True
+                    logger.warning(
+                        "Tuple detected at graph output. This will be flattened in the converted model."
+                    )
+            else:
+                new_graph_outputs.append(outp)
+    # Note: if we flattened outputs, there are likely to be construct ops
+    # that are no longer needed. These will be removed in a later DCE pass.
+    graph.outputs = new_graph_outputs
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/input_types.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/input_types.py
new file mode 100644
index 00000000..0a06d4d7
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/input_types.py
@@ -0,0 +1,492 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from enum import Enum
+
+import numpy as np
+
+from coremltools.converters.mil.mil import types
+from coremltools.converters.mil.mil.types.symbolic import is_symbolic
+from coremltools.converters.mil.mil.types.type_mapping import (
+    is_builtin, numpy_type_to_builtin_type)
+
+
+class ColorLayout(Enum):
+    RGB = "RGB"
+    BGR = "BGR"
+    GRAYSCALE = "G"
+    GRAYSCALE_FLOAT16 = "G_FLOAT16"
+
+
+class ClassifierConfig:
+    def __init__(
+        self,
+        class_labels,
+        predicted_feature_name="classLabel",
+        predicted_probabilities_output=None,
+    ):
+        """
+        Configuration for classifier models.
+
+        Parameters
+        ----------
+        class_labels: str / list of int / list of str
+            If a ``list`` is provided, the ``list`` maps the index of the output of a
+            neural network to labels in a classifier.
+
+            If a ``str`` is provided, the ``str`` points to a file which maps the index
+            to labels in a classifier.
+
+        predicted_feature_name: str
+            Name of the output feature for the class labels exposed in the
+            Core ML neural network classifier. Default: ``'classLabel'``.
+
+        predicted_probabilities_output: str
+            If provided, then this is the name of the neural network blob which
+            generates the probabilities for each class label (typically the output
+            of a softmax layer).
+
+            If not provided, then the last output layer is assumed.
+        """
+        self.class_labels = class_labels
+        self.predicted_feature_name = predicted_feature_name
+        self.predicted_probabilities_output = predicted_probabilities_output
+
+
+class InputType:
+    def __init__(self, name=None, shape=None, dtype=None):
+        """
+        The input type for inputs fed into the model.
+
+        Parameters
+        ----------
+        name: (str)
+            The name of the input.
+
+        shape: list, tuple, Shape object, EnumeratedShapes object, or None
+            The shape(s) that are valid for this input.
+
+            If set to ``None``, the shape will be inferred from the model itself.
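+
+        dtype: np.generic or mil.type type or None
+            The data type of the input. If set to ``None``, it may be filled
+            in later (for example, ``ImageType`` sets ``fp16`` when the color
+            layout is ``GRAYSCALE_FLOAT16``).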
+ """ + + self.name = name + if shape is not None: + self.shape = _get_shaping_class(shape) + else: + self.shape = None + self.dtype = dtype + + +class ImageType(InputType): + def __init__( + self, + name=None, + shape=None, + scale=1.0, + bias=None, + color_layout=ColorLayout.RGB, + channel_first=None, + ): + """ + Configuration class used for image inputs in Core ML. + + Parameters + ---------- + scale: float or list of floats + The scaling factor for all values in the image channels. + + bias: float or list of floats + * If ``color_layout`` is ``ct.colorlayout.GRAYSCALE`` or + ``ct.colorlayout.GRAYSCALE_FLOAT16``, bias would be a ``float``. + * If ``color_layout`` is ``ct.colorlayout.RGB`` or ``ct.colorlayout.BGR``, + bias would be a list of ``float``. + + color_layout: string or enumeration of type ``ct.colorlayout`` + Color layout of the image. Valid values are as follows: + + Enumeration (recommended): + * ``ct.colorlayout.RGB`` + * ``ct.colorlayout.BGR`` + * ``ct.colorlayout.GRAYSCALE`` + * ``ct.colorlayout.GRAYSCALE_FLOAT16`` + + String values (older way to specify): + * ``'G'``: Grayscale (maps to ``ct.colorlayout.GRAYSCALE``) + * ``'RGB'``: [Red, Green, Blue] (maps to ``ct.colorlayout.BGR``) + * ``'BGR'``: [Blue, Green, Red] (maps to ``ct.colorlayout.RGB``) + + channel_first: (bool) or None + Set to ``True`` if input format is channel first. + + Default format: + * For TensorFlow: channel last (``channel_first=False``). + * For PyTorch: channel first (``channel_first=True``). + """ + super(ImageType, self).__init__(name, shape) + self.scale = scale + msg = "color_layout should be an enum of type ct.colorlayout, i.e. one of: " \ + "{ct.colorlayout.RGB, ct.colorlayout.BGR, " \ + "ct.colorlayout.GRAYSCALE, ct.colorlayout.GRAYSCALE_FLOAT16}" + if not (isinstance(color_layout, str) or isinstance(color_layout, ColorLayout)): + raise ValueError(msg) + if isinstance(color_layout, str): + if color_layout not in ("G", "RGB", "BGR"): + raise ValueError(msg) + color_layout = ColorLayout(color_layout) + + self.color_layout = color_layout + if color_layout == ColorLayout.GRAYSCALE_FLOAT16: + self.dtype = types.fp16 + if bias is None: + if color_layout in (ColorLayout.GRAYSCALE, ColorLayout.GRAYSCALE_FLOAT16): + self.bias = 0.0 + else: + self.bias = [0.0, 0.0, 0.0] + else: + self.bias = bias + self.channel_first = channel_first + + def __repr__(self): + return self.__str__() + + def __str__(self): + str_repr = 'ImageType[name={}, shape={}, scale={}, bias={}, ' +\ + 'color_layout={}, channel_first={}]' + return str_repr.format(self.name, self.shape, self.scale, self.bias, + self.color_layout, self.channel_first) + + +class TensorType(InputType): + def __init__(self, name=None, shape=None, dtype=None, default_value=None): + """ + Specify a (dense) tensor input. + + Parameters + ---------- + name: str + Input name. Must match an input name in the model (usually the + Placeholder name for TensorFlow or the input name for PyTorch). + + The ``name`` is required except for a TensorFlow model in which there is + exactly one input Placeholder. + + shape: (1) list of positive int or RangeDim, or (2) EnumeratedShapes + The shape of the input. + + For TensorFlow: + * The ``shape`` is optional. If omitted, the shape is inferred from + TensorFlow graph's Placeholder shape. + + For PyTorch: + * The ``shape`` is required. 
+ + dtype: np.generic or mil.type type + For example, ``np.int32`` or ``coremltools.converters.mil.mil.types.fp32`` + + default_value: np.ndarray + If provided, the input is considered optional. At runtime, if the + input is not provided, ``default_value`` is used. + + Limitations: + * If ``default_value`` is ``np.ndarray``, all + elements are required to have the same value. + + * The ``default_value`` may not be specified if ``shape`` is + ``EnumeratedShapes``. + + Examples + -------- + * ``ct.TensorType(name="input", shape=(1, 2, 3))` implies `dtype == + np.float32`` + + * ``ct.TensorType(name="input", shape=(1, 2, 3), dtype=np.int32)`` + + * ``ct.TensorType(name="input", shape=(1, 2, 3), + dtype=ct.converters.mil.types.fp32)`` + """ + super(TensorType, self).__init__(name, shape) + if dtype is not None: + if is_builtin(dtype): + self.dtype = dtype + if dtype not in (types.fp16, types.fp32, types.fp64, types.int32, types.int64, types.bool): + raise TypeError("dtype={} is unsupported for inputs/outputs of the model".format(dtype)) + else: + # Assume dtype is numpy type + try: + self.dtype = numpy_type_to_builtin_type(dtype) + except TypeError: + raise TypeError("dtype={} is unsupported".format(dtype)) + if dtype not in (np.float16, np.float32, np.float64, float, + np.int32, np.int64, int, + bool, np.bool_): + raise TypeError("dtype={} is unsupported for inputs/outputs of the model".format(dtype)) + + if default_value is not None: + if isinstance(shape, EnumeratedShapes): + msg = 'TensorType input {} has EnumeratedShapes and ' +\ + 'may not be optional' + raise ValueError(msg.format(name)) + if not isinstance(default_value, np.ndarray): + msg = 'TensorType {} default_value is not np.ndarray' + raise ValueError(msg.format(name)) + default_fill_val = default_value.flatten()[0] + if not np.all(default_value == default_fill_val): + msg = 'TensorType {} default_value can only have ' +\ + 'same entries' + raise ValueError(msg.format(name)) + if not self.shape.has_symbolic and \ + list(default_value.shape) != list(self.shape.symbolic_shape): + msg = 'TensorType {} default_value shape {} != ' +\ + 'TensorType.shape {}' + raise ValueError(msg.format(name, default_value.shape, + self.shape.to_list())) + if self.dtype is not None and \ + numpy_type_to_builtin_type(default_value.dtype) != self.dtype: + msg = 'TensorType {} default_value dtype {} != ' +\ + 'TensorType.dtype {}' + raise ValueError(msg.format(name, default_value.dtype, + self.dtype.__type_info__())) + else: + self.dtype = numpy_type_to_builtin_type(default_value.dtype) + + self.default_value = default_value + + def __repr__(self): + return self.__str__() + + def __str__(self): + return 'TensorType[name={}, shape={}, dtype={}]'.format(self.name, + self.shape, + self.dtype) + + +class RangeDim: + def __init__(self, lower_bound=1, upper_bound=-1, default=None, + symbol=None): + """ + A class for providing a range of accepted shapes. + + Parameters + ---------- + lower_bound: (int) + The minimum valid value for the shape. + + upper_bound: (int) + The maximum valid value for the shape. + + Set to ``-1`` if there is no upper limit. + + default: (int) or None + The default value that is used for initiating the model, and set in the input shape field of the model file. + + If set to ``None``, ``lower_bound`` would be used as default. + + symbol: (str) + Optional symbol name for the dim. Autogenerate a symbol name if + not specified. 
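+
+        Examples
+        --------
+        A flexible batch dimension that accepts sizes 1 through 64 (the bounds
+        and default below are illustrative):
+
+        >>> batch_dim = RangeDim(lower_bound=1, upper_bound=64, default=1)
+        >>> input_shape = Shape(shape=(batch_dim, 3, 224, 224))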
+ """ + if symbol is None: + from coremltools.converters.mil.mil import get_new_symbol + self.symbol = get_new_symbol() + else: + from coremltools.converters.mil.mil import Symbol + self.symbol = Symbol(symbol) + self.lower_bound = lower_bound + self.upper_bound = upper_bound + if default is None: + self.default = lower_bound + else: + if default < lower_bound: + raise ValueError( + "Default value {} is less than minimum value ({}) for range".format( + default, lower_bound + ) + ) + if upper_bound > 0 and default > upper_bound: + raise ValueError( + "Default value {} is greater than maximum value ({}) for range".format( + default, upper_bound + ) + ) + self.default = default + + def __repr__(self): + return self.__str__() + + def __str__(self): + return 'RangeDim(lower_bound={}, upper_bound={}, default={}, symbol="{}")'.format( + self.lower_bound, self.upper_bound, self.default, self.symbol) + + +class Shape: + def __init__(self, shape, default=None): + """ + The basic shape class to be set in InputType. + + Parameters + ---------- + shape: list of (int), symbolic values, RangeDim object + The valid shape of the input. + + default: tuple of int or None + The default shape that is used for initiating the model, and set in + the metadata of the model file. + + If None, then ``shape`` is used. + """ + from coremltools.converters.mil.mil import get_new_symbol + + if not isinstance(shape, (list, tuple)): + msg = "Shape should be list or tuple, got type {} instead" + raise ValueError(msg.format(type(shape))) + self.symbolic_shape = [] + shape = list(shape) + for idx, s in enumerate(shape): + if s is None or s == -1: + msg = 'Dimension cannot be None or -1. Use ' +\ + 'ct.RangeDim for runtime determined dimension. ' +\ + 'Dim {}: {} ' +\ + 'See https://coremltools.readme.io/docs/flexible-inputs' + raise ValueError(msg.format(idx, s)) + if isinstance(s, RangeDim): + sym = s.symbol + self.symbolic_shape.append(sym) + elif isinstance(s, (np.generic, int)) or is_symbolic(s): + self.symbolic_shape.append(s) + else: + raise ValueError( + "Unknown type {} to build symbolic shape.".format(type(s)) + ) + + self.shape = tuple(shape) + if default is not None: + if not isinstance(default, (list, tuple)): + raise ValueError( + "Default shape should be list or tuple, got type {} instead".format( + type(default) + ) + ) + for idx, s in enumerate(default): + if not isinstance( + s, (np.generic, int) + ) and not is_symbolic(s): + raise ValueError( + "Default shape invalid, got error at index {} which is {}".format( + idx, s + ) + ) + else: + default = [] + for idx, s in enumerate(self.shape): + if isinstance(s, RangeDim): + default.append(s.default) + elif s is None or s == -1: + default.append(self.symbolic_shape[idx]) + else: + default.append(s) + self.default = tuple(default) + + @property + def has_symbolic(self): + return any(is_symbolic(s) for s in self.symbolic_shape) + + def to_list(self, allow_symbolic=False): + if not allow_symbolic and self.has_symbolic: + return None + return self.symbolic_shape + + +class EnumeratedShapes: + def __init__(self, shapes, default=None): + """ + A shape class for setting multiple valid shapes in InputType. + + Parameters + ---------- + shapes: list of Shape objects, or Shape-compatible lists. + The valid shapes of the inputs. + + If input provided is not a Shape object, but can be converted to a Shape, + the Shape object would be stored in ``shapes`` instead. 
+ + default: tuple of int or None + The default shape that is used for initiating the model, and set in + the metadata of the model file. + + If None, then the first element in ``shapes`` is used. + """ + from coremltools.converters.mil.mil import get_new_symbol + + if not isinstance(shapes, (list, tuple)): + raise ValueError( + "EnumeratedShapes should be list or tuple of shape, got type {} instead".format( + type(shapes) + ) + ) + if len(shapes) < 2: + raise ValueError( + "EnumeratedShapes should be take a list or tuple with len >= 2, got {} instead".format( + len(shapes) + ) + ) + + self.shapes = [] + for idx, s in enumerate(shapes): + if isinstance(s, Shape): + self.shapes.append(s) + else: + self.shapes.append(Shape(s)) + + self.symbolic_shape = self.shapes[0].symbolic_shape + for shape in self.shapes: + for idx, s in enumerate(shape.symbolic_shape): + if is_symbolic(self.symbolic_shape[idx]): + continue + elif is_symbolic(s): + self.symbolic_shape[idx] = s + elif s != self.symbolic_shape[idx]: + self.symbolic_shape[idx] = get_new_symbol() + + if default is not None: + if not isinstance(default, (list, tuple)): + raise ValueError( + "Default shape should be list or tuple, got type {} instead".format( + type(default) + ) + ) + for idx, s in enumerate(default): + if not isinstance( + s, (np.generic, int) + ) and not is_symbolic(s): + raise ValueError( + "Default shape invalid, got error at index {} which is {}".format( + idx, s + ) + ) + else: + default = self.shapes[0].default + self.default = default + + +def _get_shaping_class(shape): + """ + Returns a Shape class or EnumeratedShapes class for `shape` + where `shape` could be lists/tuple/Shape/EnumeratedShapes/etc. + """ + if isinstance(shape, (Shape, EnumeratedShapes)): + return shape + + try: + enum_shape = EnumeratedShapes(shape) + return enum_shape + except ValueError: + pass + try: + shape = Shape(shape) + return shape + except ValueError: + pass + raise ValueError("Can't convert to CoreML shaping class from {}.".format(shape)) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/__init__.py new file mode 100644 index 00000000..15f4c03b --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/__init__.py @@ -0,0 +1,19 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +SPACES = " " + +from .block import Block, Function, curr_block +from .builder import Builder +from .input_type import (SUPPORT_FLOAT_TYPES, SUPPORT_INT_TYPES, DefaultInputs, + InputSpec, InternalVar, ListInputType, + PyFunctionInputType, TensorInputType, TupleInputType) +from .operation import Operation, mil_list, precondition +from .program import (InputType, Placeholder, Program, Symbol, + get_existing_symbol, get_new_symbol, + get_new_variadic_symbol) +from .var import ListVar, Var +from .ops.defs._op_reqs import register_op + diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/block.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/block.py new file mode 100644 index 00000000..b9759959 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/block.py @@ -0,0 +1,894 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import copy
+from collections import Counter, OrderedDict
+
+from coremltools import _OPSET, _logger as logger
+from coremltools.converters.mil._deployment_compatibility import \
+    AvailableTarget as _target
+
+from . import SPACES, types
+from .types.symbolic import is_symbolic, k_used_symbols
+from .var import ComplexVar, InternalVar, Var
+from .visitors.dot_visitor import DotVisitor
+
+# BLOCK_STACK[-1] is the current block
+BLOCK_STACK = []
+DEBUG = False
+
+def curr_block():
+    if len(BLOCK_STACK) == 0:
+        raise ValueError("Must call Builder inside a Function or Block")
+    return BLOCK_STACK[-1]
+
+def curr_opset_version():
+    block = curr_block()
+    while not isinstance(block, Function):
+        block = block.outer_op.enclosing_block
+    return block.opset_version
+
+def is_current_opset_version_compatible_with(opset_version):
+    if curr_opset_version() is None:
+        return opset_version <= _target.iOS13
+    return curr_opset_version() >= opset_version
+
+
+class InvalidBlockStateError(Exception):
+    pass
+
+
+class Block:
+    __slots__ = [
+        "name",
+        "_block_inputs",
+        "_outputs",
+        "operations",
+        "_internal_vars",
+        "outer_op",
+    ]
+
+    counter = 0
+
+    @classmethod
+    def _get_new_name(cls):
+        curr_val = cls.counter
+        cls.counter += 1
+        return "block" + str(curr_val)
+
+    def __init__(self, block_inputs=None, outer_op=None, name=None):
+        """
+        Inputs:
+
+        block_inputs: python tuple[Var].
+
+            block_inputs is None except when the block represents a loop. By
+            convention, block_inputs should have names ending in '.x', and the
+            Vars are not produced by any op (block_inputs[i]._op is None).
+
+            Ex:
+
+            # main(%a: (1, 2, fp32),
+            #      %b: (1, 2, fp32),
+            #      %c: (1, 2, fp32)) {
+            #   block0() {
+            #     %const1: (1, fp32) = const(...)
+            #     %loop:0: (1, 2, fp32), %loop:1: (1, 2, fp32) = \
+            #     while_loop(loop_vars=(%a, %b))
+            #       loop_cond(%a.x, %b.x) {
+            #         %blah: (bool) = some_op(x=%a.x, y=%b.x)
+            #         %cond_var: (bool) = some_op2(x=%a.x, y=%blah)
+            #       } -> (%cond_var)
+            #       loop_body(%a.x, %b.x) {
+            #         %add_0: (1, 2, fp32) = add(x=%a.x, y=%b.x)
+            #       } -> (%add_0, %b.x)
+            #     %linear: (1, fp32) = linear(...)
+            #   } -> (%loop:0, %loop:1)
+            # }
+
+            %a.x, %b.x are block_inputs.
+
+            `some_op` in the `loop_cond` block can access %a, %b, %a.x, %b.x.
+            `some_op`, however, cannot take %linear as input.
+
+        outer_op: Operation
+            The enclosing op. None iff this Block is a Function.
+
+        function_inputs: tuple[Var]
+            function_inputs are always visible for this block and all blocks
+            nested within. If function_inputs is None, get it from
+            `outer_op.block`.
+        """
+        self.name = name
+        if self.name is None:
+            self.name = Block._get_new_name()
+
+        # list[Operation]. Topologically sorted.
+        self.operations = []
+
+        # Must be set before self.validate()
+        self.outer_op = outer_op
+
+        self._block_inputs = block_inputs
+        if self._block_inputs is None:
+            self._block_inputs = tuple()
+
+        # list[Var]. This is converted to str when generating MIL proto.
+        self._outputs = []
+
+        # If we create a const, its inputs (mode, val) cannot themselves be
+        # const (infinite recursion), so they must be considered as always
+        # visible.
+        self._internal_vars = set()
+
+        if self.outer_op is None and not isinstance(self, Function):
+            msg = "Block {} is not a Function, so outer_op cannot be None"
+            raise ValueError(msg.format(self.name))
+
+        self.validate()
+
+    def validate(self):
+        """
+        Basic validation to protect against some invalid state.
+        """
+        if not DEBUG:
+            return
+
+        for op in self.operations:
+            for b in op.blocks:
+                b.validate()
+            if op.outputs is None:
+                raise InvalidBlockStateError()
+
+            # Check the input output relationships
+            # from outputs -> inputs
+            for ov in op.outputs:
+                child_op_count = Counter(ov.child_ops)
+                for next_op, c in child_op_count.items():
+                    c_actual = next_op.get_flattened_inputs().count(ov)
+                    if c_actual != c:
+                        msg = (
+                            "Var {} should be consumed by op {} {}"
+                            + " times, but op {} uses it {} times.\n{}"
+                        )
+                        raise InvalidBlockStateError(
+                            msg.format(
+                                ov.name,
+                                next_op.name,
+                                c,
+                                next_op.name,
+                                c_actual,
+                                next_op,
+                            )
+                        )
+
+            # from inputs -> outputs
+            input_var_count = Counter(op.get_flattened_inputs())
+            for iv, c in input_var_count.items():
+                c_actual = iv.child_ops.count(op)
+                if c_actual != c:
+                    msg = (
+                        "Var {} should be consumed by op {} {}"
+                        + " times, but op {} uses it {} times.\n{}"
+                    )
+                    raise InvalidBlockStateError(
+                        msg.format(iv.name, op.name, c_actual, op.name, c, op)
+                    )
+
+        # 1 to 1 mapping between Block outputs and Var.consuming_blocks
+        for op in self.operations:
+            for ov in op.outputs:
+                for b in ov.consuming_blocks:
+                    if ov not in b.outputs:
+                        msg = "Var {} should be output of block {}: {}"
+                        raise ValueError(msg.format(ov.name, b.name, b))
+
+        for v in self.outputs:
+            if self not in v.consuming_blocks:
+                msg = "Var {} should be output of block {}: {}"
+                raise ValueError(msg.format(v.name, self.name, self))
+
+    def remove_inputs(self, curr_input_vars):
+        """
+        curr_input_vars: list[Var], whose elements must be in
+        self._block_inputs.
+        """
+        self.validate()
+        remove_idx = [self._block_inputs.index(v) for v in curr_input_vars]
+        self._block_inputs = [
+            v for i, v in enumerate(self._block_inputs) if i not in remove_idx
+        ]
+
+    def find_ops(self, prefix=None, op_type=None):
+        """
+        Return a list of ops whose name starts with `prefix` (if specified)
+        and whose op_type matches `op_type` (if specified). At least one of
+        {prefix, op_type} must be specified.
+
+        prefix: str
+
+        Return list[Operation]. Empty list if no op satisfies.
+        """
+        if prefix is None and op_type is None:
+            raise ValueError("Must specify one of {prefix, op_type}")
+        found_ops = []
+        for op in self.operations:
+            prefix_match = prefix is None or op.name[: len(prefix)] == prefix
+            op_type_match = op_type is None or op.op_type == op_type
+            if prefix_match and op_type_match:
+                found_ops.append(op)
+            for b in op.blocks:
+                found_ops.extend(b.find_ops(prefix=prefix, op_type=op_type))
+        return found_ops
+
+    def add_internal_var(self, internal_var):
+        if not isinstance(internal_var, InternalVar):
+            raise ValueError("Only InternalVar can be manually added to Block.")
+        self._internal_vars.add(internal_var)
+
+    @property
+    def inputs(self):
+        return self._block_inputs
+
+    @property
+    def outputs(self):
+        return self._outputs
+
+    def is_var_visible_in_block(self, var, upto_op_with_id=None):
+        """
+        Checks if a var is visible to ops starting from id=`upto_op_with_id` inside the block.
+ + Var is visible if + - It is the output of a const op, or + - It is the output of "preceding" operations in that block, or + - It is visible in the enclosing block, or + - It is either a block or a function input + + If upto_op_with_id is None, outputs of all operations inside the block are visible to + that block. + """ + + if var in self._internal_vars: + return True + + inputs = self.function_inputs if isinstance(self, Function) else self.inputs + if var in inputs: + return True + + idx = len(self.operations) if upto_op_with_id is None else upto_op_with_id + + for i in range(idx-1, -1, -1): + op_outputs = self.operations[i].outputs + if op_outputs is not None and var in op_outputs: + return True + + if self.outer_op is not None: + enclosing_block = self.outer_op.enclosing_block + outer_op_id = enclosing_block.find_op_id_in_block(self.outer_op) + if enclosing_block.is_var_visible_in_block(var, upto_op_with_id=outer_op_id): + return True + + return False + + def find_op_id_in_block(self, target_op): + try: + idx = self.operations.index(target_op) + except ValueError: + raise ValueError("Op {} not found in {}: {}".format(target_op.name, self.name, self)) + return idx + + def set_outputs(self, outputs): + """ + outputs: list[Var] + """ + if not isinstance(outputs, list): + raise ValueError("Outputs must be list of Vars") + + self.validate() + for ov in outputs: + if not self.is_var_visible_in_block(ov): + msg = ( + "Var {} is not visible in block {} and thus cannot " + + "be a block output.\n{}" + ) + raise ValueError(msg.format(ov.name, self.name, self)) + + # For duplicate vars in self._outputs, only remove block once. + for ov in set(self._outputs): + ov.consuming_blocks.remove(self) + + # Need to copy, or block's output would be completely tied to a var's + # output and we cannot replace a block output with another var's + # output. + self._outputs = copy.copy(outputs) + # For duplicate vars in outputs, only add consuming_blocks once. + for ov in set(outputs): + ov.consuming_blocks.append(self) + + def __enter__(self): + global BLOCK_STACK + BLOCK_STACK.append(self) + return self + + def __exit__(self, type, value, traceback): + self._propagate_nonreplaceable_vars() + global BLOCK_STACK + BLOCK_STACK = BLOCK_STACK[:-1] + + def _insert_op_before(self, new_op, before_op=None): + """ + A private API used by builder. Please use `builder.YOUR_OP(...,before_op)`. + + new_op's outputs are not used (not input to any other op) after + this call. All inputs to new_op must be visible at or before + the before_op (i.e., new_op must be added in topologically sorted + order). Note that this is more restrictive than MIL, whose Block + supports lexical scoping and thus an op can reference Var in enclosing + scopes. new_op.name must be unique in the block. + + before_op=None to append new_op at the end of self.operations. + + Given: %2 = op0(%1, %1) + %4 = op2(%1) + %6 = op3(%4, %4) + + Execute: insert_op_before(op1, before_op=op2), + where %3 = op1(%1, %2) + + Result: %2 = op0(%1, %1) + %3 = op1(%1, %2) + %4 = op2(%1) + %6 = op3(%4, %4) + + Comment: We assume op1 has been constructed outside the block with + %1, %2 as inputs. Typically it's builder's job to create an op and + insert into the current block. + + Comment: insert_op_before(op1, before_op=op0) would error as %2 (an input to op1) + is not visible before op0. 
+ """ + self.validate() + + idx = len(self.operations) if before_op is None else self.find_op_id_in_block(before_op) + + # check inputs are visible + for k, v in new_op.inputs.items(): + if not isinstance(v, (Var, tuple)): + continue + vs = [v] if isinstance(v, Var) else v + for s in vs: + if not self.is_var_visible_in_block(s, upto_op_with_id=idx): + before_op_name = before_op.name if before_op is not None else "None" + msg = "Op '{}' input {}={} is not in scope of {} before {}" + raise ValueError( + msg.format(new_op.name, k, s.name, self.name, before_op_name) + ) + + # add new_op + if before_op is None: + self.operations.append(new_op) + else: + self.operations.insert(idx, new_op) + + def _replace_var( + self, + old_var, + new_var, + start=0, + end_id=-1, + no_check_var_types=False, + ): + """ + Helper function for replace_uses_of_var_after_op + """ + num_ops_affected = 0 + + if end_id == -1: + op_list = self.operations[start:] + else: + op_list = self.operations[start : end_id + 1] + + for op in op_list: + new_inputs = {} + affected = False + for k, v in op.inputs.items(): + if isinstance(v, (list, tuple)) and old_var in v: + new_inputs[k] = tuple(new_var if vv == old_var else vv for vv in v) + affected = True + elif v == old_var: + new_inputs[k] = new_var + affected = True + else: + new_inputs[k] = v + if affected: + num_ops_affected += 1 + op.set_inputs(no_check_var_types=no_check_var_types, + **new_inputs) + + # Replace recursively. + for b in op.blocks: + num_ops_affected += b._replace_var(old_var, new_var) + + if end_id != -1 and old_var.op not in op_list: + return num_ops_affected + + if old_var in self._block_inputs: + idx = self._block_inputs.index(old_var) + self._block_inputs = list(self._block_inputs) + self._block_inputs[idx] = new_var + self._block_inputs = tuple(self._block_inputs) + + # If old_var is block's output, replace as well. + self.replace_block_output_var(old_var, new_var) + + return num_ops_affected + + def replace_block_output_var( + self, + old_var, + new_var, + ): + """ + If old_var is in the list of block's outputs, + replace old_var with the new_var. + """ + found_old_var_in_output = False + # There could be multiple matched `old_var` in output when the program has duplicate vars + # in the output. + for idx, output_var in enumerate(self._outputs): + if old_var == output_var: + found_old_var_in_output = True + self._outputs[idx] = new_var + if found_old_var_in_output: + new_var.consuming_blocks.append(self) + # This block no longer uses `old_var` as its outputs + old_var.consuming_blocks.remove(self) + # Ensure output name is consistent + if isinstance(self, Function): + new_var.name = old_var.name + + def try_replace_uses_of_var_after_op( + self, + anchor_op, + old_var, + new_var, + no_check_var_types=False, + no_check_var_visibility=False, + ): + """ + :param anchor_op: Operation + :param old_var: Var + :param new_var: Var + :param no_check_var_types: bool + :param no_check_var_visibility: bool + :return: True if the old_var can be replaced by new_var. False otherwsie. + + This helper function guards the replace_uses_of_var_after_op function, + by first checking if the old_var could be replaced by the new_var. + + 1. If old_var can be replaced by new_var, the replace_uses_of_var_after_op is called, + and returns True. 2. Return False if the replacement is not allow. 
+ """ + if not old_var.can_be_replaced_by_var(new_var): + return False + + self.replace_uses_of_var_after_op( + anchor_op=anchor_op, + old_var=old_var, + new_var=new_var, + no_check_var_types=no_check_var_types, + no_check_var_visibility=no_check_var_visibility, + ) + return True + + def replace_uses_of_var_after_op( + self, + anchor_op, + old_var, + new_var, + no_check_var_visibility=False, + end_op=None, + no_check_var_types=False, + force_replace=False, + ): + """ + Replace all uses of `old_var` with `new_var` after `anchor_op`, + and before `end_op` (inclusive). + + That is all the ops that use `old_var` will now use `new_var`. + The op that produces the `old_var` will continue to produce it, its output + won't be replaced by `new_var`. + + If `anchor_op` is None, replace all input occurrences of `old_var` in the block. If + `end_op` is None, all occurrences of `old_var` are replaced in the block starting from + the op just after `anchor_op` + + no_check_var_visibility: True to disable the check ensuring new_var is visible + (visibility requirement depends on anchor_op). + + no_check_var_types: An error will be raised if the type of new_var is not same as the + old_var, unless `no_check_var_types` is set to True. Normally type inference is + re-invoked for all the child ops of `old_var` after updating it to `new_var`. However, + this is skipped if `no_check_var_types` is set to True. + + old_var, new_var must meet the following conditions: + + - old_var, new_var both existing within the block. This implies that + the op generating new_var must be inserted prior to this + replacement. + + - Affected ops (i.e., Operation after anchor_op that take old_var as + input) must generate the same type inference results as before. + + - new_var must be visible at or before anchor_op in the order of + self.operations. + + Given: %2 = op0(%1, %1) + %3 = op1(%1, %2) + %4 = op2(%1) + %6 = op3(%4, %4) + + Execute: replace_uses_of_var_after_op(op2, %4, %3) + + Result: %2 = op0(%1, %1) + %3 = op1(%1, %2) + %4 = op2(%1) + %6 = op3(%3, %3) # type inference check against %6 + + + Comment: Execute: replace_uses_of_var_after_op(op1, %4, %3) would lead to + identical results, as op2 does not take %4 as input. + + Comment: replace_uses_of_var_after_op(op0, %4, %3) would cause error as %3 is + after op0 + + Comment: To avoid clutter, we drop the names of arguments and return + Var in the illustration above. + + + Another example, usage of "end_op": + + Given: %2 = op0(%1, %1) + %3 = op1() + %4 = op2(%1, %2) + %5 = op3(%2) + + if execute replace_uses_of_var_after_op(anchor_op=op0, old_var=%2, new_var=%3) + + Result: %2 = op0(%1, %1) + %3 = op1() + %4 = op2(%1, %3) + %5 = op3(%3) + + if execute replace_uses_of_var_after_op(anchor_op=op0, old_var=%2, new_var=%3, end_op=op2) + + Result: %2 = op0(%1, %1) + %3 = op1() + %4 = op2(%1, %3) # %2 is replaced with %3 till here + %5 = op3(%2) # will continue using %2 + + """ + if not force_replace and old_var.op is not None and new_var.op is not None: + if not old_var.can_be_replaced_by_var(new_var): + old_nonreplaceable_vars = old_var.nonreplaceable_vars_upstream + new_nonreplaceable_vars = new_var.nonreplaceable_vars_upstream + err_var = None + for _var in old_nonreplaceable_vars: + if _var not in new_nonreplaceable_vars: + err_var = _var + break + msg = ( + "var {} cannot be replaced by {}. Since the nonreplaceable var {} might " + "potentially " + "be removed during the replacement of those vars." 
+ ).format(old_var, new_var, err_var) + raise ValueError(msg) + + start = self.find_op_id_in_block(anchor_op) + 1 if anchor_op is not None else 0 + end_id = self.find_op_id_in_block(end_op) if end_op is not None else -1 + + if not no_check_var_visibility: + self.validate() + + idx = start if anchor_op is not None else len(self.operations) + visibility_error_msg = ( + "new_var '{}' is not visible in block '{}' at or before " + + "anchor_op '{}'" + ) + anchor_op_name = "None" if anchor_op is None else anchor_op.name + + if isinstance(new_var, ComplexVar): + # For CompleVar, as it's just a temp wrapper to transit the real and imag data, we + # check the visibility of its real and imaginary Var instead. + if not self.is_var_visible_in_block(new_var.real, upto_op_with_id=idx): + raise ValueError( + visibility_error_msg.format( + new_var.real.name, self.name, anchor_op_name + ) + ) + if not self.is_var_visible_in_block(new_var.imag, upto_op_with_id=idx): + raise ValueError( + visibility_error_msg.format( + new_var.imag.name, self.name, anchor_op_name + ) + ) + else: + if not self.is_var_visible_in_block(new_var, upto_op_with_id=idx): + raise ValueError( + visibility_error_msg.format( + new_var.name, self.name, anchor_op_name + ) + ) + + if end_id != -1 and end_id < start: + msg = "end_op '{}' comes before the anchor_op '{}'" + raise ValueError(msg.format(end_op.name, anchor_op.name)) + + num_ops_affected = self._replace_var( + old_var, + new_var, + start=start, + end_id=end_id, + no_check_var_types=no_check_var_types, + ) + + logger.debug("Num ops affected in replacing var: {}".format(num_ops_affected)) + + def remove_ops(self, existing_ops): + """ + Remove ops in `existing_ops`. + + Args: existing_ops: List[Operation]. All ops in this list must be pre-existing in the + block. It allows duplicated ops, but duplicated ops will only be removed once. + + Raises: + ValueError if any `op` in `existing_ops` meets any of following conditions: + - `op` is not found in the block + - any other op in the block uses output Vars of `op` + - the output var is block's output + """ + self.validate() + + # Dedup ops because each op can only be deleted once. + existing_ops_set = set(existing_ops) + existing_ops = list(existing_ops_set) + # Find the idx of each to-be-removed op, and raise errors if any op couldn't be found. 
+ idxs = [-1] * len(existing_ops) + for i, op in enumerate(self.operations): + if op in existing_ops_set: + idxs[existing_ops.index(op)] = i + if -1 in idxs: + not_found = [] + for i, op in zip(idxs, existing_ops): + if i == -1: + not_found.append(op.name) + raise ValueError( + "Ops {} not found in block {}".format(not_found, self.name) + ) + + # Remove ops in reverse topological order + pairs = list(zip(idxs, existing_ops)) + pairs.sort(key=lambda x: x[0], reverse=True) + + for idx, op in pairs: + for i, v in enumerate(op.outputs): + # Check that no ops depend on op's outputs + if len(v.child_ops) > 0: + child_op_names = [s.name for s in v.child_ops] + msg = ( + "Cannot delete op '{}' with active output at id {}: '{}' " + + "used by ops {}" + ) + raise ValueError(msg.format(op.name, i, v.name, child_op_names)) + # Check that the output Var isn't block's output + if v in self._outputs: + msg = ( + "cannot delete op {} with output {}: {} " + + "that's block {}'s output" + ) + raise ValueError(msg.format(op.name, i, v.name, self.name)) + + for b in op.blocks: + b.set_outputs([]) + b.remove_ops(b.operations) + + # Remove the op (in reverse topological order) + self.operations.pop(idx) + op.enclosing_block = None + + for v in op.inputs.values(): + if isinstance(v, (tuple, list)): + for vv in v: + vv.remove_child_op(op) + else: + v.remove_child_op(op) + + def operations_for_vars(self, end_vs): + """ + Inputs: + + end_vs: list[Operation]. + + Return: + + list[Operation] which are subset of self.operations that are ancestors + of `end_vs`. Also do recursion into nested blocks. + """ + used_vars = set(end_vs) + used_ops = [] + for op in reversed(self.operations): + # if none of op's output is used, delete op + if not set(op.outputs).intersection(used_vars): + continue + + used_ops.append(op) # append in reverse topological order + + # recursively search for nested blocks + ops_to_check = [] + for b in op.blocks: + ops_to_check += b.operations_for_vars(b.outputs) + ops_to_check.append(op) + + # mark used vars + for op_to_check in ops_to_check: + # mark all op's inputs to used + for _, input_var in op_to_check.inputs.items(): + if isinstance(input_var, (tuple, list)): + used_vars.update(list(input_var)) + else: + used_vars.add(input_var) + + return used_ops[::-1] + + def _propagate_nonreplaceable_vars(self): + def propagate_nonreplaceable_vars_block(block): + for op in list(block.operations): + for b in op.blocks: + propagate_nonreplaceable_vars_block(b) + if op.outputs is None: + continue + for o in op.outputs: + o._reset_nonreplaceable_vars_upstream() + o._set_nonreplaceable_vars_upstream() + propagate_nonreplaceable_vars_block(self) + + def indented_str(self, indent=None): + if indent is None: + indent = "" + s = ( + indent + + self.name + + "(" + + ", ".join([str(var) for var in self._block_inputs]) + ) + s += ") {\n" + for op in self.operations: + s += op.indented_str(indent + SPACES * 1) + s += indent + "} -> (" + if self._outputs is not None: + s += ", ".join(["%" + v.name for v in self._outputs]) + s += ")\n" + return s + + def __repr__(self): + return self.__str__() + + def __str__(self): + return self.indented_str() + + def get_dot_string( + self, + function_name="main", + prefix_id=0, + highlight_debug_op_types=None, + highlight_debug_op_names=None, + ): + """ + Return the dot string that can be used to show the block + with dot. Const ops are not added to the dot string. 
+ + * Input vars : yellow + * output vars : goldenrod2 + * op names that user wants to highlight, provided in "highlight_debug_op_names": cyan + * op types that user wants to highlight, provided in "highlight_debug_op_types": green + + Examples + -------- + >>> import graphviz + >>> graphviz.Source(block.get_dot_string()).view() + >>> # OR + >>> graphviz.Source(block.get_dot_string()).view(filename='graph.pdf') + """ + if highlight_debug_op_types is None: + highlight_debug_op_types = [] + if highlight_debug_op_names is None: + highlight_debug_op_names = [] + + dotstring = "digraph g {\n" + "\tcompound=true;\n" + + input_var_names = list(self.inputs.keys()) + output_var_names = [v.name for v in self.outputs] + + debug_op_types = [] + if len(highlight_debug_op_types) > 0: + for op in self.operations: + if op.op_type in highlight_debug_op_types: + debug_op_types.append(op.name) + + vis = DotVisitor() + vis.highlight_nodes(input_var_names, "yellow").highlight_nodes( + output_var_names, "goldenrod2" + ).highlight_nodes(highlight_debug_op_names, "cyan").highlight_nodes( + debug_op_types, "green" + ) + + vis.visit_all(self, nodename_prefix=str(prefix_id)) + res = vis.get_result("subgraph", "cluster_" + function_name.replace("/", "_")) + dotstring += "\n".join("\t" + r for r in res.split("\n")) + "\n" + dotstring += "}" + return dotstring + + +class Function(Block): + def __init__(self, inputs, opset_version=None): + """ + inputs: str -> placeholder + opset_version: AvailableTarget enum. Describes the opset version of the function + """ + self.placeholder_inputs = inputs + self.opset_version = opset_version + + # str -> Var + self._input_dict = OrderedDict() + for k, v in self.placeholder_inputs.items(): + v.set_name(k) # set to user input name + self._input_dict[k] = v.outputs[0] + self.function_inputs = tuple(self._input_dict.values()) + + global k_used_symbols + global k_num_internal_syms + for inp in self.function_inputs: + if types.is_tensor(inp.dtype): + shapes = inp.dtype.get_shape() + for s in shapes: + if is_symbolic(s): + k_used_symbols.add(s) + super().__init__() + + # Override Block's input + @property + def inputs(self): + return self._input_dict + + @property + def opset_version(self): + return self._opset_version + + @opset_version.setter + def opset_version(self, version): + if not ( + isinstance(version, _target) or + version is None + ): + raise ValueError("opset_version must be type of coremltools.AvailableTarget") + self._opset_version = version + + def __repr__(self): + return self.__str__() + + def __str__(self): + return self.to_str("function") + + def to_str(self, func_name="function"): + func_name = func_name + "[{}]".format(_OPSET[self.opset_version]) + if len(self._input_dict) == 0: + s = func_name + "()" + else: + inputs = [(in_name, ph) for in_name, ph in self._input_dict.items()] + s = func_name + "(" + str(inputs[0][1]) + for in_name, ph in inputs[1:]: + s += ",\n" + " " * (len(func_name) + 1) + str(ph) + s += ") {\n" + s += self.indented_str(SPACES) + s += "}\n" + return s diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/builder.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/builder.py new file mode 100644 index 00000000..2f782c27 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/builder.py @@ -0,0 +1,246 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numbers +from collections import defaultdict + +import numpy as np + +from coremltools import _logger as logger +from coremltools.converters.mil.mil.types.symbolic import any_symbolic + +from .block import Function, curr_block +from .input_type import (InternalInputType, ListOrTensorInputType, + TensorInputType, TupleInputType) +from .program import Placeholder, Program +from .var import InternalVar, Var + + +def is_python_value(val): + return ( + isinstance(val, (np.generic, np.ndarray)) + or isinstance(val, numbers.Number) + or isinstance(val, str) + or isinstance(val, bool) + or (isinstance(val, (tuple, list)) and all(is_python_value(v) for v in val)) + ) + + +class Builder: + """ + This class is a singleton builder to construct a MIL program. For more + information, see `Create a MIL program `_. + + Importing ``.ops`` triggers the installation of all MIL ops into the Builder. + For details on each op, see `MIL ops `_. + + Examples + -------- + + >>> from coremltools.converters.mil.mil import Builder as mb + >>> from coremltools.converters.mil.mil import Program, Function + + >>> prog = Program() + >>> func_inputs = {"x": mb.placeholder(shape=[2,3]), + >>> "y": mb.placeholder(shape=[2,3])} + >>> with Function(func_inputs) as ssa_fun: + >>> x, y = ssa_fun.inputs['x'], ssa_fun.inputs['y'] + >>> res_var = mb.add(x=x, y=y) # created within ssa_fun block + >>> ssa_fun.set_outputs([res_var]) + >>> prog.add_function("main", ssa_fun) + + >>> # Importing ops triggers installation of all ops into Builder. + >>> from .ops import defs as _ops + + """ + + name_count = defaultdict(int) + + @classmethod + def _get_free_name(cls, name): + new_name = name + "_" + str(cls.name_count[name]) + cls.name_count[name] += 1 + return new_name + + @classmethod + def _maybe_set_name(cls, kwargs, op_type): + if "name" not in kwargs: + kwargs["name"] = cls._get_free_name(op_type) + return kwargs + + @classmethod + def _add_const(cls, val, name, before_op): + if not is_python_value(val): + raise ValueError("Cannot add const {}".format(val)) + if any_symbolic(val): + msg = ( + "Python native vals (list, tuple), np.array that are" + + "operation inputs cannot have symbolic values. Consider feeding" + + "symbolic shape in through placeholder and use mb.shape() " + + "operator. 
Input {}: {}" + ) + raise ValueError(msg.format(name, val)) + const_name = cls._get_free_name(name) + logger.debug("Adding const op '{}'".format(const_name)) + output_var = cls.const(val=val, name=const_name, + before_op=before_op) + return output_var + + + @classmethod + def _create_vars(cls, input_spec, op_name, before_op, + candidate_kv): + """ + For each key K in `candidate_kv`, create a Var if the + followings are satisfied: + + - K exists in input_spec and is not an InternalInputType + - candidate_kv[K] is not already a Var + + Inputs + ------ + - candidate_kv: Dict[str, Any] + Key-values may be inputs to an op (whose inputs is defined by + input_spec) + + Returns + ------- + - var_kv: Dict[str, Var] + For the K satisfying the above, var_kv[K] is the newly + created Var + """ + update_dict = {} + for k, val in candidate_kv.items(): + if isinstance(val, Var): + continue # already a Var + + if k not in input_spec.input_types: + continue # k is not an op input + + in_type = input_spec.input_types[k] + if isinstance(in_type, InternalInputType): + new_var_name = op_name + "_" + k + var = InternalVar(val, name=new_var_name) + curr_block().add_internal_var(var) + update_dict[k] = var + continue # Not a regular Var + + new_var_name = op_name + "_" + k + if isinstance(in_type, TupleInputType): + var = [] + for i, v in enumerate(val): + if isinstance(v, Var): + var.append(v) + continue + var.append( + cls._add_const(v, new_var_name + str(i), + before_op) + ) + update_dict[k] = var + continue + + if isinstance(in_type, (TensorInputType, ListOrTensorInputType)): + var = cls._add_const(val, new_var_name, before_op) + update_dict[k] = var + + return update_dict + + @classmethod + def _add_op(cls, op_cls, **kwargs): + """ + Add an op of type `op_cls` (e.g., convolution) to current block. + """ + kwargs = cls._maybe_set_name(kwargs, op_cls.__name__) + logger.info( + "Adding op '{}' of type {}".format(kwargs["name"], op_cls.__name__) + ) + before_op = kwargs.get("before_op", None) + # Shallow copy list inputs to ensure op inputs are immutable + kwargs = {k: v if not isinstance(v, (list, tuple)) else v[:] for k, v in kwargs.items() if v is not None} + kwargs.update(cls._create_vars( + input_spec=op_cls.input_spec, + op_name=kwargs["name"], before_op=before_op, + candidate_kv=kwargs)) + new_op = op_cls(**kwargs) + + # Initialize optional input Vars if it wasn't in kwargs + default_inputs = new_op.default_inputs() + # Shallow copy list inputs to ensure op inputs are immutable + missing_optional_vals = {k: v if not isinstance(v, (list, tuple)) else v[:] for k, v in default_inputs.items() + if k not in kwargs and v is not None} + missing_optional_vars = cls._create_vars( + input_spec=op_cls.input_spec, + op_name=kwargs["name"], before_op=before_op, + candidate_kv=missing_optional_vals) + new_op.set_inputs(type_inference=False, + **missing_optional_vars) + + curr_block()._insert_op_before(new_op, before_op=before_op) + new_op.build_nested_blocks() + new_op.type_value_inference() + if len(new_op.outputs) == 1: + return new_op.outputs[0] + return new_op.outputs + + @staticmethod + def placeholder(shape, dtype=None, allow_rank0_input=False): + return Placeholder(shape, dtype, allow_rank0_input=allow_rank0_input) + + @staticmethod + def TensorSpec(shape, dtype=None): + return Placeholder(shape, dtype) + + @staticmethod + def program(input_specs=None, opset_version=None): + """ + + The ``mb.program`` decorator creates a MIL program with a single + function (``main``). The input to ``main`` is a tensor. 
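+        Each argument of the decorated function becomes one tensor input of
+        ``main``, described by the corresponding entry in ``input_specs``.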
+ + Parameters + ---------- + + input_specs: TensorSpec + Describes a tensor. + + opset_version: AvailableTarget enum + Describes the opset version of the program + + + Examples + -------- + >>> import coremltools as ct + >>> @mb.program(input_specs=[mb.TensorSpec(shape=(1,2))], opset_version=ct.target.iOS16) + >>> def prog(a): + >>> return mb.add(x=a, y=2) + + """ + if input_specs is None: + input_specs = [] + + def wrapper(main_block): + program = Program() + num_args = main_block.__code__.co_argcount + arg_names = list(main_block.__code__.co_varnames)[:num_args] + if len(input_specs) != num_args: + msg = "{} expects {} inputs: {}. Got {} input_specs." + raise ValueError( + msg.format( + main_block.__name__, num_args, arg_names, len(input_specs) + ) + ) + input_spec_dict = {k: v for k, v in zip(arg_names, input_specs)} + with Function(input_spec_dict, opset_version) as func: + input_vars = [func.inputs[a] for a in arg_names] + outputs = main_block(*input_vars) + if isinstance(outputs, tuple): + outputs = list(outputs) + elif not isinstance(outputs, list): + outputs = [outputs] + func.set_outputs(outputs) + program.add_function("main", func) + return program + + return wrapper diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/input_type.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/input_type.py new file mode 100644 index 00000000..8721d927 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/input_type.py @@ -0,0 +1,382 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from collections import OrderedDict + +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.var import InternalVar + +SUPPORT_FLOAT_TYPES = [ + types.fp16, + types.fp32, + types.fp64, +] + +SUPPORT_INT_TYPES = [ + types.uint8, + types.uint16, + types.uint32, + types.uint64, + types.int8, + types.int16, + types.int32, + types.int64, +] + +SUPPORT_COMPLEX_TYPES = [ + types.complex64, + types.complex128, +] + +_SUPPORT_TYPES = ( + SUPPORT_FLOAT_TYPES + + SUPPORT_INT_TYPES + + SUPPORT_COMPLEX_TYPES + + [types.bool, types.str] +) + + +class DefaultInputs: + def __init__(self, **kwargs): + # Since python 3.6, kwargs preserves the input order. See + # https://docs.python.org/3/whatsnew/3.6.html#whatsnew36-pep468 + self._default_inputs = [(k, v) for k, v in kwargs.items()] + self._ordered_dict = OrderedDict() + for k, v in self._default_inputs: + self._ordered_dict[k] = v + + def items(self): + return self._ordered_dict.items() + + def __add__(self, default_inputs): + new_order_dict = {k: v for k, v in self._ordered_dict.items()} + for k, v in default_inputs._default_inputs: + new_order_dict[k] = v + return DefaultInputs(**new_order_dict) + + +class InputSpec: + def __init__(self, **kwargs): + # Since python 3.6, kwargs preserves the input order. 
See + # https://docs.python.org/3/whatsnew/3.6.html#whatsnew36-pep468 + self._input_types = [(k, v) for k, v in kwargs.items()] + self._ordered_dict = OrderedDict() + for k, v in self._input_types: + self._ordered_dict[k] = v + + def __add__(self, input_spec): + new_order_dict = {k: v for k, v in self._ordered_dict.items()} + for k, v in input_spec._input_types: + new_order_dict[k] = v + return InputSpec(**new_order_dict) + + + @property + def input_types(self): + """ + Ordered dict[str, _InputType] (name, input_type) + """ + return self._ordered_dict + + def validate_inputs(self, op_name, op_type, candidate_kvs): + """ + For each key K in `candidate_kvs`, if K is found in + self.input_types, perform the followings: + + - check that candidate_kvs[K] is a Var and satisfies + requirements in InputType (const, types) + - Place K, candidate_kvs[K] in output (list of (name, var) pairs). + + Note that this does not ensure the presence of all required + input_spec (optional == False). + + Parameters + ---------- + - op_name: str + + - op_type: str + + - candidate_kvs: Dict[str, Var] + Values cannot be None + + Return + ------ + None + + Raise: + ValueErrr if value type is incompatible + """ + msg_prefix = 'Op \"{}\" (op_type: {}) '.format(op_name, op_type) + + # check vars sharing the same type_domain_id have the same dtype + type_domain_group = {} + var_to_input_name = {} + for name, var in candidate_kvs.items(): + input_type = self.input_types[name] + if isinstance(input_type, TensorInputType) and input_type.type_domain_id is not None: + type_domain_id = input_type.type_domain_id + if type_domain_id in type_domain_group: + type_domain_group[type_domain_id].append(var) + else: + type_domain_group[type_domain_id] = [var] + var_to_input_name[var] = name + + for type_domain_id, vars in type_domain_group.items(): + expected_dtype = vars[0].dtype + ref_name = var_to_input_name[vars[0]] + for var in vars: + name = var_to_input_name[var] + if not var.dtype == expected_dtype: + msg = ( + "In op, of type {}, named {}, the named input `{}` must have the same data type " + "as the named input `{}`. However, {} has dtype {} whereas {} has dtype {}." + ).format(op_type, op_name, name, ref_name, name, + var.dtype.__type_info__(), ref_name, expected_dtype.__type_info__()) + raise ValueError(msg) + + # Ensure candidate_kvs doesn't contain None + for name, var in candidate_kvs.items(): + if var is None: + raise ValueError(msg_prefix + 'Input {} is None'.format(name)) + + if name not in self.input_types: + raise ValueError(msg_prefix + \ + 'Unrecognized input {}'.format(name)) + + input_type = self.input_types[name] + # Check constness + # Don't check InternalInputType (so _const_symbolic can work) + if input_type.const and \ + not isinstance(input_type, InternalInputType) \ + and var.val is None: + msg = msg_prefix + \ + 'Input {} must be const at compile time' + raise ValueError(msg.format(name), name, var.name) + + if not isinstance(var, InternalVar) and \ + not input_type.is_compatible(var): + msg = msg_prefix + "Input {}=\"{}\" expects " +\ + "{} but got {}" + raise ValueError(msg.format(name, var.name, input_type.type_str, + var.sym_type.__type_info__())) + + +class _InputType: + """ + (Untyped) input containing fundamental properties of all inputs to an + Operation: + """ + + def __init__(self, const=False, optional=False): + """ + const (bool): + True if the InputType has to be constant / materialized at compile time. + Const InputType is semantically equivalent to attribute. By + default False. 
+            Read-only.
+
+        optional (bool):
+            True to allow the user not to specify this input and rely on
+            default values (defined in default_inputs).
+
+        Note: _InputType should not be directly instantiated. Only its
+        subclasses may be instantiated.
+        """
+        self.const = const
+        self.optional = optional
+
+    def is_compatible(self, v):
+        """
+        Return True if (possibly symbolic) value `v` is compatible. False
+        otherwise.
+
+        Inputs:
+
+        v (Var | ListVar | native python function): input
+
+        Comment: is_compatible is defined as an instance method so that the
+        proper subclass method is called.
+        """
+        return self._is_compatible(v)
+
+    def _is_compatible(self, v):
+        return True
+
+    def _get_predefined_datatype(self):
+        """
+        Override this function if the datatype can be known without `_default`
+        or `_val`.
+        """
+        return None
+
+    def __str__(self):
+        return type(self).__name__
+
+    @property
+    def type_str(self):
+        """Descriptive string describing expected mil types"""
+        return str(self)
+
+
+class TensorInputType(_InputType):
+    """
+    TensorInputType specifies the generic tensor inputs.
+    The `type_domain` enforces data type constraints. It can be either:
+
+    (1) An object or tuple of builtin types:
+        This constrains the allowed input data types.
+        For example:
+
+        ```
+        input_spec = InputSpec(
+            x=TensorInputType(type_domain=types.int32),
+        )
+        ```
+        only allows input `x` to have int32 dtype.
+
+        ```
+        input_spec = InputSpec(
+            x=TensorInputType(type_domain=(types.int32, types.fp16)),
+        )
+        ```
+        allows input `x` to be either int32 or float16.
+
+    (2) A string:
+        Verifies that input parameters bound to the same `type_domain` have
+        the same data type. This additional check is enabled by defining a
+        `type_domains` dictionary in the Operation class.
+        For example:
+
+        ```
+        class conv(Operation):
+            input_spec = InputSpec(
+                x=TensorInputType(type_domain="T"),
+                weight=TensorInputType(type_domain="T"),
+            )
+
+            type_domains = {
+                "T": (types.fp16, types.fp32),
+            }
+        ```
+        would verify:
+        (i)  `x` and `weight` are either float16 or float32.
+        (ii) `x` and `weight` are the same type.
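+
+    As an illustration (a hypothetical sketch reusing the `conv` spec above;
+    the vars are made up), passing a float32 `x` together with a float16
+    `weight` would fail the domain check in `InputSpec.validate_inputs`:
+
+    ```
+    x = mb.const(val=np.zeros((1, 3, 8, 8), dtype=np.float32))
+    weight = mb.const(val=np.zeros((4, 3, 3, 3), dtype=np.float16))
+    # validate_inputs raises ValueError: `weight` must have the same data
+    # type as `x`, since both bind to type_domain "T".
+    ```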
+ + """ + def __init__(self, type_domain, **kwargs): + self._type_domain = () + self._type_domain_id = None + + if isinstance(type_domain, str): + self.type_domain_id = type_domain + else: + if isinstance(type_domain, type): + type_domain = (type_domain,) + self.type_domain = type_domain + super().__init__(**kwargs) + + def _is_compatible(self, v): + result = types.is_scalar(v.dtype) or types.is_tensor(v.dtype) + result = result and (v.dtype in self.type_domain) + return result + + @property + def type_domain(self): + return self._type_domain + + @type_domain.setter + def type_domain(self, val): + msg = "type_domain must be a tuple of builtin types" + if not isinstance(val, tuple) or any(map(lambda t: t not in _SUPPORT_TYPES, val)): + raise ValueError(msg) + self._type_domain = val + + @property + def type_domain_id(self): + return self._type_domain_id + + @type_domain_id.setter + def type_domain_id(self, val): + if not isinstance(val, str): + raise ValueError("type_domain_id must be type of str") + self._type_domain_id = val + + @property + def type_str(self): + return 'tensor or scalar of dtype from type domain ' + str([types.builtin_to_string(v) for v in self.type_domain]) + +class ListInputType(_InputType): + """ + ListInputType allows inputs of type types.list + """ + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def _is_compatible(self, v): + return types.is_list(v.sym_type) + + @property + def type_str(self): + return 'list' + + +class ListOrTensorInputType(_InputType): + """ + ListOrTensorInputType allows inputs of + (1) MIL tensor + (2) python list/tuple of MIL tensors + """ + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def _is_compatible(self, v): + return ( + types.is_list(v.sym_type) + or types.is_scalar(v.dtype) + or types.is_tensor(v.dtype) + ) + + @property + def type_str(self): + return 'list, tensor, or scalar' + + +class TupleInputType(_InputType): + """ + TupleInputType specifies input types of python list/tuple of MIL tensors. + """ + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def _is_compatible(self, v): + # We don't check the detail types within the tuple. + return isinstance(v, (tuple, list)) + + @property + def type_str(self): + return 'tuple' + + +class InternalInputType(_InputType): + """ + InternalInputType specifies input types outside of Program's type system. + It allows ops to take, for example, python primitive types, instead of + only the builtin types. + """ + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def _is_compatible(self, v): + return True # skip type check by default for InternalInputType. + + +class PyFunctionInputType(InternalInputType): + """ + Native python function. + """ + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def _is_compatible(self, v): + return callable(v.val) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/operation.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/operation.py new file mode 100644 index 00000000..8b8888e2 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/operation.py @@ -0,0 +1,603 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from typing import Any, Dict, Tuple
+
+import numpy as np
+
+from coremltools.converters.mil.mil import types
+from coremltools.converters.mil.mil.types import is_compatible_type
+from coremltools.converters.mil.mil.types.symbolic import (any_symbolic,
+                                                           is_symbolic)
+
+from . import SPACES
+from .block import curr_block
+from .input_type import DefaultInputs, TensorInputType, TupleInputType
+from .var import ComplexVar, InternalVar, ListVar, Var
+
+VALUE = 1
+SYMBOL = 2
+NONE = 4
+ALL = 7
+
+
+def _is_compatible_symbolic_array(a, b):
+    """
+    A helper function that checks whether two numpy arrays with symbolic
+    values are compatible. For instance,
+        a = np.array([is0, is2])
+        b = np.array([is1, 1])
+    are considered compatible, while
+        a = np.array([is0, 1])
+        b = np.array([is1, -1])
+    are not.
+    """
+    if not a.shape == b.shape:
+        return False
+    a = a.flatten()
+    b = b.flatten()
+    for t, v in zip(a, b):
+        if not is_symbolic(t) and not is_symbolic(v):
+            if t != v:
+                return False
+    return True
+
+
+def precondition(allow=ALL):
+    """
+    A helper decorator for the value_inference method.
+    Decorate value_inference with parameter VALUE/SYMBOL/NONE or ALL.
+    For VALUE/SYMBOL/NONE, use logical or ( | ) to allow multiple kinds.
+    Note that:
+    1. ALL == VALUE | SYMBOL | NONE
+    2. The chosen flags (some or all of VALUE/SYMBOL/NONE) must be satisfied
+       by EVERY input for the precondition to be satisfied.
+
+    The meaning of each flag is:
+    VALUE: value that can be materialized during compile time
+    SYMBOL: value that cannot be materialized at compile time but exists as a
+            symbolic value
+    NONE: a None value
+
+    Usage:
+    @precondition(allow=VALUE|SYMBOL)
+    def value_inference(self):
+        '''some value_inference implementation'''
+    """
+    ALLOW_VALUE = allow & VALUE
+    ALLOW_SYMBOL = allow & SYMBOL
+    ALLOW_NONE = allow & NONE
+
+    def process(v, has_value, has_symbol, has_none):
+        """
+        v: Var
+
+        Return updated has_value, has_symbol, has_none
+        """
+        if any_symbolic(v.sym_val):
+            return has_value, True, has_none
+        elif v.val is None:
+            return has_value, has_symbol, True
+        return True, has_symbol, has_none
+
+    def decorator(func):
+        def wrapper(self):
+            HAS_VALUE = False
+            HAS_SYMBOL = False
+            HAS_NONE = False
+            for in_name, in_type in self._input_types.items():
+                if in_type.optional:
+                    # Optional inputs are not required to invoke value_inference()
+                    continue
+
+                if isinstance(in_type, TupleInputType):
+                    for v in self._input_vars[in_name]:
+                        HAS_VALUE, HAS_SYMBOL, HAS_NONE = process(
+                            v, HAS_VALUE, HAS_SYMBOL, HAS_NONE
+                        )
+                else:
+                    HAS_VALUE, HAS_SYMBOL, HAS_NONE = process(
+                        self._input_vars[in_name], HAS_VALUE, HAS_SYMBOL, HAS_NONE
+                    )
+
+            if HAS_VALUE and not ALLOW_VALUE:
+                msg = "Implementation of value_inference() for op {} doesn't support input with VALUE"
+                raise NotImplementedError(msg.format(self.op_type))
+            elif HAS_SYMBOL and not ALLOW_SYMBOL:
+                msg = "Implementation of value_inference() for op {} doesn't support input with SYMBOL"
+                raise NotImplementedError(msg.format(self.op_type))
+            elif HAS_NONE and not ALLOW_NONE:
+                msg = "Implementation of value_inference() for op {} doesn't support input with NONE"
+                raise NotImplementedError(msg.format(self.op_type))
+            else:
+                return func(self)
+
+        return wrapper
+
+    return decorator
+
+
+def is_internal_input(arg_name):
+    return arg_name[0] == "_"
+
+
+class mil_list:
+    """
+    A wrapper around a python list.
+    """
+
+    def __init__(self, ls=None):
+        self.ls
= ls if ls is not None else [] + if not isinstance(self.ls, list): + raise TypeError("Type of 'ls' must be list in the 'mil_list' class") + + +class Operation: + """ + Represents Operation in MIL. + + # Properties + name (str): + The name of the operation + + input_types (InputSpec, class attr): + Read-only named input types from all subclasses. Input types are used + to validate `inputs`. + + inputs [_input_vars] (dict of str --> Var): + An Operation (subclass of Operation) only has access to input Var, + which is already validated against `input_spec`. + + outputs [_output_vars] (list of Var): + List of output var based on type inference. Read-only + """ + + # Map from type domain id to a tuple of accepted types. + type_domains: Dict[str, Tuple[Any]] = dict() + + def __init__(self, **kwargs): + self._input_types = self.input_spec.input_types + self._type_domains = self.type_domains + self.name = kwargs.get("name", None) + + self._output_vars = None + self._input_vars = {} + self.blocks = [] + self.enclosing_block = curr_block() + + # Initialize inputs as object attributes (all None) + for k in self._input_types.keys(): + setattr(self, k, None) + self._input_vars[k] = None + + self._check_expected_inputs(kwargs) + + # Populate type_domains into input types + for v in self._input_types.values(): + if not isinstance(v, TensorInputType): + continue + if len(v.type_domain) == 0: + if v.type_domain_id not in self._type_domains: + raise ValueError("type_domain {} not defined.".format(v.type_domain_id)) + v.type_domain = self._type_domains[v.type_domain_id] + + # Set inputs from kwargs + input_kv = {k: v for k, v in kwargs.items() + if k in self._input_types and v is not None} + self._validate_and_set_inputs(input_kv) + self._ensure_required_inputs() + + def _check_expected_inputs(self, kwargs): + """ + Check that all kwargs are one of the following: + + - system inputs (non-attributes) + - op inputs (self._input_types.keys()) + """ + non_attributes = [ + "name", + "symbolic_datatype", + "datatype", + "symbolic_value", + "value", + "version", + "before_op", + "no_check_var_visibility", # no_check_var_visibility==True to deviate from SSA + "no_check_var_types", + # no_check_var_types==True to force set inputs, even if type does not match with earlier ones + ] + for k in kwargs.keys(): + if k not in non_attributes and k not in self._input_types: + raise ValueError( + "Unknown input '{}' for op '{}'".format(k, self.op_type) + ) + + def set_inputs(self, no_check_var_types=False, type_inference=False, **input_kvs): + """ + Parameters + ---------- + - input_kvs: Dict[str, Var] + Value cannot be None + + - type_inference: bool + True to perform type inference and recreate output Var. + """ + self._validate_and_set_inputs(input_kvs, no_check_var_types=no_check_var_types) + if type_inference and not no_check_var_types: + self.type_value_inference() + self._ensure_required_inputs() + + def get_flattened_inputs(self): + """ + Returns: + list[Var]. Flatten all tuple inputs + """ + flat_inputs = [] + for v in self.inputs.values(): + if isinstance(v, (list, tuple)): + flat_inputs.extend(v) + else: + flat_inputs.append(v) + return flat_inputs + + def type_value_inference(self, overwrite_output=False): + """ + Perform type inference and auto_val computation based on new input Vars + in kwargs. If self._output_vars is None then we generate _output_vars; + otherwise no new Var is created, but type inference result is verified + against existing _output_vars, if overwrite_output is False. 
+ + If overwrite_output is True, then the type inference result overwrites the + existing _output_vars + """ + output_types = self.type_inference() + if not isinstance(output_types, tuple): + output_types = (output_types,) + output_vals = self._auto_val(output_types) + try: + output_names = self.output_names() + if not isinstance(output_names, tuple): + output_names = (output_names,) + except NotImplementedError: + if len(output_types) > 1: + output_names = tuple(str(i) for i, _ in enumerate(output_types)) + else: + output_names = ("",) # output name same as op name. + + # Combine (output_names, output_types, output_vals) to create output + # Vars. + if self._output_vars is None: + self._output_vars = [] + for i, (n, sym_type, sym_val) in enumerate( + zip(output_names, output_types, output_vals) + ): + name = self.name + "_" + n if n != "" else self.name + if types.is_list(sym_type): + new_var = ListVar( + name, + elem_type=sym_type.T[0], + init_length=sym_type.T[1], + dynamic_length=sym_type.T[2], + sym_val=sym_val + if (sym_val is not None and isinstance(sym_val.val, list)) + else None, + op=self, + op_output_idx=i, + ) + elem_shape = new_var.elem_shape + if elem_shape is not None and len(elem_shape) >= 5: + msg = ( + "Core ML only supports list of elements with rank <= 4. " + 'Layer "{}", with type "{}", outputs a list of rank {} tensors.' + ).format(self.name, self.op_type, len(elem_shape)) + raise ValueError(msg) + else: + if types.is_tensor(sym_type) and types.is_complex(sym_type.T[0]): + # Only `complex` op needs to maintain the real/imag data in the ComplexVar. + # For other ops, this ComplexVar is just a placeholder here, which will be + # replaced by a newly created ComplexVar during complex ops lowering pass. + real_data = ( + self.real_data if self.op_type == "complex" else None + ) + imag_data = ( + self.imag_data if self.op_type == "complex" else None + ) + new_var = ComplexVar( + name, + sym_type, + sym_val, + op=self, + op_output_idx=i, + real=real_data, + imag=imag_data, + ) + else: + new_var = Var(name, sym_type, sym_val, op=self, op_output_idx=i) + self._output_vars.append(new_var) + else: + # Check new inference result against existing self._output_vars. + for i, (sym_type, sym_val) in enumerate(zip(output_types, output_vals)): + out_var = self._output_vars[i] + # Check type inference + if overwrite_output: + out_var._sym_type = sym_type + elif not is_compatible_type(sym_type, out_var.sym_type): + msg = "Output Var {} in op {} type changes with new input Vars" + raise ValueError(msg.format(out_var.name, self.name)) + + # Check value inference + if overwrite_output: + out_var._sym_val = sym_val + + if sym_val is not None and out_var.sym_val is not None: + if np.any(sym_val.val != out_var.sym_val): + if overwrite_output: + out_var._sym_val = sym_val + else: + msg = 'value_inference differs for var {} in op {}' + if not _is_compatible_symbolic_array(sym_val.val, out_var.sym_val): + raise ValueError(msg.format(out_var.name, self.name)) + + for o in self.outputs: + o._set_nonreplaceable_vars_upstream() + + def _auto_val(self, output_types): + """ + # Evaluation is two stage: + # + # Stage 1: Check whether the method value_inference() is implemented + # + # Stage 2: Check if there's an value_inference() implementation + # for given input types. 
+        #
+        # Suppose inputs are all SYMBOL:
+        # Case 1: No value_inference() implemented => fail at stage 1
+        # Case 2: If value_inference() implemented, but requires all VALUE not
+        #         SYMBOL => fail at stage 2
+        # Case 3: If value_inference() implemented, and has no restriction on
+        #         input types => Success
+        #
+        # If either stage fails, outputs[i].val is None.
+        # Otherwise, outputs[i].sym_val is not None.
+
+        output_types: tuple of builtin types
+
+        Returns:
+            output_vals: tuple of builtin type with value, or tuple of None
+        """
+        do_auto_val = True
+
+        if do_auto_val:
+            # Is self.value_inference implemented for corresponding input?
+            try:
+                vals = self.value_inference()
+            except NotImplementedError:
+                do_auto_val = False
+
+        if not do_auto_val:
+            # No auto_val possible.
+            return tuple(None for _ in output_types)
+
+        if not isinstance(vals, (tuple, list)):
+            vals = (vals,)
+        for val in vals:
+            if val is None:
+                do_auto_val = False
+        if not do_auto_val:
+            # No auto_val possible.
+            return tuple(None for _ in output_types)
+
+        auto_val = []
+        for t, v in zip(output_types, vals):
+            builtin_val = t()
+            if isinstance(v, mil_list):
+                builtin_val.val = v.ls
+            else:
+                builtin_val.val = v
+            auto_val.append(builtin_val)
+        return auto_val
+
+    def value_inference(self):
+        """
+        Optional Python implementation of the op based on (materialized)
+        values in `self._input_vars`. Return a builtin value (single output)
+        or a tuple of builtin values (multi-outputs) of the same length as
+        returned by `type_inference`.
+        """
+        msg = "value_inference() is not implemented by op {}"
+        raise NotImplementedError(msg.format(self.op_type))
+
+    def default_inputs(self):
+        """
+        Optional. Returns default values for optional inputs. The
+        function is guaranteed to have access to all required inputs and
+        possibly some optional inputs should the user supply them.
+        These may be used to construct default values, such as
+        `strides=[1]*num_spatial_dims` in conv, where
+        `num_spatial_dims` may be inferred from the rank of
+        required inputs.
+        """
+        return DefaultInputs()
+
+    def output_names(self):
+        """
+        Optional. If implemented, we set the name of output var i as
+        self.name + "_" + output_names[i]
+
+        Returns a string (single output) or tuple of strings.
+        """
+        msg = "output_names() is not implemented by op {}"
+        raise NotImplementedError(msg.format(self.op_type))
+
+    def type_inference(self):
+        """
+        Return (builtin_type, builtin_val) pair from type inference.
+        builtin_val may be None if symbolic_value is not attainable at compile
+        time.
+        """
+        raise NotImplementedError("This function must be implemented by each op")
+
+    def build_nested_blocks(self):
+        """
+        Build nested blocks (for cond, while_loop, and other composite
+        blocks).
+        """
+        pass
+
+    def _ensure_required_inputs(self):
+        """
+        Raises ValueError if required inputs are not present.
+        """
+        for name, input_type in self._input_types.items():
+            if not input_type.optional and self._input_vars[name] is None:
+                msg_prefix = 'Op "{}" (op_type: {}) '.format(self.name, self.op_type)
+                raise ValueError(
+                    msg_prefix + "Required input {} is missing".format(name)
+                )
+
+    def _validate_and_set_inputs(self, input_kvs, no_check_var_types=False):
+        """
+        For each k, v in `input_kvs`, perform the following:
+
+        - Check that k exists in `self.input_spec`
+        - Check that v satisfies the corresponding `InputType`
+        - Set the input, possibly replacing an existing input.
+
+        Note that it does not ensure all required inputs are satisfied.
+        Use _ensure_required_inputs() for that.
+ + Parameters + ---------- + - input_kvs: Dict[str, Var] + Each key in input_kvs must exist in `self.input_specs`. Its values + must be a Var. + + - no_check_var_types: bool + True to check var types against input_specs only, but not + enforcing new input vars to be a subtype of existing input vars + """ + for key in input_kvs.keys(): + if key not in self._input_types: + raise RuntimeError( + "Unknown input '{}' for op '{}'".format(key, self.op_type) + ) + + def check_and_detach(v_new, v_old, op, no_check_var_types): + # Check new var's sym_type is compatible with the + # existing's sym_type. + if ( + not is_compatible_type(v_new.sym_type, v_old.sym_type) + and not no_check_var_types + ): + msg = "New var type {} not a subtype of " + "existing var type {}" + raise ValueError(msg.format(v_new.sym_type, v_old.sym_type)) + v_old.remove_child_op(op, no_check_var_types) + + self.input_spec.validate_inputs(self.name, self.op_type, input_kvs) + + for name, var in input_kvs.items(): + # Remove this operation itself from existing input + # Var's child_ops + existing_input_var = self._input_vars[name] + if existing_input_var is not None: + if isinstance(existing_input_var, (list, tuple)): + for v_old, v_new in zip(existing_input_var, var): + check_and_detach(v_new, v_old, self, no_check_var_types) + else: + check_and_detach( + var, existing_input_var, self, no_check_var_types + ) + + # Set var as input_var + if isinstance(var, Var): + # TODO: the child op of complex op's input might get lost, as the complex op will + # be lowered. Maybe should add child op here and take care of it in lowering pass. + var.add_child_op(self) + elif isinstance(var, (tuple, list)): + for v in var: + v.add_child_op(self) + # ignore function inputs + self._input_vars[name] = var + setattr(self, name, var) + + @property + def inputs(self): + """ + Returns + ------- + - inputs: Dict[str, Union[Var, Tuple[Var]]] + """ + # Filter out InternalVar + return { + k: v + for k, v in self._input_vars.items() + if not isinstance(v, InternalVar) and v is not None + } + + @property + def outputs(self): + return self._output_vars + + @property + def op_type(self): + return type(self).__name__ + + @property + def opset_version(self): + op_variants = type(self)._op_variants + opset_versions = sorted(list(op_variants.keys())) + for i in opset_versions: + if op_variants[i] == type(self): + return i + + def remove_from_block(self): + """ + Remove / detach itself from the enclosing block. See Block.remove_ops + for details. + """ + self.enclosing_block.remove_ops([self]) + + @staticmethod + def var_to_str(v): + if isinstance(v, (tuple, list)): + return "(" + ", ".join(["%" + s.name for s in v]) + ")" + elif v.op and v.op.op_type == "const": + val = v.op.val.sym_val + if isinstance(val, (np.generic, np.ndarray)): + # for small tensors, serialize as string; skip large tensors. 
+ if val.size <= 10: + return str(val.tolist()) + else: + # other types are small enough they can be serialized + return ( + '"' + val + '"' + if isinstance(val, str) + else str(val) + ) + + return "%" + v.name + + def indented_str(self, indent=""): + if self.op_type == "const": + return "" + s = indent + if self.outputs is not None: + s += ", ".join([str(o) for o in self.outputs]) + s += " = " + self.op_type + "(" + s += ", ".join( + [ + k + "=" + Operation.var_to_str(self.inputs[k]) + for k in self._input_types.keys() + if k in self.inputs and not is_internal_input(k) + ] + ) + s += ', name="{}")\n'.format(self.name) + for b in self.blocks: + s += b.indented_str(indent=indent + SPACES) + return s + + def __repr__(self): + return str(self) + + def __str__(self): + return self.indented_str(SPACES) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/__init__.py new file mode 100644 index 00000000..61aafff4 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/__init__.py new file mode 100644 index 00000000..f62e29b9 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from . import complex_dialect_ops, iOS15, iOS16 diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/_op_reqs.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/_op_reqs.py new file mode 100644 index 00000000..e5a3b0cd --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/_op_reqs.py @@ -0,0 +1,8 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause +from coremltools.converters.mil.mil.ops.registry import \ + SSAOpRegistry as _SSAOpRegistry + +register_op = _SSAOpRegistry.register_op diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/_utils.py new file mode 100644 index 00000000..d971ffb8 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/_utils.py @@ -0,0 +1,548 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import math +import numbers +from typing import List, Tuple + + +import numpy as np + +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import Var, get_new_symbol, types +from coremltools.converters.mil.mil.ops.defs.iOS15.elementwise_unary import ( + cast as cast_op_class, +) +from coremltools.converters.mil.mil.types import builtin_to_string, promote_dtypes +from coremltools.converters.mil.mil.types.symbolic import is_symbolic + +MAX_SIZE_CONSTANT_FOLDING = 1024 * 1024 / 4 # When a fp32 const takes over 1MB, we won't create a const op for that + +def broadcast_shapes(shape_x, shape_y): + """ + Check and broadcast given input shapes. + :param shape_x: tuple of int or symbols + Shape of the first tensor (possibly symbolic). + :param shape_y: tuple of int or symbols + Shape of the second tensor (possibly symbolic). + :return: tuple of int or symbols + Result from broadcast. + """ + + def raise_incompatible_dim_exception(): + raise ValueError( + "Incompatible dim {} in shapes {} vs. {}".format( + i, shape_x, shape_y + ) + ) + + shape_x = tuple(shape_x) + shape_y = tuple(shape_y) + if len(shape_x) < len(shape_y): + shape_x = tuple([1] * (len(shape_y) - len(shape_x))) + shape_x + if len(shape_y) < len(shape_x): + shape_y = tuple([1] * (len(shape_x) - len(shape_y))) + shape_y + + ret_shapes = list() + for i in range(len(shape_x)): + if shape_x[i] == shape_y[i]: + ret_shapes.append(shape_x[i]) + else: + is_x_unknown = is_symbolic(shape_x[i]) + is_y_unknown = is_symbolic(shape_y[i]) + if shape_x[i] == 1: + ret_shapes.append(shape_y[i]) + elif shape_y[i] == 1: + ret_shapes.append(shape_x[i]) + elif not is_y_unknown and shape_y[i] > 1: + if not is_x_unknown and shape_x[i] != shape_y[i]: + raise_incompatible_dim_exception() + ret_shapes.append(shape_y[i]) + elif not is_x_unknown and shape_x[i] > 1: + if not is_y_unknown and shape_x[i] != shape_y[i]: + raise_incompatible_dim_exception() + ret_shapes.append(shape_x[i]) + elif is_x_unknown or is_y_unknown: + ret_shapes.append(get_new_symbol()) + else: + raise_incompatible_dim_exception() + + return tuple(ret_shapes) + + +def promoted_primitive_type(type1, type2): + """ + Given a pair of tensor or primitive types, find the smallest type that can store an instance + of their primitive type. + """ + ptype1 = type1.get_primitive() if types.is_tensor(type1) else type1 + ptype2 = type2.get_primitive() if types.is_tensor(type2) else type2 + return types.promote_types(ptype1, ptype2) + + +def effective_kernel(kernel_shape, dilations): + """ + + Args: + kernel_shape: tuple[int] representing the kernel shape in each + given dimension. + dilations: tuple[int] representing the dilation of the kernel + in each given dimension. Must be the same length as + kernel_shape, and is assumed to give the dimensions in + the same order as kernel_shape + + Returns: tuple[int] representing the effective shape of the kernel + in each given dimension, with each dimension in the order given, + taking into account dilation. + See http://deeplearning.net/software/theano/tutorial/conv_arithmetic.html#dilated-convolutions + Note that a dilation of 1 is equivalent to having no dilation. 
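+
+    A quick worked example (illustrative): a 3x3 kernel with dilations (2, 2)
+    has an effective extent of ((3 - 1) * 2 + 1, (3 - 1) * 2 + 1) = (5, 5):
+
+        >>> effective_kernel(kernel_shape=(3, 3), dilations=(2, 2))
+        [5, 5]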
+ + """ + if len(kernel_shape) != len(dilations): + raise ValueError( + f"kernel_shape ({len(kernel_shape)}) and dilations ({len(dilations)}) " + f"must be the same length" + ) + return [(k - 1) * d + 1 for k, d in zip(kernel_shape, dilations)] + + +def aggregated_pad( + pad_type, + kernel_shape, + input_shape=None, + strides=None, + dilations=None, + custom_pad=None, +): + """ + Args + pad_type: string. Must be one of ('same', 'same_lower', 'valid', 'custom') + + kernel_shape: [kH, kW, ...]: spatial kernel dims (excluding channels) + + input_shape: [iH, iW, ...]: spatial input dims (excluding channels) + Required iff pad_type in ['same', 'same_lower'] + + strides: [sH, sW, ...]: spatial strides (excluding channels) + Required iff pad_type in ['same', 'same_lower'] + + dilations: [dH, dW, ...]: dilations (excluding channels) + If not provided, defaults to [1, 1, ...], effectively no dilation. + + custom_pad: Required iff pad_type == 'custom'. + custom_pad[2*i], custom_pad[2*i+1] are before/after custom padding + for spatial dim i. + + + Returns: + A list of total (before + after) padding for each spatial dimension in kernel_shape. + """ + num_spatial_dims = len(kernel_shape) + if dilations is None: + dilations = [1] * num_spatial_dims + elif len(dilations) != num_spatial_dims: + raise ValueError( + f"dilations must have same length as kernel_shape " + f"({num_spatial_dims}, but got {len(dilations)})" + ) + if pad_type in ["same", "same_lower"]: + if input_shape is None or len(input_shape) != num_spatial_dims: + raise ValueError( + "For SAME padding input_shape must not be None and must have " + "same length as kernel_shape ({}, but got {})".format( + num_spatial_dims, + len(input_shape) if input_shape is not None else "None", + ) + ) + if strides is None or len(strides) != num_spatial_dims: + raise ValueError( + "For SAME padding strides must not be None and must have " + "same length as kernel_shape ({}, but got {})".format( + num_spatial_dims, len(strides) if strides is not None else "None" + ) + ) + effective_ks = effective_kernel(kernel_shape, dilations) + return [ + int(max(0, s * math.ceil(float(i) / float(s)) - i + k - s)) + if not is_symbolic(i) else get_new_symbol() + for i, k, s in zip(input_shape, effective_ks, strides) + ] + if pad_type == "valid": + return [0] * num_spatial_dims + if pad_type == "custom": + if custom_pad is None or len(custom_pad) != 2 * num_spatial_dims: + raise ValueError("Invalid custom_pad.") + return [ + custom_pad[2 * d] + custom_pad[2 * d + 1] for d in range(num_spatial_dims) + ] + raise ValueError('Invalid padding pad_type "{}"'.format(pad_type)) + + +def spatial_dimensions_out_shape( + pad_type, input_shape, kernel_shape, strides, dilations=None, custom_pad=None, ceil_mode=False, +): + """ + Args + pad_type: string. Must be one of ('same', 'same_lower', 'valid', 'custom') + + input_shape: [iH, iW, ...]: spatial input dims (excluding channels) + Required iff pad_type in ['same', 'same_lower'] + + kernel_shape: [kH, kW, ...]: spatial kernel dims (excluding channels) + + strides: [sH, sW, ...]: spatial strides (excluding channels) + Required iff pad_type in ['same', 'same_lower'] + + dilations: [dH, dW, ...]: dilations (excluding channels) + If not provided, defaults to [1, 1, ...], effectively no dilation. + + custom_pad: Required iff pad_type == 'custom'. + custom_pad[2*i], custom_pad[2*i+1] are before/after custom padding + for spatial dim i. + + ceil_mode: determines the padding and output shape. 
+ When ceil mode is True: + out_dim = floor((in_dim + pad_l + pad_r - kernel_size + (stride-1)) / stride) + 1 + if (out_dim-1) * stride >= in_dim + pad_l and (pad_l > 0 or pad_r > 0): + out_dim = out_dim - 1 + When ceil mode is False: + out_dim = floor((in_dim + pad_l + pad_r - kernel_size) / stride) + 1 + + Returns: + A list of spatial output sizes for each spatial dimension of kernel_shape. + + """ + num_spatial_dims = len(kernel_shape) + if dilations is None: + dilations = [1] * num_spatial_dims + if custom_pad is None: + custom_pad = [0] * num_spatial_dims * 2 + if not ( + len(input_shape) + == len(kernel_shape) + == len(strides) + == len(dilations) + == len(custom_pad) / 2 + ): + raise ValueError( + f"input_shape (length {len(input_shape)}), " + f"kernel_shape (length {len(kernel_shape)}), " + f"strides (length {len(strides)}), " + f"dilations (length {len(dilations)}), " + f"and custom_pad (length {len(custom_pad)}) divided by two " + "must all be the same length" + ) + + pad = aggregated_pad( + pad_type=pad_type, + kernel_shape=kernel_shape, + input_shape=input_shape, + strides=strides, + dilations=dilations, + custom_pad=custom_pad, + ) + effective_ks = effective_kernel(kernel_shape, dilations) + out_shape = [] + for r in range(num_spatial_dims): + # only check if `input_shape` (spatial part of the input image) is symbolic, because: + # * `input_shape` can be symbolic + # * `pad` (aggregated from `input_shape` + ...) is symbolic only if `input_shape` is symbolic + # * `effective_ks` (effective kernel size, determined from kernel size + dilations) cannot be symbolic + # * strides cannot be symbolic + if is_symbolic(input_shape[r]): + out_shape.append(get_new_symbol()) + else: + out_dim = 0 + if not ceil_mode: + out_dim = math.floor((input_shape[r] + pad[r] - effective_ks[r]) / strides[r] + 1) + else: + out_dim = math.floor((input_shape[r] + pad[r] - effective_ks[r] + strides[r] - 1) / strides[r] + 1) + if (out_dim - 1) * strides[r] >= input_shape[r] + pad[r]/2 and pad[r] > 0: + out_dim = out_dim - 1 + if out_dim <= 0: + raise ValueError(f"spatial dimension {r} has invalid output size {out_dim}") + out_shape.append(out_dim) + return out_shape + + +def parse_einsum_equation(equation: str) -> Tuple[List]: + """ + Args + equation : str + + parse the equation in the following manner: + (running example: "nchw,nwhr->nchr") + + step 1: split the equation with delimiter "->" + e.g.: this will give "nchw,nwhr" and "nchr" + + step 2: split the LHS equation string with delimiter "," + e.g.: this will give input1 : "nchw", input2: "nwhr" + + step 3: map each character to a unique integer, which is incremented. + Iterate over input1, input2 and output, in that order. + e.g.: input 1, i.e., "nchw" will give vector {0,1,2,3} + input 2, i.e, "nwhr" will produce {0,3,2,4} + output , i.e. 
"nchr" will produce {0,1,2,4} + + return vectors corresponding to the 2 inputs and the output + """ + input_output_str = equation.split('->') + assert len(input_output_str) == 2, "unsupported einsum equation {}".format(equation) + input_str = input_output_str[0] + output_str = input_output_str[1] + + inputs = input_str.split(',') + assert len(inputs) == 2, "unsupported einsum equation {}".format(equation) + input1_str = inputs[0] + input2_str = inputs[1] + + input1_vec = [-1 for i in range(len(input1_str))] + input2_vec = [-1 for i in range(len(input2_str))] + output_vec = [-1 for i in range(len(output_str))] + map_char_to_int = {} + + def _update_vec(str, vec, map_char_to_int, index): + for i, s in enumerate(str): + if s not in map_char_to_int: + map_char_to_int[s] = index + index += 1 + vec[i] = map_char_to_int[s] + return index + + index = _update_vec(input1_str, input1_vec, map_char_to_int, 0) + index = _update_vec(input2_str, input2_vec, map_char_to_int, index) + _update_vec(output_str, output_vec, map_char_to_int, index) + + return input1_vec, input2_vec, output_vec + +def compute_gather(params, indices, axis, batch_dims): + """ + This utility function computes the gather operation with batch_dims supported. + """ + def compute_gather_helper(params, indices, axis): + scalar_indices = isinstance(indices, numbers.Integral) + if scalar_indices: + res = np.take(params, [indices], axis) + res2 = np.squeeze(res, axis=axis) + if isinstance(res2, np.ndarray) and len(res2.shape) == 0: + # The `res2` is a numpy 0-d array (after doing np.squeeze on a 1-d array). + # For 0-d array in numpy, we need to extract the scalar value by first converting + # it back to 1-d array. + # Notice that .item() doesn't work because it returns a built-in type instead of + # np.generic type, which will fail the downstream var value setter. + return np.atleast_1d(res2)[0] + return res2 + return np.take(params, indices, axis) + + if batch_dims == 0: + return compute_gather_helper(params, indices, axis) + + params_shape = params.shape + indices_shape = indices.shape + batch_shape = params_shape[:batch_dims] + + params_new_shape = [np.prod(batch_shape)] + list(params_shape[batch_dims:]) + indices_new_shape = [np.prod(batch_shape)] + list(indices_shape[batch_dims:]) + params_reshape = np.reshape(params, params_new_shape) + indices_reshape = np.reshape(indices, indices_new_shape) + + res = [] + for p, i in zip(params_reshape, indices_reshape): + res.append(compute_gather_helper(p, i, axis - batch_dims)) + res = np.stack(res) + res_new_shape = tuple(batch_shape) + tuple(res.shape[1:]) + return np.reshape(res, res_new_shape) + +def promote_input_dtypes(input_vars): + """ + This utility function promotes all input variables to the same data type. + It is used to homogenize inputs to an op such as matmul / elementwise_binary, + and not the inputs to a function itself. 
+ """ + def _is_same_dtype(dtype1, dtype2): + return builtin_to_string(dtype1) == builtin_to_string(dtype2) + + def _promoted_var(var, promoted_dtype): + if var.val is None: + x = mb.cast( + x=var, dtype=builtin_to_string(promoted_dtype), name=var.name + "_promoted") + else: + const_value_after_cast = cast_op_class.get_cast_value(var, builtin_to_string(promoted_dtype)) + x = mb.const(val=const_value_after_cast, name=var.name + "_promoted") + return x + + for i, var in enumerate(input_vars): + if not isinstance(var, Var): + input_vars[i] = mb.const(val=var) + + promoted_dtype = promote_dtypes([var.dtype for var in input_vars]) + + for i, var in enumerate(input_vars): + if not _is_same_dtype(var.dtype, promoted_dtype): + input_vars[i] = _promoted_var(var, promoted_dtype) + + return input_vars + + +def solve_slice_by_index_shape(x_shape, begin, end, stride, begin_mask, end_mask, squeeze_mask): + """ + Helper function to solve the shape of tensor slicing. + """ + ret_shape = [] + + if begin is None or len(begin) == 0: + begin = [None] * len(x_shape) + if end is None or len(end) == 0: + end = [None] * len(x_shape) + + if len(begin) != len(x_shape): + raise TypeError( + "slice_by_index op: size of 'begin', {}, is not equal to the rank of input, which is {}".format( + len(begin), len(x_shape) + ) + ) + if len(end) != len(x_shape): + raise TypeError( + "slice_by_index op: size of 'end', {}, is not equal to the rank of input, which is {}".format( + len(end), len(x_shape) + ) + ) + + # solve for shape inference + for idx in range(len(x_shape)): + # skip if we want to squeeze the dimension + if squeeze_mask[idx]: + continue + + # for those a[:] cases + if begin_mask[idx] and end_mask[idx]: + if is_symbolic(x_shape[idx]): + if stride[idx] == -1 or stride[idx] == 1: + ret_shape.append(x_shape[idx]) + else: + ret_shape.append(get_new_symbol()) + else: + num = np.ceil(float(x_shape[idx]) / abs(stride[idx])).astype( + np.int32 + ) + ret_shape.append(num) + continue + + """ + We first deal with those cases, where the output size is a deterministic number, even if the input dimension + is unknown (i.e. 
symbolic) + """ + if ( + not begin_mask[idx] + and not end_mask[idx] + and begin[idx] is not None + and end[idx] is not None + ): + # in this case the slice is from "begin" to "end", where both these boundary points are known + # we can find the size of the slice in this case, unless one of them is positive and other is negative + # as in that case, we would need to know the size of the full input dimension + if begin[idx] >= 0 and end[idx] >= 0 and stride[idx] > 0: + if end[idx] < begin[idx]: + raise ValueError( + "slice_by_index op: unsupported values in for dimension {}, " + "(begin, end, stride) : ({}, {}, {})".format( + idx, begin[idx], end[idx], stride[idx] + ) + ) + ret_shape.append( + np.arange(end[idx] - begin[idx])[ + slice(0, end[idx] - begin[idx], stride[idx]) + ].size + ) + continue + if begin[idx] < 0 and end[idx] < 0 and stride[idx] < 0: + if begin[idx] < end[idx]: + raise ValueError( + "slice_by_index op: unsupported values in for dimension {}, " + "(begin, end, stride) : ({}, {}, {})".format( + idx, begin[idx], end[idx], stride[idx] + ) + ) + ret_shape.append( + np.arange(begin[idx] - end[idx])[ + slice(-1, end[idx] - begin[idx] - 1, stride[idx]) + ].size + ) + continue + + if begin_mask[idx] and not end_mask[idx] and end[idx] is not None: + # in this case we know that the slice is [0, end] or [-1, end], depending on the sign of stride, + # and the value of end is known + if end[idx] > 0 and stride[idx] > 0: + ret_shape.append( + np.arange(end[idx])[slice(None, end[idx], stride[idx])].size + ) + continue + if end[idx] < 0 and stride[idx] < 0: + ret_shape.append( + np.arange(abs(end[idx]))[slice(None, end[idx], stride[idx])].size + ) + continue + + if not begin_mask[idx] and end_mask[idx] and begin[idx] is not None: + # in this case we know the value of begin, and since end_mask is True, we know that the slice + # is till the right most edge + if begin[idx] > 0 and stride[idx] < 0: + ret_shape.append( + np.arange(begin[idx] + 1)[slice(begin[idx], None, stride[idx])].size + ) + continue + if begin[idx] < 0 and stride[idx] > 0: + ret_shape.append( + np.arange(abs(begin[idx]))[ + slice(begin[idx], None, stride[idx]) + ].size + ) + continue + + # for symbolic case + if is_symbolic(x_shape[idx]): + ret_shape.append(get_new_symbol()) + continue + + # for single-element extraction case + if x_shape[idx] == 1: + ret_shape.append(1) + continue + + # when begin and end are not determined + if begin[idx] is None and not begin_mask[idx]: + ret_shape.append(get_new_symbol()) + continue + if end[idx] is None and not end_mask[idx]: + ret_shape.append(get_new_symbol()) + continue + + # parse negative dimension + if begin[idx] is not None and begin[idx] < 0: + begin[idx] = max(0, begin[idx] + x_shape[idx]) + if end[idx] is not None and end[idx] < 0: + end[idx] = max(0, end[idx] + x_shape[idx]) + + # compute shape + low, high = [0, x_shape[idx]] if stride[idx] > 0 else [-1, x_shape[idx] - 1] + begin_idx, end_idx = ( + [begin[idx], end[idx]] if stride[idx] > 0 else [end[idx], begin[idx]] + ) + is_begin_mask, is_end_mask = ( + [begin_mask[idx], end_mask[idx]] + if stride[idx] > 0 + else [end_mask[idx], begin_mask[idx]] + ) + if is_begin_mask: + begin_idx = low + end_idx = high if is_end_mask else min(end_idx, high) + num = np.ceil(float(end_idx - begin_idx) / abs(stride[idx])).astype( + np.int32 + ) + ret_shape.append(max(0, num)) + + return ret_shape diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/complex_dialect_ops.py 
b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/complex_dialect_ops.py
new file mode 100644
index 00000000..2a12f029
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/complex_dialect_ops.py
@@ -0,0 +1,744 @@
+# Copyright (c) 2022, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+"""
+This file contains the dialect ops for handling complex numbers.
+
+For example, torch.fft.fft accepts complex input and produces complex output, which is not
+supported by Core ML. However, we can break the calculation into its real and imaginary parts
+to work around the restriction.
+The dialect ops provided by this file can be used by any frontend (PyTorch, TensorFlow, etc.).
+For example, during torch frontend translation, torch's fft_fft op can be translated to
+    def fft_fft(context, node):
+        input_data, n, dim, norm = _get_inputs(context, node, expected=[4])
+        fft_res = mb.complex_fft(data=input_data, n=n, dim=dim, norm=norm)
+        context.add(fft_res, node.name)
+and then the fft dialect op will be lowered into core ops by calculating the real and imaginary
+parts separately.
+
+There are mainly three types of complex dialect ops:
+- Ops where the real and imag data interact (such as fft).
+- Ops where the real and imag data go through the non-complex version of the op separately (such as add).
+- Ops where only one of the real/imag data goes through the non-complex version (such as shape).
+
+All dialect ops in this file will be lowered into core ops by the `lower_complex_dialect_ops` pass.
+For adding a new dialect op, see the steps in the file docstring of `lower_complex_dialect_ops.py`.
+Notice that all dialect ops have `complex_` as a prefix, which is required by setting
+`namespace="complex"` in `register_op`.
+"""
+
+from typing import Optional, Tuple
+
+import numpy as np
+
+from coremltools.converters.mil.mil import Operation, types
+from coremltools.converters.mil.mil.input_type import (
+    DefaultInputs,
+    InputSpec,
+    TensorInputType,
+)
+from coremltools.converters.mil.mil.ops.registry import SSAOpRegistry
+from coremltools.converters.mil.mil.types.symbolic import any_symbolic, is_symbolic
+from coremltools.converters.mil.mil.types.type_mapping import (
+    infer_complex_dtype,
+    infer_fp_dtype_from_complex,
+)
+from coremltools.converters.mil.mil.var import ComplexVar, Var
+
+register_op = SSAOpRegistry.register_op
+
+_FFT_VALID_NORMS = {"forward", "backward", "ortho"}
+
+
+def fft_canonicalize_length_dim(
+    input_data: Var, length: Optional[Var], dim: Optional[Var], c2r: bool = False
+) -> Tuple[int, int]:
+    """
+    Canonicalize shape and dim for 1-D FFT (based on PyTorch's fft documentation):
+    - length: Signal length. If given, the input will either be zero-padded or trimmed to this
+          length before computing the FFT.
+    - dim: The dimension along which to take the one dimensional FFT.
+    - c2r: Used for "complex to real" ops, such as irfft, which take complex input and produce
+          real output.
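+
+    For instance (an illustrative sketch), for a rank-3 input of shape
+    (2, 3, 8) with length=None and dim=-1:
+
+        canonicalized (length, dim) == (8, -1)    # c2r=False
+        canonicalized (length, dim) == (14, -1)   # c2r=True: 2 * (8 - 1)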
+ """ + shapes, dims = fft_canonicalize_shapes_dims(input_data, length, dim, c2r) + return shapes[0], dims[0] + + +def fft_canonicalize_shapes_dims( + input_data: Var, shapes: Optional[Var], dims: Optional[Var], c2r: bool = False +) -> Tuple[Tuple[int], Tuple[int]]: + """ + Canonicalize shapes and dims for N-D FFT (based on PyTorch's fftn documentation): + - shapes: Signal size in the transformed dimensions. If given, each dimension dims[i] will + either be zero-padded or trimmed to the length s[i] before computing the FFT. If a + length -1 is specified, no padding is done in that dimension. + Default: s = [input.size(d) for d in dims] + - dims: Dimensions to be transformed. Default: all dimensions, or the last len(s) dimensions if + s is given. + - c2r: Use for "complex to real", such as irfftn, which takes complex and output real data. + """ + if shapes is not None: + shapes = shapes.val + if isinstance(shapes, np.integer): + shapes = (shapes,) + if dims is not None: + dims = dims.val + if isinstance(dims, np.integer): + dims = (dims,) + + # Input validation. + input_rank = input_data.rank + if dims is not None: + for dim in dims: + if dim < -input_rank or dim >= input_rank: + raise ValueError(f"Invalid dim {dim} in `dims`.") + if shapes is not None: + for shape in shapes: + if shape <= 0: + raise ValueError(f"Invalid shape {shape} in `shapes`.") + + # Determine if the last dim specified in dims need to be expanded. For IRFFTN, the input is + # interpreted as a one-sided Hermitian signal in the Fourier domain, as produced by rfftn(), so + # we need to expand the dim back to the full matrix (with conjugate part not pruned). + last_dim_expand: bool = shapes is None and c2r + + if shapes is not None: + if dims is None: + # Has shape, no dim. + # Default is last len(s) dimensions. + dims = tuple(range(input_rank - len(shapes), input_rank)) + else: + # Has shape, has dim. + if len(shapes) != len(dims): + raise ValueError( + "shapes and dims must have the same number of elements." + ) + shapes = tuple( + shape if shape != -1 else input_data.shape[dim] + for (shape, dim) in zip(shapes, dims) + ) + elif dims is None: + # No shape, no dim. + dims = tuple(range(input_rank)) + shapes = tuple(input_data.shape) + else: + # No shape, has dim. + shapes = tuple(input_data.shape[dim] for dim in dims) + + # In RFFTN, the output is trimmed (because FFT of real-value input is Hermitian-symmetric, the + # conjugate part is removed) to ``original_dim // 2 + 1``, so here we do the reverse + # ``2 * (trimmed_dim - 1)`` to restore the original shape. + if last_dim_expand: + target_last_dim_shape = 2 * (input_data.shape[dims[-1]] - 1) + shapes = shapes[:-1] + (target_last_dim_shape,) + + if len(shapes) != len(dims): + raise ValueError( + f"shape ({len(shapes)}) and dim ({len(dims)}) should have same number of elements." + ) + + return shapes, dims + + +@register_op(namespace="complex") +class complex(Operation): + """ + Dialect op for constructing a complex data from real and imaginary data. + """ + + input_spec = InputSpec( + real_data=TensorInputType(type_domain="T"), + imag_data=TensorInputType(type_domain="T"), + ) + + type_domains = { + "T": (types.fp32,), + } + + def type_inference(self): + if self.real_data.shape != self.imag_data.shape: + raise ValueError( + f"The shape of real_data ({self.real_data.shape}) and imag_data " + f"({self.imag_data.shape}) must match to construct complex data." 
+ ) + return types.tensor( + infer_complex_dtype(self.real_data.dtype, self.imag_data.dtype), + self.real_data.shape, + ) + + +@register_op(namespace="complex") +class complex_real(Operation): + """Dialect op for extracting real part of complex data.""" + + input_spec = InputSpec( + data=TensorInputType(type_domain="T"), + ) + + type_domains = { + "T": (types.complex64,), + } + + def type_inference(self): + return types.tensor( + infer_fp_dtype_from_complex(self.data.dtype), self.data.shape + ) + + +@register_op(namespace="complex") +class complex_imag(Operation): + """Dialect op for extracting imaginary part of complex data.""" + + input_spec = InputSpec( + data=TensorInputType(type_domain="T"), + ) + + type_domains = { + "T": (types.complex64,), + } + + def type_inference(self): + return types.tensor( + infer_fp_dtype_from_complex(self.data.dtype), self.data.shape + ) + + +@register_op(namespace="complex") +class complex_fft(Operation): + """ + Dialect op for 1-D FFT. As PyTorch's FFT API has a much more fine-grained control than + TensorFlow's, the parameters of this dialect op mainly follows `torch.fft.fft`. + + Parameters + ---------- + data: tensor<\*D, T> (Required) + * The input tensor. + n: const i32 (Optional. Default=None) + * Signal length. If given, the input will either be zero-padded or trimmed to this length + before computing the FFT. + dim: const i32 (Optional. Default=``-1``) + * The dimension along which to take the one dimensional FFT. + norm: const str (Optional. Default=``backward``) + * Normalization mode. For the forward transform (fft()), these correspond to: + * "forward" - normalize by 1/n + * "backward" - no normalization + * "ortho" - normalize by 1/sqrt(n) (making the FFT orthonormal) + * Calling the backward transform (ifft()) with the same normalization mode will apply an + overall normalization of 1/n between the two transforms. This is required to make ifft() + the exact inverse. + * Default is "backward" (no normalization). + + Returns + ------- + tensor<\*V, complex64> + * A complex tensor where real and imag parts have the same shape. + * If ``n`` is None, real's and imag's shapes are same as the input. + * If ``n`` is specified, shape is ``V[dim]=n``. + + Attributes + ---------- + T: fp32, complex64 + + References + ---------- + See `torch.fft.fft `_. + """ + + input_spec = InputSpec( + data=TensorInputType(type_domain="T"), + n=TensorInputType(const=True, optional=True, type_domain=types.int32), + dim=TensorInputType(const=True, optional=True, type_domain=types.int32), + norm=TensorInputType(const=True, optional=True, type_domain=types.str), + ) + + type_domains = { + "T": (types.fp32, types.complex64), + } + + def default_inputs(self): + return DefaultInputs( + n=None, + dim=-1, + norm="backward", + ) + + def type_inference(self): + if self.norm.val not in _FFT_VALID_NORMS: + raise ValueError( + f"Invalid norm param. Valid options are {_FFT_VALID_NORMS}" + ) + output_type = ( + self.data.dtype if types.is_complex(self.data.dtype) else types.complex64 + ) + # The shape of FFT output is determined by `n` and `dim`. + output_shape = list(self.data.shape) + n, dim = fft_canonicalize_length_dim(self.data, self.n, self.dim) + output_shape[dim] = n + return types.tensor(output_type, tuple(output_shape)) + + +@register_op(namespace="complex") +class complex_fftn(Operation): + """ + Dialect op for N-D FFT. As PyTorch's FFT API has a much more fine-grained control than + TensorFlow's, the parameters of this dialect op mainly follows `torch.fft.fftn`. 
+ + Parameters + ---------- + data: tensor<\*D, T> (Required) + * The input tensor. + shapes: const tensor (Optional. Default=None) + * Signal size in the transformed dimensions. If given, each dimension ``dims[i]`` will + either be zero-padded or trimmed to the length ``shapes[i]`` before computing the FFT. If + a length ``-1`` is specified, no padding is done in that dimension. If not specified, it's + equivalent to ``shapes = [data.size(dim) for dim in dims]``. + dims: const tensor (Optional. Default=None) + * Dimensions to be transformed. If not specified, it's equivalent to all dimensions, or the + last ``len(shapes)`` dimensions if ``shapes`` is given. + norm: const str (Optional. Default=``backward``) + * Normalization mode. For the forward transform (fftn()), these correspond to: + * "forward" - normalize by 1/n + * "backward" - no normalization + * "ortho" - normalize by 1/sqrt(n) (making the FFT orthonormal) + where ``n = prod(shapes)`` is the logical FFT size. Calling the backward transform + (ifftn()) with the same normalization mode will apply an overall normalization of 1/n + between the two transforms. This is required to make ifftn() the exact inverse. + * Default is "backward" (no normalization). + + Returns + ------- + tensor<\*V, complex64> + * A complex tensor where real and imag parts have the same shape. + * If ``shapes`` and ``dims`` are both None, real's and imag's shapes are same as the input. + * If ``shapes`` or ``dims`` is specified, shape is ``V[dim]=shapes[dim] for dim in dims``. + + Attributes + ---------- + T: fp32, complex64 + + References + ---------- + See `torch.fft.fftn `_. + """ + + input_spec = InputSpec( + data=TensorInputType(type_domain="T"), + shapes=TensorInputType(const=True, optional=True, type_domain=types.int32), + dims=TensorInputType(const=True, optional=True, type_domain=types.int32), + norm=TensorInputType(const=True, optional=True, type_domain=types.str), + ) + + type_domains = { + "T": (types.fp32, types.complex64), + } + + def default_inputs(self): + return DefaultInputs( + shapes=None, + dims=None, + norm="backward", + ) + + def type_inference(self): + if self.norm.val not in _FFT_VALID_NORMS: + raise ValueError( + f"Invalid norm param. Valid options are {_FFT_VALID_NORMS}" + ) + output_type = ( + self.data.dtype if types.is_complex(self.data.dtype) else types.complex64 + ) + # The shape of FFT output is determined by `shapes` and `dims`. + shapes, dims = fft_canonicalize_shapes_dims(self.data, self.shapes, self.dims) + output_shape = list(self.data.shape) + for shape, dim in zip(shapes, dims): + output_shape[dim] = shape + return types.tensor(output_type, tuple(output_shape)) + + +@register_op(namespace="complex") +class complex_rfft(Operation): + """ + Dialect op for 1-D RFFT. It's similar to 1-D FFT, but the input is real number. The FFT of a + real signal is Hermitian-symmetric, ``X[i] = conj(X[-i])``, so the output contains only the + positive frequencies below the Nyquist frequency. To compute the full output, use FFT. + + Parameters + ---------- + See the ``complex_fft`` op. + + Returns + ------- + tensor<\*V, complex64> + * Based on the output of FFT, further remove the redundant conjugate part, which means + ``V[dim] = V[dim] // 2 + 1``. + + Attributes + ---------- + T: fp32 + + References + ---------- + See `torch.fft.rfft `_. 
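+
+    Examples
+    --------
+    (Illustrative.) A real input of shape (4, 16) with the default ``n=None``
+    and ``dim=-1`` yields a complex64 output of shape (4, 9), since
+    16 // 2 + 1 == 9.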
+ """ + + input_spec = InputSpec( + data=TensorInputType(type_domain="T"), + n=TensorInputType(const=True, optional=True, type_domain=types.int32), + dim=TensorInputType(const=True, optional=True, type_domain=types.int32), + norm=TensorInputType(const=True, optional=True, type_domain=types.str), + ) + + type_domains = { + "T": (types.fp32,), + } + + def default_inputs(self): + return DefaultInputs( + n=None, + dim=-1, + norm="backward", + ) + + def type_inference(self): + if types.is_complex(self.data.dtype): + raise ValueError( + "RFFT requires real-value input. For complex input, please use FFT." + ) + output_type = infer_complex_dtype(self.data.dtype, self.data.dtype) + output_shape = list(self.data.shape) + n, dim = fft_canonicalize_length_dim(self.data, self.n, self.dim) + output_shape[dim] = n + # The shape of RFFT output is FFT after removing redundant conjugate part. + output_shape[self.dim.val] = output_shape[self.dim.val] // 2 + 1 + return types.tensor(output_type, tuple(output_shape)) + + +@register_op(namespace="complex") +class complex_rfftn(Operation): + """ + Dialect op for N-D RFFT (rfftn). The FFT of a real signal is Hermitian-symmetric, + X[i_1, ..., i_n] = conj(X[-i_1, ..., -i_n]) so the full ``complex_fftn`` output contains + redundant information. ``complex_rfftn`` omits the negative frequencies in the last dimension. + + Parameters + ---------- + See the ``complex_fftn`` op. + + Returns + ------- + tensor<\*V, complex64> + * Based on the output of N-D FFT, further remove the redundant conjugate part in last dim, + which means ``V[dims[-1]] = V[dims[-1]] // 2 + 1``. + + Attributes + ---------- + T: fp32 + + References + ---------- + See `torch.fft.rfftn `_. + """ + + input_spec = InputSpec( + data=TensorInputType(type_domain="T"), + shapes=TensorInputType(const=True, optional=True, type_domain=types.int32), + dims=TensorInputType(const=True, optional=True, type_domain=types.int32), + norm=TensorInputType(const=True, optional=True, type_domain=types.str), + ) + + type_domains = { + "T": (types.fp32,), + } + + def default_inputs(self): + return DefaultInputs( + shapes=None, + dims=None, + norm="backward", + ) + + def type_inference(self): + output_type = infer_complex_dtype(self.data.dtype, self.data.dtype) + output_shape = list(self.data.shape) + shapes, dims = fft_canonicalize_shapes_dims(self.data, self.shapes, self.dims) + for shape, dim in zip(shapes, dims): + output_shape[dim] = shape + # The last dim's shape is after removing the redundant conjugate part. + output_shape[dims[-1]] = output_shape[dims[-1]] // 2 + 1 + + return types.tensor(output_type, tuple(output_shape)) + + +@register_op(namespace="complex") +class complex_ifft(Operation): + """ + Dialect op for IFFT. Computes the one dimensional inverse discrete Fourier transform of input. + + Parameters + ---------- + All parameters except ``norm`` are same as the ``complex_fft`` op. + norm: const str (Optional. Default=``backward``) + * Normalization mode. For the backward transform (ifft()), these correspond to: + * "forward" - no normalization + * "backward" - normalize by 1/n + * "ortho" - normalize by 1/sqrt(n) (making the IFFT orthonormal) + * Calling the forward transform (fft()) with the same normalization mode will apply an + overall normalization of 1/n between the two transforms. This is required to make ifft() + the exact inverse. + * Default is "backward" (normalize by 1/n). + + Returns + ------- + tensor<\*V, T> + * A complex tensor where real and imag parts have the same shape. 
The shape is the same as + the input except for the ``dim``: + * If ``n`` is None, the shape is same as the input. + * If ``n`` is specified, the shape at the `dim` is ``V[dim]=n``. + + Attributes + ---------- + T: complex64 + + References + ---------- + See `torch.fft.ifft `_. + """ + + input_spec = InputSpec( + data=TensorInputType(type_domain="T"), + n=TensorInputType(const=True, optional=True, type_domain=types.int32), + dim=TensorInputType(const=True, optional=True, type_domain=types.int32), + norm=TensorInputType(const=True, optional=True, type_domain=types.str), + ) + + type_domains = { + "T": (types.complex64,), + } + + def default_inputs(self): + return DefaultInputs( + n=None, + dim=-1, + norm="backward", + ) + + def type_inference(self): + output_type = self.data.dtype + output_shape = list(self.data.shape) + n, dim = fft_canonicalize_length_dim(self.data, self.n, self.dim) + output_shape[dim] = n + return types.tensor(output_type, tuple(output_shape)) + + +@register_op(namespace="complex") +class complex_ifftn(Operation): + """ + Dialect op for N-D IFFT (ifftn). + + Parameters + ---------- + All parameters except ``norm`` are same as the ``complex_fftn`` op. + norm: const str (Optional. Default=``backward``) + * Normalization mode. For the backward transform (ifftn()), these correspond to: + * "forward" - no normalization + * "backward" - normalize by 1/n + * "ortho" - normalize by 1/sqrt(n) (making the IFFT orthonormal) + where n = prod(s) is the logical IFFT size. Calling the forward transform (fftn()) with + the same normalization mode will apply an overall normalization of 1/n between the two + transforms. This is required to make ifftn() the exact inverse. + * Default is "backward" (normalize by 1/n). + + Returns + ------- + tensor<\*V, T> + * A complex tensor where real and imag parts have the same shape. The shape is the same as + the input except for the ``dim`` in ``dims``: + * If ``shapes`` and ``dims`` are both None, the shape is same as the input. + * If ``shapes`` or ``dims`` is specified, shape at ``dim`` is ``shapes[dim]``. + + Attributes + ---------- + T: complex64 + + References + ---------- + See `torch.fft.ifftn `_. + """ + + input_spec = InputSpec( + data=TensorInputType(type_domain="T"), + shapes=TensorInputType(const=True, optional=True, type_domain=types.int32), + dims=TensorInputType(const=True, optional=True, type_domain=types.int32), + norm=TensorInputType(const=True, optional=True, type_domain=types.str), + ) + + type_domains = { + "T": (types.complex64,), + } + + def default_inputs(self): + return DefaultInputs( + shapes=None, + dims=None, + norm="backward", + ) + + def type_inference(self): + output_type = self.data.dtype + output_shape = list(self.data.shape) + shapes, dims = fft_canonicalize_shapes_dims(self.data, self.shapes, self.dims) + for shape, dim in zip(shapes, dims): + output_shape[dim] = shape + return types.tensor(output_type, tuple(output_shape)) + + +@register_op(namespace="complex") +class complex_irfft(Operation): + """ + Dialect op for IRFFT. Computes the inverse of RFFT. The input is interpreted as a one-sided + Hermitian signal in the Fourier domain, as produced by rfft(). By the Hermitian property, the + output will be real-valued. + + Parameters + ---------- + See the ``complex_ifft`` op for details. + + Returns + ------- + tensor<\*V, fp32> + * The shape is the same as the input except for the ``dim``: + * If ``n`` is None, the shape at the `dim` is ``V[dim] = 2 * (D[dim] - 1)``. 
+ * If ``n`` is specified, the shape at the `dim` is ``V[dim]=n``. + + Attributes + ---------- + T: complex64 + + References + ---------- + See `torch.fft.irfft `_. + """ + + input_spec = InputSpec( + data=TensorInputType(type_domain="T"), + n=TensorInputType(const=True, optional=True, type_domain=types.int32), + dim=TensorInputType(const=True, optional=True, type_domain=types.int32), + norm=TensorInputType(const=True, optional=True, type_domain=types.str), + ) + + type_domains = { + "T": (types.complex64,), + } + + def default_inputs(self): + return DefaultInputs( + n=None, + dim=-1, + norm="backward", + ) + + def type_inference(self): + output_type = infer_fp_dtype_from_complex(self.data.dtype) + output_shape = list(self.data.shape) + n, dim = fft_canonicalize_length_dim(self.data, self.n, self.dim, c2r=True) + output_shape[dim] = n + return types.tensor(output_type, tuple(output_shape)) + + +@register_op(namespace="complex") +class complex_irfftn(Operation): + """ + Dialect op for N-D IRFFT (irfftn). + + Parameters + ---------- + See the ``complex_ifftn`` op for details. + + Returns + ------- + tensor<\*V, fp32> + * The shape is the same as the input except for: + * If ``shapes`` and ``dims`` are both None, shape at the last dim ``V[-1]`` is + ``2 * (D[-1] - 1)``. + * If ``shapes`` or ``dims`` is specified, shape at ``dim`` is ``shapes[dim]``. + + Attributes + ---------- + T: complex64 + + References + ---------- + See `torch.fft.irfftn `_. + """ + + input_spec = InputSpec( + data=TensorInputType(type_domain="T"), + shapes=TensorInputType(const=True, optional=True, type_domain=types.int32), + dims=TensorInputType(const=True, optional=True, type_domain=types.int32), + norm=TensorInputType(const=True, optional=True, type_domain=types.str), + ) + + type_domains = { + "T": (types.complex64,), + } + + def default_inputs(self): + return DefaultInputs( + shapes=None, + dims=None, + norm="backward", + ) + + def type_inference(self): + output_type = infer_fp_dtype_from_complex(self.data.dtype) + output_shape = list(self.data.shape) + shapes, dims = fft_canonicalize_shapes_dims( + self.data, self.shapes, self.dims, c2r=True + ) + for shape, dim in zip(shapes, dims): + output_shape[dim] = shape + return types.tensor(output_type, tuple(output_shape)) + + +@register_op(namespace="complex") +class complex_shape(Operation): + """ + Returns a 1-dimensional tensor with the shape of the input complex tensor. + + Parameters + ---------- + x: tensor<[*?], T> (Required) + * Input tensor. + + Returns + ------- + tensor + * Shape of the input tensor. + * ``K = x.real.rank``. 
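+    * For example, a ``complex64`` input of shape ``(2, 3)`` yields the 1-D
+      ``int32`` tensor ``[2, 3]`` (so ``K = 2``).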
+ + Attributes + ---------- + T: complex64 + """ + + input_spec = InputSpec(x=TensorInputType(type_domain="T")) + + type_domains = { + "T": (types.complex64,), + } + + def type_inference(self): + if not isinstance(self.x, ComplexVar): + raise ValueError("x must be a ComplexVar.") + input_rank = self.x.real.rank + return types.tensor(types.int32, tuple([input_rank])) + + def value_inference(self): + if any_symbolic(self.x.real.shape): + # convert elements in shape to int32 + res = [x if is_symbolic(x) else np.int32(x) for x in self.x.real.shape] + return np.array(res) + else: + return np.array(self.x.real.shape).astype(np.int32) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/__init__.py new file mode 100644 index 00000000..9e7f6b89 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/__init__.py @@ -0,0 +1,52 @@ +# Copyright (c) 2022, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause +from coremltools.converters.mil._deployment_compatibility import \ + AvailableTarget as target + +_IOS15_TARGET = target.iOS15 + +from .activation import (clamped_relu, elu, gelu, leaky_relu, + linear_activation, prelu, relu, relu6, scaled_tanh, + sigmoid, sigmoid_hard, silu, softmax, softplus, + softplus_parametric, softsign, thresholded_relu) +from .classify import classify +from .control_flow import (cond, const, list_gather, list_length, list_read, + list_scatter, list_write, make_list, select, + while_loop) +from .conv import conv, conv_quantized, conv_transpose +from .elementwise_binary import (add, elementwise_binary, equal, floor_div, + greater, greater_equal, less, less_equal, + logical_and, logical_or, logical_xor, maximum, + minimum, mod, mul, not_equal, pow, real_div, + sub) +from .elementwise_unary import (abs, acos, asin, atan, atanh, cast, ceil, clip, + cos, cosh, erf, exp, exp2, floor, inverse, log, + logical_not, round, rsqrt, sign, sin, sinh, + sqrt, square, tan, tanh, threshold) +from .image_resizing import (affine, crop, crop_resize, resample, + resize_bilinear, resize_nearest_neighbor, + upsample_bilinear, upsample_nearest_neighbor) +from .linear import einsum, linear, matmul +from .normalization import (batch_norm, instance_norm, l2_norm, layer_norm, + local_response_norm) +from .pool import avg_pool, l2_pool, max_pool +from .random import (random_bernoulli, random_categorical, random_normal, + random_uniform) +from .recurrent import gru, lstm, rnn +from .reduction import (reduce_argmax, reduce_argmin, reduce_l1_norm, + reduce_l2_norm, reduce_log_sum, reduce_log_sum_exp, + reduce_max, reduce_mean, reduce_min, reduce_prod, + reduce_sum, reduce_sum_square) +from .scatter_gather import (gather, gather_along_axis, gather_nd, scatter, + scatter_along_axis, scatter_nd) +from .tensor_operation import (argsort, band_part, concat, cumsum, fill, + flatten2d, identity, non_maximum_suppression, + non_zero, one_hot, pad, range_1d, shape, split, + stack, tile, topk) +from .tensor_transformation import (depth_to_space, expand_dims, pixel_shuffle, + reshape, reverse, reverse_sequence, + slice_by_index, slice_by_size, + sliding_windows, space_to_batch, + space_to_depth, squeeze, transpose) diff --git 
a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/activation.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/activation.py new file mode 100644 index 00000000..0df819f4 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/activation.py @@ -0,0 +1,616 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import math + +import numpy as np + +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.input_type import (DefaultInputs, + InputSpec, + TensorInputType) +from coremltools.converters.mil.mil.operation import (VALUE, Operation, + precondition) +from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op + +from .elementwise_unary import elementwise_unary + + +class activation_with_alpha(Operation): + """ + Activation with Alpha Op Superclass + """ + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + alpha=TensorInputType(const=True, type_domain="T"), + ) + + type_domains = { + "T": (types.fp16, types.fp32), + } + + def type_inference(self): + return self.x.sym_type + + +class activation_with_alpha_and_beta(Operation): + """ + Activation with Alpha Beta Op Superclass + """ + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + alpha=TensorInputType(const=True, type_domain="T"), + beta=TensorInputType(const=True, type_domain="T"), + ) + + type_domains = { + "T": (types.fp16, types.fp32), + } + + def type_inference(self): + return self.x.sym_type + + +@register_op +class clamped_relu(activation_with_alpha_and_beta): + """ + If ``x >= 0`` return elementwise ``min(beta, x)``, otherwise return + ``min(beta, alpha * x)``. + + Parameters + ---------- + x: tensor<\*?, T> (Required) + alpha: const T (Required) + beta: const T (Required) + + Returns + ------- + tensor<\*?, T> + * A tensor of the same type and shape as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + x = np.minimum(np.maximum(self.x.val, 0), self.beta.val) + y = np.minimum(np.minimum(self.x.val, 0) * self.alpha.val, self.beta.val) + return x + y + + +@register_op +class elu(activation_with_alpha): + """ + If ``x > 0`` return elementwise ``x``, otherwise return ``alpha * (e^x - 1)``. + + Parameters + ---------- + x: tensor<\*?, T> (Required) + alpha: const T (Required) + + Returns + ------- + tensor<\*?, T> + * A tensor of the same shape and type as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + b = np.copy(self.x.val) + b[b < 0] = self.alpha.val * (np.exp(b[b < 0]) - 1) + return b + + +@register_op +class gelu(Operation): + """ + Return the elementwise Gaussian error linear unit activation function for ``x``. + + You can use ``EXACT``, ``TANH_APPROXIMATION``, or ``SIGMOID_APPROXIMATION`` values + based on the following formulas: + + * ``EXACT``: + + .. math:: + f(x) = 0.5x\\left ( 1+\\rm{erf}\\left ( \\frac{x}{\\sqrt{2}} \\right ) \\right ) + + * ``TANH_APPROXIMATION``: + + .. math:: + f(x) = 0.5x\\left ( 1+\\rm{tanh}\\left ( \\sqrt{2/\\pi}\\left ( x + 0.044715x^3 \\right ) \\right ) \\right ) + + * ``SIGMOID_APPROXIMATION``: + + .. 
math::
+        f(x) = x*\\rm{sigmoid}(1.702x)
+
+
+    Parameters
+    ----------
+    x: tensor<\*?, T> (Required)
+
+    mode: const str (Optional)
+        * One of ``'EXACT'``, ``'TANH_APPROXIMATION'``, or ``'SIGMOID_APPROXIMATION'``.
+        * Default is ``'EXACT'``.
+
+    Returns
+    -------
+    tensor<\*?, T>
+        * A tensor of the same shape and type as ``x``.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        mode=TensorInputType(const=True, optional=True, type_domain=types.str),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            mode="EXACT",
+        )
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        if self.mode.val == "TANH_APPROXIMATION":
+            a = np.sqrt(2 / np.pi) * (self.x.val + 0.044715 * np.power(self.x.val, 3))
+            return 0.5 * self.x.val * (1 + np.tanh(a))
+        elif self.mode.val == "SIGMOID_APPROXIMATION":
+            return self.x.val * (1 / (1 + np.exp(-(1.702 * self.x.val))))
+        else:
+            # Exact GELU via the Gauss error function.
+            square_root_of_2 = np.sqrt(2)
+            vfunc = np.vectorize(lambda x: 0.5 * x * (1 + math.erf(x / square_root_of_2)))
+            return vfunc(self.x.val)
+
+    def type_inference(self):
+        allowed_values = {"EXACT", "TANH_APPROXIMATION", "SIGMOID_APPROXIMATION"}
+        if self.mode.val not in allowed_values:
+            msg = '"gelu" op: unrecognized value of mode: "{}". Allowed values are {}'
+            raise ValueError(msg.format(self.mode.val, allowed_values))
+        return self.x.sym_type
+
+
+@register_op
+class leaky_relu(activation_with_alpha):
+    """
+    If ``x >= 0`` apply ``x`` elementwise, otherwise apply ``alpha * x`` elementwise.
+
+    Parameters
+    ----------
+    x: tensor<\*?, T> (Required)
+    alpha: const T (Required)
+
+    Returns
+    -------
+    tensor<\*?, T>
+        * A tensor of the same shape and type as ``x``.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        b = np.copy(self.x.val)
+        b[b < 0] *= self.alpha.val
+        return b
+
+
+@register_op
+class linear_activation(activation_with_alpha_and_beta):
+    """
+    Apply elementwise ``x * alpha + beta``.
+
+    Parameters
+    ----------
+    x: tensor<\*?, T> (Required)
+    alpha: const T (Required)
+    beta: const T (Required)
+
+    Returns
+    -------
+    tensor<\*?, T>
+        * A tensor of the same shape and type as ``x``.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        return self.alpha.val * self.x.val + self.beta.val
+
+
+@register_op
+class prelu(activation_with_alpha):
+    """
+    Where ``i = 1 ... C``, if ``x_i > 0``, return ``x_i``, otherwise return ``alpha_i * x_i``.
+
+    Parameters
+    ----------
+    x: tensor<[B, C, 1..3], T> (Required)
+        * x must have rank 3, 4, or 5; that is, a shape of ``(B,C,H)``, ``(B,C,H,W)``, or ``(B,C,D,H,W)``.
+    alpha: const tensor<[C], T> (Required)
+        * The length of alpha must match the second dimension of x (the channel dimension).
+
+    Returns
+    -------
+    tensor<[B, C, 1..3], T>
+        * A tensor of the same shape as ``x``.
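+
+    A hedged NumPy sketch of the per-channel broadcast (illustration only; the
+    actual behavior lives in ``value_inference`` below)::
+
+        import numpy as np
+
+        x = np.array([[[-1.0], [2.0]]])   # shape (B=1, C=2, H=1)
+        alpha = np.array([0.5, 0.1])      # one slope per channel
+        out = np.maximum(x, 0) + np.minimum(x, 0) * alpha.reshape(1, -1, 1)
+        # channel 0: -1.0 -> -0.5 (scaled by alpha[0]); channel 1: 2.0 -> 2.0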
+ + Attributes + ---------- + T: fp32, fp16 + """ + + @precondition(allow=VALUE) + def value_inference(self): + alpha_br = self.alpha.val + for i in range(1, len(self.x.shape)): + alpha_br = np.expand_dims(alpha_br, i) + x_pos = np.maximum(self.x.val, 0) + b = np.minimum(self.x.val, 0) + return x_pos + b * alpha_br + + def type_inference(self): + if self.x.rank not in (3, 4, 5): + raise ValueError( + "prelu op: x must be rank 3 or 4 or 5, instead it is of rank {}".format( + len(self.x.shape) + ) + ) + if len(self.alpha.val.shape) != 1: + raise ValueError("alpha should be rank 1") + if self.x.shape[1] != self.alpha.val.shape[0]: + raise ValueError( + "Size of dimension 1 of alpha should be the same as " + + "the size of dimension 1 of x." + ) + if self.x.rank in (3, 5): + # check whether all alpha values are the same or not + are_values_same = ( + np.where(np.abs(self.alpha.val - self.alpha.val[0]) > 1e-5)[0].size == 0 + ) + if not are_values_same: + raise ValueError( + "prelu op: rank 3 or rank 5 input is only supported when all the values of alpha are same," + "which is not the case here" + ) + return self.x.sym_type + + +@register_op +class relu(elementwise_unary): + """ + Return elementwise-applied rectified linear activation: ``max(x, 0)``. + + Parameters + ---------- + x: tensor<\*?, T> (Required) + + Returns + ------- + tensor<\*?, T> + * A tensor of the same shape and type as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + return np.maximum(self.x.val, 0) + + +@register_op +class relu6(elementwise_unary): + """ + Return elementwise-applied rectified linear activation: ``min(max(x, 0), 6)``. + + Parameters + ---------- + x: tensor<\*?, T> (Required) + + Returns + ------- + tensor<\*?, T> + * A tensor of the same shape and type as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + return np.minimum(np.maximum(self.x.val, 0), 6) + + +@register_op +class scaled_tanh(activation_with_alpha_and_beta): + """ + Return ``alpha * tanh(beta * x)`` elementwise. + + Parameters + ---------- + x: tensor<\*?, T> (Required) + * Input range is ``(-inf, inf)``. + alpha: const T (Required) + beta: const T (Required) + + Returns + ------- + tensor<\*?, T> + * A tensor of the same shape and type as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + return self.alpha.val * np.tanh(self.x.val * self.beta.val) + + +@register_op +class sigmoid(elementwise_unary): + """ + Return ``sigmoid(x)`` elementwise. + + Parameters + ---------- + x: tensor<\*?, T> (Required) + + Returns + ------- + tensor<\*?, T> + * A tensor of the same shape as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + return 1 / (1 + np.exp(-self.x.val)) + + +@register_op +class sigmoid_hard(activation_with_alpha_and_beta): + """ + Return ``min( max( alpha * x + beta, 0 ), 1 )`` elementwise. + + Parameters + ---------- + x: tensor<\*?, T> (Required) + alpha: const T (Required) + beta: const T (Required) + + Returns + ------- + tensor<\*?, T> + * A tensor of the same shape and type as ``x``. 
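+    * For example, with ``alpha = 0.2`` and ``beta = 0.5`` (a common "hard
+      sigmoid" setting), ``x = 0`` maps to ``0.5``, any ``x >= 2.5`` saturates
+      at ``1``, and any ``x <= -2.5`` saturates at ``0``.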
+ + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + return np.minimum( + np.maximum((self.alpha.val * self.x.val) + self.beta.val, 0), 1 + ) + + +@register_op +class silu(elementwise_unary): + """ + Sigmoid Linear Unit, elementwise apply the SiLU or Swish operation ``x * sigmoid(x)``. + + Parameters + ---------- + x: tensor<\*, T> + + Returns + ------- + tensor<\*, T> + + Attributes + ---------- + T: fp16, fp32 + """ + + pass + + +@register_op +class softplus(elementwise_unary): + """ + Return ``log( 1 + e^x )`` elementwise. + + Parameters + ---------- + x: tensor<\*?, T> (Required) + + Returns + ------- + tensor<\*?, T> + * A tensor of the same shape and type as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + return np.log(1 + np.exp(-np.abs(self.x.val))) + np.maximum(self.x.val, 0) + + +@register_op +class softplus_parametric(activation_with_alpha_and_beta): + """ + Return ``alpha_i * log( 1 + e^( beta_i * x_i ) )``, where ``i = 1 ... C``. + + Parameters + ---------- + x: tensor<[b, C, n, m], T> (Required) + alpha: const tensor<[C], T> (Required) + beta: const tensor<[C], T> (Required) + + Returns + ------- + tensor<[b, C, n, m], T> + * A tensor of the same shape as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + alpha_br = np.copy(self.alpha.val) + beta_br = np.copy(self.beta.val) + for i in range(1, len(self.x.val.shape)): + alpha_br = np.expand_dims(alpha_br, i) + beta_br = np.expand_dims(beta_br, i) + return alpha_br * np.log(1 + np.exp(self.x.val * beta_br)) + + def type_inference(self): + if len(self.x.shape) < 3: + raise ValueError("x should be at least rank 3") + if len(self.alpha.val.shape) != 1: + raise ValueError("alpha should be rank 1") + if self.x.shape[1] != self.alpha.val.shape[0]: + raise ValueError( + "Size of dimension 0 of alpha should be the same as " + + "the size of dimension 1 of x." + ) + if len(self.beta.val.shape) != 1: + raise ValueError("beta should be rank 1") + if self.x.shape[1] != self.beta.val.shape[0]: + raise ValueError( + "Size of dimension 0 of beta should be the same as " + + "the size of dimension 1 of x." + ) + return self.x.sym_type + + +@register_op +class softmax(Operation): + """ + Return ``exp(x) / tf.reduce_sum(tf.exp(x), axis)``. + + Parameters + ---------- + x: tensor<\*?, T> (Required) + axis: const i32 (Optional) + * Default is ``-1``. + + Returns + ------- + tensor<\*?, T> + * A tensor of the same shape and type as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + axis=TensorInputType(const=True, optional=True, type_domain=types.int32), + ) + + type_domains = { + "T": (types.fp16, types.fp32), + } + + def default_inputs(self): + return DefaultInputs( + axis=-1, + ) + + def type_inference(self): + return self.x.sym_type + + @precondition(allow=VALUE) + def value_inference(self): + x = self.x.val + axis = self.axis.val + max_vals = np.max(x, axis=axis, keepdims=True) + temp = np.exp(x - max_vals) + return temp / np.sum(temp, axis=axis, keepdims=True) + + +@register_op +class softsign(elementwise_unary): + """ + Return ``x / ( 1 + |x| )`` applied elementwise. + + Parameters + ---------- + x: tensor<\*?, T> (Required) + + Returns + ------- + tensor<\*?, T> + * A tensor of the same shape and type as ``x``. 
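+    * For example, ``x = 3`` maps to ``3 / (1 + 3) = 0.75`` and ``x = -1``
+      maps to ``-0.5``; outputs always lie in the open interval ``(-1, 1)``.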
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        return self.x.val / (1 + np.abs(self.x.val))
+
+
+@register_op
+class thresholded_relu(activation_with_alpha):
+    """
+    Return ``x`` if ``x >= alpha``, otherwise return ``0``.
+
+    Parameters
+    ----------
+    x: tensor<\*?, T> (Required)
+    alpha: const T (Required)
+
+    Returns
+    -------
+    tensor<\*, T>
+        * A tensor of the same shape and type as ``x``.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        # Copy so we do not mutate the (const) input value in place.
+        y = np.copy(self.x.val)
+        y[y < self.alpha.val] = 0
+        return y
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/classify.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/classify.py
new file mode 100644
index 00000000..29d819fd
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/classify.py
@@ -0,0 +1,76 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import numpy as np
+
+from coremltools.converters.mil.mil import Operation, types
+from coremltools.converters.mil.mil.input_type import (InputSpec,
+                                                       ListInputType,
+                                                       TensorInputType)
+from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op
+from coremltools.converters.mil.mil.types.symbolic import any_symbolic
+
+
+@register_op
+class classify(Operation):
+    """
+    The presence of this op indicates that the model is of type classifier. The op
+    constructs the model output accordingly; that is, the predicted class label
+    and the output probability dictionary. The parameters of this op are set
+    based on the attributes set for the
+    `coremltools.ClassifierConfig `_ class
+    by the user. The outputs of this op cannot be used by another op.
+
+    Parameters
+    ----------
+    probabilities: tensor<[\*, ProbT]> (Required)
+        A tensor in the graph, which is used to compute the classifier output(s). This
+        is the tensor whose values are mapped to the class labels and used for constructing
+        the predicted class label and the output dictionary of class names and values.
+
+    classes: list<\*, ClassT> (Required)
+        List of classes.
+
+    Returns
+    -------
+
+    Dict[ClassT, ProbT]
+
+
+    Attributes
+    ----------
+    ProbT: fp32
+    ClassT: i64, str
+    """
+
+    input_spec = InputSpec(
+        probabilities=TensorInputType(type_domain=types.fp32),
+        classes=ListInputType(const=True),
+    )
+
+    def type_inference(self):
+        # check the type of "classes"
+        if not types.is_list(self.classes.sym_type):
+            msg = "'classes' in the op 'classify' must be of type list. Instead it is {}."
+            raise ValueError(msg.format(self.classes.sym_type.__type_info__()))
+
+        # check the type of "probabilities"
+        if self.probabilities.dtype != types.fp32:
+            msg = "classify op: input probabilities must be of type fp32. Instead it is of type {}"
+            raise TypeError(msg.format(self.probabilities.sym_type.get_primitive().__type_info__()))
+
+        classes_elem_type = self.classes.elem_type
+        if classes_elem_type not in {types.str, types.int64}:
+            msg = "Type of elements in 'classes' in the op 'classify' must be either str or int64. Instead it is {}."
+ raise ValueError(msg.format(classes_elem_type.__type_info__())) + + # check that the size of "classes" is compatible with the size of "probabilities" + if not any_symbolic(self.probabilities.shape): + size = np.prod(self.probabilities.shape) + if len(self.classes.val) != size: + msg = "In op 'classify', number of classes must match the size of the tensor corresponding to 'probabilities'." + raise ValueError(msg) + + return classes_elem_type, types.dict(classes_elem_type, types.double) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/control_flow.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/control_flow.py new file mode 100644 index 00000000..621ddf05 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/control_flow.py @@ -0,0 +1,828 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import copy + +import numpy as np + +from coremltools import _logger as logger +from coremltools.converters.mil.mil import (Block, get_existing_symbol, + get_new_symbol, types) +from coremltools.converters.mil.mil.input_type import (DefaultInputs, + InputSpec, + InternalInputType, + ListInputType, + PyFunctionInputType, + TensorInputType, + TupleInputType) +from coremltools.converters.mil.mil.operation import (NONE, SYMBOL, VALUE, + Operation, mil_list, + precondition) +from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op +from coremltools.converters.mil.mil.types import is_compatible_type +from coremltools.converters.mil.mil.types.type_mapping import ( + builtin_to_string, is_subtype, numpy_type_to_builtin_type, + numpy_val_to_builtin_val) + + +@register_op +class cond(Operation): + """ + Perform a conditional execution. The return types must be identical + between the true and false branches. + + Parameters + ---------- + pred: tensor<[], bool> (Required) + * 0-D tensor (scalar) predicate to switch between true and false branches. + + _true_fn: function (Required) + * A Python function that executes if ``pred`` evaluates to ``True``. + * It must take zero input (i.e, no input), and return one or more values whose type becomes + the operation's return type. + + _false_fn: function (Required) + * A Python function that executes if ``pred`` evaluates to ``False``. + * It must take zero input (i.e. no input), and have return types that match those of the + ``if`` branch. + + _existing_blocks: list[Block] (Optional) + * Python list of ``Block``. + * For internal use only. When converting a milproto, we already got existing blocks, + and the ``build_nested_blocks`` function can use them directly. + * When ``_existing_blocks`` is set, ``_true_fn`` and ``_false_fn`` must be dummy functions which returns ``None``. + + Returns + ------- + tuple + * Python tuple of ``Variables`` from one of the branches. + """ + + input_spec = InputSpec( + pred=TensorInputType(type_domain=types.bool), + _true_fn=PyFunctionInputType(), + _false_fn=PyFunctionInputType(), + _existing_blocks=InternalInputType(optional=True), + ) + + def build_nested_blocks(self): + # If the front end is milproto, we already have the well constructed cond/body block. + # For this case, we set self.blocks directly. + # We also check that _cond and _body are both dummy functions (return None). 
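+        # A rough sketch of the non-milproto path below (hypothetical builder
+        # calls, for orientation only): given
+        #     cond(pred=p, _true_fn=lambda: mb.add(x=a, y=b),
+        #          _false_fn=lambda: mb.sub(x=a, y=b))
+        # each callable is invoked once inside a `with Block(...)` scope and
+        # its return vars become that block's outputs.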
+ if self._existing_blocks is not None and self._existing_blocks.val is not None: + assert self._true_fn.val([]) is None + assert self._false_fn.val([]) is None + self.blocks = self._existing_blocks.val + return + + # Cond block + true_block_name = self.name + "_true" + with Block(name=true_block_name, outer_op=self) as true_block: + true_func = self._true_fn.val + true_ret_vars = true_func() + if isinstance(true_ret_vars, tuple): + true_ret_vars = list(true_ret_vars) + if not isinstance(true_ret_vars, list): + true_ret_vars = [true_ret_vars] + true_block.set_outputs(true_ret_vars) + self.blocks.append(true_block) + + false_block_name = self.name + "_false" + with Block(name=false_block_name, outer_op=self) as false_block: + false_func = self._false_fn.val + false_ret_vars = false_func() + if isinstance(false_ret_vars, tuple): + false_ret_vars = list(false_ret_vars) + if not isinstance(false_ret_vars, list): + false_ret_vars = [false_ret_vars] + false_block.set_outputs(false_ret_vars) + self.blocks.append(false_block) + + def type_inference(self): + true_ret_vars = self.blocks[0].outputs + false_ret_vars = self.blocks[1].outputs + # Verify true_ret_vars has the same types as false_ret_vars + for i, (vt, vf) in enumerate(zip(true_ret_vars, false_ret_vars)): + if not is_compatible_type(vt.sym_type, vf.sym_type): + msg = ( + "true branch output {} type {} mismatch false branch" + + " output type {}" + ) + raise ValueError(msg.format(vt.name, + vt.sym_type.__type_info__(), vf.sym_type.__type_info__())) + + return tuple(v.sym_type for v in true_ret_vars) + + def value_inference(self): + if self.pred.val is None: + raise NotImplementedError() + if self.pred.val: + return [v.val for v in self.blocks[0].outputs] + return [v.val for v in self.blocks[1].outputs] + + +class Const(Operation): + """ + A base class that returns constant values. + + Parameters + ---------- + mode: immediate_value, file_value (Optional) + * Determines how the constant value is stored in the internal MIL format. + * For large constants such as convolution weights, use ``file_value``. + * For smaller-size constants such as values of a stride, use ``immediate_value``. 
+ + val: const<\*,T> (Required) + + Returns + ------- + const<\*,T> + + Attributes + ---------- + T: fp16, fp32, i32, str, bool + """ + + input_spec = InputSpec( + val=InternalInputType(const=True), + ) + + def type_inference(self): + builtin_type, _ = self._get_type_val(self.val.val) + return builtin_type + + def value_inference(self): + _, val = self._get_type_val(self.val.val) + return val + + def _get_type_val(self, value): + + if isinstance(value, (float, np.float64)): + value = np.float32(value) + elif isinstance(value, bool): + pass + elif isinstance(value, (int, np.int64)): + value = np.int32(value) + elif isinstance(value, (tuple, list, np.ndarray)): + value = np.array(value) if isinstance(value, (tuple, list)) else value + + # For the int type, we use int32 by default + if value.dtype in [np.uint16, np.int16, np.uint64, np.int64]: + if value.dtype in [np.uint64, np.int64]: + msg = "Downcast const op {} data".format(self.name) + builtin_to_string(numpy_type_to_builtin_type(value.dtype)) + " as int32" + logger.debug(msg) + value = value.astype(np.int32) + + + # For the float type, we use float32 by default + elif value.dtype == np.float64: + msg = "Downcast const op {} data fp64 as fp32".format(self.name) + logger.debug(msg) + value = value.astype(np.float32) + + elif isinstance(value, mil_list): + # if val that was passed in is of type mil_list, which is just a wrapper on top of python list + # then construct the list type + list_value = value.ls + if len(list_value) == 0: + raise ValueError("'mil_list' points to an empty list") + builtin_elem_type, _ = self._get_type_val(list_value[0]) + # mil_list is a special case that we want to preserve the int64 element type + if isinstance(list_value[0], np.int64): + builtin_elem_type = types.int64 + from coremltools.converters.mil.mil.types.type_list import \ + list as types_list + builtin_type = types_list(builtin_elem_type, init_length=len(list_value), dynamic_length=False) + return builtin_type, value + + + if not isinstance(value, (np.generic, np.ndarray, str, bool, mil_list)): + raise ValueError("Unknown value for constant: {}".format(value)) + + _, builtin_type = numpy_val_to_builtin_val(value) + return builtin_type, value + + +@register_op +class const(Const): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + +# Internal const can have symbolic value (for testing purpose) +@register_op +class _const_symbolic(const): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def type_inference(self): + builtin_type, _ = self._get_type_val(self.val.sym_val) + return builtin_type + + def value_inference(self): + # We allow symbolic values in _const_symbolic + _, val = self._get_type_val(self.val.sym_val) + return val + + +@register_op +class select(Operation): + """ + Return the elements selected from either ``a`` or ``b`` depending on the ``cond``. + + The shape of ``cond``, ``a``, and ``b`` must be broadcastable. + You must provide ``a`` and ``b`` together, or provide neither. + If you provide neither, the operation returns the indices + of ``cond`` that are ``True``. + + Parameters + ---------- + cond: tensor<[\*D1], B> (Required) + * Tensor. When ``True``, select element from ``x``, otherwise, ``y``. + + a: tensor<[\*D2], T> (Optional) + * Values selected at indices where ``cond`` is ``True``. + * Default is ``None``. + + b: tensor<[\*D3], T> (Optional) + * Values selected at indices where ``cond`` is ``False``. + * Default is ``None``. 
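+
+    When both ``a`` and ``b`` are provided, the elementwise behavior mirrors
+    ``numpy.where`` (an analogy; see ``value_inference`` below)::
+
+        import numpy as np
+
+        np.where([True, False], [1, 2], [10, 20])   # -> array([ 1, 20])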
+ + Returns + ------- + tensor<[\*D_out], T> or tensor<[n, len(D1)], int32> + * If ``a, b`` are both provided, the return shape is based on broadcast rules + from ``cond, a, b``. + * If ``a, b`` are ``None``, the return shape is 2-D, where the first dimension + ``n`` is the number of matching indices in ``cond``, and ``len(D1)`` is the + rank of ``cond``. + + Attributes + ---------- + B: bool + T: fp16, fp32, i32, bool + """ + + input_spec = InputSpec( + cond=TensorInputType(type_domain=types.bool), + a=TensorInputType(type_domain="T"), + b=TensorInputType(type_domain="T") + ) + + type_domains = { + "T": (types.fp16, types.fp32, types.bool, types.int32), + } + + def type_inference(self): + a_type = self.a.sym_type + b_type = self.b.sym_type + if all([a_type, b_type]): + compatible, ret_type = types.is_tensor_and_is_compatible_general_shape( + a_type, b_type + ) + if compatible: + return ret_type + elif a_type == b_type: + return a_type + else: + raise ValueError("Type mismatch {} vs. {}".format(a_type, b_type)) + return a_type if a_type is not None else b_type + + @precondition(allow=VALUE) + def value_inference(self): + return np.where(self.cond.val, self.a.val, self.b.val) + + +@register_op +class while_loop(Operation): + """ + Perform the body repeatedly while the condition ``cond`` is true. + + Parameters + ---------- + _cond: function (Required) + * A Python function that takes ``loop_vars`` as positional arguments. + * The function must return a ``bool`` ``Var``. + + _body: function (Required) + * A Python function that takes ``loop_vars`` as positional arguments. + * The function must return the same number of output vars as ``loop_vars`` + with the same types. + + loop_vars: tuple (Required) + * Python tuple of ``Variables``. + + _existing_blocks: list[Block] (Optional) + * Python list of ``Block``. + * For internal use only. When converting a milproto, we already got existing blocks, + and the ``build_nested_blocks`` function can use them directly. + * When ``_existing_blocks`` is set, ``_cond`` and ``_body`` must be dummy functions which returns ``None``. + + Returns + ------- + tuple + * Python tuple (same type as ``loop_vars``). + """ + + input_spec = InputSpec( + # arg name with underscore prefix won't be printed. 
+ _cond=PyFunctionInputType(), + _body=PyFunctionInputType(), + loop_vars=TupleInputType(), + _existing_blocks=InternalInputType(optional=True), + ) + + @staticmethod + def _check_equal_value(val1, val2): + if val1 is None and val2 is None: + return True + if val1 is None or val2 is None: + return False + if isinstance(val1, np.ndarray) and isinstance(val2, np.ndarray): + return np.array_equal(val1, val2) + return val1 == val2 + + @staticmethod + def _clean_up_child_ops(block): + for op in list(block.operations): + + for b in op.blocks: + while_loop._clean_up_child_ops(b) + + inputs = op.get_flattened_inputs() + for in_var in inputs: + in_var.remove_child_op(op) + + def _build_block(self, block_inputs): + # Cond block: + block_name = self.name + '_cond_block' + with Block(block_inputs=block_inputs, outer_op=self, + name=block_name) as cond_block: + + cond_func = self._cond.val + cond_var = cond_func(*cond_block.inputs) + cond_vars = cond_var if isinstance(cond_var, list) else [cond_var] + cond_block.set_outputs(cond_vars) + + # Body block + block_name = self.name + '_body_block' + with Block(block_inputs=block_inputs, outer_op=self, + name=block_name) as body_block: + body_func = self._body.val + exit_vars = body_func(*body_block.inputs) + exit_vars = list(exit_vars) if isinstance(exit_vars, (list, tuple)) \ + else [exit_vars] + body_block.set_outputs(exit_vars) + + return cond_block, body_block, exit_vars + + def build_nested_blocks(self): + # self.loop_vars is python tuple of Vars. + + # block_inputs Var are not produced by any op. + # We assume block_inputs have the same types as self.loop_var. If not + # (e.g., when certain dimensions change shape during iterate), we'd + # adjust later. + + # We assume that sym_val is unchanging across the block iterate. If it + # changes, we rebuild the block and rerun type and value inference. + + # Design notes on two blocks (cond and body): + # + # - Observe that two blocks can always be represented as a single + # block that contains both cond and body logic, which would return + # [loop_cond] + loop_carries. `loop_cond` is a bool. + # + # - Observe that single block implies a do-while logic, + # in which the first iterate is always executed. It's possible to add + # a cond input to while_loop to modify do-while behavior: + # + # %first_cond = cond_logic(...) + # while_loop(cond=%first_cond, loop_vars=(...)) + # + # and we enter the first iterate only if cond is True. But this would + # require caller to execute cond logic outside of while_loop first + # (which also needs to be duplicated within the loop), + # resulting in duplicated code / ops. + # + # - Thus, single block is unnatural for the natural execution order, + # in which we execute the cond block first to get the loop_cond. Only + # if `loop_cond` is True do we execute the body block. This is the + # semantics of tf.while_loop. + + # If the front end is milproto, we already have the well constructed cond/body block. + # For this case, we set self.blocks directly. + # We also check that _cond and _body are both dummy functions (return None). 
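+        # Rough shape of what _build_block constructs below (a sketch, not
+        # literal code):
+        #     cond_block: block_inputs -> [bool]     # evaluated first
+        #     body_block: block_inputs -> exit_vars  # same types as loop_vars
+        # exit_vars feed back as the next iteration's block inputs.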
+        if self._existing_blocks is not None and self._existing_blocks.val is not None:
+            assert self._cond.val([]) is None
+            assert self._body.val([]) is None
+            self.blocks = self._existing_blocks.val
+            return
+
+        block_inputs = tuple(copy.copy(v) for v in self.loop_vars)
+        name_count = {v.name: 0 for v in block_inputs}
+        for v in block_inputs:
+            v._op = None
+            v.op_output_idx = None
+            v._child_ops = list()
+
+            # Get unique name
+            old_v_name = v.name
+            v.name = v.name + "_x" + str(name_count[v.name])
+            name_count[old_v_name] += 1
+
+            v._sym_val = v._sym_val
+            v.consuming_blocks = list()
+
+        cond_block, body_block, exit_vars = self._build_block(block_inputs)
+
+        # Verify exit_vars has the same types as loop_vars
+        block_input_type_change = False
+        for i, (v_in, v_out) in enumerate(zip(block_inputs, exit_vars)):
+            if not is_subtype(v_out.sym_type, v_in.sym_type):
+                compat_shape = while_loop.get_compat_shape(v_out.sym_type,
+                                                           v_in.sym_type)
+                if compat_shape is None:
+                    msg = "loop_vars '{}' changes in the body of " \
+                          "while_loop '{}':\n {} -> {}"
+                    raise ValueError(msg.format(
+                        v_in.name, self.name,
+                        v_in.sym_type, v_out.sym_type))
+                else:
+                    block_inputs[i]._sym_type = types.tensor(
+                        v_in.dtype, compat_shape)
+                    block_input_type_change = True
+            if not while_loop._check_equal_value(v_out.sym_val, v_in.sym_val):
+                block_inputs[i]._sym_val = None
+                block_input_type_change = True
+
+        if block_input_type_change:
+            # Since we are going to build the block again, we first need to remove ops
+            # in the block from each var's _child_ops.
+            while_loop._clean_up_child_ops(cond_block)
+            while_loop._clean_up_child_ops(body_block)
+
+            # Rebuild our block to invoke type inference.
+            cond_block, body_block, exit_vars = self._build_block(block_inputs)
+            for i, (v_in, v_out) in enumerate(zip(block_inputs, exit_vars)):
+                if not is_subtype(v_out.sym_type, v_in.sym_type):
+                    msg = 'Block output {}: {} is not a subtype of ' +\
+                          'block input {}: {} after factoring shape changes'
+                    raise ValueError(msg.format(v_out.name, v_out.sym_type.__name__,
+                                                v_in.name, v_in.sym_type.__name__))
+                if not while_loop._check_equal_value(v_out.sym_val, v_in.sym_val):
+                    msg = 'Block output {}: {} is not equal to ' +\
+                          'block input {}: {} after value changes'
+                    raise ValueError(msg.format(v_out.name, v_out.sym_val,
+                                                v_in.name, v_in.sym_val))
+        self.blocks.append(cond_block)
+        self.blocks.append(body_block)
+
+    @staticmethod
+    def get_compat_shape(type1, type2):
+        """
+        For tensor types `type1`, `type2` that are of the same rank, return
+        compat_shape (python list) where compat_shape[i] is integer iff type1
+        and type2 have the same integer shape on dim i. compat_shape[i] is
+        symbolic otherwise.
+
+        Return None if `type1`, `type2` have different rank or non-tensor
+        type.
+        """
+        if not types.is_tensor(type1) or not types.is_tensor(type2):
+            return None
+
+        s1 = type1.get_shape()
+        s2 = type2.get_shape()
+
+        if len(s1) != len(s2):
+            return None
+
+        compat_shape = []
+        for d1, d2 in zip(s1, s2):
+            if d1 != d2:
+                compat_shape.append(get_new_symbol())
+            else:
+                compat_shape.append(d1)
+        return compat_shape
+
+    def type_inference(self):
+        # Skip the conditional var
+        return tuple(v.sym_type for v in self.blocks[1].outputs)
+
+
+@register_op
+class make_list(Operation):
+    """
+    Create a list of tensor elements. The elements should have the same shape.
+    The list is similar to an auto-resizing array.
+
+    Parameters
+    ----------
+    init_length: <i32> (Optional, Default=1)
+        * Initial length for the list.
+ * If ``dynamic_length`` is ``False``, + ``init_length`` is the fixed length of the list throughout runtime. + + dynamic_length: (Optional, Default is True) + + elem_shape: Tuple[const] (Required) + * 1-D vector denoting the shape of elements. + * If ``T = int32``, the element shape is known at compile time. + * ``T = string`` denotes the symbolic shape, in which the shape is determined + at runtime. + * If not provided, the resulting ``List`` won’t have the elementary shape + info, which may cause backend errors. Remedy this with SSA passes. + + dtype: const (Optional, Default is fp32) + * Possible values: ``{"bool", "fp16", "fp32", "int32"}`` + * Element tensor’s ``dtype``. + + Returns + ------- + List[*] + + Attributes + ---------- + T: i32, string + """ + + input_spec = InputSpec( + init_length=TensorInputType(optional=True, type_domain=types.int32), + dynamic_length=TensorInputType(const=True, optional=True, type_domain=types.bool), + elem_shape=TupleInputType(), + dtype=TensorInputType(const=True, optional=True, type_domain=types.str), + ) + + def default_inputs(self): + return DefaultInputs( + init_length=1, + dynamic_length=True, + dtype="fp32", + ) + + def type_inference(self): + builtin_dtype = types.string_to_builtin(self.dtype.val) + if builtin_dtype is None: + raise ValueError("Unsupported dtype {}".format(self.dtype.val)) + # Replace string with symbol + elem_shape_sym = [] + for s_var in self.elem_shape: + # s is str or int + s = s_var.val + if s is None: + msg = 'make_list elem_shape must be tuple of const. ' +\ + 'Tuple elem {} is not' + raise ValueError(msg.format(s_var.name)) + + if isinstance(s, str): + try: + symbol = get_existing_symbol(s) + except ValueError: + # Must be a new symbol + symbol = get_new_symbol(s) + elem_shape_sym.append(symbol) + else: + elem_shape_sym.append(s) + elem_type = types.tensor(builtin_dtype, elem_shape_sym) + return types.list( + elem_type, + init_length=self.init_length.val, + dynamic_length=self.dynamic_length.val, + ) + + +@register_op +class list_length(Operation): + """ + Return the length of ``ls``. + + Parameters + ---------- + ls: List[*] (Required) + + Returns + ------- + + * Length of ``ls``. + + Attributes + ---------- + T: fp16, fp32, i32, bool + """ + + input_spec = InputSpec(ls=ListInputType(),) + + def type_inference(self): + return types.int32 + + @precondition(allow=VALUE | SYMBOL | NONE) + def value_inference(self): + if not self.ls.dynamic_length: + return self.ls.init_length + raise NotImplementedError() + + +@register_op +class list_write(Operation): + """ + Write a value into index ``index`` of ``ls``. + + Parameters + ---------- + ls: List (Required) + + index: (Required) + * Size of the list. + + value: <*,T> (Optional) + * Element value to write, which must match the element shape of ``ls``. + * Default is ``None``. + + Returns + ------- + List[*] + + Attributes + ---------- + T: fp16, fp32, i32, bool + """ + + input_spec = InputSpec( + ls=ListInputType(), + index=TensorInputType(type_domain=types.int32), + value=TensorInputType(type_domain="T"), + ) + + type_domains = { + "T": (types.fp16, types.fp32, types.bool, types.int32), + } + + def type_inference(self): + list_elem_type = self.ls.elem_type + value_type = self.value.sym_type + dynamic_length = self.ls.dynamic_length + init_length = self.ls.init_length + + if list_elem_type is None: + # fill in the elem type using value's type info. 
+ return types.list( + value_type, init_length=init_length, dynamic_length=dynamic_length + ) + if list_elem_type == types.unknown: + msg = "Input ls elem type unknown. Override with {}" + logger.warning(msg.format(value_type)) + return types.list( + value_type, init_length=init_length, dynamic_length=dynamic_length + ) + if not types.is_subtype(value_type, list_elem_type): + msg = "Elem type mismatch: ls elem type {} vs " + "value type {}" + raise ValueError(msg.format(list_elem_type.__type_info__(), + value_type.__type_info__())) + return self.ls.sym_type + + +@register_op +class list_read(Operation): + """ + Read the value at location ``index`` of ``ls``. + + Parameters + ---------- + ls: List[\*] (Required) + + index: (Required) + * Size of the list. + + Returns + ------- + <\*,T> + * The element's value. + + Attributes + ---------- + T: fp16, fp32, i32, bool + """ + + input_spec = InputSpec( + ls=ListInputType(), + index=TensorInputType(type_domain=types.int32), + ) + + def type_inference(self): + list_elem_type = self.ls.elem_type + if list_elem_type is None: + msg = ( + "Unknown element type. The List might not have been " + + "written to ({})" + ) + raise ValueError(msg.format(self.name)) + return list_elem_type + + +@register_op +class list_gather(Operation): + """ + Return selected values in ``ls`` as a packed ``Tensor``. + + Parameters + ---------- + ls: List[\*] (Required) + + indices: (Required) + * Gather from indices, whose element must be in ``[0, ls.length)`` at runtime. + + Returns + ------- + <\*K,T> + * Selected tensors packed into a ``len(ls.elem_shape)+1`` rank tensor. + * ``K[0] == len(indices)``. + + Attributes + ---------- + T: fp16, fp32, i32, bool + """ + + input_spec = InputSpec( + ls=ListInputType(), + indices=TensorInputType(type_domain=types.int32), + ) + + def type_inference(self): + list_elem_type = self.ls.elem_type + if list_elem_type == types.unknown: + msg = ( + "Unknown element type. The List might not have been " + + "written to ({})" + ) + raise ValueError(msg.format(self.name)) + elem_shape = list_elem_type.get_shape() + dtype = list_elem_type.get_primitive() + ret_shape = [self.indices.shape[0]] + list(elem_shape) + return types.tensor(dtype, tuple(ret_shape)) + + +@register_op +class list_scatter(Operation): + """ + Scatter ``values`` to ``ls`` at locations ``indices``. + + Parameters + ---------- + ls: List[*] (Required) + + indices: tensor (Required) + * Indices of ``ls`` to scatter to. + * Elements of ``indices`` must be in ``[0, ls.length)`` at runtime. + * If indices are greater than or equal to the list length, the list is + dynamically resized. + + value: <*,T> (Optional) + * Element value to write, which must match the element shape of ``ls``. + * Default is ``None``. + + Returns + ------- + List[*] + * Updated list. 
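+    * For example, with a dynamic list of initial length ``2``,
+      ``indices = [0, 3]`` writes element ``0`` in place and grows the list so
+      that index ``3`` becomes valid.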
+ + Attributes + ---------- + T: fp16, fp32, i32, bool + """ + + input_spec = InputSpec( + ls=ListInputType(), + indices=TensorInputType(type_domain=types.int32), + value=TensorInputType(type_domain="T"), + ) + + type_domains = { + "T": (types.fp16, types.fp32, types.bool, types.int32), + } + + def type_inference(self): + num_indices = self.indices.shape[0] + num_values = self.value.shape[0] + if num_values != num_indices: + raise ValueError( + "Cannot scatter {} values to {} indices".format(num_values, num_indices) + ) + list_elem_type = self.ls.elem_type + value_type = self.value.sym_type + dynamic_length = self.ls.dynamic_length + init_length = self.ls.init_length + + elem_type = types.tensor(value_type.get_primitive(), value_type.get_shape()[1:]) + if list_elem_type == types.unknown: + # fill in the elem type using value's type info. + return types.list( + elem_type, dynamic_length=dynamic_length, init_length=init_length + ) + if not types.is_subtype(elem_type, list_elem_type): + msg = "Elem type mismatch: ls elem type {} vs " + "value type {}" + raise ValueError(msg.format(list_elem_type, elem_type)) + return self.ls.sym_type diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/conv.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/conv.py new file mode 100644 index 00000000..ee0ffd80 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/conv.py @@ -0,0 +1,428 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools.converters.mil.mil import Operation, types +from coremltools.converters.mil.mil.block import curr_opset_version +from coremltools.converters.mil.mil.input_type import (DefaultInputs, + InputSpec, + TensorInputType) +from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op +from coremltools.converters.mil.mil.ops.defs._utils import \ + spatial_dimensions_out_shape +from coremltools.converters.mil.mil.ops.defs.iOS15 import _IOS15_TARGET + + +@register_op +class conv(Operation): + """ + Perform convolution over input. Supports 1-D, 2-D, and 3-D convolution. + + Parameters + ---------- + x: tensor<[n, C_in, \*d_in], T> (Required) + + * ``d_in`` are (possibly runtime-determined) spatial dimensions. For example, + ``d_in = [224, 224]`` for 2D convolution. + * ``1 <= len(d_in) <= 3``. + * ``C_in`` is the number of input channels or depth dimensions. + * ``n`` is the batch dimension. + + weight: tensor<[C_out, C_in/groups, \*K], T> (Required) + + * Filter weights. + * ``C_in`` is the number of input channels. + * ``C_in`` must be divisible by ``groups``. + * ``K`` are kernel sizes. For example, ``K = [KH, KW]`` for 2-D convolution. + * When ``dilations`` is not all ``1``, ``weight`` has to be ``const`` + at compile time + + strides: const tensor<[S], i32> (Optional) + + * Default to one vector of length equal to the number of spatial dimensions. + * Strides along each of the spatial dimensions. + * ``S == len(d_in)``. + + pad_type: const str (Required) + + Must be one of the following: + + * ``valid``: No padding. This is equivalent to custom pad with + ``pad[2*i] == pad[2*i+1] == 0, for i=0,...,len(d_in)-1``. + * ``custom``: Specify custom padding in the parameter ``pad``. 
+        * ``same``: Input is padded such that the output spatial shapes are
+          ``d_out[i] = ceil(d_in[i] / strides[i])``.
+        * ``same_lower``: Similar to ``same``, but the padding
+          will place extra rows/cols on the top/left if the padding amount is odd.
+
+        Specifically, for ``i = 0, ..., len(d_in)-1``, the equivalent paddings are
+        calculated as follows:
+
+        * ``dilated_kernel = (K[i] - 1) * dilate[i] + 1``
+        * If ``dilated_kernel`` is odd,
+          ``padding[2*i] = padding[2*i+1] = floor(dilated_kernel / 2)``
+        * Otherwise:
+          ``padding[2*i] = ceil((dilated_kernel - 1) / 2)``,
+          ``padding[2*i+1] = floor((dilated_kernel - 1) / 2)``
+
+    pad: const tensor<[P], i32> (Optional. Default to all zeros)
+
+        * ``len(P) = 2 * len(d_in)``
+        * ``pad`` should be specified if and only if ``pad_type == custom``,
+          otherwise errors occur.
+        * ``pad`` represents the number of elements to pad before and after each
+          dimension. Specifically, ``pad[0], pad[1]`` are the pad sizes before / after
+          spatial dimension 0, ``pad[2], pad[3]`` are the pad sizes before / after
+          spatial dimension 1, etc.
+
+    dilations: const tensor<[S], i32> (Optional. Default to all 1s)
+
+        * Dilation value along each spatial dimension in ``d_in``.
+          See `visualization `_.
+        * ``S == len(d_in)``.
+
+    groups: const tensor<[], i32> (Optional, default to 1)
+
+        * Input and output channels are split by ``groups``.
+        * ``C_in`` must be divisible by ``groups``.
+        * The maximum value for ``groups`` is ``C_in``, in which case the op is a
+          depthwise convolution.
+
+        For example (assuming ``C_in = 16, C_out = 32``):
+
+        * ``groups == 1``, ``weight`` has shape ``[32, 16, KH, KW]``: All input
+          channels are convolved with the ``weight`` kernel to produce all output
+          channels.
+        * ``groups == 2``, ``weight`` has shape ``[32, 8, KH, KW]``: Input
+          channels 0~7 are convolved with half of the ``weight`` kernel to produce
+          output channels 0~15. Similarly, input channels 8~15 are convolved with
+          the other half of ``weight`` to produce output channels 16~31.
+        * ``groups == C_in``, ``weight`` has shape ``[32, 1, KH, KW]``: Each input
+          channel is convolved with its own set of filters and each produces
+          ``C_out / C_in = 2`` channels. This is equivalent to depthwise
+          convolution.
+
+    bias: const tensor<[C_out],T> (Optional, default to all 0)
+        * Bias along output channels.
+
+    Returns
+    -------
+    tensor<[n, C_out, \*d_out], T>
+        * Output activation has the same rank and spatial dimension as the input.
+          That is, ``len(d_out) == len(d_in)``.
+        * For ``i=0,...,len(d_in)-1``, ``d_out[i] = floor[(d_in[i] + pad[2*i] +
+          pad[2*i+1] - (K[i]-1)*dilations[i] - 1) / strides[i]] + 1``.
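+        * For example (illustrative values): with ``d_in = [224, 224]``,
+          ``K = [3, 3]``, ``strides = [2, 2]``, ``dilations = [1, 1]``, and
+          ``pad_type = "valid"`` (all pads zero),
+          ``d_out[i] = floor((224 + 0 + 0 - 2 - 1) / 2) + 1 = 111``.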
+
+    Attributes
+    ----------
+    T: fp16, fp32
+
+    See Also
+    --------
+    conv_transpose
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        weight=TensorInputType(type_domain="T"),
+        bias=TensorInputType(const=True, optional=True, type_domain="T"),
+        strides=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        pad_type=TensorInputType(const=True, optional=True, type_domain=types.str),
+        pad=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        dilations=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        groups=TensorInputType(const=True, optional=True, type_domain=types.int32),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def default_inputs(self):
+        num_spatial_dims = self.x.rank - 2
+        return DefaultInputs(
+            bias=None,
+            strides=[1]*num_spatial_dims,
+            pad_type="valid",
+            pad=[0]*num_spatial_dims*2,
+            dilations=[1]*num_spatial_dims,
+            groups=1,
+        )
+
+    def type_inference(self):
+        inshape = self.x.shape
+        f_shape = self.weight.shape
+        kernel_shape = f_shape[2:]
+        C_out = f_shape[0]
+        C_in = self.x.shape[1]
+        groups = self.groups.val
+
+        if self.bias is not None and \
+            (len(self.bias.val.shape) > 1 or self.bias.val.shape[0] != C_out):
+            msg = "# of bias values {} not equal to # output channels {}"
+            raise ValueError(msg.format(self.bias.val.shape[0], C_out))
+        if C_in % groups != 0:
+            msg = "# of input channels {} not divisible by groups {}"
+            raise ValueError(msg.format(C_in, groups))
+        if C_in // groups != self.weight.shape[1]:
+            msg = "C_in / groups = {}/{} != weight[1] ({})"
+            raise ValueError(msg.format(C_in, groups, self.weight.shape[1]))
+
+        strides = self.strides.val
+        dilations = self.dilations.val
+
+        # The same_lower padding is not supported in iOS15
+        if curr_opset_version() == _IOS15_TARGET and self.pad_type.val == "same_lower":
+            msg = "iOS15 version of conv does not support pad_type = `same_lower`"
+            raise ValueError(msg)
+
+        # Ignore self.pad if pad_type != custom
+        custom_pad = None if self.pad_type.val != 'custom' else self.pad.val
+
+        if self.weight.val is None and any(d > 1 for d in dilations):
+            raise ValueError("Convolution with dynamic weights does not support dilations!")
+
+        N = inshape[0]
+        # spatial dimensions
+        d_out_shape = spatial_dimensions_out_shape(
+            pad_type=self.pad_type.val,
+            input_shape=inshape[2:],
+            kernel_shape=kernel_shape,
+            strides=strides,
+            dilations=dilations,
+            custom_pad=custom_pad,
+        )
+        retshape = [N, C_out] + d_out_shape
+        return types.tensor(self.x.dtype, tuple(retshape))
+
+
+@register_op
+class conv_quantized(conv):
+    """
+    Note: This is experimental and may change in the future.
+    Supports weight quantization for parameters while performing convolution over input.
+    ``W_float = W_quantized * scale + bias``.
+
+    Parameters
+    ----------
+    In addition to convolutional layer parameters, the following additional parameters
+    are required.
+
+    quantization_type: const str (Required)
+        * One of ``linear`` or ``lut``.
+
+    nbits: const tensor<[], i32> (Optional. Default to 8)
+        * Denotes the bit-width of the quantization. ``1 <= nbits <= 8``.
+
+    quant_scale: tensor<*?, T> (Required)
+        * Denotes the scale of quantization.
+
+    quant_bias: tensor<*?, T> (Required)
+        * Denotes the bias that is used to quantize/dequantize.
+
+    Returns
+    -------
+    tensor<[n, C_out, *d_out], T>
+        * Output activation has the same rank and spatial dimension as the input.
+          That is, ``len(d_out) == len(d_in)``.
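+        * For example (illustrative values, using the formula above): with
+          ``quantization_type = "linear"`` and ``nbits = 8``, a stored weight
+          byte ``W_quantized = 200`` with ``quant_scale = 0.02`` and
+          ``quant_bias = -2.0`` dequantizes to
+          ``W_float = 200 * 0.02 + (-2.0) = 2.0``.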
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        weight=TensorInputType(type_domain="U"),
+        bias=TensorInputType(const=True, optional=True, type_domain="U"),
+        quantization_type=TensorInputType(const=True, type_domain=types.str),
+        nbits=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        quant_scale=TensorInputType(const=True, type_domain="T"),
+        quant_bias=TensorInputType(const=True, type_domain="T"),
+        strides=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        pad_type=TensorInputType(const=True, optional=True, type_domain=types.str),
+        pad=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        dilations=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        groups=TensorInputType(const=True, optional=True, type_domain=types.int32),
+    )
+
+    type_domains = {
+        "T": (types.fp32, types.fp16),
+        "U": (types.uint8,),
+    }
+
+    def default_inputs(self):
+        return super().default_inputs() + \
+            DefaultInputs(
+                nbits=8,
+            )
+
+@register_op
+class conv_transpose(Operation):
+    """
+    Perform transposed convolution (also known as deconvolution and fractionally
+    strided convolution) over input. ``conv_transpose`` can also be used to compute
+    the gradient of conv. Supports 1-D, 2-D, and 3-D convolution.
+
+    Parameters
+    ----------
+
+    x: tensor<[n,C_in,*D_in],T> (Required)
+        * Input data.
+        * ``D_in`` are spatial dimensions.
+        * ``1 <= len(D_in) <= 3``.
+        * ``C_in`` is the number of input channels.
+
+    weight: const tensor<[C_in,C_out/groups,*K], T> (Required)
+        * Filter weights. ``C_in, C_out`` are the number of input and output channels
+          respectively.
+        * ``K`` are the kernel spatial dimensions. ``1 <= len(K) <= 3``.
+
+    bias: const tensor<[C_out],T> (Optional, default to all 0)
+        * Bias added along output channels.
+
+    pad: const tensor<[P],i32> (Optional, default to all 0s)
+        * Number of elements to pad before and after each dimension.
+        * ``P == 2 * len(D_in)``.
+        * ``pad[2*i], pad[2*i+1]`` are the pad sizes before and after
+          dimension ``i``, where ``0 <= i < len(D_in)``.
+
+    output_shape: const tensor<[P],i32> (Optional, default None)
+        * Expected output shape. The first two dimensions must be ``[n, C_out]``.
+        * The output shape of ``conv_transpose`` is underdetermined in general,
+          because ``conv`` can map multiple input shapes to a single output shape.
+          For example, for ``same`` padding mode, ``conv_out = ceil(conv_in/stride)``.
+          Hence we need ``output_shape`` when this occurs (see the example after
+          this parameter list).
+
+    pad_type: const str (Optional, default valid)
+        * One of ``same``, ``valid``, or ``custom``.
+
+    strides: const tensor<[S],i32> (Optional. Default to all 1s)
+        * Stride along each of the spatial dimensions.
+        * ``S == len(D_in)``.
+
+    dilations: const tensor<[S],i32> (Optional. Default to all 1s)
+        * Dilation value along each spatial dimension in ``D_in``. See ``conv``.
+        * ``S == len(D_in)``.
+
+    groups: const tensor<[], i32> (Optional. Default to 1)
+        * Input and output channels are separated into ``groups``.
+        * ``C_in`` and ``C_out`` must be divisible by the number of groups.
+          See ``conv`` for examples.
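+
+    For example (illustrative): with ``strides = [2]`` and ``pad_type = "same"``,
+    ``conv`` maps spatial inputs of length ``9`` and ``10`` to the same output
+    length ``ceil(D_in/2) = 5``, so the transposed output length is ambiguous
+    unless ``output_shape`` is given.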
+
+    Returns
+    -------
+    tensor<[n,C_out,*D_out],T>
+        * If ``output_shape`` is not ``None``:
+
+          ``Dout = output_shape``
+
+        * If ``pad_type == "custom"``:
+
+          ``Dout[i] = (D_in[i]-1)*stride[i] + (K[i]-1) * dilation[i] + 1 - pad[2*i] - pad[2*i+1]``
+
+        * If ``pad_type == "valid"``:
+
+          ``Dout[i] = (D_in[i]-1)*stride[i] + (K[i]-1) * dilation[i] + 1``
+
+        * If ``pad_type == "same"``:
+
+          ``Dout[i] = D_in[i] * stride[i]``
+
+
+    Attributes
+    ----------
+    T: fp16, fp32
+
+    See Also
+    --------
+    conv
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),  # [n, C_in, spatial_dims]
+        weight=TensorInputType(const=True, type_domain="T"),  # [C_in, C_out/groups, spatial_dims]
+        bias=TensorInputType(const=True, optional=True, type_domain="T"),
+        pad=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        output_shape=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        pad_type=TensorInputType(const=True, optional=True, type_domain=types.str),
+        strides=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        dilations=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        groups=TensorInputType(const=True, optional=True, type_domain=types.int32),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def default_inputs(self):
+        num_spatial_dims = self.x.rank - 2
+        return DefaultInputs(
+            bias=None,
+            pad=[0]*2*num_spatial_dims,
+            output_shape=None,
+            pad_type="valid",
+            strides=[1]*num_spatial_dims,
+            dilations=[1]*num_spatial_dims,
+            groups=1,
+        )
+
+    def type_inference(self):
+        # Input shape is [n, C_in, spatial_dims]
+        in_shape = self.x.shape
+        # Weight shape is [C_in, C_out/groups, spatial_dims]
+        f_shape = self.weight.shape
+        kernel_shape = f_shape[2:]
+        spatial_dim_rank = len(in_shape) - 2
+        N = in_shape[0]
+        C_in = in_shape[1]
+        groups = self.groups.val
+        C_out = f_shape[1] * groups
+
+        if self.bias is not None and self.bias.val.shape[0] != C_out:
+            msg = "# of bias values {} not equal to # output channels {}"
+            raise ValueError(msg.format(self.bias.val.shape[0], C_out))
+        if C_in % groups != 0:
+            msg = "# of input channels {} not divisible by groups {}"
+            raise ValueError(msg.format(C_in, groups))
+
+        # If output shape is given, return it
+        if self.output_shape is not None:
+            output_shape = self.output_shape.val
+            assert output_shape[0] == N
+            assert output_shape[1] == C_out
+            return types.tensor(
+                self.x.dtype, tuple(output_shape)
+            )
+
+        strides = self.strides.val
+        dilations = self.dilations.val
+        kernel_shape = [
+            (kernel_shape[r] - 1) * dilations[r] + 1 for r in range(spatial_dim_rank)
+        ]
+
+        D_in = in_shape[2:]  # spatial dimensions
+
+        # Deconv's output shape is underdetermined; we follow TF's shape logic here.
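+        # ("same" keeps D_out[r] = D_in[r] * stride[r]; "valid" adds the full
+        # dilated kernel extent; "custom" additionally subtracts the explicit pads.)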
+        if self.pad_type.val == "same":
+            d_out_shape = [strides[r] * D_in[r] for r in range(spatial_dim_rank)]
+        elif self.pad_type.val == "valid":
+            d_out_shape = [
+                strides[r] * (D_in[r]-1) + kernel_shape[r]
+                for r in range(spatial_dim_rank)
+            ]
+        elif self.pad_type.val == "custom":
+            if self.pad is None:
+                raise ValueError("self.pad must exist if pad_type is custom")
+            pad = self.pad.val
+            d_out_shape = [
+                strides[r] * (D_in[r] - 1)
+                + kernel_shape[r]
+                - pad[2 * r]
+                - pad[2 * r + 1]
+                for r in range(spatial_dim_rank)
+            ]
+        else:
+            raise ValueError(
+                'conv_transpose: unrecognized pad_type "{}"'.format(self.pad_type.val)
+            )
+
+        retshape = [N, C_out] + d_out_shape
+        return types.tensor(self.x.dtype, tuple(retshape))
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/elementwise_binary.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/elementwise_binary.py
new file mode 100644
index 00000000..1f89facb
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/elementwise_binary.py
@@ -0,0 +1,638 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+import operator
+
+import numpy as np
+
+from coremltools.converters.mil.mil import (InputSpec, Operation,
+                                            TensorInputType, precondition,
+                                            types)
+from coremltools.converters.mil.mil.operation import VALUE
+from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op
+from coremltools.converters.mil.mil.ops.defs._utils import (
+    broadcast_shapes, promoted_primitive_type)
+
+
+class elementwise_binary(Operation):
+    """
+    Elementwise Binary Op Superclass
+    """
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        y=TensorInputType(type_domain="T"),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32),
+    }
+
+    def type_inference(self):
+        typea = self.x.sym_type
+        typeb = self.y.sym_type
+        primitive_type = promoted_primitive_type(typea, typeb)
+        if primitive_type is None:
+            raise ValueError("Incompatible primitive types in broadcast operation")
+        primitive_type = self.get_dtype(primitive_type)
+
+        # broadcast
+        if not types.is_tensor(typea) and not types.is_tensor(typeb):
+            # both typea and typeb are not tensors
+            return primitive_type
+        if types.is_tensor(typea) and not types.is_tensor(typeb):
+            # a is tensor, b is not
+            return types.tensor(primitive_type, typea.get_shape())
+        if not types.is_tensor(typea) and types.is_tensor(typeb):
+            # a is not tensor, b is
+            return types.tensor(primitive_type, typeb.get_shape())
+
+        # both a, b are tensors
+        shapea = list(typea.get_shape())
+        shapeb = list(typeb.get_shape())
+        ret_shape = broadcast_shapes(shapea, shapeb)
+        return types.tensor(primitive_type, ret_shape)
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        return self._cast_check_value_inferene(self.x.val, self.y.val)
+
+    def get_operator(self):
+        """
+        All subclasses have to implement this.
+        """
+        raise NotImplementedError()
+
+    def get_dtype(self, promoted_dtype):
+        """
+        Override if the output primitive type differs from the input types
+        (e.g., less, greater).
+        """
+        return promoted_dtype
+
+    def _cast_check_value_inferene(self, a, b):
+        """
+        If one of the inputs is a tensor, cast the result back to a tensor.
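+        (Applying a Python operator to two scalar inputs returns a bare scalar;
+        wrapping the result in ``np.array`` keeps tensor-valued ops consistent.)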
+ """ + to_cast = any([isinstance(x, np.ndarray) for x in [a, b]]) + result = self.get_operator()(a, b) + return result if not to_cast else np.array(result) + + +class elementwise_binary_logical(elementwise_binary): + """ + Elementwise Binary Logical Op Superclass + """ + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + y=TensorInputType(type_domain="T"), + ) + + type_domains = { + "T": (types.bool,), + } + + +""" +Elementwise Binary Op Implementation(s) +""" + + +@register_op +class add(elementwise_binary): + """ + Return ``x + y`` element-wise with + `broadcasting `_. + + Parameters + ---------- + x: <\*,T> (Required) + * Shape must be compatible with ``y`` in broadcast. + + y: <\*,T> (Required) + * Shape must be compatible with ``x`` in broadcast. + + Returns + ------- + <\*,T> + + Attributes + ---------- + T: fp16, fp32, i32 + """ + + def get_operator(self): + return operator.add + + +@register_op +class equal(elementwise_binary): + """ + Return the truth value of ``x == y`` element-wise with + `broadcasting `_ + (``1`` for true, ``0`` for false in numeric domain). + + Parameters + ---------- + x: <\*,T> (Required) + * Shape must be compatible with ``y`` in broadcast. + + y: <\*,T> (Required) + * Shape must be compatible with ``x`` in broadcast. + + Returns + ------- + <\*, bool> + * A boolean tensor with the same shape as the inputs. + + Attributes + ---------- + T: fp16, fp32, i32 + """ + + def get_operator(self): + return np.equal + + def get_dtype(self, promoted_dtype): + return types.bool + + +@register_op +class floor_div(elementwise_binary): + """ + Return ``x / y`` element-wise with + `broadcasting `_, + rounded towards negative infinity. + + Parameters + ---------- + x: tensor<\*, T> (Required) + * Shape must be compatible with ``y`` in broadcast. + + y: tensor<\*, T> (Required) + * Shape must be compatible with ``x`` in broadcast. + + Returns + ------- + tensor<\*, T> + * A tensor of the same type and shape as the inputs. + + Attributes + ---------- + T: fp16, fp32, i32 + """ + + def get_operator(self): + return operator.floordiv + + +@register_op +class greater(elementwise_binary): + """ + Return the truth value of ``x > y`` element-wise with + `broadcasting `_ + (``1`` for true, ``0`` for false in numeric domain). + + Parameters + ---------- + x: tensor<\*, T> (Required) + * Shape must be compatible with ``y`` in broadcast. + + y: tensor<\*, T> (Required) + * Shape must be compatible with ``x`` in broadcast. + + Returns + ------- + tensor<\*, bool> + * A boolean tensor with the same shape as the inputs. + + Attributes + ---------- + T: fp16, fp32, i32 + """ + + def get_operator(self): + return operator.gt + + def get_dtype(self, promoted_dtype): + return types.bool + + +@register_op +class greater_equal(elementwise_binary): + """ + Return the truth value of ``x >= y`` element-wise with + `broadcasting `_ + (``1`` for true, ``0`` for false in numeric domain). + + Parameters + ---------- + x: tensor<\*, T> (Required) + * Shape must be compatible with ``y`` in broadcast. + + y: tensor<\*, T> (Required) + * Shape must be compatible with ``x`` in broadcast. + + Returns + ------- + tensor<\*?, bool> + * A boolean tensor with the same shape as the inputs. 
+ + Attributes + ---------- + T: fp16, fp32, i32 + """ + + def get_operator(self): + return operator.ge + + def get_dtype(self, promoted_dtype): + return types.bool + + +@register_op +class less(elementwise_binary): + """ + Return the truth value of ``x < y`` element-wise with + `broadcasting `_ + (``1`` for true, ``0`` for false in numeric domain). + + Parameters + ---------- + x: tensor<\*, T> (Required) + * Shape must be compatible with ``y`` in broadcast. + + y: tensor<\*, T> (Required) + * Shape must be compatible with ``x`` in broadcast. + + Returns + ------- + tensor<\*?, bool> + * A boolean tensor with the same shape as the inputs. + + Attributes + ---------- + T: fp16, fp32, i32 + """ + + def get_operator(self): + return operator.lt + + def get_dtype(self, promoted_dtype): + return types.bool + + +@register_op +class less_equal(elementwise_binary): + """ + Return the truth value of ``x <= y`` element-wise with + `broadcasting `_ + (``1`` for true, ``0`` for false in numeric domain). + + Parameters + ---------- + x: tensor<\*, T> (Required) + * Shape must be compatible with ``y`` in broadcast. + + y: tensor<\*, T> (Required) + * Shape must be compatible with ``x`` in broadcast. + + Returns + ------- + tensor<\*?, bool> + * A boolean tensor with the same shape as the inputs. + + Attributes + ---------- + T: fp16, fp32, i32 + """ + + def get_operator(self): + return operator.le + + def get_dtype(self, promoted_dtype): + return types.bool + + +@register_op +class logical_and(elementwise_binary_logical): + """ + Return the truth value of ``x AND y`` element-wise with + `broadcasting `_ + + Parameters + ---------- + x: tensor<\*, T> (Required) + * Shape must be compatible with ``y`` in broadcast. + + y: tensor<\*, T> (Required) + * Shape must be compatible with ``x`` in broadcast. + + Returns + ------- + tensor<\*?, bool> + * A boolean tensor with the same shape as the inputs. + + Attributes + ---------- + T: bool + + """ + + def get_operator(self): + return np.logical_and + + def get_dtype(self, promoted_dtype): + return types.bool + + +@register_op +class logical_or(elementwise_binary_logical): + """ + Return the truth value of ``x OR y`` element-wise with + `broadcasting `_ + + Parameters + ---------- + x: tensor<\*, T> (Required) + * Shape must be compatible with ``y`` in broadcast. + + y: tensor<\*, T> (Required) + * Shape must be compatible with ``x`` in broadcast. + + Returns + ------- + tensor<\*?, bool> + * A boolean tensor with the same shape as the inputs. + + Attributes + ---------- + T: bool + + """ + + def get_operator(self): + return np.logical_or + + def get_dtype(self, promoted_dtype): + return types.bool + + +@register_op +class logical_xor(elementwise_binary_logical): + """ + Return the truth value of ``x XOR y`` element-wise with + `broadcasting `_ + + Parameters + ---------- + x: tensor<\*, T> (Required) + * Shape must be compatible with ``y`` in broadcast. + + y: tensor<\*, T> (Required) + * Shape must be compatible with ``x`` in broadcast. + + Returns + ------- + tensor<\*?, bool> + * A boolean tensor with the same shape as the inputs. + + Attributes + ---------- + T: bool + + """ + + def get_operator(self): + return np.logical_xor + + def get_dtype(self, promoted_dtype): + return types.bool + + +@register_op +class maximum(elementwise_binary): + """ + Return ``x > y ? x : y`` element-wise with + `broadcasting `_. + + Parameters + ---------- + x: tensor<\*, T> (Required) + * Shape must be compatible with ``y`` in broadcast. 
+ + y: tensor<\*, T> (Required) + * Shape must be compatible with ``x`` in broadcast. + + Returns + ------- + tensor<\*?, T> + * A tensor with the broadcasted shape from inputs, and type is derived from inputs. + + Attributes + ---------- + T: fp16, fp32, i32 + """ + + def get_operator(self): + return np.maximum + + +@register_op +class minimum(elementwise_binary): + """ + Return ``x > y ? y : x`` element-wise with + `broadcasting `_. + + Parameters + ---------- + x: tensor<\*, T> (Required) + * Shape must be compatible with ``y`` in broadcast. + + y: tensor<\*, T> (Required) + * Shape must be compatible with ``x`` in broadcast. + + Returns + ------- + tensor<\*?, T> + * A tensor with the broadcasted shape from inputs, and type is derived from inputs. + + Attributes + ---------- + T: fp16, fp32, i32 + """ + + def get_operator(self): + return np.minimum + + +@register_op +class mod(elementwise_binary): + """ + Return ``x % y`` element-wise with + `broadcasting `_. + + Parameters + ---------- + x: tensor<\*, T> (Required) + * Shape must be compatible with ``y`` in broadcast. + + y: tensor<\*, T> (Required) + * Shape must be compatible with ``x`` in broadcast. + + Returns + ------- + tensor<\*?, T> + * A tensor with the broadcasted shape from inputs, and type is derived from inputs. + + Attributes + ---------- + T: fp16, fp32, i32 + """ + + def get_operator(self): + return operator.mod + + +@register_op +class mul(elementwise_binary): + """ + Return ``x * y`` element-wise with + `broadcasting `_. + + Parameters + ---------- + x: tensor<\*, T> (Required) + * Shape must be compatible with ``y`` in broadcast. + + y: tensor<\*, T> (Required) + * Shape must be compatible with ``x`` in broadcast. + + Returns + ------- + tensor<\*?, T> + * A tensor with the broadcasted shape from inputs, and type is derived from inputs. + + Attributes + ---------- + T: fp16, fp32, i32 + """ + + def get_operator(self): + return operator.mul + + +@register_op +class not_equal(elementwise_binary): + """ + Return the truth value of ``x != y`` element-wise with + `broadcasting `_ + (``1`` for true, ``0`` for false in numeric domain). + + Parameters + ---------- + x: tensor<\*, T> (Required) + * Shape must be compatible with ``y`` in broadcast. + + y: tensor<\*, T> (Required) + * Shape must be compatible with ``x`` in broadcast. + + Returns + ------- + tensor<\*?, bool> + * A boolean tensor with the broadcasted shape from inputs. + + Attributes + ---------- + T: fp16, fp32, i32 + """ + + def get_operator(self): + return operator.ne + + def get_dtype(self, promoted_dtype): + return types.bool + + +@register_op +class real_div(elementwise_binary): + """ + Return ``x / y`` element-wise with + `broadcasting `_. + + Parameters + ---------- + x: tensor<\*, T> (Required) + * Shape must be compatible with ``y`` in broadcast. + + y: tensor<\*, T> (Required) + * Shape must be compatible with ``x`` in broadcast. + + Returns + ------- + tensor<\*?, T> + * A tensor with the broadcasted shape from inputs, and type is derived from inputs. + + Attributes + ---------- + T: fp16, fp32, i32 + """ + + def get_operator(self): + return operator.truediv + + +@register_op +class pow(elementwise_binary): + """ + Return ``x ^ y`` element-wise with + `broadcasting `_. + + Parameters + ---------- + x: tensor<\*, T> (Required) + * Shape must be compatible with ``y`` in broadcast. + + y: tensor<\*, T> (Required) + * Shape must be compatible with ``x`` in broadcast. 
+ + Returns + ------- + tensor<\*?, T> + * A tensor with the broadcasted shape from inputs, and type is derived from inputs. + + Attributes + ---------- + T: fp16, fp32, i32 + """ + + def get_operator(self): + return operator.pow + + +@register_op +class sub(elementwise_binary): + """ + Return ``x - y`` element-wise with + `broadcasting `_. + + Parameters + ---------- + x: tensor<\*, T> (Required) + * Shape must be compatible with ``y`` in broadcast. + + y: tensor<\*, T> (Required) + * Shape must be compatible with ``x`` in broadcast. + + Returns + ------- + tensor<\*?, T> + * A tensor with the broadcasted shape from inputs, and type is derived from inputs. + + Attributes + ---------- + T: fp16, fp32, i32 + """ + + def get_operator(self): + return operator.sub diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/elementwise_unary.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/elementwise_unary.py new file mode 100644 index 00000000..1ef87516 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/elementwise_unary.py @@ -0,0 +1,898 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import math + +import numpy as np + +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.input_type import (DefaultInputs, + InputSpec, + TensorInputType) +from coremltools.converters.mil.mil.operation import (SYMBOL, VALUE, Operation, + precondition) +from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op +from coremltools.converters.mil.mil.types import nptype_from_builtin +from coremltools.converters.mil.mil.types.symbolic import is_symbolic + + +def _maintain_shape(x, y): + # numpy converts rank 0 tensors to scalars + if x.ndim == 0: + # convert back to rank 0 tensor + return np.array(y) + return y + + +class elementwise_unary(Operation): + """ + Elementwise Unary Op Superclass + """ + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + ) + + type_domains = { + "T": (types.fp16, types.fp32), + } + + def type_inference(self): + return self.x.sym_type + +class elementwise_unary_with_int(Operation): + """ + Elementwise Unary Op Superclass + """ + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + ) + + type_domains = { + "T": (types.fp16, types.fp32, types.int32), + } + + def type_inference(self): + return self.x.sym_type + +""" +Elementwise unary op implementation(s) +""" + +@register_op +class abs(elementwise_unary_with_int): + """ + Return the absolute values of the input ``x``, element-wise. + + Parameters + ---------- + x: tensor<[\*d], T> (Required) + + Returns + ------- + tensor<[\*d], T> + * A tensor of the same shape as ``x``. + + Attributes + ---------- + T: fp16, fp32, i32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + result = np.abs(self.x.val) + return _maintain_shape(self.x.val, result) + + +@register_op +class acos(elementwise_unary): + """ + Return the inverse cosine values of the input ``x``, element-wise. + + Parameters + ---------- + x: tensor<[\*d], T> (Required) + + Returns + ------- + tensor<[\*d], T> + * A tensor of the same shape as ``x``. 
+ + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + result = np.arccos(self.x.val) + return _maintain_shape(self.x.val, result) + + +@register_op +class asin(elementwise_unary): + """ + Return the inverse sine of the input ``x``, element-wise. + + Parameters + ---------- + x: tensor<[\*d], T> (Required) + + Returns + ------- + tensor<[\*d], T> + * A tensor of the same shape as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + result = np.arcsin(self.x.val) + return _maintain_shape(self.x.val, result) + + +@register_op +class atan(elementwise_unary): + """ + Return the inverse tangent of the input ``x``, element-wise. + + Parameters + ---------- + x: tensor<[\*d], T> (Required) + + Returns + ------- + tensor<[\*d], T> + * A tensor of the same shape as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + result = np.arctan(self.x.val) + return _maintain_shape(self.x.val, result) + + +@register_op +class atanh(elementwise_unary): + """ + Return the inverse hyperbolic tangent values of the input + ``x``, element-wise. + + Parameters + ---------- + x: tensor<[\*d], T> (Required) + + Returns + ------- + tensor<[\*d], T> + * A tensor of the same shape as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + result = np.arctanh(self.x.val) + return _maintain_shape(self.x.val, result) + + +@register_op +class ceil(elementwise_unary): + """ + Return the ceil values of the input ``x``, element-wise. + + Parameters + ---------- + x: tensor<[\*d], T> (Required) + + Returns + ------- + tensor<[\*d], T> + * A tensor of the same shape as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + result = np.ceil(self.x.val) + return _maintain_shape(self.x.val, result) + + +@register_op +class clip(Operation): + """ + Clip the values in the input ``x`` to ``[alpha, beta]``, element-wise. + Any values less than ``alpha`` are set to ``alpha``, and any values greater + than ``beta`` are set to ``beta``. + + Parameters + ---------- + x: tensor<[\*d], T> (Required) + alpha: const T (Required) + beta: const T (Required) + + Returns + ------- + tensor<[\*d], T> + * A tensor of the same shape as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + alpha=TensorInputType(const=True, type_domain="T"), + beta=TensorInputType(const=True, type_domain="T"), + ) + + type_domains = { + "T": (types.fp16, types.fp32), + } + + def type_inference(self): + return self.x.sym_type + + @precondition(allow=VALUE) + def value_inference(self): + return np.minimum(np.maximum(self.x.val, self.alpha.val), self.beta.val) + + +@register_op +class cos(elementwise_unary): + """ + Return cosine of ``x`` element-wise. Input domain is ``(-inf, inf)`` and + output range is ``[-1,1]``. + + Parameters + ---------- + x: tensor<[\*d], T> (Required) + + Returns + ------- + tensor<[\*d], T> + + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + result = np.cos(self.x.val) + return _maintain_shape(self.x.val, result) + + +@register_op +class cosh(elementwise_unary): + """ + Return hyperbolic cosine of the input ``x``, element-wise. 
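+    Equivalently, ``cosh(x) = (exp(x) + exp(-x)) / 2``.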
+ + Parameters + ---------- + x: tensor<[\*d], T> (Required) + + Returns + ------- + tensor<[\*d], T> + * A tensor of the same shape as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + result = np.cosh(self.x.val) + return _maintain_shape(self.x.val, result) + + +@register_op +class erf(elementwise_unary): + """ + Return the gauss error function of the input ``x``, element-wise. + + Parameters + ---------- + x: tensor<[\*d], T> (Required) + + Returns + ------- + tensor<[\*d], T> + * A tensor of the same shape as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + erf_vector_function = np.vectorize(math.erf) + return erf_vector_function(self.x.val) + + +@register_op +class exp(elementwise_unary): + """ + Return e^x, element-wise. + + Parameters + ---------- + x: tensor<[\*d], T> (Required) + + Returns + ------- + tensor<[\*d], T> + * A tensor of the same shape as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + result = np.exp(self.x.val) + return _maintain_shape(self.x.val, result) + + +@register_op +class exp2(elementwise_unary_with_int): + """ + Return 2^x, element-wise. + + Parameters + ---------- + x: tensor<[\*d], T> (Required) + + Returns + ------- + tensor<[\*d], T> + * A tensor of the same shape as ``x``. + + Attributes + ---------- + T: fp16, fp32, i32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + result = np.exp2(self.x.val) + return _maintain_shape(self.x.val, result) + + +@register_op +class floor(elementwise_unary): + """ + Return the floor of the input ``x``, element-wise, the same as rounding + towards negative infinity. + + Parameters + ---------- + x: tensor<[\*d], T> (Required) + + Returns + ------- + tensor<[\*d], T> + * A tensor of the same shape as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + @precondition(allow=VALUE) + def value_inference(self): + result = np.floor(self.x.val) + return _maintain_shape(self.x.val, result) + + +@register_op +class inverse(Operation): + """ + Return the reciprocal value of the input ``x``, element-wise. + + Parameters + ---------- + x: tensor<[\*d], T> (Required) + epsilon: const T (Optional, default=1e-4) + * This is a small constant that is added to the input, before taking its + inverse, for stability. + * ``y = 1 / (x + epsilon)``. + + Returns + ------- + tensor<[\*d], T> + * A tensor of the same shape as ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + epsilon=TensorInputType(const=True, optional=True, type_domain="T"), + ) + + type_domains = { + "T": (types.fp16, types.fp32), + } + + def default_inputs(self): + return DefaultInputs( + epsilon=nptype_from_builtin(self.x.dtype)(1e-4), + ) + + def type_inference(self): + return self.x.sym_type + + @precondition(allow=VALUE) + def value_inference(self): + return np.array(np.reciprocal(self.x.val + self.epsilon.val), copy=False) + + +@register_op +class log(Operation): + """ + Return the natural logarithm value of the input ``x``, element-wise. + + Parameters + ---------- + x: tensor<[\*d], T> (Required) + epsilon: const T (Optional, default=1e-45) + * This is a small constant that is added to the input, before taking log. + * ``y = log(x + epsilon)``. + + Returns + ------- + tensor<[\*d], T> + * A tensor of the same shape as ``x``. 
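+        * Note that the default ``epsilon`` (``1e-45``) is on the order of the
+          smallest positive subnormal ``fp32`` value, so by default it only
+          guards against taking ``log(0)``.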
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        epsilon=TensorInputType(const=True, optional=True, type_domain="T"),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            epsilon=nptype_from_builtin(self.x.dtype)(1e-45)
+        )
+
+    def type_inference(self):
+        return self.x.sym_type
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        return np.log(self.x.val + self.epsilon.val)
+
+
+@register_op
+class logical_not(Operation):
+    """
+    Return the truth value of NOT ``x``, element-wise (``1`` for true, ``0``
+    for false in the numeric domain). A numeric value ``t`` is evaluated as
+    true iff ``t != 0``.
+
+    Parameters
+    ----------
+    x: tensor<[\*d], bool> (Required)
+
+    Returns
+    -------
+    tensor<[\*d], bool>
+        * A tensor of the same shape as ``x``.
+
+    Attributes
+    ----------
+    T: bool
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain=types.bool),
+    )
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        return np.logical_not(self.x.val)
+
+    def type_inference(self):
+        return self.x.sym_type
+
+
+@register_op
+class round(elementwise_unary):
+    """
+    Round the input ``x`` to the nearest integer, element-wise.
+    ``0.5`` is rounded to ``0``.
+
+    Parameters
+    ----------
+    x: tensor<[\*d], T> (Required)
+
+    Returns
+    -------
+    tensor<[\*d], T>
+        * A tensor of the same shape as ``x``.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        result = np.round(self.x.val)
+        return _maintain_shape(self.x.val, result)
+
+
+@register_op
+class rsqrt(Operation):
+    """
+    Return the reciprocal of the square root of the input ``x``, element-wise.
+
+    Parameters
+    ----------
+    x: tensor<[\*d], T> (Required)
+    epsilon: const T (Optional, default=1e-12)
+        * This is a small constant that is added to the input, before applying the
+          ``rsqrt`` function, for stability.
+        * ``y = 1 / sqrt(x + epsilon)``.
+
+    Returns
+    -------
+    tensor<[\*d], T>
+        * A tensor of the same shape as ``x``.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        epsilon=TensorInputType(const=True, optional=True, type_domain="T"),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            epsilon=nptype_from_builtin(self.x.dtype)(1e-12),
+        )
+
+    def type_inference(self):
+        return self.x.sym_type
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        result = 1.0 / np.sqrt(self.x.val + self.epsilon.val)
+        return _maintain_shape(self.x.val, result)
+
+
+@register_op
+class sign(elementwise_unary_with_int):
+    """
+    Return the sign of the input ``x``, element-wise.
+
+    Each element in the output is ``-1``, ``0``, or ``1``, matching the sign of
+    the corresponding input element.
+
+    Parameters
+    ----------
+    x: tensor<[\*d], T> (Required)
+
+    Returns
+    -------
+    tensor<[\*d], T>
+        * A tensor of the same shape as ``x``.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32
+    """
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        result = np.sign(self.x.val)
+        return _maintain_shape(self.x.val, result)
+
+
+@register_op
+class sin(elementwise_unary):
+    """
+    Return the sine value of the input ``x``, element-wise.
+
+    Parameters
+    ----------
+    x: tensor<[\*d], T> (Required)
+
+    Returns
+    -------
+    tensor<[\*d], T>
+        * A tensor of the same shape as ``x``.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        result = np.sin(self.x.val)
+        return _maintain_shape(self.x.val, result)
+
+
+@register_op
+class sinh(elementwise_unary):
+    """
+    Return the hyperbolic sine value of the input ``x``, element-wise.
+
+    Parameters
+    ----------
+    x: tensor<[\*d], T> (Required)
+
+    Returns
+    -------
+    tensor<[\*d], T>
+        * A tensor of the same shape as ``x``.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        result = np.sinh(self.x.val)
+        return _maintain_shape(self.x.val, result)
+
+
+@register_op
+class sqrt(elementwise_unary):
+    """
+    Return the square root of the input ``x``, element-wise.
+
+    Parameters
+    ----------
+    x: tensor<[\*d], T> (Required)
+
+    Returns
+    -------
+    tensor<[\*d], T>
+        * A tensor of the same shape as ``x``.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        result = np.sqrt(self.x.val)
+        return _maintain_shape(self.x.val, result)
+
+
+@register_op
+class square(elementwise_unary_with_int):
+    """
+    Return ``x^2``, element-wise.
+
+    Parameters
+    ----------
+    x: tensor<[\*d], T> (Required)
+
+    Returns
+    -------
+    tensor<[\*d], T>
+        * A tensor of the same shape as ``x``.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32
+    """
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        return np.square(self.x.val)
+
+
+@register_op
+class tan(elementwise_unary):
+    """
+    Return the tangent value of the input ``x``, element-wise. Both input and output
+    ranges are ``(-inf, inf)``.
+
+    Parameters
+    ----------
+    x: tensor<[\*d], T> (Required)
+
+    Returns
+    -------
+    tensor<[\*d], T>
+        * A tensor of the same shape as ``x``.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        result = np.tan(self.x.val)
+        return _maintain_shape(self.x.val, result)
+
+
+@register_op
+class tanh(elementwise_unary):
+    """
+    Return the hyperbolic tangent value of the input ``x``, element-wise. The
+    input range is ``(-inf, inf)`` and the output range is ``[-1, 1]``.
+
+    Parameters
+    ----------
+    x: tensor<[\*d], T> (Required)
+
+    Returns
+    -------
+    tensor<[\*d], T>
+        * A tensor of the same shape as ``x``.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        result = np.tanh(self.x.val)
+        return _maintain_shape(self.x.val, result)
+
+
+@register_op
+class threshold(Operation):
+    """
+    Set a lower bound ``alpha`` to the values in the input ``x``, element-wise.
+    Any values less than ``alpha`` are set to ``alpha``.
+
+    Parameters
+    ----------
+    x: tensor<[\*d], T> (Required)
+    alpha: const T (Required)
+
+    Returns
+    -------
+    tensor<[\*d], T>
+        * A tensor of the same shape as ``x``.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        alpha=TensorInputType(const=True, type_domain="T"),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32),
+    }
+
+    def type_inference(self):
+        return self.x.sym_type
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        return np.maximum(self.x.val, self.alpha.val)
+
+
+@register_op
+class cast(Operation):
+    """
+    Cast the input ``x`` to the new type ``dtype``.
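+    Note that ``int64`` and ``fp64`` requests are mapped to ``int32`` and
+    ``fp32`` respectively (see ``type_map`` in ``type_inference`` below), since
+    those are the widest types supported here.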
+
+    Parameters
+    ----------
+    x: tensor<[\*d], T> (Required)
+    dtype: const str (Required)
+        * Can be one of the following types: ``int32``, ``int64``, ``fp16``,
+          ``fp32``, ``fp64``, or ``bool``.
+
+    Returns
+    -------
+    tensor<[\*d], dtype>
+        * A tensor of the same shape as ``x``, with type ``dtype``.
+
+    Attributes
+    ----------
+    T: i32, i64, fp16, fp32, fp64, bool
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        dtype=TensorInputType(const=True, type_domain=types.str)
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.fp64, types.int32, types.int64, types.bool),
+    }
+
+    def type_inference(self):
+        type_map = {
+            "int32": types.int32,
+            "int64": types.int32,
+            "fp16": types.fp16,
+            "fp32": types.fp32,
+            "fp64": types.fp32,
+            "bool": types.bool,
+        }
+
+        if self.dtype.val not in type_map.keys():
+            raise NotImplementedError(
+                "Parameter dtype of the cast operation can be one of the {}. "
+                "Provided {}".format(type_map.keys(), self.dtype.val)
+            )
+
+        if not types.is_tensor(self.x.sym_type):
+            return type_map[self.dtype.val]
+
+        ret_shape = self.x.shape
+        return types.tensor(type_map[self.dtype.val], ret_shape)
+
+    @precondition(allow=VALUE | SYMBOL)
+    def value_inference(self):
+        return self.get_cast_value(self.x, self.dtype.val)
+
+    @staticmethod
+    def get_cast_value(input_var, dtype_val):
+        type_map = {
+            "int32": np.int32,
+            "int64": np.int32,
+            "fp16": np.float16,
+            "fp32": np.float32,
+            "fp64": np.float32,
+            "bool": bool,
+        }
+
+        if dtype_val not in type_map.keys():
+            raise NotImplementedError(
+                "Parameter dtype of the cast operation can be one of the {}. "
+                "Provided {}".format(type_map.keys(), dtype_val)
+            )
+
+        if input_var.val is None:
+            if input_var.sym_val is not None and not is_symbolic(input_var.sym_val) and len(input_var.sym_val.shape) == 1:
+                result = [np.array(val).astype(dtype=type_map[dtype_val]).item() if not is_symbolic(val) else val for val in input_var.sym_val]
+                return np.array(result)
+            return None
+
+        if not types.is_tensor(input_var.sym_type):
+            return input_var.val.astype(dtype=type_map[dtype_val])
+        else:
+            return np.array(input_var.val).astype(dtype=type_map[dtype_val])
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/image_resizing.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/image_resizing.py
new file mode 100644
index 00000000..3186ead7
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/image_resizing.py
@@ -0,0 +1,899 @@
+# Copyright (c) 2022, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import numpy as np
+
+from coremltools.converters.mil.mil import (DefaultInputs, InputSpec,
+                                            Operation, TensorInputType,
+                                            get_new_symbol, types)
+from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op
+from coremltools.converters.mil.mil.ops.defs.iOS15 import _IOS15_TARGET
+from coremltools.converters.mil.mil.types.symbolic import is_symbolic
+
+
+@register_op
+class upsample_nearest_neighbor(Operation):
+    """
+    Upsample the spatial dimensions (last two dimensions) of the input
+    by integer or fractional scale factors, using nearest-neighbor interpolation.
+
+    Parameters
+    ----------
+    x: tensor<[\*D, H1, W1],T> (Required)
+        * Must be at least rank ``3``.
+    scale_factor_height: const<i32> or const<fp32> (Optional, default=1)
+        * Scale factor for the height dimension (``axis=-2``).
+        * Can be either an integer or fractional.
+    scale_factor_width: const<i32> or const<fp32> (Optional, default=1)
+        * Scale factor for the width dimension (``axis=-1``).
+        * Can be either an integer or fractional.
+
+    Returns
+    -------
+    tensor<[\*D, H2, W2],T>
+        * Tensor with same type as the input.
+        * ``H2`` = floor(``H1`` * ``scale_factor_height``).
+        * ``W2`` = floor(``W1`` * ``scale_factor_width``).
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    U: fp32, i32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        scale_factor_height=TensorInputType(
+            const=True,
+            optional=True,
+            type_domain="U"
+        ),
+        scale_factor_width=TensorInputType(
+            const=True,
+            optional=True,
+            type_domain="U"
+        ),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+        "U": (types.fp32, types.int32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            scale_factor_height=1,
+            scale_factor_width=1,
+        )
+
+    def type_inference(self):
+        if self.x.rank < 3:
+            raise ValueError(
+                'input to the "upsample_nearest_neighbor" op must have rank at least 3'
+            )
+
+        ret_shape = list(self.x.shape)
+        ret_shape[-1] = np.floor(self.scale_factor_width.val * ret_shape[-1]) if not is_symbolic(ret_shape[-1]) else get_new_symbol()
+        ret_shape[-2] = np.floor(self.scale_factor_height.val * ret_shape[-2]) if not is_symbolic(ret_shape[-2]) else get_new_symbol()
+        return types.tensor(self.x.dtype, ret_shape)
+
+
+@register_op
+class resize_nearest_neighbor(Operation):
+    """
+    Resize the spatial (last two) dimensions to the specified target size
+    using nearest neighbor interpolation. Although this op is similar to
+    ``upsample_nearest_neighbor``, ``resize_nearest_neighbor`` works with
+    a target size rather than with scale factors.
+
+    Parameters
+    ----------
+    x: tensor<[\*D, H1, W1], T> (Required)
+        * Must be at least rank ``3``.
+    target_size_height: const<i32> (Required)
+        * Target spatial size for the height dimension (``axis=-2``).
+    target_size_width: const<i32> (Required)
+        * Target spatial size for the width dimension (``axis=-1``).
+
+    Notes
+    -----
+    See ``resize_bilinear`` for examples.
+
+    See Also
+    --------
+    resize_bilinear
+
+    Returns
+    -------
+    tensor<[\*D, H2, W2], T>
+        * Tensor with same type as the input.
+        * ``H2`` = ``target_size_height``.
+        * ``W2`` = ``target_size_width``.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        target_size_height=TensorInputType(const=True, type_domain=types.int32),
+        target_size_width=TensorInputType(const=True, type_domain=types.int32),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def type_inference(self):
+        if self.x.rank < 3:
+            raise ValueError(
+                'input to the "resize_nearest_neighbor" op must have rank at least 3'
+            )
+
+        ret_shape = list(self.x.shape)
+        ret_shape[-1] = int(self.target_size_width.val)
+        ret_shape[-2] = int(self.target_size_height.val)
+        return types.tensor(self.x.dtype, ret_shape)
+
+
+@register_op
+class upsample_bilinear(Operation):
+    """
+    Upsample the spatial dimensions (last two dimensions) of the input
+    by scale factors using bilinear interpolation.
+    The ``upsample_bilinear`` operation in MIL corresponds to the
+    ``recompute_scale_factor=True`` mode of the PyTorch bilinear interpolation
+    op. That is, the scale factor is recomputed from the output size.
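+    (That is, the output size is computed as ``floor(input_size * scale_factor)``,
+    and the effective scale factor is then ``output_size / input_size``.)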
+    Note that when ``scale_factor_height`` and ``scale_factor_width`` are
+    floating point, this could result in a different effective scale factor
+    due to rounding.
+
+    Parameters
+    ----------
+    x: tensor<[\*D, H1, W1], T> (Required)
+        * Must be at least rank ``3``.
+    scale_factor_height: const<i32> or const<fp32> (Optional, default=1)
+        * Scale factor for the height dimension (``axis=-2``).
+    scale_factor_width: const<i32> or const<fp32> (Optional, default=1)
+        * Scale factor for the width dimension (``axis=-1``).
+    align_corners: const<bool> (Optional, default=True)
+        * This parameter determines how samples are chosen for bilinear
+          interpolation. For details, see the Notes section.
+
+    Notes
+    -----
+    To understand the ``align_corners`` parameter, consider the 1-D case.
+    You need to sample a grid of pixels whose values are computed using linear
+    interpolation. This parameter controls how the grid is sampled. If the
+    input grid is ``[0, Xin-1]`` (corresponding to an input size of ``Xin``),
+    and if the output size is ``Xout``, then the grid points are sampled in
+    the following manner:
+
+    .. sourcecode:: python
+
+        # If align_corners == True:
+        spacing = (Xin - 1) / (Xout - 1)
+        grid_point[i] = min(Xin - 1, max(0, i*spacing)), for i=0,1,...,Xout-1
+
+        # If align_corners == False:
+        spacing = Xin / Xout
+        grid_point[i] = min(Xin - 1, max(0, i*spacing + 0.5*spacing - 0.5)),
+        ... for i=0,1,...,Xout-1
+
+    For example:
+
+    .. sourcecode:: python
+
+        Xin = 2
+        input_interval = [0,1]
+
+    Grid points:
+
+    .. sourcecode:: python
+
+        [0., 0.1, 0.5, 0.9, 1.] (Xout = 5, align_corners=False)
+        [0., 0.25, 0.5, 0.75, 1.] (Xout = 5, align_corners=True)
+        [0., 0., 0.33, 0.67, 1., 1.] (Xout = 6, align_corners=False)
+        [0., 0.2, 0.4, 0.6, 0.8, 1.] (Xout = 6, align_corners=True)
+
+    Note the following similarities:
+
+    * ``align_corners=False`` is the same as
+      ``tf.raw_ops.ResizeBilinear(align_corners=False, half_pixel_centers=True)``.
+
+    * ``align_corners=True`` is the same as
+      ``tf.raw_ops.ResizeBilinear(align_corners=True, half_pixel_centers=False)``.
+
+    Returns
+    -------
+    tensor<[\*D, H2, W2], T>
+        * Tensor with same type as the input.
+        * ``H2`` = floor(``H1`` * ``scale_factor_height``).
+        * ``W2`` = floor(``W1`` * ``scale_factor_width``).
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    U: fp32, i32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        scale_factor_height=TensorInputType(
+            const=True,
+            optional=True,
+            type_domain="U",
+        ),
+        scale_factor_width=TensorInputType(
+            const=True,
+            optional=True,
+            type_domain="U",
+        ),
+        align_corners=TensorInputType(
+            const=True,
+            optional=True,
+            type_domain=types.bool,
+        ),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+        "U": (types.int32, types.fp32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            scale_factor_height=1,
+            scale_factor_width=1,
+            align_corners=True,
+        )
+
+    def type_inference(self):
+        if self.x.rank < 3:
+            raise ValueError(
+                'input to the "upsample_bilinear" op must have rank at least 3'
+            )
+
+        ret_shape = list(self.x.shape)
+        ret_shape[-1] = np.floor(self.scale_factor_width.val * ret_shape[-1]) if not is_symbolic(ret_shape[-1]) else get_new_symbol()
+        ret_shape[-2] = np.floor(self.scale_factor_height.val * ret_shape[-2]) if not is_symbolic(ret_shape[-2]) else get_new_symbol()
+        return types.tensor(self.x.dtype, ret_shape)
+
+
+@register_op
+class resize_bilinear(Operation):
+    """
+    Resize the spatial (last two) dimensions to the specified target size
+    using bilinear interpolation. 
Although this op is similar to
+    ``upsample_bilinear``, ``resize_bilinear`` works with a target size
+    rather than with scale factors.
+
+    Parameters
+    ----------
+    x: tensor<[\*D, H1, W1],T> (Required)
+        * Must be at least rank ``3``.
+    target_size_height: const<i32> (Optional, default=1)
+        * Target spatial size for the height dimension (``axis=-2``).
+    target_size_width: const<i32> (Optional, default=1)
+        * Target spatial size for the width dimension (``axis=-1``).
+    sampling_mode: const<str> (Optional, default="DEFAULT")
+        * This parameter can take ``"STRICT_ALIGN_CORNERS"``, ``"ALIGN_CORNERS"``,
+          ``"DEFAULT"``, ``"OFFSET_CORNERS"``, or ``"UNALIGN_CORNERS"`` as values.
+          For details, see the Notes section.
+
+    Notes
+    -----
+    To understand the ``sampling_mode`` parameter, consider the 1-D case.
+    You need to sample a grid of pixels whose values are computed using
+    linear interpolation. This parameter controls how the grid is sampled.
+    If the input grid is ``[0, Xin-1]`` (corresponding to an input size of
+    ``Xin``), and if the output size is ``Xout``, then the grid points are
+    sampled in the following manner:
+
+    .. sourcecode:: python
+
+        # "STRICT_ALIGN_CORNERS":
+        spacing = (Xin - 1) / (Xout - 1)
+        grid_point[i] = min(Xin-1, max(0, i*spacing)), for i=0,1,...,Xout-1
+
+        # "ALIGN_CORNERS": Same as "STRICT_ALIGN_CORNERS" unless Xout=1,
+        # in which case:
+        grid_point[0] = (Xin-1) / 2, if Xout==1
+
+        # "DEFAULT":
+        spacing = (Xin - Xin/Xout) / (Xout - 1)
+        grid_point[i] = min(Xin-1, max(0, i*spacing)), for i=0,1,...,Xout-1
+
+        # "OFFSET_CORNERS":
+        delta = max(1, Xin - 1) / Xout
+        spacing = ((Xout - 1) * delta) / (Xout - 1)
+        grid_point[i] = min(Xin-1, max(0, 0.5*delta + i*spacing)), for
+        ... i=0,1,...,Xout-1
+
+        # "UNALIGN_CORNERS":
+        spacing = Xin / Xout
+        grid_point[i] = min(Xin - 1, max(0, i*spacing + 0.5*spacing - 0.5)), for i=0,1,...,Xout-1
+
+    For example:
+
+    .. sourcecode:: python
+
+        Xin = 2
+        input_interval = [0,1]
+
+    Grid points:
+
+    .. sourcecode:: python
+
+        [0., 0.1, 0.5, 0.9, 1.] (Xout = 5, "UNALIGN_CORNERS")
+        [0., 0.25, 0.5, 0.75, 1.] (Xout = 5, "STRICT_ALIGN_CORNERS" / "ALIGN_CORNERS")
+        [0., 0.4, 0.8, 1., 1.] (Xout = 5, "DEFAULT")
+        [0.1, 0.3, 0.5, 0.7, 0.9] (Xout = 5, "OFFSET_CORNERS")
+
+        [0., 0., 0.33, 0.67, 1., 1.] (Xout = 6, "UNALIGN_CORNERS")
+        [0., 0.2, 0.4, 0.6, 0.8, 1.] (Xout = 6, "STRICT_ALIGN_CORNERS" / "ALIGN_CORNERS")
+        [0., 0.33, 0.67, 1., 1., 1.] (Xout = 6, "DEFAULT")
+        [0.08, 0.25, 0.42, 0.58, 0.75, 0.92] (Xout = 6, "OFFSET_CORNERS")
+
+    Note the following similarities:
+
+    * ``"DEFAULT"`` is the same as
+      ``tf.raw_ops.ResizeBilinear(align_corners=False,
+      half_pixel_centers=False)``.
+    * ``"STRICT_ALIGN_CORNERS"`` is the same as
+      ``tf.raw_ops.ResizeBilinear(align_corners=True,
+      half_pixel_centers=False)``.
+
+    Returns
+    -------
+    tensor<[\*D, H2, W2],T>
+        * Tensor with same type as the input.
+        * ``H2`` = ``target_size_height``.
+        * ``W2`` = ``target_size_width``.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        target_size_height=TensorInputType(
+            const=True,
+            optional=True,
+            type_domain=types.int32
+        ),
+        target_size_width=TensorInputType(
+            const=True,
+            optional=True,
+            type_domain=types.int32
+        ),
+        sampling_mode=TensorInputType(
+            const=True,
+            optional=True,
+            type_domain=types.str
+        ),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            target_size_height=1,
+            target_size_width=1,
+            sampling_mode="DEFAULT",
+        )
+
+    def type_inference(self):
+        if self.x.rank < 3:
+            raise ValueError(
+                'input to the "resize_bilinear" op must have rank at least 3'
+            )
+
+        if self.sampling_mode.val not in {
+            "STRICT_ALIGN_CORNERS",
+            "ALIGN_CORNERS",
+            "UNALIGN_CORNERS",
+            "DEFAULT",
+            "OFFSET_CORNERS",
+        }:
+            raise ValueError(
+                '"resize_bilinear" op: unrecognized sampling mode "{}"'.format(
+                    self.sampling_mode.val
+                )
+            )
+
+        ret_shape = list(self.x.shape)
+        ret_shape[-1] = self.target_size_width.val
+        ret_shape[-2] = self.target_size_height.val
+        return types.tensor(self.x.dtype, ret_shape)
+
+
+@register_op
+class crop_resize(Operation):
+    """
+    Resize the spatial dimensions (last two dimensions) of the first input
+    according to the bounding boxes specified in the second input, using
+    bilinear interpolation.
+
+    Parameters
+    ----------
+
+    x: tensor<[B, C, H, W],T> (Required)
+        * The input, from which patches (regions of interest) are extracted
+          and resized using bilinear interpolation.
+        * Rank ``4``.
+
+    roi: tensor<[N,1,4,1,1], T> or tensor<[N,1,5,1,1], T> (Required)
+        * Regions of interest, or coordinates of the boxes. The above input
+          represents coordinates of ``N`` boxes.
+        * The convention to express coordinates depends on the value of the
+          input ``box_coordinate_mode``.
+        * Rank ``5``.
+        * If ``tensor<[N,1,4,1,1], T>``: Resized images are computed for all
+          ``B`` input images.
+        * If ``tensor<[N,1,5,1,1], T>``: The first element along ``axis=-3``
+          is the index of the image to be resized, which must be within range
+          ``[0, B)``.
+
+    target_height: const<i32> (Optional, default=1)
+        * Target height for resizing each patch.
+
+    target_width: const<i32> (Optional, default=1)
+        * Target width for resizing each patch.
+
+    normalized_coordinates: const<bool> (Optional, default=False)
+        * If true, the bounding box coordinates must be in the
+          interval ``[0, 1]``. Scaling is based on the input spatial
+          dimensions: ``(H_in - 1)`` for height and ``(W_in - 1)`` for width.
+        * If false, the bounding box coordinates must be in the interval
+          ``[0, H_in - 1]`` for height dimensions and ``[0, W_in - 1]`` for
+          width dimensions.
+
+    spatial_scale: const<fp32> (Optional, default=1.0)
+        * Additional spatial scale that multiplies the bounding box coordinates.
+          You would use this to implement the RoI Align layer, which typically
+          uses unnormalized RoI coordinates along with a spatial scale that is
+          less than or equal to 1.
+
+    box_coordinate_mode: const<str> (Optional, default="CORNERS_HEIGHT_FIRST")
+        * Specifies the convention for specifying the four bounding box
+          coordinates for an image of size ``(Height, Width)``. The ``(0,0)``
+          coordinate corresponds to the top-left corner of the image.
+        * This parameter can take one of four values:
+
+          ``"CORNERS_HEIGHT_FIRST"``: ``[h_start, w_start, h_end, w_end]``
+
+          ``"CORNERS_WIDTH_FIRST"``: ``[w_start, h_start, w_end, h_end]``
+
+          ``"CENTER_SIZE_HEIGHT_FIRST"``: ``[h_center, w_center, box_height, box_width]``
+
+          ``"CENTER_SIZE_WIDTH_FIRST"``: ``[w_center, h_center, box_width, box_height]``
+
+    sampling_mode: const (Optional, default="DEFAULT")
+        * This parameter can take ``"STRICT_ALIGN_CORNERS"``,
+          ``"ALIGN_CORNERS"``, ``"DEFAULT"``, ``"OFFSET_CORNERS"``, or
+          ``"UNALIGN_CORNERS"`` as values.
+        * This same convention is used by the ``resize_bilinear`` op (see
+          that op for details).
+
+    See Also
+    --------
+    resize_bilinear
+
+    Returns
+    -------
+    tensor<[N, B, C, target_height, target_width],T> or tensor<[N, 1, C, target_height, target_width],T>
+        * Tensor with same type as the input.
+        * If ``roi : tensor<[N,1,4,1,1], T>``, the output is
+          ``tensor<[N, B, C, target_height, target_width],T>``.
+          Total crops = ``N*B``; that is, ``N`` crops for each input in the batch.
+        * If ``roi : tensor<[N,1,5,1,1], T>``, the output is
+          ``tensor<[N, 1, C, target_height, target_width],T>``.
+          Total crops = ``N``; that is, one crop for the given input image index
+          in the batch.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        roi=TensorInputType(type_domain="T"),
+        target_height=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        target_width=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        normalized_coordinates=TensorInputType(const=True, optional=True, type_domain=types.bool),
+        spatial_scale=TensorInputType(const=True, optional=True, type_domain=types.fp32),
+        box_coordinate_mode=TensorInputType(const=True, optional=True, type_domain=types.str),
+        sampling_mode=TensorInputType(const=True, optional=True, type_domain=types.str),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            target_height=1,
+            target_width=1,
+            normalized_coordinates=False,
+            spatial_scale=1.,
+            box_coordinate_mode="CORNERS_HEIGHT_FIRST",
+            sampling_mode="DEFAULT",
+        )
+
+    def type_inference(self):
+        if self.x.rank != 4:
+            raise ValueError(
+                'input to the "crop_resize" op must be of rank 4. Provided {}'.format(
+                    self.x.rank
+                )
+            )
+
+        if self.roi.rank != 5:
+            raise ValueError(
+                'ROI input to the "crop_resize" op must be of rank 5, provided {}'.format(
+                    self.roi.rank
+                )
+            )
+
+        if self.sampling_mode.val not in {
+            "STRICT_ALIGN_CORNERS",
+            "ALIGN_CORNERS",
+            "UNALIGN_CORNERS",
+            "DEFAULT",
+            "OFFSET_CORNERS",
+        }:
+            raise ValueError(
+                '"crop_resize" op: unrecognized sampling mode "{}"'.format(
+                    self.sampling_mode.val
+                )
+            )
+
+        # ret_shape: [N] + [B, C, h_out, w_out]
+        N, B, C = self.roi.shape[0], self.x.shape[0], self.x.shape[1]
+        ret_shape = [N, B, C, self.target_height.val, self.target_width.val]
+        return types.tensor(self.x.dtype, ret_shape)
+
+
+@register_op
+class crop(Operation):
+    """
+    Crop the spatial dimensions (last two dimensions) of the input by the
+    specified amounts.
+
+    Parameters
+    ----------
+    x: tensor<[\*D, H1, W1],T> (Required)
+        * Must be at least rank ``3``.
+    crop_height: const<2, i32> (Required)
+        * Amount to be cropped from the top and bottom of the height dimension
+          (``axis=-2``).
+    crop_width: const<2, i32> (Required)
+        * Amount to be cropped from the left and right sides of the width
+          dimension (``axis=-1``).
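+
+    For example (an illustrative sketch, not part of the op definition),
+    cropping a ``(1, 3, 8, 10)`` input with ``crop_height=[1, 2]`` and
+    ``crop_width=[3, 3]`` yields shape ``(1, 3, 5, 4)``, which is equivalent
+    to the following NumPy slicing of the last two dimensions:
+
+    .. sourcecode:: python
+
+        import numpy as np
+
+        x = np.zeros((1, 3, 8, 10))
+        crop_height, crop_width = [1, 2], [3, 3]
+        y = x[..., crop_height[0]:x.shape[-2] - crop_height[1],
+                   crop_width[0]:x.shape[-1] - crop_width[1]]
+        assert y.shape == (1, 3, 5, 4)  # H2 = 8-1-2, W2 = 10-3-3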
+ + Returns + ------- + tensor<[\*D, H2, W2],T> + * Tensor with same type as the input. + * ``H2`` = ``H1 - crop_height[0] - crop_height[1]``. + * ``W2`` = ``W1 - crop_width[0] - crop_width[1]``. + + Attributes + ---------- + T: fp16, fp32 + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + crop_height=TensorInputType(const=True, type_domain=types.int32), + crop_width=TensorInputType(const=True, type_domain=types.int32), + ) + + type_domains = { + "T": (types.fp16, types.fp32), + } + + def type_inference(self): + if self.x.rank < 3: + raise ValueError( + 'input to the "crop" op must at least be of rank 3. Provided {}'.format( + self.x.rank + ) + ) + + crop_height = self.crop_height.val + crop_width = self.crop_width.val + + if len(crop_height.flatten()) != 2: + raise ValueError( + "crop_height must have 2 elements. Provided {}".format( + len(crop_height.flatten()) + ) + ) + + if len(crop_width.flatten()) != 2: + raise ValueError( + "crop_width must have 2 elements. Provided {}".format( + len(crop_width.flatten()) + ) + ) + + input_shape = list(self.x.shape) + ret_shape = ( + input_shape[:-2] + + [input_shape[-2] - crop_height[0] - crop_height[1]] + + [input_shape[-1] - crop_width[0] - crop_width[1]] + ) + return types.tensor(self.x.dtype, ret_shape) + +@register_op(opset_version=_IOS15_TARGET) +class affine(Operation): + """ + Apply a linear affine transform to the input 2D image tensor. The value at the + ``(x, y)`` (i.e., ``(w, h)``) coordinate of the output is computed by first computing + the coordinates ``x’`` and ``y’`` with the following equation, and then computing the + value at the coordinate ``(x’,y’)`` in the input image using either bilinear or + nearest neighbor interpolation. If the ``(x’, y’)`` point falls outside the input + image, then padding information is used to compute the value. + + :: + + x’ = a0 * x + a1 * y + a2 + y’ = b0 * x + b1 * y + b2 + + + Parameters + ---------- + x: tensor<[B, C, H1, W1], T> + * Must be rank ``4``. + transform_matrix: tensor<[D, 6], T> + * Must be rank ``2``. + * ``D`` can be either ``B`` or 1. + * If ``D == B``, there is a separate transform matrix for each batch. + * If ``D == 1``, the same matrix is used for all input batches. + * For each batch: ``[a0, a1, a2, b0, b1, b2]``. + output_height: const + * Target output height + output_width: const + * Target output width + sampling_mode: const + * Allowed values: ``"bilinear"`` + padding_mode: const + * Allowed values: ``"constant"``. + * Note that the following example is 1D case for brevity. + The op supports only 2D image input. + * If ``padding_mode == "constant"``: + * The input image is assumed to be padded with the padding_value. + * For example, ``|1, 2, 3| -> |0, 0, 0, 1, 2, 3, 0, 0, 0|``. + padding_value: const + * Currently non-zero values are not supported. + * To be used only when ``padding_mode == "constant"``, ignored in other cases. + coordinates_mode: const + * Allowed values: ``"normalized_minus_one_to_one"`` + * If ``coordinates_mode == "normalized_minus_one_to_one"``, in-image values are ``[-1, 1]``. + * For example, if ``coordinates_mode == "normalized_minus_one_to_one"``, + the in range values are ``[-1, 1]``. That is: + * ``(-1, -1)``, i.e. ``(w=-1, h=-1)``, corresponds to the top-left pixel. + * ``(1, -1)``, i.e. ``(w=1, h=-1)``, corresponds to the top-right pixel. + * ``(-1, 1)``, i.e. ``(w=-1, h=1)``, corresponds to the bottom-left pixel. + * ``(1, 1)``, i.e. ``(w=1, h=1)``, corresponds to the bottom-right pixel. 
+ align_corners: const + * Currently ``align_corners=False`` is not supported. + * To be used only when ``coordinates_mode != unnormalized``, ignored otherwise. + * if ``align_corners == True``, the extrema coordinates correspond + to the center of the first and last corner pixels. + * if ``align_corners == False``, the extrema coordinates correspond + to the edge of the first and last corner pixels. + + Returns + ------- + tensor<[B, C, output_height, output_width], T> + + Attributes + ---------- + T: fp16, fp32 + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + transform_matrix=TensorInputType(type_domain="T"), + output_height=TensorInputType(const=True, type_domain=types.int32), + output_width=TensorInputType(const=True, type_domain=types.int32), + sampling_mode=TensorInputType(const=True, type_domain=types.str), + padding_mode=TensorInputType(const=True, type_domain=types.str), + padding_value=TensorInputType(const=True, type_domain="T"), + coordinates_mode=TensorInputType(const=True, type_domain=types.str), + align_corners=TensorInputType(const=True, type_domain=types.bool), + ) + + type_domains = { + "T": (types.fp16, types.fp32), + } + + def type_inference(self): + if self.x.rank != 4: + raise ValueError( + 'input "x" to the "affine" op must be a rank 4 tensor. ' + "Got rank {} tensor of shape {}".format( + self.x.rank, self.x.shape + ) + ) + if self.transform_matrix.rank != 2: + raise ValueError( + 'input "transform_matrix" to the "affine" op must be a rank 2 tensor. ' + "Got rank {} tensor of shape {}".format( + self.transform_matrix.rank, self.transform_matrix.shape + ) + ) + if self.sampling_mode.val.lower() != "bilinear": + raise NotImplementedError( + 'input "sampling_mode" to the "affine" not implemented. ' + 'Got "{}"'.format(self.sampling_mode.val) + ) + if self.coordinates_mode.val.lower() != "normalized_minus_one_to_one": + raise NotImplementedError( + 'input "coordinates_mode" to the "affine" not implemented. ' + 'Got "{}"'.format(self.coordinates_mode.val) + ) + if self.padding_mode.val.lower() != "constant" or self.padding_value.val != 0.0: + raise NotImplementedError( + 'input "padding_mode" to the "affine" not implemented. ' + 'Got "{}" with "padding_value={}"'.format( + self.padding_mode.val, self.padding_value.val + ) + ) + + input_shape = self.x.shape + transform_matrix_shape = self.transform_matrix.shape + if ( + not is_symbolic(transform_matrix_shape[-1]) + and transform_matrix_shape[-1] != 6 + ): + raise ValueError( + 'input "transform_matrix" to the "affine" op last dimension must be 6 ' + "[a0, a1, a2, b0, b1, b2], " + "Got {} for last dimension".format(transform_matrix_shape[-1]) + ) + + ret_shape = list(input_shape) + ret_shape[2] = self.output_height.val + ret_shape[3] = self.output_width.val + return types.tensor(self.x.dtype, tuple(ret_shape)) + + +@register_op(opset_version=_IOS15_TARGET) +class resample(Operation): + """ + Resample the input image tensor ``x`` at the ``coordinates``. + Resampling is required if the coordinates do not correspond to exact + pixels in the input image. The ``sampling_mode`` determines + the algorithm used for resampling and computing the values. + + Parameters + ---------- + x: tensor<[B, C, H1, W1], T> + * Must be rank ``4``. + coordinates: tensor<[B, H2, W2, 2], U> + * Must be rank ``4``. + * Coordinates are provided in the order ``(x, y)`` (i.e. ``(w, h)``). + * The value of each output location ``output[b, c, h, w]`` is calculated + by sampling from the input image ``x[b, c, :, :]``. 
+ * The pixel at the ``(x, y)`` location corresponds to the length-2 + vector: ``coordinates[b, h, w, :]``. + * Coordinate (normalized or unnormalized) should be specified according + to ``coordinates_mode``. + sampling_mode: const + * Allowed values: ``"bilinear"`` , ``"nearest"`` + padding_mode: const + * Allowed values: ``"constant"``, ``"border"``, ``"reflection"``, ``"symmetric"`` + * Note that the following example is 1D case for brevity. + The op supports only 2D image input. + * If ``padding_mode == "constant"``: + * The input image is assumed to be padded with the ``padding_value``. + * For example: ``|1, 2, 3| -> |0, 0, 0, 1, 2, 3, 0, 0, 0|`` + * if ``padding_mode == "border"``: + * The input image is assumed to be padded with the values replicated + from the values at the edge. This is also referred to as the + "clamped" or "replication" mode, since the padded values are + clamped to the border values. + * For example: ``|1, 2, 3| -> |1, 1, 1, 1, 2, 3, 3, 3, 3|`` + * If ``padding_mode == "reflection"``: + * The border values are reflected, *not* including the values at the edge/border. + * For example: ``|1, 2, 3| -> |2, 3, 2, 1, 2, 3, 2, 1, 2|`` + * If ``padding_mode == "symmetric"``: + * Values are reflected, including the border/edge values. + * For example: ``|1, 2, 3| -> |3, 2, 1 , 1, 2, 3, 3, 2, 1|`` + padding_value: const + * To be used only when ``padding_mode == "constant"``, ignored in other cases. + coordinates_mode: const + * Allowed values: ``"unnormalized"``, ``"normalized_minus_one_to_one"``, + ``"normalized_zero_to_one"`` + * If ``coordinates_mode == "unnormalized"``, the coordinates input values + are interpreted to be in range ``[0, W - 1] / [0, H - 1]``, which + corresponds to the in-image point. + * If ``coordinates_mode == "normalized_minus_one_to_one"``, + the in-image values are ``[-1, 1]``. + * If ``coordinates_mode == "normalized_zero_to_one"``, + in-image values are ``[0, 1]``. + * For example, if ``coordinates_mode == "normalized_minus_one_to_one"``, + the in range values are [-1, 1]. That is: + * ``(-1, -1)``, i.e. ``(w=-1, h=-1)``, corresponds to the top-left pixel. + * ``(1, -1)``, i.e. ``(w=1, h=-1)``, corresponds to the top-right pixel. + * ``(-1, 1)``, i.e. ``(w=-1, h=1)``, corresponds to the bottom-left pixel. + * ``(1, 1)``, i.e. ``(w=1, h=1)``, corresponds to the bottom-right pixel. + align_corners: const + * If ``align_corners == True``, the extrema coordinates correspond + to the center of the first and last corner pixels. + * If ``align_corners == False``, the extrema coordinates correspond + to the edge of the first and last corner pixels. + + Returns + ------- + tensor<[B, C, H2, W2], T> + + Attributes + ---------- + T: fp16, fp32 + U: fp32, int32 + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + coordinates=TensorInputType(type_domain="U"), + sampling_mode=TensorInputType(const=True, type_domain=types.str), + padding_mode=TensorInputType(const=True, type_domain=types.str), + padding_value=TensorInputType(const=True, type_domain="T"), + coordinates_mode=TensorInputType(const=True, type_domain=types.str), + align_corners=TensorInputType(const=True, type_domain=types.bool), + ) + + type_domains = { + "T": (types.fp16, types.fp32), + "U": (types.int32, types.fp32), + } + + def type_inference(self): + if self.x.rank != 4: + raise ValueError( + 'input "x" to the "resample" op must be a rank 4 tensor. 
' + "Got rank {} tensor of shape {}".format( + self.x.rank, self.x.shape + ) + ) + if self.coordinates.rank != 4: + raise ValueError( + 'input "coordinates" to the "resample" op must be a rank 4 tensor. ' + "Got rank {} tensor of shape {}".format( + self.coordinates.rank, self.coordinates.shape + ) + ) + + input_shape = self.x.shape + coord_shape = self.coordinates.shape + if ( + not is_symbolic(input_shape[0]) + and not is_symbolic(coord_shape[0]) + and input_shape[0] != coord_shape[0] + ): + raise ValueError( + 'input "x" and "coordinates" to the "resample" must agree on ' + "dimension of batch size: {} vs. {}".format( + input_shape[0], coord_shape[0] + ) + ) + if not is_symbolic(coord_shape[-1]) and coord_shape[-1] != 2: + raise ValueError( + 'input "coordinates" to the "resample" op last dimension must be 2. ' + "Got {} for last dimension".format( + coord_shape[-1] + ) + ) + + ret_shape = list(input_shape) + ret_shape[2] = coord_shape[1] # Output height + ret_shape[3] = coord_shape[2] # Output width + return types.tensor(self.x.dtype, tuple(ret_shape)) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/linear.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/linear.py new file mode 100644 index 00000000..87479ab8 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/linear.py @@ -0,0 +1,343 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as np + +from coremltools.converters.mil.mil import ( + DefaultInputs, + InputSpec, + Operation, + TensorInputType, + TupleInputType, + precondition, + types, +) +from coremltools.converters.mil.mil.operation import VALUE +from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op +from coremltools.converters.mil.mil.ops.defs._utils import broadcast_shapes, parse_einsum_equation +from coremltools.converters.mil.mil.types.symbolic import is_symbolic + + +@register_op +class linear(Operation): + """ + Perform ``x * weight.T + bias`` where ``weight`` and ``bias`` are constant at + compile time. + + Parameters + ---------- + x: tensor<[\*D,D_in], T> (Required) + * ``1 <= rank <= 3``. + * ``0 <= rank(*D) <= 2``. + weight: const tensor<[D_out,D_in], T> (Required) + bias: const tensor<[D_out],T> (Optional) + * Default to ``0``. + + Returns + ------- + tensor<[\*D,D_out], T> + * Same rank as the input ``x``. 
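+
+    As an illustrative NumPy sketch (not part of the op definition), the
+    computation is an affine map applied along the last axis:
+
+    .. sourcecode:: python
+
+        import numpy as np
+
+        x = np.random.rand(2, 4)       # [*D, D_in] with D_in = 4
+        weight = np.random.rand(3, 4)  # [D_out, D_in] with D_out = 3
+        bias = np.zeros(3)             # [D_out]; defaults to all zeros
+        out = np.matmul(x, weight.T) + bias
+        assert out.shape == (2, 3)     # [*D, D_out]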
+ + Attributes + ---------- + T: fp16, fp32, i32 + """ + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + weight=TensorInputType(const=True, type_domain="T"), + bias=TensorInputType(const=True, optional=True, type_domain="T"), + ) + + type_domains = { + "T": (types.fp16, types.fp32, types.int32), + } + + def default_inputs(self): + Dout = self.weight.shape[0] + return DefaultInputs( + bias=[0.]*Dout, + ) + + def type_inference(self): + x_type = self.x.dtype + x_shape = self.x.shape + weight_shape = self.weight.shape + assert len(weight_shape) == 2 + if not ( + x_shape[-1] == weight_shape[-1] + or is_symbolic(x_shape[-1]) + or is_symbolic(weight_shape[-1]) + ): + msg = "Op '{}' (linear op): Size of the last dimension of x, which is {}, " \ + "does not match the last dimension of weights, which is {}" + raise ValueError(msg.format(self.name, x_shape[-1], weight_shape[-1])) + if self.bias is not None: + assert len(self.bias.shape) == 1 + if len(self.bias.val) != weight_shape[-2]: + msg = "Op '{}' (linear op): Size of the bias, which is {}, " \ + "does not match the first dimension of weights, which is {}" + raise ValueError(msg.format(self.name, len(self.bias.val), weight_shape[-2])) + shape = list(x_shape) + shape[-1] = weight_shape[0] + return types.tensor(x_type, tuple(shape)) + + @precondition(allow=VALUE) + def value_inference(self): + res = np.matmul(self.x.val, np.transpose(self.weight.val)) + if self.bias is not None: + res += self.bias.val + return res + + +@register_op +class matmul(Operation): + """ + Perform N-D batch matrix multiplication with NumPy-style broadcasting + based on the following rules: + + Rule 1. If both ``x, y`` are 1-D, return the scalar from the dot product. + + Rule 2. If both ``x, y`` are 2-D or higher, perform a broadcast on the batch dimensions + (all dimensions except the last ``2``). + + For example: + + * ``x.shape == (10, 4, 3)`` + * ``y.shape == (5, 10, 3, 2)`` + * ``matmul(x, y).shape == (5, 10, 4, 2)`` + + Conventional matrix multiplication is a special case where both ``x, y`` are + exactly 2-D. For example: + + * ``x.shape == (4, 3)`` + * ``y.shape == (3, 2)`` + * ``matmul(x, y).shape == (4, 2)`` + + If ``x`` is 1-D, and ``y`` is N-D where ``N >= 2``, ``x`` is first promoted to + matrix ``xm`` by prepending a ``1`` to its dimension, and the resulting ``xm`` is + broadcast to ``y`` following Rule 2 above. After this, remove the inserted dimension. + For example: + + * ``x.shape == (4)`` + * ``y.shape == (10, 4, 3)`` + * ``xm.shape == (1, 4)`` + * ``matmul(xm, y).shape == (10, 1, 3)`` + * Removing the inserted dimension results in ``matmul(x, y).shape == (10, 3)``. + * Note: ``xm`` and ``matmul(xm, y)`` are for illustration only. + + If ``x`` is N-D where ``N >= 2``, and ``y`` is 1-D, ``y`` is first promoted to + matrix ``ym`` by appending a ``1`` to its dimension, and the resulting ``ym`` is + broadcast to ``x`` following Rule 2 above. After this, remove the inserted dimension. + For example: + + * ``x.shape == (10, 3, 4)`` + * ``y.shape == (4,)`` + * ``ym.shape == (4, 1)`` + * ``matmul(x, ym).shape == (10, 3, 1)`` + * Removing the inserted dimension results in ``matmul(x, y).shape == (10, 3)``. + * Note: ``xm`` and ``matmul(xm, y)`` are for illustration only. + + Parameters + ---------- + x: tensor<[\*,K1], T> (Required) + * ``x`` must be 1-D or higher. + y: tensor<[\*,K2], T> (Required) + * ``y`` must be 1-D or higher. + transpose_x: const bool (Optional) + * Default to ``False``. 
+        * Use ``True`` to transpose the last two dimensions of ``x`` before multiplication.
+          It has no effect when ``x`` is 1-D.
+    transpose_y: const bool (Optional)
+        * Default to ``False``.
+        * Use ``True`` to transpose the last two dimensions of ``y`` before multiplication.
+          It has no effect when ``y`` is 1-D.
+
+    Returns
+    -------
+    tensor<\*, T>
+        * Scalar or tensor output.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32
+    """
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        y=TensorInputType(type_domain="T"),
+        transpose_x=TensorInputType(const=True, optional=True, type_domain=types.bool),
+        transpose_y=TensorInputType(const=True, optional=True, type_domain=types.bool),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            transpose_x=False,
+            transpose_y=False,
+        )
+
+    def type_inference(self):
+        x_type = self.x.dtype
+        x_shape = list(self.x.shape)
+        y_shape = list(self.y.shape)
+        x_rank = len(x_shape)
+
+        if x_rank == 1 and self.transpose_x.val:
+            msg = "Op {} (matmul): x is rank 1, but transpose_x is True, which is not allowed."
+            raise ValueError(msg.format(self.name))
+
+        if self.transpose_x.val:
+            x_shape = list(x_shape)
+            x_shape[-1], x_shape[-2] = x_shape[-2], x_shape[-1]
+            x_shape = tuple(x_shape)
+        if self.transpose_y.val:
+            y_shape = list(y_shape)
+            y_shape[-1], y_shape[-2] = y_shape[-2], y_shape[-1]
+            y_shape = tuple(y_shape)
+        if not (
+            x_shape[-1] == y_shape[-2]
+            or is_symbolic(x_shape[-1])
+            or is_symbolic(y_shape[-2])
+        ):
+            msg = "Op {} (matmul): x {}, y {} are not broadcastable"
+            raise ValueError(msg.format(self.name, self.x.shape, self.y.shape))
+
+        if x_rank == 1:
+            # promote shape of x to rank 2
+            x_shape = list((1,) + tuple(x_shape))
+        ret_shape = list(broadcast_shapes(x_shape[:-2], y_shape[:-2]))
+        ret_shape += [x_shape[-2], y_shape[-1]]
+        if x_rank == 1:
+            # remove the first dimension of the returned shape
+            return types.tensor(x_type, tuple(ret_shape[1:]))
+        else:
+            return types.tensor(x_type, tuple(ret_shape))
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        x = self.x.val
+        if self.transpose_x.val:
+            # Swap only the last two dimensions; np.transpose with no axes
+            # would reverse *all* axes, which is wrong for batched inputs.
+            x = np.swapaxes(x, -1, -2)
+        y = self.y.val
+        if self.transpose_y.val:
+            y = np.swapaxes(y, -1, -2)
+        return np.matmul(x, y)
+
+
+@register_op
+class einsum(Operation):
+    """
+    Perform tensor multiplication expressed according to the einsum notation.
+    The mode/equation that is currently supported is multiplying matrices that are laid out on
+    dimensions -1 and -3, treating all the other dimensions as batch. Broadcasting is supported along batch dimensions.
+    In particular, the inputs must be of the following shapes:
+
+    * Rank 4 input case:
+        * Input 1: ``[B, C, H, W1]``.
+        * Input 2: ``[B, W1, H, W2]``.
+        * Output: ``[B, C, H, W2]``.
+        * If, for one of the inputs, the dimension ``"B"`` or ``"H"`` is ``1``,
+          it is broadcast to match the other input.
+
+    * Rank 3 input case:
+        * Input 1: ``[C, H, W1]``.
+        * Input 2: ``[W1, H, W2]``.
+        * Output: ``[C, H, W2]``.
+        * If, for one of the inputs, the dimension ``"H"`` is ``1``, it is
+          broadcast to match the other input.
+
+    Parameters
+    ----------
+    values: Tuple(tensor_1, tensor_2)
+        * Where:
+            * ``tensor_1``: ``tensor<[*D, C, H, W1], T>``.
+                * Must be of rank 3 or 4.
+            * ``tensor_2``: ``tensor<[*D, W1, H, W2], T>``.
+                * Must be of rank 3 or 4.
+    equation: const
+        * Supported equations are:
+            * ``"nchw,nwhu->nchu"`` and its equivalent equation strings.
+            * ``"chw,whr->chr"`` and its equivalent equation strings.
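+
+    As a quick sanity check (illustrative only; this uses NumPy rather than
+    this op), the rank-4 mode contracts ``W1`` between dimension ``-1`` of
+    the first input and dimension ``-3`` of the second:
+
+    .. sourcecode:: python
+
+        import numpy as np
+
+        a = np.random.rand(2, 3, 4, 5)    # [B, C, H, W1]
+        b = np.random.rand(2, 5, 4, 6)    # [B, W1, H, W2]
+        out = np.einsum("nchw,nwhu->nchu", a, b)
+        assert out.shape == (2, 3, 4, 6)  # [B, C, H, W2]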
+ + Returns + ------- + tensor<[\*D, C, H, W2], T> + * Same ranks as the inputs. + + Attributes + ---------- + T: fp16, fp32 + """ + + input_spec = InputSpec( + values=TupleInputType(), + equation=TensorInputType(const=True, type_domain=types.str) + ) + + def type_inference(self): + if len(self.values) != 2: + raise ValueError("einsum op must get \'values\' of length 2") + x = self.values[0] + y = self.values[1] + + # validate the input shapes + x_type = x.dtype + assert x_type == y.dtype, "input types do not match" + x_shape = x.shape + y_shape = y.shape + assert len(x_shape) == len(y_shape), "inputs not of the same rank" + assert x_shape[-1] == y_shape[-3], "input shapes incompatible" + if x_shape[-2] != 1 and y_shape[-2] != 1: + assert x_shape[-2] == y_shape[-2], "input shapes incompatible" + if len(x_shape) == 4: + if x_shape[-4] != 1 and y_shape[-4] != 1: + assert x_shape[-4] == y_shape[-4], "input shapes incompatible" + + # validate the equation + input1_vec, input2_vec, output_vec = parse_einsum_equation(self.equation.val) + + assert \ + (input1_vec == [0, 1, 2, 3] and input2_vec == [0, 3, 2, 4] and output_vec == [0, 1, 2, 4]) or \ + (input1_vec == [0, 1, 2] and input2_vec == [2, 1, 3] and output_vec == [0, 1, 3]), \ + "unsupported einsum equation {}".format(self.equation.val) + + # calculate the output shape + def _get_dim_value(shape1, shape2, dim): + if is_symbolic(shape1[dim]) and is_symbolic(shape2[dim]): + return shape1[dim] + elif is_symbolic(shape1[dim]): + return shape1[dim] + elif is_symbolic(shape2[dim]): + return shape2[dim] + else: + return max(shape1[dim], shape2[dim]) + + out_shape = [1 for i in range(len(x_shape))] + out_shape[-1] = y_shape[-1] + out_shape[-3] = x_shape[-3] + out_shape[-2] = _get_dim_value(x_shape, y_shape, -2) + if len(x_shape) == 4: + out_shape[-4] = _get_dim_value(x_shape, y_shape, -4) + return types.tensor(x_type, tuple(out_shape)) + + @precondition(allow=VALUE) + def value_inference(self): + x = self.values[0] + y = self.values[1] + x_shape = x.val.shape + y_shape = y.val.shape + # broadcast dimensions -2 and -4, if required + if len(x_shape) == 4: + x_shape = (max(x_shape[0], y_shape[0]), x_shape[1], max(x_shape[2], y_shape[2]), x_shape[3]) + y_shape = (max(x_shape[0], y_shape[0]), y_shape[1], max(x_shape[2], y_shape[2]), y_shape[3]) + elif len(x_shape) == 3: + x_shape = (x_shape[0], max(x_shape[1], y_shape[1]), x_shape[2]) + y_shape = (y_shape[0], max(x_shape[1], y_shape[1]), y_shape[2]) + else: + raise ValueError("ranks of the input must be 3 or 4") + res = np.einsum(self.equation.val, + np.broadcast_to(x.val, x_shape), + np.broadcast_to(y.val, y_shape)) + return res diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/normalization.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/normalization.py new file mode 100644 index 00000000..8f21d5f4 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/normalization.py @@ -0,0 +1,381 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause +import numpy as np + +from coremltools.converters.mil.mil import (DefaultInputs, InputSpec, + Operation, TensorInputType, + precondition, types) +from coremltools.converters.mil.mil.operation import VALUE +from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op +from coremltools.converters.mil.mil.types.symbolic import any_symbolic + + +@register_op +class batch_norm(Operation): + """ + Normalize input tensor ``x`` by ``mean`` and ``variance``, and optionally apply a + scale ``gamma`` and an offset ``beta``: + + .. math:: + y_i = \\gamma_i \\dfrac{ (x_i - mean_i)}{\\sqrt{variance_i + epsilon}} + beta_i \\;,\\;i=1,....,C + + The ``mean``, ``variance``, ``gamma``, and ``beta`` + must be 1-D tensors whose lengths are equal to the second axis (the "depth" + or "channel" dimension) of ``x``. + + Parameters + ---------- + x: tensor<[n,C,*D], T> (Required) + * ``3 <= rank <= 5``. + * ``*D`` refers to the spatial dimensions, ``1 <= rank(*D) <= 3``. + * ``n`` is the batch dimension. + mean: const tensor<[C], T> (Required) + variance: const tensor<[C], T> (Required) + gamma: const tensor<[C], T> (Optional) + * Optional scale applied to normalized tensor. + * Default is all ones. + beta: const tensor<[C], T> (Optional) + * Optional offset applied to normalized tensor. + * Default is all zeros. + epsilon: const T (Optional) + * Default is ``1e-5``. + + Returns + ------- + tensor<[n,C,*D], T> + * Output tensor has the same shape and type as the input ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + mean=TensorInputType(const=True, type_domain="T"), + variance=TensorInputType(const=True, type_domain="T"), + gamma=TensorInputType(const=True, optional=True, type_domain="T"), + beta=TensorInputType(const=True, optional=True, type_domain="T"), + epsilon=TensorInputType(const=True, optional=True, type_domain="T"), + ) + + type_domains = { + "T": (types.fp16, types.fp32), + } + + def default_inputs(self): + return DefaultInputs( + gamma=None, + beta=None, + epsilon=1e-5, + ) + + def type_inference(self): + x_shape = self.x.shape + return types.tensor(self.x.dtype, tuple(x_shape)) + + +@register_op +class instance_norm(Operation): + """ + Apply instance normalization to the n-dimensional input tensor. + + Parameters + ---------- + x: tensor<[n,C,*D], T> (Required) + * ``3 <= rank(x) <= 4``. + * ``*D`` refers to the spatial dimensions, ``1 <= rank(*D) <= 2``. + * ``n`` is the batch dimension. + gamma: const tensor<[C], T> (Optional) + * Optional scale applied to normalized tensor. + * Default to all ones. + beta: const tensor<[C], T> (Optional) + * Optional offset applied to normalized tensor. + * Default to all zeros. + epsilon: const f32 (Optional) + * Default to ``1e-5``. + + Returns + ------- + tensor<[n,C,*D], T> + * Output tensor has the same shape and type as the input ``x``. 
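+
+    A minimal NumPy sketch of the computation (illustrative only): statistics
+    are computed per sample and per channel, over the spatial dimensions,
+    with the default ``gamma`` (all ones) and ``beta`` (all zeros) omitted:
+
+    .. sourcecode:: python
+
+        import numpy as np
+
+        x = np.random.rand(2, 3, 8, 8)          # [n, C, H, W]
+        mean = x.mean(axis=(2, 3), keepdims=True)
+        var = x.var(axis=(2, 3), keepdims=True)
+        out = (x - mean) / np.sqrt(var + 1e-5)  # epsilon default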
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        gamma=TensorInputType(const=True, optional=True, type_domain="T"),
+        beta=TensorInputType(const=True, optional=True, type_domain="T"),
+        epsilon=TensorInputType(const=True, optional=True, type_domain="T"),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            gamma=None,
+            beta=None,
+            epsilon=1e-5,
+        )
+
+    def type_inference(self):
+        x_shape = self.x.shape
+        return types.tensor(self.x.dtype, tuple(x_shape))
+
+
+@register_op
+class l2_norm(Operation):
+    """
+    Apply L2 normalization to the n-dimensional input tensor. That is, divide the input
+    tensor by the square root of the sum of squares of all elements of the input.
+
+    .. math::
+       x_i \\leftarrow \\dfrac{x_i}{\\sqrt{\\sum{x_i^2} + \\epsilon}}
+
+
+    Parameters
+    ----------
+    x: tensor<[\*B, \*D], T> (Required)
+        * Input tensor, ``rank(x) >= 3``.
+        * ``*B`` refers to the leading dimensions.
+        * ``*D`` refers to the spatial dimensions to be normalized. Must be rank 3: ``rank(*D) == 3``.
+        * When ``rank(x) == 3``, in which ``rank(*B) == 0 and rank(*D) == 3``, the input is divided by
+          the square root of the sum of squares of all elements.
+        * For ranks greater than 3, in which ``rank(*B) >= 1 and rank(*D) == 3``,
+          the leading dimensions \*B, starting from ``0`` to ``-4`` (inclusive),
+          are all treated as batch. The L2 normalization is done batch-wise.
+    epsilon: const T (Optional)
+        * Small constant to avoid division by ``0``.
+        * Optional, defaults to ``1e-6``.
+
+    Returns
+    -------
+    tensor<[\*B, \*D], T>
+        * Same type and shape as the input tensor ``x``.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        epsilon=TensorInputType(const=True, optional=True, type_domain="T"),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            epsilon=1e-6,
+        )
+
+    def type_inference(self):
+        if self.x.rank < 3:
+            msg = "Input rank of l2_norm must be at least 3. Got {}".format(self.x.rank)
+            raise ValueError(msg)
+        x_shape = self.x.shape
+        return types.tensor(self.x.dtype, tuple(x_shape))
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        val = self.x.val
+        eps = self.epsilon.val
+        shape = self.x.shape
+        rank = self.x.rank
+        batch_dims = rank - 3
+        if batch_dims == 0:
+            square_sum = np.sum(val**2)
+            output = val/np.power(square_sum + eps, 0.5)
+        else:
+            batch_dim_prod = np.prod(shape[:batch_dims])
+            reshape_val = np.reshape(val, (batch_dim_prod, -1))
+            square_sum = np.sum(reshape_val * reshape_val, axis=1, keepdims=True) + eps
+            output = reshape_val/np.power(square_sum, 0.5)
+            output = np.reshape(output, shape)
+        return output
+
+@register_op
+class layer_norm(Operation):
+    """
+    Apply layer normalization to the n-dimensional input tensor:
+
+    .. math::
+       out = gamma * (x - E[x]) / sqrt(Var[x] + epsilon) + beta
+
+
+    Parameters
+    ----------
+    x: tensor<\*?, T> (Required)
+        * Input tensor.
+
+    axes: const<[K], i32> (Optional)
+        * Dimensions to perform layer normalization.
+        * Default is ``None`` (all dimensions).
+
+    gamma: const tensor<\*?, T> (Optional)
+        * If provided, the shape must be ``x.shape[axes]``. For instance, if
+          the input ``x`` has shape ``(3,4,5,6)`` and ``axes = [2,3]``, gamma must have
+          shape ``(5,6)``.
+        * Default is all ones.
+
+    beta: const tensor<\*?, T> (Optional)
+        * Same shape as gamma.
+        * Default is all zeros.
+
+    epsilon: const T (Optional)
+        * Small constant to avoid division by ``0``.
+        * Default is ``1e-5``.
+
+
+    Returns
+    -------
+    tensor<\*?, T>:
+        * Tensor with same shape and type as the input tensor ``x``.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        axes=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        gamma=TensorInputType(const=True, optional=True, type_domain="T"),
+        beta=TensorInputType(const=True, optional=True, type_domain="T"),
+        epsilon=TensorInputType(const=True, optional=True, type_domain="T"),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            axes=range(self.x.rank),
+            gamma=None,
+            beta=None,
+            epsilon=1e-5,
+        )
+
+    @staticmethod
+    def _is_compatible_shape(shapea, shapeb):
+        if not len(shapea) == len(shapeb):
+            return False
+        for a, b in zip(shapea, shapeb):
+            if any_symbolic([a, b]):
+                continue
+            if a != b:
+                return False
+        return True
+
+    def type_inference(self):
+        rank = self.x.rank
+
+        # check valid axes
+        positive_axes = [axis + rank if axis < 0 else axis for axis in self.axes.val]
+        if not all([axis >= 0 and axis < rank for axis in positive_axes]):
+            raise ValueError("axes must be in the range of [-x.rank, x.rank-1].")
+
+        # check shape of gamma and beta
+        normalized_shape = [self.x.shape[i] for i in range(rank) if i in positive_axes]
+        if self.gamma is not None and not layer_norm._is_compatible_shape(list(self.gamma.shape), normalized_shape):
+            raise ValueError("Expect shape {} for gamma, but got shape {} instead".format(normalized_shape, self.gamma.shape))
+
+        if self.beta is not None and not layer_norm._is_compatible_shape(list(self.beta.shape), normalized_shape):
+            raise ValueError("Expect shape {} for beta, but got shape {} instead".format(normalized_shape, self.beta.shape))
+
+        x_shape = self.x.shape
+        return types.tensor(self.x.dtype, tuple(x_shape))
+
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        def np_layer_norm(x, axes, gamma, beta, epsilon=1e-5):
+            rank = len(x.shape)
+            axes = [axis + rank if axis < 0 else axis for axis in axes]
+            normalized_shape = [x.shape[i] if i in axes else 1 for i in range(rank)]
+            gamma = np.ones(shape=normalized_shape) if gamma is None else np.reshape(gamma, normalized_shape)
+            beta = np.zeros(shape=normalized_shape) if beta is None else np.reshape(beta, normalized_shape)
+            num = x - np.mean(x, axis=tuple(axes), keepdims=True)
+            dem = np.sqrt(
+                np.sum(np.square(num), axis=tuple(axes), keepdims=True)
+                / np.prod(normalized_shape)
+                + epsilon
+            )
+            return num / dem * gamma + beta
+
+        _axes = self.x.shape if self.axes is None else self.axes.val
+        _gamma = None if self.gamma is None else self.gamma.val
+        _beta = None if self.beta is None else self.beta.val
+        return np_layer_norm(self.x.val, _axes, _gamma, _beta, self.epsilon.val)
+
+
+@register_op
+class local_response_norm(Operation):
+    """
+    Apply local response normalization to the n-dimensional input tensor:
+
+    .. math::
+       x_i \\leftarrow \\dfrac{x_i}{\\left ( k + \\dfrac{\\alpha}{\\text{size}} \\sum_j x_j^2 \\right )^\\beta}
+
+
+    Parameters
+    ----------
+    x: tensor<[n,C,*D], T> (Required)
+        * Input tensor, ``3 <= rank(x) <= 4``.
+        * ``*D`` refers to the spatial dimensions, ``1 <= rank(*D) <= 2``.
+        * ``n`` is the batch dimension.
+    size: const i32 (Required)
+        * Amount of neighboring channels to normalize.
+    alpha: const T (Optional)
+        * Scale factor.
+        * Default is ``1e-4``.
+ beta: const T (Optional) + * An exponent. + * Default is ``0.75``. + k: const T (Optional) + * Additive factor. + * Default is ``1.0``. + + Returns + ------- + tensor<[n,C,*D], T> + * Same type and shape as the input tensor ``x``. + + Attributes + ---------- + T: fp16, fp32 + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + size=TensorInputType(const=True, type_domain=types.int32), + alpha=TensorInputType(const=True, optional=True, type_domain="T"), + beta=TensorInputType(const=True, optional=True, type_domain="T"), + k=TensorInputType(const=True, optional=True, type_domain="T"), + ) + + type_domains = { + "T": (types.fp16, types.fp32), + } + + def default_inputs(self): + return DefaultInputs( + alpha=1e-4, + beta=0.75, + k=1., + ) + + def type_inference(self): + x_shape = self.x.shape + return types.tensor(self.x.dtype, tuple(x_shape)) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/pool.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/pool.py new file mode 100644 index 00000000..b1d25fb2 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/pool.py @@ -0,0 +1,263 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools.converters.mil.mil import Operation, types +from coremltools.converters.mil.mil.block import curr_opset_version +from coremltools.converters.mil.mil.input_type import (DefaultInputs, + InputSpec, + TensorInputType) +from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op +from coremltools.converters.mil.mil.ops.defs._utils import \ + spatial_dimensions_out_shape +from coremltools.converters.mil.mil.ops.defs.iOS15 import _IOS15_TARGET + + +class Pooling(Operation): + """ + Pooling Op Superclass + """ + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + kernel_sizes=TensorInputType(const=True, type_domain=types.int32), + strides=TensorInputType(const=True, optional=True, type_domain=types.int32), + pad_type=TensorInputType(const=True, type_domain=types.str), + pad=TensorInputType(const=True, optional=True, type_domain=types.int32), + ceil_mode=TensorInputType(const=True, optional=True, type_domain=types.bool), + ) + + type_domains = { + "T": (types.fp16, types.fp32), + } + + def default_inputs(self): + num_spatial_dims = self.x.rank - 2 + return DefaultInputs( + strides=[1] * num_spatial_dims, + pad=[0] * 2 * num_spatial_dims, + ceil_mode=False, + ) + + def type_inference(self): + ksize = self.kernel_sizes.val + x_shape = self.x.shape + D_in_rank = len(x_shape) - 2 + + strides = [1] * D_in_rank if self.strides is None else self.strides.val + pad_type = "valid" if self.pad_type is None else self.pad_type.val.lower() + if pad_type not in ["valid", "same", "custom", "same_lower"]: + raise ValueError("Unrecognized value of pad_type : {}".format(pad_type)) + pad = None if self.pad is None else self.pad.val + D_in = x_shape[2:] # spatial dimensions + + if self.ceil_mode.val: + if D_in_rank > 2: + raise ValueError('pool: ceil_mode only supported for 1D or 2D pool') + if pad_type == "same" and self.ceil_mode.val: + raise ValueError("ceil_mode must be False when pad_type==same") + if pad is not None: + for i in range(D_in_rank): + if pad[2 * i] != pad[2 * i + 1]: + raise ValueError("Padding must be 
symmetric if ceil_mode is True") + + # The same_lower padding is not supported in iOS15 + if curr_opset_version() == _IOS15_TARGET and self.pad_type.val == "same_lower": + msg = "iOS15 version of pooling layers do not support pad_type = `same_lower`" + raise ValueError(msg) + + D_out_shape = spatial_dimensions_out_shape( + pad_type=pad_type, + input_shape=D_in, + kernel_shape=ksize, + strides=strides, + custom_pad=pad, + ceil_mode=self.ceil_mode.val, + ) + ret_shape = list(x_shape[:2]) + D_out_shape + return types.tensor(self.x.dtype, tuple(ret_shape)) + + +@register_op +class avg_pool(Pooling): + """ + Perform average pooling. Supports 1-D, 2-D, and 3-D pool (1, 2, or 3 spatial dimensions). + + Parameters + ---------- + x: tensor<[n,C_in,\*D_in], T> (Required) + * ``3 <= rank <= 5``. + * ``D_in`` are spatial dimensions, ``1 <= len(D_in) <= 3``. + * ``C_in`` is the number of input channels or depth dimensions. + * ``n`` is the batch dimension. + + kernel_sizes: const tensor<[K], T> (Required) + * The size of the window for each spatial dimension ``D_in`` of the + input tensor. + * ``K == len(D_in)`` + + strides: const tensor<[S],i32> (Optional, default to all 1s) + * Stride along each of the spatial dimensions. + * ``S == len(D_in)``. + + pad_type: const str (Required) + Must be one of ``valid``, ``same``, ``custom`` or ``same_lower``. + + * ``valid``: No padding. This is equivalent to custom pad with ``pad[i] = 0, for + all i``. + * ``same`` : This is equivalent to custom pad with ``pad[2*i] + pad[2*i+1] = kernel_size[i]``. + * ``custom``: Specify custom padding in the parameter pad. note that ``same`` + padding is equivalent to custom padding with + ``pad[2*i] + pad[2*i+1] = kernel_size[i]``. + * ``same_lower``: Similar to ``same`` but the padding + will place extra rows/cols on the top/left if the padding amount is odd. + + pad: const<[P],i32> (Optional. Default to all 0s) + * ``pad`` represents the number of elements to pad before and after each + dimension: ``pad[2*i], pad[2*i+1]`` are the pad size before and after spatial + dimension ``i``. + * ``P = 2 * len(D_in)``. + * ``pad`` should be specified if and only if ``pad_type == custom`` + + exclude_padding_from_average: const tensor<[], bool> (Optional, default to False) + * If ``True``, padded values (0s) are excluded from the denominator count + when computing the average over the kernel window. + + ceil_mode: const + * Same as PyTorch's ``ceil`` mode. + * ``ceil`` is used instead of floor in calculating the output size. + * Optional, defaults to ``False``. + * Only applicable when ``pad_type`` is ``valid`` or ``custom``. + * When ``ceil_mode`` is True, padding must be symmetric; that is, if specified, + ``pad[2*i] == pad[2*i+1]`` must hold. + + Returns + ------- + tensor<[n, C_out,\*D_out], T> + * Same rank as ``x``. + * When ``ceil_mode = False``: + * ``D_out[i] = floor[(D_in[i] + pad[2*i] + pad[2*i+1] - kernel_sizes[i]) / + strides[i]] +1, for i = 0, .., len(D_in) - 1`` is mathematically the same + as (when all parameters involved are integers): + + * ``D_out[i] = ceil [(D_in[i] + pad[2*i] + pad[2*i+1] - kernel_size[i] - 1) / stride[i]], for i = 0, .., len(D_in) - 1``. + * ``*D_out`` is all ones if ``global_pooling`` is ``true``. + + * When ``ceil_mode = True``: + * ``D_out[i] = ceil[(D_in[i] + pad[2*i] + pad[2*i+1] - kernel_sizes[i]) / strides[i]] +1, for i = 0, .., len(D_in) - 1`` + + * If ``(D_out[i] - 1) * strides[i] >= D_in[i] + pad[2*i] and (pad[2*i] + pad[2*i+1] > 0)`` + then ``D_out[i] = D_out[i] - 1``. 
+ + * The first equation is same as: + + * ``D_out[i] = floor[(D_in[i] + pad[2*i] + pad[2*i+1] - kernel_sizes[i] + strides[i] - 1) / strides[i]] +1, for i = 0, .., len(D_in) - 1`` + + Attributes + ---------- + T: fp16, fp32 + + See Also + -------- + l2_pool, max_pool + """ + + input_spec = ( + InputSpec( + exclude_padding_from_average=TensorInputType( + const=True, optional=True, type_domain=types.bool + ) + ) + + Pooling.input_spec + ) + + def default_inputs(self): + return super().default_inputs() + DefaultInputs( + exclude_padding_from_average=False, + ) + + +@register_op +class l2_pool(Pooling): + """ + Perform L2 pooling. Supports 1-D and 2-D pool. + + Parameters + ---------- + x: tensor<[n,C_in,*D_in], T> (Required) + * Only support 1d and 2d pooling. + * See ``avg_pool``. + + kernel_sizes: const tensor<[K], T> (Required) + * See ``avg_pool``. + + strides: const tensor<[S],i32> (Optional, default to all 1s) + * See ``avg_pool``. + + pad_type: const str (Required) + * See ``avg_pool``. + + pad: const<[P],i32> (Optional, default to all 0s) + * See ``avg_pool``. + + Returns + ------- + tensor<[n, C_out,*D_out], T> + * See ``avg_pool``. + + Attributes + ---------- + T: fp16, fp32 + + See Also + -------- + avg_pool, max_pool + """ + + def type_inference(self): + if self.x.rank - 2 > 2: + msg = "l2_pool only supports rank 1 or 2. Got rank: {}".format(self.x.rank - 2) + raise ValueError(msg) + return super().type_inference() + + +@register_op +class max_pool(Pooling): + """ + Perform max pooling. Supports 1-D, 2-D, and 3-D pool. + + Parameters + ---------- + x: tensor<[n,C_in,*D_in], T> (Required) + * See ``avg_pool``. + + kernel_sizes: const tensor<[K], T> (Required) + * See ``avg_pool``. + + strides: const tensor<[S],i32> (Optional, default to all 1s) + * See ``avg_pool``. + + pad_type: const str (Required) + * See ``avg_pool``. + + pad: const<[P],i32> (Optional, default to all 0s) + * See ``avg_pool``. + + ceil_mode: const + * see ``avg_pool``. + + Returns + ------- + tensor<[n, C_out,*D_out], T> + * See ``avg_pool``. + + Attributes + ---------- + T: fp16, fp32 + + See Also + -------- + avg_pool, l2_pool + """ + + pass diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/random.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/random.py new file mode 100644 index 00000000..f6663cf4 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/random.py @@ -0,0 +1,294 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools.converters.mil.mil import (get_new_symbol, + get_new_variadic_symbol, types) +from coremltools.converters.mil.mil.input_type import (DefaultInputs, + InputSpec, + TensorInputType) +from coremltools.converters.mil.mil.operation import Operation +from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op +from coremltools.converters.mil.mil.types.symbolic import any_symbolic + + +class RandomDistribution(Operation): + """ + Random Op Superclass + """ + input_spec = InputSpec( + shape=TensorInputType(type_domain=types.int32), + ) + out_dtype = types.fp32 + + def type_inference(self): + if any_symbolic(self.shape.shape): + # We can't infer any shape if shape has variable length. 
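+            # Even the output rank is unknown in this case, so a single
+            # variadic symbol is returned to stand for a shape of any rank.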
+ return types.tensor(self.out_dtype, (get_new_variadic_symbol(),)) + + # shape has fixed length here. + if self.shape.sym_val is None: + shape = tuple([get_new_symbol() for _ in range(self.shape.shape[0])]) + return types.tensor(self.out_dtype, shape) + + return types.tensor(self.out_dtype, tuple(self.shape.sym_val.tolist())) + + +""" +Random Op Implementation(s) +""" + + +@register_op +class random_bernoulli(RandomDistribution): + r""" + Returns a tensor with the specified shape, with random values from a Bernoulli + distribution. + + .. math:: + f(k) = \begin{cases}1-p &\text{if } k = 0\\ + p &\text{if } k = 1\end{cases} + + for :math:`k` in :math:`\{0, 1\}`. + + Parameters + ---------- + shape: (Required) + * Target output tensor shape. + * ``K`` is the rank of the output tensor. + ``shape[k] > 0`` for ``k = 0,..., K-1``. + prob: const (Optional) + * The probability of sampling ``1``. Defaults to ``0.5``. + seed: const (Optional) + * Seed to create a reproducible sequence of values across multiple invokes. + + Returns + ------- + <\*, T> + * A tensor of the given target output shape filled with random values. + + Attributes + ---------- + T: fp16, fp32 + + See Also + -------- + random_categorical, random_normal, random_uniform + """ + + input_spec = ( + InputSpec( + shape=TensorInputType(type_domain=types.int32), + prob=TensorInputType(const=True, optional=True, type_domain="T"), + seed=TensorInputType(const=True, optional=True, type_domain=types.int32), + ) + + RandomDistribution.input_spec + ) + + type_domains = { + "T": (types.fp16, types.fp32), + } + + def default_inputs(self): + return super().default_inputs() + \ + DefaultInputs( + seed=-1, + prob=0.5, + ) + + def type_inference(self): + self.out_dtype = self.prob.dtype + return super().type_inference() + + +@register_op +class random_categorical(Operation): + """ + Returns random values from a categorical distribution. + + Parameters + ---------- + shape: <\*D_in, T> + * N-dimensional tensor, one of ``logits`` (event log-probabilities) or ``probs`` + (event probabilities). The first ``N - 1`` dimensions specifies distributions, + and the last dimension represents a vector of probabilities. + + mode: const (Optional) + One of ``['logits', 'probs']``. Defaults to ``logits``. + + size: const (Optional) + Number of samples to draw. Defaults to ``1``. + + seed: const (Optional) + Seed to create a reproducible sequence of values across multiple invokes. + + Returns + ------- + <\*D_in[:-1] + [size], T> + * A tensor of the given target output shape filled with random values. + + Attributes + ---------- + T: fp16, fp32 + + See Also + -------- + random_bernoulli, random_normal, random_uniform + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + mode=TensorInputType(const=True, optional=True, type_domain=types.str), + size=TensorInputType(const=True, optional=True, type_domain=types.int32), + seed=TensorInputType(const=True, optional=True, type_domain=types.int32), + ) + + type_domains = { + "T": (types.fp16, types.fp32), + } + + def default_inputs(self): + return DefaultInputs( + mode="logits", + size=1, + seed=-1, + ) + + def type_inference(self): + self.out_dtype = self.x.dtype + output_shape = self.x.shape[:-1] + (self.size.val,) + return types.tensor(self.out_dtype, output_shape) + + +@register_op +class random_normal(RandomDistribution): + r""" + Returns a tensor with the specified shape, with random values from a normal + distribution. 
+ + Parameters + ---------- + shape: (Required) + * Target output tensor shape. + * ``K`` is the rank of the output tensor. + ``shape[k] > 0`` for ``k = 0,..., K-1``. + mean: const (Optional) + The mean (center) of the normal distribution. Defaults to 0.0. + stddev: const (Optional) + The standard deviation (width) of the normal distribution. Defaults to ``1.0``. + seed: const (Optional) + Seed to create a reproducible sequence of values across multiple invokes. + + Returns + ------- + <\*, T> + * A tensor of the given target output shape filled with random values. + + Attributes + ---------- + T: fp16, fp32 + + See Also + -------- + random_categorical, random_bernoulli, random_uniform + """ + + input_spec = ( + InputSpec( + shape=TensorInputType(type_domain=types.int32), + mean=TensorInputType(const=True, optional=True, type_domain="T"), + stddev=TensorInputType(const=True, optional=True, type_domain="T"), + seed=TensorInputType(const=True, optional=True, type_domain=types.int32), + ) + + RandomDistribution.input_spec + ) + + type_domains = { + "T": (types.fp16, types.fp32), + } + + def default_inputs(self): + return super().default_inputs() + \ + DefaultInputs( + mean=0., + stddev=1., + seed=-1, + ) + + def type_inference(self): + if self.mean.dtype != self.stddev.dtype: + raise ValueError("Incompatible primitive types in random_normal operation") + self.out_dtype = self.mean.dtype + return super().type_inference() + + +@register_op +class random_uniform(RandomDistribution): + r""" + Returns a tensor with the specified shape with random values from a uniform + distribution. Samples are uniformly distributed over the half-open interval + ``[low, high)`` (includes low, but excludes high). + + .. math:: + p(x) = \frac{1}{high - low} + + For a real number :math:`x`. + + When ``high == low``, values of ``low`` will be returned. If ``high < low``, + the results are officially undefined and may eventually raise an error. + + Parameters + ---------- + shape: (Required) + * Target output tensor shape. + * ``K`` is the rank of the output tensor. + ``shape[k] > 0`` for ``k = 0,..., K-1``. + low: const (Optional) + * Lower boundary of the output interval (inclusive). Defaults to ``0.0``. + high: const (Optional) + * Upper boundary of the output interval (exclusive). Defaults to ``1.0``. + seed: const (Optional) + * Seed to create a reproducible sequence of values across multiple invokes. + + Returns + ------- + <\*, T> + * A tensor of the given target output shape filled with random values. 
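+
+    For example, a minimal sketch of invoking this op through the MIL builder
+    (the surrounding program, shapes, and seed are illustrative assumptions):
+
+    .. sourcecode:: python
+
+        from coremltools.converters.mil import Builder as mb
+
+        @mb.program(input_specs=[mb.TensorSpec(shape=(1,))])
+        def prog(x):
+            # 2x3 tensor of samples drawn uniformly from [0.0, 1.0)
+            return mb.random_uniform(shape=[2, 3], low=0.0, high=1.0, seed=11)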
+
+    Attributes
+    ----------
+    T: fp16, fp32
+
+    See Also
+    --------
+    random_categorical, random_bernoulli, random_normal
+    """
+
+    input_spec = (
+        InputSpec(
+            shape=TensorInputType(type_domain=types.int32),
+            low=TensorInputType(const=True, optional=True, type_domain="T"),
+            high=TensorInputType(const=True, optional=True, type_domain="T"),
+            seed=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        )
+        + RandomDistribution.input_spec
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def default_inputs(self):
+        return super().default_inputs() + \
+            DefaultInputs(
+                low=0.,
+                high=1.,
+                seed=-1,
+            )
+
+    def type_inference(self):
+        if self.low.dtype != self.high.dtype:
+            raise ValueError("Incompatible primitive types in random_uniform operation")
+        self.out_dtype = self.low.dtype
+        return super().type_inference()
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/recurrent.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/recurrent.py
new file mode 100644
index 00000000..b6d5ee4e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/recurrent.py
@@ -0,0 +1,519 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from coremltools.converters.mil.mil import Operation, types
+from coremltools.converters.mil.mil.input_type import (
+    DefaultInputs,
+    InputSpec,
+    TensorInputType
+)
+from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op
+
+
+@register_op
+class gru(Operation):
+    r"""
+    Gated Recurrent Unit (GRU)
+
+    .. math::
+       r_t = \rm{recurrent\_activation}(W_{ir} x_t + b_{ir} + W_{hr} h_{t-1} + b_{hr})
+
+    .. math::
+       z_t = \rm{recurrent\_activation}(W_{iz} x_t + b_{iz} + W_{hz} h_{t-1} + b_{hz})
+
+    .. math::
+       o_t = \rm{activation}(W_{io} x_t + b_{io} + r_t * W_{ho} h_{t-1} + b_{ho})
+
+    .. math::
+       h_t = (1 - z_t) * o_t + z_t * h_{t-1}
+
+    Where:
+
+    * :math:`W_{i[r|o|z]}` are state input weights for reset, output and update gate, respectively.
+    * :math:`b_{i[r|o|z]}` are input biases for reset, output and update gate, respectively.
+    * :math:`W_{h[r|o|z]}` are recurrent/hidden weights on hidden state to reset, output, and update gates, respectively.
+    * :math:`b_{h[r|o|z]}` are recurrent/hidden biases on hidden state to reset, output, and update gates, respectively.
+    * :math:`h_t` is the hidden state at time ``t``.
+    * :math:`x_t` is the input at time ``t``.
+    * :math:`h_{t-1}` is the hidden state of the layer at time ``t-1`` or the initial
+      hidden state at time ``0``.
+    * :math:`r_t`, :math:`o_t`, and :math:`z_t` are the reset, new, and update gates, respectively.
+    * :math:`*` is elementwise product.
+
+    Parameters
+    ----------
+    x: <s, b, I, T> (Required)
+        * ``s`` is the sequence length, ``b`` is the batch size, and ``I`` is the
+          input dimension.
+
+    initial_h: <b, H, T> (Required)
+        * ``H`` denotes hidden size.
+
+    weight_ih: const<3*H, I, T> (Required) - Weight matrix
+        * ``weight_ih = [W_{ir} | W_{io} | W_{iz}]`` where ``[a|b]`` denotes column
+          concatenation and ``[a, b]`` denotes row concatenation. ``W_{ir}``,
+          ``W_{io}``, and ``W_{iz}`` have shape ``(H, I)``.
+        * This is used when direction="forward" or "reverse".
+
+    weight_hh: const<3*H, H, T> (Required) - Weight matrix
+        * ``weight_hh = [W_{hr} | W_{ho} | W_{hz}]``: ``W_{hr}``, ``W_{ho}``, and
+          ``W_{hz}`` have shape ``(H, H)``.
+        * This is used when direction="forward" or "reverse".
+
+    bias: const<3*H, T> (Optional) [Default all 0s]
+        * Combined input-hidden and hidden-hidden bias.
+        * The ``3*H`` entries are biases for ``[b_{ir} | b_{io} | b_{iz}]``.
+        * This is used when direction="forward" or "reverse".
+
+    direction: const<str> (Optional) [Default=forward]
+        * Either ``forward`` or ``reverse``.
+
+    output_sequence: const<bool> (Optional) [Default=False]
+        * Outputs every step if ``True``.
+
+    recurrent_activation: const<str> (Optional) [Default=sigmoid]
+        * Activation applied on update and reset gate.
+
+    activation: const<str> (Optional) [Default=tanh]
+        * Activation applied on output gate.
+
+    Returns
+    -------
+    <s, b, H, T> or <1, b, H, T>
+        * If ``output_sequence == True`` (hidden states from every step):
+          ``<s, b, H, T>``.
+        * Else ``<1, b, H, T>`` (hidden states of the final step).
+    <b, H, T>
+        * Hidden states of the final step.
+
+    Attributes
+    ----------
+    T: fp32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        initial_h=TensorInputType(type_domain="T"),
+        weight_ih=TensorInputType(const=True, type_domain="T"),
+        weight_hh=TensorInputType(const=True, type_domain="T"),
+        bias=TensorInputType(const=True, optional=True, type_domain="T"),
+        direction=TensorInputType(const=True, optional=True, type_domain=types.str),
+        output_sequence=TensorInputType(const=True, optional=True, type_domain=types.bool),
+        recurrent_activation=TensorInputType(const=True, optional=True, type_domain=types.str),
+        activation=TensorInputType(const=True, optional=True, type_domain=types.str)
+    )
+
+    type_domains = {
+        "T": (types.fp32,),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            bias=None,
+            direction="forward",
+            output_sequence=False,
+            recurrent_activation="sigmoid",
+            activation="tanh",
+        )
+
+    def type_inference(self):
+        if self.x.rank != 3:
+            raise ValueError(
+                "Invalid input shape. Expecting Rank 3 input, got {}".format(
+                    self.x.rank
+                )
+            )
+
+        sequence_length, batch_size, input_size = self.x.shape
+
+        if self.weight_ih.rank != 2:
+            raise ValueError(
+                "Invalid weight shape. Expecting Rank 2 input, got {}".format(
+                    self.weight_ih.rank
+                )
+            )
+        if self.weight_hh.rank != 2:
+            raise ValueError(
+                "Invalid weight shape. Expecting Rank 2 input, got {}".format(
+                    self.weight_hh.rank
+                )
+            )
+
+        hidden_dim, hidden_size = self.weight_hh.shape
+
+        direction = self.direction.val
+        valid_directions = {"forward", "reverse"}
+        if direction not in valid_directions:
+            raise ValueError(
+                "Direction {} not supported. Supported directions: {}".format(
+                    direction, valid_directions
+                )
+            )
+
+        dim_factor = 3
+        if hidden_size != (hidden_dim // dim_factor):
+            raise ValueError(
+                "Incorrect weight matrix: hidden dim size mismatch. \
+                Provided weight_ih {}, weight_hh {}. Expecting <3*H, H>".format(
+                    self.weight_ih.shape, self.weight_hh.shape
+                )
+            )
+
+        out_seq_len = sequence_length if self.output_sequence.val else 1
+        output_shape = [out_seq_len, batch_size, hidden_size]
+        output_h_shape = [batch_size, hidden_size]
+        return (
+            types.tensor(self.x.dtype, tuple(output_shape)),
+            types.tensor(self.x.dtype, tuple(output_h_shape)),
+        )
+
+
+@register_op
+class lstm(Operation):
+    r"""
+    Long Short-Term Memory (LSTM)
+
+    .. math::
+       i_t = \rm{recurrent\_activation}(W_{ii} x_t + B_{ii} + W_{hi} h_{t-1} + B_{hi})
+
+    .. math::
+       f_t = \rm{recurrent\_activation}(W_{if} x_t + B_{if} + W_{hf} h_{t-1} + B_{hf})
+
+    .. math::
+       z_t = \rm{cell\_activation}(W_{iz} x_t + B_{iz} + W_{hz} h_{t-1} + B_{hz})
+
+    .. math::
+       o_t = \rm{recurrent\_activation}(W_{io} x_t + B_{io} + W_{ho} h_{t-1} + B_{ho})
+
+    .. math::
+       c_t = f_t * c_{t-1} + i_t * z_t
+
+    .. math::
+       h_t = o_t * \rm{activation(c_t)}
+
+    Where:
+
+    * :math:`i_t`, :math:`f_t`, :math:`o_t`, and :math:`z_t` are input, forget, output, and cell gates,
+      respectively, at time ``t``.
+    * :math:`c_t` is cell state at time ``t``.
+    * :math:`h_t` is the hidden state at time ``t``.
+    * :math:`W_{ii}`, :math:`W_{if}`, :math:`W_{io}`, and :math:`W_{iz}` are input weights for input,
+      forget, output, and cell gate, respectively.
+    * :math:`B_{ii}`, :math:`B_{if}`, :math:`B_{io}`, and :math:`B_{iz}` are input biases for input,
+      forget, output, and cell gate, respectively.
+    * :math:`W_{hi}`, :math:`W_{hf}`, :math:`W_{ho}`, and :math:`W_{hz}` are recurrent weights for input,
+      forget, output, and cell gate, respectively.
+    * :math:`B_{hi}`, :math:`B_{hf}`, :math:`B_{ho}`, and :math:`B_{hz}` are recurrent biases for input,
+      forget, output, and cell gate, respectively.
+
+    Parameters
+    ----------
+    x: <s, b, I, T> (Required)
+        * ``s`` is the sequence length, ``b`` is the batch size, and ``I`` is the
+          input dimension.
+
+    initial_h: <b, DIRECTIONS*H, T> (Required)
+        * Initial hidden state. ``DIRECTIONS = 1`` for uni-directional.
+          ``DIRECTIONS = 2`` for bi-directional LSTM.
+        * ``H`` denotes hidden size.
+        * ``[b, :H]`` and ``[b, H:]`` represents forward and reverse direction
+          values, respectively.
+
+    initial_c: <b, DIRECTIONS*H, T> (Required)
+        * Initial cell state.
+        * Format is same as ``initial_h``.
+
+    weight_ih: const<4*H, I, T> (Required)
+        * Input-hidden weight matrix.
+        * Weight tensor should be in order of
+          ``[input_gate, forget_gate, output_gate, cell_gate]``.
+        * If direction=="bidirectional", this is applied in forward direction.
+        * If direction=="forward" or "reverse", these weights are used.
+
+    weight_hh: const<4*H, H, T> (Required)
+        * Hidden-hidden weight matrix.
+        * Weight tensor should be in order of
+          ``[input_gate, forget_gate, output_gate, cell_gate]``.
+        * If direction=="bidirectional", this is applied in forward direction.
+        * If direction=="forward" or "reverse", these weights are used.
+
+    bias: const<4*H, T> (Optional, default all 0s)
+        * bias = input-hidden bias + hidden-hidden bias
+        * If direction=="bidirectional", this is applied in forward direction.
+        * If direction=="forward" or "reverse", this bias is used.
+
+    peephole: const<3*H, T> (Optional, default all 0s)
+        * Weight tensor for peephole.
+        * Order is ``[input_gate, forget_gate, output_gate]``.
+        * Shape of each peephole vector is ``(H,)`` (``H`` is hidden size).
+        * If direction=="bidirectional", this is applied in forward direction.
+        * If direction=="forward" or "reverse", these weights are used.
+
+    weight_ih_back: const<4*H, I, T> (Optional)
+        * Input-hidden weight matrix for the backward direction of a `bidirectional LSTM`.
+        * Weight tensor should be in order of
+          ``[input_gate, forget_gate, output_gate, cell_gate]``.
+        * Must be provided for `bidirectional LSTM`.
+        * This is only used when `direction` is "bidirectional".
+        * For direction="reverse" use `weight_ih` instead.
+
+    weight_hh_back: const<4*H, H, T> (Optional) - Hidden-hidden weight matrix
+        * Hidden-hidden weight matrix for the backward direction of a `bidirectional LSTM`.
+        * Weight tensor should be in order of
+          ``[input_gate, forget_gate, output_gate, cell_gate]``.
+        * Must be provided for `bidirectional LSTM`.
+        * This is only used when `direction` is "bidirectional".
+        * For direction="reverse" use `weight_hh` instead.
+
+    bias_back: const<4*H, T> (Optional, default all 0s)
+        * bias = input-hidden bias + hidden-hidden bias.
+        * Bias of backward direction for `bidirectional LSTM`.
+        * This is only used when `direction` is "bidirectional".
+        * For direction="reverse" use `bias` instead.
+
+    peephole_back: const<3*H, T> (Optional, default all 0s)
+        * Weight tensor for peephole in backward direction for `bidirectional LSTM`.
+        * Order is ``[input_gate, forget_gate, output_gate]``.
+        * Shape of each peephole vector is ``(H,)`` (``H`` is hidden size).
+        * This is only used when `direction` is "bidirectional".
+        * For direction="reverse" use `peephole` instead.
+
+    direction: const<str> (Optional) [Default=forward]
+        * One of the following: ``forward``, ``reverse``, or ``bidirectional``.
+        * Must match ``DIRECTIONS`` in initial states and weight parameters.
+
+    output_sequence: const<bool> (Optional) [Default=False]
+        * Outputs every step if ``True``.
+
+    recurrent_activation: const<str> (Optional) [Default=sigmoid]
+        * Activation applied on input, forget, and output gates.
+
+    cell_activation: const<str> (Optional) [Default=tanh]
+        * Activation applied on cell gate.
+
+    activation: const<str> (Optional) [Default=tanh]
+        * Activation applied on output gate.
+
+    clip: const<T> (Optional) [Default=None]
+        * Cell gate is clipped to ``[-clip, +clip]``.
+
+    Returns
+    -------
+    <s, b, DIRECTIONS*H, T> or <1, b, DIRECTIONS*H, T>
+        * If ``output_sequence == True`` (hidden states from every step):
+          ``<s, b, DIRECTIONS*H, T>``.
+        * Else ``<1, b, DIRECTIONS*H, T>`` (hidden states of the final step).
+    <b, DIRECTIONS*H, T>
+        * Hidden states of the final step.
+    <b, DIRECTIONS*H, T>
+        * Memory state of the final step.
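+
+    Examples
+    --------
+    A minimal builder sketch (illustrative only; it assumes the public MIL
+    builder, ``coremltools.converters.mil.Builder``, and the shapes and random
+    weights below are placeholders)::
+
+        import numpy as np
+        from coremltools.converters.mil import Builder as mb
+
+        s, b, I, H = 4, 1, 3, 2  # sequence, batch, input dim, hidden size
+
+        @mb.program(input_specs=[mb.TensorSpec(shape=(s, b, I))])
+        def prog(x):
+            out, h, c = mb.lstm(
+                x=x,
+                initial_h=np.zeros((b, H), dtype=np.float32),
+                initial_c=np.zeros((b, H), dtype=np.float32),
+                weight_ih=np.random.rand(4 * H, I).astype(np.float32),
+                weight_hh=np.random.rand(4 * H, H).astype(np.float32),
+                output_sequence=True,
+            )
+            return out, h, c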
+
+    Attributes
+    ----------
+    T: fp32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        initial_h=TensorInputType(type_domain="T"),
+        initial_c=TensorInputType(type_domain="T"),
+        weight_ih=TensorInputType(const=True, type_domain="T"),  # ifoz layout
+        weight_hh=TensorInputType(const=True, type_domain="T"),  # ifoz layout
+        bias=TensorInputType(const=True, optional=True, type_domain="T"),  # ifoz layout
+        peephole=TensorInputType(const=True, optional=True, type_domain="T"),  # ifo layout
+        weight_ih_back=TensorInputType(const=True, optional=True, type_domain="T"),  # ifoz layout
+        weight_hh_back=TensorInputType(const=True, optional=True, type_domain="T"),  # ifoz layout
+        bias_back=TensorInputType(const=True, optional=True, type_domain="T"),  # ifoz layout
+        peephole_back=TensorInputType(const=True, optional=True, type_domain="T"),  # ifo layout
+        direction=TensorInputType(const=True, optional=True, type_domain=types.str),
+        output_sequence=TensorInputType(const=True, optional=True, type_domain=types.bool),
+        recurrent_activation=TensorInputType(const=True, optional=True, type_domain=types.str),
+        cell_activation=TensorInputType(const=True, optional=True, type_domain=types.str),
+        activation=TensorInputType(const=True, optional=True, type_domain=types.str),
+        clip=TensorInputType(const=True, optional=True, type_domain="T"),
+    )
+
+    type_domains = {
+        "T": (types.fp32,),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            bias=None,
+            direction="forward",
+            output_sequence=False,
+            recurrent_activation="sigmoid",
+            cell_activation="tanh",
+            activation="tanh",
+            peephole=None,
+            clip=None)
+
+    def type_inference(self):
+        if self.x.rank != 3:
+            raise ValueError(
+                "Invalid input shape. Expecting Rank 3 input, got {}".format(
+                    self.x.rank
+                )
+            )
+        sequence_length, batch_size, input_size = self.x.shape
+
+        def weight_shape_check(wt_ih, wt_hh):
+            if wt_ih.rank != 2 or wt_hh.rank != 2:
+                raise ValueError(
+                    "Expecting Rank 2 input, got weight_ih rank: {}, weight_hh rank: {}".format(
+                        wt_ih.rank, wt_hh.rank
+                    )
+                )
+
+            hidden_size = wt_hh.shape[1]
+            if wt_hh.shape[0] // hidden_size != 4 or wt_ih.shape[0] // hidden_size != 4:
+                raise ValueError(
+                    "Incorrect weight matrix: hidden dim size mismatch. \
+                    Provided weight_ih {}, weight_hh {}. Expecting <4*H, H>".format(
+                        wt_ih.shape, wt_hh.shape
+                    )
+                )
+
+        direction = self.direction.val
+        valid_directions = {"forward", "reverse", "bidirectional"}
+        if direction not in valid_directions:
+            raise ValueError(
+                "Direction {} not supported. Supported directions: {}".format(
+                    direction, valid_directions
+                )
+            )
+
+        weight_shape_check(self.weight_ih, self.weight_hh)
+        if direction == "bidirectional":
+            weight_shape_check(self.weight_ih_back, self.weight_hh_back)
+
+        hidden_dim, hidden_size = self.weight_hh.shape
+
+        dim_factor = 8 if direction == "bidirectional" else 4
+        out_seq_len = sequence_length if self.output_sequence.val else 1
+        num_directions = dim_factor // 4
+        output_shape = [out_seq_len, batch_size, num_directions * hidden_size]
+        output_h_shape = [batch_size, num_directions * hidden_size]
+        output_c_shape = [batch_size, num_directions * hidden_size]
+        return (
+            types.tensor(self.x.dtype, tuple(output_shape)),
+            types.tensor(self.x.dtype, tuple(output_h_shape)),
+            types.tensor(self.x.dtype, tuple(output_c_shape)),
+        )
+
+
+@register_op
+class rnn(Operation):
+    r"""
+    Recurrent Neural Network (RNN)
+
+    .. math::
+       h_t = \rm{activation}(W_{ih} x_t + b_{ih} + W_{hh} h_{t-1} + b_{hh})
+
+    Where:
+
+    * :math:`W_{ih}` is the input weight.
+    * :math:`W_{hh}` is the hidden/recurrent weight.
+    * :math:`h_t` is the hidden state at time ``t``.
+    * :math:`x_t` is the input at time ``t``.
+    * :math:`h_{t-1}` is the hidden state of the layer at time ``t-1`` or the initial
+      hidden state at ``t = 0``.
+    * :math:`b_{ih}` is the input bias.
+    * :math:`b_{hh}` is the hidden/recurrent bias.
+
+    Parameters
+    ----------
+    x: <s, b, I, T> (Required)
+        * ``s`` is the sequence length, ``b`` is the batch size, and ``I`` is the
+          input dimension.
+
+    initial_h: <b, H, T> (Required)
+        * ``H`` denotes hidden size.
+
+    weight_ih: const<H, I, T> (Required) - Input-hidden weight matrix
+
+    weight_hh: const<H, H, T> (Required) - Hidden-hidden weight matrix
+
+    bias: const<H, T> (Optional) [Default all 0s]
+        * bias for input-hidden and hidden-hidden
+
+    direction: const<str> (Optional) [Default=forward]
+        * Either ``forward`` or ``reverse``.
+
+    output_sequence: const<bool> (Optional) [Default=False]
+        * Outputs every step if ``True``.
+
+    activation: const<str> (Optional) [Default=tanh]
+        * Supported activation functions: ``relu``, ``tanh``, ``sigmoid``,
+          ``sigmoid_hard``, ``scaled_tanh``, and ``linear``.
+
+    Returns
+    -------
+    <s, b, H, T> or <1, b, H, T>
+        * If ``output_sequence == True`` (hidden states from every step):
+          ``<s, b, H, T>``.
+        * Else ``<1, b, H, T>`` (hidden states of the final step).
+    <b, H, T>
+        * Hidden states of the final step.
+
+    Attributes
+    ----------
+    T: fp32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        initial_h=TensorInputType(type_domain="T"),
+        weight_ih=TensorInputType(const=True, type_domain="T"),
+        weight_hh=TensorInputType(const=True, type_domain="T"),
+        bias=TensorInputType(const=True, optional=True, type_domain="T"),
+        direction=TensorInputType(const=True, optional=True, type_domain=types.str),
+        output_sequence=TensorInputType(const=True, optional=True, type_domain=types.bool),
+        activation=TensorInputType(const=True, optional=True, type_domain=types.str),
+    )
+
+    type_domains = {
+        "T": (types.fp32,),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            bias=None,
+            direction="forward",
+            output_sequence=False,
+            activation="tanh")
+
+    def type_inference(self):
+        if self.x.rank != 3:
+            raise ValueError(
+                f"Invalid input shape. Expecting Rank 3 input, got {self.x.rank}"
+            )
+
+        sequence_length, batch_size, input_size = self.x.shape
+
+        if self.weight_ih.rank != 2 or self.weight_hh.rank != 2:
+            raise ValueError(
+                f"Invalid weight shape. Expecting Rank 2 input, got weight_ih "
+                f"{self.weight_ih.rank}, weight_hh {self.weight_hh.rank}"
+            )
+
+        hidden_size, _ = self.weight_ih.shape
+
+        direction = self.direction.val
+        valid_directions = {"forward", "reverse"}
+        if direction not in valid_directions:
+            raise ValueError(
+                f"Direction {direction} not supported. Supported directions: {valid_directions}"
+            )
+
+        out_seq_len = sequence_length if self.output_sequence.val else 1
+        output_shape = [out_seq_len, batch_size, hidden_size]
+        output_h_shape = [batch_size, hidden_size]
+        return (
+            types.tensor(self.x.dtype, tuple(output_shape)),
+            types.tensor(self.x.dtype, tuple(output_h_shape)),
+        )
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/reduction.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/reduction.py
new file mode 100644
index 00000000..ce934303
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/reduction.py
@@ -0,0 +1,558 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+import numpy as np
+
+from coremltools.converters.mil.mil import Operation, precondition, types
+from coremltools.converters.mil.mil.input_type import (DefaultInputs,
+                                                       InputSpec,
+                                                       TensorInputType)
+from coremltools.converters.mil.mil.operation import VALUE
+from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op
+
+
+class ReductionAxes(Operation):
+    """
+    Superclass for reduction ops that reduce over a list of axes.
+    """
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        axes=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        keep_dims=TensorInputType(const=True, optional=True, type_domain=types.bool),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            axes=None,
+            keep_dims=False,
+        )
+
+    def type_inference(self):
+        x_type = self.x.dtype
+        x_shape = self.x.shape
+        axes = self.axes.val if self.axes is not None else None
+        if axes is None:
+            axes = range(self.x.rank)
+        keep_dims = self.keep_dims.val
+
+        reduced_shape = list(x_shape)
+        if keep_dims:
+            for i in axes:
+                reduced_shape[i] = 1
+        else:
+            # sort reverse so we can delete shape elements back to front
+            axes = [axis if axis >= 0 else axis + len(reduced_shape) for axis in axes]
+            for i in sorted(axes)[::-1]:
+                reduced_shape.pop(i)
+        if len(reduced_shape) == 0:
+            return x_type  # scalar
+
+        return types.tensor(x_type, tuple(reduced_shape))
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        axes = tuple(self.axes.val) if self.axes is not None else None
+        return self.get_operator()(self.x.val, axis=axes, keepdims=self.keep_dims.val)
+
+    def get_operator(self):
+        raise NotImplementedError()
+
+
+class ReductionAxis(Operation):
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        axis=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        keep_dims=TensorInputType(const=True, optional=True, type_domain=types.bool),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            axis=-1,
+            keep_dims=False,
+        )
+
+    def _find_reduced_shape(self):
+        x_shape = self.x.shape
+        axis = self.axis.val
+
+        reduced_shape = list(x_shape)
+        axis = axis if axis >= 0 else axis + len(reduced_shape)
+        if self.keep_dims.val:
+            reduced_shape[axis] = 1
+        else:
+            reduced_shape.pop(axis)
+        return reduced_shape
+
+    def type_inference(self):
+        x_type = self.x.dtype
+        reduced_shape = self._find_reduced_shape()
+        return types.tensor(x_type, tuple(reduced_shape))
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
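+        # Fold the reduction at conversion time: apply the underlying NumPy
+        # operator, then restore the reduced axis as a size-1 dimension when
+        # keep_dims is set (the operators returned by get_operator() drop it).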
+        tmp = self.get_operator()(self.x.val, axis=self.axis.val)
+        reduced_shape = self._find_reduced_shape()
+        if self.keep_dims.val:
+            tmp = np.reshape(tmp, reduced_shape)
+        return tmp
+
+    def get_operator(self):
+        raise NotImplementedError()
+
+
+class reduce_arg(ReductionAxis):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+    def type_inference(self):
+        x_shape = self.x.shape
+        axis = self.axis.val
+
+        reduced_shape = list(x_shape)
+        axis = axis if axis >= 0 else axis + len(reduced_shape)
+        if self.keep_dims.val:
+            reduced_shape[axis] = 1
+        else:
+            reduced_shape.pop(axis)
+
+        return types.tensor(types.int32, tuple(reduced_shape))
+
+
+"""
+Reduction op implementations
+"""
+
+@register_op
+class reduce_argmax(reduce_arg):
+    """
+    Computes the indices of the maximum value across dimensions of a tensor.
+    In case of ties, the identity of the return value is not guaranteed.
+
+    Parameters
+    ----------
+    x: <\*,T> (Required)
+        * Must be 1-dimensional or higher.
+
+    axis: const<i32> (Optional)
+        * The dimension to reduce. Default is ``-1``.
+
+    keep_dims: const<bool> (Optional, default=False)
+        * If ``False``, the rank is reduced by ``1`` by removing the dimension
+          specified in ``axis``. If ``True``, retain reduced axis with length ``1``.
+
+    Returns
+    -------
+    <\*, int32>
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32
+
+    References
+    ----------
+    See `tf.math.argmax <https://www.tensorflow.org/api_docs/python/tf/math/argmax>`_.
+    """
+
+    def get_operator(self):
+        return np.argmax
+
+
+@register_op
+class reduce_argmin(reduce_arg):
+    """
+    Computes the indices of the minimum value across dimensions of a tensor.
+    In case of ties, the identity of the return value is not guaranteed.
+
+    Parameters
+    ----------
+    x: <\*,T> (Required)
+        * Must be 1-dimensional or higher.
+
+    axis: const<i32> (Optional)
+        * The dimension to reduce. Default is ``-1``.
+
+    keep_dims: const<bool> (Optional, default=False)
+        * If ``False``, the rank is reduced by ``1`` by removing the dimension specified
+          in ``axis``, otherwise retain reduced axis with length ``1``.
+
+    Returns
+    -------
+    <\*, int32>
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32
+
+    References
+    ----------
+    See `tf.math.argmin <https://www.tensorflow.org/api_docs/python/tf/math/argmin>`_.
+
+    """
+
+    def get_operator(self):
+        return np.argmin
+
+
+@register_op
+class reduce_l1_norm(ReductionAxes):
+    """
+    Computes the L1 normalization of elements across given dimensions of the input tensor.
+
+    Parameters
+    ----------
+    x: <\*,T> (Required)
+        * Must be 1-dimensional or higher.
+
+    axes: const<K, i32> (Optional, default="None", reduce on all axes.)
+        * The dimensions to reduce.
+
+    keep_dims: const<bool> (Optional, default=False)
+        * If ``False``, the rank is reduced by ``1`` for each entry in ``axes``,
+          otherwise retain reduced axes with length ``1``.
+
+    Returns
+    -------
+    <\*,T>
+        * Scalar or tensor: The reduced tensor.
+
+    Attributes
+    ----------
+    T: i32, fp16, fp32
+
+    References
+    ----------
+    See ``reduce_mean``.
+
+    """
+
+    def get_operator(self):
+        def l1_norm(x, axis=None, keepdims=False):
+            return np.sum(np.abs(x), axis=axis, keepdims=keepdims)
+
+        return l1_norm
+
+
+@register_op
+class reduce_l2_norm(ReductionAxes):
+    """
+    Computes the L2 normalization of elements across given dimensions of the input tensor.
+
+    Parameters
+    ----------
+    x: <\*,T> (Required)
+        * Must be 1-dimensional or higher.
+
+    axes: const<K, i32> (Optional, default="None", reduce on all axes.)
+        * The dimensions to reduce.
+
+    keep_dims: const<bool> (Optional, default=False)
+        * If ``False``, the rank is reduced by ``1`` for each entry in ``axes``,
+          otherwise retain reduced axes with length ``1``.
+
+    Returns
+    -------
+    <\*,T>
+        * Scalar or tensor: The reduced tensor.
+
+    Attributes
+    ----------
+    T: i32, fp16, fp32
+    """
+
+    def get_operator(self):
+        def l2_norm(x, axis=None, keepdims=False):
+            return np.sqrt(np.sum(np.square(x), axis=axis, keepdims=keepdims))
+
+        return l2_norm
+
+
+@register_op
+class reduce_log_sum(ReductionAxes):
+    """
+    Computes the natural logarithm of the sum of all the elements across given dimensions
+    of the input tensor.
+
+    Parameters
+    ----------
+    x: <\*,T> (Required)
+        * Must be 1-dimensional or higher.
+
+    axes: const<K, i32> (Optional, default="None", reduce on all axes.)
+        * The dimensions to reduce.
+
+    keep_dims: const<bool> (Optional, default=False)
+        * If ``False``, the rank is reduced by ``1`` for each entry in ``axes``,
+          otherwise retain reduced axes with length ``1``.
+
+    Returns
+    -------
+    <\*,T>
+        * Scalar or tensor: The reduced tensor.
+
+    Attributes
+    ----------
+    T: i32, fp16, fp32
+    """
+
+    def get_operator(self):
+        def log_sum(x, axis=None, keepdims=False):
+            return np.log(np.sum(x, axis=axis, keepdims=keepdims))
+
+        return log_sum
+
+
+@register_op
+class reduce_log_sum_exp(ReductionAxes):
+    """
+    Computes the natural logarithm of the sum of the exponentials of the elements across
+    given dimensions of the input tensor. It is a smooth approximation of the maximum
+    function, more numerically stable than ``log(sum(exp(input)))``. It avoids
+    overflows caused by taking the ``exp`` of large inputs and underflows caused by
+    taking the ``log`` of small inputs.
+
+    Parameters
+    ----------
+    x: <\*,T> (Required)
+        * Must be 1-dimensional or higher.
+
+    axes: const<K, i32> (Optional, default="None", reduce on all axes.)
+        * The dimensions to reduce.
+
+    keep_dims: const<bool> (Optional, default=False)
+        * If ``False``, the rank is reduced by ``1`` for each entry in ``axes``,
+          otherwise retain reduced axes with length ``1``.
+
+    Returns
+    -------
+    <\*,T>
+        * Scalar or tensor: The reduced tensor.
+
+    Attributes
+    ----------
+    T: i32, fp16, fp32
+
+    References
+    ----------
+    See `tf.math.reduce_logsumexp <https://www.tensorflow.org/api_docs/python/tf/math/reduce_logsumexp>`_.
+
+    """
+
+    def get_operator(self):
+        def operator(a, axis=None, keepdims=False):
+            max_values = np.amax(a, axis=axis, keepdims=True)
+            temp = np.exp(a - max_values)
+
+            if not keepdims:
+                max_values = np.squeeze(max_values, axis=axis)
+
+            sum = np.sum(temp, axis=axis, keepdims=keepdims)
+            result = np.log(sum)
+            return result + max_values
+
+        return operator
+
+
+@register_op
+class reduce_max(ReductionAxes):
+    """
+    Computes the maximum of elements across given dimensions of the input tensor.
+
+    Parameters
+    ----------
+    x: <\*,T> (Required)
+        * Must be 1-dimensional or higher.
+
+    axes: const<K, i32> (Optional, default="None", reduce on all axes.)
+        * The dimensions to reduce.
+
+    keep_dims: const<bool> (Optional, default=False)
+        * If ``False``, the rank is reduced by ``1`` for each entry in ``axes``,
+          otherwise retain reduced axes with length ``1``.
+
+    Returns
+    -------
+    <\*,T>
+        * Scalar or tensor: The reduced tensor.
+
+    Attributes
+    ----------
+    T: i32, fp16, fp32
+    """
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+    def get_operator(self):
+        return np.max
+
+
+@register_op
+class reduce_mean(ReductionAxes):
+    """
+    Computes the mean of elements across given dimensions of the input tensor.
+
+    Parameters
+    ----------
+    x: <\*,T> (Required)
+        * Must be 1-dimensional or higher.
+
+    axes: const<K, i32> (Optional, default="None", reduce on all axes.)
+        * The dimensions to reduce.
+
+    keep_dims: const<bool> (Optional, default=False)
+        * If ``False``, the rank is reduced by ``1`` for each entry in ``axes``,
+          otherwise retain reduced axes with length ``1``.
+
+    Returns
+    -------
+    <\*,T>
+        * Scalar or tensor: The reduced tensor.
+
+    Attributes
+    ----------
+    T: i32, fp16, fp32
+
+    References
+    ----------
+    For an example, see `tf.math.reduce_mean <https://www.tensorflow.org/api_docs/python/tf/math/reduce_mean>`_.
+    """
+
+    def get_operator(self):
+        return np.mean
+
+
+@register_op
+class reduce_min(ReductionAxes):
+    """
+    Computes the minimum of elements across given dimensions of the input tensor.
+
+    Parameters
+    ----------
+    x: <\*,T> (Required)
+        * Must be 1-dimensional or higher.
+
+    axes: const<K, i32> (Optional, default="None", reduce on all axes.)
+        * The dimensions to reduce.
+
+    keep_dims: const<bool> (Optional, default=False)
+        * If ``False``, the rank is reduced by ``1`` for each entry in ``axes``,
+          otherwise retain reduced axes with length ``1``.
+
+    Returns
+    -------
+    <\*,T>
+        * Scalar or tensor: The reduced tensor.
+
+    Attributes
+    ----------
+    T: i32, fp16, fp32
+    """
+
+    def get_operator(self):
+        return np.min
+
+
+@register_op
+class reduce_prod(ReductionAxes):
+    """
+    Computes the product of elements across given dimensions of the input tensor.
+
+    Parameters
+    ----------
+    x: <\*,T> (Required)
+        * Must be 1-dimensional or higher.
+
+    axes: const<K, i32> (Optional, default="None", reduce on all axes.)
+        * The dimensions to reduce.
+
+    keep_dims: const<bool> (Optional, default=False)
+        * If ``False``, the rank is reduced by ``1`` for each entry in ``axes``,
+          otherwise retain reduced axes with length ``1``.
+
+    Returns
+    -------
+    <\*,T>
+        * Scalar or tensor: The reduced tensor.
+
+    Attributes
+    ----------
+    T: i32, fp16, fp32
+
+    """
+
+    def get_operator(self):
+        return np.prod
+
+
+@register_op
+class reduce_sum(ReductionAxes):
+    """
+    Computes the sum of elements across given dimensions of the input tensor.
+
+    Parameters
+    ----------
+    x: <\*,T> (Required)
+        * Must be 1-dimensional or higher.
+
+    axes: const<K, i32> (Optional, default="None", reduce on all axes.)
+        * The dimensions to reduce.
+
+    keep_dims: const<bool> (Optional, default=False)
+        * If ``False``, the rank is reduced by ``1`` for each entry in ``axes``,
+          otherwise retain reduced axes with length ``1``.
+
+    Returns
+    -------
+    <\*,T>
+        * Scalar or tensor: The reduced tensor.
+
+    Attributes
+    ----------
+    T: i32, fp16, fp32
+    """
+
+    def get_operator(self):
+        return np.sum
+
+
+@register_op
+class reduce_sum_square(ReductionAxes):
+    """
+    Computes the sum of squares of elements across given dimensions of the input tensor.
+
+    Parameters
+    ----------
+    x: <\*,T> (Required)
+        * Must be 1-dimensional or higher.
+
+    axes: const<K, i32> (Optional, default="None", reduce on all axes.)
+        * The dimensions to reduce.
+
+    keep_dims: const<bool> (Optional, default=False)
+        * If ``False``, the rank is reduced by ``1`` for each entry in ``axes``,
+          otherwise retain reduced axes with length ``1``.
+
+    Returns
+    -------
+    <\*,T>
+        * Scalar or tensor: The reduced tensor.
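+
+    Examples
+    --------
+    A minimal builder sketch (illustrative only; it assumes the public MIL
+    builder, ``coremltools.converters.mil.Builder``)::
+
+        from coremltools.converters.mil import Builder as mb
+
+        @mb.program(input_specs=[mb.TensorSpec(shape=(2, 3))])
+        def prog(x):
+            # sum of squares over the last axis; the result has shape (2,)
+            return mb.reduce_sum_square(x=x, axes=[-1], keep_dims=False)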
+
+    Attributes
+    ----------
+    T: i32, fp16, fp32
+    """
+
+    def get_operator(self):
+        def sum_square(x, axis=None, keepdims=False):
+            return np.sum(np.square(x), axis=axis, keepdims=keepdims)
+
+        return sum_square
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/scatter_gather.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/scatter_gather.py
new file mode 100644
index 00000000..6650c443
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/scatter_gather.py
@@ -0,0 +1,549 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import numpy as np
+
+from coremltools.converters.mil.mil import Operation, types
+from coremltools.converters.mil.mil.input_type import (DefaultInputs,
+                                                       InputSpec,
+                                                       TensorInputType)
+from coremltools.converters.mil.mil.operation import (SYMBOL, VALUE,
+                                                      precondition)
+from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op
+from coremltools.converters.mil.mil.ops.defs._utils import compute_gather
+from coremltools.converters.mil.mil.types.symbolic import (
+    is_compatible_symbolic_vector)
+
+
+@register_op
+class gather(Operation):
+    """
+    Gather slices from input ``x`` along dimension ``axis`` according to ``indices``,
+    similar to `tf.gather <https://www.tensorflow.org/api_docs/python/tf/gather>`_.
+
+    * If ``indices`` is scalar (0-D):
+
+    .. math::
+       output[p_0, ..., p_{axis-1}, ~~~~~~~~~~~~~~~~~~~~~~~~ p_{axis+1}, ..., p_{rank(x)-1}] =
+    .. math::
+       x[p_0, ..., p_{axis-1}, ~~~~~~~~~ indices, ~~~~~~~~ p_{axis+1}, ..., p_{rank(x)-1}]
+
+    Where ``rank(x)`` is the rank of ``x``. The ``output`` has rank ``rank(x) - 1``.
+
+    * If ``indices`` is 1-D tensor:
+
+    .. math::
+       output[p_0, ..., p_{axis-1}, ~~~~~~~~~~~~~ i, ~~~~~~~~~~~~~ p_{axis+1}, ..., p_{rank(*D)-1}] =
+    .. math::
+       x[p_0, ..., p_{axis-1}, ~~~~~~~~ indices[i], ~~~~~~~~ p_{axis+1}, ..., p_{rank(*D)-1}]
+
+    The output has rank ``rank(x)``.
+
+    * In general:
+
+    .. math::
+       output[p_0, ..., p_{axis-1}, ~~~~~~~~ i_0, ..., i_{M-1}, ~~~~~~~~ p_{axis+1}, ..., p_{rank(x)-1}] =
+    .. math::
+       x[p_0, ..., p_{axis-1}, ~~~~~~~ indices[i_0, ..., i_{M-1}], ~~~~~~~ p_{axis+1}, ..., p_{rank(x)-1}]
+
+    Where ``M = rank(indices)``.
+
+    Parameters
+    ----------
+    x: tensor<\*D, T> (Required)
+    indices: tensor<\*N, i32> (Required)
+        * Indices values may be negative. More precisely, ``-D[axis] <= v < D[axis]`` for ``v`` in ``indices``.
+    axis: const i32 (Optional. Default=``0``)
+        * Negative axis is supported.
+
+    Returns
+    -------
+    tensor<\*K, T>
+        * Where ``K = D[:axis] + N + D[axis+1:]``.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32
+
+    References
+    ----------
+    See `tf.gather <https://www.tensorflow.org/api_docs/python/tf/gather>`_.
+
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        indices=TensorInputType(type_domain=types.int32),
+        axis=TensorInputType(const=True, optional=True, type_domain=types.int32),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            axis=0,
+        )
+
+    @precondition(allow=VALUE | SYMBOL)
+    def value_inference(self):
+        x = self.x.sym_val
+        indices = self.indices.val
+        if indices is None:
+            # only allow x to be symbolic. indices cannot.
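+            # Symbolic indices leave nothing to fold at conversion time.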
+            return None
+        return compute_gather(
+            params=self.x.sym_val,
+            indices=self.indices.val,
+            axis=self.axis.val,
+            batch_dims=0
+        )
+
+    def type_inference(self):
+        out_type = self.x.dtype
+
+        if self.axis.val < -self.x.rank or self.axis.val >= self.x.rank:
+            raise IndexError(
+                "Axis value {} is out of bounds for {} node {}".format(
+                    self.axis.val, self.op_type, self.name
+                )
+            )
+
+        output_rank = self.x.rank - 1 + self.indices.rank
+        if output_rank == 0:
+            # output scalar
+            return out_type
+
+        axis = self.axis.val
+        axis = axis if axis >= 0 else axis + self.x.rank
+        out_shape = self.x.shape[:axis] + self.indices.shape + self.x.shape[axis + 1 :]
+        return types.tensor(out_type, out_shape)
+
+
+@register_op
+class scatter(Operation):
+    """
+    Scatter ``updates`` to ``data`` at locations ``indices`` at dimension ``axis``
+    by operation ``mode``.
+
+    Example: ``mode == update``.
+
+    * For ``i`` in ``[0, len(indices)]``:
+
+    .. math::
+       output[p_0, ..., p_{axis-1}, indices[i], p_{axis+1}, ..., p_D] =
+    .. math::
+       updates[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]
+
+    * For ``j != i``:
+
+    .. math::
+       output[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D] =
+    .. math::
+       data[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D]
+
+    Example: ``mode == add``.
+
+    * For ``i`` in ``[0, len(indices)]``:
+
+    .. math::
+       output[p_0, ..., p_{axis-1}, indices[i], p_{axis+1}, ..., p_D] =
+    .. math::
+       updates[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D] +
+    .. math::
+       x[p_0, ..., p_{axis-1}, indices[i], p_{axis+1}, ..., p_D]
+
+    * For ``j != i``:
+
+    .. math::
+       output[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D] =
+    .. math::
+       data[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D]
+
+    Parameters
+    ----------
+    data: tensor<\*D, T> (Required)
+    indices: tensor<[C], i32> (Required)
+        * 1-D tensor.
+    updates: tensor<\*K, T> (Required)
+        * ``K = data.shape[:axis] + [len(indices)] + data.shape[axis+1:]``.
+    axis: const i32 (Optional)
+        * Default to ``0``.
+    mode: const string (Optional)
+        * Can be the following modes: ``update``, ``add``, ``sub``, ``mul``,
+          ``div``, ``max``, ``min``.
+        * Default value is ``add``.
+
+    Returns
+    -------
+    tensor<\*D, T>
+        * With the same type and shape as input ``data``.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32
+
+    For example:
+        data = [[1, 2, 3], [4, 5, 6]]
+        indices = [1, 0]
+        updates = [[5, 6, 7], [8, 9, 10]]
+        axis = 0
+        mode = "add"
+
+    produces:
+        [[9, 11, 13], [9, 11, 13]]
+    """
+
+    input_spec = InputSpec(
+        data=TensorInputType(type_domain="T"),
+        indices=TensorInputType(type_domain=types.int32),
+        updates=TensorInputType(type_domain="T"),
+        axis=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        mode=TensorInputType(const=True, optional=True, type_domain=types.str),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            axis=0,
+            mode="add",
+        )
+
+    def type_inference(self):
+        if self.axis.val < -self.data.rank or self.axis.val >= self.data.rank:
+            raise IndexError(
+                "Axis value {} is out of bounds for {} node {}".format(
+                    self.axis.val, self.op_type, self.name
+                )
+            )
+
+        axis = self.axis.val
+        axis = axis if axis >= 0 else axis + self.data.rank
+        expected_updates_shape = (
+            self.data.shape[:axis] + self.indices.shape + self.data.shape[axis + 1 :]
+        )
+
+        err = "Updates shape {} is incorrect. It should be {}.".format(
+            self.updates.shape, expected_updates_shape
+        )
+        assert is_compatible_symbolic_vector(
+            self.updates.shape, tuple(expected_updates_shape)
+        ), err
+
+        return self.data.sym_type
+
+
+@register_op
+class gather_along_axis(Operation):
+    """
+    Take the values along ``axis`` at locations ``indices``.
+
+    .. math::
+       idx = indices[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]
+    .. math::
+       output[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D] = x[p_0, ..., p_{axis-1}, idx, p_{axis+1}, ..., p_D]
+
+    Parameters
+    ----------
+    x: tensor<\*D, T> (Required)
+    indices: tensor<\*K, i32> (Required)
+        * ``rank(indices) == rank(x)``.
+    axis: const i32 (Optional):
+        * Default to ``0``.
+
+    Returns
+    -------
+    tensor<\*K, T>:
+        * Output tensor has the same shape as ``indices``.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        indices=TensorInputType(type_domain=types.int32),
+        axis=TensorInputType(const=True, optional=True, type_domain=types.int32),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            axis=0,
+        )
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        x = self.x.val
+        indices = self.indices.val
+        axis = self.axis.val
+        return np.take_along_axis(x, indices, axis)
+
+    def type_inference(self):
+
+        if self.x.rank != self.indices.rank:
+            raise ValueError(
+                "Rank mismatch between input and indices. \
+                Input rank: {}, indices rank: {}".format(
+                    self.x.rank, self.indices.rank
+                )
+            )
+
+        if self.axis.val < -self.x.rank or self.axis.val >= self.x.rank:
+            raise IndexError(
+                "Axis value {} is out of bounds for {} node {}".format(
+                    self.axis.val, self.op_type, self.name
+                )
+            )
+
+        axis = self.axis.val
+        axis = axis if axis >= 0 else axis + self.x.rank
+
+        for i in range(self.x.rank):
+            if i != axis:
+                assert self.x.shape[i] == self.indices.shape[i]
+
+        return types.tensor(self.x.dtype, self.indices.shape)
+
+
+@register_op
+class scatter_along_axis(Operation):
+    """
+    Scatter ``updates`` to ``data`` at locations ``indices`` along ``axis`` dimension
+    using ``mode`` operation.
+
+    Example: ``mode == update``.
+
+    * For ``i`` in ``[0, len(indices)]``:
+
+    .. math::
+       idx = indices[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]
+    .. math::
+       output[p_0, ..., p_{axis-1}, idx, p_{axis+1}, ..., p_D] =
+    .. math::
+       updates[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]
+
+    * For ``j != i``:
+
+    .. math::
+       output[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D] =
+    .. math::
+       data[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D]
+
+    Example: ``mode == add``.
+
+    * For ``i`` in ``[0, len(indices)]``:
+
+    .. math::
+       idx = indices[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]
+    .. math::
+       output[p_0, ..., p_{axis-1}, idx, p_{axis+1}, ..., p_D] =
+    .. math::
+       updates[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D] +
+    .. math::
+       x[p_0, ..., p_{axis-1}, indices[i], p_{axis+1}, ..., p_D]
+
+    * For ``j != i``:
+
+    .. math::
+       output[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D] =
+    .. math::
+       data[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D]
+
+    Parameters
+    ----------
+    data: tensor<\*D, T> (Required)
+    indices: tensor<\*K, i32> (Required)
+        * ``rank(indices) == rank(data)``.
+    updates: tensor<\*K, T> (Required)
+        * Must be the same shape as ``indices``.
+    axis: const i32 (Optional)
+        * Default to ``0``.
+    mode: const string (Optional)
+        * Default to ``add``.
+        * Can be the following modes: ``update``, ``add``, ``sub``, ``mul``,
+          ``div``, ``max``, ``min``.
+
+    Returns
+    -------
+    tensor<\*D, T>
+        * With the same type and shape as input ``data``.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32
+    """
+
+    input_spec = InputSpec(
+        data=TensorInputType(type_domain="T"),
+        indices=TensorInputType(type_domain=types.int32),
+        updates=TensorInputType(type_domain="T"),
+        axis=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        mode=TensorInputType(const=True, optional=True, type_domain=types.str),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            axis=0,
+            mode="add",
+        )
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        data = np.copy(self.data.val)
+        indices = self.indices.val
+        updates = self.updates.val
+        axis = self.axis.val
+        np_output = data
+        np.put_along_axis(np_output, indices, updates, axis=axis)
+        return np_output
+
+    def type_inference(self):
+        if self.axis.val < -self.data.rank or self.axis.val >= self.data.rank:
+            raise IndexError(
+                "Axis value {} is out of bounds for {} node {}".format(
+                    self.axis.val, self.op_type, self.name
+                )
+            )
+
+        axis = self.axis.val
+        axis = axis if axis >= 0 else axis + self.data.rank
+
+        assert is_compatible_symbolic_vector(
+            self.indices.shape, self.updates.shape
+        )
+        assert self.data.rank == self.indices.rank
+        for i in range(self.data.rank):
+            if i != axis:
+                assert self.data.shape[i] == self.indices.shape[i]
+
+        return self.data.sym_type
+
+
+@register_op
+class gather_nd(Operation):
+    """
+    Gather slices from ``x`` according to ``indices``, similar to
+    `tf.gather_nd <https://www.tensorflow.org/api_docs/python/tf/gather_nd>`_.
+
+    The ``indices`` is a K-dim tensor, where ``indices[i_0,...,i_{K-2}]`` defines a slice
+    of ``x``:
+
+    .. math::
+       output[i_0, ..., i_{K-2}] = x[indices[i_0, ..., i_{K-2}]]
+
+    Where ``K = rank(indices)`` and ``x[indices[i_0, ..., i_{K-2}]]`` has rank
+    ``rank(x) - indices.shape[-1]``.
+
+    Parameters
+    ----------
+    x: tensor<\*D, T> (Required)
+    indices: tensor<\*K, i32> (Required)
+
+    Returns
+    -------
+    tensor<\*V, T>
+        * ``V = K[:-1] + D[K[-1]:]``, where ``D = x.shape`` and ``K = indices.shape``.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32
+
+    References
+    ----------
+    See `tf.gather_nd <https://www.tensorflow.org/api_docs/python/tf/gather_nd>`_.
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        indices=TensorInputType(type_domain=types.int32),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32),
+    }
+
+    def type_inference(self):
+        assert self.indices.shape[-1] <= self.x.rank
+        out_type = self.x.dtype
+        out_shape = self.indices.shape[:-1] + self.x.shape[self.indices.shape[-1] :]
+        return types.tensor(out_type, out_shape)
+
+
+@register_op
+class scatter_nd(Operation):
+    """
+    Scatter ``updates`` to ``data`` at locations ``indices``.
+
+    The ``indices`` is a K-dim tensor, where ``indices[i_0,...,i_{K-2}]`` defines a
+    slice of ``data``, ``K = rank(indices)``, and ``data[indices[i_0, ..., i_{K-2}]]``
+    has rank ``rank(data) - indices.shape[-1]``.
+
+    * Example: ``mode == update``: The ``output`` is set to ``data`` initially, and
+      the op updates ``output`` as follows:
+
+    .. math::
+       output[indices[i_0, ..., i_{K-2}]] = updates[indices[i_0, ..., i_{K-2}]]
+
+    * Example: ``mode == add``. The update rule is:
+
+    .. math::
+       output[indices[i_0, ..., i_{K-2}]] += updates[indices[i_0, ..., i_{K-2}]]
+
+    Parameters
+    ----------
+    data: tensor<\*D, T> (Required)
+    indices: tensor<\*K, i32> (Required)
+    updates: tensor<\*K, T> (Required)
+        * Must have shape ``K[:-1] + data.shape[K[-1]:]``.
+    mode: const string (Optional)
+        * Default to ``add``.
+        * Can be the following modes: ``update``, ``add``, ``sub``, ``mul``,
+          ``div``, ``max``, ``min``.
+
+    Returns
+    -------
+    tensor<\*D, T>
+        * A tensor with the same shape and type as ``data``.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32
+    """
+
+    input_spec = InputSpec(
+        data=TensorInputType(type_domain="T"),
+        indices=TensorInputType(type_domain=types.int32),
+        updates=TensorInputType(type_domain="T"),
+        mode=TensorInputType(const=True, optional=True, type_domain=types.str),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            mode="add",
+        )
+
+    def type_inference(self):
+        assert self.indices.shape[-1] <= self.data.rank
+        expected_updates_shape = (
+            self.indices.shape[:-1] + self.data.shape[self.indices.shape[-1] :]
+        )
+        assert is_compatible_symbolic_vector(
+            self.updates.shape, tuple(expected_updates_shape)
+        )
+        return self.data.sym_type
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/tensor_operation.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/tensor_operation.py
new file mode 100644
index 00000000..f5cc3d36
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/tensor_operation.py
@@ -0,0 +1,1320 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import math
+
+import numpy as np
+
+from coremltools.converters.mil.mil import (
+    get_new_symbol,
+    get_new_variadic_symbol,
+    types,
+)
+from coremltools.converters.mil.mil.input_type import (
+    DefaultInputs,
+    InputSpec,
+    ListOrTensorInputType,
+    TensorInputType,
+    TupleInputType,
+)
+from coremltools.converters.mil.mil.operation import (
+    NONE,
+    SYMBOL,
+    VALUE,
+    Operation,
+    precondition,
+)
+from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op
+from coremltools.converters.mil.mil.ops.defs._utils import MAX_SIZE_CONSTANT_FOLDING
+from coremltools.converters.mil.mil.types.symbolic import (
+    any_symbolic,
+    is_compatible_symbolic_vector,
+    is_symbolic,
+)
+
+
+@register_op
+class band_part(Operation):
+    """
+    Returns a tensor setting everything outside a center band to zeros for the innermost
+    matrix. Special cases:
+
+    - ``band_part(x, 0, -1)`` returns upper triangular part.
+    - ``band_part(x, -1, 0)`` returns lower triangular part.
+    - ``band_part(x, 0, 0)`` returns diagonal.
+
+    Parameters
+    ----------
+    x: tensor<\*?, T> (Required)
+        * Input tensor.
+    lower: const<i32> (Optional)
+        * Number of lower / below sub-diagonals to keep. If negative, keep entire
+          lower triangle.
+        * Defaults to ``-1`` (keep the entire lower triangle).
+    upper: const<i32> (Optional)
+        * Number of upper / above sub-diagonals to keep. If negative, keep entire
+          upper triangle.
+        * Defaults to ``-1`` (keep the entire upper triangle).
+
+    Returns
+    -------
+    tensor<\*?, T>
+        * Same type and shape as the input tensor.
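+
+    Examples
+    --------
+    A minimal builder sketch (illustrative only; it assumes the public MIL
+    builder, ``coremltools.converters.mil.Builder``)::
+
+        from coremltools.converters.mil import Builder as mb
+
+        @mb.program(input_specs=[mb.TensorSpec(shape=(4, 4))])
+        def prog(x):
+            # keep the main diagonal and one band above it; zero the rest
+            return mb.band_part(x=x, lower=0, upper=1)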
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32, bool
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        lower=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        upper=TensorInputType(const=True, optional=True, type_domain=types.int32),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32, types.bool),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            lower=-1,
+            upper=-1)
+
+    def type_inference(self):
+        return self.x.sym_type
+
+
+@register_op
+class cumsum(Operation):
+    """
+    Returns the cumulative sum of the input along the given axis.
+
+    Parameters
+    ----------
+    x: tensor<\*?, T> (Required)
+        * Input tensor.
+    axis: const<i32> (Optional)
+        * Defaults to ``0``.
+        * Axis for which the cumulative sum is computed.
+    exclusive: const<bool> (Optional)
+        * Defaults to ``False``.
+        * When set to ``False``, inclusive cumsum is computed, that is the first element of
+          the output is identical to the first element in the input.
+        * When set to ``True``, exclusive cumsum is computed, which sets the first element
+          of the output to ``0``.
+    reverse: const<bool> (Optional)
+        * Defaults to ``False``.
+        * When set to ``True``, perform cumsum in the reverse order.
+
+    Returns
+    -------
+    tensor<\*?, T>
+        * Same type and shape as the input tensor.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        axis=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        exclusive=TensorInputType(const=True, optional=True, type_domain=types.bool),
+        reverse=TensorInputType(const=True, optional=True, type_domain=types.bool),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            axis=0,
+            exclusive=False,
+            reverse=False)
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        data = np.copy(self.x.val)
+        axis = self.axis.val
+        reverse = self.reverse.val
+        exclusive = self.exclusive.val
+        if reverse:
+            data = np.flip(data, axis=axis)
+        data = np.cumsum(data, axis=axis)
+        if exclusive:
+            zero_shape = np.copy(data.shape)
+            zero_shape[axis] = 1
+            # Exclusive cumsum: shift the inclusive result one step along the
+            # axis, filling the first position with zeros.
+            data = np.concatenate(
+                (np.zeros(zero_shape, dtype=data.dtype),
+                 np.take(data, range(data.shape[axis] - 1), axis=axis)),
+                axis=axis,
+            )
+        if reverse:
+            data = np.flip(data, axis=axis)
+        return data
+
+    def type_inference(self):
+        # Check range of axis
+        if self.axis.val < -1 or self.axis.val > self.x.rank - 1:
+            raise ValueError(
+                "axis should be in the range [-1, {}]".format(self.x.rank - 1)
+            )
+
+        return self.x.sym_type
+
+
+@register_op
+class fill(Operation):
+    """
+    Returns a tensor with a given shape filled with a constant value.
+
+    Parameters
+    ----------
+    shape: tensor<[K], i32> (Required)
+        * Target output tensor shape.
+        * ``K`` is the rank of the output tensor. ``shape[k] > 0`` for ``k = 0,..., K-1``.
+    value: const<T> (Optional)
+        * Defaults to ``0.0``.
+        * Constant value to fill in.
+
+    Returns
+    -------
+    tensor<\*?, T>
+        * Tensor with shape determined by the input shape.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32, bool
+    """
+
+    input_spec = InputSpec(
+        shape=TensorInputType(type_domain=types.int32),
+        value=TensorInputType(const=True, optional=True, type_domain="T"),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32, types.bool),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            value=0.)
+
+    def type_inference(self):
+        if any_symbolic(self.shape.shape):
+            # We can't infer any shape if shape has variable length.
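+            # Unknown rank: fall back to a single variadic symbol.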
+            return types.tensor(self.value.dtype, (get_new_variadic_symbol(),))
+
+        # shape has fixed length here.
+        if self.shape.sym_val is None:
+            ret_shape = tuple([get_new_symbol() for _ in range(self.shape.shape[0])])
+            return types.tensor(self.value.dtype, ret_shape)
+
+        return types.tensor(self.value.dtype, tuple(self.shape.sym_val.tolist()))
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        return np.full(shape=self.shape.val, fill_value=self.value.val)
+
+
+@register_op
+class non_maximum_suppression(Operation):
+    """
+    Applies non-maximum suppression (NMS) on the input box coordinates according
+    to their intersection-over-union (IoU).
+
+    NMS selects a subset of bounding boxes in descending order of score, and removes
+    boxes that have high intersection-over-union (IOU) overlap with previously-selected
+    boxes.
+
+    Parameters
+    ----------
+    boxes: tensor<[n, B, 4], T> (Required)
+        * Box coordinates on which to perform NMS. The coordinates are expected in
+          CENTER_SIZE_WIDTH_FIRST format (x, y, width, height), where (x, y) is the center.
+    scores: tensor<[n, B, K], T> (Required)
+        * Scores for each one of the boxes. ``K`` is the number of classes.
+    iou_threshold: const<T> (Required)
+        * The intersection over union (``IoU``) threshold over which boxes are
+          suppressed. NMS removes all overlapping boxes with ``IoU > iou_threshold``.
+    score_threshold: const<T> (Required)
+        * Before IoU suppression is performed, boxes with class scores below this
+          threshold are rejected.
+    max_boxes: const<i32> (Required)
+        * Maximum number of boxes to select. If the number of surviving boxes is
+          smaller, the output is padded up to this number.
+    per_class_suppression: const<bool> (Optional)
+        * Defaults to ``False``.
+        * If ``True``, suppression is performed independently within boxes of each class.
+
+    Returns
+    -------
+    tensor<[n, max_boxes, 4], T>
+        * Coordinates of selected boxes.
+    tensor<[n, max_boxes, K], T>
+        * Scores of selected boxes.
+    tensor<[n, max_boxes], i32>
+        * Indices of selected boxes.
+    tensor<[n], i32>
+        * Number of boxes selected for each batch.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    input_spec = InputSpec(
+        boxes=TensorInputType(type_domain="T"),
+        scores=TensorInputType(type_domain="T"),
+        iou_threshold=TensorInputType(const=True, type_domain="T"),
+        score_threshold=TensorInputType(const=True, type_domain="T"),
+        max_boxes=TensorInputType(const=True, type_domain=types.int32),
+        per_class_suppression=TensorInputType(const=True, optional=True, type_domain=types.bool),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            per_class_suppression=False)
+
+    def type_inference(self):
+        boxes_dtype = self.boxes.dtype
+        scores_dtype = self.scores.dtype
+        n_batch, _, n_score = self.scores.shape
+        max_boxes = self.max_boxes.val
+
+        return (
+            types.tensor(boxes_dtype, (n_batch, max_boxes, 4)),
+            types.tensor(scores_dtype, (n_batch, max_boxes, n_score)),
+            types.tensor(types.int32, (n_batch, max_boxes)),
+            types.tensor(types.int32, (n_batch,)),
+        )
+
+
+@register_op
+class non_zero(Operation):
+    """
+    Returns the indices of the elements in the given tensor that are non-zero.
+
+    Parameters
+    ----------
+    x: tensor<\*?, T> (Required)
+        * Input tensor; indices are returned for all of its non-zero values.
+
+    Returns
+    -------
+    tensor<[N, R], int32>
+        * 2-dimensional tensor containing the indices of the non-zero elements.
+          Each row is the index of a non-zero value.
+        * ``N`` is the number of non-zero elements, ``R`` is the rank of the input.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32, bool
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T")
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32, types.bool),
+    }
+
+    def type_inference(self):
+        if self.x.val is not None:
+            value = self.value_inference()
+            return types.tensor(types.int32, value.shape)
+        shape = tuple([get_new_symbol(), self.x.rank])
+        return types.tensor(types.int32, shape)
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        return np.transpose(np.nonzero(self.x.val))
+
+
+@register_op
+class one_hot(Operation):
+    """
+    Returns one-hot vectors: the locations given by ``indices`` take the ``on_value``,
+    while all other locations take the ``off_value``.
+
+    Parameters
+    ----------
+    indices: tensor<[D], i32> (Required)
+        * Tensor, values indicate the locations for each one-hot vector to take the ``on_value``.
+    one_hot_vector_size: i32 (Required)
+        * Indicates the size (depth) of each one-hot vector.
+    axis: const i32 (Optional)
+        * Indicates which dimension to append the new axis.
+        * If the input indices is rank ``D``, the output tensor will have rank ``D+1``.
+        * Defaults to ``-1`` (the last dimension).
+    on_value: const T (Optional)
+        * Value for the locations defined in ``indices``.
+        * Defaults to ``1``.
+    off_value: const T (Optional)
+        * Defaults to ``0``.
+
+    Returns
+    -------
+    tensor<\*?, T>
+        * A tensor that contains one-hot vectors.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32, bool
+    """
+
+    input_spec = InputSpec(
+        indices=TensorInputType(type_domain=types.int32),
+        one_hot_vector_size=TensorInputType(type_domain=types.int32),
+        axis=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        on_value=TensorInputType(const=True, optional=True, type_domain="T"),
+        off_value=TensorInputType(const=True, optional=True, type_domain="T"),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32, types.bool),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            axis=-1,
+            on_value=1,
+            off_value=0,
+        )
+
+    def type_inference(self):
+        on_type = self.on_value.dtype
+        off_type = self.off_value.dtype
+
+        if on_type != off_type:
+            raise TypeError(
+                "Parameters on_value and off_value must have same input types."
+            )
+
+        if self.axis.val < -self.indices.rank - 1 or self.axis.val > self.indices.rank:
+            raise IndexError(
+                "Axis value {} is out of bounds for {} node {}".format(
+                    self.axis.val, self.op_type, self.name
+                )
+            )
+
+        indices_shape = list(self.indices.shape)
+
+        depth_value = self.one_hot_vector_size.val
+        if depth_value is None:
+            depth_value = get_new_symbol()
+        elif depth_value < 0:
+            raise ValueError("Parameter one_hot_vector_size must be non-negative")
+
+        retshape = indices_shape
+
+        if self.axis.val < 0:
+            cut = len(retshape) + self.axis.val + 1
+        else:
+            cut = self.axis.val
+        retshape = retshape[0:cut] + [depth_value] + retshape[cut:]
+
+        return types.tensor(on_type, retshape)
+
+
+@register_op
+class pad(Operation):
+    """
+    Pad a tensor.
+
+    Parameters
+    ----------
+
+    x: tensor<[\*D_in], T> (Required)
+
+    pad: tensor<[2\*N], i32> (Required)
+        ``N <= D_in``. Last ``N`` dimensions of ``x`` are padded as follows:
+
+        * For each dimension ``i`` of ``x`` if ``i >= D_in - N``:
+            * pad ``pad[2*i]`` elements before ``x[..,i,..]``.
+            * pad ``pad[2*i+1]`` elements after ``x[..,i,..]``.
+        * If mode is "reflect" then ``pad[2*i]`` and ``pad[2*i+1]`` can be at
+          most ``D[i]-1``.
+        * If mode is "replicate" then ``pad[2*i]`` and ``pad[2*i+1]`` can be
+          at most ``D[i]``.
+
+    mode: const<str> (Optional)
+        * Defaults to ``constant``.
+        * Must be one of the following values:
+          ``constant``, ``reflect``, or ``replicate``.
+
+    constant_val: const<T> (Optional)
+        * Defaults to ``0``.
+        * Constant value to pad. Ignored if ``mode != constant``.
+
+    Returns
+    -------
+    tensor<[\*D_out], T>
+        * Tensor with same type as the input.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        pad=TensorInputType(type_domain=types.int32),
+        mode=TensorInputType(const=True, optional=True, type_domain=types.str),
+        constant_val=TensorInputType(const=True, optional=True, type_domain="T"),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            mode="constant",
+            constant_val=0.,
+        )
+
+    def type_inference(self):
+        in_shape = self.x.shape
+        ret_shape = list(in_shape)
+        pad = self.pad
+        if len(pad.shape) != 1:
+            raise ValueError("Pad should be a 1D tensor!")
+        if self.mode and self.mode.val not in {'constant', 'reflect', 'replicate'}:
+            raise ValueError("Pad mode should be one of {'constant', 'reflect', 'replicate'}")
+
+        if pad.val is None:
+            for i in range(self.pad.shape[0] // 2):
+                ret_shape[-self.pad.shape[0] // 2 + i] = get_new_symbol()
+        else:
+            pad = pad.val
+            pad = pad.copy()
+
+            if len(pad) % 2 != 0:
+                raise ValueError("Number of elements in the argument Pad must be divisible by 2.")
+
+            pad = pad.reshape(-1, 2)
+
+            if pad.shape[0] > len(ret_shape):
+                raise ValueError(
+                    "Number of dimensions specified through pad must be less than or equal "
+                    "to the rank of input x"
+                )
+
+            for i in range(len(pad)):
+                ret_shape[-len(pad) + i] = ret_shape[-len(pad) + i] + pad[i][0] + pad[i][1]
+
+        return types.tensor(self.x.dtype, tuple(ret_shape))
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        # NumPy `edge` mode is equivalent to `replicate` mode of PyTorch and CoreML
+        mode = "edge" if self.mode.val == "replicate" else self.mode.val
+        pad_val = self.pad.val
+
+        if pad_val is None:
+            return None
+
+        if len(self.x.val.shape) > (pad_val.shape[0] // 2):
+            updated_pad = np.zeros(len(self.x.val.shape) * 2)
+            updated_pad[-pad_val.shape[0] :] = pad_val
+            pad_val = updated_pad
+        pad_val = pad_val.reshape(-1, 2).astype(np.int32)
+        if mode == "constant":
+            return np.pad(
+                self.x.val, pad_val, mode, constant_values=self.constant_val.val
+            )
+        # NumPy does not support non-constant mode and constant_values argument
+        return np.pad(self.x.val, pad_val, mode)
+
+
+@register_op
+class range_1d(Operation):
+    """
+    Returns a numpy-like 1-D range sequence.
+
+    Parameters
+    ----------
+    end: <T> (Required)
+        * The upper limit of the sequence, exclusive.
+    start: <T> (Required)
+        * The start point of the sequence.
+    step: <T> (Required)
+        * Number that increments ``start``.
+
+    Returns
+    -------
+    tensor<M, T>
+        * A 1-D tensor, where ``M`` is the length of the sequence.
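+
+    Examples
+    --------
+    A minimal builder sketch (illustrative only; it assumes the public MIL
+    builder, ``coremltools.converters.mil.Builder``)::
+
+        from coremltools.converters.mil import Builder as mb
+
+        @mb.program(input_specs=[mb.TensorSpec(shape=(1,))])
+        def prog(x):
+            # produces [0., 2., 4., 6., 8.]
+            return mb.range_1d(start=0.0, end=10.0, step=2.0)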
+
+    Attributes
+    ----------
+    T: i32, fp16, fp32
+    """
+
+    input_spec = InputSpec(
+        end=TensorInputType(type_domain="T"),
+        start=TensorInputType(type_domain="T"),
+        step=TensorInputType(type_domain="T"),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32),
+    }
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        start = self.start.val
+        end = self.end.val
+        step = self.step.val
+        shape = (end - start) / step
+        # To avoid creating a constant greater than 1 MB, an upper bound on
+        # the size of the resulting array is enforced.
+        if shape > MAX_SIZE_CONSTANT_FOLDING:
+            return None
+        return np.arange(start, end, step)
+
+    def type_inference(self):
+        start = self.start.sym_val
+        end = self.end.sym_val
+        step = self.step.sym_val
+
+        if (
+            (self.start.dtype != self.end.dtype)
+            or (self.start.dtype != self.step.dtype)
+            or (self.end.dtype != self.step.dtype)
+        ):
+            raise TypeError(
+                "All inputs to the range operation must have the same type."
+            )
+
+        if all(sym_val is not None for sym_val in (start, end, step)):
+            shape = (end - start) / step
+            shape = shape if is_symbolic(shape) else int(math.ceil(shape))
+            shape = tuple([shape])
+        else:
+            shape = tuple(
+                [
+                    get_new_symbol(),
+                ]
+            )
+
+        return types.tensor(self.start.dtype, shape)
+
+
+@register_op
+class tile(Operation):
+    """
+    Returns a new tensor by replicating the input ``x`` multiple times.
+    Dimension ``i`` of ``x`` will be replicated ``reps[i]`` times.
+
+    Parameters
+    ----------
+    x: tensor<\*?, T> (Required)
+        * Input tensor.
+    reps: tensor<[rank(x)], i32> (Required)
+        * A 1-D tensor with length ``rank(x)``, indicating how many times to
+          replicate the input along each dimension.
+
+    Returns
+    -------
+    tensor<\*?, T>:
+        * An n-D tensor with same type as the input.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32, bool
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        reps=TensorInputType(type_domain=types.int32),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32, types.bool),
+    }
+
+    def type_inference(self):
+        x_type = self.x.dtype
+        x_shape = np.array(self.x.shape)
+
+        reps = self.reps.sym_val
+
+        if reps is None:
+            out_shape = tuple([get_new_symbol() for _ in range(self.x.rank)])
+            return types.tensor(x_type, out_shape)
+
+        if len(reps) == 0 or len(reps) != self.x.rank:
+            msg = (
+                "Length of the reps ({}) must be at least 1, and "
+                "equal to the rank of the input x ({})"
+            )
+            raise ValueError(msg.format(len(reps), self.x.rank))
+
+        out_shape = []
+        for i, rep in enumerate(reps):
+            if not is_symbolic(rep):
+                if rep <= 0:
+                    raise ValueError("All entries of the reps parameter must be greater than 0")
+
+            if is_symbolic(rep) or is_symbolic(x_shape[i]):
+                out_shape.append(get_new_symbol())
+            else:
+                out_shape.append(rep * x_shape[i])
+
+        out_shape = tuple(out_shape)
+
+        return types.tensor(x_type, out_shape)
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        # Infer only when the replication counts are known (non-symbolic).
+        if self.reps.val is None:
+            return None
+        return np.tile(self.x.val, reps=self.reps.val)
+
+
+@register_op
+class argsort(Operation):
+    """
+    Returns a tensor containing the indices of the sorted values along a given axis
+    of the input tensor.
+
+    Parameters
+    ----------
+    x: <\*?, T> (Required)
+        * Input tensor.
+    axis: const (Optional)
+        * Defaults to ``-1`` (the last dimension).
+        * Axis to perform the operation on.
+    ascending: const (Optional)
+        * Defaults to ``False``, which sorts in descending order.
+        * ``True`` to sort in ascending order.
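+
+    For example, with ``x = [3, 1, 2]`` and the default descending order,
+    the returned indices are ``[0, 2, 1]``.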
+ + Returns + ------- + tensor<\*?, int32> + * Tensor containing the indices of the sorted values + + Attributes + ---------- + T: fp16, fp32, i32 + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + axis=TensorInputType(const=True, optional=True, type_domain=types.int32), + ascending=TensorInputType(const=True, optional=True, type_domain=types.bool), + ) + + type_domains = { + "T": (types.fp16, types.fp32, types.int32), + } + + def default_inputs(self): + return DefaultInputs( + axis=-1, + ascending=False, + ) + + def type_inference(self): + return types.tensor(types.int32, self.x.shape) + + @precondition(allow=VALUE) + def value_inference(self): + # The default np argsort mode is ascending, which is opposite to MIL's argsort op. + if self.ascending.val: + return np.argsort(self.x.val, axis=self.axis.val) + return np.argsort(-self.x.val, axis=self.axis.val) + + +@register_op +class topk(Operation): + """ + Returns a tensor containing top or bottom ``k`` values and the corresponding + indices of the input tensor along a given axis. + + Parameters + ---------- + x: <\*?, T> (Required) + * Input tensor. + k: const (Optional) + * Defaults to ``1``. + * Number of values/indices to be computed along each axis. + axis: const (Optional) + * Defaults to ``-1`` (last dimension). + * Axis to perform the operation. + ascending: const (Optional) + * Defaults to ``False``, sort in descending order. + * ``True`` to sort in ascending order. + + Returns + ------- + tensor<\*?, T> + * Values of top/bottom ``k`` elements. + tensor<\*?, int32> + * Indices of the top/bottom ``k`` elements along axis. + + Attributes + ---------- + T: fp16, fp32, int32 + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + k=TensorInputType(const=True, optional=True, type_domain=types.int32), + axis=TensorInputType(const=True, optional=True, type_domain=types.int32), + ascending=TensorInputType(const=True, optional=True, type_domain=types.bool), + ) + + type_domains = { + "T": (types.fp16, types.fp32, types.int32), + } + + def default_inputs(self): + return DefaultInputs( + k=1, + axis=-1, + ascending=False, + ) + + def type_inference(self): + x_type = self.x.dtype + x_shape = self.x.shape + k = self.k.val + axis = self.axis.val + + if not is_symbolic(x_shape[axis]) and k > x_shape[axis]: + msg = "K={} is greater than size of the given axis={}" + raise ValueError(msg.format(k, axis)) + + ret_shape = list(x_shape) + ret_shape[axis] = k + return types.tensor(x_type, ret_shape), types.tensor(types.int32, ret_shape) + + @precondition(allow=VALUE) + def value_inference(self): + indices = np.argsort(self.x.val, axis=self.axis.val) + if not self.ascending.val: + indices = np.argsort(-self.x.val, axis=self.axis.val) + slc = [slice(None)] * self.x.rank + slc[self.axis.val] = slice(0, self.k.val) + indices = indices[tuple(slc)] + values = np.take_along_axis(self.x.val, indices, axis=self.axis.val) + return values, indices + + +@register_op +class flatten2d(Operation): + """ + Flattens input tensor into 2d tensor by flattening dimensions before and + after the provided axis. + + Parameters + ---------- + x: tensor<[*d], T> (Required) + * Input tensor. + axis: const (Optional) + * Defaults to ``1``. + * Negative axis is supported. + + Returns + ------- + tensor + * ``d_prior`` is product of dimensions ``x[:axis]`` + * ``d_post`` is product of dimensions ``x[axis:]`` + + Examples + -------- + 1. ``input_shape = (3, ), axis = -1, output_shape = (1, 3)`` + 2. 
``input_shape = (3, ), axis = 1, output_shape = (3, 1)`` + 3. ``input_shape = (4, 3), axis = -1, output_shape = (4, 3)`` + 4. ``input_shape = (2, 3, 2), axis = -1, output_shape = (6, 2)`` + 5. ``input_shape = (5, 5, 2), axis = 1, output_shape = (5, 10)`` + + Attributes + ---------- + T: fp16, fp32, i32, bool + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + axis=TensorInputType(const=True, optional=True, type_domain=types.int32) + ) + + type_domains = { + "T": (types.fp16, types.fp32, types.int32, types.bool), + } + + def default_inputs(self): + return DefaultInputs( + axis=1, + ) + + def type_inference(self): + shape = list(self.x.shape) + axis = self.axis.val + dim_pre_axis = np.prod(shape[:axis]) + dim_post_axis = np.prod(shape[axis:]) + new_shape = [dim_pre_axis, dim_post_axis] + return types.tensor(self.x.dtype, tuple(new_shape)) + + @precondition(allow=VALUE | SYMBOL) + def value_inference(self): + shape = self.x.shape + axis = self.axis.val + + dim_pre_axis = np.prod(shape[:axis]) + dim_post_axis = np.prod(shape[axis:]) + return self.x.val.reshape(dim_pre_axis, dim_post_axis) + + +@register_op +class shape(Operation): + """ + Returns a 1-dimensional tensor with the shape of the input tensor. + + Parameters + ---------- + x: tensor<[*?], T> (Required) + * Input tensor. + + Returns + ------- + tensor + * Shape of the input tensor. + * ``K = x.rank``. + + Attributes + ---------- + T: fp16, fp32, i32, bool + """ + + input_spec = InputSpec(x=TensorInputType(type_domain="T")) + + type_domains = { + "T": (types.fp16, types.fp32, types.int32, types.bool), + } + + def type_inference(self): + input_rank = self.x.rank + return types.tensor(types.int32, tuple([input_rank])) + + def value_inference(self): + if any_symbolic(self.x.shape): + # convert elements in shape to int32 + res = [x if is_symbolic(x) else np.int32(x) for x in self.x.shape] + return np.array(res) + else: + return np.array(self.x.shape).astype(np.int32) + + +@register_op +class concat(Operation): + """ + Concatenates tensors along a dimension. + + Parameters + ---------- + values: Tuple[tensor<[d0, d1, ..., d_axis_i, ..., d_n],T>] (Required) + * The number of dimensions of the input tensors must match, and all + dimensions except ``axis`` must be equal. + * The tensors may be variadic, but the number of tensors must be + determined at compile time (i.e. a tuple). + axis: const (Required) + * The dimension along which to concatenate. Must be in the range + ``[-rank(values[i]), rank(values[i]))`` for all ``i``. + interleave: const (Optional, Default=False) + * If True, concatenate the inputs by interleaving them. + * If True, all the inputs to this op must have the exact same shape. + + Examples + -------- + + .. sourcecode:: python + + in1 = [[1, 2], [3, 4], [5, 6]] # shape (3, 2) + in2 = [[7, 8], [9, 10], [11, 12]] # shape (3, 2) + axis = 0 # output shape is (6, 2) + + if interleave is False: # default + # output[0:3, :] = in1 + # output[3:6, :] = in2 + output = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]] + + if interleave is True: + # output[0::2, :] = in1 + # output[1::2, :] = in2 + output = [[1, 2], [7, 8], [3, 4], [9, 10], [5, 6], [11, 12]] + + Returns + ------- + tensor<[d0, d1,...d_axis_out, ..., d_n],T> + * Where ``d_axis_out = sum(d_axis_i)``. 
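+        * For the example above, ``d_axis_out = 3 + 3 = 6``, matching the
+          output shape ``(6, 2)``.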
+ + Attributes + ---------- + T: fp16, fp32, i32, bool + """ + + input_spec = InputSpec( + values=TupleInputType(), + axis=TensorInputType(const=True, type_domain=types.int32), + interleave=TensorInputType(const=True, optional=True, type_domain=types.bool) + ) + + def default_inputs(self): + return DefaultInputs( + interleave=False, + ) + + def type_inference(self): + concat_dim_len = 0 + if len(self.values) == 0: + raise ValueError("Concat {} got 0 values".format(self.name)) + + # Validate values have the same rank + rank = self.values[0].rank + for v in self.values: + if v.rank != rank: + msg = "Input {} has rank {} != other inputs rank {}" + raise ValueError(msg.format(v.name, v.rank, rank)) + + # Check concat axis is within (-rank, rank) + concat_axis = self.axis.val + if concat_axis < 0: + concat_axis += rank + if rank > 0 and (concat_axis < 0 or concat_axis >= rank): + msg = "In {} of op_type {}: axis out of bound for input " + "(rank {})" + raise ValueError(msg.format(self.name, self.op_type, rank)) + + # Validate values share the same data type + dtype = self.values[0].dtype + for v in self.values[1:]: + if v.dtype != dtype: + msg = ( + "Tensors in 'values' of the concat op ({}) should share the " + "same data type. Got {}." + ).format(self.name, [x.dtype for x in self.values]) + raise ValueError(msg) + + # validate that non-axis dimensions match + retshape = list(self.values[0].shape) + for v in self.values[1:]: + for i in range(rank): + if is_symbolic(retshape[i]) or is_symbolic(v.shape[i]): + continue + if i != concat_axis and retshape[i] != v.shape[i]: + msg = 'Dimension mismatch in {} ("{}"): shapes {} vs. {}' + raise ValueError( + msg.format(self.op_type, self.name, retshape, v.shape) + ) + if self.interleave.val and retshape[i] != v.shape[i]: + msg = 'Dimension mismatch in {} ("{}"): shapes {} vs. {}. ' \ + 'All inputs must have same shape when \'interleave\' option is True.' + raise ValueError( + msg.format(self.op_type, self.name, retshape, v.shape) + ) + + # Get length of concat dim + concat_dim_len = 0 + for v in self.values: + if len(v.shape) == 0: + taxis = 1 + else: + taxis = v.shape[concat_axis] + if is_symbolic(taxis): + concat_dim_len = get_new_symbol() + break + concat_dim_len += taxis + + if len(retshape) == 0: + retshape = [concat_dim_len] + else: + retshape[concat_axis] = concat_dim_len + + return types.tensor(dtype, retshape) + + @precondition(allow=VALUE | SYMBOL | NONE) + def value_inference(self): + + values = [] + for v in self.values: + if v.sym_val is not None: + values.append(v.sym_val) + continue + if v.rank == 0: + values.append(get_new_symbol()) + continue + if any_symbolic(v.shape): + values.append(None) + continue + + # we support value inference when number of elements for each tensor is less than 10 + shape = v.shape + num_element = np.prod(shape) + if num_element > 10: + values.append(None) + continue + + symbolic_tensor = [get_new_symbol() for _ in range(num_element)] + symbolic_tensor = np.reshape(np.array(symbolic_tensor), shape) + values.append(symbolic_tensor) + + if any([val is None for val in values]): + return None + + if not isinstance(values[0], np.ndarray) or values[0].shape == (): + return np.stack(values, axis=self.axis.val) + + return np.concatenate(values, axis=self.axis.val) + + +@register_op +class split(Operation): + """ + Split tensors into a tuple + + Parameters + ---------- + x: <\*?,T> (Required) + * The tensor to split. + * The tensors may be variadic, but the number of tensors must be determined + at compile time (i.e. 
a tuple).
+
+    num_splits: const (Optional)
+        If specified, divide ``x`` into ``num_splits`` tensors along ``axis``.
+        Its behavior depends on ``split_sizes``:
+
+        * If ``split_sizes`` is defined, ``num_splits == S``, and the output
+          sizes may be uneven.
+        * If ``split_sizes`` is not defined, ``x.shape[axis]`` must be
+          divisible by ``num_splits``, and the output sizes are even.
+
+        At least one of ``num_splits`` or ``split_sizes`` must be provided.
+        If the length ``S`` of ``split_sizes`` cannot be determined at compile
+        time, ``num_splits`` must be supplied to determine the number of outputs.
+
+    split_sizes: const (Optional)
+        * Sizes to split into. The sum of ``split_sizes`` must equal
+          ``x.shape[axis]``.
+
+    axis: const (Required)
+        * The dimension along which to split. Must be in the
+          range ``[-rank(x), rank(x))``.
+
+    Returns
+    -------
+    Tuple[tensor<\*?, T>]
+        * Where the length of the tuple is the number of splits (determined
+          from ``num_splits`` or ``split_sizes``).
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32, bool
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        num_splits=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        split_sizes=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        axis=TensorInputType(const=True, type_domain=types.int32),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32, types.bool),
+    }
+
+    def type_inference(self):
+        num_splits, sizes = self._get_num_splits_and_sizes()
+        x_shape = list(self.x.shape)
+        ret_shapes = [x_shape[:] for _ in range(num_splits)]
+        axis = self.axis.val
+        for i, d in enumerate(sizes):
+            ret_shapes[i][axis] = d
+        self.sizes = sizes
+        return tuple([types.tensor(self.x.dtype, s) for s in ret_shapes])
+
+    def _get_num_splits_and_sizes(self):
+        """
+        Return:
+        - num_splits: int
+        - sizes: list of int/symbols. Of length num_splits
+
+        Raise ValueError if num_splits cannot be determined.
+        """
+        if self.num_splits is None and self.split_sizes is None:
+            msg = (
+                "At least one of num_splits and split_sizes "
+                + "must be specified in split op {}"
+            )
+            raise ValueError(msg.format(self.name))
+
+        axis = self.axis.val
+
+        if self.num_splits is not None:
+            num_splits = self.num_splits.val
+            if self.split_sizes is None:
+                # Even split
+                if (
+                    not is_symbolic(self.x.shape[axis])
+                    and self.x.shape[axis] % num_splits != 0
+                ):
+                    msg = "num_split {} does not divide split " + "dim (length = {})"
+                    raise ValueError(msg.format(num_splits, self.x.shape[axis]))
+                # Integer division: even divisibility was verified above.
+                size = self.x.shape[axis] // num_splits
+                return num_splits, [size] * num_splits
+
+            # self.split_sizes is not None
+            if self.split_sizes.sym_val is not None:
+                return num_splits, self.split_sizes.sym_val
+
+            # self.split_sizes.sym_val is None.
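+            # split_sizes is provided but its values are unknown at compile
+            # time, so each of the num_splits output lengths gets a fresh symbol.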
+            sizes = [get_new_symbol() for _ in range(num_splits)]
+            return num_splits, sizes
+
+        # self.num_splits is None, self.split_sizes is not None
+        if self.split_sizes.sym_val is not None:
+            return len(self.split_sizes.sym_val), self.split_sizes.sym_val
+
+        # self.num_splits is None, self.split_sizes is not None,
+        # and self.split_sizes.sym_val is None
+        if any_symbolic(self.split_sizes.shape):
+            raise ValueError("Unable to determine number of splits")
+
+        # split_sizes is 1-D, so its number of elements is the number of splits.
+        num_splits = self.split_sizes.shape[0]
+        sizes = [get_new_symbol() for _ in range(num_splits)]
+        return num_splits, sizes
+
+    @precondition(allow=VALUE | SYMBOL | NONE)
+    def value_inference(self):
+        num_splits, sizes = self._get_num_splits_and_sizes()
+        if self.x.sym_val is None or any_symbolic(sizes):
+            raise NotImplementedError()
+
+        if num_splits == 1:
+            # No split_indices possible.
+            return self.x.sym_val
+
+        split_indices = np.cumsum(sizes).astype(np.int32)
+        return tuple(np.split(self.x.sym_val, split_indices[:-1], axis=self.axis.val))
+
+
+@register_op
+class stack(Operation):
+    """
+    Stacks tensors along a new dimension.
+
+    Parameters
+    ----------
+    values: Tuple[tensor<[d0, d1,...d_axis_i, ..., d_n], T>] (Required)
+        * All tensors must have identical shape.
+    axis: const (Required)
+        * The dimension along which to stack. Must be in the range
+          ``[-rank(values[i]) - 1, rank(values[i])]`` for all ``i``.
+
+    Returns
+    -------
+    tensor<[d0, d1,...d_axis_out, ..., d_n], T>
+        * Where ``d_axis_out = len(values)`` is the size of the new dimension
+          inserted at ``axis``.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32, bool
+    """
+
+    input_spec = InputSpec(
+        values=TupleInputType(),
+        axis=TensorInputType(const=True, type_domain=types.int32)
+    )
+
+    def type_inference(self):
+
+        num_tensors = len(self.values)
+        if num_tensors == 0:
+            raise ValueError("Cannot stack 0 tensors")
+
+        # get the first value without symbolic shape
+        t_shape = None
+        for value in self.values:
+            if not any_symbolic(value.shape):
+                t_shape = value.shape
+                break
+        t_shape = self.values[0].shape if t_shape is None else t_shape
+
+        # compare all shapes
+        for t in self.values:
+            if not is_compatible_symbolic_vector(t.shape, t_shape):
+                msg = "Component tensor {} has shape {}, others have {}"
+                raise ValueError(msg.format(t.name, t.shape, t_shape))
+
+        # Validate values share the same data type
+        dtype = self.values[0].dtype
+        for v in self.values[1:]:
+            if v.dtype != dtype:
+                msg = (
+                    "Tensors in 'values' of the stack op ({}) should share the "
+                    "same data type. Got {}."
+                ).format(self.name, [x.dtype for x in self.values])
+                raise ValueError(msg)
+
+        axis = self.axis.val
+        if axis < 0:
+            axis += (self.values[0].rank + 1)
+        ret_shape = list(t_shape)
+        ret_shape.insert(axis, num_tensors)
+        return types.tensor(self.values[0].dtype, ret_shape)
+
+    @precondition(allow=VALUE | SYMBOL | NONE)
+    def value_inference(self):
+
+        is_all_rank_zero = all([v.rank == 0 for v in self.values])
+        values = [
+            v.sym_val if v.sym_val is not None else get_new_symbol()
+            for v in self.values
+        ]
+
+        if any([is_symbolic(v) for v in values]) and not is_all_rank_zero:
+            return None
+
+        return np.stack(values, self.axis.val)
+
+
+# identity is used for renaming and is rarely necessary. See
+# `loop_invariant_elimination` pass for a rare use case.
+@register_op
+class identity(Operation):
+    """
+    Returns a tensor with the same shape and contents as the input.
+
+    Parameters
+    ----------
+    x: tensor<\*?, T> (Required)
+        * Input tensor.
+
+    Returns
+    -------
+    tensor<\*?, T>
+        * Same type and shape as the input tensor.
+ + Attributes + ---------- + T: fp16, fp32, i32, bool + """ + + input_spec = InputSpec( + x=ListOrTensorInputType() + ) + + def type_inference(self): + return self.x.sym_type + + @precondition(allow=VALUE | SYMBOL) + def value_inference(self): + return self.x.sym_val diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/tensor_transformation.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/tensor_transformation.py new file mode 100644 index 00000000..fe2480f4 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS15/tensor_transformation.py @@ -0,0 +1,1069 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as np +import sympy as sm + +from coremltools import _logger as logger +from coremltools.converters.mil.mil import (Operation, get_new_symbol, + get_new_variadic_symbol, + precondition, types) +from coremltools.converters.mil.mil.input_type import (DefaultInputs, + InputSpec, + TensorInputType) +from coremltools.converters.mil.mil.operation import SYMBOL, VALUE +from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op +from coremltools.converters.mil.mil.ops.defs._utils import \ + solve_slice_by_index_shape +from coremltools.converters.mil.mil.types.symbolic import (any_symbolic, + any_variadic, + is_symbolic, + isscalar) + + +@register_op +class depth_to_space(Operation): + """ + Rearrange elements in a tensor from depth (channel) into spatial dimensions. + + Parameters + ---------- + x: tensor<[n, C, H, W], T> (Required) + * Input tensor of rank ``4``. + block_size: const i32 (Required) + * The size of the spatial block. Must be greater than ``1`` and divisible by + channel dimension ``C``. + + Returns + ------- + tensor<[n, C / block_size^2, H x block_size, W x block_size], T> + * Where ``b`` is the block size. + + Attributes + ---------- + T: fp16, fp32 + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + block_size=TensorInputType(const=True, type_domain=types.int32), + ) + + type_domains = { + "T": (types.fp16, types.fp32), + } + + def type_inference(self): + x_type = self.x.dtype + n, c, h, w = self.x.shape + bs = self.block_size.val + ret_shape = (n, c // (bs * bs), h * bs, w * bs) + return types.tensor(x_type, ret_shape) + + +@register_op +class expand_dims(Operation): + """ + Insert a single-dimension in a 1-D or higher tensor at each axis in axes. + + Parameters + ---------- + x: tensor<\*?, T> (Required) + * Scalar or tensor. + axes: const tensor<[K], i32> Required + * ``K`` is the number of dimensions expanded. + * Insert single dimension at dimension index at each axes. + * Negative value to index from the end. ``-d-1 <= axis <= d`` + where ``d`` is the rank of ``x``. + + Returns + ------- + tensor<\*(rank(x)+K), T> + * Same type as the input ``x`` with rank ``rank(x)+K``. 
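+
+    Examples
+    --------
+    An illustrative case: for ``x`` of shape ``(2, 3)`` and ``axes = [0, 2]``,
+    the output rank is ``4``. Inserting at axis ``0`` gives shape ``(1, 2, 3)``,
+    and then inserting at axis ``2`` gives ``(1, 2, 1, 3)``.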
+ + Attributes + ---------- + T: fp16, fp32, i32, bool + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + axes=TensorInputType(const=True, type_domain=types.int32), + ) + + type_domains = { + "T": (types.fp16, types.fp32, types.int32, types.bool), + } + + def type_inference(self): + x_rank = self.x.rank + x_type = self.x.dtype + x_shape = list(self.x.shape) + axes = self.axes.val + out_rank = x_rank + len(axes) + + for axis in axes: + if axis <= -out_rank - 1 or axis >= out_rank: + msg = 'Axis value {} is out of bounds for {} node "{}" of shape {}' + raise IndexError( + msg.format(axis, self.op_type, self.name, self.x.shape) + ) + + ret_shape = x_shape + axes = sorted([out_rank + axis if axis < 0 else axis for axis in axes]) + for axis in axes: + ret_shape.insert(axis, 1) + + return types.tensor(x_type, tuple(ret_shape)) + + @precondition(allow=VALUE) + def value_inference(self): + axes = self.axes.val + out_rank = self.x.rank + len(axes) + + for axis in axes: + if axis <= -out_rank - 1 or axis >= out_rank: + msg = 'Axis value {} is out of bounds for {} node "{}" of shape {}' + raise IndexError( + msg.format(axis, self.op_type, self.name, self.x.shape) + ) + + axes = sorted([out_rank + axis if axis < 0 else axis for axis in axes]) + ret_shape = list(self.x.shape) + for axis in axes: + ret_shape.insert(axis, 1) + return np.reshape(self.x.val, ret_shape) + + +def reshape_with_symbol(v, shape): + """ + Perform basic reshape if v is symbolic (not array of symbols). + """ + if is_symbolic(v): + return np.array(v).reshape(shape) + shape = [int(s) for s in shape] + return v.reshape(shape) + + +@register_op +class reshape(Operation): + """ + Return a tensor that has the same values as ``x`` with shape ``shape``. + ``shape`` must have the same volume (number of elements) as ``x``. + + Parameters + ---------- + x: tensor<\*?, T> (Required) + + * A n-D tensor or a scalar. + * If ``x`` is fixed rank (and possibly contains symbolic dimension), + shape may contain elements that are not positive integers (see below). + * If ``x`` is variadic rank, shape can only contain positive integers. + + shape: tensor<[K], i32> (Required) + + A 1-D tensor, with elements from the following: + + * Positive integers. + * Symbols: All but one symbol in shape must be present in ``x.shape``. + The new symbol that is not present in ``x.shape`` represent a dimension + such that the total size remains constant. Symbol is illegal + if ``x`` is variadic rank. + * ``-1``: ``-1`` introduces a new symbol (see Symbols). Therefore, ``-1`` is + allowed if all symbols in the shape appear in ``x.shape``. ``-1`` is illegal + if ``x`` is variadic rank. + * ``0``: If ``K == rank(x)`` then ``0`` means inheriting from the corresponding + dimension in ``x.shape``. ``0`` is illegal if ``x`` is variadic rank. + + Returns + ------- + tensor<\*?, T> + * Tensor with shape determined by the input shape. + + Attributes + ---------- + T: fp16, fp32, i32, bool + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + shape=TensorInputType(type_domain=types.int32), + ) + + type_domains = { + "T": (types.fp16, types.fp32, types.int32, types.bool), + } + + def type_inference(self): + if any_symbolic(self.shape.shape): + # We can't infer any shape if shape has variable length. + return types.tensor(self.x.dtype, (get_new_variadic_symbol(),)) + + # shape has fixed length here. 
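+        # A shape tensor of known length but unknown element values yields an
+        # output of known rank with fully symbolic dimensions (the case below).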
+ if self.shape.sym_val is None: + shape = tuple([get_new_symbol() for _ in range(self.shape.shape[0])]) + return types.tensor(self.x.dtype, shape) + t, _ = self._get_type_val() + return t + + @precondition(allow=VALUE | SYMBOL) + def value_inference(self): + _, val = self._get_type_val() + return val + + def _get_type_val(self): + x_type = self.x.dtype + x_shape = self.x.shape + x_vol = np.prod(x_shape) + # shape is const, and thus sym_val is not None + sym_shape = self.shape.sym_val + sym_shape = [get_new_symbol() if d == -1 else d for d in sym_shape] + try: + ret_shape = reshape.enforce_volumetric_constraint(x_vol, sym_shape) + except: + ret_shape = sym_shape + ret_val = None + if self.x.val is not None and all(isscalar(a) and not is_symbolic(a) for a in ret_shape): + ret_val = reshape_with_symbol(self.x.val, ret_shape) + return types.tensor(x_type, tuple(ret_shape)), ret_val + + @staticmethod + def enforce_volumetric_constraint(left_volume, inshape): + left_symbols = set() + if is_symbolic(left_volume): + left_symbols = left_volume.free_symbols + # Generally, we want to solve for right in terms of left. But this + # is kinda annoying actually. + shape = list(inshape) + + # Handling when reshape is given 0 instead of actual input + # input tensor shape: [4, 3, 2], reshape:[0, -1], output tensor shape: [4, 6] + if shape.count(-1) > 1: + raise ValueError( + "Reshape op supports only one dimension to be -1. Given {}".format( + shape.count(-1) + ) + ) + + infer_dim_index = shape.index(-1) if -1 in shape else None + right_volume = 1 + for i in shape: + if i != -1: + right_volume = right_volume * i + + if infer_dim_index: + shape[infer_dim_index] = left_volume // right_volume + + if not is_symbolic(right_volume): + return shape + + constraints = [left_volume - right_volume] + solve_for = [s for s in shape if is_symbolic(s)] + + for rightsym in solve_for: + sol = sm.solve(constraints, [rightsym], dict=True) + if not isinstance(sol, list): + sol = [sol] + # look for an acceptable solution + for s in sol: + if 0 in s.values(): + continue + for i in range(len(shape)): + if shape[i] in s: + v = s[shape[i]] + if len(v.free_symbols - left_symbols) > 0: + continue + try: + shape[i] = int(v) + except: + shape[i] = v + return shape + + +@register_op +class reverse(Operation): + """ + Reverse the order of the input tensor ``x`` along specified ``axes`` (dimensions). + + Parameters + ---------- + x: tensor<\*?, T> (Required) + * Input tensor. + + axes: const (Optional) + * Dimension(s) to reverse. Each axis must be in the range ``[-rank(x), rank(x))``. + * Defaults to None (reverse on all dimensions). + + Returns + ------- + tensor<\*?, T> + * Same type and shape as the input tensor. + + Attributes + ---------- + T: fp16, fp32, i32, bool + + References + ---------- + See `tf.reverse `_ + and `TORCH `_. 
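+
+    Examples
+    --------
+    A small worked example, mirroring ``numpy.flip``: reversing
+    ``[[1, 2], [3, 4]]`` with ``axes = [0]`` yields ``[[3, 4], [1, 2]]``.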
+ """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + axes=TensorInputType(const=True, optional=True, type_domain=types.int32), + ) + + type_domains = { + "T": (types.fp16, types.fp32, types.int32, types.bool), + } + + def default_inputs(self): + return DefaultInputs( + axes=None, + ) + + def type_inference(self): + return self.x.sym_type + + @precondition(allow=VALUE) + def value_inference(self): + res = self.x.val + axes = self.axes.val if self.axes is not None else range(self.x.rank) + for axis in axes: + res = np.flip(res, axis=axis) + return res + + +@register_op +class reverse_sequence(Operation): + """ + Reverse variable length slices for specified axes / dimensions of the input + tensor. This op first slices input tensor along the ``batch_axis`` dimension, then + partially reverses the elements along the ``seq_axis`` for the first ``lengths[i]`` + elements. + + Parameters + ---------- + x: tensor<\*?, T> (Required) + * Input tensor. + lengths: tensor (Required) + * 1-dimensional tensor of length ``x.shape[batch_axis]`` specifying the length + of the sequence to reverse. + * Values must be in range ``[0, x.shape[seq_axis]]``. + seq_axis: const (Optional) + * The dimension to reverse. + * Defaults to ``0``. + batch_axis: const (Optional) + * Dimension for slicing. + * Defaults to ``0``. + + Returns + ------- + tensor<\*?, T> + * Same type and shape as the input tensor. + + Attributes + ---------- + T: fp16, fp32, i32, bool + + References + ---------- + `tf.reverse_sequence `_ + + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + lengths=TensorInputType(type_domain=types.int32), + seq_axis=TensorInputType(const=True, optional=True, type_domain=types.int32), + batch_axis=TensorInputType(const=True, optional=True, type_domain=types.int32), + ) + + type_domains = { + "T": (types.fp16, types.fp32, types.int32, types.bool), + } + + def default_inputs(self): + return DefaultInputs( + seq_axis=0, + batch_axis=0) + + def type_inference(self): + return self.x.sym_type + + @precondition(allow=VALUE) + def value_inference(self): + raise NotImplementedError("TODO") + + +@register_op +class slice_by_index(Operation): + """ + Method for numpy style indexing and slicing. + With a tensor ``x``, this method achieves the following: + + ``result = x[begin[0]: end[0]: stride[0], begin[1]: end[1]: stride[1], ...]`` + + Note: This method does not support pure indexing. You would need to do a + squeeze if indexing is intended. + + Parameters + ---------- + x: tensor<*?, T> (Required) + * Input tensor + begin: tensor<[rank(x)], i32> (Required) + * Starting index for the dimension of slicing. + end: tensor<[rank(x)], i32> (Required) + * Ending index for the dimension of slicing. + stride: tensor<[rank(x)], i32> (Optional) + * Default is all ``1``. + * Stride for the dimension of slicing. + begin_mask: tensor<[rank(x)], bool> (Optional) + * Default to all ``False``. + * If ``begin_mask[i]==True``, ignores ``begin[i]``, and set ``begin[i]`` to ``0``. + end_mask: tensor<[rank(x)], bool> (Optional) + * Default to all ``False``. + * If ``end_mask[i]==True``, ignores ``end[i]``, and set ``end[i]`` to ``x.shape[i]``. + squeeze_mask: tensor<[rank(x)], bool> (Optional) + * Default to all ``False``. + * If ``squeeze_mask[i]==true``, ignores ``end[i]``, and do the pure index at ``begin[i]``. + + Returns + ------- + tensor<\*?, T> + - Scalar or tensor. 
+ + Attributes + ---------- + T: fp16, fp32, i32, bool + + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + begin=TensorInputType(type_domain=types.int32), + end=TensorInputType(type_domain=types.int32), + stride=TensorInputType(const=True, optional=True, type_domain=types.int32), + begin_mask=TensorInputType(const=True, optional=True, type_domain=types.bool), + end_mask=TensorInputType(const=True, optional=True, type_domain=types.bool), + squeeze_mask=TensorInputType(const=True, optional=True, type_domain=types.bool), + ) + + type_domains = { + "T": (types.fp16, types.fp32, types.int32, types.bool), + } + + def default_inputs(self): + return DefaultInputs( + stride=None, + begin_mask=None, + end_mask=None, + squeeze_mask=None, + ) + + def type_inference(self): + + # get tensor and set default value + begin = self.begin.val + end = self.end.val + x_rank = self.x.rank + stride = self.stride.val if self.stride is not None else [1] * x_rank + begin_mask = ( + self.begin_mask.val if self.begin_mask is not None else [False] * x_rank + ) + end_mask = self.end_mask.val if self.end_mask is not None else [False] * x_rank + squeeze_mask = ( + self.squeeze_mask.val if self.squeeze_mask is not None else [False] * x_rank + ) + + # solve shape + x_shape = self.x.shape + ret_shape = solve_slice_by_index_shape(x_shape, begin, end, stride, begin_mask, end_mask, squeeze_mask) + + if len(ret_shape) == 0: + # Scalar case. + return self.x.dtype + else: + return types.tensor(self.x.dtype, tuple(ret_shape)) + + def value_inference(self): + if self.x.sym_val is None or self.begin.val is None or self.end.val is None: + return None + begin = [int(i) for i in list(self.begin.val[:])] + end = [int(i) for i in list(self.end.val[:])] + stride = [1] * self.x.rank if self.stride is None else self.stride.val + begin_mask = ( + [False] * self.x.rank if self.begin_mask is None else self.begin_mask.val + ) + end_mask = [False] * self.x.rank if self.end_mask is None else self.end_mask.val + squeeze_mask = ( + [False] * self.x.rank + if self.squeeze_mask is None + else self.squeeze_mask.val + ) + + slices = [] + for idx, mask in enumerate(begin_mask): + if mask: + begin[idx] = None + for idx, mask in enumerate(end_mask): + if mask: + end[idx] = None + squeeze_axes = [] + for idx, mask in enumerate(squeeze_mask): + if mask: + end[idx] = None + stride[ + idx + ] = 2147483647 # We slice out only 1 element by setting stride to INF + squeeze_axes.append(idx) + for idx in range(self.x.rank): + slices.append(slice(begin[idx], end[idx], stride[idx])) + + slices = tuple(slices) + res = self.x.sym_val[slices] + + # remove squeezed axes + if len(squeeze_axes) > 0: + if len(squeeze_axes) == len(res.shape): + if len(res) == 0: + logger.warning("%s seems to be a 0 sized tensor", self.name) + return np.array([]) + res = np.squeeze(res).tolist() + if is_symbolic(res): + return res + elif self.x.dtype == types.int32 or self.x.dtype == types.int64: + res = np.int32(res) + elif self.x.dtype == types.float or self.x.dtype == types.double: + res = np.float32(res) + else: + raise ValueError( + "Unable to convert type {}".format(self.x.sym_val.dtype) + ) + else: + res = np.squeeze(res, axis=tuple(squeeze_axes)) + return res + + +@register_op +class slice_by_size(Operation): + """ + Slice input tensor starting from the given ``begin`` index and by + the amount specified by the ``size`` input, for each dimension. + + Parameters + ---------- + x: tensor<*?, T> (Required) + * Input tensor. 
+    begin: tensor<[rank(x)], i32> (Required)
+        * The begin index for the slice.
+    size: tensor<[rank(x)], i32> (Required)
+        * The size of the slice for each dimension. If ``size`` is ``-1`` for
+          a dimension, all the remaining elements starting at ``begin`` are
+          included in the slice.
+
+    Returns
+    -------
+    tensor<\*?, T>
+        * Scalar or tensor.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32, bool
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        begin=TensorInputType(type_domain=types.int32),
+        size=TensorInputType(type_domain=types.int32),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32, types.bool),
+    }
+
+    def type_inference(self):
+        if self.begin.rank != 1:
+            raise ValueError(
+                "begin should be a 1-D tensor, got a {}-D tensor instead".format(
+                    self.begin.rank
+                )
+            )
+        if self.size.rank != 1:
+            raise ValueError(
+                "size should be a 1-D tensor, got a {}-D tensor instead".format(
+                    self.size.rank
+                )
+            )
+        if self.x.rank != self.begin.shape[0]:
+            raise ValueError(
+                "Length of begin {} doesn't equal input rank {}.".format(
+                    self.begin.shape[0], self.x.rank
+                )
+            )
+        if self.x.rank != self.size.shape[0]:
+            raise ValueError(
+                "Length of size {} doesn't equal input rank {}.".format(
+                    self.size.shape[0], self.x.rank
+                )
+            )
+
+        x_shape = self.x.shape
+        ret_shape = []
+        if self.size.sym_val is None:
+            ret_shape = [get_new_symbol() for _ in range(self.x.rank)]
+            return types.tensor(self.x.dtype, tuple(ret_shape))
+
+        for idx, s in enumerate(self.size.sym_val):
+            if is_symbolic(s):
+                ret_shape.append(s)
+            elif s != -1:
+                ret_shape.append(s)
+            elif self.begin.sym_val is not None:
+                ret_shape.append(x_shape[idx] - self.begin.sym_val[idx])
+            else:
+                ret_shape.append(get_new_symbol())
+
+        return types.tensor(self.x.dtype, tuple(ret_shape))
+
+    @precondition(allow=VALUE | SYMBOL)
+    def value_inference(self):
+        if any_symbolic(self.begin.sym_val):
+            return None
+        if any_symbolic(self.size.sym_val):
+            return None
+        if self.x.val is None:
+            return None
+        slices = []
+        for i in range(self.x.rank):
+            begin_val = self.begin.val[i]
+            if begin_val < 0:
+                if is_symbolic(self.x.shape[i]):
+                    return None
+                begin_val += self.x.shape[i]
+            if self.size.val[i] > 0:
+                slices.append(slice(begin_val, begin_val + self.size.val[i]))
+            else:
+                slices.append(slice(begin_val, None, None))
+        return self.x.val[tuple(slices)]
+
+
+@register_op
+class space_to_depth(Operation):
+    """
+    Rearrange elements in a tensor from spatial into depth (channel) dimension.
+
+    Parameters
+    ----------
+    x: tensor<[n, C, H, W], T> (Required)
+        * Input tensor of rank ``4``.
+    block_size: const (Required)
+        * The size of the spatial block. Must be greater than ``1``, and each
+          of the spatial dimensions ``H, W`` must be divisible by it.
+
+    Returns
+    -------
+    tensor<[n, C x block_size^2, H / block_size, W / block_size], T>
+        * Where ``block_size`` is the size of the spatial block.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        block_size=TensorInputType(const=True, type_domain=types.int32)
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def type_inference(self):
+        x_type = self.x.dtype
+        n, c, h, w = self.x.shape
+        bs = self.block_size.val
+        ret_shape = (n, c * (bs * bs), h // bs, w // bs)
+        return types.tensor(x_type, ret_shape)
+
+@register_op
+class space_to_batch(Operation):
+    """
+    Rearrange elements in a tensor from spatial into batch dimension.
+
+    Parameters
+    ----------
+    x: tensor<[n, C, H, W], T> (Required)
+        * Input tensor must have rank 4.
+        * The first and second dimensions are batch and channel, respectively.
+        * The remaining dimensions (H, W) are treated as "spatial dimensions".
+    block_shape: const tensor<[2], i32> (Required)
+        * The length of the block_shape must be `2`.
+        * It defines the shape of the block by which the spatial dimensions are divided.
+    paddings: const tensor<[2, 2], i32> (Required)
+        * It must have shape `(2, 2)`.
+        * It defines the padding for each spatial dimension.
+
+    Returns
+    -------
+    tensor<[new_n, C, new_H, new_W], T>
+        * new_n = n * block_shape[0] * block_shape[1]
+        * new_H = (H + paddings[0][0] + paddings[0][1]) / block_shape[0]
+        * new_W = (W + paddings[1][0] + paddings[1][1]) / block_shape[1]
+        * The output has the same rank as the input.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        block_shape=TensorInputType(const=True, type_domain=types.int32),
+        paddings=TensorInputType(const=True, type_domain=types.int32),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def type_inference(self):
+        x_shape = self.x.shape
+        block_shape = self.block_shape.val
+        paddings = self.paddings.val
+
+        if self.x.rank != 4:
+            msg = "Input to space_to_batch op must be rank 4. Instead got an input with rank {}".format(self.x.rank)
+            raise ValueError(msg)
+
+        if paddings.shape != (block_shape.shape[0], 2):
+            msg = "block_shape and paddings must have shape [2], [2, 2] respectively in the space_to_batch op. "\
+                  "Got {}, {}.".format(block_shape.shape, paddings.shape)
+            raise ValueError(msg)
+
+        m = block_shape.shape[0]
+        if m != 2:
+            msg = "space_to_batch op only supports spatial dimensions = 2. Got {}".format(m)
+            raise ValueError(msg)
+
+        b = x_shape[0]
+        c = x_shape[1]
+        spatial_shape = x_shape[2:2+m]
+
+        if self.x.rank != m + 2:
+            raise ValueError("The input rank of space_to_batch op must exactly be " \
+                             "len(block_shape){} + 2! Got {}".format(self.block_shape.val, self.x.rank))
+
+        padded_spatial_shape = [x + paddings[i][0] + paddings[i][1] for i, x in enumerate(spatial_shape)]
+        new_b = b * np.prod(block_shape)
+        # Integer division: the padded spatial dims are expected to be divisible
+        # by the block shape.
+        new_spatial_shape = [padded_spatial_shape[i] // block_shape[i] for i in range(m)]
+        ret_shape = [new_b, c] + new_spatial_shape
+        x_type = self.x.dtype
+
+        return types.tensor(x_type, ret_shape)
+
+@register_op
+class batch_to_space(Operation):
+    """
+    Rearrange elements in a tensor from batch into spatial dimension.
+
+    Parameters
+    ----------
+    x: tensor<[n, C, H, W], T> (Required)
+        * Input tensor must have rank 4.
+        * The first and second dimensions are batch and channel, respectively.
+        * The remaining dimensions (H, W) are treated as "spatial dimensions".
+    block_shape: const tensor<[2], i32> (Required)
+        * The length of the block_shape must be `2`.
+        * It defines the shape of the block by which the spatial dimensions are multiplied.
+    crops: const tensor<[2, 2], i32> (Required)
+        * It must have shape `(2, 2)`.
+        * It defines the amount to crop from each spatial dimension.
+
+    Returns
+    -------
+    tensor<[new_n, C, new_H, new_W], T>
+        * new_n = n / (block_shape[0] * block_shape[1])
+        * new_H = (H * block_shape[0]) - crops[0][0] - crops[0][1]
+        * new_W = (W * block_shape[1]) - crops[1][0] - crops[1][1]
+        * The output has the same rank as the input.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        block_shape=TensorInputType(const=True, type_domain=types.int32),
+        crops=TensorInputType(const=True, type_domain=types.int32),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def type_inference(self):
+        x_shape = self.x.shape
+        block_shape = self.block_shape.val
+        crops = self.crops.val
+
+        if self.x.rank != 4:
+            msg = "Input to batch_to_space op must be rank 4. Instead got an input with rank {}".format(self.x.rank)
+            raise ValueError(msg)
+
+        if crops.shape != (block_shape.shape[0], 2):
+            msg = "block_shape and crops must have shape [2], [2, 2] respectively in the batch_to_space op. "\
+                  "Got {}, {}.".format(block_shape.shape, crops.shape)
+            raise ValueError(msg)
+
+        m = block_shape.shape[0]
+        if m != 2:
+            msg = "batch_to_space op only supports spatial dimensions = 2. Got {}".format(m)
+            raise ValueError(msg)
+
+        b = x_shape[0]
+        c = x_shape[1]
+        spatial_shape = x_shape[2:2+m]
+
+        if self.x.rank != m + 2:
+            raise ValueError("The input rank of batch_to_space op must exactly be " \
+                             "len(block_shape){} + 2! Got {}".format(self.block_shape.val, self.x.rank))
+
+        if not is_symbolic(b) and b % np.prod(block_shape) != 0:
+            msg = ("Batch size must be perfectly divided by the product of block_shape. Got batch size {}, and block_shape {}."
+                   ).format(b, block_shape)
+            raise ValueError(msg)
+
+        # Integer division: divisibility of the batch dim was checked above.
+        new_b = b // np.prod(block_shape)
+        new_spatial_shape = [spatial_shape[i] * block_shape[i] for i in range(m)]
+        cropped_spatial_shape = [x - crops[i][0] - crops[i][1] for i, x in enumerate(new_spatial_shape)]
+        ret_shape = [new_b, c] + cropped_spatial_shape
+        x_type = self.x.dtype
+
+        return types.tensor(x_type, ret_shape)
+
+@register_op
+class squeeze(Operation):
+    """
+    Remove dimensions of size ``1`` from a 1-D or higher tensor.
+
+    Parameters
+    ----------
+    x: tensor<\*?,T> (Required)
+        * Must be at least 1-D.
+    axes: const (Optional)
+        * Axes to squeeze out.
+        * Defaults to removing all size-``1`` dimensions.
+
+    Returns
+    -------
+    tensor<\*(rank(x)-K),T>
+        * Tensor with same type as input ``x`` and rank ``rank(x)-K``.
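+
+    Examples
+    --------
+    An illustrative case: for ``x`` of shape ``(1, 3, 1, 2)``, ``axes=None``
+    removes every size-``1`` dimension and gives shape ``(3, 2)``, while
+    ``axes=[0]`` gives ``(3, 1, 2)``.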
+ + Attributes + ---------- + T: fp16, fp32, i32, bool + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + axes=TensorInputType(const=True, optional=True, type_domain=types.int32), + ) + + type_domains = { + "T": (types.fp16, types.fp32, types.int32, types.bool), + } + + def default_inputs(self): + return DefaultInputs( + axes=None, + ) + + def type_inference(self): + x_type = self.x.dtype + x_shape = self.x.shape + squeezed_shape = list(x_shape) + if self.axes is None: + # Squeeze all single-dim, assuming symbolic dims != 1 + squeezed_shape = [s for s in squeezed_shape if s != 1] + else: + axes = self.axes.val + axes = [axis if axis >= 0 else axis + self.x.rank for axis in axes] + for i in sorted(axes)[::-1]: # descending order + if len(squeezed_shape) <= i: + raise ValueError( + "Cannot squeeze dim {} for shape {}".format(i, squeezed_shape) + ) + squeezed_shape.pop(i) + + return types.tensor(x_type, tuple(squeezed_shape)) if len(squeezed_shape) != 0 else x_type + + @precondition(allow=VALUE) + def value_inference(self): + if self.x.val is None: + return None + if self.axes is None: + val = np.squeeze(self.x.val) + else: + val = np.squeeze(self.x.val, axis=tuple(self.axes.val)) + return val if val.shape != () else self.x.val[0] + +@register_op +class transpose(Operation): + """ + Permute tensor ``x`` dimensions according to ``perm``. + + Parameters + ---------- + x: tensor<\*?, T> (Required) + * Must be at least 1-D. ``x`` may have a symbolic shape. + perm: const<[rank(x)], i32> (Required) + * Permutation order. -rank(x) <= perm[I] < rank(x) for all perm entries. + + Returns + ------- + tensor<\*?,T> + * Tensor with same rank and type as ``x``. + + Attributes + ---------- + T: fp16, fp32, i32, bool + + References + ---------- + `torch.Tensor.permute `_ + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + perm=TensorInputType(const=True, type_domain=types.int32), + ) + + type_domains = { + "T": (types.fp16, types.fp32, types.int32, types.bool), + } + + def type_inference(self): + x_type = self.x.dtype + perm = self.perm.val + x_shape = np.array(self.x.shape) + if len(perm) != self.x.rank: + msg = "perm should have the same length as rank(x): {} != {}" + raise ValueError(msg.format(len(perm), self.x.rank)) + if self.x.rank == 0: + return self.x.sym_type # scalar cannot be transposed + if any_variadic(self.x.shape): + ret_shape = get_new_variadic_symbol() + else: + ret_shape = x_shape[perm] + return types.tensor(x_type, tuple(ret_shape)) + + @precondition(allow=VALUE) + def value_inference(self): + return np.transpose(self.x.val, axes=self.perm.val) + + +@register_op +class pixel_shuffle(Operation): + """ + Rearrange elements in a tensor from depth (channel) into spatial dimensions. + Equivalent to PyTorch's ``PixelShuffle``. + + Parameters + ---------- + x: tensor<[n, C x f^2, H, W], T> (Required) + * Input tensor of rank ``4``. + upscale_factor: const + * Factor to increase spatial resolution by. + + Returns + ------- + tensor<[n, C, H x f, W x f], T> + * Where ``f`` is the upscale factor. 
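+
+    Examples
+    --------
+    An illustrative case: an input of shape ``(1, 8, 3, 3)`` with
+    ``upscale_factor f = 2`` has ``C = 8 / f^2 = 2``, so the output shape is
+    ``(1, 2, 6, 6)``.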
+ + Attributes + ---------- + T: fp16, fp32 + + References + ---------- + `torch.nn.PixelShuffle `_ + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + upscale_factor=TensorInputType(const=True, type_domain=types.int32), + ) + + type_domains = { + "T": (types.fp16, types.fp32), + } + + def type_inference(self): + x_type = self.x.dtype + n, c, h, w = self.x.shape + f = self.upscale_factor.val + ret_shape = (n, c // (f * f), h * f, w * f) + return types.tensor(x_type, ret_shape) + + +@register_op +class sliding_windows(Operation): + """ + Return a tensor containing all windows of ``size``, separated by stride along the + given ``axis``. + + Parameters + ---------- + x: tensor<[\*d0, d_axis, *dn], T> + * Input tensor. + + axis: const + * Axis to perform the operation. + + size: const + * Number of elements in the sliding window. + + stride: const Optional + * Default to ``1``. + * The stride of the input elements in the sliding window. + + Returns + ------- + tensor<[\*d0, d_axis - size // stride + 1, size, \*dn], T> + * The output will be a tensor of rank ``N+1`` where ``N`` is the input tensor + rank. + + Attributes + ---------- + T: fp16, fp32, int32 + """ + + input_spec = InputSpec( + x=TensorInputType(type_domain="T"), + axis=TensorInputType(const=True, type_domain=types.int32), + size=TensorInputType(const=True, type_domain=types.int32), + stride=TensorInputType(const=True, optional=True, type_domain=types.int32), + ) + + type_domains = { + "T": (types.fp16, types.fp32, types.int32), + } + + def default_inputs(self): + return DefaultInputs(stride=1) + + def type_inference(self): + x_shape = self.x.shape + axis = self.axis.val + size = self.size.val + stride = self.stride.val + ret_shape = list(x_shape) + ret_shape[axis] = (x_shape[axis] - size) // stride + 1 + pos_axis = axis if axis >= 0 else axis + self.x.rank + ret_shape.insert(pos_axis + 1, size) + return types.tensor(self.x.dtype, tuple(ret_shape)) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS16/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS16/__init__.py new file mode 100644 index 00000000..e83fcb3b --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS16/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) 2022, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause +from coremltools.converters.mil._deployment_compatibility import \ + AvailableTarget as target + +_IOS16_TARGET = target.iOS16 + +from .constexpr_ops import (constexpr_affine_dequantize, constexpr_cast, + constexpr_lut_to_dense, constexpr_sparse_to_dense) +from .image_resizing import crop_resize, resample, upsample_bilinear +from .scatter_gather import gather, gather_nd +from .tensor_operation import fill_like, topk +from .tensor_transformation import pixel_unshuffle, reshape_like diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS16/constexpr_ops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS16/constexpr_ops.py new file mode 100644 index 00000000..5306bbf3 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS16/constexpr_ops.py @@ -0,0 +1,383 @@ +# Copyright (c) 2022, Apple Inc. All rights reserved. 
+import numpy as np + +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.input_type import (InputSpec, + TensorInputType) +from coremltools.converters.mil.mil.operation import Operation +from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op +from coremltools.converters.mil.mil.ops.defs.iOS16 import _IOS16_TARGET + + +@register_op(opset_version=_IOS16_TARGET) +class constexpr_affine_dequantize(Operation): + """ + A compile-time operation that returns a constant output value upon dequantizing its constant inputs. + + This operation is used to represent constant 8-bit quantized data with affine/linear quantization. + The quantized data is stored in the parameter ``quantized_data``. + The other parameters -- ``scale``, ``zero_point``, and ``axis`` -- describe how + unquantized values can be extracted from it, using the equation for affine/linear quantization: + :: + unquantized_data = scale * (quantized_data - zero_point) + + Although all of the parameters of this op are constants, this op is not constant folded + to a single const op at the time of model serialization. The unquantized output will + be decompressed later, based on the implementation detail (either at model load time or runtime). + + Parameters + ---------- + quantized_data: const tensor (Required) + + zero_point: const tensor (Required) + * ``zero_point`` can be either a scalar or a vector. + * ``zero_point`` follows similar broadcasting rules and size constraints as ``scale``. + + scale: const tensor (Required) + * ``scale`` can be either a scalar or a vector. If ``scale`` is a vector, + for implementation it is broadcast to the following shape: + * The rank of ``scale`` becomes the same as the rank of ``quantized_data``. + * The constraint: ``size(scale-vector) == quantized_data.shape[axis]``. + * For ``i == axis``, ``scale.shape[i] == quantized_data.shape[i]``. + * For ``i != axis``, ``scale.shape == 1``. + For example, assume ``quantized_data.shape = (2, 3, 4, 5)`` and ``axis = 1``. + If ``scale`` is a vector, then ``scale.size`` needs to be equal to + ``quantized_data.shape[axis] i.e = 3``, which would be broadcast to ``(1, 3, 1, 1)``. 
+ + axis: const tensor (Required) + + Returns + ------- + const tensor + + Attributes + ---------- + SrcT: uint8, int8 + DstT: fp16, fp32 + """ + + input_spec = InputSpec( + quantized_data=TensorInputType(const=True, type_domain="SrcT"), + zero_point=TensorInputType(const=True, type_domain="ZeroPointT"), + scale=TensorInputType(const=True, type_domain="DstT"), + axis=TensorInputType(const=True, type_domain=types.int32), + ) + + type_domains = { + "DstT": (types.fp16, types.fp32), + "SrcT": (types.uint8, types.int8), + "ZeroPointT": (types.uint8, types.int8), + } + + def type_inference(self): + def assert_is_scalar_or_vector(param, name): + if param.rank not in (0, 1): + raise ValueError( + "Parameter {} needs to be either a scalar or vector".format(name) + ) + + def assert_vector_size_same_as_axial_dimension(param, axis_dim_size, name): + if param.rank == 1 and param.shape[0] != axis_dim_size: + raise ValueError( + "Parameter {}, if vector, needs to have same size as the dimension size along the parameter quantized_data".format( + name + ) + ) + + if self.zero_point.dtype != self.quantized_data.dtype: + raise ValueError( + "Parameters quantized_data and zero_point needs to be of the same dtype" + ) + + rank = self.quantized_data.rank + if self.axis.val < -rank or self.axis.val >= rank: + raise ValueError( + "Parameter axis needs to be in the range -quantized_data.rank <= axis < quantized_data.rank" + ) + + assert_is_scalar_or_vector(self.scale, "scale") + assert_is_scalar_or_vector(self.zero_point, "zero_point") + + assert_vector_size_same_as_axial_dimension( + self.scale, self.quantized_data.shape[self.axis.val], "scale" + ) + assert_vector_size_same_as_axial_dimension( + self.zero_point, self.quantized_data.shape[self.axis.val], "zero_point" + ) + + dtype = self.scale.dtype + shape = self.quantized_data.shape + return types.tensor(dtype, shape) + + def value_inference(self): + return self.decompress( + self.quantized_data.val, + self.zero_point.val, + self.scale.val, + self.axis.val + ) + + @staticmethod + def decompress(quantized_data, zero_point, scale, axis): + + axis = axis if axis >= 0 else axis + len(quantized_data.shape) + + def rank_promoted_to_same_as_quantized_data(param): + if len(param.shape) == 0: + return np.reshape(param, np.ones(len(quantized_data.shape), np.int32)) + else: + axes = [i for i in range(len(quantized_data.shape)) if i != axis] + return np.expand_dims(param, axis=tuple(axes)) + + sc = rank_promoted_to_same_as_quantized_data(scale) + zp = rank_promoted_to_same_as_quantized_data(zero_point) + val = sc * (quantized_data.astype(np.float32) - zp.astype(np.float32)) + return val.astype(scale.dtype) + + +@register_op(opset_version=_IOS16_TARGET) +class constexpr_cast(Operation): + """ + A compile-time operation that returns a constant output value upon casting its constant input. 
+    ::
+        Expression: output = constexpr_cast(source_val, output_dtype="fp32")
+
+    Parameters
+    ----------
+    source_val: const tensor (Required)
+
+    output_dtype: const tensor (Required)
+
+    Returns
+    -------
+    const tensor
+
+    Attributes
+    ----------
+    SrcT: fp16
+    DstT: fp32
+    """
+
+    input_spec = InputSpec(
+        source_val=TensorInputType(const=True, type_domain=types.fp16),
+        output_dtype=TensorInputType(const=True, type_domain=types.str),
+    )
+
+    def type_inference(self):
+        dtype = types.string_to_builtin(self.output_dtype.val)
+        if dtype != types.fp32:
+            raise NotImplementedError("Only output_dtype = fp32 is supported")
+
+        shape = self.source_val.shape
+        return types.tensor(dtype, shape)
+
+    def value_inference(self):
+        return np.float32(self.source_val.val)
+
+
+@register_op(opset_version=_IOS16_TARGET)
+class constexpr_lut_to_dense(Operation):
+    """
+    A compile-time operation that returns a constant output value upon decompressing
+    a look-up table (LUT) to a dense tensor.
+
+    This operation is used to store constant weights in a LUT format (also known as
+    `palettized` weights). A LUT is a mapping from indices to values.
+    Weights are quantized and stored as indices (or keys) into the LUT.
+    Before computation, these keys are mapped to corresponding values in the LUT.
+
+    Parameters
+    ----------
+    indices: const tensor (Required)
+
+    lut: const tensor (Required)
+
+    shape: const tensor (Required)
+
+    Notes
+    -----
+    * Any data is packed and read in a row-major order.
+    * ``NUM_PALETTES`` can be one of ``{2, 4, 16, 64, 256}``.
+    * ``n_bits = log2(NUM_PALETTES)`` can thus be one of ``{1, 2, 4, 6, 8}``.
+    * Indices are packed into a byte array of size ``M``, where ``M = ceil(n_bits * product(shape) / 8)``.
+
+    The bit fields are packed one byte at a time, starting with the least significant bit (LSB) and
+    moving upward to the most significant bit (MSB). It follows that if an index is split
+    across two bytes, its LSBs fill the MSBs of the current byte, and its remaining
+    bits fill the LSBs of the next byte.
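+
+    The packing can be reproduced with NumPy's bit utilities. A minimal sketch,
+    using the 2-bit ids ``i0 .. i4`` from the example that follows (the values
+    are illustrative only):
+    ::
+        import numpy as np
+
+        ids = np.array([0b00, 0b11, 0b10, 0b01, 0b11], dtype=np.uint8)  # i0 .. i4
+        # keep the 2 LSBs of each id, then repack the flat bit stream LSB-first
+        bits = np.unpackbits(ids[:, None], axis=1, bitorder="little", count=2)
+        packed = np.packbits(bits.reshape(-1), bitorder="little")
+        # packed -> [0b01101100, 0b00000011], i.e. M = 2 bytes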
+
+    For example:
+    ::
+        if n_bits = 2, shape = (5,) => M = 2 bytes
+
+             MSB                                          LSB
+              |                                            |
+        indices =  | 01   10   11   00 | xx   xx   xx   11 |  <== packed elements
+                   | i3 | i2 | i1 | i0 | -- | -- | -- | i4 |  <== tagged element ids
+                   |       byte 0      |       byte 1      |  <== tagged bytes
+
+    Returns
+    -------
+    const tensor
+
+    Attributes
+    ----------
+    T: uint8, int8, fp16, fp32
+    """
+
+    input_spec = InputSpec(
+        indices=TensorInputType(const=True, type_domain=types.uint8),
+        lut=TensorInputType(const=True, type_domain="T"),
+        shape=TensorInputType(const=True, type_domain=types.uint32),
+    )
+
+    type_domains = {
+        "T": (types.int8, types.uint8, types.fp16, types.fp32)
+    }
+
+    def type_inference(self):
+        def assert_is_vector(param, name):
+            if param.rank != 1:
+                raise ValueError("Parameter {} needs to have rank == 1".format(name))
+
+        assert_is_vector(self.indices, "indices")
+        assert_is_vector(self.lut, "lut")
+
+        if self.lut.shape[0] not in (2, 4, 16, 64, 256):
+            raise ValueError(
+                "Parameter lut should be a vector of size one of {2, 4, 16, 64, 256}"
+            )
+
+        nbits = int(np.log2(self.lut.shape[0]))
+        output_size = np.prod(self.shape.val)
+        if self.indices.shape[0] != np.ceil(nbits * (output_size / 8.0)):
+            raise AssertionError(
+                "Constraint violated, M = ceil(n_bits * product(shape) / 8) where M = indices.size"
+            )
+
+        dtype = self.lut.dtype
+        shape = self.shape.val
+        return types.tensor(dtype, shape)
+
+    def value_inference(self):
+        return self.decompress(
+            self.lut.val,
+            self.indices.val,
+            self.shape.val,
+        )
+
+    @staticmethod
+    def decompress(lut, indices, shape):
+        bitarray = np.unpackbits(indices, bitorder="little")
+        nbits = np.log2(lut.size).astype(np.int32)
+
+        # pad the bit stream so it splits evenly into n-bit groups
+        pad_required = bitarray.size % nbits != 0
+        if pad_required:
+            bitarray = np.concatenate([bitarray, np.zeros(nbits - bitarray.size % nbits)]).astype(bitarray.dtype)
+
+        assert bitarray.size % nbits == 0
+
+        size = np.prod(shape)
+        bitarray = bitarray.reshape(-1, nbits)[:size, :]
+
+        indices = np.packbits(bitarray, bitorder="little", axis=-1).reshape(-1)
+        flatten_val = lut[indices]
+        return flatten_val.reshape(shape)
+
+
+@register_op(opset_version=_IOS16_TARGET)
+class constexpr_sparse_to_dense(Operation):
+    """
+    A compile-time operation that returns a constant output value upon de-sparsification of its constant inputs.
+
+    This operation represents unstructured sparsity with a binary bit-mask representation.
+    If a bit is set, then the corresponding element in the output tensor is non-zero and the
+    value is read from the ``nonzero_data`` attribute. Likewise, if the bit is not set,
+    then the corresponding element in the output tensor is zero.
+
+    Parameters
+    ----------
+    nonzero_data: const tensor (Required)
+
+    mask: const tensor (Required)
+
+    shape: const tensor (Required)
+
+    Notes
+    -----
+    * Any data is packed and read in a row-major order.
+    * ``mask`` contains ``M`` bytes, where ``M = ceil(product(shape) / 8)``. That is, each bit
+      field corresponds to one element in the output tensor.
+    * ``D``, the number of elements in ``nonzero_data``, equals the total number of set bits in ``mask``.
+
+    The bit fields are packed one byte at a time, starting with the least significant bit and
+    moving up to the most significant bit.
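+
+    The mask expansion can be sketched with NumPy, mirroring this op's
+    ``decompress`` helper (the concrete values are illustrative only and match
+    the example that follows):
+    ::
+        import numpy as np
+
+        mask = np.array([0b00001100], dtype=np.uint8)          # bits i2 and i3 set
+        nonzero_data = np.array([7.0, 9.0], dtype=np.float32)  # D = 2 set bits
+
+        dense = np.zeros(5, dtype=nonzero_data.dtype)
+        dense[np.where(np.unpackbits(mask, bitorder="little") != 0)] = nonzero_data
+        # dense -> [0., 0., 7., 9., 0.]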
+
+    For example:
+    ::
+        shape = (5,) => M = 1 byte
+
+             MSB                      LSB
+              |                        |
+        mask    = |x  x  x  0  1  1  0  0 |   <== packed elements
+                  |--|--|--|i4|i3|i2|i1|i0|   <== tagged element ids
+                  |         byte 0        |   <== tagged bytes
+
+    Returns
+    -------
+    const tensor
+
+    Attributes
+    ----------
+    T: uint8, int8, fp16, fp32
+    """
+
+    input_spec = InputSpec(
+        nonzero_data=TensorInputType(const=True, type_domain="T"),
+        mask=TensorInputType(const=True, type_domain=types.uint8),
+        shape=TensorInputType(const=True, type_domain=types.uint32),
+    )
+
+    type_domains = {
+        "T": (types.int8, types.uint8, types.fp16, types.fp32)
+    }
+
+    def type_inference(self):
+        def assert_is_vector(param, name):
+            if param.rank != 1:
+                raise ValueError("Parameter {} needs to have rank == 1".format(name))
+
+        assert_is_vector(self.nonzero_data, "nonzero_data")
+        assert_is_vector(self.mask, "mask")
+
+        if sum(bin(x).count("1") for x in self.mask.val) != self.nonzero_data.shape[0]:
+            raise AssertionError(
+                "Number of set bits in mask needs to be equal to number of elements in parameter nonzero_data"
+            )
+
+        output_size = np.prod(self.shape.val)
+        if self.mask.shape[0] != np.ceil(output_size / 8.0):
+            raise AssertionError(
+                "Constraint violated: M = ceil(product(shape) / 8) where M = mask.size"
+            )
+
+        bitarray = np.unpackbits(self.mask.val, bitorder="little")
+        if any(bitarray[i] != 0 for i in range(output_size, len(bitarray))):
+            raise AssertionError("Padded bits in mask should be unset, i.e., equal to zero")
+
+        dtype = self.nonzero_data.dtype
+        shape = self.shape.val
+        return types.tensor(dtype, shape)
+
+    def value_inference(self):
+        return self.decompress(self.nonzero_data.val, self.mask.val, self.shape.val)
+
+    @staticmethod
+    def decompress(nonzero_data, mask, shape):
+        flattened_val = np.zeros(shape, dtype=nonzero_data.dtype).flatten()
+        flattened_val[
+            np.where(np.unpackbits(mask, bitorder="little") != 0)
+        ] = nonzero_data
+        return flattened_val.reshape(shape)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS16/image_resizing.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS16/image_resizing.py
new file mode 100644
index 00000000..da1f5dfb
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS16/image_resizing.py
@@ -0,0 +1,86 @@
+# Copyright (c) 2022, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+
+from coremltools.converters.mil.mil import types
+from coremltools.converters.mil.mil.input_type import (DefaultInputs,
+                                                       InputSpec,
+                                                       TensorInputType)
+from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op
+from coremltools.converters.mil.mil.ops.defs.iOS15.image_resizing import \
+    crop_resize as _crop_resize_iOS15
+from coremltools.converters.mil.mil.ops.defs.iOS15.image_resizing import \
+    resample as _resample_iOS15
+from coremltools.converters.mil.mil.ops.defs.iOS15.image_resizing import \
+    upsample_bilinear as _upsample_bilinear_iOS15
+from coremltools.converters.mil.mil.ops.defs.iOS16 import _IOS16_TARGET
+
+
+@register_op(opset_version=_IOS16_TARGET)
+class resample(_resample_iOS15):
+    """
+    The iOS 16 version of ``resample`` supports float 16 coordinates.
+
+    For the complete documentation, see the
+    `iOS 15 version <#module-coremltools.converters.mil.mil.ops.defs.iOS15.image_resizing>`_.
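+
+    A minimal usage sketch with the MIL builder ``mb`` (the argument values are
+    illustrative only; the fp16 support for ``x`` and ``coordinates`` is what
+    this version adds):
+    ::
+        y = mb.resample(
+            x=x,                      # tensor<T>, T in {fp16, fp32}
+            coordinates=coords,       # tensor<U>, U in {int32, fp16, fp32}
+            sampling_mode="bilinear",
+            padding_mode="constant",
+            padding_value=0.0,
+            coordinates_mode="normalized_minus_one_to_one",
+            align_corners=True,
+        )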
+    """
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        coordinates=TensorInputType(type_domain="U"),
+        sampling_mode=TensorInputType(const=True, type_domain=types.str),
+        padding_mode=TensorInputType(const=True, type_domain=types.str),
+        padding_value=TensorInputType(const=True, type_domain="T"),
+        coordinates_mode=TensorInputType(const=True, type_domain=types.str),
+        align_corners=TensorInputType(const=True, type_domain=types.bool),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+        "U": (types.int32, types.fp16, types.fp32),
+    }
+
+    def type_inference(self):
+        # the output type is inferred exactly as in the iOS 15 version
+        return super().type_inference()
+
+@register_op(opset_version=_IOS16_TARGET)
+class upsample_bilinear(_upsample_bilinear_iOS15):
+    """
+    The iOS 16 version of ``upsample_bilinear`` supports ``half_pixel_centers``.
+
+    Additional Parameters
+    ---------------------
+    half_pixel_centers: const (Optional)
+        * Defaults to ``not align_corners`` if not provided.
+    """
+
+    input_spec = _upsample_bilinear_iOS15.input_spec + InputSpec(
+        half_pixel_centers=TensorInputType(const=True, optional=True, type_domain=types.bool),
+    )
+
+    def default_inputs(self):
+        return super().default_inputs() + DefaultInputs(half_pixel_centers=not self.align_corners.val)
+
+@register_op(opset_version=_IOS16_TARGET)
+class crop_resize(_crop_resize_iOS15):
+    """
+    The iOS 16 version of ``crop_resize`` supports ``pad_value``.
+
+    Additional Parameters
+    ---------------------
+    pad_value : const (Optional, default=1.0)
+        * If the box indices go beyond the input boundary, the input image is padded with ``pad_value``.
+        * It plays the same role as ``extrapolation_value`` in ``tf.image.crop_and_resize``.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+    """
+    input_spec = _crop_resize_iOS15.input_spec + InputSpec(
+        pad_value=TensorInputType(const=True, optional=True, type_domain="T"),
+    )
+
+    def default_inputs(self):
+        return super().default_inputs() + DefaultInputs(pad_value=1.0)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS16/scatter_gather.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS16/scatter_gather.py
new file mode 100644
index 00000000..82653b0f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS16/scatter_gather.py
@@ -0,0 +1,170 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from coremltools.converters.mil.mil import Operation, types
+from coremltools.converters.mil.mil.input_type import (DefaultInputs,
+                                                       InputSpec,
+                                                       TensorInputType)
+from coremltools.converters.mil.mil.operation import (SYMBOL, VALUE,
+                                                      precondition)
+from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op
+from coremltools.converters.mil.mil.ops.defs._utils import compute_gather
+from coremltools.converters.mil.mil.ops.defs.iOS16 import _IOS16_TARGET
+
+
+@register_op(opset_version=_IOS16_TARGET)
+class gather(Operation):
+    """
+    The iOS 16 version of ``gather``. This version supports ``batch_dims``,
+    similar to `tf.gather <https://www.tensorflow.org/api_docs/python/tf/gather>`_.
+
+    Parameters
+    ----------
+    x: tensor<\*D, T> (Required)
+    indices: tensor<\*N, I> (Required)
+        * Indices values may be negative. More precisely, ``-D[axis] <= v < D[axis]`` for ``v`` in ``indices``.
+    axis: const i32 (Optional. Default=``0``)
+        * Negative axis is supported.
+    batch_dims: const i32 (Optional. Default=``0``)
+        * The number of batch dimensions.
+
+    Returns
+    -------
+    tensor<\*K, T>
+        * Where ``K = D[:axis] + N[batch_dims:] + D[axis+1:]``.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32
+    I: uint16, int16, int32
+
+    References
+    ----------
+    See `tf.gather <https://www.tensorflow.org/api_docs/python/tf/gather>`_.
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="U"),
+        indices=TensorInputType(type_domain="I"),
+        axis=TensorInputType(const=True, optional=True, type_domain=types.int32),
+        batch_dims=TensorInputType(const=True, optional=True, type_domain=types.int32)
+    )
+
+    type_domains = {
+        "U": (types.fp16, types.fp32, types.int32),
+        "I": (types.int32, types.uint16, types.int16),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            axis=0,
+            batch_dims=0,
+        )
+
+    @precondition(allow=VALUE | SYMBOL)
+    def value_inference(self):
+        x = self.x.sym_val
+        indices = self.indices.val
+        if indices is None:
+            # only allow x to be symbolic. indices cannot.
+            return None
+        return compute_gather(
+            params=self.x.sym_val,
+            indices=self.indices.val,
+            axis=self.axis.val,
+            batch_dims=self.batch_dims.val
+        )
+
+    def type_inference(self):
+        # validate parameters
+        if self.axis.val < -self.x.rank or self.axis.val >= self.x.rank:
+            raise IndexError(
+                "Axis value {} is out of bounds for {} node {}".format(
+                    self.axis.val, self.op_type, self.name
+                )
+            )
+        if self.batch_dims.val >= self.x.rank:
+            raise ValueError(
+                "batch_dims {} must be less than x.rank {} for node {}".format(
+                    self.batch_dims.val, self.x.rank, self.name
+                )
+            )
+        if self.batch_dims.val > self.indices.rank:
+            raise ValueError(
+                "batch_dims {} must be less than or equal to indices.rank {} for node {}".format(
+                    self.batch_dims.val, self.indices.rank, self.name
+                )
+            )
+
+        output_rank = self.x.rank - 1 + self.indices.rank - self.batch_dims.val
+        if output_rank == 0:
+            # output scalar
+            return self.x.dtype
+
+        # compute output shape
+        axis = self.axis.val
+        axis = axis if axis >= 0 else axis + self.x.rank
+        batch_dims = self.batch_dims.val
+        out_shape = self.x.shape[:axis] + self.indices.shape[batch_dims:] + self.x.shape[axis + 1 :]
+
+        return types.tensor(self.x.dtype, out_shape)
+
+@register_op(opset_version=_IOS16_TARGET)
+class gather_nd(Operation):
+    """
+    The iOS 16 version of ``gather_nd``. This version supports ``batch_dims``.
+
+    Parameters
+    ----------
+    x: tensor<\*D, T> (Required)
+    indices: tensor<\*K, i32> (Required)
+    batch_dims: const i32 (Optional. Default=``0``)
+        * The number of batch dimensions.
+
+    Returns
+    -------
+    tensor<\*V, T>
+        * ``V = K[:-1] + D[batch_dims + K[-1]:]``, where ``D = x.shape`` and ``K = indices.shape``.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32
+
+    References
+    ----------
+    See `tf.gather_nd <https://www.tensorflow.org/api_docs/python/tf/gather_nd>`_.
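+
+    For example, an illustrative shape computation following the rule above:
+    ::
+        x.shape       = (2, 3, 4)     # D
+        indices.shape = (2, 2, 1)     # K, so K[-1] = 1
+        batch_dims    = 1
+
+        output.shape  = K[:-1] + D[batch_dims + K[-1]:]
+                      = (2, 2) + (4,)
+                      = (2, 2, 4)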
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="U"),
+        indices=TensorInputType(type_domain="I"),
+        batch_dims=TensorInputType(const=True, optional=True, type_domain=types.int32),
+    )
+
+    type_domains = {
+        "U": (types.fp16, types.fp32, types.int32),
+        "I": (types.int32, types.uint16, types.int16),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            batch_dims=0,
+        )
+
+    def type_inference(self):
+        batch_dims = self.batch_dims.val
+        indices_depth = self.indices.shape[-1]
+        if indices_depth > self.x.rank - batch_dims:
+            msg = "For node {}, indices.shape[-1] ({}) + batch_dims ({}) must be less than or equal to the input rank {}".format(
+                self.name, indices_depth, batch_dims, self.x.rank
+            )
+            raise ValueError(msg)
+        out_type = self.x.dtype
+        out_shape = self.indices.shape[:-1] + self.x.shape[batch_dims+indices_depth:]
+        return types.tensor(out_type, out_shape)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS16/tensor_operation.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS16/tensor_operation.py
new file mode 100644
index 00000000..e29a3ff6
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS16/tensor_operation.py
@@ -0,0 +1,115 @@
+# Copyright (c) 2022, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import numpy as np
+
+from coremltools.converters.mil.mil import types
+from coremltools.converters.mil.mil.input_type import (DefaultInputs,
+                                                       InputSpec,
+                                                       TensorInputType)
+from coremltools.converters.mil.mil.operation import (VALUE, Operation,
+                                                      precondition)
+from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op
+from coremltools.converters.mil.mil.ops.defs.iOS15.tensor_operation import \
+    topk as _topk_iOS15
+from coremltools.converters.mil.mil.ops.defs.iOS16 import _IOS16_TARGET
+
+
+@register_op(opset_version=_IOS16_TARGET)
+class fill_like(Operation):
+    """
+    Returns a tensor with the same size as the input tensor, filled with a constant value.
+
+    Parameters
+    ----------
+    ref_tensor: tensor<\*?, T> (Required)
+        * Input tensor.
+    value: const (Optional)
+        * Constant value to fill in.
+        * Defaults to ``0.0``.
+
+    Returns
+    -------
+    tensor<\*?, U>
+        * Tensor with the shape of ``ref_tensor`` and the dtype of ``value``.
+
+    Attributes
+    ----------
+    T: fp16, fp32, int32, bool
+    U: fp16, fp32, int32, bool
+    """
+
+    input_spec = InputSpec(
+        ref_tensor=TensorInputType(type_domain="T"),
+        value=TensorInputType(const=True, optional=True, type_domain="U"),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32, types.bool),
+        "U": (types.fp16, types.fp32, types.int32, types.bool),
+    }
+
+    def default_inputs(self):
+        return DefaultInputs(
+            value=0.
+        )
+
+    def type_inference(self):
+        return types.tensor(self.value.dtype, self.ref_tensor.shape)
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        return np.full(shape=self.ref_tensor.shape, fill_value=self.value.val)
+
+@register_op(opset_version=_IOS16_TARGET)
+class topk(_topk_iOS15):
+    """
+    A version of ``topk`` for iOS 16+. This section documents only the additional
+    parameters of the iOS 16+ version; for the rest of the documentation, see
+    `the iOS 15 version of topk <#coremltools.converters.mil.mil.ops.defs.iOS15.tensor_operation.topk>`_.
+
+    Parameters
+    ----------
+    sort: const (Optional)
+        * Defaults to ``True``.
+        * If ``True``, the ``top-k`` elements are themselves sorted.
+          Otherwise, no particular ordering is guaranteed.
+    return_indices: const (Optional)
+        * Defaults to ``True``.
+        * If ``True``, returns both values and indices. Otherwise, returns only the ``top-k`` values.
+
+    Returns
+    -------
+    tensor<\*?, T>
+        * Values of the top/bottom ``k`` elements.
+
+    tensor<\*?, int32>
+        * Only returned when ``return_indices = True``.
+        * Indices of the top/bottom ``k`` elements along the axis.
+
+    Attributes
+    ----------
+    T: fp32, int32
+    """
+
+    input_spec = _topk_iOS15.input_spec + InputSpec(
+        sort=TensorInputType(const=True, optional=True, type_domain=types.bool),
+        return_indices=TensorInputType(const=True, optional=True, type_domain=types.bool),
+    )
+
+    def default_inputs(self):
+        return super().default_inputs() + DefaultInputs(sort=True, return_indices=True)
+
+    def type_inference(self):
+        value_type, indices_type = super().type_inference()
+        if not self.return_indices.val:
+            return value_type
+        return value_type, indices_type
+
+    @precondition(allow=VALUE)
+    def value_inference(self):
+        values, indices = super().value_inference()
+        if not self.return_indices.val:
+            return values
+        return values, indices
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS16/tensor_transformation.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS16/tensor_transformation.py
new file mode 100644
index 00000000..473b7c68
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/defs/iOS16/tensor_transformation.py
@@ -0,0 +1,186 @@
+# Copyright (c) 2022, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import numpy as np
+
+from coremltools.converters.mil.mil import types
+from coremltools.converters.mil.mil.input_type import (InputSpec,
+                                                       TensorInputType,
+                                                       TupleInputType)
+from coremltools.converters.mil.mil.operation import Operation
+from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op
+from coremltools.converters.mil.mil.ops.defs.iOS16 import _IOS16_TARGET
+from coremltools.converters.mil.mil.types.symbolic import any_symbolic
+
+
+@register_op(opset_version=_IOS16_TARGET)
+class reshape_like(Operation):
+    """
+    Reshape a tensor to an output shape specified by some or all dimensions of a tuple of reference tensors ``ref_tensors``.
+
+    Parameters
+    ----------
+    x: tensor<\*?, T> (Required)
+        * The input tensor to be reshaped.
+
+    ref_tensors: Tuple[tensor<\*?, R>] (Required)
+        * A tuple of tensors that define the output shape.
+
+    begins: Tuple[const] (Required)
+        * A tuple of integers specifying the begin index into the shape vector of the corresponding ``ref_tensor``.
+
+    ends: Tuple[const] (Required)
+        * A tuple of integers specifying the end index into the shape vector of the corresponding ``ref_tensor``.
+
+    end_masks: Tuple[const] (Required)
+        * If ``True``, select all axes from the begin index until the end of the corresponding ``ref_tensor``, as in
+          ``ref_tensors[i].shape[begins[i]:]``.
+
+    Notes
+    -----
+    The output shape is computed as follows:
+
+    .. sourcecode:: python
+
+        output_shape = []
+        num_of_refs = len(begins)
+        for i in range(num_of_refs):
+            if end_masks[i]:
+                output_shape.append(ref_tensors[i].shape[begins[i]:])
+            else:
+                output_shape.append(ref_tensors[i].shape[begins[i]:ends[i]])
+        output_shape = np.concatenate(output_shape, axis=0)
+
+    The following is an example:
+
+    .. sourcecode:: python
+
+        ref_tensors = [tensor[2, 3, 4], tensor[1, 5, 6]]
+        begins = [0, 1]
+        ends = [2, 0]
+        end_masks = [False, True]
+
+    The output shape would be ``(2, 3, 5, 6)``.
+
+    Returns
+    -------
+    tensor<\*?, T>
+        * Same type as input tensor ``x``.
+        * Output shape is computed by ``ref_tensors``, ``begins``, ``ends``, and ``end_masks``.
+
+    Attributes
+    ----------
+    T: fp16, fp32, i32, bool
+    R: fp16, fp32, i32, bool
+    """
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        ref_tensors=TupleInputType(),
+        begins=TupleInputType(),
+        ends=TupleInputType(),
+        end_masks=TupleInputType(),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32, types.int32, types.bool),
+    }
+
+    def _check_is_const_tuple_with_scalar(self, param, expected_type, param_name):
+        """
+        This utility function checks that ``param`` is a tuple of scalars with the expected data type.
+        """
+        for x in param:
+            if x.dtype != expected_type or x.shape != ():
+                msg = "In op reshape_like {}, {} must be a Tuple of scalar {}. Got a {} tensor with shape {}.".format(
+                    self.name,
+                    param_name,
+                    expected_type.__type_info__(),
+                    x.dtype.__type_info__(),
+                    x.shape,
+                )
+                raise ValueError(msg)
+
+    def type_inference(self):
+        # Validate the inputs
+        ref_number = len(self.ref_tensors)
+        if len(self.begins) != ref_number or len(self.ends) != ref_number or len(self.end_masks) != ref_number:
+            msg = (
+                "Op reshape_like {}'s ref_tensors, begins, ends and end_masks must have exactly the same length. "
+                "Got {}, {}, {} and {}."
+            ).format(self.name, ref_number, len(self.begins), len(self.ends), len(self.end_masks))
+            raise ValueError(msg)
+
+        self._check_is_const_tuple_with_scalar(self.begins, types.int32, "begins")
+        self._check_is_const_tuple_with_scalar(self.ends, types.int32, "ends")
+        self._check_is_const_tuple_with_scalar(self.end_masks, types.bool, "end_masks")
+
+        # Compute the output shape
+        out_shape = ()
+        for ref_tensor, begin, end, end_mask in zip(self.ref_tensors, self.begins, self.ends, self.end_masks):
+            shape = ref_tensor.shape
+            begin, end, end_mask = begin.val, end.val, end_mask.val
+            ref_shape = shape[begin:end] if not end_mask else shape[begin:]
+            out_shape += tuple(ref_shape)
+
+        # Output shape must be known at compile time
+        if any_symbolic(out_shape):
+            msg = "Output shape of a reshape_like op {} must not be symbolic. Got {}".format(self.name, out_shape)
+            raise ValueError(msg)
+
+        # Output shape must be consistent with the input shape
+        if not any_symbolic(self.x.shape):
+            if np.prod(self.x.shape) != np.prod(out_shape):
+                msg = "At reshape_like op {}, input shape {} not consistent with the output shape {}.".format(
+                    self.name,
+                    self.x.shape,
+                    out_shape
+                )
+                raise ValueError(msg)
+
+        return types.tensor(self.x.dtype, out_shape)
+
+@register_op(opset_version=_IOS16_TARGET)
+class pixel_unshuffle(Operation):
+    """
+    Rearrange elements in a tensor from spatial dimensions into depth (channel).
+    It is the inverse of `pixel_shuffle <#coremltools.converters.mil.mil.ops.defs.iOS15.tensor_transformation.pixel_shuffle>`_,
+    and is equivalent to PyTorch's ``PixelUnshuffle``.
+
+    Parameters
+    ----------
+    x: tensor<[n, C, H * f, W * f], T> (Required)
+        * Input tensor of rank ``4``.
+
+    downscale_factor: const
+        * Factor to decrease spatial resolution by.
+
+    Returns
+    -------
+    tensor<[n, C * f^2, H, W], T>
+        * Where ``f`` is the downscale factor.
+
+    Attributes
+    ----------
+    T: fp16, fp32
+
+    References
+    ----------
+    `torch.nn.PixelUnshuffle <https://pytorch.org/docs/stable/generated/torch.nn.PixelUnshuffle.html>`_
+    """
+
+    input_spec = InputSpec(
+        x=TensorInputType(type_domain="T"),
+        downscale_factor=TensorInputType(const=True, type_domain=types.uint32),
+    )
+
+    type_domains = {
+        "T": (types.fp16, types.fp32),
+    }
+
+    def type_inference(self):
+        x_type = self.x.dtype
+        n, c, h, w = self.x.shape
+        f = self.downscale_factor.val
+        # use integer division so concrete output dimensions stay integral
+        ret_shape = (n, c * f * f, h // f, w // f)
+        return types.tensor(x_type, ret_shape)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/helper.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/helper.py
new file mode 100644
index 00000000..fc699b63
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/helper.py
@@ -0,0 +1,28 @@
+# Copyright (c) 2022, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+def _get_version_of_op(op_variants, opset_version):
+    """
+    A utility function that retrieves an op cls given a dictionary of op variants and a target version
+    """
+    assert isinstance(op_variants, dict)
+    opset_versions = list(op_variants.keys())
+    opset_versions.sort()
+    if opset_version is None:
+        op_cls = op_variants[opset_versions[0]]
+    elif opset_version > opset_versions[-1]:
+        # TODO(rdar://103267345): Remove when no longer required.
+        # MIL opsets inherit ops from previous ones by default.
+        op_cls = op_variants[opset_versions[-1]]
+    else:
+        if opset_version not in op_variants:
+            op_type = list(op_variants.values())[0].__name__
+            msg = (
+                "No available version for {} in the {!s} opset. Please update the "
+                "minimum_deployment_target to at least {!s}"
+            ).format(op_type, opset_version, opset_versions[0])
+            raise ValueError(msg)
+        op_cls = op_variants[opset_version]
+    return op_cls
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/registry.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/registry.py
new file mode 100644
index 00000000..d24f5f4e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/registry.py
@@ -0,0 +1,190 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from collections import defaultdict
+
+from coremltools import _logger as logger
+from coremltools.converters.mil._deployment_compatibility import \
+    AvailableTarget as target
+from coremltools.converters.mil.mil.block import curr_opset_version
+
+from ..builder import Builder
+from .helper import _get_version_of_op
+
+
+class SSAOpRegistry:
+
+    """
+    There are three kinds of operations that can be registered:
+
+    (1) core_ops: dict[str, dict[Operation]]
+      - These are the core ops in PyMIL, which have a direct mapping to the backend in neural_network or mlprogram
+      - The registered op is considered a core op if the namespace is not provided
+      - core_ops[op_type] is a dict that tracks the different opset versions for an op.
For instance:
+          - ``core_ops[op_1] = {
+                ct.target.iOS13: op_1_iOS13,
+                ct.target.iOS14: op_1_iOS13,
+                ct.target.iOS15: op_1_iOS13,
+                ct.target.iOS16: op_1_iOS13,
+            }``
+            . Only one version of op type ``op_1`` is registered, and it is defined in iOS13, which both the
+              neural_network and mlprogram backends support
+          - ``core_ops[op_2] = {
+                ct.target.iOS13: op_2_iOS13,
+                ct.target.iOS14: op_2_iOS13,
+                ct.target.iOS15: op_2_iOS13,
+                ct.target.iOS16: op_2_iOS16,
+            }``
+            . Two versions of op type ``op_2`` are registered, one each for iOS13 and iOS16.
+            . The builder picks up the correct version of the op according to curr_opset_version(), which returns the opset version of
+              the current function.
+              -- If ``curr_opset_version()`` is ``None`` (the version of the function is not set), ``mb.op_2`` calls the oldest version of the op by default, which is ``op_2_iOS13``
+              -- Otherwise, the builder picks up core_ops[op_2][curr_opset_version()]
+      - At the highest level, users can choose the desired version by specifying the ``minimum_deployment_target`` argument in ``coremltools.convert``
+      - The default ``opset_version`` for the core ops is iOS13, which the neural_network backend supports
+
+    (2) dialect_ops: dict[str, Operation]
+      - These are ops created for a specific frontend framework, for instance: ``tf_lstm_block, torch_upsample_nearest_neighbor``
+      - A graph pass must be customized by the developer to translate a dialect op into core ops
+
+    (3) custom_ops: dict[str, Operation]
+      - These are custom ops, which require an additional ``bindings`` field to be specified in the operator
+    """
+    SUPPORTED_OPSET_VERSIONS = (
+        target.iOS13,
+        target.iOS14,
+        target.iOS15,
+        target.iOS16
+    )
+    core_ops = defaultdict(dict)
+    dialect_ops = {}
+    custom_ops = {}
+
+    @staticmethod
+    def _get_core_op_cls(op_type=None):
+        """
+        A utility function that retrieves an op cls using the curr_opset_version
+        """
+        if op_type not in SSAOpRegistry.core_ops:
+            raise ValueError("op {} not registered.".format(op_type))
+        candidate_ops = SSAOpRegistry.core_ops[op_type]
+        return _get_version_of_op(candidate_ops, curr_opset_version())
+
+    @staticmethod
+    def register_op(_cls=None, is_custom_op=False, namespace=None, opset_version=target.iOS13, allow_override=False):
+        """
+        Registration routine for MIL Program operators.
+
+        Parameters
+        ----------
+        is_custom_op: boolean
+            - If ``True``, maps the current operator to ``custom_op``.
``custom_op`` requires an additional ``bindings`` field, which should be specified in the operator.
+            - Defaults to ``False``.
+
+        namespace: str
+            - If provided, the op is registered as a dialect op.
+            - Otherwise, it is considered a core op.
+
+        opset_version: int
+            - Specifies the minimum spec version that supports this op.
+            - Defaults to ``ct.target.iOS13``, which the neural_network backend supports.
+
+        allow_override: boolean
+            - If ``True``, an operation is allowed to override a previously registered operation with the same name.
+            - Defaults to ``False``.
+        """
+        def class_wrapper(op_cls):
+            op_type = op_cls.__name__
+            op_cls.__name__ = op_type
+
+            # debug message
+            op_msg = "op"
+            is_dialect_op = (namespace is not None)
+            if is_custom_op:
+                op_msg = "Custom op"
+            elif is_dialect_op:
+                op_msg = "Dialect op"
+            logger.debug("Registering {} {}".format(op_msg, op_type))
+
+            # pick the right dict for registration
+            if is_custom_op:
+                op_reg = SSAOpRegistry.custom_ops
+            elif is_dialect_op:
+                op_reg = SSAOpRegistry.dialect_ops
+                # Check that op_type is prefixed with namespace
+                if op_type[: len(namespace)] != namespace:
+                    msg = (
+                        "Dialect op type {} registered under the {} namespace must "
+                        + "be prefixed with {}"
+                    )
+                    raise ValueError(msg.format(op_type, namespace, namespace))
+            else:
+                op_reg = SSAOpRegistry.core_ops
+
+            # verify that the op has not been registered before if allow_override = False
+            msg = "SSA {} {} already registered.".format(op_msg, op_type)
+            if is_custom_op or is_dialect_op:
+                if op_type in op_reg and not allow_override:
+                    raise ValueError(msg)
+            else:
+                if opset_version in op_reg[op_type] and not allow_override:
+                    if opset_version - 1 not in op_reg[op_type] or (op_reg[op_type][opset_version - 1] != op_reg[op_type][opset_version]):
+                        raise ValueError(msg)
+
+            # add the op to op_reg
+            if is_custom_op or is_dialect_op:
+                op_reg[op_type] = op_cls
+            else:
+                # The older version of the op must be registered first, or it will override the
+                # newer version. For example, assuming an op has two versions: IOS13 and IOS15. If
+                # the IOS15 is registered first, the op_reg[op_type] will have that op class for
+                # IOS15/16/..., and when IOS13 is registered, it will override all op classes for
+                # IOS13/14/15/16/... where IOS15 op class will get lost. So we error out early
+                # instead of keep registering when this happens.
+                if opset_version in op_reg[op_type]:
+                    old_op_cls = op_reg[op_type][opset_version]
+                    for i in range(opset_version, SSAOpRegistry.SUPPORTED_OPSET_VERSIONS[-1] + 1):
+                        if op_reg[op_type][i] != old_op_cls:
+                            raise ValueError(
+                                f"Older version of op {op_type} must be registered "
+                                f"before a newer version."
+                            )
+                idx = SSAOpRegistry.SUPPORTED_OPSET_VERSIONS.index(opset_version)
+                for i in range(idx, len(SSAOpRegistry.SUPPORTED_OPSET_VERSIONS)):
+                    op_reg[op_type][SSAOpRegistry.SUPPORTED_OPSET_VERSIONS[i]] = op_cls
+
+            # add the version information to the op cls
+            op_cls._op_variants = op_reg[op_type]
+
+            @classmethod
+            def add_op(cls, **kwargs):
+                """
+                A utility function that helps the builder pick up the correct op class when calling ``mb.op``.
+
+                There are two cases:
+
+                (1) custom op / dialect op:
+                    If the op is a custom op or a dialect op, the op class is picked up directly through
+                    ``SSAOpRegistry.custom_ops[op_type]`` or ``SSAOpRegistry.dialect_ops[op_type]``
+
+                (2) core op:
+                    For a core op, the builder picks up the correct version according to ``curr_opset_version()``
+                """
+                op_cls_to_add = None
+                is_core_op = (op_reg == SSAOpRegistry.core_ops)
+                if is_core_op:
+                    op_cls_to_add = SSAOpRegistry._get_core_op_cls(op_type)
+                else:
+                    op_cls_to_add = op_reg[op_type]
+
+                return cls._add_op(op_cls_to_add, **kwargs)
+
+            setattr(Builder, op_type, add_op)
+            return op_cls
+
+        if _cls is None:
+            return class_wrapper
+
+        return class_wrapper(_cls)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/__init__.py
new file mode 100644
index 00000000..61aafff4
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/__init__.py
@@ -0,0 +1,4 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_activation.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_activation.py
new file mode 100644
index 00000000..2daf5ad8
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_activation.py
@@ -0,0 +1,1080 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import itertools
+
+import numpy as np
+import pytest
+import scipy
+
+from coremltools.converters.mil import testing_reqs
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil import types
+from coremltools.converters.mil.testing_utils import ssa_fn
+
+from .testing_utils import run_compare_builder
+
+backends = testing_reqs.backends
+compute_units = testing_reqs.compute_units
+
+
+class TestClampedReLU:
+    @pytest.mark.parametrize(
+        "compute_unit, backend", itertools.product(compute_units, backends)
+    )
+    def test_builder_to_backend_smoke(self, compute_unit, backend):
+        t = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
+        input_placeholders = {
+            "x": mb.placeholder(shape=t.shape),
+        }
+        input_values = {"x": t}
+
+        def build(x):
+            return mb.clamped_relu(x=x, alpha=2.0, beta=1.0)
+
+        expected_output_types = (2, 3, types.fp32)
+        expected_outputs = np.array([[-2, 1, -6], [1, -10, 1]], dtype=np.float32)
+
+        run_compare_builder(
+            build,
+            input_placeholders,
+            input_values,
+            expected_output_types,
+            expected_outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+    @ssa_fn
+    def test_builder_eval(self):
+        x_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
+        v = mb.clamped_relu(x=x_val, alpha=2.0, beta=1.0)
+
+        x = np.minimum(np.maximum(x_val, 0), 1.0)
+        y = np.minimum(np.minimum(x_val, 0) * 2.0, 1.0)
+        np.testing.assert_allclose(x + y, v.val, atol=1e-04, rtol=1e-05)
+
+    @pytest.mark.parametrize(
+        "compute_unit, backend, dim, alpha, beta",
+        itertools.product(
+            compute_units,
+            backends,
+            [2, 4, 8],
+            [2.0, 3.0],
+            [4.0, 5.0]
+        ),
+    )
+    def test_builder_to_backend_stress(self, compute_unit, backend, dim, alpha, beta):
+        shape_x = np.array([dim, dim])
+        x_val = np.random.rand(*shape_x)
+        input_placeholders = {"x": mb.placeholder(shape=x_val.shape)}
+        input_values = {"x": x_val}
+
+        def build(x):
+            return [mb.clamped_relu(x=x, alpha=alpha, beta=beta)]
+
+        # the reference computation must use the parametrized alpha and beta
+        x = np.minimum(np.maximum(x_val, 0), beta)
+        y = np.minimum(np.minimum(x_val, 0) * alpha, beta)
+
+        expected_outputs = [x + y]
+        expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs]
+
+        run_compare_builder(
+            build,
+            input_placeholders,
+            input_values,
+            expected_output_types,
+            expected_outputs=expected_outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+
+class TestELU:
+    @pytest.mark.parametrize(
+        "compute_unit, backend", itertools.product(compute_units, backends)
+    )
+    def test_builder_to_backend_smoke(self, compute_unit, backend):
+        t = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
+        input_placeholders = {
+            "x": mb.placeholder(shape=t.shape),
+        }
+        input_values = {"x": t}
+
+        def build(x):
+            return mb.elu(x=x, alpha=2.0)
+
+        expected_output_types = (2, 3, types.fp32)
+        expected_outputs = np.array(
+            [[-1.2642411, 2.0, -1.9004259], [4.0, -1.9865241, 6.0]], dtype=np.float32
+        )
+
+        run_compare_builder(
+            build,
+            input_placeholders,
+            input_values,
+            expected_output_types,
+            expected_outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+    @ssa_fn
+    def test_builder_eval(self):
+        x_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
+        v = mb.elu(x=x_val, alpha=2.0)
+
+        b = np.copy(x_val)
+        b[b < 0] = 2.0 * (np.exp(b[b < 0]) - 1)
+
+        np.testing.assert_allclose(b, v.val, atol=1e-04, rtol=1e-05)
+
+
+class TestGeLU:
+    @pytest.mark.parametrize(
"compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + t = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + input_placeholders = { + "x": mb.placeholder(shape=t.shape), + } + input_values = {"x": t} + + def build(x): + return mb.gelu(x=x) + + expected_output_types = (2, 3, types.fp32) + expected_outputs = np.array( + [ + [-1.58691406e-01, 1.95410156e00, -4.04968858e-03], + [3.99987316e00, -1.49011612e-06, 6.00000000e00], + ], + dtype=np.float32, + ) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + atol=1e-3, + rtol=1e-3, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + + mode = "TANH_APPROXIMATION" + v = mb.gelu(x=x_val, mode=mode) + a = np.sqrt(2 / np.pi) * (x_val + 0.044715 * np.power(x_val, 3)) + out = 0.5 * x_val * (1 + np.tanh(a)) + np.testing.assert_allclose(out, v.val, atol=1e-04, rtol=1e-05) + + mode = "SIGMOID_APPROXIMATION" + v = mb.gelu(x=x_val, mode=mode) + out = x_val * (1 / (1 + np.exp(-(1.702 * x_val)))) + np.testing.assert_allclose(out, v.val, atol=1e-04, rtol=1e-05) + + v = mb.gelu(x=x_val) + out = 0.5 * x_val * (1 + scipy.special.erf(x_val / np.sqrt(2))) + np.testing.assert_allclose(out, v.val, atol=1e-04, rtol=1e-05) + + @pytest.mark.parametrize( + "compute_unit, backend, dim, mode", + itertools.product( + compute_units, + backends, + [2, 6], + ["EXACT", "TANH_APPROXIMATION", "SIGMOID_APPROXIMATION"], + ), + ) + def test_builder_to_backend_stress(self, compute_unit, backend, dim, mode): + shape = np.array([dim, dim]) + x_val = np.random.rand(*shape) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [mb.gelu(x=x, mode=mode)] + + if mode == "TANH_APPROXIMATION": + a = np.sqrt(2 / np.pi) * (x_val + 0.044715 * np.power(x_val, 3)) + out = 0.5 * x_val * (1 + np.tanh(a)) + elif mode == "SIGMOID_APPROXIMATION": + out = x_val * (1 / (1 + np.exp(-(1.702 * x_val)))) + else: + out = 0.5 * x_val * (1 + scipy.special.erf(x_val / np.sqrt(2))) + + expected_outputs = [out] + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs=expected_outputs, + compute_unit=compute_unit, + backend=backend, + atol=1e-3, + rtol=1e-3, + ) + + +class TestLeakyReLU: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + t = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + input_placeholders = { + "x": mb.placeholder(shape=t.shape), + } + input_values = {"x": t} + + def build(x): + return mb.leaky_relu(x=x, alpha=2.0) + + expected_output_types = (2, 3, types.fp32) + expected_outputs = np.array([[-2, 2, -6], [4, -10, 6]], dtype=np.float32) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.leaky_relu(x=x_val, alpha=2.0) + + b = np.copy(x_val) + b[b < 0] *= 2.0 + np.testing.assert_allclose(b, v.val, atol=1e-04, rtol=1e-05) + + +class TestLinearActivation: + @pytest.mark.parametrize( 
+        "compute_unit, backend", itertools.product(compute_units, backends)
+    )
+    def test_builder_to_backend_smoke(self, compute_unit, backend):
+        t = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
+        input_placeholders = {"x": mb.placeholder(shape=t.shape)}
+        input_values = {"x": t}
+
+        def build(x):
+            return mb.linear_activation(x=x, alpha=2.0, beta=3.0)
+
+        expected_output_types = (2, 3, types.fp32)
+        expected_outputs = np.array([[1, 7, -3], [11, -7, 15]], dtype=np.float32)
+
+        run_compare_builder(
+            build,
+            input_placeholders,
+            input_values,
+            expected_output_types,
+            expected_outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+    @ssa_fn
+    def test_builder_eval(self):
+        x_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
+        v = mb.linear_activation(x=x_val, alpha=2.0, beta=3.0)
+        np.testing.assert_allclose(x_val * 2.0 + 3.0, v.val, atol=1e-04, rtol=1e-05)
+
+    @pytest.mark.parametrize(
+        "compute_unit, backend, dim",
+        itertools.product(compute_units, backends, [2, 4, 8]),
+    )
+    def test_builder_to_backend_stress(self, compute_unit, backend, dim):
+        shape = np.array([dim, dim])
+        x_val = np.random.rand(*shape)
+        alpha = np.random.uniform()
+        beta = np.random.uniform()
+        input_placeholders = {
+            "x": mb.placeholder(shape=x_val.shape),
+        }
+        input_values = {"x": x_val}
+
+        def build(x):
+            return [mb.linear_activation(x=x, alpha=alpha, beta=beta)]
+
+        expected_outputs = [x_val * alpha + beta]
+        expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs]
+
+        run_compare_builder(
+            build,
+            input_placeholders,
+            input_values,
+            expected_output_types,
+            expected_outputs=expected_outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+
+class TestPReLU:
+    @pytest.mark.parametrize(
+        "rank, alpha_values, compute_unit, backend",
+        itertools.product(
+            [3, 4, 5],
+            [[1.0, 2.0, 3.0], [4.0, 4.0, 4.0]],
+            compute_units,
+            backends,
+        )
+    )
+    def test_builder_to_backend_smoke(self, rank, alpha_values, compute_unit, backend):
+        if (backend[0] == "mlprogram" and backend[1] == "fp16"):
+            pytest.xfail(
+                "rdar://92175249 ([MIL] TestActivation::test_prelu[backend=(mlprogram, fp16)] CI failure)"
+            )
+
+        alpha = np.array(alpha_values, dtype=np.float32)
+
+        if rank == 3 or rank == 5:
+            are_alpha_values_same = np.where(np.abs(alpha - alpha[0]) > 1e-5)[0].size == 0
+            if not are_alpha_values_same:
+                pytest.xfail("rdar://91442339")
+
+        t = np.array([[[[-1, 3]], [[-1, 2]], [[4, -5]]]], dtype=np.float32)
+        expected_outputs = np.array(
+            [[[[-1 * alpha[0], 3]], [[-1 * alpha[1], 2]], [[4, -5 * alpha[2]]]]], dtype=np.float32
+        )
+
+        shape = None
+        if rank == 3:
+            shape = (1, 3, 2)
+        elif rank == 4:
+            shape = (1, 3, 1, 2)
+        elif rank == 5:
+            shape = (1, 3, 1, 1, 2)
+        else:
+            raise ValueError("rank not supported")
+
+        t = np.reshape(t, shape)
+        expected_outputs = np.reshape(expected_outputs, shape)
+        expected_output_types = tuple([s for s in shape]) + (types.fp32,)
+
+        input_placeholders = {"x": mb.placeholder(shape=t.shape)}
+        input_values = {"x": t}
+
+        def build(x):
+            return mb.prelu(x=x, alpha=alpha)
+
+        run_compare_builder(
+            build,
+            input_placeholders,
+            input_values,
+            expected_output_types,
+            expected_outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+    @ssa_fn
+    def test_builder_eval(self):
+        x_val = np.array([[[[-1, 3, 6]], [[-1, 2, -3]], [[4, -5, 6]]]], dtype=np.float32)
+        alpha = np.array([1, 2, 3], dtype=np.float32)
+        v = mb.prelu(x=x_val, alpha=alpha)
+
+        alpha_br = alpha
+
+        # alpha aligns with the channel axis (axis -3), so stop expanding one
+        # dimension short of the rank
+        for i in range(1, len(x_val.shape) - 1):
+            alpha_br = 
np.expand_dims(alpha_br, i) + + x_pos = np.maximum(x_val, 0) + b = np.minimum(x_val, 0) + + np.testing.assert_allclose(x_pos + b * alpha_br, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_eval1(self): + x_val = np.array([[[-1, 3, 6]], [[-1, 2, -3]], [[4, -5, 6]]], dtype=np.float32) + with pytest.raises(ValueError, match=r".* dimension 1 .*"): + mb.prelu(x=x_val, alpha=np.array([1, 2], dtype=np.float32)) + + @ssa_fn + def test_builder_eval2(self): + x_val = np.array([[[-1, 3, 6]], [[-1, 2, -3]], [[4, -5, 6]]], dtype=np.float32) + with pytest.raises(ValueError, match=r"alpha .* rank 1"): + mb.prelu(x=x_val, alpha=np.array([[1, 2, 3]], dtype=np.float32)) + + @ssa_fn + def test_builder_eval3(self): + with pytest.raises(ValueError, match=r"x .* rank 3"): + mb.prelu( + x=np.array([1], dtype=np.float32), + alpha=np.array([[1, 2, 3]], dtype=np.float32), + ) + + @pytest.mark.parametrize( + "compute_unit, backend, dim, chan", + itertools.product( + compute_units, + backends, + [1, 2, 4, 8], + [2, 3, 4] + ), + ) + def test_builder_to_backend_stress(self, compute_unit, backend, dim, chan): + shape = np.array([1, chan, dim, dim]) + x_val = np.random.rand(*shape) + alpha_val = np.random.rand(chan).astype(np.float32) + + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [mb.prelu(x=x, alpha=alpha_val)] + + alpha_br = np.copy(alpha_val) + for i in range(1, len(x_val.shape) - 1): + alpha_br = np.expand_dims(alpha_br, i) + x_pos = np.maximum(x_val, 0) + b = np.minimum(x_val, 0) + + expected_outputs = [x_pos + b * alpha_br] + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs=expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestReLU: + @pytest.mark.parametrize( + "compute_unit, backend, data_type", + itertools.product(compute_units, backends, [np.float32, np.float16]), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend, data_type): + t = np.array([[-1, 2, -3], [4, -5, 6]], dtype=data_type) + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return mb.relu(x=x) + + expected_output_types = (2, 3, types.fp32) + expected_outputs = np.array([[0, 2, 0], [4, 0, 6]], dtype=data_type) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.relu(x=x_val) + np.testing.assert_allclose(np.maximum(x_val, 0), v.val, atol=1e-04, rtol=1e-05) + + +class TestReLU6: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + t = np.array([[-1, 7, -3], [4, -5, 8]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return mb.relu6(x=x) + + expected_output_types = (2, 3, types.fp32) + expected_outputs = np.array([[0, 6, 0], [4, 0, 6]], dtype=np.float32) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.array([[-1, 7, -3], [4, -5, 
8]], dtype=np.float32) + v = mb.relu6(x=x_val) + np.testing.assert_allclose( + np.minimum(np.maximum(x_val, 0), 6), v.val, atol=1e-04, rtol=1e-05 + ) + + +class TestScaledTanh: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + t = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return mb.scaled_tanh(x=x, alpha=2.0, beta=1.0) + + expected_output_types = (2, 3, types.fp32) + expected_outputs = np.array( + [[-1.5231884, 1.9280552, -1.9901096], [1.9986587, -1.9998184, 1.9999754]], + dtype=np.float32, + ) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.scaled_tanh(x=x_val, alpha=2.0, beta=1.0) + np.testing.assert_allclose(2.0 * np.tanh(x_val * 1.0), v.val, atol=1e-04, rtol=1e-05) + + @pytest.mark.parametrize( + "compute_unit, backend, dim, alpha, beta", + itertools.product( + compute_units, + backends, + [2, 4, 8], + [2.0, 3.0], + [4.0, 5.0] + ), + ) + def test_builder_to_backend_stress(self, compute_unit, backend, dim, alpha, beta): + shape_x = np.array([dim, dim]) + x_val = np.random.rand(*shape_x) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [mb.scaled_tanh(x=x, alpha=alpha, beta=beta)] + + expected_outputs = [alpha * np.tanh(x_val * beta)] + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs=expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestSigmoid: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + t = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return mb.sigmoid(x=x) + + expected_output_types = (2, 3, types.fp32) + expected_outputs = np.array( + [ + [0.2689414213699951, 0.8807970779778823, 0.04742587], + [0.98201376, 0.00669285, 0.9975274], + ], + dtype=np.float32, + ) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.sigmoid(x=x_val) + np.testing.assert_allclose(1 / (1 + np.exp(-x_val)), v.val, atol=1e-04, rtol=1e-05) + + +class TestSigmoidHard: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + t = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return mb.sigmoid_hard(x=x, alpha=1.0, beta=2.0) + + expected_output_types = (2, 3, types.fp32) + expected_outputs = np.array( + [[1.0, 1.0, 0.0], [1.0, 0.0, 1.0]], dtype=np.float32 + ) + + run_compare_builder( + build, + input_placeholders, + 
input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + alpha = 1.0 + beta = 2.0 + v = mb.sigmoid_hard(x=x_val, alpha=alpha, beta=beta) + np.testing.assert_allclose( + np.minimum(np.maximum((alpha * x_val) + beta, 0), 1), + v.val, + atol=1e-04, + rtol=1e-05, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, dim, alpha, beta", + itertools.product( + compute_units, + backends, + [2, 4, 8], + [2.0, 3.0], + [4.0, 5.0] + ), + ) + def test_builder_to_backend_stress(self, compute_unit, backend, dim, alpha, beta): + shape_x = np.array([dim, dim]) + x_val = np.random.rand(*shape_x) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [mb.sigmoid_hard(x=x, alpha=alpha, beta=beta)] + + expected_outputs = [np.minimum(np.maximum((alpha * x_val) + beta, 0), 1)] + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs=expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestSiLU: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x_val = np.array([-1.1, 2.2, -3.3, 4.4], dtype=np.float32).reshape((1, 2, 1, 2)) + + input_placeholder_dict = { + "x": mb.placeholder(shape=x_val.shape), + } + input_value_dict = {"x": x_val} + expected_output_type = x_val.shape + (types.fp32,) + + def build(x): + return mb.silu(x=x) + + expected_output = np.array( + [-0.2747, 1.9805, -0.1174, 4.3466], dtype=np.float32 + ).reshape(expected_output_type[:-1]) + + run_compare_builder( + build, + input_placeholder_dict, + input_value_dict, + expected_output_type, + expected_output, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestSoftplus: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + t = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return mb.softplus(x=x) + + expected_output_types = (2, 3, types.fp32) + expected_outputs = np.array( + [[0.31326166, 2.126928, 0.04858733], [4.01815, 0.00671535, 6.0024757]], + dtype=np.float32, + ) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.softplus(x=x_val) + np.testing.assert_allclose( + np.log(1 + np.exp(-np.abs(x_val))) + np.maximum(x_val, 0), v.val, atol=1e-04, rtol=1e-05 + ) + + +# No torch test because there is no direct torch translation to this layer +class TestSoftplusParametric: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + t = np.array([[[[-1, 3, 6]], [[-1, 2, -3]], [[4, -5, 6]]]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return mb.softplus_parametric( + x=x, + alpha=np.array([1, 
2, 3], dtype=np.float32), + beta=np.array([4, 5, 6], dtype=np.float32), + ) + + expected_output_types = (1, 3, 1, 3, types.fp32) + expected_outputs = np.array( + [[ + [[1.8142700e-02, 1.2000000e01, 2.4000000e01]], + [[1.3427734e-02, 2.0000000e01, 7.1525574e-07]], + [[7.2000000e01, 0.0000000e00, 1.0800000e02]], + ]], + dtype=np.float32, + ) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.array([[[[-1, 3, 6]], [[-1, 2, -3]], [[4, -5, 6]]]], dtype=np.float32) + v = mb.softplus_parametric( + x=x_val, + alpha=np.array([1, 2, 3], dtype=np.float32), + beta=np.array([4, 5, 6], dtype=np.float32), + ) + + alpha_br = np.array([1, 2, 3], dtype=np.float32) + beta_br = np.array([4, 5, 6], dtype=np.float32) + for i in range(1, len(x_val.shape)): + alpha_br = np.expand_dims(alpha_br, i) + beta_br = np.expand_dims(beta_br, i) + out = alpha_br * np.log(np.exp(x_val * beta_br) + 1) + + np.testing.assert_allclose(out, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_eval2(self): + x_val = np.array([[[-1, 3, 6]], [[-1, 2, -3]], [[4, -5, 6]]], dtype=np.float32) + with pytest.raises(ValueError, match=r".* dimension 1 .*"): + mb.softplus_parametric( + x=x_val, + alpha=np.array([1, 2], dtype=np.float32), + beta=np.array([4, 5, 6], dtype=np.float32), + ) + + @ssa_fn + def test_builder_eval3(self): + x_val = np.array([[[-1, 3, 6]], [[-1, 2, -3]], [[4, -5, 6]]], dtype=np.float32) + with pytest.raises(ValueError, match=r"alpha .* rank 1"): + mb.softplus_parametric( + x=x_val, + alpha=np.array([[1, 2, 3]], dtype=np.float32), + beta=np.array([4, 5, 6], dtype=np.float32), + ) + + @ssa_fn + def test_builder_eval4(self): + with pytest.raises(ValueError, match=r"x .* rank 3"): + mb.softplus_parametric( + x=np.array([1], dtype=np.float32), + alpha=np.array([[1, 2, 3]], dtype=np.float32), + beta=np.array([4, 5, 6], dtype=np.float32), + ) + + @ssa_fn + def test_builder_eval5(self): + x_val = np.array([[[-1, 3, 6]], [[-1, 2, -3]], [[4, -5, 6]]], dtype=np.float32) + with pytest.raises(ValueError, match=r".* dimension 1 .*"): + mb.softplus_parametric( + x=x_val, + alpha=np.array([1, 2, 3], dtype=np.float32), + beta=np.array([5, 6], dtype=np.float32), + ) + + @ssa_fn + def test_builder_eval6(self): + x_val = np.array([[[[-1, 3, 6]], [[-1, 2, -3]], [[4, -5, 6]]]], dtype=np.float32) + with pytest.raises(ValueError, match=r"beta .* rank 1"): + mb.softplus_parametric( + x=x_val, + alpha=np.array([1, 2, 3], dtype=np.float32), + beta=np.array([[4, 5, 6]], dtype=np.float32), + ) + + @pytest.mark.parametrize( + "compute_unit, backend, dim, chan", + itertools.product( + compute_units, + backends, + [1, 2, 4, 8], + [1, 2, 3] + ), + ) + def test_builder_to_backend_stress(self, compute_unit, backend, dim, chan): + shape = np.array([1, chan, dim, dim]) + x_val = np.random.rand(*shape) + alpha_val = np.random.rand(chan).astype(np.float32) + beta_val = np.random.rand(chan).astype(np.float32) + + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [mb.softplus_parametric(x=x, alpha=alpha_val, beta=beta_val)] + + alpha_br = np.copy(alpha_val) + beta_br = np.copy(beta_val) + for i in range(1, len(x_val.shape) - 1): + alpha_br = np.expand_dims(alpha_br, i) + beta_br = np.expand_dims(beta_br, i) + expected_outputs = [alpha_br * np.log(np.exp(x_val * beta_br) + 1)] + expected_output_types = 
[o.shape[:] + (types.fp32,) for o in expected_outputs]
+
+        run_compare_builder(
+            build,
+            input_placeholders,
+            input_values,
+            expected_output_types,
+            expected_outputs=expected_outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+
+class TestSoftmax:
+    @pytest.mark.parametrize(
+        "compute_unit, backend", itertools.product(compute_units, backends)
+    )
+    def test_builder_to_backend_smoke(self, compute_unit, backend):
+        t = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
+        input_placeholders = {"x": mb.placeholder(shape=t.shape)}
+        input_values = {"x": t}
+
+        def build(x):
+            return mb.softmax(x=x, axis=0)
+
+        expected_output_types = (2, 3, types.fp32)
+        expected_outputs = np.array(
+            [
+                [6.69285092e-03, 9.99088949e-01, 1.23394576e-04],
+                [9.93307149e-01, 9.11051194e-04, 9.99876605e-01],
+            ],
+            dtype=np.float32,
+        )
+        run_compare_builder(
+            build,
+            input_placeholders,
+            input_values,
+            expected_output_types,
+            expected_outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+    @ssa_fn
+    def test_builder_eval(self):
+        x_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
+        v = mb.softmax(x=x_val, axis=0)
+        np.testing.assert_allclose(
+            scipy.special.softmax(x_val, axis=0), v.val, atol=1e-04, rtol=1e-05
+        )
+
+    @pytest.mark.parametrize(
+        "input_size", [(1), (2), (1, 2), (2, 2), (2, 3, 4), (2, 3, 4, 10)]
+    )
+    def test_value_inference(self, input_size):
+        rs = np.random.RandomState(1234)
+        x = rs.random(input_size)
+
+        for axis in range(-x.ndim, x.ndim - 1):
+            @mb.program(input_specs=[])
+            def prog():
+                return mb.softmax(x=x, axis=axis)
+
+            op = list(prog.functions.values())[0].operations[2]
+            assert op.op_type == "softmax"
+            np.testing.assert_allclose(
+                op.value_inference(),
+                scipy.special.softmax(x, axis=axis),
+                atol=1e-04,
+                rtol=1e-05,
+            )
+
+
+class TestSoftsign:
+    @pytest.mark.parametrize(
+        "compute_unit, backend", itertools.product(compute_units, backends)
+    )
+    def test_builder_to_backend_smoke(self, compute_unit, backend):
+        t = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
+        input_placeholders = {"x": mb.placeholder(shape=t.shape)}
+        input_values = {"x": t}
+
+        def build(x):
+            return mb.softsign(x=x)
+
+        expected_output_types = (2, 3, types.fp32)
+        expected_outputs = np.array(
+            [[-0.5, 0.66666667, -0.75], [0.8, -0.83333333, 0.85714286]],
+            dtype=np.float32,
+        )
+
+        run_compare_builder(
+            build,
+            input_placeholders,
+            input_values,
+            expected_output_types,
+            expected_outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+    @ssa_fn
+    def test_builder_eval(self):
+        x_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
+        v = mb.softsign(x=x_val)
+        np.testing.assert_allclose(x_val / (1 + np.abs(x_val)), v.val, atol=1e-04, rtol=1e-05)
+
+
+class TestThresholdedReLU:
+    @pytest.mark.parametrize(
+        "compute_unit, backend", itertools.product(compute_units, backends)
+    )
+    def test_builder_to_backend_smoke(self, compute_unit, backend):
+        t = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
+        input_placeholders = {"x": mb.placeholder(shape=t.shape)}
+        input_values = {"x": t}
+
+        def build(x):
+            return mb.thresholded_relu(x=x, alpha=2.0)
+
+        expected_output_types = (2, 3, types.fp32)
+        expected_outputs = np.array([[0, 2, 0], [4, 0, 6]], dtype=np.float32)
+
+        run_compare_builder(
+            build,
+            input_placeholders,
+            input_values,
+            expected_output_types,
+            expected_outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
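+    # Reference semantics used by the checks below (a plain-NumPy restatement,
+    # consistent with the smoke test above): thresholded_relu keeps values at
+    # or above alpha and zeroes the rest, i.e. y = np.where(x >= alpha, x, 0).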
+
+    @ssa_fn
+    def test_builder_eval(self):
+        x_val = np.array([[0, 2, 0], [4, 0, 6]], dtype=np.float32)
+        v = mb.thresholded_relu(x=x_val, alpha=2.0)
+        y = x_val.copy()  # copy so the reference computation does not mutate x_val
+        y[y < 2.0] = 0
+        np.testing.assert_allclose(y, v.val, atol=1e-04, rtol=1e-05)
+
+    @pytest.mark.parametrize(
+        "compute_unit, backend, dim, alpha",
+        itertools.product(
+            compute_units,
+            backends,
+            [2, 4, 8],
+            [2.0, 3.0]
+        ),
+    )
+    def test_builder_to_backend_stress(self, compute_unit, backend, dim, alpha):
+        shape_x = np.array([dim, dim])
+        x_val = np.random.rand(*shape_x)
+        input_placeholders = {"x": mb.placeholder(shape=x_val.shape)}
+        input_values = {"x": x_val}
+
+        def build(x):
+            return [mb.thresholded_relu(x=x, alpha=alpha)]
+
+        # copy so that computing the expected output does not mutate the model
+        # input held in input_values
+        y = x_val.copy()
+        y[y < alpha] = 0
+        expected_outputs = [y]
+        expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs]
+
+        run_compare_builder(
+            build,
+            input_placeholders,
+            input_values,
+            expected_output_types,
+            expected_outputs=expected_outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_const.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_const.py
new file mode 100644
index 00000000..b484e357
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_const.py
@@ -0,0 +1,62 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import itertools
+
+import numpy as np
+import pytest
+
+from coremltools.converters.mil import testing_reqs
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil import types
+
+from .testing_utils import run_compare_builder
+
+backends = testing_reqs.backends
+compute_units = testing_reqs.compute_units
+
+
+class TestConst:
+    @pytest.mark.parametrize(
+        "compute_unit, backend, dtype", itertools.product(
+            compute_units,
+            backends,
+            [
+                np.int32,
+                np.int64,
+                np.float16,
+                np.float32,
+                np.float64,
+            ]
+        )
+    )
+    def test_builder_to_backend_smoke(self, compute_unit, backend, dtype):
+        if backend[0] == "mlprogram" and dtype in [np.uint8, np.int8, np.uint32]:
+            pytest.skip("Data type not supported")
+
+        t = np.random.randint(0, 5, (4, 2)).astype(np.float32)
+        constant = np.random.randint(0, 5, (4, 2)).astype(dtype)
+        input_placeholders = {
+            "x": mb.placeholder(shape=t.shape),
+        }
+        input_values = {"x": t}
+
+        def build(x):
+            y = mb.const(val=constant)
+            y = mb.cast(x=y, dtype='fp32')
+            return mb.add(x=x, y=y)
+
+        expected_output_types = (4, 2, types.fp32)
+        expected_outputs = t + constant.astype(np.float32)
+
+        run_compare_builder(
+            build,
+            input_placeholders,
+            input_values,
+            expected_output_types,
+            expected_outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_constexpr_ops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_constexpr_ops.py
new file mode 100644
index 00000000..4e8aa47b
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_constexpr_ops.py
@@ -0,0 +1,646 @@
+# Copyright (c) 2022, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools + +import numpy as np +import pytest + +import coremltools as ct +from coremltools.converters.mil import testing_reqs +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.ops.defs.iOS16 import constexpr_ops +from coremltools.converters.mil.mil.ops.tests.testing_utils import \ + run_compare_builder +from coremltools.converters.mil.testing_utils import (get_op_types_in_program, + ssa_fn) + +backends = [("mlprogram", "fp32"), ("mlprogram", "fp16")] +compute_units = testing_reqs.compute_units + + +@pytest.mark.skipif( + ct.utils._macos_version() < (13, 0), + reason="ConstExpr ops available from macOS13 onwards.", +) +class TestConstexprAffineDequantize: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + + t = np.array(range(4)).reshape(1, 1, 2, 2).astype(np.float32) + decompressed_constant = ( + np.array([1, 2, 3, 4]).reshape(1, 1, 2, 2).astype(np.float32) + ) + input_placeholders = { + "x": mb.placeholder(shape=t.shape), + } + input_values = {"x": t} + + def build(x): + quantized_data = np.array([3, 5, 5, 6]).reshape(1, 1, 2, 2).astype(np.uint8) + scale = np.array([1, 2]).astype(np.float32) + zero_point = np.array([2, 4]).astype(np.uint8) + axis = 3 + y = mb.constexpr_affine_dequantize( + quantized_data=quantized_data, + zero_point=zero_point, + scale=scale, + axis=axis, + ) + return mb.add(x=x, y=y) + + expected_output_types = (1, 1, 2, 2, types.fp32) + expected_outputs = t + decompressed_constant.astype(np.float32) + + mlmodel = run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16, + ) + + # validate that the constexpr op is not removed by any graph pass + prog = mlmodel._mil_program + assert "constexpr_affine_dequantize" in get_op_types_in_program(prog) + + @ssa_fn + def test_builder_eval(self): + # scalar zero-point & scalar scale + v = mb.constexpr_affine_dequantize( + quantized_data=np.array([[1, 2, 3], [1, 2, 3]]).astype(np.uint8), + zero_point=np.uint8(1), + scale=np.float32(2), + axis=0, + ) + np.testing.assert_allclose(np.float32([[0, 2, 4], [0, 2, 4]]), v.val) + + # vector zero-point & scalar scale + v = mb.constexpr_affine_dequantize( + quantized_data=np.array([[1, 2, 3], [1, 2, 3]]).astype(np.int8), + zero_point=np.array([1, 2]).astype(np.int8), + scale=np.float32(2), + axis=0, + ) + np.testing.assert_allclose(np.float32([[0, 2, 4], [-2, 0, 2]]), v.val) + + # scalar zero-point & vector scale + v = mb.constexpr_affine_dequantize( + quantized_data=np.array([[1, 2, 3], [1, 2, 3]]).astype(np.uint8), + zero_point=np.uint8(1), + scale=np.array([2, 4]).astype(np.float32), + axis=0, + ) + np.testing.assert_allclose(np.float32([[0, 2, 4], [0, 4, 8]]), v.val) + + # vector zero-point & vector scale + v = mb.constexpr_affine_dequantize( + quantized_data=np.array([[1, 2, 3], [1, 2, 3]]).astype(np.int8), + zero_point=np.array([1, 2]).astype(np.int8), + scale=np.array([2, 4]).astype(np.float32), + axis=0, + ) + np.testing.assert_allclose(np.float32([[0, 2, 4], [-4, 0, 4]]), v.val) + + @staticmethod + def affine_dequant_config_generator(): + 
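+        # For readability: the configs yielded below exercise the same
+        # de-quantization rule demonstrated by the eval cases above,
+        #     dequantized = scale * (quantized_data - zero_point),
+        # with zero_point/scale either scalars or vectors broadcast along
+        # `axis`. E.g. scale=2, zero_point=1 maps a stored 3 to (3 - 1) * 2 = 4.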
np.random.seed(1984) + + for quant_dtype in [np.int8, np.uint8]: + low = 0 if quant_dtype == np.uint8 else -128 + high = 255 if quant_dtype == np.uint8 else 127 + for rank in range(1, 6): + shape = np.random.randint(low=2, high=5, size=rank) + quantized_data = np.random.randint( + low=low, high=high, size=shape, dtype=quant_dtype + ) + axis = np.random.choice(range(-rank, rank)) + scalar_zp = np.random.choice([True, False]) + scalar_sc = np.random.choice([True, False]) + zero_point = ( + np.random.randint( + low=low, + high=high, + size=quantized_data.shape[axis], + dtype=quant_dtype, + ) + if not scalar_zp + else np.random.choice(range(low, high)).astype(quant_dtype) + ) + scale = ( + np.random.rand(quantized_data.shape[axis]).astype(np.float32) + if not scalar_sc + else np.float32(np.random.rand()) + ) # fp16 is already covered under backends parameterization + + params = { + "quantized_data": quantized_data, + "zp": zero_point, + "sc": scale, + "axis": axis, + } + yield params + + @pytest.mark.parametrize( + "compute_unit, backend, config", + itertools.product( + compute_units, + backends, + affine_dequant_config_generator.__func__() + ), + ) + def test_builder_stress(self, compute_unit, backend, config): + + quantized_data, zero_point, scale, axis = ( + config["quantized_data"], + config["zp"], + config["sc"], + config["axis"], + ) + + def build(x): + y = mb.constexpr_affine_dequantize( + quantized_data=quantized_data, + zero_point=zero_point, + scale=scale, + axis=axis, + ) + return mb.add(x=x, y=y) + + expected_output_types = ( + *quantized_data.shape, + types.numpy_type_to_builtin_type(scale.dtype), + ) + + t = np.random.rand(*quantized_data.shape).astype(scale.dtype) + decompressed_constant = constexpr_ops.constexpr_affine_dequantize.decompress( + quantized_data, zero_point, scale, axis + ) + expected_outputs = t + decompressed_constant + + input_placeholders = { + "x": mb.placeholder(shape=quantized_data.shape), + } + input_values = {"x": t} + mlmodel = run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16, + ) + + # validate that the constexpr op is not removed by any graph pass + prog = mlmodel._mil_program + if "constexpr_affine_dequantize" not in get_op_types_in_program(prog): + raise AssertionError("Invalidated: Test Failed") + + +@pytest.mark.skipif( + ct.utils._macos_version() < (13, 0), + reason="ConstExpr ops available from macOS13 onwards.", +) +class TestConstexprCast: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + + t = np.array(range(4)).reshape(4, 1).astype(np.float32) + decompressed_constant = np.array([1, 2, 3, 4]).reshape(4, 1).astype(np.float32) + input_placeholders = { + "x": mb.placeholder(shape=t.shape), + } + input_values = {"x": t} + + def build(x): + source_val = np.array([1, 2, 3, 4]).reshape(4, 1).astype(np.float16) + y = mb.constexpr_cast(source_val=source_val, output_dtype="fp32") + return mb.add(x=x, y=y) + + expected_output_types = (4, 1, types.fp32) + expected_outputs = t + decompressed_constant.astype(np.float32) + + mlmodel = run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16, + ) + + # validate that the constexpr op is not 
removed by any graph pass + prog = mlmodel._mil_program + if "constexpr_cast" not in get_op_types_in_program(prog): + raise AssertionError("Invalidated: Test Failed") + + @ssa_fn + def test_builder_eval(self): + v = mb.constexpr_cast(source_val=np.float16([1, 2]), output_dtype="fp32") + np.testing.assert_allclose(np.float32([1, 2]), v.val) + + @staticmethod + def cast_config_generator(): + np.random.seed(1984) + + for rank in range(1, 6): + shape = np.random.randint(low=2, high=5, size=rank) + source_val = np.random.rand(*shape).astype(np.float16) + params = { + "source_val": source_val, + "output_dtype": "fp32", + } + yield params + + @pytest.mark.parametrize( + "compute_unit, backend, config", + itertools.product( + compute_units, + backends, + cast_config_generator.__func__() + ), + ) + def test_builder_stress(self, compute_unit, backend, config): + + source_val, output_dtype = ( + config["source_val"], + config["output_dtype"], + ) + + def build(x): + y = mb.constexpr_cast( + source_val=source_val, + output_dtype=output_dtype, + ) + return mb.add(x=x, y=y) + + expected_output_types = ( + *source_val.shape, + types.string_to_builtin(output_dtype), + ) + + output_np_type = types.nptype_from_builtin( + types.string_to_builtin(output_dtype) + ) + t = np.random.rand(*source_val.shape).astype(output_np_type) + decompressed_constant = source_val.astype(output_np_type) + expected_outputs = t + decompressed_constant + + input_placeholders = { + "x": mb.placeholder(shape=source_val.shape), + } + input_values = {"x": t} + mlmodel = run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16, + ) + + # validate that the constexpr op is not removed by any graph pass + prog = mlmodel._mil_program + assert "constexpr_cast" in get_op_types_in_program(prog) + + +@pytest.mark.skipif( + ct.utils._macos_version() < (13, 0), + reason="ConstExpr ops available from macOS13 onwards.", +) +class TestConstexprLutToDense: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + + t = np.array(range(4)).reshape(4, 1).astype(np.float32) + decompressed_constant = np.array([1, 2, 3, 4]).reshape(4, 1).astype(np.float32) + input_placeholders = { + "x": mb.placeholder(shape=t.shape), + } + input_values = {"x": t} + + def build(x): + lut_data = np.array( + [ + -19.0, + 4.0, + 0.0, + -1.0, + 1.0, + 3.0, + 5.0, + -8.0, + 19, + 13, + 42, + 4.5, + 5.4, + 2.0, + -6, + -7, + ] + ).astype(np.float32) + indices = np.array([212, 21]).astype(np.uint8) + shape = np.array([4, 1]).astype(np.uint32) + y = mb.constexpr_lut_to_dense(lut=lut_data, indices=indices, shape=shape) + return mb.add(x=x, y=y) + + expected_output_types = (4, 1, types.fp32) + expected_outputs = t + decompressed_constant.astype(np.float32) + + mlmodel = run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16, + ) + + # validate that the constexpr op is not removed by any graph pass + prog = mlmodel._mil_program + assert "constexpr_lut_to_dense" in get_op_types_in_program(prog) + + @ssa_fn + def test_builder_eval(self): + v = mb.constexpr_lut_to_dense( + lut=np.array([1.0, 2.0, 3.0, 4.0]), + indices=np.array([10, 4]).astype(np.uint8), + shape=np.array( + [ + 5, + ] 
+ ).astype(np.uint32), + ) + np.testing.assert_allclose( + np.float32([3, 3, 1, 1, 1]).astype(np.float32), v.val + ) + + @staticmethod + def lut_config_generator(): + np.random.seed(1999) + for lut_dtype in [np.float32]: # [np.uint8, np.int8]: + # float16 already covered under backends parameterization + # Not possible to write 8-bit tests since no other op consumes uint8/int8 tensors + for nbits in [1, 2, 4, 6, 8]: + lut_size = 2**nbits + if lut_dtype == np.uint8: + lut = np.random.randint(low=255, size=lut_size, dtype=np.uint8) + elif lut_dtype == np.int8: + lut = np.random.randint( + low=-128, high=127, size=lut_size, dtype=np.int8 + ) + else: + lut = np.random.rand(lut_size).astype(lut_dtype) + for output_rank in range(1, 6): + output_shape = np.random.randint(low=2, high=5, size=output_rank) + + indices = np.random.randint( + low=0, high=2**nbits, size=output_shape, dtype=np.uint8 + ) + indices_bitarray = np.unpackbits( + indices, bitorder="little" + ).reshape(-1, 8) + packed_indices = np.packbits( + indices_bitarray[:, :nbits], bitorder="little" + ) + + assert packed_indices.size == np.ceil( + nbits * np.prod(output_shape) / 8 + ).astype(np.int32) + params = { + "indices": packed_indices, + "shape": output_shape, + "lut": lut, + } + yield params + + @pytest.mark.parametrize( + "compute_unit, backend, config", + itertools.product( + compute_units, + backends, + lut_config_generator.__func__() + ), + ) + def test_builder_stress(self, compute_unit, backend, config): + + indices, lut, shape = ( + config["indices"], + config["lut"], + config["shape"], + ) + + def build(x): + y = mb.constexpr_lut_to_dense( + indices=indices, + lut=lut, + shape=shape.astype(np.uint32), + ) + return mb.add(x=x, y=y) + + expected_output_types = ( + *shape, + types.numpy_type_to_builtin_type(lut.dtype), + ) + + t = np.random.rand(*shape).astype(lut.dtype) + decompressed_constant = constexpr_ops.constexpr_lut_to_dense.decompress( + lut, indices, shape + ) + expected_outputs = t + decompressed_constant + + input_placeholders = { + "x": mb.placeholder(shape=shape), + } + input_values = {"x": t} + mlmodel = run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16, + ) + + # validate that the constexpr op is not removed by any graph pass + prog = mlmodel._mil_program + if "constexpr_lut_to_dense" not in get_op_types_in_program(prog): + raise AssertionError("Invalidated: Test Failed") + + +@pytest.mark.skipif( + ct.utils._macos_version() < (13, 0), + reason="ConstExpr ops available from macOS13 onwards.", +) +class TestConstexprSparseToDense: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + + t = np.array(range(4)).reshape(4, 1).astype(np.float32) + decompressed_constant = np.array([1, 2, 0, 4]).reshape(4, 1).astype(np.float32) + input_placeholders = { + "x": mb.placeholder(shape=t.shape), + } + input_values = {"x": t} + + def build(x): + nonzero_data = np.array([1, 2, 4]).astype(np.float32) + mask = np.array([11]).astype(np.uint8) + shape = np.array([4, 1]).astype(np.uint32) + y = mb.constexpr_sparse_to_dense( + nonzero_data=nonzero_data, mask=mask, shape=shape + ) + return mb.add(x=x, y=y) + + expected_output_types = (4, 1, types.fp32) + expected_outputs = t + decompressed_constant.astype(np.float32) + + mlmodel = run_compare_builder( + build, + 
input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16, + ) + + # validate that the constexpr op is not removed by any graph pass + prog = mlmodel._mil_program + assert "constexpr_sparse_to_dense" in get_op_types_in_program(prog) + + @ssa_fn + def test_builder_eval(self): + v = mb.constexpr_sparse_to_dense( + nonzero_data=np.array([1.0, 2.0, 4.0]), + mask=np.array([11]).astype(np.uint8), + shape=np.array( + [ + 4, + ] + ).astype(np.uint32), + ) + np.testing.assert_allclose(np.float32([1.0, 2.0, 0.0, 4.0]), v.val) + + @staticmethod + def sparse_config_generator(): + np.random.seed(1999) + + for nonzero_data_dtype in [np.float32]: # [np.uint8, np.int8]: + # float16 already covered under backends parameterization + # Not possible to write 8-bit tests since no other op consumes uint8/int8 tensors + for output_rank in range(1, 6): + output_shape = np.random.randint(low=2, high=5, size=output_rank) + output_size = np.prod(output_shape) + nBytes = np.ceil(output_size / 8).astype(np.int32) + + mask = np.random.randint(low=255, size=nBytes, dtype=np.uint8) + bitarray = np.unpackbits(mask, bitorder="little") + while any(bitarray[i] != 0 for i in range(output_size, len(bitarray))): + mask = np.random.randint(low=255, size=nBytes, dtype=np.uint8) + bitarray = np.unpackbits(mask, bitorder="little") + + nonzero_size = np.sum( + np.where(np.unpackbits(mask, bitorder="little") != 0, 1, 0) + ) + + if nonzero_data_dtype == np.uint8: + nonzero_data = np.random.randint( + low=255, size=nonzero_size, dtype=np.uint8 + ) + elif nonzero_data_dtype == np.int8: + nonzero_data = np.random.randint( + low=-128, high=127, size=nonzero_size, dtype=np.int8 + ) + else: + nonzero_data = np.random.rand(nonzero_size).astype( + nonzero_data_dtype + ) + + params = { + "nonzero_data": nonzero_data, + "shape": output_shape, + "mask": mask, + } + yield params + + @pytest.mark.parametrize( + "compute_unit, backend, config", + itertools.product( + compute_units, + backends, + sparse_config_generator.__func__() + ), + ) + def test_builder_stress(self, compute_unit, backend, config): + + nonzero_data, mask, shape = ( + config["nonzero_data"], + config["mask"], + config["shape"], + ) + + def build(x): + y = mb.constexpr_sparse_to_dense( + nonzero_data=nonzero_data, + mask=mask, + shape=shape.astype(np.uint32), + ) + return mb.add(x=x, y=y) + + expected_output_types = ( + *shape, + types.numpy_type_to_builtin_type(nonzero_data.dtype), + ) + + t = np.random.rand(*shape).astype(nonzero_data.dtype) + decompressed_constant = constexpr_ops.constexpr_sparse_to_dense.decompress( + nonzero_data, mask, shape + ) + expected_outputs = t + decompressed_constant + + input_placeholders = { + "x": mb.placeholder(shape=shape), + } + input_values = {"x": t} + mlmodel = run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16, + ) + + # validate that the constexpr op is not removed by any graph pass + prog = mlmodel._mil_program + if "constexpr_sparse_to_dense" not in get_op_types_in_program(prog): + raise AssertionError("Invalidated: Test Failed") diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_control_flow.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_control_flow.py new file mode 100644 index 
00000000..f541f1ca --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_control_flow.py @@ -0,0 +1,419 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools + +import numpy as np +import pytest + +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.testing_reqs import backends, compute_units +from coremltools.converters.mil.testing_utils import random_gen, ssa_fn + +from .testing_utils import UNK_SYM, run_compare_builder + + +class TestSelect: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + cond_val = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]], dtype=np.float32) + a_val = np.array([[3, 1, 1], [1, 4, 1], [5, 6, 1]], dtype=np.float32) + b_val = np.array([[3, 2, 2], [2, 4, 2], [5, 6, 2]], dtype=np.float32) + input_placeholders = { + "cond": mb.placeholder(shape=cond_val.shape), + "a": mb.placeholder(shape=a_val.shape), + "b": mb.placeholder(shape=b_val.shape), + } + input_values = {"cond": cond_val, "a": a_val, "b": b_val} + + def build(cond, a, b): + if not types.is_bool(cond.dtype): + cond = mb.cast(x=cond, dtype="bool") + return [mb.select(cond=cond, a=a, b=b)] + + expected_output_types = [(3, 3, types.fp32)] + expected_outputs = [ + np.array( + [[3.0, 2.0, 2.0], [2.0, 4.0, 2.0], [5.0, 6.0, 2.0]], dtype=np.float32 + ) + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke_broadcast(self, compute_unit, backend): + cond_val = np.array([[1], [0], [2]], dtype=np.float32) + a_val = np.array([[3, 1, 1], [1, 4, 1], [5, 6, 1]], dtype=np.float32) + b_val = np.array([[3, 2, 2], [2, 4, 2], [5, 6, 2]], dtype=np.float32) + input_placeholders = { + "cond": mb.placeholder(shape=cond_val.shape), + "a": mb.placeholder(shape=a_val.shape), + "b": mb.placeholder(shape=b_val.shape), + } + input_values = {"cond": cond_val, "a": a_val, "b": b_val} + + def build(cond, a, b): + if not types.is_bool(cond.dtype): + cond = mb.cast(x=cond, dtype="bool") + return [mb.select(cond=cond, a=a, b=b)] + + expected_output_types = [(3, 3, types.fp32)] + expected_outputs = [ + np.array( + [[3.0, 1.0, 1.0], [2.0, 4.0, 2.0], [5.0, 6.0, 1.0]], dtype=np.float32 + ) + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + cond = np.random.randint(low=0, high=2, size=(6, 1, 7)).astype(bool) + a = random_gen(shape=(6, 1, 7), rand_min=-1962.0, rand_max=0.0) + b = random_gen(shape=(6, 1, 7), rand_min=0.0, rand_max=1964.0) + res = mb.select(cond=cond, a=a, b=b) + np.testing.assert_allclose(np.where(cond, a, b), res.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_eval_broadcast(self): + cond = np.array([[True], [False], [True]]) + a = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32) + b = np.array([[7, 8], [9, 10], [11, 12]], dtype=np.float32) + res = 
mb.select(cond=cond, a=a, b=b) + np.testing.assert_allclose(np.array([[1, 2], [9, 10], [5, 6]], dtype=np.float32), res.val, atol=1e-04, rtol=1e-05) + + +class TestCond: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + input_placeholders = { + "a": mb.placeholder(shape=(1,), dtype=types.bool), + "b": mb.placeholder(shape=(1,)), + } + + def build(a, b): + def true_fn(): + return mb.add(x=b, y=1.), mb.mul(x=b, y=2.) + + def false_fn(): + return mb.add(x=b, y=-1.), mb.mul(x=b, y=-2.) + + pred = mb.squeeze(x=a) + return mb.cond(pred=pred, _true_fn=true_fn, _false_fn=false_fn) + + input_values = { + "a": np.array([0], dtype=np.float32), + "b": np.array([2], dtype=np.float32), + } + + expected_output_types = [ + (1, types.fp32), + (1, types.fp32), + ] + + expected_outputs = [ + np.array([1], dtype=np.float32), + np.array([-4], dtype=np.float32), + ] + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestWhileLoop: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + def body(a, b): + return mb.add(x=a, y=np.float32(1)), b + + def cond(a, b): + return mb.less(x=a, y=b) + + input_placeholders = { + "a": mb.placeholder(shape=(1,)), + "b": mb.placeholder(shape=(1,)), + } + + def build(a, b): + return mb.while_loop(_cond=cond, _body=body, loop_vars=(a, b)) + + input_values = { + "a": np.array([1], dtype=np.float32), + "b": np.array([2], dtype=np.float32), + } + + expected_output_types = [ + (1, types.fp32), + (1, types.fp32), + ] + + expected_outputs = [ + np.array([2], dtype=np.float32), + np.array([2], dtype=np.float32), + ] + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_power(self, compute_unit, backend): + + input_placeholders = { + "a": mb.placeholder(shape=(1,)), + "b": mb.placeholder(shape=(1,)), + } + + def build(a, b): + # Compute a^b + def body(res, bx): + return mb.mul(x=res, y=a), mb.add(x=bx, y=np.float32(1)) + + def cond(res, bx): + return mb.less(x=bx, y=b) + + res, ignored = mb.while_loop(_cond=cond, _body=body, + loop_vars=([1.], [0.])) + return res + + input_values = { + "a": np.array([2], dtype=np.float32), + "b": np.array([4], dtype=np.float32), + } + + expected_output_types = [ + (1, types.fp32), + ] + + expected_outputs = [ + np.array([16], dtype=np.float32), + ] + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_nested(self, compute_unit, backend): + if backend[0] == 'neuralnetwork': + pytest.xfail("rdar://96862073 (test_control_folw::TestWhileLoop::test_builder_to_backend_nested failing on nnv1)") + + input_placeholders = { + "x": mb.placeholder(shape=(1,)), + "y": mb.placeholder(shape=(1,)), + } + + def build(x, y): + # i, j = x, y + # while i < j: + # while 2*i < i+2: + # i += 1 + # i += 2 + # return i, j + + # Create 
const outside of while loop for testing purpose + two = mb.const(val=[2.], name='const_two') + one = mb.const(val=[1.], name='const_one') + + def cond2(i): + return mb.less(x=mb.mul(x=two, y=i), y=mb.add(x=i, y=two)) + + def body2(i): + return mb.add(x=i, y=one) + + def cond1(i, j): + return mb.less(x=i, y=j) + + def body1(i, j): + new_i = mb.while_loop(_cond=cond2, _body=body2, + loop_vars=(i,)) + return mb.add(x=new_i, y=two), j + + return mb.while_loop(_cond=cond1, _body=body1, + loop_vars=(x, y)) + + input_values = { + "x": np.array([0], dtype=np.float32), + "y": np.array([10], dtype=np.float32), + } + + expected_output_types = [ + (1, types.fp32), + (1, types.fp32), + ] + + expected_outputs = [ + np.array([10], dtype=np.float32), + np.array([10], dtype=np.float32), + ] + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestList: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + elem_shape = (2,) + input_placeholders = { + "a": mb.placeholder(shape=elem_shape), + "b": mb.placeholder(shape=elem_shape), + } + + def build(a, b): + ls = mb.make_list(init_length=2, elem_shape=elem_shape) + # list is initially all 0 + init_t = mb.list_read(ls=ls, index=0) + ls = mb.list_write(ls=ls, index=0, value=a) + # this write is out of bound + ls = mb.list_write(ls=ls, index=4, value=b) + ls = mb.list_scatter( + ls=ls, + indices=[2, 1], + value=np.array([[-1, -2], [-4, -5]], dtype=np.float32), + ) + return ( + init_t, + mb.list_read(ls=ls, index=0), + mb.list_gather(ls=ls, indices=[4, 2, 3]), + ) + + input_values = { + "a": np.array([1, 3], dtype=np.float32), + "b": np.array([2, 4], dtype=np.float32), + } + + expected_output_types = [ + (2, types.fp32), + (2, types.fp32), + (3, 2, types.fp32), + ] + + expected_outputs = [ + np.array([0, 0], dtype=np.float32), + np.array([1, 3], dtype=np.float32), + np.array([[2, 4], [-1, -2], [0, 0]], dtype=np.float32), + ] + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_while(self, compute_unit, backend): + # The while_loop appends [1, 2]*i to `ls` for each iteration + # i = 0, ... num_iters-1. 
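+        # Roughly equivalent Python for the loop built below (a sketch of the
+        # intended semantics, not MIL):
+        #
+        #     ls = []
+        #     for i in range(int(num_iters)):
+        #         ls.append(update * i)  # update=[1, 2] -> [0, 0], [1, 2], [2, 4], ...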
+ def body(i, num_iters, ls, update): + y = mb.cast(x=i, dtype="fp32") + new_elem = mb.mul(x=update, y=y) + return ( + mb.add(x=i, y=1), + num_iters, + mb.list_write(ls=ls, index=i, value=new_elem), + update, + ) + + def cond(i, num_iters, ls, update): + i = mb.cast(x=i, dtype="fp32") + return mb.less(x=i, y=num_iters) + + elem_shape = (2,) + input_placeholders = { + "num_iters": mb.placeholder(shape=(1,)), + "update": mb.placeholder(shape=elem_shape), + } + + def build(num_iters, update): + i = 0 + ls = mb.make_list(init_length=1, elem_shape=elem_shape) + _, _, final_tensor_list, _ = mb.while_loop( + _cond=cond, _body=body, loop_vars=(i, num_iters, ls, update) + ) + list_len = mb.list_length(ls=final_tensor_list) + indices = mb.range_1d(start=0, end=list_len, step=1) + return mb.list_gather(ls=final_tensor_list, indices=indices) + + input_values = { + "num_iters": np.array([3], dtype=np.float32), + "update": np.array([1, 2], dtype=np.float32), + } + + expected_output_types = [ + # Type inference does not unroll loop + (UNK_SYM, 2, types.fp32), + ] + + expected_outputs = [ + np.array([[0, 0], [1, 2], [2, 4]], dtype=np.float32), + ] + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_conv.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_conv.py new file mode 100644 index 00000000..d1404382 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_conv.py @@ -0,0 +1,940 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools + +import numpy as np +import pytest + +import coremltools as ct +from coremltools.converters.mil import testing_reqs +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import get_new_symbol, types +from coremltools.converters.mil.testing_reqs import backends, compute_units +from coremltools.models.utils import _macos_version + +from .testing_utils import run_compare_builder + + +class TestConvTranspose: + + @pytest.mark.skipif(not testing_reqs._HAS_TORCH, reason="PyTorch not installed.") + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "conv_dim", + "config", + ] + ), + itertools.product( + compute_units, + backends, + ["conv1d", "conv2d", "conv3d"], + [{ + "padding": (1, 2, 3), + "DHWKdKhKw": (10, 12, 14, 3, 2, 4), + "stride": (2, 1, 1), + "dilation": (1, 1, 1), + "has_bias": False, + "groups": 1, + "test_symbolic": False, + "test_output_shape": True, + }, + { + "padding": (2, 2, 2), + "DHWKdKhKw": (10, 12, 14, 3, 2, 4), + "stride": (2, 2, 2), + "dilation": (2, 1, 1), + "has_bias": False, + "groups": 2, + "test_symbolic": True, + "test_output_shape": False, + }, + { + "padding": (1, 2, 3), + "DHWKdKhKw": (7, 7, 7, 2, 2, 2), + "stride": (2, 2, 2), + "dilation": (2, 1, 1), + "has_bias": True, + "groups": 1, + "test_symbolic": True, + "test_output_shape": False, + }, + { + "padding": (2, 2, 2), + "DHWKdKhKw": (7, 7, 7, 2, 2, 2), + "stride": (2, 1, 1), + "dilation": (1, 1, 1), + "has_bias": True, + "groups": 2, + "test_symbolic": False, + "test_output_shape": False, + }, + ], + ), + ) + def 
test_builder_to_backend_stress( + self, + compute_unit, + backend, + conv_dim, + config, + ): + padding = config["padding"] + DHWKdKhKw = config["DHWKdKhKw"] + stride = config["stride"] + dilation = config["dilation"] + has_bias = config["has_bias"] + groups = config["groups"] + test_symbolic = config["test_symbolic"] + test_output_shape = config["test_output_shape"] + + D, H, W, Kd, Kh, Kw = DHWKdKhKw + N, C_in, C_out = 1, 1 * groups, 2 * groups + + import torch + import torch.nn as nn + + isDeconv1d = conv_dim == "conv1d" + isDeconv2d = conv_dim == "conv2d" + + if isDeconv1d: + strides = [stride[0]] + dilations = [dilation[0]] + kernels = [Kh] + m = nn.ConvTranspose1d( + C_in, + C_out, + kernels, + stride=strides, + dilation=dilations, + bias=has_bias, + groups=groups, + padding=padding[0], + ) + input_shape = [N, C_in, H] + paddings = [padding[0], padding[0]] + + elif isDeconv2d: + strides = [stride[0], stride[1]] + dilations = [dilation[0], dilation[1]] + kernels = [Kh, Kw] + m = nn.ConvTranspose2d( + C_in, + C_out, + kernels, + stride=strides, + dilation=dilations, + bias=has_bias, + groups=groups, + padding=(padding[0], padding[1]), + ) + input_shape = [N, C_in, H, W] + paddings = [padding[0], padding[0], padding[1], padding[1]] + else: + strides = [stride[0], stride[1], stride[2]] + dilations = [dilation[0], dilation[1], dilation[2]] + kernels = [Kd, Kh, Kw] + m = nn.ConvTranspose3d( + C_in, + C_out, + kernels, + stride=strides, + dilation=dilations, + bias=has_bias, + groups=groups, + padding=padding, + ) + input_shape = [N, C_in, D, H, W] + paddings = [ + padding[0], + padding[0], + padding[1], + padding[1], + padding[2], + padding[2], + ] + + wts = m.state_dict() + weight = wts["weight"].detach().numpy() + bias = wts["bias"].detach().numpy() if has_bias else None + + input = torch.randn(*input_shape) + output = m(input) + output = output.detach().numpy() + input = input.detach().numpy() + + output_shape = list(output.shape) + if test_symbolic: + # For symbolic input test + # Make Batch Size and input channel as symbolic + symbolic_batch_size = get_new_symbol() + input_shape[0] = symbolic_batch_size + output_shape[0] = symbolic_batch_size + + expected_output_types = tuple(output_shape[:]) + (types.fp32,) + expected_outputs = [output] + + input_placeholders = {"x": mb.placeholder(shape=input_shape)} + input_values = {"x": input} + + def build(x): + arguments = { + "x": x, + "weight": weight, + "pad": paddings, + "pad_type": "custom", + "strides": strides, + "dilations": dilations, + "groups": groups, + } + if has_bias: + arguments["bias"] = bias + if test_output_shape: + arguments["output_shape"] = output.shape + return mb.conv_transpose(**arguments) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestConv: + + @pytest.mark.skipif(not testing_reqs._HAS_TORCH, reason="PyTorch not installed.") + @pytest.mark.parametrize( + "compute_unit, backend, padding_mode, conv_dim", + itertools.product( + compute_units, + backends, + ["same_lower", "same", "valid"], + ["conv1d", "conv2d", "conv3d"], + ), + ) + def test_padding_mode_stress(self, compute_unit, backend, padding_mode, conv_dim): + import torch + def rotation_tensor(tensor): + assert tensor.shape[0] == tensor.shape[1] == 1 + tensor = tensor[0][0] + rank = len(tensor.shape) + new_tensor = np.copy(np.flip(tensor, axis=tuple(range(rank)))) + return np.expand_dims(new_tensor, axis=(0, 1)) + + if conv_dim == 
"conv3d" and padding_mode == "same_lower": + if backend[0] == "neuralnetwork": + pytest.skip("same_lower mode not supported for conv3d in neuralnetwork backend") + + if padding_mode == "same_lower" and backend[0] == "mlprogram" and ct.utils._macos_version() < (13, 0): + pytest.skip("same_lower pad_type not supported in macOS12 or older.") + + minimum_deployment_target = ct.target.iOS16 if backend[0] == "mlprogram" else None + if _macos_version() < (13, 0) and minimum_deployment_target == ct.target.iOS16: + pytest.skip("iOS16 target not available on macOS 13") + + batch, in_channels, out_channels = 1, 1, 1 + input_shape = (batch, in_channels, 4, 5, 6) # batch, channel, height, width + kernel_size = (2, 4, 3) + torch_padding_mode = padding_mode if padding_mode != "same_lower" else "same" + + # Get the right shape for each conv_dim + if conv_dim == "conv1d": + input_shape = input_shape[:3] + kernel_size = kernel_size[:1] + elif conv_dim == "conv2d": + input_shape = input_shape[:4] + kernel_size = kernel_size[:2] + + # Get the ground truth answer from torch + if conv_dim == "conv1d": + m = torch.nn.Conv1d( + in_channels, + out_channels, + kernel_size, + stride=1, + padding=torch_padding_mode, + bias=False, + ) + elif conv_dim == "conv2d": + m = torch.nn.Conv2d( + in_channels, + out_channels, + kernel_size, + stride=1, + padding=torch_padding_mode, + bias=False, + ) + elif conv_dim == "conv3d": + m = torch.nn.Conv3d( + in_channels, + out_channels, + kernel_size, + stride=1, + padding=torch_padding_mode, + bias=False, + ) + + # Original weight / inputs for the torch model + weight = torch.clone(m.state_dict()["weight"]) + input = torch.randn(*input_shape, dtype=torch.float32) + + # Coreml weights / inputs values + coreml_weight = weight.detach().numpy() + coreml_input = input.detach().numpy() + + if padding_mode == "same_lower": + # For the same_lower padding mode, we get the ground truth output by doing the following steps + # (1) Rotate the input value + # (2) Rotate the kernel value + # (3) Rotate the torch out + rotated_input = torch.tensor(rotation_tensor(input.detach().numpy()), dtype=torch.float32) + rotated_weight = torch.tensor(rotation_tensor(weight.detach().numpy()), dtype=torch.float32) + m.load_state_dict({'weight': rotated_weight}, strict=False) + output = m(rotated_input).detach().numpy() + output = rotation_tensor(output) + else: + output = m(input).detach().numpy() + + output_shape = list(output.shape) + expected_output_types = tuple(output_shape[:]) + (types.fp32,) + expected_outputs = [output] + input_placeholders = {"x": mb.placeholder(shape=input_shape)} + input_values = {"x": coreml_input} + + def build(x): + arguments = { + "x": x, + "weight": coreml_weight, + "pad_type": padding_mode, + } + return mb.conv(**arguments) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=minimum_deployment_target, + ) + + + @pytest.mark.skipif(not testing_reqs._HAS_TORCH, reason="PyTorch not installed.") + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "conv_dim", + "config", + ] + ), + itertools.product( + compute_units, + backends, + ["conv1d", "conv2d", "conv3d"], + [{ + "padding": (1, 1, 1), + "DHWKdKhKw": (10, 12, 14, 3, 2, 4), + "stride": (2, 1, 1), + "dilation": (1, 1, 1), + "has_bias": False, + "groups": 1, + "symbolic": False, + }, + { + "padding": (2, 2, 2), + "DHWKdKhKw": (10, 12, 14, 3, 2, 4), + "stride": (2, 2, 
2), + "dilation": (2, 1, 1), + "has_bias": False, + "groups": 2, + "symbolic": True, + }, + { + "padding": (1, 1, 1), + "DHWKdKhKw": (5, 5, 5, 2, 2, 2), + "stride": (2, 2, 2), + "dilation": (2, 1, 1), + "has_bias": True, + "groups": 1, + "symbolic": True, + }, + { + "padding": (2, 2, 2), + "DHWKdKhKw": (5, 5, 5, 2, 2, 2), + "stride": (2, 1, 1), + "dilation": (1, 1, 1), + "has_bias": True, + "groups": 2, + "symbolic": False, + }, + ], + ), + ) + def test_builder_to_backend_stress( + self, + compute_unit, + backend, + conv_dim, + config, + ): + padding = config["padding"] + DHWKdKhKw = config["DHWKdKhKw"] + stride = config["stride"] + dilation = config["dilation"] + has_bias = config["has_bias"] + groups = config["groups"] + symbolic = config["symbolic"] + + D, H, W, Kd, Kh, Kw = DHWKdKhKw + N, C_in, C_out = 1, 1 * groups, 2 * groups + + import torch + import torch.nn as nn + + isConv1d = conv_dim == "conv1d" + isConv2d = conv_dim == "conv2d" + + if isConv1d: + strides = [stride[0]] + dilations = [dilation[0]] + kernels = [Kh] + m = nn.Conv1d( + C_in, + C_out, + kernels, + stride=strides, + dilation=dilations, + bias=has_bias, + groups=groups, + padding=padding[0], + ) + input_shape = [N, C_in, H] + paddings = [padding[0], padding[0]] + elif isConv2d: + strides = [stride[0], stride[1]] + dilations = [dilation[0], dilation[1]] + kernels = [Kh, Kw] + m = nn.Conv2d( + C_in, + C_out, + kernels, + stride=strides, + dilation=dilations, + bias=has_bias, + groups=groups, + padding=(padding[0], padding[1]), + ) + input_shape = [N, C_in, H, W] + paddings = [padding[0], padding[0], padding[1], padding[1]] + else: + strides = [stride[0], stride[1], stride[2]] + dilations = [dilation[0], dilation[1], dilation[2]] + kernels = [Kd, Kh, Kw] + m = nn.Conv3d( + C_in, + C_out, + kernels, + stride=strides, + dilation=dilations, + bias=has_bias, + groups=groups, + padding=padding, + ) + input_shape = [N, C_in, D, H, W] + paddings = [ + padding[0], + padding[0], + padding[1], + padding[1], + padding[2], + padding[2], + ] + + wts = m.state_dict() + weight = wts["weight"].detach().numpy() + bias = wts["bias"].detach().numpy() if has_bias else None + + # PyTorch and CoreML weight format is same + # PyTorch weight format: C_out, C_in, H, W + # MIL weight format: C_out, C_in, H, W + + input = torch.randn(*input_shape) + output = m(input) + output = output.detach().numpy() + input = input.detach().numpy() + + output_shape = list(output.shape) + if symbolic: + # For symbolic input test + # Make Batch Size and input channel as symbolic + symbolic_batch_size = get_new_symbol() + input_shape[0] = symbolic_batch_size + output_shape[0] = symbolic_batch_size + + expected_output_types = tuple(output_shape[:]) + (types.fp32,) + expected_outputs = [output] + + input_placeholders = {"x": mb.placeholder(shape=input_shape)} + input_values = {"x": input} + + def build(x): + arguments = { + "x": x, + "weight": weight, + "pad": paddings, + "pad_type": "custom", + "strides": strides, + "dilations": dilations, + "groups": groups, + } + if has_bias: + arguments["bias"] = bias + return mb.conv(**arguments) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.skipif(not testing_reqs._HAS_TORCH, reason="PyTorch not installed.") + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "conv_dim", + "config", + ] + ), + itertools.product( + compute_units, + backends, + ["conv1d", "conv2d"], + 
[ + { + "padding": (1, 1, 1), + "DHWKdKhKw": (10, 12, 14, 3, 2, 4), + "stride": (2, 1, 1), + "dilation": (1, 1, 1), + "has_bias": False, + "groups": 1, + "symbolic": False, + }, + { + "padding": (2, 2, 2), + "DHWKdKhKw": (10, 12, 14, 3, 2, 4), + "stride": (2, 2, 2), + "dilation": (2, 1, 1), + "has_bias": False, + "groups": 2, + "symbolic": True, + }, + { + "padding": (1, 1, 1), + "DHWKdKhKw": (5, 5, 5, 2, 2, 2), + "stride": (2, 2, 2), + "dilation": (2, 1, 1), + "has_bias": True, + "groups": 1, + "symbolic": True, + }, + { + "padding": (2, 2, 2), + "DHWKdKhKw": (5, 5, 5, 2, 2, 2), + "stride": (2, 1, 1), + "dilation": (1, 1, 1), + "has_bias": True, + "groups": 2, + "symbolic": False, + }, + ], + ), + ) + def test_builder_to_backend_stress_weights_input( + self, + compute_unit, + backend, + conv_dim, + config, + ): + padding = config["padding"] + DHWKdKhKw = config["DHWKdKhKw"] + stride = config["stride"] + has_bias = config["has_bias"] + groups = config["groups"] + symbolic = config["symbolic"] + + if backend[0] == "neuralnetwork" and groups > 1: + pytest.skip("dynamic conv with groups > 1 is not supported on the neuralnetwork backend") + + if backend[0] == "mlprogram" and compute_unit != ct.ComputeUnit.CPU_ONLY: + pytest.xfail("rdar://97398343 (test_builder_to_backend_stress_weights_input is failing on mlprogram + GPU)") + + D, H, W, Kd, Kh, Kw = DHWKdKhKw + N, C_in, C_out = 1, 1 * groups, 2 * groups + + import torch + import torch.nn as nn + + isConv1d = conv_dim == "conv1d" + isConv2d = conv_dim == "conv2d" + + if isConv1d: + strides = [stride[0]] + kernels = [Kh] + m = nn.Conv1d( + C_in, + C_out, + kernels, + stride=strides, + bias=has_bias, + groups=groups, + padding=padding[0], + ) + input_shape = [N, C_in, H] + paddings = [padding[0], padding[0]] + elif isConv2d: + strides = [stride[0], stride[1]] + kernels = [Kh, Kw] + m = nn.Conv2d( + C_in, + C_out, + kernels, + stride=strides, + groups=groups, + padding=(padding[0], padding[1]), + bias=has_bias, + ) + input_shape = [N, C_in, H, W] + paddings = [padding[0], padding[0], padding[1], padding[1]] + + wts = m.state_dict() + weight = wts["weight"].detach().numpy() + bias = wts["bias"].detach().numpy() if has_bias else None + + # PyTorch and CoreML weight format is same + # PyTorch weight format: C_out, C_in, H, W + # MIL weight format: C_out, C_in, H, W + + input = torch.randn(*input_shape) + output = m(input) + output = output.detach().numpy() + input = input.detach().numpy() + + output_shape = list(output.shape) + if symbolic: + # For symbolic input test + # Make Batch Size and input channel as symbolic + symbolic_batch_size = get_new_symbol() + input_shape[0] = symbolic_batch_size + output_shape[0] = symbolic_batch_size + + expected_output_types = tuple(output_shape[:]) + (types.fp32,) + expected_outputs = [output] + + input_placeholders = {"x": mb.placeholder(shape=input_shape), "input_weight":mb.placeholder(shape=weight.shape)} + input_values = {"x": input, "input_weight": weight} + + def build(x, input_weight): + arguments = { + "x": x, + "weight": input_weight, + "pad": paddings, + "pad_type": "custom", + "strides": strides, + "groups": groups, + } + if has_bias: + arguments["bias"] = bias + return mb.conv(**arguments) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_conv_bias_fusion(self, 
compute_unit, backend): + """ + Test conv bias fusion when const input. + + + Input graph: + Const + | + V + input -----> convolution -----> add/sub ---> out + + Output graph: + input -----> convolution -----> out + """ + weight = np.array([2.5], dtype=np.float32).reshape([1, 1, 1, 1]) + + def build(x): + x = mb.conv(x=x, weight=weight) + bias = mb.const(val=[10.]) + return mb.add(x=x, y=bias) + + input = np.array([1, 2, 3, 4], dtype=np.float32).reshape((1, 1, 2, 2)) + output = np.array([12.5, 15.0, 17.5, 20.0], dtype=np.float32).reshape((1, 1, 2, 2)) + expected_output_types = output.shape + (types.fp32,) + expected_outputs = [output] + input_placeholders = {"x": mb.placeholder(shape=input.shape)} + input_values = {"x": input} + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestInvalidConvConfig: + + @pytest.mark.parametrize( + "compute_unit, backend, conv_dim", + itertools.product( + compute_units, + backends, + (1, 2, 3), + ), + ) + def test_invalid_weight(self, compute_unit, backend, conv_dim): + N, C_in, C_out = tuple(np.random.randint(low=16, high=32, size=3)) + D = tuple(np.random.randint(low=8, high=16, size=conv_dim)) + K = tuple(np.random.randint(low=1, high=4, size=conv_dim)) + + input_shape = (N, C_in) + D + x = np.random.rand(*input_shape) + + groups = np.random.randint(low=1, high=C_in + 1) + while C_in % groups != 0: + groups = np.random.randint(low=1, high=C_in + 1) + + weight = np.random.rand(C_out, C_in // groups + + np.random.randint(low=1, high=8), *K) * 2.0 - 1.0 + + def build(x): + return mb.conv(x=x, weight=weight, groups=groups) + + with pytest.raises( + ValueError, + match=r"C_in / groups = [0-9]+/[0-9]+ != weight\[1\] \([0-9]+\)" + ): + run_compare_builder( + build, + {"x": mb.placeholder(shape=input_shape)}, + {"x": x}, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, conv_dim", + itertools.product( + compute_units, + backends, + (1, 2, 3), + ), + ) + def test_invalid_bias(self, compute_unit, backend, conv_dim): + N, C_in, C_out = tuple(np.random.randint(low=1, high=10, size=3)) + D = tuple(np.random.randint(low=8, high=16, size=conv_dim)) + K = tuple(np.random.randint(low=1, high=4, size=conv_dim)) + + input_shape = (N, C_in) + D + x = np.random.rand(*input_shape) + + weight = np.random.rand(C_out, C_in, *K) * 2.0 - 1.0 + + wrong_bias_size = C_out + np.random.randint(low=1, high=8) + bias = np.random.rand(wrong_bias_size) * 2.0 - 1.0 + + def build(x): + return mb.conv(x=x, weight=weight, bias=bias) + + with pytest.raises( + ValueError, + match=r"# of bias values [0-9]+ not equal to # output channels [0-9]+" + ): + run_compare_builder( + build, + {"x": mb.placeholder(shape=input_shape)}, + {"x": x}, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, conv_dim", + itertools.product( + compute_units, + backends, + (1, 2, 3), + ), + ) + def test_invalid_kernel(self, compute_unit, backend, conv_dim): + N, C_in, C_out = tuple(np.random.randint(low=1, high=10, size=3)) + D = tuple(np.random.randint(low=8, high=16, size=conv_dim)) + K = tuple(np.random.randint(low=16, high=32, size=conv_dim)) + + input_shape = (N, C_in) + D + x = np.random.rand(*input_shape) + + weight = np.random.rand(C_out, C_in, *K) * 2.0 - 1.0 + + def build(x): + return mb.conv(x=x, weight=weight) + + with pytest.raises( + ValueError, + match=r"spatial 
dimension [0-9]+ has invalid output size -?[0-9]+" + ): + run_compare_builder( + build, + {"x": mb.placeholder(shape=input_shape)}, + {"x": x}, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, conv_dim", + itertools.product( + compute_units, + backends, + (1, 2, 3), + ), + ) + def test_invalid_dilation(self, compute_unit, backend, conv_dim): + N, C_in, C_out = tuple(np.random.randint(low=1, high=10, size=3)) + D = tuple(np.random.randint(low=8, high=16, size=conv_dim)) + K = tuple(np.random.randint(low=2, high=4, size=conv_dim)) + dilations = tuple(np.random.randint(low=16, high=32, size=conv_dim)) + + input_shape = (N, C_in) + D + x = np.random.rand(*input_shape) + + weight = np.random.rand(C_out, C_in, *K) * 2.0 - 1.0 + + def build(x): + return mb.conv(x=x, weight=weight, dilations=dilations) + + with pytest.raises( + ValueError, + match=r"spatial dimension [0-9]+ has invalid output size -?[0-9]+" + ): + run_compare_builder( + build, + {"x": mb.placeholder(shape=input_shape)}, + {"x": x}, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, conv_dim", + itertools.product( + compute_units, + backends, + (1, 2, 3), + ), + ) + def test_invalid_groups(self, compute_unit, backend, conv_dim): + N, C_in, C_out = tuple(np.random.randint(low=16, high=32, size=3)) + D = tuple(np.random.randint(low=8, high=16, size=conv_dim)) + K = tuple(np.random.randint(low=1, high=4, size=conv_dim)) + + input_shape = (N, C_in) + D + x = np.random.rand(*input_shape) + + groups = np.random.randint(low=1, high=C_in) + while C_in % groups == 0: + groups = np.random.randint(low=1, high=C_in) + + weight = np.random.rand(C_out, C_in // groups, *K) * 2.0 - 1.0 + + def build(x): + return mb.conv(x=x, weight=weight, groups=groups) + + with pytest.raises( + ValueError, + match=r"# of input channels [0-9]+ not divisible by groups [0-9]+" + ): + run_compare_builder( + build, + {"x": mb.placeholder(shape=input_shape)}, + {"x": x}, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, conv_dim", + itertools.product( + compute_units, + backends, + (1, 2, 3), + ), + ) + def test_invalid_rank(self, compute_unit, backend, conv_dim): + N, C_in, C_out = tuple(np.random.randint(low=16, high=32, size=3)) + D = tuple(np.random.randint(low=8, high=16, size=conv_dim)) + + input_shape = (N, C_in) + D + x = np.random.rand(*input_shape) + + wrong_K = tuple(np.random.randint(low=1, high=4, size=conv_dim - 1)) + + weight = np.random.rand(C_out, C_in, *wrong_K) * 2.0 - 1.0 + strides = tuple(np.random.randint(low=1, high=4, size=conv_dim + 1)) + dilations = tuple(np.random.randint(low=1, high=4, size=conv_dim + 2)) + pad = tuple(np.random.randint(low=1, high=4, size=2 * conv_dim + 3)) + + def build(x): + return mb.conv(x=x, weight=weight, strides=strides, dilations=dilations, pad_type="custom", pad=pad) + + with pytest.raises( + ValueError, + match=r"input_shape \(length [0-9]+\), " + r"kernel_shape \(length [0-9]+\), " + r"strides \(length [0-9]+\), " + r"dilations \(length [0-9]+\), " + r"and custom_pad \(length [0-9]+\) divided by two " + r"must all be the same length", + ): + run_compare_builder( + build, + {"x": mb.placeholder(shape=input_shape)}, + {"x": x}, + compute_unit=compute_unit, + backend=backend, + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_elementwise_binary.py 
b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_elementwise_binary.py new file mode 100644 index 00000000..fa2790ce --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_elementwise_binary.py @@ -0,0 +1,592 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause +import itertools + +import numpy as np +import pytest + +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import get_new_symbol, types +from coremltools.converters.mil.testing_reqs import backends, compute_units +from coremltools.converters.mil.testing_utils import ssa_fn + +from .testing_utils import run_compare_builder + + +class TestElementwiseBinary: + # All in this test share the same backends + @pytest.mark.parametrize( + "compute_unit, backend, mode", + itertools.product( + compute_units, + backends, + [ + "add", + "floor_div", + "maximum", + "minimum", + "mod", + "mul", + "pow", + "real_div", + "sub", + ], + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend, mode): + if mode == "add": + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array([[0, 4, 0], [8, 0, 12]], dtype=np.float32) + + build = lambda x, y: mb.add(x=x, y=y) + elif mode == "floor_div": + x = np.array([[10, 20, 30], [40, 50, 60]], dtype=np.float32) + y = np.array([[11, 12, 13], [14, 15, 16]], dtype=np.float32) + expected_outputs = np.array([[0, 1, 2], [2, 3, 3]], dtype=np.float32) + + build = lambda x, y: mb.floor_div(x=x, y=y) + elif mode == "maximum": + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + + build = lambda x, y: mb.maximum(x=x, y=y) + elif mode == "minimum": + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + + build = lambda x, y: mb.minimum(x=x, y=y) + elif mode == "mod": + x = np.array([[10, 20, 30], [40, 50, 60]], dtype=np.float32) + y = np.array([[11, 12, 13], [14, 15, 16]], dtype=np.float32) + expected_outputs = np.array([[10, 8, 4], [12, 5, 12]], dtype=np.float32) + + build = lambda x, y: mb.mod(x=x, y=y) + elif mode == "mul": + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array([[-1, 4, -9], [16, -25, 36]], dtype=np.float32) + + build = lambda x, y: mb.mul(x=x, y=y) + elif mode == "pow": + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array( + [[1, 4, 0.037], [256, 0.00032, 46656]], dtype=np.float32 + ) + + build = lambda x, y: mb.pow(x=x, y=y) + elif mode == "real_div": + x = np.array([[10, 20, 30], [40, 50, 60]], dtype=np.float32) + y = np.array([[11, 12, 13], [14, 15, 16]], dtype=np.float32) + expected_outputs = np.array( + [[0.90909091, 1.66666667, 2.30769231], [2.85714286, 3.33333333, 3.75]], + dtype=np.float32, + ) + + build = lambda x, y: mb.real_div(x=x, y=y) + elif mode == "sub": + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y = np.array([[-1, 2, 
-3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array([[2, 0, 6], [0, 10, 0]], dtype=np.float32) + + build = lambda x, y: mb.sub(x=x, y=y) + + expected_output_types = (2, 3, types.fp32) + input_placeholders = { + "x": mb.placeholder(shape=x.shape), + "y": mb.placeholder(shape=y.shape), + } + input_values = {"x": x, "y": y} + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + def test_output_dim_for_same_symbolic_dim_inputs(self): + symbolic_input_shape = (get_new_symbol(), 4, 5) + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=symbolic_input_shape), + mb.TensorSpec(shape=symbolic_input_shape), + ] + ) + def prog(x, y): + return mb.add(x=x, y=y) + + add_op = prog.find_ops(op_type="add")[0] + output_shape = add_op.outputs[0].shape + if output_shape != symbolic_input_shape: + raise AssertionError( + "Invalid Output shape {}. Should instead be {}".format( + output_shape, symbolic_input_shape + ) + ) + + @ssa_fn + def test_builder_add(self): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array([[0, 4, 0], [8, 0, 12]], dtype=np.float32) + v = mb.add(x=x, y=y) + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_floor_div(self): + x = np.array([[10, 20, 30], [40, 50, 60]], dtype=np.float32) + y = np.array([[11, 12, 13], [14, 15, 16]], dtype=np.float32) + expected_outputs = np.array([[0, 1, 2], [2, 3, 3]], dtype=np.float32) + v = mb.floor_div(x=x, y=y) + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_maximum(self): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + v = mb.maximum(x=x, y=y) + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_minimum(self): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.minimum(x=x, y=y) + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_mod(self): + x = np.array([[10, 20, 30], [40, 50, 60]], dtype=np.float32) + y = np.array([[11, 12, 13], [14, 15, 16]], dtype=np.float32) + expected_outputs = np.array([[10, 8, 4], [12, 5, 12]], dtype=np.float32) + v = mb.mod(x=x, y=y) + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_mul(self): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array([[-1, 4, -9], [16, -25, 36]], dtype=np.float32) + v = mb.mul(x=x, y=y) + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_pow(self): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array( + [[1, 4, 0.037], [256, 0.00032, 46656]], dtype=np.float32 + ) + v = mb.pow(x=x, y=y) + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_real_div(self): + x = np.array([[10, 20, 30], [40, 50, 60]], 
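# The floor_div and mod expectations in this class are tied together by the
# identity x == floor_div(x, y) * y + mod(x, y). A quick NumPy cross-check
# of the test data (illustrative only, using the same arrays as below):
#
#   >>> import numpy as np
#   >>> x = np.array([[10, 20, 30], [40, 50, 60]], dtype=np.float32)
#   >>> y = np.array([[11, 12, 13], [14, 15, 16]], dtype=np.float32)
#   >>> np.array_equal(np.floor_divide(x, y) * y + np.mod(x, y), x)
#   True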
dtype=np.float32)
+        y = np.array([[11, 12, 13], [14, 15, 16]], dtype=np.float32)
+        expected_outputs = np.array(
+            [[0.90909091, 1.66666667, 2.30769231], [2.85714286, 3.33333333, 3.75]],
+            dtype=np.float32,
+        )
+        v = mb.real_div(x=x, y=y)
+        np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05)
+
+    @ssa_fn
+    def test_builder_real_div_both_ints(self):
+        x = np.array([5], dtype=np.int32)
+        y = np.array([2], dtype=np.int32)
+        expected_outputs = np.array([2], dtype=np.int32)
+        v = mb.real_div(x=x, y=y)
+        np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05)
+        assert isinstance(v.val[0], (float, np.int32))
+        # make sure the dtype is int
+        assert types.is_int(v.dtype)
+        # make sure the symbolic type matches the value type
+        assert v._sym_type.get_primitive() == v._sym_val.get_primitive()
+
+    @ssa_fn
+    def test_builder_sub(self):
+        x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
+        y = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
+        expected_outputs = np.array([[2, 0, 6], [0, 10, 0]], dtype=np.float32)
+        v = mb.sub(x=x, y=y)
+        np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05)
+
+    @pytest.mark.parametrize(
+        "compute_unit, backend",
+        itertools.product(
+            compute_units,
+            backends,
+        ),
+    )
+    def test_real_div_int_builder_to_backend(self, compute_unit, backend):
+        """
+        For the neuralnetwork backend, real_div produces float output even for int inputs,
+        while the mlprogram backend produces int type output.
+        """
+        x = np.array([[10, 20, 30], [40, 50, 60]], dtype=np.float32)
+        y = np.array([[11, 12, 13], [14, 15, 16]], dtype=np.float32)
+
+        if backend[0] == "neuralnetwork":
+            dtype = np.float32
+        else:
+            dtype = np.int32
+        expected_outputs = np.array(x / y, dtype=dtype)
+
+        build = lambda x, y: mb.real_div(x=x, y=y)
+
+        expected_output_types = (2, 3, types.int32)
+        input_placeholders = {
+            "x": mb.placeholder(shape=x.shape, dtype=types.int32),
+            "y": mb.placeholder(shape=y.shape, dtype=types.int32),
+        }
+        input_values = {"x": x, "y": y}
+        run_compare_builder(
+            build,
+            input_placeholders,
+            input_values,
+            expected_output_types,
+            expected_outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+
+class TestEqual:
+    @pytest.mark.parametrize(
+        "compute_unit, backend",
+        itertools.product(
+            compute_units,
+            backends,
+        ),
+    )
+    def test_builder_to_backend_smoke(self, compute_unit, backend):
+        x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
+        y = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
+        input_placeholders = {
+            "x": mb.placeholder(shape=x.shape),
+            "y": mb.placeholder(shape=y.shape),
+        }
+        input_values = {"x": x, "y": y}
+
+        def build(x, y):
+            return mb.equal(x=x, y=y), mb.equal(x=-3., y=y)
+
+        expected_output_types = [
+            (2, 3, types.bool),
+            (2, 3, types.bool),
+        ]
+        expected_outputs = [
+            np.array([[0, 1, 0], [1, 0, 1]], dtype=bool),
+            np.array([[0, 0, 1], [0, 0, 0]], dtype=bool),
+        ]
+
+        run_compare_builder(
+            build,
+            input_placeholders,
+            input_values,
+            expected_output_types,
+            expected_outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+    @ssa_fn
+    def test_builder_eval(self):
+        x_val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
+        y_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
+        expected_outputs = np.array([[0, 1, 0], [1, 0, 1]], dtype=bool)
+        v = mb.equal(x=x_val, y=y_val)
+        np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05)
+
+
+class TestGreater:
+    @pytest.mark.parametrize(
+        "compute_unit, backend",
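# Note on test_builder_real_div_both_ints above: with two int32 inputs the
# expected value is the truncated quotient, which is what the mlprogram
# backend emits. A rough NumPy analogue (an illustration, not the op's
# definition):
#
#   >>> import numpy as np
#   >>> (np.array([5], dtype=np.int32) / np.array([2], dtype=np.int32)).astype(np.int32)
#   array([2], dtype=int32)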
itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + input_placeholders = { + "x": mb.placeholder(shape=x.shape), + "y": mb.placeholder(shape=y.shape), + } + input_values = {"x": x, "y": y} + + def build(x, y): + return mb.greater(x=x, y=y), mb.greater(x=x, y=3.5) + + expected_output_types = [ + (2, 3, types.bool), + (2, 3, types.bool), + ] + expected_outputs = [ + np.array([[1, 0, 1], [0, 1, 0]], dtype=bool), + np.array([[0, 0, 0], [1, 1, 1]], dtype=bool), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array([[1, 0, 1], [0, 1, 0]], dtype=bool) + v = mb.greater(x=x_val, y=y_val) + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + +class TestGreaterEqual: + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + input_placeholders = { + "x": mb.placeholder(shape=x.shape), + "y": mb.placeholder(shape=y.shape), + } + input_values = {"x": x, "y": y} + + def build(x, y): + return mb.greater_equal(x=x, y=y), mb.greater_equal(x=x, y=3.5) + + expected_output_types = [ + (2, 3, types.bool), + (2, 3, types.bool), + ] + expected_outputs = [ + np.array([[1, 1, 1], [1, 1, 1]], dtype=bool), + np.array([[0, 0, 0], [1, 1, 1]], dtype=bool), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array([[1, 1, 1], [1, 1, 1]], dtype=bool) + v = mb.greater_equal(x=x_val, y=y_val) + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + +class TestLess: + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + input_placeholders = { + "x": mb.placeholder(shape=x.shape), + "y": mb.placeholder(shape=y.shape), + } + input_values = {"x": x, "y": y} + + def build(x, y): + return mb.less(x=x, y=y) + + expected_output_types = (2, 3, types.bool) + expected_outputs = np.array([[0, 0, 0], [0, 0, 0]], dtype=bool) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_smoke2(self, compute_unit, backend): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=x.shape)} + input_values = 
{"x": x} + + def build(x): + # y is const + y = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + return mb.less(x=x, y=y) + + expected_output_types = (2, 3, types.bool) + expected_outputs = np.array([[0, 0, 0], [0, 0, 0]], dtype=bool) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_broadcast(self, compute_unit, backend): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=x.shape)} + input_values = {"x": x} + + def build(x): + # y is const + return mb.less(x=x, y=3.5) + + expected_output_types = (2, 3, types.bool) + expected_outputs = np.array([[1, 1, 1], [0, 0, 0]], dtype=bool) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array([[0, 0, 0], [0, 0, 0]], dtype=bool) + v = mb.less(x=x_val, y=y_val) + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + +class TestLessEqual: + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + input_placeholders = { + "x": mb.placeholder(shape=x.shape), + "y": mb.placeholder(shape=y.shape), + } + input_values = {"x": x, "y": y} + + def build(x, y): + return mb.less_equal(x=x, y=y) + + expected_output_types = (2, 3, types.bool) + expected_outputs = np.array([[0, 1, 0], [1, 0, 1]], dtype=bool) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array([[0, 1, 0], [1, 0, 1]], dtype=bool) + v = mb.less_equal(x=x_val, y=y_val) + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + +class TestNotEqual: + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + input_placeholders = { + "x": mb.placeholder(shape=x.shape), + "y": mb.placeholder(shape=y.shape), + } + input_values = {"x": x, "y": y} + + def build(x, y): + return mb.not_equal(x=x, y=y) + + expected_output_types = (2, 3, types.bool) + expected_outputs = np.array([[1, 0, 1], [0, 1, 0]], dtype=bool) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + y_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + 
expected_outputs = np.array([[1, 0, 1], [0, 1, 0]], dtype=bool)
+        v = mb.not_equal(x=x_val, y=y_val)
+        np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_elementwise_unary.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_elementwise_unary.py
new file mode 100644
index 00000000..f1b0640a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_elementwise_unary.py
@@ -0,0 +1,689 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import itertools
+
+import numpy as np
+import pytest
+import scipy
+
+from coremltools.converters.mil import testing_reqs
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil import (Function, get_new_symbol,
+                                            types)
+from coremltools.converters.mil.mil.types.symbolic import \
+    is_compatible_symbolic_vector
+from coremltools.converters.mil.testing_utils import ssa_fn
+
+from .testing_utils import run_compare_builder
+
+backends = testing_reqs.backends
+compute_units = testing_reqs.compute_units
+
+
+class TestElementwiseUnary:
+    # All ops in this test share the same backends
+    @pytest.mark.parametrize(
+        "compute_unit, backend, mode",
+        itertools.product(
+            compute_units,
+            backends,
+            [
+                "abs",
+                "acos",
+                "asin",
+                "atan",
+                "atanh",
+                "cast",
+                "ceil",
+                "clip",
+                "cos",
+                "cosh",
+                "erf",
+                "exp",
+                "exp2",
+                "floor",
+                "inverse",
+                "log",
+                "round",
+                "rsqrt",
+                "sign",
+                "sin",
+                "sinh",
+                "sqrt",
+                "square",
+                "tan",
+                "tanh",
+                "threshold",
+            ],
+        ),
+    )
+    def test_builder_to_backend_smoke(self, compute_unit, backend, mode):
+        if mode == "abs":
+            val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
+            expected_outputs = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
+
+            build = lambda x: mb.abs(x=x)
+        elif mode == "acos":
+            val = np.array([[-1, -0.5, 0], [0.4, 0.5, 0.8]], dtype=np.float32)
+            expected_outputs = np.array(
+                [
+                    [3.14159265, 2.0943951, 1.57079633],
+                    [1.15927948, 1.04719755, 0.64350111],
+                ],
+                dtype=np.float32,
+            )
+
+            build = lambda x: mb.acos(x=x)
+        elif mode == "asin":
+            val = np.array([[-1, -0.5, 0], [0.4, 0.5, 0.8]], dtype=np.float32)
+            expected_outputs = np.array(
+                [[-1.57079633, -0.52359878, 0.0], [0.41151685, 0.52359878, 0.92729522]],
+                dtype=np.float32,
+            )
+
+            build = lambda x: mb.asin(x=x)
+        elif mode == "atan":
+            val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
+            expected_outputs = np.array(
+                [
+                    [-0.78539816, 1.10714872, -1.24904577],
+                    [1.32581766, -1.37340077, 1.40564765],
+                ],
+                dtype=np.float32,
+            )
+            build = lambda x: mb.atan(x=x)
+        elif mode == "atanh":
+            val = np.array([[-0.8, -0.5, 0], [0.4, 0.5, 0.8]], dtype=np.float32)
+            expected_outputs = np.array(
+                [[-1.09861229, -0.54930614, 0.0], [0.42364893, 0.54930614, 1.09861229]],
+                dtype=np.float32,
+            )
+
+            build = lambda x: mb.atanh(x=x)
+        elif mode == "cast":
+            val = np.array([[-1.2, 2, -3.6], [4.5, -5, 6.7]], dtype=np.float32)
+            expected_outputs = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.int32)
+            build = lambda x: mb.cast(x=x, dtype="int32")
+        elif mode == "ceil":
+            val = np.array([[-1.2, 2, -3.4], [4.5, -5, 6.7]], dtype=np.float32)
+            expected_outputs = np.array([[-1, 2, -3], [5, -5, 7]], dtype=np.float32)
+
+            build = lambda x:
mb.ceil(x=x) + elif mode == "clip": + val = np.array([[-1.2, 2, -3.4], [4.5, -5, 6.7]], dtype=np.float32) + expected_outputs = np.array([[0, 2, 0], [4.5, 0, 5]], dtype=np.float32) + + build = lambda x: mb.clip(x=x, alpha=0.0, beta=5.0) + elif mode == "cos": + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array( + [ + [0.54030231, -0.41614684, -0.9899925], + [-0.65364362, 0.28366219, 0.96017029], + ], + dtype=np.float32, + ) + + build = lambda x: mb.cos(x=x) + elif mode == "cosh": + val = np.array([[-1, -2, -3], [1, 2, 3]], dtype=np.float32) + expected_outputs = np.array( + [ + [1.54308063, 3.76219569, 10.067662], + [1.54308063, 3.76219569, 10.067662], + ], + dtype=np.float32, + ) + + build = lambda x: mb.cosh(x=x) + elif mode == "erf": + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array( + [ + [-0.8427007929497148, 0.9953222650189527, -0.9999779095030014], + [0.9999999845827421, -0.9999999999984626, 1.0], + ], + dtype=np.float32, + ) + + build = lambda x: mb.erf(x=x) + elif mode == "exp": + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array( + [ + [0.36787944, 7.3890561, 0.04978707], + [54.5981500, 0.0067379, 403.428793], + ], + dtype=np.float32, + ) + + build = lambda x: mb.exp(x=x) + elif mode == "exp2": + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array( + [[0.5, 4.0, 0.125], [16, 0.03125, 64]], dtype=np.float32 + ) + + build = lambda x: mb.exp2(x=x) + elif mode == "floor": + val = np.array([[-1.2, 2, -3.4], [4.5, -5, 6.7]], dtype=np.float32) + expected_outputs = np.array([[-2, 2, -4], [4, -5, 6]], dtype=np.float32) + + build = lambda x: mb.floor(x=x) + elif mode == "inverse": + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array( + [[-1.0, 0.5, -0.33333334], [0.25, -0.2, 0.16666667]], dtype=np.float32 + ) + build = lambda x: mb.inverse(x=x) + elif mode == "log": + val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + expected_outputs = np.array( + [[0.0, 0.69314718, 1.09861229], [1.38629436, 1.60943791, 1.79175947]], + dtype=np.float32, + ) + + build = lambda x: mb.log(x=x) + elif mode == "round": + val = np.array([[-1.2, 2, -3.4], [4.6, -5, 6.7]], dtype=np.float32) + expected_outputs = np.array([[-1, 2, -3], [5, -5, 7]], dtype=np.float32) + + build = lambda x: mb.round(x=x) + elif mode == "rsqrt": + val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + expected_outputs = np.array( + [[1.0, 0.70710678, 0.57735027], [0.5, 0.4472136, 0.40824829]], + dtype=np.float32, + ) + + build = lambda x: mb.rsqrt(x=x) + elif mode == "sign": + val = np.array([[-1, 2, 0], [0, -5, 6]], dtype=np.float32) + expected_outputs = np.array([[-1, 1, 0], [0, -1, 1]], dtype=np.float32) + + build = lambda x: mb.sign(x=x) + elif mode == "sin": + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array( + [ + [-0.84147098, 0.90929743, -0.14112001], + [-0.7568025, 0.95892427, -0.2794155], + ], + dtype=np.float32, + ) + + build = lambda x: mb.sin(x=x) + elif mode == "sinh": + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array( + [[-1.1752, 3.62686, -10.017874], [27.289917, -74.20321, 201.71315]], + dtype=np.float32, + ) + + build = lambda x: mb.sinh(x=x) + elif mode == "sqrt": + val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + expected_outputs = np.array( + [[1.0, 1.41421356, 1.73205081], [2.0, 2.23606798, 2.44948974]], + 
dtype=np.float32, + ) + + build = lambda x: mb.sqrt(x=x) + elif mode == "square": + val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + expected_outputs = np.array( + [[1.0, 4.0, 9.0], [16.0, 25.0, 36.]], + dtype=np.float32, + ) + + build = lambda x: mb.square(x=x) + elif mode == "tan": + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array( + [[-1.5574, -2.185, 0.1425], [1.15782, 3.3805, -0.291]], dtype=np.float32 + ) + + build = lambda x: mb.tan(x=x) + elif mode == "tanh": + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + expected_outputs = np.array( + [ + [-0.7615942, 0.9640276, -0.9950548], + [0.9993293, -0.9999092, 0.9999877], + ], + dtype=np.float32, + ) + + build = lambda x: mb.tanh(x=x) + elif mode == "threshold": + val = np.array([[-1.2, 2, -3.4], [4.5, -5, 6.7]], dtype=np.float32) + expected_outputs = np.array( + [[1.0, 2, 1.0], [4.5, 1.0, 6.7]], dtype=np.float32 + ) + + build = lambda x: mb.threshold(x=x, alpha=1.0) + + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + expected_output_types = ( + (2, 3, types.int32) if mode == "cast" else (2, 3, types.fp32) + ) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_abs_eval(self): + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.abs(x=val) + expected_outputs = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_acos_eval(self): + val = np.array([[-1, -0.5, 0], [0.4, 0.5, 0.8]], dtype=np.float32) + v = mb.acos(x=val) + expected_outputs = np.array( + [[3.14159265, 2.0943951, 1.57079633], [1.15927948, 1.04719755, 0.64350111]], + dtype=np.float32, + ) + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_asin_eval(self): + val = np.array([[-1, -0.5, 0], [0.4, 0.5, 0.8]], dtype=np.float32) + v = mb.asin(x=val) + expected_outputs = np.array( + [[-1.57079633, -0.52359878, 0.0], [0.41151685, 0.52359878, 0.92729522]], + dtype=np.float32, + ) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_atan_eval(self): + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.atan(x=val) + expected_outputs = np.array( + [ + [-0.78539816, 1.10714872, -1.24904577], + [1.32581766, -1.37340077, 1.40564765], + ], + dtype=np.float32, + ) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_atanh_eval(self): + val = np.array([[-0.8, -0.5, 0], [0.4, 0.5, 0.8]], dtype=np.float32) + v = mb.atanh(x=val) + expected_outputs = np.array( + [[-1.09861229, -0.54930614, 0.0], [0.42364893, 0.54930614, 1.09861229]], + dtype=np.float32, + ) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_cast_eval(self): + val = np.array([[-1.2, 2, -3.4], [4.5, -5, 6.7]], dtype=np.float32) + expected_outputs = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.int32) + + v = mb.cast(x=val, dtype="int32") + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_ceil_eval(self): + val = np.array([[-1.2, 2, -3.4], [4.5, -5, 6.7]], dtype=np.float32) + v = mb.ceil(x=val) + expected_outputs = np.array([[-1, 2, -3], [5, -5, 7]], 
dtype=np.float32) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_clip_eval(self): + val = np.array([[-1.2, 2, -3.4], [4.5, -5, 6.7]], dtype=np.float32) + v = mb.clip(x=val, alpha=0.0, beta=5.0) + expected_outputs = np.array([[0, 2, 0], [4.5, 0, 5]], dtype=np.float32) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_cos_eval(self): + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.cos(x=val) + expected_outputs = np.array( + [ + [0.54030231, -0.41614684, -0.9899925], + [-0.65364362, 0.28366219, 0.96017029], + ], + dtype=np.float32, + ) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_cosh_eval(self): + val = np.array([[-1, -2, -3], [1, 2, 3]], dtype=np.float32) + v = mb.cosh(x=val) + expected_outputs = np.array( + [[1.54308063, 3.76219569, 10.067662], [1.54308063, 3.76219569, 10.067662]], + dtype=np.float32, + ) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_erf_eval(self): + x_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.erf(x=x_val) + np.testing.assert_allclose(scipy.special.erf(x_val), v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_exp_eval(self): + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.exp(x=val) + expected_outputs = np.array( + [[0.36787944, 7.3890561, 0.04978707], [54.5981500, 0.0067379, 403.428793]], + dtype=np.float32, + ) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_exp2_eval(self): + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.exp2(x=val) + expected_outputs = np.array( + [[0.5, 4.0, 0.125], [16, 0.03125, 64]], dtype=np.float32 + ) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_floor_eval(self): + val = np.array([[-1.2, 2, -3.4], [4.5, -5, 6.7]], dtype=np.float32) + v = mb.floor(x=val) + expected_outputs = np.array([[-2, 2, -4], [4, -5, 6]], dtype=np.float32) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_inverse_eval(self): + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.inverse(x=val) + expected_outputs = np.array( + [[-1.0, 0.5, -0.33333334], [0.25, -0.2, 0.16666667]], dtype=np.float32 + ) + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_log_eval(self): + val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + v = mb.log(x=val) + expected_outputs = np.array( + [[0.0, 0.69314718, 1.09861229], [1.38629436, 1.60943791, 1.79175947]], + dtype=np.float32, + ) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_round_eval(self): + val = np.array([[-1.2, 2, -3.4], [4.6, -5, 6.7]], dtype=np.float32) + v = mb.round(x=val) + expected_outputs = np.array([[-1, 2, -3], [5, -5, 7]], dtype=np.float32) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_rsqrt_eval(self): + val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + v = mb.rsqrt(x=val) + expected_outputs = np.array( + [[1.0, 0.70710678, 0.57735027], [0.5, 0.4472136, 0.40824829]], + dtype=np.float32, + ) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, 
rtol=1e-05) + + @ssa_fn + def test_builder_sign_eval(self): + val = np.array([[-1, 2, 0], [0, -5, 6]], dtype=np.float32) + v = mb.sign(x=val) + expected_outputs = np.array([[-1, 1, 0], [0, -1, 1]], dtype=np.float32) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_sin_eval(self): + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.sin(x=val) + expected_outputs = np.array( + [ + [-0.84147098, 0.90929743, -0.14112001], + [-0.7568025, 0.95892427, -0.2794155], + ], + dtype=np.float32, + ) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_sinh_eval(self): + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.sinh(x=val) + expected_outputs = np.array( + [[-1.1752, 3.62686, -10.017874], [27.289917, -74.20321, 201.71315]], + dtype=np.float32, + ) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_sqrt_eval(self): + val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + v = mb.sqrt(x=val) + expected_outputs = np.array( + [[1.0, 1.41421356, 1.73205081], [2.0, 2.23606798, 2.44948974]], + dtype=np.float32, + ) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_tan_eval(self): + val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.tan(x=val) + expected_outputs = np.array( + [[-1.5574, -2.185, 0.1425], [1.15782, 3.3805, -0.291]], dtype=np.float32 + ) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_tanh_eval(self): + x_val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32) + v = mb.tanh(x=x_val) + np.testing.assert_allclose(np.tanh(x_val), v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_threshold_eval(self): + val = np.array([[-1.2, 2, -3.4], [4.5, -5, 6.7]], dtype=np.float32) + v = mb.threshold(x=val, alpha=1.0) + expected_outputs = np.array([[1.0, 2, 1.0], [4.5, 1.0, 6.7]], dtype=np.float32) + + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + def test_cast_with_symbolic_value(self): + input_shape = [get_new_symbol(), 1] + input_placeholders = { + "x": mb.placeholder(shape=input_shape), + } + + def build(x): + shape = mb.shape(x=x) + return mb.cast(x=shape, dtype="int32") + + with Function(input_placeholders) as ssa_func: + output_vars = build(**ssa_func.inputs) + assert is_compatible_symbolic_vector(output_vars.sym_val, [get_new_symbol(), 1]) + + @pytest.mark.parametrize( + "compute_unit, backend, epsilon", + itertools.product( + compute_units, + backends, + [1e-3, 1e-1, 1.0], + ), + ) + def test_builder_to_backend_stress_inverse( + self, compute_unit, backend, epsilon + ): + x = np.array([[1, -2, 3], [4, -5, 6]], dtype=np.float32) + numpy_pred = 1 / (x + epsilon) + + input_placeholder_dict = {"x": mb.placeholder(shape=x.shape)} + input_value_dict = {"x": x} + + def build(x): + return mb.inverse(x=x, epsilon=epsilon) + + expected_output_type = x.shape + (types.fp32,) + run_compare_builder( + build, + input_placeholder_dict, + input_value_dict, + expected_output_type, + numpy_pred, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, epsilon", + itertools.product( + compute_units, + backends, + [1e-3, 1e-1, 1.0], + ), + ) + def test_builder_to_backend_stress_rsqrt( + self, compute_unit, backend, epsilon + ): + x = np.array([[1, 2, 3], [4, 5, 
6]], dtype=np.float32) + numpy_pred = 1.0 / np.sqrt(x + epsilon) + + input_placeholder_dict = {"x": mb.placeholder(shape=x.shape)} + input_value_dict = {"x": x} + + def build(x): + return mb.rsqrt(x=x, epsilon=epsilon) + + expected_output_type = x.shape + (types.fp32,) + run_compare_builder( + build, + input_placeholder_dict, + input_value_dict, + expected_output_type, + numpy_pred, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, epsilon", + itertools.product( + compute_units, + backends, + [1e-3, 1e-1, 1.0], + ), + ) + def test_builder_to_backend_stress_log( + self, compute_unit, backend, epsilon + ): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + numpy_pred = np.log(x + epsilon) + + input_placeholder_dict = {"x": mb.placeholder(shape=x.shape)} + input_value_dict = {"x": x} + + def build(x): + return mb.log(x=x, epsilon=epsilon) + + expected_output_type = x.shape + (types.fp32,) + run_compare_builder( + build, + input_placeholder_dict, + input_value_dict, + expected_output_type, + numpy_pred, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, src_dst", + itertools.product( + compute_units, + backends, + [("fp16", "fp32"), ("fp32", "fp16")], + ), + ) + def test_builder_to_backend_stress_cast( + self, compute_unit, backend, src_dst + ): + src_dtype, dst_dtype = src_dst + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + numpy_pred = x.astype(dtype=np.float16) + + input_placeholder_dict = {"x": mb.placeholder(shape=x.shape)} + input_value_dict = {"x": x} + + def build(x): + x = mb.cast(x=x, dtype=src_dtype) + x = mb.square(x=x) + x = mb.cast(x=x, dtype=dst_dtype) + x = mb.sqrt(x=x) + x = mb.cast(x=x, dtype="fp32") + return x + + expected_output_type = x.shape + (types.fp32,) + run_compare_builder( + build, + input_placeholder_dict, + input_value_dict, + expected_output_type, + numpy_pred, + compute_unit=compute_unit, + backend=backend, + ) + + def test_erf_value_inference(self): + INPUT_SIZE=(2, 3, 4) + rs = np.random.RandomState(1234) + x = rs.random(INPUT_SIZE) + + @mb.program(input_specs=[]) + def prog(): + return mb.erf(x=x) + + ops = list(prog.functions.values())[0].operations + assert len(ops) == 2 + assert ops[0].op_type == 'const' + erf_op = ops[1] + assert erf_op.op_type == 'erf' + np.testing.assert_allclose(erf_op.value_inference(), scipy.special.erf(x), atol=1e-04, rtol=1e-05) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_image_resizing.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_image_resizing.py new file mode 100644 index 00000000..ab0e542e --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_image_resizing.py @@ -0,0 +1,934 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import functools +import itertools + +import numpy as np +import pytest + +import coremltools as ct +from coremltools._deps import _HAS_TORCH, MSG_TORCH_NOT_FOUND +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import get_new_symbol, types +from coremltools.converters.mil.testing_reqs import backends, compute_units +from coremltools.converters.mil.testing_utils import random_gen +from coremltools.models.utils import _macos_version + +from .testing_utils import run_compare_builder + +if _HAS_TORCH: + import torch + + +class TestAffine: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + if backend[0] == "neuralnetwork": + pytest.skip("nn backend not supported") + + x_val = np.array([11.0, 22.0, 33.0, 44.0], dtype=np.float32).reshape( + [1, 1, 2, 2] + ) + transform_matrix_val = np.array( + [-1.0, -2.0, -3.7, -1.0, 3.5, 1.2], dtype=np.float32 + ).reshape([1, 6]) + + input_placeholder_dict = { + "x": mb.placeholder(shape=x_val.shape), + "transform_matrix": mb.placeholder(shape=transform_matrix_val.shape), + } + input_value_dict = {"x": x_val, "transform_matrix": transform_matrix_val} + + def build(x, transform_matrix): + return [ + mb.affine( + x=x, + transform_matrix=transform_matrix, + output_height=3, + output_width=3, + sampling_mode="bilinear", + padding_mode="constant", + padding_value=0.0, + coordinates_mode="normalized_minus_one_to_one", + align_corners=True, + ), + mb.affine( + x=x, + transform_matrix=transform_matrix, + output_height=2, + output_width=5, + sampling_mode="bilinear", + padding_mode="constant", + padding_value=0.0, + coordinates_mode="normalized_minus_one_to_one", + align_corners=True, + ), + ] + + expected_output_types = [ + (1, 1, 3, 3, types.fp32), + (1, 1, 2, 5, types.fp32), + ] + expected_outputs = [ + np.array( + [10.752501, 2.5025, 0.0, 1.9799997, 0.0, 0.0, 0.0, 0.0, 0.0], + dtype=np.float32, + ).reshape([1, 1, 3, 3]), + np.array( + [10.752501, 5.94, 2.5025, 0.44000006, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + dtype=np.float32, + ).reshape([1, 1, 2, 5]), + ] + + run_compare_builder( + build, + input_placeholder_dict, + input_value_dict, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestResample: + @pytest.mark.parametrize( + "compute_unit, backend, minimum_deployment_target", + itertools.product( + compute_units, + backends, + [ct.target.iOS15, ct.target.iOS16], + ) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend, minimum_deployment_target): + if backend[0] == "neuralnetwork": + pytest.skip("nn backend not supported") + if minimum_deployment_target == ct.target.iOS16 and _macos_version() < (13, 0): + pytest.skip("New functionality in macOS13/iOS16") + + x_ = np.array([11.0, 22.0, 33.0, 44.0], dtype=np.float32).reshape([1, 1, 2, 2]) + coordinates_ = np.array( + [-1.0, -2.0, -3.7, -1.0, 0.0, 0.0, 3.5, 1.2], dtype=np.float32 + ).reshape([1, 2, 2, 2]) + + input_placeholder_dict = { + "x": mb.placeholder(shape=x_.shape), + "coordinates": mb.placeholder(shape=coordinates_.shape), + } + input_value_dict = {"x": x_, "coordinates": coordinates_} + expected_output_type = (1, 1, 2, 2, types.fp32) + + def build_0(x, coordinates): + return mb.resample( + x=x, + 
coordinates=coordinates, + sampling_mode="bilinear", + padding_mode="constant", + padding_value=6.17, + coordinates_mode="normalized_minus_one_to_one", + align_corners=True, + ) + + expected_output_0 = np.array( + [8.585, 6.17, 27.5, 6.17], dtype=np.float32 + ).reshape(expected_output_type[:-1]) + + def build_1(x, coordinates): + return mb.resample( + x=x, + coordinates=coordinates, + sampling_mode="nearest", + padding_mode="border", + padding_value=-1.0, + coordinates_mode="unnormalized", + align_corners=False, + ) + + expected_output_1 = np.array( + [11.0, 11.0, 11.0, 44.0], dtype=np.float32 + ).reshape(expected_output_type[:-1]) + + def build_2(x, coordinates): + return mb.resample( + x=x, + coordinates=coordinates, + sampling_mode="bilinear", + padding_mode="reflection", + padding_value=-1.0, + coordinates_mode="normalized_zero_to_one", + align_corners=True, + ) + + expected_output_2 = np.array( + [22.0, 36.3, 11.0, 34.1], dtype=np.float32 + ).reshape(expected_output_type[:-1]) + + def build_3(x, coordinates): + return mb.resample( + x=x, + coordinates=coordinates, + sampling_mode="nearest", + padding_mode="symmetric", + padding_value=-1.0, + coordinates_mode="normalized_zero_to_one", + align_corners=False, + ) + + expected_output_3 = np.array( + [22.0, 33.0, 11.0, 33.0], dtype=np.float32 + ).reshape(expected_output_type[:-1]) + + for build, expected_output in zip( + [build_0, build_1, build_2, build_3], + [ + expected_output_0, + expected_output_1, + expected_output_2, + expected_output_3, + ], + ): + mlmodel = run_compare_builder( + build, + input_placeholder_dict, + input_value_dict, + expected_output_type, + expected_output, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=minimum_deployment_target, + ) + prog = mlmodel._mil_program + number_of_cast = len(prog["main"].find_ops(op_type="cast")) + # for the new iOS16 resample op, the coordinates is cast to fp16 + if minimum_deployment_target == ct.target.iOS15: + assert number_of_cast == 2 + elif minimum_deployment_target == ct.target.iOS16: + assert number_of_cast == 3 + else: + raise ValueError("Unrecognized target {}".format(minimum_deployment_target)) + + +class TestResizeNearestNeighbor: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x_val = np.array([0.37, 6.17], dtype=np.float32).reshape([1, 1, 2, 1]) + input_placeholder_dict = {"x": mb.placeholder(shape=x_val.shape)} + input_value_dict = {"x": x_val} + + def build_model(x): + return [ + mb.resize_nearest_neighbor( + x=x, target_size_height=2, target_size_width=1, + ), + mb.resize_nearest_neighbor( + x=x, target_size_height=2, target_size_width=3, + ), + ] + + expected_output_types = [ + (1, 1, 2, 1, types.fp32), + (1, 1, 2, 3, types.fp32), + ] + expected_outputs = [ + x_val, + np.array([0.37, 0.37, 0.37, 6.17, 6.17, 6.17], dtype=np.float32).reshape( + [1, 1, 2, 3] + ), + ] + + run_compare_builder( + build_model, + input_placeholder_dict, + input_value_dict, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestUpsampleNearestNeighborFractionalScales: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + if backend[0] == "neuralnetwork": + pytest.skip("nn backend not supported") + + if backend[0] == "mlprogram" and compute_unit != ct.ComputeUnit.CPU_ONLY: + 
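# Context for the cast-count assertion in TestResample above: under the
# mlprogram backend's default fp16 compute precision, boundary casts are
# most likely what account for the baseline count, and the iOS16 variant of
# resample additionally casts its coordinates input to fp16 (per the
# comment above), hence 2 casts for iOS15 vs. 3 for iOS16. The counting
# idiom, as used there:
#
#   >>> prog = mlmodel._mil_program
#   >>> len(prog["main"].find_ops(op_type="cast"))
#   2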
pytest.xfail("rdar://97398448 (TestUpsampleNearestNeighborFractionalScales failing on GPU)") + + x_val = np.array([1.5, -2.5, 3.5], dtype=np.float32).reshape([1, 1, 1, 3]) + input_placeholder_dict = {"x": mb.placeholder(shape=x_val.shape)} + input_value_dict = {"x": x_val} + + def build(x): + return [ + mb.upsample_nearest_neighbor( + x=x, scale_factor_height=1.0, scale_factor_width=1.0, + ), + mb.upsample_nearest_neighbor( + x=x, scale_factor_height=3.17, scale_factor_width=0.67 + ), + mb.upsample_nearest_neighbor( + x=x, scale_factor_height=2.0, scale_factor_width=1.12, + ), + ] + + expected_output_types = [ + (1, 1, 1, 3, types.fp32), + (1, 1, 3, 2, types.fp32), + (1, 1, 2, 3, types.fp32), + ] + expected_outputs = [ + x_val, + np.array([1.5, -2.5, 1.5, -2.5, 1.5, -2.5], dtype=np.float32).reshape( + [1, 1, 3, 2] + ), + np.array([1.5, -2.5, 3.5, 1.5, -2.5, 3.5], dtype=np.float32).reshape( + [1, 1, 2, 3] + ), + ] + + run_compare_builder( + build, + input_placeholder_dict, + input_value_dict, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestResizeBilinear: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + if backend[0] == "mlprogram": + pytest.xfail("Seg fault: rdar://78343191 ((MIL GPU) Core ML Tools Unit Test failures [failure to load or Seg fault])") + + if backend[0] == "neuralnetwork" and compute_unit == ct.ComputeUnit.CPU_ONLY: + pytest.xfail("rdar://85318710 (Coremltools Smoke test on ResizeBilinear failing on NNv1 backend.)") + + x = np.array([0, 1], dtype=np.float32).reshape(1, 1, 2) + input_placeholder_dict = {"x": mb.placeholder(shape=x.shape)} + input_value_dict = {"x": x} + + def build_mode_0(x): + return mb.resize_bilinear( + x=x, + target_size_height=1, + target_size_width=5, + sampling_mode="STRICT_ALIGN_CORNERS", + ) + + expected_output_type = (1, 1, 5, types.fp32) + expected_output = np.array([0, 0.25, 0.5, 0.75, 1], dtype=np.float32).reshape( + 1, 1, 5 + ) + + run_compare_builder( + build_mode_0, + input_placeholder_dict, + input_value_dict, + expected_output_type, + expected_output, + compute_unit=compute_unit, + backend=backend, + ) + + def build_mode_2(x): + return mb.resize_bilinear( + x=x, target_size_height=1, target_size_width=5, sampling_mode="DEFAULT" + ) + + expected_output = np.array([0, 0.4, 0.8, 1, 1], dtype=np.float32).reshape( + 1, 1, 5 + ) + + run_compare_builder( + build_mode_2, + input_placeholder_dict, + input_value_dict, + expected_output_type, + expected_output, + compute_unit=compute_unit, + backend=backend, + ) + + def build_mode_3(x): + return mb.resize_bilinear( + x=x, + target_size_height=1, + target_size_width=5, + sampling_mode="OFFSET_CORNERS", + ) + + expected_output = np.array([0.1, 0.3, 0.5, 0.7, 0.9], dtype=np.float32).reshape( + 1, 1, 5 + ) + + run_compare_builder( + build_mode_3, + input_placeholder_dict, + input_value_dict, + expected_output_type, + expected_output, + compute_unit=compute_unit, + backend=backend, + ) + + if backend[0] != "neuralnetwork": + def build_mode_4(x): + return mb.resize_bilinear( + x=x, + target_size_height=1, + target_size_width=5, + sampling_mode="UNALIGN_CORNERS", + ) + + expected_output = np.array([0.0, 0.1, 0.5, 0.9, 1.0], dtype=np.float32).reshape( + 1, 1, 5 + ) + + run_compare_builder( + build_mode_4, + input_placeholder_dict, + input_value_dict, + expected_output_type, + expected_output, + compute_unit=compute_unit, + 
backend=backend, + ) + + +class TestUpsampleBilinear: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x = np.array([0, 1], dtype=np.float32).reshape(1, 1, 2) + input_placeholder_dict = {"x": mb.placeholder(shape=x.shape)} + input_value_dict = {"x": x} + + def build_upsample_integer(x): + return mb.upsample_bilinear( + x=x, scale_factor_height=1, scale_factor_width=3 + ) + + expected_output_type = (1, 1, 6, types.fp32) + expected_output = np.array( + [0, 0.2, 0.4, 0.6, 0.8, 1], dtype=np.float32 + ).reshape(1, 1, 6) + + run_compare_builder( + build_upsample_integer, + input_placeholder_dict, + input_value_dict, + expected_output_type, + expected_output, + compute_unit=compute_unit, + backend=backend, + ) + + def build_upsample_fractional(x): + return mb.upsample_bilinear( + x=x, scale_factor_height=1.0, scale_factor_width=2.6, align_corners=False + ) + + expected_output_type = (1, 1, 5, types.fp32) + expected_output = np.array([0, 0.1, 0.5, 0.9, 1], dtype=np.float32).reshape( + 1, 1, 5 + ) + + run_compare_builder( + build_upsample_fractional, + input_placeholder_dict, + input_value_dict, + expected_output_type, + expected_output, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, align_corners, half_pixel_centers", + itertools.product( + compute_units, + backends, + [True, False], + [True, False], + ) + ) + def test_builder_to_backend_smoke_iOS16(self, compute_unit, backend, align_corners, half_pixel_centers): + if backend[0] == "neuralnetwork" or ct.utils._macos_version() < (13, 0): + pytest.skip("The new half_pixel_centers argument only available in iOS16") + + if align_corners and half_pixel_centers: + pytest.skip("Invalid configuration of align_corners and half_pixel_centers") + + x = np.array([1, 2], dtype=np.float32).reshape(1, 1, 1, 2) + input_placeholder_dict = {"x": mb.placeholder(shape=x.shape)} + input_value_dict = {"x": x} + + def build_upsample_bilinear(x): + return mb.upsample_bilinear( + x=x, + scale_factor_height=2, + scale_factor_width=3, + align_corners=align_corners, + half_pixel_centers=half_pixel_centers, + ) + + expected_output_type = (1, 1, 2, 6, types.fp32) + + if align_corners and not half_pixel_centers: + expected_output = [1., 1.2, 1.4, 1.6, 1.8, 2., 1., 1.2, 1.4, 1.6, 1.8, 2.] + elif not align_corners and half_pixel_centers: + expected_output = [1., 1., 1.33334, 1.66667, 2., 2., 1., 1., 1.33334, 1.66667, 2., 2.] + elif not align_corners and not half_pixel_centers: + expected_output = [1., 1.33334, 1.66667, 2., 2., 2., 1., 1.33334, 1.66667, 2., 2., 2.] 
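# The three expectations above follow the standard bilinear source-
# coordinate conventions (shown for in_w = 2, scale = 3, out_w = 6; worked
# arithmetic consistent with the expected arrays, not an official spec):
#
#   align_corners:      src = dst * (in_w - 1) / (out_w - 1) = dst / 5
#   half_pixel_centers: src = (dst + 0.5) / 3 - 0.5, clamped to [0, in_w - 1]
#   neither:            src = dst / 3, clamped to [0, in_w - 1]
#
# e.g. half-pixel, dst = 2: src = 1/3, so the value is 1 + 1/3 = 1.33334.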
+        else:
+            raise ValueError("align_corners and half_pixel_centers cannot both be True")
+
+        expected_output = [np.array(expected_output, dtype=np.float32).reshape(1, 1, 2, 6)]
+
+        run_compare_builder(
+            build_upsample_bilinear,
+            input_placeholder_dict,
+            input_value_dict,
+            expected_output_type,
+            expected_output,
+            compute_unit=compute_unit,
+            backend=backend,
+            minimum_deployment_target=ct.target.iOS16,
+        )
+
+    @pytest.mark.skipif(not _HAS_TORCH, reason=MSG_TORCH_NOT_FOUND)
+    @pytest.mark.parametrize(
+        "compute_unit, backend, input_shape, scale_factor, align_corners, recompute_scale_factor",
+        itertools.product(
+            compute_units,
+            backends,
+            [(2, 5, 10, 22)],
+            [(3, 4), (2.5, 2.0), (0.5, 0.75)],
+            [True, False],
+            [True, False],
+        ),
+    )
+    def test_builder_to_backend_stress(
+        self, compute_unit, backend, input_shape, scale_factor, align_corners, recompute_scale_factor
+    ):
+        scale_factor_height, scale_factor_width = scale_factor
+        _, _, height, width = input_shape
+        height = height * scale_factor_height
+        width = width * scale_factor_width
+        is_h_float = height - np.floor(height) > 0.001
+        is_w_float = width - np.floor(width) > 0.001
+
+        # Currently, MIL does not support recompute_scale_factor=False + align_corners=False
+        # with fractional output size
+        if not recompute_scale_factor and not align_corners and (is_h_float or is_w_float):
+            pytest.xfail("rdar://81124053 (Support recompute_scale_factor)")
+
+        def _get_torch_upsample_prediction(x, scale_factor=(2, 2), align_corners=False, recompute_scale_factor=True):
+            x = torch.from_numpy(x)
+            out = torch.nn.functional.interpolate(
+                x,
+                scale_factor=scale_factor,
+                mode="bilinear",
+                align_corners=align_corners,
+                recompute_scale_factor=recompute_scale_factor,
+            )
+            return out.numpy()
+
+        x = random_gen(input_shape, rand_min=-100, rand_max=100)
+        torch_pred = _get_torch_upsample_prediction(
+            x,
+            scale_factor=scale_factor,
+            align_corners=align_corners,
+            recompute_scale_factor=recompute_scale_factor,
+        )
+
+        input_placeholder_dict = {"x": mb.placeholder(shape=x.shape)}
+        input_value_dict = {"x": x}
+
+        def build_upsample(x):
+            return mb.upsample_bilinear(
+                x=x,
+                scale_factor_height=scale_factor[0],
+                scale_factor_width=scale_factor[1],
+                align_corners=align_corners,
+            )
+
+        expected_output_type = torch_pred.shape + (types.fp32,)
+        run_compare_builder(
+            build_upsample,
+            input_placeholder_dict,
+            input_value_dict,
+            expected_output_type,
+            torch_pred,
+            compute_unit=compute_unit,
+            backend=backend,
+            rtol=0.5,
+        )
+
+
+class TestUpsampleNearestNeighbor:
+    @pytest.mark.parametrize(
+        "compute_unit, backend", itertools.product(compute_units, backends,)
+    )
+    def test_builder_to_backend_smoke(self, compute_unit, backend):
+        x = np.array([1.5, 2.5, 3.5], dtype=np.float32).reshape([1, 1, 1, 3])
+        input_placeholder_dict = {"x": mb.placeholder(shape=x.shape)}
+        input_value_dict = {"x": x}
+
+        def build(x):
+            return mb.upsample_nearest_neighbor(
+                x=x, scale_factor_height=1, scale_factor_width=2
+            )
+
+        expected_output_type = (1, 1, 1, 6, types.fp32)
+        expected_output = np.array(
+            [1.5, 1.5, 2.5, 2.5, 3.5, 3.5], dtype=np.float32
+        ).reshape([1, 1, 1, 6])
+
+        run_compare_builder(
+            build,
+            input_placeholder_dict,
+            input_value_dict,
+            expected_output_type,
+            expected_output,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+
+class TestCrop:
+    @pytest.mark.parametrize(
+        "compute_unit, backend, is_symbolic",
+        itertools.product(compute_units, backends, [True, False]),
+    )
+    def test_builder_to_backend_smoke(self, compute_unit,
backend, is_symbolic): + x = np.array( + [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], + dtype=np.float32, + ).reshape(1, 1, 4, 4) + + input_shape = list(x.shape) + placeholder_input_shape = input_shape + if is_symbolic: + # set batch and channel dimension symbolic + placeholder_input_shape[0] = get_new_symbol() + placeholder_input_shape[1] = get_new_symbol() + + input_placeholder_dict = {"x": mb.placeholder(shape=placeholder_input_shape)} + input_value_dict = {"x": x} + + def build(x): + return mb.crop(x=x, crop_height=[0, 1], crop_width=[1, 1]) + + expected_output_type = ( + placeholder_input_shape[0], + placeholder_input_shape[1], + 3, + 2, + types.fp32, + ) + expected_output = np.array([2, 3, 6, 7, 10, 11], dtype=np.float32).reshape(1, 1, 3, 2) + + run_compare_builder( + build, + input_placeholder_dict, + input_value_dict, + expected_output_type, + expected_output, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, C, H, W", + itertools.product( + compute_units, + backends, + [x for x in range(2, 4)], + [x for x in range(5, 8)], + [x for x in range(8, 10)], + ), + ) + def test_builder_to_backend_stress(self, compute_unit, backend, C, H, W): + input_shape = (1, C, H, W) + x = np.random.random(input_shape) + + crop_h = [np.random.randint(H)] + crop_h.append(np.random.randint(H - crop_h[0])) + crop_w = [np.random.randint(W)] + crop_w.append(np.random.randint(W - crop_w[0])) + + input_placeholder_dict = {"x": mb.placeholder(shape=input_shape)} + input_value_dict = {"x": x} + + def build(x): + return mb.crop(x=x, crop_height=crop_h, crop_width=crop_w) + + expected_output_type = ( + 1, + C, + H - crop_h[0] - crop_h[1], + W - crop_w[0] - crop_w[1], + types.fp32, + ) + expected_output = x[:, :, crop_h[0] : H - crop_h[1], crop_w[0] : W - crop_w[1]] + + run_compare_builder( + build, + input_placeholder_dict, + input_value_dict, + expected_output_type, + expected_output, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestCropResize: + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product(compute_units, backends), + ) + def test_builder_to_backend_smoke_pad_value(self, compute_unit, backend): + if backend[0] == "neuralnetwork": + pytest.skip("pad_mode only supported on iOS16 or above") + + if ct.utils._macos_version() < (13, 0): + pytest.skip("pad_value not supported in macOS12 or older.") + + x = np.array( + [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], + dtype=np.float32, + ).reshape(1, 1, 4, 4) + + roi = np.array([ + [0, 0.1, 0.3, 1.3, 1], + [0, 0.5, 1.8, 1., 0.3], + [0, 0.0, 0.4, 0.6, 0.7], + ], dtype=np.float32).reshape(3, 1, 5, 1, 1) + + def build(x): + return mb.crop_resize( + x=x, + roi=roi, + target_width=2, + target_height=2, + normalized_coordinates=True, + box_coordinate_mode="CORNERS_HEIGHT_FIRST", + sampling_mode="ALIGN_CORNERS", + pad_value=10.0, + ) + + expected_output_type = [ + (3, 1, 1, 2, 2, types.fp32), + ] + expected_output = [ + np.array([ 3.1, 5.2, 10, 10, 10, 7.899, 10, 13.9, 2.2, 3.1, 9.4, 10.3], dtype=np.float32).reshape(3, 1, 1, 2, 2), + ] + + input_placeholder_dict = {"x": mb.placeholder(shape=(1, 1, 4, 4))} + input_value_dict = {"x": x} + + run_compare_builder( + build, + input_placeholder_dict, + input_value_dict, + expected_output_type, + expected_output, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16, + ) + + + @pytest.mark.parametrize( + "compute_unit, backend, is_symbolic", + 
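# NOTE: when is_symbolic is True, the test replaces the batch and channel
+        # dims with get_new_symbol() placeholders to exercise symbolic shape
+        # propagation; the ROI boxes use "CORNERS_HEIGHT_FIRST", read here as
+        # [h_start, w_start, h_end, w_end] (inferred from the expected crops,
+        # not from the op spec).
+        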
itertools.product(compute_units, backends, [True, False]),
+    )
+    def test_builder_to_backend_smoke(self, compute_unit, backend, is_symbolic):
+        if backend[0] == "mlprogram" and compute_unit != ct.ComputeUnit.CPU_ONLY:
+            pytest.xfail("rdar://97398582 (TestCropResize failing on mlprogram + GPU)")
+        x = np.array(
+            [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]],
+            dtype=np.float32,
+        ).reshape(1, 1, 4, 4)
+
+        input_shape = list(x.shape)
+        placeholder_input_shape = input_shape
+        if is_symbolic:
+            # set batch and channel dimension symbolic
+            placeholder_input_shape[0] = get_new_symbol()
+            placeholder_input_shape[1] = get_new_symbol()
+
+        input_placeholder_dict = {"x": mb.placeholder(shape=placeholder_input_shape)}
+        input_value_dict = {"x": x}
+        N = 1
+        roi = np.array([[1, 1, 2, 2]], dtype=np.float32).reshape(1, 1, 4, 1, 1)
+        roi_normalized = np.array(
+            [[0, 0.0, 0.0, 1.0 / 3, 1.0 / 3]], dtype=np.float32
+        ).reshape(1, 1, 5, 1, 1)
+        roi_invert = np.array([[2, 2, 1, 1]], dtype=np.float32).reshape(1, 1, 4, 1, 1)
+
+        def build(x, mode=0):
+            if mode == 0:
+                return mb.crop_resize(
+                    x=x,
+                    roi=roi,
+                    target_width=2,
+                    target_height=2,
+                    normalized_coordinates=False,
+                    box_coordinate_mode="CORNERS_HEIGHT_FIRST",
+                    sampling_mode="ALIGN_CORNERS",
+                )
+
+            elif mode == 1:
+                return mb.crop_resize(
+                    x=x,
+                    roi=roi,
+                    target_width=4,
+                    target_height=4,
+                    normalized_coordinates=False,
+                    box_coordinate_mode="CORNERS_HEIGHT_FIRST",
+                    sampling_mode="ALIGN_CORNERS",
+                )
+
+            elif mode == 2:
+                return mb.crop_resize(
+                    x=x,
+                    roi=roi,
+                    target_width=1,
+                    target_height=1,
+                    normalized_coordinates=False,
+                    box_coordinate_mode="CORNERS_HEIGHT_FIRST",
+                    sampling_mode="ALIGN_CORNERS",
+                )
+
+            elif mode == 3:
+                return mb.crop_resize(
+                    x=x,
+                    roi=roi_normalized,
+                    target_width=2,
+                    target_height=2,
+                    normalized_coordinates=True,
+                    box_coordinate_mode="CORNERS_HEIGHT_FIRST",
+                    sampling_mode="ALIGN_CORNERS",
+                )
+
+            elif mode == 4:
+                return mb.crop_resize(
+                    x=x,
+                    roi=roi_invert,
+                    target_width=2,
+                    target_height=2,
+                    normalized_coordinates=False,
+                    box_coordinate_mode="CORNERS_HEIGHT_FIRST",
+                    sampling_mode="ALIGN_CORNERS",
+                )
+
+            elif mode == 5:
+                return mb.crop_resize(
+                    x=x,
+                    roi=roi_invert,
+                    target_width=2,
+                    target_height=2,
+                    normalized_coordinates=True,
+                    box_coordinate_mode="CORNERS_HEIGHT_FIRST",
+                    sampling_mode="UNALIGN_CORNERS",
+                )
+
+        expected_output_type = [
+            (
+                N,
+                placeholder_input_shape[0],
+                placeholder_input_shape[1],
+                2,
+                2,
+                types.fp32,
+            ),
+            (
+                N,
+                placeholder_input_shape[0],
+                placeholder_input_shape[1],
+                4,
+                4,
+                types.fp32,
+            ),
+            (
+                N,
+                placeholder_input_shape[0],
+                placeholder_input_shape[1],
+                1,
+                1,
+                types.fp32,
+            ),
+            (
+                N,
+                placeholder_input_shape[0],
+                placeholder_input_shape[1],
+                2,
+                2,
+                types.fp32,
+            ),
+            (
+                N,
+                placeholder_input_shape[0],
+                placeholder_input_shape[1],
+                2,
+                2,
+                types.fp32,
+            ),
+            (
+                N,
+                placeholder_input_shape[0],
+                placeholder_input_shape[1],
+                2,
+                2,
+                types.fp32,
+            ),
+        ]
+        expected_output = [
+            np.array([6, 7, 10, 11], dtype=np.float32).reshape(1, 1, 1, 2, 2),
+            np.array(
+                [
+                    [6, 6.333333, 6.66666, 7],
+                    [7.333333, 7.666666, 8, 8.333333],
+                    [8.666666, 9, 9.3333333, 9.666666],
+                    [10, 10.333333, 10.666666, 11],
+                ],
+                dtype=np.float32,
+            ).reshape(1, 1, 1, 4, 4),
+            np.array([8.5], dtype=np.float32).reshape(1, 1, 1, 1, 1),
+            np.array([1, 2, 5, 6], dtype=np.float32).reshape(1, 1, 1, 2, 2),
+            np.array([11, 10, 7, 6], dtype=np.float32).reshape(1, 1, 1, 2, 2),
+            np.array([3.5, 5.5, 11.5, 13.5], dtype=np.float32).reshape(1,
1, 1, 2, 2), + ] + + for mode in range(6): + # nn-proto does not support UNALIGN_CORNERS + if not (backend[0] == 'neuralnetwork' and mode == 5): + run_compare_builder( + functools.partial(build, mode=mode), + input_placeholder_dict, + input_value_dict, + expected_output_type[mode], + expected_output[mode], + compute_unit=compute_unit, + backend=backend, + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_linear.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_linear.py new file mode 100644 index 00000000..31763287 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_linear.py @@ -0,0 +1,333 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause +import itertools +import platform + +import numpy as np +import pytest + +import coremltools as ct +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.testing_reqs import backends, compute_units +from coremltools.converters.mil.testing_utils import random_gen, ssa_fn + +from .testing_utils import run_compare_builder + + +class TestLinear: + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product(compute_units, backends), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x_val = np.array([[-4.7182, 11.94], [-3.3939, 9.2166]], dtype=np.float32) + weight_val = np.array([[1.2313, -0.095], [-1.4075, -0.8816]], dtype=np.float32) + bias_val = np.array([1.0, 2.0], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [mb.linear(x=x, weight=weight_val, bias=bias_val)] + + expected_output_types = [(2, 2, types.fp32)] + expected_outputs = [ + np.array( + [[-5.9438195, -1.8854373], [-4.054486, -1.3484411]], dtype=np.float32 + ) + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = random_gen(shape=(2, 2), rand_min=-37, rand_max=64) + weight_val = random_gen(shape=(2, 2), rand_min=-91, rand_max=84) + bias_val = random_gen(shape=(2,), rand_min=0.0, rand_max=9.0) + v = mb.linear(x=x_val, weight=weight_val, bias=bias_val) + np.testing.assert_allclose(np.matmul(x_val, weight_val.T) + bias_val, v.val, atol=1e-04, rtol=1e-05) + + @pytest.mark.parametrize( + "compute_unit, backend, rank", + itertools.product(compute_units, backends, [2, 3, 5]), + ) + def test_builder_to_backend_stress(self, compute_unit, backend, rank): + if backend[0] == "mlprogram" and compute_unit != ct.ComputeUnit.CPU_ONLY: + pytest.xfail("rdar://97398733 (TestLinear failing on mlprogram + GPU)") + + if backend[0] == "neuralnetwork" and compute_unit != ct.ComputeUnit.CPU_ONLY and platform.machine() == "arm64" and rank == 5: + pytest.xfail("rdar://98015195 ([M1 native tests] Some MIL unittests are failing on M1 native)") + + x_shape = np.random.randint(low=1, high=3, size=(rank,)) + x_val = np.random.rand(*x_shape) + out_channels = 3 + w_shape = np.array([out_channels, x_shape[-1]]) + weight_val = np.random.rand(*w_shape).astype(np.float32) + bias_val = np.random.rand(out_channels).astype(np.float32) + 
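# NOTE: judging by the expected-output computation below, mb.linear is
+        # assumed to contract the last axis of x with the rows of `weight` and to
+        # broadcast over all leading (batch) dims; the NumPy reference is
+        # np.matmul(x_val, weight_val.T) + bias_val, of shape (..., out_channels).
+        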
input_placeholders = { + "x": mb.placeholder(shape=x_val.shape), + } + input_values = {"x": x_val} + + def build(x): + return [mb.linear(x=x, weight=weight_val, bias=bias_val)] + + expected_outputs = [np.matmul(x_val, np.transpose(weight_val)) + bias_val] + + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs=expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestMatMul: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x_val = np.array([[-4.0, 13.0], [-3.0, 9.0]], dtype=np.float32) + y_val = np.array([[1.0, -7.0], [-1.0, -8.0]], dtype=np.float32) + input_placeholders = { + "x": mb.placeholder(shape=x_val.shape), + "y": mb.placeholder(shape=y_val.shape), + } + input_values = {"x": x_val, "y": y_val} + + def build(x, y): + return [ + mb.matmul(x=x_val, y=y), + mb.matmul(x=x, y=y_val), + mb.matmul(x=x, y=y), + mb.matmul(x=x, y=y, transpose_x=True, transpose_y=True), + mb.matmul(x=x_val, y=y, transpose_x=True, transpose_y=True), + mb.matmul(x=x, y=y_val, transpose_x=True, transpose_y=True), + mb.matmul(x=x, y=y_val, transpose_x=True, transpose_y=False), + mb.matmul(x=x, y=y_val, transpose_x=False, transpose_y=True), + ] + + expected_output_types = [ + (2, 2, types.fp32), + (2, 2, types.fp32), + (2, 2, types.fp32), + (2, 2, types.fp32), + (2, 2, types.fp32), + (2, 2, types.fp32), + (2, 2, types.fp32), + (2, 2, types.fp32), + ] + expected_outputs = [ + np.array([[-17.0, -76.0], [-12.0, -51.0]], dtype=np.float32), + np.array([[-17.0, -76.0], [-12.0, -51.0]], dtype=np.float32), + np.array([[-17.0, -76.0], [-12.0, -51.0]], dtype=np.float32), + np.array([[17.0, 28.0], [-50.0, -85.0]], dtype=np.float32), + np.array([[17.0, 28.0], [-50.0, -85.0]], dtype=np.float32), + np.array([[17.0, 28.0], [-50.0, -85.0]], dtype=np.float32), + np.array([[-1.0, 52.0], [4.0, -163.0]], dtype=np.float32), + np.array([[-95.0, -100.0], [-66.0, -69.0]], dtype=np.float32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = random_gen(shape=(2, 2, 4), rand_min=-37, rand_max=64) + y_val = random_gen(shape=(2, 4, 2), rand_min=-91, rand_max=84) + v = mb.matmul(x=x_val, y=y_val) + np.testing.assert_allclose(np.matmul(x_val, y_val), v.val, atol=1e-04, rtol=1e-05) + + @pytest.mark.parametrize( + "compute_unit, backend, shapes", + itertools.product( + compute_units, + backends, + [ + ((3, 2, 3, 4), (3, 2, 4, 5)), + ((1, 1, 1, 3, 4), (1, 3, 2, 4, 5)), + ((1, 3, 1, 2, 3), (1, 4, 3, 2)), + ((1, 3, 4), (3, 2, 4, 6)), + ((7, 4), (3, 9, 5, 4, 3)), + ], + ), + ) + def test_builder_to_backend_stress(self, compute_unit, backend, shapes): + shape_x, shape_y = shapes + x_val = np.random.rand(*shape_x) + y_val = np.random.rand(*shape_y) + input_placeholders = { + "x": mb.placeholder(shape=x_val.shape), + "y": mb.placeholder(shape=y_val.shape), + } + input_values = {"x": x_val, "y": y_val} + + def build(x, y): + return [mb.matmul(x=x, y=y, transpose_x=False, transpose_y=False)] + + expected_outputs = [np.matmul(x_val, y_val)] + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + + run_compare_builder( + build, + input_placeholders, + input_values, + 
expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, shape_x", + itertools.product( + compute_units, + backends, + [ + (5,), + (2, 5), + (2, 2, 5), + (4, 3, 2, 5), + (5, 4, 2, 3, 5), + ], + ), + ) + def test_builder_y_rank_2_const(self, compute_unit, backend, shape_x): + x_val = np.random.rand(*shape_x) + y_val = np.random.rand(5, 10) + input_placeholders = { + "x": mb.placeholder(shape=x_val.shape), + } + input_values = {"x": x_val} + + def build(x): + return [mb.matmul(x=x, y=y_val, transpose_x=False, transpose_y=False)] + + expected_outputs = [np.matmul(x_val, y_val)] + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestEinsum: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + equation = "abcd,adce->abce" + + x_val = np.arange(12).astype(np.float32).reshape((2, 1, 3, 2)) + y_val = np.arange(48).astype(np.float32).reshape((2, 2, 3, 4)) + input_placeholder_dict = { + "x": mb.placeholder(shape=x_val.shape), + "y": mb.placeholder(shape=y_val.shape), + } + input_value_dict = {"x": x_val, "y": y_val} + out_shape = list(x_val.shape) + out_shape[-1] = y_val.shape[-1] + expected_output_type = tuple(out_shape) + (types.fp32,) + + def build(x, y): + return mb.einsum(values=(x, y), equation=equation) + + expected_output = np.einsum(equation, x_val, y_val) + + run_compare_builder( + build, + input_placeholder_dict, + input_value_dict, + expected_output_type, + expected_output, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, rank, broadcast, backend", + itertools.product( + compute_units, + [3, 4], + [True, False], + backends, + ) + ) + def test_builder_to_backend_stress(self, compute_unit, rank, broadcast, backend): + equation = "abcd,adce->abce" if rank == 4 else "vnm,mno->vno" + shape_x = np.random.randint(low=2, high=16, size=rank).astype(np.int32) + shape_y = np.random.randint(low=2, high=12, size=rank).astype(np.int32) + shape_y[-3] = shape_x[-1] + shape_y[-2] = 1 if broadcast else shape_x[-2] + if rank == 4: + shape_x[-4] = 1 if broadcast else shape_y[-4] + + x_val = np.random.rand(*shape_x) + y_val = np.random.rand(*shape_y) + input_placeholder_dict = { + "x": mb.placeholder(shape=x_val.shape), + "y": mb.placeholder(shape=y_val.shape), + } + + input_value_dict = {"x": x_val, "y": y_val} + out_shape = [shape_y[-4], shape_x[-3], shape_x[-2], shape_y[-1]] if rank == 4 else \ + [shape_x[-3], shape_x[-2], shape_y[-1]] + expected_output_type = tuple(out_shape) + (types.fp32,) + + def build(x, y): + return mb.einsum(values=(x, y), equation=equation) + + if rank == 3: + expected_output = np.einsum(equation, + np.broadcast_to(x_val, [shape_x[-3], shape_x[-2], shape_x[-1]]), + np.broadcast_to(y_val, [shape_y[-3], shape_x[-2], shape_y[-1]])) + else: + expected_output = np.einsum(equation, + np.broadcast_to(x_val, [shape_y[-4], shape_x[-3], shape_x[-2], shape_x[-1]]), + np.broadcast_to(y_val, [shape_y[-4], shape_y[-3], shape_x[-2], shape_y[-1]])) + + run_compare_builder( + build, + input_placeholder_dict, + input_value_dict, + expected_output_type, + expected_output, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + 
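# NOTE: for the equation "bcd,dce->bce" used below, the contraction is
+    # out[b, c, e] = sum_d x[b, c, d] * y[d, c, e]; the op's constant-value
+    # inference is checked directly against np.einsum.
+    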
def test_builder_eval(self): + x_val = np.arange(6).astype(np.float32).reshape((1, 3, 2)) + y_val = np.arange(24).astype(np.float32).reshape((2, 3, 4)) + equation = "bcd,dce->bce" + v = mb.einsum(values=(x_val, y_val), equation=equation) + np.testing.assert_allclose(np.einsum(equation, x_val, y_val), v.val, atol=1e-04, rtol=1e-05) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_normalization.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_normalization.py new file mode 100644 index 00000000..abff161b --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_normalization.py @@ -0,0 +1,751 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools +import platform + +import numpy as np +import pytest + +import coremltools as ct +from coremltools._deps import (_HAS_TF_2, _HAS_TORCH, MSG_TF2_NOT_FOUND, + MSG_TORCH_NOT_FOUND) +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import Function, get_new_symbol, types +from coremltools.converters.mil.testing_reqs import backends, compute_units +from coremltools.converters.mil.testing_utils import random_gen + +from .testing_utils import UNK_SYM, run_compare_builder + +if _HAS_TORCH: + import torch + +if _HAS_TF_2: + import tensorflow as tf + + +class TestNormalizationBatchNorm: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x_val = np.array( + [ + [ + [[-16.0, 13.0], [11.0, -16.0]], + [[13.0, -15.0], [13.0, 9.0]], + [[-9.0, -4.0], [-6.0, 3.0]], + ] + ], + dtype=np.float32, + ) + mean_val = np.array([9.0, 6.0, 3.0], dtype=np.float32) + variance_val = np.array([6.0, 1.0, 7.0], dtype=np.float32) + gamma_val = np.array([1.0, 1.0, 1.0], dtype=np.float32) + beta_val = np.array([1.0, 3.0, 0.0], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [ + mb.batch_norm(x=x, mean=mean_val, variance=variance_val), + mb.batch_norm( + x=x, + mean=mean_val, + variance=variance_val, + gamma=gamma_val, + beta=beta_val, + epsilon=1e-4, + ), + ] + + expected_output_types = [ + (1, 3, 2, 2, types.fp32), + (1, 3, 2, 2, types.fp32), + ] + expected_outputs = [ + np.array( + [ + [ + [[-10.206199, 1.6329918], [0.8164959, -10.206199]], + [[6.999965, -20.999895], [6.999965, 2.9999852]], + [[-4.53557, -2.6457493], [-3.4016776, 0.0]], + ] + ], + dtype=np.float32, + ), + np.array( + [ + [ + [[-9.206122, 2.6329796], [1.8164899, -9.206122]], + [[9.99965, -17.998951], [9.99965, 5.9998503]], + [[-4.535541, -2.6457324], [-3.4016557, 0.0]], + ] + ], + dtype=np.float32, + ), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestNormalizationInstanceNorm: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x_val = np.array( + [ + [ + [[-16.0, 13.0], [11.0, 16.0]], + [[13.0, 15.0], [13.0, 9.0]], + [[-9.0, 4.0], [-6.0, 3.0]], + ], + + [ + [[-5.0, 1.0], [12.0, 3.0]], + 
[[0.0, 9.0], [2.0, -8.0]], + [[2.0, 5.0], [10.0, 0.0]], + + ] + ], + dtype=np.float32, + ) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return mb.instance_norm(x=x, epsilon=1e-2) + + expected_output_types = [(2, 3, 2, 2, types.fp32)] + expected_outputs = [ + np.array( + [ + [ + [[-1.71524656, 0.54576027], [0.38982874, 0.77965748]], + [[0.22917463, 1.14587319], [0.22917463, -1.60422242]], + [[-1.2470212, 1.06887531], [-0.71258354, 0.89072943]], + ], + + [ + [[-1.27070526, -0.28693344], [1.51664821, 0.04099049]], + [[-0.12380638, 1.36187018], [0.20634397, -1.44440776]], + [[-0.59714057, 0.19904686], [1.5260259, -1.12793219]], + ] + ], + dtype=np.float32, + ) + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke_with_gamma_and_beta(self, compute_unit, backend): + x_val = np.array( + [ + [ + [[-16.0, 13.0], [11.0, 16.0]], + [[13.0, 15.0], [13.0, 9.0]], + [[-9.0, 4.0], [-6.0, 3.0]], + ], + + [ + [[-5.0, 1.0], [12.0, 3.0]], + [[0.0, 9.0], [2.0, -8.0]], + [[2.0, 5.0], [10.0, 0.0]], + + ] + ], + dtype=np.float32, + ) + gamma_val = np.array([-9.0, 3.2, 1.3], dtype=np.float32) + beta_val = np.array([-0.8, 3.4, 1.2], dtype=np.float32) + + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return mb.instance_norm(x=x, gamma=gamma_val, beta=beta_val, epsilon=1e-2) + + expected_output_types = [(2, 3, 2, 2, types.fp32)] + expected_outputs = [ + np.array( + [ + [ + [[14.63721807, -5.71184211], [-4.30845865, -7.8169173]], + [[4.1333588, 7.06679399], [4.1333588, -1.73351158]], + [[-0.42112757, 2.58953791], [0.27364139, 2.35794826]], + ], + + [ + [[10.6363473, 1.782401], [-14.44983388, -1.16891443]], + [[3.00381959, 7.75798456], [4.06030069, -1.22210484]], + [[0.42371726, 1.45876091], [3.18383368, -0.26631185]], + ] + ], + dtype=np.float32, + ) + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.skipif(not _HAS_TORCH, reason=MSG_TORCH_NOT_FOUND) + @pytest.mark.parametrize( + "rank, compute_unit, backend, epsilon", + itertools.product( + [3, 4], + compute_units, + backends, + [1e-3, 1e-5, 1e-10] + ), + ) + def test_builder_to_backend_stress(self, rank, compute_unit, backend, epsilon): + shape = np.random.randint(low=2, high=6, size=rank) + x_val = random_gen(shape=shape, rand_min=-100.0, rand_max=100.0) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return mb.instance_norm(x=x, epsilon=epsilon) + + layer = torch.nn.InstanceNorm2d if rank == 4 else torch.nn.InstanceNorm1d + torch_op = layer(num_features=shape[1], eps=epsilon) + expected_outputs = [torch_op(torch.as_tensor(x_val)).numpy()] + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + atol=1e-3, + rtol=1e-4, + also_compare_shapes=True + ) + + +class TestNormalizationL2Norm: + + @staticmethod + def _compute_l2_norm(val, eps): + shape = val.shape + rank = len(shape) + batch_dims = 
rank - 3 + if batch_dims == 0: + square_sum = np.sum(val**2) + output = val/np.power(square_sum + eps, 0.5) + else: + batch_dim_prod = np.prod(shape[:batch_dims]) + reshape_val = np.reshape(val, (batch_dim_prod, -1)) + square_sum = np.sum(reshape_val * reshape_val, axis=1, keepdims=True) + eps + output = reshape_val/np.power(square_sum, 0.5) + output = np.reshape(output, shape) + return output + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x_val = np.array([[[1.0, -7.0], [5.0, -6.0], [-3.0, -5.0]]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [mb.l2_norm(x=x, epsilon=1e-10)] + + expected_output_types = [(1, 3, 2, types.fp32)] + expected_outputs = [ + np.array( + [ + [ + [0.08304548, -0.58131838], + [0.41522741, -0.4982729], + [-0.24913645, -0.41522741], + ] + ], + dtype=np.float32, + ) + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank, epsilon", + itertools.product( + compute_units, + backends, + [3, 4, 5], + [1e-4, 5.7] + ) + ) + def test_builder_to_backend_stress(self, compute_unit, backend, rank, epsilon): + shape = np.random.randint(low=2, high=6, size=rank) + x_val = random_gen(shape=shape, rand_min=-1.0, rand_max=1.0) + input_placeholders = {"x": mb.placeholder(shape=shape)} + input_values = {"x": x_val} + + def build(x): + return [mb.l2_norm(x=x, epsilon=epsilon)] + + output = TestNormalizationL2Norm._compute_l2_norm(x_val, epsilon) + expected_output_types = [list(output.shape) + [types.fp32]] + expected_outputs = [ + output + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize("rank, epsilon", + itertools.product( + [3, 4, 5], + [1e-4, 11.2], + ), + ) + def test_builder_eval_stress(self, rank, epsilon): + shape = np.random.randint(low=2, high=6, size=rank) + x_val = random_gen(shape=shape, rand_min=-1, rand_max=1) + with Function({}): + res = mb.l2_norm(x=x_val, epsilon=epsilon) + ref = TestNormalizationL2Norm._compute_l2_norm(x_val, epsilon) + np.testing.assert_allclose(ref, res.val, atol=1e-6, rtol=1e-5) + + +class TestNormalizationLayerNorm: + + @staticmethod + def _keras_layer_norm(x, axes, epsilon): + layer = tf.keras.layers.LayerNormalization(axis=axes, epsilon=epsilon) + data = tf.constant(x, dtype=tf.float32) + output = layer(data) + return output.numpy() + + @staticmethod + def _np_layer_norm(x, axes, gamma=None, beta=None, epsilon=1e-5): + rank = len(x.shape) + axes = [axis + rank if axis < 0 else axis for axis in axes] + normalized_shape = [x.shape[i] if i in axes else 1 for i in range(rank)] + gamma = np.ones(shape=normalized_shape) if gamma is None else np.reshape(gamma, normalized_shape) + beta = np.zeros(shape=normalized_shape) if beta is None else np.reshape(beta, normalized_shape) + num = x - np.mean(x, axis=tuple(axes), keepdims=True) + dem = np.sqrt( + np.sum(np.square(num), axis=tuple(axes), keepdims=True) + / np.prod(normalized_shape) + + epsilon + ) + return num / dem * gamma + beta + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def 
test_builder_to_backend_smoke(self, compute_unit, backend): + x_val = np.array([[[1.0, -7.0], [5.0, -6.0], [-3.0, -5.0]]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + gamma_val = np.array([1.0, 1.0], dtype=np.float32) + beta_val = np.array([1.0, 0.0], dtype=np.float32) + + def build(x): + return [ + # V2->V1 lowering (op_mappings.py): if branch + mb.layer_norm(x=x, axes=[2], epsilon=1e-4), + # V2->V1 lowering (op_mappings.py): else branch + mb.layer_norm(x=x, axes=[-2, -1], epsilon=1e-4), + # V2->V1 lowering (op_mappings.py): if branch with scale + mb.layer_norm(x=x, axes=[2], epsilon=1e-4, gamma=gamma_val, beta=beta_val), + ] + + expected_output_types = [(1, 3, 2, types.fp32), (1, 3, 2, types.fp32), (1, 3, 2, types.fp32)] + expected_outputs = [ + np.array( + [ + [ + [0.9999969, -0.9999969 ], + [0.99999833, -0.99999833], + [0.99995005, -0.99995005], + ] + ], + dtype=np.float32, + ), + np.array( + [ + [ + [0.82687193, -1.06312108], + [1.77186835, -0.82687193], + [-0.11812456, -0.59062278], + ] + ], + dtype=np.float32, + ), + np.array( + [ + [ + [1.9999969, -0.9999969 ], + [1.99999833, -0.99999833], + [1.99995005, -0.99995005], + ] + ], + dtype=np.float32, + ), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke_rank_2(self, compute_unit, backend): + x_val = np.array([[1.0, -7.0], [5.0, -6.0], [-3.0, -5.0]], dtype=np.float32) + gamma_val = np.array([1.0, 1.0], dtype=np.float32) + beta_val = np.array([1.0, 0.0], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [ + # V2->V1 lowering (op_mappings.py): if branch + mb.layer_norm(x=x, axes=[1], epsilon=1e-4), + mb.layer_norm(x=x, axes=[1], epsilon=1e-4, gamma=gamma_val, beta=beta_val) + ] + + expected_output_types = [(3, 2, types.fp32), (3, 2, types.fp32)] + expected_outputs = [ + np.array( + [ + [ 0.9999969, -0.9999969 ], + [ 0.99999833, -0.99999833], + [ 0.99995005, -0.99995005], + ], + dtype=np.float32, + ), + np.array( + [ + [ 1.9999969, -0.9999969 ], + [ 1.99999833, -0.99999833], + [ 1.99995005, -0.99995005], + ], + dtype=np.float32, + ), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke_with_dynamic_shape(self, compute_unit, backend): + x_val = np.array([[[1.0, -7.0], [5.0, -6.0], [-3.0, -5.0]]], dtype=np.float32) + shape = (get_new_symbol(), get_new_symbol(), 2) + input_placeholders = {"x": mb.placeholder(shape=shape)} + input_values = {"x": x_val} + + def build(x): + return [ + mb.layer_norm(x=x, axes=[2], epsilon=1e-4), + ] + + expected_output_types = [(UNK_SYM, UNK_SYM, 2, types.fp32)] + expected_outputs = [ + np.array( + [ + [ + [ 0.9999969, -0.9999969 ], + [ 0.99999833, -0.99999833], + [ 0.99995005, -0.99995005], + ] + ], + dtype=np.float32, + ), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, 
backend, rank_and_axes, epsilon, provides_gamma_beta", + itertools.product( + compute_units, + backends, + [ + [3, [0, 2]], + [3, [-2]], + [4, [0, 1, 3]], + [5, [0, 4]], + [5, [-5, -4, -3, -2, -1]] + ], + [0.0001, 0.01], + [True, False] + ), + ) + def test_builder_to_backend_stress_numpy(self, compute_unit, backend, rank_and_axes, epsilon, provides_gamma_beta): + + if backend == ("mlprogram", "fp16") and compute_unit != ct.ComputeUnit.CPU_ONLY: + pytest.xfail("rdar://80662357 ([GPU failures] LayerNorm FP16 tests failing on GPU with numerical errors)") + + if backend[0] == "neuralnetwork" and compute_unit != ct.ComputeUnit.CPU_ONLY and platform.machine() == "arm64": + pytest.xfail("rdar://98015195 ([M1 native tests] Some MIL unittests are failing on M1 native)") + + rank, axes = rank_and_axes + shape = np.random.randint(low=2, high=6, size=rank) + x_val = random_gen(shape=shape, rand_min=-100.0, rand_max=100.0) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + gamma, beta = None, None + + if provides_gamma_beta: + positive_axes = [axis+rank if axis < 0 else axis for axis in axes] + normalized_shape = [shape[i] for i in range(rank) if i in positive_axes] + gamma = random_gen(shape=normalized_shape, rand_min=-100, rand_max=100) + beta = random_gen(shape=normalized_shape, rand_min=-100, rand_max=100) + + def build(x): + return [ + mb.layer_norm(x=x, axes=axes, epsilon=epsilon, gamma=gamma, beta=beta) + ] + + output = TestNormalizationLayerNorm._np_layer_norm(x=x_val, axes=axes, epsilon=epsilon, gamma=gamma, beta=beta) + expected_output_types = [tuple(output.shape) + (types.fp32,)] + expected_outputs = [ + output + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + atol=1e-3, + rtol=1e-4, + ) + + @pytest.mark.skipif(not _HAS_TF_2, reason=MSG_TF2_NOT_FOUND) + @pytest.mark.parametrize( + "compute_unit, backend, rank_and_axes, epsilon", + itertools.product( + compute_units, + backends, + [ + [3, [0, 2]], + [3, [-2]], + [4, [0, 1, 3]], + [5, [0, 4]], + [5, [-5, -4, -3, -2, -1]] + ], + [0.0001, 0.01] + ), + ) + def test_builder_to_backend_stress_keras(self, compute_unit, backend, rank_and_axes, epsilon): + rank, axes = rank_and_axes + shape = np.random.randint(low=2, high=6, size=rank) + x_val = random_gen(shape=shape, rand_min=-100.0, rand_max=100.0) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [ + mb.layer_norm(x=x, axes=axes, epsilon=epsilon) + ] + + output = TestNormalizationLayerNorm._keras_layer_norm(x=x_val, axes=axes, epsilon=epsilon) + expected_output_types = [tuple(output.shape) + (types.fp32,)] + expected_outputs = [ + output + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize("rank_and_axes, epsilon", + itertools.product( + [ + [3, [0, 2]], + [3, [-2, -1]], + [4, [0, 1, 2, 3]], + [5, [0, 2, -1]], + [5, [-5, -4, -3, -2, -1]] + ], + [0.0001, 0.01], + ), + ) + def test_builder_eval_stress(self, rank_and_axes, epsilon): + rank, axes = rank_and_axes + shape = np.random.randint(low=2, high=6, size=rank) + x_val = random_gen(shape=shape, rand_min=-100.0, rand_max=100.0) + positive_axes = [axis+rank if axis < 0 else axis for axis in axes] + normalized_shape = [shape[i] for i in range(rank) if i in 
positive_axes] + gamma_val = random_gen(shape=normalized_shape, rand_min=-100, rand_max=100) + beta_val = random_gen(shape=normalized_shape, rand_min=-100, rand_max=100) + with Function({}): + res = mb.layer_norm(x=x_val, axes=axes, epsilon=epsilon, gamma=gamma_val, beta=beta_val) + ref = TestNormalizationLayerNorm._np_layer_norm(x=x_val, axes=axes, epsilon=epsilon, gamma=gamma_val, beta=beta_val) + np.testing.assert_allclose(ref, res.val, atol=1e-04, rtol=1e-05) + + +class TestNormalizationLocalResponseNorm: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x_val = np.array([[[1.0, -7.0], [5.0, -6.0], [-3.0, -5.0]]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [ + mb.local_response_norm(x=x, size=2), + mb.local_response_norm(x=x, size=3, alpha=0.0001, beta=0.75, k=1.0), + ] + + expected_output_types = [(1, 3, 2, types.fp32), (1, 3, 2, types.fp32)] + expected_outputs = [ + np.array( + [ + [ + [0.99996257, -6.98716545], + [4.99531746, -5.99191284], + [-2.99898791, -4.99531746], + ] + ], + dtype=np.float32, + ), + np.array( + [ + [ + [0.99997497, -6.99143696], + [4.99687672, -5.99460602], + [-2.99932504, -4.99687672], + ] + ], + dtype=np.float32, + ), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.skipif(not _HAS_TORCH, reason=MSG_TORCH_NOT_FOUND) + @pytest.mark.parametrize( + "compute_unit, backend, rank, size, alpha, beta, k", + itertools.product( + compute_units, + backends, + [rank for rank in range(3, 6)], + [2, 3, 5], + [0.0001, 0.01], + [0.75, 1.0], + [1.0, 2.0], + ), + ) + def test_builder_to_backend_stress( + self, compute_unit, backend, rank, size, alpha, beta, k + ): + shape = np.random.randint(low=2, high=5, size=rank) + x_val = random_gen(shape=shape) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return mb.local_response_norm(x=x, size=size, alpha=alpha, beta=beta, k=k) + + torch_lrn = torch.nn.LocalResponseNorm(size=size, alpha=alpha, beta=beta, k=k) + expected_outputs = [torch_lrn(torch.as_tensor(x_val)).numpy()] + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + atol=1e-2, + rtol=1e-3, + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_pool.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_pool.py new file mode 100644 index 00000000..a42f3fb3 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_pool.py @@ -0,0 +1,494 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools + +import numpy as np +import pytest + +import coremltools as ct +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.testing_reqs import backends, compute_units + +from .testing_utils import run_compare_builder + + +class TestAvgPool: + @pytest.mark.parametrize( + "compute_unit, backend, inputshape_kernelshape", + itertools.product( + compute_units, + backends, + [ + [(1, 1, 2), (2,)], + [(1, 1, 2, 2), (2, 2)], + [(1, 1, 2, 2, 2), (2, 2, 2)], + ] + ), + ) + def test_avgpool_builder_to_backend_smoke_samelower_padtype( + self, compute_unit, backend, inputshape_kernelshape + ): + input_shape, kernel_shape = inputshape_kernelshape + rank = len(input_shape) - 2 + + if backend[0] == "neuralnetwork" and rank == 3: + pytest.skip( + "pad_type `same_lower` not supported for 3d pooling in neuralnetwork backend" + ) + if backend[0] == "mlprogram" and rank == 1: + pytest.xfail( + "rdar://98852008 (MIL backend producing wrong result for 1d pooling with pad_type " + "same_lower)" + ) + if backend[0] == "mlprogram" and ct.utils._macos_version() < (13, 0): + pytest.skip("same_lower pad_type not supported in macOS12 or older.") + + minimum_deployment_target = ct.target.iOS16 if backend[0] == "mlprogram" else None + + x_val = np.arange(1, np.prod(input_shape) + 1).reshape(*input_shape).astype(np.float32) + + if rank == 1: + expected_output_val = [0.5, 1.5] + elif rank == 2: + expected_output_val = [0.25, 0.75, 1, 2.5] + else: + expected_output_val = [0.125, 0.375, 0.5, 1.25, 0.75, 1.75, 2, 4.5] + + expected_output_types = [input_shape + (types.fp32,)] + expected_outputs = [np.array(expected_output_val).reshape(*input_shape).astype(np.float32)] + input_values = {"x": x_val} + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + + def build(x): + return mb.avg_pool( + x=x, + kernel_sizes=kernel_shape, + pad_type="same_lower", + ) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=minimum_deployment_target, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, num_dims", + itertools.product( + compute_units, + backends, + [1, 2, 3] + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend, num_dims): + kernel_sizes = [1, 2, 3] + strides = [2, 1, 3] + + if num_dims == 1: + x_val = np.array([[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]]], dtype=np.float32) + expected_output_types = [(1, 1, 4, types.fp32), (1, 1, 3, types.fp32)] + expected_outputs = [ + np.array([[[1.0, 3.0, 5.0, 7.0]]], dtype=np.float32), + np.array([[[1.5, 4.0, 6.5]]], dtype=np.float32), + ] + elif num_dims == 2: + x_val = np.array( + [ + [ + [[-10.80291205, -6.42076184], [-7.07910997, 9.1913279]], + [[-3.18181497, 0.9132147], [11.9785544, 7.92449539]], + ] + ], + dtype=np.float32, + ) + expected_output_types = [(1, 2, 1, 1, types.fp32), (1, 2, 2, 1, types.fp32)] + expected_outputs = [ + np.array([[[[-8.611837]], [[-1.1343001]]]], dtype=np.float32), + np.array( + [[[[-3.7778642], [1.056109]], [[4.4086123], [9.951525]]]], + dtype=np.float32, + ), + ] + else: # num_dims == 3 + x_val = np.array( + [ + [ + [ + [[-1, -5, -1], [-3, -3, 8], [2, 6, 2]], + [[-4, 7, -4], [4, 6, 7], [4, 4, 8]], + [[5, -3, 5], [0, -5, 8], [1, 
7, 2]], + ] + ], + [ + [ + [[7, -3, -5], [5, 4, 7], [-2, -4, -3]], + [[-4, 3, -1], [6, -4, 4], [3, 6, 2]], + [[-1, 4, -4], [-2, -1, -2], [3, 2, 8]], + ] + ], + ], + dtype=np.float32, + ) + expected_output_types = [ + (2, 1, 2, 2, 1, types.fp32), + (2, 1, 2, 3, 1, types.fp32), + ] + expected_outputs = [ + np.array( + [ + [[[[-0.8333334], [2.0]], [[1.6666667], [2.1666667]]]], + [[[[2.5], [1.1666667]], [[-1.0], [1.3333334]]]], + ], + dtype=np.float32, + ), + np.array( + [ + [ + [ + [[-0.8333334], [2.0], [3.3333335]], + [[1.6666667], [2.1666667], [3.3333335]], + ] + ], + [ + [ + [[2.5], [1.1666667], [-3.0]], + [[-1.0], [1.3333334], [4.3333335]], + ] + ], + ], + dtype=np.float32, + ), + ] + + input_values = {"x": x_val} + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + + def build(x): + return [ + mb.avg_pool( + x=x, + kernel_sizes=kernel_sizes[:num_dims], + strides=strides[:num_dims], + pad_type="valid", + ), + mb.avg_pool( + x=x, + kernel_sizes=kernel_sizes[-num_dims:], + strides=strides[-num_dims:], + pad_type="same", + exclude_padding_from_average=True, + ), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestMaxPool: + + @pytest.mark.parametrize( + "compute_unit, backend, inputshape_kernelshape", + itertools.product( + compute_units, + backends, + [ + [(1, 1, 2), (2,)], + [(1, 1, 2, 2), (2, 2)], + [(1, 1, 2, 2, 2), (2, 2, 2)], + ] + ), + ) + def test_maxpool_builder_to_backend_smoke_samelower_padtype( + self, compute_unit, backend, inputshape_kernelshape + ): + input_shape, kernel_shape = inputshape_kernelshape + rank = len(input_shape) - 2 + + if backend[0] == "neuralnetwork" and rank == 3: + pytest.skip( + "pad_type `same_lower` not supported for 3d pooling in neuralnetwork backend" + ) + if backend[0] == "mlprogram" and rank == 1: + pytest.xfail( + "rdar://98852008 (MIL backend producing wrong result for 1d pooling with pad_type " + "same_lower)" + ) + if backend[0] == "mlprogram" and ct.utils._macos_version() < (13, 0): + pytest.skip("same_lower pad_type not supported in macOS12 or older.") + + minimum_deployment_target = ct.target.iOS16 if backend[0] == "mlprogram" else None + + x_val = np.arange(1, np.prod(input_shape) + 1).reshape(*input_shape).astype(np.float32) + + if rank == 1: + expected_output_val = [1, 2] + elif rank == 2: + expected_output_val = [1, 2, 3, 4] + else: + expected_output_val = [1, 2, 3, 4, 5, 6, 7, 8] + + expected_output_types = [input_shape + (types.fp32,)] + expected_outputs = [np.array(expected_output_val).reshape(*input_shape).astype(np.float32)] + input_values = {"x": x_val} + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + + def build(x): + return mb.max_pool( + x=x, + kernel_sizes=kernel_shape, + pad_type="same_lower", + ) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=minimum_deployment_target, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, num_dims", + itertools.product( + compute_units, + backends, + [1, 2, 3] + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend, num_dims): + kernel_sizes = [1, 2, 3] + strides = [2, 1, 3] + + if num_dims == 1: + x_val = np.array([[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]]], dtype=np.float32) + expected_output_types = [(1, 1, 4, types.fp32), (1, 1, 3, types.fp32)] + expected_outputs = 
[ + np.array([[[1.0, 3.0, 5.0, 7.0]]], dtype=np.float32), + np.array([[[2.0, 5.0, 7.0]]], dtype=np.float32), + ] + elif num_dims == 2: + x_val = np.array( + [ + [ + [[-10.80291205, -6.42076184], [-7.07910997, 9.1913279]], + [[-3.18181497, 0.9132147], [11.9785544, 7.92449539]], + ] + ], + dtype=np.float32, + ) + expected_output_types = [(1, 2, 1, 1, types.fp32), (1, 2, 2, 1, types.fp32)] + expected_outputs = [ + np.array([[[[-6.42076184]], [[0.9132147]]]], dtype=np.float32), + np.array( + [[[[9.191328], [9.191328]], [[11.978555], [11.978555]]]], + dtype=np.float32, + ), + ] + else: # num_dims == 3 + x_val = np.array( + [ + [ + [ + [[-1, -5, -1], [-3, -3, 8], [2, 6, 2]], + [[-4, 7, -4], [4, 6, 7], [4, 4, 8]], + [[5, -3, 5], [0, -5, 8], [1, 7, 2]], + ] + ], + [ + [ + [[7, -3, -5], [5, 4, 7], [-2, -4, -3]], + [[-4, 3, -1], [6, -4, 4], [3, 6, 2]], + [[-1, 4, -4], [-2, -1, -2], [3, 2, 8]], + ] + ], + ], + dtype=np.float32, + ) + expected_output_types = [ + (2, 1, 2, 2, 1, types.fp32), + (2, 1, 2, 3, 1, types.fp32), + ] + expected_outputs = [ + np.array( + [ + [[[[8.0], [8.0]], [[8.0], [8.0]]]], + [[[[7.0], [7.0]], [[4.0], [8.0]]]], + ], + dtype=np.float32, + ), + np.array( + [ + [[[[8.0], [8.0], [6.0]], [[8.0], [8.0], [7.0]]]], + [[[[7.0], [7.0], [-2.0]], [[4.0], [8.0], [8.0]]]], + ], + dtype=np.float32, + ), + ] + + input_values = {"x": x_val} + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + + def build(x): + return [ + mb.max_pool( + x=x, + kernel_sizes=kernel_sizes[:num_dims], + strides=strides[:num_dims], + pad_type="valid", + ), + mb.max_pool( + x=x, + kernel_sizes=kernel_sizes[-num_dims:], + strides=strides[-num_dims:], + pad_type="same", + ), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestL2Pool: + + @pytest.mark.parametrize( + "compute_unit, backend, inputshape_kernelshape", + itertools.product( + compute_units, + backends, + [ + [(1, 1, 2), (2,)], + [(1, 1, 2, 2), (2, 2)], + ] + ), + ) + def test_l2pool_builder_to_backend_smoke_samelower_padtype( + self, compute_unit, backend, inputshape_kernelshape + ): + input_shape, kernel_shape = inputshape_kernelshape + rank = len(input_shape) - 2 + + if backend[0] == "mlprogram" and rank == 1: + pytest.xfail( + "rdar://98852008 (MIL backend producing wrong result for 1d pooling with pad_type " + "same_lower)" + ) + if backend[0] == "mlprogram" and ct.utils._macos_version() < (13, 0): + pytest.skip("same_lower pad_type not supported in macOS12 or older.") + + minimum_deployment_target = ct.target.iOS16 if backend[0] == "mlprogram" else None + + x_val = np.arange(1, np.prod(input_shape) + 1).reshape(*input_shape).astype(np.float32) + + if rank == 1: + expected_output_val = [1, 2.236068] + else: + expected_output_val = [1, 2.236068, 3.162278, 5.477226] + + expected_output_types = [input_shape + (types.fp32,)] + expected_outputs = [np.array(expected_output_val).reshape(*input_shape).astype(np.float32)] + input_values = {"x": x_val} + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + + def build(x): + return mb.l2_pool( + x=x, + kernel_sizes=kernel_shape, + pad_type="same_lower", + ) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=minimum_deployment_target, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, num_dims", + 
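# NOTE: l2_pool takes sqrt(sum(x ** 2)) over each pooling window; e.g. in
+        # the same_lower smoke test above, the 1-D input [1, 2] with kernel size 2
+        # yields sqrt(1) = 1 and sqrt(1 + 4) = 2.236068.
+        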
itertools.product(compute_units, backends, [1, 2]), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend, num_dims): + kernel_sizes = [1, 2, 3] + strides = [2, 1, 3] + + if num_dims == 1: + x_val = np.array([[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]]], dtype=np.float32) + expected_output_types = [(1, 1, 4, types.fp32), (1, 1, 3, types.fp32)] + expected_outputs = [ + np.array([[[1.0, 3.0, 5.0, 7.0]]], dtype=np.float32), + np.array([[[2.236068, 7.071068, 9.219544]]], dtype=np.float32), + ] + elif num_dims == 2: + x_val = np.array( + [[[[-10.0, -6.0], [-7.0, 9.0]], [[-3.0, 0.0], [11.0, 7.0]]]], + dtype=np.float32, + ) + expected_output_types = [(1, 2, 1, 1, types.fp32), (1, 2, 2, 1, types.fp32)] + expected_outputs = [ + np.array([[[[11.66190338]], [[3.0]]]], dtype=np.float32), + np.array( + [[[[16.309507], [11.401754]], [[13.379088], [13.038404]]]], + dtype=np.float32, + ), + ] + else: # num_dims == 3 + pass # Enum PoolingType3D has no value defined for name L2 + + input_values = {"x": x_val} + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + + def build(x): + return [ + mb.l2_pool( + x=x, + kernel_sizes=kernel_sizes[:num_dims], + strides=strides[:num_dims], + pad_type="valid", + ), + mb.l2_pool( + x=x, + kernel_sizes=kernel_sizes[-num_dims:], + strides=strides[-num_dims:], + pad_type="same", + ), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_random.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_random.py new file mode 100644 index 00000000..abfb9dd3 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_random.py @@ -0,0 +1,443 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools + +import numpy as np +import pytest + +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.testing_reqs import backends, compute_units +from coremltools.converters.mil.testing_utils import get_core_ml_prediction +from coremltools.models.utils import _macos_version + +from .testing_utils import UNK_SYM, run_compare_builder + + +class TestRandomBernoulli: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + + x_val = np.array([0.0], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [ + mb.add(x=x, y=x), + mb.random_bernoulli(shape=np.array([2, 1, 3], np.int32), prob=1.0), + mb.random_bernoulli(shape=np.array([3, 1, 2], np.int32), prob=0.0), + ] + + expected_outputs = [ + np.array(np.zeros(shape=(1,)), np.float32), + np.array(np.ones(shape=(2, 1, 3)), np.float32), + np.array(np.zeros(shape=(3, 1, 2)), np.float32), + ] + + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs=expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank, prob, dynamic", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)], + [1.0, 0.0], + [True, False], + ), + ) + def test_builder_to_backend_stress( + self, compute_unit, backend, rank, prob, dynamic + ): + shape = np.random.randint(low=1, high=4, size=rank).astype(np.int32) + x_val = np.array([0.0], dtype=np.float32) + if dynamic: + input_placeholders = { + "x": mb.placeholder(shape=x_val.shape), + "dyn_shape": mb.placeholder(shape=shape.shape, dtype=types.int32), + } + input_values = {"x": x_val, "dyn_shape": shape} + else: + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [mb.add(x=x, y=x), mb.random_bernoulli(shape=shape, prob=prob)] + + def build_dyn(x, dyn_shape): + return [mb.add(x=x, y=x), mb.random_bernoulli(shape=dyn_shape, prob=prob)] + + expected_outputs = [ + np.array(np.zeros(shape=(1,)), np.float32), + np.random.binomial(1, prob, shape), + ] + + if dynamic: + expected_output_types = [ + tuple([UNK_SYM for _ in o.shape]) + (types.fp32,) + for o in expected_outputs + ] + else: + expected_output_types = [ + o.shape[:] + (types.fp32,) for o in expected_outputs + ] + + builder = build_dyn if dynamic else build + + run_compare_builder( + builder, + input_placeholders, + input_values, + expected_output_types, + expected_outputs=expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestRandomCategorical: + def softmax(self, data): + e_data = np.exp(data - np.max(data)) + return e_data / e_data.sum() + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x_val = np.array([1], dtype=np.int32) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [ + 
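# NOTE: x is a single row holding one logit, so every draw can only land
+                # on class 0, hence the all-zero expected outputs; the output shape
+                # is assumed to be x.shape[:-1] + (size,), with size defaulting to 1.
+                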
mb.random_categorical(x=x, seed=1), + mb.random_categorical(x=x, seed=1, size=4), + ] + + expected_outputs = [ + np.array(np.zeros(shape=(1,)), dtype=np.float32), + np.array(np.zeros(shape=(4,)), dtype=np.float32), + ] + + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs=expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.skipif(_macos_version() < (12, 0), reason="Can only get predictions for ml program on macOS 12+") + @pytest.mark.parametrize( + "compute_unit, backend, n_sample, n_class", + itertools.product( + compute_units, + backends, + [50000], + [2, 10, 20] + ), + ) + def test_builder_to_backend_stress(self, compute_unit, backend, n_sample, n_class): + output_name = "random_categorical" + logits = np.random.rand(2, n_class) + probs = [self.softmax(logits[0]), self.softmax(logits[1])] + + # Test logits input + input_placeholders = {"x": mb.placeholder(shape=(2, n_class))} + input_values = {"x": logits} + + def build(x): + return [ + mb.random_categorical( + x=x, size=n_sample, mode="logits", name=output_name + ) + ] + + prediction = get_core_ml_prediction( + build, input_placeholders, input_values, backend=backend, compute_unit=compute_unit, + ) + + ref0 = np.random.multinomial(n_sample, probs[0]) + ref1 = np.random.multinomial(n_sample, probs[1]) + + pred0 = prediction[output_name].reshape(2, n_sample)[0] + pred1 = prediction[output_name].reshape(2, n_sample)[1] + + # convert to bincount and validate probabilities + pred0 = np.bincount(np.array(pred0).astype(np.int32), minlength=n_class) + pred1 = np.bincount(np.array(pred1).astype(np.int32), minlength=n_class) + + assert np.allclose(np.true_divide(pred0, n_sample), probs[0], atol=1e-2) + assert np.allclose( + np.true_divide(pred0, n_sample), + np.true_divide(ref0, n_sample), + atol=1e-2, + ) + + assert np.allclose(np.true_divide(pred1, n_sample), probs[1], atol=1e-2) + assert np.allclose( + np.true_divide(pred1, n_sample), + np.true_divide(ref1, n_sample), + atol=1e-2, + ) + + # Test probs input + input_placeholders = {"x": mb.placeholder(shape=(2, n_class))} + input_values = {"x": np.array(probs)} + + def build(x): + return [ + mb.random_categorical( + x=x, size=n_sample, mode="probs", name=output_name + ) + ] + + prediction = get_core_ml_prediction( + build, input_placeholders, input_values, backend=backend, compute_unit=compute_unit + ) + + pred0 = prediction[output_name].reshape(2, n_sample)[0] + pred1 = prediction[output_name].reshape(2, n_sample)[1] + + # convert to bincount and validate probabilities + pred0 = np.bincount(np.array(pred0).astype(np.int32), minlength=n_class) + pred1 = np.bincount(np.array(pred1).astype(np.int32), minlength=n_class) + + assert np.allclose(np.true_divide(pred0, n_sample), probs[0], atol=1e-2) + assert np.allclose( + np.true_divide(pred0, n_sample), + np.true_divide(ref0, n_sample), + atol=1e-2, + ) + + assert np.allclose(np.true_divide(pred1, n_sample), probs[1], atol=1e-2) + assert np.allclose( + np.true_divide(pred1, n_sample), + np.true_divide(ref1, n_sample), + atol=1e-2, + ) + + +class TestRandomNormal: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x_val = np.array([0.0], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def 
build(x): + return [ + mb.add(x=x, y=x), + mb.random_normal( + shape=np.array([2, 1, 3], np.int32), mean=1.0, stddev=0.0 + ), + mb.random_normal( + shape=np.array([3, 1, 2], np.int32), mean=0.0, stddev=0.0 + ), + ] + + expected_outputs = [ + np.array(np.zeros(shape=(1,)), np.float32), + np.array(np.ones(shape=(2, 1, 3)), np.float32), + np.array(np.zeros(shape=(3, 1, 2)), np.float32), + ] + + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs=expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank, mean, dynamic", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)], + [1.0, 0.0], + [True, False], + ), + ) + def test_builder_to_backend_stress( + self, compute_unit, backend, rank, mean, dynamic + ): + shape = np.random.randint(low=1, high=4, size=rank).astype(np.int32) + x_val = np.array([0.0], dtype=np.float32) + if dynamic: + input_placeholders = { + "x": mb.placeholder(shape=x_val.shape), + "dyn_shape": mb.placeholder(shape=shape.shape, dtype=types.int32), + } + input_values = {"x": x_val, "dyn_shape": shape} + else: + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [ + mb.add(x=x, y=x), + mb.random_normal(shape=shape, mean=mean, stddev=0.0), + ] + + def build_dyn(x, dyn_shape): + return [ + mb.add(x=x, y=x), + mb.random_normal(shape=dyn_shape, mean=mean, stddev=0.0), + ] + + expected_outputs = [ + np.array(np.zeros(shape=(1,)), np.float32), + np.random.normal(loc=mean, scale=0.0, size=shape), + ] + + if dynamic: + expected_output_types = [ + tuple([UNK_SYM for _ in o.shape]) + (types.fp32,) + for o in expected_outputs + ] + else: + expected_output_types = [ + o.shape[:] + (types.fp32,) for o in expected_outputs + ] + + builder = build_dyn if dynamic else build + run_compare_builder( + builder, + input_placeholders, + input_values, + expected_output_types, + expected_outputs=expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestRandomUniform: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x_val = np.array([0.0], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [ + mb.add(x=x, y=x), + mb.random_uniform( + shape=np.array([2, 1, 3], np.int32), low=0.0, high=0.0 + ), + mb.random_uniform( + shape=np.array([3, 1, 2], np.int32), low=1.0, high=1.0 + ), + ] + + expected_outputs = [ + np.array(np.zeros(shape=(1,)), np.float32), + np.array(np.zeros(shape=(2, 1, 3)), np.float32), + np.array(np.ones(shape=(3, 1, 2)), np.float32), + ] + + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs=expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank, low, high, dynamic", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)], + [0.0], + [0.0], + [True, False], + ), + ) + def test_builder_to_backend_stress( + self, compute_unit, backend, rank, low, high, dynamic + ): + shape = np.random.randint(low=1, high=4, 
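+ # low == high in this stress test, so random_uniform is deterministic and
+ # directly comparable to a fixed expected output; only the shape drawn
+ # here (each dim in {1, 2, 3}) varies from run to run.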
size=rank).astype(np.int32) + x_val = np.array([0.0], dtype=np.float32) + if dynamic: + input_placeholders = { + "x": mb.placeholder(shape=x_val.shape), + "dyn_shape": mb.placeholder(shape=shape.shape, dtype=types.int32), + } + input_values = {"x": x_val, "dyn_shape": shape} + else: + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [ + mb.add(x=x, y=x), + mb.random_uniform(shape=shape, low=low, high=high), + ] + + def build_dyn(x, dyn_shape): + return [ + mb.add(x=x, y=x), + mb.random_uniform(shape=dyn_shape, low=low, high=high), + ] + + expected_outputs = [ + np.array(np.zeros(shape=(1,)), np.float32), + np.random.uniform(low=low, high=high, size=shape), + ] + + if dynamic: + expected_output_types = [ + tuple([UNK_SYM for _ in o.shape]) + (types.fp32,) + for o in expected_outputs + ] + else: + expected_output_types = [ + o.shape[:] + (types.fp32,) for o in expected_outputs + ] + + builder = build_dyn if dynamic else build + run_compare_builder( + builder, + input_placeholders, + input_values, + expected_output_types, + expected_outputs=expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_recurrent.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_recurrent.py new file mode 100644 index 00000000..43c44ead --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_recurrent.py @@ -0,0 +1,790 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools + +import numpy as np +import pytest + +from coremltools._deps import _HAS_TORCH, MSG_TORCH_NOT_FOUND +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import get_new_symbol, types +from coremltools.converters.mil.testing_reqs import backends, compute_units + +from .testing_utils import run_compare_builder + +if _HAS_TORCH: + import torch + + +class TestGRU: + @pytest.mark.parametrize( + argnames=[ + "compute_unit", + "backend", + "seq_len", + "batch_size", + "input_size", + "hidden_size", + "has_bias", + "output_sequence", + "direction", + "activation_functions", + "symbolic", + ], + argvalues=itertools.product( + compute_units, + backends, + [1, 3], + [1], # [MIL] GRU with batch size > 1 produces incorrect + # output (always 0) from the second batch element onwards, + # so only batch_size == 1 is exercised here + [1, 2], + [1, 2], + [True, False], + [True, False], + ["forward", "reverse"], + [ + ["TANH", "SIGMOID"], + ["SIGMOID", "TANH"], + ], + [True, False], + ), + ) + def test_builder_to_backend_smoke( + self, + compute_unit, + backend, + seq_len, + batch_size, + input_size, + hidden_size, + has_bias, + output_sequence, + direction, + activation_functions, + symbolic, + ): + torch.manual_seed(5) + + R_z = 2 * np.random.rand(hidden_size, hidden_size) - 1 + R_r = 2 * np.random.rand(hidden_size, hidden_size) - 1 + R_o = 2 * np.random.rand(hidden_size, hidden_size) - 1 + W_z = 2 * np.random.rand(hidden_size, input_size) - 1 + W_r = 2 * np.random.rand(hidden_size, input_size) - 1 + W_o = 2 * np.random.rand(hidden_size, input_size) - 1 + b_z = 2 * np.random.rand(hidden_size) - 1 if has_bias else np.zeros((hidden_size)) + b_r = 2 * np.random.rand(hidden_size) - 1 if has_bias else np.zeros((hidden_size)) + b_o = 2 * np.random.rand(hidden_size) - 1 if has_bias else np.zeros((hidden_size)) + + def apply_act(x, option): + if option == 'TANH': + return np.tanh(x) + elif option == 'SIGMOID': + return 1. / (1 + np.exp(-x)) + else: + raise ValueError("activation invalid") + + def get_numpy_prediction_gru(X, H, return_seq, direction, + inner_activation_str='SIGMOID', + activation_str='TANH', + ): + """ + shape of X : (B, Seq, input_size) + + shape of H : (B, hidden_size) + + returns (output, state): output has shape (1, B, hidden_size) if return_seq=False + else (Seq, B, hidden_size); state has shape (B, hidden_size) + """ + assert X.shape == (batch_size, seq_len, input_size) + assert H.shape == (batch_size, hidden_size) + out = [] + for i in range(batch_size): + numpy_input = X[i] + hidden_state = H[i] + out.append( + get_numpy_prediction_gru_single_batch( + numpy_input, + hidden_state, + return_seq, + direction, + inner_activation_str=inner_activation_str, + activation_str=activation_str, + ) + ) + output = np.stack(out, axis=0) + output = np.transpose(output, (1, 0, 2)) + return output, output[-1, :, :] + + def get_numpy_prediction_gru_single_batch(X, h, return_seq, direction, + inner_activation_str='SIGMOID', + activation_str='TANH'): + np_out = np.zeros((seq_len, hidden_size)) + batch_x = X if direction == "forward" else X[::-1, :] + for k in range(seq_len): + x = batch_x[k, :] + z = apply_act(np.dot(W_z, x) + np.dot(R_z, h) + b_z, inner_activation_str) + r = apply_act(np.dot(W_r, x) + np.dot(R_r, h) + b_r, inner_activation_str) + c = h * r + o = apply_act(np.dot(W_o, x) + np.dot(R_o, c) + b_o, activation_str) + h = (1 - z) * o + z * h + np_out[k, :] = h + + if return_seq: + np_out_final = np_out + else: + np_out_final = np_out[-1:, :] + + return np_out_final + + x = np.random.rand(batch_size, seq_len, input_size) + h = np.random.rand(batch_size, hidden_size) + + activation, inner_activation = activation_functions + output, state = get_numpy_prediction_gru( + x, h, output_sequence, direction, inner_activation, activation + ) + expected_outputs = [output, state] + + if symbolic: + batch_size = get_new_symbol() + seq_len = get_new_symbol() + + hh_wt = np.concatenate([R_r, R_o, R_z], axis=0) + ih_wt = np.concatenate([W_r, W_o, W_z], axis=0) + b = np.concatenate([b_r, b_o, b_z], axis=0) + + input_shape = [seq_len, batch_size, input_size] + h_shape = [batch_size, hidden_size] + + input_placeholders = { + "x": mb.placeholder(shape=input_shape), + "initial_h": mb.placeholder(shape=h_shape), + } + + coreml_x = np.transpose(x, (1, 0, 2)) + input_values = {"x": coreml_x, "initial_h": h} + + expected_output_types = [ + (seq_len if output_sequence else 1, batch_size, hidden_size, types.fp32), + (batch_size, hidden_size, types.fp32), + ] + + def build(x, initial_h): + arguments = { + "x": x, + "initial_h": initial_h, + "weight_ih": ih_wt, + "weight_hh": hh_wt, + "direction": direction, + "output_sequence": output_sequence, + "activation": activation, + "recurrent_activation": inner_activation, + } + # If bias is provided, add in arguments + if has_bias: + arguments["bias"] = b + return mb.gru(**arguments) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestLSTM: + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "input_dims", + "output_dim", + "activation", + "inner_activation", + "outer_activation", + "return_seq", + "has_bias", + "forget_bias", + "has_peephole", + "coupled_input_forget", + "clip", + ] + ), +
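+ # pytest splits the comma-joined argnames string above and binds each
+ # tuple from the product below to those names positionally, so the value
+ # lists must appear in the same order as the names.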
itertools.product( + compute_units, + backends, + [[8, 32, 32]], + [1, 4], + ["SIGMOID"], + ["TANH"], + ["TANH", "SIGMOID"], + [False, True], + [False, True], + [False, True], + [True, False], + [False], # We have not exposed this option yet! + [50.0, 0.2, 0.01], + ), + ) + def test_numpy_numerical( + self, + compute_unit, + backend, + input_dims, + output_dim, + activation, + inner_activation, + outer_activation, + return_seq, + has_bias, + forget_bias, + has_peephole, + coupled_input_forget, + clip, + ): + def _apply_act(x, option): + if option == "TANH": + return np.tanh(x) + elif option == "RELU": + return np.maximum(0, x) + elif option == "SIGMOID": + return 1.0 / (1 + np.exp(-x)) + elif option == "SIGMOID_HARD": + return np.minimum(np.maximum(0.2 * x + 0.5, 0), 1) + elif option == "LINEAR": + return x + else: + raise ValueError("activation invalid") + + def _clip(x, threshold=500.0): + return np.maximum(np.minimum(x, threshold), -threshold) + + def _get_numpy_prediction_lstm(Weights, X): + # X : (batch, seq_len, channel) + batch, _, _ = X.shape + out = [] + for i in range(batch): + out.append( + _get_numpy_prediction_lstm_single_batch( + Weights, np.expand_dims(X[i, :, :], axis=0) + ) + ) + return np.stack(out, axis=0) + + def _get_numpy_prediction_lstm_single_batch(Weights, X): + + batch_size, seq_len, input_size = X.shape + X = X[0, :, :] + hidden_size = output_dim + + b = Weights["b"] + Wx_i, Wx_f, Wx_o, Wx_g = np.split(Weights["W_x"], 4) + Wh_i, Wh_f, Wh_o, Wh_g = np.split(Weights["W_h"], 4) + b_i, b_f, b_o, b_g = np.split(b, 4) + p_i, p_f, p_o = np.split(Weights["p"], 3) + + act1 = activation + act2 = inner_activation + act3 = outer_activation + + h = np.zeros((hidden_size)) + c = np.zeros((hidden_size)) + np_out = np.zeros((seq_len, hidden_size)) + for k in range(seq_len): + x = X[k, :] + i = _apply_act(np.dot(Wx_i, x) + np.dot(Wh_i, h) + b_i + c * p_i, act1) + f = _apply_act(np.dot(Wx_f, x) + np.dot(Wh_f, h) + b_f + c * p_f, act1) + g = _apply_act(np.dot(Wx_g, x) + np.dot(Wh_g, h) + b_g, act2) + if coupled_input_forget: + c = c * (1 - i) + i * g + else: + c = c * f + i * g + c = _clip(c, clip) + o = _apply_act(np.dot(Wx_o, x) + np.dot(Wh_o, h) + b_o + c * p_o, act1) + h = o * _apply_act(c, act3) + np_out[k, :] = h + + if return_seq: + np_out_final = np_out + else: + np_out_final = np_out[-1:, :] + return np_out_final + + batch = input_dims[0] + seq_len = input_dims[1] + input_size = input_dims[2] + hidden_size = output_dim + + # define random weights + W_x = np.random.rand(4 * hidden_size, input_size) + W_h = np.random.rand(4 * hidden_size, hidden_size) + + if has_bias: + b = np.random.rand(4 * hidden_size) - 0.5 + if forget_bias: + b = b + 1 + else: + b = np.zeros((4 * hidden_size)) + + if has_peephole: + p = np.random.rand(3 * hidden_size) - 0.5 + else: + p = np.zeros((3 * hidden_size)) + + Weights = {} + Weights["W_x"] = W_x + Weights["W_h"] = W_h + Weights["b"] = b + Weights["p"] = p + + input_data = np.random.rand(batch, seq_len, input_size) + numpy_preds = _get_numpy_prediction_lstm(Weights, input_data) + numpy_preds = np.transpose(numpy_preds, [1, 0, 2]) + + coreml_input_data = np.transpose(input_data, [1, 0, 2]) + input_placeholders = {"x": mb.placeholder(shape=coreml_input_data.shape)} + input_values = {"x": coreml_input_data} + + def build(x): + h_all, ht, ct = mb.lstm( + x=x, + initial_h=np.zeros((batch, hidden_size)).astype(np.float32), + initial_c=np.zeros((batch, hidden_size)).astype(np.float32), + weight_ih=W_x, + weight_hh=W_h, + peephole=p, + 
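+ # W_x / W_h are stacked as [4*H, ...] in (i, f, o, g) order and the
+ # peephole vector p is [3*H] in (i, f, o) order, matching the np.split
+ # order used by the numpy reference above.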
direction="forward", + bias=b, + output_sequence=return_seq, + recurrent_activation=activation, + cell_activation=inner_activation, + activation=outer_activation, + clip=clip, + ) + return h_all + + expected_output_types = ( + seq_len if return_seq else 1, + batch, + hidden_size, + types.fp32, + ) + expected_outputs = numpy_preds + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + atol=1e-3, + rtol=1e-3, + ) + + @pytest.mark.skipif(not _HAS_TORCH, reason=MSG_TORCH_NOT_FOUND) + @pytest.mark.parametrize( + argnames=[ + "compute_unit", + "backend", + "seq_len", + "batch_size", + "input_size", + "hidden_size", + "has_bias", + "output_sequence", + "direction", + "symbolic", + ], + argvalues=itertools.product( + compute_units, + backends, + [1, 8], + [1, 32], + [1, 64], + [1, 16], + [True, False], + [True, False], + ["forward", "reverse"], + [True, False], + ), + ) + def test_builder_to_backend_smoke_unilstm( + self, + compute_unit, + backend, + seq_len, + batch_size, + input_size, + hidden_size, + has_bias, + output_sequence, + direction, + symbolic, + ): + + torch.manual_seed(50) + rnn = torch.nn.LSTM(input_size, hidden_size, 1, bias=has_bias) + state_dict = rnn.state_dict() + + ih_wt = state_dict["weight_ih_l0"].detach().numpy() + hh_wt = state_dict["weight_hh_l0"].detach().numpy() + + # Make weight compatible to CoreML format + def ifzo_to_ifoz(x): + i, f, z, o = np.split(x, 4) + return np.concatenate([i, f, o, z], axis=0) + + w_x = ifzo_to_ifoz(ih_wt) + w_h = ifzo_to_ifoz(hh_wt) + + b = None + if has_bias: + ih_b = state_dict["bias_ih_l0"].detach().numpy() + hh_b = state_dict["bias_hh_l0"].detach().numpy() + ih_b = ifzo_to_ifoz(ih_b) + hh_b = ifzo_to_ifoz(hh_b) + b = ih_b + hh_b + + t = torch.randn(seq_len, batch_size, input_size) + h0 = torch.randn(1, batch_size, hidden_size) + c0 = torch.randn(1, batch_size, hidden_size) + + n_t = t + if direction == "reverse": + n_t = torch.flip(n_t, [0]) + + output, (hn, cn) = rnn(n_t, (h0, c0)) + if not output_sequence: + output = output[-1].unsqueeze(0) + + output = output.detach().numpy() + hn = hn.detach().numpy().squeeze(0) + cn = cn.detach().numpy().squeeze(0) + + t = np.reshape(t.detach().numpy(), [seq_len, batch_size, input_size]) + h = np.reshape(h0.detach().numpy().squeeze(0), [batch_size, hidden_size]) + c = np.reshape(c0.detach().numpy().squeeze(0), [batch_size, hidden_size]) + + if symbolic: + batch_size = get_new_symbol() + seq_len = get_new_symbol() + + input_shape = [seq_len, batch_size, input_size] + h_shape = [batch_size, hidden_size] + c_shape = [batch_size, hidden_size] + + expected_output_types = [ + (seq_len if output_sequence else 1, batch_size, hidden_size, types.fp32), + (batch_size, hidden_size, types.fp32), + (batch_size, hidden_size, types.fp32), + ] + expected_outputs = [output, hn, cn] + + input_placeholders = { + "x": mb.placeholder(shape=input_shape), + "initial_h": mb.placeholder(shape=h_shape), + "initial_c": mb.placeholder(shape=c_shape), + } + input_values = {"x": t, "initial_h": h, "initial_c": c} + + def build(x, initial_h, initial_c): + arguments = { + "x": x, + "initial_h": initial_h, + "initial_c": initial_c, + "weight_ih": w_x, + "weight_hh": w_h, + "direction": direction, + "output_sequence": output_sequence, + } + # If bias is provided, add in arguments + if b is not None: + arguments["bias"] = b + return mb.lstm(**arguments) + + run_compare_builder( + build, + input_placeholders, + input_values, 
+ expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.skipif(not _HAS_TORCH, reason=MSG_TORCH_NOT_FOUND) + @pytest.mark.parametrize( + argnames=[ + "compute_unit", + "backend", + "seq_len", + "batch_size", + "input_size", + "hidden_size", + "has_bias", + "output_sequence", + "symbolic", + ], + argvalues=itertools.product( + compute_units, + backends, + [1, 8], + [1, 32], + [1, 64], + [2, 16], + [True, False], + [True, False], + [True, False], + ), + ) + def test_builder_to_backend_smoke_bidirlstm( + self, + compute_unit, + backend, + seq_len, + batch_size, + input_size, + hidden_size, + has_bias, + output_sequence, + symbolic, + ): + def _pytorch_hidden_to_coreml(x): + x = x.detach().numpy() + # Split off the direction axis + f, b = np.split(x, 2, axis=0) + # Concat along the hidden size axis + x = np.concatenate([f, b], axis=2) + x = np.squeeze(x, axis=0) + return x + + direction = "bidirectional" + torch.manual_seed(20) + rnn = torch.nn.LSTM( + input_size, hidden_size, 1, bidirectional=True, bias=has_bias + ) + state_dict = rnn.state_dict() + + ih_wt = state_dict["weight_ih_l0"].detach().numpy() + hh_wt = state_dict["weight_hh_l0"].detach().numpy() + ih_wt_r = state_dict["weight_ih_l0_reverse"].detach().numpy() + hh_wt_r = state_dict["weight_hh_l0_reverse"].detach().numpy() + + def ifzo_to_ifoz(x): + i, f, z, o = np.split(x, 4) + return np.concatenate([i, f, o, z], axis=0) + + wx = ifzo_to_ifoz(ih_wt) + wh = ifzo_to_ifoz(hh_wt) + r_wx = ifzo_to_ifoz(ih_wt_r) + r_wh = ifzo_to_ifoz(hh_wt_r) + + b, r_b = None, None + if has_bias: + ih_b = state_dict["bias_ih_l0"].detach().numpy() + hh_b = state_dict["bias_hh_l0"].detach().numpy() + r_ih_b = state_dict["bias_ih_l0_reverse"].detach().numpy() + r_hh_b = state_dict["bias_hh_l0_reverse"].detach().numpy() + # Convert forward bias into [4*H] + b = ih_b + hh_b + b = ifzo_to_ifoz(b) + # Convert reverse bias into [4*H] + r_b = r_ih_b + r_hh_b + r_b = ifzo_to_ifoz(r_b) + + t = torch.randn(seq_len, batch_size, input_size) + h0 = torch.randn(2, batch_size, hidden_size) + c0 = torch.randn(2, batch_size, hidden_size) + + output, (hn, cn) = rnn(t, (h0, c0)) + if not output_sequence: + output_f = output[-1].unsqueeze(0)[:, :, :hidden_size] + output_r = output[0].unsqueeze(0)[:, :, hidden_size:] + output = torch.cat([output_f, output_r], dim=2) + + output = output.detach().numpy() + hn = _pytorch_hidden_to_coreml(hn) + cn = _pytorch_hidden_to_coreml(cn) + + if symbolic: + batch_size = get_new_symbol() + seq_len = get_new_symbol() + + input_shape = [seq_len, batch_size, input_size] + h_shape = [batch_size, 2 * hidden_size] + c_shape = [batch_size, 2 * hidden_size] + + expected_output_types = [ + ( + seq_len if output_sequence else 1, + batch_size, + 2 * hidden_size, + types.fp32, + ), + (batch_size, 2 * hidden_size, types.fp32), + (batch_size, 2 * hidden_size, types.fp32), + ] + expected_outputs = [output, hn, cn] + + t = t.detach().numpy() + h = _pytorch_hidden_to_coreml(h0) + c = _pytorch_hidden_to_coreml(c0) + + input_placeholders = { + "x": mb.placeholder(shape=input_shape), + "initial_h": mb.placeholder(shape=h_shape), + "initial_c": mb.placeholder(shape=c_shape), + } + input_values = {"x": t, "initial_h": h, "initial_c": c} + + def build(x, initial_h, initial_c): + arguments = { + "x": x, + "initial_h": initial_h, + "initial_c": initial_c, + "weight_ih": wx, + "weight_hh": wh, + "weight_ih_back": r_wx, + "weight_hh_back": r_wh, + "direction": direction, + "output_sequence": output_sequence, + } + # If bias 
is provided, add in arguments + if b is not None: + arguments["bias"] = b + arguments["bias_back"] = r_b + return mb.lstm(**arguments) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestRNN: + @pytest.mark.skipif(not _HAS_TORCH, reason=MSG_TORCH_NOT_FOUND) + @pytest.mark.parametrize( + argnames=[ + "compute_unit", + "backend", + "seq_len", + "batch_size", + "input_size", + "hidden_size", + "has_bias", + "output_sequence", + "direction", + "symbolic", + ], + argvalues=itertools.product( + compute_units, + backends, + [2, 8], + [1, 32], + [1, 64], + [1, 16], + [True, False], + [True, False], + ["forward", "reverse"], + [True, False], + ), + ) + def test_builder_to_backend_smoke( + self, + compute_unit, + backend, + seq_len, + batch_size, + input_size, + hidden_size, + has_bias, + output_sequence, + direction, + symbolic, + ): + torch.manual_seed(50) + rnn = torch.nn.RNN(input_size, hidden_size, 1, bias=has_bias) + state_dict = rnn.state_dict() + + ih_wt = state_dict["weight_ih_l0"].detach().numpy() + hh_wt = state_dict["weight_hh_l0"].detach().numpy() + + b = None + if has_bias: + ih_b = state_dict["bias_ih_l0"].detach().numpy() + hh_b = state_dict["bias_hh_l0"].detach().numpy() + b = ih_b + hh_b + + t = torch.randn(seq_len, batch_size, input_size) + h0 = torch.randn(1, batch_size, hidden_size) + + n_t = t + if direction == "reverse": + n_t = torch.flip(n_t, [0]) + + output, hn = rnn(n_t, h0) + if not output_sequence: + output = output[-1].unsqueeze(0) + + output = output.detach().numpy() + hn = hn.detach().numpy().squeeze(0) + + t = np.reshape(t.detach().numpy(), [seq_len, batch_size, input_size]) + h = np.reshape(h0.detach().numpy().squeeze(0), [batch_size, hidden_size]) + + if symbolic: + batch_size = get_new_symbol() + seq_len = get_new_symbol() + + input_shape = [seq_len, batch_size, input_size] + h_shape = [batch_size, hidden_size] + + expected_output_types = [ + (seq_len if output_sequence else 1, batch_size, hidden_size, types.fp32), + (batch_size, hidden_size, types.fp32), + ] + expected_outputs = [output, hn] + + input_placeholders = { + "x": mb.placeholder(shape=input_shape), + "initial_h": mb.placeholder(shape=h_shape), + } + input_values = {"x": t, "initial_h": h} + + def build(x, initial_h): + arguments = { + "x": x, + "initial_h": initial_h, + "weight_ih": ih_wt, + "weight_hh": hh_wt, + "direction": direction, + "output_sequence": output_sequence, + } + # If bias is provided, add in arguments + if b is not None: + arguments["bias"] = b + return mb.rnn(**arguments) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_reduction.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_reduction.py new file mode 100644 index 00000000..2a10db8a --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_reduction.py @@ -0,0 +1,356 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools + +import numpy as np +import pytest +import scipy + +from coremltools.converters.mil import testing_reqs +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import get_new_symbol, types +from coremltools.converters.mil.mil.ops.tests.testing_utils import \ + run_compare_builder +from coremltools.converters.mil.testing_utils import random_gen, ssa_fn + +backends = testing_reqs.backends +compute_units = testing_reqs.compute_units + + +class TestReduction: + # All ops in this test share the same backends + @pytest.mark.parametrize( + "compute_unit, backend, mode", + itertools.product( + compute_units, + backends, + [ + "argmax", + "argmin", + "l1_norm", + "l2_norm", + "log_sum", + "log_sum_exp", + "max", + "mean", + "min", + "prod", + "sum", + "sum_square", + ], + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend, mode): + val = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + + if mode in {"argmax", "argmin"}: + expected_output_types = (2, types.int32) + else: + expected_output_types = (2, types.fp32) + + if mode == "argmax": + build = lambda x: mb.reduce_argmax(x=x, axis=1, keep_dims=False) + expected_outputs = np.array([2, 2], dtype=np.int32) + elif mode == "argmin": + build = lambda x: mb.reduce_argmin(x=x, axis=1, keep_dims=False) + expected_outputs = np.array([0, 0], dtype=np.int32) + elif mode == "l1_norm": + build = lambda x: mb.reduce_l1_norm(x=x, axes=[1], keep_dims=False) + expected_outputs = np.array([6.0, 15.0], dtype=np.float32) + elif mode == "l2_norm": + build = lambda x: mb.reduce_l2_norm(x=x, axes=[1], keep_dims=False) + expected_outputs = np.array([3.74165738, 8.77496438], dtype=np.float32) + elif mode == "log_sum": + build = lambda x: mb.reduce_log_sum(x=x, axes=[1], keep_dims=False) + expected_outputs = np.array([1.7917595, 2.70805025], dtype=np.float32) + elif mode == "log_sum_exp": + build = lambda x: mb.reduce_log_sum_exp(x=x, axes=[1], keep_dims=False) + expected_outputs = np.array([3.40760589, 6.40760612], dtype=np.float32) + elif mode == "max": + build = lambda x: mb.reduce_max(x=x, axes=[1], keep_dims=False) + expected_outputs = np.array([3.0, 6.0], dtype=np.float32) + elif mode == "mean": + build = lambda x: mb.reduce_mean(x=x, axes=[1], keep_dims=False) + expected_outputs = np.array([2.0, 5.0], dtype=np.float32) + elif mode == "min": + build = lambda x: mb.reduce_min(x=x, axes=[1], keep_dims=False) + expected_outputs = np.array([1.0, 4.0], dtype=np.float32) + elif mode == "prod": + build = lambda x: mb.reduce_prod(x=x, axes=[1], keep_dims=False) + expected_outputs = np.array([6.0, 120.0], dtype=np.float32) + elif mode == "sum": + build = lambda x: mb.reduce_sum(x=x, axes=[1], keep_dims=False) + expected_outputs = np.array([6.0, 15.0], dtype=np.float32) + elif mode == "sum_square": + build = lambda x: mb.reduce_sum_square(x=x, axes=[1], keep_dims=False) + expected_outputs = np.array([14.0, 77.0], dtype=np.float32) + else: + raise NotImplementedError() + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, mode", + itertools.product( + compute_units, 
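+ # "max" and "mean" are the two reductions that can lower to the
+ # global_pool path when reducing over the spatial dims, which is what
+ # these tests exercise.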
+ backends, + ["max", "mean"] + ), + ) + def test_builder_to_backend_global_pool_2d(self, compute_unit, backend, mode): + # test lowering to spatial reduction to global_pool path + val = np.array([[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + + expected_output_types = (1, 1, 1, 1, types.fp32) + + if mode == "max": + build = lambda x: mb.reduce_max(x=x, axes=[2, -1], keep_dims=True) + expected_outputs = np.array([[[[6.0]]]], dtype=np.float32) + elif mode == "mean": + build = lambda x: mb.reduce_mean(x=x, axes=[3, -2], keep_dims=True) + expected_outputs = np.array([[[[3.5]]]], dtype=np.float32) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, mode", + itertools.product( + compute_units, + backends, + ["max", "mean"] + ), + ) + def test_builder_to_backend_global_pool_none(self, compute_unit, backend, mode): + # test lowering to spatial reduction to global_pool path for axis = None + val = np.array([[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + + expected_output_types = (1, 1, 1, 1, types.fp32) + + if mode == "max": + build = lambda x: mb.reduce_max(x=x, axes=None, keep_dims=True) + expected_outputs = np.array([[[[6.0]]]], dtype=np.float32) + elif mode == "mean": + build = lambda x: mb.reduce_mean(x=x, axes=None, keep_dims=True) + expected_outputs = np.array([[[[3.5]]]], dtype=np.float32) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, mode", + itertools.product( + compute_units, + backends, + ["max", "mean"] + ), + ) + def test_builder_to_backend_global_pool_3d(self, compute_unit, backend, mode): + # test lowering to spatial reduction to global_pool path + val = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + + expected_output_types = (1, 1, 1, 1, 1, types.fp32) + + if mode == "max": + build = lambda x: mb.reduce_max(x=x, axes=[2, -1, 3], keep_dims=True) + expected_outputs = np.array([[[[[6.0]]]]], dtype=np.float32) + elif mode == "mean": + build = lambda x: mb.reduce_mean(x=x, axes=[-3, 3, 4], keep_dims=True) + expected_outputs = np.array([[[[[3.5]]]]], dtype=np.float32) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + + @pytest.mark.parametrize( + ["axis", "keep_dims"], + itertools.product( + [1, -3], + [True, False] + ) + ) + def test_builder_eval(self, axis, keep_dims): + x_val = random_gen(shape=(1, 3, 4, 4), rand_min=-100.0, rand_max=100.0) + + @ssa_fn + def test_reduce_argmax(): + res = mb.reduce_argmax(x=x_val, axis=axis, keep_dims=keep_dims).val + ref = np.argmax(x_val, axis=axis) + if keep_dims: + ref = np.expand_dims(ref, axis=axis) + np.testing.assert_allclose(ref, res, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_reduce_argmin(): + res = mb.reduce_argmin(x=x_val, axis=axis, keep_dims=keep_dims).val + ref = np.argmin(x_val, axis=axis) + if keep_dims: + ref = 
np.expand_dims(ref, axis=axis) + np.testing.assert_allclose(ref, res, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_reduce_l1_norm(): + res = mb.reduce_l1_norm(x=x_val, axes=[axis], keep_dims=keep_dims).val + ref = np.sum(np.abs(x_val), axis=axis, keepdims=keep_dims) + np.testing.assert_allclose(ref, res, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_reduce_l2_norm(): + res = mb.reduce_l2_norm(x=x_val, axes=[axis], keep_dims=keep_dims).val + ref = np.sqrt(np.sum(np.square(x_val), axis=axis, keepdims=keep_dims)) + np.testing.assert_allclose(ref, res, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_reduce_log_sum(): + x_val = random_gen(shape=(1, 3, 4, 4), rand_min=0.0, rand_max=100.0) + res = mb.reduce_log_sum(x=x_val, axes=[axis], keep_dims=keep_dims).val + ref = np.log(np.sum(x_val, axis=axis, keepdims=keep_dims)) + np.testing.assert_allclose(ref, res, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_reduce_log_sum_exp(): + res = mb.reduce_log_sum_exp(x=x_val, axes=[axis], keep_dims=keep_dims).val + ref = scipy.special.logsumexp(x_val, axis=axis, keepdims=keep_dims) + np.testing.assert_allclose(ref, res, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_reduce_max(): + res = mb.reduce_max(x=x_val, axes=[axis], keep_dims=keep_dims).val + ref = np.max(x_val, axis=axis, keepdims=keep_dims) + np.testing.assert_allclose(ref, res, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_reduce_mean(): + res = mb.reduce_mean(x=x_val, axes=[axis], keep_dims=keep_dims).val + ref = np.mean(x_val, axis=axis, keepdims=keep_dims) + np.testing.assert_allclose(ref, res, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_reduce_min(): + res = mb.reduce_min(x=x_val, axes=[axis], keep_dims=keep_dims).val + ref = np.min(x_val, axis=axis, keepdims=keep_dims) + np.testing.assert_allclose(ref, res, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_reduce_prod(): + res = mb.reduce_prod(x=x_val, axes=[axis], keep_dims=keep_dims).val + ref = np.prod(x_val, axis=axis, keepdims=keep_dims) + np.testing.assert_allclose(ref, res, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_reduce_sum(): + res = mb.reduce_sum(x=x_val, axes=[axis], keep_dims=keep_dims).val + ref = np.sum(x_val, axis=axis, keepdims=keep_dims) + np.testing.assert_allclose(ref, res, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_reduce_sum_square(): + res = mb.reduce_sum_square(x=x_val, axes=[axis], keep_dims=keep_dims).val + ref = np.sum(np.square(x_val), axis=axis, keepdims=keep_dims) + np.testing.assert_allclose(ref, res, atol=1e-04, rtol=1e-05) + + test_reduce_argmax() + test_reduce_argmin() + test_reduce_l1_norm() + test_reduce_l2_norm() + test_reduce_log_sum() + test_reduce_log_sum_exp() + test_reduce_max() + test_reduce_mean() + test_reduce_min() + test_reduce_prod() + test_reduce_sum() + test_reduce_sum_square() + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_symbolic(self, compute_unit, backend): + s0 = get_new_symbol() + + val = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=(s0, 3))} + input_values = {"x": val} + + def build(x): + return [ + mb.reduce_argmax(x=x, axis=1, keep_dims=True), + mb.reduce_argmin(x=x, axis=0, keep_dims=True), + ] + + expected_output_types = [(s0, 1, types.int32), (1, 3, types.int32)] + expected_outputs = [ + np.array([[2], [2]], dtype=np.int32), + np.array([[0, 0, 0]], dtype=np.int32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + 
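+ # With the symbolic leading dim s0, reducing over axis=1 keeps s0 in the
+ # output type, while reducing over axis=0 collapses that dim to 1, as
+ # encoded in expected_output_types above.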
expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "input_size", [(1), (2), (1,2), (2,2), (2,3,4), (2,3,4,10)] + ) + def test_reduce_log_sum_exp_value_inference(self, input_size): + rs = np.random.RandomState(1234) + x = rs.random(input_size) + + for axis in range(-x.ndim, x.ndim - 1): + @mb.program(input_specs=[]) + def prog(): + return mb.reduce_log_sum_exp(x=x, axes=(axis,)) + + op = list(prog.functions.values())[0].operations[3] + assert op.op_type == 'reduce_log_sum_exp' + np.testing.assert_allclose( + op.value_inference(), + scipy.special.logsumexp(x, axis=axis), + atol=1e-04, + rtol=1e-05 + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_scatter_gather.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_scatter_gather.py new file mode 100644 index 00000000..6829e9a8 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_scatter_gather.py @@ -0,0 +1,750 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools + +import numpy as np +import pytest + +import coremltools as ct +from coremltools._deps import _HAS_TF_2, MSG_TF2_NOT_FOUND +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.testing_reqs import backends, compute_units +from coremltools.converters.mil.testing_utils import ssa_fn + +from .testing_utils import run_compare_builder + +if _HAS_TF_2: + import tensorflow as tf + + +class TestScatter: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + data = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + indices = np.array([1, 0], dtype=np.int32) + updates = np.array([[5, 6, 7], [8, 9, 10]], dtype=np.float32) + input_placeholders = { + "data": mb.placeholder(shape=data.shape), + "indices": mb.placeholder(shape=indices.shape, dtype=types.int32), + "updates": mb.placeholder(shape=updates.shape), + } + + input_values = {"data": data, "indices": indices, "updates": updates} + + def build(data, indices, updates): + return (mb.scatter(data=data, indices=indices, updates=updates),) + + expected_output_types = (2, 3, types.fp32) + + expected_outputs = np.array([[9, 11, 13], [9, 11, 13]], dtype=np.float32) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.skipif(not _HAS_TF_2, reason=MSG_TF2_NOT_FOUND) + @pytest.mark.parametrize( + "compute_unit, backend, rankData_rankIndices, accumulate_mode", + itertools.product( + compute_units, + backends, + [ + (1, 2), + (2, 1), + (3, 2), + (2, 3), + (2, 2), + (1, 1), + (3, 3), + (3, 3), + (3, 3), + (1, 3), + (3, 1), + (3, 1), + ], + ["update", "add", "sub", "mul", "div", "max", "min"], + ), + ) + def test_builder_to_backend_programmatic( + self, compute_unit, backend, rankData_rankIndices, accumulate_mode + ): + data_rank, indices_rank = rankData_rankIndices + data_shape = np.random.randint(low=2, high=5, size=data_rank) + indices_shape = np.random.randint(low=2, high=5, size=indices_rank) + updates_shape = list(indices_shape) + 
list(data_shape[1:]) + + data = np.random.rand(*data_shape).astype(np.float32) + updates = np.random.rand(*updates_shape).astype(np.float32) + indices = np.random.randint(0, data_shape[0], size=indices_shape).astype( + np.int32 + ) + + def build(data, indices, updates): + return mb.scatter( + data=data, indices=indices, updates=updates, mode=accumulate_mode + ) + + tf_output = tf.Variable(data) + if accumulate_mode == "update": + tf.compat.v1.scatter_update(tf_output, indices, updates) + if accumulate_mode == "add": + tf.compat.v1.scatter_add(tf_output, indices, updates) + if accumulate_mode == "sub": + tf.compat.v1.scatter_sub(tf_output, indices, updates) + if accumulate_mode == "mul": + tf.compat.v1.scatter_mul(tf_output, indices, updates) + if accumulate_mode == "div": + tf.compat.v1.scatter_div(tf_output, indices, updates) + if accumulate_mode == "max": + tf.compat.v1.scatter_max(tf_output, indices, updates) + if accumulate_mode == "min": + tf.compat.v1.scatter_min(tf_output, indices, updates) + expected_output = tf_output.numpy() + + input_placeholders = { + "data": mb.placeholder(shape=data.shape), + "indices": mb.placeholder(shape=indices.shape, dtype=types.int32), + "updates": mb.placeholder(shape=updates.shape), + } + + input_values = {"data": data, "indices": indices, "updates": updates} + + expected_output_types = tuple(data_shape[:]) + (types.fp32,) + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_output, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestScatterAlongAxis: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + data = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + indices = np.array([[1, 0, 1], [1, 1, 0]], dtype=np.int32) + updates = np.array([[5, 6, 7], [8, 9, 10]], dtype=np.float32) + input_placeholders = { + "data": mb.placeholder(shape=data.shape), + "indices": mb.placeholder(shape=indices.shape, dtype=types.int32), + "updates": mb.placeholder(shape=updates.shape), + } + + input_values = {"data": data, "indices": indices, "updates": updates} + + def build(data, indices, updates): + return mb.scatter_along_axis( + data=data, indices=indices, updates=updates, axis=0, mode="update" + ) + + expected_output_types = (2, 3, types.fp32) + + expected_outputs = np.array([[1, 6, 10], [8, 9, 7]], dtype=np.float32) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + indices = np.array([[1, 0, 1], [1, 1, 0]], dtype=np.int32) + updates = np.array([[5, 6, 7], [8, 9, 10]], dtype=np.float32) + v = mb.scatter_along_axis( + data=x, indices=indices, updates=updates, axis=0, mode="update" + ) + np.testing.assert_allclose(np.array([[1, 6, 10], [8, 9, 7]], dtype=np.float32), v.val, atol=1e-04, rtol=1e-05) + + @pytest.mark.parametrize( + "compute_unit, backend, rank_axis", + itertools.product( + compute_units, + backends, + [(rank, axis) for rank in range(1, 5) for axis in range(-rank, rank)], + ), + ) + def test_builder_to_backend_programmatic(self, compute_unit, backend, rank_axis): + rank, axis = rank_axis + data_shape = np.random.randint(low=2, high=8, size=rank) + indices_shape = np.copy(data_shape) + indices_shape[axis] = np.random.randint(low=1, high=8) + updates_shape 
= indices_shape + + data = np.random.rand(*data_shape).astype(np.float32) + updates = np.random.rand(*updates_shape).astype(np.float32) + indices = np.random.randint( + -data_shape[axis], data_shape[axis], size=indices_shape + ).astype(np.int32) + + def build(data, indices, updates): + return mb.scatter_along_axis( + data=data, indices=indices, updates=updates, axis=axis, mode="update" + ) + + input_placeholders = { + "data": mb.placeholder(shape=data.shape), + "indices": mb.placeholder(shape=indices.shape, dtype=types.int32), + "updates": mb.placeholder(shape=updates.shape), + } + + input_values = {"data": data, "indices": indices, "updates": updates} + + expected_output_types = tuple(data_shape[:]) + (types.fp32,) + + np_output = np.copy(data) + np.put_along_axis(np_output, indices, updates, axis=axis) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + np_output, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestScatterNd: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + data = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + indices = np.array([[1, 0], [0, 2]], dtype=np.int32) + updates = np.array([5, 10], dtype=np.float32) + input_placeholders = { + "data": mb.placeholder(shape=data.shape), + "indices": mb.placeholder(shape=indices.shape, dtype=types.int32), + "updates": mb.placeholder(shape=updates.shape), + } + + input_values = {"data": data, "indices": indices, "updates": updates} + + def build(data, indices, updates): + return (mb.scatter_nd(data=data, indices=indices, updates=updates),) + + expected_output_types = (2, 3, types.fp32) + + expected_outputs = np.array([[1, 2, 13], [9, 5, 6]], dtype=np.float32) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.skipif(not _HAS_TF_2, reason=MSG_TF2_NOT_FOUND) + @pytest.mark.parametrize( + "compute_unit, backend, rankData_rankIndices, accumulate_mode", + itertools.product( + compute_units, + backends, + [ + (1, 2), + (2, 2), + (3, 2), + (2, 3), + (1, 4), + (5, 2), + (2, 5), + (4, 3), + (3, 4), + (2, 4), + (4, 2), + (1, 5), + ], + ["update", "add", "sub"], + ), + ) + def test_builder_to_backend_programmatic( + self, compute_unit, backend, rankData_rankIndices, accumulate_mode + ): + data_rank, indices_rank = rankData_rankIndices + data_shape = np.random.randint(low=2, high=5, size=data_rank) + indices_shape = np.random.randint(low=2, high=5, size=indices_rank) + indices_shape[-1] = np.random.randint(low=1, high=data_rank + 1) + updates_shape = list(indices_shape[:-1]) + list(data_shape[indices_shape[-1] :]) + + data = np.random.rand(*data_shape).astype(np.float32) + updates = np.random.rand(*updates_shape).astype(np.float32) + indices_list = [] + for i in range(indices_shape[-1]): + indices_list.append( + np.random.randint(0, data_shape[i], size=indices_shape[:-1]) + ) + + indices = np.stack(indices_list, axis=-1).astype(np.int32) + + def build(data, indices, updates): + return mb.scatter_nd( + data=data, indices=indices, updates=updates, mode=accumulate_mode + ) + + tf_output = tf.Variable(data) + if accumulate_mode == "update": + tf.compat.v1.scatter_nd_update(tf_output, indices, updates) + if accumulate_mode == "add": + tf.compat.v1.scatter_nd_add(tf_output, indices, updates) + if accumulate_mode == "sub": + 
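+ # These scatter ops mutate the tf.Variable in place; tf_output.numpy()
+ # below then serves as the reference result for the matching MIL
+ # scatter_nd mode.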
tf.compat.v1.scatter_nd_sub(tf_output, indices, updates) + expected_output = tf_output.numpy() + + input_placeholders = { + "data": mb.placeholder(shape=data.shape), + "indices": mb.placeholder(shape=indices.shape, dtype=types.int32), + "updates": mb.placeholder(shape=updates.shape), + } + + input_values = {"data": data, "indices": indices, "updates": updates} + + expected_output_types = tuple(data_shape[:]) + (types.fp32,) + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_output, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestGather: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + indices = np.array([1, 0], dtype=np.int32) + input_placeholders = { + "x": mb.placeholder(shape=x.shape), + "indices": mb.placeholder(shape=indices.shape, dtype=types.int32), + } + + input_values = {"x": x, "indices": indices} + + def build(x, indices): + return [ + mb.gather(x=x, indices=indices, axis=0), + mb.gather(x=x, indices=indices, axis=1), + mb.gather(x=x, indices=indices, axis=-2), + mb.gather(x=x, indices=indices, axis=-1), + mb.gather(x=x, indices=indices), + # mb.gather(x=x, indices=1), #shape of scalar indices is incorrect. + # mb.gather(x=x, indices=1, axis=1), #Scalar index passes on axis=0 but fails on axis=1, + # Need to handle rank 0 correctly, rdar://73160449 + ] + + expected_output_types = [ + (2, 3, types.fp32), + (2, 2, types.fp32), + (2, 3, types.fp32), + (2, 2, types.fp32), + (2, 3, types.fp32), + # (3, types.fp32), + ] + + expected_outputs = [ + np.array([[4, 5, 6], [1, 2, 3]], dtype=np.float32), + np.array([[2, 1], [5, 4]], dtype=np.float32), + np.array([[4, 5, 6], [1, 2, 3]], dtype=np.float32), + np.array([[2, 1], [5, 4]], dtype=np.float32), + np.array([[4, 5, 6], [1, 2, 3]], dtype=np.float32), + # np.array([4, 5, 6], dtype=np.float32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke_iOS16(self, compute_unit, backend): + if backend[0] == "neuralnetwork": + pytest.skip("nn backend not supported") + if ct.utils._macos_version() < (13, 0): + pytest.skip("batch_dims not supported in macOS12 or older.") + + x = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], dtype=np.float32) + indices = np.array([[[1, 0], [0, 1]], [[1, 0], [0, 0]]], dtype=np.int32) + + input_placeholders = { + "x": mb.placeholder(shape=x.shape), + "indices": mb.placeholder(shape=indices.shape, dtype=types.int32), + } + + input_values = {"x": x, "indices": indices} + + def build(x, indices): + return [ + mb.gather(x=x, indices=indices, axis=1, batch_dims=0), + mb.gather(x=x, indices=indices, axis=1, batch_dims=1), + mb.gather(x=x, indices=indices, axis=2, batch_dims=0), + mb.gather(x=x, indices=indices, axis=2, batch_dims=1), + mb.gather(x=x, indices=indices, axis=2, batch_dims=2), + ] + + expected_output_types = [ + (2, 2, 2, 2, 3, types.fp32), + (2, 2, 2, 3, types.fp32), + (2, 2, 2, 2, 2, types.fp32), + (2, 2, 2, 2, types.fp32), + (2, 2, 2, types.fp32), + ] + + expected_outputs = [ + np.array([[[[[ 4, 5, 6], + [ 1, 2, 3]], + [[ 1, 2, 3], + [ 4, 5, 6]]], + [[[ 4, 5, 6], + [ 1, 2, 3]], + [[ 1, 
2, 3], + [ 1, 2, 3]]]], + [[[[10, 11, 12], + [ 7, 8, 9]], + [[ 7, 8, 9], + [10, 11, 12]]], + [[[10, 11, 12], + [ 7, 8, 9]], + [[ 7, 8, 9], + [ 7, 8, 9]]]]], dtype=np.float32 + ), + np.array([[[[ 4, 5, 6], + [ 1, 2, 3]], + [[ 1, 2, 3], + [ 4, 5, 6]]], + [[[10, 11, 12], + [ 7, 8, 9]], + [[ 7, 8, 9], + [ 7, 8, 9]]]], dtype=np.float32 + ), + np.array([[[[[ 2, 1], + [ 1, 2]], + [[ 2, 1], + [ 1, 1]]], + [[[ 5, 4], + [ 4, 5]], + [[ 5, 4], + [ 4, 4]]]], + [[[[ 8, 7], + [ 7, 8]], + [[ 8, 7], + [ 7, 7]]], + [[[11, 10], + [10, 11]], + [[11, 10], + [10, 10]]]]], dtype=np.float32 + ), + np.array([[[[ 2, 1], + [ 1, 2]], + [[ 5, 4], + [ 4, 5]]], + [[[ 8, 7], + [ 7, 7]], + [[11, 10], + [10, 10]]]], dtype=np.float32 + ), + np.array([[[ 2, 1], + [ 4, 5]], + [[ 8, 7], + [10, 10]]], dtype=np.float32 + ), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16, + ) + + + def test_builder_eval_iOS16(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, ), dtype=types.fp32)], opset_version=ct.target.iOS16) + def prog(x): + params = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], dtype=np.float32) + indices = np.array([[[1, 0], [0, 1]], [[1, 0], [0, 0]]], dtype=np.int32) + res = mb.gather(x=params, indices=indices, axis=2, batch_dims=2) + return res + + main_func = prog.functions["main"] + gather_ops = main_func.find_ops(op_type="gather")[0] + + np.testing.assert_allclose( + np.array([[[ 2, 1], [ 4, 5]], [[ 8, 7], [10, 10]]], dtype=np.float32), + gather_ops.outputs[0].val, + atol=1e-04, + rtol=1e-05 + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_embedding_builder_to_backend_smoke(self, compute_unit, backend): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + indices = np.array([1, 0], dtype=np.int32) + input_placeholders = { + "indices": mb.placeholder(shape=indices.shape, dtype=types.int32), + } + + input_values = {"indices": indices} + + def build(indices): + return [ + mb.gather(x=x, indices=indices, axis=0), + mb.gather(x=x, indices=indices, axis=-2), + ] + + expected_output_types = [ + (2, 3, types.fp32), + (2, 3, types.fp32), + ] + + expected_outputs = [ + np.array([[4, 5, 6], [1, 2, 3]], dtype=np.float32), + np.array([[4, 5, 6], [1, 2, 3]], dtype=np.float32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + indices = np.array([1, 0], dtype=np.int32) + v = mb.gather(x=x, indices=indices, axis=-1) + np.testing.assert_allclose(np.array([[2, 1], [5, 4]], dtype=np.float32), v.val, atol=1e-04, rtol=1e-05) + + +class TestGatherAlongAxis: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + indices = np.array([[1, 0, 1], [1, 1, 0]], dtype=np.int32) + input_placeholders = { + "x": mb.placeholder(shape=x.shape), + "indices": mb.placeholder(shape=indices.shape, dtype=types.int32), + } + + input_values = {"x": x, "indices": indices} + + def build(x, indices): + return [ + mb.gather_along_axis(x=x, indices=indices, axis=0), + mb.gather_along_axis(x=x, indices=indices, 
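+ # gather_along_axis follows np.take_along_axis (the reference used in
+ # the programmatic test below): e.g. with axis=0 the output at [i, j]
+ # is x[indices[i, j], j], giving [[4, 2, 6], [4, 5, 3]] here.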
axis=1), + mb.gather_along_axis(x=x, indices=indices, axis=-2), + mb.gather_along_axis(x=x, indices=indices, axis=-1), + mb.gather_along_axis(x=x, indices=indices), + ] + + expected_output_types = [ + (2, 3, types.fp32), + (2, 3, types.fp32), + (2, 3, types.fp32), + (2, 3, types.fp32), + (2, 3, types.fp32), + ] + + expected_outputs = [ + np.array([[4, 2, 6], [4, 5, 3]], dtype=np.float32), + np.array([[2, 1, 2], [5, 5, 4]], dtype=np.float32), + np.array([[4, 2, 6], [4, 5, 3]], dtype=np.float32), + np.array([[2, 1, 2], [5, 5, 4]], dtype=np.float32), + np.array([[4, 2, 6], [4, 5, 3]], dtype=np.float32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + indices = np.array([[1, 0, 1], [0, 0, 1]], dtype=np.int32) + v = mb.gather_along_axis(x=x, indices=indices, axis=0) + np.testing.assert_allclose(np.array([[4, 2, 6], [1, 2, 6]], dtype=np.float32), v.val, atol=1e-04, rtol=1e-05) + + @pytest.mark.parametrize( + "compute_unit, backend, rank_axis", + itertools.product( + compute_units, + backends, + [(rank, axis) for rank in range(1, 5) for axis in range(-rank, rank)], + ), + ) + def test_builder_to_backend_programmatic(self, compute_unit, backend, rank_axis): + if backend[0] == "mlprogram" and compute_unit != ct.ComputeUnit.CPU_ONLY: + pytest.xfail("rdar://97398875 (TestGatherAlongAxis failing on mlprgram + GPU)") + rank, axis = rank_axis + x_shape = np.random.randint(low=2, high=8, size=rank) + indices_shape = np.copy(x_shape) + indices_shape[axis] = np.random.randint(low=1, high=8) + + x = np.random.rand(*x_shape).astype(np.float32) + indices = np.random.randint( + -x_shape[axis], x_shape[axis], size=indices_shape + ).astype(np.int32) + + def build(x, indices): + return mb.gather_along_axis(x=x, indices=indices, axis=axis) + + input_placeholders = { + "x": mb.placeholder(shape=x.shape), + "indices": mb.placeholder(shape=indices.shape, dtype=types.int32), + } + + input_values = {"x": x, "indices": indices} + + expected_output_types = tuple(indices_shape[:]) + (types.fp32,) + expected_output = np.take_along_axis(x, indices, axis=axis) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_output, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestGatherNd: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + indices = np.array([[1, 0], [0, 2]], dtype=np.int32) + input_placeholders = { + "x": mb.placeholder(shape=x.shape), + "indices": mb.placeholder(shape=indices.shape, dtype=types.int32), + } + + input_values = {"x": x, "indices": indices} + + def build(x, indices): + return (mb.gather_nd(x=x, indices=indices),) + + expected_output_types = (2, types.fp32) + expected_outputs = np.array([4, 3], dtype=np.float32) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + frontend_only=False, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke_iOS16(self, compute_unit, backend): + if backend[0] == "neuralnetwork": + pytest.skip("nn backend not 
supported") + + if ct.utils._macos_version() < (13, 0): + pytest.skip("batch_dims not supported in macOS12 or older.") + + x = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], dtype=np.float32) + indices = np.array([[[1, 0], [0, 1]], [[1, 0], [0, 0]]], dtype=np.int32) + + input_placeholders = { + "x": mb.placeholder(shape=x.shape), + "indices": mb.placeholder(shape=indices.shape, dtype=types.int32), + } + + input_values = {"x": x, "indices": indices} + + def build(x, indices): + return [ + mb.gather_nd(x=x, indices=indices, batch_dims=0), + mb.gather_nd(x=x, indices=indices, batch_dims=1), + ] + + expected_output_types = [ + (2, 2, 3, types.fp32), + (2, 2, types.fp32) + ] + + expected_outputs = [ + np.array([[[7, 8, 9], + [4, 5, 6]], + [[7, 8, 9], + [1, 2, 3]]], dtype=np.float32 + ), + np.array([[ 4, 2], + [10, 7]], dtype=np.float32 + ), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16, + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_slice.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_slice.py new file mode 100644 index 00000000..a9fa669e --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_slice.py @@ -0,0 +1,394 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools + +import numpy as np +import pytest + +import coremltools as ct +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import get_new_symbol, types +from coremltools.converters.mil.testing_reqs import backends, compute_units +from coremltools.converters.mil.testing_utils import ssa_fn + +from .testing_utils import UNK_SYM, run_compare_builder + + +class TestSliceByIndex: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x_val = np.array(list(range(24))).reshape((2, 3, 4)).astype(np.float32) + begin_val = np.array([1, 1, 1], dtype=np.int32) + end_val = np.array([2, 3, 3], dtype=np.int32) + input_placeholders = { + "x": mb.placeholder(shape=x_val.shape), + "begin": mb.placeholder(shape=begin_val.shape, dtype=types.int32), + "end": mb.placeholder(shape=end_val.shape, dtype=types.int32), + } + input_values = {"x": x_val, "begin": begin_val, "end": end_val} + + def build(x, begin, end): + begin_c = mb.const(val=begin_val) + end_c = mb.const(val=end_val) + return [ + mb.slice_by_index(x=x, begin=begin, end=end), + mb.slice_by_index(x=x, begin=begin_c, end=end_c) + ] + + expected_output_types = [(UNK_SYM, UNK_SYM, UNK_SYM, types.fp32)] * 2 + expected_outputs = [np.array([[[17, 18], [21, 22]]], dtype=np.float32)] * 2 + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + def test_type_inference(self): + s0 = get_new_symbol() + s1 = get_new_symbol() + s2 = get_new_symbol() + + input_placeholders = { + "x": mb.placeholder(shape=(10, s0, s1, s2)), + } + + def build(x): + return [ + mb.slice_by_index( + x=x, begin=[2, 5, 6, 12], end=[6, 9, 20, -9], 
stride=[2, 1, 2, 1] + ), + mb.slice_by_index( + x=x, + begin=[-2, -5, -3, 9], + end=[-6, -9, -6, -7], + stride=[-2, -1, -2, 1], + ), + mb.slice_by_index( + x=x, + begin=[0, 0, 0, 0], + end=[-6, -9, 3, -2], + stride=[-2, -3, 1, 2], + begin_mask=[True, True, True, True], + end_mask=[False, False, False, False], + ), + mb.slice_by_index( + x=x, + begin=[-2, 5, -1, -7], + end=[0, 0, 0, 0], + stride=[-2, -3, 1, -2], + begin_mask=[False, False, False, False], + end_mask=[True, True, True, True], + ), + mb.slice_by_index( + x=x, begin=[4, -1, 0, -5], end=[4, -1, 0, -5], stride=[1, -1, 2, -2] + ), + ] + + expected_output_types = [ + (2, 4, 7, UNK_SYM, types.fp32), + (2, 4, 2, UNK_SYM, types.fp32), + (3, 3, 3, UNK_SYM, types.fp32), + (5, 2, 1, UNK_SYM, types.fp32), + (0, 0, 0, 0, types.fp32), + ] + + run_compare_builder( + build, + input_placeholders, + expected_output_types=expected_output_types, + frontend_only=True, + ) + + + @pytest.mark.xfail(reason="rdar://99664032") + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_single_element_edge_case(self, compute_unit, backend): + x_val = np.array(list(range(6))).reshape((1, 3, 2)).astype(np.float32) + input_placeholders = { + "x": mb.placeholder(shape=x_val.shape), + } + input_values = {"x": x_val} + + def build(x): + return mb.slice_by_index( + x=x, + begin=[-1, 0, 0], + end=[-2, 0, 0], + stride=[-1, 1, 1], + begin_mask=[False, True, True], + end_mask=[False, True, True] + ) + + expected_output_types = [(1, 3, 2, types.fp32)] + expected_outputs = [np.array([[[0, 1], [2, 3], [4, 5]]], dtype=np.float32)] + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval_scalar_output_corner_cases(self): + x1 = np.array([2.]) + x2 = np.array([[[[1.],[3.]]]]) + v = [ + mb.slice_by_index( + x=x1, begin=[0,], end=[0], squeeze_mask=[True], + ), + mb.slice_by_index( + x=x2, begin=[0, 0, 0, 0], end=[0, 0, 0, 0], squeeze_mask=[True, True, True, True], + ), + ] + assert v[0].val.shape == () + assert v[0].val == 2 + assert v[1].val.shape == () + assert v[1].val == 1 + + @ssa_fn + def test_builder_eval(self): + x_val = np.array(list(range(24))).reshape((2, 3, 4)) + v = [ + mb.slice_by_index( + x=x_val, begin=[1, 1, 1], end=[2, 2, 2] + ), # x_val[1:2, 1:2, 1:2] + mb.slice_by_index( + x=x_val, begin=[1, 1, 1], end=[2, 3, 4], stride=[1, 1, 2] + ), # x_val[1:2, 1:3, 1:4:2] + mb.slice_by_index( + x=x_val, begin=[-3, -3, -3], end=[-1, -1, -1] + ), # x_val[-3:-1, -3:-1, -3:-1] + mb.slice_by_index( + x=x_val, begin=[0, 0, -3], end=[-1, -2, -2] + ), # x_val[0:-1, 0:-2, -3:-2] + mb.slice_by_index( + x=x_val, begin=[-1, -1, -1], end=[0, 1, -3], stride=[-2, -1, -3] + ), # x_val[-1:0:-2, -1:1:-1, -1:-3:-3] + mb.slice_by_index( + x=x_val, + begin=[1, 1, 1], + end=[2, 3, 4], + stride=[1, 1, 2], + begin_mask=[True, False, True], + ), # x_val[:2, 1:3, :4:2] + mb.slice_by_index( + x=x_val, + begin=[1, 1, 1], + end=[2, 3, 4], + stride=[1, 1, 2], + begin_mask=[True, False, True], + end_mask=[True, True, False], + ), # x_val[:, 1:, :4:2] + mb.slice_by_index( + x=x_val, + begin=[1, 1, 1], + end=[2, 3, 4], + stride=[1, 1, 2], + begin_mask=[False, False, True], + end_mask=[True, False, False], + squeeze_mask=[False, True, False], + ), # x_val[1::1, 1, :3:2] + mb.slice_by_index( + x=x_val, + begin=[0, 0, 0], + end=[0, 0, 0], + stride=[1, 1, 1], + begin_mask=[True, True, True], + 
end_mask=[True, True, True], + ), # x_val[:, :, :] + mb.slice_by_index( + x=x_val, + begin=[1, 1, 1], + end=[2, 2, 0], + stride=[1, 1, 1], + squeeze_mask=[False, False, True], + ), # x_val[1:2, 1:2, 1] + mb.slice_by_index( + x=x_val, + begin=[1, 0, 0], + end=[2, 0, 0], + stride=[1, 1, 1], + begin_mask=[False, True, True], + end_mask=[False, True, True], + ), # x_val[1:2, ...] + mb.slice_by_index( + x=x_val, + begin=[0, 0, 0], + end=[0, 0, 0], + stride=[1, 1, 1], + begin_mask=[True, True, True], + end_mask=[True, True, True], + ), # x_val[...] + mb.slice_by_index( + x=x_val, + begin=[1, 0, 1], + end=[2, 0, 2], + stride=[1, 1, 1], + begin_mask=[False, True, False], + end_mask=[False, True, False], + ), # x_val[1:2, ..., 1:2] + mb.slice_by_index( + x=x_val, + begin=[0, 0, 1], + end=[0, 0, 0], + stride=[1, 1, 1], + begin_mask=[True, True, False], + end_mask=[True, True, False], + squeeze_mask=[False, False, True], + ), # x_val[..., 1] + mb.slice_by_index( + x=x_val, + begin=[0, 0, 0], + end=[0, 0, 0], + stride=[1, 1, 1], + begin_mask=[False, False, True], + end_mask=[False, False, True], + squeeze_mask=[True, True, False], + ), # x_val[0, 0, :] + mb.slice_by_index( + x=x_val, + begin=[1, 0, 0], + end=[2, 0, 0], + stride=[1, 1, 1], + begin_mask=[False, True, True], + end_mask=[False, True, True], + ), # x_val[1:2] + mb.slice_by_index( + x=x_val, + begin=[1, 1, 0], + end=[2, 2, 0], + stride=[1, 1, 1], + begin_mask=[False, False, True], + end_mask=[False, False, True], + ), # x_val[1:2, 1:2] + mb.slice_by_index( + x=x_val, + begin=[1, 0, 0], + end=[0, 0, 0], + stride=[1, 1, 1], + begin_mask=[False, True, True], + end_mask=[False, True, True], + squeeze_mask=[True, False, False], + ), # x_val[1] + mb.slice_by_index( + x=x_val, + begin=[0, 0, 0], + end=[0, 0, 0], + begin_mask=[True, True, True], + end_mask=[True, True, True], + ), # x_val[:] + mb.slice_by_index( + x=x_val, + begin=[0, 0, 0], + end=[0, 0, 0], + stride=[1, 1, -1], + begin_mask=[True, True, True], + end_mask=[True, True, True], + ), # x_val[..., ::-1] + ] + ans = [ + x_val[1:2, 1:2, 1:2], + x_val[1:2, 1:3, 1:4:2], + x_val[-3:-1, -3:-1, -3:-1], + x_val[0:-1, 0:-2, -3:-2], + x_val[-1:0:-2, -1:1:-1, -1:-3:-3], + x_val[:2, 1:3, :4:2], + x_val[:, 1:, :4:2], + x_val[1::1, 1, :3:2], + x_val[:, :, :], + x_val[1:2, 1:2, 1], + x_val[1:2, ...], + x_val[...], + x_val[1:2, ..., 1:2], + x_val[..., 1], + x_val[0, 0, :], + x_val[1:2], + x_val[1:2, 1:2], + x_val[1], + x_val[:], + x_val[..., ::-1], + ] + for idx in range(len(v)): + assert ans[idx].shape == v[idx].shape + np.testing.assert_allclose(ans[idx], v[idx].val, atol=1e-04, rtol=1e-05) + + + @staticmethod + def test_slice_by_index(): + INPUT_SHAPE = (1, 2, 8, 16) + + @mb.program(input_specs=[mb.TensorSpec(shape=INPUT_SHAPE)]) + def prog(x): + x = mb.slice_by_index( + x=x, + begin=[0, 0, 0, 0], + end=[1, 2, 8, 12], + stride=[1, 1, 2, 2], + begin_mask=None, + end_mask=None, + squeeze_mask=None, + ) + return x + + x = np.random.rand(*INPUT_SHAPE) + + # slice by index is x[begin[0]: end[0]: stride[0], begin[1]: end[1]: stride[1], ...] 
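+        # (an illustrative note: for this call each output dim is ceil((end - begin) / stride),
+        # so the (1, 2, 8, 16) input yields a (1, 2, 4, 6) output)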
+ y_numpy = x[0:1:1, 0:2:1, 0:8:2, 0:12:2] + + model = ct.convert(prog, source="milinternal", convert_to="neuralnetwork") + y_neuralnetwork = list(model.predict({'x': x}).values())[0] + np.testing.assert_allclose(y_numpy, y_neuralnetwork) + + model = ct.convert(prog, source="milinternal", convert_to="mlprogram") + y_mlprogram = list(model.predict({'x': x}).values())[0] + # rdar://102217935 needs to be fixed before mlprogram will pass + # np.testing.assert_allclose(y_numpy, y_mlprogram) + + @staticmethod + def test_slice_by_index_slice_squeeze_separate(): + INPUT_SHAPE = (1, 2, 8, 16) + + @mb.program(input_specs=[mb.TensorSpec(shape=INPUT_SHAPE)]) + def prog(x): + x = mb.slice_by_index( + x=x, + begin=[0, 0, 0, 0], + end=[1, 2, 8, 12], + stride=[1, 1, 1, 2], + begin_mask=None, + end_mask=None, + squeeze_mask=[True, False, False, False], + ) + return x + + x = np.random.rand(*INPUT_SHAPE) + + # slice by index is x[begin[0]: end[0]: stride[0], begin[1]: end[1]: stride[1], ...] + # and squeeze dim 0 + y_numpy = x[0:1:1, 0:2:1, 0:8:1, 0:12:2] + y_numpy = np.squeeze(y_numpy, axis=0) + + model = ct.convert(prog, source="milinternal", convert_to="neuralnetwork") + y_neuralnetwork = list(model.predict({'x': x}).values())[0] + + assert y_numpy.shape == y_neuralnetwork.shape + np.testing.assert_allclose(y_numpy, y_neuralnetwork) + + model = ct.convert(prog, source="milinternal", convert_to="mlprogram") + y_mlprogram = list(model.predict({'x': x}).values())[0] + # TODO: rdar://103365766 MLProgram does not apply squeeze_mask. + # np.testing.assert_allclose(y_numpy, y_mlprogram) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_tensor_operation.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_tensor_operation.py new file mode 100644 index 00000000..dc1d8672 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_tensor_operation.py @@ -0,0 +1,1645 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools +import platform + +import numpy as np +import pytest + +import coremltools as ct +from coremltools._deps import _HAS_TF_2, MSG_TF2_NOT_FOUND +from coremltools.converters.mil import testing_reqs +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import get_new_symbol, types +from coremltools.converters.mil.testing_utils import (get_op_types_in_program, + random_gen, ssa_fn) +from coremltools.models.utils import _macos_version + +from .testing_utils import UNK_SYM, UNK_VARIADIC, run_compare_builder + +if _HAS_TF_2: + import tensorflow as tf + +backends = testing_reqs.backends +compute_units = testing_reqs.compute_units + + +class TestBandPart: + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x_val = np.array( + [ + [3.0, 3.0, 5.0, 1.0], + [5.0, 6.0, 3.0, 8.0], + [7.0, 2.0, 7.0, 2.0], + [6.0, 7.0, 7.0, 1.0], + ], + dtype=np.float32, + ) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [ + mb.band_part(x=x), + mb.band_part(x=x, lower=0, upper=-1), + mb.band_part(x=x, lower=-1, upper=0), + mb.band_part(x=x, lower=0, upper=0), + ] + + expected_output_types = [ + (4, 4, types.fp32), + (4, 4, types.fp32), + (4, 4, types.fp32), + (4, 4, types.fp32), + ] + + expected_outputs = [ + np.array( + [ + [3.0, 3.0, 5.0, 1.0], + [5.0, 6.0, 3.0, 8.0], + [7.0, 2.0, 7.0, 2.0], + [6.0, 7.0, 7.0, 1.0], + ], + dtype=np.float32, + ), + np.array( + [ + [3.0, 3.0, 5.0, 1.0], + [0.0, 6.0, 3.0, 8.0], + [0.0, 0.0, 7.0, 2.0], + [0.0, 0.0, 0.0, 1.0], + ], + dtype=np.float32, + ), + np.array( + [ + [3.0, 0.0, 0.0, 0.0], + [5.0, 6.0, 0.0, 0.0], + [7.0, 2.0, 7.0, 0.0], + [6.0, 7.0, 7.0, 1.0], + ], + dtype=np.float32, + ), + np.array( + [ + [3.0, 0.0, 0.0, 0.0], + [0.0, 6.0, 0.0, 0.0], + [0.0, 0.0, 7.0, 0.0], + [0.0, 0.0, 0.0, 1.0], + ], + dtype=np.float32, + ), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestCumSum: + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + t = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return mb.cumsum(x=x, axis=0, reverse=True, exclusive=False) + + expected_output_types = (2, 3, types.fp32) + expected_outputs = np.array([[5, 7, 9], [4, 5, 6]], dtype=np.float32) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = random_gen(shape=(1, 2, 3, 4, 5), rand_min=-100, rand_max=100) + v = mb.cumsum(x=x_val) + np.testing.assert_allclose(np.cumsum(x_val, axis=0), v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_invalid_arg(self): + x_val = random_gen(shape=(1, 2, 3, 4, 5), rand_min=-100, rand_max=100) + with pytest.raises(ValueError): + mb.cumsum(x=x_val, axis=0, invalid_arg=3) + + @ssa_fn + def test_invalid_axis1(self): + x_val = 
random_gen(shape=(1, 2, 3, 4, 5), rand_min=-100, rand_max=100) + with pytest.raises(ValueError): + mb.cumsum(x=x_val, axis=-2) + + @ssa_fn + def test_invalid_axis2(self): + x_val = random_gen(shape=(1, 2, 3, 4, 5), rand_min=-100, rand_max=100) + with pytest.raises(ValueError): + mb.cumsum(x=x_val, axis=len(x_val.shape)) + + @ssa_fn + def test_invalid_axis3(self): + x_val = random_gen(shape=(1, 2, 3, 4, 5), rand_min=-100, rand_max=100) + with pytest.raises(ValueError): + mb.cumsum(x=x_val, axis="") + + @ssa_fn + def test_invalid_reverse1(self): + x_val = random_gen(shape=(1, 2, 3, 4, 5), rand_min=-100, rand_max=100) + with pytest.raises(ValueError): + mb.cumsum(x=x_val, reverse="") + + @ssa_fn + def test_invalid_reverse2(self): + x_val = random_gen(shape=(1, 2, 3, 4, 5), rand_min=-100, rand_max=100) + with pytest.raises(ValueError): + mb.cumsum(x=x_val, reverse=0) + + @ssa_fn + def test_invalid_reverse3(self): + x_val = random_gen(shape=(1, 2, 3, 4, 5), rand_min=-100, rand_max=100) + with pytest.raises(ValueError): + mb.cumsum(x=x_val, reverse=1) + + @ssa_fn + def test_invalid_exclusive1(self): + x_val = random_gen(shape=(1, 2, 3, 4, 5), rand_min=-100, rand_max=100) + with pytest.raises(ValueError): + mb.cumsum(x=x_val, exclusive="") + + @ssa_fn + def test_invalid_exclusive2(self): + x_val = random_gen(shape=(1, 2, 3, 4, 5), rand_min=-100, rand_max=100) + with pytest.raises(ValueError): + mb.cumsum(x=x_val, exclusive=0) + + @ssa_fn + def test_invalid_exclusive3(self): + x_val = random_gen(shape=(1, 2, 3, 4, 5), rand_min=-100, rand_max=100) + with pytest.raises(ValueError): + mb.cumsum(x=x_val, exclusive=1) + + @ssa_fn + def test_invalid_input1(self): + x_val = 1 + with pytest.raises(ValueError): + mb.cumsum(x=x_val) + + @ssa_fn + def test_invalid_input2(self): + x_val = ["1"] + with pytest.raises(ValueError): + mb.cumsum(x=x_val) + + +class TestFillLike: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + if backend[0] == "neuralnetwork": + pytest.xfail("nn backend not supported") + + if ct.utils._macos_version() < (13, 0): + pytest.skip("fill_like not supported in macOS12 or older.") + + shape = (2, 1, 3) + x_val = np.zeros(shape=shape, dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape, dtype=types.int32)} + + input_values = {"x": x_val} + + def build(x): + return mb.fill_like(ref_tensor=x, value=1.0) + + expected_output_types = [(2, 1, 3, types.fp32)] + expected_outputs = [np.full(shape=shape, fill_value=1.0)] + + mlmodel = run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16, + ) + + +class TestFill: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + shape = (2, 1, 3) + x_val = np.zeros(shape=shape, dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + + input_values = {"x": x_val} + + def build(x): + return mb.add(x=x, y=mb.fill(shape=shape, value=1.0)) + + expected_output_types = [(2, 1, 3, types.fp32)] + expected_outputs = [np.full(shape=shape, fill_value=1.0)] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def 
test_builder_eval(self): + shape = np.random.randint(low=1, high=3, size=5).astype(np.int32) + res = mb.fill(shape=shape, value=1991.0).val + np.testing.assert_allclose(np.full(shape, fill_value=1991.0), res, atol=1e-04, rtol=1e-05) + + @pytest.mark.parametrize( + "compute_unit, backend, rank, value", + itertools.product( + compute_units, + backends, + [rank for rank in range(1, 6)], + [-1917.0, 0.0, 2048.0], + ), + ) + def test_builder_to_backend_stress(self, compute_unit, backend, rank, value): + shape = np.random.randint(low=1, high=4, size=rank).astype(np.int32) + x_val = np.zeros(shape=shape, dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return mb.add(x=x, y=mb.fill(shape=shape, value=value)) + + expected_outputs = [np.full(shape=shape, fill_value=value)] + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_symbolic(self, compute_unit, backend): + s_len = get_new_symbol() + input_placeholders = { + "shape": mb.placeholder(shape=(s_len,), dtype=types.int32), + } + + def build(shape): + return [mb.fill(shape=shape)] + + expected_output_types = [(UNK_VARIADIC, types.fp32)] + expected_outputs = [np.zeros(shape=(2, 1, 3), dtype=np.float32)] + input_values = {"shape": np.array([2, 1, 3], dtype=np.float32)} + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +@pytest.mark.skipif(not _HAS_TF_2, reason=MSG_TF2_NOT_FOUND) +class TestNonMaximumSuppression: + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + boxes_val = np.array( + [ + [ + [0.0, 0.0, 0.0, 0.0], + [1.0, 1.0, 1.0, 1.0], + [2.0, 2.0, 2.0, 2.0], + [3.0, 3.0, 3.0, 3.0], + ] + ], + dtype=np.float32, + ) + scores_val = np.array([[[-3.5], [9.4], [2.3], [0.7]]], dtype=np.float32) + input_placeholders = { + "boxes": mb.placeholder(shape=(1, 4, 4)), + "scores": mb.placeholder(shape=(1, 4, 1)), + } + input_values = {"boxes": boxes_val, "scores": scores_val} + + expected_output_types = [ + (1, 2, 4, types.fp32), + (1, 2, 1, types.fp32), + (1, 2, types.int32), + (1, types.int32), + ] + expected_outputs = [ + np.array([[[1.0, 1.0, 1.0, 1.0], [2.0, 2.0, 2.0, 2.0]]], dtype=np.float32), + np.array([[[9.4], [2.3]]], dtype=np.float32), + np.array([[1, 2]], dtype=np.int32), + np.array([2], dtype=np.int32), + ] + + def build(boxes, scores): + return mb.non_maximum_suppression( + boxes=boxes, + scores=scores, + iou_threshold=0.2, + score_threshold=0.4, + max_boxes=2, + per_class_suppression=True, + ) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @staticmethod + def _compute_iou_matrix(boxes): + # input is (N, 4), in order [center_w, center_h, width, height] + boxes = boxes.astype(np.float32) + center_w, center_h, width, height = np.split(boxes, 4, axis=1) + top = center_h + 0.5 * height + bottom = center_h - 0.5 * height + left = center_w - 0.5 * width + right 
= center_w + 0.5 * width + area = width * height + + h_b = np.minimum(top, np.transpose(top)) + w_b = np.minimum(right, np.transpose(right)) + h_a = np.maximum(bottom, np.transpose(bottom)) + w_a = np.maximum(left, np.transpose(left)) + + intersection_area = np.maximum(0, h_b - h_a) * np.maximum(0, w_b - w_a) + union_area = area + np.transpose(area) - intersection_area + return intersection_area / union_area + + @staticmethod + def _ref_non_maximum_suppression( + boxes, scores, iou_threshold, score_threshold, max_boxes, per_class_suppression + ): + """ + Reference implementation of Core ML's NMS op using TensorFlow. + boxes of shape (n_batch, n_box, 4), [center_w, center_h, width, height] + scores of shape (n_batch, n_box, n_score) + output shapes [ + (n_batch, max_boxes, 4), + (n_batch, max_boxes, n_score), + (n_batch, max_boxes), + (n_batch,) + ] + """ + n_batch, n_box, n_score = scores.shape + + iou_threshold = iou_threshold.astype(np.float32) + score_threshold = score_threshold.astype(np.float32) + + # convert box ids to TF style + center_w, center_h, width, height = np.split( + boxes, 4, axis=-1 + ) # (n_batch,n_box,1) + y1 = center_h - 0.5 * height + y2 = center_h + 0.5 * height + x1 = center_w - 0.5 * width + x2 = center_w + 0.5 * width + boxes_tf = np.concatenate((y1, x1, y2, x2), axis=-1) # (n_batch,n_box,4) + + out1 = np.zeros((n_batch, max_boxes, 4)) + out2 = np.zeros((n_batch, max_boxes, n_score)) + out3 = -1 * np.ones((n_batch, max_boxes)) + out4 = np.zeros((n_batch,)) + + for b in range(n_batch): + box_coord_matrix = boxes_tf[b, :, :] # (n_box,4) + score_vector = np.max(scores[b, :, :], axis=-1) # (n_box,) + if not per_class_suppression: + # this is the simple case as TF directly supports it + ids_g = tf.image.non_max_suppression( + box_coord_matrix, + score_vector, + max_output_size=max_boxes, + iou_threshold=iou_threshold, + score_threshold=score_threshold, + ) + ids = ids_g.numpy() + else: + # this is slightly complicated as TF does not directly support it + class_ids = np.argmax(scores[b, :, :], axis=-1) # (n_box,) + sorted_score_ids = np.argsort(-score_vector) + box_coord_matrix2 = np.take(box_coord_matrix, sorted_score_ids, axis=0) + score_vector2 = np.take(score_vector, sorted_score_ids) + class_ids = np.take(class_ids, sorted_score_ids) + classes_seen = dict() + ids_intermediate = np.array([], dtype=np.int32) + for n in range(n_box): + if class_ids[n] in classes_seen: + continue + c = class_ids[n] + classes_seen[c] = True + current_class_ids = np.where(class_ids == c)[0] + if len(current_class_ids) > 0: + feed_in1 = np.take(box_coord_matrix2, current_class_ids, axis=0) + feed_in2 = np.take(score_vector2, current_class_ids) + cur_ids_g = tf.image.non_max_suppression( + feed_in1, + feed_in2, + max_output_size=max_boxes, + iou_threshold=iou_threshold, + score_threshold=score_threshold, + ) + cur_ids = cur_ids_g.numpy() + + from_sort_ids = np.take(current_class_ids, cur_ids) + ids_intermediate = np.append(ids_intermediate, from_sort_ids) + ids_intermediate.sort() + ids = np.take(sorted_score_ids, ids_intermediate) + + xx = len(ids) + if xx == 0: + ids = np.array([np.argmax(score_vector)]) + xx = 1 + if xx > max_boxes: + ids = ids[:max_boxes] + xx = len(ids) + out1[b, :xx, :] = np.take(boxes[b, :, :], ids, axis=0) + out2[b, :xx, :] = np.take(scores[b, :, :], ids, axis=0) + out3[b, :xx] = ids + out4[b] = xx + + return out1, out2, out3, out4 + + @pytest.mark.parametrize( + ",".join( + [ + "compute_unit", + "backend", + "iou_threshold_percentile", + 
"score_threshold_percentile", + "n_boxes", + "n_batch", + "n_score", + "per_class_suppression", + ] + ), + itertools.product( + compute_units, + backends, + [0, 30, 80, 100], + [0, 40, 100], + [(10, 7), (30, 37), (100, 64)], + [1], + [1, 4, 7], + [True, False], + ), + ) + def test_builder_to_backend_stress( + self, + compute_unit, + backend, + iou_threshold_percentile, + score_threshold_percentile, + n_boxes, + n_batch, + n_score, + per_class_suppression, + ): + if backend[0] == "mlprogram" and iou_threshold_percentile == 0: + pytest.xfail("rdar://78080118") + + if backend[0] == "neuralnetwork" and n_boxes == (10, 7) and platform.machine() == "x86_64": + pytest.xfail("rdar://78080118 (Investigate failing tests for NMS in coremltools)") + + if backend == ("mlprogram", "fp16"): + pytest.xfail("CPU: rdar://80662705 and GPU: rdar://80661262") + + n_boxes_in, n_boxes_out = n_boxes + boxes_val = random_gen((n_batch, n_boxes_in, 4), 0, 100) + scores_val = random_gen((n_batch, n_boxes_in, n_score), -100, 100) + + iou_matrix = self._compute_iou_matrix(boxes_val[0, :, :]) + iou_matrix = iou_matrix[~np.eye(iou_matrix.shape[0], dtype=bool)].reshape( + iou_matrix.shape[0], -1 + ) + + if score_threshold_percentile == 0: + score_threshold = np.min(scores_val) - 1 + elif score_threshold_percentile == 100: + score_threshold = np.max(scores_val) + 1 + else: + score_threshold = ( + np.percentile(scores_val, score_threshold_percentile) + 0.01 + ) + + if iou_threshold_percentile == 0: + iou_threshold = np.maximum(np.min(iou_matrix) - 0.01, 0.0) + else: + iou_threshold = np.percentile(iou_matrix, iou_threshold_percentile) + 0.01 + iou_threshold = np.maximum(iou_threshold, 1e-8) + + ( + tf_boxes, + tf_scores, + tf_indices, + tf_num_boxes, + ) = self._ref_non_maximum_suppression( + boxes_val, + scores_val, + iou_threshold, + score_threshold, + n_boxes_out, + per_class_suppression, + ) + expected_outputs = [tf_boxes, tf_scores, tf_indices, tf_num_boxes] + expected_output_types = [ + tf_boxes.shape[:] + (types.fp32,), + tf_scores.shape[:] + (types.fp32,), + tf_indices.shape[:] + (types.int32,), + tf_num_boxes.shape[:] + (types.int32,), + ] + + input_placeholders = { + "boxes": mb.placeholder(shape=(n_batch, n_boxes_in, 4)), + "scores": mb.placeholder(shape=(n_batch, n_boxes_in, n_score)), + } + input_values = {"boxes": boxes_val, "scores": scores_val} + + def build(boxes, scores): + return mb.non_maximum_suppression( + boxes=boxes, + scores=scores, + iou_threshold=iou_threshold, + score_threshold=score_threshold, + max_boxes=n_boxes_out, + per_class_suppression=per_class_suppression, + ) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestNonZero: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x_val = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=x_val.shape)} + input_values = {"x": x_val} + + def build(x): + return [mb.non_zero(x=x)] + + expected_output_types = [(UNK_SYM, 2, types.int32)] + expected_outputs = [np.array(np.transpose(np.nonzero(x_val)))] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.random.randint(low=-1, 
high=2, size=(6, 1, 7)) + res = mb.non_zero(x=x_val) + np.testing.assert_allclose(np.transpose(np.nonzero(x_val)), res.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_shape_inference_for_deterministic_input(self): + # If the input is compile time known, the builder should be able to infer the shape from value + x_val = np.array([[0, 2], [1, 1]]) + res = mb.non_zero(x=x_val) + assert res.shape == (3, 2) + +class TestOneHot: + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x = np.array([1, 0], dtype=np.int32) + depth = 4 + + input_placeholders = { + "x": mb.placeholder(shape=x.shape, dtype=types.int32), + "y": mb.placeholder(shape=(1,), dtype=types.int32), + } + + input_values = {"x": x, "y": depth} + + def build(x, y): + return [ + mb.one_hot(indices=x, one_hot_vector_size=4), + mb.one_hot(indices=x, one_hot_vector_size=4, axis=0), + mb.one_hot( + indices=x, one_hot_vector_size=4, on_value=1.0, off_value=0.1 + ), + mb.one_hot( + indices=x, one_hot_vector_size=mb.squeeze(x=y), on_value=1, off_value=9 + ), + ] + + expected_output_types = [ + (2, 4, types.int32), + (4, 2, types.int32), + (2, 4, types.fp32), + (2, UNK_SYM, types.int32), + ] + + expected_outputs = [ + np.array([[0, 1, 0, 0], [1, 0, 0, 0]], dtype=np.float32), + np.array([[0, 1], [1, 0], [0, 0], [0, 0]], dtype=np.float32), + np.array([[0.1, 1, 0.1, 0.1], [1, 0.1, 0.1, 0.1]], dtype=np.float32), + np.array([[9, 1, 9, 9], [1, 9, 9, 9]], dtype=np.float32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestPad: + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + def test_constant_mode(): + t = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + pad = np.array([1, 1, 2, 2], dtype=np.int32) + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return mb.pad(x=x, pad=pad, mode="constant", constant_val=0.0) + + expected_output_types = (4, 7, types.fp32) + expected_outputs = np.array( + [ + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 1.0, 2.0, 3.0, 0.0, 0.0], + [0.0, 0.0, 4.0, 5.0, 6.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + ], + dtype=np.float32, + ) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + def test_constant_mode_constant_val(): + t = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + pad = np.array([1, 1, 2, 2], dtype=np.int32) + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return mb.pad(x=x, pad=pad, mode="constant", constant_val=0.5) + + expected_output_types = (4, 7, types.fp32) + expected_outputs = np.array( + [ + [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + [0.5, 0.5, 1.0, 2.0, 3.0, 0.5, 0.5], + [0.5, 0.5, 4.0, 5.0, 6.0, 0.5, 0.5], + [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + ], + dtype=np.float32, + ) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + def test_reflect_mode(): + t = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + pad = np.array([1, 1, 2, 2], 
dtype=np.int32) + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return mb.pad(x=x, pad=pad, mode="reflect") + + expected_output_types = (4, 7, types.fp32) + expected_outputs = np.array( + [ + [6.0, 5.0, 4.0, 5.0, 6.0, 5.0, 4.0], + [3.0, 2.0, 1.0, 2.0, 3.0, 2.0, 1.0], + [6.0, 5.0, 4.0, 5.0, 6.0, 5.0, 4.0], + [3.0, 2.0, 1.0, 2.0, 3.0, 2.0, 1.0], + ], + dtype=np.float32, + ) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + def test_replicate_mode(): + t = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + pad = np.array([1, 1, 2, 2], dtype=np.int32) + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return mb.pad(x=x, pad=pad, mode="replicate") + + expected_output_types = (4, 7, types.fp32) + expected_outputs = np.array( + [ + [1.0, 1.0, 1.0, 2.0, 3.0, 3.0, 3.0], + [1.0, 1.0, 1.0, 2.0, 3.0, 3.0, 3.0], + [4.0, 4.0, 4.0, 5.0, 6.0, 6.0, 6.0], + [4.0, 4.0, 4.0, 5.0, 6.0, 6.0, 6.0], + ], + dtype=np.float32, + ) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + def test_constant_general(): + t = np.arange(12, dtype=np.float32).reshape([2, 2, 3]) + pad = np.array([[1, 1], [2, 2], [1, 1]], dtype=np.int32) + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return mb.pad( + x=x, pad=pad.reshape(-1), mode="constant", constant_val=0.0 + ) + + expected_output_types = (4, 6, 5, types.fp32) + expected_outputs = np.pad(t, pad, mode="constant") + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + # Test different modes + test_constant_mode() + test_constant_mode_constant_val() + test_reflect_mode() + test_replicate_mode() + test_constant_general() + + @ssa_fn + def test_builder_eval(self): + def test_constant_mode(): + x_val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + v = mb.pad( + x=x_val, + pad=np.array([1, 1, 2, 2], dtype=np.int32), + mode="constant", + constant_val=0.0, + ) + expected_outputs = np.array( + [ + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 1.0, 2.0, 3.0, 0.0, 0.0], + [0.0, 0.0, 4.0, 5.0, 6.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + ], + dtype=np.float32, + ) + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + def test_reflect_mode(): + x_val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + v = mb.pad( + x=x_val, pad=np.array([1, 1, 2, 2], dtype=np.int32), mode="reflect" + ) + expected_outputs = np.array( + [ + [6.0, 5.0, 4.0, 5.0, 6.0, 5.0, 4.0], + [3.0, 2.0, 1.0, 2.0, 3.0, 2.0, 1.0], + [6.0, 5.0, 4.0, 5.0, 6.0, 5.0, 4.0], + [3.0, 2.0, 1.0, 2.0, 3.0, 2.0, 1.0], + ], + dtype=np.float32, + ) + np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05) + + def test_replicate_mode(): + x_val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + v = mb.pad( + x=x_val, pad=np.array([1, 1, 2, 2], dtype=np.int32), mode="replicate" + ) + expected_outputs = np.array( + [ + [1.0, 1.0, 1.0, 2.0, 3.0, 3.0, 3.0], + [1.0, 1.0, 1.0, 2.0, 3.0, 3.0, 3.0], + [4.0, 4.0, 4.0, 5.0, 6.0, 6.0, 6.0], + [4.0, 4.0, 4.0, 5.0, 6.0, 6.0, 6.0], + ], + dtype=np.float32, + ) + 
np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05)
+
+        def test_constant_general():
+            x_val = np.arange(12, dtype=np.float32).reshape([2, 2, 3])
+            pad = np.array([[1, 1], [2, 2], [1, 1]], dtype=np.int32)
+            v = mb.pad(x=x_val, pad=pad.reshape(-1), mode="constant", constant_val=0.0)
+            expected_outputs = np.pad(x_val, pad, mode="constant")
+            np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05)
+
+        # Test different modes
+        test_constant_mode()
+        test_reflect_mode()
+        test_replicate_mode()
+        test_constant_general()
+
+
+class TestRange1d:
+    @pytest.mark.parametrize(
+        "compute_unit, backend",
+        itertools.product(
+            compute_units,
+            backends,
+        ),
+    )
+    def test_builder_to_backend_smoke(self, compute_unit, backend):
+        x = 15.0
+        y = 5.0
+        z = 2.0
+        # Model inputs must have rank at least 1
+        input_placeholders = {
+            "x": mb.placeholder(shape=(1,)),
+            "y": mb.placeholder(shape=(1,)),
+            "z": mb.placeholder(shape=(1,)),
+        }
+        input_values = {"x": x, "y": y, "z": z}
+
+        def build(x, y, z):
+            return [
+                mb.range_1d(start=mb.squeeze(x=y), end=15.0, step=2.0),
+                mb.range_1d(start=mb.squeeze(x=y), end=15.0, step=mb.squeeze(x=z)),
+                mb.range_1d(start=mb.squeeze(x=y), end=mb.squeeze(x=x), step=2.0),
+                mb.range_1d(start=mb.squeeze(x=y), end=mb.squeeze(x=x), step=mb.squeeze(x=z)),
+                mb.range_1d(start=5.0, end=15.0, step=mb.squeeze(x=z)),
+                mb.range_1d(start=5.0, end=mb.squeeze(x=x), step=2.0),
+                mb.range_1d(start=5.0, end=mb.squeeze(x=x), step=mb.squeeze(x=z)),
+            ]
+
+        expected_output_types = [
+            (UNK_SYM, types.fp32),
+            (UNK_SYM, types.fp32),
+            (UNK_SYM, types.fp32),
+            (UNK_SYM, types.fp32),
+            (UNK_SYM, types.fp32),
+            (UNK_SYM, types.fp32),
+            (UNK_SYM, types.fp32),
+        ]
+
+        expected_outputs = [
+            np.array([5, 7, 9, 11, 13], dtype=np.float32),
+            np.array([5, 7, 9, 11, 13], dtype=np.float32),
+            np.array([5, 7, 9, 11, 13], dtype=np.float32),
+            np.array([5, 7, 9, 11, 13], dtype=np.float32),
+            np.array([5, 7, 9, 11, 13], dtype=np.float32),
+            np.array([5, 7, 9, 11, 13], dtype=np.float32),
+            np.array([5, 7, 9, 11, 13], dtype=np.float32),
+        ]
+
+        run_compare_builder(
+            build,
+            input_placeholders,
+            input_values,
+            expected_output_types,
+            expected_outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+    @pytest.mark.parametrize(
+        "compute_unit, backend",
+        itertools.product(
+            compute_units,
+            backends,
+        ),
+    )
+    def test_large_array(self, compute_unit, backend):
+        input_placeholders = {
+            "x": mb.placeholder(shape=(1,)),  # dummy input
+        }
+        input_values = {"x": 0.5}
+
+        def build(x):
+            return [mb.range_1d(start=0.0, end=2000000.0, step=1.0)]
+
+        expected_output_types = [
+            (2000000, types.fp32)
+        ]
+
+        expected_outputs = [
+            np.arange(0.0, 2000000.0, 1.0),
+        ]
+
+        mlmodel = run_compare_builder(
+            build,
+            input_placeholders,
+            input_values,
+            expected_output_types,
+            expected_outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+        # verify that the range_1d op is not const folded
+        prog = mlmodel._mil_program
+        ops = get_op_types_in_program(prog)
+        assert ops == ["range_1d", "identity"]
+
+    @ssa_fn
+    def test_builder_eval(self):
+        v = mb.range_1d(start=5, end=15, step=2)
+        np.testing.assert_allclose(np.arange(5, 15, 2), v.val, atol=1e-04, rtol=1e-05)
+
+
+class TestTile:
+    @pytest.mark.parametrize(
+        "compute_unit, backend",
+        itertools.product(
+            compute_units,
+            backends,
+        ),
+    )
+    def test_builder_to_backend_smoke(self, compute_unit, backend):
+        x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
+        input_placeholders = {"x": 
mb.placeholder(shape=x.shape)} + + input_values = {"x": x} + + def build(x): + return [ + mb.tile(x=x, reps=(1, 1)), + mb.tile(x=x, reps=(2, 1)), + ] + + expected_output_types = [ + (2, 3, types.fp32), + (4, 3, types.fp32), + ] + + expected_outputs = [ + x, + np.array([[1, 2, 3], [4, 5, 6], [1, 2, 3], [4, 5, 6]], dtype=np.float32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + v = mb.tile(x=x, reps=(1, 2)) + np.testing.assert_allclose(np.tile(x, reps=(1, 2)), v.val, atol=1e-04, rtol=1e-05) + + +class TestDynamicTile: + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + rep1 = np.array([1, 1]).astype(np.int32) + rep2 = np.array([2, 1]).astype(np.int32) + rep3 = np.array([2, 3]).astype(np.int32) + input_placeholders = { + "x": mb.placeholder(shape=x.shape), + "reps1": mb.placeholder(shape=rep1.shape, dtype=types.int32), + "reps2": mb.placeholder(shape=rep2.shape, dtype=types.int32), + "reps3": mb.placeholder(shape=rep3.shape, dtype=types.int32), + } + + input_values = {"x": x, "reps1": rep1, "reps2": rep2, "reps3": rep3} + + def build(x, reps1, reps2, reps3): + return [ + mb.tile(x=x, reps=reps1), + mb.tile(x=x, reps=reps2), + mb.tile(x=x, reps=reps3), + ] + + expected_output_types = [ + (UNK_SYM, UNK_SYM, types.fp32), + (UNK_SYM, UNK_SYM, types.fp32), + (UNK_SYM, UNK_SYM, types.fp32), + ] + + expected_outputs = [ + x, + np.array([[1, 2, 3], [4, 5, 6], [1, 2, 3], [4, 5, 6]], dtype=np.float32), + np.array( + [ + [1, 2, 3, 1, 2, 3, 1, 2, 3], + [4, 5, 6, 4, 5, 6, 4, 5, 6], + [1, 2, 3, 1, 2, 3, 1, 2, 3], + [4, 5, 6, 4, 5, 6, 4, 5, 6], + ], + dtype=np.float32, + ), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestTopK: + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + val = np.array([[-1.0, 2.0, -3.0], [4.0, -5.0, 6.0]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + + def build(x): + return mb.topk(x=x, k=2, axis=1) + + expected_output_types = [ + (2, 2, types.fp32), + (2, 2, types.int32), + ] + expected_outputs = [ + np.array([[2.0, -1.0], [6.0, 4.0]], dtype=np.float32), + np.array([[1, 0], [2, 0]], dtype=np.float32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, return_indices, sort", + itertools.product( + compute_units, + backends, + [True, False], + [True, False], + ) + ) + def test_builder_to_backend_smoke_iOS16(self, compute_unit, backend, return_indices, sort): + if backend[0] == "neuralnetwork": + pytest.skip("nn backend not supported") + if _macos_version() < (13, 0): + pytest.skip("New functionality in macOS13/iOS16") + + if not return_indices: + pytest.xfail( + "rdar://92880117 (Topk with return_indices = False error out at the MIL->EIR stage)" + ) + + val = 
np.array([[-1.0, 2.0, -3.0], [4.0, -5.0, 6.0]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + + def build(x): + return mb.topk(x=x, k=2, axis=1, return_indices=return_indices, sort=sort) + + expected_output_types = [ + (2, 2, types.fp32), + (2, 2, types.int32), + ] + expected_outputs = [ + np.array([[2.0, -1.0], [6.0, 4.0]], dtype=np.float32), + np.array([[1, 0], [2, 0]], dtype=np.float32), + ] + + if not return_indices: + expected_output_types = expected_output_types[:1] + expected_outputs = expected_outputs[:1] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16, + ) + + @ssa_fn + def test_builder_eval(self): + def np_topk(x, k, axis, ascending=False): + indices = np.argsort(x, axis=axis) + if not ascending: + indices = np.argsort(-x, axis=axis) + slc = [slice(None)] * len(x.shape) + slc[axis] = slice(0, k) + indices = indices[tuple(slc)] + values = np.take_along_axis(x, indices, axis=axis) + return values, indices + + val = np.array([[-1.0, 7.0, -3.0], [4.0, -5.0, 8.0]], dtype=np.float32) + res_values, res_indices = mb.topk(x=val, k=1, axis=0) + ref_values, ref_indices = np_topk(x=val, k=1, axis=0) + np.testing.assert_allclose(ref_values, res_values.val, atol=1e-04, rtol=1e-05) + np.testing.assert_allclose(ref_indices, res_indices.val, atol=1e-04, rtol=1e-05) + res_values, res_indices = mb.topk(x=val, k=2, axis=-1, ascending=True) + ref_values, ref_indices = np_topk(x=val, k=2, axis=-1, ascending=True) + np.testing.assert_allclose(ref_values, res_values.val, atol=1e-04, rtol=1e-05) + np.testing.assert_allclose(ref_indices, res_indices.val, atol=1e-04, rtol=1e-05) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_symbolic(self, compute_unit, backend): + s0 = get_new_symbol() + + val = np.array([[1.0, 2.0, -3.0], [4.0, -5.0, 6.0]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=(s0, 3))} + input_values = {"x": val} + + def build(x): + return mb.topk(x=x, k=2, axis=-1, ascending=True) + + expected_output_types = [ + (s0, 2, types.fp32), + (s0, 2, types.int32), + ] + expected_outputs = [ + np.array([[-3.0, 1.0], [-5.0, 4.0]], dtype=np.float32), + np.array([[2, 0], [1, 0]], dtype=np.float32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestFlatten2d: + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + t = np.array( + [[[1, 2, 3], [4, 5, 6]], [[-1, -2, -3], [-4, -5, -6]]], dtype=np.float32 + ) + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return [mb.flatten2d(x=x)] + + expected_output_types = [ + (2, 6, types.fp32), + ] + expected_outputs = [ + np.array([[1, 2, 3, 4, 5, 6], [-1, -2, -3, -4, -5, -6]], dtype=np.float32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, rank, axis, backend", + itertools.product( + compute_units, + range(1, 6), + range(-5, 6), + backends, + 
), + ) + def test_builder_to_backend_stress(self, compute_unit, rank, axis, backend): + if axis < -rank or axis >= rank + 1: + return + + shape = np.random.randint(low=2, high=6, size=rank) + t = np.random.random(shape) + + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return [mb.flatten2d(x=x, axis=axis)] + + np_axis = axis + rank if axis < 0 else axis + pl, pr = 1, 1 + for i in range(0, np_axis): + pl *= shape[i] + for i in range(np_axis, len(shape)): + pr *= shape[i] + + new_shape = [pl, pr] + ref = t.reshape(new_shape) + + expected_outputs = [ref] + expected_output_types = [ + tuple(list(ref.shape) + [types.fp32]), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + t = np.array([[[1, 2, 3], [4, 5, 6]]], dtype=np.float32) + f = mb.flatten2d(x=t) + expected_f = np.array([[1, 2, 3, 4, 5, 6]], dtype=np.float32) + np.testing.assert_allclose(expected_f, f.val, atol=1e-04, rtol=1e-05) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_symbolic(self, compute_unit, backend): + s0 = get_new_symbol() + + input_placeholders = { + "x": mb.placeholder(shape=(s0, 4, 5, 6)), + } + + def build(x): + return [mb.flatten2d(x=x)] + + input = np.random.rand(10, 4, 5, 6) + output = input.reshape(10, -1) + + expected_output_types = (s0, 120, types.fp32) + expected_outputs = [output] + + input_values = {"x": input} + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestShape: + @pytest.mark.parametrize( + "compute_unit, backend, input_type", + itertools.product( + compute_units, + backends, + ["int32", "float32"] + ) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend, input_type): + np_type = np.int32 if input_type == "int32" else np.float32 + mb_type = types.int32 if input_type == "int32" else types.fp32 + + t = np.array([[1, 2, 3], [4, 5, 6]], dtype=np_type) + input_placeholders = {"x": mb.placeholder(shape=t.shape, dtype=mb_type)} + input_values = {"x": t} + + def build(x): + return mb.shape(x=x) + + expected_output_types = (2, types.int32) + expected_outputs = [ + np.array([2, 3], dtype=np.int32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + t = np.array([[[1, 2, 3], [4, 5, 6]]], dtype=np.float32) + f = mb.shape(x=t) + expected_f = np.array([1, 2, 3], dtype=np.float32) + np.testing.assert_allclose(expected_f, f.val, atol=1e-04, rtol=1e-05) + + @pytest.mark.parametrize( + "compute_unit, backend, input_type", + itertools.product( + compute_units, + backends, + ["int32", "float32"] + ) + ) + def test_builder_to_backend_symbolic(self, compute_unit, backend, input_type): + np_type = np.int32 if input_type == "int32" else np.float32 + mb_type = types.int32 if input_type == "int32" else types.fp32 + + s0 = get_new_symbol() + + input_placeholders = { + "x": mb.placeholder(shape=(s0, 4, 5, 6), dtype=mb_type), + } + + def build(x): + return [mb.shape(x=x)] + + input = np.random.rand(10, 4, 5, 6) + input = input.astype(np_type) + output = np.array([10, 4, 5, 6], dtype=np.int32) + + 
expected_output_types = (4, types.int32) + expected_outputs = [output] + + input_values = {"x": input} + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestIdentity: + @pytest.mark.parametrize( + "compute_unit, backend, input_type", + itertools.product( + compute_units, + backends, + ["int32", "float32"] + ) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend, input_type): + np_type = np.int32 if input_type == "int32" else np.float32 + mb_type = types.int32 if input_type == "int32" else types.fp32 + + t = np.array([[1, 2, 3], [4, 5, 6]], dtype=np_type) + input_placeholders = {"x": mb.placeholder(shape=t.shape, dtype=mb_type)} + input_values = {"x": t} + + def build(x): + return mb.identity(x=x) + + expected_output_types = [(2, 3, mb_type)] + expected_outputs = [ + np.array([[1, 2, 3], [4, 5, 6]], dtype=np_type), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + t = np.array([[[1, 2, 3], [4, 5, 6]]], dtype=np.float32) + f = mb.identity(x=t) + expected_f = np.array([[[1, 2, 3], [4, 5, 6]]], dtype=np.float32) + np.testing.assert_allclose(expected_f, f.val, atol=1e-04, rtol=1e-05) + + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_symbolic(self, compute_unit, backend): + input_placeholders = { + "x": mb.placeholder(shape=(10, 4, 5, 6)), + } + + def build(x): + return [mb.identity(x=x)] + + input = np.random.rand(10, 4, 5, 6) + output = input + + expected_output_types = [(10, 4, 5, 6, types.fp32)] + expected_outputs = [output] + + input_values = {"x": input} + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestArgSort: + @pytest.mark.parametrize( + "compute_unit, backend", + itertools.product( + compute_units, + backends, + ), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + val = np.array([[-1.0, 2.0, -3.0], [4.0, -5.0, 6.0]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + + def build(x): + return [mb.argsort(x=x), mb.argsort(x=x, axis=0, ascending=True)] + + expected_output_types = [ + (2, 3, types.int32), + (2, 3, types.int32), + ] + expected_outputs = [ + np.array([[1, 0, 2], [2, 0, 1]], dtype=np.int32), + np.array([[0, 1, 0], [1, 0, 1]], dtype=np.int32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = random_gen(shape=(1, 3, 2, 2), rand_min=-100, rand_max=100) + res = mb.argsort(x=x_val, axis=-3) + # The default np argsort mode is ascending, which is opposite to MIL's argsort op. 
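+        # (negating the input before np.argsort reproduces MIL's descending order here;
+        # the equivalence assumes no duplicate values in x_val, which random_gen makes unlikely)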
+ np.testing.assert_allclose(np.argsort(-x_val, axis=-3), res.val, atol=1e-04, rtol=1e-05) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_tensor_transformation.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_tensor_transformation.py new file mode 100644 index 00000000..11106529 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_tensor_transformation.py @@ -0,0 +1,1347 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools + +import numpy as np +import pytest + +import coremltools as ct +from coremltools._deps import _HAS_TORCH, MSG_TORCH_NOT_FOUND +from coremltools.converters.mil import testing_reqs +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import get_new_symbol, types +from coremltools.converters.mil.mil.types import nptype_from_builtin +from coremltools.converters.mil.testing_reqs import backends, compute_units +from coremltools.converters.mil.testing_utils import ssa_fn + +from .testing_utils import UNK_SYM, UNK_VARIADIC, run_compare_builder + +if _HAS_TORCH: + import torch + + +class TestDepthToSpace: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + # original input type is (1, 4, 1, 1, fp32) + val = np.array([[[[9.0]], [[5.0]], [[1.0]], [[3.0]]]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + + def build(x): + return [mb.depth_to_space(x=x, block_size=2)] + + expected_output_types = (1, 1, 2, 2, types.fp32) + expected_outputs = np.array([[[[9.0, 5.0], [1.0, 3.0]]]], dtype=np.float32) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestSpaceToBatch: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + # original input type is (2, 1, 2, 4, fp32) + val = np.array([[[[ 1, 2, 3, 4], + [ 5, 6, 7, 8]]], + [[[ 9, 10, 11, 12], + [13, 14, 15, 16]]]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + + def build(x): + return [mb.space_to_batch(x=x, block_shape=[2, 2], paddings=[[0, 0], [2, 0]])] + + expected_output_types = (8, 1, 1, 3, types.fp32) + expected_outputs = np.array([[[[ 0, 1, 3]]], + [[[ 0, 9, 11]]], + [[[ 0, 2, 4]]], + [[[ 0, 10, 12]]], + [[[ 0, 5, 7]]], + [[[ 0, 13, 15]]], + [[[ 0, 6, 8]]], + [[[ 0, 14, 16]]]], dtype=np.float32) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestBatchToSpace: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + # original input type is (8, 1, 1, 3, fp32) + val = np.array([[[[ 0, 1, 3]]], + [[[ 0, 9, 11]]], + [[[ 0, 2, 4]]], + [[[ 0, 10, 12]]], + [[[ 0, 5, 7]]], + [[[ 0, 13, 15]]], + [[[ 0, 6, 8]]], + [[[ 0, 14, 16]]]], 
dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + + def build(x): + return [mb.batch_to_space(x=x, block_shape=[2, 2], crops=[[0, 0], [2, 0]])] + + expected_output_types = (2, 1, 2, 4, types.fp32) + expected_outputs = np.array([[[[ 1, 2, 3, 4], + [ 5, 6, 7, 8]]], + [[[ 9, 10, 11, 12], + [13, 14, 15, 16]]]], dtype=np.float32) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestExpandDims: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + t = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return [ + mb.expand_dims(x=x, axes=[0]), + mb.expand_dims(x=x, axes=[1]), + mb.expand_dims(x=x, axes=[2]), + mb.expand_dims(x=x, axes=[-1]), + mb.expand_dims(x=x, axes=[0, 1]), + mb.expand_dims(x=x, axes=[-2, -1]), + ] + + expected_output_types = [ + (1, 2, 3, types.fp32), + (2, 1, 3, types.fp32), + (2, 3, 1, types.fp32), + (2, 3, 1, types.fp32), + (1, 1, 2, 3, types.fp32), + (2, 3, 1, 1, types.fp32), + ] + expected_outputs = [ + np.array([[[1, 2, 3], [4, 5, 6]]], dtype=np.float32), + np.array([[[1, 2, 3]], [[4, 5, 6]]], dtype=np.float32), + np.array([[[1], [2], [3]], [[4], [5], [6]]], dtype=np.float32), + np.array([[[1], [2], [3]], [[4], [5], [6]]], dtype=np.float32), + np.array([[[[1, 2, 3], [4, 5, 6]]]], dtype=np.float32), + np.array([[[[1]], [[2]], [[3]]], [[[4]], [[5]], [[6]]]], dtype=np.float32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_symbolic(self, compute_unit, backend): + s0 = get_new_symbol() + + input_placeholders = { + "x": mb.placeholder(shape=(2, s0)), + } + + def build(x): + return [ + mb.expand_dims(x=x, axes=[-1]), + mb.expand_dims(x=x, axes=[1]), + ] + + expected_output_types = [ + (2, s0, 1, types.fp32), + (2, 1, s0, types.fp32), + ] + expected_outputs = [ + np.array([[[1], [2], [3]], [[4], [5], [6]]], dtype=np.float32), + np.array([[[1, 2, 3]], [[4, 5, 6]]], dtype=np.float32), + ] + + input_values = { + "x": np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32), + } + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x_val = np.random.rand(1, 6) + v1 = mb.expand_dims(x=x_val, axes=[2]) + np.testing.assert_allclose(np.expand_dims(x_val, 2), v1.val, atol=1e-04, rtol=1e-05) + + v2 = mb.expand_dims(x=x_val, axes=[-1]) + np.testing.assert_allclose(np.expand_dims(x_val, -1), v2.val, atol=1e-04, rtol=1e-05) + + v3 = mb.expand_dims(x=x_val, axes=[-1, -2]) + ref = np.expand_dims(np.expand_dims(x_val, -1), -1) + np.testing.assert_allclose(ref, v3.val, atol=1e-04, rtol=1e-05) + + v4 = mb.expand_dims(x=x_val, axes=[0, -1, -2]) + np.testing.assert_allclose(np.reshape(x_val, (1, 1, 6, 1, 1)), v4.val, atol=1e-04, rtol=1e-05) + + @pytest.mark.parametrize( + "compute_unit, backend, rank_and_axis", + itertools.product( + compute_units, + backends, + [ + (rank, axis) + for 
rank in range(1, 5)
+                for axis in range(-rank - 1, rank + 1)
+            ],
+        ),
+    )
+    def test_builder_to_backend_programmatic_one_axis(
+        self, compute_unit, backend, rank_and_axis
+    ):
+        rank, axis = rank_and_axis
+        x_shape = np.random.randint(low=2, high=6, size=rank)
+        input_placeholders = {"x": mb.placeholder(shape=x_shape)}
+        input_values = {"x": np.random.sample(x_shape).astype(np.float32)}
+
+        def build(x):
+            return mb.expand_dims(x=x, axes=[axis])
+
+        adjusted_axis = axis if axis >= 0 else rank + axis + 1
+        x_shape = list(x_shape)
+        out_shape = x_shape[:adjusted_axis] + [1] + x_shape[adjusted_axis:]
+        expected_output_types = tuple(out_shape[:]) + (types.fp32,)
+
+        run_compare_builder(
+            build,
+            input_placeholders,
+            input_values,
+            expected_output_types,
+            np.expand_dims(input_values["x"], axis),
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+    @pytest.mark.parametrize(
+        "compute_unit, backend, rank_and_axes",
+        itertools.product(
+            compute_units,
+            backends,
+            [
+                (3, [0, 1]),
+                (3, [1, 0]),
+                (3, [-2, -1]),
+                (3, [-1, -2]),
+                (2, [-3, -1]),
+                (2, [-3, 1, -1]),
+                (2, [-2, 0]),
+                (1, [-1, -2, -3, -4]),
+                (1, [0, -1]),
+                (1, [0, 1, -2, -1]),
+            ],
+        ),
+    )
+    def test_builder_to_backend_programmatic_multiple_axes(
+        self, compute_unit, backend, rank_and_axes
+    ):
+        rank, axes = rank_and_axes
+        x_shape = np.random.randint(low=1, high=6, size=rank)
+        input_placeholders = {"x": mb.placeholder(shape=x_shape)}
+        input_values = {"x": np.random.sample(x_shape).astype(np.float32)}
+
+        def build(x):
+            return mb.expand_dims(x=x, axes=axes)
+
+        out_shape = list(x_shape)
+        out_rank = rank + len(axes)
+        pos_axes = sorted([out_rank + axis if axis < 0 else axis for axis in axes])
+        for axis in pos_axes:
+            out_shape.insert(axis, 1)
+
+        expected_outputs = np.reshape(input_values["x"], out_shape)
+        expected_output_types = tuple(out_shape) + (types.fp32,)
+
+        run_compare_builder(
+            build,
+            input_placeholders,
+            input_values,
+            expected_output_types,
+            expected_outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+class TestReshapeLike:
+    @pytest.mark.parametrize(
+        "compute_unit, backend, InputShape_RefShapes_Begins_Ends_EndMasks, InputType_RefType",
+        itertools.product(
+            compute_units,
+            backends,
+            [
+                [(4, 3), ((2, 2, 3), (1, 3)), (0, 1), (2, 2), (False, False)],
+                [(32,), ((1, 2, 2, 2), (3, 2, 2)), (1, 1), (0, 0), (True, True)],
+                [(72, 1), ((1, 2, 3, 4, 1), (3,)), (1, 0), (0, 1), (True, False)],
+            ],
+            [(types.bool, types.fp32), (types.fp32, types.bool)],
+        )
+    )
+    def test_builder_to_backend_smoke(
+        self,
+        compute_unit,
+        backend,
+        InputShape_RefShapes_Begins_Ends_EndMasks,
+        InputType_RefType,
+    ):
+        if backend[0] == "neuralnetwork":
+            pytest.skip("reshape_like not supported in the neuralnetwork backend.")
+
+        if ct.utils._macos_version() < (13, 0):
+            pytest.skip("reshape_like not supported on macOS 12 or older.")
+
+        input_shape, ref_shapes, begins, ends, end_masks = InputShape_RefShapes_Begins_Ends_EndMasks
+        ref_shape_1, ref_shape_2 = ref_shapes
+        input_type, ref_type = InputType_RefType
+
+        t = np.random.rand(*input_shape).astype(np.float32)
+        ref_tensor_1 = np.random.rand(*ref_shape_1).astype(np.float32)
+        ref_tensor_2 = np.random.rand(*ref_shape_2).astype(np.float32)
+
+        input_placeholders = {
+            "x": mb.placeholder(shape=t.shape),
+            "ref_tensor_1": mb.placeholder(shape=ref_shape_1),
+            "ref_tensor_2": mb.placeholder(shape=ref_shape_2),
+        }
+        input_values = {
+            "x": t,
+            "ref_tensor_1": ref_tensor_1,
+            "ref_tensor_2": ref_tensor_2,
+        }
+
+        def build(x, ref_tensor_1, ref_tensor_2):
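+            # reshape_like infers the target shape from slices of the
+            # reference tensors and never changes x's element type, so the
+            # casts below exercise the bool input/reference variants.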
+ if input_type == types.bool: + x = mb.cast(x=x, dtype="bool") + + if ref_type == types.bool: + ref_tensor_1 = mb.cast(x=ref_tensor_1, dtype="bool") + ref_tensor_2 = mb.cast(x=ref_tensor_2, dtype="bool") + + ref_tensors = (ref_tensor_1, ref_tensor_2) + return mb.reshape_like(x=x, ref_tensors=ref_tensors, begins=begins, ends=ends, end_masks=end_masks) + + output_shape = () + for ref_shape, begin, end, end_mask in zip((ref_shape_1, ref_shape_2), begins, ends, end_masks): + if end_mask: + output_shape += tuple(ref_shape[begin:]) + else: + output_shape += tuple(ref_shape[begin:end]) + + expected_output_types = [ + output_shape + (input_type,), + ] + expected_outputs = [ + np.reshape(t, output_shape).astype(nptype_from_builtin(input_type)), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16 + ) + + +class TestReshape: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + t = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=t.shape)} + input_values = {"x": t} + + def build(x): + return [ + mb.reshape(x=x, shape=[3, 2]), + mb.reshape(x=x, shape=[2, -1]), + mb.reshape(x=x, shape=[2, 1, 1, 3]), + ] + + expected_output_types = [ + (3, 2, types.fp32), + (2, 3, types.fp32), + (2, 1, 1, 3, types.fp32), + ] + expected_outputs = [ + np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32), + np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32), + np.array([[[[1.0, 2.0, 3.0]]], [[[4.0, 5.0, 6.0]]]], dtype=np.float32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + t = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + r = mb.reshape(x=t, shape=[3, 2]) + expected_r = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32) + np.testing.assert_allclose(expected_r, r.val, atol=1e-04, rtol=1e-05) + r2 = mb.reshape(x=t, shape=[2, -1]) + expected_r2 = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + np.testing.assert_allclose(expected_r2, r2.val, atol=1e-04, rtol=1e-05) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_symbolic(self, compute_unit, backend): + s0 = get_new_symbol() + s_len = get_new_symbol() + + input_placeholders = { + "x": mb.placeholder(shape=(2, s0)), + "shape": mb.placeholder(shape=(3,), dtype=types.int32), + "shape2": mb.placeholder(shape=(s_len,), dtype=types.int32), + } + + def build(x, shape, shape2): + return [ + mb.reshape(x=x, shape=[2, -1]), + mb.reshape(x=x, shape=[1, -1]), + mb.reshape(x=x, shape=[2, 1, 1, -1]), + mb.reshape(x=x, shape=shape), + mb.reshape(x=x, shape=shape2), + ] + + expected_output_types = [ + (2, s0, types.fp32), + (1, 2 * s0, types.fp32), + (2, 1, 1, s0, types.fp32), + (UNK_SYM, UNK_SYM, UNK_SYM, types.fp32), + (UNK_VARIADIC, types.fp32), + ] + expected_outputs = [ + np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32), + np.array([[1, 2, 3, 4, 5, 6]], dtype=np.float32), + np.array([[[[1.0, 2.0, 3.0]]], [[[4.0, 5.0, 6.0]]]], dtype=np.float32), + np.array([[[1, 2, 3]], [[4, 5, 6]]], dtype=np.float32), + np.array([[[1, 2, 3]], [[4, 5, 6]]], dtype=np.float32), + ] + + input_values = { + "x": 
np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32),
+            "shape": np.array([2, 1, 3], dtype=np.int32),
+            "shape2": np.array([2, 1, 3], dtype=np.int32),
+        }
+        run_compare_builder(
+            build,
+            input_placeholders,
+            input_values,
+            expected_output_types,
+            expected_outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+
+class TestReverse:
+    @pytest.mark.parametrize(
+        "compute_unit, backend", itertools.product(compute_units, backends,)
+    )
+    def test_builder_to_backend_smoke(self, compute_unit, backend):
+        val = np.array([[-1.0, 2.0, -3.0], [4.0, -5.0, 6.0]], dtype=np.float32)
+        input_placeholders = {"x": mb.placeholder(shape=val.shape)}
+        input_values = {"x": val}
+
+        def build(x):
+            return [mb.reverse(x=x), mb.reverse(x=x, axes=[0])]
+
+        expected_output_types = [(2, 3, types.fp32), (2, 3, types.fp32)]
+        expected_outputs = [
+            np.array([[6.0, -5.0, 4.0], [-3.0, 2.0, -1.0]], dtype=np.float32),
+            np.array([[4.0, -5.0, 6.0], [-1.0, 2.0, -3.0]], dtype=np.float32),
+        ]
+
+        run_compare_builder(
+            build,
+            input_placeholders,
+            input_values,
+            expected_output_types,
+            expected_outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+    @ssa_fn
+    def test_builder_eval(self):
+        val = np.array([[-1.0, 7.0, -3.0], [4.0, -5.0, 8.0]], dtype=np.float32)
+        res = mb.reverse(x=val, axes=[0])
+        np.testing.assert_allclose(np.flip(val, axis=0), res.val, atol=1e-04, rtol=1e-05)
+
+    @pytest.mark.parametrize(
+        "compute_unit, backend", itertools.product(compute_units, backends,)
+    )
+    def test_builder_to_backend_symbolic(self, compute_unit, backend):
+        s0 = get_new_symbol()
+
+        val = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=np.float32)
+        input_placeholders = {"x": mb.placeholder(shape=(s0, 3))}
+        input_values = {"x": val}
+
+        def build(x):
+            return [
+                mb.reverse(x=x, axes=[1]),
+                mb.reverse(x=x, axes=[0]),
+            ]
+
+        expected_output_types = [
+            (s0, 3, types.fp32),
+            (s0, 3, types.fp32),
+        ]
+        expected_outputs = [
+            np.array([[3.0, 2.0, 1.0], [6.0, 5.0, 4.0]], dtype=np.float32),
+            np.array([[4.0, 5.0, 6.0], [1.0, 2.0, 3.0]], dtype=np.float32),
+        ]
+
+        run_compare_builder(
+            build,
+            input_placeholders,
+            input_values,
+            expected_output_types,
+            expected_outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+
+class TestReverseSequence:
+    @pytest.mark.parametrize(
+        "compute_unit, backend", itertools.product(compute_units, backends,)
+    )
+    def test_builder_to_backend_smoke(self, compute_unit, backend):
+        x_val = np.array(
+            [
+                [1, 2, 3, 4, 5, 0, 0, 0],
+                [1, 2, 0, 0, 0, 0, 0, 0],
+                [1, 2, 3, 4, 0, 0, 0, 0],
+                [1, 2, 3, 4, 5, 6, 7, 8],
+            ],
+            dtype=np.float32,
+        )
+        input_placeholders = {"x": mb.placeholder(shape=x_val.shape)}
+        input_values = {"x": x_val}
+
+        def build(x):
+            return [
+                mb.reverse_sequence(
+                    x=x, lengths=[7, 2, 3, 5], seq_axis=1, batch_axis=0
+                ),
+            ]
+
+        expected_output_types = [
+            (4, 8, types.fp32),
+        ]
+        expected_outputs = [
+            np.array(
+                [
+                    [0, 0, 5, 4, 3, 2, 1, 0],
+                    [2, 1, 0, 0, 0, 0, 0, 0],
+                    [3, 2, 1, 4, 0, 0, 0, 0],
+                    [5, 4, 3, 2, 1, 6, 7, 8],
+                ],
+                dtype=np.float32,
+            )
+        ]
+
+        run_compare_builder(
+            build,
+            input_placeholders,
+            input_values,
+            expected_output_types,
+            expected_outputs,
+            compute_unit=compute_unit,
+            backend=backend,
+        )
+
+    @pytest.mark.parametrize(
+        "compute_unit, backend", itertools.product(compute_units, backends,)
+    )
+    def test_builder_to_backend_symbolic(self, compute_unit, backend):
+        s0 = get_new_symbol()
+
+        x_val = np.array(
+            [
+                [1, 2, 3, 4, 5, 0, 0, 0],
+                [1, 2, 0, 0, 0, 0, 0, 0],
+                [1, 2, 3, 4, 0, 0, 0, 0],
+                [1, 2, 3, 
4, 5, 6, 7, 8], + ], + dtype=np.float32, + ) + input_placeholders = {"x": mb.placeholder(shape=(4, s0))} + input_values = {"x": x_val} + + def build(x): + return [ + mb.reverse_sequence( + x=x, lengths=[7, 2, 3, 5], seq_axis=1, batch_axis=0 + ), + ] + + expected_output_types = [ + (4, s0, types.fp32), + ] + expected_outputs = [ + np.array( + [ + [0, 0, 5, 4, 3, 2, 1, 0], + [2, 1, 0, 0, 0, 0, 0, 0], + [3, 2, 1, 4, 0, 0, 0, 0], + [5, 4, 3, 2, 1, 6, 7, 8], + ], + dtype=np.float32, + ) + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestSliceBySize: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x_val = np.array(list(range(24))).reshape((2, 3, 4)).astype(np.float32) + begin_val = np.array([1, 1, 1], dtype=np.int32) + input_placeholders = { + "x": mb.placeholder(shape=x_val.shape), + "begin": mb.placeholder(shape=begin_val.shape, dtype=types.int32), + } + input_values = {"x": x_val, "begin": begin_val} + + def build_non_single(x, begin): + return [ + mb.slice_by_size(x=x, begin=begin, size=[1, 2, 3]), + ] + + def build_single(x, begin): + return [ + mb.slice_by_size(x=x, begin=begin, size=[-1, 2, -1]), + ] + + expected_output_types = [(1, 2, 3, types.fp32)] + expected_outputs = [np.array([[[17, 18, 19], [21, 22, 23]]], dtype=np.float32)] + run_compare_builder( + build_non_single, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + expected_output_types = [(UNK_SYM, 2, UNK_SYM, types.fp32)] + run_compare_builder( + build_single, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x = np.array(list(range(24))).reshape(2, 3, 4) + v_1 = mb.slice_by_size(x=x, begin=(0, 1, 0), size=(-1, -1, -1)) + v_2 = mb.slice_by_size(x=x, begin=(0, 1, 0), size=(-1, -1, 3)) + v_3 = mb.slice_by_size(x=x, begin=(0, -2, 0), size=(-1, -1, 3)) + np.testing.assert_allclose(x[:, 1:, :], v_1.val, atol=1e-04, rtol=1e-05) + np.testing.assert_allclose(x[:, 1:, :3], v_2.val, atol=1e-04, rtol=1e-05) + np.testing.assert_allclose(x[:, -2:, :3], v_3.val, atol=1e-04, rtol=1e-05) + + +class TestSpaceToDepth: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + # original input type is (1, 1, 2, 2, fp32) + val = np.array([[[[7.0, 9.0], [4.0, 6.0]]]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + + def build(x): + return [mb.space_to_depth(x=x, block_size=2)] + + expected_output_types = (1, 4, 1, 1, types.fp32) + expected_outputs = np.array( + [[[[7.0]], [[9.0]], [[4.0]], [[6.0]]]], dtype=np.float32 + ) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestSqueeze: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + x = np.array([[[[1], [2], [3]]]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=x.shape)} + + input_values = 
{"x": x} + + def build(x): + return [ + mb.squeeze(x=x, axes=(-1,)), + mb.squeeze(x=x, axes=(-3, 0)), + mb.squeeze(x=x, axes=(0, 1, 3)), + mb.squeeze(x=x), + ] + + expected_output_types = [ + (1, 1, 3, types.fp32), + (3, 1, types.fp32), + (3, types.fp32), + (3, types.fp32), + ] + + expected_outputs = [ + np.array([[[1, 2, 3]]], dtype=np.float32), + np.array([[1], [2], [3]], dtype=np.float32), + np.array([1, 2, 3], dtype=np.float32), + np.array([1, 2, 3], dtype=np.float32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x = np.array([[[[1], [2], [3]], [[4], [5], [6]]]], dtype=np.float32) + v = mb.squeeze(x=x, axes=(-4, 3)) + np.testing.assert_allclose(np.squeeze(x, axis=(-4, 3)), v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_eval_rank_0(self): + x = np.array([1], dtype=np.float32) + v = mb.squeeze(x=x) + assert v.shape == () + assert type(v.val) == np.float32 + assert np.isclose(np.squeeze(x), v.val) + + +class TestTranspose: + @pytest.mark.parametrize( + "compute_unit, backend, is_symbolic", + itertools.product(compute_units, backends, [True, False],), + ) + def test_builder_to_backend_smoke(self, compute_unit, backend, is_symbolic): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + + input_shape = x.shape + if is_symbolic: + input_shape = [get_new_symbol(), get_new_symbol()] + + input_placeholders = {"x": mb.placeholder(shape=input_shape)} + + input_values = {"x": x} + + def build(x): + return [ + mb.transpose(x=x, perm=(0, 1)), + mb.transpose(x=x, perm=(1, 0)), + mb.transpose(x=x, perm=(-1, 0)), + mb.transpose(x=x, perm=(-2, -1)), + ] + + d0 = input_shape[0] + d1 = input_shape[1] + expected_output_types = [ + (d0, d1, types.fp32), + (d1, d0, types.fp32), + (d1, d0, types.fp32), + (d0, d1, types.fp32), + ] + + expected_outputs = [x, x.T, x.T, x] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) + v = mb.transpose(x=x, perm=(1, 0)) + np.testing.assert_allclose(x.T, v.val, atol=1e-04, rtol=1e-05) + + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_symbolic(self, compute_unit, backend): + s0 = get_new_symbol() + + input_placeholders = { + "x": mb.placeholder(shape=(2, s0)), + } + + def build(x): + return [ + mb.transpose(x=x, perm=[1, 0]), + ] + + expected_output_types = [ + (s0, 2, types.fp32), + ] + expected_outputs = [ + np.array([[1, 4], [2, 5], [3, 6]], dtype=np.float32), + ] + + input_values = { + "x": np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32), + } + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestPixelShuffle: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + # original input type is (1, 4, 1, 1, fp32) + val = np.array([[[[9.0]], [[5.0]], [[1.0]], [[3.0]]]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + + def build(x): + return [mb.pixel_shuffle(x=x, upscale_factor=2)] + + 
expected_output_types = (1, 1, 2, 2, types.fp32) + expected_outputs = np.array([[[[9.0, 5.0], [1.0, 3.0]]]], dtype=np.float32) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.skipif(not testing_reqs._HAS_TORCH, reason=MSG_TORCH_NOT_FOUND) + @pytest.mark.parametrize( + "compute_unit, backend, shape, upscale_factor", + itertools.product( + compute_units, + backends, + [(1, 16, 1, 1), (2, 16, 3, 3), (1, 32, 1, 1)], + [2, 4], + ), + ) + def test_builder_to_backend_stress( + self, compute_unit, backend, shape, upscale_factor + ): + val = np.random.rand(*shape) + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + + def build(x): + return [mb.pixel_shuffle(x=x, upscale_factor=upscale_factor)] + + torch_pixel_shuffle = torch.nn.PixelShuffle(upscale_factor) + expected_outputs = [torch_pixel_shuffle(torch.Tensor(val)).numpy()] + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +@pytest.mark.skipif(ct.utils._macos_version() < (13, 0), reason="New functionality in macOS13/iOS16") +class TestPixelUnshuffle: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + if backend[0] == "neuralnetwork": + pytest.skip("nn backend not supported") + + val = np.array([[[[9.0, 5.0], [1.0, 3.0]]]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + + def build(x): + return [mb.pixel_unshuffle(x=x, downscale_factor=np.uint32(2))] + + expected_output_types = (1, 4, 1, 1, types.fp32) + expected_outputs = np.array([[[[9.0]], [[5.0]], [[1.0]], [[3.0]]]], dtype=np.float32) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16, + ) + + @pytest.mark.skipif(not testing_reqs._HAS_TORCH, reason=MSG_TORCH_NOT_FOUND) + @pytest.mark.parametrize( + "compute_unit, backend, shape, downscale_factor", + itertools.product( + compute_units, + backends, + [(1, 2, 4, 4), (2, 1, 8, 4)], + [2, 4], + ), + ) + def test_builder_to_backend_stress( + self, compute_unit, backend, shape, downscale_factor, + ): + if backend[0] == "neuralnetwork": + pytest.skip("nn backend not supported") + + val = np.random.rand(*shape) + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + + def build(x): + return [mb.pixel_unshuffle(x=x, downscale_factor=np.uint32(downscale_factor))] + + torch_pixel_unshuffle = torch.nn.PixelUnshuffle(downscale_factor) + expected_outputs = [torch_pixel_unshuffle(torch.Tensor(val)).numpy()] + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + minimum_deployment_target=ct.target.iOS16, + ) + + +class TestSlidingWindows: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends,) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + # original input type is 
(1, 4, 1, 1, fp32) + val = np.array([[[[9.0]], [[5.0]], [[1.0]], [[3.0]]]], dtype=np.float32) + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + + def build(x): + return [mb.sliding_windows(x=x, axis=1, size=2)] + + expected_output_types = (1, 3, 2, 1, 1, types.fp32) + expected_outputs = np.array( + [[[[[9.0]], [[5.0]]], [[[5.0]], [[1.0]]], [[[1.0]], [[3.0]]]]], + dtype=np.float32, + ) + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank_and_axis, size, stride", + itertools.product( + compute_units, + backends, + [(rank, axis) for rank in range(1, 5) for axis in range(-rank, rank)], + [1, 2], + [1, 2], + ), + ) + def test_builder_to_backend_stress( + self, compute_unit, backend, rank_and_axis, size, stride + ): + def np_sliding_windows(a, np_axis, np_size, np_stride): + n = (a.shape[np_axis] - np_size) // np_stride + 1 + x_shape = list(a.shape) + x_shape[np_axis] = n + if np_axis < 0: + np_axis += len(x_shape) + x_shape.insert(np_axis + 1, np_size) + strides = list(a.strides) + eff_stride = strides[np_axis] * np_stride + strides.insert(np_axis, eff_stride) + return np.lib.stride_tricks.as_strided(a, x_shape, strides) + + rank, axis = rank_and_axis + shape = np.random.randint(low=2, high=5, size=rank) + val = np.random.rand(*shape) + input_placeholders = {"x": mb.placeholder(shape=val.shape)} + input_values = {"x": val} + + def build(x): + return [mb.sliding_windows(x=x, axis=axis, size=size, stride=stride)] + + expected_outputs = [ + np_sliding_windows(val, np_axis=axis, np_size=size, np_stride=stride) + ] + expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs] + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + +class TestConcat: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends, ) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + t1 = np.array([[1, 2], [4, 5]], dtype=np.float32) + t2 = np.array([[7, 8]], dtype=np.float32) + + input_placeholders = { + "x": mb.placeholder(shape=t1.shape), + "y": mb.placeholder(shape=t2.shape), + } + input_values = {"x": t1, "y": t2} + + def build(x, y): + return (mb.concat(values=(x, y), axis=0),) + + expected_output_types = [ + (3, 2, types.fp32), + ] + expected_outputs = [ + np.array([[1, 2], [4, 5], [7, 8]], dtype=np.float32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @pytest.mark.parametrize( + "compute_unit, backend, rank, n_inputs, negative_index", + itertools.product( + compute_units, + backends, + [1, 2, 3, 4, 5], + [2, 3], + [False, True], + ) + ) + def test_builder_to_backend_stress_interleave(self, compute_unit, backend, + rank, n_inputs, negative_index): + + def np_concat_interleave(arrays, axis): + step = len(arrays) + in_shape = arrays[0].shape + out_shape = list(in_shape) + if axis < 0: + axis += len(in_shape) + out_shape[axis] = step * in_shape[axis] + concat_tensor = np.empty(tuple(out_shape), dtype=np.float32) + for i in range(step): + if rank == 5: + if axis == 4: + concat_tensor[:, :, :, :, i::step] = arrays[i] + if axis == 3: + concat_tensor[:, :, :, i::step, :] = arrays[i] + if 
axis == 2: + concat_tensor[:, :, i::step, :, :] = arrays[i] + if axis == 1: + concat_tensor[:, i::step, :, :, :] = arrays[i] + if axis == 0: + concat_tensor[i::step, :, :, :, :] = arrays[i] + if rank == 4: + if axis == 3: + concat_tensor[:, :, :, i::step] = arrays[i] + if axis == 2: + concat_tensor[:, :, i::step, :] = arrays[i] + if axis == 1: + concat_tensor[:, i::step, :, :] = arrays[i] + if axis == 0: + concat_tensor[i::step, :, :, :] = arrays[i] + if rank == 3: + if axis == 2: + concat_tensor[:, :, i::step] = arrays[i] + if axis == 1: + concat_tensor[:, i::step, :] = arrays[i] + if axis == 0: + concat_tensor[i::step, :, :] = arrays[i] + if rank == 2: + if axis == 1: + concat_tensor[:, i::step] = arrays[i] + if axis == 0: + concat_tensor[i::step, :] = arrays[i] + if rank == 1: + concat_tensor[i::step] = arrays[i] + return concat_tensor + + input_shape = [4, 2, 3, 6, 5] + for axis in range(rank): + if negative_index: + axis = axis - rank + shape = tuple(input_shape[:rank]) + t1 = np.random.normal(size=shape).astype(np.float32) + t2 = np.random.normal(size=shape).astype(np.float32) + all_input_arrs = [t1, t2] + input_placeholders = { + "x": mb.placeholder(shape=t1.shape), + "y": mb.placeholder(shape=t2.shape), + } + input_values = {"x": t1, "y": t2} + if n_inputs == 3: + t3 = np.random.normal(size=shape).astype(np.float32) + input_placeholders["z"] = mb.placeholder(shape=t3.shape) + input_values["z"] = t3 + all_input_arrs.append(t3) + + def build_2_inputs(x, y): + return (mb.concat(values=(x, y), axis=axis, interleave=True),) + + def build_3_inputs(x, y, z): + return (mb.concat(values=(x, y, z), axis=axis, interleave=True),) + + np_out = np_concat_interleave(all_input_arrs, axis) + expected_output_types = [np_out.shape + (types.fp32,)] + expected_outputs = [np_out] + + run_compare_builder( + build_3_inputs if n_inputs == 3 else build_2_inputs, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + values = [ + np.random.rand(1, 1, 6, 2), + np.random.rand(1, 1, 3, 2), + ] + v = mb.concat(values=values, axis=2) + np.testing.assert_allclose(np.concatenate(values, 2), v.val, atol=1e-04, rtol=1e-05) + + @ssa_fn + def test_builder_eval_failure(self): + values = [ + np.random.rand(1, 1, 6, 2), + np.random.rand(1, 1, 3, 1), + ] + with pytest.raises(ValueError): + mb.concat(values=values, axis=2) + + +class TestSplit: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends, ) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + t = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32) + + input_placeholders = { + "x": mb.placeholder(shape=t.shape), + } + input_values = {"x": t} + + def build(x): + return mb.split(x=x, num_splits=2, axis=1) + mb.split( + x=x, split_sizes=[1, 2], axis=0 + ) + + expected_output_types = [ + (3, 1, types.fp32), + (3, 1, types.fp32), + (1, 2, types.fp32), + (2, 2, types.fp32), + ] + expected_outputs = [ + np.array([[1], [3], [5]], dtype=np.float32), + np.array([[2], [4], [6]], dtype=np.float32), + np.array([[1, 2]], dtype=np.float32), + np.array([[3, 4], [5, 6]], dtype=np.float32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + t = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32) + vs = mb.split(x=t, num_splits=3, 
axis=0) + es = np.split(t, [1, 2, 3], axis=0) + for v, e in zip(vs, es): + np.testing.assert_allclose(e, v.val, atol=1e-04, rtol=1e-05) + + +class TestStack: + @pytest.mark.parametrize( + "compute_unit, backend", itertools.product(compute_units, backends, ) + ) + def test_builder_to_backend_smoke(self, compute_unit, backend): + t1 = np.array([1, 2, 3], dtype=np.float32) + t2 = np.array([7, 8, 9], dtype=np.float32) + + input_placeholders = { + "x": mb.placeholder(shape=t1.shape), + "y": mb.placeholder(shape=t2.shape), + } + input_values = {"x": t1, "y": t2} + + def build(x, y): + return [mb.stack(values=(x, y), axis=0), mb.stack(values=(x, y), axis=1), mb.stack(values=(x, y), axis=-1)] + + expected_output_types = [ + (2, 3, types.fp32), + (3, 2, types.fp32), + (3, 2, types.fp32), + ] + expected_outputs = [ + np.array([[1, 2, 3], [7, 8, 9]], dtype=np.float32), + np.array([[1, 7], [2, 8], [3, 9]], dtype=np.float32), + np.array([[1, 7], [2, 8], [3, 9]], dtype=np.float32), + ] + + run_compare_builder( + build, + input_placeholders, + input_values, + expected_output_types, + expected_outputs, + compute_unit=compute_unit, + backend=backend, + ) + + @ssa_fn + def test_builder_eval(self): + values = [ + np.random.rand(1, 1, 3, 2).astype(np.float32), + np.random.rand(1, 1, 3, 2).astype(np.float32), + ] + v = mb.stack(values=values, axis=2) + np.testing.assert_allclose(np.stack(values, 2), v.val, atol=1e-04, rtol=1e-05) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_utils.py new file mode 100644 index 00000000..82e22c74 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/test_utils.py @@ -0,0 +1,262 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as np + +from coremltools.converters.mil.mil.ops.defs._utils import ( + aggregated_pad, effective_kernel, spatial_dimensions_out_shape) + + +class TestDilation: + def test_kernel_and_dilations_not_same_size(self): + np.testing.assert_raises_regex( + ValueError, + "kernel_shape.*dilations.*length", + effective_kernel, + kernel_shape=(1, 2, 3), + dilations=(1, 2), + ) + + def test_effective_kernel_dilation_1(self): + actual = effective_kernel(kernel_shape=(1, 2, 3), dilations=(1, 1, 1)) + + expected = [1, 2, 3] + np.testing.assert_equal(actual, expected) + + def test_effective_kernel_dilation_2(self): + actual = effective_kernel(kernel_shape=(1, 2, 3), dilations=(2, 2, 2)) + + expected = [1, 3, 5] + np.testing.assert_equal(actual, expected) + + def test_effective_kernel_dilation_3(self): + actual = effective_kernel(kernel_shape=(1, 2, 3), dilations=(3, 3, 3)) + + expected = [1, 4, 7] + np.testing.assert_equal(actual, expected) + + +class TestAggregatePadding: + def test_invalid_pad_type(self): + np.testing.assert_raises_regex( + ValueError, + "Invalid padding pad_type", + aggregated_pad, + pad_type="bananas", + kernel_shape=(1, 2, 3), + ) + + def test_dilations_rank_different_from_input_rank(self): + np.testing.assert_raises_regex( + ValueError, + "dilations must have same length as kernel_shape", + aggregated_pad, + pad_type="valid", # doesn't matter + kernel_shape=(1, 2, 3), + dilations=(4, 5), + ) + + def test_custom_pad(self): + actual = aggregated_pad( + pad_type="custom", kernel_shape=(1, 2, 3), custom_pad=(7, 8, 9, 10, 11, 12) + ) + + expected = [7 + 8, 9 + 10, 11 + 12] + np.testing.assert_equal(actual, expected) + + def test_custom_pad_none(self): + np.testing.assert_raises_regex( + ValueError, + "Invalid custom_pad", + aggregated_pad, + pad_type="custom", + kernel_shape=(1, 2, 3), # doesn't matter + custom_pad=None, + ) + + def test_custom_pad_invalid(self): + np.testing.assert_raises_regex( + ValueError, + "Invalid custom_pad", + aggregated_pad, + pad_type="custom", + kernel_shape=(1, 2, 3), # doesn't matter + custom_pad=(7, 8, 9, 10), # too few elements + ) + + def test_valid_pad(self): + actual = aggregated_pad(pad_type="valid", kernel_shape=(1, 2, 3),) + + expected = [0, 0, 0] + np.testing.assert_equal(actual, expected) + + def test_valid_pad_4d(self): + actual = aggregated_pad(pad_type="valid", kernel_shape=(1, 2, 3, 4),) + + expected = [0, 0, 0, 0] + np.testing.assert_equal(actual, expected) + + def test_valid_pad_2d(self): + actual = aggregated_pad(pad_type="valid", kernel_shape=(1, 2),) + + expected = [0, 0] + np.testing.assert_equal(actual, expected) + + def test_valid_pad_1d(self): + actual = aggregated_pad(pad_type="valid", kernel_shape=[4]) + + expected = [0] + np.testing.assert_equal(actual, expected) + + def test_same_padding_no_dilation(self): + actual = aggregated_pad( + pad_type="same", + input_shape=(5, 6, 7), + kernel_shape=(2, 2, 2), + strides=(1, 2, 2), + ) + + expected = [1, 0, 1] + np.testing.assert_equal(actual, expected) + + def test_same_padding_dilation_with_dilation(self): + actual = aggregated_pad( + pad_type="same", + input_shape=(19, 20, 21), + kernel_shape=(2, 2, 2), + strides=(1, 2, 2), + dilations=(5, 6, 7), + ) + + expected = [5, 5, 7] + np.testing.assert_equal(actual, expected) + + def test_same_padding_stride_same_as_input(self): + actual = aggregated_pad( + 
pad_type="same", input_shape=(5, 5), kernel_shape=(3, 3), strides=(5, 5), + ) + + expected = [0, 0] + np.testing.assert_equal(actual, expected) + + def test_same_padding_stride_larger_than_kernel_but_less_than_input(self): + actual = aggregated_pad( + pad_type="same", input_shape=(5, 5), kernel_shape=(3, 3), strides=(4, 4), + ) + + expected = [2, 2] + np.testing.assert_equal(actual, expected) + + def test_same_padding_none_input_shape(self): + np.testing.assert_raises_regex( + ValueError, + "input_shape.*None", + aggregated_pad, + pad_type="same", + kernel_shape=(1, 2, 3), + strides=(1, 2, 3), + ) + + def test_same_padding_input_shape_wrong_size(self): + np.testing.assert_raises_regex( + ValueError, + "input_shape.*same length", + aggregated_pad, + pad_type="same", + kernel_shape=(1, 2, 3), + input_shape=(1, 2), + strides=(1, 2, 3), + ) + + def test_same_padding_none_strides(self): + np.testing.assert_raises_regex( + ValueError, + "strides.*None", + aggregated_pad, + pad_type="same", + kernel_shape=(1, 2, 3), + input_shape=(1, 2, 3), + ) + + def test_same_padding_strides_wrong_size(self): + np.testing.assert_raises_regex( + ValueError, + "strides.*same length", + aggregated_pad, + pad_type="same", + kernel_shape=(1, 2, 3), + input_shape=(1, 2, 3), + strides=(1, 2), + ) + + +class TestOutputShape: + def test_custom_padding_shape(self): + actual = spatial_dimensions_out_shape( + pad_type="custom", + input_shape=(3, 3, 3), + kernel_shape=(2, 2, 2), + strides=(2, 2, 2), + custom_pad=(2, 0, 1, 2, 2, 3), + ) + + expected = [2, 3, 4] + np.testing.assert_equal(actual, expected) + + def test_valid_padding_shape(self): + actual = spatial_dimensions_out_shape( + pad_type="valid", input_shape=(7, 7), kernel_shape=(3, 3), strides=(1, 1) + ) + + expected = [5, 5] + np.testing.assert_equal(actual, expected) + + def test_valid_padding_shape_dilation_2(self): + actual = spatial_dimensions_out_shape( + pad_type="valid", + input_shape=(7, 7), + kernel_shape=(3, 3), + strides=(1, 1), + dilations=(2, 2), + ) + + expected = [3, 3] + np.testing.assert_equal(actual, expected) + + def test_valid_padding_shape_with_stride_2(self): + actual = spatial_dimensions_out_shape( + pad_type="valid", input_shape=(7, 7), kernel_shape=(3, 3), strides=(2, 2) + ) + + expected = [3, 3] + np.testing.assert_equal(actual, expected) + + def test_same_padding_shape(self): + actual = spatial_dimensions_out_shape( + pad_type="same", input_shape=(6, 6), kernel_shape=(2, 2), strides=(2, 2) + ) + + expected = [3, 3] + np.testing.assert_equal(actual, expected) + + def test_same_padding_shape_stride_2_input_not_multiple_of_kernel(self): + actual = spatial_dimensions_out_shape( + pad_type="same", input_shape=(5, 5), kernel_shape=(2, 2), strides=(2, 2) + ) + + expected = [3, 3] + np.testing.assert_equal(actual, expected) + + def test_same_padding_shape_dilation_2(self): + actual = spatial_dimensions_out_shape( + pad_type="same", + input_shape=(5, 5), + kernel_shape=(2, 2), + strides=(1, 1), + dilations=(2, 2), + ) + + expected = [5, 5] + np.testing.assert_equal(actual, expected) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/testing_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/testing_utils.py new file mode 100644 index 00000000..c6528b40 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/ops/tests/testing_utils.py @@ -0,0 +1,159 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause

+import coremltools as ct
+from coremltools import _logger as logger
+from coremltools.converters.mil.mil import Function, Program
+from coremltools.converters.mil.mil.types.symbolic import is_symbolic
+from coremltools.converters.mil.testing_utils import (compare_backend,
+                                                      ct_convert)
+
+
+UNK_VARIADIC = "*s_unk"
+UNK_SYM = "s_unk"
+
+
+def run_compare_builder(
+    build,
+    input_placeholders,
+    input_values=None,
+    expected_output_types=None,
+    expected_outputs=None,
+    compute_unit=ct.ComputeUnit.CPU_ONLY,
+    frontend_only=False,
+    backend=("neuralnetwork", "fp32"),
+    atol=1e-04,
+    rtol=1e-05,
+    inputs=None,
+    also_compare_shapes=False,
+    converter=ct.convert,
+    minimum_deployment_target=None,
+):
+    """
+    Inputs:
+    - build: python function taking input of Vars and returning Var or
+      list[Var]. Each input argument in build must match a key in
+      input_values / input_placeholders.
+
+    - input_placeholders: str -> placeholder. It may not be an empty dict,
+      as MLModel does not support functions with no inputs.
+
+    - input_values: str -> np.array or PIL.Image. Keys must match those in
+      input_placeholders.
+
+    - expected_output_types: list[(shape, builtin_type)] or (shape,
+      builtin_type). None skips type inference validation.
+
+    - compute_unit: Enum[ct.ComputeUnit]. Compute unit for the coreml model.
+
+    - expected_outputs: list[np.array] or np.array. Required iff
+      frontend_only == False.
+
+    - frontend_only: True to test up to proto generation.
+
+    - inputs: type of inputs (either None (defaults to tensor) or [ct.ImageType])
+
+    - converter: function
+      Reference to the convert function to be used.
+      Default: ct.convert
+
+    - minimum_deployment_target: coremltools.target enumeration (optional)
+      A member of the ``coremltools.target`` enum.
+
+    Returns:
+        The converted mlmodel
+    """
+    if not isinstance(expected_output_types, list):
+        expected_output_types = [expected_output_types]
+
+    if expected_outputs is not None and not isinstance(expected_outputs, list):
+        expected_outputs = [expected_outputs]
+
+    prog = Program()
+    with Function(input_placeholders, opset_version=minimum_deployment_target) as ssa_func:
+        output_vars = build(**ssa_func.inputs)
+        if isinstance(output_vars, tuple):
+            output_vars = list(output_vars)
+        elif not isinstance(output_vars, list):
+            output_vars = [output_vars]
+        ssa_func.set_outputs(output_vars)
+        prog.add_function("main", ssa_func)
+
+    # get output names for output_vars
+    output_names = [x.name for x in output_vars]
+
+    # Validate type inference
+    msg = (
+        "Provided expected output types {} should match number of output"
+        + " variables {}"
+    )
+    assert_msg = msg.format(len(expected_output_types), len(output_vars))
+    assert len(output_vars) == len(expected_output_types), assert_msg
+
+    for out_var, s in zip(output_vars, expected_output_types):
+        if out_var.dtype != s[-1]:
+            raise ValueError(
+                "Output {} type: expect {}, got {}. Program:\n{}".format(
+                    out_var.name, s[-1].__type_info__(),
+                    out_var.dtype.__type_info__(), prog
+                )
+            )
+        if UNK_VARIADIC in s[:-1]:
+            msg = "Skip type checking for UNK_VARIADIC. Output shape: {} vs expected shape: {}"
+            logger.debug(msg.format(out_var.shape, s[:-1]))
+            continue
+        expected_shape = s[:-1]
+        msg = "Output {} shape: expect {}, got {}. Program:\n{}".format(
+            out_var.name, expected_shape, out_var.shape, prog
+        )
+        # No more variadic here.
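+        # Ranks must match exactly; UNK_SYM entries act as wildcards, so both
+        # shapes are zeroed at those positions below before being compared.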
+ if len(out_var.shape) != len(expected_shape): + raise ValueError(msg) + # replace UNK_SYM in out_var.shape. + output_shape = [ + 0 if es == UNK_SYM else os for os, es in zip(out_var.shape, expected_shape) + ] + expected_shape = [0 if es == UNK_SYM else es for es in expected_shape] + # convert float etc to int. + output_shape = [i if is_symbolic(i) else int(i) for i in output_shape] + expected_shape = [i if is_symbolic(i) else int(i) for i in expected_shape] + if output_shape != expected_shape: + raise ValueError(msg) + + mlmodel = ct_convert(prog, + converter=converter, + source="milinternal", + convert_to=backend, + inputs=inputs, + compute_units=compute_unit, + minimum_deployment_target=minimum_deployment_target + ) + + if frontend_only: + return mlmodel + + if expected_outputs: + assert len(output_vars) == len(expected_outputs), ( + "Provided expected_outputs {}" + " should match number of output" + " variables {}".format(len(expected_outputs), len(output_vars)) + ) + + expected_outputs = { + name: val for name, val in zip(output_names, expected_outputs) + } + + compare_backend( + mlmodel=mlmodel, + input_key_values=input_values, + expected_outputs=expected_outputs, + atol=atol, + rtol=rtol, + also_compare_shapes=also_compare_shapes, + dtype=backend[1] + ) + + return mlmodel diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/__init__.py new file mode 100644 index 00000000..ec624d2d --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/__init__.py @@ -0,0 +1,43 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +# Import all frontend/backend passes to make sure they got registered. +from coremltools.converters.mil.backend.mil.passes import ( + adjust_io_to_supported_types, + fuse_activation_silu, + insert_image_preprocessing_op, + sanitize_name_strings, +) +from coremltools.converters.mil.backend.nn.passes import ( + alert_return_type_cast, + commingle_loop_vars, + conv1d_decomposition, + handle_return_inputs_as_outputs, + handle_return_unused_inputs, + handle_unused_inputs, + mlmodel_passes, +) +from coremltools.converters.mil.frontend.tensorflow2.ssa_passes import remove_vacuous_cond +from coremltools.converters.mil.frontend.tensorflow.ssa_passes import ( + backfill_make_list_elem_type, + expand_tf_lstm, + tf_lstm_to_core_lstm, +) +from coremltools.converters.mil.frontend.torch.ssa_passes import ( + torch_tensor_assign_to_core, + torch_upsample_to_core_upsample, +) +from coremltools.converters.mil.mil.passes.defs import ( + cleanup, + lower_complex_dialect_ops, + optimize_activation, + optimize_conv, + optimize_elementwise_binary, + optimize_linear, + optimize_normalization, + optimize_repeat_ops, + optimize_tensor_operation, + preprocess, +) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/__init__.py new file mode 100644 index 00000000..25c7d28c --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2023, Apple Inc. All rights reserved. 
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/__init__.py
new file mode 100644
index 00000000..5c534eb7
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/__init__.py
@@ -0,0 +1,14 @@
+# Copyright (c) 2023, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from .const_elimination import const_elimination
+from .dead_code_elimination import dead_code_elimination
+from .dedup_op_and_var_names import dedup_op_and_var_names
+from .fuse_reduce_mean import fuse_reduce_mean
+from .loop_invariant_elimination import loop_invariant_elimination
+from .noop_elimination import noop_elimination
+from .remove_redundant_ops import remove_redundant_ops
+from .remove_symbolic_reshape import remove_symbolic_reshape
+from .topological_reorder import topological_reorder
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/const_elimination.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/const_elimination.py
new file mode 100644
index 00000000..41db68e7
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/const_elimination.py
@@ -0,0 +1,103 @@
+# Copyright (c) 2023, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from coremltools import _logger as logger
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil import Program
+from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
+from coremltools.converters.mil.mil.passes.helper import block_context_manager
+from coremltools.converters.mil.mil.passes.pass_registry import register_pass
+
+
+@register_pass(namespace="common")
+class const_elimination(AbstractGraphPass):
+    """
+    Replace the ``const``-valued outputs of non-``const`` ops with ``const`` ops. Example:
+
+    .. code-block::
+
+        Given:
+            %2, %3 = non_const_op(...) # %2 is const, %3 isn't const
+            %4 = other_op(%2, %3)
+
+        Result:
+            _, %3 = non_const_op(...) # _ is the ignored output
+            %2_const = const() # %2_const name is for illustration only
+            %4 = other_op(%2_const, %3)
+
+    Supported options:
+    - skip_const_by_size: Skip folding consts whose number of elements exceeds a threshold.
+    """
+
+    _skip_const_by_size = None
+
+    @property
+    def skip_const_by_size(self):
+        return self._skip_const_by_size
+
+    @skip_const_by_size.setter
+    def skip_const_by_size(self, threshold: str):
+        try:
+            # Convert to float instead of int to support more flexible input such as `1e6`.
+            threshold = float(threshold)
+        except Exception as e:
+            raise ValueError(
+                f"Expected to get float threshold, but got `{threshold}` which cannot "
+                f"be converted to float. 
{e}" + ) + self._skip_const_by_size = float(threshold) + + def apply(self, prog: Program): + for f in prog.functions.values(): + self._const_elimination_block(f) + + @block_context_manager + def _const_elimination_block(self, block): + # shallow copy hides changes on f.operations during the loop + for op in list(block.operations): + if op.op_type == "const": + continue + + for b in op.blocks: + self._const_elimination_block(b) + + all_outputs_are_replaced = True + for output in op.outputs: + if output.can_be_folded_to_const(): + if ( + self._skip_const_by_size is not None + and len(output.shape) > 0 + and output.val.size > self._skip_const_by_size + ): + logger.warning( + f"The output ({output}) of op {op} is skipped in const elimination pass " + f"because its val size ({output.val.size}) is larger than threshold " + f"({self._skip_const_by_size})." + ) + all_outputs_are_replaced = False + break + + res = mb.const( + val=output.val, + before_op=op, + # same var name, but different python + # instance does not violate SSA property. + name=output.name, + ) + + if op.enclosing_block.try_replace_uses_of_var_after_op( + anchor_op=op, + old_var=output, + new_var=res, + ): + # rename the const output + output.set_name(output.name + "_ignored") + else: + all_outputs_are_replaced = False + else: + all_outputs_are_replaced = False + + if all_outputs_are_replaced: + op.remove_from_block() diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/dead_code_elimination.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/dead_code_elimination.py new file mode 100644 index 00000000..bbe6578e --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/dead_code_elimination.py @@ -0,0 +1,79 @@ +# Copyright (c) 2023, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + + +from coremltools import _logger as logger +from coremltools.converters.mil.mil import Program +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.pass_registry import register_pass + + +@register_pass(namespace="common") +class dead_code_elimination(AbstractGraphPass): + """ + Eliminate unused ops in program. Ops whose outputs do not contribute to final outputs will be + deleted. + + .. code-block:: + + # Before dead_code_elimination pass. + main(%x: (2, 4, fp32)) { + block0() { + %const_2: (4, 2, fp32)* = const(val=[...]) + %const_3: (4, fp32)* = const(val=[...]) + %tx_0: (bool)* = const(val=False) + %ty_0: (bool)* = const(val=False) + %matmul_0: (2, 2, fp32) = matmul(x=%x, y=%const_2, transpose_x=%tx_0, transpose_y=%ty_0) + %linear_0: (2, 4, fp32) = linear(x=%x, weight=%const_2, bias=%const_3) + } -> (%linear_0) + } + + # After dead_code_elimination pass. + main(%x: (2, 4, fp32)) { + block0() { + %const_2: (4, 2, fp32)* = const(val=[...]) + %const_3: (4, fp32)* = const(val=[...]) + %linear_0: (2, 4, fp32) = linear(x=%x, weight=%const_2, bias=%const_3) + } -> (%linear_0) + } + + In the example above, ``%matmul_0`` is an op that is not used in the computation. This op and + its input ops (``%tx_0`` and ``%ty_0``) are eliminated in this pass. 
+ """ + + def apply(self, prog: Program): + for f in prog.functions.values(): + self._dead_code_elimination_block(f) + + @staticmethod + def _dead_code_elimination_block(block): + used_vars = set() + ops_to_remove = list() + + # mark block's outputs to used + used_vars.update(block.outputs) + + for op in reversed(block.operations): + # if none of op's output is used, delete op + if not set(op.outputs).intersection(used_vars): + ops_to_remove.append(op) + continue + + # mark all op's inputs to used + for _, input_var in op.inputs.items(): + if isinstance(input_var, (tuple, list)): + used_vars.update(list(input_var)) + else: + used_vars.update([input_var]) + + for b in op.blocks: + used_in_block = dead_code_elimination._dead_code_elimination_block(b) + used_vars.update(used_in_block) + + for op in ops_to_remove: + logger.info('Removing op "{}" (type: {})'.format(op.name, op.op_type)) + op.remove_from_block() + + return used_vars diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/dedup_op_and_var_names.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/dedup_op_and_var_names.py new file mode 100644 index 00000000..f2067552 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/dedup_op_and_var_names.py @@ -0,0 +1,94 @@ +# Copyright (c) 2023, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import collections +import itertools + +from coremltools.converters.mil.mil import Function +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.pass_registry import register_pass + + +@register_pass(namespace="common") +class dedup_op_and_var_names(AbstractGraphPass): + """ + For each function, this pass renames ops and variables with the same name + as any preceding ops/variables across all scopes in the given function, + where the precedence is implementation-specific. Note that an op name and + variable names are tracked separately, so an op may have the same name as + a variable. + + The pass preserves input and output name. Raises ValueError if we cannot + dedup without changing the input/output var names. + + .. code-block:: + + def prog(x): + x = mb.cast(x=x, dtype="fp16", name="castop") + x = mb.cast(x=x, dtype="fp32", name="castop") + x = mb.square(x=x, name="square_last") + return x + + # Before dedup pass, the op names are ["castop", "castop", "square_last"]. + # After dedup pass, the op names are ["castop", "castop_1", "square_last"]. 
+ """ + + def apply(self, prog): + for func in prog.functions.values(): + # Handle function input/outputs as they cannot be changed (to maintain user interface) + inputs = list(func.function_inputs) + io_vars = set(inputs + func.outputs) + self._ensure_unique_var_names(io_vars) + seen_var_names = set([v.name for v in io_vars]) + seen_op_names = set() + self._deduplicate_block(func, set(func.outputs), seen_var_names, seen_op_names) + + @staticmethod + def _gen_new_name(seen_names, curr_name): + if curr_name not in seen_names: + return curr_name + # make sure the name is unique + for i in itertools.count(start=1): # loop from 1 to infinity + # rename duplicated name start from 1: 'xxx_1' + new_name = curr_name + "_" + str(i) + if new_name not in seen_names: + return new_name + + def _deduplicate_block(self, block, func_outputs, seen_var_names, seen_op_names): + """ + seen_var_names: set[str] + seen_op_names: set[str] + """ + # Add block input (function input is handled separately) + if not isinstance(block, Function): + for v in block.inputs: + v.name = self._gen_new_name(seen_var_names, v.name) + seen_var_names.add(v.name) + + for op in list(block.operations): + for b in op.blocks: + self._deduplicate_block(b, func_outputs, seen_var_names, seen_op_names) + if op.name is not None: + op.name = self._gen_new_name(seen_op_names, op.name) + seen_op_names.add(op.name) + for v in op.outputs: + if v in func_outputs: + # func output is never renamed + continue + v.name = self._gen_new_name(seen_var_names, v.name) + seen_var_names.add(v.name) + + @staticmethod + def _ensure_unique_var_names(v_set): + """ + v_set: set[Variable] + + All variables in v_set should have different names. Raise ValueError + otherwise + """ + names = [v.name for v in v_set] + dup_names = [name for name, count in collections.Counter(names).items() if count > 1] + if len(dup_names) > 0: + raise ValueError(f"Var names {dup_names} is used both as function's input and output") diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/fuse_reduce_mean.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/fuse_reduce_mean.py new file mode 100644 index 00000000..815c6076 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/fuse_reduce_mean.py @@ -0,0 +1,123 @@ +# Copyright (c) 2023, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + + +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.helper import ( + _check_child_op_type, + _check_var_scalar_value, + block_context_manager, +) +from coremltools.converters.mil.mil.passes.pass_registry import register_pass +from coremltools.converters.mil.mil.types.symbolic import is_symbolic + + +@register_pass(namespace="common") +class fuse_reduce_mean(AbstractGraphPass): + """ + Detect the "``reduce_sum``--->``mul/real_div``" pattern than can be mapped to ``reduce_mean``. + That is, the operation ``reduce_sum/count == reduce_mean``. + + .. 
+
+        Input graph:
+
+                            const (scalar)
+                                |
+        input ----> reduce_sum ----> mul/real_div -----------> output
+
+        Output graph:
+
+        input --------> reduce_mean ---------> output
+
+    """
+
+    def apply(self, prog):
+        for f in prog.functions.values():
+            block_changed = True
+            while block_changed:
+                block_changed = self._fuse_reduce_mean_block(f)
+
+    @staticmethod
+    def _try_to_transform(reduce_sum_op, block):
+
+        ops_to_remove = []
+
+        # check that the dimensions in the shape of the input to the reduce_sum op,
+        # over which the reduction operation is being performed, are known
+        input_shape = reduce_sum_op.x.shape
+        if input_shape is None:
+            return False
+        axes = None
+        if reduce_sum_op.axes is not None:
+            axes = reduce_sum_op.axes.val
+        if axes is None:
+            return False
+        count = 1
+        for dim in axes:
+            if is_symbolic(input_shape[dim]):
+                return False
+            count *= input_shape[dim]
+
+        # check that the output of reduce_sum is not a block output
+        if reduce_sum_op.outputs[0] in block.outputs:
+            return False
+        ops_to_remove.append(reduce_sum_op)
+
+        # check that the reduce_sum op is followed by either:
+        # - a mul op with scalar value 1/count
+        # or
+        # - a real_div op with scalar value count
+        if _check_child_op_type(reduce_sum_op, "mul"):
+            child_op = list(reduce_sum_op.outputs[0].child_ops)[0]
+            other_input = child_op.x if child_op.y == reduce_sum_op.outputs[0] else child_op.y
+            if not _check_var_scalar_value(other_input, 1.0 / count, 1e-6):
+                return False
+        elif _check_child_op_type(reduce_sum_op, "real_div"):
+            child_op = list(reduce_sum_op.outputs[0].child_ops)[0]
+            if child_op.x != reduce_sum_op.outputs[0]:
+                return False
+            other_input = child_op.y
+            if not _check_var_scalar_value(other_input, count, 1e-2):
+                return False
+        else:
+            return False
+
+        ops_to_remove.append(child_op)
+
+        # remove all the ops, and replace with a reduce_mean op
+        out_name = child_op.outputs[0].name
+        x = mb.reduce_mean(
+            x=reduce_sum_op.x,
+            axes=reduce_sum_op.axes.val,
+            keep_dims=reduce_sum_op.keep_dims.val,
+            name=out_name,
+            before_op=child_op,
+        )
+        child_op.enclosing_block.replace_uses_of_var_after_op(
+            anchor_op=child_op, old_var=child_op.outputs[0], new_var=x
+        )
+        block.remove_ops(ops_to_remove)
+        return True
+
+    @block_context_manager
+    def _fuse_reduce_mean_block(self, block):
+        fusion_status = False
+        for i, op in enumerate(list(block.operations)):
+            for b in op.blocks:
+                block_changed = True
+                while block_changed:
+                    block_changed = self._fuse_reduce_mean_block(b)
+            if len(op.blocks) > 0:
+                continue
+
+            # start pattern match if a reduce_sum op is encountered
+            if op.op_type == "reduce_sum":
+                fusion_status = self._try_to_transform(op, block)
+                # has to break as the downstream iterator is affected.
+                if fusion_status:
+                    return fusion_status
+        return fusion_status
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/loop_invariant_elimination.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/loop_invariant_elimination.py
new file mode 100644
index 00000000..774c6b20
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/loop_invariant_elimination.py
@@ -0,0 +1,169 @@
+# Copyright (c) 2023, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + + +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.helper import block_context_manager +from coremltools.converters.mil.mil.passes.pass_registry import register_pass + + +@register_pass(namespace="common") +class loop_invariant_elimination(AbstractGraphPass): + """ + When a block does not modify a block input var, eliminate that block + input var and use the corresponding var in the outer scope. Example: + + .. code-block:: + + # Before loop_invariant_elimination pass. + # Notice that ``%b.x`` is constant through while loop iterates. + main(%a: (1, 2, fp32), + %b: (1, 2, fp32)) { + block0() { + %loop:0: (1, 2, fp32), %loop:1: (1, 2, fp32) = \ + while_loop(loop_vars=(%a, %b)) + loop_cond(%a.x, %b.x) { + %cond_var: (bool) = some_op(x=%a.x, y=%b.x) + } -> (%cond_var) + loop_body(%a.x, %b.x) { + %add_0: (1, 2, fp32) = add(x=%a.x, y=%b.x) + } -> (%add_0, %b.x) + } -> (%loop:0, %loop:1) + } + + # After loop_invariant_elimination pass. + main(%a: (1, 2, fp32), + %b: (1, 2, fp32)) { + block0() { + %loop:1: (1, 2, fp32) = identity(x=%b) + %loop:0: (1, 2, fp32) = \ + while_loop(loop_vars=(%a)) + loop_cond(%a.x) { + %cond_var: (bool) = some_op(x=%a.x, y=%b) + } -> (%cond_var) + loop_body(%a.x) { + %add_0: (1, 2, fp32) = add(x=%a.x, y=%b) + } -> (%add_0) + } -> (%loop:0, %loop:1) + } + + where we eliminate loop invariant ``%b.x`` from ``while_loop``, which returns 1 + instead of 2 outputs. We also preserve the return var names with identity. + """ + + def apply(self, prog): + for f in prog.functions.values(): + self._loop_invariant_elimination_block(f) + + @staticmethod + def _detect_loop_invariants(while_op): + block = while_op.blocks[1] # body block + loop_invariant_ids = [] # list of index in op.loop_vars, block.inputs + for i, vx_in in enumerate(block.inputs): + vx_out = block.outputs[i] # first output is cond var. + return_input_as_output = vx_in == vx_out + # this block output is a var from outside of the block + + enclosing_block = while_op.enclosing_block + while_op_id = enclosing_block.find_op_id_in_block(while_op) + output_from_outside_of_block = ( + True + if enclosing_block.is_var_visible_in_block(vx_out, upto_op_with_id=while_op_id) + else False + ) + if return_input_as_output or output_from_outside_of_block: + loop_invariant_ids.append(i) + + # TODO: All outputs that depend on only invariants are invariant. We + # need to move computation out of while loop. + return loop_invariant_ids + + @block_context_manager + def _loop_invariant_elimination_block(self, block): + # Phase 1: Find vars needed to be renamed. + # + # while_loop outputs need to be renamed if the output will be eliminated + # (due to loop invariant) and is returned as block output (which would + # change the return var name and the program interface). 
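+        # For example (a sketch of the case handled below): if an invariant
+        # while_loop output %loop:1 is also a block output, Phase 2 inserts an
+        # identity op carrying the name of %loop:1, so the block's output names
+        # are preserved after the loop var is eliminated.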
+ # + # list[(v_src, v_tgt, before_op)]: will rename v_src to v_tgt before + # before_op (a while_loop) + output_rename = [] + for op in list(block.operations): + for b in op.blocks: + self._loop_invariant_elimination_block(b) + + if op.op_type != "while_loop": + continue + + loop_invariant_ids = self._detect_loop_invariants(op) + for i in loop_invariant_ids: + output_rename.append((op.loop_vars[i], op.outputs[i], op)) + if len(loop_invariant_ids) > 0: + # Avoid the following case: + # %a, %b = while_loop(..., name="b") + # becomes + # %b = identity(..., name="b") + # %a = while_loop(..., name="b") + # (two ops with the same name -> name collision) + op.name = op.name + "_renamed" + + # Phase 2: insert rename ops. This changes block.operations + for v_src, v_tgt, op in output_rename: + if v_tgt in block.outputs: + # rename the loop output to existing block output names + res = mb.identity(x=v_src, before_op=op, name=v_tgt.name) + op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=op, old_var=v_tgt, new_var=res + ) + + # Phase 3: Perform loop invariant elimination without fear! + for op in list(block.operations): + if op.op_type != "while_loop": + continue + loop_invariant_ids = self._detect_loop_invariants(op) + + # replace uses of loop_invariants with its source from outside of the + # while_loop op. + for i in loop_invariant_ids: + for block in op.blocks: + block.replace_uses_of_var_after_op( + anchor_op=None, old_var=block.inputs[i], new_var=op.loop_vars[i] + ) + + # replace block inputs + for block in op.blocks: + block.remove_inputs([block.inputs[i] for i in loop_invariant_ids]) + + # remove invariants from while_loop loop_vars + for i in loop_invariant_ids: + # replace usage of while_loop outputs that we'll eliminate. + op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=op, old_var=op.outputs[i], new_var=op.loop_vars[i] + ) + + # Remove after replacing to ensure program is valid + for i in loop_invariant_ids: + op.loop_vars[i].remove_child_op(op) + + op.loop_vars = tuple( + v for i, v in enumerate(op.loop_vars) if i not in loop_invariant_ids + ) + op._input_vars["loop_vars"] = op.loop_vars + + # remove invariants from while_loop body_block outputs + body_block = op.blocks[1] + body_block.set_outputs( + [v for i, v in enumerate(body_block.outputs) if i not in loop_invariant_ids] + ) + + # op._output_vars doesn't include cond var + op._output_vars = [ + v for i, v in enumerate(op._output_vars) if i not in loop_invariant_ids + ] + + # check healthy state + op.enclosing_block.validate() diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/noop_elimination.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/noop_elimination.py new file mode 100644 index 00000000..0e9aac55 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/noop_elimination.py @@ -0,0 +1,243 @@ +# Copyright (c) 2023, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + + +import numpy as np + +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.helper import block_context_manager +from coremltools.converters.mil.mil.passes.pass_registry import register_pass + + +@register_pass(namespace="common") +class noop_elimination(AbstractGraphPass): + """ + Remove ops that have no effect. + + .. code-block:: + + Given: + %1 (1, 96, 128, 64, fp32) = ... + %2 (1, 96, 128, 64, fp32) = reshape(%1) + ... + %3 (1, 96, 128, 64, fp32) = add(%2, constant) + ... + + Result: + %1 (1, 96, 128, 64, fp32) = ... + %3 (1, 96, 128, 64, fp32) = add(%1, constant) + ... + """ + + _SUPPORTED_OPS = { + "add", + "mul", + "floor_div", + "pow", + "real_div", + "sub", + "reshape", + "split", + "slice_by_index", + "slice_by_size", + "pad", + "tile", + "transpose", + "upsample_nearest_neighbor", + "upsample_bilinear", + "resize_bilinear", + "crop", + "linear_activation", + } + + def apply(self, prog): + for f in prog.functions.values(): + self._noop_elimination_block_wrapper(f) + + @staticmethod + def _match_pattern(op, block): + def _remove_elementwise_binary(op, x, y): + # We remove the ops that has op.x == x or op.y == y + def has_all_elements_equal_to(var, value): + if value is None: + return False + + if var.val is not None: + return np.all(var.val == value) + elif var.op is not None and var.op.op_type == "fill": + fill_value = var.op.value.val + return fill_value is not None and (fill_value == value) + else: + return False + + if has_all_elements_equal_to(op.x, x): + input_var = op.y + input_op = input_var.op + elif has_all_elements_equal_to(op.y, y): + input_var = op.x + input_op = input_var.op + else: + return False + + input_shape = input_var.sym_type + output_shape = op.outputs[0].sym_type + + # We might be using elementwise as broadcasting + if input_shape != output_shape: + return False + + if op.enclosing_block.try_replace_uses_of_var_after_op( + anchor_op=input_op, + old_var=op.outputs[0], + new_var=input_var, + ): + op.enclosing_block.remove_ops([op]) + return True + return False + + def remove_elementwise(op, block): + if op.op_type in {"add"}: + return _remove_elementwise_binary(op, 0, 0) + elif op.op_type in {"mul"}: + return _remove_elementwise_binary(op, 1, 1) + elif op.op_type in {"floor_div", "pow", "real_div"}: + return _remove_elementwise_binary(op, None, 1) + elif op.op_type in {"sub"}: + return _remove_elementwise_binary(op, None, 0) + else: + return False + + def remove_slice_by_index(op, block): + input_shape = op.x.sym_type + output_shape = op.outputs[0].sym_type + + if input_shape != output_shape: + return False + + if op.stride is not None and op.stride.val is not None: + stride = op.stride.val.flatten().tolist() + if any([x < 0 for x in stride]): + return False + + input_var = op.x + input_op = input_var.op + + if op.enclosing_block.try_replace_uses_of_var_after_op( + anchor_op=input_op, + old_var=op.outputs[0], + new_var=input_var, + ): + op.enclosing_block.remove_ops([op]) + return True + return False + + def remove_same_shape(op, block): + input_shape = op.x.sym_type + output_shape = op.outputs[0].sym_type + + if input_shape != output_shape: + return False + + input_var = op.x + input_op = input_var.op + + if op.enclosing_block.try_replace_uses_of_var_after_op( + anchor_op=input_op, + old_var=op.outputs[0], + 
new_var=input_var, + ): + op.enclosing_block.remove_ops([op]) + return True + return False + + def remove_linear(op, block): + if op.alpha.val != 1 or op.beta.val != 0: + return False + + input_var = op.x + input_op = input_var.op + + if op.enclosing_block.try_replace_uses_of_var_after_op( + anchor_op=input_op, + old_var=op.outputs[0], + new_var=input_var, + ): + op.enclosing_block.remove_ops([op]) + return True + return False + + def remove_transpose(op, block): + perm = np.array([p if p >= 0 else p + len(op.perm.val) for p in op.perm.val]) + sorted_perm = np.sort(perm) + if (perm != sorted_perm).any(): + return False + + input_var = op.x + input_op = input_var.op + + if op.enclosing_block.try_replace_uses_of_var_after_op( + anchor_op=input_op, + old_var=op.outputs[0], + new_var=input_var, + ): + op.enclosing_block.remove_ops([op]) + return True + return False + + op_to_removal_fn = { + "add": remove_elementwise, + "mul": remove_elementwise, + "floor_div": remove_elementwise, + "pow": remove_elementwise, + "real_div": remove_elementwise, + "sub": remove_elementwise, + "reshape": remove_same_shape, + "split": remove_same_shape, + "slice_by_index": remove_slice_by_index, + "slice_by_size": remove_same_shape, + "pad": remove_same_shape, + "tile": remove_same_shape, + "transpose": remove_transpose, + "upsample_nearest_neighbor": remove_same_shape, + "upsample_bilinear": remove_same_shape, + "resize_bilinear": remove_same_shape, + "crop": remove_same_shape, + "linear_activation": remove_linear, + } + + # abort if op output is a block output + if op.outputs[0] in op.enclosing_block.outputs: + return None + + if op.op_type in noop_elimination._SUPPORTED_OPS: + + if len(op.outputs) != 1: + return None + return op_to_removal_fn[op.op_type] + + return None + + @block_context_manager + def _noop_elimination_block_wrapper(self, block): + def _noop_elimination_block(block): + for op in list(block.operations): + for b in op.blocks: + block_changed = True + while block_changed: + block_changed = _noop_elimination_block(b) + if len(op.blocks) > 0: + continue + + remove_fn = noop_elimination._match_pattern(op, block) + if remove_fn is not None: + status = remove_fn(op, block) + # has to break as the downstream iterator is affected. + if status: + return status + return False + + block_changed = True + while block_changed: + block_changed = _noop_elimination_block(block) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/remove_redundant_ops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/remove_redundant_ops.py new file mode 100644 index 00000000..2c0905e0 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/remove_redundant_ops.py @@ -0,0 +1,196 @@ +# Copyright (c) 2023, Apple Inc. All rights reserved. 
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import collections
+
+from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
+from coremltools.converters.mil.mil.passes.helper import _are_ops_identical, block_context_manager
+from coremltools.converters.mil.mil.passes.pass_registry import register_pass
+
+
+@register_pass(namespace="common")
+class remove_redundant_ops(AbstractGraphPass):
+    """
+    If there are multiple ops with "identical" inputs, then they are redundant and all but one of them can be removed.
+    This pass checks and removes such ops.
+
+    Since all inputs to ops in MIL are named, two ops with the same ``op_type`` can be compared by comparing their
+    correspondingly named inputs. Inputs are treated as identical if one of the following is true:
+
+    - The input is a constant var, in which case its value should have the same dtype and numerical value.
+    - The input is a non constant var, in which case it should be the same var object.
+
+    This pass iterates over the ops, takes each op's first output var, and then builds a candidate op list from the child
+    ops of this var.
+    This candidate ops list contains ops of the same ``op_type``, arranged in topological order.
+    From each of these candidate ops in the list, the second, third, and subsequent ops are pairwise compared with the first op,
+    and if identical to it, they are removed. For example:
+
+    .. code-block::
+
+        Input:
+            %0 = op0(...)
+            %1 = op1(...)
+            %2 = const(val=4.5)
+            %3 = const(val=4.5)
+            %4 = op2(%1, %0, %2)
+            %5 = op3(%1, %0, %3)
+
+        Output:
+            %0 = op0(...)
+            %1 = op1(...)
+            %2 = const(val=4.5)
+            %3 = const(val=4.5) # this will get removed later by dead code elimination pass
+            %4 = op2(%1, %0, %2)
+
+    In the example above, ``op3`` is removed and all uses of ``%5`` are replaced by ``%4``.
+    For more examples, see "TestRemoveRedundantOpsPass".
+    """
+
+    _NON_REDUNDANT_OPS = tuple()
+
+    def apply(self, prog):
+        for f in prog.functions.values():
+            self._remove_redundant_ops_in_block_wrapper(f)
+
+    @staticmethod
+    def _is_op_eligible_to_be_removed(op):
+        if (
+            len(op.blocks) != 0
+            or op.op_type.startswith("random")
+            or op.op_type in remove_redundant_ops._NON_REDUNDANT_OPS
+        ):
+            return False
+        else:
+            return True
+
+    @staticmethod
+    def _get_candidate_ops_list(prospective_ops_list):
+        od = collections.OrderedDict()
+        enclosing_block = [op.enclosing_block for op in prospective_ops_list]
+        if len(set(enclosing_block)) > 1:  # all candidate ops must belong to the same block
+            return []
+        for op in prospective_ops_list:
+            if remove_redundant_ops._is_op_eligible_to_be_removed(op):
+                od[op] = enclosing_block[0].operations.index(op)
+        # Sort the ops according to their index of appearing in block.operations, which is
+        # topologically sorted
+        return [x[0] for x in sorted(od.items(), key=lambda t: t[1])]
+
+    @staticmethod
+    def _get_candidate_ops_lists_from_var(var):
+        """
+        Return a list of lists.
+        Each element is a list of a subset of the child ops of var, which satisfies the following conditions:
+        - they are of the same op_type
+        - ops are not repeated in it. The .child_ops property of a var may sometimes contain an op repeated more than once
+        - the ops are ordered based on the order in which they appear in the block.operations list (which is topologically sorted),
+          with ops appearing earlier in that list appearing first here.
+        """
+        candidate_ops_lists = []
+
+        op_types_to_ops = collections.OrderedDict()
+        for op in var.child_ops:
+            if op.op_type in op_types_to_ops:
+                op_types_to_ops[op.op_type].append(op)
+            else:
+                op_types_to_ops[op.op_type] = [op]
+
+        for v in op_types_to_ops.values():
+            if len(v) > 1:
+                candidate_ops_list = remove_redundant_ops._get_candidate_ops_list(v)
+                if len(candidate_ops_list) > 1:
+                    candidate_ops_lists.append(candidate_ops_list)
+
+        return candidate_ops_lists
+
+    @staticmethod
+    def _try_to_remove_ops(candidate_ops_list):
+        # candidate_ops_list contains ops in topological order.
+        # All the ops in candidate_ops_list will be compared to the first op, and removed if identical to it.
+        # Removing ops later in the topological order is much easier, as their output vars
+        # can simply be replaced by the output var of the first_op; this doesn't require
+        # changing any op order in the block.
+        if len(candidate_ops_list) < 2:
+            return False
+        first_op = candidate_ops_list[0]
+        block = first_op.enclosing_block
+
+        # currently, we only consider the cases when the op has 1 output.
+        # The replace var logic below only handles the single output case.
+        if len(first_op.outputs) > 1:
+            return False
+
+        ops_to_remove = []
+        for op in candidate_ops_list[1:]:
+            if op.outputs[0] not in block.outputs:  # to make sure we don't remove an output op
+                if _are_ops_identical(first_op, op):
+                    ops_to_remove.append(op)
+
+        if len(ops_to_remove) == 0:
+            return False
+
+        # remove uses of output vars of the ops to be removed.
+        # This can be safely done, since all the ops in ops_to_remove
+        # appear after first_op, hence the first_op.outputs[0] variable is in
+        # scope before the op's output var
+        for op in ops_to_remove:
+            op.enclosing_block.replace_uses_of_var_after_op(
+                anchor_op=op, old_var=op.outputs[0], new_var=first_op.outputs[0]
+            )
+        block.remove_ops(ops_to_remove)
+        return True
+
+    @staticmethod
+    def _try_to_transform(parent_var):
+        """
+        Scan the child ops of parent_var to find and remove identical ops, if any.
+        Returns True if successful in finding such redundant ops.
+        """
+        candidate_ops_lists = remove_redundant_ops._get_candidate_ops_lists_from_var(parent_var)
+        block_changed = False
+        for ops_list in candidate_ops_lists:
+            if remove_redundant_ops._try_to_remove_ops(ops_list):
+                block_changed = True
+        return block_changed
+
+    @block_context_manager
+    def _remove_redundant_ops_in_block_wrapper(self, block):
+        def _remove_redundant_ops_in_block(block):
+            if isinstance(block.inputs, dict):
+                block_input_var_list = list(block.inputs.values())
+            elif isinstance(block.inputs, (list, tuple)):
+                block_input_var_list = block.inputs
+            else:
+                raise ValueError("Unrecognized type of block.inputs; it's neither a list nor a dict.")
+
+            # iterate over the block inputs
+            for input_var in block_input_var_list:
+                if len(input_var.child_ops) > 1:
+                    self._try_to_transform(input_var)
+
+            # iterate over the ops in the block
+            graph_updated = False
+            for op in block.operations:
+                if op.op_type == "const":
+                    continue
+
+                for b in op.blocks:
+                    block_changed = True
+                    while block_changed:
+                        block_changed = _remove_redundant_ops_in_block(b)
+
+                if len(op.outputs) > 0 and len(op.outputs[0].child_ops) > 1:
+                    # currently, we only check the first output of the op.
+                    # this can be extended, if required, to check the other outputs.
+                    graph_updated = self._try_to_transform(op.outputs[0])
+                    # has to break as the downstream iterator is affected.
+ if graph_updated: + return graph_updated + return graph_updated + + block_changed = True + while block_changed: + block_changed = _remove_redundant_ops_in_block(block) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/remove_symbolic_reshape.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/remove_symbolic_reshape.py new file mode 100644 index 00000000..60db0130 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/remove_symbolic_reshape.py @@ -0,0 +1,95 @@ +# Copyright (c) 2023, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + + +from coremltools import _logger as logger +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import Program +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.helper import block_context_manager +from coremltools.converters.mil.mil.passes.pass_registry import register_pass +from coremltools.converters.mil.mil.types.symbolic import any_variadic, is_symbolic, num_symbolic + + +@register_pass(namespace="common") +class remove_symbolic_reshape(AbstractGraphPass): + """ + Convert symbolic shape in ``reshape`` to integers. + + Note: This does not perform any optimization, but simply + replaces symbols with positive integers if solved from volumetric + constraint, or -1. Therefore, this pass fails if more than one symbol + needs to be resolved to -1. + + .. code-block:: + + # Before remove_symbolic_reshape pass. + main(%x: (s0, 4, fp32)) { + block0() { + %reshape_0_shape_0: (3,i32)^ = const(val=(s0, s1, 2)) + %reshape_0: (s0, 2, 2, fp32) = reshape(x=%x, shape=%reshape_0_shape_0) + } -> (%reshape_0) + } + + # After remove_symbolic_reshape pass. + main(%x: (s0, 4, fp32)) { + block0() { + %reshape_0_shape_0x: (3,i32)* = const(val=[-1, 2, 2]) + %reshape_0: (-1, 2, 2, fp32) = reshape(x=%x, shape=%reshape_0_shape_0x) + } -> (%reshape_0) + } + + TODO (rdar://59165842): Use expand_dims, squeeze etc to use 0 instead of dynamic reshape with -1. + """ + + def apply(self, prog: Program): + for f in prog.functions.values(): + num_changes = self._remove_symbolic_reshape_block(f) + msg = "remove_symbolic_reshape: changed {} reshapes." + logger.info(msg.format(num_changes)) + + @block_context_manager + def _remove_symbolic_reshape_block(self, block): + num_changes = 0 + for op in list(block.operations): + for b in op.blocks: + num_changes += self._remove_symbolic_reshape_block(b) + if op.op_type != "reshape": + continue + if op.shape.val is not None: + # shape does not contain symbol. + continue + if op.shape.sym_val is None: + # shape is runtime determined. + continue + if len(op.shape.child_ops) > 1: + continue + # Use output shape as `shape` + shape = op.outputs[0].shape + if any_variadic(shape): + msg = ( + "Cannot reshape to variadic from a compile time " + + "shape argument. Variadic shape can only be achieved " + + "via runtime shape argument. 
op: {}"
+                )
+                raise ValueError(msg.format(op))
+            num_symbols = num_symbolic(shape)
+            if num_symbols > 1:
+                continue
+            # Convert the one symbol to -1
+            integer_shape = [-1 if is_symbolic(i) else i for i in shape]
+            shape_const = mb.const(
+                val=integer_shape,
+                name=op.shape.name + "x",
+                before_op=op,
+            )
+            reshaped = mb.reshape(x=op.x, shape=shape_const, name=op.name, before_op=op)
+            op.enclosing_block.replace_uses_of_var_after_op(
+                anchor_op=op, old_var=op.outputs[0], new_var=reshaped
+            )
+            # Remove all the ops at once
+            block.remove_ops([op, op.shape.op])
+            num_changes += 1
+        return num_changes
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/topological_reorder.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/topological_reorder.py
new file mode 100644
index 00000000..afbc88ee
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/cleanup/topological_reorder.py
@@ -0,0 +1,169 @@
+# Copyright (c) 2023, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
+from coremltools.converters.mil.mil.passes.helper import block_context_manager
+from coremltools.converters.mil.mil.passes.pass_registry import register_pass
+
+
+@register_pass(namespace="common")
+class topological_reorder(AbstractGraphPass):
+    """
+    Topologically re-orders the list of operations in a program by placing each operation closer to its
+    first use, or at the end if it is not consumed by any other operation.
+
+    Currently, this pass re-orders only Transpose and Cast operations.
+
+    .. code-block::
+
+        # Example: input program
+        main(x: (2, 4, fp32)) {
+            x = mb.cast(x=x, dtype="fp16")
+            x1 = mb.square(x=x)
+            x1_t = mb.transpose(x=x1, perm=[1, 0])
+            x2 = mb.cast(x=x1_t, dtype="fp32")
+            x3 = mb.log(x=x)
+            x3_t = mb.transpose(x=x3, perm=[1, 0])
+            x4 = mb.cast(x=x3_t, dtype="fp32")
+            x5 = mb.relu(x=x)
+            x6 = mb.cast(x=x5, dtype="fp32")
+            x7 = mb.relu(x=x6)
+            x8 = mb.relu(x=x)
+        } -> x2, x4, x7, x8
+
+        # After moving `cast` ops, it becomes
+        main(x: (2, 4, fp32)) {
+            x = mb.cast(x=x, dtype="fp16")
+            x1 = mb.square(x=x)
+            x1_t = mb.transpose(x=x1, perm=[1, 0])
+            x3 = mb.log(x=x)
+            x3_t = mb.transpose(x=x3, perm=[1, 0])
+            x5 = mb.relu(x=x)
+            x6 = mb.cast(x=x5, dtype="fp32")
+            x7 = mb.relu(x=x6)
+            x8 = mb.relu(x=x)
+            x4 = mb.cast(x=x3_t, dtype="fp32")
+            x2 = mb.cast(x=x1_t, dtype="fp32")
+        } -> x2, x4, x7, x8
+
+        # After moving `transpose` ops, it becomes
+        main(x: (2, 4, fp32)) {
+            x = mb.cast(x=x, dtype="fp16")
+            x1 = mb.square(x=x)
+            x3 = mb.log(x=x)
+            x5 = mb.relu(x=x)
+            x6 = mb.cast(x=x5, dtype="fp32")
+            x7 = mb.relu(x=x6)
+            x8 = mb.relu(x=x)
+            x3_t = mb.transpose(x=x3, perm=[1, 0])
+            x4 = mb.cast(x=x3_t, dtype="fp32")
+            x1_t = mb.transpose(x=x1, perm=[1, 0])
+            x2 = mb.cast(x=x1_t, dtype="fp32")
+        } -> x2, x4, x7, x8
+    """
+
+    def apply(self, prog):
+        for f_name, f in prog.functions.items():
+            self._move_operations_to_the_end_block(f, ["cast", "transpose"])
+
+    @staticmethod
+    @block_context_manager
+    def _move_operations_to_the_end_block(block, op_type_to_move):
+        # Moves ops with `op_type_to_move` in `block.operations` (list) to the end of the program.
+ # Note: ops with `op_type_to_move` and is dead code are moved toward end, which can be eliminated + # later with dead-code-elimination pass. + # + # Inputs: + # - block (mil.Block): block to be modified in-place + # - op_type_to_move (List[str]) + # Returns: + # - set[Var]: Set of vars consumed in block (or returned as block output) + + # first_use maps var to (index, op) representing the first op in block.operation that consumes this var. + first_use = {} # var -> op + ops_to_remove = [] # list of ops to be deleted at the end of pass + for index, op in enumerate(reversed(block.operations[:])): + current_op = op + + if op.op_type in op_type_to_move: + # Mark op for deletion + ops_to_remove.append(op) + + # Create list of operations consuming each output of current operation + first_consumers = [first_use[v] for v in op.outputs if v in first_use] + + before_op = None # None means adding at the end of block + if len(first_consumers) > 0: + # Current op should be moved right before this first consumer of one of it's output. + # 1. Find indices for all the consumer ops of outputs + # 2. Move current op right before first consumer i.e. smallest index in block.operations + first_use_indices = [ + block.operations.index(first_use_op) for first_use_op in first_consumers + ] + before_op = block.operations[min(first_use_indices)] + + # Create new copy of current operation + new_var = getattr(mb, op.op_type)(**op.inputs, before_op=before_op) + + if not isinstance(new_var, (list, tuple)): + new_var = [new_var] + + # Override current_op to be newly created op to ensure `first_use` + # points to newly created op instead of old one. + current_op = new_var[0].op + + for old_output_var, new_output_var in zip(op.outputs, new_var): + block.replace_uses_of_var_after_op( + anchor_op=None, old_var=old_output_var, new_var=new_output_var + ) + + # Collect input vars from sub-block if present + relevant_inputs = set() + for b in current_op.blocks: + relevant_inputs |= topological_reorder._move_operations_to_the_end_block( + b, op_type_to_move + ) + + # Collect vars from operation input + for v in current_op.inputs.values(): + if isinstance(v, (tuple, list)): + relevant_inputs |= set(v) + continue + relevant_inputs.add(v) + + # Mark current op as first use for all the input vars + # a) of it's sub-block + # b) of current op + for v in relevant_inputs: + # input is seen for the first time or + # current_op is first_use i.e. appears before earlier recorded first_use. + # Note: since ops are moved to the end, it's possible that an op is moved right after + # earlier recorded first_use and in such cases, first_use should not be modified. 
+ # + # == Example == + # main( %x: (10, 20, fp32)(Tensor)) { + # block0() { + # %cast_0: (10, 20, fp16)(Tensor) = cast(x= %x, dtype = "fp16", name = "cast_0") + # %cast_1: (10, 20, fp32)(Tensor) = cast(x= %cast_0, dtype = "fp32", name = "cast_1") + # %transpose_0: (20, 10, fp16)(Tensor) = transpose(x= %cast_0, perm = [1, 0], name = "transpose_0") + # %transpose_1: (10, 20, fp16)(Tensor) = transpose(x= %transpose_0, perm = [1, 0], name = "transpose_1") + # } -> (% cast_1, % transpose_1) + # } + # In above example, `%cast_1` will be moved to the end of the block and first_use info for `%cast_0` + # should point to `%transpose_0` and not to `%cast_1` + if v not in first_use or block.operations.index( + first_use[v] + ) > block.operations.index(current_op): + first_use[v] = current_op + + # Remove ops that are reordered + block.remove_ops(ops_to_remove) + + # Returns set of vars consumed in current block + vars_consumed_in_block = set([v for v in first_use]) + vars_consumed_in_block.update(block.outputs) + return vars_consumed_in_block diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/lower_complex_dialect_ops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/lower_complex_dialect_ops.py new file mode 100644 index 00000000..64943197 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/lower_complex_dialect_ops.py @@ -0,0 +1,552 @@ +# Copyright (c) 2022, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +""" +This file contains a pass for lowering complex dialect ops into core ops. + +Steps for adding a new complex dialect op: +1. Add a dialect op in complex_dialect_ops.py +2. Add a corresponding lowering function + +In Step 2, notice that when implementing lower functions, we need to specify before_op during +lowering to core ops. It's for both correctness as well as SSA graph's readability, because the +generated core ops should be placed before the ops which were placed after that dialect op. +More specifically, here is the SSA graph before lowering: + block0() { + %1 = complex_dialect_op(data=%input) + %2 = core_op1(x=%1) + %3 = core_op2(x=%2) + } -> (%3) +During lowering `complex_dialect_op`, we want all newly generated core ops are placed before the +`core_op1`. +""" + +import functools +from typing import Callable, Dict, Optional, Tuple + +import numpy as np + +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.operation import Operation +from coremltools.converters.mil.mil.ops.defs.complex_dialect_ops import ( + fft_canonicalize_length_dim, + fft_canonicalize_shapes_dims, +) +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.helper import block_context_manager +from coremltools.converters.mil.mil.passes.pass_registry import register_pass +from coremltools.converters.mil.mil.var import ComplexVar, Var + + +class LowerComplex: + # The map recording each complex dialect op's lowering function. 
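+    # Lowering functions are registered with the decorator below; a sketch
+    # (using a hypothetical "complex_abs" dialect op, not one defined here):
+    #
+    #     @LowerComplex.register_lower_func(op_type="complex_abs")
+    #     def _lower_complex_abs(op):
+    #         ...  # build core ops with before_op=op and return the new var
+    #
+    # The pass then looks the function up by op_type when visiting each op.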
+ _lower_map: Dict[str, Callable] = dict() + + @staticmethod + def register_lower_func(op_type: str) -> Callable: + """Register lowering function for complex dialect ops.""" + + def lower_func_wrapper(func): + @functools.wraps(func) + def wrapper_inner(*args, **kwargs): + return func(*args, **kwargs) + + if op_type in LowerComplex._lower_map: + raise ValueError(f"The op {op_type} already got lowering function registered.") + LowerComplex._lower_map[op_type] = func + return wrapper_inner + + return lower_func_wrapper + + @staticmethod + def has_lower_func(op_type: str) -> bool: + """Check if the complex dialect op has corresponding lowering function.""" + return op_type in LowerComplex._lower_map + + @staticmethod + def get_lower_func(op_type: str) -> Callable: + """Get the complex dialect op's lowering function.""" + if not LowerComplex.has_lower_func(op_type): + raise ValueError(f"The op {op_type} doesn't have any lowering function registered.") + return LowerComplex._lower_map[op_type] + + +def _resize_data(input_data: Var, dims: Tuple[int], sizes: Tuple[int], before_op: Operation) -> Var: + """ + For each dim in `dims`, resize the input data size to corresponding size in `sizes`. + If the `size` is smaller than the data's size at `dim`, trim the data to `size`. + If the `size` is larger, pad zeros to make the data reaches `size`. + """ + for (dim, size) in zip(dims, sizes): + if size < input_data.shape[dim]: + indices = mb.range_1d(start=0, end=size, step=1, before_op=before_op) + input_data = mb.gather(x=input_data, indices=indices, axis=dim, before_op=before_op) + elif size > input_data.shape[dim]: + zero_shape = list(input_data.shape) + zero_shape[dim] = size - input_data.shape[dim] + zero_data = mb.fill(shape=zero_shape, value=0.0, before_op=before_op) + input_data = mb.concat(values=[input_data, zero_data], axis=dim, before_op=before_op) + + return input_data + + +def _restore_conj( + input_data: ComplexVar, n: Var, dim: Var, before_op: Operation +) -> Tuple[Var, Var]: + """ + The input is interpreted as a one-sided Hermitian signal in the Fourier domain, as produced + by rfft(). So we need to restore it to the full matrix by following X[i] = conj(X[-i]). + Real part's conj is itself, and imaginary part's conj is negative of the original value. + For odd number n, the last element is also included in mirroring input. 
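+
+    For example (a sketch): with n=4, a one-sided input [X0, X1, X2] along `dim`
+    is restored to [X0, X1, X2, conj(X1)], where conj keeps the real part and
+    negates the imaginary part.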
+    """
+    real_data: Var = input_data.real
+    imag_data: Var = input_data.imag
+
+    size = 2 * (input_data.real.shape[dim.val] - 1)
+    if n is not None and n.val is not None:
+        size = n.val
+    real_data = _resize_data(
+        real_data, dims=(dim.val,), sizes=(size // 2 + 1,), before_op=before_op
+    )
+    imag_data = _resize_data(
+        imag_data, dims=(dim.val,), sizes=(size // 2 + 1,), before_op=before_op
+    )
+
+    range_end = real_data.shape[dim.val] - 2 if size % 2 == 0 else real_data.shape[dim.val] - 1
+    if range_end > 0:
+        mirror_indices = mb.range_1d(start=range_end, end=0, step=-1, before_op=before_op)
+        real_part_mirror_values = mb.gather(
+            x=real_data, indices=mirror_indices, axis=dim.val, before_op=before_op
+        )
+        imag_part_mirror_values = mb.gather(
+            x=imag_data, indices=mirror_indices, axis=dim.val, before_op=before_op
+        )
+        imag_part_mirror_values = mb.mul(x=imag_part_mirror_values, y=-1.0, before_op=before_op)
+
+        real_data = mb.concat(
+            values=[real_data, real_part_mirror_values],
+            axis=dim.val,
+            before_op=before_op,
+        )
+        imag_data = mb.concat(
+            values=[imag_data, imag_part_mirror_values],
+            axis=dim.val,
+            before_op=before_op,
+        )
+
+    return real_data, imag_data
+
+
+def _fft_1d(
+    input_real: Var,
+    input_imag: Var,
+    n: Optional[Var],
+    dim: Optional[Var],
+    norm: Optional[Var],
+    before_op: Operation,
+    inverse: bool = False,  # For inverse FFT.
+) -> Tuple[Var, Var]:
+    """
+    1-D FFT by DFT Matrix Multiplication.
+
+    The core issue is how to derive the DFT matrix. As the DFT matrix consists of different powers
+    of `w`, where w = e^(2*pi*i/N), we need to separate the real and imaginary parts of w. To achieve
+    that, we need to find a way to construct the following matrix (from the powers of `w` in the DFT):
+        0   0      0      ...  0
+        0   1      2      ...  N-1
+        0   2      4      ...  2(N-1)
+        ...        ...         ...
+        0   N-1    2(N-1) ...  (N-1)(N-1)
+    This matrix can be derived as the outer product of two range tensors.
+
+    After getting that base matrix, we can take sin and cos to get the corresponding `sin_base` and
+    `cos_base` matrices. Now, based on some math formulas, including:
+    * The addition of complex numbers: (a+bi)+(c+di) = (a+c)+(b+d)i.
+    * The multiplication of complex numbers: (a+bi)(c+di) = ac+adi+bci+bdi^2 = (ac-bd)+(ad+bc)i.
+    * Euler's formula: e^(ix) = cos(x) + i*sin(x).
+    * Cosine is an even function: cos(-x) = cos(x).
+    * Sine is an odd function: sin(-x) = -sin(x).
+    We can get:
+    * The real part of the output: cos_base * input_real + sin_base * input_imag
+    * The imaginary part of the output: -(sin_base * input_real - cos_base * input_imag)
+    That's how we calculate the real and imaginary parts separately for the FFT.
+    """
+    n, dim = fft_canonicalize_length_dim(input_real, n, dim)
+
+    # Swaps target dim axis to the first axis.
+    axes = list(range(len(input_real.shape)))
+    axes[0] = dim
+    axes[dim] = 0
+    transposed_input_real = mb.transpose(x=input_real, perm=axes, before_op=before_op)
+    transposed_input_imag = mb.transpose(x=input_imag, perm=axes, before_op=before_op)
+
+    # Trim or pad input according to n.
+    transposed_input_real = _resize_data(
+        input_data=transposed_input_real,
+        dims=(0,),
+        sizes=(n,),
+        before_op=before_op,
+    )
+    transposed_input_imag = _resize_data(
+        input_data=transposed_input_imag,
+        dims=(0,),
+        sizes=(n,),
+        before_op=before_op,
+    )
+
+    # Calculate DFT matrix.
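+    # As a concrete sketch of the docstring's construction: for N = 3 the exponent
+    # matrix (outer product of range(3) with itself) is
+    #     [[0, 0, 0],
+    #      [0, 1, 2],
+    #      [0, 2, 4]],
+    # which is then scaled by 2*pi/N below before taking cos/sin.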
+ original_shape = transposed_input_real.shape + N = transposed_input_real.shape[0] + reshaped_input_real = mb.reshape(x=transposed_input_real, shape=[N, -1], before_op=before_op) + reshaped_input_imag = mb.reshape(x=transposed_input_imag, shape=[N, -1], before_op=before_op) + tmp = mb.range_1d(start=0, end=N, step=1, before_op=before_op) + # Use MIL ops to calculate base = torch.outer(tmp, tmp) * (2 * torch.pi / N). + tmp_x = mb.reshape(x=tmp, shape=[-1, 1], before_op=before_op) + tmp_y = mb.reshape(x=tmp, shape=[1, -1], before_op=before_op) + base = mb.matmul(x=tmp_x, y=tmp_y, before_op=before_op) + base = mb.cast(x=base, dtype="fp32", before_op=before_op) + base = mb.mul(x=base, y=2 * np.pi, before_op=before_op) + N = mb.cast(x=N, dtype="fp32", before_op=before_op) + base = mb.real_div(x=base, y=N, before_op=before_op) + # Get real part and imaginary part separately. + cos_base = mb.cos(x=base, before_op=before_op) + sin_base = mb.sin(x=base, before_op=before_op) + + if not inverse: + real_part = mb.add( + x=mb.matmul(x=cos_base, y=reshaped_input_real, before_op=before_op), + y=mb.matmul(x=sin_base, y=reshaped_input_imag, before_op=before_op), + before_op=before_op, + ) + imag_part = mb.sub( + x=mb.matmul(x=sin_base, y=reshaped_input_real, before_op=before_op), + y=mb.matmul(x=cos_base, y=reshaped_input_imag, before_op=before_op), + before_op=before_op, + ) + imag_part = mb.mul(x=imag_part, y=-1.0, before_op=before_op) + else: + real_part = mb.sub( + x=mb.matmul(x=cos_base, y=reshaped_input_real, before_op=before_op), + y=mb.matmul(x=sin_base, y=reshaped_input_imag, before_op=before_op), + before_op=before_op, + ) + imag_part = mb.add( + x=mb.matmul(x=sin_base, y=reshaped_input_real, before_op=before_op), + y=mb.matmul(x=cos_base, y=reshaped_input_imag, before_op=before_op), + before_op=before_op, + ) + + real_part = mb.reshape(x=real_part, shape=original_shape, before_op=before_op) + imag_part = mb.reshape(x=imag_part, shape=original_shape, before_op=before_op) + + # Swaps dim back. + real_part = mb.transpose(x=real_part, perm=axes, before_op=before_op) + imag_part = mb.transpose(x=imag_part, perm=axes, before_op=before_op) + + # Normalization if needed. + apply_scale = False + scale = 1 + if norm.val is not None: + # For FFT, "forward" means normalize 1/N, while in IFFT, "backward" means normalize 1/N. + if (not inverse) and (norm.val in ["forward", "ortho"]): + apply_scale = True + scale = N if norm.val == "forward" else mb.sqrt(x=N, before_op=before_op) + if inverse and (norm.val in ["backward", "ortho"]): + apply_scale = True + scale = N if norm.val == "backward" else mb.sqrt(x=N, before_op=before_op) + if apply_scale: + real_part = mb.real_div(x=real_part, y=scale, before_op=before_op) + imag_part = mb.real_div(x=imag_part, y=scale, before_op=before_op) + + return real_part, imag_part + + +def _rfft_1d( + input_real: Var, + n: Optional[Var], + dim: Optional[Var], + norm: Optional[Var], + before_op: Operation, +) -> Tuple[Var, Var]: + """ + It's similar to fft, but as the input is real data, the redundant info (the conjugate part) is + removed in the result. 
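+    Only the first (n // 2 + 1) frequency bins along `dim` are kept; e.g., for a
+    length-4 real signal, bins [X0, X1, X2] are returned and conj(X1) is dropped.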
+ """ + input_imag = mb.fill( + shape=mb.shape(x=input_real, before_op=before_op), + value=0.0, + before_op=before_op, + ) + real_data, imag_data = _fft_1d(input_real, input_imag, n, dim, norm, before_op=before_op) + remain_len = real_data.shape[dim.val] // 2 + 1 + remain_indices = mb.range_1d(start=0, end=remain_len, step=1, before_op=before_op) + real_data = mb.gather(x=real_data, indices=remain_indices, axis=dim.val, before_op=before_op) + imag_data = mb.gather(x=imag_data, indices=remain_indices, axis=dim.val, before_op=before_op) + + return real_data, imag_data + + +def _wrap_complex_output(original_output: Var, real_data: Var, imag_data: Var) -> ComplexVar: + return ComplexVar( + name=original_output.name + "_lowered", + sym_type=original_output.sym_type, + real=real_data, + imag=imag_data, + ) + + +@LowerComplex.register_lower_func(op_type="complex") +def _lower_complex(op: Operation): + return _wrap_complex_output(op.outputs[0], op.real_data, op.imag_data) + + +@LowerComplex.register_lower_func(op_type="complex_real") +def _lower_complex_real(op: Operation): + complex_input: ComplexVar = op.data + # Use an identity op to avoid the block's input name inconsistency issue. If we directly use + # complex_input.real, the var's name could be inconsistent with the block's input name. + result = mb.identity(x=complex_input.real, before_op=op) + return result + + +@LowerComplex.register_lower_func(op_type="complex_imag") +def _lower_complex_imag(op: Operation): + complex_input: ComplexVar = op.data + # Use an identity op to avoid the block's input name inconsistency issue. If we directly use + # complex_input.imag, the var's name could be inconsistent with the block's input name. + result = mb.identity(x=complex_input.imag, before_op=op) + return result + + +@LowerComplex.register_lower_func(op_type="complex_fft") +def _lower_complex_fft(op: Operation): + if types.is_complex(op.data.dtype): + real_data = op.data.real + imag_data = op.data.imag + else: + real_data = op.data + imag_data = mb.fill( + shape=mb.shape(x=real_data, before_op=op), + value=mb.cast( + x=mb.const(val=0.0, before_op=op), + dtype=real_data.dtype.__name__, + before_op=op, + ), + before_op=op, + ) + real_data, imag_data = _fft_1d( + real_data, + imag_data, + op.n, + op.dim, + op.norm, + before_op=op, + ) + return _wrap_complex_output(op.outputs[0], real_data, imag_data) + + +@LowerComplex.register_lower_func(op_type="complex_fftn") +def _lower_complex_fftn(op: Operation): + if types.is_complex(op.data.dtype): + real_data = op.data.real + imag_data = op.data.imag + else: + real_data = op.data + imag_data = mb.fill( + shape=mb.shape(x=real_data, before_op=op), + value=mb.cast( + x=mb.const(val=0.0, before_op=op), + dtype=real_data.dtype.__name__, + before_op=op, + ), + before_op=op, + ) + + shapes, dims = fft_canonicalize_shapes_dims(real_data, op.shapes, op.dims) + for shape, dim in zip(shapes, dims): + real_data, imag_data = _fft_1d( + real_data, + imag_data, + n=mb.const(val=shape, before_op=op), + dim=mb.const(val=dim, before_op=op), + norm=op.norm, + before_op=op, + ) + + return _wrap_complex_output(op.outputs[0], real_data, imag_data) + + +@LowerComplex.register_lower_func(op_type="complex_rfft") +def _lower_complex_rfft(op: Operation): + real_data, imag_data = _rfft_1d(op.data, op.n, op.dim, op.norm, before_op=op) + return _wrap_complex_output(op.outputs[0], real_data, imag_data) + + +@LowerComplex.register_lower_func(op_type="complex_rfftn") +def _lower_complex_rfftn(op: Operation): + shapes, dims = 
fft_canonicalize_shapes_dims(op.data, op.shapes, op.dims) + real_data, imag_data = _rfft_1d( + op.data, + mb.const(val=shapes[-1], before_op=op), + mb.const(val=dims[-1], before_op=op), + op.norm, + before_op=op, + ) + for shape, dim in zip(shapes[:-1], dims[:-1]): + real_data, imag_data = _fft_1d( + real_data, + imag_data, + n=mb.const(val=shape, before_op=op), + dim=mb.const(val=dim, before_op=op), + norm=op.norm, + before_op=op, + ) + return _wrap_complex_output(op.outputs[0], real_data, imag_data) + + +@LowerComplex.register_lower_func(op_type="complex_ifft") +def _lower_complex_ifft(op: Operation): + real_data, imag_data = _fft_1d( + op.data.real, op.data.imag, op.n, op.dim, op.norm, before_op=op, inverse=True + ) + return _wrap_complex_output(op.outputs[0], real_data, imag_data) + + +@LowerComplex.register_lower_func(op_type="complex_ifftn") +def _lower_complex_ifftn(op: Operation): + real_data = op.data.real + imag_data = op.data.imag + shapes, dims = fft_canonicalize_shapes_dims(real_data, op.shapes, op.dims) + for shape, dim in zip(shapes, dims): + real_data, imag_data = _fft_1d( + real_data, + imag_data, + n=mb.const(val=shape, before_op=op), + dim=mb.const(val=dim, before_op=op), + norm=op.norm, + before_op=op, + inverse=True, + ) + return _wrap_complex_output(op.outputs[0], real_data, imag_data) + + +@LowerComplex.register_lower_func(op_type="complex_irfft") +def _lower_complex_irfft(op: Operation): + real_data, imag_data = _restore_conj(op.data, op.n, op.dim, before_op=op) + n, dim = fft_canonicalize_length_dim(op.data, op.n, op.dim, c2r=True) + real_data, imag_data = _fft_1d( + real_data, + imag_data, + mb.const(val=n, before_op=op), + mb.const(val=dim, before_op=op), + op.norm, + before_op=op, + inverse=True, + ) + return real_data + + +@LowerComplex.register_lower_func(op_type="complex_irfftn") +def _lower_complex_irfftn(op: Operation): + real_data = op.data.real + imag_data = op.data.imag + shapes, dims = fft_canonicalize_shapes_dims(real_data, op.shapes, op.dims, c2r=True) + + # For all but last dim/shape, do N-D IFFT. + for shape, dim in zip(shapes[:-1], dims[:-1]): + real_data, imag_data = _fft_1d( + real_data, + imag_data, + n=mb.const(val=shape, before_op=op), + dim=mb.const(val=dim, before_op=op), + norm=op.norm, + before_op=op, + inverse=True, + ) + + # For the last dim/shape, do 1-D IRFFT. 
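+    # (A sketch of what follows: _restore_conj rebuilds the conjugate-symmetric
+    # full spectrum, a full complex IFFT runs with inverse=True, and the result
+    # is trimmed to length n along dim; only the real part is returned.)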
+    n: Var = mb.const(val=shapes[-1], before_op=op)
+    dim: Var = mb.const(val=dims[-1], before_op=op)
+    real_data, imag_data = _restore_conj(
+        input_data=_wrap_complex_output(op.outputs[0], real_data, imag_data),
+        n=n,
+        dim=dim,
+        before_op=op,
+    )
+    real_data, imag_data = _fft_1d(
+        real_data, imag_data, n, dim, op.norm, before_op=op, inverse=True
+    )
+    real_data = _resize_data(real_data, dims=(dim.val,), sizes=(n.val,), before_op=op)
+
+    return real_data
+
+
+@LowerComplex.register_lower_func(op_type="complex_shape")
+def _lower_complex_shape(op: Operation):
+    return mb.shape(x=op.data.real, before_op=op)
+
+
+def _match_and_replace_dialect_op(block, op):
+    if not LowerComplex.has_lower_func(op.op_type):
+        return False
+
+    lower_res = LowerComplex.get_lower_func(op.op_type)(op)
+
+    if not op.enclosing_block.try_replace_uses_of_var_after_op(
+        anchor_op=op,
+        old_var=op.outputs[0],
+        new_var=lower_res,
+    ):
+        raise ValueError(f"Unable to lower complex dialect op {op}")
+    block.remove_ops([op])
+    return True
+
+
+@block_context_manager
+def _lower_complex_dialect_ops_in_block(block):
+    def help_lower_complex_dialect_ops(block):
+        for op in list(block.operations):
+            if _match_and_replace_dialect_op(block, op):
+                return True
+        return False
+
+    block_changed = True
+    while block_changed:
+        block_changed = help_lower_complex_dialect_ops(block)
+
+
+@register_pass(namespace="common")
+class lower_complex_dialect_ops(AbstractGraphPass):
+    """
+    Identify ops that operate on complex data and replace them with ops that handle the real and
+    imaginary parts separately. The goal of this pass is to lower complex dialect ops into core ops.
+
+    This pass also checks if the output is complex. As Core ML doesn't support complex data yet,
+    it errors out early when detecting a complex output.
+
+    Input graph (`complex` and `complex_real` are complex dialect ops):
+        %complex_data = complex(real_data=%real_data, imag_data=%imag_data)
+        %real_data = complex_real(data=%complex_data)
+        return %real_data
+
+    Output graph (only core ops, no complex dialect ops):
+        %complex_data_real = identity(x=%real_data)
+        %complex_data_imag = identity(x=%imag_data)
+        %real_data = identity(data=%complex_data_real)
+        return %real_data
+    """
+
+    def apply(self, prog):
+        for block in prog.functions.values():
+            # Early error out for complex data output.
+            for out_var in block.outputs:
+                if types.is_complex(out_var.dtype):
+                    raise ValueError(
+                        "MIL doesn't support complex data as model's output, please "
+                        "extract real and imaginary parts explicitly."
+                    )
+
+            _lower_complex_dialect_ops_in_block(block)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_activation.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_activation.py
new file mode 100644
index 00000000..95ce1ffb
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_activation.py
@@ -0,0 +1,649 @@
+# Copyright (c) 2023, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import numpy as np
+
+from coremltools.converters.mil.experimental.passes.generic_pass_infrastructure import (
+    fuse_all_blocks,
+)
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil import get_new_symbol
+from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
+from coremltools.converters.mil.mil.passes.helper import (
+    _check_child_op_type,
+    _check_var_scalar_value,
+    _check_var_scalar_value_in_interval,
+    block_context_manager,
+)
+from coremltools.converters.mil.mil.passes.pass_registry import register_pass
+
+
+@register_pass(namespace="common")
+class fuse_gelu_exact(AbstractGraphPass):
+    """
+    Identify the pattern that corresponds to the exact version of ``gelu``, and replace it with a single
+    ``gelu`` layer with ``mode=EXACT``. The pattern is ``y = 0.5 * x * (1 + erf(x / sqrt(2)))``, which
+    can be represented by one of the following:
+
+    .. code-block::
+
+        (1)
+        [...] ----> div (1.414) ---> erf ---> add (1) -----> mul (0.5) ---> mul ---> [...]
+          |                                                                  ^
+          |                                                                  |
+          |------------------------------------------------------------------
+
+        (2)
+        [...] ----> div (1.414) ---> erf ---> add (1) -----> mul ---> mul (0.5) ---> [...]
+          |                                                   ^
+          |                                                   |
+          |---------------------------------------------------
+
+        (3)
+        [...] ----> div (1.414) ---> erf ---> add (1) -----> mul ------> [...]
+          |                                                   ^
+          |                                                   |
+          |---------------> mul(0.5) -------------------------
+
+        All of them are converted to:
+        [...] ----> gelu (mode=EXACT) ---> [...]
+    """
+
+    def apply(self, prog):
+        for f in prog.functions.values():
+            block_changed = True
+            while block_changed:
+                block_changed = self._fuse_gelu_exact_block(f)
+
+    @staticmethod
+    def _try_to_transform(op, block):
+        ops_to_remove = []
+        if op.x.val is None and op.y.val is None:
+            return False
+
+        # check whether the op is mul(1/sqrt(2)) or real_div(sqrt(2))
+        root_var = op.x if op.y.val is not None else op.y
+        if op.op_type == "real_div":
+            if not _check_var_scalar_value(op.y, 2**0.5):
+                return False
+        elif op.op_type == "mul":
+            if not (
+                _check_var_scalar_value(op.x, 2**-0.5) or _check_var_scalar_value(op.y, 2**-0.5)
+            ):
+                return False
+        ops_to_remove.append(op)
+
+        # check if the child op is erf
+        if not _check_child_op_type(op, "erf"):
+            return False
+        erf_op = list(op.outputs[0].child_ops)[0]
+        ops_to_remove.append(erf_op)
+
+        # check if the child op is add
+        if not _check_child_op_type(erf_op, "add"):
+            return False
+        add_op = list(erf_op.outputs[0].child_ops)[0]
+        if not (_check_var_scalar_value(add_op.x, 1) or _check_var_scalar_value(add_op.y, 1)):
+            return False
+        ops_to_remove.append(add_op)
+
+        # check if the child op is mul
+        if not _check_child_op_type(add_op, "mul"):
+            return False
+        mul_op = list(add_op.outputs[0].child_ops)[0]
+
+        # now we have two cases:
+        # (1) first mul by 0.5 and then by the root var
+        if _check_var_scalar_value(mul_op.x, 0.5) or _check_var_scalar_value(mul_op.y, 0.5):
+            ops_to_remove.append(mul_op)
+            if not _check_child_op_type(mul_op, "mul"):
+                return False
+            mul_op_2 = list(mul_op.outputs[0].child_ops)[0]
+            if not (mul_op_2.x == root_var or mul_op_2.y == root_var):
+                return False
+            ops_to_remove.append(mul_op_2)
+
+        # (2) first mul by the root var and then mul by 0.5
+        elif mul_op.x == root_var or mul_op.y == root_var:
+            ops_to_remove.append(mul_op)
+            if not _check_child_op_type(mul_op, "mul"):
+                return 
False + mul_op_2 = list(mul_op.outputs[0].child_ops)[0] + if not ( + _check_var_scalar_value(mul_op_2.x, 0.5) or _check_var_scalar_value(mul_op_2.y, 0.5) + ): + return False + ops_to_remove.append(mul_op_2) + + else: + other_parent_op = mul_op.x.op if mul_op.y == add_op.outputs[0] else mul_op.y.op + if other_parent_op.op_type != "mul": + return False + if not ( + _check_var_scalar_value(other_parent_op.x, 0.5) + or _check_var_scalar_value(other_parent_op.y, 0.5) + ): + return False + if not (other_parent_op.x == root_var or other_parent_op.y == root_var): + return False + ops_to_remove.append(other_parent_op) + ops_to_remove.append(mul_op) + mul_op_2 = mul_op + + # check that none of the op in this pattern is connected to the output + # (except the last mul op) + for op in ops_to_remove[:-1]: + for out in op.outputs: + if out in block.outputs: + return False + + # remove all the ops, and replace with a gelu op + out_name = mul_op_2.outputs[0].name + x = mb.gelu(x=root_var, mode="EXACT", name=out_name, before_op=op) + + mul_op_2.enclosing_block.replace_uses_of_var_after_op( + anchor_op=mul_op_2, old_var=mul_op_2.outputs[0], new_var=x + ) + # Remove all the ops at once + block.remove_ops(ops_to_remove) + return True + + @block_context_manager + def _fuse_gelu_exact_block(self, block): + fusion_occurred = False + for op in list(block.operations): + for b in op.blocks: + block_changed = True + while block_changed: + block_changed = self._fuse_gelu_exact_block(b) + if len(op.blocks) > 0: + # This op can't be real_div or mul + continue + + if op.op_type in ["mul", "real_div"]: + fusion_occurred = self._try_to_transform(op, block) + # has to break as the downstream iterator is affected. + if fusion_occurred: + return fusion_occurred + return fusion_occurred + + +@register_pass(namespace="common") +class fuse_gelu_tanh_approximation(AbstractGraphPass): + """ + Identify the pattern that corresponds to the ``tanh`` approximate version of ``gelu``, and replace it + with a single ``gelu`` layer with ``mode=TANH_APPROXIMATION``. + + The implementation of this pass uses the generic graph pattern matching and transform algorithm + implemented in ``coremltools.converters.mil.experimental.passes.generic_pass_infrastructure`` and + documented in ``coremltools/converters/mil/experimental/passes/readme.md``. 
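+
+    A sketch of the formula being matched (the standard tanh approximation):
+    ``y = 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))``.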
+ """ + + def apply(self, prog): + fuse_all_blocks( + ops_arrangement=self.get_gelu_pattern1(), + var_constraints=self.is_var_constraint_satisifed, + transform_pattern=self.transform_pattern, + prog=prog, + ) + + fuse_all_blocks( + ops_arrangement=self.get_gelu_pattern2(), + var_constraints=self.is_var_constraint_satisifed, + transform_pattern=self.transform_pattern, + prog=prog, + ) + + @staticmethod + def is_var_constraint_satisifed(pattern): + + passed = _check_var_scalar_value(pattern.mul.y, 0.5) or _check_var_scalar_value( + pattern.mul.x, 0.5 + ) + passed = passed and _check_var_scalar_value(pattern.pow.y, 3.0) + + passed = passed and ( + _check_var_scalar_value(pattern.mul_1.y, 0.044715) + or _check_var_scalar_value(pattern.mul_1.x, 0.044715) + ) + + passed = passed and ( + _check_var_scalar_value(pattern.mul_2.y, 0.79788) + or _check_var_scalar_value(pattern.mul_2.x, 0.79788) + ) + + passed = passed and ( + _check_var_scalar_value(pattern.add_1.y, 1) + or _check_var_scalar_value(pattern.add_1.x, 1) + ) + + return passed + + @staticmethod + def transform_pattern(pattern): + # remove all the ops, and replace with a gelu op + out_name = pattern.mul_3.outputs[0].name + x = mb.gelu( + x=pattern.root_var, mode="TANH_APPROXIMATION", name=out_name, before_op=pattern.mul + ) + + pattern.mul_3.enclosing_block.replace_uses_of_var_after_op( + anchor_op=pattern.mul_3, old_var=pattern.mul_3.outputs[0], new_var=x + ) + + # Remove all the ops at once + pattern.block.remove_ops(pattern.op_list()) + + @staticmethod + def get_gelu_pattern1(): + """ + ``y = x * (0.5 * (tanh(((.0447)x^3 + x ) * sqrt(2/pi)) + 1))`` + + .. code-block:: + + [...] -----> pow (3) ----> mul (.044715) ---> add -----> mul (sqrt(2/pi)) ---> tanh ----> add (1) ----> mul (0.5) -----> mul ---> [...] + | ^ ^ + | | | + |------------------------------------------------------------------------------------------------------------------------ + + """ + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=([get_new_symbol(), get_new_symbol(), get_new_symbol()])), + ] + ) + def gelu_to_detect_1(x): + # MIL operation takes named inputs (instead of positional inputs). + # Here `name` argument is MANDATORY. + pow = mb.pow(x=x, y=3.0, name="pow") + mul_1 = mb.mul(x=0.044714998453855515, y=pow, name="mul_1") + add = mb.add(x=x, y=mul_1, name="add") + mul_2 = mb.mul(x=0.7978845834732056, y=add, name="mul_2") + tanh = mb.tanh(x=mul_2, name="tanh") + add_1 = mb.add(x=1.0, y=tanh, name="add_1") + mul = mb.mul(x=0.5, y=add_1, name="mul") + mul_3 = mb.mul(x=mul, y=x, name="mul_3") + return mul_3 + + return gelu_to_detect_1 + + @staticmethod + def get_gelu_pattern2(): + """ + ``y = (0.5 * x) * (tanh(((.0447)x^3 + x ) * sqrt(2/pi)) + 1)`` + + .. code-block:: + + --------------------------------------------------------------------------------------------------------- + ^ | + | V + [...] -----> mul(0.5) pow (3) ----> mul (.044715) ---> add -----> mul (sqrt(2/pi)) ---> tanh ----> add (1) -----> mul ---> [...] 
+          |          ^                                                                        ^
+          |          |                                                                        |
+          |------------------------------------------------------------
+
+        """
+
+        @mb.program(
+            input_specs=[
+                mb.TensorSpec(shape=([get_new_symbol(), get_new_symbol(), get_new_symbol()])),
+            ]
+        )
+        def gelu_to_detect_2(x):
+            pow = mb.pow(x=x, y=3.0, name="pow")
+            mul_1 = mb.mul(x=0.044714998453855515, y=pow, name="mul_1")
+            add = mb.add(x=x, y=mul_1, name="add")
+            mul_2 = mb.mul(x=0.7978845834732056, y=add, name="mul_2")
+            tanh = mb.tanh(x=mul_2, name="tanh")
+            add_1 = mb.add(x=1.0, y=tanh, name="add_1")
+            mul = mb.mul(x=0.5, y=x, name="mul")
+            mul_3 = mb.mul(x=mul, y=add_1, name="mul_3")
+            return mul_3
+
+        return gelu_to_detect_2
+
+
+@register_pass(namespace="common")
+class fuse_leaky_relu(AbstractGraphPass):
+    """
+    Detect the ``mul`` ---> ``max`` pattern that can be mapped to ``leaky_relu``.
+
+    .. code-block::
+
+        In code form:
+        ------------
+
+        Input:
+            %2 = const(value = alpha) # where 0 <= alpha <= 1
+            %3 = mul(%1, %2) # alpha * x
+            %4 = max(%3, %1) # max(alpha * x, x)
+
+        Output:
+            %4 = leaky_relu(x=%1, alpha=%2)
+
+
+        In graphical form:
+        -----------------
+
+        Input graph:
+
+                   const (val = alpha)
+                        |
+        input ----> mul ---------------> maximum -----------> output
+          |                                 |
+          |----------------------------------
+
+        Output graph:
+
+        input --------> leaky_relu ---------> output
+
+    """
+
+    def apply(self, prog):
+        for f in prog.functions.values():
+            block_changed = True
+            while block_changed:
+                block_changed = self._fuse_leaky_relu_block(f)
+
+    @staticmethod
+    def _try_to_transform(mul_op, block):
+
+        ops_to_remove = []
+
+        # check that one of the inputs of the mul op is a constant that is between 0 and 1
+        if _check_var_scalar_value_in_interval(mul_op.x, 0, 1):
+            alpha_input_var = mul_op.x
+            parent_var = mul_op.y
+        elif _check_var_scalar_value_in_interval(mul_op.y, 0, 1):
+            alpha_input_var = mul_op.y
+            parent_var = mul_op.x
+        else:
+            return False
+
+        # check that output of mul is not a block output
+        if mul_op.outputs[0] in block.outputs:
+            return False
+        ops_to_remove.append(mul_op)
+
+        # check if the child op of the mul op is maximum
+        if not _check_child_op_type(mul_op, "maximum"):
+            return False
+
+        # check that the other input of the max op is the same as the parent of the mul op
+        max_op = list(mul_op.outputs[0].child_ops)[0]
+        if not (
+            (max_op.x == mul_op.outputs[0] and max_op.y == parent_var)
+            or (max_op.y == mul_op.outputs[0] and max_op.x == parent_var)
+        ):
+            return False
+        ops_to_remove.append(max_op)
+
+        # remove all the ops, and replace with a leaky relu op
+        out_name = max_op.outputs[0].name
+        x = mb.leaky_relu(x=parent_var, alpha=alpha_input_var.val, name=out_name, before_op=max_op)
+        max_op.enclosing_block.replace_uses_of_var_after_op(
+            anchor_op=max_op, old_var=max_op.outputs[0], new_var=x
+        )
+        block.remove_ops(ops_to_remove)
+        return True
+
+    @block_context_manager
+    def _fuse_leaky_relu_block(self, block):
+        fusion_status = False
+        for op in list(block.operations):
+            for b in op.blocks:
+                block_changed = True
+                while block_changed:
+                    block_changed = self._fuse_leaky_relu_block(b)
+            if len(op.blocks) > 0:
+                continue
+
+            # start pattern match if mul op is encountered
+            if op.op_type == "mul":
+                fusion_status = self._try_to_transform(op, block)
+                # has to break as the downstream iterator is affected.
+ if fusion_status: + return fusion_status + return fusion_status + + +class FusePreluPattern1: + @staticmethod + def is_var_constraint_satisifed(pattern): + # input must be rank 4 + if pattern.root_var.rank != 4: + return False + # output must be rank 4 + if pattern.out_op.outputs[0].rank != 4: + return False + if not ( + _check_var_scalar_value(pattern.neg.y, -1) or _check_var_scalar_value(pattern.neg.x, -1) + ): + return False + if pattern.alpha_mul.x.val is not None: + alpha = pattern.alpha_mul.x.val + elif pattern.alpha_mul.y.val is not None: + alpha = pattern.alpha_mul.y.val + else: + return False + # alpha must be of shape (1, C, 1, 1) or (C, 1, 1) + if len(alpha.shape) not in (3, 4): + return False + if alpha.size != alpha.shape[-3]: + return False + + return True + + @staticmethod + def transform_pattern(pattern): + # remove all the ops, and replace with a prelu op + out_var = pattern.out_op.outputs[0] + if pattern.alpha_mul.x.val is not None: + alpha = pattern.alpha_mul.x.val + else: + alpha = pattern.alpha_mul.y.val + + alpha_vector = -1 * alpha.flatten() + x = mb.prelu( + x=pattern.root_var, alpha=alpha_vector, name=out_var.name, before_op=pattern.out_op + ) + pattern.out_op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=pattern.out_op, old_var=out_var, new_var=x + ) + # Remove all the ops at once + pattern.block.remove_ops(pattern.op_list()) + + @staticmethod + def get_prelu_pattern(): + """ + ``y = a * relu(-1 * x) + relu(x)`` + + When ``x`` is rank 4, and ``a`` is of shape ``(1, C, 1, 1)`` or ``(C, 1, 1)``, + this is equivalent to ``prelu`` with ``alpha = -a.flatten()``. + """ + + @mb.program( + input_specs=[ + mb.TensorSpec( + shape=([get_new_symbol(), get_new_symbol(), get_new_symbol(), get_new_symbol()]) + ), + ] + ) + def prelu_pattern(x): + return fuse_prelu._prelu_pattern(x) + + return prelu_pattern + + +class FusePreluPattern2: + @staticmethod + def is_var_constraint_satisifed(pattern): + perm = pattern.transpose.perm.val + if not np.array_equal(perm, np.array([0, 2, 3, 1])): + return False + # output must be rank 4 + if pattern.out_op.outputs[0].rank != 4: + return False + if not ( + _check_var_scalar_value(pattern.neg.y, -1) or _check_var_scalar_value(pattern.neg.x, -1) + ): + return False + if pattern.alpha_mul.x.val is not None: + alpha = pattern.alpha_mul.x.val + elif pattern.alpha_mul.y.val is not None: + alpha = pattern.alpha_mul.y.val + else: + return False + # alpha must be of shape (C,) or (1,C) or (1,1,C) or (1,1,1,C) + if alpha.size != alpha.shape[-1]: + return False + + return True + + @staticmethod + def transform_pattern(pattern): + # remove all the ops, and replace with a prelu op + transpose op + perm = pattern.transpose.perm.val + out_var = pattern.out_op.outputs[0] + if pattern.alpha_mul.x.val is not None: + alpha = pattern.alpha_mul.x.val + else: + alpha = pattern.alpha_mul.y.val + + alpha_vector = -1 * alpha.flatten() + x = mb.prelu(x=pattern.root_var, alpha=alpha_vector, before_op=pattern.out_op) + x = mb.transpose(x=x, perm=perm, name=out_var.name, before_op=pattern.out_op) + pattern.out_op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=pattern.out_op, old_var=out_var, new_var=x + ) + # Remove all the ops at once + pattern.block.remove_ops(pattern.op_list()) + + @staticmethod + def get_prelu_pattern(): + """ + ``x1 = transpose(perm=(0,2,3,1))(x)`` + + ``y = a * relu(-1 * x1) + relu(x1)`` + + When ``x`` is rank 4, and ``a`` is of shape (``C,)``, ``(1, C)``, ``(1,1,C)``, or ``(1,1,1,C)``, + this is equivalent to ``prelu`` 
with ``alpha = -a.flatten()``, followed by a ``transpose`` + with ``perm (0,2,3,1)``. + """ + + @mb.program( + input_specs=[ + mb.TensorSpec( + shape=([get_new_symbol(), get_new_symbol(), get_new_symbol(), get_new_symbol()]) + ), + ] + ) + def prelu_pattern(x): + # perm value can be anything, it will be checked in "is_var_constraint_satisifed" method + x = mb.transpose(x=x, perm=[0, 1, 2, 3], name="transpose") + return fuse_prelu._prelu_pattern(x) + + return prelu_pattern + + +@register_pass(namespace="common") +class fuse_prelu(AbstractGraphPass): + """ + Detect the following patterns that can be mapped to a ``prelu`` op. + Essentially, the ``prelu`` op can be broken down into the following ops: + + ``y = a * relu(-1 * x) + relu(x)`` + + .. code-block:: + + Pattern 1: + + + | ------------> relu --------------------| + | V + x (BCHW) ------| add -----> y (BCHW) + | ^ + --------> mul -------> relu -----> mul---| + ^ ^ + | | + Const(val=-1) Const(name=a, shape=(C,1,1) or (1,C,1,1)) + + This will be mapped to: + x (BCHW) ------> prelu(alpha=a, shape=(C,)) ---------> y (BCHW) + + + Pattern 2: + + | ------------> relu --------------------| + | V + x (BCHW) -->transpose(BHWC)---->| add -----> y (BHWC) + | ^ + --------> mul -------> relu -----> mul---| + ^ ^ + | | + Const(val=-1) Const(shape=(C,) or (1,C) or (1,1,C) or (1,1,1,C)) + + This will be mapped to: + x (BCHW) ------> prelu ---------> transpose ------> y (BHWC) + """ + + def apply(self, prog): + for pattern in (FusePreluPattern1, FusePreluPattern2): + fuse_all_blocks( + ops_arrangement=pattern.get_prelu_pattern(), + var_constraints=pattern.is_var_constraint_satisifed, + transform_pattern=pattern.transform_pattern, + prog=prog, + ) + + @staticmethod + def _prelu_pattern(x): + # MIL operation takes named inputs (instead of positional inputs). + # Here `name` argument is MANDATORY. + neg = mb.mul(x=x, y=-1.0, name="neg") + relu1 = mb.relu(x=neg, name="relu1") + # Use any constant here to match, rank and shape will be verified in + # `is_var_constraint_satisifed`. + mul = mb.mul(x=relu1, y=np.random.rand(2, 2, 2, 2), name="alpha_mul") + relu2 = mb.relu(x=x, name="relu2") + out = mb.add(x=relu2, y=mul, name="out_op") + return out + + +@register_pass(namespace="common") +class prelu_to_lrelu(AbstractGraphPass): + """ + If ``prelu`` has the same leakage factor across all channels, it will be converted to ``leaky_relu``. + """ + + def apply(self, prog): + for f in prog.functions.values(): + self._prelu_to_lrelu_block(f) + + @block_context_manager + def _prelu_to_lrelu_block(self, block): + for op in list(block.operations): + for b in op.blocks: + self._prelu_to_lrelu_block(b) + if len(op.blocks) > 0: + # This op can't be prelu. 
+ continue + + if op.op_type == "prelu": + alpha_val = op.alpha.val + common_leakage_factor = True + for c in range(1, op.alpha.val.shape[0]): + if alpha_val[c] != alpha_val[0]: + common_leakage_factor = False + break + if common_leakage_factor: + lrelu_out = mb.leaky_relu( + x=op.x, alpha=alpha_val[0], name=op.outputs[0].name, before_op=op + ) + op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=op, old_var=op.outputs[0], new_var=lrelu_out + ) + block.remove_ops([op]) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_conv.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_conv.py new file mode 100644 index 00000000..a6359d25 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_conv.py @@ -0,0 +1,1142 @@ +# Copyright (c) 2023, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import copy + +import numpy as np + +from coremltools import _logger as logger +from coremltools.converters.mil.mil import Block +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import Operation, types +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.helper import ( + _check_child_op_type, + _check_no_output_connection, + block_context_manager, +) +from coremltools.converters.mil.mil.passes.pass_registry import register_pass +from coremltools.converters.mil.mil.types.symbolic import any_symbolic + + +@register_pass(namespace="common") +class add_conv_transpose_output_shape(AbstractGraphPass): + """ + The ``conv_transpose`` input ``output_shape`` is an optional input. + Since we can infer the output shape from ``type_inference``, we add + ``output_shape`` input whenever it is known to be constant at + compile time. For example: + + .. code-block:: + + Given: + %1: (1, 5, 39, fp32) = conv_transpose(...) # no output_shape input. + + Result: + %2: (3, i32) = const(val=[1,5,39]) + %3: (1, 5, 39, fp32) = conv_transpose(..., output_shape=%2) + """ + + def apply(self, prog): + for f in prog.functions.values(): + self._handle_block(f) + + @staticmethod + def _match_pattern(op): + return ( + op.op_type == "conv_transpose" + and op.output_shape is None + and not any_symbolic(op.outputs[0].shape) + ) + + @block_context_manager + def _handle_block(self, block): + for op in list(block.operations): + for b in op.blocks: + self._handle_block(b) + + if not self._match_pattern(op): + continue + + # matched pattern + x = mb.conv_transpose( + **op.inputs, + output_shape=op.outputs[0].shape, + name=op.name + "_has_output_shape", + before_op=op, + ) + op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=op, old_var=op.outputs[0], new_var=x + ) + block.remove_ops([op]) + + +@register_pass(namespace="common") +class compose_conv1d(AbstractGraphPass): + """ + In `TensorFlow `_, + ``tf.keras.layers.Conv1D`` is a composite op: + + .. code-block:: + + expand a dummy dim -> Conv2D -> squeeze the dummy dim + + In `PyTorch `_, + this is also true for some backends (``mkldnn`` and ``xpu``). + + This decomposition wrecks the coremltools ``conv1d`` graph passes, + so we should recompose the fragments back to MIL ``conv``, which natively supports ``conv1d``: + + .. 
code-block::
+
+        Pattern 1:
+            Given:
+                %2 = expand_dims(%1, axes=-2) or expand_dims(%1, axes=2), %1.rank = 3
+                %3 = conv(%2)
+                %4 = squeeze(%3, axes=-2) or squeeze(%3, axes=2)
+                ...
+
+            Result:
+                %4 = conv(%1)
+                ...
+
+        Pattern 2 (TensorFlow channel_last):
+            Given:
+                %2 = expand_dims(%1, axes=-3) or expand_dims(%1, axes=1), %1.rank = 3
+                %3 = transpose(%2, perm=(0, 3, 1, 2))
+                %4 = conv(%3)
+                %5 = transpose(%4, perm=(0, 2, 3, 1))
+                %6 = squeeze(%5, axes=-3) or squeeze(%5, axes=1)
+                ...
+
+            Result:
+                %3 = transpose(%1, perm=(0, 2, 1))
+                %4 = conv(%3)
+                %6 = transpose(%4, perm=(0, 2, 1))
+                ...
+    """
+
+    def apply(self, prog):
+        for f in prog.functions.values():
+            self._compose_conv1d_block(f)
+
+    @block_context_manager
+    def _compose_conv1d_block(self, block: Block):
+        def help_compose_conv1d_block(block: Block) -> bool:
+            for op in list(block.operations):
+                for b in op.blocks:
+                    block_changed = True
+                    while block_changed:
+                        block_changed = help_compose_conv1d_block(b)
+
+                # must start with expanding a 3-D tensor,
+                # which has batch, channel, and length dimensions
+                if op.op_type != "expand_dims" or op.x.rank != 3:
+                    continue
+
+                # try pattern `expand_dim` -> `conv2d` -> `squeeze`
+                if self._try_match_and_transform_pattern(op, block):
+                    # has to break as the downstream iterator is affected
+                    return True
+
+                # try pattern `expand_dim` -> `transpose` -> `conv2d` -> `transpose` -> `squeeze`
+                if self._try_match_and_transform_pattern_channel_last(op, block):
+                    # has to break as the downstream iterator is affected
+                    return True
+
+            return False
+
+        block_changed = True
+        while block_changed:
+            block_changed = help_compose_conv1d_block(block)
+
+    def _try_match_and_transform_pattern(self, expand_op: Operation, block: Block) -> bool:
+        """
+        identify the pattern: `expand_dim` -> `conv2d` -> `squeeze`
+        """
+        # abort composition if dummy dimension is not added as height
+        if expand_op.axes.rank != 1 or expand_op.axes.val[0] not in (-2, 2):
+            return False
+
+        # `expand_dims` -> `conv`
+        if not _check_child_op_type(expand_op, "conv"):
+            return False
+        conv_op = expand_op.outputs[0].child_ops[0]
+
+        # `conv` -> `squeeze`
+        if not _check_child_op_type(conv_op, "squeeze"):
+            return False
+        squeeze_op = conv_op.outputs[0].child_ops[0]
+
+        # abort composition if not squeezing the dummy height
+        if squeeze_op.axes.rank != 1 or squeeze_op.axes.val[0] not in (-2, 2):
+            return False
+
+        # everything looks good
+        return self._try_apply_transform(expand_op, conv_op, squeeze_op, block)
+
+    def _try_match_and_transform_pattern_channel_last(
+        self, expand_op: Operation, block: Block
+    ) -> bool:
+        """
+        identify the pattern: `expand_dim` -> `transpose` -> `conv2d` -> `transpose` -> `squeeze`
+        """
+        # abort composition if dummy dimension is not added as height
+        if expand_op.axes.rank != 1 or expand_op.axes.val[0] not in (-3, 1):
+            return False
+
+        # `expand_dims` -> `transpose`
+        if not _check_child_op_type(expand_op, "transpose"):
+            return False
+        transpose1_op = expand_op.outputs[0].child_ops[0]
+
+        # abort composition if permutation is not (0, 3, 1, 2)
+        perm1 = transpose1_op.perm.val.copy()
+        perm1[np.where(perm1 < 0)] += 4
+        if np.any(perm1 != (0, 3, 1, 2)):
+            return False
+
+        # `transpose` -> `conv`
+        if not _check_child_op_type(transpose1_op, "conv"):
+            return False
+        conv_op = transpose1_op.outputs[0].child_ops[0]
+
+        # `conv` -> `transpose`
+        if not _check_child_op_type(conv_op, "transpose"):
+            return False
+        transpose2_op = conv_op.outputs[0].child_ops[0]
+
+        # abort composition if permutation is not (0, 2,
3, 1) + perm2 = transpose2_op.perm.val.copy() + perm2[np.where(perm2 < 0)] += 4 + if np.any(perm2 != (0, 2, 3, 1)): + return False + + # `transpose` -> `squeeze` + if not _check_child_op_type(transpose2_op, "squeeze"): + return False + squeeze_op = transpose2_op.outputs[0].child_ops[0] + + # abort composition if not squeezing the dummy height + if squeeze_op.axes.rank != 1 or squeeze_op.axes.val[0] not in (-3, 1): + return False + + # everything looks good + return self._try_apply_transform_channel_last( + expand_op, transpose1_op, conv_op, transpose2_op, squeeze_op, block + ) + + @staticmethod + def _try_apply_transform( + expand_op: Operation, conv_op: Operation, squeeze_op: Operation, block: Block + ) -> bool: + ops_to_remove = [expand_op, conv_op, squeeze_op] + if not _check_no_output_connection(block, ops_to_remove): + return False + + # prepare `conv1d` + conv_kwargs = {"name": squeeze_op.outputs[0].name, "before_op": conv_op} + + # inherit `x` from `expand_dim` + conv_kwargs["x"] = expand_op.x + + # inherit `pad_type`, `groups`, `bias` from `conv2d` + conv_kwargs["pad_type"] = conv_op.inputs["pad_type"].val + conv_kwargs["groups"] = conv_op.inputs["groups"].val + bias = conv_op.inputs.get("bias", None) + if bias is not None: + conv_kwargs["bias"] = bias + + # squeeze `weight`, `strides`, `pad`, `dilations` from `conv2d` + conv_kwargs["weight"] = mb.squeeze( + x=conv_op.inputs["weight"], axes=(-2,), before_op=conv_op + ) + conv_kwargs["strides"] = (conv_op.inputs["strides"].val[-1],) + conv_kwargs["pad"] = (conv_op.inputs["pad"].val[-2], conv_op.inputs["pad"].val[-1]) + conv_kwargs["dilations"] = (conv_op.inputs["dilations"].val[-1],) + + # compose `conv1d` + out = mb.conv(**conv_kwargs) + + # try replacing `expand_dim` -> `conv2d` -> `squeeze` output + # with the new `conv1d` output + if squeeze_op.enclosing_block.try_replace_uses_of_var_after_op( + anchor_op=squeeze_op, old_var=squeeze_op.outputs[0], new_var=out + ): + # remove `expand_dim` -> `conv2d` -> `squeeze` + block.remove_ops(ops_to_remove) + return True + return False + + @staticmethod + def _try_apply_transform_channel_last( + expand_op: Operation, + transpose1_op: Operation, + conv_op: Operation, + transpose2_op: Operation, + squeeze_op: Operation, + block: Block, + ) -> bool: + ops_to_remove = [expand_op, transpose1_op, conv_op, transpose2_op, squeeze_op] + if not _check_no_output_connection(block, ops_to_remove): + return False + + # create `transpose1` + transpose1_out = mb.transpose( + x=expand_op.x, perm=(0, 2, 1), name=transpose1_op.outputs[0].name, before_op=expand_op + ) + + # prepare `conv1d` + conv_kwargs = {"name": conv_op.outputs[0].name, "x": transpose1_out, "before_op": conv_op} + + # inherit `pad_type`, `groups`, `bias` from `conv2d` + conv_kwargs["pad_type"] = conv_op.inputs["pad_type"].val + conv_kwargs["groups"] = conv_op.inputs["groups"].val + bias = conv_op.inputs.get("bias", None) + if bias is not None: + conv_kwargs["bias"] = bias + + # squeeze `weight`, `strides`, `pad`, `dilations` from `conv2d` + conv_kwargs["weight"] = mb.squeeze( + x=conv_op.inputs["weight"], axes=(-2,), before_op=conv_op + ) + conv_kwargs["strides"] = (conv_op.inputs["strides"].val[-1],) + conv_kwargs["pad"] = (conv_op.inputs["pad"].val[-2], conv_op.inputs["pad"].val[-1]) + conv_kwargs["dilations"] = (conv_op.inputs["dilations"].val[-1],) + + # compose `conv1d` + conv_out = mb.conv(**conv_kwargs) + + # create `transpose2` + transpose2_out = mb.transpose( + x=conv_out, perm=(0, 2, 1), name=squeeze_op.outputs[0].name, 
before_op=transpose2_op
+        )
+
+        # try replacing `expand_dim` -> `transpose` -> `conv2d` -> `transpose` -> `squeeze` output
+        # with the new `transpose` -> `conv1d` -> `transpose` output
+        if squeeze_op.enclosing_block.try_replace_uses_of_var_after_op(
+            anchor_op=squeeze_op, old_var=squeeze_op.outputs[0], new_var=transpose2_out
+        ):
+            # remove `expand_dim` -> `transpose` -> `conv2d` -> `transpose` -> `squeeze`
+            block.remove_ops(ops_to_remove)
+            return True
+        return False
+
+
+@register_pass(namespace="common")
+class fuse_conv_batchnorm(AbstractGraphPass):
+    """
+    Fuse the following ``batch_norm`` layer into ``conv`` and ``conv_transpose``.
+    That is, convert ``conv + batch_norm`` to ``conv``, by modifying the weight and bias in the ``conv`` layer.
+
+    .. code-block::
+
+        Given:
+            %2 = conv(%1)
+            ...
+            %3 = batch_norm(%2)
+            ...
+
+        Result:
+            %3 = conv(%1)
+            ...
+    """
+
+    def apply(self, prog):
+        for f in prog.functions.values():
+            block_changed = True
+            while block_changed:
+                block_changed = self._fuse_conv_batchnorm_block(f)
+
+    @staticmethod
+    def _try_to_transform(conv_op, bn_op):
+        # get parameters from batch_norm layer
+        gamma = bn_op.gamma.val
+        beta = bn_op.beta.val
+        mean = bn_op.mean.val
+        variance = bn_op.variance.val
+        epsilon = bn_op.epsilon.val
+
+        # get weight, bias and groups from conv layer
+        if conv_op.weight.val is None:
+            return False
+        conv_weight = conv_op.weight.val
+        conv_bias = conv_op.bias
+        groups = conv_op.groups.val
+
+        # get type of the conv layer
+        is_deconv = conv_op.op_type == "conv_transpose"
+        # The deconv weight transpose axes are determined by the dimension of the convolution.
+        # Conv1d should be [1, 0, 2], Conv2d should be [1, 0, 2, 3], Conv3d should be [1, 0, 2, 3, 4]
+        if not 3 <= len(conv_weight.shape) <= 5:
+            raise AssertionError(
+                f"Only supports Conv1/2/3d, which means the weight's dimension should "
+                f"be between 3 and 5, but got weight with {len(conv_weight.shape)} dimensions."
+            )
+        deconv_weight_transpose_axes = [1, 0] + [axis for axis in range(2, len(conv_weight.shape))]
+
+        # D_in denotes the spatial dimensions for conv kernel weight
+        # for conv_transpose, conv_weight has shape [Cin, Cout / groups, *D_in]
+        # for conv, conv_weight has shape [Cout, Cin / groups, *D_in]
+        if is_deconv:
+            Cout = conv_weight.shape[1] * groups
+            Cin = conv_weight.shape[0]
+        else:
+            Cout = conv_weight.shape[0]
+            Cin = conv_weight.shape[1] * groups
+
+        # get the type of the conv weight
+        conv_weight_type = conv_weight.dtype
+
+        # create bias for conv if not exist
+        if conv_bias is None:
+            conv_bias = np.zeros(Cout)
+        else:
+            conv_bias = conv_bias.val
+        conv_bias = conv_bias.astype(conv_weight_type)
+
+        # get the original shape of weight and bias
+        origin_weight_shape = conv_weight.shape
+        origin_bias_shape = conv_bias.shape
+
+        # update the weight for conv layer
+        new_conv_weight = []
+        new_conv_bias = []
+
+        if is_deconv:
+            conv_weight = np.transpose(conv_weight, deconv_weight_transpose_axes)
+            conv_weight = np.reshape(
+                conv_weight, [Cout, Cin // groups] + list(conv_weight.shape[2:])
+            )
+
+        for i in range(Cout):
+            # get batch norm parameters for each channel
+            _gamma = gamma[i]
+            _beta = beta[i]
+            _mean = mean[i]
+            _variance = variance[i]
+            _scale = _gamma / np.sqrt(_variance + epsilon)
+
+            # get conv weight and bias for each channel
+            _conv_weight = conv_weight[i]
+            _conv_bias = conv_bias[i]
+
+            # update the conv weight and bias
+            _conv_weight = _conv_weight * _scale
+            _conv_bias = _scale * (_conv_bias - _mean) + _beta
+            new_conv_weight.append(_conv_weight)
+            new_conv_bias.append(_conv_bias)
+
+        new_conv_weight = np.array(new_conv_weight).astype(conv_weight_type)
+        new_conv_bias = np.array(new_conv_bias).astype(conv_weight_type)
+
+        if is_deconv:
+            new_conv_weight = np.reshape(
+                new_conv_weight, [Cout // groups, Cin] + list(new_conv_weight.shape[2:])
+            )
+            new_conv_weight = np.transpose(new_conv_weight, deconv_weight_transpose_axes)
+
+        # make sure the updated weight and bias have the same shape as the original ones
+        if new_conv_weight.shape != origin_weight_shape:
+            raise AssertionError(
+                "conv weight should have the same shape before and after the "
+                "fuse_conv_batchnorm pass."
+            )
+        if new_conv_bias.shape != origin_bias_shape:
+            raise AssertionError(
+                "conv bias should have the same shape before and after the "
+                "fuse_conv_batchnorm pass."
+            )
+
+        # create a new conv op with the new bias value, copying rest of the attributes
+        out_name = bn_op.outputs[0].name
+        conv_kargs = {
+            "weight": new_conv_weight,
+            "bias": new_conv_bias,
+            "name": out_name,
+            "before_op": conv_op,
+        }
+
+        for k, v in conv_op.inputs.items():
+            if k in ["weight", "bias"]:
+                continue
+            conv_kargs[k] = v
+
+        if is_deconv:
+            x = mb.conv_transpose(**conv_kargs)
+        else:
+            x = mb.conv(**conv_kargs)
+
+        if bn_op.enclosing_block.try_replace_uses_of_var_after_op(
+            anchor_op=bn_op,
+            old_var=bn_op.outputs[0],
+            new_var=x,
+        ):
+            bn_op.enclosing_block.remove_ops([conv_op, bn_op])
+            return True
+        return False
+
+    @block_context_manager
+    def _fuse_conv_batchnorm_block(self, block):
+        def _match_pattern(op):
+            if op.op_type == "conv" or op.op_type == "conv_transpose":
+                # abort fusion if op output is also a block output
+                if op.outputs[0] in op.enclosing_block.outputs:
+                    return None
+                # find batch_norm op
+                child_ops = op.outputs[0].child_ops
+                if len(child_ops) == 1:
+                    bn_op_candidate = list(child_ops)[0]
+                    if bn_op_candidate.op_type == "batch_norm":
+                        return bn_op_candidate
+            return None
+
+        fusion_occurred = False
+        for op in list(block.operations):
+            for b in op.blocks:
+                block_changed = True
+                while block_changed:
+                    block_changed = self._fuse_conv_batchnorm_block(b)
+            if len(op.blocks) > 0:
+                # This op can't be conv or conv_transpose
+                continue
+
+            bn_op = _match_pattern(op)
+            if bn_op is not None:
+                fusion_occurred = self._try_to_transform(op, bn_op)
+                # has to break as the downstream iterator is affected.
+                if fusion_occurred:
+                    return fusion_occurred
+        return fusion_occurred
+
+
+@register_pass(namespace="common")
+class fuse_conv_bias(AbstractGraphPass):
+    """
+    Fold ``add``/``sub`` into ``bias`` of ``conv`` and ``conv_transpose``.
+    That is, convert ``conv + add/sub`` to ``conv``, when ``add``/``sub`` is adding a constant.
+
+    Two patterns are supported:
+
+    .. code-block::
+
+        Pattern 1:
+        Given:
+            %2 = conv(%1)
+            ...
+            %3 = add(%2, constant) # where constant has shape (1,C,1)/(C,1) for 1d conv, (1,C,1,1)/(C,1,1) for 2d conv, etc.
+            ...
+
+        Result:
+            %3 = conv(%1)
+            ...
+
+
+        Pattern 2:
+        Given:
+            %2 = conv(%1)
+            %3 = transpose(%2)
+            ...
+            %4 = add(%3, constant) # where constant has a broadcastable shape
+            ...
+
+        Result:
+            %2 = conv(%1)
+            %4 = transpose(%2)
+            ...
+ """ + + child_op_types = ["add", "sub"] + + def apply(self, prog): + for f in prog.functions.values(): + block_changed = True + while block_changed: + block_changed = self._fuse_conv_bias_block(f) + + def _match_pattern(self, op): + if op.op_type == "conv" or op.op_type == "conv_transpose": + # abort fusion if op output is also a block output + if op.outputs[0] in op.enclosing_block.outputs: + return None + # find add + child_ops = op.outputs[0].child_ops + if len(child_ops) == 1: + add_op_candidate = list(child_ops)[0] + if add_op_candidate.op_type in self.child_op_types: + return add_op_candidate + return None + + @staticmethod + def _try_to_transform_transpose_pattern(conv_op, block): + + ops_to_remove = [] + + # conv layer + if conv_op.op_type != "conv" and conv_op.op_type != "conv_transpose": + return False + is_deconv = conv_op.op_type == "conv_transpose" + ops_to_remove.append(conv_op) + + # transpose layer + if not _check_child_op_type(conv_op, "transpose"): + return False + transpose_op = list(conv_op.outputs[0].child_ops)[0] + ops_to_remove.append(transpose_op) + + # add/sub layer + if not _check_child_op_type(transpose_op, "add") and not _check_child_op_type( + transpose_op, "sub" + ): + return False + add_or_sub_op = list(transpose_op.outputs[0].child_ops)[0] + + ops_to_remove.append(add_or_sub_op) + + # get the bias + if add_or_sub_op.x.val is None and add_or_sub_op.y.val is None: + return False + bias = add_or_sub_op.x.val if add_or_sub_op.x.val is not None else add_or_sub_op.y.val + is_first_input = add_or_sub_op.y.val is not None + is_sub = add_or_sub_op.op_type == "sub" + + # get the conv bias/weight + conv_shape = conv_op.outputs[0].shape + Cout = conv_shape[1] + conv_weight = conv_op.weight.val + conv_weight_type = conv_weight.dtype + conv_bias = ( + np.zeros(Cout).astype(conv_weight_type) if conv_op.bias is None else conv_op.bias.val + ) + + # check if the bias is compatible for fusion + is_bias_scalar = True + if isinstance(bias, np.ndarray): + if bias.shape == (): + bias = bias.tolist() + elif np.prod(bias.shape) == 1: + bias = np.squeeze(bias).tolist() + else: + is_bias_scalar = False + + if not is_bias_scalar: + if np.prod(bias.shape) != Cout: + return False + rank = transpose_op.outputs[0].rank + cout_dim = transpose_op.perm.val.tolist().index(1) - rank + if bias.shape[cout_dim] != Cout: + return False + bias = np.reshape(bias, (Cout)) + + # compute the new bias + if is_sub: + if is_first_input: + bias = -bias + else: + conv_bias = -conv_bias + + new_bias = conv_bias + bias + + # compute the new weight + if is_sub and not is_first_input: + new_weight = -conv_weight + else: + new_weight = conv_weight + + if not _check_no_output_connection(block, ops_to_remove): + return False + + # create a new conv op with the new weight, bias value, copying rest of the attributes + conv_kargs = {"weight": new_weight, "bias": new_bias, "before_op": conv_op} + + for k, v in conv_op.inputs.items(): + if k in ["weight", "bias"]: + continue + conv_kargs[k] = v + + if is_deconv: + x = mb.conv_transpose(**conv_kargs) + else: + x = mb.conv(**conv_kargs) + + # create a new transpose op + out_name = add_or_sub_op.outputs[0].name + tranpose_kargs = {"x": x, "name": out_name, "before_op": transpose_op} + for k, v in transpose_op.inputs.items(): + if k == "x": + continue + tranpose_kargs[k] = v + x = mb.transpose(**tranpose_kargs) + + if add_or_sub_op.enclosing_block.try_replace_uses_of_var_after_op( + anchor_op=add_or_sub_op, + old_var=add_or_sub_op.outputs[0], + new_var=x, + ): + 
add_or_sub_op.enclosing_block.remove_ops(ops_to_remove) + return True + return False + + @staticmethod + def _try_to_transform(conv_op, add_op): + + if add_op.op_type == "sub": + bias_var = add_op.y + else: + bias_var = add_op.x if add_op.x.val is not None else add_op.y + bias_value = bias_var.val + + is_conv_op = conv_op.op_type == "conv" + + # check that the bias value is a constant array or a scalar constant + if not isinstance(bias_value, (np.ndarray, np.generic)): + return False + + is_bias_scalar = False + if not isinstance(bias_value, np.ndarray): + is_bias_scalar = True + + # find rank of the conv input + rank = conv_op.x.rank + if rank is None: + return False + if not (rank == 3 or rank == 4 or rank == 5): + return False + + # check compatibility of bias value with the rank of the conv op + # either bias value should be a scalar or: + # rank=3 ==> (B,C,D), which means bias must be (1,C,1) or (C,1) + # rank=4 ==> (B,C,D1,D2), which means bias must be (1,C,1,1) or (C,1,1) + # rank=5 ==> (B,C,D1,D2,D3), which means bias must be (1,C,1,1,1) or (C,1,1,1) + + if is_bias_scalar: + bias_value = np.array([bias_value]) + else: + # check that there is at most one dimension in the shape that is not 1 + if len(np.squeeze(bias_value).shape) > 1: + return False + # check that addition is not happening on the batch dimension + if len(bias_value.shape) == rank: + if bias_value.shape[0] != 1: + return False + # check that last rank-2 entries in the shape vector are all 1s + if np.prod(bias_value.shape[-(rank - 2) :]) != 1: + return False + bias_value = np.squeeze(bias_value) + + if add_op.op_type == "sub": + bias_value *= -1 + + # everything looks good, now find the new updated bias + old_bias = conv_op.inputs.get("bias", None) + old_bias_value = None + if old_bias is not None and old_bias.val is not None: + old_bias_value = old_bias.val + if old_bias is None: + # need to create a fresh numpy array for bias + if np.prod(bias_value.shape) == 1: + # its a scalar bias + # need to find the value of Cout to form a new bias + if conv_op.weight.val is None: + return False + # conv_transpose has weight format [K, C_out, spatial dims] + # conv has weight format [C_out, K, spatial dims] + Cout = conv_op.weight.val.shape[0 if is_conv_op else 1] + new_bias_value = np.broadcast_to(bias_value, (Cout,)) + else: + new_bias_value = bias_value + else: + # just need to update the existing bias array + try: + new_bias_value = old_bias_value + bias_value + except: + return False + + # create a new conv op with the new bias value, copying rest of the attributes + out_name = add_op.outputs[0].name + if new_bias_value.dtype != np.float32 and new_bias_value.dtype != np.float16: + # cast the bias to match the weight type + weight_np_type = types.nptype_from_builtin( + conv_op.inputs["weight"].sym_type.get_primitive() + ) + logger.warning( + "conv_bias_fusion pass: casting bias " + "from {} to {} to match the dtype of the weight of the conv layer".format( + new_bias_value.dtype, weight_np_type + ) + ) + new_bias_value = new_bias_value.astype(weight_np_type) + new_bias_var = mb.const(val=new_bias_value, before_op=conv_op) + + conv_kargs = {"bias": new_bias_var, "name": out_name, "before_op": conv_op} + + for k, v in conv_op.inputs.items(): + if k == "bias": + continue + conv_kargs[k] = v + + if is_conv_op: + x = mb.conv(**conv_kargs) + else: + x = mb.conv_transpose(**conv_kargs) + + if add_op.enclosing_block.try_replace_uses_of_var_after_op( + anchor_op=add_op, + old_var=add_op.outputs[0], + new_var=x, + ): + 
add_op.enclosing_block.remove_ops([conv_op, add_op]) + return True + return False + + @block_context_manager + def _fuse_conv_bias_block(self, block): + fusion_status = False + for op in list(block.operations): + for b in op.blocks: + block_changed = True + while block_changed: + block_changed = self._fuse_conv_bias_block(b) + if len(op.blocks) > 0: + # This op can't be conv or conv_transpose + continue + + # pattern 1 : conv + add/sub + add_op = self._match_pattern(op) + if add_op is not None: + fusion_status = self._try_to_transform(op, add_op) + # has to break as the downstream iterator is affected. + if fusion_status: + return fusion_status + + # pattern 2 : conv + transpose + add/sub + fusion_status = self._try_to_transform_transpose_pattern(op, block) + if fusion_status: + return fusion_status + + return fusion_status + + +@register_pass(namespace="common") +class fuse_conv_scale(AbstractGraphPass): + """ + Fold ``mul``/``div`` into ``conv``/``conv_transpose`` by updating the weight/bias of the convolution layers. + + The scale ``const`` can be a single number (scalar) or a vector with a broadcastable shape. + For example, if the output of the ``conv``/``deconv`` layer is ``(B, Cout, H, W)``, + ``const`` of shape ``(Cout, 1, 1)`` and ``(1, Cout, 1, 1)`` are allowed. + + .. code-block:: + + Given: + %2 = conv(%1) + ... + %3 = mul(%2, constant) # where constant is the scale constant + ... + + Result: + %3 = conv(%1) + ... + """ + + def apply(self, prog): + for f in prog.functions.values(): + block_changed = True + while block_changed: + block_changed = self._fuse_conv_scale_block(f) + + @staticmethod + def _try_to_transform(conv_op, scale_op): + # get the scale + if scale_op.x.val is None and scale_op.y.val is None: + return False + scale_var = scale_op.x if scale_op.x.val is not None else scale_op.y + scale = scale_var.val + + # for the scalar case, the scalar can be either + # 1. a python int/float + # 2. a 0d numpy array + # 3. 
a 1d numpy array with shape (1,)
+
+        is_scalar = True
+        if isinstance(scale, np.ndarray):
+            if scale.shape == ():
+                scale = scale.tolist()
+            elif scale.shape == (1,):
+                scale = scale[0]
+            else:
+                is_scalar = False
+
+        # get weight and bias and groups from conv layer
+        if conv_op.weight.val is None:
+            return False
+        conv_weight = conv_op.weight.val
+        conv_bias = conv_op.bias
+        groups = conv_op.groups.val
+
+        # get type of the conv layer
+        is_deconv = conv_op.op_type == "conv_transpose"
+        is_conv_1d = len(conv_weight.shape) == 3
+
+        # D_in denotes the spatial dimensions for conv kernel weight
+        # for conv_transpose, conv_weight has shape [Cin, Cout / groups, *D_in]
+        # for conv, conv_weight has shape [Cout, Cin / groups, *D_in]
+        if is_deconv:
+            Cout = conv_weight.shape[1] * groups
+            Cin = conv_weight.shape[0]
+        else:
+            Cout = conv_weight.shape[0]
+            Cin = conv_weight.shape[1] * groups
+
+        # for the vector scale case, check if the shape is broadcastable
+        if not is_scalar:
+            if np.prod(scale.shape) != Cout:
+                return False
+            if len(scale.shape) == len(conv_weight.shape):
+                if scale.shape[1] != Cout:
+                    return False
+            elif len(scale.shape) == len(conv_weight.shape) - 1:
+                if scale.shape[0] != Cout:
+                    return False
+            else:
+                return False
+
+        # transform the scale to 1./scale for the real_div case
+        if scale_op.op_type == "real_div":
+            scale = 1.0 / scale
+
+        # get the type of the conv weight
+        conv_weight_type = conv_weight.dtype
+
+        # create bias for conv if not exist
+        if conv_bias is None:
+            conv_bias = np.zeros(Cout)
+        else:
+            conv_bias = conv_bias.val
+        conv_bias = conv_bias.astype(conv_weight_type)
+
+        # get the original shape of weight and bias
+        origin_weight_shape = conv_weight.shape
+        origin_bias_shape = conv_bias.shape
+
+        # update the weight/bias for conv layer
+        if is_scalar:
+            new_conv_bias = np.array(conv_bias * scale).astype(conv_weight_type)
+            new_conv_weight = np.array(conv_weight * scale).astype(conv_weight_type)
+        else:
+            scale = np.reshape(scale, (Cout,))
+            new_conv_bias = np.array(conv_bias * scale).astype(conv_weight_type)
+            new_conv_weight = []
+            if is_deconv:
+                conv_weight = np.transpose(conv_weight, [1, 0, 2] if is_conv_1d else [1, 0, 2, 3])
+                conv_weight = np.reshape(
+                    conv_weight, [Cout, Cin // groups] + list(conv_weight.shape[2:])
+                )
+
+            for i in range(Cout):
+                _conv_weight = conv_weight[i] * scale[i]
+                new_conv_weight.append(_conv_weight)
+            new_conv_weight = np.array(new_conv_weight).astype(conv_weight_type)
+
+            if is_deconv:
+                new_conv_weight = np.reshape(
+                    new_conv_weight, [Cout // groups, Cin] + list(new_conv_weight.shape[2:])
+                )
+                new_conv_weight = np.transpose(
+                    new_conv_weight, [1, 0, 2] if is_conv_1d else [1, 0, 2, 3]
+                )
+
+        # make sure the updated weight and bias have the same shape as the original ones
+        assert (
+            new_conv_weight.shape == origin_weight_shape
+        ), "conv weight should have the same shape before and after the fuse_conv_scale pass."
+        assert (
+            new_conv_bias.shape == origin_bias_shape
+        ), "conv bias should have the same shape before and after the fuse_conv_scale pass."
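+
+        # The fold above is valid by linearity of convolution:
+        # scale * conv(x, W, b) == conv(x, scale * W, scale * b), with a vector scale
+        # applied per output channel to the corresponding kernel slice and bias entry.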
+
+        # create a new conv op with the new weight, bias value, copying rest of the attributes
+        out_name = scale_op.outputs[0].name
+        conv_kargs = {
+            "weight": new_conv_weight,
+            "bias": new_conv_bias,
+            "name": out_name,
+            "before_op": conv_op,
+        }
+
+        for k, v in conv_op.inputs.items():
+            if k in ["weight", "bias"]:
+                continue
+            conv_kargs[k] = v
+
+        if is_deconv:
+            x = mb.conv_transpose(**conv_kargs)
+        else:
+            x = mb.conv(**conv_kargs)
+
+        if scale_op.enclosing_block.try_replace_uses_of_var_after_op(
+            anchor_op=scale_op,
+            old_var=scale_op.outputs[0],
+            new_var=x,
+        ):
+            scale_op.enclosing_block.remove_ops([conv_op, scale_op])
+            return True
+        return False
+
+    @block_context_manager
+    def _fuse_conv_scale_block(self, block):
+        def _match_pattern(op):
+            if op.op_type == "conv" or op.op_type == "conv_transpose":
+                # abort fusion if op output is also a block output
+                if op.outputs[0] in op.enclosing_block.outputs:
+                    return None
+                # find mul or real_div op
+                child_ops = op.outputs[0].child_ops
+                if len(child_ops) == 1:
+                    scale_op_candidate = list(child_ops)[0]
+                    if scale_op_candidate.op_type in ["mul", "real_div"]:
+                        return scale_op_candidate
+            return None
+
+        fusion_occurred = False
+        for op in list(block.operations):
+            for b in op.blocks:
+                block_changed = True
+                while block_changed:
+                    block_changed = self._fuse_conv_scale_block(b)
+            if len(op.blocks) > 0:
+                # This op can't be conv or conv_transpose
+                continue
+
+            scale_op = _match_pattern(op)
+
+            if scale_op is not None:
+                fusion_occurred = self._try_to_transform(op, scale_op)
+                # has to break as the downstream iterator is affected.
+                if fusion_occurred:
+                    return fusion_occurred
+        return fusion_occurred
+
+
+@register_pass(namespace="common")
+class fuse_pad_conv(AbstractGraphPass):
+    """
+    When we observe ``pad -> transpose -> conv``, we move the ``pad`` to be next to ``conv``.
+    This allows us to meld ``pad + conv`` if possible.
+
+    .. code-block::
+
+        Given:
+            %1 = pad(%0, ...)
+            %2 = transpose(%1, ...)
+            %3 = conv(%2, ...)
+            ...
+
+        Result:
+            %1.a = transpose(%0, ...)
+            %2.a = pad(%1.a, ...)
+            %3 = conv(%2.a)
+            ...
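+
+    Swapping ``pad`` and ``transpose`` only requires permuting the per-axis pad
+    amounts; a NumPy sketch of the identity this pass relies on (illustrative only):
+
+    .. code-block:: python
+
+        import numpy as np
+
+        x = np.random.rand(1, 2, 3, 4)
+        pad = [(0, 0), (0, 0), (1, 1), (2, 2)]  # pad amounts per input axis
+        perm = (0, 2, 3, 1)
+        pad_permuted = [pad[axis] for axis in perm]
+        # pad-then-transpose equals transpose-then-pad with permuted pad amounts
+        assert np.allclose(
+            np.transpose(np.pad(x, pad), perm),
+            np.pad(np.transpose(x, perm), pad_permuted),
+        )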
+    """
+
+    def apply(self, prog):
+        for f in prog.functions.values():
+            block_changed = True
+            while block_changed:
+                block_changed = self._pad_conv_connect_block(f)
+
+    @staticmethod
+    def _match_pattern(op):
+        ret = set()
+        child_ops = op.outputs[0].child_ops
+
+        for child_op in child_ops:
+            if child_op.op_type != "transpose":
+                continue
+            skip_ops = child_op.outputs[0].child_ops
+            for skip_op in skip_ops:
+                if "conv" not in skip_op.op_type:
+                    continue
+                ret.update([child_op])
+
+        return ret if len(ret) != 0 else None
+
+    @staticmethod
+    def _try_to_transform(pad_op, transpose_ops, block):
+        def _compute_new_pad_values(transpose_op):
+            if pad_op.inputs["pad"].val is None:
+                return None
+            pad_amounts = np.reshape(pad_op.inputs["pad"].val, [-1, 2])
+            transpose_axes = transpose_op.inputs["perm"].val
+            rank_diff = len(transpose_axes) - pad_amounts.shape[0]
+            pad_amounts_new = copy.deepcopy(pad_amounts)
+            # append "rank_diff" rows of zeros to the top
+            pad_amounts_new = np.concatenate(
+                (np.zeros((2 * rank_diff)).reshape(-1, 2), pad_amounts_new)
+            )
+            pad_amounts_new = pad_amounts_new.astype(pad_amounts.dtype)
+            pad_amounts = np.concatenate((np.zeros((2 * rank_diff)).reshape(-1, 2), pad_amounts))
+            for i, axis in enumerate(transpose_axes):
+                pad_amounts_new[i][0] = pad_amounts[axis][0]
+                pad_amounts_new[i][1] = pad_amounts[axis][1]
+
+            # get the top "rank_diff" rows
+            top_rows = pad_amounts_new[:rank_diff, :]
+            if not np.all(top_rows == 0):
+                # return None (not False) so the caller skips this transpose op
+                return None
+            # cut "rank_diff" from the top
+            pad_amounts_new = pad_amounts_new[rank_diff:, :]
+            pad_amounts_new = pad_amounts_new.flatten()
+            return pad_amounts_new
+
+        if pad_op.outputs[0] in pad_op.enclosing_block.outputs:
+            return False
+        if len(set(pad_op.outputs[0].child_ops)) != len(transpose_ops):
+            return False
+
+        for transpose_op in transpose_ops:
+            pad_amounts_new = _compute_new_pad_values(transpose_op)
+            if pad_amounts_new is None:
+                continue
+
+            with pad_op.enclosing_block:
+                new_transpose_var = mb.transpose(
+                    x=pad_op.inputs["x"],
+                    perm=transpose_op.inputs["perm"].val,
+                    before_op=transpose_op,
+                )
+                new_pad_inputs = {"x": new_transpose_var, "pad": pad_amounts_new}
+                for k, v in pad_op.inputs.items():
+                    if k not in new_pad_inputs:
+                        new_pad_inputs[k] = v
+                new_pad_var = mb.pad(before_op=transpose_op, **new_pad_inputs)
+            pad_op.enclosing_block.replace_uses_of_var_after_op(
+                anchor_op=transpose_op, old_var=transpose_op.outputs[0], new_var=new_pad_var
+            )
+
+        pad_op.enclosing_block.remove_ops(list(transpose_ops) + [pad_op])
+
+        return True
+
+    @block_context_manager
+    def _pad_conv_connect_block(self, block):
+        fusion_status = False
+        for op in list(block.operations):
+            for b in op.blocks:
+                block_changed = True
+                while block_changed:
+                    block_changed = self._pad_conv_connect_block(b)
+
+            if op.op_type != "pad":
+                continue
+
+            transpose_ops = self._match_pattern(op)
+            if transpose_ops is not None:
+                fusion_status = self._try_to_transform(op, transpose_ops, block)
+                # has to break as the downstream iterator is affected.
+                if fusion_status:
+                    return fusion_status
+        return fusion_status
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_elementwise_binary.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_elementwise_binary.py
new file mode 100644
index 00000000..1b0fd507
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_elementwise_binary.py
@@ -0,0 +1,321 @@
+# Copyright (c) 2023, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import numpy as np
+
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil import types as _types
+from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
+from coremltools.converters.mil.mil.passes.helper import block_context_manager
+from coremltools.converters.mil.mil.passes.pass_registry import register_pass
+
+
+@register_pass(namespace="common")
+class divide_to_multiply(AbstractGraphPass):
+    """
+    Convert divide into multiply if the divisor is ``const``.
+    """
+
+    def apply(self, prog):
+        for f in prog.functions.values():
+            self._divide_to_multiply_block(f)
+
+    @block_context_manager
+    def _divide_to_multiply_block(self, block):
+        for op in list(block.operations):
+            for b in op.blocks:
+                self._divide_to_multiply_block(b)
+            if len(op.blocks) > 0:
+                # This op can't be a real_div.
+                continue
+
+            # If real_div has an integer input, the result is an integer (following the TensorFlow spec).
+            # Hence, this pass needs to be disabled if the input is not float, since it translates y
+            # to a floating point number. If x or y was originally an integer, and y becomes
+            # a floating point number, then the original type
+            # signature (with integer output) would not be preserved.
+            if op.op_type == "real_div" and op.y.val is not None and _types.is_float(op.x.dtype):
+                new_y_val = np.array(1.0, dtype=op.y.val.dtype) / op.y.val
+                if not np.isfinite(new_y_val).all():
+                    continue
+
+                x = mb.mul(x=op.x, y=new_y_val, name="_inversed_" + op.name, before_op=op)
+                op.enclosing_block.replace_uses_of_var_after_op(
+                    anchor_op=op, old_var=op.outputs[0], new_var=x
+                )
+                block.remove_ops([op])
+
+
+@register_pass(namespace="common")
+class fuse_elementwise_to_batchnorm(AbstractGraphPass):
+    """
+    Fold ``mul`` + ``add`` into a ``batchnorm``
+    if the ``const`` feeding into the ``mul``/``add`` is of shape ``(1,C,1,1)`` or ``(C,1,1)``
+    and the input to ``mul`` is of rank 4.
+
+    .. code-block::
+
+        Given:
+                 [Const]   [Const]
+                    |         |
+                    V         V
+        [...] --> [Mul] --> [Add] --> [...]
+
+        That is,
+
+            %2 = op1(%1)
+            %3 = mul(%2, constant)
+            %4 = add(%3, constant)
+            %5 = op2(%4)
+            ...
+
+        Result:
+
+        [...] --> [BatchNorm] --> [...]
+
+        That is,
+            %2 = op1(%1)
+            %4 = batchnorm(%2)
+            %5 = op2(%4)
+            ...
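+
+    The fold is valid because a ``batch_norm`` with ``mean=0`` and ``variance=1``
+    collapses (up to ``epsilon``) to ``gamma * x + beta``; a NumPy sketch
+    (illustrative only, not part of the pass):
+
+    .. code-block:: python
+
+        import numpy as np
+
+        x = np.random.rand(1, 3, 4, 4)
+        gamma = np.random.rand(3).reshape(1, 3, 1, 1)
+        beta = np.random.rand(3).reshape(1, 3, 1, 1)
+        eps = 1e-5
+        # batch_norm with zero mean and unit variance
+        bn = gamma * (x - 0.0) / np.sqrt(1.0 + eps) + beta
+        assert np.allclose(bn, gamma * x + beta, atol=1e-4)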
+ """ + + def apply(self, prog): + for f in prog.functions.values(): + block_changed = True + while block_changed: + block_changed = self._fuse_elementwise_to_batchnorm_block(f) + + @staticmethod + def _match_pattern(op): + if op.outputs[0] in op.enclosing_block.outputs: + return None + + if op.op_type == "mul": + # find add + child_ops = op.outputs[0].child_ops + if len(child_ops) == 1: + add_op_candidate = list(child_ops)[0] + if add_op_candidate.op_type == "add": + return add_op_candidate + return None + + @staticmethod + def _try_to_transform(mul_op, add_op, block): + def _find_const_input_val(op): + if op.x.val is not None: + return op.x.val + if op.y.val is not None: + return op.y.val + return None + + def _check_shape(arr): + """ + return True if shape is of form + (1,C,1,1) or (C,1,1) + """ + rank = len(arr.shape) + if not (rank == 3 or rank == 4): + return False + C = arr.shape[-3] + if not (arr.shape == (1, C, 1, 1) or arr.shape == (C, 1, 1)): + return False + return True + + non_const_input_mul = mul_op.x if mul_op.x.val is None else mul_op.y + if non_const_input_mul.rank != 4: + return False + + gamma = _find_const_input_val(mul_op) + beta = _find_const_input_val(add_op) + if gamma is None or beta is None: + return False + + if not (isinstance(gamma, np.ndarray) and isinstance(beta, np.ndarray)): + return False + + # check that gamma and beta have shape (1,C,1,1) or (C,1,1) + # that is they are doing vector addition on the axis=-3, which is what the + # batchnorm layer does (batchnorm layer only works on rank 4 input tensors) + if not (_check_shape(gamma) and _check_shape(beta)): + return False + + C = gamma.shape[-3] + if C == 1: + return False + + out_name = add_op.outputs[0].name + x = mb.batch_norm( + x=non_const_input_mul, + mean=np.zeros((C,), np.float32), + variance=np.ones((C,), np.float32), + gamma=np.squeeze(gamma), + beta=np.squeeze(beta), + name=out_name, + before_op=mul_op, + ) + + add_op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=add_op, old_var=add_op.outputs[0], new_var=x + ) + # Remove all the ops at once + block.remove_ops([mul_op, add_op]) + return True + + @block_context_manager + def _fuse_elementwise_to_batchnorm_block(self, block): + fusion_status = False + for op in list(block.operations): + for b in op.blocks: + block_changed = True + while block_changed: + block_changed = self._fuse_elementwise_to_batchnorm_block(b) + if len(op.blocks) > 0: + # This op can't be mul + continue + + add_op = self._match_pattern(op) + if add_op is not None: + fusion_status = self._try_to_transform(op, add_op, block) + # has to break as the downstream iterator is affected. + if fusion_status: + return fusion_status + return fusion_status + + +@register_pass(namespace="common") +class rank0_expand_dims_swap(AbstractGraphPass): + """ + Identify the pattern of a ``rank-0`` binary elementwise operation followed by an ``expand_dims`` op. + In the MIL backend, the output of the ``elementwise`` op becomes rank 1. Hence, an ``expand_dims`` op + should be added after both of the ``rank-0`` tensors, and the final ``expand_dims`` should be removed. + If the output var of the binary elementwise op is consumed by more than one op, a ``squeeze`` op + is inserted. + + .. code-block:: + + Input: + + [...](rank-0) --> sub --> expand_dims (axes=[0]) --> [...] + ^ | + | |--> op2 + | | + | |--> op3 + | + [scalar const] + + Output: + [...](rank-0) --> expand_dims (axes=[0]) --> sub --> [...] 
+ ^ | + | |--> squeeze ---> op2 + | | + | |--> op3 + | + expand_dims (axes=[0]) + ^ + | + | + [scalar const] + """ + + def apply(self, prog): + for f in prog.functions.values(): + block_changed = True + while block_changed: + block_changed = self._rank0_expand_dims_swap(f) + + @staticmethod + def _try_to_transform(op, block): + op_type = op.op_type + ops_to_remove = [] + if op.x.rank != 0 or op.y.rank != 0: + return False + + # One and only one input is a scalar const + if (op.x.val is None) == (op.y.val is None): + return False + + var_1, var_2 = op.x, op.y + ops_to_remove.append(op) + + # check if the output is consumed by exact one expand_dims op and other ops + expand_dims_ops = [] + other_ops = [] + child_ops = list(op.outputs[0].child_ops) + for child_op in child_ops: + if child_op.op_type == "expand_dims": + expand_dims_ops.append(child_op) + else: + other_ops.append(child_op) + if len(expand_dims_ops) != 1: + return False + + # check the expand_dim op has axes = [0] + expand_dims_op = expand_dims_ops[0] + if expand_dims_op.axes.val != [0]: + return False + ops_to_remove.append(expand_dims_op) + ops_to_remove += other_ops + + for out in op.outputs: + if out in block.outputs: + return False + + # add a expand_dims op after each rank-0 tensor + var_1_expand = mb.expand_dims(x=var_1, axes=[0], before_op=op) + var_2_expand = mb.expand_dims(x=var_2, axes=[0], before_op=op) + + # add a new elementwise binary op + elem_op = getattr(mb, op_type) + + # replace var for the expand_dims op + x = elem_op( + x=var_1_expand, y=var_2_expand, name=expand_dims_op.outputs[0].name, before_op=op + ) + expand_dims_op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=expand_dims_op, old_var=expand_dims_op.outputs[0], new_var=x + ) + + # replace var for other ops + if len(other_ops) >= 1: + elem_op_output = op.outputs[0] + squeeze = mb.squeeze(x=x, before_op=op) + for other_op in other_ops: + new_op = getattr(mb, other_op.op_type) + kargs = {} + for k, v in other_op.inputs.items(): + if v == elem_op_output: + kargs[k] = squeeze + else: + kargs[k] = v + kargs["name"] = other_op.name + kargs["before_op"] = other_op + new_var = new_op(**kargs) + other_op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=other_op, old_var=other_op.outputs[0], new_var=new_var + ) + + # Remove all the ops at once + block.remove_ops(ops_to_remove) + return True + + @block_context_manager + def _rank0_expand_dims_swap(self, block): + fusion_occurred = False + for op in list(block.operations): + for b in op.blocks: + block_changed = True + while block_changed: + block_changed = self._rank0_expand_dims_swap(b) + if len(op.blocks) > 0: + # This op can't be elementwise binary ops + continue + + if op.op_type in ["add", "sub", "mul", "real_div", "floor_div"]: + fusion_occurred = self._try_to_transform(op, block) + # has to break as the downstream iterator is affected. + if fusion_occurred: + return fusion_occurred + return fusion_occurred diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_linear.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_linear.py new file mode 100644 index 00000000..b72f30f7 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_linear.py @@ -0,0 +1,306 @@ +# Copyright (c) 2023, Apple Inc. All rights reserved. 
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import numpy as np
+
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil import Program
+from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
+from coremltools.converters.mil.mil.passes.helper import block_context_manager
+from coremltools.converters.mil.mil.passes.pass_registry import register_pass
+
+
+@register_pass(namespace="common")
+class fuse_linear_bias(AbstractGraphPass):
+    """
+    Convert ``linear + add/sub`` to a single ``linear`` by updating the weight and bias of the ``linear`` layer.
+
+    .. code-block::
+
+        Example 1:
+            Original:
+                %4 = linear(x=%1, weight=%2, bias=%3) # %2 is a rank-2 const tensor (weight)
+                                                      # %3 is a rank-1 const tensor (bias)
+                ...
+                %6 = add(x=%4, y=%5) # %5 is a const tensor with same shape as %3
+
+            Result:
+                %8 = linear(x=%1, weight=%2, bias=%7) # where %7 is a new const tensor with value
+                                                      # %7 = %3 + %6
+
+        Example 2:
+            Original:
+                %4 = linear(x=%1, weight=%2, bias=%3) # %2 is a rank-2 const tensor (weight)
+                                                      # %3 is a rank-1 const tensor (bias)
+                ...
+                %6 = sub(x=%5, y=%4) # %5 is a const tensor with a shape broadcastable with %3,
+                                       i.e. if %3 has shape (Dout), %5 could be (1, Dout).
+
+            Result:
+                %9 = linear(x=%1, weight=%7, bias=%8) # where %7 is a new const tensor with value %7 = -%2
+                                                      # %8 = %5 - %3
+    """
+
+    def apply(self, prog: Program):
+        for f in prog.functions.values():
+            block_changed = True
+            while block_changed:
+                block_changed = self._fuse_linear_bias_block(f)
+
+    @staticmethod
+    def _try_to_transform(linear_op, add_or_sub_op, block):
+
+        if add_or_sub_op.x.val is None and add_or_sub_op.y.val is None:
+            return False
+
+        is_sub = add_or_sub_op.op_type == "sub"
+        is_first_input = add_or_sub_op.x == linear_op.outputs[0]
+
+        # compute the new bias
+        linear_bias = linear_op.bias.val
+        bias = add_or_sub_op.y.val if is_first_input else add_or_sub_op.x.val
+
+        # check if the shape is broadcastable
+        if np.prod(linear_bias.shape) != np.prod(bias.shape):
+            return False
+        Dout = linear_bias.shape[0]
+        if bias.shape[-1] != Dout:
+            return False
+        bias = np.reshape(bias, (Dout,))
+
+        if is_sub:
+            if is_first_input:
+                bias = -bias
+            else:
+                linear_bias = -linear_bias
+
+        new_bias = linear_bias + bias
+
+        # compute the new weight
+        if is_sub and not is_first_input:
+            new_weight = -linear_op.weight.val
+        else:
+            new_weight = linear_op.weight.val
+
+        # create a new linear op with the new weight and bias values, copying the rest of the attributes
+        out_name = add_or_sub_op.outputs[0].name
+        linear_kargs = {
+            "weight": new_weight,
+            "bias": new_bias,
+            "name": out_name,
+            "before_op": linear_op,
+        }
+
+        for k, v in linear_op.inputs.items():
+            if k in ["weight", "bias"]:
+                continue
+            linear_kargs[k] = v
+
+        x = mb.linear(**linear_kargs)
+
+        if add_or_sub_op.enclosing_block.try_replace_uses_of_var_after_op(
+            anchor_op=add_or_sub_op,
+            old_var=add_or_sub_op.outputs[0],
+            new_var=x,
+        ):
+            add_or_sub_op.enclosing_block.remove_ops([linear_op, add_or_sub_op])
+            return True
+        return False
+
+    @block_context_manager
+    def _fuse_linear_bias_block(self, block):
+        def _find_candidate_op(op):
+            if op.op_type != "linear":
+                return None
+            # abort fusion if op output is also a block output
+            if op.outputs[0] in op.enclosing_block.outputs:
+                return None
+            # find add/sub op
+            child_ops = op.outputs[0].child_ops
+            if len(child_ops) == 1:
+                op_candidate = list(child_ops)[0]
+                if op_candidate.op_type in ["add", "sub"]:
+                    return op_candidate
+
+        fusion_occurred = False
+        for op in list(block.operations):
+            for b in op.blocks:
+                block_changed = True
+                while block_changed:
+                    block_changed = self._fuse_linear_bias_block(b)
+            if len(op.blocks) > 0:
+                # This op can't be linear
+                continue
+
+            add_or_sub_op = _find_candidate_op(op)
+            if add_or_sub_op is not None:
+                fusion_occurred = self._try_to_transform(op, add_or_sub_op, block)
+                # has to break as the downstream iterator is affected.
+                if fusion_occurred:
+                    return fusion_occurred
+        return fusion_occurred
+
+
+@register_pass(namespace="common")
+class fuse_matmul_weight_bias(AbstractGraphPass):
+    """
+    Convert ``matmul + add/sub`` to ``linear`` whenever possible.
+
+    .. code-block::
+
+        Given:
+            %3 = matmul(x=%1, y=%2) # %1 or %2 is const and rank 2 (weight)
+            ...
+            %5 = add(x=%3, y=%4) # %4 is const. add(x=%4, y=%3) is equivalent
+                                 # sub is similar.
+
+        Result:
+            # assuming %2 above is const and rank 2
+            %5 = linear(x=%1, weight=%2, bias=%4)
+    """
+
+    def apply(self, prog: Program):
+        for f in prog.functions.values():
+            block_changed = True
+            while block_changed:
+                block_changed = self._fuse_matmul_weight_bias_block(f)
+
+    @staticmethod
+    def _find_candidate_op(op):
+        _CHILD_OP_TYPES = ["add", "sub"]
+
+        if op.op_type != "matmul":
+            return None
+        # find the add/sub op
+        child_ops = op.outputs[0].child_ops
+        if len(child_ops) == 1:
+            add_op_candidate = list(child_ops)[0]
+            if add_op_candidate.op_type in _CHILD_OP_TYPES:
+                return add_op_candidate
+
+    @staticmethod
+    def _transpose(v, before_op, name=None):
+        """
+        Transpose the last 2 dims.
+
+        - ``v``: (Var, must be a tensor).
+        - ``before_op``: (Operation) The op right before the newly added ``transpose`` op.
+        - ``name``: Name for the ``transpose`` op if provided.
+        """
+        perm = list(range(v.rank))
+        perm[-2], perm[-1] = perm[-1], perm[-2]
+
+        if name is None:
+            return mb.transpose(x=v, perm=perm, before_op=before_op)
+        else:
+            return mb.transpose(x=v, perm=perm, before_op=before_op, name=name)
+
+    def _try_to_transform(self, matmul_op, add_op, block):
+        if matmul_op.x.val is None and matmul_op.y.val is None:
+            # This is a dynamic matmul.
+            return False
+        if add_op.x.val is None and add_op.y.val is None:
+            # This is a dynamic add.
+            return False
+
+        x_is_weight = matmul_op.x.val is not None
+        if x_is_weight:
+            weight, linear_x = matmul_op.x, matmul_op.y
+            transpose_weight = matmul_op.transpose_x.val
+            transpose_x = matmul_op.transpose_y.val
+        else:
+            weight, linear_x = matmul_op.y, matmul_op.x
+            transpose_weight = matmul_op.transpose_y.val
+            transpose_x = matmul_op.transpose_x.val
+
+        # We potentially are going to transpose the weight, so if the weight itself is not removable, we skip this path
+        if len(weight.nonreplaceable_vars_upstream) > 0:
+            return False
+
+        if linear_x.rank < 2 or weight.rank != 2:
+            # We don't support these cases yet.
+            return False
+
+        # For weights that are the input for more than one op,
+        # we don't do the fusion.
+        # The reason is that it might cause memory explosion by adding
+        # the weight as a numpy array in the inner product or
+        # the batch_mat_mul kernel.
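+        # To illustrate (a hypothetical scenario, not taken from a real model): if
+        # the same const weight W fed two different matmul ops, fusing each one into
+        # a linear op would materialize W (possibly transposed) as a separate const
+        # for every fused op, roughly doubling the weight's footprint.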
+        if len(weight.child_ops) > 1:
+            return False
+
+        d_out = weight.shape[1] if not transpose_weight else weight.shape[0]
+        bias = add_op.x.val if add_op.x.val is not None else add_op.y.val
+        if len(bias.shape) > 1:
+            if any([d != 1 for d in bias.shape[:-1]]):
+                return False  # cannot transform
+
+            # squeeze leading dims of size 1
+            bias = np.squeeze(bias)
+
+        if len(bias.shape) != 1 or bias.shape[0] != d_out:
+            return False  # cannot transform
+
+        if add_op.op_type == "sub":
+            bias = -bias
+        out_name = add_op.outputs[0].name
+
+        if x_is_weight:
+            # If transpose_x == transpose_weight == False:
+            # w*x = (x^T w^T)^T = linear(x^T, w)^T
+            x_transposed = (
+                self._transpose(linear_x, before_op=matmul_op) if not transpose_x else linear_x
+            )
+            w_no_transpose = (
+                weight if not transpose_weight else self._transpose(weight, before_op=matmul_op)
+            )
+            x = mb.linear(x=x_transposed, weight=w_no_transpose, bias=bias, before_op=matmul_op)
+            x = self._transpose(x, before_op=matmul_op, name=out_name)
+        else:
+            # If transpose_x == transpose_weight == False
+            # x*w = x*(w^T)^T = linear(x, w^T)
+            x_no_transpose = (
+                self._transpose(linear_x, before_op=matmul_op) if transpose_x else linear_x
+            )
+            w_transposed = (
+                weight if transpose_weight else self._transpose(weight, before_op=matmul_op)
+            )
+            x = mb.linear(
+                x=x_no_transpose,
+                weight=w_transposed,
+                bias=bias,
+                before_op=matmul_op,
+                name=out_name,
+            )
+
+        if add_op.enclosing_block.try_replace_uses_of_var_after_op(
+            anchor_op=add_op,
+            old_var=add_op.outputs[0],
+            new_var=x,
+        ):
+            add_op.enclosing_block.remove_ops([matmul_op, add_op])
+            return True
+        return False
+
+    @block_context_manager
+    def _fuse_matmul_weight_bias_block(self, block):
+        fusion_status = False
+        for op in list(block.operations):
+            for b in op.blocks:
+                block_changed = True
+                while block_changed:
+                    block_changed = self._fuse_matmul_weight_bias_block(b)
+            if len(op.blocks) > 0:
+                # This op can't be matmul
+                continue
+
+            add_op = self._find_candidate_op(op)
+
+            if add_op is not None:
+                fusion_status = self._try_to_transform(op, add_op, block)
+                # has to break as the downstream iterator is affected.
+                if fusion_status:
+                    return fusion_status
+        return fusion_status
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_normalization.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_normalization.py
new file mode 100644
index 00000000..79720a23
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_normalization.py
@@ -0,0 +1,851 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from typing import List, Optional
+
+import numpy as np
+
+from coremltools import _logger as logger
+from coremltools.converters.mil.mil import Block
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil import Operation, Program, Var
+from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
+from coremltools.converters.mil.mil.passes.helper import (
+    _check_no_output_connection,
+    block_context_manager,
+)
+from coremltools.converters.mil.mil.passes.pass_registry import register_pass
+
+
+@register_pass(namespace="common")
+class fuse_layernorm_or_instancenorm(AbstractGraphPass):
+    """
+    A graph optimization pass on PyMIL to detect and fuse several variants of ``layer_norm`` or
+    ``instance_norm``. Pattern 1 corresponds to either ``layer_norm`` or ``instance_norm``. Patterns 2-4
+    are ``instance_norm``.
+    """
+
+    _DEBUG = False  # set to true to plot the block before and after the transformation
+
+    def apply(self, prog: Program):
+        for f in prog.functions.values():
+            block_changed = True
+            while block_changed:
+                if self._DEBUG:
+                    import graphviz
+
+                    graphviz.Source(
+                        f.get_dot_string(
+                            highlight_debug_op_types=["instance_norm"],
+                        )
+                    ).view(filename="/tmp/block_before_fuse_layernorm_or_instancenorm")
+                logger.debug("Block before fuse_layernorm_or_instancenorm transform:\n{}".format(f))
+
+                block_changed = self._fuse_layernorm_or_instancenorm_block(f)
+
+            if self._DEBUG:
+                graphviz.Source(
+                    f.get_dot_string(
+                        highlight_debug_op_types=["instance_norm"],
+                    )
+                ).view(filename="/tmp/block_after_fuse_layernorm_or_instancenorm")
+
+            logger.debug("Block after fuse_layernorm_or_instancenorm transform:\n{}".format(f))
+
+    @staticmethod
+    def _check_reduce_op(reduce_op: Operation, mode: str = "reduce_mean") -> bool:
+        """
+        Check whether or not the ``reduction`` op satisfies the following conditions:
+
+        - Mode is expected.
+        - Does not change rank (``keep_dims`` is ``True``).
+        - The ``axes`` is known at compile time.
+
+        Parameters
+        ----------
+
+        param reduce_op : ``reduce_op`` to check on.
+
+        param mode : Expected ``reduce`` mode.
+
+        """
+        if reduce_op is None:
+            return False
+        if reduce_op.op_type != mode:
+            return False
+        if reduce_op.keep_dims is None or reduce_op.keep_dims.val is None:
+            return False
+        if reduce_op.keep_dims.val is False:
+            return False
+        if reduce_op.axes is None or reduce_op.axes.val is None:
+            return False
+        return True
+
+    @staticmethod
+    def _check_child_op_types(
+        op: Operation, child_op_types: List[str], check_order: bool = True
+    ) -> bool:
+        """
+        Returns ``True`` for child op types matching ``child_op_types``, otherwise returns ``False``.
+
+        Parameters
+        ----------
+
+        param op : Current op.
+
+        param child_op_types : Expected child op types.
+
+        param check_order : Ensure children are in the given order; defaults to ``True``.
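+
+        Example
+        -------
+        A hypothetical sketch: for an ``op`` whose single output feeds a ``mul``
+        op and then an ``add`` op, in that order::
+
+            _check_child_op_types(op, ["mul", "add"])                     # True
+            _check_child_op_types(op, ["add", "mul"])                     # False (order differs)
+            _check_child_op_types(op, ["add", "mul"], check_order=False)  # True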
+ """ + if op is None or len(op.outputs) != 1: + return False + child_ops = list(op.outputs[0].child_ops) + if len(child_ops) != len(child_op_types): + return False + ops_types = [c.op_type for c in child_ops] + if check_order is False: + ops_types = sorted(ops_types) + child_op_types = sorted(child_op_types) + return ops_types == child_op_types + + @staticmethod + def _try_get_child_op_type( + op: Operation, child_op_type: str, index: int = 0 + ) -> Optional[Operation]: + """ + Returns child op if type matches, otherwise returns ``None``. + + Parameters + ---------- + + param op : Current op. + + param child_op_type : Expected child op type. + + param index : Child op index. + """ + if op is None: + return None + if len(op.outputs) != 1: + return None + child_ops = list(op.outputs[0].child_ops) + if index >= len(child_ops): + return None + if child_ops[index].op_type != child_op_type: + return None + return child_ops[index] + + @staticmethod + def _try_apply_transform( + reduce_op: Operation, + block: Block, + gamma_var: Var, + beta_var: Var, + epsilon_var: Var, + end_op: Operation, + ops_to_remove: List[Operation], + ) -> bool: + """ + Insert instance_norm / layer_norm and delete all ops. + + :param reduce_op: Start operation of the pattern. + :param block: Block + :param gamma_var: Gamma variable. + :param beta_var: Beta variable. + :param epsilon_var: Epsilon variable. + :param end_op: End operation of the pattern. + :param ops_to_remove: Operations to remove. + """ + if not _check_no_output_connection(block, ops_to_remove): + return False + + axes = reduce_op.axes.val + rank = len(reduce_op.x.shape) + + # check whether the pattern is instance_norm or layer_norm + is_layernorm = False + is_instancenorm = False + is_require_rank4_transpose = False + + negative_axes = [a - rank if a >= 0 else a for a in axes] + negative_axes.sort() + + if len(gamma_var.val.shape) == len(axes) and len(beta_var.val.shape) == len(axes): + # axes for layer_norm must be [-1] or [-1, -2] or [-1, -2, -3] and so on + if negative_axes == list(range(-len(negative_axes), 0)): + is_layernorm = True + + if rank == 4 and (negative_axes == [-2, -1] or negative_axes == [-3, -2]): + if ( + len(np.squeeze(gamma_var.val).shape) == 1 + and len(np.squeeze(beta_var.val).shape) == 1 + ): + is_instancenorm = True + if negative_axes == [-3, -2]: + is_require_rank4_transpose = True + + if not (is_instancenorm or is_layernorm): + return False + + # remove all the ops, and replace with a layer_norm or instance_norm op + out_name = end_op.outputs[0].name + + if is_require_rank4_transpose: + x = mb.transpose( + x=reduce_op.x, + perm=[0, 3, 1, 2], + name=out_name + "_transpose_nhwc_nchw", + before_op=end_op, + ) + if is_instancenorm: + x = mb.instance_norm( + x=x if is_require_rank4_transpose else reduce_op.x, + gamma=np.squeeze(gamma_var.val), + beta=np.squeeze(beta_var.val), + epsilon=epsilon_var, + name=out_name + "_instancenorm" if is_require_rank4_transpose else out_name, + before_op=end_op, + ) + else: # is_layernorm + x = mb.layer_norm( + x=x if is_require_rank4_transpose else reduce_op.x, + axes=axes, + gamma=gamma_var, + beta=beta_var, + epsilon=epsilon_var, + name=out_name + "_layernorm" if is_require_rank4_transpose else out_name, + before_op=end_op, + ) + if is_require_rank4_transpose: + x = mb.transpose( + x=x, + perm=[0, 2, 3, 1], + name=out_name + "_transpose_nchw_nhwc", + before_op=end_op, + ) + + end_op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=end_op, old_var=end_op.outputs[0], new_var=x + ) + # 
Remove all the ops at once + block.remove_ops(ops_to_remove) + return True + + def _try_match_and_transform_pattern_1(self, reduce_op, block) -> bool: + """ + Identify the pattern: + + ``y = gamma * (x - mean) / sqrt(variance + epsilon) + beta`` + + ``y = x * [gamma * rsqrt(variance + eps)] + (beta - mean * [gamma * rsqrt(variance + eps)])`` + + .. code-block:: + + x --> reduce_mean --> sub --> square --> reduce_mean --> add(epsilon) --> rsqrt + | | ^ | + | | | V + |----------------------- mul (gamma) + | | | + | | --------|--------- + | | | | + | | | V + | |----------------------------------------------------------------> mul + | | | + | V | + |--------------------------------------------------------------> mul | + | V + | sub (beta) --> add --> [...] + | ^ + |------------------------------- + + This pattern corresponds to either ``layer_norm`` or ``instance_norm``. + + It is ``instance_norm`` if all of the following are true: + - ``input`` is rank 4. + - ``axes`` of ``reduce_mean`` is ``[-2, -1]`` or ``[-3, -2]`` + (when ``[-3, -2]``, a channel first to channel last transpose would be inserted). + - ``gamma`` and ``beta`` are rank 1, after ``squeeze``. + + It is ``layer_norm`` if all of the following are true: + - ``axes`` is either ``[-1]``, ``[-1, -2]``, or ``[-1, -2, -3]``, and so on. + - ``rank`` of ``gamma`` and ``beta`` is equal to the length of the ``axes``. + """ + ops_to_remove = [] + root_var = reduce_op.x + + if root_var.shape is None: + return False + + # check that root_var feeds into exactly 3 ops + if len(list(root_var.child_ops)) != 3: + return False + if root_var.op is not None and not self._check_child_op_types( + root_var.op, child_op_types=["reduce_mean", "sub", "mul"] + ): + return False + + # check 1st reduce_mean op + if not self._check_reduce_op(reduce_op): + return False + ops_to_remove.append(reduce_op) + + # check 1st sub op + if not self._check_child_op_types(reduce_op, ["sub", "mul"], check_order=False): + return False + child_ops_reduce_mean = list(reduce_op.outputs[0].child_ops) + op_a = child_ops_reduce_mean[0] + op_b = child_ops_reduce_mean[1] + sub_op1 = op_a if op_a.op_type == "sub" else op_b + if not (sub_op1.x == root_var and sub_op1.y == reduce_op.outputs[0]): + return False + ops_to_remove.append(sub_op1) + + # check square op + square_op = self._try_get_child_op_type(sub_op1, "square") + if square_op is None: + return False + ops_to_remove.append(square_op) + + # check second reduce mean + reduce_op2 = self._try_get_child_op_type(square_op, "reduce_mean") + if not self._check_reduce_op(reduce_op2): + return False + ops_to_remove.append(reduce_op2) + + # check add op (with epsilon) + add_op1 = self._try_get_child_op_type(reduce_op2, "add") + if add_op1 is None: + return False + epsilon_var = add_op1.y if add_op1.x == reduce_op2.outputs[0] else add_op1.x + if epsilon_var.val is None or len(epsilon_var.val.shape) != 0: + return False # must be scalar + ops_to_remove.append(add_op1) + + # check rsqrt + rsqrt_op = self._try_get_child_op_type(add_op1, "rsqrt") + if rsqrt_op is None: + return False + ops_to_remove.append(rsqrt_op) + + # check mul (gamma) + mul_op1 = self._try_get_child_op_type(rsqrt_op, "mul") + if mul_op1 is None: + return False + gamma_var = mul_op1.y if mul_op1.x == rsqrt_op.outputs[0] else mul_op1.x + if gamma_var.val is None: + return False + ops_to_remove.append(mul_op1) + + # check 2 muls after the gamma mul + if not self._check_child_op_types(mul_op1, ["mul", "mul"]): + return False + child_ops = list(mul_op1.outputs[0].child_ops) 
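+        # the two consumers of the gamma-scaled rsqrt are x * scale and
+        # mean * scale, in either order; the checks below disambiguate them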
+        mul_op2 = child_ops[0]
+        mul_op3 = child_ops[1]
+        mul_op2_other_var = mul_op2.x if mul_op2.y == mul_op1.outputs[0] else mul_op2.y
+        mul_op3_other_var = mul_op3.x if mul_op3.y == mul_op1.outputs[0] else mul_op3.y
+        if not (
+            (mul_op2_other_var == root_var and mul_op3_other_var == reduce_op.outputs[0])
+            or (mul_op2_other_var == reduce_op.outputs[0] and mul_op3_other_var == root_var)
+        ):
+            return False
+        if mul_op2_other_var == root_var:
+            mul_root_op = mul_op2
+            mul_mean_op = mul_op3
+        else:
+            mul_root_op = mul_op3
+            mul_mean_op = mul_op2
+        ops_to_remove.append(mul_mean_op)
+        ops_to_remove.append(mul_root_op)
+
+        # check sub with beta
+        sub_op2 = self._try_get_child_op_type(mul_mean_op, "sub")
+        if sub_op2 is None:
+            return False
+        if sub_op2.y != mul_mean_op.outputs[0]:
+            return False
+        beta_var = sub_op2.x
+        if beta_var.val is None:
+            return False
+        ops_to_remove.append(sub_op2)
+
+        # check last add op
+        add_op2 = self._try_get_child_op_type(sub_op2, "add")
+        if add_op2 is None:
+            return False
+        if not (add_op2.x == mul_root_op.outputs[0] or add_op2.y == mul_root_op.outputs[0]):
+            return False
+        ops_to_remove.append(add_op2)
+
+        return self._try_apply_transform(
+            reduce_op, block, gamma_var, beta_var, epsilon_var, add_op2, ops_to_remove
+        )
+
+    def _try_match_and_transform_pattern_2(self, reduce_op, block) -> bool:
+        """
+        Identify the pattern:
+
+        ``y = (x - mean) / pow(variance + epsilon, 0.5) * gamma + beta``
+
+        This pattern corresponds to, and should be fused as, ``instance_norm``.
+
+        All of the following conditions must be satisfied:
+
+        1. ``input`` is rank 4 tensor.
+        2. ``reduce`` operates on spatial dimensions ``axes=[-2, -1]``, or ``axes=[-3, -2]`` (a
+           channel first to channel last transpose would be inserted in such cases).
+        3. ``gamma`` and ``beta`` are both shape ``(C,)`` after ``squeeze``, where ``C`` is number of channels.
+
+        .. code-block::
+
+            |----> sub -----|                     const (0.5)
+            |       ^       |                          |
+            |       |       V                          V
+            x ---> mean  square --> mean1 --> add_eps ---> pow     const_gamma   const_beta
+            |       |                                       |           |            |
+            |       V                                       V           V            V
+            |----> sub1 --------------------------------> real_div --> mul_gamma --> add_beta --> ...
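+
+        Note that ``pow(v, 0.5)`` equals ``sqrt(v)``, so the ``real_div`` above
+        divides by ``sqrt(variance + epsilon)``, i.e. by the standard deviation.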
+ """ + ops_to_remove = [] + root_var = reduce_op.x + + if root_var.shape is None: + return False + + # check that root_var feeds into exactly 3 ops + if len(root_var.child_ops) != 3: + return False + if root_var.op is not None and not self._check_child_op_types( + root_var.op, child_op_types=["reduce_mean", "sub", "sub"] + ): + return False + + # check 1st reduce_mean op + if not self._check_reduce_op(reduce_op): + return False + ops_to_remove.append(reduce_op) + + # check 1st sub op + if not self._check_child_op_types(reduce_op, ["sub", "sub"]): + return False + child_ops_reduce_mean = list(reduce_op.outputs[0].child_ops) + reduce_mean_child_op_a = child_ops_reduce_mean[0] + reduce_mean_child_op_b = child_ops_reduce_mean[1] + # One of sub op directly goes square, the other one goes real_div + if list(reduce_mean_child_op_a.outputs[0].child_ops)[0].op_type == "square": + sub_op0 = reduce_mean_child_op_a + sub_op1 = reduce_mean_child_op_b + else: + sub_op0 = reduce_mean_child_op_b + sub_op1 = reduce_mean_child_op_a + if not (sub_op0.x == root_var and sub_op0.y == reduce_op.outputs[0]): + return False + if not (sub_op1.x == root_var and sub_op1.y == reduce_op.outputs[0]): + return False + ops_to_remove.append(sub_op0) + ops_to_remove.append(sub_op1) + + # check square op + square_op = self._try_get_child_op_type(sub_op0, "square") + if square_op is None: + return False + ops_to_remove.append(square_op) + + # check second reduce mean + reduce_op2 = self._try_get_child_op_type(square_op, "reduce_mean") + if not self._check_reduce_op(reduce_op2): + return False + ops_to_remove.append(reduce_op2) + + # check add op (with epsilon) + add_eps_op = self._try_get_child_op_type(reduce_op2, "add") + if add_eps_op is None: + return False + epsilon_var = add_eps_op.y if add_eps_op.x == reduce_op2.outputs[0] else add_eps_op.x + if epsilon_var.val is None or len(epsilon_var.val.shape) != 0: + return False # must be scalar + ops_to_remove.append(add_eps_op) + + # check pow + pow_op = self._try_get_child_op_type(add_eps_op, "pow") + if pow_op is None: + return False + if pow_op.y.val is None or not np.isclose(pow_op.y.val, 0.5): + return False + ops_to_remove.append(pow_op) + + # check real_div + real_div_op = self._try_get_child_op_type(pow_op, "real_div") + if real_div_op is None: + return False + if not (real_div_op.x == sub_op1.outputs[0] and real_div_op.y == pow_op.outputs[0]): + return False + ops_to_remove.append(real_div_op) + + # check mul with gamma + mul_gamma_op = self._try_get_child_op_type(real_div_op, "mul") + if mul_gamma_op is None: + return False + gamma_var = mul_gamma_op.y if mul_gamma_op.x == real_div_op.outputs[0] else mul_gamma_op.x + if gamma_var.val is None: + return False + ops_to_remove.append(mul_gamma_op) + + # check add with beta + add_beta_op = self._try_get_child_op_type(mul_gamma_op, "add") + if add_beta_op is None: + return False + beta_var = add_beta_op.y if add_beta_op.x == mul_gamma_op.outputs[0] else add_beta_op.x + if beta_var.val is None: + return False + ops_to_remove.append(add_beta_op) + + return self._try_apply_transform( + reduce_op, block, gamma_var, beta_var, epsilon_var, add_beta_op, ops_to_remove + ) + + def _try_match_and_transform_pattern_3(self, reduce_op, block) -> bool: + """ + Detect ``InstanceNorm`` pattern in TensorFlow-Addons. + + This pattern corresponds to, and should be fused as, ``instance_norm``. + + All of the following conditions must be satisfied: + + 1. ``input`` is rank 4 tensor. + 2. 
``reduce`` operates on spatial dimensions ``axes=[-2, -1]``, or ``axes=[-3, -2]`` (a + channel first to channel last transpose would be inserted in such cases). + 3. ``gamma`` and ``beta`` are absent. Default values for ``gamma`` and ``beta`` would be used. + + .. code-block:: + + |-------------------------------------------------| + | | + | V + x --> mean square --> mean1 --> add_eps --> rsqrt --> mul2 --> mul_sub + | | ^ | | + | V | | | + | --> sub -----| | | + | V V + |--------------------------------------------> mul1 -------------> add --> ... + """ + ops_to_remove = [] + root_var = reduce_op.x + + if root_var.shape is None: + return False + + # check that root_var feeds into exactly 3 ops + if len(root_var.child_ops) != 3: + return False + if root_var.op is not None and not self._check_child_op_types( + root_var.op, ["sub", "mul", "reduce_mean"] + ): + return False + + # check 1st reduce_mean op + if not self._check_reduce_op(reduce_op): + return False + ops_to_remove.append(reduce_op) + + # check 1st sub op + if not self._check_child_op_types(reduce_op, ["sub", "mul"], check_order=False): + return False + child_ops_reduce_mean = list(reduce_op.outputs[0].child_ops) + reduce_mean_child_op_a = child_ops_reduce_mean[0] + reduce_mean_child_op_b = child_ops_reduce_mean[1] + sub_op1 = ( + reduce_mean_child_op_a + if reduce_mean_child_op_a.op_type == "sub" + else reduce_mean_child_op_b + ) + if not (sub_op1.x == root_var and sub_op1.y == reduce_op.outputs[0]): + return False + ops_to_remove.append(sub_op1) + + # check square op + square_op = self._try_get_child_op_type(sub_op1, "square") + if square_op is None: + return False + ops_to_remove.append(square_op) + + # check second reduce mean + reduce_op2 = self._try_get_child_op_type(square_op, "reduce_mean") + if reduce_op2 is None or not self._check_reduce_op(reduce_op2): + return False + ops_to_remove.append(reduce_op2) + + # check add op (with epsilon) + add_eps_op = self._try_get_child_op_type(reduce_op2, "add") + if add_eps_op is None: + return False + epsilon_var = add_eps_op.y if add_eps_op.x == reduce_op2.outputs[0] else add_eps_op.x + if epsilon_var.val is None or len(epsilon_var.val.shape) != 0: + return False # must be scalar + ops_to_remove.append(add_eps_op) + + # check rsqrt + rsqrt_op = self._try_get_child_op_type(add_eps_op, "rsqrt") + if rsqrt_op is None: + return False + ops_to_remove.append(rsqrt_op) + + # check mul 1 + mul_op1 = self._try_get_child_op_type(rsqrt_op, "mul") + if mul_op1 is None: + return False + if not ( + (mul_op1.x == root_var and mul_op1.y == rsqrt_op.outputs[0]) + or (mul_op1.x == rsqrt_op.outputs[0] and mul_op1.y == root_var) + ): + return False + ops_to_remove.append(mul_op1) + + # check mul 2 + mul_op2 = self._try_get_child_op_type(rsqrt_op, "mul", index=1) + if mul_op2 is None: + return False + if not ( + (mul_op2.x == reduce_op.outputs[0] and mul_op2.y == rsqrt_op.outputs[0]) + or (mul_op2.x == rsqrt_op.outputs[0] and mul_op2.y == reduce_op.outputs[0]) + ): + return False + ops_to_remove.append(mul_op2) + + # check mul (sub) + mul_sub_op = self._try_get_child_op_type(mul_op2, "mul") + if mul_sub_op is None: + return False + if mul_sub_op.y.val is None or mul_sub_op.y.val != -1: + return False + ops_to_remove.append(mul_sub_op) + + # check last add op + add_op = self._try_get_child_op_type(mul_sub_op, "add") + if add_op is None: + return False + if not ( + (add_op.x == mul_op1.outputs[0] and add_op.y == mul_sub_op.outputs[0]) + or (add_op.x == mul_sub_op.outputs[0] and add_op.y == 
mul_op1.outputs[0]) + ): + return False + ops_to_remove.append(add_op) + + gamma_var = mb.const( + val=np.ones(shape=(1, root_var.shape[1], 1, 1)), + name="_fuse_layernorm_or_instancenorm_gamma", + ) + beta_var = mb.const( + val=np.zeros(shape=(1, root_var.shape[1], 1, 1)), + name="_fuse_layernorm_or_instancenorm_beta", + ) + + return self._try_apply_transform( + reduce_op, block, gamma_var, beta_var, epsilon_var, add_op, ops_to_remove + ) + + def _try_match_and_transform_pattern_4(self, reduce_op: Operation, block: Block) -> bool: + """ + Identify the pattern: + + ``y = x * [gamma * rsqrt(variance + eps)] + (beta - mean * [gamma * rsqrt(variance + eps)])`` + + This pattern corresponds to, and should be fused as, ``instance_norm``. + + All of the following conditions must be satisfied: + + 1. ``input`` is rank 4 tensor. + 2. ``reduce`` operates on spatial dimensions ``axes=[-2, -1]`` or ``axes=[-3, -2]`` (a + channel first to channel last transpose would be inserted in such cases). + 3. ``gamma`` and ``beta`` are both shape ``(C,)`` after ``squeeze``, where ``C`` is number of channels. + + .. code-block:: + + |-----------| + | V + |------> mul_square1 -----> sum1 -----> mul_mean1 + | | + | V + x --> sum --> mul_mean ==> mul_square --> sub_variance --> add_eps --> rsqrt + | | | + | | V + | | mul_gamma + | | | + | | |----------------| + | | | V + | |--------------------------------------------+-------------> mul2 + | V | + |----------------------------------------------------------> mul1 | + | V + | sub_beta --> add --> [...] + | ^ + |---------------------------| + """ + ops_to_remove = [] + root_var = reduce_op.x + + if root_var.shape is None: + return False + + # check that root_var feeds into exactly 4 ops + if len(root_var.child_ops) != 4: + return False + if root_var.op is not None and not self._check_child_op_types( + root_var.op, child_op_types=["mul", "mul", "reduce_sum", "mul"] + ): + return False + + # check 1st reduce_sum op + if not self._check_reduce_op(reduce_op, mode="reduce_sum"): + return False + ops_to_remove.append(reduce_op) + + # check mul (mean) op + mul_mean_op = self._try_get_child_op_type(reduce_op, "mul") + if mul_mean_op is None: + return False + if mul_mean_op.y.shape != (): + return False + ops_to_remove.append(mul_mean_op) + + # check 1st mul (square) op + if not self._check_child_op_types(mul_mean_op, child_op_types=["mul", "mul", "mul"]): + return False + # both 0 and 1 should be mul square op + mul_square_op = self._try_get_child_op_type(mul_mean_op, "mul") + if mul_square_op is None: + return False + if self._try_get_child_op_type(mul_mean_op, "mul", index=1) is None: + return False + ops_to_remove.append(mul_square_op) + + # Check another branch + + # check 2nd mul (square) op + # both 0 and 1 should be mul square op 1 + mul_square_op2 = list(root_var.child_ops)[0] + ops_to_remove.append(mul_square_op2) + + # check 2nd reduce sum + reduce_op2 = self._try_get_child_op_type(mul_square_op2, child_op_type="reduce_sum") + if not self._check_reduce_op(reduce_op2, "reduce_sum"): + return False + ops_to_remove.append(reduce_op2) + + # check mul after 2nd reduce op + mul_mean_op2 = self._try_get_child_op_type(reduce_op2, "mul") + if mul_mean_op2 is None: + return False + if mul_mean_op2.y.shape != (): + return False + ops_to_remove.append(mul_mean_op2) + + # check sub (variance) + sub_variance_op = self._try_get_child_op_type(mul_mean_op2, "sub") + if sub_variance_op is None: + return False + if sub_variance_op.y != mul_square_op.outputs[0]: + return False + 
ops_to_remove.append(sub_variance_op)
+
+        # check add op (epsilon)
+        add_eps_op = self._try_get_child_op_type(sub_variance_op, "add")
+        if add_eps_op is None:
+            return False
+        epsilon_var = add_eps_op.y if add_eps_op.x == sub_variance_op.outputs[0] else add_eps_op.x
+        if epsilon_var.val is None or len(epsilon_var.val.shape) != 0:
+            return False  # must be scalar
+        ops_to_remove.append(add_eps_op)
+
+        # check rsqrt
+        rsqrt_op = self._try_get_child_op_type(add_eps_op, "rsqrt")
+        if rsqrt_op is None:
+            return False
+        ops_to_remove.append(rsqrt_op)
+
+        # check mul (gamma)
+        mul_gamma_op = self._try_get_child_op_type(rsqrt_op, "mul")
+        if mul_gamma_op is None:
+            return False
+        gamma_var = mul_gamma_op.y if mul_gamma_op.x == rsqrt_op.outputs[0] else mul_gamma_op.x
+        if gamma_var.val is None:
+            return False
+        ops_to_remove.append(mul_gamma_op)
+
+        # check 2 muls after the gamma mul
+        if not self._check_child_op_types(mul_gamma_op, ["mul", "mul"]):
+            return False
+        mul_gamma_child_ops = list(mul_gamma_op.outputs[0].child_ops)
+        mul_op1 = mul_gamma_child_ops[0]
+        mul_op2 = mul_gamma_child_ops[1]
+        mul_op1_other_var = mul_op1.x if mul_op1.y == mul_gamma_op.outputs[0] else mul_op1.y
+        mul_op2_other_var = mul_op2.x if mul_op2.y == mul_gamma_op.outputs[0] else mul_op2.y
+        if not (
+            (mul_op1_other_var == root_var and mul_op2_other_var == mul_square_op.x)
+            or (mul_op1_other_var == mul_square_op.x and mul_op2_other_var == root_var)
+        ):
+            return False
+        # make mul_op1 the mul that consumes root_var (x), and mul_op2 the one that consumes the mean
+        if mul_op1_other_var != root_var:
+            mul_op1, mul_op2 = mul_op2, mul_op1
+        ops_to_remove.append(mul_op1)
+        ops_to_remove.append(mul_op2)
+
+        # check sub with beta
+        sub_beta_op = self._try_get_child_op_type(mul_op2, "sub")
+        if sub_beta_op is None:
+            return False
+        if sub_beta_op.y != mul_op2.outputs[0]:
+            return False
+        beta_var = sub_beta_op.x
+        if beta_var.val is None:
+            return False
+        ops_to_remove.append(sub_beta_op)
+
+        # check last add op
+        add_op = self._try_get_child_op_type(sub_beta_op, "add")
+        if add_op is None:
+            return False
+        if not (
+            (add_op.x == mul_op1.outputs[0] and add_op.y == sub_beta_op.outputs[0])
+            or (add_op.y == mul_op1.outputs[0] and add_op.x == sub_beta_op.outputs[0])
+        ):
+            return False
+        ops_to_remove.append(add_op)
+
+        return self._try_apply_transform(
+            reduce_op, block, gamma_var, beta_var, epsilon_var, add_op, ops_to_remove
+        )
+
+    @block_context_manager
+    def _fuse_layernorm_or_instancenorm_block(self, block: Block):
+        fusion_status = False
+        for i, op in enumerate(list(block.operations)):
+            for b in op.blocks:
+                block_changed = True
+                while block_changed:
+                    block_changed = self._fuse_layernorm_or_instancenorm_block(b)
+            if len(op.blocks) > 0:
+                continue
+
+            # start pattern match if reduce_mean op is encountered
+            if op.op_type == "reduce_mean":
+                if fusion_status is False:
+                    fusion_status = self._try_match_and_transform_pattern_1(op, block)
+                if fusion_status is False:
+                    fusion_status = self._try_match_and_transform_pattern_2(op, block)
+                if fusion_status is False:
+                    fusion_status = self._try_match_and_transform_pattern_3(op, block)
+                # has to break as the downstream iterator is affected.
+                if fusion_status:
+                    return fusion_status
+            elif op.op_type == "reduce_sum":
+                if fusion_status is False:
+                    fusion_status = self._try_match_and_transform_pattern_4(op, block)
+                # has to break as the downstream iterator is affected.
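+                # (a successful fusion returns True, and the caller's while-loop
+                # re-runs this method on the mutated block)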
+                if fusion_status:
+                    return fusion_status
+        return fusion_status
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_repeat_ops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_repeat_ops.py
new file mode 100644
index 00000000..e7f7c95c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_repeat_ops.py
@@ -0,0 +1,1755 @@
+# Copyright (c) 2023, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import copy
+from collections import defaultdict
+from typing import List, Text
+
+import numpy as np
+
+from coremltools import _logger as logger
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
+from coremltools.converters.mil.mil.passes.helper import _check_child_op_type, block_context_manager
+from coremltools.converters.mil.mil.passes.pass_registry import register_pass
+from coremltools.converters.mil.mil.types.symbolic import any_symbolic
+from coremltools.converters.mil.mil.var import Var
+
+
+@register_pass(namespace="common")
+class merge_consecutive_paddings(AbstractGraphPass):
+    """
+    Identify two consecutive ``pad`` layers which could be merged into a single ``pad`` layer.
+
+    This is possible only if one of the following conditions is satisfied:
+
+    - The paddings are "constant" and have the same ``constant_val``.
+    - The paddings act along different axes.
+
+    .. code-block::
+
+        Input graph:
+        input(1, 2, 6, 8) ------> pad([1, 1], mode='reflect') -----> pad([1, 1, 0, 0], mode='reflect') ---> out(1, 2, 8, 10)
+
+        Output graph:
+        input(1, 2, 6, 8) ------> pad([1, 1, 1, 1], mode='reflect') ---> out(1, 2, 8, 10)
+    """
+
+    def apply(self, prog):
+        for f in prog.functions.values():
+            block_changed = True
+            while block_changed:
+                block_changed = self._merge_padding_block(f)
+
+    def _match_pattern(self, block, padding_op):
+
+        if padding_op.op_type != "pad":
+            return False
+
+        if not _check_child_op_type(padding_op, "pad"):
+            return False
+
+        child_padding_op = list(padding_op.outputs[0].child_ops)[0]
+
+        if padding_op.inputs["mode"].val != child_padding_op.inputs["mode"].val:
+            return False
+
+        # Ensure the paddings have the same length by prepending zeros to the shorter one
+        first_pad = padding_op.inputs["pad"].val
+        child_pad = child_padding_op.inputs["pad"].val
+        if len(first_pad) > len(child_pad):
+            child_pad = np.insert(child_pad, 0, [0] * (len(first_pad) - len(child_pad)))
+        elif len(child_pad) > len(first_pad):
+            first_pad = np.insert(first_pad, 0, [0] * (len(child_pad) - len(first_pad)))
+        final_pad = child_pad + first_pad
+
+        if padding_op.inputs["mode"].val == "constant":
+            # if the padding is constant, then the values need to be equal
+            if padding_op.inputs["constant_val"].val != child_padding_op.inputs["constant_val"].val:
+                return False
+        else:
+            # if the padding is not constant, then we can't merge if both pads affected the same
+            # side of the image
+            if any(i != 0 and j != 0 for (i, j) in zip(first_pad, child_pad)):
+                return False
+
+        return self._replace_ops(block, padding_op, child_padding_op, final_pad)
+
+    @staticmethod
+    def _replace_ops(block, padding_op, child_padding_op, final_pad):
+        mode = padding_op.inputs["mode"].val
+        x = mb.pad(
+            x=padding_op.inputs["x"],
pad=final_pad, + mode=mode, + constant_val=padding_op.inputs["constant_val"].val, + before_op=padding_op, + ) + padding_op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=padding_op, old_var=child_padding_op.outputs[0], new_var=x + ) + block.remove_ops([padding_op, child_padding_op]) + return True + + @block_context_manager + def _merge_padding_block(self, block): + for op in list(block.operations): + result = self._match_pattern(block, op) + if result: + return True + return False + +@register_pass(namespace="common") +class merge_consecutive_transposes(AbstractGraphPass): + """ + Identify consecutive 'transpose' layers which could be merged into a single 'transpose' layer. + + .. code-block:: + + Input graph: + input ------> transpose -----> 1 or more transpose layers ---> out + + Output graph: + input ------> transpose ---> out + """ + + def apply(self, prog): + for f in prog.functions.values(): + self._merge_transposes_in_block(f) + + def _match_and_replace_pattern(self, block, transpose_op): + if not (transpose_op.op_type == "transpose" and _check_child_op_type(transpose_op, "transpose")): + return False + if transpose_op.outputs[0] in block.outputs: + return False + + child_transpose_op = list(transpose_op.outputs[0].child_ops)[0] + return self._replace_ops(block, transpose_op, child_transpose_op) + + @staticmethod + def _replace_ops(block, transpose_op, child_transpose_op): + perm = transpose_op.perm.val + new_perm = [perm[i] for i in child_transpose_op.perm.val] + x = mb.transpose(x=transpose_op.x, perm=new_perm, before_op=transpose_op) + if transpose_op.enclosing_block.try_replace_uses_of_var_after_op( + anchor_op=transpose_op, old_var=child_transpose_op.outputs[0], new_var=x, + ): + block.remove_ops([transpose_op, child_transpose_op]) + return True + return False + + @block_context_manager + def _merge_transposes_in_block(self, block): + def help_merge_transpose_ops(block): + for op in list(block.operations): + if self._match_and_replace_pattern(block, op): + return True + return False + + block_changed = True + while block_changed: + block_changed = help_merge_transpose_ops(block) + + +@register_pass(namespace="common") +class merge_consecutive_relus(AbstractGraphPass): + """ + Identify consecutive ``relu`` layers which could be merged into a single ``relu`` layer. + + .. 
code-block:: + + Input graph: + input ------> relu -----> 1 or more relu layers ---> out + + Output graph: + input ------> relu ---> out + """ + + def apply(self, prog): + for f in prog.functions.values(): + self._merge_relus_in_block(f) + + def _match_and_replace_pattern(self, block, relu_op): + if not (relu_op.op_type == "relu" and _check_child_op_type(relu_op, "relu")): + return False + + child_relu_op = list(relu_op.outputs[0].child_ops)[0] + return self._replace_ops(block, relu_op, child_relu_op) + + @staticmethod + def _replace_ops(block, relu_op, child_relu_op): + if relu_op.enclosing_block.try_replace_uses_of_var_after_op( + anchor_op=relu_op, old_var=child_relu_op.outputs[0], new_var=relu_op.outputs[0] + ): + block.remove_ops([child_relu_op]) + return True + return False + + @block_context_manager + def _merge_relus_in_block(self, block): + def help_merge_relu_ops(block): + for op in list(block.operations): + if self._match_and_replace_pattern(block, op): + return True + return False + + block_changed = True + while block_changed: + block_changed = help_merge_relu_ops(block) + + +@register_pass(namespace="common") +class merge_consecutive_reshapes(AbstractGraphPass): + """ + Identify consecutive ``reshape`` ops which could be merged into a single ``reshape``. + + .. code-block:: + + Input graph: + input -> reshape -> 1 or more reshapes -> output + + Output graph: + input -> reshape -> output + """ + + # TODO (rdar://105227587): merge a tree of consecutive reshapes + + def apply(self, prog): + for f in prog.functions.values(): + self._merge_consecutive_reshapes_block(f) + + @staticmethod + def _match_pattern(reshape_op): + """ + Given a ``reshape`` op, + consider it as the head of a sequence of ``reshape`` ops, and + then end the sequence at a non-removable ``reshape`` op. + Return this sequence as a list. + """ + res = [] + op = reshape_op + + while op.op_type == "reshape": + res.append(op) + + # current reshape has 0 or 2+ child ops: + # * no child: this is the end of graph + # * 2+ children: only pattern of sequential reshape ops (1 child) + # is supported for now. 
For more general cases, please see TODO below + if len(op.outputs[0].child_ops) != 1: + break + # current reshape output is a block output, so it is non-removable + if op.outputs[0] in op.enclosing_block.outputs: + break + + op = op.outputs[0].child_ops[0] + + return res + + @block_context_manager + def _merge_consecutive_reshapes_block(self, block): + def help_merge_consecutive_reshapes_block(block): + for op in block.operations: + for b in op.blocks: + block_changed = True + while block_changed: + block_changed = help_merge_consecutive_reshapes_block(b) + # move on to the next op if this op is not reshape + if op.op_type != "reshape": + continue + + reshape_ops = self._match_pattern(op) + # merge the list of consecutive reshape ops + if len(reshape_ops) > 1: + # create a new reshape op + reshape_out = mb.reshape( + x=reshape_ops[0].x, + shape=reshape_ops[-1].shape, + name=reshape_ops[-1].outputs[0].name, + before_op=reshape_ops[-1], + ) + # replace the consecutive reshape ops with the new reshape op + reshape_ops[-1].enclosing_block.replace_uses_of_var_after_op( + anchor_op=reshape_ops[-1], + old_var=reshape_ops[-1].outputs[0], + new_var=reshape_out, + ) + reshape_ops[-1].enclosing_block.remove_ops(reshape_ops) + return True + + return False + + block_changed = True + while block_changed: + block_changed = help_merge_consecutive_reshapes_block(block) + + +class CastOptimizationNode: + def __init__(self, op_type, match_criterion=None): + """ + Parameters + ---------- + + param op_type : Type of an operation. + param match_criterion : A callable function that matches a MIL op and returns a boolean. + + Examples + -------- + + .. sourcecode:: python + + CastOptimizationNode("mul"), + CastOptimizationNode("round"), + CastOptimizationNode("add", lambda op: op.y.val == 0), + CastOptimizationNode("clip", lambda op: op.alpha.val == -128 and op.beta.val == 127), + CastOptimizationNode("cast", lambda op: op.dtype.val == "int8"), + CastOptimizationNode("cast", lambda op: op.dtype.val == "fp32"), + + """ + + self.op_type = op_type + if not match_criterion: + match_criterion = lambda op: True + + self.match_criterion = match_criterion + + +@register_pass(namespace="common") +class cast_optimization(AbstractGraphPass): + """ + This optimization pass performs the following: + + - Removes redundant ``cast`` op; that is, ``cast`` where source and destination tensors have same dtypes. + - Either cancels or fuses any two consecutive `cast` ops, repeatedly. + + After this pass, there can't be any consecutive `cast` ops present in the program. + For examples, see ``TestCastOptimization``. + This is a non-algebraic translation which assumes that the upcasting doesn't change the user's intent. + + For example: + + .. code-block:: + + Input graph: + input -----> cast(dtype="fp16") -----> cast(dtype="fp32") ----> square ---> out + + Output graph: + input -----> square -----> out + + The input graph has a maximum precision of fp16 while the output graph has fp32 precision. + + """ + + def apply(self, prog): + for f in prog.functions.values(): + self._fuse_or_cancel_consecutive_casts_block_wrapper(f, {}) + + # main function's output_vars are treated differently, which are not handled by the method + # above, "_fuse_or_cancel_consecutive_casts_block". 
+ # For that, we invoke another method + block_changed = True + while block_changed: + block_changed = self._cancel_consecutive_casts_connected_to_outputs( + prog.functions["main"] + ) + + def _match_linear_pattern(self, root, pattern): + """ + Use Depth First Search to match the pattern + + :param root: operation + :param pattern: List[CastOptimizationNode] + :return: Return List[operation] if pattern matches entirely else [] + """ + op = root + if not pattern or len(op.outputs) != 1: + return [] + + node = pattern[0] + if op.op_type != node.op_type: + return [] + + if not node.match_criterion(op): + return [] + + for child in op.outputs[0].child_ops: + op_list = [op] + self._match_linear_pattern(child, pattern[1:]) + if len(op_list) == len(pattern): + return op_list + + return [] + + def _try_to_transform(self, root_op, cached_vars): + block = root_op.enclosing_block + + # Scenario: Redundant cast when source and destination dtype are same. + if root_op.op_type == "cast" and root_op.x.is_tensor_or_scalar_of(dtype=root_op.dtype.val): + block.replace_uses_of_var_after_op( + anchor_op=root_op, + old_var=root_op.outputs[0], + new_var=root_op.x, + ) + block.remove_ops([root_op]) + return True + + # Scenario: Consecutive casts + list_of_ops_in_pattern = self._match_linear_pattern( + root_op, + [ + CastOptimizationNode("cast"), + CastOptimizationNode("cast"), + ], + ) + + if not list_of_ops_in_pattern: + return False + + cast_1, cast_2 = list_of_ops_in_pattern + + fused_output_var_name = cast_1.x.name + "_to_{}".format(cast_2.dtype.val) + + if cast_1.x.is_tensor_or_scalar_of(dtype=cast_2.dtype.val): + # when consecutive casts cancel each other + # Please check out: test_linear_consecutive_cast_ops_cancellation in TestCastOptimization + new_output_var = cast_1.x + elif fused_output_var_name in cached_vars: + # When the output of 1 cast goes into multiple casts of same configuration + # Please check out: test_consecutive_fusable_casts_on_all_branches in TestCastOptimization + new_output_var = cached_vars[fused_output_var_name] + else: + new_output_var = mb.cast( + x=cast_1.x, + dtype=cast_2.dtype, + name=fused_output_var_name, + before_op=cast_2, + ) + cached_vars[fused_output_var_name] = new_output_var + + # It's important to use `cast_2.enclosing_block` over `block` since `cast_2` might be present in + # a block nested under `block` + cast_2.enclosing_block.replace_uses_of_var_after_op( + anchor_op=cast_2, + old_var=cast_2.outputs[0], + new_var=new_output_var, + ) + + # Remove just the last cast op and let dce eliminate the rest of the ops if needed, + # The reason is that first cast op could be feeding into other non-cast ops. + cast_2.enclosing_block.remove_ops([cast_2]) + return True + + @block_context_manager + def _fuse_or_cancel_consecutive_casts_block_wrapper(self, block, cached_vars): + def _fuse_or_cancel_consecutive_casts_block(block, cached_vars): + block_changed = False + for i, op in enumerate(list(block.operations)): + for b in op.blocks: + nested_block_changed = True + nested_block_cached_vars = {} + nested_block_cached_vars.update(cached_vars) + self._fuse_or_cancel_consecutive_casts_block_wrapper( + b, nested_block_cached_vars + ) + + if len(op.blocks) > 0: + continue + + # start pattern match if cast op is encountered + if op.op_type == "cast": + block_changed = self._try_to_transform(op, cached_vars) + # has to break as the downstream iterator is affected. 
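+                    # (cached_vars lets sibling casts with the same source var and
+                    # target dtype share a single fused cast op; see the note below)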
+                    if block_changed:
+                        return block_changed
+            return block_changed
+
+        block_changed = True
+        """
+        Cached vars are used when `all` of the following conditions are met:
+
+        1. The output of a ``cast`` is fed into multiple ``cast`` ops of same configuration.
+        2. These 2 consecutive ``cast`` ops can be fused into a single ``cast``.
+
+        When these conditions are satisfied, we create a `new` fused ``cast`` op `only` once, and
+        the outputs of all these consecutive ``cast`` ops are replaced with the output of this fused ``cast``.
+
+        .. code-block::
+
+            Input graph:
+                                    |---->cast(dtype="fp16")---->square--->out_1
+                                    |
+            input---->cast(dtype="int32")---->cast(dtype="fp16")---->relu--->out_2
+                                    |
+                                    |---->cast(dtype="fp16")---->log--->out_3
+
+            Output graph:
+                                    |---->square--->out_1
+                                    |
+            input---->new_fused_cast(dtype="fp16")---->relu--->out_2
+                                    |
+                                    |---->log--->out_3
+
+        """
+        while block_changed:
+            block_changed = _fuse_or_cancel_consecutive_casts_block(block, cached_vars)
+
+    @staticmethod
+    def _cancel_consecutive_casts_connected_to_outputs(block):
+        """
+        Let's say the ops in the block have the following pattern:
+            "some_op"---->{var1}---->"cast_op1"---->"cast_op2"--->{var2}
+        , where var2 is one of the outputs in block.outputs
+
+        If cast_op1 and cast_op2 can be cancelled, it means var1 and var2 are duplicates
+        of each other. The program can then be updated to
+            "some_op"---->{var1}
+        where var1 replaces var2 in block.outputs
+        This also requires replacing var1's name with var2's so that the model output names remain unchanged
+        """
+        new_output_vars = []
+        block_changed = False
+        for output_var in block.outputs:
+            cast_op2 = output_var.op
+            if cast_op2 is None:
+                # output is not produced by any op (e.g. a block input); keep it as is
+                new_output_vars.append(output_var)
+                continue
+            if cast_op2.op_type != "cast":
+                new_output_vars.append(output_var)
+                continue
+            cast_op1 = cast_op2.x.op
+            if cast_op1 is None:
+                new_output_vars.append(output_var)
+                continue
+            if cast_op1.op_type != "cast":
+                new_output_vars.append(output_var)
+                continue
+            var1 = cast_op1.x
+            if var1.op is None or var1.dtype != output_var.dtype:
+                new_output_vars.append(output_var)
+                continue
+            var1.set_name(output_var.name)
+            new_output_vars.append(var1)
+            block_changed = True
+
+        if block_changed:
+            block.set_outputs(new_output_vars)
+
+        return block_changed
+
+
+class TransformAxisUpdateOps:
+    """
+    Parent class for every axis update op's class
+
+    An axis update op is an op that can be updated, such that it can allow a transpose layer to "pass" through it.
+    That is,
+
+    op(transpose(x)) == transpose(op_updated(x))
+
+    where
+    "op" : original op,
+    "op_updated": op after being updated.
+
+    Example:
+
+    if x is a tensor of rank 2, and transpose has perm=[1,0],
+    then
+
+    reduce_mean[axis=1](transpose(x)) == transpose(reduce_mean[axis=0](x))
+
+    here reduce_mean op with axis=1 can be updated to a reduce_mean op with axis=0,
+    to allow the transpose to "pass" through it, i.e. get applied after it.
+
+    """
+
+    def __init__(self, op, transpose_axes, var_to_hypothetical_value_dict=None):
+        self.op = op
+        self.transpose_axes = transpose_axes
+        self.var_to_hypothetical_value_dict = var_to_hypothetical_value_dict
+
+    def can_transpose_pass(self):
+        """
+        Each "axis" op must determine whether it can act like a unary op
+        and allow the transpose to pass through.
+        Return True if it can allow the transpose to pass through, otherwise return False.
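+        For example (an illustrative note, not an exhaustive rule): a concat-like
+        op can typically let the transpose pass by remapping its axis parameter,
+        whereas an op whose output depends on the exact data layout cannot.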
+
+        :return: bool
+        """
+        raise NotImplementedError("This function must be implemented by each op")
+
+    def update(self):
+        """
+        A method that updates some attribute of the axis op,
+        based on the transpose axes value.
+        This method only gets called if "can_transpose_pass" returns True.
+
+        Update the op such that the output %i2 should be equal to %o2
+
+        Before:
+        %i1 = transpose_op(%i0, perm=transpose_axes)
+        %i2 = op(%i1)
+
+        After:
+        %o1 = op_updated(%i0)
+        %o2 = transpose_op(%o1, perm=transpose_axes)
+
+        :return: None
+        """
+        raise NotImplementedError("This function must be implemented by each op")
+
+    @staticmethod
+    def _find_transpose_compliment(perm):
+        """
+        Return the permutation that, when applied, will reverse the
+        effect of the given permutation.
+
+        e.g.: if perm == (1, 2, 3, 0), then return (3, 0, 1, 2), which will undo the
+        first permutation's effect
+        """
+        rank = len(perm)
+        all_positive_perm = [p + rank if p < 0 else p for p in perm]
+        perm_inverse = [0] * rank
+        for i in range(rank):
+            perm_inverse[i] = all_positive_perm.index(i)
+        return perm_inverse
+
+
+class _HypotheticalValue:
+    """
+    A hypothetical value that simply wraps a Var. The actual Var it wraps doesn't really matter, as
+    it's mainly for debugging.
+    This class really exists to differentiate a "_LazyTransposeHypotheticalValue" type from a
+    non-"_LazyTransposeHypotheticalValue" type.
+    """
+
+    def __init__(self, var=None):
+        self.value = var  # type : Var
+
+
+class _LazyTransposeHypotheticalValue:
+    """
+    A hypothetical value that represents a transpose op on top of a hypothetical value, or a
+    collection of transpose_ops, which have the same "perm" parameter.
+    """
+
+    def __init__(self, hypothetical_value, transpose_ops, perm):
+        # Input hypothetical value to the transpose op.
+        # When there are multiple transpose ops, this is the incoming hypothetical value to any one of those
+        self.wrapped_hypothetical_value = hypothetical_value  # type : _HypotheticalValue
+
+        if not isinstance(hypothetical_value, _HypotheticalValue):
+            raise ValueError(
+                "transpose optimization pass: incorrect type passed for hypothetical_value"
+            )
+
+        for op in transpose_ops:
+            if op.op_type != "transpose":
+                raise ValueError(
+                    "transpose optimization pass: _LazyTransposeHypotheticalValue can only be made with transpose ops"
+                )
+            perm_op = list(op.inputs["perm"].val)
+            if perm_op != perm:
+                raise ValueError(
+                    "transpose optimization pass: _LazyTransposeHypotheticalValue can only be made with transpose ops with the same 'perm' values"
+                )
+
+        self.perm = perm  # type : list[int], perm parameter of all the transpose ops
+        self.transpose_ops = transpose_ops  # type : Set(op)
+
+
+class _TransposeOptimization:
+    _DEBUG = False  # Set to true to plot the block before and after the transformation.
+
+    # Dictionary from axis update op to its class
+    # This is filled in by child classes of the class "TransformAxisUpdateOps".
+    _AXIS_UPDATE_OPS = dict()
+
+    # TODO: instead of a hard-coded set, use op-traits
+    # These are the ops that satisfy the following property:
+    # - single non constant input
+    # - single output
+    # - non rank changing
+    # - doesn't need to be updated if a transpose passes through it, i.e.
+ # Transpose(op(x)) == op(Transpose(x)) + _UNARY_LIKE_OP_TYPES = { + "relu", + "log", + "relu6", + "abs", + "acos", + "asin", + "atan", + "atanh", + "ceil", + "clip", + "cos", + "cosh", + "erf", + "exp", + "exp2", + "floor", + "identity", + "logical_not", + "round", + "rsqrt", + "sign", + "sin", + "sinh", + "sqrt", + "square", + "pow", + "tan", + "tanh", + "threshold", + "clamped_relu", + "elu", + "gelu", + "leaky_relu", + "linear_activation", + "scaled_tanh", + "sigmoid", + "sigmoid_hard", + "softplus", + "softplus_parametric", + "softsign", + "thresholded_relu", + } + + def __init__(self, block): + self.block = block + + # for each var in the block, this dictionary stores the hypothetical value that is assigned to it during + # graph traversal + self.var_to_hypothetical_value = ( + {} + ) # type : var : _HypotheticalValue or _LazyTransposeHypotheticalValue + # start out by filling this dictionary with all the inputs of the block + for _, input_var in block.inputs.items(): + self.var_to_hypothetical_value[input_var] = _HypotheticalValue(input_var) + + # Dictionaries below are used to store transpose cancellation/fusion information. + # These are filled during the traversal of the graph, + # after which they are used by the `_apply_transform` method + + # transpose op to the list of transpose ops that are its compliments and can be cancelled away with it + self.transpose_op_to_cancel_ops = defaultdict(lambda: []) # type : op : List[op] + + # transpose op to the list of ops before which it has to materialize, i.e. the root transpose op + # can be moved downstream in the graph, as far as these materialize ops + self.transpose_op_to_materialize_ops = defaultdict( + lambda: [] + ) # type : op : List[Tuple(op, Var)] + + # list of the ops that need to be updated (either their axis parameter or one of their constant inputs) + # if the transpose op is fused away or moved downstream in the graph + self.transpose_op_to_axis_update_ops = defaultdict(lambda: []) # type : op : List[op] + + # for book keeping + self.ops_updated = set() + self.materialized_ops_handled = set() + self.transpose_ops_removed = set() + + # save the output sinks' information + self.old_output_vars = [] + self.output_sink_ops = [] + + # We modify the graph temporarily for outputs + self._add_output_sinks() + + def _add_output_sinks(self): + # We add an identity sink for all outputs. 
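+        # Adding identity sinks lets the traversal treat block outputs uniformly as
+        # "materialize" points (the sink ops are visited as materialize ops); the sinks
+        # are stripped back out at the end of apply_transform().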
+        self.old_output_vars = {var: var.name for var in self.block.outputs}
+        new_outputs = []
+        output_sinks_var = {}
+        for out_var in self.block.outputs:
+            if out_var not in output_sinks_var:
+                out_sink = mb.identity(x=out_var)
+                output_sinks_var[out_var] = out_sink
+            else:
+                out_sink = output_sinks_var[out_var]
+            new_outputs.append(out_sink)
+            self.output_sink_ops.append(out_sink.op)
+        self.block.set_outputs(new_outputs)
+
+    def _visit_unary_like_op(self, op, input_var=None):
+        # pass the input var's hypothetical_value on to the output var, since shape-invariant
+        # ops do not modify the incoming hypothetical_value
+
+        if input_var is None:
+            input_var = op.inputs["x"]
+
+        if len(op.outputs) > 1:
+            msg = (
+                "transpose optimization pass: op '{}', of type = '{}', has multiple outputs, hence it "
+                "cannot be handled like a unary op"
+            )
+            raise ValueError(msg.format(op.name, op.op_type))
+        self.var_to_hypothetical_value[op.outputs[0]] = self.var_to_hypothetical_value[input_var]
+
+    def _visit_materialize_op(self, op):
+        # this is the catch-all category of ops,
+        # i.e. the "not-lazy-transpose-pass-through" kind of ops:
+        # each output var gets a fresh (non-lazy) hypothetical_value
+        for out_var in op.outputs:
+            self.var_to_hypothetical_value[out_var] = _HypotheticalValue(out_var)
+
+        # check the inputs:
+        # if there is a lazy transpose hypothetical value as an input,
+        # all the transpose ops it holds need to be materialized here,
+        # i.e., we should update "transpose_op_to_materialize_ops"
+        for input_var in self._get_input_vars(op):
+            input_hypothetical_value = self.var_to_hypothetical_value[input_var]
+            if isinstance(input_hypothetical_value, _LazyTransposeHypotheticalValue):
+                all_lazy_transpose_ops = input_hypothetical_value.transpose_ops
+                for transpose_op in all_lazy_transpose_ops:
+                    self.transpose_op_to_materialize_ops[transpose_op].append((op, input_var))
+
+    def _visit_axis_update_op(self, op):
+        """
+        Check that:
+        - at least one of the non-constant inputs to this op is of type _LazyTransposeHypotheticalValue
+        - all non-constant inputs that are of type _LazyTransposeHypotheticalValue
+          have the same perm value.
+        These checks are common for all "axis update" ops.
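+
+        A hypothetical sketch of a pattern that passes these checks (both lazy-transpose
+        inputs share the same perm, so the concat axis can later be remapped in update()):
+
+        .. code-block::
+
+            %t1 = transpose(%x, perm=[0, 2, 3, 1])
+            %t2 = transpose(%y, perm=[0, 2, 3, 1])
+            %c = concat(values=(%t1, %t2), axis=3)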
+ """ + input_vars = self._get_input_vars(op, only_nonconst_vars=True) + perm = None + num_lazy_input_vars = 0 + for var in input_vars: + hypothetical_value = self.var_to_hypothetical_value[var] + if isinstance(hypothetical_value, _LazyTransposeHypotheticalValue): + num_lazy_input_vars += 1 + if perm is None: + perm = hypothetical_value.perm + elif perm != hypothetical_value.perm: + self._visit_materialize_op(op) + return + + if num_lazy_input_vars == 0: + self._visit_materialize_op(op) + return + + # checks specific to the op type + op_cls = self._AXIS_UPDATE_OPS.get(op.op_type, None) + if op_cls is None: + raise ValueError("Transform class for op of type '{}' not found".format(op.op_type)) + + if not op_cls( + **{ + "op": op, + "transpose_axes": perm, + "var_to_hypothetical_value_dict": self.var_to_hypothetical_value, + } + ).can_transpose_pass(): + self._visit_materialize_op(op) + return + + # add this op to the dictionary "transpose_op_to_axis_update_ops" + # and update self.var_to_hypothetical_value[op.outputs[0]] + all_lazy_transpose_ops = set() + wrapped_hypothetical_value = None + for var in input_vars: + input_hypothetical_value = self.var_to_hypothetical_value[var] + if isinstance(input_hypothetical_value, _LazyTransposeHypotheticalValue): + all_lazy_transpose_ops.update(input_hypothetical_value.transpose_ops) + wrapped_hypothetical_value = input_hypothetical_value.wrapped_hypothetical_value + + for transpose_op in all_lazy_transpose_ops: + self.transpose_op_to_axis_update_ops[transpose_op].append(op) + + for output in op.outputs: + self.var_to_hypothetical_value[output] = _LazyTransposeHypotheticalValue( + wrapped_hypothetical_value, + all_lazy_transpose_ops, + perm, + ) + + @staticmethod + def _do_transposes_cancel(perm1, perm2): + if len(perm1) != len(perm2): + return False + x = list(range(len(perm1))) + x1 = [x[i] for i in perm1] + x2 = [x1[i] for i in perm2] + if x == x2: + return True + return False + + def _visit_transpose_op(self, op): + input_var = op.inputs["x"] + if op.inputs["perm"].val is None: + self._visit_materialize_op(op) + return + perm = list(op.inputs["perm"].val) + input_hypothetical_value = self.var_to_hypothetical_value[input_var] + + """ + There are 3 cases to handle: + + 1. input type == _HypotheticalValue + 2. input type == _LazyTransposeHypotheticalValue and this op is the transpose compliment of it + 3. input type == _LazyTransposeHypotheticalValue and this op is NOT the transpose compliment of it + """ + + if isinstance(input_hypothetical_value, _HypotheticalValue): + # case 1 + # the input is not a lazy transpose. + # Since the current node is a transpose, there are two sub-cases. + # a) It's a output node. We materialize it directly. + # b) It might get cancelled downstream, so make the output var's + # hypothetical_value a lazy transpose + if op.outputs[0] in self.old_output_vars: + self._visit_materialize_op(op) + else: + self.var_to_hypothetical_value[op.outputs[0]] = _LazyTransposeHypotheticalValue( + input_hypothetical_value, set([op]), perm + ) + return + + # input is a Lazy transpose hypothetical value. 
Lets first check whether the current + # transpose cancels it or not + do_cancel = self._do_transposes_cancel(input_hypothetical_value.perm, perm) + if do_cancel: + # case 2 + # transposes cancel, so now the hypothetical_value of the output will + # be same as the hypothetical value wrapped inside the upstream lazy transpose + self.var_to_hypothetical_value[ + op.outputs[0] + ] = input_hypothetical_value.wrapped_hypothetical_value + # also update the dictionary "transpose_op_to_cancel_ops" + all_lazy_transpose_ops = input_hypothetical_value.transpose_ops + for transpose_op in all_lazy_transpose_ops: + self.transpose_op_to_cancel_ops[transpose_op].append(op) + else: + # case 3 + # transposes don't cancel + # this is same as a materialize op then + self._visit_materialize_op(op) + + def _visit_op(self, op): + + input_vars = self._get_input_vars(op) + for var in input_vars: + assert ( + var in self.var_to_hypothetical_value + ), "transpose optimization pass: hypothetical value for var '{}', not found".format( + var.name + ) + + if op in self.output_sink_ops: + self._visit_materialize_op(op) + elif op.op_type in self._UNARY_LIKE_OP_TYPES: + self._visit_unary_like_op(op) + elif op.op_type in self._AXIS_UPDATE_OPS: + self._visit_axis_update_op(op) + elif op.op_type == "transpose": + self._visit_transpose_op(op) + elif op.op_type == "const": + self.var_to_hypothetical_value[op.outputs[0]] = _HypotheticalValue(op.outputs[0]) + else: + self._visit_materialize_op(op) + + def block_traversal(self): + + # Since the ops are already organized in a topological manner, + # simply iterate through all the ops + + for op in self.block.operations: + self._visit_op(op) + + def _verify_cancellable_transposes(self): + + # invert "transpose_op_to_cancel_ops" + transpose_cancel_ops_to_starting_transpose_set = defaultdict(lambda: set()) + for op, cancel_ops_list in self.transpose_op_to_cancel_ops.items(): + for cancel_op in cancel_ops_list: + transpose_cancel_ops_to_starting_transpose_set[cancel_op].update(set([op])) + + for op in transpose_cancel_ops_to_starting_transpose_set: + assert ( + op not in self.transpose_op_to_cancel_ops + ), "transpose reduction optimization: transpose op '{}' cannot be both a starting and cancel op".format( + op.name + ) + + # invert "transpose_op_to_materialize_ops" + materizalize_ops_to_starting_transpose_set = defaultdict(lambda: set()) + for op, materialize_ops in self.transpose_op_to_materialize_ops.items(): + for materialize_op, edge in materialize_ops: + materizalize_ops_to_starting_transpose_set[materialize_op].update(set([op])) + + # the starting transpose op may not be in "transpose_op_to_cancel_ops" + # but it needs to be removed if it materializes later, hence we need to add it + # to the "transpose_op_to_cancel_ops", with an empty value, i.e. no other ops to cancel because of it + if op not in self.transpose_op_to_cancel_ops: + self.transpose_op_to_cancel_ops[op] = [] + + # (starting transpose ops) and (transpose cancel ops + materialize ops) form a bipartite graph. 
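+        # The removal decision must be made per connected component: cancelling a starting
+        # transpose requires removing all of its cancel ops, and a shared cancel op in turn
+        # ties together every other starting transpose connected to it.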
+ # Find the connected components of this graph, by doing a BFS traversal + connected_components = [] # List[(Set(op), Set(op)), Set(op)] + visited = {} + for op in list(self.transpose_op_to_cancel_ops.keys()): + if op in visited: + continue + visited[op] = 1 + set_a = set([op]) # set of starting transpose ops + set_b1 = set() # set of transpose cancel ops connected to set_a + set_b2 = set() # set of materialize ops connected to set_a + queue = [] + queue.extend(self.transpose_op_to_cancel_ops[op]) + if op in self.transpose_op_to_materialize_ops: + materialize_ops_list = list(list(zip(*self.transpose_op_to_materialize_ops[op]))[0]) + queue.extend(materialize_ops_list) + while len(queue) > 0: + o = queue.pop(0) + visited[o] = 1 + # enque nodes connected to o + if o in self.transpose_op_to_cancel_ops: + set_a.update(set([o])) + for neighbor_op in self.transpose_op_to_cancel_ops[o]: + if neighbor_op not in visited: + queue.append(neighbor_op) + if o in self.transpose_op_to_materialize_ops: + materialize_ops_list = list( + list(zip(*self.transpose_op_to_materialize_ops[o]))[0] + ) + for neighbor_op in materialize_ops_list: + if neighbor_op not in visited: + queue.append(neighbor_op) + elif o in transpose_cancel_ops_to_starting_transpose_set: + set_b1.update(set([o])) + for neighbor_op in transpose_cancel_ops_to_starting_transpose_set[o]: + if neighbor_op not in visited: + queue.append(neighbor_op) + else: + set_b2.update(set([o])) + for neighbor_op in materizalize_ops_to_starting_transpose_set[o]: + if neighbor_op not in visited: + queue.append(neighbor_op) + connected_components.append((set_a, set_b1, set_b2)) + + starting_ops_to_remove = set() # starting ops to remove from the optimization list + + # now for each connected component, make a decision whether to cancel it or not + # (either all transpose ops in a set get cancelled or they don't) + for op_set, op_cancel_set, materialize_op_set in connected_components: + + block_output = False + # check that output is not directly connected to a starting transpose op + for op in op_set: + if op.outputs[0] in self.block.outputs: + starting_ops_to_remove.update(op_set) + block_output = True + break + if block_output: + continue + + materizalize_set = set(list(materialize_op_set)) + if len(materizalize_set) >= len(op_set) + len(op_cancel_set): + starting_ops_to_remove.update(op_set) + + # remove ops + for op in starting_ops_to_remove: + self.transpose_op_to_cancel_ops.pop(op, None) + + def _remove_transpose_ops(self, starting_transpose_op): + + perm = list(starting_transpose_op.inputs["perm"].val) + starting_transpose_op_out_var = starting_transpose_op.outputs[0] + starting_transpose_op_input_var = starting_transpose_op.inputs["x"] + + # update all the "axis_update" ops + for op in self.transpose_op_to_axis_update_ops.get(starting_transpose_op, []): + if op not in self.ops_updated: + op_cls = self._AXIS_UPDATE_OPS.get(op.op_type, None) + op_cls( + **{ + "op": op, + "transpose_axes": perm, + "var_to_hypothetical_value_dict": self.var_to_hypothetical_value, + } + ).update() + self.ops_updated.add(op) + + # short circuit starting_transpose_op and its cancel ops + + to_be_removed_ops = [] + name_changed_vars = set() + + for op in [starting_transpose_op] + self.transpose_op_to_cancel_ops[starting_transpose_op]: + if op in self.transpose_ops_removed: + continue + + to_be_removed_ops.append(op) + self.transpose_ops_removed.add(op) + + input_var = op.inputs["x"] # input to the transpose op + output_var = op.outputs[0] # output of the transpose op + 
parent_op = input_var.op # parent op of the transpose op + + if output_var in self.old_output_vars: + # output is a block output, so this must be one of the "edge" transpose compliment ops + # We need to set `input_var` as the block output var + # Change the name of the input_var to match the block output if input_var is not changed. + # If the same input_var is in output twice, we can't rename it twice, therefore we initiate an + # Identity op to match the name + if input_var in self.block.inputs.values(): + input_var = mb.identity(x=input_var, before_op=op, name=output_var.name) + parent_op = None # set anchor op as None. + elif input_var not in name_changed_vars: + input_var.name = output_var.name + input_var.op.name = output_var.op.name + name_changed_vars.update([input_var]) + else: + input_var = mb.identity(x=input_var, before_op=op, name=output_var.name) + parent_op = input_var.op + + # connect all the child ops of the output_var to the parent of the transpose op. + self.block.replace_uses_of_var_after_op( + anchor_op=parent_op, + old_var=output_var, + new_var=input_var, + no_check_var_types=True, + ) + + """ + Insert a transpose op JUST before each one of the materialize ops + i.e. + Given: %i1 = op(...) + ... + ... = materialize_op(..., %i1 ,...) + ... + + Result: %i1 = op(...) + ... + %i2 = transpose_op(%i1, %perm) + ... = materialize_op(..., %i2 ,...) + ... + """ + for op, input_var in self.transpose_op_to_materialize_ops.get(starting_transpose_op, []): + if (op, input_var) in self.materialized_ops_handled: + continue + + self.materialized_ops_handled.add((op, input_var)) + if input_var == starting_transpose_op_out_var: + # materialize op is connected to the starting transpose op + # in this case, connect to its parent + if op in self.output_sink_ops: + continue + i1 = starting_transpose_op_input_var + else: + i1 = input_var + + if op in self.output_sink_ops: + # The input_var of output sink is itself a output. We can safely + # modify the name of the input_var since it should only be consumed + # by block output here. + if i1 not in name_changed_vars: + x = mb.transpose(x=i1, perm=perm, before_op=op, name=i1.name) + i1.name = "_before_transpose_op_" + x.op.name + i1.op.name = "_before_transpose_op_" + x.op.name + else: + x = mb.transpose(x=i1, perm=perm, before_op=op, name=self.old_output_vars[i1]) + else: + x = mb.transpose(x=i1, perm=perm, before_op=op) + + self.block.replace_uses_of_var_after_op( + anchor_op=x.op, + end_op=op, + old_var=i1, + new_var=x, + no_check_var_types=True, + ) + + self.block.remove_ops(to_be_removed_ops) + + def apply_transform(self): + """ + Take in the data collected during graph traversal + and transform the graph by cancelling out transpose ops that can be removed. + """ + + logger.debug("Block before optimize transpose transform:\n{}".format(self.block)) + if self._DEBUG: + import graphviz + + graphviz.Source( + self.block.get_dot_string( + highlight_debug_op_names=[], highlight_debug_op_types=["transpose"] + ) + ).view(filename="/tmp/block_before_reduce_transpose") + + """ + First check which transposes can be cancelled. 
+ After this function call we get an updated dictionary "transpose_op_to_cancel_ops" + with only the transpose ops that can really be cancelled in the graph + Reasons to not cancel: + - materialize_ops are greater than cancel_ops, so removing transpose will instead end up increasing the count of transposes + - removing a transpose op can only be successful, if all of its cancel ops are removed, removing all the cancel ops + is only successful if all of their starting transpose ops are removed and so on. This check is also done in + "_verify_cancellable_transposes()" + """ + self._verify_cancellable_transposes() + + # apply transform + for transpose_op in self.transpose_op_to_cancel_ops: + self._remove_transpose_ops(transpose_op) + self.block.set_outputs([sink_op.x for sink_op in self.output_sink_ops]) + self.block.remove_ops(list(self.output_sink_ops)) + + if self._DEBUG: + graphviz.Source( + self.block.get_dot_string( + highlight_debug_op_names=[], highlight_debug_op_types=["transpose"] + ) + ).view(filename="/tmp/block_after_reduce_transpose") + + logger.debug("Block after optimize transpose transform:\n{}".format(self.block)) + + for op in self.block.operations: + op.type_value_inference(overwrite_output=True) + + @staticmethod + def register_axis_update_op(ops: List[Text]): + """ + :param ops: Ops that will be registered. For example: the class "_TransformReduceMean" can + be used to register ops including "reduce_prod", "reduce_sum" etc. + """ + + def class_wrapper(op_update_cls): + for op_type in ops: + if op_type in _TransposeOptimization._AXIS_UPDATE_OPS: + raise ValueError( + "Update class for op of type '{}' already defined".format(op_type) + ) + _TransposeOptimization._AXIS_UPDATE_OPS[op_type] = op_update_cls + return op_update_cls + + return class_wrapper + + @staticmethod + def _get_input_vars(op, only_nonconst_vars=False) -> List[Var]: + input_vars = [] + for name, val in op.inputs.items(): + if isinstance(val, Var): + if only_nonconst_vars: + if val.op and val.op.op_type == "const": + continue + input_vars.append(val) + elif isinstance(val, (list, tuple)): + for var in val: + if not isinstance(var, Var): + raise ValueError( + f"transpose optimization pass: unrecognized input type of " + f"op='{op.name}', input='{name}'" + ) + if only_nonconst_vars: + if var.op and var.op.op_type == "const": + continue + input_vars.append(var) + else: + raise ValueError( + f"transpose optimization pass: unrecognized input type of " + f"op='{op.name}', input='{name}'" + ) + return input_vars + + +@_TransposeOptimization.register_axis_update_op(ops=["concat"]) +class _TransformConcat(TransformAxisUpdateOps): + def __init__(self, **kwargs): + super(_TransformConcat, self).__init__(**kwargs) + self.axis_var = self.op.inputs["axis"] + + def can_transpose_pass(self): + # Check that all non const inputs are of type _LazyTransposeHypotheticalValue. + # That they have the same perm value has already been checked before. 
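+        # The concat axis must also be a compile-time constant, since update() remaps it
+        # through transpose_axes to compute the new axis value.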
+ input_vars = _TransposeOptimization._get_input_vars(self.op, only_nonconst_vars=True) + for var in input_vars: + hypothetical_value = self.var_to_hypothetical_value_dict[var] + if not isinstance(hypothetical_value, _LazyTransposeHypotheticalValue): + return False + if self.axis_var.val is not None: + return True + return False + + def update(self): + new_axis_val = self.transpose_axes[self.axis_var.val] + + # to be used, if there is a consant inputs to the concat op + self._update_const_inputs() + + # insert a new constant for the new axis, JUST before the op + with self.op.enclosing_block: + new_axis_var = mb.const(val=new_axis_val, before_op=self.op) + + self.op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=new_axis_var.op, + end_op=self.op, + old_var=self.axis_var, + new_var=new_axis_var, + no_check_var_types=True, + ) + + def _update_const_inputs(self): + transpose_perm_for_const = [0] * len(self.transpose_axes) + for i, axis in enumerate(self.transpose_axes): + transpose_perm_for_const[axis] = i + + # if there is a constant input, transpose it + inputs = list(self.op.inputs["values"]) + for input_var in inputs: + if input_var.op.op_type == "const": + const_val = input_var.val + new_const_val = np.transpose(const_val, transpose_perm_for_const) + # insert a new constant JUST before the op + with self.op.enclosing_block: + new_const_input_var = mb.const(val=new_const_val, before_op=self.op) + self.op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=new_const_input_var.op, + end_op=self.op, + old_var=input_var, + new_var=new_const_input_var, + no_check_var_types=True, + ) + + +@_TransposeOptimization.register_axis_update_op(ops=["split"]) +class _TransformSplit(_TransformConcat): + def __init__(self, **kwargs): + super(_TransformSplit, self).__init__(**kwargs) + + # The split op is handled the same as the concat op, except it does not need + # to transform const inputs + def _update_const_inputs(self): + pass + + +@_TransposeOptimization.register_axis_update_op(ops=["pad"]) +class _TransformPad(TransformAxisUpdateOps): + def __init__(self, **kwargs): + super(_TransformPad, self).__init__(**kwargs) + self.pad_var = self.op.inputs["pad"] + self.pad_op = self.pad_var.op + self.mode = self.op.mode.val + self.pad_amounts_new = None + + def _compute_new_pad_values(self): + pad_amounts = np.reshape(self.pad_var.val, [-1, 2]) + rank_diff = len(self.transpose_axes) - pad_amounts.shape[0] + self.pad_amounts_new = copy.deepcopy(pad_amounts) + # append "rank_diff" rows of zeros to the top + self.pad_amounts_new = np.concatenate( + (np.zeros((2 * rank_diff)).reshape(-1, 2), self.pad_amounts_new) + ) + self.pad_amounts_new = self.pad_amounts_new.astype(pad_amounts.dtype) + pad_amounts = np.concatenate((np.zeros((2 * rank_diff)).reshape(-1, 2), pad_amounts)) + for i, axis in enumerate(self.transpose_axes): + self.pad_amounts_new[axis][0] = pad_amounts[i][0] + self.pad_amounts_new[axis][1] = pad_amounts[i][1] + # get the top "rank_diff" rows + top_rows = self.pad_amounts_new[:rank_diff, :] + if not np.all(top_rows == 0): + return False + # cut "rank_diff" from the top + self.pad_amounts_new = self.pad_amounts_new[rank_diff:, :] + self.pad_amounts_new = self.pad_amounts_new.flatten() + return True + + def can_transpose_pass(self): + if ( + len(_TransposeOptimization._get_input_vars(self.op, only_nonconst_vars=True)) != 1 + or self.pad_op.op_type != "const" + ): + return False + if len(self.transpose_axes) < 2: + return False + if not self._compute_new_pad_values(): + return False + 
# check that if mode is not constant, the updated padding + # would stay limited to last 2 axes + if self.mode != "constant" and not np.all(self.pad_amounts_new[:-4] == 0): + return False + return True + + def update(self): + self._compute_new_pad_values() + # insert a new constant for pad val, JUST before the op + with self.op.enclosing_block: + new_pad_var = mb.const(val=self.pad_amounts_new, before_op=self.op) + self.op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=new_pad_var.op, + end_op=self.op, + old_var=self.pad_var, + new_var=new_pad_var, + no_check_var_types=True, + ) + + +@_TransposeOptimization.register_axis_update_op( + ops=[ + "reduce_l1_norm", + "reduce_l2_norm", + "reduce_max", + "reduce_log_sum", + "reduce_log_sum_exp", + "reduce_mean", + "reduce_min", + "reduce_prod", + "reduce_sum", + "reduce_sum_square", + ] +) +class _TransformReduceMean(TransformAxisUpdateOps): + def __init__(self, **kwargs): + super(_TransformReduceMean, self).__init__(**kwargs) + self.axes_var = self.op.inputs["axes"] + self.axes_op = self.axes_var.op + + def can_transpose_pass(self): + # allow transpose to push through it only if keep_dims are True since that doesn't change the rank + if self.op.inputs["keep_dims"].val: + if self.axes_op.op_type == "const": + return True + return False + + def update(self): + # update axis of the op + old_axes_val = self.axes_var.val + new_axes_val = [0] * len(old_axes_val) + for i, axis in enumerate(old_axes_val): + new_axes_val[i] = self.transpose_axes[axis] + + # insert a new constant for the axis, JUST before the op + with self.op.enclosing_block: + new_axis_var = mb.const(val=new_axes_val, before_op=self.op) + + self.op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=new_axis_var.op, + end_op=self.op, + old_var=self.axes_var, + new_var=new_axis_var, + no_check_var_types=True, + ) + + +@_TransposeOptimization.register_axis_update_op( + ops=["add", "mul", "sub", "real_div", "maximum", "minimum"] +) +class _TransformAdd(TransformAxisUpdateOps): + def __init__(self, **kwargs): + super(_TransformAdd, self).__init__(**kwargs) + # self.tranpose_input: this is the input coming from an upstream transpose op. If both inputs are + # connected to an upstream transpose, this will be set to one of those + # self.other_input: the other input, that is not coming from a transpose + is_x_input_lazy_transpose = isinstance( + self.var_to_hypothetical_value_dict[self.op.x], _LazyTransposeHypotheticalValue + ) + is_y_input_lazy_transpose = isinstance( + self.var_to_hypothetical_value_dict[self.op.y], _LazyTransposeHypotheticalValue + ) + if is_x_input_lazy_transpose and is_y_input_lazy_transpose: + self.other_input = None + self.tranpose_input = self.op.x + elif is_y_input_lazy_transpose and not is_x_input_lazy_transpose: + self.other_input = self.op.x + self.tranpose_input = self.op.y + elif is_x_input_lazy_transpose and not is_y_input_lazy_transpose: + self.other_input = self.op.y + self.tranpose_input = self.op.x + else: + # we should not be here since this class is only invoked, + # when there is at least one input var of type _LazyTransposeHypotheticalValue + self.tranpose_input = None + self.other_input = None + + def can_transpose_pass(self): + """ + Return True if the one of the following is true: + - (scenario 1) both inputs are of type _LazyTransposeHypotheticalValue, with the same perm value + - one input is of type _LazyTransposeHypotheticalValue and the other satisfies one of the following: + - (scenario 2) it is constant. 
In this case, the constant can be updated accordingly to allow the transpose to pass through + - (scenario 3) if its non constant, then all of the following must be true + - its shape is fully defined + - the transpose compliment operation on the other input can be expressed via a reshape. This can + be done if there is only 1 non unit dimension in its shape, or if there are more than 1 non unit dims, + the transpose compliment operation only permutes the unit dimensions. + + In scenario 3, the transpose will be removed, by adding an extra static reshape. + This is based on the assumption that a static reshape op will be less expensive than transpose. + An example of scenario 3 is displayed below: + + Input pattern: + + (shape=(10, 20, 30)) + | + | + V + Transpose op + (shape = (20, 30, 10)) + | + | + V + this op <--------- (shape = (10,)) (other non const input) + | + V + + + After transpose passes through: + + (shape=(10, 20, 30)) + | + | + V + this op <--------- (shape = (10, 1, 1)) Reshape op <---------- (shape = (10,)) (other non const input) + | + V + Transpose op + (shape = (20, 30, 10)) + | + V + + """ + + # --------------------- + # check for scenario 1 + # -------------------- + # are both inputs _LazyTransposeHypotheticalValue? + if self.other_input is None: + return True + + # --------------------- + # check for scenario 2 + # -------------------- + # is the second input a constant? + rank = len(self.tranpose_input.shape) + if len(self.transpose_axes) != rank: + return False + other_input_shape = self.other_input.shape + if any_symbolic(other_input_shape): + return False + if len(other_input_shape) > rank: + return False + if isinstance(self.other_input.val, (np.ndarray, np.generic)): + return True + + # --------------------- + # check for scenario 3 + # -------------------- + # can other input be "reshaped" to allow the transpose to pass through? + if any_symbolic(self.other_input.shape): + return False + transpose_compliment_perm = self._find_transpose_compliment(self.transpose_axes) + # make the rank of the other input, same as that of the transpose input, + # by broadcasting + if len(other_input_shape) < rank: + other_input_shape = [1] * (rank - len(other_input_shape)) + list(other_input_shape) + + # how many non unit dimensions in the other input's shape? 
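+        # e.g. a broadcast shape like (10, 1, 1) has at most one non-unit dim, so the
+        # transpose complement can always be expressed as a reshape of this input.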
+ if other_input_shape.count(1) in [rank, rank - 1]: + # 0 or 1 non unit dimension + return True + else: + # more than 1 non unit dimensions in other input + # check if transpose is moving only dimensions that have values 1 + # if true, then the transpose compliment can be expressed via a reshape + for i, axis in enumerate(transpose_compliment_perm): + if i != axis and other_input_shape[axis] != 1: + return False + return True + + def update(self): + # ---------------------- + # update for scenario 1 + # ---------------------- + if self.other_input is None: + # nothing to update + return + + # -------------------------- + # update for scenario 2 & 3 + # -------------------------- + if len(self.other_input.shape) == 0: + # other input is a scalar, no need to modify it + return + + # broadcast the shape of other input to match the rank + rank = len(self.tranpose_input.shape) + other_input_shape = self.other_input.shape + if len(other_input_shape) < rank: + other_input_shape = [1] * (rank - len(other_input_shape)) + list(other_input_shape) + + # find new shape after transpose compliment + transpose_compliment_perm = self._find_transpose_compliment(self.transpose_axes) + new_shape = [0] * rank + for i, axis in enumerate(transpose_compliment_perm): + new_shape[i] = other_input_shape[axis] + + if self.other_input.val is not None: + # update the const (scenario 2) + const_value = self.other_input.val + new_const_val = np.transpose( + const_value.reshape(other_input_shape), transpose_compliment_perm + ) + # insert a new constant JUST before the op + with self.op.enclosing_block: + new_const_var = mb.const(val=new_const_val, before_op=self.op) + + self.op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=new_const_var.op, + end_op=self.op, + old_var=self.other_input, + new_var=new_const_var, + no_check_var_types=True, + ) + else: + # insert a reshape (scenario 3) + with self.op.enclosing_block: + new_other_var = mb.reshape(x=self.other_input, shape=new_shape, before_op=self.op) + self.op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=new_other_var.op, + end_op=self.op, + old_var=self.other_input, + new_var=new_other_var, + no_check_var_types=True, + ) + + +@register_pass(namespace="common") +class reduce_transposes(AbstractGraphPass): + """ + Reduce transposes when it is applicable. For example: + + .. code-block:: + + # Example 1 + Input graph: + input -----> transpose(axis=[1,0]) -----> transpose(axis=[1,0]) ---> out + + Output graph: + input -----> identity -----> out + + # Example 2 + Input graph: + input---->transpose(axis=[0,3,1,2])---->relu---->transpose(axis=[0,2,3,1])--->out + + Output graph: + input----->relu----->out + + # Example 3 + Input graph: + input(shape=10,2,3,5)--->transpose(axis=[0,2,3,1])----->relu---->pool----->out1 + | + | + --->relu----->log---->transpose(axis=[0,3,1,2])---->out2 + + Output graph: + input(shape=10,2,3,5)----->relu---->transpose(axis=[0,2,3,1])---->pool----->out1 + | + | + --->relu----->log---->out2 + + Please see ``TransposeOptimizationPass`` for more details. + + Notes + ----- + + This pass is divided into 3 phases: + + `1st phase:` Information gathering. + + - Plug in Identity ops for all output nodes. This allows us to treat all ops uniformly during traversal. + - Block is traversed in the topological order, starting from the ops connected to the inputs. + - During the traversal, a value is associated with every var in the block. + This value can be either of type ``_HypotheticalValue`` or ``_LazyTransposeHypotheticalValue``. 
+      The main purpose of type ``_HypotheticalValue`` is to indicate that it is `not` of type ``_LazyTransposeHypotheticalValue``.
+    - ``_LazyTransposeHypotheticalValue`` represents either one or multiple transpose ops with the same perm value. This information
+      is stored in this class. It also wraps a ``_HypotheticalValue`` that was the last hypothetical value generated
+      prior to the origin of the ``_LazyTransposeHypotheticalValue``.
+    - Each op decides which type of hypothetical value to associate with its output vars, based on its op type,
+      attributes, and the types of the hypothetical values of its input vars.
+    - Ops are classified into 4 categories: `unary like`, `axis update`, `transpose`, and `materialize` (for all the rest).
+    - Transpose ops are the ops from which a ``_LazyTransposeHypotheticalValue`` originates.
+        - If the input to it is a ``_HypotheticalValue``, its output will be a ``_LazyTransposeHypotheticalValue``,
+          indicating that this ``transpose`` op is available to get cancelled downstream.
+        - If the input to it is a ``_LazyTransposeHypotheticalValue``, then it is checked whether this op cancels it or not.
+            - If the op cancels it, a ``_HypotheticalValue`` is generated at the output and the information about this ``transpose`` cancellation
+              is recorded in the dictionary ``transpose_op_to_cancel_ops``.
+            - If the op does not cancel it, the current ``transpose`` op is categorized as a `materialize` op. Therefore, the information in
+              the dictionary ``transpose_op_to_materialize_ops`` is updated accordingly. The output of the op is now mapped to a
+              ``_HypotheticalValue``.
+    - Unary like ops: These simply transfer their input hypothetical value type to the output.
+    - Axis update ops: If a ``transpose`` can pass through them, they are treated like a unary op and the dictionary
+      ``transpose_op_to_axis_update_ops`` is updated. If the op cannot be updated in any manner to
+      allow a ``transpose`` to pass through, this op is then categorized as a `materialize` op and handled accordingly.
+    - Materialize ops: All ``_LazyTransposeHypotheticalValue`` input vars, if present, materialize here. The output of this op
+      is always of type ``_HypotheticalValue``. If an input is a ``_LazyTransposeHypotheticalValue``, the dictionary
+      ``transpose_op_to_materialize_ops`` is updated.
+    - To treat an op like a unary op, add its type to ``_UNARY_LIKE_OP_TYPES``. In future changes we want to make this process
+      automatic by detecting an op as `unary like` by its "traits".
+    - To treat an op like an `axis update` op, add a class specific to the op implementing the class ``TransformAxisUpdateOps``.
+      For examples, see classes ``_TransformConcat``, ``_TransformPad``, and so on. The dictionary ``_AXIS_UPDATE_OPS`` is automatically filled
+      in by the decorator ``_TransposeOptimization.register_axis_update_op``.
+
+    `2nd phase:` Determining which ``transpose`` ops to remove from the graph.
+
+    All ``transpose`` ops that have a corresponding complement op in the dict ``transpose_op_to_cancel_ops`` are candidates.
+    However, you need to ensure the following:
+
+    - If a ``transpose`` op is removed, then all of its ``cancel`` ops in ``transpose_op_to_cancel_ops`` must also be removed,
+      to ensure correctness of the graph. The same is true in the reverse direction as well;
+      that is, for every ``cancel`` op that is removed, all its parent ``transpose`` ops upstream must also be removed.
+    - ``transpose`` ops should be removed only if the number of ``cancel`` ops is greater than the number of ``transpose`` ops
+      that would get freshly introduced to the block as a result of materialization ops. Currently in the algorithm,
+      each materialization op/output var (dicts ``transpose_op_to_materialize_ops``/``old_output_vars``)
+      results in one more ``transpose`` op, although this can be further optimized in the future.
+
+    To resolve this, we recognize that nodes consisting of sets ``(a)`` and ``(b)`` form a bipartite graph, where
+    ``(a) ==`` starting ``transpose`` ops (originators of ``_LazyTransposeHypotheticalValue``)
+    and ``(b) ==`` set of ``transpose`` ``cancel`` ops and ``materialize`` ops.
+
+    - In this bipartite graph, we find all the connected components. For each connected component,
+      either the entire set of ``transpose`` ops in it is removed/materialized, or none
+      of them is touched.
+    - Thus for each set, a determination is made based on counting the number of ``cancel`` ops and ``materialize`` ops.
+    - Based on this determination, the final set of ``transpose`` ops to be removed is updated.
+
+    `3rd phase:` Transforming the graph.
+
+    - ``transpose`` starting ops and the ``cancel`` ops are removed.
+    - Axis update ops, affected by these ``transpose`` ops, are updated.
+    - Transposes are materialized; that is, added just before the ``materialize`` ops, which are linked to the starting ``transpose`` ops.
+      The starting ``transpose`` op can be materialized (inserted) multiple times, before each of the ``materialize`` ops downstream.
+    - Block outputs are handled in a similar fashion as the `materialize` ops.
+    - Type inference on all ops is invoked after all the transformations.
+    - All Identity ops that are plugged into the graph to treat outputs as materialized are removed.
+
+    `Debugging`
+
+    If the ``_DEBUG`` flag is set to ``True``, the block before and after the transformation is plotted,
+    with transpose nodes highlighted.
+    """
+
+    def apply(self, prog):
+        for f in prog.functions.values():
+            self._reduce_transposes_block(f)
+
+    @staticmethod
+    def _reduce_transposes_block(block):
+        """
+        Only apply the optimization if the block is flat,
+        i.e., it does not contain any op which contains a sub-block.
+        TODO:
+        Removing transposes and transpose complements requires re-running
+        type inference for the set of ops in between the fused transpose ops,
+        which is simpler to do when all the ops in the block are free of sub-blocks.
+        The case of transpose fusion with sub-block-containing ops needs to be handled with more care and test cases.
+        """
+        for op in list(block.operations):
+            if len(op.blocks) > 0:
+                return
+
+        with block:
+            opt_transposes = _TransposeOptimization(block)
+            opt_transposes.block_traversal()
+            opt_transposes.apply_transform()
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_tensor_operation.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_tensor_operation.py
new file mode 100644
index 00000000..82ac2dec
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/optimize_tensor_operation.py
@@ -0,0 +1,831 @@
+# Copyright (c) 2023, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import numpy as np
+
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
+from coremltools.converters.mil.mil.passes.helper import (
+    _check_child_op_type,
+    _check_var_scalar_value,
+    block_context_manager,
+)
+from coremltools.converters.mil.mil.passes.pass_registry import register_pass
+from coremltools.converters.mil.mil.types.symbolic import any_symbolic
+
+@register_pass(namespace="common")
+class expand_high_rank_reshape_and_transpose(AbstractGraphPass):
+    """
+    Detect the pattern ``reshape_1-->transpose-->reshape_2``, where ``reshape_1`` has
+    an output tensor of rank >= 6, and ``reshape_2`` produces a tensor of rank <= 5.
+
+    In general, we can expand this pattern into a sequence of rank-4 ``reshape`` and ``transpose`` ops,
+    which is supported by the Core ML runtime.
+
+    .. code-block::
+
+        Given:
+            %1 = reshape(%x, shape=(d1, d2, d3, d4, ..., dn))
+            %2 = transpose(%1, perm=(p1, p2, ..., pn))
+            %3 = reshape(%2, shape=(o1, o2, o3, o4, o5))
+
+        Result:
+            %t1 = reshape(%x, shape=(y11, y12, y13, y14))
+            %h1 = transpose(%t1, perm=[0, 2, 1, 3])
+            %t2 = reshape(%h1, shape=(y21, y22, y23, y24))
+            %h2 = transpose(%t2, perm=[0, 2, 1, 3])
+            ....
+            %hn = transpose(%tn, perm=[0, 2, 1, 3])
+            %3 = reshape(%hn, shape=(o1, o2, o3, o4, o5))
+    """
+    def apply(self, prog):
+        for f in prog.functions.values():
+            block_changed = True
+            while block_changed:
+                block_changed = self.expand_high_rank_reshape_and_transpose_block(f)
+
+    @staticmethod
+    def _match_pattern(op):
+        # We are detecting the
+        # reshape(>= rank 6) -> transpose -> reshape(<= rank 5) pattern
+        ops = [op]
+        if op.op_type != "reshape":
+            return None
+        if op.outputs[0].rank <= 5:
+            return None
+        if any_symbolic(op.outputs[0].shape):
+            return None
+
+        if not _check_child_op_type(op, "transpose"):
+            return None
+        transpose_op = op.outputs[0].child_ops[0]
+        ops.append(transpose_op)
+
+        if not _check_child_op_type(transpose_op, "reshape"):
+            return None
+        reshape_op = transpose_op.outputs[0].child_ops[0]
+        ops.append(reshape_op)
+        if reshape_op.outputs[0].rank >= 6:
+            return None
+
+        for candidate_op in ops[:-1]:
+            if candidate_op.outputs[0] in op.enclosing_block.outputs:
+                return None
+        return ops
+
+    @staticmethod
+    def _try_to_transform(ops, block):
+        def _get_prod(start, end, arr, skip_indices):
+            res = 1
+            for i in range(start, end):
+                if i in skip_indices:
+                    continue
+                res *= arr[i]
+            return res
+
+        reshape_op, transpose_op, last_reshape_op = ops[0], ops[1], ops[2]
+        original_shape = reshape_op.outputs[0].shape
+        original_perm = transpose_op.perm.val.tolist()
+
+        # Group the consecutive axes in the perm; sometimes this alone can directly lower the
+        # rank under 6.
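+        # A run of axes that stays contiguous and in-order under the perm can be flattened
+        # into a single dimension before transposing, without changing the data layout.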
+        #
+        # For instance:
+        #
+        # reshape = mb.reshape(x=x, shape=[1, 2, 3, 4, 5, 6])
+        # transpose = mb.transpose(x=reshape, perm=[4, 5, 3, 2, 0, 1])
+        # output = mb.reshape(x=transpose, shape=[6, 20, 6])
+        #
+        # This has 4 groups of axes: [4, 5], [3], [2], [0, 1].
+        # We can transform the ops to
+        #
+        # new_reshape = mb.reshape(x=x, shape=[1*2, 3, 4, 5*6])
+        # new_transpose = mb.transpose(x=new_reshape, perm=[3, 2, 1, 0])
+        # output = mb.reshape(x=new_transpose, shape=[6, 20, 6])
+        #
+        # Note that the output of new_transpose has a different rank than the output of transpose;
+        # however, they share the same data layout, so the final output is still unchanged.
+        group_axes = []
+        res = []
+        for i in range(len(original_perm)):
+            if i > 0 and original_perm[i] == original_perm[i - 1] + 1:
+                res.append(original_perm[i])
+            else:
+                if len(res) > 0:
+                    group_axes.append(res)
+                res = [original_perm[i]]
+            if i == len(original_perm) - 1:
+                group_axes.append(res)
+
+        group_shape = []
+        for axes in group_axes:
+            start, end = axes[0], axes[-1] + 1
+            group_shape.append(_get_prod(start, end, original_shape, set()))
+
+        start_group_axis = [axes[0] for axes in group_axes]
+        group_axis_order = np.argsort(start_group_axis)
+        shape = np.array(group_shape)[group_axis_order].tolist()
+
+        sorted_start_group_axis = np.sort(start_group_axis).tolist()
+        perm = [sorted_start_group_axis.index(i) for i in start_group_axis]
+
+        rank = len(perm)
+        x = reshape_op.x
+
+        if rank < 6:
+            # If the intermediate tensors have rank < 6,
+            # we can directly use them to replace the original pattern
+            x = mb.reshape(x=x, shape=shape, before_op=reshape_op)
+            x = mb.transpose(x=x, perm=perm, before_op=reshape_op)
+
+        else:
+            # Otherwise, we need to expand the rank-N tensor into N reshape and N transpose ops.
+            # Note that all intermediate tensors have rank 4.
+            #
+            # The algorithm is as follows:
+            #
+            # reshape shape: [d_1, d_2, ..., d_n]
+            # transpose perm: [p_1, p_2, ..., p_n]
+            #
+            # reshape to [1, d_1*d_2*...*d_(p_1-1), d_(p_1), d_(p_1+1)*...*d_n]
+            # transpose to [1, d_(p_1), d_1*d_2*...*d_(p_1-1), d_(p_1+1)*...*d_n]
+            #
+            # reshape to [d_(p_1), d_1*d_2*...*d_(p_2-1), d_(p_2), d_(p_2+1)*...*d_n]
+            # transpose to [d_(p_1), d_(p_2), d_1*d_2*...*d_(p_2-1), d_(p_2+1)*...*d_n]
+            #
+            # reshape to [d_(p_1)*d_(p_2), d_1*d_2*...*d_(p_3-1), d_(p_3), d_(p_3+1)*...*d_n]
+            # ....
+ # so on and so forth + leading_dim = 1 + memo = set() + for i in range(rank): + axis = perm[i] + dim = shape[axis] + memo.add(axis) + reshape_shape = [ + leading_dim, + _get_prod(0, axis, shape, memo), + dim, + _get_prod(axis + 1, rank, shape, memo) + ] + x = mb.reshape(x=x, shape=reshape_shape, before_op=reshape_op) + x = mb.transpose(x=x, perm=[0, 2, 1, 3], before_op=reshape_op) + leading_dim *= dim + + + x = mb.reshape(x=x, shape=last_reshape_op.shape.val, before_op=reshape_op) + if reshape_op.enclosing_block.try_replace_uses_of_var_after_op( + anchor_op=reshape_op, old_var=last_reshape_op.outputs[0], new_var=x, + ): + # Remove all the ops at once + block.remove_ops(ops) + return True + return False + + @block_context_manager + def expand_high_rank_reshape_and_transpose_block(self, block): + fusion_status = False + for op in list(block.operations): + for b in op.blocks: + block_changed = True + while block_changed: + block_changed = self.expand_high_rank_reshape_and_transpose_block(b) + if len(op.blocks) > 0: + continue + + ops = self._match_pattern(op) + if ops is not None: + fusion_status = self._try_to_transform(ops, block) + # has to break as the downstream iterator is affected. + if fusion_status: + return fusion_status + return fusion_status + +@register_pass(namespace="common") +class concat_to_pixel_shuffle(AbstractGraphPass): + """ + Identify nested, interleaved ``concat`` ops which can be replaced by a single ``concat`` and a `pixel shuffle` layer. + + This pattern occurs with the faster up-convolution from the FCRN model (Laina et al., 2016). + + .. code-block:: + + # Before the concat_to_pixel_shuffle pass. + input(N, C, H, W) ------------------- + | + v + input(N, C, H, W) -----> concat(axis=2, interleave=True) -----> concat(axis=3, interleave=True) ----> output + ^ + | + input(N, C, H, W) -----> concat(axis=2, interleave=True) -------------------- + | ^ + | | + input(N, C, H, W) ------------------- + + + # After the concat_to_pixel_shuffle pass. 
+ input(N, C, H, W) --------------- + | + v + input(N, C, H, W) -----> concat(axis=1, interleave=True) -----> pixel_shuffle(upscale_factor=2) ----> output + ^ + | + input(N, C, H, W) --------------| + | + | + input(N, C, H, W) --------------- + """ + + def apply(self, prog): + for f in prog.functions.values(): + self._concat_to_pixel_shuffle_block(f) + + @staticmethod + def _match_pattern(op): + + # Identify if this is an op we can transform + if op.op_type != "concat": + return None + + w_concat = op + if w_concat.inputs["values"][0].rank != 4: + return None + + if w_concat.inputs["axis"].val != 3: + return None + if not w_concat.inputs["interleave"].val: + return None + + inputs = list(w_concat.inputs["values"]) + if len(inputs) != 2: + return None + + if not inputs[0].op or not inputs[1].op: + return None + + if inputs[0].op.op_type != "concat" or inputs[1].op.op_type != "concat": + return None + + h_concat_0 = inputs[0].op + if not h_concat_0.inputs["interleave"].val: + return None + + h_concat_0_inputs = list(h_concat_0.inputs["values"]) + if len(h_concat_0_inputs) != 2: + return None + + h_concat_1 = inputs[1].op + if not h_concat_1.inputs["interleave"].val: + return None + + h_concat_1_inputs = list(h_concat_1.inputs["values"]) + if len(h_concat_1_inputs) != 2: + return None + + if h_concat_0.inputs["axis"].val != 2 or h_concat_1.inputs["axis"].val != 2: + return None + + return w_concat, h_concat_0, h_concat_1 + + @staticmethod + def _replace_ops(block, w_concat, h_concat_0, h_concat_1): + + h_concat_0_inputs = list(h_concat_0.inputs["values"]) + h_concat_1_inputs = list(h_concat_1.inputs["values"]) + + all_inputs = [ + h_concat_0_inputs[0], + h_concat_1_inputs[0], + h_concat_0_inputs[1], + h_concat_1_inputs[1], + ] + + # Concatenate all 4 inputs on the channel axis + x = mb.concat(values=all_inputs, axis=1, before_op=h_concat_0, interleave=True) + # Shuffle into place + x = mb.pixel_shuffle(x=x, upscale_factor=2, before_op=h_concat_0) + + w_concat.enclosing_block.replace_uses_of_var_after_op( + anchor_op=h_concat_0, old_var=w_concat.outputs[0], new_var=x + ) + + block.remove_ops([w_concat, h_concat_0, h_concat_1]) + + @block_context_manager + def _concat_to_pixel_shuffle_block(self, block): + for op in list(block.operations): + layers = self._match_pattern(op) + if layers: + self._replace_ops(block, layers[0], layers[1], layers[2]) + + +@register_pass(namespace="common") +class detect_concat_interleave(AbstractGraphPass): + """ + Detect the pattern ``concat-->reshape--->transpose--->reshape``, where ``concat`` is + along the channel axis ``(axis=-3)``, and map this pattern to the ``concat`` with ``interleave`` op. + + This pattern occurs, for example, in the ``shufflenet`` model in ``torchvision``. + + .. 
code-block:: + + Given: + %3 = concat(%1.a, %1.b, ..., axis=-3, interleave=False) #shape = (B, n*C, H, W) + %4 = reshape(%3) #shape = (B, n, C, H, W) + %5 = transpose(%4, perm=[0, 2, 1, 3, 4]) # shape = (B, C, n, H, W) + %6 = reshape(%5) # shape = (B, C*n, H, W) + + Result: + %6 = concat(%1.a, %1.b, ..., axis=-3, interleave=True) + """ + + def apply(self, prog): + for f in prog.functions.values(): + block_changed = True + while block_changed: + block_changed = self._fuse_concat_interleave(f) + + @staticmethod + def _match_pattern(op): + if op.outputs[0] in op.enclosing_block.outputs: + return None + + if op.op_type == "concat": + if op.interleave.val: + return None + + # check that axis is -3 and rank is 4 + rank = op.values[0].rank + if rank != 4: + return None + axis = op.axis.val + if axis > 0: + axis = axis - rank + if axis != -3: + return None + + # check that all inputs to concat have fully defined shapes + for in_ in op.values: + if any_symbolic(in_.shape): + return None + + # check that all inputs to concat have the same shape + inshape = list(op.values[0].shape) + for v in op.values[1:]: + for i in range(rank): + if inshape[i] != v.shape[i]: + return None + + # check that this concat is connected to exactly 1 reshape op + child_ops = list(op.outputs[0].child_ops) + if len(child_ops) == 1: + if list(child_ops)[0].op_type == "reshape": + return op + return None + + @staticmethod + def _try_to_transform(concat_op, add_op, block): + all_ops = [concat_op] + B, C, H, W = list(concat_op.values[0].shape) + n = len(concat_op.values) + + # check that reshape shapes the input to (B, n, C, H, W) + reshape_op1 = concat_op.outputs[0].child_ops[0] + reshape_shape1 = reshape_op1.shape.val + if reshape_shape1 is None: + return False + if not isinstance(reshape_shape1, np.ndarray): + return False + reshape_shape1 = list(reshape_shape1) + if reshape_shape1 != [B, n, C, H, W]: + return False + all_ops.append(reshape_op1) + + # check that after reshape is a transpose op with perm=[0, 2, 1, 3, 4] + if len(list(reshape_op1.outputs[0].child_ops)) != 1: + return False + transpose_op = list(reshape_op1.outputs[0].child_ops)[0] + if transpose_op.op_type != "transpose": + return False + perm = transpose_op.perm.val + if perm is None: + return + if list(perm) != [0, 2, 1, 3, 4]: + return False + all_ops.append(transpose_op) + + # check that after transpose is another reshape with [B, . 
, H, W] + if len(list(transpose_op.outputs[0].child_ops)) != 1: + return False + reshape_op2 = list(transpose_op.outputs[0].child_ops)[0] + if reshape_op2.op_type != "reshape": + return False + reshape_shape2 = reshape_op2.shape.val + if reshape_shape2 is None: + return False + if not isinstance(reshape_shape2, np.ndarray): + return False + reshape_shape2 = list(reshape_shape2) + if len(reshape_shape2) != 4: + return False + if [reshape_shape2[0], reshape_shape2[-2], reshape_shape2[-1]] != [B, H, W]: + return False + all_ops.append(reshape_op2) + + # check that none of the op in this pattern is connected to the output + # (except the last mul op) + for i, op in enumerate(all_ops): + if i == len(all_ops) - 1: + continue + for out in op.outputs: + if out in block.outputs: + return False + + # add a new concat op + out_name = reshape_op2.outputs[0].name + x = mb.concat( + values=concat_op.values, + axis=concat_op.axis.val, + interleave=True, + name=out_name, + before_op=concat_op, + ) + + reshape_op2.enclosing_block.replace_uses_of_var_after_op( + anchor_op=reshape_op2, old_var=reshape_op2.outputs[0], new_var=x + ) + + # Remove all the ops at once + block.remove_ops(all_ops) + return True + + @block_context_manager + def _fuse_concat_interleave(self, block): + fusion_status = False + for op in list(block.operations): + for b in op.blocks: + block_changed = True + while block_changed: + block_changed = self._fuse_concat_interleave(b) + if len(op.blocks) > 0: + continue + + concat_op = self._match_pattern(op) + if concat_op is not None: + fusion_status = self._try_to_transform(op, concat_op, block) + # has to break as the downstream iterator is affected. + if fusion_status: + return fusion_status + return fusion_status + + +@register_pass(namespace="common") +class fuse_onehot_matmul_to_gather(AbstractGraphPass): + """ + Detect if ``onehot (axis=-1, on_value=1, off_value=0)`` is followed by a ``matmul`` op (no bias). + If so, they can be replaced by a ``gather`` op. + + .. 
code-block:: + + Input: + %2 = one_hot(%1, on_value=1, off_value=0, axis=-1) + %3 = const() # rank 2 + %4 = matmul(%2, %3) + + Output: + %4 = gather(%3, %2, axis=0) + """ + + def apply(self, prog): + for f in prog.functions.values(): + block_changed = True + while block_changed: + block_changed = self._fuse_onehot_matmul_to_gather_block(f) + + @staticmethod + def _try_to_transform(onehot_op, block): + root_var = onehot_op.indices + + # check that the output of the onehot op is not a block output + if onehot_op.outputs[0] in block.outputs: + return False + + # check that onehot op has axis=-1, on_value=1 and off_value=0 + # and constant one_hot_vector_size + axis = onehot_op.axis.val + if axis is None: + return False + if onehot_op.indices.shape is None: + return False + rank = len(onehot_op.indices.shape) + if axis >= 0: + axis -= rank + if axis != -1: + return False + if not _check_var_scalar_value(onehot_op.on_value, 1): + return False + if not _check_var_scalar_value(onehot_op.off_value, 0): + return False + if onehot_op.one_hot_vector_size.val is None: + return False + + # checks for the following matmul op + if not _check_child_op_type(onehot_op, "matmul"): + return False + matmul_op = list(onehot_op.outputs[0].child_ops)[0] + if matmul_op.x != onehot_op.outputs[0]: + return False + if matmul_op.transpose_x.val or matmul_op.transpose_y.val: + return False + W_var = matmul_op.y + if W_var.val is None: + return False + if len(W_var.val.shape) != 2: + return False + + # remove onehot and matmul and replace with gather op + out_name = matmul_op.outputs[0].name + x = mb.gather(x=W_var, indices=root_var, axis=0, name=out_name, before_op=matmul_op) + + matmul_op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=matmul_op, old_var=matmul_op.outputs[0], new_var=x + ) + # Remove all the ops at once + block.remove_ops([onehot_op, matmul_op]) + return True + + @block_context_manager + def _fuse_onehot_matmul_to_gather_block(self, block): + fusion_status = False + for i, op in enumerate(list(block.operations)): + for b in op.blocks: + block_changed = True + while block_changed: + block_changed = self._fuse_onehot_matmul_to_gather_block(b) + if len(op.blocks) > 0: + # This op can't be pow + continue + + # start pattern match if one_hot op is encountered + if op.op_type == "one_hot": + fusion_status = self._try_to_transform(op, block) + # has to break as the downstream iterator is affected. + if fusion_status: + return fusion_status + return fusion_status + + +@register_pass(namespace="common") +class replace_stack_reshape(AbstractGraphPass): + """ + A stack followed by a reshape layer can be replaced by a ``concat`` if the reshape + simply removes the new axis and doubles the size of one of the axes next to it. + + If the new axis is reshaped to the "right" (that is, the axis just after it is + doubled), then we can use a ``concat``. If it is reshaped to the "left" (the axis + just before it is doubled), then the ``concat`` needs to set the ``interleaved`` flag. + + Examples: + + .. 
code-block:: + + Given: + %1 = tensor(1, 5, 3, 4) + %2 = tensor(1, 5, 3, 4) + %3 = stack((%1,%2), axis=2) # shape = (1, 5, 2, 3, 4) + %4 = reshape(%3, shape=[1, 10, 3, 4]) + + Result: + %1 = tensor(1, 5, 3, 4) + %2 = tensor(1, 5, 3, 4) + %4 = concat((%1,%2), axis=1, interleave=True) # shape = (1, 10, 3, 4) + + Given: + %1 = tensor(1, 5, 3, 4) + %2 = tensor(1, 5, 3, 4) + %3 = stack((%1, %2), axis=1) # shape = (1, 2, 5, 3, 4) + %4 = reshape(%3, shape=[1, 10, 3, 4]) + + Result: + %1 = tensor(1, 5, 3, 4) + %2 = tensor(1, 5, 3, 4) + %4 = concat((%1, %2), axis = 1) # shape = (1, 10, 3, 4) + """ + + def apply(self, prog): + for f in prog.functions.values(): + self._replace_stack_reshape_block(f) + + @staticmethod + def _match_operation(stack_op): + + # Identify if this is an op we can transform + if stack_op.op_type != "stack": + return None, None + + child_ops = stack_op.outputs[0].child_ops + if len(child_ops) != 1: + return None, None + + if child_ops[0].op_type != "reshape": + return None, None + + stack_axis = stack_op.inputs["axis"] + if not stack_axis: + return None, None + stack_axis_val = stack_axis.val + + reshape_op = child_ops[0] + + # Now, op is a stack op followed by a reshape op + # So we need to check that the stack really gets eliminated + stack_output_rank = len(stack_op.outputs[0].shape) + reshape_output_rank = len(reshape_op.outputs[0].shape) + + if stack_output_rank != (reshape_output_rank + 1): + return None, None + + # Compare the input to stack to the output from reshape + # These shapes should differ in either the stack_axis_val place (by a factor of 2), + # or in the stack_axis_val-1 place by the same factor + input_shape = list(stack_op.inputs["values"][0].shape) + concat_axis = [ + idx + for idx, (x, y) in enumerate(zip(input_shape, reshape_op.outputs[0].shape)) + if x != y + ] + if len(concat_axis) != 1: + return None, None + + concat_axis = concat_axis[0] + + if input_shape[concat_axis] * 2 != reshape_op.outputs[0].shape[concat_axis]: + return None, None + + if concat_axis != stack_axis_val and concat_axis != stack_axis_val - 1: + return None, None + + return stack_op, reshape_op + + @staticmethod + def _replace_stack_reshape_ops(block, stack_op, reshape_op): + + stack_axis = stack_op.inputs["axis"] + if not stack_axis: + return None, None + stack_axis_val = stack_axis.val + + input_shape = list(stack_op.outputs[0].shape) + input_shape.pop(stack_axis_val) + + concat_axis = [ + idx + for idx, (x, y) in enumerate(zip(input_shape, reshape_op.outputs[0].shape)) + if x != y + ] + if len(concat_axis) != 1: + return + concat_axis = concat_axis[0] + + interleave = concat_axis == stack_axis_val - 1 + + x = mb.concat( + values=stack_op.values, axis=concat_axis, before_op=stack_op, interleave=interleave + ) + + reshape_op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=stack_op, old_var=reshape_op.outputs[0], new_var=x + ) + block.remove_ops([stack_op, reshape_op]) + + @block_context_manager + def _replace_stack_reshape_block(self, block): + for op in list(block.operations): + + stack_op, reshape_op = self._match_operation(op) + + if stack_op: + self._replace_stack_reshape_ops(block, stack_op, reshape_op) + + +@register_pass(namespace="common") +class use_reflection_padding(AbstractGraphPass): + """ + Identify a reflection padding layer composed out of `slices` and `concats`. + + .. 
code-block::
+
+    Input graph:
+    ------------------------------------------------------------------------------------- |
+    |                                                                                     v
+    input(1, 2, 6, 8) ------> slice_by_index(begin=[0, 0, 0, 1], end=[0, 0, 0, 2]) -----> concat(axis=3) ---> out(1, 2, 6, 10)
+    |                                                                                     ^
+    ----------------> slice_by_index(begin=[0, 0, 0, -2], end=[0, 0, 0, -1]) -------------|
+
+    Output graph:
+    input(1, 2, 6, 8) -----> pad(mode=reflect, size=[0, 0, 1, 1]) -----> out(1, 2, 6, 10)
+    """
+
+    def apply(self, prog):
+        for f in prog.functions.values():
+            self._reflection_padding_block(f)
+
+    @staticmethod
+    def _match_pattern(concat_op, block):
+        if concat_op.op_type != "concat":
+            return False
+
+        concat_inputs = list(concat_op.inputs["values"])
+        # There needs to be an odd number of inputs, and at least one model has a concat
+        # input of length 1
+        if len(concat_inputs) % 2 != 1 or len(concat_inputs) == 1:
+            return False
+
+        # The original input will need to be in the middle of the concatenated inputs
+        original_input = concat_inputs[len(concat_inputs) // 2]
+
+        axis = None
+        slice_ops_out = []
+        end_mask = None
+        begin_index = len(concat_inputs) // 2
+
+        for slice_op in concat_inputs:
+
+            # one of the concat inputs is the original input (to the slices)
+            if slice_op == original_input:
+                # We'll now start checking indices from the end
+                begin_index = begin_index - 2
+                continue
+
+            slice_op = slice_op.op
+            if not slice_op:
+                return False
+
+            if slice_op.op_type != "slice_by_index":
+                return False
+
+            # check that the input to the slice op is the original input
+            if slice_op.inputs["x"] != original_input:
+                return False
+
+            # If the slice is a block output, it cannot be removed
+            if slice_op.outputs[0] in block.outputs:
+                return False
+
+            if end_mask is None:
+                end_mask = slice_op.inputs["end_mask"].val
+                axis = list(end_mask).index(False, 0, len(end_mask))
+
+            if end_mask is None:
+                return False
+
+            if axis != list(end_mask).index(False, 0, len(end_mask)):
+                return False
+
+            # Check that we're only taking a slice of size 1
+            end = slice_op.inputs["end"].val
+            begin = slice_op.inputs["begin"].val
+            if end[axis] - begin[axis] != 1:
+                return False
+
+            input_shape = original_input.shape
+            # Check that the slices are in order
+            if begin[axis] != begin_index and begin[axis] != begin_index + input_shape[axis]:
+                return False
+            begin_index = begin_index - 1
+
+            slice_ops_out.append(slice_op)
+
+        if axis is None:
+            return False
+
+        return use_reflection_padding._replace_ops(
+            block, concat_op, slice_ops_out, axis - len(end_mask)
+        )
+
+    @staticmethod
+    def _replace_ops(block, concat_op, slice_ops, axis):
+
+        pad_size = len(slice_ops) // 2
+        if axis == -1:
+            pad = [pad_size, pad_size]
+        elif axis == -2:
+            pad = [pad_size, pad_size, 0, 0]
+        else:
+            return False
+
+        x = mb.pad(x=slice_ops[0].inputs["x"], pad=pad, mode="reflect", before_op=concat_op)
+        concat_op.enclosing_block.replace_uses_of_var_after_op(
+            anchor_op=concat_op, old_var=concat_op.outputs[0], new_var=x
+        )
+
+        block.remove_ops([concat_op] + slice_ops)
+        return True
+
+    @block_context_manager
+    def _reflection_padding_block(self, block):
+        for op in list(block.operations):
+            self._match_pattern(op, block)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/preprocess.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/preprocess.py
new file mode 100644
index 00000000..6add95b6
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/preprocess.py
@@ -0,0 +1,362 @@
+# 
Copyright (c) 2023, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import re
+import warnings
+from collections import OrderedDict
+
+from coremltools.converters.mil.input_types import EnumeratedShapes, ImageType, Shape
+from coremltools.converters.mil.mil import Builder as mb
+from coremltools.converters.mil.mil import Function, types
+from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
+from coremltools.converters.mil.mil.passes.pass_registry import register_pass
+
+
+@register_pass(namespace="common")
+class image_input_preprocess(AbstractGraphPass):
+    """
+    Plug in a ``transpose`` op to convert image inputs in NHWC format to NCHW format.
+
+    The pass follows these steps:
+
+    1. Check whether the user specified any inputs as ImageType.
+    2. Check the channel dimension for all ImageType inputs.
+
+       a) ``channel_first == True``
+          We do not modify this input, since channel-first is the intended
+          layout for feeding images with optimal performance.
+       b) ``channel_first == False``
+          We convert the input into a "channel_first" input, and plug in a
+          ``transpose`` op for the input to maintain the remaining graph's dimensionality.
+    """
+
+    def apply(self, prog):
+        for f_name, f in prog.functions.items():
+            if f_name == "main":
+                # We need to make sure the 'main' function exists and start there.
+                self._image_input_preprocess(prog)
+
+    @staticmethod
+    def _image_input_preprocess(prog):
+        def _transform_to_channel_first(shape):
+            if isinstance(shape, tuple):
+                shape = list(shape)
+                return tuple(shape[:-3] + [shape[-1]] + shape[-3:-1])
+            else:
+                return shape[:-3] + [shape[-1]] + shape[-3:-1]
+
+        main_input_types = list(prog.main_input_types)
+        for idx, input_type in enumerate(main_input_types):
+            if isinstance(input_type, ImageType) and not input_type.channel_first:
+                name = input_type.name
+                # Build new ImageType to change data layout
+                if isinstance(input_type.shape, Shape):
+                    new_shape = _transform_to_channel_first(input_type.shape.shape)
+                    new_default = _transform_to_channel_first(input_type.shape.default)
+                    shape_type = Shape(shape=new_shape, default=new_default)
+                elif isinstance(input_type.shape, EnumeratedShapes):
+                    shape_list = []
+                    for shape in input_type.shape.shapes:
+                        if isinstance(shape, Shape):
+                            shape_list.append(_transform_to_channel_first(shape.shape))
+                        else:
+                            shape_list.append(_transform_to_channel_first(shape))
+                    shape_type = EnumeratedShapes(
+                        shapes=shape_list,
+                        default=_transform_to_channel_first(input_type.shape.default),
+                    )
+                new_image_type = ImageType(
+                    name=name,
+                    shape=shape_type,
+                    bias=input_type.bias,
+                    scale=input_type.scale,
+                    color_layout=input_type.color_layout,
+                    channel_first=True,
+                )
+                main_input_types[idx] = new_image_type
+
+                # Reconstruct Placeholder of Function inputs.
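+                # As an illustrative example (not in the original source): an NHWC
+                # placeholder of shape (1, 224, 224, 3) is rebuilt below with the NCHW
+                # shape (1, 3, 224, 224), following shape[:-3] + [shape[-1]] + shape[-3:-1].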
+ placeholder_op = prog.functions["main"].placeholder_inputs[name] + old_var = placeholder_op.outputs[0] + nchw_shape = _transform_to_channel_first(placeholder_op.sym_shape) + placeholder_op.__init__( + nchw_shape, dtype=placeholder_op.dtype, name=placeholder_op.name + ) + + # Update Function input var + prog.functions["main"]._input_dict[name] = placeholder_op.outputs[0] + prog.functions["main"].function_inputs = tuple( + prog.functions["main"]._input_dict.values() + ) + + # Add transpose into graph (Transpose from NCHW back to NHWC) + curr_block = prog.functions["main"] + curr_var = prog.functions["main"].inputs[name] + + perm = list(range(curr_var.rank)) + perm = perm[:-3] + [perm[-2], perm[-1], perm[-3]] + with curr_block: + new_input = mb.transpose( + x=curr_var, + perm=perm, + before_op=prog.functions["main"].operations[0], + name=curr_var.name + "__transpose_from_nchw__", + ) + curr_block.replace_uses_of_var_after_op( + anchor_op=None, old_var=old_var, new_var=new_input + ) + prog.main_input_types = tuple(main_input_types) + + +class NameSanitizer: + def __init__(self, prefix=None): + # to hold all names encountered, + # to make sure that all new names are unique + self.all_names = set() + self.prefix = "_" if prefix is None else prefix + + def sanitize_name(self, name): + """ + Sanitize the input string and return it back. + Input string should be of the format: [a-zA-Z_][a-zA-Z0-9_]* + + If it is not, then it is sanitized in the following manner: + - first, any character that is not [a-zA-Z0-9_] is replaced with "_" + - if the starting character is not [a-zA-Z_], it is prefixed with self.prefix + - the resulting string must be unique. If it has been encountered before, + it is appended by "_0" or "_1" and so on, until it becomes unique. + + :name: str + current name + + :return: str + updated name. Returns the same string, if sanitization not required. 
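+
+        For example: "conv/1" becomes "conv_1"; sanitizing a second "conv/1" then
+        yields "conv_1_0" to keep the result unique; with the default prefix,
+        "1st_layer" becomes "_1st_layer" since it starts with a digit.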
+ """ + + # replace any character that is not [a-zA-Z0-9_] with an underscore + new_name = re.sub("[^a-zA-Z0-9_]", "_", name) + + # now check if the name starts with anything but [A-Za-z_] + # if so, then add the prefix + if re.match("[^a-zA-Z_]", new_name): + new_name = self.prefix + new_name + + reserved_names = [ + "any", + "bool", + "program", + "func", + "tensor", + "list", + "dict", + "tuple", + "true", + "false", + "string", + "bf16", + "fp16", + "fp32", + "fp64", + "int8", + "int16", + "int32", + "int64", + "uint8", + "uint16", + "uint32", + "uint64", + ] + if new_name in reserved_names: + new_name += "_workaround" + + if new_name == name: + # return if nothing has changed + self.all_names.add(name) + return name + else: + # name has changed + # make sure it is unique, then return + if new_name in self.all_names: + idx = 0 + new_name += "_" + str(idx) + while new_name in self.all_names: + idx += 1 + new_name += "_" + str(idx) + # now we have a unique name + self.all_names.add(new_name) + return new_name + + @staticmethod + def sanitize_block( + block, + sanitizer_vars, + sanitizer_ops, + main_input_types=None, + sanitize_model_inputs_outputs_only=False, + ): + """ + Sanitize the vars and op names inside the block to adhere to the format [a-zA-Z_][a-zA-Z0-9_]* + """ + + if sanitize_model_inputs_outputs_only: + NameSanitizer._sanitize_block_input_vars( + block, sanitizer_vars, main_input_types, sanitize_main_input_only=True + ) + NameSanitizer._sanitize_main_outputs_only(block, sanitizer_vars) + else: + NameSanitizer._sanitize_block_input_vars(block, sanitizer_vars, main_input_types) + NameSanitizer._sanitize_output_vars_and_nested_blocks( + block, sanitizer_vars, sanitizer_ops + ) + NameSanitizer._sanitize_op_names(block, sanitizer_ops) + + @staticmethod + def _sanitize_block_input_vars( + block, sanitizer_vars, main_input_types, sanitize_main_input_only=False + ): + + # iterate over all the block input vars and sanitize the names + if isinstance(block, Function): + # this is the "main" block + # block.inputs is a dict from input names to input vars + # iterate over the input vars of the main program and sanitize their names + new_input_dict = OrderedDict() + input_name_updated = False + for input_name, var in block.inputs.items(): + msg = "Main block's input name, '{}', is different from its corresponding var's name, '{}'." + assert input_name == var.name, msg.format(input_name, var.name) + new_name = sanitizer_vars.sanitize_name(var.name) + new_input_dict[new_name] = var + if new_name != var.name: + msg = "Input, '{}', of the source model, has been renamed to '{}' in the Core ML model." 
+ warnings.warn(msg.format(var.name, new_name)) + if var.name in block.placeholder_inputs: + block.placeholder_inputs[new_name] = block.placeholder_inputs.pop(var.name) + block.placeholder_inputs[new_name].set_name(new_name) + var.set_name(new_name) + input_name_updated = True + if main_input_types is not None: + # update prog's main_input_types, since we are updating the name of a model input here + for i in range(len(main_input_types)): + if main_input_types[i].name == input_name: + main_input_types[i].name = new_name + break + if input_name_updated: + block._input_dict = new_input_dict + elif not sanitize_main_input_only: + # in this case block is not the "main" function + # in this case block.inputs is a list of input vars of the block + for var in block.inputs: + new_name = sanitizer_vars.sanitize_name(var.name) + if new_name != var.name: + var.set_name(new_name) + + @staticmethod + def _sanitize_var_names(var, sanitizer_vars, emit_warning=False): + new_name = sanitizer_vars.sanitize_name(var.name) + if new_name != var.name: + if emit_warning: + msg = "Output, '{}', of the source model, has been renamed to '{}' in the Core ML model." + warnings.warn(msg.format(var.name, new_name)) + var.set_name(new_name) + + @staticmethod + def _sanitize_op_names(block, sanitizer_ops): + # iterate over all the ops and sanitize the op names + for op in list(block.operations): + if op.name is not None: + op.name = sanitizer_ops.sanitize_name(op.name) + + @staticmethod + def _sanitize_output_vars_and_nested_blocks(block, sanitizer_vars, sanitizer_ops): + for op in list(block.operations): + for b in op.blocks: + NameSanitizer.sanitize_block(b, sanitizer_vars, sanitizer_ops) + + for var in op.outputs: + if isinstance(block, Function) and var in block.outputs: + NameSanitizer._sanitize_var_names(var, sanitizer_vars, emit_warning=True) + else: + NameSanitizer._sanitize_var_names(var, sanitizer_vars) + + @staticmethod + def _sanitize_main_outputs_only(block, sanitizer_vars): + for op in list(block.operations): + for var in op.outputs: + if isinstance(block, Function) and var in block.outputs: + NameSanitizer._sanitize_var_names(var, sanitizer_vars, emit_warning=True) + + +@register_pass(namespace="common") +class sanitize_input_output_names(AbstractGraphPass): + """ + Sanitize the names of model input and output vars to make sure + that they are of the format as described in the NameSanitizer class; that is, + of the format ``[a-zA-Z_][a-zA-Z0-9_]*``. + """ + + def apply(self, prog): + sanitizer_vars = NameSanitizer(prefix="var_") + sanitizer_ops = NameSanitizer(prefix="op_") + + # sanitize the input/output of the main block + NameSanitizer.sanitize_block( + prog.functions["main"], + sanitizer_vars, + sanitizer_ops, + prog.main_input_types, + sanitize_model_inputs_outputs_only=True, + ) + + +@register_pass(namespace="common") +class update_output_dtypes(AbstractGraphPass): + """ + Update the dtypes of output vars of the main block to match the dtypes + provided in ``prog.main_output_types``, which in turn is populated by the + ``outputs`` argument provided by the user in the ``coremltools.convert()`` API. + This graph pass assumes that the list of outputs in ``prog.main_output_types`` (if not ``None``), + are in the same order as the output vars. 
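+
+    For example (illustrative): if the model's single output var is fp32 but the
+    corresponding entry in ``prog.main_output_types`` requests fp16, this pass
+    renames the fp32 var, inserts a ``cast`` op to fp16, and transfers the
+    original output name to the cast's output.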
+ """ + + def apply(self, prog): + user_provided_output_types = prog.main_output_types + main_func = prog.functions["main"] + output_vars = main_func.outputs + if user_provided_output_types is None or len(user_provided_output_types) == 0: + return + if len(output_vars) != len(user_provided_output_types): + msg = ( + "Number of outputs provided by the user, which is {}, " + "does not match the number of outputs generated by the model, which is {}" + ) + raise ValueError(msg.format(len(user_provided_output_types), len(output_vars))) + + new_outputs = [] + for i, output_type in enumerate(user_provided_output_types): + required_output_dtype = output_type.dtype + output_var = output_vars[i] + if ( + required_output_dtype is None + or not ( + types.is_tensor(output_var.sym_type) or types.is_scalar(output_var.sym_type) + ) + or required_output_dtype == output_var.dtype + ): + # no need to update the output var's dtype in this case + new_outputs.append(output_var) + else: + output_var_name = output_var.name + output_var.set_name( + output_var_name + "_type_" + types.builtin_to_string(output_var.dtype) + ) + with main_func: + output_var = mb.cast( + x=output_var, dtype=types.builtin_to_string(required_output_dtype) + ) + output_var.set_name(output_var_name) + new_outputs.append(output_var) + + main_func.set_outputs(new_outputs) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/quantization.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/quantization.py new file mode 100644 index 00000000..1a8383aa --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/defs/quantization.py @@ -0,0 +1,857 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from enum import Enum as _Enum +from typing import Set, Text + +import numpy as np + +from coremltools import _logger as logger +from coremltools.converters.mil.backend.mil.load import should_use_weight_file +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.ops.defs.iOS16 import ( + constexpr_affine_dequantize, + constexpr_lut_to_dense, + constexpr_sparse_to_dense, +) +from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass +from coremltools.converters.mil.mil.passes.helper import block_context_manager +from coremltools.converters.mil.mil.passes.pass_registry import register_pass +from coremltools.converters.mil.mil.program import Program +from coremltools.converters.mil.mil.types.type_mapping import ( + is_builtin, + nptype_from_builtin, + numpy_type_to_builtin_type, +) +from coremltools.models.neural_network.quantization_utils import _get_kmeans_lookup_table_and_weight + + +class ComputePrecision(_Enum): + FLOAT16 = "float16" + FLOAT32 = "float32" + + +class AbstractQuantizationPass(AbstractGraphPass): + """ + Base class for Post-Training Quantization transforms. 
+ + Derived class needs to implement following two methods: + - is_valid_op(op) + - transform_op(op) + """ + + type_eps = {} + type_min = {} + type_negmin = {} + + def __init__(self, op_selector=None): + super().__init__() + if op_selector is not None and not callable(op_selector): + raise TypeError( + "Argument `op_selector` needs to be a callable function which accepts " + "a MIL operation object and returns a boolean value." + ) + self.op_selector = op_selector + + def apply(self, prog): + """ + Walks over each operation in the graph and performs following two steps, + 1. Checks whether an operation is valid for that quantized transform using `is_valid_op` method. + 2. If yes, calls `transform_op` method of the derived quantized transform class. + + :param prog: MIL program + :return: Transformed MIL program + """ + if not isinstance(prog, Program): + raise TypeError('Transform "{}" can only be applied on PyMIL programs.'.format(self)) + + if getattr(self, "skip_ops_by_type", set()) and self.op_selector is not None: + raise ValueError( + "The graph pass option `skip_ops_by_type` cannot be set along with " + "the `op_selector` in FP16ComputePrecision. Please only use one " + "method to control which ops to operate on." + ) + + @block_context_manager + def apply_block(block): + for op in list(block.operations): + for b in op.blocks: + apply_block(b) + + if self.is_valid_op(op): + need_transform: bool + if self.op_selector is not None: + need_transform = self.op_selector(op) + else: + need_transform = op.op_type not in getattr(self, "skip_ops_by_type", set()) + if need_transform: + self.transform_op(op) + + for f in prog.functions.values(): + apply_block(f) + + def transform_op(self, op): + """ + Replaces an op with a transformed op. + + :param op: MIL operation + :return: None + """ + raise NotImplementedError( + 'Op transformation for quantization mode "{}" not implemented.'.format(self) + ) + + def is_valid_op(self, op): + """ + Checks whether an operation is valid for given quantized transform. + + :param op: MIL operation + :return: true | false + """ + raise NotImplementedError( + 'Operation Preconditions for quantization mode "{}" not implemented.'.format(self) + ) + + @classmethod + def _close_to_zero(cls, val, np_type): + if np_type not in cls.type_eps: + cls.type_eps[np_type] = np.finfo(np_type).eps + cls.type_min[np_type] = np.nextafter(0.0, 1.0, dtype=np_type) + cls.type_negmin[np_type] = np.nextafter(0.0, -1.0, dtype=np_type) + + return np.isclose(val, 0, atol=cls.type_min[np_type], rtol=cls.type_eps[np_type]) + + def __repr__(self): + return str(self) + + def __str__(self): + return type(self).__name__ + + +class FP16ComputePrecision(AbstractQuantizationPass): + """ + This transform does the following, for each valid op and if the "op_selector" return True: + - For each input of dtype float32, inject a "cast" op to change it to float16 dtype + - For each output of dtype float16, inject a "cast" op to change it back to float32 + """ + + def __init__(self, op_selector=None): + super(FP16ComputePrecision, self).__init__(op_selector=op_selector) + self.target_dtype = "fp16" + + # Var that feeds into multiple ops will be casted once and cached into this dict + # For reference: Checkout test_single_input_to_multiple_operations in `TestFP16CastTransform`. 
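+        # Illustrative example (not in the original source): if var "x" feeds both
+        # an add and a mul op, the first visit creates a cast var "x_to_fp16" and
+        # caches it here; the second visit reuses it instead of inserting another cast.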
+ self.cache_vars = {} + + def fp16_overflow(self, op): + # Constants with values more than 65504 or less than -65504 overflows in FP16 + for _, inputs in op.inputs.items(): + is_list_input = isinstance(inputs, (list, tuple)) + if not is_list_input: + inputs = [inputs] + for var in inputs: + if ( + var.op is not None + and var.op.op_type == "const" + and var.is_tensor_or_scalar_of(dtype="fp32") + ): + if np.max(np.abs(var.op.val.val), initial=0.0) > 65504: + return True + return False + + def is_valid_op(self, op): + + if op.op_type in ["cast", "while_loop", "cond"]: + return False + + if op.op_type in [ + "make_list", + "list_gather", + "list_scatter", + "list_read", + "list_write", + "list_length", + ]: + return False # rdar://74458192 + + if op.op_type in ["gru", "rnn", "lstm"]: + return False + + if self.fp16_overflow(op): + return False + + return True + + def is_valid_parameter(self, op, param_name): + type_domain = getattr(op.input_spec.input_types[param_name], "type_domain", None) + if type_domain is not None: + if len(type_domain) == 0: + return True + return types.fp16 in type_domain + return True + + def _check_underflow_to_zero(self, new_var, var): + # We check whether there are casted values that "becomes" 0 which is not ideal for eps purposes. + # However we skip arrays with more than 400 in case we compare through a large sparse matrix. + if ( + new_var.val is not None + and len(var.val.flatten()) < 400 + and self._close_to_zero(new_var.val, np.float16).any() + ): + value_modified = False + original_val = var.val.flatten() + new_val = new_var.val.flatten() + + for idx in range(len(original_val)): + if not self._close_to_zero(original_val[idx], np.float32) and self._close_to_zero( + new_val[idx], np.float16 + ): + new_val[idx] = ( + self.type_min[np.float16] + if np.sign(original_val[idx]) > 0 + else self.type_negmin[np.float16] + ) + value_modified = True + + if value_modified: + if np.isscalar(new_var.val): + new_var._sym_val.val = new_val[0] + else: + new_var._sym_val.val = new_val.reshape(new_var.val.shape) + + def transform_op(self, op): + block = op.enclosing_block + casted_inputs = {} + inputs_modified = False + + for param, inputs in op.inputs.items(): + # First loop, iterates over all the input parameters of an operation. + if not self.is_valid_parameter(op, param): + continue + + is_list_input = isinstance(inputs, (list, tuple)) + if not is_list_input: + inputs = [inputs] + + casted_inputs[param] = list(inputs[:]) + for i, var in enumerate(inputs): + # Second loop, iterates over all the vars of a python list corresponding to an input parameter. 
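+                # Only fp32 tensors/scalars need a cast; vars of any other dtype
+                # are skipped by the check below.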
+ if not var.is_tensor_or_scalar_of(dtype="fp32"): + continue + + inputs_modified = True + casted_var_name = var.name + "_to_fp16" + if ( + len(var._child_ops) > 1 + and casted_var_name in self.cache_vars + and (block.is_var_visible_in_block(self.cache_vars[casted_var_name])) + ): + casted_inputs[param][i] = self.cache_vars[casted_var_name] + else: + x = mb.cast(x=var, dtype="fp16", name=casted_var_name, before_op=op) + self._check_underflow_to_zero(x, var) + + casted_inputs[param][i] = x + if len(var._child_ops) > 1: + self.cache_vars[casted_var_name] = casted_inputs[param][i] + + if not is_list_input: + casted_inputs[param] = casted_inputs[param][0] + + if inputs_modified: + casted_inputs.update({k: v for k, v in op.inputs.items() if k not in casted_inputs}) + casted_inputs["name"] = op.name + "_cast" + casted_inputs["before_op"] = op + quant_output = getattr(mb, op.op_type)(**casted_inputs) + + if not isinstance(quant_output, (list, tuple)): + quant_output = [quant_output] + + for old_output_var, new_output_var in zip(op.outputs, quant_output): + if old_output_var.is_tensor_or_scalar_of(dtype="fp32") and ( + not new_output_var.is_tensor_or_scalar_of(dtype="fp32") + ): + x = mb.cast( + x=new_output_var, + dtype="fp32", + name=new_output_var.name + "_to_fp32", + before_op=op, + ) + op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=op, + old_var=old_output_var, + new_var=x, + force_replace=True, + ) + else: + op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=op, + old_var=old_output_var, + new_var=new_output_var, + force_replace=True, + ) + + block.remove_ops([op]) + + +@register_pass(namespace="common") +class add_fp16_cast(FP16ComputePrecision): + """ + For each input of dtype float32, inject a ``cast`` op to change it to float16 dtype. + + For each output of dtype float16, inject a ``cast`` op to change it back to float32. + + This pass is the registered interface for FP16ComputePrecision, which makes it consistent with + other passes' interfaces. + + Support options: + + - ``skip_ops_by_type``: Skip op types specified by comma-separated string; for example, ``"mul,const"``. + """ + + _skip_ops_by_type: Set[Text] = set() + + @property + def skip_ops_by_type(self): + return self._skip_ops_by_type + + @skip_ops_by_type.setter + def skip_ops_by_type(self, criteria: Text): + self._skip_ops_by_type = set(criteria.split(",")) + + +class SparseParams: + def __init__(self, nonzero_data=None, mask=None, shape=None): + self.nonzero_data = nonzero_data + self.mask = mask + self.shape = shape + + +class WeightSparsifier(AbstractQuantizationPass): + """ + This transform does the following, for each const op and if the "op_selector" return True: + - (self.sparsity) fraction of values with the least absolute value are zeroed out. + - If fake_compression=False, Zeroed-Out Value is encoded via constexpr_sparse_to_dense op + - If fake_compression=True, Zeroed-Out Value is encoded via const op + - Old const is replaced by a new operation with zeroed-out value. 
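+
+    For example, with mode="threshold_based" and threshold=0.01, the weight
+    [0.002, -0.5, 0.009, 1.2] is sparsified to [0.0, -0.5, 0.0, 1.2].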
+ """ + + WEIGHT_SPARSIFICATION_MODES = ("THRESHOLD_BASED", "PERCENTILE_BASED") + + def __init__( + self, + mode="threshold_based", + threshold=1e-3, + target_percentile=1.0, + fake_compression=False, + op_selector=None, + ): + super().__init__(op_selector=op_selector) + self.fake_compression = fake_compression + self.mode = mode.upper() + self.threshold = threshold + self.target_percentile = target_percentile + + if not self.mode in WeightSparsifier.WEIGHT_SPARSIFICATION_MODES: + msg = "Only mode {} supported for weight sparsification. Got mode {}.".format( + WeightSparsifier.WEIGHT_SPARSIFICATION_MODES, self.mode + ) + raise ValueError(msg) + + if self.mode == "PERCENTILE_BASED" and ( + self.target_percentile < 0 or self.target_percentile > 1 + ): + raise ValueError( + "Invalid value of target_percentile: {}. Needs to be in [0, 1]".format( + self.target_percentile + ) + ) + + if self.mode == "THRESHOLD_BASED" and self.threshold < 0: + raise ValueError( + "Invalid value of threshold: {}. Needs to be in [0, inf)".format(self.threshold) + ) + + def is_valid_op(self, op): + if op.op_type == "const" and should_use_weight_file(op.val.val): + return True + return False + + @staticmethod + def compress(val, mode, target_percentile=None, threshold=None): + + mode = mode.upper() + + def sparsify_with_percentile(val, target_percentile): + q = target_percentile * 100 + return np.where(np.abs(val) <= np.percentile(np.abs(val), q), 0, val) + + def sparsify_with_thresohld(val, threshold): + return np.where(np.abs(val) <= threshold, 0, val) + + if not isinstance(val, (np.ndarray, np.generic)): + raise ValueError("Only numpy arrays are supported") + + flattened_val = val.flatten() + + if mode == "PERCENTILE_BASED": + flattened_val = sparsify_with_percentile(flattened_val, target_percentile) + elif mode == "THRESHOLD_BASED": + flattened_val = sparsify_with_thresohld(flattened_val, threshold) + + params = SparseParams() + params.nonzero_data = flattened_val[np.where(flattened_val != 0)] + params.mask = np.packbits(np.where(flattened_val != 0, 1, 0), bitorder="little") + params.shape = val.shape + return params + + @staticmethod + def decompress(params): + if not isinstance(params, SparseParams): + raise ValueError("Invalid type of params") + return constexpr_sparse_to_dense.decompress(params.nonzero_data, params.mask, params.shape) + + def transform_op(self, op): + block = op.enclosing_block + sparse_params = self.compress(op.val.val, self.mode, self.target_percentile, self.threshold) + + if not self.fake_compression: + new_var = mb.constexpr_sparse_to_dense( + nonzero_data=sparse_params.nonzero_data, + mask=sparse_params.mask, + shape=np.uint32(sparse_params.shape), + before_op=op, + name=op.name + "_sparsified", + ) + else: + decompressed_val = self.decompress(sparse_params) + new_var = mb.const( + val=decompressed_val, + before_op=op, + name=op.name + "_fake_sparsified", + ) + + op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=op, + old_var=op.outputs[0], + new_var=new_var, + no_check_var_types=True, + ) + + block.remove_ops([op]) + + +class LutParams: + def __init__(self, lut=None, indices=None, shape=None): + self.lut = lut + self.indices = indices + self.shape = shape + + +class WeightPalettizer(AbstractQuantizationPass): + """ + This transform does the following, for each const op and if the "op_selector" return True: + - A linear look up table with 2**(nbits) entries is created and value is represented via indexing into this look up table. 
+ - If fake_compression=False, compressed value is encoded via constexpr_lut_to_dense op + - If fake_compression=True, compressed value is decompressed and then encoded via const op + - Old const op is replaced by a newly created operation. + """ + + WEIGHT_PALETTIZATION_MODES = ("KMEANS", "UNIFORM", "UNIQUE", "CUSTOM") + + def __init__( + self, nbits, fake_compression=False, op_selector=None, mode="kmeans", lut_function=None + ): + super().__init__(op_selector=op_selector) + self.fake_compression = fake_compression + self.nbits = nbits + self.mode = mode.upper() + self.lut_function = lut_function + + if not self.mode in WeightPalettizer.WEIGHT_PALETTIZATION_MODES: + msg = "Only mode {} supported for weight palettization. Got mode {}.".format( + WeightPalettizer.WEIGHT_PALETTIZATION_MODES, self.mode + ) + raise ValueError(msg) + + if nbits is None and self.mode in ("KMEANS", "UNIFORM"): + msg = "nbits must be provided for mode {}".format(mode) + raise ValueError(msg) + + if nbits is not None and self.mode in ("UNIQUE", "CUSTOM"): + msg = "nbits must NOT be provided for mode {}".format(mode) + raise ValueError(msg) + + if self.nbits is not None and self.nbits not in (1, 2, 4, 6, 8): + raise ValueError( + "Invalid value of nbits ({}) for palettization. Supported bits are {{1, 2, 4, 6, 8}}".format( + nbits + ) + ) + + if (self.mode == "CUSTOM") ^ (lut_function is not None): + msg = "lut_function must be None if mode is not custom, and that it cannot be None when the mode is custom." + raise ValueError(msg) + + if self.mode == "CUSTOM" and not callable(self.lut_function): + msg = "A function object must be provided as lut_function. Got a lut_functions as type {}".format( + type(self.lut_function) + ) + raise ValueError(msg) + + def is_valid_op(self, op): + if op.op_type == "const" and should_use_weight_file(op.val.val): + return True + return False + + @staticmethod + def compress(val, mode, nbits=None, lut_function=None): + + mode = mode.upper() + + def compress_kmeans(val, nbits): + lut, indices = _get_kmeans_lookup_table_and_weight(nbits, val) + lut = lut.astype(val.dtype) + indices = indices.astype(np.uint8) + return lut, indices + + def compress_uniform(val, nbits): + val = val.flatten() + val_min = np.amin(val) + val_max = np.amax(val) + scale = (val_max - val_min) / ((1 << nbits) - 1) + indices = np.round(((val - val_min) / (val_max - val_min)) * ((1 << nbits) - 1)).astype( + np.uint8 + ) + lut = np.array(range(0, 1 << nbits)) * scale + val_min + lut = lut.astype(val.dtype) + return lut, indices + + def get_nbits_for_unique_mode(val): + val = val.flatten() + unique_vals = np.unique(val).tolist() + for nbits in (1, 2, 4, 6, 8): + if len(unique_vals) <= 1 << nbits: + return nbits + msg = "weight value cannot be represented in an 8 bits palettization. Skipped." + logger.warning(msg) + return None + + def compress_unique(val, nbits): + val = val.flatten() + unique_vals = np.unique(val).tolist() + if len(unique_vals) > 1 << nbits: + msg = "Too many unique values {} in the weight. 
Couldn't represented in {} bits.".format( + len(unique_vals), nbits + ) + raise ValueError(msg) + lut = [0] * (1 << nbits) + lut[: len(unique_vals)] = unique_vals + indices = np.zeros((len(val),)) + for i, k in enumerate(lut[:len(unique_vals)]): + indices += (i + 1) * (val == k).astype(np.int32) + indices = indices - 1 + assert ( + len(np.where(indices == -1)[0]) == 0 + ), "weight must be corresponding to one existing indice" + + lut = np.array(lut).astype(val.dtype) + indices = indices.astype(np.uint8) + return lut, indices + + def pack_indices_into_bytes_array(indices, nbits): + bitarray = np.unpackbits(indices.reshape(-1, 1), bitorder="little", axis=-1)[:, :nbits] + return np.packbits(bitarray.flatten(), bitorder="little") + + def check_lut_parameters_are_valid(val, lut, indices): + if not isinstance(lut, np.ndarray) or not isinstance(indices, np.ndarray): + raise ValueError("LUT and indices must be type of numpy array.") + + if indices.size != val.size: + msg = "Indices size ({}) mismatched with the original weight({}).".format( + indices.size, val.size + ) + raise ValueError(msg) + + if len(indices.shape) != 1 or indices.dtype != np.uint8: + msg = "Indices must be a numpy vector of type uint8. Found shape {} with type {}".format( + indices.shape, indices.dtype + ) + raise ValueError(msg) + + if lut.dtype != val.dtype: + msg = "Dtype mismatched between LUT ({}) and weight ({})".format( + lut.dtype, val.dtype + ) + raise ValueError(msg) + + if not isinstance(val, (np.ndarray, np.generic)): + raise ValueError("Only numpy arrays are supported") + + if mode == "KMEANS": + lut, indices = compress_kmeans(val, nbits) + elif mode == "UNIFORM": + lut, indices = compress_uniform(val, nbits) + elif mode == "UNIQUE": + nbits = get_nbits_for_unique_mode(val) + if nbits is None: + return None + lut, indices = compress_unique(val, nbits) + elif mode == "CUSTOM": + lut, indices = lut_function(val) + + check_lut_parameters_are_valid(val, lut, indices) + + params = LutParams() + params.lut = lut + params.shape = val.shape + params.indices = pack_indices_into_bytes_array(indices, int(np.log2(lut.shape[0]))) + return params + + @staticmethod + def decompress(params): + if not isinstance(params, LutParams): + raise ValueError("Invalid type of params") + return constexpr_lut_to_dense.decompress(params.lut, params.indices, params.shape) + + def transform_op(self, op): + block = op.enclosing_block + lut_params = self.compress(op.val.val, self.mode, self.nbits, self.lut_function) + + if lut_params is None: + return + + if not self.fake_compression: + new_var = mb.constexpr_lut_to_dense( + indices=lut_params.indices, + lut=lut_params.lut, + shape=np.uint32(lut_params.shape), + before_op=op, + name=op.name + "_palettized", + ) + else: + decompressed_val = self.decompress(lut_params) + new_var = mb.const( + val=decompressed_val, + before_op=op, + name=op.name + "_fake_palettized", + ) + + op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=op, + old_var=op.outputs[0], + new_var=new_var, + no_check_var_types=True, + ) + + block.remove_ops([op]) + + +class AffineQuantParams: + def __init__(self, quantized_data=None, zero_point=None, scale=None, axis=None): + self.quantized_data = quantized_data + self.zero_point = zero_point + self.scale = scale + self.axis = axis + + +class WeightAffineQuantizer(AbstractQuantizationPass): + """ + This transform does the following, for each const op and if the "op_selector" return True: + - Values are linearly quantized into unsigned 8-bits. 
+ - If fake_compression=False, compressed value is encoded via constexpr_affine_dequantize op + - If fake_compression=True, compressed value is decompressed and then encoded via const op + - Old const is replaced by a newly created operation. + """ + + WEIGHT_AFFINE_QUANTIZATION_MODES = ("LINEAR_SYMMETRIC", "LINEAR") + WEIGHT_AFFINE_DTYPES = (types.int8, types.uint8) + + def __init__(self, fake_compression=False, op_selector=None, mode="linear", dtype=np.int8): + super().__init__(op_selector=op_selector) + self.fake_compression = fake_compression + self.mode = mode.upper() + + # check mode + if not self.mode in WeightAffineQuantizer.WEIGHT_AFFINE_QUANTIZATION_MODES: + msg = "Only mode {} supported for weight affine quantization. Got mode {}.".format( + WeightAffineQuantizer.WEIGHT_AFFINE_QUANTIZATION_MODES, self.mode + ) + raise ValueError(msg) + + # check dtype + msg = f"dtype={dtype} is unsupported for affine_quantize_weights." + if is_builtin(dtype): + self.dtype = dtype + else: + try: + self.dtype = numpy_type_to_builtin_type(dtype) + except TypeError: + raise ValueError(msg) + + if self.dtype not in WeightAffineQuantizer.WEIGHT_AFFINE_DTYPES: + raise ValueError(msg) + + def is_valid_op(self, op): + if op.op_type == "const" and should_use_weight_file(op.val.val): + return True + return False + + @staticmethod + def _get_axis(op): + axis = 0 + var = op.outputs[0] + if len(var.child_ops) == 1 and var.child_ops[0].op_type == "conv_transpose": + axis = 1 + return axis + + @staticmethod + def compress(val, axis, mode, dtype): + def _ensure_numerical_range_and_cast(val, low, high, np_dtype): + ''' + For some cases, the computed quantized data might exceed the data range. + For instance, after rounding and addition, we might get `128` for the int8 quantization. + This utility function ensures the val in the data range before doing the cast. 
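+            For example, for int8 "LINEAR" quantization the valid range is [-128, 127],
+            so a computed value of 128 is clamped down to 127 before the cast.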
+ ''' + val = np.minimum(val, high) + val = np.maximum(val, low) + return val.astype(np_dtype) + + mode = mode.upper() + mode_dtype_to_range = { + (types.int8, "LINEAR"): (-128, 127), + (types.int8, "LINEAR_SYMMETRIC"): (-127, 127), + (types.uint8, "LINEAR"): (0, 255), + (types.uint8, "LINEAR_SYMMETRIC"): (0, 254), + } + + if not isinstance(val, (np.ndarray, np.generic)): + raise ValueError("Only numpy arrays are supported") + + params = AffineQuantParams() + axes = tuple([i for i in range(len(val.shape)) if i != axis]) + val_min = np.amin(val, axis=axes, keepdims=True) + val_max = np.amax(val, axis=axes, keepdims=True) + + if mode == "LINEAR_SYMMETRIC": + # For the linear_symmetric mode, the range is symmetrical to 0 + max_abs = np.maximum(np.abs(val_min), np.abs(val_max)) + val_min = -max_abs + val_max = max_abs + else: + assert mode == "LINEAR" + # For the linear mode, we need to make sure the data range contains `0` + val_min = np.minimum(0.0, val_min) + val_max = np.maximum(0.0, val_max) + + q_val_min, q_val_max = mode_dtype_to_range[(dtype, mode)] + + # Set the zero point to symmetric mode + np_dtype = nptype_from_builtin(dtype) + if mode == "LINEAR_SYMMETRIC": + if dtype == types.int8: + params.zero_point = (0 * np.ones(val_min.shape)).astype(np.int8) + else: + assert dtype == types.uint8 + params.zero_point = (127 * np.ones(val_min.shape)).astype(np.uint8) + else: + assert mode == "LINEAR" + params.zero_point = (q_val_min * val_max - q_val_max * val_min) / (val_max - val_min) + params.zero_point = np.round(params.zero_point) + params.zero_point = _ensure_numerical_range_and_cast(params.zero_point, q_val_min, q_val_max, np_dtype) + + # compute the params + params.scale = (val_max - val_min) / (q_val_max - q_val_min) + params.scale = params.scale.astype(val.dtype).squeeze() + + params.quantized_data = np.round( + val * (q_val_max - q_val_min) / (val_max - val_min) + ) + params.quantized_data = (params.quantized_data + params.zero_point) + params.quantized_data = _ensure_numerical_range_and_cast(params.quantized_data, q_val_min, q_val_max, np_dtype) + + params.zero_point = params.zero_point.squeeze() + params.axis = axis + + return params + + @staticmethod + def decompress(params): + if not isinstance(params, AffineQuantParams): + raise ValueError("Invalid type of params") + return constexpr_affine_dequantize.decompress( + params.quantized_data, params.zero_point, params.scale, params.axis + ) + + def transform_op(self, op): + block = op.enclosing_block + quant_params = self.compress(op.val.val, self._get_axis(op), self.mode, self.dtype) + + if not self.fake_compression: + new_var = mb.constexpr_affine_dequantize( + quantized_data=quant_params.quantized_data, + zero_point=quant_params.zero_point, + scale=quant_params.scale, + axis=quant_params.axis, + before_op=op, + name=op.name + "_affine_quantized", + ) + else: + decompressed_val = self.decompress(quant_params) + new_var = mb.const( + val=decompressed_val, + before_op=op, + name=op.name + "_fake_affine_quantized", + ) + + op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=op, + old_var=op.outputs[0], + new_var=new_var, + no_check_var_types=True, + ) + + block.remove_ops([op]) + + +class WeightDecompressor(AbstractQuantizationPass): + """ + This graph pass transforms the constexpr ops back into mb.const op. 
+ constexpr ops includes: + (1) constexpr_affine_dequantize + (2) constexpr_lut_to_dense + (3) constexpr_sparse_to_dense + """ + + def __init__(self, op_selector): + super().__init__(op_selector=op_selector) + + def is_valid_op(self, op): + return op.op_type in ( + "constexpr_affine_dequantize", + "constexpr_lut_to_dense", + "constexpr_sparse_to_dense", + ) + + def transform_op(self, op): + block = op.enclosing_block + + decompressed_val = op.value_inference() + new_var = mb.const( + val=decompressed_val, + before_op=op, + name=op.name, + ) + + op.enclosing_block.replace_uses_of_var_after_op( + anchor_op=op, + old_var=op.outputs[0], + new_var=new_var, + no_check_var_types=True, + force_replace=True, + ) + + block.remove_ops([op]) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/graph_pass.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/graph_pass.py new file mode 100644 index 00000000..14869c50 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/graph_pass.py @@ -0,0 +1,73 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from abc import ABC, abstractmethod +from typing import Callable, List, Optional, Text, Union + +from coremltools.converters.mil import Operation, Program + + +class PassOption: + """ + Option that will be applied in a graph pass. + + Each graph pass need to have their own implementation to support the corresponding option. + Available options are documented in each pass's docstring. + """ + + # The Callable option_val is for op_selector backward compatibility only. + def __init__(self, option_name: Text, option_val: Union[Text, Callable[[Operation], bool]]): + if not isinstance(option_name, Text): + raise ValueError(f"The option name should be text, but got {type(option_name)}") + if not isinstance(option_val, Text) and not isinstance(option_val, Callable): + raise ValueError( + f"The option value should be text or callable, but got {type(option_val)}" + ) + self._option_name = option_name + self._option_val = option_val + + def __str__(self): + return f"{self.option_name}: {self.option_val}" + + @property + def option_name(self): + return self._option_name + + @property + def option_val(self): + return self._option_val + + +class AbstractGraphPass(ABC): + """ + Base class for a graph pass. + + Each graph pass should be a subclass of this and implement the `apply` method. + Each graph pass can also implement their own supported options. + See examples of `skip_ops_by_type` in `add_fp16_cast` and `skip_const_by_size` in + `const_elimination` about how to support new options in each pass. + """ + + def __call__(self, prog: Program): + if not prog.skip_all_passes: + self.apply(prog) + + def __str__(self): + return type(self).__name__ + + @abstractmethod + def apply(self, prog: Program): + pass + + def set_options(self, pass_options: Optional[List[PassOption]] = None): + """Set pass options.""" + if pass_options is not None: + for pass_option in pass_options: + option_name = pass_option.option_name + if not hasattr(self, option_name): + raise NotImplementedError( + f"The graph pass `{self}` doesn't support option `{option_name}`." 
+                    )
+                setattr(self, option_name, pass_option.option_val)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/helper.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/helper.py
new file mode 100644
index 00000000..d9dddca8
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/helper.py
@@ -0,0 +1,188 @@
+# Copyright (c) 2021, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from typing import List
+
+import numpy as np
+
+from coremltools.converters.mil.mil import Block, Operation, Var
+from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
+
+
+def block_context_manager(func):
+    """
+    This decorator executes a function under the context manager `with block`.
+    For instance, given a function `func` with an input block and other arguments:
+
+    def func(block, *args):
+        ...
+        with block:
+            op_1 = mb.add(...)
+        ...
+        with block:
+            op_2 = mb.relu(...)
+
+    It can be streamlined as:
+
+    @block_context_manager
+    def func(block, *args):
+        ...
+        op_1 = mb.add(...)
+        ...
+        op_2 = mb.relu(...)
+
+    Note that the first argument of the function must have type Block.
+    It is highly recommended to decorate a function with block_context_manager if it
+    calls `with block` multiple times, since every time the code exits `block`, an
+    expensive _propagate_nonreplaceable_vars() is invoked. The decorator reduces the
+    overall number of `with block` entries.
+    """
+    def wrapper(*args):
+        # Make it compatible with class methods.
+        if isinstance(args[0], AbstractGraphPass):
+            block = args[1]
+        else:
+            block = args[0]
+
+        if not isinstance(block, Block):
+            raise ValueError(
+                "The function decorated with block_context_manager must have a Block "
+                "type argument as the first input."
+            )
+        with block:
+            return func(*args)
+    return wrapper
+
+
+def _check_child_op_type(op, child_op_type):
+    """
+    :param op: operation
+    :param child_op_type: str
+    :return: True if op has exactly 1 child op and that child's type matches child_op_type
+    """
+    if len(op.outputs) != 1:
+        return False
+    child_ops = list(op.outputs[0].child_ops)
+    if len(child_ops) != 1:
+        return False
+    if child_ops[0].op_type == child_op_type:
+        return True
+    return False
+
+
+def _check_no_output_connection(block: Block, ops: List[Operation]) -> bool:
+    """
+    Check that none of the ops in this pattern, except possibly the last one, is
+    connected to a block output.
+
+    :param block: Block
+    :param ops: List of operations to check on. 
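+    :return: True if none of the ops, except possibly the last one, produces a block output.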
+ """ + for op in ops[:-1]: + for out in op.outputs: + if out in block.outputs: + return False + return True + + +def _check_var_scalar_value_in_interval(x, lower_bound, upper_bound): + """ + :param x: var + :param lower_bound: a scalar value + :param upper_bound: a scalar value + :return: True if the value of var is in the interval [lower_bound, upper_bound] + """ + if x.val is None: + return False + if not isinstance(x.val, (np.ndarray, np.generic)): + return False + + if isinstance(x.val, np.ndarray): + if x.val.size != 1: + return False + x_val = x.val[:][0] if len(x.val.shape) > 0 else x.val[()] + else: + x_val = x.val + + if x_val >= lower_bound and x_val <= upper_bound: + return True + return False + + +def _check_var_scalar_value(x, val, tol=1e-3): + """ + :param x: var + :param val: a scalar value + :return: True if x.val is equal to val otherwise return False + """ + if x.val is None: + return False + if not isinstance(x.val, np.ndarray) and not np.isscalar(x.val): + return False + + if isinstance(x.val, np.ndarray): + if x.val.size != 1: + return False + if len(x.val.shape) == 0: + x_val = x.val + else: + x_val = x.val[:][0] if len(x.val.shape) > 0 else x.val[()] + else: + x_val = x.val + + if abs(x_val - val) < tol: + return True + return False + +def _are_ops_identical(op1, op2): + ''' + Return True, if all inputs of op1 and op2 are identical. + non-constant inputs must refer to the same object, and constant inputs must have the same value + ''' + + def _are_values_identical(val1, val2): + np_arr1 = np.array(val1) + np_arr2 = np.array(val2) + return np.array_equal(np_arr1, np_arr2) + + def _are_vars_identical(var1, var2): + if var1.val is None and var2.val is None: + if var1 != var2: + return False + elif var1.val is not None and var2.val is not None: + if var1.dtype != var2.dtype: + return False + if not _are_values_identical(var1.val, var2.val): + return False + else: + return False + return True + + if op1 == op2: + return True + if op1.op_type != op2.op_type: + return False + if len(op1.inputs) != len(op2.inputs): + return False + + for key, value1 in op1.inputs.items(): + if key not in op2.inputs: + return False + value2 = op2.inputs[key] + if isinstance(value1, Var) and isinstance(value2, Var): + if not _are_vars_identical(value1, value2): + return False + elif isinstance(value1, (list, tuple)) and isinstance(value2, (list, tuple)): + if len(value1) != len(value2): + return False + else: + for i, v in enumerate(value1): + if not _are_vars_identical(v, value2[i]): + return False + else: + return False + + assert len(op1.blocks) == 0, "this method does not handle ops that have blocks in it" + assert len(op2.blocks) == 0, "this method does not handle ops that have blocks in it" + return True diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/pass_pipeline.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/pass_pipeline.py new file mode 100644 index 00000000..359876e9 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/pass_pipeline.py @@ -0,0 +1,380 @@ +# Copyright (c) 2023, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from __future__ import annotations + +from typing import Dict, List, Optional, Set, Text, Union + +from tqdm import tqdm + +from coremltools import _logger as logger +from coremltools.converters._profile_utils import _profile +from coremltools.converters.mil import Program +from coremltools.converters.mil.mil.passes.graph_pass import PassOption +from coremltools.converters.mil.mil.passes.pass_registry import PASS_REGISTRY + +_COMMON_PASSES: List[Text] = [ + "common::lower_complex_dialect_ops", + "common::update_output_dtypes", + "common::cast_optimization", + "common::const_elimination", + "common::sanitize_input_output_names", + "common::divide_to_multiply", + "common::add_conv_transpose_output_shape", + "common::const_elimination", + "common::loop_invariant_elimination", + "common::remove_symbolic_reshape", + "common::noop_elimination", + "common::fuse_matmul_weight_bias", + "common::fuse_linear_bias", + "common::fuse_gelu_tanh_approximation", + "common::fuse_gelu_exact", + "common::fuse_leaky_relu", + "common::rank0_expand_dims_swap", + "common::compose_conv1d", # compose conv1d before any other conv passes + "common::use_reflection_padding", + "common::merge_consecutive_paddings", + # Should come after use_reflection_padding, which will introduce new padding layers + "common::fuse_pad_conv", # Should come after merge_consecutive_paddings + "common::image_input_preprocess", + "common::replace_stack_reshape", + # should come before detect_concat_interleave since it may add concat + "common::reduce_transposes", + "common::fuse_conv_scale", + "common::fuse_conv_bias", + "common::fuse_onehot_matmul_to_gather", + "common::fuse_layernorm_or_instancenorm", + # should come after reduce_transposes, to detect instance_norm + "common::fuse_elementwise_to_batchnorm", # should come after fuse_layernorm_or_instancenorm + "common::fuse_reduce_mean", # should come after fuse_layernorm_or_instancenorm + "common::fuse_conv_batchnorm", # should come after fuse_elementwise_to_batchnorm + "common::fuse_conv_scale", + # Re-run the fuse conv scale pass after the conv and batch_norm are fused + "common::fuse_conv_bias", + # Re-run the fuse conv bias pass after the conv and batch_norm are fused + "common::fuse_conv_batchnorm", + # In some cases, we need to run conv / batch_norm fusion again after the fuse_conv_scale and fuse_conv_bias passes + "common::detect_concat_interleave", + "common::concat_to_pixel_shuffle", + # should come after detect_concat_interleave and after replace_stack_reshape + "common::fuse_prelu", + # reduce_transpose pass should run before and after this pass (the one after will be run during the cleanup passes stage) + "common::prelu_to_lrelu", + "common::merge_consecutive_relus", + "common::merge_consecutive_reshapes", + "common::merge_consecutive_transposes", + # "expand_high_rank_reshape_and_transpose" must come after "common::merge_consecutive_transposes" + "common::expand_high_rank_reshape_and_transpose", + "common::reduce_transposes", + # "remove_redundant_ops" pass should be applied towards the end, once other graph passes have done their optimizations. 
+ # For instance, it should come after passes such as "reduce_transpose" that can introduce redundant transposes + # in the network (while reducing the total number of transposes), and after passes such as "fuse_layernorm_or_instancenorm" + # which detects patterns that involve redundant ops ("sub") etc. + "common::remove_redundant_ops", + "common::add_fp16_cast", # Will be removed if compute precision is not FP16. + "common::dead_code_elimination", # always end with dce +] + +_CLEANUP_PASSES: List[Text] = [ + "common::dead_code_elimination", + "common::const_elimination", + "common::cast_optimization", + "common::const_elimination", + "common::loop_invariant_elimination", + "common::noop_elimination", + "common::dedup_op_and_var_names", + "common::reduce_transposes", # fuse_layernorm_or_instancenorm can potentially add transposes + "common::remove_redundant_ops", + "common::topological_reorder", + "common::dead_code_elimination", # always end with dce +] + +_FRONTEND_TORCH_PASSES = [ + "common::dead_code_elimination", + "common::loop_invariant_elimination", + "common::dead_code_elimination", + "torch::torch_upsample_to_core_upsample", + "torch::torch_tensor_assign_to_core", +] + +_FRONTEND_TF1_PASSES = [ + "common::dead_code_elimination", + "common::loop_invariant_elimination", + "tensorflow::backfill_make_list_elem_type", + # DCE to reduce tf_lstm_block outputs and allow lstm_rewrite to + # ssa lstm + "common::dead_code_elimination", + # tensorflow::tf_lstm_to_core_lstm must come before + # tensorflow::expand_tf_lstm + "tensorflow::tf_lstm_to_core_lstm", + "tensorflow::expand_tf_lstm", +] + +_FRONTEND_TF2_PASSES = [ + "common::dead_code_elimination", + "common::loop_invariant_elimination", + # tensorflow2::remove_vacuous_cond should come before + # tensorflow::backfill_make_list_elem_type. + "tensorflow2::remove_vacuous_cond", + "tensorflow::backfill_make_list_elem_type", + # DCE to reduce tf_lstm_block outputs and allow lstm_rewrite to + # ssa lstm + "common::dead_code_elimination", + # tensorflow::tf_lstm_to_core_lstm must come before + # tensorflow::expand_tf_lstm + "tensorflow::tf_lstm_to_core_lstm", + "tensorflow::expand_tf_lstm", +] + +_BACKEND_MIL_PASSES = [ + "common::const_elimination", + "mil_backend::adjust_io_to_supported_types", + "mil_backend::insert_image_preprocessing_ops", + "mil_backend::fuse_activation_silu", + "common::const_elimination", # rank0_expand_dims_swap might introduce some new const tensor + "common::cast_optimization", + "common::dead_code_elimination", + "mil_backend::sanitize_name_strings", + "common::dedup_op_and_var_names", + "nn_backend::handle_unused_inputs", # must come after dce. +] + +_BACKEND_NN_PASSES = [ + "nn_backend::decompose_conv1d", # at the beginning of nn pass + "nn_backend::commingle_loop_vars", + "nn_backend::handle_return_inputs_as_outputs", + "common::const_elimination", + # "remove_redundant_ops" pass should be applied towards the end, once other graph passes have done their optimizations. + # For instance, it should come after passes such as "reduce_transpose" that can introduce redundant transposes + # in the network (while reducing the total number of transposes), and after passes such as "fuse_layernorm_or_instancenorm" + # which detects patterns that involve redundant ops ("sub") etc. + "common::remove_redundant_ops", + "common::dead_code_elimination", + "nn_backend::handle_unused_inputs", # must come after dce. + "nn_backend::alert_return_type_cast", # must be at the end. 
+]
+
+
+class PassPipeline:
+    """
+    A pipeline that contains graph passes.
+
+    Create a default pipeline (with all default graph passes that will operate on the program):
+
+    .. sourcecode:: python
+
+        pipeline = PassPipeline()
+
+    Create an empty pipeline (this will result in no graph passes being applied to the model):
+
+    .. sourcecode:: python
+
+        pipeline = PassPipeline.get_empty_pipeline()
+
+    Add passes to the pipeline:
+
+    .. sourcecode:: python
+
+        pipeline = ct.PassPipeline()
+        pipeline.append_pass("common::reduce_transposes")
+        pipeline.insert_pass(index=0, pass_name="common::reduce_transposes")
+        # Can also specify all passes by setting the passes of the pipeline.
+        pipeline.passes = ["common::reduce_transposes", "common::add_fp16_cast"]
+
+    Remove passes:
+
+    .. sourcecode:: python
+
+        # Remove a pass at a specific index.
+        pipeline.remove_pass(index=10)
+        # Remove passes by names.
+        pipeline.remove_passes({"common::add_fp16_cast", "common::reduce_transposes"})
+
+    Inspect passes in the pipeline:
+
+    .. sourcecode:: python
+
+        # Get all passes.
+        pass_names = pipeline.passes
+        # Find indexes of a specific pass.
+        pass_indexes = [
+            idx
+            for idx, pass_name in enumerate(pass_names)
+            if pass_name == "common::reduce_transposes"
+        ]
+
+    Set options for a specific pass:
+
+    .. sourcecode:: python
+
+        pipeline = ct.PassPipeline()
+        pipeline.set_options(
+            pass_name="common::const_elimination",
+            options={"skip_const_by_size": "100000"},
+            override=False,
+        )
+    """
+
+    _PIPELINE_NAME_TO_PASSES = {
+        "default": _COMMON_PASSES + _CLEANUP_PASSES,
+        "empty": [],
+        # Frontend pipelines.
+        "frontend_milinternal": [],
+        "frontend_pytorch": _FRONTEND_TORCH_PASSES,
+        "frontend_tensorflow": _FRONTEND_TF1_PASSES,
+        "frontend_tensorflow2": _FRONTEND_TF2_PASSES,
+        # Backend pipelines.
+        "backend_mlprogram": _BACKEND_MIL_PASSES,
+        "backend_neuralnetwork": _BACKEND_NN_PASSES,
+        "backend_milinternal": [],
+    }
+
+    def __init__(self, pass_names=None, pipeline_name="default"):
+        if pass_names is None:
+            pass_names = _COMMON_PASSES + _CLEANUP_PASSES
+        self._pass_names: List[Text] = pass_names
+        self._pass_options: Dict[Text, List[PassOption]] = dict()
+        self._pipeline_name = pipeline_name
+
+    def __str__(self):
+        return self._pipeline_name
+
+    @property
+    def passes(self):
+        return self._pass_names
+
+    @passes.setter
+    def passes(self, passes: List[Text]):
+        for pass_name in passes:
+            if pass_name not in PASS_REGISTRY:
+                raise ValueError(f"The pass {pass_name} is not registered.")
+        self._pass_names = list(passes)
+
+    @property
+    def pipeline_name(self):
+        return self._pipeline_name
+
+    @pipeline_name.setter
+    def pipeline_name(self, pipeline_name: Text):
+        self._pipeline_name = pipeline_name
+
+    def append_pass(self, pass_name: Text):
+        """Appends a pass at the end of the current passes in the pipeline."""
+        if pass_name not in PASS_REGISTRY:
+            raise ValueError(f"The pass {pass_name} is not registered.")
+        self._pass_names.append(pass_name)
+
+    def insert_pass(self, index: int, pass_name: Text) -> None:
+        """Inserts a pass at a specific index."""
+        if pass_name not in PASS_REGISTRY:
+            raise ValueError(f"The pass {pass_name} is not registered.")
+        self._pass_names.insert(index, pass_name)
+
+    def remove_pass(self, index: int) -> None:
+        """Removes a pass at a specific index."""
+        del self._pass_names[index]
+
+    def remove_passes(self, passes_names: Union[Set[Text], List[Text]]) -> None:
+        """Removes all passes with the given names."""
+        self._pass_names = [
+            pass_name for pass_name in self._pass_names if pass_name not in passes_names
+        ]
+
+    def get_options(self, pass_name: Text) -> Optional[List[PassOption]]:
+        """
+        Gets the options of a pass that have been set by the user. Returns None if the pass
+        doesn't have any associated options set by the user.
+        """
+        return self._pass_options.get(pass_name, None)
+
+    def get_all_options(self) -> Dict[Text, List[PassOption]]:
+        """Gets all options in the pipeline."""
+        return self._pass_options
+
+    def set_options(self, pass_name: Text, options: Dict[Text, Text], override: bool = False):
+        """Sets options for a specific pass."""
+        if self._pass_options.get(pass_name, None) and not override:
+            raise ValueError(f"The pass {pass_name} already has associated options.")
+        pass_options: List[PassOption] = []
+        for option_name, option_val in options.items():
+            if not (isinstance(option_name, str) and isinstance(option_val, str)):
+                raise ValueError(
+                    f"The options must be specified by Dict[Text, Text], but got "
+                    f"Dict[{type(option_name)}, {type(option_val)}]"
+                )
+            pass_option = PassOption(option_name=option_name, option_val=option_val)
+            pass_options.append(pass_option)
+        self._pass_options[pass_name] = pass_options
+
+    def set_options_by_another_pipeline(self, other_pipeline: PassPipeline):
+        """
+        Convenience method for copying options over from another pipeline.
+        For each option in other_pipeline, set it if its pass is also present in this pipeline.
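+
+        A minimal sketch of the intended use (the pass and option names below appear
+        elsewhere in this file; the option value is only illustrative):
+
+        .. sourcecode:: python
+
+            src = PassPipeline()
+            src.set_options("common::add_fp16_cast", {"skip_ops_by_type": "matmul"})
+            dst = PassPipeline()
+            dst.set_options_by_another_pipeline(src)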
+        """
+        for pass_name, options in other_pipeline.get_all_options().items():
+            if pass_name in self.passes:
+                self._pass_options[pass_name] = options
+
+    def validate(self):
+        """Validates the pipeline (including options)."""
+        pass_names_set = set(self._pass_names)
+        for pass_name in self._pass_options.keys():
+            if pass_name not in pass_names_set:
+                raise ValueError(
+                    f"This pass pipeline is not valid. The pass {pass_name} has "
+                    f"associated options but it's not in the passes. Passes in this "
+                    f"pipeline: {self._pass_names}"
+                )
+
+    @staticmethod
+    def get_empty_pipeline() -> PassPipeline:
+        """Creates an empty pipeline without any pass."""
+        return PassPipeline(pass_names=[])
+
+    @staticmethod
+    def get_pipeline(pipeline_name: Text) -> PassPipeline:
+        """
+        Gets a pipeline based on the name. Raises an error if no pipeline is found.
+        Available pipelines:
+        - "default": _COMMON_PASSES + _CLEANUP_PASSES
+        - "empty": empty
+        - "frontend_pytorch": _FRONTEND_TORCH_PASSES
+        - "frontend_tensorflow": _FRONTEND_TF1_PASSES
+        - "frontend_tensorflow2": _FRONTEND_TF2_PASSES
+        - "frontend_milinternal": empty
+        - "backend_mlprogram": _BACKEND_MIL_PASSES
+        - "backend_neuralnetwork": _BACKEND_NN_PASSES
+        - "backend_milinternal": empty
+        """
+        if pipeline_name not in PassPipeline._PIPELINE_NAME_TO_PASSES:
+            raise ValueError(
+                f"There is no pipeline for `{pipeline_name}`. "
+                f"Available pipelines: {PassPipeline._PIPELINE_NAME_TO_PASSES.keys()}"
+            )
+        return PassPipeline(PassPipeline._PIPELINE_NAME_TO_PASSES[pipeline_name], pipeline_name)
+
+
+class PipelineManager:
+    @staticmethod
+    @_profile
+    def apply_pipeline(prog: Program, pass_pipeline: PassPipeline):
+        """Applies a pass pipeline to a program, modifying the program in-place."""
+        if pass_pipeline is None:
+            raise ValueError("The pass_pipeline cannot be None.")
+
+        pass_pipeline.validate()
+        prog.validate()
+
+        logger.debug(f"Program before {pass_pipeline} pipeline:\n{prog}")
+        for pass_name in tqdm(
+            pass_pipeline.passes,
+            desc=f"Running MIL {pass_pipeline} pipeline",
+            unit=" passes",
+        ):
+            logger.info(f'Performing pass: "{pass_name}"')
+            pass_options = pass_pipeline.get_options(pass_name)
+            if pass_options is not None:
+                logger.debug(
+                    f"The graph pass options for {pass_name} are set to {pass_options}. "
+                    f"This changes the pass behavior; make sure the options are intended."
+                )
+            graph_pass = PASS_REGISTRY[pass_name]
+            graph_pass.set_options(pass_options)
+            graph_pass(prog)
+            prog.validate()
+        logger.debug(f"Program after {pass_pipeline} pipeline:\n{prog}")
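As a usage illustration for the two classes above, the following sketch builds a named pipeline, customizes it, and applies it. It assumes `prog` is an existing MIL `Program` (for example, one produced by a frontend converter); every pass name used here is registered in the lists earlier in this file.

```python
from coremltools.converters.mil.mil.passes.pass_pipeline import (
    PassPipeline,
    PipelineManager,
)

# Start from the registered PyTorch frontend pipeline and customize it.
pipeline = PassPipeline.get_pipeline("frontend_pytorch")
pipeline.remove_passes({"torch::torch_tensor_assign_to_core"})
pipeline.append_pass("common::dead_code_elimination")
pipeline.validate()

# `prog` is assumed to be a coremltools.converters.mil.Program built elsewhere.
PipelineManager.apply_pipeline(prog, pipeline)  # modifies `prog` in-place
```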
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/pass_registry.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/pass_registry.py
new file mode 100644
index 00000000..c562bcca
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/pass_registry.py
@@ -0,0 +1,65 @@
+# Copyright (c) 2020, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import inspect
+from typing import Dict, Optional, Text, Type
+
+from coremltools import _logger as logger
+from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
+
+
+class PassRegistry:
+    def __init__(self):
+        """
+        Store the pass class instead of an instance, to avoid the same instance being
+        modified by several callers.
+        """
+        self.passes: Dict[Text, Type[AbstractGraphPass]] = {}
+
+    def __getitem__(self, pass_id: Text) -> AbstractGraphPass:
+        """
+        pass_id: namespace::func_name (e.g., 'common::const_elimination')
+        """
+        if pass_id not in self.passes:
+            raise KeyError(f"Pass {pass_id} not found")
+        current_pass = self.passes[pass_id]
+        # The current_pass could be a PassContainer instance if registered by register_generic_pass.
+        return current_pass() if inspect.isclass(current_pass) else current_pass
+
+    def __contains__(self, pass_id: Text) -> bool:
+        return pass_id in self.passes
+
+    def add(
+        self,
+        namespace: Text,
+        pass_cls: Type[AbstractGraphPass],
+        override: bool,
+        name: Optional[Text],
+    ):
+        cls_name = pass_cls.__name__ if name is None else name
+        pass_id = namespace + "::" + cls_name
+        logger.debug(f"Registering pass {pass_id}")
+        if pass_id in self.passes and not override:
+            raise KeyError(f"Pass {pass_id} already registered.")
+        self.passes[pass_id] = pass_cls
+
+
+PASS_REGISTRY = PassRegistry()
+
+
+def register_pass(namespace: Text, override: bool = False, name: Optional[Text] = None):
+    """
+    Decorator for registering a graph pass under a namespace, e.g. one of
+    {'common', 'nn_backend', ...}.
+
+    Params:
+        override: indicates whether the graph pass may override an existing pass with the same name.
+        name: name of the graph pass. Defaults to the class name if not provided.
+    """
+
+    def class_wrapper(pass_cls: Type[AbstractGraphPass]):
+        PASS_REGISTRY.add(namespace, pass_cls, override, name)
+        return pass_cls
+
+    return class_wrapper
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/tests/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/tests/__init__.py
new file mode 100644
index 00000000..25c7d28c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/tests/__init__.py
@@ -0,0 +1,4 @@
+# Copyright (c) 2023, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/tests/test_lower_complex_dialect_ops.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/tests/test_lower_complex_dialect_ops.py
new file mode 100644
index 00000000..a7a03fd1
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/tests/test_lower_complex_dialect_ops.py
@@ -0,0 +1,56 @@
+# Copyright (c) 2022, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as np + +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.testing_utils import ( + apply_pass_and_basic_check, + assert_model_is_valid, + get_op_types_in_program, +) + +np.random.seed(9) + + +class TestLowerComplexDialectOps: + def test_lower_complex_real(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 3))]) + def prog(x): + complex_data = mb.complex(real_data=x, imag_data=x) + real_data = mb.complex_real(data=complex_data) + return real_data + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::lower_complex_dialect_ops") + assert get_op_types_in_program(prev_prog) == ["complex", "complex_real"] + assert get_op_types_in_program(prog) == ["identity"] + + inputs = {"x": (1, 2, 3)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 3)}, + ) + + def test_lower_fft(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 3))]) + def prog(x): + fft_res = mb.complex_fft(data=x) + real_data = mb.complex_real(data=fft_res) + return real_data + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::lower_complex_dialect_ops") + assert get_op_types_in_program(prev_prog) == ["complex_fft", "complex_real"] + after_pass_op_types_set = set(get_op_types_in_program(prog)) + # Verifies that the complex dialect ops got lowered to core ops. + assert "complex_fft" not in after_pass_op_types_set + assert "complex_real" not in after_pass_op_types_set + + inputs = {"x": (1, 2, 3)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 3)}, + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/tests/test_pass_pipeline.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/tests/test_pass_pipeline.py new file mode 100644 index 00000000..553f6072 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/tests/test_pass_pipeline.py @@ -0,0 +1,113 @@ +# Copyright (c) 2023, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as np +import pytest + +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil.passes.pass_pipeline import PassPipeline, PipelineManager +from coremltools.converters.mil.testing_utils import assert_model_is_valid, get_op_types_in_program + +np.random.seed(1984) + + +class TestPassPipeline: + def test_add_pass(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 3))]) + def prog(x): + x = mb.relu(x=x) + x = mb.relu(x=x) + x = mb.add(x=x, y=1.0) + return x + + assert get_op_types_in_program(prog) == ["relu", "relu", "add"] + pipeline = PassPipeline.get_empty_pipeline() + pipeline.append_pass("common::merge_consecutive_relus") + assert pipeline.passes == ["common::merge_consecutive_relus"] + PipelineManager.apply_pipeline(prog, pipeline) + assert get_op_types_in_program(prog) == ["relu", "add"] + + inputs = {"x": (2, 3)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={prog.functions["main"].outputs[0].name: (2, 3)}, + ) + + def test_insert_pass_at_index(self): + pipeline = PassPipeline.get_empty_pipeline() + pipeline.insert_pass(index=0, pass_name="common::merge_consecutive_relus") + pipeline.insert_pass(index=0, pass_name="common::noop_elimination") + pipeline.insert_pass(index=1, pass_name="common::noop_elimination") + pipeline.insert_pass(index=1, pass_name="common::merge_consecutive_reshapes") + assert pipeline.passes == [ + "common::noop_elimination", + "common::merge_consecutive_reshapes", + "common::noop_elimination", + "common::merge_consecutive_relus", + ] + + def test_insert_invalid_pass(self): + pipeline = PassPipeline.get_empty_pipeline() + with pytest.raises(ValueError, match="The pass test_pass is not registered."): + pipeline.append_pass("test_pass") + with pytest.raises(ValueError, match="The pass test_pass is not registered."): + pipeline.insert_pass(0, "test_pass") + with pytest.raises(ValueError, match="The pass invalid_pass is not registered."): + pipeline.passes = ["invalid_pass"] + + def test_remove_passes(self): + pipeline = PassPipeline.get_empty_pipeline() + pipeline.passes = [ + "common::noop_elimination", + "common::merge_consecutive_reshapes", + "common::noop_elimination", + "common::merge_consecutive_relus", + ] + pipeline.remove_passes(passes_names=["common::noop_elimination"]) + assert pipeline.passes == [ + "common::merge_consecutive_reshapes", + "common::merge_consecutive_relus", + ] + pipeline.remove_pass(index=1) + assert pipeline.passes == ["common::merge_consecutive_reshapes"] + + def test_set_pass_options(self): + pipeline = PassPipeline.get_empty_pipeline() + pipeline.append_pass("common::add_fp16_cast") + assert pipeline.get_options("common::add_fp16_cast") is None + pipeline.set_options("common::add_fp16_cast", {"skip_ops_by_type": "matmul,const"}) + assert len(pipeline.get_options("common::add_fp16_cast")) == 1 + assert pipeline.get_options("common::add_fp16_cast")[0].option_name == "skip_ops_by_type" + assert pipeline.get_options("common::add_fp16_cast")[0].option_val == "matmul,const" + + def test_set_pass_options_already_exist(self): + pipeline = PassPipeline() + pipeline.set_options("common::add_fp16_cast", {"skip_ops_by_type": "matmul,const"}) + with pytest.raises( + ValueError, match="The pass common::add_fp16_cast already has associated options." 
+ ): + pipeline.set_options("common::add_fp16_cast", {"skip_ops_by_type": "concat"}) + # Override the options. + pipeline.set_options("common::add_fp16_cast", {"skip_ops_by_type": "concat"}, override=True) + assert pipeline.get_options("common::add_fp16_cast")[0].option_name == "skip_ops_by_type" + assert pipeline.get_options("common::add_fp16_cast")[0].option_val == "concat" + + def test_set_pass_options_for_pass_not_in_pipeline(self): + pipeline = PassPipeline.get_empty_pipeline() + pipeline.set_options("common::add_fp16_cast", {"skip_ops_by_type": "matmul,const"}) + with pytest.raises( + ValueError, + match="This pass pipeline is not valid. The pass common::add_fp16_cast " + "has associated options but it's not in the passes.", + ): + pipeline.validate() + + def test_get_invalid_pipeline(self): + with pytest.raises( + ValueError, + match="There is no pipeline for `invalid`.", + ): + PassPipeline.get_pipeline("invalid") diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/tests/test_passes.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/tests/test_passes.py new file mode 100644 index 00000000..aa7b1671 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/tests/test_passes.py @@ -0,0 +1,7475 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import copy +import itertools +import unittest + +import numpy as np +import pytest +from mock import patch + +import coremltools as ct +from coremltools._deps import _IS_MACOS +from coremltools.converters.mil.experimental.passes.generic_pass_infrastructure import ( + register_generic_pass, +) +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import Function, Program, Symbol, get_new_symbol, types +from coremltools.converters.mil.mil.passes.defs import quantization +from coremltools.converters.mil.mil.passes.defs.cleanup import topological_reorder +from coremltools.converters.mil.mil.passes.helper import _check_var_scalar_value +from coremltools.converters.mil.mil.passes.pass_registry import PASS_REGISTRY +from coremltools.converters.mil.mil.types import numpy_type_to_builtin_type +from coremltools.converters.mil.testing_reqs import backends +from coremltools.converters.mil.testing_utils import ( + apply_pass_and_basic_check, + assert_model_is_valid, + assert_op_count_match, + assert_same_output_names, + get_op_names_in_program, + get_op_types_in_program, +) + +np.random.seed(1984) +_VALIDATE_MODEL = True + + +class TestConstElimination: + def test_const_elimination(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x): + a = np.random.rand(2, 4).astype(np.float32) + double_a = mb.add(x=a, y=a) + return mb.add(x=x, y=double_a) + + assert_op_count_match(prog, expect=2, op="const") + prev_prog = copy.deepcopy(prog) + PASS_REGISTRY["common::const_elimination"](prog) + assert_same_output_names(prev_prog, prog) + assert_op_count_match(prog, expect=3, op="const") + + if _VALIDATE_MODEL: + assert_model_is_valid(prog, {"x": (2, 4)}) + + def test_const_elimination_nonreplaceable(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x): + a = np.random.rand(2, 4).astype(np.float16) + constexpr_a = mb.constexpr_cast(source_val=a, output_dtype="fp32") + double_a = mb.add(x=constexpr_a, 
y=a.astype(np.float32))
+            return mb.add(x=x, y=double_a)
+
+        prev_prog, _, _ = apply_pass_and_basic_check(prog, "common::const_elimination")
+        assert get_op_types_in_program(prev_prog) == ["constexpr_cast", "add", "add"]
+        # Not folded into a const because the upstream constexpr_cast op is non-replaceable.
+        assert get_op_types_in_program(prog) == ["constexpr_cast", "add", "add"]
+
+    @patch(
+        "coremltools.converters.mil.mil.passes.defs.cleanup.const_elimination._skip_const_by_size",
+        1000,
+    )
+    def test_const_elimination_larger_than_threshold(self):
+        @mb.program(input_specs=[mb.TensorSpec(shape=(2, 3))])
+        def prog(x):
+            # Construct a 10 x 10 matrix (100 elements) which is smaller than the threshold (1000).
+            tmp = mb.range_1d(start=0, end=10, step=1)
+            tmp_x = mb.reshape(x=tmp, shape=[-1, 1])
+            tmp_y = mb.reshape(x=tmp, shape=[1, -1])
+            return mb.matmul(x=tmp_x, y=tmp_y)
+
+        @mb.program(input_specs=[mb.TensorSpec(shape=(2, 3))])
+        def prog_large_const_size(x):
+            # Construct a 100 x 100 matrix (10000 elements) which is larger than the threshold (1000).
+            tmp = mb.range_1d(start=0, end=100, step=1)
+            tmp_x = mb.reshape(x=tmp, shape=[-1, 1])
+            tmp_y = mb.reshape(x=tmp, shape=[1, -1])
+            return mb.matmul(x=tmp_x, y=tmp_y)
+
+        prev_prog, _, _ = apply_pass_and_basic_check(prog, "common::const_elimination")
+        assert get_op_types_in_program(prev_prog) == [
+            "range_1d",
+            "reshape",
+            "reshape",
+            "matmul",
+        ]
+        # All ops (range_1d, reshape, matmul) constructing that 10x10 matrix are folded into a const.
+        assert get_op_types_in_program(prog) == []
+
+        prev_prog_large_const_size, _, _ = apply_pass_and_basic_check(
+            prog_large_const_size, "common::const_elimination"
+        )
+        assert get_op_types_in_program(prev_prog_large_const_size) == [
+            "range_1d",
+            "reshape",
+            "reshape",
+            "matmul",
+        ]
+        # The matmul op constructing the large matrix is kept because its size exceeds the threshold.
+ assert get_op_types_in_program(prog_large_const_size) == ["matmul"] + + +class TestDeadCodeElimination: + def test_dead_code_elimination(self): + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(2, 4)), + mb.TensorSpec(shape=(2, 4)), + ] + ) + def program0(x, y): + # following three unused op should be eliminated + a = mb.const(val=np.zeros(shape=(1,))) + b = mb.const(val=np.zeros(shape=(1,))) + _ = mb.add(x=a, y=b) + return mb.add(x=x, y=y) + + assert_op_count_match(program0, expect=4) + prev_prog = copy.deepcopy(program0) + PASS_REGISTRY["common::dead_code_elimination"](program0) + assert_same_output_names(prev_prog, program0) + assert_op_count_match(program0, expect=1) + + if _VALIDATE_MODEL: + assert_model_is_valid(program0, {"x": (2, 4), "y": (2, 4)}) + + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def program1(x): + weights_val = np.random.rand(4, 2).T.astype(np.float32) + weights = mb.const(val=weights_val) + bias_val = np.random.rand(2).astype(np.float32) + bias = mb.const(val=bias_val) + + # unused op and its inputs should be eliminated + weights_for_matmul = mb.transpose(x=weights, perm=[1, 0]) + mb.matmul(x=x, y=weights_for_matmul) + + return mb.linear(x=x, weight=weights, bias=bias) + + assert_op_count_match(program1, expect=8) + prev_prog = copy.deepcopy(program1) + PASS_REGISTRY["common::dead_code_elimination"](program1) + assert_same_output_names(prev_prog, program1) + assert_op_count_match(program1, expect=3) + + if _VALIDATE_MODEL: + assert_model_is_valid(program1, {"x": (2, 4)}) + + +class TestDedupOpAndVarNames(unittest.TestCase): + def test_unchanged(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x): + x = mb.reshape(x=x, shape=(1, 8), name="reshape") + return x + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::dedup_op_and_var_names") + + self.assertEqual(get_op_types_in_program(prev_prog), ["reshape"]) + self.assertEqual(get_op_names_in_program(prev_prog), ["reshape"]) + + self.assertEqual(get_op_types_in_program(prog), ["reshape"]) + self.assertEqual(get_op_names_in_program(prog), ["reshape"]) + + assert_model_is_valid( + prog, + {"x": (2, 4)}, + expected_output_shapes={block.outputs[0].name: (1, 8)}, + ) + + def test_op_name_duplicated_once(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + x = mb.cast(x=x, dtype="fp16", name="castop") + x = mb.cast(x=x, dtype="fp32", name="castop") + x = mb.square(x=x, name="square_last") + return x + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::dedup_op_and_var_names") + + self.assertEqual(get_op_types_in_program(prev_prog), ["cast", "cast", "square"]) + self.assertEqual(get_op_names_in_program(prev_prog), ["castop", "castop", "square_last"]) + + self.assertEqual(get_op_types_in_program(prog), ["cast", "cast", "square"]) + self.assertEqual(get_op_names_in_program(prog), ["castop", "castop_1", "square_last"]) + + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={block.outputs[0].name: (10, 20)}, + ) + + def test_op_name_duplicated_many(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + x = mb.cast(x=x, dtype="fp16", name="castop") + x = mb.cast(x=x, dtype="fp16", name="castop") + x = mb.cast(x=x, dtype="int32", name="castop_2") + x = mb.cast(x=x, dtype="int64", name="castop") + x = mb.cast(x=x, dtype="fp32", name="castop_2") + x = mb.square(x=x, name="square") + return x + + prev_prog, _, block = apply_pass_and_basic_check(prog, 
"common::dedup_op_and_var_names") + + self.assertEqual( + get_op_types_in_program(prev_prog), ["cast", "cast", "cast", "cast", "cast", "square"] + ) + self.assertEqual( + get_op_names_in_program(prev_prog), + ["castop", "castop", "castop_2", "castop", "castop_2", "square"], + ) + + self.assertEqual( + get_op_types_in_program(prog), ["cast", "cast", "cast", "cast", "cast", "square"] + ) + self.assertEqual( + get_op_names_in_program(prog), + ["castop", "castop_1", "castop_2", "castop_3", "castop_2_1", "square"], + ) + + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={block.outputs[0].name: (10, 20)}, + ) + + def test_input_name_shadow(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + # op name "x" results in output var name "x", which shadows prog + # input var name "x" + x = mb.transpose(x=x, perm=[1, 0], name="x") + x = mb.relu(x=x, name="relu") + return x + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::dedup_op_and_var_names") + self.assertEqual(get_op_types_in_program(prev_prog), ["transpose", "relu"]) + self.assertEqual(get_op_names_in_program(prev_prog), ["x", "relu"]) + + self.assertEqual(get_op_types_in_program(prog), ["transpose", "relu"]) + self.assertEqual(get_op_names_in_program(prog), ["x", "relu"]) + + op = prog["main"].find_ops(op_type="transpose")[0] + self.assertEqual("x_1", op.outputs[0].name) + + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={block.outputs[0].name: (20, 10)}, + ) + + def test_nested_block(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1,))]) + def prog(x): + def true_fn(): + # returns var with name x shadows input 'x' + return mb.add(x=x, y=1.0, name="x") + + def false_fn(): + # two ops with name "x" + return mb.add(x=x, y=-1.0, name="x") + + pred = mb.equal(x=mb.squeeze(x=x), y=1.0) + return mb.cond(pred=pred, _true_fn=true_fn, _false_fn=false_fn) + + cond_op = prog.functions["main"].operations[-1] + assert cond_op.blocks[0].outputs[0].name == "x" + assert cond_op.blocks[1].outputs[0].name == "x" + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::dedup_op_and_var_names") + cond_op = prog.functions["main"].operations[-1] + assert cond_op.blocks[0].outputs[0].name == "x_1" + assert cond_op.blocks[1].outputs[0].name == "x_2" + + assert_model_is_valid( + prog, + {"x": (1,)}, + expected_output_shapes={block.outputs[0].name: (1,)}, + ) + + +class TestAddConvTransposeOutputShape: + def test_add_conv_transpose_output_shape(self): + """ + Given: + %1: (1, 5, 39, fp32) = conv_transpose(...) # no output_shape input. 
+
+        Result:
+            %2: (3, i32) = const(val=[1,5,39])
+            %3: (1, 5, 39, fp32) = conv_transpose(..., output_shape=%2)
+        """
+        N, C_in, C_out, D1 = 1, 3, 5, 20
+
+        @mb.program(input_specs=[mb.TensorSpec(shape=(N, C_in, D1))])
+        def prog(x):
+            weight = np.random.rand(C_in, C_out, D1).astype(np.float32)
+            return mb.conv_transpose(x=x, weight=weight)
+
+        prev_prog, prev_block, block = apply_pass_and_basic_check(
+            prog, "common::add_conv_transpose_output_shape"
+        )
+        assert get_op_types_in_program(prev_prog) == ["conv_transpose"]
+        assert get_op_types_in_program(prog) == ["conv_transpose"]
+        prev_conv_transpose_op = prev_prog.find_ops(op_type="conv_transpose", exactly_one=True)[0]
+        conv_transpose_op = prog.find_ops(op_type="conv_transpose", exactly_one=True)[0]
+        assert np.all(conv_transpose_op.output_shape.val == prev_conv_transpose_op.outputs[0].shape)
+
+
+class TestNoopElimination:
+    @pytest.mark.parametrize(
+        "op_type, pos, val",
+        itertools.product(
+            ["add", "mul", "floor_div", "pow", "real_div", "sub"],
+            ["x", "y"],
+            [0.0, 1.0, [0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0]],
+        ),
+    )
+    def test_elementwise_elimination(self, op_type, pos, val):
+        if "div" in op_type and np.prod(val) == 0:
+            return
+        if "pow" in op_type:
+            # The pow cases are not exercised by this test.
+            return
+
+        test_op = getattr(mb, op_type)
+
+        @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
+        def prog(x):
+            if pos == "x":
+                r1 = test_op(x=val, y=x)
+            else:
+                r1 = test_op(x=x, y=val)
+            return mb.relu(x=r1)
+
+        prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination")
+        original_program = [op_type, "relu"]
+        new_program = original_program
+        if op_type in {"add"}:
+            if val == 0.0 or val == [0.0, 0.0, 0.0, 0.0]:
+                new_program = ["relu"]
+        elif op_type in {"mul"}:
+            if val == 1.0 or val == [1.0, 1.0, 1.0, 1.0]:
+                new_program = ["relu"]
+        elif op_type in {"real_div"}:
+            if pos == "y" and (val == 1.0 or val == [1.0, 1.0, 1.0, 1.0]):
+                new_program = ["relu"]
+        elif op_type in {"pow", "floor_div"}:
+            if pos == "y" and (val == 1.0 or val == [1.0, 1.0, 1.0, 1.0]):
+                new_program = ["relu"]
+        elif op_type in {"sub"}:
+            if pos == "y" and (val == 0.0 or val == [0.0, 0.0, 0.0, 0.0]):
+                new_program = ["relu"]
+
+        assert get_op_types_in_program(prev_prog) == original_program
+        assert get_op_types_in_program(prog) == new_program
+        assert_model_is_valid(
+            prog,
+            {"x": (2, 4)},
+            expected_output_shapes={block.outputs[0].name: (2, 4)},
+        )
+
+    def test_elementwise_broadcast(self):
+        @mb.program(input_specs=[mb.TensorSpec(shape=[4])])
+        def prog(x):
+            r1 = mb.add(x=x, y=[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]])
+            return mb.relu(x=r1)
+
+        prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination")
+        original_program = ["add", "relu"]
+
+        assert get_op_types_in_program(prev_prog) == original_program
+        assert get_op_types_in_program(prog) == original_program
+        assert_model_is_valid(
+            prog,
+            {"x": [4]},
+            expected_output_shapes={block.outputs[0].name: (2, 4)},
+        )
+
+    def test_elementwise_elimination_fill(self):
+        """
+        When a fill layer with a dynamic shape is fed to an elementwise binary operation,
+        the tensor can't be materialized at conversion time, but no-op elimination can
+        still be performed based on the fill value.
+        """
+
+        @mb.program(input_specs=[mb.TensorSpec(shape=(2, get_new_symbol()))])
+        def prog(x):
+            shape = mb.shape(x=x)
+            y = mb.fill(value=0.0, shape=shape)
+            x = mb.add(x=x, y=y)
+            return mb.relu(x=x)
+
+        prev_prog, prev_block, block = apply_pass_and_basic_check(prog,
"common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["shape", "fill", "add", "relu"] + assert get_op_types_in_program(prog) == ["shape", "fill", "relu"] + + apply_pass_and_basic_check(prog, "common::dead_code_elimination") + + assert get_op_types_in_program(prog) == ["relu"] + + assert_model_is_valid( + prog, + {"x": (2, 4)}, + expected_output_shapes={block.outputs[0].name: (2, 4)}, + ) + + def test_reshape_elimination(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x): + r1 = mb.reshape(x=x, shape=[1, 8]) + mb.reshape(x=r1, shape=[1, 8]) + return mb.relu(x=r1) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["reshape", "reshape", "relu"] + assert get_op_types_in_program(prog) == ["reshape", "relu"] + assert_model_is_valid( + prog, + {"x": (2, 4)}, + expected_output_shapes={block.outputs[0].name: (1, 8)}, + ) + + def test_oneway_split_elimination(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x): + r1 = mb.split(x=x, num_splits=1, axis=-1) + return mb.relu(x=r1) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["split", "relu"] + assert get_op_types_in_program(prog) == ["relu"] + assert_model_is_valid( + prog, + {"x": (2, 4)}, + expected_output_shapes={block.outputs[0].name: (2, 4)}, + ) + + def test_full_split_elimination(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x): + r1 = mb.split(x=x, split_sizes=[4], axis=-1) + return mb.relu(x=r1) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["split", "relu"] + assert get_op_types_in_program(prog) == ["relu"] + assert_model_is_valid( + prog, + {"x": (2, 4)}, + expected_output_shapes={block.outputs[0].name: (2, 4)}, + ) + + def test_slicebysize_full_elimination(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x): + r1 = mb.slice_by_size(x=x, begin=[0, 0], size=[2, 4]) + return mb.relu(x=r1) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["slice_by_size", "relu"] + assert get_op_types_in_program(prog) == ["relu"] + assert_model_is_valid( + prog, + {"x": (2, 4)}, + expected_output_shapes={block.outputs[0].name: (2, 4)}, + ) + + def test_slicebysize_to_end_elimination(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x): + r1 = mb.slice_by_size(x=x, begin=[0, 0], size=[-1, -1]) + return mb.relu(x=r1) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["slice_by_size", "relu"] + assert get_op_types_in_program(prog) == ["relu"] + assert_model_is_valid( + prog, + {"x": (2, 4)}, + expected_output_shapes={block.outputs[0].name: (2, 4)}, + ) + + def test_slicebyindex_full_elimination(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x): + r1 = mb.slice_by_index(x=x, begin=[0, 0], end=[2, 4]) + return mb.relu(x=r1) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["slice_by_index", "relu"] + assert get_op_types_in_program(prog) == ["relu"] + assert_model_is_valid( + prog, + {"x": (2, 4)}, + 
expected_output_shapes={block.outputs[0].name: (2, 4)}, + ) + + def test_slicebyindex_negative_stride(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x): + r1 = mb.slice_by_index( + x=x, + begin=[0, 0], + end=[0, 0], + stride=[1, -1], + begin_mask=[True, True], + end_mask=[True, True], + ) + return mb.relu(x=r1) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["slice_by_index", "relu"] + assert get_op_types_in_program(prog) == ["slice_by_index", "relu"] + assert_model_is_valid( + prog, + {"x": (2, 4)}, + expected_output_shapes={block.outputs[0].name: (2, 4)}, + ) + + @pytest.mark.parametrize( + "begin_mask, end_mask", + itertools.product( + itertools.product([True, False], [True, False]), + itertools.product([True, False], [True, False]), + ), + ) + def test_slicebyindex_mask_elimination(self, begin_mask, end_mask): + @mb.program(input_specs=[mb.TensorSpec(shape=(4, 4))]) + def prog(x): + begin = [1, 1] + end = [1, 1] + for i in range(2): + if not begin_mask[i]: + begin[i] = 0 + if not end_mask[i]: + end[i] = 4 + r1 = mb.slice_by_index( + x=x, begin=begin, end=end, begin_mask=begin_mask, end_mask=end_mask + ) + return mb.relu(x=r1) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["slice_by_index", "relu"] + assert get_op_types_in_program(prog) == ["relu"] + assert_model_is_valid( + prog, + {"x": (4, 4)}, + expected_output_shapes={block.outputs[0].name: (4, 4)}, + ) + + def test_pad_elimination(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x): + r1 = mb.pad(x=x, pad=[0, 0, 0, 0]) + return mb.relu(x=r1) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["pad", "relu"] + assert get_op_types_in_program(prog) == ["relu"] + assert_model_is_valid( + prog, + {"x": (2, 4)}, + expected_output_shapes={block.outputs[0].name: (2, 4)}, + ) + + def test_keep_pad(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x): + r1 = mb.pad(x=x, pad=[4, 4, 2, 2]) + return mb.relu(x=r1) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["pad", "relu"] + assert get_op_types_in_program(prog) == ["pad", "relu"] + assert_model_is_valid( + prog, + {"x": (2, 4)}, + expected_output_shapes={block.outputs[0].name: (10, 8)}, + ) + + def test_tile_elimination(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x): + r1 = mb.tile(x=x, reps=[1, 1]) + return mb.relu(x=r1) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["tile", "relu"] + assert get_op_types_in_program(prog) == ["relu"] + assert_model_is_valid( + prog, + {"x": (2, 4)}, + expected_output_shapes={block.outputs[0].name: (2, 4)}, + ) + + def test_keep_tile(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x): + r1 = mb.tile(x=x, reps=[2, 2]) + return mb.relu(x=r1) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["tile", "relu"] + assert get_op_types_in_program(prog) == ["tile", "relu"] + assert_model_is_valid( + prog, + {"x": (2, 4)}, + expected_output_shapes={block.outputs[0].name: (4, 
8)}, + ) + + def test_upsample_nearest_neighbor_elimination(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(3, 2, 4))]) + def prog(x): + r1 = mb.upsample_nearest_neighbor(x=x) + return mb.relu(x=r1) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["upsample_nearest_neighbor", "relu"] + assert get_op_types_in_program(prog) == ["relu"] + assert_model_is_valid( + prog, + {"x": (3, 2, 4)}, + expected_output_shapes={block.outputs[0].name: (3, 2, 4)}, + ) + + def test_upsample_bilinear_elimination(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(3, 2, 4))]) + def prog(x): + r1 = mb.upsample_bilinear(x=x) + return mb.relu(x=r1) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["upsample_bilinear", "relu"] + assert get_op_types_in_program(prog) == ["relu"] + assert_model_is_valid( + prog, + {"x": (3, 2, 4)}, + expected_output_shapes={block.outputs[0].name: (3, 2, 4)}, + ) + + def test_resize_bilinear_elimination(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(3, 2, 4))]) + def prog(x): + r1 = mb.resize_bilinear(x=x, target_size_height=2, target_size_width=4) + return mb.relu(x=r1) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["resize_bilinear", "relu"] + assert get_op_types_in_program(prog) == ["relu"] + assert_model_is_valid( + prog, + {"x": (3, 2, 4)}, + expected_output_shapes={block.outputs[0].name: (3, 2, 4)}, + ) + + def test_crop_elimination(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(3, 2, 4))]) + def prog(x): + r1 = mb.crop(x=x, crop_height=[0, 0], crop_width=[0, 0]) + return mb.relu(x=r1) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["crop", "relu"] + assert get_op_types_in_program(prog) == ["relu"] + assert_model_is_valid( + prog, + {"x": (3, 2, 4)}, + expected_output_shapes={block.outputs[0].name: (3, 2, 4)}, + ) + + def test_linear_elimination(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x): + r1 = mb.linear_activation(x=x, alpha=1.0, beta=0.0) + return mb.relu(x=r1) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["linear_activation", "relu"] + assert get_op_types_in_program(prog) == ["relu"] + assert_model_is_valid( + prog, + {"x": (2, 4)}, + expected_output_shapes={block.outputs[0].name: (2, 4)}, + ) + + def test_transpose_elimination(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 3, 4))]) + def prog(x): + r1 = mb.transpose(x=x, perm=[0, 1, 2]) + return mb.relu(x=r1) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::noop_elimination") + assert get_op_types_in_program(prev_prog) == ["transpose", "relu"] + assert get_op_types_in_program(prog) == ["relu"] + assert_model_is_valid( + prog, + {"x": (2, 3, 4)}, + expected_output_shapes={block.outputs[0].name: (2, 3, 4)}, + ) + + +class TestRemoveSymbolicReshape: + def test_remove_symbolic_reshape(self): + sym_b = Symbol("s0") + original_shape = (sym_b, Symbol("s1"), 2) + reshape_name = "reshape" + + @mb.program(input_specs=[mb.TensorSpec(shape=(sym_b, 4))]) + def prog(x): + # const cannot represent symbolic values. 
Use _const_symbolic + shape = mb._const_symbolic(val=original_shape) + return mb.reshape(x=x, shape=shape, name=reshape_name) + + reshape_op = prog.find_ops(prefix=reshape_name, op_type="reshape", exactly_one=True)[0] + shape_var = reshape_op.shape + reshaped_var = reshape_op.outputs[0] + assert np.all(shape_var.sym_val == original_shape) + assert np.all(reshaped_var.shape == (sym_b, 2, 2)) + + # Note: we cannot deepcopy prog with symbol. + prev_outputs = [o.name for o in prog["main"].outputs] + PASS_REGISTRY["common::remove_symbolic_reshape"](prog) + curr_outputs = [o.name for o in prog["main"].outputs] + assert curr_outputs == prev_outputs + + reshape_op = prog.find_ops(prefix=reshape_name, op_type="reshape", exactly_one=True)[0] + shape_var = reshape_op.shape + reshaped_var = reshape_op.outputs[0] + # shape param cannot be symbolic after the pass + assert np.all(shape_var.sym_val == (-1, 2, 2)) + # output shape is still symbolic + assert np.all(reshaped_var.shape == (sym_b, 2, 2)) + + if _VALIDATE_MODEL: + assert_model_is_valid(prog, {"x": (3, 4)}) + + +class TestLoopInvariantElimination: + def test_loop_invariant_elimination1(self): + """ + Invariant pattern: Block input vars are returned as block output vars. + """ + + def body(a, b): + return mb.add(x=a, y=b), b + + def cond(a, b): + a_mean = mb.reduce_mean(x=a, axes=[0, 1]) + b_mean = mb.reduce_mean(x=b, axes=[0, 1]) + return mb.less(x=a_mean, y=b_mean) + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(1, 2)), + mb.TensorSpec(shape=(1, 2)), + ] + ) + def prog(a, b): + # b is loop invariant + return mb.while_loop(_cond=cond, _body=body, loop_vars=(a, b)) + + while_op = prog.find_ops(op_type="while_loop", exactly_one=True)[0] + assert len(while_op.blocks[0].inputs) == 2 + assert len(while_op.outputs) == 2 + assert len(while_op.loop_vars) == 2 + assert while_op.blocks[0].inputs[0].name == "a_x0" + assert while_op.blocks[0].inputs[1].name == "b_x0" + + prev_prog = copy.deepcopy(prog) + PASS_REGISTRY["common::loop_invariant_elimination"](prog) + assert_same_output_names(prev_prog, prog) + + while_op = prog.find_ops(op_type="while_loop", exactly_one=True)[0] + assert len(while_op.blocks[0].inputs) == 1 + assert len(while_op.outputs) == 1 + assert len(while_op.loop_vars) == 1 + assert while_op.blocks[0].inputs[0].name == "a_x0" + + if _VALIDATE_MODEL: + assert_model_is_valid(prog, {"a": (1, 2), "b": (1, 2)}) + + def test_loop_invariant_elimination2(self): + """ + Invariant pattern: Block outputs var from outside of the block + """ + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(1, 2)), + mb.TensorSpec(shape=(1, 2)), + ] + ) + def prog(a, b): + def body(a, bx): + return mb.add(x=a, y=b), b + + def cond(a, bx): + a_mean = mb.reduce_mean(x=a, axes=[0, 1]) + b_mean = mb.reduce_mean(x=bx, axes=[0, 1]) + return mb.less(x=a_mean, y=b_mean) + + # b is loop invariant + return mb.while_loop(_cond=cond, _body=body, loop_vars=(a, b)) + + while_op = prog.find_ops(op_type="while_loop", exactly_one=True)[0] + assert len(while_op.blocks[0].inputs) == 2 + assert len(while_op.outputs) == 2 + assert len(while_op.loop_vars) == 2 + assert while_op.blocks[0].inputs[0].name == "a_x0" + assert while_op.blocks[0].inputs[1].name == "b_x0" + + prev_prog = copy.deepcopy(prog) + PASS_REGISTRY["common::loop_invariant_elimination"](prog) + assert_same_output_names(prev_prog, prog) + + while_op = prog.find_ops(op_type="while_loop", exactly_one=True)[0] + assert len(while_op.blocks[0].inputs) == 1 + assert len(while_op.outputs) == 1 + assert 
len(while_op.loop_vars) == 1 + assert while_op.blocks[0].inputs[0].name == "a_x0" + + if _VALIDATE_MODEL: + assert_model_is_valid(prog, {"a": (1, 2), "b": (1, 2)}) + + +class TestReduceMeanFusion: + def test_valid_pattern1(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(3, 5, 6))]) + def prog(x): + x1 = mb.reduce_sum(x=x, axes=[-1, -2], keep_dims=True) + x1 = mb.mul(x=1.0 / 30, y=x1) + return x1 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::fuse_reduce_mean") + assert get_op_types_in_program(prev_prog) == ["reduce_sum", "mul"] + assert get_op_types_in_program(prog) == ["reduce_mean"] + assert_model_is_valid( + prog, + {"x": (3, 5, 6)}, + expected_output_shapes={block.outputs[0].name: (3, 1, 1)}, + ) + + def test_valid_pattern2(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(4, 5))]) + def prog(x): + x1 = mb.reduce_sum(x=x, axes=[0], keep_dims=False) + x1 = mb.real_div(x=x1, y=4.0) + return x1 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::fuse_reduce_mean") + assert get_op_types_in_program(prev_prog) == ["reduce_sum", "real_div"] + assert get_op_types_in_program(prog) == ["reduce_mean"] + assert_model_is_valid( + prog, + {"x": (4, 5)}, + expected_output_shapes={block.outputs[0].name: (5,)}, + ) + + def test_invalid_pattern1(self): + """ + The mul does not correspond to "1/count" + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(3, 5, 6))]) + def prog(x): + x1 = mb.reduce_sum(x=x, axes=[-1, -2], keep_dims=True) + x1 = mb.mul(x=5.0, y=x1) + return x1 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::fuse_reduce_mean") + assert get_op_types_in_program(prog) == ["reduce_sum", "mul"] + + def test_invalid_pattern2(self): + """ + The div does not correspond to "count" + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(3, 5, 6))]) + def prog(x): + x1 = mb.reduce_sum(x=x, axes=[-1, -2], keep_dims=True) + x1 = mb.real_div(x=x1, y=31.0) + return x1 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::fuse_reduce_mean") + assert get_op_types_in_program(prog) == ["reduce_sum", "real_div"] + + def test_invalid_pattern3(self): + """ + One of the reduction dim is symbolic + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(3, get_new_symbol(), 6))]) + def prog(x): + x1 = mb.reduce_sum(x=x, axes=[-1, -2], keep_dims=True) + x1 = mb.real_div(x=x1, y=30.0) + return x1 + + pass_name = "common::fuse_reduce_mean" + PASS_REGISTRY[pass_name](prog) + assert get_op_types_in_program(prog) == ["reduce_sum", "real_div"] + + def test_invalid_pattern4(self): + """ + output of reduce_sum is model output + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(3, 5, 6))]) + def prog(x): + x1 = mb.reduce_sum(x=x, axes=[-1, -2], keep_dims=True) + y1 = mb.real_div(x=x1, y=30.0) + return y1, x1 + + pass_name = "common::fuse_reduce_mean" + PASS_REGISTRY[pass_name](prog) + assert get_op_types_in_program(prog) == ["reduce_sum", "real_div"] + + def test_invalid_pattern5(self): + """ + output of reduce_sum is feeding into another op + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(3, 5, 6))]) + def prog(x): + x1 = mb.reduce_sum(x=x, axes=[-1, -2], keep_dims=True) + y1 = mb.real_div(x=x1, y=30.0) + y2 = mb.mul(x=x1, y=10.0) + y3 = mb.add(x=y1, y=y2) + return y3 + + pass_name = "common::fuse_reduce_mean" + PASS_REGISTRY[pass_name](prog) + assert get_op_types_in_program(prog) == ["reduce_sum", "real_div", "mul", "add"] + + +class TestRemoveRedundantOps: + def 
test_redundant_ops_just_after_input_valid_pattern_1(self): + """ + Input graph: + input----->transpose(perm=[0, 2, 1])--->add---> add ---> out + | ^ ^ + | | | + |---->transpose(perm=[0, 2, 1])---- | + | | + | | + |---->transpose(perm=[0, 2, 1])------------ + + Output graph: + input----->transpose(perm=[0, 2, 1])--->add---> add ----> out + | ^ ^ + | | | + |------------- | + | | + |-------------------- + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 3, 5))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[0, 2, 1]) + x2 = mb.transpose(x=x, perm=[0, 2, 1]) + x3 = mb.transpose(x=x, perm=[0, 2, 1]) + z = mb.add(x=x1, y=x2) + z = mb.add(x=z, y=x3) + return z + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::remove_redundant_ops") + assert get_op_types_in_program(prev_prog) == [ + "transpose", + "transpose", + "transpose", + "add", + "add", + ] + assert get_op_types_in_program(prog) == ["transpose", "add", "add"] + assert_model_is_valid( + prog, + {"x": (2, 3, 5)}, + expected_output_shapes={block.outputs[0].name: (2, 5, 3)}, + ) + + def test_redundant_ops_just_after_input_valid_pattern_2(self): + """ + Input graph: + input----->leaky_relu(alpha=0.3)--->add---> add ---> out + | ^ ^ + | | | + |----->leaky_relu(alpha=0.3)--- | + | | + | | + |---->leaky_relu(alpha=0.3)------------ + + Output graph: + input--------->leaky_relu(alpha=0.3)--->add---> add ----> out + | ^ ^ + | | | + |------------- | + | | + |--------------------- + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 3, 5))]) + def prog(x): + x1 = mb.leaky_relu(x=x, alpha=0.3) + x2 = mb.leaky_relu(x=x, alpha=0.3) + x3 = mb.leaky_relu(x=x, alpha=0.3) + z = mb.add(x=x1, y=x2) + z = mb.add(x=z, y=x3) + return z + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::remove_redundant_ops") + assert get_op_types_in_program(prev_prog) == [ + "leaky_relu", + "leaky_relu", + "leaky_relu", + "add", + "add", + ] + assert get_op_types_in_program(prog) == ["leaky_relu", "add", "add"] + assert_model_is_valid( + prog, + {"x": (2, 3, 5)}, + expected_output_shapes={block.outputs[0].name: (2, 3, 5)}, + ) + + def test_redundant_ops_just_after_input_invalid_pattern_1(self): + """ + input----->transpose(perm=[0, 2, 1])---> reshape(shape=[-1]) -----> add ---> out + | ^ + | | + |---->transpose(perm=[1, 0, 2])----> reshape(shape=[-1])------ + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 3, 5))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[0, 2, 1]) + x2 = mb.transpose(x=x, perm=[1, 0, 2]) + x1 = mb.reshape(x=x1, shape=[-1]) + x2 = mb.reshape(x=x2, shape=[-1]) + z = mb.add(x=x1, y=x2) + return z + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::remove_redundant_ops") + assert get_op_types_in_program(prev_prog) == [ + "transpose", + "transpose", + "reshape", + "reshape", + "add", + ] + assert get_op_types_in_program(prog) == [ + "transpose", + "transpose", + "reshape", + "reshape", + "add", + ] + assert_model_is_valid( + prog, + {"x": (2, 3, 5)}, + expected_output_shapes={block.outputs[0].name: (30,)}, + ) + + def test_redundant_ops_just_after_input_invalid_pattern_2(self): + """ + input----->leaky_relu(alpha=0.3) -----> add ---> out + | ^ + | | + |---->leaky_relu(alpha=0.4)------- + + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 3, 5))]) + def prog(x): + x1 = mb.leaky_relu(x=x, alpha=0.3) + x2 = mb.leaky_relu(x=x, alpha=0.4) + z = mb.add(x=x1, y=x2) + return z + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::remove_redundant_ops") + assert 
get_op_types_in_program(prev_prog) == ["leaky_relu", "leaky_relu", "add"] + assert get_op_types_in_program(prog) == ["leaky_relu", "leaky_relu", "add"] + assert_model_is_valid( + prog, + {"x": (2, 3, 5)}, + expected_output_shapes={block.outputs[0].name: (2, 3, 5)}, + ) + + def test_redundant_ops_just_after_input_invalid_pattern_3(self): + """ + test case, when inputs of 1 op is a subset of the inputs of the other op + + input----->layer_norm1 -----> add ---> out + | ^ + | | + |---->layer_norm2------- + + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 3, 2))]) + def prog(x): + x1 = mb.layer_norm(x=x, axes=[2], epsilon=1e-4) + gamma_val = np.array([1.0, 1.0], dtype=np.float32) + beta_val = np.array([1.0, 0.0], dtype=np.float32) + x2 = mb.layer_norm(x=x, axes=[2], epsilon=1e-4, gamma=gamma_val, beta=beta_val) + z = mb.add(x=x1, y=x2) + return z + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::remove_redundant_ops") + assert get_op_types_in_program(prev_prog) == ["layer_norm", "layer_norm", "add"] + assert get_op_types_in_program(prog) == ["layer_norm", "layer_norm", "add"] + assert_model_is_valid( + prog, + {"x": (1, 3, 2)}, + expected_output_shapes={block.outputs[0].name: (1, 3, 2)}, + ) + + @staticmethod + def _make_repeated_conv_prog(redundant_conv=True): + prog = Program() + func_inputs = {"x": mb.placeholder(shape=[1, 4, 5, 5])} + with Function(func_inputs) as ssa_fun: + x = ssa_fun.inputs["x"] + x = mb.relu(x=x) + W = np.random.rand(8, 4, 3, 3) + if redundant_conv: + bias = np.random.rand(8) + x1 = mb.conv(x=x, weight=W, bias=bias, pad_type="same", strides=[1, 1]) + x2 = mb.conv(x=x, weight=W, bias=bias, pad_type="same", strides=[1, 1]) + else: + x1 = mb.conv(x=x, weight=W, bias=np.random.rand(8), pad_type="same", strides=[1, 1]) + x2 = mb.conv(x=x, weight=W, bias=np.random.rand(8), pad_type="same", strides=[1, 1]) + x1 = mb.relu(x=x1) + x2 = mb.relu(x=x2) + x1 = mb.avg_pool(x=x1, kernel_sizes=[2, 2], strides=[1, 1], pad_type="same") + z = mb.concat(values=(x1, x2), axis=-3) + ssa_fun.set_outputs([z]) + prog.add_function("main", ssa_fun) + return prog + + def test_redundant_ops_inside_graph_valid_pattern(self): + """ + Input graph: + input--> relu--------->conv------>relu----> pool ---> concat ---> out + | ^ + | | + |---->conv---->relu---------------------------- + + Output graph: + input-> relu--->conv------>relu----> pool ---> concat ---> out + | ^ + | | + |------------------- + """ + prog = self._make_repeated_conv_prog(redundant_conv=True) + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::remove_redundant_ops") + assert get_op_types_in_program(prev_prog) == [ + "relu", + "conv", + "conv", + "relu", + "relu", + "avg_pool", + "concat", + ] + assert get_op_types_in_program(prog) == ["relu", "conv", "relu", "avg_pool", "concat"] + assert_model_is_valid( + prog, + {"x": (1, 4, 5, 5)}, + expected_output_shapes={block.outputs[0].name: (1, 16, 5, 5)}, + ) + + def test_redundant_ops_inside_graph_invalid_pattern(self): + """ + input--->relu--------->conv1------>relu----> pool ---> concat ---> out + | ^ + | | + |---->conv2---->relu--------------------------- + """ + prog = self._make_repeated_conv_prog(redundant_conv=False) + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::remove_redundant_ops") + assert get_op_types_in_program(prev_prog) == [ + "relu", + "conv", + "conv", + "relu", + "relu", + "avg_pool", + "concat", + ] + assert get_op_types_in_program(prog) == [ + "relu", + "conv", + "conv", + "relu", + "relu", + "avg_pool", + 
"concat", + ] + assert_model_is_valid( + prog, + {"x": (1, 4, 5, 5)}, + expected_output_shapes={block.outputs[0].name: (1, 16, 5, 5)}, + ) + + def test_redundant_op_as_output_valid_pattern_1(self): + """ + Input graph: + input--------->relu------> out1 + | + | + |---->relu---->tanh---> out2 + + Output graph: + input--------->relu------> out1 + | + | + |---->tanh---> out2 + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 3, 5))]) + def prog(x): + x1 = mb.relu(x=x) + x2 = mb.relu(x=x) + return x1, mb.tanh(x=x2) + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::remove_redundant_ops") + assert get_op_types_in_program(prev_prog) == ["relu", "relu", "tanh"] + assert get_op_types_in_program(prog) == ["relu", "tanh"] + assert_model_is_valid( + prog, + {"x": (2, 3, 5)}, + expected_output_shapes={ + block.outputs[0].name: (2, 3, 5), + block.outputs[1].name: (2, 3, 5), + }, + ) + + def test_redundant_op_as_output_invalid_pattern_1(self): + """ + Input graph: + input--------->relu------> out1 + | + | + |---->relu---> out2 + + "common::remove_redundant_ops" pass does not remove ops if their outputs + are block outputs. + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 3, 5))]) + def prog(x): + x1 = mb.relu(x=x) + x2 = mb.relu(x=x) + return x1, x2 + + prev_prog, _, block = apply_pass_and_basic_check( + prog, + "common::remove_redundant_ops", + ) + assert get_op_types_in_program(prev_prog) == ["relu", "relu"] + assert get_op_types_in_program(prog) == ["relu", "relu"] + assert_model_is_valid( + prog, + {"x": (2, 3, 5)}, + expected_output_shapes={ + block.outputs[0].name: (2, 3, 5), + block.outputs[1].name: (2, 3, 5), + }, + ) + + def test_cond_block_program(self): + """ + - Test identical ops within different blocks are not removed. The "relu" op inside true and + false blocks are not removed since they are in different blocks. + - Test ops that have blocks inside them are not removed. There are two cond ops here, + with identical inputs but they are not removed, since they are ops that have nested block + inside them. 
+ """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(1,))]) + def prog(x): + x1 = mb.cast(x=x, dtype="bool") + + def true_fn(): + x = mb.shape(x=x1) + x = mb.cast(x=x, dtype="fp32") + return mb.add(x=x, y=1.0) + + def false_fn(): + x = mb.shape(x=x1) + x = mb.cast(x=x, dtype="fp32") + return mb.add(x=x, y=-1.0) + + z1 = mb.cond(pred=x1, _true_fn=true_fn, _false_fn=false_fn) + z2 = mb.cond(pred=x1, _true_fn=true_fn, _false_fn=false_fn) + z = mb.add(x=z1, y=z2) + return z + + prev_prog, _, block = apply_pass_and_basic_check( + prog, + "common::remove_redundant_ops", + ) + assert get_op_types_in_program(prev_prog) == ["cast", "cond", "cond", "add"] + assert get_op_types_in_program(prog) == ["cast", "cond", "cond", "add"] + cond_op = prog.find_ops(op_type="cond")[0] + assert cond_op.blocks[0].operations[0].op_type == "shape" + assert cond_op.blocks[1].operations[0].op_type == "shape" + assert_model_is_valid( + prog, + {"x": (1,)}, + expected_output_shapes={block.outputs[0].name: (1,)}, + ) + + def test_concat_op_pattern(self): + """ + Input graph: + ---------------> concat ------> log ------> out1 + | ^ + | | + input--------->relu------> concat ------> relu----> out2 + | ^ | + | | | + |---->tanh-------------------- + + Output graph: + |------>log ------> out1 + | + | + input--------->relu------> concat ------> relu----> out2 + | ^ + | | + |---->tanh--------- + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 5))]) + def prog(x): + x1 = mb.relu(x=x) + x2 = mb.tanh(x=x) + c1 = mb.concat(values=(x1, x2), axis=0) + c2 = mb.concat(values=(x1, x2), axis=0) + z1 = mb.log(x=c1) + z2 = mb.relu(x=c2) + return z1, z2 + + prev_prog, _, block = apply_pass_and_basic_check( + prog, + "common::remove_redundant_ops", + ) + assert get_op_types_in_program(prev_prog) == [ + "relu", + "tanh", + "concat", + "concat", + "log", + "relu", + ] + assert get_op_types_in_program(prog) == ["relu", "tanh", "concat", "log", "relu"] + assert_model_is_valid( + prog, + {"x": (10, 5)}, + expected_output_shapes={block.outputs[0].name: (20, 5), block.outputs[1].name: (20, 5)}, + ) + + def test_multiple_redundant_child_ops_pattern(self): + """ + Input graph + + input -------------> reshape ----------> add ---------> out1 + | ^ + | | + |-------> reshape --------------- + | + |------> slice_by_size-----> add ----------> out2 + | ^ + | | + |------> slice_by_size ------- + + Output graph + + input -------------> reshape ----------> add ------------> out1 + | | ^ + | | | + | |--------- + | + |------> slice_by_size----------> add -----------------> out2 + | ^ + | | + |--------------------- + + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 5, 4))]) + def prog(x): + x1 = mb.reshape(x=x, shape=[5, 2, -1]) + x2 = mb.reshape(x=x, shape=[5, 2, -1]) + x3 = mb.slice_by_size(x=x, begin=[0, 0, 1], size=[2, 4, 3]) + x4 = mb.slice_by_size(x=x, begin=[0, 0, 1], size=[2, 4, 3]) + z1 = mb.add(x=x1, y=x2) + z2 = mb.add(x=x3, y=x4) + return z1, z2 + + prev_prog, _, block = apply_pass_and_basic_check( + prog, + "common::remove_redundant_ops", + ) + assert get_op_types_in_program(prev_prog) == [ + "reshape", + "reshape", + "slice_by_size", + "slice_by_size", + "add", + "add", + ] + assert get_op_types_in_program(prog) == ["reshape", "slice_by_size", "add", "add"] + assert_model_is_valid( + prog, + {"x": (10, 5, 4)}, + expected_output_shapes={ + block.outputs[0].name: (5, 2, 20), + block.outputs[1].name: (2, 4, 3), + }, + ) + + def test_random_distribution_op_invalid_pattern(self): + """ + Identical random ops are not removed + + 
input----->cast---->random_uniform------> add ---> out + | ^ + | | + |---->random_uniform------------ + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(3,))]) + def prog(shape): + shape = mb.cast(x=shape, dtype="int32") + x1 = mb.random_uniform(shape=shape, low=0.0, high=1.0, seed=11) + x2 = mb.random_uniform(shape=shape, low=0.0, high=1.0, seed=11) + return mb.add(x=x1, y=x2) + + prev_prog, _, block = apply_pass_and_basic_check( + prog, + "common::remove_redundant_ops", + ) + assert get_op_types_in_program(prev_prog) == [ + "cast", + "random_uniform", + "random_uniform", + "add", + ] + assert get_op_types_in_program(prog) == ["cast", "random_uniform", "random_uniform", "add"] + + +class TestTopologicalReorder: + def test_move_sink_casts_to_the_end(self): + """ + Input graph: + x (input) ---> square ---> cast (output) + | + | -----------> log ------> cast (output) + | + | -----------> relu -----> cast ----> relu (output) + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + x = mb.cast(x=x, dtype="fp16") + x1 = mb.square(x=x) + x2 = mb.cast(x=x1, dtype="fp32") + x3 = mb.log(x=x) + x4 = mb.cast(x=x3, dtype="fp32") + x5 = mb.relu(x=x) + x6 = mb.cast(x=x5, dtype="fp32") + x7 = mb.relu(x=x6) + return x2, x4, x7 + + assert get_op_types_in_program(prog) == [ + "cast", + "square", + "cast", + "log", + "cast", + "relu", + "cast", + "relu", + ] + + apply_pass_and_basic_check(prog, "common::topological_reorder") + _, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination") + + assert get_op_types_in_program(prog) == [ + "cast", + "square", + "log", + "relu", + "cast", + "relu", + "cast", + "cast", + ] + + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={ + block.outputs[0].name: (10, 20), + block.outputs[1].name: (10, 20), + block.outputs[2].name: (10, 20), + }, + ) + + def test_move_sink_cast_transpose_to_the_end(self): + """ + Input graph: + x (input) ---> square ---> transpose ---> cast (output) + | + | -----------> log ------> transpose ---> cast (output) + | + | -----------> relu -----> cast ----> relu (output) + | + | -----------> relu (output) + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + x = mb.cast(x=x, dtype="fp16") + x1 = mb.square(x=x) + x1_t = mb.transpose(x=x1, perm=[1, 0]) + x2 = mb.cast(x=x1_t, dtype="fp32") + x3 = mb.log(x=x) + x3_t = mb.transpose(x=x3, perm=[1, 0]) + x4 = mb.cast(x=x3_t, dtype="fp32") + x5 = mb.relu(x=x) + x6 = mb.cast(x=x5, dtype="fp32") + x7 = mb.relu(x=x6) + x8 = mb.relu(x=x) + return x2, x4, x7, x8 + + assert get_op_types_in_program(prog) == [ + "cast", + "square", + "transpose", + "cast", + "log", + "transpose", + "cast", + "relu", + "cast", + "relu", + "relu", + ] + + apply_pass_and_basic_check(prog, "common::topological_reorder") + _, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination") + + assert get_op_types_in_program(prog) == [ + "cast", + "square", + "log", + "relu", + "cast", + "relu", + "relu", + "transpose", + "cast", + "transpose", + "cast", + ] + + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={ + block.outputs[0].name: (20, 10), + block.outputs[1].name: (20, 10), + block.outputs[2].name: (10, 20), + block.outputs[3].name: (10, 20), + }, + ) + + def test_move_multiple_uses_overlapping(self): + """ + Input graph: + x (input) ---> cast ---> cast (output) + | + |-------> transpose ---> transpose (output) + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def 
prog(x):
+ x1 = mb.cast(x=x, dtype="fp16")
+ x2 = mb.cast(x=x1, dtype="fp32")
+ x3 = mb.transpose(x=x1, perm=[1, 0])
+ x4 = mb.transpose(x=x3, perm=[1, 0])
+ return x2, x4
+
+ assert get_op_types_in_program(prog) == ["cast", "cast", "transpose", "transpose"]
+
+ apply_pass_and_basic_check(prog, "common::topological_reorder")
+ _, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination")
+
+ assert get_op_types_in_program(prog) == ["cast", "transpose", "transpose", "cast"]
+
+ assert_model_is_valid(
+ prog,
+ {"x": (10, 20)},
+ expected_output_shapes={
+ block.outputs[0].name: (10, 20),
+ block.outputs[1].name: (10, 20),
+ },
+ )
+
+ def test_move_split_to_first_use(self):
+ """
+ Input graph:
+ x (input) ---> split ---> square ---> add (output)
+ | | |
+ | | --------------------|
+ |
+ | -----------> square --------------> relu (output)
+ """
+
+ @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))])
+ def prog(x):
+ s1, s2 = mb.split(x=x, num_splits=2, axis=0)
+ x2 = mb.square(x=x)
+ x3 = mb.relu(x=x2)
+ s1_1 = mb.square(x=s1)
+ s3 = mb.add(x=s1_1, y=s2)
+ return x3, s3
+
+ assert get_op_types_in_program(prog) == ["split", "square", "relu", "square", "add"]
+
+ block = prog.functions["main"]
+ # Reorder the `split` op to exercise the case of an op with multiple outputs
+ topological_reorder._move_operations_to_the_end_block(block, ["split"])
+ assert get_op_types_in_program(prog) == ["square", "relu", "split", "square", "add"]
+
+ assert_model_is_valid(
+ prog,
+ {"x": (10, 20)},
+ expected_output_shapes={
+ block.outputs[0].name: (10, 20),
+ block.outputs[1].name: (5, 20),
+ },
+ )
+
+ def test_move_transpose_before_subblock(self):
+ """
+ Input graph:
+ x (input) ---> cast ---> transpose ---> cast (output)
+ |
+ | -----------> square ------> transpose (x1_t) ---> cast (output)
+ |
+ | -----------> squeeze ----> equal ----> squeeze
+ |
+ (true) <--- / \ ---> (false)
+ | |
+ | /<-(x1_t)->\ |
+ add <-/ \--> add
+ |---------> | <---------|
+ |
+ add ---> cast (output)
+ """
+
+ @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))])
+ def prog(x):
+ x = mb.cast(x=x, dtype="fp16")
+ x1 = mb.square(x=x)
+ x1_t = mb.transpose(x=x1, perm=[1, 0])
+
+ def true_fn():
+ return mb.add(x=x1_t, y=np.float16(1), name="x2")
+
+ def false_fn():
+ return mb.add(x=x1_t, y=np.float16(2), name="x2")
+
+ is_one = mb.equal(x=mb.squeeze(x=x), y=np.float16(1.0))
+ pred = mb.squeeze(x=is_one)
+ x3 = mb.cond(pred=pred, _true_fn=true_fn, _false_fn=false_fn)
+ x4 = mb.add(x=x1_t, y=x3)
+ x5 = mb.cast(x=x4, dtype="fp32")
+ return x5
+
+ apply_pass_and_basic_check(prog, "common::topological_reorder")
+ _, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination")
+
+ assert get_op_types_in_program(prog) == [
+ "cast",
+ "square",
+ "squeeze",
+ "equal",
+ "squeeze",
+ "transpose",
+ "cond",
+ "add",
+ "cast",
+ ]
+
+ assert_model_is_valid(
+ prog,
+ {"x": (10, 20)},
+ expected_output_shapes={block.outputs[0].name: (20, 10)},
+ )
+
+ def test_cast_transpose_already_at_the_end(self):
+ """
+ Input graph:
+ x (input) ---> square ---> transpose ---> cast (output)
+ |
+ | -----------> log ------> transpose ---> cast (output)
+ |
+ | -----------> relu -----> cast ----> relu (output)
+ |
+ | -----------> relu (output)
+ """
+
+ @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))])
+ def prog(x):
+ x = mb.cast(x=x, dtype="fp16")
+ x1 = mb.square(x=x)
+ x3 = mb.log(x=x)
+ x5 = mb.relu(x=x)
+ x6 = mb.cast(x=x5, dtype="fp32")
+ x7 = mb.relu(x=x6)
+ x8 = mb.relu(x=x)
+ x1_t = mb.transpose(x=x1, perm=[1, 
0]) + x2 = mb.cast(x=x1_t, dtype="fp32") + x3_t = mb.transpose(x=x3, perm=[1, 0]) + x4 = mb.cast(x=x3_t, dtype="fp32") + return x2, x4, x7, x8 + + assert get_op_types_in_program(prog) == [ + "cast", + "square", + "log", + "relu", + "cast", + "relu", + "relu", + "transpose", + "cast", + "transpose", + "cast", + ] + + apply_pass_and_basic_check(prog, "common::topological_reorder") + _, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination") + + assert get_op_types_in_program(prog) == [ + "cast", + "square", + "log", + "relu", + "cast", + "relu", + "relu", + "transpose", + "cast", + "transpose", + "cast", + ] + + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={ + block.outputs[0].name: (20, 10), + block.outputs[1].name: (20, 10), + block.outputs[2].name: (10, 20), + block.outputs[3].name: (10, 20), + }, + ) + + +class TestChildOrdering: + def test_generic_child_ordering(self): + """ + Checks that the new generic pattern matching infrastructure works + regardless of the ordering of an operation's children + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(3, 5, 6))]) + def prog(x): + power = mb.pow(x=x, y=3.0, name="thepowerop") + add_0 = mb.add(x=power, y=5.0, name="add_0") + sub_0 = mb.sub(x=power, y=5.0, name="sub_0") + mul_0 = mb.mul(x=power, y=5.0, name="mul_0") + add_1 = mb.add(x=add_0, y=mul_0, name="add_1") + add_2 = mb.add(x=sub_0, y=add_1, name="add_2") + return add_2 + + @mb.program(input_specs=[mb.TensorSpec(shape=(3, 5, 6))]) + def ops_arrangement(x): + power = mb.pow(x=x, y=3.0, name="thepowerop") + sub_0 = mb.sub(x=power, y=5.0, name="sub_0") + add_0 = mb.add(x=power, y=5.0, name="add_0") + mul_0 = mb.mul(x=power, y=5.0, name="mul_0") + add_1 = mb.add(x=add_0, y=mul_0, name="add_1") + add_2 = mb.add(x=sub_0, y=add_1, name="add_2") + return add_2 + + def var_constraints(pattern): + constraints_passed = True + constraints_passed &= _check_var_scalar_value(pattern.thepowerop.y, 3) + constraints_passed &= _check_var_scalar_value(pattern.sub_0.y, 5) + constraints_passed &= _check_var_scalar_value( + pattern.add_0.x, 5 + ) or _check_var_scalar_value(pattern.add_0.y, 5) + constraints_passed &= _check_var_scalar_value( + pattern.mul_0.x, 5 + ) or _check_var_scalar_value(pattern.mul_0.y, 5) + return constraints_passed + + def transform_pattern(pattern): + out_name = "new operation" + x = mb.gelu( + x=pattern.root_var, + mode="TANH_APPROXIMATION", + name=out_name, + before_op=pattern.thepowerop, + ) + + pattern.add_2.enclosing_block.replace_uses_of_var_after_op( + anchor_op=pattern.add_2, old_var=pattern.add_2.outputs[0], new_var=x + ) + + pattern.block.remove_ops(pattern.op_list()) + + register_generic_pass( + ops_arrangement=ops_arrangement, + var_constraints=var_constraints, + transform_pattern=transform_pattern, + pass_name="test_generic_child_ordering", + namespace="common", + ) + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::test_generic_child_ordering" + ) + assert get_op_types_in_program(prev_prog) == [ + "pow", + "add", + "sub", + "mul", + "add", + "add", + ] + assert get_op_types_in_program(prog) == ["gelu"] + assert_model_is_valid( + prog, + {"x": (3, 5, 6)}, + expected_output_shapes={block.outputs[0].name: (3, 5, 6)}, + ) + + +class TestGeluFusion: + def test_gelu_tanh_approximation1(self): + """ + Detect gelu tanh approx pattern, found in the TF bert model. 
+ y = ( tanh( (0.044715 * x^3 + x) * sqrt(2/pi) ) + 1 ) * 0.5 * x
+ """
+
+ @mb.program(input_specs=[mb.TensorSpec(shape=(3, 5, 6))])
+ def prog(x):
+ x1 = mb.pow(x=x, y=3.0)
+ x1 = mb.mul(x=0.044715, y=x1)
+ x1 = mb.add(x=x1, y=x)
+ x1 = mb.mul(x=x1, y=np.sqrt(2 / np.pi))
+ x1 = mb.tanh(x=x1)
+ x1 = mb.add(x=1.0, y=x1)
+ x1 = mb.mul(x=0.5, y=x1)
+ x1 = mb.mul(x=x, y=x1)
+ return x1
+
+ prev_prog, prev_block, block = apply_pass_and_basic_check(
+ prog, "common::fuse_gelu_tanh_approximation"
+ )
+ assert get_op_types_in_program(prev_prog) == [
+ "pow",
+ "mul",
+ "add",
+ "mul",
+ "tanh",
+ "add",
+ "mul",
+ "mul",
+ ]
+ assert get_op_types_in_program(prog) == ["gelu"]
+ assert_model_is_valid(
+ prog,
+ {"x": (3, 5, 6)},
+ expected_output_shapes={block.outputs[0].name: (3, 5, 6)},
+ )
+
+ @pytest.mark.parametrize(
+ "first_op_1, first_op_2, first_op_3, first_op_4, first_op_5, first_op_6",
+ itertools.product(
+ [True, False], [True, False], [True, False], [True, False], [True, False], [True, False]
+ ),
+ )
+ def test_gelu_tanh_approximation2(
+ self, first_op_1, first_op_2, first_op_3, first_op_4, first_op_5, first_op_6
+ ):
+ """
+ Detect gelu tanh approx pattern, found in the TF Sanitized GPT2 model.
+ y = ( tanh( (0.044715 * x^3 + x) * sqrt(2/pi) ) + 1 ) * 0.5 * x
+ """
+
+ @mb.program(input_specs=[mb.TensorSpec(shape=(3, 5, 6))])
+ def prog(x):
+ firstmul = mb.mul(x=x, y=0.5) if first_op_1 else mb.mul(x=0.5, y=x)
+ x1 = mb.pow(x=x, y=3.0)
+ x1 = mb.mul(x=0.044715, y=x1) if first_op_2 else mb.mul(x=x1, y=0.044715)
+ x1 = mb.add(x=x1, y=x) if first_op_3 else mb.add(x=x, y=x1)
+ x1 = (
+ mb.mul(x=x1, y=np.sqrt(2 / np.pi))
+ if first_op_4
+ else mb.mul(x=np.sqrt(2 / np.pi), y=x1)
+ )
+ x1 = mb.tanh(x=x1)
+ x1 = mb.add(x=1.0, y=x1) if first_op_5 else mb.add(x=x1, y=1.0)
+ x1 = mb.mul(x=firstmul, y=x1) if first_op_6 else mb.mul(x=x1, y=firstmul)
+ return x1
+
+ prev_prog, prev_block, block = apply_pass_and_basic_check(
+ prog, "common::fuse_gelu_tanh_approximation"
+ )
+ assert get_op_types_in_program(prev_prog) == [
+ "mul",
+ "pow",
+ "mul",
+ "add",
+ "mul",
+ "tanh",
+ "add",
+ "mul",
+ ]
+
+ assert get_op_types_in_program(prog) == ["gelu"]
+ assert_model_is_valid(
+ prog,
+ {"x": (3, 5, 6)},
+ expected_output_shapes={block.outputs[0].name: (3, 5, 6)},
+ )
+
+ @pytest.mark.parametrize(
+ "op_type, is_first_op1, is_first_op2, is_first_op3, is_first_op4, const_mul_first",
+ itertools.product(
+ ["real_div", "mul"],
+ [True, False],
+ [True, False],
+ [True, False],
+ [True, False],
+ [True, False],
+ ),
+ )
+ def test_gelu_exact(
+ self, op_type, is_first_op1, is_first_op2, is_first_op3, is_first_op4, const_mul_first
+ ):
+ """
+ Detect gelu exact pattern. 
+ y = 0.5 * (x * ( 1 + erf ( x / sqrt(2))))
+ or
+ y = x * (0.5 * ( 1 + erf ( x / sqrt(2))))
+ """
+
+ @mb.program(input_specs=[mb.TensorSpec(shape=(3, 5, 6))])
+ def prog(x):
+ if op_type == "real_div":
+ x1 = mb.real_div(x=x, y=2**0.5)
+ elif op_type == "mul":
+ x1 = mb.mul(x=x, y=2**-0.5) if is_first_op1 else mb.mul(x=2**-0.5, y=x)
+
+ x2 = mb.erf(x=x1)
+ x3 = mb.add(x=x2, y=1.0) if is_first_op2 else mb.add(x=1.0, y=x2)
+
+ if const_mul_first:
+ y1 = mb.const(val=0.5)
+ y2 = x
+ else:
+ y1 = x
+ y2 = mb.const(val=0.5)
+
+ x4 = mb.mul(x=x3, y=y1) if is_first_op3 else mb.mul(x=y1, y=x3)
+ x5 = mb.mul(x=x4, y=y2) if is_first_op4 else mb.mul(x=y2, y=x4)
+
+ return x5
+
+ prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::fuse_gelu_exact")
+
+ assert get_op_types_in_program(prev_prog) == [
+ op_type,
+ "erf",
+ "add",
+ "mul",
+ "mul",
+ ]
+ assert get_op_types_in_program(prog) == ["gelu"]
+ assert_model_is_valid(
+ prog,
+ {"x": (3, 5, 6)},
+ expected_output_shapes={block.outputs[0].name: (3, 5, 6)},
+ )
+
+ @pytest.mark.parametrize(
+ "is_first_op0, is_first_op4",
+ itertools.product(
+ [True, False],
+ [True, False],
+ ),
+ )
+ def test_gelu_exact_pattern_2(self, is_first_op0, is_first_op4):
+ """
+ Detect gelu exact pattern.
+ y = (0.5 * x) * ( 1 + erf ( x / sqrt(2)))
+ """
+
+ @mb.program(input_specs=[mb.TensorSpec(shape=(3, 5, 6))])
+ def prog(x):
+ x0 = mb.mul(x=x, y=0.5) if is_first_op0 else mb.mul(x=0.5, y=x)
+ x1 = mb.mul(x=x, y=2**-0.5)
+ x2 = mb.erf(x=x1)
+ x3 = mb.add(x=x2, y=1.0)
+ x4 = mb.mul(x=x0, y=x3) if is_first_op4 else mb.mul(x=x3, y=x0)
+ return x4
+
+ prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::fuse_gelu_exact")
+
+ assert get_op_types_in_program(prev_prog) == [
+ "mul",
+ "mul",
+ "erf",
+ "add",
+ "mul",
+ ]
+ assert get_op_types_in_program(prog) == ["gelu"]
+ assert_model_is_valid(
+ prog,
+ {"x": (3, 5, 6)},
+ expected_output_shapes={block.outputs[0].name: (3, 5, 6)},
+ )
+
+
+class TestLeakyReluFusion:
+ @pytest.mark.parametrize(
+ "swap_mul_input_order, swap_max_input_order",
+ itertools.product(
+ [True, False],
+ [True, False],
+ ),
+ )
+ def test_valid_leaky_relu_pattern(self, swap_mul_input_order, swap_max_input_order):
+ """
+ Input graph:
+
+ const (val = 0.3)
+ |
+ input ----> mul ---------------> maximum -----------> output
+ | |
+ |----------------------------------
+
+ Output graph:
+
+ input --------> leaky_relu ---------> output
+ """
+
+ @mb.program(input_specs=[mb.TensorSpec(shape=(3, 5, 6))])
+ def prog(x):
+ if swap_mul_input_order:
+ x1 = mb.mul(x=x, y=0.3)
+ else:
+ x1 = mb.mul(x=0.3, y=x)
+ if swap_max_input_order:
+ x1 = mb.maximum(x=x1, y=x)
+ else:
+ x1 = mb.maximum(x=x, y=x1)
+ return x1
+
+ prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::fuse_leaky_relu")
+ assert get_op_types_in_program(prev_prog) == ["mul", "maximum"]
+ assert get_op_types_in_program(prog) == ["leaky_relu"]
+ assert_model_is_valid(
+ prog,
+ {"x": (3, 5, 6)},
+ expected_output_shapes={block.outputs[0].name: (3, 5, 6)},
+ )
+
+ def test_invalid_leaky_relu_pattern1(self):
+ """
+ Invalid because the alpha value is greater than 1
+
+ Input graph:
+
+ const (val = 1.3)
+ |
+ input ----> mul ---------------> maximum -----------> output
+ | |
+ |----------------------------------
+
+ Output graph: same as input graph
+ """
+
+ @mb.program(input_specs=[mb.TensorSpec(shape=(3, 5, 6))])
+ def prog(x):
+ x1 = mb.mul(x=x, y=1.3)
+ x1 = mb.maximum(x=x1, y=x)
+ return x1
+
+ prev_prog, prev_block, block = 
apply_pass_and_basic_check(prog, "common::fuse_leaky_relu") + assert get_op_types_in_program(prev_prog) == ["mul", "maximum"] + assert get_op_types_in_program(prog) == ["mul", "maximum"] + + def test_invalid_leaky_relu_pattern2(self): + """ + Invalid because input to the "maximum" op is not same as the input of the "mul" op + + Input graph: + + const (val = 0.3) + | + input ----> mul ---------------> maximum -----------> output + | + const + + + Output graph: same as input graph + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(3, 5, 6))]) + def prog(x): + x1 = mb.mul(x=x, y=0.3) + x1 = mb.maximum(x=x1, y=0.4) + return x1 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::fuse_leaky_relu") + assert get_op_types_in_program(prev_prog) == ["mul", "maximum"] + assert get_op_types_in_program(prog) == ["mul", "maximum"] + + +class TestPreluFusion: + @pytest.mark.parametrize( + "swap_input_order, alpha_rank", + itertools.product( + [True, False], + [3, 4], + ), + ) + def test_channel_first_pattern(self, swap_input_order, alpha_rank): + """ + Input: + | ------------> relu --------------------| + | V + x (BCHW) ------| add -----> y (BCHW) + | ^ + --------> mul -------> relu -----> mul---| + ^ ^ + | | + Const(val=-1) Const(name=a, shape=(1,C,1,1)) + + Output: + x (BCHW) ------> prelu(alpha=a, shape=(C,)) ---------> y (BCHW) + """ + B, C, H, W = 2, 3, 5, 6 + + if alpha_rank == 3: + alpha = np.random.rand(C, 1, 1) + elif alpha_rank == 4: + alpha = np.random.rand(1, C, 1, 1) + else: + raise NotImplementedError("alpha rank must be 3 or 4") + + @mb.program(input_specs=[mb.TensorSpec(shape=(B, C, H, W))]) + def prog(x): + if swap_input_order: + neg = mb.mul(x=x, y=-1.0) + else: + neg = mb.mul(x=-1.0, y=x) + relu1 = mb.relu(x=neg) + if swap_input_order: + mul = mb.mul(x=relu1, y=alpha) + else: + mul = mb.mul(x=alpha, y=relu1) + relu2 = mb.relu(x=x) + if swap_input_order: + out = mb.add(x=relu2, y=mul) + else: + out = mb.add(x=mul, y=relu2) + return out + + prev_prog, _, _ = apply_pass_and_basic_check( + prog, + "common::fuse_prelu", + ) + assert get_op_types_in_program(prev_prog) == ["mul", "relu", "mul", "relu", "add"] + assert get_op_types_in_program(prog) == ["prelu"] + + @pytest.mark.parametrize( + "swap_input_order, alpha_rank", + itertools.product( + [True, False], + [1, 2, 3], + ), + ) + def test_channel_last_transpose_pattern(self, swap_input_order, alpha_rank): + """ + Input: + + | ------------> relu --------------------| + | V + x (shappe=BCHW)-->transpose(out_shape=BHWC)---->| add -----> y (BHWC) + | ^ + --------> mul -------> relu -----> mul---| + ^ ^ + | | + Const(val=-1) Const(shape=(1,1,C)) + + Output: + x (BCHW) ------> prelu ---------> transpose ------> y (BHWC) + """ + B, C, H, W = 2, 3, 5, 6 + if alpha_rank == 1: + alpha = np.random.rand(C) + elif alpha_rank == 2: + alpha = np.random.rand(1, C) + elif alpha_rank == 3: + alpha = np.random.rand(1, 1, C) + else: + raise NotImplementedError("alpha rank must be 1 or 2 or 3") + + @mb.program(input_specs=[mb.TensorSpec(shape=(B, C, H, W))]) + def prog(x): + x = mb.transpose(x=x, perm=[0, 2, 3, 1]) + if swap_input_order: + neg = mb.mul(x=x, y=-1.0) + else: + neg = mb.mul(x=-1.0, y=x) + relu1 = mb.relu(x=neg) + if swap_input_order: + mul = mb.mul(x=relu1, y=alpha) + else: + mul = mb.mul(x=alpha, y=relu1) + relu2 = mb.relu(x=x) + if swap_input_order: + out = mb.add(x=relu2, y=mul) + else: + out = mb.add(x=mul, y=relu2) + return out + + prev_prog, _, block = apply_pass_and_basic_check( + prog, + "common::fuse_prelu", 
+ ) + assert get_op_types_in_program(prev_prog) == [ + "transpose", + "mul", + "relu", + "mul", + "relu", + "add", + ] + assert get_op_types_in_program(prog) == ["prelu", "transpose"] + assert_model_is_valid( + prog, + {"x": (B, C, H, W)}, + expected_output_shapes={block.outputs[0].name: (B, H, W, C)}, + ) + + +class TestPreluToLrelu: + def test_prelu_to_lrelu(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(4, 2, 3, 1))]) + def prog(x): + # Not a common leakage factor. + alpha_0 = np.array([1.0, 2.0], dtype=np.float32) + x = mb.prelu(x=x, alpha=alpha_0) + + add_val = np.random.rand(4, 2, 3, 1).astype(np.float32) + x = mb.add(x=x, y=add_val) + + # Common leakage factor. + alpha_1 = np.array([1.5, 1.5], dtype=np.float32) + x = mb.prelu(x=x, alpha=alpha_1) + + return x + + assert_op_count_match(prog, expect=2, op="prelu") + assert_op_count_match(prog, expect=0, op="leaky_relu") + prev_prog, _, _ = apply_pass_and_basic_check(prog, "common::prelu_to_lrelu") + assert_same_output_names(prev_prog, prog) + # The prelu with a common leakage factor becomes leaky_relu. + assert_op_count_match(prog, expect=1, op="prelu") + assert_op_count_match(prog, expect=1, op="leaky_relu") + + if _VALIDATE_MODEL: + assert_model_is_valid(prog, {"x": (4, 2, 3, 1)}) + + +class TestSkipConstexprOps: + @staticmethod + def _get_constexpr_cast(shape): + val = np.random.rand(*shape).astype(np.float16) + return mb.constexpr_cast(source_val=val, output_dtype="fp32") + + @staticmethod + def _get_constexpr_sparse_to_dense(shape): + val = np.random.rand(*shape) + sparse_params = quantization.WeightSparsifier.compress( + val=val, mode="PERCENTILE_MODE", target_percentile=0.4 + ) + return mb.constexpr_sparse_to_dense( + nonzero_data=sparse_params.nonzero_data, + mask=sparse_params.mask, + shape=np.uint32(sparse_params.shape), + ) + + @staticmethod + def _get_constexpr_lut_to_dense(shape): + val = np.random.rand(*shape) + lut_params = quantization.WeightPalettizer.compress(val=val, nbits=4, mode="UNIFORM") + return mb.constexpr_lut_to_dense( + indices=lut_params.indices, + lut=lut_params.lut, + shape=np.uint32(lut_params.shape), + ) + + @staticmethod + def _get_constexpr_affine_dequantize(shape): + val = np.random.rand(*shape) + quant_params = quantization.WeightAffineQuantizer.compress( + val=val, axis=0, mode="LINEAR_SYMMETRIC", dtype=types.uint8 + ) + return mb.constexpr_affine_dequantize( + quantized_data=quant_params.quantized_data, + zero_point=quant_params.zero_point, + scale=quant_params.scale, + axis=quant_params.axis, + ) + + # Static method cannot be stored as a function without attribute access. + CONSTEXPR_FUNCS = { + "constexpr_cast": _get_constexpr_cast.__func__, + "constexpr_sparse_to_dense": _get_constexpr_sparse_to_dense.__func__, + "constexpr_lut_to_dense": _get_constexpr_lut_to_dense.__func__, + "constexpr_affine_dequantize": _get_constexpr_affine_dequantize.__func__, + } + + CONSTEXPR_OPS = [ + "constexpr_cast", + "constexpr_sparse_to_dense", + "constexpr_lut_to_dense", + "constexpr_affine_dequantize", + ] + + @staticmethod + @pytest.mark.parametrize( + "constexpr_op", + CONSTEXPR_OPS, + ) + def test_skip_const_elimination(constexpr_op): + """ + constexpr_op + | + v + const -> linear + | + v + input --------------> add -> output + + We are testing that: + 1. constexpr_op can serve as a const input weight for linear op + 2. 
linear op shouldn't be removed by the const_elimination pass
+ """
+
+ @mb.program(input_specs=[mb.TensorSpec(shape=(4,))])
+ def prog(x):
+ a = np.random.rand(
+ 2,
+ )
+ constexpr = TestSkipConstexprOps.CONSTEXPR_FUNCS[constexpr_op]((4, 2))
+ linear = mb.linear(x=a, weight=constexpr)
+ return mb.add(x=x, y=linear)
+
+ PASS_REGISTRY["common::const_elimination"](prog)
+ assert get_op_types_in_program(prog) == [constexpr_op, "linear", "add"]
+
+ @staticmethod
+ @pytest.mark.parametrize(
+ "constexpr_op, weight_constexpr, bias_constexpr",
+ itertools.product(
+ CONSTEXPR_OPS,
+ [True, False],
+ [True, False],
+ ),
+ )
+ def test_skip_fuse_matmul_weight_bias(constexpr_op, weight_constexpr, bias_constexpr):
+ """
+ const_1 const_2
+ | |
+ v v
+ input -----> matmul -----> add ---> out
+
+ In this case, if either const_1 or const_2 is a constexpr op, they should not be fused into a single linear op
+ """
+
+ def get_matmul(x, weight_constexpr):
+ weight = TestSkipConstexprOps.CONSTEXPR_FUNCS[constexpr_op]((3, 2))
+ if not weight_constexpr:
+ weight = weight.val
+ return mb.matmul(x=x, y=weight)
+
+ def get_add(x, bias_constexpr):
+ bias = TestSkipConstexprOps.CONSTEXPR_FUNCS[constexpr_op]((2,))
+ if not bias_constexpr:
+ bias = bias.val
+ return mb.add(x=x, y=bias)
+
+ @mb.program(input_specs=[mb.TensorSpec(shape=(1, 3))])
+ def prog(x):
+ x = get_matmul(x, weight_constexpr)
+ x = get_add(x, bias_constexpr)
+ return x
+
+ apply_pass_and_basic_check(prog, "common::fuse_matmul_weight_bias")
+ apply_pass_and_basic_check(prog, "common::const_elimination")
+ apply_pass_and_basic_check(prog, "common::dead_code_elimination")
+
+ if not weight_constexpr and not bias_constexpr:
+ expected_ops = ["linear"]
+ else:
+ expected_ops = []
+ if weight_constexpr:
+ expected_ops.append(constexpr_op)
+ expected_ops.append("matmul")
+ if bias_constexpr:
+ expected_ops.append(constexpr_op)
+ expected_ops.append("add")
+
+ assert get_op_types_in_program(prog) == expected_ops
+
+ @staticmethod
+ @pytest.mark.parametrize(
+ "constexpr_op, op, weight_constexpr, const_constexpr",
+ itertools.product(
+ CONSTEXPR_OPS,
+ ["mul", "add"],
+ [True, False],
+ [True, False],
+ ),
+ )
+ def test_skip_fuse_conv(constexpr_op, op, weight_constexpr, const_constexpr):
+
+ """
+ const_1 const_2
+ | |
+ v v
+ input -----> conv -----> mul/add ---> out
+
+ This pattern shouldn't be fused into a single conv layer if one of const_1 or const_2 is a constexpr op.
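+ (Folding the mul/add into the conv would require materializing new const weight / bias
+ tensors, which would undo the compression that the constexpr op represents.)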
+ """ + Cin, Cout = 3, 3 + input_shape = (2, Cin, 5, 5) + + @mb.program(input_specs=[mb.TensorSpec(shape=input_shape)]) + def prog(x): + conv_weight = TestSkipConstexprOps.CONSTEXPR_FUNCS[constexpr_op]((Cout, Cin, 2, 2)) + if not weight_constexpr: + conv_weight = conv_weight.val + x = mb.conv(x=x, weight=conv_weight) + const = TestSkipConstexprOps.CONSTEXPR_FUNCS[constexpr_op]((Cout, 1, 1)) + if not const_constexpr: + const = const.val + return getattr(mb, op)(x=x, y=const) + + apply_pass_and_basic_check(prog, "common::fuse_conv_scale") + apply_pass_and_basic_check(prog, "common::fuse_conv_bias") + apply_pass_and_basic_check(prog, "common::const_elimination") + apply_pass_and_basic_check(prog, "common::dead_code_elimination") + + expected_ops = [] + if not weight_constexpr and not const_constexpr: + expected_ops = ["conv"] + else: + if weight_constexpr: + expected_ops.append(constexpr_op) + expected_ops.append("conv") + if const_constexpr: + expected_ops.append(constexpr_op) + if op != "add" or const_constexpr: + expected_ops.append(op) + + assert get_op_types_in_program(prog) == expected_ops + + @staticmethod + @pytest.mark.parametrize( + "constexpr_op, weight_constexpr, bias_constexpr", + itertools.product( + CONSTEXPR_OPS, + [True, False], + [True, False], + ), + ) + def test_skip_fuse_linear_bias(constexpr_op, weight_constexpr, bias_constexpr): + """ + const_1 const_2 + | | + v V + input -----> linear -----> add ---> out + + This pattern shouldn't be fused into a single linear layer if one of const_1 or const_2 is a constexpr op. + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(2,))]) + def prog(x): + weight = TestSkipConstexprOps.CONSTEXPR_FUNCS[constexpr_op]((4, 2)) + if not weight_constexpr: + weight = weight.val + linear = mb.linear(x=x, weight=weight) + bias = TestSkipConstexprOps.CONSTEXPR_FUNCS[constexpr_op]((4,)) + if not bias_constexpr: + bias = bias.val + return mb.add(x=linear, y=bias) + + apply_pass_and_basic_check(prog, "common::fuse_linear_bias") + apply_pass_and_basic_check(prog, "common::const_elimination") + apply_pass_and_basic_check(prog, "common::dead_code_elimination") + + expected_ops = [] + if not weight_constexpr and not bias_constexpr: + expected_ops = ["linear"] + else: + if weight_constexpr: + expected_ops.append(constexpr_op) + expected_ops.append("linear") + if bias_constexpr: + expected_ops.append(constexpr_op) + expected_ops.append("add") + + assert get_op_types_in_program(prog) == expected_ops + + @staticmethod + @pytest.mark.parametrize( + "constexpr_op, weight_constexpr, bias_constexpr", + itertools.product( + CONSTEXPR_OPS, + [True, False], + [True, False], + ), + ) + def test_skip_fuse_conv_batchnorm(constexpr_op, weight_constexpr, bias_constexpr): + """ + weight bias + | | + |_____ ____| + | | + v v + input -----> conv -----> batch_norm ---> out + + This pattern shouldn't be fused into a single conv layer if one of the weight / bias is a constexpr op. 
+ """ + Cin, Cout = 2, 3 + input_shape = (2, Cin, 5, 5) + + @mb.program(input_specs=[mb.TensorSpec(shape=input_shape)]) + def prog(x): + # conv layer + weight = TestSkipConstexprOps.CONSTEXPR_FUNCS[constexpr_op]((Cout, Cin, 2, 2)) + if not weight_constexpr: + weight = weight.val + bias = TestSkipConstexprOps.CONSTEXPR_FUNCS[constexpr_op]((Cout,)) + if not bias_constexpr: + bias = bias.val + + x = mb.conv( + x=x, + weight=weight, + bias=bias, + ) + + # batch_norm layer + gamma = np.random.rand(Cout) + beta = np.random.rand(Cout) + mean = np.random.rand(Cout) + variance = np.random.rand(Cout) + epsilon = 1e-2 + return mb.batch_norm( + x=x, + mean=mean, + variance=variance, + gamma=gamma, + beta=beta, + epsilon=epsilon, + ) + + apply_pass_and_basic_check(prog, "common::fuse_conv_batchnorm") + apply_pass_and_basic_check(prog, "common::const_elimination") + apply_pass_and_basic_check(prog, "common::dead_code_elimination") + + expected_ops = [] + if not weight_constexpr and not bias_constexpr: + expected_ops = ["conv"] + else: + expected_ops = [constexpr_op] * sum([weight_constexpr, bias_constexpr]) + [ + "conv", + "batch_norm", + ] + + assert get_op_types_in_program(prog) == expected_ops + + +class TestMergeConsecutivePaddings: + def test_success_reflect(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))]) + def prog(x1): + pad1 = mb.pad(x=x1, pad=[0, 0, 1, 1], mode="reflect") + pad2 = mb.pad(x=pad1, pad=[1, 1, 0, 0], mode="reflect") + + return pad2 + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_paddings") + assert get_op_types_in_program(prev_prog) == ["pad", "pad"] + assert get_op_types_in_program(prog) == ["pad"] + + inputs = {"x1": (1, 2, 6, 8)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 8, 10)}, + ) + + @pytest.mark.parametrize("swap_axes", [False, True]) + def test_success_different_rank1(self, swap_axes): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))]) + def prog(x1): + if swap_axes: + pad1 = mb.pad(x=x1, pad=[1, 1], mode="reflect") + pad2 = mb.pad(x=pad1, pad=[1, 1, 0, 0], mode="reflect") + else: + pad1 = mb.pad(x=x1, pad=[1, 1, 0, 0], mode="reflect") + pad2 = mb.pad(x=pad1, pad=[1, 1], mode="reflect") + + return pad2 + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_paddings") + assert get_op_types_in_program(prev_prog) == ["pad", "pad"] + assert get_op_types_in_program(prog) == ["pad"] + + inputs = {"x1": (1, 2, 6, 8)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 8, 10)}, + ) + + def test_success_constant(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))]) + def prog(x1): + pad1 = mb.pad(x=x1, pad=[0, 0, 1, 1], mode="constant", constant_val=3.0) + pad2 = mb.pad(x=pad1, pad=[1, 1, 0, 0], mode="constant", constant_val=3.0) + + return pad2 + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_paddings") + assert get_op_types_in_program(prev_prog) == ["pad", "pad"] + assert get_op_types_in_program(prog) == ["pad"] + + pad_ops = [op for op in prog["main"].operations if op.op_type == "pad"] + assert pad_ops[0].inputs["constant_val"].val == 3.0 + + inputs = {"x1": (1, 2, 6, 8)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 8, 10)}, + ) + + def test_success_3_layers(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))]) + def prog(x1): + pad1 = mb.pad(x=x1, 
pad=[0, 0, 1, 1], mode="constant", constant_val=3.0) + pad2 = mb.pad(x=pad1, pad=[1, 1, 0, 0], mode="constant", constant_val=3.0) + pad3 = mb.pad(x=pad2, pad=[1, 1, 0, 0], mode="constant", constant_val=3.0) + + return pad3 + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_paddings") + assert get_op_types_in_program(prev_prog) == ["pad", "pad", "pad"] + assert get_op_types_in_program(prog) == ["pad"] + + pad_ops = [op for op in prog["main"].operations if op.op_type == "pad"] + assert pad_ops[0].inputs["constant_val"].val == 3.0 + + inputs = {"x1": (1, 2, 6, 8)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 10, 10)}, + ) + + def test_failure_different_mode(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))]) + def prog(x1): + pad1 = mb.pad(x=x1, pad=[0, 0, 1, 1], mode="reflect") + pad2 = mb.pad(x=pad1, pad=[1, 1, 0, 0], mode="constant") + + return pad2 + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_paddings") + assert get_op_types_in_program(prev_prog) == ["pad", "pad"] + assert get_op_types_in_program(prog) == ["pad", "pad"] + + inputs = {"x1": (1, 2, 6, 8)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 8, 10)}, + ) + + def test_failure_different_constants(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))]) + def prog(x1): + pad1 = mb.pad(x=x1, pad=[0, 0, 1, 1], mode="constant", constant_val=1.0) + pad2 = mb.pad(x=pad1, pad=[1, 1, 0, 0], mode="constant", constant_val=2.0) + + return pad2 + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_paddings") + assert get_op_types_in_program(prev_prog) == ["pad", "pad"] + assert get_op_types_in_program(prog) == ["pad", "pad"] + + inputs = {"x1": (1, 2, 6, 8)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 8, 10)}, + ) + + def test_failure_repeat_on_same_axis(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))]) + def prog(x1): + pad1 = mb.pad(x=x1, pad=[1, 1], mode="reflect") + pad2 = mb.pad(x=pad1, pad=[1, 1], mode="reflect") + + return pad2 + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_paddings") + assert get_op_types_in_program(prev_prog) == ["pad", "pad"] + assert get_op_types_in_program(prog) == ["pad", "pad"] + + inputs = {"x1": (1, 2, 6, 8)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 6, 12)}, + ) + +class TestMergeConsecutiveTransposes: + def test_success_reduce_consecutive_transposes(self): + """ + Input: + |--> transpose_1 -> transpose_2 -> output_1 + x - + |--> transpose_3 -> tranpose_4 -> transpose_5 -> output_2 + + Output: + |--> transpose_6 -> output_1 + x - + |--> transpose_7 -> output_2 + """ + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 3, 4))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[0, 2, 1, 3]) + x1 = mb.transpose(x=x1, perm=[3, 2, 0, 1]) + x2 = mb.transpose(x=x, perm=[3, 2, 1, 0]) + x2 = mb.transpose(x=x2, perm=[2, 3, 0, 1]) + x2 = mb.transpose(x=x2, perm=[0, 2, 1, 3]) + + return x1, x2 + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_transposes") + assert get_op_types_in_program(prev_prog) == ["transpose"] * 5 + assert get_op_types_in_program(prog) == ["transpose"] * 2 + + inputs = {"x": (1, 2, 3, 4)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={ + 
block.outputs[0].name: (4, 2, 1, 3),
+ block.outputs[1].name: (2, 4, 1, 3),
+ },
+ )
+
+ def test_success_reduce_consecutive_transposes_with_output_constrain(self):
+ """
+ Input:
+ x --> transpose_1 -> transpose_2 -> transpose_3 -> transpose_4 -> transpose_5 -> add -> output_3
+ | |
+ v v
+ output_1 output_2
+
+ Output:
+ x --> transpose_1 -> transpose_6 -> transpose_7 -> add -> output_3
+ | |
+ v v
+ output_1 output_2
+ """
+ @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 3, 4))])
+ def prog(x):
+ x1 = mb.transpose(x=x, perm=[3, 2, 1, 0], name="output_1")
+ x2 = mb.transpose(x=x1, perm=[1, 3, 2, 0])
+ x2 = mb.transpose(x=x2, perm=[2, 3, 0, 1], name="output_2")
+ x3 = mb.transpose(x=x2, perm=[0, 2, 1, 3])
+ x3 = mb.transpose(x=x3, perm=[3, 2, 1, 0])
+ x3 = mb.add(x=x3, y=1., name="output_3")
+ return x1, x2, x3
+
+ prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_transposes")
+ assert get_op_types_in_program(prev_prog) == ["transpose"] * 5 + ["add"]
+ assert get_op_types_in_program(prog) == ["transpose"] * 3 + ["add"]
+
+ inputs = {"x": (1, 2, 3, 4)}
+ assert_model_is_valid(
+ prog,
+ inputs,
+ expected_output_shapes={
+ block.outputs[0].name: (4, 3, 2, 1),
+ block.outputs[1].name: (2, 4, 3, 1),
+ block.outputs[2].name: (1, 4, 3, 2),
+ },
+ )
+
+ assert block.outputs[0].name == "output_1"
+ assert block.outputs[1].name == "output_2"
+ assert block.outputs[2].name == "output_3"
+
+ def test_not_merge_transposes(self):
+ """
+ Input:
+ x --> transpose_1 -> add -> transpose_2 -> output
+
+ Output:
+ x --> transpose_1 -> add -> transpose_2 -> output
+ """
+ @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 3, 4))])
+ def prog(x):
+ x = mb.transpose(x=x, perm=[3, 2, 1, 0])
+ x = mb.add(x=x, y=1.)
+ x = mb.transpose(x=x, perm=[1, 3, 2, 0])
+ return x
+
+ prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_transposes")
+ assert get_op_types_in_program(prev_prog) == ["transpose", "add", "transpose"]
+ assert get_op_types_in_program(prog) == ["transpose", "add", "transpose"]
+
+ inputs = {"x": (1, 2, 3, 4)}
+ assert_model_is_valid(
+ prog,
+ inputs,
+ expected_output_shapes={block.outputs[0].name: (3, 1, 2, 4),},
+ )
+
+class TestExpandHighRankReshapeAndTranspose:
+ @staticmethod
+ def _test_numerical(prog, input_shape, reshape_shape, perm, output_shape):
+ x = np.random.rand(*input_shape)
+ coreml_input = {"x": x}
+ mlmodel = ct.convert(prog, source="milinternal")
+ coreml_output = list(mlmodel.predict(coreml_input).values())[0]
+
+ gt = np.reshape(x, reshape_shape)
+ gt = np.transpose(gt, perm)
+ gt = np.reshape(gt, output_shape)
+ np.testing.assert_allclose(gt, coreml_output, rtol=1e-03, atol=1e-05)
+
+ def test_rank6(self):
+ input_shape = (1, 2, 3, 4, 5)
+ reshape_shape = (1, 2, 3, 2, 2, 5)
+ perm = (4, 5, 3, 2, 0, 1)
+ output_shape = (5, 24)
+
+ @mb.program(input_specs=[mb.TensorSpec(shape=input_shape)])
+ def prog(x):
+ x = mb.reshape(x=x, shape=reshape_shape)
+ x = mb.transpose(x=x, perm=perm)
+ x = mb.reshape(x=x, shape=output_shape)
+ return x
+ prev_prog, _, block = apply_pass_and_basic_check(prog, "common::expand_high_rank_reshape_and_transpose")
+
+ prog._check_invalid_tensor_rank()
+ assert get_op_types_in_program(prog) == ["reshape", "transpose", "reshape"]
+ TestExpandHighRankReshapeAndTranspose._test_numerical(prev_prog, input_shape, reshape_shape, perm, output_shape)
+
+ def test_rank10(self):
+ input_shape = (2, 3, 4, 5, 6)
+ reshape_shape = (1, 2, 1, 3, 2, 2, 1, 5, 2, 3)
+ perm = (0, 1, 2, 3, 4, 9, 5, 6, 7, 8) 
+ output_shape = (30, 24) + + @mb.program(input_specs=[mb.TensorSpec(shape=input_shape)]) + def prog(x): + x = mb.reshape(x=x, shape=reshape_shape) + x = mb.transpose(x=x, perm=perm) + x = mb.reshape(x=x, shape=output_shape) + return x + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::expand_high_rank_reshape_and_transpose") + + prog._check_invalid_tensor_rank() + assert get_op_types_in_program(prog) == ["reshape", "transpose", "reshape"] + TestExpandHighRankReshapeAndTranspose._test_numerical(prev_prog, input_shape, reshape_shape, perm, output_shape) + + def test_rank20(self): + input_shape = (4, 6, 8, 20, 40) + reshape_shape = (1, 2, 2, 1, 2, 3, 2, 2, 2, 2, 2, 1, 1, 1, 5, 2, 2, 2, 1, 5) + perm = (19, 14, 13, 12, 0, 3, 1, 2, 10, 5, 4, 6, 15, 11, 17, 18, 7, 8, 9, 16) + output_shape = (24, 160, 40) + + @mb.program(input_specs=[mb.TensorSpec(shape=input_shape)]) + def prog(x): + x = mb.reshape(x=x, shape=reshape_shape) + x = mb.transpose(x=x, perm=perm) + x = mb.reshape(x=x, shape=output_shape) + return x + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::expand_high_rank_reshape_and_transpose") + + prog._check_invalid_tensor_rank() + assert get_op_types_in_program(prog) == ["reshape", "transpose"] * 16 + ["reshape"] + TestExpandHighRankReshapeAndTranspose._test_numerical(prev_prog, input_shape, reshape_shape, perm, output_shape) + + def test_negative_case(self): + input_shape = (4, 6, 8, 20, 40) + reshape_shape = (1, 2, 2, 1, 2, 3, 2, 2, 2, 2, 2, 1, 1, 1, 5, 2, 2, 2, 1, 5) + perm = (19, 14, 13, 12, 0, 3, 1, 2, 10, 5, 4, 6, 15, 11, 17, 18, 7, 8, 9, 16) + output_shape = (24, 160, 40) + + @mb.program(input_specs=[mb.TensorSpec(shape=input_shape)]) + def prog(x): + x1 = mb.reshape(x=x, shape=reshape_shape) + x2 = mb.transpose(x=x1, perm=perm) + x3 = mb.reshape(x=x2, shape=output_shape) + return x, x1 + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::expand_high_rank_reshape_and_transpose") + + with pytest.raises(ValueError, match="Core ML only supports tensors with rank <= 5"): + prog._check_invalid_tensor_rank() + + +class TestMergeConsecutiveRelus: + @pytest.mark.parametrize( + "relu_num", + [2, 3, 4], + ) + def test_success_reduce_consecutive_relus(self, relu_num): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 3))]) + def prog(x): + for _ in range(relu_num): + x = mb.relu(x=x) + x = mb.add(x=x, y=1.0) + return x + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_relus") + assert get_op_types_in_program(prev_prog) == ["relu"] * relu_num + ["add"] + assert get_op_types_in_program(prog) == ["relu", "add"] + + inputs = {"x": (1, 2, 3)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 3)}, + ) + + @pytest.mark.parametrize( + "relu_num", + [2, 3, 4], + ) + def test_keep_not_consecutive_relus(self, relu_num): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 3))]) + def prog(x): + for _ in range(relu_num): + x = mb.relu(x=x) + x = mb.add(x=x, y=1.0) + return x + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_relus") + assert get_op_types_in_program(prev_prog) == ["relu", "add"] * relu_num + assert get_op_types_in_program(prog) == get_op_types_in_program(prev_prog) + + inputs = {"x": (1, 2, 3)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 3)}, + ) + + def test_mix_situation(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 3))]) + def prog(x): + relu1 = 
mb.relu(x=x) + relu_after_add = mb.add(x=relu1, y=1.0) + relu2 = mb.relu(x=relu_after_add) + relu3 = mb.relu(x=relu2) + return relu3 + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_relus") + assert get_op_types_in_program(prev_prog) == ["relu", "add", "relu", "relu"] + assert get_op_types_in_program(prog) == ["relu", "add", "relu"] + + inputs = {"x": (1, 2, 3)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 3)}, + ) + + def test_name_change_depend_on_output(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 3))]) + def prog_output_transpose_2(x): + transpose_1 = mb.relu(x=x, name="transpose_1") + transpose_2 = mb.relu(x=transpose_1, name="transpose_2") + transpose_3 = mb.transpose(x=transpose_2, perm=[0, 2, 1], name="transpose_3") + return transpose_2, transpose_3 + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 3))]) + def prog_output_transpose_3(x): + transpose_1 = mb.relu(x=x, name="transpose_1") + transpose_2 = mb.relu(x=transpose_1, name="transpose_2") + transpose_3 = mb.transpose(x=transpose_2, perm=[0, 2, 1], name="transpose_3") + return transpose_3 + + prev_prog_output_transpose_2, _, block = apply_pass_and_basic_check( + prog_output_transpose_2, "common::merge_consecutive_relus" + ) + assert get_op_types_in_program(prev_prog_output_transpose_2) == [ + "relu", + "relu", + "transpose", + ] + assert get_op_types_in_program(prog_output_transpose_2) == ["relu", "transpose"] + assert prog_output_transpose_2["main"].operations[0].name == "transpose_1" + # As the block's output has transpose_2, the original output name of the first operation + # is replaced. + assert prog_output_transpose_2["main"].operations[0].outputs[0].name == "transpose_2" + + prev_prog_output_transpose_3, _, block = apply_pass_and_basic_check( + prog_output_transpose_3, "common::merge_consecutive_relus" + ) + assert get_op_types_in_program(prev_prog_output_transpose_3) == [ + "relu", + "relu", + "transpose", + ] + assert get_op_types_in_program(prog_output_transpose_3) == ["relu", "transpose"] + assert prog_output_transpose_3["main"].operations[0].name == "transpose_1" + # As the block's output only has transpose_3, the entire transpose_2 gets removed. 
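+ # The surviving relu therefore keeps its original output name, "transpose_1".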
+ assert prog_output_transpose_3["main"].operations[0].outputs[0].name == "transpose_1" + + inputs = {"x": (1, 2, 3)} + assert_model_is_valid( + prog_output_transpose_2, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 3, 2)}, + ) + + inputs = {"x": (1, 2, 3)} + assert_model_is_valid( + prog_output_transpose_3, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 3, 2)}, + ) + + +class TestMergeConsecutiveReshapes: + @pytest.mark.parametrize( + "backend", + backends, + ) + def test_merge_consecutive_2reshapes(self, backend): + INPUT_SHAPE = (2, 3) + OUTPUT_SHAPE = (3, 2) + + @mb.program(input_specs=[mb.TensorSpec(shape=INPUT_SHAPE)]) + def prog(x): + y1 = mb.reshape(x=x, shape=(-1,)) + y2 = mb.reshape(x=y1, shape=OUTPUT_SHAPE) + return y2 + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_reshapes") + assert get_op_types_in_program(prev_prog) == ["reshape"] * 2 + assert get_op_types_in_program(prog) == ["reshape"] + + assert_model_is_valid( + prog, + {"x": INPUT_SHAPE}, + expected_output_shapes={block.outputs[0].name: OUTPUT_SHAPE}, + backend=backend, + ) + + @pytest.mark.parametrize( + "backend", + backends, + ) + def test_merge_consecutive_4reshapes(self, backend): + INPUT_SHAPE = (2, 3, 5) + OUTPUT_SHAPE = (10, 3) + + @mb.program(input_specs=[mb.TensorSpec(shape=INPUT_SHAPE)]) + def prog(x): + y1 = mb.reshape(x=x, shape=(15, 2)) + y2 = mb.reshape(x=y1, shape=(2, 5, 3)) + y3 = mb.reshape(x=y2, shape=(6, 5)) + y4 = mb.reshape(x=y3, shape=OUTPUT_SHAPE) + return y4 + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_reshapes") + assert get_op_types_in_program(prev_prog) == ["reshape"] * 4 + assert get_op_types_in_program(prog) == ["reshape"] + + assert_model_is_valid( + prog, + {"x": INPUT_SHAPE}, + expected_output_shapes={block.outputs[0].name: OUTPUT_SHAPE}, + backend=backend, + ) + + @pytest.mark.parametrize( + "backend", + backends, + ) + def test_keep_separate_reshapes(self, backend): + INPUT_SHAPE = (3, 5, 7) + OUTPUT_SHAPE = (7, 3, 5) + + @mb.program(input_specs=[mb.TensorSpec(shape=INPUT_SHAPE)]) + def prog(x): + y1 = mb.reshape(x=x, shape=(21, 5)) + + # Note [elementwise op and reshape] + # In principle, elementwise ops can be swapped with the reshapes, e.g. 
+ # in -> reshape1 -> elementwise1 -> reshape2 -> elementwise2 -> reshape3 -> out
+ # is equivalent to
+ # in -> elementwise1 -> elementwise2 -> reshape1 -> reshape2 -> reshape3 -> out
+ # which can then be optimized to
+ # in -> elementwise1 -> elementwise2 -> reshape3 -> out
+ #
+ # so here we divide the reshape sequence with something non-elementwise
+ bias = np.random.rand(5) * 2.0 - 1.0
+ y2 = mb.add(x=y1, y=bias)
+
+ y3 = mb.reshape(x=y2, shape=OUTPUT_SHAPE)
+ return y3
+
+ prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_reshapes")
+ assert get_op_types_in_program(prev_prog) == ["reshape", "add", "reshape"]
+ assert get_op_types_in_program(prog) == ["reshape", "add", "reshape"]
+
+ assert_model_is_valid(
+ prog,
+ {"x": INPUT_SHAPE},
+ expected_output_shapes={block.outputs[0].name: OUTPUT_SHAPE},
+ backend=backend,
+ )
+
+ @pytest.mark.parametrize("backend", backends)
+ def test_merge_2consecutive_keep_1separate(self, backend):
+ INPUT_SHAPE = (5, 7, 11)
+ OUTPUT_SHAPE = (11, 5, 7)
+
+ @mb.program(input_specs=[mb.TensorSpec(shape=(INPUT_SHAPE))])
+ def prog(x):
+ # these 2 reshapes will be merged
+ y1 = mb.reshape(x=x, shape=(35, 11))
+ y2 = mb.reshape(x=y1, shape=(55, 7))
+
+ # see Note [elementwise op and reshape]
+ bias = np.random.rand(7) * 2.0 - 1.0
+ y3 = mb.sub(x=y2, y=bias)
+
+ # this reshape is separated, so it will be kept
+ y4 = mb.reshape(x=y3, shape=OUTPUT_SHAPE)
+ return y4
+
+ prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_reshapes")
+ assert get_op_types_in_program(prev_prog) == ["reshape", "reshape", "sub", "reshape"]
+ assert get_op_types_in_program(prog) == ["reshape", "sub", "reshape"]
+
+ assert_model_is_valid(
+ prog,
+ {"x": INPUT_SHAPE},
+ expected_output_shapes={block.outputs[0].name: OUTPUT_SHAPE},
+ backend=backend,
+ )
+
+ @pytest.mark.parametrize(
+ "backend",
+ backends,
+ )
+ def test_keep_block_outputs(self, backend):
+ INPUT_SHAPE = (5, 6)
+ OUTPUT0_SHAPE = (15, 2)
+ OUTPUT1_SHAPE = (3, 10)
+
+ @mb.program(input_specs=[mb.TensorSpec(shape=INPUT_SHAPE)])
+ def prog(x):
+ y1 = mb.reshape(x=x, shape=OUTPUT0_SHAPE)
+ y2 = mb.reshape(x=y1, shape=OUTPUT1_SHAPE)
+ return y1, y2
+
+ prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_reshapes")
+ assert get_op_types_in_program(prev_prog) == ["reshape", "reshape"]
+ assert get_op_types_in_program(prog) == ["reshape", "reshape"]
+
+ assert len(block.outputs) == 2
+ expected_output_shapes = {
+ block.outputs[0].name: OUTPUT0_SHAPE,
+ block.outputs[1].name: OUTPUT1_SHAPE,
+ }
+ assert_model_is_valid(
+ prog,
+ {"x": INPUT_SHAPE},
+ expected_output_shapes=expected_output_shapes,
+ backend=backend,
+ )
+
+ @pytest.mark.parametrize(
+ "backend",
+ backends,
+ )
+ def test_keep_nonreshape_child(self, backend):
+ INPUT_SHAPE = (6, 7)
+ OUTPUT_SHAPE = (14, 3)
+
+ @mb.program(input_specs=[mb.TensorSpec(shape=INPUT_SHAPE)])
+ def prog(x):
+ y1 = mb.reshape(x=x, shape=(21, 2))
+ y2 = mb.reshape(x=y1, shape=OUTPUT_SHAPE)
+ # the 1st reshape creating y1 has a non-reshape child op (matmul),
+ # so it will not be merged
+ y3 = mb.matmul(x=y1, y=np.random.rand(2, 5))
+ return y2, y3
+
+ prev_prog, _, block = apply_pass_and_basic_check(prog, "common::merge_consecutive_reshapes")
+ assert get_op_types_in_program(prev_prog) == ["reshape", "reshape", "matmul"]
+ assert get_op_types_in_program(prog) == ["reshape", "reshape", "matmul"]
+
+ assert len(block.outputs) == 2
+ assert_model_is_valid(
+ prog,
+ {"x": INPUT_SHAPE},
+ 
expected_output_shapes={block.outputs[0].name: OUTPUT_SHAPE}, + backend=backend, + ) + + +class TestCastOptimization: + """Test the cast optimization pass.""" + + """ + Input graph: + input -----> cast(dtype="fp32") -----> square -----> cast(dtype="fp32") ---> out + + Output graph: + input -----> square -----> out + """ + + def test_remove_redundant_casts(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + x = mb.cast(x=x, dtype="fp32") + x = mb.square(x=x) + x = mb.cast(x=x, dtype="fp32") + return x + + assert get_op_types_in_program(prog) == ["cast", "square", "cast"] + + apply_pass_and_basic_check(prog, "common::cast_optimization") + _, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination") + + assert get_op_types_in_program(prog) == ["square"] + + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={block.outputs[0].name: (10, 20)}, + ) + + """ + Input graph: + input -----> cast(dtype="fp16") -----> cast(dtype="fp32") ----> square ---> out + + Output graph: + input -----> square -----> out + """ + + def test_linear_consecutive_cast_ops_cancellation(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + x = mb.cast(x=x, dtype="fp16") + x = mb.cast(x=x, dtype="fp32") + x = mb.square(x=x) + return x + + assert get_op_types_in_program(prog) == ["cast", "cast", "square"] + + apply_pass_and_basic_check(prog, "common::cast_optimization") + _, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination") + + assert get_op_types_in_program(prog) == ["square"] + + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={block.outputs[0].name: (10, 20)}, + ) + + """ + Input graph: + input---->cast(dtype="int32")---->cast(dtype="fp16")--->square--->out + + Output graph: + input----->cast(dtype="fp16")----->square--->out + """ + + def test_linear_consecutive_cast_ops_fusion(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + x = mb.cast(x=x, dtype="int32") + x = mb.cast(x=x, dtype="fp16") + x = mb.square(x=x) + return x + + assert get_op_types_in_program(prog) == ["cast", "cast", "square"] + + apply_pass_and_basic_check(prog, "common::cast_optimization") + _, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination") + + assert get_op_types_in_program(prog) == ["cast", "square"] + assert block.find_ops(op_type="cast")[0].dtype.val == "fp16" + + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={block.outputs[0].name: (10, 20)}, + ) + + """ + Input graph: + input-->cast(dtype="fp16")-->cast(dtype="fp16")-->cast(dtype="int32")-->cast(dtype="int64")-->cast(dtype="fp32")-->cast(dtype="fp16")-->square->out + + Output graph: + input---->cast(dtype="fp16")----->square--->out + """ + + def test_linear_multiple_consecutive_cast_ops(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + x = mb.cast(x=x, dtype="fp16") + x = mb.cast(x=x, dtype="fp16") + x = mb.cast(x=x, dtype="int32") + x = mb.cast(x=x, dtype="int64") + x = mb.cast(x=x, dtype="fp32") + x = mb.cast(x=x, dtype="fp16") + x = mb.square(x=x) + return x + + assert get_op_types_in_program(prog) == [ + "cast", + "cast", + "cast", + "cast", + "cast", + "cast", + "square", + ] + + apply_pass_and_basic_check(prog, "common::cast_optimization") + _, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination") + + assert get_op_types_in_program(prog) == ["cast", "square"] + assert 
block.find_ops(op_type="cast")[0].dtype.val == "fp16" + + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={block.outputs[0].name: (10, 20)}, + ) + + """ + Input graph: + |---->cast(dtype="fp32")---->square--->out_1 + | + input---->cast(dtype="fp16")---->cast(dtype="fp32")---->relu--->out_2 + | + |---->cast(dtype="fp32")---->log--->out_3 + + Output graph: + + |---->square--->out_1 + | + input---->relu--->out_2 + | + |---->log--->out_3 + """ + + def test_same_consecutive_cancelling_casts_on_all_branches(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + x = mb.cast(x=x, dtype="fp16") + x1 = mb.cast(x=x, dtype="fp32") + x2 = mb.cast(x=x, dtype="fp32") + x3 = mb.cast(x=x, dtype="fp32") + x4 = mb.square(x=x1) + x5 = mb.relu(x=x2) + x6 = mb.log(x=x3) + return x4, x5, x6 + + assert get_op_types_in_program(prog) == [ + "cast", + "cast", + "cast", + "cast", + "square", + "relu", + "log", + ] + + apply_pass_and_basic_check(prog, "common::cast_optimization") + _, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination") + + assert get_op_types_in_program(prog) == ["square", "relu", "log"] + + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={ + block.outputs[0].name: (10, 20), + block.outputs[1].name: (10, 20), + block.outputs[2].name: (10, 20), + }, + ) + + """ + Input graph: + |---->cast(dtype="fp16")---->square--->out_1 + | + input---->cast(dtype="int32")---->cast(dtype="fp16")---->relu--->out_2 + | + |---->cast(dtype="fp16")---->log--->out_3 + + Output graph: + + |---->square--->out_1 + | + input---->cast(dtype="fp16")---->relu--->out_2 + | + |---->log--->out_3 + """ + + def test_consecutive_fusable_casts_on_all_branches(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + x = mb.cast(x=x, dtype="int32") + x1 = mb.cast(x=x, dtype="fp16") + x2 = mb.cast(x=x, dtype="fp16") + x3 = mb.cast(x=x, dtype="fp16") + x4 = mb.square(x=x1) + x5 = mb.relu(x=x2) + x6 = mb.log(x=x3) + return x4, x5, x6 + + assert get_op_types_in_program(prog) == [ + "cast", + "cast", + "cast", + "cast", + "square", + "relu", + "log", + ] + + apply_pass_and_basic_check(prog, "common::cast_optimization") + _, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination") + + assert get_op_types_in_program(prog) == ["cast", "square", "relu", "log"] + assert block.find_ops(op_type="cast")[0].dtype.val == "fp16" + + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={ + block.outputs[0].name: (10, 20), + block.outputs[1].name: (10, 20), + block.outputs[2].name: (10, 20), + }, + ) + + """ + Input graph: + + |---->cast(dtype="fp32")---->square--->out_1 + | + |---->cast(dtype="fp16")---->square--->out_2 + | + input---->cast(dtype="int32")---->cast(dtype="fp16")---->relu--->out_3 + | + |---->cast(dtype="fp16")---->log--->out_4 + | + |---->cast(dtype="fp32")---->log--->out_5 + + Output graph: + + |---->square--->out_1 + | + | |---->square--->out_2 + | | + input---->cast(dtype="fp16")---->relu--->out_3 + | | + | |---->log--->out_4 + | + | + |---->log--->out_5 + + """ + + def test_mixed_consecutive_casts_on_different_branches(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + x = mb.cast(x=x, dtype="int32") + x1 = mb.cast(x=x, dtype="fp32") + x2 = mb.cast(x=x, dtype="fp16") + x3 = mb.cast(x=x, dtype="fp16") + x4 = mb.cast(x=x, dtype="fp16") + x5 = mb.cast(x=x, dtype="fp32") + x6 = mb.square(x=x1) + x7 = mb.square(x=x2) + x8 = mb.relu(x=x3) 
+            x9 = mb.log(x=x4)
+            x10 = mb.log(x=x5)
+            return x6, x7, x8, x9, x10
+
+        assert get_op_types_in_program(prog) == [
+            "cast",
+            "cast",
+            "cast",
+            "cast",
+            "cast",
+            "cast",
+            "square",
+            "square",
+            "relu",
+            "log",
+            "log",
+        ]
+
+        apply_pass_and_basic_check(prog, "common::cast_optimization")
+        _, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination")
+
+        assert get_op_types_in_program(prog) == ["cast", "square", "square", "relu", "log", "log"]
+        assert block.find_ops(op_type="cast")[0].dtype.val == "fp16"
+
+        assert_model_is_valid(
+            prog,
+            {"x": (10, 20)},
+            expected_output_shapes={
+                block.outputs[0].name: (10, 20),
+                block.outputs[1].name: (10, 20),
+                block.outputs[2].name: (10, 20),
+            },
+        )
+
+    """
+    Input graph:
+          |---->cast(dtype="fp32")---->square--->out_1
+          |
+    input---->cast(dtype="int32")---->cast(dtype="fp16")---->relu--->out_2
+          |
+          |---->abs--->out_3
+
+    Output graph:
+          |---->square--->out_1
+          |
+    input---->cast(dtype="fp16")---->relu--->out_2
+          |
+          |---->cast(dtype="int32")---->abs--->out_3
+    """
+
+    def test_different_consecutive_casts_config_on_different_branches(self):
+        @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))])
+        def prog(x):
+            x = mb.cast(x=x, dtype="int32")
+            x1 = mb.cast(x=x, dtype="fp32")
+            x2 = mb.cast(x=x, dtype="fp16")
+            x3 = mb.square(x=x1)
+            x4 = mb.relu(x=x2)
+            x5 = mb.abs(x=x)
+            return x3, x4, x5
+
+        assert get_op_types_in_program(prog) == ["cast", "cast", "cast", "square", "relu", "abs"]
+
+        apply_pass_and_basic_check(prog, "common::cast_optimization")
+        _, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination")
+
+        assert get_op_types_in_program(prog) == ["cast", "cast", "square", "relu", "abs"]
+
+        # Asserting first cast configuration
+        cast_1 = block.find_ops(op_type="cast")[0]
+        assert cast_1.dtype.val == "int32"
+        assert len(cast_1.outputs) == 1
+        assert len(cast_1.outputs[0].child_ops) == 1
+        assert cast_1.outputs[0].child_ops[0].op_type == "abs"
+
+        # Asserting second cast configuration
+        cast_2 = block.find_ops(op_type="cast")[1]
+        assert cast_2.dtype.val == "fp16"
+        assert len(cast_2.outputs) == 1
+        assert len(cast_2.outputs[0].child_ops) == 1
+        assert cast_2.outputs[0].child_ops[0].op_type == "relu"
+
+        assert_model_is_valid(
+            prog,
+            {"x": (10, 20)},
+            expected_output_shapes={
+                block.outputs[0].name: (10, 20),
+                block.outputs[1].name: (10, 20),
+                block.outputs[2].name: (10, 20),
+            },
+        )
+
+    """
+    Input graph:
+    input(dtype="fp16")---->relu---->relu---->cast(dtype="fp32")---->cast(dtype="fp16")---->cast(dtype="fp32")---->cast(dtype="fp16")---->output(dtype="fp16")
+
+    Output graph:
+    input(dtype="fp16")---->relu---->relu---->output(dtype="fp16")
+    """
+
+    def test_two_casts_at_the_end(self):
+        @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20), dtype=types.fp16)])
+        def prog(x):
+            x = mb.relu(x=x)
+            x = mb.relu(x=x)
+            x = mb.cast(x=x, dtype="fp32")
+            x = mb.cast(x=x, dtype="fp16")
+            x = mb.cast(x=x, dtype="fp32")
+            x = mb.cast(x=x, dtype="fp16", name="original_output_name")
+            return x
+
+        assert get_op_types_in_program(prog) == ["relu", "relu", "cast", "cast", "cast", "cast"]
+        apply_pass_and_basic_check(prog, "common::cast_optimization")
+        _, prev_block, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination")
+        assert get_op_types_in_program(prog) == ["relu", "relu"]
+        assert prev_block.outputs[0].name == "original_output_name"
+        assert block.outputs[0].name == "original_output_name"
+        assert block.outputs[0].dtype == types.fp16
+
+
+class TestConv1dCompositionPasses:
+    @pytest.mark.parametrize(
+        "backend, has_strides, pad_type, has_pad, has_dilations, has_bias",
+        itertools.product(
+            backends,
+            (True, False),
+            ("valid", "custom", "same"),
+            (True, False),
+            (True, False),
+            (True, False),
+        ),
+    )
+    def test_conv1d_composition(
+        self, backend, has_strides, pad_type, has_pad, has_dilations, has_bias
+    ):
+        """
+        Input graph:
+            input -> expand_dims -> conv2d -> squeeze -> out
+
+        Output graph:
+            input -> conv1d -> out
+        """
+        N, L = 2, 8
+        C_in, C_out = 3, 4
+        K = 3
+
+        conv_kwargs = {"weight": np.random.rand(C_out, C_in, 1, K), "pad_type": pad_type}
+        if has_strides:
+            conv_kwargs["strides"] = (2, 2)
+        if has_pad:
+            conv_kwargs["pad"] = (1, 1, 1, 1)
+        if has_dilations:
+            conv_kwargs["dilations"] = (2, 2)
+        if has_bias:
+            conv_kwargs["bias"] = np.random.rand(C_out)
+
+        @mb.program(input_specs=[mb.TensorSpec(shape=(N, C_in, L))])
+        def prog(x):
+            y_expand = mb.expand_dims(x=x, axes=(2,))
+            y_conv = mb.conv(x=y_expand, **conv_kwargs)
+            y_squeeze = mb.squeeze(x=y_conv, axes=(2,))
+            return y_squeeze
+
+        assert get_op_types_in_program(prog) == ["expand_dims", "conv", "squeeze"]
+
+        prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::compose_conv1d")
+        assert get_op_types_in_program(prog) == ["squeeze", "conv"]
+
+        prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::const_elimination")
+        assert get_op_types_in_program(prog) == ["conv"]
+
+        # infer output shape
+        strides = conv_kwargs["strides"] if has_strides else (1, 1)
+        pad = conv_kwargs["pad"] if has_pad else (0, 0, 0, 0)
+        dilations = conv_kwargs["dilations"] if has_dilations else (1, 1)
+        L_out = None
+        if pad_type == "valid":
+            L_out = (L - dilations[-1] * (K - 1) - 1) // strides[-1] + 1
+        elif pad_type == "custom":
+            L_out = (L + pad[-2] + pad[-1] - dilations[-1] * (K - 1) - 1) // strides[-1] + 1
+        elif pad_type == "same":
+            L_out = np.ceil(L / strides[-1])
+        else:
+            raise Exception("unsupported pad type")
+        output_shape = (N, C_out, L_out)
+
+        assert_model_is_valid(
+            prog,
+            {"x": (N, C_in, L)},
+            expected_output_shapes={block.outputs[0].name: output_shape},
+            backend=backend,
+        )
+
+    @pytest.mark.parametrize("backend", backends)
+    def test_conv1d_composition_dynamic_weight(self, backend):
+        """
+        Input graph:
+            input -> expand_dims -> conv2d -> squeeze -> out
+
+        Output graph:
+            input -> conv1d -> out
+        """
+        N, L = 2, 9
+        C_in, C_out = 4, 3
+        K = 4
+
+        strides = (1, 2)
+        pad = (0, 0, 1, 1)
+        # MIL convolution with dynamic weights does not support dilations != 1
+        # see coremltools/coremltools/converters/mil/mil/ops/defs/iOS15/conv.py
+        dilations = (1, 1)
+
+        # infer L_out with pad_type fixed to custom
+        L_out = (L + pad[-2] + pad[-1] - dilations[-1] * (K - 1) - 1) // strides[-1] + 1
+
+        conv_kwargs = {
+            "strides": strides,
+            "pad_type": "custom",
+            "pad": pad,
+            "dilations": dilations,
+        }
+
+        @mb.program(
+            input_specs=[
+                mb.TensorSpec(shape=(N, C_in, L)),
+                mb.TensorSpec(shape=(C_out, C_in, 1, K)),
+            ]
+        )
+        def prog(x, weight):
+            y_expand = mb.expand_dims(x=x, axes=(-2,))
+            y_conv = mb.conv(x=y_expand, weight=weight, **conv_kwargs)
+            y_squeeze = mb.squeeze(x=y_conv, axes=(-2,))
+            return y_squeeze
+
+        assert get_op_types_in_program(prog) == ["expand_dims", "conv", "squeeze"]
+
+        prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::compose_conv1d")
+        assert get_op_types_in_program(prog) == ["squeeze", "conv"]
+
+
prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::const_elimination") + assert get_op_types_in_program(prog) == ["squeeze", "conv"] + + output_shape = (N, C_out, L_out) + assert_model_is_valid( + prog, + {"x": (N, C_in, L), "weight": (C_out, C_in, 1, K)}, + expected_output_shapes={block.outputs[0].name: output_shape}, + backend=backend, + ) + + @pytest.mark.parametrize( + "backend, has_bias, bias_op_type", + itertools.product( + backends, + (True, False), + ("add", "sub"), + ), + ) + def test_conv1d_bias_fusion(self, backend, has_bias, bias_op_type): + """ + After recomposing the shattered conv1d, conv1d optimization passes should work + + Input graph: + input -> expand_dims -> conv2d -> squeeze -> add/sub a constant -> out + + Output graph: + input -> conv1d -> out + """ + N, L = 2, 8 + C_in, C_out = 3, 5 + K = 3 + + strides = (1, 2) + pad = (0, 0, 0, 1) + dilations = (1, 2) + + # infer L_out with pad_type fixed to custom + L_out = (L + pad[-2] + pad[-1] - dilations[-1] * (K - 1) - 1) // strides[-1] + 1 + + conv_kwargs = { + "weight": np.random.rand(C_out, C_in, 1, K), + "strides": strides, + "pad_type": "custom", + "pad": pad, + "dilations": dilations, + } + if has_bias: + conv_kwargs["bias"] = np.random.rand(C_out) + + bias2 = np.random.rand(C_out, 1) + + @mb.program(input_specs=[mb.TensorSpec(shape=(N, C_in, L))]) + def prog(x): + y_expand = mb.expand_dims(x=x, axes=(-2,)) + y_conv = mb.conv(x=y_expand, **conv_kwargs) + y_squeeze = mb.squeeze(x=y_conv, axes=(-2,)) + y_bias2 = ( + mb.add(x=y_squeeze, y=bias2) + if bias_op_type == "add" + else mb.sub(x=y_squeeze, y=bias2) + ) + return y_bias2 + + assert get_op_types_in_program(prog) == ["expand_dims", "conv", "squeeze", bias_op_type] + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::compose_conv1d") + assert get_op_types_in_program(prog) == ["squeeze", "conv", bias_op_type] + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::fuse_conv_bias") + assert get_op_types_in_program(prog) == ["squeeze", "conv"] + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::const_elimination") + assert get_op_types_in_program(prog) == ["conv"] + + output_shape = (N, C_out, L_out) + assert_model_is_valid( + prog, + {"x": (N, C_in, L)}, + expected_output_shapes={block.outputs[0].name: output_shape}, + backend=backend, + ) + + +class TestConv1dChannellastCompositionPasses: + @pytest.mark.parametrize( + "backend, has_strides, pad_type, has_pad, has_dilations, has_bias", + itertools.product( + backends, + (True, False), + ("valid", "custom", "same"), + (True, False), + (True, False), + (True, False), + ), + ) + def test_conv1d_channellast_composition( + self, backend, has_strides, pad_type, has_pad, has_dilations, has_bias + ): + """ + Input graph: + input -> expand_dims -> transpose -> conv2d -> transpose -> squeeze -> out + + Output graph: + input -> transpose -> conv1d -> transpose -> out + """ + N, L = 2, 8 + C_in, C_out = 5, 3 + K = 3 + + conv_kwargs = { + "weight": np.random.rand(C_out, C_in, 1, K), + "pad_type": pad_type, + } + if has_strides: + conv_kwargs["strides"] = (2, 2) + if has_pad: + conv_kwargs["pad"] = (1, 1, 1, 1) + if has_dilations: + conv_kwargs["dilations"] = (2, 2) + if has_bias: + conv_kwargs["bias"] = np.random.rand(C_out) + + @mb.program(input_specs=[mb.TensorSpec(shape=(N, L, C_in))]) + def prog(x): + y_expand = mb.expand_dims(x=x, axes=(1,)) + y_transpose1 = mb.transpose(x=y_expand, perm=(0, 3, 1, 2)) + y_conv = mb.conv(x=y_transpose1, 
**conv_kwargs)
+            y_transpose2 = mb.transpose(x=y_conv, perm=(0, 2, 3, 1))
+            y_squeeze = mb.squeeze(x=y_transpose2, axes=(1,))
+            return y_squeeze
+
+        assert get_op_types_in_program(prog) == [
+            "expand_dims",
+            "transpose",
+            "conv",
+            "transpose",
+            "squeeze",
+        ]
+
+        prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::compose_conv1d")
+        assert get_op_types_in_program(prog) == ["transpose", "squeeze", "conv", "transpose"]
+
+        prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::const_elimination")
+        assert get_op_types_in_program(prog) == ["transpose", "conv", "transpose"]
+
+        # infer output shape
+        strides = conv_kwargs["strides"] if has_strides else (1, 1)
+        pad = conv_kwargs["pad"] if has_pad else (0, 0, 0, 0)
+        dilations = conv_kwargs["dilations"] if has_dilations else (1, 1)
+        L_out = None
+        if pad_type == "valid":
+            L_out = (L - dilations[-1] * (K - 1) - 1) // strides[-1] + 1
+        elif pad_type == "custom":
+            L_out = (L + pad[-2] + pad[-1] - dilations[-1] * (K - 1) - 1) // strides[-1] + 1
+        elif pad_type == "same":
+            L_out = np.ceil(L / strides[-1])
+        else:
+            raise Exception("unsupported pad type")
+        output_shape = (N, L_out, C_out)
+
+        assert_model_is_valid(
+            prog,
+            {"x": (N, L, C_in)},
+            expected_output_shapes={block.outputs[0].name: output_shape},
+            backend=backend,
+        )
+
+    @pytest.mark.parametrize("backend", backends)
+    def test_conv1d_channellast_composition_dynamic_weight(self, backend):
+        """
+        Input graph:
+            input -> expand_dims -> transpose -> conv2d -> transpose -> squeeze -> out
+
+        Output graph:
+            input -> transpose -> conv1d -> transpose -> out
+        """
+        N, L = 2, 9
+        C_in, C_out = 4, 5
+        K = 4
+
+        strides = (1, 2)
+        pad = (1, 0, 0, 1)
+        # MIL convolution with dynamic weights does not support dilations != 1
+        # see coremltools/coremltools/converters/mil/mil/ops/defs/iOS15/conv.py
+        dilations = (1, 1)
+
+        # infer L_out with pad_type fixed to custom
+        L_out = (L + pad[-2] + pad[-1] - dilations[-1] * (K - 1) - 1) // strides[-1] + 1
+
+        conv_kwargs = {
+            "strides": strides,
+            "pad_type": "custom",
+            "pad": pad,
+            "dilations": dilations,
+        }
+
+        @mb.program(
+            input_specs=[
+                mb.TensorSpec(shape=(N, L, C_in)),
+                mb.TensorSpec(shape=(C_out, C_in, 1, K)),
+            ]
+        )
+        def prog(x, weight):
+            y_expand = mb.expand_dims(x=x, axes=(1,))
+            y_transpose1 = mb.transpose(x=y_expand, perm=(0, 3, 1, 2))
+            y_conv = mb.conv(x=y_transpose1, weight=weight, **conv_kwargs)
+            y_transpose2 = mb.transpose(x=y_conv, perm=(0, 2, 3, 1))
+            y_squeeze = mb.squeeze(x=y_transpose2, axes=(1,))
+            return y_squeeze
+
+        assert get_op_types_in_program(prog) == [
+            "expand_dims",
+            "transpose",
+            "conv",
+            "transpose",
+            "squeeze",
+        ]
+
+        prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::compose_conv1d")
+        assert get_op_types_in_program(prog) == ["transpose", "squeeze", "conv", "transpose"]
+
+        prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::const_elimination")
+        assert get_op_types_in_program(prog) == ["transpose", "squeeze", "conv", "transpose"]
+
+        output_shape = (N, L_out, C_out)
+        assert_model_is_valid(
+            prog,
+            {"x": (N, L, C_in), "weight": (C_out, C_in, 1, K)},
+            expected_output_shapes={block.outputs[0].name: output_shape},
+            backend=backend,
+        )
+
+    @pytest.mark.parametrize(
+        "backend, has_bias, bias_op_type",
+        itertools.product(
+            backends,
+            (True, False),
+            ("add", "sub"),
+        ),
+    )
+    def test_conv1d_channellast_bias_fusion(self, backend, has_bias, bias_op_type):
+        """
+        After recomposing the shattered conv1d, the conv1d optimization passes should work.
+
+        Input graph:
+            input -> expand_dims -> transpose -> conv2d -> transpose -> squeeze -> add/sub a constant -> out
+
+        Output graph:
+            input -> transpose -> conv1d -> transpose -> out
+        """
+        N, L = 2, 8
+        C_in, C_out = 5, 4
+        K = 4
+
+        strides = (1, 2)
+        pad = (0, 1, 1, 0)
+        dilations = (1, 2)
+
+        # infer L_out with pad_type fixed to custom
+        L_out = (L + pad[-2] + pad[-1] - dilations[-1] * (K - 1) - 1) // strides[-1] + 1
+
+        conv_kwargs = {
+            "weight": np.random.rand(C_out, C_in, 1, K),
+            "strides": strides,
+            "pad_type": "custom",
+            "pad": pad,
+            "dilations": dilations,
+        }
+        if has_bias:
+            conv_kwargs["bias"] = np.random.rand(C_out)
+
+        bias2 = np.random.rand(C_out)
+
+        @mb.program(input_specs=[mb.TensorSpec(shape=(N, L, C_in))])
+        def prog(x):
+            y_expand = mb.expand_dims(x=x, axes=(-3,))
+            y_transpose1 = mb.transpose(x=y_expand, perm=(0, 3, 1, 2))
+            y_conv = mb.conv(x=y_transpose1, **conv_kwargs)
+            y_transpose2 = mb.transpose(x=y_conv, perm=(0, 2, 3, 1))
+            y_squeeze = mb.squeeze(x=y_transpose2, axes=(-3,))
+            y_bias2 = (
+                mb.add(x=y_squeeze, y=bias2)
+                if bias_op_type == "add"
+                else mb.sub(x=y_squeeze, y=bias2)
+            )
+            return y_bias2
+
+        assert get_op_types_in_program(prog) == [
+            "expand_dims",
+            "transpose",
+            "conv",
+            "transpose",
+            "squeeze",
+            bias_op_type,
+        ]
+
+        prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::compose_conv1d")
+        assert get_op_types_in_program(prog) == [
+            "transpose",
+            "squeeze",
+            "conv",
+            "transpose",
+            bias_op_type,
+        ]
+
+        prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::fuse_conv_bias")
+        assert get_op_types_in_program(prog) == ["transpose", "squeeze", "conv", "transpose"]
+
+        prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::const_elimination")
+        assert get_op_types_in_program(prog) == ["transpose", "conv", "transpose"]
+
+        output_shape = (N, L_out, C_out)
+        assert_model_is_valid(
+            prog,
+            {"x": (N, L, C_in)},
+            expected_output_shapes={block.outputs[0].name: output_shape},
+            backend=backend,
+        )
+
+
+class TestConvBatchNormFusion:
+    @staticmethod
+    def _apply_weight_transform(inputs, is_deconv, dtype=np.float32):
+        """
+        Utility function to test the weight transform function in the conv batch_norm fusion pass.
+        """
+        Cin, _, groups = 10, 20, 10
+        input_shape = (1, Cin, 2, 2)
+
+        @mb.program(
+            input_specs=[mb.TensorSpec(shape=input_shape, dtype=numpy_type_to_builtin_type(dtype))]
+        )
+        def prog(x):
+            if is_deconv:
+                x = mb.conv_transpose(
+                    x=x,
+                    weight=inputs["conv_weight"],
+                    bias=inputs["conv_bias"],
+                    groups=groups,
+                )
+            else:
+                x = mb.conv(
+                    x=x,
+                    weight=inputs["conv_weight"],
+                    bias=inputs["conv_bias"],
+                    groups=groups,
+                )
+
+            x = mb.batch_norm(
+                x=x,
+                mean=inputs["mean"],
+                variance=inputs["variance"],
+                gamma=inputs["gamma"],
+                beta=inputs["beta"],
+                epsilon=inputs["epsilon"],
+            )
+            return x
+
+        apply_pass_and_basic_check(prog, "common::fuse_conv_batchnorm")
+
+        # get the updated weight from the prog
+        conv_op = []
+        for op in prog["main"].operations:
+            if op.op_type == "const":
+                continue
+            conv_op.append(op)
+        assert len(conv_op) == 1, "should only have one conv / conv_transpose layer."
+
+        return conv_op[0].weight.val, conv_op[0].bias.val
+
+    @pytest.mark.parametrize(
+        "conv_type",
+        ["conv", "conv_transpose"],
+    )
+    def test_weight_transform_conv_identity(self, conv_type):
+        """
+        Test the weight transform function with an identity batchnorm layer.
+ """ + # parameters for conv + is_deconv = conv_type == "conv_transpose" + conv_weight = np.arange(20).astype(np.float32) + conv_weight = ( + np.reshape(conv_weight, (10, 2, 1, 1)) + if is_deconv + else np.reshape(conv_weight, (20, 1, 1, 1)) + ) + conv_bias = np.arange(20).astype(np.float32) + + # parameters for batch_norm + gamma = np.ones(20).astype(np.float32) + beta = np.zeros(20).astype(np.float32) + mean = np.zeros(20).astype(np.float32) + variance = np.ones(20).astype(np.float32) + epsilon = 0.0 + + inputs = { + "conv_weight": conv_weight, + "conv_bias": conv_bias, + "gamma": gamma, + "beta": beta, + "mean": mean, + "variance": variance, + "epsilon": epsilon, + } + + new_conv_weight, new_conv_bias = self._apply_weight_transform(inputs, is_deconv) + + np.testing.assert_equal(new_conv_weight, conv_weight) + np.testing.assert_equal(new_conv_bias, conv_bias) + + @pytest.mark.parametrize( + "conv_type, dtype", + itertools.product( + ["conv", "conv_transpose"], + [np.float16, np.float32], + ), + ) + def test_weight_transform_conv_type(self, conv_type, dtype): + """ + The weight transform function should return an updated conv weight with correct data type + """ + # parameters for conv + is_deconv = conv_type == "conv_transpose" + conv_weight = np.arange(20).astype(dtype) + conv_weight = ( + np.reshape(conv_weight, (10, 2, 1, 1)) + if is_deconv + else np.reshape(conv_weight, (20, 1, 1, 1)) + ) + conv_bias = np.arange(20).astype(dtype) + + # parameters for batch_norm + gamma = np.ones(20).astype(dtype) + beta = np.zeros(20).astype(dtype) + mean = np.zeros(20).astype(dtype) + variance = np.ones(20).astype(dtype) + epsilon = dtype(0.1) + + inputs = { + "conv_weight": conv_weight, + "conv_bias": conv_bias, + "gamma": gamma, + "beta": beta, + "mean": mean, + "variance": variance, + "epsilon": epsilon, + } + + new_conv_weight, _ = self._apply_weight_transform(inputs, is_deconv, dtype) + + assert ( + new_conv_weight.dtype == dtype + ), "the weight transform function should retain the weight's original dtype." + + @pytest.mark.parametrize( + "rank, groups, has_bias, backend", + itertools.product([3, 4, 5], [1, 2, 10], [False, True], backends), + ) + def test_conv(self, rank, groups, has_bias, backend): + """ + Input graph: + input -----> conv -----> batch_norm ---> out + + Output graph: + input -----> conv ----> out + + Different `rank` represents different conv dimensions: rank=3 for Conv1d, rank=4 for Conv2d, rank=5 for Conv3d. 
+ """ + Cin, Cout = 10, 30 + rank_to_input_shape = {3: (2, Cin, 20), 4: (2, Cin, 20, 24), 5: (2, Cin, 20, 24, 24)} + rank_to_conv_weight_shape = { + 3: (Cout, Cin // groups, 2), + 4: (Cout, Cin // groups, 2, 3), + 5: (Cout, Cin // groups, 2, 3, 3), + } + rank_to_output_shape = {3: (2, Cout, 19), 4: (2, Cout, 19, 22), 5: (2, Cout, 19, 22, 22)} + + input_shape = rank_to_input_shape[rank] + + @mb.program(input_specs=[mb.TensorSpec(shape=input_shape)]) + def prog(x): + # conv layer + conv_weight = np.random.rand(*rank_to_conv_weight_shape[rank]) + conv_bias = np.random.rand(Cout) if has_bias else None + x = mb.conv( + x=x, + weight=conv_weight, + bias=conv_bias, + groups=groups, + ) + + # batch_norm layer + gamma = np.random.rand(Cout) + beta = np.random.rand(Cout) + mean = np.random.rand(Cout) + variance = np.random.rand(Cout) + epsilon = 1e-2 + x = mb.batch_norm( + x=x, + mean=mean, + variance=variance, + gamma=gamma, + beta=beta, + epsilon=epsilon, + ) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::fuse_conv_batchnorm" + ) + + assert get_op_types_in_program(prev_prog) == ["conv", "batch_norm"] + assert get_op_types_in_program(prog) == ["conv"] + + # validate graph pass + output_shape = rank_to_output_shape[rank] + assert_model_is_valid( + prog, + {"x": input_shape}, + expected_output_shapes={block.outputs[0].name: output_shape}, + backend=backend, + ) + + @pytest.mark.parametrize( + "rank, groups, has_bias, backend", + itertools.product([3, 4, 5], [1, 2, 10], [False, True], backends), + ) + def test_conv_transpose(self, rank, groups, has_bias, backend): + """ + Input graph: + input -----> conv_transpose -----> batch_norm ---> out + + Output graph: + input -----> conv_transpose ----> out + """ + Cin, Cout = 10, 30 + rank_to_input_shape = {3: (2, Cin, 20), 4: (2, Cin, 20, 24), 5: (2, Cin, 20, 24, 24)} + rank_to_conv_weight_shape = { + 3: (Cin, Cout // groups, 2), + 4: (Cin, Cout // groups, 2, 3), + 5: (Cin, Cout // groups, 2, 3, 3), + } + rank_to_output_shape = {3: (2, Cout, 21), 4: (2, Cout, 21, 26), 5: (2, Cout, 21, 26, 26)} + + input_shape = rank_to_input_shape[rank] + + @mb.program(input_specs=[mb.TensorSpec(shape=input_shape)]) + def prog(x): + # conv layer + conv_weight = np.random.rand(*rank_to_conv_weight_shape[rank]) + conv_bias = np.random.rand(Cout) if has_bias else None + x = mb.conv_transpose( + x=x, + weight=conv_weight, + bias=conv_bias, + groups=groups, + ) + + # batch_norm layer + gamma = np.random.rand(Cout) + beta = np.random.rand(Cout) + mean = np.random.rand(Cout) + variance = np.random.rand(Cout) + + epsilon = 1e-5 + x = mb.batch_norm( + x=x, + mean=mean, + variance=variance, + gamma=gamma, + beta=beta, + epsilon=epsilon, + ) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::fuse_conv_batchnorm" + ) + + assert get_op_types_in_program(prev_prog) == ["conv_transpose", "batch_norm"] + assert get_op_types_in_program(prog) == ["conv_transpose"] + + # validate graph pass + output_shape = rank_to_output_shape[rank] + assert_model_is_valid( + prog, + {"x": input_shape}, + expected_output_shapes={block.outputs[0].name: output_shape}, + backend=backend, + ) + + +class TestConvBiasFusion: + @staticmethod + def get_conv(x, name, Cin=3, Cout=3): + conv_weight = np.random.rand(Cout, Cin, 2, 2) + x = mb.conv(x=x, weight=conv_weight, name=name) + return x + + @staticmethod + def get_linear(x, name, linear_op, C=3): + bias = np.arange(C).astype(np.float32) + bias = np.reshape(bias, (C, 1, 1)) + x = 
getattr(mb, linear_op)(x=x, y=bias, name=name) + return x + + @pytest.mark.parametrize( + "rank, linear_op", + itertools.product([4], ["add", "sub"]), + ) + def test_conv(self, rank, linear_op): + """ + Input graph: + input -----> conv -----> add/sub ---> out + + Output graph: + If the linear op is trainable, the program is not modified. + Otherwise, conv and the linear op will be fused: + input -----> conv ----> out + """ + Cin, Cout = 3, 3 + input_shape = (2, Cin, 100, 100) + + @mb.program(input_specs=[mb.TensorSpec(shape=input_shape)]) + def prog(x): + x = self.get_conv(x, "conv") + x = self.get_linear(x, "linear", linear_op) + return x + + apply_pass_and_basic_check(prog, "common::fuse_conv_bias") + apply_pass_and_basic_check(prog, "common::dead_code_elimination") + assert get_op_types_in_program(prog) == ["conv"] + + """ + Input graph: + Const + | + V + input -----> convolution -----> add/sub ----> relu ---> out + + Output graph: + input -----> convolution -----> relu ----> out + """ + + @pytest.mark.parametrize( + "conv_dim, \ + flip_add_input_order, \ + add_batch_dim_to_const, \ + use_sub_instead, \ + prebuilt_bias, \ + scalar_elementwise, \ + use_conv_transpose", + itertools.product( + [2, 3], # 1D conv conversion broken even without the pass: rdar://problem/62960720 + [True, False], # flip_add_input_order + [True, False], # add_batch_dim_to_const + [True, False], # use_sub_instead + [True, False], # prebuilt_bias + [True, False], # scalar_elementwise + [True, False], # use_conv_transpose + ), + ) + def test_fuse_conv_bias( + self, + conv_dim, + flip_add_input_order, + add_batch_dim_to_const, + use_sub_instead, + prebuilt_bias, + scalar_elementwise, + use_conv_transpose, + ): + + if flip_add_input_order and use_sub_instead: + return + + if use_conv_transpose and conv_dim != 2: + return + + input_shape = None + W = None + Cout = 8 + Cin = 3 + D = 10 + const = np.random.rand(Cout) if add_batch_dim_to_const else np.random.rand(1, Cout) + const = np.expand_dims(const, axis=-1) + + if conv_dim == 1: + input_shape = (1, Cin, D) + W = np.random.rand(Cout, Cin, 1) + elif conv_dim == 2: + input_shape = (1, Cin, D, D) + W = np.random.rand(Cout, Cin, 1, 1) + const = np.expand_dims(const, axis=-1) + elif conv_dim == 3: + input_shape = (1, Cin, D, D, D) + W = np.random.rand(Cout, Cin, 1, 1, 1) + const = np.expand_dims(const, axis=-1) + const = np.expand_dims(const, axis=-1) + + if use_conv_transpose: + W = np.swapaxes(W, 0, 1) + output_shape = list(input_shape) + output_shape[1] = Cout + + if scalar_elementwise: + const = np.random.uniform(0) + + @mb.program(input_specs=[mb.TensorSpec(shape=input_shape)]) + def prog(x): + kwargs = { + "x": x, + "weight": W, + "pad_type": "valid", + "dilations": [1] * conv_dim, + "strides": [1] * conv_dim, + } + if prebuilt_bias: + kwargs["bias"] = np.random.rand(Cout) + + x = mb.conv_transpose(**kwargs) if use_conv_transpose else mb.conv(**kwargs) + + if use_sub_instead: + x = mb.sub(x=x, y=const) + else: + x = mb.add( + x=const if flip_add_input_order else x, + y=x if flip_add_input_order else const, + ) + x = mb.relu(x=x) + return x + + element_op = "sub" if use_sub_instead else "add" + conv_op = "conv" if not use_conv_transpose else "conv_transpose" + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::fuse_conv_bias") + assert get_op_types_in_program(prev_prog) == [conv_op, element_op, "relu"] + assert get_op_types_in_program(prog) == [conv_op, "relu"] + + old_bias = prev_block.find_ops(op_type=conv_op)[0].inputs.get("bias", None) + 
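+        # the pass should fold `const` into the conv bias:
+        # new_bias = old_bias - const for sub, old_bias + const for add
+        # (a conv with no bias input is treated as having a zero bias)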
+        old_bias_val = 0 if old_bias is None else old_bias.val
+        assert old_bias_val is not None
+        assert block.find_ops(op_type=conv_op)[0].inputs["bias"] is not None
+        new_bias_val = block.find_ops(op_type=conv_op)[0].inputs["bias"].val
+        assert new_bias_val is not None
+        if use_sub_instead:
+            np.testing.assert_almost_equal(old_bias_val - np.squeeze(const), new_bias_val)
+        else:
+            np.testing.assert_almost_equal(old_bias_val + np.squeeze(const), new_bias_val)
+
+        assert_model_is_valid(
+            prog,
+            {"x": input_shape},
+            expected_output_shapes={block.outputs[0].name: tuple(output_shape)},
+        )
+
+    """
+    Input graph:
+                                       Const
+                                         |
+                                         V
+    input -----> convolution -----> transpose -----> add/sub ---> out
+
+    Output graph:
+    input -----> convolution -----> transpose -----> out
+    """
+
+    @pytest.mark.parametrize(
+        "conv_dim, has_bias, is_sub, is_conv_first_input, is_bias_scalar, is_deconv, is_all_1s",
+        itertools.product(
+            [1, 2, 3],  # conv_dim
+            [True, False],  # has_bias
+            [True, False],  # is_sub
+            [True, False],  # is_conv_first_input
+            [True, False],  # is_bias_scalar
+            [True, False],  # is_deconv
+            [True, False],  # is_all_1s
+        ),
+    )
+    def test_fuse_conv_bias_transpose_pattern(
+        self,
+        conv_dim,
+        has_bias,
+        is_sub,
+        is_conv_first_input,
+        is_bias_scalar,
+        is_deconv,
+        is_all_1s,
+    ):
+        if is_all_1s and is_bias_scalar:
+            return
+
+        # construct the conv weight/bias
+        input_shape = None
+        Cout = 8
+        Cin = 3
+        D = 10
+        conv_weight = None
+        conv_bias = (
+            np.arange(Cout).astype(np.float32) if has_bias else np.zeros(Cout).astype(np.float32)
+        )
+        rank = conv_dim + 2
+
+        if conv_dim == 1:
+            input_shape = (1, Cin, D)
+            conv_weight = np.random.rand(Cout, Cin, 1)
+        elif conv_dim == 2:
+            input_shape = (1, Cin, D, D)
+            conv_weight = np.random.rand(Cout, Cin, 1, 1)
+        elif conv_dim == 3:
+            input_shape = (1, Cin, D, D, D)
+            conv_weight = np.random.rand(Cout, Cin, 1, 1, 1)
+
+        if is_deconv:
+            conv_weight = np.swapaxes(conv_weight, 0, 1)
+
+        output_shape = list(input_shape)
+        output_shape[1] = Cout
+        output_shape = np.array(output_shape)
+
+        # generate the perm for the transpose op
+        perm = np.arange(rank)
+        np.random.shuffle(perm)
+        output_shape = output_shape[perm]
+        cout_index = np.where(perm == 1)[0][0]
+
+        # generate the const bias, and reshape it to a random broadcastable shape
+        bias = np.arange(Cout).astype(np.float32)
+        bias_shape = [1] * rank
+        bias_shape[cout_index] = Cout
+        if cout_index != 0:
+            crop_index = np.random.randint(low=0, high=cout_index + 1)
+            bias_shape = bias_shape[crop_index:]
+        bias = np.reshape(bias, bias_shape)
+
+        # for the scalar case, randomly generate a number
+        if is_bias_scalar:
+            bias = np.random.uniform(0)
+
+        # for the all-1s case, randomly generate a number and reshape it to (1, 1, ..., 1)
+        if is_all_1s:
+            bias = np.array([np.random.uniform(0)])
+            bias_rank = np.random.randint(low=1, high=rank + 1)
+            bias_shape = [1] * bias_rank
+            bias = np.reshape(bias, bias_shape)
+
+        @mb.program(input_specs=[mb.TensorSpec(shape=input_shape)])
+        def prog(x):
+            # conv or conv_transpose
+            kwargs = {
+                "x": x,
+                "weight": conv_weight,
+                "pad_type": "valid",
+                "dilations": [1] * conv_dim,
+                "strides": [1] * conv_dim,
+            }
+            if has_bias:
+                kwargs["bias"] = conv_bias
+            x = mb.conv_transpose(**kwargs) if is_deconv else mb.conv(**kwargs)
+
+            # transpose
+            x = mb.transpose(x=x, perm=perm)
+
+            # elementwise op
+            element_args = {"x": x, "y": bias} if is_conv_first_input else {"x": bias, "y": x}
+            element_op = mb.sub if is_sub else mb.add
+            x = element_op(**element_args)
+            return x
+
+        element_op = "sub" if is_sub else "add"
+        conv_op = "conv" if not is_deconv else "conv_transpose"
+
+        prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::fuse_conv_bias")
+        assert get_op_types_in_program(prev_prog) == [conv_op, "transpose", element_op]
+        assert get_op_types_in_program(prog) == [conv_op, "transpose"]
+
+        # get the value of new weight/bias
+        new_bias_val = block.find_ops(op_type=conv_op)[0].inputs["bias"].val
+        assert new_bias_val is not None
+
+        new_weight_val = block.find_ops(op_type=conv_op)[0].inputs["weight"].val
+        assert new_weight_val is not None
+
+        # compare the weight
+        if is_sub and not is_conv_first_input:
+            np.testing.assert_almost_equal(new_weight_val, -conv_weight)
+        else:
+            np.testing.assert_almost_equal(new_weight_val, conv_weight)
+
+        # compare the bias
+        if is_sub:
+            if is_conv_first_input:
+                bias = -bias
+            else:
+                conv_bias = -conv_bias
+        expected_conv_bias_val = conv_bias + np.squeeze(bias)
+        np.testing.assert_almost_equal(expected_conv_bias_val, new_bias_val)
+
+        # run the model
+        assert_model_is_valid(
+            prog,
+            {"x": input_shape},
+            expected_output_shapes={block.outputs[0].name: tuple(output_shape)},
+        )
+
+
+class TestConvScaleFusion:
+    @staticmethod
+    def _apply_weight_transform(inputs, is_deconv, is_real_div, is_conv_first_input, const_type):
+        """
+        Utility function to test the weight transform function in the conv scale fusion pass.
+        """
+        Cin, _, groups = 10, 20, 10
+        input_shape = (1, Cin, 2, 2)
+
+        @mb.program(input_specs=[mb.TensorSpec(shape=input_shape)])
+        def prog(x):
+            # create conv or deconv op
+            if is_deconv:
+                conv = mb.conv_transpose(
+                    x=x,
+                    weight=inputs["conv_weight"],
+                    bias=inputs["conv_bias"],
+                    groups=groups,
+                )
+            else:
+                conv = mb.conv(
+                    x=x,
+                    weight=inputs["conv_weight"],
+                    bias=inputs["conv_bias"],
+                    groups=groups,
+                )
+
+            # create const op based on different mode
+            scale = inputs["scale"]
+
+            if const_type == "python_scale":
+                scale = mb.const(val=scale)
+            elif const_type == "numpy_scale":
+                if type(scale) == int:
+                    np_value = np.int32(scale)
+                elif type(scale) == float:
+                    np_value = np.float32(scale)
+                scale = mb.const(val=np_value)
+            elif const_type == "numpy_0d_array":
+                scale = mb.const(val=np.array(scale))
+            elif const_type == "numpy_1d_array":
+                scale = mb.const(val=np.array([scale]))
+            else:
+                scale = mb.const(val=scale)
+
+            # do the scale operation
+            if is_real_div:
+                x = mb.real_div(
+                    x=conv,
+                    y=scale,
+                )
+            else:
+                if is_conv_first_input:
+                    x = mb.mul(
+                        x=conv,
+                        y=scale,
+                    )
+                else:
+                    x = mb.mul(
+                        x=scale,
+                        y=conv,
+                    )
+            return x
+
+        apply_pass_and_basic_check(prog, "common::fuse_conv_scale")
+
+        # get the updated weight from the prog
+        conv_op = []
+        for op in prog["main"].operations:
+            if op.op_type == "const":
+                continue
+            conv_op.append(op)
+        assert len(conv_op) == 1, "should only have one conv / conv_transpose layer."
+
+        return conv_op[0].weight.val, conv_op[0].bias.val
+
+    @pytest.mark.parametrize(
+        "conv_type, is_real_div, is_conv_first_input, const_type",
+        itertools.product(
+            ["conv", "conv_transpose"],
+            [True, False],
+            [True, False],
+            [
+                "python_scale",
+                "numpy_scale",
+                "numpy_0d_array",
+                "numpy_1d_array",
+                "numpy_3d_array",
+                "numpy_4d_array",
+            ],
+        ),
+    )
+    def test_weight_transform_conv(self, conv_type, is_real_div, is_conv_first_input, const_type):
+        """
+        Test the weight transform function in the conv scale fusion pass.
+        """
+        # parameters for conv
+        is_deconv = conv_type == "conv_transpose"
+        conv_weight = np.arange(20).astype(np.float32)
+        conv_weight = (
+            np.reshape(conv_weight, (10, 2, 1, 1))
+            if is_deconv
+            else np.reshape(conv_weight, (20, 1, 1, 1))
+        )
+        conv_bias = np.arange(20).astype(np.float32)
+
+        if const_type == "numpy_3d_array":
+            scale = np.reshape(np.arange(20).astype(np.float32), (20, 1, 1))
+        elif const_type == "numpy_4d_array":
+            scale = np.reshape(np.arange(20).astype(np.float32), (1, 20, 1, 1))
+        else:
+            scale = 12.7
+
+        inputs = {
+            "conv_weight": conv_weight,
+            "conv_bias": conv_bias,
+            "scale": scale,
+        }
+
+        new_conv_weight, new_conv_bias = self._apply_weight_transform(
+            inputs, is_deconv, is_real_div, is_conv_first_input, const_type
+        )
+
+        if is_real_div:
+            scale = 1.0 / scale
+
+        if const_type != "numpy_3d_array" and const_type != "numpy_4d_array":
+            expected_bias = conv_bias * scale
+            expected_weight = conv_weight * scale
+        else:
+            scale = np.reshape(scale, (20,))
+            expected_bias = conv_bias * scale
+            if is_deconv:
+                scale = np.reshape(scale, (20, 1, 1))
+                expected_weight = np.reshape(np.arange(20), (20, 1, 1))
+                expected_weight = expected_weight * scale
+                expected_weight = np.reshape(expected_weight, (10, 2, 1, 1)).astype(np.float32)
+            else:
+                scale = np.reshape(scale, (20, 1, 1, 1))
+                expected_weight = conv_weight * scale
+
+        np.testing.assert_almost_equal(new_conv_weight, expected_weight)
+        np.testing.assert_almost_equal(new_conv_bias, expected_bias)
+
+        assert (
+            new_conv_weight.dtype == conv_weight.dtype
+        ), "weight data type should not be changed after conv_scale_fusion pass."
+        assert (
+            new_conv_bias.dtype == conv_weight.dtype
+        ), "bias data type should be the same as the weight for conv layer."
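+
+    # A minimal numpy sketch (illustrative only, not part of the pass) of the
+    # identity the fusion relies on: a per-output-channel scale applied after a
+    # conv can be folded into the weight rows and the bias. A 1x1 conv at a single
+    # spatial location is modeled as a plain matmul; the helper name is hypothetical.
+    @staticmethod
+    def _scale_fold_identity_sketch():
+        w = np.random.rand(4, 3)  # (Cout, Cin) 1x1 conv kernel
+        b = np.random.rand(4)  # (Cout,) conv bias
+        s = np.random.rand(4)  # (Cout,) per-channel scale
+        x = np.random.rand(3)  # one spatial location with Cin channels
+        # scaling the output per channel == scaling the weight rows and the bias
+        np.testing.assert_almost_equal((w @ x + b) * s, (w * s[:, None]) @ x + b * s)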
+ + @pytest.mark.parametrize( + "rank, groups, has_bias, scale_op, scale_type, backend", + itertools.product( + [3, 4], [1, 10], [False, True], ["mul", "real_div"], ["scalar", "vector"], backends + ), + ) + def test_conv(self, rank, groups, has_bias, scale_op, scale_type, backend): + """ + Input graph: + input -----> conv -----> mul/real_div ---> out + + Output graph: + input -----> conv ----> out + """ + Cin, Cout = 10, 30 + input_shape = (2, Cin, 20) if rank == 3 else (2, Cin, 20, 24) + + @mb.program(input_specs=[mb.TensorSpec(shape=input_shape)]) + def prog(x): + # conv layer + conv_weight = ( + np.random.rand(Cout, Cin // groups, 2) + if rank == 3 + else np.random.rand(Cout, Cin // groups, 2, 3) + ) + conv_bias = np.random.rand(Cout) if has_bias else None + x = mb.conv( + x=x, + weight=conv_weight, + bias=conv_bias, + groups=groups, + ) + if scale_type == "scalar": + scale = np.array([2.3]) + else: + scale = np.arange(Cout).astype(np.float32) + scale = np.reshape(scale, (1, Cout, 1) if rank == 3 else (Cout, 1, 1)) + + # scale layer + if scale_op == "mul": + x = mb.mul(x=x, y=scale) + elif scale_op == "real_div": + x = mb.real_div(x=x, y=scale) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::fuse_conv_scale") + + assert get_op_types_in_program(prev_prog) == ["conv", scale_op] + assert get_op_types_in_program(prog) == ["conv"] + + # validate graph pass + output_shape = (2, Cout, 19) if rank == 3 else (2, Cout, 19, 22) + assert_model_is_valid( + prog, + {"x": input_shape}, + expected_output_shapes={block.outputs[0].name: output_shape}, + backend=backend, + ) + + @pytest.mark.parametrize( + "rank, groups, has_bias, scale_op, scale_type, backend", + itertools.product( + [3, 4], [1, 10], [False, True], ["mul", "real_div"], ["scalar", "vector"], backends + ), + ) + def test_conv_transpose(self, rank, groups, has_bias, scale_op, scale_type, backend): + """ + Input graph: + input -----> conv_transpose -----> mul/real_div ---> out + + Output graph: + input -----> conv_transpose ----> out + """ + Cin, Cout = 10, 30 + input_shape = (2, Cin, 20) if rank == 3 else (2, Cin, 20, 24) + + @mb.program(input_specs=[mb.TensorSpec(shape=input_shape)]) + def prog(x): + # conv layer + conv_weight = ( + np.random.rand(Cin, Cout // groups, 2) + if rank == 3 + else np.random.rand(Cin, Cout // groups, 2, 3) + ) + conv_bias = np.random.rand(Cout) if has_bias else None + x = mb.conv_transpose( + x=x, + weight=conv_weight, + bias=conv_bias, + groups=groups, + ) + + if scale_type == "scalar": + scale = np.array([2.3]) + else: + scale = np.arange(Cout).astype(np.float32) + scale = np.reshape(scale, (Cout, 1) if rank == 3 else (1, Cout, 1, 1)) + + # scale layer + if scale_op == "mul": + x = mb.mul(x=x, y=scale) + elif scale_op == "real_div": + x = mb.real_div(x=x, y=scale) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::fuse_conv_scale") + + assert get_op_types_in_program(prev_prog) == ["conv_transpose", scale_op] + assert get_op_types_in_program(prog) == ["conv_transpose"] + + # validate graph pass + output_shape = (2, Cout, 21) if rank == 3 else (2, Cout, 21, 26) + assert_model_is_valid( + prog, + {"x": input_shape}, + expected_output_shapes={block.outputs[0].name: output_shape}, + backend=backend, + ) + + +class TestFusePadConv(unittest.TestCase): + """ + Input graph: + input -----> pad -----> transpose -----> conv -----> transpose ---> out + + Output graph: + input -----> transpose -----> pad ----> conv -----> transpose ----> out + """ 
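+
+    # A minimal numpy sketch (illustrative only, not part of the pass) of why the
+    # reorder is sound: zero-padding H/W in the channel-last layout and then
+    # transposing to channel-first yields the same tensor as transposing first and
+    # padding the (now trailing) H/W axes. The helper name is hypothetical.
+    @staticmethod
+    def _pad_transpose_commute_sketch():
+        x = np.random.rand(1, 16, 20, 24)  # NHWC input, as in the tests below
+        pad_then_transpose = np.transpose(
+            np.pad(x, ((0, 0), (1, 1), (1, 1), (0, 0))), (0, 3, 1, 2)
+        )
+        transpose_then_pad = np.pad(
+            np.transpose(x, (0, 3, 1, 2)), ((0, 0), (0, 0), (1, 1), (1, 1))
+        )
+        np.testing.assert_array_equal(pad_then_transpose, transpose_then_pad)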
+ + def test_simple_direct_output(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 16, 20, 24))]) + def prog(x): + x = mb.pad(x=x, pad=[0, 0, 1, 1, 1, 1, 0, 0]) + x = mb.transpose(x=x, perm=[0, 3, 1, 2]) + x = mb.conv(x=x, weight=np.random.random([24, 24, 3, 3]), pad_type="valid") + x = mb.transpose(x=x, perm=[0, 2, 3, 1]) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::fuse_pad_conv") + self.assertEqual( + get_op_types_in_program(prev_prog), ["pad", "transpose", "conv", "transpose"] + ) + self.assertEqual(get_op_types_in_program(prog), ["transpose", "pad", "conv", "transpose"]) + assert_model_is_valid( + prog, + {"x": (1, 16, 20, 24)}, + expected_output_shapes={block.outputs[0].name: (1, 16, 20, 24)}, + ) + + """ + Input graph: + input -----> pad -----> transpose -----> conv -----> transpose ---> out + | + | + --------> transpose -----> conv -----> transpose ---> out + + Output graph: + input ---------> transpose -----> pad -----> conv -----> transpose ---> out + | + | + ------> transpose -----> pad -----> conv -----> transpose ---> out + + """ + + def test_pad_transposed_forked_conv(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 16, 20, 24))]) + def prog(x): + pad = mb.pad(x=x, pad=[0, 0, 1, 1, 1, 1, 0, 0]) + x = mb.transpose(x=pad, perm=[0, 3, 1, 2]) + x = mb.conv(x=x, weight=np.random.random([24, 24, 3, 3]), pad_type="valid") + x = mb.transpose(x=x, perm=[0, 2, 3, 1]) + y = mb.transpose(x=pad, perm=[0, 3, 1, 2]) + y = mb.conv(x=y, weight=np.random.random([24, 24, 3, 3]), pad_type="valid") + y = mb.transpose(x=y, perm=[0, 2, 3, 1]) + return x, y + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::fuse_pad_conv") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["pad", "transpose", "conv", "transpose", "transpose", "conv", "transpose"], + ) + self.assertEqual( + get_op_types_in_program(prog), + ["transpose", "pad", "conv", "transpose", "transpose", "pad", "conv", "transpose"], + ) + assert_model_is_valid( + prog, + {"x": (1, 16, 20, 24)}, + expected_output_shapes={ + block.outputs[0].name: (1, 16, 20, 24), + block.outputs[1].name: (1, 16, 20, 24), + }, + ) + + """ + Input graph: + input -----> pad -----> transpose -----> conv -----> transpose ---> out + | + | + ---------> out + + Output graph: + No change. 
+ """ + + def test_pad_output(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 16, 20, 24))]) + def prog(x): + pad = mb.pad(x=x, pad=[0, 0, 1, 1, 1, 1, 0, 0]) + x = mb.transpose(x=pad, perm=[0, 3, 1, 2]) + x = mb.conv(x=x, weight=np.random.random([24, 24, 3, 3]), pad_type="valid") + x = mb.transpose(x=x, perm=[0, 2, 3, 1]) + return x, pad + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::fuse_pad_conv") + self.assertEqual( + get_op_types_in_program(prev_prog), ["pad", "transpose", "conv", "transpose"] + ) + self.assertEqual(get_op_types_in_program(prog), ["pad", "transpose", "conv", "transpose"]) + assert_model_is_valid( + prog, + {"x": (1, 16, 20, 24)}, + expected_output_shapes={ + block.outputs[0].name: (1, 16, 20, 24), + block.outputs[1].name: (1, 18, 22, 24), + }, + ) + + +class TestConcatToPixelShuffle(unittest.TestCase): + def test_success(self): + """ + Input graph: + input1(1, 2, 3, 4) -----> concat(axis=2, interleave=True) -----> concat(axis=3, interleave=True) ---> out(1, 2, 6, 8) + ^ ^ + | | + input2(1, 2, 3, 4) ------------------- | + | + input3(1, 2, 3, 4) -----> concat(axis=2, interleave=True) -----------------------| + ^ + | + input4(1, 2, 3, 4) ------------------| + + Output graph: + input1(1, 2, 3, 4) -----> concat(axis=1) ---> pixel_shuffle(upsample_factor=2) ----> out(1, 2, 6, 8) + ^ + input2(1, 2, 3, 4) ----------| + | + input3(1, 2, 3, 4) ----------| + | + input4(1, 2, 3, 4) ----------| + """ + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + ] + ) + def prog(x1, x2, x3, x4): + ab = mb.concat(values=[x1, x2], axis=2, interleave=True) + cd = mb.concat(values=[x3, x4], axis=2, interleave=True) + x = mb.concat(values=[ab, cd], axis=3, interleave=True) + + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::concat_to_pixel_shuffle" + ) + self.assertEqual(get_op_types_in_program(prev_prog), ["concat", "concat", "concat"]) + self.assertEqual(get_op_types_in_program(prog), ["concat", "pixel_shuffle"]) + + inputs = {"x1": (1, 2, 3, 4), "x2": (1, 2, 3, 4), "x3": (1, 2, 3, 4), "x4": (1, 2, 3, 4)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 6, 8)}, + ) + + mlmodel = ct.convert( + prog, + source="milinternal", + convert_to="neuralnetwork", + compute_units=ct.ComputeUnit.CPU_ONLY, + ) + + if not _IS_MACOS: + # Can not get predictions unless on macOS. + return + + input_dict = dict() + input_dict["x1"] = np.ones(inputs["x1"]) + input_dict["x2"] = np.ones(inputs["x2"]) * 2 + input_dict["x3"] = np.ones(inputs["x3"]) * 3 + input_dict["x4"] = np.ones(inputs["x4"]) * 4 + + output_name = block.outputs[0].name + + ab = np.reshape( + np.stack((input_dict["x1"], input_dict["x2"]), axis=3), newshape=[1, 2, 6, 4] + ) + cd = np.reshape( + np.stack((input_dict["x3"], input_dict["x4"]), axis=3), newshape=[1, 2, 6, 4] + ) + old_prediction = np.reshape(np.stack((ab, cd), axis=4), newshape=[1, 2, 6, 8]) + + prediction = mlmodel.predict(input_dict) + np.testing.assert_allclose(old_prediction, prediction[output_name], atol=1e-04, rtol=1e-05) + + def test_nested(self): + """ + Two nested blocks that will each be transformed. 
+ """ + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + ] + ) + def prog(x1, x2, x3, x4, x5, x6, x7, x8): + ab = mb.concat(values=[x1, x2], axis=2, interleave=True) + cd = mb.concat(values=[x3, x4], axis=2, interleave=True) + x = mb.concat(values=[ab, cd], axis=3, interleave=True) + + ef = mb.concat(values=[x5, x6], axis=2, interleave=True) + gh = mb.concat(values=[x7, x8], axis=2, interleave=True) + y = mb.concat(values=[ef, gh], axis=3, interleave=True) + + z = mb.concat(values=[x, y], axis=1) + + return z + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::concat_to_pixel_shuffle" + ) + self.assertEqual( + get_op_types_in_program(prev_prog), + ["concat", "concat", "concat", "concat", "concat", "concat", "concat"], + ) + self.assertEqual( + get_op_types_in_program(prog), + ["concat", "pixel_shuffle", "concat", "pixel_shuffle", "concat"], + ) + + inputs = { + "x1": (1, 2, 3, 4), + "x2": (1, 2, 3, 4), + "x3": (1, 2, 3, 4), + "x4": (1, 2, 3, 4), + "x5": (1, 2, 3, 4), + "x6": (1, 2, 3, 4), + "x7": (1, 2, 3, 4), + "x8": (1, 2, 3, 4), + } + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 4, 6, 8)}, + ) + + input_dict = dict() + for name, shape in inputs.items(): + input_dict[name] = np.random.rand(*shape) + + output_name = block.outputs[0].name + + ab = np.reshape( + np.stack((input_dict["x1"], input_dict["x2"]), axis=3), newshape=[1, 2, 6, 4] + ) + cd = np.reshape( + np.stack((input_dict["x3"], input_dict["x4"]), axis=3), newshape=[1, 2, 6, 4] + ) + x = np.reshape(np.stack((ab, cd), axis=4), newshape=[1, 2, 6, 8]) + + ef = np.reshape( + np.stack((input_dict["x5"], input_dict["x6"]), axis=3), newshape=[1, 2, 6, 4] + ) + gh = np.reshape( + np.stack((input_dict["x7"], input_dict["x8"]), axis=3), newshape=[1, 2, 6, 4] + ) + y = np.reshape(np.stack((ef, gh), axis=4), newshape=[1, 2, 6, 8]) + + old_prediction = np.concatenate((x, y), axis=1) + + mlmodel = ct.convert( + prog, + source="milinternal", + convert_to="neuralnetwork", + compute_units=ct.ComputeUnit.CPU_ONLY, + ) + + if _IS_MACOS: + prediction = mlmodel.predict(input_dict) + np.testing.assert_allclose( + old_prediction, prediction[output_name], atol=1e-04, rtol=1e-05 + ) + + def test_failure_0(self): + """ + The h_concat has three inputs, so the pattern won't match. + """ + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + ] + ) + def prog(x1, x2, x3, x4): + ab = mb.concat(values=[x1, x2, x3], axis=2, interleave=True) + cd = mb.concat(values=[x3, x4, x1], axis=2, interleave=True) + x = mb.concat(values=[ab, cd], axis=3, interleave=True) + + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::concat_to_pixel_shuffle" + ) + self.assertEqual(get_op_types_in_program(prev_prog), ["concat", "concat", "concat"]) + self.assertEqual(get_op_types_in_program(prog), ["concat", "concat", "concat"]) + + def test_failure_1(self): + """ + The first concat is on the wrong axis, so the pattern won't match. 
+ """ + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + ] + ) + def prog(x1, x2, x3, x4): + ab = mb.concat(values=[x1, x2], axis=3, interleave=True) + cd = mb.concat(values=[x3, x4], axis=3, interleave=True) + x = mb.concat(values=[ab, cd], axis=3, interleave=True) + + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::concat_to_pixel_shuffle" + ) + self.assertEqual(get_op_types_in_program(prev_prog), ["concat", "concat", "concat"]) + self.assertEqual(get_op_types_in_program(prog), ["concat", "concat", "concat"]) + + def test_failure_2(self): + """ + The last concat is on the wrong axis, so the pattern won't match. + """ + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + ] + ) + def prog(x1, x2, x3, x4): + ab = mb.concat(values=[x1, x2], axis=2, interleave=True) + cd = mb.concat(values=[x3, x4], axis=2, interleave=True) + x = mb.concat(values=[ab, cd], axis=2, interleave=True) + + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::concat_to_pixel_shuffle" + ) + self.assertEqual(get_op_types_in_program(prev_prog), ["concat", "concat", "concat"]) + self.assertEqual(get_op_types_in_program(prog), ["concat", "concat", "concat"]) + + def test_failure_3(self): + """ + The first concat is not interleaved, so the pattern won't match. + """ + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + ] + ) + def prog(x1, x2, x3, x4): + ab = mb.concat(values=[x1, x2], axis=2, interleave=False) + cd = mb.concat(values=[x3, x4], axis=2, interleave=True) + x = mb.concat(values=[ab, cd], axis=3, interleave=True) + + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::concat_to_pixel_shuffle" + ) + self.assertEqual(get_op_types_in_program(prev_prog), ["concat", "concat", "concat"]) + self.assertEqual(get_op_types_in_program(prog), ["concat", "concat", "concat"]) + + def test_failure_4(self): + """ + The second concat is not interleaved, so the pattern won't match. + """ + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + ] + ) + def prog(x1, x2, x3, x4): + ab = mb.concat(values=[x1, x2], axis=2, interleave=True) + cd = mb.concat(values=[x3, x4], axis=2, interleave=False) + x = mb.concat(values=[ab, cd], axis=3, interleave=True) + + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::concat_to_pixel_shuffle" + ) + self.assertEqual(get_op_types_in_program(prev_prog), ["concat", "concat", "concat"]) + self.assertEqual(get_op_types_in_program(prog), ["concat", "concat", "concat"]) + + def test_failure_5(self): + """ + The last concat is not interleaved, so the pattern won't match. 
+ """ + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + ] + ) + def prog(x1, x2, x3, x4): + ab = mb.concat(values=[x1, x2], axis=2, interleave=True) + cd = mb.concat(values=[x3, x4], axis=2, interleave=True) + x = mb.concat(values=[ab, cd], axis=3, interleave=False) + + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::concat_to_pixel_shuffle" + ) + self.assertEqual(get_op_types_in_program(prev_prog), ["concat", "concat", "concat"]) + self.assertEqual(get_op_types_in_program(prog), ["concat", "concat", "concat"]) + + def test_failure_6(self): + """ + The inputs are the wrong rank, so the pattern won't match. + """ + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(1, 2, 3, 4, 5)), + mb.TensorSpec(shape=(1, 2, 3, 4, 5)), + mb.TensorSpec(shape=(1, 2, 3, 4, 5)), + mb.TensorSpec(shape=(1, 2, 3, 4, 5)), + ] + ) + def prog(x1, x2, x3, x4): + ab = mb.concat(values=[x1, x2], axis=2, interleave=True) + cd = mb.concat(values=[x3, x4], axis=2, interleave=True) + x = mb.concat(values=[ab, cd], axis=3, interleave=True) + + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::concat_to_pixel_shuffle" + ) + self.assertEqual(get_op_types_in_program(prev_prog), ["concat", "concat", "concat"]) + self.assertEqual(get_op_types_in_program(prog), ["concat", "concat", "concat"]) + + def test_failure_7(self): + """ + Extra input to the w_concats means the pattern won't match. + """ + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(1, 2, 4, 4)), + mb.TensorSpec(shape=(1, 2, 4, 4)), + mb.TensorSpec(shape=(1, 2, 4, 4)), + mb.TensorSpec(shape=(1, 2, 4, 4)), + mb.TensorSpec(shape=(1, 2, 8, 4)), + ] + ) + def prog(x1, x2, x3, x4, x5): + ab = mb.concat(values=[x1, x2], axis=2, interleave=True) + cd = mb.concat(values=[x3, x4], axis=2, interleave=True) + x = mb.concat(values=[ab, cd, x5], axis=3, interleave=True) + + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::concat_to_pixel_shuffle" + ) + self.assertEqual(get_op_types_in_program(prev_prog), ["concat", "concat", "concat"]) + self.assertEqual(get_op_types_in_program(prog), ["concat", "concat", "concat"]) + + +class TestConcatInterleave: + def test_concat_interleave_fusion_pass(self): + """ + Given: + %3 = concat(%1.a, %1.b, axis=-3, interleave=False) #shape = (B, n*C, H, W) + %4 = reshape(%3) #shape = (B, n, C, H, W) + %5 = transpose(%4, perm=[0, 2, 1, 3, 4]) # shape = (B, C, n, H, W) + %6 = reshape(%5) # shape = (B, C*n, H, W) + + Result: + %6 = concat(%1.a, %1.b, axis=-3, interleave=True) + """ + B, C, H, W = 1, 10, 20, 20 + + @mb.program( + input_specs=[mb.TensorSpec(shape=(B, C, H, W)), mb.TensorSpec(shape=(B, C, H, W))] + ) + def prog(x, y): + z = mb.concat(values=[x, y], axis=1) + z = mb.reshape(x=z, shape=(B, 2, C, H, W)) + z = mb.transpose(x=z, perm=[0, 2, 1, 3, 4]) + z = mb.reshape(x=z, shape=(B, -1, H, W)) + return z + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::detect_concat_interleave" + ) + assert get_op_types_in_program(prev_prog) == ["concat", "reshape", "transpose", "reshape"] + assert get_op_types_in_program(prog) == ["concat"] + concat_op = prog.find_ops(op_type="concat", exactly_one=True)[0] + assert concat_op.interleave.val + assert_model_is_valid( + prog, + {"x": (B, C, H, W), "y": (B, C, H, W)}, + expected_output_shapes={block.outputs[0].name: (B, 2 * C, H, 
W)}, + ) + + +class TestFuseOnehotMatmulToGather: + @pytest.mark.parametrize("rank", [1, 2, 3, 4]) + def test_fuse_onehot_matmul_to_gather(self, rank): + """ + Input: + %2 = one_hot(%1, on_value=1, off_value=0, axis=-1) + %3 = const() # rank 2 + %4 = matmul(%2, %3) + + Output: + %4 = gather(%3, %2, axis=0) + """ + rank4_shape = (10, 3, 6, 7) + input_shape = rank4_shape[-rank:] + vocab_size = 15 + embedding_size = 12 + + @mb.program(input_specs=[mb.TensorSpec(shape=input_shape, dtype=types.int32)]) + def prog(x): + x = mb.one_hot( + indices=x, on_value=1.0, off_value=0.0, axis=-1, one_hot_vector_size=vocab_size + ) + x = mb.matmul(x=x, y=np.random.rand(vocab_size, embedding_size)) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::fuse_onehot_matmul_to_gather" + ) + assert get_op_types_in_program(prev_prog) == ["one_hot", "matmul"] + assert get_op_types_in_program(prog) == ["gather"] + assert_model_is_valid( + prog, + {"x": input_shape}, + expected_output_shapes={block.outputs[0].name: input_shape + (embedding_size,)}, + ) + + +class TestReplaceStackReshape(unittest.TestCase): + def test_with_interleave(self): + """ + input1(1, 5, 3, 4) -----> stack(axis=2) -----> reshape(shape=(1, 10, 3, 4)) ---> out(1, 10, 3, 4) + ^ + | + input2(1, 5, 3, 4) ---------- + + Output graph: + input -----> concat ----> out + + """ + + @mb.program( + input_specs=[mb.TensorSpec(shape=(1, 5, 3, 4)), mb.TensorSpec(shape=(1, 5, 3, 4))] + ) + def prog(x1, x2): + x = mb.stack(values=[x1, x2], axis=2) + x = mb.reshape(x=x, shape=[1, 10, 3, 4]) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::replace_stack_reshape" + ) + self.assertEqual(get_op_types_in_program(prev_prog), ["stack", "reshape"]) + self.assertEqual(get_op_types_in_program(prog), ["concat"]) + + inputs = {"x1": (1, 5, 3, 4), "x2": (1, 5, 3, 4)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 10, 3, 4)}, + ) + + concat_ops = [op for op in block.operations if op.op_type == "concat"] + concat_op = concat_ops[0] + assert concat_op.interleave.val == True + + output_name = block.outputs[0].name + + mlmodel = ct.convert( + prog, + source="milinternal", + convert_to="neuralnetwork", + compute_units=ct.ComputeUnit.CPU_ONLY, + ) + + if not _IS_MACOS: + # Can not get predictions unless on macOS. 
+ return + + input_dict = dict() + for name, shape in inputs.items(): + input_dict[name] = np.random.rand(*shape) + + old_prediction = np.reshape( + np.stack([input_dict["x1"], input_dict["x2"]], axis=2), newshape=[1, 10, 3, 4] + ) + + prediction = mlmodel.predict(input_dict) + + np.testing.assert_allclose(old_prediction, prediction[output_name], atol=1e-04, rtol=1e-05) + + def test_without_interleave(self): + """ + Input graph: + input1(1, 5, 3, 4) -----> stack(axis=1) -----> reshape(shape=(1, 10, 3, 4)) ---> out(1, 10, 3, 4) + ^ + | + input2(1, 5, 3, 4) ---------- + + Output graph: + input -----> concat ----> out + """ + + @mb.program( + input_specs=[mb.TensorSpec(shape=(1, 5, 3, 4)), mb.TensorSpec(shape=(1, 5, 3, 4))] + ) + def prog(x1, x2): + x = mb.stack(values=[x1, x2], axis=1) + x = mb.reshape(x=x, shape=[1, 10, 3, 4]) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::replace_stack_reshape" + ) + self.assertEqual(get_op_types_in_program(prev_prog), ["stack", "reshape"]) + self.assertEqual(get_op_types_in_program(prog), ["concat"]) + + inputs = {"x1": (1, 5, 3, 4), "x2": (1, 5, 3, 4)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 10, 3, 4)}, + ) + + concat_ops = [op for op in block.operations if op.op_type == "concat"] + concat_op = concat_ops[0] + assert concat_op.interleave.val == False + + output_name = block.outputs[0].name + + mlmodel = ct.convert( + prog, + source="milinternal", + convert_to="neuralnetwork", + compute_units=ct.ComputeUnit.CPU_ONLY, + ) + + if not _IS_MACOS: + # Can not get predictions unless on macOS. + return + + input_dict = dict() + for name, shape in inputs.items(): + input_dict[name] = np.random.rand(*shape) + + old_prediction = np.reshape( + np.stack([input_dict["x1"], input_dict["x2"]], axis=1), newshape=[1, 10, 3, 4] + ) + + prediction = mlmodel.predict(input_dict) + np.testing.assert_allclose(old_prediction, prediction[output_name], atol=1e-04, rtol=1e-05) + + def test_multiple(self): + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + mb.TensorSpec(shape=(1, 2, 3, 4)), + ] + ) + def prog(x1, x2, x3, x4): + a = mb.stack(values=[x1, x2], axis=1) + a = mb.reshape(x=a, shape=[1, 4, 3, 4]) + + b = mb.stack(values=[x3, x4], axis=1) + b = mb.reshape(x=b, shape=[1, 4, 3, 4]) + + c = mb.stack(values=[a, b], axis=2) + c = mb.reshape(x=c, shape=[1, 4, 6, 4]) + + return c + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::replace_stack_reshape" + ) + self.assertEqual( + get_op_types_in_program(prev_prog), + ["stack", "reshape", "stack", "reshape", "stack", "reshape"], + ) + self.assertEqual(get_op_types_in_program(prog), ["concat", "concat", "concat"]) + + inputs = {"x1": (1, 2, 3, 4), "x2": (1, 2, 3, 4), "x3": (1, 2, 3, 4), "x4": (1, 2, 3, 4)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 4, 6, 4)}, + ) + + output_name = block.outputs[0].name + + mlmodel = ct.convert( + prog, + source="milinternal", + convert_to="neuralnetwork", + compute_units=ct.ComputeUnit.CPU_ONLY, + ) + + if not _IS_MACOS: + # Can not get predictions unless on macOS. 
+            return
+
+        input_dict = dict()
+        for name, shape in inputs.items():
+            input_dict[name] = np.random.rand(*shape)
+
+        branch_1 = np.reshape(
+            np.stack([input_dict["x1"], input_dict["x2"]], axis=1), newshape=[1, 4, 3, 4]
+        )
+        branch_2 = np.reshape(
+            np.stack([input_dict["x3"], input_dict["x4"]], axis=1), newshape=[1, 4, 3, 4]
+        )
+        old_prediction = np.reshape(np.stack([branch_1, branch_2], axis=2), newshape=[1, 4, 6, 4])
+
+        prediction = mlmodel.predict(input_dict)
+
+        np.testing.assert_allclose(old_prediction, prediction[output_name], atol=1e-04, rtol=1e-05)
+
+    def test_negative_1(self):
+        """
+        Input graph:
+        input1(1, 5, 3, 4) -----> stack(axis=1) -----> reshape(shape=(-1, 5, 6, 4)) ---> out(1, 5, 6, 4)
+                                      ^
+                                      |
+        input2(1, 5, 3, 4) -----------
+
+        Output graph:
+        Unchanged -- this graph is not equivalent to a concat.
+        """
+
+        @mb.program(
+            input_specs=[mb.TensorSpec(shape=(1, 5, 3, 4)), mb.TensorSpec(shape=(1, 5, 3, 4))]
+        )
+        def prog(x1, x2):
+            a = mb.stack(values=[x1, x2], axis=1)
+            a = mb.reshape(x=a, shape=[-1, 5, 6, 4])
+            return a
+
+        prev_prog, prev_block, block = apply_pass_and_basic_check(
+            prog, "common::replace_stack_reshape"
+        )
+
+        self.assertEqual(get_op_types_in_program(prev_prog), ["stack", "reshape"])
+        self.assertEqual(get_op_types_in_program(prog), ["stack", "reshape"])
+
+    def test_negative_2(self):
+        """
+        Input graph:
+        input1(1, 5, 3, 4) -----> stack(axis=1) -----> reshape(shape=(-1, 5, 12, 2)) ---> out(1, 5, 12, 2)
+                                      ^
+                                      |
+        input2(1, 5, 3, 4) -----------
+
+        Output graph:
+        Unchanged -- this graph is not equivalent to a concat.
+        """
+
+        @mb.program(
+            input_specs=[mb.TensorSpec(shape=(1, 5, 3, 4)), mb.TensorSpec(shape=(1, 5, 3, 4))]
+        )
+        def prog(x1, x2):
+            a = mb.stack(values=[x1, x2], axis=1)
+            a = mb.reshape(x=a, shape=[-1, 5, 12, 2])
+            return a
+
+        prev_prog, prev_block, block = apply_pass_and_basic_check(
+            prog, "common::replace_stack_reshape"
+        )
+
+        self.assertEqual(get_op_types_in_program(prev_prog), ["stack", "reshape"])
+        self.assertEqual(get_op_types_in_program(prog), ["stack", "reshape"])
+
+    def test_negative_3(self):
+        """
+        Input graph:
+        input1(1, 5, 3, 4) -----> stack(axis=1) -----> reshape(shape=(-1, 2, 5, 4, 3)) ---> out(1, 2, 5, 4, 3)
+                                      ^
+                                      |
+        input2(1, 5, 3, 4) -----------
+
+        Output graph:
+        Unchanged -- this graph is not equivalent to a concat.
+        """
+
+        @mb.program(
+            input_specs=[mb.TensorSpec(shape=(1, 5, 3, 4)), mb.TensorSpec(shape=(1, 5, 3, 4))]
+        )
+        def prog(x1, x2):
+            a = mb.stack(values=[x1, x2], axis=1)
+            a = mb.reshape(x=a, shape=[-1, 2, 5, 4, 3])
+            return a
+
+        prev_prog, prev_block, block = apply_pass_and_basic_check(
+            prog, "common::replace_stack_reshape"
+        )
+
+        self.assertEqual(get_op_types_in_program(prev_prog), ["stack", "reshape"])
+        self.assertEqual(get_op_types_in_program(prog), ["stack", "reshape"])
+
+    def test_negative_4(self):
+        """
+        More than two inputs to the stack op -- can't be transformed.
+ """ + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(1, 5, 3, 4)), + mb.TensorSpec(shape=(1, 5, 3, 4)), + mb.TensorSpec(shape=(1, 5, 3, 4)), + ] + ) + def prog(x1, x2, x3): + a = mb.stack(values=[x1, x2, x3], axis=1) + a = mb.reshape(x=a, shape=[-1, 15, 4, 3]) + return a + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::replace_stack_reshape" + ) + + self.assertEqual(get_op_types_in_program(prev_prog), ["stack", "reshape"]) + self.assertEqual(get_op_types_in_program(prog), ["stack", "reshape"]) + + def test_negative_5(self): + """ + The stack and reshape are not adjacent, so the graph is not transformed. + """ + + @mb.program( + input_specs=[mb.TensorSpec(shape=(1, 5, 3, 4)), mb.TensorSpec(shape=(1, 5, 3, 4))] + ) + def prog(x1, x2): + a = mb.stack(values=[x1, x2], axis=1) + a = mb.relu(x=a) + a = mb.reshape(x=a, shape=[-1, 10, 4, 3]) + return a + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::replace_stack_reshape" + ) + + self.assertEqual(get_op_types_in_program(prev_prog), ["stack", "relu", "reshape"]) + self.assertEqual(get_op_types_in_program(prog), ["stack", "relu", "reshape"]) + + def test_negative_6(self): + """ + The stack op's output is used elsewhere in the graph, so it can't be removed + """ + + @mb.program( + input_specs=[mb.TensorSpec(shape=(1, 5, 3, 4)), mb.TensorSpec(shape=(1, 5, 3, 4))] + ) + def prog(x1, x2): + a = mb.stack(values=[x1, x2], axis=1) + b = mb.reshape(x=a, shape=[-1, 10, 4, 3]) + c = mb.relu(x=a) + c = mb.reshape(x=c, shape=[-1, 10, 4, 3]) + d = mb.add(x=b, y=c) + return d + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::replace_stack_reshape" + ) + + self.assertEqual( + get_op_types_in_program(prev_prog), ["stack", "reshape", "relu", "reshape", "add"] + ) + self.assertEqual( + get_op_types_in_program(prog), ["stack", "reshape", "relu", "reshape", "add"] + ) + + def test_negative_7(self): + """ + The stack op is not followed by any other ops. 
+ """ + + @mb.program( + input_specs=[mb.TensorSpec(shape=(1, 5, 3, 4)), mb.TensorSpec(shape=(1, 5, 3, 4))] + ) + def prog(x1, x2): + a = mb.stack(values=[x1, x2], axis=1) + return a + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::replace_stack_reshape" + ) + + self.assertEqual(get_op_types_in_program(prev_prog), ["stack"]) + self.assertEqual(get_op_types_in_program(prog), ["stack"]) + + +class TestUseReflectionPadding: + def test_success_w_axis(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))]) + def prog(x1): + left = mb.slice_by_index( + x=x1, begin=[0, 0, 0, 1], end=[0, 0, 0, 2], end_mask=[True, True, True, False] + ) + right = mb.slice_by_index( + x=x1, begin=[0, 0, 0, -2], end=[0, 0, 0, -1], end_mask=[True, True, True, False] + ) + x = mb.concat(values=[left, x1, right], axis=3) + + return x + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::use_reflection_padding") + assert get_op_types_in_program(prev_prog) == ["slice_by_index", "slice_by_index", "concat"] + assert get_op_types_in_program(prog) == ["pad"] + + inputs = {"x1": (1, 2, 6, 8)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 6, 10)}, + ) + + def test_success_w_axis_multiple(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))]) + def prog(x1): + left0 = mb.slice_by_index( + x=x1, begin=[0, 0, 0, 2], end=[0, 0, 0, 3], end_mask=[True, True, True, False] + ) + left1 = mb.slice_by_index( + x=x1, begin=[0, 0, 0, 1], end=[0, 0, 0, 2], end_mask=[True, True, True, False] + ) + right0 = mb.slice_by_index( + x=x1, begin=[0, 0, 0, -2], end=[0, 0, 0, -1], end_mask=[True, True, True, False] + ) + right1 = mb.slice_by_index( + x=x1, begin=[0, 0, 0, -3], end=[0, 0, 0, -2], end_mask=[True, True, True, False] + ) + x = mb.concat(values=[left0, left1, x1, right0, right1], axis=3) + + return x + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::use_reflection_padding") + assert get_op_types_in_program(prev_prog) == [ + "slice_by_index", + "slice_by_index", + "slice_by_index", + "slice_by_index", + "concat", + ] + assert get_op_types_in_program(prog) == ["pad"] + + inputs = {"x1": (1, 2, 6, 8)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 6, 12)}, + ) + + def test_success_h_axis(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))]) + def prog(x1): + left = mb.slice_by_index( + x=x1, begin=[0, 0, 1, 0], end=[0, 0, 2, 0], end_mask=[True, True, False, True] + ) + right = mb.slice_by_index( + x=x1, begin=[0, 0, -2, 0], end=[0, 0, -1, 0], end_mask=[True, True, False, True] + ) + x = mb.concat(values=[left, x1, right], axis=2) + + return x + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::use_reflection_padding") + assert get_op_types_in_program(prev_prog) == ["slice_by_index", "slice_by_index", "concat"] + assert get_op_types_in_program(prog) == ["pad"] + + inputs = {"x1": (1, 2, 6, 8)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 8, 8)}, + ) + + def test_failure_wrong_concat_order(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))]) + def prog(x1): + left = mb.slice_by_index( + x=x1, begin=[0, 0, 1, 0], end=[0, 0, 2, 0], end_mask=[True, True, False, True] + ) + right = mb.slice_by_index( + x=x1, begin=[0, 0, -2, 0], end=[0, 0, -1, 0], end_mask=[True, True, False, True] + ) + # Concat is not in correct order + x = mb.concat(values=[left, 
right, x1], axis=2) + + return x + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::use_reflection_padding") + assert get_op_types_in_program(prev_prog) == ["slice_by_index", "slice_by_index", "concat"] + assert get_op_types_in_program(prog) == ["slice_by_index", "slice_by_index", "concat"] + + inputs = {"x1": (1, 2, 6, 8)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 8, 8)}, + ) + + def test_failure_wrong_concat_order_2(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))]) + def prog(x1): + left0 = mb.slice_by_index( + x=x1, begin=[0, 0, 0, 1], end=[0, 0, 0, 2], end_mask=[True, True, True, False] + ) + left1 = mb.slice_by_index( + x=x1, begin=[0, 0, 0, 2], end=[0, 0, 0, 3], end_mask=[True, True, True, False] + ) + right0 = mb.slice_by_index( + x=x1, begin=[0, 0, 0, -3], end=[0, 0, 0, -2], end_mask=[True, True, True, False] + ) + right1 = mb.slice_by_index( + x=x1, begin=[0, 0, 0, -2], end=[0, 0, 0, -1], end_mask=[True, True, True, False] + ) + # concat args are out of order + x = mb.concat(values=[left0, left1, x1, right1, right0], axis=3) + + return x + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::use_reflection_padding") + assert get_op_types_in_program(prev_prog) == [ + "slice_by_index", + "slice_by_index", + "slice_by_index", + "slice_by_index", + "concat", + ] + assert get_op_types_in_program(prog) == [ + "slice_by_index", + "slice_by_index", + "slice_by_index", + "slice_by_index", + "concat", + ] + + inputs = {"x1": (1, 2, 6, 8)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 6, 12)}, + ) + + def test_failure_wrong_slice_size(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))]) + def prog(x1): + # slice is too big + left = mb.slice_by_index( + x=x1, begin=[0, 0, 1, 0], end=[0, 0, 3, 0], end_mask=[True, True, False, True] + ) + right = mb.slice_by_index( + x=x1, begin=[0, 0, -2, 0], end=[0, 0, -1, 0], end_mask=[True, True, False, True] + ) + x = mb.concat(values=[left, x1, right], axis=2) + + return x + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::use_reflection_padding") + assert get_op_types_in_program(prev_prog) == ["slice_by_index", "slice_by_index", "concat"] + assert get_op_types_in_program(prog) == ["slice_by_index", "slice_by_index", "concat"] + + inputs = {"x1": (1, 2, 6, 8)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 9, 8)}, + ) + + def test_failure_not_all_same_input(self): + @mb.program( + input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8)), mb.TensorSpec(shape=(1, 2, 6, 8))] + ) + def prog(x1, x2): + left0 = mb.slice_by_index( + x=x1, begin=[0, 0, 0, 1], end=[0, 0, 0, 2], end_mask=[True, True, True, False] + ) + left1 = mb.slice_by_index( + x=x1, begin=[0, 0, 0, 2], end=[0, 0, 0, 3], end_mask=[True, True, True, False] + ) + right0 = mb.slice_by_index( + x=x1, begin=[0, 0, 0, -3], end=[0, 0, 0, -2], end_mask=[True, True, True, False] + ) + # one of the slices consumes a different input from the others + right1 = mb.slice_by_index( + x=x2, begin=[0, 0, 0, -2], end=[0, 0, 0, -1], end_mask=[True, True, True, False] + ) + x = mb.concat(values=[left0, left1, x1, right0, right1], axis=3) + + return x + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::use_reflection_padding") + assert get_op_types_in_program(prev_prog) == [ + "slice_by_index", + "slice_by_index", + "slice_by_index", + "slice_by_index", + "concat", + ] 
+ assert get_op_types_in_program(prog) == [ + "slice_by_index", + "slice_by_index", + "slice_by_index", + "slice_by_index", + "concat", + ] + + inputs = {"x1": (1, 2, 6, 8), "x2": (1, 2, 6, 8)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (1, 2, 6, 12)}, + ) + + def test_failure_slice_output(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))]) + def prog(x1): + left = mb.slice_by_index( + x=x1, begin=[0, 0, 0, 1], end=[0, 0, 0, 2], end_mask=[True, True, True, False] + ) + right = mb.slice_by_index( + x=x1, begin=[0, 0, 0, -2], end=[0, 0, 0, -1], end_mask=[True, True, True, False] + ) + x = mb.concat(values=[left, x1, right], axis=3) + + # slice is an output + return x, right + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::use_reflection_padding") + assert get_op_types_in_program(prev_prog) == ["slice_by_index", "slice_by_index", "concat"] + assert get_op_types_in_program(prog) == ["slice_by_index", "slice_by_index", "concat"] + + inputs = {"x1": (1, 2, 6, 8)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={ + block.outputs[0].name: (1, 2, 6, 10), + block.outputs[1].name: (1, 2, 6, 1), + }, + ) + + def test_concat_input_only(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))]) + def prog(x): + x = mb.concat(values=[x, x, x], axis=0) + return x + + prev_prog, _, block = apply_pass_and_basic_check(prog, "common::use_reflection_padding") + assert get_op_types_in_program(prog) == ["concat"] + + inputs = {"x": (1, 2, 6, 8)} + assert_model_is_valid( + prog, + inputs, + expected_output_shapes={block.outputs[0].name: (3, 2, 6, 8)}, + ) + + +class TestDivideToMultiply: + def test_divide_to_multiply(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x): + div_val = np.random.rand(2, 4).astype(np.float32) + div_const = mb.const(val=div_val) + + div_val_1 = np.random.rand(2, 4).astype(np.float32) + div_const_1 = mb.const(val=div_val_1) + + real_div = mb.real_div(x=x, y=div_const) + + return mb.real_div(x=real_div, y=div_const_1) + + assert_op_count_match(prog, expect=2, op="real_div") + assert_op_count_match(prog, expect=0, op="mul") + prev_prog = copy.deepcopy(prog) + PASS_REGISTRY["common::divide_to_multiply"](prog) + assert_same_output_names(prev_prog, prog) + assert_op_count_match(prog, expect=0, op="real_div") + assert_op_count_match(prog, expect=2, op="mul") + + if _VALIDATE_MODEL: + assert_model_is_valid(prog, {"x": (2, 4)}) + + +class TestFuseElementwiseToBatchNorm: + """ + Input graph: + Const Const + | | + V V + input -----> transpose -----> mul ----> add ---> out + + Output graph: + input -----> transpose -----> batchnorm ----> out + """ + + @pytest.mark.parametrize( + "flip_mul_input_order, flip_add_input_order, rank_3_const_input", + itertools.product([False, True], [False, True], [False, True]), + ) + def test_mul_add_fusion_to_batchnorm( + self, flip_mul_input_order, flip_add_input_order, rank_3_const_input + ): + + C = 3 + gamma = np.random.rand(1, C, 1, 1) + beta = np.random.rand(1, C, 1, 1) + if rank_3_const_input: + gamma = np.squeeze(gamma, axis=0) + beta = np.squeeze(beta, axis=0) + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 10, 10, C))]) + def prog(x): + x = mb.transpose(x=x, perm=[0, 3, 1, 2]) + if flip_mul_input_order: + x = mb.mul(x=gamma, y=x) + else: + x = mb.mul(x=x, y=gamma) + if flip_add_input_order: + x = mb.add(x=beta, y=x) + else: + x = mb.add(x=x, y=beta) + return x + + prev_prog, prev_block, block = 
apply_pass_and_basic_check( + prog, "common::fuse_elementwise_to_batchnorm" + ) + assert get_op_types_in_program(prev_prog) == ["transpose", "mul", "add"] + assert get_op_types_in_program(prog) == ["transpose", "batch_norm"] + assert_model_is_valid( + prog, + {"x": (1, 10, 10, C)}, + expected_output_shapes={block.outputs[0].name: (1, C, 10, 10)}, + ) + + +class TestRank0ExpandDimsSwap: + """ + Input graph: + 2.0 + | + v + input --> slice_by_index --> sub --> expand_dims --> output + + Output graph: + [2.0] + | + v + input --> slice_by_index --> expand_dims --> sub --> output + """ + + @pytest.mark.skipif( + ct.utils._macos_version() < (12, 0), reason="mlprogram predict available only on macOS12+" + ) + @pytest.mark.parametrize( + "reverse_order, elem_op", + itertools.product( + [True, False], + ["add", "sub", "mul", "real_div", "floor_div"], + ), + ) + def test(self, reverse_order, elem_op): + x_shape = [ + 1, + ] + + @mb.program(input_specs=[mb.TensorSpec(shape=x_shape)]) + def program(x): + x = mb.slice_by_index(x=x, begin=[0], end=[1], squeeze_mask=[True]) + func = getattr(mb, elem_op) + + if reverse_order: + x = func(x=2.0, y=x) + else: + x = func(x=x, y=2.0) + + expand = mb.expand_dims(x=x, axes=[0]) + other_1 = mb.add(x=x, y=[1.0, 2.0, 3.0]) + other_2 = mb.sub(x=x, y=[1.0, 2.0, 3.0]) + return expand, other_1, other_2 + + prev_prog, prev_block, block = apply_pass_and_basic_check( + program, "common::rank0_expand_dims_swap" + ) + assert get_op_types_in_program(prev_prog) == [ + "slice_by_index", + elem_op, + "expand_dims", + "add", + "sub", + ] + assert get_op_types_in_program(program) == [ + "slice_by_index", + "expand_dims", + "expand_dims", + elem_op, + "squeeze", + "add", + "sub", + ] + assert_model_is_valid( + program=program, + inputs={"x": x_shape}, + expected_output_shapes={ + block.outputs[0].name: tuple(x_shape), + block.outputs[1].name: (3,), + block.outputs[2].name: (3,), + }, + ) + + +class TestImageInputPreprocess(unittest.TestCase): + """ + Input graph: + input (format=NHWC) ------> transpose(axis=[0, 3, 1, 2]) ---------> add ----> relu ---> out + | ^ + | | + ---> relu ---> transpose(axis=[0, 3, 1, 2]) --- + + Intermediate graph: + input (format=NCHW) -----> transpose(axis=[0, 2, 3, 1]) ----> transpose(axis=[0, 3, 1, 2]) ---------> add ----> relu ---> out + | ^ + | | + ---> relu ---> transpose(axis=[0, 3, 1, 2]) --- + + + Output graph: + input (format=NCHW) -----> relu -----> add -----> relu -----> out + | ^ + | | + ------------------- + """ + + def test_fusion_with_image_intermediate_graph(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20, 30, 3))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[0, 3, 1, 2]) + x2 = mb.relu(x=x) + x3 = mb.transpose(x=x2, perm=[0, 3, 1, 2]) + x4 = mb.add(x=x1, y=x3) + return mb.relu(x=x4) + + prog.main_input_types = [ct.ImageType(name="x", shape=(10, 20, 30, 3), channel_first=False)] + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::image_input_preprocess" + ) + self.assertEqual( + get_op_types_in_program(prev_prog), ["transpose", "relu", "transpose", "add", "relu"] + ) + self.assertEqual( + get_op_types_in_program(prog), + ["transpose", "transpose", "relu", "transpose", "add", "relu"], + ) + + def test_fusion_with_image_full(self): + # Avoid circular import + from coremltools import convert + + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20, 30, 3))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[0, 3, 1, 2]) + x2 = mb.relu(x=x) + x3 = mb.transpose(x=x2, perm=[0, 3, 1, 2]) + x4 = mb.add(x=x1, 
y=x3) + return mb.relu(x=x4) + + mlmodel = convert( + prog, + inputs=[ct.ImageType(name="x", shape=(10, 20, 30, 3), channel_first=False)], + source="milinternal", + convert_to="neuralnetwork", + ) + assert mlmodel is not None + assert len(mlmodel.get_spec().neuralNetwork.layers) == 3 + + +class TestSanitizeInputOutputNames: + def test_nn_backend_style_sanitization(self): + """ + Test that intermediate var names are unchanged, and + only model input and output names are modified, i.e. + sanitized (adhering to the format [a-zA-Z_][a-zA-Z0-9_]*) + for the NN backend. + """ + + prog = Program() + func_inputs = {"x/0": mb.placeholder(shape=[2, 3]), "y": mb.placeholder(shape=[2, 3])} + with Function(func_inputs) as ssa_fun: + x, y = ssa_fun.inputs["x/0"], ssa_fun.inputs["y"] + x = mb.relu(x=x, name="relu/1") + z = mb.add(x=x, y=y, name="out/1") + ssa_fun.set_outputs([z]) + prog.add_function("main", ssa_fun) + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::sanitize_input_output_names", skip_output_name_check=True + ) + + relu_op = prog.find_ops(op_type="relu", exactly_one=True)[0] + assert relu_op.inputs["x"].name == "x_0" # input name: sanitized + assert relu_op.outputs[0].name == "relu/1" # intermediate name: unchanged + assert block.outputs[0].name == "out_1" # output name: sanitized + + # convert prev_prog to NN backend + mlmodel = ct.convert(prev_prog) + spec = mlmodel._spec + assert spec.description.input[0].name == "x_0" + assert spec.description.output[0].name == "out_1" + relu_layer = spec.neuralNetwork.layers[0] + assert relu_layer.output[0] == "relu/1" + + +class TestUpdateOutputDtypes: + def test_single_output(self): + """ + Given: + ------ + main(%input: (1, 20, int32)(Tensor)) { + block0() { + %abs: (1, 20, int32)(Tensor) = abs(x=%input, name="abs") + %output_square: (1, 20, int32)(Tensor) = square(x=%input, name="output_square") + } -> (%output_square) + } + prog.main_output_types = [ct.TensorType(dtype=np.float16)] + + Result: + ------ + main(%input: (1, 20, int32)(Tensor)) { + block0() { + %abs: (1, 20, int32)(Tensor) = abs(x=%input, name="abs") + %output_square_type_int32: (1, 20, int32)(Tensor) = square(x=%input, name="output_square") + %output_square: (1, 20, fp16)(Tensor) = cast(x=%output_square_type_int32, dtype="fp16", name="cast_0") + } -> (%output_square) + } + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 20), dtype=types.int32)]) + def prog(input): + x = mb.abs(x=input, name="abs") + x = mb.square(x=input, name="output_square") + return x + + prog.set_main_output_types([ct.TensorType(dtype=np.float16)]) + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::update_output_dtypes" + ) + assert get_op_types_in_program(prev_prog) == ["abs", "square"] + assert prev_block.outputs[0].dtype == types.int32 + assert get_op_types_in_program(prog) == ["abs", "square", "cast"] + assert block.outputs[0].dtype == types.fp16 + assert block.outputs[0].name == "output_square" + + def test_multiple_outputs(self): + """ + Given: + ----- + main(%input: (1, 20, int32)(Tensor)) { + block0() { + %split_0: (1, 10, int32)(Tensor), %split_1: (1, 10, int32)(Tensor) = split(x=%input, num_splits=2, axis=1, name="split") + } -> (%split_0, %split_1) + } + prog.main_output_types = [ct.TensorType(), ct.TensorType(dtype=np.float16)] + + Result: + ------ + main(%input: (1, 20, int32)(Tensor)) { + block0() { + %split_0: (1, 10, int32)(Tensor), %split_1_type_int32: (1, 10, int32)(Tensor) = split(x=%input, num_splits=2, axis=1, name="split") + 
%split_1: (1, 10, fp16)(Tensor) = cast(x=%split_1_type_int32, dtype="fp16", name="cast_0") + } -> (%split_0, %split_1) + } + + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 20), dtype=types.int32)]) + def prog(input): + x1, x2 = mb.split(x=input, num_splits=2, axis=1, name="split") + return x1, x2 + + prog.set_main_output_types([ct.TensorType(), ct.TensorType(dtype=np.float16)]) + _, _, block = apply_pass_and_basic_check(prog, "common::update_output_dtypes") + assert get_op_types_in_program(prog) == ["split", "cast"] + assert block.outputs[1].dtype == types.fp16 + assert block.outputs[1].name == "split_1" + + +class TestFuseLayerNormOrInstanceNorm: + @pytest.mark.parametrize("axes_size", [1, 2, 3]) + def test_layer_norm(self, axes_size): + """ + Detect layer norm pattern, found in the TF bert model. + y = x * [gamma * rsqrt(variance + eps)] + (beta - mean * [gamma * rsqrt(variance + eps)]) + + where mean and variance are computed along axes [-1] or [-1,-2] and so on + and gamma and beta are constants with rank equal to the length of the axes parameter. + """ + shape = (3, 5, 6) + rank = len(shape) + axes = list(range(rank - axes_size, rank)) + + @mb.program(input_specs=[mb.TensorSpec(shape=shape)]) + def prog(x): + x1 = mb.reduce_mean(x=x, axes=axes, keep_dims=True) + x2 = mb.sub(x=x, y=x1) + x2 = mb.square(x=x2) + x2 = mb.reduce_mean(x=x2, axes=axes, keep_dims=True) + x2 = mb.add(x=x2, y=1e-5) + x2 = mb.rsqrt(x=x2) + x3 = mb.mul(x=np.random.rand(*shape[-len(axes) :]), y=x2) + x4 = mb.mul(x=x3, y=x1) + x5 = mb.mul(x=x, y=x3) + x4 = mb.sub(x=np.random.rand(*shape[-len(axes) :]), y=x4) + y = mb.add(x=x4, y=x5) + return y + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::fuse_layernorm_or_instancenorm" + ) + assert get_op_types_in_program(prev_prog) == [ + "reduce_mean", + "sub", + "square", + "reduce_mean", + "add", + "rsqrt", + "mul", + "mul", + "mul", + "sub", + "add", + ] + assert get_op_types_in_program(prog) == ["layer_norm"] + assert_model_is_valid( + prog, {"x": shape}, expected_output_shapes={block.outputs[0].name: shape} + ) + + def test_instance_norm_pattern_1(self): + """ + Detect instance norm pattern + y = x * [gamma * rsqrt(variance + eps)] + (beta - mean * [gamma * rsqrt(variance + eps)]) + + where input is rank 4, (N,C,H,W), axis=[2, 3], along which reduction happens, + and gamma and beta are of shape (1,C,1,1) + """ + shape = (3, 5, 6, 7) + + @mb.program(input_specs=[mb.TensorSpec(shape=shape)]) + def prog(x): + x1 = mb.reduce_mean(x=x, axes=[2, 3], keep_dims=True) + x2 = mb.sub(x=x, y=x1) + x2 = mb.square(x=x2) + x2 = mb.reduce_mean(x=x2, axes=[2, 3], keep_dims=True) + x2 = mb.add(x=x2, y=1e-5) + x2 = mb.rsqrt(x=x2) + x3 = mb.mul(x=np.random.rand(1, shape[1], 1, 1), y=x2) + x4 = mb.mul(x=x3, y=x1) + x5 = mb.mul(x=x, y=x3) + x4 = mb.sub(x=np.random.rand(1, shape[1], 1, 1), y=x4) + y = mb.add(x=x4, y=x5) + return y + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::fuse_layernorm_or_instancenorm" + ) + assert get_op_types_in_program(prev_prog) == [ + "reduce_mean", + "sub", + "square", + "reduce_mean", + "add", + "rsqrt", + "mul", + "mul", + "mul", + "sub", + "add", + ] + assert get_op_types_in_program(prog) == ["instance_norm"] + assert_model_is_valid( + prog, {"x": shape}, expected_output_shapes={block.outputs[0].name: shape} + ) + + def test_instance_norm_pattern_1_rank_1_gamma_beta(self): + """ + Detect instance norm pattern + y = x * [gamma * rsqrt(variance + eps)] + (beta - mean * [gamma * 
rsqrt(variance + eps)])
+
+        where the input is rank 4 in a channel-last layout (N, H, W, C), the reduction
+        happens along axes [1, 2], and gamma and beta are of shape (C,). The fusion
+        inserts transposes so that a channel-first instance_norm op can be used
+        (as checked below).
+        """
+        shape = (3, 5, 6, 7)
+
+        @mb.program(input_specs=[mb.TensorSpec(shape=shape)])
+        def prog(x):
+            x1 = mb.reduce_mean(x=x, axes=[1, 2], keep_dims=True)
+            x2 = mb.sub(x=x, y=x1)
+            x2 = mb.square(x=x2)
+            x2 = mb.reduce_mean(x=x2, axes=[1, 2], keep_dims=True)
+            x2 = mb.add(x=x2, y=1e-5)
+            x2 = mb.rsqrt(x=x2)
+            x3 = mb.mul(x=np.random.rand(shape[3]), y=x2)
+            x4 = mb.mul(x=x3, y=x1)
+            x5 = mb.mul(x=x, y=x3)
+            x4 = mb.sub(x=np.random.rand(shape[3]), y=x4)
+            y = mb.add(x=x4, y=x5)
+            return y
+
+        prev_prog, prev_block, block = apply_pass_and_basic_check(
+            prog, "common::fuse_layernorm_or_instancenorm"
+        )
+        assert get_op_types_in_program(prev_prog) == [
+            "reduce_mean",
+            "sub",
+            "square",
+            "reduce_mean",
+            "add",
+            "rsqrt",
+            "mul",
+            "mul",
+            "mul",
+            "sub",
+            "add",
+        ]
+        assert get_op_types_in_program(prog) == ["transpose", "instance_norm", "transpose"]
+        assert_model_is_valid(
+            prog, {"x": shape}, expected_output_shapes={block.outputs[0].name: shape}
+        )
+
+    def test_instance_norm_pattern_1_with_channel_last_data_format(self):
+        """
+        Detect instance norm pattern with channel-last data format:
+        x = transpose(x) # channel first to channel last, NCHW -> NHWC
+        x = x * [gamma * rsqrt(variance + eps)] + (beta - mean * [gamma * rsqrt(variance + eps)])
+        x = transpose(x) # channel last to channel first, NHWC -> NCHW
+
+        The input is rank 4 (N, C, H, W) and the input for the fused "instance_norm" op is
+        rank 4 (N, H, W, C), with axis=[1, 2] or [-3, -2], along which reduction happens.
+
+        This is common in TensorFlow models when the data format is channel last.
+        PyMIL inserts transposes around "conv" layers to make "conv" channel first.
+        The "fuse_layernorm_or_instancenorm" pass is expected to fuse this pattern as well.
+        """
+        shape = (1, 3, 5, 5)
+
+        @mb.program(input_specs=[mb.TensorSpec(shape=shape)])
+        def prog(x):
+            x = mb.transpose(x=x, perm=[0, 2, 3, 1])
+            x1 = mb.reduce_mean(x=x, axes=[1, 2], keep_dims=True)
+            x2 = mb.sub(x=x, y=x1)
+            x2 = mb.square(x=x2)
+            x2 = mb.reduce_mean(x=x2, axes=[1, 2], keep_dims=True)
+            x2 = mb.add(x=x2, y=1e-5)
+            x2 = mb.rsqrt(x=x2)
+            x3 = mb.mul(x=np.random.rand(1, 1, 1, shape[1]), y=x2)
+            x4 = mb.mul(x=x3, y=x1)
+            x5 = mb.mul(x=x, y=x3)
+            x4 = mb.sub(x=np.random.rand(1, 1, 1, shape[1]), y=x4)
+            x6 = mb.add(x=x4, y=x5)
+            y = mb.transpose(x=x6, perm=[0, 3, 1, 2])
+            return y
+
+        prev_prog, prev_block, block = apply_pass_and_basic_check(
+            prog, "common::fuse_layernorm_or_instancenorm"
+        )
+        assert get_op_types_in_program(prev_prog) == [
+            "transpose",
+            "reduce_mean",
+            "sub",
+            "square",
+            "reduce_mean",
+            "add",
+            "rsqrt",
+            "mul",
+            "mul",
+            "mul",
+            "sub",
+            "add",
+            "transpose",
+        ]
+        assert get_op_types_in_program(prog) == [
+            "transpose",
+            "transpose",
+            "instance_norm",
+            "transpose",
+            "transpose",
+        ]
+        assert_model_is_valid(
+            prog,
+            {"x": shape},
+            expected_output_shapes={block.outputs[0].name: shape},
+        )
+        # The reduce_transposes pass should remove the extra transposes.
+        prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes")
+        assert get_op_types_in_program(prog) == ["instance_norm"]
+        assert_model_is_valid(
+            prog,
+            {"x": shape},
+            expected_output_shapes={block.outputs[0].name: shape},
+        )
+
+    def test_instance_norm_pattern_2(self):
+        """
+        Detect instance norm pattern 2 and fusion.
+ + |----> sub0 ----| const (0.5) + | ^ | | + | | V V + x ---> mean0 square --> mean1 --> add_eps ---> pow const_gamma const_beta + | | | | | + | V V V V + |----> sub1 --------------------------------> real_div --> mul_gamma --> add_beta --> ... + """ + shape = (3, 5, 6, 7) + + @mb.program(input_specs=[mb.TensorSpec(shape=shape)]) + def prog(x): + mean0 = mb.reduce_mean(x=x, axes=[2, 3], keep_dims=True) + sub0 = mb.sub(x=x, y=mean0) + sub1 = mb.sub(x=x, y=mean0) + square = mb.square(x=sub0) + mean1 = mb.reduce_mean(x=square, axes=[2, 3], keep_dims=True) + add_eps = mb.add(x=mean1, y=1e-5) # epsilon + pow = mb.pow(x=add_eps, y=0.5) + div = mb.real_div(x=sub1, y=pow) + mul_gamma = mb.mul(x=np.random.rand(1, shape[1], 1, 1), y=div) # + add_beta = mb.add(x=np.random.rand(1, shape[1], 1, 1), y=mul_gamma) + return add_beta + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::fuse_layernorm_or_instancenorm" + ) + assert get_op_types_in_program(prev_prog) == [ + "reduce_mean", + "sub", + "sub", + "square", + "reduce_mean", + "add", + "pow", + "real_div", + "mul", + "add", + ] + assert get_op_types_in_program(prog) == ["instance_norm"] + assert_model_is_valid( + prog, {"x": shape}, expected_output_shapes={block.outputs[0].name: shape} + ) + + def test_instance_norm_pattern_3(self): + """ + Detect and fuse instance norm pattern 3 (pattern in TensorFlow-Addons). + + |-------------------------------------------------| + | | + | V + x --> mean square --> mean1 --> add_eps --> rsqrt --> mul2 --> mul_sub + | | ^ | | + | V | | | + | --> sub -----| | | + | V V + |--------------------------------------------> mul1 -------------> add --> ... + """ + shape = (3, 5, 6, 7) + + @mb.program(input_specs=[mb.TensorSpec(shape=shape)]) + def prog(x): + mean0 = mb.reduce_mean(x=x, axes=[2, 3], keep_dims=True) + sub = mb.sub(x=x, y=mean0) + square = mb.square(x=sub) + mean1 = mb.reduce_mean(x=square, axes=[2, 3], keep_dims=True) + add_eps = mb.add(x=mean1, y=1e-5) # epsilon + rsqrt = mb.rsqrt(x=add_eps) + mul1 = mb.mul(x=rsqrt, y=x) + mul2 = mb.mul(x=mean0, y=rsqrt) + mul_sub = mb.mul(x=mul2, y=-1.0) + add = mb.add(x=mul1, y=mul_sub) + return add + + prev_prog, prev_block, block = apply_pass_and_basic_check( + prog, "common::fuse_layernorm_or_instancenorm" + ) + assert get_op_types_in_program(prev_prog) == [ + "reduce_mean", + "sub", + "square", + "reduce_mean", + "add", + "rsqrt", + "mul", + "mul", + "mul", + "add", + ] + assert get_op_types_in_program(prog) == ["instance_norm"] + assert_model_is_valid( + prog, {"x": shape}, expected_output_shapes={block.outputs[0].name: shape} + ) + + def test_instance_norm_pattern_4(self): + """ + Detect and fuse instance norm pattern 4. + + |-----------| + | V + |------> mul_square1 -----> sum1 -----> mul_mean1 + | | + | V + x --> sum --> mul_mean ==> mul_square --> sub_variance --> add_eps --> rsqrt + | | | + | | V + | | mul_gamma + | | | + | | |----------------| + | | | V + | |--------------------------------------------+-------------> mul2 + | V | + |----------------------------------------------------------> mul1 | + | V + | sub_beta --> add --> [...] 
+        |                                                    ^
+        |----------------------------------------------------|
+        """
+        shape = (3, 5, 6, 7)
+
+        @mb.program(input_specs=[mb.TensorSpec(shape=shape)])
+        def prog(x):
+            mul_square1 = mb.mul(x=x, y=x)
+            sum = mb.reduce_sum(x=x, axes=[2, 3], keep_dims=True)
+            mul_mean = mb.mul(x=sum, y=3.3333334e-05)  # dummy value here
+            mul_square = mb.mul(x=mul_mean, y=mul_mean)
+            sum1 = mb.reduce_sum(x=mul_square1, axes=[2, 3], keep_dims=True)
+            mul_mean1 = mb.mul(x=sum1, y=8.333333e-06)  # dummy value here
+            sub_variance = mb.sub(x=mul_mean1, y=mul_square)
+            add_eps = mb.add(x=sub_variance, y=1e-5)  # epsilon
+            rsqrt = mb.rsqrt(x=add_eps)
+            mul_gamma = mb.mul(x=rsqrt, y=np.random.rand(1, shape[1], 1, 1))
+            mul1 = mb.mul(x=mul_gamma, y=x)
+            mul2 = mb.mul(x=mul_mean, y=mul_gamma)
+            sub_beta = mb.sub(x=np.random.rand(1, shape[1], 1, 1), y=mul2)
+            add = mb.add(x=mul1, y=sub_beta)
+            return add
+
+        prev_prog, prev_block, block = apply_pass_and_basic_check(
+            prog, "common::fuse_layernorm_or_instancenorm"
+        )
+        assert get_op_types_in_program(prev_prog) == [
+            "mul",
+            "reduce_sum",
+            "mul",
+            "mul",
+            "reduce_sum",
+            "mul",
+            "sub",
+            "add",
+            "rsqrt",
+            "mul",
+            "mul",
+            "mul",
+            "sub",
+            "add",
+        ]
+        assert get_op_types_in_program(prog) == ["instance_norm"]
+        assert_model_is_valid(
+            prog, {"x": shape}, expected_output_shapes={block.outputs[0].name: shape}
+        )
+
+
+class TestFuseLinearBias:
+    @staticmethod
+    def _apply_transform(inputs, func, is_first_input, has_bias):
+        """
+        Utility function to test the weight/bias transform function in the linear bias fusion pass.
+        """
+
+        @mb.program(input_specs=[mb.TensorSpec(shape=(3, 4))])
+        def prog(x):
+
+            if has_bias:
+                linear = mb.linear(
+                    x=x,
+                    weight=inputs["linear_weight"],
+                    bias=inputs["linear_bias"],
+                )
+            else:
+                linear = mb.linear(
+                    x=x,
+                    weight=inputs["linear_weight"],
+                )
+
+            if is_first_input:
+                kwargs = {
+                    "x": linear,
+                    "y": inputs["bias"],
+                }
+            else:
+                kwargs = {
+                    "x": inputs["bias"],
+                    "y": linear,
+                }
+
+            x = func(**kwargs)
+            return x
+
+        apply_pass_and_basic_check(
+            prog,
+            "common::fuse_linear_bias",
+        )
+
+        # get the updated weight and bias from the prog
+        linear_op = []
+        for op in prog["main"].operations:
+            if op.op_type == "const":
+                continue
+            linear_op.append(op)
+        assert len(linear_op) == 1, "should only have one linear layer."
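+
+        # A sketch of the algebra the fusion relies on (it mirrors the expected
+        # values computed in test_transform_linear below). With original weight
+        # W and bias b0, and elementwise constant b:
+        #   add:                 y = W x + (b0 + b)
+        #   sub, linear first:   y = W x + (b0 - b)
+        #   sub, linear second:  y = -W x + (b - b0)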
+ + return linear_op[0].weight.val, linear_op[0].bias.val + + @pytest.mark.parametrize( + "op_type, is_first_input, has_bias, broadcast", + itertools.product( + ["add", "sub"], + [True, False], + [True, False], + [True, False], + ), + ) + def test_transform_linear(self, op_type, is_first_input, has_bias, broadcast): + """ + Test the weight / bias transform function in the linear bias fusion pass + """ + weight = np.reshape(np.arange(8), (2, 4)).astype(np.float32) + linear_bias = ( + np.array([1, 2]).astype(np.float32) if has_bias else np.array([0, 0]).astype(np.float32) + ) + bias = np.array([3, 4]).astype(np.float32) + if broadcast: + bias = np.reshape(bias, (1, 2)) + + inputs = { + "linear_weight": weight, + "linear_bias": linear_bias, + "bias": bias, + } + + if op_type == "add": + func = mb.add + elif op_type == "sub": + func = mb.sub + + new_weight, new_bias = self._apply_transform( + inputs, + func, + is_first_input, + has_bias, + ) + if broadcast: + bias = np.reshape(bias, (2,)) + + if op_type == "sub" and not is_first_input: + expected_weight = -weight + else: + expected_weight = weight + + if op_type == "sub": + if is_first_input: + expected_bias = linear_bias - bias + else: + expected_bias = bias - linear_bias + else: + expected_bias = linear_bias + bias + + np.testing.assert_almost_equal(new_weight, expected_weight) + np.testing.assert_almost_equal(new_bias, expected_bias) + + @pytest.mark.parametrize( + "rank, op_type, is_first_input, broadcast, backend", + itertools.product([1, 2, 3], ["add", "sub"], [True, False], [True, False], backends), + ) + def test_linear_bias_fusion(self, rank, op_type, is_first_input, broadcast, backend): + """ + Input graph: + Const + | + V + input -----> linear -----> add/sub ---> out + + Output graph: + input -----> linear ----> out + """ + input_shape = [1, 2, 3] + input_shape = input_shape[-rank:] + input_shape = tuple(input_shape) + + @mb.program(input_specs=[mb.TensorSpec(shape=input_shape)]) + def prog(x): + linear_weight = np.reshape(np.arange(6), (2, 3)).astype(np.float32) + linear_bias = np.array([1.0, 2.0]) + bias = np.array([3.0, 4.0]) + if broadcast: + if rank >= 2: + bias = np.reshape(bias, (1, 2)) + + x = mb.linear( + x=x, + weight=linear_weight, + bias=linear_bias, + ) + + func = mb.add if op_type == "add" else mb.sub + if is_first_input: + kwargs = { + "x": x, + "y": bias, + } + else: + kwargs = { + "x": bias, + "y": x, + } + x = func(**kwargs) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::fuse_linear_bias") + + assert get_op_types_in_program(prev_prog) == ["linear", op_type] + assert get_op_types_in_program(prog) == ["linear"] + + # validate graph pass + output_shape = [1, 2, 2] + output_shape = tuple(output_shape[-rank:]) + assert_model_is_valid( + prog, + {"x": input_shape}, + expected_output_shapes={block.outputs[0].name: output_shape}, + backend=backend, + ) + + +class TestFuseMatmulWeightBias: + def test_fuse_matmul_weight_bias(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x): + weights_val = np.random.rand(2, 4).T.astype(np.float32) + weights = mb.const(val=weights_val) + bias_val = np.random.rand(2).astype(np.float32) + bias = mb.const(val=bias_val) + + matmul = mb.matmul(x=x, y=weights) + return mb.add(x=matmul, y=bias) + + assert_op_count_match(prog, expect=1, op="matmul") + assert_op_count_match(prog, expect=0, op="linear") + prev_prog = copy.deepcopy(prog) + PASS_REGISTRY["common::fuse_matmul_weight_bias"](prog) + assert_same_output_names(prev_prog, 
prog)
+        assert_op_count_match(prog, expect=0, op="matmul")
+        assert_op_count_match(prog, expect=1, op="linear")
+
+        if _VALIDATE_MODEL:
+            assert_model_is_valid(prog, {"x": (2, 4)})
+
+
+class TestCompressionGraphPass:
+    """
+    Most of the numerical tests are already covered in coremltools.tests.ml_program.test_compression_utils.
+    This test checks the basic behavior of the graph pass classes.
+    """
+
+    @staticmethod
+    def _get_conv_program():
+        @mb.program(
+            input_specs=[mb.TensorSpec(shape=(1, 30, 10, 10))], opset_version=ct.target.iOS16
+        )
+        def prog(x):
+            conv_weight = np.random.rand(90, 30, 2, 2).astype(np.float32)
+            x = mb.conv(x=x, weight=conv_weight)
+            return x
+
+        return prog
+
+    @pytest.mark.parametrize(
+        "fake_compression",
+        [True, False],
+    )
+    def test_affine_quantizer(self, fake_compression):
+        quantizer = quantization.WeightAffineQuantizer(
+            fake_compression=fake_compression, op_selector=lambda const: True
+        )
+        prog = self._get_conv_program()
+        quantizer.apply(prog)
+        expected_ops = ["constexpr_affine_dequantize", "conv"] if not fake_compression else ["conv"]
+        assert get_op_types_in_program(prog) == expected_ops
+
+    @pytest.mark.parametrize(
+        "fake_compression",
+        [True, False],
+    )
+    def test_weight_sparsifier(self, fake_compression):
+        quantizer = quantization.WeightSparsifier(
+            fake_compression=fake_compression,
+            op_selector=lambda const: True,
+            mode="percentile_based",
+            target_percentile=0.75,
+        )
+        prog = self._get_conv_program()
+        quantizer.apply(prog)
+        expected_ops = ["constexpr_sparse_to_dense", "conv"] if not fake_compression else ["conv"]
+        assert get_op_types_in_program(prog) == expected_ops
+
+    @pytest.mark.parametrize(
+        "fake_compression",
+        [True, False],
+    )
+    def test_weight_palettization(self, fake_compression):
+        quantizer = quantization.WeightPalettizer(
+            fake_compression=fake_compression,
+            op_selector=lambda const: True,
+            mode="uniform",
+            nbits=4,
+        )
+        prog = self._get_conv_program()
+        quantizer.apply(prog)
+        expected_ops = ["constexpr_lut_to_dense", "conv"] if not fake_compression else ["conv"]
+        assert get_op_types_in_program(prog) == expected_ops
+
+    @pytest.mark.parametrize(
+        "axis, mode, source_dtype, target_dtype, data_range",
+        itertools.product(
+            [0, 1, 2, 3, -1],
+            ["linear", "linear_symmetric"],
+            [np.float16, np.float32],
+            [types.uint8, types.int8],
+            [
+                [-1., 1.],
+                [-3., -1.],
+                [1., 3.],
+                # Test corner case of same values
+                [0., 0.],
+                [1., 1.],
+                [-1., -1.],
+            ]
+        ),
+    )
+    def test_affine_quantizer_compression(self, axis, mode, source_dtype, target_dtype, data_range):
+        input_shape = (10, 20, 30, 40)
+        low, high = data_range
+        val = np.random.uniform(low, high, input_shape).astype(source_dtype)
+
+        params = quantization.WeightAffineQuantizer.compress(val, axis, mode, target_dtype)
+        decompressed_val = quantization.WeightAffineQuantizer.decompress(params)
+
+        np.testing.assert_allclose(val, decompressed_val, rtol=1e-02, atol=1e-02)
+
+    @pytest.mark.parametrize(
+        "mode, nbits, shape",
+        itertools.product(
+            ["KMEANS", "UNIFORM", "UNIQUE"],
+            [1, 2, 4, 6, 8],
+            [
+                (1,),
+                (1, 1),
+                (1, 10),
+                (2, 20),
+                (3, 7, 9),
+                (17, 17, 17),
+            ]
+        ),
+    )
+    def test_palettizer_compression(self, mode, nbits, shape):
+        val_size = np.prod(shape)
+        max_val = 2 ** nbits
+        val = np.arange(max_val).tolist()
+        val = np.array(val * (val_size // max_val + 1))[:val_size].astype(np.float32)
+        params = quantization.WeightPalettizer.compress(val, mode=mode, nbits=nbits)
+        decompressed_val = quantization.WeightPalettizer.decompress(params)
+
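+        # Concretely, with nbits=2 the palette has 2 ** 2 = 4 entries and `val`
+        # tiles {0.0, 1.0, 2.0, 3.0}, so each element can map exactly onto a
+        # palette entry (an illustration of the construction above).
+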
+        # For
+        # 1. UNIQUE / KMEANS mode
+        # 2. UNIFORM mode when the number of palette values (2 ** nbits) <= tensor size
+        # we can perfectly reconstruct the original values.
+        if (mode in ["UNIQUE", "KMEANS"]) or (mode == "UNIFORM" and max_val <= val_size):
+            np.testing.assert_allclose(val, decompressed_val, rtol=1e-02, atol=1e-02)
+
+
+class TestFP16CastTransform(unittest.TestCase):
+    """
+    Input graph:
+        input -----> square -----> out
+
+    Output graph:
+        input -----> cast(dtype="fp16") -----> square -----> cast(dtype="fp32") ---> out
+    """
+
+    def test_single_input_to_single_operation(self):
+        @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))])
+        def prog(x):
+            x = mb.square(x=x)
+            return x
+
+        self.assertEqual(get_op_types_in_program(prog), ["square"])
+
+        apply_pass_and_basic_check(
+            prog, quantization.FP16ComputePrecision(op_selector=lambda op: True)
+        )
+        _, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination")
+
+        self.assertEqual(get_op_types_in_program(prog), ["cast", "square", "cast"])
+
+        # Asserting first cast configuration
+        cast_1 = block.find_ops(op_type="cast")[0]
+        self.assertEqual(cast_1.dtype.val, "fp16")
+        self.assertEqual(len(cast_1.outputs), 1)
+        self.assertEqual(len(cast_1.outputs[0].child_ops), 1)
+        self.assertEqual(cast_1.outputs[0].child_ops[0].op_type, "square")
+
+        # Asserting second cast configuration
+        cast_2 = block.find_ops(op_type="cast")[1]
+        self.assertEqual(cast_2.dtype.val, "fp32")
+        self.assertEqual(len(cast_2.outputs), 1)
+        self.assertEqual(len(cast_2.outputs[0].child_ops), 0)
+
+        assert_model_is_valid(
+            prog,
+            {"x": (10, 20)},
+            expected_output_shapes={block.outputs[0].name: (10, 20)},
+        )
+
+    """
+    Input graph:
+        input -----> div -----> out
+                      ^
+        const(eps) ---|
+
+    Output graph:
+        input --------> cast(dtype="fp16") -----> div -----> cast(dtype="fp32") ---> out
+                                                   ^
+        const(eps) ---> cast(dtype="fp16") --------|
+    """
+
+    def test_divide_by_zero_operation(self):
+        @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))])
+        def prog(x):
+            eps = mb.const(val=1e-10)
+            x = mb.real_div(x=x, y=eps)
+            return x
+
+        prev_prog, prev_block, block = apply_pass_and_basic_check(
+            prog, quantization.FP16ComputePrecision(op_selector=lambda op: True)
+        )
+
+        mlmodel = ct.convert(prog, source="milinternal", compute_units=ct.ComputeUnit.CPU_ONLY)
+        input_dict = {"x": np.random.rand(10, 20)}
+
+        if _IS_MACOS:
+            prediction = mlmodel.predict(input_dict)
+            assert not np.isnan(prediction["real_div_0"]).any()
+            assert np.isfinite(prediction["real_div_0"]).all()
+
+    """
+    Input graph:
+        input1 ----->|
+                     concat -----> out
+        input2 ----->|
+
+    Output graph:
+        input1 -----> cast(dtype="fp16") ----->|
+                                               concat -----> cast(dtype="fp32") ---> out
+        input2 -----> cast(dtype="fp16") ----->|
+
+    """
+
+    def test_multiple_inputs_to_single_operation(self):
+        @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20)), mb.TensorSpec(shape=(10, 20))])
+        def prog(x, y):
+            x = mb.concat(values=(x, y), axis=0)
+            return x
+
+        self.assertEqual(get_op_types_in_program(prog), ["concat"])
+
+        apply_pass_and_basic_check(
+            prog, quantization.FP16ComputePrecision(op_selector=lambda op: True)
+        )
+        _, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination")
+
+        self.assertEqual(get_op_types_in_program(prog), ["cast", "cast", "concat", "cast"])
+
+        # Asserting first cast configuration
+        cast_1 = block.find_ops(op_type="cast")[0]
+        self.assertEqual(cast_1.dtype.val, "fp16")
+        self.assertEqual(len(cast_1.outputs), 1)
+        self.assertEqual(len(cast_1.outputs[0].child_ops), 1)
+
self.assertEqual(cast_1.outputs[0].child_ops[0].op_type, "concat") + + # Asserting second cast configuration + cast_2 = block.find_ops(op_type="cast")[1] + self.assertEqual(cast_2.dtype.val, "fp16") + self.assertEqual(len(cast_2.outputs), 1) + self.assertEqual(len(cast_2.outputs[0].child_ops), 1) + self.assertEqual(cast_2.outputs[0].child_ops[0].op_type, "concat") + + # Asserting third cast configuration + cast_3 = block.find_ops(op_type="cast")[2] + self.assertEqual(cast_3.dtype.val, "fp32") + self.assertEqual(len(cast_3.outputs), 1) + self.assertEqual(len(cast_3.outputs[0].child_ops), 0) + + assert_model_is_valid( + prog, + {"x": (10, 20), "y": (10, 20)}, + expected_output_shapes={block.outputs[0].name: (20, 20)}, + ) + + """ + Input graph: + |-----> output_1 + input -----> split + |-----> output_2 + + Output graph: + + |-----> cast(dtype="fp32") ---> output_1 + input -----> cast(dtype="fp16") -----> split + |-----> cast(dtype="fp32") ---> output_2 + + """ + + def test_multiple_outputs_from_single_operation(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + x = mb.split(x=x, axis=0, num_splits=2) + return x + + self.assertEqual(get_op_types_in_program(prog), ["split"]) + + apply_pass_and_basic_check( + prog, quantization.FP16ComputePrecision(op_selector=lambda op: True) + ) + _, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination") + + self.assertEqual(get_op_types_in_program(prog), ["cast", "split", "cast", "cast"]) + + # Asserting first cast configuration + cast_1 = block.find_ops(op_type="cast")[0] + self.assertEqual(cast_1.dtype.val, "fp16") + self.assertEqual(len(cast_1.outputs), 1) + self.assertEqual(len(cast_1.outputs[0].child_ops), 1) + self.assertEqual(cast_1.outputs[0].child_ops[0].op_type, "split") + + # Asserting second cast configuration + cast_2 = block.find_ops(op_type="cast")[1] + self.assertEqual(cast_2.dtype.val, "fp32") + self.assertEqual(len(cast_2.outputs), 1) + self.assertEqual(len(cast_2.outputs[0].child_ops), 0) + + # Asserting third cast configuration + cast_3 = block.find_ops(op_type="cast")[2] + self.assertEqual(cast_3.dtype.val, "fp32") + self.assertEqual(len(cast_3.outputs), 1) + self.assertEqual(len(cast_3.outputs[0].child_ops), 0) + + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={block.outputs[0].name: (5, 20), block.outputs[1].name: (5, 20)}, + ) + + """ + Input graph: + + |----> square ---> output_1 + input| + |----> relu ---> output_2 + + Output graph: + + |---->square-----> cast(dtype="fp32") ---> output_1 + input -----> cast(dtype="fp16") + |----> relu -----> cast(dtype="fp32") ---> output_2 + + """ + + def test_single_input_to_multiple_operations(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + y = mb.square(x=x) + z = mb.relu(x=x) + return y, z + + self.assertEqual(get_op_types_in_program(prog), ["square", "relu"]) + + apply_pass_and_basic_check( + prog, quantization.FP16ComputePrecision(op_selector=lambda op: True) + ) + _, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination") + + self.assertEqual(get_op_types_in_program(prog), ["cast", "square", "cast", "relu", "cast"]) + + # Asserting first cast configuration + cast_1 = block.find_ops(op_type="cast")[0] + self.assertEqual(cast_1.dtype.val, "fp16") + self.assertEqual(len(cast_1.outputs), 1) + self.assertEqual(len(cast_1.outputs[0].child_ops), 2) + self.assertEqual(cast_1.outputs[0].child_ops[0].op_type, "square") + 
self.assertEqual(cast_1.outputs[0].child_ops[1].op_type, "relu") + + # Asserting second cast configuration + cast_2 = block.find_ops(op_type="cast")[1] + self.assertEqual(cast_2.dtype.val, "fp32") + self.assertEqual(len(cast_2.outputs), 1) + self.assertEqual(len(cast_2.outputs[0].child_ops), 0) + + # Asserting third cast configuration + cast_3 = block.find_ops(op_type="cast")[2] + self.assertEqual(cast_3.dtype.val, "fp32") + self.assertEqual(len(cast_3.outputs), 1) + self.assertEqual(len(cast_3.outputs[0].child_ops), 0) + + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={ + block.outputs[0].name: (10, 20), + block.outputs[1].name: (10, 20), + }, + ) + + def test_duplicate_output_vars(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2))]) + def prog(x): + relu1 = mb.relu(x=x) + return relu1, relu1 + + _, _, block = apply_pass_and_basic_check( + prog, quantization.FP16ComputePrecision(op_selector=lambda op: True) + ) + self.assertEqual(get_op_types_in_program(prog), ["cast", "relu", "cast"]) + + assert_model_is_valid( + prog, + {"x": (1, 2)}, + expected_output_shapes={block.outputs[0].name: (1, 2), block.outputs[1].name: (1, 2)}, + backend=("mlprogram", "fp16"), + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/tests/test_reduce_transposes_pass.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/tests/test_reduce_transposes_pass.py new file mode 100644 index 00000000..c42a5b44 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/passes/tests/test_reduce_transposes_pass.py @@ -0,0 +1,1967 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import unittest + +import numpy as np +import pytest + +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import get_new_symbol +from coremltools.converters.mil.mil.passes.defs.optimize_repeat_ops import TransformAxisUpdateOps +from coremltools.converters.mil.mil.passes.pass_registry import PASS_REGISTRY +from coremltools.converters.mil.testing_utils import ( + apply_pass_and_basic_check, + assert_model_is_valid, + get_op_types_in_program, +) + +np.random.seed(1984) + + +class TransposeOptimizationPass(unittest.TestCase): + """ + Input graph: + input -----> transpose(axis=[1,0]) -----> transpose(axis=[1,0]) ---> out + + Output graph: + input -----> identity -----> out + """ + + def test_simple_consecutive_ops_fusion_direct_output(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + x = mb.transpose(x=x, perm=[1, 0]) + x = mb.transpose(x=x, perm=[1, 0]) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual(get_op_types_in_program(prev_prog), ["transpose", "transpose"]) + self.assertEqual(get_op_types_in_program(prog), ["identity"]) + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={block.outputs[0].name: (10, 20)}, + ) + + """ + Input graph: + input -----> transpose(axis=[1,0]) -----> transpose(axis=[1,0]) ----> relu ---> out + + Output graph: + input -----> relu -----> out + """ + + def test_simple_consecutive_ops_fusion(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + x = mb.transpose(x=x, perm=[1, 0]) + x = mb.transpose(x=x, 
perm=[1, 0]) + x = mb.relu(x=x) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual(get_op_types_in_program(prev_prog), ["transpose", "transpose", "relu"]) + self.assertEqual(get_op_types_in_program(prog), ["relu"]) + assert_model_is_valid( + prog, + {"x": (10, 20)}, + expected_output_shapes={block.outputs[0].name: (10, 20)}, + ) + + """ + Input graph: + input---->transpose(axis=[0,3,1,2])---->relu---->log--->transpose(axis=[0,2,3,1])--->relu--->out + + Output graph: + input----->relu----->log----->relu--->out + """ + + def test_linear_graph_two_op_fusion(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 3, 4))]) + def prog(x): + x = mb.transpose(x=x, perm=[0, 3, 1, 2]) + x = mb.relu(x=x) + x = mb.log(x=x) + x = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x = mb.relu(x=x) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "relu", "log", "transpose", "relu"], + ) + self.assertEqual(get_op_types_in_program(prog), ["relu", "log", "relu"]) + assert_model_is_valid( + prog, + {"x": (1, 2, 3, 4)}, + expected_output_shapes={block.outputs[0].name: (1, 2, 3, 4)}, + ) + + """ + Input graph: + input---->transpose(axis=[0,3,1,2])---->relu---->identity--->transpose(axis=[0,2,3,1])--->relu--->out + + Output graph: + input----->relu----->identity----->relu--->out + """ + + def test_linear_graph_two_op_fusion_1(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 3, 4))]) + def prog(x): + x = mb.transpose(x=x, perm=[0, 3, 1, 2]) + x = mb.relu(x=x) + x = mb.identity(x=x) + x = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x = mb.relu(x=x) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "relu", "identity", "transpose", "relu"], + ) + self.assertEqual(get_op_types_in_program(prog), ["relu", "identity", "relu"]) + assert_model_is_valid( + prog, + {"x": (1, 2, 3, 4)}, + expected_output_shapes={block.outputs[0].name: (1, 2, 3, 4)}, + ) + + """ + Input graph: + input(shape=1,2,3,4)---->transpose(axis=[0,3,1,2])---->relu---->log--->transpose(axis=[0,2,3,1])--->relu--->out1(shape=1,2,3,4) + | + v + out2(shape=1,4,2,3) + + Output graph: + input(shape=1,2,3,4)---->relu---->log--->relu--->out1(shape=1,2,3,4) + | + |----->transpose(axis=[0,3,1,2])----->out2(shape=1,4,2,3) + """ + + def test_fusion_with_output_edge_inbetween(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 3, 4))]) + def prog(x): + x = mb.transpose(x=x, perm=[0, 3, 1, 2]) + x1 = mb.relu(x=x) + x2 = mb.log(x=x1) + x3 = mb.transpose(x=x2, perm=[0, 2, 3, 1]) + x4 = mb.relu(x=x3) + return x4, x1 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "relu", "log", "transpose", "relu"], + ) + self.assertEqual(get_op_types_in_program(prog), ["relu", "log", "relu", "transpose"]) + assert_model_is_valid( + prog, + {"x": (1, 2, 3, 4)}, + expected_output_shapes={ + block.outputs[0].name: (1, 2, 3, 4), + block.outputs[1].name: (1, 4, 2, 3), + }, + ) + + """ + Input graph: + input---->transpose(axis=[0,3,1,2])---->relu---->transpose(axis=[0,2,3,1])--->out + + Output graph: + input----->relu----->out + """ + + def test_linear_graph_two_op_fusion_with_last_op_removal(self): + 
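# The two complementary transposes cancel through the elementwise relu,
+ # so only the relu op should remain after the pass (asserted below).
+ 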
@mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 3, 4))]) + def prog(x): + x = mb.transpose(x=x, perm=[0, 3, 1, 2]) + x = mb.relu(x=x) + x = mb.transpose(x=x, perm=[0, 2, 3, 1]) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual(get_op_types_in_program(prev_prog), ["transpose", "relu", "transpose"]) + self.assertEqual(get_op_types_in_program(prog), ["relu"]) + assert_model_is_valid( + prog, + {"x": (1, 2, 3, 4)}, + expected_output_shapes={block.outputs[0].name: (1, 2, 3, 4)}, + ) + + """ + Input graph: + input(shape=10,2,3)--->transpose(axis=[0,2,1])----->relu---->transpose(axis=[0,2,1])---->out1 + | + | + --->relu----->log---->transpose(axis=[0,2,1])---->out2 + + Output graph: + input(shape=10,2,3)----->relu---->out1 + | + | + --->relu----->log---->out2 + """ + + def test_multiple_fusions(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 2, 3))]) + def prog(x): + x = mb.transpose(x=x, perm=[0, 2, 1]) + x1 = mb.relu(x=x) + x2 = mb.relu(x=x) + y1 = mb.transpose(x=x1, perm=[0, 2, 1]) + x3 = mb.log(x=x2) + y2 = mb.transpose(x=x3, perm=[0, 2, 1]) + return y1, y2 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "relu", "relu", "transpose", "log", "transpose"], + ) + self.assertEqual(get_op_types_in_program(prog), ["relu", "relu", "log"]) + + assert prev_block.inputs["x"] == prev_block.find_ops(op_type="transpose")[0].inputs["x"] + assert block.find_ops(op_type="log")[0].outputs[0] in block.outputs + assert_model_is_valid( + prog, + {"x": (10, 2, 3)}, + expected_output_shapes={ + block.outputs[0].name: (10, 2, 3), + block.outputs[1].name: (10, 2, 3), + }, + ) + + """ + Input graph: + input(shape=10,2,3,5)--->transpose(axis=[0,2,3,1])----->relu---->pool----->out1 + | + | + --->relu----->log---->transpose(axis=[0,3,1,2])---->out2 + + + Output graph: + input(shape=10,2,3,5)----->relu---->transpose(axis=[0,2,3,1])---->pool----->out1 + | + | + --->relu----->log---->out2 + """ + + def test_partial_fusion_0(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 2, 3, 5))]) + def prog(x): + x = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x1 = mb.relu(x=x) + x2 = mb.relu(x=x) + y1 = mb.avg_pool(x=x1, kernel_sizes=[1, 1], strides=[1, 1], pad_type="valid") + x3 = mb.log(x=x2) + y2 = mb.transpose(x=x3, perm=[0, 3, 1, 2]) + return y1, y2 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "relu", "relu", "avg_pool", "log", "transpose"], + ) + self.assertEqual( + get_op_types_in_program(prog), + ["relu", "relu", "transpose", "avg_pool", "log"], + ) + + assert prev_block.inputs["x"] == prev_block.find_ops(op_type="transpose")[0].inputs["x"] + assert block.find_ops(op_type="log")[0].outputs[0] == block.outputs[1] + assert ( + block.find_ops(op_type="transpose")[0].outputs[0] + == block.find_ops(op_type="avg_pool")[0].inputs["x"] + ) + assert list(block.find_ops(op_type="transpose")[0].perm.val) == [0, 2, 3, 1] + assert_model_is_valid( + prog, + {"x": (10, 2, 3, 5)}, + expected_output_shapes={ + block.outputs[0].name: (10, 3, 5, 2), + block.outputs[1].name: (10, 2, 3, 5), + }, + ) + + """ + Input graph: + input(shape=10,2,3,5)--->transpose(axis=[0,2,1,3])----->relu---->transpose(axis=[0,2,1,3])---->out1 + | + | + --->pool--->log---->transpose(axis=[0,2,1,3])---->out2 + + Output graph: + 
input(shape=10,2,3,5)----->relu---->out1
+ |
+ |
+ --->transpose(axis=[0,2,1,3])---->pool----->log---->transpose(axis=[0,2,1,3])---->out2
+ """
+
+ def test_partial_fusion_1(self):
+ @mb.program(input_specs=[mb.TensorSpec(shape=(10, 2, 3, 5))])
+ def prog(x):
+ x = mb.transpose(x=x, perm=[0, 2, 1, 3])
+ x1 = mb.relu(x=x)
+ x2 = mb.avg_pool(x=x, kernel_sizes=[1, 1], strides=[1, 1], pad_type="valid")
+ y1 = mb.transpose(x=x1, perm=[0, 2, 1, 3])
+ x3 = mb.log(x=x2)
+ y2 = mb.transpose(x=x3, perm=[0, 2, 1, 3])
+ return y1, y2
+
+ prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes")
+ self.assertEqual(
+ get_op_types_in_program(prev_prog),
+ ["transpose", "relu", "avg_pool", "transpose", "log", "transpose"],
+ )
+ self.assertEqual(
+ get_op_types_in_program(prog),
+ ["relu", "transpose", "avg_pool", "log", "transpose"],
+ )
+
+ assert block.inputs["x"] == block.find_ops(op_type="relu")[0].inputs["x"]
+ assert block.outputs[0] == block.find_ops(op_type="relu")[0].outputs[0]
+ assert_model_is_valid(
+ prog,
+ {"x": (10, 2, 3, 5)},
+ expected_output_shapes={
+ block.outputs[0].name: (10, 2, 3, 5),
+ block.outputs[1].name: (10, 2, 3, 5),
+ },
+ )
+
+ """
+ Input graph:
+
+ |-------> transpose(axis=[0,2,1,3]) ---->out1(shape=10,2,3,5)
+ |
+ input(shape=10,2,3,5)-->relu-->transpose(axis=[0,2,1,3])--->relu--->transpose(axis=[0,2,1,3]) ---->out2(shape=10,2,3,5)
+ |
+ |----->pool--------------->out3(shape=10,3,2,5)
+ |
+ |----->pool--------------->out4(shape=10,3,2,5)
+
+
+ Output graph:
+
+ |---->out1(shape=10,2,3,5)
+ |
+ input---->relu---------->relu------->out2(shape=10,2,3,5)
+ |
+ |----->transpose(axis=[0,2,1,3])--->pool---->out3(shape=10,3,2,5)
+ |
+ |----->transpose(axis=[0,2,1,3])---->pool--->out4(shape=10,3,2,5)
+ """
+
+ def test_partial_fusion_2(self):
+ @mb.program(input_specs=[mb.TensorSpec(shape=(10, 2, 3, 5))])
+ def prog(x):
+ x = mb.relu(x=x)
+ x = mb.transpose(x=x, perm=[0, 2, 1, 3])
+ y1 = mb.transpose(x=x, perm=[0, 2, 1, 3])
+ x1 = mb.relu(x=x)
+ y2 = mb.transpose(x=x1, perm=[0, 2, 1, 3])
+ y3 = mb.avg_pool(x=x1, kernel_sizes=[1, 1], strides=[1, 1], pad_type="valid")
+ y4 = mb.avg_pool(x=x1, kernel_sizes=[1, 1], strides=[1, 1], pad_type="valid")
+ return y1, y2, y3, y4
+
+ prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes")
+ self.assertEqual(
+ get_op_types_in_program(prev_prog),
+ [
+ "relu",
+ "transpose",
+ "transpose",
+ "relu",
+ "transpose",
+ "avg_pool",
+ "avg_pool",
+ ],
+ )
+ self.assertEqual(
+ get_op_types_in_program(prog),
+ ["relu", "relu", "transpose", "avg_pool", "transpose", "avg_pool"],
+ )
+
+ assert block.outputs[0] == block.find_ops(op_type="relu")[0].outputs[0]
+ assert block.outputs[1] == block.find_ops(op_type="relu")[1].outputs[0]
+ assert_model_is_valid(
+ prog,
+ {"x": (10, 2, 3, 5)},
+ expected_output_shapes={
+ # Two consecutive relus are merged, so the first two outputs have the same name. See
+ # `test_name_change_depend_on_output` in TestMergeConsecutiveRelus.
+ block.outputs[1].name: (10, 2, 3, 5),
+ block.outputs[2].name: (10, 3, 2, 5),
+ block.outputs[3].name: (10, 3, 2, 5),
+ },
+ # rdar://100243127 ([PyTorch] Duplicate Output Tensor Doesn't work for neuralnetwork). 
+ backend=("mlprogram", "fp16"), + ) + + """ + Input graph: + + input(shape=10,2,3,5)-->relu--->transpose(axis=[0,2,1,3])----->transpose(axis=[0,2,1,3])---->out1(shape=10,2,3,5) + | + ---->relu------>out2(shape=10,3,2,5) + + Output graph: + + input(shape=10,2,3,5)-->relu---->out1(shape=10,2,3,5) + | + ---->relu--->transpose(axis=[0,2,1,3])------>out2(shape=10,3,2,5) + """ + + def test_partial_fusion_3(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 2, 3, 5))]) + def prog(x): + x = mb.relu(x=x) + x = mb.transpose(x=x, perm=[0, 2, 1, 3]) + x1 = mb.transpose(x=x, perm=[0, 2, 1, 3]) + x2 = mb.relu(x=x) + return x1, x2 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["relu", "transpose", "transpose", "relu"], + ) + self.assertEqual(get_op_types_in_program(prog), ["relu", "relu", "transpose"]) + + assert block.outputs[0] == block.find_ops(op_type="relu")[0].outputs[0] + assert_model_is_valid( + prog, + {"x": (10, 2, 3, 5)}, + expected_output_shapes={ + block.outputs[0].name: (10, 2, 3, 5), + block.outputs[1].name: (10, 3, 2, 5), + }, + ) + + """ + Input graph: + + input(shape=10,2,3,5)-->relu--->transpose(axis=[0,2,1,3])----->transpose(axis=[0,2,1,3])---->out1(shape=10,2,3,5) + | + ------>out2(shape=10,3,2,5) + + Output graph: + same as input graph as one of the optimizing transpose is connected to model output + """ + + def test_partial_fusion_4(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 2, 3, 5))]) + def prog(x): + x = mb.relu(x=x) + out2 = mb.transpose(x=x, perm=[0, 2, 1, 3]) + out1 = mb.transpose(x=out2, perm=[0, 2, 1, 3]) + return out1, out2 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual(get_op_types_in_program(prev_prog), ["relu", "transpose", "transpose"]) + self.assertEqual(get_op_types_in_program(prog), ["relu", "transpose", "transpose"]) + + assert block.outputs[1] == block.find_ops(op_type="transpose")[0].outputs[0] + assert_model_is_valid( + prog, + {"x": (10, 2, 3, 5)}, + expected_output_shapes={ + block.outputs[0].name: (10, 2, 3, 5), + block.outputs[1].name: (10, 3, 2, 5), + }, + ) + + """ + Input graph: + input(shape=10,2,3,5)-->relu-->transpose(axis=[0,2,1,3])--->relu--->transpose(axis=[0,2,1,3]) ---->out1(shape=10,2,3,5) + | + |--->relu-->pool--------------->out2(shape=10,3,2,5) + | + |----->pool--------------->out3(shape=10.3.2.5) + + + Output graph: + same as the input graph as materialization ops are greater than cancel ops + """ + + def test_no_fusion_more_materialization_ops(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 2, 3, 5))]) + def prog(x): + x = mb.relu(x=x) + x = mb.transpose(x=x, perm=[0, 2, 1, 3]) + x1 = mb.relu(x=x) + y2 = mb.transpose(x=x1, perm=[0, 2, 1, 3]) + x2 = mb.relu(x=x1) + y3 = mb.avg_pool(x=x2, kernel_sizes=[1, 1], strides=[1, 1], pad_type="valid") + y4 = mb.avg_pool(x=x1, kernel_sizes=[1, 1], strides=[1, 1], pad_type="valid") + return y2, y3, y4 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["relu", "transpose", "relu", "transpose", "relu", "avg_pool", "avg_pool"], + ) + self.assertEqual( + get_op_types_in_program(prog), + ["relu", "transpose", "relu", "transpose", "relu", "avg_pool", "avg_pool"], + ) + + assert_model_is_valid( + prog, + {"x": (10, 2, 3, 5)}, + expected_output_shapes={ + block.outputs[0].name: (10, 2, 3, 5), + 
block.outputs[1].name: (10, 3, 2, 5),
+ block.outputs[2].name: (10, 3, 2, 5),
+ },
+ )
+
+ """
+ Input graph:
+ input(shape=10,2,3)--->transpose(axis=[0,2,1])----->relu---->transpose(axis=[0,2,1])---->out1
+ |
+ |
+ --->reduce(axis=2)----->log---->transpose(axis=[0,2,1])---->out2
+
+ Output graph:
+ input(shape=10,2,3)----->relu---->out1
+ |
+ |
+ --->reduce(axis=1)----->log---->out2
+ """
+
+ def test_fusion_with_axis_op(self):
+ @mb.program(input_specs=[mb.TensorSpec(shape=(10, 2, 3))])
+ def prog(x):
+ x = mb.transpose(x=x, perm=[0, 2, 1])
+ x1 = mb.relu(x=x)
+ x2 = mb.reduce_mean(x=x, axes=[2], keep_dims=True)
+ y1 = mb.transpose(x=x1, perm=[0, 2, 1])
+ x3 = mb.log(x=x2)
+ y2 = mb.transpose(x=x3, perm=[0, 2, 1])
+ return y1, y2
+
+ prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes")
+ self.assertEqual(
+ get_op_types_in_program(prev_prog),
+ ["transpose", "relu", "reduce_mean", "transpose", "log", "transpose"],
+ )
+ self.assertEqual(get_op_types_in_program(prog), ["relu", "reduce_mean", "log"])
+
+ assert list(block.find_ops(op_type="reduce_mean")[0].inputs["axes"].val) == [1]
+ assert_model_is_valid(
+ prog,
+ {"x": (10, 2, 3)},
+ expected_output_shapes={
+ block.outputs[0].name: (10, 2, 3),
+ block.outputs[1].name: (10, 1, 3),
+ },
+ )
+
+ """
+ Input graph:
+ input(shape=11,2,3,6)--->transpose(axis=[0,3,1,2])---
+ |
+ |
+ --->pad(pad=[0,0,0,0,1,2,3,4])
+ |
+ |-->log--->transpose(axis=[0,2,3,1])-->out1(shape=11,5,10,6)
+
+ Output graph:
+ same as input graph, as transpose cannot be pushed through the pad op since "reflect" mode is only supported
+ along the last two axes
+ """
+
+ def test_fusion_with_pad_reflective_op_0(self):
+ @mb.program(input_specs=[mb.TensorSpec(shape=(11, 2, 3, 6))])
+ def prog(x):
+ x = mb.transpose(x=x, perm=[0, 3, 1, 2])
+ x2 = mb.pad(x=x, pad=[0, 0, 0, 0, 1, 2, 3, 4], mode="reflect")
+ x3 = mb.log(x=x2)
+ y2 = mb.transpose(x=x3, perm=[0, 2, 3, 1])
+ return y2
+
+ prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes")
+ self.assertEqual(
+ get_op_types_in_program(prev_prog), ["transpose", "pad", "log", "transpose"]
+ )
+ self.assertEqual(get_op_types_in_program(prog), ["transpose", "pad", "log", "transpose"])
+
+ assert list(block.find_ops(op_type="pad")[0].inputs["pad"].val.flatten()) == [
+ 0,
+ 0,
+ 0,
+ 0,
+ 1,
+ 2,
+ 3,
+ 4,
+ ]
+ assert_model_is_valid(
+ prog,
+ {"x": (11, 2, 3, 6)},
+ expected_output_shapes={block.outputs[0].name: (11, 5, 10, 6)},
+ )
+
+ """
+ Input graph:
+ input(shape=11,2,3,6)--->transpose(axis=[0,1,3,2])---
+ |
+ |
+ --->pad(pad=[0,0,0,0,1,2,3,4])
+ |
+ |-->log--->transpose(axis=[0,1,3,2])-->out1(shape=11,2,10,9)
+
+ Output graph:
+ input(shape=11,2,3,6)--->pad(pad=[0,0,0,0,3,4,1,2])-->log-->out1(shape=11,2,10,9)
+ """
+
+ def test_fusion_with_pad_reflective_op_1(self):
+ @mb.program(input_specs=[mb.TensorSpec(shape=(11, 2, 3, 6))])
+ def prog(x):
+ x = mb.transpose(x=x, perm=[0, 1, 3, 2])
+ x2 = mb.pad(x=x, pad=[0, 0, 0, 0, 1, 2, 3, 4], mode="reflect")
+ x3 = mb.log(x=x2)
+ y2 = mb.transpose(x=x3, perm=[0, 1, 3, 2])
+ return y2
+
+ prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes")
+ self.assertEqual(
+ get_op_types_in_program(prev_prog), ["transpose", "pad", "log", "transpose"]
+ )
+ self.assertEqual(get_op_types_in_program(prog), ["pad", "log"])
+
+ assert list(block.find_ops(op_type="pad")[0].inputs["pad"].val.flatten()) == [
+ 0,
+ 0,
+ 0,
+ 0,
+ 3,
+ 4,
+ 1,
+ 2,
+ ]
+ assert_model_is_valid(
+ prog,
+ 
{"x": (11, 2, 3, 6)}, + expected_output_shapes={block.outputs[0].name: (11, 2, 10, 9)}, + ) + + """ + Input graph: + input(shape=11,2,3,6)--->transpose(axis=[0,3,1,2])--- + | + | + --->pad(pad=[0,0,0,0,1,2,3,4]) + | + |-->log--->transpose(axis=[0,2,3,1])-->out1(shape=11,5,10,6) + + Output graph: + input(shape=11,2,3,6)--->pad(pad=[0,0,1,2,3,4,0,0])-->log-->out1(shape=11,5,10,6) + """ + + def test_fusion_with_pad_constant_op(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(11, 2, 3, 6))]) + def prog(x): + x = mb.transpose(x=x, perm=[0, 3, 1, 2]) + x2 = mb.pad(x=x, pad=[0, 0, 0, 0, 1, 2, 3, 4], mode="constant", constant_val=3.0) + x3 = mb.log(x=x2) + y2 = mb.transpose(x=x3, perm=[0, 2, 3, 1]) + return y2 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), ["transpose", "pad", "log", "transpose"] + ) + self.assertEqual(get_op_types_in_program(prog), ["pad", "log"]) + + assert list(block.find_ops(op_type="pad")[0].inputs["pad"].val.flatten()) == [ + 0, + 0, + 1, + 2, + 3, + 4, + 0, + 0, + ] + assert_model_is_valid( + prog, + {"x": (11, 2, 3, 6)}, + expected_output_shapes={block.outputs[0].name: (11, 5, 10, 6)}, + ) + + """ + Input graph: + const(shape=2) + | + V + input(shape=1,2,5,5)--->transpose(axis=[0,2,3,1])--->add---->transpose(axis=[0,3,1,2])--->out(shape=1,2,5,5) + + Output graph: + const(shape=1,2,1,1) + | + V + input(shape=1,2,5,5)--->add--->out(shape=1,2,5,5) + """ + + def test_fusion_with_add_constant_op(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 5, 5))]) + def prog(x): + x = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x = mb.add(x=x, y=np.array([10.0, 100.0])) + x = mb.transpose(x=x, perm=[0, 3, 1, 2]) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual(get_op_types_in_program(prev_prog), ["transpose", "add", "transpose"]) + self.assertEqual(get_op_types_in_program(prog), ["add"]) + + assert_model_is_valid( + prog, + {"x": (1, 2, 5, 5)}, + expected_output_shapes={block.outputs[0].name: (1, 2, 5, 5)}, + ) + + """ + Input graph: + const(scalar) + | + V + input(shape=1,2,5,5)--->transpose(axis=[0,2,3,1])--->add---->transpose(axis=[0,3,1,2])--->out(shape=1,2,5,5) + + Output graph: + const(scalar) + | + V + input(shape=1,2,5,5)--->add--->out(shape=1,2,5,5) + """ + + def test_fusion_with_add_scalar_constant_op(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 5, 5))]) + def prog(x): + x = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x = mb.add(x=5.0, y=x) + x = mb.transpose(x=x, perm=[0, 3, 1, 2]) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual(get_op_types_in_program(prev_prog), ["transpose", "add", "transpose"]) + self.assertEqual(get_op_types_in_program(prog), ["add"]) + + assert_model_is_valid( + prog, + {"x": (1, 2, 5, 5)}, + expected_output_shapes={block.outputs[0].name: (1, 2, 5, 5)}, + ) + + """ + Input graph: + input(shape=1,2,5,5)----->transpose(axis=[0,2,3,1])--->add---->transpose(axis=[0,3,1,2])--->out(shape=1,2,5,5) + | ^ + | | + |---->relu---->transpose(axis=[0,2,3,1]) + + Output graph: + input(shape=1,2,5,5)----->add--->out(shape=1,2,5,5) + | ^ + | | + |------>relu + """ + + def test_fusion_with_add_broadcastable_0(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 5, 5))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x2 = mb.relu(x=x) + x2 = mb.transpose(x=x2, perm=[0, 
2, 3, 1]) + x3 = mb.add(x=x1, y=x2) + y = mb.transpose(x=x3, perm=[0, 3, 1, 2]) + return y + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "relu", "transpose", "add", "transpose"], + ) + self.assertEqual(get_op_types_in_program(prog), ["relu", "add"]) + + assert block.find_ops(op_type="relu")[0].inputs["x"] == block.inputs["x"] + assert block.find_ops(op_type="add")[0].inputs["x"] == block.inputs["x"] + assert ( + block.find_ops(op_type="add")[0].inputs["y"] + == block.find_ops(op_type="relu")[0].outputs[0] + ) + + assert_model_is_valid( + prog, + {"x": (1, 2, 5, 5)}, + expected_output_shapes={block.outputs[0].name: (1, 2, 5, 5)}, + ) + + """ + Input graph: + input(shape=1,2,5,5)----->transpose(axis=[0,2,3,1])--->add---->transpose(axis=[0,3,1,2])--->out(shape=1,2,5,5) + | ^ + | | + |----------------------->transpose(axis=[0,2,3,1]) + + Output graph: + input(shape=1,2,5,5)----->add--->out(shape=1,2,5,5) + | ^ + | | + |--------- + """ + + def test_fusion_with_add_broadcastable_1(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 5, 5))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x2 = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x3 = mb.add(x=x1, y=x2) + y = mb.transpose(x=x3, perm=[0, 3, 1, 2]) + return y + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "transpose", "add", "transpose"], + ) + self.assertEqual(get_op_types_in_program(prog), ["add"]) + + assert block.find_ops(op_type="add")[0].inputs["x"] == block.inputs["x"] + assert block.find_ops(op_type="add")[0].inputs["y"] == block.inputs["x"] + + assert_model_is_valid( + prog, + {"x": (1, 2, 5, 5)}, + expected_output_shapes={block.outputs[0].name: (1, 2, 5, 5)}, + ) + + """ + Input graph: + input(shape=1,2,5,5)--->transpose(axis=[0,2,3,1])---> relu---->concat(axis=3)----->transpose(axis=[0,3,1,2])----->out1(shape=1,4,5,5) + | ^ + | | + |->transpose(axis=[0,2,3,1])--->relu------------ + + Output graph: + input(shape=1,2,5,5)------> relu---->concat(axis=1)--->out1(shape=1,4,5,5) + | ^ + | | + |---->relu------------ + """ + + def test_concat_pattern_0(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 5, 5))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x2 = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x1 = mb.relu(x=x1) + x2 = mb.relu(x=x2) + x3 = mb.concat(values=[x1, x2], axis=3) + x4 = mb.transpose(x=x3, perm=[0, 3, 1, 2]) + return x4 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "transpose", "relu", "relu", "concat", "transpose"], + ) + self.assertEqual(get_op_types_in_program(prog), ["relu", "relu", "concat"]) + + assert_model_is_valid( + prog, + {"x": (1, 2, 5, 5)}, + expected_output_shapes={block.outputs[0].name: (1, 4, 5, 5)}, + ) + + """ + Input graph: + input(shape=1,2,5,5)--->transpose(axis=[0,2,3,1])---> relu---->concat(axis=3)----->transpose(axis=[0,3,1,2])----->out1(shape=1,4,5,5) + | ^ + | | + |->transpose(axis=[0,2,3,1])------->relu-------- + | + V + pool--->out2(shape=1,5,5,2) + + + + Output graph: + input(shape=1,2,5,5)------> relu---->concat(axis=1)--->out1(shape=1,4,5,5) + | ^ + | | + |---->relu------------ + | + |--->transpose(axis=[0,2,3,1])---->pool--->out2(shape=1,5,5,2) + """ + + def 
test_concat_pattern_1(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 5, 5))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x2 = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x1 = mb.relu(x=x1) + x2 = mb.relu(x=x2) + x3 = mb.concat(values=[x1, x2], axis=3) + x4 = mb.transpose(x=x3, perm=[0, 3, 1, 2]) + x5 = mb.avg_pool(x=x2, kernel_sizes=[1, 1], strides=[1, 1], pad_type="valid") + return x4, x5 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + [ + "transpose", + "transpose", + "relu", + "relu", + "concat", + "transpose", + "avg_pool", + ], + ) + self.assertEqual( + get_op_types_in_program(prog), + ["relu", "relu", "concat", "transpose", "avg_pool"], + ) + assert_model_is_valid( + prog, + {"x": (1, 2, 5, 5)}, + expected_output_shapes={ + block.outputs[0].name: (1, 4, 5, 5), + block.outputs[1].name: (1, 5, 5, 2), + }, + ) + + """ + Input graph: + input(shape=1,2,5,5)--->transpose(axis=[0,2,3,1])---> relu---->concat(axis=3)----->transpose(axis=[0,3,1,2])----->out1(shape=1,4,5,5) + | ^ + | | + |->transpose(axis=[0,2,3,1])------->relu-------- + | + V + relu--->out2(shape=1,5,5,2) + + + + Output graph: + input(shape=1,2,5,5)------> relu---->concat(axis=1)--->out1(shape=1,4,5,5) + | ^ + | | + |---->relu------------ + | + |--->relu---->transpose(axis=[0,2,3,1])---->out2(shape=1,5,5,2) + """ + + def test_concat_pattern_2(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 5, 5))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x2 = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x1 = mb.relu(x=x1) + x2 = mb.relu(x=x2) + x3 = mb.concat(values=[x1, x2], axis=3) + x4 = mb.transpose(x=x3, perm=[0, 3, 1, 2]) + x5 = mb.relu(x=x2) + return x4, x5 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "transpose", "relu", "relu", "concat", "transpose", "relu"], + ) + self.assertEqual( + get_op_types_in_program(prog), + ["relu", "relu", "concat", "relu", "transpose"], + ) + + assert_model_is_valid( + prog, + {"x": (1, 2, 5, 5)}, + expected_output_shapes={ + block.outputs[0].name: (1, 4, 5, 5), + block.outputs[1].name: (1, 5, 5, 2), + }, + ) + + """ + Input graph: + input(shape=1,2,5,5)--->transpose(axis=[0,2,3,1])---> relu---->concat(axis=3)----->transpose(axis=[0,3,1,2])----->out1(shape=1,4,5,5) + | ^ + | | + |->transpose(axis=[0,2,3,1])------->relu-------- + | + V + out2(shape=1,5,5,2) + + + + Output graph: + input(shape=1,2,5,5)------> relu---->concat(axis=1)--->out1(shape=1,4,5,5) + | ^ + | | + |---->relu------------ + | + |--->transpose(axis=[0,2,3,1])---->out2(shape=1,5,5,2) + """ + + def test_concat_pattern_3(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 5, 5))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x2 = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x1 = mb.relu(x=x1) + x2 = mb.relu(x=x2) + x3 = mb.concat(values=[x1, x2], axis=3) + x4 = mb.transpose(x=x3, perm=[0, 3, 1, 2]) + return x4, x2 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "transpose", "relu", "relu", "concat", "transpose"], + ) + self.assertEqual(get_op_types_in_program(prog), ["relu", "relu", "concat", "transpose"]) + + assert_model_is_valid( + prog, + {"x": (1, 2, 5, 5)}, + expected_output_shapes={ + block.outputs[0].name: (1, 
4, 5, 5),
+ block.outputs[1].name: (1, 5, 5, 2),
+ },
+ )
+
+ """
+ Input graph:
+ input(shape=1,2,5,5)--->transpose(axis=[0,2,3,1])---> relu---->concat(axis=3)----->transpose(axis=[0,3,1,2])----->out1(shape=1,4,5,5)
+ | ^
+ | |
+ |->transpose(axis=[0,2,3,1])------->relu--------
+ |
+ V
+ transpose(axis=[0,3,1,2]) -----> out2(shape=1,2,5,5)
+
+ Output graph:
+ input(shape=1,2,5,5)---> relu---->concat(axis=1)----->out1(shape=1,4,5,5)
+ | ^
+ | |
+ |------------------->relu-------->out2(shape=1,2,5,5)
+ """
+
+ def test_concat_pattern_4(self):
+ @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 5, 5))])
+ def prog(x):
+ x1 = mb.transpose(x=x, perm=[0, 2, 3, 1])
+ x2 = mb.transpose(x=x, perm=[0, 2, 3, 1])
+ x1 = mb.relu(x=x1)
+ x2 = mb.relu(x=x2)
+ x3 = mb.concat(values=[x1, x2], axis=3)
+ x4 = mb.transpose(x=x3, perm=[0, 3, 1, 2])
+ x5 = mb.transpose(x=x2, perm=[0, 3, 1, 2])
+ return x4, x5
+
+ prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes")
+ self.assertEqual(
+ get_op_types_in_program(prev_prog),
+ [
+ "transpose",
+ "transpose",
+ "relu",
+ "relu",
+ "concat",
+ "transpose",
+ "transpose",
+ ],
+ )
+ self.assertEqual(get_op_types_in_program(prog), ["relu", "relu", "concat"])
+
+ assert_model_is_valid(
+ prog,
+ {"x": (1, 2, 5, 5)},
+ expected_output_shapes={
+ block.outputs[0].name: (1, 4, 5, 5),
+ block.outputs[1].name: (1, 2, 5, 5),
+ },
+ )
+
+ """
+ Input graph:
+ constant(shape=[30,10,5])
+ |
+ V
+ input(shape=10,20,30)--->transpose(axis=[2,0,1])--->concat(axis=2)----->transpose(axis=[1,2,0])----->out1(shape=10,25,30)
+
+ Output graph:
+ constant(shape=[10,5,30])
+ |
+ V
+ input(shape=10,20,30)--->concat(axis=1)----->out1(shape=10,25,30)
+ """
+
+ def test_concat_pattern_5(self):
+ const = np.random.rand(30, 10, 5)
+
+ @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20, 30))])
+ def prog(x):
+ x1 = mb.transpose(x=x, perm=[2, 0, 1])
+ c = mb.const(val=const)
+ x2 = mb.concat(values=[x1, c], axis=2)
+ x3 = mb.transpose(x=x2, perm=[1, 2, 0])
+ return x3
+
+ prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes")
+ self.assertEqual(get_op_types_in_program(prev_prog), ["transpose", "concat", "transpose"])
+ self.assertEqual(get_op_types_in_program(prog), ["concat"])
+
+ assert_model_is_valid(
+ prog,
+ {"x": (10, 20, 30)},
+ expected_output_shapes={block.outputs[0].name: (10, 25, 30)},
+ )
+
+ """
+ Input graph:
+ input2(shape=30,10,20)-----|
+ |
+ input(shape=10,20,30)--->transpose(axis=[2,0,1])----->relu-----|----->concat(axis=0)------>out1(shape=90,10,20)
+ | |
+ |-->relu-----|
+ |
+ |-->relu---->transpose(axis=[1,2,0])---->out2(shape=10,20,30)
+ |
+ |-->relu---->transpose(axis=[1,2,0])---->out3(shape=10,20,30)
+ |
+ |-->relu---->transpose(axis=[1,2,0])---->out4(shape=10,20,30)
+
+ Output graph:
+
+ input2(shape=30,10,20)-----|
+ |
+ input(shape=10,20,30)----->relu--->transpose(axis=[2,0,1])-----|----->concat(axis=0)------>out1(shape=90,10,20)
+ | |
+ |-->relu--->transpose(axis=[2,0,1])-----|
+ |
+ |-->relu---->out2(shape=10,20,30)
+ |
+ |-->relu---->out3(shape=10,20,30)
+ |
+ |-->relu---->out4(shape=10,20,30)
+ """
+
+ def test_concat_pattern_6(self):
+ @mb.program(
+ input_specs=[
+ mb.TensorSpec(shape=(10, 20, 30)),
+ mb.TensorSpec(shape=(30, 10, 20)),
+ ]
+ )
+ def prog(x, y):
+ x1 = mb.transpose(x=x, perm=[2, 0, 1])
+ r1 = mb.relu(x=x1)
+ r2 = mb.relu(x=x1)
+ r3 = mb.relu(x=x1)
+ r4 = mb.relu(x=x1)
+ r5 = mb.relu(x=x1)
+
+ x2 = mb.concat(values=[r1, r2, y], axis=0)
+ x3 = 
mb.transpose(x=r3, perm=[1, 2, 0])
+ x4 = mb.transpose(x=r4, perm=[1, 2, 0])
+ x5 = mb.transpose(x=r5, perm=[1, 2, 0])
+ return x2, x3, x4, x5
+
+ prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes")
+ self.assertEqual(
+ get_op_types_in_program(prev_prog),
+ [
+ "transpose",
+ "relu",
+ "relu",
+ "relu",
+ "relu",
+ "relu",
+ "concat",
+ "transpose",
+ "transpose",
+ "transpose",
+ ],
+ )
+ self.assertEqual(
+ get_op_types_in_program(prog),
+ [
+ "relu",
+ "relu",
+ "relu",
+ "relu",
+ "relu",
+ "transpose",
+ "transpose",
+ "concat",
+ ],
+ )
+
+ assert_model_is_valid(
+ prog,
+ {"x": (10, 20, 30), "y": (30, 10, 20)},
+ expected_output_shapes={
+ block.outputs[0].name: (90, 10, 20),
+ block.outputs[1].name: (10, 20, 30),
+ block.outputs[2].name: (10, 20, 30),
+ block.outputs[3].name: (10, 20, 30),
+ },
+ )
+
+ """
+ Input graph:
+ input(shape=1,4,5,6)--->transpose(axis=[0,3,2,1])--->relu---->split(axis=1, num_splits=2)----->transpose(axis=[0,3,2,1])----->out1(shape=1,4,5,3)
+ |
+ v
+ transpose(axis=[0,3,2,1])-------------------------->out2(shape=1,4,5,3)
+
+ Output graph:
+ input(shape=1,4,5,6)------> relu ---->split(axis=3)--->out1(shape=1,4,5,3)
+ |
+ v
+ out2(shape=1,4,5,3)
+ """
+
+ def test_split_nd_pattern_0(self):
+ @mb.program(input_specs=[mb.TensorSpec(shape=(1, 4, 5, 6))])
+ def prog(x):
+ x1 = mb.transpose(x=x, perm=[0, 3, 2, 1])
+ x1 = mb.relu(x=x1)
+ x2, x3 = mb.split(x=x1, axis=1, num_splits=2)
+ x4 = mb.transpose(x=x2, perm=[0, 3, 2, 1])
+ x5 = mb.transpose(x=x3, perm=[0, 3, 2, 1])
+ return x4, x5
+
+ prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes")
+ self.assertEqual(
+ get_op_types_in_program(prev_prog),
+ ["transpose", "relu", "split", "transpose", "transpose"],
+ )
+ self.assertEqual(get_op_types_in_program(prog), ["relu", "split"])
+
+ assert_model_is_valid(
+ prog,
+ {"x": (1, 4, 5, 6)},
+ expected_output_shapes={
+ block.outputs[0].name: (1, 4, 5, 3),
+ block.outputs[1].name: (1, 4, 5, 3),
+ },
+ )
+
+ self.assertEqual(block.find_ops(op_type="split")[0].axis.val, 3)
+
+ """
+ Input graph:
+ input(shape=1,4,5,6)--->transpose(axis=[0,3,2,1])--->relu---->split(axis=1, num_splits=6)----->transpose(axis=[0,3,2,1])----->out1(shape=1,4,5,1)
+ |
+ v
+ transpose(axis=[0,3,2,1])-------------------------------------->out2(shape=1,4,5,1)
+
+ Output graph:
+ input(shape=1,4,5,6)------>relu---->split(axis=3)--->out1(shape=1,4,5,1)
+ |
+ v
+ out2(shape=1,4,5,1)
+ """
+
+ def test_split_nd_pattern_1(self):
+ @mb.program(input_specs=[mb.TensorSpec(shape=(1, 4, 5, 6))])
+ def prog(x):
+ x1 = mb.transpose(x=x, perm=[0, 3, 2, 1])
+ x1 = mb.relu(x=x1)
+ x2, x3, x4, x5, x6, x7 = mb.split(x=x1, axis=1, num_splits=6)
+ x2 = mb.transpose(x=x2, perm=[0, 3, 2, 1])
+ x3 = mb.transpose(x=x3, perm=[0, 3, 2, 1])
+ x4 = mb.transpose(x=x4, perm=[0, 3, 2, 1])
+ x5 = mb.transpose(x=x5, perm=[0, 3, 2, 1])
+ x6 = mb.transpose(x=x6, perm=[0, 3, 2, 1])
+ x7 = mb.transpose(x=x7, perm=[0, 3, 2, 1])
+ return x2, x3, x4, x5, x6, x7
+
+ prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes")
+ self.assertEqual(
+ get_op_types_in_program(prev_prog),
+ [
+ "transpose",
+ "relu",
+ "split",
+ "transpose",
+ "transpose",
+ "transpose",
+ "transpose",
+ "transpose",
+ "transpose",
+ ],
+ )
+ self.assertEqual(get_op_types_in_program(prog), ["relu", "split"])
+
+ assert_model_is_valid(
+ prog,
+ {"x": (1, 4, 5, 6)},
+ expected_output_shapes={
+ block.outputs[0].name: (1, 4, 5, 1),
+ 
block.outputs[1].name: (1, 4, 5, 1), + block.outputs[2].name: (1, 4, 5, 1), + block.outputs[3].name: (1, 4, 5, 1), + block.outputs[4].name: (1, 4, 5, 1), + block.outputs[5].name: (1, 4, 5, 1), + }, + ) + + self.assertEqual(block.find_ops(op_type="split")[0].axis.val, 3) + + """ + Input graph: + input(shape=1,4,5,6)--->transpose(axis=[0,3,2,1])---> split(axis=1, num_splits=2) ----> concat(axis=1) ----->transpose(axis=[0,3,2,1]) ----->out1(shape=1,4,5,6) + | ^ + v | + relu() ---------------------- + + Output graph: + input(shape=1,4,5,6)------>split(axis=3)--->concat(axis=3) -------> out1(shape=1,4,5,6) + | ^ + v | + relu() -------------- + """ + + def test_split_nd_pattern_2(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 4, 5, 6))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[0, 3, 2, 1]) + x2, x3 = mb.split(x=x1, axis=1, num_splits=2) + x4 = mb.relu(x=x2) + x5 = mb.concat(values=[x4, x3], axis=1) + x6 = mb.transpose(x=x5, perm=[0, 3, 2, 1]) + return x6 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "split", "relu", "concat", "transpose"], + ) + self.assertEqual(get_op_types_in_program(prog), ["split", "relu", "concat"]) + + assert_model_is_valid( + prog, + {"x": (1, 4, 5, 6)}, + expected_output_shapes={block.outputs[0].name: (1, 4, 5, 6)}, + ) + + self.assertEqual(block.find_ops(op_type="split")[0].axis.val, 3) + + """ + Input graph: + input(shape=1,5,5,3)----->transpose(axis=[0,3,1,2]) + | + ---->relu-------------->transpose(axis=[0,2,3,1]) + | | + | V + | relu + | | + | V + | transpose(axis=[0,3,1,2]) + | | + | V + ----------------> add --------> relu---->pool---->out(shape=1,3,5,5) + + + Output graph: + + + input(shape=1,5,5,3)---->relu------------------------> relu + | | + | V + ----------------> add + | + V + relu + | + V + transpose(axis=[0,3,1,2])-->pool---->out(shape=1,3,5,5) + + """ + + def test_skip_connection_pattern_0(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 5, 5, 3))]) + def prog(x): + x = mb.transpose(x=x, perm=[0, 3, 1, 2]) + x = mb.relu(x=x) + x1 = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x2 = mb.relu(x=x1) + x3 = mb.transpose(x=x2, perm=[0, 3, 1, 2]) + x4 = mb.add(x=x, y=x3) + x5 = mb.relu(x=x4) + x6 = mb.avg_pool(x=x5, kernel_sizes=[1, 1], strides=[1, 1], pad_type="valid") + return x6 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + [ + "transpose", + "relu", + "transpose", + "relu", + "transpose", + "add", + "relu", + "avg_pool", + ], + ) + self.assertEqual( + get_op_types_in_program(prog), + ["relu", "relu", "add", "relu", "transpose", "avg_pool"], + ) + assert_model_is_valid( + prog, + {"x": (1, 5, 5, 3)}, + expected_output_shapes={block.outputs[0].name: (1, 3, 5, 5)}, + ) + + """ + Input graph: + input(shape=1,5,5,3)----->transpose(axis=[0,3,1,2]) + | + ---->relu-------------->transpose(axis=[0,2,3,1]) + | | + | V + | relu + | | + | V + | transpose(axis=[0,3,1,2]) + | | + | V + ----------------> add -->transpose(axis=[0,2,3,1]) + | + V + relu---->pool---->out(shape=1,5,5,3) + + + Output graph: + + + input(shape=1,5,5,3)---->relu------------------------> relu + | | + | V + ----------------> add + | + V + relu + | + V + pool---->out(shape=1,5,5,3) + + """ + + def test_skip_connection_pattern_1(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 5, 5, 3))]) + def prog(x): + x = mb.transpose(x=x, perm=[0, 3, 1, 
2]) + x = mb.relu(x=x) + x1 = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x2 = mb.relu(x=x1) + x3 = mb.transpose(x=x2, perm=[0, 3, 1, 2]) + x4 = mb.add(x=x, y=x3) + x4 = mb.transpose(x=x4, perm=[0, 2, 3, 1]) + x5 = mb.relu(x=x4) + x6 = mb.avg_pool(x=x5, kernel_sizes=[1, 1], strides=[1, 1], pad_type="valid") + return x6 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + [ + "transpose", + "relu", + "transpose", + "relu", + "transpose", + "add", + "transpose", + "relu", + "avg_pool", + ], + ) + self.assertEqual(get_op_types_in_program(prog), ["relu", "relu", "add", "relu", "avg_pool"]) + assert_model_is_valid( + prog, + {"x": (1, 5, 5, 3)}, + expected_output_shapes={block.outputs[0].name: (1, 5, 5, 3)}, + ) + + """ + Input graph: + input(shape=2,5)--->transpose(axis=[1,0])--->transpose(axis=[1,0])-->reduce(axis=1) + | | + | V + | transpose(axis=[1,0]) + | | + | V + -------------------------------------------->add------->out(shape=5,2) + + Output graph: + input(shape=2,5)--->reduce(axis=1)---->add---->transpose(axis=[1,0])--->out(shape=5,2) + | ^ + | | + ------------------------ + """ + + def test_residual_with_unmaterialized_output(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 5))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[1, 0]) + t1 = mb.transpose(x=x1, perm=[1, 0]) + x2 = mb.reduce_mean(x=t1, axes=[1], keep_dims=True) + t2 = mb.transpose(x=x2, perm=[1, 0]) + return mb.add(x=x1, y=t2) + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "transpose", "reduce_mean", "transpose", "add"], + ) + self.assertEqual(get_op_types_in_program(prog), ["reduce_mean", "add", "transpose"]) + + assert_model_is_valid( + prog, {"x": (2, 5)}, expected_output_shapes={block.outputs[0].name: (5, 2)} + ) + + """ + Input graph: + input(shape=2,5)--->transpose(axis=[1,0])--->transpose(axis=[1,0])-->reduce(axis=1) + | | + | V + | transpose(axis=[1,0]) + | | + | V + -------------------------------------------->add------->out1(shape=5,2) + | + V + relu------->out2(shape=5,2) + + Output graph: + input(shape=2,5)--->reduce(axis=1)----> add ----->transpose(axis=[1,0])----->out1(shape=5,2) + | | + | V + ---------------------> relu----->transpose(axis=[1,0])----->out2(shape=5,2) + """ + + def test_residual_with_unmaterialized_multiple_output(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 5))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[1, 0]) + t1 = mb.transpose(x=x1, perm=[1, 0]) + x2 = mb.reduce_mean(x=t1, axes=[1], keep_dims=True) + t2 = mb.transpose(x=x2, perm=[1, 0]) + out1 = mb.add(x=x1, y=t2) + out2 = mb.relu(x=out1) + return out1, out2 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "transpose", "reduce_mean", "transpose", "add", "relu"], + ) + self.assertEqual( + get_op_types_in_program(prog), ["reduce_mean", "add", "relu", "transpose", "transpose"] + ) + + assert_model_is_valid( + prog, + {"x": (2, 5)}, + expected_output_shapes={block.outputs[0].name: (5, 2), block.outputs[1].name: (5, 2)}, + ) + + """ + Input graph: + input(shape=2,5)---->transpose(axis=[1,0])------>relu----->transpose(axis=[1,0])------>out2(shape=2,5) + | + ------->out1(shape=5,2) + + Output graph: + input(shape=2,5)---->relu-----> out2(shape=2,5) + | + V + transpose(axis=[1,0]) 
-----> out1(shape=5,2) + """ + + def test_materialized_output_reuse(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 5))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[1, 0]) + y1 = mb.relu(x=x1) + y2 = mb.transpose(x=y1, perm=[1, 0]) + return y1, y2 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + [ + "transpose", + "relu", + "transpose", + ], + ) + self.assertEqual(get_op_types_in_program(prog), ["relu", "transpose"]) + + assert_model_is_valid( + prog, + {"x": (2, 5)}, + expected_output_shapes={block.outputs[0].name: (5, 2), block.outputs[1].name: (2, 5)}, + ) + + """ + Input graph: + input(shape=1,2,5,5)----->transpose(axis=[0,2,3,1])------->add------------>transpose(axis=[0,3,1,2])--->out1(shape=1,2,5,5) + | ^ | + | | | + ---->relu ----->transpose(axis=[0,3,1,2])--->out2(shape=1,2,5,5) + + Output graph: + input(shape=1,2,5,5)----->add------->out1(shape=1,2,5,5) + | ^ | + | | | + |------>relu ------identity(renaming)---->out2(shape=1,2,5,5) + """ + + def test_fusion_with_double_outputs(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 5, 5))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x2 = mb.relu(x=x1) + x3 = mb.add(x=x1, y=x2) + y1 = mb.transpose(x=x3, perm=[0, 3, 1, 2]) + y2 = mb.transpose(x=x3, perm=[0, 3, 1, 2]) + return y1, y2 + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "relu", "add", "transpose", "transpose"], + ) + self.assertEqual(get_op_types_in_program(prog), ["relu", "add", "identity"]) + + assert block.find_ops(op_type="relu")[0].inputs["x"] == block.inputs["x"] + assert block.find_ops(op_type="add")[0].inputs["x"] == block.inputs["x"] + assert ( + block.find_ops(op_type="add")[0].inputs["y"] + == block.find_ops(op_type="relu")[0].outputs[0] + ) + + assert_model_is_valid( + prog, + {"x": (1, 2, 5, 5)}, + expected_output_shapes={block.outputs[0].name: (1, 2, 5, 5)}, + ) + + def test_pass_through_broadcasted_binary_op(self): + """ + Input graph: + const (shape=(1,1,1,3)) + | + input (shape=(1,4,3,2)) --> transpose (shape=(1,2,4,3)) --> add --> transpose --> relu + + Output graph: + + const (shape=(1,1,3,1)) + | + input (shape=(1,4,3,2)) --> add --> relu + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 4, 3, 2))]) + def prog(x): + x = mb.transpose(x=x, perm=[0, 3, 1, 2]) + x = mb.add(x=x, y=np.array(np.ones(shape=(1, 1, 1, 3)))) + x = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x = mb.relu(x=x) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "add", "transpose", "relu"], + ) + self.assertEqual(get_op_types_in_program(prog), ["add", "relu"]) + assert_model_is_valid( + prog, + {"x": (1, 4, 3, 2)}, + expected_output_shapes={block.outputs[0].name: (1, 4, 3, 2)}, + ) + + def test_binary_op_with_constant_input(self): + """ + Input graph: + const (shape=(4,3)) + | + input (shape=(1,4,3,2)) --> transpose (shape=(1,2,4,3)) --> add --> transpose --> relu + + Output graph: + + const (shape=(1,4,3,1)) + | + input (shape=(1,4,3,2)) --> add --> relu + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 4, 3, 2))]) + def prog(x): + x = mb.transpose(x=x, perm=[0, 3, 1, 2]) + x = mb.add(x=x, y=np.array(np.ones(shape=(4, 3)))) + x = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x = 
mb.relu(x=x) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "add", "transpose", "relu"], + ) + self.assertEqual(get_op_types_in_program(prog), ["add", "relu"]) + assert_model_is_valid( + prog, + {"x": (1, 4, 3, 2)}, + expected_output_shapes={block.outputs[0].name: (1, 4, 3, 2)}, + ) + + def test_binary_op_with_non_constant_input1(self): + """ + Input graph: + input (shape=(3,)) + | + input (shape=(1,4,3,2)) --> transpose (shape=(1,2,4,3)) --> add --> transpose --> relu + + Output graph: + + input (shape=(3,)) + | + reshape (shape=(1,1,3,1)) + | + input (shape=(1,4,3,2)) --> add --> relu + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 4, 3, 2)), mb.TensorSpec(shape=(3,))]) + def prog(x, y): + x = mb.transpose(x=x, perm=[0, 3, 1, 2]) + x = mb.add(x=x, y=y) + x = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x = mb.relu(x=x) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "add", "transpose", "relu"], + ) + self.assertEqual(get_op_types_in_program(prog), ["reshape", "add", "relu"]) + reshape_op = prog.find_ops(op_type="reshape", exactly_one=True)[0] + assert reshape_op.outputs[0].shape == (1, 1, 3, 1) + assert_model_is_valid( + prog, + {"x": (1, 4, 3, 2), "y": (3,)}, + expected_output_shapes={block.outputs[0].name: (1, 4, 3, 2)}, + ) + + def test_binary_op_with_non_constant_input2(self): + """ + Input graph: + input (shape=(3,1,2)) + | + input (shape=(5,3,4,2)) --> transpose (shape=(4,3,5,2)) --> add --> transpose --> relu + + Output graph: + + input (shape=(3,1,2)) + | + reshape (shape=(1,3,1,2)) + | + input (shape=(5,3,4,2)) --> add --> relu + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(5, 3, 4, 2)), mb.TensorSpec(shape=(3, 1, 2))]) + def prog(x, y): + x = mb.transpose(x=x, perm=[2, 1, 0, 3]) + x = mb.add(x=x, y=y) + x = mb.transpose(x=x, perm=[2, 1, 0, 3]) + x = mb.relu(x=x) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "add", "transpose", "relu"], + ) + self.assertEqual(get_op_types_in_program(prog), ["reshape", "add", "relu"]) + reshape_op = prog.find_ops(op_type="reshape", exactly_one=True)[0] + assert reshape_op.outputs[0].shape == (1, 3, 1, 2) + assert_model_is_valid( + prog, + {"x": (5, 3, 4, 2), "y": (3, 1, 2)}, + expected_output_shapes={block.outputs[0].name: (5, 3, 4, 2)}, + ) + + def test_binary_op_with_non_constant_input3(self): + """ + Input graph: + input (shape=(3,1,2)) + | + input (shape=(s,3,4,2)) --> transpose (shape=(4,3,s,2)) --> add --> transpose --> relu + + Output graph: + + input (shape=(3,1,2)) + | + reshape (shape=(1,3,1,2)) + | + input (shape=(s,3,4,2)) --> add --> relu + """ + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(get_new_symbol(), 3, 4, 2)), + mb.TensorSpec(shape=(3, 1, 2)), + ] + ) + def prog(x, y): + x = mb.transpose(x=x, perm=[2, 1, 0, 3]) + x = mb.add(x=x, y=y) + x = mb.transpose(x=x, perm=[2, 1, 0, 3]) + x = mb.relu(x=x) + return x + + pass_name = "common::reduce_transposes" + PASS_REGISTRY[pass_name](prog) + self.assertEqual(get_op_types_in_program(prog), ["reshape", "add", "relu"]) + reshape_op = prog.find_ops(op_type="reshape", exactly_one=True)[0] + assert reshape_op.outputs[0].shape == (1, 3, 1, 2) + block = 
prog.functions["main"] + assert_model_is_valid( + prog, + {"x": (5, 3, 4, 2), "y": (3, 1, 2)}, + expected_output_shapes={block.outputs[0].name: (5, 3, 4, 2)}, + ) + + def test_binary_op_with_non_constant_input4(self): + """ + Input graph: + input (shape=(3,s,2)) + | + input (shape=(1,3,4,2)) --> transpose (shape=(4,3,1,2)) --> add --> transpose --> relu + + Output graph: same as input graph since the non-transpose input of the add op has symbolic shape + """ + + @mb.program( + input_specs=[ + mb.TensorSpec(shape=(1, 3, 4, 2)), + mb.TensorSpec(shape=(3, get_new_symbol(), 2)), + ] + ) + def prog(x, y): + x = mb.transpose(x=x, perm=[2, 1, 0, 3]) + x = mb.add(x=x, y=y) + x = mb.transpose(x=x, perm=[2, 1, 0, 3]) + x = mb.relu(x=x) + return x + + pass_name = "common::reduce_transposes" + PASS_REGISTRY[pass_name](prog) + self.assertEqual(get_op_types_in_program(prog), ["transpose", "add", "transpose", "relu"]) + block = prog.functions["main"] + assert_model_is_valid( + prog, + {"x": (1, 3, 4, 2), "y": (3, 10, 2)}, + expected_output_shapes={block.outputs[0].name: (10, 3, 4, 2)}, + ) + + def test_binary_op_with_non_constant_input5(self): + """ + Input graph: + input (shape=(3,4)) + | + input (shape=(5,3,4,2)) --> transpose (shape=(5,2,3,4)) --> add --> transpose --> relu + + Output graph: same as input graph since transpose compliment for 2nd input of add cannot be represented + as a static reshape + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(5, 3, 4, 2)), mb.TensorSpec(shape=(3, 4))]) + def prog(x, y): + x = mb.transpose(x=x, perm=[0, 3, 1, 2]) + x = mb.add(x=x, y=y) + x = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x = mb.relu(x=x) + return x + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + self.assertEqual( + get_op_types_in_program(prev_prog), + ["transpose", "add", "transpose", "relu"], + ) + self.assertEqual( + get_op_types_in_program(prog), + ["transpose", "add", "transpose", "relu"], + ) + assert_model_is_valid( + prog, + {"x": (5, 3, 4, 2), "y": (3, 4)}, + expected_output_shapes={block.outputs[0].name: (5, 3, 4, 2)}, + ) + + def test_input_duplicate_output(self): + """ + Input graph: + input -----> out (consist of duplicated input) + + Output graph: + input -----> out (consist of duplicated input) + + Notice that a temp identity sink is added for all outputs, so the block before going through the pass is: + function[CoreML3](%x: (2, 2, 1, 1, fp32)(Tensor)) { + block0() { + %identity_0: (2, 2, 1, 1, fp32)(Tensor) = identity(x=%x, name="identity_0") + } -> (%identity_0, %identity_0) + } + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 2, 1, 1))]) + def prog(x): + return x, x + + prev_prog, prev_block, block = apply_pass_and_basic_check(prog, "common::reduce_transposes") + + self.assertEqual(get_op_types_in_program(prev_prog), []) + self.assertEqual(get_op_types_in_program(prog), []) + assert_model_is_valid( + prog, + {"x": (2, 2, 1, 1)}, + backend=("mlprogram", "fp16"), + expected_output_shapes={ + block.outputs[0].name: (2, 2, 1, 1), + block.outputs[1].name: (2, 2, 1, 1), + }, + ) + + +class TestTransposePassUtilityMethods: + @staticmethod + @pytest.mark.parametrize("rank", [1, 2, 3, 4, 5]) + def test_transpose_compliment_method(rank): + x = np.random.rand(*np.random.randint(low=1, high=15, size=rank)) + perm = np.random.permutation(rank) + reverse_perm = TransformAxisUpdateOps._find_transpose_compliment(perm) + x_transpose = np.transpose(x, perm) + x_transpose_transpose = np.transpose(x_transpose, reverse_perm) + 
np.testing.assert_equal(x, x_transpose_transpose) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/program.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/program.py new file mode 100644 index 00000000..39e67cf3 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/program.py @@ -0,0 +1,274 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as _np +import sympy as _sm + +from coremltools import _logger as logger +from coremltools.converters.mil._deployment_compatibility import \ + AvailableTarget as _target +from coremltools.converters.mil.input_types import InputType +from coremltools.converters.mil.mil.var import ListVar +from coremltools.converters.mil.mil.ops.helper import _get_version_of_op + +from . import types +from .block import Function +from .types.symbolic import k_num_internal_syms, k_used_symbols +from .var import Var + + +class Program: + def __init__(self): + self.main_input_types = [] + self.main_output_types = None + self.functions = {} + self.parameters = {} + self.skip_all_passes = False + + def _get_max_opset_version_and_op(self): + max_opset_version = _target.iOS13 + op_with_max_opset_version = None + def update_max_opset_version_block(block): + nonlocal max_opset_version + nonlocal op_with_max_opset_version + for op in list(block.operations): + for b in op.blocks: + update_max_opset_version_block(b) + if not hasattr(op, "_op_variants") or not isinstance(op._op_variants, dict): + continue + if op.opset_version > max_opset_version: + max_opset_version = op.opset_version + op_with_max_opset_version = op + for func in self.functions.values(): + update_max_opset_version_block(func) + return max_opset_version, op_with_max_opset_version + + def _check_ops_version_compatibility(self, max_opset_version): + def check_version_compatibility_block(block): + for op in list(block.operations): + for b in op.blocks: + check_version_compatibility_block(b) + if not hasattr(op, "_op_variants") or not isinstance(op._op_variants, dict): + continue + expected_op_cls = _get_version_of_op(op._op_variants, max_opset_version) + if type(op) is not expected_op_cls: + msg = ( + "Op {} with an out of date version {!s} is detected. Please use @mb.program(input_specs=..., " + "opset_version={!s})" + ).format(op.op_type, op.opset_version, max_opset_version) + raise ValueError(msg) + for func in self.functions.values(): + check_version_compatibility_block(func) + + def _check_or_set_functions_opset_version(self, max_opset_version): + funcs = list(self.functions.values()) + for func in funcs: + if func.opset_version is None: + func.opset_version = max_opset_version + else: + if func.opset_version < max_opset_version: + msg = "function should have at least opset_version {!s}. Got {!s}".format(max_opset_version, func.opset_version) + raise ValueError(msg) + for func in funcs: + if func.opset_version != funcs[0].opset_version: + msg = "all functions must have the same opset_version. 
Got {!s} and {!s}.".format(func.opset_version, funcs[0].opset_version) + raise ValueError(msg) + + def _check_program_opset_version(self): + max_opset_version, _ = self._get_max_opset_version_and_op() + self._check_ops_version_compatibility(max_opset_version) + self._check_or_set_functions_opset_version(max_opset_version) + + def _check_invalid_tensor_rank(self): + ''' + Early error out for tensor with rank >= 6 + ''' + def _check_invalid_tensor_rank_block(block): + for op in block.operations: + for b in op.blocks: + _check_invalid_tensor_rank_block(b) + for o in op.outputs: + if not isinstance(o, ListVar) and (o.rank < 0 or o.rank >= 6): + raise ValueError( + f'Core ML only supports tensors with rank <= 5. Layer "{op.name}", ' + f'with type "{op.op_type}", outputs a rank {o.rank} tensor. ' + ) + for f in self.functions.values(): + _check_invalid_tensor_rank_block(f) + + def add_function(self, name, ssa_func): + if not isinstance(ssa_func, Function): + raise ValueError("Only Function can be added to Program.") + self.functions[name] = ssa_func + self._check_program_opset_version() + + def add_parameters(self, name, ssa_val): + raise NotImplementedError() + + def set_main_input_types(self, inputs): + if not isinstance(inputs, tuple): + raise ValueError("main inputs should be tuple of TensorType or ImageType") + elif not all([isinstance(inp, InputType) for inp in inputs]): + raise ValueError("main inputs should be tuple of InputSpec") + self.main_input_types = inputs + + def set_main_output_types(self, outputs=None): + if outputs is not None: + if not (isinstance(outputs, list) and all([isinstance(out, InputType) for out in outputs])): + raise TypeError("main outputs should be a list of type ct.TensorType or ct.ImageType") + self.main_output_types = outputs + + + def find_ops(self, prefix=None, op_type=None, exactly_one=False): + """ + Return list of ops with name matching `prefix` if specified, and + op_type, if specified. At least one of {prefix, op_type} must be + specified. + + If `exactly_one` == True, raise ValueError if we find <1 or >1 ops satisfying + the criteria. + + prefix: str + + Return list[Operation]. Empty list if no op satisfies. + """ + found_ops = [] + for f_name, f in self.functions.items(): + found_ops.extend(f.find_ops(prefix=prefix, op_type=op_type)) + if exactly_one and len(found_ops) != 1: + msg = "Found matching ops not exactly one. Found ops: {}" + raise ValueError(msg.format(found_ops)) + return found_ops + + def validate(self): + for f in self.functions.values(): + f.validate() + + def __getitem__(self, func_name): + if func_name not in self.functions: + msg = "Function {} not found in among functions {}." + raise KeyError(msg.format(func_name, self.functions.keys())) + return self.functions[func_name] + + def __repr__(self): + return self.__str__() + + def __str__(self): + s = "" + for f_name, f in self.functions.items(): + s += f.to_str(f_name) + return s + + +class Placeholder: + counter = 0 + + def __init__(self, sym_shape, dtype=None, name=None, allow_rank0_input=False): + """ + sym_shape: () or [] for scalar. list, tuple, np.ndarray for tensor. May + contain Symbol as symbolic shape (but not string). + + dtype: types.float or other scalar builtin types. + allow_rank0_input: A flag that allows the rank 0 placeholder. 
+ """ + if not isinstance(sym_shape, (list, tuple, _np.ndarray)): + raise ValueError("Illegal shape for Placeholder: {}".format(sym_shape)) + + if len(sym_shape) == 0: + if not allow_rank0_input: + raise ValueError('Rank-0 (input {}) is unsupported'.format(name)) + else: + logger.warning('Rank-0 (input {}) is unsupported in coreml. You might run into error while\ + running this model'.format(name)) + + for i, d in enumerate(sym_shape): + if not isinstance(d, (_np.generic, int, Symbol)): + msg = 'Placeholder dim {} in {} is not integer or symbol' + raise ValueError(msg.format(i, sym_shape)) + self.sym_shape = sym_shape + self.dtype = dtype + if self.dtype is None: + self.dtype = types.float + sym_type = self.type_inference() + + # Globally unique var name for placeholders + if name is None: + name = 'placeholder_' + str(self.__class__.counter) + self.__class__.counter += 1 + + # List of output vars (consistent w/ other ops) + self.outputs = [Var(name, sym_type)] + + def set_name(self, name): + self.name = name + self.outputs[0].name = name + + def type_inference(self): + if len(self.sym_shape) == 0: + return self.dtype + return types.tensor(self.dtype, self.sym_shape) + + def __str__(self): + return str(self.outputs[0]) + + +def get_new_variadic_symbol(): + global k_num_internal_syms + s = Symbol("*is" + str(k_num_internal_syms)) + k_num_internal_syms += 1 + return s + + +def get_new_symbol(name=None): + """ + Returns a new symbol, optionally named. + + name: str (optional) + Optional name that provides more readability. If the name specified is + not available, an extra integer will be appended. + """ + global k_used_symbols + global k_num_internal_syms + + if name is not None: + s = Symbol(name) + if s in k_used_symbols: + new_name = name + k_num_internal_syms + msg = 'Symbol name "{}" already occupied. Renaming to {}' + logger.warning(msg.format(name, new_name)) + s = Symbol(new_name) + else: + s = Symbol("is" + str(k_num_internal_syms)) + k_num_internal_syms += 1 + return s + +def get_existing_symbol(name): + global k_used_symbols + if name not in k_used_symbols: + msg = 'Symbol name {} does not exist' + raise ValueError(msg.format(name)) + return k_used_symbols[name] + + +class Symbol(_sm.Symbol): + def __init__(self, sym_name): + """ + Essentially sympy.Symbol representing an i32 value in shape. + + sym_name: str. If first character is *, then this symbol represents + variadic rank. Otherwise the symbol name should start with a alpha + character. `sym_name` must be unique if specified, or it'd be auto + generated (to a non-variadic symbol). Furthermore, sym_name may not + start with 'is' (internal symbol) + """ + if not (sym_name[0].isalpha() or sym_name[0] == "*"): + msg = "Symbol name must start with a letter or *. Got {}" + raise ValueError(msg.format(sym_name)) + global k_used_symbols + if sym_name in k_used_symbols: + msg = "Symbol `{}` is used already." + raise ValueError(msg.format(sym_name)) + k_used_symbols[sym_name] = self + self.name = sym_name diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/tests/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/tests/__init__.py new file mode 100644 index 00000000..af4a4e02 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/tests/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2023, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause \ No newline at end of file diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/tests/test_block.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/tests/test_block.py new file mode 100644 index 00000000..a4ccfe27 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/tests/test_block.py @@ -0,0 +1,495 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import copy + +import numpy as np +import pytest + +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil.passes.tests.test_passes import TestSkipConstexprOps +from coremltools.converters.mil.testing_utils import ( + assert_same_output_names, + assert_same_output_shapes, + get_op_types_in_program, +) + +""" +Test manipulating variable and operations in the Block. + +In the test, we are actually testing Function, which is a child class of +Block. Technically Function should not inherit from Block, which is a +debt to be resolved in the future. + +Function has some different behaviors from Block that are irrelevant to +the core API being tested here. +""" + + +def test_empty_block(): + """ + Test an empty program + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x0): + return x0 + + block = prog.functions["main"] + assert len(block.operations) == 0 + assert len(block.inputs) == 1 + assert len(block.outputs) == 1 + assert block.inputs["x0"] == block.outputs[0] + + +def test_add_op(): + """ + Test add statement to an empty program, also change the output + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x0): + return x0 + + print("before:\n{}".format(prog)) + block = prog.functions["main"] + x0 = block.inputs["x0"] + with block: + x1 = mb.log(x=x0) + block.set_outputs([x1]) + print("after:\n{}".format(prog)) + assert block.inputs["x0"] == block.find_ops(op_type="log")[0].inputs["x"] + assert len(block.operations) == 2 # const op for epsilon + log + assert block.operations[1].op_type == "log" + assert block.outputs[0] == x1 + + +def test_remove_op(): + """ + Test remove all ops and return empty program + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x0): + x1 = mb.log(x=x0) + return x1 + + print("before:\n{}".format(prog)) + block = prog.functions["main"] + assert len(block.operations) == 2 + x0 = block.inputs["x0"] + ops = block.find_ops(op_type="log") + block.set_outputs([x0]) + block.remove_ops(ops) + print("after:\n{}".format(prog)) + assert len(block.operations) == 1 + assert len(block.inputs) == 1 + assert len(block.outputs) == 1 + assert block.inputs["x0"] == block.outputs[0] + + +def test_remove_op2(): + """ + Test remove ops with multiple identical inputs + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x0): + x1 = mb.add(x=x0, y=x0) + return x1 + + print("before:\n{}".format(prog)) + block = prog.functions["main"] + x0 = block.inputs["x0"] + ops = block.find_ops(op_type="add") + block.set_outputs([x0]) + block.remove_ops(ops) + print("after:\n{}".format(prog)) + assert len(block.operations) == 0 + assert len(block.inputs) == 1 + assert len(block.outputs) == 1 + 
assert block.inputs["x0"] == block.outputs[0] + + +def test_remove_duplicate_ops(): + """Test remove duplicated ops.""" + + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x0): + x1 = mb.add(x=x0, y=x0) + return x1 + + block = prog.functions["main"] + x0 = block.inputs["x0"] + ops = block.find_ops(op_type="add") + duplicate_ops = ops + ops + block.set_outputs([x0]) + block.remove_ops(duplicate_ops) + assert len(block.operations) == 0 + assert len(block.inputs) == 1 + assert len(block.outputs) == 1 + assert block.inputs["x0"] == block.outputs[0] + + +def test_remove_duplicate_ops_not_affect_others(): + """ + Test remove duplicated ops doesn't affect other ops. We add another `add` op here, but keep + the input to remove_ops only restricted to the first `add` op. This test is for checking that + the second add op doesn't get removed. + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x0): + x1 = mb.add(x=x0, y=x0) + x2 = mb.add(x=x0, y=x0) + return x1, x2 + + block = prog.functions["main"] + x0 = block.inputs["x0"] + ops = [block.find_ops(op_type="add")[0]] + block.set_outputs([x0]) + block.remove_ops(ops) + # Deleting one add operation should not affect the other one. + assert len(block.operations) == 1 + assert len(block.inputs) == 1 + assert len(block.outputs) == 1 + + +def test_remove_ops_fail_for_block_output(): + """Block's output cannot be removed.""" + + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x0): + x1 = mb.add(x=x0, y=x0) + x2 = mb.add(x=x0, y=x0) + return x1, x2 + + block = prog.functions["main"] + ops = block.find_ops(op_type="add") + expected_err_str = "cannot delete op add_.* with output 0: add_.* that's block block.*'s output" + with pytest.raises(ValueError, match=expected_err_str): + block.remove_ops(ops) + assert len(block.operations) == 2 + assert len(block.inputs) == 1 + assert len(block.outputs) == 2 + + +def test_op_removal_and_insertion(): + """ + Remove a transpose pair and materialize one transpose before another op + Given: + %x1 = transpose(%x) + %x2 = relu(%x1) + %out1 = avg_pool(%x2) + %x3 = transpose(%x2) + %out2 = log(%x3) + + After removing both transposes: + %x2 = relu(%x) + %out1 = avg_pool(%x2) + %out2 = log(%x2) + + After inserting a transpose: + %x2 = relu(%x) + %x4 = transpose(%x2) + %out1 = avg_pool(%x4) + %out2 = log(%x2) + + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 6))]) + def prog(x): + x1 = mb.transpose(x=x, perm=[0, 2, 3, 1]) + x2 = mb.relu(x=x1) + out1 = mb.avg_pool(x=x2, kernel_sizes=[1, 1], strides=[1, 1], pad_type="valid") + x3 = mb.transpose(x=x2, perm=[0, 3, 1, 2]) + out2 = mb.log(x=x3) + return out1, out2 + + prev_prog = copy.deepcopy(prog) + + print("before:\n{}".format(prog)) + assert get_op_types_in_program(prog) == [ + "transpose", + "relu", + "avg_pool", + "transpose", + "log", + ] + block = prog.functions["main"] + + def remove_transpose(block): + op = block.find_ops(op_type="transpose")[0] + block.replace_uses_of_var_after_op( + anchor_op=op.inputs["x"].op, + old_var=op.outputs[0], + new_var=op.inputs["x"], + no_check_var_types=True, + ) + block.remove_ops([op]) + + with block: + # remove 1st transpose + remove_transpose(block) + assert get_op_types_in_program(prog) == ["relu", "avg_pool", "transpose", "log"] + + # remove 2nd transpose + remove_transpose(block) + assert get_op_types_in_program(prog) == ["relu", "avg_pool", "log"] + + print("after transpose ops removal:\n{}".format(prog)) + + # insert transpose before pool + pool_op = 
block.find_ops(op_type="avg_pool")[0] + with block: + y = mb.transpose(x=pool_op.inputs["x"], perm=[0, 2, 3, 1], before_op=pool_op) + + block.replace_uses_of_var_after_op( + anchor_op=y.op, + end_op=pool_op, + old_var=pool_op.inputs["x"], + new_var=y, + no_check_var_types=True, + ) + + print("after transpose insertion:\n{}".format(prog)) + assert get_op_types_in_program(prog) == ["relu", "transpose", "avg_pool", "log"] + + for op in block.operations: + op.type_value_inference(overwrite_output=True) + + assert_same_output_names(prev_prog, prog) + assert_same_output_shapes(prev_prog, prog) + + +def test_replace_nonreplaceable_vars(): + """ + The conversion should error out if an invalid replacement is invoked with nonreplaceable vars + """ + constexpr_op = "constexpr_sparse_to_dense" + @mb.program(input_specs=[mb.TensorSpec(shape=(4, 2))]) + def prog(x): + constexpr = TestSkipConstexprOps.CONSTEXPR_FUNCS[constexpr_op]((4, 2)) + return mb.add(x=x, y=constexpr) + + block = prog.functions["main"] + constexpr_op = block.find_ops(op_type=constexpr_op)[0] + + with block: + const = mb.const(val=np.random.rand(4, 2), before_op=constexpr_op) + expected_err_str = "might potentially be removed during the replacement of those vars." + with pytest.raises(ValueError, match=expected_err_str): + block.replace_uses_of_var_after_op( + anchor_op=constexpr_op, + old_var=constexpr_op.outputs[0], + new_var=const + ) + + +def test_replace_nonreplaceable_vars_force(): + """ + The conversion should not error out if the replace_uses_of_vars_after_op is executed with + force_replace=True Also we test that, the new nonreplaceable_vars_upstream is propagated + after the code exist `with block`. + """ + constexpr_op = "constexpr_sparse_to_dense" + @mb.program(input_specs=[mb.TensorSpec(shape=(4, 2))]) + def prog(x): + constexpr = TestSkipConstexprOps.CONSTEXPR_FUNCS[constexpr_op]((4, 2)) + return mb.add(x=x, y=constexpr) + + block = prog.functions["main"] + constexpr_op = block.find_ops(op_type=constexpr_op)[0] + add_op = block.find_ops(op_type="add")[0] + + assert len(add_op.outputs[0].nonreplaceable_vars_upstream) == 1 + + with block: + const = mb.const(val=np.random.rand(4, 2), before_op=constexpr_op) + block.replace_uses_of_var_after_op( + anchor_op=constexpr_op, + old_var=constexpr_op.outputs[0], + new_var=const, + force_replace=True, + ) + block.remove_ops([constexpr_op]) + + assert len(add_op.outputs[0].nonreplaceable_vars_upstream) == 0 + + +def test_simple_substituion(): + """ + Replace log(x+y) with log(x*y) + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4)), mb.TensorSpec(shape=(2, 4))]) + def prog(x0, y0): + x1 = mb.add(x=x0, y=y0) + z = mb.log(x=x1) + return z + + print("before:\n{}".format(prog)) + block = prog.functions["main"] + assert len(block.find_ops(op_type="log")) == 1 + assert len(block.find_ops(op_type="add")) == 1 + assert len(block.find_ops(op_type="mul")) == 0 + + add = block.find_ops(op_type="add")[0] + + x0 = add.inputs["x"] + y0 = add.inputs["y"] + x1 = add.outputs[0] + + with block: + # It's important to add 'mul' before 'add' (its even better to do it + # immediately after 'add' but we don't have the API) + # because we need to replace any op affected by add with 'mul' + x2 = mb.mul(x=x0, y=y0, before_op=add) + + assert len(block.find_ops(op_type="mul")) == 1 + assert len(block.find_ops(op_type="add")) == 1 + assert len(block.find_ops(op_type="log")) == 1 + + # It's important to set anchor_op = 'mul' because new_var is only visible + # after 'mul'. 
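+    # Sketch of the rewrite being performed at this point (SSA names illustrative):
+    #   before: %x1 = add(%x0, %y0); %z = log(%x1)
+    #   after:  %x2 = mul(%x0, %y0); %z = log(%x2)   (the dead add is removed below)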
+ block.replace_uses_of_var_after_op(anchor_op=x2.op, old_var=x1, new_var=x2) + block.remove_ops([add]) + + print("after:\n{}".format(prog)) + assert len(block.find_ops(op_type="add")) == 0 + assert len(block.find_ops(op_type="mul")) == 1 + assert len(block.find_ops(op_type="log")) == 1 + + +def test_substitute_nested_op(): + """" + Replace an conditional op with nested block + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4)), mb.TensorSpec(shape=(2, 4))]) + def prog(x0, y0): + pred = mb.less(x=x0, y=y0) + z = mb.cond( + pred=pred, _true_fn=lambda: mb.abs(x=x0), _false_fn=lambda: mb.abs(x=y0) + ) + z1 = mb.log(x=z) + return z1 + + print("before:\n{}".format(prog)) + block = prog.functions["main"] + assert len(block.find_ops(op_type="less")) == 1 + assert len(block.find_ops(op_type="abs")) == 2 + assert len(block.find_ops(op_type="cond")) == 1 + assert len(block.find_ops(op_type="log")) == 1 + + cond = block.find_ops(op_type="cond")[0] + x0 = block.inputs["x0"] + z = cond.outputs[0] + block.replace_uses_of_var_after_op(anchor_op=None, old_var=z, new_var=x0) + + # removing cond will also remove the abs ops within its block + block.remove_ops([cond]) + + print("after:\n{}".format(prog)) + assert len(block.find_ops(op_type="less")) == 1 + assert len(block.find_ops(op_type="log")) == 1 + assert len(block.find_ops(op_type="cond")) == 0 + assert len(block.find_ops(op_type="abs")) == 0 + + +def test_simple_transpose_squash(): + """ + Test eliminate consecutive transpose can be canceled + """ + + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))]) + def prog(x0): + x1 = mb.transpose(x=x0, perm=[1, 0]) + x2 = mb.transpose(x=x1, perm=[1, 0]) + x3 = mb.log(x=x2) + x4 = mb.transpose(x=x3, perm=[1, 0]) + x5 = mb.transpose(x=x4, perm=[1, 0]) + x6 = mb.transpose(x=x5, perm=[1, 0]) + x7 = mb.transpose(x=x6, perm=[1, 0]) + return x7 + + print("before:\n{}".format(prog)) + block = prog.functions["main"] + assert len(block.find_ops(op_type="transpose")) == 6 + + def can_squash(trans1, trans2): + return ( + len(trans1.outputs) == 1 + and len(trans2.outputs) == 1 + and all(trans1.perm.val == trans2.perm.val) + ) + + # Find all candidate pairs transposes + # we ignore all const (transpose_perm_%x), and add pairs of transpose op as + # candidate. This won't generalize to more complicated program with other + # shape invariant ops in between. 
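+    # For this particular program the scan below is expected to collect the
+    # pairs (x1, x2), (x4, x5) and (x6, x7); each pair composes to the identity
+    # permutation, so all six transposes end up squashed.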
+ candidates = [] + non_const_ops = [op for op in block.operations if op.op_type != "const"] + for i in range(len(non_const_ops) - 1): + op = non_const_ops[i] + if len(candidates) and op == candidates[-1][1]: + # op is already a squash candidate + continue + next_op = non_const_ops[i + 1] + if ( + op.op_type == "transpose" + and next_op.op_type == "transpose" + and can_squash(op, next_op) + ): + candidates.append((op, next_op)) + + # Remove each candidate pairs + for (trans1, trans2) in candidates: + before = trans1.inputs["x"] + after = trans2.outputs[0] + block.replace_uses_of_var_after_op( + anchor_op=trans2, old_var=after, new_var=before + ) + block.remove_ops([trans1, trans2]) + + print("after:\n{}".format(prog)) + assert len(block.find_ops(op_type="transpose")) == 0 + + +def test_duplicate_outputs_add_consuming_block_once(): + """The same consuming block should only be added once.""" + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4)), mb.TensorSpec(shape=(2, 4))]) + def prog(x0, y0): + x1 = mb.add(x=x0, y=y0) + return x1, x1, x1 + + block = prog.functions["main"] + assert len(block.outputs[0].consuming_blocks) == 1 + assert len(block.outputs[1].consuming_blocks) == 1 + assert len(block.outputs[2].consuming_blocks) == 1 + + +def test_duplicate_outputs_substituion(): + """Replaces var that appears more than once in outputs.""" + @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4)), mb.TensorSpec(shape=(2, 4))]) + def prog(x0, y0): + x1 = mb.add(x=x0, y=y0) + z = mb.log(x=x1) + return x1, x1, z + + block = prog.functions["main"] + add = block.find_ops(op_type="add")[0] + x0 = add.inputs["x"] + y0 = add.inputs["y"] + x1 = add.outputs[0] + + with block: + x2 = mb.mul(x=x0, y=y0, before_op=add, name="new_output") + + block.replace_uses_of_var_after_op(anchor_op=x2.op, old_var=x1, new_var=x2) + block.remove_ops([add]) + assert block.outputs[0].op.name == "new_output" + assert block.outputs[1].op.name == "new_output" + assert len(block.outputs[0].consuming_blocks) == 1 diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/tests/test_debug.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/tests/test_debug.py new file mode 100644 index 00000000..b6601ccf --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/tests/test_debug.py @@ -0,0 +1,302 @@ +# Copyright (c) 2023, Apple Inc. All rights reserved. 
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import itertools
+import os
+import tempfile
+
+import pytest
+import numpy as np
+
+import coremltools as ct
+from coremltools.converters.mil import Builder as mb
+from coremltools.converters.mil.debugging_utils import extract_submodel
+from coremltools.converters.mil.mil import get_new_symbol
+from coremltools.converters.mil.mil.types.symbolic import is_symbolic
+from coremltools.converters.mil.testing_utils import get_op_types_in_program
+
+def get_simple_program():
+    @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 3, 4)),])
+    def prog(x):
+        x = mb.add(x=x, y=1.2, name="add")
+        x = mb.transpose(x=x, perm=[0, 2, 3, 1])
+        x = mb.square(x=x, name="output_0")
+        x = mb.tanh(x=x, name="output_1")
+        x = mb.transpose(x=x, perm=[0, 2, 3, 1])
+        return x
+
+    return prog
+
+def compute_ground_truth_answer(input):
+    x = input + 1.2
+    x = np.transpose(x, axes=[0, 2, 3, 1])
+    square = x * x
+    tanh = np.tanh(square)
+    return {"output_0": square, "output_1": tanh}
+
+class TestExtractSubModel:
+
+    def test_extract_submodel_error_handling(self):
+        prog = get_simple_program()
+        mlmodel = ct.convert(prog, convert_to="neuralnetwork")
+
+        invalid_outputs = set()
+        with pytest.raises(ValueError, match="outputs must be of type list/tuple. Got <class 'set'>"):
+            extract_submodel(mlmodel, outputs=invalid_outputs)
+
+        invalid_outputs = ["output_1", 1]
+        with pytest.raises(ValueError, match="outputs must be a list of str. Got element 1 with type <class 'int'>."):
+            extract_submodel(mlmodel, outputs=invalid_outputs)
+
+        invalid_outputs = ["output_1", "output_1"]
+        with pytest.raises(ValueError, match="outputs must be a list of unique elements.
'output_1' occurs 2 times"): + extract_submodel(mlmodel, outputs=invalid_outputs) + + invalid_outputs = ["error"] + with pytest.raises(ValueError, match="outputs \['error'\] not found in the function."): + extract_submodel(mlmodel, outputs=invalid_outputs) + + model_dir = tempfile.TemporaryDirectory() + mlmodel_path = os.path.join(model_dir.name, "model.mlmodel") + mlmodel.save(mlmodel_path) + mlmodel = ct.models.MLModel(mlmodel_path) + with pytest.raises(ValueError, match="NeuralNetwork model loaded from the disk is not supported by the extract_submodel util"): + extract_submodel(mlmodel, outputs=["output_0", "output_1"]) + + def test_extract_submodel_symbolic_input(self): + """ + Input graph: + x -> sin ---> sub -> output_1 + | + v + mul -> tan -> output_2 + + If x has symbolic shape, then the subgraph mil -> tan should also have symbolic shape + """ + @mb.program(input_specs=[mb.TensorSpec(shape=(1, get_new_symbol()))]) + def prog(x): + sin = mb.sin(x=x, name="sin") + sub = mb.sub(x=sin, y=1.5, name="sub") + mul = mb.mul(x=sin, y=1.2, name="mul") + tan = mb.tan(x=mul, name="tan") + return sub, tan + model = ct.convert(prog, convert_to="neuralnetwork") + submodel = extract_submodel(model, outputs=["tan"], inputs=["mul"]) + func = submodel._mil_program.functions["main"] + + input = list(func.inputs.values())[0] + assert input.shape[0] == 1 + assert is_symbolic(input.shape[1]) + + output = func.outputs[0] + assert output.shape[0] == 1 + assert is_symbolic(output.shape[1]) + + def test_extract_submodel_complex(self): + """ + Input graph: + x -> sin ------> sub -> output_1 + | | + v v + y -> add -> mul -> tan -> realdiv -> output_2 + """ + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2)), mb.TensorSpec(shape=(1, 2))]) + def prog(x, y): + sin = mb.sin(x=x, name="sin") + add = mb.add(x=sin, y=y, name="add") + sub = mb.sub(x=sin, y=1.5, name="sub") + mul = mb.mul(x=sin, y=add, name="mul") + tan = mb.tan(x=mul, name="tan") + realdiv = mb.real_div(x=tan, y=4.7, name="realdiv") + return sub, realdiv + model = ct.convert(prog, convert_to="neuralnetwork") + + """ + Case 1: + inputs = None + outputs = [sin, mul] + + Output graph: + x -> sin ------> output_1 + | | + v v + y -> add -> mul -> output_2 + """ + submodel = extract_submodel(model, outputs=["sin", "mul"]) + assert get_op_types_in_program(submodel._mil_program) == ["sin", "add", "mul"] + + """ + Case 2: + inputs = None + outputs = [sin, add] + + Output graph: + x -> sin -> output_1 + | + v + y -> add -> output_2 + """ + submodel = extract_submodel(model, outputs=["sin", "add"]) + assert get_op_types_in_program(submodel._mil_program) == ["sin", "add"] + + """ + Case 3: + inputs = None + outputs = [mul] + + Output graph: + x -> sin ----- + | | + v v + y -> add -> mul -> output_1 + """ + submodel = extract_submodel(model, outputs=["mul"]) + assert get_op_types_in_program(submodel._mil_program) == ["sin", "add", "mul"] + + """ + Case 4: + inputs = None + outputs = [sin, sub] + + Output graph: + x -> sin -> sub -> output_2 + | + V + output_1 + y + """ + submodel = extract_submodel(model, outputs=["sin", "sub"]) + print(submodel._mil_program) + assert get_op_types_in_program(submodel._mil_program) == ["sin", "sub"] + + """ + Case 5: + inputs = [x, y] + outputs = [mul] + + Output graph: + x -> sin ----- + | | + v v + y -> add -> mul -> output_1 + """ + submodel = extract_submodel(model, outputs=["mul"], inputs=["x", "y"]) + assert get_op_types_in_program(submodel._mil_program) == ["sin", "add", "mul"] + + """ + Case 6: + inputs = [mul] + outputs 
= [tan] + + mul -> tan -> output_1 + """ + submodel = extract_submodel(model, outputs=["tan"], inputs=["mul"]) + assert get_op_types_in_program(submodel._mil_program) == ["tan"] + + """ + Case 7: + inputs = [sin, add] + outputs = [sub, mul] + + sin ------> sub -> output_1 + | + v + add -> mul -> output_2 + """ + submodel = extract_submodel(model, outputs=["sub", "mul"], inputs=["sin", "add"]) + assert get_op_types_in_program(submodel._mil_program) == ["sub", "mul"] + + """ + Case 8 (Negative): + inputs = [sin] + outputs = [mul] + + mul not reachable merely through sin + """ + with pytest.raises(ValueError, match="output mul not reachable from inputs"): + submodel = extract_submodel(model, outputs=["mul"], inputs=["sin"]) + + """ + Case 9 (Negative): + inputs = [mul] + outputs = [sin] + + sin not reachable merely through sin + """ + with pytest.raises(ValueError, match="output sin not reachable from inputs"): + submodel = extract_submodel(model, outputs=["sin"], inputs=["mul"]) + + @pytest.mark.parametrize( + "compute_unit", + [ + ct.ComputeUnit.ALL, + ct.ComputeUnit.CPU_ONLY, + ], + ) + def test_extract_submodel_neuralnetwork(self, compute_unit): + prog = get_simple_program() + model = ct.convert(prog, convert_to="neuralnetwork", compute_units=compute_unit) + submodel = extract_submodel(model, outputs=["output_0", "output_1"]) + + # check that the submodel retains the same backend + assert submodel.get_spec().WhichOneof("Type") == "neuralNetwork" + + # check that the submodel retains the same compute unit + assert submodel.compute_unit == compute_unit + + # check the subgraph + assert get_op_types_in_program(submodel._mil_program) == ["add", "transpose", "square", "tanh"] + + # check the numerical outputs + coreml_in = np.random.rand(1, 2, 3, 4) + coreml_out = submodel.predict({"x": coreml_in}) + gt = compute_ground_truth_answer(coreml_in) + assert len(coreml_out) == len(gt) + for k, v in gt.items(): + np.testing.assert_allclose(v, coreml_out[k], atol=0.2) + + @pytest.mark.parametrize( + "compute_unit, store_to_disk", + itertools.product( + [ + ct.ComputeUnit.ALL, + ct.ComputeUnit.CPU_ONLY, + ], + [True, False], + ) + ) + def test_extract_submodel_mlprogram(self, compute_unit, store_to_disk): + prog = get_simple_program() + model = ct.convert( + prog, + convert_to="mlprogram", + compute_units=compute_unit, + compute_precision=ct.precision.FLOAT32 + ) + + if store_to_disk: + model_dir = tempfile.TemporaryDirectory() + mlmodel_path = os.path.join(model_dir.name, "model.mlpackage") + model.save(mlmodel_path) + model = ct.models.MLModel(mlmodel_path, compute_units=compute_unit) + + submodel = extract_submodel(model, outputs=["output_0", "output_1"]) + + # check that the submodel retains the same backend + assert submodel.get_spec().WhichOneof("Type") == "mlProgram" + + # check that the submodel retains the same compute unit + assert submodel.compute_unit == compute_unit + + # check the subgraph + assert get_op_types_in_program(submodel._mil_program) == ["add", "transpose", "square", "tanh"] + + # check the numerical outputs + coreml_in = np.random.rand(1, 2, 3, 4) + coreml_out = submodel.predict({"x": coreml_in}) + gt = compute_ground_truth_answer(coreml_in) + assert len(coreml_out) == len(gt) + for k, v in gt.items(): + np.testing.assert_allclose(v, coreml_out[k], atol=0.2) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/tests/test_programs.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/tests/test_programs.py new file mode 
100644 index 00000000..1a7e8e1f --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/tests/test_programs.py @@ -0,0 +1,347 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as np +import pytest + +import coremltools as ct +from coremltools import _logger as logger +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import types + +np.random.seed(0) + +def test_single_layer_example(): + batch_size, input_dim, output_dim = 2, 4, 2 + + @mb.program( + input_specs=[mb.TensorSpec(shape=(batch_size, input_dim)),] + ) + def prog(x): + # Weight + W_val = ( + np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]) + .reshape(input_dim, output_dim) + .T.astype(np.float32) + ) + W = mb.const(val=W_val, name="const_W") + + # bias + b_val = np.array([-0.5, 0.5]).astype(np.float32) + b = mb.const(val=b_val, name="const_b") + + return mb.linear(x=x, weight=W, bias=b, name="lin") + + logger.info("prog:\n" + str(prog)) + + mlmodel = ct.convert(prog, source="milinternal", convert_to="neuralnetwork") + + feed_dict = { + "x": np.random.rand(batch_size, input_dim).astype(np.float32), + } + assert mlmodel is not None + + if ct.utils._is_macos(): + prediction = mlmodel.predict(feed_dict) + assert len(prediction) == 1 + + +def test_conv_example(): + batch, C_in, C_out, H, W = 2, 2, 3, 7, 10 + kH, kW = 3, 5 + img_shape, seq_shape = (batch, C_in, H, W), (batch, C_in, H) + + @mb.program( + input_specs=[mb.TensorSpec(shape=img_shape), mb.TensorSpec(shape=seq_shape),] + ) + def prog(img, seq): + ## 2D convolution + # Weight + W_2d = np.random.rand(C_out, C_in, kH, kW).astype(np.float32) + W_2d = mb.const(val=W_2d, name="const_W") + + # Test 1: provide only required arguments. + conv1 = mb.conv(x=img, weight=W_2d, pad_type="valid") + logger.info("conv1 shape: {}".format(conv1.shape)) + + # Test 2: stride > 1 + conv2 = mb.conv(x=img, weight=W_2d, pad_type="valid", strides=[2, 3]) + logger.info("conv2 shape: {}".format(conv2.shape)) + + # Test 3: same padding + conv3 = mb.conv(x=img, weight=W_2d, pad_type="same", strides=[2, 3]) + logger.info("conv3 shape: {}".format(conv3.shape)) + + # Test max_pool + pool1 = mb.max_pool( + x=img, kernel_sizes=[kH, kW], pad_type="valid", strides=[2, 3] + ) + logger.info("pool1 shape: {}".format(pool1.shape)) + + # Test max_pool + pool2 = mb.max_pool( + x=img, kernel_sizes=[kH, kW], pad_type="same", strides=[2, 3] + ) + logger.info("pool2 shape: {}".format(pool2.shape)) + + ## 1D convolution + W_1d = np.random.rand(C_out, C_in, kH).astype(np.float32) + W_1d = mb.const(val=W_1d, name="const_W_1d") + logger.info("W_1d val: {}".format(W_1d.val)) + + # Test 4: provide only required arguments for 1D. 
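+        # With pad_type="valid" and the default stride of 1, the output length
+        # is H - kH + 1 = 7 - 3 + 1 = 5, so conv4 is expected to have shape
+        # (batch, C_out, 5) = (2, 3, 5).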
+ conv4 = mb.conv(x=seq, weight=W_1d, pad_type="valid") + + logger.info("conv4 shape: {}".format(conv4.shape)) + + return conv1, conv2, conv3, pool1, pool2, conv4 + + # rdar://105988903 ([Infra] re-enable the test_conv_example unit test on M1 with compute_units=ALL) + mlmodel = ct.convert(prog, source="milinternal", convert_to="neuralnetwork", compute_units=ct.ComputeUnit.CPU_ONLY) + + feed_dict = { + "img": np.random.rand(*img_shape).astype(np.float32), + "seq": np.random.rand(*seq_shape).astype(np.float32), + } + assert mlmodel is not None + + if ct.utils._is_macos(): + prediction = mlmodel.predict(feed_dict) + assert len(prediction) == 6 + + +def test_while_example(): + def body(a, b): + return mb.add(x=a, y=b), b + + def cond(a, b): + a_mean = mb.reduce_mean(x=a, axes=[0, 1]) + b_mean = mb.reduce_mean(x=b, axes=[0, 1]) + return mb.less(x=a_mean, y=b_mean) + + @mb.program( + input_specs=[mb.TensorSpec(shape=(1, 2)), mb.TensorSpec(shape=(1, 2)),] + ) + def prog(a, b): + return mb.while_loop(_cond=cond, _body=body, loop_vars=(a, b)) + + logger.info("prog:\n" + str(prog)) + + mlmodel = ct.convert(prog, source="milinternal", convert_to="neuralnetwork") + + feed_dict = { + "a": np.random.rand(1, 2).astype(np.float32), + "b": np.random.rand(1, 2).astype(np.float32), + } + assert mlmodel is not None + + if ct.utils._is_macos(): + prediction = mlmodel.predict(feed_dict) + assert len(prediction) == 2 + +def test_reserved_node_names(): + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + return mb.square(x=x, name="tensor") + + mlmodel = ct.convert(prog, source="milinternal", convert_to="mlprogram") + + feed_dict = { + "x": np.random.rand(10, 20).astype(np.float32), + } + assert mlmodel is not None + + if ct.utils._is_macos(): + prediction = mlmodel.predict(feed_dict) + assert len(prediction) == 1 + +def get_simple_topk_program(opset_version=None): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 4, 4))], opset_version=opset_version) + def prog(x): + x = mb.topk(x=x, k=1, axis=-1, ascending=True) + return x + return prog + +def get_simple_pixel_unshuffle_program(opset_version=None): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 4, 4))], opset_version=opset_version) + def prog(x): + x = mb.pixel_unshuffle(x=x, downscale_factor=np.uint32(2)) + return x + return prog + +def get_simple_topk_pixel_unshuffle_program(opset_version=None): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 4, 4))], opset_version=opset_version) + def prog(x): + x = mb.pixel_unshuffle(x=x, downscale_factor=np.uint32(2)) + x = mb.topk(x=x, k=1, axis=-1, ascending=True) + return x + return prog + +def get_simple_nested_block_program(opset_version=None): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 4, 4))], opset_version=opset_version) + def prog(x): + def true_fn(): + topk, _ = mb.topk(x=x, k=1, axis=-1, ascending=True) + return mb.add(x=topk, y=1.) + + def false_fn(): + topk, _ = mb.topk(x=x, k=1, axis=-1, ascending=True) + return mb.add(x=topk, y=2.) 
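+        # Note on the predicate built below: shape(x) -> [1, 1, 4, 4]; taking
+        # the shape of that again -> [4] (the rank as a one-element tensor);
+        # squeeze yields the scalar 4, which casts to a truthy bool, so
+        # true_fn is the branch actually taken.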
+ + shape = mb.shape(x=x) + rank = mb.shape(x=shape) + pred = mb.squeeze(x=rank) + return mb.cond(pred=mb.cast(x=pred, dtype="bool"), _true_fn=true_fn, _false_fn=false_fn) + return prog + +class TestMLProgramVersionHandling: + + @staticmethod + def test_multi_versions_op_selection(): + ''' + Builder should pick up the right version of op based on opset_version + ''' + # pick up the oldest version (iOS13) topk by default + prog = get_simple_topk_program() + main_func = prog.functions["main"] + topk_op = main_func.find_ops(op_type="topk")[0] + assert topk_op.opset_version == ct.target.iOS13 + + # pick up iOS13 version topk + prog = get_simple_topk_program(opset_version=ct.target.iOS15) + main_func = prog.functions["main"] + topk_op = main_func.find_ops(op_type="topk")[0] + assert topk_op.opset_version == ct.target.iOS13 + + # pick up iOS16 version topk + prog = get_simple_topk_program(opset_version=ct.target.iOS16) + main_func = prog.functions["main"] + topk_op = main_func.find_ops(op_type="topk")[0] + assert topk_op.opset_version == ct.target.iOS16 + + @staticmethod + def test_pymil_front_end_conversion(): + prog = get_simple_topk_pixel_unshuffle_program(opset_version=ct.target.iOS16) + mlmodel = ct.convert(prog, minimum_deployment_target=ct.target.iOS16) + + @staticmethod + def test_nested_block_opset_version_selection(): + # pick up the oldest version (iOS13) topk by default + prog = get_simple_nested_block_program() + main_func = prog.functions["main"] + topk_ops = main_func.find_ops(op_type="topk") + assert all([topk.opset_version == ct.target.iOS13 for topk in topk_ops]) + + # pick up iOS16 version topk + prog = get_simple_nested_block_program(opset_version=ct.target.iOS16) + main_func = prog.functions["main"] + topk_ops = main_func.find_ops(op_type="topk") + assert all([topk.opset_version == ct.target.iOS16 for topk in topk_ops]) + + @staticmethod + def test_pymil_opset_version_inference(): + ''' + The program consist of pixel_unshuffle should be inferred as an iOS16 version program + ''' + prog = get_simple_pixel_unshuffle_program() + assert prog.functions["main"].opset_version == ct.target.iOS16 + + expected_err_str = ( + "Please update the minimum_deployment_target to coremltools.target.iOS16, " + "since op pixel_unshuffle is only available in opset coremltools.target.iOS16 or newer." + ) + with pytest.raises(ValueError, match=expected_err_str): + mlmodel = ct.convert(prog, convert_to="mlprogram") + + @staticmethod + def test_pymil_front_end_conversion_early_error_out(): + prog = get_simple_topk_pixel_unshuffle_program(opset_version=ct.target.iOS16) + expected_err_str = ( + "Please update the minimum_deployment_target to coremltools.target.iOS16, " + "since op pixel_unshuffle is only available in opset coremltools.target.iOS16 or newer." + ) + with pytest.raises(ValueError, match=expected_err_str): + mlmodel = ct.convert(prog, minimum_deployment_target=ct.target.iOS15) + + @staticmethod + def test_unsupported_op_early_error_out(): + ''' + We should error out at the point when Builder tries to add an op which is only supported in a newer spec version + ''' + expected_err_str = ( + "No available version for pixel_unshuffle in the coremltools.target.iOS15 opset. 
" + "Please update the minimum_deployment_target to at least coremltools.target.iOS16" + ) + with pytest.raises(ValueError, match=expected_err_str): + @mb.program(input_specs=[mb.TensorSpec(shape=(1, 1, 4, 4))], opset_version=ct.target.iOS15) + def prog(x): + x = mb.pixel_unshuffle(x=x, downscale_factor=np.uint32(2)) + return x + + @staticmethod + def test_bulid_non_compatible_program_early_error_out(): + ''' + `mb.program` API should detect potential non compatible ops in the program, and error out early + In this example, `pixel_unshuffle` is an iO16 op, and `topk` has iOS13 and iOS16 version. + If the builder version is not set, it is picking up the iOS13 version of topk, which would + potentially create an invalid program. + In this case, `mb.program` should error out, and tell the user to set `opset_version=target.iOS16` + ''' + expected_err_str = ( + "Op topk with an out of date version coremltools.target.iOS13 is detected. Please use @mb.program\(input_specs=..., opset_version=coremltools.target.iOS16\)" + ) + with pytest.raises(ValueError, match=expected_err_str): + get_simple_topk_pixel_unshuffle_program() + + @staticmethod + def test_type_domain_validation(): + ''' + The builder should error out early when detecting the input type violation against the defined type_domain + ''' + expected_err_str = ( + "In op, of type rsqrt, named rsqrt_0, the named input `epsilon` must have the same data type as the named input `x`. However, epsilon has dtype int32 whereas x has dtype fp32" + ) + with pytest.raises(ValueError, match=expected_err_str): + @mb.program(input_specs=[mb.TensorSpec(shape=(2,), dtype=types.fp32)]) + def prog(x): + res = mb.rsqrt(x=x, epsilon=1) + return res + + @staticmethod + def test_rank6_tensor_early_error_out(): + ''' + The builder should error out early when detecting a rank 6 (or higher) tensor which cannot be eliminated by graph passes + ''' + expected_err_str = ( + "Core ML only supports tensors with rank <= 5. Layer \"reshape_0\", with type \"reshape\", outputs a rank 6 tensor" + ) + with pytest.raises(ValueError, match=expected_err_str): + @mb.program(input_specs=[mb.TensorSpec(shape=(1,), dtype=types.fp32)]) + def prog(x): + res = mb.reshape(x=x, shape=(1, 1, 1, 1, 1, 1), name="reshape_0") + return res + ct.convert(prog, source="milinternal") + + @staticmethod + def test_rank5_list_early_error_out(): + ''' + The builder should error out early when detecting a list of rank 5 (or higher) tensors is created + ''' + expected_err_str = ( + "Core ML only supports list of elements with rank <= 4. Layer \"list_0\", with type \"make_list\", outputs a list of rank 5 tensors." + ) + with pytest.raises(ValueError, match=expected_err_str): + @mb.program(input_specs=[mb.TensorSpec(shape=(1,), dtype=types.fp32)]) + def prog(x): + ls = mb.make_list( + init_length=1, + dtype="fp32", + elem_shape=(1, 1, 1, 1, 1), + dynamic_length=True, + name="list_0", + ) + return ls + + diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/tests/test_types.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/tests/test_types.py new file mode 100644 index 00000000..134f64cb --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/tests/test_types.py @@ -0,0 +1,27 @@ +# Copyright (c) 2023, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import pytest + +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.types import type_mapping + + +class TestTypeMapping: + def test_promote_dtypes_basic(self): + assert type_mapping.promote_dtypes([types.int32, types.int32]) == types.int32 + assert type_mapping.promote_dtypes([types.int32, types.int64, types.int16]) == types.int64 + assert type_mapping.promote_dtypes([types.fp16, types.fp32, types.fp64]) == types.fp64 + assert type_mapping.promote_dtypes([types.fp16, types.int32, types.int64]) == types.fp16 + + @pytest.mark.parametrize( + "input_size", + [10, 10000], + ) + def test_promote_dtypes_different_input_sizes(self, input_size): + assert ( + type_mapping.promote_dtypes([types.int32, types.int64, types.int16] * input_size) + == types.int64 + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/__init__.py new file mode 100644 index 00000000..6cdc9fc3 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/__init__.py @@ -0,0 +1,33 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from .annotate import annotate, apply_delayed_types, class_annotate, delay_type +from .get_type_info import get_type_info +from .global_methods import global_remap +from .type_bool import bool, is_bool +from .type_complex import complex, complex64, complex128, is_complex +from .type_dict import dict, empty_dict +from .type_double import double, float, fp16, fp32, fp64, is_float +from .type_globals_pseudo_type import globals_pseudo_type +from .type_int import (int8, int16, int32, int64, is_int, uint, uint8, + uint16, uint32, uint64) +from .type_list import empty_list, is_list, list +from .type_mapping import (builtin_to_proto_types, builtin_to_string, + is_builtin, is_dict, is_primitive, is_scalar, + is_str, is_subtype, is_tensor, is_tuple, + np_dtype_to_py_type, nptype_from_builtin, + numpy_type_to_builtin_type, + numpy_val_to_builtin_val, promote_dtypes, + promote_types, proto_to_builtin_types, + string_to_builtin, type_to_builtin_type) +from .type_str import str +from .type_tensor import (is_compatible_type, is_tensor_and_is_compatible, + is_tensor_and_is_compatible_general_shape, tensor, + tensor_has_complete_shape) +from .type_tuple import tuple +from .type_unknown import unknown +from .type_void import void + +apply_delayed_types() diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/annotate.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/annotate.py new file mode 100644 index 00000000..0ccd104c --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/annotate.py @@ -0,0 +1,115 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + + +class delay_type_cls: + def __getattr__(self, t): + return t + + +# this delay type thingee is useful for class annotations. 
+# for instance: the following code is invalid because when the annotate +# function is invoked, the "double" class does not yet exist +# +# class double: +# @annotate(double, other=double) +# def __add__(self, other): +# +# So it is necessary to add one level of laziness and delay the type +# +# class double: +# @annotate(delay_type.double, other=delay_type.double) +# def __add__(self, other): +# +# This basically replaces the annotation with the string "double" which we will +# then replace with the actual type later +# +delay_type = delay_type_cls() + +annotated_function_list = [] +annotated_class_list = {} + + +class _invalid_placeholder_type: + pass + + +def annotate(return_type=_invalid_placeholder_type, **kwargs): + """ + A decorator that informs the compyler about the return type of a function + and a collection of hint for other variable names. These can include + - captured variables + - function arguments + - other variables within the function + + Ex: + + @annotate(compyler.double, a=compyler.double, b=compyler.double) + def add(a, b): + + In certain cases when the class members are annotated this does not work. + For instance this fails because the annotate decorator is called before + the class double is fully defined. + + class double: + @annotate(double, other=double) + def __add__(self, other): + + So it is necessary to add one level of laziness and delay the type + + @class_annotate() + class double: + @annotate(delay_type.double, other=delay_type.double) + def __add__(self, other): + + After which apply_delayed_types() must be called to fill in the delayed + type. + """ + global annotated_function_list + + def decorator(func): + global annotated_function_list + func.type_annotations = kwargs + if return_type is not _invalid_placeholder_type: + func.return_type = return_type + annotated_function_list += [func] + return func + + return decorator + + +def class_annotate(): + """ + Registers a class to be used by delay_type. See annotate() + """ + global annotated_class_list + + def decorator(cls): + global annotated_class_list + annotated_class_list[cls.__name__] = cls + return cls + + return decorator + + +def apply_delayed_types( + type_map=annotated_class_list, fnlist=annotated_function_list +): # pylint: disable=dangerous-default-value + """ + Apply all delayed types. See annotate() + """ + # pylint: disable=no-member + # type name is a dict from str to type + for func in fnlist: + if ( + hasattr(func, "return_type") + and isinstance(func.return_type, str) + and func.return_type in type_map + ): + func.return_type = type_map[func.return_type] + if hasattr(func, "type_annotations"): + for key in func.type_annotations: + if func.type_annotations[key] in type_map: + func.type_annotations[key] = type_map[func.type_annotations[key]] diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/get_type_info.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/get_type_info.py new file mode 100644 index 00000000..74d47139 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/get_type_info.py @@ -0,0 +1,59 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from .type_spec import FunctionType, Type +from .type_void import void + + +def get_python_method_type(py_function): + # given a python class method, parse the annotations to figure out the type + function_inputs = [] + function_output = get_type_info(void) + annotations = {} + if hasattr(py_function, "type_annotations"): + annotations = { + k: get_type_info(v) for k, v in py_function.type_annotations.items() + } + if hasattr(py_function, "return_type"): + function_output = get_type_info(py_function.return_type) + try: + if hasattr(py_function, "__func__"): + argcount = py_function.__func__.__code__.co_argcount + argnames = py_function.__func__.__code__.co_varnames[:argcount] + else: + argcount = py_function.__code__.co_argcount + argnames = py_function.__code__.co_varnames[:argcount] + except: + raise TypeError( + "Unable to derive type information from method %s. " + "You might have a misspecified type. Ex: use compyler.int and not int" + % py_function + ) + + for arg in argnames: + if arg in annotations: + function_inputs.append(annotations[arg]) + elif arg != "self": + raise TypeError( + "Function " + + str(py_function) + + " insufficient annotations. " + + arg + + " needs a type" + ) + typeinfo = FunctionType(function_inputs, function_output, py_function) + return typeinfo + + +def get_type_info(t): + if hasattr(t, "__type_info__"): + ret = t.__type_info__() + assert ret.python_class is not None + return ret + elif isinstance(t, type): + return Type(t.__name__, python_class=t) + elif hasattr(t, "__call__"): + return get_python_method_type(t) + raise TypeError("Unsupported type %s" % t) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/global_methods.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/global_methods.py new file mode 100644 index 00000000..b739beed --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/global_methods.py @@ -0,0 +1,49 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + + +""" +This defines a list of all the "global methods" like len. Or type cast +operators like int, list, double, etc. + +The difficulty with some of these methods is that they don't have fixed types. +For instance len(x) allows x to be list or a dictionary. + +However we don't support function overloading based on types, and we don't +intend to. (It is complicated, requires the parser to be far more intelligent +and do good type inference; will either require genre to support overloading +or do name mangling. + +The final quirk is that we probably should not call these functions "len" +or "int" because that will conflict with the existing python methods. 
+ +So what we will simply do is to rewrite them to things like __len__, __str__ +and __int__ and __double__ +""" + +global_remap = { + "len": "__len__", + "str": "__str__", + "int": "__int__", + "double": "__double__", + "float": "__double__", + "bool": "__bool__", + "log": "__log__", + "exp": "__exp__", + "max": "__max__", + "min": "__min__", +} + +global_invremap = { + "__len__": "len", + "__str__": "str", + "__int__": "int", + "__double__": "float", + "__bool__": "bool", + "__log__": "math.log", + "__exp__": "math.exp", + "__max__": "max", + "__min__": "min", +} diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/symbolic.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/symbolic.py new file mode 100644 index 00000000..6222383a --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/symbolic.py @@ -0,0 +1,81 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as np +import sympy as sm + +k_used_symbols = {} +k_num_internal_syms = 0 + + +def is_compatible_symbolic_vector(val_a, val_b): + """ + compare two vector and check if they are compatible. + ([is0, 4], [9, 4]), ([is0, 1],[is1, is2]) are twp compatible examples. + """ + val_a = tuple(val_a) + val_b = tuple(val_b) + + if len(val_a) != len(val_b): + return False + + for a, b in zip(val_a, val_b): + if not is_symbolic(a) and not is_symbolic(b): + if a != b: + return False + return True + + +def is_symbolic(val): + return issubclass(type(val), sm.Basic) # pylint: disable=consider-using-ternary + + +def is_variadic(val): + return ( + issubclass(type(val), sm.Symbol) and val.name[0] == "*" + ) # pylint: disable=consider-using-ternary + + +def num_symbolic(val): + """ + Return the number of symbols in val + """ + if is_symbolic(val): + return 1 + elif isinstance(val, np.ndarray) and np.issctype(val.dtype): + return 0 + elif hasattr(val, "__iter__"): + return sum(any_symbolic(i) for i in val) + return 0 + + +def any_symbolic(val): + if is_symbolic(val): + return True + if isinstance(val, np.ndarray) and val.ndim == 0: + return is_symbolic(val[()]) + elif isinstance(val, np.ndarray) and np.issctype(val.dtype): + return False + elif isinstance(val, str): # string is iterable + return False + elif hasattr(val, "__iter__"): + return any(any_symbolic(i) for i in val) + return False + + +def any_variadic(val): + if is_variadic(val): + return True + elif isinstance(val, np.ndarray) and np.issctype(val.dtype): + return False + elif isinstance(val, str): # string is iterable + return False + elif hasattr(val, "__iter__"): + return any(any_variadic(i) for i in val) + return False + + +def isscalar(val): + return np.isscalar(val) or issubclass(type(val), sm.Basic) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_bool.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_bool.py new file mode 100644 index 00000000..9c74bd3d --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_bool.py @@ -0,0 +1,49 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from .annotate import annotate, class_annotate, delay_type +from .type_spec import Type + + +@class_annotate() +class bool: + def __init__(self, v=False): + self.val = v + + @classmethod + def __type_info__(cls): + return Type("bool", python_class=cls) + + @annotate(delay_type.bool, other=delay_type.bool) + def __eq__(self, other): + return bool(self.val == other.val) + + @annotate(delay_type.bool, other=delay_type.bool) + def __ne__(self, other): + return bool(self.val != other.val) + + @annotate(delay_type.bool) + def __not__(self, other): + return bool(not other.val) + + @annotate(delay_type.bool) + def __bool__(self): + return self.val + + @annotate(delay_type.int) + def __int__(self): + return int(self) + + @annotate(delay_type.double) + def __double__(self): + return float(self.val) + + @annotate(delay_type.str) + def __str__(self): + return str(self.val) + + +def is_bool(t): + return t is bool or isinstance(t, bool) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_complex.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_complex.py new file mode 100644 index 00000000..ed92614d --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_complex.py @@ -0,0 +1,171 @@ +# Copyright (c) 2022, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as np + +from coremltools import _logger as logger + +from .annotate import annotate, class_annotate, delay_type +from .type_bool import bool +from .type_spec import Type + + +def make_complex(width): + delay_type_complex = getattr(delay_type, "complex" + str(width)) + + @class_annotate() + class complex: + _width = width + + def __init__(self, v=0 + 0j): + self._val: np.complexfloating = ( + np.complex64(v) if width == 64 else np.complex128(v) + ) + + @property + def val(self): + return self._val + + @val.setter + def val(self, v): + from .type_mapping import ( + builtin_to_string, + nptype_from_builtin, + numpy_type_to_builtin_type, + ) + + if not isinstance(v, np.generic): + + if isinstance(v, np.ndarray) and v.ndim == 0: + # Rank zero tensor case. Use as a scalar. + self._val = v.item() + else: + raise ValueError( + f"Types should have zero-rank ndarray input, got {v} instead." 
+ ) + + elif isinstance(v, np.complexfloating): + v_type = numpy_type_to_builtin_type(v.dtype) + if v_type.get_bitwidth() <= self.get_bitwidth(): + self._val = v + else: + self._val = v.astype(nptype_from_builtin(self.__class__)) + logger.warning( + "Saving value type of {} into a builtin type of {}, might lose precision!".format( + v.dtype, builtin_to_string(self.__class__) + ) + ) + else: + self._val = v.astype(nptype_from_builtin(self.__class__)) + logger.warning( + "Saving value type of {} into a builtin type of {}, might be incompatible or " + "loses precision!".format( + v.dtype, builtin_to_string(self.__class__) + ) + ) + + @classmethod + def __type_info__(cls): + return Type("complex" + str(cls._width), python_class=cls) + + @classmethod + def get_bitwidth(cls): + return cls._width + + @annotate(delay_type_complex, other=delay_type_complex) + def __add__(self, other): + assert isinstance(other, complex) + return complex(self.val + other.val) + + @annotate(delay_type_complex, other=delay_type_complex) + def __sub__(self, other): + assert isinstance(other, complex) + return complex(self.val - other.val) + + @annotate(delay_type_complex, other=delay_type_complex) + def __mul__(self, other): + assert isinstance(other, complex) + return complex(self.val * other.val) + + @annotate(delay_type_complex, other=delay_type_complex) + def __div__(self, other): + assert isinstance(other, complex) + return complex(self.val / other.val) + + @annotate(delay_type_complex, other=delay_type_complex) + def __mod__(self, other): + raise ValueError("Can't mod complex numbers.") + + @annotate(delay_type.bool, other=delay_type_complex) + def __lt__(self, other): + return bool(self.val < other.val) + + @annotate(delay_type.bool, other=delay_type_complex) + def __gt__(self, other): + return bool(self.val > other.val) + + @annotate(delay_type.bool, other=delay_type_complex) + def __le__(self, other): + return bool(self.val <= other.val) + + @annotate(delay_type.bool, other=delay_type_complex) + def __ge__(self, other): + return bool(self.val >= other.val) + + @annotate(delay_type.bool, other=delay_type_complex) + def __eq__(self, other): + return bool(self.val == other.val) + + @annotate(delay_type.bool, other=delay_type_complex) + def __ne__(self, other): + return bool(self.val != other.val) + + @annotate(delay_type.bool) + def __bool__(self): + return self.val + + @annotate(delay_type.int) + def __int__(self): + logger.warning( + "ComplexWarning: Casting complex to real discards the imaginary part." + ) + return int(np.real(self.val)) + + @annotate(delay_type_complex) + def __complex__(self): + return complex(self.val) + + @annotate(delay_type.str) + def __str__(self): + return str(self.val) + + @annotate(delay_type_complex) + def __log__(self): + # The `math.log` doesn't support complex numbers yet. + return np.log(self.val) + + @annotate(delay_type_complex) + def __exp__(self): + return np.exp(self.val) + + @annotate(delay_type_complex) + def __neg__(self): + return complex(-self.val) + + complex.__name__ = "complex%d" % complex.get_bitwidth() + return complex + + +# We keep consistent with PyTorch and Tensorflow: +# - complex64 consists of a fp32 real and a fp32 imag. +# - complex128 consists of a fp64 real and a fp64 imag. 
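+#
+# A minimal usage sketch (illustrative only, not part of the API surface;
+# relies on the `np` import above):
+#
+#     z = complex64()
+#     z.val = np.complex64(1 + 2j)   # same bitwidth: stored as-is
+#     z.val = np.complex128(1 + 2j)  # wider type: downcast to complex64 with a warning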
+complex64 = make_complex(64) +complex128 = make_complex(128) +complex = complex64 + + +def is_complex(t): + complex_types_set = (complex64, complex128) + return (t in complex_types_set) or isinstance(t, complex_types_set) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_dict.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_dict.py new file mode 100644 index 00000000..bf711211 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_dict.py @@ -0,0 +1,62 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from . import type_bool, type_int +from .annotate import annotate +from .get_type_info import get_type_info +from .type_spec import Type +from .type_void import void + + +def memoize(f): + memo = {} + + def helper(x, y): + if (x, y) not in memo: + memo[(x, y)] = f(x, y) + return memo[(x, y)] + + return helper + + +class empty_dict: + @classmethod + def __type_info__(cls): + return Type("empty_dict", python_class=cls) + + +@memoize +def dict(keytype, valuetype): + class dict: + T = [keytype, valuetype] + + def __init__(self): + self.val = {} + + @classmethod + def __type_info__(cls): + return Type("dict", [get_type_info(keytype), get_type_info(valuetype)], cls) + + @annotate(T[1], key=T[0]) + def __getitem__(self, key): + assert isinstance(key, self.T[0]) + return self.val[key] + + @annotate(void, key=T[0], newval=T[1]) + def __setitem__(self, key, newval): + assert isinstance(key, self.T[0]) + assert isinstance(newval, self.T[1]) + self.val[key] = newval + + @annotate(type_int.int64) + def __len__(self): + return type_int.int64(len(self.val)) + + @annotate(type_bool.bool, key=T[0]) + def __contains__(self, key): + return key in self.val[key] + + dict.__template_name__ = "dict[" + keytype.__name__ + "," + valuetype.__name__ + "]" + return dict diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_double.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_double.py new file mode 100644 index 00000000..6c3024df --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_double.py @@ -0,0 +1,162 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import math + +import numpy as np + +from coremltools import _logger as logger + +from .annotate import annotate, class_annotate, delay_type +from .type_bool import bool +from .type_spec import Type + + +def make_float(width): + delay_type_float = getattr(delay_type, "fp" + str(width)) + + @class_annotate() + class double: + _width = width + + def __init__(self, v=0.0): + self._val = v + + @property + def val(self): + return self._val + + @val.setter + def val(self, v): + from .type_mapping import (builtin_to_string, nptype_from_builtin, + numpy_type_to_builtin_type) + + if not isinstance(v, np.generic): + + if isinstance(v, np.ndarray) and v.ndim == 0: + # Rank zero tensor case. Use as a scalar. + self._val = v.item() + else: + raise ValueError( + f"Types should have zero-rank ndarray input, got {v} instead." 
+ ) + + elif isinstance(v, np.floating): + v_type = numpy_type_to_builtin_type(v.dtype) + if v_type.get_bitwidth() <= self.get_bitwidth(): + self._val = v + else: + self._val = v.astype(nptype_from_builtin(self.__class__)) + logger.warning( + "Saving value type of {} into a builtin type of {}, might lose precision!".format( + v.dtype, builtin_to_string(self.__class__) + ) + ) + else: + self._val = v.astype(nptype_from_builtin(self.__class__)) + logger.warning( + "Saving value type of {} into a builtin type of {}, might be incompatible or loses precision!".format( + v.dtype, builtin_to_string(self.__class__) + ) + ) + + @classmethod + def __type_info__(cls): + return Type("fp" + str(cls._width), python_class=cls) + + @classmethod + def get_bitwidth(cls): + return cls._width + + @annotate(delay_type_float, other=delay_type_float) + def __add__(self, other): + assert isinstance(other, double) + return double(self.val + other.val) + + @annotate(delay_type_float, other=delay_type_float) + def __sub__(self, other): + assert isinstance(other, double) + return double(self.val - other.val) + + @annotate(delay_type_float, other=delay_type_float) + def __mul__(self, other): + assert isinstance(other, double) + return double(self.val * other.val) + + @annotate(delay_type_float, other=delay_type_float) + def __div__(self, other): + assert isinstance(other, double) + return double(self.val / other.val) + + @annotate(delay_type_float, other=delay_type_float) + def __mod__(self, other): + assert isinstance(other, double) + return double(self.val % other.val) + + @annotate(delay_type.bool, other=delay_type_float) + def __lt__(self, other): + return bool(self.val < other.val) + + @annotate(delay_type.bool, other=delay_type_float) + def __gt__(self, other): + return bool(self.val > other.val) + + @annotate(delay_type.bool, other=delay_type_float) + def __le__(self, other): + return bool(self.val <= other.val) + + @annotate(delay_type.bool, other=delay_type_float) + def __ge__(self, other): + return bool(self.val >= other.val) + + @annotate(delay_type.bool, other=delay_type_float) + def __eq__(self, other): + return bool(self.val == other.val) + + @annotate(delay_type.bool, other=delay_type_float) + def __ne__(self, other): + return bool(self.val != other.val) + + @annotate(delay_type.bool) + def __bool__(self): + return self.val != 0 + + @annotate(delay_type.int) + def __int__(self): + return int(self) + + @annotate(delay_type_float) + def __double__(self): + return float(self.val) + + @annotate(delay_type.str) + def __str__(self): + return str(self.val) + + @annotate(delay_type_float) + def __log__(self): + return math.log(self.val) + + @annotate(delay_type_float) + def __exp__(self): + return math.exp(self.val) + + @annotate(delay_type_float) + def __neg__(self): + return double(-self.val) + + double.__name__ = "fp%d" % double.get_bitwidth() + return double + + +fp16 = make_float(16) +fp32 = make_float(32) +fp64 = make_float(64) +float = fp32 +double = fp64 + + +def is_float(t): + return any(t is i or isinstance(t, i) for i in [fp16, fp32, fp64]) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_globals_pseudo_type.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_globals_pseudo_type.py new file mode 100644 index 00000000..b849ba95 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_globals_pseudo_type.py @@ -0,0 +1,12 @@ +# Copyright (c) 2020, Apple Inc. 
All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from .type_spec import Type + + +class globals_pseudo_type: + @classmethod + def __type_info__(cls): + return Type("globals", python_class=cls) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_int.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_int.py new file mode 100644 index 00000000..132ee9f1 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_int.py @@ -0,0 +1,177 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import math + +import numpy as np +import sympy as sm + +from coremltools import _logger as logger + +from .annotate import annotate, class_annotate, delay_type +from .type_bool import bool +from .type_spec import Type + + +def make_int(width, unsigned): + delay_type_int = getattr(delay_type, unsigned + "int" + str(width)) + + @class_annotate() + class int: + _width = width + _unsigned = unsigned + + @annotate(v=delay_type_int) + def __init__(self, v=0): + self._val = v + + @property + def val(self): + return self._val + + @val.setter + def val(self, v): + from .type_mapping import (builtin_to_string, nptype_from_builtin, + numpy_type_to_builtin_type) + + if not isinstance(v, (np.generic, sm.Basic)): + raise ValueError( + "types should have value of numpy type or Symbols, got {} instead".format( + type(v) + ) + ) + + if isinstance(v, sm.Basic): + self._val = v + elif isinstance(v, np.integer): + v_type = numpy_type_to_builtin_type(v.dtype) + if v_type.get_bitwidth() <= self.get_bitwidth() and ( + v >= 0 or v < 0 and not self.is_unsigned() + ): + self._val = v + else: + self._val = v.astype(nptype_from_builtin(self.__class__)) + logger.warning( + "Saving value type of {} into a builtin type of {}, might overflow or loses precision!".format( + v.dtype, builtin_to_string(self.__class__) + ) + ) + else: + self._val = v.astype(nptype_from_builtin(self.__class__)) + logger.warning( + "Saving value type of {} into a builtin type of {}, might be incompatible or loses precision!".format( + v.dtype, builtin_to_string(self.__class__) + ) + ) + + @classmethod + def __type_info__(cls): + return Type(cls._unsigned + "int" + str(cls._width), python_class=cls) + + @classmethod + def get_bitwidth(cls): + return cls._width + + @classmethod + def is_unsigned(cls): + return cls._unsigned == "u" + + @annotate(delay_type_int, other=delay_type_int) + def __add__(self, other): + assert isinstance(other, int) + return int(self.val + other.val) + + @annotate(delay_type_int, other=delay_type_int) + def __sub__(self, other): + assert isinstance(other, int) + return int(self.val - other.val) + + @annotate(delay_type_int, other=delay_type_int) + def __mul__(self, other): + assert isinstance(other, int) + return int(self.val * other.val) + + @annotate(delay_type_int, other=delay_type_int) + def __div__(self, other): + assert isinstance(other, int) + return int(self.val // other.val) + + @annotate(delay_type_int, other=delay_type_int) + def __mod__(self, other): + assert isinstance(other, int) + return int(self.val % other.val) + + @annotate(delay_type.bool, other=delay_type_int) + def __lt__(self, other): + return 
bool(self.val < other.val) + + @annotate(delay_type.bool, other=delay_type_int) + def __gt__(self, other): + return bool(self.val > other.val) + + @annotate(delay_type.bool, other=delay_type_int) + def __le__(self, other): + return bool(self.val <= other.val) + + @annotate(delay_type.bool, other=delay_type_int) + def __ge__(self, other): + return bool(self.val >= other.val) + + @annotate(delay_type.bool, other=delay_type_int) + def __eq__(self, other): + return bool(self.val == other.val) + + @annotate(delay_type.bool, other=delay_type_int) + def __ne__(self, other): + return bool(self.val != other.val) + + @annotate(delay_type.bool) + def __bool__(self): + return self.val != 0 + + @annotate(delay_type_int) + def __int__(self): + return int(self) + + @annotate(delay_type.double) + def __double__(self): + return float(self.val) + + @annotate(delay_type.str) + def __str__(self): + return str(self.val) + + @annotate(delay_type.double) + def __log__(self): + return math.log(self.val) + + @annotate(delay_type.double) + def __exp__(self): + return math.exp(self.val) + + @annotate(delay_type_int) + def __neg__(self): + return int(-self.val) + + return int + + +int8 = make_int(8, "") +int16 = make_int(16, "") +int32 = make_int(32, "") +int64 = make_int(64, "") + +uint8 = make_int(8, "u") +uint16 = make_int(16, "u") +uint32 = make_int(32, "u") +uint64 = make_int(64, "u") +uint = uint64 + + +def is_int(t): + return any( + t is i or isinstance(t, i) + for i in [int8, int16, int32, int64, uint8, uint16, uint32, uint64] + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_list.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_list.py new file mode 100644 index 00000000..5d3134c2 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_list.py @@ -0,0 +1,69 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from . 
import type_int +from .annotate import annotate +from .get_type_info import get_type_info +from .type_spec import Type +from .type_void import void + + +def memoize(f): + memo = {} + + def helper(x, init_length=None, dynamic_length=True): + if x not in memo: + memo[(x, init_length, dynamic_length)] = f(x, init_length, dynamic_length) + return memo[(x, init_length, dynamic_length)] + + return helper + + +class empty_list: + @classmethod + def __type_info__(cls): + return Type("empty_list", python_class=cls) + + +@memoize +def list(arg, init_length=None, dynamic_length=True): + class list: + T = [arg, init_length, dynamic_length] + + def __init__(self): + self.val = [] + + @classmethod + def __type_info__(cls): + return Type("list", [get_type_info(arg)], python_class=cls) + + @annotate(void, other=T[0]) + def append(self, other): + assert isinstance(other, self.T[0]) + self.val.append(other) + + @annotate(T[0], index=type_int.int64) + def __getitem__(self, index): + assert isinstance(index, type_int.int64) + return self.val[index.val] + + @annotate(void, index=type_int.int64, newval=T[0]) + def __setitem__(self, index, newval): + assert isinstance(index, type_int.int64) + assert isinstance(newval, self.T[0]) + self.val[index.val] = newval + + @annotate(type_int.int64) + def __len__(self): + return type_int.int64(len(self.val)) if self.T[1] is None else self.T[1] + + list.__template_name__ = "list[" + arg.__name__ + "]" + return list + + +def is_list(t): + if t is None: + return False + return get_type_info(t).name == "list" diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_mapping.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_mapping.py new file mode 100644 index 00000000..9dbdcd9c --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_mapping.py @@ -0,0 +1,449 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause +import numpy as _np +import numpy as np +import sympy as sm + +import coremltools.proto.MIL_pb2 as _mil_pm + +from .get_type_info import get_type_info +from .type_bool import bool as types_bool +from .type_bool import is_bool +from .type_complex import complex64 as types_complex64 +from .type_complex import complex128 as types_complex128 +from .type_complex import is_complex +from .type_double import fp16 as types_fp16 +from .type_double import fp32 as types_fp32 +from .type_double import fp64 as types_fp64 +from .type_double import is_float +from .type_int import int8 as types_int8 +from .type_int import int16 as types_int16 +from .type_int import int32 as types_int32 +from .type_int import int64 as types_int64 +from .type_int import is_int +from .type_int import uint8 as types_uint8 +from .type_int import uint16 as types_uint16 +from .type_int import uint32 as types_uint32 +from .type_int import uint64 as types_uint64 +from .type_list import is_list +from .type_str import str as types_str +from .type_unknown import unknown + +_types_TO_NPTYPES = { + types_bool: np.bool_, + types_int8: np.int8, + types_int16: np.int16, + types_int32: np.int32, + types_int64: np.int64, + types_uint8: np.uint8, + types_uint16: np.uint16, + types_uint32: np.uint32, + types_uint64: np.uint64, + types_fp16: np.float16, + types_fp32: np.float32, + types_fp64: np.float64, + types_complex64: np.complex64, + types_complex128: np.complex128, + types_str: np.str_, +} + +_types_TO_STRINGS = { + types_bool: "bool", + types_int8: "int8", + types_int16: "int16", + types_int32: "int32", + types_int64: "int64", + types_uint8: "uint8", + types_uint16: "uint16", + types_uint32: "uint32", + types_uint64: "uint64", + types_fp16: "fp16", + types_fp32: "fp32", + types_fp64: "fp64", + types_complex64: "complex64", + types_complex128: "complex128", + types_str: "string", +} + +builtin_to_proto_types = { + # bool: + types_bool: _mil_pm.BOOL, + + # fp + types_fp16: _mil_pm.FLOAT16, + types_fp32: _mil_pm.FLOAT32, + types_fp64: _mil_pm.FLOAT64, + + # int + types_uint8: _mil_pm.UINT8, + types_int8: _mil_pm.INT8, + + types_uint16: _mil_pm.UINT16, + types_int16: _mil_pm.INT16, + + types_uint32: _mil_pm.UINT32, + types_int32: _mil_pm.INT32, + + types_uint64: _mil_pm.UINT64, + types_int64: _mil_pm.INT64, + + # str + types_str: _mil_pm.STRING, +} + +proto_to_builtin_types = {v: k for k, v in builtin_to_proto_types.items()} + + +def np_dtype_to_py_type(np_dtype): + # Can't use dict, as hash(np.int32) != hash(val.dtype) + if np_dtype in [np.int32, np.int64]: + return int + if np_dtype in [bool, np.bool_]: + return bool + if np_dtype in [np.float32, np.float64]: + return float + if np_dtype in [np.complex64, np.complex128]: + return complex + raise NotImplementedError('{} is not supported'.format(np_dtype)) + + +_STRINGS_TO_types = {v: k for k, v in _types_TO_STRINGS.items()} + + +def string_to_builtin(s): + """ + Given a str, return its corresponding builtin type. + """ + return _STRINGS_TO_types.get(s, None) + + +def builtin_to_string(builtin_type): + """ + Given a builtin type, return its corresponding string representation. + """ + return _types_TO_STRINGS.get(builtin_type, None) + + +def nptype_from_builtin(btype): + """ + Given a builtin type, return its corresponding Numpy dtype. 
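+
+    For example (illustrative, per the _types_TO_NPTYPES table above):
+
+        >>> nptype_from_builtin(types_fp32)
+        <class 'numpy.float32'>
+        >>> nptype_from_builtin(types_int8)
+        <class 'numpy.int8'>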
+ """ + return _types_TO_NPTYPES.get(btype, None) + + +def promote_types(dtype1, dtype2): + """ + Get the smallest type to which the given scalar types can be cast. + + Args: + dtype1 (builtin): + dtype2 (builtin): + + Returns: + A builtin datatype or None. + + Examples: + >>> promote_types(int32, int64) + builtin('int64') + + >>> promote_types(fp16, fp32) + builtin('fp32') + + >>> promote_types(fp16, int32) + builtin('fp16') + """ + nptype1 = nptype_from_builtin(dtype1) + nptype2 = nptype_from_builtin(dtype2) + # Circumvent the undesirable np type promotion: + # >> np.promote_types(np.float32, np.int32) + # dtype('float64') + if np.issubdtype(nptype1, np.floating) and np.issubdtype(nptype2, np.signedinteger): + nppromoted = nptype1 + elif np.issubdtype(nptype2, np.floating) and np.issubdtype( + nptype1, np.signedinteger + ): + nppromoted = nptype2 + else: + nppromoted = np.promote_types(nptype1, nptype2) + return numpy_type_to_builtin_type(nppromoted) + + +def promote_dtypes(dtypes): + """ + Get the smallest promoted dtype, to which all scalar dtypes (provided through dtypes list argument) can be casted. + Args: + List [dtype (builtin)] + Returns: + A builtin datatype or None. + + Examples: + >>> promote_dtypes([int32, int64, int16]) + builtin('int64') + + >>> promote_dtypes([fp16, fp32, fp64]) + builtin('fp64') + + >>> promote_dtypes([fp16, int32, int64]) + builtin('fp16') + + """ + if not isinstance(dtypes, (list, tuple)) or len(dtypes) < 1: + raise ValueError("dtypes needs to be a list/tuple of at least 1 element") + + # Deduplicate inputs to avoid redundant calculations. + # Without dedup, too large input will cause maximum recursion depth exceeded error. + dtypes = list(set(dtypes)) + + if len(dtypes) == 1: + return dtypes[0] + + return promote_types(dtypes[0], promote_dtypes(dtypes[1:])) + + +def is_primitive(btype): + """ + Is the indicated builtin type a primitive? + """ + return ( + btype is types_bool + or btype is types_str + or is_float(btype) + or is_int(btype) + or is_complex(btype) + ) + + +def is_scalar(btype): + """ + Is the given builtin type a scalar integer, float, boolean or string? + """ + return ( + is_bool(btype) + or is_int(btype) + or is_float(btype) + or is_str(btype) + or is_complex(btype) + ) + + +def is_tensor(tensor_type): + if tensor_type is None: + return False + try: + type_info = get_type_info(tensor_type).name + except TypeError: + return False + return type_info == "tensor" + + +def is_str(t): + if t is None: + return False + try: + type_info = get_type_info(t).name + except TypeError: + return False + return type_info == "str" + + +def is_tuple(t): + if t is None: + return False + try: + type_info = get_type_info(t).name + except TypeError: + return False + return type_info == "tuple" + + +def is_dict(t): + if t is None: + return False + try: + type_info = get_type_info(t).name + except TypeError: + return False + return type_info == "dict" + + +def is_builtin(t): + return is_scalar(t) or is_tensor(t) or is_str(t) or is_tuple(t) + + +# Converts a numpy type to its types equivalent. +# Supports both dtypes and numpy primitive types. +def numpy_type_to_builtin_type(nptype): + # If this is a data type object, use the corresponding scalar data type. + if np.issubclass_(type(nptype), np.dtype): + nptype = nptype.type + + if np.issubclass_(nptype, (bool, np.bool_)): + # numpy as 2 bool types it looks like. what is the difference? 
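+        # (np.bool_ is numpy's boolean scalar type, while bool is Python's
+        # built-in; both are mapped to the same builtin bool type here.)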
+ return types_bool + # Because np.uint is a subclass of int, + # we need to first check for np.uint before + # checking for int + elif np.issubclass_(nptype, np.uint8): + return types_uint8 + elif np.issubclass_(nptype, np.int8): + return types_int8 + elif np.issubclass_(nptype, np.uint16): + return types_uint16 + elif np.issubclass_(nptype, np.int16): + return types_int16 + elif np.issubclass_(nptype, np.uint32): + return types_uint32 + elif np.issubclass_(nptype, np.int32): + return types_int32 + elif np.issubclass_(nptype, np.uint64): + return types_uint64 + elif np.issubclass_(nptype, np.int64): + return types_int64 + elif np.issubclass_(nptype, int) or nptype == int: + # Catch all int + return types_int32 + elif np.issubclass_(nptype, np.object_): + # symbolic shape is considered int32 + return types_int32 + elif np.issubclass_(nptype, np.float16): + return types_fp16 + elif ( + np.issubclass_(nptype, (np.float32, np.single)) or nptype == float + ): + return types_fp32 + elif np.issubclass_(nptype, (np.float64, np.double)): + return types_fp64 + elif np.issubclass_(nptype, np.complex64): + return types_complex64 + elif np.issubclass_(nptype, (np.complex128, complex)): + return types_complex128 + elif np.issubclass_(nptype, (str, np.string_, np.str_)): + return types_str + else: + raise TypeError(f"Unsupported numpy type: {nptype}.") + + +# Tries to get the equivalent builtin type of a +# numpy or python type. +def type_to_builtin_type(type): + # Infer from numpy type if it is one + if type.__module__ == np.__name__: + return numpy_type_to_builtin_type(type) + + # Otherwise, try to infer from a few generic python types + if np.issubclass_(type, bool): + return types_bool + elif np.issubclass_(type, int): + return types_int32 + elif np.issubclass_(type, str): + return types_str + elif np.issubclass_(type, float): + return types_fp32 + elif np.issubclass_(type, complex): + return types_complex64 + else: + raise TypeError("Could not determine builtin type for " + str(type)) + + +def numpy_val_to_builtin_val(npval): + if np.isscalar(npval): + ret_type = type_to_builtin_type(type(npval)) + ret = ret_type() + ret.val = npval + return ret, ret_type + else: + builtintype = numpy_type_to_builtin_type(npval.dtype) + from . import tensor as types_tensor + + ret_type = types_tensor(builtintype, npval.shape) + ret = ret_type() + ret.val = npval + return ret, ret_type + + +def is_subtype_tensor(type1, type2): + # requires primitive types match + if type1.get_primitive() != type2.get_primitive(): + return False + + shape1 = type1.get_shape() + shape2 = type2.get_shape() + # Same rank + if len(shape1) != len(shape2): + return False + + for d1, d2 in zip(shape1, shape2): + if d1 == d2: + continue + + # tensor with shape (3, s0) is not a subtype of tensor with shape (3, + # 1), but is a subtype of tensor with shape (3, s1) + d1_is_symbolic = issubclass(type(d1), sm.Basic) + d2_is_symbolic = issubclass(type(d2), sm.Basic) + if d1_is_symbolic and d2_is_symbolic: + continue + if d1_is_symbolic and not d2_is_symbolic: + return False + if not d1_is_symbolic and not d2_is_symbolic and d1 != d2: + return False + return True + + +def is_subtype(type1, type2): + """ + Return True if type1 is a subtype of type2. False otherwise. + """ + if type2 == unknown: + return True # any class is a subclass of unknown (None) type. 
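+    # For tensors, this delegates to is_subtype_tensor below: e.g. a tensor of
+    # shape (3, s0) is a subtype of one of shape (3, s1), but not of (3, 1).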
+ if is_list(type2): + return is_list(type1) and is_subtype(type1.T[0], type2.T[0]) + if is_tensor(type1) and is_tensor(type2): + return is_subtype_tensor(type1, type2) + return type1 == type2 + + +def np_val_to_py_type(val): + """Convert numpy val to python primitive equivalent. Ex: + + Given: val = np.array([True, False]) + Returns: [True, False] + + Given: val = np.array(32, dtype=np.int32) + Returns 32 + """ + if not isinstance(val, (_np.ndarray, _np.generic)): + return val + + if val.dtype in [_np.float16, _np.uint8, _np.int8, _np.uint32]: + return val.tobytes() + else: + # val is np.ndarray or np.generic + is_np_scalar = isinstance(val, _np.generic) or val.shape == () + py_type = np_dtype_to_py_type(val.dtype) + return py_type(val) if is_np_scalar else tuple(py_type(v) for v in val.flatten()) + + +def infer_complex_dtype(real_dtype, imag_dtype): + """Infers the complex dtype from real and imaginary part's dtypes.""" + promoted_dtype = promote_types(real_dtype, imag_dtype) + if promoted_dtype == types_fp32: + return types_complex64 + elif promoted_dtype == types_fp64: + return types_complex128 + else: + raise ValueError( + f"Unsupported real/imag dtype ({real_dtype}/{imag_dtype}) to construct a " + f"complex dtype." + ) + + +def infer_fp_dtype_from_complex(complex_dtype): + """Infers the fp dtype of real and imaginary part from the complex dtype.""" + if complex_dtype == types_complex64: + return types_fp32 + elif complex_dtype == types_complex128: + return types_fp64 + else: + raise ValueError(f"Unsupported complex dtype ({complex_dtype}).") diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_spec.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_spec.py new file mode 100644 index 00000000..ef46bd89 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_spec.py @@ -0,0 +1,89 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +class Type: + """ + - Type.name : A string with the name of the object + - Type.tparam : For classes with template parameters, (list, dict), this + contains a list of Type objects of the template parameters + - Type.python_class : The original python class implementing this type. 
+ Two Type objects compare equal + only on name and tparam and not python_class + """ + + __slots__ = ["name", "tparam", "python_class"] + + def __init__(self, name, tparam=None, python_class=None): + if tparam is None: + tparam = [] + assert isinstance(name, str) + assert isinstance(tparam, list) + self.name = name + self.tparam = tparam + self.python_class = python_class + + def __hash__(self): + return hash((self.name, tuple(self.tparam))) + + def __eq__(self, other): + return self.name == other.name and self.tparam == other.tparam + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + ret = self.name + if len(self.tparam) > 0: + ret += "[" + ",".join(repr(x) for x in self.tparam) + "]" + return ret + + def __str__(self): + return self.__repr__() + + def sexp(self): + if len(self.tparam) == 0: + return self.name + else: + ret = [self.name] + ret.append([a.sexp() if hasattr(a, "sexp") else a for a in self.tparam]) + return ret + + +class FunctionType: + """ + - FunctionType.inputs : A list of Type objects defining the types of the input + - FunctionType.output: A Type object defining the type of the output + - FunctionType.python_function : The original python function implementing + this type. Two FunctionType objects compare + equal only on inputs and output and not + python_function + """ + + __slots__ = ["inputs", "output", "python_function"] + + def __init__(self, inputs, output, python_function=None): + assert isinstance(inputs, list) + assert isinstance(output, (FunctionType, Type)) + self.inputs = inputs + self.output = output + self.python_function = python_function + + def __hash__(self): + return hash((tuple(self.inputs), self.output)) + + def __eq__(self, other): + return self.inputs == other.inputs and self.output == other.output + + def __repr__(self): + return "(" + ",".join(repr(x) for x in self.inputs) + ")->" + repr(self.output) + + def __str__(self): + return self.__repr__() + + def return_sexp(self): + return self.output.sexp() + + def inputs_sexp(self): + return [i.sexp() for i in self.inputs] diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_str.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_str.py new file mode 100644 index 00000000..98ddc177 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_str.py @@ -0,0 +1,22 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from .annotate import annotate, class_annotate, delay_type +from .type_spec import Type + + +@class_annotate() +class str: + def __init__(self, v=""): + self.val = v + + @classmethod + def __type_info__(cls): + return Type("str", python_class=cls) + + @annotate(delay_type.str, other=delay_type.str) + def __add__(self, other): + assert isinstance(other, str) + return str(self.val + other.val) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_tensor.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_tensor.py new file mode 100644 index 00000000..a56cf3cc --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_tensor.py @@ -0,0 +1,233 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as np +import sympy as sm + +from coremltools import _logger as logger + +from .get_type_info import get_type_info +from .type_mapping import (builtin_to_string, is_subtype, is_tensor, + nptype_from_builtin, numpy_type_to_builtin_type, + promote_types) +from .type_spec import Type + + +def memoize(f): + memo = {} + + def helper(x, y): + y = tuple(y) + if (x, y,) not in memo: + memo[(x, y,)] = f(x, y,) + return memo[(x, y,)] + + return helper + + +def canonical_shape(shape): + """ Return shape as tuple of int or Symbol. + + This utility function ensures the shape tuple + using a single integer type (to its best effort). + + Args: + shape: tuple(int|long|np.int*|Symbol|SymbolExpr...) + """ + + def try_cast(x): + try: + # In python2.7, long and int are different types. + # If we cast a long int whose value is out of the range of int, + # the result is still long, avoiding overflow: + # + # `type(2<<64) == long # true` + # `type(int(2<<64)) == long # true` + x = int(x) + except TypeError: + # ignore symbolic value (sm.Symbol or sm.Expr) + pass + return x + + return tuple(try_cast(x) for x in shape) + + +@memoize +def tensor(primitive, shape): + shape = canonical_shape(shape) + + class tensor: + T = [primitive, shape] + + def __init__(self): + self._val = [] + + @classmethod + def __type_info__(cls): + return Type( + "tensor", list(shape) + [get_type_info(primitive)], python_class=cls + ) + + @classmethod + def get_primitive(cls): + return primitive + + @classmethod + def get_shape(cls): + return shape + + @property + def val(self): + return self._val + + @val.setter + def val(self, v): + if not isinstance(v, np.ndarray): + raise ValueError( + "tensor should have value of type ndarray, got {} instead".format( + type(v) + ) + ) + + v_type = numpy_type_to_builtin_type(v.dtype) + promoted_type = promote_types(v_type, primitive) + if v_type == primitive or v.dtype == np.dtype("O"): + # np.array of symbolic has object type. Don't cast type. + self._val = v + elif promoted_type == primitive: + self._val = v.astype(nptype_from_builtin(primitive)) + else: + logger.warning( + "Saving value type of {} into a builtin type of {}, might lose precision!".format( + v.dtype, builtin_to_string(primitive) + ) + ) + self._val = v.astype(nptype_from_builtin(primitive)) + + tensor.__template_name__ = ( + "tensor[" + primitive.__name__ + "," + ",".join(str(s) for s in shape) + "]" + ) + tensor.__name__ = ( + "tensor[" + ",".join(str(s) for s in shape) + "," + primitive.__name__ + "]" + ) + return tensor + + +def tensor_has_complete_shape(tensor_type): + if not is_tensor(tensor_type): + return True + s = tensor_type.get_shape() + if -1 in s: + return False + elif len(s) == 0: + return False + else: + return True + +def is_tensor_and_is_compatible(tensor_type1, tensor_type2, allow_promotion=False): + """ + Try to find a tensor type compatible with both input types. + + Compatible means that the tensors have the same rank and matching or unspecified + dimensions. For example, (10, -1) is compatible with (-1, 20) with the compatible + shape (10, 20). + + Args: + tensor_type1 (types.tensor) + tensor_type2 (types.tensor) + allow_promotion (bool): If True, allow primitive types to be promoted. + + Returns: + A pair of (bool, type). 
If the given types are not tensor types with + (1) compatible shapes and (2) either identical primitive types or + allow_promition=True, return is False, None. Otherwise, return True + and the compatible shape. Note that the returned shape may + not be the same as either input. For example, + + is_tensor_and_is_compatible( + tensor[fp32,[10,-1]], + tensor[fp32,[-1,20]]) --> tensor[fp32, [10,20]] + """ + + if not is_tensor(tensor_type1) or not is_tensor(tensor_type2): + return False, None + shape1 = tensor_type1.get_shape() + shape2 = tensor_type2.get_shape() + + primitive_type = tensor_type1.get_primitive() + if primitive_type != tensor_type2.get_primitive(): + promoted_type = promote_types(primitive_type, tensor_type2.get_primitive()) + if allow_promotion: + primitive_type = promoted_type + else: + return False, promoted_type + + if len(shape1) == 0: + return True, tensor_type2 + if len(shape2) == 0: + return True, tensor_type1 + + if len(shape1) != len(shape2): + return False, None + + most_specific_shape = [] + for i in range(len(shape1)): + if shape1[i] == -1 or issubclass(type(shape1[i]), sm.Basic): + most_specific_shape.append(shape2[i]) + elif shape2[i] == -1 or issubclass(type(shape2[i]), sm.Basic): + most_specific_shape.append(shape1[i]) + elif shape1[i] == shape2[i]: + most_specific_shape.append(shape1[i]) + elif shape1[i] != shape2[i]: + return False, None + + return True, tensor(primitive_type, most_specific_shape) + +def is_tensor_and_is_compatible_general_shape(tensor_type1, tensor_type2): + # returns a pair of (bool, type) + # If Both are tensors, and have compatible shape, the first return is true + # The return will be the most general version of the tensor type. + # Note that this may not be either tensor types. i.e. + # + # is_tensor_and_is_compatible(tensor[fp32,[10,-1]] ,tensor[fp32,[-1,20]]) + # will return True, tensor[fp32, [-1,-1]] + + if not is_tensor(tensor_type1) or not is_tensor(tensor_type2): + return False, None + shape1 = tensor_type1.get_shape() + shape2 = tensor_type2.get_shape() + + if tensor_type1.get_primitive() != tensor_type2.get_primitive(): + return False, None + + if len(shape1) == 0: + return True, tensor_type2 + if len(shape2) == 0: + return True, tensor_type1 + + if len(shape1) != len(shape2): + return False, None + + most_general_shape = [] + for i in range(len(shape1)): + if shape1[i] == -1 or issubclass(type(shape1[i]), sm.Basic): + most_general_shape.append(shape1[i]) + elif shape2[i] == -1 or issubclass(type(shape2[i]), sm.Basic): + most_general_shape.append(shape2[i]) + elif shape1[i] == shape2[i]: + most_general_shape.append(shape1[i]) + elif shape1[i] != shape2[i]: + return False, None + + return True, tensor(tensor_type1.get_primitive(), most_general_shape) + +def is_compatible_type(type1, type2): + """ + Return if type1 and type2 are compatible. + """ + if not is_subtype(type1, type2): + is_comp, _ = is_tensor_and_is_compatible(type1, type2) + return is_comp + return True diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_tuple.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_tuple.py new file mode 100644 index 00000000..a2a8fe7d --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_tuple.py @@ -0,0 +1,53 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from . import type_int, type_unknown +from .annotate import annotate +from .get_type_info import get_type_info +from .type_spec import Type + +_global_tuple = tuple + + +def memoize(f): + memo = {} + + def helper(x): + x = _global_tuple(x) + if x not in memo: + memo[x] = f(x) + return memo[x] + + return helper + + +class empty_list: + @classmethod + def __type_info__(cls): + return Type("empty_list", python_class=cls) + + +@memoize +def tuple(args): + args = _global_tuple(i if i is not None else type_unknown.unknown for i in args) + + class tuple: + T = args + + def __init__(self): + self.val = [arg() for arg in args] + + @classmethod + def __type_info__(cls): + return Type("tuple", [get_type_info(arg) for arg in args], python_class=cls) + + @annotate(type_int.int64) + def __len__(self): + return len(args) + + tuple.__template_name__ = ( + "tuple[" + ",".join([get_type_info(arg).name for arg in args]) + "]" + ) + return tuple diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_unknown.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_unknown.py new file mode 100644 index 00000000..af940229 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_unknown.py @@ -0,0 +1,19 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from .type_spec import Type + + +class unknown: + """ + unknown is basically Any type. + """ + + @classmethod + def __type_info__(cls): + return Type("unknown", python_class=cls) + + def __init__(self, val=None): + self.val = val diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_void.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_void.py new file mode 100644 index 00000000..7abb9008 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/types/type_void.py @@ -0,0 +1,12 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from .type_spec import Type + + +class void: + @classmethod + def __type_info__(cls): + return Type("void", python_class=cls) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/var.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/var.py new file mode 100644 index 00000000..8af32bad --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/var.py @@ -0,0 +1,397 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from typing import Optional + +from coremltools.converters.mil.mil import types +from coremltools.converters.mil.mil.types import builtin_to_string +from coremltools.converters.mil.mil.types.symbolic import any_symbolic + + +class Var: + """ + Var represents the outputs of an Operation. 
Most Vars are derived from an + Operation (including const), and all Vars must have `sym_type`. + + Example Usage: + + from coremltools.converters.mil.mil import ( + Builder as mb, + Function, + types + ) + + func_inputs = {"a": mb.placeholder(shape=(1,2)), + "b": mb.placeholder(shape=(1,2)) } + with Function(func_inputs) as ssa_func: + a, b = ssa_func.inputs["a"], ssa_func.inputs["b"] + res = mb.add(x=a, y=b) # res is Var + assert types.is_tensor(res.sym_type) + assert res.rank == 2 + assert res.dtype == types.float # since a, b are by default float + + # value is not available at compile time in this case. If + # materializable, res.val would be a numpy / primitive value + assert res.val is None + + + Comment: Except InternalVar and Vars created in while_loop and by + placeholder, all Var should only be constructed by Operation to represent + outputs. + + Comment: Var hides the details of sym_type vs sym_val vs materialized + value, which was represented by 2 objects prior to refactoring. + + + # Properties: + + name: (str) + name in MIL proto NamedValueType. Name is assigned by the parent + Operation. + + sym_type [_sym_type]: (builtin type class) + All Var must have a (possibly symbolic) type, usually derived from + type inference of upstream ops or from default values in _Input. + + sym_val [_sym_val]: (builtin type instance) + Possibly symbolic value. + + val [_sym_val]: (np.ndarray or python primitive scalar) + Numpy (scalar / tensor) value. `val` is not None iff `sym_val` is + not None and does not contain symbols. Read-only. + + op [_op]: (Operation) + The Operation this Var is derived from. May not be None except + for InternalVar. Read-only. + + op_output_idx: (int) + Idx of the output from Operation corresponding to _Input. May be + None. + + child_ops [_child_ops]: list[Operation] + Ops that take this Var as an input. + + nonreplaceable_vars_upstream: set[Var] + Set that consists of nonreplaceable vars upstream + """ + + __slots__ = [ + "name", + "_sym_type", + "_sym_val", + "_op", + "op_output_idx", + "_child_ops", + "consuming_blocks", + "_nonreplaceable_vars_upstream", + ] + + def __init__( + self, + name, + sym_type, + sym_val=None, + op=None, + op_output_idx=None, + ): + """ + sym_type (builtin type) + sym_val (builtin value) + op (Operation) + op_output_idx (int) + """ + self.name = name + self._sym_type = sym_type + self._sym_val = sym_val + self._op = op + self.op_output_idx = op_output_idx + # An op can appear twice if it consumes a var twice (e.g., + # add(%1, %1), while_loop(loop_vars=(%1, %1)). + self._child_ops = list() + + # A variable may not be consumed by any op (i.e. len(self._child_ops) + # == 0) but is still used as block output. A var can be output of + # multiple blocks (e.g., both current block and nested blocks) + self.consuming_blocks = list() + + # replaceability + self._nonreplaceable_vars_upstream = set() + self._set_nonreplaceable_vars_upstream() + + @property + def nonreplaceable_vars_upstream(self): + return self._nonreplaceable_vars_upstream + + @nonreplaceable_vars_upstream.setter + def nonreplaceable_vars_upstream(self, val): + assert isinstance(val, set) + self._nonreplaceable_vars_upstream = val + + @staticmethod + def _is_nonreplaceable_var(var): + op = var.op + if op is None: + return False + return op.op_type.startswith("constexpr_") + + def _set_nonreplaceable_vars_upstream(self): + """ + A utility function to set the value of the "nonreplaceable_vars_upstream" property. 
+ If self is a non-replaceable var, then "nonreplaceable_vars_upstream" is a single element set, containing self. + Otherwise, it is a union of the "nonreplaceable_vars_upstream" sets of all the input vars of its parent ops. + """ + op = self.op + if op is None: + return + if Var._is_nonreplaceable_var(self): + self.nonreplaceable_vars_upstream = set([self]) + else: + flattened_inputs = op.get_flattened_inputs() + inputs_nonreplaceable_vars_upstream = [p.nonreplaceable_vars_upstream for p in flattened_inputs] + if len(inputs_nonreplaceable_vars_upstream) > 0: + self.nonreplaceable_vars_upstream = set.union(*inputs_nonreplaceable_vars_upstream) + + def _reset_nonreplaceable_vars_upstream(self): + self.nonreplaceable_vars_upstream = set() + + def can_be_replaced_by_var(self, new_var): + """ + A var can be replaced by a new var only if the new var's nonreplaceable_vars_upstream is the super set of the old one + """ + return self.nonreplaceable_vars_upstream.issubset(new_var.nonreplaceable_vars_upstream) + + def can_be_folded_to_const(self) -> bool: + """ + When translating frontend ops to PyMIL ops, some vars could be directly folded into a const. + For example, in PyTorch's `to()` op, the input could be converted by `cast` op, or directly + be folded to const. + + We only fold the var to a const when its value is known AND it doesn't have any + non-replaceable vars in the upstream. + """ + return self.val is not None and not self.nonreplaceable_vars_upstream + + @property + def sym_type(self): + return self._sym_type + + @property + def shape(self): + if types.is_tensor(self._sym_type): + return self._sym_type.get_shape() + return tuple() + + @property + def rank(self): + return len(self.shape) + + @property + def dtype(self): + if types.is_tensor(self._sym_type): + return self._sym_type.get_primitive() + return self._sym_type + + @property + def sym_val(self): + if self._sym_val is None: + return None + return self._sym_val.val + + @property + def val(self): + if self._sym_val is None or any_symbolic(self._sym_val.val): + return None + return self._sym_val.val + + @property + def op(self): + return self._op + + @property + def child_ops(self): + return self._child_ops + + def add_child_op(self, new_op): + self._child_ops.append(new_op) + + def remove_child_op(self, target_op, no_check=False): + if target_op not in self._child_ops: + if no_check: + return # no-op + msg = "Op {} does not takes Var {} as input" + raise ValueError(msg.format(target_op.name, self.name)) + self._child_ops.remove(target_op) + + def shape_str(self): + annotation = "" + if self.val is not None: + annotation = "*" + elif self.sym_val is not None: + annotation = "^" + shape_str = str(self.shape)[:-1] # trim the ")" + if self.rank > 1: + shape_str += ", " + if types.builtin_to_string(self.dtype) is None: + shape_str += ")" + annotation + else: + shape_str += types.builtin_to_string(self.dtype) + ")" + annotation + return shape_str + + def type_str(self): + is_tensor = types.is_tensor(self.sym_type) + is_list = types.is_list(self.sym_type) + if is_tensor: + type_string = "(Tensor)" + elif is_list: + type_string = "(List)" + else: + type_string = "(Scalar)" + return type_string + + def set_name(self, name): + self.name = name + + def is_tensor_or_scalar_of(self, dtype: str): + return (types.is_tensor(self.sym_type) or types.is_scalar(self.sym_type)) and builtin_to_string(self.dtype) == dtype + + def __str__(self): + return "%" + self.name + ": " + self.shape_str() + self.type_str() + + +class ListVar(Var): + __slots__ = 
["_elem_type", "init_length", "dynamic_length"] + + def __init__( + self, name, elem_type=None, init_length=None, dynamic_length=True, sym_val=None, **kwargs + ): + """ + elem_type (builtin.tensor) + + init_length (int): initial length + + dynamic_length (bool): True to allow list to grow. False uses + init_length as the fixed size (init_length is runtime length). + + sym_val: value of the list, if available + """ + super().__init__( + name=name, + sym_type=types.list(elem_type, init_length, dynamic_length), + sym_val=sym_val, + **kwargs + ) + self._elem_type = elem_type + self.init_length = init_length + self.dynamic_length = dynamic_length + + @property + def shape(self): + raise ValueError("shape not applicable to ListVar '{}'.".format(self.name)) + + @property + def rank(self): + raise ValueError("rank not applicable to ListVar '{}'".format(self.name)) + + @property + def dtype(self): + raise ValueError("dtype not applicable to ListVar '{}'".format(self.name)) + + @property + def elem_type(self): + return self._elem_type + + @property + def elem_shape(self): + if self._elem_type == types.unknown: + return None + elif types.is_tensor(self._elem_type): + return self._elem_type.get_shape() + return () + + def shape_str(self): + length = "?" + if not self.dynamic_length: + length = str(self.init_length) + if self._elem_type == types.unknown: + return "List[{}, unknown]".format(length) + if self._elem_type == types.str: + return "List[{}, str]".format(length) + elif self._elem_type == types.int64: + return "List[{}, int]".format(length) + else: + elem_shape = self._elem_type.get_shape() + elem_dtype = self._elem_type.get_primitive() + shape_str = str(elem_shape)[:-1] # trim the ")" + if len(elem_shape) > 1: + shape_str += ", " + shape_str += types.builtin_to_string(elem_dtype) + ")" + return "List[{}, {}]".format(length, shape_str) + + +class InternalVar(Var): + """ + Internal Var (with '__' prefix and won't appear in SSA) will ALWAYS have + `sym_val == builtin.unknown`. InternalVar are constructed by builder only. + + Comment: Internal Var can be used to represent diverse types such as enum + type `DataType.FLOAT32`. + """ + + def __init__(self, val, name=None): + super().__init__( + name=name, sym_type=types.unknown, sym_val=types.unknown(val) + ) + + +class ComplexVar(Var): + """Var to handle complex data.""" + + __slots__ = ["_real", "_imag"] + + def __init__( + self, + name, + sym_type, + sym_val=None, + op=None, + op_output_idx=None, + real: Optional[Var] = None, + imag: Optional[Var] = None, + ): + super().__init__( + name=name, + sym_type=sym_type, + sym_val=sym_val, + op=op, + op_output_idx=op_output_idx, + ) + + # Handle complex data types. + self._real: Optional[Var] = real + self._imag: Optional[Var] = imag + + @property + def real(self): + return self._real + + @property + def imag(self): + return self._imag + + @real.setter + def real(self, real): + if not types.is_complex(self.dtype): + raise ValueError( + f"Only complex number can set `real`. This var is {self.dtype}." + ) + self._real = real + + @imag.setter + def imag(self, imag): + if not types.is_complex(self.dtype): + raise ValueError( + f"Only complex number can set `imag`. This var is {self.dtype}." 
+ ) + self._imag = imag diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/visitors/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/visitors/__init__.py new file mode 100644 index 00000000..61aafff4 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/visitors/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/visitors/dot_visitor.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/visitors/dot_visitor.py new file mode 100644 index 00000000..4471f61b --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/mil/visitors/dot_visitor.py @@ -0,0 +1,206 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from ..var import Var + + +def _get_input_vars(op, only_nonconst_vars=False): + """ + Return type : List[Var] + """ + input_vars = [] + for name, val in op.inputs.items(): + if isinstance(val, Var): + if only_nonconst_vars: + if val.op and val.op.op_type == "const": + continue + input_vars.append(val) + elif isinstance(val, (list, tuple)): + for var in val: + if not isinstance(var, Var): + msg = "unrecognized input type of op='{}', input='{}'" + raise ValueError(msg.format(op.name, name)) + if only_nonconst_vars: + if var.op and var.op.op_type == "const": + continue + input_vars.append(var) + else: + msg = "unrecognized input type of op='{}', input='{}'" + raise ValueError(msg.format(op.name, name)) + return input_vars + + +class DotVisitor: + """ + Generates a dot description of a ssa block + """ + + def __init__(self, annotation=True): + self.result = [] + self.visited_memo = {} + self.highlights = {} + self.alternate_labeller = lambda o: o.op_type + ": " + o.name + self.annotation = annotation + + def labeller(self, labeller): + self.alternate_labeller = labeller + return self + + def highlight_nodes(self, nodeset, color="yellow"): + for i in nodeset: + self.highlights[i] = color + return self + + def visit(self, block, op, nodename_prefix=""): + """ + Append edges connecting parents of op to the op + """ + + if op in self.visited_memo: + return self + + label = self.alternate_labeller(op) + self.visited_memo[op] = 1 + + if op.name in self.highlights and op.name not in [ + o.name for o in block.outputs + ]: + self.result.append( + '"' + + nodename_prefix + + "op: " + + op.name + + '"' + + '[label="' + + label + + '",fillcolor=%s,style=filled,fontcolor=%s]' + % (self.highlights[op.name], "violetred") + ) + else: + self.result.append( + '"' + + nodename_prefix + + "op: " + + op.name + + '"' + + '[label="' + + label + + '",fontcolor=%s]' % ("violetred") + ) + + for input_var in _get_input_vars(op, only_nonconst_vars=True): + if input_var.op is not None: + input_name = "op: " + input_var.op.name + else: + input_name = input_var.name + + edge = ( + '"' + + nodename_prefix + + input_name + + '"' + + " -> " + + '"' + + nodename_prefix + + "op: " + + op.name + + '"' + ) + self.result.append(edge) + if input_var.op is not None: + self.visit(block, input_var.op, nodename_prefix) + else: + 
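+                # input_var has no parent op, so it is a block/function input;
+                # render it as a leaf "input: ..." node instead of recursing.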
self.visit_input_var(input_var, nodename_prefix) + + return self + + def visit_input_var(self, var, nodename_prefix=""): + label = "input: " + var.name + + if var.name in self.highlights: + self.result.append( + '"' + + nodename_prefix + + var.name + + '"' + + '[label="' + + label + + '",fillcolor=%s,style=filled,fontcolor=%s]' + % (self.highlights[var.name], "violetred") + ) + else: + self.result.append( + '"' + + nodename_prefix + + var.name + + '"' + + '[label="' + + label + + '",fontcolor=%s]' % ("violetred") + ) + + def visit_output_vars(self, block, var, nodename_prefix=""): + + label = "output: " + var.name + if var.name in self.highlights: + self.result.append( + '"' + + nodename_prefix + + var.name + + '"' + + '[label="' + + label + + '",fillcolor=%s,style=filled,fontcolor=%s]' + % (self.highlights[var.name], "violetred") + ) + else: + self.result.append( + '"' + + nodename_prefix + + var.name + + '"' + + '[label="' + + label + + '",fontcolor=%s]' % ("violetred") + ) + + parent_op = var.op + edge = ( + '"' + + nodename_prefix + + "op: " + + parent_op.name + + '"' + + " -> " + + '"' + + nodename_prefix + + var.name + + '"' + ) + self.result.append(edge) + self.visit(block, parent_op, nodename_prefix=nodename_prefix) + + def visit_all(self, block, nodename_prefix=""): + for out_var in block.outputs: + self.visit_output_vars(block, out_var, nodename_prefix=nodename_prefix) + for op in block.operations: + if op.op_type != "const": + self.visit(block, op, nodename_prefix=nodename_prefix) + return self + + def get_result(self, graphtype="digraph", graph_name="g"): + return ( + graphtype + + " " + + graph_name + + " {\n\t" + + "\n\t".join(str(i) for i in self.result) + + ';\n\tlabel="' + + graph_name[8:] + + '";\n\tfontsize=96;\n}' + ) + + def __str__(self): + return self.get_result() diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/test_flexible_shape_inputs.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/test_flexible_shape_inputs.py new file mode 100644 index 00000000..9920b758 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/test_flexible_shape_inputs.py @@ -0,0 +1,146 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. 
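+# ---------------------------------------------------------------------------
+# A minimal usage sketch for the DotVisitor defined above. This is
+# illustrative only: `prog` is assumed to be a pymil Program built elsewhere,
+# and the highlight color is an arbitrary choice.
+from coremltools.converters.mil.mil.visitors.dot_visitor import DotVisitor
+
+def block_to_dot(prog, func_name="main"):
+    block = prog.functions[func_name]
+    visitor = DotVisitor(annotation=True)
+    visitor.highlight_nodes([v.name for v in block.outputs], color="goldenrod")
+    visitor.visit_all(block)
+    # get_result() returns Graphviz source, renderable with e.g. `dot -Tpng`.
+    return visitor.get_result("digraph", "graph_main")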
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as _np +import PIL.Image +import pytest + +import coremltools as ct +from coremltools._deps import _HAS_TORCH, MSG_TORCH_NOT_FOUND + +if _HAS_TORCH: + import torch + torch.manual_seed(10) + + class TestConvModule(torch.nn.Module): + def __init__(self, in_channels=3, out_channels=10, kernel_size=3): + super(TestConvModule, self).__init__() + self.conv = torch.nn.Conv2d(in_channels, out_channels, + kernel_size) + + def forward(self, x): + return self.conv(x) + + +def _numpy_array_to_pil_image(x): + """ + convert x of shape (1, 3, H, W) to PIL image + """ + assert len(x.shape) == 4 + assert list(x.shape[:2]) == [1, 3] + x = x[0, :, :, :] # (3, H, W) + x = _np.transpose(x, [1, 2, 0]) # (H, W, 3) + x = x.astype(_np.uint8) + return PIL.Image.fromarray(x) + + +def _compute_snr(arr1, arr2): + arr1 = arr1.flatten() + arr2 = arr2.flatten() + noise = arr1 - arr2 + noise_var = _np.sum(noise ** 2) / len(noise) + 1e-7 + signal_energy = _np.sum(arr2 ** 2) / len(arr2) + max_signal_energy = _np.amax(arr2 ** 2) + snr = 10 * _np.log10(signal_energy / noise_var) + psnr = 10 * _np.log10(max_signal_energy / noise_var) + return snr, psnr + +def _assert_torch_coreml_output_shapes(coreml_model, spec, torch_model, torch_example_input, is_image_input=False): + torch_out = torch_model(torch_example_input) + input_name = spec.description.input[0].name + output_name = spec.description.output[0].name + input_dict = {} + if is_image_input: + input_dict[input_name] = _numpy_array_to_pil_image(torch_example_input.numpy()) + else: + input_dict[input_name] = torch_example_input.numpy() + coreml_out = coreml_model.predict(input_dict)[output_name] + assert torch_out.shape == coreml_out.shape + snr, psnr = _compute_snr(torch_out.cpu().detach().numpy(), coreml_out) + _np.testing.assert_array_less(20, snr) + _np.testing.assert_array_less(30, psnr) + + +@pytest.mark.skipif(not _HAS_TORCH or not ct.utils._is_macos(), reason=MSG_TORCH_NOT_FOUND) +class TestFlexibleInputShapes: + + @pytest.mark.parametrize("convert_to", ['neuralnetwork', 'mlprogram']) + def test_multiarray_input_rangedim(self, convert_to): + if convert_to == "mlprogram" and ct.utils._macos_version() < (12, 0): + return + + example_input = torch.rand(1, 3, 50, 50) * 100 + traced_model = torch.jit.trace(TestConvModule().eval(), example_input) + + input_shape = ct.Shape(shape=(1, 3, ct.RangeDim(25, 100, default=45), ct.RangeDim(25, 100, default=45))) + model = ct.convert(traced_model, + inputs=[ct.TensorType(shape=input_shape)], + convert_to=convert_to) + + spec = model.get_spec() + assert list(spec.description.input[0].type.multiArrayType.shape) == [1, 3, 45, 45] + assert spec.description.input[0].type.multiArrayType.shapeRange.sizeRanges[2].lowerBound == 25 + assert spec.description.input[0].type.multiArrayType.shapeRange.sizeRanges[2].upperBound == 100 + _assert_torch_coreml_output_shapes(model, spec, traced_model, example_input) + + @pytest.mark.parametrize("convert_to", ['neuralnetwork', 'mlprogram']) + def test_multiarray_input_enumerated(self, convert_to): + if convert_to == "mlprogram" and ct.utils._macos_version() < (12, 0): + return + + example_input = torch.rand(1, 3, 50, 50) * 100 + traced_model = torch.jit.trace(TestConvModule().eval(), example_input) + + input_shape = ct.EnumeratedShapes(shapes=[[1, 3, 25, 25], [1, 3, 50, 50], [1, 3, 67, 67]], + default=[1, 3, 67, 67]) + model = 
ct.convert(traced_model, + inputs=[ct.TensorType(shape=input_shape)], + convert_to=convert_to) + + spec = model.get_spec() + assert list(spec.description.input[0].type.multiArrayType.shape) == [1, 3, 67, 67] + assert list(spec.description.input[0].type.multiArrayType.enumeratedShapes.shapes[0].shape) == [1, 3, 67, 67] + assert len(spec.description.input[0].type.multiArrayType.enumeratedShapes.shapes) == 3 + _assert_torch_coreml_output_shapes(model, spec, traced_model, example_input) + + @pytest.mark.skipif(ct.utils._macos_version() < (12, 0), reason="Image input with RangeDim works correctly on macOS12+") + @pytest.mark.parametrize("convert_to", ['neuralnetwork', 'mlprogram']) + def test_image_input_rangedim(self, convert_to): + example_input = torch.rand(1, 3, 50, 50) * 255 + traced_model = torch.jit.trace(TestConvModule().eval(), example_input) + + input_shape = ct.Shape(shape=(1, 3, ct.RangeDim(25, 100, default=45), ct.RangeDim(25, 100, default=45))) + model = ct.convert(traced_model, + inputs=[ct.ImageType(shape=input_shape)], + convert_to=convert_to) + + spec = model.get_spec() + assert spec.description.input[0].type.imageType.width == 45 + assert spec.description.input[0].type.imageType.height == 45 + assert spec.description.input[0].type.imageType.imageSizeRange.widthRange.lowerBound == 25 + assert spec.description.input[0].type.imageType.imageSizeRange.widthRange.upperBound == 100 + _assert_torch_coreml_output_shapes(model, spec, traced_model, example_input, is_image_input=True) + + @pytest.mark.parametrize("convert_to", ['neuralnetwork', 'mlprogram']) + def test_image_input_enumerated(self, convert_to): + if convert_to == "mlprogram" and ct.utils._macos_version() < (12, 0): + return + + example_input = torch.rand(1, 3, 50, 50) * 255 + traced_model = torch.jit.trace(TestConvModule().eval(), example_input) + + input_shape = ct.EnumeratedShapes(shapes=[[1, 3, 25, 25], [1, 3, 50, 50], [1, 3, 67, 67]], + default=[1, 3, 67, 67]) + model = ct.convert(traced_model, + inputs=[ct.ImageType(shape=input_shape)], + convert_to=convert_to) + + spec = model.get_spec() + assert spec.description.input[0].type.imageType.width == 67 + assert spec.description.input[0].type.imageType.height == 67 + assert len(spec.description.input[0].type.imageType.enumeratedSizes.sizes) == 3 + assert spec.description.input[0].type.imageType.enumeratedSizes.sizes[0].width == 25 + assert spec.description.input[0].type.imageType.enumeratedSizes.sizes[0].height == 25 + _assert_torch_coreml_output_shapes(model, spec, traced_model, example_input, is_image_input=True) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/testing_reqs.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/testing_reqs.py new file mode 100644 index 00000000..00c07487 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/testing_reqs.py @@ -0,0 +1,54 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
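+# ---------------------------------------------------------------------------
+# Condensed sketch of the flexible-shape conversion pattern exercised by the
+# tests above; the Conv2d module and the shape bounds are illustrative.
+import coremltools as ct
+import torch
+
+traced = torch.jit.trace(torch.nn.Conv2d(3, 10, 3).eval(),
+                         torch.rand(1, 3, 50, 50))
+flexible_shape = ct.Shape(shape=(1, 3,
+                                 ct.RangeDim(25, 100, default=45),
+                                 ct.RangeDim(25, 100, default=45)))
+mlmodel = ct.convert(traced,
+                     inputs=[ct.TensorType(shape=flexible_shape)],
+                     convert_to="mlprogram")
+# The converted model now accepts any spatial size in [25, 100].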
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause +import os + +import numpy as np +import pytest + +import coremltools as ct +from coremltools._deps import (_HAS_TF_1, _HAS_TF_2, _HAS_TORCH) + + +# Setting up backend / precision +backends = [] +if 'PYMIL_TEST_TARGETS' in os.environ: + targets = os.environ['PYMIL_TEST_TARGETS'].split(',') + for i in range(len(targets)): + targets[i] = targets[i].strip() + + if 'mlprogram' in targets: + backends.append(('mlprogram', 'fp16')) + if os.getenv('INCLUDE_MIL_FP32_UNIT_TESTS') == '1': + backends.append(('mlprogram', 'fp32')) + if 'neuralnetwork' in targets: + backends.append(('neuralnetwork', 'fp32')) + + if not backends: + raise ValueError("PYMIL_TEST_TARGETS can be set to one or more of: neuralnetwork, mlprogram") +else: + backends = [('mlprogram', "fp16"), ('neuralnetwork', "fp32")] + if os.getenv('INCLUDE_MIL_FP32_UNIT_TESTS') == '1': + backends.append(('mlprogram', 'fp32')) + +# Setting up compute unit +compute_units = [] +if 'COMPUTE_UNITS' in os.environ: + for i, cur_str_val in enumerate(os.environ['COMPUTE_UNITS'].split(',')): + cur_str_val = cur_str_val.strip().upper() + if cur_str_val not in ct.ComputeUnit.__members__: + raise ValueError("Compute unit \"{}\" not supported in coremltools.".format(cur_str_val)) + compute_units.append(ct.ComputeUnit[cur_str_val]) +else: + compute_units = [ct.ComputeUnit.CPU_ONLY] + +np.random.seed(1984) + +if _HAS_TF_1: + tf = pytest.importorskip("tensorflow") + tf.compat.v1.set_random_seed(1234) + +if _HAS_TF_2: + tf = pytest.importorskip("tensorflow") + tf.random.set_seed(1234) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/testing_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/testing_utils.py new file mode 100644 index 00000000..781d2fb9 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/mil/testing_utils.py @@ -0,0 +1,545 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
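+# ---------------------------------------------------------------------------
+# Sketch of the environment knobs read by testing_reqs.py above; the values
+# here are examples, not defaults, and must be set before the import.
+import os
+
+os.environ["PYMIL_TEST_TARGETS"] = "mlprogram, neuralnetwork"
+os.environ["INCLUDE_MIL_FP32_UNIT_TESTS"] = "1"
+os.environ["COMPUTE_UNITS"] = "cpu_only"  # matched case-insensitively
+
+from coremltools.converters.mil.testing_reqs import backends, compute_units
+# backends -> [('mlprogram', 'fp16'), ('mlprogram', 'fp32'),
+#              ('neuralnetwork', 'fp32')]
+# compute_units -> [ct.ComputeUnit.CPU_ONLY]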
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import copy +import os +import re +from functools import partial +from pathlib import Path + +import numpy as np +from PIL import Image + +import coremltools as ct +import coremltools.models.utils as coremltoolsutils +from coremltools._deps import _IS_MACOS +from coremltools.converters.mil.mil import Function, Program +from coremltools.converters.mil.mil.passes.defs.quantization import AbstractQuantizationPass +from coremltools.converters.mil.mil.passes.pass_registry import PASS_REGISTRY +from coremltools.proto import FeatureTypes_pb2 as ft + +np.random.seed(10) + +DTYPE_TO_FEATURE_TYPE_MAP = {"int32": ft.ArrayFeatureType.INT32, + "fp32": ft.ArrayFeatureType.FLOAT32, + "fp16": ft.ArrayFeatureType.FLOAT16, + } + +einsum_equations = [ + # hardcoded cases + "abcd,adce->abce", + "abc,cbd->abd", + "bnqd,bnkd->bnqk", + "abc,cd->abd", + "abc,cde->abde", + "btnh,bfnh->bnft", + "bnft,btnh->bfnh", + "abcd,cde->abe", + "a b c d , a d c e -> a b c e", + # with-diagonal generic cases + "jiii,ijjk->jk", + "iji,ji->j", + "jii,ijk->jk", + "ijij,iij->ij", + # no-diagonal generic cases + "i,j->ij", # outer product + "a,a->a", # batched outer product + "ija,la->ijal", # batched outer product + "i,i->", # inner product + "ia,ia->a", # batched inner product + "ai,ia->a", # batched inner product + "abi,abi->ab", # batched inner product + "iab,iab->ab", # batched inner product + "abi,bai->ba", # batched inner product + "ij,j->i", # matrix-vector multiplication + "i,ij->j", # vector-matrix multiplication + "ai,ija->aj", # batched vector-matrix multiplication + "aibj,bi->jba", # batched matrix-vector multiplication + "ij,jk->ik", # matrix multiplication + "aij,ajk->iak", # batched matrix multiplication + "abij,abjk->abik", # batched matrix multiplication + "aijb,bajk->abik", # batched matrix multiplication + "ij,ij->", # double-inner product + "ij,ji->", # double-inner product + "aij,aij->a", # batched double-inner product + "ija,ija->a", # batched double-inner product + "ija,jia->a", # batched double-inner product + "aijb,ajbi->ab", # batched double-inner product + "aibj,cdij->cadb", # batched double-inner product + "ijk,lmj->iklm", # 3rd-order tensor contraction + "ijak,akl->aijl", # batched 3rd-order tensor and matrix contraction + # Generic with sum + "ij,j->ij", + "ij,kjl->j", + "iijj,j->j", +] + +def _serialize_current_pytest(mlmodel): + class_name = os.environ.get('PYTEST_CURRENT_TEST').split("::")[1].strip() + test_name = "::".join(os.environ.get('PYTEST_CURRENT_TEST').split("::")[2:]).split("(call)")[0].strip() + mlpackage_path = "/tmp/pytest_failures/{}/{}/model.mlpackage".format(class_name, test_name) + Path(mlpackage_path).mkdir(parents=True, exist_ok=True) + mlmodel.save(mlpackage_path) + +def assert_op_count_match(program, expect, op=None, verbose=False): + """ + Assert number of ops match expected number. If op is not specified, + Count total number of ops and match with expect. + """ + if verbose: + print(program) + + count = 0 + for _, func in program.functions.items(): + for o in func.operations: + if not op: + count += 1 + elif o.op_type.lower() == op.lower(): + count += 1 + np.testing.assert_equal(count, expect) + + +def assert_model_is_valid( + program, inputs, backend=("neuralnetwork", "fp32"), verbose=True, expected_output_shapes=None +): + """ + Assert Core ML model is valid. + + Inputs: + + - input: str -> shape tuple. 
Keys must cover all program input names; each shape tuple may contain
+      only positive integers.
+    """
+    # Avoid circular import
+    from coremltools.converters.mil.testing_reqs import ct

+    input_dict = dict()
+    for name, shape in inputs.items():
+        input_dict[name] = np.random.rand(*shape)

+    mlmodel = ct_convert(program, source="milinternal", convert_to=backend,
+                         compute_units=ct.ComputeUnit.CPU_ONLY)
+    assert mlmodel is not None

+    if verbose:
+        from coremltools.models.neural_network.printer import print_network_spec
+        print_network_spec(mlmodel.get_spec(), style="coding")

+    if _IS_MACOS and (not mlmodel.is_package or coremltoolsutils._macos_version() >= (12, 0)):
+        prediction = mlmodel.predict(input_dict)
+        assert prediction is not None
+        if expected_output_shapes is not None:
+            for out_name, out_shape in expected_output_shapes.items():
+                assert out_name in prediction
+                assert out_shape == prediction[out_name].shape, \
+                    "{} != {}".format(out_shape, prediction[out_name].shape)


+def assert_same_output_names(prog1, prog2, func_name="main"):
+    prog1_outputs = [o.name for o in prog1[func_name].outputs]
+    prog2_outputs = [o.name for o in prog2[func_name].outputs]
+    assert prog1_outputs == prog2_outputs


+def assert_same_output_shapes(prog1, prog2, func_name="main"):
+    prog1_output_shapes = [o.shape for o in prog1[func_name].outputs]
+    prog2_output_shapes = [o.shape for o in prog2[func_name].outputs]
+    assert prog1_output_shapes == prog2_output_shapes

+def get_op_names_in_program(prog, func_name="main", skip_const_ops=True):
+    """
+    Return the operation names in prog[func_name],
+    in the same order as they are stored (topological).
+    """
+    op_names_in_program = []
+    for op in prog[func_name].operations:
+        if skip_const_ops:
+            if op.op_type == "const":
+                continue
+        op_names_in_program.append(op.name)
+    return op_names_in_program

+def get_op_types_in_program(prog, func_name="main", skip_const_ops=True):
+    """
+    Return the operation types in prog[func_name],
+    in the same order as they are stored (topological).
+    """
+    op_types_in_program = []
+    for op in prog[func_name].operations:
+        if skip_const_ops:
+            if op.op_type == "const":
+                continue
+        op_types_in_program.append(op.op_type)
+    return op_types_in_program


+def random_gen(
+    shape,
+    rand_min=0.0,
+    rand_max=1.0,
+    eps_from_int=0.0,
+    allow_duplicate=True,
+    dtype=np.float32,
+):
+    """
+    This helper function generates a random array of shape `shape`.
+    Generated values lie in the half-open interval [rand_min, rand_max).
+    For floating-point dtypes, each value is at least `eps_from_int` away
+    from the nearest integer.
+    If allow_duplicate is False, the generated values are guaranteed to be
+    pairwise distinct.
+    Default data type is np.float32.
+ """ + elem = np.prod(shape).astype(np.int32) + ret = [] + for _ in range(elem): + while True: + r = dtype((rand_max - rand_min) * np.random.random() + rand_min) + if not allow_duplicate and r in ret: + continue + if np.issubdtype(dtype, np.integer) or np.fabs(np.round(r) - r) > eps_from_int: + ret.append(r) + break + ret = np.array(ret).reshape(shape) + return ret.astype(dtype) + + +def ssa_fn(func): + """ + Deprecated: use @mb.program() + """ + + def wrapper(*args, **kwargs): + prog = Program() + with Function({}) as ssa_func: + func(*args, **kwargs) + + return wrapper + + +def to_tuple(v): + if not isinstance(v, (list, tuple)): + return tuple([v]) + return tuple(v) + + +def run_core_ml_predict(mlmodel, input_key_values): + for k, v in input_key_values.items(): + if isinstance(v, Image.Image): + continue + elif not np.isscalar(v) and not v.shape == (): + input_key_values[k] = v.astype(np.float32) + else: + input_key_values[k] = np.array([v], dtype=np.float32) + return mlmodel.predict(input_key_values) + +def _get_coreml_out_from_dict(out_dict, out_name): + if out_name in out_dict: + return out_dict[out_name] + elif re.sub("[^a-zA-Z0-9_]", "_", out_name) in out_dict: + return out_dict[re.sub("[^a-zA-Z0-9_]", "_", out_name)] + else: + raise KeyError("{} output not found in Core ML outputs".format(out_name)) + +def compare_backend( + mlmodel, + input_key_values, + expected_outputs, + dtype = "fp32", + atol=1e-04, + rtol=1e-05, + also_compare_shapes=True, +): + """ + Inputs: + - mlmodel: MLModel. + + - input_key_values: str -> np.array. Keys must match those in + input_placeholders. + + - expected_outputs: dict[str, np.array]. Required iff + frontend_only is False + """ + if _IS_MACOS and (not mlmodel.is_package or coremltoolsutils._macos_version() >= (12, 0)): + + if dtype not in ["fp32", "fp16"]: + raise ValueError("Unsupported dtype config") + + pred = run_core_ml_predict(mlmodel, input_key_values) + if also_compare_shapes: + compare_shapes( + mlmodel, + input_key_values, + expected_outputs, + pred=pred, + ) + if mlmodel.compute_unit != ct.ComputeUnit.CPU_ONLY or (dtype == "fp16"): + atol = max(atol * 100.0, 5e-1) + rtol = max(rtol * 100.0, 5e-2) + for o, expected in expected_outputs.items(): + coreml_out = _get_coreml_out_from_dict(pred, o) + + if isinstance(coreml_out, np.ndarray): + np.testing.assert_allclose(coreml_out, expected, atol=atol, rtol=rtol) + elif isinstance(coreml_out, dict): + for k, v in coreml_out.items(): + assert k in expected + assert expected[k] == v + else: + assert coreml_out == expected + + return pred + return None + + +def compare_shapes( + mlmodel, input_key_values, expected_outputs, pred=None +): + """ + Inputs: + - mlmodel: MLModel. + + - input_key_values: str -> np.array or PIL.Image. Keys must match those in + input_placeholders. + + - expected_outputs: dict[str, np.array]. + + - pred: Prediction to use, if it has already been computed. + """ + + if _IS_MACOS: + if not pred: + pred = run_core_ml_predict(mlmodel, input_key_values) + for o, expected in expected_outputs.items(): + coreml_out = _get_coreml_out_from_dict(pred, o) + + # output is dictionary (for classifier) + if isinstance(coreml_out, dict) and isinstance(expected, dict): + assert len(coreml_out) == len(expected) + continue + + # output is numpy objects + np_types = (np.generic, np.ndarray) + if isinstance(coreml_out, np_types) and isinstance(expected, np_types): + msg = "Output: {}. 
expected shape {} != actual shape {}".format( + o, expected.shape, coreml_out.shape + ) + # Core ML does not support scalar as output + # remove this special case when support is added + if expected.shape == () and coreml_out.shape == (1,): + continue + assert coreml_out.shape == expected.shape, msg + continue + + # output is other types (for classifier) + assert type(coreml_out) == type(expected) + +def ct_convert( + program, + source="auto", + inputs=None, + outputs=None, + classifier_config=None, + minimum_deployment_target=None, + convert_to=None, + compute_precision=None, + skip_model_load=False, + converter=ct.convert, + **kwargs, +): + + """ + Overloaded ct.convert function with the only difference being in the argument `convert_to` + which in this overloaded call accepts a tuple of (target, dtype). + Ex: ("neuralnetwork", "fp32"), ("mlprogram", "fp16") + """ + + if isinstance(converter, partial): + raise ValueError("Partial function is not supported for function-parameter 'converter' since its keywords arguments could get overriden.") + + target, dtype = convert_to + + if dtype not in ["fp32", "fp16"]: + raise ValueError("Unsupported dtype config") + + compute_precision = ct.precision.FLOAT16 if dtype == "fp16" else ct.precision.FLOAT32 + if target == "neuralnetwork": + compute_precision = None + + mlmodel = converter( + program, + source=source, + inputs=inputs, + outputs=outputs, + classifier_config=classifier_config, + minimum_deployment_target=minimum_deployment_target, + convert_to=target, + compute_precision=compute_precision, + skip_model_load=skip_model_load, + **kwargs + ) + + if os.environ.get("DEBUG_SAVE_MLMODEL", "0") == "1": + from coremltools.converters.mil.testing_utils import _serialize_current_pytest + _serialize_current_pytest(mlmodel) + + return mlmodel + +def get_core_ml_prediction( + build, input_placeholders, input_values, compute_unit=ct.ComputeUnit.CPU_ONLY, + backend=("neuralnetwork", "fp32")): + """ + Return predictions of the given model. 
+ """ + program = Program() + with Function(input_placeholders) as ssa_func: + output_vars = build(**ssa_func.inputs) + if isinstance(output_vars, tuple): + output_vars = list(output_vars) + elif not isinstance(output_vars, list): + output_vars = [output_vars] + ssa_func.set_outputs(output_vars) + program.add_function("main", ssa_func) + + mlmodel = ct_convert( + program, + source="milinternal", + convert_to=backend, + compute_units=compute_unit + ) + return mlmodel.predict(input_values) + + +def apply_pass_and_basic_check(prog, pass_name, skip_output_name_check=False): + """ + Apply pass to the program + """ + prev_prog = copy.deepcopy(prog) + graph_pass = pass_name if isinstance(pass_name, AbstractQuantizationPass) else PASS_REGISTRY[pass_name] + graph_pass(prog) + block = prog.functions["main"] + prev_block = prev_prog.functions["main"] + if not skip_output_name_check: + assert_same_output_names(prev_prog, prog) + assert_same_output_shapes(prev_prog, prog) + return prev_prog, prev_block, block + + +def assert_prog_input_type(prog, expected_dtype_str, expected_name=None, index=0): + block = prog.functions["main"] + if expected_name is None: + input_var = list(block.inputs.values())[index] + assert input_var.is_tensor_or_scalar_of(dtype=expected_dtype_str) + else: + for input_var in block.inputs.values(): + if input_var.name == expected_name: + assert input_var.is_tensor_or_scalar_of(dtype=expected_dtype_str) + +def assert_spec_input_type(spec, expected_feature_type, expected_name=None, index=0): + if expected_name is None: + assert spec.description.input[index].type.multiArrayType.dataType == expected_feature_type + else: + for input in spec.description.input: + if input.name == expected_name: + assert input.type.multiArrayType.dataType == expected_feature_type + +def assert_input_dtype(mlmodel, expected_type_str, expected_name=None, index=0): + assert_prog_input_type(mlmodel._mil_program, expected_type_str, + expected_name=expected_name, index=index) + assert_spec_input_type(mlmodel._spec, DTYPE_TO_FEATURE_TYPE_MAP[expected_type_str], + expected_name=expected_name, index=index) + +def assert_spec_output_type(spec, expected_feature_type, expected_name=None, index=0): + assert spec.description.output[index].type.multiArrayType.dataType == expected_feature_type + if expected_name is not None: + assert spec.description.output[index].name == expected_name + +def assert_prog_output_type(prog, expected_dtype_str, expected_name=None, index=0): + block = prog.functions["main"] + output_var = block.outputs[index] + assert output_var.is_tensor_or_scalar_of(dtype=expected_dtype_str) + if expected_name is not None: + assert output_var.name == expected_name + +def assert_output_dtype(mlmodel, expected_type_str, expected_name=None, index=0): + assert_prog_output_type(mlmodel._mil_program, expected_type_str, + expected_name=expected_name, index=index) + assert_spec_output_type(mlmodel._spec, DTYPE_TO_FEATURE_TYPE_MAP[expected_type_str], + expected_name=expected_name, index=index) + +def random_gen_input_feature_type(input_desc): + if input_desc.type.WhichOneof("Type") == "multiArrayType": + shape = [s for s in input_desc.type.multiArrayType.shape] + if input_desc.type.multiArrayType.dataType == ft.ArrayFeatureType.FLOAT32: + dtype = np.float32 + elif input_desc.type.multiArrayType.dataType == ft.ArrayFeatureType.INT32: + dtype = np.int32 + elif input_desc.type.multiArrayType.dataType == ft.ArrayFeatureType.FLOAT16: + dtype = np.float16 + elif input_desc.type.multiArrayType.dataType == 
ft.ArrayFeatureType.FLOAT64:
+            dtype = np.float64
+        else:
+            raise ValueError("unsupported type")
+        return np.random.rand(*shape).astype(dtype)
+    elif input_desc.type.WhichOneof("Type") == "imageType":
+        if input_desc.type.imageType.colorSpace in (ft.ImageFeatureType.BGR, ft.ImageFeatureType.RGB):
+            shape = [3, input_desc.type.imageType.height, input_desc.type.imageType.width]
+            x = np.random.randint(low=0, high=256, size=shape)
+            return Image.fromarray(np.transpose(x, [1, 2, 0]).astype(np.uint8))
+        elif input_desc.type.imageType.colorSpace == ft.ImageFeatureType.GRAYSCALE:
+            shape = [input_desc.type.imageType.height, input_desc.type.imageType.width]
+            x = np.random.randint(low=0, high=256, size=shape)
+            return Image.fromarray(x.astype(np.uint8), 'L')
+        elif input_desc.type.imageType.colorSpace == ft.ImageFeatureType.GRAYSCALE_FLOAT16:
+            shape = (input_desc.type.imageType.height, input_desc.type.imageType.width)
+            x = np.random.rand(*shape)
+            return Image.fromarray(x.astype(np.float32), 'F')
+        else:
+            raise ValueError("unrecognized image type")
+    else:
+        raise ValueError('unsupported type')

+def gen_input_shapes_einsum(equation, dynamic):
+    equation = equation.replace(" ", "")
+    left = equation.split("->")[0]
+    a_desc, b_desc = left.split(",")
+    converter_shapes = {}
+    shapes = {}
+    cur_default_shape = 2
+    for symbol in a_desc + b_desc:
+        if symbol not in shapes:
+            shapes[symbol] = cur_default_shape
+            if dynamic:
+                converter_shapes[symbol] = ct.RangeDim(default=cur_default_shape)
+            else:
+                converter_shapes[symbol] = cur_default_shape
+            cur_default_shape += 1
+    a_shape = [shapes[symbol] for symbol in a_desc]
+    b_shape = [shapes[symbol] for symbol in b_desc]
+    a_converter_shape = [converter_shapes[symbol] for symbol in a_desc]
+    b_converter_shape = [converter_shapes[symbol] for symbol in b_desc]
+    return ([a_shape, b_shape],
+            [ct.TensorType(shape=a_converter_shape, dtype=np.float32),
+             ct.TensorType(shape=b_converter_shape, dtype=np.float32)])

+def verify_prediction(mlmodel, multiarray_type=None):
+    spec = mlmodel._spec
+    input_dict = {}
+    for input_desc in spec.description.input:
+        input_dict[input_desc.name] = random_gen_input_feature_type(input_desc)
+        if multiarray_type is not None:
+            input_dict[input_desc.name] = input_dict[input_desc.name].astype(multiarray_type)
+    mlmodel.predict(input_dict)

+def assert_spec_input_image_type(spec, expected_feature_type):
+    assert spec.description.input[0].type.imageType.colorSpace == expected_feature_type

+def assert_spec_output_image_type(spec, expected_feature_type):
+    assert spec.description.output[0].type.imageType.colorSpace == expected_feature_type

+def assert_cast_ops_count(mlmodel, expected_count):
+    block = mlmodel._mil_program.functions["main"]
+    assert len(block.find_ops(op_type="cast")) == expected_count

+def assert_ops_in_mil_program(mlmodel, expected_op_list):
+    assert expected_op_list == get_op_types_in_program(mlmodel._mil_program)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_LinearSVC.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_LinearSVC.py
new file mode 100644
index 00000000..433c9b78
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_LinearSVC.py
@@ -0,0 +1,58 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
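+# ---------------------------------------------------------------------------
+# Illustrative call to gen_input_shapes_einsum from testing_utils above; the
+# equation is one entry of the einsum_equations list.
+from coremltools.converters.mil.testing_utils import gen_input_shapes_einsum
+
+shapes, tensor_types = gen_input_shapes_einsum("ij,jk->ik", dynamic=False)
+# shapes == [[2, 3], [3, 4]]: each fresh symbol is assigned the next size
+# (i=2, j=3, k=4), and tensor_types holds matching ct.TensorType inputs
+# ready to pass to ct.convert.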
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from ..._deps import _HAS_SKLEARN +from ...models import MLModel as _MLModel + +if _HAS_SKLEARN: + from sklearn.svm import LinearSVC as _LinearSVC + + sklearn_class = _LinearSVC + from . import _sklearn_util + +from . import _logistic_regression + +model_type = "classifier" + + +def convert(model, feature_names, target): + """Convert a LinearSVC model to the protobuf spec. + Parameters + ---------- + model: LinearSVC + A trained LinearSVC model. + + feature_names: [str] + Name of the input columns. + + target: str + Name of the output column. + + Returns + ------- + model_spec: An object of type Model_pb. + Protobuf representation of the model + """ + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + + _sklearn_util.check_expected_type(model, _LinearSVC) + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "coef_")) + + return _MLModel(_logistic_regression._convert(model, feature_names, target)) + + +def supports_output_scores(model): + return True + + +def get_output_classes(model): + return _logistic_regression.get_output_classes(model) + + +def get_input_dimension(model): + return _logistic_regression.get_input_dimension(model) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_LinearSVR.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_LinearSVR.py new file mode 100644 index 00000000..d64719a5 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_LinearSVR.py @@ -0,0 +1,53 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from ..._deps import _HAS_SKLEARN +from ...models import MLModel as _MLModel + +if _HAS_SKLEARN: + import sklearn + from sklearn.svm import LinearSVR as _LinearSVR + + from . import _sklearn_util + + sklearn_class = sklearn.svm.LinearSVR + +from . import _linear_regression + +model_type = "regressor" + + +def convert(model, features, target): + """Convert a LinearSVR model to the protobuf spec. + Parameters + ---------- + model: LinearSVR + A trained LinearSVR model. + + feature_names: [str] + Name of the input columns. + + target: str + Name of the output column. + + Returns + ------- + model_spec: An object of type Model_pb. + Protobuf representation of the model + """ + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + + # Check the scikit learn model + _sklearn_util.check_expected_type(model, _LinearSVR) + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "coef_")) + + return _MLModel(_linear_regression._convert(model, features, target)) + + +def get_input_dimension(model): + return _linear_regression.get_input_dimension(model) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_NuSVC.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_NuSVC.py new file mode 100644 index 00000000..53414882 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_NuSVC.py @@ -0,0 +1,68 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. 
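+# ---------------------------------------------------------------------------
+# End-to-end sketch for the LinearSVC converter above, going through the
+# public sklearn entry point; the training data is synthetic.
+import numpy as np
+from sklearn.svm import LinearSVC
+import coremltools
+
+X = np.random.rand(100, 3)
+y = (X[:, 0] > 0.5).astype(int)
+clf = LinearSVC().fit(X, y)
+
+# Output names are left at their defaults ("classLabel" for the prediction).
+mlmodel = coremltools.converters.sklearn.convert(clf, ["a", "b", "c"])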
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from ..._deps import _HAS_SKLEARN +from . import _SVC as _SVC + +if _HAS_SKLEARN: + from sklearn.svm import NuSVC as _NuSVC + + from . import _sklearn_util + from ._sklearn_util import check_fitted + + sklearn_class = _NuSVC + +model_type = "classifier" + + +def convert(model, feature_names, target): + """Convert a Nu-Support Vector Classification (NuSVC) model to the protobuf spec. + Parameters + ---------- + model: NuSVC + A trained NuSVC encoder model. + + feature_names: [str], optional (default=None) + Name of the input columns. + + target: str, optional (default=None) + Name of the output column. + + Returns + ------- + model_spec: An object of type Model_pb. + Protobuf representation of the model + """ + + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + + _sklearn_util.check_expected_type(model, _NuSVC) + return _SVC.convert(model, feature_names, target) + + +def supports_output_scores(model): + return _SVC.supports_output_scores(model) + + +def get_output_classes(model): + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + check_fitted(model, lambda m: hasattr(m, "support_vectors_")) + return _SVC.get_output_classes(model) + + +def get_input_dimension(model): + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + + check_fitted(model, lambda m: hasattr(m, "support_vectors_")) + return _SVC.get_input_dimension(model) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_NuSVR.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_NuSVR.py new file mode 100644 index 00000000..65e3d868 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_NuSVR.py @@ -0,0 +1,54 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from ..._deps import _HAS_SKLEARN +from . import _SVR as _SVR + +if _HAS_SKLEARN: + from sklearn.svm import NuSVR as _NuSVR + + from . import _sklearn_util + from ._sklearn_util import check_fitted + + sklearn_class = _NuSVR + +model_type = "regressor" + + +def convert(model, feature_names, target): + """Convert a Nu Support Vector Regression (NuSVR) model to the protobuf spec. + Parameters + ---------- + model: NuSVR + A trained NuSVR encoder model. + + feature_names: [str] + Name of the input columns. + + target: str + Name of the output column. + + Returns + ------- + model_spec: An object of type Model_pb. + Protobuf representation of the model + """ + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + + _sklearn_util.check_expected_type(model, _NuSVR) + return _SVR.convert(model, feature_names, target) + + +def get_input_dimension(model): + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." 
+    )

+    check_fitted(model, lambda m: hasattr(m, "support_vectors_"))
+    return _SVR.get_input_dimension(model)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_SVC.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_SVC.py
new file mode 100644
index 00000000..fa8a3fad
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_SVC.py
@@ -0,0 +1,132 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause

+from ... import SPECIFICATION_VERSION as _SPECIFICATION_VERSION
+from ..._deps import _HAS_SKLEARN
+from ...models import MLModel as _MLModel
+from ...models._interface_management import set_classifier_interface_params
+from ...proto import Model_pb2 as _Model_pb2

+if _HAS_SKLEARN:
+    from sklearn.svm import SVC as _SVC

+    from ._sklearn_util import check_fitted

+    sklearn_class = _SVC

+model_type = "classifier"

+from ._svm_common import _set_kernel


+def _generate_base_svm_classifier_spec(model):
+    """
+    Takes an SVM classifier and produces a starting spec using the parts that
+    are shared between all SVMs.
+    """
+    if not (_HAS_SKLEARN):
+        raise RuntimeError(
+            "scikit-learn not found. scikit-learn conversion API is disabled."
+        )

+    check_fitted(model, lambda m: hasattr(m, "support_vectors_"))

+    spec = _Model_pb2.Model()
+    spec.specificationVersion = _SPECIFICATION_VERSION
+    svm = spec.supportVectorClassifier

+    _set_kernel(model, svm)

+    for cur_rho in model.intercept_:
+        if len(model.classes_) == 2:
+            # For some reason Scikit Learn doesn't negate for binary classification
+            svm.rho.append(cur_rho)
+        else:
+            svm.rho.append(-cur_rho)

+    for i in range(len(model._dual_coef_)):
+        svm.coefficients.add()
+        for cur_alpha in model._dual_coef_[i]:
+            svm.coefficients[i].alpha.append(cur_alpha)

+    for cur_src_vector in model.support_vectors_:
+        cur_dest_vector = svm.denseSupportVectors.vectors.add()
+        for i in cur_src_vector:
+            cur_dest_vector.values.append(i)
+    return spec


+def convert(model, feature_names, target):
+    """Convert a Support Vector Classification (SVC) model to the protobuf spec.
+    Parameters
+    ----------
+    model: SVC
+        A trained SVC model.

+    feature_names: [str], optional (default=None)
+        Name of the input columns.

+    target: str, optional (default=None)
+        Name of the output column.

+    Returns
+    -------
+    model_spec: An object of type Model_pb.
+        Protobuf representation of the model
+    """
+    if not (_HAS_SKLEARN):
+        raise RuntimeError(
+            "scikit-learn not found. scikit-learn conversion API is disabled."
+        )
+    spec = _generate_base_svm_classifier_spec(model)
+    spec = set_classifier_interface_params(
+        spec,
+        feature_names,
+        model.classes_,
+        "supportVectorClassifier",
+        output_features=target,
+    )

+    svm = spec.supportVectorClassifier
+    for i in model.n_support_:
+        svm.numberOfSupportVectorsPerClass.append(int(i))

+    if len(model.probA_) != 0 and len(model.classes_) == 2:
+        print(
+            "[WARNING] Scikit Learn uses a technique to normalize pairwise probabilities even for binary classification. "
+            "This can cause differences in predicted probabilities, usually less than 0.5%."
+        )

+    # probA_ and probB_ are empty arrays unless the model was trained with
+    # probability=True.
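+    # Background: probA_/probB_ are the per-class-pair Platt-scaling
+    # parameters libsvm fits when the SVC is trained with probability=True;
+    # the runtime applies the sigmoid 1 / (1 + exp(A * f + B)) to decision
+    # values f to obtain calibrated probabilities.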
+ if len(model.probA_) != 0: + for i in model.probA_: + svm.probA.append(i) + + for i in model.probB_: + svm.probB.append(i) + + return _MLModel(spec) + + +def supports_output_scores(model): + return len(model.probA_) != 0 + + +def get_output_classes(model): + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + check_fitted(model, lambda m: hasattr(m, "support_vectors_")) + return list(model.classes_) + + +def get_input_dimension(model): + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + check_fitted(model, lambda m: hasattr(m, "support_vectors_")) + return len(model.support_vectors_[0]) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_SVR.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_SVR.py new file mode 100644 index 00000000..6e66a337 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_SVR.py @@ -0,0 +1,81 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from ... import SPECIFICATION_VERSION +from ..._deps import _HAS_SKLEARN +from ...models import MLModel as _MLModel +from ...models._interface_management import set_regressor_interface_params +from ...proto import Model_pb2 as _Model_pb2 + +if _HAS_SKLEARN: + from sklearn.svm import SVR as _SVR + + from ._sklearn_util import check_fitted + + sklearn_class = _SVR + +model_type = "regressor" + +from ._svm_common import _set_kernel + + +def _generate_base_svm_regression_spec(model): + """ + Takes an SVM regression model produces a starting spec using the parts. + that are shared between all SVMs. + """ + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + + spec = _Model_pb2.Model() + spec.specificationVersion = SPECIFICATION_VERSION + svm = spec.supportVectorRegressor + + _set_kernel(model, svm) + + svm.rho = -model.intercept_[0] + for i in range(len(model._dual_coef_)): + for cur_alpha in model._dual_coef_[i]: + svm.coefficients.alpha.append(cur_alpha) + + for cur_src_vector in model.support_vectors_: + cur_dest_vector = svm.denseSupportVectors.vectors.add() + for i in cur_src_vector: + cur_dest_vector.values.append(i) + return spec + + +def convert(model, features, target): + """Convert a Support Vector Regressor (SVR) model to the protobuf spec. + Parameters + ---------- + model: SVR + A trained SVR encoder model. + + feature_names: [str] + Name of the input columns. + + target: str + Name of the output column. + + Returns + ------- + model_spec: An object of type Model_pb. + Protobuf representation of the model + """ + spec = _generate_base_svm_regression_spec(model) + spec = set_regressor_interface_params(spec, features, target) + return _MLModel(spec) + + +def get_input_dimension(model): + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." 
+ ) + check_fitted(model, lambda m: hasattr(m, "support_vectors_")) + return len(model.support_vectors_[0]) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/__init__.py new file mode 100644 index 00000000..77268c40 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +# A single function to manage the importing. + +from ._converter import convert diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_converter.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_converter.py new file mode 100644 index 00000000..6dd251b2 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_converter.py @@ -0,0 +1,161 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from coremltools import __version__ as ct_version +from coremltools.models import _METADATA_SOURCE, _METADATA_VERSION + +""" +Defines the primary function for converting scikit-learn models. +""" + + +def convert(sk_obj, input_features=None, output_feature_names=None): + """ + Convert scikit-learn pipeline, classifier, or regressor to Core ML format. + + Parameters + ---------- + sk_obj: model | [model] of scikit-learn format. + Scikit learn model(s) to convert to a Core ML format. + + The input model may be a single scikit learn model, a scikit learn + pipeline model, or a list of scikit learn models. + + Currently supported scikit learn models are: + + - Linear and Logistic Regression + - LinearSVC and LinearSVR + - Ridge Regression + - SVC and SVR + - NuSVC and NuSVR + - Gradient Boosting Classifier and Regressor + - Decision Tree Classifier and Regressor + - Random Forest Classifier and Regressor + - Normalizer + - Imputer + - Standard Scaler + - DictVectorizer + - One Hot Encoder + - KNeighborsClassifier + + The input model, or the last model in a pipeline or list of models, + determines whether this is exposed as a Transformer, Regressor, + or Classifier. + + Note that there may not be a one-to-one correspondence between scikit + learn models and the Core ML models chosen to represent them. For + example, many scikit learn models are embedded in a pipeline to handle + processing of input features. + + + input_features: str | dict | list + + Optional name(s) that can be given to the inputs of the scikit-learn + model. Defaults to ``"input"``. + + Input features can be specified in a number of forms. + + - Single string: In this case, the input is assumed to be a single + array, with the number of dimensions set using ``num_dimensions``. + + - List of strings: In this case, the overall input dimensions to the + scikit-learn model are assumed to be the length of the list. If + neighboring names are identical, they are assumed to be an input + array of that length. For example: + + ``["a", "b", "c"]`` + + resolves to: + + ``[("a", Double), ("b", Double), ("c", Double)]``. + + In addition: + + ``["a", "a", "b"]`` + + resolves to: + + ``[("a", Array(2)), ("b", Double)]``. 
+ + - Dictionary: Where the keys are the names and the indices or ranges of + feature indices. + + In this case, the Dictionary is presented as a mapping from keys to indices or + ranges of contiguous indices. For example: + + ``{"a" : 0, "b" : [2,3], "c" : 1}`` + + resolves to: + + ``[("a", Double), ("c", Double), ("b", Array(2))]``. + + Note that the ordering is determined by the indices. + + - List of tuples of the form ``(name, datatype)``, in which ``name`` is the + name of the exposed feature, and ``datatype`` is an instance of + ``String``, ``Double``, ``Int64``, ``Array``, or ``Dictionary``. + + output_feature_names: string or list of strings + Optional name(s) that can be given to the inputs of the scikit-learn + model. + + The ``output_feature_names`` is interpreted according to the model type: + + - If the scikit-learn model is a transformer, it is the name of the + array feature output by the final sequence of the transformer + (defaults to ``"output"``). + - If it is a classifier, it should be a 2-tuple of names giving the top + class prediction and the array of scores for each class (defaults to + ``"classLabel"`` and ``"classScores"``). + - If it is a regressor, it should give the name of the prediction value + (defaults to ``"prediction"``). + + Returns + ------- + model:MLModel + Returns an MLModel instance representing a Core ML model. + + Examples + -------- + .. sourcecode:: python + + >>> from sklearn.linear_model import LinearRegression + >>> import pandas as pd + + # Load data + >>> data = pd.read_csv('houses.csv') + + # Train a model + >>> model = LinearRegression() + >>> model.fit(data[["bedroom", "bath", "size"]], data["price"]) + + # Convert and save the scikit-learn model + >>> import coremltools + >>> coreml_model = coremltools.converters.sklearn.convert(model, + ["bedroom", "bath", "size"], + "price") + >>> coreml_model.save('HousePricer.mlmodel') + """ + + # This function is just a thin wrapper around the internal converter so + # that sklearn isn't actually imported unless this function is called + from ...models import MLModel + # NOTE: Providing user-defined class labels will be enabled when + # several issues with the ordering of the classes are worked out. For now, + # to use custom class labels, directly import the internal function below. + from ._converter_internal import _convert_sklearn_model + + spec = _convert_sklearn_model( + sk_obj, input_features, output_feature_names, class_labels=None + ) + + model = MLModel(spec) + from sklearn import __version__ as sklearn_version + + model.user_defined_metadata[_METADATA_VERSION] = ct_version + model.user_defined_metadata[_METADATA_SOURCE] = "scikit-learn=={0}".format( + sklearn_version + ) + return model diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_converter_internal.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_converter_internal.py new file mode 100644 index 00000000..2c2a9e54 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_converter_internal.py @@ -0,0 +1,350 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +""" +The primary file for converting Scikit-learn models. 
+ + +""" + +from ..._deps import _HAS_SKLEARN +from ...models import _feature_management as _fm +from ...models import datatypes +from ...models.feature_vectorizer import create_feature_vectorizer +from ...models.pipeline import Pipeline, PipelineClassifier, PipelineRegressor + +if _HAS_SKLEARN: + from sklearn.pipeline import Pipeline as sk_Pipeline + +from collections import namedtuple as _namedtuple + + +from . import (_SVC, _SVR, _decision_tree_classifier, _decision_tree_regressor, + _dict_vectorizer, _gradient_boosting_classifier, + _gradient_boosting_regressor, _imputer, _k_neighbors_classifier, + _linear_regression, _LinearSVC, _LinearSVR, + _logistic_regression, _normalizer, _NuSVC, _NuSVR, + _one_hot_encoder, _random_forest_classifier, + _random_forest_regressor, _standard_scaler, _ridge_regression) + +_PIPELINE_INTERNAL_FEATURE_NAME = "__feature_vector__" + +_converter_module_list = [ + _dict_vectorizer, + _one_hot_encoder, + _normalizer, + _standard_scaler, + _imputer, + _NuSVC, + _NuSVR, + _SVC, + _SVR, + _linear_regression, + _LinearSVC, + _LinearSVR, + _logistic_regression, + _random_forest_classifier, + _random_forest_regressor, + _decision_tree_classifier, + _decision_tree_regressor, + _gradient_boosting_classifier, + _gradient_boosting_regressor, + _k_neighbors_classifier, + _ridge_regression +] + + +def _test_module(m): + assert m.model_type in ["transformer", "regressor", "classifier"], m.__name__ + if m.model_type == "transformer": + assert hasattr(m, "update_dimension"), m.__name__ + if m.model_type == "classifier": + assert hasattr(m, "supports_output_scores"), m.__name__ + assert hasattr(m, "get_output_classes"), m.__name__ + assert hasattr(m, "sklearn_class"), m.__name__ + assert hasattr(m, "get_input_dimension"), m.__name__ + + return True + + +assert all(_test_module(m) for m in _converter_module_list) + +_converter_lookup = dict( + (md.sklearn_class, i) for i, md in enumerate(_converter_module_list) +) +_converter_functions = [md.convert for md in _converter_module_list] + + +def _get_converter_module(sk_obj): + """ + Returns the module holding the conversion functions for a + particular model). + """ + try: + cv_idx = _converter_lookup[sk_obj.__class__] + except KeyError: + raise ValueError( + "Transformer '%s' not supported; supported transformers are %s." + % (repr(sk_obj), ",".join(k.__name__ for k in _converter_module_list)) + ) + + return _converter_module_list[cv_idx] + + +def _is_sklearn_model(sk_obj): + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + from sklearn.pipeline import Pipeline as sk_Pipeline + + return isinstance(sk_obj, sk_Pipeline) or sk_obj.__class__ in _converter_lookup + + +def _convert_sklearn_model( + input_sk_obj, input_features=None, output_feature_names=None, class_labels=None +): + """ + Converts a generic sklearn pipeline, transformer, classifier, or regressor + into an coreML specification. + """ + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + from sklearn.pipeline import Pipeline as sk_Pipeline + + if input_features is None: + input_features = "input" + + if isinstance(input_sk_obj, sk_Pipeline): + sk_obj_list = input_sk_obj.steps + else: + sk_obj_list = [("SKObj", input_sk_obj)] + + if len(sk_obj_list) == 0: + raise ValueError("No SKLearn transformers supplied.") + + # Put the transformers into a pipeline list to hold them so that they can + # later be added to a pipeline object. 
(Hold off adding them to the + # pipeline now in case it's a single model at the end, in which case it + # gets returned as is.) + # + # Each member of the pipeline list is a tuple of the proto spec for that + # model, the input features, and the output features. + pipeline_list = [] + + # These help us keep track of what's going on a bit easier. + Input = _namedtuple("InputTransformer", ["name", "sk_obj", "module"]) + Output = _namedtuple( + "CoreMLTransformer", ["spec", "input_features", "output_features"] + ) + + # Get a more information rich representation of the list for convenience. + # obj_list is a list of tuples of (name, sk_obj, and the converter module for + # that step in the list. + obj_list = [ + Input(sk_obj_name, sk_obj, _get_converter_module(sk_obj)) + for sk_obj_name, sk_obj in sk_obj_list + ] + + # Various preprocessing steps. + + # If the first component of the object list is the sklearn dict vectorizer, + # which is unique in that it accepts a list of dictionaries, then we can + # get the feature type mapping from that. This then may require the addition + # of several OHE steps, so those need to be processed in the first stage. + if isinstance(obj_list[0].sk_obj, _dict_vectorizer.sklearn_class): + + dv_obj = obj_list[0].sk_obj + output_dim = len(_dict_vectorizer.get_input_feature_names(dv_obj)) + + if not isinstance(input_features, str): + raise TypeError( + "If the first transformer in a pipeline is a " + "DictVectorizer, then the input feature must be the name " + "of the input dictionary." + ) + + input_features = [(input_features, datatypes.Dictionary(str))] + + if len(obj_list) > 1: + output_feature_name = _PIPELINE_INTERNAL_FEATURE_NAME + + else: + if output_feature_names is None: + output_feature_name = "transformed_features" + + elif isinstance(output_feature_names, str): + output_feature_name = output_feature_names + + else: + raise TypeError( + "For a transformer pipeline, the " + "output_features needs to be None or a string " + "for the predicted value." + ) + + output_features = [(output_feature_name, datatypes.Array(output_dim))] + + spec = _dict_vectorizer.convert(dv_obj, input_features, output_features)._spec + pipeline_list.append(Output(spec, input_features, output_features)) + + # Set up the environment for the rest of the pipeline + current_input_features = output_features + current_num_dimensions = output_dim + + # In the corner case that it's only the dict vectorizer here, just return + # and exit with that at this point. + if len(obj_list) == 1: + return spec + else: + del obj_list[0] + + else: + + # First, we need to resolve the input feature types as the sklearn pipeline + # expects just an array as input, but what we want to expose to the coreML + # user is an interface with named variables. This resolution has to handle + # a number of cases. + + # Can we get the number of features from the model? If so, pass that + # information into the feature resolution function. If we can't, then this + # function should return None. + first_sk_obj = obj_list[0].sk_obj + num_dimensions = _get_converter_module(first_sk_obj).get_input_dimension( + first_sk_obj + ) + # Resolve the input features. + features = _fm.process_or_validate_features(input_features, num_dimensions) + current_num_dimensions = _fm.dimension_of_array_features(features) + + # Add in a feature vectorizer that consolodates all of the feature inputs + # into the form expected by scipy's pipelines. 
Essentially this is a + # translation layer between the coreML form with named arguments and the + # scikit learn variable form. + if len(features) == 1 and isinstance(features[0][1], datatypes.Array): + current_input_features = features + else: + spec, _output_dimension = create_feature_vectorizer( + features, _PIPELINE_INTERNAL_FEATURE_NAME + ) + + assert _output_dimension == current_num_dimensions + ft_out_features = [ + ( + _PIPELINE_INTERNAL_FEATURE_NAME, + datatypes.Array(current_num_dimensions), + ) + ] + pipeline_list.append(Output(spec, features, ft_out_features)) + current_input_features = ft_out_features + + # Now, validate the sequence of transformers to make sure we have something + # that can work with all of this. + for i, (_, _, m) in enumerate(obj_list[:-1]): + if m.model_type != "transformer": + raise ValueError( + "Only a sequence of transformer classes followed by a " + "single transformer, regressor, or classifier is currently supported. " + "(object in position %d interpreted as %s)" % (i, m.model_type) + ) + + overall_mode = obj_list[-1].module.model_type + assert overall_mode in ("transformer", "regressor", "classifier") + + # Now, go through each transformer in the sequence of transformers and add + # it to the pipeline. + for _, sk_obj, sk_m in obj_list[:-1]: + next_dimension = sk_m.update_dimension(sk_obj, current_num_dimensions) + + output_features = [ + (_PIPELINE_INTERNAL_FEATURE_NAME, datatypes.Array(next_dimension)) + ] + spec = sk_m.convert(sk_obj, current_input_features, output_features)._spec + + pipeline_list.append(Output(spec, current_input_features, output_features)) + + current_input_features = output_features + current_num_dimensions = next_dimension + + # Now, handle the final transformer. This is where we need to have different + # behavior depending on whether it's a classifier, transformer, or regressor. + _, last_sk_obj, last_sk_m = obj_list[-1] + + if overall_mode == "classifier": + supports_output_scores = last_sk_m.supports_output_scores(last_sk_obj) + _internal_output_classes = list(last_sk_m.get_output_classes(last_sk_obj)) + + if class_labels is None: + class_labels = _internal_output_classes + + output_features = _fm.process_or_validate_classifier_output_features( + output_feature_names, class_labels, supports_output_scores + ) + + elif overall_mode == "regressor": + if output_feature_names is None: + output_features = [("prediction", datatypes.Double())] + elif isinstance(output_feature_names, str): + output_features = [(output_feature_names, datatypes.Double())] + else: + raise TypeError( + "For a regressor object or regressor pipeline, the " + "output_features needs to be None or a string for the predicted value." + ) + + else: # transformer + final_output_dimension = last_sk_m.update_dimension( + last_sk_obj, current_num_dimensions + ) + + if output_feature_names is None: + output_features = [ + ("transformed_features", datatypes.Array(final_output_dimension)) + ] + + elif isinstance(output_feature_names, str): + output_features = [ + (output_feature_names, datatypes.Array(final_output_dimension)) + ] + + else: + raise TypeError( + "For a transformer object or transformer pipeline, the " + "output_features needs to be None or a string for the " + "name of the transformed value." + ) + + last_spec = last_sk_m.convert( + last_sk_obj, current_input_features, output_features + )._spec + + pipeline_list.append(Output(last_spec, current_input_features, output_features)) + + # Now, create the pipeline and return the spec for it. 
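+    # Hedged usage sketch (not executed during conversion): a caller would
+    # typically wrap the spec assembled below, e.g.
+    #
+    #     import coremltools
+    #     spec = _convert_sklearn_model(pipe, input_features="input")
+    #     mlmodel = coremltools.models.MLModel(spec)
+    #
+    # where `pipe` is an assumed fitted sklearn Pipeline built only from steps
+    # supported by _converter_module_list.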
+ + # If it's just one element, we can return it. + if len(pipeline_list) == 1: + return pipeline_list[0].spec + + original_input_features = pipeline_list[0].input_features + + if overall_mode == "regressor": + pipeline = PipelineRegressor(original_input_features, output_features) + + elif overall_mode == "classifier": + pipeline = PipelineClassifier( + original_input_features, class_labels, output_features + ) + + else: + pipeline = Pipeline(original_input_features, output_features) + + # Okay, now we can build the pipeline spec. + for spec, input_features, output_features in pipeline_list: + pipeline.add_model(spec) + + return pipeline.spec diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_decision_tree_classifier.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_decision_tree_classifier.py new file mode 100644 index 00000000..e22d6298 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_decision_tree_classifier.py @@ -0,0 +1,68 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from ..._deps import _HAS_SKLEARN +from ...models import MLModel as _MLModel +from ._tree_ensemble import convert_tree_ensemble + +if _HAS_SKLEARN: + import sklearn.tree as _tree + + from . import _sklearn_util + +model_type = "classifier" +sklearn_class = _tree.DecisionTreeClassifier + + +def convert(model, input_name, output_features): + """Convert a decision tree model to protobuf format. + + Parameters + ---------- + decision_tree : DecisionTreeClassifier + A trained scikit-learn tree model. + + input_name: str + Name of the input columns. + + output_name: str + Name of the output columns. + + Returns + ------- + model_spec: An object of type Model_pb. + Protobuf representation of the model + """ + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + + _sklearn_util.check_expected_type(model, _tree.DecisionTreeClassifier) + _sklearn_util.check_fitted( + model, lambda m: hasattr(m, "tree_") and model.tree_ is not None + ) + + return _MLModel( + convert_tree_ensemble( + model, + input_name, + output_features, + mode="classifier", + class_labels=model.classes_, + ) + ) + + +def supports_output_scores(model): + return True + + +def get_output_classes(model): + return list(model.classes_) + + +def get_input_dimension(model): + return model.n_features_ diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_decision_tree_regressor.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_decision_tree_regressor.py new file mode 100644 index 00000000..15dfb967 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_decision_tree_regressor.py @@ -0,0 +1,51 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from ..._deps import _HAS_SKLEARN +from ...models import MLModel as _MLModel +from ._tree_ensemble import convert_tree_ensemble as _convert_tree_ensemble + +if _HAS_SKLEARN: + import sklearn.tree as _tree + + from . 
import _sklearn_util
+
+model_type = "regressor"
+sklearn_class = _tree.DecisionTreeRegressor
+
+
+def convert(model, feature_names, target):
+    """Convert a decision tree model to protobuf format.
+
+    Parameters
+    ----------
+    model : DecisionTreeRegressor
+        A trained scikit-learn tree model.
+
+    feature_names: [str]
+        Names of the input columns.
+
+    target: str
+        Name of the output column.
+
+    Returns
+    -------
+    model_spec: An object of type Model_pb.
+        Protobuf representation of the model
+    """
+    if not (_HAS_SKLEARN):
+        raise RuntimeError(
+            "scikit-learn not found. scikit-learn conversion API is disabled."
+        )
+
+    _sklearn_util.check_expected_type(model, _tree.DecisionTreeRegressor)
+    _sklearn_util.check_fitted(
+        model, lambda m: hasattr(m, "tree_") and model.tree_ is not None
+    )
+    return _MLModel(_convert_tree_ensemble(model, feature_names, target))
+
+
+def get_input_dimension(model):
+    return model.n_features_
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_dict_vectorizer.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_dict_vectorizer.py
new file mode 100644
index 00000000..ac6cbf2f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_dict_vectorizer.py
@@ -0,0 +1,113 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from ... import SPECIFICATION_VERSION
+from ..._deps import _HAS_SKLEARN
+from ...models import MLModel as _MLModel
+from ...models._feature_management import process_or_validate_features
+from ...models._interface_management import set_transform_interface_params
+from ...models.feature_vectorizer import create_feature_vectorizer
+from ...proto import Model_pb2 as _Model_pb2
+
+if _HAS_SKLEARN:
+    from sklearn.feature_extraction import DictVectorizer
+
+    sklearn_class = DictVectorizer
+
+from ...models import datatypes
+from ...models.pipeline import Pipeline
+
+model_type = "transformer"
+
+
+def convert(model, input_features, output_features):
+    """Convert a DictVectorizer model to the protobuf spec.
+
+    Parameters
+    ----------
+    model: DictVectorizer
+        A fitted DictVectorizer model.
+
+    input_features: str
+        Name of the input column.
+
+    output_features: str
+        Name of the output column.
+
+    Returns
+    -------
+    model_spec: An object of type Model_pb.
+        Protobuf representation of the model
+    """
+
+    _INTERMEDIATE_FEATURE_NAME = "__sparse_vector_features__"
+
+    n_dimensions = len(model.feature_names_)
+    input_features = process_or_validate_features(input_features)
+
+    # Ensure that the output_features are also solid.
+    output_features = process_or_validate_features(output_features, n_dimensions)
+
+    # The DictVectorizer in the framework outputs a sparse dictionary
+    # of index to value due to other considerations, but we are expecting
+    # the output of this to be a dense feature vector. To make that happen,
+    # put a feature_vectorizer immediately after the dict vectorizer.
+    pline = Pipeline(input_features, output_features)
+
+    # Set the basic model parameters of the dict vectorizer component.
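+    # Hedged sketch of the mapping built below: a DictVectorizer fitted so
+    # that feature_names_ == ["a", "b"] (assumed values) turns the input
+    # {"a": 1.0} into the sparse index dictionary {0: 1.0}; the feature
+    # vectorizer appended afterwards densifies that to the array [1.0, 0.0].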
+    dv_spec = _Model_pb2.Model()
+    dv_spec.specificationVersion = SPECIFICATION_VERSION
+
+    # Set up the dict vectorizer parameters
+    tr_spec = dv_spec.dictVectorizer
+    is_str = None
+    for feature_name in model.feature_names_:
+        if isinstance(feature_name, str):
+            if is_str is False:
+                raise ValueError("Mapping of DictVectorizer mixes int and str types.")
+
+            tr_spec.stringToIndex.vector.append(feature_name)
+            is_str = True
+
+        elif isinstance(feature_name, int):
+            if is_str is True:
+                raise ValueError("Mapping of DictVectorizer mixes int and str types.")
+
+            tr_spec.int64ToIndex.vector.append(feature_name)
+            is_str = False
+
+    intermediate_features = [
+        (_INTERMEDIATE_FEATURE_NAME, datatypes.Dictionary(key_type=int))
+    ]
+
+    # Set the interface for the dict vectorizer with the input and the
+    # intermediate output
+    set_transform_interface_params(dv_spec, input_features, intermediate_features)
+
+    pline.add_model(dv_spec)
+
+    # Follow the dict vectorizer by a feature_vectorizer to change the sparse
+    # output layer into a dense vector as expected.
+    fvec, _num_out_dim = create_feature_vectorizer(
+        intermediate_features,
+        output_features[0][0],
+        {"__sparse_vector_features__": n_dimensions},
+    )
+
+    pline.add_model(fvec)
+
+    return _MLModel(pline.spec)
+
+
+def update_dimension(m, current_num_dimensions):
+    return len(m.feature_names_)
+
+
+def get_input_dimension(m):
+    return None
+
+
+def get_input_feature_names(m):
+    return m.feature_names_
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_gradient_boosting_classifier.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_gradient_boosting_classifier.py
new file mode 100644
index 00000000..df790aee
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_gradient_boosting_classifier.py
@@ -0,0 +1,102 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import numpy as np
+
+from ..._deps import _HAS_SKLEARN
+from ...models import MLModel as _MLModel
+from ._tree_ensemble import convert_tree_ensemble as _convert_tree_ensemble, get_input_dimension
+
+if _HAS_SKLEARN:
+    import sklearn.ensemble as _ensemble
+
+    from . import _sklearn_util
+
+    sklearn_class = _ensemble.GradientBoostingClassifier
+
+model_type = "classifier"
+
+
+def convert(model, feature_names, target):
+    """Convert a boosted tree model to protobuf format.
+
+    Parameters
+    ----------
+    model : GradientBoostingClassifier
+        A trained scikit-learn tree model.
+
+    feature_names: [str]
+        Names of the input columns.
+
+    target: str
+        Name of the output column.
+
+    Returns
+    -------
+    model_spec: An object of type Model_pb.
+        Protobuf representation of the model
+    """
+    if not (_HAS_SKLEARN):
+        raise RuntimeError(
+            "scikit-learn not found. scikit-learn conversion API is disabled."
+        )
+
+    _sklearn_util.check_expected_type(model, _ensemble.GradientBoostingClassifier)
+
+    def is_gbr_model(m):
+        if not hasattr(m, "estimators_") or m.estimators_ is None:
+            return False
+        if len(m.estimators_) == 0:
+            return False
+        for t in m.estimators_.flatten():
+            if not hasattr(t, "tree_") or t.tree_ is None:
+                return False
+        return True
+
+    _sklearn_util.check_fitted(model, is_gbr_model)
+    post_evaluation_transform = None
+    if model.n_classes_ == 2:
+        post_evaluation_transform = "Regression_Logistic"
+    else:
+        post_evaluation_transform = "Classification_SoftMax"
+    # Here we enumerate the known methods GradientBoostingClassifier uses for
+    # initializing the raw predictions. Alternatively, we could enumerate known
+    # estimator/strategy combinations, but this covers more combinations with
+    # fewer hacks.
+    base_prediction = None
+    dummy_x = np.zeros((1, model.n_features_))
+    for base_init_func in ('_init_decision_function', '_raw_predict_init'):
+        if not hasattr(model, base_init_func):
+            continue
+        raw_predictions = getattr(model, base_init_func)(dummy_x)[0, :]
+        if '_init_decision_function' == base_init_func and model.n_classes_ > 2:
+            # fix initial default prediction for multiclass classification
+            # https://github.com/scikit-learn/scikit-learn/pull/12983
+            raw_predictions = np.log(raw_predictions)
+        base_prediction = list(raw_predictions)
+        break
+    if base_prediction is None:
+        raise ValueError("This classifier is not supported: cannot initialize "
+                         "base_prediction. Please file a bug report.")
+
+    return _MLModel(
+        _convert_tree_ensemble(
+            model,
+            feature_names,
+            target,
+            mode="classifier",
+            base_prediction=base_prediction,
+            class_labels=model.classes_,
+            post_evaluation_transform=post_evaluation_transform,
+        )
+    )
+
+
+def supports_output_scores(model):
+    return True
+
+
+def get_output_classes(model):
+    return list(model.classes_)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_gradient_boosting_regressor.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_gradient_boosting_regressor.py
new file mode 100644
index 00000000..f1494d7e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_gradient_boosting_regressor.py
@@ -0,0 +1,74 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from distutils.version import StrictVersion
+
+from ..._deps import _HAS_SKLEARN, _SKLEARN_VERSION
+from ...models import MLModel as _MLModel
+from ._tree_ensemble import convert_tree_ensemble as _convert_tree_ensemble, get_input_dimension
+
+if _HAS_SKLEARN:
+    import sklearn.ensemble as _ensemble
+
+    from . import _sklearn_util
+
+    sklearn_class = _ensemble.GradientBoostingRegressor
+
+model_type = "regressor"
+
+
+def convert(model, input_features, output_features):
+    """Convert a boosted tree model to protobuf format.
+
+    Parameters
+    ----------
+    model : GradientBoostingRegressor
+        A trained scikit-learn tree model.
+
+    input_features: [str]
+        Names of the input columns.
+
+    output_features: str
+        Name of the output column.
+
+    Returns
+    -------
+    model_spec: An object of type Model_pb.
+        Protobuf representation of the model
+    """
+    if not (_HAS_SKLEARN):
+        raise RuntimeError(
+            "scikit-learn not found. scikit-learn conversion API is disabled."
+        )
+
+    _sklearn_util.check_expected_type(model, _ensemble.GradientBoostingRegressor)
+
+    def is_gbr_model(m):
+        if not hasattr(m, "estimators_") or m.estimators_ is None:
+            return False
+        if len(m.estimators_) == 0:
+            return False
+        for t in m.estimators_.flatten():
+            if not hasattr(t, "tree_") or t.tree_ is None:
+                return False
+        return True
+
+    _sklearn_util.check_fitted(model, is_gbr_model)
+
+    if model.loss == "huber":
+        base_prediction = model.init_.quantile
+    else:
+        # >= 0.22 GradientBoostingRegressor deprecated "mean" in favor of the "constant_" attribute
+        if _SKLEARN_VERSION < StrictVersion("0.22"):
+            base_prediction = model.init_.mean
+        else:
+            base_prediction = model.init_.constant_
+
+    return _MLModel(
+        _convert_tree_ensemble(
+            model, input_features, output_features, base_prediction=base_prediction
+        )
+    )
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_imputer.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_imputer.py
new file mode 100644
index 00000000..fc4d5866
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_imputer.py
@@ -0,0 +1,113 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from distutils.version import StrictVersion
+
+from ... import SPECIFICATION_VERSION
+from ..._deps import _HAS_SKLEARN, _SKLEARN_VERSION
+from ...models import MLModel as _MLModel
+from ...models import datatypes
+from ...models._interface_management import set_transform_interface_params
+from ...proto import Model_pb2 as _Model_pb2
+from . import _sklearn_util
+
+if _HAS_SKLEARN:
+    import sklearn
+
+    try:
+        # scikit-learn >= 0.21
+        from sklearn.impute import SimpleImputer as Imputer
+
+        sklearn_class = sklearn.impute.SimpleImputer
+    except ImportError:
+        # scikit-learn < 0.21
+        from sklearn.preprocessing import Imputer
+
+        sklearn_class = sklearn.preprocessing.Imputer
+
+    model_type = "transformer"
+
+
+def convert(model, input_features, output_features):
+    """Convert an Imputer model to the protobuf spec.
+
+    Parameters
+    ----------
+    model: Imputer
+        A fitted Imputer model.
+
+    input_features: str
+        Name of the input column.
+
+    output_features: str
+        Name of the output column.
+
+    Returns
+    -------
+    model_spec: An object of type Model_pb.
+        Protobuf representation of the model
+    """
+    if not (_HAS_SKLEARN):
+        raise RuntimeError(
+            "scikit-learn not found. scikit-learn conversion API is disabled."
+        )
+
+    # Set the interface params.
+    spec = _Model_pb2.Model()
+    spec.specificationVersion = SPECIFICATION_VERSION
+
+    assert len(input_features) == 1
+    assert isinstance(input_features[0][1], datatypes.Array)
+
+    # feature name in and out are the same here
+    spec = set_transform_interface_params(spec, input_features, output_features)
+
+    # Test the scikit-learn model
+    _sklearn_util.check_expected_type(model, Imputer)
+    _sklearn_util.check_fitted(model, lambda m: hasattr(m, "statistics_"))
+
+    # model.axis was deprecated in SimpleImputer >= 0.22, which now imputes only
+    # along columns as desired here.
+    if _SKLEARN_VERSION < StrictVersion("0.22"):
+        if model.axis != 0:
+            raise ValueError("Imputation is only supported along axis = 0.")
+
+    # The imputer in our framework operates on the whole feature vector at
+    # once, so we translate the fitted per-column statistics directly into a
+    # single imputer layer that fills missing values in every column.
+
+    tr_spec = spec.imputer
+
+    for v in model.statistics_:
+        tr_spec.imputedDoubleArray.vector.append(v)
+
+    try:
+        tr_spec.replaceDoubleValue = float(model.missing_values)
+    except ValueError:
+        raise ValueError(
+            "Only scalar values or NAN as missing_values in _imputer are supported."
+        )
+
+    return _MLModel(spec)
+
+
+def update_dimension(model, input_dimension):
+    """
+    Given a model that takes an array of dimension input_dimension, returns
+    the output dimension.
+    """
+
+    # This doesn't expand anything.
+    return input_dimension
+
+
+def get_input_dimension(model):
+    if not (_HAS_SKLEARN):
+        raise RuntimeError(
+            "scikit-learn not found. scikit-learn conversion API is disabled."
+        )
+
+    _sklearn_util.check_fitted(model, lambda m: hasattr(m, "statistics_"))
+    return len(model.statistics_)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_k_neighbors_classifier.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_k_neighbors_classifier.py
new file mode 100644
index 00000000..09df92b6
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_k_neighbors_classifier.py
@@ -0,0 +1,291 @@
+# Copyright (c) 2019, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import coremltools
+from coremltools.proto import FeatureTypes_pb2
+
+from ..._deps import _HAS_SCIPY, _HAS_SKLEARN
+from ...models import MLModel as _MLModel
+
+if _HAS_SKLEARN:
+    import sklearn.neighbors as _neighbors
+
+    from . import _sklearn_util
+
+if _HAS_SCIPY:
+    import scipy as sp
+
+import numpy as np
+
+model_type = "classifier"
+sklearn_class = _neighbors.KNeighborsClassifier
+
+
+def convert(model, input_name, output_name):
+    """Convert a scikit KNeighborsClassifier to protobuf format.
+
+    Parameters
+    ----------
+    model : KNeighborsClassifier
+        A trained scikit-learn KNeighborsClassifier model.
+
+    input_name: str
+        Name of the input column.
+
+    output_name: str
+        Name of the output column.
+
+    Returns
+    -------
+    model_spec: An object of type Model_pb.
+        Protobuf representation of the model
+    """
+    if not (_HAS_SKLEARN):
+        raise RuntimeError(
+            "scikit-learn not found. scikit-learn conversion API is disabled."
+        )
+
+    _sklearn_util.check_expected_type(model, sklearn_class)
+
+    _check_fitted(model)
+    _check_algorithm(model)
+    _check_weighting_scheme(model)
+    _check_distance_metric(model)
+
+    return _MLModel(_convert_k_neighbors_classifier(model, input_name, output_name))
+
+
+def supports_output_scores(model):
+    """KNeighborsClassifier models do not support output scores."""
+    return False
+
+
+def get_output_classes(model):
+    """Get the candidate classes for the model."""
+    _check_fitted(model)
+    return list(model.classes_)
+
+
+def _convert_k_neighbors_classifier(model, input_name, output_name):
+    """Convert the scikit KNeighborsClassifier to CoreML.
Assumes initial validation of the scikit model has been done.""" + + spec = coremltools.proto.Model_pb2.Model() + spec.specificationVersion = coremltools.SPECIFICATION_VERSION + + spec.kNearestNeighborsClassifier.numberOfNeighbors.defaultValue = model.n_neighbors + spec.kNearestNeighborsClassifier.numberOfNeighbors.range.minValue = 1 + spec.kNearestNeighborsClassifier.numberOfNeighbors.range.maxValue = _number_of_samples( + model, spec + ) # is there a better heuristic to use here? + + number_of_dimensions = 0 + if _is_algorithm_brute(model): + number_of_dimensions = model._fit_X.shape[1] + spec.kNearestNeighborsClassifier.nearestNeighborsIndex.linearIndex.MergeFromString( + b"" + ) + elif _is_algorithm_kd_tree(model): + npdata = np.asarray(model._tree.data) + number_of_dimensions = get_input_dimension(model) + spec.kNearestNeighborsClassifier.nearestNeighborsIndex.singleKdTreeIndex.leafSize = ( + model.leaf_size + ) + else: + raise TypeError( + "KNeighbors algorithm not supported for CoreML conversion: {}".format( + model.algorithm + ) + ) + spec.kNearestNeighborsClassifier.nearestNeighborsIndex.numberOfDimensions = ( + number_of_dimensions + ) + + # Make sure the distance function is set + spec.kNearestNeighborsClassifier.nearestNeighborsIndex.squaredEuclideanDistance.MergeFromString( + b"" + ) + + input_features = spec.description.input.add() + input_features.name = input_name[0][0] + input_features.type.multiArrayType.shape.extend([number_of_dimensions]) + input_features.type.multiArrayType.dataType = ( + FeatureTypes_pb2.ArrayFeatureType.FLOAT32 + ) + + output_label = spec.description.output.add() + output_label.name = output_name[0][0] + + # predictedFeatureName is required since KNN is a classifier and it should be same as outputName. + spec.description.predictedFeatureName = output_label.name + + # Need to confirm if scikit only accepts integer labels + output_label.type.int64Type.MergeFromString(b"") + spec.kNearestNeighborsClassifier.uniformWeighting.MergeFromString(b"") + + _extract_training_data(model, spec) + + return spec + + +def _number_of_samples(model, spec): + """Get the number of samples the model is fitted to.""" + + if _is_algorithm_brute(model): + return model._fit_X.shape[0] + elif _is_algorithm_kd_tree(model): + return len(np.asarray(model._tree.data)) + return 0 + + +def _extract_training_data(model, spec): + """Extract the training data from the scikit model and add it to the CoreML spec""" + + if _is_algorithm_brute(model): + X = model._fit_X + if _is_valid_sparse_format(X): + X = _unpack_sparse(X) + + for sample in X: + coreml_sample = ( + spec.kNearestNeighborsClassifier.nearestNeighborsIndex.floatSamples.add() + ) + for feature in sample: + coreml_sample.vector.append(feature) + + elif _is_algorithm_kd_tree(model): + # sklearn guarantees that tree data is not stored in a sparse format + npdata = np.asarray(model._tree.data) + for sample in npdata: + coreml_sample = ( + spec.kNearestNeighborsClassifier.nearestNeighborsIndex.floatSamples.add() + ) + for feature in sample: + coreml_sample.vector.append(feature) + + for label in model._y: + spec.kNearestNeighborsClassifier.int64ClassLabels.vector.append(label) + + +def get_input_dimension(model): + """Get the input dimension for the model""" + _check_fitted(model) + number_of_dimensions = 0 + if _is_algorithm_brute(model): + number_of_dimensions = model._fit_X.shape[1] + elif _is_algorithm_kd_tree(model): + npdata = np.asarray(model._tree.data) + number_of_dimensions = len(npdata[0]) + else: + raise TypeError( + 
"KNeighbors algorithm not supported for CoreML conversion: {}".format( + model.algorithm + ) + ) + return number_of_dimensions + + +def _check_fitted(model): + """Simple wrapper to check if the KNeighborsClassifier has been fitted.""" + return _sklearn_util.check_fitted( + model, lambda m: hasattr(m, "_fit_method") or hasattr(m, "_fit_X") + ) + + +def _check_algorithm(model): + """Ensure the kNeighbors algorithm for the given scikit model is a supported type""" + is_valid = False + print_name = "" + if model.algorithm == "brute" or model.algorithm == "kd_tree": + is_valid = True + print_name = model.algorithm + elif model.algorithm == "auto" and model._fit_method == "kd_tree": + is_valid = True + print_name = "kd_tree" + elif model.algorithm == "auto" and model._fit_method == "brute": + is_valid = True + print_name = "brute" + if not is_valid: + raise TypeError( + "KNeighbors algorithm not supported for CoreML conversion: {}".format( + print_name + ) + ) + + +def _check_weighting_scheme(model): + """Simple wrapper to ensure the weighting scheme is valid for CoreML conversion""" + is_valid = False + if model.weights == "uniform": + is_valid = True + + # Other cases CoreML doesn't support include weighting by distance or a user-provided 'callable' object. + + if not is_valid: + print_name = "" + if _is_printable(model.weights): + print_name = model.weights + else: + print_name = getattr(model.weights, "__name__", repr(model.weights)) + raise TypeError( + "KNeighbors weight function not supported for CoreML conversion: {}".format( + print_name + ) + ) + + +def _check_distance_metric(model): + """Simple wrapper to ensure the distance metric is valid for CoreML conversion""" + is_valid = False + if model.metric == "euclidean": + is_valid = True + elif model.metric == "minkowski" and model.p == 2: + is_valid = True + + # There are a number of other distance metrics supported by scikit that CoreML doesn't currently support. 
+
+    if not is_valid:
+        print_name = ""
+        if _is_printable(model.metric):
+            print_name = model.metric
+        else:
+            print_name = getattr(model.metric, "__name__", repr(model.metric))
+        raise TypeError(
+            "KNeighbors distance metric not supported for CoreML conversion: {}".format(
+                print_name
+            )
+        )
+
+
+def _is_algorithm_brute(model):
+    """Checks if the algorithm for the scikit model is set to 'brute'."""
+    return model.algorithm == "brute" or (
+        model.algorithm == "auto" and model._fit_method == "brute"
+    )
+
+
+def _is_algorithm_kd_tree(model):
+    """Checks if the algorithm for the scikit model is set to 'kd_tree'."""
+    return model.algorithm == "kd_tree" or (
+        model.algorithm == "auto" and model._fit_method == "kd_tree"
+    )
+
+
+def _is_printable(obj):
+    """Check if the object is a valid text type."""
+    return isinstance(obj, str)
+
+
+def _is_valid_sparse_format(obj):
+    """Check if the object is in CSR sparse format (the only valid type for KNeighborsClassifier)"""
+    if not _HAS_SCIPY:
+        return False
+    return isinstance(obj, sp.sparse.csr_matrix)
+
+
+def _unpack_sparse(obj):
+    """Unpack the sparse matrix into a format that we can easily iterate over for insertion into a CoreML model."""
+    if not _HAS_SCIPY or not sp.sparse.issparse(obj):
+        raise TypeError("Object {} is not a scipy sparse matrix type".format(type(obj)))
+    return obj.toarray()
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_linear_regression.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_linear_regression.py
new file mode 100644
index 00000000..46f4b6da
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_linear_regression.py
@@ -0,0 +1,81 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import numpy as _np
+
+from ... import SPECIFICATION_VERSION
+from ..._deps import _HAS_SKLEARN
+from ...models import MLModel as _MLModel
+from ...models._interface_management import set_regressor_interface_params
+from ...proto import Model_pb2 as _Model_pb2
+
+if _HAS_SKLEARN:
+    import sklearn
+    from sklearn.linear_model import LinearRegression
+
+    from . import _sklearn_util
+
+    model_type = "regressor"
+    sklearn_class = sklearn.linear_model.LinearRegression
+
+
+def convert(model, features, target):
+    """Convert a linear regression model to the protobuf spec.
+
+    Parameters
+    ----------
+    model: LinearRegression
+        A trained linear regression model.
+
+    features: [str]
+        Names of the input columns.
+
+    target: str
+        Name of the output column.
+
+    Returns
+    -------
+    model_spec: An object of type Model_pb.
+        Protobuf representation of the model
+    """
+    if not (_HAS_SKLEARN):
+        raise RuntimeError(
+            "scikit-learn not found. scikit-learn conversion API is disabled."
+        )
+
+    # Check the scikit learn model
+    _sklearn_util.check_expected_type(model, LinearRegression)
+    _sklearn_util.check_fitted(model, lambda m: hasattr(m, "coef_"))
+
+    return _MLModel(_convert(model, features, target))
+
+
+def _convert(model, features, target):
+    # Set the model class (regressor)
+    spec = _Model_pb2.Model()
+    spec.specificationVersion = SPECIFICATION_VERSION
+    spec = set_regressor_interface_params(spec, features, target)
+
+    # Add parameters for the linear regression.
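+    # The GLM regressor spec encodes y = <weights, x> + offset. As a hedged
+    # example with assumed values, coef_ = [2.0, -1.0] and intercept_ = 0.5
+    # become offset == [0.5] and a single weight row [2.0, -1.0] below.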
+ lr = spec.glmRegressor + + if isinstance(model.intercept_, _np.ndarray): + assert len(model.intercept_) == 1 + lr.offset.append(model.intercept_[0]) + else: + lr.offset.append(model.intercept_) + + weights = lr.weights.add() + for i in model.coef_: + weights.value.append(i) + return spec + + +def get_input_dimension(model): + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "coef_")) + return model.coef_.size diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_logistic_regression.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_logistic_regression.py new file mode 100644 index 00000000..f1dc91f6 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_logistic_regression.py @@ -0,0 +1,108 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from collections.abc import Iterable + +from ..._deps import _HAS_SKLEARN +from ...models import MLModel as _MLModel + +if _HAS_SKLEARN: + from sklearn.linear_model import LogisticRegression + + from . import _sklearn_util + + sklearn_class = LogisticRegression + +from ... import SPECIFICATION_VERSION +from ...models._interface_management import set_classifier_interface_params +from ...proto import Model_pb2 as _Model_pb2 + +model_type = "classifier" + + +def convert(model, feature_names, target): + """Convert a Logistic Regression model to the protobuf spec. + Parameters + ---------- + model: LogisticRegression + A trained LogisticRegression model. + + feature_names: [str], optional (default=None) + Name of the input columns. + + target: str, optional (default=None) + Name of the output column. + + Returns + ------- + model_spec: An object of type Model_pb. + Protobuf representation of the model + """ + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + + _sklearn_util.check_expected_type(model, LogisticRegression) + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "coef_")) + + return _MLModel(_convert(model, feature_names, target)) + + +def _convert(model, feature_names, target): + spec = _Model_pb2.Model() + spec.specificationVersion = SPECIFICATION_VERSION + + set_classifier_interface_params( + spec, feature_names, model.classes_, "glmClassifier", output_features=target + ) + + glmClassifier = spec.glmClassifier + + if model.multi_class == "ovr": + glmClassifier.classEncoding = glmClassifier.OneVsRest + else: + print( + '[ERROR] Currently "One Vs Rest" is the only supported multiclass option.' + ) + return None + + glmClassifier.postEvaluationTransform = glmClassifier.Logit + + if isinstance(model.intercept_, Iterable): + for val in model.intercept_: + glmClassifier.offset.append(val) + else: + for _ in model.coef_: + glmClassifier.offset.append(model.intercept_) + + for cur_in_row in model.coef_: + cur_out_row = glmClassifier.weights.add() + for val in cur_in_row: + cur_out_row.value.append(val) + + return spec + + +def supports_output_scores(model): + return True + + +def get_output_classes(model): + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." 
+ ) + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "coef_")) + return list(model.classes_) + + +def get_input_dimension(model): + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "coef_")) + return len(model.coef_[0]) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_normalizer.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_normalizer.py new file mode 100644 index 00000000..3bfcb61b --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_normalizer.py @@ -0,0 +1,82 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + + +from ... import SPECIFICATION_VERSION +from ..._deps import _HAS_SKLEARN +from ...models import MLModel as _MLModel +from ...models._interface_management import \ + set_transform_interface_params as _set_transform_interface_params +from ...proto import Model_pb2 as _Model_pb2 +from ...proto.Normalizer_pb2 import Normalizer as _proto__normalizer + +if _HAS_SKLEARN: + from sklearn.preprocessing import Normalizer + + from . import _sklearn_util + + sklearn_class = Normalizer + +model_type = "transformer" + + +def convert(model, input_features, output_features): + """Convert a normalizer model to the protobuf spec. + + Parameters + ---------- + model: Normalizer + A Normalizer. + + input_features: str + Name of the input column. + + output_features: str + Name of the output column. + + Returns + ------- + model_spec: An object of type Model_pb. + Protobuf representation of the model + """ + + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + + # Test the scikit-learn model + _sklearn_util.check_expected_type(model, Normalizer) + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "norm")) + + # Set the interface params. + spec = _Model_pb2.Model() + spec.specificationVersion = SPECIFICATION_VERSION + spec = _set_transform_interface_params(spec, input_features, output_features) + + # Set the one hot encoder parameters + _normalizer_spec = spec.normalizer + if model.norm == "l1": + _normalizer_spec.normType = _proto__normalizer.L1 + elif model.norm == "l2": + _normalizer_spec.normType = _proto__normalizer.L2 + elif model.norm == "max": + _normalizer_spec.normType = _proto__normalizer.LMax + return _MLModel(spec) + + +def update_dimension(model, input_dimension): + """ + Given a model that takes an array of dimension input_dimension, returns + the output dimension. + """ + + # No change + return input_dimension + + +def get_input_dimension(model): + # Cannot determine this now. + return None diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_one_hot_encoder.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_one_hot_encoder.py new file mode 100644 index 00000000..f12a6619 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_one_hot_encoder.py @@ -0,0 +1,264 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from ... 
import SPECIFICATION_VERSION +from ..._deps import _HAS_SKLEARN, _SKLEARN_VERSION +from ...models import MLModel as _MLModel +from ...models import datatypes +from ...models._interface_management import set_transform_interface_params +from ...models.array_feature_extractor import create_array_feature_extractor +from ...models.feature_vectorizer import create_feature_vectorizer +from ...models.pipeline import Pipeline +from ...proto import Model_pb2 as _Model_pb2 +from ...proto import OneHotEncoder_pb2 as _OHE_pb2 +from . import _sklearn_util + +if _HAS_SKLEARN: + from distutils.version import StrictVersion + + from sklearn.preprocessing import OneHotEncoder + + sklearn_class = OneHotEncoder + +# model type determines the behavior of this module. +model_type = "transformer" + + +def convert(model, input_features, output_features): + """Convert a one-hot-encoder model to the protobuf spec. + + Parameters + ---------- + model: OneHotEncoder + A trained one-hot encoder model. + + input_features: str, optional + Name of the input column. + + output_features: str, optional + Name of the output column. + + Returns + ------- + model_spec: An object of type Model_pb. + Protobuf representation of the model + """ + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + + # Make sure the model is fitted. + _sklearn_util.check_expected_type(model, OneHotEncoder) + if _SKLEARN_VERSION >= StrictVersion("0.22"): + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "categories_")) + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "n_features_in_")) + else: + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "active_features_")) + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "n_values_")) + + input_dimension = get_input_dimension(model) + + if input_dimension is not None: + # Make sure that our starting dimensions are correctly managed. + assert len(input_features) == 1 + assert input_features[0][1] == datatypes.Array(input_dimension) + + input_dimension = input_features[0][1].num_elements + + expected_output_dimension = update_dimension(model, input_dimension) + assert output_features[0][1] == datatypes.Array(expected_output_dimension) + + if _SKLEARN_VERSION >= StrictVersion("0.22"): + model.categorical_features = "all" + model.active_features_ = range(expected_output_dimension) + model.feature_indices_ = [0] + t = 0 + for i in model._n_features_outs: + t = t + i + model.feature_indices_.append(t) + + # Create a pipeline that can do all of the subsequent feature extraction. + feature_vectorizer_input_features = [] + feature_vectorizer_size_map = {} + + if model.categorical_features == "all": + _categorical_features = set(range(input_dimension)) + _cat_feature_idx_mapping = dict((i, i) for i in range(input_dimension)) + else: + _categorical_features = set(model.categorical_features) + _cat_feature_idx_mapping = dict( + (_idx, i) for i, _idx in enumerate(sorted(model.categorical_features)) + ) + + pline = Pipeline(input_features, output_features) + + # Track the overall packing index, which determines the output ordering. + pack_idx = 0 + + # First, go through all the columns that are encoded. The sklearn OHE puts + # all of these first, regardless of their original ordering. 
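+    # Hedged sketch with assumed values: for input_dimension == 3 and
+    # _categorical_features == {0, 2}, the loop below emits one-hot encoders
+    # for columns 0 and 2 first; column 1 is appended afterwards by the
+    # pass-through extractor, so the packed order is [OHE(0), OHE(2), col 1].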
+ for idx in range(input_dimension): + f_name = "__OHE_%d__" % pack_idx + + if idx in _categorical_features: + + # This input column is one hot encoded + feature_extractor_spec = create_array_feature_extractor( + input_features, f_name, idx, output_type="Int64" + ) + + pline.add_model(feature_extractor_spec) + + _cat_feature_idx = _cat_feature_idx_mapping[idx] + + ohe_input_features = [(f_name, datatypes.Int64())] + ohe_output_features = [(f_name, datatypes.Dictionary("Int64"))] + + # Create a one hot encoder per column + o_spec = _Model_pb2.Model() + o_spec.specificationVersion = SPECIFICATION_VERSION + o_spec = set_transform_interface_params( + o_spec, ohe_input_features, ohe_output_features + ) + + ohe_spec = o_spec.oneHotEncoder + ohe_spec.outputSparse = True + + if model.handle_unknown == "error": + ohe_spec.handleUnknown = _OHE_pb2.OneHotEncoder.HandleUnknown.Value( + "ErrorOnUnknown" + ) + else: + ohe_spec.handleUnknown = _OHE_pb2.OneHotEncoder.HandleUnknown.Value( + "IgnoreUnknown" + ) + + # Need to do a quick search to find the part of the active_features_ mask + # that represents the categorical variables in our part. Could do this + # with binary search, but we probably don't need speed so much here. + def bs_find(a, i): + lb, k = 0, len(a) + while k > 0: + _idx = lb + (k // 2) + if a[_idx] < i: + lb = _idx + 1 + k -= 1 + k = k // 2 + + return lb + + # Here are the indices we are looking for + f_idx_bottom = model.feature_indices_[_cat_feature_idx] + f_idx_top = model.feature_indices_[_cat_feature_idx + 1] + + # Now find where in the active features list we should look. + cat_feat_idx_bottom = bs_find(model.active_features_, f_idx_bottom) + cat_feat_idx_top = bs_find(model.active_features_, f_idx_top) + n_cat_values = cat_feat_idx_top - cat_feat_idx_bottom + + for i in range(cat_feat_idx_bottom, cat_feat_idx_top): + # The actual categorical value is stored as an offset in the active_features list. + cat_idx = model.active_features_[i] - f_idx_bottom + ohe_spec.int64Categories.vector.append(cat_idx) + + # Add the ohe to the pipeline + pline.add_model(o_spec) + + # Add the result to the feature_vectorizer at the end. + feature_vectorizer_input_features.append( + (f_name, datatypes.Dictionary("Int64")) + ) + feature_vectorizer_size_map[f_name] = n_cat_values + + pack_idx += 1 + + # Now go through all the columns that are not encoded as the sklearn OHE puts + # these after the encoded ones. For speed, we can put these all in a single + # ArrayFeatureExtractor + # + pass_through_features = [ + idx for idx in range(input_dimension) if idx not in _categorical_features + ] + + if pass_through_features: + f_name = "__OHE_pass_through__" + + # This input column is not one hot encoded + feature_extractor_spec = create_array_feature_extractor( + input_features, f_name, pass_through_features + ) + + pline.add_model(feature_extractor_spec) + feature_vectorizer_input_features.append( + (f_name, datatypes.Array(len(pass_through_features))) + ) + + # Finally, add the feature vectorizer to the pipeline. 
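+    # The feature vectorizer concatenates the collected blocks in pack order;
+    # e.g. with assumed sizes {"__OHE_0__": 3, "__OHE_1__": 2} and a 4-wide
+    # "__OHE_pass_through__" block, the output array is 9-dimensional.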
+ output_feature_name = output_features[0][0] + output_feature_dimension = output_features[0][1].num_elements + + fvec, _num_out_dim = create_feature_vectorizer( + feature_vectorizer_input_features, + output_features[0][0], + feature_vectorizer_size_map, + ) + + # Make sure that the feature vectorizer input actually matches up with the + assert _num_out_dim == output_features[0][1].num_elements + + pline.add_model(fvec) + + return _MLModel(pline.spec) + + +def update_dimension(model, input_dimension): + """ + Given a model that takes an array of dimension input_dimension, returns + the output dimension. + """ + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + + if _SKLEARN_VERSION >= StrictVersion("0.22"): + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "categories_")) + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "n_features_in_")) + return sum(model._n_features_outs) + else: + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "active_features_")) + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "n_values_")) + + if model.categorical_features == "all": + return len(model.active_features_) + else: + out_dimension = len(model.active_features_) + ( + input_dimension - len(model.n_values_) + ) + + return out_dimension + + +def get_input_dimension(model): + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + + if _SKLEARN_VERSION >= StrictVersion("0.22"): + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "categories_")) + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "n_features_in_")) + return model.n_features_in_ + else: + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "active_features_")) + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "n_values_")) + + if model.categorical_features == "all": + return len(model.feature_indices_) - 1 + else: + # This can't actually be determined from the model as indices after the + # rest of the categorical values don't seem to be tracked + return None diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_random_forest_classifier.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_random_forest_classifier.py new file mode 100644 index 00000000..905f2d12 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_random_forest_classifier.py @@ -0,0 +1,70 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from ..._deps import _HAS_SKLEARN +from ...models import MLModel as _MLModel +from ._tree_ensemble import convert_tree_ensemble as _convert_tree_ensemble, get_input_dimension + +if _HAS_SKLEARN: + import sklearn.ensemble as _ensemble + + from . import _sklearn_util + + sklearn_class = _ensemble.RandomForestClassifier + +model_type = "classifier" + + +def convert(model, feature_names, target): + """Convert a boosted tree model to protobuf format. + + Parameters + ---------- + decision_tree : RandomForestClassifier + A trained scikit-learn tree model. + + feature_names: [str] + Name of the input columns. + + target: str + Name of the output column. + + Returns + ------- + model_spec: An object of type Model_pb. 
+ Protobuf representation of the model + """ + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + + _sklearn_util.check_expected_type(model, _ensemble.RandomForestClassifier) + + def is_rf_model(m): + if len(m.estimators_) == 0: + return False + if hasattr(m, "estimators_") and m.estimators_ is not None: + for t in m.estimators_: + if not hasattr(t, "tree_") or t.tree_ is None: + return False + return True + else: + return False + + _sklearn_util.check_fitted(model, is_rf_model) + return _MLModel( + _convert_tree_ensemble( + model, feature_names, target, mode="classifier", class_labels=model.classes_ + ) + ) + + +def supports_output_scores(model): + return True + + +def get_output_classes(model): + return list(model.classes_) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_random_forest_regressor.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_random_forest_regressor.py new file mode 100644 index 00000000..df61a135 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_random_forest_regressor.py @@ -0,0 +1,58 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from ..._deps import _HAS_SKLEARN +from ...models import MLModel as _MLModel +from ._tree_ensemble import convert_tree_ensemble as _convert_tree_ensemble, get_input_dimension + +if _HAS_SKLEARN: + import sklearn.ensemble as _ensemble + + from . import _sklearn_util + + sklearn_class = _ensemble.RandomForestRegressor + +model_type = "regressor" + + +def convert(model, feature_names, target): + """Convert a boosted tree model to protobuf format. + + Parameters + ---------- + decision_tree : RandomForestRegressor + A trained scikit-learn tree model. + + feature_names: [str] + Name of the input columns. + + target: str + Name of the output column. + + Returns + ------- + model_spec: An object of type Model_pb. + Protobuf representation of the model + """ + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + + _sklearn_util.check_expected_type(model, _ensemble.RandomForestRegressor) + + def is_rf_model(m): + if len(m.estimators_) == 0: + return False + if hasattr(m, "estimators_") and m.estimators_ is not None: + for t in m.estimators_: + if not hasattr(t, "tree_") or t.tree_ is None: + return False + return True + else: + return False + + _sklearn_util.check_fitted(model, is_rf_model) + return _MLModel(_convert_tree_ensemble(model, feature_names, target)) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_ridge_regression.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_ridge_regression.py new file mode 100644 index 00000000..84208c6c --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_ridge_regression.py @@ -0,0 +1,53 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from ..._deps import _HAS_SKLEARN +from ...models import MLModel as _MLModel + +if _HAS_SKLEARN: + import sklearn + from sklearn.linear_model import Ridge as _Ridge + + from . 
import _sklearn_util
+
+    sklearn_class = sklearn.linear_model.Ridge
+
+from . import _linear_regression
+
+model_type = "regressor"
+
+
+def convert(model, features, target):
+    """Convert a Ridge Regression model to the protobuf spec.
+
+    Parameters
+    ----------
+    model: Ridge
+        A trained Ridge Regression model.
+
+    features: [str]
+        Names of the input columns.
+
+    target: str
+        Name of the output column.
+
+    Returns
+    -------
+    model_spec: An object of type Model_pb.
+        Protobuf representation of the model
+    """
+    if not (_HAS_SKLEARN):
+        raise RuntimeError(
+            "scikit-learn not found. scikit-learn conversion API is disabled."
+        )
+
+    # Check the scikit learn model
+    _sklearn_util.check_expected_type(model, _Ridge)
+    _sklearn_util.check_fitted(model, lambda m: hasattr(m, "coef_"))
+
+    return _MLModel(_linear_regression._convert(model, features, target))
+
+
+def get_input_dimension(model):
+    return _linear_regression.get_input_dimension(model)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_sklearn_util.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_sklearn_util.py
new file mode 100644
index 00000000..e313de1e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_sklearn_util.py
@@ -0,0 +1,37 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+
+def check_fitted(model, func):
+    """Check if a model is fitted. Raise error if not.
+
+    Parameters
+    ----------
+    model: model
+        Any scikit-learn model
+
+    func: callable
+        A function that returns True only if the model is fitted.
+    """
+    if not func(model):
+        raise TypeError("Expected a 'fitted' model for conversion")
+
+
+def check_expected_type(model, expected_type):
+    """Check if a model is of the right type. Raise error if not.
+
+    Parameters
+    ----------
+    model: model
+        Any scikit-learn model
+
+    expected_type: Type
+        Expected type of the scikit-learn model.
+    """
+    if model.__class__.__name__ != expected_type.__name__:
+        raise TypeError(
+            "Expected model of type '%s' (got %s)"
+            % (expected_type.__name__, model.__class__.__name__)
+        )
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_standard_scaler.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_standard_scaler.py
new file mode 100644
index 00000000..92e96a9a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_standard_scaler.py
@@ -0,0 +1,89 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+
+from ... import SPECIFICATION_VERSION
+from ..._deps import _HAS_SKLEARN
+from ...models import MLModel as _MLModel
+from ...models._interface_management import \
+    set_transform_interface_params as _set_transform_interface_params
+from ...proto import Model_pb2 as _Model_pb2
+
+if _HAS_SKLEARN:
+    from sklearn.preprocessing import StandardScaler
+
+    from . import _sklearn_util
+
+    sklearn_class = StandardScaler
+
+model_type = "transformer"
+
+
+def convert(model, input_features, output_features):
+    """Convert a StandardScaler model to the protobuf spec.
+
+    Parameters
+    ----------
+    model: StandardScaler
+        A trained StandardScaler model.
+ + input_features: str + Name of the input column. + + output_features: str + Name of the output column. + + Returns + ------- + model_spec: An object of type Model_pb. + Protobuf representation of the model + """ + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + + # Test the scikit-learn model + _sklearn_util.check_expected_type(model, StandardScaler) + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "mean_")) + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "scale_")) + + # Set the interface params. + spec = _Model_pb2.Model() + spec.specificationVersion = SPECIFICATION_VERSION + spec = _set_transform_interface_params(spec, input_features, output_features) + + # Set the parameters + tr_spec = spec.scaler + for x in model.mean_: + tr_spec.shiftValue.append(-x) + + for x in model.scale_: + tr_spec.scaleValue.append(1.0 / x) + + return _MLModel(spec) + + +def update_dimension(model, input_dimension): + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "mean_")) + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "scale_")) + # Nothing to do for this model + return input_dimension + + +def get_input_dimension(model): + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "mean_")) + _sklearn_util.check_fitted(model, lambda m: hasattr(m, "scale_")) + return len(model.mean_) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_svm_common.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_svm_common.py new file mode 100644 index 00000000..77289a49 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_svm_common.py @@ -0,0 +1,37 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +""" +Common stuff for SVMs +""" + + +def _set_kernel(model, spec): + """ + Takes the sklearn SVM model and returns the spec with the protobuf kernel for that model. + """ + + def gamma_value(model): + return model._gamma + + result = None + if model.kernel == "linear": + spec.kernel.linearKernel.MergeFromString( + b"" + ) # hack to set kernel to an empty type + elif model.kernel == "rbf": + spec.kernel.rbfKernel.gamma = gamma_value(model) + elif model.kernel == "poly": + spec.kernel.polyKernel.gamma = gamma_value(model) + spec.kernel.polyKernel.c = model.coef0 + spec.kernel.polyKernel.degree = model.degree + elif model.kernel == "sigmoid": + spec.kernel.sigmoidKernel.gamma = gamma_value(model) + spec.kernel.sigmoidKernel.c = model.coef0 + else: + raise ValueError( + "Unsupported kernel. The following kernel are supported: linear, RBF, polynomial and sigmoid." + ) + return result diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_tree_ensemble.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_tree_ensemble.py new file mode 100644 index 00000000..a65e61bd --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/sklearn/_tree_ensemble.py @@ -0,0 +1,263 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from ..._deps import _HAS_SKLEARN +from ...models._feature_management import process_or_validate_features +from ...models.tree_ensemble import (TreeEnsembleClassifier, + TreeEnsembleRegressor) + +if _HAS_SKLEARN: + from sklearn.tree import _tree + + + +def _get_value(scikit_value, mode="regressor", scaling=1.0, n_classes=2, tree_index=0): + """ Get the right value from the scikit-tree + """ + # Regression + if mode == "regressor": + return scikit_value[0] * scaling + + # Binary classification + if n_classes == 2: + # Decision tree + if len(scikit_value[0]) != 1: + value = scikit_value[0][1] * scaling / scikit_value[0].sum() + # boosted tree + else: + value = scikit_value[0][0] * scaling + if value == 0.5: + value = value - 1e-7 + + # Multiclass classification + else: + # Decision tree + if len(scikit_value[0]) != 1: + value = scikit_value[0] / scikit_value[0].sum() + # boosted tree + else: + value = {tree_index: scikit_value[0] * scaling} + return value + + +def _recurse( + coreml_tree, + scikit_tree, + tree_id, + node_id, + scaling=1.0, + mode="regressor", + n_classes=2, + tree_index=0, +): + """Traverse through the tree and append to the tree spec. + """ + if not (_HAS_SKLEARN): + raise RuntimeError( + "scikit-learn not found. scikit-learn conversion API is disabled." + ) + + ## Recursion should not be called on the leaf node. + if node_id == _tree.TREE_LEAF: + raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF) + + # Add a branch node to the tree + if scikit_tree.children_left[node_id] != _tree.TREE_LEAF: + branch_mode = "BranchOnValueLessThanEqual" + feature_index = scikit_tree.feature[node_id] + feature_value = scikit_tree.threshold[node_id] + left_child_id = scikit_tree.children_left[node_id] + right_child_id = scikit_tree.children_right[node_id] + + # Add a branch node + coreml_tree.add_branch_node( + tree_id, + node_id, + feature_index, + feature_value, + branch_mode, + left_child_id, + right_child_id, + ) + + # Now recurse + _recurse( + coreml_tree, + scikit_tree, + tree_id, + left_child_id, + scaling, + mode, + n_classes, + tree_index, + ) + _recurse( + coreml_tree, + scikit_tree, + tree_id, + right_child_id, + scaling, + mode, + n_classes, + tree_index, + ) + + # Add a leaf node to the tree + else: + # Get the scikit-learn value + if scikit_tree.n_outputs != 1: + raise ValueError("Expected only 1 output in the scikit-learn tree.") + value = _get_value( + scikit_tree.value[node_id], mode, scaling, n_classes, tree_index + ) + coreml_tree.add_leaf_node(tree_id, node_id, value) + + +def get_input_dimension(model): + if hasattr(model, "n_features_"): + return model.n_features_ + + elif hasattr(model, "n_estimators"): + if model.n_estimators == 0: + raise ValueError("model not trained.") + + try: + return model.estimators_[0, 0].n_features_ + except IndexError: + raise ValueError("Model not trained or invalid model.") + else: + raise ValueError("Unable to obtain input dimension from model.") + + +def convert_tree_ensemble( + model, + input_features, + output_features=("predicted_class", float), + mode="regressor", + base_prediction=None, + class_labels=None, + post_evaluation_transform=None, +): + """ + Convert a generic tree regressor model to the protobuf spec. + + This currently supports: + * Decision tree regression + * Gradient boosted tree regression + * Random forest regression + * Decision tree classifier. 
+    * Gradient boosted tree classifier.
+    * Random forest classifier.
+
+    Parameters
+    ----------
+    model: [DecisionTreeRegressor | GradientBoostingRegressor | RandomForestRegressor]
+        A scikit-learn tree model.
+
+    input_features: list of strings
+        Names of each of the input features.
+
+    output_features: tuple, optional (default=('predicted_class', float))
+        Output feature name and type.
+
+    base_prediction: double
+        Base prediction value.
+
+    mode: str in ['regressor', 'classifier']
+        Mode of the tree model.
+
+    class_labels: list[int]
+        List of classes.
+
+    post_evaluation_transform: str
+        Name of the post-evaluation transform to apply.
+
+    Returns
+    -------
+    model_spec: An object of type Model_pb.
+        Protobuf representation of the model
+    """
+
+    num_dimensions = get_input_dimension(model)
+    features = process_or_validate_features(input_features, num_dimensions)
+
+    n_classes = None
+    if mode == "classifier":
+        n_classes = model.n_classes_
+        if class_labels is None:
+            class_labels = range(n_classes)
+        else:
+            if len(class_labels) != n_classes:
+                raise ValueError(
+                    "Number of classes in model (%d) does not match "
+                    "length of supplied class list (%d)."
+                    % (n_classes, len(class_labels))
+                )
+
+        coreml_tree = TreeEnsembleClassifier(
+            input_features, class_labels, output_features
+        )
+        if post_evaluation_transform is not None:
+            coreml_tree.set_post_evaluation_transform(post_evaluation_transform)
+
+        # Base prediction not provided
+        if base_prediction is None:
+            if n_classes == 2:
+                base_prediction = [0.0]
+            else:
+                base_prediction = [0.0 for c in range(n_classes)]
+        coreml_tree.set_default_prediction_value(base_prediction)
+    else:
+        if base_prediction is None:
+            base_prediction = 0.0
+        coreml_tree = TreeEnsembleRegressor(input_features, output_features)
+        coreml_tree.set_default_prediction_value(base_prediction)
+
+    # Single tree
+    if hasattr(model, "tree_"):
+        _recurse(
+            coreml_tree,
+            model.tree_,
+            tree_id=0,
+            node_id=0,
+            mode=mode,
+            n_classes=n_classes,
+        )
+
+    # Multiple trees
+    elif hasattr(model, "estimators_"):
+        is_ensembling_in_separate_trees = False
+        if type(model.estimators_) != list:
+            is_ensembling_in_separate_trees = (
+                len(model.estimators_.shape) > 0 and model.estimators_.shape[1] > 1
+            )
+            estimators = model.estimators_.flatten()
+        else:
+            estimators = model.estimators_
+
+        scaling = (
+            model.learning_rate
+            if hasattr(model, "learning_rate")
+            else 1.0 / len(estimators)
+        )
+        for tree_id, base_model in enumerate(estimators):
+            if is_ensembling_in_separate_trees:
+                tree_index = tree_id % n_classes
+            else:
+                tree_index = 0
+            _recurse(
+                coreml_tree,
+                base_model.tree_,
+                tree_id,
+                node_id=0,
+                scaling=scaling,
+                mode=mode,
+                n_classes=n_classes,
+                tree_index=tree_index,
+            )
+    else:
+        raise TypeError("Unknown scikit-learn tree model type.")
+
+    return coreml_tree.spec
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/xgboost/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/xgboost/__init__.py
new file mode 100644
index 00000000..a619ce8f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/xgboost/__init__.py
@@ -0,0 +1,6 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
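+#
+# A minimal usage sketch for the scikit-learn tree-ensemble converter above
+# (assuming scikit-learn is installed; X, y and the names are placeholders):
+#
+#     from sklearn.ensemble import RandomForestRegressor
+#     import coremltools
+#
+#     model = RandomForestRegressor(n_estimators=10).fit(X, y)  # X: (n, 4)
+#     mlmodel = coremltools.converters.sklearn.convert(
+#         model, ["f0", "f1", "f2", "f3"], "target"
+#     )
+#
+# Note the scaling logic above: forests average their trees (each leaf is
+# scaled by 1 / n_estimators), while gradient boosting scales each leaf by
+# model.learning_rate.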
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from ._tree import convert
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/xgboost/_tree.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/xgboost/_tree.py
new file mode 100644
index 00000000..46615fbf
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/xgboost/_tree.py
@@ -0,0 +1,93 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from coremltools import __version__ as ct_version
+from coremltools.models import _METADATA_SOURCE, _METADATA_VERSION
+
+from ...models import MLModel as _MLModel
+from ._tree_ensemble import convert_tree_ensemble as _convert_tree_ensemble
+
+
+def convert(
+    model,
+    feature_names=None,
+    target="target",
+    force_32bit_float=True,
+    mode="regressor",
+    class_labels=None,
+    n_classes=None,
+):
+    """
+    Convert a trained XGBoost model to Core ML format.
+
+    Parameters
+    ----------
+    model: Booster
+        A trained XGBoost tree model.
+
+    feature_names: [str] | str
+        Names of input features that will be exposed in the Core ML model
+        interface.
+
+        Can be set to one of the following:
+
+        - ``None`` for using the feature names from the model.
+        - List of names of the input features that should be exposed in the
+          interface to the Core ML model. These input features are in the same
+          order as the XGBoost model.
+
+    target: str
+        Name of the output feature exposed to the Core ML model.
+
+    force_32bit_float: bool
+        If ``True``, then the resulting Core ML model will use 32-bit floats internally.
+
+    mode: str in ['regressor', 'classifier']
+        Mode of the tree model.
+
+    class_labels: list[int] or None
+        List of classes. When set to None, the class labels are just the range from
+        0 to ``n_classes - 1``.
+
+    n_classes: int or None
+        Number of classes in classification. When set to ``None``, the number of
+        classes is expected from the model or ``class_labels`` should be provided.
+
+    Returns
+    -------
+    model: MLModel
+        Returns an MLModel instance representing a Core ML model.
+
+    Examples
+    --------
+    .. sourcecode:: python
+
+        # Convert it with default input and output names
+        >>> import coremltools
+        >>> coreml_model = coremltools.converters.xgboost.convert(model)
+
+        # Saving the Core ML model to a file.
+        >>> coreml_model.save('my_model.mlmodel')
+    """
+    model = _MLModel(
+        _convert_tree_ensemble(
+            model,
+            feature_names,
+            target,
+            force_32bit_float=force_32bit_float,
+            mode=mode,
+            class_labels=class_labels,
+            n_classes=n_classes,
+        )
+    )
+
+    from xgboost import __version__ as xgboost_version
+
+    model.user_defined_metadata[_METADATA_VERSION] = ct_version
+    model.user_defined_metadata[_METADATA_SOURCE] = "xgboost=={0}".format(
+        xgboost_version
+    )
+
+    return model
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/converters/xgboost/_tree_ensemble.py b/__packaged__/coreml/.python_dependencies/coremltools/converters/xgboost/_tree_ensemble.py
new file mode 100644
index 00000000..cf3fd9a6
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/converters/xgboost/_tree_ensemble.py
@@ -0,0 +1,280 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
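+#
+# A short usage sketch for the xgboost convert() API above (assuming xgboost
+# is installed; data, feature names, and the file name are placeholders):
+#
+#     import xgboost
+#     import coremltools
+#
+#     dtrain = xgboost.DMatrix(X, label=y, feature_names=["f0", "f1", "f2"])
+#     booster = xgboost.train({"objective": "reg:squarederror"}, dtrain)
+#     mlmodel = coremltools.converters.xgboost.convert(booster, mode="regressor")
+#     mlmodel.save("boosted_trees.mlmodel")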
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as _np + +from ..._deps import _HAS_XGBOOST +from ...models.tree_ensemble import TreeEnsembleClassifier +from ...models.tree_ensemble import \ + TreeEnsembleRegressor as _TreeEnsembleRegressor + +if _HAS_XGBOOST: + import xgboost as _xgboost + + +def recurse_json( + mlkit_tree, + xgb_tree_json, + tree_id, + node_id, + feature_map, + force_32bit_float, + mode="regressor", + tree_index=0, + n_classes=2, +): + """Traverse through the tree and append to the tree spec. + """ + relative_hit_rate = None + + try: + relative_hit_rate = xgb_tree_json["cover"] + except KeyError: + pass + + # Fill node attributes + if "leaf" not in xgb_tree_json: + branch_mode = "BranchOnValueLessThan" + split_name = xgb_tree_json["split"] + feature_index = split_name if not feature_map else feature_map[split_name] + + # xgboost internally uses float32, but the parsing from json pulls it out + # as a 64bit double. To trigger the internal float32 detection in the + # tree ensemble compiler, we need to explicitly cast it to a float 32 + # value, then back to the 64 bit float that protobuf expects. This is + # controlled with the force_32bit_float flag. + feature_value = xgb_tree_json["split_condition"] + + if force_32bit_float: + feature_value = float(_np.float32(feature_value)) + + true_child_id = xgb_tree_json["yes"] + false_child_id = xgb_tree_json["no"] + + # Get the missing value behavior correct + missing_value_tracks_true_child = False + + try: + if xgb_tree_json["missing"] == true_child_id: + missing_value_tracks_true_child = True + except KeyError: + pass + + mlkit_tree.add_branch_node( + tree_id, + node_id, + feature_index, + feature_value, + branch_mode, + true_child_id, + false_child_id, + relative_hit_rate=relative_hit_rate, + missing_value_tracks_true_child=missing_value_tracks_true_child, + ) + + else: + value = xgb_tree_json["leaf"] + if force_32bit_float: + value = float(_np.float32(value)) + + if mode == "classifier" and n_classes > 2: + value = {tree_index: value} + + mlkit_tree.add_leaf_node( + tree_id, node_id, value, relative_hit_rate=relative_hit_rate + ) + + # Now recurse + if "children" in xgb_tree_json: + for child in xgb_tree_json["children"]: + recurse_json( + mlkit_tree, + child, + tree_id, + child["nodeid"], + feature_map, + force_32bit_float, + mode=mode, + tree_index=tree_index, + n_classes=n_classes, + ) + + +def convert_tree_ensemble( + model, + feature_names, + target, + force_32bit_float, + mode="regressor", + class_labels=None, + n_classes=None, +): + """Convert a generic tree model to the protobuf spec. + + This currently supports: + * Decision tree regression + + Parameters + ---------- + model: str | Booster + Path on disk where the XGboost JSON representation of the model is or + a handle to the XGboost model. + + feature_names : list of strings or None + Names of each of the features. When set to None, the feature names are + extracted from the model. + + target: str, + Name of the output column. + + force_32bit_float: bool + If True, then the resulting CoreML model will use 32 bit floats internally. + + mode: str in ['regressor', 'classifier'] + Mode of the tree model. + + class_labels: list[int] or None + List of classes. When set to None, the class labels are just the range from + 0 to n_classes - 1. + + n_classes: int or None + Number of classes in classification. 
When set to None, the number of + classes is expected from the model or class_labels should be provided. + + Returns + ------- + model_spec: An object of type Model_pb. + Protobuf representation of the model + """ + if not (_HAS_XGBOOST): + raise RuntimeError("xgboost not found. xgboost conversion API is disabled.") + accepted_modes = ["regressor", "classifier"] + if mode not in accepted_modes: + raise ValueError("mode should be in %s" % accepted_modes) + import json + import os + + feature_map = None + if isinstance( + model, (_xgboost.core.Booster, _xgboost.XGBRegressor, _xgboost.XGBClassifier) + ): + + # Testing a few corner cases that we don't support + if isinstance(model, _xgboost.XGBRegressor): + if mode == "classifier": + raise ValueError("mode is classifier but provided a regressor") + try: + objective = model.get_xgb_params()["objective"] + except: + objective = None + if objective in ["reg:gamma", "reg:tweedie"]: + raise ValueError( + "Regression objective '%s' not supported for export." % objective + ) + + if isinstance(model, _xgboost.XGBClassifier): + if mode == "regressor": + raise ValueError("mode is regressor but provided a classifier") + n_classes = model.n_classes_ + if class_labels is not None: + if len(class_labels) != n_classes: + raise ValueError( + "Number of classes in model (%d) does not match " + "length of supplied class list (%d)." + % (n_classes, len(class_labels)) + ) + else: + class_labels = list(range(n_classes)) + + # Now use the booster API. + if isinstance(model, (_xgboost.XGBRegressor, _xgboost.XGBClassifier)): + # Name change in 0.7 + if hasattr(model, "get_booster"): + model = model.get_booster() + else: + model = model.booster() + + # Xgboost sometimes has feature names in there. Sometimes does not. + if (feature_names is None) and (model.feature_names is None): + raise ValueError( + "The XGBoost model does not have feature names. They must be provided in convert method." + ) + feature_names = model.feature_names + if feature_names is None: + feature_names = model.feature_names + + xgb_model_str = model.get_dump(with_stats=True, dump_format="json") + + if model.feature_names: + feature_map = {f: i for i, f in enumerate(model.feature_names)} + + # Path on the file system where the XGboost model exists. + elif isinstance(model, str): + if not os.path.exists(model): + raise TypeError("Invalid path %s." % model) + with open(model) as f: + xgb_model_str = json.load(f) + + if feature_names is None: + raise ValueError( + "feature names must be provided in convert method if the model is a path on file system." + ) + else: + feature_map = {f: i for i, f in enumerate(feature_names)} + + else: + raise TypeError("Unexpected type. 
Expecting XGBoost model.") + + if mode == "classifier": + if n_classes is None and class_labels is None: + raise ValueError( + "You must provide class_labels or n_classes when not providing the XGBClassifier" + ) + elif n_classes is None: + n_classes = len(class_labels) + elif class_labels is None: + class_labels = range(n_classes) + if n_classes == 2: + # if we have only 2 classes we only have one sequence of estimators + base_prediction = [0.0] + else: + base_prediction = [0.0 for c in range(n_classes)] + # target here is the equivalent of output_features in scikit learn + mlkit_tree = TreeEnsembleClassifier(feature_names, class_labels, target) + mlkit_tree.set_default_prediction_value(base_prediction) + if n_classes == 2: + mlkit_tree.set_post_evaluation_transform("Regression_Logistic") + else: + mlkit_tree.set_post_evaluation_transform("Classification_SoftMax") + else: + mlkit_tree = _TreeEnsembleRegressor(feature_names, target) + mlkit_tree.set_default_prediction_value(0.5) + + for xgb_tree_id, xgb_tree_str in enumerate(xgb_model_str): + if mode == "classifier" and n_classes > 2: + tree_index = xgb_tree_id % n_classes + else: + tree_index = 0 + + try: + # this means that the xgb_tree_str is a json dump and needs to be loaded + xgb_tree_json = json.loads(xgb_tree_str) + except: + # this means that the xgb_tree_str is loaded from a path in file system already and does not need to be reloaded + xgb_tree_json = xgb_tree_str + + recurse_json( + mlkit_tree, + xgb_tree_json, + xgb_tree_id, + node_id=0, + feature_map=feature_map, + force_32bit_float=force_32bit_float, + mode=mode, + tree_index=tree_index, + n_classes=n_classes, + ) + + return mlkit_tree.spec diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/models/__init__.py new file mode 100644 index 00000000..526e1e5d --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/__init__.py @@ -0,0 +1,37 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from . import datatypes + +from . import _feature_management + +from . import nearest_neighbors +from . import pipeline +from . import tree_ensemble +from . import feature_vectorizer + +from . import _interface_management + +from .model import MLModel +from .model import ( + _MLMODEL_FULL_PRECISION, + _MLMODEL_HALF_PRECISION, + _MLMODEL_QUANTIZED, + _VALID_MLMODEL_PRECISION_TYPES, + _SUPPORTED_QUANTIZATION_MODES, + _QUANTIZATION_MODE_LINEAR_QUANTIZATION, + _QUANTIZATION_MODE_LINEAR_SYMMETRIC, + _QUANTIZATION_MODE_LOOKUP_TABLE_LINEAR, + _QUANTIZATION_MODE_LOOKUP_TABLE_KMEANS, + _QUANTIZATION_MODE_CUSTOM_LOOKUP_TABLE, + _QUANTIZATION_MODE_DEQUANTIZE, + _LUT_BASED_QUANTIZATION, + _QUANTIZATION_MODE_DEQUANTIZE, + _METADATA_VERSION, + _METADATA_SOURCE, +) + +from . import neural_network +from . import ml_program diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/_deprecation.py b/__packaged__/coreml/.python_dependencies/coremltools/models/_deprecation.py new file mode 100644 index 00000000..5eb9d43d --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/_deprecation.py @@ -0,0 +1,37 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. 
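+#
+# A usage sketch for the deprecated() decorator defined below (old_api is a
+# hypothetical function name):
+#
+#     from coremltools.models._deprecation import deprecated
+#
+#     @deprecated(suffix="use new_api instead")
+#     def old_api():
+#         return 42
+#
+#     old_api()  # emits a FutureWarning naming old_api and the suffix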
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause +import functools +import warnings + + +def deprecated(obj=None, suffix=""): + """ + Decorator to mark a function or a class as deprecated + """ + + def decorator_deprecation_warning(obj): + @functools.wraps(obj) + def wrapped(*args, **kwargs): + if isinstance(obj, type): + msg = ( + 'Class "%s" is deprecated and will be removed in 6.0.' + % obj.__name__ + ) + else: + msg = ( + 'Function "%s" is deprecated and will be removed in 6.0.' + % obj.__name__ + ) + if suffix: + msg += "; %s" % suffix + warnings.warn(msg, category=FutureWarning) + return obj(*args, **kwargs) + + return wrapped + + if obj is None: + return decorator_deprecation_warning + + return decorator_deprecation_warning(obj) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/_feature_management.py b/__packaged__/coreml/.python_dependencies/coremltools/models/_feature_management.py new file mode 100644 index 00000000..9e8a7c89 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/_feature_management.py @@ -0,0 +1,354 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import operator as op +from collections import defaultdict +from copy import copy +from functools import reduce + +import numpy as _np + +from . import datatypes + + +def process_or_validate_classifier_output_features( + output_features, class_labels, supports_class_scores=True +): + """ + Given a list of class labels and a list of output_features, validate the + list and return a valid version of output_features with all the correct + data type information included. + """ + + def raise_error(msg): + + raise ValueError("Classifier error: %s" % msg) + + class_labels = list(class_labels) + + # First, we need to determine the type of the classes. + _int_types = (int, bool, _np.bool_, _np.int32, _np.int64) + + if all(isinstance(cl, _int_types) for cl in class_labels): + output_class_type = datatypes.Int64() + + elif all(isinstance(cl, str) for cl in class_labels): + output_class_type = datatypes.String() + + else: + raise ValueError("Class labels must be all of type int or all of type string.") + + if output_features is None: + + out = [("classLabel", output_class_type)] + + if supports_class_scores: + out += [("classProbability", datatypes.Dictionary(output_class_type))] + + elif isinstance(output_features, str): + + out = [(output_features, output_class_type)] + + if supports_class_scores: + out += [("classProbability", datatypes.Dictionary(output_class_type))] + + elif ( + isinstance(output_features, (list, tuple)) + and all(isinstance(fn, str) for fn in output_features) + and len(output_features) == 2 + ): + + if supports_class_scores: + out = [ + (output_features[0], output_class_type), + (output_features[1], datatypes.Dictionary(output_class_type)), + ] + else: + raise ValueError( + "Classifier model (as trained) does not support output scores for classes." 
+ ) + + elif is_valid_feature_list(output_features): + + output_features = [ + (k, datatypes._normalize_datatype(dt)) for k, dt in output_features + ] + + if len(output_features) == 1 or not supports_class_scores: + if not output_features[0][1] == output_class_type: + raise ValueError( + "Type of output class feature does not match type of class labels." + ) + + else: + # Make sure the first two output features specified give the output + # class field and the output class scores dictionary field + if isinstance(output_features[0][1], datatypes.Dictionary) and isinstance( + output_features[1][1], output_class_type + ): + output_features[0], output_features[1] = ( + output_features[1], + output_features[0], + ) + + if not isinstance(output_features[1][1], datatypes.Dictionary): + raise_error("Output features class scores should be dictionary type.") + + if output_features[1][1].key_type != output_class_type: + raise_error( + "Class scores dictionary key type does not match type of class labels." + ) + + if output_features[0][1] != output_class_type: + raise_error( + "Specified type of output class does not match type of class labels." + ) + + # NOTE: We are intentionally allowing the case where additional fields are allowed + # beyond the original two features. + + out = output_features + + else: + raise_error("Form of output features not recognized") + + return out + + +def is_valid_feature_list(features): + # Just test all the ways this could be + return ( + type(features) is list + and len(features) >= 1 + and all(type(t) is tuple and len(t) == 2 for t in features) + and all(isinstance(n, str) for n, td in features) + and all(datatypes._is_valid_datatype(td) for n, td in features) + ) + + +def dimension_of_array_features(features): + if not is_valid_feature_list(features): + raise ValueError("Expected feature list in valid form.") + + dim = 0 + for n, td in features: + if isinstance(td, (datatypes.Int64, datatypes.Double)): + dim += 1 + elif isinstance(td, datatypes.Array): + dim += reduce(op.mul, td.dimensions, 1) + else: + raise ValueError( + "Unable to determine number of dimensions from feature list." + ) + + return dim + + +def process_or_validate_features(features, num_dimensions=None, feature_type_map={}): + """ + Puts features into a standard form from a number of different possible forms. + + The standard form is a list of 2-tuples of (name, datatype) pairs. The name + is a string and the datatype is an object as defined in the _datatype module. + + The possible input forms are as follows: + + * A list of strings. in this case, the overall dimension is assumed to be + the length of the list. If neighboring names are identical, they are + assumed to be an input array of that length. For example: + + ["a", "b", "c"] + + resolves to + + [("a", Double), ("b", Double), ("c", Double)]. + + And: + + ["a", "a", "b"] + + resolves to + + [("a", Array(2)), ("b", Double)]. + + * A dictionary of keys to indices or ranges of feature indices. + + In this case, it's presented as a mapping from keys to indices or + ranges of contiguous indices. For example, + + {"a" : 0, "b" : [2,3], "c" : 1} + + Resolves to + + [("a", Double), ("c", Double), ("b", Array(2))]. + + Note that the ordering is determined by the indices. + + * A single string. In this case, the input is assumed to be a single array, + with the number of dimensions set using num_dimensions. + + + Notes: + + If the features variable is in the standard form, it is simply checked and + returned. 
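+
+    For instance (illustrative, restating the forms above)::
+
+        process_or_validate_features(["a", "a", "b"])
+        # -> [("a", Array(2)), ("b", Double)]
+
+        process_or_validate_features({"a": 0, "b": [2, 3], "c": 1})
+        # -> [("a", Double), ("c", Double), ("b", Array(2))]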
+ + If num_dimensions is given, it is used to check against the existing features, + or fill in missing information in the case when features is a single string. + """ + + original_features = copy(features) + + if num_dimensions is not None and not isinstance(num_dimensions, int): + raise TypeError( + "num_dimensions must be None, an integer or a long, not '%s'" + % str(type(num_dimensions)) + ) + + def raise_type_error(additional_msg): + raise TypeError( + "Error processing feature list: %s\nfeatures = %s" + % (additional_msg, str(original_features)) + ) + + if type(features) is dict and is_valid_feature_list(features.items()): + features = features.items() + + # First, see if the features are already in the correct form. If they are, + # then we + if is_valid_feature_list(features): + if num_dimensions is not None: + try: + feature_dims = dimension_of_array_features(features) + except ValueError: + feature_dims = None + + if feature_dims is not None and feature_dims != num_dimensions: + raise_type_error("Dimension mismatch.") + + # We may need to translate some parts of this back to the actual + # datatype class -- e.g. translate str to datatypes.String(). + return [(k, datatypes._normalize_datatype(dt)) for k, dt in features] + + if isinstance(features, str): + if num_dimensions is None: + raise_type_error( + "If a single feature name is given, then " + "num_dimensions must be provided." + ) + features = {features: range(num_dimensions)} + + if isinstance(features, (list, tuple, _np.ndarray)): + # Change this into a dictionary + + mapping = defaultdict(lambda: []) + + for i, k in enumerate(features): + if not isinstance(k, str): + raise_type_error( + "List of feature names must either be a list of strings, or a list of (name, datatypes.Array instance) tuples." + ) + + if num_dimensions is not None and len(features) != num_dimensions: + raise_type_error( + ("List of feature names has wrong length; " "%d required, %d provided.") + % (num_dimensions, len(features)) + ) + + for i, k in enumerate(features): + mapping[k].append(i) + + # Replace the features + features = mapping + + if not isinstance(features, dict): + raise_type_error( + "features must be either a list of feature names " + "or a dictionary of feature names to ranges." + ) + + # We'll be invasive here so make a copy. + features = copy(features) + + for k, v in list(features.items()): + + if not isinstance(k, str): + raise_type_error("Feature names must be strings.") + + def test_index(val): + error = False + try: + if val != int(val): + error = True + except: + error = True + + if error: + raise_type_error( + "Specified indices for feature %s must be integers." % k + ) + + if val < 0 or (num_dimensions is not None and val >= num_dimensions): + raise_type_error("Index in feature %s out of range." % k) + + iterable_types = [tuple, list, set] + iterable_types.append(range) + if isinstance(v, tuple(iterable_types)): + for idx in v: + test_index(idx) + + # Replace and update + features[k] = v = list(sorted(v)) + + elif isinstance(v, int): + test_index(v) + features[k] = v = [v] + else: + raise_type_error( + ( + "Value type for feature %s not recognized; " + "values must be either integers, lists or range objects." + ) + % k + ) + + # check to make sure things are contiguous + if v != list(range(v[0], v[-1] + 1)): + raise_type_error( + "Index list for feature %s must consist of " + "a contiguous range of indices." % k + ) + + if len(set(v)) != len(v): + raise_type_error("Index list for feature %s contains duplicates." 
% k) + + # Now, set num dimensions from the list if it's actually None + if num_dimensions is None: + from itertools import chain + num_dimensions = 1 + max(chain.from_iterable(features.values())) + + if ( + set().union(*features.values()) != set(range(num_dimensions)) + or sum(len(v) for v in features.values()) != num_dimensions + ): + raise_type_error( + "Supplied indices must cover entire range of 0, ..., num_dimensions-1." + ) + + # Define the output feature types + output_features = [None] * len(features) + + # Finally, go through and map all these things out as types. + # Sort by first value of the index range. + for i, (k, v) in enumerate(sorted(features.items(), key=lambda t: t[1][0])): + if k in feature_type_map: + output_features[i] = (k, feature_type_map[k]) + + elif len(v) == 1: + output_features[i] = (k, datatypes.Double()) + else: + output_features[i] = (k, datatypes.Array(len(v))) + + return output_features diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/_interface_management.py b/__packaged__/coreml/.python_dependencies/coremltools/models/_interface_management.py new file mode 100644 index 00000000..e22ab742 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/_interface_management.py @@ -0,0 +1,211 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from ..proto import Model_pb2 +from . import _feature_management as _fm +from . import datatypes + + +def set_classifier_interface_params( + spec, + features, + class_labels, + model_accessor_for_class_labels, + output_features=None, + training_features=None, +): + """ + Common utilities to set the regression interface params. + """ + # Normalize the features list. + features = _fm.process_or_validate_features(features) + + if class_labels is None: + raise ValueError("List of class labels must be provided.") + + n_classes = len(class_labels) + + output_features = _fm.process_or_validate_classifier_output_features( + output_features, class_labels + ) + + if len(output_features) == 1: + predicted_class_output, pred_cl_type = output_features[0] + score_output = None + elif len(output_features) == 2: + predicted_class_output, pred_cl_type = output_features[0] + score_output, score_output_type = output_features[1] + else: + raise ValueError( + "Provided output classes for a classifier must be " + "a list of features, predicted class and (optionally) class_score." + ) + + spec.description.predictedFeatureName = predicted_class_output + + # Are they out of order? + if not (pred_cl_type == datatypes.Int64() or pred_cl_type == datatypes.String()): + raise ValueError( + "Provided predicted class output type not Int64 or String (%s)." + % repr(pred_cl_type) + ) + + if score_output is not None: + if not isinstance(score_output_type, datatypes.Dictionary): + raise ValueError( + "Provided class score output type not a Dictionary (%s)." + % repr(score_output_type) + ) + + if score_output_type.key_type != pred_cl_type: + raise ValueError( + ( + "Provided class score output (%s) key_type (%s) does not " + "match type of class prediction (%s)." 
+ ) + % (score_output, repr(score_output_type.key_type), repr(pred_cl_type)) + ) + + spec.description.predictedProbabilitiesName = score_output + + # add input + for index, (cur_input_name, input_type) in enumerate(features): + input_ = spec.description.input.add() + input_.name = cur_input_name + datatypes._set_datatype(input_.type, input_type) + + # add output + for index, (cur_output_name, output_type) in enumerate(output_features): + output_ = spec.description.output.add() + output_.name = cur_output_name + datatypes._set_datatype(output_.type, output_type) + + # Add training features + if training_features is not None: + spec = set_training_features(spec, training_features) + + # Worry about the class labels + if pred_cl_type == datatypes.String(): + try: + for c in class_labels: + getattr( + spec, model_accessor_for_class_labels + ).stringClassLabels.vector.append(str(c)) + # Not all the classifiers have class labels; in particular the pipeline + # classifier. Thus it's not an error if we can't actually set them. + except AttributeError: + pass + + else: + for c in class_labels: + conv_error = False + try: + if not (int(c) == c): + conv_error = True + except: + conv_error = True + + if conv_error: + raise TypeError( + ("Cannot cast '%s' class to an int type " % str(c)) + + "(class type determined by type of first class)." + ) + + try: + getattr( + spec, model_accessor_for_class_labels + ).int64ClassLabels.vector.append(int(c)) + # Not all the classifiers have class labels; in particular the pipeline + # classifier. Thus it's not an error if we can't actually set them. + except AttributeError: + break + + # And we are done! + return spec + + +def set_regressor_interface_params( + spec, features, output_features, training_features=None +): + """ + Common utilities to set the regressor interface params. + """ + if output_features is None: + output_features = [("predicted_class", datatypes.Double())] + else: + output_features = _fm.process_or_validate_features(output_features, 1) + + if len(output_features) != 1: + raise ValueError( + "Provided output features for a regressor must be " "one Double feature." + ) + + if output_features[0][1] != datatypes.Double(): + raise ValueError("Output type of a regressor must be a Double.") + + prediction_name = output_features[0][0] + spec.description.predictedFeatureName = prediction_name + + # Normalize the features list. + features = _fm.process_or_validate_features(features) + + # add input and output features + for cur_input_name, feature_type in features: + input_ = spec.description.input.add() + input_.name = cur_input_name + datatypes._set_datatype(input_.type, feature_type) + + # Add training features + if training_features is not None: + spec = set_training_features(spec, training_features) + + output_ = spec.description.output.add() + output_.name = prediction_name + datatypes._set_datatype(output_.type, "Double") + return spec + + +def set_transform_interface_params( + spec, + input_features, + output_features, + are_optional=False, + training_features=None, + array_datatype=Model_pb2.ArrayFeatureType.DOUBLE, +): + """ + Common utilities to set transform interface params. 
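+
+    A small illustrative sketch (feature names are arbitrary)::
+
+        spec = Model_pb2.Model()
+        set_transform_interface_params(
+            spec, [("x", datatypes.Array(3))], [("y", datatypes.Array(3))]
+        )
+        # spec.description now lists one input "x" and one output "y",
+        # both 3-element double multi-arrays.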
+ """ + input_features = _fm.process_or_validate_features(input_features) + output_features = _fm.process_or_validate_features(output_features) + + # Add input and output features + for (fname, ftype) in input_features: + input_ = spec.description.input.add() + input_.name = fname + datatypes._set_datatype(input_.type, ftype, array_datatype=array_datatype) + if are_optional: + input_.type.isOptional = are_optional + + for (fname, ftype) in output_features: + output_ = spec.description.output.add() + output_.name = fname + datatypes._set_datatype(output_.type, ftype, array_datatype=array_datatype) + + # Add training features + if training_features is not None: + spec = set_training_features(spec, training_features) + + return spec + + +def set_training_features(spec, training_features): + for (fname, ftype) in training_features: + training_input_ = spec.description.trainingInput.add() + training_input_.name = fname + if ftype: + datatypes._set_datatype(training_input_.type, ftype) + + return spec diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/array_feature_extractor.py b/__packaged__/coreml/.python_dependencies/coremltools/models/array_feature_extractor.py new file mode 100644 index 00000000..9363b6cb --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/array_feature_extractor.py @@ -0,0 +1,60 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from .. import SPECIFICATION_VERSION +from ..proto import Model_pb2 as _Model_pb2 +from . import datatypes +from ._interface_management import set_transform_interface_params + + +def create_array_feature_extractor( + input_features, output_name, extract_indices, output_type=None +): + """ + Creates a feature extractor from an input array ``(feature, return)``. + + Parameters + ---------- + input_features: + A list of one ``(name, array)`` tuple. + + extract_indices: + Either an integer or a list. + If it's an integer, the output type is by default a double (but may also be an integer). + If a list, the output type is an array. + """ + + # Make sure that our starting stuff is in the proper form. + assert len(input_features) == 1 + assert isinstance(input_features[0][1], datatypes.Array) + + # Create the model. 
+ spec = _Model_pb2.Model() + spec.specificationVersion = SPECIFICATION_VERSION + + if isinstance(extract_indices, int): + extract_indices = [extract_indices] + if output_type is None: + output_type = datatypes.Double() + + elif isinstance(extract_indices, (list, tuple)): + if not all(isinstance(x, int) for x in extract_indices): + raise TypeError("extract_indices must be an integer or a list of integers.") + + if output_type is None: + output_type = datatypes.Array(len(extract_indices)) + + else: + raise TypeError("extract_indices must be an integer or a list of integers.") + + output_features = [(output_name, output_type)] + + for idx in extract_indices: + assert idx < input_features[0][1].num_elements + spec.arrayFeatureExtractor.extractIndex.append(idx) + + set_transform_interface_params(spec, input_features, output_features) + + return spec diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/datatypes.py b/__packaged__/coreml/.python_dependencies/coremltools/models/datatypes.py new file mode 100644 index 00000000..6656f76a --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/datatypes.py @@ -0,0 +1,244 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +""" +Basic Data Types. +""" +import numpy as _np + +from ..proto import Model_pb2 + + +class _DatatypeBase: + def __init__(self, type_tag, full_tag, num_elements): + self.type_tag, self.full_tag = type_tag, full_tag + self.num_elements = num_elements + + def __eq__(self, other): + return hasattr(other, "full_tag") and self.full_tag == other.full_tag + + def __ne__(self, other): + return not self.__eq__(other) + + def __hash__(self): + return hash(self.full_tag) + + def __repr__(self): + return self.full_tag + + +class Int64(_DatatypeBase): + """ + Int64 Data Type + """ + + def __init__(self): + _DatatypeBase.__init__(self, "Int64", "Int64", 1) + + +class Double(_DatatypeBase): + """ + Double Data Type + """ + + def __init__(self): + _DatatypeBase.__init__(self, "Double", "Double", 1) + + +class String(_DatatypeBase): + """ + String Data Type + """ + + def __init__(self): + _DatatypeBase.__init__(self, "String", "String", 1) + + +class Array(_DatatypeBase): + """ + Array Data Type + """ + + def __init__(self, *dimensions): + """ + Constructs a Array, given its dimensions + + Parameters + ---------- + dimensions: ints | longs + + Examples + -------- + # Create a single dimensions array of length five + >>> arr = coremltools.models.datatypes.Array(5) + + # Create a multi dimension array five by two by ten. 
+ >>> multi_arr = coremltools.models.datatypes.Array(5, 2, 10) + """ + assert len(dimensions) >= 1 + assert all( + isinstance(d, (int, _np.int64, _np.int32)) for d in dimensions + ), "Dimensions must be ints, not {}".format(str(dimensions)) + self.dimensions = dimensions + + num_elements = 1 + for d in self.dimensions: + num_elements *= d + + _DatatypeBase.__init__( + self, + "Array", + "Array({%s})" % (",".join("%d" % d for d in self.dimensions)), + num_elements, + ) + + +class Dictionary(_DatatypeBase): + """ + Dictionary Data Type + """ + + def __init__(self, key_type=None): + """ + Constructs a Dictionary, given its key type + + Parameters + ---------- + key_type: Int64 | String + + Examples + -------- + >>> from coremltools.models.datatypes import Dictionary, Int64, String + + # Create a dictionary with string keys + >>> str_key_dict = Dictionary(key_type=String) + + # Create a dictionary with int keys + >>> int_key_dict = Dictionary(Int64) + """ + # Resolve it to a class if it's + global _simple_type_remap + if key_type in _simple_type_remap: + key_type = _simple_type_remap[key_type] + + if not isinstance(key_type, (Int64, String)): + raise TypeError("Key type for dictionary must be either string or integer.") + + self.key_type = key_type + + _DatatypeBase.__init__( + self, "Dictionary", "Dictionary(%s)" % repr(self.key_type), None + ) + + +_simple_type_remap = { + int: Int64(), + str: String(), + float: Double(), + Double: Double(), + Int64: Int64(), + String: String(), + "Double": Double(), + "Int64": Int64(), + "String": String(), +} + + +def _is_valid_datatype(datatype_instance): + """ + Returns true if datatype_instance is a valid datatype object and false otherwise. + """ + + # Remap so we can still use the python types for the simple cases + global _simple_type_remap + if datatype_instance in _simple_type_remap: + return True + + # Now set the protobuf from this interface. + if isinstance(datatype_instance, (Int64, Double, String, Array)): + return True + + elif isinstance(datatype_instance, Dictionary): + kt = datatype_instance.key_type + + if isinstance(kt, (Int64, String)): + return True + + return False + + +def _normalize_datatype(datatype_instance): + """ + Translates a user specified datatype to an instance of the ones defined above. + + Valid data types are passed through, and the following type specifications + are translated to the proper instances: + + str, "String" -> String() + int, "Int64" -> Int64() + float, "Double" -> Double() + + If a data type is not recognized, then an error is raised. + """ + global _simple_type_remap + if datatype_instance in _simple_type_remap: + return _simple_type_remap[datatype_instance] + + # Now set the protobuf from this interface. + if isinstance(datatype_instance, (Int64, Double, String, Array)): + return datatype_instance + + elif isinstance(datatype_instance, Dictionary): + kt = datatype_instance.key_type + + if isinstance(kt, (Int64, String)): + return datatype_instance + + raise ValueError("Datatype instance not recognized.") + + +def _set_datatype( + proto_type_obj, datatype_instance, array_datatype=Model_pb2.ArrayFeatureType.DOUBLE +): + # Remap so we can still use the python types for the simple cases + global _simple_type_remap + if datatype_instance in _simple_type_remap: + datatype_instance = _simple_type_remap[datatype_instance] + + # Now set the protobuf from this interface. 
+    if isinstance(datatype_instance, Int64):
+        proto_type_obj.int64Type.MergeFromString(b"")
+
+    elif isinstance(datatype_instance, Double):
+        proto_type_obj.doubleType.MergeFromString(b"")
+
+    elif isinstance(datatype_instance, String):
+        proto_type_obj.stringType.MergeFromString(b"")
+
+    elif isinstance(datatype_instance, Array):
+        proto_type_obj.multiArrayType.MergeFromString(b"")
+        proto_type_obj.multiArrayType.dataType = array_datatype
+
+        for n in datatype_instance.dimensions:
+            proto_type_obj.multiArrayType.shape.append(n)
+
+    elif isinstance(datatype_instance, Dictionary):
+        proto_type_obj.dictionaryType.MergeFromString(b"")
+
+        kt = datatype_instance.key_type
+
+        if isinstance(kt, Int64):
+            proto_type_obj.dictionaryType.int64KeyType.MergeFromString(b"")
+        elif isinstance(kt, String):
+            proto_type_obj.dictionaryType.stringKeyType.MergeFromString(b"")
+        else:
+            raise ValueError("Dictionary key type must be either string or int.")
+
+    else:
+        raise TypeError(
+            "Datatype parameter not recognized; must be an instance "
+            "of datatypes.{Double, Int64, String, Dictionary, Array}, or "
+            "python int, float, or str types."
+        )
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/feature_vectorizer.py b/__packaged__/coreml/.python_dependencies/coremltools/models/feature_vectorizer.py
new file mode 100644
index 00000000..41f34ba6
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/models/feature_vectorizer.py
@@ -0,0 +1,98 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from .. import SPECIFICATION_VERSION
+from ..proto import Model_pb2 as _Model_pb2
+from . import datatypes
+from ._feature_management import (is_valid_feature_list,
+                                  process_or_validate_features)
+from ._interface_management import set_transform_interface_params
+
+
+def create_feature_vectorizer(input_features, output_feature_name, known_size_map={}):
+    """
+    Create a feature vectorizer from input features. This returns a 2-tuple
+    ``(spec, num_dimension)`` for a feature vectorizer that puts everything into a
+    single array with a length equal to the total size of all the input features.
+
+    Parameters
+    ----------
+    input_features: [list of 2-tuples]
+        Name(s) of the input features, given as a list of ``('name', datatype)``
+        tuples. The datatypes entry is one of the data types defined in the
+        ``datatypes`` module. Allowed ``datatypes`` are ``datatype.Int64``,
+        ``datatype.Double``, ``datatypes.Dictionary``, and ``datatype.Array``.
+
+        If the feature is a dictionary type, then the dictionary must have integer
+        keys, and the number of dimensions to expand it into must be provided by
+        ``known_size_map``.
+
+        Feature indices in the final array are counted sequentially
+        from 0 through the total number of features.
+
+
+    output_feature_name: str
+        The name of the output feature. Its type is an Array whose
+        length is the combined dimension of the input features.
+
+    known_size_map:
+        A dictionary mapping the feature name to the expanded size in the final
+        array. This is most useful for specifying the size of sparse vectors
+        given as dictionaries of index to value.
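+
+    A short illustrative example (feature names are arbitrary)::
+
+        from coremltools.models import datatypes
+
+        spec, n_dims = create_feature_vectorizer(
+            [("f1", datatypes.Double()), ("f2", datatypes.Array(3))], "combined"
+        )
+        # n_dims == 4; "combined" packs [f1, f2[0], f2[1], f2[2]].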
+ + """ + + spec = _Model_pb2.Model() + spec.specificationVersion = SPECIFICATION_VERSION + + input_features = process_or_validate_features(input_features) + + feature_vectorizer = spec.featureVectorizer + + num_output_dimensions = 0 + + for n, ft in input_features: + if n in known_size_map: + dim = known_size_map[n] + + if ft.num_elements is not None: + if dim != ft.num_elements: + raise ValueError( + "In feature {}, override size {} not compatible with inherent " + "value size {}.".format(n, dim, ft.num_elements) + ) + else: + if ft.num_elements is None: + raise ValueError( + "In feature {}, inherent size unknown so must be manually supplied.".format( + n + ) + ) + dim = ft.num_elements + + num_output_dimensions += dim + + new_feature = feature_vectorizer.inputList.add() + new_feature.inputColumn = n + new_feature.inputDimensions = dim + + if not isinstance(output_feature_name, str): + if ( + is_valid_feature_list(output_feature_name) + and len(output_feature_name) == 1 + and output_feature_name[0][1] == datatypes.Array(num_output_dimensions) + ): + + output_feature_name = output_feature_name[0][0] + + else: + raise TypeError( + "Output feature must be specified as a feature name or correct output feature list." + ) + + output_features = [(output_feature_name, datatypes.Array(num_output_dimensions))] + set_transform_interface_params(spec, input_features, output_features) + + return spec, num_output_dimensions diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/ml_program/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/models/ml_program/__init__.py new file mode 100644 index 00000000..9c0d8b44 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/ml_program/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) 2022, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from . import compression_utils \ No newline at end of file diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/ml_program/compression_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/models/ml_program/compression_utils.py new file mode 100644 index 00000000..ab63fc6f --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/ml_program/compression_utils.py @@ -0,0 +1,609 @@ +# Copyright (c) 2022, Apple Inc. All rights reserved. 
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import numpy as _np
+
+from coremltools import _SPECIFICATION_VERSION_IOS_16
+from coremltools.converters.mil import Operation as _Operation
+from coremltools.converters.mil.converter import mil_convert as _mil_convert
+from coremltools.converters.mil.frontend.milproto.load import load as _milproto_to_pymil
+from coremltools.converters.mil.mil.passes.defs.quantization import (
+    AbstractQuantizationPass as _AbstractQuantizationPass,
+)
+from coremltools.converters.mil.mil.passes.defs.quantization import (
+    WeightAffineQuantizer as _WeightAffineQuantizer,
+)
+from coremltools.converters.mil.mil.passes.defs.quantization import (
+    WeightDecompressor as _WeightDecompressor,
+)
+from coremltools.converters.mil.mil.passes.defs.quantization import (
+    WeightPalettizer as _WeightPalettizer,
+)
+from coremltools.converters.mil.mil.passes.defs.quantization import (
+    WeightSparsifier as _WeightSparsifier,
+)
+
+_DEFAULT_MIN_WEIGHT_SIZE_TO_COMPRESS = 2048
+_DEFAULT_SPECIFICATION_VERSION_FOR_COMPRESSION = _SPECIFICATION_VERSION_IOS_16
+
+
+def _default_op_selector(const_op):
+    if not isinstance(const_op, _Operation) or const_op.op_type != "const":
+        raise ValueError("Input of the op_selector must be type of const Operation, got {}.".format(type(const_op)))
+    return const_op.val.val.size > _DEFAULT_MIN_WEIGHT_SIZE_TO_COMPRESS
+
+def _apply_graph_pass(mlmodel, graph_pass):
+    # Utility function which compresses a Core ML model:
+    # convert the full-precision mlmodel into a PyMIL program.
+    model_spec = mlmodel.get_spec()
+    model_type = model_spec.WhichOneof("Type")
+    if model_type in ("neuralNetwork", "neuralNetworkClassifier", "neuralNetworkRegressor", "pipeline", "pipelineClassifier", "pipelineRegressor"):
+        msg = ("coremltools.compression_utils are meant to be used only with mlprogram typed coreml models. "
+               "This model has type {}. Please use coremltools.models.neural_network.quantization_utils.quantize_weights "
+               "instead to compress the weights of the model.")
+        raise TypeError(msg.format(model_type))
+    elif model_type == "mlProgram":
+        pass
+    else:
+        raise TypeError("weight compression not applicable for model type {}".format(model_type))
+
+    assert isinstance(graph_pass, _AbstractQuantizationPass), "compression pass must be an AbstractQuantizationPass instance"
+    specification_version = max(model_spec.specificationVersion, _DEFAULT_SPECIFICATION_VERSION_FOR_COMPRESSION)
+    prog = _milproto_to_pymil(
+        model_spec=model_spec,
+        specification_version=specification_version,
+        file_weights_dir=mlmodel.weights_dir,
+    )
+
+    # apply the compression graph pass
+    graph_pass.apply(prog)
+
+    # convert the PyMIL program back to an mlmodel
+    compressed_mlmodel = _mil_convert(
+        prog,
+        convert_to="mlprogram",
+        convert_from="milinternal",
+        specification_version=specification_version,
+        compute_units=mlmodel.compute_unit,
+        model_description=model_spec.description,
+    )
+    return compressed_mlmodel
+
+def affine_quantize_weights(mlmodel, mode="linear_symmetric", op_selector=None, dtype=_np.int8):
+    """
+    Utility function to convert a float-precision MLModel of type ``mlprogram``, which uses
+    float-precision weights, into a compressed MLModel that uses 8-bit weights. This is
+    achieved by converting the float weight values that are stored in the ``const`` op
+    into the ``constexpr_affine_dequantize`` op.
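+
+    For a quick sense of the arithmetic detailed below (illustrative numbers): a
+    weight with float range ``[A, B] = [-1.0, 3.0]`` quantized to int8
+    (``[low, high] = [-128, 127]``) gets::
+
+        s = (3.0 - (-1.0)) / (127 - (-128))         # = 4 / 255
+        z = round((-128 * 3.0 - 127 * -1.0) / 4.0)  # = round(-64.25) = -64
+
+    Each float weight is then stored as ``w_q = cast_to_8_bit_integer(w_r / s + z)``
+    and reconstructed at load time as ``w_r = s * (w_q - z)``.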
+ + This function uses affine quantization on the float weights, providing up to 2x + savings in storage compared to float 16, or up to 4x savings compared to float 32. + All computation at runtime uses float precision; the precision of the intermediate + tensors and the compute precision of the ops are not altered. + + For each weight, this utility function converts the weight into the int8 or uint8 type using + either `Linear interpolation` (``"linear"`` mode) or `Linear symmetric + interpolation` (``"linear_symmetric"`` mode, the default). + + **Linear interpolation** + + Linear interpolation (``"linear"`` mode) maps the min/max of the float + range to the 8-bit integer range ``[low, high]`` using a zero point (also called quantization bias, or + offset) and a scale factor. For the int8 quantization, ``[low, high] = [-128, 127]``, while uint8 + quantization uses range ``[0, 255]``. + + ``"linear"`` mode uses the quantization formula: + + .. math:: + w_r = s * (w_q - z) + + Where: + + * :math:`w_r` and :math:`s` are of type float. + * :math:`w_r`` represents the float precision weight. + * :math:`s` represents the scale. + * :math:`w_q` and :math:`z` are of type 8-bit integer. + * :math:`w_q` represents quantized weight. + * :math:`z` represents the zero point. + + Quantized weights are computed as follows: + + .. math:: + w_q = cast\_to\_8\_bit\_integer(w_r / s + cast\_to\_float(z)) + + Note: :math:`cast\_to\_8\_bit\_integer` is the process of clipping the input to range ``[low, high]`` followed by rounding and casting to 8-bit integer. + + In ``"linear"`` mode, ``s, z`` are computed by mapping the original float range + ``[A, B]`` into the 8-bit integer range ``[-128, 127]`` or ``[0, 255]``. That is, you are solving the + following linear equations: + + * ``B = s * (high - z)`` + * ``A = s * (low - z)`` + + The equations result in the following: + + * ``s = (B - A) / (high - low)`` + * ``z = cast_to_8_bit_integer((low * B - high * A) / (B - A))`` + + When the rank of weight ``w`` is 1, then ``s`` and ``z`` are both scalars. When the + rank of the weight is greater than 1, then ``s`` and ``z`` are both vectors. In that + case, scales are computed per `channel`, in which `channel` is the output dimension, + which corresponds to the first dimension for ops such as ``conv`` and ``linear``, and + the second dimension for the ``conv_transpose`` op. + + For ``"linear"`` mode, :math:`A = min(w_r)`, :math:`B = max(w_r)`. + + **Linear symmetric interpolation** + + With linear symmetric interpolation (``"linear_symmetric"`` mode, the default), rather than + mapping the exact min/max of the float range to the quantized range, + + the function chooses the maximum absolute value between the min/max, which results in a + floating-point range that is symmetric with respect to zero. This also makes the resulting zero + point ``0`` for int8 weight and ``127`` for uint8 weight. + + For ``"linear_symmetric"`` mode: + + * :math:`A = -R` and :math:`B = R`, where :math:`R = max(abs(w_r))`. + * This function maps to the range of ``[-127, 127]`` for int8 weight and ``[0, 254]`` for uint8 weight. + * The result is ``s=(B-A)/254`` -> ``s=2R/254`` -> ``s=R/127``. + * Solving for ``z``: + * int8: ``z = (-127 * R + 127 * R)/2R`` -> ``z=0``. + * uint8: ``z = (0 * R + 254 * R)/2R`` -> ``z=127``. + + Parameters + ---------- + mlmodel: MLModel + Model to be quantized. This MLModel should be of type ``mlprogram``. 
+
+    mode: str
+        Mode for linear quantization:
+
+        * ``"linear_symmetric"`` (default): Input data are quantized in the range
+          ``[-R, R]``, where :math:`R = max(abs(w_r))`.
+        * ``"linear"``: Input data are quantized in the range
+          :math:`[min(w_r), max(w_r)]`.
+
+    op_selector: callable
+        This function takes a single parameter with type ``coremltools.converters.mil.Operation``;
+        that is, a ``const`` operation. It returns a ``bool``: ``True`` to compress ``const_op``,
+        otherwise ``False``. See the following examples:
+
+        * All constants in the network are compressed:
+
+            .. sourcecode:: python
+
+                def op_selector(const_op):
+                    return True
+
+        * Only the constant with ``tensor.size > 2048`` is compressed:
+
+            .. sourcecode:: python
+
+                def op_selector(const_op):
+                    return const_op.val.val.size > 2048
+
+        * Compress the constant if it is the weight of a convolution layer
+          and ``tensor.size > 2048``:
+
+            .. sourcecode:: python
+
+                def op_selector(const_op):
+                    return (
+                        const_op.val.val.size > 2048
+                        and const_op.val.child_ops[0].op_type == "conv"
+                        and const_op.val == const_op.val.child_ops[0].weight
+                    )
+
+        * When creating a custom ``op_selector`` function, the following attributes are helpful:
+
+            * ``const_op.val.val``: The numpy array holding the value of the const.
+            * ``const_op.val.child_ops``: A list of ops into which this constant is feeding.
+            * ``const_op.val.child_ops[i].op_type``: The string corresponding to the op type
+              of the i-th child op.
+            * ``const_op.val.child_ops[i].name``: The string corresponding to the name of the
+              i-th child op.
+
+        * If ``op_selector`` is not provided, it will be set to the behavior in which
+          weights with more than 2048 elements are compressed:
+
+            .. sourcecode:: python
+
+                def op_selector(const_op):
+                    return const_op.val.val.size > 2048
+
+    dtype: np.generic or mil.type type
+        Determines the quantized data type (int8/uint8).
+
+        * The allowed values are:
+            * ``np.int8`` (the default)
+            * ``np.uint8``
+            * ``coremltools.converters.mil.mil.types.int8``
+            * ``coremltools.converters.mil.mil.types.uint8``
+
+    Returns
+    -------
+    model: MLModel
+        The quantized MLModel instance.
+
+    Examples
+    --------
+    .. sourcecode:: python
+
+        import coremltools as ct
+
+        model = ct.models.MLModel("my_model.mlpackage")
+        compressed_model = ct.compression_utils.affine_quantize_weights(
+            model, mode="linear_symmetric"
+        )
+
+    """
+    if op_selector is None:
+        op_selector = _default_op_selector
+    affine_weight_quantizer = _WeightAffineQuantizer(fake_compression=False, mode=mode, op_selector=op_selector, dtype=dtype)
+    return _apply_graph_pass(mlmodel, affine_weight_quantizer)
+
+
+def palettize_weights(mlmodel, nbits=None, mode="kmeans", op_selector=None, lut_function=None):
+    """
+    Utility function to convert a float-precision MLModel of type ``mlprogram`` to a
+    compressed MLModel by reducing the overall number of weights using a lookup table
+    (LUT). A LUT contains a list of float values. An `nbit` LUT has 2\ :sup:`nbits` entries.
+
+    For example, a float weight vector such as ``{0.3, 0.3, 0.5, 0.5}`` can be compressed
+    using a 1-bit LUT: ``{0.3, 0.5}``. In this case the float vector can be replaced
+    with a 1-bit vector ``{0, 0, 1, 1}``.
+
+    This function iterates over all the weights in the ``mlprogram``, discretizes their values,
+    and constructs the LUT according to the algorithm specified in ``mode``. The float
+    values are then converted to the `nbit` values, and the LUT is saved alongside each
+    weight. The ``const`` ops storing weight values are replaced by
+    ``constexpr_lut_to_dense`` ops.
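+
+    As a rough, self-contained illustration of the idea (plain numpy, not the
+    coremltools API; the array values are made up):
+
+    .. sourcecode:: python
+
+        import numpy as np
+
+        w = np.array([0.3, 0.3, 0.5, 0.5, 0.3, 0.5], dtype=np.float32)
+
+        # A 1-bit LUT suffices here because the weight has only 2 unique values.
+        lut, indices = np.unique(w, return_inverse=True)
+        # lut == [0.3, 0.5], indices == [0, 0, 1, 1, 0, 1]
+
+        # Reconstruction, which is what constexpr_lut_to_dense does at load time:
+        w_reconstructed = lut[indices]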
+
+    At runtime, the LUT and the `nbit` values are used to reconstruct the float weight
+    values, which are then used to perform the float operation that the weight feeds into.
+
+    Consider the following example of ``"uniform"`` mode (a linear histogram):
+
+    * ``nbits = 2``
+    * ``mode = "uniform"``
+    * ``weight = [0.11, 0.19, 0.3, 0.08, 0.0, 0.02]``
+
+    The weight can be converted to a palette with indices ``[0, 1, 2, 3]`` (2 bits). The
+    indices are a byte array.
+
+    The data range ``[0.0, 0.3]`` is divided into 4 partitions linearly, which is
+    ``[0.0, 0.1, 0.2, 0.3]``.
+
+    * The LUT would be ``[0.0, 0.1, 0.2, 0.3]``.
+
+    * The weight is rounded to ``[0.1, 0.2, 0.3, 0.1, 0.0, 0.0]``, and represented in
+      the palette as indices ``[01b, 10b, 11b, 01b, 00b, 00b]``.
+
+    Parameters
+    ----------
+    mlmodel: MLModel
+        Model to be converted by a LUT. This MLModel should be of type ``mlprogram``.
+
+    nbits: int
+        Number of bits per weight. Required for ``kmeans`` or ``uniform`` mode, but must
+        not be set for ``unique`` or ``custom`` mode. A LUT would have
+        2\ :sup:`nbits` entries, where `nbits` can be ``{1, 2, 4, 6, 8}``.
+
+    mode: str
+        Determines how the LUT is constructed by specifying one of the following:
+
+        * ``"kmeans"`` (default): The LUT is generated by `k-means clustering`, a method of vector
+          quantization that groups similar data points together to discover underlying
+          patterns by using a fixed number (`k`) of clusters in a dataset. A cluster
+          refers to a collection of data points aggregated together because of certain
+          similarities. `nbits` is required.
+
+        * ``"uniform"``: The LUT is generated by a linear histogram.
+
+          - ``[v_min, v_min + scale, v_min + 2 * scale, ..., v_max]``
+          - Where the weight is in the range ``[v_min, v_max]``, and
+            ``scale = (v_max - v_min) / ((1 << nbits) - 1)``.
+          - ``nbits`` is required.
+
+          A `histogram` is a representation of the distribution of a continuous variable,
+          in which the entire range of values is divided into a series of intervals (or
+          `bins`) and the representation displays how many values fall into each bin.
+          Linear histograms have evenly spaced bins, such as one bin per integer.
+
+        * ``"unique"``: The LUT is generated from the unique values in the weights. The weights
+          are assumed to be on a discrete lattice but stored in a float data type. This
+          mode identifies the unique values and converts the weight into the palettized representation.
+
+          Do not provide ``nbits`` for this mode. ``nbits`` is chosen automatically,
+          with the smallest possible value in ``{1, 2, 4, 6, 8}`` such that the
+          number of unique values is ``<= (1 << nbits)``. If the weight has ``> 256``
+          unique values, the compression is skipped.
+
+          For example:
+
+          * If the weights are ``{0.1, 0.2, 0.3, 0.4}`` and ``nbits=2``, the weights are
+            converted to ``{00b, 01b, 10b, 11b}``, and the generated LUT is
+            ``[0.1, 0.2, 0.3, 0.4]``.
+          * If the weights are ``{0.1, 0.2, 0.3, 0.4}`` and ``nbits=1``, nothing happens
+            because the weights are not a 1-bit lattice.
+          * If the weights are ``{0.1, 0.2, 0.3, 0.4, 0.5}`` and ``nbits=2``, nothing
+            happens because the weights are not a 2-bit lattice.
+
+        * ``"custom"``: The LUT and palettization parameters are calculated using a custom
+          function. If this mode is selected, ``lut_function`` must be provided.
+
+          Do not provide ``nbits`` for this mode. The user should customize ``nbits`` in the
+          ``lut_function`` implementation.
+
+    op_selector: callable
+        This function takes a single parameter with type ``coremltools.converters.mil.Operation``.
+        It returns a ``bool``: ``True`` to compress ``const_op``, otherwise ``False``.
+        See the following examples:
+
+        * All constants in the network are compressed:
+
+            .. sourcecode:: python
+
+                def op_selector(const_op):
+                    return True
+
+        * Only the constant with ``tensor.size > 2048`` is compressed:
+
+            .. sourcecode:: python
+
+                def op_selector(const_op):
+                    return const_op.val.val.size > 2048
+
+        * Compress the constant if it is the weight of a convolution layer
+          and ``tensor.size > 2048``:
+
+            .. sourcecode:: python
+
+                def op_selector(const_op):
+                    return (
+                        const_op.val.val.size > 2048
+                        and const_op.val.child_ops[0].op_type == "conv"
+                        and const_op.val == const_op.val.child_ops[0].weight
+                    )
+
+        * When creating a custom ``op_selector`` function, the following attributes are helpful:
+
+            * ``const_op.val.val``: The numpy array holding the value of the const.
+            * ``const_op.val.child_ops``: A list of ops into which this constant is feeding.
+            * ``const_op.val.child_ops[i].op_type``: The string corresponding to the op type
+              of the i-th child op.
+            * ``const_op.val.child_ops[i].name``: The string corresponding to the name of the
+              i-th child op.
+
+        * If ``op_selector`` is not provided, it will be set to the behavior in which
+          weights with more than 2048 elements are compressed:
+
+            .. sourcecode:: python
+
+                def op_selector(const_op):
+                    return const_op.val.val.size > 2048
+
+    lut_function: callable
+        A callable function which computes the weight palettization parameters. This must
+        be provided if the mode is set to ``"custom"``.
+
+        It takes as input:
+
+        weight: np.ndarray
+            A float-precision numpy array.
+
+        It returns:
+
+        lut: list[float]
+            The lookup table.
+
+        indices: list[int]
+            A list of indices for each element.
+
+        The following is an example that extracts the ``top_k`` elements as the LUT. Given
+        that ``weight = [0.1, 0.5, 0.3, 0.3, 0.5, 0.6, 0.7]``, the ``lut_function``
+        produces ``lut = [0, 0.5, 0.6, 0.7], indices = [0, 1, 0, 0, 1, 2, 3]``.
+
+        .. sourcecode:: python
+
+            import numpy as np
+
+            def lut_function(weight):
+                # In this example, we assume all elements in the weight are >= 0
+                weight = weight.flatten()
+                nbits = 2
+
+                # Build the LUT from the top k largest unique elements in the weight.
+                # k = (1 << nbits) - 1, so that the first LUT entry can be 0.
+                k = (1 << nbits) - 1
+                unique_elements = np.unique(weight)
+                top_k = np.sort(np.partition(unique_elements, -k)[-k:])
+                lut = [0.0] + top_k.tolist()
+
+                # Compute the indices; values not in the LUT map to index 0
+                mapping = {v: idx for idx, v in enumerate(lut)}
+                indices = [mapping[v] if v in mapping else 0 for v in weight]
+
+                return lut, indices
+
+    Returns
+    -------
+    model: MLModel
+        The palettized MLModel instance.
+
+    Examples
+    --------
+    .. sourcecode:: python
+
+        import coremltools as ct
+
+        model = ct.models.MLModel("my_model.mlpackage")
+        compressed_model = ct.compression_utils.palettize_weights(model, mode="kmeans", nbits=4)
+
+    """
+    if op_selector is None:
+        op_selector = _default_op_selector
+    weight_palettizer = _WeightPalettizer(nbits=nbits, fake_compression=False, op_selector=op_selector, mode=mode, lut_function=lut_function)
+    return _apply_graph_pass(mlmodel, weight_palettizer)
+
+
+def sparsify_weights(mlmodel, mode="threshold_based", threshold=1e-3, target_percentile=1.0, op_selector=None):
+    """
+    Utility function to convert a float-precision MLModel of type ``mlprogram`` to a
+    compressed MLModel using sparse representation.
The ``const`` ops storing weight
+    values are replaced by ``constexpr_sparse_to_dense`` ops.
+
+    This function is useful if the model is trained with pruning techniques so that
+    many weights have zero values. If a large percentage of weight values are zero,
+    a sparse representation is more efficient than a dense one (the default).
+
+    The sparsified weights are stored in a bit mask. If the weight values are
+    ``{0, 0, 0, 0, 0, 0, 0, 56.3}``, its sparse representation contains a bit mask with
+    ones on locations where the value is non-zero: ``00000001b``. This is accompanied by
+    non-zero data, which is a size-1 vector of value ``{56.3}``.
+
+    For example, given the following:
+
+    * ``weight = [0.3, 0, 0, 0.5, 0, 0]``
+    * ``non_zero_data, bit_mask = sparsify(weight)``
+
+    The results are:
+
+    * ``non_zero_data = [0.3, 0.5]``
+    * ``bit_mask = "100100"``
+
+    Parameters
+    ----------
+    mlmodel: MLModel
+        Model to be sparsified. This MLModel should be of type ``mlprogram``.
+
+    mode: str
+        Determines the scheme to sparsify the model by specifying one of the following:
+
+        * ``"threshold_based"`` (default): All the absolute weight values that are smaller
+          than ``threshold`` are changed to 0, and the tensor is stored in a sparse format.
+          For example, given the following:
+
+          * ``weight = [0.3, -0.2, -0.01, 0.05]``
+          * ``threshold = 0.03``
+
+          The sparsified weight would be ``[0.3, -0.2, 0, 0.05]``.
+
+        * ``"percentile_based"``: Sparsify the weight with a constant sparsity percentile,
+          which is ``target_percentile``. Where
+          ``n = floor(size_of_weight_tensor * target_percentile)``, the ``n`` lowest
+          absolute weight values are changed to 0. For example, given the following:
+
+          * ``weight = [0.3, -0.2, -0.01, 0.05]``
+          * ``target_percentile = 0.75``
+
+          The sparsified weight would be ``[0.3, 0, 0, 0]``.
+
+    threshold: float
+        Required when ``mode = "threshold_based"``. The absolute threshold to sparsify the weight.
+
+    target_percentile: float
+        Required when ``mode = "percentile_based"``. The percentage of sparsity for
+        compression, which needs to be in the range ``[0, 1]``. When 0, no sparsification
+        occurs; when 1, all weights become 0.
+
+    op_selector: callable
+        This function takes a single parameter with type ``coremltools.converters.mil.Operation``.
+        It returns a ``bool``: ``True`` to compress ``const_op``, otherwise ``False``.
+        See the following examples:
+
+        * All constants in the network are compressed:
+
+            .. sourcecode:: python
+
+                def op_selector(const_op):
+                    return True
+
+        * Only the constant with ``tensor.size > 2048`` is compressed:
+
+            .. sourcecode:: python
+
+                def op_selector(const_op):
+                    return const_op.val.val.size > 2048
+
+        * Compress the constant if it is the weight of a convolution layer
+          and ``tensor.size > 2048``:
+
+            .. sourcecode:: python
+
+                def op_selector(const_op):
+                    return (
+                        const_op.val.val.size > 2048
+                        and const_op.val.child_ops[0].op_type == "conv"
+                        and const_op.val == const_op.val.child_ops[0].weight
+                    )
+
+        * When creating a custom ``op_selector`` function, the following attributes are helpful:
+
+            * ``const_op.val.val``: The numpy array holding the value of the const.
+            * ``const_op.val.child_ops``: A list of ops into which this constant is feeding.
+            * ``const_op.val.child_ops[i].op_type``: The string corresponding to the op type
+              of the i-th child op.
+            * ``const_op.val.child_ops[i].name``: The string corresponding to the name of the
+              i-th child op.
+
+        * If ``op_selector`` is not provided, it will be set to the behavior in which
+          weights with more than 2048 elements are compressed:
+
+            .. sourcecode:: python
+
+                def op_selector(const_op):
+                    return const_op.val.val.size > 2048
+
+    Returns
+    -------
+    model: MLModel
+        The sparse MLModel instance.
+
+    Examples
+    --------
+    .. sourcecode:: python
+
+        import coremltools as ct
+
+        model = ct.models.MLModel("my_model.mlpackage")
+        compressed_model = ct.compression_utils.sparsify_weights(
+            model, mode="threshold_based", threshold=0.01
+        )
+
+    """
+    if op_selector is None:
+        op_selector = _default_op_selector
+    weight_sparsifier = _WeightSparsifier(mode=mode, threshold=threshold, target_percentile=target_percentile, op_selector=op_selector)
+    return _apply_graph_pass(mlmodel, weight_sparsifier)
+
+def decompress_weights(mlmodel):
+    """
+    Utility function to convert weights that are sparse, palettized, or affine-quantized
+    back to the float format. That is, it converts any of the following three ops:
+
+    (1) ``constexpr_affine_dequantize``
+    (2) ``constexpr_lut_to_dense``
+    (3) ``constexpr_sparse_to_dense``
+
+    back to ``mb.const``.
+
+    Parameters
+    ----------
+    mlmodel: MLModel
+        Model which will be decompressed.
+
+    Returns
+    -------
+    model: MLModel
+        The MLModel with no constexpr ops included.
+
+    Examples
+    --------
+    .. sourcecode:: python
+
+        import coremltools as ct
+
+        model = ct.models.MLModel("my_compressed_model.mlpackage")
+        decompressed_model = ct.compression_utils.decompress_weights(model)
+
+    """
+    weight_decompressor = _WeightDecompressor(op_selector=lambda op: True)
+    return _apply_graph_pass(mlmodel, weight_decompressor)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/model.py b/__packaged__/coreml/.python_dependencies/coremltools/models/model.py
new file mode 100644
index 00000000..ac7f1a11
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/models/model.py
@@ -0,0 +1,670 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import atexit as _atexit +import os as _os +import shutil as _shutil +import tempfile as _tempfile +import warnings as _warnings +from copy import deepcopy as _deepcopy + +import numpy as _np +import numpy as _numpy + +from coremltools import ComputeUnit as _ComputeUnit +from coremltools._deps import _HAS_TF_1, _HAS_TF_2, _HAS_TORCH +from coremltools.converters.mil.mil.program import Program as _Program + +from ..proto import FeatureTypes_pb2 as _ft +from ..proto import MIL_pb2 as _MIL_pb2 +from ..proto import Model_pb2 as _Model_pb2 +from .utils import (_MLMODEL_EXTENSION, _MLPACKAGE_AUTHOR_NAME, + _MLPACKAGE_EXTENSION, _WEIGHTS_DIR_NAME, _create_mlpackage, + _has_custom_layer, _is_macos, _macos_version, + load_spec as _load_spec, save_spec as _save_spec, + ) + +if _HAS_TORCH: + import torch as _torch + +if _HAS_TF_1 or _HAS_TF_2: + import tensorflow as _tf + + +try: + from ..libmodelpackage import ModelPackage as _ModelPackage +except: + _ModelPackage = None + +_HAS_PIL = True +try: + from PIL import Image as _PIL_IMAGE +except: + _HAS_PIL = False + + +_MLMODEL_FULL_PRECISION = "float32" +_MLMODEL_HALF_PRECISION = "float16" +_MLMODEL_QUANTIZED = "quantized_model" + +_VALID_MLMODEL_PRECISION_TYPES = [ + _MLMODEL_FULL_PRECISION, + _MLMODEL_HALF_PRECISION, + _MLMODEL_QUANTIZED, +] + +# Linear quantization +_QUANTIZATION_MODE_LINEAR_QUANTIZATION = "_linear_quantization" +# Linear quantization represented as a lookup table +_QUANTIZATION_MODE_LOOKUP_TABLE_LINEAR = "_lookup_table_quantization_linear" +# Lookup table quantization generated by K-Means +_QUANTIZATION_MODE_LOOKUP_TABLE_KMEANS = "_lookup_table_quantization_kmeans" +# Custom lookup table quantization +_QUANTIZATION_MODE_CUSTOM_LOOKUP_TABLE = "_lookup_table_quantization_custom" +# Dequantization +_QUANTIZATION_MODE_DEQUANTIZE = "_dequantize_network" # used for testing +# Symmetric linear quantization +_QUANTIZATION_MODE_LINEAR_SYMMETRIC = "_linear_quantization_symmetric" + +_SUPPORTED_QUANTIZATION_MODES = [ + _QUANTIZATION_MODE_LINEAR_QUANTIZATION, + _QUANTIZATION_MODE_LOOKUP_TABLE_LINEAR, + _QUANTIZATION_MODE_LOOKUP_TABLE_KMEANS, + _QUANTIZATION_MODE_CUSTOM_LOOKUP_TABLE, + _QUANTIZATION_MODE_DEQUANTIZE, + _QUANTIZATION_MODE_LINEAR_SYMMETRIC, +] + +_LUT_BASED_QUANTIZATION = [ + _QUANTIZATION_MODE_LOOKUP_TABLE_LINEAR, + _QUANTIZATION_MODE_LOOKUP_TABLE_KMEANS, + _QUANTIZATION_MODE_CUSTOM_LOOKUP_TABLE, +] + +_METADATA_VERSION = "com.github.apple.coremltools.version" +_METADATA_SOURCE = "com.github.apple.coremltools.source" + + + +class _FeatureDescription: + def __init__(self, fd_spec): + self._fd_spec = fd_spec + + def __repr__(self): + return "Features(%s)" % ",".join(map(lambda x: x.name, self._fd_spec)) + + def __len__(self): + return len(self._fd_spec) + + def __getitem__(self, key): + for f in self._fd_spec: + if key == f.name: + return f.shortDescription + raise KeyError("No feature with name %s." % key) + + def __contains__(self, key): + for f in self._fd_spec: + if key == f.name: + return True + return False + + def __setitem__(self, key, value): + for f in self._fd_spec: + if key == f.name: + f.shortDescription = value + return + raise AttributeError("No feature with name %s." 
% key)
+
+    def __iter__(self):
+        for f in self._fd_spec:
+            yield f.name
+
+
+def _get_proxy_and_spec(filename, compute_units, skip_model_load=False):
+    try:
+        from ..libcoremlpython import _MLModelProxy
+    except Exception:
+        _MLModelProxy = None
+
+    filename = _os.path.expanduser(filename)
+    specification = _load_spec(filename)
+
+    if _MLModelProxy and not skip_model_load:
+
+        # check if the version is supported
+        engine_version = _MLModelProxy.maximum_supported_specification_version()
+        if specification.specificationVersion > engine_version:
+            # in this case the specification is a newer kind of .mlmodel than this
+            # version of the engine can support so we'll not try to have a proxy object
+            return None, specification, None
+
+        try:
+            return _MLModelProxy(filename, compute_units.name), specification, None
+        except RuntimeError as e:
+            _warnings.warn(
+                "You will not be able to run predict() on this Core ML model."
+                + " Underlying exception message was: "
+                + str(e),
+                RuntimeWarning,
+            )
+            return None, specification, e
+
+    return None, specification, None
+
+
+def _try_get_weights_dir_path(mlpackage_path):
+    """
+    Try to find the weights in the mlpackage and return the path to the weights directory if found.
+    Return None if not found.
+    :param mlpackage_path: str, path to the mlpackage directory
+    :return: path to the weights directory inside the mlpackage directory
+    """
+    weights_dir = None
+    try:
+        if _ModelPackage.isValid(mlpackage_path):
+            item_info = _ModelPackage(mlpackage_path).findItemByNameAuthor(_WEIGHTS_DIR_NAME, _MLPACKAGE_AUTHOR_NAME)
+            if item_info is not None:
+                weights_dir = item_info.path()
+    except Exception:
+        pass
+    return weights_dir
+
+
+class MLModel:
+    """
+    This class defines the minimal interface to a Core ML object in Python.
+
+    At a high level, the protobuf specification consists of:
+
+    - Model description: Encodes names and type information of the inputs and outputs to the model.
+    - Model parameters: The set of parameters required to represent a specific instance of the model.
+    - Metadata: Information about the origin, license, and author of the model.
+
+    With this class, you can inspect a Core ML model, modify metadata, and make
+    predictions for the purposes of testing (on select platforms).
+
+    Examples
+    --------
+    .. sourcecode:: python
+
+        # Load the model
+        model = MLModel("HousePricer.mlmodel")
+
+        # Set the model metadata
+        model.author = "Author"
+        model.license = "BSD"
+        model.short_description = "Predicts the price of a house in the Seattle area."
+
+        # Get the interface to the model
+        model.input_description
+        model.output_description
+
+        # Set feature descriptions manually
+        model.input_description["bedroom"] = "Number of bedrooms"
+        model.input_description["bathrooms"] = "Number of bathrooms"
+        model.input_description["size"] = "Size (in square feet)"
+
+        # Set the output description
+        model.output_description["price"] = "Price of the house"
+
+        # Make predictions
+        predictions = model.predict({"bedroom": 1.0, "bathrooms": 1.0, "size": 1240})
+
+        # Get the spec of the model
+        spec = model.get_spec()
+
+        # Save the model
+        model.save("HousePricer.mlpackage")
+
+        # Load the model from the spec object
+        spec = model.get_spec()
+        # modify spec (e.g. rename inputs/outputs etc.)
+        model = MLModel(spec)
+        # if model type is mlprogram, i.e.
spec.WhichOneof('Type') == "mlProgram", then:
+            model = MLModel(spec, weights_dir=model.weights_dir)
+
+    See Also
+    --------
+    predict
+    """
+
+    def __init__(
+        self,
+        model,
+        is_temp_package=False,
+        mil_program=None,
+        skip_model_load=False,
+        compute_units=_ComputeUnit.ALL,
+        weights_dir=None,
+    ):
+        """
+        Construct an MLModel from an ``.mlmodel`` or ``.mlpackage`` path, or from a spec.
+
+        Parameters
+        ----------
+        model: str or Model_pb2
+
+            For an ML program (``mlprogram``), the model can be a path string (``.mlpackage``) or ``Model_pb2``.
+            If it is a path string, it must point to a directory containing bundle
+            artifacts (such as ``weights.bin``).
+            If it is of type ``Model_pb2`` (a spec) and the model has weights, then ``weights_dir``
+            must also be provided, since both the proto spec and the weights are required
+            to initialize and load the model. Unlike a NeuralNetwork, the proto spec for an
+            ML program does not contain the weights; they are stored separately. If the model
+            does not have weights, an empty weights_dir can be provided.
+
+            For non-mlprogram model types, the model can be a path string (``.mlmodel``) or type ``Model_pb2``,
+            i.e. a spec object.
+
+        is_temp_package: bool
+            Set to True if the input model package dir is temporary and can be deleted upon interpreter termination.
+
+        mil_program: coremltools.converters.mil.Program
+            Set to the MIL program object, if available.
+            It is available whenever an MLModel object is constructed using
+            the unified converter API `coremltools.convert() <https://apple.github.io/coremltools/source/coremltools.converters.mil.html#coremltools.converters._converters_entry.convert>`_.
+
+        skip_model_load: bool
+            Set to True to prevent coremltools from calling into the Core ML framework
+            to compile and load the model. In that case, the returned model object cannot
+            be used to make a prediction. This flag may be used to load a newer model
+            type on an older Mac, or to inspect or load/save the spec.
+
+            For example, this allows loading an ML Program model type on macOS 11, since an
+            ML Program can be compiled and loaded only on macOS 12+.
+
+            Defaults to False.
+
+        compute_units: coremltools.ComputeUnit
+            An enum with the following possible values:
+                - ``coremltools.ComputeUnit.ALL``: Use all compute units available, including the
+                  neural engine.
+                - ``coremltools.ComputeUnit.CPU_ONLY``: Limit the model to only use the CPU.
+                - ``coremltools.ComputeUnit.CPU_AND_GPU``: Use both the CPU and GPU,
+                  but not the neural engine.
+                - ``coremltools.ComputeUnit.CPU_AND_NE``: Use both the CPU and neural engine,
+                  but not the GPU (available on macOS >= 13.0).
+
+        weights_dir: str
+            Path to the weights directory, required when loading an MLModel of type mlprogram
+            from a spec object, i.e. when the argument ``model`` is of type ``Model_pb2``.
+
+        Notes
+        -----
+        Internally this maintains the following:
+
+        - ``_MLModelProxy``: A pybind wrapper around
+          CoreML::Python::Model (see
+          `coremltools/coremlpython/CoreMLPython.mm `_)
+
+        - ``package_path`` (mlprogram only): Directory containing all artifacts (``.mlmodel``,
+          weights, and so on).
+
+        - ``weights_dir`` (mlprogram only): Directory containing weights inside the package_path.
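+
+        For instance, a spec-only workflow might look like the following sketch
+        (the path and the metadata edit are illustrative only):
+
+        .. sourcecode:: python
+
+            # Inspect or edit a model without compiling or loading it:
+            model = MLModel("my_model.mlpackage", skip_model_load=True)
+            spec = model.get_spec()
+            spec.description.metadata.author = "Author"
+
+            # Re-wrap the modified mlprogram spec; the weights directory must be supplied.
+            model = MLModel(spec, weights_dir=model.weights_dir)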
+ + Examples + -------- + loaded_model = MLModel('my_model.mlmodel') + loaded_model = MLModel("my_model.mlpackage") + """ + + def cleanup(package_path): + if _os.path.exists(package_path): + _shutil.rmtree(package_path) + + if not isinstance(compute_units, _ComputeUnit): + raise TypeError('"compute_units" parameter must be of type: coremltools.ComputeUnit') + elif (compute_units == _ComputeUnit.CPU_AND_NE + and _is_macos() + and _macos_version() < (13, 0) + ): + raise ValueError( + 'coremltools.ComputeUnit.CPU_AND_NE is only available on macOS >= 13.0' + ) + self.compute_unit = compute_units + + self.is_package = False + self.is_temp_package = False + self.package_path = None + self._weights_dir = None + if mil_program is not None and not isinstance(mil_program, _Program): + raise ValueError('"mil_program" must be of type "coremltools.converters.mil.Program"') + self._mil_program = mil_program + + if isinstance(model, str): + model = _os.path.abspath(_os.path.expanduser(_os.path.expandvars(model))) + if _os.path.isdir(model): + self.is_package = True + self.package_path = model + self.is_temp_package = is_temp_package + self._weights_dir = _try_get_weights_dir_path(model) + self.__proxy__, self._spec, self._framework_error = _get_proxy_and_spec( + model, compute_units, skip_model_load=skip_model_load, + ) + elif isinstance(model, _Model_pb2.Model): + model_type = model.WhichOneof('Type') + if model_type in ("mlProgram", 'pipelineClassifier', 'pipelineRegressor', 'pipeline'): + if model_type == "mlProgram" and weights_dir is None: + raise Exception('MLModel of type mlProgram cannot be loaded just from the model spec object. ' + 'It also needs the path to the weights file. Please provide that as well, ' + 'using the \'weights_dir\' argument.') + self.is_package = True + self.is_temp_package = True + filename = _create_mlpackage(model, weights_dir) + self.package_path = filename + self._weights_dir = _try_get_weights_dir_path(filename) + else: + filename = _tempfile.mktemp(suffix=_MLMODEL_EXTENSION) + _save_spec(model, filename) + + self.__proxy__, self._spec, self._framework_error = _get_proxy_and_spec( + filename, compute_units, skip_model_load=skip_model_load, + ) + try: + _os.remove(filename) + except OSError: + pass + else: + raise TypeError( + "Expected model to be a .mlmodel file, .mlpackage file or a Model_pb2 object" + ) + + self._input_description = _FeatureDescription(self._spec.description.input) + self._output_description = _FeatureDescription(self._spec.description.output) + + if self.is_package and self.is_temp_package: + _atexit.register(cleanup, self.package_path) + + @property + def short_description(self): + return self._spec.description.metadata.shortDescription + + @short_description.setter + def short_description(self, short_description): + self._spec.description.metadata.shortDescription = short_description + + @property + def input_description(self): + return self._input_description + + @property + def output_description(self): + return self._output_description + + @property + def user_defined_metadata(self): + return self._spec.description.metadata.userDefined + + @property + def author(self): + return self._spec.description.metadata.author + + @author.setter + def author(self, author): + self._spec.description.metadata.author = author + + @property + def license(self): + return self._spec.description.metadata.license + + @license.setter + def license(self, license): + self._spec.description.metadata.license = license + + @property + def version(self): + return 
self._spec.description.metadata.versionString + + @property + def weights_dir(self): + return self._weights_dir + + @version.setter + def version(self, version_string): + self._spec.description.metadata.versionString = version_string + + def __repr__(self): + return self._spec.description.__repr__() + + def __str__(self): + return self.__repr__() + + def save(self, save_path: str): + """ + Save the model to a ``.mlmodel`` format. For an MIL program, the save_path is + a package directory containing the ``mlmodel`` and weights. + + Parameters + ---------- + save_path: Target file path / bundle directory for the model. + + Examples + -------- + model.save('my_model_file.mlmodel') + loaded_model = MLModel('my_model_file.mlmodel') + """ + save_path = _os.path.expanduser(save_path) + + # Clean up existing file or directory. + if _os.path.exists(save_path): + if _os.path.isdir(save_path): + _shutil.rmtree(save_path) + else: + _os.remove(save_path) + + if self.is_package: + name, ext = _os.path.splitext(save_path) + if not ext: + save_path = "{}{}".format(save_path, _MLPACKAGE_EXTENSION) + elif ext != _MLPACKAGE_EXTENSION: + raise Exception("For an ML Program, extension must be {} (not {})".format(_MLPACKAGE_EXTENSION, ext)) + _shutil.copytree(self.package_path, save_path) + else: + _save_spec(self._spec, save_path) + + def get_spec(self): + """ + Get a deep copy of the protobuf specification of the model. + + Returns + ------- + model: Model_pb2 + Protobuf specification of the model. + + Examples + -------- + spec = model.get_spec() + """ + return _deepcopy(self._spec) + + + def predict(self, data): + """ + Return predictions for the model. + + Parameters + ---------- + data: dict[str, value] + Dictionary of data to make predictions from where the keys are + the names of the input features. + If value is array type, numpy.ndarray, tensorflow.Tensor and torch.Tensor are acceptable. + + Returns + ------- + dict[str, value] + Predictions as a dictionary where each key is the output feature + name. + + Examples + -------- + data = {'bedroom': 1.0, 'bath': 1.0, 'size': 1240} + predictions = model.predict(data) + data = {'array': numpy.array([[1.0, 2.0], [3.0, 4.0]])} + predictions = model.predict(data) + data = {'array': torch.Tensor([[1.0, 2.0], [3.0, 4.0]])} + predictions = model.predict(data) + data = {'array': tensorflow.Tensor([[1.0, 2.0], [3.0, 4.0]])} + predictions = model.predict(data) + """ + if self.is_package and _is_macos() and _macos_version() < (12, 0): + raise Exception( + "predict() for .mlpackage is not supported in macOS version older than 12.0." + ) + + if self.__proxy__: + self._verify_input_dict(data) + self._convert_tensor_to_numpy(data) + # TODO: remove the following call when this is fixed: rdar://92239209 + self._update_float16_multiarray_input_to_float32(data) + return self.__proxy__.predict(data) + else: + if _macos_version() < (10, 13): + raise Exception( + "Model prediction is only supported on macOS version 10.13 or later." + ) + + try: + from ..libcoremlpython import _MLModelProxy + except Exception as e: + print("Exception loading model proxy: %s\n" % e) + _MLModelProxy = None + except: + print("Exception while loading model proxy.\n") + _MLModelProxy = None + + if not _MLModelProxy: + raise Exception("Unable to load CoreML.framework. 
Cannot make predictions.") + elif ( + _MLModelProxy.maximum_supported_specification_version() + < self._spec.specificationVersion + ): + engineVersion = _MLModelProxy.maximum_supported_specification_version() + raise Exception( + "The specification has version " + + str(self._spec.specificationVersion) + + " but the Core ML framework version installed only supports Core ML model specification version " + + str(engineVersion) + + " or older." + ) + elif _has_custom_layer(self._spec): + raise Exception( + "This model contains a custom neural network layer, so predict is not supported." + ) + else: + if self._framework_error: + raise self._framework_error + else: + raise Exception("Unable to load CoreML.framework. Cannot make predictions.") + + + def _set_build_info_mil_attributes(self, metadata): + if self._spec.WhichOneof('Type') != "mlProgram": + # No MIL attributes to set + return + + ml_program_attributes = self._spec.mlProgram.attributes + build_info_proto = ml_program_attributes["buildInfo"] + + # Set ValueType to dictionary of string to string + str_type = _MIL_pb2.ValueType() + str_type.tensorType.dataType = _MIL_pb2.DataType.STRING + dict_type_str_to_str = _MIL_pb2.ValueType() + dict_type_str_to_str.dictionaryType.keyType.CopyFrom(str_type) + dict_type_str_to_str.dictionaryType.valueType.CopyFrom(str_type) + build_info_proto.type.CopyFrom(dict_type_str_to_str) + + # Copy the metadata + build_info_dict = build_info_proto.immediateValue.dictionary + for k, v in metadata.items(): + key_pair = _MIL_pb2.DictionaryValue.KeyValuePair() + key_pair.key.immediateValue.tensor.strings.values.append(k) + key_pair.key.type.CopyFrom(str_type) + key_pair.value.immediateValue.tensor.strings.values.append(v) + key_pair.value.type.CopyFrom(str_type) + build_info_dict.values.append(key_pair) + + + def _get_mil_internal(self): + """ + Get a deep copy of the MIL program object, if available. + It's available whenever an MLModel object is constructed using + the unified converter API [`coremltools.convert()`](https://apple.github.io/coremltools/source/coremltools.converters.mil.html#coremltools.converters._converters_entry.convert). + + Returns + ------- + program: coremltools.converters.mil.Program + + Examples + -------- + mil_prog = model._get_mil_internal() + """ + return _deepcopy(self._mil_program) + + def _verify_input_dict(self, input_dict): + # Check if the input name given by the user is valid. 
+ # Although this is checked during prediction inside CoreML Framework, + # we still check it here to return early and + # return a more verbose error message + self._verify_input_name_exists(input_dict) + + # verify that the pillow image modes are correct, for image inputs + self._verify_pil_image_modes(input_dict) + + def _verify_pil_image_modes(self, input_dict): + if not _HAS_PIL: + return + for input_desc in self._spec.description.input: + if input_desc.type.WhichOneof("Type") == "imageType": + input_val = input_dict.get(input_desc.name, None) + if not isinstance(input_val, _PIL_IMAGE.Image): + msg = "Image input, '{}' must be of type PIL.Image.Image in the input dict" + raise TypeError(msg.format(input_desc.name)) + if input_desc.type.imageType.colorSpace in (_ft.ImageFeatureType.BGR, _ft.ImageFeatureType.RGB): + if input_val.mode != 'RGB': + msg = "RGB/BGR image input, '{}', must be of type PIL.Image.Image with mode=='RGB'" + raise TypeError(msg.format(input_desc.name)) + elif input_desc.type.imageType.colorSpace == _ft.ImageFeatureType.GRAYSCALE: + if input_val.mode != 'L': + msg = "GRAYSCALE image input, '{}', must be of type PIL.Image.Image with mode=='L'" + raise TypeError(msg.format(input_desc.name)) + elif input_desc.type.imageType.colorSpace == _ft.ImageFeatureType.GRAYSCALE_FLOAT16: + if input_val.mode != 'F': + msg = "GRAYSCALE_FLOAT16 image input, '{}', must be of type PIL.Image.Image with mode=='F'" + raise TypeError(msg.format(input_desc.name)) + + def _verify_input_name_exists(self, input_dict): + model_input_names = [inp.name for inp in self._spec.description.input] + model_input_names_set = set(model_input_names) + for given_input in input_dict.keys(): + if given_input not in model_input_names_set: + err_msg = "Provided key \"{}\", in the input dict, " \ + "does not match any of the model input name(s), which are: {}" + raise KeyError(err_msg.format(given_input, ",".join(model_input_names))) + + def _update_float16_multiarray_input_to_float32(self, input_data): + for k, v in input_data.items(): + if isinstance(v, _np.ndarray) and v.dtype == _np.float16: + input_data[k] = v.astype(_np.float32) + + def _convert_tensor_to_numpy(self, input_dict): + def convert(given_input): + if isinstance(given_input, _numpy.ndarray): + sanitized_input = given_input + elif _HAS_TORCH and isinstance(given_input, _torch.Tensor): + sanitized_input = given_input.detach().numpy() + elif (_HAS_TF_1 or _HAS_TF_2) and isinstance(given_input, _tf.Tensor): + sanitized_input = given_input.eval(session=_tf.compat.v1.Session()) + else: + sanitized_input = _numpy.array(given_input) + return sanitized_input + + model_input_to_types = {} + for inp in self._spec.description.input: + type_value = inp.type.multiArrayType.dataType + type_name = inp.type.multiArrayType.ArrayDataType.Name(type_value) + if type_name != "INVALID_ARRAY_DATA_TYPE": + model_input_to_types[inp.name] = type_name + + for given_input_name, given_input in input_dict.items(): + if not given_input_name in model_input_to_types: + continue + input_dict[given_input_name] = convert(given_input) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/nearest_neighbors/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/models/nearest_neighbors/__init__.py new file mode 100644 index 00000000..6dd839ad --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/nearest_neighbors/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) 2019, Apple Inc. All rights reserved. 
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+from .builder import KNearestNeighborsClassifierBuilder
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/nearest_neighbors/builder.py b/__packaged__/coreml/.python_dependencies/coremltools/models/nearest_neighbors/builder.py
new file mode 100644
index 00000000..0026897d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/models/nearest_neighbors/builder.py
@@ -0,0 +1,664 @@
+# Copyright (c) 2019, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import numpy as _np
+
+import coremltools
+
+from ...proto import FeatureTypes_pb2
+from .. import datatypes
+
+
+class KNearestNeighborsClassifierBuilder:
+    """
+    Construct a Core ML KNearestNeighborsClassifier specification.
+
+    Please see the Core ML Nearest Neighbors protobuf message for more information
+    on KNearestNeighborsClassifier parameters.
+
+    Examples
+    --------
+    .. sourcecode:: python
+
+        from coremltools.models.nearest_neighbors import KNearestNeighborsClassifierBuilder
+        from coremltools.models.utils import save_spec
+
+        # Create a KNearestNeighborsClassifier model that takes 4-dimensional input data
+        # and outputs a string label.
+        builder = KNearestNeighborsClassifierBuilder(
+            input_name="input",
+            output_name="output",
+            number_of_dimensions=4,
+            default_class_label="default_label",
+        )
+
+        # Save the spec created by the builder
+        save_spec(builder.spec, "knnclassifier.mlmodel")
+    """
+
+    _VALID_INDEX_TYPES = ["linear", "kd_tree"]
+
+    _VALID_WEIGHTING_SCHEMES = ["uniform", "inverse_distance"]
+
+    _VALID_DISTANCE_METRICS = ["squared_euclidean"]
+
+    # Optional parameter keys for constructor
+    _PARAMETER_KEY_NUMBER_OF_NEIGHBORS = "number_of_neighbors"
+    _PARAMETER_KEY_WEIGHTING_SCHEME = "weighting_scheme"
+    _PARAMETER_KEY_INDEX_TYPE = "index_type"
+    _PARAMETER_KEY_LEAF_SIZE = "leaf_size"
+    _PARAMETER_KEY_INPUT_TYPE = "input_type"
+
+    # Optional parameter default values
+    _PARAMETER_DEFAULT_NUMBER_OF_NEIGHBORS = 5
+    _PARAMETER_DEFAULT_WEIGHTING_SCHEME = "uniform"
+    _PARAMETER_DEFAULT_INDEX_TYPE = "linear"
+    _PARAMETER_DEFAULT_LEAF_SIZE = 30
+    _PARAMETER_DEFAULT_INPUT_TYPE = "NotSpecified"
+
+    def __init__(
+        self,
+        input_name,
+        output_name,
+        number_of_dimensions,
+        default_class_label,
+        **kwargs
+    ):
+        """
+        Create a KNearestNeighborsClassifierBuilder object.
+
+        Parameters
+        ----------
+        input_name
+            Name of the model input.
+
+        output_name
+            Name of the output.
+
+        number_of_dimensions
+            Number of dimensions of the input data.
+
+        default_class_label
+            The default class label to use for predictions. Must be either an
+            int64 or a string.
+
+        number_of_neighbors
+            Number of neighbors to use for predictions. Default = 5, with allowed values
+            between 1 and 1000.
+
+        weighting_scheme
+            Weight function used in prediction. One of ``'uniform'`` (default) or
+            ``'inverse_distance'``.
+
+        index_type
+            Algorithm to compute nearest neighbors. One of ``'linear'`` (default), or
+            ``'kd_tree'``.
+
+        leaf_size
+            Leaf size for the kd-tree. Ignored if index type is ``'linear'``. Default = 30.
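+
+        Examples
+        --------
+        A minimal construction sketch; the keyword values shown are illustrative only:
+
+        .. sourcecode:: python
+
+            builder = KNearestNeighborsClassifierBuilder(
+                input_name="input",
+                output_name="output",
+                number_of_dimensions=4,
+                default_class_label="default_label",
+                number_of_neighbors=3,
+                weighting_scheme="inverse_distance",
+            )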
+        """
+        self.spec = coremltools.proto.Model_pb2.Model()
+        self.spec.specificationVersion = (
+            coremltools._MINIMUM_NEAREST_NEIGHBORS_SPEC_VERSION
+        )
+
+        # the model is initially empty - assume it's updatable
+        self.is_updatable = True
+
+        if number_of_dimensions <= 0:
+            raise ValueError("number_of_dimensions must be > 0")
+        self.spec.kNearestNeighborsClassifier.nearestNeighborsIndex.numberOfDimensions = (
+            number_of_dimensions
+        )
+
+        input_type = kwargs.get(
+            self._PARAMETER_KEY_INPUT_TYPE, self._PARAMETER_DEFAULT_INPUT_TYPE
+        )
+        input_feature_type = FeatureTypes_pb2.ArrayFeatureType.FLOAT32
+        if input_type == datatypes.Double:
+            input_feature_type = FeatureTypes_pb2.ArrayFeatureType.DOUBLE
+
+        input_feature = self.spec.description.input.add()
+        input_feature.name = input_name
+        input_feature.type.multiArrayType.dataType = input_feature_type
+        input_feature.type.multiArrayType.shape.extend([number_of_dimensions])
+
+        training_features = self.spec.description.trainingInput.add()
+        training_features.name = input_name
+        training_features.type.multiArrayType.dataType = input_feature_type
+        training_features.type.multiArrayType.shape.extend([number_of_dimensions])
+
+        output_label = self.spec.description.output.add()
+        output_label.name = output_name
+        output_label_probs = self.spec.description.output.add()
+        output_label_probs.name = output_name + "Probs"
+        training_features = self.spec.description.trainingInput.add()
+        training_features.name = output_name
+
+        if self._is_valid_text_type(default_class_label):
+            output_label.type.stringType.MergeFromString(b"")
+            training_features.type.stringType.MergeFromString(b"")
+            output_label_probs.type.dictionaryType.stringKeyType.MergeFromString(b"")
+            self.spec.kNearestNeighborsClassifier.stringClassLabels.MergeFromString(b"")
+            self.spec.kNearestNeighborsClassifier.defaultStringLabel = (
+                default_class_label
+            )
+        elif self._is_valid_number_type(default_class_label):
+            output_label.type.int64Type.MergeFromString(b"")
+            training_features.type.int64Type.MergeFromString(b"")
+            output_label_probs.type.dictionaryType.int64KeyType.MergeFromString(b"")
+            self.spec.kNearestNeighborsClassifier.int64ClassLabels.MergeFromString(b"")
+            self.spec.kNearestNeighborsClassifier.defaultInt64Label = (
+                default_class_label
+            )
+        else:
+            raise TypeError(
+                "default_class_label type ({}) is invalid. Must be either string or int64".format(
+                    type(default_class_label)
+                )
+            )
+
+        self.spec.description.predictedFeatureName = output_label.name
+        self.spec.description.predictedProbabilitiesName = output_label_probs.name
+
+        number_of_neighbors = kwargs.get(
+            self._PARAMETER_KEY_NUMBER_OF_NEIGHBORS,
+            self._PARAMETER_DEFAULT_NUMBER_OF_NEIGHBORS,
+        )
+        self.set_number_of_neighbors_with_bounds(
+            number_of_neighbors, allowed_range=(1, 1000)
+        )  # Can we think of a more sensible default value?
+
+        self.weighting_scheme = kwargs.get(
+            self._PARAMETER_KEY_WEIGHTING_SCHEME,
+            self._PARAMETER_DEFAULT_WEIGHTING_SCHEME,
+        )
+
+        index_type = kwargs.get(
+            self._PARAMETER_KEY_INDEX_TYPE, self._PARAMETER_DEFAULT_INDEX_TYPE
+        )
+        leaf_size = kwargs.get(
+            self._PARAMETER_KEY_LEAF_SIZE, self._PARAMETER_DEFAULT_LEAF_SIZE
+        )
+        self.set_index_type(index_type, leaf_size)
+
+        # SED is currently the only supported distance metric
+        self.spec.kNearestNeighborsClassifier.nearestNeighborsIndex.squaredEuclideanDistance.MergeFromString(
+            b""
+        )
+
+    @property
+    def author(self):
+        """
+        Get the author for the KNearestNeighborsClassifier model.
+
+        Returns
+        -------
+        The author.
+        """
+        return self.spec.description.metadata.author
+
+    @author.setter
+    def author(self, author):
+        """
+        Set the author for the KNearestNeighborsClassifier model.
+
+        Parameters
+        ----------
+        author
+            The author.
+
+        Returns
+        -------
+        None
+        """
+        self.spec.description.metadata.author = author
+
+    @property
+    def license(self):
+        """
+        Get the license for the KNearestNeighborsClassifier model.
+
+        Returns
+        -------
+        The license.
+        """
+        return self.spec.description.metadata.license
+
+    @license.setter
+    def license(self, license):
+        """
+        Set the license for the KNearestNeighborsClassifier model.
+
+        Parameters
+        ----------
+        license
+            The license.
+
+        Returns
+        -------
+        None
+        """
+        self.spec.description.metadata.license = license
+
+    @property
+    def description(self):
+        """
+        Get the description for the KNearestNeighborsClassifier model.
+
+        Returns
+        -------
+        The description.
+        """
+        return self.spec.description.metadata.shortDescription
+
+    @description.setter
+    def description(self, description):
+        """
+        Set a description for the model.
+
+        Parameters
+        ----------
+        description
+            The description.
+
+        Returns
+        -------
+        None
+        """
+        self.spec.description.metadata.shortDescription = description
+
+    @property
+    def is_updatable(self):
+        """
+        Check if the KNearestNeighborsClassifier is updatable.
+
+        Returns
+        -------
+        Is updatable.
+        """
+        return self.spec.isUpdatable
+
+    @is_updatable.setter
+    def is_updatable(self, is_updatable):
+        """
+        Set whether the KNearestNeighborsClassifier is updatable.
+
+        Parameters
+        ----------
+        is_updatable
+            Boolean.
+
+        Returns
+        -------
+        None
+        """
+        self.spec.isUpdatable = is_updatable
+
+    @property
+    def weighting_scheme(self):
+        """
+        Get the weighting scheme for the KNearestNeighborsClassifier model.
+
+        Returns
+        -------
+        The weighting scheme.
+        """
+        return self._weighting_scheme
+
+    @weighting_scheme.setter
+    def weighting_scheme(self, weighting_scheme):
+        """
+        Set the weighting scheme for the KNearestNeighborsClassifier model.
+
+        Parameters
+        ----------
+        weighting_scheme
+            One of [ ``'uniform'``, ``'inverse_distance'`` ].
+
+        Returns
+        -------
+        None
+        """
+        weighting_scheme = weighting_scheme.lower()
+        if weighting_scheme not in self._VALID_WEIGHTING_SCHEMES:
+            raise TypeError("Invalid weighting scheme")
+
+        if weighting_scheme == "inverse_distance":
+            self.spec.kNearestNeighborsClassifier.inverseDistanceWeighting.MergeFromString(
+                b""
+            )
+        else:
+            self.spec.kNearestNeighborsClassifier.uniformWeighting.MergeFromString(b"")
+
+        # storing this in the object is just a convenience
+        self._weighting_scheme = weighting_scheme
+
+    @property
+    def index_type(self):
+        """
+        Get the index type for the KNearestNeighborsClassifier model.
+
+        Returns
+        -------
+        The index type.
+        """
+        return self._index_type
+
+    def set_index_type(self, index_type, leaf_size=30):
+        """
+        Set the index type for the KNearestNeighborsClassifier model.
+
+        Parameters
+        ----------
+        index_type
+            One of [ ``'linear'``, ``'kd_tree'`` ].
+
+        leaf_size
+            For kd_tree indexes, the leaf size to use (default = 30).
+
+        Returns
+        -------
+        None
+        """
+        index_type = index_type.lower()
+        if index_type not in self._VALID_INDEX_TYPES:
+            raise TypeError("Invalid index type")
+
+        if index_type == "kd_tree":
+            if leaf_size <= 0:
+                raise TypeError("leaf_size must be > 0")
+            self.spec.kNearestNeighborsClassifier.nearestNeighborsIndex.singleKdTreeIndex.leafSize = (
+                leaf_size
+            )
+        else:
+            self.spec.kNearestNeighborsClassifier.nearestNeighborsIndex.linearIndex.MergeFromString(
+                b""
+            )
+
+        # storing this in the object is just a convenience
+        self._index_type = index_type
+
+    @property
+    def leaf_size(self):
+        """
+        Get the leaf size for the KNearestNeighborsClassifier.
+
+        Returns
+        -------
+        The leaf size.
+        """
+        return (
+            self.spec.kNearestNeighborsClassifier.nearestNeighborsIndex.singleKdTreeIndex.leafSize
+        )
+
+    @leaf_size.setter
+    def leaf_size(self, leaf_size):
+        """
+        Set the leaf size for a KNearestNeighborsClassifier model. Only for kd-tree indexes.
+
+        Parameters
+        ----------
+        leaf_size
+            The leaf size.
+
+        Returns
+        -------
+        None
+        """
+        if leaf_size <= 0:
+            raise ValueError("leaf_size must be > 0")
+        self.spec.kNearestNeighborsClassifier.nearestNeighborsIndex.singleKdTreeIndex.leafSize = (
+            leaf_size
+        )
+
+    @property
+    def number_of_dimensions(self):
+        """
+        Get the number of dimensions of the input data for the
+        KNearestNeighborsClassifier model.
+
+        Returns
+        -------
+        Number of dimensions.
+        """
+        return (
+            self.spec.kNearestNeighborsClassifier.nearestNeighborsIndex.numberOfDimensions
+        )
+
+    @property
+    def number_of_neighbors(self):
+        """
+        Get the number of neighbors value for the KNearestNeighborsClassifier model.
+
+        Returns
+        -------
+        The number of neighbors default value.
+        """
+        return self.spec.kNearestNeighborsClassifier.numberOfNeighbors.defaultValue
+
+    def set_number_of_neighbors_with_bounds(
+        self, number_of_neighbors, allowed_range=None, allowed_set=None
+    ):
+        """
+        Set the numberOfNeighbors parameter for the KNearestNeighborsClassifier model.
+
+        Parameters
+        ----------
+        number_of_neighbors
+            The default number of neighbors to use for predictions.
+
+        allowed_range
+            Tuple of (``min_value``, ``max_value``) defining the range of allowed values.
+
+        allowed_set
+            Set of allowed values for the number of neighbors.
+ + Returns + ------- + None + """ + if number_of_neighbors <= 0: + raise ValueError("number_of_neighbors must be > 0") + if allowed_range is None and allowed_set is None: + raise ValueError( + "Exactly one of allowed_range or allowed_values must be provided" + ) + if allowed_range is not None and allowed_set is not None: + raise ValueError( + "Exactly one of allowed_range or allowed_values must be provided" + ) + + if allowed_range is not None: + if not isinstance(allowed_range, tuple): + raise TypeError( + "allowed_range expects a tuple of (min_value, max_value)" + ) + if len(allowed_range) != 2: + raise TypeError( + "allowed_range expects a tuple of (min_value, max_value)" + ) + + (min_value, max_value) = allowed_range + if min_value <= 0: + raise ValueError("allowed_range minimum must be > 0") + if max_value < min_value: + raise ValueError("allowed_range max_value must be >= min_value") + if number_of_neighbors < min_value or number_of_neighbors > max_value: + raise ValueError("number_of_neighbors is not within allowed range") + + self.spec.kNearestNeighborsClassifier.numberOfNeighbors.defaultValue = ( + number_of_neighbors + ) + self.spec.kNearestNeighborsClassifier.numberOfNeighbors.range.minValue = ( + min_value + ) + self.spec.kNearestNeighborsClassifier.numberOfNeighbors.range.maxValue = ( + max_value + ) + + elif allowed_set is not None: + if not isinstance(allowed_set, set): + raise TypeError("allowed_values expects 'set' type") + if len(allowed_set) == 0: + raise TypeError("allowed_values cannot be empty") + + found_match = False + for v in allowed_set: + if not self._is_valid_number_type(v): + raise TypeError("allowed_values must contain only integer types") + if v <= 0: + raise TypeError("allowed_values must only contain values > 0") + if number_of_neighbors == v: + found_match = True + + if found_match: + self.spec.kNearestNeighborsClassifier.numberOfNeighbors.defaultValue = ( + number_of_neighbors + ) + for v in allowed_set: + self.spec.kNearestNeighborsClassifier.numberOfNeighbors.set.values.append( + v + ) + else: + raise ValueError("number_of_neighbors is not a valid value") + + def number_of_neighbors_allowed_range(self): + """ + Get the range of allowed values for the numberOfNeighbors parameter. + + Returns + ------- + Tuple of (``min_value``, ``max_value``) or ``None`` if the range hasn't been set. + """ + if self.spec.kNearestNeighborsClassifier.numberOfNeighbors.HasField("range"): + return ( + self.spec.kNearestNeighborsClassifier.numberOfNeighbors.range.minValue, + self.spec.kNearestNeighborsClassifier.numberOfNeighbors.range.maxValue, + ) + return None + + def number_of_neighbors_allowed_set(self): + """ + Get the set of allowed values for the numberOfNeighbors parameter. + + Returns + ------- + Set of allowed values or ``None`` if the set of allowed values hasn't been + populated. + """ + if self.spec.kNearestNeighborsClassifier.numberOfNeighbors.HasField("set"): + spec_values = ( + self.spec.kNearestNeighborsClassifier.numberOfNeighbors.set.values + ) + allowed_values = set() + for v in spec_values: + allowed_values.add(v) + return allowed_values + return None + + def add_samples(self, data_points, labels): + """ + Add some samples to the KNearestNeighborsClassifier model. + + Parameters + ---------- + data_points + List of input data points. + + labels + List of corresponding labels. 
+ + Returns + ------- + None + """ + if len(data_points) == 0: + raise TypeError("data_points is empty") + + if len(labels) == 0: + raise TypeError("labels is empty") + + if len(data_points[0]) != self.number_of_dimensions: + raise TypeError( + "dimensionality of data_points != expected number of dimensions" + ) + + if len(data_points) != len(labels): + raise TypeError("len(data_points) != len(labels)") + + # Validate the types of the labels before adding any points. + self._validate_label_types(labels) + + for data_point in data_points: + sample = ( + self.spec.kNearestNeighborsClassifier.nearestNeighborsIndex.floatSamples.add() + ) + for feature in data_point: + sample.vector.append(feature) + + if self.spec.kNearestNeighborsClassifier.HasField("int64ClassLabels"): + for label in labels: + self.spec.kNearestNeighborsClassifier.int64ClassLabels.vector.append( + label + ) + else: + # string labels + for label in labels: + self.spec.kNearestNeighborsClassifier.stringClassLabels.vector.append( + label + ) + + def _validate_label_types(self, labels): + """ + Ensure the label types matched the expected types. + + Parameters + ---------- + spec + The spec. + + labels + The list of labels. + + Returns + ------- + None, throws a TypeError if not expected. + """ + if self.spec.kNearestNeighborsClassifier.HasField("int64ClassLabels"): + check_is_valid = KNearestNeighborsClassifierBuilder._is_valid_number_type + else: + check_is_valid = KNearestNeighborsClassifierBuilder._is_valid_text_type + for label in labels: + if not check_is_valid(label): + raise TypeError("Invalid type for label: {}".format(type(label))) + + @staticmethod + def _is_valid_text_type(obj): + """ + Checks if the object is a valid text type. + + Parameters + ---------- + obj + The object to check. + + Returns + ------- + True if a valid text type, False otherwise. + """ + return isinstance(obj, str) + + @staticmethod + def _is_valid_number_type(obj): + """ + Checks if the object is a valid number type. + + Parameters + ---------- + obj + The object to check. + + Returns + ------- + True if a valid number type, False otherwise. + """ + return isinstance(obj, (int, _np.integer)) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/__init__.py new file mode 100644 index 00000000..3e3fe7e8 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2018, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from . import (flexible_shape_utils, optimization_utils, printer, + quantization_utils, spec_inspection_utils, + update_optimizer_utils, utils) +from .builder import NeuralNetworkBuilder +from .update_optimizer_utils import AdamParams, SgdParams diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/builder.py b/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/builder.py new file mode 100644 index 00000000..8500bf71 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/builder.py @@ -0,0 +1,8857 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. 
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+"""
+Neural network builder class to construct Core ML models.
+"""
+from math import floor as _math_floor
+
+import numpy as _np
+
+from ... import (_MINIMUM_NDARRAY_SPEC_VERSION,
+                 _MINIMUM_UPDATABLE_SPEC_VERSION,
+                 _SPECIFICATION_VERSION_IOS_14)
+from ... import SPECIFICATION_VERSION as _SPECIFICATION_VERSION
+from ...proto import FeatureTypes_pb2 as _FeatureTypes_pb2
+from ...proto import Model_pb2 as _Model_pb2
+from ...proto import NeuralNetwork_pb2 as _NeuralNetwork_pb2
+from .. import datatypes
+from .._interface_management import (set_training_features,
+                                     set_transform_interface_params)
+from .quantization_utils import (_convert_array_to_nbit_quantized_bytes,
+                                 _unpack_to_bytes)
+from .spec_inspection_utils import _summarize_network_layer_info
+from .update_optimizer_utils import AdamParams, SgdParams
+
+_SUPPORTED_UPDATABLE_LAYERS = ["innerProduct", "convolution"]
+
+
+def _set_recurrent_activation(param, activation):
+    if isinstance(activation, bytes):
+        activation = activation.decode("utf8")
+
+    activation = (
+        activation.upper() if isinstance(activation, str) else activation
+    )
+
+    if activation == "SIGMOID":
+        param.sigmoid.MergeFromString(b"")
+    elif activation == "TANH":
+        param.tanh.MergeFromString(b"")
+    elif activation == "LINEAR":
+        param.linear.MergeFromString(b"")
+    elif activation == "SIGMOID_HARD":
+        param.sigmoidHard.MergeFromString(b"")
+    elif activation == "SCALED_TANH":
+        param.scaledTanh.MergeFromString(b"")
+    elif activation == "RELU":
+        param.ReLU.MergeFromString(b"")
+    else:
+        raise TypeError(
+            "Unsupported activation type with Recurrent layer: %s." % activation
+        )
+
+
+def _verify_quantization_arguments(weight=bytes(), output_channels=1, **kwargs):
+    quantization_type = kwargs.get("quantization_type", "").lower()
+    nbits = kwargs.get("nbits", 8)
+    quant_scale = kwargs.get("quant_scale", None)
+    quant_bias = kwargs.get("quant_bias", None)
+    quant_lut = kwargs.get("quant_lut", None)
+    int_8_dynamic_quantize = kwargs.get("int_8_dynamic_quantize", False)
+
+    if int_8_dynamic_quantize and nbits != 8:
+        raise ValueError("nbits must be 8 when 'int_8_dynamic_quantize' is true")
+
+    if int_8_dynamic_quantize and quant_bias is not None:
+        raise ValueError(
+            "quant_bias must be empty when 'int_8_dynamic_quantize' is true"
+        )
+
+    if int_8_dynamic_quantize and quant_scale.size != 1:
+        raise ValueError(
+            "quant_scale must be of size 1 when 'int_8_dynamic_quantize' is true"
+        )
+
+    if not isinstance(weight, bytes):
+        raise ValueError("Weight must be of type bytes() for quantization")
+
+    if quantization_type == "linear":
+        if not int_8_dynamic_quantize:
+            if quant_scale is None or quant_bias is None:
+                raise ValueError(
+                    "quant_scale and quant_bias parameters must be provided for linear quantization type"
+                )
+        if not _np.isscalar(quant_scale) and (len(quant_scale) != 1 and len(quant_scale) != output_channels):
+            raise ValueError(
+                "quant_scale should be of type float or an array of length outputChannels"
+            )
+        if not int_8_dynamic_quantize:
+            if not _np.isscalar(quant_bias) and (len(quant_bias) != 1 and len(quant_bias) != output_channels):
+                raise ValueError(
+                    "quant_bias should be of type float or an array of length outputChannels"
+                )
+    elif quantization_type == "lut":
+        if quant_lut is None:
+            raise ValueError(
+                "quant_lut must be provided for look up table quantization type"
+            )
+        if len(quant_lut) != 2 ** nbits:
+            raise ValueError("quant_lut must be an array of length 2^nbits")
+    else:
+        raise ValueError("quantization_type must be either linear or lut")
+
+    if quantization_type in ("linear", "lut"):
+        if nbits > 8 or nbits < 1:
+            raise ValueError("nbits must be between 1 and 8")
+
+
+def _fill_quantized_weights(weights_message=None, W=bytes(), use_int_8=False, **kwargs):
+    if use_int_8:
+        weights_message.int8RawValue = bytes()
+        weights_message.int8RawValue += W
+    else:
+        weights_message.rawValue = bytes()
+        weights_message.rawValue += W
+    nbits = kwargs.get("nbits", 8)
+    weights_message.quantization.numberOfBits = nbits
+    quantization_type = kwargs.get("quantization_type", "").lower()
+    if quantization_type == "linear":
+        quant_scale = kwargs.get("quant_scale", [1.0])
+        quant_bias = kwargs.get("quant_bias", [0.0])
+        weights_message.quantization.linearQuantization.scale.extend(quant_scale)
+        if not use_int_8:
+            weights_message.quantization.linearQuantization.bias.extend(quant_bias)
+    else:
+        quant_lut = kwargs.get("quant_lut", [0.0, 1.0])
+        weights_message.quantization.lookupTableQuantization.floatValue.extend(
+            quant_lut
+        )
+
+
+def _get_nn_spec(spec):
+    if spec.HasField("neuralNetworkClassifier"):
+        return spec.neuralNetworkClassifier
+    elif spec.HasField("neuralNetworkRegressor"):
+        return spec.neuralNetworkRegressor
+    elif spec.HasField("neuralNetwork"):
+        return spec.neuralNetwork
+    else:
+        return None
+
+
+def _get_lstm_weight_fields(lstm_wp):
+    """
+    Get LSTM weight fields.
+    lstm_wp: _NeuralNetwork_pb2.LSTMWeightParams
+    """
+    return [
+        lstm_wp.inputGateWeightMatrix,
+        lstm_wp.forgetGateWeightMatrix,
+        lstm_wp.blockInputWeightMatrix,
+        lstm_wp.outputGateWeightMatrix,
+        lstm_wp.inputGateRecursionMatrix,
+        lstm_wp.forgetGateRecursionMatrix,
+        lstm_wp.blockInputRecursionMatrix,
+        lstm_wp.outputGateRecursionMatrix,
+        lstm_wp.inputGateBiasVector,
+        lstm_wp.forgetGateBiasVector,
+        lstm_wp.blockInputBiasVector,
+        lstm_wp.outputGateBiasVector,
+        lstm_wp.inputGatePeepholeVector,
+        lstm_wp.forgetGatePeepholeVector,
+        lstm_wp.outputGatePeepholeVector,
+    ]
+
+
+def _fill_tensor_fields(tensor_field, ranks=None, shapes=None):
+    """
+    Fill the tensor fields.
+    ranks - ``None`` or a list of integers, one per input/output
+    shapes - ``None`` or a list of shapes, one per input/output. Each shape is a list or tuple
+    """
+    if ranks is None and shapes is None:
+        return
+
+    if ranks is None and shapes is not None:
+        ranks = [len(shape) for shape in shapes]
+
+    # Fill ranks only
+    for rank in ranks:
+        if rank is None:
+            continue
+
+        if not _np.issubclass_(type(rank), (int, _np.integer)):
+            rank = -1  # Variable rank set to -1
+
+        field = tensor_field.add()
+        field.rank = rank
+
+    if ranks is not None and shapes is not None:
+        if len(ranks) != len(shapes):
+            raise ValueError("Number of ranks does not match number of shapes for tensor field.")
+
+        for i in range(0, len(ranks)):
+            shape = shapes[i]
+            rank = ranks[i]
+
+            # Ignore incomplete info
+            if shape is None or rank is None:
+                continue
+
+            # Raise error on inconsistent input
+            if rank != len(shape):
+                raise ValueError("Rank does not match the length of the shape")
+
+            # Add the shape to the proto; symbolic dimensions become -1
+            for s in shape:
+                if not _np.issubclass_(type(s), (int, _np.integer)):
+                    s = -1  # Symbolic shape set to -1
+                tensor_field[i].dimValue.append(s)
+
+
+class NeuralNetworkBuilder:
+    """
+    Neural network builder class to construct Core ML models.
+ + The NeuralNetworkBuilder constructs a Core ML neural network specification + layer by layer. The layers should be added in such an order that the inputs + to each layer (referred to as blobs of each layer) have been previously + defined. The builder can also set preprocessing steps to handle + specialized input formats (such as images), and set class labels for neural + network classifiers. + + Refer to the protobuf messages in the specification (NeuralNetwork.proto) + for more details. + + Examples + -------- + .. sourcecode:: python + + from coremltools.models.neural_network import datatypes, NeuralNetworkBuilder + from coremltools.models.utils import save_spec + + # Create a neural network binary classifier that classifies + # 3-dimensional data points + # Specify input and output dimensions + >>> input_dim = (3,) + >>> output_dim = (2,) + + # Specify input and output features + >>> input_features = [('data', datatypes.Array(*input_dim))] + >>> output_features = [('probs', datatypes.Array(*output_dim))] + + # Build a simple neural network with 1 inner product layer + >>> builder = NeuralNetworkBuilder(input_features, output_features) + >>> builder.add_inner_product(name='ip_layer', W=weights, b=bias, input_channels=3, output_channels=2, + ... has_bias=True, input_name='data', output_name='probs') + + # save the spec by the builder + >>> save_spec(builder.spec, 'network.mlmodel') + """ + + def __init__( + self, + input_features=None, + output_features=None, + mode=None, + spec=None, + nn_spec=None, + disable_rank5_shape_mapping=False, + training_features=None, + use_float_arraytype=False, + ): + """ + Construct a NeuralNetworkBuilder object to build an MLModel specification with a + model interface, or a NeuralNetwork protobuf message, either from scratch or using an + existing specification. + + Parameters + ---------- + + input_features: [(str, datatypes.Array)] or None + List of input feature of the network. + Each feature is a ``(name, array)`` tuple, where ``name`` is the + name of the feature, and ``array`` is a ``datatype.Array`` object + describing the feature type. + + * When ``spec`` is ``None`` (building from scratch), ``input_features`` must not be ``None``. + + output_features: [(str, datatypes.Array or None)] or None + List of output feature of the network. Each feature is a + ``(name, array)`` tuple, where ``name`` is the name of the feature, + and ``array`` is a ``datatypes.Array`` object describing the feature type. + + * The ``array`` can be ``None`` if not known. + + * When ``spec`` is ``None`` (building from scratch), ``output_features`` must not be ``None``. + + mode: str ('classifier', 'regressor' or None) + Mode (one of ``'classifier'``, ``'regressor'``, or ``None``). + + When ``mode = 'classifier'``, a NeuralNetworkClassifier spec will be + constructed. When ``mode = 'regressor'``, a NeuralNetworkRegressor + spec will be constructed. + + disable_rank5_shape_mapping: bool + Only applicable for neural networks. + + If True, inputs are no longer forced to map to rank 5 tensors + (rank is equal to the length of the shape of the tensor). + Instead, for multi-array inputs ``"EXACT_ARRAY_MAPPING"`` mapping is used, whereas + for image inputs ``"RANK4_IMAGE_MAPPING"`` is used. For details, + see description of enums ``NeuralNetworkMultiArrayShapeMapping`` + and ``NeuralNetworkImageShapeMapping`` in NeuralNetwork.proto. + + When ``spec`` is not ``None``, this argument will be ignored. 
+ + spec: None or coremltools.proto.Model_pb2 + If ``None``, a new MLModel spec will be created by the builder with + input and output features. + + Otherwise, the builder will continue to build on ``spec``. + This is useful when the MLModel is built incrementally. + + nn_spec: None or coremltools.proto.NeuralNetwork_pb2 + If ``None``, a new, empty NeuralNetwork proto will be created for spec. + + If ``nn_spec`` is not ``None`` and ``spec`` is ``None``, the builder will + build a NeuralNetwork spec without wrapping it within an MLModel. + This is useful to create nested NeuralNetworks for models + with control flow operations. + + use_float_arraytype: bool + If true, the datatype of input/output multiarrays is set to Float32 instead + of double. + + Examples + -------- + .. sourcecode:: python + + # Construct a builder that builds a neural network classifier with a 299 x 299 x 3 + # dimensional input and 1000 dimensional output + >>> input_features = [('data', datatypes.Array((299, 299, 3)))] + >>> output_features = [('probs', datatypes.Array((1000,)))] + >>> builder = NeuralNetworkBuilder(input_features, output_features, mode='classifier') + + See Also + -------- + set_input, set_output, set_class_labels + """ + self.spec = spec + self.nn_spec = nn_spec + self._disable_rank5_shape_mapping = disable_rank5_shape_mapping + self.layers = [] + self.layer_specs = {} + self.named_parameters = [] + self.rank_dict = {} + + if self.spec is not None: # Existing spec + if self.nn_spec is None: + self.nn_spec = _get_nn_spec(self.spec) + for layer_spec in self.nn_spec.layers: + self.layers.append(layer_spec.name) + self.layer_specs[layer_spec.name] = layer_spec + else: + # Both spec and nn_spec are not None + raise ValueError( + "Attempting to assign another NeuralNetwork Spec to an existing MLModel Spec" + ) + if input_features is None and output_features is None: + return + + if ( + self.spec is None and self.nn_spec is not None + ): # Building nested Neural Network + return + + # Set the interface params. 
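+        # From here on, the builder constructs a fresh spec: create the Model
+        # proto if needed, pick a specification version (the ND-array minimum
+        # version when rank-5 shape mapping is disabled), substitute a dummy
+        # one-element array type for outputs whose type is unknown, and then
+        # populate the description's input/output interface.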
+        if self.spec is None:
+            self.spec = _Model_pb2.Model()
+            self.spec.specificationVersion = _SPECIFICATION_VERSION
+            if disable_rank5_shape_mapping:
+                self.spec.specificationVersion = _MINIMUM_NDARRAY_SPEC_VERSION
+
+        # When an output feature's type is None, use a dummy sized type
+        out_features_with_shape = []
+        for out_feature in output_features:
+            feat_name, feat_type = out_feature
+            if feat_type is None:
+                out_features_with_shape.append((str(feat_name), datatypes.Array(1)))
+            else:
+                out_features_with_shape.append(out_feature)
+
+        # Set interface inputs and outputs
+        if len(self.spec.description.input) > 0:
+            del self.spec.description.input[:]
+        if len(self.spec.description.output) > 0:
+            del self.spec.description.output[:]
+
+        if use_float_arraytype:
+            array_datatype = _Model_pb2.ArrayFeatureType.FLOAT32
+        else:
+            array_datatype = _Model_pb2.ArrayFeatureType.DOUBLE
+
+        self.spec = set_transform_interface_params(
+            self.spec,
+            input_features,
+            out_features_with_shape,
+            training_features=training_features,
+            array_datatype=array_datatype,
+        )
+
+        for input in input_features:
+            self.rank_dict[input[0]] = len(input[1].dimensions)
+
+        for idx, output_feature in enumerate(output_features):
+            if output_features[idx][1] is None:
+                self.spec.description.output[idx].type.multiArrayType.ClearField(
+                    "shape"
+                )
+
+        if self.nn_spec is None:
+            if mode == "classifier":
+                nn_spec = self.spec.neuralNetworkClassifier
+            elif mode == "regressor":
+                nn_spec = self.spec.neuralNetworkRegressor
+            else:
+                nn_spec = self.spec.neuralNetwork
+            self.nn_spec = nn_spec
+
+        if disable_rank5_shape_mapping and self.nn_spec:
+            self.nn_spec.arrayInputShapeMapping = _NeuralNetwork_pb2.NeuralNetworkMultiArrayShapeMapping.Value(
+                "EXACT_ARRAY_MAPPING"
+            )
+            self.nn_spec.imageInputShapeMapping = _NeuralNetwork_pb2.NeuralNetworkImageShapeMapping.Value(
+                "RANK4_IMAGE_MAPPING"
+            )
+
+    def set_input(self, input_names, input_dims):
+        """
+        Set the inputs of the network spec.
+
+        Parameters
+        ----------
+        input_names: list of str
+            The input names of the network.
+
+        input_dims: [tuple]
+            The input dimensions of the network. The ordering of ``input_dims``
+            is the same as ``input_names``.
+
+        Examples
+        --------
+        .. sourcecode:: python
+
+            # Set the neural network spec inputs to be 3 dimensional vector data1 and
+            # 4 dimensional vector data2.
+            >>> builder.set_input(input_names=['data1', 'data2'], input_dims=[(3,), (4,)])
+
+        See Also
+        --------
+        set_output, set_class_labels
+        """
+
+        if len(input_names) != len(input_dims):
+            raise ValueError("input_names and input_dims must be the same length.")
+
+        spec = self.spec
+        for idx, dim in enumerate(input_dims):
+            if (
+                hasattr(self, "_disable_rank5_shape_mapping")
+                and self._disable_rank5_shape_mapping
+            ):
+                input_shape = dim
+            else:
+                if len(dim) == 3:
+                    input_shape = (dim[0], dim[1], dim[2])
+                elif len(dim) == 2:
+                    input_shape = (dim[1],)
+                elif len(dim) == 1:
+                    input_shape = tuple(dim)
+                else:
+                    raise RuntimeError(
+                        "Attempting to add a neural network "
+                        + "input with rank "
+                        + str(len(dim))
+                        + ". All networks should take inputs of rank 1, 2, or 3."
+                    )
+
+            spec.description.input[idx].type.multiArrayType.ClearField("shape")
+            spec.description.input[idx].type.multiArrayType.shape.extend(input_shape)
+
+            # TODO: if it's an embedding, this should be integer
+            spec.description.input[
+                idx
+            ].type.multiArrayType.dataType = _Model_pb2.ArrayFeatureType.DOUBLE
+
+            spec.description.input[idx].name = input_names[idx]
+
+    def set_output(self, output_names, output_dims):
+        """
+        Set the outputs of the network spec.
+
+        Parameters
+        ----------
+        output_names: list of str
+            The output names of the network.
+
+        output_dims: [tuple]
+            The output dimensions of the network. The ordering of ``output_dims`` is the same
+            as ``output_names``.
+
+        Examples
+        --------
+        .. sourcecode:: python
+
+            # Set the neural network spec outputs to be 3 dimensional vector feature1 and
+            # 4 dimensional vector feature2.
+            >>> builder.set_output(output_names=['feature1', 'feature2'], output_dims=[(3,), (4,)])
+
+        See Also
+        --------
+        set_input, set_class_labels
+        """
+
+        if len(output_names) != len(output_dims):
+            raise ValueError("output_names and output_dims must be the same length.")
+
+        spec = self.spec
+        for idx, dim in enumerate(output_dims):
+            spec.description.output[idx].type.multiArrayType.ClearField("shape")
+            spec.description.output[idx].type.multiArrayType.shape.extend(dim)
+            spec.description.output[
+                idx
+            ].type.multiArrayType.dataType = _Model_pb2.ArrayFeatureType.DOUBLE
+
+            spec.description.output[idx].name = output_names[idx]
+
+    def set_training_input(self, training_input):
+        """
+        Set the training inputs of the network spec.
+
+        Parameters
+        ----------
+        training_input: [tuple]
+            The training input names and type of the network.
+
+        Examples
+        --------
+        .. sourcecode:: python
+
+            # Set the neural network spec training inputs to be 3 dimensional vector for 'input' and
+            # Double for 'target'.
+            >>> builder.set_training_input([('input', datatypes.Array(3)), ('target', 'Double')])
+        """
+        spec = self.spec
+        set_training_features(spec, training_input)
+
+    def set_class_labels(
+        self, class_labels, predicted_feature_name="classLabel", prediction_blob=""
+    ):
+        """
+        Set class labels on the model spec to make it a neural network classifier.
+
+        Parameters
+        ----------
+        class_labels: list of int or list of str
+            A list of integers or strings that map the index of the output of a
+            neural network to labels in a classifier.
+
+        predicted_feature_name: str
+            Name of the output feature for the class labels exposed in the
+            Core ML neural network classifier, default: ``'classLabel'``.
+
+        prediction_blob: str
+            If provided, then this is the name of the neural network blob which
+            generates the probabilities for each class label (typically the output
+            of a softmax layer). If not provided, then the last output layer is
+            assumed.
+
+        See Also
+        --------
+        set_input, set_output, set_pre_processing_parameters
+        """
+        spec = self.spec
+        nn_spec = self.nn_spec
+
+        if len(spec.description.output) == 0:
+            raise ValueError(
+                "Model should have at least one output (the probabilities) to automatically make it a classifier."
+            )
+        probOutput = spec.description.output[0]
+        probOutput.type.dictionaryType.MergeFromString(b"")
+        if len(class_labels) == 0:
+            return
+        class_type = type(class_labels[0])
+        if not isinstance(class_labels[0], (int, str)):
+            raise TypeError(
+                "Class labels must be of type Integer or String. (not %s)" % class_type
+            )
+
+        spec.description.predictedProbabilitiesName = probOutput.name
+        spec.description.predictedFeatureName = predicted_feature_name
+
+        classLabel = spec.description.output.add()
+        classLabel.name = predicted_feature_name
+        if class_type == int:
+            nn_spec.ClearField("int64ClassLabels")
+            probOutput.type.dictionaryType.int64KeyType.MergeFromString(b"")
+            classLabel.type.int64Type.MergeFromString(b"")
+            for c in class_labels:
+                nn_spec.int64ClassLabels.vector.append(c)
+        else:
+            nn_spec.ClearField("stringClassLabels")
+            probOutput.type.dictionaryType.stringKeyType.MergeFromString(b"")
+            classLabel.type.stringType.MergeFromString(b"")
+            for c in class_labels:
+                nn_spec.stringClassLabels.vector.append(c)
+
+        if prediction_blob != "":
+            # correctness here will be checked in the validator -- i.e. to
+            # make sure this string corresponds to a real blob
+            nn_spec.labelProbabilityLayerName = prediction_blob
+        else:  # not provided
+            # assume it's the last blob produced in the network
+            nn_spec.labelProbabilityLayerName = nn_spec.layers[-1].output[0]
+
+    def set_optional_input(self, input_idx, value=None, format="float"):
+        """
+        Marks the given input as optional.
+        Optionally, sets a default value for the optional input if ``value`` is not ``None``.
+
+        Parameters
+        ----------
+        input_idx: int
+            Index of the input to be marked optional and filled with a default value.
+        value: int/double/float/None
+            Value to be used as the default.
+        format: str
+            Format of the default value.
+            Must be one of ``'float'``, ``'double'``, or ``'int'``.
+        """
+        if input_idx >= len(self.spec.description.input):
+            msg = (
+                str(input_idx)
+                + " out of "
+                + str(len(self.spec.description.input))
+                + " inputs!"
+            )
+            raise ValueError("Setting invalid input as optional! {}".format(msg))
+        self.spec.description.input[input_idx].type.isOptional = True
+        if value is None:
+            return
+        # Default value is supported from CoreML 4 onwards.
+        self.spec.specificationVersion = max(
+            self.spec.specificationVersion, _SPECIFICATION_VERSION_IOS_14
+        )
+        format = format.lower()
+        if format == "float":
+            self.spec.description.input[
+                input_idx
+            ].type.multiArrayType.floatDefaultValue = value
+        elif format == "double":
+            self.spec.description.input[
+                input_idx
+            ].type.multiArrayType.doubleDefaultValue = value
+        elif format == "int":
+            self.spec.description.input[
+                input_idx
+            ].type.multiArrayType.intDefaultValue = value
+        else:
+            raise ValueError(
+                "Incorrect format for optional inputs! Expecting int/float/double, got {}!".format(
+                    format
+                )
+            )
+
+    def add_optionals(self, optionals_in, optionals_out):
+        """
+        Add optional inputs and outputs to the model spec.
+
+        Parameters
+        ----------
+        optionals_in: [(str, int or tuple)]
+            List of ``(name, dimension)`` tuples for the inputs that are optional.
+
+        optionals_out: [(str, int or tuple)]
+            List of ``(name, dimension)`` tuples for the outputs that are optional.
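+
+        Examples
+        --------
+        .. sourcecode:: python
+
+            # Illustrative call with hypothetical feature names: mark a
+            # 3-dimensional input and a 2-dimensional output as optional.
+            >>> builder.add_optionals([('optional_in', 3)], [('optional_out', 2)])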
+
+        See Also
+        --------
+        set_input, set_output
+
+        """
+        spec = self.spec
+        if (not optionals_in) and (not optionals_out):
+            return
+
+        input_types = [
+            datatypes.Array(dim) if isinstance(dim, int) else datatypes.Array(*dim)
+            for (name, dim) in optionals_in
+        ]
+        output_types = []
+        for name, dim in optionals_out:
+            if not dim:
+                output_types.append(None)
+            elif isinstance(dim, int):
+                output_types.append(datatypes.Array(dim))
+            else:
+                output_types.append(datatypes.Array(*dim))
+
+        input_names = [str(name) for (name, dim) in optionals_in]
+        output_names = [str(name) for (name, dim) in optionals_out]
+
+        input_features = list(zip(input_names, input_types))
+        output_features = list(zip(output_names, output_types))
+
+        len_before_in = len(spec.description.input)
+        len_before_out = len(spec.description.output)
+
+        # this appends to the existing model interface
+        set_transform_interface_params(spec, input_features, output_features, True)
+
+        # add types for any extra hidden inputs
+        for idx in range(len_before_in, len(spec.description.input)):
+            spec.description.input[
+                idx
+            ].type.multiArrayType.dataType = _Model_pb2.ArrayFeatureType.DOUBLE
+        for idx in range(len_before_out, len(spec.description.output)):
+            spec.description.output[
+                idx
+            ].type.multiArrayType.dataType = _Model_pb2.ArrayFeatureType.DOUBLE
+
+
+    def _check_fp16_weight_params_lstms(self, lstm_wp, has_peephole=True):
+        """
+        Checks if an LSTM layer has at least one ``weight_param`` which is in FP16 format.
+
+        Parameters
+        ----------
+        lstm_wp: the LSTM weight parameters (``_NeuralNetwork_pb2.LSTMWeightParams``).
+        has_peephole: whether the LSTM has peephole vectors.
+        """
+        if len(lstm_wp.inputGateWeightMatrix.float16Value) > 0:
+            return True
+        if len(lstm_wp.forgetGateWeightMatrix.float16Value) > 0:
+            return True
+        if len(lstm_wp.blockInputWeightMatrix.float16Value) > 0:
+            return True
+        if len(lstm_wp.outputGateWeightMatrix.float16Value) > 0:
+            return True
+        if len(lstm_wp.inputGateRecursionMatrix.float16Value) > 0:
+            return True
+        if len(lstm_wp.forgetGateRecursionMatrix.float16Value) > 0:
+            return True
+        if len(lstm_wp.blockInputRecursionMatrix.float16Value) > 0:
+            return True
+        if len(lstm_wp.outputGateRecursionMatrix.float16Value) > 0:
+            return True
+
+        if has_peephole:
+            if len(lstm_wp.inputGatePeepholeVector.float16Value) > 0:
+                return True
+            if len(lstm_wp.forgetGatePeepholeVector.float16Value) > 0:
+                return True
+            if len(lstm_wp.outputGatePeepholeVector.float16Value) > 0:
+                return True
+
+        return False
+
+
+    def _check_fp16_weight_param_exists(self, layers):
+        """
+        Checks if the network has at least one ``weight_param`` which is in FP16 format.
+
+        Parameters
+        ----------
+        layers: list of nn_spec.layer
+            List of layers.
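+
+        Returns
+        -------
+        True if at least one layer carries FP16 weight parameters, False
+        otherwise.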
+        """
+
+        for layer in layers:
+            layer_type = layer.WhichOneof("layer")
+
+            # Convolution
+            if layer_type == "convolution":
+                if len(layer.convolution.weights.float16Value) > 0:
+                    return True
+                if layer.convolution.hasBias and len(layer.convolution.bias.float16Value) > 0:
+                    return True
+            # Batchnorm
+            elif layer_type == "batchnorm":
+                if len(layer.batchnorm.mean.float16Value) > 0:
+                    return True
+
+            # InnerProduct
+            elif layer_type == "innerProduct":
+                if len(layer.innerProduct.weights.float16Value) > 0:
+                    return True
+                if layer.innerProduct.hasBias and len(layer.innerProduct.bias.float16Value) > 0:
+                    return True
+
+            # BatchedMatmul
+            elif layer_type == "batchedMatmul":
+                if len(layer.batchedMatmul.weights.float16Value) > 0:
+                    return True
+                if layer.batchedMatmul.hasBias and len(layer.batchedMatmul.bias.float16Value) > 0:
+                    return True
+
+            # Embedding layer
+            elif layer_type == "embedding":
+                if len(layer.embedding.weights.float16Value) > 0:
+                    return True
+                if layer.embedding.hasBias and len(layer.embedding.bias.float16Value) > 0:
+                    return True
+
+            # Embedding ND layer
+            elif layer_type == "embeddingND":
+                if len(layer.embeddingND.weights.float16Value) > 0:
+                    return True
+                if layer.embeddingND.hasBias and len(layer.embeddingND.bias.float16Value) > 0:
+                    return True
+
+            # Scale layer
+            elif layer_type == "scale":
+                if len(layer.scale.shapeScale.float16Value) > 0:
+                    return True
+                if layer.scale.hasBias and len(layer.scale.bias.float16Value) > 0:
+                    return True
+
+            # Bias layer
+            elif layer_type == "bias":
+                if len(layer.bias.bias.float16Value) > 0:
+                    return True
+
+            # LoadConstant layer
+            elif layer_type == "loadConstant":
+                if len(layer.loadConstant.data.float16Value) > 0:
+                    return True
+
+            # Simple Recurrent
+            elif layer_type == "simpleRecurrent":
+                if len(layer.simpleRecurrent.weightMatrix.float16Value) > 0:
+                    return True
+                if layer.simpleRecurrent.hasBiasVector and len(layer.simpleRecurrent.biasVector.float16Value) > 0:
+                    return True
+
+            # GRU
+            elif layer_type == "gru":
+                if len(layer.gru.updateGateWeightMatrix.float16Value) > 0:
+                    return True
+                if layer.gru.hasBiasVectors and len(layer.gru.outputGateBiasVector.float16Value) > 0:
+                    return True
+
+            # uniDirectionalLSTM Layers
+            elif layer_type == "uniDirectionalLSTM":
+                if self._check_fp16_weight_params_lstms(
+                    lstm_wp=layer.uniDirectionalLSTM.weightParams,
+                    has_peephole=layer.uniDirectionalLSTM.params.hasPeepholeVectors,
+                ):
+                    return True
+
+            # biDirectionalLSTM Layers
+            elif layer_type == "biDirectionalLSTM":
+                for lstm_wp in layer.biDirectionalLSTM.weightParams:
+                    if self._check_fp16_weight_params_lstms(lstm_wp=lstm_wp,
+                                                            has_peephole=layer.biDirectionalLSTM.params.hasPeepholeVectors):
+                        return True
+
+            # branch Layers
+            elif layer_type == "branch":
+                if len(layer.branch.ifBranch.float16Value) > 0:
+                    return True
+                if len(layer.branch.elseBranch.float16Value) > 0:
+                    return True
+
+            # loop Layers
+            elif layer_type == "loop":
+                if len(layer.loop.conditionNetwork.float16Value) > 0:
+                    return True
+                if len(layer.loop.bodyNetwork.float16Value) > 0:
+                    return True
+
+        return False
+
+    def make_updatable(self, trainables):
+        """
+        Make the builder's NeuralNetwork spec updatable.
+
+        Parameters
+        ----------
+        trainables: list of str
+            List of layer names to be set trainable.
+        """
+        if self.spec is None:
+            return
+
+        # check if any layer weights/biases are in FP16 format
+        if self._check_fp16_weight_param_exists(self.nn_spec.layers):
+            raise ValueError(
+                "This model has at least one layer with FP16 weights or bias formats. These networks will "
+                "always be optimized to a full FP16 model format, which cannot be marked "
+                "updatable. Either make sure the model has no FP16 WeightParams, or split the "
+                "network into two models, with the updatable part as a separate model without FP16 "
+                "WeightParams. Note that an updatable pipeline model can only have its last sub-model "
+                "marked as updatable."
+            )
+
+        self.spec.isUpdatable = True
+
+        if (
+            not self.spec.specificationVersion
+            or self.spec.specificationVersion < _MINIMUM_UPDATABLE_SPEC_VERSION
+        ):
+            self.spec.specificationVersion = _MINIMUM_UPDATABLE_SPEC_VERSION
+
+        self.nn_spec.updateParams.MergeFromString(b"")
+        self.set_shuffle()
+
+        for trainable in trainables:
+            if trainable not in self.layer_specs:
+                raise ValueError("Layer %s does not exist." % trainable)
+            spec_layer = self.layer_specs[trainable]
+            spec_layer_type = spec_layer.WhichOneof("layer")
+            if spec_layer_type not in _SUPPORTED_UPDATABLE_LAYERS:
+                raise ValueError(
+                    "Layer %s is not supported to be marked as updatable. Only %s layers "
+                    "are supported to be marked updatable."
+                    % (trainable, _SUPPORTED_UPDATABLE_LAYERS)
+                )
+            spec_layer.isUpdatable = True
+            typed_layer = getattr(spec_layer, spec_layer.WhichOneof("layer"))
+            for fd in typed_layer.DESCRIPTOR.fields:
+                field = getattr(typed_layer, fd.name)
+                if type(field) == _NeuralNetwork_pb2.LSTMWeightParams:
+                    wfs = _get_lstm_weight_fields(field)
+                    for wf in wfs:
+                        wf.isUpdatable = True
+                elif type(field) == _NeuralNetwork_pb2.WeightParams:
+                    field.isUpdatable = True
+                else:
+                    pass
+
+    def set_categorical_cross_entropy_loss(self, name, input):
+        r"""
+        Categorical Cross Entropy is used for single label categorization
+        (only one category is applicable for each data point).
+
+        Parameters
+        ----------
+        name: str
+            The name of the loss layer.
+        input: str
+            The name of the input. The ``input`` should be a vector of length N
+            representing the distribution over N categories; it must be the
+            output of a softmax layer.
+
+        Notes
+        -----
+
+        .. math::
+            Loss_{CCE}(input, target) = -\sum_{i=1}^{N} (target == i) \log(input[i]) = -\log(input[target])
+        """
+        if self.spec is None:
+            return
+
+        if name in self.layer_specs:
+            raise ValueError("Name %s is already used." % name)
+
+        if input is None:
+            raise ValueError("Loss Layer input must be specified")
+
+        target = input + "_true"
+
+        if len(self.nn_spec.layers) < 1:
+            raise ValueError(
+                "Loss layer (%s) cannot be attached to an empty model." % name
+            )
+
+        # validate input: it must be the output of a softmax layer
+        input_validated = False
+        for _, layer in enumerate(self.nn_spec.layers[::-1]):
+            layer_outputs = list(layer.output)
+            layer_type = layer.WhichOneof("layer")
+
+            if input in layer_outputs and layer_type == "softmax":
+                input_validated = True
+                break
+
+        if not input_validated:
+            raise ValueError(
+                "Categorical Cross Entropy loss layer input (%s) must be a softmax layer output."
+                % input
+            )
+
+        # validate target
+        output_names = [x.name for x in self.spec.description.output]
+        if target in output_names:
+            raise ValueError(
+                "Loss layer target (%s) must not be a model output." % target
+            )
+
+        updating_classifier = False
+        predicted_probabilities_name = self.spec.description.predictedProbabilitiesName
+        predicted_feature_name = self.spec.description.predictedFeatureName
+        if (
+            self.spec.HasField("neuralNetworkClassifier")
+            and input == predicted_probabilities_name
+        ):
+            updating_classifier = True
+
+        loss_layer = self.nn_spec.updateParams.lossLayers.add()
+        self.layers.append(name)
+        self.layer_specs[name] = loss_layer
+        loss_layer.name = name
+        loss_layer.categoricalCrossEntropyLossLayer.input = input
+        loss_layer.categoricalCrossEntropyLossLayer.target = target
+
+        training_inputs = self.spec.description.trainingInput
+        training_inputs.extend(self.spec.description.input)
+        training_input = training_inputs.add()
+
+        if updating_classifier:
+            training_input.name = predicted_feature_name
+            classifier_output_type = [
+                x.type
+                for x in self.spec.description.output
+                if x.name == predicted_feature_name
+            ]
+
+            model_type = classifier_output_type[0].WhichOneof("Type")
+            if model_type == "stringType":
+                datatypes._set_datatype(training_input.type, datatypes.String())
+            elif model_type == "int64Type":
+                datatypes._set_datatype(training_input.type, datatypes.Int64())
+        else:
+            training_input.name = target
+            datatypes._set_datatype(training_input.type, datatypes.Array(1))
+            training_input.type.multiArrayType.dataType = (
+                _Model_pb2.ArrayFeatureType.INT32
+            )
+
+        print(
+            "Now adding input {} as target for categorical cross-entropy loss layer.".format(
+                target
+            )
+        )
+
+    def set_mean_squared_error_loss(self, name, input_feature=None):
+        """
+        Set a mean squared error loss layer on the model.
+
+        Parameters
+        ----------
+        name: str
+            The name of the loss layer.
+
+        input_feature: (str, datatypes.Array) or None
+            The input feature of the loss layer. The feature is a
+            ``(name, array)`` tuple, where ``name`` is the name of the model
+            tensor the loss will be attached to, and ``array`` is a
+            ``datatypes.Array`` object describing the shape of that tensor.
+            Both the name and the array's shape must be provided in the tuple.
+
+        Examples
+        --------
+
+        >>> feature = ('output_tensor', datatypes.Array((299, 299, 3)))
+        """
+        if self.spec is None:
+            return
+
+        if name in self.layer_specs:
+            raise ValueError("Name %s is already used." % name)
+
+        if input_feature is None:
+            raise ValueError("Loss Layer input must be specified")
+
+        if not isinstance(input_feature, tuple):
+            raise ValueError(
+                "Loss layer input must be a tuple of type (string, datatype)"
+            )
+
+        (fname, ftype) = input_feature
+        if not isinstance(fname, str):
+            raise ValueError(
+                "Loss layer input must be a tuple of type (string, datatype)"
+            )
+        if not isinstance(ftype, datatypes.Array):
+            raise ValueError(
+                "Loss layer input must be a tuple of type (string, datatype)"
+            )
+
+        target = fname + "_true"
+
+        loss_layer = self.nn_spec.updateParams.lossLayers.add()
+        self.layers.append(name)
+        self.layer_specs[name] = loss_layer
+        loss_layer.name = name
+
+        output_names = [x.name for x in self.spec.description.output]
+        if target in output_names:
+            raise ValueError(
+                "Loss layer target (%s) must not be a model output" % target
+            )
+
+        loss_layer.meanSquaredErrorLossLayer.input = input_feature[0]
+        loss_layer.meanSquaredErrorLossLayer.target = target
+
+        training_inputs = self.spec.description.trainingInput
+        training_inputs.extend(self.spec.description.input)
+        training_input = training_inputs.add()
+        training_input.name = target
+
+        datatypes._set_datatype(training_input.type, input_feature[1])
+        training_input.type.multiArrayType.dataType = _Model_pb2.ArrayFeatureType.DOUBLE
+        print(
+            "Now adding input {} as target for mean squared error loss layer.".format(
+                target
+            )
+        )
+
+    def set_sgd_optimizer(self, sgd_params):
+        """
+        Set the SGD optimizer (learning rate, mini batch size, and momentum)
+        on the model's update parameters.
+        """
+        if self.spec is None:
+            return
+
+        if not isinstance(sgd_params, SgdParams):
+            raise Exception("sgd_params must be an instance of SgdParams")
+
+        sgd_optimizer = self.nn_spec.updateParams.optimizer.sgdOptimizer
+
+        # set learning rate
+        sgd_optimizer.learningRate.defaultValue = sgd_params.lr.value
+        sgd_optimizer.learningRate.range.minValue = sgd_params.lr.min
+        sgd_optimizer.learningRate.range.maxValue = sgd_params.lr.max
+
+        # set mini batch size
+        sgd_optimizer.miniBatchSize.defaultValue = sgd_params.batch.value
+        sgd_optimizer.miniBatchSize.set.values.extend(sgd_params.batch.allowed_set)
+
+        # set momentum
+        sgd_optimizer.momentum.defaultValue = sgd_params.momentum.value
+        sgd_optimizer.momentum.range.minValue = sgd_params.momentum.min
+        sgd_optimizer.momentum.range.maxValue = sgd_params.momentum.max
+
+    def set_adam_optimizer(self, adam_params):
+        """
+        Set the Adam optimizer (learning rate, mini batch size, beta1, beta2,
+        and epsilon) on the model's update parameters.
+        """
+        if self.spec is None:
+            return
+
+        if not isinstance(adam_params, AdamParams):
+            raise Exception("adam_params must be an instance of AdamParams")
+
+        adam_optimizer = self.nn_spec.updateParams.optimizer.adamOptimizer
+
+        # set learning rate
+        adam_optimizer.learningRate.defaultValue = adam_params.lr.value
+        adam_optimizer.learningRate.range.minValue = adam_params.lr.min
+        adam_optimizer.learningRate.range.maxValue = adam_params.lr.max
+
+        # set mini batch size
+        adam_optimizer.miniBatchSize.defaultValue = adam_params.batch.value
+        adam_optimizer.miniBatchSize.set.values.extend(adam_params.batch.allowed_set)
+
+        # set beta1
+        adam_optimizer.beta1.defaultValue = adam_params.beta1.value
+        adam_optimizer.beta1.range.minValue = adam_params.beta1.min
+        adam_optimizer.beta1.range.maxValue = adam_params.beta1.max
+
+        # set beta2
+        adam_optimizer.beta2.defaultValue = adam_params.beta2.value
+        adam_optimizer.beta2.range.minValue = adam_params.beta2.min
+        adam_optimizer.beta2.range.maxValue = adam_params.beta2.max
+
+        # set eps
+        adam_optimizer.eps.defaultValue = adam_params.eps.value
+        adam_optimizer.eps.range.minValue = adam_params.eps.min
+        adam_optimizer.eps.range.maxValue = adam_params.eps.max
+
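+    # Illustrative usage sketch for the two optimizer setters above; the
+    # numeric values are hypothetical and assume the SgdParams/AdamParams
+    # containers from update_optimizer_utils accept (lr, batch, momentum)
+    # and (lr, batch, beta1, beta2, eps) keyword arguments respectively:
+    #
+    #     sgd_params = SgdParams(lr=0.01, batch=10, momentum=0.0)
+    #     builder.set_sgd_optimizer(sgd_params)
+    #
+    #     adam_params = AdamParams(lr=0.01, batch=10, beta1=0.9, beta2=0.999, eps=1e-8)
+    #     builder.set_adam_optimizer(adam_params)
+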
+    def set_epochs(self, epochs=1, allowed_set=None):
+        """
+        Set the default number of training epochs, and optionally the set of
+        allowed epoch values, on the model's update parameters.
+        """
+        if self.spec is None:
+            return
+
+        self.nn_spec.updateParams.epochs.defaultValue = epochs
+
+        if allowed_set is None:
+            self.nn_spec.updateParams.epochs.set.values.extend([epochs])
+        else:
+            self.nn_spec.updateParams.epochs.set.values.extend(allowed_set)
+
+    def set_shuffle(self, seed=None):
+        """
+        Enable shuffling of the training data, optionally with a fixed integer
+        seed.
+        """
+        if self.spec is None:
+            return
+
+        # Validate that the seed passed in is an integer
+        if seed is not None:
+            if not isinstance(seed, int):
+                raise TypeError("Shuffle seed value must be an integer")
+
+        self.nn_spec.updateParams.shuffle.defaultValue = True
+        if seed is not None:
+            self.nn_spec.updateParams.seed.defaultValue = seed
+
+    def _add_generic_layer(
+        self,
+        name,
+        input_names,
+        output_names,
+        input_ranks=None,
+        input_shapes=None,
+        output_ranks=None,
+        output_shapes=None,
+    ):
+        generic_layer = self.nn_spec.layers.add()
+        generic_layer.name = name
+        generic_layer.input.extend(input_names)
+        generic_layer.output.extend(output_names)
+        self.layers.append(name)
+        if name in self.layer_specs:
+            raise ValueError(
+                'Layer with name "%s" has already been added. Please use a unique name.'
+                % name
+            )
+        self.layer_specs[name] = generic_layer
+        _fill_tensor_fields(generic_layer.inputTensor, input_ranks, input_shapes)
+        _fill_tensor_fields(generic_layer.outputTensor, output_ranks, output_shapes)
+
+        # Pass rank information: a generic layer copies the rank of its first
+        # input to all of its outputs. Layers that modify rank must override this.
+        if input_names is not None and len(input_names) > 0:
+            for output_ in output_names:
+                self.rank_dict[output_] = self._get_rank(input_names[0])
+        return generic_layer
+
+    def inspect_layers(self, last=-1, verbose=False):
+        """ Prints a summary of the last ``last`` layers.
+
+        Parameters
+        ----------
+        last: int
+            The number of layers to inspect, starting from the last one.
+        verbose: bool
+            Whether to display layer-specific parameters or not.
+        """
+        n_layers = len(self.nn_spec.layers)
+        if last < 0:
+            last = n_layers
+        for i, alayer in enumerate(self.nn_spec.layers[::-1]):
+            if i >= last:
+                break
+            (
+                layer_type,
+                name,
+                in_blobs,
+                out_blobs,
+                params_info,
+            ) = _summarize_network_layer_info(alayer)
+            print(
+                "[Id: {}], Name: {} (Type: {})".format(
+                    n_layers - i - 1, name, layer_type
+                )
+            )
+            print(" " * 10 + "Updatable: {}".format(alayer.isUpdatable))
+            print(" " * 10 + "Input blobs: {}".format(in_blobs))
+            print(" " * 10 + "Output blobs: {}".format(out_blobs))
+            if verbose and len(params_info) > 0:
+                print(" " * 10 + "Parameters: ")
+                for param in params_info:
+                    print(" " * 14 + "{} = {}".format(param[0], param[1]))
+
+    def inspect_loss_layers(self):
+        """ Prints a summary of each loss layer.
+ """ + n_loss_layers = len(self.nn_spec.updateParams.lossLayers) + if n_loss_layers < 1: + print("no loss layer detected.") + for i, loss_layer in enumerate(self.nn_spec.updateParams.lossLayers[::-1]): + loss_type = loss_layer.WhichOneof("LossLayerType") + loss_name = loss_layer.name + loss_input = None + loss_target = None + if loss_type == "categoricalCrossEntropyLossLayer": + loss_input = loss_layer.categoricalCrossEntropyLossLayer.input + loss_target = loss_layer.categoricalCrossEntropyLossLayer.target + elif loss_type == "meanSquaredErrorLossLayer": + loss_input = loss_layer.meanSquaredErrorLossLayer.input + loss_target = loss_layer.meanSquaredErrorLossLayer.target + + print( + "[Id: {}], Name: {} (Type: {})".format( + n_loss_layers - i - 1, loss_name, loss_type + ) + ) + print(" " * 10 + "Loss Input: {}".format(loss_input)) + print(" " * 10 + "Loss Target: {}".format(loss_target)) + + def inspect_optimizer(self): + """ Prints the summary for the optimizer. + """ + optimizer = self.nn_spec.updateParams.optimizer + optimizer_type = optimizer.WhichOneof("OptimizerType") + print("Optimizer Type: {}".format(optimizer_type)) + if optimizer_type == "sgdOptimizer": + lr = optimizer.sgdOptimizer.learningRate + batch = optimizer.sgdOptimizer.miniBatchSize + momentum = optimizer.sgdOptimizer.momentum + print( + "lr: {}, min: {}, max: {}".format( + lr.defaultValue, lr.range.minValue, lr.range.maxValue + ) + ) + print( + "batch: {}, allowed_set: {}".format( + batch.defaultValue, batch.set.values + ) + ) + print( + "momentum: {}, min: {}, max: {}".format( + momentum.defaultValue, + momentum.range.minValue, + momentum.range.maxValue, + ) + ) + elif optimizer_type == "adamOptimizer": + lr = optimizer.adamOptimizer.learningRate + batch = optimizer.adamOptimizer.miniBatchSize + beta1 = optimizer.adamOptimizer.beta1 + beta2 = optimizer.adamOptimizer.beta2 + eps = optimizer.adamOptimizer.eps + print( + "lr: {}, min: {}, max: {}".format( + lr.defaultValue, lr.range.minValue, lr.range.maxValue + ) + ) + print( + "batch: {}, allowed_set: {}".format( + batch.defaultValue, batch.set.values + ) + ) + print( + "beta1: {}, min: {}, max: {}".format( + beta1.defaultValue, beta1.range.minValue, beta1.range.maxValue + ) + ) + print( + "beta2: {}, min: {}, max: {}".format( + beta2.defaultValue, beta2.range.minValue, beta2.range.maxValue + ) + ) + print( + "epsilon: {}, min: {}, max: {}".format( + eps.defaultValue, eps.range.minValue, eps.range.maxValue + ) + ) + + def inspect_updatable_layers(self): + """ Prints all updatable layers with their inputs and outputs. + """ + for _, layer in enumerate(self.nn_spec.layers[::-1]): + if layer.isUpdatable: + ( + layer_type, + name, + in_blobs, + out_blobs, + _, + ) = _summarize_network_layer_info(layer) + print("Name: {} (Type: {})".format(name, layer_type)) + print(" " * 10 + "Input blobs: {}".format(in_blobs)) + print(" " * 10 + "Output blobs: {}".format(out_blobs)) + + def inspect_input_features(self): + """ Prints the name and type of input features. + """ + input_features = self.spec.description.input + n_input_features = len(input_features) + if n_input_features < 1: + return + for i, input_feature in enumerate(input_features[::-1]): + print( + "[Id: {}] Name: {}".format(n_input_features - i - 1, input_feature.name) + ) + print(" " * 10 + "Type: {}".format(input_feature.type)) + + def inspect_output_features(self): + """ Prints the name and type of output features. 
+ """ + output_features = self.spec.description.output + n_output_features = len(output_features) + if n_output_features < 1: + return + for i, output_feature in enumerate(output_features[::-1]): + print( + "[Id: {}] Name: {}".format( + n_output_features - i - 1, output_feature.name + ) + ) + print(" " * 10 + "Type: {}".format(output_feature.type)) + + def inspect_conv_channels(self, layer_name): + """ Prints the output and kernel channels of a convolution layer. + """ + if self.spec is None: + return + if layer_name not in self.layer_specs: + raise ValueError("Layer %s does not exist." % (layer_name)) + spec_layer = self.layer_specs[layer_name] + + if spec_layer.WhichOneof("layer") != "convolution": + raise ValueError("Layer %s is not a convolution layer." % (layer_name)) + + output_channels = spec_layer.convolution.outputChannels + kernel_channels = spec_layer.convolution.kernelChannels + print("outputChannels: {}".format(output_channels)) + print("kernelChannels: {}".format(kernel_channels)) + + def inspect_innerproduct_channels(self, layer_name): + """ Prints the output and kernel channels of an innerProduct layer. + """ + if self.spec is None: + return + if layer_name not in self.layer_specs: + raise ValueError("Layer %s does not exist." % (layer_name)) + spec_layer = self.layer_specs[layer_name] + + if spec_layer.WhichOneof("layer") != "innerProduct": + raise ValueError("Layer %s is not an innerProduct layer." % (layer_name)) + + input_channels = spec_layer.innerProduct.inputChannels + output_channels = spec_layer.innerProduct.outputChannels + print("inputChannels: {}".format(input_channels)) + print("outputChannels: {}".format(output_channels)) + + def _get_rank(self, name): + return self.rank_dict[name] if name in self.rank_dict else -1 + + def _set_max_input_rank(self, input_names, output_name): + if len(input_names) == 0: + raise ValueError("Input name list empty for collecting rank information") + self.rank_dict[output_name] = -1 + for i in range(0, len(input_names)): + input_rank = self._get_rank(input_names[i]) + if input_rank == -1: + self.rank_dict[output_name] = -1 + return + self.rank_dict[output_name] = max(self._get_rank(output_name), input_rank) + + def _set_rank_for_reduce_op( + self, input_name, output_name, axes, keepdims, reduce_all + ): + if keepdims: + self.rank_dict[output_name] = self._get_rank(input_name) + else: + if reduce_all or self._get_rank(input_name) == 1: + self.rank_dict[output_name] = 1 + elif axes is not None and len(axes) > 0: + rank = self._get_rank(input_name) - len(axes) + self.rank_dict[output_name] = rank if rank != 0 else 1 + else: + raise ValueError( + "Reduce Ops must provide axes to reduce on if reduce_all is False" + ) + + def add_inner_product( + self, + name, + W, + b, + input_channels, + output_channels, + has_bias, + input_name, + output_name, + int_8_dynamic_quantize=False, + is_quantized_weight=False, + quantization_type="linear", + nbits=8, + quant_scale=None, + quant_bias=None, + quant_lut=None, + ): + """ + Add an inner product layer to the model. + Refer to the ``InnerProductLayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + W: numpy.array or bytes() + Weight matrix of shape ``(output_channels, input_channels)``. + If ``W`` is of type ``bytes()`` (quantized), other quantization + related arguments must be provided as well (see below). + b: numpy.array + Bias vector of shape: ``(output_channels, )``. 
+ input_channels: int + Number of input channels. + output_channels: int + Number of output channels. + has_bias: boolean + Whether the bias vector of this layer is ignored in the spec. + + - If True, the bias vector of this layer is not ignored. + - If False, the bias vector is ignored. + + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + + Quantization arguments, used when ``W`` is of type ``bytes()``: + int_8_dynamic_quantize: boolean + Whether to quantize and dequantize before and after inner product, respectively. + Expects byte weights, representing int8 values, if True. + See NeuralNetwork.proto for other validation conditions. + + is_quantized_weight: bool, optional + Set it to true when ``W`` is of type ``bytes()``, representing + quantized weights, default: false. + + quantization_type: str + When weights are quantized (that is, ``W`` is of type ``bytes()``), + this should be either ``"linear"`` or ``"lut"``. + + nbits: int + Should be between 1 and 8 (inclusive). Number of bits per weight + value. Only applicable when weights are quantized. + + quant_scale: numpy.array(dtype=numpy.float32) + scale vector to be used with linear quantization. Must be of + length either 1 or output_channels. + + quant_bias: numpy.array(dtype=numpy.float32) + bias vector to be used with linear quantization. Must be of + length either 1 or output_channels. + + quant_lut: numpy.array(dtype=numpy.float32) + the LUT (look up table) to be used with LUT quantization. + Must be of length 2^n bits. + + See Also + -------- + add_embedding, add_convolution, add_batched_mat_mul + """ + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer_params = spec_layer.innerProduct + + # Fill in the parameters + spec_layer_params.inputChannels = input_channels + spec_layer_params.outputChannels = output_channels + spec_layer_params.hasBias = has_bias + spec_layer_params.int8DynamicQuantize = int_8_dynamic_quantize + + weights = spec_layer_params.weights + if not is_quantized_weight and isinstance(W, _np.ndarray): + weights.floatValue.extend(W.flatten()) + else: + + _verify_quantization_arguments( + weight=W, + output_channels=output_channels, + quantization_type=quantization_type, + nbits=nbits, + quant_scale=quant_scale, + quant_bias=quant_bias, + quant_lut=quant_lut, + int_8_dynamic_quantize=int_8_dynamic_quantize, + ) + + _fill_quantized_weights( + weights_message=weights, + W=W, + use_int_8=int_8_dynamic_quantize, + quantization_type=quantization_type, + nbits=nbits, + quant_scale=quant_scale, + quant_bias=quant_bias, + quant_lut=quant_lut, + ) + + if has_bias: + bias = spec_layer_params.bias + bias.floatValue.extend(b.flatten()) + + return spec_layer + + def add_embedding( + self, + name, + W, + b, + input_dim, + output_channels, + has_bias, + input_name, + output_name, + is_quantized_weight=False, + quantization_type="linear", + nbits=8, + quant_scale=None, + quant_bias=None, + quant_lut=None, + ): + """ + Add an embedding layer to the model. + Refer to the ``EmbeddingLayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + W: float32 numpy.array or bytes() + Weight matrix of shape ``(output_channels, input_dim)``. + If ``W`` is of type ``bytes()`` (quantized to 1-8 bits), other + quantization related arguments must be provided as well (see below). + b: numpy.array + Bias vector of shape ``(output_channels, )``. 
+ input_dim: int + Size of the vocabulary (1 + maximum integer index of the words). + output_channels: int + Number of output channels. + has_bias: boolean + Whether the bias vector of this layer is ignored in the ``spec``. + + - If True, the bias vector of this layer is not ignored. + - If False, the bias vector is ignored. + + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + + + Quantization arguments expected, when ``W`` is of type ``bytes()``: + + is_quantized_weight: bool + Set it to true when ``W`` is of type ``bytes()``, representing quantized weights. + + quantization_type: str + When weights are quantized (that is, ``W`` is of type ``bytes()``), + this should be either ``"linear"`` or ``"lut"``. + + nbits: int + Should be between 1 and 8 (inclusive). Number of bits per weight value. + + quant_scale: numpy.array(dtype=numpy.float32) + Scale vector to be used with linear quantization. + Must be of length either 1 or output_channels. + + quant_bias: numpy.array(dtype=numpy.float32) + Bias vector to be used with linear quantization. + Must be of length either 1 or output_channels. + + quant_lut: numpy.array(dtype=numpy.float32) + The LUT (look up table) to be used with LUT quantization. + Must be of length 2^n bits. + + See Also + -------- + add_inner_product + """ + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + + # Fill in the parameters + spec_layer_params = spec_layer.embedding + + spec_layer_params.inputDim = input_dim + spec_layer_params.outputChannels = output_channels + spec_layer_params.hasBias = has_bias + + weights = spec_layer_params.weights + if not is_quantized_weight: + weights.floatValue.extend(W.flatten()) + else: + _verify_quantization_arguments( + weight=W, + output_channels=output_channels, + quantization_type=quantization_type, + nbits=nbits, + quant_scale=quant_scale, + quant_bias=quant_bias, + quant_lut=quant_lut, + ) + + _fill_quantized_weights( + weights_message=weights, + W=W, + quantization_type=quantization_type, + nbits=nbits, + quant_scale=quant_scale, + quant_bias=quant_bias, + quant_lut=quant_lut, + ) + + if has_bias: + bias = spec_layer_params.bias + bias.floatValue.extend(b.flatten()) + + return spec_layer + + def add_softmax(self, name, input_name, output_name): + """ + Add a softmax layer to the model. + Refer to the ``SoftmaxLayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + + See Also + -------- + add_activation, add_inner_product, add_convolution + """ + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer.softmax.MergeFromString(b"") + return spec_layer + + def add_activation( + self, + name, + non_linearity, + input_name, + output_name, + params=None, + input_rank=None, + input_shape=None, + output_rank=None, + output_shape=None, + ): + """ + Add an activation layer to the model. + Refer to the specification (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + non_linearity: str + The ``non_linearity`` (activation) function of this layer. + It can be one of the following: + + - ``'RELU'``: Rectified Linear Unit (ReLU) function. + - ``'SIGMOID'``: sigmoid function. + - ``'TANH'``: tanh function. 
+            - ``'SCALED_TANH'``: scaled tanh function, defined as:
+
+              ``f(x) = alpha * tanh(beta * x)``
+
+              where ``alpha`` and ``beta`` are constant scalars.
+
+            - ``'SOFTPLUS'``: softplus function.
+            - ``'SOFTSIGN'``: softsign function.
+            - ``'SIGMOID_HARD'``: hard sigmoid function, defined as:
+
+              ``f(x) = min(max(alpha * x + beta, -1), 1)``
+
+              where ``alpha`` and ``beta`` are constant scalars.
+
+            - ``'LEAKYRELU'``: leaky ReLU function, defined as:
+
+              ``f(x) = (x >= 0) * x + (x < 0) * alpha * x``
+
+              where ``alpha`` is a constant scalar.
+
+            - ``'PRELU'``: Parametric ReLU function, defined as:
+
+              ``f(x) = (x >= 0) * x + (x < 0) * alpha * x``
+
+              where ``alpha`` is a multi-dimensional array of the same size as ``x``.
+
+            - ``'ELU'``: Exponential linear unit function, defined as:
+
+              ``f(x) = (x >= 0) * x + (x < 0) * (alpha * exp(x) - 1)``
+
+              where ``alpha`` is a constant scalar.
+
+            - ``'PARAMETRICSOFTPLUS'``: Parametric softplus function, defined as:
+
+              ``f(x) = alpha * log(1 + exp(beta * x))``
+
+              where ``alpha`` and ``beta`` are two multi-dimensional arrays
+              of the same size as ``x``.
+
+            - ``'THRESHOLDEDRELU'``: Thresholded ReLU function, defined as:
+
+              ``f(x) = (x >= alpha) * x``
+
+              where ``alpha`` is a constant scalar.
+
+            - ``'LINEAR'``: linear function.
+
+              ``f(x) = alpha * x + beta``
+
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        params: list of float or numpy.array
+            Parameters for the activation, depending on ``non_linearity``.
+
+            - When ``non_linearity`` is one of [``'RELU'``, ``'SIGMOID'``, ``'TANH'``, ``'SOFTPLUS'``, ``'SOFTSIGN'``],
+              params is ignored.
+            - When ``non_linearity`` is one of [``'SCALED_TANH'``, ``'SIGMOID_HARD'``, ``'LINEAR'``],
+              params is a list of 2 floats ``[alpha, beta]``.
+            - When ``non_linearity`` is ``'LEAKYRELU'``, params is a list of 1 float ``[alpha]``.
+            - When ``non_linearity`` is one of [``'ELU'``, ``'THRESHOLDEDRELU'``],
+              params is a single float ``alpha``.
+            - When ``non_linearity`` is ``'PRELU'``, params is a numpy array ``alpha``.
+              The shape of ``alpha`` is ``(C,)``, where ``C`` is either the number of input channels or
+              1. When ``C = 1``, the same ``alpha`` is applied to all channels.
+            - When ``non_linearity`` is ``'PARAMETRICSOFTPLUS'``, params is a
+              list of 2 numpy arrays ``[alpha, beta]``. The shape of ``alpha`` and
+              ``beta`` is ``(C, )``, where ``C`` is either
+              the number of input channels or 1. When ``C = 1``, the same ``alpha`` and
+              ``beta`` are applied to all channels.
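+
+        Examples
+        --------
+        .. sourcecode:: python
+
+            # Illustrative calls with hypothetical blob names.
+            >>> builder.add_activation(name='relu_1', non_linearity='RELU',
+            ...                        input_name='data', output_name='relu_1_output')
+            >>> builder.add_activation(name='leaky_1', non_linearity='LEAKYRELU',
+            ...                        input_name='relu_1_output', output_name='leaky_1_output',
+            ...                        params=[0.01])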
+
+        See Also
+        --------
+        add_convolution, add_softmax
+        """
+        input_rank = (
+            len(input_shape) if (input_shape and not input_rank) else input_rank
+        )
+        output_rank = (
+            len(output_shape) if (output_shape and not output_rank) else output_rank
+        )
+        spec_layer = self._add_generic_layer(
+            name,
+            [input_name],
+            [output_name],
+            [input_rank] if input_rank else None,
+            [input_shape] if input_shape else None,
+            [output_rank] if output_rank else None,
+            [output_shape] if output_shape else None,
+        )
+        spec_layer_params = spec_layer.activation
+
+        # Fill in the parameters
+        non_linearity = (
+            non_linearity.upper()
+            if isinstance(non_linearity, str)
+            else non_linearity
+        )
+        if non_linearity == "RELU":
+            spec_layer_params.ReLU.MergeFromString(b"")
+
+        elif non_linearity == "SIGMOID":
+            spec_layer_params.sigmoid.MergeFromString(b"")
+
+        elif non_linearity == "TANH":
+            spec_layer_params.tanh.MergeFromString(b"")
+
+        elif non_linearity == "SCALED_TANH":
+            spec_layer_params.scaledTanh.MergeFromString(b"")
+            if params is None:
+                alpha, beta = (0.0, 0.0)
+            else:
+                alpha, beta = params[0], params[1]
+            spec_layer_params.scaledTanh.alpha = alpha
+            spec_layer_params.scaledTanh.beta = beta
+
+        elif non_linearity == "SOFTPLUS":
+            spec_layer_params.softplus.MergeFromString(b"")
+
+        elif non_linearity == "SOFTSIGN":
+            spec_layer_params.softsign.MergeFromString(b"")
+
+        elif non_linearity == "SIGMOID_HARD":
+            if params is None:
+                alpha, beta = (0.2, 0.5)
+            else:
+                alpha, beta = params[0], params[1]
+            spec_layer_params.sigmoidHard.alpha = alpha
+            spec_layer_params.sigmoidHard.beta = beta
+
+        elif non_linearity == "LEAKYRELU":
+            if params is None:
+                alpha = 0.3
+            else:
+                alpha = params[0]
+            spec_layer_params.leakyReLU.alpha = float(alpha)
+
+        elif non_linearity == "PRELU":
+            # PReLU expects a numpy array of per-channel alphas in params
+            spec_layer_params.PReLU.alpha.floatValue.extend(params.flatten())
+
+        elif non_linearity == "ELU":
+            # ELU expects a single scalar alpha in params
+            spec_layer_params.ELU.alpha = float(params)
+
+        elif non_linearity == "PARAMETRICSOFTPLUS":
+            # Parametric softplus expects two numpy arrays in params: alpha and beta
+            alphas, betas = (params[0], params[1])
+            # Weight alignment: Keras [H,W,C,F]
+            spec_layer_params.parametricSoftplus.alpha.floatValue.extend(
+                alphas.flatten()
+            )
+            spec_layer_params.parametricSoftplus.beta.floatValue.extend(betas.flatten())
+
+        elif non_linearity == "THRESHOLDEDRELU":
+            if params is None:
+                theta = 1.0
+            else:
+                theta = params
+            spec_layer_params.thresholdedReLU.alpha = float(theta)
+
+        elif non_linearity == "LINEAR":
+            if params is None:
+                alpha, beta = (1.0, 0.0)
+            else:
+                alpha, beta = params[0], params[1]
+            spec_layer_params.linear.alpha = alpha
+            spec_layer_params.linear.beta = beta
+        else:
+            raise TypeError("Unknown activation type %s." % non_linearity)
+        return spec_layer
+
+    def add_elementwise(self, name, input_names, output_name, mode, alpha=None):
+        """
+        Add an element-wise operation layer to the model.
+        Refer to the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            A list of input blob names of this layer. The input blobs should have the same shape.
+        output_name: str
+            The output blob name of this layer.
+        mode: str
+            A string specifying the mode of the elementwise layer. It can be one of the following:
+
+            - ``'CONCAT'``: Concatenate input blobs along the channel axis.
+            - ``'SEQUENCE_CONCAT'``: Concatenate input blobs along the sequence axis.
+            - ``'ADD'``: Perform an element-wise summation over the input blobs.
+            - ``'MULTIPLY'``: Perform an element-wise multiplication over the input blobs.
+            - ``'DOT'``: Compute the dot product of the two input blobs.
+              In this mode, the length of ``input_names`` should be 2.
+            - ``'COS'``: Compute the cosine similarity of the two input blobs.
+              In this mode, the length of ``input_names`` should be 2.
+            - ``'MAX'``: Compute the element-wise maximum over the input blobs.
+            - ``'MIN'``: Compute the element-wise minimum over the input blobs.
+            - ``'AVE'``: Compute the element-wise average over the input blobs.
+
+        alpha: float
+            * If ``mode == 'ADD'`` and there is only one ``input_name``,
+              ``alpha`` is added to the input.
+            * If ``mode == 'MULTIPLY'`` and there is only one ``input_name``,
+              the input is multiplied by ``alpha``.
+
+        See Also
+        --------
+        add_upsample, add_sequence_repeat
+
+        """
+        input_names = input_names if isinstance(input_names, list) else [input_names]
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+
+        # add one of the following layers
+        mode = mode.upper() if isinstance(mode, str) else mode
+        if mode == "CONCAT":
+            spec_layer.concat.sequenceConcat = False
+        elif mode == "SEQUENCE_CONCAT":
+            spec_layer.concat.sequenceConcat = True
+        elif mode == "ADD":
+            spec_layer.add.MergeFromString(b"")
+            if alpha:
+                spec_layer.add.alpha = alpha
+        elif mode == "MULTIPLY":
+            spec_layer.multiply.MergeFromString(b"")
+            if alpha:
+                spec_layer.multiply.alpha = alpha
+        elif mode == "COS":
+            spec_layer.dot.cosineSimilarity = True
+        elif mode == "DOT":
+            spec_layer.dot.cosineSimilarity = False
+        elif mode == "MAX":
+            spec_layer.max.MergeFromString(b"")
+        elif mode == "MIN":
+            spec_layer.min.MergeFromString(b"")
+        elif mode == "AVE":
+            spec_layer.average.MergeFromString(b"")
+        else:
+            raise ValueError("Unsupported elementwise mode %s" % mode)
+        return spec_layer
+
+    def add_upsample(
+        self,
+        name,
+        scaling_factor_h,
+        scaling_factor_w,
+        input_name,
+        output_name,
+        mode="NN",
+        linear_upsample_mode="DEFAULT",
+    ):
+        """
+        Add an upsample layer to the model.
+        Refer to the ``UpsampleLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        scaling_factor_h: int or float
+            Scaling factor in the vertical direction. Float values are only
+            supported with ``BILINEAR`` and ``ALIGN_CORNERS_*``.
+        scaling_factor_w: int or float
+            Scaling factor in the horizontal direction. Float values are only
+            supported with ``BILINEAR`` and ``ALIGN_CORNERS_*``.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        mode: str
+            Overall interpolation mode. The following values are supported:
+
+            * ``'NN'``: nearest neighbour
+            * ``'BILINEAR'``: bilinear interpolation
+
+        linear_upsample_mode: str
+            Specifies the behavior for linear upsampling. Only valid when the
+            interpolation mode is ``BILINEAR``.
+ + If input grid is ``[0, Xin-1]`` (corresponding to an input size of + ``Xin``), and if the output size is ``Xout``, + then the grid points are sampled in the following manner: + + 'DEFAULT': + - ``spacing = (Xin-Xin/Xout) / (Xout-1)`` + - ``grid_point[i] = min(Xin-1, max(0, i * spacing)), for i = 0,1,2,..,Xout-1`` + + 'ALIGN_CORNERS_TRUE': + - ``spacing = (Xin-1) / (Xout-1)`` + - ``grid_point[i] = min(Xin-1, max(0, i * spacing)), for i = 0,1,2,..,Xout-1`` + + 'ALIGN_CORNERS_FALSE': + - ``spacing = Xin / Xout`` + - ``grid_point[i] = min(Xin-1, max(0, i * spacing + 0.5 * spacing - 0.5)), for i = 0,1,2,..,Xout-1`` + + See Also + -------- + add_resize_bilinear + + """ + + mode = mode.upper() if isinstance(mode, str) else mode + linear_upsample_mode = ( + linear_upsample_mode.upper() + if isinstance(linear_upsample_mode, str) + else linear_upsample_mode + ) + if not mode in ["NN", "BILINEAR"]: + raise ValueError("Unsupported upsampling mode %s" % mode) + if not linear_upsample_mode in [ + "DEFAULT", + "ALIGN_CORNERS_TRUE", + "ALIGN_CORNERS_FALSE", + ]: + raise ValueError( + "Unsupported linear upsampling mode %s" % linear_upsample_mode + ) + + # Default linear upsample mode is backwards compatible, else set spec to iOS14 + if ( + linear_upsample_mode != "DEFAULT" + and self.spec + and ( + not self.spec.specificationVersion + or self.spec.specificationVersion < _SPECIFICATION_VERSION_IOS_14 + ) + ): + self.spec.specificationVersion = _SPECIFICATION_VERSION_IOS_14 + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer_params = spec_layer.upsample + if ( + scaling_factor_h - _math_floor(scaling_factor_h) > 0.001 + or scaling_factor_w - _math_floor(scaling_factor_w) > 0.001 + ): + if mode != "BILINEAR" or linear_upsample_mode not in [ + "ALIGN_CORNERS_TRUE", + "ALIGN_CORNERS_FALSE", + ]: + raise ValueError( + "Fractional upsampling only compatible with BILINEAR and ALIGN_CORNERS_TRUE or ALIGN_CORNERS_FALSE" + ) + spec_layer_params.fractionalScalingFactor.append(float(scaling_factor_h)) + spec_layer_params.fractionalScalingFactor.append(float(scaling_factor_w)) + else: + spec_layer_params.scalingFactor.append(int(scaling_factor_h)) + spec_layer_params.scalingFactor.append(int(scaling_factor_w)) + + spec_layer_params.mode = _NeuralNetwork_pb2.UpsampleLayerParams.InterpolationMode.Value( + mode + ) + spec_layer_params.linearUpsampleMode = _NeuralNetwork_pb2.UpsampleLayerParams.LinearUpsampleMode.Value( + linear_upsample_mode + ) + + return spec_layer + + def add_scale( + self, + name, + W, + b, + has_bias, + input_name, + output_name, + shape_scale=None, + shape_bias=None, + ): + """ + Add a scale layer to the model. + Refer to the ``ScaleLayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + W: int or numpy.array + Scale of the input. + b: int or numpy.array + Bias to add to the input. + has_bias: boolean + Whether the bias vector of this layer is ignored in the ``spec``. + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + + shape_scale: list of int or tuple of int + List of ints that specifies the shape of the scale parameter. + Can be ``[1]``, ``[C]``, ``[1,H,W]``, or ``[C,H,W]``. + shape_bias: list of int + List of ints that specifies the shape of the bias parameter + (if present). Can be ``[1]``, ``[C]``, ``[1,H,W]``, or ``[C,H,W]``. 
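+
+        The following is an illustrative sketch (not part of the original
+        source); it assumes a ``NeuralNetworkBuilder`` instance named ``builder``
+        and a 3-channel input blob named ``"data"``::
+
+            import numpy as np
+
+            # Per-channel scale and bias: shape_scale/shape_bias are [C] with C = 3.
+            builder.add_scale(
+                name="scale_1",
+                W=np.array([0.5, 1.0, 2.0], dtype=np.float32),
+                b=np.zeros(3, dtype=np.float32),
+                has_bias=True,
+                input_name="data",
+                output_name="out",
+                shape_scale=[3],
+                shape_bias=[3],
+            )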
+
+        See Also
+        --------
+        add_bias
+        """
+
+        if not shape_scale:
+            shape_scale = [1]
+        if not shape_bias:
+            shape_bias = [1]
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.scale
+
+        spec_layer_params.hasBias = has_bias
+
+        # add scale and its shape
+        scale = spec_layer_params.scale
+        spec_layer_params.shapeScale.extend(shape_scale)
+        if isinstance(W, int):
+            scale.floatValue.append(float(W))
+        else:
+            scale.floatValue.extend(W.flatten())
+        if len(scale.floatValue) != _np.prod(shape_scale):
+            raise ValueError(
+                "Dimensions of 'shape_scale' do not match the size of the provided 'scale' parameter"
+            )
+
+        # add bias and its shape
+        if has_bias:
+            bias = spec_layer_params.bias
+            spec_layer_params.shapeBias.extend(shape_bias)
+            if isinstance(b, int):
+                bias.floatValue.append(float(b))
+            else:
+                bias.floatValue.extend(b.flatten())
+            if len(bias.floatValue) != _np.prod(shape_bias):
+                raise ValueError(
+                    "Dimensions of 'shape_bias' do not match the size of the provided 'b' parameter"
+                )
+        return spec_layer
+
+    def add_bias(self, name, b, input_name, output_name, shape_bias=None):
+        """
+        Add a bias layer to the model.
+        Refer to the ``BiasLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        b: int or numpy.array
+            Bias to add to the input.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        shape_bias: list of int
+            List of ints that specifies the shape of the bias parameter
+            (if present). Can be ``[1]``, ``[C]``, ``[1,H,W]``, or ``[C,H,W]``.
+
+        See Also
+        --------
+        add_scale
+        """
+
+        if not shape_bias:
+            shape_bias = [1]
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.bias
+
+        # add bias and its shape
+        bias = spec_layer_params.bias
+        if len(shape_bias) != 1 and len(shape_bias) != 3:
+            raise ValueError("Shape of bias layer must have length 1 or 3.")
+
+        spec_layer_params.shape.extend(shape_bias)
+        if isinstance(b, int):
+            bias.floatValue.append(float(b))
+        else:
+            bias.floatValue.extend(b.flatten())
+        if len(bias.floatValue) != _np.prod(shape_bias):
+            raise ValueError(
+                "Dimensions of 'shape_bias' do not match the size "
+                "of the provided 'b' parameter"
+            )
+        return spec_layer
+
+    def add_sequence_repeat(self, name, nrep, input_name, output_name):
+        """
+        Add a sequence repeat layer to the model.
+        Refer to the ``SequenceRepeatLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        nrep: int
+            Number of repetitions of the input blob along the sequence axis.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
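+
+        A minimal illustrative sketch (not part of the original source),
+        assuming a ``NeuralNetworkBuilder`` instance named ``builder``::
+
+            # Repeat the input blob 4 times along the sequence axis.
+            builder.add_sequence_repeat(
+                name="repeat_1", nrep=4, input_name="data", output_name="out"
+            )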
+
+        See Also
+        --------
+        add_upsample, add_elementwise
+        """
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.sequenceRepeat
+        spec_layer_params.nRepetitions = nrep
+        return spec_layer
+
+    def add_convolution(
+        self,
+        name,
+        kernel_channels,
+        output_channels,
+        height,
+        width,
+        stride_height,
+        stride_width,
+        border_mode,
+        groups,
+        W,
+        b,
+        has_bias,
+        is_deconv=False,
+        output_shape=None,
+        input_name="data",
+        output_name="out",
+        dilation_factors=[1, 1],
+        padding_top=0,
+        padding_bottom=0,
+        padding_left=0,
+        padding_right=0,
+        same_padding_asymmetry_mode="BOTTOM_RIGHT_HEAVY",
+        **kwargs
+    ):
+        """
+        Add a convolution layer to the network.
+        Refer to the ``ConvolutionLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+
+        name: str
+            The name of this layer.
+
+        kernel_channels: int
+            Number of channels for the convolution kernels.
+
+        output_channels: int
+            Number of filter kernels. This is equal to the number of channels in the output blob.
+
+        height: int
+            Height of each kernel.
+
+        width: int
+            Width of each kernel.
+
+        stride_height: int
+            Stride along the height direction.
+
+        stride_width: int
+            Stride along the width direction.
+
+        border_mode: str
+            Option for the padding type and output blob shape. Can be either 'valid' or 'same'.
+
+        groups: int
+            Number of kernel groups. Input is divided into groups along the channel axis.
+            Each kernel group shares the same weights.
+
+        W: numpy.array or bytes() or None
+            Weight of the convolution kernels.
+
+            * If ``is_deconv`` is False, ``W`` should have
+              shape ``(height, width, kernel_channels, output_channels)``, where:
+              ``kernel_channels = input_channels / groups``
+            * If ``is_deconv`` is True, ``W`` should have
+              shape ``(height, width, kernel_channels, output_channels / groups)``, where:
+              ``kernel_channels = input_channels``
+
+            If ``W`` is of type ``bytes()`` (quantized), other quantization
+            related arguments must be provided as well (see below).
+
+            For Core ML specification version >=4, ``W`` can be ``None``. In this case,
+            the convolution layer takes 2 inputs, where the 1st input represents
+            the input feature map, and the 2nd input represents the weight blob.
+
+        b: numpy.array
+            Biases of the convolution kernels. ``b`` should have shape ``(outputChannels, )``.
+
+        has_bias: boolean
+            Whether bias is ignored.
+
+            - If True, bias is not ignored.
+            - If False, bias is ignored.
+
+        is_deconv: boolean
+            Whether the convolution layer is performing a convolution or a
+            transposed convolution (deconvolution).
+
+            - If True, the convolution layer is performing transposed convolution.
+            - If False, the convolution layer is performing regular convolution.
+
+        output_shape: tuple or None
+            Either ``None`` or a 2-tuple, specifying the output
+            shape ``(output_height, output_width)``.
+
+            - Used only when ``is_deconv == True``.
+            - When ``is_deconv == False``, this parameter is ignored.
+            - If it is ``None``, the output shape is calculated automatically using the ``border_mode``.
+
+        input_name: str or list of str
+            The input blob name(s) of this layer.
+
+        output_name: str
+            The output blob name of this layer.
+
+        dilation_factors: list of int
+            Dilation factors across height and width directions. Must be a list of two positive integers.
+            Defaults to ``[1, 1]``.
+
+        padding_top, padding_bottom, padding_left, padding_right: int
+            Values of height (top, bottom) and width (left, right) padding
+            to be used if ``border_mode`` is ``"valid"``.
+
+        same_padding_asymmetry_mode: str
+            Type of asymmetric padding to be used when ``border_mode`` is ``'same'``.
+            Can be either ``'BOTTOM_RIGHT_HEAVY'`` or ``'TOP_LEFT_HEAVY'``.
+
+        Quantization
+            Quantization arguments expected in ``kwargs``, when ``W`` is of type ``bytes()``.
+
+            quantization_type: str
+                When weights are quantized (that is, ``W`` is of type ``bytes()``),
+                this should be either ``"linear"`` or ``"lut"``.
+
+            nbits: int
+                Should be between 1 and 8 (inclusive). Number of bits per weight
+                value. Only applicable when weights are quantized.
+
+            quant_scale: numpy.array(dtype=numpy.float32)
+                Scale vector to be used with linear quantization. Must be of
+                length either 1 or ``output_channels``.
+
+            quant_bias: numpy.array(dtype=numpy.float32)
+                Bias vector to be used with linear quantization. Must be of
+                length either 1 or ``output_channels``.
+
+            quant_lut: numpy.array(dtype=numpy.float32)
+                The LUT (look-up table) to be used with LUT quantization.
+                Must be of length ``2^nbits``.
+
+        Depthwise convolution
+            Depthwise convolution is a special case of convolution, in which:
+
+            * ``kernel_channels = 1 (== input_channels / groups)``
+            * ``output_channels = channel_multiplier * input_channels``
+            * ``groups = input_channels``
+            * ``W``: ``[Kernel_height, Kernel_width, 1, channel_multiplier * input_channels]``
+
+        See Also
+        --------
+        add_convolution3d, add_pooling, add_activation, add_batchnorm
+
+        """
+
+        if isinstance(input_name, tuple):
+            input_names = list(input_name)
+        elif isinstance(input_name, list):
+            input_names = input_name
+        else:
+            input_names = [input_name]
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+
+        # Set the layer params
+        spec_layer_params = spec_layer.convolution
+        spec_layer_params.isDeconvolution = is_deconv
+
+        if is_deconv and output_shape:
+            spec_layer_params.outputShape.append(output_shape[0])
+            spec_layer_params.outputShape.append(output_shape[1])
+
+        spec_layer_params.outputChannels = output_channels
+        spec_layer_params.kernelChannels = kernel_channels
+        spec_layer_params.kernelSize.append(height)
+        spec_layer_params.kernelSize.append(width)
+        spec_layer_params.stride.append(stride_height)
+        spec_layer_params.stride.append(stride_width)
+
+        border_mode = (
+            border_mode.lower()
+            if isinstance(border_mode, str)
+            else border_mode
+        )
+        same_padding_asymmetry_mode = (
+            same_padding_asymmetry_mode.upper()
+            if isinstance(same_padding_asymmetry_mode, str)
+            else same_padding_asymmetry_mode
+        )
+
+        if border_mode == "valid":
+            height_border = spec_layer_params.valid.paddingAmounts.borderAmounts.add()
+            height_border.startEdgeSize = padding_top
+            height_border.endEdgeSize = padding_bottom
+            width_border = spec_layer_params.valid.paddingAmounts.borderAmounts.add()
+            width_border.startEdgeSize = padding_left
+            width_border.endEdgeSize = padding_right
+        elif border_mode == "same":
+            if not (
+                same_padding_asymmetry_mode == "BOTTOM_RIGHT_HEAVY"
+                or same_padding_asymmetry_mode == "TOP_LEFT_HEAVY"
+            ):
+                raise ValueError(
+                    "Invalid value %s of same_padding_asymmetry_mode parameter"
+                    % same_padding_asymmetry_mode
+                )
+            spec_layer_params.same.asymmetryMode = _NeuralNetwork_pb2.SamePadding.SamePaddingMode.Value(
+                same_padding_asymmetry_mode
+            )
+        else:
+            raise NotImplementedError(
+                "Border mode %s is not implemented."
+                % border_mode
+            )
+
+        spec_layer_params.nGroups = groups
+        spec_layer_params.hasBias = has_bias
+
+        # add dilation factors
+        spec_layer_params.dilationFactor.append(dilation_factors[0])
+        spec_layer_params.dilationFactor.append(dilation_factors[1])
+
+        # If the weight comes from another tensor, there is nothing more to fill in
+        if len(input_names) > 1:
+            return
+
+        # Weight assignments
+        quantization = kwargs.get("quantization_type") is not None
+        if quantization:
+            _verify_quantization_arguments(
+                weight=W, output_channels=output_channels, **kwargs
+            )
+
+            nbits = kwargs.get("nbits", 8)
+            # Integer division: num_weights is a count, not a float
+            num_weights = (output_channels * kernel_channels * height * width) // groups
+            if nbits < 8:
+                byte_arr = _np.frombuffer(W, dtype=_np.uint8)
+                W = _unpack_to_bytes(byte_arr, num_weights, nbits)
+            else:
+                W = _np.frombuffer(W, dtype=_np.uint8)
+
+            if is_deconv:
+                W = _np.reshape(
+                    W, (height, width, kernel_channels, output_channels // groups)
+                )
+            else:
+                W = _np.reshape(W, (height, width, kernel_channels, output_channels))
+
+        # Weight alignment: MLModel Spec requires following weight arrangement:
+        # is_deconv == False ==> (output_channels, kernel_channels, height, width), where kernel_channels = input_channels / groups
+        # is_deconv == True ==> (kernel_channels, output_channels / groups, height, width), where kernel_channels = input_channels
+        if not is_deconv:
+            Wt = W.transpose((3, 2, 0, 1))
+            Wt = Wt.flatten()
+        else:
+            Wt = W.transpose((2, 3, 0, 1)).flatten()
+
+        # Assign weights
+        weights = spec_layer_params.weights
+        if not quantization:  # no quantization
+            weights.floatValue.extend(Wt.flatten())
+        else:  # there is quantization
+            W_bytes = bytes()
+            if nbits == 8:
+                W_bytes += Wt.flatten().tobytes()
+            else:
+                W_bytes += _convert_array_to_nbit_quantized_bytes(
+                    Wt.flatten(), nbits
+                ).tobytes()
+            _fill_quantized_weights(weights_message=weights, W=W_bytes, **kwargs)
+
+        # Assign biases
+        if has_bias:
+            bias = spec_layer_params.bias
+            for f in range(output_channels):
+                bias.floatValue.append(float(b[f]))
+
+        return spec_layer
+
+    def add_convolution3d(
+        self,
+        name,
+        input_channels,
+        output_channels,
+        depth,
+        height,
+        width,
+        W,
+        b,
+        has_bias,
+        groups=1,
+        stride_depth=1,
+        stride_height=1,
+        stride_width=1,
+        dilation_width=1,
+        dilation_height=1,
+        dilation_depth=1,
+        is_deconv=False,
+        output_shape=None,
+        padding_mode="valid",
+        padding_front=0,
+        padding_back=0,
+        padding_top=0,
+        padding_bottom=0,
+        padding_left=0,
+        padding_right=0,
+        input_name="data",
+        output_name="out",
+    ):
+        """
+        Add a 3-dimensional convolution layer to the network.
+        Refer to the ``Convolution3DLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+
+        name: str
+            The name of this layer.
+
+        input_channels: int
+            Number of input channels.
+
+        output_channels: int
+            Number of filter kernels. This is equal to the number of channels in the output blob.
+
+        depth: int
+            Depth of each kernel.
+
+        height: int
+            Height of each kernel.
+
+        width: int
+            Width of each kernel.
+
+        W: numpy.array or bytes()
+            Weight of the convolution kernels. ``W`` should have shape:
+
+            - If ``deconv`` is False:
+
+              ``(output_channels, kernel_channels, depth, height, width)``, where:
+
+              ``kernel_channels = input_channels / groups``
+
+            - If ``deconv`` is True:
+
+              ``(output_channels / groups, kernel_channels, depth, height, width)``, where:
+
+              ``kernel_channels = input_channels``
+
+        b: numpy.array
+            Biases of the convolution kernels.
``b`` should have shape ``(outputChannels, )``. + + has_bias: boolean + Whether bias is ignored. + - If True, bias is not ignored. + - If False, bias is ignored. + + groups: int + Number of kernel groups. Input is divided into groups along the channel axis. Each + kernel group share the same weights. Defaults to 1. + + stride_depth, stride_height, stride_width: int + Stride along the depth, height, and width directions, respectively. Must all be positive + integers. Defaults to 1. + + dilation_depth, dilation_width, dilation_height: int + Dilation factors across depth, height, and width directions. Must all be positive + integers. Defaults to 1 in each dimension. + + is_deconv: bool + True if this is Convolution Transpose, otherwise False. + + output_shape: None or Tuple of int + Applicable only for Deconvolution layer. + ``None`` if Convolution. + Tuple of length 3 if Convolution Transpose. + + padding_mode: str + Option for the padding type and output blob shape. + Can be ``'custom'``, ``'valid'``, or ``'same'``. + Defaults to ``'valid'``. Case-insensitive. + + padding_front, padding_back, padding_top, padding_bottom, padding_left, padding_right: int + Values of depth (front, back), height (top, bottom), and width (left, right) padding to + be used. Must all be positive integers. All default to 0. + + input_name: str or list of str + The input blob name(s) of this layer. + + output_name: str + The output blob name of this layer. + + Depthwise convolution + Depthwise convolution is a special case of convolution, in which: + + * ``kernel_channels = 1`` (``== input_channels / groups``) + * ``output_channels = channel_multiplier * input_channels`` + * ``groups = input_channels`` + * ``W``: ``[Kernel_depth, Kernel_height, Kernel_width, 1, channel_multiplier * input_channels]`` + + See Also + -------- + add_convolution, add_pooling, add_activation, add_batchnorm + + """ + # Update spec version if necessary + if self.spec and ( + not self.spec.specificationVersion + or self.spec.specificationVersion < _SPECIFICATION_VERSION_IOS_14 + ): + self.spec.specificationVersion = _SPECIFICATION_VERSION_IOS_14 + + if isinstance(input_name, tuple): + input_names = list(input_name) + elif isinstance(input_name, list): + input_names = input_name + else: + input_names = [input_name] + + # 3D convolution doesn't currently support 2-inputs + if len(input_names) > 1: + raise ValueError("3D convolution only supports 1 input.") + + spec_layer = self._add_generic_layer(name, input_names, [output_name]) + + # Set the layer params + spec_layer_params = spec_layer.convolution3d + spec_layer_params.isDeconvolution = is_deconv + spec_layer_params.nGroups = groups + + spec_layer_params.outputChannels = output_channels + spec_layer_params.inputChannels = input_channels + + spec_layer_params.kernelDepth = depth + spec_layer_params.kernelHeight = height + spec_layer_params.kernelWidth = width + + spec_layer_params.strideDepth = stride_depth + spec_layer_params.strideHeight = stride_height + spec_layer_params.strideWidth = stride_width + + if is_deconv and output_shape: + spec_layer_params.outputShape.append(output_shape[0]) + spec_layer_params.outputShape.append(output_shape[1]) + spec_layer_params.outputShape.append(output_shape[2]) + + supported_padding_modes = {"CUSTOM", "VALID", "SAME"} + if padding_mode.upper() not in supported_padding_modes: + raise ValueError( + "Unsupported padding mode: %s. 
Must be one of %s" + % (padding_mode, supported_padding_modes) + ) + if padding_mode.upper() == "CUSTOM": + spec_layer_params.customPaddingFront = padding_front + spec_layer_params.customPaddingBack = padding_back + spec_layer_params.customPaddingTop = padding_top + spec_layer_params.customPaddingBottom = padding_bottom + spec_layer_params.customPaddingLeft = padding_left + spec_layer_params.customPaddingRight = padding_right + spec_layer_params.paddingType = _NeuralNetwork_pb2.Convolution3DLayerParams.PaddingType.Value( + padding_mode.upper() + ) + + spec_layer_params.dilationDepth = dilation_depth + spec_layer_params.dilationHeight = dilation_height + spec_layer_params.dilationWidth = dilation_width + + # Weight alignment: MLModel Spec requires following weight arrangement: + # is_deconv == False ==> (output_channels, kernel_channels, depth, height, width), where kernel_channel = input_channels / groups + # is_deconv == True ==> (kernel_channels, output_channels / groups, height, width), where kernel_channel = input_channels + if is_deconv: + W = W.transpose((1, 0, 2, 3, 4)) + + # Assign weights + weights = spec_layer_params.weights + weights.floatValue.extend(W.flatten()) + + # Assign biases + spec_layer_params.hasBias = has_bias + if has_bias: + bias = spec_layer_params.bias + for f in range(output_channels): + bias.floatValue.append(float(b[f])) + + return spec_layer + + def add_pooling( + self, + name, + height, + width, + stride_height, + stride_width, + layer_type, + padding_type, + input_name, + output_name, + exclude_pad_area=True, + is_global=False, + padding_top=0, + padding_bottom=0, + padding_left=0, + padding_right=0, + same_padding_asymmetry_mode="BOTTOM_RIGHT_HEAVY", + ): + """ + Add a pooling layer to the model that performs spatial pooling. + Refer to the ``PoolingLayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + + name: str + The name of this layer. + + height: int + Height of pooling region. + + width: int + Width of pooling region. + + stride_height: int + Stride along the height direction. + + stride_width: int + Stride along the width direction. + + layer_type: str + Type of pooling performed. Can either be ``'MAX'``, ``'AVERAGE'``, or ``'L2'``. + + padding_type: str + Option for the type of padding and output blob shape. Can be either + ``'VALID'``, ``'SAME'``, or ``'INCLUDE_LAST_PIXEL'``. + + input_name: str + The input blob name of this layer. + + output_name: str + The output blob name of this layer. + + exclude_pad_area: boolean + Whether to exclude padded area in the ``'AVERAGE'`` pooling operation, + default: true. This flag is only used with average pooling. + + - If True, the value of the padded area will be excluded. + - If False, the padded area will be included. + + is_global: boolean + Whether the pooling operation is global. Defaults to False. + + - If True, the pooling operation is global. The pooling region + is of the same size of the input blob. + Parameters ``height``, ``width``, ``stride_height``, and ``stride_width`` will be ignored. + - If False, the pooling operation is not global. + + padding_top, padding_bottom, padding_left, padding_right: int + Values of height (top, bottom) and width (left, right) padding + to be used if padding type is ``"VALID"`` or ``"INCLUDE_LAST_PIXEL"``. + + same_padding_asymmetry_mode: str. + Type of asymmetric padding to be used when ``padding_type = 'SAME'``. + Can be either ``'BOTTOM_RIGHT_HEAVY'`` or ``'TOP_LEFT_HEAVY'``. 
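+
+        The following is an illustrative sketch (not part of the original
+        source); it assumes a ``NeuralNetworkBuilder`` instance named ``builder``
+        with blobs named ``"data"`` and ``"out"``::
+
+            # 2x2 max pooling with stride 2 and no padding.
+            builder.add_pooling(
+                name="pool_1",
+                height=2,
+                width=2,
+                stride_height=2,
+                stride_width=2,
+                layer_type="MAX",
+                padding_type="VALID",
+                input_name="data",
+                output_name="out",
+            )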
+ + See Also + -------- + add_pooling3d, add_convolution, add_activation + """ + + # Create spec layer + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer_params = spec_layer.pooling + + # Set the parameters + spec_layer_params.type = _NeuralNetwork_pb2.PoolingLayerParams.PoolingType.Value( + layer_type.upper() + ) + + padding_type = ( + padding_type.upper() + if isinstance(padding_type, str) + else padding_type + ) + same_padding_asymmetry_mode = ( + same_padding_asymmetry_mode.upper() + if isinstance(same_padding_asymmetry_mode, str) + else same_padding_asymmetry_mode + ) + + if padding_type == "VALID": + height_border = spec_layer_params.valid.paddingAmounts.borderAmounts.add() + height_border.startEdgeSize = padding_top + height_border.endEdgeSize = padding_bottom + width_border = spec_layer_params.valid.paddingAmounts.borderAmounts.add() + width_border.startEdgeSize = padding_left + width_border.endEdgeSize = padding_right + elif padding_type == "SAME": + if not ( + same_padding_asymmetry_mode == "BOTTOM_RIGHT_HEAVY" + or same_padding_asymmetry_mode == "TOP_LEFT_HEAVY" + ): + raise ValueError( + "Invalid value %d of same_padding_asymmetry_mode parameter" + % same_padding_asymmetry_mode + ) + spec_layer_params.same.asymmetryMode = _NeuralNetwork_pb2.SamePadding.SamePaddingMode.Value( + same_padding_asymmetry_mode + ) + elif padding_type == "INCLUDE_LAST_PIXEL": + if padding_top != padding_bottom or padding_left != padding_right: + raise ValueError( + "Only symmetric padding is supported with the INCLUDE_LAST_PIXEL padding type" + ) + spec_layer_params.includeLastPixel.paddingAmounts.append(padding_top) + spec_layer_params.includeLastPixel.paddingAmounts.append(padding_left) + else: + raise ValueError("Unknown padding_type %s in pooling" % padding_type) + + spec_layer_params.kernelSize.append(height) + spec_layer_params.kernelSize.append(width) + spec_layer_params.stride.append(stride_height) + spec_layer_params.stride.append(stride_width) + spec_layer_params.avgPoolExcludePadding = exclude_pad_area + spec_layer_params.globalPooling = is_global + return spec_layer + + def add_pooling3d( + self, + name, + input_name, + output_name, + pooling_type, + kernel_depth, + kernel_height, + kernel_width, + stride_depth, + stride_height, + stride_width, + padding_mode="valid", + custom_padding_front=0, + custom_padding_back=0, + custom_padding_top=0, + custom_padding_bottom=0, + custom_padding_left=0, + custom_padding_right=0, + average_pooling_count_excludes_padding=False, + ): + """ + Add a pooling layer to the model that performs spatial pooling across three dimensions. + Refer to the ``Pooling3DLayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + pooling_type: str + Type of pooling performed. Can either be ``'MAX'`` OR ``'AVERAGE'``. + kernel_depth: int + Depth of the pooling region. + kernel_height: int + Height of pooling region. + kernel_width: int + Width of pooling region. + stride_depth: int + Stride along the depth direction + stride_height: int + Stride along the height direction. + stride_width: int + Stride along the width direction. + padding_mode: str + Option for the padding type and output blob shape. + Can be ``'VALID'``, ``'SAME'``, or ``'CUSTOM'``. + custom_padding_front: int + Padding before the input in the depth direction. 
+ custom_padding_back: int + Padding after the input in the depth direction. + custom_padding_top: int + Padding before the input in the height direction. + custom_padding_bottom: int + Padding after the input in the height direction. + custom_padding_left: int + Padding before the input in the width direction. + custom_padding_right: int + Padding after the input in the width direction. + average_pooling_count_excludes_padding: boolean + If true, exclude zeros from padding in average pooling. + Can only be true for ``AVERAGE`` padding. + + See Also + -------- + add_pooling, add_global_pooling3d + """ + if self.spec and ( + not self.spec.specificationVersion + or self.spec.specificationVersion < _SPECIFICATION_VERSION_IOS_14 + ): + self.spec.specificationVersion = _SPECIFICATION_VERSION_IOS_14 + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer_params = spec_layer.pooling3d + + spec_layer_params.type = _NeuralNetwork_pb2.Pooling3DLayerParams.PoolingType3D.Value( + pooling_type.upper() + ) + + spec_layer_params.kernelDepth = kernel_depth + spec_layer_params.kernelHeight = kernel_height + spec_layer_params.kernelWidth = kernel_width + + spec_layer_params.strideDepth = stride_depth + spec_layer_params.strideHeight = stride_height + spec_layer_params.strideWidth = stride_width + + supported_padding_modes = {"CUSTOM", "VALID", "SAME"} + if padding_mode.upper() not in supported_padding_modes: + raise ValueError( + "Unsupported padding mode: %s. Must be one of %s" + % (padding_mode, supported_padding_modes) + ) + if padding_mode.upper() == "CUSTOM": + spec_layer_params.customPaddingFront = custom_padding_front + spec_layer_params.customPaddingBack = custom_padding_back + spec_layer_params.customPaddingTop = custom_padding_top + spec_layer_params.customPaddingBottom = custom_padding_bottom + spec_layer_params.customPaddingLeft = custom_padding_left + spec_layer_params.customPaddingRight = custom_padding_right + spec_layer_params.paddingType = _NeuralNetwork_pb2.Pooling3DLayerParams.Pooling3DPaddingType.Value( + padding_mode.upper() + ) + + spec_layer_params.countExcludePadding = average_pooling_count_excludes_padding + + return spec_layer + + def add_global_pooling3d(self, name, input_name, output_name, pooling_type): + """ + Add a layer to pool three spatial dimensions down to one value. + This behaves like a special case of Pooling3DLayerParams in which + the Kernel is the size of the input and there is no padding. + + Refer to the ``GlobalPooling3DLayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + pooling_type: str + Type of pooling performed. Can either be ``'MAX'`` OR ``'AVERAGE'``. 
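+
+        A minimal illustrative sketch (not part of the original source),
+        assuming a ``NeuralNetworkBuilder`` instance named ``builder``::
+
+            # Average each channel's entire depth x height x width volume
+            # down to a single value.
+            builder.add_global_pooling3d(
+                name="gap3d_1",
+                input_name="data",
+                output_name="out",
+                pooling_type="AVERAGE",
+            )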
+
+        See Also
+        --------
+        add_pooling, add_pooling3d
+        """
+        if self.spec and (
+            not self.spec.specificationVersion
+            or self.spec.specificationVersion < _SPECIFICATION_VERSION_IOS_14
+        ):
+            self.spec.specificationVersion = _SPECIFICATION_VERSION_IOS_14
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.globalPooling3d
+
+        spec_layer_params.type = _NeuralNetwork_pb2.GlobalPooling3DLayerParams.GlobalPoolingType3D.Value(
+            pooling_type.upper()
+        )
+
+        return spec_layer
+
+    def add_padding(
+        self,
+        name,
+        left=0,
+        right=0,
+        top=0,
+        bottom=0,
+        value=0,
+        input_name="data",
+        output_name="out",
+        padding_type="constant",
+    ):
+        """
+        Add a padding layer to the model that performs padding along spatial dimensions.
+
+        Refer to the ``PaddingLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        left: int
+            Number of elements to be padded on the left side of the input blob.
+        right: int
+            Number of elements to be padded on the right side of the input blob.
+        top: int
+            Number of elements to be padded on the top of the input blob.
+        bottom: int
+            Number of elements to be padded on the bottom of the input blob.
+        value: float
+            Value of the elements padded. Used only when ``padding_type = 'constant'``.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        padding_type: str
+            Type of the padding. Can be one of ``'constant'``, ``'reflection'``, or ``'replication'``.
+
+        See Also
+        --------
+        add_crop, add_convolution, add_pooling, add_constant_pad
+        """
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.padding
+
+        # Set the parameters
+        padding_type = (
+            padding_type.lower()
+            if isinstance(padding_type, str)
+            else padding_type
+        )
+        if padding_type == "constant":
+            spec_layer_params.constant.value = value
+        elif padding_type == "reflection":
+            spec_layer_params.reflection.MergeFromString(b"")
+        elif padding_type == "replication":
+            spec_layer_params.replication.MergeFromString(b"")
+        else:
+            raise ValueError("Unknown padding_type %s" % padding_type)
+
+        height_border = spec_layer_params.paddingAmounts.borderAmounts.add()
+        height_border.startEdgeSize = top
+        height_border.endEdgeSize = bottom
+        width_border = spec_layer_params.paddingAmounts.borderAmounts.add()
+        width_border.startEdgeSize = left
+        width_border.endEdgeSize = right
+        return spec_layer
+
+    def add_crop(
+        self, name, left, right, top, bottom, offset, input_names, output_name
+    ):
+        """
+        Add a cropping layer to the model.
+        The cropping layer has two functional modes:
+
+        - When it has 1 input blob, it crops the input blob based
+          on the 4 parameters ``[left, right, top, bottom]``.
+        - When it has 2 input blobs, it crops the first input blob based
+          on the dimension of the second blob with an offset.
+
+        Refer to the ``CropLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        left: int
+            Number of elements to be cropped on the left side of the input blob.
+            When the crop layer takes 2 inputs, this parameter is ignored.
+        right: int
+            Number of elements to be cropped on the right side of the input blob.
+            When the crop layer takes 2 inputs, this parameter is ignored.
+        top: int
+            Number of elements to be cropped on the top of the input blob.
+ When the crop layer takes 2 inputs, this parameter is ignored. + bottom: int + Number of elements to be cropped on the bottom of the input blob. + When the crop layer takes 2 inputs, this parameter is ignored. + offset: list of int + Offset along the height and width directions when the crop layer takes 2 inputs. Must be a list of length 2. + When the crop layer takes 1 input, this parameter is ignored. + input_names: list of str + The input blob names of this layer. Must be either a list of 1 string (1 input crop layer), + or a list of 2 strings (2-input crop layer). + output_name: str + The output blob name of this layer. + + See Also + -------- + add_padding, add_convolution, add_pooling + """ + spec_layer = self._add_generic_layer(name, input_names, [output_name]) + spec_layer_params = spec_layer.crop + + # Set the parameters + offset = [0, 0] if len(input_names) == 1 else offset + spec_layer_params.offset.extend(offset) + height_border = spec_layer_params.cropAmounts.borderAmounts.add() + height_border.startEdgeSize = top + height_border.endEdgeSize = bottom + width_border = spec_layer_params.cropAmounts.borderAmounts.add() + width_border.startEdgeSize = left + width_border.endEdgeSize = right + return spec_layer + + def add_simple_rnn( + self, + name, + W_h, + W_x, + b, + hidden_size, + input_size, + activation, + input_names, + output_names, + output_all=False, + reverse_input=False, + ): + """ + Add a simple recurrent layer to the model. + Refer to the ``SimpleRecurrentLayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + W_h: numpy.array + Weights of the recurrent layer's hidden state. + Must be of shape ``(hidden_size, hidden_size)``. + W_x: numpy.array + Weights of the recurrent layer's input. + Must be of shape ``(hidden_size, input_size)``. + b: numpy.array or None + Bias of the recurrent layer's output. If ``None``, bias is ignored. + Otherwise it must be of shape ``(hidden_size, )``. + hidden_size: int + Number of hidden units. This is equal to the number of channels of output shape. + input_size: int + Number of the number of channels of input shape. + activation: str + Activation function name. Can be one of the following option: + [``'RELU'``, ``'TANH'``, ``'SIGMOID'``, ``'SCALED_TANH'``, ``'SIGMOID_HARD'``, ``'LINEAR'``]. + See add_activation for more detailed description. + input_names: list of str + The input blob names list of this layer, in the order of ``[x, h_input]``. + output_names: list of str + The output blob names list of this layer, in the order of ``[y, h_output]``. + output_all: boolean + Whether the recurrent layer should output at every time step. + + - If False, the output is the result after the final state update. + - If True, the output is a sequence, containing outputs at all time steps. + + reverse_input: boolean + Whether the recurrent layer should process the input sequence in the reverse order. + + - If False, the input sequence order is not reversed. + - If True, the input sequence order is reversed. 
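+
+        The following is an illustrative sketch (not part of the original
+        source); ``builder``, the blob names, and the sizes are assumptions::
+
+            import numpy as np
+
+            hidden_size, input_size = 8, 4
+            builder.add_simple_rnn(
+                name="rnn_1",
+                W_h=np.zeros((hidden_size, hidden_size), dtype=np.float32),
+                W_x=np.zeros((hidden_size, input_size), dtype=np.float32),
+                b=None,
+                hidden_size=hidden_size,
+                input_size=input_size,
+                activation="TANH",
+                input_names=["x", "h_in"],
+                output_names=["y", "h_out"],
+            )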
+ + See Also + -------- + add_activation, add_gru, add_unilstm, add_bidirlstm + """ + spec_layer = self._add_generic_layer(name, input_names, output_names) + spec_layer_params = spec_layer.simpleRecurrent + spec_layer_params.reverseInput = reverse_input + + # set the parameters + spec_layer_params.inputVectorSize = input_size + spec_layer_params.outputVectorSize = hidden_size + if b is not None: + spec_layer_params.hasBiasVector = True + spec_layer_params.sequenceOutput = output_all + + activation_f = spec_layer_params.activation + _set_recurrent_activation(activation_f, activation) + + # Write the weights + spec_layer_params.weightMatrix.floatValue.extend(W_x.flatten()) + spec_layer_params.recursionMatrix.floatValue.extend(W_h.flatten()) + + if b is not None: + spec_layer_params.biasVector.floatValue.extend(b.flatten()) + return spec_layer + + def add_gru( + self, + name, + W_h, + W_x, + b, + hidden_size, + input_size, + input_names, + output_names, + activation="TANH", + inner_activation="SIGMOID_HARD", + output_all=False, + reverse_input=False, + ): + """ + Add a Gated-Recurrent Unit (GRU) layer to the model. + Refer to the ``GRULayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + W_h: [numpy.array] + List of recursion weight matrices. The ordering is ``[R_z, R_r, R_o]``, + where ``R_z``, ``R_r`` and ``R_o`` are weight matrices at update gate, + reset gate and output gate. + The shapes of these matrices are ``(hidden_size, hidden_size)``. + W_x: [numpy.array] + List of input weight matrices. The ordering is ``[W_z, W_r, W_o]``, + where ``W_z``, ``W_r``, and ``W_o`` are weight matrices at update gate, + reset gate and output gate. + The shapes of these matrices are ``(hidden_size, input_size)``. + b: [numpy.array] or None + List of biases of the GRU layer. The ordering is ``[b_z, b_r, b_o]``, + where ``b_z``, ``b_r``, and ``b_o`` are biases at update gate, + reset gate and output gate. + If ``None``, biases are ignored. Otherwise the shapes of the biases are ``(hidden_size, )``. + hidden_size: int + Number of hidden units. This is equal to the number of channels of output shape. + input_size: int + Number of the number of channels of input shape. + activation: str + Activation function used at the output gate. Can be one of the following options: + [``'RELU'``, ``'TANH'``, ``'SIGMOID'``, ``'SCALED_TANH'``, ``'SIGMOID_HARD'``, ``'LINEAR'``]. + Defaults to ``'TANH'``. + See add_activation for more detailed description. + inner_activation: str + Inner activation function used at update and reset gates. + Can be one of the following options: + [``'RELU'``, ``'TANH'``, ``'SIGMOID'``, ``'SCALED_TANH'``, ``'SIGMOID_HARD'``, ``'LINEAR'``]. + Defaults to ``'SIGMOID_HARD'``. + See add_activation for more detailed description. + input_names: list of str + The input blob names list of this layer, in the order of ``[x, h_input]``. + output_names: list of str + The output blob names list of this layer, in the order of ``[y, h_output]``. + output_all: boolean + Whether the recurrent layer should output at every time step. + + - If False, the output is the result after the final state update. + - If True, the output is a sequence, containing outputs at all time steps. + + reverse_input: boolean + Whether the recurrent layer should process the input sequence in the reverse order. + + - If False, the input sequence order is not reversed. + - If True, the input sequence order is reversed. 
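+
+        The following is an illustrative sketch (not part of the original
+        source); ``builder``, the blob names, and the sizes are assumptions::
+
+            import numpy as np
+
+            h, d = 8, 4
+            # One matrix per gate, ordered [update, reset, output].
+            W_h = [np.zeros((h, h), dtype=np.float32) for _ in range(3)]
+            W_x = [np.zeros((h, d), dtype=np.float32) for _ in range(3)]
+            builder.add_gru(
+                name="gru_1",
+                W_h=W_h,
+                W_x=W_x,
+                b=None,
+                hidden_size=h,
+                input_size=d,
+                input_names=["x", "h_in"],
+                output_names=["y", "h_out"],
+            )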
+ + See Also + -------- + add_activation, add_simple_rnn, add_unilstm, add_bidirlstm + """ + spec_layer = self._add_generic_layer(name, input_names, output_names) + spec_layer_params = spec_layer.gru + + # set the parameters + spec_layer_params.inputVectorSize = input_size + spec_layer_params.outputVectorSize = hidden_size + if b is not None: + spec_layer_params.hasBiasVectors = True + spec_layer_params.sequenceOutput = output_all + spec_layer_params.reverseInput = reverse_input + + activation_f = spec_layer_params.activations.add() + activation_g = spec_layer_params.activations.add() + _set_recurrent_activation(activation_f, inner_activation) + _set_recurrent_activation(activation_g, activation) + + # Write the weights + R_z, R_r, R_o = W_h + W_z, W_r, W_o = W_x + + spec_layer_params.updateGateWeightMatrix.floatValue.extend(W_z.flatten()) + spec_layer_params.resetGateWeightMatrix.floatValue.extend(W_r.flatten()) + spec_layer_params.outputGateWeightMatrix.floatValue.extend(W_o.flatten()) + + spec_layer_params.updateGateRecursionMatrix.floatValue.extend(R_z.flatten()) + spec_layer_params.resetGateRecursionMatrix.floatValue.extend(R_r.flatten()) + spec_layer_params.outputGateRecursionMatrix.floatValue.extend(R_o.flatten()) + + if b is not None: + b_z, b_r, b_o = b + spec_layer_params.updateGateBiasVector.floatValue.extend(b_z.flatten()) + spec_layer_params.resetGateBiasVector.floatValue.extend(b_r.flatten()) + spec_layer_params.outputGateBiasVector.floatValue.extend(b_o.flatten()) + return spec_layer + + def add_unilstm( + self, + name, + W_h, + W_x, + b, + hidden_size, + input_size, + input_names, + output_names, + inner_activation="SIGMOID", + cell_state_update_activation="TANH", + output_activation="TANH", + peep=None, + output_all=False, + forget_bias=False, + coupled_input_forget_gate=False, + cell_clip_threshold=50000.0, + reverse_input=False, + ): + """ + Add a Uni-directional LSTM layer to the model. + Refer to the ``UniDirectionalLSTMLayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + W_h: [numpy.array] + List of recursion weight matrices. The ordering is [R_i, R_f, R_o, R_z], + where R_i, R_f, R_o, R_z are weight matrices at input gate, forget gate, output gate and cell gate. + The shapes of these matrices are (hidden_size, hidden_size). + W_x: [numpy.array] + List of input weight matrices. The ordering is [W_i, W_f, W_o, W_z], + where W_i, W_f, W_o, W_z are weight matrices at input gate, forget gate, output gate and cell gate. + The shapes of these matrices are (hidden_size, input_size). + b: [numpy.array] or None + List of biases. The ordering is [b_i, b_f, b_o, b_z], + where b_i, b_f, b_o, b_z are biases at input gate, forget gate, output gate and cell gate. + If ``None``, biases are ignored. Otherwise the shapes of the biases are (hidden_size, ). + hidden_size: int + Number of hidden units. This is equal to the number of channels of output shape. + input_size: int + Number of the number of channels of input shape. + input_names: list of str + The input blob names list of this layer, in the order of [x, h_input, c_input]. + output_names: list of str + The output blob names list of this layer, in the order of [y, h_output, c_output]. + inner_activation: str + Inner activation function used at input and forget gate. Can be one of the following option: + ['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR']. 
+        cell_state_update_activation: str
+            Cell state update activation function used at the cell state update gate.
+            Can be one of the following options:
+            ['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
+        output_activation: str
+            Activation function used at the output gate. Can be one of the following options:
+            ['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
+        peep: [numpy.array] or None
+            List of peephole vectors. The ordering is [p_i, p_f, p_o],
+            where p_i, p_f, and p_o are peephole vectors at input gate, forget gate, and output gate.
+            The shapes of the peephole vectors are (hidden_size,).
+        output_all: boolean
+            Whether the LSTM layer should output at every time step.
+
+            - If False, the output is the result after the final state update.
+            - If True, the output is a sequence, containing outputs at all time steps.
+
+        forget_bias: boolean
+            If True, a vector of 1s is added to forget gate bias.
+        coupled_input_forget_gate: boolean
+            If True, the input gate and forget gate are coupled; that is, the forget gate is not used.
+        cell_clip_threshold: float
+            The limit on the maximum and minimum values on the cell state.
+            Defaults to 50000.0.
+        reverse_input: boolean
+            Whether the LSTM layer should process the input sequence in the reverse order.
+
+            - If False, the input sequence order is not reversed.
+            - If True, the input sequence order is reversed.
+
+        See Also
+        --------
+        add_activation, add_simple_rnn, add_gru, add_bidirlstm
+        """
+        spec_layer = self._add_generic_layer(name, input_names, output_names)
+        spec_layer_params = spec_layer.uniDirectionalLSTM
+        params = spec_layer_params.params
+        weight_params = spec_layer_params.weightParams
+
+        # set the parameters
+        spec_layer_params.inputVectorSize = input_size
+        spec_layer_params.outputVectorSize = hidden_size
+        params.sequenceOutput = output_all
+        if b is not None:
+            params.hasBiasVectors = True
+        if peep is not None:
+            params.hasPeepholeVectors = True
+        params.coupledInputAndForgetGate = coupled_input_forget_gate
+        params.cellClipThreshold = cell_clip_threshold
+        params.forgetBias = forget_bias
+
+        spec_layer_params.reverseInput = reverse_input
+
+        activation_f = spec_layer_params.activations.add()
+        activation_g = spec_layer_params.activations.add()
+        activation_h = spec_layer_params.activations.add()
+        _set_recurrent_activation(activation_f, inner_activation)
+        _set_recurrent_activation(activation_g, cell_state_update_activation)
+        _set_recurrent_activation(activation_h, output_activation)
+
+        # Write the weights
+        R_i, R_f, R_o, R_z = W_h
+        W_i, W_f, W_o, W_z = W_x
+
+        weight_params.inputGateWeightMatrix.floatValue.extend(W_i.flatten())
+        weight_params.forgetGateWeightMatrix.floatValue.extend(W_f.flatten())
+        weight_params.outputGateWeightMatrix.floatValue.extend(W_o.flatten())
+        weight_params.blockInputWeightMatrix.floatValue.extend(W_z.flatten())
+
+        weight_params.inputGateRecursionMatrix.floatValue.extend(R_i.flatten())
+        weight_params.forgetGateRecursionMatrix.floatValue.extend(R_f.flatten())
+        weight_params.outputGateRecursionMatrix.floatValue.extend(R_o.flatten())
+        weight_params.blockInputRecursionMatrix.floatValue.extend(R_z.flatten())
+
+        if b is not None:
+            b_i, b_f, b_o, b_z = b
+            weight_params.inputGateBiasVector.floatValue.extend(b_i.flatten())
+            weight_params.forgetGateBiasVector.floatValue.extend(b_f.flatten())
+            weight_params.outputGateBiasVector.floatValue.extend(b_o.flatten())
+            weight_params.blockInputBiasVector.floatValue.extend(b_z.flatten())
+
+        if peep is not None:
p_i, p_f, p_o = peep + weight_params.inputGatePeepholeVector.floatValue.extend(p_i.flatten()) + weight_params.forgetGatePeepholeVector.floatValue.extend(p_f.flatten()) + weight_params.outputGatePeepholeVector.floatValue.extend(p_o.flatten()) + + return spec_layer + + def add_bidirlstm( + self, + name, + W_h, + W_x, + b, + W_h_back, + W_x_back, + b_back, + hidden_size, + input_size, + input_names, + output_names, + inner_activation="SIGMOID", + cell_state_update_activation="TANH", + output_activation="TANH", + peep=None, + peep_back=None, + output_all=False, + forget_bias=False, + coupled_input_forget_gate=False, + cell_clip_threshold=50000.0, + ): + + """ + Add a Bi-directional LSTM layer to the model. + Refer to the ``BiDirectionalLSTMLayerParams`` message in the specification (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + W_h: [numpy.array] + List of recursion weight matrices for the forward layer. + The ordering is ``[R_i, R_f, R_o, R_z]``, + where ``R_i``, ``R_f``, ``R_o``, and ``R_z`` are weight matrices at + input gate, forget gate, output gate and cell gate. + The shapes of these matrices are ``(hidden_size, hidden_size)``. + W_x: [numpy.array] + List of input weight matrices for the forward layer. The ordering + is ``[W_i, W_f, W_o, W_z]``, + where ``W_i``, ``W_f``, ``W_o``, and ``W_z`` are weight matrices at + input gate, forget gate, output gate and cell gate. + The shapes of these matrices are ``(hidden_size, input_size)``. + b: [numpy.array] + List of biases for the forward layer. The ordering is + ``[b_i, b_f, b_o, b_z]``, + where ``b_i``, ``b_f``, ``b_o``, and ``b_z`` are biases at input + gate, forget gate, output gate and cell gate. + If ``None``, biases are ignored. Otherwise the shapes of the biases + are ``(hidden_size, )``. + W_h_back: [numpy.array] + List of recursion weight matrices for the backward layer. The + ordering is ``[R_i, R_f, R_o, R_z]``, + where ``R_i``, ``R_f``, ``R_o``, and ``R_z`` are weight matrices + at input gate, forget gate, output gate and cell gate. + The shapes of these matrices are ``(hidden_size, hidden_size)``. + W_x_back: [numpy.array] + List of input weight matrices for the backward layer. The ordering + is `[W_i, W_f, W_o, W_z]``, + where ``W_i``, ``W_f``, ``W_o``, and ``W_z`` are weight matrices + at input gate, forget gate, output gate and cell gate. + The shapes of these matrices are ``(hidden_size, input_size)``. + b_back: [numpy.array] + List of biases for the backward layer. The ordering is ``[b_i, b_f, b_o, b_z]``, + where ``b_i``, ``b_f``, ``b_o``, and ``b_z`` are biases at input + gate, forget gate, output gate and cell gate. + The shapes of the biases ``(hidden_size)``. + hidden_size: int + Number of hidden units. This is equal to the number of channels of output shape. + input_size: int + Number of the number of channels of input shape. + input_names: list of str + The input blob names of this layer, in the order of + ``[x, h_input, c_input, h_reverse_input, c_reverse_input]``. + output_names: list of str + The output blob names of this layer, in the order of + ``[y, h_output, c_output, h_reverse_output, c_reverse_output]``. + inner_activation: str + Inner activation function used at input and forget gate. Can be one + of the following options: + [``'RELU'``, ``'TANH'``, ``'SIGMOID'``, ``'SCALED_TANH'``, ``'SIGMOID_HARD'``, ``'LINEAR'``]. + Defaults to ``'SIGMOID'``. 
+        cell_state_update_activation: str
+            Cell state update activation function used at the cell state update gate.
+            Can be one of the following options:
+            [``'RELU'``, ``'TANH'``, ``'SIGMOID'``, ``'SCALED_TANH'``, ``'SIGMOID_HARD'``, ``'LINEAR'``].
+            Defaults to ``'TANH'``.
+        output_activation: str
+            Activation function used at the output gate. Can be one of the following options:
+            [``'RELU'``, ``'TANH'``, ``'SIGMOID'``, ``'SCALED_TANH'``, ``'SIGMOID_HARD'``, ``'LINEAR'``].
+            Defaults to ``'TANH'``.
+        peep: [numpy.array] or None
+            List of peephole vectors for the forward layer. The ordering
+            is ``[p_i, p_f, p_o]``,
+            where ``p_i``, ``p_f``, and ``p_o`` are peephole vectors at input
+            gate, forget gate, and output gate.
+            The shapes of the peephole vectors are ``(hidden_size,)``. Defaults to ``None``.
+        peep_back: [numpy.array] or None
+            List of peephole vectors for the backward layer. The ordering
+            is ``[p_i, p_f, p_o]``,
+            where ``p_i``, ``p_f``, and ``p_o`` are peephole vectors at input
+            gate, forget gate, and output gate.
+            The shapes of the peephole vectors are ``(hidden_size,)``. Defaults to ``None``.
+        output_all: boolean
+            Whether the LSTM layer should output at every time step. Defaults to ``False``.
+
+            - If ``False``, the output is the result after the final state update.
+            - If ``True``, the output is a sequence, containing outputs at all time steps.
+
+        forget_bias: boolean
+            If ``True``, a vector of 1s is added to forget gate bias. Defaults to ``False``.
+        coupled_input_forget_gate: boolean
+            If ``True``, the input gate and forget gate are coupled. That is, the
+            forget gate is not used.
+            Defaults to ``False``.
+        cell_clip_threshold: float
+            The limit on the maximum and minimum values on the cell state.
+            Defaults to 50000.0.
+
+        See Also
+        --------
+        add_activation, add_simple_rnn, add_gru, add_unilstm
+        """
+        spec_layer = self._add_generic_layer(name, input_names, output_names)
+        spec_layer_params = spec_layer.biDirectionalLSTM
+        params = spec_layer_params.params
+        weight_params = spec_layer_params.weightParams.add()
+        weight_params_back = spec_layer_params.weightParams.add()
+
+        # set the parameters
+        spec_layer_params.inputVectorSize = input_size
+        spec_layer_params.outputVectorSize = hidden_size
+        if b is not None:
+            params.hasBiasVectors = True
+        params.sequenceOutput = output_all
+        params.forgetBias = forget_bias
+        if peep is not None:
+            params.hasPeepholeVectors = True
+        params.coupledInputAndForgetGate = coupled_input_forget_gate
+        params.cellClipThreshold = cell_clip_threshold
+
+        # set activations
+        activation_f = spec_layer_params.activationsForwardLSTM.add()
+        activation_g = spec_layer_params.activationsForwardLSTM.add()
+        activation_h = spec_layer_params.activationsForwardLSTM.add()
+        _set_recurrent_activation(activation_f, inner_activation)
+        _set_recurrent_activation(activation_g, cell_state_update_activation)
+        _set_recurrent_activation(activation_h, output_activation)
+
+        activation_f_back = spec_layer_params.activationsBackwardLSTM.add()
+        activation_g_back = spec_layer_params.activationsBackwardLSTM.add()
+        activation_h_back = spec_layer_params.activationsBackwardLSTM.add()
+        _set_recurrent_activation(activation_f_back, inner_activation)
+        _set_recurrent_activation(activation_g_back, cell_state_update_activation)
+        _set_recurrent_activation(activation_h_back, output_activation)
+
+        # Write the forward lstm weights
+        R_i, R_f, R_o, R_z = W_h
+        W_i, W_f, W_o, W_z = W_x
+
+        weight_params.inputGateWeightMatrix.floatValue.extend(W_i.flatten())
+        weight_params.forgetGateWeightMatrix.floatValue.extend(W_f.flatten())
+        weight_params.outputGateWeightMatrix.floatValue.extend(W_o.flatten())
+        weight_params.blockInputWeightMatrix.floatValue.extend(W_z.flatten())
+
+        weight_params.inputGateRecursionMatrix.floatValue.extend(R_i.flatten())
+        weight_params.forgetGateRecursionMatrix.floatValue.extend(R_f.flatten())
+        weight_params.outputGateRecursionMatrix.floatValue.extend(R_o.flatten())
+        weight_params.blockInputRecursionMatrix.floatValue.extend(R_z.flatten())
+
+        if b is not None:
+            b_i, b_f, b_o, b_z = b
+            weight_params.inputGateBiasVector.floatValue.extend(b_i.flatten())
+            weight_params.forgetGateBiasVector.floatValue.extend(b_f.flatten())
+            weight_params.outputGateBiasVector.floatValue.extend(b_o.flatten())
+            weight_params.blockInputBiasVector.floatValue.extend(b_z.flatten())
+
+        if peep is not None:
+            p_i, p_f, p_o = peep
+            weight_params.inputGatePeepholeVector.floatValue.extend(p_i.flatten())
+            weight_params.forgetGatePeepholeVector.floatValue.extend(p_f.flatten())
+            weight_params.outputGatePeepholeVector.floatValue.extend(p_o.flatten())
+
+        # Write the backward lstm weights
+        R_i, R_f, R_o, R_z = W_h_back
+        W_i, W_f, W_o, W_z = W_x_back
+
+        weight_params_back.inputGateWeightMatrix.floatValue.extend(W_i.flatten())
+        weight_params_back.forgetGateWeightMatrix.floatValue.extend(W_f.flatten())
+        weight_params_back.outputGateWeightMatrix.floatValue.extend(W_o.flatten())
+        weight_params_back.blockInputWeightMatrix.floatValue.extend(W_z.flatten())
+
+        weight_params_back.inputGateRecursionMatrix.floatValue.extend(R_i.flatten())
+        weight_params_back.forgetGateRecursionMatrix.floatValue.extend(R_f.flatten())
+        weight_params_back.outputGateRecursionMatrix.floatValue.extend(R_o.flatten())
+        weight_params_back.blockInputRecursionMatrix.floatValue.extend(R_z.flatten())
+
+        if b_back is not None:
+            b_i, b_f, b_o, b_z = b_back
+            weight_params_back.inputGateBiasVector.floatValue.extend(b_i.flatten())
+            weight_params_back.forgetGateBiasVector.floatValue.extend(b_f.flatten())
+            weight_params_back.outputGateBiasVector.floatValue.extend(b_o.flatten())
+            weight_params_back.blockInputBiasVector.floatValue.extend(b_z.flatten())
+
+        if peep_back is not None:
+            p_i, p_f, p_o = peep_back
+            weight_params_back.inputGatePeepholeVector.floatValue.extend(p_i.flatten())
+            weight_params_back.forgetGatePeepholeVector.floatValue.extend(p_f.flatten())
+            weight_params_back.outputGatePeepholeVector.floatValue.extend(p_o.flatten())
+        return spec_layer
+
+    def add_flatten(self, name, mode, input_name, output_name):
+        """
+        Add a flatten layer. Only flattens the channel, height, and width axes;
+        leaves the sequence axis as is.
+        Refer to the ``FlattenLayerParams`` message in the
+        specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        mode: int
+
+            - If mode == 0, the flatten layer is in CHANNEL_FIRST mode.
+            - If mode == 1, the flatten layer is in CHANNEL_LAST mode.
+
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
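+
+        A minimal usage sketch (hypothetical blob names; assumes an existing
+        ``NeuralNetworkBuilder`` instance ``builder``)::
+
+            # Flatten a (C, H, W) blob to (C * H * W, 1, 1) in CHANNEL_FIRST order.
+            builder.add_flatten("flatten_1", mode=0, input_name="features",
+                                output_name="features_flat")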
+
+        See Also
+        --------
+        add_permute, add_reshape
+        """
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.flatten
+
+        # Set the parameters
+        if mode == 0:
+            spec_layer_params.mode = _NeuralNetwork_pb2.FlattenLayerParams.FlattenOrder.Value(
+                "CHANNEL_FIRST"
+            )
+        elif mode == 1:
+            spec_layer_params.mode = _NeuralNetwork_pb2.FlattenLayerParams.FlattenOrder.Value(
+                "CHANNEL_LAST"
+            )
+        else:
+            raise NotImplementedError("Unknown flatten mode %d " % mode)
+
+        return spec_layer
+
+    def add_slice(
+        self, name, input_name, output_name, axis, start_index=0, end_index=-1, stride=1
+    ):
+        """
+        Add a slice layer. Equivalent to the numpy slice ``[start_index:end_index:stride]``:
+        ``start_index`` is included, while ``end_index`` is exclusive.
+        Refer to the ``SliceLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        axis: str
+            Axis along which the input is sliced.
+            Allowed values: 'channel', 'height', 'width'.
+        start_index: int
+            Must be non-negative.
+        end_index: int
+            Negative indexing is supported.
+        stride: int
+            Must be positive.
+
+        See Also
+        --------
+        add_permute, add_reshape
+        """
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.slice
+
+        # Set the parameters
+        if start_index < 0:
+            raise ValueError(
+                "Invalid start_index value %d. Must be non-negative." % start_index
+            )
+        if stride < 1:
+            raise ValueError("Invalid stride value %d. Must be positive." % stride)
+
+        spec_layer_params.startIndex = start_index
+        spec_layer_params.endIndex = end_index
+        spec_layer_params.stride = stride
+
+        axis = axis.lower() if isinstance(axis, str) else axis
+        if axis == "channel":
+            spec_layer_params.axis = _NeuralNetwork_pb2.SliceLayerParams.SliceAxis.Value(
+                "CHANNEL_AXIS"
+            )
+        elif axis == "height":
+            spec_layer_params.axis = _NeuralNetwork_pb2.SliceLayerParams.SliceAxis.Value(
+                "HEIGHT_AXIS"
+            )
+        elif axis == "width":
+            spec_layer_params.axis = _NeuralNetwork_pb2.SliceLayerParams.SliceAxis.Value(
+                "WIDTH_AXIS"
+            )
+        else:
+            raise NotImplementedError("Unsupported Slice axis %s " % axis)
+        return spec_layer
+
+    def add_slice_by_size(self, name, input_names, output_name, axis, size):
+        """
+        Add a slice_by_size layer. Equivalent to the numpy slice ``[start_index:start_index + size]``.
+        The input is a list of two blob names: ``[input_tensor, begin_id]``.
+
+        For example, assume ``input_tensor`` has shape ``(2, 3, 4)``, ``axis=1``, and ``size=2``.
+        This produces ``input_tensor[:, begin_id:begin_id + 2, :]``.
+
+        Refer to the ``SliceBySizeLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+        axis: int
+            Axis along which the input is sliced.
+        size: int
+            The size of the slice to be taken from the input.
+
+        See Also
+        --------
+        add_slice, add_slice_static, add_slice_dynamic
+        """
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer_params = spec_layer.sliceBySize
+
+        if size < 1:
+            raise ValueError("Invalid size value %d. Must be positive." % size)
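+        # The begin index is read from the second input blob at runtime, so the
+        # slice window [begin_id, begin_id + size) can vary between predictions.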
+
+        spec_layer_params.axis = axis
+        spec_layer_params.size = size
+
+        return spec_layer
+
+    def add_reorganize_data(
+        self, name, input_name, output_name, mode="SPACE_TO_DEPTH", block_size=2
+    ):
+        """
+        Add a data reorganization layer of type "SPACE_TO_DEPTH", "DEPTH_TO_SPACE", or "PIXEL_SHUFFLE".
+        Refer to the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+
+        mode: str
+
+            - If mode == 'SPACE_TO_DEPTH': data is moved from the spatial to the channel dimension.
+              Input is spatially divided into non-overlapping blocks of size block_size x block_size
+              and data from each block is moved to the channel dimension.
+              Output CHW dimensions are: [C * block_size * block_size, H/block_size, W/block_size].
+
+            - If mode == 'DEPTH_TO_SPACE': data is moved from the channel to the spatial dimension.
+              Reverse of the operation 'SPACE_TO_DEPTH'.
+              Output CHW dimensions are: [C/(block_size * block_size), H * block_size, W * block_size].
+
+            - If mode == 'PIXEL_SHUFFLE': data is moved from the channel to the spatial dimension.
+              Reverse of the operation 'SPACE_TO_DEPTH', with a different channel
+              interleaving than 'DEPTH_TO_SPACE'.
+              Output CHW dimensions are: [C/(block_size * block_size), H * block_size, W * block_size].
+
+        block_size: int
+            Must be greater than 1. Must divide H and W, when mode is 'SPACE_TO_DEPTH'. (block_size * block_size)
+            must divide C when mode is 'DEPTH_TO_SPACE' or 'PIXEL_SHUFFLE'.
+
+        See Also
+        --------
+        add_flatten, add_reshape
+        """
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.reorganizeData
+
+        # Set the parameters
+        if block_size < 2:
+            raise ValueError(
+                "Invalid block_size value %d. Must be greater than 1." % block_size
+            )
+        spec_layer_params.blockSize = block_size
+
+        mode = mode.upper() if isinstance(mode, str) else mode
+        if mode == "SPACE_TO_DEPTH":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ReorganizeDataLayerParams.ReorganizationType.Value(
+                "SPACE_TO_DEPTH"
+            )
+        elif mode == "DEPTH_TO_SPACE":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ReorganizeDataLayerParams.ReorganizationType.Value(
+                "DEPTH_TO_SPACE"
+            )
+        elif mode == "PIXEL_SHUFFLE":
+            if self.spec and (
+                not self.spec.specificationVersion
+                or self.spec.specificationVersion < _SPECIFICATION_VERSION_IOS_14
+            ):
+                self.spec.specificationVersion = _SPECIFICATION_VERSION_IOS_14
+            spec_layer_params.mode = _NeuralNetwork_pb2.ReorganizeDataLayerParams.ReorganizationType.Value(
+                "PIXEL_SHUFFLE"
+            )
+        else:
+            raise NotImplementedError("Unknown reorganization mode %s." % mode)
+        return spec_layer
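+    # A usage sketch for add_reorganize_data (hypothetical blob names): rearrange
+    # 2x2 spatial blocks of "feat" into channels, turning (C, H, W) into
+    # (4 * C, H / 2, W / 2):
+    #
+    #     builder.add_reorganize_data("s2d_1", "feat", "feat_s2d",
+    #                                 mode="SPACE_TO_DEPTH", block_size=2)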
+
+    def add_batchnorm(
+        self,
+        name,
+        channels,
+        gamma,
+        beta,
+        mean=None,
+        variance=None,
+        input_name="data",
+        output_name="out",
+        compute_mean_var=False,
+        instance_normalization=False,
+        epsilon=1e-5,
+    ):
+        """
+        Add a batch normalization layer. The batch normalization operation is
+        defined as:
+
+        ``y = gamma * (x - mean) / sqrt(variance + epsilon) + beta``
+
+        Refer to the ``BatchnormLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        channels: int
+            Number of channels of the input blob.
+        gamma: numpy.array
+            Values of gamma. Must be numpy array of shape ``(channels, )``.
+        beta: numpy.array
+            Values of beta. Must be numpy array of shape ``(channels, )``.
+        mean: numpy.array
+            Means of the input blob on each channel.
+            Must be numpy array of shape ``(channels, )``.
+        variance: numpy.array
+            Variances of the input blob on each channel. Must be numpy array of shape ``(channels, )``.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        compute_mean_var: bool
+            Set to ``True`` if mean and variance are to be computed from the input data.
+        instance_normalization: bool
+            Set this and ``compute_mean_var`` to ``True`` to perform
+            instance normalization. That is, mean and variance are computed
+            from the single input instance.
+        epsilon: float
+            Value of epsilon. Defaults to ``1e-5`` if not specified.
+
+        See Also
+        --------
+        add_convolution, add_pooling, add_inner_product
+        """
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.batchnorm
+
+        # Set the parameters
+        spec_layer_params.channels = channels
+        spec_layer_params.gamma.floatValue.extend(gamma.flatten())
+        spec_layer_params.beta.floatValue.extend(beta.flatten())
+        spec_layer_params.epsilon = epsilon
+        spec_layer_params.computeMeanVar = compute_mean_var
+        spec_layer_params.instanceNormalization = instance_normalization
+
+        if compute_mean_var:
+            if not instance_normalization:
+                raise NotImplementedError(
+                    "Batch-instance norm is currently not supported"
+                )
+
+        if not compute_mean_var:
+            spec_layer_params.mean.floatValue.extend(mean.flatten())
+            spec_layer_params.variance.floatValue.extend(variance.flatten())
+
+        return spec_layer
+
+    def add_permute(self, name, dim, input_name, output_name):
+        """
+        Add a permute layer. Assumes that the input has dimensions in the order [Seq, C, H, W].
+        Refer to the ``PermuteLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        dim: tuple
+            The order in which to permute the input dimensions ``[Seq, C, H, W]``.
+            Must have length 4 and be a permutation of ``[0, 1, 2, 3]``.
+
+            Examples:
+
+            Let's say the input has shape ``[Seq, C, H, W]``.
+
+            If ``dim`` is set to ``[0, 3, 1, 2]``,
+            then the output has shape ``[W, C, H]``
+            and has the same sequence length as the input.
+
+            If ``dim`` is set to ``[3, 1, 2, 0]``,
+            and the input is a sequence of data
+            with length ``Seq`` and shape ``[C, 1, 1]``,
+            then the output is a unit sequence of data with shape ``[C, 1, Seq]``.
+
+            If ``dim`` is set to ``[0, 3, 2, 1]``,
+            the output is a reverse of the input: ``[C, H, W] -> [W, H, C]``.
+
+            If ``dim`` is not set, or is set to ``[0, 1, 2, 3]``,
+            the output is the same as the input.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+
+        See Also
+        --------
+        add_flatten, add_reshape
+        """
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.permute
+        spec_layer_params.axis.extend(list(dim))
+
+        if len(dim) != 4:
+            raise ValueError("Length of the 'dim' parameter must be equal to 4")
+        return spec_layer
+
+    def add_reshape(self, name, input_name, output_name, target_shape, mode):
+        """
+        Add a reshape layer.
+        Refer to the ``ReshapeLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        target_shape: tuple
+            Shape of the output blob. The product of ``target_shape`` must equal
+            the number of elements in the input blob.
+            Can be either length 3 (C,H,W) or length 4 (Seq,C,H,W).
+        mode: int
+
+            - If mode == 0, the reshape layer is in CHANNEL_FIRST mode.
+ - If mode == 1, the reshape layer is in CHANNEL_LAST mode. + + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + + See Also + -------- + add_flatten, add_permute + """ + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer_params = spec_layer.reshape + spec_layer_params.targetShape.extend(target_shape) + if mode == 0: + spec_layer_params.mode = _NeuralNetwork_pb2.ReshapeLayerParams.ReshapeOrder.Value( + "CHANNEL_FIRST" + ) + else: + spec_layer_params.mode = _NeuralNetwork_pb2.ReshapeLayerParams.ReshapeOrder.Value( + "CHANNEL_LAST" + ) + + if len(target_shape) != 4 and len(target_shape) != 3: + raise ValueError( + "Length of the 'target-shape' parameter must be equal to 3 or 4" + ) + self.rank_dict[output_name] = len(target_shape) + return spec_layer + + def add_reduce(self, name, input_name, output_name, axis, mode, epsilon=1e-6): + """ + Add a reduce layer. Applies the function specified by the parameter mode, + along dimension(s) specified by the parameter axis. + Refer to the ``ReduceLayerParams`` message in the specification (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + + axis: str + dimensions along which the reduction operation is applied. + Allowed values: 'CHW', 'HW', 'C', 'H', 'W' + + mode: str + Reduction operation to be applied. + Allowed values: + 'sum', 'avg', 'prod', 'logsum', 'sumsquare', 'L1', 'L2', 'max', 'min', 'argmax'. + 'argmax' is only supported with axis values 'C', 'H' and 'W'. + + epsilon: float + number that is added to the input when 'logsum' function is applied. + + See Also + -------- + add_activation + """ + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer_params = spec_layer.reduce + spec_layer_params.epsilon = epsilon + + mode = mode.lower() if isinstance(mode, str) else mode + if mode == "sum": + spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value( + "SUM" + ) + elif mode == "avg": + spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value( + "AVG" + ) + elif mode == "prod": + spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value( + "PROD" + ) + elif mode == "logsum": + spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value( + "LOGSUM" + ) + elif mode == "sumsquare": + spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value( + "SUMSQUARE" + ) + elif mode == "l1": + spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value( + "L1" + ) + elif mode == "l2": + spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value( + "L2" + ) + elif mode == "max": + spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value( + "MAX" + ) + elif mode == "min": + spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value( + "MIN" + ) + elif mode == "argmax": + spec_layer_params.mode = _NeuralNetwork_pb2.ReduceLayerParams.ReduceOperation.Value( + "ARGMAX" + ) + else: + raise NotImplementedError("Unknown reduction operation %s." 
% mode)
+
+        axis = axis.upper() if isinstance(axis, str) else axis
+        if axis == "CHW":
+            spec_layer_params.axis = _NeuralNetwork_pb2.ReduceLayerParams.ReduceAxis.Value(
+                "CHW"
+            )
+        elif axis == "HW":
+            spec_layer_params.axis = _NeuralNetwork_pb2.ReduceLayerParams.ReduceAxis.Value(
+                "HW"
+            )
+        elif axis == "C":
+            spec_layer_params.axis = _NeuralNetwork_pb2.ReduceLayerParams.ReduceAxis.Value(
+                "C"
+            )
+        elif axis == "H":
+            spec_layer_params.axis = _NeuralNetwork_pb2.ReduceLayerParams.ReduceAxis.Value(
+                "H"
+            )
+        elif axis == "W":
+            spec_layer_params.axis = _NeuralNetwork_pb2.ReduceLayerParams.ReduceAxis.Value(
+                "W"
+            )
+        else:
+            raise NotImplementedError("Unknown reduction axis %s." % axis)
+        return spec_layer
+
+    def add_lrn(self, name, input_name, output_name, alpha, beta, local_size, k=1.0):
+        """
+        Add an LRN (local response normalization) layer. Supports "across" channels normalization.
+        Refer to the ``LRNLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        alpha: float
+            Multiplicative constant in the denominator.
+        beta: float
+            Exponent of the normalizing term in the denominator.
+        k: float
+            Bias term in the denominator. Must be positive.
+        local_size: int
+            Size of the neighborhood along the channel axis.
+
+        See Also
+        --------
+        add_l2_normalize, add_mvn
+        """
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.lrn
+        spec_layer_params.alpha = alpha
+        spec_layer_params.beta = beta
+        spec_layer_params.localSize = local_size
+        spec_layer_params.k = k
+        return spec_layer
+
+    def add_mvn(
+        self,
+        name,
+        input_name,
+        output_name,
+        across_channels=True,
+        normalize_variance=True,
+        epsilon=1e-5,
+    ):
+        """
+        Add an MVN (mean variance normalization) layer. Computes mean, variance and normalizes the input.
+        Refer to the ``MeanVarianceNormalizeLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+
+        across_channels: boolean
+            If False, each channel plane is normalized separately.
+            If True, mean/variance is computed across all C, H and W dimensions.
+
+        normalize_variance: boolean
+            If False, only mean subtraction is performed.
+
+        epsilon: float
+            Small bias to avoid division by zero.
+
+
+        See Also
+        --------
+        add_l2_normalize, add_lrn
+        """
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+
+        spec_layer_params = spec_layer.mvn
+        spec_layer_params.acrossChannels = across_channels
+        spec_layer_params.normalizeVariance = normalize_variance
+        spec_layer_params.epsilon = epsilon
+        return spec_layer
+
+    def add_l2_normalize(self, name, input_name, output_name, epsilon=1e-5):
+        """
+        Add an L2 normalize layer. Normalizes the input by the L2 norm, that is, divides by the
+        square root of the sum of squares of all elements of the input along the C, H and W dimensions.
+        Refer to the ``L2NormalizeLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+
+        epsilon: float
+            Small bias to avoid division by zero.
+
+
+        See Also
+        --------
+        add_mvn, add_lrn
+        """
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.l2normalize
+        spec_layer_params.epsilon = epsilon
+        return spec_layer
+
+    def add_unary(
+        self,
+        name,
+        input_name,
+        output_name,
+        mode,
+        alpha=1.0,
+        shift=0,
+        scale=1.0,
+        epsilon=None,
+    ):
+        """
+        Add a unary layer. Applies the specified function (``mode``) to all the elements of the input.
+        Prior to the application of the function, the input can be scaled and shifted by using the
+        ``scale`` and ``shift`` parameters.
+        Refer to the ``UnaryFunctionLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+
+        mode: str
+            Unary function.
+            Allowed values: 'sqrt', 'rsqrt', 'inverse', 'power', 'exp', 'log', 'abs', 'threshold'.
+
+        alpha: float
+            Constant used with modes 'power' and 'threshold'.
+
+        shift, scale: float
+            The input is modified by scale and shift prior to the application of the unary function.
+
+        epsilon: float
+            Small bias to prevent division by zero.
+
+        See Also
+        --------
+        add_activation
+        """
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.unary
+        if epsilon is None:
+            # Use a default epsilon of 1e-4, instead of 1e-6, if mode is "rsqrt" or "inverse"
+            if mode == "inverse" or mode == "rsqrt":
+                epsilon = 1e-4
+            elif mode == "log":
+                epsilon = 1e-45
+            else:
+                epsilon = 1e-6
+        spec_layer_params.epsilon = epsilon
+        spec_layer_params.alpha = alpha
+        spec_layer_params.shift = shift
+        spec_layer_params.scale = scale
+
+        mode = mode.lower() if isinstance(mode, str) else mode
+        if mode == "sqrt":
+            spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value(
+                "SQRT"
+            )
+        elif mode == "rsqrt":
+            spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value(
+                "RSQRT"
+            )
+        elif mode == "inverse":
+            spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value(
+                "INVERSE"
+            )
+        elif mode == "power":
+            spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value(
+                "POWER"
+            )
+        elif mode == "exp":
+            spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value(
+                "EXP"
+            )
+        elif mode == "log":
+            spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value(
+                "LOG"
+            )
+        elif mode == "abs":
+            spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value(
+                "ABS"
+            )
+        elif mode == "threshold":
+            spec_layer_params.type = _NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Value(
+                "THRESHOLD"
+            )
+        else:
+            raise NotImplementedError("Unknown unary function %s " % mode)
+        return spec_layer
+
+    def add_split(self, name, input_name, output_names):
+        """
+        Add a split layer that uniformly splits the input along the channel dimension
+        to produce multiple outputs.
+        Refer to the ``SplitLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_names: list of str
+            List of output blob names of this layer.
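+
+        A usage sketch (hypothetical blob names): split a 4-channel blob into
+        two 2-channel blobs::
+
+            builder.add_split("split_1", input_name="feat",
+                              output_names=["feat_a", "feat_b"])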
+
+        See Also
+        --------
+        add_elementwise
+        """
+        spec_layer = self._add_generic_layer(name, [input_name], output_names)
+        spec_layer_params = spec_layer.split
+        spec_layer_params.nOutputs = len(output_names)
+        return spec_layer
+
+    def add_load_constant(self, name, output_name, constant_value, shape):
+        """
+        Add a load constant layer.
+        Refer to the ``LoadConstantLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+
+        output_name: str
+            The output blob name of this layer.
+
+        constant_value: numpy.array
+            Value of the constant as a numpy array.
+
+        shape: list of int or tuple of int
+            List of ints representing the shape of the constant. Must be of length 3: [C,H,W].
+
+
+        See Also
+        --------
+        add_elementwise
+        """
+        spec_layer = self._add_generic_layer(name, [], [output_name])
+        spec_layer_params = spec_layer.loadConstant
+
+        data = spec_layer_params.data
+        data.floatValue.extend(constant_value.flatten())
+
+        spec_layer_params.shape.extend(shape)
+
+        self.rank_dict[output_name] = 5
+        if len(data.floatValue) != _np.prod(shape):
+            raise ValueError(
+                "Dimensions of 'shape' do not match the size of the provided constant"
+            )
+        if not self._disable_rank5_shape_mapping:
+            if len(shape) != 3:
+                raise ValueError("'shape' must be of length 3")
+        return spec_layer
+
+    def add_custom(self, name, input_names, output_names, custom_proto_spec=None):
+        """
+        Add a custom layer.
+        Refer to the ``CustomLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+
+        input_names: list of str
+            The input blob names to this layer.
+
+        output_names: list of str
+            The output blob names from this layer.
+
+        custom_proto_spec: CustomLayerParams
+            A protobuf CustomLayerParams message. This can also be left blank and filled in later.
+        """
+        # custom layers require a newer specification version
+        from coremltools import _MINIMUM_CUSTOM_LAYER_SPEC_VERSION
+
+        if self.spec:
+            self.spec.specificationVersion = max(
+                self.spec.specificationVersion, _MINIMUM_CUSTOM_LAYER_SPEC_VERSION
+            )
+
+        spec_layer = self._add_generic_layer(name, input_names, output_names)
+
+        spec_layer.custom.MergeFromString(b"")
+        if custom_proto_spec:
+            spec_layer.custom.CopyFrom(custom_proto_spec)
+        return spec_layer
+
+    def add_resize_bilinear(
+        self,
+        name,
+        input_name,
+        output_name,
+        target_height=1,
+        target_width=1,
+        mode="ALIGN_ENDPOINTS_MODE",
+    ):
+        """
+        Add a resize bilinear layer to the model, which resizes the input to a
+        given spatial size using bilinear interpolation.
+        Refer to the ``ResizeBilinearLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        target_height: int
+            Output height dimension.
+        target_width: int
+            Output width dimension.
+        mode: str
+            The following values are supported: 'STRICT_ALIGN_ENDPOINTS_MODE', 'ALIGN_ENDPOINTS_MODE', 'UPSAMPLE_MODE', 'ROI_ALIGN_MODE'.
+            This parameter determines the sampling grid used for bilinear interpolation.
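+
+        A usage sketch (hypothetical blob names): upsample a feature map to
+        64 x 64 using the default sampling grid::
+
+            builder.add_resize_bilinear("resize_1", input_name="feat",
+                                        output_name="feat_64",
+                                        target_height=64, target_width=64)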
+
+        See Also
+        --------
+        add_upsample
+        """
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.resizeBilinear
+        spec_layer_params.targetSize.append(target_height)
+        spec_layer_params.targetSize.append(target_width)
+        mode = mode.upper() if isinstance(mode, str) else mode
+        if mode == "ALIGN_ENDPOINTS_MODE":
+            spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value(
+                "ALIGN_ENDPOINTS_MODE"
+            )
+        elif mode == "STRICT_ALIGN_ENDPOINTS_MODE":
+            spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value(
+                "STRICT_ALIGN_ENDPOINTS_MODE"
+            )
+        elif mode == "UPSAMPLE_MODE":
+            spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value(
+                "UPSAMPLE_MODE"
+            )
+        elif mode == "ROI_ALIGN_MODE":
+            spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value(
+                "ROI_ALIGN_MODE"
+            )
+        else:
+            raise ValueError("Unsupported resize bilinear mode %s" % mode)
+        return spec_layer
+
+    def add_crop_resize(
+        self,
+        name,
+        input_names,
+        output_name,
+        target_height=1,
+        target_width=1,
+        mode="STRICT_ALIGN_ENDPOINTS_MODE",
+        normalized_roi=False,
+        box_indices_mode="CORNERS_HEIGHT_FIRST",
+        spatial_scale=1.0,
+    ):
+        """
+        Add a crop resize layer to the model. A layer that extracts cropped spatial patches or RoIs (regions of interest)
+        from the input and resizes them to a pre-specified size using bilinear interpolation.
+        Note that the RoI Align layer can be implemented with this layer followed by a pooling layer.
+        Refer to the ``CropResizeLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+
+        name: str
+            The name of this layer.
+
+        input_names: list of str
+            * Must be a list of two names: image feature map and crop indices/RoI input.
+            * The first input corresponds to a blob with shape ``[1, Batch, C, H_in, W_in]``.
+              This represents a batch of input image feature data with ``C`` channels.
+            * The second input shape must be ``[N, 1, 4, 1, 1]`` or ``[N, 1, 5, 1, 1]``.
+              This represents the bounding box coordinates for ``N`` patches/RoIs.
+            * ``N``: number of patches/RoIs to be extracted.
+            * If RoI shape = ``[N, 1, 4, 1, 1]``, the channel axis corresponds
+              to the four coordinates specifying the bounding box.
+              All the ``N`` RoIs are extracted from all the batches of the input.
+            * If RoI shape = ``[N, 1, 5, 1, 1]``, the first element of the
+              channel axis specifies the input batch id from which to extract the RoI and
+              must be in the interval ``[0, Batch - 1]``. That is, the ``n``-th RoI is
+              extracted from the ``RoI[n,0,0,0]``-th input batch id.
+              The last four elements of the channel axis specify the
+              bounding box coordinates.
+
+        output_name: str
+            The output blob name of this layer.
+
+        target_height: int
+            Output height dimension.
+
+        target_width: int
+            Output width dimension.
+
+        mode: str
+            * The following values are supported:
+              ``'STRICT_ALIGN_ENDPOINTS_MODE'``, ``'ALIGN_ENDPOINTS_MODE'``,
+              ``'UPSAMPLE_MODE'``, ``'ROI_ALIGN_MODE'``.
+            * This parameter determines the sampling grid used for bilinear interpolation.
+
+        normalized_roi: bool
+            * If true the bounding box coordinates must be in the interval ``[0, 1]``.
+              They are scaled by ``(input_height - 1)``, ``(input_width - 1)``;
+              that is, based on the input spatial dimensions.
+ * If false the bounding box coordinates must be in the interval + ``[0, input_height - 1]`` and ``[0, input_width - 1]``, + respectively for height and width dimensions. + + box_indices_mode: str + * The following values are supported: + ``'CORNERS_HEIGHT_FIRST'``, ``'CORNERS_WIDTH_FIRST'``, + ``'CENTER_SIZE_HEIGHT_FIRST'``, ``'CENTER_SIZE_WIDTH_FIRST'``. + * Representation used to interpret the bounding box coordinates (RoI) input. + * ``'CORNERS_HEIGHT_FIRST'``: ``[h_start, w_start, h_end, w_end]`` + * ``'CORNERS_WIDTH_FIRST'``: ``[w_start, h_start, w_end, h_end]`` + * ``'CENTER_SIZE_HEIGHT_FIRST'``: ``[h_center, w_center, box_height, box_width]`` + * ``'CENTER_SIZE_WIDTH_FIRST'``: ``[w_center, h_center, box_width, box_height]`` + + spatial_scale: float + Additional spatial scale that multiplies the bounding box coordinates. + Generally used while implementing the RoI Align layer, + which uses unnormalized RoI coordinates along with a spatial scale less than or equal to 1. + + See Also + -------- + add_resize_bilinear, add_crop + """ + spec_layer = self._add_generic_layer(name, input_names, [output_name]) + spec_layer_params = spec_layer.cropResize + spec_layer_params.targetSize.append(target_height) + spec_layer_params.targetSize.append(target_width) + spec_layer_params.normalizedCoordinates = normalized_roi + spec_layer_params.spatialScale = spatial_scale + + mode = mode.upper() if isinstance(mode, str) else mode + box_indices_mode = ( + box_indices_mode.upper() + if isinstance(box_indices_mode, str) + else box_indices_mode + ) + + if mode == "ALIGN_ENDPOINTS_MODE": + spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value( + "ALIGN_ENDPOINTS_MODE" + ) + elif mode == "STRICT_ALIGN_ENDPOINTS_MODE": + spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value( + "STRICT_ALIGN_ENDPOINTS_MODE" + ) + elif mode == "UPSAMPLE_MODE": + spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value( + "UPSAMPLE_MODE" + ) + elif mode == "ROI_ALIGN_MODE": + spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value( + "ROI_ALIGN_MODE" + ) + else: + raise ValueError("Unsupported crop resize mode %s" % mode) + + if box_indices_mode == "CORNERS_HEIGHT_FIRST": + spec_layer_params.boxIndicesMode.boxMode = _NeuralNetwork_pb2.BoxCoordinatesMode.Coordinates.Value( + "CORNERS_HEIGHT_FIRST" + ) + elif box_indices_mode == "CORNERS_WIDTH_FIRST": + spec_layer_params.boxIndicesMode.boxMode = _NeuralNetwork_pb2.BoxCoordinatesMode.Coordinates.Value( + "CORNERS_WIDTH_FIRST" + ) + elif box_indices_mode == "CENTER_SIZE_HEIGHT_FIRST": + spec_layer_params.boxIndicesMode.boxMode = _NeuralNetwork_pb2.BoxCoordinatesMode.Coordinates.Value( + "CENTER_SIZE_HEIGHT_FIRST" + ) + elif box_indices_mode == "CENTER_SIZE_WIDTH_FIRST": + spec_layer_params.boxIndicesMode.boxMode = _NeuralNetwork_pb2.BoxCoordinatesMode.Coordinates.Value( + "CENTER_SIZE_WIDTH_FIRST" + ) + else: + raise ValueError( + "Unsupported crop resize box indices mode %s" % box_indices_mode + ) + return spec_layer + + def set_pre_processing_parameters( + self, + image_input_names=None, + is_bgr=False, + red_bias=0.0, + green_bias=0.0, + blue_bias=0.0, + gray_bias=0.0, + image_scale=1.0, + image_format="NCHW", + ): + """ + Add a pre-processing parameters layer to the neural network object. 
+ + Parameters + ---------- + image_input_names: list of str + Name of input blobs that are images + + is_bgr: boolean or dict() + Channel order for input blobs that are images. BGR if True else RGB. + To specify a different value for each image input, + provide a dictionary with input names as keys. + + red_bias: float or dict() + Image re-centering parameter (red channel) + + blue_bias: float or dict() + Image re-centering parameter (blue channel) + + green_bias: float or dict() + Image re-centering parameter (green channel) + + gray_bias: float or dict() + Image re-centering parameter (for grayscale images) + + image_scale: float or dict() + Value by which to scale the images. + + image_format: str + Image format, either 'NCHW' / 'NHWC' + + See Also + -------- + set_input, set_output, set_class_labels + """ + if not image_input_names: + return # nothing to do here + + image_format = ( + image_format.upper() + if isinstance(image_format, str) + else image_format + ) + if image_format != "NCHW" and image_format != "NHWC": + raise ValueError( + "Input image format must be either 'NCHW' or 'NHWC'. Provided {}".format( + image_format + ) + ) + + if not isinstance(is_bgr, dict): + is_bgr = dict.fromkeys(image_input_names, is_bgr) + if not isinstance(red_bias, dict): + red_bias = dict.fromkeys(image_input_names, red_bias) + if not isinstance(blue_bias, dict): + blue_bias = dict.fromkeys(image_input_names, blue_bias) + if not isinstance(green_bias, dict): + green_bias = dict.fromkeys(image_input_names, green_bias) + if not isinstance(gray_bias, dict): + gray_bias = dict.fromkeys(image_input_names, gray_bias) + if not isinstance(image_scale, dict): + image_scale = dict.fromkeys(image_input_names, image_scale) + + # Raise error if any key in image preprocessing parameters + # are not in image_input_names. + def check_valid_preprocessing_keys(input, target, input_name): + for key in input: + if not key in target: + raise ValueError("Invalid key {} in {}.".format(key, input_name)) + + target = image_input_names + check_valid_preprocessing_keys(is_bgr, target, "is_bgr") + check_valid_preprocessing_keys(red_bias, target, "red_bias") + check_valid_preprocessing_keys(blue_bias, target, "blue_bias") + check_valid_preprocessing_keys(green_bias, target, "green_bias") + check_valid_preprocessing_keys(gray_bias, target, "gray_bias") + check_valid_preprocessing_keys(image_scale, target, "image_scale") + + spec = self.spec + + # Add image inputs + for input_ in spec.description.input: + if input_.name in image_input_names: + if input_.type.WhichOneof("Type") == "multiArrayType": + array_shape = tuple(input_.type.multiArrayType.shape) + + if len(array_shape) == 4: + input_indices = ( + [0, 1, 2, 3] if image_format == "NCHW" else [0, 3, 1, 2] + ) + elif len(array_shape) == 3: + # Adding dummy index for 'batch' for compatibility + input_indices = ( + [0, 0, 1, 2] if image_format == "NCHW" else [0, 2, 0, 1] + ) + else: + raise ValueError( + "Invalid input shape. Input of rank {}, but expecting input of either rank 3 or rank 4".format( + len(array_shape) + ) + ) + + # Extract image shape depending on input format + _, channels, height, width = [array_shape[e] for e in input_indices] + + if image_format == "NHWC": + # If input format is 'NHWC' for TF model, it will be + # 'NCHW' for CoreML model. 
Therefore, add transpose to + # NHWC after the input and replace all use of input + layers = self.nn_spec.layers + complement_transpose = True + transpose_names = set() + transpose_outputs = [] + for layer_ in layers: + if ( + layer_.HasField("transpose") + and layer_.input[0] == input_.name + ): + transpose_order = list(layer_.transpose.axes) + if transpose_order == [ + 0, + 3, + 1, + 2, + ] or transpose_order == [2, 0, 1]: + transpose_names.add(layer_.name) + transpose_outputs += list(layer_.output) + else: + complement_transpose = False + break + else: + for i in layer_.input: + if i == input_.name: + complement_transpose = False + break + if complement_transpose: + for layer_ in layers: + for i in range(len(layer_.input)): + if layer_.input[i] in transpose_names: + layer_.input[i] = input_.name + for layer_ in layers: + if layer_.name == input_.name: + del layer_.output[:] + layer_.output.extend(transpose_outputs) + break + while len(transpose_names) > 0: + for idx, layer_ in enumerate(layers): + if layer_.name in transpose_names: + del layers[idx] + transpose_names.remove(layer_.name) + else: + axes = [1, 2, 0] + if len(array_shape) == 4: + axes = [0, 2, 3, 1] + input_transpose = input_.name + "_to_nhwc" + transpose_layer = self.add_transpose( + name=input_transpose, + axes=axes, + input_name=input_.name, + output_name=input_transpose, + ) + layers.insert(0, layers.pop()) + for layer_ in layers: + for i in range(len(layer_.input)): + if layer_.name == input_transpose: + continue + if layer_.input[i] == input_.name: + layer_.input[i] = input_transpose + + # TODO: If input is not rank 3 or 4, then accordingly handle + # e.g. for rank-2 input, squeeze additional dimension in case of Gray scale image + if channels == 1: + input_.type.imageType.colorSpace = _FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value( + "GRAYSCALE" + ) + elif channels == 3: + if input_.name in is_bgr: + if is_bgr[input_.name]: + input_.type.imageType.colorSpace = _FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value( + "BGR" + ) + else: + input_.type.imageType.colorSpace = _FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value( + "RGB" + ) + else: + input_.type.imageType.colorSpace = _FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value( + "RGB" + ) + else: + raise ValueError( + "Channel Value %d not supported for image inputs" % channels + ) + input_.type.imageType.width = width + input_.type.imageType.height = height + + preprocessing = self.nn_spec.preprocessing.add() + preprocessing.featureName = input_.name + scaler = preprocessing.scaler + if input_.name in image_scale: + scaler.channelScale = image_scale[input_.name] + else: + scaler.channelScale = 1.0 + if input_.name in red_bias: + scaler.redBias = red_bias[input_.name] + if input_.name in blue_bias: + scaler.blueBias = blue_bias[input_.name] + if input_.name in green_bias: + scaler.greenBias = green_bias[input_.name] + if input_.name in gray_bias: + scaler.grayBias = gray_bias[input_.name] + + def add_transpose(self, name, axes, input_name, output_name): + """ + Add a N-D transpose layer with axes as a parameter. + Refer to the ``TransposeLayerParams`` message in the specification (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + + axes: list of int or tuple of int + The list containing a permutation of "[0,1,2,...,N-1]" where N is the rank of input/output tensor. + + input_name: str + The input blob name of this layer. + + output_name: str + The output blob name of this layer. 
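+
+        A usage sketch (hypothetical blob names): swap the last two axes of a
+        rank-4 tensor. Negative axes are normalized by adding the rank, so
+        ``(0, 1, -1, -2)`` is equivalent to ``(0, 1, 3, 2)``::
+
+            builder.add_transpose("transpose_1", axes=(0, 1, -1, -2),
+                                  input_name="x", output_name="x_t")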
+
+        See Also
+        --------
+        add_permute, add_reshape
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+
+        rank = len(axes)
+        axes = [rank + axis if axis < 0 else axis for axis in axes]
+        spec_layer.transpose.axes.extend(axes)
+
+        return spec_layer
+
+    def add_softmax_nd(self, name, input_name, output_name, axis):
+        """
+        Add a softmax_nd layer to the model that performs the softmax operation along
+        the given axis.
+        Refer to the ``SoftmaxNDLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        axis: int
+            Axis to perform the softmax operation on.
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.softmaxND
+        spec_layer_params.axis = axis
+        return spec_layer
+
+    def add_concat_nd(self, name, input_names, output_name, axis, interleave=False):
+        """
+        Add a concat_nd layer to the model that performs concatenation along the
+        given axis.
+        Refer to the ``ConcatNDLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+        axis: int
+            Axis to perform the concat operation on.
+        interleave : bool
+            (Only available in Core ML Specification >= 5, that is iOS >= 14, macOS >= 11.0.)
+            If true, concatenate by interleaving the inputs.
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer_params = spec_layer.concatND
+        spec_layer_params.axis = axis
+        if interleave:
+            spec_layer_params.interleave = True
+            if self.spec:
+                self.spec.specificationVersion = max(self.spec.specificationVersion, _SPECIFICATION_VERSION_IOS_14)
+        return spec_layer
+
+    def add_erf(self, name, input_name, output_name):
+        """
+        Add an erf function (gaussian error function) layer to the model.
+        Refer to the ``ErfLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer.erf.MergeFromString(b"")
+        return spec_layer
+
+    def add_gelu(self, name, input_name, output_name, mode="EXACT"):
+        """
+        Add a GELU (gaussian error linear unit) activation layer, which is:
+        ``0.5 * x * (1 + erf(x / sqrt(2)))``.
+        Refer to the ``GeluLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        mode: str, optional
+            Gelu mode in [EXACT | TANH_APPROXIMATION | SIGMOID_APPROXIMATION], default EXACT.
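+
+        A usage sketch (hypothetical blob names)::
+
+            builder.add_gelu("gelu_1", input_name="x", output_name="y",
+                             mode="TANH_APPROXIMATION")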
+ """ + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer_params = spec_layer.gelu + + if mode == "EXACT": + spec_layer_params.mode = _NeuralNetwork_pb2.GeluLayerParams.GeluMode.Value( + "EXACT" + ) + elif mode == "TANH_APPROXIMATION": + spec_layer_params.mode = _NeuralNetwork_pb2.GeluLayerParams.GeluMode.Value( + "TANH_APPROXIMATION" + ) + elif mode == "SIGMOID_APPROXIMATION": + spec_layer_params.mode = _NeuralNetwork_pb2.GeluLayerParams.GeluMode.Value( + "SIGMOID_APPROXIMATION" + ) + else: + raise ValueError("Unsupported Gelu mode %s" % mode) + return spec_layer + + def add_sin(self, name, input_name, output_name): + """ + Add a sin layer to the model that computes element-wise sine for the + input tensor. + Refer to the ``SinLayerParams`` message in the specification (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + + See Also + -------- + add_sinh, add_asin, add_asinh + """ + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer.sin.MergeFromString(b"") + return spec_layer + + def add_cos(self, name, input_name, output_name): + """ + Add a cos layer to the model that computes element-wise cosine for the + input tensor. + Refer to the ``CosLayerParams`` message in the specification (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + + See Also + -------- + add_cosh, add_acos, add_acosh + """ + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer.cos.MergeFromString(b"") + return spec_layer + + def add_tan(self, name, input_name, output_name): + """ + Add a tan layer to the model that computes element-wise tangent for the + input tensor. + Refer to the ``TanLayerParams`` message in the specification (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + + See Also + -------- + add_tanh, add_atan, add_atanh + """ + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer.tan.MergeFromString(b"") + return spec_layer + + def add_asin(self, name, input_name, output_name): + """ + Add an asin layer to the model that computes element-wise arc-sine for + the input tensor. + Refer to the ``AsinLayerParams`` message in the specification (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + + See Also + -------- + add_sin, add_sinh, add_asinh + """ + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer.asin.MergeFromString(b"") + return spec_layer + + def add_acos(self, name, input_name, output_name): + """ + Add an acos layer to the model that computes element-wise arc-cosine + for the input tensor. + Refer to the ``AcosLayerParams`` message in the specification (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. 
+
+        See Also
+        --------
+        add_cos, add_cosh, add_acosh
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer.acos.MergeFromString(b"")
+        return spec_layer
+
+    def add_atan(self, name, input_name, output_name):
+        """
+        Add an atan layer to the model that computes element-wise arc-tangent
+        for the input tensor.
+        Refer to the ``AtanLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+
+        See Also
+        --------
+        add_tan, add_tanh, add_atanh
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer.atan.MergeFromString(b"")
+        return spec_layer
+
+    def add_sinh(self, name, input_name, output_name):
+        """
+        Add a sinh layer to the model that computes element-wise hyperbolic sine for the input tensor.
+        Refer to the ``SinhLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+
+        See Also
+        --------
+        add_sin, add_asin, add_asinh
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer.sinh.MergeFromString(b"")
+        return spec_layer
+
+    def add_cosh(self, name, input_name, output_name):
+        """
+        Add a cosh layer to the model that computes element-wise hyperbolic
+        cosine for the input tensor.
+        Refer to the ``CoshLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+
+        See Also
+        --------
+        add_cos, add_acos, add_acosh
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer.cosh.MergeFromString(b"")
+        return spec_layer
+
+    def add_tanh(self, name, input_name, output_name):
+        """
+        Add a tanh layer to the model that computes element-wise hyperbolic
+        tangent for the input tensor.
+        Refer to the ``TanhLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+
+        See Also
+        --------
+        add_tan, add_atan, add_atanh
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer.tanh.MergeFromString(b"")
+        return spec_layer
+
+    def add_asinh(self, name, input_name, output_name):
+        """
+        Add an asinh layer to the model that computes element-wise inverse
+        hyperbolic sine for the input tensor.
+        Refer to the ``AsinhLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+
+        See Also
+        --------
+        add_sin, add_sinh, add_asin
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer.asinh.MergeFromString(b"")
+        return spec_layer
+
+    def add_acosh(self, name, input_name, output_name):
+        """
+        Add an acosh layer to the model that computes element-wise inverse
+        hyperbolic cosine for the input tensor.
+        Refer to the ``AcoshLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+
+        See Also
+        --------
+        add_cos, add_cosh, add_acos
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer.acosh.MergeFromString(b"")
+        return spec_layer
+
+    def add_atanh(self, name, input_name, output_name):
+        """
+        Add an atanh layer to the model that computes element-wise inverse
+        hyperbolic tangent for the input tensor.
+        Refer to the ``AtanhLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+
+        See Also
+        --------
+        add_tan, add_tanh, add_atan
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer.atanh.MergeFromString(b"")
+        return spec_layer
+
+    def add_exp2(self, name, input_name, output_name):
+        """
+        Add an exp2 layer to the model that performs the element-wise base-2
+        exponential operation (``2^x``).
+        Refer to the ``Exp2LayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer.exp2.MergeFromString(b"")
+        return spec_layer
+
+    def add_add_broadcastable(self, name, input_names, output_name):
+        """
+        Add an add_broadcastable layer to the model that performs element-wise
+        addition with broadcast support.
+        Refer to the ``AddBroadcastableLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer.addBroadcastable.MergeFromString(b"")
+        self._set_max_input_rank(input_names, output_name)
+        return spec_layer
+
+    def add_multiply_broadcastable(self, name, input_names, output_name):
+        """
+        Add a multiply_broadcastable layer to the model that performs element-wise
+        multiplication with broadcast support.
+        Refer to the ``MultiplyBroadcastableLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
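+
+        A usage sketch (hypothetical blob names): with inputs of shapes
+        (3, 4) and (4,), the second input is broadcast against the first,
+        following numpy-style broadcasting rules::
+
+            builder.add_multiply_broadcastable("mul_1",
+                                               input_names=["x", "scale"],
+                                               output_name="x_scaled")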
+ """ + + spec_layer = self._add_generic_layer(name, input_names, [output_name]) + spec_layer.multiplyBroadcastable.MergeFromString(b"") + self._set_max_input_rank(input_names, output_name) + return spec_layer + + def add_divide_broadcastable(self, name, input_names, output_name): + """ + Add a divide_broadcastable layer to the model that performs element-wise + division operation with broadcast support. + Refer to the ``DivideBroadcastableLayerParams`` message in the specification (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_names: list of str + The input blob names of this layer. + output_name: str + The output blob name of this layer. + """ + + spec_layer = self._add_generic_layer(name, input_names, [output_name]) + spec_layer.divideBroadcastable.MergeFromString(b"") + self._set_max_input_rank(input_names, output_name) + return spec_layer + + def add_subtract_broadcastable(self, name, input_names, output_name): + """ + Add a subtract_broadcastable layer to the model that performs element-wise + subtraction operation with broadcast support. + Refer to the ``SubtractBroadcastableLayerParams`` message in the specification (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_names: list of str + The input blob names of this layer. + output_name: str + The output blob name of this layer. + """ + + spec_layer = self._add_generic_layer(name, input_names, [output_name]) + spec_layer.subtractBroadcastable.MergeFromString(b"") + self._set_max_input_rank(input_names, output_name) + return spec_layer + + def add_max_broadcastable(self, name, input_names, output_name): + """ + Add a max_broadcastable layer to the model that performs element-wise + maximum operation with broadcast support. + Refer to the ``MaxBroadcastableLayerParams`` message in the specification (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_names: list of str + The input blob names of this layer. + output_name: str + The output blob name of this layer. + """ + + spec_layer = self._add_generic_layer(name, input_names, [output_name]) + spec_layer.maxBroadcastable.MergeFromString(b"") + self._set_max_input_rank(input_names, output_name) + return spec_layer + + def add_min_broadcastable(self, name, input_names, output_name): + """ + Add a min_broadcastable layer to the model that performs element-wise + minimum operation with broadcast support. + Refer to the ``MinBroadcastableLayerParams`` message in the specification (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_names: list of str + The input blob names of this layer. + output_name: str + The output blob name of this layer. + """ + + spec_layer = self._add_generic_layer(name, input_names, [output_name]) + spec_layer.minBroadcastable.MergeFromString(b"") + self._set_max_input_rank(input_names, output_name) + return spec_layer + + def add_floor_div_broadcastable(self, name, input_names, output_name): + """ + Add a floor_div_broadcastable layer to the model that performs floor + division operation with broadcast support. + Refer to the ``FloorDivBroadcastableLayerParams`` message in the specification (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_names: list of str + The input blob names of this layer. + output_name: str + The output blob name of this layer. 
+
+        See Also
+        --------
+        add_divide_broadcastable
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer.floorDivBroadcastable.MergeFromString(b"")
+        self._set_max_input_rank(input_names, output_name)
+        return spec_layer
+
+    def add_mod_broadcastable(self, name, input_names, output_name):
+        """
+        Add a mod_broadcastable layer to the model that performs element-wise
+        modulo operation with broadcast support.
+        Refer to the ``ModBroadcastableLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer.modBroadcastable.MergeFromString(b"")
+        self._set_max_input_rank(input_names, output_name)
+        return spec_layer
+
+    def add_pow_broadcastable(self, name, input_names, output_name):
+        """
+        Add a pow_broadcastable layer to the model that performs element-wise
+        power operation with broadcast support.
+        Refer to the ``PowBroadcastableLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer.powBroadcastable.MergeFromString(b"")
+        self._set_max_input_rank(input_names, output_name)
+        return spec_layer
+
+    def add_stack(self, name, input_names, output_name, axis=0):
+        """
+        Add a stack layer to the model that stacks a list of tensors into one
+        tensor of rank+1 along the given axis.
+        Refer to the ``StackLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+        axis: int, optional
+            The axis along which to stack, default: 0.
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer.stack.axis = axis
+        self.rank_dict[output_name] = self._get_rank(input_names[0]) + 1
+        return spec_layer
+
+    def add_ceil(self, name, input_name, output_name):
+        """
+        Add a ceil layer to the model that performs element-wise ceil operation
+        on the input tensor that rounds the value to the smallest integer not
+        less than x.
+        Refer to the ``CeilLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+
+        See Also
+        --------
+        add_floor, add_clip
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer.ceil.MergeFromString(b"")
+        return spec_layer
+
+    def add_floor(self, name, input_name, output_name):
+        """
+        Add a floor layer to the model that performs element-wise floor operation
+        on the input tensor that rounds the value to the largest integer not
+        greater than x.
+        Refer to the ``FloorLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+
+        See Also
+        --------
+        add_ceil, add_clip
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer.floor.MergeFromString(b"")
+        return spec_layer
+
+    def add_round(self, name, input_name, output_name):
+        """
+        Add a round layer to the model that performs element-wise round operation
+        on the input tensor that rounds the value to the nearest integer.
+        Refer to the ``RoundLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer.round.MergeFromString(b"")
+        return spec_layer
+
+    def add_sign(self, name, input_name, output_name):
+        """
+        Add a sign layer to the model that performs element-wise sign operation
+        (+1 for positive values, -1 for negative values, 0 for zeroes).
+        Refer to the ``SignLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer.sign.MergeFromString(b"")
+        return spec_layer
+
+    def add_clip(self, name, input_name, output_name, min_value=0.0, max_value=1.0):
+        """
+        Add a clip layer to the model that performs element-wise clip operation.
+        Clips the values in the input tensor to the range [min_value, max_value].
+        Refer to the ``ClipLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        min_value: float, optional
+            Lower bound / minimum value for clip, default: 0.0.
+        max_value: float, optional
+            Upper bound / maximum value for clip, default: 1.0.
+
+        See Also
+        --------
+        add_floor, add_ceil
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer.clip.MergeFromString(b"")
+        spec_params = spec_layer.clip
+
+        spec_params.minVal = float(min_value)
+        spec_params.maxVal = float(max_value)
+
+        return spec_layer
+
+    def add_split_nd(
+        self, name, input_name, output_names, axis, num_splits=2, split_sizes=None
+    ):
+        """
+        Add a split_nd layer to the model that splits the input tensor into
+        multiple output tensors: either split the input uniformly into
+        ``num_splits`` tensors, or split it into tensors whose sizes along
+        ``axis`` are given by ``split_sizes``.
+        Refer to the ``SplitNDLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_names: list of str
+            The output blob names of this layer.
+        axis: int
+            Axis to perform the split on.
+        num_splits: int, optional
+            Number of splits, default: 2.
+        split_sizes: list of int or tuple of int, optional
+            Sizes of the output tensors along ``axis``, default: ``None``.
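+
+        Examples
+        --------
+        A minimal sketch (illustrative names): splitting a (6, 4) tensor along
+        axis 0 into slices of sizes 2 and 4:
+
+        >>> from coremltools.models import datatypes
+        >>> from coremltools.models.neural_network import NeuralNetworkBuilder
+        >>> builder = NeuralNetworkBuilder(
+        ...     [("x", datatypes.Array(6, 4))],
+        ...     [("y1", None), ("y2", None)], disable_rank5_shape_mapping=True)
+        >>> layer = builder.add_split_nd(
+        ...     "split_0", "x", ["y1", "y2"], axis=0, split_sizes=[2, 4])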
+        """
+
+        if not split_sizes:
+            split_sizes = []
+
+        spec_layer = self._add_generic_layer(name, [input_name], output_names)
+        spec_layer_params = spec_layer.splitND
+        spec_layer_params.axis = axis
+
+        if split_sizes:
+            spec_layer_params.splitSizes.extend(split_sizes)
+            spec_layer_params.numSplits = len(split_sizes)
+        else:
+            spec_layer_params.numSplits = num_splits
+
+        assert len(output_names) == spec_layer_params.numSplits
+        return spec_layer
+
+    def add_slice_static(
+        self,
+        name,
+        input_name,
+        output_name,
+        begin_ids,
+        end_ids,
+        strides,
+        begin_masks,
+        end_masks,
+        squeeze_masks=None,
+    ):
+        """
+        Add a slice_static layer to the model that extracts a slice of size
+        ``(end - begin) / stride`` from the given input tensor.
+        Refer to the ``SliceStaticLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        begin_ids: list of int or tuple of int
+            Begin offsets for the slice layer.
+        end_ids: list of int or tuple of int
+            End offsets for the slice layer.
+        strides: list of int or tuple of int
+            Strides for the slice layer.
+        begin_masks: list of bool
+            Boolean masks for begin offsets.
+        end_masks: list of bool
+            Boolean masks for end offsets.
+        squeeze_masks: list of bool, optional
+            Boolean masks for squeezing axes.
+
+        See Also
+        --------
+        add_slice_dynamic
+        """
+
+        rank = len(begin_ids)
+        assert len(end_ids) == rank
+        assert len(strides) == rank
+        assert len(begin_masks) == rank
+        assert len(end_masks) == rank
+        assert squeeze_masks is None or len(squeeze_masks) == rank
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.sliceStatic
+
+        spec_layer_params.beginIds.extend(begin_ids)
+        spec_layer_params.endIds.extend(end_ids)
+        spec_layer_params.strides.extend(strides)
+        spec_layer_params.beginMasks.extend(begin_masks)
+        spec_layer_params.endMasks.extend(end_masks)
+
+        if not (squeeze_masks and any(squeeze_masks)):
+            return spec_layer
+
+        if self.spec and (
+            not self.spec.specificationVersion
+            or self.spec.specificationVersion < _SPECIFICATION_VERSION_IOS_14
+        ):
+            self.spec.specificationVersion = _SPECIFICATION_VERSION_IOS_14
+        spec_layer_params.squeezeMasks.extend(squeeze_masks)
+
+        return spec_layer
+
+    def add_slice_dynamic(
+        self,
+        name,
+        input_names,
+        output_name,
+        end_ids=None,
+        strides=None,
+        begin_masks=None,
+        end_masks=None,
+        squeeze_masks=None,
+    ):
+        """
+        Add a slice_dynamic layer to the model that extracts a slice of size
+        ``(end - begin) / stride`` from the given input tensor.
+        Refer to the ``SliceDynamicLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+        end_ids: list of int or tuple of int, optional
+            End offsets for the slice layer, default: ``[1] * 5``.
+        strides: list of int or tuple of int, optional
+            Strides for the slice layer, default: ``[1] * 5``.
+        begin_masks: list of bool, optional
+            Boolean masks for begin offsets, default: ``[False] * 5``.
+        end_masks: list of bool, optional
+            Boolean masks for end offsets, default: ``[False] * 5``.
+        squeeze_masks: list of bool, optional
+            Boolean masks for squeezing axes, default: ``[False] * 5``.
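+
+        Examples
+        --------
+        A minimal sketch (illustrative names), where the second input supplies
+        the begin offsets at runtime:
+
+        >>> from coremltools.models import datatypes
+        >>> from coremltools.models.neural_network import NeuralNetworkBuilder
+        >>> builder = NeuralNetworkBuilder(
+        ...     [("x", datatypes.Array(4, 4)), ("begin", datatypes.Array(2))],
+        ...     [("out", None)], disable_rank5_shape_mapping=True)
+        >>> layer = builder.add_slice_dynamic(
+        ...     "slice_0", ["x", "begin"], "out",
+        ...     end_ids=[4, 4], strides=[1, 1],
+        ...     begin_masks=[False, False], end_masks=[False, False])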
+
+        See Also
+        --------
+        add_slice_static
+        """
+
+        if not end_ids:
+            end_ids = [1 for _ in range(5)]
+        if not strides:
+            strides = [1 for _ in range(5)]
+        if not begin_masks:
+            begin_masks = [False for _ in range(5)]
+        if not end_masks:
+            end_masks = [False for _ in range(5)]
+        if not squeeze_masks:
+            squeeze_masks = [False for _ in range(5)]
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer_params = spec_layer.sliceDynamic
+
+        spec_layer_params.endIds.extend(end_ids)
+        spec_layer_params.strides.extend(strides)
+        spec_layer_params.beginMasks.extend(begin_masks)
+        spec_layer_params.endMasks.extend(end_masks)
+        if not any(squeeze_masks):
+            return spec_layer
+
+        if self.spec and (
+            not self.spec.specificationVersion
+            or self.spec.specificationVersion < _SPECIFICATION_VERSION_IOS_14
+        ):
+            self.spec.specificationVersion = _SPECIFICATION_VERSION_IOS_14
+        spec_layer_params.squeezeMasks.extend(squeeze_masks)
+
+        return spec_layer
+
+    def add_tile(self, name, input_name, output_name, reps=[]):
+        """
+        Add a tile layer to the model that constructs a tensor by repeating the
+        input tensor a given number of times.
+        Refer to the ``TileLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str or list[str]
+            The input blob name of this layer.
+            If a second input is provided, the ``reps`` parameter is ignored.
+        output_name: str
+            The output blob name of this layer.
+        reps: list of int or tuple of int
+            Number of times to replicate.
+            If ``input_name`` provides two inputs, the second input is used as
+            ``reps`` and this parameter is ignored.
+
+        See Also
+        --------
+        add_stack, add_concat_nd
+        """
+        if isinstance(input_name, tuple):
+            input_names = list(input_name)
+        elif isinstance(input_name, list):
+            input_names = input_name
+        else:
+            input_names = [input_name]
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+
+        spec_layer_params = spec_layer.tile
+        # If two inputs are provided,
+        # ignore the reps attribute.
+        if len(input_names) == 2:
+            reps = []
+            if self.spec and (
+                not self.spec.specificationVersion
+                or self.spec.specificationVersion < _SPECIFICATION_VERSION_IOS_14
+            ):
+                self.spec.specificationVersion = _SPECIFICATION_VERSION_IOS_14
+        assert all([i > 0 for i in reps])
+        spec_layer_params.reps.extend(reps)
+        return spec_layer
+
+    def add_range_static(
+        self, name, output_name, input_names=None, end=1, start=0, step=1
+    ):
+        """
+        Add a range_static layer that returns a tensor that contains evenly spaced values.
+        This layer has no inputs and three parameters.
+        Refer to the ``RangeStaticLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        input_names: list of str
+            The input blob names of this layer (typically an empty list, since
+            this layer takes no inputs).
+        end: int, optional
+            Range parameter: end, default: 1.
+        start: int, optional
+            Range parameter: start, default: 0.
+        step: int, optional
+            Range parameter: step size, default: 1.
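+
+        Examples
+        --------
+        A minimal sketch (illustrative names; passing an empty input list is an
+        assumption consistent with this layer taking no inputs):
+
+        >>> from coremltools.models import datatypes
+        >>> from coremltools.models.neural_network import NeuralNetworkBuilder
+        >>> builder = NeuralNetworkBuilder(
+        ...     [("x", datatypes.Array(1))], [("r", None)],
+        ...     disable_rank5_shape_mapping=True)
+        >>> layer = builder.add_range_static(
+        ...     "range_0", "r", input_names=[], end=10, start=0, step=2)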
+
+        See Also
+        --------
+        add_range_dynamic
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer.rangeStatic.MergeFromString(b"")
+        spec_params = spec_layer.rangeStatic
+
+        spec_params.endValue = float(end)
+        spec_params.startValue = float(start)
+        spec_params.stepSizeValue = float(step)
+
+        self.rank_dict[output_name] = 1
+        return spec_layer
+
+    def add_range_dynamic(self, name, input_names, output_name, start=0, step=1):
+        """
+        Add a range_dynamic layer that returns a tensor that contains evenly spaced values.
+        This layer takes between one and three inputs; the parameters supply
+        whichever of ``start`` and ``step`` are not given as inputs.
+        Refer to the ``RangeDynamicLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names.
+            If input size == 1: end is an input; start and step are read from the parameters.
+            If input size == 2: end and start are inputs; step is read from the parameters.
+            If input size == 3: start, end, and step are all inputs; none of the parameters are used.
+        output_name: str
+            The output blob name of this layer.
+        start: int, optional
+            Range parameter: start. Ignored if start is provided as an input, default: 0.
+        step: int, optional
+            Range parameter: step. Ignored if step is provided as an input, default: 1.
+
+        See Also
+        --------
+        add_range_static
+        """
+
+        if len(input_names) < 1 or len(input_names) > 3:
+            raise ValueError("RangeDynamic layer must have either 1, 2 or 3 inputs.")
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer.rangeDynamic.MergeFromString(b"")
+        spec_params = spec_layer.rangeDynamic
+
+        spec_params.startValue = float(start)
+        spec_params.stepSizeValue = float(step)
+
+        self.rank_dict[output_name] = 1
+        return spec_layer
+
+    def add_branch(self, name, input_name, if_branch=None, else_branch=None):
+        """
+        Add a branch layer to the model that provides the functionality of
+        branching or an ``if-else`` block.
+        Refer to the ``BranchLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        if_branch: NeuralNetwork, optional
+            Neural network to execute if the absolute value of the input tensor is greater than 1e-6.
+        else_branch: NeuralNetwork, optional
+            Neural network to execute if the absolute value of the input tensor is less than 1e-6.
+
+        See Also
+        --------
+        add_loop, add_loop_continue, add_loop_break
+        """
+
+        layer = self._add_generic_layer(name, [input_name], [])
+        branch = layer.branch
+        if if_branch:
+            # Composite protobuf fields cannot be assigned directly, so copy
+            # the provided sub-network into the branch message.
+            branch.ifBranch.CopyFrom(if_branch)
+        else:
+            branch.ifBranch.MergeFromString(b"")
+        if else_branch:
+            branch.elseBranch.CopyFrom(else_branch)
+        else:
+            branch.elseBranch.MergeFromString(b"")
+        return layer
+
+    def add_loop(
+        self,
+        name,
+        body_network=None,
+        input_name=None,
+        condition=None,
+        condition_network=None,
+        max_iterations=None,
+    ):
+        """
+        Add a loop layer to the model that provides the functionality of a ``for``
+        loop, or a ``while`` loop.
+        Refer to the ``LoopLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        body_network: NeuralNetwork
+            Neural network to execute for the body of the loop.
+        input_name: str
+            The input blob name of this layer.
+        condition: str, optional
+            Condition of the loop.
+        condition_network: NeuralNetwork, optional
+            Neural network to execute for the condition of the loop.
+        max_iterations: int, optional
+            Maximum number of iterations of the loop.
+
+        See Also
+        --------
+        add_loop_break, add_loop_continue, add_branch
+        """
+
+        input_names = [] if input_name is None else [input_name]
+        spec_layer = self._add_generic_layer(name, input_names, [])
+        loop = spec_layer.loop
+        if condition_network is None:
+            loop.conditionNetwork.MergeFromString(b"")
+        else:
+            # Composite protobuf fields cannot be assigned directly, so copy
+            # the provided sub-network into the loop message.
+            loop.conditionNetwork.CopyFrom(condition_network)
+
+        if condition is not None:
+            loop.conditionVar = str(condition)
+        if max_iterations is not None:
+            loop.maxLoopIterations = max_iterations
+
+        if body_network is None:
+            loop.bodyNetwork.MergeFromString(b"")
+        else:
+            loop.bodyNetwork.CopyFrom(body_network)
+        return spec_layer
+
+    def add_loop_break(self, name):
+        """
+        Add a loop_break layer to the model that terminates the loop that
+        contains this layer. Must reside in the ``bodyNetwork`` of the loop layer.
+        Refer to the ``LoopBreakLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+
+        See Also
+        --------
+        add_loop, add_loop_continue, add_branch
+        """
+
+        spec_layer = self.nn_spec.layers.add()
+        spec_layer.name = name
+        spec_layer.loopBreak.MergeFromString(b"")
+        return spec_layer
+
+    def add_loop_continue(self, name):
+        """
+        Add a loop_continue layer to the model that stops the current loop
+        iteration and continues with the next iteration. Must reside in the
+        ``bodyNetwork`` of the loop layer.
+        Refer to the ``LoopContinueLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+
+        See Also
+        --------
+        add_loop, add_loop_break, add_branch
+        """
+
+        spec_layer = self.nn_spec.layers.add()
+        spec_layer.name = name
+        spec_layer.loopContinue.MergeFromString(b"")
+        return spec_layer
+
+    def add_copy(self, name, input_name, output_name):
+        """
+        Add a copy layer to the model that copies its input tensor to the output
+        tensor. The input tensor and output tensor must have distinct names.
+        Refer to the ``CopyLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer.copy.MergeFromString(b"")
+        # If the output name's rank differs from an earlier recorded rank,
+        # mark it as unknown.
+        if output_name in self.rank_dict and self._get_rank(
+            output_name
+        ) != self._get_rank(input_name):
+            self.rank_dict[output_name] = -1
+        else:
+            self.rank_dict[output_name] = self._get_rank(input_name)
+        return spec_layer
+
+    def add_greater_than(
+        self, name, input_names, output_name, use_greater_than_equal=False, alpha=0.0
+    ):
+        """
+        Add a greater_than layer to the model that performs the element-wise
+        greater-than (>) operation or greater-than-or-equal-to (>=) operation.
+        Broadcasting is supported.
+        Refer to the ``GreaterThanLayerParams``, ``GreaterEqualLayerParams`` messages
+        in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+        use_greater_than_equal: bool, optional
+            Whether or not to allow greater than or equal to, default: false.
+        alpha: float, optional
+            y = x1 > alpha (or y = x1 >= alpha if ``use_greater_than_equal`` is
+            true), if only one input is provided, default: 0.
+
+        See Also
+        --------
+        add_equal, add_not_equal, add_less_than
+        """
+
+        if isinstance(input_names, str):
+            input_names = [input_names]
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        if use_greater_than_equal:
+            spec_layer.greaterEqual.MergeFromString(b"")
+            if len(input_names) == 1:
+                spec_layer.greaterEqual.alpha = alpha
+        else:
+            spec_layer.greaterThan.MergeFromString(b"")
+            if len(input_names) == 1:
+                spec_layer.greaterThan.alpha = alpha
+
+        return spec_layer
+
+    def add_less_than(
+        self, name, input_names, output_name, use_less_than_equal=False, alpha=0.0
+    ):
+        """
+        Add a less_than layer to the model that performs the element-wise
+        less-than (<) operation or less-than-or-equal-to (<=) operation.
+        Broadcasting is supported.
+        Refer to the ``LessThanLayerParams``, ``LessEqualLayerParams`` messages in
+        the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+        use_less_than_equal: bool, optional
+            Whether or not to allow less than or equal to, default: false.
+        alpha: float, optional
+            y = x1 < alpha (or y = x1 <= alpha if ``use_less_than_equal`` is
+            true), if only one input is provided, default: 0.
+
+        See Also
+        --------
+        add_equal, add_not_equal, add_greater_than
+        """
+
+        if isinstance(input_names, str):
+            input_names = [input_names]
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        if use_less_than_equal:
+            spec_layer.lessEqual.MergeFromString(b"")
+            if len(input_names) == 1:
+                spec_layer.lessEqual.alpha = alpha
+        else:
+            spec_layer.lessThan.MergeFromString(b"")
+            if len(input_names) == 1:
+                spec_layer.lessThan.alpha = alpha
+        return spec_layer
+
+    def add_equal(self, name, input_names, output_name, alpha=0.0):
+        """
+        Add an equal layer to the model that performs the element-wise equal
+        (=) operation. Broadcasting is supported.
+        Refer to the ``EqualLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+        alpha: float, optional
+            y = x1 == alpha, if only one input is provided, default: 0.
+
+        See Also
+        --------
+        add_not_equal, add_greater_than, add_less_than
+        """
+
+        if isinstance(input_names, str):
+            input_names = [input_names]
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer.equal.MergeFromString(b"")
+        if len(input_names) == 1:
+            spec_layer.equal.alpha = alpha
+        return spec_layer
+
+    def add_not_equal(self, name, input_names, output_name, alpha=0.0):
+        """
+        Add a not_equal layer to the model that performs the element-wise not
+        equal (!=) operation. Broadcasting is supported.
+        Refer to the ``NotEqualLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+        alpha: float, optional
+            y = x1 != alpha, if only one input is provided, default: 0.
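+
+        Examples
+        --------
+        A minimal sketch (illustrative names) of the single-input form, which
+        compares each element against ``alpha``:
+
+        >>> from coremltools.models import datatypes
+        >>> from coremltools.models.neural_network import NeuralNetworkBuilder
+        >>> builder = NeuralNetworkBuilder(
+        ...     [("x", datatypes.Array(3))], [("mask", None)],
+        ...     disable_rank5_shape_mapping=True)
+        >>> layer = builder.add_not_equal("ne_0", "x", "mask", alpha=0.0)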
+
+        See Also
+        --------
+        add_equal, add_greater_than, add_less_than
+        """
+
+        if isinstance(input_names, str):
+            input_names = [input_names]
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer.notEqual.MergeFromString(b"")
+        if len(input_names) == 1:
+            spec_layer.notEqual.alpha = alpha
+        return spec_layer
+
+    def add_logical(self, name, input_names, output_name, mode):
+        """
+        Add a logical layer to the model that performs element-wise logical
+        AND/OR/XOR/NOT operations. Broadcasting is supported.
+        Refer to the ``LogicalAndLayerParams``, ``LogicalOrLayerParams``,
+        ``LogicalXorLayerParams``, and ``LogicalNotLayerParams`` messages in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+        mode: str
+            Logical operation mode in [AND | OR | XOR | NOT].
+        """
+
+        if isinstance(input_names, str):
+            input_names = [input_names]
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+
+        if mode in ["AND", "OR", "XOR"] and len(input_names) != 2:
+            raise ValueError('Logical operation "%s" requires 2 inputs' % name)
+        if mode in ["NOT"] and len(input_names) != 1:
+            raise ValueError('Logical operation "%s" requires 1 input' % name)
+
+        if mode == "AND":
+            spec_layer.logicalAnd.MergeFromString(b"")
+        elif mode == "OR":
+            spec_layer.logicalOr.MergeFromString(b"")
+        elif mode == "XOR":
+            spec_layer.logicalXor.MergeFromString(b"")
+        elif mode == "NOT":
+            spec_layer.logicalNot.MergeFromString(b"")
+        else:
+            raise ValueError('Logical operation "%s" is not supported' % mode)
+
+        return spec_layer
+
+    def add_sliding_windows(
+        self, name, input_name, output_name, axis, window_size, step=1
+    ):
+        """
+        Add a sliding_windows layer to the model that returns a tensor containing
+        all windows of size ``window_size``, separated by ``step``, along the dimension ``axis``.
+        Refer to the ``SlidingWindowsLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        axis: int
+            Axis to perform the operation.
+        window_size: int
+            Number of elements in the sliding window.
+        step: int, optional
+            The stride of the input elements in the sliding window, default: 1.
+
+        See Also
+        --------
+        add_slice, add_slice_static, add_slice_dynamic
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+
+        spec_layer_params = spec_layer.slidingWindows
+        spec_layer_params.axis = axis
+        spec_layer_params.windowSize = window_size
+        spec_layer_params.step = step
+
+        self.rank_dict[output_name] = self._get_rank(input_name) + 1
+        return spec_layer
+
+    def add_reverse(self, name, input_name, output_name, reverse_dim=None):
+        """
+        Add a reverse layer to the model that reverses specific dimensions of
+        the input tensor.
+        Refer to the ``ReverseLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        reverse_dim: list of int or tuple of int
+            Flags, one per dimension, indicating which dimensions to reverse
+            (each entry is interpreted as a boolean), default: [1].
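+
+        Examples
+        --------
+        A minimal sketch (illustrative names) that reverses only the middle
+        axis of a rank-3 input:
+
+        >>> from coremltools.models import datatypes
+        >>> from coremltools.models.neural_network import NeuralNetworkBuilder
+        >>> builder = NeuralNetworkBuilder(
+        ...     [("x", datatypes.Array(2, 3, 4))], [("y", None)],
+        ...     disable_rank5_shape_mapping=True)
+        >>> layer = builder.add_reverse("reverse_0", "x", "y", reverse_dim=[0, 1, 0])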
+
+        See Also
+        --------
+        add_reverse_sequence
+        """
+
+        if not reverse_dim:
+            reverse_dim = [1]
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.reverse
+        spec_layer_params.reverseDim.extend(map(bool, reverse_dim))
+        return spec_layer
+
+    def add_reverse_sequence(
+        self, name, input_names, output_name, batch_axis=0, seq_axis=-1
+    ):
+        """
+        Add a reverse_sequence layer to the model that reverses variable length slices.
+        Refer to the ``ReverseSeqLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+        batch_axis: int, optional
+            Slices the input along the dimension ``batch_axis``, default: 0.
+        seq_axis: int, optional
+            Reverses along the dimension ``seq_axis``, default: -1.
+
+        See Also
+        --------
+        add_reverse
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer.reverseSeq.batchAxis = batch_axis
+        spec_layer.reverseSeq.sequenceAxis = seq_axis
+
+        return spec_layer
+
+    def add_gather(self, name, input_names, output_name, axis=0):
+        """
+        Add a gather layer to the model that gathers elements or slices from the
+        data input and stores them in an output tensor whose shape is defined by
+        the indices input.
+        Refer to the ``GatherLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+        axis: int, optional
+            The axis the operation is performed on, default: 0.
+
+        See Also
+        --------
+        add_gather_nd, add_gather_along_axis, add_scatter, add_scatter_nd, add_scatter_along_axis
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer.gather.axis = axis
+        self.rank_dict[output_name] = (
+            self._get_rank(input_names[0]) - 1 + self._get_rank(input_names[1])
+        )
+        return spec_layer
+
+    def add_scatter(self, name, input_names, output_name, axis=0, mode="UPDATE"):
+        """
+        Add a scatter layer to the model that scatters data into a new tensor
+        according to indices from the input.
+        Refer to the ``ScatterLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+        axis: int, optional
+            The axis the operation is performed on, default: 0.
+        mode: str, optional
+            Scatter accumulation mode in [UPDATE | ADD | SUB | MUL | DIV | MAX | MIN], default: UPDATE.
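+
+        Examples
+        --------
+        A minimal sketch (illustrative names): the three inputs are the data
+        tensor, the indices, and the updates to scatter:
+
+        >>> from coremltools.models import datatypes
+        >>> from coremltools.models.neural_network import NeuralNetworkBuilder
+        >>> builder = NeuralNetworkBuilder(
+        ...     [("data", datatypes.Array(5)), ("indices", datatypes.Array(2)),
+        ...      ("updates", datatypes.Array(2))],
+        ...     [("out", None)], disable_rank5_shape_mapping=True)
+        >>> layer = builder.add_scatter(
+        ...     "scatter_0", ["data", "indices", "updates"], "out",
+        ...     axis=0, mode="ADD")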
+
+        See Also
+        --------
+        add_scatter_nd, add_scatter_along_axis, add_gather, add_gather_nd, add_gather_along_axis
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer_params = spec_layer.scatter
+        spec_layer_params.axis = axis
+
+        mode = mode.upper() if isinstance(mode, str) else mode
+        if mode == "UPDATE":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value(
+                "SCATTER_UPDATE"
+            )
+        elif mode == "ADD":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value("SCATTER_ADD")
+        elif mode == "SUB":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value("SCATTER_SUB")
+        elif mode == "MUL":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value("SCATTER_MUL")
+        elif mode == "DIV":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value("SCATTER_DIV")
+        elif mode == "MAX":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value("SCATTER_MAX")
+        elif mode == "MIN":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value("SCATTER_MIN")
+        else:
+            raise ValueError("Unsupported Scatter mode %s" % mode)
+
+        return spec_layer
+
+    def add_gather_along_axis(self, name, input_names, output_name, axis=0):
+        """
+        Add a gather_along_axis layer to the model that gathers elements from the
+        data input along the given axis, at positions defined by the indices
+        input, and stores them in the output tensor.
+        Refer to the ``GatherAlongAxisLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+        axis: int, optional
+            The axis the operation is performed on, default: 0.
+
+        See Also
+        --------
+        add_gather, add_gather_nd, add_scatter, add_scatter_nd, add_scatter_along_axis
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer.gatherAlongAxis.axis = axis
+        self.rank_dict[output_name] = self._get_rank(input_names[1])
+        return spec_layer
+
+    def add_scatter_along_axis(
+        self, name, input_names, output_name, axis=0, mode="UPDATE"
+    ):
+        """
+        Add a scatter_along_axis layer to the model that scatters data into a new
+        tensor along the given axis, at positions defined by the indices input.
+        Refer to the ``ScatterAlongAxisLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+        axis: int, optional
+            The axis the operation is performed on, default: 0.
+        mode: str, optional
+            Scatter accumulation mode in [UPDATE | ADD | SUB | MUL | DIV | MAX | MIN], default: UPDATE.
+
+        See Also
+        --------
+        add_scatter, add_scatter_nd, add_gather, add_gather_nd, add_gather_along_axis
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer_params = spec_layer.scatterAlongAxis
+        spec_layer_params.axis = axis
+
+        mode = mode.upper() if isinstance(mode, str) else mode
+        if mode == "UPDATE":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value(
+                "SCATTER_UPDATE"
+            )
+        elif mode == "ADD":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value("SCATTER_ADD")
+        elif mode == "SUB":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value("SCATTER_SUB")
+        elif mode == "MUL":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value("SCATTER_MUL")
+        elif mode == "DIV":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value("SCATTER_DIV")
+        elif mode == "MAX":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value("SCATTER_MAX")
+        elif mode == "MIN":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value("SCATTER_MIN")
+        else:
+            raise ValueError("Unsupported scatter_along_axis mode %s" % mode)
+
+        return spec_layer
+
+    def add_gather_nd(self, name, input_names, output_name):
+        """
+        Add a gather_nd layer to the model that gathers elements or slices from
+        the data input and stores them in an output tensor whose shape is defined
+        by the indices input. This is the reverse operation of the scatter operation.
+        Refer to the ``GatherNDLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+
+        See Also
+        --------
+        add_gather, add_gather_along_axis, add_scatter, add_scatter_nd, add_scatter_along_axis
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer.gatherND.MergeFromString(b"")
+        # NOTE: ideally, the following is the formula for computing the output rank:
+        # self.rank_dict[output_name] = self._get_rank(input_names[1]) - 1 + self._get_rank(input_names[0])
+        #                               + shape_dict[input_names[1]][-1]
+        # But the shape of the indices (input_names[1]) is unknown, hence it is marked as -1.
+        # The converter should update the rank if the indices are known.
+        self.rank_dict[output_name] = -1
+        return spec_layer
+
+    def add_scatter_nd(self, name, input_names, output_name, mode="UPDATE"):
+        """
+        Add a scatter_nd layer to the model that scatters data into a new tensor
+        according to indices from the input. This is the reverse operation of the
+        gather operation.
+        Refer to the ``ScatterNDLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+        mode: str, optional
+            Scatter accumulation mode in [UPDATE | ADD | SUB | MUL | DIV | MAX | MIN], default: UPDATE.
+
+        See Also
+        --------
+        add_scatter, add_scatter_along_axis, add_gather, add_gather_nd, add_gather_along_axis
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer_params = spec_layer.scatterND
+
+        mode = mode.upper() if isinstance(mode, str) else mode
+        if mode == "UPDATE":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value(
+                "SCATTER_UPDATE"
+            )
+        elif mode == "ADD":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value("SCATTER_ADD")
+        elif mode == "SUB":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value("SCATTER_SUB")
+        elif mode == "MUL":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value("SCATTER_MUL")
+        elif mode == "DIV":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value("SCATTER_DIV")
+        elif mode == "MAX":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value("SCATTER_MAX")
+        elif mode == "MIN":
+            spec_layer_params.mode = _NeuralNetwork_pb2.ScatterMode.Value("SCATTER_MIN")
+        else:
+            raise ValueError("Unsupported scatter mode %s" % mode)
+
+        return spec_layer
+
+    def add_topk(
+        self, name, input_names, output_names, k=0, axis=0, use_bottom_k=False
+    ):
+        """
+        Add a topk layer to the model that returns the top or bottom k values and
+        the corresponding indices of the input tensor along a given axis.
+        Refer to the ``TopKLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer. It must be of length 1 or 2.
+            The optional second input corresponds to the value of K.
+        output_names: list of str
+            The output blob names of this layer. The first and second correspond to
+            values and indices, respectively.
+        k: int, optional
+            Number of values/indices to be computed along the axis.
+            Need not be given if there are two inputs, default: 0.
+        axis: int, optional
+            Axis along which the topk values/indices are computed.
+            Negative indexing is supported, default: 0.
+        use_bottom_k: bool, optional
+            If true, bottom k values are computed instead, default: false.
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, output_names)
+        spec_layer_params = spec_layer.topK
+        spec_layer_params.axis = axis
+        spec_layer_params.K = k
+        spec_layer_params.useBottomK = use_bottom_k
+        return spec_layer
+
+    def add_argmax(self, name, input_name, output_name, axis, keepdims=True):
+        """
+        Add an argmax layer to the model that returns the indices of the maximum
+        value along a specified axis in the input tensor.
+        Refer to the ``ArgMaxLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        axis: int
+            Axis along which the argmax is computed. Negative indexing is supported.
+        keepdims: bool, optional
+            If true, the output rank is the same as the input rank, default: true.
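+
+        Examples
+        --------
+        A minimal sketch (illustrative names) that takes the argmax over the
+        last axis and drops that axis from the output:
+
+        >>> from coremltools.models import datatypes
+        >>> from coremltools.models.neural_network import NeuralNetworkBuilder
+        >>> builder = NeuralNetworkBuilder(
+        ...     [("scores", datatypes.Array(4, 10))], [("best", None)],
+        ...     disable_rank5_shape_mapping=True)
+        >>> layer = builder.add_argmax(
+        ...     "argmax_0", "scores", "best", axis=-1, keepdims=False)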
+
+        See Also
+        --------
+        add_argmin
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.argMax
+        spec_layer_params.axis = axis
+        spec_layer_params.removeDim = not keepdims
+
+        input_rank = self._get_rank(input_name)
+        if input_rank == 1:
+            self.rank_dict[output_name] = 1
+        else:
+            if keepdims:
+                self.rank_dict[output_name] = input_rank
+            else:
+                self.rank_dict[output_name] = input_rank - 1
+        return spec_layer
+
+    def add_argmin(self, name, input_name, output_name, axis, keepdims=True):
+        """
+        Add an argmin layer to the model that returns the indices of the minimum
+        value along a specified axis in the input tensor.
+        Refer to the ``ArgMinLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        axis: int
+            Axis along which the argmin is computed. Negative indexing is supported.
+        keepdims: bool, optional
+            If true, the output rank is the same as the input rank, default: true.
+
+        See Also
+        --------
+        add_argmax
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.argMin
+        spec_layer_params.axis = axis
+        spec_layer_params.removeDim = not keepdims
+
+        input_rank = self._get_rank(input_name)
+        if input_rank == 1:
+            self.rank_dict[output_name] = 1
+        else:
+            if keepdims:
+                self.rank_dict[output_name] = input_rank
+            else:
+                self.rank_dict[output_name] = input_rank - 1
+        return spec_layer
+
+    def add_constant_pad(
+        self,
+        name,
+        input_names,
+        output_name,
+        value=0.0,
+        pad_to_given_output_size_mode=False,
+        pad_amounts=[],
+    ):
+        """
+        Add a constant pad layer.
+        Refer to the ``ConstantPaddingLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob name(s) of this layer.
+        output_name: str
+            The output blob name of this layer.
+        value: float
+            Value to be used for padding.
+        pad_to_given_output_size_mode: bool
+            If true, ``pad_amounts`` are interpreted as output shapes (see the example in NeuralNetwork.proto).
+        pad_amounts: [int], optional
+            Must be non-negative. Amount to pad in each dimension. The length of
+            the list must be twice the input/output rank.
+            Not required if the second input is present.
+
+        See Also
+        --------
+        add_padding
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer_params = spec_layer.constantPad
+        spec_layer_params.value = value
+        spec_layer_params.padToGivenOutputSizeMode = pad_to_given_output_size_mode
+        if len(pad_amounts) > 0:
+            spec_layer_params.padAmounts.extend(map(int, pad_amounts))
+        if len(input_names) == 1 and len(pad_amounts) == 0:
+            raise ValueError(
+                "Constant_pad layer: pad_amounts must be provided when there is a single input"
+            )
+        return spec_layer
+
+    def add_nms(
+        self,
+        name,
+        input_names,
+        output_names,
+        iou_threshold=0.5,
+        score_threshold=0.0,
+        max_boxes=1,
+        per_class_suppression=False,
+    ):
+        """
+        Add a non-maximum suppression (NMS) layer.
+        Refer to the ``NonMaximumSuppressionLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer. Must be of length at least 2 and at most 5.
+        output_names: list of str
+            The output blob names of this layer. Must be of length exactly 4.
+        iou_threshold: float
+            Intersection-over-union threshold for suppression. Ignored if the 3rd input is present.
+        score_threshold: float
+            Threshold for selecting boxes to be used in the NMS algorithm. Ignored if the 4th input is present.
+        max_boxes: int
+            Maximum number of boxes to output. Ignored if the 5th input is present.
+        per_class_suppression: bool
+            If true, boxes are organized into classes and suppression is applied to each class group separately.
+
+        See Also
+        --------
+        add_constant_pad
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, output_names)
+        spec_layer_params = spec_layer.NonMaximumSuppression
+        spec_layer_params.iouThreshold = iou_threshold
+        spec_layer_params.scoreThreshold = score_threshold
+        spec_layer_params.maxBoxes = max_boxes
+        spec_layer_params.perClassSuppression = per_class_suppression
+
+        self.rank_dict[output_names[0]] = 3
+        self.rank_dict[output_names[1]] = 3
+        self.rank_dict[output_names[2]] = 2
+        self.rank_dict[output_names[3]] = 1
+        return spec_layer
+
+    def add_embedding_nd(
+        self,
+        name,
+        input_name,
+        output_name,
+        vocab_size,
+        embedding_size,
+        W,
+        b=None,
+        is_quantized_weight=False,
+        quantization_type="linear",
+        nbits=8,
+        quant_scale=None,
+        quant_bias=None,
+        quant_lut=None,
+    ):
+        """
+        Add an embedding layer to the model that performs a matrix lookup and
+        optionally adds a bias.
+        Refer to the ``EmbeddingNDLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        vocab_size: int
+            Size of the vocabulary (1 + maximum integer index of the words).
+        embedding_size: int
+            Size of the embedded vector.
+        W: float32 numpy.array or bytes()
+            Weight matrix of shape (embedding_size, vocab_size).
+            If W is of type bytes(), i.e. quantized to 1-8 bits, the other
+            quantization-related arguments must be provided as well (see below).
+        b: numpy.array, optional
+            Bias vector of shape (embedding_size, ).
+        The following quantization arguments are expected when ``W`` is of type ``bytes()``:
+        is_quantized_weight: bool
+            Set it to true when W is of type bytes(), representing quantized weights.
+        quantization_type: str
+            When weights are quantized (i.e. W is of type bytes()), this should be either "linear" or "lut".
+        nbits: int
+            Should be between 1 and 8 (inclusive). Number of bits per weight value.
+        quant_scale: numpy.array(dtype=numpy.float32)
+            Scale vector to be used with linear quantization. Must be of length either 1 or embedding_size.
+        quant_bias: numpy.array(dtype=numpy.float32)
+            Bias vector to be used with linear quantization. Must be of length either 1 or embedding_size.
+        quant_lut: numpy.array(dtype=numpy.float32)
+            The LUT (look-up table) to be used with LUT quantization. Must be of length 2^nbits.
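+
+        Examples
+        --------
+        A minimal sketch with float (non-quantized) weights; the names and the
+        input shape are illustrative:
+
+        >>> import numpy as np
+        >>> from coremltools.models import datatypes
+        >>> from coremltools.models.neural_network import NeuralNetworkBuilder
+        >>> builder = NeuralNetworkBuilder(
+        ...     [("ids", datatypes.Array(10, 1))], [("vecs", None)],
+        ...     disable_rank5_shape_mapping=True)
+        >>> W = np.zeros((8, 100), dtype=np.float32)  # (embedding_size, vocab_size)
+        >>> layer = builder.add_embedding_nd(
+        ...     "embed_0", "ids", "vecs", vocab_size=100, embedding_size=8, W=W)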
+
+        See Also
+        --------
+        add_inner_product, add_embedding
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+
+        # Fill in the parameters
+        spec_layer_params = spec_layer.embeddingND
+
+        spec_layer_params.vocabSize = vocab_size
+        spec_layer_params.embeddingSize = embedding_size
+        spec_layer_params.hasBias = b is not None
+
+        weights = spec_layer_params.weights
+        if not is_quantized_weight:
+            weights.floatValue.extend(W.flatten())
+        else:
+            _verify_quantization_arguments(
+                weight=W,
+                output_channels=embedding_size,
+                quantization_type=quantization_type,
+                nbits=nbits,
+                quant_scale=quant_scale,
+                quant_bias=quant_bias,
+                quant_lut=quant_lut,
+            )
+
+            _fill_quantized_weights(
+                weights_message=weights,
+                W=W,
+                quantization_type=quantization_type,
+                nbits=nbits,
+                quant_scale=quant_scale,
+                quant_bias=quant_bias,
+                quant_lut=quant_lut,
+            )
+
+        if b is not None:
+            bias = spec_layer_params.bias
+            bias.floatValue.extend(b.flatten())
+        return spec_layer
+
+    def add_batched_mat_mul(
+        self,
+        name,
+        input_names,
+        output_name,
+        transpose_a=False,
+        transpose_b=False,
+        weight_matrix_rows=0,
+        weight_matrix_columns=0,
+        W=None,
+        bias=None,
+        int_8_dynamic_quantize=False,
+        is_quantized_weight=False,
+        quantization_type="linear",
+        nbits=8,
+        quant_scale=None,
+        quant_bias=None,
+        quant_lut=None,
+    ):
+        """
+        Add an N-D Batched Matrix Multiplication layer with NumPy-like broadcasting.
+        Refer to the ``BatchedMatMulLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+
+        input_names: list of str
+            The input blob names of this layer.
+
+        output_name: str
+            The output blob name of this layer.
+
+        transpose_a: bool, optional
+            Whether or not to transpose A, default: false.
+
+        transpose_b: bool, optional
+            Whether or not to transpose B, default: false.
+
+        weight_matrix_rows: int, optional
+            Must be equal to the last dimension of the input, default: 0.
+
+        weight_matrix_columns: int, optional
+            Must be equal to the last dimension of the output, default: 0.
+
+        W: float32 numpy.array or bytes(), optional
+            Weight matrix of shape ``(weight_matrix_rows, weight_matrix_columns)``.
+            If ``W`` is of type ``bytes()`` (quantized to 1-8 bits), other
+            quantization-related arguments must be provided as well (see below).
+
+        bias: float32 numpy.array, optional
+            Bias vector of shape (weight_matrix_columns,).
+
+        Quantization
+            Quantization arguments, used when ``W`` is of type ``bytes()``:
+
+        is_quantized_weight: bool, optional
+            Set it to true when ``W`` is of type ``bytes()``, representing
+            quantized weights, default: false.
+
+        quantization_type: str, optional
+            When weights are quantized (that is, ``W`` is of type ``bytes()``),
+            this should be either ``"linear"`` or ``"lut"``, default: ``"linear"``.
+
+        nbits: int, optional
+            Should be between 1 and 8 (inclusive). Number of bits per weight value, default: 8.
+
+        quant_scale: numpy.array(dtype=numpy.float32), optional
+            Scale vector to be used with linear quantization.
+            Must be of length either 1 or ``weight_matrix_columns``, default: ``None``.
+
+        quant_bias: numpy.array(dtype=numpy.float32), optional
+            Bias vector to be used with linear quantization.
+            Must be of length either 1 or ``weight_matrix_columns``, default: ``None``.
+
+        quant_lut: numpy.array(dtype=numpy.float32), optional
+            The LUT (look-up table) to be used with LUT quantization.
+            Must be of length 2^nbits, default: ``None``.
+ + int_8_dynamic_quantize: bool + Whether to quantize and dequantize before and after + batched matmul, respectively. + Expects byte weights, representing int8 values, if True. + See NeuralNetwork.proto for other validation conditions. + + See Also + -------- + add_inner_product + """ + + spec_layer = self._add_generic_layer(name, input_names, [output_name]) + + spec_layer_params = spec_layer.batchedMatmul + spec_layer_params.transposeA = transpose_a + spec_layer_params.transposeB = transpose_b + spec_layer_params.int8DynamicQuantize = int_8_dynamic_quantize + + if ((W is not None) or (bias is not None)) and len(input_names) == 2: + raise ValueError( + "batched_mat_mul: Weight and/or bias are ignored when there are two inputs" + ) + + if (W is None) and len(input_names) == 1: + raise ValueError( + "batched_mat_mul: Weight parameter must be provided when there is one input" + ) + + self.rank_dict[output_name] = 2 + for input_ in input_names: + self.rank_dict[output_name] = max( + self._get_rank(output_name), self._get_rank(input_) + ) + + if len(input_names) == 1: + spec_layer_params.weightMatrixFirstDimension = weight_matrix_rows + spec_layer_params.weightMatrixSecondDimension = weight_matrix_columns + spec_layer_params.hasBias = bias is not None + + weights = spec_layer_params.weights + + if not is_quantized_weight: + weights.floatValue.extend(_np.transpose(W).flatten()) + else: + _verify_quantization_arguments( + weight=W, + output_channels=weight_matrix_columns, + quantization_type=quantization_type, + nbits=nbits, + quant_scale=quant_scale, + quant_bias=quant_bias, + quant_lut=quant_lut, + int_8_dynamic_quantize=int_8_dynamic_quantize, + ) + + if nbits < 8: + num_weights = weight_matrix_rows * weight_matrix_columns + byte_arr = _np.frombuffer(W, dtype=_np.uint8) + W = _unpack_to_bytes(byte_arr, num_weights, nbits) + elif int_8_dynamic_quantize: + W = _np.frombuffer(W, dtype=_np.int8) + else: + W = _np.frombuffer(W, dtype=_np.uint8) + + W = _np.reshape(W, (weight_matrix_rows, weight_matrix_columns)) + W = _np.transpose(W) + + W_bytes = bytes() + if nbits == 8: + W_bytes += W.flatten().tobytes() + else: + W_bytes += _convert_array_to_nbit_quantized_bytes( + W.flatten(), nbits + ).tobytes() + + _fill_quantized_weights( + weights_message=weights, + W=W_bytes, + use_int_8=int_8_dynamic_quantize, + quantization_type=quantization_type, + nbits=nbits, + quant_scale=quant_scale, + quant_bias=quant_bias, + quant_lut=quant_lut, + ) + + if bias is not None: + bias_param = spec_layer_params.bias + bias_param.floatValue.extend(bias.flatten()) + + return spec_layer + + def add_get_shape(self, name, input_name, output_name): + """ + Add a get_shape layer to the model. + Refer to the ``GetShapeLayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + + See Also + -------- + add_reshape, add_reshape_like, add_reshape_static, add_reshape_dynamic + """ + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer.getShape.MergeFromString(b"") + self.rank_dict[output_name] = 1 + return spec_layer + + def add_load_constant_nd(self, name, output_name, constant_value, shape): + """ + Add a load_constant layer that loads data as a parameter and provides it + as an output. 
+        Refer to the ``LoadConstantNDLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        constant_value: numpy.array()
+            Value of the constant as a numpy array.
+        shape: list of int or tuple of int
+            List of ints representing the shape of the constant.
+
+        See Also
+        --------
+        add_elementwise
+        """
+
+        spec_layer = self._add_generic_layer(name, [], [output_name])
+        spec_layer_params = spec_layer.loadConstantND
+
+        data = spec_layer_params.data
+        data.floatValue.extend(constant_value.flatten())
+        spec_layer_params.shape.extend(shape)
+
+        # Rank information
+        self.rank_dict[output_name] = len(shape)
+
+        if len(data.floatValue) != _np.prod(shape):
+            raise ValueError(
+                "Dimensions of 'shape' do not match the size of the provided constant"
+            )
+        return spec_layer
+
+    def add_fill_like(self, name, input_name, output_name, value=0.0):
+        """
+        Add a fill_like layer to the model that outputs a tensor filled with a
+        scalar value.
+        Refer to the ``FillLikeLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        value: float, optional
+            A scalar value for the fill operation, default: 0.0.
+
+        See Also
+        --------
+        add_fill_static, add_fill_dynamic
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.fillLike
+        spec_layer_params.value = value
+        return spec_layer
+
+    def add_fill_static(self, name, output_name, output_shape, value=0.0):
+        """
+        Add a fill_static layer to the model that outputs a tensor filled
+        with a scalar value, with the shape given as a parameter.
+        Refer to the ``FillStaticLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        output_shape: list of int or tuple of int
+            The target shape of the output tensor.
+        value: float, optional
+            A scalar value for the fill operation, default: 0.0.
+
+        See Also
+        --------
+        add_fill_like, add_fill_dynamic
+        """
+
+        spec_layer = self._add_generic_layer(name, [], [output_name])
+        spec_layer_params = spec_layer.fillStatic
+        spec_layer_params.value = value
+        spec_layer_params.targetShape.extend(output_shape)
+        self.rank_dict[output_name] = len(output_shape)
+        return spec_layer
+
+    def add_fill_dynamic(self, name, input_name, output_name, value=0.0):
+        """
+        Add a fill_dynamic layer to the model that outputs a tensor filled
+        with a scalar value.
+        Refer to the ``FillDynamicLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        value: float, optional
+            A scalar value for the fill operation, default: 0.0.
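+
+        Examples
+        --------
+        A minimal sketch (illustrative names): the input blob holds the target
+        shape at runtime:
+
+        >>> from coremltools.models import datatypes
+        >>> from coremltools.models.neural_network import NeuralNetworkBuilder
+        >>> builder = NeuralNetworkBuilder(
+        ...     [("shape", datatypes.Array(2))], [("filled", None)],
+        ...     disable_rank5_shape_mapping=True)
+        >>> layer = builder.add_fill_dynamic("fill_0", "shape", "filled", value=1.0)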
+
+        See Also
+        --------
+        add_fill_like, add_fill_static
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.fillDynamic
+        spec_layer_params.value = value
+        self.rank_dict[output_name] = -1
+        return spec_layer
+
+    def add_broadcast_to_like(self, name, input_names, output_name):
+        """
+        Add a broadcast_to_like layer to the model that broadcasts a tensor
+        to a compatible shape.
+        Refer to the ``BroadcastToLikeLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+
+        See Also
+        --------
+        add_broadcast_to_static, add_broadcast_to_dynamic
+        """
+
+        # Validate before mutating the spec so a failed call leaves no
+        # partially added layer behind.
+        if len(input_names) != 2:
+            raise ValueError("BroadcastToLikeLayer must have two inputs")
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer.broadcastToLike.MergeFromString(b"")
+
+        self.rank_dict[output_name] = self._get_rank(input_names[1])
+        return spec_layer
+
+    def add_broadcast_to_static(self, name, input_name, output_name, output_shape):
+        """
+        Add a broadcast_to_static layer to the model that broadcasts a tensor
+        to a compatible shape.
+        Refer to the ``BroadcastToStaticLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        output_shape: list of int or tuple of int
+            The target shape of the output tensor.
+
+        See Also
+        --------
+        add_broadcast_to_like, add_broadcast_to_dynamic
+        """
+
+        spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+        spec_layer_params = spec_layer.broadcastToStatic
+        spec_layer_params.targetShape.extend(output_shape)
+
+        self.rank_dict[output_name] = len(output_shape)
+        return spec_layer
+
+    def add_broadcast_to_dynamic(self, name, input_names, output_name):
+        """
+        Add a broadcast_to_dynamic layer to the model that broadcasts a tensor
+        to a compatible shape.
+        Refer to the ``BroadcastToDynamicLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_names: list of str
+            The input blob names of this layer.
+        output_name: str
+            The output blob name of this layer.
+
+        See Also
+        --------
+        add_broadcast_to_like, add_broadcast_to_static
+        """
+
+        spec_layer = self._add_generic_layer(name, input_names, [output_name])
+        spec_layer.broadcastToDynamic.MergeFromString(b"")
+        # Setting the rank to -1 is a hint that the rank was not computed;
+        # the converter can modify it if it's a constant and known.
+        self.rank_dict[output_name] = -1
+        return spec_layer
+
+    def add_expand_dims(self, name, input_name, output_name, axes):
+        """
+        Add an expand_dims layer to the model that increases the rank of the
+        input tensor by adding unit dimensions.
+        Refer to the ``ExpandDimsLayerParams`` message in the specification
+        (NeuralNetwork.proto) for more details.
+
+        Parameters
+        ----------
+        name: str
+            The name of this layer.
+        input_name: str
+            The input blob name of this layer.
+        output_name: str
+            The output blob name of this layer.
+        axes: list of int or tuple of int
+            Dimensions the operation performs on.
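+
+        Examples
+        --------
+        A minimal sketch (illustrative names) that inserts unit dimensions at
+        the front and back of a (3, 4) input, producing shape (1, 3, 4, 1):
+
+        >>> from coremltools.models import datatypes
+        >>> from coremltools.models.neural_network import NeuralNetworkBuilder
+        >>> builder = NeuralNetworkBuilder(
+        ...     [("x", datatypes.Array(3, 4))], [("y", None)],
+        ...     disable_rank5_shape_mapping=True)
+        >>> layer = builder.add_expand_dims("expand_0", "x", "y", axes=[0, -1])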
+
+ def add_expand_dims(self, name, input_name, output_name, axes):
+ """
+ Add an expand dims layer to the model that increases the rank of the
+ input tensor by adding unit dimensions.
+ Refer to the ``ExpandDimsLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ input_name: str
+ The input blob name of this layer.
+ output_name: str
+ The output blob name of this layer.
+ axes: list of int or tuple of int
+ Dimensions on which the operation is performed.
+
+ See Also
+ --------
+ add_squeeze
+ """
+
+ spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+ spec_layer_params = spec_layer.expandDims
+ spec_layer_params.axes.extend(axes)
+ self.rank_dict[output_name] = self._get_rank(input_name) + len(axes)
+ return spec_layer
+
+ def add_squeeze(self, name, input_name, output_name, axes=None, squeeze_all=False):
+ """
+ Add a squeeze layer to the model that decreases the rank of the input
+ tensor by removing unit dimensions.
+ Refer to the ``SqueezeLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ input_name: str
+ The input blob name of this layer.
+ output_name: str
+ The output blob name of this layer.
+ axes: list of int or tuple of int, optional
+ Dimensions on which to perform the operation, default: ``None`` (squeeze_all).
+ squeeze_all: bool, optional
+ If true, all dimensions that are 1 are squeezed, default: false.
+
+ See Also
+ --------
+ add_expand_dims
+ """
+
+ spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+ spec_layer_params = spec_layer.squeeze
+ if axes is not None:
+ spec_layer_params.axes.extend(axes)
+ spec_layer_params.squeezeAll = squeeze_all
+
+ if squeeze_all or axes is None:
+ # All the dimensions that are 1 will be squeezed
+ # converter should update rank if shape is known
+ self.rank_dict[output_name] = -1
+ else:
+ rank = self._get_rank(input_name) - len(axes)
+ self.rank_dict[output_name] = rank if rank != 0 else 1
+ return spec_layer
+
+ def add_flatten_to_2d(self, name, input_name, output_name, axis=1):
+ """
+ Add a flatten_to_2d layer to the model that flattens the input tensor
+ into a 2-dimensional matrix.
+ Refer to the ``FlattenTo2DLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ input_name: str
+ The input blob name of this layer.
+ output_name: str
+ The output blob name of this layer.
+ axis: int, optional
+ Axis on which to perform the operation, default: 1.
+
+ See Also
+ --------
+ add_flatten
+ """
+
+ spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+ spec_layer_params = spec_layer.flattenTo2D
+ spec_layer_params.axis = axis
+ self.rank_dict[output_name] = 2
+ return spec_layer
+
+ def add_reshape_like(self, name, input_names, output_name):
+ """
+ Add a reshape_like layer to the model that reshapes a tensor.
+ Refer to the ``ReshapeLikeLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ input_names: list of str
+ The input blob names of this layer.
+ output_name: str
+ The output blob name of this layer.
+
+ See Also
+ --------
+ add_reshape, add_reshape_static, add_reshape_dynamic, add_rank_preserving_reshape
+ """
+
+ spec_layer = self._add_generic_layer(name, input_names, [output_name])
+ spec_layer.reshapeLike.MergeFromString(b"")
+ self.rank_dict[output_name] = self._get_rank(input_names[1])
+ return spec_layer
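+ # A sketch showing that ``add_expand_dims`` and ``add_squeeze`` are
+ # inverses for the same axes (hypothetical blob names assumed):
+ #
+ # builder.add_expand_dims("unsqueeze", "x", "x_e", axes=[0])   # rank r -> r+1
+ # builder.add_squeeze("squeeze", "x_e", "x_s", axes=[0])       # rank r+1 -> r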
+
+ def add_reshape_static(self, name, input_name, output_name, output_shape):
+ """
+ Add a reshape_static layer to the model that reshapes a tensor.
+ Refer to the ``ReshapeStaticLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ input_name: str
+ The input blob name of this layer.
+ output_name: str
+ The output blob name of this layer.
+ output_shape: list of int or tuple of int
+ Target shape of the output tensor.
+
+ See Also
+ --------
+ add_reshape, add_reshape_like, add_reshape_dynamic, add_rank_preserving_reshape
+ """
+
+ spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+ spec_layer_params = spec_layer.reshapeStatic
+ spec_layer_params.targetShape.extend(output_shape)
+ self.rank_dict[output_name] = len(output_shape)
+ return spec_layer
+
+ def add_reshape_dynamic(self, name, input_names, output_name):
+ """
+ Add a reshape_dynamic layer to the model that reshapes a tensor.
+ Refer to the ``ReshapeDynamicLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ input_names: list of str
+ The input blob names of this layer.
+ output_name: str
+ The output blob name of this layer.
+
+ See Also
+ --------
+ add_reshape, add_reshape_like, add_reshape_static, add_rank_preserving_reshape
+ """
+
+ spec_layer = self._add_generic_layer(name, input_names, [output_name])
+ spec_layer.reshapeDynamic.MergeFromString(b"")
+ # Setting rank to -1 is a hint that Rank was not computed
+ # converter can modify if it's a constant and known
+ self.rank_dict[output_name] = -1
+ return spec_layer
+
+ def add_rank_preserving_reshape(self, name, input_name, output_name, output_shape):
+ """
+ Add a rank_preserving_reshape layer to the model that reshapes the input
+ tensor without altering the rank of the tensor.
+ Refer to the ``RankPreservingReshapeLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ input_name: str
+ The input blob name of this layer.
+ output_name: str
+ The output blob name of this layer.
+ output_shape: list of int or tuple of int
+ Determines the shape of the output blob.
+ 0: copy the corresponding dimension of the input to the output.
+ -1: infer the dimension from the rest of the shape.
+
+ See Also
+ --------
+ add_reshape, add_reshape_like, add_reshape_static, add_reshape_dynamic
+ """
+
+ spec_layer = self._add_generic_layer(
+ name,
+ [input_name],
+ [output_name],
+ input_ranks=[len(output_shape)],
+ input_shapes=[[int(x) for x in output_shape]],
+ output_ranks=[len(output_shape)],
+ output_shapes=[[int(x) for x in output_shape]],
+ )
+
+ spec_layer_params = spec_layer.rankPreservingReshape
+ spec_layer_params.targetShape.extend(map(int, output_shape))
+ return spec_layer
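+ # A sketch of the three reshape variants above (hypothetical names):
+ # static takes a literal shape, "like" copies another blob's shape, and
+ # dynamic reads the target shape from a second input at runtime.
+ #
+ # builder.add_reshape_static("rs", "x", "x_r", output_shape=(1, 12))
+ # builder.add_reshape_like("rl", ["x", "template"], "x_rl")
+ # builder.add_reshape_dynamic("rd", ["x", "shape_tensor"], "x_rd")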
+
+ def add_random_normal_like(
+ self, name, input_name, output_name, mean=0.0, stddev=0.0, seed=-1
+ ):
+ """
+ Add a random_normal_like layer to the model that fills the output
+ tensor with random values from normal distribution.
+ Refer to the ``RandomNormalLikeLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ input_name: str
+ The input blob name of this layer.
+ output_name: str
+ The output blob name of this layer.
+ mean: float, optional
+ The mean of the normal distribution, default: 0.0.
+ stddev: float, optional
+ The standard deviation of the normal distribution, default: 0.0.
+ seed: int, optional
+ Used to create a random seed for the distribution, default -1 (random).
+
+ See Also
+ --------
+ add_random_normal_static, add_random_normal_dynamic
+ """
+
+ spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+ spec_layer_params = spec_layer.randomNormalLike
+
+ spec_layer_params.mean = mean
+ spec_layer_params.stdDev = stddev
+ spec_layer_params.seed = seed
+
+ return spec_layer
+
+ def add_random_normal_static(
+ self, name, output_name, output_shape, mean=0.0, stddev=0.0, seed=-1
+ ):
+ """
+ Add a random_normal_static layer to the model that fills the output
+ tensor with random values from normal distribution.
+ Refer to the ``RandomNormalStaticLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ output_name: str
+ The output blob name of this layer.
+ output_shape: list of int or tuple of int
+ Target shape of the output tensor.
+ mean: float, optional
+ The mean of the normal distribution, default: 0.0.
+ stddev: float, optional
+ The standard deviation of the normal distribution, default: 0.0.
+ seed: int, optional
+ Used to create a random seed for the distribution. Default -1 (random).
+
+ See Also
+ --------
+ add_random_normal_like, add_random_normal_dynamic
+ """
+
+ spec_layer = self._add_generic_layer(name, [], [output_name])
+ spec_layer_params = spec_layer.randomNormalStatic
+
+ spec_layer_params.outputShape.extend(output_shape)
+ spec_layer_params.mean = mean
+ spec_layer_params.stdDev = stddev
+ spec_layer_params.seed = seed
+
+ self.rank_dict[output_name] = len(output_shape)
+ return spec_layer
+
+ def add_random_normal_dynamic(
+ self, name, input_names, output_name, mean=0.0, stddev=0.0, seed=-1
+ ):
+ """
+ Add a random_normal_dynamic layer to the model that fills the output
+ tensor with random values from normal distribution.
+ Refer to the ``RandomNormalDynamicLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ input_names: list of str
+ The input blob names of this layer.
+ output_name: str
+ The output blob name of this layer.
+ mean: float, optional
+ The mean of the normal distribution, default: 0.0.
+ stddev: float, optional
+ The standard deviation of the normal distribution, default: 0.0.
+ seed: int, optional
+ Used to create a random seed for the distribution. Default -1 (random).
+
+ See Also
+ --------
+ add_random_normal_like, add_random_normal_static
+ """
+
+ spec_layer = self._add_generic_layer(name, input_names, [output_name])
+ spec_layer_params = spec_layer.randomNormalDynamic
+
+ spec_layer_params.mean = mean
+ spec_layer_params.stdDev = stddev
+ spec_layer_params.seed = seed
+ # Setting rank to -1 is a hint that Rank was not computed
+ # converter can modify if it's a constant and known
+ self.rank_dict[output_name] = -1
+ return spec_layer
+
+ def add_random_uniform_like(
+ self, name, input_name, output_name, minval=0.0, maxval=1.0, seed=-1
+ ):
+ """
+ Add a random_uniform_like layer to the model that fills the output
+ tensors with random values from uniform distribution.
+ Refer to the ``RandomUniformLikeLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ input_name: str
+ The input blob name of this layer.
+ output_name: str
+ The output blob name of this layer.
+ minval: float, optional
+ Lower bound / minimum value of the uniform distribution, default: 0.0.
+ maxval: float, optional + Upper bound / maximum value of the uniform distribution, default: 1.0. + seed: int, optional + Used to create a random seed for the distribution. default -1 (random). + + See Also + -------- + add_random_uniform_static, add_random_uniform_dynamic + """ + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer_params = spec_layer.randomUniformLike + + spec_layer_params.minVal = minval + spec_layer_params.maxVal = maxval + spec_layer_params.seed = seed + + return spec_layer + + def add_random_uniform_static( + self, name, output_name, output_shape, minval=0.0, maxval=1.0, seed=-1 + ): + """ + Add a random_uniform_static layer to the model that fills the output + tensors with random values from uniform distribution. + Refer to the ``RandomUniformStaticLayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + output_name: str + The output blob name of this layer. + output_shape: list of int or tuple of int + Target shape of the output tensor. + minval: float, optional + Lower bound / minimum value of the uniform distribution, default: 0.0. + maxval: float, optional + Upper bound / maximum value of the uniform distribution, default: 1.0. + seed: int, optional + Used to create a random seed for the distribution. default -1 (random). + + See Also + -------- + add_random_uniform_like, add_random_uniform_dynamic + """ + + spec_layer = self._add_generic_layer(name, [], [output_name]) + spec_layer_params = spec_layer.randomUniformStatic + + spec_layer_params.outputShape.extend(output_shape) + spec_layer_params.minVal = minval + spec_layer_params.maxVal = maxval + spec_layer_params.seed = seed + self.rank_dict[output_name] = len(output_shape) + return spec_layer + + def add_random_uniform_dynamic( + self, name, input_names, output_name, minval=0.0, maxval=1.0, seed=-1 + ): + """ + Add a random_uniform_dynamic layer to the model that fills the output + tensors with random values from uniform distribution. + Refer to the ``RandomUniformDynamicLayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_names: list of str + The input blob names of this layer. + output_name: str + The output blob name of this layer. + minval: float, optional + Lower bound / minimum value of the uniform distribution, default: 0.0. + maxval: float, optional + Upper bound / maximum value of the uniform distribution, default: 1.0. + seed: int, optional + Used to create a random seed for the distribution. default -1 (random). + + See Also + -------- + add_random_uniform_like, add_random_uniform_static + """ + + spec_layer = self._add_generic_layer(name, input_names, [output_name]) + spec_layer_params = spec_layer.randomUniformDynamic + + spec_layer_params.minVal = minval + spec_layer_params.maxVal = maxval + spec_layer_params.seed = seed + # Setting rank to -1 is a hint that Rank was not computed + # converter can modify if it's a constant and known + self.rank_dict[output_name] = -1 + return spec_layer + + def add_random_bernoulli_like( + self, name, input_name, output_name, prob=0.5, seed=-1 + ): + """ + Add a random_bernoulli_like layer to the model that fills the output + tensor with random values from Bernoulli distribution. + Refer to the ``RandomBernoulliLikeLayerParams`` message in the specification + (NeuralNetwork.proto) for more details. 
+ + Parameters + ---------- + name: str + The name of this layer. + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + prob: float, optional + Probabilities for Bernoulli distribution, default: 0.5. + seed: int, optional + Used to create a random seed for the distribution. default -1 (random). + + See Also + -------- + add_random_bernoulli_static, add_random_bernoulli_dynamic + """ + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer_params = spec_layer.randomBernoulliLike + + spec_layer_params.prob = prob + spec_layer_params.seed = seed + + return spec_layer + + def add_random_bernoulli_static( + self, name, output_name, output_shape, prob=0.5, seed=-1 + ): + """ + Add a random_bernoulli_static layer to the model that fills the output + tensor with random values from Bernoulli distribution. + Refer to the ``RandomBernoulliStaticLayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + output_name: str + The output blob name of this layer. + output_shape: list of int or tuple of int + Target shape of the output tensor. + prob: float, optional + Probabilities for Bernoulli distribution, default: 0.5. + seed: int, optional + Used to create a random seed for the distribution. default -1 (random). + + See Also + -------- + add_random_bernoulli_like, add_random_bernoulli_dynamic + """ + + spec_layer = self._add_generic_layer(name, [], [output_name]) + spec_layer_params = spec_layer.randomBernoulliStatic + + spec_layer_params.outputShape.extend(output_shape) + spec_layer_params.prob = prob + spec_layer_params.seed = seed + + self.rank_dict[output_name] = len(output_shape) + return spec_layer + + def add_random_bernoulli_dynamic( + self, name, input_names, output_name, prob=0.5, seed=-1 + ): + """ + Add a random_bernoulli_dynamic layer to the model that fills the output + tensor with random values from Bernoulli distribution. + Refer to the ``RandomBernoulliDynamicLayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_names: list of str + The input blob names of this layer. + output_name: str + The output blob name of this layer. + prob: float, optional + Probabilities for Bernoulli distribution, default: 0.5. + seed: int, optional + Used to create a random seed for the distribution. default -1 (random). + + See Also + -------- + add_random_bernoulli_like, add_random_bernoulli_static + """ + + spec_layer = self._add_generic_layer(name, input_names, [output_name]) + spec_layer_params = spec_layer.randomBernoulliDynamic + + spec_layer_params.prob = prob + spec_layer_params.seed = seed + + # Setting rank to -1 is a hint that Rank was not computed + # converter can modify if it's a constant and known + self.rank_dict[output_name] = -1 + return spec_layer + + def add_categorical_distribution( + self, + name, + input_name, + output_name, + num_samples, + is_logits=True, + eps=1e-10, + temperature=1.0, + seed=-1, + ): + """ + Add a categorical_distribution layer to the model that fills the output + tensor with random values from categorical distribution. + Refer to the ``CategoricalDistributionLayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_name: str + The input blob name of this layer. 
+ output_name: str
+ The output blob name of this layer.
+ num_samples: int
+ The number of samples to draw from the categorical distribution.
+ is_logits: bool, optional
+ If true, the input is log probabilities. If false, the input is
+ probabilities, default: True.
+ eps: float, optional
+ Epsilon parameter for categorical distribution, default 1e-10.
+ temperature: float, optional
+ Temperature parameter for categorical distribution, default 1.0.
+ seed: int, optional
+ Used to create a random seed for the distribution. default -1 (random).
+ """
+
+ spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+ spec_layer_params = spec_layer.categoricalDistribution
+
+ spec_layer_params.numSamples = num_samples
+ spec_layer_params.isLogits = is_logits
+ spec_layer_params.eps = eps
+ spec_layer_params.temperature = temperature
+ spec_layer_params.seed = seed
+
+ return spec_layer
+
+ def add_reduce_sum(
+ self, name, input_name, output_name, axes=None, keepdims=True, reduce_all=False
+ ):
+ """
+ Add a reduce_sum layer to the model that reduces the input tensor
+ using ``sum(elements across given dimensions)``.
+ Refer to the ``ReduceSumLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ input_name: str
+ The input blob name of this layer.
+ output_name: str
+ The output blob name of this layer.
+ axes: list of int or tuple of int, optional
+ List of dimensions for the reduce operations.
+ Each should be in range ``[-rank(input), rank(input))``, default: ``None`` (``reduce_all``).
+ keepdims: bool, optional
+ Whether or not to retain the reduced dimensions with length 1, default: true.
+ reduce_all: bool, optional
+ Whether or not to reduce on all axes, default: false.
+
+ See Also
+ --------
+ add_reduce_l1, add_reduce_l2, add_reduce_min, add_reduce_prod,
+ add_reduce_max, add_reduce_mean, add_reduce_logsum, add_reduce_logsumexp,
+ add_reduce_sumsquare
+ """
+
+ spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+ spec_layer_params = spec_layer.reduceSum
+
+ if axes is not None and len(axes) != 0:
+ spec_layer_params.axes.extend(map(int, axes))
+ else:
+ reduce_all = True
+
+ spec_layer_params.keepDims = keepdims
+ spec_layer_params.reduceAll = reduce_all
+
+ self._set_rank_for_reduce_op(
+ input_name, output_name, axes, keepdims, reduce_all
+ )
+ return spec_layer
+
+ def add_reduce_prod(
+ self, name, input_name, output_name, axes=None, keepdims=True, reduce_all=False
+ ):
+ """
+ Add a reduce_prod layer to the model that reduces the input tensor
+ using ``prod(elements across given dimensions)``.
+ Refer to the ``ReduceProdLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ input_name: str
+ The input blob name of this layer.
+ output_name: str
+ The output blob name of this layer.
+ axes: list of int or tuple of int, optional
+ List of dimensions for the reduce operations.
+ Each should be in range [-rank(input), rank(input)), default: ``None`` (reduce_all)
+ keepdims: bool, optional
+ Whether or not to retain the reduced dimensions with length 1, default: true.
+ reduce_all: bool, optional
+ Whether or not to reduce on all axes. If axes list is empty, it will
+ be set to true, default: false.
+ + See Also + -------- + add_reduce_l1, add_reduce_l2, add_reduce_sum, add_reduce_min, + add_reduce_max, add_reduce_mean, add_reduce_logsum, add_reduce_logsumexp, + add_reduce_sumsquare + """ + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer_params = spec_layer.reduceProd + + if axes is not None and len(axes) != 0: + spec_layer_params.axes.extend(map(int, axes)) + else: + reduce_all = True + + spec_layer_params.keepDims = keepdims + spec_layer_params.reduceAll = reduce_all + + self._set_rank_for_reduce_op( + input_name, output_name, axes, keepdims, reduce_all + ) + return spec_layer + + def add_reduce_mean( + self, name, input_name, output_name, axes=None, keepdims=True, reduce_all=False + ): + """ + Add a reduce_mean layer to the model that reduces the input tensor + using ``mean(elements across given dimensions)``. + Refer to the ``ReduceMeanLayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + axes: list of int or tuple of int, optional + List of dimensions for the reduce operations. + Each should be in range [-rank(input), rank(input)), default: ``None`` (reduce_all) + keepdims: bool, optional + Whether or not to retain the reduced dimensions with length 1, default: true. + reduce_all: bool, optional + Whether or not to reduce on all axes, default: false. + + See Also + -------- + add_reduce_l1, add_reduce_l2, add_reduce_sum, add_reduce_min, add_reduce_prod + add_reduce_max, add_reduce_logsum, add_reduce_logsumexp, add_reduce_sumsquare + """ + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer_params = spec_layer.reduceMean + + if axes is not None and len(axes) != 0: + spec_layer_params.axes.extend(map(int, axes)) + else: + reduce_all = True + + spec_layer_params.keepDims = keepdims + spec_layer_params.reduceAll = reduce_all + + self._set_rank_for_reduce_op( + input_name, output_name, axes, keepdims, reduce_all + ) + return spec_layer + + def add_reduce_max( + self, name, input_name, output_name, axes=None, keepdims=True, reduce_all=False + ): + """ + Add a reduce_max layer to the model that reduces the input tensor + using ``max(elements across given dimensions)``. + Refer to the ``ReduceMaxLayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + axes: list of int or tuple of int, optional + List of dimensions for the reduce operations. + Each should be in range [-rank(input), rank(input)), default: ``None`` (reduce_all) + keepdims: bool, optional + Whether or not to retain the reduced dimensions with length 1, default: true. + reduce_all: bool, optional + Whether or not to reduce on all axes, default: false. 
+ + See Also + -------- + add_reduce_l1, add_reduce_l2, add_reduce_sum, add_reduce_min, add_reduce_prod + add_reduce_mean, add_reduce_logsum, add_reduce_logsumexp, add_reduce_sumsquare + """ + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer_params = spec_layer.reduceMax + + if axes is not None and len(axes) != 0: + spec_layer_params.axes.extend(map(int, axes)) + else: + reduce_all = True + + spec_layer_params.keepDims = keepdims + spec_layer_params.reduceAll = reduce_all + + self._set_rank_for_reduce_op( + input_name, output_name, axes, keepdims, reduce_all + ) + return spec_layer + + def add_reduce_min( + self, name, input_name, output_name, axes=None, keepdims=True, reduce_all=False + ): + """ + Add a reduce_min layer to the model that reduces the input tensor + using ``min(elements across given dimensions)``. + Refer to the ``ReduceMinLayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + axes: list of int or tuple of int, optional + List of dimensions for the reduce operations. + Each should be in range [-rank(input), rank(input)), default: ``None`` (reduce_all) + keepdims: bool, optional + Whether or not to retain the reduced dimensions with length 1, default: true. + reduce_all: bool, optional + Whether or not to reduce on all axes, default: false. + + See Also + -------- + add_reduce_l1, add_reduce_l2, add_reduce_sum, add_reduce_max, add_reduce_prod + add_reduce_mean, add_reduce_logsum, add_reduce_logsumexp, add_reduce_sumsquare + """ + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer_params = spec_layer.reduceMin + + if axes is not None and len(axes) != 0: + spec_layer_params.axes.extend(map(int, axes)) + else: + reduce_all = True + + spec_layer_params.keepDims = keepdims + spec_layer_params.reduceAll = reduce_all + + self._set_rank_for_reduce_op( + input_name, output_name, axes, keepdims, reduce_all + ) + return spec_layer + + def add_reduce_l2( + self, name, input_name, output_name, axes=None, keepdims=True, reduce_all=False + ): + """ + Add a reduce_l2 layer to the model that reduces the input tensor + using ``l2_normalization(elements across given dimensions)``. + Refer to the ``ReduceL2LayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + axes: list of int or tuple of int, optional + List of dimensions for the reduce operations. + Each should be in range [-rank(input), rank(input)), default: ``None`` (reduce_all) + keepdims: bool, optional + Whether or not to retain the reduced dimensions with length 1, default: true. + reduce_all: bool, optional + Whether or not to reduce on all axes, default: false. 
+ + See Also + -------- + add_reduce_l1, add_reduce_sum, add_reduce_min, add_reduce_max, add_reduce_prod + add_reduce_mean, add_reduce_logsum, add_reduce_logsumexp, add_reduce_sumsquare + """ + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer_params = spec_layer.reduceL2 + + if axes is not None and len(axes) != 0: + spec_layer_params.axes.extend(map(int, axes)) + else: + reduce_all = True + + spec_layer_params.keepDims = keepdims + spec_layer_params.reduceAll = reduce_all + + self._set_rank_for_reduce_op( + input_name, output_name, axes, keepdims, reduce_all + ) + return spec_layer + + def add_reduce_l1( + self, name, input_name, output_name, axes=None, keepdims=True, reduce_all=False + ): + """ + Add a reduce_l1 layer to the model that reduces the input tensor + using ``l1_normalization(elements across given dimensions)``. + Refer to the ``ReduceL1LayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + axes: list of int or tuple of int, optional + List of dimensions for the reduce operations. + Each should be in range [-rank(input), rank(input)), default: ``None`` (reduce_all) + keepdims: bool, optional + Whether or not to retain the reduced dimensions with length 1, default: true. + reduce_all: bool, optional + Whether or not to reduce on all axes, default: false. + + See Also + -------- + add_reduce_l2, add_reduce_sum, add_reduce_min, add_reduce_max, add_reduce_prod + add_reduce_mean, add_reduce_logsum, add_reduce_logsumexp, add_reduce_sumsquare + """ + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer_params = spec_layer.reduceL1 + + if axes is not None and len(axes) != 0: + spec_layer_params.axes.extend(map(int, axes)) + else: + reduce_all = True + + spec_layer_params.keepDims = keepdims + spec_layer_params.reduceAll = reduce_all + + self._set_rank_for_reduce_op( + input_name, output_name, axes, keepdims, reduce_all + ) + return spec_layer + + def add_reduce_sumsquare( + self, name, input_name, output_name, axes=None, keepdims=True, reduce_all=False + ): + """ + Add a reduce_sumsquare layer to the model that reduces the input tensor + using ``sum(square(elements across given dimensions))``. + Refer to the ``ReduceSumSquareLayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + axes: list of int or tuple of int, optional + List of dimensions for the reduce operations. + Each should be in range [-rank(input), rank(input)), default: ``None`` (reduce_all) + keepdims: bool, optional + Whether or not to retain the reduced dimensions with length 1, default: true. + reduce_all: bool, optional + Whether or not to reduce on all axes, default: false. 
+ + See Also + -------- + add_reduce_l1, add_reduce_l2, add_reduce_sum, add_reduce_min, add_reduce_prod + add_reduce_max, add_reduce_mean, add_reduce_logsum, add_reduce_logsumexp + """ + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer_params = spec_layer.reduceSumSquare + + if axes is not None and len(axes) != 0: + spec_layer_params.axes.extend(map(int, axes)) + else: + reduce_all = True + + spec_layer_params.keepDims = keepdims + spec_layer_params.reduceAll = reduce_all + + self._set_rank_for_reduce_op( + input_name, output_name, axes, keepdims, reduce_all + ) + return spec_layer + + def add_reduce_logsum( + self, name, input_name, output_name, axes=None, keepdims=True, reduce_all=False + ): + """ + Add a reduce_logsum layer to the model that reduces the input tensor + using log(sum(elements across given dimensions)). + Refer to the ``ReduceLogSumLayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + axes: list of int or tuple of int, optional + List of dimensions for the reduce operations. + Each should be in range [-rank(input), rank(input)), default: ``None`` (reduce_all) + keepdims: bool, optional + Whether or not to retain the reduced dimensions with length 1, default: true. + reduce_all: bool, optional + Whether or not to reduce on all axes, default: false. + + See Also + -------- + add_reduce_l1, add_reduce_l2, add_reduce_sum, add_reduce_min, add_reduce_prod + add_reduce_max, add_reduce_mean, add_reduce_logsumexp, add_reduce_sumsquare + """ + + spec_layer = self._add_generic_layer(name, [input_name], [output_name]) + spec_layer_params = spec_layer.reduceLogSum + + if axes is not None and len(axes) != 0: + spec_layer_params.axes.extend(map(int, axes)) + else: + reduce_all = True + + spec_layer_params.keepDims = keepdims + spec_layer_params.reduceAll = reduce_all + + self._set_rank_for_reduce_op( + input_name, output_name, axes, keepdims, reduce_all + ) + return spec_layer + + def add_reduce_logsumexp( + self, name, input_name, output_name, axes=None, keepdims=True, reduce_all=False + ): + """ + Add a reduce_logsumexp layer to the model that computes ``log(sum(exp(tensor)))`` + and reduces along the given axis. + Refer to the ``ReduceLogSumExpLayerParams`` message in the specification + (NeuralNetwork.proto) for more details. + + Parameters + ---------- + name: str + The name of this layer. + input_name: str + The input blob name of this layer. + output_name: str + The output blob name of this layer. + axes: list of int or tuple of int, optional + List of dimensions for the reduce operations. + Each should be in range [-rank(input), rank(input)), default: ``None`` (reduce_all) + keepdims: bool, optional + Whether or not to retain the reduced dimensions with length 1, default: true. + reduce_all: bool, optional + Whether or not to reduce on all axes, default: false. 
+
+ See Also
+ --------
+ add_reduce_l1, add_reduce_l2, add_reduce_sum, add_reduce_min, add_reduce_prod,
+ add_reduce_max, add_reduce_mean, add_reduce_logsum, add_reduce_sumsquare
+ """
+
+ spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+ spec_layer_params = spec_layer.reduceLogSumExp
+
+ if axes is not None and len(axes) != 0:
+ spec_layer_params.axes.extend(map(int, axes))
+ else:
+ reduce_all = True
+
+ spec_layer_params.keepDims = keepdims
+ spec_layer_params.reduceAll = reduce_all
+
+ self._set_rank_for_reduce_op(
+ input_name, output_name, axes, keepdims, reduce_all
+ )
+ return spec_layer
+
+ def add_where_nonzero(self, name, input_name, output_name):
+ """
+ Add a where_nonzero layer to the model that returns a tensor containing
+ the indices of all non-zero elements of the input tensor.
+ Refer to the ``WhereNonZeroLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ input_name: str
+ The input blob name of this layer.
+ output_name: str
+ The output blob name of this layer.
+
+ See Also
+ --------
+ add_where_broadcastable
+ """
+
+ spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+ spec_layer.whereNonZero.MergeFromString(b"")
+
+ self.rank_dict[output_name] = 2
+ return spec_layer
+
+ def add_matrix_band_part(
+ self, name, input_name, output_name, num_lower=-1, num_upper=-1
+ ):
+ """
+ Add a matrix_band_part layer to the model that copies a tensor setting
+ everything outside a central band in each inner-most matrix to zero.
+ Refer to the ``MatrixBandPartLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ input_name: str
+ The input blob name of this layer.
+ output_name: str
+ The output blob name of this layer.
+ num_lower: int, optional
+ Number of lower sub-diagonals to keep.
+ Default: -1 (keep entire lower triangle).
+ num_upper: int, optional
+ Number of upper sub-diagonals to keep.
+ Default: -1 (keep entire upper triangle).
+
+ See Also
+ --------
+ add_lower_triangular, add_upper_triangular
+ """
+
+ spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+ spec_layer_params = spec_layer.matrixBandPart
+ spec_layer_params.numLower = num_lower
+ spec_layer_params.numUpper = num_upper
+ return spec_layer
+
+ def add_lower_triangular(self, name, input_name, output_name, k=0):
+ """
+ Add a lower_triangular layer to the model that copies a tensor setting
+ everything outside the lower triangle to zero.
+ Refer to the ``LowerTriangularLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ input_name: str
+ The input blob name of this layer.
+ output_name: str
+ The output blob name of this layer.
+ k: int, optional
+ Diagonal above which to zero elements, default: 0 (the main diagonal);
+ k < 0 is below the main diagonal and k > 0 is above it.
+
+ See Also
+ --------
+ add_upper_triangular, add_matrix_band_part
+ """
+
+ spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+ spec_layer_params = spec_layer.lowerTriangular
+ spec_layer_params.k = k
+ return spec_layer
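+ # The triangular layers above mirror ``numpy.tril``/``numpy.triu``; a
+ # hedged sketch of the analogous numpy semantics for reference:
+ #
+ # import numpy as np
+ # x = np.arange(9).reshape(3, 3)
+ # np.tril(x, k=0)   # analogous to add_lower_triangular with k=0
+ # np.triu(x, k=1)   # analogous to add_upper_triangular with k=1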
+
+ def add_upper_triangular(self, name, input_name, output_name, k=0):
+ """
+ Add an upper_triangular layer to the model that copies a tensor setting
+ everything outside the upper triangle to zero.
+ Refer to the ``UpperTriangularLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ input_name: str
+ The input blob name of this layer.
+ output_name: str
+ The output blob name of this layer.
+ k: int, optional
+ Diagonal below which to zero elements, default: 0 (the main diagonal);
+ k < 0 is below the main diagonal and k > 0 is above it.
+
+ See Also
+ --------
+ add_lower_triangular, add_matrix_band_part
+ """
+
+ spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+ spec_layer_params = spec_layer.upperTriangular
+ spec_layer_params.k = k
+ return spec_layer
+
+ def add_where_broadcastable(self, name, input_names, output_name):
+ """
+ Add a where_broadcastable layer to the model that returns the elements
+ either from tensor x or tensor y, depending on the value in the
+ condition tensor.
+ Refer to the ``WhereBroadcastableLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ input_names: list of str
+ The input blob names of this layer.
+ output_name: str
+ The output blob name of this layer.
+
+ See Also
+ --------
+ add_where_nonzero
+ """
+
+ spec_layer = self._add_generic_layer(name, input_names, [output_name])
+ spec_layer.whereBroadcastable.MergeFromString(b"")
+
+ self._set_max_input_rank(input_names, output_name)
+
+ return spec_layer
+
+ def add_layer_normalization(
+ self, name, input_name, output_name, normalized_shape, gamma, beta, eps=1e-5
+ ):
+ """
+ Add a layer normalization layer to the model that applies layer
+ normalization over the input tensor.
+ Refer to the ``LayerNormalizationLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ input_name: str
+ The input blob name of this layer.
+ output_name: str
+ The output blob name of this layer.
+ normalized_shape: list of int or tuple of int
+ The shape of the input dimensions over which normalization is applied.
+ gamma: numpy.ndarray
+ Weight (scale) parameters, with shape equal to ``normalized_shape``.
+ beta: numpy.ndarray
+ Bias (offset) parameters, with shape equal to ``normalized_shape``.
+ eps: float, optional
+ Constant value added to the denominator, default: 1e-5.
+ """
+
+ if gamma.shape != tuple(normalized_shape):
+ raise ValueError("Shape of parameter gamma should match normalized_shape")
+
+ if beta.shape != tuple(normalized_shape):
+ raise ValueError("Shape of parameter beta should match normalized_shape")
+
+ spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+ spec_layer_params = spec_layer.layerNormalization
+
+ spec_layer_params.normalizedShape.extend(normalized_shape)
+
+ weights = spec_layer_params.gamma
+ weights.floatValue.extend(gamma.flatten())
+
+ bias = spec_layer_params.beta
+ bias.floatValue.extend(beta.flatten())
+
+ spec_layer_params.eps = eps
+
+ return spec_layer
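+ # A sketch of ``add_layer_normalization``: gamma and beta are numpy arrays
+ # whose shapes must equal ``normalized_shape`` (checked above); blob names
+ # are hypothetical:
+ #
+ # import numpy as np
+ # builder.add_layer_normalization(
+ #     "ln", "x", "x_ln", normalized_shape=[10],
+ #     gamma=np.ones((10,)), beta=np.zeros((10,)), eps=1e-5,
+ # )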
+
+ def add_one_hot(
+ self,
+ name,
+ input_names,
+ output_name,
+ one_hot_vector_size=None,
+ axis=-1,
+ on_value=1.0,
+ off_value=0.0,
+ ):
+ """
+ Add a one hot layer to the model that computes the one hot representation of the input tensor.
+ Refer to the ``OneHotLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ input_names: list of str
+ The input blob names of this layer.
+ output_name: str
+ The output blob name of this layer.
+ one_hot_vector_size: int > 0
+ Size of the one hot vector.
+ axis: int, optional
+ Refers to the axis in the output tensor, default: -1.
+ on_value: float, optional
+ Constant value on locations represented by first input, default: 1.0.
+ off_value: float, optional
+ Constant value at all other locations, default: 0.0.
+ """
+
+ if self.spec and (
+ not self.spec.specificationVersion
+ or self.spec.specificationVersion < _SPECIFICATION_VERSION_IOS_14
+ ):
+ self.spec.specificationVersion = _SPECIFICATION_VERSION_IOS_14
+
+ spec_layer = self._add_generic_layer(name, input_names, [output_name])
+ spec_layer_params = spec_layer.oneHot
+ spec_layer_params.axis = axis
+ if one_hot_vector_size:
+ spec_layer_params.oneHotVectorSize = one_hot_vector_size
+ spec_layer_params.onValue = on_value
+ spec_layer_params.offValue = off_value
+ return spec_layer
+
+ def add_cumsum(
+ self, name, input_names, output_name, axis=-1, reverse=False, exclusive=False
+ ):
+ """
+ Add a cumsum layer to the model that computes the cumulative sum of the input along a given axis.
+ Refer to the ``CumSumLayerParams`` message in the specification
+ (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ input_names: list of str
+ The input blob names of this layer.
+ output_name: str
+ The output blob name of this layer.
+ axis: int, optional
+ Axis to perform the operation, default: -1.
+ reverse: bool, optional
+ If true, the cumulative sum is performed in the opposite direction, default: False.
+ exclusive: bool, optional
+ Whether to perform exclusive or inclusive cumulative summation, default: False.
+ """
+
+ if self.spec and (
+ not self.spec.specificationVersion
+ or self.spec.specificationVersion < _SPECIFICATION_VERSION_IOS_14
+ ):
+ self.spec.specificationVersion = _SPECIFICATION_VERSION_IOS_14
+
+ spec_layer = self._add_generic_layer(name, input_names, [output_name])
+ spec_layer_params = spec_layer.cumSum
+ spec_layer_params.axis = axis
+ spec_layer_params.reverse = reverse
+ spec_layer_params.excludeFinalSum = exclusive
+ return spec_layer
+
+ def add_clamped_relu(self, name, input_name, output_name, alpha=0.0, beta=6.0):
+ """
+ Add a clamped relu layer to the model.
+ The clamped relu formula is ``f(x) = min((x >= 0 ? x : alpha * x), beta)``.
+ Refer to the ``ClampedReluLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ input_name: str
+ The input blob name of this layer.
+ output_name: str
+ The output blob name of this layer.
+ alpha: float, optional
+ Slope of the output when the input is negative, default: 0.0.
+ beta: float, optional
+ Upper bound on the output value, default: 6.0.
+
+ See Also
+ --------
+ add_clip
+ """
+
+ if self.spec and (
+ not self.spec.specificationVersion
+ or self.spec.specificationVersion < _SPECIFICATION_VERSION_IOS_14
+ ):
+ self.spec.specificationVersion = _SPECIFICATION_VERSION_IOS_14
+
+ spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+ spec_layer.clampedReLU.MergeFromString(b"")
+ spec_params = spec_layer.clampedReLU
+
+ spec_params.alpha = float(alpha)
+ spec_params.beta = float(beta)
+
+ return spec_layer
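+ # The clamped relu above computes f(x) = min(x if x >= 0 else alpha * x, beta);
+ # a numpy rendering of the same formula for reference:
+ #
+ # import numpy as np
+ # def clamped_relu(x, alpha=0.0, beta=6.0):
+ #     return np.minimum(np.where(x >= 0, x, alpha * x), beta)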
+
+ def add_argsort(self, name, input_name, output_name, axis=0, descending=False):
+ """
+ Add an argsort layer to the model.
+ Refer to the ``ArgsortLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
+
+ Parameters
+ ----------
+ name: str
+ The name of this layer.
+ input_name: str
+ The input blob name of this layer.
+ output_name: str
+ The output blob name of this layer.
+ axis: int, optional
+ Axis along which to compute the sorting indices, default: 0.
+ descending: bool, optional
+ If true, sort in descending order, default: False.
+
+ See Also
+ --------
+ add_topk
+ """
+
+ if self.spec and (
+ not self.spec.specificationVersion
+ or self.spec.specificationVersion < _SPECIFICATION_VERSION_IOS_14
+ ):
+ self.spec.specificationVersion = _SPECIFICATION_VERSION_IOS_14
+
+ spec_layer = self._add_generic_layer(name, [input_name], [output_name])
+ spec_layer.argSort.MergeFromString(b"")
+ spec_params = spec_layer.argSort
+
+ spec_params.axis = int(axis)
+ spec_params.descending = descending
+
+ return spec_layer
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/flexible_shape_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/flexible_shape_utils.py
new file mode 100644
index 00000000..f2e42794
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/flexible_shape_utils.py
@@ -0,0 +1,738 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+"""
+Utilities to annotate Neural Network Features with flexible shape information.
+"""
+
+from ... import (_MINIMUM_FLEXIBLE_SHAPES_SPEC_VERSION,
+ _MINIMUM_NDARRAY_SPEC_VERSION)
+from ..utils import _get_feature
+
+_SEQUENCE_KEY = "S"
+_BATCH_KEY = "B"
+_CHANNEL_KEY = "C"
+_HEIGHT_KEY = "H"
+_WIDTH_KEY = "W"
+
+_CONSTRAINED_KEYS = [_CHANNEL_KEY, _HEIGHT_KEY, _WIDTH_KEY]
+
+
+class Shape:
+ def __init__(self, shape_value):
+ if shape_value < 1:
+ raise Exception("Invalid value. Size/Shape values must be > 0")
+ self._value = shape_value
+
+ @property
+ def value(self):
+ return self._value
+
+
+class Size(Shape):
+ def __init__(self, size_value):
+ super(Size, self).__init__(size_value)
+
+
+class NeuralNetworkMultiArrayShape:
+ """
+ An object representing a shape for a multiArray feature in a
+ neural network. Valid shapes must have only the Channel [C]
+ shape or the Channel, Height and Width [C, H, W] shapes populated.
+ """
+
+ def __init__(self, channel=None, height=None, width=None):
+ self._shape = {
+ _CHANNEL_KEY: Shape(int(channel)) if channel else None,
+ _HEIGHT_KEY: Shape(int(height)) if height else None,
+ _WIDTH_KEY: Shape(int(width)) if width else None,
+ }
+
+ def set_channel_shape(self, channel_shape):
+ self._shape[_CHANNEL_KEY] = Shape(channel_shape)
+
+ def set_height_shape(self, height_shape):
+ self._shape[_HEIGHT_KEY] = Shape(height_shape)
+
+ def set_width_shape(self, width_shape):
+ self._shape[_WIDTH_KEY] = Shape(width_shape)
+
+ def _validate_multiarray_shape(self):
+ num_dims = len([v for v in self._shape.values() if v])
+ if num_dims != 1 and num_dims != 3:
+ raise Exception(
+ "For neural networks, shape must be of length 1 or 3"
+ ", representing input shape [C] or [C,H,W], respectively"
+ )
+
+ if num_dims == 1:
+ if not self._shape["C"]:
+ raise Exception("Channel Shape not specified")
+
+ @property
+ def multiarray_shape(self):
+ num_dims = len([v for v in self._shape.values() if v])
+ if num_dims == 1:
+ return [self._shape[_CHANNEL_KEY].value]
+ elif num_dims == 3:
+ return [
+ self._shape[_CHANNEL_KEY].value,
+ self._shape[_HEIGHT_KEY].value,
+ self._shape[_WIDTH_KEY].value,
+ ]
+ else:
+ raise Exception("Invalid multiarray shape for neural network")
+
+
+class NeuralNetworkImageSize:
+ """
+ An object representing a size for an image feature inside a
+ neural network. Valid sizes for height and width are > 0.
+ """
+
+ def __init__(self, height=None, width=None):
+ self._height = Size(height)
+ self._width = Size(width)
+
+ def set_width(self, width):
+ self._width = Size(width)
+
+ def set_height(self, height):
+ self._height = Size(height)
+
+ @property
+ def width(self):
+ return self._width.value
+
+ @property
+ def height(self):
+ return self._height.value
+
+
+class ShapeRange:
+ def __init__(self, lowerBound, upperBound):
+ unBounded = False
+
+ if upperBound == -1:
+ unBounded = True
+
+ if not unBounded and lowerBound > upperBound:
+ raise Exception(
+ "lowerBound > upperBound for range ({},{})".format(
+ lowerBound, upperBound
+ )
+ )
+
+ if not unBounded and upperBound < 1:
+ raise Exception("Invalid upperBound: {} ".format(upperBound))
+
+ if lowerBound == 0:
+ lowerBound = 1
+
+ if lowerBound < 1:
+ raise Exception("Invalid lowerBound: {}".format(lowerBound))
+
+ self._lowerBound = lowerBound
+ self._upperBound = upperBound
+ self._unBounded = unBounded
+
+ @property
+ def lowerBound(self):
+ return self._lowerBound
+
+ @property
+ def upperBound(self):
+ return self._upperBound
+
+ @property
+ def isUnbounded(self):
+ return self._unBounded
+
+ @property
+ def isFlexible(self):
+ return not (self._lowerBound == self._upperBound)
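+ # A short sketch of ``ShapeRange`` semantics as implemented above: an upper
+ # bound of -1 marks an unbounded dimension, and a range is "flexible" when
+ # its bounds differ.
+ #
+ # r = ShapeRange(1, -1)
+ # r.isUnbounded   # True
+ # r.isFlexible    # True
+ # ShapeRange(2, 2).isFlexible   # False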
+
+
+class NeuralNetworkMultiArrayShapeRange:
+ """
+ An object representing a range of shapes for a multiArray feature in a
+ neural network. Valid shape ranges must have only the Channel [C]
+ range or the Channel, Height and Width [C, H, W] ranges populated. A "-1"
+ value in an upper bound represents an unbounded range.
+ """
+
+ def __init__(self, input_ranges=None):
+ self.arrayShapeRange = {}
+
+ if input_ranges:
+ if not isinstance(input_ranges, dict):
+ raise Exception(
+ "Attempting to initialize a shape range with something other than a dictionary of shapes."
+ )
+ self.arrayShapeRange = {}
+ for key, value in input_ranges.items():
+ if key in _CONSTRAINED_KEYS:
+ self.arrayShapeRange[key] = self._create_shape_range(value)
+ self.validate_array_shape_range()
+
+ def _create_shape_range(self, r):
+ if not isinstance(r, tuple):
+ raise Exception("Range should be a ShapeRange or a tuple object")
+ elif len(r) != 2:
+ raise Exception("Range tuple must be of length 2")
+ return ShapeRange(r[0], r[1])
+
+ def add_channel_range(self, channel_range):
+ if not isinstance(channel_range, ShapeRange):
+ channel_range = self._create_shape_range(channel_range)
+ self.arrayShapeRange[_CHANNEL_KEY] = channel_range
+
+ def add_height_range(self, height_range):
+ if not isinstance(height_range, ShapeRange):
+ height_range = self._create_shape_range(height_range)
+ self.arrayShapeRange[_HEIGHT_KEY] = height_range
+
+ def add_width_range(self, width_range):
+ if not isinstance(width_range, ShapeRange):
+ width_range = self._create_shape_range(width_range)
+ self.arrayShapeRange[_WIDTH_KEY] = width_range
+
+ def get_shape_range_dims(self):
+ return len(self.arrayShapeRange.keys())
+
+ def validate_array_shape_range(self):
+ num_dims = self.get_shape_range_dims()
+ if num_dims != 1 and num_dims != 3:
+ raise Exception(
+ "For neural networks, shape must be of length 1 or 3"
+ ", representing input shape [C] or [C,H,W], respectively"
+ )
+
+ if num_dims == 1:
+ if _CHANNEL_KEY not in self.arrayShapeRange.keys():
+ raise Exception("Channel Shape Range not specified")
+
+ if num_dims == 3:
+ if (
+ _CHANNEL_KEY not in self.arrayShapeRange.keys()
+ or _HEIGHT_KEY not in self.arrayShapeRange.keys()
+ or _WIDTH_KEY not in self.arrayShapeRange.keys()
+ ):
+ raise Exception(
+ "Shape range constraint missing for either channel, height, or width."
+ )
+
+ def get_channel_range(self):
+ return self.arrayShapeRange[_CHANNEL_KEY]
+
+ def get_height_range(self):
+ return self.arrayShapeRange[_HEIGHT_KEY]
+
+ def get_width_range(self):
+ return self.arrayShapeRange[_WIDTH_KEY]
+
+ def isFlexible(self):
+ """
+ Returns true if any one of the channel, height, or width ranges of this shape allow more than one input value.
+ """
+ for key, value in self.arrayShapeRange.items():
+ if key in _CONSTRAINED_KEYS:
+ if value.isFlexible:
+ return True
+
+ return False
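+ # A usage sketch for ``NeuralNetworkMultiArrayShapeRange``: ranges may be
+ # given as (lower, upper) tuples, and only [C] or [C, H, W] may be populated.
+ #
+ # r = NeuralNetworkMultiArrayShapeRange()
+ # r.add_channel_range((1, 3))
+ # r.validate_array_shape_range()   # [C]-only ranges are valid
+ # r.isFlexible()                   # True, since 1 != 3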
+
+
+class NeuralNetworkImageSizeRange:
+ """
+ An object representing a range of sizes for an image feature inside a
+ neural network. Valid ranges for height and width are > 0. A "-1"
+ upper bound value for either width or height represents an unbounded size
+ for that dimension.
+ """
+
+ def __init__(self, height_range=None, width_range=None):
+ if height_range and not isinstance(height_range, ShapeRange):
+ if not isinstance(height_range, tuple):
+ raise Exception("Height range should be a ShapeRange or a tuple object")
+ elif len(height_range) != 2:
+ raise Exception("Height range tuple must be of length 2")
+ height_range = ShapeRange(height_range[0], height_range[1])
+
+ if width_range and not isinstance(width_range, ShapeRange):
+ if not isinstance(width_range, tuple):
+ raise Exception("Width range should be a ShapeRange or a tuple object")
+ elif len(width_range) != 2:
+ raise Exception("Width range tuple must be of length 2")
+ width_range = ShapeRange(width_range[0], width_range[1])
+
+ self._height_range = height_range
+ self._width_range = width_range
+
+ def add_width_range(self, width_range):
+ if not isinstance(width_range, ShapeRange):
+ if not isinstance(width_range, tuple):
+ raise Exception("Width range should be a ShapeRange or a tuple object")
+ elif len(width_range) != 2:
+ raise Exception("Width range tuple must be of length 2")
+
+ self._width_range = ShapeRange(width_range[0], width_range[1])
+
+ def add_height_range(self, height_range):
+ if not isinstance(height_range, ShapeRange):
+ if not isinstance(height_range, tuple):
+ raise Exception("Height range should be a ShapeRange or a tuple object")
+ elif len(height_range) != 2:
+ raise Exception("Height range tuple must be of length 2")
+
+ self._height_range = ShapeRange(height_range[0], height_range[1])
+
+ def get_width_range(self):
+ return self._width_range
+
+ def get_height_range(self):
+ return self._height_range
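+ # A usage sketch for ``NeuralNetworkImageSizeRange``; -1 as an upper bound
+ # leaves the width unbounded:
+ #
+ # r = NeuralNetworkImageSizeRange(height_range=(64, 128), width_range=(128, -1))
+ # r.get_height_range().upperBound   # 128
+ # r.get_width_range().isUnbounded   # True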
+
+
+def add_enumerated_multiarray_shapes(spec, feature_name, shapes):
+ """
+ Annotate an input or output multiArray feature in a Neural Network spec
+ to accommodate a list of enumerated array shapes.
+
+ :param spec: MLModel
+ The MLModel spec containing the feature
+
+ :param feature_name: str
+ The name of the multiArray feature for which to add shape information.
+ If the feature is not found in the input or output descriptions then
+ an exception is thrown
+
+ :param shapes: [] | NeuralNetworkMultiArrayShape
+ A single NeuralNetworkMultiArrayShape object or a list of them, which
+ encode valid shape information for a multiArray feature
+
+ Examples
+ --------
+ .. sourcecode:: python
+
+ >>> import coremltools
+ >>> from coremltools.models.neural_network import flexible_shape_utils
+ >>> spec = coremltools.utils.load_spec('mymodel.mlmodel')
+ >>> array_shapes = [flexible_shape_utils.NeuralNetworkMultiArrayShape(3)]
+ >>> second_shape = flexible_shape_utils.NeuralNetworkMultiArrayShape()
+ >>> second_shape.set_channel_shape(3)
+ >>> second_shape.set_height_shape(10)
+ >>> second_shape.set_width_shape(15)
+ >>> array_shapes.append(second_shape)
+ >>> flexible_shape_utils.add_enumerated_multiarray_shapes(spec, feature_name='my_multiarray_featurename', shapes=array_shapes)
+
+ :return:
+ None. The spec object is updated.
+ """
+
+ if not isinstance(shapes, list):
+ shapes = [shapes]
+
+ for shape in shapes:
+ if not isinstance(shape, NeuralNetworkMultiArrayShape):
+ raise Exception(
+ "Shapes should be of type NeuralNetworkMultiArrayShape"
+ )
+ shape._validate_multiarray_shape()
+
+ feature = _get_feature(spec, feature_name)
+ if feature.type.WhichOneof("Type") != "multiArrayType":
+ raise Exception(
+ "Trying to add enumerated shapes to a non-multiArray feature type"
+ )
+
+ if feature.type.multiArrayType.WhichOneof("ShapeFlexibility") != "enumeratedShapes":
+ feature.type.multiArrayType.ClearField("ShapeFlexibility")
+
+ eshape_len = len(feature.type.multiArrayType.enumeratedShapes.shapes)
+
+ # Add default array shape to list of enumerated shapes if enumerated shapes
+ # field is currently empty
+ if eshape_len == 0:
+ fixed_shape = feature.type.multiArrayType.shape
+ if len(fixed_shape) == 1:
+ fs = NeuralNetworkMultiArrayShape(fixed_shape[0])
+ shapes.append(fs)
+ elif len(fixed_shape) == 3:
+ fs = NeuralNetworkMultiArrayShape()
+ fs.set_channel_shape(fixed_shape[0])
+ fs.set_height_shape(fixed_shape[1])
+ fs.set_width_shape(fixed_shape[2])
+ shapes.append(fs)
+ else:
+ raise Exception(
+ "Original fixed multiArray shape for {} is invalid".format(feature_name)
+ )
+
+ for shape in shapes:
+ s = feature.type.multiArrayType.enumeratedShapes.shapes.add()
+ s.shape.extend(shape.multiarray_shape)
+
+ # Bump up specification version
+ spec.specificationVersion = max(
+ _MINIMUM_FLEXIBLE_SHAPES_SPEC_VERSION, spec.specificationVersion
+ )
The spec object is updated in place
+    """
+    if not isinstance(sizes, list):
+        sizes = [sizes]
+
+    for size in sizes:
+        if not isinstance(size, NeuralNetworkImageSize):
+            raise Exception("Shape ranges should be of type NeuralNetworkImageSize")
+
+    feature = _get_feature(spec, feature_name)
+    if feature.type.WhichOneof("Type") != "imageType":
+        raise Exception("Trying to add enumerated sizes to a non-image feature type")
+
+    if feature.type.imageType.WhichOneof("SizeFlexibility") != "enumeratedSizes":
+        feature.type.imageType.ClearField("SizeFlexibility")
+
+    esizes_len = len(feature.type.imageType.enumeratedSizes.sizes)
+
+    # Add default image size to list of enumerated sizes if enumerated sizes
+    # field is currently empty
+    if esizes_len == 0:
+        fixed_height = feature.type.imageType.height
+        fixed_width = feature.type.imageType.width
+        sizes.append(NeuralNetworkImageSize(fixed_height, fixed_width))
+
+    shapes_added_so_far = []
+    for size in sizes:
+        if [size.height, size.width] not in shapes_added_so_far:
+            s = feature.type.imageType.enumeratedSizes.sizes.add()
+            s.height = size.height
+            s.width = size.width
+            shapes_added_so_far.append([s.height, s.width])
+
+    # Bump up specification version
+    spec.specificationVersion = max(
+        _MINIMUM_FLEXIBLE_SHAPES_SPEC_VERSION, spec.specificationVersion
+    )
+
+
+def update_image_size_range(spec, feature_name, size_range):
+    """
+    Annotate an input or output Image feature in a Neural Network spec to
+    accommodate a range of image sizes
+
+    :param spec: MLModel
+        The MLModel spec containing the feature
+
+    :param feature_name: str
+        The name of the Image feature for which to add shape information.
+        If the feature is not found in the input or output descriptions then
+        an exception is thrown
+
+    :param size_range: NeuralNetworkImageSizeRange
+        A NeuralNetworkImageSizeRange object with the populated image size
+        range information.
+
+    Examples
+    --------
+    .. sourcecode:: python
+
+        >>> import coremltools
+        >>> from coremltools.models.neural_network import flexible_shape_utils
+        >>> spec = coremltools.utils.load_spec('mymodel.mlmodel')
+        >>> img_size_ranges = flexible_shape_utils.NeuralNetworkImageSizeRange()
+        >>> img_size_ranges.add_height_range((64, 128))
+        >>> img_size_ranges.add_width_range((128, -1))
+        >>> flexible_shape_utils.update_image_size_range(spec, feature_name='my_image_featurename', size_range=img_size_ranges)
+
+    :return:
+        None.
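+
+    (In the example above, an upper bound of -1 marks the width as unbounded;
+    this matches the convention documented for set_multiarray_ndshape_range
+    further below.)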
The spec object is updated + """ + if not isinstance(size_range, NeuralNetworkImageSizeRange): + raise Exception("Shape ranges should be of type NeuralNetworkImageSizeRange") + + feature = _get_feature(spec, feature_name) + if feature.type.WhichOneof("Type") != "imageType": + raise Exception("Trying to add size ranges for " "a non-image feature type") + + feature.type.imageType.ClearField("SizeFlexibility") + feature.type.imageType.imageSizeRange.heightRange.lowerBound = ( + size_range.get_height_range().lowerBound + ) + feature.type.imageType.imageSizeRange.heightRange.upperBound = ( + size_range.get_height_range().upperBound + ) + + feature.type.imageType.imageSizeRange.widthRange.lowerBound = ( + size_range.get_width_range().lowerBound + ) + feature.type.imageType.imageSizeRange.widthRange.upperBound = ( + size_range.get_width_range().upperBound + ) + + # Bump up specification version + spec.specificationVersion = max( + _MINIMUM_FLEXIBLE_SHAPES_SPEC_VERSION, spec.specificationVersion + ) + + +def update_multiarray_shape_range(spec, feature_name, shape_range): + """ + Annotate an input or output MLMultiArray feature in a Neural Network spec + to accommodate a range of shapes + + :param spec: MLModel + The MLModel spec containing the feature + + :param feature_name: str + The name of the feature for which to add shape range + information. If the feature is not found in the input or output + descriptions then an exception is thrown + + :param shape_range: NeuralNetworkMultiArrayShapeRange + A NeuralNetworkMultiArrayShapeRange object with the populated shape + range information. The shape_range object must either contain only + shape information for channel or channel, height and width. If + the object is invalid then an exception is thrown + + Examples + -------- + .. sourcecode:: python + + >>> import coremltools + >>> from coremltools.models.neural_network import flexible_shape_utils + >>> spec = coremltools.utils.load_spec('mymodel.mlmodel') + >>> shape_range = flexible_shape_utils.NeuralNetworkMultiArrayShapeRange() + >>> shape_range.add_channel_range((1, 3)) + >>> shape_range.add_width_range((128, 256)) + >>> shape_range.add_height_range((128, 256)) + >>> flexible_shape_utils.update_multiarray_shape_range(spec, feature_name='my_multiarray_featurename', shape_range=shape_range) + + :return: + None. 
The spec is updated + """ + if not isinstance(shape_range, NeuralNetworkMultiArrayShapeRange): + raise Exception("Shape range should be of type MultiArrayShapeRange") + + shape_range.validate_array_shape_range() + feature = _get_feature(spec, feature_name) + + if feature.type.WhichOneof("Type") != "multiArrayType": + raise Exception( + "Trying to update shape range for " "a non-multiArray feature type" + ) + + # Add channel range + feature.type.multiArrayType.ClearField("ShapeFlexibility") + s = feature.type.multiArrayType.shapeRange.sizeRanges.add() + s.lowerBound = shape_range.get_channel_range().lowerBound + s.upperBound = shape_range.get_channel_range().upperBound + + if shape_range.get_shape_range_dims() > 1: + # Add height range + s = feature.type.multiArrayType.shapeRange.sizeRanges.add() + s.lowerBound = shape_range.get_height_range().lowerBound + s.upperBound = shape_range.get_height_range().upperBound + # Add width range + s = feature.type.multiArrayType.shapeRange.sizeRanges.add() + s.lowerBound = shape_range.get_width_range().lowerBound + s.upperBound = shape_range.get_width_range().upperBound + + # Bump up specification version + spec.specificationVersion = max( + _MINIMUM_FLEXIBLE_SHAPES_SPEC_VERSION, spec.specificationVersion + ) + + +def set_multiarray_ndshape_range(spec, feature_name, lower_bounds, upper_bounds): + """ + Annotate an input or output MLMultiArray feature in a Neural Network spec + to accommodate a range of shapes. + This is different from "update_multiarray_shape_range", which works with rank 5 + SBCHW mapping. + + :param spec: MLModel + The MLModel spec containing the feature + + :param feature_name: str + The name of the feature for which to add shape range + information. If the feature is not found in the input or output + descriptions then an exception is thrown + + :param lower_bounds: List[int] + list of integers specifying the lower bounds of each dimension. + Length must be same as the rank (length of shape) of the feature_name. + + :param upper_bounds: List[int] + list of integers specifying the upper bounds of each dimension. + -1 corresponds to unbounded range. + Length must be same as the rank (length of shape) of the feature_name. + + + Examples + -------- + .. sourcecode:: python + + >>> import coremltools + >>> from coremltools.models.neural_network import flexible_shape_utils + >>> spec = coremltools.utils.load_spec('mymodel.mlmodel') + >>> # say, the default shape of "my_multiarray_featurename" is (2,3) + >>> flexible_shape_utils.set_multiarray_ndshape_range(spec, feature_name='my_multiarray_featurename', lower_bounds=[1,2], upper_bounds=[10,-1]) + + :return: + None. 
The spec is updated + """ + if not isinstance(lower_bounds, list): + raise Exception("lower_bounds must be a list") + if not isinstance(upper_bounds, list): + raise Exception("upper_bounds must be a list") + + feature = _get_feature(spec, feature_name) + + if feature.type.WhichOneof("Type") != "multiArrayType": + raise Exception( + "Trying to update shape range for " "a non-multiArray feature type" + ) + + shape = feature.type.multiArrayType.shape + + if len(shape) != len(lower_bounds): + raise Exception( + "Length of lower_bounds is not equal to the number of dimensions in the default shape" + ) + if len(shape) != len(upper_bounds): + raise Exception( + "Length of upper_bounds is not equal to the number of dimensions in the default shape" + ) + + feature.type.multiArrayType.ClearField("ShapeFlexibility") + + for i in range(len(lower_bounds)): + if shape[i] < lower_bounds[i]: + raise Exception( + "Default shape in %d-th dimension, which is %d, is smaller" + " than the lower bound of %d" % (i, int(shape[i]), lower_bounds[i]) + ) + if upper_bounds[i] != -1: + if shape[i] > upper_bounds[i]: + raise Exception( + "Default shape in %d-th dimension, which is %d, is greater" + " than the upper bound of %d" % (i, int(shape[i]), upper_bounds[i]) + ) + + s = feature.type.multiArrayType.shapeRange.sizeRanges.add() + s.lowerBound = lower_bounds[i] + s.upperBound = upper_bounds[i] + + # Bump up specification version + spec.specificationVersion = max( + _MINIMUM_NDARRAY_SPEC_VERSION, spec.specificationVersion + ) + + +def add_multiarray_ndshape_enumeration(spec, feature_name, enumerated_shapes): + """ + Annotate an input or output MLMultiArray feature in a Neural Network spec + to accommodate a range of shapes. + Add provided enumerated shapes to the list of shapes already present. + This method is different from "add_enumerated_multiarray_shapes", which is applicable + for rank 5 mapping, SBCHW, arrays. + + :param spec: MLModel + The MLModel spec containing the feature + + :param feature_name: str + The name of the feature for which to add shape range + information. If the feature is not found in the input or output + descriptions then an exception is thrown + + :param enumerated_shapes: List[Tuple(int)] + list of shapes, where each shape is specified as a tuple of integers. + + + Examples + -------- + .. sourcecode:: python + + >>> import coremltools + >>> from coremltools.models.neural_network import flexible_shape_utils + >>> spec = coremltools.utils.load_spec('mymodel.mlmodel') + >>> # say, the default shape of "my_multiarray_featurename" is (2,3) + >>> flexible_shape_utils.add_multiarray_ndshape_enumeration(spec, feature_name='my_multiarray_featurename', enumerated_shapes=[(2,4), (2,6)]) + + :return: + None. 
The spec is updated + """ + if not isinstance(enumerated_shapes, list): + raise Exception("enumerated_shapes must be a list") + if len(enumerated_shapes) == 0: + raise Exception("enumerated_shapes is empty") + + feature = _get_feature(spec, feature_name) + if feature.type.WhichOneof("Type") != "multiArrayType": + raise Exception( + "Trying to update shape range for " "a non-multiArray feature type" + ) + + shape = feature.type.multiArrayType.shape + + if feature.type.multiArrayType.WhichOneof("ShapeFlexibility") != "enumeratedShapes": + feature.type.multiArrayType.ClearField("ShapeFlexibility") + + eshape_len = len(feature.type.multiArrayType.enumeratedShapes.shapes) + + shapes_added_so_far = [] + + # Add default array shape to list of enumerated shapes if enumerated shapes + # field is currently empty + if eshape_len == 0: + fixed_shape = feature.type.multiArrayType.shape + s = feature.type.multiArrayType.enumeratedShapes.shapes.add() + s.shape.extend(fixed_shape) + shapes_added_so_far.append(list(fixed_shape)) + + for shape in enumerated_shapes: + if not isinstance(shape, tuple): + raise Exception("An element in 'enumerated_shapes' is not a tuple") + if list(shape) not in shapes_added_so_far: + s = feature.type.multiArrayType.enumeratedShapes.shapes.add() + s.shape.extend(list(shape)) + shapes_added_so_far.append(list(shape)) + + # Bump up specification version + spec.specificationVersion = max( + _MINIMUM_NDARRAY_SPEC_VERSION, spec.specificationVersion + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/optimization_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/optimization_utils.py new file mode 100644 index 00000000..61772c57 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/optimization_utils.py @@ -0,0 +1,255 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +""" +Neural Network optimization utilities. 
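+
+These helpers fuse adjacent scale, bias, and batchnorm layers into a preceding
+convolution, innerProduct, or batchnorm layer so that the quantizer sees fewer,
+larger weight blobs. A minimal sketch of how the pass is driven (this mirrors
+the call made by quantization_utils; it assumes a plain neuralNetwork spec):
+
+    from coremltools.models.neural_network.optimization_utils import _optimize_nn
+
+    nn_spec = spec.neuralNetwork
+    _optimize_nn(nn_spec.layers)  # fuses eligible layers in place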
+""" + +import numpy as _np + + +def _fuse_layer_with_scale_layer(layer_idx, scale_idx, layers): + layer_type = layers[layer_idx].WhichOneof("layer") + if layer_type == "convolution": + layer = layers[layer_idx].convolution + elif layer_type == "innerProduct": + layer = layers[layer_idx].innerProduct + else: + raise Exception( + "Scale fusion not supper for layer " "type {} ".format(layer_type) + ) + + scale = layers[scale_idx].scale + + # Update weights + sw = _np.array(scale.scale.floatValue) + w = _np.array(layer.weights.floatValue) + w = w.reshape(layer.outputChannels, int(len(w) / layer.outputChannels)) + wp = w * sw[:, None] + del layer.weights.floatValue[:] + layer.weights.floatValue.extend(wp.flatten()) + + # Update biases + if scale.hasBias: + sb = _np.array(scale.bias.floatValue) + if not layer.hasBias: + layer.bias.floatValue.extend(sb) + layer.hasBias = True + else: + lb = _np.array(layer.bias.floatValue) + bp = sw * lb + sb + del layer.bias.floatValue[:] + layer.bias.floatValue.extend(bp) + + # re-wire outputs and delete scale layer + print("Fused {}->{}".format(layers[layer_idx].name, layers[scale_idx].name)) + del layers[layer_idx].output[:] + layers[layer_idx].output.extend(layers[scale_idx].output) + del layers[scale_idx] + + +def _fuse_layer_with_bias_layer(layer_idx, bias_idx, layers): + layer_type = layers[layer_idx].WhichOneof("layer") + if layer_type == "convolution": + layer = layers[layer_idx].convolution + elif layer_type == "innerProduct": + layer = layers[layer_idx].innerProduct + else: + raise Exception( + "Bias fusion not supper for layer " "type {} ".format(layer_type) + ) + + bias = layers[bias_idx].bias + + bb = _np.array(bias.bias.floatValue) + if not layer.hasBias: + layer.bias.floatValue.extend(bb) + layer.hasBias = True + else: + lb = _np.array(layer.bias.floatValue) + bp = lb + bb + del layer.bias.floatValue[:] + layer.bias.floatValue.extend(bp) + + # re-wire outputs and delete bias layer + print("Fused {}->{}".format(layers[layer_idx].name, layers[bias_idx].name)) + del layers[layer_idx].output[:] + layers[layer_idx].output.extend(layers[bias_idx].output) + del layers[bias_idx] + + +def _bn_scale_fusion(bn_idx, scale_idx, layers): + bn = layers[bn_idx].batchnorm + scale = layers[scale_idx].scale + + gamma = _np.array(bn.gamma.floatValue) + beta = _np.array(bn.beta.floatValue) + sw = _np.array(scale.scale.floatValue) + + gamma = gamma * sw + beta = beta * sw + + if scale.hasBias: + sb = _np.array(scale.bias.floatValue) + beta = beta + sb + + del bn.gamma.floatValue[:] + del bn.beta.floatValue[:] + + bn.gamma.floatValue.extend(gamma) + bn.beta.floatValue.extend(beta) + + # re-wire outputs and delete scale layer + print("Fused {}->{}".format(layers[bn_idx].name, layers[scale_idx].name)) + del layers[bn_idx].output[:] + layers[bn_idx].output.extend(layers[scale_idx].output) + del layers[scale_idx] + + +def _conv_bn_fusion(conv_idx, bn_idx, layers): + conv = layers[conv_idx].convolution + bn = layers[bn_idx].batchnorm + + mean = _np.array(bn.mean.floatValue) + variance = _np.array(bn.variance.floatValue) + bn.epsilon + gamma = _np.array(bn.gamma.floatValue) + beta = _np.array(bn.beta.floatValue) + w = _np.array(conv.weights.floatValue) + + if conv.hasBias: + b = _np.array(conv.bias.floatValue) + else: + b = _np.zeros(conv.outputChannels) + + w = w.reshape(conv.outputChannels, int(len(w) / conv.outputChannels)) + wp = (gamma / _np.sqrt(variance))[:, None] * w + bp = (gamma * b / _np.sqrt(variance)) - (gamma * mean / _np.sqrt(variance)) + beta + + del 
conv.weights.floatValue[:] + if conv.hasBias: + del conv.bias.floatValue[:] + + conv.weights.floatValue.extend(wp.flatten()) + conv.bias.floatValue.extend(bp) + conv.hasBias = True + + print("Fused {}->{}".format(layers[conv_idx].name, layers[bn_idx].name)) + # re-wire outputs and delete batchnorm layer + del layers[conv_idx].output[:] + layers[conv_idx].output.extend(layers[bn_idx].output) + del layers[bn_idx] + + +def _get_nn_mappings(layers): + layer_map = {} + type_map = {} + output_map = {} + input_map = {} + for idx, layer in enumerate(layers): + layer_name = "{}".format(idx) + layer_map[layer_name] = {"outputs": [], "inputs": []} + layer_type = layer.WhichOneof("layer") + if layer_type not in type_map.keys(): + type_map[layer_type] = [] + type_map[layer_type].append(layer_name) + + # Add inputs and outputs for layer + for o in layer.output: + layer_map[layer_name]["outputs"].append(o) + for i in layer.input: + layer_map[layer_name]["inputs"].append(i) + + # Construct input/output graph dict + for l in layer_map.keys(): + output_map[l] = [] + input_map[l] = [] + for cl in layer_map.keys(): + if any(x in layer_map[l]["outputs"] for x in layer_map[cl]["inputs"]): + output_map[l].append(cl) + if any(x in layer_map[l]["inputs"] for x in layer_map[cl]["outputs"]): + input_map[l].append(cl) + + return type_map, output_map, input_map + + +def _optimize_nn(layers): + type_map, output_map, input_map = _get_nn_mappings(layers) + bn_layers = [] + conv_layers = [] + ip_layers = [] + bias_layers = [] + scale_layers = [] + + # Only fuse with non-instance batchnorm layers + if "batchnorm" in type_map.keys(): + for bn_layer_idx in type_map["batchnorm"]: + if not layers[int(bn_layer_idx)].batchnorm.instanceNormalization: + bn_layers.append(bn_layer_idx) + + if "convolution" in type_map.keys(): + conv_layers = type_map["convolution"] + + if "innerProduct" in type_map.keys(): + ip_layers = type_map["innerProduct"] + + if "bias" in type_map.keys(): + bias_layers = type_map["bias"] + + if "scale" in type_map.keys(): + scale_layers = type_map["scale"] + + # Convolution optimizations + for conv_idx in conv_layers: + if len(output_map[conv_idx]) != 1: + continue + output_idx = output_map[conv_idx][0] + if len(input_map[output_idx]) != 1: + continue + + # Batchnorm fusion + if output_idx in bn_layers: + _conv_bn_fusion(int(conv_idx), int(output_idx), layers) + return _optimize_nn(layers) + + # Scale fusion + if output_idx in scale_layers: + _fuse_layer_with_scale_layer(int(conv_idx), int(output_idx), layers) + return _optimize_nn(layers) + + # Bias fusion + if output_idx in bias_layers: + _fuse_layer_with_bias_layer(int(conv_idx), int(output_idx), layers) + return _optimize_nn(layers) + + # Inner Product optimizations + for ip_idx in ip_layers: + if len(output_map[ip_idx]) != 1: + continue + output_idx = output_map[ip_idx][0] + if len(input_map[output_idx]) != 1: + continue + + # Scale Fusion + if output_idx in scale_layers: + _fuse_layer_with_scale_layer(int(ip_idx), int(output_idx), layers) + return _optimize_nn(layers) + + # Bias Fusion + if output_idx in bias_layers: + _fuse_layer_with_bias_layer(int(ip_idx), int(output_idx), layers) + return _optimize_nn(layers) + + # Batchnorm optimizations + for bn_idx in bn_layers: + if len(output_map[bn_idx]) != 1: + continue + output_idx = output_map[bn_idx][0] + if len(input_map[output_idx]) != 1: + continue + + # Scale Fusion + if output_idx in scale_layers: + _bn_scale_fusion(int(bn_idx), int(output_idx), layers) + return _optimize_nn(layers) diff --git 
a/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/printer.py b/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/printer.py new file mode 100644 index 00000000..69cb8406 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/printer.py @@ -0,0 +1,114 @@ +# Copyright (c) 2018, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from .spec_inspection_utils import (_get_feature_description_summary, + _summarize_neural_network_spec, + _summarize_neural_network_spec_code_style) + + +def _print_network_spec_parameter_info_style(mlmodel_spec, interface_only=False): + """ Print the network information summary. + Args: + mlmodel_spec : the mlmodel spec + interface_only : Shows only the input and output of the network + """ + inputs, outputs, layers_info = _summarize_neural_network_spec(mlmodel_spec) + + print("Inputs:") + for i in inputs: + name, description = i + print(" {} {}".format(name, description)) + + print("Outputs:") + for o in outputs: + name, description = o + print(" {} {}".format(name, description)) + + if layers_info is None: + print( + "\n(This MLModel is not a neural network model or does not contain any layers)" + ) + + if layers_info and not interface_only: + print("\nLayers:") + for idx, l in enumerate(layers_info): + layer_type, name, in_blobs, out_blobs, params_info = l + print("[{}] ({}) {}".format(idx, layer_type, name)) + print(" Input blobs: {}".format(in_blobs)) + print(" Output blobs: {}".format(out_blobs)) + if len(params_info) > 0: + print(" Parameters: ") + for param in params_info: + print(" {} = {}".format(param[0], param[1])) + + print("\n") + + +def _print_network_spec_coding_style(mlmodel_spec, interface_only=False): + """ + Args: + mlmodel_spec : the mlmodel spec + interface_only : Shows only the input and output of the network + """ + + inputs = [ + (blob.name, _get_feature_description_summary(blob)) + for blob in mlmodel_spec.description.input + ] + outputs = [ + (blob.name, _get_feature_description_summary(blob)) + for blob in mlmodel_spec.description.output + ] + + input_names = [] + print("Inputs:") + for i in inputs: + name, description = i + print(" {} {}".format(name, description)) + input_names.append(name) + + output_names = [] + print("Outputs:") + for o in outputs: + name, description = o + print(" {} {}".format(name, description)) + output_names.append(name) + + if interface_only: + return + + nn_spec = None + + if mlmodel_spec.HasField("neuralNetwork"): + nn_spec = mlmodel_spec.neuralNetwork + elif mlmodel_spec.HasField("neuralNetworkClassifier"): + nn_spec = mlmodel_spec.neuralNetworkClassifier + elif mlmodel_spec.HasField("neuralNetworkRegressor"): + nn_spec = mlmodel_spec.neuralNetworkRegressor + + if nn_spec is None: + print("\n(This MLModel is not a neural network model)") + return + + print("\n") + _summarize_neural_network_spec_code_style( + nn_spec, input_names=input_names, output_names=output_names + ) + + +def print_network_spec(mlmodel_spec, interface_only=False, style=""): + """ Print the network information summary. + Args: + mlmodel_spec : the mlmodel spec + interface_only : Shows only the input and output of the network + style : str. Either 'coding' or default, which prints information on parameters of layers. 
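+
+    A short usage sketch (assuming ``spec`` is an already-loaded model spec;
+    'coding' prints a code-style summary, any other value prints the per-layer
+    parameter view):
+
+        from coremltools.models.neural_network.printer import print_network_spec
+        print_network_spec(spec, style="coding")       # code-style summary
+        print_network_spec(spec, interface_only=True)  # inputs/outputs only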
+ """ + + if style == "coding": + _print_network_spec_coding_style(mlmodel_spec, interface_only=interface_only) + else: + _print_network_spec_parameter_info_style( + mlmodel_spec, interface_only=interface_only + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/quantization_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/quantization_utils.py new file mode 100644 index 00000000..cef3aff8 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/quantization_utils.py @@ -0,0 +1,1651 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +""" +Utilities to compress Neural Network Models. +Only available in coremltools 2.0b1 and onwards +""" +from os import listdir as _listdir +from sys import stdout as _stdout + +import numpy as _np + +from coremltools import ComputeUnit as _ComputeUnit +from coremltools.models import (_QUANTIZATION_MODE_CUSTOM_LOOKUP_TABLE, + _QUANTIZATION_MODE_DEQUANTIZE, + _QUANTIZATION_MODE_LINEAR_QUANTIZATION, + _QUANTIZATION_MODE_LINEAR_SYMMETRIC, + _QUANTIZATION_MODE_LOOKUP_TABLE_KMEANS, + _QUANTIZATION_MODE_LOOKUP_TABLE_LINEAR, + _SUPPORTED_QUANTIZATION_MODES) +from coremltools.models import MLModel as _MLModel + +from ... import (_MINIMUM_FP16_SPEC_VERSION, + _MINIMUM_QUANTIZED_MODEL_SPEC_VERSION, + _SPECIFICATION_VERSION_IOS_14) +from ..._deps import _HAS_SKLEARN as _HAS_SKLEARN +from ..utils import _get_model, _macos_version, _wp_to_fp16wp +from .optimization_utils import _optimize_nn + + +class QuantizedLayerSelector: + """ + This is the base class to implement custom selectors to skip certain + layers during quantization. To implement a custom selector, create a class + that inherits this class and override `do_quantize()` method. + + Examples + -------- + .. highlight:: python + .. code-block:: python + + class MyLayerSelector(QuantizedLayerSelector): + def __init__(self): + super().__init__() + + def do_quantize(self, layer, **kwargs): + ret = super().do_quantize(layer) + if not ret or layer.name == 'dense_2': + return False + return True + + selector = MyLayerSelector() + quantized_model = quantize_weights(mlmodel, 8, quantization_mode='linear', selector=selector) + + """ + + def __init__(self): + self.quantizable_layer_types = { + "convolution", + "innerProduct", + "embedding", + "embeddingND", + "batchnorm", + "scale", + "bias", + "loadConstant", + "simpleRecurrent", + "gru", + "uniDirectionalLSTM", + "biDirectionalLSTM", + "batchedMatmul", + "depthwiseConv", + "loop", + "branch", + } + + def do_quantize(self, layer, **kwargs): + return layer.WhichOneof("layer") in self.quantizable_layer_types + + +class AdvancedQuantizedLayerSelector(QuantizedLayerSelector): + """ Quantized layer selector allowing the user to specify some types of + layers to skip during quantization process and the minimum size parameters + in quantized convolution layers. + + Examples + -------- + .. highlight:: python + .. 
code-block:: python
+
+        from coremltools.models.neural_network.quantization_utils import AdvancedQuantizedLayerSelector
+        selector = AdvancedQuantizedLayerSelector(
+            skip_layer_types=['batchnorm', 'bias', 'depthwiseConv'],
+            minimum_conv_kernel_channels=4,
+            minimum_conv_weight_count=4096)
+        quantized_model = quantize_weights(model, 8, selector=selector)
+
+    """
+
+    def __init__(
+        self,
+        skip_layer_types=[],
+        minimum_conv_kernel_channels=4,
+        minimum_conv_weight_count=4096,
+    ):
+
+        super().__init__()
+        self.skip_layer_types = skip_layer_types
+
+        # Error checking
+        invalid_skip_types = []
+        for lt in skip_layer_types:
+            if lt not in self.quantizable_layer_types:
+                invalid_skip_types.append(lt)
+        if len(invalid_skip_types) > 0:
+            err_msg = "Skip quantization layer types ({}) are not supported.\n".format(
+                ",".join(invalid_skip_types)
+            )
+            err_msg += "Supported quantization layers: ({})".format(
+                ",".join(self.quantizable_layer_types)
+            )
+            raise ValueError(err_msg)
+
+        self.minimum_conv_kernel_channels = minimum_conv_kernel_channels
+        self.minimum_conv_weight_count = minimum_conv_weight_count
+
+    def do_quantize(self, layer, weight_param=None):
+        """ weight_param - should be name of the WeightParam field
+        """
+        ret = super().do_quantize(layer)
+        if not ret:
+            return False
+
+        layer_type = layer.WhichOneof("layer")
+        if layer_type in self.skip_layer_types:
+            return False
+
+        if layer_type == "convolution":
+            oc = layer.convolution.outputChannels
+            kc = layer.convolution.kernelChannels
+            kh = layer.convolution.kernelSize[0]
+            kw = layer.convolution.kernelSize[1]
+            groups = layer.convolution.nGroups
+            counts = oc * kc * kh * kw
+            has_bias = layer.convolution.hasBias
+
+            if weight_param is None or weight_param == "weights":
+                if "depthwiseConv" in self.skip_layer_types and kc == 1 and groups > 1:
+                    return False
+
+                if (
+                    kc < self.minimum_conv_kernel_channels
+                    or counts < self.minimum_conv_weight_count
+                ):
+                    return False
+
+            elif weight_param == "bias":
+                return "bias" not in self.skip_layer_types
+            else:
+                raise ValueError(
+                    "Unrecognized quantization weight field {}".format(weight_param)
+                )
+
+        elif layer_type in ("innerProduct", "batchedMatmul"):
+            if weight_param is None or weight_param == "weights":
+                return True
+            if weight_param == "bias":
+                return "bias" not in self.skip_layer_types
+            else:
+                raise ValueError(
+                    "Unrecognized quantization weight field {}".format(weight_param)
+                )
+
+        return True
+
+
+class MatrixMultiplyLayerSelector(QuantizedLayerSelector):
+    """
+    Layer selector object that allows users to select matrix multiplication layers
+    with one of the matrices being constant, based on criteria such as the total
+    number of parameters/weights, the number of input or output channels, and/or
+    layer names. If any criterion is not met, the corresponding layer is not
+    selected.
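+
+    A brief usage sketch (illustrative threshold; pairs with
+    ``activate_int8_int8_matrix_multiplications`` defined later in this module):
+
+    .. code-block:: python
+
+        selector = MatrixMultiplyLayerSelector(minimum_weight_count=4096)
+        spec = activate_int8_int8_matrix_multiplications(
+            mlmodel.get_spec(), selector=selector)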
+ """ + + def __init__( + self, + minimum_weight_count=1, + minimum_input_channels=1, + minimum_output_channels=1, + maximum_input_channels=None, + maximum_output_channels=None, + include_layers_with_names=None, + ): + + super().__init__() + + # weight count refers to number of parameters/weights and is equal to product of input & output channels + self.minimum_weight_count = minimum_weight_count + self.minimum_input_channels = minimum_input_channels + self.minimum_output_channels = minimum_output_channels + self.maximum_input_channels = maximum_input_channels + self.maximum_output_channels = maximum_output_channels + if include_layers_with_names is None: + self.include_layers_with_names = [] + + if not ( + isinstance(self.include_layers_with_names, (list, tuple)) + and all( + [isinstance(s, str) for s in self.include_layers_with_names] + ) + ): + raise ValueError( + "Property 'include_layers_with_names' must be a list/tuple of str objects" + ) + + def do_quantize(self, layer, weight_param=None): + """ + weight_param - should be name of the WeightParam field + """ + ret = super().do_quantize(layer) + if not ret: + return False + + layer_type = layer.WhichOneof("layer") + + if layer_type in ["innerProduct", "batchedMatmul"]: + if weight_param == "bias": + return True + elif weight_param is None or weight_param == "weights": + + if layer_type == "innerProduct": + ic = layer.innerProduct.inputChannels + oc = layer.innerProduct.outputChannels + else: + ic = layer.batchedMatmul.weightMatrixFirstDimension + oc = layer.batchedMatmul.weightMatrixSecondDimension + + wc = ic * oc + + if wc < self.minimum_weight_count: + return False + if ic < self.minimum_input_channels: + return False + if oc < self.minimum_output_channels: + return False + if self.maximum_input_channels and ic > self.maximum_input_channels: + return False + if self.maximum_output_channels and oc > self.maximum_output_channels: + return False + if ( + self.include_layers_with_names + and layer.name not in self.include_layers_with_names + ): + return False + + return True + else: + raise ValueError( + "Unrecognized quantization weight field {}".format(weight_param) + ) + + elif layer_type in ["loop", "branch"]: + return True + + return False + + +def _convert_1bit_array_to_byte_array(arr): + """ + Convert bit array to byte array. 
+
+    arr: list
+        Bits as a list where each element is an integer of 0 or 1
+
+    Returns
+    -------
+    numpy.array
+        1D numpy array of type uint8
+    """
+    # Padding if necessary
+    while len(arr) < 8 or len(arr) % 8:
+        arr.append(0)
+
+    arr = _np.array(arr, dtype="uint8")
+    bit_arr = []
+    idx = 0
+    # Iterate and combine 8-bits into a uint8
+    for arr_idx in range(int(len(arr) / 8)):
+        bit_arr.append(
+            ((arr[idx] << 7) & (1 << 7))
+            | ((arr[idx + 1] << 6) & (1 << 6))
+            | ((arr[idx + 2] << 5) & (1 << 5))
+            | ((arr[idx + 3] << 4) & (1 << 4))
+            | ((arr[idx + 4] << 3) & (1 << 3))
+            | ((arr[idx + 5] << 2) & (1 << 2))
+            | ((arr[idx + 6] << 1) & (1 << 1))
+            | ((arr[idx + 7] << 0) & (1 << 0))
+        )
+        idx += 8
+    return _np.array(bit_arr, dtype="uint8")
+
+
+def _convert_array_to_nbit_quantized_bytes(arr, nbits):
+    split_arr = []
+    for idx in range(len(arr)):
+        for i in reversed(range(nbits)):
+            split_arr.append((arr[idx] >> i) & (1 << 0))
+
+    return _convert_1bit_array_to_byte_array(split_arr)
+
+
+def _decompose_bytes_to_bit_arr(arr):
+    """
+    Unpack bytes to bits
+
+    arr: list
+        Byte stream, as a list of uint8 values
+
+    Returns
+    -------
+    bit_arr: list
+        Decomposed bit stream as a list of 0/1s of length (len(arr) * 8)
+    """
+    bit_arr = []
+    for idx in range(len(arr)):
+        for i in reversed(range(8)):
+            bit_arr.append((arr[idx] >> i) & (1 << 0))
+    return bit_arr
+
+
+def _get_linear_lookup_table_and_weight(nbits, wp):
+    """
+    Generate a linear lookup table.
+
+    nbits: int
+        Number of bits to represent a quantized weight value
+
+    wp: numpy.array
+        Weight blob to be quantized
+
+    Returns
+    -------
+    lookup_table: numpy.array
+        Lookup table of shape (2^nbits, )
+    qw: numpy.array
+        Quantized weight indices into the lookup table, with the same number
+        of elements as wp
+    """
+    w = wp.reshape(1, -1)
+    qw, scales, biases = _quantize_channelwise_linear(w, nbits, axis=0)
+    indices = _np.array(range(0, 2 ** nbits))
+    lookup_table = indices * scales[0] + biases[0]
+    return lookup_table, qw
+
+
+def _get_kmeans_lookup_table_and_weight(
+    nbits, w, init="k-means++", tol=1e-2, n_init=1, rand_seed=0
+):
+    """
+    Generate K-Means lookup table given a weight parameter field
+
+    nbits:
+        Number of bits for quantization
+
+    w:
+        Weight as numpy array
+
+    Returns
+    -------
+    lut: numpy.array
+        Lookup table, numpy array of shape (1 << nbits, );
+    wq: numpy.array
+        Quantized weight of type numpy.uint8
+    """
+    if _HAS_SKLEARN:
+        from sklearn.cluster import KMeans
+    else:
+        raise ModuleNotFoundError(
+            "scikit-learn is required for k-means quantization."
+            " To install, run: \"pip install -U scikit-learn\"."
+        )
+    units = _np.prod(w.shape)
+    lut_len = 1 << nbits
+    n_clusters = units if (units < lut_len) else lut_len
+    wf = w.reshape(-1, 1)
+    kmeans = KMeans(
+        n_clusters=n_clusters, init=init, tol=tol, n_init=n_init, random_state=rand_seed
+    ).fit(wf)
+    wq = kmeans.labels_[:units]
+    lut = _np.zeros(lut_len)
+    lut[:n_clusters] = kmeans.cluster_centers_.flatten()
+    return lut, wq
+
+
+def _quantize_channelwise_linear(weight, nbits, axis=0, symmetric=False):
+    """
+    Linearly quantize weight blob.
+
+    weight: numpy.array
+        Weight to be quantized.
+
+    nbits: int
+        Number of bits per weight element
+
+    axis: int
+        Axis of the weight blob to compute channel-wise quantization, can be 0 or 1
+
+    symmetric: bool
+        If true, set quantization range to be symmetrical to 0.
+        Otherwise, set quantization range to be the minimum and maximum of
+        weight parameters.
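+
+    A small worked example (non-symmetric, nbits=8): for a channel with
+    minimum a = -1.0 and maximum b = 1.0, scale = (b - a) / 255 and bias = a,
+    so a weight of 0.0 quantizes to round((0.0 - a) / scale) = 128.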
+
+    Returns
+    -------
+    quantized_weight: numpy.array
+        quantized weight as float numpy array, with the same shape as weight
+    scale: numpy.array
+        per channel scale
+    bias: numpy.array
+        per channel bias
+    """
+    if len(weight.shape) == 1:  # vector situation, treat as 1 channel
+        weight = weight.reshape((1, weight.shape[0]))
+
+    rank = len(weight.shape)
+    if axis == 1:
+        transposed_axis_order = (1, 0) + tuple(range(2, rank))
+        weight = _np.transpose(weight, transposed_axis_order)
+
+    num_channels = weight.shape[0]
+    shape = weight.shape
+    weight = weight.reshape((num_channels, -1))  # [C, L]
+
+    a = _np.amin(weight, axis=-1)  # [C,]
+    b = _np.amax(weight, axis=-1)  # [C,]
+
+    if symmetric:
+        r = _np.maximum(_np.abs(a), _np.abs(b))
+        scale = r / ((1 << nbits) / 2.0 - 1)
+        bias = -(1 << nbits) / 2.0 * scale
+        num = weight - bias[:, None]
+        denom = scale[:, None]
+        qw = _np.divide(
+            num, denom, out=_np.zeros_like(num), where=(_np.abs(denom) > 1e-6)
+        )
+        qw = _np.round(qw)
+    else:
+        qb = (1 << nbits) - 1
+        scale = (b - a) / qb
+        inv_scale = _np.divide(
+            1.0, scale, out=_np.zeros_like(scale), where=(_np.abs(scale) > 1e-6)
+        )
+        bias = a
+        qw = (weight - a[:, None]) * inv_scale[:, None]
+        qw = _np.round(qw)
+
+    # Reshape
+    quantized_weight = qw.reshape(shape)
+    if axis == 1:
+        quantized_weight = _np.transpose(quantized_weight, transposed_axis_order)
+
+    return (quantized_weight, scale, bias)
+
+
+def _quantize_wp(wp, nbits, qm, axis=0, **kwargs):
+    """
+    Quantize the weight blob
+
+    wp: numpy.array
+        Weight parameters
+    nbits: int
+        Number of bits
+    qm:
+        Quantization mode
+    lut_function: (``callable function``)
+        Python callable representing a look-up table
+
+    Returns
+    -------
+    scale: numpy.array
+        Per-channel scale
+    bias: numpy.array
+        Per-channel bias
+    lut: numpy.array
+        Lookup table
+    quantized_wp: numpy.array
+        Quantized weight of same shape as wp, with dtype numpy.uint8
+    """
+
+    scale = bias = lut = None
+
+    # Linear Quantization
+    if qm in [
+        _QUANTIZATION_MODE_LINEAR_QUANTIZATION,
+        _QUANTIZATION_MODE_LINEAR_SYMMETRIC,
+    ]:
+        symmetric = qm == _QUANTIZATION_MODE_LINEAR_SYMMETRIC
+        qw, scale, bias = _quantize_channelwise_linear(wp, nbits, axis, symmetric)
+    # Lookup tables
+    elif qm == _QUANTIZATION_MODE_LOOKUP_TABLE_KMEANS:
+        lut, qw = _get_kmeans_lookup_table_and_weight(nbits, wp)
+    elif qm == _QUANTIZATION_MODE_CUSTOM_LOOKUP_TABLE:
+        if "lut_function" not in kwargs.keys():
+            raise Exception(
+                "Custom lookup table quantization mode "
+                "selected but no lookup table function passed"
+            )
+        lut_function = kwargs["lut_function"]
+        if not callable(lut_function):
+            raise Exception(
+                "Argument for Lookup Table passed in but is not callable"
+            )
+        try:
+            lut, qw = lut_function(nbits, wp)
+        except Exception as e:
+            raise Exception(
+                "{}\nCall to Lookup Table function failed".format(str(e))
+            )
+    elif qm == _QUANTIZATION_MODE_LOOKUP_TABLE_LINEAR:
+        lut, qw = _get_linear_lookup_table_and_weight(nbits, wp)
+    else:
+        raise NotImplementedError('Quantization method "{}" not supported'.format(qm))
+
+    quantized_wp = _np.uint8(qw)
+    return scale, bias, lut, quantized_wp
+
+
+def _quantize_wp_field(wp, nbits, qm, shape, axis=0, **kwargs):
+    """
+    Quantize WeightParam field in Neural Network Protobuf
+
+    wp: MLModel.NeuralNetwork.WeightParam
+        WeightParam field
+    nbits: int
+        Number of bits to be quantized
+    qm: str
+        Quantization mode
+    shape: tuple
+        Tensor shape held by wp
+    axis: int
+        Axis over which quantization is performed, can be either 0 or 1
+    lut_function: (``callable
function``)
+        Python callable representing a LUT table function
+    """
+
+    # De-quantization
+    if qm == _QUANTIZATION_MODE_DEQUANTIZE:
+        return _dequantize_wp(wp, shape, axis)
+
+    # If the float32 field is empty do nothing and return
+    if len(wp.floatValue) == 0:
+        return
+
+    # Half precision (16-bit) quantization
+    if nbits == 16:
+        return _wp_to_fp16wp(wp)
+
+    if nbits > 8:
+        raise Exception("Only 8-bit and lower quantization is supported")
+
+    if qm not in _SUPPORTED_QUANTIZATION_MODES:
+        raise Exception("Quantization mode {} not supported".format(qm))
+
+    # axis parameter check
+    if axis == 1 and len(shape) != 4:
+        raise Exception(
+            "Quantization on second axis is only supported for rank-4 weight blob."
+        )
+    if axis != 0 and axis != 1:
+        raise Exception(
+            "Invalid quantization axis {} passed in. Allowed "
+            "values are 0 (first axis) and 1 (second axis)".format(axis)
+        )
+
+    # WeightParam size check - non-linear quantizations are applied on layer level
+    num_channels = (
+        shape[axis]
+        if qm
+        in [_QUANTIZATION_MODE_LINEAR_QUANTIZATION, _QUANTIZATION_MODE_LINEAR_SYMMETRIC]
+        else 1
+    )
+    if len(wp.floatValue) % num_channels:
+        raise Exception(
+            "Number of quantization channels does not divide evenly into weights"
+        )
+
+    qparams = wp.quantization
+    qparams.numberOfBits = nbits
+
+    weights = _np.array(wp.floatValue).reshape(shape)
+    scale, bias, lut, uint8_weights = _quantize_wp(weights, nbits, qm, axis, **kwargs)
+    uint8_weights = uint8_weights.flatten()
+    if qm in [
+        _QUANTIZATION_MODE_LINEAR_QUANTIZATION,
+        _QUANTIZATION_MODE_LINEAR_SYMMETRIC,
+    ]:
+        qparams.linearQuantization.scale.extend(scale)
+        qparams.linearQuantization.bias.extend(bias)
+    else:
+        qparams.lookupTableQuantization.floatValue.extend(lut)
+
+    wp.rawValue = bytes()
+    if nbits == 8:
+        wp.rawValue += uint8_weights.tobytes()
+    else:
+        wp.rawValue += _convert_array_to_nbit_quantized_bytes(
+            uint8_weights, nbits
+        ).tobytes()
+    del wp.floatValue[:]
+
+
+def _unpack_to_bytes(byte_arr, num_weights, nbits):
+    assert num_weights % 1 == 0
+    num_weights = int(num_weights)
+    bit_arr = _decompose_bytes_to_bit_arr(byte_arr.flatten().tolist())
+    bit_arr = _np.array(bit_arr[: num_weights * nbits]).reshape((num_weights, nbits))
+    expo = 2 ** _np.array(list(reversed(range(0, nbits))))
+    byte_arr = _np.sum(bit_arr * expo, axis=1)
+    return byte_arr
+
+
+def _dequantize_linear(weight_8bit, scale, bias, axis=0):
+    if len(weight_8bit.shape) == 1:  # vector situation, treat as 1 channel
+        weight_8bit = weight_8bit.reshape((1, weight_8bit.shape[0]))
+
+    rank = len(weight_8bit.shape)
+    if axis == 1:
+        transposed_axis_order = (1, 0) + tuple(range(2, rank))
+        weight_8bit = _np.transpose(weight_8bit, transposed_axis_order)
+
+    num_channels = weight_8bit.shape[0]
+    broadcast_shape = (num_channels,) + (1,) * (rank - 1)
+    scale = scale.reshape(broadcast_shape)
+    bias = bias.reshape(broadcast_shape)
+    weight = weight_8bit.astype("float") * scale + bias
+    if axis == 1:
+        weight = _np.transpose(weight, transposed_axis_order)
+
+    return weight
+
+
+def _dequantize_lut(weight_8bit, lut):
+    return lut[weight_8bit.astype("uint8")]
+
+
+def _dequantize_wp(wp, shape, axis=0):
+    if len(wp.floatValue) != 0:
+        return
+
+    is_linear = wp.quantization.WhichOneof("QuantizationType") == "linearQuantization"
+    if is_linear:
+        if len(wp.quantization.linearQuantization.scale) != len(
+            wp.quantization.linearQuantization.bias
+        ):
+            raise Exception(
+                "Linear quantization scale and bias vectors are different lengths"
+            )
+
+    # axis parameter check
+    if axis
== 1 and len(shape) != 4:
+        raise Exception(
+            "Dequantization on second axis is only supported for rank-4 weight blob."
+        )
+    if axis != 0 and axis != 1:
+        raise Exception(
+            "Invalid quantization axis {} passed in. Allowed "
+            "values are 0 (first axis) and 1 (second axis)".format(axis)
+        )
+
+    nbits = wp.quantization.numberOfBits
+    num_weights = _np.prod(shape)
+    byte_arr = _np.frombuffer(wp.rawValue, dtype=_np.uint8)
+
+    weight_8bit = (
+        byte_arr if nbits == 8 else _unpack_to_bytes(byte_arr, num_weights, nbits)
+    )
+    weight_8bit = weight_8bit.reshape(shape)
+
+    if is_linear:
+        scale = _np.array(wp.quantization.linearQuantization.scale)
+        bias = _np.array(wp.quantization.linearQuantization.bias)
+        dequantized_weight = _dequantize_linear(weight_8bit, scale, bias, axis)
+    else:
+        lut = _np.array(wp.quantization.lookupTableQuantization.floatValue)
+        dequantized_weight = _dequantize_lut(weight_8bit, lut)
+
+    wp.rawValue = bytes()
+    wp.quantization.Clear()
+    wp.floatValue.extend(dequantized_weight.flatten())
+
+
+def _dequantize_nn_spec(spec):
+    """
+    Dequantize weights in NeuralNetwork type mlmodel specifications.
+    """
+    _quantize_nn_spec(spec, None, _QUANTIZATION_MODE_DEQUANTIZE)
+
+
+def _quantize_nn_spec(nn_spec, nbits, qm, **kwargs):
+    """
+    Quantize weights in NeuralNetwork type mlmodel specifications.
+    """
+    selector = kwargs.get("selector", QuantizedLayerSelector())
+
+    if qm not in _SUPPORTED_QUANTIZATION_MODES:
+        raise Exception("Quantization mode {} not supported".format(qm))
+
+    if qm != _QUANTIZATION_MODE_DEQUANTIZE:
+        if nbits is None:
+            raise Exception('Missing argument "nbits"')
+        if not (nbits > 0 and nbits <= 8 or nbits == 16):
+            raise Exception(
+                "Only half precision (16-bit), 1 to 8-bit quantization is supported"
+            )
+
+        if qm == _QUANTIZATION_MODE_LINEAR_SYMMETRIC and nbits != 8:
+            raise Exception(
+                "Symmetric quantization is only applicable for 8-bit linear"
+            )
+
+    layers = nn_spec.layers
+
+    # Perform optimization step
+    if nbits is not None and nbits < 16 and qm != _QUANTIZATION_MODE_DEQUANTIZE:
+        print("Optimizing Neural Network before Quantization:")
+        _optimize_nn(layers)
+        print("Finished optimizing network.
Quantizing neural network..") + + # Quantize each layer + for layer in layers: + layer_type = layer.WhichOneof("layer") + if not selector.do_quantize(layer): + continue + print("Quantizing layer {} of type {}".format(layer.name, layer_type)) + + # Convolution + if layer_type == "convolution": + output_channels = layer.convolution.outputChannels + kernel_channels = layer.convolution.kernelChannels + kernel_height = layer.convolution.kernelSize[0] + kernel_width = layer.convolution.kernelSize[1] + groups = layer.convolution.nGroups + counts = output_channels * kernel_channels * kernel_height * kernel_width + has_bias = layer.convolution.hasBias + if layer.convolution.isDeconvolution: + shape = ( + kernel_channels, + int(output_channels / groups), + kernel_height, + kernel_width, + ) + _quantize_wp_field( + layer.convolution.weights, nbits, qm, shape, axis=1, **kwargs + ) + else: + shape = (output_channels, kernel_channels, kernel_height, kernel_width) + _quantize_wp_field( + layer.convolution.weights, nbits, qm, shape, **kwargs + ) + + if has_bias and selector.do_quantize(layer, weight_param="bias"): + _quantize_wp_field( + layer.convolution.bias, + nbits, + qm, + shape=(output_channels,), + **kwargs + ) + + # Batchnorm + elif layer_type == "batchnorm": + nw = layer.batchnorm.channels + _quantize_wp_field(layer.batchnorm.gamma, nbits, qm, shape=(nw,), **kwargs) + _quantize_wp_field(layer.batchnorm.beta, nbits, qm, shape=(nw,), **kwargs) + _quantize_wp_field(layer.batchnorm.mean, nbits, qm, shape=(nw,), **kwargs) + _quantize_wp_field( + layer.batchnorm.variance, nbits, qm, shape=(nw,), **kwargs + ) + + # InnerProduct + elif layer_type == "innerProduct": + output_channels = layer.innerProduct.outputChannels + input_channels = layer.innerProduct.inputChannels + _quantize_wp_field( + layer.innerProduct.weights, + nbits, + qm, + shape=(output_channels, input_channels), + **kwargs + ) + has_bias = layer.innerProduct.hasBias + if has_bias and selector.do_quantize(layer, weight_param="bias"): + _quantize_wp_field( + layer.innerProduct.bias, + nbits, + qm, + shape=(output_channels,), + **kwargs + ) + + # BatchedMatmul + elif layer_type == "batchedMatmul": + x1 = layer.batchedMatmul.weightMatrixFirstDimension + x2 = layer.batchedMatmul.weightMatrixSecondDimension + _quantize_wp_field( + layer.batchedMatmul.weights, nbits, qm, shape=(x2, x1), **kwargs + ) + has_bias = layer.batchedMatmul.hasBias + if has_bias and selector.do_quantize(layer, weight_param="bias"): + _quantize_wp_field( + layer.batchedMatmul.bias, nbits, qm, shape=(x2,), **kwargs + ) + + # Embedding layer + elif layer_type == "embedding": + output_channels = layer.embedding.outputChannels + input_channels = layer.embedding.inputDim + _quantize_wp_field( + layer.embedding.weights, + nbits, + qm, + shape=(output_channels, input_channels), + **kwargs + ) + if layer.embedding.hasBias: + _quantize_wp_field( + layer.embedding.bias, nbits, qm, shape=(output_channels,), **kwargs + ) + + # Embedding ND layer + elif layer_type == "embeddingND": + output_channels = layer.embeddingND.embeddingSize + input_channels = layer.embeddingND.vocabSize + _quantize_wp_field( + layer.embeddingND.weights, + nbits, + qm, + shape=(output_channels, input_channels), + **kwargs + ) + if layer.embeddingND.hasBias: + _quantize_wp_field( + layer.embeddingND.bias, + nbits, + qm, + shape=(output_channels,), + **kwargs + ) + + # Scale layer + elif layer_type == "scale": + nw = _np.prod(layer.scale.shapeScale) + _quantize_wp_field(layer.scale.scale, nbits, qm, shape=(nw,), 
**kwargs) + if layer.scale.hasBias: + nw = _np.prod(layer.scale.shapeBias) + _quantize_wp_field(layer.scale.bias, nbits, qm, shape=(nw,), **kwargs) + + # Bias layer + elif layer_type == "bias": + nw = _np.prod(layer.bias.shape) + _quantize_wp_field(layer.bias.bias, nbits, qm, shape=(nw,), **kwargs) + + # LoadConstant layer + elif layer_type == "loadConstant": + nw = _np.prod(layer.loadConstant.shape) + _quantize_wp_field( + layer.loadConstant.data, nbits, qm, shape=(nw,), **kwargs + ) + + # Simple Recurrent + elif layer_type == "simpleRecurrent": + i_size = layer.simpleRecurrent.inputVectorSize + o_size = layer.simpleRecurrent.outputVectorSize + _quantize_wp_field( + layer.simpleRecurrent.weightMatrix, + nbits, + qm, + shape=(o_size, i_size), + **kwargs + ) + _quantize_wp_field( + layer.simpleRecurrent.recursionMatrix, + nbits, + qm, + shape=(o_size, o_size), + **kwargs + ) + if layer.simpleRecurrent.hasBiasVector: + _quantize_wp_field( + layer.simpleRecurrent.biasVector, + nbits, + qm, + shape=(o_size,), + **kwargs + ) + + # GRU + elif layer_type == "gru": + i_size = layer.gru.inputVectorSize + o_size = layer.gru.outputVectorSize + # Weight Matrix + _quantize_wp_field( + layer.gru.updateGateWeightMatrix, + nbits, + qm, + shape=(o_size, i_size), + **kwargs + ) + _quantize_wp_field( + layer.gru.resetGateWeightMatrix, + nbits, + qm, + shape=(o_size, i_size), + **kwargs + ) + _quantize_wp_field( + layer.gru.outputGateWeightMatrix, + nbits, + qm, + shape=(o_size, i_size), + **kwargs + ) + # Recursion Weights + _quantize_wp_field( + layer.gru.updateGateRecursionMatrix, + nbits, + qm, + shape=(o_size, o_size), + **kwargs + ) + _quantize_wp_field( + layer.gru.resetGateRecursionMatrix, + nbits, + qm, + shape=(o_size, o_size), + **kwargs + ) + _quantize_wp_field( + layer.gru.outputGateRecursionMatrix, + nbits, + qm, + shape=(o_size, o_size), + **kwargs + ) + # Bias + if layer.gru.hasBiasVectors: + _quantize_wp_field( + layer.gru.updateGateBiasVector, nbits, qm, shape=(o_size,), **kwargs + ) + _quantize_wp_field( + layer.gru.resetGateBiasVector, nbits, qm, shape=(o_size,), **kwargs + ) + _quantize_wp_field( + layer.gru.outputGateBiasVector, nbits, qm, shape=(o_size,), **kwargs + ) + + # LSTM Layers + elif layer_type in ["uniDirectionalLSTM", "biDirectionalLSTM"]: + + def _lstmwp_to_fp16_lstmwp( + lstm_wp, nbits, qm, i_size, o_size, has_peephole=True + ): + assert lstm_wp + _quantize_wp_field( + lstm_wp.inputGateWeightMatrix, + nbits, + qm, + shape=(o_size, i_size), + **kwargs + ) + _quantize_wp_field( + lstm_wp.forgetGateWeightMatrix, + nbits, + qm, + shape=(o_size, i_size), + **kwargs + ) + _quantize_wp_field( + lstm_wp.blockInputWeightMatrix, + nbits, + qm, + shape=(o_size, i_size), + **kwargs + ) + _quantize_wp_field( + lstm_wp.outputGateWeightMatrix, + nbits, + qm, + shape=(o_size, i_size), + **kwargs + ) + + _quantize_wp_field( + lstm_wp.inputGateRecursionMatrix, + nbits, + qm, + shape=(o_size, o_size), + **kwargs + ) + _quantize_wp_field( + lstm_wp.forgetGateRecursionMatrix, + nbits, + qm, + shape=(o_size, o_size), + **kwargs + ) + _quantize_wp_field( + lstm_wp.blockInputRecursionMatrix, + nbits, + qm, + shape=(o_size, o_size), + **kwargs + ) + _quantize_wp_field( + lstm_wp.outputGateRecursionMatrix, + nbits, + qm, + shape=(o_size, o_size), + **kwargs + ) + + _quantize_wp_field( + lstm_wp.inputGateBiasVector, nbits, qm, shape=(o_size,), **kwargs + ) + _quantize_wp_field( + lstm_wp.forgetGateBiasVector, nbits, qm, shape=(o_size,), **kwargs + ) + _quantize_wp_field( + 
lstm_wp.blockInputBiasVector, nbits, qm, shape=(o_size,), **kwargs
+            )
+            _quantize_wp_field(
+                lstm_wp.outputGateBiasVector, nbits, qm, shape=(o_size,), **kwargs
+            )
+
+            if has_peephole:
+                _quantize_wp_field(
+                    lstm_wp.inputGatePeepholeVector,
+                    nbits,
+                    qm,
+                    shape=(o_size,),
+                    **kwargs
+                )
+                _quantize_wp_field(
+                    lstm_wp.forgetGatePeepholeVector,
+                    nbits,
+                    qm,
+                    shape=(o_size,),
+                    **kwargs
+                )
+                _quantize_wp_field(
+                    lstm_wp.outputGatePeepholeVector,
+                    nbits,
+                    qm,
+                    shape=(o_size,),
+                    **kwargs
+                )
+
+        if layer_type == "uniDirectionalLSTM":
+            _lstmwp_to_fp16_lstmwp(
+                lstm_wp=layer.uniDirectionalLSTM.weightParams,
+                nbits=nbits,
+                qm=qm,
+                i_size=layer.uniDirectionalLSTM.inputVectorSize,
+                o_size=layer.uniDirectionalLSTM.outputVectorSize,
+                has_peephole=layer.uniDirectionalLSTM.params.hasPeepholeVectors,
+            )
+
+        elif layer_type == "biDirectionalLSTM":
+            for lstm_wp in layer.biDirectionalLSTM.weightParams:
+                _lstmwp_to_fp16_lstmwp(
+                    lstm_wp=lstm_wp,
+                    nbits=nbits,
+                    qm=qm,
+                    i_size=layer.biDirectionalLSTM.inputVectorSize,
+                    o_size=layer.biDirectionalLSTM.outputVectorSize,
+                    has_peephole=layer.biDirectionalLSTM.params.hasPeepholeVectors,
+                )
+
+        elif layer_type == "custom":
+            print(
+                "Skipping custom layer {}. Weights for this layer need to "
+                "be converted manually".format(layer.name)
+            )
+        elif layer_type == "branch":
+            _quantize_nn_spec(layer.branch.ifBranch, nbits, qm, **kwargs)
+            _quantize_nn_spec(layer.branch.elseBranch, nbits, qm, **kwargs)
+        elif layer_type == "loop":
+            _quantize_nn_spec(layer.loop.conditionNetwork, nbits, qm, **kwargs)
+            _quantize_nn_spec(layer.loop.bodyNetwork, nbits, qm, **kwargs)
+        else:
+            raise Exception("Unknown layer " + layer_type + " to be quantized")
+
+
+def _quantize_spec_weights(spec, nbits, quantization_mode, **kwargs):
+    nn_model_types = [
+        "neuralNetwork",
+        "neuralNetworkClassifier",
+        "neuralNetworkRegressor",
+    ]
+
+    model_type = spec.WhichOneof("Type")
+
+    # Neural network models
+    if model_type in nn_model_types:
+        # Bump up to appropriate spec version if required
+        if nbits == 16:
+            spec.specificationVersion = max(
+                _MINIMUM_FP16_SPEC_VERSION, spec.specificationVersion
+            )
+        else:
+            spec.specificationVersion = max(
+                _MINIMUM_QUANTIZED_MODEL_SPEC_VERSION, spec.specificationVersion
+            )
+
+        if spec.WhichOneof("Type") == "neuralNetwork":
+            _quantize_nn_spec(spec.neuralNetwork, nbits, quantization_mode, **kwargs)
+
+        elif spec.WhichOneof("Type") == "neuralNetworkClassifier":
+            _quantize_nn_spec(
+                spec.neuralNetworkClassifier, nbits, quantization_mode, **kwargs
+            )
+
+        elif spec.WhichOneof("Type") == "neuralNetworkRegressor":
+            _quantize_nn_spec(
+                spec.neuralNetworkRegressor, nbits, quantization_mode, **kwargs
+            )
+
+    # Recursively convert all pipeline models
+    elif spec.WhichOneof("Type") == "pipeline":
+        for model_spec in spec.pipeline.models:
+            _quantize_spec_weights(model_spec, nbits, quantization_mode, **kwargs)
+
+    elif spec.WhichOneof("Type") in ["pipelineClassifier", "pipelineRegressor"]:
+        _quantize_spec_weights(spec.pipeline, nbits, quantization_mode, **kwargs)
+
+    return spec
+
+
+def _load_and_resize_image(image_path, size):
+    from PIL import Image
+
+    img = Image.open(image_path)
+    return img.resize(size, Image.ANTIALIAS)
+
+
+class TopKMetrics:
+    def __init__(self, topk):
+        self._topk = topk
+        self._correct_count = 0
+        self._total_count = 0
+
+    def add_metric(self, output1, output2):
+        self._total_count += 1
+        if self._topk == 1:
+            if output1 == output2:
+                self._correct_count += 1
+        else:
+            self._topk = min(len(output1.keys()),
self._topk) + out1_topk = sorted(output1, key=output1.get, reverse=True)[: self._topk] + out2_topk = sorted(output2, key=output2.get, reverse=True)[: self._topk] + if out1_topk[0] in out2_topk: + self._correct_count += 1 + + def display_metrics(self): + pcorrect = (float(self._correct_count) / float(self._total_count)) * 100 + pcorrect = _np.round(pcorrect, decimals=2) + if self._topk == 1: + print("Top 1 Agreement: {}%\n".format(pcorrect)) + else: + print("Top {} Agreement: {}%\n".format(self._topk, pcorrect)) + + +class NoiseMetrics: + def __init__(self): + self._snr = [] + self._psnr = [] + + @staticmethod + def _compute_snr(arr1, arr2): + noise = arr1 - arr2 + noise_var = _np.sum(noise ** 2) / len(noise) + 1e-7 + signal_energy = _np.sum(arr2 ** 2) / len(arr2) + max_signal_energy = _np.amax(arr2 ** 2) + snr = 10 * _np.log10(signal_energy / noise_var) + psnr = 10 * _np.log10(max_signal_energy / noise_var) + return snr, psnr + + def add_metric(self, output1, output2): + import PIL + + # Output is Image + if isinstance(output1, PIL.Image.Image): + if output1.mode == "RGBA": + output1 = output1.convert("RGB") + output2 = output2.convert("RGB") + arr1 = _np.array(output1).flatten() + arr2 = _np.array(output2).flatten() + snr, psnr = self._compute_snr(arr1, arr2) + self._snr.append(snr) + self._psnr.append(psnr) + + # Output is multiArray + else: + arr1 = output1.flatten() + arr2 = output2.flatten() + snr, psnr = self._compute_snr(arr1, arr2) + self._snr.append(snr) + self._psnr.append(psnr) + + def display_metrics(self): + print("SNR: {} +/- {}".format(_np.mean(self._snr), _np.var(self._snr))) + print("PSNR: {} +/- {}\n".format(_np.mean(self._psnr), _np.var(self._psnr))) + + +class OutputMetric: + """ + Utility class to calculate and hold metrics between + two model outputs + """ + + def __init__(self, name, type): + self.name = name + self._metrics = [] + + if type == "stringType": + self._metrics.append(TopKMetrics(topk=1)) + + elif type == "dictionaryType": + self._metrics.append(TopKMetrics(topk=5)) + + elif type == "imageType" or type == "multiArrayType": + self._metrics.append(NoiseMetrics()) + + else: + raise Exception( + """Unable to determine which metric to + compute for output: {}""".format( + name + ) + ) + + def add_metric(self, output1, output2): + for metric in self._metrics: + metric.add_metric(output1, output2) + + def display_metrics(self): + for metric in self._metrics: + metric.display_metrics() + + +class ModelMetrics: + """ + A utility class to hold evaluation metrics + """ + + def __init__(self, spec): + self.model_metrics = {} + for output in spec.description.output: + output_type = output.type.WhichOneof("Type") + self.model_metrics[output.name] = OutputMetric(output.name, output_type) + + def add_metrics(self, model1_output, model2_output): + outputs = model1_output.keys() + for output in outputs: + self.model_metrics[output].add_metric( + model1_output[output], model2_output[output] + ) + + def display_metrics(self): + for metric in self.model_metrics: + print("Output {}:".format(metric)) + dash = "----------" + for x in range(0, len(metric)): + dash += "-" + print(dash) + self.model_metrics[metric].display_metrics() + + +def _characterize_qmodel_perf_with_data_dir(fpmodel, qspec, data_dir): + supported_image_exts = ["jpg", "bmp", "png", "jpeg"] + test_image_paths = [ + "{}/{}".format(data_dir, fn) + for fn in _listdir(data_dir) + if any(fn.endswith(ext) for ext in supported_image_exts) + ] + + if not test_image_paths: + raise Exception( + "{} contains no supported 
image files. " + "Supported file types include jpg, bmp, png and jpeg.".format( + data_dir + ) + ) + + qmodel = _get_model(qspec, compute_units=_ComputeUnit.CPU_ONLY) + model_metrics = ModelMetrics(qspec) + + input_name = qspec.description.input[0].name + input_size = ( + qspec.description.input[0].type.imageType.width, + qspec.description.input[0].type.imageType.height, + ) + + print("\n\n") + print("Analyzing {} images".format(len(test_image_paths))) + print("Running Analysis this may take a while ...") + print("\n") + + analyzed = 0 + tried = 0 + if fpmodel.compute_unit != _ComputeUnit.CPU_ONLY: + fpmodel = _MLModel(fpmodel.get_spec(), compute_units=_ComputeUnit.CPU_ONLY) + for image in test_image_paths: + try: + input = {input_name: _load_and_resize_image(image, input_size)} + fp_pred = fpmodel.predict(input) + q_pred = qmodel.predict(input) + analyzed += 1 + model_metrics.add_metrics(fp_pred, q_pred) + + except Exception as e: + print(e) + continue + + # Update Progress + tried += 1 + if tried % 10 == 0: + _stdout.write("\r") + _stdout.write("Analyzed {}/{}".format(tried, len(test_image_paths))) + _stdout.flush() + + print("\n") + model_metrics.display_metrics() + + +def _characterize_quantized_model_perf(fpmodel, qspec, sample_data): + qmodel = _get_model(qspec) + model_metrics = ModelMetrics(qspec) + + print("\n\n") + print("Analyzing {} samples".format(len(sample_data))) + print("Running Analysis this may take a while ...") + print("\n") + + analyzed = 0 + tried = 0 + fpmodel = _MLModel(fpmodel.get_spec(), compute_units=_ComputeUnit.CPU_ONLY) + qmodel = _MLModel(qmodel.get_spec(), compute_units=_ComputeUnit.CPU_ONLY) + for data in sample_data: + try: + fp_pred = fpmodel.predict(data) + q_pred = qmodel.predict(data) + analyzed += 1 + model_metrics.add_metrics(fp_pred, q_pred) + + except Exception as e: + print(e) + continue + + # Update Progress + tried += 1 + if tried % 10 == 0: + _stdout.write("\r") + _stdout.write("Analyzed {}/{}".format(tried, len(sample_data))) + _stdout.flush() + + print("\n") + model_metrics.display_metrics() + + +def compare_models(full_precision_model, quantized_model, sample_data): + """ + Utility function to compare the performance of a full precision vs quantized model + + full_precision_model: MLModel + The full precision model with float32 weights + + quantized_model: MLModel + Quantized version of the model with quantized weights + + sample_data: str | [dict] + Data used to characterize performance of the quantized model in + comparison to the full precision model. Either a list of sample input + dictionaries or an absolute path to a directory containing images. + Path to a directory containing images is only valid for models with + one image input. For all other models a list of sample inputs must be + provided. + + :return: + None. Performance metrics are printed out + """ + emessage = """ + Invalid sample data provided. Only a list of dictionaries + containing sample data or path to a folder containing images is + supported""" + + spec = full_precision_model.get_spec() + num_inputs = len(spec.description.input) + if isinstance(sample_data, str): + input_type = spec.description.input[0].type.WhichOneof("Type") + if num_inputs != 1 or input_type != "imageType": + raise Exception( + """Unable to analyze quantized models. Sample data + was a path to a directory which is only supported with models with + one image type input. Please try passing in a list of sample inputs + as sample data. 
+ """ + ) + _characterize_qmodel_perf_with_data_dir( + full_precision_model, quantized_model.get_spec(), sample_data + ) + + elif isinstance(sample_data, list): + if not all(type(d) is dict for d in sample_data): + raise Exception(emessage) + _characterize_quantized_model_perf( + full_precision_model, quantized_model.get_spec(), sample_data + ) + + else: + raise Exception(emessage) + + +def activate_int8_int8_matrix_multiplications(spec, selector=None): + """ + Utility function that takes in either a full precision (float) spec or + an nbit quantized spec to selectively enable int8 activation + weight quantization + of matrix multiplication operations where the second matrix represents a constant weight. + + spec: MLModel.get_spec() + Currently conversion for only neural network models is supported. + If a pipeline model is passed in then all embedded neural network models embedded within + will be modified. + + selector: (optional) MatrixMultiplyLayerSelector + A MatrixMultiplyLayerSelector object that enables int8 activation + weight quantization + only on those layers for which the user-specified criterion on the minimum/maximum number + of size/channels in constant weight parameters is met. + It can also be derived to provide custom selection. + + """ + + # Recursively convert all pipeline models + if spec.WhichOneof("Type") == "pipeline": + for model_spec in spec.pipeline.models: + activate_int8_int8_matrix_multiplications(model_spec, selector=selector) + return spec + + elif spec.WhichOneof("Type") in ["pipelineClassifier", "pipelineRegressor"]: + activate_int8_int8_matrix_multiplications(spec.pipeline, selector=selector) + return spec + + # Neural network models + elif spec.WhichOneof("Type") in [ + "neuralNetwork", + "neuralNetworkClassifier", + "neuralNetworkRegressor", + ]: + + if selector is None: + selector = MatrixMultiplyLayerSelector() + + # Dequantize all the selected matrix multiplication layers + spec = _quantize_spec_weights( + spec, + nbits=None, + quantization_mode=_QUANTIZATION_MODE_DEQUANTIZE, + selector=selector, + ) + + def _quantized_weight_and_scale(W): + W_max = max(_np.abs(_np.min(W)), _np.abs(_np.max(W))) + W_normalized = W / W_max # [-1,1] + W_quantized_int8 = 127.0 * W_normalized # [-127, 127] + W_quantized_int8 = W_quantized_int8.astype(_np.int8) + quant_scale = W_max / 127.0 + return W_quantized_int8, quant_scale + + if spec.WhichOneof("Type") == "neuralNetwork": + nn_spec = spec.neuralNetwork + + elif spec.WhichOneof("Type") in "neuralNetworkClassifier": + nn_spec = spec.neuralNetworkClassifier + + elif spec.WhichOneof("Type") in "neuralNetworkRegressor": + nn_spec = spec.neuralNetworkRegressor + + def _process_nn_layers(nn_spec): + layers = nn_spec.layers + + # Replacing each matrix multiplication + for layer in layers: + layer_type = layer.WhichOneof("layer") + if not selector.do_quantize(layer): + continue + + if layer_type == "branch": + _process_nn_layers(layer.branch.ifBranch) + _process_nn_layers(layer.branch.elseBranch) + + elif layer_type == "loop": + _process_nn_layers(layer.loop.conditionNetwork) + _process_nn_layers(layer.loop.bodyNetwork) + + elif layer_type in ["innerProduct", "batchedMatmul"]: + # Bump up to appropriate spec version if at least one replacement occurs + spec.specificationVersion = max( + _SPECIFICATION_VERSION_IOS_14, spec.specificationVersion, + ) + + # InnerProduct + if layer_type == "innerProduct": + matmul_layer = layer.innerProduct + + # BatchedMatmul + elif layer_type == "batchedMatmul": + matmul_layer = 
layer.batchedMatmul
+
+                    wp = matmul_layer.weights
+
+                    if len(wp.floatValue) == 0:
+                        continue
+                    else:
+                        qw, qs = _quantized_weight_and_scale(wp.floatValue)
+
+                        print(
+                            "Modifying layer {} with size of weights {} to use Int8 * Int8 matrix multiplication".format(
+                                layer.name, qw.size
+                            )
+                        )
+
+                        matmul_layer.int8DynamicQuantize = True
+                        wp.quantization.numberOfBits = 8
+                        wp.quantization.linearQuantization.scale.extend(map(float, [qs]))
+                        wp.int8RawValue = bytes()
+                        wp.int8RawValue += qw.tobytes()
+                        del wp.floatValue[:]
+
+        _process_nn_layers(nn_spec)
+
+        return spec
+
+    else:
+        raise ValueError("Model Type {} not supported.".format(spec.WhichOneof("Type")))
+
+
+def quantize_weights(
+    full_precision_model, nbits, quantization_mode="linear", sample_data=None, **kwargs
+):
+    """
+    Utility function to convert a full precision (float) MLModel to a
+    lower precision MLModel with weights quantized to ``nbits``.
+
+    full_precision_model: MLModel
+        Model which will be converted to a lower precision. Currently
+        conversion is supported only for neural network models. If a pipeline
+        model is passed in, then all neural network models embedded within
+        will be converted.
+
+    nbits: int
+        Number of bits per quantized weight. Only 16-bit floating point and
+        1 to 8 bits are supported.
+
+    quantization_mode: str
+        One of the following:
+
+        "linear":
+            Linear quantization with scale and bias assuming the range of weight
+            values is [A, B], where A = min(weight), B = max(weight)
+        "linear_lut":
+            Simple linear quantization represented as a lookup table
+        "kmeans_lut":
+            LUT based quantization, where LUT is generated by K-Means clustering
+        "custom_lut":
+            LUT quantization where LUT and quantized weight params are
+            calculated using a custom function. If this mode is selected then
+            a custom function must be passed in kwargs with key lut_function.
+            The function must have input params (nbits, wp) where nbits is the
+            number of quantization bits and wp is the list of weights for a
+            given layer. The function should return two parameters (lut, qw)
+            where lut is an array of length (2^nbits) containing LUT values and
+            qw is the list of quantized weight parameters. See
+            ``_get_linear_lookup_table_and_weight`` for a sample implementation.
+        "linear_symmetric":
+            Linear quantization with scale and bias assuming the range of weight
+            values is [-A, A], where A = max(abs(weight)).
+
+    sample_data: str | [dict]
+        Data used to characterize performance of the quantized model in
+        comparison to the full precision model. Either a list of sample input
+        dictionaries or an absolute path to a directory containing images.
+        Path to a directory containing images is only valid for models with
+        one image input. For all other models a list of sample inputs must be
+        provided.
+
+    kwargs: keyword arguments
+        *lut_function* : (``callable function``)
+            A callable function provided when quantization mode is set to
+            ``_QUANTIZATION_MODE_CUSTOM_LOOKUP_TABLE``. See ``quantization_mode``
+            for more details.
+        *selector*: QuantizedLayerSelector
+            A QuantizedLayerSelector object that can be derived to provide
+            custom quantization selection.
+
+    Returns
+    -------
+    model: MLModel
+        The quantized MLModel instance if running on macOS 10.14 or later,
+        otherwise the quantized model specification is returned.
+
+    Examples
+    --------
+    .. 
sourcecode:: python + + >>> import coremltools + >>> from coremltools.models.neural_network import quantization_utils + >>> model = coremltools.models.MLModel('my_model.mlmodel') + >>> quantized_model = quantization_utils.quantize_weights(model, 8, "linear") + """ + + qmode_mapping = { + "linear": _QUANTIZATION_MODE_LINEAR_QUANTIZATION, + "kmeans": _QUANTIZATION_MODE_LOOKUP_TABLE_KMEANS, + "kmeans_lut": _QUANTIZATION_MODE_LOOKUP_TABLE_KMEANS, + "linear_lut": _QUANTIZATION_MODE_LOOKUP_TABLE_LINEAR, + "custom_lut": _QUANTIZATION_MODE_CUSTOM_LOOKUP_TABLE, + "dequantization": _QUANTIZATION_MODE_DEQUANTIZE, + "linear_symmetric": _QUANTIZATION_MODE_LINEAR_SYMMETRIC, + } + try: + qmode = qmode_mapping[quantization_mode] + except KeyError: + # kmeans is deprecated. Instead kmeans_lut is used. No need to show it. + del qmode_mapping["kmeans"] + raise Exception( + "Invalid quantization mode. Quantization mode must be " + "one of {}".format(qmode_mapping) + ) + + print("Quantizing using {} quantization".format(quantization_mode)) + spec = full_precision_model.get_spec() + if nbits == 16 and spec.isUpdatable: + raise Exception("updatable models cannot get quantized to FP16.") + + qspec = _quantize_spec_weights(spec, nbits, qmode, **kwargs) + quantized_model = _get_model(qspec, compute_units=full_precision_model.compute_unit) + + if _macos_version() >= (10, 14) and sample_data: + compare_models(full_precision_model, quantized_model, sample_data) + + return quantized_model diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/spec_inspection_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/spec_inspection_utils.py new file mode 100644 index 00000000..52d481dd --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/spec_inspection_utils.py @@ -0,0 +1,297 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +from ...proto import NeuralNetwork_pb2 as _NeuralNetwork_pb2 + + +def _get_weight_param_summary(wp): + """Get a summary of _NeuralNetwork_pb2.WeightParams + Args: + wp : _NeuralNetwork_pb2.WeightParams - the _NeuralNetwork_pb2.WeightParams message to display + Returns: + a str summary for wp + """ + summary_str = "" + if wp.HasField("quantization"): + nbits = wp.quantization.numberOfBits + quant_type = ( + "linearly" + if wp.quantization.HasField("linearQuantization") + else "lookup-table" + ) + summary_str += "{}-bit {} quantized".format(nbits, quant_type) + + if len(wp.floatValue) > 0: + summary_str += "({} floatValues)".format(len(wp.floatValue)) + if len(wp.float16Value) > 0: + summary_str += "({} bytes float16Values)".format(len(wp.float16Value)) + if len(wp.rawValue) > 0: + summary_str += "({} bytes rawValues)".format(len(wp.rawValue)) + + return summary_str + + +def _get_lstm_weight_param_summary(lstm_wp): + weight_name_list = [ + "W_i", + "W_f", + "W_z", + "W_o", + "H_i", + "H_f", + "H_z", + "H_o", + "b_i", + "b_f", + "b_z", + "b_o", + "p_i", + "p_f", + "p_o", + ] + wp_summary_list = [ + _get_weight_param_summary(lstm_wp.inputGateWeightMatrix), + _get_weight_param_summary(lstm_wp.forgetGateWeightMatrix), + _get_weight_param_summary(lstm_wp.blockInputWeightMatrix), + _get_weight_param_summary(lstm_wp.outputGateWeightMatrix), + _get_weight_param_summary(lstm_wp.inputGateRecursionMatrix), + _get_weight_param_summary(lstm_wp.forgetGateRecursionMatrix), + _get_weight_param_summary(lstm_wp.blockInputRecursionMatrix), + _get_weight_param_summary(lstm_wp.outputGateRecursionMatrix), + _get_weight_param_summary(lstm_wp.inputGateBiasVector), + _get_weight_param_summary(lstm_wp.forgetGateBiasVector), + _get_weight_param_summary(lstm_wp.blockInputBiasVector), + _get_weight_param_summary(lstm_wp.outputGateBiasVector), + _get_weight_param_summary(lstm_wp.inputGatePeepholeVector), + _get_weight_param_summary(lstm_wp.forgetGatePeepholeVector), + _get_weight_param_summary(lstm_wp.outputGatePeepholeVector), + ] + lstm_wp_summary_list = [] + for idx, summary in enumerate(wp_summary_list): + if len(summary) > 0: + lstm_wp_summary_list.append(weight_name_list[idx] + ", " + summary) + + return ("\n" + " " * 8).join(lstm_wp_summary_list) + + +def _get_feature_description_summary(feature): + if feature.type.HasField("multiArrayType"): + shape = list(feature.type.multiArrayType.shape) + int_shape = [int(x) for x in shape] + return str(int_shape) + else: + return ("({})".format(str(feature.type))).replace("\n", "") + + +def _summarize_network_layer_info(layer): + """ + Args: + layer - an MLModel NeuralNetwork Layer protobuf message + Returns: + layer_type : str - type of layer + layer_name : str - name of the layer + layer_inputs : list[str] - a list of strings representing input blobs of the layer + layer_outputs : list[str] - a list of strings representing output blobs of the layer + layer_field_content : list[(str, str)] - a list of two-tuple of (parameter_name, content) + """ + layer_type_str = layer.WhichOneof("layer") + + layer_name = layer.name + layer_inputs = list(layer.input) + layer_outputs = list(layer.output) + + typed_layer = getattr(layer, layer_type_str) + layer_field_names = [l.name for l in typed_layer.DESCRIPTOR.fields] + layer_field_content = [] + + for name in layer_field_names: + field = getattr(typed_layer, name) + 
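# The dispatch below gives weight-carrying fields a compact
+        # quantization/size summary; any other non-empty field is
+        # stringified with its newlines collapsed.
+ 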
summary_str = "" + if type(field) == _NeuralNetwork_pb2.LSTMWeightParams: + summary_str = _get_lstm_weight_param_summary(field) + elif type(field) == _NeuralNetwork_pb2.WeightParams: + summary_str = _get_weight_param_summary(field) + else: + field_str = str(field) + if len(field_str) > 0: + summary_str = field_str.replace("\n", " ") + if len(summary_str) > 0: + layer_field_content.append([name, summary_str]) + + return layer_type_str, layer_name, layer_inputs, layer_outputs, layer_field_content + + +def _summarize_neural_network_spec(mlmodel_spec): + """ Summarize network into the following structure. + Args: + mlmodel_spec : mlmodel spec + Returns: + inputs : list[(str, str)] - a list of two tuple (name, descriptor) for each input blob. + outputs : list[(str, str)] - a list of two tuple (name, descriptor) for each output blob + layers : list[(str, list[str], list[str], list[(str, str)])] - a list of layers represented by + layer name, input blobs, output blobs, a list of (parameter name, content) + """ + inputs = [ + (blob.name, _get_feature_description_summary(blob)) + for blob in mlmodel_spec.description.input + ] + outputs = [ + (blob.name, _get_feature_description_summary(blob)) + for blob in mlmodel_spec.description.output + ] + nn = None + + if mlmodel_spec.HasField("neuralNetwork"): + nn = mlmodel_spec.neuralNetwork + elif mlmodel_spec.HasField("neuralNetworkClassifier"): + nn = mlmodel_spec.neuralNetworkClassifier + elif mlmodel_spec.HasField("neuralNetworkRegressor"): + nn = mlmodel_spec.neuralNetworkRegressor + + layers = ( + [_summarize_network_layer_info(layer) for layer in nn.layers] + if nn != None + else None + ) + return (inputs, outputs, layers) + + +def _prRed(skk, end=None): + print("\033[91m {}\033[00m".format(skk), end=end) + + +def _prLightPurple(skk, end=None): + print("\033[94m {}\033[00m".format(skk), end=end) + + +def _prPurple(skk, end=None): + print("\033[95m {}\033[00m".format(skk), end=end) + + +def _prGreen(skk, end=None): + print("\033[92m {}\033[00m".format(skk), end=end) + + +def _print_layer_type_and_arguments( + layer_type_str, layer_inputs, indentation, to_indent=True, shape=None, value=None +): + if to_indent: + _prRed(indentation * "\t" + "{}".format(layer_type_str), end="") + else: + _prRed("{}".format(layer_type_str), end="") + + if shape is None: + _prLightPurple("({})".format(", ".join(layer_inputs))) + elif value is not None: + _prLightPurple("(shape = ", end="") + print("{}, ".format(str(shape)), end="") + _prLightPurple("value = ", end="") + values = ",".join(["{0: 0.1f}".format(v) for v in value]).lstrip() + print("[{}]".format(values), end="") + _prLightPurple(")") + else: + _prLightPurple("(shape = ", end="") + print("{}".format(str(shape)), end="") + _prLightPurple(")") + + +def _find_size(arr): + s = 1 + for a in arr: + s *= a + return s + + +def _summarize_neural_network_spec_code_style( + nn_spec, indentation=0, input_names=None, output_names=None +): + """ + print nn_spec as if writing code + """ + indentation_size = 1 + + if input_names: + print("def model({}):".format(", ".join(input_names))) + indentation += indentation_size + + for i, layer in enumerate(nn_spec.layers): + layer_type_str = layer.WhichOneof("layer") + layer_inputs = list(layer.input) + layer_outputs = list(layer.output) + + if layer_type_str == "loop": + if len(layer.loop.conditionNetwork.layers) > 0: + _prPurple(indentation * "\t" + "Condition Network: ") + _summarize_neural_network_spec_code_style( + layer.loop.conditionNetwork, indentation=indentation + ) + if 
layer.loop.conditionVar: + layer_inputs.append(layer.loop.conditionVar) + _print_layer_type_and_arguments(layer_type_str, layer_inputs, indentation) + indentation += indentation_size + _summarize_neural_network_spec_code_style( + layer.loop.bodyNetwork, indentation=indentation + ) + if len(layer.loop.conditionNetwork.layers) > 0: + _prPurple(indentation * "\t" + "Condition Network: ") + _summarize_neural_network_spec_code_style( + layer.loop.conditionNetwork, indentation=indentation + ) + indentation -= indentation_size + continue + + if layer_type_str == "branch": + _print_layer_type_and_arguments(layer_type_str, layer_inputs, indentation) + _prRed(indentation * "\t" + "IfBranch:") + indentation += indentation_size + _summarize_neural_network_spec_code_style( + layer.branch.ifBranch, indentation=indentation + ) + indentation -= indentation_size + if len(layer.branch.elseBranch.layers) > 0: + _prRed(indentation * "\t" + "ElseBranch:") + indentation += indentation_size + _summarize_neural_network_spec_code_style( + layer.branch.elseBranch, indentation=indentation + ) + indentation -= indentation_size + continue + + if layer_type_str == "loopBreak" or layer_type_str == "loopContinue": + _prRed(indentation * "\t" + layer_type_str) + continue + + shape = None + value = None + if layer_type_str == "loadConstant": + shape = layer.loadConstant.shape + shape = list(shape) + int_shape = [int(x) for x in shape] + shape = tuple([1, 1] + int_shape) + size = _find_size(shape) + if size < 4 and len(layer.loadConstant.data.floatValue) > 0: + value = map(float, list(layer.loadConstant.data.floatValue)) + + if layer_type_str == "loadConstantND": + shape = layer.loadConstantND.shape + shape = tuple(map(int, list(shape))) + size = _find_size(shape) + if size < 4 and len(layer.loadConstantND.data.floatValue) > 0: + value = map(float, list(layer.loadConstantND.data.floatValue)) + + print(indentation * "\t", end="") + print("{} =".format(", ".join(layer_outputs)), end="") + _print_layer_type_and_arguments( + layer_type_str, + layer_inputs, + indentation, + to_indent=False, + shape=shape, + value=value, + ) + + if output_names: + _prRed("\n" + indentation * "\t" + "return ", end="") + print("{}".format(", ".join(output_names))) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/update_optimizer_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/update_optimizer_utils.py new file mode 100644 index 00000000..760946ea --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/update_optimizer_utils.py @@ -0,0 +1,191 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +""" +Neural Network optimizer utilities. +""" + + +class AdamParams: + """ + Adam - A Method for Stochastic Optimization. + + Attributes + ---------- + lr: float + The learning rate that controls learning step size. Adjustable in progress, default: 0.01. + batch: int + The mini-batch size, number of examples used to compute single gradient step, default: 10. + beta1: float + Controls the exponential decay rate for the first moment estimates, default: 0.9. + beta2: float + Controls the exponential decay rate for the second moment estimates, default: 0.999. + eps: float + The epsilon, a very small number to prevent any division by zero in the implementation, default: 1e-8. 
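+
+    Example (a minimal sketch; assumes the params are handed to an updatable
+    model's builder, e.g. ``NeuralNetworkBuilder.set_adam_optimizer``):
+
+    .. sourcecode:: python
+
+        >>> params = AdamParams(lr=0.01, batch=10)
+        >>> params.set_batch(10, [10, 20, 30])
+        >>> params.lr.value
+        0.01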
+
+    Methods
+    -------
+    set_lr(value, min, max)
+        Set value for learning rate.
+    set_batch(value, allowed_set)
+        Set value for batch size.
+    set_beta1(value, min, max)
+        Set value for beta1.
+    set_beta2(value, min, max)
+        Set value for beta2.
+    set_eps(value, min, max)
+        Set value for epsilon.
+    """
+
+    def __init__(self, lr=1e-2, batch=10, beta1=0.9, beta2=0.999, eps=1e-8):
+        self._lr = RangeParam(lr)
+        self._batch = Batch(batch)
+        self._beta1 = RangeParam(beta1)
+        self._beta2 = RangeParam(beta2)
+        self._eps = RangeParam(eps)
+
+    def set_lr(self, value, min, max):
+        self._lr = RangeParam(value, min, max)
+
+    def set_batch(self, value, allowed_set):
+        self._batch = Batch(value, allowed_set)
+
+    def set_beta1(self, value, min, max):
+        self._beta1 = RangeParam(value, min, max)
+
+    def set_beta2(self, value, min, max):
+        self._beta2 = RangeParam(value, min, max)
+
+    def set_eps(self, value, min, max):
+        self._eps = RangeParam(value, min, max)
+
+    @property
+    def lr(self):
+        return self._lr
+
+    @property
+    def batch(self):
+        return self._batch
+
+    @property
+    def beta1(self):
+        return self._beta1
+
+    @property
+    def beta2(self):
+        return self._beta2
+
+    @property
+    def eps(self):
+        return self._eps
+
+
+class SgdParams:
+    """
+    SGD - Stochastic Gradient Descent optimizer.
+
+    Attributes
+    ----------
+    lr: float
+        The learning rate that controls learning step size. Adjustable as
+        training progresses, default: 0.01.
+    batch: int
+        The mini-batch size, the number of examples used to compute a single
+        gradient step, default: 10.
+    momentum: float
+        The momentum factor that helps accelerate gradient vectors in the
+        right direction, default: 0.
+
+    Methods
+    -------
+    set_lr(value, min, max)
+        Set value for learning rate.
+    set_batch(value, allowed_set)
+        Set value for batch size.
+    set_momentum(value, min, max)
+        Set value for momentum.
+    """
+
+    def __init__(self, lr=1e-2, batch=10, momentum=0):
+        self._lr = RangeParam(lr)
+        self._batch = Batch(batch)
+        self._momentum = RangeParam(momentum)
+
+    def set_lr(self, value, min, max):
+        self._lr = RangeParam(value, min, max)
+
+    def set_batch(self, value, allowed_set):
+        self._batch = Batch(value, allowed_set)
+
+    def set_momentum(self, value, min, max):
+        self._momentum = RangeParam(value, min, max)
+
+    @property
+    def lr(self):
+        return self._lr
+
+    @property
+    def batch(self):
+        return self._batch
+
+    @property
+    def momentum(self):
+        return self._momentum
+
+
+class RangeParam:
+    """
+    A parameter value together with its allowed [min, max] range.
+
+    Attributes
+    ----------
+    value: float
+    min: float
+    max: float
+    """
+
+    def __init__(self, value, min=0, max=1):
+        self._value = value
+        if min >= max:
+            raise ValueError("min value must be less than max value.")
+        self._min = min
+        self._max = max
+
+    @property
+    def value(self):
+        return self._value
+
+    @property
+    def min(self):
+        return self._min
+
+    @property
+    def max(self):
+        return self._max
+
+
+class Batch:
+    """
+    Batch-size parameter. 
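+
+    Holds a batch-size value together with the set of sizes the update
+    process may choose from (``allowed_set`` defaults to ``[value]``).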
+ + Attributes + ---------- + value: float + allowed_set: float + """ + + def __init__(self, value, allowed_set=None): + self._value = value + if allowed_set is None: + self._allowed_set = [value] + else: + if len(allowed_set) > len(set(allowed_set)): + raise ValueError("values in allowed_set must be unique.") + self._allowed_set = allowed_set + + @property + def value(self): + return self._value + + @property + def allowed_set(self): + return self._allowed_set diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/utils.py b/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/utils.py new file mode 100644 index 00000000..ed96300d --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/neural_network/utils.py @@ -0,0 +1,140 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import copy as _copy + +from coremltools.models.utils import _get_model + +from .builder import NeuralNetworkBuilder + + +def make_image_input( + model, + input_name, + is_bgr=False, + red_bias=0.0, + blue_bias=0.0, + green_bias=0.0, + gray_bias=0.0, + scale=1.0, + image_format="NHWC", +): + """ + Convert input of type multiarray to type image + + Parameters + ---------- + TODO + + Returns + ------- + model: MLModel + A coreML MLModel object + + Examples + -------- + TODO + """ + + spec = model.get_spec() + + if spec.WhichOneof("Type") not in [ + "neuralNetwork", + "neuralNetworkClassifier", + "neuralNetworkRegressor", + ]: + raise ValueError( + "Provided model must be of type neuralNetwork, neuralNetworkClassifier or neuralNetworkRegressor" + ) + + if not isinstance(input_name, list): + input_name = [input_name] + + spec_inputs = [i.name for i in spec.description.input] + for name in input_name: + if name not in spec_inputs: + msg = "Provided input_name: {}, is not an existing input to the model" + raise ValueError(msg.format(name)) + + builder = NeuralNetworkBuilder(spec=spec) + builder.set_pre_processing_parameters( + image_input_names=input_name, + is_bgr=is_bgr, + red_bias=red_bias, + green_bias=green_bias, + blue_bias=blue_bias, + gray_bias=gray_bias, + image_scale=scale, + image_format=image_format, + ) + return _get_model(spec) + + +def make_nn_classifier( + model, + class_labels, + predicted_feature_name=None, + predicted_probabilities_output=None, +): + """ + Convert a model of type "neuralNetwork" to type "neuralNetworkClassifier" + + Parameters + ---------- + TODO + + Returns + ------- + model: MLModel + A coreML MLModel object + + Examples + -------- + TODO + """ + + spec = model.get_spec() + + if spec.WhichOneof("Type") != "neuralNetwork": + raise ValueError('Provided model must be of type "neuralNetwork"') + + # convert type to "neuralNetworkClassifier" and copy messages from "neuralNetwork" + nn_spec = _copy.deepcopy(spec.neuralNetwork) + spec.ClearField("neuralNetwork") + for layer in nn_spec.layers: + spec.neuralNetworkClassifier.layers.add().CopyFrom(layer) + for preprocessing in nn_spec.preprocessing: + spec.neuralNetworkClassifier.preprocessing.add().CopyFrom(preprocessing) + spec.neuralNetworkClassifier.arrayInputShapeMapping = nn_spec.arrayInputShapeMapping + spec.neuralNetworkClassifier.imageInputShapeMapping = nn_spec.imageInputShapeMapping + spec.neuralNetworkClassifier.updateParams.CopyFrom(nn_spec.updateParams) + + # set properties related to 
classifier + builder = NeuralNetworkBuilder(spec=spec) + message = "Class labels must be a list of integers / strings or a file path" + classes_in = class_labels + if isinstance(classes_in, str): + import os + + if not os.path.isfile(classes_in): + raise ValueError("Path to class labels (%s) does not exist." % classes_in) + with open(classes_in, "r") as f: + classes = f.read() + classes = classes.splitlines() + elif isinstance(classes_in, list): # list[int or str] + classes = classes_in + assert all([isinstance(x, \ + (int, str)) for x in classes]), message + else: + raise ValueError(message) + + kwargs = {} + if predicted_feature_name is not None: + kwargs["predicted_feature_name"] = predicted_feature_name + if predicted_probabilities_output is not None: + kwargs["prediction_blob"] = predicted_probabilities_output + builder.set_class_labels(classes, **kwargs) + + return _get_model(spec) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/pipeline.py b/__packaged__/coreml/.python_dependencies/coremltools/models/pipeline.py new file mode 100644 index 00000000..487c2466 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/pipeline.py @@ -0,0 +1,305 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +""" +Pipeline utils for this package. +""" +from .. import SPECIFICATION_VERSION as _SPECIFICATION_VERSION +from ..proto import Model_pb2 as _Model_pb2 +from . import _feature_management +from . import model as _model +from ._interface_management import (set_classifier_interface_params, + set_regressor_interface_params, + set_training_features, + set_transform_interface_params) + + +class Pipeline: + """ + A pipeline model that exposes a sequence of models as a single model, + It requires a set of inputs, a sequence of other models and a set of outputs. + + This class is the base class for :py:class:`PipelineClassifier` and + :py:class:`PipelineRegressor`, which contain a sequence ending in a classifier + or regressor and themselves behave like a classifier or regressor. This class + may be used directly for a sequence of feature transformer objects. + + """ + + def __init__(self, input_features, output_features, training_features=None): + """ + Create a pipeline of models to be executed sequentially. + + Parameters + ---------- + + input_features: [list of 2-tuples] + Name(s) of the input features, given as a list of `('name', datatype)` + tuples. The datatypes entry can be any of the data types defined in the + :py:mod:`models.datatypes` module. + + output_features: [list of features] + Name(s) of the output features, given as a list of + `('name',datatype)` tuples. The datatypes entry can be any of the + data types defined in the :py:mod:`models.datatypes` module. All features + must be either defined in the inputs or be produced by one of the + contained models. + + """ + spec = _Model_pb2.Model() + spec.specificationVersion = _SPECIFICATION_VERSION + + # Access this to declare it as a pipeline + spec.pipeline + + spec = set_transform_interface_params( + spec, input_features, output_features, training_features + ) + + # Save the spec as a member variable. 
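+        # Sub-models appended later via add_model() are stored inside it.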
+ self.spec = spec + + def _validate_updatable_pipeline_on_add_model(self, spec): + if spec.isUpdatable: + raise ValueError( + "New sub-models cannot be added after the pipeline has been marked as updatable" + ) + + def add_model(self, spec): + """ + Add a protobuf spec or :py:class:`models.MLModel` instance to the pipeline. + + All input features of this model must either match the input_features + of the pipeline, or match the outputs of a previous model. + + Parameters + ---------- + spec: [MLModel, Model_pb2] + A protobuf spec or MLModel instance containing a model. + """ + + self._validate_updatable_pipeline_on_add_model(self.spec) + + if isinstance(spec, _model.MLModel): + spec = spec._spec + + pipeline = self.spec.pipeline + step_spec = pipeline.models.add() + step_spec.CopyFrom(spec) + + def _validate_sub_models_and_make_updatable(self, pipeline, spec): + + num_models = len(pipeline.models) + if num_models < 1: + raise ValueError( + "Pipeline does not seem to have any models. It should be marked as updatable only after adding all sub-models." + ) + + for model in pipeline.models[:-1]: + if model.isUpdatable: + raise ValueError( + "Only the last model can be updatable in an updatable pipeline." + ) + + last_model = pipeline.models[num_models - 1] + if not last_model.isUpdatable: + raise ValueError( + "A pipeline can be made updatable only if the last model is updatable." + ) + + spec.isUpdatable = True + + def make_updatable(self): + self._validate_sub_models_and_make_updatable(self.spec.pipeline, self.spec) + + def set_training_input(self, training_input): + """ + Set the training inputs of the network spec. + + Parameters + ---------- + training_input: [tuple] + List of training input names and type of the network. + """ + spec = self.spec + set_training_features(spec, training_input) + + +class PipelineRegressor(Pipeline): + """ + A pipeline model that exposes a sequence of models as a single model, + It requires a set of inputs, a sequence of other models and a set of outputs. + In this case the pipeline itself behaves as a regression model by designating + a real valued output feature as its 'predicted feature'. + """ + + def __init__(self, input_features, output_features, training_features=None): + """ + Create a set of pipeline models given a set of model specs. The final + output model must be a regression model. + + Parameters + ---------- + + input_features: [list of 2-tuples] + Name(s) of the input features, given as a list of `('name', datatype)` + tuples. The datatypes entry can be any of the data types defined in the + :py:mod:`models.datatypes` module. + + output_features: [list of features] + Name(s) of the output features, given as a list of + `('name',datatype)` tuples. The datatypes entry can be any of the + data types defined in the :py:mod:`models.datatypes` module. All features + must be either defined in the inputs or be produced by one of the + contained models. + + """ + spec = _Model_pb2.Model() + spec.specificationVersion = _SPECIFICATION_VERSION + + # Access this to declare it as a pipeline + spec.pipelineRegressor + spec = set_regressor_interface_params( + spec, input_features, output_features, training_features + ) + + # Save as a member variable + self.spec = spec + + def add_model(self, spec): + """ + Add a protobuf spec or :py:class:`models.MLModel` instance to the pipeline. + + All input features of this model must either match the input_features + of the pipeline, or match the outputs of a previous model. 
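+
+        Example (a minimal sketch; ``input_features``, ``output_features``,
+        and ``reg_spec`` are placeholders for your own features and a
+        regressor spec):
+
+        .. sourcecode:: python
+
+            >>> pipeline = PipelineRegressor(input_features, output_features)
+            >>> pipeline.add_model(reg_spec)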
+ + Parameters + ---------- + spec: [MLModel, Model_pb2] + A protobuf spec or MLModel instance containing a model. + """ + + super()._validate_updatable_pipeline_on_add_model(self.spec) + + if isinstance(spec, _model.MLModel): + spec = spec._spec + + pipeline = self.spec.pipelineRegressor.pipeline + step_spec = pipeline.models.add() + step_spec.CopyFrom(spec) + + def make_updatable(self): + super()._validate_sub_models_and_make_updatable( + self.spec.pipelineRegressor.pipeline, self.spec + ) + + def set_training_input(self, training_input): + """ + Set the training inputs of the network spec. + + Parameters + ---------- + training_input: [tuple] + List of training input names and type of the network. + """ + spec = self.spec + set_training_features(spec, training_input) + + +class PipelineClassifier(Pipeline): + """ + A pipeline model that exposes a sequence of models as a single model, + It requires a set of inputs, a sequence of other models and a set of outputs. + In this case the pipeline itself behaves as a classification model by designating + a discrete categorical output feature as its 'predicted feature'. + """ + + def __init__( + self, input_features, class_labels, output_features=None, training_features=None + ): + """ + Create a set of pipeline models given a set of model specs. The last + model in this list must be a classifier model. + + Parameters + ---------- + input_features: [list of 2-tuples] + Name(s) of the input features, given as a list of `('name', datatype)` + tuples. The datatypes entry can be any of the data types defined in the + :py:mod:`models.datatypes` module. + + class_labels: [list] + A list of string or integer class labels to use in making predictions. + This list must match the class labels in the model outputting the categorical + predictedFeatureName + + output_features: [list] + A string or a list of two strings specifying the names of the two + output features, the first being a class label corresponding + to the class with the highest predicted score, and the second being + a dictionary mapping each class to its score. If `output_features` + is a string, it specifies the predicted class label and the class + scores is set to the default value of `"classProbability."` + + """ + + output_features = _feature_management.process_or_validate_classifier_output_features( + output_features, class_labels + ) + + spec = _Model_pb2.Model() + spec.specificationVersion = _SPECIFICATION_VERSION + spec = set_classifier_interface_params( + spec, + input_features, + class_labels, + "pipelineClassifier", + output_features, + training_features, + ) + + # Access this to declare it as a pipeline + spec.pipelineClassifier + + # Save as a member variable + self.spec = spec + + def add_model(self, spec): + """ + Add a protobuf spec or :py:class:`models.MLModel` instance to the pipeline. + + All input features of this model must either match the input_features + of the pipeline, or match the outputs of a previous model. + + Parameters + ---------- + spec: [MLModel, Model_pb2] + A protobuf spec or MLModel instance containing a model. 
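+
+        Example (a minimal sketch; ``input_features`` and ``clf_spec`` are
+        placeholders for your own features and a classifier spec added as
+        the final step):
+
+        .. sourcecode:: python
+
+            >>> pipeline = PipelineClassifier(input_features, class_labels=[0, 1])
+            >>> pipeline.add_model(clf_spec)
+            >>> pipeline.make_updatable()  # valid only if clf_spec is updatable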
+ """ + + super()._validate_updatable_pipeline_on_add_model(self.spec) + + if isinstance(spec, _model.MLModel): + spec = spec._spec + pipeline = self.spec.pipelineClassifier.pipeline + step_spec = pipeline.models.add() + step_spec.CopyFrom(spec) + + def make_updatable(self): + super(PipelineClassifier, self)._validate_sub_models_and_make_updatable( + self.spec.pipelineClassifier.pipeline, self.spec + ) + + def set_training_input(self, training_input): + """ + Set the training inputs of the network spec. + + Parameters + ---------- + training_input: [tuple] + List of training input names and type of the network. + """ + spec = self.spec + set_training_features(spec, training_input) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/tree_ensemble.py b/__packaged__/coreml/.python_dependencies/coremltools/models/tree_ensemble.py new file mode 100644 index 00000000..7cd023a0 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/tree_ensemble.py @@ -0,0 +1,426 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +""" +Tree ensemble builder class to construct CoreML models. +""" +import collections as _collections + +from .. import SPECIFICATION_VERSION as _SPECIFICATION_VERSION +from ..proto import Model_pb2 as _Model_pb2 +from ..proto import TreeEnsemble_pb2 as _TreeEnsemble_pb2 +from ._interface_management import (set_classifier_interface_params, + set_regressor_interface_params) + + +class TreeEnsembleBase: + """ + Base class for the tree ensemble builder class. This should be instantiated + either through the :py:class:`TreeEnsembleRegressor` or + :py:class:`TreeEnsembleClassifier` classes. + """ + + def __init__(self): + """ + High level Python API to build a tree ensemble model for Core ML. + """ + # Set inputs and outputs + spec = _Model_pb2.Model() + spec.specificationVersion = _SPECIFICATION_VERSION + + # Save the spec in the protobuf + self.spec = spec + + def set_default_prediction_value(self, values): + """ + Set the default prediction value(s). + + The values given here form the base prediction value that the values + at activated leaves are added to. If values is a scalar, then + the output of the tree must also be 1 dimensional; otherwise, values + must be a list with length matching the dimension of values in the tree. + + Parameters + ---------- + values: [int | double | list[double]] + Default values for predictions. + + """ + if type(values) is not list: + values = [float(values)] + self.tree_parameters.numPredictionDimensions = len(values) + for value in values: + self.tree_parameters.basePredictionValue.append(value) + + def set_post_evaluation_transform(self, value): + r""" + Set the post processing transform applied after the prediction value + from the tree ensemble. + + Parameters + ---------- + + value: str + + A value denoting the transform applied. Possible values are: + + - ``"NoTransform"`` (default). Do not apply a transform. + + - ``"Classification_SoftMax"``. + + Apply a softmax function to the outcome to produce normalized, + non-negative scores that sum to 1. The transformation applied to + dimension `i` is equivalent to: + + .. math:: + + \frac{e^{x_i}}{\sum_j e^{x_j}} + + Note: This is the output transformation applied by the XGBoost package + with multiclass classification. + + - ``"Regression_Logistic"``. 
+
+        Applies a logistic transform to the predicted value, specifically:
+
+        .. math::
+
+            (1 + e^{-v})^{-1}
+
+        This is the transformation used in binary classification.
+
+
+        """
+        self.tree_spec.postEvaluationTransform = _TreeEnsemble_pb2.TreeEnsemblePostEvaluationTransform.Value(
+            value
+        )
+
+    def add_branch_node(
+        self,
+        tree_id,
+        node_id,
+        feature_index,
+        feature_value,
+        branch_mode,
+        true_child_id,
+        false_child_id,
+        relative_hit_rate=None,
+        missing_value_tracks_true_child=False,
+    ):
+        """
+        Add a branch node to the tree ensemble.
+
+        Parameters
+        ----------
+        tree_id: int
+            ID of the tree to add the node to.
+
+        node_id: int
+            ID of the node within the tree.
+
+        feature_index: int
+            Index of the feature in the input being split on.
+
+        feature_value: double or int
+            The value used in the feature comparison determining the traversal
+            direction from this node.
+
+        branch_mode: str
+            Branch mode of the node, specifying the condition under which the node
+            referenced by ``true_child_id`` is called next.
+
+            Must be one of the following:
+
+            - ``"BranchOnValueLessThanEqual"``. Traverse to node ``true_child_id``
+              if ``input[feature_index] <= feature_value``, and ``false_child_id``
+              otherwise.
+
+            - ``"BranchOnValueLessThan"``. Traverse to node ``true_child_id``
+              if ``input[feature_index] < feature_value``, and ``false_child_id``
+              otherwise.
+
+            - ``"BranchOnValueGreaterThanEqual"``. Traverse to node ``true_child_id``
+              if ``input[feature_index] >= feature_value``, and ``false_child_id``
+              otherwise.
+
+            - ``"BranchOnValueGreaterThan"``. Traverse to node ``true_child_id``
+              if ``input[feature_index] > feature_value``, and ``false_child_id``
+              otherwise.
+
+            - ``"BranchOnValueEqual"``. Traverse to node ``true_child_id``
+              if ``input[feature_index] == feature_value``, and ``false_child_id``
+              otherwise.
+
+            - ``"BranchOnValueNotEqual"``. Traverse to node ``true_child_id``
+              if ``input[feature_index] != feature_value``, and ``false_child_id``
+              otherwise.
+
+        true_child_id: int
+            ID of the child under the true condition of the split. An error will
+            be raised at model validation if this does not match the ``node_id``
+            of a node instantiated by ``add_branch_node`` or ``add_leaf_node`` within
+            this ``tree_id``.
+
+        false_child_id: int
+            ID of the child under the false condition of the split. An error will
+            be raised at model validation if this does not match the ``node_id``
+            of a node instantiated by ``add_branch_node`` or ``add_leaf_node`` within
+            this ``tree_id``.
+
+        relative_hit_rate: float [optional]
+            When the model is compiled by Core ML, this gives hints about which
+            node is more likely to be hit on evaluation, allowing for additional
+            optimizations. The values can be on any scale, with the values
+            between child nodes being compared relative to each other.
+
+        missing_value_tracks_true_child: bool [optional]
+            If the training data contains NaN values or missing values, then this
+            flag determines which direction a NaN value traverses. 
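+
+        Example (a minimal sketch; ``tm`` is assumed to be a
+        ``TreeEnsembleRegressor`` or ``TreeEnsembleClassifier`` instance):
+
+        .. sourcecode:: python
+
+            >>> # Split on input[0] <= 0.5; missing values take the true branch.
+            >>> tm.add_branch_node(0, 0, 0, 0.5, "BranchOnValueLessThanEqual", 1, 2,
+            ...                    missing_value_tracks_true_child=True)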
+ + """ + spec_node = self.tree_parameters.nodes.add() + spec_node.treeId = tree_id + spec_node.nodeId = node_id + spec_node.branchFeatureIndex = int(feature_index) + spec_node.branchFeatureValue = feature_value + spec_node.trueChildNodeId = true_child_id + spec_node.falseChildNodeId = false_child_id + spec_node.nodeBehavior = _TreeEnsemble_pb2.TreeEnsembleParameters.TreeNode.TreeNodeBehavior.Value( + branch_mode + ) + + if relative_hit_rate is not None: + spec_node.relativeHitRate = relative_hit_rate + spec_node.missingValueTracksTrueChild = missing_value_tracks_true_child + + def add_leaf_node(self, tree_id, node_id, values, relative_hit_rate=None): + """ + Add a leaf node to the tree ensemble. + + Parameters + ---------- + tree_id: int + ID of the tree to add the node to. + + node_id: int + ID of the node within the tree. + + values: [float | int | list | dict] + Value(s) at the leaf node to add to the prediction when this node is + activated. If the prediction dimension of the tree is 1, then the + value is specified as a float or integer value. + + For multidimensional predictions, the values can be a list of numbers + with length matching the dimension of the predictions or a dictionary + mapping index to value added to that dimension. + + Note that the dimension of any tree must match the dimension given + when :py:meth:`set_default_prediction_value` is called. + + """ + spec_node = self.tree_parameters.nodes.add() + spec_node.treeId = tree_id + spec_node.nodeId = node_id + spec_node.nodeBehavior = _TreeEnsemble_pb2.TreeEnsembleParameters.TreeNode.TreeNodeBehavior.Value( + "LeafNode" + ) + + if not isinstance(values, _collections.abc.Iterable): + values = [values] + + if relative_hit_rate is not None: + spec_node.relativeHitRate = relative_hit_rate + + if type(values) == dict: + iter = values.items() + else: + iter = enumerate(values) + + for index, value in iter: + ev_info = spec_node.evaluationInfo.add() + ev_info.evaluationIndex = index + ev_info.evaluationValue = float(value) + spec_node.nodeBehavior = _TreeEnsemble_pb2.TreeEnsembleParameters.TreeNode.TreeNodeBehavior.Value( + "LeafNode" + ) + + +class TreeEnsembleRegressor(TreeEnsembleBase): + """ + Tree Ensemble builder class to construct a Tree Ensemble regression model. + + The TreeEnsembleRegressor class constructs a Tree Ensemble model incrementally + using methods to add branch and leaf nodes specifying the behavior of the model. + + Examples + -------- + + In the following example, the code saves the model to disk, which is a + recommended practice but not required. + + .. sourcecode:: python + + >>> # Required inputs + >>> import coremltools + >>> from coremltools.models import datatypes + >>> from coremltools.models.tree_ensemble import TreeEnsembleRegressor + >>> import numpy as np + + >>> # Define input features + >>> input_features = [("a", datatypes.Array(3)), ("b", (datatypes.Double()))] + + >>> # Define output_features + >>> output_features = [("predicted_values", datatypes.Double())] + + >>> tm = TreeEnsembleRegressor(features = input_features, target = output_features) + + >>> # Split on a[2] <= 3 + >>> tm.add_branch_node(0, 0, 2, 3, "BranchOnValueLessThanEqual", 1, 2) + + >>> # Add leaf to the true branch of node 0 that subtracts 1. + >>> tm.add_leaf_node(0, 1, -1) + + >>> # Add split on b == 0 to the false branch of node 0, which is index 3 + >>> tm.add_branch_node(0, 2, 3, 0, "BranchOnValueEqual", 3, 4) + + >>> # Add leaf to the true branch of node 2 that adds 1 to the result. 
+ >>> tm.add_leaf_node(0, 3, 1) + + >>> # Add leaf to the false branch of node 2 that subtracts 1 from the result. + >>> tm.add_leaf_node(0, 4, -1) + + >>> tm.set_default_prediction_value([0, 0]) + + >>> # save the model to a .mlmodel file + >>> model_path = './tree.mlmodel' + >>> coremltools.models.utils.save_spec(tm.spec, model_path) + + >>> # load the .mlmodel + >>> mlmodel = coremltools.models.MLModel(model_path) + + >>> # make predictions + >>> test_input = { + >>> 'a': np.array([0, 1, 2]).astype(np.float32), + >>> "b": 3.0, + >>> } + >>> predictions = mlmodel.predict(test_input) + + """ + + def __init__(self, features, target): + """ + Create a Tree Ensemble regression model that takes one or more input + features and maps them to an output feature. + + Parameters + ---------- + + features: [list of features] + Name(s) of the input features, given as a list of ``('name', datatype)`` + tuples. The features are one of ``models.datatypes.Int64``, + ``datatypes.Double``, or ``models.datatypes.Array``. + Feature indices in the nodes are counted sequentially from 0 through + the features. + + target: (default = None) + Name of the target feature predicted. + """ + super().__init__() + spec = self.spec + spec = set_regressor_interface_params(spec, features, target) + self.tree_spec = spec.treeEnsembleRegressor + self.tree_parameters = self.tree_spec.treeEnsemble + + +class TreeEnsembleClassifier(TreeEnsembleBase): + """ + Tree Ensemble builder class to construct a Tree Ensemble classification model. + + The TreeEnsembleClassifier class constructs a Tree Ensemble model incrementally + using methods to add branch and leaf nodes specifying the behavior of the model. + + + Examples + -------- + + In the following example, the code saves the model to disk, which is a + recommended practice but not required. + + .. sourcecode:: python + + >>> input_features = [("a", datatypes.Array(3)), ("b", datatypes.Double())] + + >>> tm = TreeEnsembleClassifier(features = input_features, class_labels = [0, 1], + output_features = "predicted_class") + + >>> # Split on a[2] <= 3 + >>> tm.add_branch_node(0, 0, 2, 3, "BranchOnValueLessThanEqual", 1, 2) + + >>> # Add leaf to the true branch of node 0 that subtracts 1. + >>> tm.add_leaf_node(0, 1, -1) + + >>> # Add split on b == 0 to the false branch of node 0. + >>> tm.add_branch_node(0, 2, 3, 0, "BranchOnValueEqual", 3, 4) + + >>> # Add leaf to the true branch of node 2 that adds 1 to the result. + >>> tm.add_leaf_node(0, 3, 1) + + >>> # Add leaf to the false branch of node 2 that subtracts 1 from the result. + >>> tm.add_leaf_node(0, 4, -1) + + >>> # Put in a softmax transform to translate these into probabilities. + >>> tm.set_post_evaluation_transform("Classification_SoftMax") + + >>> tm.set_default_prediction_value([0, 0]) + + >>> # save the model to a .mlmodel file + >>> model_path = './tree.mlmodel' + >>> coremltools.models.utils.save_spec(tm.spec, model_path) + + >>> # load the .mlmodel + >>> mlmodel = coremltools.models.MLModel(model_path) + + >>> # make predictions + >>> test_input = { + >>> 'a': np.array([0, 1, 2]).astype(np.float32), + >>> "b": 3.0, + >>> } + >>> predictions = mlmodel.predict(test_input) + + """ + + def __init__(self, features, class_labels, output_features): + """ + Create a tree ensemble classifier model. + + Parameters + ---------- + features: [list of features] + Name(s) of the input features, given as a list of ``('name', datatype)`` + tuples. 
The features are one of ``models.datatypes.Int64``, + ``datatypes.Double``, or ``models.datatypes.Array``. + Feature indices in the nodes are counted sequentially from 0 through + the features. + + class_labels: [list] + A list of string or integer class labels to use in making predictions. + The length of this must match the dimension of the tree model. + + output_features: [list] + A string or a list of two strings specifying the names of the two + output features, the first being a class label corresponding + to the class with the highest predicted score, and the second being + a dictionary mapping each class to its score. If ``output_features`` + is a string, it specifies the predicted class label and the class + scores is set to the default value of ``"classProbability"``. + """ + super().__init__() + spec = self.spec + spec = set_classifier_interface_params( + spec, features, class_labels, "treeEnsembleClassifier", output_features + ) + self.tree_spec = spec.treeEnsembleClassifier + self.tree_parameters = self.tree_spec.treeEnsemble diff --git a/__packaged__/coreml/.python_dependencies/coremltools/models/utils.py b/__packaged__/coreml/.python_dependencies/coremltools/models/utils.py new file mode 100644 index 00000000..fe6c30df --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/models/utils.py @@ -0,0 +1,1097 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +""" +Utilities for the entire package. +""" + +from collections.abc import Iterable as _Iterable +from functools import lru_cache as _lru_cache +import math as _math +import os as _os +import shutil as _shutil +import subprocess as _subprocess +import sys as _sys +import tempfile as _tempfile +from typing import Optional as _Optional +import warnings as _warnings + +import numpy as _np + +import coremltools as _ct +from coremltools import ComputeUnit as _ComputeUnit +from coremltools.converters.mil.mil.passes.defs.preprocess import NameSanitizer as _NameSanitizer +from coremltools.proto import Model_pb2 as _Model_pb2 +import coremltools.proto.MIL_pb2 as _mil_proto + +from .._deps import _HAS_SCIPY + +_MLMODEL_EXTENSION = ".mlmodel" +_MLPACKAGE_EXTENSION = ".mlpackage" +_MODEL_FILE_NAME = 'model.mlmodel' +_WEIGHTS_FILE_NAME = 'weight.bin' +_WEIGHTS_DIR_NAME = 'weights' +_MLPACKAGE_AUTHOR_NAME = "com.apple.CoreML" + +try: + from ..libmodelpackage import ModelPackage as _ModelPackage +except: + _ModelPackage = None + +if _HAS_SCIPY: + import scipy.sparse as _sp + + +def _to_unicode(x): + if isinstance(x, bytes): + return x.decode() + else: + return x + + +def _remove_invalid_keys(input_dict, model): + # make sure that input_dict does not contain an input name, which + # is not present in the list of model inputs + input_dict_keys = list(input_dict.keys()) + model_input_names = set([inp.name for inp in model._spec.description.input]) + for k in input_dict_keys: + if k not in model_input_names: + del input_dict[k] + + +def _create_mlpackage( + proto_spec: _Model_pb2, + weights_dir: _Optional[str] = None, + package_path: _Optional[str] = None, +) -> str: + """ + Args: + proto_spec: The proto spec of the model. + weights_dir: Copy weights from this path to the mlpackage. + package_path: Place the created mlpackage at this path. Error out if this path is a non-empty directory. 
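+
+    A ``package_path`` of ``None`` yields a fresh temporary ``.mlpackage``
+    directory; an existing *empty* directory at ``package_path`` is removed
+    first so ``ModelPackage`` can recreate it.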
+
+    Returns:
+        path to the mlpackage
+    """
+    if package_path is None:
+        package_path = _tempfile.mkdtemp(suffix=_MLPACKAGE_EXTENSION)
+    if _os.path.exists(package_path):
+        if _os.listdir(package_path):
+            raise FileExistsError(
+                f"The package_path is invalid because it's a non-empty directory: {package_path}"
+            )
+        # If package_path is an empty dir, the ModelPackage load will error out with `manifest.json not found` issue.
+        _shutil.rmtree(package_path)
+
+    _, ext = _os.path.splitext(package_path)
+    if ext != _MLPACKAGE_EXTENSION:
+        raise Exception(
+            f"For an ML Package, extension must be {_MLPACKAGE_EXTENSION} (not {ext})"
+        )
+
+    package = _ModelPackage(package_path)
+
+    # Save proto to disk as the root model file, and copy into the model package.
+    spec_file = _tempfile.NamedTemporaryFile(suffix=_MLMODEL_EXTENSION)
+    spec_file.write(proto_spec.SerializeToString())
+    spec_file.flush()
+    package.setRootModel(spec_file.name, _MODEL_FILE_NAME, _MLPACKAGE_AUTHOR_NAME,
+                         "CoreML Model Specification")
+    # Spec file is auto cleaned after close, which is fine because it is already added to the model package.
+    spec_file.close()
+
+    # Add weights bundle into the model package.
+    if weights_dir is not None:
+        package.addItem(
+            weights_dir,
+            _WEIGHTS_DIR_NAME,
+            _MLPACKAGE_AUTHOR_NAME,
+            "CoreML Model Weights",
+        )
+
+    return package_path
+
+
+def save_spec(spec, filename, auto_set_specification_version=False, weights_dir=None):
+    """
+    Save a protobuf model specification to file.
+
+    Parameters
+    ----------
+    spec: Model_pb
+        Protobuf representation of the model
+
+    filename: str
+        File path where the spec gets saved.
+
+    auto_set_specification_version: bool
+        If true, will always try to set the specification version automatically.
+
+    weights_dir: str
+        Path to the directory containing the weight.bin file. This is required
+        when the spec is of model type mlProgram. If the mlProgram does not
+        contain any weights, this path can be an empty directory.
+
+    Examples
+    --------
+    .. sourcecode:: python
+
+        coremltools.utils.save_spec(spec, "HousePricer.mlmodel")
+        coremltools.utils.save_spec(spec, "HousePricer.mlpackage")
+        coremltools.utils.save_spec(
+            spec, "mlprogram_model.mlpackage", weights_dir="/path/to/weights/directory"
+        )
+
+    See Also
+    --------
+    load_spec
+    """
+    name, ext = _os.path.splitext(filename)
+
+    is_package = False
+
+    if not ext:
+        filename = "{}{}".format(filename, _MLMODEL_EXTENSION)
+    elif ext == _MLPACKAGE_EXTENSION:
+        is_package = True
+    elif ext == _MLMODEL_EXTENSION:
+        is_package = False
+    else:
+        raise Exception("Extension must be {} or {} (not {})".format(_MLMODEL_EXTENSION, _MLPACKAGE_EXTENSION, ext))
+
+    if auto_set_specification_version:
+        try:
+            # always try to downgrade the specification version to the
+            # minimal version that supports everything in this mlmodel
+            from ..libcoremlpython import _MLModelProxy
+
+            spec = _MLModelProxy.auto_set_specification_version(spec)
+        except Exception as e:
+            print(e)
+            _warnings.warn(
+                "Failed to automatically set the specification version for this model.",
+                RuntimeWarning,
+            )
+
+    if is_package:
+        if _ModelPackage is None:
+            raise Exception(
+                "Unable to load libmodelpackage. Cannot save spec"
+            )
+        if spec.WhichOneof('Type') == "mlProgram" and weights_dir is None:
+            raise Exception('spec of type mlProgram cannot be saved without the' 
+                            ' weights file. Please provide the path to the weights file as well, '
+                            'using the \'weights_dir\' argument.')
+        _create_mlpackage(spec, weights_dir=weights_dir, package_path=filename)
+    else:
+        with open(filename, "wb") as f:
+            f.write(spec.SerializeToString())
+
+
+def load_spec(model_path: str) -> _Model_pb2:
+    """
+    Load a protobuf model specification from file (mlmodel) or directory (mlpackage).
+
+    Parameters
+    ----------
+    model_path: str
+        Path to the model from which the protobuf spec is loaded.
+
+    Returns
+    -------
+    model_spec: Model_pb
+        Protobuf representation of the model.
+
+    Examples
+    --------
+    .. sourcecode:: python
+
+        spec = coremltools.utils.load_spec("HousePricer.mlmodel")
+        spec = coremltools.utils.load_spec("HousePricer.mlpackage")
+
+    See Also
+    --------
+    save_spec
+    """
+    if _os.path.isdir(model_path):
+        if _ModelPackage is None:
+            raise Exception("Unable to load libmodelpackage. Cannot load spec.")
+        specfile = _ModelPackage(model_path).getRootModel().path()
+    else:
+        specfile = model_path
+
+    spec = _Model_pb2.Model()
+    with open(specfile, "rb") as f:
+        spec.ParseFromString(f.read())
+    return spec
+
+
+def _get_nn_layers(spec):
+    """
+    Returns a list of neural network layers if the model contains any.
+
+    Parameters
+    ----------
+    spec: Model_pb
+        A model protobuf specification.
+
+    Returns
+    -------
+    [NN layer]
+        A list of all layers (including layers from models inside a pipeline).
+    """
+
+    layers = []
+    if spec.WhichOneof("Type") == "pipeline":
+        layers = []
+        for model_spec in spec.pipeline.models:
+            if not layers:
+                return _get_nn_layers(model_spec)
+            else:
+                layers.extend(_get_nn_layers(model_spec))
+
+    elif spec.WhichOneof("Type") in ["pipelineClassifier", "pipelineRegressor"]:
+        layers = []
+        for model_spec in spec.pipeline.models:
+            if not layers:
+                return _get_nn_layers(model_spec)
+            else:
+                layers.extend(_get_nn_layers(model_spec))
+
+    elif spec.neuralNetwork.layers:
+        layers = spec.neuralNetwork.layers
+    elif spec.neuralNetworkClassifier.layers:
+        layers = spec.neuralNetworkClassifier.layers
+    elif spec.neuralNetworkRegressor.layers:
+        layers = spec.neuralNetworkRegressor.layers
+
+    return layers
+
+
+def _fp32_to_reversed_fp16_byte_array(fp32_arr):
+    raw_fp16 = _np.float16(fp32_arr)
+    x = b""
+    for fp16 in raw_fp16:
+        all_bytes = _np.frombuffer(fp16.tobytes(), dtype="int8")
+        x += all_bytes[1].tobytes()
+        x += all_bytes[0].tobytes()
+    return x
+
+
+def _fp32_to_fp16_byte_array(fp32_arr):
+    if _np.amax(fp32_arr) >= 65504 or _np.amin(fp32_arr) <= -65504:
+        raise Exception(
+            "Model cannot be converted as "
+            "it has weights that cannot be represented in "
+            "half precision.\n"
+        )
+
+    if _sys.byteorder == "little":
+        return _np.float16(fp32_arr).tobytes()
+    else:
+        return _fp32_to_reversed_fp16_byte_array(fp32_arr)
+
+
+def _wp_to_fp16wp(wp):
+    assert wp
+    # If the float32 field is empty, do nothing.
+    if len(wp.floatValue) == 0:
+        return
+    wp.float16Value = _fp32_to_fp16_byte_array(wp.floatValue)
+    del wp.floatValue[:]
+
+
+def _convert_neural_network_spec_weights_to_fp16(fp_spec):
+    from .neural_network.quantization_utils import (
+        _QUANTIZATION_MODE_LINEAR_QUANTIZATION, _quantize_spec_weights)
+
+    qspec = _quantize_spec_weights(fp_spec, 16, _QUANTIZATION_MODE_LINEAR_QUANTIZATION)
+    return qspec
+
+
+def _convert_neural_network_weights_to_fp16(full_precision_model):
+    """
+    Utility function to convert a full precision (float) MLModel to a
+    half precision MLModel (float16).
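+
+    For example (a minimal sketch; the model path is hypothetical, and this
+    helper is private, so prefer the public quantization utilities where
+    available):
+
+    .. sourcecode:: python
+
+        model = coremltools.models.MLModel("my_network.mlmodel")
+        fp16_model = coremltools.utils._convert_neural_network_weights_to_fp16(model)
+        fp16_model.save("my_network_fp16.mlmodel")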
+
+    Parameters
+    ----------
+    full_precision_model: MLModel
+        Model which will be converted to half precision. Currently, only
+        neural network models are supported. If a pipeline model is passed
+        in, all neural network models embedded within it will be converted.
+
+    Returns
+    -------
+    model: MLModel
+        The converted half precision MLModel.
+    """
+    spec = full_precision_model.get_spec()
+    return _get_model(_convert_neural_network_spec_weights_to_fp16(spec))
+
+
+def _get_model(spec, compute_units=_ComputeUnit.ALL):
+    """
+    Utility that returns an MLModel, constructing one from a spec if needed.
+    """
+    from . import MLModel
+
+    if isinstance(spec, MLModel):
+        return spec
+    else:
+        return MLModel(spec, compute_units=compute_units)
+
+
+def evaluate_regressor(model, data, target="target", verbose=False):
+    """
+    Evaluate a Core ML regression model and compare against predictions
+    from the original framework (for testing correctness of conversion).
+
+    Parameters
+    ----------
+    model: MLModel or str
+        A loaded MLModel or a path to a saved MLModel.
+
+    data: Dataframe
+        Test data on which to evaluate the model.
+
+    target: str
+        Name of the column in the dataframe to be compared against the prediction.
+
+    verbose: bool
+        Set to true for a more verbose output.
+
+    See Also
+    --------
+    evaluate_classifier
+
+    Examples
+    --------
+    .. sourcecode:: python
+
+        metrics = coremltools.utils.evaluate_regressor(
+            spec, "data_and_predictions.csv", "target"
+        )
+        print(metrics)
+        {"samples": 10, "rmse": 0.0, "max_error": 0.0}
+    """
+    model = _get_model(model)
+
+    if verbose:
+        print("")
+        print("Other Framework\t\tPredicted\t\tDelta")
+
+    max_error = 0
+    error_squared = 0
+
+    for _, row in data.iterrows():
+        input_dict = dict(row)
+        _remove_invalid_keys(input_dict, model)
+        predicted = model.predict(input_dict)[_to_unicode(target)]
+        other_framework = row[target]
+        delta = predicted - other_framework
+
+        if verbose:
+            print("{}\t\t\t\t{}\t\t\t{:0.4f}".format(other_framework, predicted, delta))
+
+        max_error = max(abs(delta), max_error)
+        error_squared = error_squared + (delta * delta)
+
+    ret = {
+        "samples": len(data),
+        "rmse": _math.sqrt(error_squared / len(data)),
+        "max_error": max_error,
+    }
+
+    if verbose:
+        print("results: {}".format(ret))
+    return ret
+
+
+def evaluate_classifier(model, data, target="target", verbose=False):
+    """
+    Evaluate a Core ML classifier model and compare against predictions
+    from the original framework (for testing correctness of conversion).
+    Use this evaluation for models that don't deal with probabilities.
+
+    Parameters
+    ----------
+    model: MLModel or str
+        A loaded MLModel or a path to a saved MLModel.
+
+    data: Dataframe
+        Test data on which to evaluate the model.
+
+    target: str
+        Column to interpret as the target column.
+
+    verbose: bool
+        Set to true for a more verbose output.
+
+    See Also
+    --------
+    evaluate_regressor, evaluate_classifier_with_probabilities
+
+    Examples
+    --------
+    .. sourcecode:: python
+
+        metrics = coremltools.utils.evaluate_classifier(
+            spec, "data_and_predictions.csv", "target"
+        )
+        print(metrics)
+        {"num_samples": 10, "num_errors": 0}
+    """
+    model = _get_model(model)
+    if verbose:
+        print("")
+        print("Other Framework\t\tPredicted")
+
+    num_errors = 0
+
+    for _, row in data.iterrows():
+        input_dict = dict(row)
+        _remove_invalid_keys(input_dict, model)
+        predicted = model.predict(input_dict)[_to_unicode(target)]
+        other_framework = row[target]
+        if predicted != other_framework:
+            num_errors += 1
+
+        if verbose:
+            print("{}\t\t\t\t{}".format(other_framework, predicted))
+
+    ret = {"num_samples": len(data), "num_errors": num_errors}
+
+    if verbose:
+        print("results: {}".format(ret))
+
+    return ret
+
+
+def evaluate_classifier_with_probabilities(
+    model, data, probabilities="probabilities", verbose=False
+):
+    """
+    Evaluate a classifier specification for testing.
+
+    Parameters
+    ----------
+    model: MLModel or str
+        A loaded MLModel or a path to a saved MLModel.
+
+    data: Dataframe
+        Test data on which to evaluate the model.
+
+    probabilities: str
+        Column to interpret as the probabilities column.
+
+    verbose: bool
+        Set to true for a more verbose output.
+    """
+
+    model = _get_model(model)
+    if verbose:
+        print("")
+        print("Other Framework\t\tPredicted")
+
+    max_probability_error, num_key_mismatch = 0, 0
+
+    for _, row in data.iterrows():
+        input_dict = {k: v for k, v in dict(row).items() if k != probabilities}
+        _remove_invalid_keys(input_dict, model)
+        predicted_values = model.predict(input_dict)[_to_unicode(probabilities)]
+        other_values = row[probabilities]
+
+        if set(predicted_values.keys()) != set(other_values.keys()):
+            if verbose:
+                print(
+                    "Different classes: ",
+                    str(predicted_values.keys()),
+                    str(other_values.keys()),
+                )
+            num_key_mismatch += 1
+            continue
+
+        for cur_class, cur_predicted_class_values in predicted_values.items():
+            delta = cur_predicted_class_values - other_values[cur_class]
+            if verbose:
+                print(delta, cur_predicted_class_values, other_values[cur_class])
+
+            max_probability_error = max(abs(delta), max_probability_error)
+
+        if verbose:
+            print("")
+
+    ret = {
+        "num_samples": len(data),
+        "max_probability_error": max_probability_error,
+        "num_key_mismatch": num_key_mismatch,
+    }
+
+    if verbose:
+        print("results: {}".format(ret))
+
+    return ret
+
+
+def rename_feature(
+    spec, current_name, new_name, rename_inputs=True, rename_outputs=True
+):
+    """
+    Rename a feature in the specification.
+
+    Parameters
+    ----------
+    spec: Model_pb
+        The specification containing the feature to rename.
+
+    current_name: str
+        Current name of the feature. If this feature doesn't exist, the rename
+        is a no-op.
+
+    new_name: str
+        New name of the feature.
+
+    rename_inputs: bool
+        Search for `current_name` only in the input features (i.e., ignore
+        output features).
+
+    rename_outputs: bool
+        Search for `current_name` only in the output features (i.e., ignore
+        input features).
+
+    Examples
+    --------
+    .. sourcecode:: python
+
+        # In-place rename of spec
+        model = MLModel("model.mlmodel")
+        spec = model.get_spec()
+        coremltools.utils.rename_feature(spec, "old_feature", "new_feature_name")
+        # Re-initialize the model
+        model = MLModel(spec)
+        model.save("model.mlmodel")
+
+        # Rename a feature when the model is an mlprogram; in that case,
+        # the weights are stored outside of the spec
+        model = coremltools.convert(torch_model, convert_to="mlprogram")
+        spec = model.get_spec()
+        # Print info about inputs and outputs
+        print(spec.description)
+        coremltools.utils.rename_feature(spec, "old_feature", "new_feature_name")
+        # Re-initialize the model
+        model = MLModel(spec, weights_dir=model.weights_dir)
+        model.save("model.mlpackage")
+    """
+
+    if not rename_inputs and not rename_outputs:
+        return
+
+    changed_input = False
+    changed_output = False
+
+    if rename_inputs:
+        for input in spec.description.input:
+            if input.name == current_name:
+                input.name = new_name
+                changed_input = True
+
+    if rename_outputs:
+        for output in spec.description.output:
+            if output.name == current_name:
+                output.name = new_name
+                changed_output = True
+
+        if spec.description.predictedFeatureName == current_name:
+            spec.description.predictedFeatureName = new_name
+
+        if spec.description.predictedProbabilitiesName == current_name:
+            spec.description.predictedProbabilitiesName = new_name
+
+    if not changed_input and not changed_output:
+        return
+
+    # Rename internally in NN model
+    nn = None
+    for nn_type in [
+        "neuralNetwork",
+        "neuralNetworkClassifier",
+        "neuralNetworkRegressor",
+    ]:
+        if spec.HasField(nn_type):
+            nn = getattr(spec, nn_type)
+
+    if nn is not None:
+        for layer in nn.layers:
+            if rename_inputs:
+                for index, name in enumerate(layer.input):
+                    if name == current_name:
+                        layer.input[index] = new_name
+            if rename_outputs:
+                for index, name in enumerate(layer.output):
+                    if name == current_name:
+                        layer.output[index] = new_name
+
+        if rename_inputs:
+            for preprocess_params in nn.preprocessing:
+                if preprocess_params.featureName == current_name:
+                    preprocess_params.featureName = new_name
+
+        if spec.HasField("neuralNetworkClassifier"):
+            if nn.labelProbabilityLayerName == current_name:
+                nn.labelProbabilityLayerName = new_name
+
+    # Rename internally for feature vectorizer
+    if spec.HasField("featureVectorizer") and rename_inputs:
+        for input in spec.featureVectorizer.inputList:
+            if input.inputColumn == current_name:
+                input.inputColumn = new_name
+                changed_input = True
+
+    # Rename for pipeline models
+    pipeline = None
+    if spec.HasField("pipeline"):
+        pipeline = spec.pipeline
+    elif spec.HasField("pipelineClassifier"):
+        pipeline = spec.pipelineClassifier.pipeline
+    elif spec.HasField("pipelineRegressor"):
+        pipeline = spec.pipelineRegressor.pipeline
+
+    if pipeline is not None:
+        for index, model in enumerate(pipeline.models):
+            rename_feature(
+                model,
+                current_name,
+                new_name,
+                rename_inputs or (index != 0),
+                rename_outputs or (index < len(spec.pipeline.models)),
+            )
+
+    # Rename for mlProgram
+    if spec.HasField("mlProgram"):
+        new_name_sanitized = _NameSanitizer().sanitize_name(new_name)
+        if new_name != new_name_sanitized:
+            raise ValueError("Input/output names for ML Program must be of the format [a-zA-Z_][a-zA-Z0-9_]*. "
" + "Provided feature name, \"{}\" does not satisfy these requirements.".format(new_name)) + mil = spec.mlProgram + for function in mil.functions.values(): + for name_value_type in function.inputs: + if name_value_type.name == current_name: + name_value_type.name = new_name + for block in function.block_specializations.values(): + for i, out_name in enumerate(block.outputs): + if out_name == current_name: + block.outputs[i] = new_name + for op in block.operations: + for argument in op.inputs.values(): + for binding in argument.arguments: + if binding.HasField("name"): + if binding.name == current_name: + binding.name = new_name + for name_value_type in op.outputs: + if name_value_type.name == current_name: + name_value_type.name = new_name + + +def _sanitize_value(x): + """ + Performs cleaning steps on the data so various type comparisons can + be performed correctly. + """ + if isinstance(x, (str, int, float,)): + return x + elif _HAS_SCIPY and _sp.issparse(x): + return x.todense() + elif isinstance(x, _np.ndarray): + return x + elif isinstance(x, tuple): + return (_sanitize_value(v) for v in x) + elif isinstance(x, list): + return [_sanitize_value(v) for v in x] + elif isinstance(x, dict): + return dict((_sanitize_value(k), _sanitize_value(v)) for k, v in x.items()) + else: + assert False, str(x) + + +def _element_equal(x, y): + """ + Performs a robust equality test between elements. + """ + if isinstance(x, _np.ndarray) or isinstance(y, _np.ndarray): + try: + return (abs(_np.asarray(x) - _np.asarray(y)) < 1e-5).all() + except: + return False + elif isinstance(x, dict): + return ( + isinstance(y, dict) + and _element_equal(x.keys(), y.keys()) + and all(_element_equal(x[k], y[k]) for k in x.keys()) + ) + elif isinstance(x, float): + return abs(x - y) < 1e-5 * (abs(x) + abs(y)) + elif isinstance(x, (list, tuple)): + return x == y + else: + return bool(x == y) + + +def evaluate_transformer(model, input_data, reference_output, verbose=False): + """ + Evaluate a transformer specification for testing. + + Parameters + ---------- + spec: list of str or list of MLModel + File from where to load the Model from (OR) a loaded + version of MLModel. + + input_data: list of dict + Test data on which to evaluate the models. + + reference_output: list of dict + Expected results for the model. + + verbose: bool + Verbosity levels of the predictions. + + Examples + -------- + .. 
+    .. sourcecode:: python
+
+        input_data = [{"input_1": 1, "input_2": 2}, {"input_1": 3, "input_2": 3}]
+        expected_output = [{"input_1": 2.5, "input_2": 2.0}, {"input_1": 1.3, "input_2": 2.3}]
+        metrics = coremltools.utils.evaluate_transformer(
+            scaler_spec, input_data, expected_output
+        )
+
+    See Also
+    --------
+    evaluate_regressor, evaluate_classifier
+    """
+    model = _get_model(model)
+    if verbose:
+        print(model)
+        print("")
+        print("Other Framework\t\tPredicted")
+
+    num_errors = 0
+    for index, row in enumerate(input_data):
+        assert isinstance(row, dict)
+        sanitized_row = _sanitize_value(row)
+        ref_data = _sanitize_value(reference_output[index])
+        if verbose:
+            print("Input:\n\t", str(row))
+            print("Correct output:\n\t", str(ref_data))
+
+        predicted = _sanitize_value(model.predict(sanitized_row))
+
+        assert isinstance(ref_data, dict)
+        assert isinstance(predicted, dict)
+
+        predicted_trimmed = dict((k, predicted[k]) for k in ref_data.keys())
+
+        if verbose:
+            print("Predicted:\n\t", str(predicted_trimmed))
+
+        if not _element_equal(predicted_trimmed, ref_data):
+            num_errors += 1
+
+    ret = {"num_samples": len(input_data), "num_errors": num_errors}
+
+    if verbose:
+        print("results: {}".format(ret))
+    return ret
+
+
+def _has_custom_layer(spec):
+    """
+    Returns True if the given protobuf specification has a custom layer, and False otherwise.
+
+    Parameters
+    ----------
+    spec: mlmodel spec
+
+    Returns
+    -------
+    True if the protobuf specification contains a neural network with a custom layer, False otherwise.
+    """
+
+    layers = _get_nn_layers(spec)
+    for layer in layers:
+        if layer.WhichOneof("layer") == "custom":
+            return True
+
+    return False
+
+
+def _get_custom_layer_names(spec):
+    """
+    Returns the set of className fields of custom layers that appear in the given protobuf spec.
+
+    Parameters
+    ----------
+    spec: mlmodel spec
+
+    Returns
+    -------
+    set(str)
+        A set of unique className fields of custom layers that appear in the model.
+    """
+    layers = _get_nn_layers(spec)
+    layers_out = set()
+    for layer in layers:
+        if layer.WhichOneof("layer") == "custom":
+            layers_out.add(layer.custom.className)
+
+    return layers_out
+
+
+def _get_custom_layers(spec):
+    """
+    Returns a list of all neural network custom layers in the spec.
+
+    Parameters
+    ----------
+    spec: mlmodel spec
+
+    Returns
+    -------
+    [NN layer]
+        A list of custom layer implementations.
+    """
+    layers = _get_nn_layers(spec)
+    layers_out = []
+    for layer in layers:
+        if layer.WhichOneof("layer") == "custom":
+            layers_out.append(layer)
+
+    return layers_out
+
+
+def _replace_custom_layer_name(spec, oldname, newname):
+    """
+    Substitutes newname for oldname in the className field of custom layers. If there are no custom layers, or no
+    layers with className=oldname, then the spec is unchanged.
+
+    Parameters
+    ----------
+    spec: mlmodel spec
+
+    oldname: str
+        The custom layer className to be replaced.
+
+    newname: str
+        The new className value to replace oldname.
+
+    Returns
+    -------
+    None. The spec is modified in place.
+    """
+    layers = _get_custom_layers(spec)
+    for layer in layers:
+        if layer.custom.className == oldname:
+            layer.custom.className = newname
+
+
+def _is_macos():
+    """Returns True if the current platform is macOS, False otherwise."""
+    return _sys.platform == "darwin"
+
+
+@_lru_cache()
+def _macos_version():
+    """
+    Returns the macOS version as a tuple of integers, making it easy to do
+    proper version comparisons. On non-Macs, it returns an empty tuple.
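+
+    For example (a sketch; the comparison value is arbitrary, and the result
+    depends on the host machine):
+
+    .. sourcecode:: python
+
+        if _macos_version() >= (12, 0):
+            ...  # take a code path that requires macOS 12 or later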
+ """ + if _is_macos(): + try: + ver_str = _subprocess.run(["sw_vers", "-productVersion"], stdout=_subprocess.PIPE).stdout.decode('utf-8').strip('\n') + return tuple([int(v) for v in ver_str.split(".")]) + except: + raise Exception("Unable to detemine the macOS version") + return () + + +def _python_version(): + """ + Return python version as a tuple of integers + """ + version = _sys.version.split(" ")[0] + version = list(map(int, list(version.split(".")))) + return tuple(version) + + +def _get_feature(spec, feature_name): + for input_feature in spec.description.input: + if input_feature.name == feature_name: + return input_feature + + for output_feature in spec.description.output: + if output_feature.name == feature_name: + return output_feature + + raise Exception("Feature with name {} does not exist".format(feature_name)) + + +def _get_input_names(spec): + """ + Returns a list of the names of the inputs to this model. + :param spec: The model protobuf specification + :return: list of str A list of input feature names + """ + retval = [feature.name for feature in spec.description.input] + return retval + + +def convert_double_to_float_multiarray_type(spec): + """ + Convert all double multiarrays feature descriptions (input, output, training input) + to float multiarrays + + Parameters + ---------- + spec: Model_pb + The specification containing the multiarrays types to convert + + Examples + -------- + .. sourcecode:: python + + # In-place convert multiarray type of spec + spec = mlmodel.get_spec() + coremltools.utils.convert_double_to_float_multiarray_type(spec) + model = coremltools.models.MLModel(spec) + """ + + def _convert_to_float(feature): + if feature.type.HasField("multiArrayType"): + if ( + feature.type.multiArrayType.dataType + == _Model_pb2.ArrayFeatureType.DOUBLE + ): + feature.type.multiArrayType.dataType = ( + _Model_pb2.ArrayFeatureType.FLOAT32 + ) + + for feature in spec.description.input: + _convert_to_float(feature) + + for feature in spec.description.output: + _convert_to_float(feature) + + for feature in spec.description.trainingInput: + _convert_to_float(feature) + + if spec.WhichOneof("Type") == "pipeline": + for model_spec in spec.pipeline.models: + convert_double_to_float_multiarray_type(model_spec) + + +def make_pipeline(*models): + """ + Makes a pipeline with the given models. + + Parameters + ---------- + *models - two or more instances of ct.models.MLModel + + Returns + ------- + ct.models.MLModel + + Examples + -------- + my_model1 = ct.models.MLModel('/tmp/m1.mlpackage') + my_model2 = ct.models.MLModel('/tmp/m2.mlmodel') + + my_pipeline_model = ct.utils.make_pipeline(my_model1, my_model2) + """ + + def updateBlobFileName(proto_message, new_path): + if type(proto_message) == _mil_proto.Value: + # Value protobuf message. This is what might need to be updated. 
+ if proto_message.WhichOneof('value') == 'blobFileValue': + assert proto_message.blobFileValue.fileName == "@model_path/weights/weight.bin" + proto_message.blobFileValue.fileName = new_path + elif hasattr(proto_message, 'ListFields'): + # Normal protobuf message + for f in proto_message.ListFields(): + updateBlobFileName(f[1], new_path) + elif hasattr(proto_message, 'values'): + # Protobuf map + for v in proto_message.values(): + updateBlobFileName(v, new_path) + elif isinstance(proto_message, _Iterable) and not isinstance(proto_message, str): + # Repeated protobuf message + for e in proto_message: + updateBlobFileName(e, new_path) + + + assert len(models) > 1 + input_specs = list(map(lambda m: m.get_spec(), models)) + + pipeline_spec = _ct.proto.Model_pb2.Model() + pipeline_spec.specificationVersion = max( + map(lambda spec: spec.specificationVersion, input_specs) + ) + + # Set pipeline input + pipeline_spec.description.input.MergeFrom( + input_specs[0].description.input + ) + + # Set pipeline output + pipeline_spec.description.output.MergeFrom( + input_specs[-1].description.output + ) + + # Map input shapes to output shapes + var_name_to_type = {} + for i in range(len(input_specs) - 1): + for j in input_specs[i + 1].description.input: + var_name_to_type[j.name] = j.type + + for j in input_specs[i].description.output: + # If shape is already present, don't override it + if j.type.WhichOneof('Type') == 'multiArrayType' and len(j.type.multiArrayType.shape) != 0: + continue + + if j.name in var_name_to_type: + j.type.CopyFrom(var_name_to_type[j.name]) + + # Update each model's spec to have a unique weight filename + for i, cur_spec in enumerate(input_specs): + if cur_spec.WhichOneof("Type") == "mlProgram": + new_file_path = f"@model_path/weights/{i}-weight.bin" + updateBlobFileName(cur_spec.mlProgram, new_file_path) + pipeline_spec.pipeline.models.append(cur_spec) + + mlpackage_path = _create_mlpackage(pipeline_spec) + dst = mlpackage_path + '/Data/' + _MLPACKAGE_AUTHOR_NAME + '/' + _WEIGHTS_DIR_NAME + _os.mkdir(dst) + + # Copy and rename each model's weight file + for i, cur_model in enumerate(models): + if cur_model.weights_dir is not None: + weight_file_path = cur_model.weights_dir + "/" + _WEIGHTS_FILE_NAME + if _os.path.exists(weight_file_path): + _shutil.copyfile(weight_file_path, dst + f"/{i}-weight.bin") + + return _ct.models.MLModel(pipeline_spec, weights_dir=dst) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/ArrayFeatureExtractor_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/ArrayFeatureExtractor_pb2.py new file mode 100644 index 00000000..a94fcc18 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/ArrayFeatureExtractor_pb2.py @@ -0,0 +1,71 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
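+#
+# Although this generated descriptor code is not meant to be read or edited,
+# the message classes it registers behave like any other protobuf message.
+# A minimal usage sketch (the index values are illustrative only):
+#
+#     from coremltools.proto import ArrayFeatureExtractor_pb2
+#     extractor = ArrayFeatureExtractor_pb2.ArrayFeatureExtractor()
+#     extractor.extractIndex.extend([0, 2])  # repeated uint64 field
+#     payload = extractor.SerializeToString()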
+# source: ArrayFeatureExtractor.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='ArrayFeatureExtractor.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x1b\x41rrayFeatureExtractor.proto\x12\x14\x43oreML.Specification\"-\n\x15\x41rrayFeatureExtractor\x12\x14\n\x0c\x65xtractIndex\x18\x01 \x03(\x04\x42\x02H\x03\x62\x06proto3') +) + + + + +_ARRAYFEATUREEXTRACTOR = _descriptor.Descriptor( + name='ArrayFeatureExtractor', + full_name='CoreML.Specification.ArrayFeatureExtractor', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='extractIndex', full_name='CoreML.Specification.ArrayFeatureExtractor.extractIndex', index=0, + number=1, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=53, + serialized_end=98, +) + +DESCRIPTOR.message_types_by_name['ArrayFeatureExtractor'] = _ARRAYFEATUREEXTRACTOR +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +ArrayFeatureExtractor = _reflection.GeneratedProtocolMessageType('ArrayFeatureExtractor', (_message.Message,), dict( + DESCRIPTOR = _ARRAYFEATUREEXTRACTOR, + __module__ = 'ArrayFeatureExtractor_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ArrayFeatureExtractor) + )) +_sym_db.RegisterMessage(ArrayFeatureExtractor) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/AudioFeaturePrint_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/AudioFeaturePrint_pb2.py new file mode 100644 index 00000000..b48e078e --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/AudioFeaturePrint_pb2.py @@ -0,0 +1,142 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: AudioFeaturePrint.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='AudioFeaturePrint.proto', + package='CoreML.Specification.CoreMLModels', + syntax='proto3', + serialized_pb=_b('\n\x17\x41udioFeaturePrint.proto\x12!CoreML.Specification.CoreMLModels\"\x9d\x02\n\x11\x41udioFeaturePrint\x12K\n\x05sound\x18\x14 \x01(\x0b\x32:.CoreML.Specification.CoreMLModels.AudioFeaturePrint.SoundH\x00\x1a\xa1\x01\n\x05Sound\x12X\n\x07version\x18\x01 \x01(\x0e\x32G.CoreML.Specification.CoreMLModels.AudioFeaturePrint.Sound.SoundVersion\">\n\x0cSoundVersion\x12\x19\n\x15SOUND_VERSION_INVALID\x10\x00\x12\x13\n\x0fSOUND_VERSION_1\x10\x01\x42\x17\n\x15\x41udioFeaturePrintTypeB\x02H\x03\x62\x06proto3') +) + + + +_AUDIOFEATUREPRINT_SOUND_SOUNDVERSION = _descriptor.EnumDescriptor( + name='SoundVersion', + full_name='CoreML.Specification.CoreMLModels.AudioFeaturePrint.Sound.SoundVersion', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='SOUND_VERSION_INVALID', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SOUND_VERSION_1', index=1, number=1, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=261, + serialized_end=323, +) +_sym_db.RegisterEnumDescriptor(_AUDIOFEATUREPRINT_SOUND_SOUNDVERSION) + + +_AUDIOFEATUREPRINT_SOUND = _descriptor.Descriptor( + name='Sound', + full_name='CoreML.Specification.CoreMLModels.AudioFeaturePrint.Sound', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='version', full_name='CoreML.Specification.CoreMLModels.AudioFeaturePrint.Sound.version', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _AUDIOFEATUREPRINT_SOUND_SOUNDVERSION, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=162, + serialized_end=323, +) + +_AUDIOFEATUREPRINT = _descriptor.Descriptor( + name='AudioFeaturePrint', + full_name='CoreML.Specification.CoreMLModels.AudioFeaturePrint', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='sound', full_name='CoreML.Specification.CoreMLModels.AudioFeaturePrint.sound', index=0, + number=20, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_AUDIOFEATUREPRINT_SOUND, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='AudioFeaturePrintType', full_name='CoreML.Specification.CoreMLModels.AudioFeaturePrint.AudioFeaturePrintType', + index=0, containing_type=None, fields=[]), + ], + serialized_start=63, + 
serialized_end=348, +) + +_AUDIOFEATUREPRINT_SOUND.fields_by_name['version'].enum_type = _AUDIOFEATUREPRINT_SOUND_SOUNDVERSION +_AUDIOFEATUREPRINT_SOUND.containing_type = _AUDIOFEATUREPRINT +_AUDIOFEATUREPRINT_SOUND_SOUNDVERSION.containing_type = _AUDIOFEATUREPRINT_SOUND +_AUDIOFEATUREPRINT.fields_by_name['sound'].message_type = _AUDIOFEATUREPRINT_SOUND +_AUDIOFEATUREPRINT.oneofs_by_name['AudioFeaturePrintType'].fields.append( + _AUDIOFEATUREPRINT.fields_by_name['sound']) +_AUDIOFEATUREPRINT.fields_by_name['sound'].containing_oneof = _AUDIOFEATUREPRINT.oneofs_by_name['AudioFeaturePrintType'] +DESCRIPTOR.message_types_by_name['AudioFeaturePrint'] = _AUDIOFEATUREPRINT +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +AudioFeaturePrint = _reflection.GeneratedProtocolMessageType('AudioFeaturePrint', (_message.Message,), dict( + + Sound = _reflection.GeneratedProtocolMessageType('Sound', (_message.Message,), dict( + DESCRIPTOR = _AUDIOFEATUREPRINT_SOUND, + __module__ = 'AudioFeaturePrint_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CoreMLModels.AudioFeaturePrint.Sound) + )) + , + DESCRIPTOR = _AUDIOFEATUREPRINT, + __module__ = 'AudioFeaturePrint_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CoreMLModels.AudioFeaturePrint) + )) +_sym_db.RegisterMessage(AudioFeaturePrint) +_sym_db.RegisterMessage(AudioFeaturePrint.Sound) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/BayesianProbitRegressor_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/BayesianProbitRegressor_pb2.py new file mode 100644 index 00000000..eeaad0a0 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/BayesianProbitRegressor_pb2.py @@ -0,0 +1,283 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: BayesianProbitRegressor.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='BayesianProbitRegressor.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x1d\x42\x61yesianProbitRegressor.proto\x12\x14\x43oreML.Specification\"\xa0\x06\n\x17\x42\x61yesianProbitRegressor\x12\x18\n\x10numberOfFeatures\x18\x01 \x01(\r\x12\x44\n\x04\x62ias\x18\x02 \x01(\x0b\x32\x36.CoreML.Specification.BayesianProbitRegressor.Gaussian\x12M\n\x08\x66\x65\x61tures\x18\x03 \x03(\x0b\x32;.CoreML.Specification.BayesianProbitRegressor.FeatureWeight\x12\"\n\x1aregressionInputFeatureName\x18\n \x01(\t\x12 \n\x18optimismInputFeatureName\x18\x0b \x01(\t\x12%\n\x1dsamplingScaleInputFeatureName\x18\x0c \x01(\t\x12*\n\"samplingTruncationInputFeatureName\x18\r \x01(\t\x12\x1d\n\x15meanOutputFeatureName\x18\x14 \x01(\t\x12!\n\x19varianceOutputFeatureName\x18\x15 \x01(\t\x12/\n\'pessimisticProbabilityOutputFeatureName\x18\x16 \x01(\t\x12+\n#sampledProbabilityOutputFeatureName\x18\x17 \x01(\t\x1a+\n\x08Gaussian\x12\x0c\n\x04mean\x18\x01 \x01(\x01\x12\x11\n\tprecision\x18\x02 \x01(\x01\x1ay\n\x12\x46\x65\x61tureValueWeight\x12\x14\n\x0c\x66\x65\x61tureValue\x18\x01 \x01(\r\x12M\n\rfeatureWeight\x18\x02 \x01(\x0b\x32\x36.CoreML.Specification.BayesianProbitRegressor.Gaussian\x1au\n\rFeatureWeight\x12\x11\n\tfeatureId\x18\x01 \x01(\r\x12Q\n\x07weights\x18\x02 \x03(\x0b\x32@.CoreML.Specification.BayesianProbitRegressor.FeatureValueWeightB\x02H\x03\x62\x06proto3') +) + + + + +_BAYESIANPROBITREGRESSOR_GAUSSIAN = _descriptor.Descriptor( + name='Gaussian', + full_name='CoreML.Specification.BayesianProbitRegressor.Gaussian', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='mean', full_name='CoreML.Specification.BayesianProbitRegressor.Gaussian.mean', index=0, + number=1, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='precision', full_name='CoreML.Specification.BayesianProbitRegressor.Gaussian.precision', index=1, + number=2, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=571, + serialized_end=614, +) + +_BAYESIANPROBITREGRESSOR_FEATUREVALUEWEIGHT = _descriptor.Descriptor( + name='FeatureValueWeight', + full_name='CoreML.Specification.BayesianProbitRegressor.FeatureValueWeight', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='featureValue', full_name='CoreML.Specification.BayesianProbitRegressor.FeatureValueWeight.featureValue', index=0, + number=1, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + 
message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='featureWeight', full_name='CoreML.Specification.BayesianProbitRegressor.FeatureValueWeight.featureWeight', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=616, + serialized_end=737, +) + +_BAYESIANPROBITREGRESSOR_FEATUREWEIGHT = _descriptor.Descriptor( + name='FeatureWeight', + full_name='CoreML.Specification.BayesianProbitRegressor.FeatureWeight', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='featureId', full_name='CoreML.Specification.BayesianProbitRegressor.FeatureWeight.featureId', index=0, + number=1, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='weights', full_name='CoreML.Specification.BayesianProbitRegressor.FeatureWeight.weights', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=739, + serialized_end=856, +) + +_BAYESIANPROBITREGRESSOR = _descriptor.Descriptor( + name='BayesianProbitRegressor', + full_name='CoreML.Specification.BayesianProbitRegressor', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='numberOfFeatures', full_name='CoreML.Specification.BayesianProbitRegressor.numberOfFeatures', index=0, + number=1, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='bias', full_name='CoreML.Specification.BayesianProbitRegressor.bias', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='features', full_name='CoreML.Specification.BayesianProbitRegressor.features', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='regressionInputFeatureName', full_name='CoreML.Specification.BayesianProbitRegressor.regressionInputFeatureName', index=3, + number=10, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='optimismInputFeatureName', 
full_name='CoreML.Specification.BayesianProbitRegressor.optimismInputFeatureName', index=4, + number=11, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='samplingScaleInputFeatureName', full_name='CoreML.Specification.BayesianProbitRegressor.samplingScaleInputFeatureName', index=5, + number=12, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='samplingTruncationInputFeatureName', full_name='CoreML.Specification.BayesianProbitRegressor.samplingTruncationInputFeatureName', index=6, + number=13, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='meanOutputFeatureName', full_name='CoreML.Specification.BayesianProbitRegressor.meanOutputFeatureName', index=7, + number=20, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='varianceOutputFeatureName', full_name='CoreML.Specification.BayesianProbitRegressor.varianceOutputFeatureName', index=8, + number=21, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='pessimisticProbabilityOutputFeatureName', full_name='CoreML.Specification.BayesianProbitRegressor.pessimisticProbabilityOutputFeatureName', index=9, + number=22, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sampledProbabilityOutputFeatureName', full_name='CoreML.Specification.BayesianProbitRegressor.sampledProbabilityOutputFeatureName', index=10, + number=23, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_BAYESIANPROBITREGRESSOR_GAUSSIAN, _BAYESIANPROBITREGRESSOR_FEATUREVALUEWEIGHT, _BAYESIANPROBITREGRESSOR_FEATUREWEIGHT, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=56, + serialized_end=856, +) + +_BAYESIANPROBITREGRESSOR_GAUSSIAN.containing_type = _BAYESIANPROBITREGRESSOR +_BAYESIANPROBITREGRESSOR_FEATUREVALUEWEIGHT.fields_by_name['featureWeight'].message_type = _BAYESIANPROBITREGRESSOR_GAUSSIAN +_BAYESIANPROBITREGRESSOR_FEATUREVALUEWEIGHT.containing_type = _BAYESIANPROBITREGRESSOR +_BAYESIANPROBITREGRESSOR_FEATUREWEIGHT.fields_by_name['weights'].message_type = _BAYESIANPROBITREGRESSOR_FEATUREVALUEWEIGHT +_BAYESIANPROBITREGRESSOR_FEATUREWEIGHT.containing_type = _BAYESIANPROBITREGRESSOR 
+_BAYESIANPROBITREGRESSOR.fields_by_name['bias'].message_type = _BAYESIANPROBITREGRESSOR_GAUSSIAN +_BAYESIANPROBITREGRESSOR.fields_by_name['features'].message_type = _BAYESIANPROBITREGRESSOR_FEATUREWEIGHT +DESCRIPTOR.message_types_by_name['BayesianProbitRegressor'] = _BAYESIANPROBITREGRESSOR +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +BayesianProbitRegressor = _reflection.GeneratedProtocolMessageType('BayesianProbitRegressor', (_message.Message,), dict( + + Gaussian = _reflection.GeneratedProtocolMessageType('Gaussian', (_message.Message,), dict( + DESCRIPTOR = _BAYESIANPROBITREGRESSOR_GAUSSIAN, + __module__ = 'BayesianProbitRegressor_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.BayesianProbitRegressor.Gaussian) + )) + , + + FeatureValueWeight = _reflection.GeneratedProtocolMessageType('FeatureValueWeight', (_message.Message,), dict( + DESCRIPTOR = _BAYESIANPROBITREGRESSOR_FEATUREVALUEWEIGHT, + __module__ = 'BayesianProbitRegressor_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.BayesianProbitRegressor.FeatureValueWeight) + )) + , + + FeatureWeight = _reflection.GeneratedProtocolMessageType('FeatureWeight', (_message.Message,), dict( + DESCRIPTOR = _BAYESIANPROBITREGRESSOR_FEATUREWEIGHT, + __module__ = 'BayesianProbitRegressor_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.BayesianProbitRegressor.FeatureWeight) + )) + , + DESCRIPTOR = _BAYESIANPROBITREGRESSOR, + __module__ = 'BayesianProbitRegressor_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.BayesianProbitRegressor) + )) +_sym_db.RegisterMessage(BayesianProbitRegressor) +_sym_db.RegisterMessage(BayesianProbitRegressor.Gaussian) +_sym_db.RegisterMessage(BayesianProbitRegressor.FeatureValueWeight) +_sym_db.RegisterMessage(BayesianProbitRegressor.FeatureWeight) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/CategoricalMapping_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/CategoricalMapping_pb2.py new file mode 100644 index 00000000..25e4daf8 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/CategoricalMapping_pb2.py @@ -0,0 +1,120 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: CategoricalMapping.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . 
import DataStructures_pb2 as DataStructures__pb2 +try: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2 + +from .DataStructures_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='CategoricalMapping.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x18\x43\x61tegoricalMapping.proto\x12\x14\x43oreML.Specification\x1a\x14\x44\x61taStructures.proto\"\xe7\x01\n\x12\x43\x61tegoricalMapping\x12\x42\n\x10stringToInt64Map\x18\x01 \x01(\x0b\x32&.CoreML.Specification.StringToInt64MapH\x00\x12\x42\n\x10int64ToStringMap\x18\x02 \x01(\x0b\x32&.CoreML.Specification.Int64ToStringMapH\x00\x12\x12\n\x08strValue\x18\x65 \x01(\tH\x01\x12\x14\n\nint64Value\x18\x66 \x01(\x03H\x01\x42\r\n\x0bMappingTypeB\x10\n\x0eValueOnUnknownB\x02H\x03P\x00\x62\x06proto3') + , + dependencies=[DataStructures__pb2.DESCRIPTOR,], + public_dependencies=[DataStructures__pb2.DESCRIPTOR,]) + + + + +_CATEGORICALMAPPING = _descriptor.Descriptor( + name='CategoricalMapping', + full_name='CoreML.Specification.CategoricalMapping', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='stringToInt64Map', full_name='CoreML.Specification.CategoricalMapping.stringToInt64Map', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='int64ToStringMap', full_name='CoreML.Specification.CategoricalMapping.int64ToStringMap', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='strValue', full_name='CoreML.Specification.CategoricalMapping.strValue', index=2, + number=101, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='int64Value', full_name='CoreML.Specification.CategoricalMapping.int64Value', index=3, + number=102, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='MappingType', full_name='CoreML.Specification.CategoricalMapping.MappingType', + index=0, containing_type=None, fields=[]), + _descriptor.OneofDescriptor( + name='ValueOnUnknown', full_name='CoreML.Specification.CategoricalMapping.ValueOnUnknown', + index=1, containing_type=None, fields=[]), + ], + serialized_start=73, + serialized_end=304, +) + +_CATEGORICALMAPPING.fields_by_name['stringToInt64Map'].message_type = DataStructures__pb2._STRINGTOINT64MAP +_CATEGORICALMAPPING.fields_by_name['int64ToStringMap'].message_type = DataStructures__pb2._INT64TOSTRINGMAP +_CATEGORICALMAPPING.oneofs_by_name['MappingType'].fields.append( + _CATEGORICALMAPPING.fields_by_name['stringToInt64Map']) +_CATEGORICALMAPPING.fields_by_name['stringToInt64Map'].containing_oneof = 
_CATEGORICALMAPPING.oneofs_by_name['MappingType'] +_CATEGORICALMAPPING.oneofs_by_name['MappingType'].fields.append( + _CATEGORICALMAPPING.fields_by_name['int64ToStringMap']) +_CATEGORICALMAPPING.fields_by_name['int64ToStringMap'].containing_oneof = _CATEGORICALMAPPING.oneofs_by_name['MappingType'] +_CATEGORICALMAPPING.oneofs_by_name['ValueOnUnknown'].fields.append( + _CATEGORICALMAPPING.fields_by_name['strValue']) +_CATEGORICALMAPPING.fields_by_name['strValue'].containing_oneof = _CATEGORICALMAPPING.oneofs_by_name['ValueOnUnknown'] +_CATEGORICALMAPPING.oneofs_by_name['ValueOnUnknown'].fields.append( + _CATEGORICALMAPPING.fields_by_name['int64Value']) +_CATEGORICALMAPPING.fields_by_name['int64Value'].containing_oneof = _CATEGORICALMAPPING.oneofs_by_name['ValueOnUnknown'] +DESCRIPTOR.message_types_by_name['CategoricalMapping'] = _CATEGORICALMAPPING +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +CategoricalMapping = _reflection.GeneratedProtocolMessageType('CategoricalMapping', (_message.Message,), dict( + DESCRIPTOR = _CATEGORICALMAPPING, + __module__ = 'CategoricalMapping_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CategoricalMapping) + )) +_sym_db.RegisterMessage(CategoricalMapping) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/ClassConfidenceThresholding_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/ClassConfidenceThresholding_pb2.py new file mode 100644 index 00000000..8268f33e --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/ClassConfidenceThresholding_pb2.py @@ -0,0 +1,80 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: ClassConfidenceThresholding.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . 
import DataStructures_pb2 as DataStructures__pb2 +try: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2 + +from .DataStructures_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='ClassConfidenceThresholding.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n!ClassConfidenceThresholding.proto\x12\x14\x43oreML.Specification\x1a\x14\x44\x61taStructures.proto\"h\n\x1b\x43lassConfidenceThresholding\x12I\n\x15precisionRecallCurves\x18\x64 \x03(\x0b\x32*.CoreML.Specification.PrecisionRecallCurveB\x02H\x03P\x00\x62\x06proto3') + , + dependencies=[DataStructures__pb2.DESCRIPTOR,], + public_dependencies=[DataStructures__pb2.DESCRIPTOR,]) + + + + +_CLASSCONFIDENCETHRESHOLDING = _descriptor.Descriptor( + name='ClassConfidenceThresholding', + full_name='CoreML.Specification.ClassConfidenceThresholding', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='precisionRecallCurves', full_name='CoreML.Specification.ClassConfidenceThresholding.precisionRecallCurves', index=0, + number=100, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=81, + serialized_end=185, +) + +_CLASSCONFIDENCETHRESHOLDING.fields_by_name['precisionRecallCurves'].message_type = DataStructures__pb2._PRECISIONRECALLCURVE +DESCRIPTOR.message_types_by_name['ClassConfidenceThresholding'] = _CLASSCONFIDENCETHRESHOLDING +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +ClassConfidenceThresholding = _reflection.GeneratedProtocolMessageType('ClassConfidenceThresholding', (_message.Message,), dict( + DESCRIPTOR = _CLASSCONFIDENCETHRESHOLDING, + __module__ = 'ClassConfidenceThresholding_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ClassConfidenceThresholding) + )) +_sym_db.RegisterMessage(ClassConfidenceThresholding) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/CustomModel_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/CustomModel_pb2.py new file mode 100644 index 00000000..6dcb25a6 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/CustomModel_pb2.py @@ -0,0 +1,230 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: CustomModel.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='CustomModel.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x11\x43ustomModel.proto\x12\x14\x43oreML.Specification\"\x8d\x03\n\x0b\x43ustomModel\x12\x11\n\tclassName\x18\n \x01(\t\x12\x45\n\nparameters\x18\x1e \x03(\x0b\x32\x31.CoreML.Specification.CustomModel.ParametersEntry\x12\x13\n\x0b\x64\x65scription\x18( \x01(\t\x1a\xa2\x01\n\x15\x43ustomModelParamValue\x12\x15\n\x0b\x64oubleValue\x18\n \x01(\x01H\x00\x12\x15\n\x0bstringValue\x18\x14 \x01(\tH\x00\x12\x12\n\x08intValue\x18\x1e \x01(\x05H\x00\x12\x13\n\tlongValue\x18( \x01(\x03H\x00\x12\x13\n\tboolValue\x18\x32 \x01(\x08H\x00\x12\x14\n\nbytesValue\x18< \x01(\x0cH\x00\x42\x07\n\x05value\x1aj\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x46\n\x05value\x18\x02 \x01(\x0b\x32\x37.CoreML.Specification.CustomModel.CustomModelParamValue:\x02\x38\x01\x42\x02H\x03\x62\x06proto3') +) + + + + +_CUSTOMMODEL_CUSTOMMODELPARAMVALUE = _descriptor.Descriptor( + name='CustomModelParamValue', + full_name='CoreML.Specification.CustomModel.CustomModelParamValue', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='doubleValue', full_name='CoreML.Specification.CustomModel.CustomModelParamValue.doubleValue', index=0, + number=10, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stringValue', full_name='CoreML.Specification.CustomModel.CustomModelParamValue.stringValue', index=1, + number=20, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='intValue', full_name='CoreML.Specification.CustomModel.CustomModelParamValue.intValue', index=2, + number=30, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='longValue', full_name='CoreML.Specification.CustomModel.CustomModelParamValue.longValue', index=3, + number=40, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='boolValue', full_name='CoreML.Specification.CustomModel.CustomModelParamValue.boolValue', index=4, + number=50, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='bytesValue', full_name='CoreML.Specification.CustomModel.CustomModelParamValue.bytesValue', index=5, + 
number=60, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='value', full_name='CoreML.Specification.CustomModel.CustomModelParamValue.value', + index=0, containing_type=None, fields=[]), + ], + serialized_start=171, + serialized_end=333, +) + +_CUSTOMMODEL_PARAMETERSENTRY = _descriptor.Descriptor( + name='ParametersEntry', + full_name='CoreML.Specification.CustomModel.ParametersEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='CoreML.Specification.CustomModel.ParametersEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.CustomModel.ParametersEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=335, + serialized_end=441, +) + +_CUSTOMMODEL = _descriptor.Descriptor( + name='CustomModel', + full_name='CoreML.Specification.CustomModel', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='className', full_name='CoreML.Specification.CustomModel.className', index=0, + number=10, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='parameters', full_name='CoreML.Specification.CustomModel.parameters', index=1, + number=30, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='description', full_name='CoreML.Specification.CustomModel.description', index=2, + number=40, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_CUSTOMMODEL_CUSTOMMODELPARAMVALUE, _CUSTOMMODEL_PARAMETERSENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=44, + serialized_end=441, +) + +_CUSTOMMODEL_CUSTOMMODELPARAMVALUE.containing_type = _CUSTOMMODEL +_CUSTOMMODEL_CUSTOMMODELPARAMVALUE.oneofs_by_name['value'].fields.append( + _CUSTOMMODEL_CUSTOMMODELPARAMVALUE.fields_by_name['doubleValue']) +_CUSTOMMODEL_CUSTOMMODELPARAMVALUE.fields_by_name['doubleValue'].containing_oneof = 
_CUSTOMMODEL_CUSTOMMODELPARAMVALUE.oneofs_by_name['value'] +_CUSTOMMODEL_CUSTOMMODELPARAMVALUE.oneofs_by_name['value'].fields.append( + _CUSTOMMODEL_CUSTOMMODELPARAMVALUE.fields_by_name['stringValue']) +_CUSTOMMODEL_CUSTOMMODELPARAMVALUE.fields_by_name['stringValue'].containing_oneof = _CUSTOMMODEL_CUSTOMMODELPARAMVALUE.oneofs_by_name['value'] +_CUSTOMMODEL_CUSTOMMODELPARAMVALUE.oneofs_by_name['value'].fields.append( + _CUSTOMMODEL_CUSTOMMODELPARAMVALUE.fields_by_name['intValue']) +_CUSTOMMODEL_CUSTOMMODELPARAMVALUE.fields_by_name['intValue'].containing_oneof = _CUSTOMMODEL_CUSTOMMODELPARAMVALUE.oneofs_by_name['value'] +_CUSTOMMODEL_CUSTOMMODELPARAMVALUE.oneofs_by_name['value'].fields.append( + _CUSTOMMODEL_CUSTOMMODELPARAMVALUE.fields_by_name['longValue']) +_CUSTOMMODEL_CUSTOMMODELPARAMVALUE.fields_by_name['longValue'].containing_oneof = _CUSTOMMODEL_CUSTOMMODELPARAMVALUE.oneofs_by_name['value'] +_CUSTOMMODEL_CUSTOMMODELPARAMVALUE.oneofs_by_name['value'].fields.append( + _CUSTOMMODEL_CUSTOMMODELPARAMVALUE.fields_by_name['boolValue']) +_CUSTOMMODEL_CUSTOMMODELPARAMVALUE.fields_by_name['boolValue'].containing_oneof = _CUSTOMMODEL_CUSTOMMODELPARAMVALUE.oneofs_by_name['value'] +_CUSTOMMODEL_CUSTOMMODELPARAMVALUE.oneofs_by_name['value'].fields.append( + _CUSTOMMODEL_CUSTOMMODELPARAMVALUE.fields_by_name['bytesValue']) +_CUSTOMMODEL_CUSTOMMODELPARAMVALUE.fields_by_name['bytesValue'].containing_oneof = _CUSTOMMODEL_CUSTOMMODELPARAMVALUE.oneofs_by_name['value'] +_CUSTOMMODEL_PARAMETERSENTRY.fields_by_name['value'].message_type = _CUSTOMMODEL_CUSTOMMODELPARAMVALUE +_CUSTOMMODEL_PARAMETERSENTRY.containing_type = _CUSTOMMODEL +_CUSTOMMODEL.fields_by_name['parameters'].message_type = _CUSTOMMODEL_PARAMETERSENTRY +DESCRIPTOR.message_types_by_name['CustomModel'] = _CUSTOMMODEL +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +CustomModel = _reflection.GeneratedProtocolMessageType('CustomModel', (_message.Message,), dict( + + CustomModelParamValue = _reflection.GeneratedProtocolMessageType('CustomModelParamValue', (_message.Message,), dict( + DESCRIPTOR = _CUSTOMMODEL_CUSTOMMODELPARAMVALUE, + __module__ = 'CustomModel_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CustomModel.CustomModelParamValue) + )) + , + + ParametersEntry = _reflection.GeneratedProtocolMessageType('ParametersEntry', (_message.Message,), dict( + DESCRIPTOR = _CUSTOMMODEL_PARAMETERSENTRY, + __module__ = 'CustomModel_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CustomModel.ParametersEntry) + )) + , + DESCRIPTOR = _CUSTOMMODEL, + __module__ = 'CustomModel_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CustomModel) + )) +_sym_db.RegisterMessage(CustomModel) +_sym_db.RegisterMessage(CustomModel.CustomModelParamValue) +_sym_db.RegisterMessage(CustomModel.ParametersEntry) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +_CUSTOMMODEL_PARAMETERSENTRY.has_options = True +_CUSTOMMODEL_PARAMETERSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/DataStructures_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/DataStructures_pb2.py new file mode 100644 index 00000000..1f4b301c --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/DataStructures_pb2.py @@ -0,0 +1,739 @@ +# Generated by the protocol buffer compiler. 
DO NOT EDIT! +# source: DataStructures.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . import FeatureTypes_pb2 as FeatureTypes__pb2 + +from .FeatureTypes_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='DataStructures.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x14\x44\x61taStructures.proto\x12\x14\x43oreML.Specification\x1a\x12\x46\x65\x61tureTypes.proto\"|\n\x10StringToInt64Map\x12<\n\x03map\x18\x01 \x03(\x0b\x32/.CoreML.Specification.StringToInt64Map.MapEntry\x1a*\n\x08MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x03:\x02\x38\x01\"|\n\x10Int64ToStringMap\x12<\n\x03map\x18\x01 \x03(\x0b\x32/.CoreML.Specification.Int64ToStringMap.MapEntry\x1a*\n\x08MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"~\n\x11StringToDoubleMap\x12=\n\x03map\x18\x01 \x03(\x0b\x32\x30.CoreML.Specification.StringToDoubleMap.MapEntry\x1a*\n\x08MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x01:\x02\x38\x01\"|\n\x10Int64ToDoubleMap\x12<\n\x03map\x18\x01 \x03(\x0b\x32/.CoreML.Specification.Int64ToDoubleMap.MapEntry\x1a*\n\x08MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x01:\x02\x38\x01\"\x1e\n\x0cStringVector\x12\x0e\n\x06vector\x18\x01 \x03(\t\"\x1d\n\x0bInt64Vector\x12\x0e\n\x06vector\x18\x01 \x03(\x03\"\x1d\n\x0b\x46loatVector\x12\x0e\n\x06vector\x18\x01 \x03(\x02\"\x1e\n\x0c\x44oubleVector\x12\x0e\n\x06vector\x18\x01 \x03(\x01\"0\n\nInt64Range\x12\x10\n\x08minValue\x18\x01 \x01(\x03\x12\x10\n\x08maxValue\x18\x02 \x01(\x03\"\x1a\n\x08Int64Set\x12\x0e\n\x06values\x18\x01 \x03(\x03\"1\n\x0b\x44oubleRange\x12\x10\n\x08minValue\x18\x01 \x01(\x01\x12\x10\n\x08maxValue\x18\x02 \x01(\x01\"\x9c\x02\n\x14PrecisionRecallCurve\x12:\n\x0fprecisionValues\x18\x01 \x01(\x0b\x32!.CoreML.Specification.FloatVector\x12H\n\x1dprecisionConfidenceThresholds\x18\x02 \x01(\x0b\x32!.CoreML.Specification.FloatVector\x12\x37\n\x0crecallValues\x18\x03 \x01(\x0b\x32!.CoreML.Specification.FloatVector\x12\x45\n\x1arecallConfidenceThresholds\x18\x04 \x01(\x0b\x32!.CoreML.Specification.FloatVectorB\x02H\x03P\x00\x62\x06proto3') + , + dependencies=[FeatureTypes__pb2.DESCRIPTOR,], + public_dependencies=[FeatureTypes__pb2.DESCRIPTOR,]) + + + + +_STRINGTOINT64MAP_MAPENTRY = _descriptor.Descriptor( + name='MapEntry', + full_name='CoreML.Specification.StringToInt64Map.MapEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='CoreML.Specification.StringToInt64Map.MapEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.StringToInt64Map.MapEntry.value', index=1, + number=2, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, 
extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=148, + serialized_end=190, +) + +_STRINGTOINT64MAP = _descriptor.Descriptor( + name='StringToInt64Map', + full_name='CoreML.Specification.StringToInt64Map', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='map', full_name='CoreML.Specification.StringToInt64Map.map', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_STRINGTOINT64MAP_MAPENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=66, + serialized_end=190, +) + + +_INT64TOSTRINGMAP_MAPENTRY = _descriptor.Descriptor( + name='MapEntry', + full_name='CoreML.Specification.Int64ToStringMap.MapEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='CoreML.Specification.Int64ToStringMap.MapEntry.key', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.Int64ToStringMap.MapEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=274, + serialized_end=316, +) + +_INT64TOSTRINGMAP = _descriptor.Descriptor( + name='Int64ToStringMap', + full_name='CoreML.Specification.Int64ToStringMap', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='map', full_name='CoreML.Specification.Int64ToStringMap.map', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_INT64TOSTRINGMAP_MAPENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=192, + serialized_end=316, +) + + +_STRINGTODOUBLEMAP_MAPENTRY = _descriptor.Descriptor( + name='MapEntry', + full_name='CoreML.Specification.StringToDoubleMap.MapEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='CoreML.Specification.StringToDoubleMap.MapEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + 
_descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.StringToDoubleMap.MapEntry.value', index=1, + number=2, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=402, + serialized_end=444, +) + +_STRINGTODOUBLEMAP = _descriptor.Descriptor( + name='StringToDoubleMap', + full_name='CoreML.Specification.StringToDoubleMap', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='map', full_name='CoreML.Specification.StringToDoubleMap.map', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_STRINGTODOUBLEMAP_MAPENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=318, + serialized_end=444, +) + + +_INT64TODOUBLEMAP_MAPENTRY = _descriptor.Descriptor( + name='MapEntry', + full_name='CoreML.Specification.Int64ToDoubleMap.MapEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='CoreML.Specification.Int64ToDoubleMap.MapEntry.key', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.Int64ToDoubleMap.MapEntry.value', index=1, + number=2, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=528, + serialized_end=570, +) + +_INT64TODOUBLEMAP = _descriptor.Descriptor( + name='Int64ToDoubleMap', + full_name='CoreML.Specification.Int64ToDoubleMap', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='map', full_name='CoreML.Specification.Int64ToDoubleMap.map', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_INT64TODOUBLEMAP_MAPENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=446, + serialized_end=570, +) + + +_STRINGVECTOR = _descriptor.Descriptor( + name='StringVector', + full_name='CoreML.Specification.StringVector', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='vector', full_name='CoreML.Specification.StringVector.vector', 
index=0, + number=1, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=572, + serialized_end=602, +) + + +_INT64VECTOR = _descriptor.Descriptor( + name='Int64Vector', + full_name='CoreML.Specification.Int64Vector', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='vector', full_name='CoreML.Specification.Int64Vector.vector', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=604, + serialized_end=633, +) + + +_FLOATVECTOR = _descriptor.Descriptor( + name='FloatVector', + full_name='CoreML.Specification.FloatVector', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='vector', full_name='CoreML.Specification.FloatVector.vector', index=0, + number=1, type=2, cpp_type=6, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=635, + serialized_end=664, +) + + +_DOUBLEVECTOR = _descriptor.Descriptor( + name='DoubleVector', + full_name='CoreML.Specification.DoubleVector', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='vector', full_name='CoreML.Specification.DoubleVector.vector', index=0, + number=1, type=1, cpp_type=5, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=666, + serialized_end=696, +) + + +_INT64RANGE = _descriptor.Descriptor( + name='Int64Range', + full_name='CoreML.Specification.Int64Range', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='minValue', full_name='CoreML.Specification.Int64Range.minValue', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='maxValue', full_name='CoreML.Specification.Int64Range.maxValue', index=1, + number=2, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + 
serialized_start=698, + serialized_end=746, +) + + +_INT64SET = _descriptor.Descriptor( + name='Int64Set', + full_name='CoreML.Specification.Int64Set', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='values', full_name='CoreML.Specification.Int64Set.values', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=748, + serialized_end=774, +) + + +_DOUBLERANGE = _descriptor.Descriptor( + name='DoubleRange', + full_name='CoreML.Specification.DoubleRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='minValue', full_name='CoreML.Specification.DoubleRange.minValue', index=0, + number=1, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='maxValue', full_name='CoreML.Specification.DoubleRange.maxValue', index=1, + number=2, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=776, + serialized_end=825, +) + + +_PRECISIONRECALLCURVE = _descriptor.Descriptor( + name='PrecisionRecallCurve', + full_name='CoreML.Specification.PrecisionRecallCurve', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='precisionValues', full_name='CoreML.Specification.PrecisionRecallCurve.precisionValues', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='precisionConfidenceThresholds', full_name='CoreML.Specification.PrecisionRecallCurve.precisionConfidenceThresholds', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='recallValues', full_name='CoreML.Specification.PrecisionRecallCurve.recallValues', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='recallConfidenceThresholds', full_name='CoreML.Specification.PrecisionRecallCurve.recallConfidenceThresholds', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + 
syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=828, + serialized_end=1112, +) + +_STRINGTOINT64MAP_MAPENTRY.containing_type = _STRINGTOINT64MAP +_STRINGTOINT64MAP.fields_by_name['map'].message_type = _STRINGTOINT64MAP_MAPENTRY +_INT64TOSTRINGMAP_MAPENTRY.containing_type = _INT64TOSTRINGMAP +_INT64TOSTRINGMAP.fields_by_name['map'].message_type = _INT64TOSTRINGMAP_MAPENTRY +_STRINGTODOUBLEMAP_MAPENTRY.containing_type = _STRINGTODOUBLEMAP +_STRINGTODOUBLEMAP.fields_by_name['map'].message_type = _STRINGTODOUBLEMAP_MAPENTRY +_INT64TODOUBLEMAP_MAPENTRY.containing_type = _INT64TODOUBLEMAP +_INT64TODOUBLEMAP.fields_by_name['map'].message_type = _INT64TODOUBLEMAP_MAPENTRY +_PRECISIONRECALLCURVE.fields_by_name['precisionValues'].message_type = _FLOATVECTOR +_PRECISIONRECALLCURVE.fields_by_name['precisionConfidenceThresholds'].message_type = _FLOATVECTOR +_PRECISIONRECALLCURVE.fields_by_name['recallValues'].message_type = _FLOATVECTOR +_PRECISIONRECALLCURVE.fields_by_name['recallConfidenceThresholds'].message_type = _FLOATVECTOR +DESCRIPTOR.message_types_by_name['StringToInt64Map'] = _STRINGTOINT64MAP +DESCRIPTOR.message_types_by_name['Int64ToStringMap'] = _INT64TOSTRINGMAP +DESCRIPTOR.message_types_by_name['StringToDoubleMap'] = _STRINGTODOUBLEMAP +DESCRIPTOR.message_types_by_name['Int64ToDoubleMap'] = _INT64TODOUBLEMAP +DESCRIPTOR.message_types_by_name['StringVector'] = _STRINGVECTOR +DESCRIPTOR.message_types_by_name['Int64Vector'] = _INT64VECTOR +DESCRIPTOR.message_types_by_name['FloatVector'] = _FLOATVECTOR +DESCRIPTOR.message_types_by_name['DoubleVector'] = _DOUBLEVECTOR +DESCRIPTOR.message_types_by_name['Int64Range'] = _INT64RANGE +DESCRIPTOR.message_types_by_name['Int64Set'] = _INT64SET +DESCRIPTOR.message_types_by_name['DoubleRange'] = _DOUBLERANGE +DESCRIPTOR.message_types_by_name['PrecisionRecallCurve'] = _PRECISIONRECALLCURVE +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +StringToInt64Map = _reflection.GeneratedProtocolMessageType('StringToInt64Map', (_message.Message,), dict( + + MapEntry = _reflection.GeneratedProtocolMessageType('MapEntry', (_message.Message,), dict( + DESCRIPTOR = _STRINGTOINT64MAP_MAPENTRY, + __module__ = 'DataStructures_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.StringToInt64Map.MapEntry) + )) + , + DESCRIPTOR = _STRINGTOINT64MAP, + __module__ = 'DataStructures_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.StringToInt64Map) + )) +_sym_db.RegisterMessage(StringToInt64Map) +_sym_db.RegisterMessage(StringToInt64Map.MapEntry) + +Int64ToStringMap = _reflection.GeneratedProtocolMessageType('Int64ToStringMap', (_message.Message,), dict( + + MapEntry = _reflection.GeneratedProtocolMessageType('MapEntry', (_message.Message,), dict( + DESCRIPTOR = _INT64TOSTRINGMAP_MAPENTRY, + __module__ = 'DataStructures_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Int64ToStringMap.MapEntry) + )) + , + DESCRIPTOR = _INT64TOSTRINGMAP, + __module__ = 'DataStructures_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Int64ToStringMap) + )) +_sym_db.RegisterMessage(Int64ToStringMap) +_sym_db.RegisterMessage(Int64ToStringMap.MapEntry) + +StringToDoubleMap = _reflection.GeneratedProtocolMessageType('StringToDoubleMap', (_message.Message,), dict( + + MapEntry = _reflection.GeneratedProtocolMessageType('MapEntry', (_message.Message,), dict( + DESCRIPTOR = _STRINGTODOUBLEMAP_MAPENTRY, + __module__ = 'DataStructures_pb2' + # 
@@protoc_insertion_point(class_scope:CoreML.Specification.StringToDoubleMap.MapEntry) + )) + , + DESCRIPTOR = _STRINGTODOUBLEMAP, + __module__ = 'DataStructures_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.StringToDoubleMap) + )) +_sym_db.RegisterMessage(StringToDoubleMap) +_sym_db.RegisterMessage(StringToDoubleMap.MapEntry) + +Int64ToDoubleMap = _reflection.GeneratedProtocolMessageType('Int64ToDoubleMap', (_message.Message,), dict( + + MapEntry = _reflection.GeneratedProtocolMessageType('MapEntry', (_message.Message,), dict( + DESCRIPTOR = _INT64TODOUBLEMAP_MAPENTRY, + __module__ = 'DataStructures_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Int64ToDoubleMap.MapEntry) + )) + , + DESCRIPTOR = _INT64TODOUBLEMAP, + __module__ = 'DataStructures_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Int64ToDoubleMap) + )) +_sym_db.RegisterMessage(Int64ToDoubleMap) +_sym_db.RegisterMessage(Int64ToDoubleMap.MapEntry) + +StringVector = _reflection.GeneratedProtocolMessageType('StringVector', (_message.Message,), dict( + DESCRIPTOR = _STRINGVECTOR, + __module__ = 'DataStructures_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.StringVector) + )) +_sym_db.RegisterMessage(StringVector) + +Int64Vector = _reflection.GeneratedProtocolMessageType('Int64Vector', (_message.Message,), dict( + DESCRIPTOR = _INT64VECTOR, + __module__ = 'DataStructures_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Int64Vector) + )) +_sym_db.RegisterMessage(Int64Vector) + +FloatVector = _reflection.GeneratedProtocolMessageType('FloatVector', (_message.Message,), dict( + DESCRIPTOR = _FLOATVECTOR, + __module__ = 'DataStructures_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.FloatVector) + )) +_sym_db.RegisterMessage(FloatVector) + +DoubleVector = _reflection.GeneratedProtocolMessageType('DoubleVector', (_message.Message,), dict( + DESCRIPTOR = _DOUBLEVECTOR, + __module__ = 'DataStructures_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.DoubleVector) + )) +_sym_db.RegisterMessage(DoubleVector) + +Int64Range = _reflection.GeneratedProtocolMessageType('Int64Range', (_message.Message,), dict( + DESCRIPTOR = _INT64RANGE, + __module__ = 'DataStructures_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Int64Range) + )) +_sym_db.RegisterMessage(Int64Range) + +Int64Set = _reflection.GeneratedProtocolMessageType('Int64Set', (_message.Message,), dict( + DESCRIPTOR = _INT64SET, + __module__ = 'DataStructures_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Int64Set) + )) +_sym_db.RegisterMessage(Int64Set) + +DoubleRange = _reflection.GeneratedProtocolMessageType('DoubleRange', (_message.Message,), dict( + DESCRIPTOR = _DOUBLERANGE, + __module__ = 'DataStructures_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.DoubleRange) + )) +_sym_db.RegisterMessage(DoubleRange) + +PrecisionRecallCurve = _reflection.GeneratedProtocolMessageType('PrecisionRecallCurve', (_message.Message,), dict( + DESCRIPTOR = _PRECISIONRECALLCURVE, + __module__ = 'DataStructures_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.PrecisionRecallCurve) + )) +_sym_db.RegisterMessage(PrecisionRecallCurve) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +_STRINGTOINT64MAP_MAPENTRY.has_options = True +_STRINGTOINT64MAP_MAPENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), 
_b('8\001')) +_INT64TOSTRINGMAP_MAPENTRY.has_options = True +_INT64TOSTRINGMAP_MAPENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +_STRINGTODOUBLEMAP_MAPENTRY.has_options = True +_STRINGTODOUBLEMAP_MAPENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +_INT64TODOUBLEMAP_MAPENTRY.has_options = True +_INT64TODOUBLEMAP_MAPENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/DictVectorizer_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/DictVectorizer_pb2.py new file mode 100644 index 00000000..1ba214ca --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/DictVectorizer_pb2.py @@ -0,0 +1,97 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: DictVectorizer.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . import DataStructures_pb2 as DataStructures__pb2 +try: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2 + +from .DataStructures_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='DictVectorizer.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x14\x44ictVectorizer.proto\x12\x14\x43oreML.Specification\x1a\x14\x44\x61taStructures.proto\"\x8f\x01\n\x0e\x44ictVectorizer\x12;\n\rstringToIndex\x18\x01 \x01(\x0b\x32\".CoreML.Specification.StringVectorH\x00\x12\x39\n\x0cint64ToIndex\x18\x02 \x01(\x0b\x32!.CoreML.Specification.Int64VectorH\x00\x42\x05\n\x03MapB\x02H\x03P\x00\x62\x06proto3') + , + dependencies=[DataStructures__pb2.DESCRIPTOR,], + public_dependencies=[DataStructures__pb2.DESCRIPTOR,]) + + + + +_DICTVECTORIZER = _descriptor.Descriptor( + name='DictVectorizer', + full_name='CoreML.Specification.DictVectorizer', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='stringToIndex', full_name='CoreML.Specification.DictVectorizer.stringToIndex', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='int64ToIndex', full_name='CoreML.Specification.DictVectorizer.int64ToIndex', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='Map', full_name='CoreML.Specification.DictVectorizer.Map', + index=0, containing_type=None, fields=[]), + ], + serialized_start=69, + serialized_end=212, +) + +_DICTVECTORIZER.fields_by_name['stringToIndex'].message_type = 
DataStructures__pb2._STRINGVECTOR +_DICTVECTORIZER.fields_by_name['int64ToIndex'].message_type = DataStructures__pb2._INT64VECTOR +_DICTVECTORIZER.oneofs_by_name['Map'].fields.append( + _DICTVECTORIZER.fields_by_name['stringToIndex']) +_DICTVECTORIZER.fields_by_name['stringToIndex'].containing_oneof = _DICTVECTORIZER.oneofs_by_name['Map'] +_DICTVECTORIZER.oneofs_by_name['Map'].fields.append( + _DICTVECTORIZER.fields_by_name['int64ToIndex']) +_DICTVECTORIZER.fields_by_name['int64ToIndex'].containing_oneof = _DICTVECTORIZER.oneofs_by_name['Map'] +DESCRIPTOR.message_types_by_name['DictVectorizer'] = _DICTVECTORIZER +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +DictVectorizer = _reflection.GeneratedProtocolMessageType('DictVectorizer', (_message.Message,), dict( + DESCRIPTOR = _DICTVECTORIZER, + __module__ = 'DictVectorizer_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.DictVectorizer) + )) +_sym_db.RegisterMessage(DictVectorizer) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/FeatureTypes_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/FeatureTypes_pb2.py new file mode 100644 index 00000000..ef54f112 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/FeatureTypes_pb2.py @@ -0,0 +1,924 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: FeatureTypes.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='FeatureTypes.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x12\x46\x65\x61tureTypes.proto\x12\x14\x43oreML.Specification\"\x12\n\x10Int64FeatureType\"\x13\n\x11\x44oubleFeatureType\"\x13\n\x11StringFeatureType\"3\n\tSizeRange\x12\x12\n\nlowerBound\x18\x01 \x01(\x04\x12\x12\n\nupperBound\x18\x02 \x01(\x03\"\x95\x05\n\x10ImageFeatureType\x12\r\n\x05width\x18\x01 \x01(\x03\x12\x0e\n\x06height\x18\x02 \x01(\x03\x12V\n\x0f\x65numeratedSizes\x18\x15 \x01(\x0b\x32;.CoreML.Specification.ImageFeatureType.EnumeratedImageSizesH\x00\x12O\n\x0eimageSizeRange\x18\x1f \x01(\x0b\x32\x35.CoreML.Specification.ImageFeatureType.ImageSizeRangeH\x00\x12\x45\n\ncolorSpace\x18\x03 \x01(\x0e\x32\x31.CoreML.Specification.ImageFeatureType.ColorSpace\x1a*\n\tImageSize\x12\r\n\x05width\x18\x01 \x01(\x04\x12\x0e\n\x06height\x18\x02 \x01(\x04\x1aW\n\x14\x45numeratedImageSizes\x12?\n\x05sizes\x18\x01 \x03(\x0b\x32\x30.CoreML.Specification.ImageFeatureType.ImageSize\x1a{\n\x0eImageSizeRange\x12\x33\n\nwidthRange\x18\x01 \x01(\x0b\x32\x1f.CoreML.Specification.SizeRange\x12\x34\n\x0bheightRange\x18\x02 \x01(\x0b\x32\x1f.CoreML.Specification.SizeRange\"]\n\nColorSpace\x12\x17\n\x13INVALID_COLOR_SPACE\x10\x00\x12\r\n\tGRAYSCALE\x10\n\x12\x07\n\x03RGB\x10\x14\x12\x07\n\x03\x42GR\x10\x1e\x12\x15\n\x11GRAYSCALE_FLOAT16\x10(B\x11\n\x0fSizeFlexibility\"\x9d\x05\n\x10\x41rrayFeatureType\x12\r\n\x05shape\x18\x01 \x03(\x03\x12\x46\n\x08\x64\x61taType\x18\x02 
\x01(\x0e\x32\x34.CoreML.Specification.ArrayFeatureType.ArrayDataType\x12S\n\x10\x65numeratedShapes\x18\x15 \x01(\x0b\x32\x37.CoreML.Specification.ArrayFeatureType.EnumeratedShapesH\x00\x12G\n\nshapeRange\x18\x1f \x01(\x0b\x32\x31.CoreML.Specification.ArrayFeatureType.ShapeRangeH\x00\x12\x19\n\x0fintDefaultValue\x18) \x01(\x05H\x01\x12\x1b\n\x11\x66loatDefaultValue\x18\x33 \x01(\x02H\x01\x12\x1c\n\x12\x64oubleDefaultValue\x18= \x01(\x01H\x01\x1a\x16\n\x05Shape\x12\r\n\x05shape\x18\x01 \x03(\x03\x1aP\n\x10\x45numeratedShapes\x12<\n\x06shapes\x18\x01 \x03(\x0b\x32,.CoreML.Specification.ArrayFeatureType.Shape\x1a\x41\n\nShapeRange\x12\x33\n\nsizeRanges\x18\x01 \x03(\x0b\x32\x1f.CoreML.Specification.SizeRange\"e\n\rArrayDataType\x12\x1b\n\x17INVALID_ARRAY_DATA_TYPE\x10\x00\x12\r\n\x07\x46LOAT32\x10\xa0\x80\x04\x12\x0c\n\x06\x44OUBLE\x10\xc0\x80\x04\x12\x0b\n\x05INT32\x10\xa0\x80\x08\x12\r\n\x07\x46LOAT16\x10\x90\x80\x04\x42\x12\n\x10ShapeFlexibilityB\x16\n\x14\x64\x65\x66\x61ultOptionalValue\"\xa4\x01\n\x15\x44ictionaryFeatureType\x12>\n\x0cint64KeyType\x18\x01 \x01(\x0b\x32&.CoreML.Specification.Int64FeatureTypeH\x00\x12@\n\rstringKeyType\x18\x02 \x01(\x0b\x32\'.CoreML.Specification.StringFeatureTypeH\x00\x42\t\n\x07KeyType\"\xcd\x01\n\x13SequenceFeatureType\x12;\n\tint64Type\x18\x01 \x01(\x0b\x32&.CoreML.Specification.Int64FeatureTypeH\x00\x12=\n\nstringType\x18\x03 \x01(\x0b\x32\'.CoreML.Specification.StringFeatureTypeH\x00\x12\x32\n\tsizeRange\x18\x65 \x01(\x0b\x32\x1f.CoreML.Specification.SizeRangeB\x06\n\x04Type\"\xee\x03\n\x0b\x46\x65\x61tureType\x12;\n\tint64Type\x18\x01 \x01(\x0b\x32&.CoreML.Specification.Int64FeatureTypeH\x00\x12=\n\ndoubleType\x18\x02 \x01(\x0b\x32\'.CoreML.Specification.DoubleFeatureTypeH\x00\x12=\n\nstringType\x18\x03 \x01(\x0b\x32\'.CoreML.Specification.StringFeatureTypeH\x00\x12;\n\timageType\x18\x04 \x01(\x0b\x32&.CoreML.Specification.ImageFeatureTypeH\x00\x12@\n\x0emultiArrayType\x18\x05 \x01(\x0b\x32&.CoreML.Specification.ArrayFeatureTypeH\x00\x12\x45\n\x0e\x64ictionaryType\x18\x06 \x01(\x0b\x32+.CoreML.Specification.DictionaryFeatureTypeH\x00\x12\x41\n\x0csequenceType\x18\x07 \x01(\x0b\x32).CoreML.Specification.SequenceFeatureTypeH\x00\x12\x13\n\nisOptional\x18\xe8\x07 \x01(\x08\x42\x06\n\x04TypeB\x02H\x03\x62\x06proto3') +) + + + +_IMAGEFEATURETYPE_COLORSPACE = _descriptor.EnumDescriptor( + name='ColorSpace', + full_name='CoreML.Specification.ImageFeatureType.ColorSpace', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='INVALID_COLOR_SPACE', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='GRAYSCALE', index=1, number=10, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='RGB', index=2, number=20, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BGR', index=3, number=30, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='GRAYSCALE_FLOAT16', index=4, number=40, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=709, + serialized_end=802, +) +_sym_db.RegisterEnumDescriptor(_IMAGEFEATURETYPE_COLORSPACE) + +_ARRAYFEATURETYPE_ARRAYDATATYPE = _descriptor.EnumDescriptor( + name='ArrayDataType', + full_name='CoreML.Specification.ArrayFeatureType.ArrayDataType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='INVALID_ARRAY_DATA_TYPE', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='FLOAT32', 
index=1, number=65568, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='DOUBLE', index=2, number=65600, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='INT32', index=3, number=131104, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='FLOAT16', index=4, number=65552, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=1348, + serialized_end=1449, +) +_sym_db.RegisterEnumDescriptor(_ARRAYFEATURETYPE_ARRAYDATATYPE) + + +_INT64FEATURETYPE = _descriptor.Descriptor( + name='Int64FeatureType', + full_name='CoreML.Specification.Int64FeatureType', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=44, + serialized_end=62, +) + + +_DOUBLEFEATURETYPE = _descriptor.Descriptor( + name='DoubleFeatureType', + full_name='CoreML.Specification.DoubleFeatureType', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=64, + serialized_end=83, +) + + +_STRINGFEATURETYPE = _descriptor.Descriptor( + name='StringFeatureType', + full_name='CoreML.Specification.StringFeatureType', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=85, + serialized_end=104, +) + + +_SIZERANGE = _descriptor.Descriptor( + name='SizeRange', + full_name='CoreML.Specification.SizeRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='lowerBound', full_name='CoreML.Specification.SizeRange.lowerBound', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='upperBound', full_name='CoreML.Specification.SizeRange.upperBound', index=1, + number=2, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=106, + serialized_end=157, +) + + +_IMAGEFEATURETYPE_IMAGESIZE = _descriptor.Descriptor( + name='ImageSize', + full_name='CoreML.Specification.ImageFeatureType.ImageSize', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='width', full_name='CoreML.Specification.ImageFeatureType.ImageSize.width', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='height', full_name='CoreML.Specification.ImageFeatureType.ImageSize.height', index=1, + number=2, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + 
message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=451, + serialized_end=493, +) + +_IMAGEFEATURETYPE_ENUMERATEDIMAGESIZES = _descriptor.Descriptor( + name='EnumeratedImageSizes', + full_name='CoreML.Specification.ImageFeatureType.EnumeratedImageSizes', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='sizes', full_name='CoreML.Specification.ImageFeatureType.EnumeratedImageSizes.sizes', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=495, + serialized_end=582, +) + +_IMAGEFEATURETYPE_IMAGESIZERANGE = _descriptor.Descriptor( + name='ImageSizeRange', + full_name='CoreML.Specification.ImageFeatureType.ImageSizeRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='widthRange', full_name='CoreML.Specification.ImageFeatureType.ImageSizeRange.widthRange', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='heightRange', full_name='CoreML.Specification.ImageFeatureType.ImageSizeRange.heightRange', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=584, + serialized_end=707, +) + +_IMAGEFEATURETYPE = _descriptor.Descriptor( + name='ImageFeatureType', + full_name='CoreML.Specification.ImageFeatureType', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='width', full_name='CoreML.Specification.ImageFeatureType.width', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='height', full_name='CoreML.Specification.ImageFeatureType.height', index=1, + number=2, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='enumeratedSizes', full_name='CoreML.Specification.ImageFeatureType.enumeratedSizes', index=2, + number=21, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='imageSizeRange', 
full_name='CoreML.Specification.ImageFeatureType.imageSizeRange', index=3, + number=31, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='colorSpace', full_name='CoreML.Specification.ImageFeatureType.colorSpace', index=4, + number=3, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_IMAGEFEATURETYPE_IMAGESIZE, _IMAGEFEATURETYPE_ENUMERATEDIMAGESIZES, _IMAGEFEATURETYPE_IMAGESIZERANGE, ], + enum_types=[ + _IMAGEFEATURETYPE_COLORSPACE, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='SizeFlexibility', full_name='CoreML.Specification.ImageFeatureType.SizeFlexibility', + index=0, containing_type=None, fields=[]), + ], + serialized_start=160, + serialized_end=821, +) + + +_ARRAYFEATURETYPE_SHAPE = _descriptor.Descriptor( + name='Shape', + full_name='CoreML.Specification.ArrayFeatureType.Shape', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='shape', full_name='CoreML.Specification.ArrayFeatureType.Shape.shape', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1175, + serialized_end=1197, +) + +_ARRAYFEATURETYPE_ENUMERATEDSHAPES = _descriptor.Descriptor( + name='EnumeratedShapes', + full_name='CoreML.Specification.ArrayFeatureType.EnumeratedShapes', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='shapes', full_name='CoreML.Specification.ArrayFeatureType.EnumeratedShapes.shapes', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1199, + serialized_end=1279, +) + +_ARRAYFEATURETYPE_SHAPERANGE = _descriptor.Descriptor( + name='ShapeRange', + full_name='CoreML.Specification.ArrayFeatureType.ShapeRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='sizeRanges', full_name='CoreML.Specification.ArrayFeatureType.ShapeRange.sizeRanges', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1281, + serialized_end=1346, +) + +_ARRAYFEATURETYPE = _descriptor.Descriptor( + name='ArrayFeatureType', + full_name='CoreML.Specification.ArrayFeatureType', + 
filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='shape', full_name='CoreML.Specification.ArrayFeatureType.shape', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='dataType', full_name='CoreML.Specification.ArrayFeatureType.dataType', index=1, + number=2, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='enumeratedShapes', full_name='CoreML.Specification.ArrayFeatureType.enumeratedShapes', index=2, + number=21, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='shapeRange', full_name='CoreML.Specification.ArrayFeatureType.shapeRange', index=3, + number=31, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='intDefaultValue', full_name='CoreML.Specification.ArrayFeatureType.intDefaultValue', index=4, + number=41, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='floatDefaultValue', full_name='CoreML.Specification.ArrayFeatureType.floatDefaultValue', index=5, + number=51, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='doubleDefaultValue', full_name='CoreML.Specification.ArrayFeatureType.doubleDefaultValue', index=6, + number=61, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_ARRAYFEATURETYPE_SHAPE, _ARRAYFEATURETYPE_ENUMERATEDSHAPES, _ARRAYFEATURETYPE_SHAPERANGE, ], + enum_types=[ + _ARRAYFEATURETYPE_ARRAYDATATYPE, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='ShapeFlexibility', full_name='CoreML.Specification.ArrayFeatureType.ShapeFlexibility', + index=0, containing_type=None, fields=[]), + _descriptor.OneofDescriptor( + name='defaultOptionalValue', full_name='CoreML.Specification.ArrayFeatureType.defaultOptionalValue', + index=1, containing_type=None, fields=[]), + ], + serialized_start=824, + serialized_end=1493, +) + + +_DICTIONARYFEATURETYPE = _descriptor.Descriptor( + name='DictionaryFeatureType', + full_name='CoreML.Specification.DictionaryFeatureType', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='int64KeyType', full_name='CoreML.Specification.DictionaryFeatureType.int64KeyType', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, 
default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stringKeyType', full_name='CoreML.Specification.DictionaryFeatureType.stringKeyType', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='KeyType', full_name='CoreML.Specification.DictionaryFeatureType.KeyType', + index=0, containing_type=None, fields=[]), + ], + serialized_start=1496, + serialized_end=1660, +) + + +_SEQUENCEFEATURETYPE = _descriptor.Descriptor( + name='SequenceFeatureType', + full_name='CoreML.Specification.SequenceFeatureType', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='int64Type', full_name='CoreML.Specification.SequenceFeatureType.int64Type', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stringType', full_name='CoreML.Specification.SequenceFeatureType.stringType', index=1, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sizeRange', full_name='CoreML.Specification.SequenceFeatureType.sizeRange', index=2, + number=101, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='Type', full_name='CoreML.Specification.SequenceFeatureType.Type', + index=0, containing_type=None, fields=[]), + ], + serialized_start=1663, + serialized_end=1868, +) + + +_FEATURETYPE = _descriptor.Descriptor( + name='FeatureType', + full_name='CoreML.Specification.FeatureType', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='int64Type', full_name='CoreML.Specification.FeatureType.int64Type', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='doubleType', full_name='CoreML.Specification.FeatureType.doubleType', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stringType', full_name='CoreML.Specification.FeatureType.stringType', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='imageType', full_name='CoreML.Specification.FeatureType.imageType', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='multiArrayType', full_name='CoreML.Specification.FeatureType.multiArrayType', index=4, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='dictionaryType', full_name='CoreML.Specification.FeatureType.dictionaryType', index=5, + number=6, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sequenceType', full_name='CoreML.Specification.FeatureType.sequenceType', index=6, + number=7, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='isOptional', full_name='CoreML.Specification.FeatureType.isOptional', index=7, + number=1000, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='Type', full_name='CoreML.Specification.FeatureType.Type', + index=0, containing_type=None, fields=[]), + ], + serialized_start=1871, + serialized_end=2365, +) + +_IMAGEFEATURETYPE_IMAGESIZE.containing_type = _IMAGEFEATURETYPE +_IMAGEFEATURETYPE_ENUMERATEDIMAGESIZES.fields_by_name['sizes'].message_type = _IMAGEFEATURETYPE_IMAGESIZE +_IMAGEFEATURETYPE_ENUMERATEDIMAGESIZES.containing_type = _IMAGEFEATURETYPE +_IMAGEFEATURETYPE_IMAGESIZERANGE.fields_by_name['widthRange'].message_type = _SIZERANGE +_IMAGEFEATURETYPE_IMAGESIZERANGE.fields_by_name['heightRange'].message_type = _SIZERANGE +_IMAGEFEATURETYPE_IMAGESIZERANGE.containing_type = _IMAGEFEATURETYPE +_IMAGEFEATURETYPE.fields_by_name['enumeratedSizes'].message_type = _IMAGEFEATURETYPE_ENUMERATEDIMAGESIZES +_IMAGEFEATURETYPE.fields_by_name['imageSizeRange'].message_type = _IMAGEFEATURETYPE_IMAGESIZERANGE +_IMAGEFEATURETYPE.fields_by_name['colorSpace'].enum_type = _IMAGEFEATURETYPE_COLORSPACE +_IMAGEFEATURETYPE_COLORSPACE.containing_type = _IMAGEFEATURETYPE +_IMAGEFEATURETYPE.oneofs_by_name['SizeFlexibility'].fields.append( + _IMAGEFEATURETYPE.fields_by_name['enumeratedSizes']) +_IMAGEFEATURETYPE.fields_by_name['enumeratedSizes'].containing_oneof = _IMAGEFEATURETYPE.oneofs_by_name['SizeFlexibility'] +_IMAGEFEATURETYPE.oneofs_by_name['SizeFlexibility'].fields.append( + _IMAGEFEATURETYPE.fields_by_name['imageSizeRange']) +_IMAGEFEATURETYPE.fields_by_name['imageSizeRange'].containing_oneof = _IMAGEFEATURETYPE.oneofs_by_name['SizeFlexibility'] +_ARRAYFEATURETYPE_SHAPE.containing_type = _ARRAYFEATURETYPE 
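# --- Illustrative sketch (reviewer annotation, not generated output) ---
# The SizeFlexibility oneof wired just above makes enumeratedSizes and
# imageSizeRange mutually exclusive. Assuming the coremltools.proto package
# layout this diff introduces, the generated classes are used like so:
from coremltools.proto import FeatureTypes_pb2 as ft

img = ft.ImageFeatureType()
img.width, img.height = 224, 224
img.colorSpace = ft.ImageFeatureType.RGB
size = img.enumeratedSizes.sizes.add()          # selects enumeratedSizes
size.width, size.height = 299, 299
img.imageSizeRange.widthRange.lowerBound = 64   # mutating the other member...
img.imageSizeRange.heightRange.upperBound = -1  # (-1 = unbounded)
assert img.WhichOneof('SizeFlexibility') == 'imageSizeRange'  # ...cleared it
# -----------------------------------------------------------------------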
+_ARRAYFEATURETYPE_ENUMERATEDSHAPES.fields_by_name['shapes'].message_type = _ARRAYFEATURETYPE_SHAPE +_ARRAYFEATURETYPE_ENUMERATEDSHAPES.containing_type = _ARRAYFEATURETYPE +_ARRAYFEATURETYPE_SHAPERANGE.fields_by_name['sizeRanges'].message_type = _SIZERANGE +_ARRAYFEATURETYPE_SHAPERANGE.containing_type = _ARRAYFEATURETYPE +_ARRAYFEATURETYPE.fields_by_name['dataType'].enum_type = _ARRAYFEATURETYPE_ARRAYDATATYPE +_ARRAYFEATURETYPE.fields_by_name['enumeratedShapes'].message_type = _ARRAYFEATURETYPE_ENUMERATEDSHAPES +_ARRAYFEATURETYPE.fields_by_name['shapeRange'].message_type = _ARRAYFEATURETYPE_SHAPERANGE +_ARRAYFEATURETYPE_ARRAYDATATYPE.containing_type = _ARRAYFEATURETYPE +_ARRAYFEATURETYPE.oneofs_by_name['ShapeFlexibility'].fields.append( + _ARRAYFEATURETYPE.fields_by_name['enumeratedShapes']) +_ARRAYFEATURETYPE.fields_by_name['enumeratedShapes'].containing_oneof = _ARRAYFEATURETYPE.oneofs_by_name['ShapeFlexibility'] +_ARRAYFEATURETYPE.oneofs_by_name['ShapeFlexibility'].fields.append( + _ARRAYFEATURETYPE.fields_by_name['shapeRange']) +_ARRAYFEATURETYPE.fields_by_name['shapeRange'].containing_oneof = _ARRAYFEATURETYPE.oneofs_by_name['ShapeFlexibility'] +_ARRAYFEATURETYPE.oneofs_by_name['defaultOptionalValue'].fields.append( + _ARRAYFEATURETYPE.fields_by_name['intDefaultValue']) +_ARRAYFEATURETYPE.fields_by_name['intDefaultValue'].containing_oneof = _ARRAYFEATURETYPE.oneofs_by_name['defaultOptionalValue'] +_ARRAYFEATURETYPE.oneofs_by_name['defaultOptionalValue'].fields.append( + _ARRAYFEATURETYPE.fields_by_name['floatDefaultValue']) +_ARRAYFEATURETYPE.fields_by_name['floatDefaultValue'].containing_oneof = _ARRAYFEATURETYPE.oneofs_by_name['defaultOptionalValue'] +_ARRAYFEATURETYPE.oneofs_by_name['defaultOptionalValue'].fields.append( + _ARRAYFEATURETYPE.fields_by_name['doubleDefaultValue']) +_ARRAYFEATURETYPE.fields_by_name['doubleDefaultValue'].containing_oneof = _ARRAYFEATURETYPE.oneofs_by_name['defaultOptionalValue'] +_DICTIONARYFEATURETYPE.fields_by_name['int64KeyType'].message_type = _INT64FEATURETYPE +_DICTIONARYFEATURETYPE.fields_by_name['stringKeyType'].message_type = _STRINGFEATURETYPE +_DICTIONARYFEATURETYPE.oneofs_by_name['KeyType'].fields.append( + _DICTIONARYFEATURETYPE.fields_by_name['int64KeyType']) +_DICTIONARYFEATURETYPE.fields_by_name['int64KeyType'].containing_oneof = _DICTIONARYFEATURETYPE.oneofs_by_name['KeyType'] +_DICTIONARYFEATURETYPE.oneofs_by_name['KeyType'].fields.append( + _DICTIONARYFEATURETYPE.fields_by_name['stringKeyType']) +_DICTIONARYFEATURETYPE.fields_by_name['stringKeyType'].containing_oneof = _DICTIONARYFEATURETYPE.oneofs_by_name['KeyType'] +_SEQUENCEFEATURETYPE.fields_by_name['int64Type'].message_type = _INT64FEATURETYPE +_SEQUENCEFEATURETYPE.fields_by_name['stringType'].message_type = _STRINGFEATURETYPE +_SEQUENCEFEATURETYPE.fields_by_name['sizeRange'].message_type = _SIZERANGE +_SEQUENCEFEATURETYPE.oneofs_by_name['Type'].fields.append( + _SEQUENCEFEATURETYPE.fields_by_name['int64Type']) +_SEQUENCEFEATURETYPE.fields_by_name['int64Type'].containing_oneof = _SEQUENCEFEATURETYPE.oneofs_by_name['Type'] +_SEQUENCEFEATURETYPE.oneofs_by_name['Type'].fields.append( + _SEQUENCEFEATURETYPE.fields_by_name['stringType']) +_SEQUENCEFEATURETYPE.fields_by_name['stringType'].containing_oneof = _SEQUENCEFEATURETYPE.oneofs_by_name['Type'] +_FEATURETYPE.fields_by_name['int64Type'].message_type = _INT64FEATURETYPE +_FEATURETYPE.fields_by_name['doubleType'].message_type = _DOUBLEFEATURETYPE +_FEATURETYPE.fields_by_name['stringType'].message_type = _STRINGFEATURETYPE 
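# --- Illustrative sketch (reviewer annotation, not generated output) ---
# ArrayFeatureType carries the two oneofs wired above: ShapeFlexibility
# (enumeratedShapes vs. shapeRange) and defaultOptionalValue (an int, float,
# or double default for optional inputs). A minimal usage sketch:
from coremltools.proto import FeatureTypes_pb2 as ft

arr = ft.ArrayFeatureType()
arr.dataType = ft.ArrayFeatureType.DOUBLE
enum_shape = arr.enumeratedShapes.shapes.add()
enum_shape.shape.extend([1, 10])   # one allowed shape: (1, 10)
arr.doubleDefaultValue = 0.0       # proto3 oneof: assigning even 0.0 sets it
assert arr.WhichOneof('ShapeFlexibility') == 'enumeratedShapes'
assert arr.WhichOneof('defaultOptionalValue') == 'doubleDefaultValue'
# -----------------------------------------------------------------------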
+_FEATURETYPE.fields_by_name['imageType'].message_type = _IMAGEFEATURETYPE +_FEATURETYPE.fields_by_name['multiArrayType'].message_type = _ARRAYFEATURETYPE +_FEATURETYPE.fields_by_name['dictionaryType'].message_type = _DICTIONARYFEATURETYPE +_FEATURETYPE.fields_by_name['sequenceType'].message_type = _SEQUENCEFEATURETYPE +_FEATURETYPE.oneofs_by_name['Type'].fields.append( + _FEATURETYPE.fields_by_name['int64Type']) +_FEATURETYPE.fields_by_name['int64Type'].containing_oneof = _FEATURETYPE.oneofs_by_name['Type'] +_FEATURETYPE.oneofs_by_name['Type'].fields.append( + _FEATURETYPE.fields_by_name['doubleType']) +_FEATURETYPE.fields_by_name['doubleType'].containing_oneof = _FEATURETYPE.oneofs_by_name['Type'] +_FEATURETYPE.oneofs_by_name['Type'].fields.append( + _FEATURETYPE.fields_by_name['stringType']) +_FEATURETYPE.fields_by_name['stringType'].containing_oneof = _FEATURETYPE.oneofs_by_name['Type'] +_FEATURETYPE.oneofs_by_name['Type'].fields.append( + _FEATURETYPE.fields_by_name['imageType']) +_FEATURETYPE.fields_by_name['imageType'].containing_oneof = _FEATURETYPE.oneofs_by_name['Type'] +_FEATURETYPE.oneofs_by_name['Type'].fields.append( + _FEATURETYPE.fields_by_name['multiArrayType']) +_FEATURETYPE.fields_by_name['multiArrayType'].containing_oneof = _FEATURETYPE.oneofs_by_name['Type'] +_FEATURETYPE.oneofs_by_name['Type'].fields.append( + _FEATURETYPE.fields_by_name['dictionaryType']) +_FEATURETYPE.fields_by_name['dictionaryType'].containing_oneof = _FEATURETYPE.oneofs_by_name['Type'] +_FEATURETYPE.oneofs_by_name['Type'].fields.append( + _FEATURETYPE.fields_by_name['sequenceType']) +_FEATURETYPE.fields_by_name['sequenceType'].containing_oneof = _FEATURETYPE.oneofs_by_name['Type'] +DESCRIPTOR.message_types_by_name['Int64FeatureType'] = _INT64FEATURETYPE +DESCRIPTOR.message_types_by_name['DoubleFeatureType'] = _DOUBLEFEATURETYPE +DESCRIPTOR.message_types_by_name['StringFeatureType'] = _STRINGFEATURETYPE +DESCRIPTOR.message_types_by_name['SizeRange'] = _SIZERANGE +DESCRIPTOR.message_types_by_name['ImageFeatureType'] = _IMAGEFEATURETYPE +DESCRIPTOR.message_types_by_name['ArrayFeatureType'] = _ARRAYFEATURETYPE +DESCRIPTOR.message_types_by_name['DictionaryFeatureType'] = _DICTIONARYFEATURETYPE +DESCRIPTOR.message_types_by_name['SequenceFeatureType'] = _SEQUENCEFEATURETYPE +DESCRIPTOR.message_types_by_name['FeatureType'] = _FEATURETYPE +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Int64FeatureType = _reflection.GeneratedProtocolMessageType('Int64FeatureType', (_message.Message,), dict( + DESCRIPTOR = _INT64FEATURETYPE, + __module__ = 'FeatureTypes_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Int64FeatureType) + )) +_sym_db.RegisterMessage(Int64FeatureType) + +DoubleFeatureType = _reflection.GeneratedProtocolMessageType('DoubleFeatureType', (_message.Message,), dict( + DESCRIPTOR = _DOUBLEFEATURETYPE, + __module__ = 'FeatureTypes_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.DoubleFeatureType) + )) +_sym_db.RegisterMessage(DoubleFeatureType) + +StringFeatureType = _reflection.GeneratedProtocolMessageType('StringFeatureType', (_message.Message,), dict( + DESCRIPTOR = _STRINGFEATURETYPE, + __module__ = 'FeatureTypes_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.StringFeatureType) + )) +_sym_db.RegisterMessage(StringFeatureType) + +SizeRange = _reflection.GeneratedProtocolMessageType('SizeRange', (_message.Message,), dict( + DESCRIPTOR = _SIZERANGE, + __module__ = 'FeatureTypes_pb2' + # 
@@protoc_insertion_point(class_scope:CoreML.Specification.SizeRange) + )) +_sym_db.RegisterMessage(SizeRange) + +ImageFeatureType = _reflection.GeneratedProtocolMessageType('ImageFeatureType', (_message.Message,), dict( + + ImageSize = _reflection.GeneratedProtocolMessageType('ImageSize', (_message.Message,), dict( + DESCRIPTOR = _IMAGEFEATURETYPE_IMAGESIZE, + __module__ = 'FeatureTypes_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ImageFeatureType.ImageSize) + )) + , + + EnumeratedImageSizes = _reflection.GeneratedProtocolMessageType('EnumeratedImageSizes', (_message.Message,), dict( + DESCRIPTOR = _IMAGEFEATURETYPE_ENUMERATEDIMAGESIZES, + __module__ = 'FeatureTypes_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ImageFeatureType.EnumeratedImageSizes) + )) + , + + ImageSizeRange = _reflection.GeneratedProtocolMessageType('ImageSizeRange', (_message.Message,), dict( + DESCRIPTOR = _IMAGEFEATURETYPE_IMAGESIZERANGE, + __module__ = 'FeatureTypes_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ImageFeatureType.ImageSizeRange) + )) + , + DESCRIPTOR = _IMAGEFEATURETYPE, + __module__ = 'FeatureTypes_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ImageFeatureType) + )) +_sym_db.RegisterMessage(ImageFeatureType) +_sym_db.RegisterMessage(ImageFeatureType.ImageSize) +_sym_db.RegisterMessage(ImageFeatureType.EnumeratedImageSizes) +_sym_db.RegisterMessage(ImageFeatureType.ImageSizeRange) + +ArrayFeatureType = _reflection.GeneratedProtocolMessageType('ArrayFeatureType', (_message.Message,), dict( + + Shape = _reflection.GeneratedProtocolMessageType('Shape', (_message.Message,), dict( + DESCRIPTOR = _ARRAYFEATURETYPE_SHAPE, + __module__ = 'FeatureTypes_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ArrayFeatureType.Shape) + )) + , + + EnumeratedShapes = _reflection.GeneratedProtocolMessageType('EnumeratedShapes', (_message.Message,), dict( + DESCRIPTOR = _ARRAYFEATURETYPE_ENUMERATEDSHAPES, + __module__ = 'FeatureTypes_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ArrayFeatureType.EnumeratedShapes) + )) + , + + ShapeRange = _reflection.GeneratedProtocolMessageType('ShapeRange', (_message.Message,), dict( + DESCRIPTOR = _ARRAYFEATURETYPE_SHAPERANGE, + __module__ = 'FeatureTypes_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ArrayFeatureType.ShapeRange) + )) + , + DESCRIPTOR = _ARRAYFEATURETYPE, + __module__ = 'FeatureTypes_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ArrayFeatureType) + )) +_sym_db.RegisterMessage(ArrayFeatureType) +_sym_db.RegisterMessage(ArrayFeatureType.Shape) +_sym_db.RegisterMessage(ArrayFeatureType.EnumeratedShapes) +_sym_db.RegisterMessage(ArrayFeatureType.ShapeRange) + +DictionaryFeatureType = _reflection.GeneratedProtocolMessageType('DictionaryFeatureType', (_message.Message,), dict( + DESCRIPTOR = _DICTIONARYFEATURETYPE, + __module__ = 'FeatureTypes_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.DictionaryFeatureType) + )) +_sym_db.RegisterMessage(DictionaryFeatureType) + +SequenceFeatureType = _reflection.GeneratedProtocolMessageType('SequenceFeatureType', (_message.Message,), dict( + DESCRIPTOR = _SEQUENCEFEATURETYPE, + __module__ = 'FeatureTypes_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SequenceFeatureType) + )) +_sym_db.RegisterMessage(SequenceFeatureType) + +FeatureType = _reflection.GeneratedProtocolMessageType('FeatureType', (_message.Message,), dict( + DESCRIPTOR = 
_FEATURETYPE, + __module__ = 'FeatureTypes_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.FeatureType) + )) +_sym_db.RegisterMessage(FeatureType) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/FeatureVectorizer_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/FeatureVectorizer_pb2.py new file mode 100644 index 00000000..ede75fc4 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/FeatureVectorizer_pb2.py @@ -0,0 +1,118 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: FeatureVectorizer.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='FeatureVectorizer.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x17\x46\x65\x61tureVectorizer.proto\x12\x14\x43oreML.Specification\"\x98\x01\n\x11\x46\x65\x61tureVectorizer\x12\x46\n\tinputList\x18\x01 \x03(\x0b\x32\x33.CoreML.Specification.FeatureVectorizer.InputColumn\x1a;\n\x0bInputColumn\x12\x13\n\x0binputColumn\x18\x01 \x01(\t\x12\x17\n\x0finputDimensions\x18\x02 \x01(\x04\x42\x02H\x03\x62\x06proto3') +) + + + + +_FEATUREVECTORIZER_INPUTCOLUMN = _descriptor.Descriptor( + name='InputColumn', + full_name='CoreML.Specification.FeatureVectorizer.InputColumn', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='inputColumn', full_name='CoreML.Specification.FeatureVectorizer.InputColumn.inputColumn', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='inputDimensions', full_name='CoreML.Specification.FeatureVectorizer.InputColumn.inputDimensions', index=1, + number=2, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=143, + serialized_end=202, +) + +_FEATUREVECTORIZER = _descriptor.Descriptor( + name='FeatureVectorizer', + full_name='CoreML.Specification.FeatureVectorizer', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='inputList', full_name='CoreML.Specification.FeatureVectorizer.inputList', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_FEATUREVECTORIZER_INPUTCOLUMN, ], + enum_types=[ + ], + options=None, + is_extendable=False, + 
syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=50, + serialized_end=202, +) + +_FEATUREVECTORIZER_INPUTCOLUMN.containing_type = _FEATUREVECTORIZER +_FEATUREVECTORIZER.fields_by_name['inputList'].message_type = _FEATUREVECTORIZER_INPUTCOLUMN +DESCRIPTOR.message_types_by_name['FeatureVectorizer'] = _FEATUREVECTORIZER +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +FeatureVectorizer = _reflection.GeneratedProtocolMessageType('FeatureVectorizer', (_message.Message,), dict( + + InputColumn = _reflection.GeneratedProtocolMessageType('InputColumn', (_message.Message,), dict( + DESCRIPTOR = _FEATUREVECTORIZER_INPUTCOLUMN, + __module__ = 'FeatureVectorizer_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.FeatureVectorizer.InputColumn) + )) + , + DESCRIPTOR = _FEATUREVECTORIZER, + __module__ = 'FeatureVectorizer_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.FeatureVectorizer) + )) +_sym_db.RegisterMessage(FeatureVectorizer) +_sym_db.RegisterMessage(FeatureVectorizer.InputColumn) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/GLMClassifier_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/GLMClassifier_pb2.py new file mode 100644 index 00000000..134de9b3 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/GLMClassifier_pb2.py @@ -0,0 +1,215 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: GLMClassifier.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . 
import DataStructures_pb2 as DataStructures__pb2 +try: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2 + +from .DataStructures_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='GLMClassifier.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x13GLMClassifier.proto\x12\x14\x43oreML.Specification\x1a\x14\x44\x61taStructures.proto\"\x9c\x04\n\rGLMClassifier\x12@\n\x07weights\x18\x01 \x03(\x0b\x32/.CoreML.Specification.GLMClassifier.DoubleArray\x12\x0e\n\x06offset\x18\x02 \x03(\x01\x12\\\n\x17postEvaluationTransform\x18\x03 \x01(\x0e\x32;.CoreML.Specification.GLMClassifier.PostEvaluationTransform\x12H\n\rclassEncoding\x18\x04 \x01(\x0e\x32\x31.CoreML.Specification.GLMClassifier.ClassEncoding\x12?\n\x11stringClassLabels\x18\x64 \x01(\x0b\x32\".CoreML.Specification.StringVectorH\x00\x12=\n\x10int64ClassLabels\x18\x65 \x01(\x0b\x32!.CoreML.Specification.Int64VectorH\x00\x1a\x1c\n\x0b\x44oubleArray\x12\r\n\x05value\x18\x01 \x03(\x01\"0\n\x17PostEvaluationTransform\x12\t\n\x05Logit\x10\x00\x12\n\n\x06Probit\x10\x01\"2\n\rClassEncoding\x12\x12\n\x0eReferenceClass\x10\x00\x12\r\n\tOneVsRest\x10\x01\x42\r\n\x0b\x43lassLabelsB\x02H\x03P\x00\x62\x06proto3') + , + dependencies=[DataStructures__pb2.DESCRIPTOR,], + public_dependencies=[DataStructures__pb2.DESCRIPTOR,]) + + + +_GLMCLASSIFIER_POSTEVALUATIONTRANSFORM = _descriptor.EnumDescriptor( + name='PostEvaluationTransform', + full_name='CoreML.Specification.GLMClassifier.PostEvaluationTransform', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='Logit', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Probit', index=1, number=1, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=493, + serialized_end=541, +) +_sym_db.RegisterEnumDescriptor(_GLMCLASSIFIER_POSTEVALUATIONTRANSFORM) + +_GLMCLASSIFIER_CLASSENCODING = _descriptor.EnumDescriptor( + name='ClassEncoding', + full_name='CoreML.Specification.GLMClassifier.ClassEncoding', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='ReferenceClass', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='OneVsRest', index=1, number=1, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=543, + serialized_end=593, +) +_sym_db.RegisterEnumDescriptor(_GLMCLASSIFIER_CLASSENCODING) + + +_GLMCLASSIFIER_DOUBLEARRAY = _descriptor.Descriptor( + name='DoubleArray', + full_name='CoreML.Specification.GLMClassifier.DoubleArray', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.GLMClassifier.DoubleArray.value', index=0, + number=1, type=1, cpp_type=5, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=463, + serialized_end=491, +) + +_GLMCLASSIFIER = _descriptor.Descriptor( + name='GLMClassifier', + full_name='CoreML.Specification.GLMClassifier', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + 
name='weights', full_name='CoreML.Specification.GLMClassifier.weights', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='offset', full_name='CoreML.Specification.GLMClassifier.offset', index=1, + number=2, type=1, cpp_type=5, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='postEvaluationTransform', full_name='CoreML.Specification.GLMClassifier.postEvaluationTransform', index=2, + number=3, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='classEncoding', full_name='CoreML.Specification.GLMClassifier.classEncoding', index=3, + number=4, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stringClassLabels', full_name='CoreML.Specification.GLMClassifier.stringClassLabels', index=4, + number=100, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='int64ClassLabels', full_name='CoreML.Specification.GLMClassifier.int64ClassLabels', index=5, + number=101, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_GLMCLASSIFIER_DOUBLEARRAY, ], + enum_types=[ + _GLMCLASSIFIER_POSTEVALUATIONTRANSFORM, + _GLMCLASSIFIER_CLASSENCODING, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='ClassLabels', full_name='CoreML.Specification.GLMClassifier.ClassLabels', + index=0, containing_type=None, fields=[]), + ], + serialized_start=68, + serialized_end=608, +) + +_GLMCLASSIFIER_DOUBLEARRAY.containing_type = _GLMCLASSIFIER +_GLMCLASSIFIER.fields_by_name['weights'].message_type = _GLMCLASSIFIER_DOUBLEARRAY +_GLMCLASSIFIER.fields_by_name['postEvaluationTransform'].enum_type = _GLMCLASSIFIER_POSTEVALUATIONTRANSFORM +_GLMCLASSIFIER.fields_by_name['classEncoding'].enum_type = _GLMCLASSIFIER_CLASSENCODING +_GLMCLASSIFIER.fields_by_name['stringClassLabels'].message_type = DataStructures__pb2._STRINGVECTOR +_GLMCLASSIFIER.fields_by_name['int64ClassLabels'].message_type = DataStructures__pb2._INT64VECTOR +_GLMCLASSIFIER_POSTEVALUATIONTRANSFORM.containing_type = _GLMCLASSIFIER +_GLMCLASSIFIER_CLASSENCODING.containing_type = _GLMCLASSIFIER +_GLMCLASSIFIER.oneofs_by_name['ClassLabels'].fields.append( + _GLMCLASSIFIER.fields_by_name['stringClassLabels']) +_GLMCLASSIFIER.fields_by_name['stringClassLabels'].containing_oneof = _GLMCLASSIFIER.oneofs_by_name['ClassLabels'] +_GLMCLASSIFIER.oneofs_by_name['ClassLabels'].fields.append( + _GLMCLASSIFIER.fields_by_name['int64ClassLabels']) +_GLMCLASSIFIER.fields_by_name['int64ClassLabels'].containing_oneof = 
_GLMCLASSIFIER.oneofs_by_name['ClassLabels'] +DESCRIPTOR.message_types_by_name['GLMClassifier'] = _GLMCLASSIFIER +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +GLMClassifier = _reflection.GeneratedProtocolMessageType('GLMClassifier', (_message.Message,), dict( + + DoubleArray = _reflection.GeneratedProtocolMessageType('DoubleArray', (_message.Message,), dict( + DESCRIPTOR = _GLMCLASSIFIER_DOUBLEARRAY, + __module__ = 'GLMClassifier_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.GLMClassifier.DoubleArray) + )) + , + DESCRIPTOR = _GLMCLASSIFIER, + __module__ = 'GLMClassifier_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.GLMClassifier) + )) +_sym_db.RegisterMessage(GLMClassifier) +_sym_db.RegisterMessage(GLMClassifier.DoubleArray) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/GLMRegressor_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/GLMRegressor_pb2.py new file mode 100644 index 00000000..cb7491e7 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/GLMRegressor_pb2.py @@ -0,0 +1,154 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: GLMRegressor.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='GLMRegressor.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x12GLMRegressor.proto\x12\x14\x43oreML.Specification\"\x9d\x02\n\x0cGLMRegressor\x12?\n\x07weights\x18\x01 \x03(\x0b\x32..CoreML.Specification.GLMRegressor.DoubleArray\x12\x0e\n\x06offset\x18\x02 \x03(\x01\x12[\n\x17postEvaluationTransform\x18\x03 \x01(\x0e\x32:.CoreML.Specification.GLMRegressor.PostEvaluationTransform\x1a\x1c\n\x0b\x44oubleArray\x12\r\n\x05value\x18\x01 \x03(\x01\"A\n\x17PostEvaluationTransform\x12\x0f\n\x0bNoTransform\x10\x00\x12\t\n\x05Logit\x10\x01\x12\n\n\x06Probit\x10\x02\x42\x02H\x03\x62\x06proto3') +) + + + +_GLMREGRESSOR_POSTEVALUATIONTRANSFORM = _descriptor.EnumDescriptor( + name='PostEvaluationTransform', + full_name='CoreML.Specification.GLMRegressor.PostEvaluationTransform', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='NoTransform', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Logit', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Probit', index=2, number=2, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=265, + serialized_end=330, +) +_sym_db.RegisterEnumDescriptor(_GLMREGRESSOR_POSTEVALUATIONTRANSFORM) + + +_GLMREGRESSOR_DOUBLEARRAY = _descriptor.Descriptor( + name='DoubleArray', + full_name='CoreML.Specification.GLMRegressor.DoubleArray', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.GLMRegressor.DoubleArray.value', index=0, + number=1, type=1, 
cpp_type=5, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=235, + serialized_end=263, +) + +_GLMREGRESSOR = _descriptor.Descriptor( + name='GLMRegressor', + full_name='CoreML.Specification.GLMRegressor', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='weights', full_name='CoreML.Specification.GLMRegressor.weights', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='offset', full_name='CoreML.Specification.GLMRegressor.offset', index=1, + number=2, type=1, cpp_type=5, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='postEvaluationTransform', full_name='CoreML.Specification.GLMRegressor.postEvaluationTransform', index=2, + number=3, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_GLMREGRESSOR_DOUBLEARRAY, ], + enum_types=[ + _GLMREGRESSOR_POSTEVALUATIONTRANSFORM, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=45, + serialized_end=330, +) + +_GLMREGRESSOR_DOUBLEARRAY.containing_type = _GLMREGRESSOR +_GLMREGRESSOR.fields_by_name['weights'].message_type = _GLMREGRESSOR_DOUBLEARRAY +_GLMREGRESSOR.fields_by_name['postEvaluationTransform'].enum_type = _GLMREGRESSOR_POSTEVALUATIONTRANSFORM +_GLMREGRESSOR_POSTEVALUATIONTRANSFORM.containing_type = _GLMREGRESSOR +DESCRIPTOR.message_types_by_name['GLMRegressor'] = _GLMREGRESSOR +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +GLMRegressor = _reflection.GeneratedProtocolMessageType('GLMRegressor', (_message.Message,), dict( + + DoubleArray = _reflection.GeneratedProtocolMessageType('DoubleArray', (_message.Message,), dict( + DESCRIPTOR = _GLMREGRESSOR_DOUBLEARRAY, + __module__ = 'GLMRegressor_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.GLMRegressor.DoubleArray) + )) + , + DESCRIPTOR = _GLMREGRESSOR, + __module__ = 'GLMRegressor_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.GLMRegressor) + )) +_sym_db.RegisterMessage(GLMRegressor) +_sym_db.RegisterMessage(GLMRegressor.DoubleArray) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/Gazetteer_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/Gazetteer_pb2.py new file mode 100644 index 00000000..4c917353 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/Gazetteer_pb2.py @@ -0,0 +1,107 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
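# --- Illustrative sketch (reviewer annotation, not generated output) ---
# For the GLMRegressor_pb2 module ending above: per the CoreML spec, weights
# is a repeated DoubleArray (one per target dimension) and offset a repeated
# double, so a single-output linear model y = w.x + b looks like:
from coremltools.proto import GLMRegressor_pb2 as glm

reg = glm.GLMRegressor()
w = reg.weights.add()
w.value.extend([0.5, -1.2, 3.0])
reg.offset.append(0.1)
reg.postEvaluationTransform = glm.GLMRegressor.NoTransform
# -----------------------------------------------------------------------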
+# source: Gazetteer.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . import DataStructures_pb2 as DataStructures__pb2 +try: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2 + +from .DataStructures_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='Gazetteer.proto', + package='CoreML.Specification.CoreMLModels', + syntax='proto3', + serialized_pb=_b('\n\x0fGazetteer.proto\x12!CoreML.Specification.CoreMLModels\x1a\x14\x44\x61taStructures.proto\"\x9c\x01\n\tGazetteer\x12\x10\n\x08revision\x18\x01 \x01(\r\x12\x10\n\x08language\x18\n \x01(\t\x12\x1a\n\x12modelParameterData\x18\x64 \x01(\x0c\x12@\n\x11stringClassLabels\x18\xc8\x01 \x01(\x0b\x32\".CoreML.Specification.StringVectorH\x00\x42\r\n\x0b\x43lassLabelsB\x02H\x03P\x00\x62\x06proto3') + , + dependencies=[DataStructures__pb2.DESCRIPTOR,], + public_dependencies=[DataStructures__pb2.DESCRIPTOR,]) + + + + +_GAZETTEER = _descriptor.Descriptor( + name='Gazetteer', + full_name='CoreML.Specification.CoreMLModels.Gazetteer', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='revision', full_name='CoreML.Specification.CoreMLModels.Gazetteer.revision', index=0, + number=1, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='language', full_name='CoreML.Specification.CoreMLModels.Gazetteer.language', index=1, + number=10, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='modelParameterData', full_name='CoreML.Specification.CoreMLModels.Gazetteer.modelParameterData', index=2, + number=100, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stringClassLabels', full_name='CoreML.Specification.CoreMLModels.Gazetteer.stringClassLabels', index=3, + number=200, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='ClassLabels', full_name='CoreML.Specification.CoreMLModels.Gazetteer.ClassLabels', + index=0, containing_type=None, fields=[]), + ], + serialized_start=77, + serialized_end=233, +) + +_GAZETTEER.fields_by_name['stringClassLabels'].message_type = DataStructures__pb2._STRINGVECTOR +_GAZETTEER.oneofs_by_name['ClassLabels'].fields.append( + _GAZETTEER.fields_by_name['stringClassLabels']) 
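# --- Illustrative sketch (reviewer annotation, not generated output) ---
# The ClassLabels oneof being wired here exposes the gazetteer's output
# labels as a StringVector; modelParameterData holds the compiled bytes.
from coremltools.proto import Gazetteer_pb2 as gz

g = gz.Gazetteer()
g.revision = 1
g.language = 'en'
g.stringClassLabels.vector.extend(['city', 'person'])
assert g.WhichOneof('ClassLabels') == 'stringClassLabels'
# -----------------------------------------------------------------------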
+_GAZETTEER.fields_by_name['stringClassLabels'].containing_oneof = _GAZETTEER.oneofs_by_name['ClassLabels'] +DESCRIPTOR.message_types_by_name['Gazetteer'] = _GAZETTEER +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Gazetteer = _reflection.GeneratedProtocolMessageType('Gazetteer', (_message.Message,), dict( + DESCRIPTOR = _GAZETTEER, + __module__ = 'Gazetteer_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CoreMLModels.Gazetteer) + )) +_sym_db.RegisterMessage(Gazetteer) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/Identity_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/Identity_pb2.py new file mode 100644 index 00000000..c6411dd5 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/Identity_pb2.py @@ -0,0 +1,64 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: Identity.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='Identity.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x0eIdentity.proto\x12\x14\x43oreML.Specification\"\n\n\x08IdentityB\x02H\x03\x62\x06proto3') +) + + + + +_IDENTITY = _descriptor.Descriptor( + name='Identity', + full_name='CoreML.Specification.Identity', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=40, + serialized_end=50, +) + +DESCRIPTOR.message_types_by_name['Identity'] = _IDENTITY +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Identity = _reflection.GeneratedProtocolMessageType('Identity', (_message.Message,), dict( + DESCRIPTOR = _IDENTITY, + __module__ = 'Identity_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Identity) + )) +_sym_db.RegisterMessage(Identity) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/Imputer_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/Imputer_pb2.py new file mode 100644 index 00000000..e18f0f3d --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/Imputer_pb2.py @@ -0,0 +1,182 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: Imputer.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . 
import DataStructures_pb2 as DataStructures__pb2 +try: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2 + +from .DataStructures_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='Imputer.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\rImputer.proto\x12\x14\x43oreML.Specification\x1a\x14\x44\x61taStructures.proto\"\xf3\x03\n\x07Imputer\x12\x1c\n\x12imputedDoubleValue\x18\x01 \x01(\x01H\x00\x12\x1b\n\x11imputedInt64Value\x18\x02 \x01(\x03H\x00\x12\x1c\n\x12imputedStringValue\x18\x03 \x01(\tH\x00\x12@\n\x12imputedDoubleArray\x18\x04 \x01(\x0b\x32\".CoreML.Specification.DoubleVectorH\x00\x12>\n\x11imputedInt64Array\x18\x05 \x01(\x0b\x32!.CoreML.Specification.Int64VectorH\x00\x12J\n\x17imputedStringDictionary\x18\x06 \x01(\x0b\x32\'.CoreML.Specification.StringToDoubleMapH\x00\x12H\n\x16imputedInt64Dictionary\x18\x07 \x01(\x0b\x32&.CoreML.Specification.Int64ToDoubleMapH\x00\x12\x1c\n\x12replaceDoubleValue\x18\x0b \x01(\x01H\x01\x12\x1b\n\x11replaceInt64Value\x18\x0c \x01(\x03H\x01\x12\x1c\n\x12replaceStringValue\x18\r \x01(\tH\x01\x42\x0e\n\x0cImputedValueB\x0e\n\x0cReplaceValueB\x02H\x03P\x00\x62\x06proto3') + , + dependencies=[DataStructures__pb2.DESCRIPTOR,], + public_dependencies=[DataStructures__pb2.DESCRIPTOR,]) + + + + +_IMPUTER = _descriptor.Descriptor( + name='Imputer', + full_name='CoreML.Specification.Imputer', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='imputedDoubleValue', full_name='CoreML.Specification.Imputer.imputedDoubleValue', index=0, + number=1, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='imputedInt64Value', full_name='CoreML.Specification.Imputer.imputedInt64Value', index=1, + number=2, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='imputedStringValue', full_name='CoreML.Specification.Imputer.imputedStringValue', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='imputedDoubleArray', full_name='CoreML.Specification.Imputer.imputedDoubleArray', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='imputedInt64Array', full_name='CoreML.Specification.Imputer.imputedInt64Array', index=4, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='imputedStringDictionary', full_name='CoreML.Specification.Imputer.imputedStringDictionary', index=5, + number=6, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='imputedInt64Dictionary', full_name='CoreML.Specification.Imputer.imputedInt64Dictionary', index=6, + number=7, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='replaceDoubleValue', full_name='CoreML.Specification.Imputer.replaceDoubleValue', index=7, + number=11, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='replaceInt64Value', full_name='CoreML.Specification.Imputer.replaceInt64Value', index=8, + number=12, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='replaceStringValue', full_name='CoreML.Specification.Imputer.replaceStringValue', index=9, + number=13, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='ImputedValue', full_name='CoreML.Specification.Imputer.ImputedValue', + index=0, containing_type=None, fields=[]), + _descriptor.OneofDescriptor( + name='ReplaceValue', full_name='CoreML.Specification.Imputer.ReplaceValue', + index=1, containing_type=None, fields=[]), + ], + serialized_start=62, + serialized_end=561, +) + +_IMPUTER.fields_by_name['imputedDoubleArray'].message_type = DataStructures__pb2._DOUBLEVECTOR +_IMPUTER.fields_by_name['imputedInt64Array'].message_type = DataStructures__pb2._INT64VECTOR +_IMPUTER.fields_by_name['imputedStringDictionary'].message_type = DataStructures__pb2._STRINGTODOUBLEMAP +_IMPUTER.fields_by_name['imputedInt64Dictionary'].message_type = DataStructures__pb2._INT64TODOUBLEMAP +_IMPUTER.oneofs_by_name['ImputedValue'].fields.append( + _IMPUTER.fields_by_name['imputedDoubleValue']) +_IMPUTER.fields_by_name['imputedDoubleValue'].containing_oneof = _IMPUTER.oneofs_by_name['ImputedValue'] +_IMPUTER.oneofs_by_name['ImputedValue'].fields.append( + _IMPUTER.fields_by_name['imputedInt64Value']) +_IMPUTER.fields_by_name['imputedInt64Value'].containing_oneof = _IMPUTER.oneofs_by_name['ImputedValue'] +_IMPUTER.oneofs_by_name['ImputedValue'].fields.append( + _IMPUTER.fields_by_name['imputedStringValue']) +_IMPUTER.fields_by_name['imputedStringValue'].containing_oneof = _IMPUTER.oneofs_by_name['ImputedValue'] +_IMPUTER.oneofs_by_name['ImputedValue'].fields.append( + _IMPUTER.fields_by_name['imputedDoubleArray']) +_IMPUTER.fields_by_name['imputedDoubleArray'].containing_oneof = _IMPUTER.oneofs_by_name['ImputedValue'] +_IMPUTER.oneofs_by_name['ImputedValue'].fields.append( + _IMPUTER.fields_by_name['imputedInt64Array']) +_IMPUTER.fields_by_name['imputedInt64Array'].containing_oneof = _IMPUTER.oneofs_by_name['ImputedValue'] +_IMPUTER.oneofs_by_name['ImputedValue'].fields.append( + _IMPUTER.fields_by_name['imputedStringDictionary']) 
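# --- Illustrative sketch (reviewer annotation, not generated output) ---
# Imputer pairs the two oneofs wired in this block: ImputedValue (the
# substitute) and ReplaceValue (the sentinel that triggers substitution).
# For example, replacing the sentinel -999.0 with 0.0:
from coremltools.proto import Imputer_pb2 as imp

im = imp.Imputer()
im.imputedDoubleValue = 0.0
im.replaceDoubleValue = -999.0
assert im.WhichOneof('ImputedValue') == 'imputedDoubleValue'
assert im.WhichOneof('ReplaceValue') == 'replaceDoubleValue'
# -----------------------------------------------------------------------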
+_IMPUTER.fields_by_name['imputedStringDictionary'].containing_oneof = _IMPUTER.oneofs_by_name['ImputedValue'] +_IMPUTER.oneofs_by_name['ImputedValue'].fields.append( + _IMPUTER.fields_by_name['imputedInt64Dictionary']) +_IMPUTER.fields_by_name['imputedInt64Dictionary'].containing_oneof = _IMPUTER.oneofs_by_name['ImputedValue'] +_IMPUTER.oneofs_by_name['ReplaceValue'].fields.append( + _IMPUTER.fields_by_name['replaceDoubleValue']) +_IMPUTER.fields_by_name['replaceDoubleValue'].containing_oneof = _IMPUTER.oneofs_by_name['ReplaceValue'] +_IMPUTER.oneofs_by_name['ReplaceValue'].fields.append( + _IMPUTER.fields_by_name['replaceInt64Value']) +_IMPUTER.fields_by_name['replaceInt64Value'].containing_oneof = _IMPUTER.oneofs_by_name['ReplaceValue'] +_IMPUTER.oneofs_by_name['ReplaceValue'].fields.append( + _IMPUTER.fields_by_name['replaceStringValue']) +_IMPUTER.fields_by_name['replaceStringValue'].containing_oneof = _IMPUTER.oneofs_by_name['ReplaceValue'] +DESCRIPTOR.message_types_by_name['Imputer'] = _IMPUTER +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Imputer = _reflection.GeneratedProtocolMessageType('Imputer', (_message.Message,), dict( + DESCRIPTOR = _IMPUTER, + __module__ = 'Imputer_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Imputer) + )) +_sym_db.RegisterMessage(Imputer) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/ItemSimilarityRecommender_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/ItemSimilarityRecommender_pb2.py new file mode 100644 index 00000000..b70f3c10 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/ItemSimilarityRecommender_pb2.py @@ -0,0 +1,238 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: ItemSimilarityRecommender.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . 
import DataStructures_pb2 as DataStructures__pb2 +try: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2 + +from .DataStructures_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='ItemSimilarityRecommender.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x1fItemSimilarityRecommender.proto\x12\x14\x43oreML.Specification\x1a\x14\x44\x61taStructures.proto\"\xb2\x05\n\x19ItemSimilarityRecommender\x12Z\n\x14itemItemSimilarities\x18\x01 \x03(\x0b\x32<.CoreML.Specification.ItemSimilarityRecommender.SimilarItems\x12\x39\n\ritemStringIds\x18\x02 \x01(\x0b\x32\".CoreML.Specification.StringVector\x12\x37\n\x0citemInt64Ids\x18\x03 \x01(\x0b\x32!.CoreML.Specification.Int64Vector\x12\x1c\n\x14itemInputFeatureName\x18\n \x01(\t\x12*\n\"numRecommendationsInputFeatureName\x18\x0b \x01(\t\x12\'\n\x1fitemRestrictionInputFeatureName\x18\x0c \x01(\t\x12%\n\x1ditemExclusionInputFeatureName\x18\r \x01(\t\x12,\n$recommendedItemListOutputFeatureName\x18\x14 \x01(\t\x12-\n%recommendedItemScoreOutputFeatureName\x18\x15 \x01(\t\x1a\x38\n\rConnectedItem\x12\x0e\n\x06itemId\x18\x01 \x01(\x04\x12\x17\n\x0fsimilarityScore\x18\x02 \x01(\x01\x1a\x93\x01\n\x0cSimilarItems\x12\x0e\n\x06itemId\x18\x01 \x01(\x04\x12V\n\x0fsimilarItemList\x18\x02 \x03(\x0b\x32=.CoreML.Specification.ItemSimilarityRecommender.ConnectedItem\x12\x1b\n\x13itemScoreAdjustment\x18\x03 \x01(\x01\x42\x02H\x03P\x00\x62\x06proto3') + , + dependencies=[DataStructures__pb2.DESCRIPTOR,], + public_dependencies=[DataStructures__pb2.DESCRIPTOR,]) + + + + +_ITEMSIMILARITYRECOMMENDER_CONNECTEDITEM = _descriptor.Descriptor( + name='ConnectedItem', + full_name='CoreML.Specification.ItemSimilarityRecommender.ConnectedItem', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='itemId', full_name='CoreML.Specification.ItemSimilarityRecommender.ConnectedItem.itemId', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='similarityScore', full_name='CoreML.Specification.ItemSimilarityRecommender.ConnectedItem.similarityScore', index=1, + number=2, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=564, + serialized_end=620, +) + +_ITEMSIMILARITYRECOMMENDER_SIMILARITEMS = _descriptor.Descriptor( + name='SimilarItems', + full_name='CoreML.Specification.ItemSimilarityRecommender.SimilarItems', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='itemId', full_name='CoreML.Specification.ItemSimilarityRecommender.SimilarItems.itemId', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='similarItemList', full_name='CoreML.Specification.ItemSimilarityRecommender.SimilarItems.similarItemList', index=1, + 
number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='itemScoreAdjustment', full_name='CoreML.Specification.ItemSimilarityRecommender.SimilarItems.itemScoreAdjustment', index=2, + number=3, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=623, + serialized_end=770, +) + +_ITEMSIMILARITYRECOMMENDER = _descriptor.Descriptor( + name='ItemSimilarityRecommender', + full_name='CoreML.Specification.ItemSimilarityRecommender', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='itemItemSimilarities', full_name='CoreML.Specification.ItemSimilarityRecommender.itemItemSimilarities', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='itemStringIds', full_name='CoreML.Specification.ItemSimilarityRecommender.itemStringIds', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='itemInt64Ids', full_name='CoreML.Specification.ItemSimilarityRecommender.itemInt64Ids', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='itemInputFeatureName', full_name='CoreML.Specification.ItemSimilarityRecommender.itemInputFeatureName', index=3, + number=10, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='numRecommendationsInputFeatureName', full_name='CoreML.Specification.ItemSimilarityRecommender.numRecommendationsInputFeatureName', index=4, + number=11, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='itemRestrictionInputFeatureName', full_name='CoreML.Specification.ItemSimilarityRecommender.itemRestrictionInputFeatureName', index=5, + number=12, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='itemExclusionInputFeatureName', full_name='CoreML.Specification.ItemSimilarityRecommender.itemExclusionInputFeatureName', index=6, + number=13, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, 
enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='recommendedItemListOutputFeatureName', full_name='CoreML.Specification.ItemSimilarityRecommender.recommendedItemListOutputFeatureName', index=7, + number=20, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='recommendedItemScoreOutputFeatureName', full_name='CoreML.Specification.ItemSimilarityRecommender.recommendedItemScoreOutputFeatureName', index=8, + number=21, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_ITEMSIMILARITYRECOMMENDER_CONNECTEDITEM, _ITEMSIMILARITYRECOMMENDER_SIMILARITEMS, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=80, + serialized_end=770, +) + +_ITEMSIMILARITYRECOMMENDER_CONNECTEDITEM.containing_type = _ITEMSIMILARITYRECOMMENDER +_ITEMSIMILARITYRECOMMENDER_SIMILARITEMS.fields_by_name['similarItemList'].message_type = _ITEMSIMILARITYRECOMMENDER_CONNECTEDITEM +_ITEMSIMILARITYRECOMMENDER_SIMILARITEMS.containing_type = _ITEMSIMILARITYRECOMMENDER +_ITEMSIMILARITYRECOMMENDER.fields_by_name['itemItemSimilarities'].message_type = _ITEMSIMILARITYRECOMMENDER_SIMILARITEMS +_ITEMSIMILARITYRECOMMENDER.fields_by_name['itemStringIds'].message_type = DataStructures__pb2._STRINGVECTOR +_ITEMSIMILARITYRECOMMENDER.fields_by_name['itemInt64Ids'].message_type = DataStructures__pb2._INT64VECTOR +DESCRIPTOR.message_types_by_name['ItemSimilarityRecommender'] = _ITEMSIMILARITYRECOMMENDER +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +ItemSimilarityRecommender = _reflection.GeneratedProtocolMessageType('ItemSimilarityRecommender', (_message.Message,), dict( + + ConnectedItem = _reflection.GeneratedProtocolMessageType('ConnectedItem', (_message.Message,), dict( + DESCRIPTOR = _ITEMSIMILARITYRECOMMENDER_CONNECTEDITEM, + __module__ = 'ItemSimilarityRecommender_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ItemSimilarityRecommender.ConnectedItem) + )) + , + + SimilarItems = _reflection.GeneratedProtocolMessageType('SimilarItems', (_message.Message,), dict( + DESCRIPTOR = _ITEMSIMILARITYRECOMMENDER_SIMILARITEMS, + __module__ = 'ItemSimilarityRecommender_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ItemSimilarityRecommender.SimilarItems) + )) + , + DESCRIPTOR = _ITEMSIMILARITYRECOMMENDER, + __module__ = 'ItemSimilarityRecommender_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ItemSimilarityRecommender) + )) +_sym_db.RegisterMessage(ItemSimilarityRecommender) +_sym_db.RegisterMessage(ItemSimilarityRecommender.ConnectedItem) +_sym_db.RegisterMessage(ItemSimilarityRecommender.SimilarItems) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/LinkedModel_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/LinkedModel_pb2.py new file mode 100644 index 00000000..325492d1 --- /dev/null +++ 
b/__packaged__/coreml/.python_dependencies/coremltools/proto/LinkedModel_pb2.py @@ -0,0 +1,138 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: LinkedModel.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . import Parameters_pb2 as Parameters__pb2 +try: + DataStructures__pb2 = Parameters__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = Parameters__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = Parameters__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = Parameters__pb2.FeatureTypes_pb2 + +from .Parameters_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='LinkedModel.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x11LinkedModel.proto\x12\x14\x43oreML.Specification\x1a\x10Parameters.proto\"[\n\x0bLinkedModel\x12@\n\x0flinkedModelFile\x18\x01 \x01(\x0b\x32%.CoreML.Specification.LinkedModelFileH\x00\x42\n\n\x08LinkType\"\x9b\x01\n\x0fLinkedModelFile\x12\x42\n\x13linkedModelFileName\x18\x01 \x01(\x0b\x32%.CoreML.Specification.StringParameter\x12\x44\n\x15linkedModelSearchPath\x18\x02 \x01(\x0b\x32%.CoreML.Specification.StringParameterB\x02H\x03P\x00\x62\x06proto3') + , + dependencies=[Parameters__pb2.DESCRIPTOR,], + public_dependencies=[Parameters__pb2.DESCRIPTOR,]) + + + + +_LINKEDMODEL = _descriptor.Descriptor( + name='LinkedModel', + full_name='CoreML.Specification.LinkedModel', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='linkedModelFile', full_name='CoreML.Specification.LinkedModel.linkedModelFile', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='LinkType', full_name='CoreML.Specification.LinkedModel.LinkType', + index=0, containing_type=None, fields=[]), + ], + serialized_start=61, + serialized_end=152, +) + + +_LINKEDMODELFILE = _descriptor.Descriptor( + name='LinkedModelFile', + full_name='CoreML.Specification.LinkedModelFile', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='linkedModelFileName', full_name='CoreML.Specification.LinkedModelFile.linkedModelFileName', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='linkedModelSearchPath', full_name='CoreML.Specification.LinkedModelFile.linkedModelSearchPath', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + 
], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=155, + serialized_end=310, +) + +_LINKEDMODEL.fields_by_name['linkedModelFile'].message_type = _LINKEDMODELFILE +_LINKEDMODEL.oneofs_by_name['LinkType'].fields.append( + _LINKEDMODEL.fields_by_name['linkedModelFile']) +_LINKEDMODEL.fields_by_name['linkedModelFile'].containing_oneof = _LINKEDMODEL.oneofs_by_name['LinkType'] +_LINKEDMODELFILE.fields_by_name['linkedModelFileName'].message_type = Parameters__pb2._STRINGPARAMETER +_LINKEDMODELFILE.fields_by_name['linkedModelSearchPath'].message_type = Parameters__pb2._STRINGPARAMETER +DESCRIPTOR.message_types_by_name['LinkedModel'] = _LINKEDMODEL +DESCRIPTOR.message_types_by_name['LinkedModelFile'] = _LINKEDMODELFILE +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +LinkedModel = _reflection.GeneratedProtocolMessageType('LinkedModel', (_message.Message,), dict( + DESCRIPTOR = _LINKEDMODEL, + __module__ = 'LinkedModel_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LinkedModel) + )) +_sym_db.RegisterMessage(LinkedModel) + +LinkedModelFile = _reflection.GeneratedProtocolMessageType('LinkedModelFile', (_message.Message,), dict( + DESCRIPTOR = _LINKEDMODELFILE, + __module__ = 'LinkedModel_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LinkedModelFile) + )) +_sym_db.RegisterMessage(LinkedModelFile) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/MIL_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/MIL_pb2.py new file mode 100644 index 00000000..0e9bf64f --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/MIL_pb2.py @@ -0,0 +1,2086 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: MIL.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf.internal import enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='MIL.proto', + package='CoreML.Specification.MILSpec', + syntax='proto3', + serialized_pb=_b('\n\tMIL.proto\x12\x1c\x43oreML.Specification.MILSpec\"\xf3\x02\n\x07Program\x12\x0f\n\x07version\x18\x01 \x01(\x03\x12G\n\tfunctions\x18\x02 \x03(\x0b\x32\x34.CoreML.Specification.MILSpec.Program.FunctionsEntry\x12\x11\n\tdocString\x18\x03 \x01(\t\x12I\n\nattributes\x18\x04 \x03(\x0b\x32\x35.CoreML.Specification.MILSpec.Program.AttributesEntry\x1aX\n\x0e\x46unctionsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32&.CoreML.Specification.MILSpec.Function:\x02\x38\x01\x1aV\n\x0f\x41ttributesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x32\n\x05value\x18\x02 \x01(\x0b\x32#.CoreML.Specification.MILSpec.Value:\x02\x38\x01\"\xbe\x03\n\x08\x46unction\x12<\n\x06inputs\x18\x01 \x03(\x0b\x32,.CoreML.Specification.MILSpec.NamedValueType\x12\r\n\x05opset\x18\x02 \x01(\t\x12_\n\x15\x62lock_specializations\x18\x03 \x03(\x0b\x32@.CoreML.Specification.MILSpec.Function.BlockSpecializationsEntry\x12J\n\nattributes\x18\x04 \x03(\x0b\x32\x36.CoreML.Specification.MILSpec.Function.AttributesEntry\x1a`\n\x19\x42lockSpecializationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x32\n\x05value\x18\x02 \x01(\x0b\x32#.CoreML.Specification.MILSpec.Block:\x02\x38\x01\x1aV\n\x0f\x41ttributesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x32\n\x05value\x18\x02 \x01(\x0b\x32#.CoreML.Specification.MILSpec.Value:\x02\x38\x01\"\xb4\x02\n\x05\x42lock\x12<\n\x06inputs\x18\x01 \x03(\x0b\x32,.CoreML.Specification.MILSpec.NamedValueType\x12\x0f\n\x07outputs\x18\x02 \x03(\t\x12;\n\noperations\x18\x03 \x03(\x0b\x32\'.CoreML.Specification.MILSpec.Operation\x12G\n\nattributes\x18\x04 \x03(\x0b\x32\x33.CoreML.Specification.MILSpec.Block.AttributesEntry\x1aV\n\x0f\x41ttributesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x32\n\x05value\x18\x02 \x01(\x0b\x32#.CoreML.Specification.MILSpec.Value:\x02\x38\x01\"\xa9\x01\n\x08\x41rgument\x12\x41\n\targuments\x18\x01 \x03(\x0b\x32..CoreML.Specification.MILSpec.Argument.Binding\x1aZ\n\x07\x42inding\x12\x0e\n\x04name\x18\x01 \x01(\tH\x00\x12\x34\n\x05value\x18\x02 \x01(\x0b\x32#.CoreML.Specification.MILSpec.ValueH\x00\x42\t\n\x07\x62inding\"\xce\x03\n\tOperation\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x43\n\x06inputs\x18\x02 \x03(\x0b\x32\x33.CoreML.Specification.MILSpec.Operation.InputsEntry\x12=\n\x07outputs\x18\x03 \x03(\x0b\x32,.CoreML.Specification.MILSpec.NamedValueType\x12\x33\n\x06\x62locks\x18\x04 \x03(\x0b\x32#.CoreML.Specification.MILSpec.Block\x12K\n\nattributes\x18\x05 \x03(\x0b\x32\x37.CoreML.Specification.MILSpec.Operation.AttributesEntry\x1aU\n\x0bInputsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32&.CoreML.Specification.MILSpec.Argument:\x02\x38\x01\x1aV\n\x0f\x41ttributesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x32\n\x05value\x18\x02 \x01(\x0b\x32#.CoreML.Specification.MILSpec.Value:\x02\x38\x01\"U\n\x0eNamedValueType\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x35\n\x04type\x18\x02 
\x01(\x0b\x32\'.CoreML.Specification.MILSpec.ValueType\"\x95\x02\n\tValueType\x12>\n\ntensorType\x18\x01 \x01(\x0b\x32(.CoreML.Specification.MILSpec.TensorTypeH\x00\x12:\n\x08listType\x18\x02 \x01(\x0b\x32&.CoreML.Specification.MILSpec.ListTypeH\x00\x12<\n\ttupleType\x18\x03 \x01(\x0b\x32\'.CoreML.Specification.MILSpec.TupleTypeH\x00\x12\x46\n\x0e\x64ictionaryType\x18\x04 \x01(\x0b\x32,.CoreML.Specification.MILSpec.DictionaryTypeH\x00\x42\x06\n\x04type\"\xb7\x02\n\nTensorType\x12\x38\n\x08\x64\x61taType\x18\x01 \x01(\x0e\x32&.CoreML.Specification.MILSpec.DataType\x12\x0c\n\x04rank\x18\x02 \x01(\x03\x12;\n\ndimensions\x18\x03 \x03(\x0b\x32\'.CoreML.Specification.MILSpec.Dimension\x12L\n\nattributes\x18\x04 \x03(\x0b\x32\x38.CoreML.Specification.MILSpec.TensorType.AttributesEntry\x1aV\n\x0f\x41ttributesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x32\n\x05value\x18\x02 \x01(\x0b\x32#.CoreML.Specification.MILSpec.Value:\x02\x38\x01\"C\n\tTupleType\x12\x36\n\x05types\x18\x01 \x03(\x0b\x32\'.CoreML.Specification.MILSpec.ValueType\"z\n\x08ListType\x12\x35\n\x04type\x18\x01 \x01(\x0b\x32\'.CoreML.Specification.MILSpec.ValueType\x12\x37\n\x06length\x18\x02 \x01(\x0b\x32\'.CoreML.Specification.MILSpec.Dimension\"\x86\x01\n\x0e\x44ictionaryType\x12\x38\n\x07keyType\x18\x01 \x01(\x0b\x32\'.CoreML.Specification.MILSpec.ValueType\x12:\n\tvalueType\x18\x02 \x01(\x0b\x32\'.CoreML.Specification.MILSpec.ValueType\"\xfd\x01\n\tDimension\x12M\n\x08\x63onstant\x18\x01 \x01(\x0b\x32\x39.CoreML.Specification.MILSpec.Dimension.ConstantDimensionH\x00\x12K\n\x07unknown\x18\x02 \x01(\x0b\x32\x38.CoreML.Specification.MILSpec.Dimension.UnknownDimensionH\x00\x1a!\n\x11\x43onstantDimension\x12\x0c\n\x04size\x18\x01 \x01(\x04\x1a$\n\x10UnknownDimension\x12\x10\n\x08variadic\x18\x01 \x01(\x08\x42\x0b\n\tdimension\"\xb9\x04\n\x05Value\x12\x11\n\tdocString\x18\x01 \x01(\t\x12\x35\n\x04type\x18\x02 \x01(\x0b\x32\'.CoreML.Specification.MILSpec.ValueType\x12L\n\x0eimmediateValue\x18\x03 \x01(\x0b\x32\x32.CoreML.Specification.MILSpec.Value.ImmediateValueH\x00\x12J\n\rblobFileValue\x18\x05 \x01(\x0b\x32\x31.CoreML.Specification.MILSpec.Value.BlobFileValueH\x00\x1a\x8f\x02\n\x0eImmediateValue\x12;\n\x06tensor\x18\x01 \x01(\x0b\x32).CoreML.Specification.MILSpec.TensorValueH\x00\x12\x39\n\x05tuple\x18\x02 \x01(\x0b\x32(.CoreML.Specification.MILSpec.TupleValueH\x00\x12\x37\n\x04list\x18\x03 \x01(\x0b\x32\'.CoreML.Specification.MILSpec.ListValueH\x00\x12\x43\n\ndictionary\x18\x04 \x01(\x0b\x32-.CoreML.Specification.MILSpec.DictionaryValueH\x00\x42\x07\n\x05value\x1a\x31\n\rBlobFileValue\x12\x10\n\x08\x66ileName\x18\x01 \x01(\t\x12\x0e\n\x06offset\x18\x02 \x01(\x04\x42\x07\n\x05value\"\xac\x06\n\x0bTensorValue\x12J\n\x06\x66loats\x18\x01 \x01(\x0b\x32\x38.CoreML.Specification.MILSpec.TensorValue.RepeatedFloatsH\x00\x12\x46\n\x04ints\x18\x02 \x01(\x0b\x32\x36.CoreML.Specification.MILSpec.TensorValue.RepeatedIntsH\x00\x12H\n\x05\x62ools\x18\x03 \x01(\x0b\x32\x37.CoreML.Specification.MILSpec.TensorValue.RepeatedBoolsH\x00\x12L\n\x07strings\x18\x04 \x01(\x0b\x32\x39.CoreML.Specification.MILSpec.TensorValue.RepeatedStringsH\x00\x12N\n\x08longInts\x18\x05 \x01(\x0b\x32:.CoreML.Specification.MILSpec.TensorValue.RepeatedLongIntsH\x00\x12L\n\x07\x64oubles\x18\x06 \x01(\x0b\x32\x39.CoreML.Specification.MILSpec.TensorValue.RepeatedDoublesH\x00\x12H\n\x05\x62ytes\x18\x07 \x01(\x0b\x32\x37.CoreML.Specification.MILSpec.TensorValue.RepeatedBytesH\x00\x1a$\n\x0eRepeatedFloats\x12\x12\n\x06values\x18\x01 
\x03(\x02\x42\x02\x10\x01\x1a%\n\x0fRepeatedDoubles\x12\x12\n\x06values\x18\x01 \x03(\x01\x42\x02\x10\x01\x1a\"\n\x0cRepeatedInts\x12\x12\n\x06values\x18\x01 \x03(\x05\x42\x02\x10\x01\x1a&\n\x10RepeatedLongInts\x12\x12\n\x06values\x18\x01 \x03(\x03\x42\x02\x10\x01\x1a#\n\rRepeatedBools\x12\x12\n\x06values\x18\x01 \x03(\x08\x42\x02\x10\x01\x1a!\n\x0fRepeatedStrings\x12\x0e\n\x06values\x18\x01 \x03(\t\x1a\x1f\n\rRepeatedBytes\x12\x0e\n\x06values\x18\x01 \x01(\x0c\x42\x07\n\x05value\"A\n\nTupleValue\x12\x33\n\x06values\x18\x01 \x03(\x0b\x32#.CoreML.Specification.MILSpec.Value\"@\n\tListValue\x12\x33\n\x06values\x18\x01 \x03(\x0b\x32#.CoreML.Specification.MILSpec.Value\"\xd3\x01\n\x0f\x44ictionaryValue\x12J\n\x06values\x18\x01 \x03(\x0b\x32:.CoreML.Specification.MILSpec.DictionaryValue.KeyValuePair\x1at\n\x0cKeyValuePair\x12\x30\n\x03key\x18\x01 \x01(\x0b\x32#.CoreML.Specification.MILSpec.Value\x12\x32\n\x05value\x18\x02 \x01(\x0b\x32#.CoreML.Specification.MILSpec.Value*\xb2\x01\n\x08\x44\x61taType\x12\x0f\n\x0bUNUSED_TYPE\x10\x00\x12\x08\n\x04\x42OOL\x10\x01\x12\n\n\x06STRING\x10\x02\x12\x0b\n\x07\x46LOAT16\x10\n\x12\x0b\n\x07\x46LOAT32\x10\x0b\x12\x0b\n\x07\x46LOAT64\x10\x0c\x12\x08\n\x04INT8\x10\x15\x12\t\n\x05INT16\x10\x16\x12\t\n\x05INT32\x10\x17\x12\t\n\x05INT64\x10\x18\x12\t\n\x05UINT8\x10\x1f\x12\n\n\x06UINT16\x10 \x12\n\n\x06UINT32\x10!\x12\n\n\x06UINT64\x10\"B\x02H\x03\x62\x06proto3') +) + +_DATATYPE = _descriptor.EnumDescriptor( + name='DataType', + full_name='CoreML.Specification.MILSpec.DataType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='UNUSED_TYPE', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BOOL', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='STRING', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='FLOAT16', index=3, number=10, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='FLOAT32', index=4, number=11, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='FLOAT64', index=5, number=12, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='INT8', index=6, number=21, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='INT16', index=7, number=22, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='INT32', index=8, number=23, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='INT64', index=9, number=24, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='UINT8', index=10, number=31, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='UINT16', index=11, number=32, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='UINT32', index=12, number=33, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='UINT64', index=13, number=34, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=4816, + serialized_end=4994, +) +_sym_db.RegisterEnumDescriptor(_DATATYPE) + +DataType = enum_type_wrapper.EnumTypeWrapper(_DATATYPE) +UNUSED_TYPE = 0 +BOOL = 1 +STRING = 2 +FLOAT16 = 10 +FLOAT32 = 11 +FLOAT64 = 12 +INT8 = 21 +INT16 = 22 +INT32 = 23 +INT64 = 24 +UINT8 = 31 +UINT16 = 32 +UINT32 = 33 +UINT64 = 34 + + + +_PROGRAM_FUNCTIONSENTRY = _descriptor.Descriptor( + name='FunctionsEntry', + full_name='CoreML.Specification.MILSpec.Program.FunctionsEntry', + filename=None, + 
file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='CoreML.Specification.MILSpec.Program.FunctionsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.MILSpec.Program.FunctionsEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=239, + serialized_end=327, +) + +_PROGRAM_ATTRIBUTESENTRY = _descriptor.Descriptor( + name='AttributesEntry', + full_name='CoreML.Specification.MILSpec.Program.AttributesEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='CoreML.Specification.MILSpec.Program.AttributesEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.MILSpec.Program.AttributesEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=329, + serialized_end=415, +) + +_PROGRAM = _descriptor.Descriptor( + name='Program', + full_name='CoreML.Specification.MILSpec.Program', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='version', full_name='CoreML.Specification.MILSpec.Program.version', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='functions', full_name='CoreML.Specification.MILSpec.Program.functions', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='docString', full_name='CoreML.Specification.MILSpec.Program.docString', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='attributes', full_name='CoreML.Specification.MILSpec.Program.attributes', index=3, + number=4, type=11, cpp_type=10, label=3, + 
has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_PROGRAM_FUNCTIONSENTRY, _PROGRAM_ATTRIBUTESENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=44, + serialized_end=415, +) + + +_FUNCTION_BLOCKSPECIALIZATIONSENTRY = _descriptor.Descriptor( + name='BlockSpecializationsEntry', + full_name='CoreML.Specification.MILSpec.Function.BlockSpecializationsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='CoreML.Specification.MILSpec.Function.BlockSpecializationsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.MILSpec.Function.BlockSpecializationsEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=680, + serialized_end=776, +) + +_FUNCTION_ATTRIBUTESENTRY = _descriptor.Descriptor( + name='AttributesEntry', + full_name='CoreML.Specification.MILSpec.Function.AttributesEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='CoreML.Specification.MILSpec.Function.AttributesEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.MILSpec.Function.AttributesEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=329, + serialized_end=415, +) + +_FUNCTION = _descriptor.Descriptor( + name='Function', + full_name='CoreML.Specification.MILSpec.Function', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='inputs', full_name='CoreML.Specification.MILSpec.Function.inputs', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='opset', full_name='CoreML.Specification.MILSpec.Function.opset', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, 
default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='block_specializations', full_name='CoreML.Specification.MILSpec.Function.block_specializations', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='attributes', full_name='CoreML.Specification.MILSpec.Function.attributes', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_FUNCTION_BLOCKSPECIALIZATIONSENTRY, _FUNCTION_ATTRIBUTESENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=418, + serialized_end=864, +) + + +_BLOCK_ATTRIBUTESENTRY = _descriptor.Descriptor( + name='AttributesEntry', + full_name='CoreML.Specification.MILSpec.Block.AttributesEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='CoreML.Specification.MILSpec.Block.AttributesEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.MILSpec.Block.AttributesEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=329, + serialized_end=415, +) + +_BLOCK = _descriptor.Descriptor( + name='Block', + full_name='CoreML.Specification.MILSpec.Block', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='inputs', full_name='CoreML.Specification.MILSpec.Block.inputs', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='outputs', full_name='CoreML.Specification.MILSpec.Block.outputs', index=1, + number=2, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='operations', full_name='CoreML.Specification.MILSpec.Block.operations', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='attributes', full_name='CoreML.Specification.MILSpec.Block.attributes', index=3, 
+ number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_BLOCK_ATTRIBUTESENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=867, + serialized_end=1175, +) + + +_ARGUMENT_BINDING = _descriptor.Descriptor( + name='Binding', + full_name='CoreML.Specification.MILSpec.Argument.Binding', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='CoreML.Specification.MILSpec.Argument.Binding.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.MILSpec.Argument.Binding.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='binding', full_name='CoreML.Specification.MILSpec.Argument.Binding.binding', + index=0, containing_type=None, fields=[]), + ], + serialized_start=1257, + serialized_end=1347, +) + +_ARGUMENT = _descriptor.Descriptor( + name='Argument', + full_name='CoreML.Specification.MILSpec.Argument', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='arguments', full_name='CoreML.Specification.MILSpec.Argument.arguments', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_ARGUMENT_BINDING, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1178, + serialized_end=1347, +) + + +_OPERATION_INPUTSENTRY = _descriptor.Descriptor( + name='InputsEntry', + full_name='CoreML.Specification.MILSpec.Operation.InputsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='CoreML.Specification.MILSpec.Operation.InputsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.MILSpec.Operation.InputsEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + 
], + serialized_start=1639, + serialized_end=1724, +) + +_OPERATION_ATTRIBUTESENTRY = _descriptor.Descriptor( + name='AttributesEntry', + full_name='CoreML.Specification.MILSpec.Operation.AttributesEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='CoreML.Specification.MILSpec.Operation.AttributesEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.MILSpec.Operation.AttributesEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=329, + serialized_end=415, +) + +_OPERATION = _descriptor.Descriptor( + name='Operation', + full_name='CoreML.Specification.MILSpec.Operation', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='type', full_name='CoreML.Specification.MILSpec.Operation.type', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='inputs', full_name='CoreML.Specification.MILSpec.Operation.inputs', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='outputs', full_name='CoreML.Specification.MILSpec.Operation.outputs', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='blocks', full_name='CoreML.Specification.MILSpec.Operation.blocks', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='attributes', full_name='CoreML.Specification.MILSpec.Operation.attributes', index=4, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_OPERATION_INPUTSENTRY, _OPERATION_ATTRIBUTESENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1350, + serialized_end=1812, +) + + +_NAMEDVALUETYPE = _descriptor.Descriptor( + name='NamedValueType', + full_name='CoreML.Specification.MILSpec.NamedValueType', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + 
_descriptor.FieldDescriptor( + name='name', full_name='CoreML.Specification.MILSpec.NamedValueType.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='type', full_name='CoreML.Specification.MILSpec.NamedValueType.type', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1814, + serialized_end=1899, +) + + +_VALUETYPE = _descriptor.Descriptor( + name='ValueType', + full_name='CoreML.Specification.MILSpec.ValueType', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='tensorType', full_name='CoreML.Specification.MILSpec.ValueType.tensorType', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='listType', full_name='CoreML.Specification.MILSpec.ValueType.listType', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='tupleType', full_name='CoreML.Specification.MILSpec.ValueType.tupleType', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='dictionaryType', full_name='CoreML.Specification.MILSpec.ValueType.dictionaryType', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='type', full_name='CoreML.Specification.MILSpec.ValueType.type', + index=0, containing_type=None, fields=[]), + ], + serialized_start=1902, + serialized_end=2179, +) + + +_TENSORTYPE_ATTRIBUTESENTRY = _descriptor.Descriptor( + name='AttributesEntry', + full_name='CoreML.Specification.MILSpec.TensorType.AttributesEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='CoreML.Specification.MILSpec.TensorType.AttributesEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.MILSpec.TensorType.AttributesEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, 
default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=329, + serialized_end=415, +) + +_TENSORTYPE = _descriptor.Descriptor( + name='TensorType', + full_name='CoreML.Specification.MILSpec.TensorType', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='dataType', full_name='CoreML.Specification.MILSpec.TensorType.dataType', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='rank', full_name='CoreML.Specification.MILSpec.TensorType.rank', index=1, + number=2, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='dimensions', full_name='CoreML.Specification.MILSpec.TensorType.dimensions', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='attributes', full_name='CoreML.Specification.MILSpec.TensorType.attributes', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_TENSORTYPE_ATTRIBUTESENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2182, + serialized_end=2493, +) + + +_TUPLETYPE = _descriptor.Descriptor( + name='TupleType', + full_name='CoreML.Specification.MILSpec.TupleType', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='types', full_name='CoreML.Specification.MILSpec.TupleType.types', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2495, + serialized_end=2562, +) + + +_LISTTYPE = _descriptor.Descriptor( + name='ListType', + full_name='CoreML.Specification.MILSpec.ListType', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='type', full_name='CoreML.Specification.MILSpec.ListType.type', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='length', full_name='CoreML.Specification.MILSpec.ListType.length', index=1, + number=2, type=11, cpp_type=10, label=1, + 
has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2564, + serialized_end=2686, +) + + +_DICTIONARYTYPE = _descriptor.Descriptor( + name='DictionaryType', + full_name='CoreML.Specification.MILSpec.DictionaryType', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='keyType', full_name='CoreML.Specification.MILSpec.DictionaryType.keyType', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='valueType', full_name='CoreML.Specification.MILSpec.DictionaryType.valueType', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2689, + serialized_end=2823, +) + + +_DIMENSION_CONSTANTDIMENSION = _descriptor.Descriptor( + name='ConstantDimension', + full_name='CoreML.Specification.MILSpec.Dimension.ConstantDimension', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='size', full_name='CoreML.Specification.MILSpec.Dimension.ConstantDimension.size', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2995, + serialized_end=3028, +) + +_DIMENSION_UNKNOWNDIMENSION = _descriptor.Descriptor( + name='UnknownDimension', + full_name='CoreML.Specification.MILSpec.Dimension.UnknownDimension', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='variadic', full_name='CoreML.Specification.MILSpec.Dimension.UnknownDimension.variadic', index=0, + number=1, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=3030, + serialized_end=3066, +) + +_DIMENSION = _descriptor.Descriptor( + name='Dimension', + full_name='CoreML.Specification.MILSpec.Dimension', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='constant', full_name='CoreML.Specification.MILSpec.Dimension.constant', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + 
_descriptor.FieldDescriptor( + name='unknown', full_name='CoreML.Specification.MILSpec.Dimension.unknown', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_DIMENSION_CONSTANTDIMENSION, _DIMENSION_UNKNOWNDIMENSION, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='dimension', full_name='CoreML.Specification.MILSpec.Dimension.dimension', + index=0, containing_type=None, fields=[]), + ], + serialized_start=2826, + serialized_end=3079, +) + + +_VALUE_IMMEDIATEVALUE = _descriptor.Descriptor( + name='ImmediateValue', + full_name='CoreML.Specification.MILSpec.Value.ImmediateValue', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='tensor', full_name='CoreML.Specification.MILSpec.Value.ImmediateValue.tensor', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='tuple', full_name='CoreML.Specification.MILSpec.Value.ImmediateValue.tuple', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='list', full_name='CoreML.Specification.MILSpec.Value.ImmediateValue.list', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='dictionary', full_name='CoreML.Specification.MILSpec.Value.ImmediateValue.dictionary', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='value', full_name='CoreML.Specification.MILSpec.Value.ImmediateValue.value', + index=0, containing_type=None, fields=[]), + ], + serialized_start=3320, + serialized_end=3591, +) + +_VALUE_BLOBFILEVALUE = _descriptor.Descriptor( + name='BlobFileValue', + full_name='CoreML.Specification.MILSpec.Value.BlobFileValue', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='fileName', full_name='CoreML.Specification.MILSpec.Value.BlobFileValue.fileName', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='offset', full_name='CoreML.Specification.MILSpec.Value.BlobFileValue.offset', index=1, + number=2, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, 
extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=3593, + serialized_end=3642, +) + +_VALUE = _descriptor.Descriptor( + name='Value', + full_name='CoreML.Specification.MILSpec.Value', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='docString', full_name='CoreML.Specification.MILSpec.Value.docString', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='type', full_name='CoreML.Specification.MILSpec.Value.type', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='immediateValue', full_name='CoreML.Specification.MILSpec.Value.immediateValue', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='blobFileValue', full_name='CoreML.Specification.MILSpec.Value.blobFileValue', index=3, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_VALUE_IMMEDIATEVALUE, _VALUE_BLOBFILEVALUE, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='value', full_name='CoreML.Specification.MILSpec.Value.value', + index=0, containing_type=None, fields=[]), + ], + serialized_start=3082, + serialized_end=3651, +) + + +_TENSORVALUE_REPEATEDFLOATS = _descriptor.Descriptor( + name='RepeatedFloats', + full_name='CoreML.Specification.MILSpec.TensorValue.RepeatedFloats', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='values', full_name='CoreML.Specification.MILSpec.TensorValue.RepeatedFloats.values', index=0, + number=1, type=2, cpp_type=6, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=4201, + serialized_end=4237, +) + +_TENSORVALUE_REPEATEDDOUBLES = _descriptor.Descriptor( + name='RepeatedDoubles', + full_name='CoreML.Specification.MILSpec.TensorValue.RepeatedDoubles', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='values', full_name='CoreML.Specification.MILSpec.TensorValue.RepeatedDoubles.values', index=0, + number=1, type=1, cpp_type=5, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, 
extension_scope=None, + options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=4239, + serialized_end=4276, +) + +_TENSORVALUE_REPEATEDINTS = _descriptor.Descriptor( + name='RepeatedInts', + full_name='CoreML.Specification.MILSpec.TensorValue.RepeatedInts', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='values', full_name='CoreML.Specification.MILSpec.TensorValue.RepeatedInts.values', index=0, + number=1, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=4278, + serialized_end=4312, +) + +_TENSORVALUE_REPEATEDLONGINTS = _descriptor.Descriptor( + name='RepeatedLongInts', + full_name='CoreML.Specification.MILSpec.TensorValue.RepeatedLongInts', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='values', full_name='CoreML.Specification.MILSpec.TensorValue.RepeatedLongInts.values', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=4314, + serialized_end=4352, +) + +_TENSORVALUE_REPEATEDBOOLS = _descriptor.Descriptor( + name='RepeatedBools', + full_name='CoreML.Specification.MILSpec.TensorValue.RepeatedBools', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='values', full_name='CoreML.Specification.MILSpec.TensorValue.RepeatedBools.values', index=0, + number=1, type=8, cpp_type=7, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=4354, + serialized_end=4389, +) + +_TENSORVALUE_REPEATEDSTRINGS = _descriptor.Descriptor( + name='RepeatedStrings', + full_name='CoreML.Specification.MILSpec.TensorValue.RepeatedStrings', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='values', full_name='CoreML.Specification.MILSpec.TensorValue.RepeatedStrings.values', index=0, + number=1, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + 
extension_ranges=[], + oneofs=[ + ], + serialized_start=4391, + serialized_end=4424, +) + +_TENSORVALUE_REPEATEDBYTES = _descriptor.Descriptor( + name='RepeatedBytes', + full_name='CoreML.Specification.MILSpec.TensorValue.RepeatedBytes', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='values', full_name='CoreML.Specification.MILSpec.TensorValue.RepeatedBytes.values', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=4426, + serialized_end=4457, +) + +_TENSORVALUE = _descriptor.Descriptor( + name='TensorValue', + full_name='CoreML.Specification.MILSpec.TensorValue', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='floats', full_name='CoreML.Specification.MILSpec.TensorValue.floats', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='ints', full_name='CoreML.Specification.MILSpec.TensorValue.ints', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='bools', full_name='CoreML.Specification.MILSpec.TensorValue.bools', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='strings', full_name='CoreML.Specification.MILSpec.TensorValue.strings', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='longInts', full_name='CoreML.Specification.MILSpec.TensorValue.longInts', index=4, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='doubles', full_name='CoreML.Specification.MILSpec.TensorValue.doubles', index=5, + number=6, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='bytes', full_name='CoreML.Specification.MILSpec.TensorValue.bytes', index=6, + number=7, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_TENSORVALUE_REPEATEDFLOATS, _TENSORVALUE_REPEATEDDOUBLES, _TENSORVALUE_REPEATEDINTS, _TENSORVALUE_REPEATEDLONGINTS, _TENSORVALUE_REPEATEDBOOLS, 
_TENSORVALUE_REPEATEDSTRINGS, _TENSORVALUE_REPEATEDBYTES, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='value', full_name='CoreML.Specification.MILSpec.TensorValue.value', + index=0, containing_type=None, fields=[]), + ], + serialized_start=3654, + serialized_end=4466, +) + + +_TUPLEVALUE = _descriptor.Descriptor( + name='TupleValue', + full_name='CoreML.Specification.MILSpec.TupleValue', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='values', full_name='CoreML.Specification.MILSpec.TupleValue.values', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=4468, + serialized_end=4533, +) + + +_LISTVALUE = _descriptor.Descriptor( + name='ListValue', + full_name='CoreML.Specification.MILSpec.ListValue', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='values', full_name='CoreML.Specification.MILSpec.ListValue.values', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=4535, + serialized_end=4599, +) + + +_DICTIONARYVALUE_KEYVALUEPAIR = _descriptor.Descriptor( + name='KeyValuePair', + full_name='CoreML.Specification.MILSpec.DictionaryValue.KeyValuePair', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='CoreML.Specification.MILSpec.DictionaryValue.KeyValuePair.key', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.MILSpec.DictionaryValue.KeyValuePair.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=4697, + serialized_end=4813, +) + +_DICTIONARYVALUE = _descriptor.Descriptor( + name='DictionaryValue', + full_name='CoreML.Specification.MILSpec.DictionaryValue', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='values', full_name='CoreML.Specification.MILSpec.DictionaryValue.values', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + 
nested_types=[_DICTIONARYVALUE_KEYVALUEPAIR, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=4602, + serialized_end=4813, +) + +_PROGRAM_FUNCTIONSENTRY.fields_by_name['value'].message_type = _FUNCTION +_PROGRAM_FUNCTIONSENTRY.containing_type = _PROGRAM +_PROGRAM_ATTRIBUTESENTRY.fields_by_name['value'].message_type = _VALUE +_PROGRAM_ATTRIBUTESENTRY.containing_type = _PROGRAM +_PROGRAM.fields_by_name['functions'].message_type = _PROGRAM_FUNCTIONSENTRY +_PROGRAM.fields_by_name['attributes'].message_type = _PROGRAM_ATTRIBUTESENTRY +_FUNCTION_BLOCKSPECIALIZATIONSENTRY.fields_by_name['value'].message_type = _BLOCK +_FUNCTION_BLOCKSPECIALIZATIONSENTRY.containing_type = _FUNCTION +_FUNCTION_ATTRIBUTESENTRY.fields_by_name['value'].message_type = _VALUE +_FUNCTION_ATTRIBUTESENTRY.containing_type = _FUNCTION +_FUNCTION.fields_by_name['inputs'].message_type = _NAMEDVALUETYPE +_FUNCTION.fields_by_name['block_specializations'].message_type = _FUNCTION_BLOCKSPECIALIZATIONSENTRY +_FUNCTION.fields_by_name['attributes'].message_type = _FUNCTION_ATTRIBUTESENTRY +_BLOCK_ATTRIBUTESENTRY.fields_by_name['value'].message_type = _VALUE +_BLOCK_ATTRIBUTESENTRY.containing_type = _BLOCK +_BLOCK.fields_by_name['inputs'].message_type = _NAMEDVALUETYPE +_BLOCK.fields_by_name['operations'].message_type = _OPERATION +_BLOCK.fields_by_name['attributes'].message_type = _BLOCK_ATTRIBUTESENTRY +_ARGUMENT_BINDING.fields_by_name['value'].message_type = _VALUE +_ARGUMENT_BINDING.containing_type = _ARGUMENT +_ARGUMENT_BINDING.oneofs_by_name['binding'].fields.append( + _ARGUMENT_BINDING.fields_by_name['name']) +_ARGUMENT_BINDING.fields_by_name['name'].containing_oneof = _ARGUMENT_BINDING.oneofs_by_name['binding'] +_ARGUMENT_BINDING.oneofs_by_name['binding'].fields.append( + _ARGUMENT_BINDING.fields_by_name['value']) +_ARGUMENT_BINDING.fields_by_name['value'].containing_oneof = _ARGUMENT_BINDING.oneofs_by_name['binding'] +_ARGUMENT.fields_by_name['arguments'].message_type = _ARGUMENT_BINDING +_OPERATION_INPUTSENTRY.fields_by_name['value'].message_type = _ARGUMENT +_OPERATION_INPUTSENTRY.containing_type = _OPERATION +_OPERATION_ATTRIBUTESENTRY.fields_by_name['value'].message_type = _VALUE +_OPERATION_ATTRIBUTESENTRY.containing_type = _OPERATION +_OPERATION.fields_by_name['inputs'].message_type = _OPERATION_INPUTSENTRY +_OPERATION.fields_by_name['outputs'].message_type = _NAMEDVALUETYPE +_OPERATION.fields_by_name['blocks'].message_type = _BLOCK +_OPERATION.fields_by_name['attributes'].message_type = _OPERATION_ATTRIBUTESENTRY +_NAMEDVALUETYPE.fields_by_name['type'].message_type = _VALUETYPE +_VALUETYPE.fields_by_name['tensorType'].message_type = _TENSORTYPE +_VALUETYPE.fields_by_name['listType'].message_type = _LISTTYPE +_VALUETYPE.fields_by_name['tupleType'].message_type = _TUPLETYPE +_VALUETYPE.fields_by_name['dictionaryType'].message_type = _DICTIONARYTYPE +_VALUETYPE.oneofs_by_name['type'].fields.append( + _VALUETYPE.fields_by_name['tensorType']) +_VALUETYPE.fields_by_name['tensorType'].containing_oneof = _VALUETYPE.oneofs_by_name['type'] +_VALUETYPE.oneofs_by_name['type'].fields.append( + _VALUETYPE.fields_by_name['listType']) +_VALUETYPE.fields_by_name['listType'].containing_oneof = _VALUETYPE.oneofs_by_name['type'] +_VALUETYPE.oneofs_by_name['type'].fields.append( + _VALUETYPE.fields_by_name['tupleType']) +_VALUETYPE.fields_by_name['tupleType'].containing_oneof = _VALUETYPE.oneofs_by_name['type'] 
+_VALUETYPE.oneofs_by_name['type'].fields.append( + _VALUETYPE.fields_by_name['dictionaryType']) +_VALUETYPE.fields_by_name['dictionaryType'].containing_oneof = _VALUETYPE.oneofs_by_name['type'] +_TENSORTYPE_ATTRIBUTESENTRY.fields_by_name['value'].message_type = _VALUE +_TENSORTYPE_ATTRIBUTESENTRY.containing_type = _TENSORTYPE +_TENSORTYPE.fields_by_name['dataType'].enum_type = _DATATYPE +_TENSORTYPE.fields_by_name['dimensions'].message_type = _DIMENSION +_TENSORTYPE.fields_by_name['attributes'].message_type = _TENSORTYPE_ATTRIBUTESENTRY +_TUPLETYPE.fields_by_name['types'].message_type = _VALUETYPE +_LISTTYPE.fields_by_name['type'].message_type = _VALUETYPE +_LISTTYPE.fields_by_name['length'].message_type = _DIMENSION +_DICTIONARYTYPE.fields_by_name['keyType'].message_type = _VALUETYPE +_DICTIONARYTYPE.fields_by_name['valueType'].message_type = _VALUETYPE +_DIMENSION_CONSTANTDIMENSION.containing_type = _DIMENSION +_DIMENSION_UNKNOWNDIMENSION.containing_type = _DIMENSION +_DIMENSION.fields_by_name['constant'].message_type = _DIMENSION_CONSTANTDIMENSION +_DIMENSION.fields_by_name['unknown'].message_type = _DIMENSION_UNKNOWNDIMENSION +_DIMENSION.oneofs_by_name['dimension'].fields.append( + _DIMENSION.fields_by_name['constant']) +_DIMENSION.fields_by_name['constant'].containing_oneof = _DIMENSION.oneofs_by_name['dimension'] +_DIMENSION.oneofs_by_name['dimension'].fields.append( + _DIMENSION.fields_by_name['unknown']) +_DIMENSION.fields_by_name['unknown'].containing_oneof = _DIMENSION.oneofs_by_name['dimension'] +_VALUE_IMMEDIATEVALUE.fields_by_name['tensor'].message_type = _TENSORVALUE +_VALUE_IMMEDIATEVALUE.fields_by_name['tuple'].message_type = _TUPLEVALUE +_VALUE_IMMEDIATEVALUE.fields_by_name['list'].message_type = _LISTVALUE +_VALUE_IMMEDIATEVALUE.fields_by_name['dictionary'].message_type = _DICTIONARYVALUE +_VALUE_IMMEDIATEVALUE.containing_type = _VALUE +_VALUE_IMMEDIATEVALUE.oneofs_by_name['value'].fields.append( + _VALUE_IMMEDIATEVALUE.fields_by_name['tensor']) +_VALUE_IMMEDIATEVALUE.fields_by_name['tensor'].containing_oneof = _VALUE_IMMEDIATEVALUE.oneofs_by_name['value'] +_VALUE_IMMEDIATEVALUE.oneofs_by_name['value'].fields.append( + _VALUE_IMMEDIATEVALUE.fields_by_name['tuple']) +_VALUE_IMMEDIATEVALUE.fields_by_name['tuple'].containing_oneof = _VALUE_IMMEDIATEVALUE.oneofs_by_name['value'] +_VALUE_IMMEDIATEVALUE.oneofs_by_name['value'].fields.append( + _VALUE_IMMEDIATEVALUE.fields_by_name['list']) +_VALUE_IMMEDIATEVALUE.fields_by_name['list'].containing_oneof = _VALUE_IMMEDIATEVALUE.oneofs_by_name['value'] +_VALUE_IMMEDIATEVALUE.oneofs_by_name['value'].fields.append( + _VALUE_IMMEDIATEVALUE.fields_by_name['dictionary']) +_VALUE_IMMEDIATEVALUE.fields_by_name['dictionary'].containing_oneof = _VALUE_IMMEDIATEVALUE.oneofs_by_name['value'] +_VALUE_BLOBFILEVALUE.containing_type = _VALUE +_VALUE.fields_by_name['type'].message_type = _VALUETYPE +_VALUE.fields_by_name['immediateValue'].message_type = _VALUE_IMMEDIATEVALUE +_VALUE.fields_by_name['blobFileValue'].message_type = _VALUE_BLOBFILEVALUE +_VALUE.oneofs_by_name['value'].fields.append( + _VALUE.fields_by_name['immediateValue']) +_VALUE.fields_by_name['immediateValue'].containing_oneof = _VALUE.oneofs_by_name['value'] +_VALUE.oneofs_by_name['value'].fields.append( + _VALUE.fields_by_name['blobFileValue']) +_VALUE.fields_by_name['blobFileValue'].containing_oneof = _VALUE.oneofs_by_name['value'] +_TENSORVALUE_REPEATEDFLOATS.containing_type = _TENSORVALUE +_TENSORVALUE_REPEATEDDOUBLES.containing_type = _TENSORVALUE 
+_TENSORVALUE_REPEATEDINTS.containing_type = _TENSORVALUE +_TENSORVALUE_REPEATEDLONGINTS.containing_type = _TENSORVALUE +_TENSORVALUE_REPEATEDBOOLS.containing_type = _TENSORVALUE +_TENSORVALUE_REPEATEDSTRINGS.containing_type = _TENSORVALUE +_TENSORVALUE_REPEATEDBYTES.containing_type = _TENSORVALUE +_TENSORVALUE.fields_by_name['floats'].message_type = _TENSORVALUE_REPEATEDFLOATS +_TENSORVALUE.fields_by_name['ints'].message_type = _TENSORVALUE_REPEATEDINTS +_TENSORVALUE.fields_by_name['bools'].message_type = _TENSORVALUE_REPEATEDBOOLS +_TENSORVALUE.fields_by_name['strings'].message_type = _TENSORVALUE_REPEATEDSTRINGS +_TENSORVALUE.fields_by_name['longInts'].message_type = _TENSORVALUE_REPEATEDLONGINTS +_TENSORVALUE.fields_by_name['doubles'].message_type = _TENSORVALUE_REPEATEDDOUBLES +_TENSORVALUE.fields_by_name['bytes'].message_type = _TENSORVALUE_REPEATEDBYTES +_TENSORVALUE.oneofs_by_name['value'].fields.append( + _TENSORVALUE.fields_by_name['floats']) +_TENSORVALUE.fields_by_name['floats'].containing_oneof = _TENSORVALUE.oneofs_by_name['value'] +_TENSORVALUE.oneofs_by_name['value'].fields.append( + _TENSORVALUE.fields_by_name['ints']) +_TENSORVALUE.fields_by_name['ints'].containing_oneof = _TENSORVALUE.oneofs_by_name['value'] +_TENSORVALUE.oneofs_by_name['value'].fields.append( + _TENSORVALUE.fields_by_name['bools']) +_TENSORVALUE.fields_by_name['bools'].containing_oneof = _TENSORVALUE.oneofs_by_name['value'] +_TENSORVALUE.oneofs_by_name['value'].fields.append( + _TENSORVALUE.fields_by_name['strings']) +_TENSORVALUE.fields_by_name['strings'].containing_oneof = _TENSORVALUE.oneofs_by_name['value'] +_TENSORVALUE.oneofs_by_name['value'].fields.append( + _TENSORVALUE.fields_by_name['longInts']) +_TENSORVALUE.fields_by_name['longInts'].containing_oneof = _TENSORVALUE.oneofs_by_name['value'] +_TENSORVALUE.oneofs_by_name['value'].fields.append( + _TENSORVALUE.fields_by_name['doubles']) +_TENSORVALUE.fields_by_name['doubles'].containing_oneof = _TENSORVALUE.oneofs_by_name['value'] +_TENSORVALUE.oneofs_by_name['value'].fields.append( + _TENSORVALUE.fields_by_name['bytes']) +_TENSORVALUE.fields_by_name['bytes'].containing_oneof = _TENSORVALUE.oneofs_by_name['value'] +_TUPLEVALUE.fields_by_name['values'].message_type = _VALUE +_LISTVALUE.fields_by_name['values'].message_type = _VALUE +_DICTIONARYVALUE_KEYVALUEPAIR.fields_by_name['key'].message_type = _VALUE +_DICTIONARYVALUE_KEYVALUEPAIR.fields_by_name['value'].message_type = _VALUE +_DICTIONARYVALUE_KEYVALUEPAIR.containing_type = _DICTIONARYVALUE +_DICTIONARYVALUE.fields_by_name['values'].message_type = _DICTIONARYVALUE_KEYVALUEPAIR +DESCRIPTOR.message_types_by_name['Program'] = _PROGRAM +DESCRIPTOR.message_types_by_name['Function'] = _FUNCTION +DESCRIPTOR.message_types_by_name['Block'] = _BLOCK +DESCRIPTOR.message_types_by_name['Argument'] = _ARGUMENT +DESCRIPTOR.message_types_by_name['Operation'] = _OPERATION +DESCRIPTOR.message_types_by_name['NamedValueType'] = _NAMEDVALUETYPE +DESCRIPTOR.message_types_by_name['ValueType'] = _VALUETYPE +DESCRIPTOR.message_types_by_name['TensorType'] = _TENSORTYPE +DESCRIPTOR.message_types_by_name['TupleType'] = _TUPLETYPE +DESCRIPTOR.message_types_by_name['ListType'] = _LISTTYPE +DESCRIPTOR.message_types_by_name['DictionaryType'] = _DICTIONARYTYPE +DESCRIPTOR.message_types_by_name['Dimension'] = _DIMENSION +DESCRIPTOR.message_types_by_name['Value'] = _VALUE +DESCRIPTOR.message_types_by_name['TensorValue'] = _TENSORVALUE +DESCRIPTOR.message_types_by_name['TupleValue'] = _TUPLEVALUE 
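+# The oneof wiring above means a TensorValue carries exactly one payload at a
+# time: mutating a second member of the 'value' oneof clears the first. A
+# minimal usage sketch (assumes the conventional coremltools.proto import
+# path; illustrative, not part of the protoc output):
+#
+#     from coremltools.proto import MIL_pb2
+#     tv = MIL_pb2.TensorValue()
+#     tv.floats.values.extend([1.0, 2.0])
+#     tv.ints.values.extend([3])          # replaces the floats payload
+#     assert tv.WhichOneof('value') == 'ints'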
+DESCRIPTOR.message_types_by_name['ListValue'] = _LISTVALUE +DESCRIPTOR.message_types_by_name['DictionaryValue'] = _DICTIONARYVALUE +DESCRIPTOR.enum_types_by_name['DataType'] = _DATATYPE +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Program = _reflection.GeneratedProtocolMessageType('Program', (_message.Message,), dict( + + FunctionsEntry = _reflection.GeneratedProtocolMessageType('FunctionsEntry', (_message.Message,), dict( + DESCRIPTOR = _PROGRAM_FUNCTIONSENTRY, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Program.FunctionsEntry) + )) + , + + AttributesEntry = _reflection.GeneratedProtocolMessageType('AttributesEntry', (_message.Message,), dict( + DESCRIPTOR = _PROGRAM_ATTRIBUTESENTRY, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Program.AttributesEntry) + )) + , + DESCRIPTOR = _PROGRAM, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Program) + )) +_sym_db.RegisterMessage(Program) +_sym_db.RegisterMessage(Program.FunctionsEntry) +_sym_db.RegisterMessage(Program.AttributesEntry) + +Function = _reflection.GeneratedProtocolMessageType('Function', (_message.Message,), dict( + + BlockSpecializationsEntry = _reflection.GeneratedProtocolMessageType('BlockSpecializationsEntry', (_message.Message,), dict( + DESCRIPTOR = _FUNCTION_BLOCKSPECIALIZATIONSENTRY, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Function.BlockSpecializationsEntry) + )) + , + + AttributesEntry = _reflection.GeneratedProtocolMessageType('AttributesEntry', (_message.Message,), dict( + DESCRIPTOR = _FUNCTION_ATTRIBUTESENTRY, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Function.AttributesEntry) + )) + , + DESCRIPTOR = _FUNCTION, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Function) + )) +_sym_db.RegisterMessage(Function) +_sym_db.RegisterMessage(Function.BlockSpecializationsEntry) +_sym_db.RegisterMessage(Function.AttributesEntry) + +Block = _reflection.GeneratedProtocolMessageType('Block', (_message.Message,), dict( + + AttributesEntry = _reflection.GeneratedProtocolMessageType('AttributesEntry', (_message.Message,), dict( + DESCRIPTOR = _BLOCK_ATTRIBUTESENTRY, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Block.AttributesEntry) + )) + , + DESCRIPTOR = _BLOCK, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Block) + )) +_sym_db.RegisterMessage(Block) +_sym_db.RegisterMessage(Block.AttributesEntry) + +Argument = _reflection.GeneratedProtocolMessageType('Argument', (_message.Message,), dict( + + Binding = _reflection.GeneratedProtocolMessageType('Binding', (_message.Message,), dict( + DESCRIPTOR = _ARGUMENT_BINDING, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Argument.Binding) + )) + , + DESCRIPTOR = _ARGUMENT, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Argument) + )) +_sym_db.RegisterMessage(Argument) +_sym_db.RegisterMessage(Argument.Binding) + +Operation = _reflection.GeneratedProtocolMessageType('Operation', (_message.Message,), dict( + + InputsEntry = _reflection.GeneratedProtocolMessageType('InputsEntry', (_message.Message,), dict( + DESCRIPTOR = _OPERATION_INPUTSENTRY, + __module__ = 'MIL_pb2' + # 
@@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Operation.InputsEntry) + )) + , + + AttributesEntry = _reflection.GeneratedProtocolMessageType('AttributesEntry', (_message.Message,), dict( + DESCRIPTOR = _OPERATION_ATTRIBUTESENTRY, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Operation.AttributesEntry) + )) + , + DESCRIPTOR = _OPERATION, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Operation) + )) +_sym_db.RegisterMessage(Operation) +_sym_db.RegisterMessage(Operation.InputsEntry) +_sym_db.RegisterMessage(Operation.AttributesEntry) + +NamedValueType = _reflection.GeneratedProtocolMessageType('NamedValueType', (_message.Message,), dict( + DESCRIPTOR = _NAMEDVALUETYPE, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.NamedValueType) + )) +_sym_db.RegisterMessage(NamedValueType) + +ValueType = _reflection.GeneratedProtocolMessageType('ValueType', (_message.Message,), dict( + DESCRIPTOR = _VALUETYPE, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.ValueType) + )) +_sym_db.RegisterMessage(ValueType) + +TensorType = _reflection.GeneratedProtocolMessageType('TensorType', (_message.Message,), dict( + + AttributesEntry = _reflection.GeneratedProtocolMessageType('AttributesEntry', (_message.Message,), dict( + DESCRIPTOR = _TENSORTYPE_ATTRIBUTESENTRY, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.TensorType.AttributesEntry) + )) + , + DESCRIPTOR = _TENSORTYPE, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.TensorType) + )) +_sym_db.RegisterMessage(TensorType) +_sym_db.RegisterMessage(TensorType.AttributesEntry) + +TupleType = _reflection.GeneratedProtocolMessageType('TupleType', (_message.Message,), dict( + DESCRIPTOR = _TUPLETYPE, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.TupleType) + )) +_sym_db.RegisterMessage(TupleType) + +ListType = _reflection.GeneratedProtocolMessageType('ListType', (_message.Message,), dict( + DESCRIPTOR = _LISTTYPE, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.ListType) + )) +_sym_db.RegisterMessage(ListType) + +DictionaryType = _reflection.GeneratedProtocolMessageType('DictionaryType', (_message.Message,), dict( + DESCRIPTOR = _DICTIONARYTYPE, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.DictionaryType) + )) +_sym_db.RegisterMessage(DictionaryType) + +Dimension = _reflection.GeneratedProtocolMessageType('Dimension', (_message.Message,), dict( + + ConstantDimension = _reflection.GeneratedProtocolMessageType('ConstantDimension', (_message.Message,), dict( + DESCRIPTOR = _DIMENSION_CONSTANTDIMENSION, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Dimension.ConstantDimension) + )) + , + + UnknownDimension = _reflection.GeneratedProtocolMessageType('UnknownDimension', (_message.Message,), dict( + DESCRIPTOR = _DIMENSION_UNKNOWNDIMENSION, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Dimension.UnknownDimension) + )) + , + DESCRIPTOR = _DIMENSION, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Dimension) + )) +_sym_db.RegisterMessage(Dimension) 
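+# _reflection.GeneratedProtocolMessageType synthesizes each concrete Message
+# class from its descriptor, and _sym_db.RegisterMessage makes the class
+# resolvable by its full protobuf name. A sketch using the public
+# symbol-database API (illustrative only):
+#
+#     from google.protobuf import symbol_database
+#     Dim = symbol_database.Default().GetSymbol(
+#         'CoreML.Specification.MILSpec.Dimension')
+#     assert Dim is Dimension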
+_sym_db.RegisterMessage(Dimension.ConstantDimension) +_sym_db.RegisterMessage(Dimension.UnknownDimension) + +Value = _reflection.GeneratedProtocolMessageType('Value', (_message.Message,), dict( + + ImmediateValue = _reflection.GeneratedProtocolMessageType('ImmediateValue', (_message.Message,), dict( + DESCRIPTOR = _VALUE_IMMEDIATEVALUE, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Value.ImmediateValue) + )) + , + + BlobFileValue = _reflection.GeneratedProtocolMessageType('BlobFileValue', (_message.Message,), dict( + DESCRIPTOR = _VALUE_BLOBFILEVALUE, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Value.BlobFileValue) + )) + , + DESCRIPTOR = _VALUE, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.Value) + )) +_sym_db.RegisterMessage(Value) +_sym_db.RegisterMessage(Value.ImmediateValue) +_sym_db.RegisterMessage(Value.BlobFileValue) + +TensorValue = _reflection.GeneratedProtocolMessageType('TensorValue', (_message.Message,), dict( + + RepeatedFloats = _reflection.GeneratedProtocolMessageType('RepeatedFloats', (_message.Message,), dict( + DESCRIPTOR = _TENSORVALUE_REPEATEDFLOATS, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.TensorValue.RepeatedFloats) + )) + , + + RepeatedDoubles = _reflection.GeneratedProtocolMessageType('RepeatedDoubles', (_message.Message,), dict( + DESCRIPTOR = _TENSORVALUE_REPEATEDDOUBLES, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.TensorValue.RepeatedDoubles) + )) + , + + RepeatedInts = _reflection.GeneratedProtocolMessageType('RepeatedInts', (_message.Message,), dict( + DESCRIPTOR = _TENSORVALUE_REPEATEDINTS, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.TensorValue.RepeatedInts) + )) + , + + RepeatedLongInts = _reflection.GeneratedProtocolMessageType('RepeatedLongInts', (_message.Message,), dict( + DESCRIPTOR = _TENSORVALUE_REPEATEDLONGINTS, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.TensorValue.RepeatedLongInts) + )) + , + + RepeatedBools = _reflection.GeneratedProtocolMessageType('RepeatedBools', (_message.Message,), dict( + DESCRIPTOR = _TENSORVALUE_REPEATEDBOOLS, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.TensorValue.RepeatedBools) + )) + , + + RepeatedStrings = _reflection.GeneratedProtocolMessageType('RepeatedStrings', (_message.Message,), dict( + DESCRIPTOR = _TENSORVALUE_REPEATEDSTRINGS, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.TensorValue.RepeatedStrings) + )) + , + + RepeatedBytes = _reflection.GeneratedProtocolMessageType('RepeatedBytes', (_message.Message,), dict( + DESCRIPTOR = _TENSORVALUE_REPEATEDBYTES, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.TensorValue.RepeatedBytes) + )) + , + DESCRIPTOR = _TENSORVALUE, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.TensorValue) + )) +_sym_db.RegisterMessage(TensorValue) +_sym_db.RegisterMessage(TensorValue.RepeatedFloats) +_sym_db.RegisterMessage(TensorValue.RepeatedDoubles) +_sym_db.RegisterMessage(TensorValue.RepeatedInts) +_sym_db.RegisterMessage(TensorValue.RepeatedLongInts) +_sym_db.RegisterMessage(TensorValue.RepeatedBools) 
+_sym_db.RegisterMessage(TensorValue.RepeatedStrings) +_sym_db.RegisterMessage(TensorValue.RepeatedBytes) + +TupleValue = _reflection.GeneratedProtocolMessageType('TupleValue', (_message.Message,), dict( + DESCRIPTOR = _TUPLEVALUE, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.TupleValue) + )) +_sym_db.RegisterMessage(TupleValue) + +ListValue = _reflection.GeneratedProtocolMessageType('ListValue', (_message.Message,), dict( + DESCRIPTOR = _LISTVALUE, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.ListValue) + )) +_sym_db.RegisterMessage(ListValue) + +DictionaryValue = _reflection.GeneratedProtocolMessageType('DictionaryValue', (_message.Message,), dict( + + KeyValuePair = _reflection.GeneratedProtocolMessageType('KeyValuePair', (_message.Message,), dict( + DESCRIPTOR = _DICTIONARYVALUE_KEYVALUEPAIR, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.DictionaryValue.KeyValuePair) + )) + , + DESCRIPTOR = _DICTIONARYVALUE, + __module__ = 'MIL_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MILSpec.DictionaryValue) + )) +_sym_db.RegisterMessage(DictionaryValue) +_sym_db.RegisterMessage(DictionaryValue.KeyValuePair) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +_PROGRAM_FUNCTIONSENTRY.has_options = True +_PROGRAM_FUNCTIONSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +_PROGRAM_ATTRIBUTESENTRY.has_options = True +_PROGRAM_ATTRIBUTESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +_FUNCTION_BLOCKSPECIALIZATIONSENTRY.has_options = True +_FUNCTION_BLOCKSPECIALIZATIONSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +_FUNCTION_ATTRIBUTESENTRY.has_options = True +_FUNCTION_ATTRIBUTESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +_BLOCK_ATTRIBUTESENTRY.has_options = True +_BLOCK_ATTRIBUTESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +_OPERATION_INPUTSENTRY.has_options = True +_OPERATION_INPUTSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +_OPERATION_ATTRIBUTESENTRY.has_options = True +_OPERATION_ATTRIBUTESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +_TENSORTYPE_ATTRIBUTESENTRY.has_options = True +_TENSORTYPE_ATTRIBUTESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +_TENSORVALUE_REPEATEDFLOATS.fields_by_name['values'].has_options = True +_TENSORVALUE_REPEATEDFLOATS.fields_by_name['values']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001')) +_TENSORVALUE_REPEATEDDOUBLES.fields_by_name['values'].has_options = True +_TENSORVALUE_REPEATEDDOUBLES.fields_by_name['values']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001')) +_TENSORVALUE_REPEATEDINTS.fields_by_name['values'].has_options = True +_TENSORVALUE_REPEATEDINTS.fields_by_name['values']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001')) +_TENSORVALUE_REPEATEDLONGINTS.fields_by_name['values'].has_options = True +_TENSORVALUE_REPEATEDLONGINTS.fields_by_name['values']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001')) 
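+# The option blobs parsed throughout this module are serialized descriptor
+# options: b'8\x01' decodes to MessageOptions(map_entry=True) for the
+# generated *Entry map types, and b'\x10\x01' (octal '\020\001') to
+# FieldOptions(packed=True) for the repeated numeric tensor fields. A hedged
+# decoding sketch using the public descriptor_pb2 API:
+#
+#     from google.protobuf import descriptor_pb2
+#     opts = descriptor_pb2.FieldOptions()
+#     opts.MergeFromString(b'\x10\x01')
+#     assert opts.packed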
+_TENSORVALUE_REPEATEDBOOLS.fields_by_name['values'].has_options = True +_TENSORVALUE_REPEATEDBOOLS.fields_by_name['values']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/Model_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/Model_pb2.py new file mode 100644 index 00000000..86743064 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/Model_pb2.py @@ -0,0 +1,1153 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: Model.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . import VisionFeaturePrint_pb2 as VisionFeaturePrint__pb2 +from . import AudioFeaturePrint_pb2 as AudioFeaturePrint__pb2 +from . import TextClassifier_pb2 as TextClassifier__pb2 +try: + DataStructures__pb2 = TextClassifier__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = TextClassifier__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = TextClassifier__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = TextClassifier__pb2.FeatureTypes_pb2 +from . import WordTagger_pb2 as WordTagger__pb2 +try: + DataStructures__pb2 = WordTagger__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = WordTagger__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = WordTagger__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = WordTagger__pb2.FeatureTypes_pb2 +from . import Gazetteer_pb2 as Gazetteer__pb2 +try: + DataStructures__pb2 = Gazetteer__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = Gazetteer__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = Gazetteer__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = Gazetteer__pb2.FeatureTypes_pb2 +from . import WordEmbedding_pb2 as WordEmbedding__pb2 +try: + DataStructures__pb2 = WordEmbedding__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = WordEmbedding__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = WordEmbedding__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = WordEmbedding__pb2.FeatureTypes_pb2 +from . import ArrayFeatureExtractor_pb2 as ArrayFeatureExtractor__pb2 +from . import BayesianProbitRegressor_pb2 as BayesianProbitRegressor__pb2 +from . import CategoricalMapping_pb2 as CategoricalMapping__pb2 +try: + DataStructures__pb2 = CategoricalMapping__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = CategoricalMapping__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = CategoricalMapping__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = CategoricalMapping__pb2.FeatureTypes_pb2 +from . import CustomModel_pb2 as CustomModel__pb2 +from . 
import DictVectorizer_pb2 as DictVectorizer__pb2 +try: + DataStructures__pb2 = DictVectorizer__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = DictVectorizer__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = DictVectorizer__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = DictVectorizer__pb2.FeatureTypes_pb2 +from . import FeatureTypes_pb2 as FeatureTypes__pb2 +from . import FeatureVectorizer_pb2 as FeatureVectorizer__pb2 +from . import GLMRegressor_pb2 as GLMRegressor__pb2 +from . import GLMClassifier_pb2 as GLMClassifier__pb2 +try: + DataStructures__pb2 = GLMClassifier__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = GLMClassifier__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = GLMClassifier__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = GLMClassifier__pb2.FeatureTypes_pb2 +from . import NearestNeighbors_pb2 as NearestNeighbors__pb2 +try: + DataStructures__pb2 = NearestNeighbors__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = NearestNeighbors__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = NearestNeighbors__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = NearestNeighbors__pb2.FeatureTypes_pb2 +try: + Parameters__pb2 = NearestNeighbors__pb2.Parameters__pb2 +except AttributeError: + Parameters__pb2 = NearestNeighbors__pb2.Parameters_pb2 +try: + DataStructures__pb2 = NearestNeighbors__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = NearestNeighbors__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = NearestNeighbors__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = NearestNeighbors__pb2.FeatureTypes_pb2 +from . import Identity_pb2 as Identity__pb2 +from . import Imputer_pb2 as Imputer__pb2 +try: + DataStructures__pb2 = Imputer__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = Imputer__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = Imputer__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = Imputer__pb2.FeatureTypes_pb2 +from . import MIL_pb2 as MIL__pb2 +from . import NeuralNetwork_pb2 as NeuralNetwork__pb2 +try: + DataStructures__pb2 = NeuralNetwork__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = NeuralNetwork__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = NeuralNetwork__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = NeuralNetwork__pb2.FeatureTypes_pb2 +try: + Parameters__pb2 = NeuralNetwork__pb2.Parameters__pb2 +except AttributeError: + Parameters__pb2 = NeuralNetwork__pb2.Parameters_pb2 +try: + DataStructures__pb2 = NeuralNetwork__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = NeuralNetwork__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = NeuralNetwork__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = NeuralNetwork__pb2.FeatureTypes_pb2 +from . import Normalizer_pb2 as Normalizer__pb2 +from . import OneHotEncoder_pb2 as OneHotEncoder__pb2 +try: + DataStructures__pb2 = OneHotEncoder__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = OneHotEncoder__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = OneHotEncoder__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = OneHotEncoder__pb2.FeatureTypes_pb2 +from . import Scaler_pb2 as Scaler__pb2 +from . 
import NonMaximumSuppression_pb2 as NonMaximumSuppression__pb2 +try: + DataStructures__pb2 = NonMaximumSuppression__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = NonMaximumSuppression__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = NonMaximumSuppression__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = NonMaximumSuppression__pb2.FeatureTypes_pb2 +from . import SVM_pb2 as SVM__pb2 +try: + DataStructures__pb2 = SVM__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = SVM__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = SVM__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = SVM__pb2.FeatureTypes_pb2 +from . import TreeEnsemble_pb2 as TreeEnsemble__pb2 +try: + DataStructures__pb2 = TreeEnsemble__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = TreeEnsemble__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = TreeEnsemble__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = TreeEnsemble__pb2.FeatureTypes_pb2 +from . import Parameters_pb2 as Parameters__pb2 +try: + DataStructures__pb2 = Parameters__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = Parameters__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = Parameters__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = Parameters__pb2.FeatureTypes_pb2 +from . import ItemSimilarityRecommender_pb2 as ItemSimilarityRecommender__pb2 +try: + DataStructures__pb2 = ItemSimilarityRecommender__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = ItemSimilarityRecommender__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = ItemSimilarityRecommender__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = ItemSimilarityRecommender__pb2.FeatureTypes_pb2 +from . import SoundAnalysisPreprocessing_pb2 as SoundAnalysisPreprocessing__pb2 +from . import LinkedModel_pb2 as LinkedModel__pb2 +try: + Parameters__pb2 = LinkedModel__pb2.Parameters__pb2 +except AttributeError: + Parameters__pb2 = LinkedModel__pb2.Parameters_pb2 +try: + DataStructures__pb2 = LinkedModel__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = LinkedModel__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = LinkedModel__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = LinkedModel__pb2.FeatureTypes_pb2 +from . 
import ClassConfidenceThresholding_pb2 as ClassConfidenceThresholding__pb2 +try: + DataStructures__pb2 = ClassConfidenceThresholding__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = ClassConfidenceThresholding__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = ClassConfidenceThresholding__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = ClassConfidenceThresholding__pb2.FeatureTypes_pb2 + +from .VisionFeaturePrint_pb2 import * +from .AudioFeaturePrint_pb2 import * +from .TextClassifier_pb2 import * +from .WordTagger_pb2 import * +from .Gazetteer_pb2 import * +from .WordEmbedding_pb2 import * +from .ArrayFeatureExtractor_pb2 import * +from .BayesianProbitRegressor_pb2 import * +from .CategoricalMapping_pb2 import * +from .CustomModel_pb2 import * +from .DictVectorizer_pb2 import * +from .FeatureTypes_pb2 import * +from .FeatureVectorizer_pb2 import * +from .GLMRegressor_pb2 import * +from .GLMClassifier_pb2 import * +from .NearestNeighbors_pb2 import * +from .Identity_pb2 import * +from .Imputer_pb2 import * +from .MIL_pb2 import * +from .NeuralNetwork_pb2 import * +from .Normalizer_pb2 import * +from .OneHotEncoder_pb2 import * +from .Scaler_pb2 import * +from .NonMaximumSuppression_pb2 import * +from .SVM_pb2 import * +from .TreeEnsemble_pb2 import * +from .Parameters_pb2 import * +from .ItemSimilarityRecommender_pb2 import * +from .SoundAnalysisPreprocessing_pb2 import * +from .LinkedModel_pb2 import * +from .ClassConfidenceThresholding_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='Model.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x0bModel.proto\x12\x14\x43oreML.Specification\x1a\x18VisionFeaturePrint.proto\x1a\x17\x41udioFeaturePrint.proto\x1a\x14TextClassifier.proto\x1a\x10WordTagger.proto\x1a\x0fGazetteer.proto\x1a\x13WordEmbedding.proto\x1a\x1b\x41rrayFeatureExtractor.proto\x1a\x1d\x42\x61yesianProbitRegressor.proto\x1a\x18\x43\x61tegoricalMapping.proto\x1a\x11\x43ustomModel.proto\x1a\x14\x44ictVectorizer.proto\x1a\x12\x46\x65\x61tureTypes.proto\x1a\x17\x46\x65\x61tureVectorizer.proto\x1a\x12GLMRegressor.proto\x1a\x13GLMClassifier.proto\x1a\x16NearestNeighbors.proto\x1a\x0eIdentity.proto\x1a\rImputer.proto\x1a\tMIL.proto\x1a\x13NeuralNetwork.proto\x1a\x10Normalizer.proto\x1a\x13OneHotEncoder.proto\x1a\x0cScaler.proto\x1a\x1bNonMaximumSuppression.proto\x1a\tSVM.proto\x1a\x12TreeEnsemble.proto\x1a\x10Parameters.proto\x1a\x1fItemSimilarityRecommender.proto\x1a SoundAnalysisPreprocessing.proto\x1a\x11LinkedModel.proto\x1a!ClassConfidenceThresholding.proto\"F\n\x08Pipeline\x12+\n\x06models\x18\x01 \x03(\x0b\x32\x1b.CoreML.Specification.Model\x12\r\n\x05names\x18\x02 \x03(\t\"F\n\x12PipelineClassifier\x12\x30\n\x08pipeline\x18\x01 \x01(\x0b\x32\x1e.CoreML.Specification.Pipeline\"E\n\x11PipelineRegressor\x12\x30\n\x08pipeline\x18\x01 \x01(\x0b\x32\x1e.CoreML.Specification.Pipeline\"m\n\x12\x46\x65\x61tureDescription\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x10shortDescription\x18\x02 \x01(\t\x12/\n\x04type\x18\x03 \x01(\x0b\x32!.CoreML.Specification.FeatureType\"\xd6\x01\n\x08Metadata\x12\x18\n\x10shortDescription\x18\x01 \x01(\t\x12\x15\n\rversionString\x18\x02 \x01(\t\x12\x0e\n\x06\x61uthor\x18\x03 \x01(\t\x12\x0f\n\x07license\x18\x04 \x01(\t\x12\x44\n\x0buserDefined\x18\x64 \x03(\x0b\x32/.CoreML.Specification.Metadata.UserDefinedEntry\x1a\x32\n\x10UserDefinedEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01\"\xba\x02\n\x10ModelDescription\x12\x37\n\x05input\x18\x01 \x03(\x0b\x32(.CoreML.Specification.FeatureDescription\x12\x38\n\x06output\x18\n \x03(\x0b\x32(.CoreML.Specification.FeatureDescription\x12\x1c\n\x14predictedFeatureName\x18\x0b \x01(\t\x12\"\n\x1apredictedProbabilitiesName\x18\x0c \x01(\t\x12?\n\rtrainingInput\x18\x32 \x03(\x0b\x32(.CoreML.Specification.FeatureDescription\x12\x30\n\x08metadata\x18\x64 \x01(\x0b\x32\x1e.CoreML.Specification.Metadata\"4\n\x0fSerializedModel\x12\x12\n\nidentifier\x18\x01 \x01(\t\x12\r\n\x05model\x18\x02 \x01(\x0c\"\xf1\x15\n\x05Model\x12\x1c\n\x14specificationVersion\x18\x01 \x01(\x05\x12;\n\x0b\x64\x65scription\x18\x02 \x01(\x0b\x32&.CoreML.Specification.ModelDescription\x12\x13\n\x0bisUpdatable\x18\n \x01(\x08\x12G\n\x12pipelineClassifier\x18\xc8\x01 \x01(\x0b\x32(.CoreML.Specification.PipelineClassifierH\x00\x12\x45\n\x11pipelineRegressor\x18\xc9\x01 \x01(\x0b\x32\'.CoreML.Specification.PipelineRegressorH\x00\x12\x33\n\x08pipeline\x18\xca\x01 \x01(\x0b\x32\x1e.CoreML.Specification.PipelineH\x00\x12;\n\x0cglmRegressor\x18\xac\x02 \x01(\x0b\x32\".CoreML.Specification.GLMRegressorH\x00\x12O\n\x16supportVectorRegressor\x18\xad\x02 \x01(\x0b\x32,.CoreML.Specification.SupportVectorRegressorH\x00\x12M\n\x15treeEnsembleRegressor\x18\xae\x02 \x01(\x0b\x32+.CoreML.Specification.TreeEnsembleRegressorH\x00\x12O\n\x16neuralNetworkRegressor\x18\xaf\x02 \x01(\x0b\x32,.CoreML.Specification.NeuralNetworkRegressorH\x00\x12Q\n\x17\x62\x61yesianProbitRegressor\x18\xb0\x02 \x01(\x0b\x32-.CoreML.Specification.BayesianProbitRegressorH\x00\x12=\n\rglmClassifier\x18\x90\x03 \x01(\x0b\x32#.CoreML.Specification.GLMClassifierH\x00\x12Q\n\x17supportVectorClassifier\x18\x91\x03 \x01(\x0b\x32-.CoreML.Specification.SupportVectorClassifierH\x00\x12O\n\x16treeEnsembleClassifier\x18\x92\x03 \x01(\x0b\x32,.CoreML.Specification.TreeEnsembleClassifierH\x00\x12Q\n\x17neuralNetworkClassifier\x18\x93\x03 \x01(\x0b\x32-.CoreML.Specification.NeuralNetworkClassifierH\x00\x12Y\n\x1bkNearestNeighborsClassifier\x18\x94\x03 \x01(\x0b\x32\x31.CoreML.Specification.KNearestNeighborsClassifierH\x00\x12=\n\rneuralNetwork\x18\xf4\x03 \x01(\x0b\x32#.CoreML.Specification.NeuralNetworkH\x00\x12U\n\x19itemSimilarityRecommender\x18\xf5\x03 \x01(\x0b\x32/.CoreML.Specification.ItemSimilarityRecommenderH\x00\x12;\n\tmlProgram\x18\xf6\x03 \x01(\x0b\x32%.CoreML.Specification.MILSpec.ProgramH\x00\x12\x39\n\x0b\x63ustomModel\x18\xab\x04 \x01(\x0b\x32!.CoreML.Specification.CustomModelH\x00\x12\x39\n\x0blinkedModel\x18\xac\x04 \x01(\x0b\x32!.CoreML.Specification.LinkedModelH\x00\x12Y\n\x1b\x63lassConfidenceThresholding\x18\xb0\x04 \x01(\x0b\x32\x31.CoreML.Specification.ClassConfidenceThresholdingH\x00\x12=\n\roneHotEncoder\x18\xd8\x04 \x01(\x0b\x32#.CoreML.Specification.OneHotEncoderH\x00\x12\x31\n\x07imputer\x18\xd9\x04 \x01(\x0b\x32\x1d.CoreML.Specification.ImputerH\x00\x12\x45\n\x11\x66\x65\x61tureVectorizer\x18\xda\x04 \x01(\x0b\x32\'.CoreML.Specification.FeatureVectorizerH\x00\x12?\n\x0e\x64ictVectorizer\x18\xdb\x04 \x01(\x0b\x32$.CoreML.Specification.DictVectorizerH\x00\x12/\n\x06scaler\x18\xdc\x04 \x01(\x0b\x32\x1c.CoreML.Specification.ScalerH\x00\x12G\n\x12\x63\x61tegoricalMapping\x18\xde\x04 \x01(\x0b\x32(.CoreML.Specification.CategoricalMappingH\x00\x12\x37\n\nnormalizer\x18\xdf\x04 \x01(\x0b\x32 .CoreML.Specification.NormalizerH\x00\x12M\n\x15\x61rrayFeatureExtractor\x18\xe1\x04 
\x01(\x0b\x32+.CoreML.Specification.ArrayFeatureExtractorH\x00\x12M\n\x15nonMaximumSuppression\x18\xe2\x04 \x01(\x0b\x32+.CoreML.Specification.NonMaximumSuppressionH\x00\x12\x33\n\x08identity\x18\x84\x07 \x01(\x0b\x32\x1e.CoreML.Specification.IdentityH\x00\x12L\n\x0etextClassifier\x18\xd0\x0f \x01(\x0b\x32\x31.CoreML.Specification.CoreMLModels.TextClassifierH\x00\x12\x44\n\nwordTagger\x18\xd1\x0f \x01(\x0b\x32-.CoreML.Specification.CoreMLModels.WordTaggerH\x00\x12T\n\x12visionFeaturePrint\x18\xd2\x0f \x01(\x0b\x32\x35.CoreML.Specification.CoreMLModels.VisionFeaturePrintH\x00\x12\x64\n\x1asoundAnalysisPreprocessing\x18\xd3\x0f \x01(\x0b\x32=.CoreML.Specification.CoreMLModels.SoundAnalysisPreprocessingH\x00\x12\x42\n\tgazetteer\x18\xd4\x0f \x01(\x0b\x32,.CoreML.Specification.CoreMLModels.GazetteerH\x00\x12J\n\rwordEmbedding\x18\xd5\x0f \x01(\x0b\x32\x30.CoreML.Specification.CoreMLModels.WordEmbeddingH\x00\x12R\n\x11\x61udioFeaturePrint\x18\xd6\x0f \x01(\x0b\x32\x34.CoreML.Specification.CoreMLModels.AudioFeaturePrintH\x00\x12\x41\n\x0fserializedModel\x18\xb8\x17 \x01(\x0b\x32%.CoreML.Specification.SerializedModelH\x00\x42\x06\n\x04TypeB\x02H\x03P\x00P\x01P\x02P\x03P\x04P\x05P\x06P\x07P\x08P\tP\nP\x0bP\x0cP\rP\x0eP\x0fP\x10P\x11P\x12P\x13P\x14P\x15P\x16P\x17P\x18P\x19P\x1aP\x1bP\x1cP\x1dP\x1e\x62\x06proto3') + , + dependencies=[VisionFeaturePrint__pb2.DESCRIPTOR,AudioFeaturePrint__pb2.DESCRIPTOR,TextClassifier__pb2.DESCRIPTOR,WordTagger__pb2.DESCRIPTOR,Gazetteer__pb2.DESCRIPTOR,WordEmbedding__pb2.DESCRIPTOR,ArrayFeatureExtractor__pb2.DESCRIPTOR,BayesianProbitRegressor__pb2.DESCRIPTOR,CategoricalMapping__pb2.DESCRIPTOR,CustomModel__pb2.DESCRIPTOR,DictVectorizer__pb2.DESCRIPTOR,FeatureTypes__pb2.DESCRIPTOR,FeatureVectorizer__pb2.DESCRIPTOR,GLMRegressor__pb2.DESCRIPTOR,GLMClassifier__pb2.DESCRIPTOR,NearestNeighbors__pb2.DESCRIPTOR,Identity__pb2.DESCRIPTOR,Imputer__pb2.DESCRIPTOR,MIL__pb2.DESCRIPTOR,NeuralNetwork__pb2.DESCRIPTOR,Normalizer__pb2.DESCRIPTOR,OneHotEncoder__pb2.DESCRIPTOR,Scaler__pb2.DESCRIPTOR,NonMaximumSuppression__pb2.DESCRIPTOR,SVM__pb2.DESCRIPTOR,TreeEnsemble__pb2.DESCRIPTOR,Parameters__pb2.DESCRIPTOR,ItemSimilarityRecommender__pb2.DESCRIPTOR,SoundAnalysisPreprocessing__pb2.DESCRIPTOR,LinkedModel__pb2.DESCRIPTOR,ClassConfidenceThresholding__pb2.DESCRIPTOR,], + public_dependencies=[VisionFeaturePrint__pb2.DESCRIPTOR,AudioFeaturePrint__pb2.DESCRIPTOR,TextClassifier__pb2.DESCRIPTOR,WordTagger__pb2.DESCRIPTOR,Gazetteer__pb2.DESCRIPTOR,WordEmbedding__pb2.DESCRIPTOR,ArrayFeatureExtractor__pb2.DESCRIPTOR,BayesianProbitRegressor__pb2.DESCRIPTOR,CategoricalMapping__pb2.DESCRIPTOR,CustomModel__pb2.DESCRIPTOR,DictVectorizer__pb2.DESCRIPTOR,FeatureTypes__pb2.DESCRIPTOR,FeatureVectorizer__pb2.DESCRIPTOR,GLMRegressor__pb2.DESCRIPTOR,GLMClassifier__pb2.DESCRIPTOR,NearestNeighbors__pb2.DESCRIPTOR,Identity__pb2.DESCRIPTOR,Imputer__pb2.DESCRIPTOR,MIL__pb2.DESCRIPTOR,NeuralNetwork__pb2.DESCRIPTOR,Normalizer__pb2.DESCRIPTOR,OneHotEncoder__pb2.DESCRIPTOR,Scaler__pb2.DESCRIPTOR,NonMaximumSuppression__pb2.DESCRIPTOR,SVM__pb2.DESCRIPTOR,TreeEnsemble__pb2.DESCRIPTOR,Parameters__pb2.DESCRIPTOR,ItemSimilarityRecommender__pb2.DESCRIPTOR,SoundAnalysisPreprocessing__pb2.DESCRIPTOR,LinkedModel__pb2.DESCRIPTOR,ClassConfidenceThresholding__pb2.DESCRIPTOR,]) + + + + +_PIPELINE = _descriptor.Descriptor( + name='Pipeline', + full_name='CoreML.Specification.Pipeline', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='models', 
full_name='CoreML.Specification.Pipeline.models', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='names', full_name='CoreML.Specification.Pipeline.names', index=1, + number=2, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=718, + serialized_end=788, +) + + +_PIPELINECLASSIFIER = _descriptor.Descriptor( + name='PipelineClassifier', + full_name='CoreML.Specification.PipelineClassifier', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='pipeline', full_name='CoreML.Specification.PipelineClassifier.pipeline', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=790, + serialized_end=860, +) + + +_PIPELINEREGRESSOR = _descriptor.Descriptor( + name='PipelineRegressor', + full_name='CoreML.Specification.PipelineRegressor', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='pipeline', full_name='CoreML.Specification.PipelineRegressor.pipeline', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=862, + serialized_end=931, +) + + +_FEATUREDESCRIPTION = _descriptor.Descriptor( + name='FeatureDescription', + full_name='CoreML.Specification.FeatureDescription', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='CoreML.Specification.FeatureDescription.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='shortDescription', full_name='CoreML.Specification.FeatureDescription.shortDescription', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='type', full_name='CoreML.Specification.FeatureDescription.type', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + 
enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=933, + serialized_end=1042, +) + + +_METADATA_USERDEFINEDENTRY = _descriptor.Descriptor( + name='UserDefinedEntry', + full_name='CoreML.Specification.Metadata.UserDefinedEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='CoreML.Specification.Metadata.UserDefinedEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.Metadata.UserDefinedEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1209, + serialized_end=1259, +) + +_METADATA = _descriptor.Descriptor( + name='Metadata', + full_name='CoreML.Specification.Metadata', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='shortDescription', full_name='CoreML.Specification.Metadata.shortDescription', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='versionString', full_name='CoreML.Specification.Metadata.versionString', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='author', full_name='CoreML.Specification.Metadata.author', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='license', full_name='CoreML.Specification.Metadata.license', index=3, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='userDefined', full_name='CoreML.Specification.Metadata.userDefined', index=4, + number=100, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_METADATA_USERDEFINEDENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1045, + serialized_end=1259, +) + + +_MODELDESCRIPTION = _descriptor.Descriptor( + name='ModelDescription', + 
full_name='CoreML.Specification.ModelDescription', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='input', full_name='CoreML.Specification.ModelDescription.input', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='output', full_name='CoreML.Specification.ModelDescription.output', index=1, + number=10, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='predictedFeatureName', full_name='CoreML.Specification.ModelDescription.predictedFeatureName', index=2, + number=11, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='predictedProbabilitiesName', full_name='CoreML.Specification.ModelDescription.predictedProbabilitiesName', index=3, + number=12, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='trainingInput', full_name='CoreML.Specification.ModelDescription.trainingInput', index=4, + number=50, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='metadata', full_name='CoreML.Specification.ModelDescription.metadata', index=5, + number=100, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1262, + serialized_end=1576, +) + + +_SERIALIZEDMODEL = _descriptor.Descriptor( + name='SerializedModel', + full_name='CoreML.Specification.SerializedModel', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='identifier', full_name='CoreML.Specification.SerializedModel.identifier', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='model', full_name='CoreML.Specification.SerializedModel.model', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1578, + serialized_end=1630, +) + + +_MODEL = _descriptor.Descriptor( + name='Model', + 
full_name='CoreML.Specification.Model', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='specificationVersion', full_name='CoreML.Specification.Model.specificationVersion', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='description', full_name='CoreML.Specification.Model.description', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='isUpdatable', full_name='CoreML.Specification.Model.isUpdatable', index=2, + number=10, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='pipelineClassifier', full_name='CoreML.Specification.Model.pipelineClassifier', index=3, + number=200, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='pipelineRegressor', full_name='CoreML.Specification.Model.pipelineRegressor', index=4, + number=201, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='pipeline', full_name='CoreML.Specification.Model.pipeline', index=5, + number=202, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='glmRegressor', full_name='CoreML.Specification.Model.glmRegressor', index=6, + number=300, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='supportVectorRegressor', full_name='CoreML.Specification.Model.supportVectorRegressor', index=7, + number=301, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='treeEnsembleRegressor', full_name='CoreML.Specification.Model.treeEnsembleRegressor', index=8, + number=302, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='neuralNetworkRegressor', full_name='CoreML.Specification.Model.neuralNetworkRegressor', index=9, + number=303, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='bayesianProbitRegressor', 
full_name='CoreML.Specification.Model.bayesianProbitRegressor', index=10, + number=304, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='glmClassifier', full_name='CoreML.Specification.Model.glmClassifier', index=11, + number=400, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='supportVectorClassifier', full_name='CoreML.Specification.Model.supportVectorClassifier', index=12, + number=401, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='treeEnsembleClassifier', full_name='CoreML.Specification.Model.treeEnsembleClassifier', index=13, + number=402, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='neuralNetworkClassifier', full_name='CoreML.Specification.Model.neuralNetworkClassifier', index=14, + number=403, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='kNearestNeighborsClassifier', full_name='CoreML.Specification.Model.kNearestNeighborsClassifier', index=15, + number=404, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='neuralNetwork', full_name='CoreML.Specification.Model.neuralNetwork', index=16, + number=500, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='itemSimilarityRecommender', full_name='CoreML.Specification.Model.itemSimilarityRecommender', index=17, + number=501, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mlProgram', full_name='CoreML.Specification.Model.mlProgram', index=18, + number=502, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='customModel', full_name='CoreML.Specification.Model.customModel', index=19, + number=555, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='linkedModel', full_name='CoreML.Specification.Model.linkedModel', index=20, + number=556, type=11, cpp_type=10, label=1, + has_default_value=False, 
default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='classConfidenceThresholding', full_name='CoreML.Specification.Model.classConfidenceThresholding', index=21, + number=560, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='oneHotEncoder', full_name='CoreML.Specification.Model.oneHotEncoder', index=22, + number=600, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='imputer', full_name='CoreML.Specification.Model.imputer', index=23, + number=601, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='featureVectorizer', full_name='CoreML.Specification.Model.featureVectorizer', index=24, + number=602, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='dictVectorizer', full_name='CoreML.Specification.Model.dictVectorizer', index=25, + number=603, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='scaler', full_name='CoreML.Specification.Model.scaler', index=26, + number=604, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='categoricalMapping', full_name='CoreML.Specification.Model.categoricalMapping', index=27, + number=606, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='normalizer', full_name='CoreML.Specification.Model.normalizer', index=28, + number=607, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='arrayFeatureExtractor', full_name='CoreML.Specification.Model.arrayFeatureExtractor', index=29, + number=609, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='nonMaximumSuppression', full_name='CoreML.Specification.Model.nonMaximumSuppression', index=30, + number=610, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='identity', 
full_name='CoreML.Specification.Model.identity', index=31, + number=900, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='textClassifier', full_name='CoreML.Specification.Model.textClassifier', index=32, + number=2000, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='wordTagger', full_name='CoreML.Specification.Model.wordTagger', index=33, + number=2001, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='visionFeaturePrint', full_name='CoreML.Specification.Model.visionFeaturePrint', index=34, + number=2002, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='soundAnalysisPreprocessing', full_name='CoreML.Specification.Model.soundAnalysisPreprocessing', index=35, + number=2003, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='gazetteer', full_name='CoreML.Specification.Model.gazetteer', index=36, + number=2004, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='wordEmbedding', full_name='CoreML.Specification.Model.wordEmbedding', index=37, + number=2005, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='audioFeaturePrint', full_name='CoreML.Specification.Model.audioFeaturePrint', index=38, + number=2006, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='serializedModel', full_name='CoreML.Specification.Model.serializedModel', index=39, + number=3000, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='Type', full_name='CoreML.Specification.Model.Type', + index=0, containing_type=None, fields=[]), + ], + serialized_start=1633, + serialized_end=4434, +) + +_PIPELINE.fields_by_name['models'].message_type = _MODEL +_PIPELINECLASSIFIER.fields_by_name['pipeline'].message_type = _PIPELINE +_PIPELINEREGRESSOR.fields_by_name['pipeline'].message_type = _PIPELINE 
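+# The assignments that follow resolve cross-references left as
+# message_type=None while each Descriptor above was being constructed:
+# protoc's pure-Python output creates every Descriptor first, then links
+# fields to their message types (including types imported from sibling
+# *_pb2 modules), and finally populates each oneof by appending its
+# member fields and setting their containing_oneof back-references.
+# Once the generated classes are registered further below, the 'Type'
+# oneof behaves like any protobuf oneof; a minimal usage sketch:
+#
+#   spec = Model()
+#   spec.specificationVersion = 1
+#   spec.pipeline.SetInParent()   # selects the 'pipeline' arm of Type
+#   assert spec.WhichOneof('Type') == 'pipeline'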
+_FEATUREDESCRIPTION.fields_by_name['type'].message_type = FeatureTypes__pb2._FEATURETYPE +_METADATA_USERDEFINEDENTRY.containing_type = _METADATA +_METADATA.fields_by_name['userDefined'].message_type = _METADATA_USERDEFINEDENTRY +_MODELDESCRIPTION.fields_by_name['input'].message_type = _FEATUREDESCRIPTION +_MODELDESCRIPTION.fields_by_name['output'].message_type = _FEATUREDESCRIPTION +_MODELDESCRIPTION.fields_by_name['trainingInput'].message_type = _FEATUREDESCRIPTION +_MODELDESCRIPTION.fields_by_name['metadata'].message_type = _METADATA +_MODEL.fields_by_name['description'].message_type = _MODELDESCRIPTION +_MODEL.fields_by_name['pipelineClassifier'].message_type = _PIPELINECLASSIFIER +_MODEL.fields_by_name['pipelineRegressor'].message_type = _PIPELINEREGRESSOR +_MODEL.fields_by_name['pipeline'].message_type = _PIPELINE +_MODEL.fields_by_name['glmRegressor'].message_type = GLMRegressor__pb2._GLMREGRESSOR +_MODEL.fields_by_name['supportVectorRegressor'].message_type = SVM__pb2._SUPPORTVECTORREGRESSOR +_MODEL.fields_by_name['treeEnsembleRegressor'].message_type = TreeEnsemble__pb2._TREEENSEMBLEREGRESSOR +_MODEL.fields_by_name['neuralNetworkRegressor'].message_type = NeuralNetwork__pb2._NEURALNETWORKREGRESSOR +_MODEL.fields_by_name['bayesianProbitRegressor'].message_type = BayesianProbitRegressor__pb2._BAYESIANPROBITREGRESSOR +_MODEL.fields_by_name['glmClassifier'].message_type = GLMClassifier__pb2._GLMCLASSIFIER +_MODEL.fields_by_name['supportVectorClassifier'].message_type = SVM__pb2._SUPPORTVECTORCLASSIFIER +_MODEL.fields_by_name['treeEnsembleClassifier'].message_type = TreeEnsemble__pb2._TREEENSEMBLECLASSIFIER +_MODEL.fields_by_name['neuralNetworkClassifier'].message_type = NeuralNetwork__pb2._NEURALNETWORKCLASSIFIER +_MODEL.fields_by_name['kNearestNeighborsClassifier'].message_type = NearestNeighbors__pb2._KNEARESTNEIGHBORSCLASSIFIER +_MODEL.fields_by_name['neuralNetwork'].message_type = NeuralNetwork__pb2._NEURALNETWORK +_MODEL.fields_by_name['itemSimilarityRecommender'].message_type = ItemSimilarityRecommender__pb2._ITEMSIMILARITYRECOMMENDER +_MODEL.fields_by_name['mlProgram'].message_type = MIL__pb2._PROGRAM +_MODEL.fields_by_name['customModel'].message_type = CustomModel__pb2._CUSTOMMODEL +_MODEL.fields_by_name['linkedModel'].message_type = LinkedModel__pb2._LINKEDMODEL +_MODEL.fields_by_name['classConfidenceThresholding'].message_type = ClassConfidenceThresholding__pb2._CLASSCONFIDENCETHRESHOLDING +_MODEL.fields_by_name['oneHotEncoder'].message_type = OneHotEncoder__pb2._ONEHOTENCODER +_MODEL.fields_by_name['imputer'].message_type = Imputer__pb2._IMPUTER +_MODEL.fields_by_name['featureVectorizer'].message_type = FeatureVectorizer__pb2._FEATUREVECTORIZER +_MODEL.fields_by_name['dictVectorizer'].message_type = DictVectorizer__pb2._DICTVECTORIZER +_MODEL.fields_by_name['scaler'].message_type = Scaler__pb2._SCALER +_MODEL.fields_by_name['categoricalMapping'].message_type = CategoricalMapping__pb2._CATEGORICALMAPPING +_MODEL.fields_by_name['normalizer'].message_type = Normalizer__pb2._NORMALIZER +_MODEL.fields_by_name['arrayFeatureExtractor'].message_type = ArrayFeatureExtractor__pb2._ARRAYFEATUREEXTRACTOR +_MODEL.fields_by_name['nonMaximumSuppression'].message_type = NonMaximumSuppression__pb2._NONMAXIMUMSUPPRESSION +_MODEL.fields_by_name['identity'].message_type = Identity__pb2._IDENTITY +_MODEL.fields_by_name['textClassifier'].message_type = TextClassifier__pb2._TEXTCLASSIFIER +_MODEL.fields_by_name['wordTagger'].message_type = WordTagger__pb2._WORDTAGGER 
+_MODEL.fields_by_name['visionFeaturePrint'].message_type = VisionFeaturePrint__pb2._VISIONFEATUREPRINT +_MODEL.fields_by_name['soundAnalysisPreprocessing'].message_type = SoundAnalysisPreprocessing__pb2._SOUNDANALYSISPREPROCESSING +_MODEL.fields_by_name['gazetteer'].message_type = Gazetteer__pb2._GAZETTEER +_MODEL.fields_by_name['wordEmbedding'].message_type = WordEmbedding__pb2._WORDEMBEDDING +_MODEL.fields_by_name['audioFeaturePrint'].message_type = AudioFeaturePrint__pb2._AUDIOFEATUREPRINT +_MODEL.fields_by_name['serializedModel'].message_type = _SERIALIZEDMODEL +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['pipelineClassifier']) +_MODEL.fields_by_name['pipelineClassifier'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['pipelineRegressor']) +_MODEL.fields_by_name['pipelineRegressor'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['pipeline']) +_MODEL.fields_by_name['pipeline'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['glmRegressor']) +_MODEL.fields_by_name['glmRegressor'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['supportVectorRegressor']) +_MODEL.fields_by_name['supportVectorRegressor'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['treeEnsembleRegressor']) +_MODEL.fields_by_name['treeEnsembleRegressor'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['neuralNetworkRegressor']) +_MODEL.fields_by_name['neuralNetworkRegressor'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['bayesianProbitRegressor']) +_MODEL.fields_by_name['bayesianProbitRegressor'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['glmClassifier']) +_MODEL.fields_by_name['glmClassifier'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['supportVectorClassifier']) +_MODEL.fields_by_name['supportVectorClassifier'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['treeEnsembleClassifier']) +_MODEL.fields_by_name['treeEnsembleClassifier'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['neuralNetworkClassifier']) +_MODEL.fields_by_name['neuralNetworkClassifier'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['kNearestNeighborsClassifier']) +_MODEL.fields_by_name['kNearestNeighborsClassifier'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['neuralNetwork']) +_MODEL.fields_by_name['neuralNetwork'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['itemSimilarityRecommender']) +_MODEL.fields_by_name['itemSimilarityRecommender'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['mlProgram']) +_MODEL.fields_by_name['mlProgram'].containing_oneof = 
_MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['customModel']) +_MODEL.fields_by_name['customModel'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['linkedModel']) +_MODEL.fields_by_name['linkedModel'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['classConfidenceThresholding']) +_MODEL.fields_by_name['classConfidenceThresholding'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['oneHotEncoder']) +_MODEL.fields_by_name['oneHotEncoder'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['imputer']) +_MODEL.fields_by_name['imputer'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['featureVectorizer']) +_MODEL.fields_by_name['featureVectorizer'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['dictVectorizer']) +_MODEL.fields_by_name['dictVectorizer'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['scaler']) +_MODEL.fields_by_name['scaler'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['categoricalMapping']) +_MODEL.fields_by_name['categoricalMapping'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['normalizer']) +_MODEL.fields_by_name['normalizer'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['arrayFeatureExtractor']) +_MODEL.fields_by_name['arrayFeatureExtractor'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['nonMaximumSuppression']) +_MODEL.fields_by_name['nonMaximumSuppression'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['identity']) +_MODEL.fields_by_name['identity'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['textClassifier']) +_MODEL.fields_by_name['textClassifier'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['wordTagger']) +_MODEL.fields_by_name['wordTagger'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['visionFeaturePrint']) +_MODEL.fields_by_name['visionFeaturePrint'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['soundAnalysisPreprocessing']) +_MODEL.fields_by_name['soundAnalysisPreprocessing'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['gazetteer']) +_MODEL.fields_by_name['gazetteer'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['wordEmbedding']) +_MODEL.fields_by_name['wordEmbedding'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['audioFeaturePrint']) 
+_MODEL.fields_by_name['audioFeaturePrint'].containing_oneof = _MODEL.oneofs_by_name['Type'] +_MODEL.oneofs_by_name['Type'].fields.append( + _MODEL.fields_by_name['serializedModel']) +_MODEL.fields_by_name['serializedModel'].containing_oneof = _MODEL.oneofs_by_name['Type'] +DESCRIPTOR.message_types_by_name['Pipeline'] = _PIPELINE +DESCRIPTOR.message_types_by_name['PipelineClassifier'] = _PIPELINECLASSIFIER +DESCRIPTOR.message_types_by_name['PipelineRegressor'] = _PIPELINEREGRESSOR +DESCRIPTOR.message_types_by_name['FeatureDescription'] = _FEATUREDESCRIPTION +DESCRIPTOR.message_types_by_name['Metadata'] = _METADATA +DESCRIPTOR.message_types_by_name['ModelDescription'] = _MODELDESCRIPTION +DESCRIPTOR.message_types_by_name['SerializedModel'] = _SERIALIZEDMODEL +DESCRIPTOR.message_types_by_name['Model'] = _MODEL +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Pipeline = _reflection.GeneratedProtocolMessageType('Pipeline', (_message.Message,), dict( + DESCRIPTOR = _PIPELINE, + __module__ = 'Model_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Pipeline) + )) +_sym_db.RegisterMessage(Pipeline) + +PipelineClassifier = _reflection.GeneratedProtocolMessageType('PipelineClassifier', (_message.Message,), dict( + DESCRIPTOR = _PIPELINECLASSIFIER, + __module__ = 'Model_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.PipelineClassifier) + )) +_sym_db.RegisterMessage(PipelineClassifier) + +PipelineRegressor = _reflection.GeneratedProtocolMessageType('PipelineRegressor', (_message.Message,), dict( + DESCRIPTOR = _PIPELINEREGRESSOR, + __module__ = 'Model_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.PipelineRegressor) + )) +_sym_db.RegisterMessage(PipelineRegressor) + +FeatureDescription = _reflection.GeneratedProtocolMessageType('FeatureDescription', (_message.Message,), dict( + DESCRIPTOR = _FEATUREDESCRIPTION, + __module__ = 'Model_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.FeatureDescription) + )) +_sym_db.RegisterMessage(FeatureDescription) + +Metadata = _reflection.GeneratedProtocolMessageType('Metadata', (_message.Message,), dict( + + UserDefinedEntry = _reflection.GeneratedProtocolMessageType('UserDefinedEntry', (_message.Message,), dict( + DESCRIPTOR = _METADATA_USERDEFINEDENTRY, + __module__ = 'Model_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Metadata.UserDefinedEntry) + )) + , + DESCRIPTOR = _METADATA, + __module__ = 'Model_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Metadata) + )) +_sym_db.RegisterMessage(Metadata) +_sym_db.RegisterMessage(Metadata.UserDefinedEntry) + +ModelDescription = _reflection.GeneratedProtocolMessageType('ModelDescription', (_message.Message,), dict( + DESCRIPTOR = _MODELDESCRIPTION, + __module__ = 'Model_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ModelDescription) + )) +_sym_db.RegisterMessage(ModelDescription) + +SerializedModel = _reflection.GeneratedProtocolMessageType('SerializedModel', (_message.Message,), dict( + DESCRIPTOR = _SERIALIZEDMODEL, + __module__ = 'Model_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SerializedModel) + )) +_sym_db.RegisterMessage(SerializedModel) + +Model = _reflection.GeneratedProtocolMessageType('Model', (_message.Message,), dict( + DESCRIPTOR = _MODEL, + __module__ = 'Model_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Model) + )) +_sym_db.RegisterMessage(Model) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = 
_descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +_METADATA_USERDEFINEDENTRY.has_options = True +_METADATA_USERDEFINEDENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/NamedParameters_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/NamedParameters_pb2.py new file mode 100644 index 00000000..3d47504d --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/NamedParameters_pb2.py @@ -0,0 +1,393 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: NamedParameters.proto + +import sys + +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pb2 +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database + +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='NamedParameters.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x15NamedParameters.proto\x12\x14\x43oreML.Specification\"0\n\nInt32Range\x12\x10\n\x08minValue\x18\x01 \x01(\x05\x12\x10\n\x08maxValue\x18\x02 \x01(\x05\"\x1a\n\x08Int32Set\x12\x0e\n\x06values\x18\x01 \x03(\x05\"0\n\nFloatRange\x12\x10\n\x08minValue\x18\x01 \x01(\x02\x12\x10\n\x08maxValue\x18\x02 \x01(\x02\"\x99\x01\n\x0eInt32Parameter\x12\x14\n\x0c\x64\x65\x66\x61ultValue\x18\x01 \x01(\x05\x12\x31\n\x05range\x18\n \x01(\x0b\x32 .CoreML.Specification.Int32RangeH\x00\x12-\n\x03set\x18\x0b \x01(\x0b\x32\x1e.CoreML.Specification.Int32SetH\x00\x42\x0f\n\rAllowedValues\"j\n\x0e\x46loatParameter\x12\x14\n\x0c\x64\x65\x66\x61ultValue\x18\x01 \x01(\x02\x12\x31\n\x05range\x18\n \x01(\x0b\x32 .CoreML.Specification.FloatRangeH\x00\x42\x0f\n\rAllowedValues\"\x93\x01\n\tParameter\x12>\n\x0eint32Parameter\x18\x01 \x01(\x0b\x32$.CoreML.Specification.Int32ParameterH\x00\x12>\n\x0e\x66loatParameter\x18\x02 \x01(\x0b\x32$.CoreML.Specification.FloatParameterH\x00\x42\x06\n\x04Type\"l\n\x0eNamedParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x10shortDescription\x18\x02 \x01(\t\x12\x32\n\tparameter\x18\x03 \x01(\x0b\x32\x1f.CoreML.Specification.ParameterB\x02H\x03\x62\x06proto3') +) + + + + +_INT32RANGE = _descriptor.Descriptor( + name='Int32Range', + full_name='CoreML.Specification.Int32Range', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='minValue', full_name='CoreML.Specification.Int32Range.minValue', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='maxValue', full_name='CoreML.Specification.Int32Range.maxValue', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=47, + serialized_end=95, +) + + +_INT32SET = 
_descriptor.Descriptor( + name='Int32Set', + full_name='CoreML.Specification.Int32Set', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='values', full_name='CoreML.Specification.Int32Set.values', index=0, + number=1, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=97, + serialized_end=123, +) + + +_FLOATRANGE = _descriptor.Descriptor( + name='FloatRange', + full_name='CoreML.Specification.FloatRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='minValue', full_name='CoreML.Specification.FloatRange.minValue', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='maxValue', full_name='CoreML.Specification.FloatRange.maxValue', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=125, + serialized_end=173, +) + + +_INT32PARAMETER = _descriptor.Descriptor( + name='Int32Parameter', + full_name='CoreML.Specification.Int32Parameter', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='defaultValue', full_name='CoreML.Specification.Int32Parameter.defaultValue', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='range', full_name='CoreML.Specification.Int32Parameter.range', index=1, + number=10, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='set', full_name='CoreML.Specification.Int32Parameter.set', index=2, + number=11, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='AllowedValues', full_name='CoreML.Specification.Int32Parameter.AllowedValues', + index=0, containing_type=None, fields=[]), + ], + serialized_start=176, + serialized_end=329, +) + + +_FLOATPARAMETER = _descriptor.Descriptor( + name='FloatParameter', + full_name='CoreML.Specification.FloatParameter', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='defaultValue', 
full_name='CoreML.Specification.FloatParameter.defaultValue', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='range', full_name='CoreML.Specification.FloatParameter.range', index=1, + number=10, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='AllowedValues', full_name='CoreML.Specification.FloatParameter.AllowedValues', + index=0, containing_type=None, fields=[]), + ], + serialized_start=331, + serialized_end=437, +) + + +_PARAMETER = _descriptor.Descriptor( + name='Parameter', + full_name='CoreML.Specification.Parameter', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='int32Parameter', full_name='CoreML.Specification.Parameter.int32Parameter', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='floatParameter', full_name='CoreML.Specification.Parameter.floatParameter', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='Type', full_name='CoreML.Specification.Parameter.Type', + index=0, containing_type=None, fields=[]), + ], + serialized_start=440, + serialized_end=587, +) + + +_NAMEDPARAMETER = _descriptor.Descriptor( + name='NamedParameter', + full_name='CoreML.Specification.NamedParameter', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='CoreML.Specification.NamedParameter.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='shortDescription', full_name='CoreML.Specification.NamedParameter.shortDescription', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='parameter', full_name='CoreML.Specification.NamedParameter.parameter', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + 
serialized_start=589, + serialized_end=697, +) + +_INT32PARAMETER.fields_by_name['range'].message_type = _INT32RANGE +_INT32PARAMETER.fields_by_name['set'].message_type = _INT32SET +_INT32PARAMETER.oneofs_by_name['AllowedValues'].fields.append( + _INT32PARAMETER.fields_by_name['range']) +_INT32PARAMETER.fields_by_name['range'].containing_oneof = _INT32PARAMETER.oneofs_by_name['AllowedValues'] +_INT32PARAMETER.oneofs_by_name['AllowedValues'].fields.append( + _INT32PARAMETER.fields_by_name['set']) +_INT32PARAMETER.fields_by_name['set'].containing_oneof = _INT32PARAMETER.oneofs_by_name['AllowedValues'] +_FLOATPARAMETER.fields_by_name['range'].message_type = _FLOATRANGE +_FLOATPARAMETER.oneofs_by_name['AllowedValues'].fields.append( + _FLOATPARAMETER.fields_by_name['range']) +_FLOATPARAMETER.fields_by_name['range'].containing_oneof = _FLOATPARAMETER.oneofs_by_name['AllowedValues'] +_PARAMETER.fields_by_name['int32Parameter'].message_type = _INT32PARAMETER +_PARAMETER.fields_by_name['floatParameter'].message_type = _FLOATPARAMETER +_PARAMETER.oneofs_by_name['Type'].fields.append( + _PARAMETER.fields_by_name['int32Parameter']) +_PARAMETER.fields_by_name['int32Parameter'].containing_oneof = _PARAMETER.oneofs_by_name['Type'] +_PARAMETER.oneofs_by_name['Type'].fields.append( + _PARAMETER.fields_by_name['floatParameter']) +_PARAMETER.fields_by_name['floatParameter'].containing_oneof = _PARAMETER.oneofs_by_name['Type'] +_NAMEDPARAMETER.fields_by_name['parameter'].message_type = _PARAMETER +DESCRIPTOR.message_types_by_name['Int32Range'] = _INT32RANGE +DESCRIPTOR.message_types_by_name['Int32Set'] = _INT32SET +DESCRIPTOR.message_types_by_name['FloatRange'] = _FLOATRANGE +DESCRIPTOR.message_types_by_name['Int32Parameter'] = _INT32PARAMETER +DESCRIPTOR.message_types_by_name['FloatParameter'] = _FLOATPARAMETER +DESCRIPTOR.message_types_by_name['Parameter'] = _PARAMETER +DESCRIPTOR.message_types_by_name['NamedParameter'] = _NAMEDPARAMETER +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Int32Range = _reflection.GeneratedProtocolMessageType('Int32Range', (_message.Message,), dict( + DESCRIPTOR = _INT32RANGE, + __module__ = 'NamedParameters_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Int32Range) + )) +_sym_db.RegisterMessage(Int32Range) + +Int32Set = _reflection.GeneratedProtocolMessageType('Int32Set', (_message.Message,), dict( + DESCRIPTOR = _INT32SET, + __module__ = 'NamedParameters_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Int32Set) + )) +_sym_db.RegisterMessage(Int32Set) + +FloatRange = _reflection.GeneratedProtocolMessageType('FloatRange', (_message.Message,), dict( + DESCRIPTOR = _FLOATRANGE, + __module__ = 'NamedParameters_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.FloatRange) + )) +_sym_db.RegisterMessage(FloatRange) + +Int32Parameter = _reflection.GeneratedProtocolMessageType('Int32Parameter', (_message.Message,), dict( + DESCRIPTOR = _INT32PARAMETER, + __module__ = 'NamedParameters_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Int32Parameter) + )) +_sym_db.RegisterMessage(Int32Parameter) + +FloatParameter = _reflection.GeneratedProtocolMessageType('FloatParameter', (_message.Message,), dict( + DESCRIPTOR = _FLOATPARAMETER, + __module__ = 'NamedParameters_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.FloatParameter) + )) +_sym_db.RegisterMessage(FloatParameter) + +Parameter = _reflection.GeneratedProtocolMessageType('Parameter', (_message.Message,), dict( + DESCRIPTOR = _PARAMETER, + 
__module__ = 'NamedParameters_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Parameter) + )) +_sym_db.RegisterMessage(Parameter) + +NamedParameter = _reflection.GeneratedProtocolMessageType('NamedParameter', (_message.Message,), dict( + DESCRIPTOR = _NAMEDPARAMETER, + __module__ = 'NamedParameters_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.NamedParameter) + )) +_sym_db.RegisterMessage(NamedParameter) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/NearestNeighbors_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/NearestNeighbors_pb2.py new file mode 100644 index 00000000..68ebea13 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/NearestNeighbors_pb2.py @@ -0,0 +1,424 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: NearestNeighbors.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . import DataStructures_pb2 as DataStructures__pb2 +try: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2 +from . import Parameters_pb2 as Parameters__pb2 +try: + DataStructures__pb2 = Parameters__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = Parameters__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = Parameters__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = Parameters__pb2.FeatureTypes_pb2 + +from .DataStructures_pb2 import * +from .Parameters_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='NearestNeighbors.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x16NearestNeighbors.proto\x12\x14\x43oreML.Specification\x1a\x14\x44\x61taStructures.proto\x1a\x10Parameters.proto\"\xb6\x04\n\x1bKNearestNeighborsClassifier\x12J\n\x15nearestNeighborsIndex\x18\x01 \x01(\x0b\x32+.CoreML.Specification.NearestNeighborsIndex\x12?\n\x11numberOfNeighbors\x18\x03 \x01(\x0b\x32$.CoreML.Specification.Int64Parameter\x12?\n\x11stringClassLabels\x18\x64 \x01(\x0b\x32\".CoreML.Specification.StringVectorH\x00\x12=\n\x10int64ClassLabels\x18\x65 \x01(\x0b\x32!.CoreML.Specification.Int64VectorH\x00\x12\x1c\n\x12\x64\x65\x66\x61ultStringLabel\x18n \x01(\tH\x01\x12\x1b\n\x11\x64\x65\x66\x61ultInt64Label\x18o \x01(\x03H\x01\x12\x43\n\x10uniformWeighting\x18\xc8\x01 \x01(\x0b\x32&.CoreML.Specification.UniformWeightingH\x02\x12S\n\x18inverseDistanceWeighting\x18\xd2\x01 \x01(\x0b\x32..CoreML.Specification.InverseDistanceWeightingH\x02\x42\r\n\x0b\x43lassLabelsB\x13\n\x11\x44\x65\x66\x61ultClassLabelB\x11\n\x0fWeightingScheme\"\xe2\x02\n\x15NearestNeighborsIndex\x12\x1a\n\x12numberOfDimensions\x18\x01 \x01(\x05\x12\x37\n\x0c\x66loatSamples\x18\x02 \x03(\x0b\x32!.CoreML.Specification.FloatVector\x12\x38\n\x0blinearIndex\x18\x64 \x01(\x0b\x32!.CoreML.Specification.LinearIndexH\x00\x12\x44\n\x11singleKdTreeIndex\x18n 
\x01(\x0b\x32\'.CoreML.Specification.SingleKdTreeIndexH\x00\x12S\n\x18squaredEuclideanDistance\x18\xc8\x01 \x01(\x0b\x32..CoreML.Specification.SquaredEuclideanDistanceH\x01\x42\x0b\n\tIndexTypeB\x12\n\x10\x44istanceFunction\"\x12\n\x10UniformWeighting\"\x1a\n\x18InverseDistanceWeighting\"\r\n\x0bLinearIndex\"%\n\x11SingleKdTreeIndex\x12\x10\n\x08leafSize\x18\x01 \x01(\x05\"\x1a\n\x18SquaredEuclideanDistanceB\x02H\x03P\x00P\x01\x62\x06proto3') + , + dependencies=[DataStructures__pb2.DESCRIPTOR,Parameters__pb2.DESCRIPTOR,], + public_dependencies=[DataStructures__pb2.DESCRIPTOR,Parameters__pb2.DESCRIPTOR,]) + + + + +_KNEARESTNEIGHBORSCLASSIFIER = _descriptor.Descriptor( + name='KNearestNeighborsClassifier', + full_name='CoreML.Specification.KNearestNeighborsClassifier', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='nearestNeighborsIndex', full_name='CoreML.Specification.KNearestNeighborsClassifier.nearestNeighborsIndex', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='numberOfNeighbors', full_name='CoreML.Specification.KNearestNeighborsClassifier.numberOfNeighbors', index=1, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stringClassLabels', full_name='CoreML.Specification.KNearestNeighborsClassifier.stringClassLabels', index=2, + number=100, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='int64ClassLabels', full_name='CoreML.Specification.KNearestNeighborsClassifier.int64ClassLabels', index=3, + number=101, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='defaultStringLabel', full_name='CoreML.Specification.KNearestNeighborsClassifier.defaultStringLabel', index=4, + number=110, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='defaultInt64Label', full_name='CoreML.Specification.KNearestNeighborsClassifier.defaultInt64Label', index=5, + number=111, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='uniformWeighting', full_name='CoreML.Specification.KNearestNeighborsClassifier.uniformWeighting', index=6, + number=200, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='inverseDistanceWeighting', full_name='CoreML.Specification.KNearestNeighborsClassifier.inverseDistanceWeighting', index=7, + 
number=210, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='ClassLabels', full_name='CoreML.Specification.KNearestNeighborsClassifier.ClassLabels', + index=0, containing_type=None, fields=[]), + _descriptor.OneofDescriptor( + name='DefaultClassLabel', full_name='CoreML.Specification.KNearestNeighborsClassifier.DefaultClassLabel', + index=1, containing_type=None, fields=[]), + _descriptor.OneofDescriptor( + name='WeightingScheme', full_name='CoreML.Specification.KNearestNeighborsClassifier.WeightingScheme', + index=2, containing_type=None, fields=[]), + ], + serialized_start=89, + serialized_end=655, +) + + +_NEARESTNEIGHBORSINDEX = _descriptor.Descriptor( + name='NearestNeighborsIndex', + full_name='CoreML.Specification.NearestNeighborsIndex', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='numberOfDimensions', full_name='CoreML.Specification.NearestNeighborsIndex.numberOfDimensions', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='floatSamples', full_name='CoreML.Specification.NearestNeighborsIndex.floatSamples', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='linearIndex', full_name='CoreML.Specification.NearestNeighborsIndex.linearIndex', index=2, + number=100, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='singleKdTreeIndex', full_name='CoreML.Specification.NearestNeighborsIndex.singleKdTreeIndex', index=3, + number=110, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='squaredEuclideanDistance', full_name='CoreML.Specification.NearestNeighborsIndex.squaredEuclideanDistance', index=4, + number=200, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='IndexType', full_name='CoreML.Specification.NearestNeighborsIndex.IndexType', + index=0, containing_type=None, fields=[]), + _descriptor.OneofDescriptor( + name='DistanceFunction', full_name='CoreML.Specification.NearestNeighborsIndex.DistanceFunction', + index=1, containing_type=None, fields=[]), + ], + serialized_start=658, + serialized_end=1012, +) + + +_UNIFORMWEIGHTING = _descriptor.Descriptor( + name='UniformWeighting', + 
full_name='CoreML.Specification.UniformWeighting', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1014, + serialized_end=1032, +) + + +_INVERSEDISTANCEWEIGHTING = _descriptor.Descriptor( + name='InverseDistanceWeighting', + full_name='CoreML.Specification.InverseDistanceWeighting', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1034, + serialized_end=1060, +) + + +_LINEARINDEX = _descriptor.Descriptor( + name='LinearIndex', + full_name='CoreML.Specification.LinearIndex', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1062, + serialized_end=1075, +) + + +_SINGLEKDTREEINDEX = _descriptor.Descriptor( + name='SingleKdTreeIndex', + full_name='CoreML.Specification.SingleKdTreeIndex', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='leafSize', full_name='CoreML.Specification.SingleKdTreeIndex.leafSize', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1077, + serialized_end=1114, +) + + +_SQUAREDEUCLIDEANDISTANCE = _descriptor.Descriptor( + name='SquaredEuclideanDistance', + full_name='CoreML.Specification.SquaredEuclideanDistance', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1116, + serialized_end=1142, +) + +_KNEARESTNEIGHBORSCLASSIFIER.fields_by_name['nearestNeighborsIndex'].message_type = _NEARESTNEIGHBORSINDEX +_KNEARESTNEIGHBORSCLASSIFIER.fields_by_name['numberOfNeighbors'].message_type = Parameters__pb2._INT64PARAMETER +_KNEARESTNEIGHBORSCLASSIFIER.fields_by_name['stringClassLabels'].message_type = DataStructures__pb2._STRINGVECTOR +_KNEARESTNEIGHBORSCLASSIFIER.fields_by_name['int64ClassLabels'].message_type = DataStructures__pb2._INT64VECTOR +_KNEARESTNEIGHBORSCLASSIFIER.fields_by_name['uniformWeighting'].message_type = _UNIFORMWEIGHTING +_KNEARESTNEIGHBORSCLASSIFIER.fields_by_name['inverseDistanceWeighting'].message_type = _INVERSEDISTANCEWEIGHTING +_KNEARESTNEIGHBORSCLASSIFIER.oneofs_by_name['ClassLabels'].fields.append( + _KNEARESTNEIGHBORSCLASSIFIER.fields_by_name['stringClassLabels']) +_KNEARESTNEIGHBORSCLASSIFIER.fields_by_name['stringClassLabels'].containing_oneof = _KNEARESTNEIGHBORSCLASSIFIER.oneofs_by_name['ClassLabels'] +_KNEARESTNEIGHBORSCLASSIFIER.oneofs_by_name['ClassLabels'].fields.append( + _KNEARESTNEIGHBORSCLASSIFIER.fields_by_name['int64ClassLabels']) +_KNEARESTNEIGHBORSCLASSIFIER.fields_by_name['int64ClassLabels'].containing_oneof = 
_KNEARESTNEIGHBORSCLASSIFIER.oneofs_by_name['ClassLabels'] +_KNEARESTNEIGHBORSCLASSIFIER.oneofs_by_name['DefaultClassLabel'].fields.append( + _KNEARESTNEIGHBORSCLASSIFIER.fields_by_name['defaultStringLabel']) +_KNEARESTNEIGHBORSCLASSIFIER.fields_by_name['defaultStringLabel'].containing_oneof = _KNEARESTNEIGHBORSCLASSIFIER.oneofs_by_name['DefaultClassLabel'] +_KNEARESTNEIGHBORSCLASSIFIER.oneofs_by_name['DefaultClassLabel'].fields.append( + _KNEARESTNEIGHBORSCLASSIFIER.fields_by_name['defaultInt64Label']) +_KNEARESTNEIGHBORSCLASSIFIER.fields_by_name['defaultInt64Label'].containing_oneof = _KNEARESTNEIGHBORSCLASSIFIER.oneofs_by_name['DefaultClassLabel'] +_KNEARESTNEIGHBORSCLASSIFIER.oneofs_by_name['WeightingScheme'].fields.append( + _KNEARESTNEIGHBORSCLASSIFIER.fields_by_name['uniformWeighting']) +_KNEARESTNEIGHBORSCLASSIFIER.fields_by_name['uniformWeighting'].containing_oneof = _KNEARESTNEIGHBORSCLASSIFIER.oneofs_by_name['WeightingScheme'] +_KNEARESTNEIGHBORSCLASSIFIER.oneofs_by_name['WeightingScheme'].fields.append( + _KNEARESTNEIGHBORSCLASSIFIER.fields_by_name['inverseDistanceWeighting']) +_KNEARESTNEIGHBORSCLASSIFIER.fields_by_name['inverseDistanceWeighting'].containing_oneof = _KNEARESTNEIGHBORSCLASSIFIER.oneofs_by_name['WeightingScheme'] +_NEARESTNEIGHBORSINDEX.fields_by_name['floatSamples'].message_type = DataStructures__pb2._FLOATVECTOR +_NEARESTNEIGHBORSINDEX.fields_by_name['linearIndex'].message_type = _LINEARINDEX +_NEARESTNEIGHBORSINDEX.fields_by_name['singleKdTreeIndex'].message_type = _SINGLEKDTREEINDEX +_NEARESTNEIGHBORSINDEX.fields_by_name['squaredEuclideanDistance'].message_type = _SQUAREDEUCLIDEANDISTANCE +_NEARESTNEIGHBORSINDEX.oneofs_by_name['IndexType'].fields.append( + _NEARESTNEIGHBORSINDEX.fields_by_name['linearIndex']) +_NEARESTNEIGHBORSINDEX.fields_by_name['linearIndex'].containing_oneof = _NEARESTNEIGHBORSINDEX.oneofs_by_name['IndexType'] +_NEARESTNEIGHBORSINDEX.oneofs_by_name['IndexType'].fields.append( + _NEARESTNEIGHBORSINDEX.fields_by_name['singleKdTreeIndex']) +_NEARESTNEIGHBORSINDEX.fields_by_name['singleKdTreeIndex'].containing_oneof = _NEARESTNEIGHBORSINDEX.oneofs_by_name['IndexType'] +_NEARESTNEIGHBORSINDEX.oneofs_by_name['DistanceFunction'].fields.append( + _NEARESTNEIGHBORSINDEX.fields_by_name['squaredEuclideanDistance']) +_NEARESTNEIGHBORSINDEX.fields_by_name['squaredEuclideanDistance'].containing_oneof = _NEARESTNEIGHBORSINDEX.oneofs_by_name['DistanceFunction'] +DESCRIPTOR.message_types_by_name['KNearestNeighborsClassifier'] = _KNEARESTNEIGHBORSCLASSIFIER +DESCRIPTOR.message_types_by_name['NearestNeighborsIndex'] = _NEARESTNEIGHBORSINDEX +DESCRIPTOR.message_types_by_name['UniformWeighting'] = _UNIFORMWEIGHTING +DESCRIPTOR.message_types_by_name['InverseDistanceWeighting'] = _INVERSEDISTANCEWEIGHTING +DESCRIPTOR.message_types_by_name['LinearIndex'] = _LINEARINDEX +DESCRIPTOR.message_types_by_name['SingleKdTreeIndex'] = _SINGLEKDTREEINDEX +DESCRIPTOR.message_types_by_name['SquaredEuclideanDistance'] = _SQUAREDEUCLIDEANDISTANCE +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +KNearestNeighborsClassifier = _reflection.GeneratedProtocolMessageType('KNearestNeighborsClassifier', (_message.Message,), dict( + DESCRIPTOR = _KNEARESTNEIGHBORSCLASSIFIER, + __module__ = 'NearestNeighbors_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.KNearestNeighborsClassifier) + )) +_sym_db.RegisterMessage(KNearestNeighborsClassifier) + +NearestNeighborsIndex = _reflection.GeneratedProtocolMessageType('NearestNeighborsIndex', (_message.Message,), dict( + 
DESCRIPTOR = _NEARESTNEIGHBORSINDEX, + __module__ = 'NearestNeighbors_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.NearestNeighborsIndex) + )) +_sym_db.RegisterMessage(NearestNeighborsIndex) + +UniformWeighting = _reflection.GeneratedProtocolMessageType('UniformWeighting', (_message.Message,), dict( + DESCRIPTOR = _UNIFORMWEIGHTING, + __module__ = 'NearestNeighbors_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.UniformWeighting) + )) +_sym_db.RegisterMessage(UniformWeighting) + +InverseDistanceWeighting = _reflection.GeneratedProtocolMessageType('InverseDistanceWeighting', (_message.Message,), dict( + DESCRIPTOR = _INVERSEDISTANCEWEIGHTING, + __module__ = 'NearestNeighbors_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.InverseDistanceWeighting) + )) +_sym_db.RegisterMessage(InverseDistanceWeighting) + +LinearIndex = _reflection.GeneratedProtocolMessageType('LinearIndex', (_message.Message,), dict( + DESCRIPTOR = _LINEARINDEX, + __module__ = 'NearestNeighbors_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LinearIndex) + )) +_sym_db.RegisterMessage(LinearIndex) + +SingleKdTreeIndex = _reflection.GeneratedProtocolMessageType('SingleKdTreeIndex', (_message.Message,), dict( + DESCRIPTOR = _SINGLEKDTREEINDEX, + __module__ = 'NearestNeighbors_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SingleKdTreeIndex) + )) +_sym_db.RegisterMessage(SingleKdTreeIndex) + +SquaredEuclideanDistance = _reflection.GeneratedProtocolMessageType('SquaredEuclideanDistance', (_message.Message,), dict( + DESCRIPTOR = _SQUAREDEUCLIDEANDISTANCE, + __module__ = 'NearestNeighbors_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SquaredEuclideanDistance) + )) +_sym_db.RegisterMessage(SquaredEuclideanDistance) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/NeuralNetwork_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/NeuralNetwork_pb2.py new file mode 100644 index 00000000..dc1a8150 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/NeuralNetwork_pb2.py @@ -0,0 +1,12661 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: NeuralNetwork.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf.internal import enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . import DataStructures_pb2 as DataStructures__pb2 +try: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2 +from . 
import Parameters_pb2 as Parameters__pb2 +try: + DataStructures__pb2 = Parameters__pb2.DataStructures__pb2 +except AttributeError: + DataStructures__pb2 = Parameters__pb2.DataStructures_pb2 +try: + FeatureTypes__pb2 = Parameters__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = Parameters__pb2.FeatureTypes_pb2 + +from .DataStructures_pb2 import * +from .Parameters_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='NeuralNetwork.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x13NeuralNetwork.proto\x12\x14\x43oreML.Specification\x1a\x14\x44\x61taStructures.proto\x1a\x10Parameters.proto\"\x88\x03\n\rNeuralNetwork\x12\x38\n\x06layers\x18\x01 \x03(\x0b\x32(.CoreML.Specification.NeuralNetworkLayer\x12G\n\rpreprocessing\x18\x02 \x03(\x0b\x32\x30.CoreML.Specification.NeuralNetworkPreprocessing\x12Y\n\x16\x61rrayInputShapeMapping\x18\x05 \x01(\x0e\x32\x39.CoreML.Specification.NeuralNetworkMultiArrayShapeMapping\x12T\n\x16imageInputShapeMapping\x18\x06 \x01(\x0e\x32\x34.CoreML.Specification.NeuralNetworkImageShapeMapping\x12\x43\n\x0cupdateParams\x18\n \x01(\x0b\x32-.CoreML.Specification.NetworkUpdateParameters\"x\n\x18NeuralNetworkImageScaler\x12\x14\n\x0c\x63hannelScale\x18\n \x01(\x02\x12\x10\n\x08\x62lueBias\x18\x14 \x01(\x02\x12\x11\n\tgreenBias\x18\x15 \x01(\x02\x12\x0f\n\x07redBias\x18\x16 \x01(\x02\x12\x10\n\x08grayBias\x18\x1e \x01(\x02\"+\n\x16NeuralNetworkMeanImage\x12\x11\n\tmeanImage\x18\x01 \x03(\x02\"\xc6\x01\n\x1aNeuralNetworkPreprocessing\x12\x13\n\x0b\x66\x65\x61tureName\x18\x01 \x01(\t\x12@\n\x06scaler\x18\n \x01(\x0b\x32..CoreML.Specification.NeuralNetworkImageScalerH\x00\x12\x41\n\tmeanImage\x18\x0b \x01(\x0b\x32,.CoreML.Specification.NeuralNetworkMeanImageH\x00\x42\x0e\n\x0cpreprocessor\"\x10\n\x0e\x41\x63tivationReLU\"$\n\x13\x41\x63tivationLeakyReLU\x12\r\n\x05\x61lpha\x18\x01 \x01(\x02\"\x10\n\x0e\x41\x63tivationTanh\"3\n\x14\x41\x63tivationScaledTanh\x12\r\n\x05\x61lpha\x18\x01 \x01(\x02\x12\x0c\n\x04\x62\x65ta\x18\x02 \x01(\x02\"\x13\n\x11\x41\x63tivationSigmoid\"/\n\x10\x41\x63tivationLinear\x12\r\n\x05\x61lpha\x18\x01 \x01(\x02\x12\x0c\n\x04\x62\x65ta\x18\x02 \x01(\x02\"4\n\x15\x41\x63tivationSigmoidHard\x12\r\n\x05\x61lpha\x18\x01 \x01(\x02\x12\x0c\n\x04\x62\x65ta\x18\x02 \x01(\x02\"D\n\x0f\x41\x63tivationPReLU\x12\x31\n\x05\x61lpha\x18\x01 \x01(\x0b\x32\".CoreML.Specification.WeightParams\"\x1e\n\rActivationELU\x12\r\n\x05\x61lpha\x18\x01 \x01(\x02\"*\n\x19\x41\x63tivationThresholdedReLU\x12\r\n\x05\x61lpha\x18\x01 \x01(\x02\"\x14\n\x12\x41\x63tivationSoftsign\"\x14\n\x12\x41\x63tivationSoftplus\"\x83\x01\n\x1c\x41\x63tivationParametricSoftplus\x12\x31\n\x05\x61lpha\x18\x01 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x30\n\x04\x62\x65ta\x18\x02 \x01(\x0b\x32\".CoreML.Specification.WeightParams\"\xd4\x06\n\x10\x41\x63tivationParams\x12\x38\n\x06linear\x18\x05 \x01(\x0b\x32&.CoreML.Specification.ActivationLinearH\x00\x12\x34\n\x04ReLU\x18\n \x01(\x0b\x32$.CoreML.Specification.ActivationReLUH\x00\x12>\n\tleakyReLU\x18\x0f \x01(\x0b\x32).CoreML.Specification.ActivationLeakyReLUH\x00\x12J\n\x0fthresholdedReLU\x18\x14 \x01(\x0b\x32/.CoreML.Specification.ActivationThresholdedReLUH\x00\x12\x36\n\x05PReLU\x18\x19 \x01(\x0b\x32%.CoreML.Specification.ActivationPReLUH\x00\x12\x34\n\x04tanh\x18\x1e \x01(\x0b\x32$.CoreML.Specification.ActivationTanhH\x00\x12@\n\nscaledTanh\x18\x1f \x01(\x0b\x32*.CoreML.Specification.ActivationScaledTanhH\x00\x12:\n\x07sigmoid\x18( 
\x01(\x0b\x32\'.CoreML.Specification.ActivationSigmoidH\x00\x12\x42\n\x0bsigmoidHard\x18) \x01(\x0b\x32+.CoreML.Specification.ActivationSigmoidHardH\x00\x12\x32\n\x03\x45LU\x18\x32 \x01(\x0b\x32#.CoreML.Specification.ActivationELUH\x00\x12<\n\x08softsign\x18< \x01(\x0b\x32(.CoreML.Specification.ActivationSoftsignH\x00\x12<\n\x08softplus\x18\x46 \x01(\x0b\x32(.CoreML.Specification.ActivationSoftplusH\x00\x12P\n\x12parametricSoftplus\x18G \x01(\x0b\x32\x32.CoreML.Specification.ActivationParametricSoftplusH\x00\x42\x12\n\x10NonlinearityType\"(\n\x06Tensor\x12\x0c\n\x04rank\x18\x01 \x01(\r\x12\x10\n\x08\x64imValue\x18\x02 \x03(\x03\"\xeaU\n\x12NeuralNetworkLayer\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05input\x18\x02 \x03(\t\x12\x0e\n\x06output\x18\x03 \x03(\t\x12\x31\n\x0binputTensor\x18\x04 \x03(\x0b\x32\x1c.CoreML.Specification.Tensor\x12\x32\n\x0coutputTensor\x18\x05 \x03(\x0b\x32\x1c.CoreML.Specification.Tensor\x12\x13\n\x0bisUpdatable\x18\n \x01(\x08\x12\x43\n\x0b\x63onvolution\x18\x64 \x01(\x0b\x32,.CoreML.Specification.ConvolutionLayerParamsH\x00\x12;\n\x07pooling\x18x \x01(\x0b\x32(.CoreML.Specification.PoolingLayerParamsH\x00\x12=\n\nactivation\x18\x82\x01 \x01(\x0b\x32&.CoreML.Specification.ActivationParamsH\x00\x12\x46\n\x0cinnerProduct\x18\x8c\x01 \x01(\x0b\x32-.CoreML.Specification.InnerProductLayerParamsH\x00\x12@\n\tembedding\x18\x96\x01 \x01(\x0b\x32*.CoreML.Specification.EmbeddingLayerParamsH\x00\x12@\n\tbatchnorm\x18\xa0\x01 \x01(\x0b\x32*.CoreML.Specification.BatchnormLayerParamsH\x00\x12\x46\n\x03mvn\x18\xa5\x01 \x01(\x0b\x32\x36.CoreML.Specification.MeanVarianceNormalizeLayerParamsH\x00\x12\x44\n\x0bl2normalize\x18\xaa\x01 \x01(\x0b\x32,.CoreML.Specification.L2NormalizeLayerParamsH\x00\x12<\n\x07softmax\x18\xaf\x01 \x01(\x0b\x32(.CoreML.Specification.SoftmaxLayerParamsH\x00\x12\x34\n\x03lrn\x18\xb4\x01 \x01(\x0b\x32$.CoreML.Specification.LRNLayerParamsH\x00\x12\x36\n\x04\x63rop\x18\xbe\x01 \x01(\x0b\x32%.CoreML.Specification.CropLayerParamsH\x00\x12<\n\x07padding\x18\xc8\x01 \x01(\x0b\x32(.CoreML.Specification.PaddingLayerParamsH\x00\x12>\n\x08upsample\x18\xd2\x01 \x01(\x0b\x32).CoreML.Specification.UpsampleLayerParamsH\x00\x12J\n\x0eresizeBilinear\x18\xd3\x01 \x01(\x0b\x32/.CoreML.Specification.ResizeBilinearLayerParamsH\x00\x12\x42\n\ncropResize\x18\xd4\x01 \x01(\x0b\x32+.CoreML.Specification.CropResizeLayerParamsH\x00\x12@\n\x05unary\x18\xdc\x01 \x01(\x0b\x32..CoreML.Specification.UnaryFunctionLayerParamsH\x00\x12\x34\n\x03\x61\x64\x64\x18\xe6\x01 \x01(\x0b\x32$.CoreML.Specification.AddLayerParamsH\x00\x12>\n\x08multiply\x18\xe7\x01 \x01(\x0b\x32).CoreML.Specification.MultiplyLayerParamsH\x00\x12<\n\x07\x61verage\x18\xf0\x01 \x01(\x0b\x32(.CoreML.Specification.AverageLayerParamsH\x00\x12\x38\n\x05scale\x18\xf5\x01 \x01(\x0b\x32&.CoreML.Specification.ScaleLayerParamsH\x00\x12\x36\n\x04\x62ias\x18\xfa\x01 \x01(\x0b\x32%.CoreML.Specification.BiasLayerParamsH\x00\x12\x34\n\x03max\x18\x84\x02 \x01(\x0b\x32$.CoreML.Specification.MaxLayerParamsH\x00\x12\x34\n\x03min\x18\x85\x02 \x01(\x0b\x32$.CoreML.Specification.MinLayerParamsH\x00\x12;\n\x03\x64ot\x18\x8e\x02 \x01(\x0b\x32+.CoreML.Specification.DotProductLayerParamsH\x00\x12:\n\x06reduce\x18\x98\x02 \x01(\x0b\x32\'.CoreML.Specification.ReduceLayerParamsH\x00\x12\x46\n\x0cloadConstant\x18\xa2\x02 \x01(\x0b\x32-.CoreML.Specification.LoadConstantLayerParamsH\x00\x12<\n\x07reshape\x18\xac\x02 \x01(\x0b\x32(.CoreML.Specification.ReshapeLayerParamsH\x00\x12<\n\x07\x66latten\x18\xad\x02 
\x01(\x0b\x32(.CoreML.Specification.FlattenLayerParamsH\x00\x12<\n\x07permute\x18\xb6\x02 \x01(\x0b\x32(.CoreML.Specification.PermuteLayerParamsH\x00\x12:\n\x06\x63oncat\x18\xc0\x02 \x01(\x0b\x32\'.CoreML.Specification.ConcatLayerParamsH\x00\x12\x38\n\x05split\x18\xca\x02 \x01(\x0b\x32&.CoreML.Specification.SplitLayerParamsH\x00\x12J\n\x0esequenceRepeat\x18\xd4\x02 \x01(\x0b\x32/.CoreML.Specification.SequenceRepeatLayerParamsH\x00\x12J\n\x0ereorganizeData\x18\xd9\x02 \x01(\x0b\x32/.CoreML.Specification.ReorganizeDataLayerParamsH\x00\x12\x38\n\x05slice\x18\xde\x02 \x01(\x0b\x32&.CoreML.Specification.SliceLayerParamsH\x00\x12L\n\x0fsimpleRecurrent\x18\x90\x03 \x01(\x0b\x32\x30.CoreML.Specification.SimpleRecurrentLayerParamsH\x00\x12\x34\n\x03gru\x18\x9a\x03 \x01(\x0b\x32$.CoreML.Specification.GRULayerParamsH\x00\x12R\n\x12uniDirectionalLSTM\x18\xa4\x03 \x01(\x0b\x32\x33.CoreML.Specification.UniDirectionalLSTMLayerParamsH\x00\x12P\n\x11\x62iDirectionalLSTM\x18\xae\x03 \x01(\x0b\x32\x32.CoreML.Specification.BiDirectionalLSTMLayerParamsH\x00\x12:\n\x06\x63ustom\x18\xf4\x03 \x01(\x0b\x32\'.CoreML.Specification.CustomLayerParamsH\x00\x12\x36\n\x04\x63opy\x18\xd8\x04 \x01(\x0b\x32%.CoreML.Specification.CopyLayerParamsH\x00\x12:\n\x06\x62ranch\x18\xdd\x04 \x01(\x0b\x32\'.CoreML.Specification.BranchLayerParamsH\x00\x12\x36\n\x04loop\x18\xe7\x04 \x01(\x0b\x32%.CoreML.Specification.LoopLayerParamsH\x00\x12@\n\tloopBreak\x18\xec\x04 \x01(\x0b\x32*.CoreML.Specification.LoopBreakLayerParamsH\x00\x12\x46\n\x0cloopContinue\x18\xf1\x04 \x01(\x0b\x32-.CoreML.Specification.LoopContinueLayerParamsH\x00\x12\x44\n\x0brangeStatic\x18\xfb\x04 \x01(\x0b\x32,.CoreML.Specification.RangeStaticLayerParamsH\x00\x12\x46\n\x0crangeDynamic\x18\x80\x05 \x01(\x0b\x32-.CoreML.Specification.RangeDynamicLayerParamsH\x00\x12\x36\n\x04\x63lip\x18\x94\x05 \x01(\x0b\x32%.CoreML.Specification.ClipLayerParamsH\x00\x12\x36\n\x04\x63\x65il\x18\x99\x05 \x01(\x0b\x32%.CoreML.Specification.CeilLayerParamsH\x00\x12\x38\n\x05\x66loor\x18\x9e\x05 \x01(\x0b\x32&.CoreML.Specification.FloorLayerParamsH\x00\x12\x36\n\x04sign\x18\xa8\x05 \x01(\x0b\x32%.CoreML.Specification.SignLayerParamsH\x00\x12\x38\n\x05round\x18\xad\x05 \x01(\x0b\x32&.CoreML.Specification.RoundLayerParamsH\x00\x12\x36\n\x04\x65xp2\x18\xbc\x05 \x01(\x0b\x32%.CoreML.Specification.Exp2LayerParamsH\x00\x12\x34\n\x03sin\x18\xc6\x05 \x01(\x0b\x32$.CoreML.Specification.SinLayerParamsH\x00\x12\x34\n\x03\x63os\x18\xcb\x05 \x01(\x0b\x32$.CoreML.Specification.CosLayerParamsH\x00\x12\x34\n\x03tan\x18\xd0\x05 \x01(\x0b\x32$.CoreML.Specification.TanLayerParamsH\x00\x12\x36\n\x04\x61sin\x18\xda\x05 \x01(\x0b\x32%.CoreML.Specification.AsinLayerParamsH\x00\x12\x36\n\x04\x61\x63os\x18\xdf\x05 \x01(\x0b\x32%.CoreML.Specification.AcosLayerParamsH\x00\x12\x36\n\x04\x61tan\x18\xe4\x05 \x01(\x0b\x32%.CoreML.Specification.AtanLayerParamsH\x00\x12\x36\n\x04sinh\x18\xee\x05 \x01(\x0b\x32%.CoreML.Specification.SinhLayerParamsH\x00\x12\x36\n\x04\x63osh\x18\xf3\x05 \x01(\x0b\x32%.CoreML.Specification.CoshLayerParamsH\x00\x12\x36\n\x04tanh\x18\xf8\x05 \x01(\x0b\x32%.CoreML.Specification.TanhLayerParamsH\x00\x12\x38\n\x05\x61sinh\x18\x82\x06 \x01(\x0b\x32&.CoreML.Specification.AsinhLayerParamsH\x00\x12\x38\n\x05\x61\x63osh\x18\x87\x06 \x01(\x0b\x32&.CoreML.Specification.AcoshLayerParamsH\x00\x12\x38\n\x05\x61tanh\x18\x8c\x06 \x01(\x0b\x32&.CoreML.Specification.AtanhLayerParamsH\x00\x12\x34\n\x03\x65rf\x18\x96\x06 \x01(\x0b\x32$.CoreML.Specification.ErfLayerParamsH\x00\x12\x36\n\x04gelu\x18\x9b\x06 
\x01(\x0b\x32%.CoreML.Specification.GeluLayerParamsH\x00\x12\x38\n\x05\x65qual\x18\xaf\x06 \x01(\x0b\x32&.CoreML.Specification.EqualLayerParamsH\x00\x12>\n\x08notEqual\x18\xb4\x06 \x01(\x0b\x32).CoreML.Specification.NotEqualLayerParamsH\x00\x12>\n\x08lessThan\x18\xb9\x06 \x01(\x0b\x32).CoreML.Specification.LessThanLayerParamsH\x00\x12@\n\tlessEqual\x18\xbb\x06 \x01(\x0b\x32*.CoreML.Specification.LessEqualLayerParamsH\x00\x12\x44\n\x0bgreaterThan\x18\xbe\x06 \x01(\x0b\x32,.CoreML.Specification.GreaterThanLayerParamsH\x00\x12\x46\n\x0cgreaterEqual\x18\xc0\x06 \x01(\x0b\x32-.CoreML.Specification.GreaterEqualLayerParamsH\x00\x12@\n\tlogicalOr\x18\xc8\x06 \x01(\x0b\x32*.CoreML.Specification.LogicalOrLayerParamsH\x00\x12\x42\n\nlogicalXor\x18\xcd\x06 \x01(\x0b\x32+.CoreML.Specification.LogicalXorLayerParamsH\x00\x12\x42\n\nlogicalNot\x18\xd2\x06 \x01(\x0b\x32+.CoreML.Specification.LogicalNotLayerParamsH\x00\x12\x42\n\nlogicalAnd\x18\xd7\x06 \x01(\x0b\x32+.CoreML.Specification.LogicalAndLayerParamsH\x00\x12N\n\x10modBroadcastable\x18\xe1\x06 \x01(\x0b\x32\x31.CoreML.Specification.ModBroadcastableLayerParamsH\x00\x12N\n\x10minBroadcastable\x18\xe6\x06 \x01(\x0b\x32\x31.CoreML.Specification.MinBroadcastableLayerParamsH\x00\x12N\n\x10maxBroadcastable\x18\xeb\x06 \x01(\x0b\x32\x31.CoreML.Specification.MaxBroadcastableLayerParamsH\x00\x12N\n\x10\x61\x64\x64\x42roadcastable\x18\xf0\x06 \x01(\x0b\x32\x31.CoreML.Specification.AddBroadcastableLayerParamsH\x00\x12N\n\x10powBroadcastable\x18\xf5\x06 \x01(\x0b\x32\x31.CoreML.Specification.PowBroadcastableLayerParamsH\x00\x12T\n\x13\x64ivideBroadcastable\x18\xfa\x06 \x01(\x0b\x32\x34.CoreML.Specification.DivideBroadcastableLayerParamsH\x00\x12X\n\x15\x66loorDivBroadcastable\x18\xff\x06 \x01(\x0b\x32\x36.CoreML.Specification.FloorDivBroadcastableLayerParamsH\x00\x12X\n\x15multiplyBroadcastable\x18\x84\x07 \x01(\x0b\x32\x36.CoreML.Specification.MultiplyBroadcastableLayerParamsH\x00\x12X\n\x15subtractBroadcastable\x18\x89\x07 \x01(\x0b\x32\x36.CoreML.Specification.SubtractBroadcastableLayerParamsH\x00\x12\x36\n\x04tile\x18\x98\x07 \x01(\x0b\x32%.CoreML.Specification.TileLayerParamsH\x00\x12\x38\n\x05stack\x18\x9d\x07 \x01(\x0b\x32&.CoreML.Specification.StackLayerParamsH\x00\x12:\n\x06gather\x18\xa2\x07 \x01(\x0b\x32\'.CoreML.Specification.GatherLayerParamsH\x00\x12<\n\x07scatter\x18\xa7\x07 \x01(\x0b\x32(.CoreML.Specification.ScatterLayerParamsH\x00\x12>\n\x08gatherND\x18\xac\x07 \x01(\x0b\x32).CoreML.Specification.GatherNDLayerParamsH\x00\x12@\n\tscatterND\x18\xb1\x07 \x01(\x0b\x32*.CoreML.Specification.ScatterNDLayerParamsH\x00\x12@\n\tsoftmaxND\x18\xb6\x07 \x01(\x0b\x32*.CoreML.Specification.SoftmaxNDLayerParamsH\x00\x12L\n\x0fgatherAlongAxis\x18\xb8\x07 \x01(\x0b\x32\x30.CoreML.Specification.GatherAlongAxisLayerParamsH\x00\x12N\n\x10scatterAlongAxis\x18\xba\x07 \x01(\x0b\x32\x31.CoreML.Specification.ScatterAlongAxisLayerParamsH\x00\x12<\n\x07reverse\x18\xc0\x07 \x01(\x0b\x32(.CoreML.Specification.ReverseLayerParamsH\x00\x12\x42\n\nreverseSeq\x18\xc5\x07 \x01(\x0b\x32+.CoreML.Specification.ReverseSeqLayerParamsH\x00\x12<\n\x07splitND\x18\xcf\x07 \x01(\x0b\x32(.CoreML.Specification.SplitNDLayerParamsH\x00\x12>\n\x08\x63oncatND\x18\xd4\x07 \x01(\x0b\x32).CoreML.Specification.ConcatNDLayerParamsH\x00\x12@\n\ttranspose\x18\xd9\x07 \x01(\x0b\x32*.CoreML.Specification.TransposeLayerParamsH\x00\x12\x44\n\x0bsliceStatic\x18\xe3\x07 \x01(\x0b\x32,.CoreML.Specification.SliceStaticLayerParamsH\x00\x12\x46\n\x0csliceDynamic\x18\xe8\x07 
\x01(\x0b\x32-.CoreML.Specification.SliceDynamicLayerParamsH\x00\x12J\n\x0eslidingWindows\x18\xed\x07 \x01(\x0b\x32/.CoreML.Specification.SlidingWindowsLayerParamsH\x00\x12\x36\n\x04topK\x18\xf7\x07 \x01(\x0b\x32%.CoreML.Specification.TopKLayerParamsH\x00\x12:\n\x06\x61rgMin\x18\xfc\x07 \x01(\x0b\x32\'.CoreML.Specification.ArgMinLayerParamsH\x00\x12:\n\x06\x61rgMax\x18\x81\x08 \x01(\x0b\x32\'.CoreML.Specification.ArgMaxLayerParamsH\x00\x12\x44\n\x0b\x65mbeddingND\x18\x90\x08 \x01(\x0b\x32,.CoreML.Specification.EmbeddingNDLayerParamsH\x00\x12H\n\rbatchedMatmul\x18\x95\x08 \x01(\x0b\x32..CoreML.Specification.BatchedMatMulLayerParamsH\x00\x12>\n\x08getShape\x18\xa9\x08 \x01(\x0b\x32).CoreML.Specification.GetShapeLayerParamsH\x00\x12J\n\x0eloadConstantND\x18\xae\x08 \x01(\x0b\x32/.CoreML.Specification.LoadConstantNDLayerParamsH\x00\x12>\n\x08\x66illLike\x18\xb8\x08 \x01(\x0b\x32).CoreML.Specification.FillLikeLayerParamsH\x00\x12\x42\n\nfillStatic\x18\xbd\x08 \x01(\x0b\x32+.CoreML.Specification.FillStaticLayerParamsH\x00\x12\x44\n\x0b\x66illDynamic\x18\xc2\x08 \x01(\x0b\x32,.CoreML.Specification.FillDynamicLayerParamsH\x00\x12L\n\x0f\x62roadcastToLike\x18\xcc\x08 \x01(\x0b\x32\x30.CoreML.Specification.BroadcastToLikeLayerParamsH\x00\x12P\n\x11\x62roadcastToStatic\x18\xd1\x08 \x01(\x0b\x32\x32.CoreML.Specification.BroadcastToStaticLayerParamsH\x00\x12R\n\x12\x62roadcastToDynamic\x18\xd6\x08 \x01(\x0b\x32\x33.CoreML.Specification.BroadcastToDynamicLayerParamsH\x00\x12<\n\x07squeeze\x18\xe0\x08 \x01(\x0b\x32(.CoreML.Specification.SqueezeLayerParamsH\x00\x12\x42\n\nexpandDims\x18\xe5\x08 \x01(\x0b\x32+.CoreML.Specification.ExpandDimsLayerParamsH\x00\x12\x44\n\x0b\x66lattenTo2D\x18\xea\x08 \x01(\x0b\x32,.CoreML.Specification.FlattenTo2DLayerParamsH\x00\x12\x44\n\x0breshapeLike\x18\xef\x08 \x01(\x0b\x32,.CoreML.Specification.ReshapeLikeLayerParamsH\x00\x12H\n\rreshapeStatic\x18\xf4\x08 \x01(\x0b\x32..CoreML.Specification.ReshapeStaticLayerParamsH\x00\x12J\n\x0ereshapeDynamic\x18\xf9\x08 \x01(\x0b\x32/.CoreML.Specification.ReshapeDynamicLayerParamsH\x00\x12X\n\x15rankPreservingReshape\x18\xfe\x08 \x01(\x0b\x32\x36.CoreML.Specification.RankPreservingReshapeLayerParamsH\x00\x12H\n\x0b\x63onstantPad\x18\x83\t \x01(\x0b\x32\x30.CoreML.Specification.ConstantPaddingLayerParamsH\x00\x12N\n\x10randomNormalLike\x18\x92\t \x01(\x0b\x32\x31.CoreML.Specification.RandomNormalLikeLayerParamsH\x00\x12R\n\x12randomNormalStatic\x18\x97\t \x01(\x0b\x32\x33.CoreML.Specification.RandomNormalStaticLayerParamsH\x00\x12T\n\x13randomNormalDynamic\x18\x9c\t \x01(\x0b\x32\x34.CoreML.Specification.RandomNormalDynamicLayerParamsH\x00\x12P\n\x11randomUniformLike\x18\xa6\t \x01(\x0b\x32\x32.CoreML.Specification.RandomUniformLikeLayerParamsH\x00\x12T\n\x13randomUniformStatic\x18\xab\t \x01(\x0b\x32\x34.CoreML.Specification.RandomUniformStaticLayerParamsH\x00\x12V\n\x14randomUniformDynamic\x18\xb0\t \x01(\x0b\x32\x35.CoreML.Specification.RandomUniformDynamicLayerParamsH\x00\x12T\n\x13randomBernoulliLike\x18\xba\t \x01(\x0b\x32\x34.CoreML.Specification.RandomBernoulliLikeLayerParamsH\x00\x12X\n\x15randomBernoulliStatic\x18\xbf\t \x01(\x0b\x32\x36.CoreML.Specification.RandomBernoulliStaticLayerParamsH\x00\x12Z\n\x16randomBernoulliDynamic\x18\xc4\t \x01(\x0b\x32\x37.CoreML.Specification.RandomBernoulliDynamicLayerParamsH\x00\x12\\\n\x17\x63\x61tegoricalDistribution\x18\xce\t \x01(\x0b\x32\x38.CoreML.Specification.CategoricalDistributionLayerParamsH\x00\x12>\n\x08reduceL1\x18\xe2\t 
\x01(\x0b\x32).CoreML.Specification.ReduceL1LayerParamsH\x00\x12>\n\x08reduceL2\x18\xe7\t \x01(\x0b\x32).CoreML.Specification.ReduceL2LayerParamsH\x00\x12@\n\treduceMax\x18\xec\t \x01(\x0b\x32*.CoreML.Specification.ReduceMaxLayerParamsH\x00\x12@\n\treduceMin\x18\xf1\t \x01(\x0b\x32*.CoreML.Specification.ReduceMinLayerParamsH\x00\x12@\n\treduceSum\x18\xf6\t \x01(\x0b\x32*.CoreML.Specification.ReduceSumLayerParamsH\x00\x12\x42\n\nreduceProd\x18\xfb\t \x01(\x0b\x32+.CoreML.Specification.ReduceProdLayerParamsH\x00\x12\x42\n\nreduceMean\x18\x80\n \x01(\x0b\x32+.CoreML.Specification.ReduceMeanLayerParamsH\x00\x12\x46\n\x0creduceLogSum\x18\x85\n \x01(\x0b\x32-.CoreML.Specification.ReduceLogSumLayerParamsH\x00\x12L\n\x0freduceSumSquare\x18\x8a\n \x01(\x0b\x32\x30.CoreML.Specification.ReduceSumSquareLayerParamsH\x00\x12L\n\x0freduceLogSumExp\x18\x8f\n \x01(\x0b\x32\x30.CoreML.Specification.ReduceLogSumExpLayerParamsH\x00\x12\x46\n\x0cwhereNonZero\x18\xa1\n \x01(\x0b\x32-.CoreML.Specification.WhereNonZeroLayerParamsH\x00\x12J\n\x0ematrixBandPart\x18\xa3\n \x01(\x0b\x32/.CoreML.Specification.MatrixBandPartLayerParamsH\x00\x12L\n\x0flowerTriangular\x18\xa8\n \x01(\x0b\x32\x30.CoreML.Specification.LowerTriangularLayerParamsH\x00\x12L\n\x0fupperTriangular\x18\xad\n \x01(\x0b\x32\x30.CoreML.Specification.UpperTriangularLayerParamsH\x00\x12R\n\x12whereBroadcastable\x18\xb2\n \x01(\x0b\x32\x33.CoreML.Specification.WhereBroadcastableLayerParamsH\x00\x12R\n\x12layerNormalization\x18\xc6\n \x01(\x0b\x32\x33.CoreML.Specification.LayerNormalizationLayerParamsH\x00\x12X\n\x15NonMaximumSuppression\x18\xf8\n \x01(\x0b\x32\x36.CoreML.Specification.NonMaximumSuppressionLayerParamsH\x00\x12:\n\x06oneHot\x18\xaa\x0b \x01(\x0b\x32\'.CoreML.Specification.OneHotLayerParamsH\x00\x12:\n\x06\x63umSum\x18\xaf\x0b \x01(\x0b\x32\'.CoreML.Specification.CumSumLayerParamsH\x00\x12\x44\n\x0b\x63lampedReLU\x18\xb4\x0b \x01(\x0b\x32,.CoreML.Specification.ClampedReLULayerParamsH\x00\x12<\n\x07\x61rgSort\x18\xb5\x0b \x01(\x0b\x32(.CoreML.Specification.ArgSortLayerParamsH\x00\x12@\n\tpooling3d\x18\xb9\x0b \x01(\x0b\x32*.CoreML.Specification.Pooling3DLayerParamsH\x00\x12L\n\x0fglobalPooling3d\x18\xba\x0b \x01(\x0b\x32\x30.CoreML.Specification.GlobalPooling3DLayerParamsH\x00\x12\x44\n\x0bsliceBySize\x18\xbe\x0b \x01(\x0b\x32,.CoreML.Specification.SliceBySizeLayerParamsH\x00\x12H\n\rconvolution3d\x18\xbf\x0b \x01(\x0b\x32..CoreML.Specification.Convolution3DLayerParamsH\x00\x42\x07\n\x05layer\"\x83\x01\n\x11\x42ranchLayerParams\x12\x35\n\x08ifBranch\x18\x01 \x01(\x0b\x32#.CoreML.Specification.NeuralNetwork\x12\x37\n\nelseBranch\x18\x02 \x01(\x0b\x32#.CoreML.Specification.NeuralNetwork\"\xbb\x01\n\x0fLoopLayerParams\x12\x19\n\x11maxLoopIterations\x18\x01 \x01(\x04\x12\x14\n\x0c\x63onditionVar\x18\x02 \x01(\t\x12=\n\x10\x63onditionNetwork\x18\x03 \x01(\x0b\x32#.CoreML.Specification.NeuralNetwork\x12\x38\n\x0b\x62odyNetwork\x18\x04 \x01(\x0b\x32#.CoreML.Specification.NeuralNetwork\"\x16\n\x14LoopBreakLayerParams\"\x19\n\x17LoopContinueLayerParams\"\x11\n\x0f\x43opyLayerParams\"\'\n\x16GreaterThanLayerParams\x12\r\n\x05\x61lpha\x18\x02 \x01(\x02\"(\n\x17GreaterEqualLayerParams\x12\r\n\x05\x61lpha\x18\x02 \x01(\x02\"$\n\x13LessThanLayerParams\x12\r\n\x05\x61lpha\x18\x02 \x01(\x02\"%\n\x14LessEqualLayerParams\x12\r\n\x05\x61lpha\x18\x02 \x01(\x02\"!\n\x10\x45qualLayerParams\x12\r\n\x05\x61lpha\x18\x01 \x01(\x02\"$\n\x13NotEqualLayerParams\x12\r\n\x05\x61lpha\x18\x01 
\x01(\x02\"\x17\n\x15LogicalAndLayerParams\"\x16\n\x14LogicalOrLayerParams\"\x17\n\x15LogicalXorLayerParams\"\x17\n\x15LogicalNotLayerParams\"\x8e\x01\n\rBorderAmounts\x12\x44\n\rborderAmounts\x18\n \x03(\x0b\x32-.CoreML.Specification.BorderAmounts.EdgeSizes\x1a\x37\n\tEdgeSizes\x12\x15\n\rstartEdgeSize\x18\x01 \x01(\x04\x12\x13\n\x0b\x65ndEdgeSize\x18\x02 \x01(\x04\"K\n\x0cValidPadding\x12;\n\x0epaddingAmounts\x18\x01 \x01(\x0b\x32#.CoreML.Specification.BorderAmounts\"\x96\x01\n\x0bSamePadding\x12H\n\rasymmetryMode\x18\x01 \x01(\x0e\x32\x31.CoreML.Specification.SamePadding.SamePaddingMode\"=\n\x0fSamePaddingMode\x12\x16\n\x12\x42OTTOM_RIGHT_HEAVY\x10\x00\x12\x12\n\x0eTOP_LEFT_HEAVY\x10\x01\"\xbd\x01\n\x0cSamplingMode\x12\x41\n\x0esamplingMethod\x18\x01 \x01(\x0e\x32).CoreML.Specification.SamplingMode.Method\"j\n\x06Method\x12\x1f\n\x1bSTRICT_ALIGN_ENDPOINTS_MODE\x10\x00\x12\x18\n\x14\x41LIGN_ENDPOINTS_MODE\x10\x01\x12\x11\n\rUPSAMPLE_MODE\x10\x02\x12\x12\n\x0eROI_ALIGN_MODE\x10\x03\"\xd8\x01\n\x12\x42oxCoordinatesMode\x12\x45\n\x07\x62oxMode\x18\x01 \x01(\x0e\x32\x34.CoreML.Specification.BoxCoordinatesMode.Coordinates\"{\n\x0b\x43oordinates\x12\x18\n\x14\x43ORNERS_HEIGHT_FIRST\x10\x00\x12\x17\n\x13\x43ORNERS_WIDTH_FIRST\x10\x01\x12\x1c\n\x18\x43\x45NTER_SIZE_HEIGHT_FIRST\x10\x02\x12\x1b\n\x17\x43\x45NTER_SIZE_WIDTH_FIRST\x10\x03\"\xb5\x01\n\x0cWeightParams\x12\x12\n\nfloatValue\x18\x01 \x03(\x02\x12\x14\n\x0c\x66loat16Value\x18\x02 \x01(\x0c\x12\x10\n\x08rawValue\x18\x1e \x01(\x0c\x12\x14\n\x0cint8RawValue\x18\x1f \x01(\x0c\x12>\n\x0cquantization\x18( \x01(\x0b\x32(.CoreML.Specification.QuantizationParams\x12\x13\n\x0bisUpdatable\x18\x32 \x01(\x08\"\xe4\x01\n\x12QuantizationParams\x12\x14\n\x0cnumberOfBits\x18\x01 \x01(\x04\x12L\n\x12linearQuantization\x18\x65 \x01(\x0b\x32..CoreML.Specification.LinearQuantizationParamsH\x00\x12V\n\x17lookupTableQuantization\x18\x66 \x01(\x0b\x32\x33.CoreML.Specification.LookUpTableQuantizationParamsH\x00\x42\x12\n\x10QuantizationType\"7\n\x18LinearQuantizationParams\x12\r\n\x05scale\x18\x01 \x03(\x02\x12\x0c\n\x04\x62ias\x18\x02 \x03(\x02\"3\n\x1dLookUpTableQuantizationParams\x12\x12\n\nfloatValue\x18\x01 \x03(\x02\"\xbd\x03\n\x16\x43onvolutionLayerParams\x12\x16\n\x0eoutputChannels\x18\x01 \x01(\x04\x12\x16\n\x0ekernelChannels\x18\x02 \x01(\x04\x12\x0f\n\x07nGroups\x18\n \x01(\x04\x12\x12\n\nkernelSize\x18\x14 \x03(\x04\x12\x0e\n\x06stride\x18\x1e \x03(\x04\x12\x16\n\x0e\x64ilationFactor\x18( \x03(\x04\x12\x33\n\x05valid\x18\x32 \x01(\x0b\x32\".CoreML.Specification.ValidPaddingH\x00\x12\x31\n\x04same\x18\x33 \x01(\x0b\x32!.CoreML.Specification.SamePaddingH\x00\x12\x17\n\x0fisDeconvolution\x18< \x01(\x08\x12\x0f\n\x07hasBias\x18\x46 \x01(\x08\x12\x33\n\x07weights\x18Z \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x30\n\x04\x62ias\x18[ \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x13\n\x0boutputShape\x18\x64 \x03(\x04\x42\x18\n\x16\x43onvolutionPaddingType\"\xec\x05\n\x18\x43onvolution3DLayerParams\x12\x16\n\x0eoutputChannels\x18\x01 \x01(\x05\x12\x15\n\rinputChannels\x18\x02 \x01(\x05\x12\x0f\n\x07nGroups\x18\n \x01(\x05\x12\x13\n\x0bkernelDepth\x18\x14 \x01(\x05\x12\x14\n\x0ckernelHeight\x18\x15 \x01(\x05\x12\x13\n\x0bkernelWidth\x18\x16 \x01(\x05\x12\x13\n\x0bstrideDepth\x18\x1f \x01(\x05\x12\x14\n\x0cstrideHeight\x18 \x01(\x05\x12\x13\n\x0bstrideWidth\x18! 
\x01(\x05\x12\x15\n\rdilationDepth\x18( \x01(\x05\x12\x16\n\x0e\x64ilationHeight\x18) \x01(\x05\x12\x15\n\rdilationWidth\x18* \x01(\x05\x12\x0f\n\x07hasBias\x18\x32 \x01(\x08\x12\x33\n\x07weights\x18< \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x30\n\x04\x62ias\x18= \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12O\n\x0bpaddingType\x18\x46 \x01(\x0e\x32:.CoreML.Specification.Convolution3DLayerParams.PaddingType\x12\x1a\n\x12\x63ustomPaddingFront\x18P \x01(\x05\x12\x19\n\x11\x63ustomPaddingBack\x18Q \x01(\x05\x12\x18\n\x10\x63ustomPaddingTop\x18R \x01(\x05\x12\x1b\n\x13\x63ustomPaddingBottom\x18S \x01(\x05\x12\x19\n\x11\x63ustomPaddingLeft\x18T \x01(\x05\x12\x1a\n\x12\x63ustomPaddingRight\x18U \x01(\x05\x12\x17\n\x0fisDeconvolution\x18V \x01(\x08\x12\x13\n\x0boutputShape\x18W \x03(\x04\".\n\x0bPaddingType\x12\n\n\x06\x43USTOM\x10\x00\x12\t\n\x05VALID\x10\x01\x12\x08\n\x04SAME\x10\x02\"\xdd\x01\n\x17InnerProductLayerParams\x12\x15\n\rinputChannels\x18\x01 \x01(\x04\x12\x16\n\x0eoutputChannels\x18\x02 \x01(\x04\x12\x0f\n\x07hasBias\x18\n \x01(\x08\x12\x33\n\x07weights\x18\x14 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x30\n\x04\x62ias\x18\x15 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x1b\n\x13int8DynamicQuantize\x18\x16 \x01(\x08\"\xb8\x01\n\x14\x45mbeddingLayerParams\x12\x10\n\x08inputDim\x18\x01 \x01(\x04\x12\x16\n\x0eoutputChannels\x18\x02 \x01(\x04\x12\x0f\n\x07hasBias\x18\n \x01(\x08\x12\x33\n\x07weights\x18\x14 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x30\n\x04\x62ias\x18\x15 \x01(\x0b\x32\".CoreML.Specification.WeightParams\"\xba\x01\n\x16\x45mbeddingNDLayerParams\x12\x11\n\tvocabSize\x18\x01 \x01(\x04\x12\x15\n\rembeddingSize\x18\x02 \x01(\x04\x12\x0f\n\x07hasBias\x18\x03 \x01(\x08\x12\x33\n\x07weights\x18\x14 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x30\n\x04\x62ias\x18\x15 \x01(\x0b\x32\".CoreML.Specification.WeightParams\"\xbd\x02\n\x14\x42\x61tchnormLayerParams\x12\x10\n\x08\x63hannels\x18\x01 \x01(\x04\x12\x16\n\x0e\x63omputeMeanVar\x18\x05 \x01(\x08\x12\x1d\n\x15instanceNormalization\x18\x06 \x01(\x08\x12\x0f\n\x07\x65psilon\x18\n \x01(\x02\x12\x31\n\x05gamma\x18\x0f \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x30\n\x04\x62\x65ta\x18\x10 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x30\n\x04mean\x18\x11 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x34\n\x08variance\x18\x12 \x01(\x0b\x32\".CoreML.Specification.WeightParams\"\xe8\x03\n\x12PoolingLayerParams\x12\x42\n\x04type\x18\x01 \x01(\x0e\x32\x34.CoreML.Specification.PoolingLayerParams.PoolingType\x12\x12\n\nkernelSize\x18\n \x03(\x04\x12\x0e\n\x06stride\x18\x14 \x03(\x04\x12\x33\n\x05valid\x18\x1e \x01(\x0b\x32\".CoreML.Specification.ValidPaddingH\x00\x12\x31\n\x04same\x18\x1f \x01(\x0b\x32!.CoreML.Specification.SamePaddingH\x00\x12Y\n\x10includeLastPixel\x18 \x01(\x0b\x32=.CoreML.Specification.PoolingLayerParams.ValidCompletePaddingH\x00\x12\x1d\n\x15\x61vgPoolExcludePadding\x18\x32 \x01(\x08\x12\x15\n\rglobalPooling\x18< \x01(\x08\x1a.\n\x14ValidCompletePadding\x12\x16\n\x0epaddingAmounts\x18\n \x03(\x04\"+\n\x0bPoolingType\x12\x07\n\x03MAX\x10\x00\x12\x0b\n\x07\x41VERAGE\x10\x01\x12\x06\n\x02L2\x10\x02\x42\x14\n\x12PoolingPaddingType\"\xd6\x04\n\x14Pooling3DLayerParams\x12\x46\n\x04type\x18\x01 \x01(\x0e\x32\x38.CoreML.Specification.Pooling3DLayerParams.PoolingType3D\x12\x13\n\x0bkernelDepth\x18\x02 \x01(\x05\x12\x14\n\x0ckernelHeight\x18\x03 \x01(\x05\x12\x13\n\x0bkernelWidth\x18\x04 \x01(\x05\x12\x13\n\x0bstrideDepth\x18\x05 
\x01(\x05\x12\x14\n\x0cstrideHeight\x18\x06 \x01(\x05\x12\x13\n\x0bstrideWidth\x18\x07 \x01(\x05\x12T\n\x0bpaddingType\x18\x0f \x01(\x0e\x32?.CoreML.Specification.Pooling3DLayerParams.Pooling3DPaddingType\x12\x1a\n\x12\x63ustomPaddingFront\x18\x08 \x01(\x05\x12\x19\n\x11\x63ustomPaddingBack\x18\t \x01(\x05\x12\x18\n\x10\x63ustomPaddingTop\x18\n \x01(\x05\x12\x1b\n\x13\x63ustomPaddingBottom\x18\x0b \x01(\x05\x12\x19\n\x11\x63ustomPaddingLeft\x18\x0c \x01(\x05\x12\x1a\n\x12\x63ustomPaddingRight\x18\r \x01(\x05\x12\x1b\n\x13\x63ountExcludePadding\x18\x0e \x01(\x08\"%\n\rPoolingType3D\x12\x07\n\x03MAX\x10\x00\x12\x0b\n\x07\x41VERAGE\x10\x01\"7\n\x14Pooling3DPaddingType\x12\n\n\x06\x43USTOM\x10\x00\x12\t\n\x05VALID\x10\x01\x12\x08\n\x04SAME\x10\x02\"\x9d\x01\n\x1aGlobalPooling3DLayerParams\x12R\n\x04type\x18\x01 \x01(\x0e\x32\x44.CoreML.Specification.GlobalPooling3DLayerParams.GlobalPoolingType3D\"+\n\x13GlobalPoolingType3D\x12\x07\n\x03MAX\x10\x00\x12\x0b\n\x07\x41VERAGE\x10\x01\"\xa1\x03\n\x12PaddingLayerParams\x12L\n\x08\x63onstant\x18\x01 \x01(\x0b\x32\x38.CoreML.Specification.PaddingLayerParams.PaddingConstantH\x00\x12P\n\nreflection\x18\x02 \x01(\x0b\x32:.CoreML.Specification.PaddingLayerParams.PaddingReflectionH\x00\x12R\n\x0breplication\x18\x03 \x01(\x0b\x32;.CoreML.Specification.PaddingLayerParams.PaddingReplicationH\x00\x12;\n\x0epaddingAmounts\x18\n \x01(\x0b\x32#.CoreML.Specification.BorderAmounts\x1a \n\x0fPaddingConstant\x12\r\n\x05value\x18\x01 \x01(\x02\x1a\x13\n\x11PaddingReflection\x1a\x14\n\x12PaddingReplicationB\r\n\x0bPaddingType\"+\n\x11\x43oncatLayerParams\x12\x16\n\x0esequenceConcat\x18\x64 \x01(\x08\"K\n\x0eLRNLayerParams\x12\r\n\x05\x61lpha\x18\x01 \x01(\x02\x12\x0c\n\x04\x62\x65ta\x18\x02 \x01(\x02\x12\x11\n\tlocalSize\x18\x03 \x01(\x04\x12\t\n\x01k\x18\x04 \x01(\x02\"\x14\n\x12SoftmaxLayerParams\"$\n\x10SplitLayerParams\x12\x10\n\x08nOutputs\x18\x01 \x01(\x04\"\x1f\n\x0e\x41\x64\x64LayerParams\x12\r\n\x05\x61lpha\x18\x01 \x01(\x02\"$\n\x13MultiplyLayerParams\x12\r\n\x05\x61lpha\x18\x01 \x01(\x02\"\x84\x02\n\x18UnaryFunctionLayerParams\x12\x46\n\x04type\x18\x01 \x01(\x0e\x32\x38.CoreML.Specification.UnaryFunctionLayerParams.Operation\x12\r\n\x05\x61lpha\x18\x02 \x01(\x02\x12\x0f\n\x07\x65psilon\x18\x03 \x01(\x02\x12\r\n\x05shift\x18\x04 \x01(\x02\x12\r\n\x05scale\x18\x05 \x01(\x02\"b\n\tOperation\x12\x08\n\x04SQRT\x10\x00\x12\t\n\x05RSQRT\x10\x01\x12\x0b\n\x07INVERSE\x10\x02\x12\t\n\x05POWER\x10\x03\x12\x07\n\x03\x45XP\x10\x04\x12\x07\n\x03LOG\x10\x05\x12\x07\n\x03\x41\x42S\x10\x06\x12\r\n\tTHRESHOLD\x10\x07\"\xf1\x02\n\x13UpsampleLayerParams\x12\x15\n\rscalingFactor\x18\x01 \x03(\x04\x12\x1f\n\x17\x66ractionalScalingFactor\x18\x07 \x03(\x02\x12I\n\x04mode\x18\x05 \x01(\x0e\x32;.CoreML.Specification.UpsampleLayerParams.InterpolationMode\x12X\n\x12linearUpsampleMode\x18\x06 \x01(\x0e\x32<.CoreML.Specification.UpsampleLayerParams.LinearUpsampleMode\")\n\x11InterpolationMode\x12\x06\n\x02NN\x10\x00\x12\x0c\n\x08\x42ILINEAR\x10\x01\"R\n\x12LinearUpsampleMode\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\x16\n\x12\x41LIGN_CORNERS_TRUE\x10\x01\x12\x17\n\x13\x41LIGN_CORNERS_FALSE\x10\x02\"a\n\x19ResizeBilinearLayerParams\x12\x12\n\ntargetSize\x18\x01 \x03(\x04\x12\x30\n\x04mode\x18\x02 \x01(\x0b\x32\".CoreML.Specification.SamplingMode\"\xd4\x01\n\x15\x43ropResizeLayerParams\x12\x12\n\ntargetSize\x18\x01 \x03(\x04\x12\x1d\n\x15normalizedCoordinates\x18\x02 \x01(\x08\x12\x30\n\x04mode\x18\x03 \x01(\x0b\x32\".CoreML.Specification.SamplingMode\x12@\n\x0e\x62oxIndicesMode\x18\x04 
\x01(\x0b\x32(.CoreML.Specification.BoxCoordinatesMode\x12\x14\n\x0cspatialScale\x18\x05 \x01(\x02\"R\n\x0f\x42iasLayerParams\x12\r\n\x05shape\x18\x01 \x03(\x04\x12\x30\n\x04\x62ias\x18\x02 \x01(\x0b\x32\".CoreML.Specification.WeightParams\"\xaf\x01\n\x10ScaleLayerParams\x12\x12\n\nshapeScale\x18\x01 \x03(\x04\x12\x31\n\x05scale\x18\x02 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x0f\n\x07hasBias\x18\x03 \x01(\x08\x12\x11\n\tshapeBias\x18\x04 \x03(\x04\x12\x30\n\x04\x62ias\x18\x05 \x01(\x0b\x32\".CoreML.Specification.WeightParams\"Z\n\x17LoadConstantLayerParams\x12\r\n\x05shape\x18\x01 \x03(\x04\x12\x30\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\".CoreML.Specification.WeightParams\")\n\x16L2NormalizeLayerParams\x12\x0f\n\x07\x65psilon\x18\x01 \x01(\x02\"\x8e\x01\n\x12\x46lattenLayerParams\x12\x43\n\x04mode\x18\x01 \x01(\x0e\x32\x35.CoreML.Specification.FlattenLayerParams.FlattenOrder\"3\n\x0c\x46lattenOrder\x12\x11\n\rCHANNEL_FIRST\x10\x00\x12\x10\n\x0c\x43HANNEL_LAST\x10\x01\"\xa3\x01\n\x12ReshapeLayerParams\x12\x13\n\x0btargetShape\x18\x01 \x03(\x03\x12\x43\n\x04mode\x18\x02 \x01(\x0e\x32\x35.CoreML.Specification.ReshapeLayerParams.ReshapeOrder\"3\n\x0cReshapeOrder\x12\x11\n\rCHANNEL_FIRST\x10\x00\x12\x10\n\x0c\x43HANNEL_LAST\x10\x01\"\"\n\x12PermuteLayerParams\x12\x0c\n\x04\x61xis\x18\x01 \x03(\x04\"\xd1\x01\n\x19ReorganizeDataLayerParams\x12P\n\x04mode\x18\x01 \x01(\x0e\x32\x42.CoreML.Specification.ReorganizeDataLayerParams.ReorganizationType\x12\x11\n\tblockSize\x18\x02 \x01(\x04\"O\n\x12ReorganizationType\x12\x12\n\x0eSPACE_TO_DEPTH\x10\x00\x12\x12\n\x0e\x44\x45PTH_TO_SPACE\x10\x01\x12\x11\n\rPIXEL_SHUFFLE\x10\x02\"\xc8\x01\n\x10SliceLayerParams\x12\x12\n\nstartIndex\x18\x01 \x01(\x03\x12\x10\n\x08\x65ndIndex\x18\x02 \x01(\x03\x12\x0e\n\x06stride\x18\x03 \x01(\x04\x12>\n\x04\x61xis\x18\x04 \x01(\x0e\x32\x30.CoreML.Specification.SliceLayerParams.SliceAxis\">\n\tSliceAxis\x12\x10\n\x0c\x43HANNEL_AXIS\x10\x00\x12\x0f\n\x0bHEIGHT_AXIS\x10\x01\x12\x0e\n\nWIDTH_AXIS\x10\x02\"\xd9\x02\n\x11ReduceLayerParams\x12\x45\n\x04mode\x18\x01 \x01(\x0e\x32\x37.CoreML.Specification.ReduceLayerParams.ReduceOperation\x12\x0f\n\x07\x65psilon\x18\x02 \x01(\x02\x12@\n\x04\x61xis\x18\x03 \x01(\x0e\x32\x32.CoreML.Specification.ReduceLayerParams.ReduceAxis\"v\n\x0fReduceOperation\x12\x07\n\x03SUM\x10\x00\x12\x07\n\x03\x41VG\x10\x01\x12\x08\n\x04PROD\x10\x02\x12\n\n\x06LOGSUM\x10\x03\x12\r\n\tSUMSQUARE\x10\x04\x12\x06\n\x02L1\x10\x05\x12\x06\n\x02L2\x10\x06\x12\x07\n\x03MAX\x10\x07\x12\x07\n\x03MIN\x10\x08\x12\n\n\x06\x41RGMAX\x10\t\"2\n\nReduceAxis\x12\x07\n\x03\x43HW\x10\x00\x12\x06\n\x02HW\x10\x01\x12\x05\n\x01\x43\x10\x02\x12\x05\n\x01H\x10\x03\x12\x05\n\x01W\x10\x04\"[\n\x0f\x43ropLayerParams\x12\x38\n\x0b\x63ropAmounts\x18\x01 \x01(\x0b\x32#.CoreML.Specification.BorderAmounts\x12\x0e\n\x06offset\x18\x05 \x03(\x04\"\x14\n\x12\x41verageLayerParams\"\x10\n\x0eMaxLayerParams\"\x10\n\x0eMinLayerParams\"1\n\x15\x44otProductLayerParams\x12\x18\n\x10\x63osineSimilarity\x18\x01 \x01(\x08\"f\n MeanVarianceNormalizeLayerParams\x12\x16\n\x0e\x61\x63rossChannels\x18\x01 \x01(\x08\x12\x19\n\x11normalizeVariance\x18\x02 \x01(\x08\x12\x0f\n\x07\x65psilon\x18\x03 \x01(\x02\"1\n\x19SequenceRepeatLayerParams\x12\x14\n\x0cnRepetitions\x18\x01 \x01(\x04\"\xff\x02\n\x1aSimpleRecurrentLayerParams\x12\x17\n\x0finputVectorSize\x18\x01 \x01(\x04\x12\x18\n\x10outputVectorSize\x18\x02 \x01(\x04\x12:\n\nactivation\x18\n \x01(\x0b\x32&.CoreML.Specification.ActivationParams\x12\x16\n\x0esequenceOutput\x18\x0f 
\x01(\x08\x12\x15\n\rhasBiasVector\x18\x14 \x01(\x08\x12\x38\n\x0cweightMatrix\x18\x1e \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12;\n\x0frecursionMatrix\x18\x1f \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x36\n\nbiasVector\x18 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x14\n\x0creverseInput\x18\x64 \x01(\x08\"\xaa\x06\n\x0eGRULayerParams\x12\x17\n\x0finputVectorSize\x18\x01 \x01(\x04\x12\x18\n\x10outputVectorSize\x18\x02 \x01(\x04\x12;\n\x0b\x61\x63tivations\x18\n \x03(\x0b\x32&.CoreML.Specification.ActivationParams\x12\x16\n\x0esequenceOutput\x18\x0f \x01(\x08\x12\x16\n\x0ehasBiasVectors\x18\x14 \x01(\x08\x12\x42\n\x16updateGateWeightMatrix\x18\x1e \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x41\n\x15resetGateWeightMatrix\x18\x1f \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x42\n\x16outputGateWeightMatrix\x18 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x45\n\x19updateGateRecursionMatrix\x18\x32 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x44\n\x18resetGateRecursionMatrix\x18\x33 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x45\n\x19outputGateRecursionMatrix\x18\x34 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12@\n\x14updateGateBiasVector\x18\x46 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12?\n\x13resetGateBiasVector\x18G \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12@\n\x14outputGateBiasVector\x18H \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x14\n\x0creverseInput\x18\x64 \x01(\x08\"\xaa\x01\n\nLSTMParams\x12\x16\n\x0esequenceOutput\x18\n \x01(\x08\x12\x16\n\x0ehasBiasVectors\x18\x14 \x01(\x08\x12\x12\n\nforgetBias\x18\x1e \x01(\x08\x12\x1a\n\x12hasPeepholeVectors\x18( \x01(\x08\x12!\n\x19\x63oupledInputAndForgetGate\x18\x32 \x01(\x08\x12\x19\n\x11\x63\x65llClipThreshold\x18< \x01(\x02\"\x94\x08\n\x10LSTMWeightParams\x12\x41\n\x15inputGateWeightMatrix\x18\x01 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x42\n\x16\x66orgetGateWeightMatrix\x18\x02 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x42\n\x16\x62lockInputWeightMatrix\x18\x03 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x42\n\x16outputGateWeightMatrix\x18\x04 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x44\n\x18inputGateRecursionMatrix\x18\x14 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x45\n\x19\x66orgetGateRecursionMatrix\x18\x15 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x45\n\x19\x62lockInputRecursionMatrix\x18\x16 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x45\n\x19outputGateRecursionMatrix\x18\x17 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12?\n\x13inputGateBiasVector\x18( \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12@\n\x14\x66orgetGateBiasVector\x18) \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12@\n\x14\x62lockInputBiasVector\x18* \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12@\n\x14outputGateBiasVector\x18+ \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x43\n\x17inputGatePeepholeVector\x18< \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x44\n\x18\x66orgetGatePeepholeVector\x18= \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x44\n\x18outputGatePeepholeVector\x18> \x01(\x0b\x32\".CoreML.Specification.WeightParams\"\x95\x02\n\x1dUniDirectionalLSTMLayerParams\x12\x17\n\x0finputVectorSize\x18\x01 \x01(\x04\x12\x18\n\x10outputVectorSize\x18\x02 \x01(\x04\x12;\n\x0b\x61\x63tivations\x18\n \x03(\x0b\x32&.CoreML.Specification.ActivationParams\x12\x30\n\x06params\x18\x0f \x01(\x0b\x32 
.CoreML.Specification.LSTMParams\x12<\n\x0cweightParams\x18\x14 \x01(\x0b\x32&.CoreML.Specification.LSTMWeightParams\x12\x14\n\x0creverseInput\x18\x64 \x01(\x08\"\xd2\x02\n\x1c\x42iDirectionalLSTMLayerParams\x12\x17\n\x0finputVectorSize\x18\x01 \x01(\x04\x12\x18\n\x10outputVectorSize\x18\x02 \x01(\x04\x12\x46\n\x16\x61\x63tivationsForwardLSTM\x18\n \x03(\x0b\x32&.CoreML.Specification.ActivationParams\x12G\n\x17\x61\x63tivationsBackwardLSTM\x18\x0b \x03(\x0b\x32&.CoreML.Specification.ActivationParams\x12\x30\n\x06params\x18\x0f \x01(\x0b\x32 .CoreML.Specification.LSTMParams\x12<\n\x0cweightParams\x18\x14 \x03(\x0b\x32&.CoreML.Specification.LSTMWeightParams\"\xbe\x03\n\x11\x43ustomLayerParams\x12\x11\n\tclassName\x18\n \x01(\t\x12\x33\n\x07weights\x18\x14 \x03(\x0b\x32\".CoreML.Specification.WeightParams\x12K\n\nparameters\x18\x1e \x03(\x0b\x32\x37.CoreML.Specification.CustomLayerParams.ParametersEntry\x12\x13\n\x0b\x64\x65scription\x18( \x01(\t\x1a\x8c\x01\n\x15\x43ustomLayerParamValue\x12\x15\n\x0b\x64oubleValue\x18\n \x01(\x01H\x00\x12\x15\n\x0bstringValue\x18\x14 \x01(\tH\x00\x12\x12\n\x08intValue\x18\x1e \x01(\x05H\x00\x12\x13\n\tlongValue\x18( \x01(\x03H\x00\x12\x13\n\tboolValue\x18\x32 \x01(\x08H\x00\x42\x07\n\x05value\x1ap\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12L\n\x05value\x18\x02 \x01(\x0b\x32=.CoreML.Specification.CustomLayerParams.CustomLayerParamValue:\x02\x38\x01\"$\n\x14TransposeLayerParams\x12\x0c\n\x04\x61xes\x18\x01 \x03(\x04\"\xa0\x02\n\x18\x42\x61tchedMatMulLayerParams\x12\x12\n\ntransposeA\x18\x01 \x01(\x08\x12\x12\n\ntransposeB\x18\x02 \x01(\x08\x12\"\n\x1aweightMatrixFirstDimension\x18\x05 \x01(\x04\x12#\n\x1bweightMatrixSecondDimension\x18\x06 \x01(\x04\x12\x0f\n\x07hasBias\x18\x07 \x01(\x08\x12\x33\n\x07weights\x18\x08 \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x30\n\x04\x62ias\x18\t \x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x1b\n\x13int8DynamicQuantize\x18\n \x01(\x08\"7\n\x13\x43oncatNDLayerParams\x12\x0c\n\x04\x61xis\x18\x01 \x01(\x03\x12\x12\n\ninterleave\x18\x02 \x01(\x08\"$\n\x14SoftmaxNDLayerParams\x12\x0c\n\x04\x61xis\x18\x01 \x01(\x03\"(\n\x12ReverseLayerParams\x12\x12\n\nreverseDim\x18\x01 \x03(\x08\"@\n\x15ReverseSeqLayerParams\x12\x11\n\tbatchAxis\x18\x01 \x01(\x03\x12\x14\n\x0csequenceAxis\x18\x02 \x01(\x03\"\\\n\x19LoadConstantNDLayerParams\x12\r\n\x05shape\x18\x01 \x03(\x04\x12\x30\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\".CoreML.Specification.WeightParams\"$\n\x13\x46illLikeLayerParams\x12\r\n\x05value\x18\x01 \x01(\x02\";\n\x15\x46illStaticLayerParams\x12\r\n\x05value\x18\x01 \x01(\x02\x12\x13\n\x0btargetShape\x18\x02 \x03(\x04\"\'\n\x16\x46illDynamicLayerParams\x12\r\n\x05value\x18\x01 \x01(\x02\"\x1f\n\x1dWhereBroadcastableLayerParams\"\x10\n\x0eSinLayerParams\"\x10\n\x0e\x43osLayerParams\"\x10\n\x0eTanLayerParams\"\x11\n\x0f\x41sinLayerParams\"\x11\n\x0f\x41\x63osLayerParams\"\x11\n\x0f\x41tanLayerParams\"\x11\n\x0fSinhLayerParams\"\x11\n\x0f\x43oshLayerParams\"\x11\n\x0fTanhLayerParams\"\x12\n\x10\x41sinhLayerParams\"\x12\n\x10\x41\x63oshLayerParams\"\x12\n\x10\x41tanhLayerParams\"\x1d\n\x1bPowBroadcastableLayerParams\"\x11\n\x0f\x45xp2LayerParams\"\x19\n\x17WhereNonZeroLayerParams\"?\n\x19MatrixBandPartLayerParams\x12\x10\n\x08numLower\x18\x01 \x01(\x03\x12\x10\n\x08numUpper\x18\x02 \x01(\x03\"\'\n\x1aUpperTriangularLayerParams\x12\t\n\x01k\x18\x01 \x01(\x03\"\'\n\x1aLowerTriangularLayerParams\x12\t\n\x01k\x18\x01 
\x01(\x03\"\x1c\n\x1a\x42roadcastToLikeLayerParams\"3\n\x1c\x42roadcastToStaticLayerParams\x12\x13\n\x0btargetShape\x18\x01 \x03(\x04\"\x1f\n\x1d\x42roadcastToDynamicLayerParams\"\x1d\n\x1b\x41\x64\x64\x42roadcastableLayerParams\"\x1d\n\x1bMaxBroadcastableLayerParams\"\x1d\n\x1bMinBroadcastableLayerParams\"\x1d\n\x1bModBroadcastableLayerParams\"\"\n FloorDivBroadcastableLayerParams\"\"\n SubtractBroadcastableLayerParams\"\"\n MultiplyBroadcastableLayerParams\" \n\x1e\x44ivideBroadcastableLayerParams\"!\n\x11GatherLayerParams\x12\x0c\n\x04\x61xis\x18\x01 \x01(\x03\"S\n\x12ScatterLayerParams\x12\x0c\n\x04\x61xis\x18\x01 \x01(\x03\x12/\n\x04mode\x18\x02 \x01(\x0e\x32!.CoreML.Specification.ScatterMode\"\x15\n\x13GatherNDLayerParams\"G\n\x14ScatterNDLayerParams\x12/\n\x04mode\x18\x01 \x01(\x0e\x32!.CoreML.Specification.ScatterMode\"*\n\x1aGatherAlongAxisLayerParams\x12\x0c\n\x04\x61xis\x18\x01 \x01(\x03\"\\\n\x1bScatterAlongAxisLayerParams\x12\x0c\n\x04\x61xis\x18\x01 \x01(\x03\x12/\n\x04mode\x18\x02 \x01(\x0e\x32!.CoreML.Specification.ScatterMode\" \n\x10StackLayerParams\x12\x0c\n\x04\x61xis\x18\x01 \x01(\x03\"7\n RankPreservingReshapeLayerParams\x12\x13\n\x0btargetShape\x18\x01 \x03(\x03\"a\n\x1a\x43onstantPaddingLayerParams\x12\r\n\x05value\x18\x01 \x01(\x02\x12\x12\n\npadAmounts\x18\x02 \x03(\x04\x12 \n\x18padToGivenOutputSizeMode\x18\x03 \x01(\x08\"I\n\x1bRandomNormalLikeLayerParams\x12\x0c\n\x04seed\x18\x01 \x01(\x03\x12\x0c\n\x04mean\x18\x02 \x01(\x02\x12\x0e\n\x06stdDev\x18\x03 \x01(\x02\"`\n\x1dRandomNormalStaticLayerParams\x12\x0c\n\x04seed\x18\x01 \x01(\x03\x12\x0c\n\x04mean\x18\x02 \x01(\x02\x12\x0e\n\x06stdDev\x18\x03 \x01(\x02\x12\x13\n\x0boutputShape\x18\x04 \x03(\x04\"L\n\x1eRandomNormalDynamicLayerParams\x12\x0c\n\x04seed\x18\x01 \x01(\x03\x12\x0c\n\x04mean\x18\x02 \x01(\x02\x12\x0e\n\x06stdDev\x18\x03 \x01(\x02\"L\n\x1cRandomUniformLikeLayerParams\x12\x0c\n\x04seed\x18\x01 \x01(\x03\x12\x0e\n\x06minVal\x18\x02 \x01(\x02\x12\x0e\n\x06maxVal\x18\x03 \x01(\x02\"c\n\x1eRandomUniformStaticLayerParams\x12\x0c\n\x04seed\x18\x01 \x01(\x03\x12\x0e\n\x06minVal\x18\x02 \x01(\x02\x12\x0e\n\x06maxVal\x18\x03 \x01(\x02\x12\x13\n\x0boutputShape\x18\x04 \x03(\x04\"O\n\x1fRandomUniformDynamicLayerParams\x12\x0c\n\x04seed\x18\x01 \x01(\x03\x12\x0e\n\x06minVal\x18\x02 \x01(\x02\x12\x0e\n\x06maxVal\x18\x03 \x01(\x02\"<\n\x1eRandomBernoulliLikeLayerParams\x12\x0c\n\x04seed\x18\x01 \x01(\x03\x12\x0c\n\x04prob\x18\x02 \x01(\x02\"S\n RandomBernoulliStaticLayerParams\x12\x0c\n\x04seed\x18\x01 \x01(\x03\x12\x0c\n\x04prob\x18\x02 \x01(\x02\x12\x13\n\x0boutputShape\x18\x03 \x03(\x04\"?\n!RandomBernoulliDynamicLayerParams\x12\x0c\n\x04seed\x18\x01 \x01(\x03\x12\x0c\n\x04prob\x18\x02 \x01(\x02\"z\n\"CategoricalDistributionLayerParams\x12\x0c\n\x04seed\x18\x01 \x01(\x03\x12\x12\n\nnumSamples\x18\x02 \x01(\x03\x12\x10\n\x08isLogits\x18\x03 \x01(\x08\x12\x0b\n\x03\x65ps\x18\x04 \x01(\x02\x12\x13\n\x0btemperature\x18\x05 \x01(\x02\"H\n\x13ReduceL1LayerParams\x12\x0c\n\x04\x61xes\x18\x01 \x03(\x03\x12\x10\n\x08keepDims\x18\x02 \x01(\x08\x12\x11\n\treduceAll\x18\x03 \x01(\x08\"H\n\x13ReduceL2LayerParams\x12\x0c\n\x04\x61xes\x18\x01 \x03(\x03\x12\x10\n\x08keepDims\x18\x02 \x01(\x08\x12\x11\n\treduceAll\x18\x03 \x01(\x08\"I\n\x14ReduceMaxLayerParams\x12\x0c\n\x04\x61xes\x18\x01 \x03(\x03\x12\x10\n\x08keepDims\x18\x02 \x01(\x08\x12\x11\n\treduceAll\x18\x03 \x01(\x08\"I\n\x14ReduceMinLayerParams\x12\x0c\n\x04\x61xes\x18\x01 \x03(\x03\x12\x10\n\x08keepDims\x18\x02 \x01(\x08\x12\x11\n\treduceAll\x18\x03 
\x01(\x08\"I\n\x14ReduceSumLayerParams\x12\x0c\n\x04\x61xes\x18\x01 \x03(\x03\x12\x10\n\x08keepDims\x18\x02 \x01(\x08\x12\x11\n\treduceAll\x18\x03 \x01(\x08\"J\n\x15ReduceProdLayerParams\x12\x0c\n\x04\x61xes\x18\x01 \x03(\x03\x12\x10\n\x08keepDims\x18\x02 \x01(\x08\x12\x11\n\treduceAll\x18\x03 \x01(\x08\"J\n\x15ReduceMeanLayerParams\x12\x0c\n\x04\x61xes\x18\x01 \x03(\x03\x12\x10\n\x08keepDims\x18\x02 \x01(\x08\x12\x11\n\treduceAll\x18\x03 \x01(\x08\"L\n\x17ReduceLogSumLayerParams\x12\x0c\n\x04\x61xes\x18\x01 \x03(\x03\x12\x10\n\x08keepDims\x18\x02 \x01(\x08\x12\x11\n\treduceAll\x18\x03 \x01(\x08\"O\n\x1aReduceSumSquareLayerParams\x12\x0c\n\x04\x61xes\x18\x01 \x03(\x03\x12\x10\n\x08keepDims\x18\x02 \x01(\x08\x12\x11\n\treduceAll\x18\x03 \x01(\x08\"O\n\x1aReduceLogSumExpLayerParams\x12\x0c\n\x04\x61xes\x18\x01 \x03(\x03\x12\x10\n\x08keepDims\x18\x02 \x01(\x08\x12\x11\n\treduceAll\x18\x03 \x01(\x08\"%\n\x15\x45xpandDimsLayerParams\x12\x0c\n\x04\x61xes\x18\x01 \x03(\x03\"&\n\x16\x46lattenTo2DLayerParams\x12\x0c\n\x04\x61xis\x18\x01 \x01(\x03\"/\n\x18ReshapeStaticLayerParams\x12\x13\n\x0btargetShape\x18\x01 \x03(\x03\"\x18\n\x16ReshapeLikeLayerParams\"\x1b\n\x19ReshapeDynamicLayerParams\"6\n\x12SqueezeLayerParams\x12\x0c\n\x04\x61xes\x18\x01 \x03(\x03\x12\x12\n\nsqueezeAll\x18\x02 \x01(\x08\">\n\x0fTopKLayerParams\x12\x0c\n\x04\x61xis\x18\x01 \x01(\x03\x12\t\n\x01K\x18\x02 \x01(\x04\x12\x12\n\nuseBottomK\x18\x03 \x01(\x08\"4\n\x11\x41rgMaxLayerParams\x12\x0c\n\x04\x61xis\x18\x01 \x01(\x03\x12\x11\n\tremoveDim\x18\x02 \x01(\x08\"4\n\x11\x41rgMinLayerParams\x12\x0c\n\x04\x61xis\x18\x01 \x01(\x03\x12\x11\n\tremoveDim\x18\x02 \x01(\x08\"I\n\x12SplitNDLayerParams\x12\x0c\n\x04\x61xis\x18\x01 \x01(\x03\x12\x11\n\tnumSplits\x18\x02 \x01(\x04\x12\x12\n\nsplitSizes\x18\x03 \x03(\x04\"\x11\n\x0f\x43\x65ilLayerParams\"\x12\n\x10RoundLayerParams\"\x12\n\x10\x46loorLayerParams\"\x11\n\x0fSignLayerParams\"1\n\x0f\x43lipLayerParams\x12\x0e\n\x06minVal\x18\x01 \x01(\x02\x12\x0e\n\x06maxVal\x18\x02 \x01(\x02\"\x87\x01\n\x16SliceStaticLayerParams\x12\x10\n\x08\x62\x65ginIds\x18\x01 \x03(\x03\x12\x12\n\nbeginMasks\x18\x02 \x03(\x08\x12\x0e\n\x06\x65ndIds\x18\x03 \x03(\x03\x12\x10\n\x08\x65ndMasks\x18\x04 \x03(\x08\x12\x0f\n\x07strides\x18\x05 \x03(\x03\x12\x14\n\x0csqueezeMasks\x18\x06 \x03(\x08\"v\n\x17SliceDynamicLayerParams\x12\x12\n\nbeginMasks\x18\x02 \x03(\x08\x12\x0e\n\x06\x65ndIds\x18\x03 \x03(\x03\x12\x10\n\x08\x65ndMasks\x18\x04 \x03(\x08\x12\x0f\n\x07strides\x18\x05 \x03(\x03\x12\x14\n\x0csqueezeMasks\x18\x06 \x03(\x08\"\x1f\n\x0fTileLayerParams\x12\x0c\n\x04reps\x18\x01 \x03(\x04\"\x15\n\x13GetShapeLayerParams\"\x10\n\x0e\x45rfLayerParams\"\x99\x01\n\x0fGeluLayerParams\x12<\n\x04mode\x18\x01 \x01(\x0e\x32..CoreML.Specification.GeluLayerParams.GeluMode\"H\n\x08GeluMode\x12\t\n\x05\x45XACT\x10\x00\x12\x16\n\x12TANH_APPROXIMATION\x10\x01\x12\x19\n\x15SIGMOID_APPROXIMATION\x10\x02\"U\n\x16RangeStaticLayerParams\x12\x10\n\x08\x65ndValue\x18\x01 \x01(\x02\x12\x12\n\nstartValue\x18\x02 \x01(\x02\x12\x15\n\rstepSizeValue\x18\x03 \x01(\x02\"D\n\x17RangeDynamicLayerParams\x12\x12\n\nstartValue\x18\x02 \x01(\x02\x12\x15\n\rstepSizeValue\x18\x03 \x01(\x02\"K\n\x19SlidingWindowsLayerParams\x12\x0c\n\x04\x61xis\x18\x01 \x01(\x03\x12\x12\n\nwindowSize\x18\x02 \x01(\x04\x12\x0c\n\x04step\x18\x03 \x01(\x04\"\xaa\x01\n\x1dLayerNormalizationLayerParams\x12\x17\n\x0fnormalizedShape\x18\x01 \x03(\x03\x12\x0b\n\x03\x65ps\x18\x02 \x01(\x02\x12\x31\n\x05gamma\x18\x03 
\x01(\x0b\x32\".CoreML.Specification.WeightParams\x12\x30\n\x04\x62\x65ta\x18\x04 \x01(\x0b\x32\".CoreML.Specification.WeightParams\"\x7f\n NonMaximumSuppressionLayerParams\x12\x14\n\x0ciouThreshold\x18\x01 \x01(\x02\x12\x16\n\x0escoreThreshold\x18\x02 \x01(\x02\x12\x10\n\x08maxBoxes\x18\x03 \x01(\x04\x12\x1b\n\x13perClassSuppression\x18\x04 \x01(\x08\"5\n\x16\x43lampedReLULayerParams\x12\r\n\x05\x61lpha\x18\x01 \x01(\x02\x12\x0c\n\x04\x62\x65ta\x18\x02 \x01(\x02\"6\n\x12\x41rgSortLayerParams\x12\x0c\n\x04\x61xis\x18\x01 \x01(\x03\x12\x12\n\ndescending\x18\x02 \x01(\x08\"4\n\x16SliceBySizeLayerParams\x12\x0c\n\x04size\x18\x02 \x01(\x03\x12\x0c\n\x04\x61xis\x18\x03 \x01(\x03\"\xc5\x04\n\x17NeuralNetworkClassifier\x12\x38\n\x06layers\x18\x01 \x03(\x0b\x32(.CoreML.Specification.NeuralNetworkLayer\x12G\n\rpreprocessing\x18\x02 \x03(\x0b\x32\x30.CoreML.Specification.NeuralNetworkPreprocessing\x12Y\n\x16\x61rrayInputShapeMapping\x18\x05 \x01(\x0e\x32\x39.CoreML.Specification.NeuralNetworkMultiArrayShapeMapping\x12T\n\x16imageInputShapeMapping\x18\x06 \x01(\x0e\x32\x34.CoreML.Specification.NeuralNetworkImageShapeMapping\x12\x43\n\x0cupdateParams\x18\n \x01(\x0b\x32-.CoreML.Specification.NetworkUpdateParameters\x12?\n\x11stringClassLabels\x18\x64 \x01(\x0b\x32\".CoreML.Specification.StringVectorH\x00\x12=\n\x10int64ClassLabels\x18\x65 \x01(\x0b\x32!.CoreML.Specification.Int64VectorH\x00\x12\"\n\x19labelProbabilityLayerName\x18\xc8\x01 \x01(\tB\r\n\x0b\x43lassLabels\"^\n\x11OneHotLayerParams\x12\x18\n\x10oneHotVectorSize\x18\x01 \x01(\x04\x12\x0c\n\x04\x61xis\x18\x02 \x01(\x03\x12\x0f\n\x07onValue\x18\x03 \x01(\x02\x12\x10\n\x08offValue\x18\x04 \x01(\x02\"K\n\x11\x43umSumLayerParams\x12\x0c\n\x04\x61xis\x18\x01 \x01(\x03\x12\x17\n\x0f\x65xcludeFinalSum\x18\x02 \x01(\x08\x12\x0f\n\x07reverse\x18\x03 \x01(\x08\"\x91\x03\n\x16NeuralNetworkRegressor\x12\x38\n\x06layers\x18\x01 \x03(\x0b\x32(.CoreML.Specification.NeuralNetworkLayer\x12G\n\rpreprocessing\x18\x02 \x03(\x0b\x32\x30.CoreML.Specification.NeuralNetworkPreprocessing\x12Y\n\x16\x61rrayInputShapeMapping\x18\x05 \x01(\x0e\x32\x39.CoreML.Specification.NeuralNetworkMultiArrayShapeMapping\x12T\n\x16imageInputShapeMapping\x18\x06 \x01(\x0e\x32\x34.CoreML.Specification.NeuralNetworkImageShapeMapping\x12\x43\n\x0cupdateParams\x18\n \x01(\x0b\x32-.CoreML.Specification.NetworkUpdateParameters\"\xa2\x02\n\x17NetworkUpdateParameters\x12\x33\n\nlossLayers\x18\x01 \x03(\x0b\x32\x1f.CoreML.Specification.LossLayer\x12\x32\n\toptimizer\x18\x02 \x01(\x0b\x32\x1f.CoreML.Specification.Optimizer\x12\x34\n\x06\x65pochs\x18\x03 \x01(\x0b\x32$.CoreML.Specification.Int64Parameter\x12\x34\n\x07shuffle\x18\n \x01(\x0b\x32#.CoreML.Specification.BoolParameter\x12\x32\n\x04seed\x18\x14 \x01(\x0b\x32$.CoreML.Specification.Int64Parameter\"\xe4\x01\n\tLossLayer\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x62\n categoricalCrossEntropyLossLayer\x18\n \x01(\x0b\x32\x36.CoreML.Specification.CategoricalCrossEntropyLossLayerH\x00\x12T\n\x19meanSquaredErrorLossLayer\x18\x0b \x01(\x0b\x32/.CoreML.Specification.MeanSquaredErrorLossLayerH\x00\x42\x0f\n\rLossLayerType\"A\n CategoricalCrossEntropyLossLayer\x12\r\n\x05input\x18\x01 \x01(\t\x12\x0e\n\x06target\x18\x02 \x01(\t\":\n\x19MeanSquaredErrorLossLayer\x12\r\n\x05input\x18\x01 \x01(\t\x12\x0e\n\x06target\x18\x02 \x01(\t\"\x96\x01\n\tOptimizer\x12:\n\x0csgdOptimizer\x18\n \x01(\x0b\x32\".CoreML.Specification.SGDOptimizerH\x00\x12<\n\radamOptimizer\x18\x0b 
\x01(\x0b\x32#.CoreML.Specification.AdamOptimizerH\x00\x42\x0f\n\rOptimizerType\"\xc1\x01\n\x0cSGDOptimizer\x12;\n\x0clearningRate\x18\x01 \x01(\x0b\x32%.CoreML.Specification.DoubleParameter\x12;\n\rminiBatchSize\x18\x02 \x01(\x0b\x32$.CoreML.Specification.Int64Parameter\x12\x37\n\x08momentum\x18\x03 \x01(\x0b\x32%.CoreML.Specification.DoubleParameter\"\xa9\x02\n\rAdamOptimizer\x12;\n\x0clearningRate\x18\x01 \x01(\x0b\x32%.CoreML.Specification.DoubleParameter\x12;\n\rminiBatchSize\x18\x02 \x01(\x0b\x32$.CoreML.Specification.Int64Parameter\x12\x34\n\x05\x62\x65ta1\x18\x03 \x01(\x0b\x32%.CoreML.Specification.DoubleParameter\x12\x34\n\x05\x62\x65ta2\x18\x04 \x01(\x0b\x32%.CoreML.Specification.DoubleParameter\x12\x32\n\x03\x65ps\x18\x05 \x01(\x0b\x32%.CoreML.Specification.DoubleParameter*W\n#NeuralNetworkMultiArrayShapeMapping\x12\x17\n\x13RANK5_ARRAY_MAPPING\x10\x00\x12\x17\n\x13\x45XACT_ARRAY_MAPPING\x10\x01*R\n\x1eNeuralNetworkImageShapeMapping\x12\x17\n\x13RANK5_IMAGE_MAPPING\x10\x00\x12\x17\n\x13RANK4_IMAGE_MAPPING\x10\x01*\x87\x01\n\x0bScatterMode\x12\x12\n\x0eSCATTER_UPDATE\x10\x00\x12\x0f\n\x0bSCATTER_ADD\x10\x01\x12\x0f\n\x0bSCATTER_SUB\x10\x02\x12\x0f\n\x0bSCATTER_MUL\x10\x03\x12\x0f\n\x0bSCATTER_DIV\x10\x04\x12\x0f\n\x0bSCATTER_MAX\x10\x05\x12\x0f\n\x0bSCATTER_MIN\x10\x06\x42\x02H\x03P\x00P\x01\x62\x06proto3') + , + dependencies=[DataStructures__pb2.DESCRIPTOR,Parameters__pb2.DESCRIPTOR,], + public_dependencies=[DataStructures__pb2.DESCRIPTOR,Parameters__pb2.DESCRIPTOR,]) + +_NEURALNETWORKMULTIARRAYSHAPEMAPPING = _descriptor.EnumDescriptor( + name='NeuralNetworkMultiArrayShapeMapping', + full_name='CoreML.Specification.NeuralNetworkMultiArrayShapeMapping', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='RANK5_ARRAY_MAPPING', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='EXACT_ARRAY_MAPPING', index=1, number=1, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=33746, + serialized_end=33833, +) +_sym_db.RegisterEnumDescriptor(_NEURALNETWORKMULTIARRAYSHAPEMAPPING) + +NeuralNetworkMultiArrayShapeMapping = enum_type_wrapper.EnumTypeWrapper(_NEURALNETWORKMULTIARRAYSHAPEMAPPING) +_NEURALNETWORKIMAGESHAPEMAPPING = _descriptor.EnumDescriptor( + name='NeuralNetworkImageShapeMapping', + full_name='CoreML.Specification.NeuralNetworkImageShapeMapping', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='RANK5_IMAGE_MAPPING', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='RANK4_IMAGE_MAPPING', index=1, number=1, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=33835, + serialized_end=33917, +) +_sym_db.RegisterEnumDescriptor(_NEURALNETWORKIMAGESHAPEMAPPING) + +NeuralNetworkImageShapeMapping = enum_type_wrapper.EnumTypeWrapper(_NEURALNETWORKIMAGESHAPEMAPPING) +_SCATTERMODE = _descriptor.EnumDescriptor( + name='ScatterMode', + full_name='CoreML.Specification.ScatterMode', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='SCATTER_UPDATE', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SCATTER_ADD', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SCATTER_SUB', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SCATTER_MUL', index=3, number=3, + options=None, + type=None), + 
_descriptor.EnumValueDescriptor( + name='SCATTER_DIV', index=4, number=4, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SCATTER_MAX', index=5, number=5, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SCATTER_MIN', index=6, number=6, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=33920, + serialized_end=34055, +) +_sym_db.RegisterEnumDescriptor(_SCATTERMODE) + +ScatterMode = enum_type_wrapper.EnumTypeWrapper(_SCATTERMODE) +RANK5_ARRAY_MAPPING = 0 +EXACT_ARRAY_MAPPING = 1 +RANK5_IMAGE_MAPPING = 0 +RANK4_IMAGE_MAPPING = 1 +SCATTER_UPDATE = 0 +SCATTER_ADD = 1 +SCATTER_SUB = 2 +SCATTER_MUL = 3 +SCATTER_DIV = 4 +SCATTER_MAX = 5 +SCATTER_MIN = 6 + + +_SAMEPADDING_SAMEPADDINGMODE = _descriptor.EnumDescriptor( + name='SamePaddingMode', + full_name='CoreML.Specification.SamePadding.SamePaddingMode', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='BOTTOM_RIGHT_HEAVY', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='TOP_LEFT_HEAVY', index=1, number=1, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=14347, + serialized_end=14408, +) +_sym_db.RegisterEnumDescriptor(_SAMEPADDING_SAMEPADDINGMODE) + +_SAMPLINGMODE_METHOD = _descriptor.EnumDescriptor( + name='Method', + full_name='CoreML.Specification.SamplingMode.Method', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='STRICT_ALIGN_ENDPOINTS_MODE', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ALIGN_ENDPOINTS_MODE', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='UPSAMPLE_MODE', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ROI_ALIGN_MODE', index=3, number=3, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=14494, + serialized_end=14600, +) +_sym_db.RegisterEnumDescriptor(_SAMPLINGMODE_METHOD) + +_BOXCOORDINATESMODE_COORDINATES = _descriptor.EnumDescriptor( + name='Coordinates', + full_name='CoreML.Specification.BoxCoordinatesMode.Coordinates', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='CORNERS_HEIGHT_FIRST', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CORNERS_WIDTH_FIRST', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CENTER_SIZE_HEIGHT_FIRST', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CENTER_SIZE_WIDTH_FIRST', index=3, number=3, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=14696, + serialized_end=14819, +) +_sym_db.RegisterEnumDescriptor(_BOXCOORDINATESMODE_COORDINATES) + +_CONVOLUTION3DLAYERPARAMS_PADDINGTYPE = _descriptor.EnumDescriptor( + name='PaddingType', + full_name='CoreML.Specification.Convolution3DLayerParams.PaddingType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='CUSTOM', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='VALID', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SAME', index=2, number=2, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=16497, + serialized_end=16543, +) 
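+# NOTE: the three EnumTypeWrapper objects defined above
+# (NeuralNetworkMultiArrayShapeMapping, NeuralNetworkImageShapeMapping,
+# ScatterMode) expose the standard protobuf enum helpers .Name(), .Value(),
+# .keys(), .values() and .items(); the bare module-level constants such as
+# SCATTER_ADD = 1 re-export the same numbers for convenience. A minimal usage
+# sketch (assuming the usual coremltools.proto module layout; this comment and
+# sketch are not part of the protoc output):
+#
+#     from coremltools.proto import NeuralNetwork_pb2 as nn
+#     assert nn.ScatterMode.Value('SCATTER_ADD') == nn.SCATTER_ADD == 1
+#     assert nn.ScatterMode.Name(nn.SCATTER_MIN) == 'SCATTER_MIN'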
+_sym_db.RegisterEnumDescriptor(_CONVOLUTION3DLAYERPARAMS_PADDINGTYPE) + +_POOLINGLAYERPARAMS_POOLINGTYPE = _descriptor.EnumDescriptor( + name='PoolingType', + full_name='CoreML.Specification.PoolingLayerParams.PoolingType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='MAX', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='AVERAGE', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='L2', index=2, number=2, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=17889, + serialized_end=17932, +) +_sym_db.RegisterEnumDescriptor(_POOLINGLAYERPARAMS_POOLINGTYPE) + +_POOLING3DLAYERPARAMS_POOLINGTYPE3D = _descriptor.EnumDescriptor( + name='PoolingType3D', + full_name='CoreML.Specification.Pooling3DLayerParams.PoolingType3D', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='MAX', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='AVERAGE', index=1, number=1, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=18461, + serialized_end=18498, +) +_sym_db.RegisterEnumDescriptor(_POOLING3DLAYERPARAMS_POOLINGTYPE3D) + +_POOLING3DLAYERPARAMS_POOLING3DPADDINGTYPE = _descriptor.EnumDescriptor( + name='Pooling3DPaddingType', + full_name='CoreML.Specification.Pooling3DLayerParams.Pooling3DPaddingType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='CUSTOM', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='VALID', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SAME', index=2, number=2, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=18500, + serialized_end=18555, +) +_sym_db.RegisterEnumDescriptor(_POOLING3DLAYERPARAMS_POOLING3DPADDINGTYPE) + +_GLOBALPOOLING3DLAYERPARAMS_GLOBALPOOLINGTYPE3D = _descriptor.EnumDescriptor( + name='GlobalPoolingType3D', + full_name='CoreML.Specification.GlobalPooling3DLayerParams.GlobalPoolingType3D', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='MAX', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='AVERAGE', index=1, number=1, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=18672, + serialized_end=18715, +) +_sym_db.RegisterEnumDescriptor(_GLOBALPOOLING3DLAYERPARAMS_GLOBALPOOLINGTYPE3D) + +_UNARYFUNCTIONLAYERPARAMS_OPERATION = _descriptor.EnumDescriptor( + name='Operation', + full_name='CoreML.Specification.UnaryFunctionLayerParams.Operation', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='SQRT', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='RSQRT', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='INVERSE', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='POWER', index=3, number=3, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='EXP', index=4, number=4, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='LOG', index=5, number=5, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ABS', index=6, number=6, + options=None, + type=None), + 
_descriptor.EnumValueDescriptor( + name='THRESHOLD', index=7, number=7, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=19553, + serialized_end=19651, +) +_sym_db.RegisterEnumDescriptor(_UNARYFUNCTIONLAYERPARAMS_OPERATION) + +_UPSAMPLELAYERPARAMS_INTERPOLATIONMODE = _descriptor.EnumDescriptor( + name='InterpolationMode', + full_name='CoreML.Specification.UpsampleLayerParams.InterpolationMode', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='NN', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BILINEAR', index=1, number=1, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=19898, + serialized_end=19939, +) +_sym_db.RegisterEnumDescriptor(_UPSAMPLELAYERPARAMS_INTERPOLATIONMODE) + +_UPSAMPLELAYERPARAMS_LINEARUPSAMPLEMODE = _descriptor.EnumDescriptor( + name='LinearUpsampleMode', + full_name='CoreML.Specification.UpsampleLayerParams.LinearUpsampleMode', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='DEFAULT', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ALIGN_CORNERS_TRUE', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ALIGN_CORNERS_FALSE', index=2, number=2, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=19941, + serialized_end=20023, +) +_sym_db.RegisterEnumDescriptor(_UPSAMPLELAYERPARAMS_LINEARUPSAMPLEMODE) + +_FLATTENLAYERPARAMS_FLATTENORDER = _descriptor.EnumDescriptor( + name='FlattenOrder', + full_name='CoreML.Specification.FlattenLayerParams.FlattenOrder', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='CHANNEL_FIRST', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CHANNEL_LAST', index=1, number=1, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=20828, + serialized_end=20879, +) +_sym_db.RegisterEnumDescriptor(_FLATTENLAYERPARAMS_FLATTENORDER) + +_RESHAPELAYERPARAMS_RESHAPEORDER = _descriptor.EnumDescriptor( + name='ReshapeOrder', + full_name='CoreML.Specification.ReshapeLayerParams.ReshapeOrder', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='CHANNEL_FIRST', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CHANNEL_LAST', index=1, number=1, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=20994, + serialized_end=21045, +) +_sym_db.RegisterEnumDescriptor(_RESHAPELAYERPARAMS_RESHAPEORDER) + +_REORGANIZEDATALAYERPARAMS_REORGANIZATIONTYPE = _descriptor.EnumDescriptor( + name='ReorganizationType', + full_name='CoreML.Specification.ReorganizeDataLayerParams.ReorganizationType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='SPACE_TO_DEPTH', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='DEPTH_TO_SPACE', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='PIXEL_SHUFFLE', index=2, number=2, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=21214, + serialized_end=21293, +) +_sym_db.RegisterEnumDescriptor(_REORGANIZEDATALAYERPARAMS_REORGANIZATIONTYPE) + +_SLICELAYERPARAMS_SLICEAXIS = _descriptor.EnumDescriptor( + 
name='SliceAxis', + full_name='CoreML.Specification.SliceLayerParams.SliceAxis', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='CHANNEL_AXIS', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='HEIGHT_AXIS', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='WIDTH_AXIS', index=2, number=2, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=21434, + serialized_end=21496, +) +_sym_db.RegisterEnumDescriptor(_SLICELAYERPARAMS_SLICEAXIS) + +_REDUCELAYERPARAMS_REDUCEOPERATION = _descriptor.EnumDescriptor( + name='ReduceOperation', + full_name='CoreML.Specification.ReduceLayerParams.ReduceOperation', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='SUM', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='AVG', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='PROD', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='LOGSUM', index=3, number=3, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SUMSQUARE', index=4, number=4, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='L1', index=5, number=5, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='L2', index=6, number=6, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='MAX', index=7, number=7, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='MIN', index=8, number=8, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ARGMAX', index=9, number=9, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=21674, + serialized_end=21792, +) +_sym_db.RegisterEnumDescriptor(_REDUCELAYERPARAMS_REDUCEOPERATION) + +_REDUCELAYERPARAMS_REDUCEAXIS = _descriptor.EnumDescriptor( + name='ReduceAxis', + full_name='CoreML.Specification.ReduceLayerParams.ReduceAxis', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='CHW', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='HW', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='C', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='H', index=3, number=3, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='W', index=4, number=4, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=21794, + serialized_end=21844, +) +_sym_db.RegisterEnumDescriptor(_REDUCELAYERPARAMS_REDUCEAXIS) + +_GELULAYERPARAMS_GELUMODE = _descriptor.EnumDescriptor( + name='GeluMode', + full_name='CoreML.Specification.GeluLayerParams.GeluMode', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='EXACT', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='TANH_APPROXIMATION', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SIGMOID_APPROXIMATION', index=2, number=2, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=30510, + serialized_end=30582, +) +_sym_db.RegisterEnumDescriptor(_GELULAYERPARAMS_GELUMODE) + + +_NEURALNETWORK = _descriptor.Descriptor( + name='NeuralNetwork', 
+ full_name='CoreML.Specification.NeuralNetwork', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='layers', full_name='CoreML.Specification.NeuralNetwork.layers', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='preprocessing', full_name='CoreML.Specification.NeuralNetwork.preprocessing', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='arrayInputShapeMapping', full_name='CoreML.Specification.NeuralNetwork.arrayInputShapeMapping', index=2, + number=5, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='imageInputShapeMapping', full_name='CoreML.Specification.NeuralNetwork.imageInputShapeMapping', index=3, + number=6, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='updateParams', full_name='CoreML.Specification.NeuralNetwork.updateParams', index=4, + number=10, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=86, + serialized_end=478, +) + + +_NEURALNETWORKIMAGESCALER = _descriptor.Descriptor( + name='NeuralNetworkImageScaler', + full_name='CoreML.Specification.NeuralNetworkImageScaler', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='channelScale', full_name='CoreML.Specification.NeuralNetworkImageScaler.channelScale', index=0, + number=10, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='blueBias', full_name='CoreML.Specification.NeuralNetworkImageScaler.blueBias', index=1, + number=20, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='greenBias', full_name='CoreML.Specification.NeuralNetworkImageScaler.greenBias', index=2, + number=21, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='redBias', full_name='CoreML.Specification.NeuralNetworkImageScaler.redBias', index=3, + number=22, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='grayBias', full_name='CoreML.Specification.NeuralNetworkImageScaler.grayBias', index=4, + number=30, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=480, + serialized_end=600, +) + + +_NEURALNETWORKMEANIMAGE = _descriptor.Descriptor( + name='NeuralNetworkMeanImage', + full_name='CoreML.Specification.NeuralNetworkMeanImage', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='meanImage', full_name='CoreML.Specification.NeuralNetworkMeanImage.meanImage', index=0, + number=1, type=2, cpp_type=6, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=602, + serialized_end=645, +) + + +_NEURALNETWORKPREPROCESSING = _descriptor.Descriptor( + name='NeuralNetworkPreprocessing', + full_name='CoreML.Specification.NeuralNetworkPreprocessing', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='featureName', full_name='CoreML.Specification.NeuralNetworkPreprocessing.featureName', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='scaler', full_name='CoreML.Specification.NeuralNetworkPreprocessing.scaler', index=1, + number=10, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='meanImage', full_name='CoreML.Specification.NeuralNetworkPreprocessing.meanImage', index=2, + number=11, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='preprocessor', full_name='CoreML.Specification.NeuralNetworkPreprocessing.preprocessor', + index=0, containing_type=None, fields=[]), + ], + serialized_start=648, + serialized_end=846, +) + + +_ACTIVATIONRELU = _descriptor.Descriptor( + name='ActivationReLU', + full_name='CoreML.Specification.ActivationReLU', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=848, + serialized_end=864, +) + + +_ACTIVATIONLEAKYRELU = _descriptor.Descriptor( + name='ActivationLeakyReLU', + 
full_name='CoreML.Specification.ActivationLeakyReLU', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='alpha', full_name='CoreML.Specification.ActivationLeakyReLU.alpha', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=866, + serialized_end=902, +) + + +_ACTIVATIONTANH = _descriptor.Descriptor( + name='ActivationTanh', + full_name='CoreML.Specification.ActivationTanh', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=904, + serialized_end=920, +) + + +_ACTIVATIONSCALEDTANH = _descriptor.Descriptor( + name='ActivationScaledTanh', + full_name='CoreML.Specification.ActivationScaledTanh', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='alpha', full_name='CoreML.Specification.ActivationScaledTanh.alpha', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='beta', full_name='CoreML.Specification.ActivationScaledTanh.beta', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=922, + serialized_end=973, +) + + +_ACTIVATIONSIGMOID = _descriptor.Descriptor( + name='ActivationSigmoid', + full_name='CoreML.Specification.ActivationSigmoid', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=975, + serialized_end=994, +) + + +_ACTIVATIONLINEAR = _descriptor.Descriptor( + name='ActivationLinear', + full_name='CoreML.Specification.ActivationLinear', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='alpha', full_name='CoreML.Specification.ActivationLinear.alpha', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='beta', full_name='CoreML.Specification.ActivationLinear.beta', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + 
oneofs=[ + ], + serialized_start=996, + serialized_end=1043, +) + + +_ACTIVATIONSIGMOIDHARD = _descriptor.Descriptor( + name='ActivationSigmoidHard', + full_name='CoreML.Specification.ActivationSigmoidHard', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='alpha', full_name='CoreML.Specification.ActivationSigmoidHard.alpha', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='beta', full_name='CoreML.Specification.ActivationSigmoidHard.beta', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1045, + serialized_end=1097, +) + + +_ACTIVATIONPRELU = _descriptor.Descriptor( + name='ActivationPReLU', + full_name='CoreML.Specification.ActivationPReLU', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='alpha', full_name='CoreML.Specification.ActivationPReLU.alpha', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1099, + serialized_end=1167, +) + + +_ACTIVATIONELU = _descriptor.Descriptor( + name='ActivationELU', + full_name='CoreML.Specification.ActivationELU', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='alpha', full_name='CoreML.Specification.ActivationELU.alpha', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1169, + serialized_end=1199, +) + + +_ACTIVATIONTHRESHOLDEDRELU = _descriptor.Descriptor( + name='ActivationThresholdedReLU', + full_name='CoreML.Specification.ActivationThresholdedReLU', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='alpha', full_name='CoreML.Specification.ActivationThresholdedReLU.alpha', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1201, + serialized_end=1243, +) + + +_ACTIVATIONSOFTSIGN = _descriptor.Descriptor( + name='ActivationSoftsign', + full_name='CoreML.Specification.ActivationSoftsign', + filename=None, + file=DESCRIPTOR, + 
containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1245, + serialized_end=1265, +) + + +_ACTIVATIONSOFTPLUS = _descriptor.Descriptor( + name='ActivationSoftplus', + full_name='CoreML.Specification.ActivationSoftplus', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1267, + serialized_end=1287, +) + + +_ACTIVATIONPARAMETRICSOFTPLUS = _descriptor.Descriptor( + name='ActivationParametricSoftplus', + full_name='CoreML.Specification.ActivationParametricSoftplus', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='alpha', full_name='CoreML.Specification.ActivationParametricSoftplus.alpha', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='beta', full_name='CoreML.Specification.ActivationParametricSoftplus.beta', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1290, + serialized_end=1421, +) + + +_ACTIVATIONPARAMS = _descriptor.Descriptor( + name='ActivationParams', + full_name='CoreML.Specification.ActivationParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='linear', full_name='CoreML.Specification.ActivationParams.linear', index=0, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='ReLU', full_name='CoreML.Specification.ActivationParams.ReLU', index=1, + number=10, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='leakyReLU', full_name='CoreML.Specification.ActivationParams.leakyReLU', index=2, + number=15, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='thresholdedReLU', full_name='CoreML.Specification.ActivationParams.thresholdedReLU', index=3, + number=20, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='PReLU', full_name='CoreML.Specification.ActivationParams.PReLU', index=4, + number=25, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, 
containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='tanh', full_name='CoreML.Specification.ActivationParams.tanh', index=5, + number=30, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='scaledTanh', full_name='CoreML.Specification.ActivationParams.scaledTanh', index=6, + number=31, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sigmoid', full_name='CoreML.Specification.ActivationParams.sigmoid', index=7, + number=40, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sigmoidHard', full_name='CoreML.Specification.ActivationParams.sigmoidHard', index=8, + number=41, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='ELU', full_name='CoreML.Specification.ActivationParams.ELU', index=9, + number=50, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='softsign', full_name='CoreML.Specification.ActivationParams.softsign', index=10, + number=60, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='softplus', full_name='CoreML.Specification.ActivationParams.softplus', index=11, + number=70, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='parametricSoftplus', full_name='CoreML.Specification.ActivationParams.parametricSoftplus', index=12, + number=71, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='NonlinearityType', full_name='CoreML.Specification.ActivationParams.NonlinearityType', + index=0, containing_type=None, fields=[]), + ], + serialized_start=1424, + serialized_end=2276, +) + + +_TENSOR = _descriptor.Descriptor( + name='Tensor', + full_name='CoreML.Specification.Tensor', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='rank', full_name='CoreML.Specification.Tensor.rank', index=0, + number=1, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='dimValue', full_name='CoreML.Specification.Tensor.dimValue', index=1, + number=2, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2278, + serialized_end=2318, +) + + +_NEURALNETWORKLAYER = _descriptor.Descriptor( + name='NeuralNetworkLayer', + full_name='CoreML.Specification.NeuralNetworkLayer', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='CoreML.Specification.NeuralNetworkLayer.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='input', full_name='CoreML.Specification.NeuralNetworkLayer.input', index=1, + number=2, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='output', full_name='CoreML.Specification.NeuralNetworkLayer.output', index=2, + number=3, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='inputTensor', full_name='CoreML.Specification.NeuralNetworkLayer.inputTensor', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='outputTensor', full_name='CoreML.Specification.NeuralNetworkLayer.outputTensor', index=4, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='isUpdatable', full_name='CoreML.Specification.NeuralNetworkLayer.isUpdatable', index=5, + number=10, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='convolution', full_name='CoreML.Specification.NeuralNetworkLayer.convolution', index=6, + number=100, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='pooling', full_name='CoreML.Specification.NeuralNetworkLayer.pooling', index=7, + number=120, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='activation', 
full_name='CoreML.Specification.NeuralNetworkLayer.activation', index=8, + number=130, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='innerProduct', full_name='CoreML.Specification.NeuralNetworkLayer.innerProduct', index=9, + number=140, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='embedding', full_name='CoreML.Specification.NeuralNetworkLayer.embedding', index=10, + number=150, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='batchnorm', full_name='CoreML.Specification.NeuralNetworkLayer.batchnorm', index=11, + number=160, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mvn', full_name='CoreML.Specification.NeuralNetworkLayer.mvn', index=12, + number=165, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='l2normalize', full_name='CoreML.Specification.NeuralNetworkLayer.l2normalize', index=13, + number=170, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='softmax', full_name='CoreML.Specification.NeuralNetworkLayer.softmax', index=14, + number=175, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='lrn', full_name='CoreML.Specification.NeuralNetworkLayer.lrn', index=15, + number=180, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='crop', full_name='CoreML.Specification.NeuralNetworkLayer.crop', index=16, + number=190, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='padding', full_name='CoreML.Specification.NeuralNetworkLayer.padding', index=17, + number=200, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='upsample', full_name='CoreML.Specification.NeuralNetworkLayer.upsample', index=18, + number=210, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='resizeBilinear', full_name='CoreML.Specification.NeuralNetworkLayer.resizeBilinear', index=19, + number=211, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='cropResize', full_name='CoreML.Specification.NeuralNetworkLayer.cropResize', index=20, + number=212, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='unary', full_name='CoreML.Specification.NeuralNetworkLayer.unary', index=21, + number=220, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='add', full_name='CoreML.Specification.NeuralNetworkLayer.add', index=22, + number=230, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='multiply', full_name='CoreML.Specification.NeuralNetworkLayer.multiply', index=23, + number=231, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='average', full_name='CoreML.Specification.NeuralNetworkLayer.average', index=24, + number=240, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='scale', full_name='CoreML.Specification.NeuralNetworkLayer.scale', index=25, + number=245, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='bias', full_name='CoreML.Specification.NeuralNetworkLayer.bias', index=26, + number=250, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='max', full_name='CoreML.Specification.NeuralNetworkLayer.max', index=27, + number=260, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='min', full_name='CoreML.Specification.NeuralNetworkLayer.min', index=28, + number=261, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='dot', full_name='CoreML.Specification.NeuralNetworkLayer.dot', index=29, + number=270, type=11, cpp_type=10, label=1, + has_default_value=False, 
default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reduce', full_name='CoreML.Specification.NeuralNetworkLayer.reduce', index=30, + number=280, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='loadConstant', full_name='CoreML.Specification.NeuralNetworkLayer.loadConstant', index=31, + number=290, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reshape', full_name='CoreML.Specification.NeuralNetworkLayer.reshape', index=32, + number=300, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='flatten', full_name='CoreML.Specification.NeuralNetworkLayer.flatten', index=33, + number=301, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='permute', full_name='CoreML.Specification.NeuralNetworkLayer.permute', index=34, + number=310, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='concat', full_name='CoreML.Specification.NeuralNetworkLayer.concat', index=35, + number=320, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='split', full_name='CoreML.Specification.NeuralNetworkLayer.split', index=36, + number=330, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sequenceRepeat', full_name='CoreML.Specification.NeuralNetworkLayer.sequenceRepeat', index=37, + number=340, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reorganizeData', full_name='CoreML.Specification.NeuralNetworkLayer.reorganizeData', index=38, + number=345, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='slice', full_name='CoreML.Specification.NeuralNetworkLayer.slice', index=39, + number=350, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='simpleRecurrent', 
full_name='CoreML.Specification.NeuralNetworkLayer.simpleRecurrent', index=40, + number=400, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='gru', full_name='CoreML.Specification.NeuralNetworkLayer.gru', index=41, + number=410, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='uniDirectionalLSTM', full_name='CoreML.Specification.NeuralNetworkLayer.uniDirectionalLSTM', index=42, + number=420, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='biDirectionalLSTM', full_name='CoreML.Specification.NeuralNetworkLayer.biDirectionalLSTM', index=43, + number=430, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='custom', full_name='CoreML.Specification.NeuralNetworkLayer.custom', index=44, + number=500, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='copy', full_name='CoreML.Specification.NeuralNetworkLayer.copy', index=45, + number=600, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='branch', full_name='CoreML.Specification.NeuralNetworkLayer.branch', index=46, + number=605, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='loop', full_name='CoreML.Specification.NeuralNetworkLayer.loop', index=47, + number=615, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='loopBreak', full_name='CoreML.Specification.NeuralNetworkLayer.loopBreak', index=48, + number=620, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='loopContinue', full_name='CoreML.Specification.NeuralNetworkLayer.loopContinue', index=49, + number=625, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='rangeStatic', full_name='CoreML.Specification.NeuralNetworkLayer.rangeStatic', index=50, + number=635, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, 
enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='rangeDynamic', full_name='CoreML.Specification.NeuralNetworkLayer.rangeDynamic', index=51, + number=640, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='clip', full_name='CoreML.Specification.NeuralNetworkLayer.clip', index=52, + number=660, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='ceil', full_name='CoreML.Specification.NeuralNetworkLayer.ceil', index=53, + number=665, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='floor', full_name='CoreML.Specification.NeuralNetworkLayer.floor', index=54, + number=670, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sign', full_name='CoreML.Specification.NeuralNetworkLayer.sign', index=55, + number=680, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='round', full_name='CoreML.Specification.NeuralNetworkLayer.round', index=56, + number=685, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='exp2', full_name='CoreML.Specification.NeuralNetworkLayer.exp2', index=57, + number=700, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sin', full_name='CoreML.Specification.NeuralNetworkLayer.sin', index=58, + number=710, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='cos', full_name='CoreML.Specification.NeuralNetworkLayer.cos', index=59, + number=715, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='tan', full_name='CoreML.Specification.NeuralNetworkLayer.tan', index=60, + number=720, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='asin', full_name='CoreML.Specification.NeuralNetworkLayer.asin', index=61, + number=730, type=11, cpp_type=10, label=1, + 
has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='acos', full_name='CoreML.Specification.NeuralNetworkLayer.acos', index=62, + number=735, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='atan', full_name='CoreML.Specification.NeuralNetworkLayer.atan', index=63, + number=740, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sinh', full_name='CoreML.Specification.NeuralNetworkLayer.sinh', index=64, + number=750, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='cosh', full_name='CoreML.Specification.NeuralNetworkLayer.cosh', index=65, + number=755, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='tanh', full_name='CoreML.Specification.NeuralNetworkLayer.tanh', index=66, + number=760, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='asinh', full_name='CoreML.Specification.NeuralNetworkLayer.asinh', index=67, + number=770, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='acosh', full_name='CoreML.Specification.NeuralNetworkLayer.acosh', index=68, + number=775, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='atanh', full_name='CoreML.Specification.NeuralNetworkLayer.atanh', index=69, + number=780, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='erf', full_name='CoreML.Specification.NeuralNetworkLayer.erf', index=70, + number=790, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='gelu', full_name='CoreML.Specification.NeuralNetworkLayer.gelu', index=71, + number=795, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='equal', full_name='CoreML.Specification.NeuralNetworkLayer.equal', index=72, + 
number=815, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='notEqual', full_name='CoreML.Specification.NeuralNetworkLayer.notEqual', index=73, + number=820, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='lessThan', full_name='CoreML.Specification.NeuralNetworkLayer.lessThan', index=74, + number=825, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='lessEqual', full_name='CoreML.Specification.NeuralNetworkLayer.lessEqual', index=75, + number=827, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='greaterThan', full_name='CoreML.Specification.NeuralNetworkLayer.greaterThan', index=76, + number=830, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='greaterEqual', full_name='CoreML.Specification.NeuralNetworkLayer.greaterEqual', index=77, + number=832, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='logicalOr', full_name='CoreML.Specification.NeuralNetworkLayer.logicalOr', index=78, + number=840, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='logicalXor', full_name='CoreML.Specification.NeuralNetworkLayer.logicalXor', index=79, + number=845, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='logicalNot', full_name='CoreML.Specification.NeuralNetworkLayer.logicalNot', index=80, + number=850, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='logicalAnd', full_name='CoreML.Specification.NeuralNetworkLayer.logicalAnd', index=81, + number=855, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='modBroadcastable', full_name='CoreML.Specification.NeuralNetworkLayer.modBroadcastable', index=82, + number=865, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='minBroadcastable', full_name='CoreML.Specification.NeuralNetworkLayer.minBroadcastable', index=83, + number=870, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='maxBroadcastable', full_name='CoreML.Specification.NeuralNetworkLayer.maxBroadcastable', index=84, + number=875, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='addBroadcastable', full_name='CoreML.Specification.NeuralNetworkLayer.addBroadcastable', index=85, + number=880, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='powBroadcastable', full_name='CoreML.Specification.NeuralNetworkLayer.powBroadcastable', index=86, + number=885, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='divideBroadcastable', full_name='CoreML.Specification.NeuralNetworkLayer.divideBroadcastable', index=87, + number=890, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='floorDivBroadcastable', full_name='CoreML.Specification.NeuralNetworkLayer.floorDivBroadcastable', index=88, + number=895, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='multiplyBroadcastable', full_name='CoreML.Specification.NeuralNetworkLayer.multiplyBroadcastable', index=89, + number=900, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='subtractBroadcastable', full_name='CoreML.Specification.NeuralNetworkLayer.subtractBroadcastable', index=90, + number=905, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='tile', full_name='CoreML.Specification.NeuralNetworkLayer.tile', index=91, + number=920, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stack', full_name='CoreML.Specification.NeuralNetworkLayer.stack', index=92, + number=925, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + 
_descriptor.FieldDescriptor( + name='gather', full_name='CoreML.Specification.NeuralNetworkLayer.gather', index=93, + number=930, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='scatter', full_name='CoreML.Specification.NeuralNetworkLayer.scatter', index=94, + number=935, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='gatherND', full_name='CoreML.Specification.NeuralNetworkLayer.gatherND', index=95, + number=940, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='scatterND', full_name='CoreML.Specification.NeuralNetworkLayer.scatterND', index=96, + number=945, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='softmaxND', full_name='CoreML.Specification.NeuralNetworkLayer.softmaxND', index=97, + number=950, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='gatherAlongAxis', full_name='CoreML.Specification.NeuralNetworkLayer.gatherAlongAxis', index=98, + number=952, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='scatterAlongAxis', full_name='CoreML.Specification.NeuralNetworkLayer.scatterAlongAxis', index=99, + number=954, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reverse', full_name='CoreML.Specification.NeuralNetworkLayer.reverse', index=100, + number=960, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reverseSeq', full_name='CoreML.Specification.NeuralNetworkLayer.reverseSeq', index=101, + number=965, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='splitND', full_name='CoreML.Specification.NeuralNetworkLayer.splitND', index=102, + number=975, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='concatND', full_name='CoreML.Specification.NeuralNetworkLayer.concatND', index=103, + number=980, type=11, cpp_type=10, label=1, + 
has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='transpose', full_name='CoreML.Specification.NeuralNetworkLayer.transpose', index=104, + number=985, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sliceStatic', full_name='CoreML.Specification.NeuralNetworkLayer.sliceStatic', index=105, + number=995, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sliceDynamic', full_name='CoreML.Specification.NeuralNetworkLayer.sliceDynamic', index=106, + number=1000, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='slidingWindows', full_name='CoreML.Specification.NeuralNetworkLayer.slidingWindows', index=107, + number=1005, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='topK', full_name='CoreML.Specification.NeuralNetworkLayer.topK', index=108, + number=1015, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='argMin', full_name='CoreML.Specification.NeuralNetworkLayer.argMin', index=109, + number=1020, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='argMax', full_name='CoreML.Specification.NeuralNetworkLayer.argMax', index=110, + number=1025, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='embeddingND', full_name='CoreML.Specification.NeuralNetworkLayer.embeddingND', index=111, + number=1040, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='batchedMatmul', full_name='CoreML.Specification.NeuralNetworkLayer.batchedMatmul', index=112, + number=1045, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='getShape', full_name='CoreML.Specification.NeuralNetworkLayer.getShape', index=113, + number=1065, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + 
_descriptor.FieldDescriptor( + name='loadConstantND', full_name='CoreML.Specification.NeuralNetworkLayer.loadConstantND', index=114, + number=1070, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='fillLike', full_name='CoreML.Specification.NeuralNetworkLayer.fillLike', index=115, + number=1080, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='fillStatic', full_name='CoreML.Specification.NeuralNetworkLayer.fillStatic', index=116, + number=1085, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='fillDynamic', full_name='CoreML.Specification.NeuralNetworkLayer.fillDynamic', index=117, + number=1090, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='broadcastToLike', full_name='CoreML.Specification.NeuralNetworkLayer.broadcastToLike', index=118, + number=1100, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='broadcastToStatic', full_name='CoreML.Specification.NeuralNetworkLayer.broadcastToStatic', index=119, + number=1105, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='broadcastToDynamic', full_name='CoreML.Specification.NeuralNetworkLayer.broadcastToDynamic', index=120, + number=1110, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='squeeze', full_name='CoreML.Specification.NeuralNetworkLayer.squeeze', index=121, + number=1120, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='expandDims', full_name='CoreML.Specification.NeuralNetworkLayer.expandDims', index=122, + number=1125, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='flattenTo2D', full_name='CoreML.Specification.NeuralNetworkLayer.flattenTo2D', index=123, + number=1130, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reshapeLike', 
full_name='CoreML.Specification.NeuralNetworkLayer.reshapeLike', index=124, + number=1135, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reshapeStatic', full_name='CoreML.Specification.NeuralNetworkLayer.reshapeStatic', index=125, + number=1140, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reshapeDynamic', full_name='CoreML.Specification.NeuralNetworkLayer.reshapeDynamic', index=126, + number=1145, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='rankPreservingReshape', full_name='CoreML.Specification.NeuralNetworkLayer.rankPreservingReshape', index=127, + number=1150, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='constantPad', full_name='CoreML.Specification.NeuralNetworkLayer.constantPad', index=128, + number=1155, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='randomNormalLike', full_name='CoreML.Specification.NeuralNetworkLayer.randomNormalLike', index=129, + number=1170, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='randomNormalStatic', full_name='CoreML.Specification.NeuralNetworkLayer.randomNormalStatic', index=130, + number=1175, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='randomNormalDynamic', full_name='CoreML.Specification.NeuralNetworkLayer.randomNormalDynamic', index=131, + number=1180, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='randomUniformLike', full_name='CoreML.Specification.NeuralNetworkLayer.randomUniformLike', index=132, + number=1190, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='randomUniformStatic', full_name='CoreML.Specification.NeuralNetworkLayer.randomUniformStatic', index=133, + number=1195, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='randomUniformDynamic', 
full_name='CoreML.Specification.NeuralNetworkLayer.randomUniformDynamic', index=134, + number=1200, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='randomBernoulliLike', full_name='CoreML.Specification.NeuralNetworkLayer.randomBernoulliLike', index=135, + number=1210, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='randomBernoulliStatic', full_name='CoreML.Specification.NeuralNetworkLayer.randomBernoulliStatic', index=136, + number=1215, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='randomBernoulliDynamic', full_name='CoreML.Specification.NeuralNetworkLayer.randomBernoulliDynamic', index=137, + number=1220, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='categoricalDistribution', full_name='CoreML.Specification.NeuralNetworkLayer.categoricalDistribution', index=138, + number=1230, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reduceL1', full_name='CoreML.Specification.NeuralNetworkLayer.reduceL1', index=139, + number=1250, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reduceL2', full_name='CoreML.Specification.NeuralNetworkLayer.reduceL2', index=140, + number=1255, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reduceMax', full_name='CoreML.Specification.NeuralNetworkLayer.reduceMax', index=141, + number=1260, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reduceMin', full_name='CoreML.Specification.NeuralNetworkLayer.reduceMin', index=142, + number=1265, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reduceSum', full_name='CoreML.Specification.NeuralNetworkLayer.reduceSum', index=143, + number=1270, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reduceProd', full_name='CoreML.Specification.NeuralNetworkLayer.reduceProd', 
index=144, + number=1275, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reduceMean', full_name='CoreML.Specification.NeuralNetworkLayer.reduceMean', index=145, + number=1280, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reduceLogSum', full_name='CoreML.Specification.NeuralNetworkLayer.reduceLogSum', index=146, + number=1285, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reduceSumSquare', full_name='CoreML.Specification.NeuralNetworkLayer.reduceSumSquare', index=147, + number=1290, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reduceLogSumExp', full_name='CoreML.Specification.NeuralNetworkLayer.reduceLogSumExp', index=148, + number=1295, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='whereNonZero', full_name='CoreML.Specification.NeuralNetworkLayer.whereNonZero', index=149, + number=1313, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='matrixBandPart', full_name='CoreML.Specification.NeuralNetworkLayer.matrixBandPart', index=150, + number=1315, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='lowerTriangular', full_name='CoreML.Specification.NeuralNetworkLayer.lowerTriangular', index=151, + number=1320, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='upperTriangular', full_name='CoreML.Specification.NeuralNetworkLayer.upperTriangular', index=152, + number=1325, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='whereBroadcastable', full_name='CoreML.Specification.NeuralNetworkLayer.whereBroadcastable', index=153, + number=1330, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='layerNormalization', full_name='CoreML.Specification.NeuralNetworkLayer.layerNormalization', index=154, + number=1350, type=11, cpp_type=10, label=1, + 
has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='NonMaximumSuppression', full_name='CoreML.Specification.NeuralNetworkLayer.NonMaximumSuppression', index=155, + number=1400, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='oneHot', full_name='CoreML.Specification.NeuralNetworkLayer.oneHot', index=156, + number=1450, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='cumSum', full_name='CoreML.Specification.NeuralNetworkLayer.cumSum', index=157, + number=1455, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='clampedReLU', full_name='CoreML.Specification.NeuralNetworkLayer.clampedReLU', index=158, + number=1460, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='argSort', full_name='CoreML.Specification.NeuralNetworkLayer.argSort', index=159, + number=1461, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='pooling3d', full_name='CoreML.Specification.NeuralNetworkLayer.pooling3d', index=160, + number=1465, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='globalPooling3d', full_name='CoreML.Specification.NeuralNetworkLayer.globalPooling3d', index=161, + number=1466, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sliceBySize', full_name='CoreML.Specification.NeuralNetworkLayer.sliceBySize', index=162, + number=1470, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='convolution3d', full_name='CoreML.Specification.NeuralNetworkLayer.convolution3d', index=163, + number=1471, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='layer', full_name='CoreML.Specification.NeuralNetworkLayer.layer', + index=0, containing_type=None, fields=[]), + ], 
+ serialized_start=2321, + serialized_end=13307, +) + + +_BRANCHLAYERPARAMS = _descriptor.Descriptor( + name='BranchLayerParams', + full_name='CoreML.Specification.BranchLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='ifBranch', full_name='CoreML.Specification.BranchLayerParams.ifBranch', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='elseBranch', full_name='CoreML.Specification.BranchLayerParams.elseBranch', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=13310, + serialized_end=13441, +) + + +_LOOPLAYERPARAMS = _descriptor.Descriptor( + name='LoopLayerParams', + full_name='CoreML.Specification.LoopLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='maxLoopIterations', full_name='CoreML.Specification.LoopLayerParams.maxLoopIterations', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='conditionVar', full_name='CoreML.Specification.LoopLayerParams.conditionVar', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='conditionNetwork', full_name='CoreML.Specification.LoopLayerParams.conditionNetwork', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='bodyNetwork', full_name='CoreML.Specification.LoopLayerParams.bodyNetwork', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=13444, + serialized_end=13631, +) + + +_LOOPBREAKLAYERPARAMS = _descriptor.Descriptor( + name='LoopBreakLayerParams', + full_name='CoreML.Specification.LoopBreakLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=13633, + serialized_end=13655, +) + + +_LOOPCONTINUELAYERPARAMS = _descriptor.Descriptor( + name='LoopContinueLayerParams', + full_name='CoreML.Specification.LoopContinueLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + 
fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=13657, + serialized_end=13682, +) + + +_COPYLAYERPARAMS = _descriptor.Descriptor( + name='CopyLayerParams', + full_name='CoreML.Specification.CopyLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=13684, + serialized_end=13701, +) + + +_GREATERTHANLAYERPARAMS = _descriptor.Descriptor( + name='GreaterThanLayerParams', + full_name='CoreML.Specification.GreaterThanLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='alpha', full_name='CoreML.Specification.GreaterThanLayerParams.alpha', index=0, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=13703, + serialized_end=13742, +) + + +_GREATEREQUALLAYERPARAMS = _descriptor.Descriptor( + name='GreaterEqualLayerParams', + full_name='CoreML.Specification.GreaterEqualLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='alpha', full_name='CoreML.Specification.GreaterEqualLayerParams.alpha', index=0, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=13744, + serialized_end=13784, +) + + +_LESSTHANLAYERPARAMS = _descriptor.Descriptor( + name='LessThanLayerParams', + full_name='CoreML.Specification.LessThanLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='alpha', full_name='CoreML.Specification.LessThanLayerParams.alpha', index=0, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=13786, + serialized_end=13822, +) + + +_LESSEQUALLAYERPARAMS = _descriptor.Descriptor( + name='LessEqualLayerParams', + full_name='CoreML.Specification.LessEqualLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='alpha', full_name='CoreML.Specification.LessEqualLayerParams.alpha', index=0, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + 
is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=13824, + serialized_end=13861, +) + + +_EQUALLAYERPARAMS = _descriptor.Descriptor( + name='EqualLayerParams', + full_name='CoreML.Specification.EqualLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='alpha', full_name='CoreML.Specification.EqualLayerParams.alpha', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=13863, + serialized_end=13896, +) + + +_NOTEQUALLAYERPARAMS = _descriptor.Descriptor( + name='NotEqualLayerParams', + full_name='CoreML.Specification.NotEqualLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='alpha', full_name='CoreML.Specification.NotEqualLayerParams.alpha', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=13898, + serialized_end=13934, +) + + +_LOGICALANDLAYERPARAMS = _descriptor.Descriptor( + name='LogicalAndLayerParams', + full_name='CoreML.Specification.LogicalAndLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=13936, + serialized_end=13959, +) + + +_LOGICALORLAYERPARAMS = _descriptor.Descriptor( + name='LogicalOrLayerParams', + full_name='CoreML.Specification.LogicalOrLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=13961, + serialized_end=13983, +) + + +_LOGICALXORLAYERPARAMS = _descriptor.Descriptor( + name='LogicalXorLayerParams', + full_name='CoreML.Specification.LogicalXorLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=13985, + serialized_end=14008, +) + + +_LOGICALNOTLAYERPARAMS = _descriptor.Descriptor( + name='LogicalNotLayerParams', + full_name='CoreML.Specification.LogicalNotLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=14010, + serialized_end=14033, +) + + +_BORDERAMOUNTS_EDGESIZES = _descriptor.Descriptor( + name='EdgeSizes', + full_name='CoreML.Specification.BorderAmounts.EdgeSizes', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + 
_descriptor.FieldDescriptor( + name='startEdgeSize', full_name='CoreML.Specification.BorderAmounts.EdgeSizes.startEdgeSize', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='endEdgeSize', full_name='CoreML.Specification.BorderAmounts.EdgeSizes.endEdgeSize', index=1, + number=2, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=14123, + serialized_end=14178, +) + +_BORDERAMOUNTS = _descriptor.Descriptor( + name='BorderAmounts', + full_name='CoreML.Specification.BorderAmounts', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='borderAmounts', full_name='CoreML.Specification.BorderAmounts.borderAmounts', index=0, + number=10, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_BORDERAMOUNTS_EDGESIZES, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=14036, + serialized_end=14178, +) + + +_VALIDPADDING = _descriptor.Descriptor( + name='ValidPadding', + full_name='CoreML.Specification.ValidPadding', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='paddingAmounts', full_name='CoreML.Specification.ValidPadding.paddingAmounts', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=14180, + serialized_end=14255, +) + + +_SAMEPADDING = _descriptor.Descriptor( + name='SamePadding', + full_name='CoreML.Specification.SamePadding', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='asymmetryMode', full_name='CoreML.Specification.SamePadding.asymmetryMode', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _SAMEPADDING_SAMEPADDINGMODE, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=14258, + serialized_end=14408, +) + + +_SAMPLINGMODE = _descriptor.Descriptor( + name='SamplingMode', + full_name='CoreML.Specification.SamplingMode', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='samplingMethod', full_name='CoreML.Specification.SamplingMode.samplingMethod', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, 
+ message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _SAMPLINGMODE_METHOD, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=14411, + serialized_end=14600, +) + + +_BOXCOORDINATESMODE = _descriptor.Descriptor( + name='BoxCoordinatesMode', + full_name='CoreML.Specification.BoxCoordinatesMode', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='boxMode', full_name='CoreML.Specification.BoxCoordinatesMode.boxMode', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _BOXCOORDINATESMODE_COORDINATES, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=14603, + serialized_end=14819, +) + + +_WEIGHTPARAMS = _descriptor.Descriptor( + name='WeightParams', + full_name='CoreML.Specification.WeightParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='floatValue', full_name='CoreML.Specification.WeightParams.floatValue', index=0, + number=1, type=2, cpp_type=6, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='float16Value', full_name='CoreML.Specification.WeightParams.float16Value', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='rawValue', full_name='CoreML.Specification.WeightParams.rawValue', index=2, + number=30, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='int8RawValue', full_name='CoreML.Specification.WeightParams.int8RawValue', index=3, + number=31, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='quantization', full_name='CoreML.Specification.WeightParams.quantization', index=4, + number=40, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='isUpdatable', full_name='CoreML.Specification.WeightParams.isUpdatable', index=5, + number=50, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=14822, + serialized_end=15003, +) + + +_QUANTIZATIONPARAMS = 
_descriptor.Descriptor( + name='QuantizationParams', + full_name='CoreML.Specification.QuantizationParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='numberOfBits', full_name='CoreML.Specification.QuantizationParams.numberOfBits', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='linearQuantization', full_name='CoreML.Specification.QuantizationParams.linearQuantization', index=1, + number=101, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='lookupTableQuantization', full_name='CoreML.Specification.QuantizationParams.lookupTableQuantization', index=2, + number=102, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='QuantizationType', full_name='CoreML.Specification.QuantizationParams.QuantizationType', + index=0, containing_type=None, fields=[]), + ], + serialized_start=15006, + serialized_end=15234, +) + + +_LINEARQUANTIZATIONPARAMS = _descriptor.Descriptor( + name='LinearQuantizationParams', + full_name='CoreML.Specification.LinearQuantizationParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='scale', full_name='CoreML.Specification.LinearQuantizationParams.scale', index=0, + number=1, type=2, cpp_type=6, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='bias', full_name='CoreML.Specification.LinearQuantizationParams.bias', index=1, + number=2, type=2, cpp_type=6, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=15236, + serialized_end=15291, +) + + +_LOOKUPTABLEQUANTIZATIONPARAMS = _descriptor.Descriptor( + name='LookUpTableQuantizationParams', + full_name='CoreML.Specification.LookUpTableQuantizationParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='floatValue', full_name='CoreML.Specification.LookUpTableQuantizationParams.floatValue', index=0, + number=1, type=2, cpp_type=6, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=15293, + serialized_end=15344, +) + + +_CONVOLUTIONLAYERPARAMS = 
_descriptor.Descriptor( + name='ConvolutionLayerParams', + full_name='CoreML.Specification.ConvolutionLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='outputChannels', full_name='CoreML.Specification.ConvolutionLayerParams.outputChannels', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='kernelChannels', full_name='CoreML.Specification.ConvolutionLayerParams.kernelChannels', index=1, + number=2, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='nGroups', full_name='CoreML.Specification.ConvolutionLayerParams.nGroups', index=2, + number=10, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='kernelSize', full_name='CoreML.Specification.ConvolutionLayerParams.kernelSize', index=3, + number=20, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stride', full_name='CoreML.Specification.ConvolutionLayerParams.stride', index=4, + number=30, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='dilationFactor', full_name='CoreML.Specification.ConvolutionLayerParams.dilationFactor', index=5, + number=40, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='valid', full_name='CoreML.Specification.ConvolutionLayerParams.valid', index=6, + number=50, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='same', full_name='CoreML.Specification.ConvolutionLayerParams.same', index=7, + number=51, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='isDeconvolution', full_name='CoreML.Specification.ConvolutionLayerParams.isDeconvolution', index=8, + number=60, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='hasBias', full_name='CoreML.Specification.ConvolutionLayerParams.hasBias', index=9, + number=70, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + 
_descriptor.FieldDescriptor( + name='weights', full_name='CoreML.Specification.ConvolutionLayerParams.weights', index=10, + number=90, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='bias', full_name='CoreML.Specification.ConvolutionLayerParams.bias', index=11, + number=91, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='outputShape', full_name='CoreML.Specification.ConvolutionLayerParams.outputShape', index=12, + number=100, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='ConvolutionPaddingType', full_name='CoreML.Specification.ConvolutionLayerParams.ConvolutionPaddingType', + index=0, containing_type=None, fields=[]), + ], + serialized_start=15347, + serialized_end=15792, +) + + +_CONVOLUTION3DLAYERPARAMS = _descriptor.Descriptor( + name='Convolution3DLayerParams', + full_name='CoreML.Specification.Convolution3DLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='outputChannels', full_name='CoreML.Specification.Convolution3DLayerParams.outputChannels', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='inputChannels', full_name='CoreML.Specification.Convolution3DLayerParams.inputChannels', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='nGroups', full_name='CoreML.Specification.Convolution3DLayerParams.nGroups', index=2, + number=10, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='kernelDepth', full_name='CoreML.Specification.Convolution3DLayerParams.kernelDepth', index=3, + number=20, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='kernelHeight', full_name='CoreML.Specification.Convolution3DLayerParams.kernelHeight', index=4, + number=21, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='kernelWidth', full_name='CoreML.Specification.Convolution3DLayerParams.kernelWidth', index=5, + number=22, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + 
message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='strideDepth', full_name='CoreML.Specification.Convolution3DLayerParams.strideDepth', index=6, + number=31, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='strideHeight', full_name='CoreML.Specification.Convolution3DLayerParams.strideHeight', index=7, + number=32, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='strideWidth', full_name='CoreML.Specification.Convolution3DLayerParams.strideWidth', index=8, + number=33, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='dilationDepth', full_name='CoreML.Specification.Convolution3DLayerParams.dilationDepth', index=9, + number=40, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='dilationHeight', full_name='CoreML.Specification.Convolution3DLayerParams.dilationHeight', index=10, + number=41, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='dilationWidth', full_name='CoreML.Specification.Convolution3DLayerParams.dilationWidth', index=11, + number=42, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='hasBias', full_name='CoreML.Specification.Convolution3DLayerParams.hasBias', index=12, + number=50, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='weights', full_name='CoreML.Specification.Convolution3DLayerParams.weights', index=13, + number=60, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='bias', full_name='CoreML.Specification.Convolution3DLayerParams.bias', index=14, + number=61, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='paddingType', full_name='CoreML.Specification.Convolution3DLayerParams.paddingType', index=15, + number=70, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + 
name='customPaddingFront', full_name='CoreML.Specification.Convolution3DLayerParams.customPaddingFront', index=16, + number=80, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='customPaddingBack', full_name='CoreML.Specification.Convolution3DLayerParams.customPaddingBack', index=17, + number=81, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='customPaddingTop', full_name='CoreML.Specification.Convolution3DLayerParams.customPaddingTop', index=18, + number=82, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='customPaddingBottom', full_name='CoreML.Specification.Convolution3DLayerParams.customPaddingBottom', index=19, + number=83, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='customPaddingLeft', full_name='CoreML.Specification.Convolution3DLayerParams.customPaddingLeft', index=20, + number=84, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='customPaddingRight', full_name='CoreML.Specification.Convolution3DLayerParams.customPaddingRight', index=21, + number=85, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='isDeconvolution', full_name='CoreML.Specification.Convolution3DLayerParams.isDeconvolution', index=22, + number=86, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='outputShape', full_name='CoreML.Specification.Convolution3DLayerParams.outputShape', index=23, + number=87, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _CONVOLUTION3DLAYERPARAMS_PADDINGTYPE, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=15795, + serialized_end=16543, +) + + +_INNERPRODUCTLAYERPARAMS = _descriptor.Descriptor( + name='InnerProductLayerParams', + full_name='CoreML.Specification.InnerProductLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='inputChannels', full_name='CoreML.Specification.InnerProductLayerParams.inputChannels', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='outputChannels', full_name='CoreML.Specification.InnerProductLayerParams.outputChannels', index=1, + number=2, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='hasBias', full_name='CoreML.Specification.InnerProductLayerParams.hasBias', index=2, + number=10, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='weights', full_name='CoreML.Specification.InnerProductLayerParams.weights', index=3, + number=20, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='bias', full_name='CoreML.Specification.InnerProductLayerParams.bias', index=4, + number=21, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='int8DynamicQuantize', full_name='CoreML.Specification.InnerProductLayerParams.int8DynamicQuantize', index=5, + number=22, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=16546, + serialized_end=16767, +) + + +_EMBEDDINGLAYERPARAMS = _descriptor.Descriptor( + name='EmbeddingLayerParams', + full_name='CoreML.Specification.EmbeddingLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='inputDim', full_name='CoreML.Specification.EmbeddingLayerParams.inputDim', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='outputChannels', full_name='CoreML.Specification.EmbeddingLayerParams.outputChannels', index=1, + number=2, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='hasBias', full_name='CoreML.Specification.EmbeddingLayerParams.hasBias', index=2, + number=10, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='weights', full_name='CoreML.Specification.EmbeddingLayerParams.weights', index=3, + number=20, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + 
name='bias', full_name='CoreML.Specification.EmbeddingLayerParams.bias', index=4, + number=21, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=16770, + serialized_end=16954, +) + + +_EMBEDDINGNDLAYERPARAMS = _descriptor.Descriptor( + name='EmbeddingNDLayerParams', + full_name='CoreML.Specification.EmbeddingNDLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='vocabSize', full_name='CoreML.Specification.EmbeddingNDLayerParams.vocabSize', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='embeddingSize', full_name='CoreML.Specification.EmbeddingNDLayerParams.embeddingSize', index=1, + number=2, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='hasBias', full_name='CoreML.Specification.EmbeddingNDLayerParams.hasBias', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='weights', full_name='CoreML.Specification.EmbeddingNDLayerParams.weights', index=3, + number=20, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='bias', full_name='CoreML.Specification.EmbeddingNDLayerParams.bias', index=4, + number=21, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=16957, + serialized_end=17143, +) + + +_BATCHNORMLAYERPARAMS = _descriptor.Descriptor( + name='BatchnormLayerParams', + full_name='CoreML.Specification.BatchnormLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='channels', full_name='CoreML.Specification.BatchnormLayerParams.channels', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='computeMeanVar', full_name='CoreML.Specification.BatchnormLayerParams.computeMeanVar', index=1, + number=5, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='instanceNormalization', 
full_name='CoreML.Specification.BatchnormLayerParams.instanceNormalization', index=2, + number=6, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='epsilon', full_name='CoreML.Specification.BatchnormLayerParams.epsilon', index=3, + number=10, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='gamma', full_name='CoreML.Specification.BatchnormLayerParams.gamma', index=4, + number=15, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='beta', full_name='CoreML.Specification.BatchnormLayerParams.beta', index=5, + number=16, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mean', full_name='CoreML.Specification.BatchnormLayerParams.mean', index=6, + number=17, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='variance', full_name='CoreML.Specification.BatchnormLayerParams.variance', index=7, + number=18, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=17146, + serialized_end=17463, +) + + +_POOLINGLAYERPARAMS_VALIDCOMPLETEPADDING = _descriptor.Descriptor( + name='ValidCompletePadding', + full_name='CoreML.Specification.PoolingLayerParams.ValidCompletePadding', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='paddingAmounts', full_name='CoreML.Specification.PoolingLayerParams.ValidCompletePadding.paddingAmounts', index=0, + number=10, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=17841, + serialized_end=17887, +) + +_POOLINGLAYERPARAMS = _descriptor.Descriptor( + name='PoolingLayerParams', + full_name='CoreML.Specification.PoolingLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='type', full_name='CoreML.Specification.PoolingLayerParams.type', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + 
name='kernelSize', full_name='CoreML.Specification.PoolingLayerParams.kernelSize', index=1, + number=10, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stride', full_name='CoreML.Specification.PoolingLayerParams.stride', index=2, + number=20, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='valid', full_name='CoreML.Specification.PoolingLayerParams.valid', index=3, + number=30, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='same', full_name='CoreML.Specification.PoolingLayerParams.same', index=4, + number=31, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='includeLastPixel', full_name='CoreML.Specification.PoolingLayerParams.includeLastPixel', index=5, + number=32, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='avgPoolExcludePadding', full_name='CoreML.Specification.PoolingLayerParams.avgPoolExcludePadding', index=6, + number=50, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='globalPooling', full_name='CoreML.Specification.PoolingLayerParams.globalPooling', index=7, + number=60, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_POOLINGLAYERPARAMS_VALIDCOMPLETEPADDING, ], + enum_types=[ + _POOLINGLAYERPARAMS_POOLINGTYPE, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='PoolingPaddingType', full_name='CoreML.Specification.PoolingLayerParams.PoolingPaddingType', + index=0, containing_type=None, fields=[]), + ], + serialized_start=17466, + serialized_end=17954, +) + + +_POOLING3DLAYERPARAMS = _descriptor.Descriptor( + name='Pooling3DLayerParams', + full_name='CoreML.Specification.Pooling3DLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='type', full_name='CoreML.Specification.Pooling3DLayerParams.type', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='kernelDepth', full_name='CoreML.Specification.Pooling3DLayerParams.kernelDepth', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, 
enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='kernelHeight', full_name='CoreML.Specification.Pooling3DLayerParams.kernelHeight', index=2, + number=3, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='kernelWidth', full_name='CoreML.Specification.Pooling3DLayerParams.kernelWidth', index=3, + number=4, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='strideDepth', full_name='CoreML.Specification.Pooling3DLayerParams.strideDepth', index=4, + number=5, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='strideHeight', full_name='CoreML.Specification.Pooling3DLayerParams.strideHeight', index=5, + number=6, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='strideWidth', full_name='CoreML.Specification.Pooling3DLayerParams.strideWidth', index=6, + number=7, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='paddingType', full_name='CoreML.Specification.Pooling3DLayerParams.paddingType', index=7, + number=15, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='customPaddingFront', full_name='CoreML.Specification.Pooling3DLayerParams.customPaddingFront', index=8, + number=8, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='customPaddingBack', full_name='CoreML.Specification.Pooling3DLayerParams.customPaddingBack', index=9, + number=9, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='customPaddingTop', full_name='CoreML.Specification.Pooling3DLayerParams.customPaddingTop', index=10, + number=10, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='customPaddingBottom', full_name='CoreML.Specification.Pooling3DLayerParams.customPaddingBottom', index=11, + number=11, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + 
name='customPaddingLeft', full_name='CoreML.Specification.Pooling3DLayerParams.customPaddingLeft', index=12, + number=12, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='customPaddingRight', full_name='CoreML.Specification.Pooling3DLayerParams.customPaddingRight', index=13, + number=13, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='countExcludePadding', full_name='CoreML.Specification.Pooling3DLayerParams.countExcludePadding', index=14, + number=14, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _POOLING3DLAYERPARAMS_POOLINGTYPE3D, + _POOLING3DLAYERPARAMS_POOLING3DPADDINGTYPE, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=17957, + serialized_end=18555, +) + + +_GLOBALPOOLING3DLAYERPARAMS = _descriptor.Descriptor( + name='GlobalPooling3DLayerParams', + full_name='CoreML.Specification.GlobalPooling3DLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='type', full_name='CoreML.Specification.GlobalPooling3DLayerParams.type', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _GLOBALPOOLING3DLAYERPARAMS_GLOBALPOOLINGTYPE3D, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=18558, + serialized_end=18715, +) + + +_PADDINGLAYERPARAMS_PADDINGCONSTANT = _descriptor.Descriptor( + name='PaddingConstant', + full_name='CoreML.Specification.PaddingLayerParams.PaddingConstant', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.PaddingLayerParams.PaddingConstant.value', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=19045, + serialized_end=19077, +) + +_PADDINGLAYERPARAMS_PADDINGREFLECTION = _descriptor.Descriptor( + name='PaddingReflection', + full_name='CoreML.Specification.PaddingLayerParams.PaddingReflection', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=19079, + serialized_end=19098, +) + +_PADDINGLAYERPARAMS_PADDINGREPLICATION = _descriptor.Descriptor( + name='PaddingReplication', + 
full_name='CoreML.Specification.PaddingLayerParams.PaddingReplication', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=19100, + serialized_end=19120, +) + +_PADDINGLAYERPARAMS = _descriptor.Descriptor( + name='PaddingLayerParams', + full_name='CoreML.Specification.PaddingLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='constant', full_name='CoreML.Specification.PaddingLayerParams.constant', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reflection', full_name='CoreML.Specification.PaddingLayerParams.reflection', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='replication', full_name='CoreML.Specification.PaddingLayerParams.replication', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='paddingAmounts', full_name='CoreML.Specification.PaddingLayerParams.paddingAmounts', index=3, + number=10, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_PADDINGLAYERPARAMS_PADDINGCONSTANT, _PADDINGLAYERPARAMS_PADDINGREFLECTION, _PADDINGLAYERPARAMS_PADDINGREPLICATION, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='PaddingType', full_name='CoreML.Specification.PaddingLayerParams.PaddingType', + index=0, containing_type=None, fields=[]), + ], + serialized_start=18718, + serialized_end=19135, +) + + +_CONCATLAYERPARAMS = _descriptor.Descriptor( + name='ConcatLayerParams', + full_name='CoreML.Specification.ConcatLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='sequenceConcat', full_name='CoreML.Specification.ConcatLayerParams.sequenceConcat', index=0, + number=100, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=19137, + serialized_end=19180, +) + + +_LRNLAYERPARAMS = _descriptor.Descriptor( + name='LRNLayerParams', + full_name='CoreML.Specification.LRNLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='alpha', full_name='CoreML.Specification.LRNLayerParams.alpha', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, 
default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='beta', full_name='CoreML.Specification.LRNLayerParams.beta', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='localSize', full_name='CoreML.Specification.LRNLayerParams.localSize', index=2, + number=3, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='k', full_name='CoreML.Specification.LRNLayerParams.k', index=3, + number=4, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=19182, + serialized_end=19257, +) + + +_SOFTMAXLAYERPARAMS = _descriptor.Descriptor( + name='SoftmaxLayerParams', + full_name='CoreML.Specification.SoftmaxLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=19259, + serialized_end=19279, +) + + +_SPLITLAYERPARAMS = _descriptor.Descriptor( + name='SplitLayerParams', + full_name='CoreML.Specification.SplitLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='nOutputs', full_name='CoreML.Specification.SplitLayerParams.nOutputs', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=19281, + serialized_end=19317, +) + + +_ADDLAYERPARAMS = _descriptor.Descriptor( + name='AddLayerParams', + full_name='CoreML.Specification.AddLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='alpha', full_name='CoreML.Specification.AddLayerParams.alpha', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=19319, + serialized_end=19350, +) + + +_MULTIPLYLAYERPARAMS = _descriptor.Descriptor( + name='MultiplyLayerParams', + full_name='CoreML.Specification.MultiplyLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='alpha', full_name='CoreML.Specification.MultiplyLayerParams.alpha', index=0, + number=1, type=2, 
cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=19352, + serialized_end=19388, +) + + +_UNARYFUNCTIONLAYERPARAMS = _descriptor.Descriptor( + name='UnaryFunctionLayerParams', + full_name='CoreML.Specification.UnaryFunctionLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='type', full_name='CoreML.Specification.UnaryFunctionLayerParams.type', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='alpha', full_name='CoreML.Specification.UnaryFunctionLayerParams.alpha', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='epsilon', full_name='CoreML.Specification.UnaryFunctionLayerParams.epsilon', index=2, + number=3, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='shift', full_name='CoreML.Specification.UnaryFunctionLayerParams.shift', index=3, + number=4, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='scale', full_name='CoreML.Specification.UnaryFunctionLayerParams.scale', index=4, + number=5, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _UNARYFUNCTIONLAYERPARAMS_OPERATION, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=19391, + serialized_end=19651, +) + + +_UPSAMPLELAYERPARAMS = _descriptor.Descriptor( + name='UpsampleLayerParams', + full_name='CoreML.Specification.UpsampleLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='scalingFactor', full_name='CoreML.Specification.UpsampleLayerParams.scalingFactor', index=0, + number=1, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='fractionalScalingFactor', full_name='CoreML.Specification.UpsampleLayerParams.fractionalScalingFactor', index=1, + number=7, type=2, cpp_type=6, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mode', full_name='CoreML.Specification.UpsampleLayerParams.mode', 
index=2, + number=5, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='linearUpsampleMode', full_name='CoreML.Specification.UpsampleLayerParams.linearUpsampleMode', index=3, + number=6, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _UPSAMPLELAYERPARAMS_INTERPOLATIONMODE, + _UPSAMPLELAYERPARAMS_LINEARUPSAMPLEMODE, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=19654, + serialized_end=20023, +) + + +_RESIZEBILINEARLAYERPARAMS = _descriptor.Descriptor( + name='ResizeBilinearLayerParams', + full_name='CoreML.Specification.ResizeBilinearLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='targetSize', full_name='CoreML.Specification.ResizeBilinearLayerParams.targetSize', index=0, + number=1, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mode', full_name='CoreML.Specification.ResizeBilinearLayerParams.mode', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=20025, + serialized_end=20122, +) + + +_CROPRESIZELAYERPARAMS = _descriptor.Descriptor( + name='CropResizeLayerParams', + full_name='CoreML.Specification.CropResizeLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='targetSize', full_name='CoreML.Specification.CropResizeLayerParams.targetSize', index=0, + number=1, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='normalizedCoordinates', full_name='CoreML.Specification.CropResizeLayerParams.normalizedCoordinates', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mode', full_name='CoreML.Specification.CropResizeLayerParams.mode', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='boxIndicesMode', full_name='CoreML.Specification.CropResizeLayerParams.boxIndicesMode', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + 
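+ # Final CropResizeLayerParams field: 'spatialScale' (float, field 5) scales input box coordinates before cropping; the 'mode' and 'boxIndicesMode' fields above are submessages selecting the sampling and box-indexing behavior (per the Core ML spec).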
_descriptor.FieldDescriptor( + name='spatialScale', full_name='CoreML.Specification.CropResizeLayerParams.spatialScale', index=4, + number=5, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=20125, + serialized_end=20337, +) + + +_BIASLAYERPARAMS = _descriptor.Descriptor( + name='BiasLayerParams', + full_name='CoreML.Specification.BiasLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='shape', full_name='CoreML.Specification.BiasLayerParams.shape', index=0, + number=1, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='bias', full_name='CoreML.Specification.BiasLayerParams.bias', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=20339, + serialized_end=20421, +) + + +_SCALELAYERPARAMS = _descriptor.Descriptor( + name='ScaleLayerParams', + full_name='CoreML.Specification.ScaleLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='shapeScale', full_name='CoreML.Specification.ScaleLayerParams.shapeScale', index=0, + number=1, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='scale', full_name='CoreML.Specification.ScaleLayerParams.scale', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='hasBias', full_name='CoreML.Specification.ScaleLayerParams.hasBias', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='shapeBias', full_name='CoreML.Specification.ScaleLayerParams.shapeBias', index=3, + number=4, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='bias', full_name='CoreML.Specification.ScaleLayerParams.bias', index=4, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + 
extension_ranges=[], + oneofs=[ + ], + serialized_start=20424, + serialized_end=20599, +) + + +_LOADCONSTANTLAYERPARAMS = _descriptor.Descriptor( + name='LoadConstantLayerParams', + full_name='CoreML.Specification.LoadConstantLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='shape', full_name='CoreML.Specification.LoadConstantLayerParams.shape', index=0, + number=1, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='data', full_name='CoreML.Specification.LoadConstantLayerParams.data', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=20601, + serialized_end=20691, +) + + +_L2NORMALIZELAYERPARAMS = _descriptor.Descriptor( + name='L2NormalizeLayerParams', + full_name='CoreML.Specification.L2NormalizeLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='epsilon', full_name='CoreML.Specification.L2NormalizeLayerParams.epsilon', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=20693, + serialized_end=20734, +) + + +_FLATTENLAYERPARAMS = _descriptor.Descriptor( + name='FlattenLayerParams', + full_name='CoreML.Specification.FlattenLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='mode', full_name='CoreML.Specification.FlattenLayerParams.mode', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _FLATTENLAYERPARAMS_FLATTENORDER, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=20737, + serialized_end=20879, +) + + +_RESHAPELAYERPARAMS = _descriptor.Descriptor( + name='ReshapeLayerParams', + full_name='CoreML.Specification.ReshapeLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='targetShape', full_name='CoreML.Specification.ReshapeLayerParams.targetShape', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mode', full_name='CoreML.Specification.ReshapeLayerParams.mode', index=1, + number=2, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, 
extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _RESHAPELAYERPARAMS_RESHAPEORDER, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=20882, + serialized_end=21045, +) + + +_PERMUTELAYERPARAMS = _descriptor.Descriptor( + name='PermuteLayerParams', + full_name='CoreML.Specification.PermuteLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axis', full_name='CoreML.Specification.PermuteLayerParams.axis', index=0, + number=1, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=21047, + serialized_end=21081, +) + + +_REORGANIZEDATALAYERPARAMS = _descriptor.Descriptor( + name='ReorganizeDataLayerParams', + full_name='CoreML.Specification.ReorganizeDataLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='mode', full_name='CoreML.Specification.ReorganizeDataLayerParams.mode', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='blockSize', full_name='CoreML.Specification.ReorganizeDataLayerParams.blockSize', index=1, + number=2, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _REORGANIZEDATALAYERPARAMS_REORGANIZATIONTYPE, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=21084, + serialized_end=21293, +) + + +_SLICELAYERPARAMS = _descriptor.Descriptor( + name='SliceLayerParams', + full_name='CoreML.Specification.SliceLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='startIndex', full_name='CoreML.Specification.SliceLayerParams.startIndex', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='endIndex', full_name='CoreML.Specification.SliceLayerParams.endIndex', index=1, + number=2, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stride', full_name='CoreML.Specification.SliceLayerParams.stride', index=2, + number=3, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='axis', full_name='CoreML.Specification.SliceLayerParams.axis', index=3, + number=4, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + 
message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _SLICELAYERPARAMS_SLICEAXIS, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=21296, + serialized_end=21496, +) + + +_REDUCELAYERPARAMS = _descriptor.Descriptor( + name='ReduceLayerParams', + full_name='CoreML.Specification.ReduceLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='mode', full_name='CoreML.Specification.ReduceLayerParams.mode', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='epsilon', full_name='CoreML.Specification.ReduceLayerParams.epsilon', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='axis', full_name='CoreML.Specification.ReduceLayerParams.axis', index=2, + number=3, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _REDUCELAYERPARAMS_REDUCEOPERATION, + _REDUCELAYERPARAMS_REDUCEAXIS, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=21499, + serialized_end=21844, +) + + +_CROPLAYERPARAMS = _descriptor.Descriptor( + name='CropLayerParams', + full_name='CoreML.Specification.CropLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='cropAmounts', full_name='CoreML.Specification.CropLayerParams.cropAmounts', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='offset', full_name='CoreML.Specification.CropLayerParams.offset', index=1, + number=5, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=21846, + serialized_end=21937, +) + + +_AVERAGELAYERPARAMS = _descriptor.Descriptor( + name='AverageLayerParams', + full_name='CoreML.Specification.AverageLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=21939, + serialized_end=21959, +) + + +_MAXLAYERPARAMS = _descriptor.Descriptor( + name='MaxLayerParams', + full_name='CoreML.Specification.MaxLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ 
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=21961,
+  serialized_end=21977,
+)
+
+
+_MINLAYERPARAMS = _descriptor.Descriptor(
+  name='MinLayerParams',
+  full_name='CoreML.Specification.MinLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=21979,
+  serialized_end=21995,
+)
+
+
+_DOTPRODUCTLAYERPARAMS = _descriptor.Descriptor(
+  name='DotProductLayerParams',
+  full_name='CoreML.Specification.DotProductLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='cosineSimilarity', full_name='CoreML.Specification.DotProductLayerParams.cosineSimilarity', index=0,
+      number=1, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=21997,
+  serialized_end=22046,
+)
+
+
+_MEANVARIANCENORMALIZELAYERPARAMS = _descriptor.Descriptor(
+  name='MeanVarianceNormalizeLayerParams',
+  full_name='CoreML.Specification.MeanVarianceNormalizeLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='acrossChannels', full_name='CoreML.Specification.MeanVarianceNormalizeLayerParams.acrossChannels', index=0,
+      number=1, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='normalizeVariance', full_name='CoreML.Specification.MeanVarianceNormalizeLayerParams.normalizeVariance', index=1,
+      number=2, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='epsilon', full_name='CoreML.Specification.MeanVarianceNormalizeLayerParams.epsilon', index=2,
+      number=3, type=2, cpp_type=6, label=1,
+      has_default_value=False, default_value=float(0),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=22048,
+  serialized_end=22150,
+)
+
+
+_SEQUENCEREPEATLAYERPARAMS = _descriptor.Descriptor(
+  name='SequenceRepeatLayerParams',
+  full_name='CoreML.Specification.SequenceRepeatLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='nRepetitions', full_name='CoreML.Specification.SequenceRepeatLayerParams.nRepetitions', index=0,
+      number=1, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=22152,
+  serialized_end=22201,
+)
+
+
+_SIMPLERECURRENTLAYERPARAMS = _descriptor.Descriptor(
+  name='SimpleRecurrentLayerParams',
+  full_name='CoreML.Specification.SimpleRecurrentLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='inputVectorSize', full_name='CoreML.Specification.SimpleRecurrentLayerParams.inputVectorSize', index=0,
+      number=1, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='outputVectorSize', full_name='CoreML.Specification.SimpleRecurrentLayerParams.outputVectorSize', index=1,
+      number=2, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='activation', full_name='CoreML.Specification.SimpleRecurrentLayerParams.activation', index=2,
+      number=10, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='sequenceOutput', full_name='CoreML.Specification.SimpleRecurrentLayerParams.sequenceOutput', index=3,
+      number=15, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='hasBiasVector', full_name='CoreML.Specification.SimpleRecurrentLayerParams.hasBiasVector', index=4,
+      number=20, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='weightMatrix', full_name='CoreML.Specification.SimpleRecurrentLayerParams.weightMatrix', index=5,
+      number=30, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='recursionMatrix', full_name='CoreML.Specification.SimpleRecurrentLayerParams.recursionMatrix', index=6,
+      number=31, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='biasVector', full_name='CoreML.Specification.SimpleRecurrentLayerParams.biasVector', index=7,
+      number=32, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='reverseInput', full_name='CoreML.Specification.SimpleRecurrentLayerParams.reverseInput', index=8,
+      number=100, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=22204,
+  serialized_end=22587,
+)
+
+
+_GRULAYERPARAMS = _descriptor.Descriptor(
+  name='GRULayerParams',
+  full_name='CoreML.Specification.GRULayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='inputVectorSize', full_name='CoreML.Specification.GRULayerParams.inputVectorSize', index=0,
+      number=1, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='outputVectorSize', full_name='CoreML.Specification.GRULayerParams.outputVectorSize', index=1,
+      number=2, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='activations', full_name='CoreML.Specification.GRULayerParams.activations', index=2,
+      number=10, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='sequenceOutput', full_name='CoreML.Specification.GRULayerParams.sequenceOutput', index=3,
+      number=15, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='hasBiasVectors', full_name='CoreML.Specification.GRULayerParams.hasBiasVectors', index=4,
+      number=20, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='updateGateWeightMatrix', full_name='CoreML.Specification.GRULayerParams.updateGateWeightMatrix', index=5,
+      number=30, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='resetGateWeightMatrix', full_name='CoreML.Specification.GRULayerParams.resetGateWeightMatrix', index=6,
+      number=31, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='outputGateWeightMatrix', full_name='CoreML.Specification.GRULayerParams.outputGateWeightMatrix', index=7,
+      number=32, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='updateGateRecursionMatrix', full_name='CoreML.Specification.GRULayerParams.updateGateRecursionMatrix', index=8,
+      number=50, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='resetGateRecursionMatrix', full_name='CoreML.Specification.GRULayerParams.resetGateRecursionMatrix', index=9,
+      number=51, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='outputGateRecursionMatrix', full_name='CoreML.Specification.GRULayerParams.outputGateRecursionMatrix', index=10,
+      number=52, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='updateGateBiasVector', full_name='CoreML.Specification.GRULayerParams.updateGateBiasVector', index=11,
+      number=70, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='resetGateBiasVector', full_name='CoreML.Specification.GRULayerParams.resetGateBiasVector', index=12,
+      number=71, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='outputGateBiasVector', full_name='CoreML.Specification.GRULayerParams.outputGateBiasVector', index=13,
+      number=72, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='reverseInput', full_name='CoreML.Specification.GRULayerParams.reverseInput', index=14,
+      number=100, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=22590,
+  serialized_end=23400,
+)
+
+
+_LSTMPARAMS = _descriptor.Descriptor(
+  name='LSTMParams',
+  full_name='CoreML.Specification.LSTMParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='sequenceOutput', full_name='CoreML.Specification.LSTMParams.sequenceOutput', index=0,
+      number=10, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='hasBiasVectors', full_name='CoreML.Specification.LSTMParams.hasBiasVectors', index=1,
+      number=20, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='forgetBias', full_name='CoreML.Specification.LSTMParams.forgetBias', index=2,
+      number=30, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='hasPeepholeVectors', full_name='CoreML.Specification.LSTMParams.hasPeepholeVectors', index=3,
+      number=40, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='coupledInputAndForgetGate', full_name='CoreML.Specification.LSTMParams.coupledInputAndForgetGate', index=4,
+      number=50, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='cellClipThreshold', full_name='CoreML.Specification.LSTMParams.cellClipThreshold', index=5,
+      number=60, type=2, cpp_type=6, label=1,
+      has_default_value=False, default_value=float(0),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=23403,
+  serialized_end=23573,
+)
+
+
+_LSTMWEIGHTPARAMS = _descriptor.Descriptor(
+  name='LSTMWeightParams',
+  full_name='CoreML.Specification.LSTMWeightParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='inputGateWeightMatrix', full_name='CoreML.Specification.LSTMWeightParams.inputGateWeightMatrix', index=0,
+      number=1, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='forgetGateWeightMatrix', full_name='CoreML.Specification.LSTMWeightParams.forgetGateWeightMatrix', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='blockInputWeightMatrix', full_name='CoreML.Specification.LSTMWeightParams.blockInputWeightMatrix', index=2,
+      number=3, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='outputGateWeightMatrix', full_name='CoreML.Specification.LSTMWeightParams.outputGateWeightMatrix', index=3,
+      number=4, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='inputGateRecursionMatrix', full_name='CoreML.Specification.LSTMWeightParams.inputGateRecursionMatrix', index=4,
+      number=20, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='forgetGateRecursionMatrix', full_name='CoreML.Specification.LSTMWeightParams.forgetGateRecursionMatrix', index=5,
+      number=21, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='blockInputRecursionMatrix', full_name='CoreML.Specification.LSTMWeightParams.blockInputRecursionMatrix', index=6,
+      number=22, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='outputGateRecursionMatrix', full_name='CoreML.Specification.LSTMWeightParams.outputGateRecursionMatrix', index=7,
+      number=23, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='inputGateBiasVector', full_name='CoreML.Specification.LSTMWeightParams.inputGateBiasVector', index=8,
+      number=40, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='forgetGateBiasVector', full_name='CoreML.Specification.LSTMWeightParams.forgetGateBiasVector', index=9,
+      number=41, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='blockInputBiasVector', full_name='CoreML.Specification.LSTMWeightParams.blockInputBiasVector', index=10,
+      number=42, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='outputGateBiasVector', full_name='CoreML.Specification.LSTMWeightParams.outputGateBiasVector', index=11,
+      number=43, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='inputGatePeepholeVector', full_name='CoreML.Specification.LSTMWeightParams.inputGatePeepholeVector', index=12,
+      number=60, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='forgetGatePeepholeVector', full_name='CoreML.Specification.LSTMWeightParams.forgetGatePeepholeVector', index=13,
+      number=61, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='outputGatePeepholeVector', full_name='CoreML.Specification.LSTMWeightParams.outputGatePeepholeVector', index=14,
+      number=62, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=23576,
+  serialized_end=24620,
+)
+
+
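+# Annotation (editorial, not emitted by protoc): LSTMWeightParams groups its field
+# numbers by decade -- 1-4 for the four gate weight matrices, 20-23 for the
+# recursion (hidden-state) matrices, 40-43 for the bias vectors, and 60-62 for the
+# peephole vectors -- presumably leaving numbering room for future additions.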
+_UNIDIRECTIONALLSTMLAYERPARAMS = _descriptor.Descriptor(
+  name='UniDirectionalLSTMLayerParams',
+  full_name='CoreML.Specification.UniDirectionalLSTMLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='inputVectorSize', full_name='CoreML.Specification.UniDirectionalLSTMLayerParams.inputVectorSize', index=0,
+      number=1, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='outputVectorSize', full_name='CoreML.Specification.UniDirectionalLSTMLayerParams.outputVectorSize', index=1,
+      number=2, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='activations', full_name='CoreML.Specification.UniDirectionalLSTMLayerParams.activations', index=2,
+      number=10, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='params', full_name='CoreML.Specification.UniDirectionalLSTMLayerParams.params', index=3,
+      number=15, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='weightParams', full_name='CoreML.Specification.UniDirectionalLSTMLayerParams.weightParams', index=4,
+      number=20, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='reverseInput', full_name='CoreML.Specification.UniDirectionalLSTMLayerParams.reverseInput', index=5,
+      number=100, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=24623,
+  serialized_end=24900,
+)
+
+
+_BIDIRECTIONALLSTMLAYERPARAMS = _descriptor.Descriptor(
+  name='BiDirectionalLSTMLayerParams',
+  full_name='CoreML.Specification.BiDirectionalLSTMLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='inputVectorSize', full_name='CoreML.Specification.BiDirectionalLSTMLayerParams.inputVectorSize', index=0,
+      number=1, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='outputVectorSize', full_name='CoreML.Specification.BiDirectionalLSTMLayerParams.outputVectorSize', index=1,
+      number=2, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='activationsForwardLSTM', full_name='CoreML.Specification.BiDirectionalLSTMLayerParams.activationsForwardLSTM', index=2,
+      number=10, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='activationsBackwardLSTM', full_name='CoreML.Specification.BiDirectionalLSTMLayerParams.activationsBackwardLSTM', index=3,
+      number=11, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='params', full_name='CoreML.Specification.BiDirectionalLSTMLayerParams.params', index=4,
+      number=15, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='weightParams', full_name='CoreML.Specification.BiDirectionalLSTMLayerParams.weightParams', index=5,
+      number=20, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=24903,
+  serialized_end=25241,
+)
+
+
+_CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE = _descriptor.Descriptor(
+  name='CustomLayerParamValue',
+  full_name='CoreML.Specification.CustomLayerParams.CustomLayerParamValue',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='doubleValue', full_name='CoreML.Specification.CustomLayerParams.CustomLayerParamValue.doubleValue', index=0,
+      number=10, type=1, cpp_type=5, label=1,
+      has_default_value=False, default_value=float(0),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='stringValue', full_name='CoreML.Specification.CustomLayerParams.CustomLayerParamValue.stringValue', index=1,
+      number=20, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='intValue', full_name='CoreML.Specification.CustomLayerParams.CustomLayerParamValue.intValue', index=2,
+      number=30, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='longValue', full_name='CoreML.Specification.CustomLayerParams.CustomLayerParamValue.longValue', index=3,
+      number=40, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='boolValue', full_name='CoreML.Specification.CustomLayerParams.CustomLayerParamValue.boolValue', index=4,
+      number=50, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+    _descriptor.OneofDescriptor(
+      name='value', full_name='CoreML.Specification.CustomLayerParams.CustomLayerParamValue.value',
+      index=0, containing_type=None, fields=[]),
+  ],
+  serialized_start=25436,
+  serialized_end=25576,
+)
+
+_CUSTOMLAYERPARAMS_PARAMETERSENTRY = _descriptor.Descriptor(
+  name='ParametersEntry',
+  full_name='CoreML.Specification.CustomLayerParams.ParametersEntry',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='key', full_name='CoreML.Specification.CustomLayerParams.ParametersEntry.key', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='value', full_name='CoreML.Specification.CustomLayerParams.ParametersEntry.value', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=25578,
+  serialized_end=25690,
+)
+
+_CUSTOMLAYERPARAMS = _descriptor.Descriptor(
+  name='CustomLayerParams',
+  full_name='CoreML.Specification.CustomLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='className', full_name='CoreML.Specification.CustomLayerParams.className', index=0,
+      number=10, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='weights', full_name='CoreML.Specification.CustomLayerParams.weights', index=1,
+      number=20, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='parameters', full_name='CoreML.Specification.CustomLayerParams.parameters', index=2,
+      number=30, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='description', full_name='CoreML.Specification.CustomLayerParams.description', index=3,
+      number=40, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[_CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE, _CUSTOMLAYERPARAMS_PARAMETERSENTRY, ],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=25244,
+  serialized_end=25690,
+)
+
+
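+# Annotation (editorial, not emitted by protoc): CustomLayerParams.parameters is a
+# proto3 map<string, CustomLayerParamValue>; the generator models it with the
+# synthetic ParametersEntry message above, whose serialized MessageOptions blob
+# '8\001' is the varint encoding of map_entry=true (field 7 of MessageOptions).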
+_TRANSPOSELAYERPARAMS = _descriptor.Descriptor(
+  name='TransposeLayerParams',
+  full_name='CoreML.Specification.TransposeLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='axes', full_name='CoreML.Specification.TransposeLayerParams.axes', index=0,
+      number=1, type=4, cpp_type=4, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=25692,
+  serialized_end=25728,
+)
+
+
+_BATCHEDMATMULLAYERPARAMS = _descriptor.Descriptor(
+  name='BatchedMatMulLayerParams',
+  full_name='CoreML.Specification.BatchedMatMulLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='transposeA', full_name='CoreML.Specification.BatchedMatMulLayerParams.transposeA', index=0,
+      number=1, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='transposeB', full_name='CoreML.Specification.BatchedMatMulLayerParams.transposeB', index=1,
+      number=2, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='weightMatrixFirstDimension', full_name='CoreML.Specification.BatchedMatMulLayerParams.weightMatrixFirstDimension', index=2,
+      number=5, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='weightMatrixSecondDimension', full_name='CoreML.Specification.BatchedMatMulLayerParams.weightMatrixSecondDimension', index=3,
+      number=6, type=4, cpp_type=4, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='hasBias', full_name='CoreML.Specification.BatchedMatMulLayerParams.hasBias', index=4,
+      number=7, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='weights', full_name='CoreML.Specification.BatchedMatMulLayerParams.weights', index=5,
+      number=8, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='bias', full_name='CoreML.Specification.BatchedMatMulLayerParams.bias', index=6,
+      number=9, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='int8DynamicQuantize', full_name='CoreML.Specification.BatchedMatMulLayerParams.int8DynamicQuantize', index=7,
+      number=10, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=25731,
+  serialized_end=26019,
+)
+
+
+_CONCATNDLAYERPARAMS = _descriptor.Descriptor(
+  name='ConcatNDLayerParams',
+  full_name='CoreML.Specification.ConcatNDLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='axis', full_name='CoreML.Specification.ConcatNDLayerParams.axis', index=0,
+      number=1, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='interleave', full_name='CoreML.Specification.ConcatNDLayerParams.interleave', index=1,
+      number=2, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=26021,
+  serialized_end=26076,
+)
+
+
+_SOFTMAXNDLAYERPARAMS = _descriptor.Descriptor(
+  name='SoftmaxNDLayerParams',
+  full_name='CoreML.Specification.SoftmaxNDLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='axis', full_name='CoreML.Specification.SoftmaxNDLayerParams.axis', index=0,
+      number=1, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=26078,
+  serialized_end=26114,
+)
+
+
+_REVERSELAYERPARAMS = _descriptor.Descriptor(
+  name='ReverseLayerParams',
+  full_name='CoreML.Specification.ReverseLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='reverseDim', full_name='CoreML.Specification.ReverseLayerParams.reverseDim', index=0,
+      number=1, type=8, cpp_type=7, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=26116,
+  serialized_end=26156,
+)
+
+
+_REVERSESEQLAYERPARAMS = _descriptor.Descriptor(
+  name='ReverseSeqLayerParams',
+  full_name='CoreML.Specification.ReverseSeqLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='batchAxis', full_name='CoreML.Specification.ReverseSeqLayerParams.batchAxis', index=0,
+      number=1, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='sequenceAxis', full_name='CoreML.Specification.ReverseSeqLayerParams.sequenceAxis', index=1,
+      number=2, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=26158,
+  serialized_end=26222,
+)
+
+
+_LOADCONSTANTNDLAYERPARAMS = _descriptor.Descriptor(
+  name='LoadConstantNDLayerParams',
+  full_name='CoreML.Specification.LoadConstantNDLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='shape', full_name='CoreML.Specification.LoadConstantNDLayerParams.shape', index=0,
+      number=1, type=4, cpp_type=4, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='data', full_name='CoreML.Specification.LoadConstantNDLayerParams.data', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=26224,
+  serialized_end=26316,
+)
+
+
+_FILLLIKELAYERPARAMS = _descriptor.Descriptor(
+  name='FillLikeLayerParams',
+  full_name='CoreML.Specification.FillLikeLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='value', full_name='CoreML.Specification.FillLikeLayerParams.value', index=0,
+      number=1, type=2, cpp_type=6, label=1,
+      has_default_value=False, default_value=float(0),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=26318,
+  serialized_end=26354,
+)
+
+
+_FILLSTATICLAYERPARAMS = _descriptor.Descriptor(
+  name='FillStaticLayerParams',
+  full_name='CoreML.Specification.FillStaticLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='value', full_name='CoreML.Specification.FillStaticLayerParams.value', index=0,
+      number=1, type=2, cpp_type=6, label=1,
+      has_default_value=False, default_value=float(0),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='targetShape', full_name='CoreML.Specification.FillStaticLayerParams.targetShape', index=1,
+      number=2, type=4, cpp_type=4, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=26356,
+  serialized_end=26415,
+)
+
+
+_FILLDYNAMICLAYERPARAMS = _descriptor.Descriptor(
+  name='FillDynamicLayerParams',
+  full_name='CoreML.Specification.FillDynamicLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='value', full_name='CoreML.Specification.FillDynamicLayerParams.value', index=0,
+      number=1, type=2, cpp_type=6, label=1,
+      has_default_value=False, default_value=float(0),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=26417,
+  serialized_end=26456,
+)
+
+
+_WHEREBROADCASTABLELAYERPARAMS = _descriptor.Descriptor(
+  name='WhereBroadcastableLayerParams',
+  full_name='CoreML.Specification.WhereBroadcastableLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=26458,
+  serialized_end=26489,
+)
+
+
+_SINLAYERPARAMS = _descriptor.Descriptor(
+  name='SinLayerParams',
+  full_name='CoreML.Specification.SinLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=26491,
+  serialized_end=26507,
+)
+
+
+_COSLAYERPARAMS = _descriptor.Descriptor(
+  name='CosLayerParams',
+  full_name='CoreML.Specification.CosLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=26509,
+  serialized_end=26525,
+)
+
+
+_TANLAYERPARAMS = _descriptor.Descriptor(
+  name='TanLayerParams',
+  full_name='CoreML.Specification.TanLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=26527,
+  serialized_end=26543,
+)
+
+
+_ASINLAYERPARAMS = _descriptor.Descriptor(
+  name='AsinLayerParams',
+  full_name='CoreML.Specification.AsinLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=26545,
+  serialized_end=26562,
+)
+
+
+_ACOSLAYERPARAMS = _descriptor.Descriptor(
+  name='AcosLayerParams',
+  full_name='CoreML.Specification.AcosLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=26564,
+  serialized_end=26581,
+)
+
+
+_ATANLAYERPARAMS = _descriptor.Descriptor(
+  name='AtanLayerParams',
+  full_name='CoreML.Specification.AtanLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=26583,
+  serialized_end=26600,
+)
+
+
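+# Annotation (editorial, not emitted by protoc): SinLayerParams through
+# AtanLayerParams above, and the hyperbolic variants below, are empty messages;
+# the unary op carries no parameters, so the message's presence in the enclosing
+# layer oneof is presumably what selects the operation.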
+_SINHLAYERPARAMS = _descriptor.Descriptor(
+  name='SinhLayerParams',
+  full_name='CoreML.Specification.SinhLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=26602,
+  serialized_end=26619,
+)
+
+
+_COSHLAYERPARAMS = _descriptor.Descriptor(
+  name='CoshLayerParams',
+  full_name='CoreML.Specification.CoshLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=26621,
+  serialized_end=26638,
+)
+
+
+_TANHLAYERPARAMS = _descriptor.Descriptor(
+  name='TanhLayerParams',
+  full_name='CoreML.Specification.TanhLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=26640,
+  serialized_end=26657,
+)
+
+
+_ASINHLAYERPARAMS = _descriptor.Descriptor(
+  name='AsinhLayerParams',
+  full_name='CoreML.Specification.AsinhLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=26659,
+  serialized_end=26677,
+)
+
+
+_ACOSHLAYERPARAMS = _descriptor.Descriptor(
+  name='AcoshLayerParams',
+  full_name='CoreML.Specification.AcoshLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=26679,
+  serialized_end=26697,
+)
+
+
+_ATANHLAYERPARAMS = _descriptor.Descriptor(
+  name='AtanhLayerParams',
+  full_name='CoreML.Specification.AtanhLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=26699,
+  serialized_end=26717,
+)
+
+
+_POWBROADCASTABLELAYERPARAMS = _descriptor.Descriptor(
+  name='PowBroadcastableLayerParams',
+  full_name='CoreML.Specification.PowBroadcastableLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=26719,
+  serialized_end=26748,
+)
+
+
+_EXP2LAYERPARAMS = _descriptor.Descriptor(
+  name='Exp2LayerParams',
+  full_name='CoreML.Specification.Exp2LayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=26750,
+  serialized_end=26767,
+)
+
+
+_WHERENONZEROLAYERPARAMS = _descriptor.Descriptor(
+  name='WhereNonZeroLayerParams',
+  full_name='CoreML.Specification.WhereNonZeroLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=26769,
+  serialized_end=26794,
+)
+
+
+_MATRIXBANDPARTLAYERPARAMS = _descriptor.Descriptor(
+  name='MatrixBandPartLayerParams',
+  full_name='CoreML.Specification.MatrixBandPartLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='numLower', full_name='CoreML.Specification.MatrixBandPartLayerParams.numLower', index=0,
+      number=1, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='numUpper', full_name='CoreML.Specification.MatrixBandPartLayerParams.numUpper', index=1,
+      number=2, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=26796,
+  serialized_end=26859,
+)
+
+
+_UPPERTRIANGULARLAYERPARAMS = _descriptor.Descriptor(
+  name='UpperTriangularLayerParams',
+  full_name='CoreML.Specification.UpperTriangularLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='k', full_name='CoreML.Specification.UpperTriangularLayerParams.k', index=0,
+      number=1, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=26861,
+  serialized_end=26900,
+)
+
+
+_LOWERTRIANGULARLAYERPARAMS = _descriptor.Descriptor(
+  name='LowerTriangularLayerParams',
+  full_name='CoreML.Specification.LowerTriangularLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='k', full_name='CoreML.Specification.LowerTriangularLayerParams.k', index=0,
+      number=1, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=26902,
+  serialized_end=26941,
+)
+
+
+_BROADCASTTOLIKELAYERPARAMS = _descriptor.Descriptor(
+  name='BroadcastToLikeLayerParams',
+  full_name='CoreML.Specification.BroadcastToLikeLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=26943,
+  serialized_end=26971,
+)
+
+
+_BROADCASTTOSTATICLAYERPARAMS = _descriptor.Descriptor(
+  name='BroadcastToStaticLayerParams',
+  full_name='CoreML.Specification.BroadcastToStaticLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='targetShape', full_name='CoreML.Specification.BroadcastToStaticLayerParams.targetShape', index=0,
+      number=1, type=4, cpp_type=4, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=26973,
+  serialized_end=27024,
+)
+
+
+_BROADCASTTODYNAMICLAYERPARAMS = _descriptor.Descriptor(
+  name='BroadcastToDynamicLayerParams',
+  full_name='CoreML.Specification.BroadcastToDynamicLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=27026,
+  serialized_end=27057,
+)
+
+
+_ADDBROADCASTABLELAYERPARAMS = _descriptor.Descriptor(
+  name='AddBroadcastableLayerParams',
+  full_name='CoreML.Specification.AddBroadcastableLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=27059,
+  serialized_end=27088,
+)
+
+
+_MAXBROADCASTABLELAYERPARAMS = _descriptor.Descriptor(
+  name='MaxBroadcastableLayerParams',
+  full_name='CoreML.Specification.MaxBroadcastableLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=27090,
+  serialized_end=27119,
+)
+
+
+_MINBROADCASTABLELAYERPARAMS = _descriptor.Descriptor(
+  name='MinBroadcastableLayerParams',
+  full_name='CoreML.Specification.MinBroadcastableLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=27121,
+  serialized_end=27150,
+)
+
+
+_MODBROADCASTABLELAYERPARAMS = _descriptor.Descriptor(
+  name='ModBroadcastableLayerParams',
+  full_name='CoreML.Specification.ModBroadcastableLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=27152,
+  serialized_end=27181,
+)
+
+
+_FLOORDIVBROADCASTABLELAYERPARAMS = _descriptor.Descriptor(
+  name='FloorDivBroadcastableLayerParams',
+  full_name='CoreML.Specification.FloorDivBroadcastableLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=27183,
+  serialized_end=27217,
+)
+
+
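+# Annotation (editorial, not emitted by protoc): the *Broadcastable layer params
+# in this stretch (Add/Max/Min/Mod/FloorDiv/Subtract/Multiply/Divide) are likewise
+# parameterless markers; judging by the names, operand shapes are reconciled by
+# numpy-style broadcasting at runtime rather than via descriptor fields.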
+_SUBTRACTBROADCASTABLELAYERPARAMS = _descriptor.Descriptor(
+  name='SubtractBroadcastableLayerParams',
+  full_name='CoreML.Specification.SubtractBroadcastableLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=27219,
+  serialized_end=27253,
+)
+
+
+_MULTIPLYBROADCASTABLELAYERPARAMS = _descriptor.Descriptor(
+  name='MultiplyBroadcastableLayerParams',
+  full_name='CoreML.Specification.MultiplyBroadcastableLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=27255,
+  serialized_end=27289,
+)
+
+
+_DIVIDEBROADCASTABLELAYERPARAMS = _descriptor.Descriptor(
+  name='DivideBroadcastableLayerParams',
+  full_name='CoreML.Specification.DivideBroadcastableLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=27291,
+  serialized_end=27323,
+)
+
+
+_GATHERLAYERPARAMS = _descriptor.Descriptor(
+  name='GatherLayerParams',
+  full_name='CoreML.Specification.GatherLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='axis', full_name='CoreML.Specification.GatherLayerParams.axis', index=0,
+      number=1, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=27325,
+  serialized_end=27358,
+)
+
+
+_SCATTERLAYERPARAMS = _descriptor.Descriptor(
+  name='ScatterLayerParams',
+  full_name='CoreML.Specification.ScatterLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='axis', full_name='CoreML.Specification.ScatterLayerParams.axis', index=0,
+      number=1, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='mode', full_name='CoreML.Specification.ScatterLayerParams.mode', index=1,
+      number=2, type=14, cpp_type=8, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=27360,
+  serialized_end=27443,
+)
+
+
+_GATHERNDLAYERPARAMS = _descriptor.Descriptor(
+  name='GatherNDLayerParams',
+  full_name='CoreML.Specification.GatherNDLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=27445,
+  serialized_end=27466,
+)
+
+
+_SCATTERNDLAYERPARAMS = _descriptor.Descriptor(
+  name='ScatterNDLayerParams',
+  full_name='CoreML.Specification.ScatterNDLayerParams',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='mode', full_name='CoreML.Specification.ScatterNDLayerParams.mode', index=0,
full_name='CoreML.Specification.ScatterNDLayerParams.mode', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=27468, + serialized_end=27539, +) + + +_GATHERALONGAXISLAYERPARAMS = _descriptor.Descriptor( + name='GatherAlongAxisLayerParams', + full_name='CoreML.Specification.GatherAlongAxisLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axis', full_name='CoreML.Specification.GatherAlongAxisLayerParams.axis', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=27541, + serialized_end=27583, +) + + +_SCATTERALONGAXISLAYERPARAMS = _descriptor.Descriptor( + name='ScatterAlongAxisLayerParams', + full_name='CoreML.Specification.ScatterAlongAxisLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axis', full_name='CoreML.Specification.ScatterAlongAxisLayerParams.axis', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mode', full_name='CoreML.Specification.ScatterAlongAxisLayerParams.mode', index=1, + number=2, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=27585, + serialized_end=27677, +) + + +_STACKLAYERPARAMS = _descriptor.Descriptor( + name='StackLayerParams', + full_name='CoreML.Specification.StackLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axis', full_name='CoreML.Specification.StackLayerParams.axis', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=27679, + serialized_end=27711, +) + + +_RANKPRESERVINGRESHAPELAYERPARAMS = _descriptor.Descriptor( + name='RankPreservingReshapeLayerParams', + full_name='CoreML.Specification.RankPreservingReshapeLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='targetShape', full_name='CoreML.Specification.RankPreservingReshapeLayerParams.targetShape', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, 
default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=27713, + serialized_end=27768, +) + + +_CONSTANTPADDINGLAYERPARAMS = _descriptor.Descriptor( + name='ConstantPaddingLayerParams', + full_name='CoreML.Specification.ConstantPaddingLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.ConstantPaddingLayerParams.value', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='padAmounts', full_name='CoreML.Specification.ConstantPaddingLayerParams.padAmounts', index=1, + number=2, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='padToGivenOutputSizeMode', full_name='CoreML.Specification.ConstantPaddingLayerParams.padToGivenOutputSizeMode', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=27770, + serialized_end=27867, +) + + +_RANDOMNORMALLIKELAYERPARAMS = _descriptor.Descriptor( + name='RandomNormalLikeLayerParams', + full_name='CoreML.Specification.RandomNormalLikeLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='seed', full_name='CoreML.Specification.RandomNormalLikeLayerParams.seed', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mean', full_name='CoreML.Specification.RandomNormalLikeLayerParams.mean', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stdDev', full_name='CoreML.Specification.RandomNormalLikeLayerParams.stdDev', index=2, + number=3, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=27869, + serialized_end=27942, +) + + +_RANDOMNORMALSTATICLAYERPARAMS = _descriptor.Descriptor( + name='RandomNormalStaticLayerParams', + full_name='CoreML.Specification.RandomNormalStaticLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + 
_descriptor.FieldDescriptor( + name='seed', full_name='CoreML.Specification.RandomNormalStaticLayerParams.seed', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mean', full_name='CoreML.Specification.RandomNormalStaticLayerParams.mean', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stdDev', full_name='CoreML.Specification.RandomNormalStaticLayerParams.stdDev', index=2, + number=3, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='outputShape', full_name='CoreML.Specification.RandomNormalStaticLayerParams.outputShape', index=3, + number=4, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=27944, + serialized_end=28040, +) + + +_RANDOMNORMALDYNAMICLAYERPARAMS = _descriptor.Descriptor( + name='RandomNormalDynamicLayerParams', + full_name='CoreML.Specification.RandomNormalDynamicLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='seed', full_name='CoreML.Specification.RandomNormalDynamicLayerParams.seed', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mean', full_name='CoreML.Specification.RandomNormalDynamicLayerParams.mean', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stdDev', full_name='CoreML.Specification.RandomNormalDynamicLayerParams.stdDev', index=2, + number=3, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=28042, + serialized_end=28118, +) + + +_RANDOMUNIFORMLIKELAYERPARAMS = _descriptor.Descriptor( + name='RandomUniformLikeLayerParams', + full_name='CoreML.Specification.RandomUniformLikeLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='seed', full_name='CoreML.Specification.RandomUniformLikeLayerParams.seed', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, 
extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='minVal', full_name='CoreML.Specification.RandomUniformLikeLayerParams.minVal', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='maxVal', full_name='CoreML.Specification.RandomUniformLikeLayerParams.maxVal', index=2, + number=3, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=28120, + serialized_end=28196, +) + + +_RANDOMUNIFORMSTATICLAYERPARAMS = _descriptor.Descriptor( + name='RandomUniformStaticLayerParams', + full_name='CoreML.Specification.RandomUniformStaticLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='seed', full_name='CoreML.Specification.RandomUniformStaticLayerParams.seed', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='minVal', full_name='CoreML.Specification.RandomUniformStaticLayerParams.minVal', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='maxVal', full_name='CoreML.Specification.RandomUniformStaticLayerParams.maxVal', index=2, + number=3, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='outputShape', full_name='CoreML.Specification.RandomUniformStaticLayerParams.outputShape', index=3, + number=4, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=28198, + serialized_end=28297, +) + + +_RANDOMUNIFORMDYNAMICLAYERPARAMS = _descriptor.Descriptor( + name='RandomUniformDynamicLayerParams', + full_name='CoreML.Specification.RandomUniformDynamicLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='seed', full_name='CoreML.Specification.RandomUniformDynamicLayerParams.seed', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='minVal', full_name='CoreML.Specification.RandomUniformDynamicLayerParams.minVal', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + 
message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='maxVal', full_name='CoreML.Specification.RandomUniformDynamicLayerParams.maxVal', index=2, + number=3, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=28299, + serialized_end=28378, +) + + +_RANDOMBERNOULLILIKELAYERPARAMS = _descriptor.Descriptor( + name='RandomBernoulliLikeLayerParams', + full_name='CoreML.Specification.RandomBernoulliLikeLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='seed', full_name='CoreML.Specification.RandomBernoulliLikeLayerParams.seed', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='prob', full_name='CoreML.Specification.RandomBernoulliLikeLayerParams.prob', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=28380, + serialized_end=28440, +) + + +_RANDOMBERNOULLISTATICLAYERPARAMS = _descriptor.Descriptor( + name='RandomBernoulliStaticLayerParams', + full_name='CoreML.Specification.RandomBernoulliStaticLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='seed', full_name='CoreML.Specification.RandomBernoulliStaticLayerParams.seed', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='prob', full_name='CoreML.Specification.RandomBernoulliStaticLayerParams.prob', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='outputShape', full_name='CoreML.Specification.RandomBernoulliStaticLayerParams.outputShape', index=2, + number=3, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=28442, + serialized_end=28525, +) + + +_RANDOMBERNOULLIDYNAMICLAYERPARAMS = _descriptor.Descriptor( + name='RandomBernoulliDynamicLayerParams', + full_name='CoreML.Specification.RandomBernoulliDynamicLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + 
_descriptor.FieldDescriptor( + name='seed', full_name='CoreML.Specification.RandomBernoulliDynamicLayerParams.seed', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='prob', full_name='CoreML.Specification.RandomBernoulliDynamicLayerParams.prob', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=28527, + serialized_end=28590, +) + + +_CATEGORICALDISTRIBUTIONLAYERPARAMS = _descriptor.Descriptor( + name='CategoricalDistributionLayerParams', + full_name='CoreML.Specification.CategoricalDistributionLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='seed', full_name='CoreML.Specification.CategoricalDistributionLayerParams.seed', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='numSamples', full_name='CoreML.Specification.CategoricalDistributionLayerParams.numSamples', index=1, + number=2, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='isLogits', full_name='CoreML.Specification.CategoricalDistributionLayerParams.isLogits', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='eps', full_name='CoreML.Specification.CategoricalDistributionLayerParams.eps', index=3, + number=4, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='temperature', full_name='CoreML.Specification.CategoricalDistributionLayerParams.temperature', index=4, + number=5, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=28592, + serialized_end=28714, +) + + +_REDUCEL1LAYERPARAMS = _descriptor.Descriptor( + name='ReduceL1LayerParams', + full_name='CoreML.Specification.ReduceL1LayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axes', full_name='CoreML.Specification.ReduceL1LayerParams.axes', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='keepDims', full_name='CoreML.Specification.ReduceL1LayerParams.keepDims', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reduceAll', full_name='CoreML.Specification.ReduceL1LayerParams.reduceAll', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=28716, + serialized_end=28788, +) + + +_REDUCEL2LAYERPARAMS = _descriptor.Descriptor( + name='ReduceL2LayerParams', + full_name='CoreML.Specification.ReduceL2LayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axes', full_name='CoreML.Specification.ReduceL2LayerParams.axes', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='keepDims', full_name='CoreML.Specification.ReduceL2LayerParams.keepDims', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reduceAll', full_name='CoreML.Specification.ReduceL2LayerParams.reduceAll', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=28790, + serialized_end=28862, +) + + +_REDUCEMAXLAYERPARAMS = _descriptor.Descriptor( + name='ReduceMaxLayerParams', + full_name='CoreML.Specification.ReduceMaxLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axes', full_name='CoreML.Specification.ReduceMaxLayerParams.axes', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='keepDims', full_name='CoreML.Specification.ReduceMaxLayerParams.keepDims', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reduceAll', full_name='CoreML.Specification.ReduceMaxLayerParams.reduceAll', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + 
extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=28864, + serialized_end=28937, +) + + +_REDUCEMINLAYERPARAMS = _descriptor.Descriptor( + name='ReduceMinLayerParams', + full_name='CoreML.Specification.ReduceMinLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axes', full_name='CoreML.Specification.ReduceMinLayerParams.axes', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='keepDims', full_name='CoreML.Specification.ReduceMinLayerParams.keepDims', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reduceAll', full_name='CoreML.Specification.ReduceMinLayerParams.reduceAll', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=28939, + serialized_end=29012, +) + + +_REDUCESUMLAYERPARAMS = _descriptor.Descriptor( + name='ReduceSumLayerParams', + full_name='CoreML.Specification.ReduceSumLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axes', full_name='CoreML.Specification.ReduceSumLayerParams.axes', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='keepDims', full_name='CoreML.Specification.ReduceSumLayerParams.keepDims', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reduceAll', full_name='CoreML.Specification.ReduceSumLayerParams.reduceAll', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=29014, + serialized_end=29087, +) + + +_REDUCEPRODLAYERPARAMS = _descriptor.Descriptor( + name='ReduceProdLayerParams', + full_name='CoreML.Specification.ReduceProdLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axes', full_name='CoreML.Specification.ReduceProdLayerParams.axes', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
options=None), + _descriptor.FieldDescriptor( + name='keepDims', full_name='CoreML.Specification.ReduceProdLayerParams.keepDims', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reduceAll', full_name='CoreML.Specification.ReduceProdLayerParams.reduceAll', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=29089, + serialized_end=29163, +) + + +_REDUCEMEANLAYERPARAMS = _descriptor.Descriptor( + name='ReduceMeanLayerParams', + full_name='CoreML.Specification.ReduceMeanLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axes', full_name='CoreML.Specification.ReduceMeanLayerParams.axes', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='keepDims', full_name='CoreML.Specification.ReduceMeanLayerParams.keepDims', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reduceAll', full_name='CoreML.Specification.ReduceMeanLayerParams.reduceAll', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=29165, + serialized_end=29239, +) + + +_REDUCELOGSUMLAYERPARAMS = _descriptor.Descriptor( + name='ReduceLogSumLayerParams', + full_name='CoreML.Specification.ReduceLogSumLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axes', full_name='CoreML.Specification.ReduceLogSumLayerParams.axes', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='keepDims', full_name='CoreML.Specification.ReduceLogSumLayerParams.keepDims', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reduceAll', full_name='CoreML.Specification.ReduceLogSumLayerParams.reduceAll', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + 
], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=29241, + serialized_end=29317, +) + + +_REDUCESUMSQUARELAYERPARAMS = _descriptor.Descriptor( + name='ReduceSumSquareLayerParams', + full_name='CoreML.Specification.ReduceSumSquareLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axes', full_name='CoreML.Specification.ReduceSumSquareLayerParams.axes', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='keepDims', full_name='CoreML.Specification.ReduceSumSquareLayerParams.keepDims', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reduceAll', full_name='CoreML.Specification.ReduceSumSquareLayerParams.reduceAll', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=29319, + serialized_end=29398, +) + + +_REDUCELOGSUMEXPLAYERPARAMS = _descriptor.Descriptor( + name='ReduceLogSumExpLayerParams', + full_name='CoreML.Specification.ReduceLogSumExpLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axes', full_name='CoreML.Specification.ReduceLogSumExpLayerParams.axes', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='keepDims', full_name='CoreML.Specification.ReduceLogSumExpLayerParams.keepDims', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reduceAll', full_name='CoreML.Specification.ReduceLogSumExpLayerParams.reduceAll', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=29400, + serialized_end=29479, +) + + +_EXPANDDIMSLAYERPARAMS = _descriptor.Descriptor( + name='ExpandDimsLayerParams', + full_name='CoreML.Specification.ExpandDimsLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axes', full_name='CoreML.Specification.ExpandDimsLayerParams.axes', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, 
containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=29481, + serialized_end=29518, +) + + +_FLATTENTO2DLAYERPARAMS = _descriptor.Descriptor( + name='FlattenTo2DLayerParams', + full_name='CoreML.Specification.FlattenTo2DLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axis', full_name='CoreML.Specification.FlattenTo2DLayerParams.axis', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=29520, + serialized_end=29558, +) + + +_RESHAPESTATICLAYERPARAMS = _descriptor.Descriptor( + name='ReshapeStaticLayerParams', + full_name='CoreML.Specification.ReshapeStaticLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='targetShape', full_name='CoreML.Specification.ReshapeStaticLayerParams.targetShape', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=29560, + serialized_end=29607, +) + + +_RESHAPELIKELAYERPARAMS = _descriptor.Descriptor( + name='ReshapeLikeLayerParams', + full_name='CoreML.Specification.ReshapeLikeLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=29609, + serialized_end=29633, +) + + +_RESHAPEDYNAMICLAYERPARAMS = _descriptor.Descriptor( + name='ReshapeDynamicLayerParams', + full_name='CoreML.Specification.ReshapeDynamicLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=29635, + serialized_end=29662, +) + + +_SQUEEZELAYERPARAMS = _descriptor.Descriptor( + name='SqueezeLayerParams', + full_name='CoreML.Specification.SqueezeLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axes', full_name='CoreML.Specification.SqueezeLayerParams.axes', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='squeezeAll', full_name='CoreML.Specification.SqueezeLayerParams.squeezeAll', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=29664, + serialized_end=29718, +) + + +_TOPKLAYERPARAMS = _descriptor.Descriptor( + name='TopKLayerParams', + full_name='CoreML.Specification.TopKLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axis', full_name='CoreML.Specification.TopKLayerParams.axis', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='K', full_name='CoreML.Specification.TopKLayerParams.K', index=1, + number=2, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='useBottomK', full_name='CoreML.Specification.TopKLayerParams.useBottomK', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=29720, + serialized_end=29782, +) + + +_ARGMAXLAYERPARAMS = _descriptor.Descriptor( + name='ArgMaxLayerParams', + full_name='CoreML.Specification.ArgMaxLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axis', full_name='CoreML.Specification.ArgMaxLayerParams.axis', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='removeDim', full_name='CoreML.Specification.ArgMaxLayerParams.removeDim', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=29784, + serialized_end=29836, +) + + +_ARGMINLAYERPARAMS = _descriptor.Descriptor( + name='ArgMinLayerParams', + full_name='CoreML.Specification.ArgMinLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axis', full_name='CoreML.Specification.ArgMinLayerParams.axis', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='removeDim', full_name='CoreML.Specification.ArgMinLayerParams.removeDim', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], 
+ enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=29838, + serialized_end=29890, +) + + +_SPLITNDLAYERPARAMS = _descriptor.Descriptor( + name='SplitNDLayerParams', + full_name='CoreML.Specification.SplitNDLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axis', full_name='CoreML.Specification.SplitNDLayerParams.axis', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='numSplits', full_name='CoreML.Specification.SplitNDLayerParams.numSplits', index=1, + number=2, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='splitSizes', full_name='CoreML.Specification.SplitNDLayerParams.splitSizes', index=2, + number=3, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=29892, + serialized_end=29965, +) + + +_CEILLAYERPARAMS = _descriptor.Descriptor( + name='CeilLayerParams', + full_name='CoreML.Specification.CeilLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=29967, + serialized_end=29984, +) + + +_ROUNDLAYERPARAMS = _descriptor.Descriptor( + name='RoundLayerParams', + full_name='CoreML.Specification.RoundLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=29986, + serialized_end=30004, +) + + +_FLOORLAYERPARAMS = _descriptor.Descriptor( + name='FloorLayerParams', + full_name='CoreML.Specification.FloorLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=30006, + serialized_end=30024, +) + + +_SIGNLAYERPARAMS = _descriptor.Descriptor( + name='SignLayerParams', + full_name='CoreML.Specification.SignLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=30026, + serialized_end=30043, +) + + +_CLIPLAYERPARAMS = _descriptor.Descriptor( + name='ClipLayerParams', + full_name='CoreML.Specification.ClipLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='minVal', full_name='CoreML.Specification.ClipLayerParams.minVal', index=0, + number=1, type=2, 
cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='maxVal', full_name='CoreML.Specification.ClipLayerParams.maxVal', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=30045, + serialized_end=30094, +) + + +_SLICESTATICLAYERPARAMS = _descriptor.Descriptor( + name='SliceStaticLayerParams', + full_name='CoreML.Specification.SliceStaticLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='beginIds', full_name='CoreML.Specification.SliceStaticLayerParams.beginIds', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='beginMasks', full_name='CoreML.Specification.SliceStaticLayerParams.beginMasks', index=1, + number=2, type=8, cpp_type=7, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='endIds', full_name='CoreML.Specification.SliceStaticLayerParams.endIds', index=2, + number=3, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='endMasks', full_name='CoreML.Specification.SliceStaticLayerParams.endMasks', index=3, + number=4, type=8, cpp_type=7, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='strides', full_name='CoreML.Specification.SliceStaticLayerParams.strides', index=4, + number=5, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='squeezeMasks', full_name='CoreML.Specification.SliceStaticLayerParams.squeezeMasks', index=5, + number=6, type=8, cpp_type=7, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=30097, + serialized_end=30232, +) + + +_SLICEDYNAMICLAYERPARAMS = _descriptor.Descriptor( + name='SliceDynamicLayerParams', + full_name='CoreML.Specification.SliceDynamicLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='beginMasks', full_name='CoreML.Specification.SliceDynamicLayerParams.beginMasks', index=0, + number=2, type=8, cpp_type=7, label=3, + 
has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='endIds', full_name='CoreML.Specification.SliceDynamicLayerParams.endIds', index=1, + number=3, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='endMasks', full_name='CoreML.Specification.SliceDynamicLayerParams.endMasks', index=2, + number=4, type=8, cpp_type=7, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='strides', full_name='CoreML.Specification.SliceDynamicLayerParams.strides', index=3, + number=5, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='squeezeMasks', full_name='CoreML.Specification.SliceDynamicLayerParams.squeezeMasks', index=4, + number=6, type=8, cpp_type=7, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=30234, + serialized_end=30352, +) + + +_TILELAYERPARAMS = _descriptor.Descriptor( + name='TileLayerParams', + full_name='CoreML.Specification.TileLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='reps', full_name='CoreML.Specification.TileLayerParams.reps', index=0, + number=1, type=4, cpp_type=4, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=30354, + serialized_end=30385, +) + + +_GETSHAPELAYERPARAMS = _descriptor.Descriptor( + name='GetShapeLayerParams', + full_name='CoreML.Specification.GetShapeLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=30387, + serialized_end=30408, +) + + +_ERFLAYERPARAMS = _descriptor.Descriptor( + name='ErfLayerParams', + full_name='CoreML.Specification.ErfLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=30410, + serialized_end=30426, +) + + +_GELULAYERPARAMS = _descriptor.Descriptor( + name='GeluLayerParams', + full_name='CoreML.Specification.GeluLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='mode', 
full_name='CoreML.Specification.GeluLayerParams.mode', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _GELULAYERPARAMS_GELUMODE, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=30429, + serialized_end=30582, +) + + +_RANGESTATICLAYERPARAMS = _descriptor.Descriptor( + name='RangeStaticLayerParams', + full_name='CoreML.Specification.RangeStaticLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='endValue', full_name='CoreML.Specification.RangeStaticLayerParams.endValue', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='startValue', full_name='CoreML.Specification.RangeStaticLayerParams.startValue', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stepSizeValue', full_name='CoreML.Specification.RangeStaticLayerParams.stepSizeValue', index=2, + number=3, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=30584, + serialized_end=30669, +) + + +_RANGEDYNAMICLAYERPARAMS = _descriptor.Descriptor( + name='RangeDynamicLayerParams', + full_name='CoreML.Specification.RangeDynamicLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='startValue', full_name='CoreML.Specification.RangeDynamicLayerParams.startValue', index=0, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stepSizeValue', full_name='CoreML.Specification.RangeDynamicLayerParams.stepSizeValue', index=1, + number=3, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=30671, + serialized_end=30739, +) + + +_SLIDINGWINDOWSLAYERPARAMS = _descriptor.Descriptor( + name='SlidingWindowsLayerParams', + full_name='CoreML.Specification.SlidingWindowsLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axis', full_name='CoreML.Specification.SlidingWindowsLayerParams.axis', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, 
enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='windowSize', full_name='CoreML.Specification.SlidingWindowsLayerParams.windowSize', index=1, + number=2, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='step', full_name='CoreML.Specification.SlidingWindowsLayerParams.step', index=2, + number=3, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=30741, + serialized_end=30816, +) + + +_LAYERNORMALIZATIONLAYERPARAMS = _descriptor.Descriptor( + name='LayerNormalizationLayerParams', + full_name='CoreML.Specification.LayerNormalizationLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='normalizedShape', full_name='CoreML.Specification.LayerNormalizationLayerParams.normalizedShape', index=0, + number=1, type=3, cpp_type=2, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='eps', full_name='CoreML.Specification.LayerNormalizationLayerParams.eps', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='gamma', full_name='CoreML.Specification.LayerNormalizationLayerParams.gamma', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='beta', full_name='CoreML.Specification.LayerNormalizationLayerParams.beta', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=30819, + serialized_end=30989, +) + + +_NONMAXIMUMSUPPRESSIONLAYERPARAMS = _descriptor.Descriptor( + name='NonMaximumSuppressionLayerParams', + full_name='CoreML.Specification.NonMaximumSuppressionLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='iouThreshold', full_name='CoreML.Specification.NonMaximumSuppressionLayerParams.iouThreshold', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='scoreThreshold', full_name='CoreML.Specification.NonMaximumSuppressionLayerParams.scoreThreshold', index=1, + number=2, type=2, 
cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='maxBoxes', full_name='CoreML.Specification.NonMaximumSuppressionLayerParams.maxBoxes', index=2, + number=3, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='perClassSuppression', full_name='CoreML.Specification.NonMaximumSuppressionLayerParams.perClassSuppression', index=3, + number=4, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=30991, + serialized_end=31118, +) + + +_CLAMPEDRELULAYERPARAMS = _descriptor.Descriptor( + name='ClampedReLULayerParams', + full_name='CoreML.Specification.ClampedReLULayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='alpha', full_name='CoreML.Specification.ClampedReLULayerParams.alpha', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='beta', full_name='CoreML.Specification.ClampedReLULayerParams.beta', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=31120, + serialized_end=31173, +) + + +_ARGSORTLAYERPARAMS = _descriptor.Descriptor( + name='ArgSortLayerParams', + full_name='CoreML.Specification.ArgSortLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axis', full_name='CoreML.Specification.ArgSortLayerParams.axis', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='descending', full_name='CoreML.Specification.ArgSortLayerParams.descending', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=31175, + serialized_end=31229, +) + + +_SLICEBYSIZELAYERPARAMS = _descriptor.Descriptor( + name='SliceBySizeLayerParams', + full_name='CoreML.Specification.SliceBySizeLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='size', 
full_name='CoreML.Specification.SliceBySizeLayerParams.size', index=0, + number=2, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='axis', full_name='CoreML.Specification.SliceBySizeLayerParams.axis', index=1, + number=3, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=31231, + serialized_end=31283, +) + + +_NEURALNETWORKCLASSIFIER = _descriptor.Descriptor( + name='NeuralNetworkClassifier', + full_name='CoreML.Specification.NeuralNetworkClassifier', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='layers', full_name='CoreML.Specification.NeuralNetworkClassifier.layers', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='preprocessing', full_name='CoreML.Specification.NeuralNetworkClassifier.preprocessing', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='arrayInputShapeMapping', full_name='CoreML.Specification.NeuralNetworkClassifier.arrayInputShapeMapping', index=2, + number=5, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='imageInputShapeMapping', full_name='CoreML.Specification.NeuralNetworkClassifier.imageInputShapeMapping', index=3, + number=6, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='updateParams', full_name='CoreML.Specification.NeuralNetworkClassifier.updateParams', index=4, + number=10, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stringClassLabels', full_name='CoreML.Specification.NeuralNetworkClassifier.stringClassLabels', index=5, + number=100, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='int64ClassLabels', full_name='CoreML.Specification.NeuralNetworkClassifier.int64ClassLabels', index=6, + number=101, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='labelProbabilityLayerName', 
full_name='CoreML.Specification.NeuralNetworkClassifier.labelProbabilityLayerName', index=7, + number=200, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='ClassLabels', full_name='CoreML.Specification.NeuralNetworkClassifier.ClassLabels', + index=0, containing_type=None, fields=[]), + ], + serialized_start=31286, + serialized_end=31867, +) + + +_ONEHOTLAYERPARAMS = _descriptor.Descriptor( + name='OneHotLayerParams', + full_name='CoreML.Specification.OneHotLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='oneHotVectorSize', full_name='CoreML.Specification.OneHotLayerParams.oneHotVectorSize', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='axis', full_name='CoreML.Specification.OneHotLayerParams.axis', index=1, + number=2, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='onValue', full_name='CoreML.Specification.OneHotLayerParams.onValue', index=2, + number=3, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='offValue', full_name='CoreML.Specification.OneHotLayerParams.offValue', index=3, + number=4, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=31869, + serialized_end=31963, +) + + +_CUMSUMLAYERPARAMS = _descriptor.Descriptor( + name='CumSumLayerParams', + full_name='CoreML.Specification.CumSumLayerParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='axis', full_name='CoreML.Specification.CumSumLayerParams.axis', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='excludeFinalSum', full_name='CoreML.Specification.CumSumLayerParams.excludeFinalSum', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reverse', full_name='CoreML.Specification.CumSumLayerParams.reverse', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, 
containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=31965, + serialized_end=32040, +) + + +_NEURALNETWORKREGRESSOR = _descriptor.Descriptor( + name='NeuralNetworkRegressor', + full_name='CoreML.Specification.NeuralNetworkRegressor', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='layers', full_name='CoreML.Specification.NeuralNetworkRegressor.layers', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='preprocessing', full_name='CoreML.Specification.NeuralNetworkRegressor.preprocessing', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='arrayInputShapeMapping', full_name='CoreML.Specification.NeuralNetworkRegressor.arrayInputShapeMapping', index=2, + number=5, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='imageInputShapeMapping', full_name='CoreML.Specification.NeuralNetworkRegressor.imageInputShapeMapping', index=3, + number=6, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='updateParams', full_name='CoreML.Specification.NeuralNetworkRegressor.updateParams', index=4, + number=10, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=32043, + serialized_end=32444, +) + + +_NETWORKUPDATEPARAMETERS = _descriptor.Descriptor( + name='NetworkUpdateParameters', + full_name='CoreML.Specification.NetworkUpdateParameters', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='lossLayers', full_name='CoreML.Specification.NetworkUpdateParameters.lossLayers', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='optimizer', full_name='CoreML.Specification.NetworkUpdateParameters.optimizer', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='epochs', full_name='CoreML.Specification.NetworkUpdateParameters.epochs', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, 
default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='shuffle', full_name='CoreML.Specification.NetworkUpdateParameters.shuffle', index=3, + number=10, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='seed', full_name='CoreML.Specification.NetworkUpdateParameters.seed', index=4, + number=20, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=32447, + serialized_end=32737, +) + + +_LOSSLAYER = _descriptor.Descriptor( + name='LossLayer', + full_name='CoreML.Specification.LossLayer', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='CoreML.Specification.LossLayer.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='categoricalCrossEntropyLossLayer', full_name='CoreML.Specification.LossLayer.categoricalCrossEntropyLossLayer', index=1, + number=10, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='meanSquaredErrorLossLayer', full_name='CoreML.Specification.LossLayer.meanSquaredErrorLossLayer', index=2, + number=11, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='LossLayerType', full_name='CoreML.Specification.LossLayer.LossLayerType', + index=0, containing_type=None, fields=[]), + ], + serialized_start=32740, + serialized_end=32968, +) + + +_CATEGORICALCROSSENTROPYLOSSLAYER = _descriptor.Descriptor( + name='CategoricalCrossEntropyLossLayer', + full_name='CoreML.Specification.CategoricalCrossEntropyLossLayer', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='input', full_name='CoreML.Specification.CategoricalCrossEntropyLossLayer.input', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='target', full_name='CoreML.Specification.CategoricalCrossEntropyLossLayer.target', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, 
extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=32970, + serialized_end=33035, +) + + +_MEANSQUAREDERRORLOSSLAYER = _descriptor.Descriptor( + name='MeanSquaredErrorLossLayer', + full_name='CoreML.Specification.MeanSquaredErrorLossLayer', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='input', full_name='CoreML.Specification.MeanSquaredErrorLossLayer.input', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='target', full_name='CoreML.Specification.MeanSquaredErrorLossLayer.target', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=33037, + serialized_end=33095, +) + + +_OPTIMIZER = _descriptor.Descriptor( + name='Optimizer', + full_name='CoreML.Specification.Optimizer', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='sgdOptimizer', full_name='CoreML.Specification.Optimizer.sgdOptimizer', index=0, + number=10, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='adamOptimizer', full_name='CoreML.Specification.Optimizer.adamOptimizer', index=1, + number=11, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='OptimizerType', full_name='CoreML.Specification.Optimizer.OptimizerType', + index=0, containing_type=None, fields=[]), + ], + serialized_start=33098, + serialized_end=33248, +) + + +_SGDOPTIMIZER = _descriptor.Descriptor( + name='SGDOptimizer', + full_name='CoreML.Specification.SGDOptimizer', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='learningRate', full_name='CoreML.Specification.SGDOptimizer.learningRate', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='miniBatchSize', full_name='CoreML.Specification.SGDOptimizer.miniBatchSize', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='momentum', 
full_name='CoreML.Specification.SGDOptimizer.momentum', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=33251, + serialized_end=33444, +) + + +_ADAMOPTIMIZER = _descriptor.Descriptor( + name='AdamOptimizer', + full_name='CoreML.Specification.AdamOptimizer', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='learningRate', full_name='CoreML.Specification.AdamOptimizer.learningRate', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='miniBatchSize', full_name='CoreML.Specification.AdamOptimizer.miniBatchSize', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='beta1', full_name='CoreML.Specification.AdamOptimizer.beta1', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='beta2', full_name='CoreML.Specification.AdamOptimizer.beta2', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='eps', full_name='CoreML.Specification.AdamOptimizer.eps', index=4, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=33447, + serialized_end=33744, +) + +_NEURALNETWORK.fields_by_name['layers'].message_type = _NEURALNETWORKLAYER +_NEURALNETWORK.fields_by_name['preprocessing'].message_type = _NEURALNETWORKPREPROCESSING +_NEURALNETWORK.fields_by_name['arrayInputShapeMapping'].enum_type = _NEURALNETWORKMULTIARRAYSHAPEMAPPING +_NEURALNETWORK.fields_by_name['imageInputShapeMapping'].enum_type = _NEURALNETWORKIMAGESHAPEMAPPING +_NEURALNETWORK.fields_by_name['updateParams'].message_type = _NETWORKUPDATEPARAMETERS +_NEURALNETWORKPREPROCESSING.fields_by_name['scaler'].message_type = _NEURALNETWORKIMAGESCALER +_NEURALNETWORKPREPROCESSING.fields_by_name['meanImage'].message_type = _NEURALNETWORKMEANIMAGE +_NEURALNETWORKPREPROCESSING.oneofs_by_name['preprocessor'].fields.append( + _NEURALNETWORKPREPROCESSING.fields_by_name['scaler']) +_NEURALNETWORKPREPROCESSING.fields_by_name['scaler'].containing_oneof = _NEURALNETWORKPREPROCESSING.oneofs_by_name['preprocessor'] +_NEURALNETWORKPREPROCESSING.oneofs_by_name['preprocessor'].fields.append( + 
_NEURALNETWORKPREPROCESSING.fields_by_name['meanImage']) +_NEURALNETWORKPREPROCESSING.fields_by_name['meanImage'].containing_oneof = _NEURALNETWORKPREPROCESSING.oneofs_by_name['preprocessor'] +_ACTIVATIONPRELU.fields_by_name['alpha'].message_type = _WEIGHTPARAMS +_ACTIVATIONPARAMETRICSOFTPLUS.fields_by_name['alpha'].message_type = _WEIGHTPARAMS +_ACTIVATIONPARAMETRICSOFTPLUS.fields_by_name['beta'].message_type = _WEIGHTPARAMS +_ACTIVATIONPARAMS.fields_by_name['linear'].message_type = _ACTIVATIONLINEAR +_ACTIVATIONPARAMS.fields_by_name['ReLU'].message_type = _ACTIVATIONRELU +_ACTIVATIONPARAMS.fields_by_name['leakyReLU'].message_type = _ACTIVATIONLEAKYRELU +_ACTIVATIONPARAMS.fields_by_name['thresholdedReLU'].message_type = _ACTIVATIONTHRESHOLDEDRELU +_ACTIVATIONPARAMS.fields_by_name['PReLU'].message_type = _ACTIVATIONPRELU +_ACTIVATIONPARAMS.fields_by_name['tanh'].message_type = _ACTIVATIONTANH +_ACTIVATIONPARAMS.fields_by_name['scaledTanh'].message_type = _ACTIVATIONSCALEDTANH +_ACTIVATIONPARAMS.fields_by_name['sigmoid'].message_type = _ACTIVATIONSIGMOID +_ACTIVATIONPARAMS.fields_by_name['sigmoidHard'].message_type = _ACTIVATIONSIGMOIDHARD +_ACTIVATIONPARAMS.fields_by_name['ELU'].message_type = _ACTIVATIONELU +_ACTIVATIONPARAMS.fields_by_name['softsign'].message_type = _ACTIVATIONSOFTSIGN +_ACTIVATIONPARAMS.fields_by_name['softplus'].message_type = _ACTIVATIONSOFTPLUS +_ACTIVATIONPARAMS.fields_by_name['parametricSoftplus'].message_type = _ACTIVATIONPARAMETRICSOFTPLUS +_ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'].fields.append( + _ACTIVATIONPARAMS.fields_by_name['linear']) +_ACTIVATIONPARAMS.fields_by_name['linear'].containing_oneof = _ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'] +_ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'].fields.append( + _ACTIVATIONPARAMS.fields_by_name['ReLU']) +_ACTIVATIONPARAMS.fields_by_name['ReLU'].containing_oneof = _ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'] +_ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'].fields.append( + _ACTIVATIONPARAMS.fields_by_name['leakyReLU']) +_ACTIVATIONPARAMS.fields_by_name['leakyReLU'].containing_oneof = _ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'] +_ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'].fields.append( + _ACTIVATIONPARAMS.fields_by_name['thresholdedReLU']) +_ACTIVATIONPARAMS.fields_by_name['thresholdedReLU'].containing_oneof = _ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'] +_ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'].fields.append( + _ACTIVATIONPARAMS.fields_by_name['PReLU']) +_ACTIVATIONPARAMS.fields_by_name['PReLU'].containing_oneof = _ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'] +_ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'].fields.append( + _ACTIVATIONPARAMS.fields_by_name['tanh']) +_ACTIVATIONPARAMS.fields_by_name['tanh'].containing_oneof = _ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'] +_ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'].fields.append( + _ACTIVATIONPARAMS.fields_by_name['scaledTanh']) +_ACTIVATIONPARAMS.fields_by_name['scaledTanh'].containing_oneof = _ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'] +_ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'].fields.append( + _ACTIVATIONPARAMS.fields_by_name['sigmoid']) +_ACTIVATIONPARAMS.fields_by_name['sigmoid'].containing_oneof = _ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'] +_ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'].fields.append( + _ACTIVATIONPARAMS.fields_by_name['sigmoidHard']) +_ACTIVATIONPARAMS.fields_by_name['sigmoidHard'].containing_oneof = 
_ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'] +_ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'].fields.append( + _ACTIVATIONPARAMS.fields_by_name['ELU']) +_ACTIVATIONPARAMS.fields_by_name['ELU'].containing_oneof = _ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'] +_ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'].fields.append( + _ACTIVATIONPARAMS.fields_by_name['softsign']) +_ACTIVATIONPARAMS.fields_by_name['softsign'].containing_oneof = _ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'] +_ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'].fields.append( + _ACTIVATIONPARAMS.fields_by_name['softplus']) +_ACTIVATIONPARAMS.fields_by_name['softplus'].containing_oneof = _ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'] +_ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'].fields.append( + _ACTIVATIONPARAMS.fields_by_name['parametricSoftplus']) +_ACTIVATIONPARAMS.fields_by_name['parametricSoftplus'].containing_oneof = _ACTIVATIONPARAMS.oneofs_by_name['NonlinearityType'] +_NEURALNETWORKLAYER.fields_by_name['inputTensor'].message_type = _TENSOR +_NEURALNETWORKLAYER.fields_by_name['outputTensor'].message_type = _TENSOR +_NEURALNETWORKLAYER.fields_by_name['convolution'].message_type = _CONVOLUTIONLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['pooling'].message_type = _POOLINGLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['activation'].message_type = _ACTIVATIONPARAMS +_NEURALNETWORKLAYER.fields_by_name['innerProduct'].message_type = _INNERPRODUCTLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['embedding'].message_type = _EMBEDDINGLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['batchnorm'].message_type = _BATCHNORMLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['mvn'].message_type = _MEANVARIANCENORMALIZELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['l2normalize'].message_type = _L2NORMALIZELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['softmax'].message_type = _SOFTMAXLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['lrn'].message_type = _LRNLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['crop'].message_type = _CROPLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['padding'].message_type = _PADDINGLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['upsample'].message_type = _UPSAMPLELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['resizeBilinear'].message_type = _RESIZEBILINEARLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['cropResize'].message_type = _CROPRESIZELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['unary'].message_type = _UNARYFUNCTIONLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['add'].message_type = _ADDLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['multiply'].message_type = _MULTIPLYLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['average'].message_type = _AVERAGELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['scale'].message_type = _SCALELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['bias'].message_type = _BIASLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['max'].message_type = _MAXLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['min'].message_type = _MINLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['dot'].message_type = _DOTPRODUCTLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['reduce'].message_type = _REDUCELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['loadConstant'].message_type = _LOADCONSTANTLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['reshape'].message_type = _RESHAPELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['flatten'].message_type = _FLATTENLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['permute'].message_type = 
_PERMUTELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['concat'].message_type = _CONCATLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['split'].message_type = _SPLITLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['sequenceRepeat'].message_type = _SEQUENCEREPEATLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['reorganizeData'].message_type = _REORGANIZEDATALAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['slice'].message_type = _SLICELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['simpleRecurrent'].message_type = _SIMPLERECURRENTLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['gru'].message_type = _GRULAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['uniDirectionalLSTM'].message_type = _UNIDIRECTIONALLSTMLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['biDirectionalLSTM'].message_type = _BIDIRECTIONALLSTMLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['custom'].message_type = _CUSTOMLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['copy'].message_type = _COPYLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['branch'].message_type = _BRANCHLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['loop'].message_type = _LOOPLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['loopBreak'].message_type = _LOOPBREAKLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['loopContinue'].message_type = _LOOPCONTINUELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['rangeStatic'].message_type = _RANGESTATICLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['rangeDynamic'].message_type = _RANGEDYNAMICLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['clip'].message_type = _CLIPLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['ceil'].message_type = _CEILLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['floor'].message_type = _FLOORLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['sign'].message_type = _SIGNLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['round'].message_type = _ROUNDLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['exp2'].message_type = _EXP2LAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['sin'].message_type = _SINLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['cos'].message_type = _COSLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['tan'].message_type = _TANLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['asin'].message_type = _ASINLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['acos'].message_type = _ACOSLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['atan'].message_type = _ATANLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['sinh'].message_type = _SINHLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['cosh'].message_type = _COSHLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['tanh'].message_type = _TANHLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['asinh'].message_type = _ASINHLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['acosh'].message_type = _ACOSHLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['atanh'].message_type = _ATANHLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['erf'].message_type = _ERFLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['gelu'].message_type = _GELULAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['equal'].message_type = _EQUALLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['notEqual'].message_type = _NOTEQUALLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['lessThan'].message_type = _LESSTHANLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['lessEqual'].message_type = _LESSEQUALLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['greaterThan'].message_type = _GREATERTHANLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['greaterEqual'].message_type = _GREATEREQUALLAYERPARAMS 
+_NEURALNETWORKLAYER.fields_by_name['logicalOr'].message_type = _LOGICALORLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['logicalXor'].message_type = _LOGICALXORLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['logicalNot'].message_type = _LOGICALNOTLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['logicalAnd'].message_type = _LOGICALANDLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['modBroadcastable'].message_type = _MODBROADCASTABLELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['minBroadcastable'].message_type = _MINBROADCASTABLELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['maxBroadcastable'].message_type = _MAXBROADCASTABLELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['addBroadcastable'].message_type = _ADDBROADCASTABLELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['powBroadcastable'].message_type = _POWBROADCASTABLELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['divideBroadcastable'].message_type = _DIVIDEBROADCASTABLELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['floorDivBroadcastable'].message_type = _FLOORDIVBROADCASTABLELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['multiplyBroadcastable'].message_type = _MULTIPLYBROADCASTABLELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['subtractBroadcastable'].message_type = _SUBTRACTBROADCASTABLELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['tile'].message_type = _TILELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['stack'].message_type = _STACKLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['gather'].message_type = _GATHERLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['scatter'].message_type = _SCATTERLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['gatherND'].message_type = _GATHERNDLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['scatterND'].message_type = _SCATTERNDLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['softmaxND'].message_type = _SOFTMAXNDLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['gatherAlongAxis'].message_type = _GATHERALONGAXISLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['scatterAlongAxis'].message_type = _SCATTERALONGAXISLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['reverse'].message_type = _REVERSELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['reverseSeq'].message_type = _REVERSESEQLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['splitND'].message_type = _SPLITNDLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['concatND'].message_type = _CONCATNDLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['transpose'].message_type = _TRANSPOSELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['sliceStatic'].message_type = _SLICESTATICLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['sliceDynamic'].message_type = _SLICEDYNAMICLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['slidingWindows'].message_type = _SLIDINGWINDOWSLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['topK'].message_type = _TOPKLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['argMin'].message_type = _ARGMINLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['argMax'].message_type = _ARGMAXLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['embeddingND'].message_type = _EMBEDDINGNDLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['batchedMatmul'].message_type = _BATCHEDMATMULLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['getShape'].message_type = _GETSHAPELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['loadConstantND'].message_type = _LOADCONSTANTNDLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['fillLike'].message_type = _FILLLIKELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['fillStatic'].message_type = _FILLSTATICLAYERPARAMS 
+_NEURALNETWORKLAYER.fields_by_name['fillDynamic'].message_type = _FILLDYNAMICLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['broadcastToLike'].message_type = _BROADCASTTOLIKELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['broadcastToStatic'].message_type = _BROADCASTTOSTATICLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['broadcastToDynamic'].message_type = _BROADCASTTODYNAMICLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['squeeze'].message_type = _SQUEEZELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['expandDims'].message_type = _EXPANDDIMSLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['flattenTo2D'].message_type = _FLATTENTO2DLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['reshapeLike'].message_type = _RESHAPELIKELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['reshapeStatic'].message_type = _RESHAPESTATICLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['reshapeDynamic'].message_type = _RESHAPEDYNAMICLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['rankPreservingReshape'].message_type = _RANKPRESERVINGRESHAPELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['constantPad'].message_type = _CONSTANTPADDINGLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['randomNormalLike'].message_type = _RANDOMNORMALLIKELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['randomNormalStatic'].message_type = _RANDOMNORMALSTATICLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['randomNormalDynamic'].message_type = _RANDOMNORMALDYNAMICLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['randomUniformLike'].message_type = _RANDOMUNIFORMLIKELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['randomUniformStatic'].message_type = _RANDOMUNIFORMSTATICLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['randomUniformDynamic'].message_type = _RANDOMUNIFORMDYNAMICLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['randomBernoulliLike'].message_type = _RANDOMBERNOULLILIKELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['randomBernoulliStatic'].message_type = _RANDOMBERNOULLISTATICLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['randomBernoulliDynamic'].message_type = _RANDOMBERNOULLIDYNAMICLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['categoricalDistribution'].message_type = _CATEGORICALDISTRIBUTIONLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['reduceL1'].message_type = _REDUCEL1LAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['reduceL2'].message_type = _REDUCEL2LAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['reduceMax'].message_type = _REDUCEMAXLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['reduceMin'].message_type = _REDUCEMINLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['reduceSum'].message_type = _REDUCESUMLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['reduceProd'].message_type = _REDUCEPRODLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['reduceMean'].message_type = _REDUCEMEANLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['reduceLogSum'].message_type = _REDUCELOGSUMLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['reduceSumSquare'].message_type = _REDUCESUMSQUARELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['reduceLogSumExp'].message_type = _REDUCELOGSUMEXPLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['whereNonZero'].message_type = _WHERENONZEROLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['matrixBandPart'].message_type = _MATRIXBANDPARTLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['lowerTriangular'].message_type = _LOWERTRIANGULARLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['upperTriangular'].message_type = _UPPERTRIANGULARLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['whereBroadcastable'].message_type = 
_WHEREBROADCASTABLELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['layerNormalization'].message_type = _LAYERNORMALIZATIONLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['NonMaximumSuppression'].message_type = _NONMAXIMUMSUPPRESSIONLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['oneHot'].message_type = _ONEHOTLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['cumSum'].message_type = _CUMSUMLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['clampedReLU'].message_type = _CLAMPEDRELULAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['argSort'].message_type = _ARGSORTLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['pooling3d'].message_type = _POOLING3DLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['globalPooling3d'].message_type = _GLOBALPOOLING3DLAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['sliceBySize'].message_type = _SLICEBYSIZELAYERPARAMS +_NEURALNETWORKLAYER.fields_by_name['convolution3d'].message_type = _CONVOLUTION3DLAYERPARAMS +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['convolution']) +_NEURALNETWORKLAYER.fields_by_name['convolution'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['pooling']) +_NEURALNETWORKLAYER.fields_by_name['pooling'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['activation']) +_NEURALNETWORKLAYER.fields_by_name['activation'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['innerProduct']) +_NEURALNETWORKLAYER.fields_by_name['innerProduct'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['embedding']) +_NEURALNETWORKLAYER.fields_by_name['embedding'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['batchnorm']) +_NEURALNETWORKLAYER.fields_by_name['batchnorm'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['mvn']) +_NEURALNETWORKLAYER.fields_by_name['mvn'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['l2normalize']) +_NEURALNETWORKLAYER.fields_by_name['l2normalize'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['softmax']) +_NEURALNETWORKLAYER.fields_by_name['softmax'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['lrn']) +_NEURALNETWORKLAYER.fields_by_name['lrn'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['crop']) +_NEURALNETWORKLAYER.fields_by_name['crop'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['padding']) +_NEURALNETWORKLAYER.fields_by_name['padding'].containing_oneof = 
_NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['upsample']) +_NEURALNETWORKLAYER.fields_by_name['upsample'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['resizeBilinear']) +_NEURALNETWORKLAYER.fields_by_name['resizeBilinear'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['cropResize']) +_NEURALNETWORKLAYER.fields_by_name['cropResize'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['unary']) +_NEURALNETWORKLAYER.fields_by_name['unary'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['add']) +_NEURALNETWORKLAYER.fields_by_name['add'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['multiply']) +_NEURALNETWORKLAYER.fields_by_name['multiply'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['average']) +_NEURALNETWORKLAYER.fields_by_name['average'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['scale']) +_NEURALNETWORKLAYER.fields_by_name['scale'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['bias']) +_NEURALNETWORKLAYER.fields_by_name['bias'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['max']) +_NEURALNETWORKLAYER.fields_by_name['max'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['min']) +_NEURALNETWORKLAYER.fields_by_name['min'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['dot']) +_NEURALNETWORKLAYER.fields_by_name['dot'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['reduce']) +_NEURALNETWORKLAYER.fields_by_name['reduce'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['loadConstant']) +_NEURALNETWORKLAYER.fields_by_name['loadConstant'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['reshape']) +_NEURALNETWORKLAYER.fields_by_name['reshape'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['flatten']) +_NEURALNETWORKLAYER.fields_by_name['flatten'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] 
+_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['permute']) +_NEURALNETWORKLAYER.fields_by_name['permute'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['concat']) +_NEURALNETWORKLAYER.fields_by_name['concat'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['split']) +_NEURALNETWORKLAYER.fields_by_name['split'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['sequenceRepeat']) +_NEURALNETWORKLAYER.fields_by_name['sequenceRepeat'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['reorganizeData']) +_NEURALNETWORKLAYER.fields_by_name['reorganizeData'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['slice']) +_NEURALNETWORKLAYER.fields_by_name['slice'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['simpleRecurrent']) +_NEURALNETWORKLAYER.fields_by_name['simpleRecurrent'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['gru']) +_NEURALNETWORKLAYER.fields_by_name['gru'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['uniDirectionalLSTM']) +_NEURALNETWORKLAYER.fields_by_name['uniDirectionalLSTM'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['biDirectionalLSTM']) +_NEURALNETWORKLAYER.fields_by_name['biDirectionalLSTM'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['custom']) +_NEURALNETWORKLAYER.fields_by_name['custom'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['copy']) +_NEURALNETWORKLAYER.fields_by_name['copy'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['branch']) +_NEURALNETWORKLAYER.fields_by_name['branch'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['loop']) +_NEURALNETWORKLAYER.fields_by_name['loop'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['loopBreak']) +_NEURALNETWORKLAYER.fields_by_name['loopBreak'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['loopContinue']) +_NEURALNETWORKLAYER.fields_by_name['loopContinue'].containing_oneof = 
_NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['rangeStatic']) +_NEURALNETWORKLAYER.fields_by_name['rangeStatic'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['rangeDynamic']) +_NEURALNETWORKLAYER.fields_by_name['rangeDynamic'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['clip']) +_NEURALNETWORKLAYER.fields_by_name['clip'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['ceil']) +_NEURALNETWORKLAYER.fields_by_name['ceil'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['floor']) +_NEURALNETWORKLAYER.fields_by_name['floor'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['sign']) +_NEURALNETWORKLAYER.fields_by_name['sign'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['round']) +_NEURALNETWORKLAYER.fields_by_name['round'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['exp2']) +_NEURALNETWORKLAYER.fields_by_name['exp2'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['sin']) +_NEURALNETWORKLAYER.fields_by_name['sin'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['cos']) +_NEURALNETWORKLAYER.fields_by_name['cos'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['tan']) +_NEURALNETWORKLAYER.fields_by_name['tan'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['asin']) +_NEURALNETWORKLAYER.fields_by_name['asin'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['acos']) +_NEURALNETWORKLAYER.fields_by_name['acos'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['atan']) +_NEURALNETWORKLAYER.fields_by_name['atan'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['sinh']) +_NEURALNETWORKLAYER.fields_by_name['sinh'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['cosh']) +_NEURALNETWORKLAYER.fields_by_name['cosh'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + 
_NEURALNETWORKLAYER.fields_by_name['tanh']) +_NEURALNETWORKLAYER.fields_by_name['tanh'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['asinh']) +_NEURALNETWORKLAYER.fields_by_name['asinh'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['acosh']) +_NEURALNETWORKLAYER.fields_by_name['acosh'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['atanh']) +_NEURALNETWORKLAYER.fields_by_name['atanh'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['erf']) +_NEURALNETWORKLAYER.fields_by_name['erf'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['gelu']) +_NEURALNETWORKLAYER.fields_by_name['gelu'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['equal']) +_NEURALNETWORKLAYER.fields_by_name['equal'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['notEqual']) +_NEURALNETWORKLAYER.fields_by_name['notEqual'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['lessThan']) +_NEURALNETWORKLAYER.fields_by_name['lessThan'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['lessEqual']) +_NEURALNETWORKLAYER.fields_by_name['lessEqual'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['greaterThan']) +_NEURALNETWORKLAYER.fields_by_name['greaterThan'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['greaterEqual']) +_NEURALNETWORKLAYER.fields_by_name['greaterEqual'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['logicalOr']) +_NEURALNETWORKLAYER.fields_by_name['logicalOr'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['logicalXor']) +_NEURALNETWORKLAYER.fields_by_name['logicalXor'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['logicalNot']) +_NEURALNETWORKLAYER.fields_by_name['logicalNot'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['logicalAnd']) +_NEURALNETWORKLAYER.fields_by_name['logicalAnd'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + 
_NEURALNETWORKLAYER.fields_by_name['modBroadcastable']) +_NEURALNETWORKLAYER.fields_by_name['modBroadcastable'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['minBroadcastable']) +_NEURALNETWORKLAYER.fields_by_name['minBroadcastable'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['maxBroadcastable']) +_NEURALNETWORKLAYER.fields_by_name['maxBroadcastable'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['addBroadcastable']) +_NEURALNETWORKLAYER.fields_by_name['addBroadcastable'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['powBroadcastable']) +_NEURALNETWORKLAYER.fields_by_name['powBroadcastable'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['divideBroadcastable']) +_NEURALNETWORKLAYER.fields_by_name['divideBroadcastable'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['floorDivBroadcastable']) +_NEURALNETWORKLAYER.fields_by_name['floorDivBroadcastable'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['multiplyBroadcastable']) +_NEURALNETWORKLAYER.fields_by_name['multiplyBroadcastable'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['subtractBroadcastable']) +_NEURALNETWORKLAYER.fields_by_name['subtractBroadcastable'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['tile']) +_NEURALNETWORKLAYER.fields_by_name['tile'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['stack']) +_NEURALNETWORKLAYER.fields_by_name['stack'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['gather']) +_NEURALNETWORKLAYER.fields_by_name['gather'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['scatter']) +_NEURALNETWORKLAYER.fields_by_name['scatter'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['gatherND']) +_NEURALNETWORKLAYER.fields_by_name['gatherND'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['scatterND']) +_NEURALNETWORKLAYER.fields_by_name['scatterND'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['softmaxND']) 
+_NEURALNETWORKLAYER.fields_by_name['softmaxND'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['gatherAlongAxis']) +_NEURALNETWORKLAYER.fields_by_name['gatherAlongAxis'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['scatterAlongAxis']) +_NEURALNETWORKLAYER.fields_by_name['scatterAlongAxis'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['reverse']) +_NEURALNETWORKLAYER.fields_by_name['reverse'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['reverseSeq']) +_NEURALNETWORKLAYER.fields_by_name['reverseSeq'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['splitND']) +_NEURALNETWORKLAYER.fields_by_name['splitND'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['concatND']) +_NEURALNETWORKLAYER.fields_by_name['concatND'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['transpose']) +_NEURALNETWORKLAYER.fields_by_name['transpose'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['sliceStatic']) +_NEURALNETWORKLAYER.fields_by_name['sliceStatic'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['sliceDynamic']) +_NEURALNETWORKLAYER.fields_by_name['sliceDynamic'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['slidingWindows']) +_NEURALNETWORKLAYER.fields_by_name['slidingWindows'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['topK']) +_NEURALNETWORKLAYER.fields_by_name['topK'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['argMin']) +_NEURALNETWORKLAYER.fields_by_name['argMin'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['argMax']) +_NEURALNETWORKLAYER.fields_by_name['argMax'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['embeddingND']) +_NEURALNETWORKLAYER.fields_by_name['embeddingND'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['batchedMatmul']) +_NEURALNETWORKLAYER.fields_by_name['batchedMatmul'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + 
_NEURALNETWORKLAYER.fields_by_name['getShape']) +_NEURALNETWORKLAYER.fields_by_name['getShape'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['loadConstantND']) +_NEURALNETWORKLAYER.fields_by_name['loadConstantND'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['fillLike']) +_NEURALNETWORKLAYER.fields_by_name['fillLike'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['fillStatic']) +_NEURALNETWORKLAYER.fields_by_name['fillStatic'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['fillDynamic']) +_NEURALNETWORKLAYER.fields_by_name['fillDynamic'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['broadcastToLike']) +_NEURALNETWORKLAYER.fields_by_name['broadcastToLike'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['broadcastToStatic']) +_NEURALNETWORKLAYER.fields_by_name['broadcastToStatic'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['broadcastToDynamic']) +_NEURALNETWORKLAYER.fields_by_name['broadcastToDynamic'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['squeeze']) +_NEURALNETWORKLAYER.fields_by_name['squeeze'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['expandDims']) +_NEURALNETWORKLAYER.fields_by_name['expandDims'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['flattenTo2D']) +_NEURALNETWORKLAYER.fields_by_name['flattenTo2D'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['reshapeLike']) +_NEURALNETWORKLAYER.fields_by_name['reshapeLike'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['reshapeStatic']) +_NEURALNETWORKLAYER.fields_by_name['reshapeStatic'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['reshapeDynamic']) +_NEURALNETWORKLAYER.fields_by_name['reshapeDynamic'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['rankPreservingReshape']) +_NEURALNETWORKLAYER.fields_by_name['rankPreservingReshape'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['constantPad']) 
+_NEURALNETWORKLAYER.fields_by_name['constantPad'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['randomNormalLike']) +_NEURALNETWORKLAYER.fields_by_name['randomNormalLike'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['randomNormalStatic']) +_NEURALNETWORKLAYER.fields_by_name['randomNormalStatic'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['randomNormalDynamic']) +_NEURALNETWORKLAYER.fields_by_name['randomNormalDynamic'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['randomUniformLike']) +_NEURALNETWORKLAYER.fields_by_name['randomUniformLike'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['randomUniformStatic']) +_NEURALNETWORKLAYER.fields_by_name['randomUniformStatic'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['randomUniformDynamic']) +_NEURALNETWORKLAYER.fields_by_name['randomUniformDynamic'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['randomBernoulliLike']) +_NEURALNETWORKLAYER.fields_by_name['randomBernoulliLike'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['randomBernoulliStatic']) +_NEURALNETWORKLAYER.fields_by_name['randomBernoulliStatic'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['randomBernoulliDynamic']) +_NEURALNETWORKLAYER.fields_by_name['randomBernoulliDynamic'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['categoricalDistribution']) +_NEURALNETWORKLAYER.fields_by_name['categoricalDistribution'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['reduceL1']) +_NEURALNETWORKLAYER.fields_by_name['reduceL1'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['reduceL2']) +_NEURALNETWORKLAYER.fields_by_name['reduceL2'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['reduceMax']) +_NEURALNETWORKLAYER.fields_by_name['reduceMax'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['reduceMin']) +_NEURALNETWORKLAYER.fields_by_name['reduceMin'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['reduceSum']) 
+_NEURALNETWORKLAYER.fields_by_name['reduceSum'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['reduceProd']) +_NEURALNETWORKLAYER.fields_by_name['reduceProd'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['reduceMean']) +_NEURALNETWORKLAYER.fields_by_name['reduceMean'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['reduceLogSum']) +_NEURALNETWORKLAYER.fields_by_name['reduceLogSum'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['reduceSumSquare']) +_NEURALNETWORKLAYER.fields_by_name['reduceSumSquare'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['reduceLogSumExp']) +_NEURALNETWORKLAYER.fields_by_name['reduceLogSumExp'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['whereNonZero']) +_NEURALNETWORKLAYER.fields_by_name['whereNonZero'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['matrixBandPart']) +_NEURALNETWORKLAYER.fields_by_name['matrixBandPart'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['lowerTriangular']) +_NEURALNETWORKLAYER.fields_by_name['lowerTriangular'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['upperTriangular']) +_NEURALNETWORKLAYER.fields_by_name['upperTriangular'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['whereBroadcastable']) +_NEURALNETWORKLAYER.fields_by_name['whereBroadcastable'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['layerNormalization']) +_NEURALNETWORKLAYER.fields_by_name['layerNormalization'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['NonMaximumSuppression']) +_NEURALNETWORKLAYER.fields_by_name['NonMaximumSuppression'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['oneHot']) +_NEURALNETWORKLAYER.fields_by_name['oneHot'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['cumSum']) +_NEURALNETWORKLAYER.fields_by_name['cumSum'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['clampedReLU']) +_NEURALNETWORKLAYER.fields_by_name['clampedReLU'].containing_oneof = 
_NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['argSort']) +_NEURALNETWORKLAYER.fields_by_name['argSort'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['pooling3d']) +_NEURALNETWORKLAYER.fields_by_name['pooling3d'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['globalPooling3d']) +_NEURALNETWORKLAYER.fields_by_name['globalPooling3d'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['sliceBySize']) +_NEURALNETWORKLAYER.fields_by_name['sliceBySize'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_NEURALNETWORKLAYER.oneofs_by_name['layer'].fields.append( + _NEURALNETWORKLAYER.fields_by_name['convolution3d']) +_NEURALNETWORKLAYER.fields_by_name['convolution3d'].containing_oneof = _NEURALNETWORKLAYER.oneofs_by_name['layer'] +_BRANCHLAYERPARAMS.fields_by_name['ifBranch'].message_type = _NEURALNETWORK +_BRANCHLAYERPARAMS.fields_by_name['elseBranch'].message_type = _NEURALNETWORK +_LOOPLAYERPARAMS.fields_by_name['conditionNetwork'].message_type = _NEURALNETWORK +_LOOPLAYERPARAMS.fields_by_name['bodyNetwork'].message_type = _NEURALNETWORK +_BORDERAMOUNTS_EDGESIZES.containing_type = _BORDERAMOUNTS +_BORDERAMOUNTS.fields_by_name['borderAmounts'].message_type = _BORDERAMOUNTS_EDGESIZES +_VALIDPADDING.fields_by_name['paddingAmounts'].message_type = _BORDERAMOUNTS +_SAMEPADDING.fields_by_name['asymmetryMode'].enum_type = _SAMEPADDING_SAMEPADDINGMODE +_SAMEPADDING_SAMEPADDINGMODE.containing_type = _SAMEPADDING +_SAMPLINGMODE.fields_by_name['samplingMethod'].enum_type = _SAMPLINGMODE_METHOD +_SAMPLINGMODE_METHOD.containing_type = _SAMPLINGMODE +_BOXCOORDINATESMODE.fields_by_name['boxMode'].enum_type = _BOXCOORDINATESMODE_COORDINATES +_BOXCOORDINATESMODE_COORDINATES.containing_type = _BOXCOORDINATESMODE +_WEIGHTPARAMS.fields_by_name['quantization'].message_type = _QUANTIZATIONPARAMS +_QUANTIZATIONPARAMS.fields_by_name['linearQuantization'].message_type = _LINEARQUANTIZATIONPARAMS +_QUANTIZATIONPARAMS.fields_by_name['lookupTableQuantization'].message_type = _LOOKUPTABLEQUANTIZATIONPARAMS +_QUANTIZATIONPARAMS.oneofs_by_name['QuantizationType'].fields.append( + _QUANTIZATIONPARAMS.fields_by_name['linearQuantization']) +_QUANTIZATIONPARAMS.fields_by_name['linearQuantization'].containing_oneof = _QUANTIZATIONPARAMS.oneofs_by_name['QuantizationType'] +_QUANTIZATIONPARAMS.oneofs_by_name['QuantizationType'].fields.append( + _QUANTIZATIONPARAMS.fields_by_name['lookupTableQuantization']) +_QUANTIZATIONPARAMS.fields_by_name['lookupTableQuantization'].containing_oneof = _QUANTIZATIONPARAMS.oneofs_by_name['QuantizationType'] +_CONVOLUTIONLAYERPARAMS.fields_by_name['valid'].message_type = _VALIDPADDING +_CONVOLUTIONLAYERPARAMS.fields_by_name['same'].message_type = _SAMEPADDING +_CONVOLUTIONLAYERPARAMS.fields_by_name['weights'].message_type = _WEIGHTPARAMS +_CONVOLUTIONLAYERPARAMS.fields_by_name['bias'].message_type = _WEIGHTPARAMS +_CONVOLUTIONLAYERPARAMS.oneofs_by_name['ConvolutionPaddingType'].fields.append( + _CONVOLUTIONLAYERPARAMS.fields_by_name['valid']) +_CONVOLUTIONLAYERPARAMS.fields_by_name['valid'].containing_oneof = 
_CONVOLUTIONLAYERPARAMS.oneofs_by_name['ConvolutionPaddingType'] +_CONVOLUTIONLAYERPARAMS.oneofs_by_name['ConvolutionPaddingType'].fields.append( + _CONVOLUTIONLAYERPARAMS.fields_by_name['same']) +_CONVOLUTIONLAYERPARAMS.fields_by_name['same'].containing_oneof = _CONVOLUTIONLAYERPARAMS.oneofs_by_name['ConvolutionPaddingType'] +_CONVOLUTION3DLAYERPARAMS.fields_by_name['weights'].message_type = _WEIGHTPARAMS +_CONVOLUTION3DLAYERPARAMS.fields_by_name['bias'].message_type = _WEIGHTPARAMS +_CONVOLUTION3DLAYERPARAMS.fields_by_name['paddingType'].enum_type = _CONVOLUTION3DLAYERPARAMS_PADDINGTYPE +_CONVOLUTION3DLAYERPARAMS_PADDINGTYPE.containing_type = _CONVOLUTION3DLAYERPARAMS +_INNERPRODUCTLAYERPARAMS.fields_by_name['weights'].message_type = _WEIGHTPARAMS +_INNERPRODUCTLAYERPARAMS.fields_by_name['bias'].message_type = _WEIGHTPARAMS +_EMBEDDINGLAYERPARAMS.fields_by_name['weights'].message_type = _WEIGHTPARAMS +_EMBEDDINGLAYERPARAMS.fields_by_name['bias'].message_type = _WEIGHTPARAMS +_EMBEDDINGNDLAYERPARAMS.fields_by_name['weights'].message_type = _WEIGHTPARAMS +_EMBEDDINGNDLAYERPARAMS.fields_by_name['bias'].message_type = _WEIGHTPARAMS +_BATCHNORMLAYERPARAMS.fields_by_name['gamma'].message_type = _WEIGHTPARAMS +_BATCHNORMLAYERPARAMS.fields_by_name['beta'].message_type = _WEIGHTPARAMS +_BATCHNORMLAYERPARAMS.fields_by_name['mean'].message_type = _WEIGHTPARAMS +_BATCHNORMLAYERPARAMS.fields_by_name['variance'].message_type = _WEIGHTPARAMS +_POOLINGLAYERPARAMS_VALIDCOMPLETEPADDING.containing_type = _POOLINGLAYERPARAMS +_POOLINGLAYERPARAMS.fields_by_name['type'].enum_type = _POOLINGLAYERPARAMS_POOLINGTYPE +_POOLINGLAYERPARAMS.fields_by_name['valid'].message_type = _VALIDPADDING +_POOLINGLAYERPARAMS.fields_by_name['same'].message_type = _SAMEPADDING +_POOLINGLAYERPARAMS.fields_by_name['includeLastPixel'].message_type = _POOLINGLAYERPARAMS_VALIDCOMPLETEPADDING +_POOLINGLAYERPARAMS_POOLINGTYPE.containing_type = _POOLINGLAYERPARAMS +_POOLINGLAYERPARAMS.oneofs_by_name['PoolingPaddingType'].fields.append( + _POOLINGLAYERPARAMS.fields_by_name['valid']) +_POOLINGLAYERPARAMS.fields_by_name['valid'].containing_oneof = _POOLINGLAYERPARAMS.oneofs_by_name['PoolingPaddingType'] +_POOLINGLAYERPARAMS.oneofs_by_name['PoolingPaddingType'].fields.append( + _POOLINGLAYERPARAMS.fields_by_name['same']) +_POOLINGLAYERPARAMS.fields_by_name['same'].containing_oneof = _POOLINGLAYERPARAMS.oneofs_by_name['PoolingPaddingType'] +_POOLINGLAYERPARAMS.oneofs_by_name['PoolingPaddingType'].fields.append( + _POOLINGLAYERPARAMS.fields_by_name['includeLastPixel']) +_POOLINGLAYERPARAMS.fields_by_name['includeLastPixel'].containing_oneof = _POOLINGLAYERPARAMS.oneofs_by_name['PoolingPaddingType'] +_POOLING3DLAYERPARAMS.fields_by_name['type'].enum_type = _POOLING3DLAYERPARAMS_POOLINGTYPE3D +_POOLING3DLAYERPARAMS.fields_by_name['paddingType'].enum_type = _POOLING3DLAYERPARAMS_POOLING3DPADDINGTYPE +_POOLING3DLAYERPARAMS_POOLINGTYPE3D.containing_type = _POOLING3DLAYERPARAMS +_POOLING3DLAYERPARAMS_POOLING3DPADDINGTYPE.containing_type = _POOLING3DLAYERPARAMS +_GLOBALPOOLING3DLAYERPARAMS.fields_by_name['type'].enum_type = _GLOBALPOOLING3DLAYERPARAMS_GLOBALPOOLINGTYPE3D +_GLOBALPOOLING3DLAYERPARAMS_GLOBALPOOLINGTYPE3D.containing_type = _GLOBALPOOLING3DLAYERPARAMS +_PADDINGLAYERPARAMS_PADDINGCONSTANT.containing_type = _PADDINGLAYERPARAMS +_PADDINGLAYERPARAMS_PADDINGREFLECTION.containing_type = _PADDINGLAYERPARAMS +_PADDINGLAYERPARAMS_PADDINGREPLICATION.containing_type = _PADDINGLAYERPARAMS +_PADDINGLAYERPARAMS.fields_by_name['constant'].message_type 
= _PADDINGLAYERPARAMS_PADDINGCONSTANT +_PADDINGLAYERPARAMS.fields_by_name['reflection'].message_type = _PADDINGLAYERPARAMS_PADDINGREFLECTION +_PADDINGLAYERPARAMS.fields_by_name['replication'].message_type = _PADDINGLAYERPARAMS_PADDINGREPLICATION +_PADDINGLAYERPARAMS.fields_by_name['paddingAmounts'].message_type = _BORDERAMOUNTS +_PADDINGLAYERPARAMS.oneofs_by_name['PaddingType'].fields.append( + _PADDINGLAYERPARAMS.fields_by_name['constant']) +_PADDINGLAYERPARAMS.fields_by_name['constant'].containing_oneof = _PADDINGLAYERPARAMS.oneofs_by_name['PaddingType'] +_PADDINGLAYERPARAMS.oneofs_by_name['PaddingType'].fields.append( + _PADDINGLAYERPARAMS.fields_by_name['reflection']) +_PADDINGLAYERPARAMS.fields_by_name['reflection'].containing_oneof = _PADDINGLAYERPARAMS.oneofs_by_name['PaddingType'] +_PADDINGLAYERPARAMS.oneofs_by_name['PaddingType'].fields.append( + _PADDINGLAYERPARAMS.fields_by_name['replication']) +_PADDINGLAYERPARAMS.fields_by_name['replication'].containing_oneof = _PADDINGLAYERPARAMS.oneofs_by_name['PaddingType'] +_UNARYFUNCTIONLAYERPARAMS.fields_by_name['type'].enum_type = _UNARYFUNCTIONLAYERPARAMS_OPERATION +_UNARYFUNCTIONLAYERPARAMS_OPERATION.containing_type = _UNARYFUNCTIONLAYERPARAMS +_UPSAMPLELAYERPARAMS.fields_by_name['mode'].enum_type = _UPSAMPLELAYERPARAMS_INTERPOLATIONMODE +_UPSAMPLELAYERPARAMS.fields_by_name['linearUpsampleMode'].enum_type = _UPSAMPLELAYERPARAMS_LINEARUPSAMPLEMODE +_UPSAMPLELAYERPARAMS_INTERPOLATIONMODE.containing_type = _UPSAMPLELAYERPARAMS +_UPSAMPLELAYERPARAMS_LINEARUPSAMPLEMODE.containing_type = _UPSAMPLELAYERPARAMS +_RESIZEBILINEARLAYERPARAMS.fields_by_name['mode'].message_type = _SAMPLINGMODE +_CROPRESIZELAYERPARAMS.fields_by_name['mode'].message_type = _SAMPLINGMODE +_CROPRESIZELAYERPARAMS.fields_by_name['boxIndicesMode'].message_type = _BOXCOORDINATESMODE +_BIASLAYERPARAMS.fields_by_name['bias'].message_type = _WEIGHTPARAMS +_SCALELAYERPARAMS.fields_by_name['scale'].message_type = _WEIGHTPARAMS +_SCALELAYERPARAMS.fields_by_name['bias'].message_type = _WEIGHTPARAMS +_LOADCONSTANTLAYERPARAMS.fields_by_name['data'].message_type = _WEIGHTPARAMS +_FLATTENLAYERPARAMS.fields_by_name['mode'].enum_type = _FLATTENLAYERPARAMS_FLATTENORDER +_FLATTENLAYERPARAMS_FLATTENORDER.containing_type = _FLATTENLAYERPARAMS +_RESHAPELAYERPARAMS.fields_by_name['mode'].enum_type = _RESHAPELAYERPARAMS_RESHAPEORDER +_RESHAPELAYERPARAMS_RESHAPEORDER.containing_type = _RESHAPELAYERPARAMS +_REORGANIZEDATALAYERPARAMS.fields_by_name['mode'].enum_type = _REORGANIZEDATALAYERPARAMS_REORGANIZATIONTYPE +_REORGANIZEDATALAYERPARAMS_REORGANIZATIONTYPE.containing_type = _REORGANIZEDATALAYERPARAMS +_SLICELAYERPARAMS.fields_by_name['axis'].enum_type = _SLICELAYERPARAMS_SLICEAXIS +_SLICELAYERPARAMS_SLICEAXIS.containing_type = _SLICELAYERPARAMS +_REDUCELAYERPARAMS.fields_by_name['mode'].enum_type = _REDUCELAYERPARAMS_REDUCEOPERATION +_REDUCELAYERPARAMS.fields_by_name['axis'].enum_type = _REDUCELAYERPARAMS_REDUCEAXIS +_REDUCELAYERPARAMS_REDUCEOPERATION.containing_type = _REDUCELAYERPARAMS +_REDUCELAYERPARAMS_REDUCEAXIS.containing_type = _REDUCELAYERPARAMS +_CROPLAYERPARAMS.fields_by_name['cropAmounts'].message_type = _BORDERAMOUNTS +_SIMPLERECURRENTLAYERPARAMS.fields_by_name['activation'].message_type = _ACTIVATIONPARAMS +_SIMPLERECURRENTLAYERPARAMS.fields_by_name['weightMatrix'].message_type = _WEIGHTPARAMS +_SIMPLERECURRENTLAYERPARAMS.fields_by_name['recursionMatrix'].message_type = _WEIGHTPARAMS +_SIMPLERECURRENTLAYERPARAMS.fields_by_name['biasVector'].message_type = _WEIGHTPARAMS 
+_GRULAYERPARAMS.fields_by_name['activations'].message_type = _ACTIVATIONPARAMS +_GRULAYERPARAMS.fields_by_name['updateGateWeightMatrix'].message_type = _WEIGHTPARAMS +_GRULAYERPARAMS.fields_by_name['resetGateWeightMatrix'].message_type = _WEIGHTPARAMS +_GRULAYERPARAMS.fields_by_name['outputGateWeightMatrix'].message_type = _WEIGHTPARAMS +_GRULAYERPARAMS.fields_by_name['updateGateRecursionMatrix'].message_type = _WEIGHTPARAMS +_GRULAYERPARAMS.fields_by_name['resetGateRecursionMatrix'].message_type = _WEIGHTPARAMS +_GRULAYERPARAMS.fields_by_name['outputGateRecursionMatrix'].message_type = _WEIGHTPARAMS +_GRULAYERPARAMS.fields_by_name['updateGateBiasVector'].message_type = _WEIGHTPARAMS +_GRULAYERPARAMS.fields_by_name['resetGateBiasVector'].message_type = _WEIGHTPARAMS +_GRULAYERPARAMS.fields_by_name['outputGateBiasVector'].message_type = _WEIGHTPARAMS +_LSTMWEIGHTPARAMS.fields_by_name['inputGateWeightMatrix'].message_type = _WEIGHTPARAMS +_LSTMWEIGHTPARAMS.fields_by_name['forgetGateWeightMatrix'].message_type = _WEIGHTPARAMS +_LSTMWEIGHTPARAMS.fields_by_name['blockInputWeightMatrix'].message_type = _WEIGHTPARAMS +_LSTMWEIGHTPARAMS.fields_by_name['outputGateWeightMatrix'].message_type = _WEIGHTPARAMS +_LSTMWEIGHTPARAMS.fields_by_name['inputGateRecursionMatrix'].message_type = _WEIGHTPARAMS +_LSTMWEIGHTPARAMS.fields_by_name['forgetGateRecursionMatrix'].message_type = _WEIGHTPARAMS +_LSTMWEIGHTPARAMS.fields_by_name['blockInputRecursionMatrix'].message_type = _WEIGHTPARAMS +_LSTMWEIGHTPARAMS.fields_by_name['outputGateRecursionMatrix'].message_type = _WEIGHTPARAMS +_LSTMWEIGHTPARAMS.fields_by_name['inputGateBiasVector'].message_type = _WEIGHTPARAMS +_LSTMWEIGHTPARAMS.fields_by_name['forgetGateBiasVector'].message_type = _WEIGHTPARAMS +_LSTMWEIGHTPARAMS.fields_by_name['blockInputBiasVector'].message_type = _WEIGHTPARAMS +_LSTMWEIGHTPARAMS.fields_by_name['outputGateBiasVector'].message_type = _WEIGHTPARAMS +_LSTMWEIGHTPARAMS.fields_by_name['inputGatePeepholeVector'].message_type = _WEIGHTPARAMS +_LSTMWEIGHTPARAMS.fields_by_name['forgetGatePeepholeVector'].message_type = _WEIGHTPARAMS +_LSTMWEIGHTPARAMS.fields_by_name['outputGatePeepholeVector'].message_type = _WEIGHTPARAMS +_UNIDIRECTIONALLSTMLAYERPARAMS.fields_by_name['activations'].message_type = _ACTIVATIONPARAMS +_UNIDIRECTIONALLSTMLAYERPARAMS.fields_by_name['params'].message_type = _LSTMPARAMS +_UNIDIRECTIONALLSTMLAYERPARAMS.fields_by_name['weightParams'].message_type = _LSTMWEIGHTPARAMS +_BIDIRECTIONALLSTMLAYERPARAMS.fields_by_name['activationsForwardLSTM'].message_type = _ACTIVATIONPARAMS +_BIDIRECTIONALLSTMLAYERPARAMS.fields_by_name['activationsBackwardLSTM'].message_type = _ACTIVATIONPARAMS +_BIDIRECTIONALLSTMLAYERPARAMS.fields_by_name['params'].message_type = _LSTMPARAMS +_BIDIRECTIONALLSTMLAYERPARAMS.fields_by_name['weightParams'].message_type = _LSTMWEIGHTPARAMS +_CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.containing_type = _CUSTOMLAYERPARAMS +_CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.oneofs_by_name['value'].fields.append( + _CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.fields_by_name['doubleValue']) +_CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.fields_by_name['doubleValue'].containing_oneof = _CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.oneofs_by_name['value'] +_CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.oneofs_by_name['value'].fields.append( + _CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.fields_by_name['stringValue']) +_CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.fields_by_name['stringValue'].containing_oneof = 
_CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.oneofs_by_name['value'] +_CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.oneofs_by_name['value'].fields.append( + _CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.fields_by_name['intValue']) +_CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.fields_by_name['intValue'].containing_oneof = _CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.oneofs_by_name['value'] +_CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.oneofs_by_name['value'].fields.append( + _CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.fields_by_name['longValue']) +_CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.fields_by_name['longValue'].containing_oneof = _CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.oneofs_by_name['value'] +_CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.oneofs_by_name['value'].fields.append( + _CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.fields_by_name['boolValue']) +_CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.fields_by_name['boolValue'].containing_oneof = _CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE.oneofs_by_name['value'] +_CUSTOMLAYERPARAMS_PARAMETERSENTRY.fields_by_name['value'].message_type = _CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE +_CUSTOMLAYERPARAMS_PARAMETERSENTRY.containing_type = _CUSTOMLAYERPARAMS +_CUSTOMLAYERPARAMS.fields_by_name['weights'].message_type = _WEIGHTPARAMS +_CUSTOMLAYERPARAMS.fields_by_name['parameters'].message_type = _CUSTOMLAYERPARAMS_PARAMETERSENTRY +_BATCHEDMATMULLAYERPARAMS.fields_by_name['weights'].message_type = _WEIGHTPARAMS +_BATCHEDMATMULLAYERPARAMS.fields_by_name['bias'].message_type = _WEIGHTPARAMS +_LOADCONSTANTNDLAYERPARAMS.fields_by_name['data'].message_type = _WEIGHTPARAMS +_SCATTERLAYERPARAMS.fields_by_name['mode'].enum_type = _SCATTERMODE +_SCATTERNDLAYERPARAMS.fields_by_name['mode'].enum_type = _SCATTERMODE +_SCATTERALONGAXISLAYERPARAMS.fields_by_name['mode'].enum_type = _SCATTERMODE +_GELULAYERPARAMS.fields_by_name['mode'].enum_type = _GELULAYERPARAMS_GELUMODE +_GELULAYERPARAMS_GELUMODE.containing_type = _GELULAYERPARAMS +_LAYERNORMALIZATIONLAYERPARAMS.fields_by_name['gamma'].message_type = _WEIGHTPARAMS +_LAYERNORMALIZATIONLAYERPARAMS.fields_by_name['beta'].message_type = _WEIGHTPARAMS +_NEURALNETWORKCLASSIFIER.fields_by_name['layers'].message_type = _NEURALNETWORKLAYER +_NEURALNETWORKCLASSIFIER.fields_by_name['preprocessing'].message_type = _NEURALNETWORKPREPROCESSING +_NEURALNETWORKCLASSIFIER.fields_by_name['arrayInputShapeMapping'].enum_type = _NEURALNETWORKMULTIARRAYSHAPEMAPPING +_NEURALNETWORKCLASSIFIER.fields_by_name['imageInputShapeMapping'].enum_type = _NEURALNETWORKIMAGESHAPEMAPPING +_NEURALNETWORKCLASSIFIER.fields_by_name['updateParams'].message_type = _NETWORKUPDATEPARAMETERS +_NEURALNETWORKCLASSIFIER.fields_by_name['stringClassLabels'].message_type = DataStructures__pb2._STRINGVECTOR +_NEURALNETWORKCLASSIFIER.fields_by_name['int64ClassLabels'].message_type = DataStructures__pb2._INT64VECTOR +_NEURALNETWORKCLASSIFIER.oneofs_by_name['ClassLabels'].fields.append( + _NEURALNETWORKCLASSIFIER.fields_by_name['stringClassLabels']) +_NEURALNETWORKCLASSIFIER.fields_by_name['stringClassLabels'].containing_oneof = _NEURALNETWORKCLASSIFIER.oneofs_by_name['ClassLabels'] +_NEURALNETWORKCLASSIFIER.oneofs_by_name['ClassLabels'].fields.append( + _NEURALNETWORKCLASSIFIER.fields_by_name['int64ClassLabels']) +_NEURALNETWORKCLASSIFIER.fields_by_name['int64ClassLabels'].containing_oneof = _NEURALNETWORKCLASSIFIER.oneofs_by_name['ClassLabels'] +_NEURALNETWORKREGRESSOR.fields_by_name['layers'].message_type = _NEURALNETWORKLAYER +_NEURALNETWORKREGRESSOR.fields_by_name['preprocessing'].message_type = 
_NEURALNETWORKPREPROCESSING +_NEURALNETWORKREGRESSOR.fields_by_name['arrayInputShapeMapping'].enum_type = _NEURALNETWORKMULTIARRAYSHAPEMAPPING +_NEURALNETWORKREGRESSOR.fields_by_name['imageInputShapeMapping'].enum_type = _NEURALNETWORKIMAGESHAPEMAPPING +_NEURALNETWORKREGRESSOR.fields_by_name['updateParams'].message_type = _NETWORKUPDATEPARAMETERS +_NETWORKUPDATEPARAMETERS.fields_by_name['lossLayers'].message_type = _LOSSLAYER +_NETWORKUPDATEPARAMETERS.fields_by_name['optimizer'].message_type = _OPTIMIZER +_NETWORKUPDATEPARAMETERS.fields_by_name['epochs'].message_type = Parameters__pb2._INT64PARAMETER +_NETWORKUPDATEPARAMETERS.fields_by_name['shuffle'].message_type = Parameters__pb2._BOOLPARAMETER +_NETWORKUPDATEPARAMETERS.fields_by_name['seed'].message_type = Parameters__pb2._INT64PARAMETER +_LOSSLAYER.fields_by_name['categoricalCrossEntropyLossLayer'].message_type = _CATEGORICALCROSSENTROPYLOSSLAYER +_LOSSLAYER.fields_by_name['meanSquaredErrorLossLayer'].message_type = _MEANSQUAREDERRORLOSSLAYER +_LOSSLAYER.oneofs_by_name['LossLayerType'].fields.append( + _LOSSLAYER.fields_by_name['categoricalCrossEntropyLossLayer']) +_LOSSLAYER.fields_by_name['categoricalCrossEntropyLossLayer'].containing_oneof = _LOSSLAYER.oneofs_by_name['LossLayerType'] +_LOSSLAYER.oneofs_by_name['LossLayerType'].fields.append( + _LOSSLAYER.fields_by_name['meanSquaredErrorLossLayer']) +_LOSSLAYER.fields_by_name['meanSquaredErrorLossLayer'].containing_oneof = _LOSSLAYER.oneofs_by_name['LossLayerType'] +_OPTIMIZER.fields_by_name['sgdOptimizer'].message_type = _SGDOPTIMIZER +_OPTIMIZER.fields_by_name['adamOptimizer'].message_type = _ADAMOPTIMIZER +_OPTIMIZER.oneofs_by_name['OptimizerType'].fields.append( + _OPTIMIZER.fields_by_name['sgdOptimizer']) +_OPTIMIZER.fields_by_name['sgdOptimizer'].containing_oneof = _OPTIMIZER.oneofs_by_name['OptimizerType'] +_OPTIMIZER.oneofs_by_name['OptimizerType'].fields.append( + _OPTIMIZER.fields_by_name['adamOptimizer']) +_OPTIMIZER.fields_by_name['adamOptimizer'].containing_oneof = _OPTIMIZER.oneofs_by_name['OptimizerType'] +_SGDOPTIMIZER.fields_by_name['learningRate'].message_type = Parameters__pb2._DOUBLEPARAMETER +_SGDOPTIMIZER.fields_by_name['miniBatchSize'].message_type = Parameters__pb2._INT64PARAMETER +_SGDOPTIMIZER.fields_by_name['momentum'].message_type = Parameters__pb2._DOUBLEPARAMETER +_ADAMOPTIMIZER.fields_by_name['learningRate'].message_type = Parameters__pb2._DOUBLEPARAMETER +_ADAMOPTIMIZER.fields_by_name['miniBatchSize'].message_type = Parameters__pb2._INT64PARAMETER +_ADAMOPTIMIZER.fields_by_name['beta1'].message_type = Parameters__pb2._DOUBLEPARAMETER +_ADAMOPTIMIZER.fields_by_name['beta2'].message_type = Parameters__pb2._DOUBLEPARAMETER +_ADAMOPTIMIZER.fields_by_name['eps'].message_type = Parameters__pb2._DOUBLEPARAMETER +DESCRIPTOR.message_types_by_name['NeuralNetwork'] = _NEURALNETWORK +DESCRIPTOR.message_types_by_name['NeuralNetworkImageScaler'] = _NEURALNETWORKIMAGESCALER +DESCRIPTOR.message_types_by_name['NeuralNetworkMeanImage'] = _NEURALNETWORKMEANIMAGE +DESCRIPTOR.message_types_by_name['NeuralNetworkPreprocessing'] = _NEURALNETWORKPREPROCESSING +DESCRIPTOR.message_types_by_name['ActivationReLU'] = _ACTIVATIONRELU +DESCRIPTOR.message_types_by_name['ActivationLeakyReLU'] = _ACTIVATIONLEAKYRELU +DESCRIPTOR.message_types_by_name['ActivationTanh'] = _ACTIVATIONTANH +DESCRIPTOR.message_types_by_name['ActivationScaledTanh'] = _ACTIVATIONSCALEDTANH +DESCRIPTOR.message_types_by_name['ActivationSigmoid'] = _ACTIVATIONSIGMOID 
+DESCRIPTOR.message_types_by_name['ActivationLinear'] = _ACTIVATIONLINEAR +DESCRIPTOR.message_types_by_name['ActivationSigmoidHard'] = _ACTIVATIONSIGMOIDHARD +DESCRIPTOR.message_types_by_name['ActivationPReLU'] = _ACTIVATIONPRELU +DESCRIPTOR.message_types_by_name['ActivationELU'] = _ACTIVATIONELU +DESCRIPTOR.message_types_by_name['ActivationThresholdedReLU'] = _ACTIVATIONTHRESHOLDEDRELU +DESCRIPTOR.message_types_by_name['ActivationSoftsign'] = _ACTIVATIONSOFTSIGN +DESCRIPTOR.message_types_by_name['ActivationSoftplus'] = _ACTIVATIONSOFTPLUS +DESCRIPTOR.message_types_by_name['ActivationParametricSoftplus'] = _ACTIVATIONPARAMETRICSOFTPLUS +DESCRIPTOR.message_types_by_name['ActivationParams'] = _ACTIVATIONPARAMS +DESCRIPTOR.message_types_by_name['Tensor'] = _TENSOR +DESCRIPTOR.message_types_by_name['NeuralNetworkLayer'] = _NEURALNETWORKLAYER +DESCRIPTOR.message_types_by_name['BranchLayerParams'] = _BRANCHLAYERPARAMS +DESCRIPTOR.message_types_by_name['LoopLayerParams'] = _LOOPLAYERPARAMS +DESCRIPTOR.message_types_by_name['LoopBreakLayerParams'] = _LOOPBREAKLAYERPARAMS +DESCRIPTOR.message_types_by_name['LoopContinueLayerParams'] = _LOOPCONTINUELAYERPARAMS +DESCRIPTOR.message_types_by_name['CopyLayerParams'] = _COPYLAYERPARAMS +DESCRIPTOR.message_types_by_name['GreaterThanLayerParams'] = _GREATERTHANLAYERPARAMS +DESCRIPTOR.message_types_by_name['GreaterEqualLayerParams'] = _GREATEREQUALLAYERPARAMS +DESCRIPTOR.message_types_by_name['LessThanLayerParams'] = _LESSTHANLAYERPARAMS +DESCRIPTOR.message_types_by_name['LessEqualLayerParams'] = _LESSEQUALLAYERPARAMS +DESCRIPTOR.message_types_by_name['EqualLayerParams'] = _EQUALLAYERPARAMS +DESCRIPTOR.message_types_by_name['NotEqualLayerParams'] = _NOTEQUALLAYERPARAMS +DESCRIPTOR.message_types_by_name['LogicalAndLayerParams'] = _LOGICALANDLAYERPARAMS +DESCRIPTOR.message_types_by_name['LogicalOrLayerParams'] = _LOGICALORLAYERPARAMS +DESCRIPTOR.message_types_by_name['LogicalXorLayerParams'] = _LOGICALXORLAYERPARAMS +DESCRIPTOR.message_types_by_name['LogicalNotLayerParams'] = _LOGICALNOTLAYERPARAMS +DESCRIPTOR.message_types_by_name['BorderAmounts'] = _BORDERAMOUNTS +DESCRIPTOR.message_types_by_name['ValidPadding'] = _VALIDPADDING +DESCRIPTOR.message_types_by_name['SamePadding'] = _SAMEPADDING +DESCRIPTOR.message_types_by_name['SamplingMode'] = _SAMPLINGMODE +DESCRIPTOR.message_types_by_name['BoxCoordinatesMode'] = _BOXCOORDINATESMODE +DESCRIPTOR.message_types_by_name['WeightParams'] = _WEIGHTPARAMS +DESCRIPTOR.message_types_by_name['QuantizationParams'] = _QUANTIZATIONPARAMS +DESCRIPTOR.message_types_by_name['LinearQuantizationParams'] = _LINEARQUANTIZATIONPARAMS +DESCRIPTOR.message_types_by_name['LookUpTableQuantizationParams'] = _LOOKUPTABLEQUANTIZATIONPARAMS +DESCRIPTOR.message_types_by_name['ConvolutionLayerParams'] = _CONVOLUTIONLAYERPARAMS +DESCRIPTOR.message_types_by_name['Convolution3DLayerParams'] = _CONVOLUTION3DLAYERPARAMS +DESCRIPTOR.message_types_by_name['InnerProductLayerParams'] = _INNERPRODUCTLAYERPARAMS +DESCRIPTOR.message_types_by_name['EmbeddingLayerParams'] = _EMBEDDINGLAYERPARAMS +DESCRIPTOR.message_types_by_name['EmbeddingNDLayerParams'] = _EMBEDDINGNDLAYERPARAMS +DESCRIPTOR.message_types_by_name['BatchnormLayerParams'] = _BATCHNORMLAYERPARAMS +DESCRIPTOR.message_types_by_name['PoolingLayerParams'] = _POOLINGLAYERPARAMS +DESCRIPTOR.message_types_by_name['Pooling3DLayerParams'] = _POOLING3DLAYERPARAMS +DESCRIPTOR.message_types_by_name['GlobalPooling3DLayerParams'] = _GLOBALPOOLING3DLAYERPARAMS 
+DESCRIPTOR.message_types_by_name['PaddingLayerParams'] = _PADDINGLAYERPARAMS +DESCRIPTOR.message_types_by_name['ConcatLayerParams'] = _CONCATLAYERPARAMS +DESCRIPTOR.message_types_by_name['LRNLayerParams'] = _LRNLAYERPARAMS +DESCRIPTOR.message_types_by_name['SoftmaxLayerParams'] = _SOFTMAXLAYERPARAMS +DESCRIPTOR.message_types_by_name['SplitLayerParams'] = _SPLITLAYERPARAMS +DESCRIPTOR.message_types_by_name['AddLayerParams'] = _ADDLAYERPARAMS +DESCRIPTOR.message_types_by_name['MultiplyLayerParams'] = _MULTIPLYLAYERPARAMS +DESCRIPTOR.message_types_by_name['UnaryFunctionLayerParams'] = _UNARYFUNCTIONLAYERPARAMS +DESCRIPTOR.message_types_by_name['UpsampleLayerParams'] = _UPSAMPLELAYERPARAMS +DESCRIPTOR.message_types_by_name['ResizeBilinearLayerParams'] = _RESIZEBILINEARLAYERPARAMS +DESCRIPTOR.message_types_by_name['CropResizeLayerParams'] = _CROPRESIZELAYERPARAMS +DESCRIPTOR.message_types_by_name['BiasLayerParams'] = _BIASLAYERPARAMS +DESCRIPTOR.message_types_by_name['ScaleLayerParams'] = _SCALELAYERPARAMS +DESCRIPTOR.message_types_by_name['LoadConstantLayerParams'] = _LOADCONSTANTLAYERPARAMS +DESCRIPTOR.message_types_by_name['L2NormalizeLayerParams'] = _L2NORMALIZELAYERPARAMS +DESCRIPTOR.message_types_by_name['FlattenLayerParams'] = _FLATTENLAYERPARAMS +DESCRIPTOR.message_types_by_name['ReshapeLayerParams'] = _RESHAPELAYERPARAMS +DESCRIPTOR.message_types_by_name['PermuteLayerParams'] = _PERMUTELAYERPARAMS +DESCRIPTOR.message_types_by_name['ReorganizeDataLayerParams'] = _REORGANIZEDATALAYERPARAMS +DESCRIPTOR.message_types_by_name['SliceLayerParams'] = _SLICELAYERPARAMS +DESCRIPTOR.message_types_by_name['ReduceLayerParams'] = _REDUCELAYERPARAMS +DESCRIPTOR.message_types_by_name['CropLayerParams'] = _CROPLAYERPARAMS +DESCRIPTOR.message_types_by_name['AverageLayerParams'] = _AVERAGELAYERPARAMS +DESCRIPTOR.message_types_by_name['MaxLayerParams'] = _MAXLAYERPARAMS +DESCRIPTOR.message_types_by_name['MinLayerParams'] = _MINLAYERPARAMS +DESCRIPTOR.message_types_by_name['DotProductLayerParams'] = _DOTPRODUCTLAYERPARAMS +DESCRIPTOR.message_types_by_name['MeanVarianceNormalizeLayerParams'] = _MEANVARIANCENORMALIZELAYERPARAMS +DESCRIPTOR.message_types_by_name['SequenceRepeatLayerParams'] = _SEQUENCEREPEATLAYERPARAMS +DESCRIPTOR.message_types_by_name['SimpleRecurrentLayerParams'] = _SIMPLERECURRENTLAYERPARAMS +DESCRIPTOR.message_types_by_name['GRULayerParams'] = _GRULAYERPARAMS +DESCRIPTOR.message_types_by_name['LSTMParams'] = _LSTMPARAMS +DESCRIPTOR.message_types_by_name['LSTMWeightParams'] = _LSTMWEIGHTPARAMS +DESCRIPTOR.message_types_by_name['UniDirectionalLSTMLayerParams'] = _UNIDIRECTIONALLSTMLAYERPARAMS +DESCRIPTOR.message_types_by_name['BiDirectionalLSTMLayerParams'] = _BIDIRECTIONALLSTMLAYERPARAMS +DESCRIPTOR.message_types_by_name['CustomLayerParams'] = _CUSTOMLAYERPARAMS +DESCRIPTOR.message_types_by_name['TransposeLayerParams'] = _TRANSPOSELAYERPARAMS +DESCRIPTOR.message_types_by_name['BatchedMatMulLayerParams'] = _BATCHEDMATMULLAYERPARAMS +DESCRIPTOR.message_types_by_name['ConcatNDLayerParams'] = _CONCATNDLAYERPARAMS +DESCRIPTOR.message_types_by_name['SoftmaxNDLayerParams'] = _SOFTMAXNDLAYERPARAMS +DESCRIPTOR.message_types_by_name['ReverseLayerParams'] = _REVERSELAYERPARAMS +DESCRIPTOR.message_types_by_name['ReverseSeqLayerParams'] = _REVERSESEQLAYERPARAMS +DESCRIPTOR.message_types_by_name['LoadConstantNDLayerParams'] = _LOADCONSTANTNDLAYERPARAMS +DESCRIPTOR.message_types_by_name['FillLikeLayerParams'] = _FILLLIKELAYERPARAMS +DESCRIPTOR.message_types_by_name['FillStaticLayerParams'] = 
_FILLSTATICLAYERPARAMS +DESCRIPTOR.message_types_by_name['FillDynamicLayerParams'] = _FILLDYNAMICLAYERPARAMS +DESCRIPTOR.message_types_by_name['WhereBroadcastableLayerParams'] = _WHEREBROADCASTABLELAYERPARAMS +DESCRIPTOR.message_types_by_name['SinLayerParams'] = _SINLAYERPARAMS +DESCRIPTOR.message_types_by_name['CosLayerParams'] = _COSLAYERPARAMS +DESCRIPTOR.message_types_by_name['TanLayerParams'] = _TANLAYERPARAMS +DESCRIPTOR.message_types_by_name['AsinLayerParams'] = _ASINLAYERPARAMS +DESCRIPTOR.message_types_by_name['AcosLayerParams'] = _ACOSLAYERPARAMS +DESCRIPTOR.message_types_by_name['AtanLayerParams'] = _ATANLAYERPARAMS +DESCRIPTOR.message_types_by_name['SinhLayerParams'] = _SINHLAYERPARAMS +DESCRIPTOR.message_types_by_name['CoshLayerParams'] = _COSHLAYERPARAMS +DESCRIPTOR.message_types_by_name['TanhLayerParams'] = _TANHLAYERPARAMS +DESCRIPTOR.message_types_by_name['AsinhLayerParams'] = _ASINHLAYERPARAMS +DESCRIPTOR.message_types_by_name['AcoshLayerParams'] = _ACOSHLAYERPARAMS +DESCRIPTOR.message_types_by_name['AtanhLayerParams'] = _ATANHLAYERPARAMS +DESCRIPTOR.message_types_by_name['PowBroadcastableLayerParams'] = _POWBROADCASTABLELAYERPARAMS +DESCRIPTOR.message_types_by_name['Exp2LayerParams'] = _EXP2LAYERPARAMS +DESCRIPTOR.message_types_by_name['WhereNonZeroLayerParams'] = _WHERENONZEROLAYERPARAMS +DESCRIPTOR.message_types_by_name['MatrixBandPartLayerParams'] = _MATRIXBANDPARTLAYERPARAMS +DESCRIPTOR.message_types_by_name['UpperTriangularLayerParams'] = _UPPERTRIANGULARLAYERPARAMS +DESCRIPTOR.message_types_by_name['LowerTriangularLayerParams'] = _LOWERTRIANGULARLAYERPARAMS +DESCRIPTOR.message_types_by_name['BroadcastToLikeLayerParams'] = _BROADCASTTOLIKELAYERPARAMS +DESCRIPTOR.message_types_by_name['BroadcastToStaticLayerParams'] = _BROADCASTTOSTATICLAYERPARAMS +DESCRIPTOR.message_types_by_name['BroadcastToDynamicLayerParams'] = _BROADCASTTODYNAMICLAYERPARAMS +DESCRIPTOR.message_types_by_name['AddBroadcastableLayerParams'] = _ADDBROADCASTABLELAYERPARAMS +DESCRIPTOR.message_types_by_name['MaxBroadcastableLayerParams'] = _MAXBROADCASTABLELAYERPARAMS +DESCRIPTOR.message_types_by_name['MinBroadcastableLayerParams'] = _MINBROADCASTABLELAYERPARAMS +DESCRIPTOR.message_types_by_name['ModBroadcastableLayerParams'] = _MODBROADCASTABLELAYERPARAMS +DESCRIPTOR.message_types_by_name['FloorDivBroadcastableLayerParams'] = _FLOORDIVBROADCASTABLELAYERPARAMS +DESCRIPTOR.message_types_by_name['SubtractBroadcastableLayerParams'] = _SUBTRACTBROADCASTABLELAYERPARAMS +DESCRIPTOR.message_types_by_name['MultiplyBroadcastableLayerParams'] = _MULTIPLYBROADCASTABLELAYERPARAMS +DESCRIPTOR.message_types_by_name['DivideBroadcastableLayerParams'] = _DIVIDEBROADCASTABLELAYERPARAMS +DESCRIPTOR.message_types_by_name['GatherLayerParams'] = _GATHERLAYERPARAMS +DESCRIPTOR.message_types_by_name['ScatterLayerParams'] = _SCATTERLAYERPARAMS +DESCRIPTOR.message_types_by_name['GatherNDLayerParams'] = _GATHERNDLAYERPARAMS +DESCRIPTOR.message_types_by_name['ScatterNDLayerParams'] = _SCATTERNDLAYERPARAMS +DESCRIPTOR.message_types_by_name['GatherAlongAxisLayerParams'] = _GATHERALONGAXISLAYERPARAMS +DESCRIPTOR.message_types_by_name['ScatterAlongAxisLayerParams'] = _SCATTERALONGAXISLAYERPARAMS +DESCRIPTOR.message_types_by_name['StackLayerParams'] = _STACKLAYERPARAMS +DESCRIPTOR.message_types_by_name['RankPreservingReshapeLayerParams'] = _RANKPRESERVINGRESHAPELAYERPARAMS +DESCRIPTOR.message_types_by_name['ConstantPaddingLayerParams'] = _CONSTANTPADDINGLAYERPARAMS +DESCRIPTOR.message_types_by_name['RandomNormalLikeLayerParams'] = 
_RANDOMNORMALLIKELAYERPARAMS +DESCRIPTOR.message_types_by_name['RandomNormalStaticLayerParams'] = _RANDOMNORMALSTATICLAYERPARAMS +DESCRIPTOR.message_types_by_name['RandomNormalDynamicLayerParams'] = _RANDOMNORMALDYNAMICLAYERPARAMS +DESCRIPTOR.message_types_by_name['RandomUniformLikeLayerParams'] = _RANDOMUNIFORMLIKELAYERPARAMS +DESCRIPTOR.message_types_by_name['RandomUniformStaticLayerParams'] = _RANDOMUNIFORMSTATICLAYERPARAMS +DESCRIPTOR.message_types_by_name['RandomUniformDynamicLayerParams'] = _RANDOMUNIFORMDYNAMICLAYERPARAMS +DESCRIPTOR.message_types_by_name['RandomBernoulliLikeLayerParams'] = _RANDOMBERNOULLILIKELAYERPARAMS +DESCRIPTOR.message_types_by_name['RandomBernoulliStaticLayerParams'] = _RANDOMBERNOULLISTATICLAYERPARAMS +DESCRIPTOR.message_types_by_name['RandomBernoulliDynamicLayerParams'] = _RANDOMBERNOULLIDYNAMICLAYERPARAMS +DESCRIPTOR.message_types_by_name['CategoricalDistributionLayerParams'] = _CATEGORICALDISTRIBUTIONLAYERPARAMS +DESCRIPTOR.message_types_by_name['ReduceL1LayerParams'] = _REDUCEL1LAYERPARAMS +DESCRIPTOR.message_types_by_name['ReduceL2LayerParams'] = _REDUCEL2LAYERPARAMS +DESCRIPTOR.message_types_by_name['ReduceMaxLayerParams'] = _REDUCEMAXLAYERPARAMS +DESCRIPTOR.message_types_by_name['ReduceMinLayerParams'] = _REDUCEMINLAYERPARAMS +DESCRIPTOR.message_types_by_name['ReduceSumLayerParams'] = _REDUCESUMLAYERPARAMS +DESCRIPTOR.message_types_by_name['ReduceProdLayerParams'] = _REDUCEPRODLAYERPARAMS +DESCRIPTOR.message_types_by_name['ReduceMeanLayerParams'] = _REDUCEMEANLAYERPARAMS +DESCRIPTOR.message_types_by_name['ReduceLogSumLayerParams'] = _REDUCELOGSUMLAYERPARAMS +DESCRIPTOR.message_types_by_name['ReduceSumSquareLayerParams'] = _REDUCESUMSQUARELAYERPARAMS +DESCRIPTOR.message_types_by_name['ReduceLogSumExpLayerParams'] = _REDUCELOGSUMEXPLAYERPARAMS +DESCRIPTOR.message_types_by_name['ExpandDimsLayerParams'] = _EXPANDDIMSLAYERPARAMS +DESCRIPTOR.message_types_by_name['FlattenTo2DLayerParams'] = _FLATTENTO2DLAYERPARAMS +DESCRIPTOR.message_types_by_name['ReshapeStaticLayerParams'] = _RESHAPESTATICLAYERPARAMS +DESCRIPTOR.message_types_by_name['ReshapeLikeLayerParams'] = _RESHAPELIKELAYERPARAMS +DESCRIPTOR.message_types_by_name['ReshapeDynamicLayerParams'] = _RESHAPEDYNAMICLAYERPARAMS +DESCRIPTOR.message_types_by_name['SqueezeLayerParams'] = _SQUEEZELAYERPARAMS +DESCRIPTOR.message_types_by_name['TopKLayerParams'] = _TOPKLAYERPARAMS +DESCRIPTOR.message_types_by_name['ArgMaxLayerParams'] = _ARGMAXLAYERPARAMS +DESCRIPTOR.message_types_by_name['ArgMinLayerParams'] = _ARGMINLAYERPARAMS +DESCRIPTOR.message_types_by_name['SplitNDLayerParams'] = _SPLITNDLAYERPARAMS +DESCRIPTOR.message_types_by_name['CeilLayerParams'] = _CEILLAYERPARAMS +DESCRIPTOR.message_types_by_name['RoundLayerParams'] = _ROUNDLAYERPARAMS +DESCRIPTOR.message_types_by_name['FloorLayerParams'] = _FLOORLAYERPARAMS +DESCRIPTOR.message_types_by_name['SignLayerParams'] = _SIGNLAYERPARAMS +DESCRIPTOR.message_types_by_name['ClipLayerParams'] = _CLIPLAYERPARAMS +DESCRIPTOR.message_types_by_name['SliceStaticLayerParams'] = _SLICESTATICLAYERPARAMS +DESCRIPTOR.message_types_by_name['SliceDynamicLayerParams'] = _SLICEDYNAMICLAYERPARAMS +DESCRIPTOR.message_types_by_name['TileLayerParams'] = _TILELAYERPARAMS +DESCRIPTOR.message_types_by_name['GetShapeLayerParams'] = _GETSHAPELAYERPARAMS +DESCRIPTOR.message_types_by_name['ErfLayerParams'] = _ERFLAYERPARAMS +DESCRIPTOR.message_types_by_name['GeluLayerParams'] = _GELULAYERPARAMS +DESCRIPTOR.message_types_by_name['RangeStaticLayerParams'] = _RANGESTATICLAYERPARAMS 
+DESCRIPTOR.message_types_by_name['RangeDynamicLayerParams'] = _RANGEDYNAMICLAYERPARAMS +DESCRIPTOR.message_types_by_name['SlidingWindowsLayerParams'] = _SLIDINGWINDOWSLAYERPARAMS +DESCRIPTOR.message_types_by_name['LayerNormalizationLayerParams'] = _LAYERNORMALIZATIONLAYERPARAMS +DESCRIPTOR.message_types_by_name['NonMaximumSuppressionLayerParams'] = _NONMAXIMUMSUPPRESSIONLAYERPARAMS +DESCRIPTOR.message_types_by_name['ClampedReLULayerParams'] = _CLAMPEDRELULAYERPARAMS +DESCRIPTOR.message_types_by_name['ArgSortLayerParams'] = _ARGSORTLAYERPARAMS +DESCRIPTOR.message_types_by_name['SliceBySizeLayerParams'] = _SLICEBYSIZELAYERPARAMS +DESCRIPTOR.message_types_by_name['NeuralNetworkClassifier'] = _NEURALNETWORKCLASSIFIER +DESCRIPTOR.message_types_by_name['OneHotLayerParams'] = _ONEHOTLAYERPARAMS +DESCRIPTOR.message_types_by_name['CumSumLayerParams'] = _CUMSUMLAYERPARAMS +DESCRIPTOR.message_types_by_name['NeuralNetworkRegressor'] = _NEURALNETWORKREGRESSOR +DESCRIPTOR.message_types_by_name['NetworkUpdateParameters'] = _NETWORKUPDATEPARAMETERS +DESCRIPTOR.message_types_by_name['LossLayer'] = _LOSSLAYER +DESCRIPTOR.message_types_by_name['CategoricalCrossEntropyLossLayer'] = _CATEGORICALCROSSENTROPYLOSSLAYER +DESCRIPTOR.message_types_by_name['MeanSquaredErrorLossLayer'] = _MEANSQUAREDERRORLOSSLAYER +DESCRIPTOR.message_types_by_name['Optimizer'] = _OPTIMIZER +DESCRIPTOR.message_types_by_name['SGDOptimizer'] = _SGDOPTIMIZER +DESCRIPTOR.message_types_by_name['AdamOptimizer'] = _ADAMOPTIMIZER +DESCRIPTOR.enum_types_by_name['NeuralNetworkMultiArrayShapeMapping'] = _NEURALNETWORKMULTIARRAYSHAPEMAPPING +DESCRIPTOR.enum_types_by_name['NeuralNetworkImageShapeMapping'] = _NEURALNETWORKIMAGESHAPEMAPPING +DESCRIPTOR.enum_types_by_name['ScatterMode'] = _SCATTERMODE +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +NeuralNetwork = _reflection.GeneratedProtocolMessageType('NeuralNetwork', (_message.Message,), dict( + DESCRIPTOR = _NEURALNETWORK, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.NeuralNetwork) + )) +_sym_db.RegisterMessage(NeuralNetwork) + +NeuralNetworkImageScaler = _reflection.GeneratedProtocolMessageType('NeuralNetworkImageScaler', (_message.Message,), dict( + DESCRIPTOR = _NEURALNETWORKIMAGESCALER, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.NeuralNetworkImageScaler) + )) +_sym_db.RegisterMessage(NeuralNetworkImageScaler) + +NeuralNetworkMeanImage = _reflection.GeneratedProtocolMessageType('NeuralNetworkMeanImage', (_message.Message,), dict( + DESCRIPTOR = _NEURALNETWORKMEANIMAGE, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.NeuralNetworkMeanImage) + )) +_sym_db.RegisterMessage(NeuralNetworkMeanImage) + +NeuralNetworkPreprocessing = _reflection.GeneratedProtocolMessageType('NeuralNetworkPreprocessing', (_message.Message,), dict( + DESCRIPTOR = _NEURALNETWORKPREPROCESSING, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.NeuralNetworkPreprocessing) + )) +_sym_db.RegisterMessage(NeuralNetworkPreprocessing) + +ActivationReLU = _reflection.GeneratedProtocolMessageType('ActivationReLU', (_message.Message,), dict( + DESCRIPTOR = _ACTIVATIONRELU, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ActivationReLU) + )) +_sym_db.RegisterMessage(ActivationReLU) + +ActivationLeakyReLU = _reflection.GeneratedProtocolMessageType('ActivationLeakyReLU', (_message.Message,), 
dict( + DESCRIPTOR = _ACTIVATIONLEAKYRELU, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ActivationLeakyReLU) + )) +_sym_db.RegisterMessage(ActivationLeakyReLU) + +ActivationTanh = _reflection.GeneratedProtocolMessageType('ActivationTanh', (_message.Message,), dict( + DESCRIPTOR = _ACTIVATIONTANH, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ActivationTanh) + )) +_sym_db.RegisterMessage(ActivationTanh) + +ActivationScaledTanh = _reflection.GeneratedProtocolMessageType('ActivationScaledTanh', (_message.Message,), dict( + DESCRIPTOR = _ACTIVATIONSCALEDTANH, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ActivationScaledTanh) + )) +_sym_db.RegisterMessage(ActivationScaledTanh) + +ActivationSigmoid = _reflection.GeneratedProtocolMessageType('ActivationSigmoid', (_message.Message,), dict( + DESCRIPTOR = _ACTIVATIONSIGMOID, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ActivationSigmoid) + )) +_sym_db.RegisterMessage(ActivationSigmoid) + +ActivationLinear = _reflection.GeneratedProtocolMessageType('ActivationLinear', (_message.Message,), dict( + DESCRIPTOR = _ACTIVATIONLINEAR, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ActivationLinear) + )) +_sym_db.RegisterMessage(ActivationLinear) + +ActivationSigmoidHard = _reflection.GeneratedProtocolMessageType('ActivationSigmoidHard', (_message.Message,), dict( + DESCRIPTOR = _ACTIVATIONSIGMOIDHARD, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ActivationSigmoidHard) + )) +_sym_db.RegisterMessage(ActivationSigmoidHard) + +ActivationPReLU = _reflection.GeneratedProtocolMessageType('ActivationPReLU', (_message.Message,), dict( + DESCRIPTOR = _ACTIVATIONPRELU, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ActivationPReLU) + )) +_sym_db.RegisterMessage(ActivationPReLU) + +ActivationELU = _reflection.GeneratedProtocolMessageType('ActivationELU', (_message.Message,), dict( + DESCRIPTOR = _ACTIVATIONELU, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ActivationELU) + )) +_sym_db.RegisterMessage(ActivationELU) + +ActivationThresholdedReLU = _reflection.GeneratedProtocolMessageType('ActivationThresholdedReLU', (_message.Message,), dict( + DESCRIPTOR = _ACTIVATIONTHRESHOLDEDRELU, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ActivationThresholdedReLU) + )) +_sym_db.RegisterMessage(ActivationThresholdedReLU) + +ActivationSoftsign = _reflection.GeneratedProtocolMessageType('ActivationSoftsign', (_message.Message,), dict( + DESCRIPTOR = _ACTIVATIONSOFTSIGN, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ActivationSoftsign) + )) +_sym_db.RegisterMessage(ActivationSoftsign) + +ActivationSoftplus = _reflection.GeneratedProtocolMessageType('ActivationSoftplus', (_message.Message,), dict( + DESCRIPTOR = _ACTIVATIONSOFTPLUS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ActivationSoftplus) + )) +_sym_db.RegisterMessage(ActivationSoftplus) + +ActivationParametricSoftplus = _reflection.GeneratedProtocolMessageType('ActivationParametricSoftplus', (_message.Message,), dict( + DESCRIPTOR = _ACTIVATIONPARAMETRICSOFTPLUS, + 
__module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ActivationParametricSoftplus) + )) +_sym_db.RegisterMessage(ActivationParametricSoftplus) + +ActivationParams = _reflection.GeneratedProtocolMessageType('ActivationParams', (_message.Message,), dict( + DESCRIPTOR = _ACTIVATIONPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ActivationParams) + )) +_sym_db.RegisterMessage(ActivationParams) + +Tensor = _reflection.GeneratedProtocolMessageType('Tensor', (_message.Message,), dict( + DESCRIPTOR = _TENSOR, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Tensor) + )) +_sym_db.RegisterMessage(Tensor) + +NeuralNetworkLayer = _reflection.GeneratedProtocolMessageType('NeuralNetworkLayer', (_message.Message,), dict( + DESCRIPTOR = _NEURALNETWORKLAYER, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.NeuralNetworkLayer) + )) +_sym_db.RegisterMessage(NeuralNetworkLayer) + +BranchLayerParams = _reflection.GeneratedProtocolMessageType('BranchLayerParams', (_message.Message,), dict( + DESCRIPTOR = _BRANCHLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.BranchLayerParams) + )) +_sym_db.RegisterMessage(BranchLayerParams) + +LoopLayerParams = _reflection.GeneratedProtocolMessageType('LoopLayerParams', (_message.Message,), dict( + DESCRIPTOR = _LOOPLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LoopLayerParams) + )) +_sym_db.RegisterMessage(LoopLayerParams) + +LoopBreakLayerParams = _reflection.GeneratedProtocolMessageType('LoopBreakLayerParams', (_message.Message,), dict( + DESCRIPTOR = _LOOPBREAKLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LoopBreakLayerParams) + )) +_sym_db.RegisterMessage(LoopBreakLayerParams) + +LoopContinueLayerParams = _reflection.GeneratedProtocolMessageType('LoopContinueLayerParams', (_message.Message,), dict( + DESCRIPTOR = _LOOPCONTINUELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LoopContinueLayerParams) + )) +_sym_db.RegisterMessage(LoopContinueLayerParams) + +CopyLayerParams = _reflection.GeneratedProtocolMessageType('CopyLayerParams', (_message.Message,), dict( + DESCRIPTOR = _COPYLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CopyLayerParams) + )) +_sym_db.RegisterMessage(CopyLayerParams) + +GreaterThanLayerParams = _reflection.GeneratedProtocolMessageType('GreaterThanLayerParams', (_message.Message,), dict( + DESCRIPTOR = _GREATERTHANLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.GreaterThanLayerParams) + )) +_sym_db.RegisterMessage(GreaterThanLayerParams) + +GreaterEqualLayerParams = _reflection.GeneratedProtocolMessageType('GreaterEqualLayerParams', (_message.Message,), dict( + DESCRIPTOR = _GREATEREQUALLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.GreaterEqualLayerParams) + )) +_sym_db.RegisterMessage(GreaterEqualLayerParams) + +LessThanLayerParams = _reflection.GeneratedProtocolMessageType('LessThanLayerParams', (_message.Message,), dict( + DESCRIPTOR = _LESSTHANLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # 
@@protoc_insertion_point(class_scope:CoreML.Specification.LessThanLayerParams) + )) +_sym_db.RegisterMessage(LessThanLayerParams) + +LessEqualLayerParams = _reflection.GeneratedProtocolMessageType('LessEqualLayerParams', (_message.Message,), dict( + DESCRIPTOR = _LESSEQUALLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LessEqualLayerParams) + )) +_sym_db.RegisterMessage(LessEqualLayerParams) + +EqualLayerParams = _reflection.GeneratedProtocolMessageType('EqualLayerParams', (_message.Message,), dict( + DESCRIPTOR = _EQUALLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.EqualLayerParams) + )) +_sym_db.RegisterMessage(EqualLayerParams) + +NotEqualLayerParams = _reflection.GeneratedProtocolMessageType('NotEqualLayerParams', (_message.Message,), dict( + DESCRIPTOR = _NOTEQUALLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.NotEqualLayerParams) + )) +_sym_db.RegisterMessage(NotEqualLayerParams) + +LogicalAndLayerParams = _reflection.GeneratedProtocolMessageType('LogicalAndLayerParams', (_message.Message,), dict( + DESCRIPTOR = _LOGICALANDLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LogicalAndLayerParams) + )) +_sym_db.RegisterMessage(LogicalAndLayerParams) + +LogicalOrLayerParams = _reflection.GeneratedProtocolMessageType('LogicalOrLayerParams', (_message.Message,), dict( + DESCRIPTOR = _LOGICALORLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LogicalOrLayerParams) + )) +_sym_db.RegisterMessage(LogicalOrLayerParams) + +LogicalXorLayerParams = _reflection.GeneratedProtocolMessageType('LogicalXorLayerParams', (_message.Message,), dict( + DESCRIPTOR = _LOGICALXORLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LogicalXorLayerParams) + )) +_sym_db.RegisterMessage(LogicalXorLayerParams) + +LogicalNotLayerParams = _reflection.GeneratedProtocolMessageType('LogicalNotLayerParams', (_message.Message,), dict( + DESCRIPTOR = _LOGICALNOTLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LogicalNotLayerParams) + )) +_sym_db.RegisterMessage(LogicalNotLayerParams) + +BorderAmounts = _reflection.GeneratedProtocolMessageType('BorderAmounts', (_message.Message,), dict( + + EdgeSizes = _reflection.GeneratedProtocolMessageType('EdgeSizes', (_message.Message,), dict( + DESCRIPTOR = _BORDERAMOUNTS_EDGESIZES, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.BorderAmounts.EdgeSizes) + )) + , + DESCRIPTOR = _BORDERAMOUNTS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.BorderAmounts) + )) +_sym_db.RegisterMessage(BorderAmounts) +_sym_db.RegisterMessage(BorderAmounts.EdgeSizes) + +ValidPadding = _reflection.GeneratedProtocolMessageType('ValidPadding', (_message.Message,), dict( + DESCRIPTOR = _VALIDPADDING, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ValidPadding) + )) +_sym_db.RegisterMessage(ValidPadding) + +SamePadding = _reflection.GeneratedProtocolMessageType('SamePadding', (_message.Message,), dict( + DESCRIPTOR = _SAMEPADDING, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SamePadding) + )) 
+_sym_db.RegisterMessage(SamePadding) + +SamplingMode = _reflection.GeneratedProtocolMessageType('SamplingMode', (_message.Message,), dict( + DESCRIPTOR = _SAMPLINGMODE, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SamplingMode) + )) +_sym_db.RegisterMessage(SamplingMode) + +BoxCoordinatesMode = _reflection.GeneratedProtocolMessageType('BoxCoordinatesMode', (_message.Message,), dict( + DESCRIPTOR = _BOXCOORDINATESMODE, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.BoxCoordinatesMode) + )) +_sym_db.RegisterMessage(BoxCoordinatesMode) + +WeightParams = _reflection.GeneratedProtocolMessageType('WeightParams', (_message.Message,), dict( + DESCRIPTOR = _WEIGHTPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.WeightParams) + )) +_sym_db.RegisterMessage(WeightParams) + +QuantizationParams = _reflection.GeneratedProtocolMessageType('QuantizationParams', (_message.Message,), dict( + DESCRIPTOR = _QUANTIZATIONPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.QuantizationParams) + )) +_sym_db.RegisterMessage(QuantizationParams) + +LinearQuantizationParams = _reflection.GeneratedProtocolMessageType('LinearQuantizationParams', (_message.Message,), dict( + DESCRIPTOR = _LINEARQUANTIZATIONPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LinearQuantizationParams) + )) +_sym_db.RegisterMessage(LinearQuantizationParams) + +LookUpTableQuantizationParams = _reflection.GeneratedProtocolMessageType('LookUpTableQuantizationParams', (_message.Message,), dict( + DESCRIPTOR = _LOOKUPTABLEQUANTIZATIONPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LookUpTableQuantizationParams) + )) +_sym_db.RegisterMessage(LookUpTableQuantizationParams) + +ConvolutionLayerParams = _reflection.GeneratedProtocolMessageType('ConvolutionLayerParams', (_message.Message,), dict( + DESCRIPTOR = _CONVOLUTIONLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ConvolutionLayerParams) + )) +_sym_db.RegisterMessage(ConvolutionLayerParams) + +Convolution3DLayerParams = _reflection.GeneratedProtocolMessageType('Convolution3DLayerParams', (_message.Message,), dict( + DESCRIPTOR = _CONVOLUTION3DLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Convolution3DLayerParams) + )) +_sym_db.RegisterMessage(Convolution3DLayerParams) + +InnerProductLayerParams = _reflection.GeneratedProtocolMessageType('InnerProductLayerParams', (_message.Message,), dict( + DESCRIPTOR = _INNERPRODUCTLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.InnerProductLayerParams) + )) +_sym_db.RegisterMessage(InnerProductLayerParams) + +EmbeddingLayerParams = _reflection.GeneratedProtocolMessageType('EmbeddingLayerParams', (_message.Message,), dict( + DESCRIPTOR = _EMBEDDINGLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.EmbeddingLayerParams) + )) +_sym_db.RegisterMessage(EmbeddingLayerParams) + +EmbeddingNDLayerParams = _reflection.GeneratedProtocolMessageType('EmbeddingNDLayerParams', (_message.Message,), dict( + DESCRIPTOR = _EMBEDDINGNDLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # 
@@protoc_insertion_point(class_scope:CoreML.Specification.EmbeddingNDLayerParams) + )) +_sym_db.RegisterMessage(EmbeddingNDLayerParams) + +BatchnormLayerParams = _reflection.GeneratedProtocolMessageType('BatchnormLayerParams', (_message.Message,), dict( + DESCRIPTOR = _BATCHNORMLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.BatchnormLayerParams) + )) +_sym_db.RegisterMessage(BatchnormLayerParams) + +PoolingLayerParams = _reflection.GeneratedProtocolMessageType('PoolingLayerParams', (_message.Message,), dict( + + ValidCompletePadding = _reflection.GeneratedProtocolMessageType('ValidCompletePadding', (_message.Message,), dict( + DESCRIPTOR = _POOLINGLAYERPARAMS_VALIDCOMPLETEPADDING, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.PoolingLayerParams.ValidCompletePadding) + )) + , + DESCRIPTOR = _POOLINGLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.PoolingLayerParams) + )) +_sym_db.RegisterMessage(PoolingLayerParams) +_sym_db.RegisterMessage(PoolingLayerParams.ValidCompletePadding) + +Pooling3DLayerParams = _reflection.GeneratedProtocolMessageType('Pooling3DLayerParams', (_message.Message,), dict( + DESCRIPTOR = _POOLING3DLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Pooling3DLayerParams) + )) +_sym_db.RegisterMessage(Pooling3DLayerParams) + +GlobalPooling3DLayerParams = _reflection.GeneratedProtocolMessageType('GlobalPooling3DLayerParams', (_message.Message,), dict( + DESCRIPTOR = _GLOBALPOOLING3DLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.GlobalPooling3DLayerParams) + )) +_sym_db.RegisterMessage(GlobalPooling3DLayerParams) + +PaddingLayerParams = _reflection.GeneratedProtocolMessageType('PaddingLayerParams', (_message.Message,), dict( + + PaddingConstant = _reflection.GeneratedProtocolMessageType('PaddingConstant', (_message.Message,), dict( + DESCRIPTOR = _PADDINGLAYERPARAMS_PADDINGCONSTANT, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.PaddingLayerParams.PaddingConstant) + )) + , + + PaddingReflection = _reflection.GeneratedProtocolMessageType('PaddingReflection', (_message.Message,), dict( + DESCRIPTOR = _PADDINGLAYERPARAMS_PADDINGREFLECTION, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.PaddingLayerParams.PaddingReflection) + )) + , + + PaddingReplication = _reflection.GeneratedProtocolMessageType('PaddingReplication', (_message.Message,), dict( + DESCRIPTOR = _PADDINGLAYERPARAMS_PADDINGREPLICATION, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.PaddingLayerParams.PaddingReplication) + )) + , + DESCRIPTOR = _PADDINGLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.PaddingLayerParams) + )) +_sym_db.RegisterMessage(PaddingLayerParams) +_sym_db.RegisterMessage(PaddingLayerParams.PaddingConstant) +_sym_db.RegisterMessage(PaddingLayerParams.PaddingReflection) +_sym_db.RegisterMessage(PaddingLayerParams.PaddingReplication) + +ConcatLayerParams = _reflection.GeneratedProtocolMessageType('ConcatLayerParams', (_message.Message,), dict( + DESCRIPTOR = _CONCATLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ConcatLayerParams) + )) 
+_sym_db.RegisterMessage(ConcatLayerParams) + +LRNLayerParams = _reflection.GeneratedProtocolMessageType('LRNLayerParams', (_message.Message,), dict( + DESCRIPTOR = _LRNLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LRNLayerParams) + )) +_sym_db.RegisterMessage(LRNLayerParams) + +SoftmaxLayerParams = _reflection.GeneratedProtocolMessageType('SoftmaxLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SOFTMAXLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SoftmaxLayerParams) + )) +_sym_db.RegisterMessage(SoftmaxLayerParams) + +SplitLayerParams = _reflection.GeneratedProtocolMessageType('SplitLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SPLITLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SplitLayerParams) + )) +_sym_db.RegisterMessage(SplitLayerParams) + +AddLayerParams = _reflection.GeneratedProtocolMessageType('AddLayerParams', (_message.Message,), dict( + DESCRIPTOR = _ADDLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.AddLayerParams) + )) +_sym_db.RegisterMessage(AddLayerParams) + +MultiplyLayerParams = _reflection.GeneratedProtocolMessageType('MultiplyLayerParams', (_message.Message,), dict( + DESCRIPTOR = _MULTIPLYLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MultiplyLayerParams) + )) +_sym_db.RegisterMessage(MultiplyLayerParams) + +UnaryFunctionLayerParams = _reflection.GeneratedProtocolMessageType('UnaryFunctionLayerParams', (_message.Message,), dict( + DESCRIPTOR = _UNARYFUNCTIONLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.UnaryFunctionLayerParams) + )) +_sym_db.RegisterMessage(UnaryFunctionLayerParams) + +UpsampleLayerParams = _reflection.GeneratedProtocolMessageType('UpsampleLayerParams', (_message.Message,), dict( + DESCRIPTOR = _UPSAMPLELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.UpsampleLayerParams) + )) +_sym_db.RegisterMessage(UpsampleLayerParams) + +ResizeBilinearLayerParams = _reflection.GeneratedProtocolMessageType('ResizeBilinearLayerParams', (_message.Message,), dict( + DESCRIPTOR = _RESIZEBILINEARLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ResizeBilinearLayerParams) + )) +_sym_db.RegisterMessage(ResizeBilinearLayerParams) + +CropResizeLayerParams = _reflection.GeneratedProtocolMessageType('CropResizeLayerParams', (_message.Message,), dict( + DESCRIPTOR = _CROPRESIZELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CropResizeLayerParams) + )) +_sym_db.RegisterMessage(CropResizeLayerParams) + +BiasLayerParams = _reflection.GeneratedProtocolMessageType('BiasLayerParams', (_message.Message,), dict( + DESCRIPTOR = _BIASLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.BiasLayerParams) + )) +_sym_db.RegisterMessage(BiasLayerParams) + +ScaleLayerParams = _reflection.GeneratedProtocolMessageType('ScaleLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SCALELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ScaleLayerParams) + )) +_sym_db.RegisterMessage(ScaleLayerParams) + 
+LoadConstantLayerParams = _reflection.GeneratedProtocolMessageType('LoadConstantLayerParams', (_message.Message,), dict( + DESCRIPTOR = _LOADCONSTANTLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LoadConstantLayerParams) + )) +_sym_db.RegisterMessage(LoadConstantLayerParams) + +L2NormalizeLayerParams = _reflection.GeneratedProtocolMessageType('L2NormalizeLayerParams', (_message.Message,), dict( + DESCRIPTOR = _L2NORMALIZELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.L2NormalizeLayerParams) + )) +_sym_db.RegisterMessage(L2NormalizeLayerParams) + +FlattenLayerParams = _reflection.GeneratedProtocolMessageType('FlattenLayerParams', (_message.Message,), dict( + DESCRIPTOR = _FLATTENLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.FlattenLayerParams) + )) +_sym_db.RegisterMessage(FlattenLayerParams) + +ReshapeLayerParams = _reflection.GeneratedProtocolMessageType('ReshapeLayerParams', (_message.Message,), dict( + DESCRIPTOR = _RESHAPELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ReshapeLayerParams) + )) +_sym_db.RegisterMessage(ReshapeLayerParams) + +PermuteLayerParams = _reflection.GeneratedProtocolMessageType('PermuteLayerParams', (_message.Message,), dict( + DESCRIPTOR = _PERMUTELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.PermuteLayerParams) + )) +_sym_db.RegisterMessage(PermuteLayerParams) + +ReorganizeDataLayerParams = _reflection.GeneratedProtocolMessageType('ReorganizeDataLayerParams', (_message.Message,), dict( + DESCRIPTOR = _REORGANIZEDATALAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ReorganizeDataLayerParams) + )) +_sym_db.RegisterMessage(ReorganizeDataLayerParams) + +SliceLayerParams = _reflection.GeneratedProtocolMessageType('SliceLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SLICELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SliceLayerParams) + )) +_sym_db.RegisterMessage(SliceLayerParams) + +ReduceLayerParams = _reflection.GeneratedProtocolMessageType('ReduceLayerParams', (_message.Message,), dict( + DESCRIPTOR = _REDUCELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ReduceLayerParams) + )) +_sym_db.RegisterMessage(ReduceLayerParams) + +CropLayerParams = _reflection.GeneratedProtocolMessageType('CropLayerParams', (_message.Message,), dict( + DESCRIPTOR = _CROPLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CropLayerParams) + )) +_sym_db.RegisterMessage(CropLayerParams) + +AverageLayerParams = _reflection.GeneratedProtocolMessageType('AverageLayerParams', (_message.Message,), dict( + DESCRIPTOR = _AVERAGELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.AverageLayerParams) + )) +_sym_db.RegisterMessage(AverageLayerParams) + +MaxLayerParams = _reflection.GeneratedProtocolMessageType('MaxLayerParams', (_message.Message,), dict( + DESCRIPTOR = _MAXLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MaxLayerParams) + )) +_sym_db.RegisterMessage(MaxLayerParams) + +MinLayerParams = 
_reflection.GeneratedProtocolMessageType('MinLayerParams', (_message.Message,), dict( + DESCRIPTOR = _MINLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MinLayerParams) + )) +_sym_db.RegisterMessage(MinLayerParams) + +DotProductLayerParams = _reflection.GeneratedProtocolMessageType('DotProductLayerParams', (_message.Message,), dict( + DESCRIPTOR = _DOTPRODUCTLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.DotProductLayerParams) + )) +_sym_db.RegisterMessage(DotProductLayerParams) + +MeanVarianceNormalizeLayerParams = _reflection.GeneratedProtocolMessageType('MeanVarianceNormalizeLayerParams', (_message.Message,), dict( + DESCRIPTOR = _MEANVARIANCENORMALIZELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MeanVarianceNormalizeLayerParams) + )) +_sym_db.RegisterMessage(MeanVarianceNormalizeLayerParams) + +SequenceRepeatLayerParams = _reflection.GeneratedProtocolMessageType('SequenceRepeatLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SEQUENCEREPEATLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SequenceRepeatLayerParams) + )) +_sym_db.RegisterMessage(SequenceRepeatLayerParams) + +SimpleRecurrentLayerParams = _reflection.GeneratedProtocolMessageType('SimpleRecurrentLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SIMPLERECURRENTLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SimpleRecurrentLayerParams) + )) +_sym_db.RegisterMessage(SimpleRecurrentLayerParams) + +GRULayerParams = _reflection.GeneratedProtocolMessageType('GRULayerParams', (_message.Message,), dict( + DESCRIPTOR = _GRULAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.GRULayerParams) + )) +_sym_db.RegisterMessage(GRULayerParams) + +LSTMParams = _reflection.GeneratedProtocolMessageType('LSTMParams', (_message.Message,), dict( + DESCRIPTOR = _LSTMPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LSTMParams) + )) +_sym_db.RegisterMessage(LSTMParams) + +LSTMWeightParams = _reflection.GeneratedProtocolMessageType('LSTMWeightParams', (_message.Message,), dict( + DESCRIPTOR = _LSTMWEIGHTPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LSTMWeightParams) + )) +_sym_db.RegisterMessage(LSTMWeightParams) + +UniDirectionalLSTMLayerParams = _reflection.GeneratedProtocolMessageType('UniDirectionalLSTMLayerParams', (_message.Message,), dict( + DESCRIPTOR = _UNIDIRECTIONALLSTMLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.UniDirectionalLSTMLayerParams) + )) +_sym_db.RegisterMessage(UniDirectionalLSTMLayerParams) + +BiDirectionalLSTMLayerParams = _reflection.GeneratedProtocolMessageType('BiDirectionalLSTMLayerParams', (_message.Message,), dict( + DESCRIPTOR = _BIDIRECTIONALLSTMLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.BiDirectionalLSTMLayerParams) + )) +_sym_db.RegisterMessage(BiDirectionalLSTMLayerParams) + +CustomLayerParams = _reflection.GeneratedProtocolMessageType('CustomLayerParams', (_message.Message,), dict( + + CustomLayerParamValue = _reflection.GeneratedProtocolMessageType('CustomLayerParamValue', (_message.Message,), 
dict( + DESCRIPTOR = _CUSTOMLAYERPARAMS_CUSTOMLAYERPARAMVALUE, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CustomLayerParams.CustomLayerParamValue) + )) + , + + ParametersEntry = _reflection.GeneratedProtocolMessageType('ParametersEntry', (_message.Message,), dict( + DESCRIPTOR = _CUSTOMLAYERPARAMS_PARAMETERSENTRY, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CustomLayerParams.ParametersEntry) + )) + , + DESCRIPTOR = _CUSTOMLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CustomLayerParams) + )) +_sym_db.RegisterMessage(CustomLayerParams) +_sym_db.RegisterMessage(CustomLayerParams.CustomLayerParamValue) +_sym_db.RegisterMessage(CustomLayerParams.ParametersEntry) + +TransposeLayerParams = _reflection.GeneratedProtocolMessageType('TransposeLayerParams', (_message.Message,), dict( + DESCRIPTOR = _TRANSPOSELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.TransposeLayerParams) + )) +_sym_db.RegisterMessage(TransposeLayerParams) + +BatchedMatMulLayerParams = _reflection.GeneratedProtocolMessageType('BatchedMatMulLayerParams', (_message.Message,), dict( + DESCRIPTOR = _BATCHEDMATMULLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.BatchedMatMulLayerParams) + )) +_sym_db.RegisterMessage(BatchedMatMulLayerParams) + +ConcatNDLayerParams = _reflection.GeneratedProtocolMessageType('ConcatNDLayerParams', (_message.Message,), dict( + DESCRIPTOR = _CONCATNDLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ConcatNDLayerParams) + )) +_sym_db.RegisterMessage(ConcatNDLayerParams) + +SoftmaxNDLayerParams = _reflection.GeneratedProtocolMessageType('SoftmaxNDLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SOFTMAXNDLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SoftmaxNDLayerParams) + )) +_sym_db.RegisterMessage(SoftmaxNDLayerParams) + +ReverseLayerParams = _reflection.GeneratedProtocolMessageType('ReverseLayerParams', (_message.Message,), dict( + DESCRIPTOR = _REVERSELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ReverseLayerParams) + )) +_sym_db.RegisterMessage(ReverseLayerParams) + +ReverseSeqLayerParams = _reflection.GeneratedProtocolMessageType('ReverseSeqLayerParams', (_message.Message,), dict( + DESCRIPTOR = _REVERSESEQLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ReverseSeqLayerParams) + )) +_sym_db.RegisterMessage(ReverseSeqLayerParams) + +LoadConstantNDLayerParams = _reflection.GeneratedProtocolMessageType('LoadConstantNDLayerParams', (_message.Message,), dict( + DESCRIPTOR = _LOADCONSTANTNDLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LoadConstantNDLayerParams) + )) +_sym_db.RegisterMessage(LoadConstantNDLayerParams) + +FillLikeLayerParams = _reflection.GeneratedProtocolMessageType('FillLikeLayerParams', (_message.Message,), dict( + DESCRIPTOR = _FILLLIKELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.FillLikeLayerParams) + )) +_sym_db.RegisterMessage(FillLikeLayerParams) + +FillStaticLayerParams = 
_reflection.GeneratedProtocolMessageType('FillStaticLayerParams', (_message.Message,), dict( + DESCRIPTOR = _FILLSTATICLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.FillStaticLayerParams) + )) +_sym_db.RegisterMessage(FillStaticLayerParams) + +FillDynamicLayerParams = _reflection.GeneratedProtocolMessageType('FillDynamicLayerParams', (_message.Message,), dict( + DESCRIPTOR = _FILLDYNAMICLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.FillDynamicLayerParams) + )) +_sym_db.RegisterMessage(FillDynamicLayerParams) + +WhereBroadcastableLayerParams = _reflection.GeneratedProtocolMessageType('WhereBroadcastableLayerParams', (_message.Message,), dict( + DESCRIPTOR = _WHEREBROADCASTABLELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.WhereBroadcastableLayerParams) + )) +_sym_db.RegisterMessage(WhereBroadcastableLayerParams) + +SinLayerParams = _reflection.GeneratedProtocolMessageType('SinLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SINLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SinLayerParams) + )) +_sym_db.RegisterMessage(SinLayerParams) + +CosLayerParams = _reflection.GeneratedProtocolMessageType('CosLayerParams', (_message.Message,), dict( + DESCRIPTOR = _COSLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CosLayerParams) + )) +_sym_db.RegisterMessage(CosLayerParams) + +TanLayerParams = _reflection.GeneratedProtocolMessageType('TanLayerParams', (_message.Message,), dict( + DESCRIPTOR = _TANLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.TanLayerParams) + )) +_sym_db.RegisterMessage(TanLayerParams) + +AsinLayerParams = _reflection.GeneratedProtocolMessageType('AsinLayerParams', (_message.Message,), dict( + DESCRIPTOR = _ASINLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.AsinLayerParams) + )) +_sym_db.RegisterMessage(AsinLayerParams) + +AcosLayerParams = _reflection.GeneratedProtocolMessageType('AcosLayerParams', (_message.Message,), dict( + DESCRIPTOR = _ACOSLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.AcosLayerParams) + )) +_sym_db.RegisterMessage(AcosLayerParams) + +AtanLayerParams = _reflection.GeneratedProtocolMessageType('AtanLayerParams', (_message.Message,), dict( + DESCRIPTOR = _ATANLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.AtanLayerParams) + )) +_sym_db.RegisterMessage(AtanLayerParams) + +SinhLayerParams = _reflection.GeneratedProtocolMessageType('SinhLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SINHLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SinhLayerParams) + )) +_sym_db.RegisterMessage(SinhLayerParams) + +CoshLayerParams = _reflection.GeneratedProtocolMessageType('CoshLayerParams', (_message.Message,), dict( + DESCRIPTOR = _COSHLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CoshLayerParams) + )) +_sym_db.RegisterMessage(CoshLayerParams) + +TanhLayerParams = _reflection.GeneratedProtocolMessageType('TanhLayerParams', (_message.Message,), dict( + DESCRIPTOR = 
_TANHLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.TanhLayerParams) + )) +_sym_db.RegisterMessage(TanhLayerParams) + +AsinhLayerParams = _reflection.GeneratedProtocolMessageType('AsinhLayerParams', (_message.Message,), dict( + DESCRIPTOR = _ASINHLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.AsinhLayerParams) + )) +_sym_db.RegisterMessage(AsinhLayerParams) + +AcoshLayerParams = _reflection.GeneratedProtocolMessageType('AcoshLayerParams', (_message.Message,), dict( + DESCRIPTOR = _ACOSHLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.AcoshLayerParams) + )) +_sym_db.RegisterMessage(AcoshLayerParams) + +AtanhLayerParams = _reflection.GeneratedProtocolMessageType('AtanhLayerParams', (_message.Message,), dict( + DESCRIPTOR = _ATANHLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.AtanhLayerParams) + )) +_sym_db.RegisterMessage(AtanhLayerParams) + +PowBroadcastableLayerParams = _reflection.GeneratedProtocolMessageType('PowBroadcastableLayerParams', (_message.Message,), dict( + DESCRIPTOR = _POWBROADCASTABLELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.PowBroadcastableLayerParams) + )) +_sym_db.RegisterMessage(PowBroadcastableLayerParams) + +Exp2LayerParams = _reflection.GeneratedProtocolMessageType('Exp2LayerParams', (_message.Message,), dict( + DESCRIPTOR = _EXP2LAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Exp2LayerParams) + )) +_sym_db.RegisterMessage(Exp2LayerParams) + +WhereNonZeroLayerParams = _reflection.GeneratedProtocolMessageType('WhereNonZeroLayerParams', (_message.Message,), dict( + DESCRIPTOR = _WHERENONZEROLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.WhereNonZeroLayerParams) + )) +_sym_db.RegisterMessage(WhereNonZeroLayerParams) + +MatrixBandPartLayerParams = _reflection.GeneratedProtocolMessageType('MatrixBandPartLayerParams', (_message.Message,), dict( + DESCRIPTOR = _MATRIXBANDPARTLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MatrixBandPartLayerParams) + )) +_sym_db.RegisterMessage(MatrixBandPartLayerParams) + +UpperTriangularLayerParams = _reflection.GeneratedProtocolMessageType('UpperTriangularLayerParams', (_message.Message,), dict( + DESCRIPTOR = _UPPERTRIANGULARLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.UpperTriangularLayerParams) + )) +_sym_db.RegisterMessage(UpperTriangularLayerParams) + +LowerTriangularLayerParams = _reflection.GeneratedProtocolMessageType('LowerTriangularLayerParams', (_message.Message,), dict( + DESCRIPTOR = _LOWERTRIANGULARLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LowerTriangularLayerParams) + )) +_sym_db.RegisterMessage(LowerTriangularLayerParams) + +BroadcastToLikeLayerParams = _reflection.GeneratedProtocolMessageType('BroadcastToLikeLayerParams', (_message.Message,), dict( + DESCRIPTOR = _BROADCASTTOLIKELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.BroadcastToLikeLayerParams) + )) +_sym_db.RegisterMessage(BroadcastToLikeLayerParams) + 
+BroadcastToStaticLayerParams = _reflection.GeneratedProtocolMessageType('BroadcastToStaticLayerParams', (_message.Message,), dict( + DESCRIPTOR = _BROADCASTTOSTATICLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.BroadcastToStaticLayerParams) + )) +_sym_db.RegisterMessage(BroadcastToStaticLayerParams) + +BroadcastToDynamicLayerParams = _reflection.GeneratedProtocolMessageType('BroadcastToDynamicLayerParams', (_message.Message,), dict( + DESCRIPTOR = _BROADCASTTODYNAMICLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.BroadcastToDynamicLayerParams) + )) +_sym_db.RegisterMessage(BroadcastToDynamicLayerParams) + +AddBroadcastableLayerParams = _reflection.GeneratedProtocolMessageType('AddBroadcastableLayerParams', (_message.Message,), dict( + DESCRIPTOR = _ADDBROADCASTABLELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.AddBroadcastableLayerParams) + )) +_sym_db.RegisterMessage(AddBroadcastableLayerParams) + +MaxBroadcastableLayerParams = _reflection.GeneratedProtocolMessageType('MaxBroadcastableLayerParams', (_message.Message,), dict( + DESCRIPTOR = _MAXBROADCASTABLELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MaxBroadcastableLayerParams) + )) +_sym_db.RegisterMessage(MaxBroadcastableLayerParams) + +MinBroadcastableLayerParams = _reflection.GeneratedProtocolMessageType('MinBroadcastableLayerParams', (_message.Message,), dict( + DESCRIPTOR = _MINBROADCASTABLELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MinBroadcastableLayerParams) + )) +_sym_db.RegisterMessage(MinBroadcastableLayerParams) + +ModBroadcastableLayerParams = _reflection.GeneratedProtocolMessageType('ModBroadcastableLayerParams', (_message.Message,), dict( + DESCRIPTOR = _MODBROADCASTABLELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ModBroadcastableLayerParams) + )) +_sym_db.RegisterMessage(ModBroadcastableLayerParams) + +FloorDivBroadcastableLayerParams = _reflection.GeneratedProtocolMessageType('FloorDivBroadcastableLayerParams', (_message.Message,), dict( + DESCRIPTOR = _FLOORDIVBROADCASTABLELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.FloorDivBroadcastableLayerParams) + )) +_sym_db.RegisterMessage(FloorDivBroadcastableLayerParams) + +SubtractBroadcastableLayerParams = _reflection.GeneratedProtocolMessageType('SubtractBroadcastableLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SUBTRACTBROADCASTABLELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SubtractBroadcastableLayerParams) + )) +_sym_db.RegisterMessage(SubtractBroadcastableLayerParams) + +MultiplyBroadcastableLayerParams = _reflection.GeneratedProtocolMessageType('MultiplyBroadcastableLayerParams', (_message.Message,), dict( + DESCRIPTOR = _MULTIPLYBROADCASTABLELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MultiplyBroadcastableLayerParams) + )) +_sym_db.RegisterMessage(MultiplyBroadcastableLayerParams) + +DivideBroadcastableLayerParams = _reflection.GeneratedProtocolMessageType('DivideBroadcastableLayerParams', (_message.Message,), dict( + DESCRIPTOR = _DIVIDEBROADCASTABLELAYERPARAMS, + __module__ = 
'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.DivideBroadcastableLayerParams) + )) +_sym_db.RegisterMessage(DivideBroadcastableLayerParams) + +GatherLayerParams = _reflection.GeneratedProtocolMessageType('GatherLayerParams', (_message.Message,), dict( + DESCRIPTOR = _GATHERLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.GatherLayerParams) + )) +_sym_db.RegisterMessage(GatherLayerParams) + +ScatterLayerParams = _reflection.GeneratedProtocolMessageType('ScatterLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SCATTERLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ScatterLayerParams) + )) +_sym_db.RegisterMessage(ScatterLayerParams) + +GatherNDLayerParams = _reflection.GeneratedProtocolMessageType('GatherNDLayerParams', (_message.Message,), dict( + DESCRIPTOR = _GATHERNDLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.GatherNDLayerParams) + )) +_sym_db.RegisterMessage(GatherNDLayerParams) + +ScatterNDLayerParams = _reflection.GeneratedProtocolMessageType('ScatterNDLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SCATTERNDLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ScatterNDLayerParams) + )) +_sym_db.RegisterMessage(ScatterNDLayerParams) + +GatherAlongAxisLayerParams = _reflection.GeneratedProtocolMessageType('GatherAlongAxisLayerParams', (_message.Message,), dict( + DESCRIPTOR = _GATHERALONGAXISLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.GatherAlongAxisLayerParams) + )) +_sym_db.RegisterMessage(GatherAlongAxisLayerParams) + +ScatterAlongAxisLayerParams = _reflection.GeneratedProtocolMessageType('ScatterAlongAxisLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SCATTERALONGAXISLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ScatterAlongAxisLayerParams) + )) +_sym_db.RegisterMessage(ScatterAlongAxisLayerParams) + +StackLayerParams = _reflection.GeneratedProtocolMessageType('StackLayerParams', (_message.Message,), dict( + DESCRIPTOR = _STACKLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.StackLayerParams) + )) +_sym_db.RegisterMessage(StackLayerParams) + +RankPreservingReshapeLayerParams = _reflection.GeneratedProtocolMessageType('RankPreservingReshapeLayerParams', (_message.Message,), dict( + DESCRIPTOR = _RANKPRESERVINGRESHAPELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.RankPreservingReshapeLayerParams) + )) +_sym_db.RegisterMessage(RankPreservingReshapeLayerParams) + +ConstantPaddingLayerParams = _reflection.GeneratedProtocolMessageType('ConstantPaddingLayerParams', (_message.Message,), dict( + DESCRIPTOR = _CONSTANTPADDINGLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ConstantPaddingLayerParams) + )) +_sym_db.RegisterMessage(ConstantPaddingLayerParams) + +RandomNormalLikeLayerParams = _reflection.GeneratedProtocolMessageType('RandomNormalLikeLayerParams', (_message.Message,), dict( + DESCRIPTOR = _RANDOMNORMALLIKELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.RandomNormalLikeLayerParams) + )) 
+_sym_db.RegisterMessage(RandomNormalLikeLayerParams) + +RandomNormalStaticLayerParams = _reflection.GeneratedProtocolMessageType('RandomNormalStaticLayerParams', (_message.Message,), dict( + DESCRIPTOR = _RANDOMNORMALSTATICLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.RandomNormalStaticLayerParams) + )) +_sym_db.RegisterMessage(RandomNormalStaticLayerParams) + +RandomNormalDynamicLayerParams = _reflection.GeneratedProtocolMessageType('RandomNormalDynamicLayerParams', (_message.Message,), dict( + DESCRIPTOR = _RANDOMNORMALDYNAMICLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.RandomNormalDynamicLayerParams) + )) +_sym_db.RegisterMessage(RandomNormalDynamicLayerParams) + +RandomUniformLikeLayerParams = _reflection.GeneratedProtocolMessageType('RandomUniformLikeLayerParams', (_message.Message,), dict( + DESCRIPTOR = _RANDOMUNIFORMLIKELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.RandomUniformLikeLayerParams) + )) +_sym_db.RegisterMessage(RandomUniformLikeLayerParams) + +RandomUniformStaticLayerParams = _reflection.GeneratedProtocolMessageType('RandomUniformStaticLayerParams', (_message.Message,), dict( + DESCRIPTOR = _RANDOMUNIFORMSTATICLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.RandomUniformStaticLayerParams) + )) +_sym_db.RegisterMessage(RandomUniformStaticLayerParams) + +RandomUniformDynamicLayerParams = _reflection.GeneratedProtocolMessageType('RandomUniformDynamicLayerParams', (_message.Message,), dict( + DESCRIPTOR = _RANDOMUNIFORMDYNAMICLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.RandomUniformDynamicLayerParams) + )) +_sym_db.RegisterMessage(RandomUniformDynamicLayerParams) + +RandomBernoulliLikeLayerParams = _reflection.GeneratedProtocolMessageType('RandomBernoulliLikeLayerParams', (_message.Message,), dict( + DESCRIPTOR = _RANDOMBERNOULLILIKELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.RandomBernoulliLikeLayerParams) + )) +_sym_db.RegisterMessage(RandomBernoulliLikeLayerParams) + +RandomBernoulliStaticLayerParams = _reflection.GeneratedProtocolMessageType('RandomBernoulliStaticLayerParams', (_message.Message,), dict( + DESCRIPTOR = _RANDOMBERNOULLISTATICLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.RandomBernoulliStaticLayerParams) + )) +_sym_db.RegisterMessage(RandomBernoulliStaticLayerParams) + +RandomBernoulliDynamicLayerParams = _reflection.GeneratedProtocolMessageType('RandomBernoulliDynamicLayerParams', (_message.Message,), dict( + DESCRIPTOR = _RANDOMBERNOULLIDYNAMICLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.RandomBernoulliDynamicLayerParams) + )) +_sym_db.RegisterMessage(RandomBernoulliDynamicLayerParams) + +CategoricalDistributionLayerParams = _reflection.GeneratedProtocolMessageType('CategoricalDistributionLayerParams', (_message.Message,), dict( + DESCRIPTOR = _CATEGORICALDISTRIBUTIONLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CategoricalDistributionLayerParams) + )) +_sym_db.RegisterMessage(CategoricalDistributionLayerParams) + +ReduceL1LayerParams = 
_reflection.GeneratedProtocolMessageType('ReduceL1LayerParams', (_message.Message,), dict( + DESCRIPTOR = _REDUCEL1LAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ReduceL1LayerParams) + )) +_sym_db.RegisterMessage(ReduceL1LayerParams) + +ReduceL2LayerParams = _reflection.GeneratedProtocolMessageType('ReduceL2LayerParams', (_message.Message,), dict( + DESCRIPTOR = _REDUCEL2LAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ReduceL2LayerParams) + )) +_sym_db.RegisterMessage(ReduceL2LayerParams) + +ReduceMaxLayerParams = _reflection.GeneratedProtocolMessageType('ReduceMaxLayerParams', (_message.Message,), dict( + DESCRIPTOR = _REDUCEMAXLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ReduceMaxLayerParams) + )) +_sym_db.RegisterMessage(ReduceMaxLayerParams) + +ReduceMinLayerParams = _reflection.GeneratedProtocolMessageType('ReduceMinLayerParams', (_message.Message,), dict( + DESCRIPTOR = _REDUCEMINLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ReduceMinLayerParams) + )) +_sym_db.RegisterMessage(ReduceMinLayerParams) + +ReduceSumLayerParams = _reflection.GeneratedProtocolMessageType('ReduceSumLayerParams', (_message.Message,), dict( + DESCRIPTOR = _REDUCESUMLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ReduceSumLayerParams) + )) +_sym_db.RegisterMessage(ReduceSumLayerParams) + +ReduceProdLayerParams = _reflection.GeneratedProtocolMessageType('ReduceProdLayerParams', (_message.Message,), dict( + DESCRIPTOR = _REDUCEPRODLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ReduceProdLayerParams) + )) +_sym_db.RegisterMessage(ReduceProdLayerParams) + +ReduceMeanLayerParams = _reflection.GeneratedProtocolMessageType('ReduceMeanLayerParams', (_message.Message,), dict( + DESCRIPTOR = _REDUCEMEANLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ReduceMeanLayerParams) + )) +_sym_db.RegisterMessage(ReduceMeanLayerParams) + +ReduceLogSumLayerParams = _reflection.GeneratedProtocolMessageType('ReduceLogSumLayerParams', (_message.Message,), dict( + DESCRIPTOR = _REDUCELOGSUMLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ReduceLogSumLayerParams) + )) +_sym_db.RegisterMessage(ReduceLogSumLayerParams) + +ReduceSumSquareLayerParams = _reflection.GeneratedProtocolMessageType('ReduceSumSquareLayerParams', (_message.Message,), dict( + DESCRIPTOR = _REDUCESUMSQUARELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ReduceSumSquareLayerParams) + )) +_sym_db.RegisterMessage(ReduceSumSquareLayerParams) + +ReduceLogSumExpLayerParams = _reflection.GeneratedProtocolMessageType('ReduceLogSumExpLayerParams', (_message.Message,), dict( + DESCRIPTOR = _REDUCELOGSUMEXPLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ReduceLogSumExpLayerParams) + )) +_sym_db.RegisterMessage(ReduceLogSumExpLayerParams) + +ExpandDimsLayerParams = _reflection.GeneratedProtocolMessageType('ExpandDimsLayerParams', (_message.Message,), dict( + DESCRIPTOR = _EXPANDDIMSLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # 
@@protoc_insertion_point(class_scope:CoreML.Specification.ExpandDimsLayerParams) + )) +_sym_db.RegisterMessage(ExpandDimsLayerParams) + +FlattenTo2DLayerParams = _reflection.GeneratedProtocolMessageType('FlattenTo2DLayerParams', (_message.Message,), dict( + DESCRIPTOR = _FLATTENTO2DLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.FlattenTo2DLayerParams) + )) +_sym_db.RegisterMessage(FlattenTo2DLayerParams) + +ReshapeStaticLayerParams = _reflection.GeneratedProtocolMessageType('ReshapeStaticLayerParams', (_message.Message,), dict( + DESCRIPTOR = _RESHAPESTATICLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ReshapeStaticLayerParams) + )) +_sym_db.RegisterMessage(ReshapeStaticLayerParams) + +ReshapeLikeLayerParams = _reflection.GeneratedProtocolMessageType('ReshapeLikeLayerParams', (_message.Message,), dict( + DESCRIPTOR = _RESHAPELIKELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ReshapeLikeLayerParams) + )) +_sym_db.RegisterMessage(ReshapeLikeLayerParams) + +ReshapeDynamicLayerParams = _reflection.GeneratedProtocolMessageType('ReshapeDynamicLayerParams', (_message.Message,), dict( + DESCRIPTOR = _RESHAPEDYNAMICLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ReshapeDynamicLayerParams) + )) +_sym_db.RegisterMessage(ReshapeDynamicLayerParams) + +SqueezeLayerParams = _reflection.GeneratedProtocolMessageType('SqueezeLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SQUEEZELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SqueezeLayerParams) + )) +_sym_db.RegisterMessage(SqueezeLayerParams) + +TopKLayerParams = _reflection.GeneratedProtocolMessageType('TopKLayerParams', (_message.Message,), dict( + DESCRIPTOR = _TOPKLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.TopKLayerParams) + )) +_sym_db.RegisterMessage(TopKLayerParams) + +ArgMaxLayerParams = _reflection.GeneratedProtocolMessageType('ArgMaxLayerParams', (_message.Message,), dict( + DESCRIPTOR = _ARGMAXLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ArgMaxLayerParams) + )) +_sym_db.RegisterMessage(ArgMaxLayerParams) + +ArgMinLayerParams = _reflection.GeneratedProtocolMessageType('ArgMinLayerParams', (_message.Message,), dict( + DESCRIPTOR = _ARGMINLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ArgMinLayerParams) + )) +_sym_db.RegisterMessage(ArgMinLayerParams) + +SplitNDLayerParams = _reflection.GeneratedProtocolMessageType('SplitNDLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SPLITNDLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SplitNDLayerParams) + )) +_sym_db.RegisterMessage(SplitNDLayerParams) + +CeilLayerParams = _reflection.GeneratedProtocolMessageType('CeilLayerParams', (_message.Message,), dict( + DESCRIPTOR = _CEILLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CeilLayerParams) + )) +_sym_db.RegisterMessage(CeilLayerParams) + +RoundLayerParams = _reflection.GeneratedProtocolMessageType('RoundLayerParams', (_message.Message,), dict( + DESCRIPTOR = _ROUNDLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # 
@@protoc_insertion_point(class_scope:CoreML.Specification.RoundLayerParams) + )) +_sym_db.RegisterMessage(RoundLayerParams) + +FloorLayerParams = _reflection.GeneratedProtocolMessageType('FloorLayerParams', (_message.Message,), dict( + DESCRIPTOR = _FLOORLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.FloorLayerParams) + )) +_sym_db.RegisterMessage(FloorLayerParams) + +SignLayerParams = _reflection.GeneratedProtocolMessageType('SignLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SIGNLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SignLayerParams) + )) +_sym_db.RegisterMessage(SignLayerParams) + +ClipLayerParams = _reflection.GeneratedProtocolMessageType('ClipLayerParams', (_message.Message,), dict( + DESCRIPTOR = _CLIPLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ClipLayerParams) + )) +_sym_db.RegisterMessage(ClipLayerParams) + +SliceStaticLayerParams = _reflection.GeneratedProtocolMessageType('SliceStaticLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SLICESTATICLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SliceStaticLayerParams) + )) +_sym_db.RegisterMessage(SliceStaticLayerParams) + +SliceDynamicLayerParams = _reflection.GeneratedProtocolMessageType('SliceDynamicLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SLICEDYNAMICLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SliceDynamicLayerParams) + )) +_sym_db.RegisterMessage(SliceDynamicLayerParams) + +TileLayerParams = _reflection.GeneratedProtocolMessageType('TileLayerParams', (_message.Message,), dict( + DESCRIPTOR = _TILELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.TileLayerParams) + )) +_sym_db.RegisterMessage(TileLayerParams) + +GetShapeLayerParams = _reflection.GeneratedProtocolMessageType('GetShapeLayerParams', (_message.Message,), dict( + DESCRIPTOR = _GETSHAPELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.GetShapeLayerParams) + )) +_sym_db.RegisterMessage(GetShapeLayerParams) + +ErfLayerParams = _reflection.GeneratedProtocolMessageType('ErfLayerParams', (_message.Message,), dict( + DESCRIPTOR = _ERFLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ErfLayerParams) + )) +_sym_db.RegisterMessage(ErfLayerParams) + +GeluLayerParams = _reflection.GeneratedProtocolMessageType('GeluLayerParams', (_message.Message,), dict( + DESCRIPTOR = _GELULAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.GeluLayerParams) + )) +_sym_db.RegisterMessage(GeluLayerParams) + +RangeStaticLayerParams = _reflection.GeneratedProtocolMessageType('RangeStaticLayerParams', (_message.Message,), dict( + DESCRIPTOR = _RANGESTATICLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.RangeStaticLayerParams) + )) +_sym_db.RegisterMessage(RangeStaticLayerParams) + +RangeDynamicLayerParams = _reflection.GeneratedProtocolMessageType('RangeDynamicLayerParams', (_message.Message,), dict( + DESCRIPTOR = _RANGEDYNAMICLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # 
@@protoc_insertion_point(class_scope:CoreML.Specification.RangeDynamicLayerParams) + )) +_sym_db.RegisterMessage(RangeDynamicLayerParams) + +SlidingWindowsLayerParams = _reflection.GeneratedProtocolMessageType('SlidingWindowsLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SLIDINGWINDOWSLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SlidingWindowsLayerParams) + )) +_sym_db.RegisterMessage(SlidingWindowsLayerParams) + +LayerNormalizationLayerParams = _reflection.GeneratedProtocolMessageType('LayerNormalizationLayerParams', (_message.Message,), dict( + DESCRIPTOR = _LAYERNORMALIZATIONLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LayerNormalizationLayerParams) + )) +_sym_db.RegisterMessage(LayerNormalizationLayerParams) + +NonMaximumSuppressionLayerParams = _reflection.GeneratedProtocolMessageType('NonMaximumSuppressionLayerParams', (_message.Message,), dict( + DESCRIPTOR = _NONMAXIMUMSUPPRESSIONLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.NonMaximumSuppressionLayerParams) + )) +_sym_db.RegisterMessage(NonMaximumSuppressionLayerParams) + +ClampedReLULayerParams = _reflection.GeneratedProtocolMessageType('ClampedReLULayerParams', (_message.Message,), dict( + DESCRIPTOR = _CLAMPEDRELULAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ClampedReLULayerParams) + )) +_sym_db.RegisterMessage(ClampedReLULayerParams) + +ArgSortLayerParams = _reflection.GeneratedProtocolMessageType('ArgSortLayerParams', (_message.Message,), dict( + DESCRIPTOR = _ARGSORTLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.ArgSortLayerParams) + )) +_sym_db.RegisterMessage(ArgSortLayerParams) + +SliceBySizeLayerParams = _reflection.GeneratedProtocolMessageType('SliceBySizeLayerParams', (_message.Message,), dict( + DESCRIPTOR = _SLICEBYSIZELAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SliceBySizeLayerParams) + )) +_sym_db.RegisterMessage(SliceBySizeLayerParams) + +NeuralNetworkClassifier = _reflection.GeneratedProtocolMessageType('NeuralNetworkClassifier', (_message.Message,), dict( + DESCRIPTOR = _NEURALNETWORKCLASSIFIER, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.NeuralNetworkClassifier) + )) +_sym_db.RegisterMessage(NeuralNetworkClassifier) + +OneHotLayerParams = _reflection.GeneratedProtocolMessageType('OneHotLayerParams', (_message.Message,), dict( + DESCRIPTOR = _ONEHOTLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.OneHotLayerParams) + )) +_sym_db.RegisterMessage(OneHotLayerParams) + +CumSumLayerParams = _reflection.GeneratedProtocolMessageType('CumSumLayerParams', (_message.Message,), dict( + DESCRIPTOR = _CUMSUMLAYERPARAMS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CumSumLayerParams) + )) +_sym_db.RegisterMessage(CumSumLayerParams) + +NeuralNetworkRegressor = _reflection.GeneratedProtocolMessageType('NeuralNetworkRegressor', (_message.Message,), dict( + DESCRIPTOR = _NEURALNETWORKREGRESSOR, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.NeuralNetworkRegressor) + )) +_sym_db.RegisterMessage(NeuralNetworkRegressor) + 
+NetworkUpdateParameters = _reflection.GeneratedProtocolMessageType('NetworkUpdateParameters', (_message.Message,), dict( + DESCRIPTOR = _NETWORKUPDATEPARAMETERS, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.NetworkUpdateParameters) + )) +_sym_db.RegisterMessage(NetworkUpdateParameters) + +LossLayer = _reflection.GeneratedProtocolMessageType('LossLayer', (_message.Message,), dict( + DESCRIPTOR = _LOSSLAYER, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LossLayer) + )) +_sym_db.RegisterMessage(LossLayer) + +CategoricalCrossEntropyLossLayer = _reflection.GeneratedProtocolMessageType('CategoricalCrossEntropyLossLayer', (_message.Message,), dict( + DESCRIPTOR = _CATEGORICALCROSSENTROPYLOSSLAYER, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CategoricalCrossEntropyLossLayer) + )) +_sym_db.RegisterMessage(CategoricalCrossEntropyLossLayer) + +MeanSquaredErrorLossLayer = _reflection.GeneratedProtocolMessageType('MeanSquaredErrorLossLayer', (_message.Message,), dict( + DESCRIPTOR = _MEANSQUAREDERRORLOSSLAYER, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.MeanSquaredErrorLossLayer) + )) +_sym_db.RegisterMessage(MeanSquaredErrorLossLayer) + +Optimizer = _reflection.GeneratedProtocolMessageType('Optimizer', (_message.Message,), dict( + DESCRIPTOR = _OPTIMIZER, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Optimizer) + )) +_sym_db.RegisterMessage(Optimizer) + +SGDOptimizer = _reflection.GeneratedProtocolMessageType('SGDOptimizer', (_message.Message,), dict( + DESCRIPTOR = _SGDOPTIMIZER, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SGDOptimizer) + )) +_sym_db.RegisterMessage(SGDOptimizer) + +AdamOptimizer = _reflection.GeneratedProtocolMessageType('AdamOptimizer', (_message.Message,), dict( + DESCRIPTOR = _ADAMOPTIMIZER, + __module__ = 'NeuralNetwork_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.AdamOptimizer) + )) +_sym_db.RegisterMessage(AdamOptimizer) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +_CUSTOMLAYERPARAMS_PARAMETERSENTRY.has_options = True +_CUSTOMLAYERPARAMS_PARAMETERSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/NonMaximumSuppression_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/NonMaximumSuppression_pb2.py new file mode 100644 index 00000000..285ac82d --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/NonMaximumSuppression_pb2.py @@ -0,0 +1,206 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: NonMaximumSuppression.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . 
import DataStructures_pb2 as DataStructures__pb2 +try: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2 + +from .DataStructures_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='NonMaximumSuppression.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x1bNonMaximumSuppression.proto\x12\x14\x43oreML.Specification\x1a\x14\x44\x61taStructures.proto\"\xc0\x04\n\x15NonMaximumSuppression\x12\x46\n\x07pickTop\x18\x01 \x01(\x0b\x32\x33.CoreML.Specification.NonMaximumSuppression.PickTopH\x00\x12?\n\x11stringClassLabels\x18\x64 \x01(\x0b\x32\".CoreML.Specification.StringVectorH\x01\x12=\n\x10int64ClassLabels\x18\x65 \x01(\x0b\x32!.CoreML.Specification.Int64VectorH\x01\x12\x14\n\x0ciouThreshold\x18n \x01(\x01\x12\x1b\n\x13\x63onfidenceThreshold\x18o \x01(\x01\x12#\n\x1a\x63onfidenceInputFeatureName\x18\xc8\x01 \x01(\t\x12$\n\x1b\x63oordinatesInputFeatureName\x18\xc9\x01 \x01(\t\x12%\n\x1ciouThresholdInputFeatureName\x18\xca\x01 \x01(\t\x12,\n#confidenceThresholdInputFeatureName\x18\xcb\x01 \x01(\t\x12$\n\x1b\x63onfidenceOutputFeatureName\x18\xd2\x01 \x01(\t\x12%\n\x1c\x63oordinatesOutputFeatureName\x18\xd3\x01 \x01(\t\x1a\x1b\n\x07PickTop\x12\x10\n\x08perClass\x18\x01 \x01(\x08\x42\x13\n\x11SuppressionMethodB\r\n\x0b\x43lassLabelsB\x02H\x03P\x00\x62\x06proto3') + , + dependencies=[DataStructures__pb2.DESCRIPTOR,], + public_dependencies=[DataStructures__pb2.DESCRIPTOR,]) + + + + +_NONMAXIMUMSUPPRESSION_PICKTOP = _descriptor.Descriptor( + name='PickTop', + full_name='CoreML.Specification.NonMaximumSuppression.PickTop', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='perClass', full_name='CoreML.Specification.NonMaximumSuppression.PickTop.perClass', index=0, + number=1, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=589, + serialized_end=616, +) + +_NONMAXIMUMSUPPRESSION = _descriptor.Descriptor( + name='NonMaximumSuppression', + full_name='CoreML.Specification.NonMaximumSuppression', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='pickTop', full_name='CoreML.Specification.NonMaximumSuppression.pickTop', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stringClassLabels', full_name='CoreML.Specification.NonMaximumSuppression.stringClassLabels', index=1, + number=100, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='int64ClassLabels', full_name='CoreML.Specification.NonMaximumSuppression.int64ClassLabels', index=2, + number=101, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + 
_descriptor.FieldDescriptor( + name='iouThreshold', full_name='CoreML.Specification.NonMaximumSuppression.iouThreshold', index=3, + number=110, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='confidenceThreshold', full_name='CoreML.Specification.NonMaximumSuppression.confidenceThreshold', index=4, + number=111, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='confidenceInputFeatureName', full_name='CoreML.Specification.NonMaximumSuppression.confidenceInputFeatureName', index=5, + number=200, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='coordinatesInputFeatureName', full_name='CoreML.Specification.NonMaximumSuppression.coordinatesInputFeatureName', index=6, + number=201, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='iouThresholdInputFeatureName', full_name='CoreML.Specification.NonMaximumSuppression.iouThresholdInputFeatureName', index=7, + number=202, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='confidenceThresholdInputFeatureName', full_name='CoreML.Specification.NonMaximumSuppression.confidenceThresholdInputFeatureName', index=8, + number=203, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='confidenceOutputFeatureName', full_name='CoreML.Specification.NonMaximumSuppression.confidenceOutputFeatureName', index=9, + number=210, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='coordinatesOutputFeatureName', full_name='CoreML.Specification.NonMaximumSuppression.coordinatesOutputFeatureName', index=10, + number=211, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_NONMAXIMUMSUPPRESSION_PICKTOP, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='SuppressionMethod', full_name='CoreML.Specification.NonMaximumSuppression.SuppressionMethod', + index=0, containing_type=None, fields=[]), + _descriptor.OneofDescriptor( + name='ClassLabels', full_name='CoreML.Specification.NonMaximumSuppression.ClassLabels', + 
index=1, containing_type=None, fields=[]), + ], + serialized_start=76, + serialized_end=652, +) + +_NONMAXIMUMSUPPRESSION_PICKTOP.containing_type = _NONMAXIMUMSUPPRESSION +_NONMAXIMUMSUPPRESSION.fields_by_name['pickTop'].message_type = _NONMAXIMUMSUPPRESSION_PICKTOP +_NONMAXIMUMSUPPRESSION.fields_by_name['stringClassLabels'].message_type = DataStructures__pb2._STRINGVECTOR +_NONMAXIMUMSUPPRESSION.fields_by_name['int64ClassLabels'].message_type = DataStructures__pb2._INT64VECTOR +_NONMAXIMUMSUPPRESSION.oneofs_by_name['SuppressionMethod'].fields.append( + _NONMAXIMUMSUPPRESSION.fields_by_name['pickTop']) +_NONMAXIMUMSUPPRESSION.fields_by_name['pickTop'].containing_oneof = _NONMAXIMUMSUPPRESSION.oneofs_by_name['SuppressionMethod'] +_NONMAXIMUMSUPPRESSION.oneofs_by_name['ClassLabels'].fields.append( + _NONMAXIMUMSUPPRESSION.fields_by_name['stringClassLabels']) +_NONMAXIMUMSUPPRESSION.fields_by_name['stringClassLabels'].containing_oneof = _NONMAXIMUMSUPPRESSION.oneofs_by_name['ClassLabels'] +_NONMAXIMUMSUPPRESSION.oneofs_by_name['ClassLabels'].fields.append( + _NONMAXIMUMSUPPRESSION.fields_by_name['int64ClassLabels']) +_NONMAXIMUMSUPPRESSION.fields_by_name['int64ClassLabels'].containing_oneof = _NONMAXIMUMSUPPRESSION.oneofs_by_name['ClassLabels'] +DESCRIPTOR.message_types_by_name['NonMaximumSuppression'] = _NONMAXIMUMSUPPRESSION +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +NonMaximumSuppression = _reflection.GeneratedProtocolMessageType('NonMaximumSuppression', (_message.Message,), dict( + + PickTop = _reflection.GeneratedProtocolMessageType('PickTop', (_message.Message,), dict( + DESCRIPTOR = _NONMAXIMUMSUPPRESSION_PICKTOP, + __module__ = 'NonMaximumSuppression_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.NonMaximumSuppression.PickTop) + )) + , + DESCRIPTOR = _NONMAXIMUMSUPPRESSION, + __module__ = 'NonMaximumSuppression_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.NonMaximumSuppression) + )) +_sym_db.RegisterMessage(NonMaximumSuppression) +_sym_db.RegisterMessage(NonMaximumSuppression.PickTop) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/Normalizer_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/Normalizer_pb2.py new file mode 100644 index 00000000..0cf2a4c5 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/Normalizer_pb2.py @@ -0,0 +1,100 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
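+# The Normalizer message generated below carries a single normType field whose
+# nested NormType enum selects LMax, L1, or L2 normalization. A hedged usage
+# sketch (illustrative only; assumes the coremltools.proto package layout):
+#
+#   from coremltools.proto import Normalizer_pb2
+#   norm = Normalizer_pb2.Normalizer()
+#   norm.normType = Normalizer_pb2.Normalizer.L2   # enum values hang off the class
+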
+# source: Normalizer.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='Normalizer.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x10Normalizer.proto\x12\x14\x43oreML.Specification\"o\n\nNormalizer\x12;\n\x08normType\x18\x01 \x01(\x0e\x32).CoreML.Specification.Normalizer.NormType\"$\n\x08NormType\x12\x08\n\x04LMax\x10\x00\x12\x06\n\x02L1\x10\x01\x12\x06\n\x02L2\x10\x02\x42\x02H\x03\x62\x06proto3') +) + + + +_NORMALIZER_NORMTYPE = _descriptor.EnumDescriptor( + name='NormType', + full_name='CoreML.Specification.Normalizer.NormType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='LMax', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='L1', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='L2', index=2, number=2, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=117, + serialized_end=153, +) +_sym_db.RegisterEnumDescriptor(_NORMALIZER_NORMTYPE) + + +_NORMALIZER = _descriptor.Descriptor( + name='Normalizer', + full_name='CoreML.Specification.Normalizer', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='normType', full_name='CoreML.Specification.Normalizer.normType', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _NORMALIZER_NORMTYPE, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=42, + serialized_end=153, +) + +_NORMALIZER.fields_by_name['normType'].enum_type = _NORMALIZER_NORMTYPE +_NORMALIZER_NORMTYPE.containing_type = _NORMALIZER +DESCRIPTOR.message_types_by_name['Normalizer'] = _NORMALIZER +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Normalizer = _reflection.GeneratedProtocolMessageType('Normalizer', (_message.Message,), dict( + DESCRIPTOR = _NORMALIZER, + __module__ = 'Normalizer_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Normalizer) + )) +_sym_db.RegisterMessage(Normalizer) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/OneHotEncoder_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/OneHotEncoder_pb2.py new file mode 100644 index 00000000..6219f13b --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/OneHotEncoder_pb2.py @@ -0,0 +1,136 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
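+# The OneHotEncoder message generated below exposes a CategoryType oneof
+# (stringCategories vs. int64Categories) plus an outputSparse flag and a
+# HandleUnknown enum. Writing into one arm of the oneof selects it
+# automatically. A hedged sketch (illustrative only; assumes StringVector's
+# repeated field is named 'vector', as in coremltools' DataStructures.proto):
+#
+#   from coremltools.proto import OneHotEncoder_pb2
+#   ohe = OneHotEncoder_pb2.OneHotEncoder()
+#   ohe.stringCategories.vector.extend(["red", "green"])  # picks the oneof arm
+#   ohe.handleUnknown = OneHotEncoder_pb2.OneHotEncoder.IgnoreUnknown
+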
+# source: OneHotEncoder.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . import DataStructures_pb2 as DataStructures__pb2 +try: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2 + +from .DataStructures_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='OneHotEncoder.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x13OneHotEncoder.proto\x12\x14\x43oreML.Specification\x1a\x14\x44\x61taStructures.proto\"\xb5\x02\n\rOneHotEncoder\x12>\n\x10stringCategories\x18\x01 \x01(\x0b\x32\".CoreML.Specification.StringVectorH\x00\x12<\n\x0fint64Categories\x18\x02 \x01(\x0b\x32!.CoreML.Specification.Int64VectorH\x00\x12\x14\n\x0coutputSparse\x18\n \x01(\x08\x12H\n\rhandleUnknown\x18\x0b \x01(\x0e\x32\x31.CoreML.Specification.OneHotEncoder.HandleUnknown\"6\n\rHandleUnknown\x12\x12\n\x0e\x45rrorOnUnknown\x10\x00\x12\x11\n\rIgnoreUnknown\x10\x01\x42\x0e\n\x0c\x43\x61tegoryTypeB\x02H\x03P\x00\x62\x06proto3') + , + dependencies=[DataStructures__pb2.DESCRIPTOR,], + public_dependencies=[DataStructures__pb2.DESCRIPTOR,]) + + + +_ONEHOTENCODER_HANDLEUNKNOWN = _descriptor.EnumDescriptor( + name='HandleUnknown', + full_name='CoreML.Specification.OneHotEncoder.HandleUnknown', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='ErrorOnUnknown', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='IgnoreUnknown', index=1, number=1, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=307, + serialized_end=361, +) +_sym_db.RegisterEnumDescriptor(_ONEHOTENCODER_HANDLEUNKNOWN) + + +_ONEHOTENCODER = _descriptor.Descriptor( + name='OneHotEncoder', + full_name='CoreML.Specification.OneHotEncoder', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='stringCategories', full_name='CoreML.Specification.OneHotEncoder.stringCategories', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='int64Categories', full_name='CoreML.Specification.OneHotEncoder.int64Categories', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='outputSparse', full_name='CoreML.Specification.OneHotEncoder.outputSparse', index=2, + number=10, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='handleUnknown', full_name='CoreML.Specification.OneHotEncoder.handleUnknown', index=3, + number=11, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, 
+ message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _ONEHOTENCODER_HANDLEUNKNOWN, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='CategoryType', full_name='CoreML.Specification.OneHotEncoder.CategoryType', + index=0, containing_type=None, fields=[]), + ], + serialized_start=68, + serialized_end=377, +) + +_ONEHOTENCODER.fields_by_name['stringCategories'].message_type = DataStructures__pb2._STRINGVECTOR +_ONEHOTENCODER.fields_by_name['int64Categories'].message_type = DataStructures__pb2._INT64VECTOR +_ONEHOTENCODER.fields_by_name['handleUnknown'].enum_type = _ONEHOTENCODER_HANDLEUNKNOWN +_ONEHOTENCODER_HANDLEUNKNOWN.containing_type = _ONEHOTENCODER +_ONEHOTENCODER.oneofs_by_name['CategoryType'].fields.append( + _ONEHOTENCODER.fields_by_name['stringCategories']) +_ONEHOTENCODER.fields_by_name['stringCategories'].containing_oneof = _ONEHOTENCODER.oneofs_by_name['CategoryType'] +_ONEHOTENCODER.oneofs_by_name['CategoryType'].fields.append( + _ONEHOTENCODER.fields_by_name['int64Categories']) +_ONEHOTENCODER.fields_by_name['int64Categories'].containing_oneof = _ONEHOTENCODER.oneofs_by_name['CategoryType'] +DESCRIPTOR.message_types_by_name['OneHotEncoder'] = _ONEHOTENCODER +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +OneHotEncoder = _reflection.GeneratedProtocolMessageType('OneHotEncoder', (_message.Message,), dict( + DESCRIPTOR = _ONEHOTENCODER, + __module__ = 'OneHotEncoder_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.OneHotEncoder) + )) +_sym_db.RegisterMessage(OneHotEncoder) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/Parameters_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/Parameters_pb2.py new file mode 100644 index 00000000..05273b44 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/Parameters_pb2.py @@ -0,0 +1,235 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: Parameters.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . 
import DataStructures_pb2 as DataStructures__pb2 +try: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2 + +from .DataStructures_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='Parameters.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x10Parameters.proto\x12\x14\x43oreML.Specification\x1a\x14\x44\x61taStructures.proto\"\x99\x01\n\x0eInt64Parameter\x12\x14\n\x0c\x64\x65\x66\x61ultValue\x18\x01 \x01(\x03\x12\x31\n\x05range\x18\n \x01(\x0b\x32 .CoreML.Specification.Int64RangeH\x00\x12-\n\x03set\x18\x0b \x01(\x0b\x32\x1e.CoreML.Specification.Int64SetH\x00\x42\x0f\n\rAllowedValues\"l\n\x0f\x44oubleParameter\x12\x14\n\x0c\x64\x65\x66\x61ultValue\x18\x01 \x01(\x01\x12\x32\n\x05range\x18\n \x01(\x0b\x32!.CoreML.Specification.DoubleRangeH\x00\x42\x0f\n\rAllowedValues\"\'\n\x0fStringParameter\x12\x14\n\x0c\x64\x65\x66\x61ultValue\x18\x01 \x01(\t\"%\n\rBoolParameter\x12\x14\n\x0c\x64\x65\x66\x61ultValue\x18\x01 \x01(\x08\x42\x02H\x03P\x00\x62\x06proto3') + , + dependencies=[DataStructures__pb2.DESCRIPTOR,], + public_dependencies=[DataStructures__pb2.DESCRIPTOR,]) + + + + +_INT64PARAMETER = _descriptor.Descriptor( + name='Int64Parameter', + full_name='CoreML.Specification.Int64Parameter', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='defaultValue', full_name='CoreML.Specification.Int64Parameter.defaultValue', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='range', full_name='CoreML.Specification.Int64Parameter.range', index=1, + number=10, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='set', full_name='CoreML.Specification.Int64Parameter.set', index=2, + number=11, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='AllowedValues', full_name='CoreML.Specification.Int64Parameter.AllowedValues', + index=0, containing_type=None, fields=[]), + ], + serialized_start=65, + serialized_end=218, +) + + +_DOUBLEPARAMETER = _descriptor.Descriptor( + name='DoubleParameter', + full_name='CoreML.Specification.DoubleParameter', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='defaultValue', full_name='CoreML.Specification.DoubleParameter.defaultValue', index=0, + number=1, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='range', full_name='CoreML.Specification.DoubleParameter.range', index=1, + number=10, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='AllowedValues', full_name='CoreML.Specification.DoubleParameter.AllowedValues', + index=0, containing_type=None, fields=[]), + ], + serialized_start=220, + serialized_end=328, +) + + +_STRINGPARAMETER = _descriptor.Descriptor( + name='StringParameter', + full_name='CoreML.Specification.StringParameter', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='defaultValue', full_name='CoreML.Specification.StringParameter.defaultValue', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=330, + serialized_end=369, +) + + +_BOOLPARAMETER = _descriptor.Descriptor( + name='BoolParameter', + full_name='CoreML.Specification.BoolParameter', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='defaultValue', full_name='CoreML.Specification.BoolParameter.defaultValue', index=0, + number=1, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=371, + serialized_end=408, +) + +_INT64PARAMETER.fields_by_name['range'].message_type = DataStructures__pb2._INT64RANGE +_INT64PARAMETER.fields_by_name['set'].message_type = DataStructures__pb2._INT64SET +_INT64PARAMETER.oneofs_by_name['AllowedValues'].fields.append( + _INT64PARAMETER.fields_by_name['range']) +_INT64PARAMETER.fields_by_name['range'].containing_oneof = _INT64PARAMETER.oneofs_by_name['AllowedValues'] +_INT64PARAMETER.oneofs_by_name['AllowedValues'].fields.append( + _INT64PARAMETER.fields_by_name['set']) +_INT64PARAMETER.fields_by_name['set'].containing_oneof = _INT64PARAMETER.oneofs_by_name['AllowedValues'] +_DOUBLEPARAMETER.fields_by_name['range'].message_type = DataStructures__pb2._DOUBLERANGE +_DOUBLEPARAMETER.oneofs_by_name['AllowedValues'].fields.append( + _DOUBLEPARAMETER.fields_by_name['range']) +_DOUBLEPARAMETER.fields_by_name['range'].containing_oneof = _DOUBLEPARAMETER.oneofs_by_name['AllowedValues'] +DESCRIPTOR.message_types_by_name['Int64Parameter'] = _INT64PARAMETER +DESCRIPTOR.message_types_by_name['DoubleParameter'] = _DOUBLEPARAMETER +DESCRIPTOR.message_types_by_name['StringParameter'] = _STRINGPARAMETER +DESCRIPTOR.message_types_by_name['BoolParameter'] = _BOOLPARAMETER +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Int64Parameter = _reflection.GeneratedProtocolMessageType('Int64Parameter', (_message.Message,), dict( + DESCRIPTOR = _INT64PARAMETER, + __module__ = 'Parameters_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Int64Parameter) + )) +_sym_db.RegisterMessage(Int64Parameter) + +DoubleParameter = _reflection.GeneratedProtocolMessageType('DoubleParameter', 
(_message.Message,), dict( + DESCRIPTOR = _DOUBLEPARAMETER, + __module__ = 'Parameters_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.DoubleParameter) + )) +_sym_db.RegisterMessage(DoubleParameter) + +StringParameter = _reflection.GeneratedProtocolMessageType('StringParameter', (_message.Message,), dict( + DESCRIPTOR = _STRINGPARAMETER, + __module__ = 'Parameters_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.StringParameter) + )) +_sym_db.RegisterMessage(StringParameter) + +BoolParameter = _reflection.GeneratedProtocolMessageType('BoolParameter', (_message.Message,), dict( + DESCRIPTOR = _BOOLPARAMETER, + __module__ = 'Parameters_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.BoolParameter) + )) +_sym_db.RegisterMessage(BoolParameter) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/SVM_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/SVM_pb2.py new file mode 100644 index 00000000..5ad1de30 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/SVM_pb2.py @@ -0,0 +1,739 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: SVM.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . 
import DataStructures_pb2 as DataStructures__pb2 +try: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2 + +from .DataStructures_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='SVM.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\tSVM.proto\x12\x14\x43oreML.Specification\x1a\x14\x44\x61taStructures.proto\"\x0e\n\x0cLinearKernel\"\x1a\n\tRBFKernel\x12\r\n\x05gamma\x18\x01 \x01(\x01\"6\n\nPolyKernel\x12\x0e\n\x06\x64\x65gree\x18\x01 \x01(\x05\x12\t\n\x01\x63\x18\x02 \x01(\x01\x12\r\n\x05gamma\x18\x03 \x01(\x01\")\n\rSigmoidKernel\x12\r\n\x05gamma\x18\x01 \x01(\x01\x12\t\n\x01\x63\x18\x02 \x01(\x01\"\xfa\x01\n\x06Kernel\x12:\n\x0clinearKernel\x18\x01 \x01(\x0b\x32\".CoreML.Specification.LinearKernelH\x00\x12\x34\n\trbfKernel\x18\x02 \x01(\x0b\x32\x1f.CoreML.Specification.RBFKernelH\x00\x12\x36\n\npolyKernel\x18\x03 \x01(\x0b\x32 .CoreML.Specification.PolyKernelH\x00\x12<\n\rsigmoidKernel\x18\x04 \x01(\x0b\x32#.CoreML.Specification.SigmoidKernelH\x00\x42\x08\n\x06kernel\"*\n\nSparseNode\x12\r\n\x05index\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x01\"?\n\x0cSparseVector\x12/\n\x05nodes\x18\x01 \x03(\x0b\x32 .CoreML.Specification.SparseNode\"K\n\x14SparseSupportVectors\x12\x33\n\x07vectors\x18\x01 \x03(\x0b\x32\".CoreML.Specification.SparseVector\"\x1d\n\x0b\x44\x65nseVector\x12\x0e\n\x06values\x18\x01 \x03(\x01\"I\n\x13\x44\x65nseSupportVectors\x12\x32\n\x07vectors\x18\x01 \x03(\x0b\x32!.CoreML.Specification.DenseVector\"\x1d\n\x0c\x43oefficients\x12\r\n\x05\x61lpha\x18\x01 \x03(\x01\"\xb5\x02\n\x16SupportVectorRegressor\x12,\n\x06kernel\x18\x01 \x01(\x0b\x32\x1c.CoreML.Specification.Kernel\x12J\n\x14sparseSupportVectors\x18\x02 \x01(\x0b\x32*.CoreML.Specification.SparseSupportVectorsH\x00\x12H\n\x13\x64\x65nseSupportVectors\x18\x03 \x01(\x0b\x32).CoreML.Specification.DenseSupportVectorsH\x00\x12\x38\n\x0c\x63oefficients\x18\x04 \x01(\x0b\x32\".CoreML.Specification.Coefficients\x12\x0b\n\x03rho\x18\x05 \x01(\x01\x42\x10\n\x0esupportVectors\"\x8b\x04\n\x17SupportVectorClassifier\x12,\n\x06kernel\x18\x01 \x01(\x0b\x32\x1c.CoreML.Specification.Kernel\x12&\n\x1enumberOfSupportVectorsPerClass\x18\x02 \x03(\x05\x12J\n\x14sparseSupportVectors\x18\x03 \x01(\x0b\x32*.CoreML.Specification.SparseSupportVectorsH\x00\x12H\n\x13\x64\x65nseSupportVectors\x18\x04 \x01(\x0b\x32).CoreML.Specification.DenseSupportVectorsH\x00\x12\x38\n\x0c\x63oefficients\x18\x05 \x03(\x0b\x32\".CoreML.Specification.Coefficients\x12\x0b\n\x03rho\x18\x06 \x03(\x01\x12\r\n\x05probA\x18\x07 \x03(\x01\x12\r\n\x05probB\x18\x08 \x03(\x01\x12?\n\x11stringClassLabels\x18\x64 \x01(\x0b\x32\".CoreML.Specification.StringVectorH\x01\x12=\n\x10int64ClassLabels\x18\x65 \x01(\x0b\x32!.CoreML.Specification.Int64VectorH\x01\x42\x10\n\x0esupportVectorsB\r\n\x0b\x43lassLabelsB\x02H\x03P\x00\x62\x06proto3') + , + dependencies=[DataStructures__pb2.DESCRIPTOR,], + public_dependencies=[DataStructures__pb2.DESCRIPTOR,]) + + + + +_LINEARKERNEL = _descriptor.Descriptor( + name='LinearKernel', + full_name='CoreML.Specification.LinearKernel', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=57, + serialized_end=71, +) + + +_RBFKERNEL = _descriptor.Descriptor( + name='RBFKernel', + 
full_name='CoreML.Specification.RBFKernel', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='gamma', full_name='CoreML.Specification.RBFKernel.gamma', index=0, + number=1, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=73, + serialized_end=99, +) + + +_POLYKERNEL = _descriptor.Descriptor( + name='PolyKernel', + full_name='CoreML.Specification.PolyKernel', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='degree', full_name='CoreML.Specification.PolyKernel.degree', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='c', full_name='CoreML.Specification.PolyKernel.c', index=1, + number=2, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='gamma', full_name='CoreML.Specification.PolyKernel.gamma', index=2, + number=3, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=101, + serialized_end=155, +) + + +_SIGMOIDKERNEL = _descriptor.Descriptor( + name='SigmoidKernel', + full_name='CoreML.Specification.SigmoidKernel', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='gamma', full_name='CoreML.Specification.SigmoidKernel.gamma', index=0, + number=1, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='c', full_name='CoreML.Specification.SigmoidKernel.c', index=1, + number=2, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=157, + serialized_end=198, +) + + +_KERNEL = _descriptor.Descriptor( + name='Kernel', + full_name='CoreML.Specification.Kernel', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='linearKernel', full_name='CoreML.Specification.Kernel.linearKernel', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + 
name='rbfKernel', full_name='CoreML.Specification.Kernel.rbfKernel', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='polyKernel', full_name='CoreML.Specification.Kernel.polyKernel', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sigmoidKernel', full_name='CoreML.Specification.Kernel.sigmoidKernel', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='kernel', full_name='CoreML.Specification.Kernel.kernel', + index=0, containing_type=None, fields=[]), + ], + serialized_start=201, + serialized_end=451, +) + + +_SPARSENODE = _descriptor.Descriptor( + name='SparseNode', + full_name='CoreML.Specification.SparseNode', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='index', full_name='CoreML.Specification.SparseNode.index', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='CoreML.Specification.SparseNode.value', index=1, + number=2, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=453, + serialized_end=495, +) + + +_SPARSEVECTOR = _descriptor.Descriptor( + name='SparseVector', + full_name='CoreML.Specification.SparseVector', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='nodes', full_name='CoreML.Specification.SparseVector.nodes', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=497, + serialized_end=560, +) + + +_SPARSESUPPORTVECTORS = _descriptor.Descriptor( + name='SparseSupportVectors', + full_name='CoreML.Specification.SparseSupportVectors', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='vectors', full_name='CoreML.Specification.SparseSupportVectors.vectors', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=562, + serialized_end=637, +) + + +_DENSEVECTOR = _descriptor.Descriptor( + name='DenseVector', + full_name='CoreML.Specification.DenseVector', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='values', full_name='CoreML.Specification.DenseVector.values', index=0, + number=1, type=1, cpp_type=5, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=639, + serialized_end=668, +) + + +_DENSESUPPORTVECTORS = _descriptor.Descriptor( + name='DenseSupportVectors', + full_name='CoreML.Specification.DenseSupportVectors', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='vectors', full_name='CoreML.Specification.DenseSupportVectors.vectors', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=670, + serialized_end=743, +) + + +_COEFFICIENTS = _descriptor.Descriptor( + name='Coefficients', + full_name='CoreML.Specification.Coefficients', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='alpha', full_name='CoreML.Specification.Coefficients.alpha', index=0, + number=1, type=1, cpp_type=5, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=745, + serialized_end=774, +) + + +_SUPPORTVECTORREGRESSOR = _descriptor.Descriptor( + name='SupportVectorRegressor', + full_name='CoreML.Specification.SupportVectorRegressor', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='kernel', full_name='CoreML.Specification.SupportVectorRegressor.kernel', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sparseSupportVectors', full_name='CoreML.Specification.SupportVectorRegressor.sparseSupportVectors', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='denseSupportVectors', full_name='CoreML.Specification.SupportVectorRegressor.denseSupportVectors', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + 
message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='coefficients', full_name='CoreML.Specification.SupportVectorRegressor.coefficients', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='rho', full_name='CoreML.Specification.SupportVectorRegressor.rho', index=4, + number=5, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='supportVectors', full_name='CoreML.Specification.SupportVectorRegressor.supportVectors', + index=0, containing_type=None, fields=[]), + ], + serialized_start=777, + serialized_end=1086, +) + + +_SUPPORTVECTORCLASSIFIER = _descriptor.Descriptor( + name='SupportVectorClassifier', + full_name='CoreML.Specification.SupportVectorClassifier', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='kernel', full_name='CoreML.Specification.SupportVectorClassifier.kernel', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='numberOfSupportVectorsPerClass', full_name='CoreML.Specification.SupportVectorClassifier.numberOfSupportVectorsPerClass', index=1, + number=2, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sparseSupportVectors', full_name='CoreML.Specification.SupportVectorClassifier.sparseSupportVectors', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='denseSupportVectors', full_name='CoreML.Specification.SupportVectorClassifier.denseSupportVectors', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='coefficients', full_name='CoreML.Specification.SupportVectorClassifier.coefficients', index=4, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='rho', full_name='CoreML.Specification.SupportVectorClassifier.rho', index=5, + number=6, type=1, cpp_type=5, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='probA', 
full_name='CoreML.Specification.SupportVectorClassifier.probA', index=6, + number=7, type=1, cpp_type=5, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='probB', full_name='CoreML.Specification.SupportVectorClassifier.probB', index=7, + number=8, type=1, cpp_type=5, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stringClassLabels', full_name='CoreML.Specification.SupportVectorClassifier.stringClassLabels', index=8, + number=100, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='int64ClassLabels', full_name='CoreML.Specification.SupportVectorClassifier.int64ClassLabels', index=9, + number=101, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='supportVectors', full_name='CoreML.Specification.SupportVectorClassifier.supportVectors', + index=0, containing_type=None, fields=[]), + _descriptor.OneofDescriptor( + name='ClassLabels', full_name='CoreML.Specification.SupportVectorClassifier.ClassLabels', + index=1, containing_type=None, fields=[]), + ], + serialized_start=1089, + serialized_end=1612, +) + +_KERNEL.fields_by_name['linearKernel'].message_type = _LINEARKERNEL +_KERNEL.fields_by_name['rbfKernel'].message_type = _RBFKERNEL +_KERNEL.fields_by_name['polyKernel'].message_type = _POLYKERNEL +_KERNEL.fields_by_name['sigmoidKernel'].message_type = _SIGMOIDKERNEL +_KERNEL.oneofs_by_name['kernel'].fields.append( + _KERNEL.fields_by_name['linearKernel']) +_KERNEL.fields_by_name['linearKernel'].containing_oneof = _KERNEL.oneofs_by_name['kernel'] +_KERNEL.oneofs_by_name['kernel'].fields.append( + _KERNEL.fields_by_name['rbfKernel']) +_KERNEL.fields_by_name['rbfKernel'].containing_oneof = _KERNEL.oneofs_by_name['kernel'] +_KERNEL.oneofs_by_name['kernel'].fields.append( + _KERNEL.fields_by_name['polyKernel']) +_KERNEL.fields_by_name['polyKernel'].containing_oneof = _KERNEL.oneofs_by_name['kernel'] +_KERNEL.oneofs_by_name['kernel'].fields.append( + _KERNEL.fields_by_name['sigmoidKernel']) +_KERNEL.fields_by_name['sigmoidKernel'].containing_oneof = _KERNEL.oneofs_by_name['kernel'] +_SPARSEVECTOR.fields_by_name['nodes'].message_type = _SPARSENODE +_SPARSESUPPORTVECTORS.fields_by_name['vectors'].message_type = _SPARSEVECTOR +_DENSESUPPORTVECTORS.fields_by_name['vectors'].message_type = _DENSEVECTOR +_SUPPORTVECTORREGRESSOR.fields_by_name['kernel'].message_type = _KERNEL +_SUPPORTVECTORREGRESSOR.fields_by_name['sparseSupportVectors'].message_type = _SPARSESUPPORTVECTORS +_SUPPORTVECTORREGRESSOR.fields_by_name['denseSupportVectors'].message_type = _DENSESUPPORTVECTORS +_SUPPORTVECTORREGRESSOR.fields_by_name['coefficients'].message_type = _COEFFICIENTS +_SUPPORTVECTORREGRESSOR.oneofs_by_name['supportVectors'].fields.append( + 
_SUPPORTVECTORREGRESSOR.fields_by_name['sparseSupportVectors']) +_SUPPORTVECTORREGRESSOR.fields_by_name['sparseSupportVectors'].containing_oneof = _SUPPORTVECTORREGRESSOR.oneofs_by_name['supportVectors'] +_SUPPORTVECTORREGRESSOR.oneofs_by_name['supportVectors'].fields.append( + _SUPPORTVECTORREGRESSOR.fields_by_name['denseSupportVectors']) +_SUPPORTVECTORREGRESSOR.fields_by_name['denseSupportVectors'].containing_oneof = _SUPPORTVECTORREGRESSOR.oneofs_by_name['supportVectors'] +_SUPPORTVECTORCLASSIFIER.fields_by_name['kernel'].message_type = _KERNEL +_SUPPORTVECTORCLASSIFIER.fields_by_name['sparseSupportVectors'].message_type = _SPARSESUPPORTVECTORS +_SUPPORTVECTORCLASSIFIER.fields_by_name['denseSupportVectors'].message_type = _DENSESUPPORTVECTORS +_SUPPORTVECTORCLASSIFIER.fields_by_name['coefficients'].message_type = _COEFFICIENTS +_SUPPORTVECTORCLASSIFIER.fields_by_name['stringClassLabels'].message_type = DataStructures__pb2._STRINGVECTOR +_SUPPORTVECTORCLASSIFIER.fields_by_name['int64ClassLabels'].message_type = DataStructures__pb2._INT64VECTOR +_SUPPORTVECTORCLASSIFIER.oneofs_by_name['supportVectors'].fields.append( + _SUPPORTVECTORCLASSIFIER.fields_by_name['sparseSupportVectors']) +_SUPPORTVECTORCLASSIFIER.fields_by_name['sparseSupportVectors'].containing_oneof = _SUPPORTVECTORCLASSIFIER.oneofs_by_name['supportVectors'] +_SUPPORTVECTORCLASSIFIER.oneofs_by_name['supportVectors'].fields.append( + _SUPPORTVECTORCLASSIFIER.fields_by_name['denseSupportVectors']) +_SUPPORTVECTORCLASSIFIER.fields_by_name['denseSupportVectors'].containing_oneof = _SUPPORTVECTORCLASSIFIER.oneofs_by_name['supportVectors'] +_SUPPORTVECTORCLASSIFIER.oneofs_by_name['ClassLabels'].fields.append( + _SUPPORTVECTORCLASSIFIER.fields_by_name['stringClassLabels']) +_SUPPORTVECTORCLASSIFIER.fields_by_name['stringClassLabels'].containing_oneof = _SUPPORTVECTORCLASSIFIER.oneofs_by_name['ClassLabels'] +_SUPPORTVECTORCLASSIFIER.oneofs_by_name['ClassLabels'].fields.append( + _SUPPORTVECTORCLASSIFIER.fields_by_name['int64ClassLabels']) +_SUPPORTVECTORCLASSIFIER.fields_by_name['int64ClassLabels'].containing_oneof = _SUPPORTVECTORCLASSIFIER.oneofs_by_name['ClassLabels'] +DESCRIPTOR.message_types_by_name['LinearKernel'] = _LINEARKERNEL +DESCRIPTOR.message_types_by_name['RBFKernel'] = _RBFKERNEL +DESCRIPTOR.message_types_by_name['PolyKernel'] = _POLYKERNEL +DESCRIPTOR.message_types_by_name['SigmoidKernel'] = _SIGMOIDKERNEL +DESCRIPTOR.message_types_by_name['Kernel'] = _KERNEL +DESCRIPTOR.message_types_by_name['SparseNode'] = _SPARSENODE +DESCRIPTOR.message_types_by_name['SparseVector'] = _SPARSEVECTOR +DESCRIPTOR.message_types_by_name['SparseSupportVectors'] = _SPARSESUPPORTVECTORS +DESCRIPTOR.message_types_by_name['DenseVector'] = _DENSEVECTOR +DESCRIPTOR.message_types_by_name['DenseSupportVectors'] = _DENSESUPPORTVECTORS +DESCRIPTOR.message_types_by_name['Coefficients'] = _COEFFICIENTS +DESCRIPTOR.message_types_by_name['SupportVectorRegressor'] = _SUPPORTVECTORREGRESSOR +DESCRIPTOR.message_types_by_name['SupportVectorClassifier'] = _SUPPORTVECTORCLASSIFIER +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +LinearKernel = _reflection.GeneratedProtocolMessageType('LinearKernel', (_message.Message,), dict( + DESCRIPTOR = _LINEARKERNEL, + __module__ = 'SVM_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.LinearKernel) + )) +_sym_db.RegisterMessage(LinearKernel) + +RBFKernel = _reflection.GeneratedProtocolMessageType('RBFKernel', (_message.Message,), dict( + DESCRIPTOR = _RBFKERNEL, + __module__ = 'SVM_pb2' + # 
@@protoc_insertion_point(class_scope:CoreML.Specification.RBFKernel) + )) +_sym_db.RegisterMessage(RBFKernel) + +PolyKernel = _reflection.GeneratedProtocolMessageType('PolyKernel', (_message.Message,), dict( + DESCRIPTOR = _POLYKERNEL, + __module__ = 'SVM_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.PolyKernel) + )) +_sym_db.RegisterMessage(PolyKernel) + +SigmoidKernel = _reflection.GeneratedProtocolMessageType('SigmoidKernel', (_message.Message,), dict( + DESCRIPTOR = _SIGMOIDKERNEL, + __module__ = 'SVM_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SigmoidKernel) + )) +_sym_db.RegisterMessage(SigmoidKernel) + +Kernel = _reflection.GeneratedProtocolMessageType('Kernel', (_message.Message,), dict( + DESCRIPTOR = _KERNEL, + __module__ = 'SVM_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Kernel) + )) +_sym_db.RegisterMessage(Kernel) + +SparseNode = _reflection.GeneratedProtocolMessageType('SparseNode', (_message.Message,), dict( + DESCRIPTOR = _SPARSENODE, + __module__ = 'SVM_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SparseNode) + )) +_sym_db.RegisterMessage(SparseNode) + +SparseVector = _reflection.GeneratedProtocolMessageType('SparseVector', (_message.Message,), dict( + DESCRIPTOR = _SPARSEVECTOR, + __module__ = 'SVM_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SparseVector) + )) +_sym_db.RegisterMessage(SparseVector) + +SparseSupportVectors = _reflection.GeneratedProtocolMessageType('SparseSupportVectors', (_message.Message,), dict( + DESCRIPTOR = _SPARSESUPPORTVECTORS, + __module__ = 'SVM_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SparseSupportVectors) + )) +_sym_db.RegisterMessage(SparseSupportVectors) + +DenseVector = _reflection.GeneratedProtocolMessageType('DenseVector', (_message.Message,), dict( + DESCRIPTOR = _DENSEVECTOR, + __module__ = 'SVM_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.DenseVector) + )) +_sym_db.RegisterMessage(DenseVector) + +DenseSupportVectors = _reflection.GeneratedProtocolMessageType('DenseSupportVectors', (_message.Message,), dict( + DESCRIPTOR = _DENSESUPPORTVECTORS, + __module__ = 'SVM_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.DenseSupportVectors) + )) +_sym_db.RegisterMessage(DenseSupportVectors) + +Coefficients = _reflection.GeneratedProtocolMessageType('Coefficients', (_message.Message,), dict( + DESCRIPTOR = _COEFFICIENTS, + __module__ = 'SVM_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Coefficients) + )) +_sym_db.RegisterMessage(Coefficients) + +SupportVectorRegressor = _reflection.GeneratedProtocolMessageType('SupportVectorRegressor', (_message.Message,), dict( + DESCRIPTOR = _SUPPORTVECTORREGRESSOR, + __module__ = 'SVM_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SupportVectorRegressor) + )) +_sym_db.RegisterMessage(SupportVectorRegressor) + +SupportVectorClassifier = _reflection.GeneratedProtocolMessageType('SupportVectorClassifier', (_message.Message,), dict( + DESCRIPTOR = _SUPPORTVECTORCLASSIFIER, + __module__ = 'SVM_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.SupportVectorClassifier) + )) +_sym_db.RegisterMessage(SupportVectorClassifier) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/Scaler_pb2.py 
b/__packaged__/coreml/.python_dependencies/coremltools/proto/Scaler_pb2.py new file mode 100644 index 00000000..c5f3d323 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/Scaler_pb2.py @@ -0,0 +1,78 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: Scaler.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='Scaler.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x0cScaler.proto\x12\x14\x43oreML.Specification\"0\n\x06Scaler\x12\x12\n\nshiftValue\x18\x01 \x03(\x01\x12\x12\n\nscaleValue\x18\x02 \x03(\x01\x42\x02H\x03\x62\x06proto3') +) + + + + +_SCALER = _descriptor.Descriptor( + name='Scaler', + full_name='CoreML.Specification.Scaler', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='shiftValue', full_name='CoreML.Specification.Scaler.shiftValue', index=0, + number=1, type=1, cpp_type=5, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='scaleValue', full_name='CoreML.Specification.Scaler.scaleValue', index=1, + number=2, type=1, cpp_type=5, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=38, + serialized_end=86, +) + +DESCRIPTOR.message_types_by_name['Scaler'] = _SCALER +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Scaler = _reflection.GeneratedProtocolMessageType('Scaler', (_message.Message,), dict( + DESCRIPTOR = _SCALER, + __module__ = 'Scaler_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.Scaler) + )) +_sym_db.RegisterMessage(Scaler) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/SoundAnalysisPreprocessing_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/SoundAnalysisPreprocessing_pb2.py new file mode 100644 index 00000000..271a0a62 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/SoundAnalysisPreprocessing_pb2.py @@ -0,0 +1,110 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
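+# --- Usage sketch (illustrative, not part of the compiler output) ---
+# The ``SoundAnalysisPreprocessing`` message generated in this module carries a
+# oneof named ``SoundAnalysisPreprocessingType`` whose only variant is the empty
+# ``Vggish`` submessage. A hedged sketch of selecting that variant, assuming
+# this module is importable as part of ``coremltools.proto``:
+#
+#     from coremltools.proto import SoundAnalysisPreprocessing_pb2 as sap_pb2
+#
+#     p = sap_pb2.SoundAnalysisPreprocessing()
+#     p.vggish.SetInParent()  # marks the empty Vggish submessage as present
+#     assert p.WhichOneof("SoundAnalysisPreprocessingType") == "vggish"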
+# source: SoundAnalysisPreprocessing.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='SoundAnalysisPreprocessing.proto', + package='CoreML.Specification.CoreMLModels', + syntax='proto3', + serialized_pb=_b('\n SoundAnalysisPreprocessing.proto\x12!CoreML.Specification.CoreMLModels\"\xa0\x01\n\x1aSoundAnalysisPreprocessing\x12V\n\x06vggish\x18\x14 \x01(\x0b\x32\x44.CoreML.Specification.CoreMLModels.SoundAnalysisPreprocessing.VggishH\x00\x1a\x08\n\x06VggishB \n\x1eSoundAnalysisPreprocessingTypeB\x02H\x03\x62\x06proto3') +) + + + + +_SOUNDANALYSISPREPROCESSING_VGGISH = _descriptor.Descriptor( + name='Vggish', + full_name='CoreML.Specification.CoreMLModels.SoundAnalysisPreprocessing.Vggish', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=190, + serialized_end=198, +) + +_SOUNDANALYSISPREPROCESSING = _descriptor.Descriptor( + name='SoundAnalysisPreprocessing', + full_name='CoreML.Specification.CoreMLModels.SoundAnalysisPreprocessing', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='vggish', full_name='CoreML.Specification.CoreMLModels.SoundAnalysisPreprocessing.vggish', index=0, + number=20, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_SOUNDANALYSISPREPROCESSING_VGGISH, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='SoundAnalysisPreprocessingType', full_name='CoreML.Specification.CoreMLModels.SoundAnalysisPreprocessing.SoundAnalysisPreprocessingType', + index=0, containing_type=None, fields=[]), + ], + serialized_start=72, + serialized_end=232, +) + +_SOUNDANALYSISPREPROCESSING_VGGISH.containing_type = _SOUNDANALYSISPREPROCESSING +_SOUNDANALYSISPREPROCESSING.fields_by_name['vggish'].message_type = _SOUNDANALYSISPREPROCESSING_VGGISH +_SOUNDANALYSISPREPROCESSING.oneofs_by_name['SoundAnalysisPreprocessingType'].fields.append( + _SOUNDANALYSISPREPROCESSING.fields_by_name['vggish']) +_SOUNDANALYSISPREPROCESSING.fields_by_name['vggish'].containing_oneof = _SOUNDANALYSISPREPROCESSING.oneofs_by_name['SoundAnalysisPreprocessingType'] +DESCRIPTOR.message_types_by_name['SoundAnalysisPreprocessing'] = _SOUNDANALYSISPREPROCESSING +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +SoundAnalysisPreprocessing = _reflection.GeneratedProtocolMessageType('SoundAnalysisPreprocessing', (_message.Message,), dict( + + Vggish = _reflection.GeneratedProtocolMessageType('Vggish', (_message.Message,), dict( + DESCRIPTOR = _SOUNDANALYSISPREPROCESSING_VGGISH, + __module__ = 'SoundAnalysisPreprocessing_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CoreMLModels.SoundAnalysisPreprocessing.Vggish) + )) + , + 
DESCRIPTOR = _SOUNDANALYSISPREPROCESSING, + __module__ = 'SoundAnalysisPreprocessing_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CoreMLModels.SoundAnalysisPreprocessing) + )) +_sym_db.RegisterMessage(SoundAnalysisPreprocessing) +_sym_db.RegisterMessage(SoundAnalysisPreprocessing.Vggish) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/TextClassifier_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/TextClassifier_pb2.py new file mode 100644 index 00000000..9edaafcf --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/TextClassifier_pb2.py @@ -0,0 +1,107 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: TextClassifier.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . import DataStructures_pb2 as DataStructures__pb2 +try: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2 + +from .DataStructures_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='TextClassifier.proto', + package='CoreML.Specification.CoreMLModels', + syntax='proto3', + serialized_pb=_b('\n\x14TextClassifier.proto\x12!CoreML.Specification.CoreMLModels\x1a\x14\x44\x61taStructures.proto\"\xa1\x01\n\x0eTextClassifier\x12\x10\n\x08revision\x18\x01 \x01(\r\x12\x10\n\x08language\x18\n \x01(\t\x12\x1a\n\x12modelParameterData\x18\x64 \x01(\x0c\x12@\n\x11stringClassLabels\x18\xc8\x01 \x01(\x0b\x32\".CoreML.Specification.StringVectorH\x00\x42\r\n\x0b\x43lassLabelsB\x02H\x03P\x00\x62\x06proto3') + , + dependencies=[DataStructures__pb2.DESCRIPTOR,], + public_dependencies=[DataStructures__pb2.DESCRIPTOR,]) + + + + +_TEXTCLASSIFIER = _descriptor.Descriptor( + name='TextClassifier', + full_name='CoreML.Specification.CoreMLModels.TextClassifier', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='revision', full_name='CoreML.Specification.CoreMLModels.TextClassifier.revision', index=0, + number=1, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='language', full_name='CoreML.Specification.CoreMLModels.TextClassifier.language', index=1, + number=10, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='modelParameterData', full_name='CoreML.Specification.CoreMLModels.TextClassifier.modelParameterData', index=2, + number=100, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + 
_descriptor.FieldDescriptor( + name='stringClassLabels', full_name='CoreML.Specification.CoreMLModels.TextClassifier.stringClassLabels', index=3, + number=200, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='ClassLabels', full_name='CoreML.Specification.CoreMLModels.TextClassifier.ClassLabels', + index=0, containing_type=None, fields=[]), + ], + serialized_start=82, + serialized_end=243, +) + +_TEXTCLASSIFIER.fields_by_name['stringClassLabels'].message_type = DataStructures__pb2._STRINGVECTOR +_TEXTCLASSIFIER.oneofs_by_name['ClassLabels'].fields.append( + _TEXTCLASSIFIER.fields_by_name['stringClassLabels']) +_TEXTCLASSIFIER.fields_by_name['stringClassLabels'].containing_oneof = _TEXTCLASSIFIER.oneofs_by_name['ClassLabels'] +DESCRIPTOR.message_types_by_name['TextClassifier'] = _TEXTCLASSIFIER +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +TextClassifier = _reflection.GeneratedProtocolMessageType('TextClassifier', (_message.Message,), dict( + DESCRIPTOR = _TEXTCLASSIFIER, + __module__ = 'TextClassifier_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CoreMLModels.TextClassifier) + )) +_sym_db.RegisterMessage(TextClassifier) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/TreeEnsemble_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/TreeEnsemble_pb2.py new file mode 100644 index 00000000..b2a96adf --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/TreeEnsemble_pb2.py @@ -0,0 +1,446 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: TreeEnsemble.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf.internal import enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . 
import DataStructures_pb2 as DataStructures__pb2 +try: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2 + +from .DataStructures_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='TreeEnsemble.proto', + package='CoreML.Specification', + syntax='proto3', + serialized_pb=_b('\n\x12TreeEnsemble.proto\x12\x14\x43oreML.Specification\x1a\x14\x44\x61taStructures.proto\"\xc4\x06\n\x16TreeEnsembleParameters\x12\x44\n\x05nodes\x18\x01 \x03(\x0b\x32\x35.CoreML.Specification.TreeEnsembleParameters.TreeNode\x12\x1f\n\x17numPredictionDimensions\x18\x02 \x01(\x04\x12\x1b\n\x13\x62\x61sePredictionValue\x18\x03 \x03(\x01\x1a\xa5\x05\n\x08TreeNode\x12\x0e\n\x06treeId\x18\x01 \x01(\x04\x12\x0e\n\x06nodeId\x18\x02 \x01(\x04\x12\\\n\x0cnodeBehavior\x18\x03 \x01(\x0e\x32\x46.CoreML.Specification.TreeEnsembleParameters.TreeNode.TreeNodeBehavior\x12\x1a\n\x12\x62ranchFeatureIndex\x18\n \x01(\x04\x12\x1a\n\x12\x62ranchFeatureValue\x18\x0b \x01(\x01\x12\x17\n\x0ftrueChildNodeId\x18\x0c \x01(\x04\x12\x18\n\x10\x66\x61lseChildNodeId\x18\r \x01(\x04\x12#\n\x1bmissingValueTracksTrueChild\x18\x0e \x01(\x08\x12\\\n\x0e\x65valuationInfo\x18\x14 \x03(\x0b\x32\x44.CoreML.Specification.TreeEnsembleParameters.TreeNode.EvaluationInfo\x12\x17\n\x0frelativeHitRate\x18\x1e \x01(\x01\x1a\x42\n\x0e\x45valuationInfo\x12\x17\n\x0f\x65valuationIndex\x18\x01 \x01(\x04\x12\x17\n\x0f\x65valuationValue\x18\x02 \x01(\x01\"\xcf\x01\n\x10TreeNodeBehavior\x12\x1e\n\x1a\x42ranchOnValueLessThanEqual\x10\x00\x12\x19\n\x15\x42ranchOnValueLessThan\x10\x01\x12!\n\x1d\x42ranchOnValueGreaterThanEqual\x10\x02\x12\x1c\n\x18\x42ranchOnValueGreaterThan\x10\x03\x12\x16\n\x12\x42ranchOnValueEqual\x10\x04\x12\x19\n\x15\x42ranchOnValueNotEqual\x10\x05\x12\x0c\n\x08LeafNode\x10\x06\"\xc7\x02\n\x16TreeEnsembleClassifier\x12\x42\n\x0ctreeEnsemble\x18\x01 \x01(\x0b\x32,.CoreML.Specification.TreeEnsembleParameters\x12Z\n\x17postEvaluationTransform\x18\x02 \x01(\x0e\x32\x39.CoreML.Specification.TreeEnsemblePostEvaluationTransform\x12?\n\x11stringClassLabels\x18\x64 \x01(\x0b\x32\".CoreML.Specification.StringVectorH\x00\x12=\n\x10int64ClassLabels\x18\x65 \x01(\x0b\x32!.CoreML.Specification.Int64VectorH\x00\x42\r\n\x0b\x43lassLabels\"\xb7\x01\n\x15TreeEnsembleRegressor\x12\x42\n\x0ctreeEnsemble\x18\x01 \x01(\x0b\x32,.CoreML.Specification.TreeEnsembleParameters\x12Z\n\x17postEvaluationTransform\x18\x02 \x01(\x0e\x32\x39.CoreML.Specification.TreeEnsemblePostEvaluationTransform*\x9d\x01\n#TreeEnsemblePostEvaluationTransform\x12\x0f\n\x0bNoTransform\x10\x00\x12\x1a\n\x16\x43lassification_SoftMax\x10\x01\x12\x17\n\x13Regression_Logistic\x10\x02\x12\x30\n,Classification_SoftMaxWithZeroClassReference\x10\x03\x42\x02H\x03P\x00\x62\x06proto3') + , + dependencies=[DataStructures__pb2.DESCRIPTOR,], + public_dependencies=[DataStructures__pb2.DESCRIPTOR,]) + +_TREEENSEMBLEPOSTEVALUATIONTRANSFORM = _descriptor.EnumDescriptor( + name='TreeEnsemblePostEvaluationTransform', + full_name='CoreML.Specification.TreeEnsemblePostEvaluationTransform', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='NoTransform', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Classification_SoftMax', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Regression_Logistic', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + 
name='Classification_SoftMaxWithZeroClassReference', index=3, number=3, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=1422, + serialized_end=1579, +) +_sym_db.RegisterEnumDescriptor(_TREEENSEMBLEPOSTEVALUATIONTRANSFORM) + +TreeEnsemblePostEvaluationTransform = enum_type_wrapper.EnumTypeWrapper(_TREEENSEMBLEPOSTEVALUATIONTRANSFORM) +NoTransform = 0 +Classification_SoftMax = 1 +Regression_Logistic = 2 +Classification_SoftMaxWithZeroClassReference = 3 + + +_TREEENSEMBLEPARAMETERS_TREENODE_TREENODEBEHAVIOR = _descriptor.EnumDescriptor( + name='TreeNodeBehavior', + full_name='CoreML.Specification.TreeEnsembleParameters.TreeNode.TreeNodeBehavior', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='BranchOnValueLessThanEqual', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BranchOnValueLessThan', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BranchOnValueGreaterThanEqual', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BranchOnValueGreaterThan', index=3, number=3, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BranchOnValueEqual', index=4, number=4, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BranchOnValueNotEqual', index=5, number=5, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='LeafNode', index=6, number=6, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=696, + serialized_end=903, +) +_sym_db.RegisterEnumDescriptor(_TREEENSEMBLEPARAMETERS_TREENODE_TREENODEBEHAVIOR) + + +_TREEENSEMBLEPARAMETERS_TREENODE_EVALUATIONINFO = _descriptor.Descriptor( + name='EvaluationInfo', + full_name='CoreML.Specification.TreeEnsembleParameters.TreeNode.EvaluationInfo', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='evaluationIndex', full_name='CoreML.Specification.TreeEnsembleParameters.TreeNode.EvaluationInfo.evaluationIndex', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='evaluationValue', full_name='CoreML.Specification.TreeEnsembleParameters.TreeNode.EvaluationInfo.evaluationValue', index=1, + number=2, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=627, + serialized_end=693, +) + +_TREEENSEMBLEPARAMETERS_TREENODE = _descriptor.Descriptor( + name='TreeNode', + full_name='CoreML.Specification.TreeEnsembleParameters.TreeNode', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='treeId', full_name='CoreML.Specification.TreeEnsembleParameters.TreeNode.treeId', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='nodeId', 
full_name='CoreML.Specification.TreeEnsembleParameters.TreeNode.nodeId', index=1, + number=2, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='nodeBehavior', full_name='CoreML.Specification.TreeEnsembleParameters.TreeNode.nodeBehavior', index=2, + number=3, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='branchFeatureIndex', full_name='CoreML.Specification.TreeEnsembleParameters.TreeNode.branchFeatureIndex', index=3, + number=10, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='branchFeatureValue', full_name='CoreML.Specification.TreeEnsembleParameters.TreeNode.branchFeatureValue', index=4, + number=11, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='trueChildNodeId', full_name='CoreML.Specification.TreeEnsembleParameters.TreeNode.trueChildNodeId', index=5, + number=12, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='falseChildNodeId', full_name='CoreML.Specification.TreeEnsembleParameters.TreeNode.falseChildNodeId', index=6, + number=13, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='missingValueTracksTrueChild', full_name='CoreML.Specification.TreeEnsembleParameters.TreeNode.missingValueTracksTrueChild', index=7, + number=14, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='evaluationInfo', full_name='CoreML.Specification.TreeEnsembleParameters.TreeNode.evaluationInfo', index=8, + number=20, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='relativeHitRate', full_name='CoreML.Specification.TreeEnsembleParameters.TreeNode.relativeHitRate', index=9, + number=30, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_TREEENSEMBLEPARAMETERS_TREENODE_EVALUATIONINFO, ], + enum_types=[ + _TREEENSEMBLEPARAMETERS_TREENODE_TREENODEBEHAVIOR, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=226, + serialized_end=903, +) + +_TREEENSEMBLEPARAMETERS = _descriptor.Descriptor( + 
name='TreeEnsembleParameters', + full_name='CoreML.Specification.TreeEnsembleParameters', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='nodes', full_name='CoreML.Specification.TreeEnsembleParameters.nodes', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='numPredictionDimensions', full_name='CoreML.Specification.TreeEnsembleParameters.numPredictionDimensions', index=1, + number=2, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='basePredictionValue', full_name='CoreML.Specification.TreeEnsembleParameters.basePredictionValue', index=2, + number=3, type=1, cpp_type=5, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_TREEENSEMBLEPARAMETERS_TREENODE, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=67, + serialized_end=903, +) + + +_TREEENSEMBLECLASSIFIER = _descriptor.Descriptor( + name='TreeEnsembleClassifier', + full_name='CoreML.Specification.TreeEnsembleClassifier', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='treeEnsemble', full_name='CoreML.Specification.TreeEnsembleClassifier.treeEnsemble', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='postEvaluationTransform', full_name='CoreML.Specification.TreeEnsembleClassifier.postEvaluationTransform', index=1, + number=2, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stringClassLabels', full_name='CoreML.Specification.TreeEnsembleClassifier.stringClassLabels', index=2, + number=100, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='int64ClassLabels', full_name='CoreML.Specification.TreeEnsembleClassifier.int64ClassLabels', index=3, + number=101, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='ClassLabels', full_name='CoreML.Specification.TreeEnsembleClassifier.ClassLabels', + index=0, containing_type=None, fields=[]), + ], + serialized_start=906, + serialized_end=1233, +) + + +_TREEENSEMBLEREGRESSOR = _descriptor.Descriptor( + name='TreeEnsembleRegressor', + 
full_name='CoreML.Specification.TreeEnsembleRegressor', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='treeEnsemble', full_name='CoreML.Specification.TreeEnsembleRegressor.treeEnsemble', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='postEvaluationTransform', full_name='CoreML.Specification.TreeEnsembleRegressor.postEvaluationTransform', index=1, + number=2, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1236, + serialized_end=1419, +) + +_TREEENSEMBLEPARAMETERS_TREENODE_EVALUATIONINFO.containing_type = _TREEENSEMBLEPARAMETERS_TREENODE +_TREEENSEMBLEPARAMETERS_TREENODE.fields_by_name['nodeBehavior'].enum_type = _TREEENSEMBLEPARAMETERS_TREENODE_TREENODEBEHAVIOR +_TREEENSEMBLEPARAMETERS_TREENODE.fields_by_name['evaluationInfo'].message_type = _TREEENSEMBLEPARAMETERS_TREENODE_EVALUATIONINFO +_TREEENSEMBLEPARAMETERS_TREENODE.containing_type = _TREEENSEMBLEPARAMETERS +_TREEENSEMBLEPARAMETERS_TREENODE_TREENODEBEHAVIOR.containing_type = _TREEENSEMBLEPARAMETERS_TREENODE +_TREEENSEMBLEPARAMETERS.fields_by_name['nodes'].message_type = _TREEENSEMBLEPARAMETERS_TREENODE +_TREEENSEMBLECLASSIFIER.fields_by_name['treeEnsemble'].message_type = _TREEENSEMBLEPARAMETERS +_TREEENSEMBLECLASSIFIER.fields_by_name['postEvaluationTransform'].enum_type = _TREEENSEMBLEPOSTEVALUATIONTRANSFORM +_TREEENSEMBLECLASSIFIER.fields_by_name['stringClassLabels'].message_type = DataStructures__pb2._STRINGVECTOR +_TREEENSEMBLECLASSIFIER.fields_by_name['int64ClassLabels'].message_type = DataStructures__pb2._INT64VECTOR +_TREEENSEMBLECLASSIFIER.oneofs_by_name['ClassLabels'].fields.append( + _TREEENSEMBLECLASSIFIER.fields_by_name['stringClassLabels']) +_TREEENSEMBLECLASSIFIER.fields_by_name['stringClassLabels'].containing_oneof = _TREEENSEMBLECLASSIFIER.oneofs_by_name['ClassLabels'] +_TREEENSEMBLECLASSIFIER.oneofs_by_name['ClassLabels'].fields.append( + _TREEENSEMBLECLASSIFIER.fields_by_name['int64ClassLabels']) +_TREEENSEMBLECLASSIFIER.fields_by_name['int64ClassLabels'].containing_oneof = _TREEENSEMBLECLASSIFIER.oneofs_by_name['ClassLabels'] +_TREEENSEMBLEREGRESSOR.fields_by_name['treeEnsemble'].message_type = _TREEENSEMBLEPARAMETERS +_TREEENSEMBLEREGRESSOR.fields_by_name['postEvaluationTransform'].enum_type = _TREEENSEMBLEPOSTEVALUATIONTRANSFORM +DESCRIPTOR.message_types_by_name['TreeEnsembleParameters'] = _TREEENSEMBLEPARAMETERS +DESCRIPTOR.message_types_by_name['TreeEnsembleClassifier'] = _TREEENSEMBLECLASSIFIER +DESCRIPTOR.message_types_by_name['TreeEnsembleRegressor'] = _TREEENSEMBLEREGRESSOR +DESCRIPTOR.enum_types_by_name['TreeEnsemblePostEvaluationTransform'] = _TREEENSEMBLEPOSTEVALUATIONTRANSFORM +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +TreeEnsembleParameters = _reflection.GeneratedProtocolMessageType('TreeEnsembleParameters', (_message.Message,), dict( + + TreeNode = _reflection.GeneratedProtocolMessageType('TreeNode', (_message.Message,), dict( + + EvaluationInfo = _reflection.GeneratedProtocolMessageType('EvaluationInfo', 
(_message.Message,), dict( + DESCRIPTOR = _TREEENSEMBLEPARAMETERS_TREENODE_EVALUATIONINFO, + __module__ = 'TreeEnsemble_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.TreeEnsembleParameters.TreeNode.EvaluationInfo) + )) + , + DESCRIPTOR = _TREEENSEMBLEPARAMETERS_TREENODE, + __module__ = 'TreeEnsemble_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.TreeEnsembleParameters.TreeNode) + )) + , + DESCRIPTOR = _TREEENSEMBLEPARAMETERS, + __module__ = 'TreeEnsemble_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.TreeEnsembleParameters) + )) +_sym_db.RegisterMessage(TreeEnsembleParameters) +_sym_db.RegisterMessage(TreeEnsembleParameters.TreeNode) +_sym_db.RegisterMessage(TreeEnsembleParameters.TreeNode.EvaluationInfo) + +TreeEnsembleClassifier = _reflection.GeneratedProtocolMessageType('TreeEnsembleClassifier', (_message.Message,), dict( + DESCRIPTOR = _TREEENSEMBLECLASSIFIER, + __module__ = 'TreeEnsemble_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.TreeEnsembleClassifier) + )) +_sym_db.RegisterMessage(TreeEnsembleClassifier) + +TreeEnsembleRegressor = _reflection.GeneratedProtocolMessageType('TreeEnsembleRegressor', (_message.Message,), dict( + DESCRIPTOR = _TREEENSEMBLEREGRESSOR, + __module__ = 'TreeEnsemble_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.TreeEnsembleRegressor) + )) +_sym_db.RegisterMessage(TreeEnsembleRegressor) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/VisionFeaturePrint_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/VisionFeaturePrint_pb2.py new file mode 100644 index 00000000..face7914 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/VisionFeaturePrint_pb2.py @@ -0,0 +1,232 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: VisionFeaturePrint.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='VisionFeaturePrint.proto', + package='CoreML.Specification.CoreMLModels', + syntax='proto3', + serialized_pb=_b('\n\x18VisionFeaturePrint.proto\x12!CoreML.Specification.CoreMLModels\"\xe0\x04\n\x12VisionFeaturePrint\x12L\n\x05scene\x18\x14 \x01(\x0b\x32;.CoreML.Specification.CoreMLModels.VisionFeaturePrint.SceneH\x00\x12P\n\x07objects\x18\x15 \x01(\x0b\x32=.CoreML.Specification.CoreMLModels.VisionFeaturePrint.ObjectsH\x00\x1a\xb7\x01\n\x05Scene\x12Y\n\x07version\x18\x01 \x01(\x0e\x32H.CoreML.Specification.CoreMLModels.VisionFeaturePrint.Scene.SceneVersion\"S\n\x0cSceneVersion\x12\x19\n\x15SCENE_VERSION_INVALID\x10\x00\x12\x13\n\x0fSCENE_VERSION_1\x10\x01\x12\x13\n\x0fSCENE_VERSION_2\x10\x02\x1a\xd5\x01\n\x07Objects\x12]\n\x07version\x18\x01 \x01(\x0e\x32L.CoreML.Specification.CoreMLModels.VisionFeaturePrint.Objects.ObjectsVersion\x12\x0e\n\x06output\x18\x64 \x03(\t\"[\n\x0eObjectsVersion\x12\x1b\n\x17OBJECTS_VERSION_INVALID\x10\x00\x12\x15\n\x11OBJECTS_VERSION_1\x10\x01\x12\x15\n\x11OBJECTS_VERSION_2\x10\x02\x42\x18\n\x16VisionFeaturePrintTypeB\x02H\x03\x62\x06proto3') +) + + + +_VISIONFEATUREPRINT_SCENE_SCENEVERSION = _descriptor.EnumDescriptor( + name='SceneVersion', + full_name='CoreML.Specification.CoreMLModels.VisionFeaturePrint.Scene.SceneVersion', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='SCENE_VERSION_INVALID', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SCENE_VERSION_1', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SCENE_VERSION_2', index=2, number=2, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=347, + serialized_end=430, +) +_sym_db.RegisterEnumDescriptor(_VISIONFEATUREPRINT_SCENE_SCENEVERSION) + +_VISIONFEATUREPRINT_OBJECTS_OBJECTSVERSION = _descriptor.EnumDescriptor( + name='ObjectsVersion', + full_name='CoreML.Specification.CoreMLModels.VisionFeaturePrint.Objects.ObjectsVersion', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='OBJECTS_VERSION_INVALID', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='OBJECTS_VERSION_1', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='OBJECTS_VERSION_2', index=2, number=2, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=555, + serialized_end=646, +) +_sym_db.RegisterEnumDescriptor(_VISIONFEATUREPRINT_OBJECTS_OBJECTSVERSION) + + +_VISIONFEATUREPRINT_SCENE = _descriptor.Descriptor( + name='Scene', + full_name='CoreML.Specification.CoreMLModels.VisionFeaturePrint.Scene', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='version', full_name='CoreML.Specification.CoreMLModels.VisionFeaturePrint.Scene.version', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + 
message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _VISIONFEATUREPRINT_SCENE_SCENEVERSION, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=247, + serialized_end=430, +) + +_VISIONFEATUREPRINT_OBJECTS = _descriptor.Descriptor( + name='Objects', + full_name='CoreML.Specification.CoreMLModels.VisionFeaturePrint.Objects', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='version', full_name='CoreML.Specification.CoreMLModels.VisionFeaturePrint.Objects.version', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='output', full_name='CoreML.Specification.CoreMLModels.VisionFeaturePrint.Objects.output', index=1, + number=100, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _VISIONFEATUREPRINT_OBJECTS_OBJECTSVERSION, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=433, + serialized_end=646, +) + +_VISIONFEATUREPRINT = _descriptor.Descriptor( + name='VisionFeaturePrint', + full_name='CoreML.Specification.CoreMLModels.VisionFeaturePrint', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='scene', full_name='CoreML.Specification.CoreMLModels.VisionFeaturePrint.scene', index=0, + number=20, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='objects', full_name='CoreML.Specification.CoreMLModels.VisionFeaturePrint.objects', index=1, + number=21, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_VISIONFEATUREPRINT_SCENE, _VISIONFEATUREPRINT_OBJECTS, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='VisionFeaturePrintType', full_name='CoreML.Specification.CoreMLModels.VisionFeaturePrint.VisionFeaturePrintType', + index=0, containing_type=None, fields=[]), + ], + serialized_start=64, + serialized_end=672, +) + +_VISIONFEATUREPRINT_SCENE.fields_by_name['version'].enum_type = _VISIONFEATUREPRINT_SCENE_SCENEVERSION +_VISIONFEATUREPRINT_SCENE.containing_type = _VISIONFEATUREPRINT +_VISIONFEATUREPRINT_SCENE_SCENEVERSION.containing_type = _VISIONFEATUREPRINT_SCENE +_VISIONFEATUREPRINT_OBJECTS.fields_by_name['version'].enum_type = _VISIONFEATUREPRINT_OBJECTS_OBJECTSVERSION +_VISIONFEATUREPRINT_OBJECTS.containing_type = _VISIONFEATUREPRINT +_VISIONFEATUREPRINT_OBJECTS_OBJECTSVERSION.containing_type = _VISIONFEATUREPRINT_OBJECTS +_VISIONFEATUREPRINT.fields_by_name['scene'].message_type = _VISIONFEATUREPRINT_SCENE 
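+# The assignments here resolve each oneof field to its nested descriptor and
+# register both ``scene`` and ``objects`` as variants of the
+# ``VisionFeaturePrintType`` oneof, matching the .proto definition.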
+_VISIONFEATUREPRINT.fields_by_name['objects'].message_type = _VISIONFEATUREPRINT_OBJECTS +_VISIONFEATUREPRINT.oneofs_by_name['VisionFeaturePrintType'].fields.append( + _VISIONFEATUREPRINT.fields_by_name['scene']) +_VISIONFEATUREPRINT.fields_by_name['scene'].containing_oneof = _VISIONFEATUREPRINT.oneofs_by_name['VisionFeaturePrintType'] +_VISIONFEATUREPRINT.oneofs_by_name['VisionFeaturePrintType'].fields.append( + _VISIONFEATUREPRINT.fields_by_name['objects']) +_VISIONFEATUREPRINT.fields_by_name['objects'].containing_oneof = _VISIONFEATUREPRINT.oneofs_by_name['VisionFeaturePrintType'] +DESCRIPTOR.message_types_by_name['VisionFeaturePrint'] = _VISIONFEATUREPRINT +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +VisionFeaturePrint = _reflection.GeneratedProtocolMessageType('VisionFeaturePrint', (_message.Message,), dict( + + Scene = _reflection.GeneratedProtocolMessageType('Scene', (_message.Message,), dict( + DESCRIPTOR = _VISIONFEATUREPRINT_SCENE, + __module__ = 'VisionFeaturePrint_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CoreMLModels.VisionFeaturePrint.Scene) + )) + , + + Objects = _reflection.GeneratedProtocolMessageType('Objects', (_message.Message,), dict( + DESCRIPTOR = _VISIONFEATUREPRINT_OBJECTS, + __module__ = 'VisionFeaturePrint_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CoreMLModels.VisionFeaturePrint.Objects) + )) + , + DESCRIPTOR = _VISIONFEATUREPRINT, + __module__ = 'VisionFeaturePrint_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CoreMLModels.VisionFeaturePrint) + )) +_sym_db.RegisterMessage(VisionFeaturePrint) +_sym_db.RegisterMessage(VisionFeaturePrint.Scene) +_sym_db.RegisterMessage(VisionFeaturePrint.Objects) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/WordEmbedding_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/WordEmbedding_pb2.py new file mode 100644 index 00000000..a10ac8c4 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/WordEmbedding_pb2.py @@ -0,0 +1,93 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: WordEmbedding.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . 
import DataStructures_pb2 as DataStructures__pb2 +try: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2 + +from .DataStructures_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='WordEmbedding.proto', + package='CoreML.Specification.CoreMLModels', + syntax='proto3', + serialized_pb=_b('\n\x13WordEmbedding.proto\x12!CoreML.Specification.CoreMLModels\x1a\x14\x44\x61taStructures.proto\"O\n\rWordEmbedding\x12\x10\n\x08revision\x18\x01 \x01(\r\x12\x10\n\x08language\x18\n \x01(\t\x12\x1a\n\x12modelParameterData\x18\x64 \x01(\x0c\x42\x02H\x03P\x00\x62\x06proto3') + , + dependencies=[DataStructures__pb2.DESCRIPTOR,], + public_dependencies=[DataStructures__pb2.DESCRIPTOR,]) + + + + +_WORDEMBEDDING = _descriptor.Descriptor( + name='WordEmbedding', + full_name='CoreML.Specification.CoreMLModels.WordEmbedding', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='revision', full_name='CoreML.Specification.CoreMLModels.WordEmbedding.revision', index=0, + number=1, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='language', full_name='CoreML.Specification.CoreMLModels.WordEmbedding.language', index=1, + number=10, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='modelParameterData', full_name='CoreML.Specification.CoreMLModels.WordEmbedding.modelParameterData', index=2, + number=100, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=80, + serialized_end=159, +) + +DESCRIPTOR.message_types_by_name['WordEmbedding'] = _WORDEMBEDDING +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +WordEmbedding = _reflection.GeneratedProtocolMessageType('WordEmbedding', (_message.Message,), dict( + DESCRIPTOR = _WORDEMBEDDING, + __module__ = 'WordEmbedding_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CoreMLModels.WordEmbedding) + )) +_sym_db.RegisterMessage(WordEmbedding) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/WordTagger_pb2.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/WordTagger_pb2.py new file mode 100644 index 00000000..a8333094 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/WordTagger_pb2.py @@ -0,0 +1,135 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
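+# --- Usage sketch (illustrative, not part of the compiler output) ---
+# The ``WordTagger`` message generated in this module keeps its tag vocabulary
+# in a ``Tags`` oneof whose only variant, ``stringTags``, is a
+# ``CoreML.Specification.StringVector``. A hedged sketch, assuming this module
+# is importable as part of ``coremltools.proto`` and that ``StringVector``
+# exposes its ``repeated string vector`` field:
+#
+#     from coremltools.proto import WordTagger_pb2 as wt_pb2
+#
+#     tagger = wt_pb2.WordTagger()
+#     tagger.revision = 1
+#     tagger.language = "en"
+#     tagger.stringTags.vector.extend(["NOUN", "VERB"])  # selects the 'Tags' oneof
+#     assert tagger.WhichOneof("Tags") == "stringTags"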
+# source: WordTagger.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . import DataStructures_pb2 as DataStructures__pb2 +try: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2 +except AttributeError: + FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2 + +from .DataStructures_pb2 import * + +DESCRIPTOR = _descriptor.FileDescriptor( + name='WordTagger.proto', + package='CoreML.Specification.CoreMLModels', + syntax='proto3', + serialized_pb=_b('\n\x10WordTagger.proto\x12!CoreML.Specification.CoreMLModels\x1a\x14\x44\x61taStructures.proto\"\xa4\x02\n\nWordTagger\x12\x10\n\x08revision\x18\x01 \x01(\r\x12\x10\n\x08language\x18\n \x01(\t\x12\x1f\n\x17tokensOutputFeatureName\x18\x14 \x01(\t\x12\"\n\x1atokenTagsOutputFeatureName\x18\x15 \x01(\t\x12\'\n\x1ftokenLocationsOutputFeatureName\x18\x16 \x01(\t\x12%\n\x1dtokenLengthsOutputFeatureName\x18\x17 \x01(\t\x12\x1a\n\x12modelParameterData\x18\x64 \x01(\x0c\x12\x39\n\nstringTags\x18\xc8\x01 \x01(\x0b\x32\".CoreML.Specification.StringVectorH\x00\x42\x06\n\x04TagsB\x02H\x03P\x00\x62\x06proto3') + , + dependencies=[DataStructures__pb2.DESCRIPTOR,], + public_dependencies=[DataStructures__pb2.DESCRIPTOR,]) + + + + +_WORDTAGGER = _descriptor.Descriptor( + name='WordTagger', + full_name='CoreML.Specification.CoreMLModels.WordTagger', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='revision', full_name='CoreML.Specification.CoreMLModels.WordTagger.revision', index=0, + number=1, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='language', full_name='CoreML.Specification.CoreMLModels.WordTagger.language', index=1, + number=10, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='tokensOutputFeatureName', full_name='CoreML.Specification.CoreMLModels.WordTagger.tokensOutputFeatureName', index=2, + number=20, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='tokenTagsOutputFeatureName', full_name='CoreML.Specification.CoreMLModels.WordTagger.tokenTagsOutputFeatureName', index=3, + number=21, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='tokenLocationsOutputFeatureName', full_name='CoreML.Specification.CoreMLModels.WordTagger.tokenLocationsOutputFeatureName', index=4, + number=22, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, 
containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='tokenLengthsOutputFeatureName', full_name='CoreML.Specification.CoreMLModels.WordTagger.tokenLengthsOutputFeatureName', index=5, + number=23, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='modelParameterData', full_name='CoreML.Specification.CoreMLModels.WordTagger.modelParameterData', index=6, + number=100, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stringTags', full_name='CoreML.Specification.CoreMLModels.WordTagger.stringTags', index=7, + number=200, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='Tags', full_name='CoreML.Specification.CoreMLModels.WordTagger.Tags', + index=0, containing_type=None, fields=[]), + ], + serialized_start=78, + serialized_end=370, +) + +_WORDTAGGER.fields_by_name['stringTags'].message_type = DataStructures__pb2._STRINGVECTOR +_WORDTAGGER.oneofs_by_name['Tags'].fields.append( + _WORDTAGGER.fields_by_name['stringTags']) +_WORDTAGGER.fields_by_name['stringTags'].containing_oneof = _WORDTAGGER.oneofs_by_name['Tags'] +DESCRIPTOR.message_types_by_name['WordTagger'] = _WORDTAGGER +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +WordTagger = _reflection.GeneratedProtocolMessageType('WordTagger', (_message.Message,), dict( + DESCRIPTOR = _WORDTAGGER, + __module__ = 'WordTagger_pb2' + # @@protoc_insertion_point(class_scope:CoreML.Specification.CoreMLModels.WordTagger) + )) +_sym_db.RegisterMessage(WordTagger) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) +# @@protoc_insertion_point(module_scope) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/proto/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/proto/__init__.py new file mode 100644 index 00000000..013a7fd1 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/proto/__init__.py @@ -0,0 +1 @@ +### Module for proto generated Python code. diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/test/__init__.py new file mode 100644 index 00000000..61aafff4 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2020, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/api/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/test/api/__init__.py new file mode 100644 index 00000000..1665bc37 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/api/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2017 - 2020, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/api/test_api_examples.py b/__packaged__/coreml/.python_dependencies/coremltools/test/api/test_api_examples.py new file mode 100644 index 00000000..f13e3742 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/api/test_api_examples.py @@ -0,0 +1,519 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import copy +import os +import tempfile + +import numpy as np +import pytest + +import coremltools as ct +from coremltools._deps import _HAS_TORCH +from coremltools.converters.mil import Builder as mb +from coremltools.converters.mil.mil import Function, Program, get_new_symbol +from coremltools.converters.mil.testing_utils import get_op_types_in_program + +if _HAS_TORCH: + import torch + + +class TestMILExamples: + @staticmethod + def test_tutorial(): + @mb.program( + input_specs=[mb.TensorSpec(shape=(1, 100, 100, 3))] + ) + def prog(x): + x = mb.relu(x=x, name="relu") + x = mb.transpose(x=x, perm=[0, 3, 1, 2], name="transpose") + x = mb.reduce_mean(x=x, axes=[2, 3], keep_dims=False, name="reduce") + x = mb.log(x=x, name="log") + y = mb.add(x=1, y=2) + return x + + # Convert and verify + mlmodel = ct.convert(prog) + + # running predict() is only supported on macOS + if ct.utils._is_macos(): + prediction = mlmodel.predict( + {"x": np.random.rand(1, 100, 100, 3).astype(np.float32)} + ) + assert len(prediction) == 1 + + +@pytest.mark.skipif(ct.utils._macos_version() < (10, 15), reason='Model produces specification 4.') +class TestInputs: + @staticmethod + @pytest.mark.skipif(not ct.utils._is_macos(), reason="Platform is not Mac OS") + def test_unsanitized_input_name_during_prediction(): + ''' + input name : "x/0" becomes "x_0" due to name sanitization applied during conversion + ''' + prog = Program() + func_inputs = {"x/0": mb.placeholder(shape=[2, 3]), + "y": mb.placeholder(shape=[2, 3])} + with Function(func_inputs) as ssa_fun: + x, y = ssa_fun.inputs["x/0"], ssa_fun.inputs["y"] + x = mb.relu(x=x, name="relu") + z = mb.add(x=x, y=y, name="out") + ssa_fun.set_outputs([z]) + prog.add_function("main", ssa_fun) + + mlmodel = ct.convert(prog) + + with pytest.raises(KeyError) as error_info: + mlmodel.predict( + {"x/0": np.random.rand(2, 3).astype(np.float32), + "y": np.random.rand(2, 3).astype(np.float32)} + ) + error_str = str(error_info.value) + assert "does not match any of the model input" in error_str + + @staticmethod + def _test_variant_input_type_prediction(to_tensor): + prog = Program() + func_inputs = {"x": mb.placeholder(shape=[2, 3]), + "y": mb.placeholder(shape=[2, 3])} + with Function(func_inputs) as ssa_fun: + x, y = 
ssa_fun.inputs["x"], ssa_fun.inputs["y"] + x = mb.relu(x=x, name="relu") + z = mb.add(x=x, y=y, name="out") + ssa_fun.set_outputs([z]) + prog.add_function("main", ssa_fun) + + mlmodel = ct.convert(prog) + x_numpy = np.random.rand(2, 3) + y_numpy = np.random.rand(2, 3) + out_by_numpy = mlmodel.predict( + {"x": x_numpy, + "y": y_numpy} + ) + out_by_tensor = mlmodel.predict( + {"x": to_tensor(x_numpy), + "y": to_tensor(y_numpy)} + ) + np.allclose(out_by_numpy["out"], out_by_tensor["out"]) + + @staticmethod + @pytest.mark.skipif(not ct.utils._is_macos(), reason="test needs predictions") + def test_list_predict_input(): + TestInputs._test_variant_input_type_prediction(lambda x: x.tolist()) + + @staticmethod + def test_rank0_inputs_mil(): + with pytest.raises(ValueError, match=r"Rank-0"): + @mb.program( + input_specs=[ + mb.TensorSpec(shape=()), + ] + ) + def prog(x): + return x + + +############################################################################### +# Note: all tests are examples of conversion to the Core ML format +# Each test case is expected to be runnable and self-complete. +############################################################################### + +class TestMLProgramConverterExamples: + + @staticmethod + def test_model_save(tmpdir): + save_path_dir = str(tmpdir) + + @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))]) + def prog(x): + x = mb.square(x=x) + return x + + # save neuralnetwork model without extension and check that it is saved with + # mlmodel extension + mlmodel = ct.convert(prog) + mlmodel_path = os.path.join(save_path_dir, "model_nn") + mlmodel.save(mlmodel_path) + assert os.path.exists(mlmodel_path + ".mlmodel") + + # save neuralnetwork model with mlpackage extension + mlmodel_path = os.path.join(save_path_dir, "model_nn2.mlpackage") + mlmodel.save(mlmodel_path) + assert os.path.exists(mlmodel_path) + + # save mlprogram model without extension and check that it is saved with + # mlpackage extension + mlmodel = ct.convert(prog, convert_to="mlprogram") + mlmodel_path = os.path.join(save_path_dir, "model_mlprogram") + mlmodel.save(mlmodel_path) + assert os.path.exists(mlmodel_path + ".mlpackage") + + # check error if mlprogram is saved with mlmodel extension + mlmodel_path = os.path.join(save_path_dir, "model_mlprogram.mlmodel") + with pytest.raises(Exception) as e: + mlmodel.save(mlmodel_path) + expected_error = "For an ML Program, extension must be .mlpackage (not .mlmodel)" + assert expected_error == str(e.value) + + @staticmethod + @pytest.mark.skipif(not ct.utils._is_macos(), reason="Platform is not Mac OS") + def test_deepcopy_error_with_symbols_in_prog(): + prog = Program() + func_inputs = {"x": mb.placeholder(shape=[get_new_symbol(), 3]), + "y": mb.placeholder(shape=[2, 3])} + with Function(func_inputs) as ssa_fun: + x, y = ssa_fun.inputs["x"], ssa_fun.inputs["y"] + x = mb.relu(x=x) + z = mb.add(x=x, y=y) + ssa_fun.set_outputs([z]) + prog.add_function("main", ssa_fun) + mlmodel = ct.convert(prog, convert_to="mlprogram", compute_precision=ct.precision.FLOAT32) + prog2 = mlmodel._get_mil_internal() # this will invoke a deepcopy on the prog + + @pytest.mark.skipif(not ct.utils._is_macos(), reason="Platform is not Mac OS") + @pytest.mark.parametrize("skip_model_load", [True, False]) + def test_model_load_skip_flag(self, skip_model_load): + @mb.program(input_specs=[mb.TensorSpec(shape=(3,)), ]) + def prog(x): + return mb.relu(x=x, name='relu') + + if ct.utils._macos_version() < (12, 0) and not skip_model_load: + # converting to mlprogram, on macOS < 
+            # should raise a RuntimeWarning when skip_model_load is False
+            with pytest.warns(RuntimeWarning):
+                model = ct.convert(prog, convert_to='mlprogram',
+                                   skip_model_load=skip_model_load)
+        else:
+            model = ct.convert(prog, convert_to="mlprogram", skip_model_load=skip_model_load)
+
+        assert model is not None
+        if skip_model_load:
+            assert model.__proxy__ is None
+        model_dir = tempfile.TemporaryDirectory()
+        filename = os.path.join(model_dir.name, "test.mlpackage")
+        model.save(filename)
+        assert os.path.exists(filename)
+
+
+@pytest.mark.skipif(ct.utils._macos_version() < (12, 0), reason='Model produces specification 6.')
+class TestMLProgramFP16Transform:
+    @staticmethod
+    def test_compute_precision_api():
+        @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))])
+        def prog(x):
+            x = mb.square(x=x)
+            return x
+
+        mlmodel = ct.convert(copy.deepcopy(prog),
+                             compute_precision=ct.precision.FLOAT16,
+                             convert_to='mlprogram')
+        mil_prog = mlmodel._get_mil_internal()
+        np.testing.assert_array_equal(["cast", "square", "cast"], get_op_types_in_program(mil_prog))
+
+        mlmodel = ct.convert(copy.deepcopy(prog),
+                             compute_precision=ct.precision.FLOAT32,
+                             convert_to='mlprogram')
+        mil_prog = mlmodel._get_mil_internal()
+        np.testing.assert_array_equal(["square"], get_op_types_in_program(mil_prog))
+
+        mlmodel = ct.convert(
+            copy.deepcopy(prog),
+            compute_precision=ct.transform.FP16ComputePrecision(
+                op_selector=lambda op: op.op_type != "square"
+            ),
+            convert_to="mlprogram",
+        )
+        mil_prog = mlmodel._get_mil_internal()
+        np.testing.assert_array_equal(["square"], get_op_types_in_program(mil_prog))
+
+        with pytest.raises(ValueError) as e:
+            mlmodel = ct.convert(copy.deepcopy(prog),
+                                 compute_precision='fp64',
+                                 convert_to='mlprogram')
+        expected_error = "'compute_precision' must be either coremltools.precision.FLOAT32 or " \
+                         "coremltools.precision.FLOAT16 or of type coremltools.transform.FP16ComputePrecision()"
+        assert expected_error == str(e.value)
+
+        expected_pattern = "compute_precision .* supported .* mlprogram .* None .* target=='neuralnetwork'.*minimum_deployment_target.*"
+        with pytest.raises(ValueError, match=expected_pattern) as e:
+            mlmodel = ct.convert(copy.deepcopy(prog), compute_precision='fp16')
+
+    @staticmethod
+    def test_invalid_argument_nn_backend():
+        '''
+        Since the compute_precision argument is only applicable when converting to "mlprogram",
+        check that an error is correctly raised when conversion targets the neuralnetwork backend
+        '''
+
+        @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))])
+        def prog(x):
+            x = mb.square(x=x)
+            return x
+
+        expected_err_str = "compute_precision is only supported for mlprogram target and must be None if target.*"
+        with pytest.raises(ValueError, match=expected_err_str):
+            mlmodel = ct.convert(prog, compute_precision=ct.precision.FLOAT16)
+        with pytest.raises(ValueError, match=expected_err_str):
+            mlmodel = ct.convert(prog, compute_precision=ct.precision.FLOAT32)
+
+
+@pytest.mark.skipif(not _HAS_TORCH, reason="PyTorch not found")
+class TestGraphPassManagement:
+    @staticmethod
+    def _get_test_model():
+        class TestModel(torch.nn.Module):
+            def __init__(self):
+                super().__init__()
+                self.conv1 = torch.nn.Conv2d(1, 8, 5, padding="same")
+                self.bn1 = torch.nn.BatchNorm2d(8)
+                self.linear1 = torch.nn.Linear(28 * 28 * 8, 5)
+                self.alpha = 0.7
+
+            def forward(self, x):
+                x = self.conv1(x)
+                x = self.bn1(x)
+                x = self.linear1(torch.flatten(x))
+                x = torch.maximum(self.alpha * x, x)
+                return x
+
+        return TestModel().eval()
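+
+    # A minimal usage sketch (illustrative only, not one of the tests): the
+    # PassPipeline surface exercised by this class. Only `ct.PassPipeline`,
+    # `remove_passes`, and `set_options` are assumed here, and they are exactly
+    # the public APIs the tests below call.
+    @staticmethod
+    def _example_pipeline_sketch():
+        pipeline = ct.PassPipeline()
+        # drop a fusion pass so the unfused pattern stays visible in the program
+        pipeline.remove_passes(passes_names=["common::fuse_conv_batchnorm"])
+        # configure a pass option; option values are passed as strings
+        pipeline.set_options("common::const_elimination", {"skip_const_by_size": "1e8"})
+        return pipeline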
+    def test_default_pipeline(self):
+        model = self._get_test_model()
+        example_input = torch.rand(1, 1, 28, 28)
+        traced_model = torch.jit.trace(model, example_input)
+        model_converted = ct.convert(
+            traced_model,
+            inputs=[ct.TensorType(shape=example_input.shape)],
+            convert_to="mlprogram",
+            pass_pipeline=ct.PassPipeline(),
+        )
+        assert get_op_types_in_program(model_converted._get_mil_internal()) == [
+            "cast",
+            "conv",
+            "reshape",
+            "linear",
+            "leaky_relu",
+            "cast",
+        ]
+
+    def test_skip_pass(self):
+        model = self._get_test_model()
+        example_input = torch.rand(1, 1, 28, 28)
+        traced_model = torch.jit.trace(model, example_input)
+
+        model_converted = ct.convert(
+            traced_model, inputs=[ct.TensorType(shape=example_input.shape)], convert_to="mlprogram"
+        )
+        assert get_op_types_in_program(model_converted._get_mil_internal()) == [
+            "cast",
+            "conv",
+            "reshape",
+            "linear",
+            "leaky_relu",
+            "cast",
+        ]
+
+        pipeline = ct.PassPipeline()
+        pipeline.remove_passes(passes_names=["common::fuse_conv_batchnorm"])
+        model_converted_with_skipped_passes = ct.convert(
+            traced_model,
+            inputs=[ct.TensorType(shape=example_input.shape)],
+            convert_to="mlprogram",
+            pass_pipeline=pipeline,
+        )
+        assert get_op_types_in_program(model_converted_with_skipped_passes._get_mil_internal()) == [
+            "cast",
+            "conv",
+            "batch_norm",
+            "reshape",
+            "linear",
+            "leaky_relu",
+            "cast",
+        ]
+
+    def test_skip_two_passes(self):
+        model = self._get_test_model()
+        example_input = torch.rand(1, 1, 28, 28)
+        traced_model = torch.jit.trace(model, example_input)
+
+        pipeline = ct.PassPipeline()
+        pipeline.remove_passes(
+            passes_names=["common::fuse_conv_batchnorm", "common::fuse_leaky_relu"]
+        )
+        model_converted_with_skipped_passes = ct.convert(
+            traced_model,
+            inputs=[ct.TensorType(shape=example_input.shape)],
+            convert_to="mlprogram",
+            pass_pipeline=pipeline,
+        )
+        assert get_op_types_in_program(model_converted_with_skipped_passes._get_mil_internal()) == [
+            "cast",
+            "conv",
+            "batch_norm",
+            "reshape",
+            "linear",
+            "mul",
+            "maximum",
+            "cast",
+        ]
+
+    def test_skip_passes_in_different_pipelines(self):
+        """
+        Some passes exist in different pipelines. For example, const_elimination is in both the
+        main and backend pipelines. If the user wants to skip the const_elimination pass, we want
+        to make sure both pipelines skip that pass.
+        """
+ """ + model = self._get_test_model() + example_input = torch.rand(1, 1, 28, 28) + traced_model = torch.jit.trace(model, example_input) + + pipeline = ct.PassPipeline() + pipeline.remove_passes(passes_names=["common::const_elimination"]) + model_converted = ct.convert( + traced_model, + inputs=[ct.TensorType(shape=example_input.shape)], + convert_to="mlprogram", + pass_pipeline=pipeline, + ) + assert ( + get_op_types_in_program( + model_converted._get_mil_internal(), skip_const_ops=False + ).count("const") + == 24 + ) + + def test_empty_pipeline(self): + model = self._get_test_model() + example_input = torch.rand(1, 1, 28, 28) + traced_model = torch.jit.trace(model, example_input) + + pipeline = ct.PassPipeline.get_empty_pipeline() + + model_converted = ct.convert( + traced_model, + inputs=[ct.TensorType(shape=example_input.shape)], + convert_to="mlprogram", + pass_pipeline=pipeline, + ) + assert get_op_types_in_program(model_converted._get_mil_internal()) == [ + "conv", + "batch_norm", + "shape", + "slice_by_index", + "slice_by_index", + "concat", + "cast", + "reshape", + "linear", + "mul", + "maximum", + ] + + def test_pass_option_skip_ops_by_type(self): + model = self._get_test_model() + example_input = torch.rand(1, 1, 28, 28) + traced_model = torch.jit.trace(model, example_input) + + pipeline = ct.PassPipeline() + pipeline.set_options("common::add_fp16_cast", {"skip_ops_by_type": "conv,linear"}) + model_converted = ct.convert( + traced_model, + inputs=[ct.TensorType(shape=example_input.shape)], + convert_to="mlprogram", + pass_pipeline=pipeline, + ) + # The fp16 cast is skipped for conv and linear as we specified them in the pass options. + assert get_op_types_in_program(model_converted._get_mil_internal()) == [ + "conv", + "cast", + "reshape", + "cast", + "linear", + "cast", + "leaky_relu", + "cast", + ] + + def test_pass_option_skip_const_by_size(self): + model = self._get_test_model() + example_input = torch.rand(1, 1, 28, 28) + traced_model = torch.jit.trace(model, example_input) + + model_converted_without_pipeline = ct.convert( + traced_model, + inputs=[ct.TensorType(shape=example_input.shape)], + convert_to="mlprogram", + ) + + pipeline = ct.PassPipeline() + pipeline.set_options("common::const_elimination", {"skip_const_by_size": "1e8"}) + model_converted = ct.convert( + traced_model, + inputs=[ct.TensorType(shape=example_input.shape)], + convert_to="mlprogram", + pass_pipeline=pipeline, + ) + # When the threshold is set to 1e8, no var is skipped in const elimination. + assert get_op_types_in_program( + model_converted._get_mil_internal(), skip_const_ops=False + ).count("const") == get_op_types_in_program( + model_converted_without_pipeline._get_mil_internal(), skip_const_ops=False + ).count( + "const" + ) + + pipeline.set_options( + "common::const_elimination", {"skip_const_by_size": "-1"}, override=True + ) + model_converted = ct.convert( + traced_model, + inputs=[ct.TensorType(shape=example_input.shape)], + convert_to="mlprogram", + pass_pipeline=pipeline, + ) + # When the threshold -1, almost all vars (except scalars) are skipped in const elimination. 
+        assert (
+            get_op_types_in_program(
+                model_converted._get_mil_internal(), skip_const_ops=False
+            ).count("const")
+            == 23
+        )
+
+    def test_pass_unsupported_option(self):
+        model = self._get_test_model()
+        example_input = torch.rand(1, 1, 28, 28)
+        traced_model = torch.jit.trace(model, example_input)
+
+        pipeline = ct.PassPipeline()
+        pipeline.set_options("common::fuse_conv_batchnorm", {"skip_ops_by_type": "conv,linear"})
+        with pytest.raises(
+            NotImplementedError,
+            match="The graph pass `fuse_conv_batchnorm` doesn't support option `skip_ops_by_type`.",
+        ):
+            ct.convert(
+                traced_model,
+                inputs=[ct.TensorType(shape=example_input.shape)],
+                convert_to="mlprogram",
+                pass_pipeline=pipeline,
+            )
+
+    def test_pass_option_invalid_val(self):
+        model = self._get_test_model()
+        example_input = torch.rand(1, 1, 28, 28)
+        traced_model = torch.jit.trace(model, example_input)
+
+        pipeline = ct.PassPipeline()
+        pipeline.set_options("common::const_elimination", {"skip_const_by_size": "dummy"})
+        with pytest.raises(
+            ValueError,
+            match="Expected to get float threshold, but got `dummy` which cannot be converted to float",
+        ):
+            ct.convert(
+                traced_model,
+                inputs=[ct.TensorType(shape=example_input.shape)],
+                convert_to="mlprogram",
+                pass_pipeline=pipeline,
+            )
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/api/test_api_visibilities.py b/__packaged__/coreml/.python_dependencies/coremltools/test/api/test_api_visibilities.py
new file mode 100644
index 00000000..c66e0c6d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/test/api/test_api_visibilities.py
@@ -0,0 +1,230 @@
+# Copyright (c) 2021, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+
+import coremltools as ct
+
+
+def _get_visible_items(d):
+    return [x for x in dir(d) if not x.startswith("_")]
+
+
+def _check_visible_modules(actual, expected):
+    assert set(actual) == set(expected), "API mismatch. 
Got %s, expected %s" % ( + actual, + expected, + ) + + +EXPECTED_MODULES = [ + "ClassifierConfig", + "ComputeUnit", + "EnumeratedShapes", + "ImageType", + "RangeDim", + "SPECIFICATION_VERSION", + "Shape", + "TensorType", + "colorlayout", + "compression_utils", + "convert", + "converters", + "libcoremlpython", + "models", + "PassPipeline", + "proto", + "precision", + "target", + "utils", + "version", + "test", + "transform", + "libmodelpackage", + "libmilstoragepython", +] + + +class TestApiVisibilities: + """Test public coremltools API visibilities.""" + + def test_top_level(self): + if not ct.utils._is_macos(): + EXPECTED_MODULES.remove("libcoremlpython") + _check_visible_modules(_get_visible_items(ct), EXPECTED_MODULES) + + def test_utils(self): + expected = [ + "convert_double_to_float_multiarray_type", + "evaluate_classifier", + "evaluate_classifier_with_probabilities", + "evaluate_regressor", + "evaluate_transformer", + "make_pipeline", + "load_spec", + "rename_feature", + "save_spec", + ] + _check_visible_modules(_get_visible_items(ct.utils), expected) + + def test_models(self): + expected = [ + "MLModel", + "datatypes", + "feature_vectorizer", + "ml_program", + "model", + "nearest_neighbors", + "neural_network", + "pipeline", + "tree_ensemble", + "utils", + ] + _check_visible_modules(_get_visible_items(ct.models), expected) + + def test_models_mlmodel(self): + expected = [ + "author", + "get_spec", + "input_description", + "license", + "output_description", + "predict", + "save", + "short_description", + "user_defined_metadata", + "version", + "weights_dir", + ] + _check_visible_modules(_get_visible_items(ct.models.MLModel), expected) + + def test_models_neural_network(self): + expected = [ + "AdamParams", + "NeuralNetworkBuilder", + "SgdParams", + "builder", + "flexible_shape_utils", + "optimization_utils", + "printer", + "quantization_utils", + "spec_inspection_utils", + "update_optimizer_utils", + "utils", + ] + _check_visible_modules(_get_visible_items(ct.models.neural_network), expected) + + def test_models_neural_network_utils(self): + expected = ["NeuralNetworkBuilder", "make_image_input", "make_nn_classifier"] + _check_visible_modules( + _get_visible_items(ct.models.neural_network.utils), expected + ) + + def test_models_tree_ensemble(self): + expected = [ + "TreeEnsembleBase", + "TreeEnsembleClassifier", + "TreeEnsembleRegressor", + "set_classifier_interface_params", + "set_regressor_interface_params", + ] + _check_visible_modules(_get_visible_items(ct.models.tree_ensemble), expected) + + def test_models_pipeline(self): + expected = [ + "Pipeline", + "PipelineClassifier", + "PipelineRegressor", + "set_classifier_interface_params", + "set_regressor_interface_params", + "set_training_features", + "set_transform_interface_params", + ] + _check_visible_modules(_get_visible_items(ct.models.pipeline), expected) + + def test_converters(self): + expected = [ + "ClassifierConfig", + "ColorLayout", + "EnumeratedShapes", + "ImageType", + "RangeDim", + "Shape", + "TensorType", + "convert", + "libsvm", + "mil", + "sklearn", + "xgboost", + ] + _check_visible_modules(_get_visible_items(ct.converters), expected) + + def test_converters_libsvm(self): + _check_visible_modules(_get_visible_items(ct.converters.libsvm), ["convert"]) + + def test_converters_sklearn(self): + _check_visible_modules(_get_visible_items(ct.converters.sklearn), ["convert"]) + + def test_converters_xgboost(self): + _check_visible_modules(_get_visible_items(ct.converters.xgboost), ["convert"]) + + def 
test_models_neural_network_quantization_utils(self): + expected = [ + "AdvancedQuantizedLayerSelector", + "MatrixMultiplyLayerSelector", + "ModelMetrics", + "NoiseMetrics", + "OutputMetric", + "QuantizedLayerSelector", + "TopKMetrics", + "activate_int8_int8_matrix_multiplications", + "compare_models", + "quantize_weights", + ] + _check_visible_modules( + _get_visible_items(ct.models.neural_network.quantization_utils), expected + ) + + def test_compression_utils(self): + expected = [ + "affine_quantize_weights", + "palettize_weights", + "sparsify_weights", + "decompress_weights", + ] + _check_visible_modules( + _get_visible_items(ct.compression_utils), expected + ) + + def test_models_neural_network_flexible_shape_utils(self): + expected = [ + "NeuralNetworkImageSize", + "NeuralNetworkImageSizeRange", + "NeuralNetworkMultiArrayShape", + "NeuralNetworkMultiArrayShapeRange", + "Shape", + "ShapeRange", + "Size", + "add_enumerated_image_sizes", + "add_enumerated_multiarray_shapes", + "add_multiarray_ndshape_enumeration", + "set_multiarray_ndshape_range", + "update_image_size_range", + "update_multiarray_shape_range", + ] + _check_visible_modules( + _get_visible_items(ct.models.neural_network.flexible_shape_utils), expected + ) + + def test_models_neural_network_update_optimizer_utils(self): + expected = ["AdamParams", "Batch", "RangeParam", "SgdParams"] + _check_visible_modules( + _get_visible_items(ct.models.neural_network.update_optimizer_utils), + expected, + ) + + def test_models_neural_network_optimization_utils(self): + _check_visible_modules( + _get_visible_items(ct.models.neural_network.optimization_utils), [], + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/blob/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/test/blob/__init__.py new file mode 100644 index 00000000..9293abe9 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/blob/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2017 - 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/blob/test_weights.py b/__packaged__/coreml/.python_dependencies/coremltools/test/blob/test_weights.py new file mode 100644 index 00000000..c0818346 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/blob/test_weights.py @@ -0,0 +1,68 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os +import shutil +import tempfile +import unittest + +import numpy as np + +from coremltools.libmilstoragepython import _BlobStorageReader as BlobReader +from coremltools.libmilstoragepython import _BlobStorageWriter as BlobWriter + + +class WeightTest(unittest.TestCase): + def setUp(self): + self.working_dir = tempfile.mkdtemp() + + def tearDown(self): + if os.path.exists(self.working_dir): + shutil.rmtree(self.working_dir) + + def test_weight_blob_int8(self): + writer = BlobWriter(self.working_dir + "/net.wt") + input_arr = np.array([-5, -2, 0, 2, 5], dtype=np.int8) + offset = writer.write_int8_data(input_arr) + writer = None + + reader = BlobReader(self.working_dir + "/net.wt") + output_arr = np.array(reader.read_int8_data(offset), np.int8) + np.testing.assert_equal(input_arr, output_arr) + + def test_weight_blob_uint8(self): + writer = BlobWriter(self.working_dir + "/net.wt") + input_arr = np.array([1, 2, 3, 4, 5], dtype=np.uint8) + offset = writer.write_uint8_data(input_arr) + writer = None + + reader = BlobReader(self.working_dir + "/net.wt") + output_arr = np.array(reader.read_uint8_data(offset), np.uint8) + np.testing.assert_almost_equal(input_arr, output_arr) + + def test_weight_blob_fp16(self): + writer = BlobWriter(self.working_dir + "/net.wt") + input_arr = np.array([2.3, 4.6, 7.9], dtype=np.float16) + input_arr_to_bytes_uint16 = np.frombuffer(input_arr.tobytes(), np.uint16) + offset = writer.write_fp16_data(input_arr_to_bytes_uint16) + writer = None + + reader = BlobReader(self.working_dir + "/net.wt") + output_arr_uint16 = np.array(reader.read_fp16_data(offset), np.uint16) + output_arr = np.frombuffer(output_arr_uint16.tobytes(), np.float16) + np.testing.assert_almost_equal(input_arr, output_arr) + + def test_weight_blob_fp32(self): + writer = BlobWriter(self.working_dir + "/net.wt") + input_arr = np.array([1.0, 2.4, 3.9, -4.8, 5.2], dtype=np.float32) + offset = writer.write_float_data(input_arr) + writer = None + + reader = BlobReader(self.working_dir + "/net.wt") + output_arr = np.array(reader.read_float_data(offset), np.float32) + np.testing.assert_almost_equal(input_arr, output_arr) + +if __name__ == "__main__": + unittest.main() diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/ml_program/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/test/ml_program/__init__.py new file mode 100644 index 00000000..9fcc9060 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/ml_program/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2022, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause \ No newline at end of file diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/ml_program/test_compression.py b/__packaged__/coreml/.python_dependencies/coremltools/test/ml_program/test_compression.py new file mode 100644 index 00000000..283efbfb --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/ml_program/test_compression.py @@ -0,0 +1,432 @@ +# Copyright (c) 2022, Apple Inc. All rights reserved. 
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import itertools
+
+import numpy as np
+import pytest
+import torch
+
+import coremltools as ct
+from coremltools._deps import _HAS_SKLEARN
+from coremltools.converters.mil.testing_utils import get_op_types_in_program
+from coremltools.converters.mil.mil import types
+
+
+def create_unique_weight(weight, nbits):
+    shape = weight.detach().numpy().shape
+    size = weight.detach().numpy().size
+
+    unique_number = 1 << nbits
+    weight = []
+    partition_len = size // unique_number + 1
+    for i in range(unique_number):
+        weight += [i] * (partition_len)
+    weight = np.reshape(np.array(weight[:size]).astype(np.float32), shape)
+    return weight
+
+def get_test_model_and_data(multi_layer=False):
+    inputs = [ct.TensorType(name="data", shape=(1, 64, 10, 10))]
+    torch_input_values = [torch.rand(*i.shape.to_list()) for i in inputs]
+    coreml_input_values = {
+        i.name: val.detach().numpy() for i, val in zip(inputs, torch_input_values)
+    }
+    if multi_layer:
+        class Model(torch.nn.Module):
+            def __init__(self):
+                super(Model, self).__init__()
+                self.conv_1 = torch.nn.Conv2d(in_channels=64, out_channels=32, kernel_size=2)
+                self.conv_2 = torch.nn.Conv2d(in_channels=32, out_channels=64, kernel_size=2)
+
+            def forward(self, x):
+                conv_1 = self.conv_1(x)
+                conv_2 = self.conv_2(conv_1)
+                return conv_2
+
+        model = Model().eval()
+    else:
+        model = torch.nn.Conv2d(in_channels=64, out_channels=32, kernel_size=2)
+
+    return model, inputs, torch_input_values, coreml_input_values
+
+
+class TestCompressionUtils:
+
+    affine_quantize_weights = ct.compression_utils.affine_quantize_weights
+    palettize_weights = ct.compression_utils.palettize_weights
+    sparsify_weights = ct.compression_utils.sparsify_weights
+    decompress_weights = ct.compression_utils.decompress_weights
+
+    @staticmethod
+    def verify_model_outputs(model, compressed_model, input_values):
+        """
+        This utility function performs the following checks:
+
+        (1) Verify the output of the compressed model has the same shape / type as the original model
+        (2) The decompressed and compressed model have the same numerical outputs
+        """
+
+        # Make sure the model can be decompressed
+        decompressed_model = TestCompressionUtils.decompress_weights(compressed_model)
+
+        # Validate the output shape / type
+        ref_outputs = model._mil_program.functions["main"].outputs
+        outputs = compressed_model._mil_program.functions["main"].outputs
+
+        assert len(ref_outputs) == len(outputs)
+
+        for a, b in zip(ref_outputs, outputs):
+            assert a.name == b.name
+            assert a.shape == b.shape
+            assert a.dtype == b.dtype
+
+        if ct.utils._macos_version() < (13, 0):
+            return
+
+        # Validate that the compressed model can be decompressed and produces correct outputs
+        output_dict = compressed_model.predict(input_values)
+        de_output_dict = decompressed_model.predict(input_values)
+        for k, v in de_output_dict.items():
+            assert k in output_dict
+            np.testing.assert_allclose(v, output_dict[k])
+
+    @staticmethod
+    def test_op_selector():
+        model, inputs, torch_input_values, coreml_input_values = get_test_model_and_data()
+        torchmodel = torch.jit.trace(model, torch_input_values)
+        mlmodel = ct.convert(torchmodel, inputs=inputs, convert_to="mlprogram")
+        mlmodel_no_quantized = TestCompressionUtils.affine_quantize_weights(mlmodel, mode="linear", op_selector=lambda const_op: const_op.val.val.size > 1e7)
+        expected_ops = ['cast', 'conv', 'cast']
+        assert get_op_types_in_program(mlmodel_no_quantized._mil_program) == expected_ops
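+
+    # Illustrative sketch (not one of the tests): the four ct.compression_utils
+    # entry points this class exercises, applied to an already converted
+    # mlprogram model. The signatures mirror their usage in the tests below;
+    # nothing beyond that is assumed.
+    @staticmethod
+    def _example_compression_sketch(mlmodel):
+        # each call returns a new MLModel whose weights are stored via constexpr ops
+        quantized = ct.compression_utils.affine_quantize_weights(mlmodel, mode="linear")
+        palettized = ct.compression_utils.palettize_weights(mlmodel, nbits=4, mode="uniform")
+        sparsified = ct.compression_utils.sparsify_weights(mlmodel, mode="threshold_based", threshold=0.01)
+        # decompress_weights reverses any of the above, restoring dense const weights
+        return ct.compression_utils.decompress_weights(quantized)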
+
+    @staticmethod
+    @pytest.mark.skipif(not _HAS_SKLEARN, reason="Missing scikit-learn. Skipping tests.")
+    def test_weight_decompression():
+        """
+        This test performs the following steps
+
+        (1) compress a model with two conv layers into a compressed model with two different constexpr ops
+
+            [Original model]:
+
+                     weight_1      weight_2
+                        |             |
+                        v             v
+            input -> conv_1 -----> conv_2 ---> output
+
+
+            [Compressed model]:
+
+                   weight_1_lut   weight_2_affine
+                        |               |
+                        v               v
+            input -> conv_1 ------> conv_2 ---> output
+
+            , where weight_1_lut is a constexpr_lut_to_dense op and weight_2_affine is a constexpr_affine_dequantize op
+
+        (2) decompress the compressed model
+
+            [Decompressed model]:
+
+                   weight_1_new   weight_2_new
+                        |              |
+                        v              v
+            input -> conv_1 ------> conv_2 ---> output
+
+            , note that, weight_1_new is equivalent to weight_1_lut, and weight_2_new is equivalent to weight_2_affine
+        """
+        model, inputs, torch_input_values, coreml_input_values = get_test_model_and_data(multi_layer=True)
+        torchmodel = torch.jit.trace(model, torch_input_values)
+        mlmodel = ct.convert(torchmodel, inputs=inputs, convert_to="mlprogram")
+
+        # we first compress the model
+        mlmodel = TestCompressionUtils.palettize_weights(mlmodel, mode="kmeans", nbits=4, op_selector=lambda const_op: const_op.name == "conv_1_weight_to_fp16")
+        mlmodel = TestCompressionUtils.affine_quantize_weights(mlmodel, mode="linear", op_selector=lambda const_op: const_op.name == "conv_2_weight_to_fp16")
+        expected_ops = ['constexpr_lut_to_dense', 'cast', 'conv', 'constexpr_affine_dequantize', 'conv', 'cast']
+        assert get_op_types_in_program(mlmodel._mil_program) == expected_ops
+
+        # decompress the model
+        decompressed_model = TestCompressionUtils.decompress_weights(mlmodel)
+        assert get_op_types_in_program(decompressed_model._mil_program) == ['cast', 'conv', 'conv', 'cast']
+
+        if ct.utils._macos_version() < (13, 0):
+            return
+
+        # compare the numerical outputs
+        output_dict = mlmodel.predict(coreml_input_values)
+        de_output_dict = decompressed_model.predict(coreml_input_values)
+
+        for k, v in output_dict.items():
+            assert k in de_output_dict
+            np.testing.assert_allclose(v, de_output_dict[k])
+
+    @staticmethod
+    def test_compression_utils_error_handling():
+        model, inputs, torch_input_values, coreml_input_values = get_test_model_and_data()
+        torchmodel = torch.jit.trace(model, torch_input_values)
+        mlmodel = ct.convert(torchmodel, inputs=inputs, convert_to="mlprogram")
+
+        # Test invalid mode for affine quantization
+        expected_err_str = "supported for weight affine quantization. Got mode"
+        with pytest.raises(ValueError, match=expected_err_str):
+            TestCompressionUtils.affine_quantize_weights(mlmodel, mode="invalid_mode")
+
+        # Test invalid dtype for affine quantization
+        expected_err_str = "is unsupported for affine_quantize_weight"
+        with pytest.raises(ValueError, match=expected_err_str):
+            TestCompressionUtils.affine_quantize_weights(mlmodel, dtype=np.int32)
+
+        with pytest.raises(ValueError, match=expected_err_str):
+            TestCompressionUtils.affine_quantize_weights(mlmodel, dtype="int32")
+
+        # Test invalid mode for weight sparsification
+        expected_err_str = "supported for weight sparsification. Got mode"
+        with pytest.raises(ValueError, match=expected_err_str):
+            TestCompressionUtils.sparsify_weights(mlmodel, mode="invalid_mode")
+
+        # Test invalid threshold for weight sparsification
+        expected_err_str = "Invalid value of threshold: \-1. Needs to be in \[0, inf\)"
+        with pytest.raises(ValueError, match=expected_err_str):
+            TestCompressionUtils.sparsify_weights(mlmodel, mode="threshold_based", threshold=-1)
+
+        # Test invalid percentile for weight sparsification
+        expected_err_str = "Invalid value of target_percentile: 1.2. Needs to be in \[0, 1\]"
+        with pytest.raises(ValueError, match=expected_err_str):
+            TestCompressionUtils.sparsify_weights(mlmodel, mode="percentile_based", target_percentile=1.2)
+
+        # Test invalid mode for weight palettization
+        expected_err_str = "supported for weight palettization. Got mode"
+        with pytest.raises(ValueError, match=expected_err_str):
+            TestCompressionUtils.palettize_weights(mlmodel, mode="invalid_mode")
+
+        # Test nbits must be provided for kmeans, uniform mode for weight palettization
+        expected_err_str = "nbits must be provided for mode"
+        with pytest.raises(ValueError, match=expected_err_str):
+            TestCompressionUtils.palettize_weights(mlmodel, mode="kmeans")
+
+        with pytest.raises(ValueError, match=expected_err_str):
+            TestCompressionUtils.palettize_weights(mlmodel, mode="uniform")
+
+        # Test nbits must not be provided for unique, custom mode for weight palettization
+        expected_err_str = "nbits must NOT be provided for mode"
+        with pytest.raises(ValueError, match=expected_err_str):
+            TestCompressionUtils.palettize_weights(mlmodel, mode="unique", nbits=2)
+
+        with pytest.raises(ValueError, match=expected_err_str):
+            TestCompressionUtils.palettize_weights(mlmodel, mode="custom", nbits=2)
+
+        # Test lut_function must be provided for custom mode, and must not be provided otherwise
+        expected_err_str = "lut_function must be None if mode is not custom, and that it cannot be None when the mode is custom."
+        with pytest.raises(ValueError, match=expected_err_str):
+            TestCompressionUtils.palettize_weights(mlmodel, mode="custom")
+        with pytest.raises(ValueError, match=expected_err_str):
+            TestCompressionUtils.palettize_weights(mlmodel, mode="unique", lut_function=lambda op: True)
+
+        # Test lut_function must be a function object
+        expected_err_str = "A function object must be provided as lut_function"
+        with pytest.raises(ValueError, match=expected_err_str):
+            TestCompressionUtils.palettize_weights(mlmodel, mode="custom", lut_function=1)
+
+
+    @staticmethod
+    @pytest.mark.parametrize(
+        "mode, dtype",
+        itertools.product(
+            ("linear", "linear_symmetric"),
+            (np.int8, np.uint8, types.int8, types.uint8),
+        ),
+    )
+    def test_linear_quantization(mode, dtype):
+        model, inputs, torch_input_values, coreml_input_values = get_test_model_and_data()
+        torchmodel = torch.jit.trace(model, torch_input_values)
+        mlmodel = ct.convert(torchmodel, inputs=inputs, convert_to="mlprogram")
+
+        mlmodel_quantized = TestCompressionUtils.affine_quantize_weights(mlmodel, mode=mode, dtype=dtype)
+
+        # validate parameters
+        expected_ops = ['constexpr_affine_dequantize', 'cast', 'conv', 'cast']
+        assert get_op_types_in_program(mlmodel_quantized._mil_program) == expected_ops
+
+        quantize_op = mlmodel_quantized._mil_program.functions["main"].find_ops(op_type="constexpr_affine_dequantize")[0]
+        assert model.weight.detach().numpy().shape == quantize_op.quantized_data.shape
+
+        TestCompressionUtils.verify_model_outputs(mlmodel, mlmodel_quantized, coreml_input_values)
+
+    @staticmethod
+    @pytest.mark.parametrize(
+        "threshold",
+        (0.0, 0.001, 1e2),
+    )
+    def test_weight_sparsify_threshold_based(threshold):
+        model, inputs, torch_input_values, coreml_input_values = get_test_model_and_data()
+        with torch.no_grad():
model.weight[0][0][0][0] = 101 + torchmodel = torch.jit.trace(model, torch_input_values) + mlmodel = ct.convert(torchmodel, inputs=inputs, convert_to="mlprogram") + mlmodel_sparsified = TestCompressionUtils.sparsify_weights(mlmodel, mode="threshold_based", threshold=threshold) + + # validate parameters + expected_ops = ['constexpr_sparse_to_dense', 'cast', 'conv', 'cast'] + assert get_op_types_in_program(mlmodel_sparsified._mil_program) == expected_ops + + main_func = mlmodel_sparsified._mil_program.functions["main"] + sparse_to_dense_op = main_func.find_ops(op_type="constexpr_sparse_to_dense")[0] + non_sparse_data = sparse_to_dense_op.nonzero_data + + if threshold != 1e2: + assert np.min(np.absolute(non_sparse_data.val)) >= threshold + else: + assert non_sparse_data.val.size == 1 + + assert sparse_to_dense_op.shape.val.tolist() == list(model.weight.detach().numpy().shape) + + # validate the model + TestCompressionUtils.verify_model_outputs(mlmodel, mlmodel_sparsified, coreml_input_values) + + @staticmethod + @pytest.mark.parametrize( + "percentile", + (0., 0.5, 1.0), + ) + def test_weight_sparsify_percentile_based(percentile): + model, inputs, torch_input_values, coreml_input_values = get_test_model_and_data() + torchmodel = torch.jit.trace(model, torch_input_values) + mlmodel = ct.convert(torchmodel, inputs=inputs, convert_to="mlprogram") + mlmodel_sparsified = TestCompressionUtils.sparsify_weights(mlmodel, mode="percentile_based", target_percentile=percentile) + + # validate parameters + expected_ops = ['constexpr_sparse_to_dense', 'cast', 'conv', 'cast'] + assert get_op_types_in_program(mlmodel_sparsified._mil_program) == expected_ops + + main_func = mlmodel_sparsified._mil_program.functions["main"] + sparse_to_dense_op = main_func.find_ops(op_type="constexpr_sparse_to_dense")[0] + non_sparse_data = sparse_to_dense_op.nonzero_data + weight = model.weight.detach().numpy() + + if percentile == 0.: + assert non_sparse_data.val.size == weight.size - 1 + elif percentile == 0.5: + assert non_sparse_data.val.size <= 0.51 * (weight.size) and non_sparse_data.val.size >= 0.49 * (weight.size) + else: + assert non_sparse_data.val.size == 0 + + assert sparse_to_dense_op.shape.val.tolist() == list(model.weight.detach().numpy().shape) + + # validate the model + TestCompressionUtils.verify_model_outputs(mlmodel, mlmodel_sparsified, coreml_input_values) + + @staticmethod + @pytest.mark.parametrize( + "mode", + ("uniform", "kmeans") if _HAS_SKLEARN else ("uniform",) + ) + def test_weight_palettization(mode): + model, inputs, torch_input_values, coreml_input_values = get_test_model_and_data() + torchmodel = torch.jit.trace(model, torch_input_values) + mlmodel = ct.convert(torchmodel, inputs=inputs, convert_to="mlprogram") + mlmodel_palettized = TestCompressionUtils.palettize_weights(mlmodel, nbits=4, mode=mode) + + # validate parameters + expected_ops = ['constexpr_lut_to_dense', 'cast', 'conv', 'cast'] + assert get_op_types_in_program(mlmodel_palettized._mil_program) == expected_ops + + main_func = mlmodel_palettized._mil_program.functions["main"] + lut_to_dense_op = main_func.find_ops(op_type="constexpr_lut_to_dense")[0] + + assert lut_to_dense_op.shape.val.tolist() == list(model.weight.detach().numpy().shape) + + # validate the model + TestCompressionUtils.verify_model_outputs(mlmodel, mlmodel_palettized, coreml_input_values) + + @staticmethod + def test_weight_palettization_unique_case_1(): + # In this model, both conv weights can be palettized + model, inputs, torch_input_values, coreml_input_values 
= get_test_model_and_data(multi_layer=True)
+
+        weight_1_unique = create_unique_weight(model.conv_1.weight, nbits=2)
+        weight_2_unique = create_unique_weight(model.conv_2.weight, nbits=6)
+
+        with torch.no_grad():
+            model.conv_1.weight = torch.nn.Parameter(torch.Tensor(weight_1_unique))
+            model.conv_2.weight = torch.nn.Parameter(torch.Tensor(weight_2_unique))
+
+        torchmodel = torch.jit.trace(model, torch_input_values)
+        mlmodel = ct.convert(torchmodel, inputs=inputs, convert_to="mlprogram")
+
+        # validate parameters
+        mlmodel_palettized = TestCompressionUtils.palettize_weights(mlmodel, mode="unique")
+        expected_ops = ['constexpr_lut_to_dense', 'cast', 'conv', 'constexpr_lut_to_dense', 'conv', 'cast']
+        assert get_op_types_in_program(mlmodel_palettized._mil_program) == expected_ops
+
+        main_func = mlmodel_palettized._mil_program.functions["main"]
+        lut_to_dense_op_1 = main_func.find_ops(op_type="constexpr_lut_to_dense")[0]
+        lut_to_dense_op_2 = main_func.find_ops(op_type="constexpr_lut_to_dense")[1]
+
+        assert lut_to_dense_op_1.shape.val.tolist() == list(model.conv_1.weight.detach().numpy().shape)
+        assert lut_to_dense_op_2.shape.val.tolist() == list(model.conv_2.weight.detach().numpy().shape)
+
+        # validate the model
+        TestCompressionUtils.verify_model_outputs(mlmodel, mlmodel_palettized, coreml_input_values)
+
+    @staticmethod
+    def test_weight_palettization_unique_case_2(caplog):
+        # In this model, only one conv weight can be palettized; the converter should warn the user that the other weight is skipped
+        model, inputs, torch_input_values, coreml_input_values = get_test_model_and_data(multi_layer=True)
+
+        weight_1_unique = create_unique_weight(model.conv_1.weight, nbits=2)
+
+        with torch.no_grad():
+            model.conv_1.weight = torch.nn.Parameter(torch.Tensor(weight_1_unique))
+
+        torchmodel = torch.jit.trace(model, torch_input_values)
+        mlmodel = ct.convert(torchmodel, inputs=inputs, convert_to="mlprogram")
+
+        # validate parameters
+        # converter should warn the user that one weight is not compressed
+        mlmodel_palettized = TestCompressionUtils.palettize_weights(mlmodel, mode="unique")
+        warning_msg = "weight value cannot be represented in an 8 bits palettization. Skipped."
+        assert any([warning_msg in rec.message for rec in caplog.records])
+
+        expected_ops = ['constexpr_lut_to_dense', 'cast', 'conv', 'conv', 'cast']
+        assert get_op_types_in_program(mlmodel_palettized._mil_program) == expected_ops
+
+        main_func = mlmodel_palettized._mil_program.functions["main"]
+        lut_to_dense_op_1 = main_func.find_ops(op_type="constexpr_lut_to_dense")[0]
+        assert lut_to_dense_op_1.shape.val.tolist() == list(model.conv_1.weight.detach().numpy().shape)
+
+        # validate the model
+        TestCompressionUtils.verify_model_outputs(mlmodel, mlmodel_palettized, coreml_input_values)
+
+    @staticmethod
+    def test_weight_palettization_custom():
+        model, inputs, torch_input_values, coreml_input_values = get_test_model_and_data()
+        torchmodel = torch.jit.trace(model, torch_input_values)
+        mlmodel = ct.convert(torchmodel, inputs=inputs, convert_to="mlprogram")
+
+        def lut_function(weight):
+            nbits = 4
+            weight = weight.flatten()
+            unique_elements = np.unique(weight)
+            k = (1 << nbits) - 1
+            top_k = np.partition(weight, -k)[-k:]
+            top_k = np.sort(top_k)
+            lut = np.array([0.] 
+ top_k.tolist()).astype(weight.dtype) + mapping = {v: idx for idx, v in enumerate(lut)} + indices = np.array([mapping[v] if v in mapping else 0 for v in weight]).astype(np.uint8) + return lut, indices + + mlmodel_palettized = TestCompressionUtils.palettize_weights(mlmodel, mode="custom", lut_function=lut_function) + + # validate parameters + expected_ops = ['constexpr_lut_to_dense', 'cast', 'conv', 'cast'] + assert get_op_types_in_program(mlmodel_palettized._mil_program) == expected_ops + + main_func = mlmodel_palettized._mil_program.functions["main"] + lut_to_dense_op = main_func.find_ops(op_type="constexpr_lut_to_dense")[0] + + assert lut_to_dense_op.shape.val.tolist() == list(model.weight.detach().numpy().shape) + + # validate the model + TestCompressionUtils.verify_model_outputs(mlmodel, mlmodel_palettized, coreml_input_values) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/modelpackage/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/test/modelpackage/__init__.py new file mode 100644 index 00000000..9293abe9 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/modelpackage/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2017 - 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/modelpackage/test_mlmodel.py b/__packaged__/coreml/.python_dependencies/coremltools/test/modelpackage/test_mlmodel.py new file mode 100644 index 00000000..8d2f97e1 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/modelpackage/test_mlmodel.py @@ -0,0 +1,74 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os +import shutil + +import numpy as np +import torch + +import coremltools as ct +from coremltools._deps import _IS_MACOS +from coremltools.models.model import MLModel +from coremltools.models.utils import _macos_version + + +def test_mlmodel_demo(tmpdir): + NUM_TOKENS = 3 + EMBEDDING_SIZE = 5 + + class TestModule(torch.nn.Module): + def __init__(self): + super(TestModule, self).__init__() + self.embedding = torch.nn.Embedding(NUM_TOKENS, EMBEDDING_SIZE) + + def forward(self, x): + return self.embedding(x) + + model = TestModule() + model.eval() + + example_input = torch.randint(high=NUM_TOKENS, size=(2,), + dtype=torch.int64) + traced_model = torch.jit.trace(model, example_input) + mlmodel = ct.convert( + traced_model, + source='pytorch', + convert_to='mlprogram', + inputs=[ + ct.TensorType( + name="input", + shape=example_input.shape, + dtype=example_input.numpy().dtype, + ) + ], + compute_precision=ct.precision.FLOAT32, + compute_units=ct.ComputeUnit.CPU_ONLY + ) + assert isinstance(mlmodel, MLModel) + + # mlpackage_path is a model package + mlpackage_path = os.path.join(str(tmpdir), 'mymodel.mlpackage') + mlmodel.save(mlpackage_path) + + # Read back the saved bundle and compile + mlmodel2 = MLModel(mlpackage_path) + + if not _IS_MACOS or _macos_version() < (12, 0): + # Can not get predictions unless on macOS 12 or higher. 
+ shutil.rmtree(mlpackage_path) + return + + result = mlmodel2.predict( + {"input": example_input.cpu().detach().numpy().astype(np.float32)}, + ) + + # Verify outputs + expected = model(example_input) + name = list(result.keys())[0] + np.testing.assert_allclose(result[name], expected.cpu().detach().numpy()) + + # Cleanup package + shutil.rmtree(mlpackage_path) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/modelpackage/test_modelpackage.py b/__packaged__/coreml/.python_dependencies/coremltools/test/modelpackage/test_modelpackage.py new file mode 100644 index 00000000..887f7788 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/modelpackage/test_modelpackage.py @@ -0,0 +1,519 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os +import shutil +import tempfile + +import numpy as np +import pytest + +import coremltools +from coremltools import ComputeUnit, utils +from coremltools.converters.mil import Builder as mb +from coremltools.libmodelpackage import ModelPackage +from coremltools.models import MLModel +from coremltools.models.utils import (_MLPACKAGE_AUTHOR_NAME, + _WEIGHTS_DIR_NAME) +from coremltools.proto import Model_pb2 + + +def _remove_path(path): + if os.path.isdir(path): + shutil.rmtree(path) + else: + os.remove(path) + +class TestMLModel: + + def setup_class(self): + + spec = Model_pb2.Model() + spec.specificationVersion = coremltools.SPECIFICATION_VERSION + + features = ["feature_1", "feature_2"] + output = "output" + for f in features: + input_ = spec.description.input.add() + input_.name = f + input_.type.doubleType.MergeFromString(b"") + + output_ = spec.description.output.add() + output_.name = output + output_.type.doubleType.MergeFromString(b"") + + lr = spec.glmRegressor + lr.offset.append(0.1) + weights = lr.weights.add() + coefs = [1.0, 2.0] + for i in coefs: + weights.value.append(i) + + spec.description.predictedFeatureName = "output" + self.spec = spec + + def test_model_creation(self): + model = MLModel(self.spec) + assert model is not None + + package = tempfile.TemporaryDirectory(suffix=".mlpackage") + package.cleanup() + + utils.save_spec(self.spec, package.name) + model = MLModel(package.name) + assert model is not None + + # cleanup + _remove_path(package.name) + + def test_model_api(self): + model = MLModel(self.spec) + assert model is not None + + model.author = "Test author" + assert model.author == "Test author" + assert model.get_spec().description.metadata.author == "Test author" + + model.license = "Test license" + assert model.license == "Test license" + assert model.get_spec().description.metadata.license == "Test license" + + model.short_description = "Test model" + assert model.short_description == "Test model" + assert model.get_spec().description.metadata.shortDescription == "Test model" + + model.version = "1.3" + assert model.version == "1.3" + assert model.get_spec().description.metadata.versionString == "1.3" + + model.input_description["feature_1"] = "This is feature 1" + assert model.input_description["feature_1"] == "This is feature 1" + + model.output_description["output"] = "This is output" + assert model.output_description["output"] == "This is output" + + package = tempfile.TemporaryDirectory(suffix=".mlpackage") + package.cleanup() + + model.save(package.name) + loaded_model = MLModel(package.name) + + 
assert model.author == "Test author" + assert model.license == "Test license" + assert model.short_description == "Test model" + assert model.input_description["feature_1"] == "This is feature 1" + assert model.output_description["output"] == "This is output" + + # cleanup + _remove_path(package.name) + + def test_predict_api(self): + model = MLModel(self.spec) + + package = tempfile.TemporaryDirectory(suffix=".mlpackage") + package.cleanup() + + model.save(package.name) + + if utils._macos_version() >= (12, 0): + for compute_units in coremltools.ComputeUnit: + if (compute_units == coremltools.ComputeUnit.CPU_AND_NE + and utils._macos_version() < (13, 0)): + continue + + loaded_model = MLModel(package.name, compute_units=compute_units) + + preds = loaded_model.predict({"feature_1": 1.0, "feature_2": 1.0}) + assert preds is not None + assert preds["output"] == 3.1 + assert loaded_model.compute_unit == compute_units + else: + # just check if we can load it + loaded_model = MLModel(package.name) + + # cleanup + _remove_path(package.name) + + def test_rename_input(self): + utils.rename_feature(self.spec, "feature_1", "renamed_feature", rename_inputs=True) + model = MLModel(self.spec) + + package = tempfile.TemporaryDirectory(suffix=".mlpackage") + package.cleanup() + + model.save(package.name) + loaded_model = MLModel(package.name) + + if utils._macos_version() >= (12, 0): + preds = loaded_model.predict({"renamed_feature": 1.0, "feature_2": 1.0}) + assert preds is not None + assert preds["output"] == 3.1 + + # reset the spec for next run + utils.rename_feature(self.spec, "renamed_feature", "feature_1", rename_inputs=True) + + # cleanup + _remove_path(package.name) + + def test_rename_input_bad(self): + utils.rename_feature(self.spec, "blah", "bad_name", rename_inputs=True) + model = MLModel(self.spec) + + package = tempfile.TemporaryDirectory(suffix=".mlpackage") + package.cleanup() + + model.save(package.name) + loaded_model = MLModel(package.name) + + if utils._macos_version() >= (12, 0): + preds = loaded_model.predict({"feature_1": 1.0, "feature_2": 1.0}) + assert preds is not None + assert preds["output"] == 3.1 + + # cleanup + _remove_path(package.name) + + def test_save(self): + model = MLModel(self.spec) + + # Verify "save" can be called twice and the saved + # model can be loaded successfully each time + for _ in range(0, 2): + package = tempfile.TemporaryDirectory(suffix=".mlpackage") + package.cleanup() + + model.save(package.name) + loaded_model = MLModel(package.name) + + if utils._macos_version() >= (12, 0): + preds = loaded_model.predict({"feature_1": 1.0, "feature_2": 1.0}) + assert preds is not None + assert preds["output"] == 3.1 + + _remove_path(package.name) + + def test_save_in_place(self): + model = MLModel(self.spec) + + # Verify "save" can be called twice and the saved + # model can be loaded successfully each time + # the mlpackage remains in place after the first save + package = tempfile.TemporaryDirectory(suffix=".mlpackage") + package.cleanup() + for _ in range(2): + + model.save(package.name) + loaded_model = MLModel(package.name) + + if utils._macos_version() >= (12, 0): + preds = loaded_model.predict({"feature_1": 1.0, "feature_2": 1.0}) + assert preds is not None + assert preds["output"] == 3.1 + + _remove_path(package.name) + + def test_mil_as_package(self): + import torch + + num_tokens = 3 + embedding_size = 5 + + class TestModule(torch.nn.Module): + def __init__(self): + super(TestModule, self).__init__() + self.embedding = torch.nn.Embedding(num_tokens, 
embedding_size) + + def forward(self, x): + return self.embedding(x) + + model = TestModule() + model.eval() + + example_input = torch.randint(high=num_tokens, size=(2,), dtype=torch.int64) + traced_model = torch.jit.trace(model, example_input) + + temp_package_dir = tempfile.TemporaryDirectory(suffix=".mlpackage") + for converted_package_path in [None, temp_package_dir.name]: + mlmodel = coremltools.convert( + traced_model, + package_dir=converted_package_path, + source='pytorch', + convert_to='mlprogram', + compute_precision=coremltools.precision.FLOAT32, + inputs=[ + coremltools.TensorType( + name="input", + shape=example_input.shape, + dtype=example_input.numpy().dtype, + ) + ], + ) + + assert isinstance(mlmodel, MLModel) + + package_path = tempfile.mkdtemp(suffix=".mlpackage") + mlmodel.save(package_path) + + assert ModelPackage.isValid(package_path) + assert os.path.exists(ModelPackage(package_path).getRootModel().path()) + + # Read back the saved bundle and compile + mlmodel2 = MLModel(package_path, compute_units=ComputeUnit.CPU_ONLY) + + if utils._macos_version() >= (12, 0): + result = mlmodel2.predict( + {"input": example_input.cpu().detach().numpy().astype(np.float32)} + ) + + # Verify outputs + expected = model(example_input) + name = list(result.keys())[0] + np.testing.assert_allclose(result[name], expected.cpu().detach().numpy()) + + # Cleanup package + shutil.rmtree(package_path) + + tmp_package_path = mlmodel.package_path + assert os.path.exists(tmp_package_path) + del mlmodel + if converted_package_path is not None: + # Verify we leave the provided package dir alone + assert os.path.exists(tmp_package_path) + + temp_package_dir.cleanup() + + def test_model_save_no_extension(self): + import torch + + num_tokens = 3 + embedding_size = 5 + + class TestModule(torch.nn.Module): + def __init__(self): + super(TestModule, self).__init__() + self.embedding = torch.nn.Embedding(num_tokens, embedding_size) + + def forward(self, x): + return self.embedding(x) + + model = TestModule() + model.eval() + + example_input = torch.randint(high=num_tokens, size=(2,), dtype=torch.int64) + traced_model = torch.jit.trace(model, example_input) + + mlmodel = coremltools.convert( + traced_model, + package_dir=None, + source='pytorch', + convert_to='mlprogram', + inputs=[ + coremltools.TensorType( + name="input", + shape=example_input.shape, + dtype=example_input.numpy().dtype, + ) + ], + ) + assert isinstance(mlmodel, MLModel) + + package = tempfile.TemporaryDirectory() + package.cleanup() + package_path = package.name + + mlmodel.save(package_path) + assert not os.path.exists(package_path) + + package_path = package_path + ".mlpackage" + assert os.path.exists(package_path) + + shutil.rmtree(package_path) + +class TestSpecAndMLModelAPIs: + + def setup_class(self): + # define an mlprogram, which has weights + @mb.program(input_specs=[mb.TensorSpec(shape=(4, 5000))]) + def linear_prog(input): + W = mb.const(val=np.random.rand(100, 5000), name="const_W") + out = mb.linear(x=input, weight=W, name="output") + return out + + # define another mlprogram, which does not have weights + @mb.program(input_specs=[mb.TensorSpec(shape=(4, 5, 2))]) + def relu_prog(input): + out = mb.relu(x=input, name="output") + return out + + # convert and save model on disk + self.mlmodel = coremltools.convert(linear_prog, convert_to="mlprogram") + self.mlpackage_path = tempfile.mkdtemp(suffix=utils._MLPACKAGE_EXTENSION) + self.mlmodel.save(self.mlpackage_path) + self.mlmodel_no_weights = coremltools.convert(relu_prog, 
convert_to="mlprogram") + + def teardown_class(self): + _remove_path(self.mlpackage_path) + self.mlmodel = None + self.mlmodel_no_weights = None + + def _test_mlmodel_correctness(self, mlmodel): + """ + :param mlmodel: coremltools.models.MLModel + Test the following: + - calling .predict on mlmodel works correctly + - calling .save on mlmodel works correctly + """ + # construct input dictionary + spec = mlmodel.get_spec() + inputs = spec.description.input + input_dict = {} + for input in inputs: + input_dict[input.name] = np.random.rand(*tuple(input.type.multiArrayType.shape)) + # check prediction + preds = mlmodel.predict(input_dict) + assert preds is not None + # save, load and predict again to check that the saving and loading worked correctly + with tempfile.TemporaryDirectory(suffix=utils._MLPACKAGE_EXTENSION) as temp_path: + mlmodel.save(temp_path) + mlmodel_reloaded = MLModel(temp_path) + preds = mlmodel_reloaded.predict(input_dict) + assert preds is not None + + @pytest.mark.skipif(utils._macos_version() < (12, 0), reason="prediction on mlprogram model " + "available only on macOS12+") + def test_mlmodel_to_spec_to_mlmodel(self): + """ + convert mlmodel to spec, and then back to mlmodel and verify that it works + """ + spec = self.mlmodel.get_spec() + # reload the model from the spec and verify it + weights_dir = self.mlmodel.weights_dir + mlmodel_from_spec = MLModel(spec, weights_dir=weights_dir) + self._test_mlmodel_correctness(mlmodel_from_spec) + # check that the original model still works + self._test_mlmodel_correctness(self.mlmodel) + # check that an error is raised when MLModel is initialized without the weights + with pytest.raises(Exception, match="MLModel of type mlProgram cannot be loaded just from the model " + "spec object. It also needs the path to the weights file. " + "Please provide that as well, using the 'weights_dir' argument."): + MLModel(spec) + + @pytest.mark.skipif(utils._macos_version() < (12, 0), reason="prediction on mlprogram model " + "available only on macOS12+") + def test_path_to_mlmodel_to_spec_to_mlmodel(self): + """ + load an mlmodel from disk, convert it to spec, and then convert the spec back to mlmodel + """ + mlmodel_from_disk = MLModel(self.mlpackage_path) + spec = mlmodel_from_disk.get_spec() + mlmodel_from_spec = MLModel(spec, weights_dir=mlmodel_from_disk.weights_dir) + self._test_mlmodel_correctness(mlmodel_from_spec) + + @pytest.mark.skipif(utils._macos_version() < (12, 0), reason="prediction on mlprogram model " + "available only on macOS12+") + def test_path_to_spec_to_mlmodel(self): + """ + load a spec from disk, then convert it to mlmodel, and check that it works + """ + spec = utils.load_spec(self.mlpackage_path) + weights_dir = self.mlpackage_path + "/Data/" + _MLPACKAGE_AUTHOR_NAME + "/weights" + mlmodel = MLModel(spec, weights_dir=weights_dir) + self._test_mlmodel_correctness(mlmodel) + + @pytest.mark.skipif(utils._macos_version() < (12, 0), reason="prediction on mlprogram model " + "available only on macOS12+") + def test_save_spec_api_mlprogram_without_weights_dir(self): + """ + save an mlpackage using the save_spec API. It should error out because no weights dir. + """ + spec = self.mlmodel.get_spec() + with tempfile.TemporaryDirectory(suffix=utils._MLPACKAGE_EXTENSION) as model_path: + # this should raise error: + with pytest.raises(Exception, match="spec of type mlProgram cannot be saved without" + " the weights file. 
Please provide the path to " + "the weights file as well, using the 'weights_dir' argument."): + utils.save_spec(spec, model_path) + + @pytest.mark.skipif( + utils._macos_version() < (12, 0), + reason="prediction on mlprogram model " "available only on macOS12+", + ) + def test_save_spec_api(self): + """ + save an mlpackage using the save_spec API. Reload the model from disk and verify it works + """ + spec = self.mlmodel.get_spec() + with tempfile.TemporaryDirectory( + suffix=utils._MLPACKAGE_EXTENSION + ) as model_path: + utils.save_spec(spec, model_path, weights_dir=self.mlmodel.weights_dir) + model = MLModel(model_path) + self._test_mlmodel_correctness(model) + + @pytest.mark.skipif(utils._macos_version() < (12, 0), reason="prediction on mlprogram model " + "available only on macOS12+") + def test_save_spec_api_model_with_no_weights(self): + """ + save an mlprogram model with no weights, using the save SPI and an empty weights directory. + Reload the model from disk and verify it works + """ + spec = self.mlmodel_no_weights.get_spec() + with tempfile.TemporaryDirectory(suffix=utils._MLPACKAGE_EXTENSION) as model_path: + with tempfile.TemporaryDirectory() as empty_weight_dir: + utils.save_spec(spec, model_path, weights_dir=empty_weight_dir) + model = MLModel(model_path) + self._test_mlmodel_correctness(model) + + @pytest.mark.skipif(utils._macos_version() < (12, 0), reason="prediction on mlprogram model " + "available only on macOS12+") + def test_mlmodel_to_spec_to_mlmodel_with_no_weights_model(self): + """ + convert mlmodel to spec, and then back to mlmodel and verify that it works + """ + spec = self.mlmodel_no_weights.get_spec() + # if no weights_dir is passed, error will be raised + with pytest.raises(Exception, match="MLModel of type mlProgram cannot be loaded just from the model " + "spec object. It also needs the path to the weights file. 
" + "Please provide that as well, using the 'weights_dir' argument."): + MLModel(spec) + + # weights_dir will still exist, even though the model has no weights, + # with a weights file that only has header and no data + weights_dir = self.mlmodel_no_weights.weights_dir + assert weights_dir is not None + mlmodel_from_spec = MLModel(spec, weights_dir=weights_dir) + self._test_mlmodel_correctness(mlmodel_from_spec) + + # load mlmodel from spec using an empty weights_dir + with tempfile.TemporaryDirectory() as empty_weight_dir: + mlmodel_from_spec = MLModel(spec, weights_dir=weights_dir) + self._test_mlmodel_correctness(mlmodel_from_spec) + + def test_weights_path_correctness(self): + """ + test that after reloading an mlmodel from the spec, the weights path is updated + """ + spec = self.mlmodel.get_spec() + original_weight_dir_path = self.mlmodel.weights_dir + assert os.path.exists(original_weight_dir_path) + # load mlmodel from spec: this will create a new mlpackage in a temp location + # and copy over the weights + mlmodel_reloaded = MLModel(spec, weights_dir=original_weight_dir_path) + assert os.path.exists(mlmodel_reloaded.weights_dir) + assert mlmodel_reloaded.weights_dir != original_weight_dir_path + assert mlmodel_reloaded.weights_dir == mlmodel_reloaded.package_path + "/Data/" \ + + _MLPACKAGE_AUTHOR_NAME + "/weights" + + def test_weights_dir_discovery_method(self): + """ + Test "coremltools.libmodelpackage.ModelPackage.findItemByNameAuthor" function + """ + mlpackage = ModelPackage(self.mlpackage_path) + model_package_item_info = mlpackage.findItemByNameAuthor(_WEIGHTS_DIR_NAME, _MLPACKAGE_AUTHOR_NAME) + weights_dir_path = model_package_item_info.path() + assert weights_dir_path == self.mlpackage_path + "/Data/" + _MLPACKAGE_AUTHOR_NAME + "/weights" + # verify that findItemByNameAuthor returns None, when item not found + model_package_item_info = mlpackage.findItemByNameAuthor(_WEIGHTS_DIR_NAME, "inexistent_author_name") + assert model_package_item_info is None diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/__init__.py new file mode 100644 index 00000000..8aa13a28 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_custom_neural_nets.py b/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_custom_neural_nets.py new file mode 100644 index 00000000..5c5d1f88 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_custom_neural_nets.py @@ -0,0 +1,89 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. 
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import os
+import tempfile
+import unittest
+
+import numpy as np
+
+import coremltools
+import coremltools.models.datatypes as datatypes
+from coremltools.models import neural_network as neural_network
+from coremltools.models.utils import _is_macos, _macos_version
+
+
+class SimpleTest(unittest.TestCase):
+    def test_fixed_seq_len(self):
+        """
+        Input has a fixed sequence length.
+        (this happens when the model is trained using padded sequences, inspiration: https://forums.developer.apple.com/thread/80407)
+
+        (Seq,Batch,C,H,W)
+        embedding: input shape (15,1,1,1,1) --> output shape (15,1,32,1,1)
+        permute  : input shape (15,1,32,1,1) --> output shape (1,1,32,1,15)
+        flatten  : input shape (1,1,32,1,15) --> output shape (1,1,32 * 15,1,1)
+        dense    : input shape (1,1,480,1,1) --> output shape (1,1,2,1,1)
+        """
+
+        coreml_preds = []
+        input_dim = (1, 1, 1)
+        output_dim = (
+            1,
+            1,
+            1,
+        )  # some random dimensions here: we are going to remove this information later
+        input_features = [("data", datatypes.Array(*input_dim))]
+        output_features = [("output", datatypes.Array(*output_dim))]
+        builder = neural_network.NeuralNetworkBuilder(input_features, output_features)
+
+        # ADD Layers
+        builder.add_embedding(
+            "embed",
+            W=np.random.rand(10, 32),
+            b=None,
+            input_dim=10,
+            output_channels=32,
+            has_bias=0,
+            input_name="data",
+            output_name="embed",
+        )
+        builder.add_permute(
+            "permute", dim=[3, 1, 2, 0], input_name="embed", output_name="permute"
+        )
+        builder.add_flatten(
+            "flatten", mode=0, input_name="permute", output_name="flatten"
+        )
+        builder.add_inner_product(
+            "dense",
+            W=np.random.rand(480, 2),
+            b=None,
+            input_channels=480,
+            output_channels=2,
+            has_bias=0,
+            input_name="flatten",
+            output_name="output",
+        )
+
+        # Remove output shape by deleting and adding an output
+        del builder.spec.description.output[-1]
+        output = builder.spec.description.output.add()
+        output.name = "output"
+        output.type.multiArrayType.dataType = coremltools.proto.FeatureTypes_pb2.ArrayFeatureType.ArrayDataType.Value(
+            "DOUBLE"
+        )
+
+        # save the model
+        model_dir = tempfile.TemporaryDirectory()
+        model_path = os.path.join(model_dir.name, "test_layer.mlmodel")
+        coremltools.utils.save_spec(builder.spec, model_path)
+        # prepare input and get predictions
+        coreml_model = coremltools.models.MLModel(model_path)
+        X = np.random.randint(low=0, high=10, size=15)
+        X = np.reshape(X, (15, 1, 1, 1, 1)).astype(np.float32)
+        coreml_input = {"data": X}
+        if _is_macos() and _macos_version() >= (10, 13):
+            coreml_preds = coreml_model.predict(coreml_input)["output"]
+            self.assertEqual(len(coreml_preds.flatten()), 2)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_model.py b/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_model.py
new file mode 100644
index 00000000..1b08d187
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_model.py
@@ -0,0 +1,569 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os +import tempfile +import unittest + +import numpy as np +import PIL.Image + +import coremltools +from coremltools import ComputeUnit +from coremltools._deps import _HAS_TORCH +from coremltools.converters.mil import Builder as mb +from coremltools.models import MLModel, datatypes +from coremltools.models.neural_network import NeuralNetworkBuilder +from coremltools.models.neural_network.utils import (make_image_input, + make_nn_classifier) +from coremltools.models.utils import ( + _convert_neural_network_spec_weights_to_fp16, _is_macos, _macos_version, + convert_double_to_float_multiarray_type, rename_feature, save_spec) +from coremltools.proto import Model_pb2 + +if _HAS_TORCH: + import torch as _torch + + +class MLModelTest(unittest.TestCase): + @classmethod + def setUpClass(self): + + spec = Model_pb2.Model() + spec.specificationVersion = coremltools.SPECIFICATION_VERSION + + features = ["feature_1", "feature_2"] + output = "output" + for f in features: + input_ = spec.description.input.add() + input_.name = f + input_.type.doubleType.MergeFromString(b"") + + output_ = spec.description.output.add() + output_.name = output + output_.type.doubleType.MergeFromString(b"") + + lr = spec.glmRegressor + lr.offset.append(0.1) + weights = lr.weights.add() + coefs = [1.0, 2.0] + for i in coefs: + weights.value.append(i) + + spec.description.predictedFeatureName = "output" + self.spec = spec + + def test_model_creation(self): + model = MLModel(self.spec) + self.assertIsNotNone(model) + + filename = tempfile.mktemp(suffix=".mlmodel") + save_spec(self.spec, filename) + model = MLModel(filename) + self.assertIsNotNone(model) + + def test_model_save_no_extension(self): + model = MLModel(self.spec) + self.assertIsNotNone(model) + + filename = tempfile.mktemp(suffix="") + save_spec(self.spec, filename) # appends .mlmodel extension when it is not provided + self.assertFalse(os.path.exists(filename)) + + filename = filename + ".mlmodel" + self.assertTrue(os.path.exists(filename)) + + model = MLModel(filename) + self.assertIsNotNone(model) + os.remove(filename) + + def test_model_api(self): + model = MLModel(self.spec) + self.assertIsNotNone(model) + + model.author = "Test author" + self.assertEqual(model.author, "Test author") + self.assertEqual(model.get_spec().description.metadata.author, "Test author") + + model.license = "Test license" + self.assertEqual(model.license, "Test license") + self.assertEqual(model.get_spec().description.metadata.license, "Test license") + + model.short_description = "Test model" + self.assertEqual(model.short_description, "Test model") + self.assertEqual( + model.get_spec().description.metadata.shortDescription, "Test model" + ) + + model.version = "1.3" + self.assertEqual(model.version, "1.3") + self.assertEqual(model.get_spec().description.metadata.versionString, "1.3") + + model.input_description["feature_1"] = "This is feature 1" + self.assertEqual(model.input_description["feature_1"], "This is feature 1") + + model.output_description["output"] = "This is output" + self.assertEqual(model.output_description["output"], "This is output") + + filename = tempfile.mktemp(suffix=".mlmodel") + model.save(filename) + loaded_model = MLModel(filename) + + self.assertEqual(model.author, "Test author") + self.assertEqual(model.license, "Test license") + # self.assertEqual(model.short_description, 'Test model') 
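+        # A stricter variant (not in the original test) would assert on the
+        # reloaded model as well, e.g.:
+        #   self.assertEqual(loaded_model.author, "Test author")
+        # assuming MLModel(filename) round-trips the saved metadata.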
+ self.assertEqual(model.input_description["feature_1"], "This is feature 1") + self.assertEqual(model.output_description["output"], "This is output") + + @unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" + ) + def test_predict_api(self): + model = MLModel(self.spec) + preds = model.predict({"feature_1": 1.0, "feature_2": 1.0}) + self.assertIsNotNone(preds) + self.assertEqual(preds["output"], 3.1) + + @unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" + ) + def test_rename_input(self): + rename_feature(self.spec, "feature_1", "renamed_feature", rename_inputs=True) + model = MLModel(self.spec) + preds = model.predict({"renamed_feature": 1.0, "feature_2": 1.0}) + self.assertIsNotNone(preds) + self.assertEqual(preds["output"], 3.1) + # reset the spec for next run + rename_feature(self.spec, "renamed_feature", "feature_1", rename_inputs=True) + + @unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" + ) + def test_rename_input_bad(self): + rename_feature(self.spec, "blah", "bad_name", rename_inputs=True) + model = MLModel(self.spec) + preds = model.predict({"feature_1": 1.0, "feature_2": 1.0}) + self.assertIsNotNone(preds) + self.assertEqual(preds["output"], 3.1) + + @unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" + ) + def test_rename_output(self): + rename_feature( + self.spec, + "output", + "renamed_output", + rename_inputs=False, + rename_outputs=True, + ) + model = MLModel(self.spec) + preds = model.predict({"feature_1": 1.0, "feature_2": 1.0}) + self.assertIsNotNone(preds) + self.assertEqual(preds["renamed_output"], 3.1) + rename_feature( + self.spec, + "renamed_output", + "output", + rename_inputs=False, + rename_outputs=True, + ) + + @unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" + ) + def test_rename_output_bad(self): + rename_feature( + self.spec, "blah", "bad_name", rename_inputs=False, rename_outputs=True + ) + model = MLModel(self.spec) + preds = model.predict({"feature_1": 1.0, "feature_2": 1.0}) + self.assertIsNotNone(preds) + self.assertEqual(preds["output"], 3.1) + + @unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" + ) + def test_future_version(self): + self.spec.specificationVersion = 10000 + filename = tempfile.mktemp(suffix=".mlmodel") + save_spec(self.spec, filename, auto_set_specification_version=False) + model = MLModel(filename) + # this model should exist, but throw an exception when we try to use + # predict because the engine doesn't support this model version + self.assertIsNotNone(model) + with self.assertRaises(Exception): + try: + model.predict({}) + except Exception as e: + assert "Core ML model specification version" in str(e) + raise + self.spec.specificationVersion = 1 + + @unittest.skipUnless( + _is_macos() and _macos_version() < (10, 13), "Only supported on macOS 10.13-" + ) + def test_MLModel_warning(self): + self.spec.specificationVersion = 3 + import warnings + + with warnings.catch_warnings(record=True) as w: + # Cause all warnings to always be triggered. 
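+            # ("always" turns off the default once-per-location warning
+            # de-duplication, so the RuntimeWarning emitted by MLModel below
+            # is reliably recorded in `w`.)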
+ warnings.simplefilter("always") + model = MLModel(self.spec) + assert len(w) == 1 + assert issubclass(w[-1].category, RuntimeWarning) + assert "not able to run predict()" in str(w[-1].message) + self.spec.specificationVersion = 1 + model = MLModel(self.spec) + + def test_convert_nn_spec_to_half_precision(self): + # simple network with quantization layer + input_features = [("data", datatypes.Array(3))] + output_features = [("out", datatypes.Array(3))] + builder = NeuralNetworkBuilder(input_features, output_features) + weights = np.random.uniform(-0.5, 0.5, (3, 3)) + builder.add_inner_product( + name="inner_product", + W=weights, + b=None, + input_channels=3, + output_channels=3, + has_bias=False, + input_name="data", + output_name="out", + ) + model = MLModel(builder.spec) + spec = _convert_neural_network_spec_weights_to_fp16(model.get_spec()) + self.assertIsNotNone(spec) + + # simple network without quantization layer + input_features = [("data", datatypes.Array(3))] + output_features = [("out", datatypes.Array(3))] + builder = NeuralNetworkBuilder(input_features, output_features) + builder.add_lrn( + name="lrn", + input_name="data", + output_name="out", + alpha=2, + beta=3, + local_size=1, + k=8, + ) + model = MLModel(builder.spec) + spec = _convert_neural_network_spec_weights_to_fp16(model.get_spec()) + self.assertIsNotNone(spec) + + @unittest.skip + def test_downgrade_specification_version(self): + # manually set a invalid specification version + self.spec.specificationVersion = -1 + model = MLModel(self.spec) + assert model.get_spec().specificationVersion == 1 + + # manually set a high specification version + self.spec.specificationVersion = 4 + filename = tempfile.mktemp(suffix=".mlmodel") + save_spec(self.spec, filename, auto_set_specification_version=True) + model = MLModel(filename) + assert model.get_spec().specificationVersion == 1 + + # simple neural network with only spec 1 layer + input_features = [("data", datatypes.Array(3))] + output_features = [("out", datatypes.Array(3))] + builder = NeuralNetworkBuilder(input_features, output_features) + builder.add_activation("relu", "RELU", "data", "out") + # set a high specification version + builder.spec.specificationVersion = 3 + model = MLModel(builder.spec) + filename = tempfile.mktemp(suffix=".mlmodel") + model.save(filename) + # load the model back + model = MLModel(filename) + assert model.get_spec().specificationVersion == 1 + + # test save without automatic set specification version + self.spec.specificationVersion = 3 + filename = tempfile.mktemp(suffix=".mlmodel") + save_spec(self.spec, filename, auto_set_specification_version=False) + model = MLModel(filename) + # the specification version should be original + assert model.get_spec().specificationVersion == 3 + + def test_multiarray_type_convert_to_float(self): + input_features = [("data", datatypes.Array(2))] + output_features = [("out", datatypes.Array(2))] + builder = NeuralNetworkBuilder(input_features, output_features) + builder.add_ceil("ceil", "data", "out") + spec = builder.spec + self.assertEqual( + spec.description.input[0].type.multiArrayType.dataType, + Model_pb2.ArrayFeatureType.DOUBLE, + ) + self.assertEqual( + spec.description.output[0].type.multiArrayType.dataType, + Model_pb2.ArrayFeatureType.DOUBLE, + ) + convert_double_to_float_multiarray_type(spec) + self.assertEqual( + spec.description.input[0].type.multiArrayType.dataType, + Model_pb2.ArrayFeatureType.FLOAT32, + ) + self.assertEqual( + spec.description.output[0].type.multiArrayType.dataType, + 
Model_pb2.ArrayFeatureType.FLOAT32, + ) + + @unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" + ) + def test_multiarray_to_image_input_util(self): + H, W, C = 1, 1, 3 + input_features = [("data", datatypes.Array(C, H, W))] + output_features = [("out", datatypes.Array(C, H, W))] + builder = NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_activation("linear", "LINEAR", "data", "out") + spec = builder.spec + mlmodel = MLModel(spec, compute_units=ComputeUnit.CPU_ONLY) + mlmodel = make_image_input( + mlmodel, + "data", + red_bias=-5, + green_bias=-6, + blue_bias=-2.5, + scale=10.0, + image_format="NCHW", + ) + x = np.array([4, 2, 5], dtype=np.uint8) + x = np.reshape(x, (H, W, C)) + pil_img = PIL.Image.fromarray(x) + y = mlmodel.predict({"data": pil_img})["out"] + self.assertEqual(y.shape, (C, H, W)) + np.testing.assert_almost_equal(y.flatten(), [35.0, 14.0, 47.5]) + + @unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" + ) + def test_multiarray_to_image_input_util_transpose_elimination(self): + H, W, C = 1, 1, 3 + input_features = [("data", datatypes.Array(H, W, C))] + output_features = [("out", datatypes.Array(H, W, C))] + builder = NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_transpose("transpose", [2, 0, 1], "data", "transpose") + builder.add_activation("linear", "LINEAR", "transpose", "out") + spec = builder.spec + mlmodel = MLModel(spec, compute_units=ComputeUnit.CPU_ONLY) + mlmodel = make_image_input( + mlmodel, + "data", + red_bias=-5, + green_bias=-6, + blue_bias=-2.5, + scale=10.0, + image_format="NHWC", + ) + x = np.array([4, 2, 5], dtype=np.uint8) + x = np.reshape(x, (H, W, C)) + pil_img = PIL.Image.fromarray(x) + y = mlmodel.predict({"data": pil_img})["out"] + self.assertEqual(y.shape, (H, W, C)) + np.testing.assert_almost_equal(y.flatten(), [35.0, 14.0, 47.5]) + + @unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" + ) + def test_multiarray_to_image_input_util_HWC_format(self): + H, W, C = 1, 1, 3 + input_features = [("data", datatypes.Array(H, W, C))] + output_features = [("out", datatypes.Array(H, W, C))] + builder = NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_activation("linear", "LINEAR", "data", "out") + spec = builder.spec + mlmodel = MLModel(spec, compute_units=ComputeUnit.CPU_ONLY) + mlmodel = make_image_input( + mlmodel, + "data", + red_bias=-5, + green_bias=-6, + blue_bias=-2.5, + scale=10.0, + image_format="NHWC", + ) + x = np.array([4, 2, 5], dtype=np.uint8) + x = np.reshape(x, (H, W, C)) + pil_img = PIL.Image.fromarray(x) + y = mlmodel.predict({"data": pil_img})["out"] + self.assertEqual(y.shape, (H, W, C)) + np.testing.assert_almost_equal(y.flatten(), [35.0, 14.0, 47.5]) + + @unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" + ) + def test_nn_classifier_util(self): + input_features = [("data", datatypes.Array(3))] + output_features = [("out", datatypes.Array(3))] + builder = NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_activation("linear", "LINEAR", "data", "out") + spec = builder.spec + mlmodel = MLModel(spec, compute_units=ComputeUnit.CPU_ONLY) + mlmodel = make_nn_classifier( + mlmodel, + class_labels=["a", "b", "c"], 
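+            # predicted_feature_name is the name given to the class-label
+            # output added by make_nn_classifier (the assertion below reads the
+            # winning label from out_dict["out_confidence"]), while
+            # predicted_probabilities_output selects the existing network
+            # output ("out") to interpret as the per-class scores.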
+ predicted_feature_name="out_confidence", + predicted_probabilities_output="out", + ) + out_dict = mlmodel.predict({"data": np.array([4.0, 5.5, 6.0])}) + self.assertEqual(out_dict["out_confidence"], "c") + self.assertEqual( + mlmodel.get_spec().WhichOneof("Type"), "neuralNetworkClassifier" + ) + + @unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" + ) + def test_nn_classifier_util_file(self): + input_features = [("data", datatypes.Array(3))] + output_features = [("out", datatypes.Array(3))] + builder = NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_activation("linear", "LINEAR", "data", "out") + spec = builder.spec + mlmodel = MLModel(spec, compute_units=ComputeUnit.CPU_ONLY) + + class_labels = ["a", "b", "c"] + with tempfile.NamedTemporaryFile(mode="w", suffix=".txt") as f: + f.write("\n".join(class_labels)) + f.flush() + mlmodel = make_nn_classifier( + mlmodel, + class_labels=f.name, + predicted_feature_name="out_confidence", + predicted_probabilities_output="out", + ) + out_dict = mlmodel.predict({"data": np.array([4.0, 5.5, 6.0])}) + self.assertEqual(out_dict["out_confidence"], "c") + self.assertEqual( + mlmodel.get_spec().WhichOneof("Type"), "neuralNetworkClassifier" + ) + + @unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" + ) + def test_rename_output_nn_classifier(self): + input_features = [("data", datatypes.Array(3))] + output_features = [("out", datatypes.Array(3))] + builder = NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_activation("linear", "LINEAR", "data", "out") + spec = builder.spec + mlmodel = MLModel(spec) + + class_labels = ["a", "b", "c"] + mlmodel = make_nn_classifier(mlmodel, class_labels=["a", "b", "c"]) + + # rename output + spec = mlmodel.get_spec() + rename_feature(spec, "out", "new_out_name") + mlmodel = MLModel(spec, compute_units=ComputeUnit.CPU_ONLY) + + out_dict = mlmodel.predict({"data": np.array([4.0, 5.5, 6.0])}) + self.assertEqual(out_dict["classLabel"], "c") + self.assertTrue("new_out_name" in out_dict) + self.assertTrue(isinstance(out_dict["new_out_name"], dict)) + + @unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" + ) + def test_rename_image_input(self): + input_features = [("data", datatypes.Array(3, 1, 1))] + output_features = [("out", datatypes.Array(3, 1, 1))] + builder = NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_activation("linear", "LINEAR", "data", "out") + spec = builder.spec + # make an image input + mlmodel = make_image_input(MLModel(spec), "data", image_format="NCHW", scale=2.0) + # rename the input + spec = mlmodel.get_spec() + rename_feature(spec, "data", "new_input_name") + mlmodel = MLModel(spec, compute_units=ComputeUnit.CPU_ONLY) + + # test + x = np.array([4, 5, 6], dtype=np.uint8).reshape(1, 1, 3) + pil_img = PIL.Image.fromarray(x) + out = mlmodel.predict({"new_input_name": pil_img})['out'] + np.testing.assert_equal(out, np.array([8.0, 10.0, 12.0]).reshape(3, 1, 1)) + + @unittest.skipUnless( + _is_macos() and _macos_version() >= (12, 0), "Only supported on macOS 12+" + ) + def test_rename_feature_mlprogram(self): + @mb.program(input_specs=[mb.TensorSpec(shape=(3,))]) + def linear_prog(input): + W = np.ones((10, 3), dtype=np.float32) + out = mb.linear(x=input, weight=W, name="output") + 
return out + + model = coremltools.convert( + linear_prog, + convert_to='mlprogram' + ) + + spec = model.get_spec() + input_name = spec.description.input[0].name + output_name = spec.description.output[0].name + + # rename input + rename_feature(spec, input_name, "new_input_name") + self.assertEqual(spec.description.input[0].name, "new_input_name") + model = coremltools.models.MLModel(spec, weights_dir=model.weights_dir) + out = model.predict({"new_input_name": np.array([1.0, 2.0, 3.0])})[output_name] + self.assertEqual(out.shape, (10,)) + self.assertEqual(out[0], 6.0) + + # rename output + rename_feature(spec, output_name, "new_output_name") + self.assertEqual(spec.description.output[0].name, "new_output_name") + model = coremltools.models.MLModel(spec, weights_dir=model.weights_dir) + out = model.predict({"new_input_name": np.array([1.0, 2.0, 3.0])})["new_output_name"] + self.assertEqual(out.shape, (10,)) + self.assertEqual(out[1], 6.0) + + @unittest.skipUnless( + _is_macos() and _macos_version() >= (12, 0) and _HAS_TORCH, "Only supported on macOS 12+" + ) + def test_rename_feature_classifier_mlprogram(self): + torch_model = _torch.nn.ReLU().eval() + model = coremltools.convert( + _torch.jit.trace(torch_model, _torch.rand(3, )), + inputs=[coremltools.TensorType(shape=(3,))], + classifier_config=coremltools.ClassifierConfig(['a', 'b', 'c']), + convert_to='mlprogram' + ) + spec = model.get_spec() + input_name = spec.description.input[0].name + + rename_feature(spec, 'classLabel', 'highestProbClass') + model = coremltools.models.MLModel(spec, weights_dir=model.weights_dir) + output_class = model.predict({input_name: np.array([1.0, 2.0, 3.0])})['highestProbClass'] + self.assertEqual(output_class, 'c') + + +if __name__ == "__main__": + unittest.main() + # suite = unittest.TestSuite() + # suite.addTest(MLModelTest('test_multiarray_type_convert_to_float')) + # unittest.TextTestRunner().run(suite) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_neural_networks.py b/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_neural_networks.py new file mode 100644 index 00000000..2d0360b5 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_neural_networks.py @@ -0,0 +1,60 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import unittest + +import coremltools +from coremltools.models.utils import (_get_custom_layer_names, + _replace_custom_layer_name) +from coremltools.proto import Model_pb2 + + +class CustomLayerUtilsTest(unittest.TestCase): + @classmethod + def setUpClass(self): + spec = Model_pb2.Model() + spec.specificationVersion = coremltools.SPECIFICATION_VERSION + + features = ["feature_1", "feature_2"] + output = "output" + for f in features: + input_ = spec.description.input.add() + input_.name = f + input_.type.doubleType.MergeFromString(b"") + + output_ = spec.description.output.add() + output_.name = output + output_.type.doubleType.MergeFromString(b"") + + layer = spec.neuralNetwork.layers.add() + layer.name = "custom1" + layer.input.append("input") + layer.output.append("temp1") + layer.custom.className = "name1" + + layer2 = spec.neuralNetwork.layers.add() + layer2.name = "custom2" + layer2.input.append("temp1") + layer2.output.append("temp2") + layer2.custom.className = "name2" + + layer3 = spec.neuralNetwork.layers.add() + layer3.name = "custom3" + layer3.input.append("temp2") + layer3.output.append("output") + layer3.custom.className = "name1" + + self.spec = spec + + def test_get_custom_names(self): + names = _get_custom_layer_names(self.spec) + self.assertEqual(names, {"name1", "name2"}) + + def test_change_custom_name(self): + _replace_custom_layer_name(self.spec, "name1", "notname1") + names = _get_custom_layer_names(self.spec) + self.assertEqual(names, {"notname1", "name2"}) + # set it back for future tests + _replace_custom_layer_name(self.spec, "notname1", "name1") diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_nn_builder.py b/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_nn_builder.py new file mode 100644 index 00000000..3303e440 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_nn_builder.py @@ -0,0 +1,627 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import numpy as np +import pytest +import unittest + +import coremltools +from coremltools import ComputeUnit +from coremltools.converters.mil.mil.types.type_mapping import np_val_to_py_type +from coremltools.models import MLModel, datatypes +from coremltools.models.neural_network import NeuralNetworkBuilder +from coremltools.models.neural_network.quantization_utils import ( + _convert_array_to_nbit_quantized_bytes, quantize_weights) +from coremltools.models.utils import _is_macos, _macos_version + +MIN_MACOS_VERSION_REQUIRED = (10, 13) +LAYERS_10_14_MACOS_VERSION = (10, 14) +LAYERS_10_15_MACOS_VERSION = (10, 15) + + +@unittest.skipIf( + not _is_macos() or _macos_version() < LAYERS_10_15_MACOS_VERSION, + "Only supported on macOS 10.15+", +) +class ControlFlowCorrectnessTest(unittest.TestCase): + @classmethod + def setup_class(cls): + pass + + def runTest(): + pass + + def _test_model(self, model, input_dict, output_ref, delta=1e-2): + preds = model.predict(input_dict) + for name in output_ref: + ref_val = output_ref[name] + val = preds[name] + self.assertTrue(np.allclose(val, ref_val, rtol=delta)) + + def test_simple_branch(self): + """ Test a simple if-else branch network + """ + input_features = [("data", datatypes.Array(3)), ("cond", datatypes.Array(1))] + output_features = [("output", None)] + + builder_top = NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + layer = builder_top.add_branch("branch_layer", "cond") + + builder_ifbranch = NeuralNetworkBuilder( + input_features=None, + output_features=None, + spec=None, + nn_spec=layer.branch.ifBranch, + ) + builder_ifbranch.add_elementwise( + "mult_layer", + input_names=["data"], + output_name="output", + mode="MULTIPLY", + alpha=10, + ) + builder_elsebranch = NeuralNetworkBuilder( + input_features=None, + output_features=None, + spec=None, + nn_spec=layer.branch.elseBranch, + ) + builder_elsebranch.add_elementwise( + "add_layer", + input_names=["data"], + output_name="output", + mode="ADD", + alpha=10, + ) + coremltools.models.utils.save_spec( + builder_top.spec, "/tmp/simple_branch.mlmodel" + ) + mlmodel = MLModel(builder_top.spec) + + # True branch case + input_dict = { + "data": np.array(range(1, 4), dtype="float"), + "cond": np.array([1], dtype="float"), + } + output_ref = {"output": input_dict["data"] * 10} + self._test_model(mlmodel, input_dict, output_ref) + + # False branch case + input_dict["cond"] = np.array([0], dtype="float") + output_ref["output"] = input_dict["data"] + 10 + self._test_model(mlmodel, input_dict, output_ref) + + def test_simple_loop_fixed_iterations(self): + input_features = [("data", datatypes.Array(1))] + output_features = [("output", None)] + + builder_top = NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder_top.add_copy("copy_1", input_name="data", output_name="output") + + loop_layer = builder_top.add_loop("loop_layer") + loop_layer.loop.maxLoopIterations = 5 + builder_body = NeuralNetworkBuilder( + input_features=None, + output_features=None, + spec=None, + nn_spec=loop_layer.loop.bodyNetwork, + ) + builder_body.add_elementwise( + "add", input_names=["output"], output_name="x", mode="ADD", alpha=2 + ) + + builder_body.add_copy("copy_2", input_name="x", output_name="output") + coremltools.models.utils.save_spec( + builder_top.spec, 
"/tmp/simple_loop_fixed_iterations.mlmodel" + ) + mlmodel = MLModel(builder_top.spec) + + # True branch case + input_dict = {"data": np.array([0], dtype="float")} + output_ref = {"output": np.array([10], dtype="float")} + self._test_model(mlmodel, input_dict, output_ref) + + +@unittest.skipUnless( + _is_macos() and _macos_version() >= LAYERS_10_14_MACOS_VERSION, + "Only supported on macOS 10.14+", +) +class BasicNumericCorrectnessTest_1014NewLayers(unittest.TestCase): + def build_quant_conv_layer( + self, + W=None, + quantization_type="linear", + nbits=8, + quant_scale=None, + quant_bias=None, + quant_lut=None, + output_channels=2, + ): + input_features = [("data", datatypes.Array(1, 2, 2))] + output_features = [("out", datatypes.Array(2, 1, 1))] + builder = NeuralNetworkBuilder(input_features, output_features) + builder.add_convolution( + name="conv", + kernel_channels=1, + output_channels=output_channels, + height=2, + width=2, + stride_height=1, + stride_width=1, + border_mode="valid", + groups=1, + W=W, + b=None, + has_bias=False, + input_name="data", + output_name="out", + quantization_type=quantization_type, + nbits=nbits, + quant_scale=quant_scale, + quant_bias=quant_bias, + quant_lut=quant_lut, + ) + return MLModel(builder.spec, compute_units=ComputeUnit.CPU_ONLY) + + def test_linear_quant_convolution_8bit(self): + W = np.ones((2, 2, 1, 2), dtype=np.uint8) + W[:, :, :, 1] = 2 + mlmodel = self.build_quant_conv_layer( + W=W.flatten().tobytes(), + quantization_type="linear", + nbits=8, + quant_scale=[4.0], + quant_bias=[-2.0], + ) + data = np.ones((1, 2, 2)) + data_dict = {"data": data} + out = mlmodel.predict(data_dict)["out"] + expected_out = np.reshape(np.array([8, 24]), (2, 1, 1)) + self.assertTrue(np.allclose(out, expected_out)) + + def test_linear_quant_convolution_8bit_vector_scalebias(self): + W = np.ones((2, 2, 1, 2), dtype=np.uint8) + W[:, :, :, 1] = 2 + mlmodel = self.build_quant_conv_layer( + W=W.flatten().tobytes(), + quantization_type="linear", + nbits=8, + quant_scale=[4.0, 5.0], + quant_bias=[-2.0, 1.0], + ) + data = np.ones((1, 2, 2)) + data_dict = {"data": data} + out = mlmodel.predict(data_dict)["out"] + expected_out = np.reshape(np.array([8, 44]), (2, 1, 1)) + self.assertTrue(np.allclose(out, expected_out)) + + def test_linear_quant_convolution_8bit_float_scale_and_bias(self): + W = np.array(([[[[1, 248], [248, 248]]]]), dtype=np.uint8) + mlmodel = self.build_quant_conv_layer( + W=W.flatten().tobytes(), + quantization_type="linear", + nbits=8, + quant_scale=[15], + quant_bias=[-3913], + output_channels=1, + ) + data = np.ones((1, 2, 2)) + data_dict = {"data": data} + out = mlmodel.predict(data_dict)["out"] + # Output should be equal to: (scale*(1+248+248+248)+(4*bias)) + expected_out = np.reshape(np.array([-4477]), (1, 1, 1, 1, 1)) + self.assertTrue(np.allclose(out, expected_out)) + + def test_lut_quant_convolution_2bit(self): + W = np.zeros((2, 2, 1, 2), dtype=np.uint8) + W[:, :, :, 0] = 0 + W[:, :, :, 1] = 2 + W = _convert_array_to_nbit_quantized_bytes(W.flatten(), 2).tobytes() + mlmodel = self.build_quant_conv_layer( + W=W, quantization_type="lut", nbits=2, quant_lut=[10.0, 11.0, -3.0, -1.0] + ) + data = np.ones((1, 2, 2)) + data_dict = {"data": data} + out = mlmodel.predict(data_dict)["out"] + expected_out = np.reshape(np.array([40, -12]), (2, 1, 1)) + self.assertTrue(np.allclose(out, expected_out)) + + def test_linear_quant_inner_product_3bit(self): + pytest.xfail("rdar://101370330 ([CI] nnv1 model compression tests are failing after roots is updated)") + W = 
np.reshape(np.arange(6), (2, 3)).astype(np.uint8) + input_features = [("data", datatypes.Array(3))] + output_features = [("probs", None)] + builder = NeuralNetworkBuilder(input_features, output_features) + builder.add_inner_product( + name="ip1", + W=_convert_array_to_nbit_quantized_bytes(W.flatten(), 3).tobytes(), + b=None, + input_channels=3, + output_channels=2, + has_bias=False, + input_name="data", + output_name="probs", + quantization_type="linear", + nbits=3, + quant_scale=[11.0, 2.0], + quant_bias=[-2.0, 10.0], + ) + mlmodel = MLModel(builder.spec) + data = np.array([1.0, 3.0, 5.0]) + data_dict = {"data": data} + probs = mlmodel.predict(data_dict)["probs"] + expected_out = np.array([125, 170]) + self.assertTrue(np.allclose(probs.flatten(), expected_out.flatten())) + + def test_lut_quant_inner_product_1bit(self): + pytest.xfail("rdar://101370330 ([CI] nnv1 model compression tests are failing after roots is updated)") + W = np.zeros((2, 3), dtype=np.uint8) + W[0, :] = [0, 1, 1] + W[1, :] = [1, 0, 0] + input_features = [("data", datatypes.Array(3))] + output_features = [("probs", None)] + builder = NeuralNetworkBuilder(input_features, output_features) + builder.add_inner_product( + name="ip1", + W=_convert_array_to_nbit_quantized_bytes(W.flatten(), 1).tobytes(), + b=None, + input_channels=3, + output_channels=2, + has_bias=False, + input_name="data", + output_name="probs", + quantization_type="lut", + nbits=1, + quant_lut=[5.0, -3.0], + ) + mlmodel = MLModel(builder.spec) + data = np.array([1.0, 3.0, 5.0]) + data_dict = {"data": data} + probs = mlmodel.predict(data_dict)["probs"] + expected_out = np.array([-19, 37]) + self.assertTrue(np.allclose(probs.flatten(), expected_out.flatten())) + + +@unittest.skipUnless( + _is_macos() and _macos_version() >= LAYERS_10_15_MACOS_VERSION, + "Only supported on macOS 10.15+", +) +class BasicNumericCorrectnessTest_1015NewLayers(unittest.TestCase): + def test_linear_quant_batchedmatmul_5bit(self): + W = np.zeros((2, 3), dtype=np.uint8) + W[0, :] = [31, 20, 11] + W[1, :] = [1, 0, 8] + quant_scale = np.reshape(np.array([10.0, 2.0, 3.0]), (1, 3)) + quant_bias = np.reshape(np.array([-2.0, -10.0, 6.0]), (1, 3)) + W_unquantized = np.broadcast_to(quant_scale, (2, 3)) * W + np.broadcast_to( + quant_bias, (2, 3) + ) + bias = np.array([1.0, 2.0, 3.0]) + + input_features = [("data", datatypes.Array(2, 2))] + output_features = [("out", None)] + builder = NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_batched_mat_mul( + name="batched_matmul", + input_names=["data"], + output_name="out", + weight_matrix_rows=2, + weight_matrix_columns=3, + W=_convert_array_to_nbit_quantized_bytes(W.flatten(), 5).tobytes(), + bias=bias, + is_quantized_weight=True, + quantization_type="linear", + nbits=5, + quant_scale=quant_scale.flatten(), + quant_bias=quant_bias.flatten(), + ) + mlmodel = MLModel(builder.spec, compute_units=ComputeUnit.CPU_ONLY) + data = np.zeros((2, 2), dtype=np.float32) + data[0, :] = [5, 6] + data[1, :] = [10, 12] + data_dict = {"data": data} + out = mlmodel.predict(data_dict)["out"] + expected_out = np.matmul(data, W_unquantized) + bias + self.assertTrue(out.shape == expected_out.shape) + self.assertTrue(np.allclose(out.flatten(), expected_out.flatten())) + + def test_linear_quant_batchedmatmul_8bit(self): + np.random.seed(1988) + W = np.random.rand(32, 32) * 2.0 - 1 + bias = np.random.rand(32) + + input_features = [("data", datatypes.Array(2, 32))] + output_features = [("out", None)] + builder = 
NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_batched_mat_mul( + name="batched_matmul", + input_names=["data"], + output_name="out", + weight_matrix_rows=32, + weight_matrix_columns=32, + W=W, + bias=bias, + ) + mlmodel = MLModel(builder.spec, compute_units=ComputeUnit.CPU_ONLY) + q_mlmodel = quantize_weights(mlmodel, 8) + q_spec = q_mlmodel.get_spec() + q_layer = q_spec.neuralNetwork.layers[0].batchedMatmul + + self.assertTrue(len(q_layer.weights.floatValue) == 0) + self.assertTrue(len(q_layer.weights.rawValue) > 0) + + data = np.random.rand(2, 32) + data_dict = {"data": data} + out = q_mlmodel.predict(data_dict)["out"] + expected_out = np.matmul(data, W) + bias + self.assertTrue(out.shape == expected_out.shape) + self.assertTrue(np.allclose(out.flatten(), expected_out.flatten(), atol=0.1)) + + def test_lut_quant_embedding_nd_2bit(self): + embed_size = 2 + vocab_size = 3 + W = np.zeros((embed_size, vocab_size), dtype=np.uint8) + W[:, 0] = [1, 0] + W[:, 1] = [0, 1] + W[:, 2] = [3, 2] + bias = np.array([1.0, 2.0]) + quant_lut = np.array([34.0, 12.0, -6.0, 6.0]) + + input_features = [("data", datatypes.Array(4, 1))] + output_features = [("out", None)] + builder = NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_embedding_nd( + name="embedding_nd", + input_name="data", + output_name="out", + vocab_size=vocab_size, + embedding_size=embed_size, + W=_convert_array_to_nbit_quantized_bytes(W.flatten(), 2).tobytes(), + b=bias, + is_quantized_weight=True, + quantization_type="lut", + nbits=2, + quant_lut=quant_lut, + ) + + mlmodel = MLModel(builder.spec, compute_units=ComputeUnit.CPU_ONLY) + data = np.reshape(np.array([2.0, 2.0, 1.0, 0.0]), (4, 1)) + data_dict = {"data": data} + out = mlmodel.predict(data_dict)["out"] + expected_out = np.zeros((4, embed_size), dtype=np.float32) + expected_out[0, :] = [quant_lut[W[0, 2]], quant_lut[W[1, 2]]] + bias + expected_out[1, :] = [quant_lut[W[0, 2]], quant_lut[W[1, 2]]] + bias + expected_out[2, :] = [quant_lut[W[0, 1]], quant_lut[W[1, 1]]] + bias + expected_out[3, :] = [quant_lut[W[0, 0]], quant_lut[W[1, 0]]] + bias + self.assertTrue(out.shape == expected_out.shape) + self.assertTrue(np.allclose(out.flatten(), expected_out.flatten())) + + + def test_linear_quant_embedding_7bit(self): + embed_size = 2 + vocab_size = 3 + W = np.zeros((embed_size, vocab_size), dtype=np.uint8) + W[:, 0] = [100, 127] + W[:, 1] = [20, 40] + W[:, 2] = [90, 1] + quant_scale = np.reshape(np.array([10.0, 2.0]), (2, 1)) + quant_bias = np.reshape(np.array([-2.0, -10.0]), (2, 1)) + W_unquantized = np.broadcast_to(quant_scale, (2, 3)) * W + np.broadcast_to( + quant_bias, (2, 3) + ) + bias = np.reshape(np.array([1.0, 2.0]), (2, 1)) + W_unquantized = W_unquantized + np.broadcast_to(bias, (2, 3)) + + input_features = [("data", datatypes.Array(4, 1, 1, 1))] + output_features = [("out", None)] + builder = NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_embedding( + name="embed", + W=_convert_array_to_nbit_quantized_bytes(W.flatten(), 7).tobytes(), + b=bias, + input_dim=vocab_size, + output_channels=embed_size, + has_bias=True, + input_name="data", + output_name="out", + is_quantized_weight=True, + quantization_type="linear", + nbits=7, + quant_scale=np_val_to_py_type(quant_scale), + quant_bias=np_val_to_py_type(quant_bias), + ) + + mlmodel = MLModel(builder.spec, compute_units=ComputeUnit.CPU_ONLY) + data = 
np.reshape(np.array([2.0, 2.0, 1.0, 0.0]), (4, 1, 1, 1)) + data_dict = {"data": data} + out = mlmodel.predict(data_dict)["out"] + self.assertTrue(out.shape == (4, embed_size, 1, 1)) + expected_out = np.zeros((4, embed_size), dtype=np.float32) + expected_out[0, :] = W_unquantized[:, 2].flatten() + expected_out[1, :] = W_unquantized[:, 2].flatten() + expected_out[2, :] = W_unquantized[:, 1].flatten() + expected_out[3, :] = W_unquantized[:, 0].flatten() + self.assertTrue(np.allclose(out.flatten(), expected_out.flatten())) + + +@unittest.skipIf( + not _is_macos() or _macos_version() < (10, 13), "Only supported on macOS 10.13+" +) +class BasicNumericCorrectnessTest(unittest.TestCase): + def _build_nn_with_one_ip_layer(self): + input_features = [("data", datatypes.Array(3))] + output_features = [("out", None)] + builder = NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + w = np.random.uniform(-0.5, 0.5, (3, 3)) + builder.add_inner_product( + name="ip1", + W=w, + b=None, + input_channels=3, + output_channels=3, + has_bias=False, + input_name="input", + output_name="hidden", + ) + return builder + + def test_undefined_shape_single_output(self): + W = np.ones((3, 3)) + input_features = [("data", datatypes.Array(3))] + output_features = [("probs", None)] + builder = NeuralNetworkBuilder(input_features, output_features) + builder.add_inner_product( + name="ip1", + W=W, + b=None, + input_channels=3, + output_channels=3, + has_bias=False, + input_name="data", + output_name="probs", + ) + mlmodel = MLModel(builder.spec) + data = np.ones((3,)) + data_dict = {"data": data} + probs = mlmodel.predict(data_dict)["probs"] + self.assertTrue(np.allclose(probs, np.ones(3) * 3)) + + def test_set_input(self): + builder = self._build_nn_with_one_ip_layer() + builder.set_input(input_names=["data_renamed"], input_dims=[(2,)]) + + self.assertEqual( + builder.spec.description.input[0].type.multiArrayType.shape[0], 2 + ) + self.assertEqual(builder.spec.description.input[0].name, "data_renamed") + + def test_set_input_fail(self): + builder = self._build_nn_with_one_ip_layer() + + # fails since input_names and input_dims do not have same size + with self.assertRaises(ValueError): + builder.set_input(input_names=["data_1", "data_2"], input_dims=[(3,)]) + + def test_set_output(self): + builder = self._build_nn_with_one_ip_layer() + builder.set_output(output_names=["out_renamed"], output_dims=[(2,)]) + + self.assertEqual( + builder.spec.description.output[0].type.multiArrayType.shape[0], 2 + ) + self.assertEqual(builder.spec.description.output[0].name, "out_renamed") + + def test_set_output_fail(self): + builder = self._build_nn_with_one_ip_layer() + + # fails since output_names and output_dims do not have same size + with self.assertRaises(ValueError): + builder.set_output(output_names=["out_1", "out_2"], output_dims=[(3,)]) + + def test_invalid_image_preprocessing_params(self): + builder = self._build_nn_with_one_ip_layer() + image_input_names = ["input1", "input2"] + with self.assertRaises(ValueError): + image_scale = {"invalid": 1.0 / 255.0} + builder.set_pre_processing_parameters( + image_input_names=image_input_names, image_scale=image_scale + ) + with self.assertRaises(ValueError): + red_bias = {"invalid": -1} + builder.set_pre_processing_parameters( + image_input_names=image_input_names, red_bias=red_bias + ) + with self.assertRaises(ValueError): + blue_bias = {"invalid": -1} + builder.set_pre_processing_parameters( + image_input_names=image_input_names, 
blue_bias=blue_bias
+            )
+        with self.assertRaises(ValueError):
+            green_bias = {"invalid": -1}
+            builder.set_pre_processing_parameters(
+                image_input_names=image_input_names, green_bias=green_bias
+            )
+        with self.assertRaises(ValueError):
+            gray_bias = {"invalid": -1}
+            builder.set_pre_processing_parameters(
+                image_input_names=image_input_names, gray_bias=gray_bias
+            )
+        with self.assertRaises(ValueError):
+            is_bgr = {"invalid": False}
+            builder.set_pre_processing_parameters(
+                image_input_names=image_input_names, is_bgr=is_bgr
+            )
+
+
+@unittest.skipUnless(
+    _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+"
+)
+class UseFloatArraytypeTest(unittest.TestCase):
+    """Test that the boolean flag `use_float_arraytype` correctly changes the datatype of the
+    network's inputs and outputs and produces a spec that the `MLModel` class can call `predict`
+    with.
+    """
+
+    def _test_use_float_array_helper(self, use_float_arraytype):
+        input_features = [("data", datatypes.Array(3))]
+        output_features = [("probs", None)]
+        builder = NeuralNetworkBuilder(
+            input_features=input_features,
+            output_features=output_features,
+            use_float_arraytype=use_float_arraytype,
+        )
+        weights = np.ones((3, 3))
+        builder.add_inner_product(
+            name="ip1",
+            W=weights,
+            b=None,
+            input_channels=3,
+            output_channels=3,
+            has_bias=False,
+            input_name="data",
+            output_name="probs",
+        )
+        spec = builder.spec
+        array_feature_type = (
+            coremltools.proto.FeatureTypes_pb2.ArrayFeatureType.FLOAT32
+            if use_float_arraytype
+            else coremltools.proto.FeatureTypes_pb2.ArrayFeatureType.DOUBLE
+        )
+        for input in spec.description.input:
+            self.assertEqual(input.type.multiArrayType.dataType, array_feature_type)
+        for output in spec.description.output:
+            self.assertEqual(output.type.multiArrayType.dataType, array_feature_type)
+
+        # Assert that the generated spec is functional
+        mlmodel = MLModel(spec)
+        data = np.ones((3,))
+        data_dict = {"data": data}
+        try:
+            predictions = mlmodel.predict(data_dict)
+        except Exception as e:
+            self.fail(e)
+        self.assertTrue(np.allclose(predictions["probs"], np.ones(3) * 3))
+
+    def test_true_use_float_array(self):
+        # Instruct the builder to use the Float32 datatype for inputs and outputs
+        self._test_use_float_array_helper(True)
+
+    def test_false_use_float_array(self):
+        # Instruct the builder to use its default Double datatype for inputs and outputs
+        self._test_use_float_array_helper(False)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_numpy_nn_layers.py b/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_numpy_nn_layers.py
new file mode 100644
index 00000000..bca34e14
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_numpy_nn_layers.py
@@ -0,0 +1,7086 @@
+# Copyright (c) 2021, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools +import math +import os +import platform +import random +import tempfile +import unittest + +import numpy as np +import pytest + +from coremltools._deps import _HAS_TF_2, MSG_TF2_NOT_FOUND + +if _HAS_TF_2: + import tensorflow as tf + +import torch + +import coremltools +import coremltools.models.datatypes as datatypes +from coremltools import ComputeUnit +from coremltools.converters.mil.mil.ops.defs._utils import aggregated_pad +from coremltools.models import (_MLMODEL_FULL_PRECISION, + _MLMODEL_HALF_PRECISION, neural_network) +from coremltools.models.neural_network import flexible_shape_utils +from coremltools.models.utils import _MODEL_FILE_NAME, _is_macos, _macos_version + +np.random.seed(10) + +MIN_MACOS_VERSION_REQUIRED = (10, 13) +LAYERS_10_15_MACOS_VERSION = (10, 15) +LAYERS_11_0_MACOS_VERSION = (11, 0) + + +def _get_unary_model_spec(x, mode, alpha=1.0): + input_dim = x.shape + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", datatypes.Array(*input_dim))] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + + builder.add_unary( + name="unary", input_name="data", output_name="output", mode=mode, alpha=alpha + ) + return builder.spec + + +class CorrectnessTest(unittest.TestCase): + def runTest(self): + pass + + def _compare_shapes(self, np_preds, coreml_preds): + return np.squeeze(np_preds).shape == np.squeeze(coreml_preds).shape + + def _test_shape_equality(self, np_preds, coreml_preds): + np.testing.assert_array_equal( + np.squeeze(coreml_preds).shape, np.squeeze(np_preds).shape + ) + + def _test_nd_shape_equality(self, np_preds, coreml_preds, shape=()): + if shape: + np.testing.assert_array_equal(coreml_preds.shape, shape) + else: + # check if shape has 0 valued dimension + if np.prod(np_preds.shape) == 0 and np.prod(coreml_preds.shape) == 0: + return + np.testing.assert_array_equal(coreml_preds.shape, np_preds.shape) + + def _compare_predictions(self, np_preds, coreml_preds, delta=0.01): + np_preds = np_preds.flatten() + coreml_preds = coreml_preds.flatten() + max_arr = np.maximum(np.maximum(np_preds, coreml_preds), 1.0) + all_deltas = np.abs(np_preds / max_arr - coreml_preds / max_arr) + max_delta = np.amax(all_deltas) + if max_delta > delta: + return False + return True + + def _test_predictions( + self, + np_preds, + coreml_preds, + delta=0.01, + test_metric="rel_error", + SNR=30, + PSNR=40, + ): + np_preds = np_preds.flatten() + coreml_preds = coreml_preds.flatten() + if test_metric == "rel_error": + max_arr = np.maximum(np.abs(np_preds), 1.0) + all_deltas = np.abs(np_preds / max_arr - coreml_preds / max_arr) + max_delta = np.amax(all_deltas, initial=0) + self.assertLessEqual( + max_delta, + delta, + "Expected %s to be within %s of %s" % (coreml_preds, delta, np_preds), + ) + elif test_metric == "SNR": + noise = np_preds - coreml_preds + noise_var = np.sum(noise ** 2) / len(noise) + 1e-7 + signal_energy = np.sum(np_preds ** 2) / len(np_preds) + max_signal_energy = np.amax(np_preds ** 2) + snr = 10 * np.log10(signal_energy / noise_var) + psnr = 10 * np.log10(max_signal_energy / noise_var) + self.assertGreaterEqual(snr, SNR) + self.assertGreaterEqual(psnr, PSNR) + else: + raise ValueError("Test metric not supported") + + @staticmethod + def _compare_moments(model, inputs, expected, use_cpu_only=True, num_moments=10): + """ + 
This utility function is used for validate random distributions layers. + It validates the first 10 moments of prediction and expected values. + """ + + def get_moment(data, k): + return np.mean(np.power(data - np.mean(data), k)) + + if isinstance(model, str): + model = coremltools.models.MLModel(model) + + if use_cpu_only: + compute_unit=ComputeUnit.CPU_ONLY + else: + compute_unit=ComputeUnit.ALL + + model = coremltools.models.MLModel(model, compute_units=compute_unit) + prediction = model.predict(inputs) + + for output_name in expected: + np_preds = expected[output_name] + coreml_preds = prediction[output_name] + + np_moments = [get_moment(np_preds.flatten(), k) for k in range(num_moments)] + coreml_moments = [ + get_moment(coreml_preds.flatten(), k) for k in range(num_moments) + ] + + np.testing.assert_almost_equal(np_moments, coreml_moments, decimal=2) + + # override expected values to allow element-wise compares + for output_name in expected: + expected[output_name] = prediction[output_name] + + def _test_model( + self, + model, + input, + expected, + model_precision=_MLMODEL_FULL_PRECISION, + useCPUOnly=False, + output_name_shape_dict={}, + validate_shapes_only=False, + test_metric="rel_error", + delta=0.01, + SNR=30, + ): + + if useCPUOnly: + compute_unit = ComputeUnit.CPU_ONLY + else: + compute_unit = ComputeUnit.ALL + + # if we're given a path to a model + if isinstance(model, str): + model = coremltools.models.MLModel(model, compute_units=compute_unit) + # If we're passed in a specification, save out the model and then load it back. + elif isinstance(model, coremltools.proto.Model_pb2.Model): + tmp_model_file = tempfile.NamedTemporaryFile(suffix=_MODEL_FILE_NAME) + coremltools.utils.save_spec(model, tmp_model_file.name) + model = coremltools.models.MLModel( + tmp_model_file.name, compute_units=compute_unit + ) + + # If we want to test the half precision case + if model_precision == _MLMODEL_HALF_PRECISION: + model = coremltools.utils._convert_neural_network_weights_to_fp16(model) + + prediction = model.predict(input) + for output_name in expected: + if self.__class__.__name__ == "SimpleTest": + self._test_shape_equality( + expected[output_name], prediction[output_name] + ) + else: + if output_name in output_name_shape_dict: + output_shape = output_name_shape_dict[output_name] + else: + output_shape = [] + + if len(output_shape) == 0 and len(expected[output_name].shape) == 0: + output_shape = (1,) + + self._test_nd_shape_equality( + expected[output_name], prediction[output_name], output_shape + ) + + if not validate_shapes_only: + self._test_predictions( + expected[output_name], + prediction[output_name], + delta=delta, + test_metric=test_metric, + SNR=SNR, + ) + + +@unittest.skipIf( + not _is_macos() or _macos_version() < MIN_MACOS_VERSION_REQUIRED, + "macOS 10.13+ is required. 
Skipping tests.", +) +class SimpleTest(CorrectnessTest): + def test_tiny_upsample_linear_mode(self): + input_dim = (1, 1, 3) # (C,H,W) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + builder.add_upsample( + name="upsample", + scaling_factor_h=2, + scaling_factor_w=3, + input_name="data", + output_name="output", + mode="BILINEAR", + ) + + input = {"data": np.reshape(np.array([1.0, 2.0, 3.0]), (1, 1, 3))} + expected = { + "output": np.array( + [ + [1, 1.333, 1.666, 2, 2.333, 2.666, 3, 3, 3], + [1, 1.333, 1.6666, 2, 2.33333, 2.6666, 3, 3, 3], + ] + ) + } + + self._test_model(builder.spec, input, expected) + self.assertEqual(len(input_dim), builder._get_rank("output")) + + def test_LRN(self): + input_dim = (1, 3, 3) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", datatypes.Array(*input_dim))] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + builder.add_lrn( + name="lrn", + input_name="data", + output_name="output", + alpha=2, + beta=3, + local_size=1, + k=8, + ) + + input = {"data": np.ones((1, 3, 3))} + expected = {"output": 1e-3 * np.ones((1, 3, 3))} + + self._test_model(builder.spec, input, expected) + self.assertEqual(len(input_dim), builder._get_rank("output")) + + def test_MVN(self): + input_dim = (2, 2, 2) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", datatypes.Array(*input_dim))] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + builder.add_mvn( + name="mvn", + input_name="data", + output_name="output", + across_channels=False, + normalize_variance=False, + ) + + input = {"data": np.reshape(np.arange(8, dtype=np.float32), (2, 2, 2))} + expected = { + "output": np.reshape( + np.arange(8) - np.array([1.5, 1.5, 1.5, 1.5, 5.5, 5.5, 5.5, 5.5]), + (2, 2, 2), + ) + } + + self._test_model(builder.spec, input, expected) + + def test_L2_normalize(self): + input_dim = (1, 2, 2) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", datatypes.Array(*input_dim))] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + builder.add_l2_normalize(name="mvn", input_name="data", output_name="output") + + input = {"data": np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))} + expected = { + "output": np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2)) + / np.sqrt(14) + } + + self._test_model(builder.spec, input, expected) + + def test_unary_sqrt(self): + x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2)) + input = {"data": x} + expected = {"output": np.sqrt(x)} + spec = _get_unary_model_spec(x, "sqrt") + self._test_model(spec, input, expected) + + def test_unary_rsqrt(self): + x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2)) + input = {"data": x} + expected = {"output": 1 / np.sqrt(x)} + spec = _get_unary_model_spec(x, "rsqrt") + self._test_model(spec, input, expected) + + def test_unary_inverse(self): + x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2)) + input = {"data": x} + expected = {"output": 1 / x} + spec = _get_unary_model_spec(x, "inverse") + self._test_model(spec, input, expected) + + def test_unary_power(self): + x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2)) + input = {"data": x} + expected = {"output": x ** 3} + spec = _get_unary_model_spec(x, "power", 3) + self._test_model(spec, input, 
expected) + + def test_unary_exp(self): + x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2)) + input = {"data": x} + expected = {"output": np.exp(x)} + spec = _get_unary_model_spec(x, "exp") + self._test_model(spec, input, expected) + + def test_unary_log(self): + x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2)) + input = {"data": x} + expected = {"output": np.log(x)} + spec = _get_unary_model_spec(x, "log") + self._test_model(spec, input, expected) + + def test_unary_abs(self): + x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2)) + input = {"data": x} + expected = {"output": np.abs(x)} + spec = _get_unary_model_spec(x, "abs") + self._test_model(spec, input, expected) + + def test_unary_threshold(self): + x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2)) + input = {"data": x} + expected = {"output": np.maximum(x, 2)} + spec = _get_unary_model_spec(x, "threshold", 2) + self._test_model(spec, input, expected) + + def test_split(self): + input_dim = (9, 2, 2) + x = np.random.rand(*input_dim) + + input_features = [("data", datatypes.Array(*input_dim))] + output_names = [] + output_features = [] + for i in range(3): + out = "out_" + str(i) + output_names.append(out) + output_features.append((out, None)) + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + builder.add_split(name="split", input_name="data", output_names=output_names) + + input = {"data": x} + expected = {"out_0": x[0:3, :, :], "out_1": x[3:6, :, :], "out_2": x[6:9, :, :]} + + self._test_model(builder.spec, input, expected) + for output_ in output_names: + self.assertEqual(len(input_dim), builder._get_rank(output_)) + + def test_scale_constant(self): + input_dim = (1, 2, 2) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + builder.add_scale( + name="scale", + W=5, + b=45, + has_bias=True, + input_name="data", + output_name="output", + ) + + x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2)) + input = {"data": x} + expected = {"output": 5 * x + 45} + + self._test_model(builder.spec, input, expected) + + def test_scale_matrix(self): + input_dim = (1, 2, 2) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + W = np.reshape(np.arange(5, 9), (1, 2, 2)) + + builder.add_scale( + name="scale", + W=W, + b=None, + has_bias=False, + input_name="data", + output_name="output", + shape_scale=[1, 2, 2], + ) + + x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2)) + input = {"data": x} + expected = {"output": W * x} + + self._test_model(builder.spec, input, expected) + + def test_bias_constant(self): + input_dim = (1, 2, 2) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + builder.add_bias(name="bias", b=45, input_name="data", output_name="output") + + x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2)) + input = {"data": x} + expected = {"output": x + 45} + + self._test_model(builder.spec, input, expected) + + def test_bias_matrix(self): + input_dim = (1, 2, 2) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + b = 
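# --- Editor's sketch: test_split's expectations above are an even split
# along the channel axis; numpy reproduces them directly:
import numpy as np
x = np.random.rand(9, 2, 2)
out_0, out_1, out_2 = np.split(x, 3, axis=0)
assert out_0.shape == (3, 2, 2) and np.array_equal(out_1, x[3:6, :, :])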
np.reshape(np.arange(5, 9), (1, 2, 2)) + + builder.add_bias( + name="bias", + b=b, + input_name="data", + output_name="output", + shape_bias=[1, 2, 2], + ) + + x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2)) + input = {"data": x} + expected = {"output": x + b} + + self._test_model(builder.spec, input, expected) + + def test_load_constant(self, model_precision=_MLMODEL_FULL_PRECISION): + input_dim = (1, 2, 2) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + b = np.reshape(np.arange(5, 9), (1, 2, 2)) + + builder.add_load_constant( + name="load_constant", output_name="bias", constant_value=b, shape=[1, 2, 2] + ) + builder.add_elementwise( + name="add", input_names=["data", "bias"], output_name="output", mode="ADD" + ) + + x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2)) + input = {"data": x} + expected = {"output": x + b} + + self._test_model(builder.spec, input, expected, model_precision) + self.assertEqual(len(input_dim), builder._get_rank("output")) + + def test_load_constant_half_precision(self): + self.test_load_constant(model_precision=_MLMODEL_HALF_PRECISION) + + def test_min(self): + input_dim = (1, 2, 2) + input_features = [ + ("data_0", datatypes.Array(*input_dim)), + ("data_1", datatypes.Array(*input_dim)), + ] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + + builder.add_elementwise( + name="min", + input_names=["data_0", "data_1"], + output_name="output", + mode="MIN", + ) + x1 = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2)) + x2 = np.reshape(np.arange(2, 6, dtype=np.float32), (1, 2, 2)) + + input = {"data_0": x1, "data_1": x2} + expected = {"output": np.minimum(x1, x2)} + + self._test_model(builder.spec, input, expected) + self.assertEqual(len(input_dim), builder._get_rank("output")) + + def test_conv_same_padding(self): + input_dim = (10, 15, 15) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + W = np.random.rand(3, 3, 10, 20) + + builder.add_convolution( + name="conv", + kernel_channels=10, + output_channels=20, + height=3, + width=3, + stride_height=2, + stride_width=2, + border_mode="same", + groups=1, + W=W, + b=None, + has_bias=False, + input_name="data", + output_name="output", + same_padding_asymmetry_mode="TOP_LEFT_HEAVY", + ) + + x = np.random.rand(*input_dim) + input = {"data": x} + expected = {"output": np.random.rand(20, 8, 8)} + + self._test_model(builder.spec, input, expected, validate_shapes_only=True) + self.assertEqual(len(input_dim), builder._get_rank("output")) + + def test_deconv_valid_padding(self): + input_dim = (10, 15, 15) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + W = np.random.rand(3, 3, 10, 20) + + builder.add_convolution( + name="deconv", + kernel_channels=10, + output_channels=20, + height=3, + width=3, + stride_height=2, + stride_width=2, + border_mode="valid", + groups=1, + W=W, + b=None, + has_bias=False, + is_deconv=True, + input_name="data", + output_name="output", + padding_top=2, + padding_bottom=3, + padding_left=2, + padding_right=3, + ) + + x = np.random.rand(*input_dim) + input = {"data": x} + expected = {"output": np.random.rand(20, 26, 
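# --- Editor's sketch: for the same-padding conv test above, border_mode
# "same" gives a spatial output size of ceil(input / stride) regardless of
# kernel size, which is where the declared (20, 8, 8) shape comes from:
import math
assert math.ceil(15 / 2) == 8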
26)} + + self._test_model(builder.spec, input, expected, validate_shapes_only=True) + + def test_deconv_non_unit_groups(self): + input_dim = (16, 15, 15) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + + W = np.random.rand(3, 3, 16, 5) + builder.add_convolution( + name="deconv", + kernel_channels=16, + output_channels=20, + height=3, + width=3, + stride_height=2, + stride_width=2, + border_mode="valid", + groups=4, + W=W, + b=None, + has_bias=False, + is_deconv=True, + input_name="data", + output_name="output", + padding_top=2, + padding_bottom=3, + padding_left=2, + padding_right=3, + ) + + x = np.random.rand(*input_dim) + input = {"data": x} + expected = {"output": np.random.rand(20, 26, 26)} + + self._test_model(builder.spec, input, expected, validate_shapes_only=True) + + def test_linear_activation(self): + input_dim = (10, 15, 15) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + builder.add_activation( + name="activation", + non_linearity="LINEAR", + input_name="data", + output_name="output", + params=[34.0, 67.0], + ) + + x = np.random.rand(*input_dim) + input = {"data": x} + expected = {"output": 34.0 * x + 67.0} + + self._test_model(builder.spec, input, expected) + + def test_padding_constant(self): + input_dim = (1, 2, 3) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + builder.add_padding( + name="pad", + left=1, + right=0, + top=2, + bottom=0, + value=-1, + input_name="data", + output_name="output", + ) + + x = np.reshape(np.array([[1, 2, 3], [4, 5, 6]]), (1, 2, 3)).astype(np.float32) + input = {"data": x} + y = np.reshape( + np.array( + [[-1, -1, -1, -1], [-1, -1, -1, -1], [-1, 1, 2, 3], [-1, 4, 5, 6]] + ), + (1, 4, 4), + ).astype(np.float32) + expected = {"output": y} + + self._test_model(builder.spec, input, expected) + + def test_padding_replication(self): + input_dim = (1, 2, 3) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + builder.add_padding( + name="pad", + left=1, + top=2, + input_name="data", + output_name="output", + padding_type="replication", + ) + + x = np.reshape(np.array([[1, 2, 3], [4, 5, 6]]), (1, 2, 3)).astype(np.float32) + input = {"data": x} + y = np.reshape( + np.array([[1, 1, 2, 3], [1, 1, 2, 3], [1, 1, 2, 3], [4, 4, 5, 6]]), + (1, 4, 4), + ).astype(np.float32) + expected = {"output": y} + + self._test_model(builder.spec, input, expected) + + def test_reshape_target_shape_3(self): + input_dim = (1, 2, 5) # (C,H,W) + target_dim = (10, 1, 1) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + builder.add_reshape( + name="reshape", + input_name="data", + output_name="output", + target_shape=target_dim, + mode=0, + ) + + x = np.random.rand(*input_dim) + input = {"data": x} + expected = {"output": np.reshape(x, (10, 1, 1))} + + self._test_model(builder.spec, input, expected) + self.assertEqual(len(target_dim), builder._get_rank("output")) + + def test_reshape_target_shape_4(self): + input_dim = (1, 2, 
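# --- Editor's sketch: both deconv tests above declare 26x26 outputs; that
# matches the standard transposed-convolution size formula
# out = (in - 1) * stride + kernel - pad_begin - pad_end:
in_hw, stride, kernel, pad_begin, pad_end = 15, 2, 3, 2, 3
assert (in_hw - 1) * stride + kernel - pad_begin - pad_end == 26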
5) # (C,H,W) + target_dim = (1, 10, 1, 1) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + builder.add_reshape( + name="reshape", + input_name="data", + output_name="output", + target_shape=target_dim, + mode=0, + ) + + x = np.random.rand(*input_dim) + input = {"data": x} + expected = {"output": np.reshape(x, (1, 10, 1, 1))} + + self._test_model(builder.spec, input, expected) + self.assertEqual(len(target_dim), builder._get_rank("output")) + + def test_bias_matrix_cpu(self): + input_dim = (1, 2, 2) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + b = np.reshape(np.arange(5, 9), (1, 2, 2)) + + builder.add_bias( + name="bias", + b=b, + input_name="data", + output_name="output", + shape_bias=[1, 2, 2], + ) + + x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2)) + input = {"data": x} + expected = {"output": x + b} + + self._test_model(builder.spec, input, expected, useCPUOnly=True) + + def test_linear_activation_cpu(self): + input_dim = (10, 15, 15) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + builder.add_activation( + name="activation", + non_linearity="LINEAR", + input_name="data", + output_name="output", + params=[34.0, 67.0], + ) + + x = np.random.rand(*input_dim) + input = {"data": x} + expected = {"output": 34.0 * x + 67.0} + + self._test_model(builder.spec, input, expected, useCPUOnly=True) + + +@unittest.skipIf( + not _is_macos() or _macos_version() < LAYERS_10_15_MACOS_VERSION, + "macOS 10.15+ required. 
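# --- Editor's sketch: with mode=0 the reshape layer behaves like numpy's
# default C-order reshape (as the expectations above assume), so element
# order is preserved across rank changes:
import numpy as np
x = np.arange(10).reshape(1, 2, 5)
assert np.array_equal(x.reshape(1, 10, 1, 1).ravel(), x.ravel())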
Skipping tests.", +) +class NewLayersSimpleTest(CorrectnessTest): + def test_shape_flexibility_range(self): + + input_features = [("data", datatypes.Array(*(3, 4)))] + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + builder.add_sin(name="sin", input_name="data", output_name="output") + spec = builder.spec + + flexible_shape_utils.set_multiarray_ndshape_range( + spec, feature_name="data", lower_bounds=[1, 1], upper_bounds=[-1, 5] + ) + + shapes = [(3, 4), (1, 5), (60, 5), (22, 4), (5, 3)] + for s in shapes: + x = np.random.rand(*s) + expected = {"output": np.sin(x)} + self._test_model(spec, {"data": x}, expected, useCPUOnly=True) + + def test_shape_flexibility_enumeration(self, rank=4): + default_shape = tuple(np.random.randint(1, 15, size=rank)) + input_features = [("data", datatypes.Array(*default_shape))] + builder = neural_network.NeuralNetworkBuilder( + input_features=input_features, + output_features=[("output", None)], + disable_rank5_shape_mapping=True, + ) + builder.add_sin(name="sin", input_name="data", output_name="output") + spec = builder.spec + + shapes = [ + tuple(np.random.randint(1, 15, size=rank)), + tuple(np.random.randint(1, 15, size=rank)), + ] + flexible_shape_utils.add_multiarray_ndshape_enumeration( + spec, feature_name="data", enumerated_shapes=shapes + ) + + shapes.append(default_shape) + for s in shapes: + x = np.random.rand(*s) + expected = {"output": np.sin(x)} + self._test_model(spec, {"data": x}, expected, useCPUOnly=True) + + def test_shape_flexibility_enumeration_rank3(self): + self.test_shape_flexibility_enumeration(rank=3) + + def test_shape_flexibility_enumeration_rank2(self): + self.test_shape_flexibility_enumeration(rank=2) + + def test_transpose_cpu(self): + for rank in range(1, 6): + axes = np.random.permutation(rank) + axes = [ + axis - rank if np.random.choice([True, False]) else axis + for axis in axes + ] + input_shape = np.random.randint(low=2, high=6, size=rank) + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_transpose( + name="TransposeND", axes=axes, input_name="data", output_name="output" + ) + + x = np.random.rand(*input_shape) + input = {"data": x} + expected = {"output": np.transpose(x, axes)} + + self._test_model(builder.spec, input, expected, useCPUOnly=True) + + def test_dynamic_weight_conv(self): + + input_dim = (1, 3, 16, 16) + # weight layout: (output_channels, kernel_channels, height, width) + weight_dim = (4, 3, 3, 3) + output_dim = (1, 4, 14, 14) + + kernel_channels = input_dim[0] + output_channels, kernel_channels, height, width = weight_dim + + input_features = [ + ("input", datatypes.Array(*input_dim)), + ("weight", datatypes.Array(*weight_dim)), + ] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_convolution( + name="two_input_conv_layer", + kernel_channels=kernel_channels, + output_channels=output_channels, + height=height, + width=width, + stride_height=1, + stride_width=1, + border_mode="valid", + groups=1, + W=None, + b=None, + has_bias=False, + input_name=["input", "weight"], + output_name="output", + ) + + # Assigning everything to ones should cover the execution path + # and engine failures, but is not a complete check on 
numerics. + input_val = np.ones(input_dim) + weight_val = np.ones(weight_dim) + expected = np.ones(output_dim) * 27 + + feed_dict = {"input": input_val, "weight": weight_val} + expected = {"output": expected} + + self._test_model(builder.spec, feed_dict, expected, useCPUOnly=True) + self._test_model(builder.spec, feed_dict, expected, useCPUOnly=False) + + def test_batched_mat_mul_cpu(self, cpu_only=True): + a_shapes = [ + (10,), + (4, 10), + (10,), + (10,), + (2, 3), + (1, 3, 4), + (1, 3, 1, 2, 3), + (2, 3, 1, 3, 4), + ] + b_shapes = [ + (10,), + (10,), + (10, 3), + (2, 10, 3), + (3, 4), + (3, 2, 4, 5), + (1, 4, 3, 2), + (2, 1, 2, 4, 5), + ] + out_shapes = [ + (1, 1), + (4, 1), + (1, 3), + (2, 1, 3), + (2, 4), + (3, 2, 3, 5), + (1, 3, 4, 2, 2), + (2, 3, 2, 3, 5), + ] + + for a_shape, b_shape, outShape in zip(a_shapes, b_shapes, out_shapes): + input_shapes = [a_shape, b_shape] + input_features = [ + ("A", datatypes.Array(*input_shapes[0])), + ("B", datatypes.Array(*input_shapes[1])), + ] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_batched_mat_mul( + name="batched_mat_mul", + input_names=["A", "B"], + output_name="output", + transpose_a=False, + transpose_b=False, + ) + + a = np.random.rand(*input_shapes[0]) + b = np.random.rand(*input_shapes[1]) + input_ = {"A": a, "B": b} + expected = {"output": np.array(np.matmul(a, b))} + shape_dict = {"output": outShape} + self._test_model( + builder.spec, + input_, + expected, + useCPUOnly=cpu_only, + output_name_shape_dict=shape_dict, + ) + self.assertEqual(len(outShape), builder._get_rank("output")) + + def test_batched_mat_mul_gpu(self): + self.test_batched_mat_mul_cpu(cpu_only=False) + + def test_batched_mat_mul_with_transposes_cpu(self, cpu_only=True): + for transpose_a, transpose_b in itertools.product([True, False], [True, False]): + a_shape = (3, 4) + b_shape = (4, 5) + a_shape = a_shape[::-1] if transpose_a else a_shape + b_shape = b_shape[::-1] if transpose_b else b_shape + input_shapes = [a_shape, b_shape] + input_features = [ + ("A", datatypes.Array(*input_shapes[0])), + ("B", datatypes.Array(*input_shapes[1])), + ] + + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_batched_mat_mul( + name="BatchedMatMul", + input_names=["A", "B"], + output_name="output", + transpose_a=transpose_a, + transpose_b=transpose_b, + ) + a = np.random.rand(*input_shapes[0]) + b = np.random.rand(*input_shapes[1]) + inputs = {"A": a, "B": b} + a = a.T if transpose_a else a + b = b.T if transpose_b else b + expected = {"output": np.matmul(a, b)} + + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + + def test_batched_mat_mul_with_transposes_gpu(self): + self.test_batched_mat_mul_with_transposes_cpu(cpu_only=False) + + def test_batched_mat_mul_single_input_cpu( + self, model_precision=_MLMODEL_FULL_PRECISION, cpu_only=True + ): + X1 = 11 + X2 = 23 + W = np.random.rand(X1, X2) + bias = np.random.rand(X2) + input_shapes = [ + (X1,), + (5, X1), + (2, 3, X1), + (4, 1, X1), + (12, 5, 8, X1), + (2, 3, 1, 5, X1), + ] + for input_shape in input_shapes: + x = np.random.rand(*input_shape) + np_out = np.matmul(x, W) + bias + expected = {"output": np_out} + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + builder = 
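# --- Editor's sketch: the out_shapes listed above follow numpy's matmul
# broadcasting; e.g. (1, 3, 1, 2, 3) @ (1, 4, 3, 2) broadcasts the batch
# dims to (1, 3, 4) and multiplies (2, 3) @ (3, 2):
import numpy as np
a = np.random.rand(1, 3, 1, 2, 3)
b = np.random.rand(1, 4, 3, 2)
assert np.matmul(a, b).shape == (1, 3, 4, 2, 2)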
neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_batched_mat_mul( + name="batched_mat_mul", + input_names=["data"], + output_name="output", + weight_matrix_rows=X1, + weight_matrix_columns=X2, + W=W, + bias=bias, + ) + inputs = {"data": x} + + self._test_model( + builder.spec, + inputs, + expected, + model_precision=model_precision, + useCPUOnly=cpu_only, + ) + + def test_batched_mat_mul_single_input_half_precision_cpu(self): + self.test_batched_mat_mul_single_input_cpu( + model_precision=_MLMODEL_HALF_PRECISION, cpu_only=True + ) + + def test_batched_mat_mul_single_input_gpu(self): + self.test_batched_mat_mul_single_input_cpu( + model_precision=_MLMODEL_FULL_PRECISION, cpu_only=False + ) + + def test_embedding_nd_cpu( + self, model_precision=_MLMODEL_FULL_PRECISION, use_cpu_only=True + ): + vocab_size = 10 + embedding_size = 19 + W = np.random.rand(embedding_size, vocab_size) + input_shapes = [(5, 1), (2, 3, 1), (4, 1, 1), (12, 5, 8, 1), (2, 3, 1, 5, 1)] + for input_shape in input_shapes: + x = np.random.randint(vocab_size, size=input_shape) + + np_out = np.take(np.transpose(W), np.squeeze(x, axis=-1), axis=0) + expected = {"output": np_out} + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_embedding_nd( + name="embedding_nd", + input_name="data", + output_name="output", + vocab_size=vocab_size, + embedding_size=embedding_size, + W=W, + ) + + input = {"data": x.astype(np.float32)} + + self._test_model( + builder.spec, + input, + expected, + model_precision=model_precision, + useCPUOnly=use_cpu_only, + ) + + def test_embedding_nd_half_precision_cpu(self): + self.test_embedding_nd_cpu( + model_precision=_MLMODEL_HALF_PRECISION, use_cpu_only=True + ) + + def test_embedding_nd_GPU(self): + self.test_embedding_nd_cpu( + model_precision=_MLMODEL_FULL_PRECISION, use_cpu_only=False + ) + + def test_embedding_nd_half_precision_GPU(self): + self.test_embedding_nd_cpu( + model_precision=_MLMODEL_HALF_PRECISION, use_cpu_only=False + ) + + def test_softmax_nan_bug_cpu(self): + input_shape = [2, 2] + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + for axis in [0, 1]: + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_softmax_nd( + name="softmax_nd", input_name="data", output_name="output", axis=axis + ) + + x = np.array([[0.5, 0.5], [1e8, 1e8]]) + input = {"data": x} + y = np.exp(x - np.max(x, axis=axis, keepdims=True)) + y = y / np.sum(y, axis=axis, keepdims=True) + expected = {"output": y} + + self._test_model(builder.spec, input, expected, useCPUOnly=True) + + def test_softmax_nd_cpu(self, cpu_only=True): + for rank in range(1, 6): + for axis in range(-rank, rank): + input_shape = np.random.randint(low=2, high=5, size=rank) + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_softmax_nd( + name="softmax_nd", + input_name="data", + output_name="output", + axis=axis, + ) + + x = np.random.rand(*input_shape) + input = {"data": x} + y = np.exp(x - np.max(x, axis=axis, keepdims=True)) + y = y / np.sum(y, axis=axis, 
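# --- Editor's sketch: the max-subtraction in the reference above is the
# standard overflow guard; a naive softmax of [1e8, 1e8] computes
# exp(1e8) = inf and yields NaNs, which is what test_softmax_nan_bug_cpu
# pins down:
import numpy as np

def stable_softmax(x, axis):
    z = np.exp(x - np.max(x, axis=axis, keepdims=True))
    return z / np.sum(z, axis=axis, keepdims=True)

assert not np.isnan(stable_softmax(np.array([[0.5, 0.5], [1e8, 1e8]]), 0)).any()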
keepdims=True) + expected = {"output": y} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_softmax_nd_gpu(self): + self.test_softmax_nd_cpu(cpu_only=False) + + def test_concat_nd_cpu(self, cpu_only=True): + for rank in range(1, 6): + for axis in range(-rank, rank): + n_inputs = np.random.choice(range(2, 5)) + output_shape = np.random.randint(low=2, high=5, size=rank) + output_shape[axis] = 0 + input_shapes = [] + input_features = [] + input_names = [] + for _ in range(n_inputs): + input_shapes.append(np.copy(output_shape)) + input_shapes[-1][axis] = np.random.choice(range(2, 8)) + output_shape[axis] += input_shapes[-1][axis] + for i, input_dim in enumerate(input_shapes): + input_name = "input_%s" % str(i) + input_names.append(input_name) + input_features.append((input_name, datatypes.Array(*input_dim))) + + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_concat_nd( + name="concat_nd", + input_names=input_names, + output_name="output", + axis=axis, + ) + + input_tensors = [] + for input_dim in input_shapes: + input_tensors.append(np.random.rand(*input_dim)) + input = dict(zip(input_names, input_tensors)) + expected = {"output": np.concatenate(input_tensors, axis)} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_concat_nd_gpu(self): + self.test_concat_nd_cpu(cpu_only=False) + + def test_fill_like_cpu(self, cpu_only=True): + + for rank in range(1, 6): + target_shape = np.random.randint(low=2, high=6, size=rank) + value = float(np.random.rand()) + + input_features = [("tensor", datatypes.Array(*target_shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_fill_like( + name="fill_like", input_name="tensor", output_name="output", value=value + ) + + tensor = np.random.rand(*target_shape) + input = {"tensor": tensor} + expected = {"output": np.zeros(target_shape) + value} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_fill_like_gpu(self): + self.test_fill_like_cpu(cpu_only=False) + + def test_fill_static_cpu(self, cpu_only=True): + + for rank in range(1, 6): + shape = np.random.randint(low=2, high=8, size=rank) + + input_features = [("data", datatypes.Array(*shape))] + value = float(np.random.rand()) + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + builder.add_fill_static( + name="fill_static", + output_name="tmp", + output_shape=list(shape), + value=value, + ) + + builder.add_elementwise("add_layer", ["data", "tmp"], "output", mode="ADD") + + data = np.random.rand(*shape) + input = {"data": data} + expected = {"output": data + value} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + self.assertEqual(len(shape), builder._get_rank("output")) + + def test_fill_static_gpu(self): + self.test_fill_static_cpu(cpu_only=False) + + def test_fill_dynamic_cpu(self, cpu_only=True): + + for rank in range(1, 6): + input_shape = np.random.randint(low=2, high=8, size=rank) + value = float(np.random.rand()) + + input_features = [("shape", datatypes.Array(len(input_shape)))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_fill_dynamic( + name="fill_dynamic", + input_name="shape", + 
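# --- Editor's sketch: fill_like above mirrors numpy's full_like -- same
# shape as the reference tensor, every element set to `value`; the test's
# np.zeros(shape) + value expectation is the same quantity:
import numpy as np
t = np.random.rand(2, 3)
assert np.array_equal(np.full_like(t, 0.25), np.zeros(t.shape) + 0.25)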
output_name="output", + value=value, + ) + + input = {"shape": np.array(input_shape, dtype="float")} + expected = {"output": np.zeros(input_shape) + value} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + self.assertEqual(builder._get_rank("output"), -1) + + def test_fill_dynamic_gpu(self): + self.test_fill_dynamic_cpu(cpu_only=False) + + def test_broadcast_to_like_cpu(self, cpu_only=True): + + for rank in range(1, 6): + input_shape = np.random.randint(low=2, high=8, size=rank) + mask = [np.random.choice([True, False, False]) for _ in range(rank)] + input_shape = np.where(mask, 1, input_shape) + + target_rank = np.random.randint(low=rank, high=6) + target_shape = [ + np.random.randint(low=2, high=8) + if (-i > rank or input_shape[i] == 1) + else input_shape[i] + for i in range(-1, -target_rank - 1, -1) + ][::-1] + + input_features = [ + ("data", datatypes.Array(*input_shape)), + ("tensor", datatypes.Array(*target_shape)), + ] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_broadcast_to_like( + name="broadcast_to_like", + input_names=["data", "tensor"], + output_name="output", + ) + + data = np.random.rand(*input_shape) + tensor = np.random.rand(*target_shape) + inputs = {"data": data, "tensor": tensor} + expected = {"output": np.broadcast_to(data, target_shape)} + + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + + def test_broadcast_to_like_gpu(self): + self.test_broadcast_to_like_cpu(cpu_only=False) + + def test_broadcast_to_static_cpu(self, cpu_only=True): + + for rank in range(1, 6): + input_shape = np.random.randint(low=2, high=8, size=rank) + mask = [np.random.choice([True, False, False]) for _ in range(rank)] + input_shape = np.where(mask, 1, input_shape) + + target_rank = np.random.randint(low=rank, high=6) + target_shape = [ + np.random.randint(low=2, high=8) + if (-i > rank or input_shape[i] == 1) + else input_shape[i] + for i in range(-1, -target_rank - 1, -1) + ][::-1] + + input_features = [("data", datatypes.Array(*input_shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_broadcast_to_static( + name="broadcast_to_static", + input_name="data", + output_name="output", + output_shape=list(target_shape), + ) + + data = np.random.rand(*input_shape) + input = {"data": data} + expected = {"output": np.broadcast_to(data, target_shape)} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + self.assertEqual(target_rank, builder._get_rank("output")) + + def test_broadcast_to_static_gpu(self): + self.test_broadcast_to_static_cpu(cpu_only=False) + + def test_broadcast_to_dynamic_cpu(self, cpu_only=True): + + for rank in range(1, 6): + input_shape = np.random.randint(low=2, high=8, size=rank) + mask = [np.random.choice([True, False, False]) for _ in range(rank)] + input_shape = np.where(mask, 1, input_shape) + + target_rank = np.random.randint(low=rank, high=6) + target_shape = [ + np.random.randint(low=2, high=8) + if (-i > rank or input_shape[i] == 1) + else input_shape[i] + for i in range(-1, -target_rank - 1, -1) + ][::-1] + + input_features = [ + ("data", datatypes.Array(*input_shape)), + ("shape", datatypes.Array(len(target_shape))), + ] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_broadcast_to_dynamic( + 
name="broadcast_to_dynamic", + input_names=["data", "shape"], + output_name="output", + ) + + data = np.random.rand(*input_shape) + inputs = {"data": data, "shape": np.array(target_shape, dtype="float")} + expected = {"output": np.broadcast_to(data, target_shape)} + + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + self.assertEqual(builder._get_rank("output"), -1) + + def test_broadcast_to_dynamic_gpu(self): + self.test_broadcast_to_dynamic_cpu(cpu_only=False) + + # Test Rank being set to unknown when one of the input rank is unknown + # For max rank case + def test_unknown_rank(self, cpu_only=True): + + for rank in range(1, 6): + input_shape = np.random.randint(low=2, high=8, size=rank) + mask = [np.random.choice([True, False, False]) for _ in range(rank)] + input_shape = np.where(mask, 1, input_shape) + + target_rank = np.random.randint(low=rank, high=6) + target_shape = [ + np.random.randint(low=2, high=8) + if (-i > rank or input_shape[i] == 1) + else input_shape[i] + for i in range(-1, -target_rank - 1, -1) + ][::-1] + + input_features = [ + ("x", datatypes.Array(*input_shape)), + ("shape", datatypes.Array(len(target_shape))), + ] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_broadcast_to_dynamic( + name="broadcast_to_dynamic", input_names=["x", "shape"], output_name="y" + ) + + condition = np.random.randint(0, 2, input_shape).astype(np.float32) + builder.add_load_constant_nd( + name="load_constant_condition", + output_name="condition", + constant_value=condition, + shape=input_shape, + ) + + builder.add_where_broadcastable( + name="where", input_names=["condition", "x", "y"], output_name="output" + ) + + self.assertEqual(builder._get_rank("output"), -1) + + def test_trigonometry_cpu(self, cpu_only=True): + + ops = [ + "sin", + "cos", + "tan", + "asin", + "acos", + "atan", + "sinh", + "cosh", + "tanh", + "asinh", + "acosh", + "atanh", + ] + + for op in ops: + for rank in range(1, 6): + shape = np.random.randint(low=2, high=8, size=rank) + input_features = [("data", datatypes.Array(*shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + x = np.random.rand(*shape) + + if op == "sin": + builder.add_sin(name=op, input_name="data", output_name="output") + expected = {"output": np.sin(x)} + elif op == "cos": + builder.add_cos(name=op, input_name="data", output_name="output") + expected = {"output": np.cos(x)} + elif op == "tan": + builder.add_tan(name=op, input_name="data", output_name="output") + expected = {"output": np.tan(x)} + elif op == "asin": + builder.add_asin(name=op, input_name="data", output_name="output") + expected = {"output": np.arcsin(x)} + elif op == "acos": + builder.add_acos(name=op, input_name="data", output_name="output") + expected = {"output": np.arccos(x)} + elif op == "atan": + builder.add_atan(name=op, input_name="data", output_name="output") + expected = {"output": np.arctan(x)} + elif op == "sinh": + builder.add_sinh(name=op, input_name="data", output_name="output") + expected = {"output": np.sinh(x)} + elif op == "cosh": + builder.add_cosh(name=op, input_name="data", output_name="output") + expected = {"output": np.cosh(x)} + elif op == "tanh": + builder.add_tanh(name=op, input_name="data", output_name="output") + expected = {"output": np.tanh(x)} + elif op == "asinh": + builder.add_asinh(name=op, input_name="data", output_name="output") + expected = {"output": 
np.arcsinh(x)} + elif op == "acosh": + x = np.random.choice([10, np.e, 1], tuple(shape)).astype(np.float32) + builder.add_acosh(name=op, input_name="data", output_name="output") + expected = {"output": np.arccosh(x)} + elif op == "atanh": + builder.add_atanh(name=op, input_name="data", output_name="output") + expected = {"output": np.arctanh(x)} + + self._test_model( + builder.spec, {"data": x}, expected, useCPUOnly=cpu_only + ) + + def test_trigonometry_gpu(self): + self.test_trigonometry_cpu(cpu_only=False) + + def test_exp2_cpu(self, cpu_only=True): + for rank in range(1, 6): + shape = np.random.randint(low=2, high=8, size=rank) + input_features = [("data", datatypes.Array(*shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + builder.add_exp2(name="exp2", input_name="data", output_name="output") + + x = np.random.rand(*shape) + input = {"data": x} + expected = {"output": np.exp2(x)} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_exp2_gpu(self): + self.test_exp2_cpu(cpu_only=False) + + def test_elementwise_binary_cpu(self, cpu_only=True): + input_names = ["A", "B"] + test_cases = [ + "greater", + "less", + "equal", + "not_equal", + "greater_equal", + "less_equal", + "logical_and", + "logical_or", + "logical_xor", + "add", + "subtract", + "multiply", + "divide", + "power", + "maximum", + "minimum", + "floor_divide", + "mod", + ] + for test_case in test_cases: + for _ in range(10): + rank_a = np.random.randint(low=1, high=6) + rank_b = np.random.randint(low=1, high=6) + + rank_out = max(rank_a, rank_b) + + shape_a = np.random.randint(low=2, high=8, size=rank_a) + shape_b = np.random.randint(low=2, high=8, size=rank_b) + + for i in range(-1, -rank_out - 1, -1): + dims = [] + if -i <= rank_a: + dims.append(shape_a[i]) + if -i <= rank_b: + dims.append(shape_b[i]) + + dim = np.random.choice(dims) + if -i <= rank_a: + shape_a[i] = np.random.choice([1, dim]) + if -i <= rank_b: + shape_b[i] = np.random.choice([1, dim]) + + input_shapes = [shape_a, shape_b] + input_features = [ + ("A", datatypes.Array(*input_shapes[0])), + ("B", datatypes.Array(*input_shapes[1])), + ] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + func = getattr(np, test_case) + if test_case == "greater": + builder.add_greater_than( + test_case, input_names=input_names, output_name="output" + ) + elif test_case == "less": + builder.add_less_than( + test_case, input_names=input_names, output_name="output" + ) + elif test_case == "equal": + builder.add_equal( + test_case, input_names=input_names, output_name="output" + ) + elif test_case == "not_equal": + builder.add_not_equal( + test_case, input_names=input_names, output_name="output" + ) + elif test_case == "greater_equal": + builder.add_greater_than( + test_case, + input_names=input_names, + output_name="output", + use_greater_than_equal=True, + ) + elif test_case == "less_equal": + builder.add_less_than( + test_case, + input_names=input_names, + output_name="output", + use_less_than_equal=True, + ) + elif test_case == "logical_and": + builder.add_logical( + test_case, + input_names=input_names, + output_name="output", + mode="AND", + ) + elif test_case == "logical_or": + builder.add_logical( + test_case, + input_names=input_names, + output_name="output", + mode="OR", + ) + elif test_case == "logical_xor": + builder.add_logical( + test_case, + input_names=input_names, + 
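# --- Editor's sketch: the nested loop above walks trailing axes and forces
# each operand's dim to either 1 or a shared value, guaranteeing the two
# random shapes broadcast; numpy can confirm compatibility (requires
# numpy >= 1.20 for broadcast_shapes):
import numpy as np
shape_a, shape_b = (2, 1, 4), (3, 4)
assert np.broadcast_shapes(shape_a, shape_b) == (2, 3, 4)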
output_name="output", + mode="XOR", + ) + elif test_case == "add": + builder.add_add_broadcastable( + test_case, input_names=input_names, output_name="output" + ) + elif test_case == "subtract": + builder.add_subtract_broadcastable( + test_case, input_names=input_names, output_name="output" + ) + elif test_case == "multiply": + builder.add_multiply_broadcastable( + test_case, input_names=input_names, output_name="output" + ) + elif test_case == "divide": + builder.add_divide_broadcastable( + test_case, input_names=input_names, output_name="output" + ) + elif test_case == "power": + builder.add_pow_broadcastable( + test_case, input_names=input_names, output_name="output" + ) + elif test_case == "maximum": + builder.add_max_broadcastable( + test_case, input_names=input_names, output_name="output" + ) + elif test_case == "minimum": + builder.add_min_broadcastable( + test_case, input_names=input_names, output_name="output" + ) + elif test_case == "floor_divide": + builder.add_floor_div_broadcastable( + test_case, input_names=input_names, output_name="output" + ) + elif test_case == "mod": + builder.add_mod_broadcastable( + test_case, input_names=input_names, output_name="output" + ) + a = np.random.rand(*input_shapes[0]) + b = np.random.rand(*input_shapes[1]) + input = {"A": a, "B": b} + expected = {"output": func(a, b)} + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_elementwise_binary_gpu(self): + self.test_elementwise_binary_cpu(cpu_only=False) + + def test_elementwise_boolean_unary_cpu(self, cpu_only=True): + input_names = ["input"] + shapes = [ + (1, 2, 3, 1), + (3, 1, 2, 1, 2), + (1, 2, 1, 3), + (2, 3), + (2, 1, 1), + (2, 3, 4), + (2, 4), + (1,), + (1,), + ] + test_cases = [ + "greater", + "less", + "equal", + "not_equal", + "greater_equal", + "less_equal", + ] + for test_case in test_cases: + for shape in shapes: + input_features = [("input", datatypes.Array(*shape))] + b = np.random.rand() + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + func = getattr(np, test_case) + if test_case == "greater": + builder.add_greater_than( + test_case, + input_names=input_names, + output_name="output", + alpha=b, + ) + elif test_case == "less": + builder.add_less_than( + test_case, + input_names=input_names, + output_name="output", + alpha=b, + ) + elif test_case == "equal": + builder.add_equal( + test_case, + input_names=input_names, + output_name="output", + alpha=b, + ) + elif test_case == "not_equal": + builder.add_not_equal( + test_case, + input_names=input_names, + output_name="output", + alpha=b, + ) + elif test_case == "greater_equal": + builder.add_greater_than( + test_case, + input_names=input_names, + output_name="output", + use_greater_than_equal=True, + alpha=b, + ) + elif test_case == "less_equal": + builder.add_less_than( + test_case, + input_names=input_names, + output_name="output", + use_less_than_equal=True, + alpha=b, + ) + + a = np.random.rand(*shape) + input = {"input": a} + expected = {"output": func(a, b)} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_elementwise_boolean_unary_gpu(self): + self.test_elementwise_boolean_unary_cpu(cpu_only=False) + + def test_logical_not_cpu(self, cpu_only=True): + input_names = ["input"] + shapes = [ + (1, 2, 3, 1), + (3, 1, 2, 1, 2), + (1, 2, 1, 3), + (2, 3), + (2, 1, 1), + (2, 3, 4), + (2, 4), + (1,), + (1,), + ] + for shape in shapes: + input_features = [("input", datatypes.Array(*shape))] + 
builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + builder.add_logical( + "logical_not", input_names=input_names, output_name="output", mode="NOT" + ) + + a = np.random.rand(*shape) + input = {"input": a} + expected = {"output": np.logical_not(a)} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_logical_not_gpu(self): + self.test_logical_not_cpu(cpu_only=False) + + def test_stack_cpu(self, cpu_only=True): + for input_rank in range(1, 5): + for axis in range(-input_rank - 1, input_rank + 1): + n_inputs = np.random.choice(range(2, 5)) + input_shape = np.random.randint(low=2, high=5, size=input_rank) + input_features = [] + input_names = [] + for i in range(n_inputs): + input_name = "input_%s" % str(i) + input_names.append(input_name) + input_features.append((input_name, datatypes.Array(*input_shape))) + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_stack( + name="stack", + input_names=input_names, + output_name="output", + axis=axis, + ) + + input_tensors = [] + for _ in range(n_inputs): + input_tensors.append(np.random.rand(*input_shape)) + input = dict(zip(input_names, input_tensors)) + expected = {"output": np.stack(input_tensors, axis)} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + self.assertEqual(input_rank + 1, builder._get_rank("output")) + + def test_stack_gpu(self): + self.test_stack_cpu(cpu_only=False) + + def test_ceil_cpu(self, cpu_only=True): + for rank in range(1, 6): + shape = np.random.randint(low=2, high=8, size=rank) + input_features = [("data", datatypes.Array(*shape))] + output_features = [("output", datatypes.Array(*shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_ceil(name="ceil", input_name="data", output_name="output") + + x = np.random.rand(*shape) + inputs = {"data": x} + expected = {"output": np.ceil(x)} + + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + self.assertEqual(rank, builder._get_rank("output")) + + def test_ceil_gpu(self): + self.test_ceil_cpu(cpu_only=False) + + def test_floor_cpu(self, cpu_only=True): + for rank in range(1, 6): + shape = np.random.randint(low=2, high=8, size=rank) + input_features = [("data", datatypes.Array(*shape))] + output_features = [("output", datatypes.Array(*shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_floor(name="floor", input_name="data", output_name="output") + + x = np.random.rand(*shape) + inputs = {"data": x} + expected = {"output": np.floor(x)} + + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + + def test_round_cpu(self, cpu_only=True): + for rank in range(1, 6): + shape = np.random.randint(low=2, high=8, size=rank) + input_features = [("data", datatypes.Array(*shape))] + output_features = [("output", datatypes.Array(*shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_round(name="round", input_name="data", output_name="output") + + x = np.float32( + np.random.rand(*shape) * np.random.randint(low=-100, high=101) + ) + inputs = {"data": x} + expected = {"output": np.around(x)} + + self._test_model(builder.spec, 
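# --- Editor's sketch: stacking n rank-r tensors yields rank r + 1 with the
# new axis of length n, which is what test_stack_cpu's rank assertion
# checks via the builder:
import numpy as np
tensors = [np.random.rand(2, 3) for _ in range(4)]
assert np.stack(tensors, axis=1).shape == (2, 4, 3)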
inputs, expected, useCPUOnly=cpu_only) + + def test_round_gpu(self): + if platform.machine() == "arm64": + pytest.xfail("rdar://98010495 (Some old nnv1 test are failing on M1 machine when running on ANE)") + self.test_round_cpu(cpu_only=False) + + def test_sign_cpu(self, cpu_only=True): + for rank in range(1, 6): + shape = np.random.randint(low=2, high=8, size=rank) + input_features = [("data", datatypes.Array(*shape))] + output_features = [("output", datatypes.Array(*shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_sign(name="sign", input_name="data", output_name="output") + + x = np.random.choice( + [-np.random.rand(1)[0], 0.0, np.random.rand(1)[0]], tuple(shape) + ).astype(np.float32) + inputs = {"data": x} + expected = {"output": np.sign(x)} + + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + + def test_sign_gpu(self): + self.test_sign_cpu(cpu_only=False) + + def test_clip_cpu(self, cpu_only=True): + for rank in range(1, 6): + shape = np.random.randint(low=2, high=6, size=rank) + input_features = [("data", datatypes.Array(*shape))] + output_features = [("output", datatypes.Array(*shape))] + + x = np.random.rand(*shape) + min_value = np.percentile(x, 25) + max_value = np.percentile(x, 75) + input = {"data": x} + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_clip( + name="clip", + input_name="data", + output_name="output", + min_value=min_value, + max_value=max_value, + ) + + expected = {"output": np.clip(x, min_value, max_value)} + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_clip_gpu(self): + self.test_clip_cpu(cpu_only=False) + + def test_split_nd_cpu(self, cpu_only=True): + for rank in range(1, 6): + for axis in range(-rank, rank): + n_outputs = np.random.choice(range(2, 4)) + input_shape = np.random.randint(low=2, high=5, size=rank) + input_shape[axis] = 0 + output_shapes = [] + output_features = [] + output_names = [] + almost_equal = random.choice([True, False]) + remainder = np.random.choice(range(1, n_outputs)) if almost_equal else 0 + value = np.random.choice(range(2, 5)) + for k in range(n_outputs): + output_shapes.append(np.copy(input_shape)) + output_shapes[-1][axis] = value + 1 if k < remainder else value + input_shape[axis] += output_shapes[-1][axis] + + for i in range(n_outputs): + output_name = "output_%s" % str(i) + output_names.append(output_name) + output_features.append((output_name, None)) + + input_features = [("data", datatypes.Array(*input_shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_split_nd( + name="split_nd", + input_name="data", + output_names=output_names, + axis=axis, + num_splits=n_outputs, + ) + + x = np.random.rand(*input_shape) + input = {"data": x} + expected = dict( + zip( + output_names, + np.array_split(x, n_outputs, axis=axis) + if almost_equal + else np.split(x, n_outputs, axis=axis), + ) + ) # Explicitly trying to compare against both versions of numpy split + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + for output_ in output_names: + self.assertEqual(rank, builder._get_rank(output_)) + + def test_split_nd_gpu(self): + if platform.machine() == "arm64": + pytest.xfail("rdar://98010495 (Some old nnv1 test are failing on M1 machine when running on ANE)") + 
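# --- Editor's sketch: np.around, used as the reference above, rounds ties
# to the nearest even value ("banker's rounding"), so 0.5 -> 0 and
# 1.5 -> 2; a round layer validated against it must follow that convention:
import numpy as np
assert np.array_equal(np.around(np.array([0.5, 1.5, 2.5])), [0.0, 2.0, 2.0])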
self.test_split_nd_cpu(cpu_only=False) + + def test_split_nd_with_split_sizes_cpu(self, cpu_only=True): + for rank in range(1, 6): + for axis in range(-rank, rank): + n_outputs = np.random.choice(range(2, 4)) + input_shape = np.random.randint(low=2, high=5, size=rank) + input_shape[axis] = 0 + output_shapes, output_features, output_names = [], [], [] + sections, split_sizes = [], [] + for _ in range(n_outputs): + output_shapes.append(np.copy(input_shape)) + output_shapes[-1][axis] = np.random.choice(range(2, 5)) + input_shape[axis] += output_shapes[-1][axis] + sections.append(input_shape[axis]) + split_sizes.append(output_shapes[-1][axis]) + + sections.pop() + for i in range(n_outputs): + output_name = "output_%s" % str(i) + output_names.append(output_name) + output_features.append((output_name, None)) + + input_features = [("data", datatypes.Array(*input_shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_split_nd( + name="split_nd", + input_name="data", + output_names=output_names, + axis=axis, + split_sizes=split_sizes, + ) + + x = np.random.rand(*input_shape) + input = {"data": x} + expected = dict(zip(output_names, np.split(x, sections, axis=axis))) + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + for output_ in output_names: + self.assertEqual(rank, builder._get_rank(output_)) + + def test_split_nd_with_split_sizes_gpu(self): + self.test_split_nd_with_split_sizes_cpu(cpu_only=False) + + def test_slice_static_cpu(self, cpu_only=True): + for rank in range(1, 6): + for _ in range(200): + input_shape = np.array([5 for _ in range(rank)]) + objs, strides, begin_masks, end_ids, end_masks, begin_ids = ( + [], + [], + [], + [], + [], + [], + ) + for dim in range(rank): + stride = random.choice([-3, -1, 1, 2]) + begin_mask = random.choice([True, False]) + end_mask = random.choice([True, False]) + length = 0 + while length <= 0: + begin_id = np.random.randint( + low=-input_shape[dim], high=input_shape[dim] + ) + end_id = np.random.randint( + low=-input_shape[dim], high=input_shape[dim] + ) + obj = slice( + None if begin_mask else begin_id, + None if end_mask else end_id, + stride, + ) + length = np.arange(input_shape[dim])[(obj,)].shape[0] + + objs.append(obj), strides.append(stride), begin_masks.append( + begin_mask + ) + end_masks.append(end_mask), begin_ids.append( + begin_id + ), end_ids.append(end_id) + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_slice_static( + "slice_static", + "data", + "output", + begin_ids=begin_ids, + end_ids=end_ids, + strides=strides, + begin_masks=begin_masks, + end_masks=end_masks, + ) + + x = np.random.rand(*input_shape) + inputs = {"data": x} + expected = {"output": x[tuple(objs)]} + + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + self.assertEqual(rank, builder._get_rank("output")) + + def test_slice_static_gpu(self): + self.test_slice_static_cpu(cpu_only=False) + + def test_slice_dynamic_cpu(self, cpu_only=True): + for rank in range(1, 6): + input_shape = np.array([5 for _ in range(rank)]) + objs, strides, begin_masks, end_ids, end_masks, begin_ids = ( + [], + [], + [], + [], + [], + [], + ) + squeeze_masks = [] + squeeze_axes = [] + for dim in range(rank): + stride = random.choice([-3, -1, 1, 2]) + begin_mask = 
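# --- Editor's sketch: in the slice tests above, a begin/end mask means
# "ignore the corresponding id and use the open boundary", i.e. None in a
# python slice; test_slice_static builds exactly these slice objects as
# its numpy reference:
import numpy as np
x = np.arange(5)
assert np.array_equal(x[slice(None, 4, 2)], x[0:4:2])   # begin mask set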
random.choice([True, False]) + end_mask = random.choice([True, False]) + if len(squeeze_axes) + 1 < rank: + squeeze_mask = random.choice([True, False]) + else: + squeeze_mask = False + if squeeze_mask: + squeeze_axes.append(dim) + length = 0 + while length <= 0: + begin_id = np.random.randint( + low=-input_shape[dim], high=input_shape[dim] + ) + end_id = np.random.randint( + low=-input_shape[dim], high=input_shape[dim] + ) + obj = slice( + None if begin_mask else begin_id, + None if end_mask else end_id, + stride, + ) + length = np.arange(input_shape[dim])[(obj,)].shape[0] + + objs.append(obj), strides.append(stride), begin_masks.append(begin_mask) + end_masks.append(end_mask), begin_ids.append(begin_id), end_ids.append( + end_id + ) + squeeze_masks.append(squeeze_mask) + + # test different number of inputs, from 2 inputs up to 7 inputs + # when num_inputs == 2, begin_ids are inputs, rest are read from parameters + # when num_inputs == 7, all read from inputs, none are read from parameters + for num_inputs in [2, 3, 4, 5, 6]: + x = np.random.rand(*input_shape) + + input_features = [("data", datatypes.Array(*input_shape))] + input_names = ["data"] + inputs = dict() + inputs["data"] = x + + if num_inputs == 2: + input_features = [ + ("data", datatypes.Array(*input_shape)), + ("begin_ids", datatypes.Array(len(begin_ids))), + ] + input_names = ["data", "begin_ids"] + inputs["begin_ids"] = np.array(begin_ids, dtype=np.int32) + elif num_inputs == 3: + input_features = [ + ("data", datatypes.Array(*input_shape)), + ("begin_ids", datatypes.Array(len(begin_ids))), + ("end_ids", datatypes.Array(len(end_ids))), + ] + input_names = ["data", "begin_ids", "end_ids"] + inputs["begin_ids"] = np.array(begin_ids, dtype=np.int32) + inputs["end_ids"] = np.array(end_ids, dtype=np.int32) + elif num_inputs == 4: + input_features = [ + ("data", datatypes.Array(*input_shape)), + ("begin_ids", datatypes.Array(len(begin_ids))), + ("end_ids", datatypes.Array(len(end_ids))), + ("strides", datatypes.Array(len(strides))), + ] + input_names = ["data", "begin_ids", "end_ids", "strides"] + inputs["begin_ids"] = np.array(begin_ids, dtype=np.int32) + inputs["end_ids"] = np.array(end_ids, dtype=np.int32) + inputs["strides"] = np.array(strides, dtype=np.int32) + elif num_inputs == 5: + input_features = [ + ("data", datatypes.Array(*input_shape)), + ("begin_ids", datatypes.Array(len(begin_ids))), + ("end_ids", datatypes.Array(len(end_ids))), + ("strides", datatypes.Array(len(strides))), + ("begin_masks", datatypes.Array(len(begin_masks))), + ] + input_names = [ + "data", + "begin_ids", + "end_ids", + "strides", + "begin_masks", + ] + inputs["begin_ids"] = np.array(begin_ids, dtype=np.int32) + inputs["end_ids"] = np.array(end_ids, dtype=np.int32) + inputs["strides"] = np.array(strides, dtype=np.int32) + inputs["begin_masks"] = np.array(begin_masks, dtype=np.int32) + elif num_inputs == 6: + input_features = [ + ("data", datatypes.Array(*input_shape)), + ("begin_ids", datatypes.Array(len(begin_ids))), + ("end_ids", datatypes.Array(len(end_ids))), + ("strides", datatypes.Array(len(strides))), + ("begin_masks", datatypes.Array(len(begin_masks))), + ("end_masks", datatypes.Array(len(end_masks))), + ] + input_names = [ + "data", + "begin_ids", + "end_ids", + "strides", + "begin_masks", + "end_masks", + ] + inputs["begin_ids"] = np.array(begin_ids, dtype=np.int32) + inputs["end_ids"] = np.array(end_ids, dtype=np.int32) + inputs["strides"] = np.array(strides, dtype=np.int32) + inputs["begin_masks"] = np.array(begin_masks, 
dtype=np.int32) + inputs["end_masks"] = np.array(end_masks, dtype=np.int32) + elif num_inputs == 7: + input_features = [ + ("data", datatypes.Array(*input_shape)), + ("begin_ids", datatypes.Array(len(begin_ids))), + ("end_ids", datatypes.Array(len(end_ids))), + ("strides", datatypes.Array(len(strides))), + ("begin_masks", datatypes.Array(len(begin_masks))), + ("end_masks", datatypes.Array(len(end_masks))), + ("squeeze_masks", datatypes.Array(len(squeeze_masks))), + ] + input_names = [ + "data", + "begin_ids", + "end_ids", + "strides", + "begin_masks", + "end_masks", + "squeeze_masks", + ] + inputs["begin_ids"] = np.array(begin_ids, dtype=np.int32) + inputs["end_ids"] = np.array(end_ids, dtype=np.int32) + inputs["strides"] = np.array(strides, dtype=np.int32) + inputs["begin_masks"] = np.array(begin_masks, dtype=np.int32) + inputs["end_masks"] = np.array(end_masks, dtype=np.int32) + inputs["squeeze_masks"] = np.array(squeeze_masks, dtype=np.int32) + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + if num_inputs == 2: + builder.add_slice_dynamic( + "slice_dynamic", + input_names, + "output", + end_ids=end_ids, + strides=strides, + begin_masks=begin_masks, + end_masks=end_masks, + squeeze_masks=squeeze_masks, + ) + elif num_inputs == 3: + builder.add_slice_dynamic( + "slice_dynamic", + input_names, + "output", + strides=strides, + begin_masks=begin_masks, + end_masks=end_masks, + squeeze_masks=squeeze_masks, + ) + elif num_inputs == 4: + builder.add_slice_dynamic( + "slice_dynamic", + input_names, + "output", + begin_masks=begin_masks, + end_masks=end_masks, + squeeze_masks=squeeze_masks, + ) + elif num_inputs == 5: + builder.add_slice_dynamic( + "slice_dynamic", + input_names, + "output", + end_masks=end_masks, + squeeze_masks=squeeze_masks, + ) + elif num_inputs == 6: + builder.add_slice_dynamic( + "slice_dynamic", + input_names, + "output", + squeeze_masks=squeeze_masks, + ) + elif num_inputs == 7: + builder.add_slice_dynamic("slice_dynamic", input_names, "output") + + expected_x = x[tuple(objs)] + squeeze_slices = [] + for squeeze in squeeze_masks: + if squeeze: + squeeze_slices.append(slice(None, 1, None)) + else: + squeeze_slices.append(slice(None, None, None)) + expected_x = np.squeeze( + expected_x[tuple(squeeze_slices)], axis=tuple(squeeze_axes) + ) + expected = {"output": expected_x} + + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + self.assertEqual(rank, builder._get_rank("output")) + + def test_slice_dynamic_gpu(self): + self.test_slice_dynamic_cpu(cpu_only=False) + + def test_tile_cpu(self, cpu_only=True): + for rank in range(1, 6): + input_shape = np.random.randint(low=2, high=5, size=rank) + for rep_rank in range(1, rank + 1): + reps = list(np.random.randint(low=1, high=9, size=rep_rank)) + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_tile("Tile", "data", "output", reps) + + x = np.random.rand(*input_shape) + input = {"data": x} + expected = {"output": np.tile(x, reps)} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_tile_gpu(self): + self.test_tile_cpu(cpu_only=False) + + def test_dynamic_tile_cpu(self, cpu_only=True): + for rank in range(1, 6): + input_shape = np.random.randint(low=2, high=5, size=rank) + for rep_rank in range(1, rank + 1): + reps 
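# --- Editor's sketch: the squeeze handling above keeps only the first
# element of each squeezed axis and then drops that axis; in numpy terms,
# x[:, :1] followed by np.squeeze:
import numpy as np
x = np.random.rand(3, 4)
assert np.squeeze(x[:, :1], axis=(1,)).shape == (3,)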
= np.random.randint(low=1, high=9, size=rep_rank) + input_features = [ + ("data", datatypes.Array(*input_shape)), + ("reps", datatypes.Array(*reps.shape)), + ] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_tile("Tile", ["data", "reps"], "output") + + x = np.random.rand(*input_shape) + input = {"data": x, "reps": reps.astype(np.float32)} + expected = {"output": np.tile(x, list(reps))} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_sliding_windows_cpu(self, cpu_only=True): + def numpy_sliding_windows(a, np_axis, np_size, np_step): + n = (a.shape[np_axis] - np_size) // np_step + 1 + shape = list(a.shape) + shape[np_axis] = n + if np_axis < 0: + np_axis += len(shape) + shape.insert(np_axis + 1, np_size) + strides = list(a.strides) + effstride = strides[np_axis] * np_step + strides.insert(np_axis, effstride) + return np.lib.stride_tricks.as_strided(a, shape, strides) + + for rank in range(1, 5): + for axis in range(-rank, rank): + input_shape = np.random.randint(low=2, high=5, size=rank) + output_shape = list(input_shape) + window_size = np.random.randint(low=1, high=input_shape[axis]) + + length = 0 + while length <= 0: + step = np.random.randint(low=1, high=input_shape[axis]) + length = (input_shape[axis] - window_size) // step + 1 + + output_shape[axis] = length + + pos_axis = axis if axis >= 0 else axis + rank + output_shape.insert(pos_axis + 1, window_size) + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_sliding_windows( + "sliding_windows", + input_name="data", + output_name="output", + axis=axis, + window_size=window_size, + step=step, + ) + + x = np.random.rand(*input_shape) + input = {"data": x} + expected = {"output": numpy_sliding_windows(x, axis, window_size, step)} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + self.assertEqual(rank + 1, builder._get_rank("output")) + + def test_sliding_windows_gpu(self): + self.test_sliding_windows_cpu(cpu_only=False) + + def test_range_static_cpu(self, cpu_only=True): + + params = [ + (-10.4, 23, 12.2), + (0, 1000, 1), + (50.5, 90.5, 1.5), + (5, 8, 2), + (5, 8, 98), + (5, 8, 1.5), + (10, 5, -0.6), + (24, -65, -2), + ] + + for param in params: + start, end, step = param + input_features = [("multiplicative_input", datatypes.Array(1))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_range_static( + "range_static", "output_range", end=end, start=start, step=step + ) + builder.add_multiply_broadcastable( + name="multiply_broadcastable", + input_names=["multiplicative_input", "output_range"], + output_name="output", + ) + + # save the model + model_dir = tempfile.TemporaryDirectory() + model_path = os.path.join(model_dir.name, "test_layer.mlmodel") + coremltools.utils.save_spec(builder.spec, model_path) + + inputs = dict() + inputs["multiplicative_input"] = np.ones((1,), dtype=np.float64) + expected = {"output": np.arange(start, end, step)} + + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + self.assertEqual(1, builder._get_rank("output")) + + def test_range_static_gpu(self): + self.test_range_static_cpu(cpu_only=False) + + def 
test_range_dynamic_cpu(self, cpu_only=True): + params = [ + (-10.4, 23, 12.2), + (0, 1000, 1), + (50.5, 90.5, 1.5), + (5, 8, 2), + (5, 8, 98), + (5, 8, 1.5), + (10, 5, -0.6), + (24, -65, -2), + ] + + # input size == 1: end is input, start and step are read from parameters + # input size == 2: end, start are inputs, step is read from parameters + # input size == 3: start, end, step are all inputs, none of the parameters are used. + for num_inputs in [1, 2, 3]: + for param in params: + inputs = dict() + start, end, step = param + + if num_inputs == 1: + input_features = [("end", datatypes.Array(1))] + elif num_inputs == 2: + input_features = [ + ("end", datatypes.Array(1)), + ("start", datatypes.Array(1)), + ] + elif num_inputs == 3: + input_features = [ + ("end", datatypes.Array(1)), + ("start", datatypes.Array(1)), + ("step", datatypes.Array(1)), + ] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + if num_inputs == 1: + inputs["end"] = end * np.ones((1,), dtype=np.float64) + builder.add_range_dynamic( + "range_dynamic", + output_name="output", + input_names=["end"], + start=start, + step=step, + ) + elif num_inputs == 2: + inputs["end"] = end * np.ones((1,), dtype=np.float64) + inputs["start"] = start * np.ones((1,), dtype=np.float64) + builder.add_range_dynamic( + "range_dynamic", + output_name="output", + input_names=["end", "start"], + step=step, + ) + elif num_inputs == 3: + inputs["end"] = end * np.ones((1,), dtype=np.float64) + inputs["start"] = start * np.ones((1,), dtype=np.float64) + inputs["step"] = step * np.ones((1,), dtype=np.float64) + builder.add_range_dynamic( + "range_dynamic", + output_name="output", + input_names=["end", "start", "step"], + ) + + expected = {"output": np.arange(start, end, step)} + + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + self.assertEqual(1, builder._get_rank("output")) + + def test_range_dynamic_gpu(self): + self.test_range_dynamic_cpu(cpu_only=False) + + def test_linear_activation_different_ranks_cpu(self, cpu_only=True): + for input_dim in [(10, 15), (10, 15, 2, 3), (10, 2, 4, 15, 1), (6,)]: + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", datatypes.Array(*input_dim))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_activation( + name="activation", + non_linearity="LINEAR", + input_name="data", + output_name="output", + params=[34.0, 67.0], + ) + + x = np.random.rand(*input_dim) + input = {"data": x} + expected = {"output": 34.0 * x + 67.0} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_linear_activation_different_ranks_gpu(self): + self.test_linear_activation_different_ranks_cpu(cpu_only=False) + + def test_topk_cpu(self, cpu_only=True): + test_input_shapes = [(9,), (8, 6), (9, 8, 10), (5, 9, 7, 9), (12, 8, 6, 6, 7)] + K = [3, 5] + axes = [[0], [0, 1], [1, 2], [0, 3, 1], [1, 3, 4]] + + for ii, input_shape in enumerate(test_input_shapes): + for k in K: + for n_inputs in [1, 2]: + for bottom_k_flag in [False, True]: + for axis in axes[ii]: + for negative_axis in [False, True]: + + if negative_axis: + axis = axis - len(input_shape) + + input_features = [ + ("data", datatypes.Array(*input_shape)) + ] + output_features = [("values", None), ("indices", None)] + + input_names = ["data"] + output_names = ["values", "indices"] + + if n_inputs == 2: + input_names.append("k_in") + 
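+                            # Note (added comment): n_inputs == 2 exercises the dynamic
+                            # variant of TopK, where k arrives at runtime through the extra
+                            # "k_in" tensor. add_topk below is therefore built without its
+                            # k parameter, and the feed dict later supplies k as a
+                            # one-element float array.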
input_features.append(("k_in", datatypes.Array(1))) + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + + if n_inputs == 2: + builder.add_topk( + "topk", + input_names, + output_names, + axis=axis, + use_bottom_k=bottom_k_flag, + ) + else: + builder.add_topk( + "topk", + input_names, + output_names, + k=k, + axis=axis, + use_bottom_k=bottom_k_flag, + ) + + data = np.random.randint( + low=0, + high=int(np.prod(input_shape)), + size=input_shape, + ) + data = data.astype(np.float32) + + input = {"data": data} + if n_inputs == 2: + input["k_in"] = k * np.ones([1], dtype=np.float32) + + # numpy reference values + if bottom_k_flag: + ref_indices = np.argsort(data, axis=axis) + else: + ref_indices = np.argsort(-data, axis=axis) + + slc = [slice(None)] * len(input_shape) + slc[axis] = slice(0, k) + ref_indices = ref_indices[tuple(slc)] + ref_values = np.take_along_axis( + data, ref_indices, axis=axis + ) + expected = { + "values": ref_values, + "indices": ref_indices, + } + + self._test_model( + builder.spec, input, expected, useCPUOnly=cpu_only + ) + + def test_topk_gpu(self): + self.test_topk_cpu(cpu_only=False) + + @unittest.skipIf(not _HAS_TF_2, MSG_TF2_NOT_FOUND) + def test_const_pad_cpu(self, cpu_only=True): + def get_reference(data, pads, value): + res = tf.pad(data, pads, mode='CONSTANT', constant_values=value) + return res.numpy() + + value = 34.0 + shapes = [(3,), (4, 5), (2, 4, 5), (12, 6, 3, 5, 7), (1, 24, 2, 4, 8)] + + ctr = 0 + for shape in shapes: + rank = len(shape) + for force_zeros_in_end in [0, 2, 6]: + for max_pad_value in range(1, 6): + for n_inputs in [1, 2]: + pads = np.random.randint( + low=0, high=max_pad_value, size=(rank, 2) + ) + + if force_zeros_in_end > 2 * rank: + continue + + # pads = np.reshape(np.array([1,1,1,0,0,1]), (rank, 2)) + if force_zeros_in_end != 0: + pads[-force_zeros_in_end:] = 0 + + data = np.random.rand(*shape) + reference = get_reference(data, pads, value) + + ctr += 1 + + input_features = [("data", datatypes.Array(*shape))] + output_features = [("output", None)] + + input_names = ["data"] + if n_inputs == 2: + input_names.append("pads") + input_features.append(("pads", datatypes.Array(2 * rank,))) + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + if n_inputs == 2: + builder.add_constant_pad( + "pad", input_names, "output", value=value + ) + else: + builder.add_constant_pad( + "pad", + input_names, + "output", + value=value, + pad_amounts=pads.flatten(), + ) + + input = {"data": data} + if n_inputs == 2: + input["pads"] = pads.flatten().astype(np.float32) + + expected = {"output": reference} + self._test_model( + builder.spec, input, expected, useCPUOnly=cpu_only + ) + + def test_const_pad_gpu(self): + if platform.machine() == "arm64": + pytest.xfail("rdar://98010495 (Some old nnv1 test are failing on M1 machine when running on ANE)") + self.test_const_pad_cpu(cpu_only=False) + + @unittest.skipIf(not _HAS_TF_2, MSG_TF2_NOT_FOUND) + def test_const_pad_mode2_cpu(self, cpu_only=True): + def get_reference(data, output_shape, value, left_pad=False): + pads = np.zeros((len(output_shape), 2)) + if left_pad: + pads[:, 0] = np.array(output_shape) - np.array(data.shape) + else: + pads[:, 1] = np.array(output_shape) - np.array(data.shape) + res = tf.pad(data, pads, mode="CONSTANT", constant_values=value) + return res.numpy() + + + value = 34.0 + shapes = [(3,), (4, 5), (2, 4, 5), (12, 6, 3, 5, 7), (1, 24, 
2, 4, 8)] + out_shapes = [(5,), (4, 8), (2, 4, 10), (20, 6, 7, 10, 7), (5, 24, 10, 4, 10)] + + ctr = 0 + for ii, shape in enumerate(shapes): + rank = len(shape) + for left_pad in [True, False]: + for n_inputs in [1, 2]: + + data = np.random.rand(*shape) + reference = get_reference(data, out_shapes[ii], value, left_pad) + + pads = np.zeros((rank, 2)) + tmp = np.zeros((rank)) + + for i in range(rank): + if out_shapes[ii][i] == shape[i]: + tmp[i] = 0 + else: + tmp[i] = out_shapes[ii][i] + + if left_pad: + pads[:, 0] = tmp + else: + pads[:, 1] = tmp + + ctr += 1 + + input_features = [("data", datatypes.Array(*shape))] + output_features = [("output", None)] + + input_names = ["data"] + if n_inputs == 2: + input_names.append("pads") + input_features.append(("pads", datatypes.Array(2 * rank,))) + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + if n_inputs == 2: + builder.add_constant_pad( + "pad", + input_names, + "output", + value=value, + pad_to_given_output_size_mode=True, + ) + else: + builder.add_constant_pad( + "pad", + input_names, + "output", + value=value, + pad_amounts=pads.flatten(), + pad_to_given_output_size_mode=True, + ) + + input = {"data": data} + if n_inputs == 2: + input["pads"] = pads.flatten().astype(np.float32) + + expected = {"output": reference} + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_const_pad_mode2_gpu(self): + if platform.machine() == "arm64": + pytest.xfail("rdar://98010495 (Some old nnv1 test are failing on M1 machine when running on ANE)") + self.test_const_pad_mode2_cpu(cpu_only=False) + + def test_nms_cpu(self, cpu_only=True): + def _compute_iou_matrix(boxes): + # input is (N,4), in order [center_w, center_h, width, height] + self.assertEqual(len(boxes.shape), 2) + self.assertEqual(boxes.shape[1], 4) + boxes = boxes.astype(np.float32) + center_w, center_h, width, height = np.split( + boxes, 4, axis=1 + ) # outs are all (N,1) + top = center_h + 0.5 * height + bottom = center_h - 0.5 * height + left = center_w - 0.5 * width + right = center_w + 0.5 * width + area = width * height + + hB = np.minimum(top, np.transpose(top)) + wB = np.minimum(right, np.transpose(right)) + hA = np.maximum(bottom, np.transpose(bottom)) + wA = np.maximum(left, np.transpose(left)) + + intersection_area = np.maximum(0, hB - hA) * np.maximum(0, wB - wA) + union_area = area + np.transpose(area) - intersection_area + iou = intersection_area / union_area + return iou + + @unittest.skipIf(not _HAS_TF_2, MSG_TF2_NOT_FOUND) + def _nms_TF( + boxes, scores, iou_threshold, score_threshold, per_class_suppression, M + ): + # boxes is (B,N,4), in order [center_w, center_h, width, height] + # scores is (B,N,C) + # output shapes: (B,M,4), (B,M,C), (B,M), (B,) + """ + this is implementation of CoreML's NMS layer + """ + B, N, C = scores.shape + + iou_threshold = iou_threshold.astype(np.float32) + score_threshold = score_threshold.astype(np.float32) + + # convert box ids to TF style + center_w, center_h, width, height = np.split( + boxes, 4, axis=-1 + ) # outs are all (B,N,1) + y1 = center_h - 0.5 * height + y2 = center_h + 0.5 * height + x1 = center_w - 0.5 * width + x2 = center_w + 0.5 * width + boxes_tf = np.concatenate((y1, x1, y2, x2), axis=-1) # (B,N,4) + + out1 = np.zeros((B, M, 4)) + out2 = np.zeros((B, M, C)) + out3 = -1 * np.ones((B, M)) + out4 = np.zeros((B,)) + + for b in range(B): + box_coord_matrix = boxes_tf[b, :, :] # (N,4) + score_vector = np.max(scores[b, :, :], 
axis=-1) # (N,) + if not per_class_suppression: + # this is the simple case as TF directly supports it + ids_g = tf.image.non_max_suppression( + box_coord_matrix, + score_vector, + max_output_size=M, + iou_threshold=iou_threshold, + score_threshold=score_threshold, + ) + ids = ids_g.numpy() + else: + # this is slightly complicated as TF does not directly support it + class_ids = np.argmax(scores[b, :, :], axis=-1) # (N,) + sorted_score_ids = np.argsort(-score_vector) + box_coord_matrix2 = np.take( + box_coord_matrix, sorted_score_ids, axis=0 + ) + score_vector2 = np.take(score_vector, sorted_score_ids) + class_ids = np.take(class_ids, sorted_score_ids) + classes_seen = dict() + ids_intermediate = np.array([], dtype=np.int32) + for n in range(N): + if class_ids[n] in classes_seen: + continue + c = class_ids[n] + classes_seen[c] = True + current_class_ids = np.where(class_ids == c)[0] + if len(current_class_ids) > 0: + feed_in1 = np.take( + box_coord_matrix2, current_class_ids, axis=0 + ) + feed_in2 = np.take(score_vector2, current_class_ids) + cur_ids_g = tf.image.non_max_suppression( + feed_in1, + feed_in2, + max_output_size=M, + iou_threshold=iou_threshold, + score_threshold=score_threshold, + ) + cur_ids = cur_ids_g.numpy() + + from_sort_ids = np.take(current_class_ids, cur_ids) + ids_intermediate = np.append( + ids_intermediate, from_sort_ids + ) + ids_intermediate.sort() + ids = np.take(sorted_score_ids, ids_intermediate) + + xx = len(ids) + if xx == 0: + ids = np.array([np.argmax(score_vector)]) + xx = 1 + if xx > M: + ids = ids[:M] + xx = len(ids) + out1[b, :xx, :] = np.take(boxes[b, :, :], ids, axis=0) + out2[b, :xx, :] = np.take(scores[b, :, :], ids, axis=0) + out3[b, :xx] = ids + out4[b] = xx + + return out1, out2, out3, out4 + + iou_threshold_percentile = [0, 30, 80, 100] + score_threshold_percentile_arr = [0, 40, 100] + N_M_pairs_to_test = [[100, 48], [100, 112]] # N : boxes in, M: max boxes out + + number_of_test = 0 + for N_M in N_M_pairs_to_test: + for B in [1]: # [1, 5] TODO Re-enable when rdar://60280745 is fixed + for C in [1, 7]: + N, M = N_M + + boxes = np.random.rand(B, N, 4) + scores = np.random.rand(B, N, C) + + iou_matrix = _compute_iou_matrix(boxes[0, :, :]) # (N,N) + iou_matrix = iou_matrix[ + ~np.eye(iou_matrix.shape[0], dtype=bool) + ].reshape(iou_matrix.shape[0], -1) + + for per_class_suppression in [False, True]: + for iou_thresh in iou_threshold_percentile: + for score_thresh in score_threshold_percentile_arr: + for is_dynamic in [False, True]: + + if score_thresh == 0: + score_threshold = np.min(scores) - 1 + elif score_thresh == 100: + score_threshold = np.max(scores) + 1 + else: + score_threshold = ( + np.percentile(scores, score_thresh) + 0.01 + ) + + if iou_thresh == 0: + iou_threshold = np.maximum( + np.min(iou_matrix) - 0.01, 0.0 + ) + else: + iou_threshold = ( + np.percentile(iou_matrix, iou_thresh) + 0.01 + ) + iou_threshold = np.maximum(iou_threshold, 1e-8) + + number_of_test += 1 + + tf_boxes, tf_scores, tf_ids, tf_num_boxes = _nms_TF( + boxes, + scores, + iou_threshold, + score_threshold, + per_class_suppression, + M, + ) + expected = dict() + expected["selected_boxes"] = tf_boxes + expected["selected_scores"] = tf_scores + expected["selected_box_ids"] = tf_ids + expected["number_of_boxes"] = tf_num_boxes + + # define CoreML model + + input_features = [ + ("boxes", datatypes.Array(B, N, 4)), + ("scores", datatypes.Array(B, N, C)), + ] + output_features = [ + ("selected_boxes", None), + ("selected_scores", None), + ("selected_box_ids", None), + 
("number_of_boxes", None), + ] + + input_names = ["boxes", "scores"] + if is_dynamic: + input_names.extend( + [ + "iou_threshold", + "score_threshold", + "max_boxes", + ] + ) + input_features.append( + ("iou_threshold", datatypes.Array(1,)) + ) + input_features.append( + ("score_threshold", datatypes.Array(1,)) + ) + input_features.append( + ("max_boxes", datatypes.Array(1,)) + ) + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + + input_dict = dict() + input_dict["boxes"] = boxes + input_dict["scores"] = scores + + if is_dynamic: + builder.add_nms( + "nms", + input_names, + [ + "selected_boxes", + "selected_scores", + "selected_box_ids", + "number_of_boxes", + ], + per_class_suppression=per_class_suppression, + ) + + input_dict[ + "iou_threshold" + ] = iou_threshold * np.ones([1], dtype=np.float32) + input_dict["score_threshold"] = ( + score_threshold + * np.ones([1], dtype=np.float32) + ) + input_dict["max_boxes"] = M * np.ones( + [1], dtype=np.float32 + ) + else: + builder.add_nms( + "nms", + input_names, + [ + "selected_boxes", + "selected_scores", + "selected_box_ids", + "number_of_boxes", + ], + iou_threshold=iou_threshold, + score_threshold=score_threshold, + max_boxes=M, + per_class_suppression=per_class_suppression, + ) + + self._test_model( + builder.spec, + input_dict, + expected, + useCPUOnly=cpu_only, + ) + + def test_nms_gpu(self): + self.test_nms_cpu(cpu_only=False) + + def test_rank_preserving_reshape(self): + input_shapes = [(20, 10), (20, 10, 5), (10, 3, 5)] + target_shapes = [(5, -1), (0, 2, 25), (25, 0, -1)] + output_shapes = [(5, 40), (20, 2, 25), (25, 3, 2)] + + for i in range(len(input_shapes)): + input_features = [("data", datatypes.Array(*input_shapes[i]))] + output_features = [("output", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_rank_preserving_reshape( + name="rank_preserving_reshape", + input_name="data", + output_name="output", + output_shape=target_shapes[i], + ) + + x = np.random.rand(*input_shapes[i]) + input = {"data": x} + expected = {"output": np.reshape(x, output_shapes[i])} + + self._test_model(builder.spec, input, expected, useCPUOnly=True) + self.assertEqual(len(output_shapes[i]), builder._get_rank("output")) + + def test_expand_dims(self): + input_shapes = [(10, 5), (10, 5), (10, 5), (10, 5), (10,)] + axes = [(0, 1), (0, 2), (2, 0), (-2, -1), (1, 0, -2)] + output_shapes = [ + (1, 1, 10, 5), + (1, 10, 1, 5), + (1, 10, 1, 5), + (10, 5, 1, 1), + (1, 1, 1, 10), + ] + + for i in range(len(input_shapes)): + input_features = [("data", datatypes.Array(*input_shapes[i]))] + output_features = [("output", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_expand_dims( + name="expand_dims", + input_name="data", + output_name="output", + axes=axes[i], + ) + + x = np.random.rand(*input_shapes[i]) + input = {"data": x} + expected = {"output": np.reshape(x, output_shapes[i])} + + self._test_model(builder.spec, input, expected, useCPUOnly=True) + self.assertEqual(len(output_shapes[i]), builder._get_rank("output")) + + def test_squeeze(self): + input_shapes = [ + (1, 1, 10, 5), + (1, 10, 1, 5), + (10, 5, 1, 1), + (10, 5, 1, 1), + (1,), + (10, 5, 1, 1), + (3, 1, 7), + ] + axes = [(0, 1), (0, 2), (-2, -1), (-1, -2), (0,), (3, -2), (1,)] + output_shapes = [(10, 5), (10, 5), (10, 5), (10, 5), (1,), (10, 
5), (3, 7)] + + for i in range(len(input_shapes)): + input_features = [("data", datatypes.Array(*input_shapes[i]))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_squeeze( + name="squeeze_layer", + input_name="data", + output_name="output", + axes=list(axes[i]), + ) + + x = np.random.rand(*input_shapes[i]) + input = {"data": x} + expected = {"output": np.reshape(x, output_shapes[i])} + + self._test_model(builder.spec, input, expected, useCPUOnly=True) + self.assertEqual(len(output_shapes[i]), builder._get_rank("output")) + + def test_squeeze_all(self): + input_shapes = [ + (1, 1, 10, 5), + (1, 10, 1, 5), + (10, 5, 1, 1), + (10, 5, 1, 1), + (1,), + (10, 5, 1, 1), + (3, 1, 7), + (3,), + (5, 6), + ] + for input_shape in input_shapes: + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_squeeze( + name="squeeze_layer", + input_name="data", + output_name="output", + squeeze_all=True, + ) + + x = np.random.rand(*input_shape) + input = {"data": x} + reference = np.squeeze(x) + if not reference.shape: + reference = np.reshape(reference, (1,)) + expected = {"output": reference} + + self._test_model(builder.spec, input, expected, useCPUOnly=True) + self.assertEqual(-1, builder._get_rank("output")) + + def test_argmax_argmin(self): + test_input_shapes = [(9,), (8, 6), (9, 8, 10), (5, 9, 7, 9), (12, 8, 6, 6, 7)] + + # (1+2+3+4+5) * 2^3 = 120 test cases + for input_shape in test_input_shapes: + for negative_axis in [False, True]: + for mode in ["argmax", "argmin"]: + for keep_dims in [True, False]: + for axis in np.arange(len(input_shape)): + + if negative_axis: + axis_val = axis - len(input_shape) + else: + axis_val = axis + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + + x = np.random.rand(*input_shape) + + if mode == "argmax": + builder.add_argmax( + "argmax", + "data", + "output", + axis=axis_val, + keepdims=keep_dims, + ) + np_out = np.argmax(x, axis=axis_val) + else: + builder.add_argmin( + "argmin", + "data", + "output", + axis=axis_val, + keepdims=keep_dims, + ) + np_out = np.argmin(x, axis=axis_val) + + if keep_dims: + np_out = np.expand_dims(np_out, axis=axis_val) + elif len(input_shape) == 1: + np_out = np.expand_dims(np_out, axis=axis_val) + + input = {"data": x} + expected = {"output": np_out} + + test_case = "test_argmax_argmin_input_shape_{}_axis_{}_keep_dims_{}_numpy_out_shape_{}".format( + x.shape, axis_val, keep_dims, np_out.shape + ) + + self._test_model( + builder.spec, input, expected, useCPUOnly=True + ) + if len(np_out.shape) != 0: + self.assertEqual( + len(np_out.shape), builder._get_rank("output") + ) + + def test_get_shape(self): + dims = [1, 2, 3, 4, 5] + for rank in range(1, len(dims) + 1): + input_shape = dims[:rank] + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_get_shape( + name="get_shape_layer", input_name="data", output_name="output" + ) + + feed = {"data": np.random.rand(*input_shape)} + expected 
= {"output": np.array(input_shape)} + + self._test_model(builder.spec, feed, expected, useCPUOnly=True) + self.assertEqual(1, builder._get_rank("output")) + + def test_load_constant_nd(self): + dims = [2, 3, 4, 5, 6] + for rank in range(1, len(dims) + 1): + input_shape = dims[:rank] + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_load_constant_nd( + "load_const_nd_layer", + "tmp", + constant_value=np.ones(input_shape), + shape=input_shape, + ) + builder.add_elementwise("add_layer", ["data", "tmp"], "output", mode="ADD") + feed = {"data": np.random.rand(*input_shape)} + expected = {"output": feed["data"] + 1} + + self._test_model(builder.spec, feed, expected, useCPUOnly=True) + self.assertEqual(rank, builder._get_rank("output")) + + def test_simple_array_alloc_scatter(self): + alloc_shape = [2, 3, 4] + value_shape = [1, 3, 4] + input_features = [ + ("alloc_shape", datatypes.Array(len(alloc_shape))), + ("value", datatypes.Array(*value_shape)), + ("index", datatypes.Array(1)), + ] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_fill_dynamic( + name="fill_dynamic_layer", + input_name="alloc_shape", + output_name="array", + value=np.float32(0.0), + ) + # CoreML input order: container (array), indices, slices (value) + builder.add_scatter( + name="scatter_layer", + input_names=["array", "index", "value"], + output_name="output", + ) + + value = np.random.rand(*value_shape).astype("float") + feed = { + "alloc_shape": np.array(alloc_shape, dtype="float"), + "value": value, + "index": np.array([1], dtype="float"), + } + + ref = np.zeros(alloc_shape) + ref[1, :, :] = value + expected = {"output": ref} + + self._test_model(builder.spec, feed, expected, useCPUOnly=True) + + def test_erf_activation_cpu(self, cpu_only=True): + input_features = [("data", datatypes.Array(10, 45))] + output_features = [("output", datatypes.Array(10, 45))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_erf(name="erf", input_name="data", output_name="output") + x = np.random.rand(10, 45) + input = {"data": x} + expected = { + "output": np.asarray([math.erf(i) for i in x.flatten().tolist()]).reshape( + 10, 45 + ) + } + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_erf_activation_gpu(self): + self.test_erf_activation_cpu(cpu_only=False) + + def test_gelu_activation(self): + + for mode in ["EXACT", "TANH_APPROXIMATION", "SIGMOID_APPROXIMATION"]: + for rank in range(1, 6): + shape = np.random.randint(low=2, high=5, size=rank) + input_features = [("data", datatypes.Array(*shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_gelu( + name="gelu", input_name="data", output_name="output", mode=mode + ) + + x = np.random.rand(*shape) + input = {"data": x} + exact = np.asarray( + [ + 0.5 * i * (1.0 + math.erf(i / math.sqrt(2))) + for i in x.flatten().tolist() + ] + ).reshape(*shape) + + expected = {"output": exact} + self._test_model(builder.spec, input, expected, useCPUOnly=True) + + def test_lower_triangular_cpu(self, cpu_only=True): + for rank in range(2, 6): + for k 
in range(-3, 4): + shape = np.random.randint(low=2, high=6, size=rank) + input_features = [("data", datatypes.Array(*shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_lower_triangular("tril", "data", "output", k=k) + + x = np.random.rand(*shape) + input = {"data": x} + expected = {"output": np.tril(x, k=k)} + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_lower_triangular_gpu(self): + self.test_lower_triangular_cpu(cpu_only=False) + + def test_upper_triangular_cpu(self, cpu_only=True): + for rank in range(2, 6): + for k in range(-3, 4): + shape = np.random.randint(low=2, high=6, size=rank) + input_features = [("data", datatypes.Array(*shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_upper_triangular("triu", "data", "output", k=k) + + x = np.random.rand(*shape) + input = {"data": x} + expected = {"output": np.triu(x, k=k)} + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_upper_triangular_gpu(self): + self.test_upper_triangular_cpu(cpu_only=False) + + def test_where_broadcastable_cpu(self, cpu_only=True): + for _ in range(150): + rank_cond = np.random.randint(low=1, high=6) + rank_true = np.random.randint(low=1, high=6) + rank_false = np.random.randint(low=1, high=6) + + rank_out = max(rank_cond, rank_true, rank_false) + + shape_cond = np.random.randint(low=2, high=8, size=rank_cond) + shape_true = np.random.randint(low=2, high=8, size=rank_true) + shape_false = np.random.randint(low=2, high=8, size=rank_false) + + for i in range(-1, -rank_out - 1, -1): + dims = [] + if -i <= rank_cond: + dims.append(shape_cond[i]) + if -i <= rank_true: + dims.append(shape_true[i]) + if -i <= rank_false: + dims.append(shape_false[i]) + + dim = np.random.choice(dims) + if -i <= rank_cond: + shape_cond[i] = np.random.choice([1, dim]) + if -i <= rank_true: + shape_true[i] = np.random.choice([1, dim]) + if -i <= rank_false: + shape_false[i] = np.random.choice([1, dim]) + + input_features = [ + ("cond", datatypes.Array(*shape_cond)), + ("true", datatypes.Array(*shape_true)), + ("false", datatypes.Array(*shape_false)), + ] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_where_broadcastable( + "if_broadcastable", + input_names=["cond", "true", "false"], + output_name="output", + ) + + cond = np.random.choice([1.0, 0.0], size=shape_cond) + true = np.random.rand(*shape_true) + false = np.random.rand(*shape_false) + + input = {"cond": cond, "true": true, "false": false} + expected = {"output": np.where(cond, true, false)} + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + self.assertEqual(len(expected["output"].shape), builder._get_rank("output")) + + def test_where_broadcastable_gpu(self): + self.test_where_broadcastable_cpu(cpu_only=False) + + @pytest.mark.slow + def test_random_normal_like_cpu(self, cpu_only=True): + mean, stddev, seed = 0.0, 1.0, 42 + + for rank in range(5, -1, -1): + if rank > 0: + low_factor = np.random.randint(low=2, high=4) + low = int(np.power(1000, 1.0 / rank)) * low_factor + high = int(np.power(2000, 1.0 / rank)) * np.random.randint( + low=low_factor, high=4 + ) + shape = np.random.randint(low=low, 
high=high, size=rank) + else: # one extra test to test more moments + shape = np.array([10, 10, 10, 10, 10000]) + + input_features = [("tensor", datatypes.Array(*shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_random_normal_like( + name="random_normal_like", + input_name="tensor", + output_name="output", + mean=mean, + stddev=stddev, + seed=seed, + ) + + inputs = {"tensor": np.random.rand(*shape)} + expected = {"output": np.random.normal(mean, stddev, shape)} + + if rank > 0: + CorrectnessTest._compare_moments( + builder.spec, inputs, expected, num_moments=2 + ) + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + else: # one extra test to test more moments + CorrectnessTest._compare_moments( + builder.spec, inputs, expected, num_moments=6 + ) + + @pytest.mark.slow + def test_random_normal_like_gpu(self): + self.test_random_normal_like_cpu(cpu_only=False) + + def test_random_normal_static_cpu(self, cpu_only=True): + + mean, stddev, seed = 0.0, 1.0, 42 + + for rank in range(1, 6): + low_factor = np.random.randint(low=2, high=4) + low = int(np.power(1000, 1.0 / rank)) * low_factor + high = int(np.power(2000, 1.0 / rank)) * np.random.randint( + low=low_factor, high=4 + ) + + shape = np.random.randint(low=low, high=high, size=rank) + + input_features = [("data", datatypes.Array(*shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_random_normal_static( + name="random_normal_static", + output_name="tmp", + output_shape=list(shape), + mean=mean, + stddev=stddev, + seed=seed, + ) + + builder.add_elementwise("add_layer", ["data", "tmp"], "output", mode="ADD") + + data = np.zeros(shape) + inputs = {"data": data} + expected = {"output": data + np.random.normal(mean, stddev, shape)} + + CorrectnessTest._compare_moments( + builder.spec, inputs, expected, num_moments=2 + ) + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + self.assertEqual(rank, builder._get_rank("output")) + + def test_random_normal_static_gpu(self): + self.test_random_normal_static_cpu(cpu_only=False) + + def test_random_normal_dynamic_cpu(self, cpu_only=True): + mean, stddev, seed = 0.0, 1.0, 42 + + for rank in range(1, 6): + low_factor = np.random.randint(low=2, high=4) + low = int(np.power(1000, 1.0 / rank)) * low_factor + high = int(np.power(2000, 1.0 / rank)) * np.random.randint( + low=low_factor, high=4 + ) + + shape = np.random.randint(low=low, high=high, size=rank) + + input_features = [("shape", datatypes.Array(len(shape)))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_random_normal_dynamic( + name="random_normal_dynamic", + input_names=["shape"], + output_name="output", + mean=mean, + stddev=stddev, + seed=seed, + ) + + inputs = {"shape": np.array(shape, np.float32)} + expected = {"output": np.random.normal(mean, stddev, shape)} + + CorrectnessTest._compare_moments( + builder.spec, inputs, expected, num_moments=2 + ) + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + self.assertEqual(-1, builder._get_rank("output")) + + def test_random_normal_dynamic_gpu(self): + self.test_random_normal_dynamic_cpu(cpu_only=False) + + def test_random_uniform_like_cpu(self, cpu_only=True): + minval, maxval, seed = 0.0, 1.0, 42 + + for rank in range(1, 6): + low_factor = 
np.random.randint(low=2, high=4) + low = int(np.power(1000, 1.0 / rank)) * low_factor + high = int(np.power(2000, 1.0 / rank)) * np.random.randint( + low=low_factor, high=4 + ) + + shape = np.random.randint(low=low, high=high, size=rank) + + input_features = [("tensor", datatypes.Array(*shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_random_uniform_like( + name="random_uniform_like", + input_name="tensor", + output_name="output", + minval=minval, + maxval=maxval, + seed=seed, + ) + + tensor = np.random.rand(*shape) + inputs = {"tensor": tensor} + expected = {"output": np.random.uniform(minval, maxval, shape)} + + CorrectnessTest._compare_moments(builder.spec, inputs, expected) + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + self.assertEqual(rank, builder._get_rank("output")) + + def test_random_uniform_like_gpu(self): + self.test_random_uniform_like_cpu(cpu_only=False) + + def test_random_uniform_static_cpu(self, cpu_only=True): + minval, maxval, seed = 0.0, 1.0, 42 + + for rank in range(1, 6): + low_factor = np.random.randint(low=2, high=4) + low = int(np.power(1000, 1.0 / rank)) * low_factor + high = int(np.power(2000, 1.0 / rank)) * np.random.randint( + low=low_factor, high=4 + ) + + shape = np.random.randint(low=low, high=high, size=rank) + + input_features = [("data", datatypes.Array(*shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_random_uniform_static( + name="random_uniform_static", + output_name="tmp", + output_shape=list(shape), + minval=minval, + maxval=maxval, + seed=seed, + ) + + builder.add_elementwise("add_layer", ["data", "tmp"], "output", mode="ADD") + + data = np.zeros(shape) + inputs = {"data": data} + expected = {"output": data + np.random.uniform(minval, maxval, shape)} + + CorrectnessTest._compare_moments(builder.spec, inputs, expected) + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + self.assertEqual(rank, builder._get_rank("output")) + + def test_random_uniform_static_gpu(self): + self.test_random_uniform_static_cpu(cpu_only=False) + + def test_random_uniform_dynamic_cpu(self, cpu_only=True): + minval, maxval, seed = 0.0, 1.0, 42 + + for rank in range(1, 6): + low_factor = np.random.randint(low=2, high=4) + low = int(np.power(1000, 1.0 / rank)) * low_factor + high = int(np.power(2000, 1.0 / rank)) * np.random.randint( + low=low_factor, high=4 + ) + + shape = np.random.randint(low=low, high=high, size=rank) + + input_features = [("shape", datatypes.Array(len(shape)))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_random_uniform_dynamic( + name="random_uniform_dynamic", + input_names=["shape"], + output_name="output", + minval=minval, + maxval=maxval, + seed=seed, + ) + + inputs = {"shape": np.array(shape, np.float32)} + expected = {"output": np.random.uniform(minval, maxval, shape)} + + CorrectnessTest._compare_moments(builder.spec, inputs, expected) + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + self.assertEqual(-1, builder._get_rank("output")) + + def test_random_uniform_dynamic_gpu(self): + self.test_random_uniform_dynamic_cpu(cpu_only=False) + + def test_random_bernoulli_like_cpu(self, cpu_only=True): + + prob, seed = 0.5, 42 + + for rank in range(1, 6): + low_factor = 
np.random.randint(low=2, high=4) + low = int(np.power(1000, 1.0 / rank)) * low_factor + high = int(np.power(2000, 1.0 / rank)) * np.random.randint( + low=low_factor, high=4 + ) + + shape = np.random.randint(low=low, high=high, size=rank) + + input_features = [("tensor", datatypes.Array(*shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_random_bernoulli_like( + name="random_bernoulli_like", + input_name="tensor", + output_name="output", + prob=prob, + seed=seed, + ) + + tensor = np.random.rand(*shape) + inputs = {"tensor": tensor} + expected = {"output": np.random.binomial(1, prob, shape)} + + CorrectnessTest._compare_moments(builder.spec, inputs, expected) + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + + def test_random_bernoulli_like_gpu(self): + self.test_random_bernoulli_like_cpu(cpu_only=False) + + def test_random_bernoulli_static_cpu(self, cpu_only=True): + prob, seed = 0.5, 42 + + for rank in range(1, 6): + low_factor = np.random.randint(low=2, high=4) + low = int(np.power(1000, 1.0 / rank)) * low_factor + high = int(np.power(2000, 1.0 / rank)) * np.random.randint( + low=low_factor, high=4 + ) + + shape = np.random.randint(low=low, high=high, size=rank) + + input_features = [("data", datatypes.Array(*shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_random_bernoulli_static( + name="random_bernoulli_static", + output_name="tmp", + output_shape=list(shape), + prob=prob, + seed=seed, + ) + + builder.add_elementwise("add_layer", ["data", "tmp"], "output", mode="ADD") + + data = np.zeros(shape) + inputs = {"data": data} + expected = {"output": data + np.random.binomial(1, prob, shape)} + + CorrectnessTest._compare_moments(builder.spec, inputs, expected) + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + + def test_random_bernoulli_static_gpu(self): + self.test_random_bernoulli_static_cpu(cpu_only=False) + + def test_random_bernoulli_dynamic_cpu(self, cpu_only=True): + prob, seed = 0.5, 42 + + for rank in range(1, 6): + low_factor = np.random.randint(low=2, high=4) + low = int(np.power(1000, 1.0 / rank)) * low_factor + high = int(np.power(2000, 1.0 / rank)) * np.random.randint( + low=low_factor, high=4 + ) + + shape = np.random.randint(low=low, high=high, size=rank) + + input_features = [("shape", datatypes.Array(len(shape)))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_random_bernoulli_dynamic( + name="random_bernoulli_dynamic", + input_names=["shape"], + output_name="output", + prob=prob, + seed=seed, + ) + + inputs = {"shape": np.array(shape, np.float32)} + expected = {"output": np.random.binomial(1, prob, shape)} + + CorrectnessTest._compare_moments(builder.spec, inputs, expected) + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + + def test_random_bernoulli_dynamic_gpu(self): + self.test_random_bernoulli_dynamic_cpu(cpu_only=False) + + def test_categorical_distribution_cpu_shapes(self): + + for rank in range(1, 6): + shape = np.random.randint(low=2, high=8, size=rank) + num_samples = np.random.randint(low=10, high=1000) + + input_features = [("data", datatypes.Array(*shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + 
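+            # Note (added comment): sampling is nondeterministic, so this test
+            # validates shapes only. The layer replaces the last dimension of the
+            # input with num_samples, and the random reference built below is
+            # passed to _test_model with validate_shapes_only=True.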
builder.add_categorical_distribution( + name="categorical_distribution", + input_name="data", + output_name="output", + num_samples=num_samples, + ) + + x = np.random.randint(low=0, high=20, size=shape).astype(np.float32) + inputs = {"data": x} + shape[-1] = num_samples + expected = {"output": np.random.rand(*shape)} + + self._test_model( + builder.spec, + inputs, + expected, + useCPUOnly=True, + validate_shapes_only=True, + ) + + @pytest.mark.xfail( + reason="rdar://64153463 ([GitLab CI] test_categorical_distribution_cpu_probs failing)" + ) + def test_categorical_distribution_cpu_logits(self): + def softmax(data): + e_data = np.exp(data - np.max(data)) + return e_data / e_data.sum() + + num_samples, num_class = 50000, 10 + input_name, output_name = "data", "output" + + shapes = [ + (2, num_class), + (2, 1, num_class), + (1, 2, num_class), + (2, 1, 1, num_class), + (1, 2, 1, num_class), + (1, 1, 2, num_class), + (2, 1, 1, 1, num_class), + (1, 2, 1, 1, num_class), + (1, 1, 2, 1, num_class), + (1, 1, 1, 2, num_class), + ] + + for shape in shapes: + input_features = [("data", datatypes.Array(*shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_categorical_distribution( + name="categorical_distribution", + input_name=input_name, + output_name=output_name, + num_samples=num_samples, + is_logits=True, + seed=42, + ) + + x = np.random.rand(*shape) + inputs = {input_name: x} + + model = builder.spec + if isinstance(model, str): + model = coremltools.models.MLModel(model) + + model = coremltools.models.MLModel(model) + prediction = model.predict(inputs) + + # validate each distribution separately + logits = x.reshape(2, num_class) + probs = [softmax(logits[0]), softmax(logits[1])] + + ref0 = np.random.multinomial(num_samples, probs[0]) + ref1 = np.random.multinomial(num_samples, probs[1]) + + pre0 = prediction[output_name].reshape(2, num_samples)[0] + pre1 = prediction[output_name].reshape(2, num_samples)[1] + + expected = {output_name: np.stack((pre0, pre1))} + + # convert to bincount and validate probabilities + pre0 = np.bincount(np.array(pre0).astype(np.int32), minlength=num_class) + pre1 = np.bincount(np.array(pre1).astype(np.int32), minlength=num_class) + + np.testing.assert_allclose( + np.true_divide(pre0, num_samples), probs[0], atol=1e-2 + ) + np.testing.assert_allclose( + np.true_divide(pre0, num_samples), + np.true_divide(ref0, num_samples), + atol=1e-2, + ) + + np.testing.assert_allclose( + np.true_divide(pre1, num_samples), probs[1], atol=1e-2 + ) + np.testing.assert_allclose( + np.true_divide(pre1, num_samples), + np.true_divide(ref1, num_samples), + atol=1e-2, + ) + + self._test_model( + model, + inputs, + expected, + useCPUOnly=True, + output_name_shape_dict={"output": prediction["output"].shape}, + ) + + @pytest.mark.xfail( + reason="rdar://64153463 ([GitLab CI] test_categorical_distribution_cpu_probs failing)" + ) + def test_categorical_distribution_cpu_probs(self): + def softmax(data): + e_data = np.exp(data - np.max(data)) + return e_data / e_data.sum() + + num_samples, num_class = 50000, 10 + input_name, output_name = "data", "output" + + shapes = [ + (2, num_class), + (2, 1, num_class), + (1, 2, num_class), + (2, 1, 1, num_class), + (1, 2, 1, num_class), + (1, 1, 2, num_class), + (2, 1, 1, 1, num_class), + (1, 2, 1, 1, num_class), + (1, 1, 2, 1, num_class), + (1, 1, 1, 2, num_class), + ] + + for shape in shapes: + input_features = [("data", datatypes.Array(*shape))] + + builder = 
neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_categorical_distribution( + name="categorical_distribution", + input_name=input_name, + output_name=output_name, + num_samples=num_samples, + is_logits=False, + seed=42, + ) + + x = np.random.rand(*shape) + probs = x.reshape(2, num_class) + probs[0], probs[1] = softmax(probs[0]), softmax(probs[1]) + inputs = {input_name: np.reshape(probs, shape)} + + model = builder.spec + if isinstance(model, str): + model = coremltools.models.MLModel(model) + + model = coremltools.models.MLModel(model, useCPUOnly=True) + prediction = model.predict(inputs, useCPUOnly=True) + + # validate each distribution separately + probs = probs.reshape(2, num_class) + + ref0 = np.random.multinomial(num_samples, probs[0]) + ref1 = np.random.multinomial(num_samples, probs[1]) + + pre0 = prediction[output_name].reshape(2, num_samples)[0] + pre1 = prediction[output_name].reshape(2, num_samples)[1] + + expected = {output_name: np.stack((pre0, pre1))} + + # convert to bincount and validate probabilities + pre0 = np.bincount(np.array(pre0).astype(np.int32), minlength=num_class) + pre1 = np.bincount(np.array(pre1).astype(np.int32), minlength=num_class) + + np.testing.assert_allclose( + np.true_divide(pre0, num_samples), probs[0], atol=1e-2 + ) + np.testing.assert_allclose( + np.true_divide(pre0, num_samples), + np.true_divide(ref0, num_samples), + atol=1e-2, + ) + + np.testing.assert_allclose( + np.true_divide(pre1, num_samples), probs[1], atol=1e-2 + ) + np.testing.assert_allclose( + np.true_divide(pre1, num_samples), + np.true_divide(ref1, num_samples), + atol=1e-2, + ) + + self._test_model( + model, + inputs, + expected, + useCPUOnly=True, + output_name_shape_dict={"output": prediction["output"].shape}, + ) + + def test_reverse_cpu(self, cpu_only=True): + + for rank in range(1, 6): + for _ in range(20): + input_shape = np.random.randint(low=2, high=8, size=rank) + reverse_dim = [np.random.choice([True, False]) for _ in range(rank)] + axes = [i for i in range(rank) if reverse_dim[i] == True] + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_reverse("reverse", "data", "output", reverse_dim) + + x = np.random.rand(*input_shape) + input = {"data": x} + expected = {"output": np.flip(x, axis=axes)} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_reverse_gpu(self): + self.test_reverse_cpu(cpu_only=False) + + def test_matrix_band_part_cpu(self, cpu_only=True): + + for rank in range(2, 6): + for _ in range(20): + num_lower = np.random.randint(low=-7, high=8) + num_upper = np.random.randint(low=-7, high=8) + shape = np.random.randint(low=2, high=6, size=rank) + input_features = [("data", datatypes.Array(*shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_matrix_band_part( + "matrix_band_part", + "data", + "output", + num_lower=num_lower, + num_upper=num_upper, + ) + + x = np.random.rand(*shape) + input = {"data": x} + + rows, cols = shape[-2:] + band = np.ones((rows, cols)) + for m in range(rows): + for n in range(cols): + band[m, n] = (num_lower < 0 or (m - n) <= num_lower) and ( + num_upper < 0 or (n - m) <= num_upper + ) + + expected = {"output": 
np.multiply(band, x)} + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_matrix_band_part_gpu(self): + self.test_matrix_band_part_cpu(cpu_only=False) + + def test_flatten_to_2d_cpu(self, cpu_only=True): + + for rank in range(1, 6): + for axis in range(-rank, rank + 1): + shape = np.random.randint(low=2, high=6, size=rank) + input_features = [("data", datatypes.Array(*shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_flatten_to_2d("flatten_to_2d", "data", "output", axis=axis) + + x = np.random.rand(*shape) + np_axis = axis + rank if axis < 0 else axis + pl, pr = 1, 1 + for i in range(0, np_axis): + pl *= shape[i] + for i in range(np_axis, len(shape)): + pr *= shape[i] + + new_shape = [pl, pr] + ref = x.reshape(new_shape) + + input = {"data": x} + expected = {"output": ref} + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + self.assertEqual(2, builder._get_rank("output")) + + def test_flatten_to_2d_gpu(self): + self.test_flatten_to_2d_cpu(cpu_only=False) + + def test_reshape_like_cpu(self, cpu_only=True): + + for rank in range(1, 6): + for _ in range(20): + input_shape = np.random.randint(low=2, high=8, size=rank) + n = int(np.prod(input_shape)) + divisors = [d for d in range(1, n) if n % d == 0] + target_rank = np.random.randint(low=2, high=6) + target_shape = [1] + for i in range(target_rank - 1): + dim_size = np.random.choice(divisors) + while n % (np.prod(target_shape) * dim_size) != 0: + dim_size = np.random.choice(divisors) + target_shape.append(dim_size) + target_shape[0] = n // np.prod(target_shape) + + np.random.shuffle(target_shape) + input_features = [ + ("data", datatypes.Array(*input_shape)), + ("tensor", datatypes.Array(*target_shape)), + ] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_reshape_like( + name="reshape_like", + input_names=["data", "tensor"], + output_name="output", + ) + + data = np.random.rand(*input_shape) + tensor = np.random.rand(*target_shape) + inputs = {"data": data, "tensor": tensor} + expected = {"output": np.reshape(data, target_shape)} + + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + self.assertEqual(target_rank, builder._get_rank("output")) + + def test_reshape_like_gpu(self): + self.test_reshape_like_cpu(cpu_only=False) + + def test_reshape_static_cpu(self, cpu_only=True): + for rank in range(1, 6): + for _ in range(20): + input_shape = np.random.randint(low=2, high=8, size=rank) + n = int(np.prod(input_shape)) + divisors = [d for d in range(1, n) if n % d == 0] + target_rank = np.random.randint(low=2, high=6) + + target_shape = [1] + for i in range(target_rank - 1): + dim_size = np.random.choice(divisors) + while n % (np.prod(target_shape) * dim_size) != 0: + dim_size = np.random.choice(divisors) + target_shape.append(dim_size) + + target_shape[0] = -1 + + np.random.shuffle(target_shape) + input_features = [("data", datatypes.Array(*input_shape))] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_reshape_static( + name="reshape_static", + input_name="data", + output_name="output", + output_shape=target_shape, + ) + + data = np.random.rand(*input_shape) + inputs = {"data": data} + expected = {"output": np.reshape(data, target_shape)} + + 
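+                # Note (added comment): one entry of target_shape was set to -1
+                # above (and then shuffled), so the layer must infer that
+                # dimension. np.reshape resolves -1 the same way, which makes the
+                # numpy result a valid reference for the static reshape layer.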
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + self.assertEqual(len(target_shape), builder._get_rank("output")) + + def test_reshape_static_gpu(self): + self.test_reshape_static_cpu(cpu_only=False) + + def test_reshape_dynamic_cpu(self, cpu_only=True): + for rank in range(1, 6): + for _ in range(20): + input_shape = np.random.randint(low=2, high=8, size=rank) + n = int(np.prod(input_shape)) + divisors = [d for d in range(1, n) if n % d == 0] + target_rank = np.random.randint(low=2, high=6) + + target_shape = [1] + for i in range(target_rank - 1): + dim_size = np.random.choice(divisors) + while n % (np.prod(target_shape) * dim_size) != 0: + dim_size = np.random.choice(divisors) + target_shape.append(dim_size) + + target_shape[0] = -1 + + np.random.shuffle(target_shape) + input_features = [ + ("data", datatypes.Array(*input_shape)), + ("shape", datatypes.Array(len(target_shape))), + ] + + builder = neural_network.NeuralNetworkBuilder( + input_features, [("output", None)], disable_rank5_shape_mapping=True + ) + + builder.add_reshape_dynamic( + name="reshape_dynamic", + input_names=["data", "shape"], + output_name="output", + ) + + data = np.random.rand(*input_shape) + inputs = {"data": data, "shape": np.array(target_shape, dtype="float")} + expected = {"output": np.reshape(data, target_shape)} + + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + self.assertEqual(-1, builder._get_rank("output")) + + def test_reshape_dynamic_gpu(self): + self.test_reshape_dynamic_cpu(cpu_only=False) + + def test_reduce_sum_cpu(self, cpu_only=True): + + for rank in range(1, 6): + axes_list = [ + axes + for length in range(1, rank + 1) + for axes in itertools.combinations(range(rank), length) + ] + axes_list.append(None) + + for axes in axes_list: + if axes: + axes = tuple( + [ + axis if np.random.choice([True, False]) else axis - rank + for axis in axes + ] + ) + reduce_all = False + else: + reduce_all = True + + for keep_dims in [True, False]: + input_shape = np.random.randint(low=2, high=5, size=rank) + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + + builder.add_reduce_sum( + "reduce", + "data", + "output", + axes, + keepdims=keep_dims, + reduce_all=reduce_all, + ) + + x = np.random.rand(*input_shape) + input = {"data": x} + expected = {"output": np.add.reduce(x, axes, keepdims=keep_dims)} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + expected_rank = len(expected["output"].shape) + if expected_rank == 0: + expected_rank = 1 + self.assertEqual(expected_rank, builder._get_rank("output")) + + def test_reduce_sum_gpu(self): + self.test_reduce_sum_cpu(cpu_only=False) + + def test_reduce_prod_cpu(self, cpu_only=True): + + for rank in range(1, 6): + axes_list = [ + axes + for length in range(1, rank + 1) + for axes in itertools.combinations(range(rank), length) + ] + axes_list.append(None) + + for axes in axes_list: + if axes: + axes = tuple( + [ + axis if np.random.choice([True, False]) else axis - rank + for axis in axes + ] + ) + reduce_all = False + else: + reduce_all = True + + for keep_dims in [True, False]: + input_shape = np.random.randint(low=2, high=5, size=rank) + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + 
disable_rank5_shape_mapping=True, + ) + + builder.add_reduce_prod( + "reduce", + "data", + "output", + axes, + keepdims=keep_dims, + reduce_all=reduce_all, + ) + + x = np.random.rand(*input_shape) + input = {"data": x} + expected = { + "output": np.multiply.reduce(x, axes, keepdims=keep_dims) + } + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + expected_rank = len(expected["output"].shape) + if expected_rank == 0: + expected_rank = 1 + self.assertEqual(expected_rank, builder._get_rank("output")) + + def test_reduce_prod_gpu(self): + self.test_reduce_prod_cpu(cpu_only=False) + + def test_reduce_mean_cpu(self, cpu_only=True): + for rank in range(1, 6): + axes_list = [ + axes + for length in range(1, rank + 1) + for axes in itertools.combinations(range(rank), length) + ] + axes_list.append(None) + + for axes in axes_list: + if axes: + axes = tuple( + [ + axis if np.random.choice([True, False]) else axis - rank + for axis in axes + ] + ) + reduce_all = False + else: + reduce_all = True + + for keep_dims in [True, False]: + input_shape = np.random.randint(low=2, high=5, size=rank) + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + + builder.add_reduce_mean( + "reduce", + "data", + "output", + axes, + keepdims=keep_dims, + reduce_all=reduce_all, + ) + + x = np.random.rand(*input_shape) + input = {"data": x} + expected = {"output": np.mean(x, axes, keepdims=keep_dims)} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_reduce_mean_gpu(self): + self.test_reduce_mean_cpu(cpu_only=False) + + def test_reduce_max_cpu(self, cpu_only=True): + for rank in range(1, 6): + axes_list = [ + axes + for length in range(1, rank + 1) + for axes in itertools.combinations(range(rank), length) + ] + axes_list.append(None) + + for axes in axes_list: + if axes: + axes = tuple( + [ + axis if np.random.choice([True, False]) else axis - rank + for axis in axes + ] + ) + reduce_all = False + else: + reduce_all = True + + for keep_dims in [True, False]: + input_shape = np.random.randint(low=2, high=5, size=rank) + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + + builder.add_reduce_max( + "reduce", + "data", + "output", + axes, + keepdims=keep_dims, + reduce_all=reduce_all, + ) + + x = np.random.rand(*input_shape) + input = {"data": x} + expected = { + "output": np.maximum.reduce(x, axes, keepdims=keep_dims) + } + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_reduce_max_gpu(self): + self.test_reduce_max_cpu(cpu_only=False) + + def test_reduce_min_cpu(self, cpu_only=True): + for rank in range(1, 6): + axes_list = [ + axes + for length in range(1, rank + 1) + for axes in itertools.combinations(range(rank), length) + ] + axes_list.append(None) + + for axes in axes_list: + if axes: + axes = tuple( + [ + axis if np.random.choice([True, False]) else axis - rank + for axis in axes + ] + ) + reduce_all = False + else: + reduce_all = True + + for keep_dims in [True, False]: + input_shape = np.random.randint(low=2, high=5, size=rank) + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = 
neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + + builder.add_reduce_min( + "reduce", + "data", + "output", + axes, + keepdims=keep_dims, + reduce_all=reduce_all, + ) + + x = np.random.rand(*input_shape) + input = {"data": x} + expected = { + "output": np.minimum.reduce(x, axes, keepdims=keep_dims) + } + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_reduce_min_gpu(self): + self.test_reduce_min_cpu(cpu_only=False) + + def test_reduce_l2_cpu(self, cpu_only=True): + for rank in range(1, 6): + axes_list = [ + axes + for length in range(1, rank + 1) + for axes in itertools.combinations(range(rank), length) + ] + axes_list.append(None) + + for axes in axes_list: + if axes: + axes = tuple( + [ + axis if np.random.choice([True, False]) else axis - rank + for axis in axes + ] + ) + reduce_all = False + else: + reduce_all = True + + for keep_dims in [True, False]: + input_shape = np.random.randint(low=2, high=5, size=rank) + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + + builder.add_reduce_l2( + "reduce", + "data", + "output", + axes, + keepdims=keep_dims, + reduce_all=reduce_all, + ) + + x = np.random.rand(*input_shape) + input = {"data": x} + expected = { + "output": np.sqrt( + np.sum(np.square(x), axis=axes, keepdims=keep_dims) + ) + } + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_reduce_l2_gpu(self): + self.test_reduce_l2_cpu(cpu_only=False) + + def test_reduce_l1_cpu(self, cpu_only=True): + for rank in range(1, 6): + axes_list = [ + axes + for length in range(1, rank + 1) + for axes in itertools.combinations(range(rank), length) + ] + axes_list.append(None) + + for axes in axes_list: + if axes: + axes = tuple( + [ + axis if np.random.choice([True, False]) else axis - rank + for axis in axes + ] + ) + reduce_all = False + else: + reduce_all = True + + for keep_dims in [True, False]: + input_shape = np.random.randint(low=2, high=5, size=rank) + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + + builder.add_reduce_l1( + "reduce", + "data", + "output", + axes, + keepdims=keep_dims, + reduce_all=reduce_all, + ) + + x = np.random.rand(*input_shape) + input = {"data": x} + expected = { + "output": np.sum(np.abs(x), axis=axes, keepdims=keep_dims) + } + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_reduce_l1_gpu(self): + self.test_reduce_l1_cpu(cpu_only=False) + + def test_reduce_sumsquare_cpu(self, cpu_only=True): + for rank in range(1, 6): + axes_list = [ + axes + for length in range(1, rank + 1) + for axes in itertools.combinations(range(rank), length) + ] + axes_list.append(None) + + for axes in axes_list: + if axes: + axes = tuple( + [ + axis if np.random.choice([True, False]) else axis - rank + for axis in axes + ] + ) + reduce_all = False + else: + reduce_all = True + + for keep_dims in [True, False]: + input_shape = np.random.randint(low=2, high=5, size=rank) + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + 
disable_rank5_shape_mapping=True, + ) + + builder.add_reduce_sumsquare( + "reduce", + "data", + "output", + axes, + keepdims=keep_dims, + reduce_all=reduce_all, + ) + + x = np.random.rand(*input_shape) + input = {"data": x} + expected = { + "output": np.sum(np.square(x), axis=axes, keepdims=keep_dims) + } + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_reduce_sumsquare_gpu(self): + self.test_reduce_sumsquare_cpu(cpu_only=False) + + def test_reduce_logsum_cpu(self, cpu_only=True): + for rank in range(1, 6): + axes_list = [ + axes + for length in range(1, rank + 1) + for axes in itertools.combinations(range(rank), length) + ] + axes_list.append(None) + + for axes in axes_list: + if axes: + axes = tuple( + [ + axis if np.random.choice([True, False]) else axis - rank + for axis in axes + ] + ) + reduce_all = False + else: + reduce_all = True + + for keep_dims in [True, False]: + input_shape = np.random.randint(low=2, high=5, size=rank) + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + + builder.add_reduce_logsum( + "reduce", + "data", + "output", + axes, + keepdims=keep_dims, + reduce_all=reduce_all, + ) + + x = np.random.rand(*input_shape) + input = {"data": x} + expected = { + "output": np.log(np.sum(x, axis=axes, keepdims=keep_dims)) + } + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_reduce_logsum_gpu(self): + self.test_reduce_logsum_cpu(cpu_only=False) + + def test_reduce_logsumexp_cpu(self, cpu_only=True): + for rank in range(1, 6): + axes_list = [ + axes + for length in range(1, rank + 1) + for axes in itertools.combinations(range(rank), length) + ] + axes_list.append(None) + + for axes in axes_list: + if axes: + axes = tuple( + [ + axis if np.random.choice([True, False]) else axis - rank + for axis in axes + ] + ) + reduce_all = False + else: + reduce_all = True + + for keep_dims in [True, False]: + input_shape = np.random.randint(low=2, high=5, size=rank) + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + + builder.add_reduce_logsumexp( + "reduce", + "data", + "output", + axes, + keepdims=keep_dims, + reduce_all=reduce_all, + ) + + x = np.random.rand(*input_shape) + input = {"data": x} + expected = { + "output": np.log( + np.sum(np.exp(x), axis=axes, keepdims=keep_dims) + ) + } + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_reduce_logsumexp_gpu(self): + self.test_reduce_logsumexp_cpu(cpu_only=False) + + @unittest.skipIf(not _HAS_TF_2, MSG_TF2_NOT_FOUND) + def test_reverse_sequence_cpu(self, cpu_only=True): + for rank in range(2, 6): + for i in range(20): + input_shape = np.random.randint(low=2, high=6, size=rank) + + seq_axis = np.random.randint(low=-rank, high=rank) + batch_axis = np.random.randint(low=-rank, high=rank) + pos_batch_axis = batch_axis if batch_axis >= 0 else rank + batch_axis + pos_seq_axis = seq_axis if seq_axis >= 0 else rank + seq_axis + while pos_batch_axis >= pos_seq_axis: + seq_axis = np.random.randint(low=-rank, high=rank) + batch_axis = np.random.randint(low=-rank, high=rank) + pos_batch_axis = ( + batch_axis if batch_axis >= 0 else rank + batch_axis + ) + pos_seq_axis = seq_axis if seq_axis 
>= 0 else rank + seq_axis + + input_features = [ + ("data", datatypes.Array(*input_shape)), + ("lengths", datatypes.Array(input_shape[batch_axis])), + ] + + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_reverse_sequence( + "reverse_sequence", + ["data", "lengths"], + "output", + batch_axis=batch_axis, + seq_axis=seq_axis, + ) + + data = np.random.rand(*input_shape) + lengths = np.random.randint( + low=0, high=input_shape[seq_axis], size=input_shape[batch_axis] + ) + + input = {"data": data, "lengths": lengths.astype(np.float32)} + + tf_op = tf.reverse_sequence( + input=data, + seq_lengths=lengths, + seq_axis=pos_seq_axis, + batch_axis=pos_batch_axis, + ) + expected = {"output": tf_op.numpy()} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_reverse_sequence_gpu(self): + self.test_reverse_sequence_cpu(cpu_only=False) + + def test_where_nonzero_cpu(self, cpu_only=True): + + for rank in range(1, 6): + for i in range(10): + shape = np.random.randint(low=2, high=8, size=rank) + + input_features = [("data", datatypes.Array(*shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_where_nonzero("multi_indices", "data", "output") + + x = np.random.randint(low=0, high=3, size=shape) + + input = {"data": x.astype(np.float32)} + expected = {"output": np.transpose(np.nonzero(x)).astype(np.float32)} + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_where_nonzero_gpu(self): + self.test_where_nonzero_cpu(cpu_only=False) + + def test_gather_cpu(self, cpu_only=True): + for rankParams, rankIndices in [ + (i, j) for i in range(1, 6) for j in range(1, 6) + ]: + for axis in range(-rankParams, rankParams): + shapeParams = np.random.randint(low=2, high=5, size=rankParams) + shapeIndices = np.random.randint(low=2, high=5, size=rankIndices) + input_shapes = [shapeParams, shapeIndices] + posAxis = axis if axis >= 0 else axis + rankParams + output_shape = ( + list(shapeParams[:posAxis]) + + list(shapeIndices) + + list(shapeParams[posAxis + 1 :]) + ) + + if len(output_shape) > 5: + continue + + input_names = ["params", "indices"] + input_features = [ + ("params", datatypes.Array(*input_shapes[0])), + ("indices", datatypes.Array(*input_shapes[1])), + ] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_gather( + name="gather", + input_names=input_names, + output_name="output", + axis=axis, + ) + + a = np.random.rand(*input_shapes[0]) + b = np.random.randint( + -shapeParams[axis], shapeParams[axis], size=shapeIndices + ) + input = {"params": a, "indices": b.astype(np.float32)} + expected = {"output": np.take(a, b, axis=axis)} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + self.assertEqual( + len(expected["output"].shape), builder._get_rank("output") + ) + + def test_gather_gpu(self): + self.test_gather_cpu(cpu_only=False) + + def test_gather_along_axis_cpu(self, cpu_only=True): + for rank in range(1, 6): + for axis in range(-rank, rank): + for _ in range(5): + params_shape = np.random.randint(low=2, high=8, size=rank) + indices_shape = np.copy(params_shape) + indices_shape[axis] = np.random.randint(low=1, high=8) + + input_features = [ + 
("params", datatypes.Array(*params_shape)), + ("indices", datatypes.Array(*indices_shape)), + ] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + builder.add_gather_along_axis( + "gather_along_axis", ["params", "indices"], "output", axis=axis + ) + + a = np.random.rand(*params_shape) + b = np.random.randint( + -params_shape[axis], params_shape[axis], size=indices_shape + ) + + input = {"params": a, "indices": b.astype(np.float32)} + expected = {"output": np.take_along_axis(a, b, axis=axis)} + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + self.assertEqual( + len(expected["output"].shape), builder._get_rank("output") + ) + + def test_gather_along_axis_gpu(self): + self.test_gather_along_axis_cpu(cpu_only=False) + + @unittest.skipIf(not _HAS_TF_2, MSG_TF2_NOT_FOUND) + def test_gather_nd_cpu(self, cpu_only=True): + for params_rank, indices_rank in [ + (i, j) for i in range(1, 6) for j in range(1, 6) + ]: + params_shape = np.random.randint(low=2, high=8, size=params_rank) + indices_shape = np.random.randint(low=2, high=8, size=indices_rank) + indices_shape[-1] = np.random.randint(low=1, high=params_rank + 1) + + for _ in range(5): + input_features = [ + ("params", datatypes.Array(*params_shape)), + ("indices", datatypes.Array(*indices_shape)), + ] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + output_shape = list(indices_shape[:-1]) + list( + params_shape[indices_shape[-1] :] + ) + if len(output_shape) > 5: + continue + + builder.add_gather_nd("gather_nd", ["params", "indices"], "output") + + a = np.random.rand(*params_shape) + indices_list = [] + for i in range(indices_shape[-1]): + indices_list.append( + np.random.randint(0, params_shape[i], size=indices_shape[:-1]) + ) + + indices = np.stack(indices_list, axis=-1) + input = {"params": a, "indices": indices.astype(np.float32)} + + tf_op = tf.gather_nd(a, indices) + expected = {"output": tf_op.numpy()} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + self.assertEqual(-1, builder._get_rank("output")) + + def test_gather_nd_gpu(self): + self.test_gather_nd_cpu(cpu_only=False) + + @unittest.skipIf(not _HAS_TF_2, MSG_TF2_NOT_FOUND) + def test_scatter_cpu(self, cpu_only=True): + for ref_rank, indices_rank in [ + (i, j) for i in range(1, 6) for j in range(1, 6) + ]: + for accumulate_mode in ["UPDATE", "ADD", "SUB", "MUL", "DIV", "MAX", "MIN"]: + for _ in range(5): + ref_shape = np.random.randint(low=2, high=8, size=ref_rank) + indices_shape = np.random.randint(low=2, high=8, size=indices_rank) + updates_shape = list(indices_shape) + list(ref_shape[1:]) + + input_features = [ + ("ref", datatypes.Array(*ref_shape)), + ("indices", datatypes.Array(*indices_shape)), + ("updates", datatypes.Array(*updates_shape)), + ] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + + if len(updates_shape) > 5: + continue + + builder.add_scatter( + "scatter", + ["ref", "indices", "updates"], + "output", + axis=0, + mode=accumulate_mode, + ) + + ref = np.random.rand(*ref_shape) + updates = np.random.rand(*updates_shape) + if accumulate_mode == "DIV": + updates += 10.0 + indices = np.random.randint(0, ref_shape[0], size=indices_shape) + input = { + "ref": ref, + "indices": 
indices.astype(np.float32), + "updates": updates, + } + + tf_output = tf.Variable(ref) + if accumulate_mode == "UPDATE": + tf.compat.v1.scatter_update(tf_output, indices, updates) + if accumulate_mode == "ADD": + tf.compat.v1.scatter_add(tf_output, indices, updates) + if accumulate_mode == "SUB": + tf.compat.v1.scatter_sub(tf_output, indices, updates) + if accumulate_mode == "MUL": + tf.compat.v1.scatter_mul(tf_output, indices, updates) + if accumulate_mode == "DIV": + tf.compat.v1.scatter_div(tf_output, indices, updates) + if accumulate_mode == "MAX": + tf.compat.v1.scatter_max(tf_output, indices, updates) + if accumulate_mode == "MIN": + tf.compat.v1.scatter_min(tf_output, indices, updates) + expected = {"output": tf_output.numpy()} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_scatter_gpu(self): + self.test_scatter_cpu(cpu_only=False) + + def test_gather_scatter_multiple_axis_cpu(self, cpu_only=True): + + for params_rank, indices_rank in [ + (i, j) for i in range(1, 6) for j in range(1, 6) + ]: + for axis in range(-params_rank, params_rank): + for _ in range(5): + params_shape = np.random.randint(low=2, high=8, size=params_rank) + indices_shape = np.random.randint(low=2, high=8, size=indices_rank) + + pos_axis = axis if axis >= 0 else axis + params_rank + output_shape = ( + list(params_shape[:pos_axis]) + + list(indices_shape) + + list(params_shape[pos_axis + 1 :]) + ) + + if len(output_shape) > 5: + continue + + input_features = [ + ("params", datatypes.Array(*params_shape)), + ("indices", datatypes.Array(*indices_shape)), + ] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + + builder.add_gather( + "gather", ["params", "indices"], "updates", axis=axis + ) + builder.add_scatter( + "scatter", + ["params", "indices", "updates"], + "output", + axis=axis, + mode="UPDATE", + ) + + a = np.random.rand(*params_shape) + b = np.random.randint( + -params_shape[axis], params_shape[axis], size=indices_shape + ) + + input = {"params": a, "indices": b.astype(np.float32)} + expected = {"output": a} + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_gather_scatter_multiple_axis_gpu(self): + self.test_gather_scatter_multiple_axis_cpu(cpu_only=False) + + def test_scatter_along_axis_cpu(self, cpu_only=True): + for rank in range(1, 6): + for axis in range(-rank, rank): + for id in range(5): + ref_shape = np.random.randint(low=2, high=8, size=rank) + indices_shape = np.copy(ref_shape) + indices_shape[axis] = np.random.randint(low=1, high=8) + updates_shape = indices_shape + + input_features = [ + ("ref", datatypes.Array(*ref_shape)), + ("indices", datatypes.Array(*indices_shape)), + ("updates", datatypes.Array(*updates_shape)), + ] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + + builder.add_scatter_along_axis( + "scatter_along_axis", + ["ref", "indices", "updates"], + "output", + axis=axis, + mode="UPDATE", + ) + + ref = np.random.rand(*ref_shape) + updates = np.random.rand(*updates_shape) + indices = np.random.randint( + -ref_shape[axis], ref_shape[axis], size=indices_shape + ) + input = { + "ref": ref, + "indices": indices.astype(np.float32), + "updates": updates, + } + + np_output = np.copy(ref) + np.put_along_axis(np_output, indices, updates, axis=axis) + expected = {"output": np_output} + 
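+                    # Descriptive note (added): np.put_along_axis mirrors the layer's
+                    # "UPDATE" mode: along `axis`, np_output[..., indices[i], ...] is
+                    # overwritten with updates[..., i, ...], so the reference result
+                    # is computed entirely in numpy before comparing to CoreML.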
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_scatter_along_axis_gpu(self): + self.test_scatter_along_axis_cpu(cpu_only=False) + + @unittest.skipIf(not _HAS_TF_2, MSG_TF2_NOT_FOUND) + def test_scatter_nd_cpu(self, cpu_only=True): + for ref_rank, indices_rank in [ + (i, j) for i in range(1, 6) for j in range(2, 6) + ]: + ref_shape = np.random.randint(low=2, high=8, size=ref_rank) + indices_shape = np.random.randint(low=2, high=8, size=indices_rank) + indices_shape[-1] = np.random.randint(low=1, high=ref_rank + 1) + for accumulate_mode in ["UPDATE", "ADD", "SUB"]: + for id in range(20): + updates_shape = list(indices_shape[:-1]) + list( + ref_shape[indices_shape[-1] :] + ) + if len(updates_shape) > 5: + continue + + input_features = [ + ("ref", datatypes.Array(*ref_shape)), + ("indices", datatypes.Array(*indices_shape)), + ("updates", datatypes.Array(*updates_shape)), + ] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + + builder.add_scatter_nd( + "scatter_nd", + ["ref", "indices", "updates"], + "output", + mode=accumulate_mode, + ) + + ref = np.random.rand(*ref_shape) + updates = np.random.rand(*updates_shape) + indices_list = [] + for i in range(indices_shape[-1]): + indices_list.append( + np.random.randint(0, ref_shape[i], size=indices_shape[:-1]) + ) + + indices = np.stack(indices_list, axis=-1) + + input = { + "ref": ref, + "indices": indices.astype(np.float32), + "updates": updates, + } + + tf_output = tf.Variable(ref) + if accumulate_mode == "UPDATE": + tf.compat.v1.scatter_nd_update(tf_output, indices, updates) + if accumulate_mode == "ADD": + tf.compat.v1.scatter_nd_add(tf_output, indices, updates) + if accumulate_mode == "SUB": + tf.compat.v1.scatter_nd_sub(tf_output, indices, updates) + expected = {"output": tf_output.numpy()} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_scatter_nd_gpu(self): + self.test_scatter_nd_cpu(cpu_only=False) + + def test_layer_normalization_cpu(self, cpu_only=True): + def layer_norm_numpy(x, shapes, gamma_, beta_, eps=1e-5): + axes = [-i - 1 for i, _ in enumerate(shapes)] + num = x - np.mean(x, axis=tuple(axes), keepdims=True) + dem = np.sqrt( + np.sum(np.square(num), axis=tuple(axes), keepdims=True) + / np.prod(shapes) + + eps + ) + return num / dem * gamma_ + beta_ + + for rank in range(1, 6): + input_shape = np.random.randint(low=2, high=6, size=rank) + for axis in range(1, len(input_shape) + 1): + norm_shapes = input_shape[-axis:] + + data = np.random.rand(*input_shape) + + gamma = np.random.rand(*norm_shapes) + beta = np.random.rand(*norm_shapes) + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_layer_normalization( + name="layer_normalization", + input_name="data", + output_name="output", + normalized_shape=norm_shapes, + gamma=gamma, + beta=beta, + ) + + inputs = {"data": data} + ref = layer_norm_numpy(data, norm_shapes, gamma, beta) + expected = {"output": ref} + + self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only) + + def test_layer_normalization_gpu(self): + self.test_layer_normalization_cpu(cpu_only=False) + + +def get_size_after_stride(X, params): + start = params["start"] + end = params["end"] + stride = params["stride"] + if params["axis"] 
== "width": + axis = 2 + if params["axis"] == "height": + axis = 1 + if params["axis"] == "channel": + axis = 0 + N = X.shape[axis] + if end < 0: + end = end + N + end = min(end, N) + if start > N - 1: + L = 0 + else: + L = np.floor((end - 1 - start) / stride) + 1 + if L < 0: + L = 0 + return L + + +def get_numpy_predictions_slice(X, params): + start = params["start"] + end = params["end"] + stride = params["stride"] + if params["axis"] == "width": + return X[:, :, start:end:stride] + if params["axis"] == "height": + return X[:, start:end:stride, :] + if params["axis"] == "channel": + return X[start:end:stride, :, :] + + +def get_coreml_predictions_slice(X, params): + coreml_preds = [] + eval = True + try: + input_dim = X.shape + output_dim = ( + 1, + 1, + 1, + ) # some random dimensions here: we are going to remove this information later + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", datatypes.Array(*output_dim))] + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + builder.add_slice( + "slice", + "data", + "output", + start_index=params["start"], + end_index=params["end"], + stride=params["stride"], + axis=params["axis"], + ) + # Remove output shape by deleting and adding an output + del builder.spec.description.output[-1] + output = builder.spec.description.output.add() + output.name = "output" + output.type.multiArrayType.dataType = coremltools.proto.FeatureTypes_pb2.ArrayFeatureType.ArrayDataType.Value( + "DOUBLE" + ) + # save the model + model_dir = tempfile.TemporaryDirectory() + model_path = os.path.join(model_dir.name, "test_layer.mlmodel") + coremltools.utils.save_spec(builder.spec, model_path) + # prepare input and get predictions + coreml_model = coremltools.models.MLModel(model_path) + coreml_input = {"data": X} + if _is_macos() and _macos_version() >= (10, 13): + coreml_preds = coreml_model.predict(coreml_input)["output"] + else: + coreml_preds = None + except RuntimeError as e: + print(e) + eval = False + + return coreml_preds, eval + + +def get_numpy_predictions_reduce(X, params): + if params["axis"] == "CHW": + axis = (0, 1, 2) + if params["axis"] == "HW": + axis = (1, 2) + if params["axis"] == "C": + axis = 0 + if params["axis"] == "H": + axis = 1 + if params["axis"] == "W": + axis = 2 + + if params["mode"] == "sum": + return np.sum(X, axis) + if params["mode"] == "avg": + return np.mean(X, axis) + if params["mode"] == "prod": + return np.prod(X, axis) + if params["mode"] == "logsum": + return np.sum(np.log(X + 1e-6), axis) + if params["mode"] == "sumsquare": + return np.sum(X ** 2, axis) + if params["mode"] == "L2": + return np.sqrt(np.sum(X ** 2, axis)) + if params["mode"] == "L1": + return np.sum(np.abs(X), axis) + if params["mode"] == "max": + return np.amax(X, axis) + if params["mode"] == "min": + return np.amin(X, axis) + if params["mode"] == "argmax": + return np.argmax(X, axis) + + +def get_coreml_predictions_reduce(X, params): + coreml_preds = [] + eval = True + try: + input_dim = X.shape + # some random dimensions here: we are going to remove this information later + output_dim = (1, 1, 1) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", datatypes.Array(*output_dim))] + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + builder.add_reduce( + "reduce", "data", "output", axis=params["axis"], mode=params["mode"] + ) + # Remove output shape by deleting and adding an output + del builder.spec.description.output[-1] + output 
= builder.spec.description.output.add() + output.name = "output" + output.type.multiArrayType.dataType = coremltools.proto.FeatureTypes_pb2.ArrayFeatureType.ArrayDataType.Value( + "DOUBLE" + ) + # save the model + model_dir = tempfile.TemporaryDirectory() + model_path = os.path.join(model_dir.name, "test_layer.mlmodel") + coremltools.utils.save_spec(builder.spec, model_path) + # prepare input and get predictions + coreml_model = coremltools.models.MLModel(model_path) + coreml_input = {"data": X} + if _is_macos() and _macos_version() >= (10, 13): + coreml_preds = coreml_model.predict(coreml_input)["output"] + else: + coreml_preds = None + except RuntimeError as e: + print(e) + eval = False + + return coreml_preds, eval + + +@pytest.mark.slow +class StressTest(CorrectnessTest): + def test_slice_layer(self): + params_dict = dict( + input_shape=[[30, 100, 8], [80, 50, 5], [4, 12, 5], [56, 8, 14]], + axis=["channel", "height", "width"], + start=[0, 1, 2, 5], + end=[5, 100, 56, -1, -2, -4], + stride=[1, 2, 3], + ) + params = list(itertools.product(*params_dict.values())) + all_candidates = [dict(zip(params_dict.keys(), x)) for x in params] + valid_params = [] + for pr in all_candidates: + X = np.random.rand(*pr["input_shape"]) + if get_size_after_stride(X, pr): + valid_params.append(pr) + print( + "Total params to be tested: ", + len(valid_params), + "out of candidates: ", + len(all_candidates), + ) + + failed_tests_compile = [] + failed_tests_shape = [] + failed_tests_numerical = [] + for i in range(len(valid_params)): + params = valid_params[i] + X = np.random.rand(*params["input_shape"]) + np_preds = get_numpy_predictions_slice(X, params) + coreml_preds, eval = get_coreml_predictions_slice(X, params) + if eval is False: + failed_tests_compile.append(params) + elif coreml_preds is not None: + if not self._compare_shapes(np_preds, coreml_preds): + failed_tests_shape.append(params) + elif not self._compare_predictions(np_preds, coreml_preds): + failed_tests_numerical.append(params) + + self.assertEqual(failed_tests_compile, []) + self.assertEqual(failed_tests_shape, []) + self.assertEqual(failed_tests_numerical, []) + + def test_reduce_layer(self): + params_dict = dict( + input_shape=[[3, 10, 8], [8, 5, 5], [4, 12, 10], [7, 1, 14]], + mode=[ + "sum", + "avg", + "prod", + "sumsquare", + "L1", + "L2", + "max", + "min", + "argmax", + ], + axis=["CHW", "HW", "C", "H", "W"], + ) + params = list(itertools.product(*params_dict.values())) + all_candidates = [dict(zip(params_dict.keys(), x)) for x in params] + valid_params = [] + for pr in all_candidates: + if pr["mode"] == "argmax": + if pr["axis"] == "CHW" or pr["axis"] == "HW": + continue + valid_params.append(pr) + print( + "Total params to be tested: ", + len(valid_params), + "out of candidates: ", + len(all_candidates), + ) + + failed_tests_compile = [] + failed_tests_shape = [] + failed_tests_numerical = [] + for i in range(len(valid_params)): + params = valid_params[i] + X = np.random.rand(*params["input_shape"]) + np_preds = get_numpy_predictions_reduce(X, params) + coreml_preds, eval = get_coreml_predictions_reduce(X, params) + if eval is False: + failed_tests_compile.append(params) + elif coreml_preds is not None: + if not self._compare_shapes(np_preds, coreml_preds): + failed_tests_shape.append(params) + elif not self._compare_predictions(np_preds, coreml_preds): + failed_tests_numerical.append(params) + + self.assertEqual(failed_tests_compile, []) + self.assertEqual(failed_tests_shape, []) + self.assertEqual(failed_tests_numerical, []) + + 
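+# For reference, the power-iteration loop that CoreML3NetworkStressTest builds as a
+# CoreML control-flow network (see test_power_iteration_cpu below) can be sketched
+# directly in numpy. This helper is an illustrative sketch only and is not called by
+# the tests; its name is ours, and it assumes a symmetric matrix A and a unit-norm
+# starting vector x, as in the test inputs.
+def _power_iteration_reference_sketch(A, x, tolerance=1e-8, max_iterations=200):
+    for _ in range(max_iterations):
+        # Apply the matrix and renormalize the iterate.
+        y = np.matmul(A, x)
+        y = y / np.sqrt(np.sum(y ** 2))
+        # Same convergence measure as the loop body network: 1 - |cosine(y, x)|.
+        diff = 1.0 - abs(np.dot(y, x))
+        x = y
+        if diff < tolerance:
+            break
+    # Rayleigh quotient of the converged unit vector estimates the
+    # maximum-magnitude eigenvalue.
+    return np.matmul(x, np.matmul(A, x))
+
+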
+@pytest.mark.slow
+@unittest.skipIf(
+    not _is_macos() or _macos_version() < LAYERS_10_15_MACOS_VERSION,
+    "macOS 10.15+ required. Skipping tests.",
+)
+class CoreML3NetworkStressTest(CorrectnessTest):
+    def test_dyn_weight_conv2d_stress(self):
+        options = dict(
+            padding=["valid"],
+            filters=[1, 2, 4],
+            kernel_size=[1, 3, 5],  # square kernels
+            strides=[1, 2],
+            dilation_rate=[1],
+            batch_size=[1, 64, 512],
+        )
+
+        input_size = 64
+        input_channels = 64
+        input_dim = [1, input_channels, input_size, input_size]
+
+        def conv_spatial_size(image_size, kernel_size, stride, dilation, padding):
+            if padding == "valid":
+                kernel_size_dilated = (kernel_size - 1) * dilation + 1
+                return (image_size - kernel_size_dilated) // stride + 1
+            elif padding == "same":
+                return int(math.ceil(image_size * 1.0 / stride))
+            else:
+                return 0
+
+        for x in itertools.product(*options.values()):
+            kwargs = dict(zip(options.keys(), x))
+            if kwargs["strides"] > 1 and kwargs["dilation_rate"] > 1:
+                continue
+            # weight layout: (output_channels, kernel_channels, height, width)
+            weight_dim = (
+                kwargs["filters"],
+                input_channels,
+                kwargs["kernel_size"],
+                kwargs["kernel_size"],
+            )
+
+            input_dim[0] = kwargs["batch_size"]
+            input_features = [
+                ("input", datatypes.Array(*input_dim)),
+                ("weight", datatypes.Array(*weight_dim)),
+            ]
+            output_features = [("output", None)]
+
+            builder = neural_network.NeuralNetworkBuilder(
+                input_features, output_features, disable_rank5_shape_mapping=True
+            )
+
+            builder.add_convolution(
+                name="two_input_conv_layer",
+                kernel_channels=input_channels,
+                output_channels=kwargs["filters"],
+                height=kwargs["kernel_size"],
+                width=kwargs["kernel_size"],
+                stride_height=kwargs["strides"],
+                stride_width=kwargs["strides"],
+                border_mode=kwargs["padding"],
+                groups=1,
+                W=None,
+                b=None,
+                has_bias=False,
+                # add_convolution takes dilation_factors (h, w), not dilation_rate,
+                # as in the static-weight test below.
+                dilation_factors=[kwargs["dilation_rate"]] * 2,
+                input_name=["input", "weight"],
+                output_name="output",
+            )
+
+            # Assigning everything to ones should cover the execution path
+            # and engine failures, but is not a complete check on numerics.
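+            # With all-ones input and weights, no bias, and "valid" padding, every
+            # output element is a sum of kernel_size * kernel_size * input_channels
+            # ones, which is exactly the constant filled into `expected` below.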
+ out_spatial_size = conv_spatial_size( + input_size, + kwargs["kernel_size"], + kwargs["strides"], + kwargs["dilation_rate"], + kwargs["padding"], + ) + + input_val = np.ones(input_dim) + weight_val = np.ones(weight_dim) + output_dim = ( + kwargs["batch_size"], + kwargs["filters"], + out_spatial_size, + out_spatial_size, + ) + expected = np.ones(output_dim) * ( + kwargs["kernel_size"] * kwargs["kernel_size"] * input_channels + ) + + feed_dict = {"input": input_val, "weight": weight_val} + expected = {"output": expected} + + self._test_model(builder.spec, feed_dict, expected) + + def test_static_weight_conv2d_stress(self): + options = dict( + padding=["valid"], + filters=[1, 2, 5], + kernel_size=[1, 3, 4], # square kernels + strides=[1, 2], + dilation_rate=[1, 2], + batch_size=[1, 32, 512], + ) + + input_size = 64 + input_channels = 64 + input_dim = [1, input_channels, input_size, input_size] + + def conv_spatial_size(image_size, kernel_size, stride, dilation, padding): + if padding == "valid": + kernel_size_dilated = (kernel_size - 1) * dilation + 1 + return (image_size - kernel_size_dilated) // stride + 1 + elif padding == "same": + return int(math.ceil(image_size * 1.0 / stride)) + else: + return 0 + + for x in itertools.product(*options.values()): + kwargs = dict(zip(options.keys(), x)) + if kwargs["strides"] > 1 and kwargs["dilation_rate"] > 1: + continue + # weight layout: (output_channels, kernel_channels, height, width) + weight_dim = ( + kwargs["filters"], + input_channels, + kwargs["kernel_size"], + kwargs["kernel_size"], + ) + + input_dim[0] = kwargs["batch_size"] + input_features = [("input", datatypes.Array(*input_dim))] + # ('weight', datatypes.Array(*weight_dim))] + output_features = [("output", None)] + + input_weight = np.ones(weight_dim) + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_convolution( + name="two_input_conv_layer", + kernel_channels=input_channels, + output_channels=kwargs["filters"], + height=kwargs["kernel_size"], + width=kwargs["kernel_size"], + stride_height=kwargs["strides"], + stride_width=kwargs["strides"], + border_mode=kwargs["padding"], + groups=1, + W=input_weight, + b=None, + has_bias=False, + dilation_factors=[kwargs["dilation_rate"]] * 2, + input_name=["input"], + output_name="output", + ) + + # Assigning everything to ones should cover the execution path + # and engine failures, but is not a complete check on numerics. 
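+            # Same all-ones expectation as the dynamic-weight test above: dilation
+            # widens the kernel's spatial extent (changing the output size) but not
+            # the number of taps, so each output is still kernel_size^2 * channels.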
+ out_spatial_size = conv_spatial_size( + input_size, + kwargs["kernel_size"], + kwargs["strides"], + kwargs["dilation_rate"], + kwargs["padding"], + ) + + input_val = np.ones(input_dim) + weight_val = np.ones(weight_dim) + output_dim = ( + kwargs["batch_size"], + kwargs["filters"], + out_spatial_size, + out_spatial_size, + ) + expected = np.ones(output_dim) * ( + kwargs["kernel_size"] * kwargs["kernel_size"] * input_channels + ) + + feed_dict = {"input": input_val} # , 'weight': weight_val} + expected = {"output": expected} + + self._test_model(builder.spec, feed_dict, expected) + + def test_power_iteration_cpu(self): + + convergence_tolerance = 1e-8 + number_of_iterations = 200 + + input_features = [ + ("matrix", datatypes.Array(*(2, 2))), + ("starting_vector", datatypes.Array(*(2,))), + ] + + output_features = [ + ("maximum_eigen_value", datatypes.Array(*(1, 1))), + ("eigen_vector", None), + ("iteration_count", datatypes.Array(*(1,))), + ] + + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_expand_dims("expand_dims", "starting_vector", "x", axes=[-1]) + builder.add_load_constant_nd( + "iteration_count", + "iteration_count", + constant_value=np.zeros((1,)), + shape=(1,), + ) + + loop_layer = builder.add_loop("loop", max_iterations=number_of_iterations) + loop_body_builder = neural_network.NeuralNetworkBuilder( + nn_spec=loop_layer.loop.bodyNetwork + ) + # output shape: (n,1) + loop_body_builder.add_batched_mat_mul( + "bmm.1", input_names=["matrix", "x"], output_name="y" + ) + loop_body_builder.add_reduce_l2( + "reduce", input_name="y", output_name="norm", axes=[0] + ) + loop_body_builder.add_divide_broadcastable( + "divide", ["y", "norm"], "y_normalized" + ) + # find diff: 1- abs(cosine) + loop_body_builder.add_batched_mat_mul( + "cosine", ["y_normalized", "x"], "cosine_diff", transpose_a=True + ) + loop_body_builder.add_squeeze( + "squeeze_all", "cosine_diff", "cosine_diff_squeeze", squeeze_all=True + ) + loop_body_builder.add_unary( + "abs_cosine", "cosine_diff_squeeze", "abs_cosine_diff", mode="abs" + ) + loop_body_builder.add_activation( + "diff", + non_linearity="LINEAR", + input_name="abs_cosine_diff", + output_name="diff", + params=[-1, 1], + ) + + # update iteration count + loop_body_builder.add_activation( + "iteration_count_add", + non_linearity="LINEAR", + input_name="iteration_count", + output_name="iteration_count_plus_1", + params=[1, 1], + ) + loop_body_builder.add_copy( + "iteration_count_copy", "iteration_count_plus_1", "iteration_count" + ) + + # update 'x' + loop_body_builder.add_copy("update_x", "y_normalized", "x") + + # add condition to break from the loop, if convergence criterion is met + loop_body_builder.add_less_than( + "cond", ["diff"], "cond", alpha=convergence_tolerance + ) + branch_layer = loop_body_builder.add_branch("branch_layer", "cond") + builder_ifbranch = neural_network.NeuralNetworkBuilder( + nn_spec=branch_layer.branch.ifBranch + ) + builder_ifbranch.add_loop_break("break") + + # now we are out of the loop, compute the eigen value + builder.add_batched_mat_mul( + "bmm.2", input_names=["matrix", "x"], output_name="x_right" + ) + builder.add_batched_mat_mul( + "bmm.3", + input_names=["x", "x_right"], + output_name="maximum_eigen_value", + transpose_a=True, + ) + builder.add_squeeze("squeeze", "x", "eigen_vector", squeeze_all=True) + + # make input sizes flexible + spec = builder.spec + + flexible_shape_utils.add_multiarray_ndshape_enumeration( + spec, 
feature_name="matrix", enumerated_shapes=[(3, 3), (4, 4)] + ) + + flexible_shape_utils.add_multiarray_ndshape_enumeration( + spec, feature_name="starting_vector", enumerated_shapes=[(3,), (4,)] + ) + + from numpy import linalg as LA + + # try on 3x3 matrix + A = np.array([[2, -6, 8], [-6, 4, 5], [8, 5, 3]], dtype=np.float32) + starting_vector = np.random.rand(3) + starting_vector = starting_vector / np.sqrt(np.sum(starting_vector ** 2)) + + e, v = LA.eig(A) + idx = np.argmax(abs(e)) + input = {"starting_vector": starting_vector, "matrix": A.astype(np.float32)} + expected = {"maximum_eigen_value": np.array([[e[idx]]])} + self._test_model(spec, input, expected, useCPUOnly=True) + + # try on 2x2 matrix + A = np.array([[4, -5], [-5, 3]], dtype=np.float32) + starting_vector = np.random.rand(2) + starting_vector = starting_vector / np.sqrt(np.sum(starting_vector ** 2)) + + e, v = LA.eig(A) + idx = np.argmax(abs(e)) + + input = {"starting_vector": starting_vector, "matrix": A.astype(np.float32)} + expected = {"maximum_eigen_value": np.array([[e[idx]]])} + self._test_model(spec, input, expected, useCPUOnly=True) + + +@unittest.skipIf( + _macos_version() < LAYERS_11_0_MACOS_VERSION, + "macOS 11.0+ required. Skipping tests.", +) +class IOS14SingleLayerTests(CorrectnessTest): + @unittest.skipIf(not _HAS_TF_2, MSG_TF2_NOT_FOUND) + def test_onehot_layer_cpu(self, cpu_only=True): + ctr = 0 + params_dict = dict( + input_rank=[1, 2, 3, 4], + negative_axis=[True, False], + depth=[30], + on_value=[30.0], + off_value=[-4.0], + ) + params = list(itertools.product(*params_dict.values())) + for param in params: + param = dict(zip(params_dict.keys(), param)) + input_rank = param["input_rank"] + vectorSize = param["depth"] + on_value = param["on_value"] + off_value = param["off_value"] + + for axis in range(input_rank + 1): + ctr += 1 + if param["negative_axis"]: + axis_param = axis - (input_rank + 1) + else: + axis_param = axis + + input_shape = np.random.randint(1, 10, size=(input_rank,)) + + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_one_hot( + "one_hot", + ["data"], + "output", + one_hot_vector_size=vectorSize, + axis=axis_param, + on_value=on_value, + off_value=off_value, + ) + + x = np.random.randint(0, vectorSize, size=input_shape) + # x[::4] -= vectorSize # [To do] Need to Handle this case. 
+
+                # TF seems to have a bug with axis < -1
+                if axis_param < -1:
+                    axis_param += input_rank + 1
+                tf_op = tf.one_hot(
+                    x,
+                    axis=axis_param,
+                    depth=vectorSize,
+                    on_value=on_value,
+                    off_value=off_value,
+                )
+                expected = {"output": tf_op.numpy()}
+
+                input = {"data": x.astype(np.float32)}
+                self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
+
+    def test_batched_mat_mul_dynamic_quantization_cpu(self, cpu_only=True):
+        X1 = 11
+        X2 = 23
+        W = np.random.rand(X1, X2) * 20 - 10  # uniform between [-10, 10]
+        b = np.random.rand(X2) * 20 - 10
+        input_shapes = [
+            (X1,),
+            (5, X1),
+            (2, 3, X1),
+            (4, 1, X1),
+        ]  # , (12, 5, 8, X1), (2, 3, 1, 5, X1)]
+
+        W_max = max(np.abs(np.min(W)), np.abs(np.max(W)))
+        W_normalized = W / W_max  # [-1,1]
+        W_quantized_int8 = 127.0 * W_normalized  # [-127, 127]
+        W_quantized_int8 = W_quantized_int8.astype(np.int8)
+        quant_scale = W_max / 127.0
+
+        for input_shape in input_shapes:
+            x = np.random.rand(*input_shape) * 10
+
+            input_features = [("data", datatypes.Array(*input_shape))]
+            output_features = [("output", None)]
+
+            for has_bias in [True, False]:
+                builder = neural_network.NeuralNetworkBuilder(
+                    input_features, output_features, disable_rank5_shape_mapping=True
+                )
+
+                builder.add_batched_mat_mul(
+                    name="batched_mat_mul",
+                    input_names=["data"],
+                    output_name="output",
+                    weight_matrix_rows=X1,
+                    weight_matrix_columns=X2,
+                    int_8_dynamic_quantize=True,
+                    is_quantized_weight=True,
+                    quantization_type="linear",
+                    nbits=8,
+                    W=W_quantized_int8.tobytes(),
+                    bias=b if has_bias else None,
+                    quant_scale=np.array([quant_scale]),
+                )
+                inputs = {"data": x}
+                expected = {
+                    "output": np.matmul(
+                        x, W_quantized_int8.astype(np.float32) * quant_scale
+                    )
+                    + (b if has_bias else np.zeros(X2))
+                }
+                self._test_model(
+                    builder.spec,
+                    inputs,
+                    expected,
+                    useCPUOnly=cpu_only,
+                    test_metric="SNR",
+                    SNR=40,
+                )
+
+    def test_batched_mat_mul_dynamic_quantization_gpu(self):
+        self.test_batched_mat_mul_dynamic_quantization_cpu(cpu_only=False)
+
+    def test_inner_product_dynamic_quantization_cpu(self, cpu_only=True):
+        Xin = 24
+        Xout = 23
+        W = np.random.rand(Xout, Xin)
+        b = np.random.rand(Xout)
+        # For rank 4 and 5, the product of the last 3 dimensions must equal Xin
+        input_shapes = [
+            (Xin,),
+            (5, Xin),
+            (2, 3, Xin),
+            (4, 1, Xin),
+            (5, 2, 3, 4),
+            (5, 6, 2, 3, 4),
+        ]
+
+        W_max = max(np.abs(np.min(W)), np.abs(np.max(W)))
+        W_normalized = W / W_max  # [-1,1]
+        W_quantized_int8 = 127.0 * W_normalized  # [-127, 127]
+        W_quantized_int8 = W_quantized_int8.astype(np.int8)
+        quant_scale = W_max / 127.0
+
+        for input_shape in input_shapes:
+            rank = len(input_shape)
+            x = np.random.rand(*input_shape) * 5
+
+            W_for_numpy = W_quantized_int8.astype(np.float32) * quant_scale
+            for has_bias in [True, False]:
+                # Bind the effective bias to a new name; reassigning `b` here would
+                # permanently zero the bias for every later loop iteration.
+                bias = b if has_bias else np.zeros(Xout)
+                if rank == 1 or rank == 2 or rank == 3:
+                    np_out = np.matmul(x, np.transpose(W_for_numpy)) + bias
+                    expected = {"output": np_out}
+                elif rank == 4:
+                    x_shaped = np.reshape(x, (x.shape[0], np.prod(x.shape[1:])))
+                    np_out = np.matmul(x_shaped, np.transpose(W_for_numpy)) + bias
+                    expected = {"output": np.reshape(np_out, np_out.shape + (1, 1))}
+                elif rank == 5:
+                    x_shaped = np.reshape(x, x.shape[0:2] + (np.prod(x.shape[2:]),))
+                    np_out = np.matmul(x_shaped, np.transpose(W_for_numpy)) + bias
+                    expected = {
+                        "output": np.reshape(
+                            np_out, x.shape[0:2] + (np_out.shape[-1],) + (1, 1)
+                        )
+                    }
+
+                input_features = [("data", datatypes.Array(*input_shape))]
+                output_features = [("output", None)]
+                builder =
neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_inner_product( + name="ip", + W=W_quantized_int8.tobytes(), + b=b if has_bias else None, + input_channels=Xin, + output_channels=Xout, + has_bias=has_bias, + input_name="data", + output_name="output", + int_8_dynamic_quantize=True, + is_quantized_weight=True, + quantization_type="linear", + nbits=8, + quant_scale=np.array([quant_scale]), + ) + inputs = {"data": x} + self._test_model( + builder.spec, + inputs, + expected, + useCPUOnly=cpu_only, + test_metric="SNR", + SNR=40, + ) + + def test_inner_product_dynamic_quantization_gpu(self): + self.test_inner_product_dynamic_quantization_cpu(cpu_only=False) + + def test_onehot_layer_gpu(self): + self.test_onehot_layer_cpu(cpu_only=False) + + @unittest.skipIf(not _HAS_TF_2, MSG_TF2_NOT_FOUND) + def test_cumsum_layer_cpu(self, cpu_only=True): + ctr = 0 + params_dict = dict( + rank=[1, 2, 3, 4, 5], + exclusive=[False, True], + reverse=[False, True], + n_inputs=[1, 2], + ) + params = list(itertools.product(*params_dict.values())) + for param in params: + param = dict(zip(params_dict.keys(), param)) + rank = param["rank"] + exclusive = param["exclusive"] + reverse = param["reverse"] + n_inputs = param["n_inputs"] + + for axis in range(rank): + ctr += 1 + if np.random.rand(1) > 0.5: + axis_param = axis + else: + axis_param = axis - rank + + input_shape = np.random.randint(1, 10, size=(rank,)) + + input_features = [("data", datatypes.Array(*input_shape))] + if n_inputs == 2: + input_features.append(("axis", datatypes.Array(1,))) + + output_features = [("output", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + if n_inputs == 1: + builder.add_cumsum( + "cumsum", + ["data"], + "output", + axis=axis_param, + reverse=reverse, + exclusive=exclusive, + ) + else: + builder.add_cumsum( + "cumsum", + ["data", "axis"], + "output", + reverse=reverse, + exclusive=exclusive, + ) + + x = np.random.rand(*input_shape) + + tf_op = tf.cumsum( + x, axis=axis_param, exclusive=exclusive, reverse=reverse + ) + expected = {"output": tf_op.numpy()} + + input = {"data": x} + if n_inputs == 2: + input["axis"] = axis_param * np.ones((1,), dtype=np.float32) + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_cumsum_layer_gpu(self): + self.test_cumsum_layer_cpu(cpu_only=False) + + def test_clamped_relu_cpu(self, cpu_only=True): + + params_dict = dict(alpha=[0.0, 2.0, -3.0], beta=[7.0, -8.0]) + params = list(itertools.product(*params_dict.values())) + for param in params: + param = dict(zip(params_dict.keys(), param)) + alpha = param["alpha"] + beta = param["beta"] + input_shape = [40] + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_clamped_relu( + "clamped_relu", "data", "output", alpha=alpha, beta=beta + ) + + x = np.arange(-20, 20, dtype=np.float32) + input = {"data": x} + expected = {"output": np.minimum(beta, np.where(x >= 0, x, x * alpha))} + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_clamped_relu_gpu(self): + self.test_clamped_relu_cpu(cpu_only=False) + + def _test_pool3d(self, cpu_only): + pool_types = ("MAX", "AVERAGE") + # Defining shapes as (batch, channel, depth, height, width) + shapes = ((1, 1, 1, 2, 2), (1, 
1, 3, 3, 3), (3, 4, 10, 17, 90)) + # Defining kernels and strides as (depth, height, width) + kernels = ((2, 2, 2), (1, 3, 4), (2, 3, 4), (5, 1, 6), (8, 9, 1), (7, 11, 13)) + strides = ((1, 1, 1), (1, 2, 3), (2, 3, 2), (4, 1, 2), (3, 4, 1), (7, 11, 13)) + # Defining paddings as (left, right, top, bottom, front, back) + # This is backwards from how we define shapes, kernels, and strides, + # but it better matches pytorch, making the creation of pytorch layers + # much easier. + paddings = ( + ("CUSTOM", (0, 0, 0, 0, 0, 0)), + ("CUSTOM", (2, 2, 2, 2, 2, 2)), + ("CUSTOM", (5, 6, 3, 4, 2, 2)), + # VALID and SAME padding must have custom paddings unset or set to zero. + ("VALID", (0, 0, 0, 0, 0, 0)), + ("SAME", (0, 0, 0, 0, 0, 0)), + ) + + # Structure to collect failures so + # we can run all tests, even if one fails. + # This should be able to go away when we can parameterize + # our tests: Enable parameterized tests in test_numpy_nn_layers.py + failures = [] + num_successes = 0 + num_skipped = 0 + + for pool_type in pool_types: + for shape in shapes: + for kernel in kernels: + for stride in strides: + for padding in paddings: + for average_pooling_count_excludes_padding in (False, True): + result = self._test_pool3d_single_case( + cpu_only, + pool_type, + shape, + kernel, + stride, + padding, + average_pooling_count_excludes_padding, + ) + if type(result) is str: + failures.append(result) + elif result: + num_successes += 1 + else: + num_skipped += 1 + self.assertEqual( + len(failures), + 0, + "Got %s successes, %s skipped, %s failures: %s" + % (num_successes, num_skipped, len(failures), failures), + ) + + def _test_pool3d_single_case( + self, + cpu_only, + pool_type, + shape, + kernel, + stride, + padding, + average_pooling_count_excludes_padding, + ): + """ + + Args: + cpu_only: + pool_type: + shape: + kernel: + stride: + padding: + average_pooling_count_excludes_padding: + + Returns: True if success, False if skipped, Str if error + + """ + test_case = ( + "Test case:: pool_type: %s, shape: %s, kernel: %s, stride: %s, padding: %s, average_pooling_count_excludes_padding: %s" + % ( + pool_type, + shape, + kernel, + stride, + padding, + average_pooling_count_excludes_padding, + ) + ) + input_features = [("data", datatypes.Array(*shape))] + output_features = [("output", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + padding_mode = padding[0] + padding_values = padding[1] + builder.add_pooling3d( + name="pooling3d", + input_name="data", + output_name="output", + pooling_type=pool_type, + kernel_depth=kernel[0], + kernel_height=kernel[1], + kernel_width=kernel[2], + stride_depth=stride[0], + stride_height=stride[1], + stride_width=stride[2], + padding_mode=padding_mode, + custom_padding_front=padding_values[4], + custom_padding_back=padding_values[5], + custom_padding_top=padding_values[2], + custom_padding_bottom=padding_values[3], + custom_padding_left=padding_values[0], + custom_padding_right=padding_values[1], + average_pooling_count_excludes_padding=average_pooling_count_excludes_padding, + ) + + # Expected output + input = np.random.rand(*shape) + torch_input = torch.from_numpy(np.reshape(input, shape)) + + # Padding + if padding_mode == "CUSTOM": + torch_padding = torch.nn.ConstantPad3d(padding_values, 0) + elif padding_mode == "VALID": + torch_padding = torch.nn.ConstantPad3d(0, 0) + elif padding_mode == "SAME": + padding_list = [] + # torch.nn.ConstantPad3d wants (left, right, top, bottom, front, back) + 
# but our shape, kernel, and stride are (depth, height, width). + total_paddings = aggregated_pad( + pad_type=padding_mode.lower(), + kernel_shape=kernel, + input_shape=shape[2:], + strides=stride, + ) + total_paddings.reverse() + for p in total_paddings: + before = int(math.floor(float(p) / 2.0)) + after = int(math.ceil(float(p) / 2.0)) + padding_list.append(before) + padding_list.append(after) + + torch_padding = torch.nn.ConstantPad3d(tuple(padding_list), 0) + padding_values = padding_list[:] + else: + assert False + + # Validate output shape + for i in range(3): + try: + IOS14SingleLayerTests._validate_pooling_dimension( + shape[i + 2], + kernel[i], + stride[i], + padding_values[6 - i - 2], + padding_values[6 - i - 1], + ) + except ValueError: + return False + + # Pooling type + # Average pooling + if pool_type == "AVERAGE": + # torch.nn.AvgPool3d only accepts a single integer for padding, so we normally + # create a pooling layer first which allows us to fully specify the + # before and after padding in all three dimensions. + # + # However, when we use a padding layer, torch.nn.AvgPool3d doesn't + # know what is padding and what isn't, which means that its + # `count_include_pad` parameter has no effect. + # + # Therefore, we can only test average_pooling_count_excludes_padding=True + # when padding is homogeneous. + is_padding_homogeneous = all(p == padding_values[0] for p in padding_values) + if average_pooling_count_excludes_padding: + if not is_padding_homogeneous: + return False + else: + # padding is homogeneous + torch_model = torch.nn.AvgPool3d( + kernel, + stride=stride, + padding=padding_values[0], + count_include_pad=not average_pooling_count_excludes_padding, + ) + else: + # average_pooling_count_excludes_padding == False + torch_pool = torch.nn.AvgPool3d( + kernel, + stride=stride, + count_include_pad=not average_pooling_count_excludes_padding, + ) + torch_model = torch.nn.Sequential(torch_padding, torch_pool) + # Max pooling + else: + torch_pool = torch.nn.MaxPool3d(kernel, stride=stride) + torch_model = torch.nn.Sequential(torch_padding, torch_pool) + + try: + expected = torch_model(torch_input).numpy() + self._test_model( + builder.spec, {"data": input}, {"output": expected}, useCPUOnly=cpu_only + ) + return True + except AssertionError as e: + print(e) + return "test_case: %s, error: %s" % (test_case, e) + + @staticmethod + def _validate_pooling_dimension( + input_size, kernel_size, stride, start_padding, end_padding + ): + # https://adeshpande3.github.io/A-Beginner%27s-Guide-To-Understanding-Convolutional-Neural-Networks-Part-2/ + output_size = ( + input_size + start_padding + end_padding - kernel_size + ) / stride + 1 + if output_size < 1: + raise ValueError( + "Dimension with input_size: %s, kernel_size: %s, stride: %s, start_padding: %s, end_padding: %s " + "has output size of %s, but must be >= 1" + % ( + input_size, + kernel_size, + stride, + start_padding, + end_padding, + output_size, + ) + ) + if input_size < kernel_size: + raise ValueError( + "Dimension has input_size (%s) less than kernel_size (%s)" + % (input_size, kernel_size) + ) + if (start_padding + end_padding) / 2 >= kernel_size / 2: + raise ValueError( + "The average of the start (%s) and end (%s) padding must be less than half the kernel size (%s / 2 = %s)" + % (start_padding, end_padding, kernel_size, kernel_size / 2) + ) + + def test_pool3d_cpu(self): + self._test_pool3d(cpu_only=True) + + def test_pool3d_gpu(self): + if platform.machine() == "arm64": + pytest.xfail("rdar://98010495 (Some old 
nnv1 test are failing on M1 machine when running on ANE)")
+        self._test_pool3d(cpu_only=False)
+
+    def _test_global_pool3d(self, cpu_only):
+        shapes = ((1, 1, 1, 2, 2), (1, 1, 3, 3, 3), (3, 4, 10, 17, 90))
+        pool_types = ("MAX", "AVERAGE")
+
+        for shape in shapes:
+            for pool_type in pool_types:
+                test_case = "test_case:: shape: %s, pool_type: %s" % (shape, pool_type)
+                print(test_case)
+                input_features = [("data", datatypes.Array(*shape))]
+                output_features = [("output", None)]
+                builder = neural_network.NeuralNetworkBuilder(
+                    input_features, output_features, disable_rank5_shape_mapping=True
+                )
+                builder.add_global_pooling3d(
+                    name="pooling3d",
+                    input_name="data",
+                    output_name="output",
+                    pooling_type=pool_type,
+                )
+                input = np.random.rand(*shape)
+
+                # Expected output from Torch
+                torch_input = torch.from_numpy(np.reshape(input, shape))
+                if pool_type == "AVERAGE":
+                    torch_pool = torch.nn.AvgPool3d(shape[-3:])
+                else:
+                    torch_pool = torch.nn.MaxPool3d(shape[-3:])
+                expected = torch_pool(torch_input).numpy()
+
+                self._test_model(
+                    builder.spec,
+                    {"data": input},
+                    {"output": expected},
+                    useCPUOnly=cpu_only,
+                )
+
+    def test_global_pool3d_cpu(self):
+        self._test_global_pool3d(cpu_only=True)
+
+    def test_global_pool3d_gpu(self):
+        self._test_global_pool3d(cpu_only=False)
+
+    def test_argsort_cpu(self, cpu_only=True):
+
+        shapes = [(4,), (3, 4), (2, 5, 6), (3, 5, 2, 4), (4, 5, 3, 6, 7)]
+
+        for shape in shapes:
+            for descending in [False, True]:
+                for axis in range(len(shape)):
+
+                    input_features = [("data", datatypes.Array(*shape))]
+                    output_features = [("output", None)]
+                    builder = neural_network.NeuralNetworkBuilder(
+                        input_features,
+                        output_features,
+                        disable_rank5_shape_mapping=True,
+                    )
+                    builder.add_argsort(
+                        "argsort", "data", "output", axis=axis, descending=descending
+                    )
+
+                    x = np.random.rand(*shape)
+                    if descending:
+                        expected = {"output": np.argsort(-x, axis)}
+                    else:
+                        expected = {"output": np.argsort(x, axis)}
+
+                    input = {"data": x}
+                    self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
+
+    def test_argsort_gpu(self):
+        self.test_argsort_cpu(cpu_only=False)
+
+    def test_upsample_pytorch_cpu(self):
+        self.upsample_pytorch_test_iter(np.arange(1, 4), True)
+        self.upsample_pytorch_test_iter(np.arange(1.0, 3.0, 0.66), True)
+
+    def test_upsample_pytorch_gpu(self):
+        if platform.machine() == "arm64":
+            pytest.xfail("rdar://98010495 (Some old nnv1 test are failing on M1 machine when running on ANE)")
+        self.upsample_pytorch_test_iter(np.arange(1, 4), False)
+        self.upsample_pytorch_test_iter(np.arange(1.0, 3.0, 0.66), False)
+
+    def upsample_pytorch_test_iter(self, scale_range, cpu_only):
+        for align_corners in [False, True]:
+            for scale_h in scale_range:
+                for scale_w in scale_range:
+                    for input_h in range(2, 6):
+                        for input_w in range(2, 6):
+                            self.upsample_pytorch_test(
+                                input_h,
+                                input_w,
+                                scale_h,
+                                scale_w,
+                                align_corners,
+                                cpu_only,
+                            )
+
+    def upsample_pytorch_test(self, h, w, scale_h, scale_w, align_corners, cpu_only):
+        input_dim = (1, 1, h, w)
+        if align_corners:
+            linear_upsample_mode = "ALIGN_CORNERS_TRUE"
+        else:
+            linear_upsample_mode = "ALIGN_CORNERS_FALSE"
+
+        input_features = [("data", datatypes.Array(*input_dim))]
+        output_features = [("output", None)]
+
+        builder = neural_network.NeuralNetworkBuilder(
+            input_features, output_features, disable_rank5_shape_mapping=True
+        )
+        builder.add_upsample(
+            name="upsample",
+            scaling_factor_h=scale_h,
+            scaling_factor_w=scale_w,
+            linear_upsample_mode=linear_upsample_mode,
input_name="data", + output_name="output", + mode="BILINEAR", + ) + + input_tensor = np.reshape(np.arange(1.0, 1.0 + (h * w), 1.0), input_dim) + input = {"data": input_tensor} + + # Get result from PyTorch + x = torch.from_numpy(np.reshape(input_tensor, (1, 1, h, w))) + pytorch_output = torch.nn.functional.interpolate( + x, + scale_factor=(scale_h, scale_w), + mode="bilinear", + align_corners=align_corners, + recompute_scale_factor=True, + ) + + # Expect PyTorch output matches CoreML output + expected = {"output": pytorch_output.numpy()} + + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + self.assertEqual(len(input_dim), builder._get_rank("output")) + + def test_slice_by_size_cpu(self, cpu_only=True): + + shapes = [(4,), (3, 4), (2, 5, 6), (3, 5, 2, 4), (4, 5, 3, 6, 7)] + + for shape in shapes: + for axis in range(len(shape)): + begin = np.random.randint(shape[axis]) + begin_input = np.array([begin]).astype(np.float32) + size = np.random.randint(shape[axis] - begin) + 1 + + x = np.random.rand(*shape) + slices = [] + for i in range(len(shape)): + if i != axis: + slices.append(slice(None, None, None)) + else: + slices.append(slice(begin, begin + size, 1)) + slices = tuple(slices) + expected = {"output": x[slices]} + + input_features = [ + ("data", datatypes.Array(*shape)), + ("begin", datatypes.Array(1)), + ] + output_features = [("output", datatypes.Array(*x[slices].shape))] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_slice_by_size( + "slice_by_size", ["data", "begin"], "output", axis=axis, size=size + ) + + input = {"data": x, "begin": begin_input} + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def _test_conv3d(self, cpu_only, full_test): + # Input shape defined by us and PyTorch as [batch, channels, depth, height, width] + input_shapes = [ + [1, 3, 3, 8, 8], + [1, 1, 3, 8, 8], + [1, 7, 8, 15, 63], + [4, 32, 8, 16, 16], + ] + # Large enough kernels and/or input causes int overflow and seg fault: see rdar://60309763 + kernels = [[3, 3, 3], [2, 2, 2]] + strides = [[1, 1, 1], [2, 2, 2]] + dilations = [[1, 1, 1], [2, 2, 2]] + has_biases = [True, False] + # Note: PyTorch's `torch.nn.Conv3d` doesn't support these padding modes, just a single + # padding value (for all dimensions) or 3 values (for each dimension) + padding_modes = ["custom", "valid", "same"] + # Padding shape is front, back, top, bottom, left, right + paddings = [[0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1]] + + # Add some additional test cases if `full_test` is True + if full_test: + input_shapes.extend([[1, 4, 3, 128, 128]]) + kernels.extend([[1, 2, 3], [5, 5, 5]]) + strides.extend([[1, 2, 3]]) + dilations.extend([[1, 2, 3]]) + paddings.extend([[2, 0, 2, 0, 2, 0], [0, 1, 2, 3, 4, 5]]) + + test_case_format_str = ( + "Conv3d test case | Input shape: {}, Output channels: {}, Groups: {}, Kernel shape: {}," + " Stride: {}, Padding: {}, Padding mode: {}, Dilation: {}, Has bias: {}" + ) + + for in_shape in input_shapes: + # Test "normal" and depthwise convolution with corresponding groups and output channels + groups_outchannels = [(1, 2), (in_shape[1], 2 * in_shape[1])] + for kernel in kernels: + for has_bias in has_biases: + for stride in strides: + for dilation in dilations: + for padding_mode in padding_modes: + # For all modes besides 'custom', the padding values are ignored + if padding_mode == "custom": + loop_paddings = paddings + else: + loop_paddings = [[0, 0, 0, 0, 0, 0]] + for padding in 
loop_paddings: + for groups, output_channels in groups_outchannels: + # Dilated kernel shape = (K - 1) * D + 1 + dilated_kernel = list( + map( + lambda k, d: (k - 1) * d + 1, + kernel, + dilation, + ) + ) + + # Use paddings if padding_mode is "custom", else compute + # them according to + # https://stanford.edu/~shervine/teaching/cs-230/cheatsheet-convolutional-neural-networks#filter + if padding_mode == "same": + pad_d = max( + 0, + ( + stride[0] + * math.ceil( + in_shape[2] / float(stride[0]) + ) + - in_shape[2] + + dilated_kernel[0] + - stride[0] + ) + / 2.0, + ) + pad_h = max( + 0, + ( + stride[1] + * math.ceil( + in_shape[3] / float(stride[1]) + ) + - in_shape[3] + + dilated_kernel[1] + - stride[1] + ) + / 2.0, + ) + pad_w = max( + 0, + ( + stride[2] + * math.ceil( + in_shape[4] / float(stride[2]) + ) + - in_shape[4] + + dilated_kernel[2] + - stride[2] + ) + / 2.0, + ) + + # Depth + padding[0] = int(math.floor(pad_d)) + padding[1] = int(math.ceil(pad_d)) + # Height + padding[2] = int(math.floor(pad_h)) + padding[3] = int(math.ceil(pad_h)) + # Width + padding[4] = int(math.floor(pad_w)) + padding[5] = int(math.ceil(pad_w)) + elif padding_mode == "valid": + # Set to zero for PyTorch padding + padding = [0] * 6 + elif padding_mode == "custom": + # No-op: valid ignores padding and custom uses the + # specified padding + pass + + input_features = [ + ("data", datatypes.Array(*in_shape)) + ] + output_features = [("output", None)] + input_channels = in_shape[1] + # [output_channels, kernel_channels, depth, height, width] + weights_shape = [ + output_channels, + int(input_channels / groups), + kernel[0], + kernel[1], + kernel[2], + ] + + # Init random input + input_tensor = np.random.normal(size=in_shape) + input_torch = torch.tensor(input_tensor) + # Init random weights + weights_tensor = np.random.normal( + size=weights_shape + ) + weights_torch = torch.DoubleTensor( + weights_tensor + ) + # Init random bias if applicable + if has_bias: + bias_tensor = np.random.normal( + size=output_channels + ) + bias_torch = torch.DoubleTensor(bias_tensor) + else: + bias_tensor = None + bias_torch = None + + builder = neural_network.NeuralNetworkBuilder( + input_features, + output_features, + disable_rank5_shape_mapping=True, + ) + builder.add_convolution3d( + name="conv3d", + input_channels=input_channels, + output_channels=output_channels, + depth=kernel[0], + height=kernel[1], + width=kernel[2], + W=weights_tensor, + b=bias_tensor, + has_bias=has_bias, + groups=groups, + stride_depth=stride[0], + stride_height=stride[1], + stride_width=stride[2], + dilation_depth=dilation[0], + dilation_height=dilation[1], + dilation_width=dilation[2], + padding_mode=padding_mode, + padding_front=padding[0], + padding_back=padding[1], + padding_top=padding[2], + padding_bottom=padding[3], + padding_left=padding[4], + padding_right=padding[5], + input_name="data", + output_name="output", + ) + + # Get PyTorch output to compare ours to + # First pad, since PyTorch Conv3d only supports custom and + # same symmetric padding. Padding shape is + # (left, right, top, bottom, front, back) + padded_input = input_torch + if any(p > 0 for p in padding): + torch_padding = ( + padding[4], + padding[5], + padding[2], + padding[3], + padding[0], + padding[1], + ) + pad_layer = torch.nn.ConstantPad3d( + torch_padding, 0 + ) + padded_input = pad_layer(input_torch) + # Check if dilated kernel size exceeds padded input size in + # any dimension. 
If it does, it's not a valid convolution + if ( + dilated_kernel[0] > padded_input.shape[2] + or dilated_kernel[1] > padded_input.shape[3] + or dilated_kernel[2] > padded_input.shape[4] + ): + print( + "SKIPPING: Dilated kernel exceeds padded input." + ) + continue + # Using Sequential with a padding layer first produces + # incorrect convolution output + model = torch.nn.Sequential( + torch.nn.Conv3d( + input_channels, + output_channels, + kernel, + stride=stride, + padding=0, + dilation=dilation, + groups=groups, + bias=False, + ) + ) + with torch.no_grad(): + model[0].weight = torch.nn.Parameter( + weights_torch + ) + if has_bias: + model[0].bias = torch.nn.Parameter( + bias_torch + ) + torch_expected = model(padded_input) + + test_case = test_case_format_str.format( + in_shape, + output_channels, + groups, + weights_shape, + stride, + padding, + padding_mode, + dilation, + has_bias, + ) + try: + self._test_model( + builder.spec, + {"data": input_tensor}, + { + "output": torch_expected.detach().numpy() + }, + useCPUOnly=cpu_only, + test_metric="SNR", + SNR=40, + validate_shapes_only=False, + ) + except AssertionError as e: + print(test_case) + raise + + def test_conv3d_cpu_basic(self): + self._test_conv3d(cpu_only=True, full_test=False) + + @pytest.mark.slow + def test_conv3d_cpu_slow(self): + self._test_conv3d(cpu_only=True, full_test=True) + + def test_conv3d_gpu_basic(self): + self._test_conv3d(cpu_only=False, full_test=False) + + @pytest.mark.slow + def test_conv3d_gpu_slow(self): + self._test_conv3d(cpu_only=False, full_test=True) + + +@unittest.skipUnless( + _is_macos() and _macos_version() >= LAYERS_11_0_MACOS_VERSION, + "Only supported on macOS 10.16+", +) +class TestReorganizeDataTests(CorrectnessTest): + def _to_rank_4(self, x): + from_rank = len(x.shape) + if from_rank == 3: + return np.reshape(x, [1] + list(x.shape)) + elif from_rank == 4: + return x + elif from_rank == 5: + return np.squeeze(x, axis=0) + + def _from_rank_4(self, x, to_rank): + if to_rank == 3: + return np.squeeze(x, axis=0) + elif to_rank == 4: + return x + elif to_rank == 5: + return np.reshape(x, [1] + list(x.shape)) + + @unittest.skipIf(not _HAS_TF_2, MSG_TF2_NOT_FOUND) + def test_depth_to_space_cpu(self, cpu_only=True): + + params_dict = { + "block_size": [2, 3, 4], + "channels_div_bsq": [1, 2, 3, 7], + "spatial": [[2, 3], [4, 4], [1, 1]], + "batch_size": [None, 1, 2], + "seq_length": [None, 1], + } + params_product = list(itertools.product(*params_dict.values())) + for param in params_product: + param = dict(zip(params_dict.keys(), param)) + # Create input based on params + block_size = param["block_size"] + bsq = block_size * block_size + input_shape = [bsq * param["channels_div_bsq"]] + param["spatial"] + if param["batch_size"] is not None: + input_shape = [param["batch_size"]] + input_shape + if param["seq_length"] is not None: + input_shape = [param["seq_length"]] + input_shape + rank = len(input_shape) + x = np.random.random(input_shape) + input = {"data": x} + + # Set up network + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_reorganize_data( + "reorganize_data", + "data", + "output", + mode="DEPTH_TO_SPACE", + block_size=block_size, + ) + + # Run tensorflow to calculate expected values + # TensorFlow requires rank 4, NHWC order on CPU + x_tf = self._to_rank_4(x).transpose(0, 2, 3, 1) + out_tf = 
tf.nn.depth_to_space(x_tf, block_size, data_format="NHWC").numpy() + out = self._from_rank_4(out_tf.transpose(0, 3, 1, 2), to_rank=rank) + expected = {"output": out} + + # Run model to calculate CoreML values and compare with expected + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + def test_depth_to_space_gpu(self): + self.test_depth_to_space_cpu(cpu_only=False) + + @unittest.skipIf( + _macos_version() < LAYERS_11_0_MACOS_VERSION, + "macOS 11.0+ required. Skipping tests.", + ) + def test_pixel_shuffle_cpu(self, cpu_only=True): + + params_dict = { + "block_size": [2, 3, 4], + "channels_div_bsq": [1, 2, 3, 7], + "spatial": [[2, 3], [4, 4], [1, 1]], + "batch_size": [None, 1, 2], + "seq_length": [None, 1], + } + params_product = list(itertools.product(*params_dict.values())) + for param in params_product: + param = dict(zip(params_dict.keys(), param)) + # Create input based on params + block_size = param["block_size"] + bsq = block_size * block_size + input_shape = [bsq * param["channels_div_bsq"]] + param["spatial"] + if param["batch_size"] is not None: + input_shape = [param["batch_size"]] + input_shape + if param["seq_length"] is not None: + input_shape = [param["seq_length"]] + input_shape + rank = len(input_shape) + x = np.random.random(input_shape) + input = {"data": x} + + # Set up network + input_features = [("data", datatypes.Array(*input_shape))] + output_features = [("output", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + builder.add_reorganize_data( + "reorganize_data", + "data", + "output", + mode="PIXEL_SHUFFLE", + block_size=block_size, + ) + + # Run pytorch to calculate expected values + x_torch = torch.from_numpy(self._to_rank_4(x)) + out_torch = torch.pixel_shuffle(x_torch, upscale_factor=block_size) + out = self._from_rank_4(out_torch.numpy(), to_rank=rank) + expected = {"output": out} + + # Run model to calculate CoreML values and compare with expected + self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only) + + @unittest.skipIf( + _macos_version() < LAYERS_11_0_MACOS_VERSION, + "macOS 10.16+ required. Skipping tests.", + ) + def test_pixel_shuffle_gpu(self): + self.test_pixel_shuffle_cpu(cpu_only=False) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_quantization.py b/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_quantization.py new file mode 100644 index 00000000..ff589914 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_quantization.py @@ -0,0 +1,562 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +""" +Module containing unit tests for verifying various quantizations. 
+""" + +import unittest + +import numpy as np +import pytest + +import coremltools +import coremltools.models.datatypes as datatypes +from coremltools import ComputeUnit +from coremltools.models import (_QUANTIZATION_MODE_LINEAR_QUANTIZATION, + neural_network) +from coremltools.models.neural_network import quantization_utils +from coremltools.models.neural_network.quantization_utils import ( + MatrixMultiplyLayerSelector, _quantize_spec_weights, + activate_int8_int8_matrix_multiplications) + + +@unittest.skipIf( + not coremltools.utils._is_macos() or coremltools.utils._macos_version() < (10, 16), + "Missing macOS 10.16+. Skipping tests.", +) +class DynamicQuantizedInt8Int8MatMul(unittest.TestCase): + """ + Quantization tests for dynamic Int8 - Int8 matrix multiplications + """ + + def initialize(self): + np.random.seed(1988) + self.Cout, self.Cin = 16, 32 + self.W = np.random.rand(self.Cout, self.Cin) * 20.0 - 10.0 + self.b = np.random.rand(self.Cout) * 20.0 - 10.0 + self.input_shape = (5, self.Cin) + input_features = [("data", datatypes.Array(*self.input_shape))] + output_features = [("output", None)] + self.builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + self.selector = MatrixMultiplyLayerSelector() + + def _test_predictions( + self, np_preds, coreml_preds, SNR=30, PSNR=40, + ): + + np_preds = np_preds.flatten() + coreml_preds = coreml_preds.flatten() + + noise = np_preds - coreml_preds + noise_var = np.sum(noise ** 2) / len(noise) + 1e-7 + signal_energy = np.sum(np_preds ** 2) / len(np_preds) + max_signal_energy = np.amax(np_preds ** 2) + snr = 10 * np.log10(signal_energy / noise_var) + psnr = 10 * np.log10(max_signal_energy / noise_var) + self.assertGreaterEqual(snr, SNR) + self.assertGreaterEqual(psnr, PSNR) + + def compare(self, specification_modified=True): + x = np.random.rand(*self.input_shape) + + def _get_preds(spec): + mlmodel = coremltools.models.MLModel(spec, compute_units=ComputeUnit.CPU_ONLY) + return mlmodel.predict({"data": x})["output"] + + preds = _get_preds(self.builder.spec) + self.assertEqual(self.builder.spec.specificationVersion, 4) + + quantized_spec = activate_int8_int8_matrix_multiplications( + self.builder.spec, self.selector + ) + + layer = self.builder.spec.neuralNetwork.layers[0] + layer_type = layer.WhichOneof("layer") + if layer_type == "innerProduct": + matmul_layer = layer.innerProduct + + elif layer_type == "batchedMatmul": + matmul_layer = layer.batchedMatmul + wp = matmul_layer.weights + + if specification_modified: + self.assertEqual(self.builder.spec.specificationVersion, 5) + quant_preds = _get_preds(quantized_spec) + self._test_predictions(preds, quant_preds, SNR=40) + self.assertEqual(len(wp.floatValue), 0) + else: + self.assertEqual(self.builder.spec.specificationVersion, 4) + quant_preds = _get_preds(quantized_spec) + np.testing.assert_array_almost_equal(preds, quant_preds) + self.assertGreater(len(wp.floatValue), 0) + + def test_single_batched_matmul_no_bias(self): + + self.initialize() + self.builder.add_batched_mat_mul( + name="batched_matmul", + input_names=["data"], + output_name="output", + weight_matrix_rows=self.Cin, + weight_matrix_columns=self.Cout, + W=self.W, + ) + self.compare() + + def test_single_batched_matmul_with_bias(self): + + self.initialize() + self.builder.add_batched_mat_mul( + name="batched_matmul", + input_names=["data"], + output_name="output", + weight_matrix_rows=self.Cin, + weight_matrix_columns=self.Cout, + W=self.W, + bias=self.b, + ) + 
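+        # compare() (defined above) runs both the FP32 spec and the spec returned by
+        # activate_int8_int8_matrix_multiplications on the same random input, checks
+        # the SNR/PSNR of the quantized predictions against the float ones, and
+        # verifies the specification version is bumped from 4 to 5 when modified.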
self.compare() + + def test_single_inner_product_no_bias(self): + + self.initialize() + self.builder.add_inner_product( + name="ip", + input_name="data", + output_name="output", + input_channels=self.Cin, + output_channels=self.Cout, + W=self.W, + b=None, + has_bias=False, + ) + self.compare() + + def test_single_inner_product_with_bias(self): + + self.initialize() + self.builder.add_inner_product( + name="ip", + input_name="data", + output_name="output", + input_channels=self.Cin, + output_channels=self.Cout, + W=self.W, + b=self.b, + has_bias=True, + ) + self.compare() + + def test_inner_product_min_input_channels_valid(self): + self.initialize() + self.builder.add_inner_product( + name="ip", + input_name="data", + output_name="output", + input_channels=self.Cin, + output_channels=self.Cout, + W=self.W, + b=self.b, + has_bias=True, + ) + self.selector.minimum_input_channels = 31 + self.compare() + + def test_batched_matmul_min_input_channels_valid(self): + + self.initialize() + self.builder.add_batched_mat_mul( + name="batched_matmul", + input_names=["data"], + output_name="output", + weight_matrix_rows=self.Cin, + weight_matrix_columns=self.Cout, + W=self.W, + ) + self.selector.minimum_input_channels = 32 + self.compare() + + def test_inner_product_min_input_channels_invalid(self): + self.initialize() + self.builder.add_inner_product( + name="ip", + input_name="data", + output_name="output", + input_channels=self.Cin, + output_channels=self.Cout, + W=self.W, + b=self.b, + has_bias=True, + ) + self.selector.minimum_input_channels = 33 + self.compare(specification_modified=False) + + def test_batched_matmul_min_input_channels_invalid(self): + + self.initialize() + self.builder.add_batched_mat_mul( + name="batched_matmul", + input_names=["data"], + output_name="output", + weight_matrix_rows=self.Cin, + weight_matrix_columns=self.Cout, + W=self.W, + ) + self.selector.minimum_input_channels = 33 + self.compare(specification_modified=False) + + def test_batched_matmul_max_input_channels_valid(self): + + self.initialize() + self.builder.add_batched_mat_mul( + name="batched_matmul", + input_names=["data"], + output_name="output", + weight_matrix_rows=self.Cin, + weight_matrix_columns=self.Cout, + W=self.W, + ) + self.selector.maximum_input_channels = 32 + self.compare() + + def test_inner_product_max_input_channels_valid(self): + self.initialize() + self.builder.add_inner_product( + name="ip", + input_name="data", + output_name="output", + input_channels=self.Cin, + output_channels=self.Cout, + W=self.W, + b=self.b, + has_bias=True, + ) + self.selector.maximum_input_channels = 33 + self.compare() + + def test_batched_matmul_max_input_channels_invalid(self): + + self.initialize() + self.builder.add_batched_mat_mul( + name="batched_matmul", + input_names=["data"], + output_name="output", + weight_matrix_rows=self.Cin, + weight_matrix_columns=self.Cout, + W=self.W, + ) + self.selector.maximum_input_channels = 31 + self.compare(specification_modified=False) + + def test_inner_product_max_input_channels_invalid(self): + self.initialize() + self.builder.add_inner_product( + name="ip", + input_name="data", + output_name="output", + input_channels=self.Cin, + output_channels=self.Cout, + W=self.W, + b=self.b, + has_bias=True, + ) + self.selector.maximum_input_channels = 30 + self.compare(specification_modified=False) + + def test_inner_product_min_output_channels_valid(self): + self.initialize() + self.builder.add_inner_product( + name="ip", + input_name="data", + output_name="output", + 
input_channels=self.Cin, + output_channels=self.Cout, + W=self.W, + b=self.b, + has_bias=True, + ) + self.selector.minimum_output_channels = 16 + self.compare() + + def test_batched_matmul_min_output_channels_valid(self): + + self.initialize() + self.builder.add_batched_mat_mul( + name="batched_matmul", + input_names=["data"], + output_name="output", + weight_matrix_rows=self.Cin, + weight_matrix_columns=self.Cout, + W=self.W, + ) + self.selector.minimum_output_channels = 16 + self.compare() + + def test_inner_product_min_output_channels_invalid(self): + self.initialize() + self.builder.add_inner_product( + name="ip", + input_name="data", + output_name="output", + input_channels=self.Cin, + output_channels=self.Cout, + W=self.W, + b=self.b, + has_bias=True, + ) + self.selector.minimum_output_channels = 17 + self.compare(specification_modified=False) + + def test_batched_matmul_min_output_channels_invalid(self): + + self.initialize() + self.builder.add_batched_mat_mul( + name="batched_matmul", + input_names=["data"], + output_name="output", + weight_matrix_rows=self.Cin, + weight_matrix_columns=self.Cout, + W=self.W, + ) + self.selector.minimum_output_channels = 17 + self.compare(specification_modified=False) + + def test_batched_matmul_max_output_channels_valid(self): + + self.initialize() + self.builder.add_batched_mat_mul( + name="batched_matmul", + input_names=["data"], + output_name="output", + weight_matrix_rows=self.Cin, + weight_matrix_columns=self.Cout, + W=self.W, + ) + self.selector.maximum_output_channels = 17 + self.compare() + + def test_inner_product_max_output_channels_valid(self): + self.initialize() + self.builder.add_inner_product( + name="ip", + input_name="data", + output_name="output", + input_channels=self.Cin, + output_channels=self.Cout, + W=self.W, + b=self.b, + has_bias=True, + ) + self.selector.maximum_output_channels = 16 + self.compare() + + def test_batched_matmul_max_output_channels_invalid(self): + + self.initialize() + self.builder.add_batched_mat_mul( + name="batched_matmul", + input_names=["data"], + output_name="output", + weight_matrix_rows=self.Cin, + weight_matrix_columns=self.Cout, + W=self.W, + ) + self.selector.maximum_output_channels = 14 + self.compare(specification_modified=False) + + def test_inner_product_max_output_channels_invalid(self): + self.initialize() + self.builder.add_inner_product( + name="ip", + input_name="data", + output_name="output", + input_channels=self.Cin, + output_channels=self.Cout, + W=self.W, + b=self.b, + has_bias=True, + ) + self.selector.maximum_output_channels = 15 + self.compare(specification_modified=False) + + def test_inner_product_min_weight_count_valid(self): + self.initialize() + self.builder.add_inner_product( + name="ip", + input_name="data", + output_name="output", + input_channels=self.Cin, + output_channels=self.Cout, + W=self.W, + b=self.b, + has_bias=True, + ) + self.selector.minimum_weight_count = 512 + self.compare() + + def test_batched_matmul_min_weight_count_invalid(self): + + self.initialize() + self.builder.add_batched_mat_mul( + name="batched_matmul", + input_names=["data"], + output_name="output", + weight_matrix_rows=self.Cin, + weight_matrix_columns=self.Cout, + W=self.W, + ) + self.selector.minimum_weight_count = 513 + self.compare(specification_modified=False) + + def test_inner_product_layer_names_invalid(self): + self.initialize() + self.builder.add_inner_product( + name="ip", + input_name="data", + output_name="output", + input_channels=self.Cin, + output_channels=self.Cout, + W=self.W, 
+ b=self.b, + has_bias=True, + ) + self.selector.include_layers_with_names = ["ip1", "ip2"] + self.compare(specification_modified=False) + + def test_batched_matmul_layer_names_valid(self): + + self.initialize() + self.builder.add_batched_mat_mul( + name="batched_matmul", + input_names=["data"], + output_name="output", + weight_matrix_rows=self.Cin, + weight_matrix_columns=self.Cout, + W=self.W, + ) + self.selector.include_layers_with_names = ["bm1", "batched_matmul"] + self.compare() + + def test_batched_matmul_8bit_weight_quantized(self): + + self.initialize() + self.builder.add_batched_mat_mul( + name="batched_matmul", + input_names=["data"], + output_name="output", + weight_matrix_rows=self.Cin, + weight_matrix_columns=self.Cout, + W=self.W, + ) + _quantize_spec_weights( + self.builder.spec, 8, _QUANTIZATION_MODE_LINEAR_QUANTIZATION + ) + self.compare() + + def test_batched_matmul_4bit_weight_quantized(self): + + self.initialize() + self.builder.add_batched_mat_mul( + name="batched_matmul", + input_names=["data"], + output_name="output", + weight_matrix_rows=self.Cin, + weight_matrix_columns=self.Cout, + W=self.W, + ) + _quantize_spec_weights( + self.builder.spec, 4, _QUANTIZATION_MODE_LINEAR_QUANTIZATION + ) + self.compare() + + def test_batched_matmul_2bit_weight_quantized(self): + + self.initialize() + self.builder.add_batched_mat_mul( + name="batched_matmul", + input_names=["data"], + output_name="output", + weight_matrix_rows=self.Cin, + weight_matrix_columns=self.Cout, + W=self.W, + ) + _quantize_spec_weights( + self.builder.spec, 2, _QUANTIZATION_MODE_LINEAR_QUANTIZATION + ) + self.compare() + + def test_batched_matmul_1bit_weight_quantized(self): + + self.initialize() + self.builder.add_batched_mat_mul( + name="batched_matmul", + input_names=["data"], + output_name="output", + weight_matrix_rows=self.Cin, + weight_matrix_columns=self.Cout, + W=self.W, + ) + _quantize_spec_weights( + self.builder.spec, 1, _QUANTIZATION_MODE_LINEAR_QUANTIZATION + ) + self.compare() + + +class TestQuantizeWeightsAPI: + @staticmethod + @pytest.mark.parametrize( + "compute_units", [ComputeUnit.ALL, ComputeUnit.CPU_AND_GPU, ComputeUnit.CPU_ONLY] + ) + def test_embeddingND_quantize(compute_units): + input_features = [("data", datatypes.Array(10, 1))] + output_features = [("output", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features, disable_rank5_shape_mapping=True + ) + + builder.add_embedding_nd( + name="embedding_nd", + input_name="data", + output_name="output", + vocab_size=300, + embedding_size=20, + W=np.random.rand(20, 300), + ) + + spec = builder.spec + model_fp32 = coremltools.models.MLModel(spec, compute_units=compute_units) + assert len(spec.neuralNetwork.layers[0].embeddingND.weights.floatValue) == 6000 + + # quantize to FP16 + model_fp16 = quantization_utils.quantize_weights(model_fp32, nbits=16) + assert model_fp16.compute_unit == compute_units + spec_fp16 = model_fp16.get_spec() + assert len(spec_fp16.neuralNetwork.layers[0].embeddingND.weights.floatValue) == 0 + assert len(spec_fp16.neuralNetwork.layers[0].embeddingND.weights.float16Value) == 2 * 6000 + + # quantize to uint8 + model_uint8 = quantization_utils.quantize_weights(model_fp32, nbits=8) + assert model_uint8.compute_unit == compute_units + spec_uint8 = model_uint8.get_spec() + assert len(spec_uint8.neuralNetwork.layers[0].embeddingND.weights.floatValue) == 0 + assert len(spec_uint8.neuralNetwork.layers[0].embeddingND.weights.float16Value) == 0 + assert 
len(spec_uint8.neuralNetwork.layers[0].embeddingND.weights.rawValue) == 6000 + + # quantize to uint5 + model_uint5 = quantization_utils.quantize_weights(model_fp32, nbits=5) + assert model_uint5.compute_unit == compute_units + spec_uint5 = model_uint5.get_spec() + assert len(spec_uint5.neuralNetwork.layers[0].embeddingND.weights.floatValue) == 0 + assert len(spec_uint5.neuralNetwork.layers[0].embeddingND.weights.float16Value) == 0 + assert len(spec_uint5.neuralNetwork.layers[0].embeddingND.weights.rawValue) == 3750 # 3750 = 5*6000/8 + + @unittest.skipIf(coremltools.utils._macos_version() < (13, 0), + 'ComputeUnit.CPU_AND_NE is only available on macOS >= 13.0' + ) + def test_embeddingND_quantize_CPU_and_NE(self): + self.test_embeddingND_quantize(ComputeUnit.CPU_AND_NE) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_simple_nn_inference.py b/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_simple_nn_inference.py new file mode 100644 index 00000000..49663d48 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_simple_nn_inference.py @@ -0,0 +1,53 @@ +# Copyright (c) 2021, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os + +import numpy as np + +import coremltools +import coremltools.models.datatypes as datatypes +from coremltools import ComputeUnit, utils +from coremltools.models import neural_network as neural_network + + +class TestNeuralNetworkPrediction: + + @staticmethod + def test_lrn_model(tmpdir): + + input_dim = (1, 3, 3) + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", datatypes.Array(*input_dim))] + + builder = neural_network.NeuralNetworkBuilder(input_features, output_features) + builder.add_lrn( + name="lrn", + input_name="data", + output_name="output", + alpha=2, + beta=3, + local_size=1, + k=8, + ) + + input = {"data": np.ones((1, 3, 3))} + expected = 1e-3 * np.ones((1, 3, 3)) + model_path = os.path.join(str(tmpdir), "lrn_model.mlmodel") + coremltools.models.utils.save_spec(builder.spec, model_path) + + try: + model = coremltools.models.MLModel(model_path, compute_units=ComputeUnit.CPU_ONLY) + if utils._macos_version() >= (10, 13): + out = model.predict(input) + except RuntimeError as e: + print(e) + assert str(e) == "Error compiling model: \"The file couldn’t be saved.\"." + else: + if utils._macos_version() >= (10, 13): + assert out['output'].shape == (1, 3, 3) + np.testing.assert_allclose(expected, out['output']) + print("Core ML output", out) + diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_tf_numeric.py b/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_tf_numeric.py new file mode 100644 index 00000000..8c0ebbfa --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/neural_network/test_tf_numeric.py @@ -0,0 +1,508 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools +import unittest + +import numpy as np + +import coremltools.models.datatypes as datatypes +from coremltools import ComputeUnit +from coremltools._deps import _HAS_TF_2, MSG_TF2_NOT_FOUND +from coremltools.models import MLModel, neural_network +from coremltools.models.utils import _is_macos, _macos_version + +if _HAS_TF_2: + import tensorflow as tf + + +np.random.seed(10) +np.set_printoptions(precision=4, suppress=True) + + +@unittest.skipIf(not _HAS_TF_2, MSG_TF2_NOT_FOUND) +class CorrectnessTest(unittest.TestCase): + def _compare_shapes(self, ref_preds, coreml_preds): + if np.squeeze(ref_preds).shape != np.squeeze(coreml_preds).shape: + return False + else: + return True + + def _compare_predictions_numerical( + self, ref_preds, coreml_preds, snr_thresh=15, psnr_thresh=30 + ): + ref_preds = ref_preds.flatten() + coreml_preds = coreml_preds.flatten() + noise = coreml_preds - ref_preds + noise_var = np.mean(noise ** 2) + signal_energy = np.mean(ref_preds ** 2) + max_signal_energy = np.amax(ref_preds ** 2) + + if noise_var > 1e-6 and signal_energy > 1e-6: + SNR = 10 * np.log10(signal_energy / noise_var) + PSNR = 10 * np.log10(max_signal_energy / noise_var) + + print("SNR: {}, PSNR: {}".format(SNR, PSNR)) + print("noise var: ", np.mean(noise ** 2)) + print("max signal energy: ", np.amax(ref_preds ** 2)) + print("signal energy: ", np.mean(ref_preds ** 2)) + + self.assertGreaterEqual(PSNR, psnr_thresh) + self.assertGreaterEqual(SNR, snr_thresh) + + def _test_model( + self, + input_dict, + ref_output_dict, + coreml_model, + snr_thresh=15, + psnr_thresh=30, + ): + coreml_out_dict = coreml_model.predict(input_dict) + for out_ in list(ref_output_dict.keys()): + ref_out = ref_output_dict[out_].flatten() + coreml_out = coreml_out_dict[out_].flatten() + self.assertEqual(len(coreml_out), len(ref_out)) + self._compare_predictions_numerical( + ref_out, coreml_out, snr_thresh=snr_thresh, psnr_thresh=psnr_thresh + ) + + +@unittest.skipUnless(_is_macos(), "Only supported for MacOS platform.") +class StressTest(CorrectnessTest): + def test_data_reorganize(self, cpu_only=False): + def get_coreml_model_reorganize(X, params): + eval = True + mlmodel = None + try: + input_dim = X.shape[2:] + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features + ) + builder.add_reorganize_data( + "reorg", + "data", + "output", + mode=params["mode"], + block_size=params["block_size"], + ) + + if cpu_only: + compute_unit=ComputeUnit.CPU_ONLY + else: + compute_unit=ComputeUnit.ALL + mlmodel = MLModel(builder.spec, compute_units=compute_unit) + except RuntimeError as e: + print(e) + eval = False + + return mlmodel, eval + + def get_tf_predictions_reorganize(X, params): + if params["mode"] == "SPACE_TO_DEPTH": + y = tf.nn.space_to_depth(X, params["block_size"]) + else: + y = tf.nn.depth_to_space(X, params["block_size"]) + return y.numpy() + + """ + Define Params + """ + params_dict = dict( + C=[1, 2, 8, 16, 15, 27], + H=[2, 4, 6, 8, 10, 15, 21, 16], + W=[2, 4, 6, 8, 10, 15, 21, 16], + block_size=[2, 3, 4, 5], + mode=["SPACE_TO_DEPTH", "DEPTH_TO_SPACE"], + ) + params = [x for x in list(itertools.product(*params_dict.values()))] + all_candidates = [dict(zip(params_dict.keys(), x)) for x in params] + valid_params = [] + for pr in 
all_candidates:
+            if pr["mode"] == "SPACE_TO_DEPTH":
+                if pr["H"] % pr["block_size"] == 0 and pr["W"] % pr["block_size"] == 0:
+                    valid_params.append(pr)
+            else:
+                if pr["C"] % (pr["block_size"] ** 2) == 0:
+                    valid_params.append(pr)
+        print(
+            "Total params to be tested: ",
+            len(valid_params),
+            "out of candidates: ",
+            len(all_candidates),
+        )
+        """
+        Test
+        """
+        failed_tests_compile = []
+        for i in range(len(valid_params)):
+            params = valid_params[i]
+            # print("=========: ", params)
+            # if i % 10 == 0: print("======== Testing {}/{}".format(str(i), str(len(valid_params))))
+            X = np.random.rand(1, params["C"], params["H"], params["W"])
+            tf_preds = get_tf_predictions_reorganize(
+                np.transpose(X, [0, 2, 3, 1]), params
+            )
+            tf_preds = np.transpose(tf_preds, [0, 3, 1, 2])
+            coreml_model, eval = get_coreml_model_reorganize(
+                np.expand_dims(X, axis=0), params
+            )
+            if eval is False:
+                failed_tests_compile.append(params)
+            else:
+                input_dict = {"data": np.expand_dims(X, axis=0)}
+                ref_output_dict = {"output": tf_preds[0, :, :, :]}
+                self._test_model(input_dict, ref_output_dict, coreml_model)
+
+        self.assertEqual(failed_tests_compile, [])
+
+    def test_data_reorganize_cpu_only(self):
+        self.test_data_reorganize(cpu_only=True)
+
+    def test_depthwise_conv(self, cpu_only=False):
+        def get_coreml_model_depthwise(X, params, w):
+            eval = True
+            mlmodel = None
+            try:
+                input_dim = X.shape[2:]
+                input_features = [("data", datatypes.Array(*input_dim))]
+                output_features = [("output", None)]
+                builder = neural_network.NeuralNetworkBuilder(
+                    input_features, output_features
+                )
+                # translate weights: (Kh, Kw, kernel_channels, output_channels) == (Kh, Kw, Cin/g, Cout) == (Kh, Kw, 1, channel_multiplier * Cin)
+                w_e = np.reshape(
+                    w,
+                    (
+                        params["kernel_size"],
+                        params["kernel_size"],
+                        params["multiplier"] * params["C"],
+                        1,
+                    ),
+                )
+                w_e = np.transpose(w_e, [0, 1, 3, 2])
+                if params["padding"] == "SAME":
+                    pad_mode = "same"
+                else:
+                    pad_mode = "valid"
+                builder.add_convolution(
+                    "conv",
+                    kernel_channels=1,
+                    output_channels=params["multiplier"] * params["C"],
+                    height=params["kernel_size"],
+                    width=params["kernel_size"],
+                    stride_height=params["stride"],
+                    stride_width=params["stride"],
+                    border_mode=pad_mode,
+                    groups=params["C"],
+                    W=w_e,
+                    b=None,
+                    has_bias=0,
+                    is_deconv=0,
+                    output_shape=None,
+                    input_name="data",
+                    output_name="output",
+                )
+
+                if cpu_only:
+                    compute_unit = ComputeUnit.CPU_ONLY
+                else:
+                    compute_unit = ComputeUnit.ALL
+                mlmodel = MLModel(builder.spec, compute_units=compute_unit)
+            except RuntimeError as e:
+                print(e)
+                eval = False
+            return mlmodel, eval
+
+        def get_tf_predictions_depthwise(X, params, w):
+            Cin = params["C"]
+            Kh = Kw = params["kernel_size"]
+            channel_multiplier = params["multiplier"]
+            y = tf.nn.depthwise_conv2d(
+                X,
+                w,
+                strides=[1, params["stride"], params["stride"], 1],
+                padding=params["padding"],
+            )
+            return y.numpy()
+
+        """
+        Define Params
+        """
+        params_dict = dict(
+            C=[1, 4, 7],
+            H=[11, 16],
+            stride=[1, 2, 3],
+            kernel_size=[1, 2, 3, 5],
+            multiplier=[1, 2, 3, 4],
+            padding=["SAME", "VALID"],
+        )
+        params = [x for x in list(itertools.product(*params_dict.values()))]
+        all_candidates = [dict(zip(params_dict.keys(), x)) for x in params]
+        valid_params = []
+        for pr in all_candidates:
+            if pr["padding"] == "VALID":
+                if np.floor((pr["H"] - pr["kernel_size"]) / pr["stride"]) + 1 <= 0:
+                    continue
+            valid_params.append(pr)
+        print(
+            "Total params to be tested: ",
+            len(valid_params),
+            "out of candidates: ",
+            len(all_candidates),
+        )
+        """
+        Test
""" + failed_tests_compile = [] + for i in range(len(valid_params)): + params = valid_params[i] + # print("=========: ", params) + # if i % 10 == 0: print "======== Testing {}/{}".format(str(i), str(len(valid_params))) + X = np.random.rand(1, params["C"], params["H"], params["H"]) + w = np.random.rand( + params["kernel_size"], + params["kernel_size"], + params["C"], + params["multiplier"], + ) + tf_preds = get_tf_predictions_depthwise( + np.transpose(X, [0, 2, 3, 1]), params, w + ) + tf_preds = np.transpose(tf_preds, [0, 3, 1, 2]) + coreml_model, eval = get_coreml_model_depthwise( + np.expand_dims(X, axis=0), params, w + ) + if eval is False: + failed_tests_compile.append(params) + else: + input_dict = {"data": np.expand_dims(X, axis=0)} + ref_output_dict = {"output": tf_preds[0, :, :, :]} + self._test_model(input_dict, ref_output_dict, coreml_model) + + self.assertEqual(failed_tests_compile, []) + + def test_depthwise_conv_cpu_only(self, cpu_only=False): + self.test_depthwise_conv(cpu_only=True) + + @unittest.skipUnless(_macos_version() >= (10, 14), "Only supported on MacOS 10.14+") + def test_resize_bilinear(self, cpu_only=False): + def get_coreml_model_resize_bilinear(X, params): + eval = True + mlmodel = None + try: + input_dim = X.shape[2:] + input_features = [("data", datatypes.Array(*input_dim))] + output_features = [("output", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features + ) + if params["align_corners"]: + mode = "STRICT_ALIGN_ENDPOINTS_MODE" + else: + mode = "UPSAMPLE_MODE" + builder.add_resize_bilinear( + "resize", + "data", + "output", + target_height=params["Hnew"], + target_width=params["Wnew"], + mode=mode, + ) + + if cpu_only: + compute_unit=ComputeUnit.CPU_ONLY + else: + compute_unit=ComputeUnit.ALL + + mlmodel = MLModel(builder.spec, compute_units=compute_unit) + except RuntimeError as e: + print(e) + eval = False + + return mlmodel, eval + + def get_tf_predictions_resize_bilinear(X, params): + y = tf.compat.v1.image.resize_bilinear( + X, + size=[params["Hnew"], params["Wnew"]], + align_corners=params["align_corners"], + ) + return y.numpy() + + """ + Define Params + """ + params_dict = dict( + H=[1, 3, 10], # [1,2,3,10] + W=[1, 3, 10], # [1,2,3,10] + Hnew=[1, 2, 6], # [1,3,6,12,20] + Wnew=[1, 2, 6], # [1,3,6,12,20] + align_corners=[False, True], # [False, True] + ch=[1, 5], # [1,5] + batch=[1, 3], # [1, 3] + ) + params = [x for x in list(itertools.product(*params_dict.values()))] + valid_params = [dict(zip(params_dict.keys(), x)) for x in params] + print("Total params to be tested: {}".format(len(valid_params))) + """ + Test + """ + failed_tests_compile = [] + for i in range(len(valid_params)): + params = valid_params[i] + # #print("=========: ", params) + if i % 100 == 0: + print( + "======================= Testing {}/{}".format( + str(i), str(len(valid_params)) + ) + ) + X = np.round( + 255 + * np.random.rand( + params["batch"], params["ch"], params["H"], params["W"] + ) + ) + tf_preds = get_tf_predictions_resize_bilinear( + np.transpose(X, [0, 2, 3, 1]), params + ) + tf_preds = np.transpose(tf_preds, [0, 3, 1, 2]) + coreml_model, eval = get_coreml_model_resize_bilinear( + np.expand_dims(X, axis=0), params + ) + if eval is False: + failed_tests_compile.append(params) + else: + input_dict = {"data": np.expand_dims(X, axis=0)} + ref_output_dict = {"output": np.expand_dims(tf_preds, axis=0)} + self._test_model(input_dict, ref_output_dict, coreml_model) + + self.assertEqual(failed_tests_compile, []) + + 
@unittest.skipUnless(_macos_version() >= (10, 14), "Only supported on MacOS 10.14+") + def test_resize_bilinear_cpu_only(self): + self.test_resize_bilinear(cpu_only=True) + + @unittest.skipUnless(_macos_version() >= (10, 14), "Only supported on MacOS 10.14+") + def test_crop_resize(self, cpu_only=False): + def get_coreml_model_crop_resize(params): + eval = True + mlmodel = None + batch, ch, n_roi = params["b_c_n"] + H = params["H"] + W = params["W"] + try: + input_features = [("data", datatypes.Array(ch, H, W))] + input_features.append(("roi", datatypes.Array(4, 1, 1))) + if batch != 1: + input_features.append(("box_ind", datatypes.Array(1, 1, 1))) + output_features = [("output", None)] + builder = neural_network.NeuralNetworkBuilder( + input_features, output_features + ) + + if batch != 1: + builder.add_elementwise( + "concat", ["box_ind", "roi"], "roi_out", "CONCAT" + ) + input_names = ["data", "roi_out"] + else: + input_names = ["data", "roi"] + + builder.add_crop_resize( + "resize", + input_names, + "output", + target_height=params["Hnew"], + target_width=params["Wnew"], + mode="ALIGN_ENDPOINTS_MODE", + normalized_roi=True, + box_indices_mode="CORNERS_HEIGHT_FIRST", + spatial_scale=1.0, + ) + + if cpu_only: + compute_unit=ComputeUnit.CPU_ONLY + else: + compute_unit=ComputeUnit.ALL + mlmodel = MLModel(builder.spec, compute_units=compute_unit) + except RuntimeError as e: + print(e) + eval = False + + return mlmodel, eval + + def get_tf_predictions_crop_resize(X, boxes, box_ind, params): + y = tf.image.crop_and_resize( + X, boxes, box_ind, crop_size=[params["Hnew"], params["Wnew"]] + ) + return y.numpy() + + """ + Define Params + """ + params_dict = dict( + H=[1, 3, 10], # [1,2,3,6,10] + W=[1, 3, 10], # [1,2,3,6,10] + Hnew=[1, 2, 3, 6], # [1,2,3,6,12,20] + Wnew=[1, 2, 3, 6], # [1,2,3,6,12,20] + b_c_n=[ + (1, 1, 1), + (1, 2, 3), + (3, 2, 1), + (3, 4, 3), + ], # [(1,1,1),(1,2,3),(3,2,1),(3,4,3)] + ) + params = [x for x in list(itertools.product(*params_dict.values()))] + valid_params = [dict(zip(params_dict.keys(), x)) for x in params] + print("Total params to be tested: {}".format(len(valid_params))) + """ + Test + """ + failed_tests_compile = [] + for i in range(len(valid_params)): + params = valid_params[i] + batch, ch, n_roi = params["b_c_n"] + X = np.round(255 * np.random.rand(batch, ch, params["H"], params["W"])) + roi = np.zeros((n_roi, 4), dtype=np.float32) + box_ind = np.zeros((n_roi)) + if batch != 1: + box_ind = np.random.randint(low=0, high=batch, size=(n_roi)) + for ii in range(n_roi): + r = np.random.rand(4) + w_start = r[0] + h_start = r[1] + w_end = r[2] * (1 - w_start) + w_start + h_end = r[3] * (1 - h_start) + h_start + roi[ii, :] = [h_start, w_start, h_end, w_end] + roi[ii, :] = np.round(100 * roi[ii, :]) / 100 + assert roi[ii, 0] <= roi[ii, 2] + assert roi[ii, 1] <= roi[ii, 3] + + tf_preds = get_tf_predictions_crop_resize( + np.transpose(X, [0, 2, 3, 1]), roi, box_ind, params + ) + tf_preds = np.transpose(tf_preds, [0, 3, 1, 2]) + coreml_model, eval = get_coreml_model_crop_resize(params) + if eval is False: + failed_tests_compile.append(params) + else: + input_dict = {"data": np.expand_dims(X, axis=0)} + input_dict["roi"] = np.reshape(roi, (n_roi, 1, 4, 1, 1)) + if batch != 1: + input_dict["box_ind"] = np.reshape( + box_ind.astype(np.float32), (n_roi, 1, 1, 1, 1) + ) + ref_output_dict = {"output": np.expand_dims(tf_preds, axis=0)} + self._test_model(input_dict, ref_output_dict, coreml_model) + + self.assertEqual(failed_tests_compile, []) + + 
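+    # A small worked sketch (ours; not called by any test) of the SNR/PSNR metric
+    # these stress tests rely on via _compare_predictions_numerical above: both
+    # are signal-to-noise ratios in decibels, with PSNR substituting the peak
+    # signal energy for the mean signal energy.
+    @staticmethod
+    def _snr_psnr_sketch(ref_preds, coreml_preds):
+        ref = ref_preds.flatten()
+        noise_var = np.mean((coreml_preds.flatten() - ref) ** 2)
+        snr = 10 * np.log10(np.mean(ref ** 2) / noise_var)
+        psnr = 10 * np.log10(np.amax(ref ** 2) / noise_var)
+        return snr, psnr
+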
@unittest.skipUnless(_macos_version() >= (10, 14), "Only supported on MacOS 10.14+") + def test_crop_resize_cpu_only(self): + self.test_crop_resize(cpu_only=True) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/pipeline/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/test/pipeline/__init__.py new file mode 100644 index 00000000..8aa13a28 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/pipeline/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/pipeline/test_model_updatable.py b/__packaged__/coreml/.python_dependencies/coremltools/test/pipeline/test_model_updatable.py new file mode 100644 index 00000000..3406d37f --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/pipeline/test_model_updatable.py @@ -0,0 +1,796 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os +import shutil +import tempfile +import unittest + +import numpy as _np + +import coremltools.models.datatypes as datatypes +from coremltools.models import MLModel +from coremltools.models.neural_network import (AdamParams, + NeuralNetworkBuilder, SgdParams, + quantization_utils) +from coremltools.models.pipeline import PipelineClassifier, PipelineRegressor +from coremltools.models.utils import save_spec + + +class LayerSelector(quantization_utils.QuantizedLayerSelector): + def __init__(self, layer_name): + super(LayerSelector, self).__init__() + self.layer_name = layer_name + + def do_quantize(self, layer, weight_param="bias"): + ret = super(LayerSelector, self).do_quantize(layer) + if not ret or layer.name == self.layer_name: + return False + return True + + +class MLModelUpdatableTest(unittest.TestCase): + @classmethod + def setUpClass(self): + self.model_dir = tempfile.mkdtemp() + + @classmethod + def tearDownClass(self): + if os.path.exists(self.model_dir): + shutil.rmtree(self.model_dir) + + def create_base_builder(self, is_updatable=True): + self.input_features = [("input", datatypes.Array(3))] + self.output_features = [("output", None)] + self.output_names = ["output"] + + builder = NeuralNetworkBuilder(self.input_features, self.output_features) + + W1 = _np.random.uniform(-0.5, 0.5, (3, 3)) + W2 = _np.random.uniform(-0.5, 0.5, (3, 3)) + builder.add_inner_product( + name="ip1", + W=W1, + b=None, + input_channels=3, + output_channels=3, + has_bias=False, + input_name="input", + output_name="hidden", + ) + builder.add_inner_product( + name="ip2", + W=W2, + b=None, + input_channels=3, + output_channels=3, + has_bias=False, + input_name="hidden", + output_name="output", + ) + + if is_updatable: + builder.make_updatable(["ip1", "ip2"]) + + return builder + + def test_updatable_model_creation_ce_sgd(self): + builder = self.create_base_builder() + + builder.add_softmax( + name="softmax", input_name="output", output_name="softmax_output" + ) + + builder.set_categorical_cross_entropy_loss( + name="cross_entropy", input="softmax_output" + ) + + builder.set_sgd_optimizer(SgdParams(lr=1e-2, batch=10, momentum=0.0)) + builder.set_epochs(20, allowed_set=[10, 20, 30, 40]) + + model_path = 
os.path.join(self.model_dir, "updatable_creation.mlmodel") + save_spec(builder.spec, model_path) + + mlmodel = MLModel(model_path) + self.assertTrue(mlmodel is not None) + + spec = mlmodel.get_spec() + self.assertTrue(spec.isUpdatable) + self.assertTrue(spec.neuralNetwork.layers[0].isUpdatable) + self.assertTrue(spec.neuralNetwork.layers[0].innerProduct.weights.isUpdatable) + self.assertTrue(spec.neuralNetwork.layers[1].isUpdatable) + self.assertTrue(spec.neuralNetwork.layers[1].innerProduct.weights.isUpdatable) + + self.assertTrue( + spec.neuralNetwork.updateParams.lossLayers[ + 0 + ].categoricalCrossEntropyLossLayer + is not None + ) + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.sgdOptimizer is not None + ) + + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.learningRate.defaultValue, + 1e-2, + atol=1e-4, + ) + ) + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.miniBatchSize.defaultValue, + 10, + atol=1e-4, + ) + ) + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.momentum.defaultValue, + 0, + atol=1e-8, + ) + ) + + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.epochs.defaultValue, 20, atol=1e-4 + ) + ) + + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.learningRate.range.minValue + == 0 + ) + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.learningRate.range.maxValue + == 1 + ) + + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.miniBatchSize.set.values + == [10] + ) + + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.momentum.range.minValue + == 0 + ) + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.momentum.range.maxValue + == 1 + ) + + def test_updatable_model_creation_ce_adam(self): + builder = self.create_base_builder() + + builder.add_softmax( + name="softmax", input_name="output", output_name="softmax_output" + ) + + builder.set_categorical_cross_entropy_loss( + name="cross_entropy", input="softmax_output" + ) + + adam_params = AdamParams() + adam_params.set_batch(value=10, allowed_set=[10, 20]) + builder.set_adam_optimizer(adam_params) + builder.set_epochs(20) + + model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel") + print(model_path) + save_spec(builder.spec, model_path) + + mlmodel = MLModel(model_path) + self.assertTrue(mlmodel is not None) + spec = mlmodel.get_spec() + self.assertTrue(spec.isUpdatable) + self.assertTrue(spec.neuralNetwork.layers[0].isUpdatable) + self.assertTrue(spec.neuralNetwork.layers[0].innerProduct.weights.isUpdatable) + self.assertTrue(spec.neuralNetwork.layers[1].isUpdatable) + self.assertTrue(spec.neuralNetwork.layers[1].innerProduct.weights.isUpdatable) + + self.assertTrue( + spec.neuralNetwork.updateParams.lossLayers[ + 0 + ].categoricalCrossEntropyLossLayer + is not None + ) + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer is not None + ) + + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.learningRate.defaultValue, + 1e-2, + atol=1e-4, + ) + ) + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.miniBatchSize.defaultValue, + 10, + atol=1e-4, + ) + ) + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta1.defaultValue, + 0.9, + atol=1e-4, + ) + ) + self.assertTrue( + _np.isclose( + 
spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta2.defaultValue, + 0.999, + atol=1e-4, + ) + ) + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.eps.defaultValue, + 1e-8, + atol=1e-8, + ) + ) + + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.epochs.defaultValue, 20, atol=1e-4 + ) + ) + + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.learningRate.range.minValue + == 0 + ) + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.learningRate.range.maxValue + == 1 + ) + + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.miniBatchSize.set.values + == [10, 20] + ) + + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta1.range.minValue + == 0 + ) + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta1.range.maxValue + == 1 + ) + + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta2.range.minValue + == 0 + ) + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta2.range.maxValue + == 1 + ) + + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.eps.range.minValue + == 0 + ) + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.eps.range.maxValue + == 1 + ) + + self.assertTrue(spec.neuralNetwork.updateParams.epochs.set.values == [20]) + + def test_updatable_model_creation_mse_sgd(self): + builder = self.create_base_builder() + + builder.set_mean_squared_error_loss( + name="mse", input_feature=("output", datatypes.Array(3)) + ) + + builder.set_sgd_optimizer(SgdParams(lr=1e-2, batch=10, momentum=0.0)) + + builder.set_epochs(20) + + model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel") + print(model_path) + save_spec(builder.spec, model_path) + + mlmodel = MLModel(model_path) + self.assertTrue(mlmodel is not None) + spec = mlmodel.get_spec() + self.assertTrue(spec.isUpdatable) + self.assertTrue(spec.neuralNetwork.layers[0].isUpdatable) + self.assertTrue(spec.neuralNetwork.layers[0].innerProduct.weights.isUpdatable) + self.assertTrue(spec.neuralNetwork.layers[1].isUpdatable) + self.assertTrue(spec.neuralNetwork.layers[1].innerProduct.weights.isUpdatable) + + self.assertTrue( + spec.neuralNetwork.updateParams.lossLayers[ + 0 + ].categoricalCrossEntropyLossLayer + is not None + ) + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.sgdOptimizer is not None + ) + + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.learningRate.defaultValue, + 1e-2, + atol=1e-4, + ) + ) + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.miniBatchSize.defaultValue, + 10, + atol=1e-4, + ) + ) + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.momentum.defaultValue, + 0, + atol=1e-8, + ) + ) + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.epochs.defaultValue, 20, atol=1e-4 + ) + ) + + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.learningRate.range.minValue + == 0 + ) + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.learningRate.range.maxValue + == 1 + ) + + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.miniBatchSize.set.values + == [10] + ) + + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.momentum.range.minValue + == 0 + ) + self.assertTrue( + 
spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.momentum.range.maxValue + == 1 + ) + + def test_updatable_model_creation_mse_adam(self): + builder = self.create_base_builder() + + builder.set_mean_squared_error_loss( + name="mse", input_feature=("output", datatypes.Array(3)) + ) + + builder.set_adam_optimizer( + AdamParams(lr=1e-2, batch=10, beta1=0.9, beta2=0.999, eps=1e-8) + ) + builder.set_epochs(20, allowed_set=[10, 20, 30]) + + model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel") + print(model_path) + save_spec(builder.spec, model_path) + + mlmodel = MLModel(model_path) + self.assertTrue(mlmodel is not None) + spec = mlmodel.get_spec() + self.assertTrue(spec.isUpdatable) + self.assertTrue(spec.neuralNetwork.layers[0].isUpdatable) + self.assertTrue(spec.neuralNetwork.layers[0].innerProduct.weights.isUpdatable) + self.assertTrue(spec.neuralNetwork.layers[1].isUpdatable) + self.assertTrue(spec.neuralNetwork.layers[1].innerProduct.weights.isUpdatable) + + self.assertTrue( + spec.neuralNetwork.updateParams.lossLayers[ + 0 + ].categoricalCrossEntropyLossLayer + is not None + ) + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer is not None + ) + + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.learningRate.defaultValue, + 1e-2, + atol=1e-4, + ) + ) + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.miniBatchSize.defaultValue, + 10, + atol=1e-4, + ) + ) + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta1.defaultValue, + 0.9, + atol=1e-4, + ) + ) + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta2.defaultValue, + 0.999, + atol=1e-4, + ) + ) + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.eps.defaultValue, + 1e-8, + atol=1e-8, + ) + ) + self.assertTrue( + _np.isclose( + spec.neuralNetwork.updateParams.epochs.defaultValue, 20, atol=1e-4 + ) + ) + + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.learningRate.range.minValue + == 0 + ) + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.learningRate.range.maxValue + == 1 + ) + + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.miniBatchSize.set.values + == [10] + ) + + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta1.range.minValue + == 0 + ) + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta1.range.maxValue + == 1 + ) + + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta2.range.minValue + == 0 + ) + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta2.range.maxValue + == 1 + ) + + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.eps.range.minValue + == 0 + ) + self.assertTrue( + spec.neuralNetwork.updateParams.optimizer.adamOptimizer.eps.range.maxValue + == 1 + ) + + self.assertTrue( + spec.neuralNetwork.updateParams.epochs.set.values == [10, 20, 30] + ) + + def test_nn_set_cce_without_softmax_fail(self): + nn_builder = self.create_base_builder() + + # fails since adding CCE without softmax must raise error + with self.assertRaises(ValueError): + nn_builder.set_categorical_cross_entropy_loss( + name="cross_entropy", input="output" + ) + + def test_nn_set_cce_invalid(self): + nn_builder = self.create_base_builder() + nn_builder.add_softmax( + name="softmax", 
input_name="output", output_name="softmax_output" + ) + + # fails since CCE input must be softmax output + with self.assertRaises(ValueError): + nn_builder.set_categorical_cross_entropy_loss( + name="cross_entropy", input="output" + ) + + def test_nn_set_softmax_updatable_invalid(self): + nn_builder = self.create_base_builder() + nn_builder.add_softmax( + name="softmax", input_name="output", output_name="softmax_output" + ) + + # fails since marking softmax as updatable layer is not allowed + with self.assertRaises(ValueError): + nn_builder.make_updatable(["softmax"]) + + def test_nn_set_training_input(self): + builder = self.create_base_builder() + + builder.set_mean_squared_error_loss( + name="mse", input_feature=("output", datatypes.Array(3)) + ) + + builder.set_adam_optimizer( + AdamParams(lr=1e-2, batch=10, beta1=0.9, beta2=0.999, eps=1e-8) + ) + builder.set_epochs(20, allowed_set=[10, 20, 30]) + + model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel") + print(model_path) + save_spec(builder.spec, model_path) + + mlmodel = MLModel(model_path) + self.assertTrue(mlmodel is not None) + spec = mlmodel.get_spec() + self.assertEqual(spec.description.trainingInput[0].name, "input") + self.assertEqual( + spec.description.trainingInput[0].type.WhichOneof("Type"), "multiArrayType" + ) + self.assertEqual(spec.description.trainingInput[1].name, "output_true") + self.assertEqual( + spec.description.trainingInput[1].type.WhichOneof("Type"), "multiArrayType" + ) + + def test_nn_builder_with_training_features(self): + input_features = [("input", datatypes.Array(3))] + output_features = [("output", datatypes.Array(3))] + builder = NeuralNetworkBuilder(input_features, output_features) + + W1 = _np.random.uniform(-0.5, 0.5, (3, 3)) + W2 = _np.random.uniform(-0.5, 0.5, (3, 3)) + builder.add_inner_product( + name="ip1", + W=W1, + b=None, + input_channels=3, + output_channels=3, + has_bias=False, + input_name="input", + output_name="hidden", + ) + builder.add_inner_product( + name="ip2", + W=W2, + b=None, + input_channels=3, + output_channels=3, + has_bias=False, + input_name="hidden", + output_name="output", + ) + + builder.make_updatable(["ip1", "ip2"]) # or a dict for weightParams + + builder.set_mean_squared_error_loss( + name="mse", input_feature=("output", datatypes.Array(3)) + ) + + builder.set_adam_optimizer( + AdamParams(lr=1e-2, batch=10, beta1=0.9, beta2=0.999, eps=1e-8) + ) + builder.set_epochs(20, allowed_set=[10, 20, 30]) + + model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel") + print(model_path) + save_spec(builder.spec, model_path) + + mlmodel = MLModel(model_path) + self.assertTrue(mlmodel is not None) + spec = mlmodel.get_spec() + self.assertEqual(spec.description.trainingInput[0].name, "input") + self.assertEqual( + spec.description.trainingInput[0].type.WhichOneof("Type"), "multiArrayType" + ) + self.assertEqual(spec.description.trainingInput[1].name, "output_true") + self.assertEqual( + spec.description.trainingInput[1].type.WhichOneof("Type"), "multiArrayType" + ) + + def test_nn_fp16_make_updatable_fail(self): + nn_builder = self.create_base_builder(is_updatable=False) + model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel") + save_spec(nn_builder.spec, model_path) + mlmodel = MLModel(model_path) + + quantized_result = quantization_utils.quantize_weights(mlmodel, 16, "linear") + q_nn_builder = NeuralNetworkBuilder(spec=quantized_result._spec) + + # fails since an FP16 model cannot be marked updatable + with 
self.assertRaises(ValueError):
+            q_nn_builder.make_updatable(["ip1", "ip2"])
+
+    def test_nn_partial_fp16_make_updatable_fail(self):
+        nn_builder = self.create_base_builder(is_updatable=False)
+        model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel")
+        print(model_path)
+        save_spec(nn_builder.spec, model_path)
+        mlmodel = MLModel(model_path)
+
+        selector = LayerSelector(layer_name='ip1')
+        quantized_model = quantization_utils.quantize_weights(mlmodel, 16, "linear", selector=selector)
+
+        q_nn_builder = NeuralNetworkBuilder(spec=quantized_model._spec)
+
+        # fails since model has a layer with FP16 bias
+        with self.assertRaises(ValueError):
+            q_nn_builder.make_updatable(["ip2"])
+
+    def test_nn_partial_fp16_make_updatable_quantized_layer_fail(self):
+        nn_builder = self.create_base_builder(is_updatable=False)
+        model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel")
+        print(model_path)
+        save_spec(nn_builder.spec, model_path)
+        mlmodel = MLModel(model_path)
+
+        selector = LayerSelector(layer_name='ip2')
+        quantized_result = quantization_utils.quantize_weights(mlmodel, 16, "linear", selector=selector)
+        quantized_spec = quantized_result._spec
+        q_nn_builder = NeuralNetworkBuilder(spec=quantized_spec)
+
+        # fails since the layer being marked updatable has FP16 weights
+        with self.assertRaises(ValueError):
+            q_nn_builder.make_updatable(["ip2"])
+
+    def test_nn_updatable_quantize_fp16_fail(self):
+        nn_builder = self.create_base_builder()
+        model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel")
+        print(model_path)
+        save_spec(nn_builder.spec, model_path)
+        mlmodel = MLModel(model_path)
+
+        # fails since updatable models cannot get quantized to FP16
+        with self.assertRaises(Exception):
+            quantization_utils.quantize_weights(mlmodel, 16, "linear")
+
+    def test_pipeline_regressor_make_updatable(self):
+        builder = self.create_base_builder()
+        builder.spec.isUpdatable = False
+
+        training_input = [("input", datatypes.Array(3)), ("target", "Double")]
+
+        # fails due to missing sub-models
+        p_regressor = PipelineRegressor(
+            self.input_features, self.output_names, training_input
+        )
+        with self.assertRaises(ValueError):
+            p_regressor.make_updatable()
+        self.assertEqual(p_regressor.spec.isUpdatable, False)
+
+        # fails because a sub-model is not updatable
+        p_regressor.add_model(builder.spec)
+        with self.assertRaises(ValueError):
+            p_regressor.make_updatable()
+        self.assertEqual(p_regressor.spec.isUpdatable, False)
+
+        builder.spec.isUpdatable = True
+        p_regressor.add_model(builder.spec)
+
+        self.assertEqual(p_regressor.spec.isUpdatable, False)
+        p_regressor.make_updatable()
+        self.assertEqual(p_regressor.spec.isUpdatable, True)
+        self.assertEqual(p_regressor.spec.description.trainingInput[0].name, "input")
+        self.assertEqual(
+            p_regressor.spec.description.trainingInput[0].type.WhichOneof("Type"),
+            "multiArrayType",
+        )
+        self.assertEqual(p_regressor.spec.description.trainingInput[1].name, "target")
+        self.assertEqual(
+            p_regressor.spec.description.trainingInput[1].type.WhichOneof("Type"),
+            "doubleType",
+        )
+
+        # fails since adding new models is not allowed once the pipeline is updatable
+        with self.assertRaises(ValueError):
+            p_regressor.add_model(builder.spec)
+        self.assertEqual(p_regressor.spec.isUpdatable, True)
+
+    def test_pipeline_classifier_make_updatable(self):
+        builder = self.create_base_builder()
+        builder.spec.isUpdatable = False
+        training_input = [("input", datatypes.Array(3)), ("target", "String")]
+
+        # fails due to missing sub-models
+        p_classifier = PipelineClassifier(
+            self.input_features, self.output_names, training_features=training_input
+        )
+        with self.assertRaises(ValueError):
+            p_classifier.make_updatable()
+        self.assertEqual(p_classifier.spec.isUpdatable, False)
+
+        # fails because a sub-model is not updatable
+        p_classifier.add_model(builder.spec)
+        with self.assertRaises(ValueError):
+            p_classifier.make_updatable()
+        self.assertEqual(p_classifier.spec.isUpdatable, False)
+
+        builder.spec.isUpdatable = True
+        p_classifier.add_model(builder.spec)
+
+        self.assertEqual(p_classifier.spec.isUpdatable, False)
+        p_classifier.make_updatable()
+        self.assertEqual(p_classifier.spec.isUpdatable, True)
+        self.assertEqual(p_classifier.spec.description.trainingInput[0].name, "input")
+        self.assertEqual(
+            p_classifier.spec.description.trainingInput[0].type.WhichOneof("Type"),
+            "multiArrayType",
+        )
+        self.assertEqual(p_classifier.spec.description.trainingInput[1].name, "target")
+        self.assertEqual(
+            p_classifier.spec.description.trainingInput[1].type.WhichOneof("Type"),
+            "stringType",
+        )
+
+        # fails since adding new models is not allowed once the pipeline is updatable
+        with self.assertRaises(ValueError):
+            p_classifier.add_model(builder.spec)
+        self.assertEqual(p_classifier.spec.isUpdatable, True)
+
+    def test_pipeline_classifier_set_training_inputs(self):
+        builder = self.create_base_builder()
+        builder.spec.isUpdatable = False
+        training_input = [("input", datatypes.Array(3)), ("target", "String")]
+
+        # fails due to missing sub-models
+        p_classifier = PipelineClassifier(self.input_features, self.output_names)
+        p_classifier.set_training_input(training_input)
+        with self.assertRaises(ValueError):
+            p_classifier.make_updatable()
+        self.assertEqual(p_classifier.spec.isUpdatable, False)
+
+        # fails because a sub-model is not updatable
+        p_classifier.add_model(builder.spec)
+        with self.assertRaises(ValueError):
+            p_classifier.make_updatable()
+        self.assertEqual(p_classifier.spec.isUpdatable, False)
+
+        builder.spec.isUpdatable = True
+        p_classifier.add_model(builder.spec)
+
+        self.assertEqual(p_classifier.spec.isUpdatable, False)
+        p_classifier.make_updatable()
+        self.assertEqual(p_classifier.spec.isUpdatable, True)
+        self.assertEqual(p_classifier.spec.description.trainingInput[0].name, "input")
+        self.assertEqual(
+            p_classifier.spec.description.trainingInput[0].type.WhichOneof("Type"),
+            "multiArrayType",
+        )
+        self.assertEqual(p_classifier.spec.description.trainingInput[1].name, "target")
+        self.assertEqual(
+            p_classifier.spec.description.trainingInput[1].type.WhichOneof("Type"),
+            "stringType",
+        )
+
+        # fails since adding new models is not allowed once the pipeline is updatable
+        with self.assertRaises(ValueError):
+            p_classifier.add_model(builder.spec)
+        self.assertEqual(p_classifier.spec.isUpdatable, True)
+
+    def test_shuffle_on_by_default(self):
+        builder = self.create_base_builder()
+
+        # base builder already marks two layers as updatable
+        self.assertTrue(
+            builder.nn_spec.updateParams.shuffle.defaultValue,
+            "Shuffle not turned on by default for updatable models",
+        )
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/pipeline/test_pipeline.py b/__packaged__/coreml/.python_dependencies/coremltools/test/pipeline/test_pipeline.py
new file mode 100644
index 00000000..06d2a6bd
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/test/pipeline/test_pipeline.py
@@ -0,0 +1,277 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools +import tempfile +import unittest + +import numpy as np +import pytest + +import coremltools as ct +from coremltools._deps import _HAS_LIBSVM, _HAS_SKLEARN +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import Function, Program +from coremltools.models.pipeline import PipelineClassifier, PipelineRegressor + +if _HAS_SKLEARN: + from sklearn.datasets import load_boston + from sklearn.linear_model import LinearRegression + from sklearn.pipeline import Pipeline + from sklearn.preprocessing import OneHotEncoder + + from coremltools.converters import sklearn as converter + +if _HAS_LIBSVM: + from libsvm import svmutil + + from coremltools.converters import libsvm as libsvm_converter + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing scikit-learn. Skipping tests.") +@unittest.skipIf(not _HAS_LIBSVM, "Missing libsvm. Skipping tests.") +class LinearRegressionPipelineCreationTest(unittest.TestCase): + """ + Unit test class for testing scikit-learn converter. + """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. + """ + + if not (_HAS_SKLEARN): + return + + scikit_data = load_boston() + feature_names = scikit_data.feature_names + + scikit_model = LinearRegression() + scikit_model.fit(scikit_data["data"], scikit_data["target"]) + scikit_spec = converter.convert( + scikit_model, feature_names, "target" + ).get_spec() + + # Save the data and the model + self.scikit_data = scikit_data + self.scikit_model = scikit_model + self.scikit_spec = scikit_spec + + def test_pipeline_regression_creation(self): + + input_names = self.scikit_data.feature_names + output_name = "target" + p_regressor = PipelineRegressor(input_names, "target") + p_regressor.add_model(self.scikit_spec) + + self.assertIsNotNone(p_regressor.spec) + self.assertEqual(len(p_regressor.spec.pipelineRegressor.pipeline.models), 1) + + # Test the model class of the linear regressor model + spec = p_regressor.spec.pipelineRegressor.pipeline.models[0] + self.assertIsNotNone(spec.description) + + # Test the interface class + self.assertEqual(spec.description.predictedFeatureName, "target") + + # Test the inputs and outputs + self.assertEqual(len(spec.description.output), 1) + self.assertEqual(spec.description.output[0].name, "target") + self.assertEqual( + spec.description.output[0].type.WhichOneof("Type"), "doubleType" + ) + for input_type in spec.description.input: + self.assertEqual(input_type.type.WhichOneof("Type"), "doubleType") + self.assertEqual( + sorted(input_names), sorted(map(lambda x: x.name, spec.description.input)) + ) + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing scikit-learn. Skipping tests.") +@unittest.skipIf(not _HAS_LIBSVM, "Missing libsvm. Skipping tests.") +class LibSVMPipelineCreationTest(unittest.TestCase): + """ + Unit test class for testing scikit-learn converter. + """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. 
+ """ + if not _HAS_SKLEARN: + return + if not _HAS_LIBSVM: + return + + scikit_data = load_boston() + prob = svmutil.svm_problem( + scikit_data["target"] > scikit_data["target"].mean(), + scikit_data["data"].tolist(), + ) + param = svmutil.svm_parameter() + param.svm_type = svmutil.C_SVC + param.kernel_type = svmutil.LINEAR + param.eps = 1 + + libsvm_model = svmutil.svm_train(prob, param) + libsvm_spec = libsvm_converter.convert( + libsvm_model, scikit_data.feature_names, "target" + ).get_spec() + + # Save the data and the model + self.scikit_data = scikit_data + self.libsvm_spec = libsvm_spec + + def test_pipeline_classifier_creation(self): + + input_names = self.scikit_data.feature_names + p_classifier = PipelineClassifier(input_names, [1, 0]) + p_classifier.add_model(self.libsvm_spec) + + self.assertIsNotNone(p_classifier.spec) + self.assertEqual(len(p_classifier.spec.pipelineClassifier.pipeline.models), 1) + + # Test the model class of the svm model + spec = p_classifier.spec.pipelineClassifier.pipeline.models[0] + self.assertIsNotNone(spec.description) + + # Test the interface class + self.assertEqual(spec.description.predictedFeatureName, "target") + + # Test the inputs and outputs + self.assertEqual(len(spec.description.output), 1) + self.assertEqual(spec.description.output[0].name, "target") + self.assertEqual( + spec.description.output[0].type.WhichOneof("Type"), "int64Type" + ) + + for input_type in spec.description.input: + self.assertEqual(input_type.type.WhichOneof("Type"), "doubleType") + self.assertEqual( + sorted(input_names), sorted(map(lambda x: x.name, spec.description.input)) + ) + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing scikit-learn. Skipping tests.") +class LinearRegressionPipeline(unittest.TestCase): + """ + Unit test class for testing scikit-learn converter. + """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. + """ + if not _HAS_SKLEARN: + return + scikit_data = load_boston() + feature_names = scikit_data.feature_names + + scikit_model = Pipeline(steps=[("linear", LinearRegression())]) + scikit_model.fit(scikit_data["data"], scikit_data["target"]) + + # Save the data and the model + self.scikit_data = scikit_data + self.scikit_model = scikit_model + + def test_pipeline_regression_creation(self): + input_names = self.scikit_data.feature_names + output_name = "target" + + p_regressor = converter.convert( + self.scikit_model, input_names, "target" + ).get_spec() + self.assertIsNotNone(p_regressor) + self.assertEqual(len(p_regressor.pipelineRegressor.pipeline.models), 2) + + # Test the model class of the linear regressor model + spec = p_regressor.pipelineRegressor.pipeline.models[-1] + self.assertIsNotNone(spec.description) + + # Test the interface class + self.assertEqual(spec.description.predictedFeatureName, "target") + + # Test the inputs and outputs + self.assertEqual(len(spec.description.output), 1) + self.assertEqual(spec.description.output[0].name, "target") + self.assertEqual( + spec.description.output[0].type.WhichOneof("Type"), "doubleType" + ) + + for input_type in p_regressor.description.input: + self.assertEqual(input_type.type.WhichOneof("Type"), "doubleType") + self.assertEqual( + sorted(input_names), + sorted(map(lambda x: x.name, p_regressor.description.input)), + ) + + def test_conversion_bad_inputs(self): + """ + Failure testing for bad conversion. 
+ """ + # Error on converting an untrained model + with self.assertRaises(TypeError): + model = OneHotEncoder() + spec = converter.convert(model, "data", "out", "regressor") + + +class TestMakePipeline: + @staticmethod + def _make_model(input_name, input_length, + output_name, output_length, + convert_to): + + weight_tensor = np.arange(input_length * output_length, dtype='float32') + weight_tensor = weight_tensor.reshape(output_length, input_length) + + prog = Program() + func_inputs = {input_name: mb.placeholder(shape=(input_length,))} + with Function(func_inputs) as ssa_fun: + input = ssa_fun.inputs[input_name] + y = mb.linear(x=input, weight=weight_tensor, name=output_name) + ssa_fun.set_outputs([y]) + prog.add_function("main", ssa_fun) + + return ct.convert(prog, convert_to=convert_to) + + + @staticmethod + @pytest.mark.parametrize( + "model1_backend, model2_backend", + itertools.product(["mlprogram", "neuralnetwork"], ["mlprogram", "neuralnetwork"]), + ) + def test_simple(model1_backend, model2_backend): + # Create models + m1 = TestMakePipeline._make_model("x", 20, "y1", 10, model1_backend) + m2 = TestMakePipeline._make_model("y1", 10, "y2", 2, model2_backend) + + # Get non-pipeline result + x = np.random.rand(20) + y1 = m1.predict({"x": x})["y1"] + y2 = m2.predict({"y1": y1}) + + pipeline_model = ct.utils.make_pipeline(m1, m2) + + y_pipeline = pipeline_model.predict({"x": x}) + np.testing.assert_allclose(y2["y2"], y_pipeline["y2"]) + + # Check save/load + with tempfile.TemporaryDirectory() as save_dir: + # Save pipeline + save_path = save_dir + "/test.mlpackage" + pipeline_model.save(save_path) + + # Check loading from a mlpackage path + p2 = ct.models.MLModel(save_path) + y_pipeline = p2.predict({"x": x}) + np.testing.assert_allclose(y2["y2"], y_pipeline["y2"]) + + # Check loading from spec and weight dir + p3 = ct.models.MLModel(p2.get_spec(), weights_dir=p2.weights_dir) + y_pipeline = p3.predict({"x": x}) + np.testing.assert_allclose(y2["y2"], y_pipeline["y2"]) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/__init__.py new file mode 100644 index 00000000..8aa13a28 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_NuSVC.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_NuSVC.py new file mode 100644 index 00000000..2d916802 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_NuSVC.py @@ -0,0 +1,309 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. 
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import random +import tempfile +import unittest + +import pandas as pd +import pytest + +from coremltools._deps import (_HAS_LIBSVM, _HAS_SKLEARN, _SKLEARN_VERSION, + MSG_LIBSVM_NOT_FOUND, MSG_SKLEARN_NOT_FOUND) +from coremltools.models.utils import (_is_macos, _macos_version, + evaluate_classifier, + evaluate_classifier_with_probabilities) + +if _HAS_LIBSVM: + from libsvm import svmutil + from svmutil import svm_predict, svm_train + + from coremltools.converters import libsvm + +if _HAS_SKLEARN: + from distutils.version import StrictVersion + + from sklearn.preprocessing import OneHotEncoder + from sklearn.svm import NuSVC + + from coremltools.converters import sklearn as scikit_converter + + +@unittest.skipIf(not _HAS_SKLEARN, MSG_SKLEARN_NOT_FOUND) +class NuSvcScikitTest(unittest.TestCase): + """ + Unit test class for testing scikit-learn converter. + """ + + def _evaluation_test_helper( + self, + class_labels, + use_probability_estimates, + allow_slow, + allowed_prob_delta=0.00001, + ): + # Parameters to test + kernel_parameters = [ + {}, + {"kernel": "rbf", "gamma": 1.2}, + {"kernel": "linear"}, + {"kernel": "poly"}, + {"kernel": "poly", "degree": 2}, + {"kernel": "poly", "gamma": 0.75}, + ] + # sklearn version > 0.22 NuSVC introduced finiteness checks that fail for + # the 'sigmoid' and one 'poly' kernel test cases. Avoid those. + # See https://github.com/scikit-learn/scikit-learn/issues/17925 + if _SKLEARN_VERSION <= StrictVersion("0.22"): + kernel_parameters += [ + {"kernel": "poly", "degree": 0, "gamma": 0.9, "coef0": 2}, + {"kernel": "sigmoid"}, + {"kernel": "sigmoid", "gamma": 1.3}, + {"kernel": "sigmoid", "coef0": 0.8}, + {"kernel": "sigmoid", "coef0": 0.8, "gamma": 0.5}, + ] + + non_kernel_parameters = [ + {}, + {"nu": 0.75}, + {"nu": 0.25, "shrinking": True}, + {"shrinking": False}, + ] + + # Generate some random data + x, y = [], [] + random.seed(42) + for _ in range(50): + x.append( + [random.gauss(200, 30), random.gauss(-100, 22), random.gauss(100, 42)] + ) + y.append(random.choice(class_labels)) + column_names = ["x1", "x2", "x3"] + # make sure first label is seen first, second is seen second, and so on. 
+        for i, val in enumerate(class_labels):
+            y[i] = val
+        df = pd.DataFrame(x, columns=column_names)
+
+        # Test
+        for param1 in non_kernel_parameters:
+            for param2 in kernel_parameters:
+                cur_params = param1.copy()
+                cur_params.update(param2)
+                cur_params["probability"] = use_probability_estimates
+                cur_params["max_iter"] = 10  # Don't want test to take too long
+
+                cur_model = NuSVC(**cur_params)
+                cur_model.fit(x, y)
+
+                spec = scikit_converter.convert(cur_model, column_names, "target")
+
+                if _is_macos() and _macos_version() >= (10, 13):
+                    if use_probability_estimates:
+                        probability_lists = cur_model.predict_proba(x)
+                        df["classProbability"] = [
+                            dict(zip(cur_model.classes_, cur_vals))
+                            for cur_vals in probability_lists
+                        ]
+                        metrics = evaluate_classifier_with_probabilities(
+                            spec, df, probabilities="classProbability"
+                        )
+                        self.assertEqual(metrics["num_key_mismatch"], 0)
+                        self.assertLess(
+                            metrics["max_probability_error"], allowed_prob_delta
+                        )
+                    else:
+                        df["target"] = cur_model.predict(x)
+                        metrics = evaluate_classifier(spec, df, verbose=False)
+                        self.assertEqual(metrics["num_errors"], 0)
+
+                if not allow_slow:
+                    break
+
+            if not allow_slow:
+                break
+
+    @pytest.mark.slow
+    def test_binary_class_int_label_without_probability_stress_test(self):
+        self._evaluation_test_helper([1, 3], False, allow_slow=True)
+
+    def test_binary_class_int_label_without_probability(self):
+        self._evaluation_test_helper([1, 3], False, allow_slow=False)
+
+    @pytest.mark.slow
+    def test_binary_class_string_label_with_probability_stress_test(self):
+        # Scikit-learn uses a technique to normalize pairwise probabilities even for binary classification.
+        # This leads to differences in the probabilities.
+        self._evaluation_test_helper(
+            ["foo", "bar"], True, allow_slow=True, allowed_prob_delta=0.005
+        )
+
+    def test_binary_class_string_label_with_probability(self):
+        # Scikit-learn uses a technique to normalize pairwise probabilities even for binary classification.
+        # This leads to differences in the probabilities.
+        self._evaluation_test_helper(
+            ["foo", "bar"], True, allow_slow=False, allowed_prob_delta=0.005
+        )
+
+    @pytest.mark.slow
+    def test_multi_class_int_label_without_probability_stress_test(self):
+        self._evaluation_test_helper([12, 33, -1, 1234], False, allow_slow=True)
+
+    def test_multi_class_int_label_without_probability(self):
+        self._evaluation_test_helper([12, 33, -1, 1234], False, allow_slow=False)
+
+    @pytest.mark.slow
+    def test_multi_class_string_label_with_probability_stress_test(self):
+        self._evaluation_test_helper(["X", "Y", "z"], True, allow_slow=True)
+
+    def test_multi_class_string_label_with_probability(self):
+        self._evaluation_test_helper(["X", "Y", "z"], True, allow_slow=False)
+
+    def test_conversion_bad_inputs(self):
+        # Error on converting an untrained model
+        with self.assertRaises(TypeError):
+            model = NuSVC()
+            spec = scikit_converter.convert(model, "data", "out")
+
+        # Check the expected class during conversion
+        with self.assertRaises(TypeError):
+            model = OneHotEncoder()
+            spec = scikit_converter.convert(model, "data", "out")
+
+
+@unittest.skipIf(not _HAS_LIBSVM, MSG_LIBSVM_NOT_FOUND)
+@unittest.skipIf(not _HAS_SKLEARN, MSG_SKLEARN_NOT_FOUND)
+class NuSVCLibSVMTest(unittest.TestCase):
+    # Model parameters for testing
+    base_param = "-s 1 -q"  # model type nu-SVC and quiet mode
+    non_kernel_parameters = ["", "-n 0.6 -p 0.5 -h 1", "-c 0.5 -p 0.5 -h 0"]
+    kernel_parameters = [
+        "-t 0",  # linear kernel
+        "",
+        "-t 2 -g 1.2",  # rbf kernel
+        "-t 1",
+        "-t 1 -d 2",
+        "-t 1 -g 0.75",
+        "-t 1 -d 0 -g 0.9 -r 2",  # poly kernel
+        "-t 3",
+        "-t 3 -g 1.3",
+        "-t 3 -r 0.8",
+        "-t 3 -r 0.8 -g 0.5",  # sigmoid kernel
+    ]
+
+    """
+    Unit test class for testing the libsvm converter.
+    """
+
+    @classmethod
+    def setUpClass(self):
+        """
+        Set up the unit test by loading the dataset and training a model.
+        """
+        if not _HAS_LIBSVM:
+            # setUpClass is still called even if class is skipped.
+            return
+
+        # Generate some random data.
+        # This unit test should not rely on scikit learn for test data.
+        self.x, self.y = [], []
+        random.seed(42)
+        for _ in range(50):
+            self.x.append([random.gauss(200, 30), random.gauss(-100, 22)])
+            self.y.append(random.choice([1, 2]))
+        self.y[0] = 1  # Make sure 1 is always the first label it sees
+        self.y[1] = 2
+        self.column_names = ["x1", "x2"]
+        self.prob = svmutil.svm_problem(self.y, self.x)
+
+        param = svmutil.svm_parameter()
+        param.svm_type = svmutil.NU_SVC
+        param.kernel_type = svmutil.LINEAR
+        param.eps = 1
+        param.probability = 1
+
+        # Save the data and the model
+        self.libsvm_model = svmutil.svm_train(self.prob, param)
+
+        self.df = pd.DataFrame(self.x, columns=self.column_names)
+
+    def _test_prob_model(self, param1, param2):
+        probability_param = "-b 1"
+        df = self.df
+
+        param_str = " ".join([self.base_param, param1, param2, probability_param])
+        param = svmutil.svm_parameter(param_str)
+        model = svm_train(self.prob, param)
+
+        # Get predictions with probabilities as dictionaries
+        (df["prediction"], _, probability_lists) = svm_predict(
+            self.y, self.x, model, probability_param + " -q"
+        )
+        probability_dicts = [
+            dict(zip([1, 2], cur_vals)) for cur_vals in probability_lists
+        ]
+        df["probabilities"] = probability_dicts
+
+        spec = libsvm.convert(model, self.column_names, "target", "probabilities")
+
+        if _is_macos() and _macos_version() >= (10, 13):
+            metrics = evaluate_classifier_with_probabilities(spec, df, verbose=False)
+            self.assertEqual(metrics["num_key_mismatch"], 0)
+            self.assertLess(metrics["max_probability_error"], 0.00001)
+
+    @pytest.mark.slow
+    def test_binary_classification_with_probability_stress_test(self):
+        for param1 in self.non_kernel_parameters:
+            for param2 in self.kernel_parameters:
+                self._test_prob_model(param1, param2)
+
+    def test_binary_classification_with_probability(self):
+        param1 = self.non_kernel_parameters[0]
+        param2 = self.kernel_parameters[0]
+        self._test_prob_model(param1, param2)
+
+    @pytest.mark.slow
+    @unittest.skip(
+        "LibSVM's Python library is broken for NuSVC without probabilities. It always segfaults during prediction time."
+    )
+    def test_multi_class_without_probability(self):
+        # Generate some random data.
+        # This unit test should not rely on scikit learn for test data.
+        x, y = [], []
+        for _ in range(50):
+            x.append(
+                [random.gauss(200, 30), random.gauss(-100, 22), random.gauss(100, 42)]
+            )
+            y.append(random.choice([1, 2, 10, 12]))
+        y[0], y[1], y[2], y[3] = 1, 2, 10, 12
+        column_names = ["x1", "x2", "x3"]
+        prob = svmutil.svm_problem(y, x)
+
+        df = pd.DataFrame(x, columns=column_names)
+
+        for param1 in self.non_kernel_parameters:
+            for param2 in self.kernel_parameters:
+                param_str = " ".join([self.base_param, param1, param2])
+                param = svmutil.svm_parameter(param_str)
+
+                model = svm_train(prob, param)
+
+                # Get predictions
+                (df["prediction"], _, _) = svm_predict(y, x, model, " -q")
+
+                spec = libsvm.convert(model, column_names, "target")
+
+                metrics = evaluate_classifier(spec, df, verbose=False)
+                self.assertEqual(metrics["num_errors"], 0)
+
+    def test_conversion_from_filesystem(self):
+        libsvm_model_path = tempfile.mktemp(suffix="model.libsvm")
+        svmutil.svm_save_model(libsvm_model_path, self.libsvm_model)
+        spec = libsvm.convert(libsvm_model_path, "data", "target")
+
+    def test_conversion_bad_inputs(self):
+        # Check the expected class during conversion.
+        with self.assertRaises(TypeError):
+            model = OneHotEncoder()
+            spec = libsvm.convert(model, "data", "out")
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_NuSVR.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_NuSVR.py
new file mode 100644
index 00000000..7646abd8
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_NuSVR.py
@@ -0,0 +1,224 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import random
+import tempfile
+import unittest
+
+import pandas as pd
+import pytest
+
+from coremltools._deps import (_HAS_LIBSVM, _HAS_SKLEARN, MSG_LIBSVM_NOT_FOUND,
+                               MSG_SKLEARN_NOT_FOUND)
+from coremltools.models.utils import (_is_macos, _macos_version,
+                                      evaluate_regressor)
+
+if _HAS_LIBSVM:
+    from libsvm import svmutil
+    from svmutil import svm_predict, svm_train
+
+    from coremltools.converters import libsvm
+
+if _HAS_SKLEARN:
+    from sklearn.datasets import load_boston
+    from sklearn.preprocessing import OneHotEncoder
+    from sklearn.svm import NuSVR
+
+    from coremltools.converters import sklearn as scikit_converter
+
+
+@unittest.skipIf(not _HAS_SKLEARN, MSG_SKLEARN_NOT_FOUND)
+class NuSVRScikitTest(unittest.TestCase):
+    """
+    Unit test class for testing scikit-learn converter.
+    """
+
+    @classmethod
+    def setUpClass(self):
+        """
+        Set up the unit test by loading the dataset and training a model.
+        """
+        if not _HAS_SKLEARN:
+            return
+
+        self.scikit_model = NuSVR(kernel="linear")
+        self.data = load_boston()
+        self.scikit_model.fit(self.data["data"], self.data["target"])
+
+    def test_conversion_bad_inputs(self):
+        # Error on converting an untrained model
+        with self.assertRaises(TypeError):
+            model = NuSVR()
+            spec = scikit_converter.convert(model, "data", "out")
+
+        # Check the expected class during conversion.
+        with self.assertRaises(TypeError):
+            model = OneHotEncoder()
+            spec = scikit_converter.convert(model, "data", "out")
+
+    @pytest.mark.slow
+    def test_evaluation_stress_test(self):
+        self._test_evaluation(allow_slow=True)
+
+    def test_evaluation(self):
+        self._test_evaluation(allow_slow=False)
+
+    def _test_evaluation(self, allow_slow):
+        """
+        Test that the same predictions are made
+        """
+
+        # Generate some smallish (some kernels take too long on anything else) random data
+        x, y = [], []
+        for _ in range(50):
+            cur_x1, cur_x2 = random.gauss(2, 3), random.gauss(-1, 2)
+            x.append([cur_x1, cur_x2])
+            y.append(1 + 2 * cur_x1 + 3 * cur_x2)
+
+        input_names = ["x1", "x2"]
+        df = pd.DataFrame(x, columns=input_names)
+
+        # Parameters to test
+        kernel_parameters = [
+            {},
+            {"kernel": "rbf", "gamma": 1.2},
+            {"kernel": "linear"},
+            {"kernel": "poly"},
+            {"kernel": "poly", "degree": 2},
+            {"kernel": "poly", "gamma": 0.75},
+            {"kernel": "poly", "degree": 0, "gamma": 0.9, "coef0": 2},
+            {"kernel": "sigmoid"},
+            {"kernel": "sigmoid", "gamma": 1.3},
+            {"kernel": "sigmoid", "coef0": 0.8},
+            {"kernel": "sigmoid", "coef0": 0.8, "gamma": 0.5},
+        ]
+        non_kernel_parameters = [
+            {},
+            {"C": 1},
+            {"C": 1.5, "shrinking": True},
+            {"C": 0.5, "shrinking": False, "nu": 0.9},
+        ]
+
+        # Test
+        for param1 in non_kernel_parameters:
+            for param2 in kernel_parameters:
+                cur_params = param1.copy()
+                cur_params.update(param2)
+
+                cur_model = NuSVR(**cur_params)
+                cur_model.fit(x, y)
+                df["target"] = cur_model.predict(x)
+
+                spec = scikit_converter.convert(cur_model, input_names, "target")
+
+                if _is_macos() and _macos_version() >= (10, 13):
+                    metrics = evaluate_regressor(spec, df)
+                    self.assertAlmostEqual(metrics["max_error"], 0)
+
+                if not allow_slow:
+                    break
+
+            if not allow_slow:
+                break
+
+
+@unittest.skipIf(not _HAS_LIBSVM, MSG_LIBSVM_NOT_FOUND)
+@unittest.skipIf(not _HAS_SKLEARN, MSG_SKLEARN_NOT_FOUND)
+class NuSVRLibSVMTest(unittest.TestCase):
+    """
+    Unit test class for testing the libsvm converter.
+    """
+
+    @classmethod
+    def setUpClass(self):
+        """
+        Set up the unit test by loading the dataset and training a model.
+        """
+        if not _HAS_SKLEARN:
+            return
+        if not _HAS_LIBSVM:
+            return
+
+        scikit_data = load_boston()
+        prob = svmutil.svm_problem(scikit_data["target"], scikit_data["data"].tolist())
+        param = svmutil.svm_parameter()
+        param.svm_type = svmutil.NU_SVR
+        param.kernel_type = svmutil.LINEAR
+        param.eps = 1
+
+        self.libsvm_model = svmutil.svm_train(prob, param)
+
+    def test_conversion(self):
+        spec = libsvm.convert(self.libsvm_model, "data", "target")
+
+    def test_conversion_from_filesystem(self):
+        libsvm_model_path = tempfile.mktemp(suffix="model.libsvm")
+        svmutil.svm_save_model(libsvm_model_path, self.libsvm_model)
+        spec = libsvm.convert(libsvm_model_path, "data", "target")
+
+    def test_conversion_bad_inputs(self):
+        # Check the expected class during conversion.
+ with self.assertRaises(TypeError): + model = OneHotEncoder() + spec = libsvm.convert(model, "data", "out") + + @pytest.mark.slow + def test_evaluation_stress_test(self): + self._test_evaluation(allow_slow=True) + + def test_evaluation(self): + self._test_evaluation(allow_slow=False) + + def _test_evaluation(self, allow_slow): + """ + Test that the same predictions are made + """ + # Generate some smallish (poly kernels take too long on anything else) random data + x, y = [], [] + for _ in range(50): + cur_x1, cur_x2 = random.gauss(2, 3), random.gauss(-1, 2) + x.append([cur_x1, cur_x2]) + y.append(1 + 2 * cur_x1 + 3 * cur_x2) + + input_names = ["x1", "x2"] + df = pd.DataFrame(x, columns=input_names) + prob = svmutil.svm_problem(y, x) + + # Parameters + base_param = "-s 4" # model type is nu-SVR + non_kernel_parameters = ["", "-c 1.5 -p 0.5 -h 1", "-c 0.5 -p 0.5 -h 0"] + kernel_parameters = [ + "", + "-t 2 -g 1.2", # rbf kernel + "-t 0", # linear kernel + "-t 1", + "-t 1 -d 2", + "-t 1 -g 0.75", + "-t 1 -d 0 -g 0.9 -r 2", # poly kernel + "-t 3", + "-t 3 -g 1.3", + "-t 3 -r 0.8", + "-t 3 -r 0.8 -g 0.5", # sigmoid kernel + ] + + for param1 in non_kernel_parameters: + for param2 in kernel_parameters: + param_str = " ".join([base_param, param1, param2]) + param = svmutil.svm_parameter(param_str) + + model = svm_train(prob, param) + (df["target"], _, _) = svm_predict(y, x, model) + + spec = libsvm.convert(model, input_names, "target") + + if _is_macos() and _macos_version() >= (10, 13): + metrics = evaluate_regressor(spec, df) + self.assertAlmostEqual(metrics["max_error"], 0) + + if not allow_slow: + break + + if not allow_slow: + break diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_SVC.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_SVC.py new file mode 100644 index 00000000..749f38a2 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_SVC.py @@ -0,0 +1,369 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import copy +import random +import tempfile +import unittest + +import numpy as np +import pandas as pd +import pytest + +from coremltools._deps import _HAS_LIBSVM, _HAS_SKLEARN +from coremltools.models.utils import (_is_macos, _macos_version, + evaluate_classifier, + evaluate_classifier_with_probabilities) + +if _HAS_SKLEARN: + from sklearn.preprocessing import OneHotEncoder + from sklearn.svm import SVC + + from coremltools.converters import sklearn as scikit_converter + +if _HAS_LIBSVM: + import svmutil + from svm import svm_parameter + from svmutil import svm_predict, svm_train + + from coremltools.converters import libsvm + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing scikit-learn. Skipping tests.") +class SvcScikitTest(unittest.TestCase): + """ + Unit test class for testing scikit-learn converter. 
+    """
+
+    def _evaluation_test_helper(
+        self,
+        class_labels,
+        use_probability_estimates,
+        allow_slow,
+        allowed_prob_delta=0.00001,
+    ):
+        # Parameters to test
+        kernel_parameters = [
+            {},
+            {"kernel": "rbf", "gamma": 1.2},
+            {"kernel": "linear"},
+            {"kernel": "poly"},
+            {"kernel": "poly", "degree": 2},
+            {"kernel": "poly", "gamma": 0.75},
+            {"kernel": "poly", "degree": 0, "gamma": 0.9, "coef0": 2},
+            {"kernel": "sigmoid"},
+            {"kernel": "sigmoid", "gamma": 1.3},
+            {"kernel": "sigmoid", "coef0": 0.8},
+            {"kernel": "sigmoid", "coef0": 0.8, "gamma": 0.5},
+        ]
+        non_kernel_parameters = [
+            {},
+            {"C": 1},
+            {"C": 1.5, "shrinking": True},
+            {"C": 0.5, "shrinking": False},
+        ]
+
+        # Generate some random data
+        x, y = [], []
+        random.seed(42)
+        for _ in range(50):
+            x.append(
+                [random.gauss(200, 30), random.gauss(-100, 22), random.gauss(100, 42)]
+            )
+            y.append(random.choice(class_labels))
+        column_names = ["x1", "x2", "x3"]
+        # make sure first label is seen first, second is seen second, and so on.
+        for i, val in enumerate(class_labels):
+            y[i] = val
+        df = pd.DataFrame(x, columns=column_names)
+
+        # Test
+        for param1 in non_kernel_parameters:
+            for param2 in kernel_parameters:
+                cur_params = param1.copy()
+                cur_params.update(param2)
+                cur_params["probability"] = use_probability_estimates
+                cur_params["max_iter"] = 10  # Don't want test to take too long
+
+                cur_model = SVC(**cur_params)
+                cur_model.fit(x, y)
+
+                spec = scikit_converter.convert(cur_model, column_names, "target")
+
+                if _is_macos() and _macos_version() >= (10, 13):
+                    if use_probability_estimates:
+                        probability_lists = cur_model.predict_proba(x)
+                        df["classProbability"] = [
+                            dict(zip(cur_model.classes_, cur_vals))
+                            for cur_vals in probability_lists
+                        ]
+                        metrics = evaluate_classifier_with_probabilities(
+                            spec, df, probabilities="classProbability", verbose=True
+                        )
+                        self.assertEqual(metrics["num_key_mismatch"], 0)
+                        self.assertLess(
+                            metrics["max_probability_error"], allowed_prob_delta
+                        )
+                    else:
+                        df["target"] = cur_model.predict(x)
+                        metrics = evaluate_classifier(spec, df, verbose=False)
+                        self.assertEqual(metrics["num_errors"], 0)
+
+                if not allow_slow:
+                    break
+
+            if not allow_slow:
+                break
+
+    @pytest.mark.slow
+    def test_binary_class_string_label_without_probability_stress_test(self):
+        self._evaluation_test_helper(["A", "B"], False, allow_slow=True)
+
+    def test_binary_class_string_label_without_probability(self):
+        self._evaluation_test_helper(["A", "B"], False, allow_slow=False)
+
+    @pytest.mark.slow
+    def test_binary_class_string_label_with_probability_stress_test(self):
+        # Scikit-learn uses a technique to normalize pairwise probabilities even for binary classification.
+        # This leads to differences in the probabilities.
+        self._evaluation_test_helper(
+            ["foo", "bar"], True, allow_slow=True, allowed_prob_delta=0.005
+        )
+
+    def test_binary_class_string_label_with_probability(self):
+        # Scikit-learn uses a technique to normalize pairwise probabilities even for binary classification.
+        # This leads to differences in the probabilities.
+ self._evaluation_test_helper( + ["foo", "bar"], True, allow_slow=False, allowed_prob_delta=0.005 + ) + + @pytest.mark.slow + def test_multi_class_int_label_without_probability_stress_test(self): + self._evaluation_test_helper([12, 33, -1, 1234], False, allow_slow=True) + + def test_multi_class_int_label_without_probability(self): + self._evaluation_test_helper([12, 33, -1, 1234], False, allow_slow=False) + + @pytest.mark.slow + def test_multi_class_int_label_with_probability_stress_test(self): + self._evaluation_test_helper([1, 2, 3], True, allow_slow=True) + + def test_multi_class_int_label_with_probability(self): + self._evaluation_test_helper([1, 2, 3], True, allow_slow=False) + + def test_conversion_bad_inputs(self): + # Error on converting an untrained model + with self.assertRaises(TypeError): + model = SVC() + spec = scikit_converter.convert(model, "data", "out") + + # Check the expected class during conversion + with self.assertRaises(TypeError): + model = OneHotEncoder() + spec = scikit_converter.convert(model, "data", "out") + + +@unittest.skipIf(not _HAS_LIBSVM, "Missing libsvm. Skipping tests.") +class CSVCLibSVMTest(unittest.TestCase): + # Model parameters for testing + base_param = "-s 0 -q " # model type C-SVC and quiet mode + non_kernel_parameters = ["", "-c 1.5 -p 0.5 -h 1", "-c 0.5 -p 0.5 -h 0"] + kernel_parameters = [ + "-t 0", # linear kernel + "", + "-t 2 -g 1.2", # rbf kernel + "-t 1", + "-t 1 -d 2", + "-t 1 -g 0.75", + "-t 1 -d 0 -g 0.9 -r 2", # poly kernel + "-t 3", + "-t 3 -g 1.3", + "-t 3 -r 0.8", + "-t 3 -r 0.8 -g 0.5", # sigmoid kernel + ] + # XXX: wi params? + + """ + Unit test class for testing the libsvm converter. + """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. + """ + if not _HAS_LIBSVM: + # setUpClass is still called even if class is skipped. + return + + # Generate some random data. + # This unit test should not rely on scikit learn for test data. 
+        self.x, self.y = [], []
+        random.seed(42)
+        for _ in range(50):
+            self.x.append([random.gauss(200, 30), random.gauss(-100, 22)])
+            self.y.append(random.choice([1, 2]))
+        self.y[0] = 1  # Make sure 1 is always the first label it sees
+        self.y[1] = 2
+        self.column_names = ["x1", "x2"]
+        self.prob = svmutil.svm_problem(self.y, self.x)
+
+        param = svmutil.svm_parameter()
+        param.svm_type = svmutil.C_SVC
+        param.kernel_type = svmutil.LINEAR
+        param.eps = 1
+        param.probability = 1
+
+        self.libsvm_model = svmutil.svm_train(self.prob, param)
+
+    def test_default_names(self):
+        df = pd.DataFrame({"input": self.x})
+        df["input"] = df["input"].apply(np.array)
+
+        # Test with probabilities
+        spec = libsvm.convert(self.libsvm_model).get_spec()
+        if _is_macos() and _macos_version() >= (10, 13):
+            (_, _, probability_lists) = svm_predict(
+                self.y, self.x, self.libsvm_model, "-b 1 -q"
+            )
+            probability_dicts = [
+                dict(zip([1, 2], cur_vals)) for cur_vals in probability_lists
+            ]
+            df["classProbability"] = probability_dicts
+            metrics = evaluate_classifier_with_probabilities(
+                spec, df, verbose=False, probabilities="classProbability"
+            )
+            self.assertLess(metrics["max_probability_error"], 0.00001)
+
+        # Test model without probabilities
+        no_probability_model = svmutil.svm_train(self.prob, svmutil.svm_parameter())
+        spec = libsvm.convert(no_probability_model).get_spec()
+        self.assertEqual(len(spec.description.output), 1)
+        self.assertEqual(spec.description.output[0].name, "target")
+        if _is_macos() and _macos_version() >= (10, 13):
+            (df["target"], _, _) = svm_predict(
+                self.y, self.x, no_probability_model, " -q"
+            )
+            metrics = evaluate_classifier(spec, df, verbose=False)
+            self.assertEqual(metrics["num_errors"], 0)
+
+    # LibSVM only supports numeric labels
+    @pytest.mark.slow
+    def test_binary_class_without_probability_stress_test(self):
+        self._evaluation_test_helper_no_probability([0, 1], allow_slow=True)
+
+    @pytest.mark.slow
+    def test_binary_class_with_probability_stress_test(self):
+        self._evaluation_test_helper_with_probability([-1, 90], allow_slow=True)
+
+    @pytest.mark.slow
+    def test_multi_class_without_probability_stress_test(self):
+        self._evaluation_test_helper_no_probability([12, 33, 12341], allow_slow=True)
+
+    @pytest.mark.slow
+    def test_multi_class_with_probability_stress_test(self):
+        self._evaluation_test_helper_with_probability([1, 2, 3], allow_slow=True)
+
+    # LibSVM only supports numeric labels
+    def test_binary_class_without_probability(self):
+        self._evaluation_test_helper_no_probability([0, 1], allow_slow=False)
+
+    def test_binary_class_with_probability(self):
+        self._evaluation_test_helper_with_probability([-1, 90], allow_slow=False)
+
+    def test_multi_class_without_probability(self):
+        self._evaluation_test_helper_no_probability([12, 33, 12341], allow_slow=False)
+
+    def test_multi_class_with_probability(self):
+        self._evaluation_test_helper_with_probability([1, 2, 3], allow_slow=False)
+
+    def _evaluation_test_helper_with_probability(self, labels, allow_slow):
+        df = pd.DataFrame(self.x, columns=self.column_names)
+        y = copy.copy(self.y)
+        for i, val in enumerate(labels):
+            y[i] = val
+        probability_param = "-b 1"
+
+        for param1 in self.non_kernel_parameters:
+            for param2 in self.kernel_parameters:
+                param_str = " ".join(
+                    [self.base_param, param1, param2, probability_param]
+                )
+                param = svm_parameter(param_str)
+
+                model = svm_train(self.prob, param)
+
+                # Get predictions with probabilities as dictionaries
+                (df["target"], _, probability_lists) = svm_predict(
+                    y, self.x, model, probability_param + " -q"
+                )
+                probability_dicts = [
+                    dict(zip([1, 2], cur_vals)) for cur_vals in probability_lists
+                ]
+                df["probabilities"] = probability_dicts
+
+                spec = libsvm.convert(
+                    model, self.column_names, "target", "probabilities"
+                )
+
+                if _is_macos() and _macos_version() >= (10, 13):
+                    metrics = evaluate_classifier_with_probabilities(
+                        spec, df, verbose=False
+                    )
+                    self.assertEqual(metrics["num_key_mismatch"], 0)
+                    self.assertLess(metrics["max_probability_error"], 0.00001)
+
+                if not allow_slow:
+                    break
+
+            if not allow_slow:
+                break
+
+    def _evaluation_test_helper_no_probability(self, labels, allow_slow):
+        # Generate some random data.
+        # This unit test should not rely on scikit learn for test data.
+        x, y = [], []
+        random.seed(42)
+        for _ in range(50):
+            x.append(
+                [random.gauss(200, 30), random.gauss(-100, 22), random.gauss(100, 42)]
+            )
+            y.append(random.choice(labels))
+        # make sure first label is seen first, second is seen second, and so on.
+        for i, val in enumerate(labels):
+            y[i] = val
+        column_names = ["x1", "x2", "x3"]
+        prob = svmutil.svm_problem(y, x)
+
+        df = pd.DataFrame(x, columns=column_names)
+
+        for param1 in self.non_kernel_parameters:
+            for param2 in self.kernel_parameters:
+                param_str = " ".join([self.base_param, param1, param2])
+                param = svm_parameter(param_str)
+
+                model = svm_train(prob, param)
+
+                # Get predictions
+                (df["target"], _, _) = svm_predict(y, x, model, " -q")
+
+                spec = libsvm.convert(model, column_names, "target")
+
+                if _is_macos() and _macos_version() >= (10, 13):
+                    metrics = evaluate_classifier(spec, df, verbose=False)
+                    self.assertEqual(metrics["num_errors"], 0)
+
+                if not allow_slow:
+                    break
+
+            if not allow_slow:
+                break
+
+    def test_conversion_from_filesystem(self):
+        libsvm_model_path = tempfile.mktemp(suffix="model.libsvm")
+        svmutil.svm_save_model(libsvm_model_path, self.libsvm_model)
+        # libsvm's save(...) truncates floating point values, so it's not going to match self.libsvm_model any more.
+        spec = libsvm.convert(libsvm_model_path, self.column_names, "target")
+        self.assertIsNotNone(spec)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_SVR.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_SVR.py
new file mode 100644
index 00000000..39bee077
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_SVR.py
@@ -0,0 +1,259 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import random
+import tempfile
+import unittest
+
+import numpy as np
+import pandas as pd
+import pytest
+
+from coremltools._deps import (_HAS_LIBSVM, _HAS_SKLEARN, MSG_LIBSVM_NOT_FOUND,
+                               MSG_SKLEARN_NOT_FOUND)
+from coremltools.models.utils import (_is_macos, _macos_version,
+                                      evaluate_regressor)
+
+if _HAS_LIBSVM:
+    import svmutil
+
+    from coremltools.converters import libsvm
+
+if _HAS_SKLEARN:
+    from sklearn.datasets import load_boston
+    from sklearn.preprocessing import OneHotEncoder
+    from sklearn.svm import SVR
+
+    from coremltools.converters import sklearn as sklearn_converter
+
+
+@unittest.skipIf(not _HAS_SKLEARN, MSG_SKLEARN_NOT_FOUND)
+class SvrScikitTest(unittest.TestCase):
+    """
+    Unit test class for testing the scikit-learn converter.
+    """
+
+    @classmethod
+    def setUpClass(self):
+        """
+        Set up the unit test by loading the dataset and training a model.
+        """
+        if not _HAS_SKLEARN:
+            return
+
+        scikit_data = load_boston()
+        scikit_model = SVR(kernel="linear")
+        scikit_model.fit(scikit_data["data"], scikit_data["target"])
+
+        # Save the data and the model
+        self.scikit_data = scikit_data
+        self.scikit_model = scikit_model
+
+    def test_conversion_bad_inputs(self):
+        # Error on converting an untrained model
+        with self.assertRaises(TypeError):
+            model = SVR()
+            spec = sklearn_converter.convert(model, "data", "out")
+
+        # Check the expected class during conversion.
+        with self.assertRaises(TypeError):
+            model = OneHotEncoder()
+            spec = sklearn_converter.convert(model, "data", "out")
+
+    @pytest.mark.slow
+    def test_evaluation_stress_test(self):
+        self._test_evaluation(allow_slow=True)
+
+    def test_evaluation(self):
+        self._test_evaluation(allow_slow=False)
+
+    def _test_evaluation(self, allow_slow):
+        """
+        Test that the same predictions are made
+        """
+
+        # Generate some smallish (some kernels take too long on anything else) random data
+        x, y = [], []
+        for _ in range(50):
+            cur_x1, cur_x2 = random.gauss(2, 3), random.gauss(-1, 2)
+            x.append([cur_x1, cur_x2])
+            y.append(1 + 2 * cur_x1 + 3 * cur_x2)
+
+        input_names = ["x1", "x2"]
+        df = pd.DataFrame(x, columns=input_names)
+
+        # Parameters to test
+        kernel_parameters = [
+            {},
+            {"kernel": "rbf", "gamma": 1.2},
+            {"kernel": "linear"},
+            {"kernel": "poly"},
+            {"kernel": "poly", "degree": 2},
+            {"kernel": "poly", "gamma": 0.75},
+            {"kernel": "poly", "degree": 0, "gamma": 0.9, "coef0": 2},
+            {"kernel": "sigmoid"},
+            {"kernel": "sigmoid", "gamma": 1.3},
+            {"kernel": "sigmoid", "coef0": 0.8},
+            {"kernel": "sigmoid", "coef0": 0.8, "gamma": 0.5},
+        ]
+        non_kernel_parameters = [
+            {},
+            {"C": 1},
+            {"C": 1.5, "epsilon": 0.5, "shrinking": True},
+            {"C": 0.5, "epsilon": 1.5, "shrinking": False},
+        ]
+
+        # Test
+        for param1 in non_kernel_parameters:
+            for param2 in kernel_parameters:
+                cur_params = param1.copy()
+                cur_params.update(param2)
+                print("cur_params=" + str(cur_params))
+
+                cur_model = SVR(**cur_params)
+                cur_model.fit(x, y)
+                df["target"] = cur_model.predict(x)
+
+                spec = sklearn_converter.convert(cur_model, input_names, "target")
+
+                if _is_macos() and _macos_version() >= (10, 13):
+                    metrics = evaluate_regressor(spec, df)
+                    self.assertAlmostEqual(metrics["max_error"], 0)
+
+                if not allow_slow:
+                    break
+
+            if not allow_slow:
+                break
+
+
+@unittest.skipIf(not _HAS_LIBSVM, MSG_LIBSVM_NOT_FOUND)
+@unittest.skipIf(not _HAS_SKLEARN, MSG_SKLEARN_NOT_FOUND)
+class EpsilonSVRLibSVMTest(unittest.TestCase):
+    """
+    Unit test class for testing the libsvm converter.
+    """
+
+    @classmethod
+    def setUpClass(self):
+        """
+        Set up the unit test by loading the dataset and training a model.
+        """
+        if not _HAS_SKLEARN:
+            return
+        if not _HAS_LIBSVM:
+            return
+
+        scikit_data = load_boston()
+        prob = svmutil.svm_problem(scikit_data["target"], scikit_data["data"].tolist())
+        param = svmutil.svm_parameter()
+        param.svm_type = svmutil.EPSILON_SVR
+        param.kernel_type = svmutil.LINEAR
+        param.eps = 1
+
+        self.libsvm_model = svmutil.svm_train(prob, param)
+
+    def test_input_names(self):
+        data = load_boston()
+        df = pd.DataFrame({"input": data["data"].tolist()})
+        df["input"] = df["input"].apply(np.array)
+
+        # Default values
+        spec = libsvm.convert(self.libsvm_model)
+        if _is_macos() and _macos_version() >= (10, 13):
+            (df["target"], _, _) = svmutil.svm_predict(
+                data["target"], data["data"].tolist(), self.libsvm_model
+            )
+            metrics = evaluate_regressor(spec, df)
+            self.assertAlmostEqual(metrics["max_error"], 0)
+
+        # One extra input is legal/possible.
+        num_inputs = len(data["data"][0])
+        spec = libsvm.convert(self.libsvm_model, input_length=num_inputs + 1)
+
+        # Not enough input names.
+        input_names = ["this", "is", "not", "enough", "names"]
+        with self.assertRaises(ValueError):
+            libsvm.convert(self.libsvm_model, input_names=input_names)
+        with self.assertRaises(ValueError):
+            libsvm.convert(self.libsvm_model, input_length=num_inputs - 1)
+
+    def test_conversion_from_filesystem(self):
+        libsvm_model_path = tempfile.mktemp(suffix="model.libsvm")
+        svmutil.svm_save_model(libsvm_model_path, self.libsvm_model)
+        spec = libsvm.convert(
+            libsvm_model_path, input_names="data", target_name="target"
+        )
+
+    def test_conversion_bad_inputs(self):
+        # Check the expected class during conversion.
+        with self.assertRaises(TypeError):
+            model = OneHotEncoder()
+            spec = libsvm.convert(model, "data", "out")
+
+    @pytest.mark.slow
+    def test_evaluation_stress_test(self):
+        self._test_evaluation(allow_slow=True)
+
+    def test_evaluation(self):
+        self._test_evaluation(allow_slow=False)
+
+    def _test_evaluation(self, allow_slow):
+        """
+        Test that the same predictions are made
+        """
+        from svm import svm_parameter, svm_problem
+        from svmutil import svm_predict, svm_train
+
+        # Generate some smallish (poly kernels take too long on anything else) random data
+        x, y = [], []
+        for _ in range(50):
+            cur_x1, cur_x2 = random.gauss(2, 3), random.gauss(-1, 2)
+            x.append([cur_x1, cur_x2])
+            y.append(1 + 2 * cur_x1 + 3 * cur_x2)
+
+        input_names = ["x1", "x2"]
+        df = pd.DataFrame(x, columns=input_names)
+        prob = svm_problem(y, x)
+
+        # Parameters
+        base_param = "-s 3"  # model type is epsilon-SVR
+        non_kernel_parameters = ["", "-c 1.5 -p 0.5 -h 1", "-c 0.5 -p 0.5 -h 0"]
+        kernel_parameters = [
+            "",
+            "-t 2 -g 1.2",  # rbf kernel
+            "-t 0",  # linear kernel
+            "-t 1",
+            "-t 1 -d 2",
+            "-t 1 -g 0.75",
+            "-t 1 -d 0 -g 0.9 -r 2",  # poly kernel
+            "-t 3",
+            "-t 3 -g 1.3",
+            "-t 3 -r 0.8",
+            "-t 3 -r 0.8 -g 0.5",  # sigmoid kernel
+        ]
+
+        for param1 in non_kernel_parameters:
+            for param2 in kernel_parameters:
+                param_str = " ".join([base_param, param1, param2])
+                print(param_str)
+                param = svm_parameter(param_str)
+
+                model = svm_train(prob, param)
+                (df["target"], _, _) = svm_predict(y, x, model)
+
+                spec = libsvm.convert(
+                    model, input_names=input_names, target_name="target"
+                )
+
+                if _is_macos() and _macos_version() >= (10, 13):
+                    metrics = evaluate_regressor(spec, df)
+                    self.assertAlmostEqual(metrics["max_error"], 0)
+
+                if not allow_slow:
+                    break
+
+            if not allow_slow:
+                break
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_categorical_imputer.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_categorical_imputer.py
new file mode 100644
index 00000000..2076c75a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_categorical_imputer.py
@@ -0,0 +1,78 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import unittest
+from distutils.version import StrictVersion
+
+import numpy as np
+
+from coremltools._deps import _HAS_SKLEARN, _SKLEARN_VERSION
+
+if _HAS_SKLEARN:
+    import sklearn
+
+    from coremltools.converters import sklearn as converter
+    try:
+        # scikit-learn >= 0.21
+        from sklearn.impute import SimpleImputer as Imputer
+
+        sklearn_class = sklearn.impute.SimpleImputer
+    except ImportError:
+        # scikit-learn < 0.21
+        from sklearn.preprocessing import Imputer
+
+        sklearn_class = sklearn.preprocessing.Imputer
+
+@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.")
+class ImputerTestCase(unittest.TestCase):
+    """
+    Unit test class for testing scikit-learn converter.
+    """
+
+    @classmethod
+    def setUpClass(self):
+        """
+        Set up the unit test by loading the dataset and training a model.
+        """
+        from sklearn.datasets import load_boston
+
+        scikit_data = load_boston()
+        # The axis parameter is deprecated in scikit-learn >= 0.22; SimpleImputer now
+        # imputes only along columns, as desired here.
+        if _SKLEARN_VERSION >= StrictVersion("0.22"):
+            scikit_model = Imputer(strategy="most_frequent")
+        else:
+            scikit_model = Imputer(strategy="most_frequent", axis=0)
+        scikit_data["data"][1, 8] = np.NaN
+
+        input_data = scikit_data["data"][:, 8].reshape(-1, 1)
+        scikit_model.fit(input_data, scikit_data["target"])
+
+        # Save the data and the model
+        self.scikit_data = scikit_data
+        self.scikit_model = scikit_model
+
+    def test_conversion(self):
+        spec = converter.convert(self.scikit_model, "data", "out").get_spec()
+        self.assertIsNotNone(spec)
+
+        # Test the model class
+        self.assertIsNotNone(spec.description)
+
+        # Test the interface
+        self.assertTrue(spec.pipeline.models[-1].HasField("imputer"))
+
+    def test_conversion_bad_inputs(self):
+        # Error on converting an untrained model
+        with self.assertRaises(Exception):
+            model = Imputer()
+            spec = converter.convert(model, "data", "out")
+
+        # Check the expected class during conversion.
+        with self.assertRaises(Exception):
+            from sklearn.linear_model import LinearRegression
+
+            model = LinearRegression()
+            spec = converter.convert(model, "data", "out")
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_composite_pipelines.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_composite_pipelines.py
new file mode 100644
index 00000000..95cdcd56
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_composite_pipelines.py
@@ -0,0 +1,85 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import unittest +from distutils.version import StrictVersion + +import pandas as pd + +from coremltools._deps import _HAS_SKLEARN, _SKLEARN_VERSION +from coremltools.converters.sklearn import convert +from coremltools.models.utils import (_is_macos, _macos_version, + evaluate_regressor, evaluate_transformer) + +if _HAS_SKLEARN: + from sklearn.datasets import load_boston + from sklearn.ensemble import GradientBoostingRegressor + from sklearn.pipeline import Pipeline + from sklearn.preprocessing import OneHotEncoder, StandardScaler + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class GradientBoostingRegressorBostonHousingScikitNumericTest(unittest.TestCase): + + @unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") + @unittest.skipIf(_SKLEARN_VERSION >= StrictVersion("0.22"), + "categorical_features parameter to OneHotEncoder() deprecated after SciKit Learn 0.22." + ) + def test_boston_OHE_plus_normalizer(self): + data = load_boston() + + pl = Pipeline( + [ + ("OHE", OneHotEncoder(categorical_features=[8], sparse=False)), + ("Scaler", StandardScaler()), + ] + ) + + pl.fit(data.data, data.target) + + # Convert the model + spec = convert(pl, data.feature_names, "out") + + if _is_macos() and _macos_version() >= (10, 13): + input_data = [dict(zip(data.feature_names, row)) for row in data.data] + output_data = [{"out": row} for row in pl.transform(data.data)] + + result = evaluate_transformer(spec, input_data, output_data) + assert result["num_errors"] == 0 + + @unittest.skipIf(_SKLEARN_VERSION >= StrictVersion("0.22"), + "categorical_features parameter to OneHotEncoder() deprecated after SciKit Learn 0.22." + ) + def _test_boston_OHE_plus_trees(self, loss='ls'): + + data = load_boston() + + pl = Pipeline( + [ + ("OHE", OneHotEncoder(categorical_features=[8], sparse=False)), + ("Trees", GradientBoostingRegressor(random_state=1, loss=loss)), + ] + ) + + pl.fit(data.data, data.target) + + # Convert the model + spec = convert(pl, data.feature_names, "target") + + if _is_macos() and _macos_version() >= (10, 13): + # Get predictions + df = pd.DataFrame(data.data, columns=data.feature_names) + df["target"] = pl.predict(data.data) + + # Evaluate it + result = evaluate_regressor(spec, df, "target", verbose=False) + + assert result["max_error"] < 0.0001 + + def test_boston_OHE_plus_trees(self): + self._test_boston_OHE_plus_trees() + + def test_boston_OHE_plus_trees_with_huber_loss(self): + self._test_boston_OHE_plus_trees(loss='huber') diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_dict_vectorizer.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_dict_vectorizer.py new file mode 100644 index 00000000..c0323cbd --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_dict_vectorizer.py @@ -0,0 +1,102 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. 
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import unittest
+
+import numpy as np
+import numpy.random as rn
+import pandas as pd
+
+import coremltools
+from coremltools._deps import _HAS_SKLEARN
+from coremltools.models.utils import (_is_macos, _macos_version,
+                                      evaluate_classifier,
+                                      evaluate_transformer)
+
+if _HAS_SKLEARN:
+    from sklearn.feature_extraction import DictVectorizer
+    from sklearn.linear_model import LogisticRegression
+    from sklearn.pipeline import Pipeline
+
+    from coremltools.converters import sklearn
+
+
+@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.")
+class DictVectorizerScikitTest(unittest.TestCase):
+    """
+    Unit test class for testing scikit-learn converter.
+    """
+
+    def _test_conversion(self, data, trained_dict_vectorizer):
+
+        X = trained_dict_vectorizer.transform(data)
+
+        m = sklearn.convert(
+            trained_dict_vectorizer,
+            input_features="features",
+            output_feature_names="output",
+        )
+
+        if _is_macos() and _macos_version() >= (10, 13):
+            ret = evaluate_transformer(
+                m,
+                [{"features": row} for row in data],
+                [{"output": x_r} for x_r in X],
+                True,
+            )
+            assert ret["num_errors"] == 0
+
+
+    def test_dictvectorizer(self):
+        D = [
+            {"foo": 1, "bar": 3},
+            {"bar": 4, "baz": 2},
+            {"bar": 1, "quux": 1, "quuux": 2},
+        ]
+
+        for sparse in (True, False):
+            for dtype in (int, np.float32, np.int16):
+                for sort in (True, False):
+                    v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
+                    v = v.fit(D)
+                    self._test_conversion(D, v)
+
+
+    def test_unseen_or_no_features(self):
+        D1 = [{"camelot": 0, "spamalot": 1}]
+        D2 = [{}, {"nothing": 21}]
+
+        for sparse in (True, False):
+            for dtype in (int, np.float32, np.int16):
+                for sort in (True, False):
+                    v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
+                    v = v.fit(D1)
+                    self._test_conversion(D2, v)
+
+
+    def test_int_features_in_pipeline(self):
+        rn.seed(0)
+
+        x_train_dict = [
+            dict((rn.randint(100), 1) for i in range(20)) for j in range(100)
+        ]
+        y_train = [0, 1] * 50
+
+        # multi_class default changed in version >= 0.22 from 'ovr' to 'auto'.
+        # Specify explicitly to match < 0.22 behavior.
+        pl = Pipeline([("dv", DictVectorizer()), ("lm", LogisticRegression(multi_class='ovr'))])
+        pl.fit(x_train_dict, y_train)
+
+        model = coremltools.converters.sklearn.convert(
+            pl, input_features="features", output_feature_names="target"
+        )
+
+        if _is_macos() and _macos_version() >= (10, 13):
+            x = pd.DataFrame(
+                {"features": x_train_dict, "target": pl.predict(x_train_dict)}
+            )
+
+            cur_eval_metrics = evaluate_classifier(model, x)
+            self.assertEqual(cur_eval_metrics["num_errors"], 0)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_feature_names.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_feature_names.py
new file mode 100644
index 00000000..88b5d47f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_feature_names.py
@@ -0,0 +1,30 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import unittest
+
+import coremltools.models._feature_management as fm
+import coremltools.models.datatypes as dt
+from coremltools._deps import _HAS_SKLEARN
+
+
+@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. 
Skipping tests.") +class FeatureManagementTests(unittest.TestCase): + def test_all_strings(self): + features = ["a", "b", "c"] + processed_features = [ + ("a", dt.Double()), + ("b", dt.Double()), + ("c", dt.Double()), + ] + out = fm.process_or_validate_features(features) + self.assertEqual(out, processed_features) + self.assertTrue(fm.is_valid_feature_list(out)) + + def test_single_array(self): + self.assertEqual( + fm.process_or_validate_features("a", num_dimensions=10), + [("a", dt.Array(10))], + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_glm_classifier.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_glm_classifier.py new file mode 100644 index 00000000..b912f69b --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_glm_classifier.py @@ -0,0 +1,112 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools +import random +import unittest + +import pandas as pd + +from coremltools._deps import _HAS_SKLEARN +from coremltools.converters.sklearn import convert +from coremltools.models.utils import (_is_macos, _macos_version, + evaluate_classifier, + evaluate_classifier_with_probabilities) + +if _HAS_SKLEARN: + from sklearn.linear_model import LogisticRegression + from sklearn.svm import LinearSVC + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class GlmCassifierTest(unittest.TestCase): + def test_logistic_regression_binary_classification_with_string_labels(self): + self._conversion_and_evaluation_helper_for_logistic_regression(["Foo", "Bar"]) + + def test_logistic_regression_multiclass_classification_with_int_labels(self): + self._conversion_and_evaluation_helper_for_logistic_regression([1, 2, 3, 4]) + + @staticmethod + def _generate_random_data(labels): + random.seed(42) + + # Generate some random data + x, y = [], [] + for _ in range(100): + x.append([random.gauss(2, 3), random.gauss(-1, 2)]) + y.append(random.choice(labels)) + return x, y + + def _conversion_and_evaluation_helper_for_logistic_regression(self, class_labels): + options = { + "C": (0.1, 1.0, 2.0), + "fit_intercept": (True, False), + "class_weight": ("balanced", None), + "solver": ("newton-cg", "lbfgs", "liblinear", "sag"), + } + + # Generate a list of all combinations of options and the default parameters + product = itertools.product(*options.values()) + args = [{}] + [dict(zip(options.keys(), p)) for p in product] + + x, y = GlmCassifierTest._generate_random_data(class_labels) + column_names = ["x1", "x2"] + df = pd.DataFrame(x, columns=column_names) + + for cur_args in args: + # multi_class default changed in version 0.22 from ‘ovr’ to ‘auto’ in 0.22. + # Specify explicitly to match <0.22 behavior. 
+            cur_model = LogisticRegression(**cur_args, multi_class='ovr')
+            cur_model.fit(x, y)
+
+            spec = convert(
+                cur_model, input_features=column_names, output_feature_names="target"
+            )
+
+            if _is_macos() and _macos_version() >= (10, 13):
+                probability_lists = cur_model.predict_proba(x)
+                df["classProbability"] = [
+                    dict(zip(cur_model.classes_, cur_vals))
+                    for cur_vals in probability_lists
+                ]
+
+                metrics = evaluate_classifier_with_probabilities(
+                    spec, df, probabilities="classProbability", verbose=False
+                )
+                self.assertEqual(metrics["num_key_mismatch"], 0)
+                self.assertLess(metrics["max_probability_error"], 0.00001)
+
+    def test_linear_svc_binary_classification_with_string_labels(self):
+        self._conversion_and_evaluation_helper_for_linear_svc(["Foo", "Bar"])
+
+    def test_linear_svc_multiclass_classification_with_int_labels(self):
+        self._conversion_and_evaluation_helper_for_linear_svc([1, 2, 3, 4])
+
+    def _conversion_and_evaluation_helper_for_linear_svc(self, class_labels):
+        ARGS = [
+            {},
+            {"C": 0.75, "loss": "hinge"},
+            {"penalty": "l1", "dual": False},
+            {"tol": 0.001, "fit_intercept": False},
+            {"intercept_scaling": 1.5},
+        ]
+
+        x, y = GlmClassifierTest._generate_random_data(class_labels)
+        column_names = ["x1", "x2"]
+        df = pd.DataFrame(x, columns=column_names)
+
+        for cur_args in ARGS:
+            cur_model = LinearSVC(**cur_args)
+            cur_model.fit(x, y)
+
+            spec = convert(
+                cur_model, input_features=column_names, output_feature_names="target"
+            )
+
+            if _is_macos() and _macos_version() >= (10, 13):
+                df["target"] = cur_model.predict(x)
+
+                cur_eval_metrics = evaluate_classifier(spec, df, verbose=False)
+                self.assertEqual(cur_eval_metrics["num_errors"], 0)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_imputer.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_imputer.py
new file mode 100644
index 00000000..7afef95d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_imputer.py
@@ -0,0 +1,80 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import unittest
+from distutils.version import StrictVersion
+
+import numpy as np
+import numpy.random as rn
+
+from coremltools._deps import _HAS_SKLEARN, _SKLEARN_VERSION
+from coremltools.models.utils import (_is_macos, _macos_version,
+                                      evaluate_transformer)
+
+if _HAS_SKLEARN:
+    import sklearn
+
+    try:
+        # scikit-learn >= 0.21
+        from sklearn.impute import SimpleImputer as Imputer
+
+        sklearn_class = sklearn.impute.SimpleImputer
+    except ImportError:
+        # scikit-learn < 0.21
+        from sklearn.preprocessing import Imputer
+
+        sklearn_class = sklearn.preprocessing.Imputer
+
+    from coremltools.converters import sklearn as converter
+
+
+@unittest.skipUnless(
+    _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+"
+)
+@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.")
+class NumericalImputerTestCase(unittest.TestCase):
+    """
+    Unit test class for testing scikit-learn converter.
+ """ + + def test_conversion_boston(self): + + from sklearn.datasets import load_boston + + scikit_data = load_boston() + + sh = scikit_data.data.shape + + rn.seed(0) + missing_value_indices = [ + (rn.randint(sh[0]), rn.randint(sh[1])) for k in range(sh[0]) + ] + + for strategy in ["mean", "median", "most_frequent"]: + for missing_value in [0, "NaN", -999]: + # SimpleImputer >=0.22 does not accept missing values encoded as NaN. + if _SKLEARN_VERSION >= StrictVersion("0.22"): + if missing_value == "NaN": + continue + + X = np.array(scikit_data.data).copy() + + for i, j in missing_value_indices: + X[i, j] = missing_value + + model = Imputer(missing_values=missing_value, strategy=strategy) + model = model.fit(X) + + tr_X = model.transform(X.copy()) + + spec = converter.convert(model, scikit_data.feature_names, "out") + + input_data = [dict(zip(scikit_data.feature_names, row)) for row in X] + + output_data = [{"out": row} for row in tr_X] + + result = evaluate_transformer(spec, input_data, output_data) + + assert result["num_errors"] == 0 diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_io_types.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_io_types.py new file mode 100644 index 00000000..e74b20d9 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_io_types.py @@ -0,0 +1,342 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import unittest + +import numpy as np +import PIL.Image + +import coremltools +from coremltools._deps import _HAS_SKLEARN, MSG_SKLEARN_NOT_FOUND +from coremltools.models.utils import _is_macos, _macos_version + +if _HAS_SKLEARN: + from sklearn.datasets import load_boston + from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor + from sklearn.linear_model import LinearRegression + from sklearn.svm import SVC, SVR + from sklearn.tree import DecisionTreeRegressor + + +def create_model(spec): + """ + Create MLModel with specified types + Parameters + ---------- + spec: Pb spec from 3rd party converted model + + Returns + ------- + MLModel + """ + return coremltools.models.MLModel(spec) + + +@unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" +) +@unittest.skipIf(not _HAS_SKLEARN, MSG_SKLEARN_NOT_FOUND) +class TestIODataTypes(unittest.TestCase): + """ + This class tests for different I/O feature data types for an .mlmodel + It will cover the following areas to test for: + - All features must have a valid type + - Multiarrays must have a valid dataType. Inputs must specify shape. Shape must have >= 0 elements + - Images must have a valid colorspace. 
width & height have to be >= 0 + - Dictionaries must have a valid key type + """ + + @property + def scikit_data(self): + return load_boston() + + def _feature_data_type(self, dtype): + feature_dict = {np.int32: "INT32", np.float32: "FLOAT32", np.float64: "DOUBLE"} + return feature_dict[dtype] + + @property + def number_data_type(self): + return dict( + int8=np.int8, + int16=np.int16, + int32=np.int32, + uint8=np.uint8, + uint16=np.uint16, + uint32=np.uint32, + float=np.float32, + double=np.double, + ) + + def _sklearn_setup(self, model, dtype, data, target): + model.fit(data, target) + spec = coremltools.converters.sklearn.convert( + model, "data", "target" + ).get_spec() + return model, spec + + def _check_tree_model(self, spec, inputType, outputType, n_out): + self.assertIsNotNone(spec) + + # Test the model class + self.assertIsNotNone(spec.description) + self.assertIsNotNone(spec.treeEnsembleClassifier) + + # Test the interface class + self.assertEqual(spec.description.predictedFeatureName, "target") + + # Test the inputs and outputs + self.assertEqual(len(spec.description.output), n_out) + self.assertEqual(spec.description.output[0].name, "target") + self.assertEqual(spec.description.output[0].type.WhichOneof("Type"), outputType) + self.assertEqual(spec.description.input[0].name, "data") + self.assertEqual(spec.description.input[0].type.WhichOneof("Type"), inputType) + + def test_tree_regressor(self): + for dtype in self.number_data_type.keys(): + scikit_model = DecisionTreeRegressor(random_state=1) + data = self.scikit_data["data"].astype(dtype) + target = self.scikit_data["target"].astype(dtype) + scikit_model, spec = self._sklearn_setup(scikit_model, dtype, data, target) + test_data = data[0].reshape(1, -1) + self._check_tree_model(spec, "multiArrayType", "doubleType", 1) + coreml_model = create_model(spec) + try: + self.assertEqual( + scikit_model.predict(test_data)[0].dtype, + type(coreml_model.predict({"data": test_data})["target"]), + ) + self.assertEqual( + scikit_model.predict(test_data)[0], + coreml_model.predict({"data": test_data})["target"], + msg="{} != {} for Dtype: {}".format( + scikit_model.predict(test_data)[0], + coreml_model.predict({"data": test_data})["target"], + dtype, + ), + ) + except RuntimeError: + print("{} not supported. ".format(dtype)) + + def test_random_forest_classifier(self): + for dtype in self.number_data_type.keys(): + # n_estimators default changed >= 0.22. Specify explicitly to match <0.22 behavior. + scikit_model = RandomForestClassifier(random_state=1, n_estimators=10) + data = self.scikit_data["data"].astype(dtype) + target = ( + self.scikit_data["target"].astype(dtype) + > self.scikit_data["target"].astype(dtype).mean() + ) + scikit_model, spec = self._sklearn_setup(scikit_model, dtype, data, target) + test_data = data[0].reshape(1, -1) + self._check_tree_model(spec, "multiArrayType", "int64Type", 2) + coreml_model = create_model(spec) + try: + self.assertEqual( + scikit_model.predict(test_data)[0], + bool(int(coreml_model.predict({"data": test_data})["target"])), + msg="{} != {} for Dtype: {}".format( + scikit_model.predict(test_data)[0], + bool(int(coreml_model.predict({"data": test_data})["target"])), + dtype, + ), + ) + except RuntimeError: + print("{} not supported. ".format(dtype)) + + def test_random_forest_regressor(self): + for dtype in self.number_data_type.keys(): + # n_estimators default changed >= 0.22. Specify explicitly to match <0.22 behavior. 
+ scikit_model = RandomForestRegressor(random_state=1, n_estimators=10) + data = self.scikit_data["data"].astype(dtype) + target = self.scikit_data["target"].astype(dtype) + scikit_model, spec = self._sklearn_setup(scikit_model, dtype, data, target) + test_data = data[0].reshape(1, -1) + self._check_tree_model(spec, "multiArrayType", "doubleType", 1) + coreml_model = create_model(spec) + try: + self.assertEqual( + scikit_model.predict(test_data)[0].dtype, + type(coreml_model.predict({"data": test_data})["target"]), + ) + self.assertAlmostEqual( + scikit_model.predict(test_data)[0], + coreml_model.predict({"data": test_data})["target"], + msg="{} != {} for Dtype: {}".format( + scikit_model.predict(test_data)[0], + coreml_model.predict({"data": test_data})["target"], + dtype, + ), + ) + except RuntimeError: + print("{} not supported. ".format(dtype)) + + def test_support_vector_classifier(self): + for dtype in self.number_data_type.keys(): + scikit_model = SVC(kernel="rbf", gamma=1.2, C=1) + data = self.scikit_data["data"].astype(dtype) + target = ( + self.scikit_data["target"].astype(dtype) + > self.scikit_data["target"].astype(dtype).mean() + ) + scikit_model, spec = self._sklearn_setup(scikit_model, dtype, data, target) + coreml_model = create_model(spec) + for idx in range(0, 10): + test_data = data[idx].reshape(1, -1) + try: + self.assertEqual( + scikit_model.predict(test_data)[0], + bool(int(coreml_model.predict({"data": test_data})["target"])), + msg="{} != {} for Dtype: {}".format( + scikit_model.predict(test_data)[0], + bool( + int(coreml_model.predict({"data": test_data})["target"]) + ), + dtype, + ), + ) + except RuntimeError: + print("{} not supported. ".format(dtype)) + + def test_support_vector_regressor(self): + for dtype in self.number_data_type.keys(): + scikit_model = SVR(kernel="rbf") + data = self.scikit_data["data"].astype(dtype) + target = self.scikit_data["target"].astype(dtype) + scikit_model, spec = self._sklearn_setup(scikit_model, dtype, data, target) + test_data = data[0].reshape(1, -1) + coreml_model = create_model(spec) + try: + self.assertAlmostEqual( + scikit_model.predict(test_data)[0], + coreml_model.predict({"data": test_data})["target"], + msg="{} != {} for Dtype: {}".format( + scikit_model.predict(test_data)[0], + coreml_model.predict({"data": test_data})["target"], + dtype, + ), + ) + except RuntimeError: + print("{} not supported. ".format(dtype)) + + def test_linear_regressor(self): + for dtype in self.number_data_type.keys(): + scikit_model = LinearRegression(normalize=True) + data = self.scikit_data["data"].astype(dtype) + target = self.scikit_data["target"].astype(dtype) + scikit_model, spec = self._sklearn_setup(scikit_model, dtype, data, target) + test_data = data[0].reshape(1, -1) + coreml_model = create_model(spec) + try: + self.assertEqual( + scikit_model.predict(test_data)[0].dtype, + type(coreml_model.predict({"data": test_data})["target"]), + ) + self.assertAlmostEqual( + scikit_model.predict(test_data)[0], + coreml_model.predict({"data": test_data})["target"], + msg="{} != {} for Dtype: {}".format( + scikit_model.predict(test_data)[0], + coreml_model.predict({"data": test_data})["target"], + dtype, + ), + ) + except RuntimeError: + print("{} not supported. 
".format(dtype)) + + def test_image_output_rgb(self): + input_shape = (3, 10, 20) + input_features = [("data", coremltools.models.datatypes.Array(*input_shape))] + output_features = [("target", coremltools.models.datatypes.Array(*input_shape))] + builder = coremltools.models.neural_network.NeuralNetworkBuilder( + input_features, output_features + ) + builder.add_elementwise( + "Identity", + input_names=["data"], + output_name="target", + mode="ADD", + alpha=0.0, + ) + spec = builder.spec + output = spec.description.output[0] + output.type.imageType.colorSpace = coremltools.proto.FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value( + "RGB" + ) + output.type.imageType.height = input_shape[1] + output.type.imageType.width = input_shape[2] + + coreml_model = coremltools.models.MLModel(spec) + input_data = np.floor(np.random.rand(*input_shape) * 255) + + coreml_out = coreml_model.predict({"data": input_data})["target"] + self.assertEqual(PIL.Image.Image, type(coreml_out)) + self.assertEqual("RGBA", coreml_out.mode) + np.testing.assert_equal( + np.uint8(input_data), np.array(coreml_out).transpose(2, 0, 1)[:3, :] + ) + + @unittest.skip("rdar://71638164") + def test_image_output_bgr(self): + input_shape = (3, 15, 25) + input_features = [("data", coremltools.models.datatypes.Array(*input_shape))] + output_features = [("target", coremltools.models.datatypes.Array(*input_shape))] + builder = coremltools.models.neural_network.NeuralNetworkBuilder( + input_features, output_features + ) + builder.add_elementwise( + "Identity", + input_names=["data"], + output_name="target", + mode="ADD", + alpha=0.0, + ) + spec = builder.spec + output = spec.description.output[0] + output.type.imageType.colorSpace = coremltools.proto.FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value( + "BGR" + ) + output.type.imageType.height = input_shape[1] + output.type.imageType.width = input_shape[2] + + coreml_model = coremltools.models.MLModel(spec) + input_data = np.floor(np.random.rand(*input_shape) * 255) + + coreml_out = coreml_model.predict({"data": input_data})["target"] + self.assertEqual(PIL.Image.Image, type(coreml_out)) + self.assertEqual("RGBA", coreml_out.mode) + np.testing.assert_equal( + np.uint8(input_data), + np.array(coreml_out)[:, :, ::-1].transpose(2, 0, 1)[1:, :], + ) + + def test_image_output_grayscale(self): + input_shape = (1, 20, 30) + input_features = [("data", coremltools.models.datatypes.Array(*input_shape))] + output_features = [("target", coremltools.models.datatypes.Array(*input_shape))] + builder = coremltools.models.neural_network.NeuralNetworkBuilder( + input_features, output_features + ) + builder.add_elementwise( + "Identity", + input_names=["data"], + output_name="target", + mode="ADD", + alpha=0.0, + ) + spec = builder.spec + output = spec.description.output[0] + output.type.imageType.colorSpace = coremltools.proto.FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value( + "GRAYSCALE" + ) + output.type.imageType.height = input_shape[1] + output.type.imageType.width = input_shape[2] + + coreml_model = coremltools.models.MLModel(spec) + input_data = np.floor(np.random.rand(*input_shape) * 255) + + coreml_out = coreml_model.predict({"data": input_data})["target"] + self.assertEqual(PIL.Image.Image, type(coreml_out)) + self.assertEqual("L", coreml_out.mode) + np.testing.assert_equal(np.uint8(input_data)[0], np.array(coreml_out)) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_k_neighbors_classifier.py 
b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_k_neighbors_classifier.py
new file mode 100644
index 00000000..1781d139
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_k_neighbors_classifier.py
@@ -0,0 +1,277 @@
+# Copyright (c) 2019, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import unittest
+
+from scipy import sparse
+
+from coremltools._deps import _HAS_SKLEARN
+
+if _HAS_SKLEARN:
+    from sklearn.datasets import load_iris
+    from sklearn.neighbors import KNeighborsClassifier
+
+    from coremltools.converters import sklearn
+
+
+@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.")
+class KNeighborsClassifierScikitTest(unittest.TestCase):
+    """
+    Unit test class for testing scikit-learn converter.
+    """
+
+    @classmethod
+    def setUpClass(self):
+        """
+        Set up the unit test by loading the dataset and training a model.
+        """
+        print("Setting up KNeighborsClassifier converter tests")
+        iris_samples = load_iris()
+        self.iris_X = iris_samples.data
+        self.iris_y = iris_samples.target
+
+    def test_conversion_unfitted(self):
+        """Tests conversion failure for an unfitted scikit model."""
+        scikit_model = KNeighborsClassifier()
+        self.assertRaises(TypeError, sklearn.convert, scikit_model)
+
+    def test_conversion_brute_algorithm(self):
+        """Tests conversion of a scikit KNeighborsClassifier using the brute force algorithm."""
+        scikit_model = KNeighborsClassifier(algorithm="brute", n_neighbors=42)
+        scikit_model.fit(self.iris_X, self.iris_y)
+
+        coreml_model = sklearn.convert(scikit_model, "single_input", "single_output")
+        coreml_spec = coreml_model.get_spec()
+
+        self.assertIsNotNone(coreml_spec)
+        self.assertTrue(coreml_spec.HasField("kNearestNeighborsClassifier"))
+        self.assertEqual(
+            coreml_spec.kNearestNeighborsClassifier.numberOfNeighbors.defaultValue, 42
+        )
+        self.assertEqual(
+            coreml_spec.kNearestNeighborsClassifier.numberOfNeighbors.range.minValue, 1
+        )
+        self.assertEqual(
+            coreml_spec.kNearestNeighborsClassifier.numberOfNeighbors.range.maxValue,
+            len(self.iris_X),
+        )
+        self.assertTrue(
+            coreml_spec.kNearestNeighborsClassifier.HasField("uniformWeighting")
+        )
+        self.assertEqual(
+            coreml_spec.kNearestNeighborsClassifier.nearestNeighborsIndex.numberOfDimensions,
+            len(self.iris_X[0]),
+        )
+        self.assertTrue(
+            coreml_spec.kNearestNeighborsClassifier.nearestNeighborsIndex.HasField(
+                "linearIndex"
+            )
+        )
+        self.assertTrue(
+            coreml_spec.kNearestNeighborsClassifier.nearestNeighborsIndex.HasField(
+                "squaredEuclideanDistance"
+            )
+        )
+
+        self.validate_labels(coreml_spec, self.iris_y)
+        self.validate_float_samples(coreml_spec, self.iris_X)
+
+    def test_conversion_kd_tree_algorithm(self):
+        """Tests conversion of a scikit KNeighborsClassifier using the kd-tree algorithm."""
+        test_leaf_size = 23
+        test_n_neighbors = 42
+        scikit_model = KNeighborsClassifier(
+            algorithm="kd_tree", leaf_size=test_leaf_size, n_neighbors=test_n_neighbors
+        )
+        scikit_model.fit(self.iris_X, self.iris_y)
+
+        coreml_model = sklearn.convert(scikit_model, "single_input", "single_output")
+        coreml_spec = coreml_model.get_spec()
+
+        self.assertIsNotNone(coreml_spec)
+        self.assertTrue(coreml_spec.HasField("kNearestNeighborsClassifier"))
+        self.assertEqual(
+            coreml_spec.kNearestNeighborsClassifier.numberOfNeighbors.defaultValue,
+            test_n_neighbors,
+        )
+        
self.assertEqual(
+            coreml_spec.kNearestNeighborsClassifier.numberOfNeighbors.range.minValue, 1
+        )
+        self.assertEqual(
+            coreml_spec.kNearestNeighborsClassifier.numberOfNeighbors.range.maxValue,
+            len(self.iris_X),
+        )
+        self.assertTrue(
+            coreml_spec.kNearestNeighborsClassifier.HasField("uniformWeighting")
+        )
+        self.assertEqual(
+            coreml_spec.kNearestNeighborsClassifier.nearestNeighborsIndex.numberOfDimensions,
+            len(self.iris_X[0]),
+        )
+        self.assertTrue(
+            coreml_spec.kNearestNeighborsClassifier.nearestNeighborsIndex.HasField(
+                "singleKdTreeIndex"
+            )
+        )
+        self.assertEqual(
+            test_leaf_size,
+            coreml_spec.kNearestNeighborsClassifier.nearestNeighborsIndex.singleKdTreeIndex.leafSize,
+        )
+        self.assertTrue(
+            coreml_spec.kNearestNeighborsClassifier.nearestNeighborsIndex.HasField(
+                "squaredEuclideanDistance"
+            )
+        )
+
+        self.validate_labels(coreml_spec, self.iris_y)
+        self.validate_float_samples(coreml_spec, self.iris_X)
+
+    def test_conversion_auto_algorithm(self):
+        """Tests conversion of a scikit KNeighborsClassifier using the auto algorithm."""
+        test_n_neighbors = 42
+        scikit_model = KNeighborsClassifier(
+            algorithm="auto", n_neighbors=test_n_neighbors
+        )
+        scikit_model.fit(self.iris_X, self.iris_y)
+
+        coreml_model = sklearn.convert(scikit_model, "single_input", "single_output")
+        coreml_spec = coreml_model.get_spec()
+        self.assertIsNotNone(coreml_spec)
+
+    def test_conversion_unsupported_algorithm(self):
+        """Test a scikit KNeighborsClassifier with an invalid algorithm."""
+        scikit_model = KNeighborsClassifier(algorithm="ball_tree")
+        self.assertRaises(TypeError, sklearn.convert, scikit_model)
+
+    def test_conversion_weight_function_good(self):
+        scikit_model = KNeighborsClassifier(weights="uniform")
+        scikit_model.fit(self.iris_X, self.iris_y)
+
+        coreml_model = sklearn.convert(scikit_model, "single_input", "single_output")
+        coreml_spec = coreml_model.get_spec()
+        self.assertIsNotNone(coreml_spec)
+        self.assertTrue(
+            coreml_spec.kNearestNeighborsClassifier.HasField("uniformWeighting")
+        )
+
+    def test_conversion_unsupported_weight_function(self):
+        scikit_model = KNeighborsClassifier(algorithm="brute", weights="distance")
+        scikit_model.fit(self.iris_X, self.iris_y)
+        self.assertRaises(TypeError, sklearn.convert, scikit_model)
+
+        def callable_weight_function():
+            print("Inside callable_weight_function")
+
+        scikit_model = KNeighborsClassifier(
+            algorithm="brute", weights=callable_weight_function
+        )
+        scikit_model.fit(self.iris_X, self.iris_y)
+        self.assertRaises(TypeError, sklearn.convert, scikit_model)
+
+    def test_conversion_distance_function_good(self):
+        """Tests conversion of a scikit KNeighborsClassifier with a valid distance metric."""
+        scikit_model = KNeighborsClassifier(algorithm="brute", metric="euclidean")
+        scikit_model.fit(self.iris_X, self.iris_y)
+        coreml_model = sklearn.convert(scikit_model, "single_input", "single_output")
+        coreml_spec = coreml_model.get_spec()
+        self.assertIsNotNone(coreml_spec)
+        self.assertTrue(
+            coreml_spec.kNearestNeighborsClassifier.nearestNeighborsIndex.HasField(
+                "squaredEuclideanDistance"
+            )
+        )
+
+        # Minkowski metric with p=2 is equivalent to the squared Euclidean distance
+        scikit_model = KNeighborsClassifier(algorithm="brute", metric="minkowski", p=2)
+        scikit_model.fit(self.iris_X, self.iris_y)
+        # Re-convert the newly fitted model before inspecting its spec.
+        coreml_model = sklearn.convert(scikit_model, "single_input", "single_output")
+        coreml_spec = coreml_model.get_spec()
+        self.assertIsNotNone(coreml_spec)
+        self.assertTrue(
+            coreml_spec.kNearestNeighborsClassifier.nearestNeighborsIndex.HasField(
+                "squaredEuclideanDistance"
+            )
+        )
+
+    def 
test_conversion_unsupported_distance_function(self): + """Tests conversion of a scikit KNeighborsClassifier with an invalid distance metric.""" + # There are many possible distance functions for a brute force neighbors function, but these 3 should give us + # coverage over the converter code. + scikit_model = KNeighborsClassifier(algorithm="brute", metric="manhattan") + scikit_model.fit(self.iris_X, self.iris_y) + self.assertRaises(TypeError, sklearn.convert, scikit_model) + + scikit_model = KNeighborsClassifier(algorithm="kd_tree", metric="chebyshev") + scikit_model.fit(self.iris_X, self.iris_y) + self.assertRaises(TypeError, sklearn.convert, scikit_model) + + scikit_model = KNeighborsClassifier(algorithm="brute", metric="minkowski", p=3) + scikit_model.fit(self.iris_X, self.iris_y) + self.assertRaises(TypeError, sklearn.convert, scikit_model) + + def callable_distance_function(): + print("Inside callable_distance_function") + + scikit_model = KNeighborsClassifier( + algorithm="brute", metric=callable_distance_function + ) + scikit_model.fit(self.iris_X, self.iris_y) + self.assertRaises(TypeError, sklearn.convert, scikit_model) + + def test_conversion_with_sparse_X(self): + """Tests conversion of a model that's fitted with sparse data.""" + num_samples = 100 + num_dims = 64 + sparse_X = sparse.rand( + num_samples, num_dims, format="csr" + ) # KNeighborsClassifier only supports CSR format + y = self.iris_y[ + 0:num_samples + ] # the labels themselves don't matter - just use 100 of the Iris ones + + sklearn_model = KNeighborsClassifier(algorithm="brute") + sklearn_model.fit(sparse_X, y) + + coreml_model = sklearn.convert(sklearn_model) + coreml_spec = coreml_model.get_spec() + self.assertIsNotNone(coreml_spec) + + def test_conversion_with_sparse_y(self): + """Tests conversion of a model that's fitted with y values in a sparse format.""" + from sklearn.model_selection import train_test_split + + X_train, X_test, y_train, y_test = train_test_split( + self.iris_X, self.iris_y, test_size=0.2, train_size=0.8 + ) + + from sklearn import preprocessing + + lb = preprocessing.LabelBinarizer(sparse_output=True) + binarized_y = lb.fit_transform(y_train) + + sklearn_model = KNeighborsClassifier(algorithm="brute") + sklearn_model.fit(X_train, binarized_y) + + self.assertRaises(ValueError, sklearn.convert, sklearn_model) + + def validate_labels(self, spec, expected): + """Validate the labels returned from the converted scikit KNeighborsClassifier""" + self.assertTrue(spec.kNearestNeighborsClassifier.HasField("int64ClassLabels")) + for index, label in enumerate( + spec.kNearestNeighborsClassifier.int64ClassLabels.vector + ): + self.assertEqual(label, expected[index]) + + def validate_float_samples(self, spec, expected): + """Validate the float samples returned from the converted scikit KNeighborsClassifier""" + num_dimensions = ( + spec.kNearestNeighborsClassifier.nearestNeighborsIndex.numberOfDimensions + ) + for index, sample in enumerate( + spec.kNearestNeighborsClassifier.nearestNeighborsIndex.floatSamples + ): + for dim in range(0, num_dimensions): + self.assertAlmostEqual( + sample.vector[dim], expected[index][dim], places=6 + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_linear_regression.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_linear_regression.py new file mode 100644 index 00000000..e94fc595 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_linear_regression.py @@ 
-0,0 +1,136 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import unittest
+
+import pandas as pd
+
+from coremltools._deps import _HAS_SKLEARN
+from coremltools.models.utils import (_is_macos, _macos_version,
+                                      evaluate_regressor)
+
+if _HAS_SKLEARN:
+    from sklearn.datasets import load_boston
+    from sklearn.linear_model import LinearRegression
+    from sklearn.preprocessing import OneHotEncoder
+    from sklearn.svm import LinearSVR
+
+    from coremltools.converters.sklearn import convert
+
+
+@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.")
+class LinearRegressionScikitTest(unittest.TestCase):
+    """
+    Unit test class for testing scikit-learn converter.
+    """
+
+    @classmethod
+    def setUpClass(self):
+        """
+        Set up the unit test by loading the dataset and training a model.
+        """
+        scikit_data = load_boston()
+        scikit_model = LinearRegression()
+        scikit_model.fit(scikit_data["data"], scikit_data["target"])
+
+        # Save the data and the model
+        self.scikit_data = scikit_data
+        self.scikit_model = scikit_model
+
+    def test_conversion(self):
+        input_names = self.scikit_data.feature_names
+        spec = convert(self.scikit_model, input_names, "target").get_spec()
+        self.assertIsNotNone(spec)
+
+        # Test the model class
+        self.assertIsNotNone(spec.description)
+
+        # Test the interface class
+        self.assertEqual(spec.description.predictedFeatureName, "target")
+
+        # Test the inputs and outputs
+        self.assertEqual(len(spec.description.output), 1)
+        self.assertEqual(spec.description.output[0].name, "target")
+        self.assertEqual(
+            spec.description.output[0].type.WhichOneof("Type"), "doubleType"
+        )
+        for input_type in spec.description.input:
+            self.assertEqual(input_type.type.WhichOneof("Type"), "doubleType")
+        self.assertEqual(
+            sorted(input_names), sorted(map(lambda x: x.name, spec.description.input))
+        )
+
+        # Test the linear regression parameters.
+        self.assertTrue(
+            spec.pipelineRegressor.pipeline.models[-1].HasField("glmRegressor")
+        )
+        lr = spec.pipelineRegressor.pipeline.models[-1].glmRegressor
+        self.assertEqual(lr.offset, self.scikit_model.intercept_)
+        self.assertEqual(len(lr.weights), 1)
+        self.assertEqual(len(lr.weights[0].value), 13)
+        for i, w in enumerate(lr.weights[0].value):
+            self.assertAlmostEqual(w, self.scikit_model.coef_[i])
+
+    def test_conversion_bad_inputs(self):
+        # Error on converting an untrained model
+        with self.assertRaises(TypeError):
+            model = LinearRegression()
+            spec = convert(model, "data", "out")
+
+        # Check the expected class during conversion.
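+        # (Note: the converter validates the model's class, so passing a
+        # transformer such as OneHotEncoder here is expected to raise a
+        # TypeError.)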
+ with self.assertRaises(TypeError): + model = OneHotEncoder() + spec = convert(model, "data", "out") + + @unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" + ) + def test_linear_regression_evaluation(self): + """ + Check that the evaluation results are the same in scikit learn and coremltools + """ + input_names = self.scikit_data.feature_names + df = pd.DataFrame(self.scikit_data.data, columns=input_names) + + for normalize_value in (True, False): + cur_model = LinearRegression(normalize=normalize_value) + cur_model.fit(self.scikit_data["data"], self.scikit_data["target"]) + spec = convert(cur_model, input_names, "target") + + df["target"] = cur_model.predict(self.scikit_data.data) + + metrics = evaluate_regressor(spec, df) + self.assertAlmostEqual(metrics["max_error"], 0) + + @unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" + ) + def test_linear_svr_evaluation(self): + """ + Check that the evaluation results are the same in scikit learn and coremltools + """ + ARGS = [ + {}, + {"C": 0.5, "epsilon": 0.25}, + {"dual": False, "loss": "squared_epsilon_insensitive"}, + {"tol": 0.005}, + {"fit_intercept": False}, + {"intercept_scaling": 1.5}, + ] + + input_names = self.scikit_data.feature_names + df = pd.DataFrame(self.scikit_data.data, columns=input_names) + + for cur_args in ARGS: + cur_model = LinearSVR(**cur_args) + cur_model.fit(self.scikit_data["data"], self.scikit_data["target"]) + spec = convert(cur_model, input_names, "target") + + df["target"] = cur_model.predict(self.scikit_data.data) + + metrics = evaluate_regressor(spec, df) + self.assertAlmostEqual(metrics["max_error"], 0) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_nearest_neighbors_builder.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_nearest_neighbors_builder.py new file mode 100644 index 00000000..7fb34d4e --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_nearest_neighbors_builder.py @@ -0,0 +1,418 @@ +# Copyright (c) 2019, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import os +import shutil +import unittest + +from coremltools._deps import _HAS_SKLEARN +from coremltools.models import MLModel +from coremltools.models.nearest_neighbors import \ + KNearestNeighborsClassifierBuilder +from coremltools.models.utils import _is_macos + +if _HAS_SKLEARN: + from sklearn.datasets import load_iris + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class NearestNeighborsBuilderTest(unittest.TestCase): + """ + Unit tests for the nearest neighbors builder class. 
+ """ + + def setUp(self): + iris_samples = load_iris() + self.iris_X = iris_samples.data + self.iris_y = iris_samples.target + self.training_X = self.iris_X[-30:] + self.training_y = self.iris_y[-30:] + + def tearDown(self): + # Do any cleanup here + pass + + def create_builder(self, default_class_label="default_label"): + builder = KNearestNeighborsClassifierBuilder( + input_name="input", + output_name="output", + number_of_dimensions=4, + default_class_label=default_class_label, + ) + return builder + + def test_builder_output_types(self): + builder = self.create_builder(default_class_label="default") + self.assertIsNotNone(builder) + self.assertTrue( + builder.spec.kNearestNeighborsClassifier.HasField("stringClassLabels") + ) + + builder = self.create_builder(default_class_label=12) + self.assertIsNotNone(builder) + self.assertTrue( + builder.spec.kNearestNeighborsClassifier.HasField("int64ClassLabels") + ) + + with self.assertRaises(TypeError): + bad_default_label = float(21.32) + self.create_builder(default_class_label=bad_default_label) + + def test_builder_training_input(self): + builder = self.create_builder(default_class_label="default") + self.assertIsNotNone(builder) + self.assertTrue( + builder.spec.kNearestNeighborsClassifier.HasField("stringClassLabels") + ) + + self.assertEqual(builder.spec.description.trainingInput[0].name, "input") + self.assertEqual( + builder.spec.description.trainingInput[0].type.WhichOneof("Type"), + "multiArrayType", + ) + self.assertEqual(builder.spec.description.trainingInput[1].name, "output") + self.assertEqual( + builder.spec.description.trainingInput[1].type.WhichOneof("Type"), + "stringType", + ) + + def test_make_updatable(self): + builder = self.create_builder() + self.assertIsNotNone(builder) + + self.assertTrue(builder.spec.isUpdatable) + builder.is_updatable = False + self.assertFalse(builder.spec.isUpdatable) + builder.is_updatable = True + self.assertTrue(builder.spec.isUpdatable) + + def test_author(self): + builder = self.create_builder() + self.assertIsNotNone(builder) + + self.assertEqual(builder.spec.description.metadata.author, "") + builder.author = "John Doe" + self.assertEqual(builder.author, "John Doe") + self.assertEqual(builder.spec.description.metadata.author, "John Doe") + + def test_description(self): + builder = self.create_builder() + self.assertIsNotNone(builder) + + self.assertEqual(builder.spec.description.metadata.shortDescription, "") + builder.description = "This is a description" + self.assertEqual(builder.description, "This is a description") + self.assertEqual( + builder.spec.description.metadata.shortDescription, "This is a description" + ) + + def test_weighting_scheme(self): + builder = self.create_builder() + self.assertIsNotNone(builder) + + builder.weighting_scheme = "uniform" + self.assertEqual(builder.weighting_scheme, "uniform") + + builder.weighting_scheme = "inverse_distance" + self.assertEqual(builder.weighting_scheme, "inverse_distance") + + builder.weighting_scheme = "unIfOrM" + self.assertEqual(builder.weighting_scheme, "uniform") + + builder.weighting_scheme = "InVerSE_DISTance" + self.assertEqual(builder.weighting_scheme, "inverse_distance") + + with self.assertRaises(TypeError): + builder.weighting_scheme = "test" + + def test_index_type(self): + builder = self.create_builder() + self.assertIsNotNone(builder) + + self.assertEqual(builder.index_type, "linear") + self.assertEqual(builder.leaf_size, 0) + + builder.set_index_type("kd_tree") + self.assertEqual(builder.index_type, "kd_tree") # test 
default value + self.assertEqual(builder.leaf_size, 30) + + builder.set_index_type("linear") + self.assertEqual(builder.index_type, "linear") + self.assertEqual(builder.leaf_size, 0) + + builder.set_index_type("kd_tree", leaf_size=45) # test user-defined value + self.assertEqual(builder.index_type, "kd_tree") + self.assertEqual(builder.leaf_size, 45) + + builder.set_index_type("linear", leaf_size=37) + self.assertEqual(builder.index_type, "linear") + self.assertEqual(builder.leaf_size, 0) + + builder.set_index_type("KD_TrEe", leaf_size=22) # test user-defined value + self.assertEqual(builder.index_type, "kd_tree") + self.assertEqual(builder.leaf_size, 22) + + builder.set_index_type("linEAR") + self.assertEqual(builder.index_type, "linear") + self.assertEqual(builder.leaf_size, 0) + + with self.assertRaises(TypeError): + builder.set_index_type("unsupported_index") + + with self.assertRaises(TypeError): + builder.set_index_type("kd_tree", -10) + + with self.assertRaises(TypeError): + builder.set_index_type("kd_tree", 0) + + def test_leaf_size(self): + builder = self.create_builder() + self.assertIsNotNone(builder) + + builder.set_index_type("kd_tree", leaf_size=45) # test user-defined value + self.assertEqual(builder.index_type, "kd_tree") + self.assertEqual(builder.leaf_size, 45) + + builder.leaf_size = 12 + self.assertEqual(builder.index_type, "kd_tree") + self.assertEqual(builder.leaf_size, 12) + + def test_set_number_of_neighbors_with_bounds(self): + builder = self.create_builder() + self.assertIsNotNone(builder) + + self.assertEqual(builder.number_of_neighbors, 5) + (min_value, max_value) = builder.number_of_neighbors_allowed_range() + self.assertEqual(min_value, 1) + self.assertEqual(max_value, 1000) + + builder.set_number_of_neighbors_with_bounds(12, allowed_range=(2, 24)) + (min_value, max_value) = builder.number_of_neighbors_allowed_range() + self.assertEqual(builder.number_of_neighbors, 12) + self.assertEqual(min_value, 2) + self.assertEqual(max_value, 24) + allowed_values = builder.number_of_neighbors_allowed_set() + self.assertIsNone(allowed_values) + + test_set = {3, 5, 7, 9} + builder.set_number_of_neighbors_with_bounds(7, allowed_set=test_set) + self.assertEqual(builder.number_of_neighbors, 7) + allowed_values = builder.number_of_neighbors_allowed_set() + self.assertIsNotNone(allowed_values) + self.assertEqual(allowed_values, test_set) + + def test_set_number_of_neighbors_with_bounds_error_conditions(self): + builder = self.create_builder() + self.assertIsNotNone(builder) + + with self.assertRaises(ValueError): + builder.set_number_of_neighbors_with_bounds(3) + + test_range = (3, 15) + test_set = {1, 3, 5} + with self.assertRaises(ValueError): + builder.set_number_of_neighbors_with_bounds( + 3, allowed_range=test_range, allowed_set=test_set + ) + + with self.assertRaises(ValueError): + builder.set_number_of_neighbors_with_bounds(3, allowed_range=(-5, 5)) + + with self.assertRaises(ValueError): + builder.set_number_of_neighbors_with_bounds(3, allowed_range=(5, 1)) + + with self.assertRaises(ValueError): + builder.set_number_of_neighbors_with_bounds( + 3, allowed_range=test_range, allowed_set=test_set + ) + + with self.assertRaises(ValueError): + builder.set_number_of_neighbors_with_bounds(2, allowed_range=test_range) + + with self.assertRaises(TypeError): + builder.set_number_of_neighbors_with_bounds(5, allowed_set={5, -3, 7}) + + with self.assertRaises(ValueError): + builder.set_number_of_neighbors_with_bounds(4, allowed_set=test_set) + + with self.assertRaises(ValueError): + 
builder.set_number_of_neighbors_with_bounds(4, allowed_set=test_set) + + with self.assertRaises(TypeError): + builder.set_number_of_neighbors_with_bounds(2, allowed_set=[1, 2, 3]) + + with self.assertRaises(TypeError): + builder.set_number_of_neighbors_with_bounds(4, allowed_range={2, 200}) + + with self.assertRaises(TypeError): + builder.set_number_of_neighbors_with_bounds(4, allowed_range=(2, 10, 20)) + + with self.assertRaises(TypeError): + builder.set_number_of_neighbors_with_bounds(4, allowed_set=set()) + + with self.assertRaises(TypeError): + builder.set_number_of_neighbors_with_bounds(4, allowed_range=[]) + + def test_set_number_of_neighbors(self): + builder = self.create_builder() + self.assertIsNotNone(builder) + + builder.set_number_of_neighbors_with_bounds(12, allowed_range=(2, 24)) + self.assertEqual(builder.number_of_neighbors, 12) + + with self.assertRaises(ValueError): + builder.set_number_of_neighbors_with_bounds(1, allowed_range=(2, 24)) + builder.set_number_of_neighbors_with_bounds(4, allowed_range=(2, 24)) + self.assertEqual(builder.number_of_neighbors, 4) + + test_set = {3, 5, 7, 9} + builder.set_number_of_neighbors_with_bounds(7, allowed_set=test_set) + + with self.assertRaises(ValueError): + builder.set_number_of_neighbors_with_bounds(4, allowed_set=test_set) + builder.set_number_of_neighbors_with_bounds(5, allowed_set=test_set) + self.assertEqual(builder.number_of_neighbors, 5) + + def test_add_samples_invalid_data(self): + builder = self.create_builder() + self.assertIsNotNone(builder) + + invalid_X = [[1.0, 2.4]] + with self.assertRaises(TypeError): + builder.add_samples(invalid_X, self.training_y) + + with self.assertRaises(TypeError): + builder.add_samples(self.training_X, self.training_y[:3]) + + with self.assertRaises(TypeError): + builder.add_samples([], self.training_y) + + with self.assertRaises(TypeError): + builder.add_samples(self.training_X, []) + + def test_add_samples_int_labels(self): + builder = self.create_builder(default_class_label=12) + self.assertIsNotNone(builder) + + some_X = self.training_X[:10] + some_y = self.training_y[:10] + builder.add_samples(some_X, some_y) + self._validate_samples(builder.spec, some_X, some_y) + + addl_X = self.training_X[10:20] + addl_y = self.training_y[10:20] + builder.add_samples(addl_X, addl_y) + self._validate_samples(builder.spec, self.training_X[:20], self.training_y[:20]) + + def test_add_samples_string_labels(self): + builder = self.create_builder(default_class_label="default") + self.assertIsNotNone(builder) + + some_X = self.training_X[:3] + some_y = ["one", "two", "three"] + builder.add_samples(some_X, some_y) + self._validate_samples(builder.spec, some_X, some_y) + + addl_X = self.training_X[3:6] + addl_y = ["four", "five", "six"] + builder.add_samples(addl_X, addl_y) + self._validate_samples(builder.spec, self.training_X[0:6], some_y + addl_y) + + def test_add_samples_invalid_label_types(self): + builder_int_labels = self.create_builder(default_class_label=42) + self.assertIsNotNone(builder_int_labels) + + some_X = self.training_X[:3] + invalid_int_y = [0, "one", 2] + with self.assertRaises(TypeError): + builder_int_labels.add_samples(some_X, invalid_int_y) + + builder_string_labels = self.create_builder(default_class_label="default") + self.assertIsNotNone(builder_string_labels) + + invalid_string_y = ["zero", "one", 2] + with self.assertRaises(TypeError): + builder_string_labels.add_samples(some_X, invalid_string_y) + + @unittest.skipUnless(_is_macos(), "Only supported on MacOS platform.") + def 
test_can_init_and_save_model_from_builder_with_updated_spec(self): + builder = KNearestNeighborsClassifierBuilder( + input_name="input", + output_name="output", + number_of_dimensions=10, + default_class_label="defaultLabel", + k=3, + weighting_scheme="inverse_distance", + index_type="kd_tree", + leaf_size=50, + ) + builder.author = "CoreML Team" + builder.license = "MIT" + builder.description = "test_builder_with_validation" + + # Save the updated spec + coreml_model = MLModel(builder.spec) + self.assertIsNotNone(coreml_model) + coreml_model_path = "/tmp/__test_builder_with_validation.mlmodel" + + try: + coreml_model.save(coreml_model_path) + self.assertTrue(os.path.isfile(coreml_model_path)) + finally: + self._delete_mlmodel_and_mlmodelc(coreml_model_path) + + @unittest.skipUnless(_is_macos(), "Only supported on MacOS platform.") + def test_can_init_and_save_model_from_builder_default_parameters(self): + builder = KNearestNeighborsClassifierBuilder( + input_name="input", + output_name="output", + number_of_dimensions=4, + default_class_label="defaultLabel", + ) + + # Save the updated spec + coreml_model = MLModel(builder.spec) + self.assertIsNotNone(coreml_model) + coreml_model_path = "/tmp/__test_builder_with_validation.mlmodel" + + try: + coreml_model.save(coreml_model_path) + self.assertTrue(os.path.isfile(coreml_model_path)) + finally: + self._delete_mlmodel_and_mlmodelc(coreml_model_path) + + def _validate_samples(self, spec, expected_X, expected_y): + """Validate the float samples returned from the converted scikit KNeighborsClassifier""" + num_dimensions = ( + spec.kNearestNeighborsClassifier.nearestNeighborsIndex.numberOfDimensions + ) + for index, sample in enumerate( + spec.kNearestNeighborsClassifier.nearestNeighborsIndex.floatSamples + ): + for dim in range(0, num_dimensions): + self.assertAlmostEqual( + sample.vector[dim], expected_X[index][dim], places=6 + ) + + if spec.kNearestNeighborsClassifier.HasField("int64ClassLabels"): + for index, label in enumerate( + spec.kNearestNeighborsClassifier.int64ClassLabels.vector + ): + self.assertEqual(label, expected_y[index]) + + elif spec.kNearestNeighborsClassifier.HasField("stringClassLabels"): + for index, label in enumerate( + spec.kNearestNeighborsClassifier.stringClassLabels.vector + ): + self.assertEqual(label, expected_y[index]) + + @staticmethod + def _delete_mlmodel_and_mlmodelc(path_to_mlmodel): + """Delete the .mlmodel and .mlmodelc for the given .mlmodel.""" + if os.path.exists(path_to_mlmodel): + os.remove(path_to_mlmodel) + path_to_mlmodelc = "{}c".format(path_to_mlmodel) + if os.path.exists(path_to_mlmodelc): + shutil.rmtree(path_to_mlmodelc) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_normalizer.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_normalizer.py new file mode 100644 index 00000000..b396a21b --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_normalizer.py @@ -0,0 +1,60 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. 
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import unittest
+
+import numpy as _np
+
+from coremltools._deps import _HAS_SKLEARN
+from coremltools.models.utils import (_is_macos, _macos_version,
+                                      evaluate_transformer)
+
+if _HAS_SKLEARN:
+    from sklearn.preprocessing import Normalizer
+
+    from coremltools.converters import sklearn as converter
+
+
+@unittest.skipUnless(
+    _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+"
+)
+@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.")
+class NormalizerScikitTest(unittest.TestCase):
+    """
+    Unit test class for testing scikit-learn converter.
+    """
+
+    def test_random(self):
+        # Generate some random data
+        X = _np.random.random(size=(50, 3))
+
+        for param in ("l1", "l2", "max"):
+            cur_model = Normalizer(norm=param)
+
+            output = cur_model.fit_transform(X)
+
+            spec = converter.convert(cur_model, ["a", "b", "c"], "out")
+
+            evaluate_transformer(
+                spec,
+                [dict(zip(["a", "b", "c"], row)) for row in X],
+                [{"out": row} for row in output],
+            )
+
+    def test_boston(self):
+        from sklearn.datasets import load_boston
+
+        scikit_data = load_boston()
+        scikit_model = Normalizer(norm="l2").fit(scikit_data.data)
+
+        spec = converter.convert(scikit_model, scikit_data.feature_names, "out")
+
+        input_data = [
+            dict(zip(scikit_data.feature_names, row)) for row in scikit_data.data
+        ]
+
+        output_data = [{"out": row} for row in scikit_model.transform(scikit_data.data)]
+
+        evaluate_transformer(spec, input_data, output_data)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_one_hot_encoder.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_one_hot_encoder.py
new file mode 100644
index 00000000..93be1124
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_one_hot_encoder.py
@@ -0,0 +1,290 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import unittest
+from copy import copy
+from distutils.version import StrictVersion
+
+import numpy as np
+
+from coremltools._deps import _HAS_SKLEARN, _SKLEARN_VERSION
+from coremltools.models.utils import (_is_macos, _macos_version,
+                                      evaluate_transformer)
+
+if _HAS_SKLEARN:
+    from sklearn.datasets import load_boston
+    from sklearn.pipeline import Pipeline
+    from sklearn.preprocessing import Normalizer, OneHotEncoder
+
+    from coremltools.converters import sklearn
+    from coremltools.models.datatypes import Array
+
+
+@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.")
+class OneHotEncoderScikitTest(unittest.TestCase):
+    """
+    Unit test class for testing scikit-learn converter.
+    """
+
+    @classmethod
+    def setUpClass(self):
+        """
+        Set up the unit test by loading the dataset and training a model.
+ """ + scikit_data = [[0], [1], [2], [4], [3], [2], [4], [5], [6], [7]] + scikit_data_multiple_cols = [[0, 1], [1, 0], [2, 2], [3, 3], [4, 4]] + scikit_model = OneHotEncoder() + scikit_model.fit(scikit_data) + + # Save the data and the model + self.scikit_data = np.asarray(scikit_data, dtype="d") + self.scikit_data_multiple_cols = np.asarray( + scikit_data_multiple_cols, dtype="d" + ) + self.scikit_model = scikit_model + + @unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" + ) + def test_conversion_one_column(self): + # Fit a single OHE + scikit_model = OneHotEncoder() + scikit_model.fit(self.scikit_data) + spec = sklearn.convert(scikit_model, "single_feature", "out").get_spec() + + test_data = [{"single_feature": row} for row in self.scikit_data] + scikit_output = [ + {"out": row} for row in scikit_model.transform(self.scikit_data).toarray() + ] + metrics = evaluate_transformer(spec, test_data, scikit_output) + + self.assertIsNotNone(spec) + self.assertIsNotNone(spec.description) + self.assertEqual(metrics["num_errors"], 0) + + @unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" + ) + def test_conversion_many_columns(self): + scikit_model = OneHotEncoder() + scikit_model.fit(self.scikit_data_multiple_cols) + spec = sklearn.convert( + scikit_model, ["feature_1", "feature_2"], "out" + ).get_spec() + + test_data = [ + {"feature_1": row[0], "feature_2": row[1]} + for row in self.scikit_data_multiple_cols + ] + scikit_output = [ + {"out": row} + for row in scikit_model.transform(self.scikit_data_multiple_cols).toarray() + ] + metrics = evaluate_transformer(spec, test_data, scikit_output) + + self.assertIsNotNone(spec) + self.assertIsNotNone(spec.description) + self.assertEqual(metrics["num_errors"], 0) + + @unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" + ) + def test_conversion_one_column_of_several(self): + if _SKLEARN_VERSION >= StrictVersion("0.22"): + scikit_model = OneHotEncoder() + else: + scikit_model = OneHotEncoder(categorical_features=[0]) + + scikit_model.fit(copy(self.scikit_data_multiple_cols)) + spec = sklearn.convert( + scikit_model, ["feature_1", "feature_2"], "out" + ).get_spec() + + test_data = [ + {"feature_1": row[0], "feature_2": row[1]} + for row in self.scikit_data_multiple_cols + ] + scikit_output = [ + {"out": row} + for row in scikit_model.transform(self.scikit_data_multiple_cols).toarray() + ] + metrics = evaluate_transformer(spec, test_data, scikit_output) + + self.assertIsNotNone(spec) + self.assertIsNotNone(spec.description) + self.assertEqual(metrics["num_errors"], 0) + + @unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" + ) + @unittest.skipIf(_SKLEARN_VERSION >= StrictVersion("0.22"), + "categorical_features parameter to OneHotEncoder() deprecated after SciKit Learn 0.22." 
+    )
+    def test_boston_OHE(self):
+        data = load_boston()
+
+        for categorical_features in [[3], [8], [3, 8], [8, 3]]:
+            model = OneHotEncoder(
+                categorical_features=categorical_features, sparse=False
+            )
+            model.fit(data.data, data.target)
+
+            # Convert the model
+            spec = sklearn.convert(model, data.feature_names, "out").get_spec()
+
+            input_data = [dict(zip(data.feature_names, row)) for row in data.data]
+            output_data = [{"out": row} for row in model.transform(data.data)]
+
+            result = evaluate_transformer(spec, input_data, output_data)
+
+            assert result["num_errors"] == 0
+
+    @unittest.skipUnless(
+        _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+"
+    )
+    @unittest.skipIf(_SKLEARN_VERSION >= StrictVersion("0.22"),
+        "categorical_features parameter to OneHotEncoder() deprecated after scikit-learn 0.22."
+    )
+    def test_boston_OHE_pipeline(self):
+        data = load_boston()
+
+        for categorical_features in [[3], [8], [3, 8], [8, 3]]:
+            # Put it in a pipeline so that we can test whether the output dimension
+            # handling is correct.
+
+            model = Pipeline(
+                [
+                    ("OHE", OneHotEncoder(categorical_features=categorical_features)),
+                    ("Normalizer", Normalizer()),
+                ]
+            )
+
+            model.fit(data.data.copy(), data.target)
+
+            # Convert the model
+            spec = sklearn.convert(model, data.feature_names, "out").get_spec()
+
+            input_data = [dict(zip(data.feature_names, row)) for row in data.data]
+            output_data = [{"out": row} for row in model.transform(data.data.copy())]
+
+            result = evaluate_transformer(spec, input_data, output_data)
+
+            assert result["num_errors"] == 0
+
+    @unittest.skipUnless(
+        _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+"
+    )
+    @unittest.skipIf(_SKLEARN_VERSION >= StrictVersion("0.22"),
+        "categorical_features parameter to OneHotEncoder() deprecated after scikit-learn 0.22."
+    )
+    def test_random_sparse_data(self):
+
+        n_columns = 8
+        n_categories = 20
+
+        import numpy.random as rn
+
+        rn.seed(0)
+        categories = rn.randint(50000, size=(n_columns, n_categories))
+
+        for dt in ["int32", "float32", "float64"]:
+
+            _X = np.array(
+                [
+                    [categories[j, rn.randint(n_categories)] for j in range(n_columns)]
+                    for i in range(100)
+                ],
+                dtype=dt,
+            )
+
+            # Test this data on a bunch of possible inputs.
+            for sparse in (True, False):
+                for categorical_features in [
+                    "all",
+                    [3],
+                    [4],
+                    range(2, 8),
+                    range(0, 4),
+                    range(0, 8),
+                ]:
+                    X = _X.copy()
+
+                    # This appears to be the only type now working.
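+                    # X is a fresh copy of _X, so fitting below cannot mutate the
+                    # shared test data, and this assert guards against a silent
+                    # cast away from the dtype selected for this iteration.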
+                    assert X.dtype == np.dtype(dt)
+
+                    model = OneHotEncoder(
+                        categorical_features=categorical_features, sparse=sparse
+                    )
+                    model.fit(X)
+
+                    # Convert the model
+                    spec = sklearn.convert(model, [("data", Array(n_columns))], "out")
+
+                    X_out = model.transform(X)
+                    if sparse:
+                        X_out = X_out.todense()
+
+                    input_data = [{"data": row} for row in X]
+                    output_data = [{"out": row} for row in X_out]
+
+                    result = evaluate_transformer(spec, input_data, output_data)
+
+                    assert result["num_errors"] == 0
+
+            # Test normal data inside a pipeline
+            for sparse in (True, False):
+                for categorical_features in [
+                    "all",
+                    [3],
+                    [4],
+                    range(2, 8),
+                    range(0, 4),
+                    range(0, 8),
+                ]:
+                    X = _X.copy()
+
+                    model = Pipeline(
+                        [
+                            (
+                                "OHE",
+                                OneHotEncoder(
+                                    categorical_features=categorical_features,
+                                    sparse=sparse,
+                                ),
+                            ),
+                            ("Normalizer", Normalizer()),
+                        ]
+                    )
+
+                    model.fit(X)
+
+                    # Convert the model
+                    spec = sklearn.convert(
+                        model, [("data", Array(n_columns))], "out"
+                    ).get_spec()
+
+                    X_out = model.transform(X)
+                    if sparse:
+                        X_out = X_out.todense()
+
+                    input_data = [{"data": row} for row in X]
+                    output_data = [{"out": row} for row in X_out]
+
+                    result = evaluate_transformer(spec, input_data, output_data)
+
+                    assert result["num_errors"] == 0
+
+    def test_conversion_bad_inputs(self):
+        # Error on converting an untrained model
+        with self.assertRaises(TypeError):
+            model = OneHotEncoder()
+            spec = sklearn.convert(model, "data", "out")
+
+        # Check the expected class during conversion.
+        with self.assertRaises(TypeError):
+            from sklearn.linear_model import LinearRegression
+
+            model = LinearRegression()
+            spec = sklearn.convert(model, "data", "out")
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_random_forest_classifier.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_random_forest_classifier.py
new file mode 100644
index 00000000..198dee9d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_random_forest_classifier.py
@@ -0,0 +1,168 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import unittest
+
+from coremltools._deps import _HAS_SKLEARN
+
+if _HAS_SKLEARN:
+    from sklearn.ensemble import RandomForestClassifier
+
+    from coremltools.converters import sklearn as skl_converter
+
+
+@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.")
+class RandomForestBinaryClassifierScikitTest(unittest.TestCase):
+    """
+    Unit test class for testing scikit-learn converter.
+    """
+
+    @classmethod
+    def setUpClass(self):
+        """
+        Set up the unit test by loading the dataset and training a model.
+        """
+        from sklearn.datasets import load_boston
+        from sklearn.ensemble import RandomForestClassifier
+
+        scikit_data = load_boston()
+        # n_estimators default changed >= 0.22. Specify explicitly to match <0.22 behavior.
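+        # (scikit-learn 0.22 raised the default n_estimators from 10 to 100;
+        # pinning it keeps the ensemble size, and hence the node count checked
+        # in test_conversion below, identical across scikit-learn versions.)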
+        scikit_model = RandomForestClassifier(random_state=1, n_estimators=10)
+        target = 1 * (scikit_data["target"] > scikit_data["target"].mean())
+        scikit_model.fit(scikit_data["data"], target)
+
+        self.scikit_model_node_count = sum(map(lambda e: e.tree_.node_count,
+                                               scikit_model.estimators_))
+
+        # Save the data and the model
+        self.scikit_data = scikit_data
+        self.scikit_model = scikit_model
+
+    def test_conversion(self):
+        input_names = self.scikit_data.feature_names
+        output_name = "target"
+        spec = skl_converter.convert(
+            self.scikit_model, input_names, "target"
+        ).get_spec()
+        self.assertIsNotNone(spec)
+
+        # Test the model class
+        self.assertIsNotNone(spec.description)
+
+        # Test the interface class
+        self.assertEqual(spec.description.predictedFeatureName, "target")
+
+        # Test the inputs and outputs
+        self.assertEqual(len(spec.description.output), 2)
+        self.assertEqual(spec.description.output[0].name, "target")
+        self.assertEqual(
+            spec.description.output[0].type.WhichOneof("Type"), "int64Type"
+        )
+        for input_type in spec.description.input:
+            self.assertEqual(input_type.type.WhichOneof("Type"), "doubleType")
+        self.assertEqual(
+            sorted(input_names), sorted(map(lambda x: x.name, spec.description.input))
+        )
+
+        self.assertEqual(len(spec.pipelineClassifier.pipeline.models), 2)
+        tr = spec.pipelineClassifier.pipeline.models[
+            -1
+        ].treeEnsembleClassifier.treeEnsemble
+        self.assertIsNotNone(tr)
+        self.assertEqual(len(tr.nodes), self.scikit_model_node_count)
+
+    def test_conversion_bad_inputs(self):
+        # Error on converting an untrained model
+        with self.assertRaises(Exception):
+            # n_estimators default changed >= 0.22. Specify explicitly to match <0.22 behavior.
+            model = RandomForestClassifier(n_estimators=10)
+            spec = skl_converter.convert(model, "data", "out")
+
+        # Check the expected class during conversion.
+        from sklearn.preprocessing import OneHotEncoder
+
+        with self.assertRaises(Exception):
+            model = OneHotEncoder()
+            spec = skl_converter.convert(model, "data", "out")
+
+
+@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.")
+class RandomForestMultiClassClassifierScikitTest(unittest.TestCase):
+    """
+    Unit test class for testing scikit-learn converter.
+    """
+
+    @classmethod
+    def setUpClass(self):
+        """
+        Set up the unit test by loading the dataset and training a model.
+        """
+        import numpy as np
+        from sklearn.datasets import load_boston
+        from sklearn.ensemble import RandomForestClassifier
+
+        scikit_data = load_boston()
+        # n_estimators default changed >= 0.22. Specify explicitly to match <0.22 behavior.
+        scikit_model = RandomForestClassifier(random_state=1, n_estimators=10)
+        t = scikit_data.target
+        target = np.digitize(t, np.histogram(t)[1]) - 1
+        scikit_model.fit(scikit_data.data, target)
+
+        self.scikit_model_node_count = sum(map(lambda e: e.tree_.node_count,
+                                               scikit_model.estimators_))
+
+        # Save the data and the model
+        self.scikit_data = scikit_data
+        self.target = target
+        self.scikit_model = scikit_model
+
+    def test_conversion(self):
+        input_names = self.scikit_data.feature_names
+        output_name = "target"
+        spec = skl_converter.convert(
+            self.scikit_model, input_names, "target"
+        ).get_spec()
+        self.assertIsNotNone(spec)
+
+        # Test the model class
+        self.assertIsNotNone(spec.description)
+        self.assertIsNotNone(spec.treeEnsembleClassifier)
+
+        # Test the interface class
+        self.assertEqual(spec.description.predictedFeatureName, "target")
+
+        # Test the inputs and outputs
+        self.assertEqual(len(spec.description.output), 2)
+        self.assertEqual(spec.description.output[0].name, "target")
+        self.assertEqual(
+            spec.description.output[0].type.WhichOneof("Type"), "int64Type"
+        )
+
+        for input_type in spec.description.input:
+            self.assertEqual(input_type.type.WhichOneof("Type"), "doubleType")
+        self.assertEqual(
+            sorted(input_names), sorted(map(lambda x: x.name, spec.description.input))
+        )
+
+        self.assertEqual(len(spec.pipelineClassifier.pipeline.models), 2)
+        tr = spec.pipelineClassifier.pipeline.models[
+            -1
+        ].treeEnsembleClassifier.treeEnsemble
+        self.assertIsNotNone(tr)
+        self.assertEqual(len(tr.nodes), self.scikit_model_node_count)
+
+    def test_conversion_bad_inputs(self):
+        # Error on converting an untrained model
+        with self.assertRaises(Exception):
+            # n_estimators default changed >= 0.22. Specify explicitly to match <0.22 behavior.
+            model = RandomForestClassifier(n_estimators=10)
+            spec = skl_converter.convert(model, "data", "out")
+
+        # Check the expected class during conversion.
+        with self.assertRaises(Exception):
+            from sklearn.preprocessing import OneHotEncoder
+
+            model = OneHotEncoder()
+            spec = skl_converter.convert(model, "data", "out")
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_random_forest_classifier_numeric.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_random_forest_classifier_numeric.py
new file mode 100644
index 00000000..b1be9b54
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_random_forest_classifier_numeric.py
@@ -0,0 +1,141 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import itertools
+import unittest
+from distutils.version import StrictVersion
+
+import numpy as np
+import pandas as pd
+import pytest
+
+from coremltools._deps import _HAS_SKLEARN, _SKLEARN_VERSION
+from coremltools.models.utils import (_is_macos, _macos_version,
+                                      evaluate_classifier)
+
+if _HAS_SKLEARN:
+    from sklearn.datasets import load_boston
+    from sklearn.ensemble import RandomForestClassifier
+
+    from coremltools.converters import sklearn as skl_converter
+
+
+@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.")
+class RandomForestClassificationBostonHousingScikitNumericTest(unittest.TestCase):
+    def _check_metrics(self, metrics, params={}):
+        self.assertEqual(
+            metrics["num_errors"],
+            0,
+            msg="Failed case %s.
Results %s" % (params, metrics), + ) + + def _train_convert_evaluate_assert(self, **scikit_params): + scikit_model = RandomForestClassifier(random_state=1, **scikit_params) + scikit_model.fit(self.X, self.target) + + # Convert the model + spec = skl_converter.convert(scikit_model, self.feature_names, self.output_name) + + if _is_macos() and _macos_version() >= (10, 13): + # Get predictions + df = pd.DataFrame(self.X, columns=self.feature_names) + df["target"] = scikit_model.predict(self.X) + + # Evaluate it + metrics = evaluate_classifier(spec, df, verbose=False) + self._check_metrics(metrics, scikit_params) + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class RandomForestBinaryClassifierBostonHousingScikitNumericTest( + RandomForestClassificationBostonHousingScikitNumericTest +): + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. + """ + # Load data and train model + scikit_data = load_boston() + self.X = scikit_data.data.astype("f").astype( + "d" + ) ## scikit-learn downcasts data + self.target = 1 * (scikit_data["target"] > scikit_data["target"].mean()) + self.feature_names = scikit_data.feature_names + self.output_name = "target" + self.scikit_data = scikit_data + + def test_simple_binary_classifier(self): + self._train_convert_evaluate_assert(max_depth=13) + + @pytest.mark.slow + def test_binary_classifier_stress_test(self): + + options = dict( + n_estimators=[1, 5, 10], + max_depth=[1, 5, None], + min_samples_split=[2, 10, 0.5], + min_samples_leaf=[1, 5], + min_weight_fraction_leaf=[0.0, 0.5], + max_leaf_nodes=[None, 20], + ) + + if _SKLEARN_VERSION >= StrictVersion("0.19"): + options["min_impurity_decrease"] = [1e-07, 0.1] + + # Make a cartesian product of all options + product = itertools.product(*options.values()) + args = [dict(zip(options.keys(), p)) for p in product] + + print("Testing a total of %s cases. This could take a while" % len(args)) + for it, arg in enumerate(args): + self._train_convert_evaluate_assert(**arg) + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class RandomForestMultiClassClassificationBostonHousingScikitNumericTest( + RandomForestClassificationBostonHousingScikitNumericTest +): + @classmethod + def setUpClass(self): + # Load data and train model + scikit_data = load_boston() + self.X = scikit_data.data.astype("f").astype( + "d" + ) ## scikit-learn downcasts data + t = scikit_data.target + num_classes = 3 + target = np.digitize(t, np.histogram(t, bins=num_classes - 1)[1]) - 1 + + # Save the data and the model + self.scikit_data = scikit_data + self.target = target + self.feature_names = scikit_data.feature_names + self.output_name = "target" + + def test_simple_multiclass(self): + self._train_convert_evaluate_assert() + + @pytest.mark.slow + def test_multiclass_stress_test(self): + options = dict( + n_estimators=[1, 5, 10], + max_depth=[1, 5, None], + min_samples_split=[2, 10, 0.5], + min_samples_leaf=[1, 5], + min_weight_fraction_leaf=[0.0, 0.5], + max_leaf_nodes=[None, 20], + ) + + if _SKLEARN_VERSION >= StrictVersion("0.19"): + options["min_impurity_decrease"] = [1e-07, 0.1] + + # Make a cartesian product of all options + product = itertools.product(*options.values()) + args = [dict(zip(options.keys(), p)) for p in product] + + print("Testing a total of %s cases. 
This could take a while" % len(args)) + for it, arg in enumerate(args): + self._train_convert_evaluate_assert(**arg) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_random_forest_regression.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_random_forest_regression.py new file mode 100644 index 00000000..0c263585 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_random_forest_regression.py @@ -0,0 +1,88 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import unittest + +from coremltools._deps import _HAS_SKLEARN + +if _HAS_SKLEARN: + from sklearn.ensemble import RandomForestRegressor + + from coremltools.converters import sklearn as skl_converter + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing scikit-learn. Skipping tests.") +class RandomForestRegressorScikitTest(unittest.TestCase): + """ + Unit test class for testing scikit-learn converter. + """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. + """ + from sklearn.datasets import load_boston + from sklearn.ensemble import RandomForestRegressor + + scikit_data = load_boston() + # n_estimators default changed >= 0.22. Specify explicitly to match <0.22 behavior. + scikit_model = RandomForestRegressor(random_state=1, n_estimators=10) + scikit_model.fit(scikit_data["data"], scikit_data["target"]) + + self.scikit_model_node_count = sum(map(lambda e: e.tree_.node_count, + scikit_model.estimators_)) + + # Save the data and the model + self.scikit_data = scikit_data + self.scikit_model = scikit_model + + def test_conversion(self): + input_names = self.scikit_data.feature_names + output_name = "target" + spec = skl_converter.convert( + self.scikit_model, input_names, "target" + ).get_spec() + self.assertIsNotNone(spec) + + # Test the model class + self.assertIsNotNone(spec.description) + + # Test the interface class + self.assertEqual(spec.description.predictedFeatureName, "target") + + # Test the inputs and outputs + self.assertEqual(len(spec.description.output), 1) + self.assertEqual(spec.description.output[0].name, "target") + self.assertEqual( + spec.description.output[0].type.WhichOneof("Type"), "doubleType" + ) + for input_type in spec.description.input: + self.assertEqual(input_type.type.WhichOneof("Type"), "doubleType") + self.assertEqual( + sorted(input_names), sorted(map(lambda x: x.name, spec.description.input)) + ) + + # Test the linear regression parameters. + self.assertEqual(len(spec.pipelineRegressor.pipeline.models), 2) + tr = spec.pipelineRegressor.pipeline.models[ + -1 + ].treeEnsembleRegressor.treeEnsemble + self.assertIsNotNone(tr) + self.assertEqual(len(tr.nodes), self.scikit_model_node_count) + + def test_conversion_bad_inputs(self): + # Error on converting an untrained model + with self.assertRaises(Exception): + # n_estimators default changed >= 0.22. Specify explicitly to match <0.22 behavior. + model = RandomForestRegressor(n_estimators=10) + spec = skl_converter.convert(model, "data", "out") + + # Check the expected class during covnersion. 
+ from sklearn.preprocessing import OneHotEncoder + + with self.assertRaises(Exception): + model = OneHotEncoder() + spec = skl_converter.convert(model, "data", "out") diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_random_forest_regression_numeric.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_random_forest_regression_numeric.py new file mode 100644 index 00000000..cdb9aed8 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_random_forest_regression_numeric.py @@ -0,0 +1,107 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools +import unittest + +import pandas as pd +import pytest + +from coremltools._deps import _HAS_SKLEARN +from coremltools.models.utils import (_is_macos, _macos_version, + evaluate_regressor) + +if _HAS_SKLEARN: + from sklearn.datasets import load_boston + from sklearn.ensemble import RandomForestRegressor + + from coremltools.converters import sklearn as skl_converter + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class RandomForestRegressorBostonHousingScikitNumericTest(unittest.TestCase): + """ + Unit test class for testing scikit-learn converter and running both models + """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. + """ + # Load data and train model + scikit_data = load_boston() + self.scikit_data = scikit_data + self.X = scikit_data.data.astype("f").astype( + "d" + ) ## scikit-learn downcasts data + self.target = scikit_data.target + self.feature_names = scikit_data.feature_names + self.output_name = "target" + + def _check_metrics(self, metrics, params={}): + """ + Check the metrics + """ + self.assertAlmostEqual( + metrics["rmse"], + 0.0, + delta=1e-5, + msg="Failed case %s. Results %s" % (params, metrics), + ) + self.assertAlmostEqual( + metrics["max_error"], + 0.0, + delta=1e-5, + msg="Failed case %s. 
Results %s" % (params, metrics), + ) + + def _train_convert_evaluate_assert(self, **scikit_params): + """ + Train a scikit-learn model, convert it and then evaluate it with CoreML + """ + scikit_model = RandomForestRegressor(random_state=1, **scikit_params) + scikit_model.fit(self.X, self.target) + + # Convert the model + spec = skl_converter.convert(scikit_model, self.feature_names, self.output_name) + + if _is_macos() and _macos_version() >= (10, 13): + # Get predictions + df = pd.DataFrame(self.X, columns=self.feature_names) + df["target"] = scikit_model.predict(self.X) + + # Evaluate it + metrics = evaluate_regressor(spec, df, verbose=False) + self._check_metrics(metrics, scikit_params) + + def test_boston_housing_simple_regression(self): + self._train_convert_evaluate_assert() + + def test_boston_housing_float_double_corner_case(self): + self._train_convert_evaluate_assert(max_depth=13) + + @pytest.mark.slow + def test_boston_housing_parameter_stress_test(self): + + ## These are all the options in decision tree regression of scikit-learn + options = dict( + criterion=["mse"], + n_estimators=[1, 5, 10], + max_depth=[1, 5], + min_samples_split=[2, 10, 0.5], + min_samples_leaf=[1, 5], + min_weight_fraction_leaf=[0.0, 0.5], + max_leaf_nodes=[None, 20], + min_impurity_decrease=[1e-07, 0.1, 0.0], + ) + + # Make a cartesian product of all options + product = itertools.product(*options.values()) + args = [dict(zip(options.keys(), p)) for p in product] + + print("Testing a total of %s cases. This could take a while" % len(args)) + for it, arg in enumerate(args): + self._train_convert_evaluate_assert(**arg) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_ridge_regression.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_ridge_regression.py new file mode 100644 index 00000000..6eabce89 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_ridge_regression.py @@ -0,0 +1,106 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import unittest + +import pandas as pd + +from coremltools._deps import _HAS_SKLEARN +from coremltools.models.utils import (_is_macos, _macos_version, + evaluate_regressor) + +if _HAS_SKLEARN: + from sklearn.datasets import load_boston + from sklearn.linear_model import Ridge + from sklearn.preprocessing import OneHotEncoder + + from coremltools.converters.sklearn import convert + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing scikitlearn. Skipping tests.") +class RidgeRegressionScikitTest(unittest.TestCase): + """ + Unit test class for testing scikit-learn converter. + """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. 
+ """ + scikit_data = load_boston() + scikit_model = Ridge() + scikit_model.fit(scikit_data["data"], scikit_data["target"]) + + # Save the data and the model + self.scikit_data = scikit_data + self.scikit_model = scikit_model + + def test_conversion(self): + input_names = self.scikit_data.feature_names + spec = convert(self.scikit_model, input_names, "target").get_spec() + self.assertIsNotNone(spec) + + # Test the model class + self.assertIsNotNone(spec.description) + + # Test the interface class + self.assertEqual(spec.description.predictedFeatureName, "target") + + # Test the inputs and outputs + self.assertEqual(len(spec.description.output), 1) + self.assertEqual(spec.description.output[0].name, "target") + self.assertEqual( + spec.description.output[0].type.WhichOneof("Type"), "doubleType" + ) + for input_type in spec.description.input: + self.assertEqual(input_type.type.WhichOneof("Type"), "doubleType") + self.assertEqual( + sorted(input_names), sorted(map(lambda x: x.name, spec.description.input)) + ) + + # Test the ridge regression parameters. + self.assertTrue( + spec.pipelineRegressor.pipeline.models[-1].HasField("glmRegressor") + ) + lr = spec.pipelineRegressor.pipeline.models[-1].glmRegressor + self.assertEqual(lr.offset, self.scikit_model.intercept_) + self.assertEqual(len(lr.weights), 1) + self.assertEqual(len(lr.weights[0].value), 13) + i = 0 + for w in lr.weights[0].value: + self.assertAlmostEqual(w, self.scikit_model.coef_[i]) + i = i + 1 + + def test_conversion_bad_inputs(self): + # Error on converting an untrained model + with self.assertRaises(TypeError): + model = Ridge() + spec = convert(model, "data", "out") + + # Check the expected class during conversion. + with self.assertRaises(TypeError): + model = OneHotEncoder() + spec = convert(model, "data", "out") + + @unittest.skipUnless( + _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+" + ) + def test_ridge_regression_evaluation(self): + """ + Check that the evaluation results are the same in scikit learn and coremltools + """ + input_names = self.scikit_data.feature_names + df = pd.DataFrame(self.scikit_data.data, columns=input_names) + + for normalize_value in (True, False): + cur_model = Ridge() + cur_model.fit(self.scikit_data["data"], self.scikit_data["target"]) + spec = convert(cur_model, input_names, "target") + + df["target"] = cur_model.predict(self.scikit_data.data) + + metrics = evaluate_regressor(spec, df) + self.assertAlmostEqual(metrics["max_error"], 0) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_standard_scalar.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_standard_scalar.py new file mode 100644 index 00000000..834a6ce3 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_standard_scalar.py @@ -0,0 +1,65 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. 
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import unittest
+
+import numpy as _np
+
+from coremltools._deps import _HAS_SKLEARN
+from coremltools.models.utils import (_is_macos, _macos_version,
+                                      evaluate_transformer)
+
+if _HAS_SKLEARN:
+    from sklearn.preprocessing import StandardScaler
+
+    from coremltools.converters import sklearn as converter
+
+
+@unittest.skipUnless(
+    _is_macos() and _macos_version() >= (10, 13), "Only supported on macOS 10.13+"
+)
+@unittest.skipIf(not _HAS_SKLEARN, "Missing scikit-learn. Skipping tests.")
+class StandardScalerTestCase(unittest.TestCase):
+    """
+    Unit test class for testing scikit-learn converter.
+    """
+
+    def test_random(self):
+        # Generate some random data
+        X = _np.random.random(size=(50, 3))
+
+        cur_model = StandardScaler()
+
+        output = cur_model.fit_transform(X)
+
+        spec = converter.convert(cur_model, ["a", "b", "c"], "out").get_spec()
+
+        metrics = evaluate_transformer(
+            spec,
+            [dict(zip(["a", "b", "c"], row)) for row in X],
+            [{"out": row} for row in output],
+        )
+
+        assert metrics["num_errors"] == 0
+
+    def test_boston(self):
+        from sklearn.datasets import load_boston
+
+        scikit_data = load_boston()
+        scikit_model = StandardScaler().fit(scikit_data.data)
+
+        spec = converter.convert(
+            scikit_model, scikit_data.feature_names, "out"
+        ).get_spec()
+
+        input_data = [
+            dict(zip(scikit_data.feature_names, row)) for row in scikit_data.data
+        ]
+
+        output_data = [{"out": row} for row in scikit_model.transform(scikit_data.data)]
+
+        metrics = evaluate_transformer(spec, input_data, output_data)
+
+        assert metrics["num_errors"] == 0
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_utils.py b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_utils.py
new file mode 100644
index 00000000..11db0456
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/test/sklearn_tests/test_utils.py
@@ -0,0 +1,49 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import unittest
+
+from coremltools._deps import _HAS_SKLEARN
+from coremltools.models import MLModel
+from coremltools.models.utils import _is_macos, _macos_version, rename_feature
+
+if _HAS_SKLEARN:
+    from sklearn.datasets import load_boston
+    from sklearn.linear_model import LinearRegression
+
+    from coremltools.converters import sklearn as converter
+
+
+@unittest.skipIf(not _HAS_SKLEARN, "Missing scikit-learn.
Skipping tests.") +class PipeLineRenameTests(unittest.TestCase): + @classmethod + def setUpClass(self): + scikit_data = load_boston() + feature_names = scikit_data.feature_names + + scikit_model = LinearRegression() + scikit_model.fit(scikit_data["data"], scikit_data["target"]) + + # Save the data and the model + self.scikit_data = scikit_data + self.scikit_model = scikit_model + + @unittest.skip("rdar://71638164") + def test_pipeline_rename(self): + # Convert + scikit_spec = converter.convert(self.scikit_model).get_spec() + model = MLModel(scikit_spec) + sample_data = self.scikit_data.data[0] + + # Rename + rename_feature(scikit_spec, "input", "renamed_input") + renamed_model = MLModel(scikit_spec) + + # Check the predictions + if _is_macos() and _macos_version() >= (10, 13): + out_dict = model.predict({"input": sample_data}) + out_dict_renamed = renamed_model.predict({"renamed_input": sample_data}) + self.assertAlmostEqual(list(out_dict.keys()), list(out_dict_renamed.keys())) + self.assertAlmostEqual( + list(out_dict.values()), list(out_dict_renamed.values()) + ) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/__init__.py b/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/__init__.py new file mode 100644 index 00000000..8aa13a28 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_boosted_trees_classifier.py b/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_boosted_trees_classifier.py new file mode 100644 index 00000000..08efb96c --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_boosted_trees_classifier.py @@ -0,0 +1,342 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import json +import tempfile +import unittest + +from coremltools._deps import _HAS_SKLEARN, _HAS_XGBOOST +from coremltools.converters import sklearn as skl_converter +from coremltools.models.utils import _macos_version + +if _HAS_SKLEARN: + from sklearn.ensemble import GradientBoostingClassifier + +if _HAS_XGBOOST: + import xgboost + + from coremltools.converters import xgboost as xgb_converter + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class GradientBoostingBinaryClassifierScikitTest(unittest.TestCase): + """ + Unit test class for testing scikit-learn converter. + """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. 
+ """ + from sklearn.datasets import load_boston + + scikit_data = load_boston() + scikit_model = GradientBoostingClassifier(random_state=1) + target = scikit_data["target"] > scikit_data["target"].mean() + scikit_model.fit(scikit_data["data"], target) + + s = 0 + for est in scikit_model.estimators_: + for e in est: + s = s + e.tree_.node_count + self.scikit_model_node_count = s + + # Save the data and the model + self.scikit_data = scikit_data + self.scikit_model = scikit_model + + def test_conversion(self): + input_names = self.scikit_data.feature_names + output_name = "target" + spec = skl_converter.convert( + self.scikit_model, input_names, "target" + ).get_spec() + self.assertIsNotNone(spec) + + # Test the model class + self.assertIsNotNone(spec.description) + self.assertIsNotNone(spec.treeEnsembleClassifier) + + # Test the interface class + self.assertEqual(spec.description.predictedFeatureName, "target") + + # Test the inputs and outputs + self.assertEqual(len(spec.description.output), 2) + self.assertEqual(spec.description.output[0].name, "target") + self.assertEqual( + spec.description.output[0].type.WhichOneof("Type"), "int64Type" + ) + for input_type in spec.description.input: + self.assertEqual(input_type.type.WhichOneof("Type"), "doubleType") + self.assertEqual( + sorted(input_names), sorted(map(lambda x: x.name, spec.description.input)) + ) + + # Test the linear regression parameters. + tr = spec.pipelineClassifier.pipeline.models[ + 1 + ].treeEnsembleClassifier.treeEnsemble + self.assertIsNotNone(tr) + self.assertEqual(len(tr.nodes), self.scikit_model_node_count) + + def test_conversion_bad_inputs(self): + # Error on converting an untrained model + with self.assertRaises(Exception): + model = GradientBoostingClassifier() + spec = skl_converter.convert(model, "data", "out") + + # Check the expected class during covnersion. + from sklearn.preprocessing import OneHotEncoder + + with self.assertRaises(Exception): + model = OneHotEncoder() + spec = skl_converter.convert(model, "data", "out") + +class GradientBoostingMulticlassClassifierScikitTest(unittest.TestCase): + """ + Unit test class for testing scikit-learn converter. + """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. 
+ """ + import numpy as np + from sklearn.datasets import load_boston + + scikit_data = load_boston() + scikit_model = GradientBoostingClassifier(random_state=1) + t = scikit_data.target + target = np.digitize(t, np.histogram(t)[1]) - 1 + scikit_model.fit(scikit_data.data, target) + self.target = target + + s = 0 + for est in scikit_model.estimators_: + for e in est: + s = s + e.tree_.node_count + self.scikit_model_node_count = s + + # Save the data and the model + self.scikit_data = scikit_data + self.scikit_model = scikit_model + + def test_conversion(self): + input_names = self.scikit_data.feature_names + output_name = "target" + spec = skl_converter.convert( + self.scikit_model, input_names, "target" + ).get_spec() + self.assertIsNotNone(spec) + + # Test the model class + self.assertIsNotNone(spec.description) + self.assertEqual(spec.description.predictedFeatureName, "target") + + # Test the inputs and outputs + self.assertEqual(len(spec.description.output), 2) + self.assertEqual(spec.description.output[0].name, "target") + self.assertEqual( + spec.description.output[0].type.WhichOneof("Type"), "int64Type" + ) + + for input_type in spec.description.input: + self.assertEqual(input_type.type.WhichOneof("Type"), "doubleType") + self.assertEqual( + sorted(input_names), sorted(map(lambda x: x.name, spec.description.input)) + ) + + self.assertEqual(len(spec.pipelineClassifier.pipeline.models), 2) + tr = spec.pipelineClassifier.pipeline.models[ + -1 + ].treeEnsembleClassifier.treeEnsemble + self.assertIsNotNone(tr) + self.assertEqual(len(tr.nodes), self.scikit_model_node_count) + + def test_conversion_bad_inputs(self): + # Error on converting an untrained model + with self.assertRaises(Exception): + model = GradientBoostingClassifier() + spec = skl_converter.convert(model, "data", "out") + + # Check the expected class during covnersion. + from sklearn.preprocessing import OneHotEncoder + + with self.assertRaises(Exception): + model = OneHotEncoder() + spec = skl_converter.convert(model, "data", "out") + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +@unittest.skipIf(not _HAS_XGBOOST, "Skipping, no xgboost") +class GradientBoostingBinaryClassifierXGboostTest(unittest.TestCase): + """ + Unit test class for testing xgboost converter. + """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. 
+ """ + from sklearn.datasets import load_boston + + scikit_data = load_boston() + self.xgb_model = xgboost.XGBClassifier() + target = scikit_data["target"] > scikit_data["target"].mean() + self.xgb_model.fit(scikit_data["data"], target) + + # Save the data and the model + self.scikit_data = scikit_data + + def test_conversion(self): + input_names = self.scikit_data.feature_names + output_name = "target" + spec = xgb_converter.convert( + self.xgb_model, input_names, output_name, mode="classifier" + ).get_spec() + self.assertIsNotNone(spec) + + # Test the model class + self.assertIsNotNone(spec.description) + self.assertIsNotNone(spec.treeEnsembleClassifier) + + # Test the interface class + self.assertEqual(spec.description.predictedFeatureName, output_name) + + # Test the inputs and outputs + self.assertEqual(len(spec.description.output), 2) + self.assertEqual(spec.description.output[0].name, output_name) + self.assertEqual( + spec.description.output[0].type.WhichOneof("Type"), "int64Type" + ) + for input_type in spec.description.input: + self.assertEqual(input_type.type.WhichOneof("Type"), "doubleType") + self.assertEqual( + sorted(input_names), sorted(map(lambda x: x.name, spec.description.input)) + ) + + # Test the linear regression parameters. + tr = spec.treeEnsembleClassifier.treeEnsemble + self.assertIsNotNone(tr) + + def test_conversion_bad_inputs(self): + # Error on converting an untrained model + with self.assertRaises(Exception): + model = xgboost.XGBClassifier() + spec = xgb_converter.convert(model, "data", "out", mode="classifier") + + # Check the expected class during covnersion. + with self.assertRaises(Exception): + model = xgboost.XGBRegressor() + spec = xgb_converter.convert(model, "data", "out", mode="classifier") + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +@unittest.skipIf(not _HAS_XGBOOST, "Skipping, no xgboost") +class GradientBoostingMulticlassClassifierXGboostTest(unittest.TestCase): + """ + Unit test class for testing xgboost converter. + """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. + """ + import numpy as np + from sklearn.datasets import load_boston + + scikit_data = load_boston() + t = scikit_data.target + target = np.digitize(t, np.histogram(t)[1]) - 1 + dtrain = xgboost.DMatrix( + scikit_data.data, label=target, feature_names=scikit_data.feature_names + ) + self.xgb_model = xgboost.train({}, dtrain) + self.target = target + + # Save the data and the model + self.scikit_data = scikit_data + self.n_classes = len(np.unique(self.target)) + + def test_conversion(self): + + input_names = self.scikit_data.feature_names + output_name = "target" + spec = xgb_converter.convert( + self.xgb_model, + input_names, + output_name, + mode="classifier", + n_classes=self.n_classes, + ).get_spec() + self.assertIsNotNone(spec) + + # Test the model class + self.assertIsNotNone(spec.description) + self.assertEqual(spec.description.predictedFeatureName, output_name) + + # Test the inputs and outputs + self.assertEqual(len(spec.description.output), 2) + self.assertEqual(spec.description.output[0].name, output_name) + self.assertEqual( + spec.description.output[0].type.WhichOneof("Type"), "int64Type" + ) + + for input_type in spec.description.input: + self.assertEqual(input_type.type.WhichOneof("Type"), "doubleType") + self.assertEqual( + sorted(input_names), sorted(map(lambda x: x.name, spec.description.input)) + ) + + # Test the linear regression parameters. 
+ tr = spec.treeEnsembleClassifier.treeEnsemble + self.assertIsNotNone(tr) + + def test_conversion_from_file(self): + import numpy as np + + output_name = "target" + feature_names = self.scikit_data.feature_names + + xgb_model_json = tempfile.mktemp("xgb_tree_model_classifier.json") + xgb_json_out = self.xgb_model.get_dump(with_stats=True, dump_format="json") + with open(xgb_model_json, "w") as f: + json.dump(xgb_json_out, f) + spec = xgb_converter.convert( + xgb_model_json, + feature_names, + output_name, + mode="classifier", + n_classes=self.n_classes, + ).get_spec() + self.assertIsNotNone(spec) + + # Test the model class + self.assertIsNotNone(spec.description) + self.assertIsNotNone(spec.treeEnsembleRegressor) + + # Test the interface class + self.assertEqual(spec.description.predictedFeatureName, output_name) + + # Test the inputs and outputs + self.assertEqual(len(spec.description.output), 2) + self.assertEqual(spec.description.output[0].name, output_name) + self.assertEqual( + spec.description.output[0].type.WhichOneof("Type"), "int64Type" + ) + for input_type in spec.description.input: + self.assertEqual(input_type.type.WhichOneof("Type"), "doubleType") + self.assertEqual( + sorted(self.scikit_data.feature_names), + sorted(map(lambda x: x.name, spec.description.input)), + ) + + # Test the linear regression parameters. + tr = spec.treeEnsembleClassifier.treeEnsemble + self.assertIsNotNone(tr) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_boosted_trees_classifier_numeric.py b/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_boosted_trees_classifier_numeric.py new file mode 100644 index 00000000..c246f3fc --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_boosted_trees_classifier_numeric.py @@ -0,0 +1,264 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools +import unittest + +import numpy as np +import pandas as pd +import pytest + +from coremltools._deps import _HAS_SKLEARN, _HAS_XGBOOST +from coremltools.models.utils import (_is_macos, _macos_version, + evaluate_classifier, + evaluate_classifier_with_probabilities) + +if _HAS_SKLEARN: + from sklearn.datasets import load_boston + from sklearn.ensemble import GradientBoostingClassifier + + from coremltools.converters import sklearn as skl_converter + +if _HAS_XGBOOST: + import xgboost + + from coremltools.converters import xgboost as xgb_converter + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class BoostedTreeClassificationBostonHousingScikitNumericTest(unittest.TestCase): + """ + Unit test class for testing scikit-learn converter and running both models + """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. + """ + # Load data and train model + scikit_data = load_boston() + self.scikit_data = scikit_data + self.X = scikit_data.data.astype("f").astype( + "d" + ) ## scikit-learn downcasts data + self.target = 1 * (scikit_data["target"] > scikit_data["target"].mean()) + self.feature_names = scikit_data.feature_names + self.output_name = "target" + + def _check_metrics(self, metrics, params={}): + self.assertEqual( + metrics["num_errors"], + 0, + msg="Failed case %s. 
Results %s" % (params, metrics), + ) + + def _train_convert_evaluate_assert(self, **scikit_params): + """ + Train a scikit-learn model, convert it and then evaluate it with CoreML + """ + scikit_model = GradientBoostingClassifier(random_state=1, **scikit_params) + scikit_model.fit(self.X, self.target) + + # Convert the model + spec = skl_converter.convert(scikit_model, self.feature_names, self.output_name) + + if hasattr(scikit_model, '_init_decision_function') and scikit_model.n_classes_ > 2: + # fix initial default prediction for multiclass classification + # https://github.com/scikit-learn/scikit-learn/pull/12983 + assert hasattr(scikit_model, 'init_') + assert hasattr(scikit_model.init_, 'priors') + scikit_model.init_.priors = np.log(scikit_model.init_.priors) + + if _is_macos() and _macos_version() >= (10, 13): + # Get predictions + df = pd.DataFrame(self.X, columns=self.feature_names) + df["target"] = scikit_model.predict(self.X) + + # Evaluate it + metrics = evaluate_classifier(spec, df) + self._check_metrics(metrics) + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class BoostedTreeBinaryClassificationBostonHousingScikitNumericTest( + BoostedTreeClassificationBostonHousingScikitNumericTest +): + def test_simple_binary_classifier(self): + self._train_convert_evaluate_assert() + + @pytest.mark.slow + def test_binary_classifier_stress_test(self): + options = dict( + max_depth=[1, 10, None], + min_samples_split=[2, 0.5], + min_samples_leaf=[1, 5], + min_weight_fraction_leaf=[0.0, 0.5], + max_features=[None, 1], + max_leaf_nodes=[None, 20], + ) + + # Make a cartesian product of all options + product = itertools.product(*options.values()) + args = [dict(zip(options.keys(), p)) for p in product] + + print("Testing a total of %s cases. This could take a while" % len(args)) + for it, arg in enumerate(args): + self._train_convert_evaluate_assert(**arg) + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class BoostedTreeMultiClassClassificationBostonHousingScikitNumericTest( + BoostedTreeClassificationBostonHousingScikitNumericTest +): + @classmethod + def setUpClass(self): + # Load data and train model + scikit_data = load_boston() + num_classes = 3 + self.X = scikit_data.data.astype("f").astype( + "d" + ) ## scikit-learn downcasts data + t = scikit_data.target + target = np.digitize(t, np.histogram(t, bins=num_classes - 1)[1]) - 1 + + # Save the data and the model + self.scikit_data = scikit_data + self.target = target + self.feature_names = scikit_data.feature_names + self.output_name = "target" + + def test_simple_multiclass(self): + self._train_convert_evaluate_assert() + + @pytest.mark.slow + def test_multiclass_stress_test(self): + options = dict( + max_depth=[1, 10, None], + min_samples_split=[2, 0.5], + min_samples_leaf=[1, 5], + min_weight_fraction_leaf=[0.0, 0.5], + max_features=[None, 1], + max_leaf_nodes=[None, 20], + ) + + # Make a cartesian product of all options + product = itertools.product(*options.values()) + args = [dict(zip(options.keys(), p)) for p in product] + + print("Testing a total of %s cases. This could take a while" % len(args)) + for it, arg in enumerate(args): + self._train_convert_evaluate_assert(**arg) + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. 
Skipping tests.") +@unittest.skipIf(not _HAS_XGBOOST, "Skipping, no xgboost") +class BoostedTreeClassificationBostonHousingXGboostNumericTest(unittest.TestCase): + """ + Unit test class for testing xgboost converter and running both models + """ + + def _check_metrics(self, metrics, params={}): + self.assertEqual( + metrics["num_errors"], + 0, + msg="Failed case %s. Results %s" % (params, metrics), + ) + + def _train_convert_evaluate_assert(self, **xgboost_params): + """ + Train a scikit-learn model, convert it and then evaluate it with CoreML + """ + xgb_model = xgboost.XGBClassifier(**xgboost_params) + xgb_model.fit(self.X, self.target) + + # Convert the model + spec = xgb_converter.convert( + xgb_model, self.feature_names, self.output_name, mode="classifier" + ) + + if _is_macos() and _macos_version() >= (10, 13): + # Get predictions + df = pd.DataFrame(self.X, columns=self.feature_names) + probabilities = xgb_model.predict_proba(self.X) + df["classProbability"] = [ + dict(zip(xgb_model.classes_, cur_vals)) for cur_vals in probabilities + ] + metrics = evaluate_classifier_with_probabilities( + spec, df, probabilities="classProbability", verbose=False + ) + self.assertEqual(metrics["num_key_mismatch"], 0) + self.assertLess(metrics["max_probability_error"], 1e-3) + + def _classifier_stress_test(self): + options = dict( + max_depth=[1, 10], min_child_weight=[2, 0.5], max_delta_step=[1, 5], + ) + # Make a cartesian product of all options + product = itertools.product(*options.values()) + args = [dict(zip(options.keys(), p)) for p in product] + + print("Testing a total of %s cases. This could take a while" % len(args)) + for it, arg in enumerate(args): + self._train_convert_evaluate_assert(**arg) + + +@unittest.skipIf(_macos_version() >= (10, 16), "rdar://problem/84898245") +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +@unittest.skipIf(not _HAS_XGBOOST, "Skipping, no xgboost") +class BoostedTreeBinaryClassificationBostonHousingXGboostNumericTest( + BoostedTreeClassificationBostonHousingXGboostNumericTest +): + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. + """ + # Load data and train model + scikit_data = load_boston() + self.scikit_data = scikit_data + self.X = scikit_data.data.astype("f").astype( + "d" + ) ## scikit-learn downcasts data + self.target = 1 * (scikit_data["target"] > scikit_data["target"].mean()) + self.feature_names = scikit_data.feature_names + self.output_name = "target" + + def test_simple_binary_classifier(self): + self._train_convert_evaluate_assert() + + @pytest.mark.slow + def test_binary_classifier_stress_test(self): + self._classifier_stress_test() + + +@unittest.skipIf(_macos_version() >= (12, 0), "rdar://problem/84898245") +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. 
Skipping tests.") +@unittest.skipIf(not _HAS_XGBOOST, "Skipping, no xgboost") +class BoostedTreeMultiClassClassificationBostonHousingXGboostNumericTest( + BoostedTreeClassificationBostonHousingXGboostNumericTest +): + @classmethod + def setUpClass(self): + scikit_data = load_boston() + num_classes = 3 + self.X = scikit_data.data.astype("f").astype( + "d" + ) ## scikit-learn downcasts data + t = scikit_data.target + target = np.digitize(t, np.histogram(t, bins=num_classes - 1)[1]) - 1 + + # Save the data and the model + self.scikit_data = scikit_data + self.target = target + self.feature_names = scikit_data.feature_names + self.output_name = "target" + + def test_simple_multiclass(self): + self._train_convert_evaluate_assert() + + @pytest.mark.slow + def test_multiclass_stress_test(self): + self._classifier_stress_test() diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_boosted_trees_regression.py b/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_boosted_trees_regression.py new file mode 100644 index 00000000..2ed9fa32 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_boosted_trees_regression.py @@ -0,0 +1,218 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import json +import tempfile +import unittest + +from coremltools._deps import _HAS_SKLEARN, _HAS_XGBOOST +from coremltools.models.utils import _macos_version + +if _HAS_XGBOOST: + import xgboost + + from coremltools.converters import xgboost as xgb_converter + +if _HAS_SKLEARN: + from sklearn.datasets import load_boston + from sklearn.ensemble import GradientBoostingRegressor + from sklearn.preprocessing import OneHotEncoder + + from coremltools.converters import sklearn as skl_converter + +@unittest.skipIf(not _HAS_SKLEARN, "Missing scikit-learn. Skipping tests.") +class GradientBoostingRegressorScikitTest(unittest.TestCase): + """ + Unit test class for testing scikit-learn converter. + """ + + @classmethod + def setUpClass(cls): + """ + Set up the unit test by loading the dataset and training a model. 
+ """ + if not _HAS_SKLEARN: + return + + scikit_data = load_boston() + scikit_model = GradientBoostingRegressor(random_state=1) + scikit_model.fit(scikit_data["data"], scikit_data["target"]) + + s = 0 + for est in scikit_model.estimators_: + for e in est: + s = s + e.tree_.node_count + cls.scikit_model_node_count = s + + # Save the data and the model + cls.scikit_data = scikit_data + cls.scikit_model = scikit_model + + def test_conversion(self): + input_names = self.scikit_data.feature_names + output_name = "target" + spec = skl_converter.convert( + self.scikit_model, input_names, "target" + ).get_spec() + self.assertIsNotNone(spec) + + # Test the model class + self.assertIsNotNone(spec.description) + + # Test the interface class + self.assertEqual(spec.description.predictedFeatureName, "target") + + # Test the inputs and outputs + self.assertEqual(len(spec.description.output), 1) + self.assertEqual(spec.description.output[0].name, "target") + self.assertEqual( + spec.description.output[0].type.WhichOneof("Type"), "doubleType" + ) + for input_type in spec.description.input: + self.assertEqual(input_type.type.WhichOneof("Type"), "doubleType") + self.assertEqual( + sorted(input_names), sorted(map(lambda x: x.name, spec.description.input)) + ) + + tr = spec.pipelineRegressor.pipeline.models[ + -1 + ].treeEnsembleRegressor.treeEnsemble + self.assertIsNotNone(tr) + self.assertEqual(len(tr.nodes), self.scikit_model_node_count) + + def test_conversion_bad_inputs(self): + + # Error on converting an untrained model + with self.assertRaises(Exception): + model = GradientBoostingRegressor() + spec = skl_converter.convert(model, "data", "out") + + # Check the expected class during covnersion. + with self.assertRaises(Exception): + model = OneHotEncoder() + spec = skl_converter.convert(model, "data", "out") + + +@unittest.skipIf(_macos_version() >= (10, 16), "rdar://problem/84898245") +@unittest.skipIf(not _HAS_SKLEARN, "Missing scikit-learn. Skipping tests.") +@unittest.skipIf(not _HAS_XGBOOST, "Skipping, no xgboost") +class BoostedTreeRegressorXGboostTest(unittest.TestCase): + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. 
+ """ + if not _HAS_XGBOOST: + return + if not _HAS_SKLEARN: + return + + scikit_data = load_boston() + dtrain = xgboost.DMatrix( + scikit_data.data, + label=scikit_data.target, + feature_names=scikit_data.feature_names, + ) + xgb_model = xgboost.train({}, dtrain, 1) + + # Save the data and the model + self.scikit_data = scikit_data + self.xgb_model = xgb_model + self.feature_names = self.scikit_data.feature_names + + def test_conversion(self): + + feature_names = self.scikit_data.feature_names + output_name = "target" + spec = xgb_converter.convert(self.xgb_model, feature_names, "target").get_spec() + self.assertIsNotNone(spec) + + # Test the model class + self.assertIsNotNone(spec.description) + self.assertIsNotNone(spec.treeEnsembleRegressor) + + # Test the interface class + self.assertEqual(spec.description.predictedFeatureName, "target") + + # Test the inputs and outputs + self.assertEqual(len(spec.description.output), 1) + self.assertEqual(spec.description.output[0].name, "target") + self.assertEqual( + spec.description.output[0].type.WhichOneof("Type"), "doubleType" + ) + for input_type in spec.description.input: + self.assertEqual(input_type.type.WhichOneof("Type"), "doubleType") + self.assertEqual( + sorted(self.feature_names), + sorted(map(lambda x: x.name, spec.description.input)), + ) + + # Test the linear regression parameters. + tr = spec.treeEnsembleRegressor.treeEnsemble + self.assertIsNotNone(tr) + self.assertEqual(len(tr.nodes), 23) + + def test_conversion_from_file(self): + + output_name = "target" + feature_names = self.feature_names + + xgb_model_json = tempfile.mktemp("tree_model.json") + xgb_json_out = self.xgb_model.get_dump(dump_format="json") + with open(xgb_model_json, "w") as f: + json.dump(xgb_json_out, f) + spec = xgb_converter.convert(xgb_model_json, feature_names, "target").get_spec() + self.assertIsNotNone(spec) + + # Test the model class + self.assertIsNotNone(spec.description) + self.assertIsNotNone(spec.treeEnsembleRegressor) + + # Test the interface class + self.assertEqual(spec.description.predictedFeatureName, "target") + + # Test the inputs and outputs + self.assertEqual(len(spec.description.output), 1) + self.assertEqual(spec.description.output[0].name, "target") + self.assertEqual( + spec.description.output[0].type.WhichOneof("Type"), "doubleType" + ) + for input_type in spec.description.input: + self.assertEqual(input_type.type.WhichOneof("Type"), "doubleType") + self.assertEqual( + sorted(self.feature_names), + sorted(map(lambda x: x.name, spec.description.input)), + ) + + # Test the linear regression parameters. 
+ tr = spec.treeEnsembleRegressor.treeEnsemble + self.assertIsNotNone(tr) + self.assertEqual(len(tr.nodes), 23) + + def test_unsupported_conversion(self): + + feature_names = self.scikit_data.feature_names + output_name = "target" + xgb_model = xgboost.XGBRegressor(objective="reg:gamma") + xgb_model.fit(self.scikit_data.data, self.scikit_data.target) + with self.assertRaises(ValueError): + spec = xgb_converter.convert(xgb_model, feature_names, "target") + + xgb_model = xgboost.XGBRegressor(objective="reg:tweedie") + xgb_model.fit(self.scikit_data.data, self.scikit_data.target) + with self.assertRaises(ValueError): + spec = xgb_converter.convert(xgb_model, feature_names, "target") + + def test_conversion_bad_inputs(self): + + # Error on converting an untrained model + with self.assertRaises(TypeError): + model = GradientBoostingRegressor() + spec = xgb_converter.convert(model, "data", "out") + + # Check the expected class during conversion + with self.assertRaises(TypeError): + model = OneHotEncoder() + spec = xgb_converter.convert(model, "data", "out") diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_boosted_trees_regression_numeric.py b/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_boosted_trees_regression_numeric.py new file mode 100644 index 00000000..98ea3022 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_boosted_trees_regression_numeric.py @@ -0,0 +1,309 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools +import unittest + +import pandas as pd +import pytest + +from coremltools._deps import _HAS_SKLEARN, _HAS_XGBOOST +from coremltools.models.utils import (_is_macos, _macos_version, + evaluate_regressor) + +if _HAS_XGBOOST: + import xgboost + + from coremltools.converters import xgboost as xgb_converter + +if _HAS_SKLEARN: + from sklearn.datasets import load_boston + from sklearn.ensemble import GradientBoostingRegressor + + from coremltools.converters import sklearn as skl_converter + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class GradientBoostingRegressorBostonHousingScikitNumericTest(unittest.TestCase): + @classmethod + def setUpClass(self): + # Load data and train model + scikit_data = load_boston() + self.scikit_data = scikit_data + self.X = scikit_data["data"] + self.target = scikit_data["target"] + self.feature_names = scikit_data.feature_names + self.output_name = "target" + + def _check_metrics(self, metrics, params={}): + self.assertAlmostEqual( + metrics["rmse"], + 0, + delta=1e-5, + msg="Failed case %s. Results %s" % (params, metrics), + ) + self.assertAlmostEqual( + metrics["max_error"], + 0, + delta=1e-5, + msg="Failed case %s. 
Results %s" % (params, metrics), + ) + + def _train_convert_evaluate_assert(self, **scikit_params): + scikit_model = GradientBoostingRegressor(random_state=1, **scikit_params) + scikit_model.fit(self.X, self.target) + + # Convert the model + spec = skl_converter.convert(scikit_model, self.feature_names, self.output_name) + + if _is_macos() and _macos_version() >= (10, 13): + # Get predictions + df = pd.DataFrame(self.X, columns=self.feature_names) + df["target"] = scikit_model.predict(self.X) + + # Evaluate it + metrics = evaluate_regressor(spec, df, "target", verbose=False) + self._check_metrics(metrics, scikit_params) + + def test_boston_housing_simple_regression(self): + self._train_convert_evaluate_assert() + + @pytest.mark.slow + def test_boston_housing_parameter_stress_test(self): + + options = dict( + max_depth=[1, 10, None], + min_samples_split=[2, 0.5], + min_samples_leaf=[1, 5], + min_weight_fraction_leaf=[0.0, 0.5], + max_features=[None, 1], + max_leaf_nodes=[None, 20], + ) + + # Make a cartesian product of all options + product = itertools.product(*options.values()) + args = [dict(zip(options.keys(), p)) for p in product] + + print("Testing a total of %s cases. This could take a while" % len(args)) + for it, arg in enumerate(args): + self._train_convert_evaluate_assert(**arg) + + +@unittest.skipIf(_macos_version() >= (12, 0), "rdar://problem/84898245") +@unittest.skipIf(not _HAS_XGBOOST, "Missing xgboost. Skipping") +@unittest.skipIf(not _HAS_SKLEARN, "Missing scikit-learn. Skipping tests.") +class XgboostBoosterBostonHousingNumericTest(unittest.TestCase): + @classmethod + def setUpClass(self): + if not _HAS_XGBOOST: + return + if not _HAS_SKLEARN: + return + + # Load data and train model + scikit_data = load_boston() + self.X = scikit_data.data.astype("f").astype("d") + self.dtrain = xgboost.DMatrix( + scikit_data.data, + label=scikit_data.target, + feature_names=scikit_data.feature_names, + ) + self.feature_names = scikit_data.feature_names + self.output_name = "target" + + def _check_metrics(self, metrics, allowed_error={}, params={}): + """ + Check the metrics + """ + self.assertAlmostEqual( + metrics["rmse"], + allowed_error.get("rmse", 0), + delta=1e-2, + msg="Failed case %s. Results %s" % (params, metrics), + ) + self.assertAlmostEqual( + metrics["max_error"], + allowed_error.get("max_error", 0), + delta=1e-2, + msg="Failed case %s. Results %s" % (params, metrics), + ) + + def _train_convert_evaluate_assert(self, bt_params={}, allowed_error={}, **params): + """ + Set up the unit test by loading the dataset and training a model. 
+        """
+        # Train a model
+        xgb_model = xgboost.train(bt_params, self.dtrain, **params)
+
+        # Convert the model
+        spec = xgb_converter.convert(
+            xgb_model, self.feature_names, self.output_name, force_32bit_float=False
+        )
+
+        if _is_macos() and _macos_version() >= (10, 13):
+            # Get predictions
+            df = pd.DataFrame(self.X, columns=self.feature_names)
+            df["target"] = xgb_model.predict(self.dtrain)
+
+            # Evaluate it
+            metrics = evaluate_regressor(spec, df, target="target", verbose=False)
+            self._check_metrics(metrics, allowed_error, bt_params)
+
+    def test_boston_housing_simple_decision_tree_regression(self):
+        self._train_convert_evaluate_assert(num_boost_round=1)
+
+    def test_boston_housing_simple_boosted_tree_regression(self):
+        self._train_convert_evaluate_assert(num_boost_round=10)
+
+    def test_boston_housing_simple_random_forest_regression(self):
+        self._train_convert_evaluate_assert(bt_params={"subsample": 0.5},
+                                            allowed_error={"rmse": 0.004, "max_error": 0.09})
+
+    def test_boston_housing_float_double_corner_case(self):
+        self._train_convert_evaluate_assert(
+            {
+                "colsample_bytree": 1,
+                "colsample_bylevel": 1,
+                "scale_pos_weight": 1,
+                "learning_rate": 0.5,
+                "max_delta_step": 0,
+                "min_child_weight": 1,
+                "n_estimators": 1,
+                "subsample": 0.5,
+                "objective": "reg:linear",
+                "max_depth": 5,
+            },
+            num_boost_round=2,
+        )
+
+    @pytest.mark.slow
+    def test_boston_housing_parameter_stress_test(self):
+
+        options = dict(
+            max_depth=[1, 5],
+            learning_rate=[0.1, 0.5],
+            n_estimators=[1, 10],
+            min_child_weight=[1, 2],
+            max_delta_step=[0, 0.1],
+            colsample_bytree=[1, 0.5],
+            colsample_bylevel=[1, 0.5],
+            scale_pos_weight=[1],
+            objective=["reg:linear"],
+        )
+
+        # Make a cartesian product of all options
+        product = itertools.product(*options.values())
+        args = [dict(zip(options.keys(), p)) for p in product]
+
+        print("Testing a total of %s cases. This could take a while" % len(args))
+        for it, arg in enumerate(args):
+            self._train_convert_evaluate_assert(arg)
+
+
+@unittest.skipIf(_macos_version() >= (12, 0), "rdar://problem/84898245")
+@unittest.skipIf(not _HAS_XGBOOST, "Missing xgboost. Skipping")
+@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.")
+class XGboostRegressorBostonHousingNumericTest(unittest.TestCase):
+    @classmethod
+    def setUpClass(self):
+        """
+        Set up the unit test by loading the dataset and training a model.
+        """
+
+        # Load data and train model
+        scikit_data = load_boston()
+
+        self.X = scikit_data.data
+        self.scikit_data = scikit_data
+        self.target = scikit_data.target
+        self.feature_names = scikit_data.feature_names
+        self.output_name = "target"
+
+    def _check_metrics(self, metrics, params={}, allowed_error={}):
+        self.assertAlmostEqual(
+            metrics["rmse"],
+            allowed_error.get("rmse", 0),
+            delta=1e-2,
+            msg="Failed case %s. Results %s" % (params, metrics),
+        )
+        self.assertAlmostEqual(
+            metrics["max_error"],
+            allowed_error.get("max_error", 0),
+            delta=1e-2,
+            msg="Failed case %s. Results %s" % (params, metrics),
+        )
+
+    def _train_convert_evaluate_assert(self, bt_params={}, allowed_error={}, **params):
+        """
+        Train a model, convert it to Core ML, and compare its predictions.
+        """
+        # Train a model
+        xgb_model = xgboost.XGBRegressor(**params)
+        xgb_model.fit(self.X, self.target)
+
+        # Convert the model (pass feature names explicitly; the sklearn wrapper does not carry them)
+        spec = xgb_converter.convert(
+            xgb_model, self.feature_names, self.output_name, force_32bit_float=False
+        )
+
+        if _is_macos() and _macos_version() >= (10, 13):
+            # Get predictions
+            df = pd.DataFrame(self.X, columns=self.feature_names)
+            df["target"] = xgb_model.predict(self.X)
+
+            # Evaluate it
+            metrics = evaluate_regressor(spec, df, target="target", verbose=False)
+            self._check_metrics(metrics, params, allowed_error)
+
+    def test_boston_housing_simple_boosted_tree_regression(self):
+        self._train_convert_evaluate_assert()
+
+    def test_boston_housing_simple_random_forest_regression(self):
+        self._train_convert_evaluate_assert(
+            allowed_error={"rmse": 0.05, "max_error": 0.81}, subsample=0.5
+        )
+
+    def test_boston_housing_simple_decision_tree_regression(self):
+        self._train_convert_evaluate_assert(n_estimators=1)
+
+    def test_boston_housing_float_double_corner_case(self):
+        self._train_convert_evaluate_assert(
+            **{
+                "colsample_bytree": 1,
+                "colsample_bylevel": 1,
+                "scale_pos_weight": 1,
+                "learning_rate": 0.1,
+                "max_delta_step": 0,
+                "min_child_weight": 1,
+                "n_estimators": 10,
+                "subsample": 0.3,
+                "objective": "reg:linear",
+                "max_depth": 1,
+            }
+        )
+
+    @pytest.mark.slow
+    def test_boston_housing_parameter_stress_test(self):
+
+        options = dict(
+            max_depth=[1, 5],
+            learning_rate=[0.1, 0.5],
+            n_estimators=[1, 10],
+            objective=["reg:linear"],
+            min_child_weight=[1, 2],
+            max_delta_step=[0, 0.1],
+            subsample=[1, 0.5, 0.3],
+            colsample_bytree=[1, 0.5],
+            colsample_bylevel=[1, 0.5],
+            scale_pos_weight=[1],
+        )
+
+        # Make a cartesian product of all options
+        product = itertools.product(*options.values())
+        args = [dict(zip(options.keys(), p)) for p in product]
+
+        print("Testing a total of %s cases. This could take a while" % len(args))
+        for it, arg in enumerate(args):
+            self._train_convert_evaluate_assert(**arg)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_decision_tree_classifier.py b/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_decision_tree_classifier.py
new file mode 100644
index 00000000..8df139e6
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_decision_tree_classifier.py
@@ -0,0 +1,150 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import unittest
+
+from coremltools._deps import _HAS_SKLEARN, _HAS_XGBOOST
+
+if _HAS_SKLEARN:
+    from sklearn.tree import DecisionTreeClassifier
+
+    from coremltools.converters.sklearn import convert as skl_converter
+
+if _HAS_XGBOOST:
+    pass
+
+
+@unittest.skipIf(not _HAS_SKLEARN, "Missing scikit-learn. Skipping tests.")
+class DecisionTreeBinaryClassifierScikitTest(unittest.TestCase):
+    """
+    Unit test class for testing scikit-learn converter.
+    """
+
+    @classmethod
+    def setUpClass(self):
+        """
+        Set up the unit test by loading the dataset and training a model.
+        """
+        from sklearn.datasets import load_boston
+        from sklearn.tree import DecisionTreeClassifier
+
+        scikit_data = load_boston()
+        scikit_model = DecisionTreeClassifier(random_state=1)
+        target = scikit_data["target"] > scikit_data["target"].mean()
+        scikit_model.fit(scikit_data["data"], target)
+
+        # Save the data and the model
+        self.scikit_data = scikit_data
+        self.scikit_model = scikit_model
+
+    def test_conversion(self):
+        output_name = "target"
+        spec = skl_converter(self.scikit_model, "data", output_name).get_spec()
+        self.assertIsNotNone(spec)
+
+        # Test the model class
+        self.assertIsNotNone(spec.description)
+        self.assertIsNotNone(spec.treeEnsembleClassifier)
+
+        # Test the interface class
+        self.assertEqual(spec.description.predictedFeatureName, "target")
+
+        # Test the inputs and outputs
+        self.assertEqual(len(spec.description.output), 2)
+        self.assertEqual(spec.description.output[0].name, "target")
+        self.assertEqual(
+            spec.description.output[0].type.WhichOneof("Type"), "int64Type"
+        )
+        self.assertEqual(len(spec.description.input), 1)
+
+        input_type = spec.description.input[0]
+
+        self.assertEqual(input_type.type.WhichOneof("Type"), "multiArrayType")
+        self.assertEqual(input_type.name, "data")
+
+        # Check the tree ensemble parameters.
+        tr = spec.treeEnsembleClassifier.treeEnsemble
+        self.assertIsNotNone(tr)
+        self.assertEqual(len(tr.nodes), self.scikit_model.tree_.node_count)
+
+    def test_conversion_bad_inputs(self):
+        # Error on converting an untrained model
+        with self.assertRaises(Exception):
+            model = DecisionTreeClassifier()
+            spec = skl_converter(model, "data", "out")
+
+        # Check the expected class during conversion.
+        from sklearn.preprocessing import OneHotEncoder
+
+        with self.assertRaises(Exception):
+            model = OneHotEncoder()
+            spec = skl_converter(model, "data", "out")
+
+
+@unittest.skipIf(not _HAS_SKLEARN, "Missing scikit-learn. Skipping tests.")
+class DecisionTreeMultiClassClassifierScikitTest(unittest.TestCase):
+    """
+    Unit test class for testing scikit-learn converter.
+    """
+
+    @classmethod
+    def setUpClass(self):
+        """
+        Set up the unit test by loading the dataset and training a model.
+        """
+        import numpy as np
+        from sklearn.datasets import load_boston
+        from sklearn.tree import DecisionTreeClassifier
+
+        scikit_data = load_boston()
+        scikit_model = DecisionTreeClassifier(random_state=1)
+        t = scikit_data.target
+        target = np.digitize(t, np.histogram(t)[1]) - 1
+        scikit_model.fit(scikit_data.data, target)
+
+        # Save the data and the model
+        self.scikit_data = scikit_data
+        self.target = target
+        self.scikit_model = scikit_model
+
+    def test_conversion(self):
+        output_name = "target"
+        spec = skl_converter(self.scikit_model, "data", output_name).get_spec()
+        self.assertIsNotNone(spec)
+
+        # Test the model class
+        self.assertIsNotNone(spec.description)
+        self.assertIsNotNone(spec.treeEnsembleClassifier)
+
+        # Test the interface class
+        self.assertEqual(spec.description.predictedFeatureName, "target")
+
+        # Test the inputs and outputs
+        self.assertEqual(len(spec.description.output), 2)
+        self.assertEqual(spec.description.output[0].name, "target")
+        self.assertEqual(
+            spec.description.output[0].type.WhichOneof("Type"), "int64Type"
+        )
+        self.assertEqual(spec.description.input[0].name, "data")
+        self.assertEqual(
+            spec.description.input[0].type.WhichOneof("Type"), "multiArrayType"
+        )
+
+        tr = spec.treeEnsembleClassifier.treeEnsemble
+        self.assertIsNotNone(tr)
+        self.assertEqual(len(tr.nodes), self.scikit_model.tree_.node_count)
+
+    def test_conversion_bad_inputs(self):
+        # Error on converting an untrained model
+        with self.assertRaises(Exception):
+            model = DecisionTreeClassifier()
+            spec = skl_converter(model, "data", "out")
+
+        # Check the expected class during conversion.
+        from sklearn.preprocessing import OneHotEncoder
+
+        with self.assertRaises(Exception):
+            model = OneHotEncoder()
+            spec = skl_converter(model, "data", "out")
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_decision_tree_classifier_numeric.py b/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_decision_tree_classifier_numeric.py
new file mode 100644
index 00000000..bc507b2e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_decision_tree_classifier_numeric.py
@@ -0,0 +1,137 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import itertools
+import unittest
+from distutils.version import StrictVersion
+
+import numpy as np
+import pandas as pd
+import pytest
+
+from coremltools._deps import _HAS_SKLEARN, _SKLEARN_VERSION
+from coremltools.models.utils import (_is_macos, _macos_version,
+                                      evaluate_classifier)
+
+if _HAS_SKLEARN:
+    from sklearn.datasets import load_boston
+    from sklearn.tree import DecisionTreeClassifier
+
+    from coremltools.converters import sklearn as skl_converter
+
+
+@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.")
+class DecisionTreeClassificationBostonHousingScikitNumericTest(unittest.TestCase):
+    def _check_metrics(self, metrics, params={}):
+        self.assertEqual(
+            metrics["num_errors"],
+            0,
+            msg="Failed case %s.
Results %s" % (params, metrics), + ) + + def _train_convert_evaluate_assert(self, **scikit_params): + scikit_model = DecisionTreeClassifier(random_state=1, **scikit_params) + scikit_model.fit(self.X, self.target) + + # Convert the model + spec = skl_converter.convert(scikit_model, self.feature_names, self.output_name) + + if _is_macos() and _macos_version() >= (10, 13): + # Get predictions + df = pd.DataFrame(self.X, columns=self.feature_names) + df["target"] = scikit_model.predict(self.X) + + # Evaluate it + metrics = evaluate_classifier(spec, df) + self._check_metrics(metrics, scikit_params) + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class DecisionTreeBinaryClassificationBostonHousingScikitNumericTest( + DecisionTreeClassificationBostonHousingScikitNumericTest +): + @classmethod + def setUpClass(self): + # Load data and train model + scikit_data = load_boston() + self.scikit_data = scikit_data + self.X = scikit_data.data.astype("f").astype( + "d" + ) ## scikit-learn downcasts data + self.target = 1 * (scikit_data["target"] > scikit_data["target"].mean()) + self.feature_names = scikit_data.feature_names + self.output_name = "target" + + def test_simple_binary_classifier(self): + self._train_convert_evaluate_assert() + + @pytest.mark.slow + def test_binary_classifier_stress_test(self): + options = dict( + splitter=["best"], + max_depth=[1, 10, None], + min_samples_split=[2, 10, 0.5], + min_samples_leaf=[1, 5], + min_weight_fraction_leaf=[0.0, 0.5], + max_features=[None, 1, 5], + max_leaf_nodes=[None, 20], + ) + if _SKLEARN_VERSION < StrictVersion("0.22"): # 'presort' option deprecated >=0.22 + options["presort"] = [False, True] + + # Make a cartesian product of all options + product = itertools.product(*options.values()) + args = [dict(zip(options.keys(), p)) for p in product] + + print("Testing a total of %s cases. This could take a while" % len(args)) + for it, arg in enumerate(args): + self._train_convert_evaluate_assert(**arg) + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class DecisionTreeMultiClassClassificationBostonHousingScikitNumericTest( + DecisionTreeClassificationBostonHousingScikitNumericTest +): + @classmethod + def setUpClass(self): + # Load data and train model + scikit_data = load_boston() + num_classes = 3 + self.X = scikit_data.data.astype("f").astype( + "d" + ) ## scikit-learn downcasts data + t = scikit_data.target + target = np.digitize(t, np.histogram(t, bins=num_classes - 1)[1]) - 1 + + # Save the data and the model + self.scikit_data = scikit_data + self.target = target + self.feature_names = scikit_data.feature_names + self.output_name = "target" + + def test_simple_multiclass(self): + self._train_convert_evaluate_assert() + + @pytest.mark.slow + def test_multiclass_stress_test(self): + options = dict( + splitter=["best"], + max_depth=[1, 10, None], + min_samples_split=[2, 10, 0.5], + min_samples_leaf=[1, 5], + min_weight_fraction_leaf=[0.0, 0.5], + max_features=[None, 1, 5], + max_leaf_nodes=[None, 20], + ) + if _SKLEARN_VERSION < StrictVersion("0.22"): # 'presort' option deprecated >=0.22 + options["presort"] = [False, True] + + # Make a cartesian product of all options + product = itertools.product(*options.values()) + args = [dict(zip(options.keys(), p)) for p in product] + + print("Testing a total of %s cases. 
This could take a while" % len(args))
+        for it, arg in enumerate(args):
+            self._train_convert_evaluate_assert(**arg)
diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_decision_tree_regression.py b/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_decision_tree_regression.py
new file mode 100644
index 00000000..c8d9e2b5
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_decision_tree_regression.py
@@ -0,0 +1,87 @@
+# Copyright (c) 2017, Apple Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-3-clause license that can be
+# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
+
+import unittest
+
+from coremltools._deps import _HAS_SKLEARN, _HAS_XGBOOST
+
+if _HAS_XGBOOST:
+
+    pass
+
+if _HAS_SKLEARN:
+    from sklearn.tree import DecisionTreeRegressor
+
+    from coremltools.converters import sklearn as skl_converter
+
+
+@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.")
+class DecisionTreeRegressorScikitTest(unittest.TestCase):
+    """
+    Unit test class for testing scikit-learn converter.
+    """
+
+    @classmethod
+    def setUpClass(self):
+        """
+        Set up the unit test by loading the dataset and training a model.
+        """
+        from sklearn.datasets import load_boston
+        from sklearn.tree import DecisionTreeRegressor
+
+        scikit_data = load_boston()
+        scikit_model = DecisionTreeRegressor(random_state=1)
+        scikit_model.fit(scikit_data["data"], scikit_data["target"])
+
+        # Save the data and the model
+        self.scikit_data = scikit_data
+        self.scikit_model = scikit_model
+
+    def test_conversion(self):
+        feature_names = self.scikit_data.feature_names
+        output_name = "target"
+        spec = skl_converter.convert(
+            self.scikit_model, feature_names, output_name
+        ).get_spec()
+        self.assertIsNotNone(spec)
+
+        # Test the model class
+        self.assertIsNotNone(spec.description)
+        self.assertIsNotNone(spec.treeEnsembleRegressor)
+
+        # Test the interface class
+        self.assertEqual(spec.description.predictedFeatureName, "target")
+
+        # Test the inputs and outputs
+        self.assertEqual(len(spec.description.output), 1)
+        self.assertEqual(spec.description.output[0].name, "target")
+        self.assertEqual(
+            spec.description.output[0].type.WhichOneof("Type"), "doubleType"
+        )
+        for input_type in spec.description.input:
+            self.assertEqual(input_type.type.WhichOneof("Type"), "doubleType")
+        self.assertEqual(
+            sorted(feature_names), sorted(map(lambda x: x.name, spec.description.input))
+        )
+
+        # Check the tree ensemble parameters.
+        tr = spec.pipelineRegressor.pipeline.models[
+            1
+        ].treeEnsembleRegressor.treeEnsemble
+        self.assertIsNotNone(tr)
+        self.assertEqual(len(tr.nodes), self.scikit_model.tree_.node_count)
+
+    def test_conversion_bad_inputs(self):
+        # Error on converting an untrained model
+        with self.assertRaises(Exception):
+            model = DecisionTreeRegressor()
+            spec = skl_converter.convert(model, "data", "out")
+
+        # Check the expected class during conversion.
+ from sklearn.preprocessing import OneHotEncoder + + with self.assertRaises(Exception): + model = OneHotEncoder() + spec = skl_converter.convert(model, "data", "out") diff --git a/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_decision_tree_regression_numeric.py b/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_decision_tree_regression_numeric.py new file mode 100644 index 00000000..eb3e5a37 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/test/xgboost_tests/test_decision_tree_regression_numeric.py @@ -0,0 +1,106 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +import itertools +import unittest +from distutils.version import StrictVersion + +import pandas as pd +import pytest + +from coremltools._deps import _HAS_SKLEARN, _SKLEARN_VERSION +from coremltools.models.utils import (_is_macos, _macos_version, + evaluate_regressor) + + +@unittest.skipIf(not _HAS_SKLEARN, "Missing sklearn. Skipping tests.") +class DecisionTreeRegressorBostonHousingScikitNumericTest(unittest.TestCase): + """ + Unit test class for testing scikit-learn converter and running both models + """ + + @classmethod + def setUpClass(self): + """ + Set up the unit test by loading the dataset and training a model. + """ + from sklearn.datasets import load_boston + + # Load data and train model + scikit_data = load_boston() + self.scikit_data = scikit_data + self.X = scikit_data["data"] + self.target = scikit_data["target"] + self.feature_names = scikit_data.feature_names + self.output_name = "target" + + def _check_metrics(self, metrics, params={}): + """ + Check the metrics + """ + self.assertAlmostEqual( + metrics["rmse"], + 0, + delta=1e-5, + msg="Failed case %s. Results %s" % (params, metrics), + ) + self.assertAlmostEqual( + metrics["max_error"], + 0, + delta=1e-5, + msg="Failed case %s. 
Results %s" % (params, metrics), + ) + + def _train_convert_evaluate_assert(self, **scikit_params): + """ + Train a scikit-learn model, convert it and then evaluate it with CoreML + """ + from sklearn.tree import DecisionTreeRegressor + + from coremltools.converters import sklearn as skl_converter + + scikit_model = DecisionTreeRegressor(random_state=1, **scikit_params) + scikit_model.fit(self.X, self.target) + + # Convert the model + spec = skl_converter.convert(scikit_model, self.feature_names, self.output_name) + + if _is_macos() and _macos_version() >= (10, 13): + # Get predictions + df = pd.DataFrame(self.X, columns=self.feature_names) + df["target"] = scikit_model.predict(self.X) + + # Evaluate it + metrics = evaluate_regressor(spec, df, target="target", verbose=False) + self._check_metrics(metrics, scikit_params) + + def test_boston_housing_simple_regression(self): + self._train_convert_evaluate_assert(max_depth=20) + + @pytest.mark.slow + def test_boston_housing_parameter_stress_test(self): + + ## These are all the options in decision tree regression of scikit-learn + options = dict( + criterion=["mse"], + splitter=["best"], + max_depth=[1, 10, None], + min_samples_split=[2, 10, 0.5], + min_samples_leaf=[1, 5], + min_weight_fraction_leaf=[0.0, 0.5], + max_features=[None, 1, 5], + max_leaf_nodes=[None, 20], + min_impurity_decrease=[0.0, 1e-07, 0.1], + ) + if _SKLEARN_VERSION < StrictVersion("0.22"): # 'presort' option deprecated >=0.22 + options["presort"] = [False, True] + + # Make a cartesian product of all options + product = itertools.product(*options.values()) + args = [dict(zip(options.keys(), p)) for p in product] + + print("Testing a total of %s cases. This could take a while" % len(args)) + for it, arg in enumerate(args): + self._train_convert_evaluate_assert(**arg) diff --git a/__packaged__/coreml/.python_dependencies/coremltools/version.py b/__packaged__/coreml/.python_dependencies/coremltools/version.py new file mode 100644 index 00000000..b1b16114 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/coremltools/version.py @@ -0,0 +1,7 @@ +# Copyright (c) 2017, Apple Inc. All rights reserved. +# +# Use of this source code is governed by a BSD-3-clause license that can be +# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + + +__version__ = "6.3.0" # VERSION_STRING diff --git a/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/INSTALLER b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/INSTALLER new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/LICENSE.md b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/LICENSE.md new file mode 100644 index 00000000..76bb5d96 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/LICENSE.md @@ -0,0 +1,39 @@ +Copyright (C) 2022 Apple Inc. All Rights Reserved. + +IMPORTANT: This Apple software is supplied to you by Apple +Inc. ("Apple") in consideration of your agreement to the following +terms, and your use, installation, modification or redistribution of +this Apple software constitutes acceptance of these terms. 
If you do +not agree with these terms, please do not use, install, modify or +redistribute this Apple software. + +In consideration of your agreement to abide by the following terms, and +subject to these terms, Apple grants you a personal, non-exclusive +license, under Apple's copyrights in this original Apple software (the +"Apple Software"), to use, reproduce, modify and redistribute the Apple +Software, with or without modifications, in source and/or binary forms; +provided that if you redistribute the Apple Software in its entirety and +without modifications, you must retain this notice and the following +text and disclaimers in all such redistributions of the Apple Software. +Neither the name, trademarks, service marks or logos of Apple Inc. may +be used to endorse or promote products derived from the Apple Software +without specific prior written permission from Apple. Except as +expressly stated in this notice, no other rights or licenses, express or +implied, are granted by Apple herein, including but not limited to any +patent rights that may be infringed by your derivative works or by other +works in which the Apple Software may be incorporated. + +The Apple Software is provided by Apple on an "AS IS" basis. APPLE +MAKES NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION +THE IMPLIED WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS +FOR A PARTICULAR PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND +OPERATION ALONE OR IN COMBINATION WITH YOUR PRODUCTS. + +IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL +OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION, +MODIFICATION AND/OR DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED +AND WHETHER UNDER THEORY OF CONTRACT, TORT (INCLUDING NEGLIGENCE), +STRICT LIABILITY OR OTHERWISE, EVEN IF APPLE HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/METADATA b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/METADATA new file mode 100644 index 00000000..1dddbb0d --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/METADATA @@ -0,0 +1,486 @@ +Metadata-Version: 2.1 +Name: python-coreml-stable-diffusion +Version: 0.1.0 +Summary: Run Stable Diffusion on Apple Silicon with Core ML (Python and Swift) +Home-page: https://github.com/apple/ml-stable-diffusion +Author: Apple Inc. 
+Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Topic :: Artificial Intelligence +Classifier: Topic :: Scientific/Engineering +Classifier: Topic :: Software Development +Description-Content-Type: text/markdown +License-File: LICENSE.md +Requires-Dist: coremltools (>=6.1) +Requires-Dist: diffusers[torch] +Requires-Dist: torch +Requires-Dist: transformers +Requires-Dist: huggingface-hub +Requires-Dist: scipy +Requires-Dist: numpy (<1.24) + +# Core ML Stable Diffusion + +Run Stable Diffusion on Apple Silicon with Core ML + + + +This repository comprises: + +- `python_coreml_stable_diffusion`, a Python package for converting PyTorch models to Core ML format and performing image generation with Hugging Face [diffusers](https://github.com/huggingface/diffusers) in Python +- `StableDiffusion`, a Swift package that developers can add to their Xcode projects as a dependency to deploy image generation capabilities in their apps. The Swift package relies on the Core ML model files generated by `python_coreml_stable_diffusion` + +If you run into issues during installation or runtime, please refer to the [FAQ](#faq) section. Please refer to the [System Requirements](#system-requirements) section before getting started. + + +## Example Results + +There are numerous versions of Stable Diffusion available on the [Hugging Face Hub](https://huggingface.co/models?search=stable-diffusion). Here are example results from three of those models: + +`--model-version` | [stabilityai/stable-diffusion-2-base](https://huggingface.co/stabilityai/stable-diffusion-2-base) | [CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4) | [runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5) | +:------:|:------:|:------:|:------: +Output | ![](assets/a_high_quality_photo_of_an_astronaut_riding_a_horse_in_space/randomSeed_11_computeUnit_CPU_AND_GPU_modelVersion_stabilityai_stable-diffusion-2-base.png) | ![](assets/a_high_quality_photo_of_an_astronaut_riding_a_horse_in_space/randomSeed_13_computeUnit_CPU_AND_NE_modelVersion_CompVis_stable-diffusion-v1-4.png) | ![](assets/a_high_quality_photo_of_an_astronaut_riding_a_horse_in_space/randomSeed_93_computeUnit_CPU_AND_NE_modelVersion_runwayml_stable-diffusion-v1-5.png) +M1 iPad Pro 8GB Latency (s) | 29 | 38 | 38 | +M1 MacBook Pro 16GB Latency (s) | 24 | 35 | 35 | +M2 MacBook Air 8GB Latency (s) | 18 | 23 | 23 | + +Please see [Important Notes on Performance Benchmarks](#important-notes-on-performance-benchmarks) section for details. + +## System Requirements + +The following is recommended to use all the functionality in this repository: + + Python | macOS | Xcode | iPadOS, iOS | +:------:|:-----:|:-----:|:-----------:| + 3.8 | 13.1 | 14.3 | 16.2 | + +## Using Ready-made Core ML Models from Hugging Face Hub + +
+  Click to expand

+🤗 Hugging Face ran the [conversion procedure](#converting-models-to-core-ml) on the following models and made the Core ML weights publicly available on the Hub. If you would like to convert a version of Stable Diffusion that is not already available on the Hub, please refer to the [Converting Models to Core ML](#converting-models-to-core-ml) section.

+* [`CompVis/stable-diffusion-v1-4`](https://huggingface.co/apple/coreml-stable-diffusion-v1-4)
+* [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/apple/coreml-stable-diffusion-v1-5)
+* [`stabilityai/stable-diffusion-2-base`](https://huggingface.co/apple/coreml-stable-diffusion-2-base)

+If you want to use any of those models you may download the weights and proceed to [generate images with Python](#image-generation-with-python) or [Swift](#image-generation-with-swift).

+There are several variants in each model repository. You may clone a whole repo using `git` and `git lfs` to download all variants, or selectively download the ones you need.

+To clone the repos using `git`, please follow this process:

+**Step 1:** Install the `git lfs` extension for your system.

+`git lfs` stores large files outside the main git repo, and it downloads them from the appropriate server after you clone or checkout. It is available in most package managers; check [the installation page](https://git-lfs.com) for details.

+**Step 2:** Enable `git lfs` by running this command once:

+```bash
+git lfs install
+```

+**Step 3:** Use `git clone` to download a copy of the repo that includes all model variants. For Stable Diffusion version 1.4, you'd issue the following command in your terminal:

+```bash
+git clone https://huggingface.co/apple/coreml-stable-diffusion-v1-4
+```

+If you prefer to download specific variants instead of cloning the repos, you can use the `huggingface_hub` Python library. For example, to do generation in Python using the `ORIGINAL` attention implementation (read [this section](#converting-models-to-core-ml) for details), you could use the following helper code:

+```Python
+from huggingface_hub import snapshot_download
+from huggingface_hub.file_download import repo_folder_name
+from pathlib import Path
+import shutil
+
+repo_id = "apple/coreml-stable-diffusion-v1-4"
+variant = "original/packages"
+
+def download_model(repo_id, variant, output_dir):
+    destination = Path(output_dir) / (repo_id.split("/")[-1] + "_" + variant.replace("/", "_"))
+    if destination.exists():
+        raise Exception(f"Model already exists at {destination}")
+
+    # Download and copy without symlinks
+    downloaded = snapshot_download(repo_id, allow_patterns=f"{variant}/*", cache_dir=output_dir)
+    downloaded_bundle = Path(downloaded) / variant
+    shutil.copytree(downloaded_bundle, destination)
+
+    # Remove all downloaded files
+    cache_folder = Path(output_dir) / repo_folder_name(repo_id=repo_id, repo_type="model")
+    shutil.rmtree(cache_folder)
+    return destination
+
+model_path = download_model(repo_id, variant, output_dir="./models")
+print(f"Model downloaded at {model_path}")
+```

+`model_path` would be the path in your local filesystem where the checkpoint was saved. Please refer to [this post](https://huggingface.co/blog/diffusers-coreml) for additional details.
+ +## Converting Models to Core ML + +
+  Click to expand

+**Step 1:** Create a Python environment and install dependencies:

+```bash
+conda create -n coreml_stable_diffusion python=3.8 -y
+conda activate coreml_stable_diffusion
+cd /path/to/cloned/ml-stable-diffusion/repository
+pip install -e .
+```

+**Step 2:** Log in to or register for your [Hugging Face account](https://huggingface.co), generate a [User Access Token](https://huggingface.co/settings/tokens) and use this token to set up Hugging Face API access by running `huggingface-cli login` in a Terminal window.

+**Step 3:** Navigate to the version of Stable Diffusion that you would like to use on [Hugging Face Hub](https://huggingface.co/models?search=stable-diffusion) and accept its Terms of Use. The default model version is [CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4). The model version may be changed by the user as described in the next step.

+**Step 4:** Execute the following command from the Terminal to generate Core ML model files (`.mlpackage`):

+```shell
+python -m python_coreml_stable_diffusion.torch2coreml --convert-unet --convert-text-encoder --convert-vae-decoder --convert-safety-checker -o <output-mlpackages-directory>
+```

+**WARNING:** This command will download several GB worth of PyTorch checkpoints from Hugging Face. Please ensure that you are on Wi-Fi and have enough disk space.

+This generally takes 15-20 minutes on an M1 MacBook Pro. Upon successful execution, the 4 neural network models that comprise Stable Diffusion will have been converted from PyTorch to Core ML (`.mlpackage`) and saved into the specified `<output-mlpackages-directory>` (a quick way to inspect the generated packages is sketched after the argument list below). Some additional notable arguments:

+- `--model-version`: The model version defaults to [CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4). Developers may specify other versions that are available on [Hugging Face Hub](https://huggingface.co/models?search=stable-diffusion), e.g. [stabilityai/stable-diffusion-2-base](https://huggingface.co/stabilityai/stable-diffusion-2-base) & [runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5).

+- `--bundle-resources-for-swift-cli`: Compiles all 4 models and bundles them along with necessary resources for text tokenization into `<output-mlpackages-directory>/Resources`, which should be provided as input to the Swift package. This flag is not necessary for the diffusers-based Python pipeline.

+- `--chunk-unet`: Splits the Unet model into two approximately equal chunks (each with less than 1GB of weights) for mobile-friendly deployment. This is **required** for Neural Engine deployment on iOS and iPadOS. This is not required for macOS. The Swift CLI is able to consume both the chunked and regular versions of the Unet model but prioritizes the former. Note that the chunked Unet is not compatible with the Python pipeline, which is intended for macOS only; chunking is for on-device deployment with Swift.

+- `--attention-implementation`: Defaults to `SPLIT_EINSUM`, which is the implementation described in [Deploying Transformers on the Apple Neural Engine](https://machinelearning.apple.com/research/neural-engine-transformers). `--attention-implementation ORIGINAL` will switch to an alternative that should be used for CPU or GPU deployment. Please refer to the [Performance Benchmark](#performance-benchmark) section for further guidance.

+- `--check-output-correctness`: Compares original PyTorch model's outputs to final Core ML model's outputs.
This flag increases RAM consumption significantly, so it is recommended only for debugging purposes.

+- `--convert-controlnet`: Converts the ControlNet models specified after this option. Multiple models may be converted at once, e.g. `--convert-controlnet lllyasviel/sd-controlnet-mlsd lllyasviel/sd-controlnet-depth`.

+- `--unet-support-controlnet`: Enables a converted UNet model to receive additional inputs from ControlNet. This is required for generating images with ControlNet, and the resulting model is saved under a different name, `*_control-unet.mlpackage`, distinct from the normal UNet. Note that this variant cannot run without ControlNet, so use the normal UNet for plain txt2img.
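+
+After conversion, one way to verify that the `.mlpackage` files landed where expected is to open one with `coremltools` and print its interface. The snippet below is an illustrative sketch rather than part of this repository; the file name is hypothetical and depends on the converted model version:
+
+```python
+import coremltools as ct
+
+# Hypothetical path; substitute an actual .mlpackage from your -o directory.
+unet = ct.models.MLModel("<output-mlpackages-directory>/Stable_Diffusion_unet.mlpackage")
+
+# Inspect the input/output interface recorded in the Core ML spec.
+print(unet.input_description)
+print(unet.output_description)
+```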
+ +## Image Generation with Python + +
+  Click to expand

+Run text-to-image generation using the example Python pipeline based on [diffusers](https://github.com/huggingface/diffusers):

+```shell
+python -m python_coreml_stable_diffusion.pipeline --prompt "a photo of an astronaut riding a horse on mars" -i <output-mlpackages-directory> -o <output-image-directory> --compute-unit ALL --seed 93
+```
+Please refer to the help menu for all available arguments: `python -m python_coreml_stable_diffusion.pipeline -h`. Some notable arguments:

+- `-i`: Should point to the `-o` directory from Step 4 of the [Converting Models to Core ML](#converting-models-to-core-ml) section above.
+- `--model-version`: If you overrode the default model version while converting models to Core ML, you will need to specify the same model version here.
+- `--compute-unit`: Note that the most performant compute unit for this particular implementation may differ across different hardware. `CPU_AND_GPU` or `CPU_AND_NE` may be faster than `ALL`. Please refer to the [Performance Benchmark](#performance-benchmark) section for further guidance, and see the timing sketch after this list.
+- `--scheduler`: If you would like to experiment with different schedulers, you may specify one here. For available options, please see the help menu. You may also specify a custom number of inference steps with `--num-inference-steps`, which defaults to 50.
+- `--controlnet`: ControlNet models specified with this option are used in image generation. Use this option in the format `--controlnet lllyasviel/sd-controlnet-mlsd lllyasviel/sd-controlnet-depth` and make sure to use `--controlnet-inputs` in conjunction.
+- `--controlnet-inputs`: Image inputs corresponding to each ControlNet model. Please provide image paths in the same order as the models in `--controlnet`, for example: `--controlnet-inputs image_mlsd image_depth`.
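+
+Since the fastest `--compute-unit` varies by machine, one practical approach is to time the same prompt and seed across all three options. The sketch below is illustrative and not part of this repository; the paths are placeholders, and each invocation includes the one-time model load and compilation cost discussed in the FAQ, so steady-state latency is best measured on a repeat run:
+
+```python
+import subprocess
+import time
+
+MODELS = "<output-mlpackages-directory>"  # placeholder path
+OUTDIR = "<output-image-directory>"       # placeholder path
+
+for cu in ["ALL", "CPU_AND_GPU", "CPU_AND_NE"]:
+    start = time.time()
+    subprocess.run(
+        ["python", "-m", "python_coreml_stable_diffusion.pipeline",
+         "--prompt", "a photo of an astronaut riding a horse on mars",
+         "-i", MODELS, "-o", OUTDIR,
+         "--compute-unit", cu, "--seed", "93"],
+        check=True,
+    )
+    print(f"{cu}: {time.time() - start:.0f}s (includes one-time model load)")
+```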
+ +## Image Generation with Swift + +
+  Click to expand

+### System Requirements

+**Building** (minimum):

+- Xcode 14.3
+- Command Line Tools for Xcode 14.3

+Check [developer.apple.com](https://developer.apple.com/download/all/?q=xcode) for the latest versions.

+**Running** (minimum):

+| Mac        | iPad\*      | iPhone\*      |
+|:----------:|:-----------:|:-------------:|
+| macOS 13.1 | iPadOS 16.2 | iOS 16.2      |
+| M1         | M1          | iPhone 12 Pro |

+You will also need the resources generated by the `--bundle-resources-for-swift-cli` option described in [Converting Models to Core ML](#converting-models-to-core-ml).

+\* Please see [FAQ](#faq) [Q6](#q-mobile-app) regarding deploying on iPad and iPhone.

+### Example CLI Usage
+```shell
+swift run StableDiffusionSample "a photo of an astronaut riding a horse on mars" --resource-path <output-mlpackages-directory>/Resources/ --seed 93 --output-path <output-dir>
+```
+The output will be named based on the prompt and random seed:
+e.g. `<output-dir>/a_photo_of_an_astronaut_riding_a_horse_on_mars.93.final.png`

+Please use the `--help` flag to learn about batched generation and more.

+### Example Library Usage

+```swift
+import StableDiffusion
+...
+let pipeline = try StableDiffusionPipeline(resourcesAt: resourceURL)
+try pipeline.loadResources()
+let image = try pipeline.generateImages(prompt: prompt, seed: seed).first
+```
+On iOS, the `reduceMemory` option should be set to `true` when constructing `StableDiffusionPipeline`.

+### Swift Package Details

+This Swift package contains two products:

+- `StableDiffusion` library
+- `StableDiffusionSample` command-line tool

+Both of these products require the Core ML models and tokenization resources to be supplied. When specifying resources via a directory path, that directory must contain the following:

+- `TextEncoder.mlmodelc` (text embedding model)
+- `Unet.mlmodelc` or `UnetChunk1.mlmodelc` & `UnetChunk2.mlmodelc` (denoising autoencoder model)
+- `VAEDecoder.mlmodelc` (image decoder model)
+- `vocab.json` (tokenizer vocabulary file)
+- `merges.txt` (merges file for byte pair encoding)

+Optionally, for image2image, in-painting, or similar:

+- `VAEEncoder.mlmodelc` (image encoder model)

+Optionally, it may also include the safety checker model that some versions of Stable Diffusion include:

+- `SafetyChecker.mlmodelc`

+Optionally, for ControlNet:

+- `ControlledUNet.mlmodelc` or `ControlledUnetChunk1.mlmodelc` & `ControlledUnetChunk2.mlmodelc` (enabled to receive ControlNet values)
+- `controlnet/` (directory containing ControlNet models)
+  - `LllyasvielSdControlnetMlsd.mlmodelc` (for example, from lllyasviel/sd-controlnet-mlsd)
+  - `LllyasvielSdControlnetDepth.mlmodelc` (for example, from lllyasviel/sd-controlnet-depth)
+  - Other models you converted

+Note that the chunked version of Unet is checked for first. Only if it is not present will the full `Unet.mlmodelc` be loaded. Chunking is required for iOS and iPadOS and not necessary for macOS.
+ +## Example Swift App + +
+ Click to expand + +🤗 Hugging Face created an [open-source demo app](https://github.com/huggingface/swift-coreml-diffusers) on top of this library. It's written in native Swift and Swift UI, and runs on macOS, iOS and iPadOS. You can use the code as a starting point for your app, or to see how to integrate this library in your own projects. + +Hugging Face has made the app [available in the Mac App Store](https://apps.apple.com/app/diffusers/id1666309574?mt=12). + +
+ +## Performance Benchmark + +
+ Click to expand + +Standard [CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4) Benchmark + +| Device | `--compute-unit`| `--attention-implementation` | Latency (seconds) | +| ---------------------------------- | -------------- | ---------------------------- | ----------------- | +| Mac Studio (M1 Ultra, 64-core GPU) | `CPU_AND_GPU` | `ORIGINAL` | 9 | +| Mac Studio (M1 Ultra, 48-core GPU) | `CPU_AND_GPU` | `ORIGINAL` | 13 | +| MacBook Pro (M1 Max, 32-core GPU) | `CPU_AND_GPU` | `ORIGINAL` | 18 | +| MacBook Pro (M1 Max, 24-core GPU) | `CPU_AND_GPU` | `ORIGINAL` | 20 | +| MacBook Pro (M1 Pro, 16-core GPU) | `ALL` | `SPLIT_EINSUM (default)` | 26 | +| MacBook Pro (M2) | `CPU_AND_NE` | `SPLIT_EINSUM (default)` | 23 | +| MacBook Pro (M1) | `CPU_AND_NE` | `SPLIT_EINSUM (default)` | 35 | +| iPad Pro (5th gen, M1) | `CPU_AND_NE` | `SPLIT_EINSUM (default)` | 38 | + + +Please see [Important Notes on Performance Benchmarks](#important-notes-on-performance-benchmarks) section for details. + +
+ +## Important Notes on Performance Benchmarks + +
+  Click to expand

+- This benchmark was conducted by Apple using public beta versions of iOS 16.2, iPadOS 16.2 and macOS 13.1 in November 2022.
+- The executed program is `python_coreml_stable_diffusion.pipeline` for macOS devices and a minimal Swift test app built on the `StableDiffusion` Swift package for iOS and iPadOS devices.
+- The median value across 3 end-to-end executions is reported.
+- Performance may materially differ across different versions of Stable Diffusion due to architecture changes in the model itself. Each reported number is specific to the model version mentioned in that context.
+- The image generation procedure follows the standard configuration: 50 inference steps, 512x512 output image resolution, 77 text token sequence length, classifier-free guidance (batch size of 2 for unet).
+- The actual prompt length does not impact performance because the Core ML model is converted with a static shape that computes the forward pass for all of the 77 elements (`tokenizer.model_max_length`) in the text token sequence regardless of the actual length of the input text.
+- Pipelining across the 4 models is not optimized and these performance numbers are subject to variance under increased system load from other applications. Given these factors, we do not report sub-second variance in latency.
+- Weights and activations are in float16 precision for both the GPU and the Neural Engine.
+- The Swift CLI program consumes a peak memory of approximately 2.6GB (without the safety checker), 2.1GB of which is model weights in float16 precision. We applied [8-bit weight quantization](https://coremltools.readme.io/docs/compressing-ml-program-weights#use-affine-quantization) to reduce peak memory consumption by approximately 1GB. However, we observed that it had an adverse effect on generated image quality and we rolled it back. We encourage developers to experiment with other advanced weight compression techniques such as [palettization](https://coremltools.readme.io/docs/compressing-ml-program-weights#use-a-lookup-table) and/or [pruning](https://coremltools.readme.io/docs/compressing-ml-program-weights#use-sparse-representation) which may yield better results (see the sketch after this list).
+- In the [benchmark table](#performance-benchmark), we report the best performing `--compute-unit` and `--attention-implementation` values per device. The former does not modify the Core ML model and can be applied during runtime. The latter modifies the Core ML model. Note that the best performing compute unit is model version and hardware-specific.
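+
+For orientation, the following sketch shows what experimenting with palettization might look like using the coremltools 6 compression utilities referenced above. It is illustrative only: the paths are placeholders, the `nbits` choice is arbitrary, and, as noted above, any compressed model should be re-validated for image quality:
+
+```python
+import coremltools as ct
+from coremltools import compression_utils
+
+# Placeholder path to a converted ML Program model package.
+model = ct.models.MLModel("<output-mlpackages-directory>/Stable_Diffusion_unet.mlpackage")
+
+# Cluster the float16 weights into a 6-bit lookup table (palettization).
+compressed = compression_utils.palettize_weights(model, nbits=6, mode="kmeans")
+compressed.save("<output-mlpackages-directory>/Stable_Diffusion_unet_palettized.mlpackage")
+```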
+ + +## Results with Different Compute Units + +
+ Click to expand + +It is highly probable that there will be slight differences across generated images using different compute units. + +The following images were generated on an M1 MacBook Pro and macOS 13.1 with the prompt *"a photo of an astronaut riding a horse on mars"* using the [runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5) model version. The random seed was set to 93: + + CPU_AND_NE | CPU_AND_GPU | ALL | +:------------:|:-------------:|:------: +![](assets/a_high_quality_photo_of_an_astronaut_riding_a_horse_in_space/randomSeed_93_computeUnit_CPU_AND_NE_modelVersion_runwayml_stable-diffusion-v1-5.png) | ![](assets/a_high_quality_photo_of_an_astronaut_riding_a_horse_in_space/randomSeed_93_computeUnit_CPU_AND_GPU_modelVersion_runwayml_stable-diffusion-v1-5.png) | ![](assets/a_high_quality_photo_of_an_astronaut_riding_a_horse_in_space/randomSeed_93_computeUnit_ALL_modelVersion_runwayml_stable-diffusion-v1-5.png) | + +Differences may be less or more pronounced for different inputs. Please see the [FAQ](#faq) Q8 for a detailed explanation. + +
+ +## Results with ControlNet + +
+ Click to expand + +[ControlNet](https://huggingface.co/lllyasviel/ControlNet) allows users to condition image generation with Stable Diffusion on signals such as edge maps, depth maps, segmentation maps, scribbles and pose. Thanks to [@ryu38's contribution](https://github.com/apple/ml-stable-diffusion/pull/153), both the Python CLI and the Swift package support ControlNet models. Please refer to CLI arguments in previous sections to exercise this new feature. + +Example results using the prompt "a high quality photo of a surfing dog" conditioned on the scribble (leftmost): + + + +
+ + +## FAQ + +
+ Click to expand +
+ + + Q1: ERROR: Failed building wheel for tokenizers or error: can't find Rust compiler + + A1: Please review this [potential solution](https://github.com/huggingface/transformers/issues/2831#issuecomment-592724471). +
+ + +
+ Q2: RuntimeError: {NSLocalizedDescription = "Error computing NN outputs." + + A2: There are many potential causes for this error. In this context, it is highly likely to be encountered when your system is under increased memory pressure from other applications. Reducing memory utilization of other applications is likely to help alleviate the issue. +
+ +
+  Q3: My Mac has 8GB RAM and I am converting models to Core ML using the example command. The process is getting killed because of memory issues. How do I fix this issue?

+  A3: In order to minimize the memory impact of the model conversion process, please execute the following command instead:

+```bash
+python -m python_coreml_stable_diffusion.torch2coreml --convert-vae-encoder -o <output-mlpackages-directory> && \
+python -m python_coreml_stable_diffusion.torch2coreml --convert-vae-decoder -o <output-mlpackages-directory> && \
+python -m python_coreml_stable_diffusion.torch2coreml --convert-unet -o <output-mlpackages-directory> && \
+python -m python_coreml_stable_diffusion.torch2coreml --convert-text-encoder -o <output-mlpackages-directory> && \
+python -m python_coreml_stable_diffusion.torch2coreml --convert-safety-checker -o <output-mlpackages-directory>
+```

+If you need `--chunk-unet`, you may do so in yet another independent command, which will reuse the previously exported Unet model and simply chunk it in place:

+```bash
+python -m python_coreml_stable_diffusion.torch2coreml --convert-unet --chunk-unet -o <output-mlpackages-directory>
+```
+ +
+  Q4: My Mac has 8GB RAM, should image generation work on my machine?

+  A4: Yes! Especially the `--compute-unit CPU_AND_NE` option should work under reasonable system load from other applications. Note that some of the [Example Results](#example-results) were generated using an M2 MacBook Air with 8GB RAM.
+ +
+  Q5: Every time I generate an image using the Python pipeline, loading all the Core ML models takes 2-3 minutes. Is this expected?

+  A5: Yes, and using the Swift library reduces this to just a few seconds. The reason is that `coremltools` loads Core ML models (`.mlpackage`) and each model is compiled to run on the requested compute unit during load time. Because of the size and number of operations in the unet model, it takes around 2-3 minutes to compile it for Neural Engine execution. Other models should take at most a few seconds. Note that `coremltools` does not cache the compiled model for later loads, so each load takes equally long. In order to benefit from compilation caching, the `StableDiffusion` Swift package by default relies on compiled Core ML models (`.mlmodelc`), which are compiled down for the requested compute unit upon first load; the cache is then reused on subsequent loads until it is purged due to lack of use.

+If you intend to use the Python pipeline in an application, we recommend initializing the pipeline once so that the load time is only incurred once. Afterwards, generating images using different prompts and random seeds will not incur the load time for the current session of your application, as the sketch below illustrates.
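+
+As an illustration of the load-versus-predict trade-off in Python, the hedged sketch below loads one Core ML model a single time (paying the compilation cost) and then reuses it. The path and input name are hypothetical; `compute_units` is the standard `coremltools` argument for selecting the compute unit at load time:
+
+```python
+import coremltools as ct
+
+# Slow, one-time cost: the .mlpackage is compiled for the requested
+# compute unit when it is loaded.
+model = ct.models.MLModel(
+    "<output-mlpackages-directory>/Stable_Diffusion_text_encoder.mlpackage",
+    compute_units=ct.ComputeUnit.CPU_AND_NE,
+)
+
+# Fast afterwards: every predict() call reuses the compiled model, e.g.
+# out = model.predict({"input_ids": token_ids})  # input name is illustrative
+```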
+ + +
+  Q6: I want to deploy StableDiffusion, the Swift package, in my mobile app. What should I be aware of?

+  A6: The [Image Generation with Swift](#image-gen-swift) section describes the minimum SDK and OS versions as well as the device models supported by this package. We recommend carefully testing the package on the device with the least amount of RAM available among your deployment targets.

+The image generation process in `StableDiffusion` can yield over 2 GB of peak memory during runtime, depending on the compute units selected. On iPadOS, we recommend using `.cpuAndNeuralEngine` in your configuration and the `reduceMemory` option when constructing a `StableDiffusionPipeline` to minimize memory pressure.

+If your app crashes during image generation, consider adding the [Increased Memory Limit](https://developer.apple.com/documentation/bundleresources/entitlements/com_apple_developer_kernel_increased-memory-limit) capability to inform the system that some of your app's core features may perform better by exceeding the default app memory limit on supported devices.

+On iOS, depending on the iPhone model, Stable Diffusion model version, selected compute units, system load and the design of your app, this may still not be sufficient to keep your app's peak memory under the limit. Please remember that, because the device shares memory between apps and iOS processes, one app using too much memory can compromise the user experience across the whole device.
+
+
+ Q7: How do I generate images with different resolutions using the same Core ML models?
+
+ A7: The current version of `python_coreml_stable_diffusion` does not support single-model multi-resolution out of the box. However, developers may fork this project and leverage the [flexible shapes](https://coremltools.readme.io/docs/flexible-inputs) support from coremltools to extend the `torch2coreml` script with `coremltools.EnumeratedShapes`, as sketched below. Note that, while the `text_encoder` is agnostic to the image resolution, the inputs and outputs of the `vae_decoder` and `unet` models depend on the desired image resolution.
+
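+As an illustrative, hypothetical sketch (the toy module and the shape values below are assumptions, not code from this repository), `coremltools.EnumeratedShapes` attaches a fixed set of admissible shapes to a converted model's input:
+
+```python
+import coremltools as ct
+import torch
+
+# Toy stand-in for the traced unet: any module with a single 4D tensor input
+toy = torch.jit.trace(
+    torch.nn.Conv2d(4, 4, kernel_size=3, padding=1).eval(),
+    torch.rand(1, 4, 64, 64))
+
+# Latent shapes for 512x512 and 768x768 outputs (spatial dims are height/8 x width/8)
+latent_shapes = ct.EnumeratedShapes(
+    shapes=[(1, 4, 64, 64), (1, 4, 96, 96)],
+    default=(1, 4, 64, 64),
+)
+
+mlmodel = ct.convert(
+    toy,
+    convert_to="mlprogram",
+    inputs=[ct.TensorType(name="sample", shape=latent_shapes)],
+)
+# The converted model accepts either enumerated shape at prediction time
+```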
+
+
+ Q8: Are the Core ML and PyTorch generated images going to be identical?
+
+ A8: If desired, the generated images across PyTorch and Core ML can be made approximately identical. However, identical results are not guaranteed by default. Several factors might lead to different images across PyTorch and Core ML:
+
+
+ 1. Random Number Generator Behavior
+
+ The main source of potentially different results across PyTorch and Core ML is the Random Number Generator ([RNG](https://en.wikipedia.org/wiki/Random_number_generation)) behavior. PyTorch and Numpy have different sources of randomness. `python_coreml_stable_diffusion` generally relies on Numpy for RNG (e.g. latents initialization) and the `StableDiffusion` Swift library reproduces this RNG behavior by default (see the sketch after this list). However, PyTorch-based pipelines such as Hugging Face `diffusers` rely on PyTorch's RNG behavior. Thanks to @liuliu's [contribution](https://github.com/apple/ml-stable-diffusion/pull/124), one can match the PyTorch (CPU) RNG behavior in Swift by specifying `--rng torch`, which selects the `torchRNG` mode.
+
+ 2. PyTorch
+
+ *"Completely reproducible results are not guaranteed across PyTorch releases, individual commits, or different platforms. Furthermore, results may not be reproducible between CPU and GPU executions, even when using identical seeds."* ([source](https://pytorch.org/docs/stable/notes/randomness.html#reproducibility)).
+
+ 3. Model Function Drift During Conversion
+
+ Drift between the outputs of corresponding PyTorch and Core ML models is another potential cause. Signal integrity is tested during the conversion process (enabled via the `--check-output-correctness` argument to `python_coreml_stable_diffusion.torch2coreml`) and verified to be above a minimum [PSNR](https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio) value on random inputs. Note that this is simply a sanity check and does not guarantee this minimum PSNR across all possible inputs. Furthermore, the results are not guaranteed to be identical when executing the same Core ML models across different compute units. This is not expected to be a major source of difference, as the sample visual results indicate in [this section](#results-with-different-compute-units).
+
+ 4. Weights and Activations Data Type
+
+ When quantizing models from float32 to lower-precision data types such as float16, the generated images are [known to vary slightly](https://lambdalabs.com/blog/inference-benchmark-stable-diffusion) in semantics even when using the same PyTorch model. Core ML models generated by coremltools have float16 weights and activations by default [unless explicitly overridden](https://github.com/apple/coremltools/blob/main/coremltools/converters/_converters_entry.py#L256). This is not expected to be a major source of difference.
+
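+To make the first point concrete, here is a small self-contained demonstration; the latent shape below is illustrative:
+
+```python
+import numpy as np
+import torch
+
+seed = 93
+np.random.seed(seed)
+torch.manual_seed(seed)
+
+# This repository's Python pipeline draws the initial latents from Numpy...
+np_latents = np.random.randn(1, 4, 64, 64)
+# ...while PyTorch-based pipelines such as diffusers draw them from torch
+torch_latents = torch.randn(1, 4, 64, 64).numpy()
+
+# Same seed, different generators: the starting latents (and therefore the
+# final images) differ
+print(np.allclose(np_latents, torch_latents))  # False
+```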
+
+ Q9: The model files are very large, how do I avoid a large binary for my app?
+
+ A9: The recommended option is to prompt the user to download these assets upon first launch of the app. This keeps the app binary size independent of the Core ML models being deployed. Disclosing the size of the download to the user is extremely important as there could be data charges or storage impact that the user might not be comfortable with.
+
+
+
+ Q10: `Could not initialize NNPACK! Reason: Unsupported hardware`
+
+ A10: This warning is safe to ignore in the context of this repository.
+
+
+
+ Q11: `TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect`
+
+ A11: This warning is safe to ignore in the context of this repository.
+
+
+
+ Q12: `UserWarning: resource_tracker: There appear to be 1 leaked semaphore objects to clean up at shutdown`
+
+ A12: If this warning is printed right after `zsh: killed python -m python_coreml_stable_diffusion.torch2coreml ...`, it is highly likely that your Mac ran out of memory while converting models to Core ML. Please see [Q3](#low-mem-conversion) above for the solution.
+
+
+
+ + diff --git a/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/RECORD b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/RECORD new file mode 100644 index 00000000..53a8ba8c --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/RECORD @@ -0,0 +1,30 @@ +python_coreml_stable_diffusion-0.1.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +python_coreml_stable_diffusion-0.1.0.dist-info/LICENSE.md,sha256=JIQZkbAux5If54HQ603cmYQpkWgaZ6Rs-FlRuv2gYsg,2316 +python_coreml_stable_diffusion-0.1.0.dist-info/METADATA,sha256=_lUpsIqDtVlF7G8X0iz6VmrwwWbw1bQl1SQgWHn2puU,33120 +python_coreml_stable_diffusion-0.1.0.dist-info/RECORD,, +python_coreml_stable_diffusion-0.1.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +python_coreml_stable_diffusion-0.1.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92 +python_coreml_stable_diffusion-0.1.0.dist-info/direct_url.json,sha256=sSoaLV-iELt5XHJbVI1L2cR5thhycX_nVjy_DEpeaUE,170 +python_coreml_stable_diffusion-0.1.0.dist-info/top_level.txt,sha256=c-I_5aLPp9EFhGmVoH3pIdzaf99seReL3bpJMpmOuac,37 +python_coreml_stable_diffusion/__init__.py,sha256=ZygAIkX6Nbjag1czWdQa-yP-GM1mBE_9ss21Xh__JFc,34 +python_coreml_stable_diffusion/__pycache__/__init__.cpython-310.pyc,, +python_coreml_stable_diffusion/__pycache__/_version.cpython-310.pyc,, +python_coreml_stable_diffusion/__pycache__/chunk_mlprogram.cpython-310.pyc,, +python_coreml_stable_diffusion/__pycache__/controlnet.cpython-310.pyc,, +python_coreml_stable_diffusion/__pycache__/coreml_model.cpython-310.pyc,, +python_coreml_stable_diffusion/__pycache__/layer_norm.cpython-310.pyc,, +python_coreml_stable_diffusion/__pycache__/pipeline.cpython-310.pyc,, +python_coreml_stable_diffusion/__pycache__/torch2coreml.cpython-310.pyc,, +python_coreml_stable_diffusion/__pycache__/unet.cpython-310.pyc,, +python_coreml_stable_diffusion/_version.py,sha256=QTYqXqSTHFRkM9TEgpDFcHvwLbvqHDqvqfQ9EiXkcAM,23 +python_coreml_stable_diffusion/chunk_mlprogram.py,sha256=ozrlOyq1919a6K29E1lw51DiWfPLKmoVmhQdG6iUvMc,12168 +python_coreml_stable_diffusion/controlnet.py,sha256=ccviLEpauaPZlnyNDRcLLjygaWcD7h4v281fc1vzwTk,8976 +python_coreml_stable_diffusion/coreml_model.py,sha256=Z115OM7t70TXf-CL9w8O7yh2NlU68K4urfHEFezGNR0,3918 +python_coreml_stable_diffusion/layer_norm.py,sha256=78mpmGHnQBO_jjyvrt3m0D8A6eb-uK5hr0wzzZFNsjw,3001 +python_coreml_stable_diffusion/pipeline.py,sha256=wo7dDCyh9gd4bmD18hHd1t7B9t8Ryk5IWsjuI-7iMYk,25751 +python_coreml_stable_diffusion/torch2coreml.py,sha256=KgeiBuUoU0egVBLKEtPMbVK-9hdy1WszbmLjdWqtRiU,54327 +python_coreml_stable_diffusion/unet.py,sha256=Zv72BBiC5GID13GZzwcPBpOqD2OxYhqDWXqtvDwWD-8,38574 +tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +tests/__pycache__/__init__.cpython-310.pyc,, +tests/__pycache__/test_stable_diffusion.cpython-310.pyc,, +tests/test_stable_diffusion.py,sha256=bRZUK3joxhgSoPKAbwMWn6GG_G-V4smOrPzbKozIfcw,15965 diff --git a/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/REQUESTED b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/REQUESTED new file mode 100644 index 00000000..e69de29b diff --git a/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/WHEEL b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/WHEEL new file mode 100644 
index 00000000..57e3d840 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.38.4) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/direct_url.json b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/direct_url.json new file mode 100644 index 00000000..5f116baa --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/direct_url.json @@ -0,0 +1 @@ +{"url": "https://github.com/apple/ml-stable-diffusion", "vcs_info": {"commit_id": "940dba02ee6dbdd0ae1238dcdef6cd259b345603", "requested_revision": "main", "vcs": "git"}} \ No newline at end of file diff --git a/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/top_level.txt b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/top_level.txt new file mode 100644 index 00000000..6d9c39a4 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion-0.1.0.dist-info/top_level.txt @@ -0,0 +1,2 @@ +python_coreml_stable_diffusion +tests diff --git a/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/__init__.py b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/__init__.py new file mode 100644 index 00000000..8dee4bf8 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/__init__.py @@ -0,0 +1 @@ +from ._version import __version__ diff --git a/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/_version.py b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/_version.py new file mode 100644 index 00000000..3f5c4a7d --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/_version.py @@ -0,0 +1 @@ +__version__ = "0.1.0" diff --git a/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/chunk_mlprogram.py b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/chunk_mlprogram.py new file mode 100644 index 00000000..1aef76c6 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/chunk_mlprogram.py @@ -0,0 +1,337 @@ +# +# For licensing see accompanying LICENSE.md file. +# Copyright (C) 2022 Apple Inc. All Rights Reserved. 
+# + +import argparse +from collections import OrderedDict + +import coremltools as ct +from coremltools.converters.mil import Block, Program, Var +from coremltools.converters.mil.frontend.milproto.load import load as _milproto_to_pymil +from coremltools.converters.mil.mil import Builder as mb +from coremltools.converters.mil.mil import Placeholder +from coremltools.converters.mil.mil import types as types +from coremltools.converters.mil.mil.passes.helper import block_context_manager +from coremltools.converters.mil.mil.passes.pass_registry import PASS_REGISTRY +from coremltools.converters.mil.testing_utils import random_gen_input_feature_type + +import gc + +import logging + +logging.basicConfig() +logger = logging.getLogger(__name__) +logger.setLevel(logging.INFO) + +import numpy as np +import os +from python_coreml_stable_diffusion import torch2coreml +import shutil +import time + + +def _verify_output_correctness_of_chunks(full_model, first_chunk_model, + second_chunk_model): + """ Verifies the end-to-end output correctness of full (original) model versus chunked models + """ + # Generate inputs for first chunk and full model + input_dict = {} + for input_desc in full_model._spec.description.input: + input_dict[input_desc.name] = random_gen_input_feature_type(input_desc) + + # Generate outputs for first chunk and full model + outputs_from_full_model = full_model.predict(input_dict) + outputs_from_first_chunk_model = first_chunk_model.predict(input_dict) + + # Prepare inputs for second chunk model from first chunk's outputs and regular inputs + second_chunk_input_dict = {} + for input_desc in second_chunk_model._spec.description.input: + if input_desc.name in outputs_from_first_chunk_model: + second_chunk_input_dict[ + input_desc.name] = outputs_from_first_chunk_model[ + input_desc.name] + else: + second_chunk_input_dict[input_desc.name] = input_dict[ + input_desc.name] + + # Generate output for second chunk model + outputs_from_second_chunk_model = second_chunk_model.predict( + second_chunk_input_dict) + + # Verify correctness across all outputs from second chunk and full model + for out_name in outputs_from_full_model.keys(): + torch2coreml.report_correctness( + original_outputs=outputs_from_full_model[out_name], + final_outputs=outputs_from_second_chunk_model[out_name], + log_prefix=f"{out_name}") + + +def _load_prog_from_mlmodel(model): + """ Load MIL Program from an MLModel + """ + model_spec = model.get_spec() + start_ = time.time() + logger.info( + "Loading MLModel object into a MIL Program object (including the weights).." 
+ ) + prog = _milproto_to_pymil( + model_spec=model_spec, + specification_version=model_spec.specificationVersion, + file_weights_dir=model.weights_dir, + ) + logger.info(f"Program loaded in {time.time() - start_:.1f} seconds") + + return prog + + +def _get_op_idx_split_location(prog: Program): + """ Find the op that approximately bisects the graph as measure by weights size on each side + """ + main_block = prog.functions["main"] + total_size_in_mb = 0 + + for op in main_block.operations: + if op.op_type == "const" and isinstance(op.val.val, np.ndarray): + size_in_mb = op.val.val.size * op.val.val.itemsize / (1024 * 1024) + total_size_in_mb += size_in_mb + half_size = total_size_in_mb / 2 + + # Find the first non const op (single child), where the total cumulative size exceeds + # the half size for the first time + cumulative_size_in_mb = 0 + for op in main_block.operations: + if op.op_type == "const" and isinstance(op.val.val, np.ndarray): + size_in_mb = op.val.val.size * op.val.val.itemsize / (1024 * 1024) + cumulative_size_in_mb += size_in_mb + + if (cumulative_size_in_mb > half_size and op.op_type != "const" + and len(op.outputs) == 1 + and len(op.outputs[0].child_ops) == 1): + op_idx = main_block.operations.index(op) + return op_idx, cumulative_size_in_mb, total_size_in_mb + + +def _get_first_chunk_outputs(block, op_idx): + # Get the list of all vars that go across from first program (all ops from 0 to op_idx (inclusive)) + # to the second program (all ops from op_idx+1 till the end). These all vars need to be made the output + # of the first program and the input of the second program + boundary_vars = set() + for i in range(op_idx + 1): + op = block.operations[i] + for var in op.outputs: + if var.val is None: # only consider non const vars + for child_op in var.child_ops: + child_op_idx = block.operations.index(child_op) + if child_op_idx > op_idx: + boundary_vars.add(var) + return list(boundary_vars) + + +@block_context_manager +def _add_fp32_casts(block, boundary_vars): + new_boundary_vars = [] + for var in boundary_vars: + if var.dtype != types.fp16: + new_boundary_vars.append(var) + else: + fp32_var = mb.cast(x=var, dtype="fp32", name=var.name) + new_boundary_vars.append(fp32_var) + return new_boundary_vars + + +def _make_first_chunk_prog(prog, op_idx): + """ Build first chunk by declaring early outputs and removing unused subgraph + """ + block = prog.functions["main"] + boundary_vars = _get_first_chunk_outputs(block, op_idx) + + # Due to possible numerical issues, cast any fp16 var to fp32 + new_boundary_vars = _add_fp32_casts(block, boundary_vars) + + block.outputs.clear() + block.set_outputs(new_boundary_vars) + PASS_REGISTRY["common::dead_code_elimination"](prog) + return prog + + +def _make_second_chunk_prog(prog, op_idx): + """ Build second chunk by rebuilding a pristine MIL Program from MLModel + """ + block = prog.functions["main"] + block.opset_version = ct.target.iOS16 + + # First chunk outputs are second chunk inputs (e.g. skip connections) + boundary_vars = _get_first_chunk_outputs(block, op_idx) + + # This op will not be included in this program. 
Its output var will be made into an input
+    boundary_op = block.operations[op_idx]
+
+    # Add all boundary ops as inputs
+    with block:
+        for var in boundary_vars:
+            new_placeholder = Placeholder(
+                sym_shape=var.shape,
+                dtype=var.dtype if var.dtype != types.fp16 else types.fp32,
+                name=var.name,
+            )
+
+            block._input_dict[
+                new_placeholder.outputs[0].name] = new_placeholder.outputs[0]
+
+            block.function_inputs = tuple(block._input_dict.values())
+            new_var = None
+            if var.dtype == types.fp16:
+                new_var = mb.cast(x=new_placeholder.outputs[0],
+                                  dtype="fp16",
+                                  before_op=var.op)
+            else:
+                new_var = new_placeholder.outputs[0]
+
+            block.replace_uses_of_var_after_op(
+                anchor_op=boundary_op,
+                old_var=var,
+                new_var=new_var,
+            )
+
+    PASS_REGISTRY["common::dead_code_elimination"](prog)
+
+    # Remove any unused inputs
+    new_input_dict = OrderedDict()
+    for k, v in block._input_dict.items():
+        if len(v.child_ops) > 0:
+            new_input_dict[k] = v
+    block._input_dict = new_input_dict
+    block.function_inputs = tuple(block._input_dict.values())
+
+    return prog
+
+
+def main(args):
+    os.makedirs(args.o, exist_ok=True)
+
+    # Check filename extension
+    mlpackage_name = os.path.basename(args.mlpackage_path)
+    name, ext = os.path.splitext(mlpackage_name)
+    assert ext == ".mlpackage", f"`--mlpackage-path` {args.mlpackage_path} is not an .mlpackage file"
+
+    # Load CoreML model
+    logger.info("Loading model from {}".format(args.mlpackage_path))
+    start_ = time.time()
+    model = ct.models.MLModel(
+        args.mlpackage_path,
+        compute_units=ct.ComputeUnit.CPU_ONLY,
+    )
+    logger.info(
+        f"Loading {args.mlpackage_path} took {time.time() - start_:.1f} seconds"
+    )
+
+    # Load the MIL Program from MLModel
+    prog = _load_prog_from_mlmodel(model)
+
+    # Compute the incision point by bisecting the program based on weights size
+    op_idx, first_chunk_weights_size, total_weights_size = _get_op_idx_split_location(
+        prog)
+    main_block = prog.functions["main"]
+    incision_op = main_block.operations[op_idx]
+    logger.info(f"{args.mlpackage_path} will be chunked into two pieces.")
+    logger.info(
+        f"The incision op: name={incision_op.name}, type={incision_op.op_type}, index={op_idx}/{len(main_block.operations)}"
+    )
+    logger.info(f"First chunk size = {first_chunk_weights_size:.2f} MB")
+    logger.info(
+        f"Second chunk size = {total_weights_size - first_chunk_weights_size:.2f} MB"
+    )
+
+    # Build first chunk (in-place modifies prog by declaring early exits and removing unused subgraph)
+    prog_chunk1 = _make_first_chunk_prog(prog, op_idx)
+
+    # Build the second chunk
+    prog_chunk2 = _make_second_chunk_prog(_load_prog_from_mlmodel(model),
+                                          op_idx)
+
+    if not args.check_output_correctness:
+        # Original model no longer needed in memory
+        del model
+        gc.collect()
+
+    # Convert the MIL Program objects into MLModels
+    logger.info("Converting the two programs")
+    model_chunk1 = ct.convert(
+        prog_chunk1,
+        convert_to="mlprogram",
+        compute_units=ct.ComputeUnit.CPU_ONLY,
+        minimum_deployment_target=ct.target.iOS16,
+    )
+    del prog_chunk1
+    gc.collect()
+    logger.info("Conversion of first chunk done.")
+
+    model_chunk2 = ct.convert(
+        prog_chunk2,
+        convert_to="mlprogram",
+        compute_units=ct.ComputeUnit.CPU_ONLY,
+        minimum_deployment_target=ct.target.iOS16,
+    )
+    del prog_chunk2
+    gc.collect()
+    logger.info("Conversion of second chunk done.")
+
+    # Verify output correctness
+    if args.check_output_correctness:
+        logger.info("Verifying output correctness of chunks")
+        _verify_output_correctness_of_chunks(
+            full_model=model,
+            first_chunk_model=model_chunk1,
+            second_chunk_model=model_chunk2,
+        )
+
+    # Remove original (non-chunked) model if requested
+    if args.remove_original:
+        logger.info(
+            f"Removing original (non-chunked) model at {args.mlpackage_path}")
+        shutil.rmtree(args.mlpackage_path)
+        logger.info("Done.")
+
+    # Save the chunked models to disk
+    out_path_chunk1 = os.path.join(args.o, name + "_chunk1.mlpackage")
+    out_path_chunk2 = os.path.join(args.o, name + "_chunk2.mlpackage")
+
+    logger.info(
+        f"Saving chunks in {args.o} with the suffixes _chunk1.mlpackage and _chunk2.mlpackage"
+    )
+    model_chunk1.save(out_path_chunk1)
+    model_chunk2.save(out_path_chunk2)
+    logger.info("Done.")
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--mlpackage-path",
+        required=True,
+        help=
+        "Path to the mlpackage file to be split into two mlpackages of approximately the same file size.",
+    )
+    parser.add_argument(
+        "-o",
+        required=True,
+        help=
+        "Path to output directory where the two model chunks should be saved.",
+    )
+    parser.add_argument(
+        "--remove-original",
+        action="store_true",
+        help=
+        "If specified, removes the original (non-chunked) model to avoid duplicating storage."
+    )
+    parser.add_argument(
+        "--check-output-correctness",
+        action="store_true",
+        help=
+        ("If specified, compares the outputs of the original Core ML model with those of the pipelined Core ML model chunks and reports PSNR in dB. "
+         "Enabling this feature uses more memory. Disable it if your machine runs out of memory."
+         ))
+
+    args = parser.parse_args()
+    main(args)
diff --git a/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/controlnet.py b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/controlnet.py new file mode 100644 index 00000000..4482e7bf --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/controlnet.py @@ -0,0 +1,244 @@ +# +# For licensing see accompanying LICENSE.md file. +# Copyright (C) 2022 Apple Inc. All Rights Reserved.
+# + +from diffusers.configuration_utils import ConfigMixin, register_to_config +from diffusers import ModelMixin + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .unet import Timesteps, TimestepEmbedding, get_down_block, UNetMidBlock2DCrossAttn, linear_to_conv2d_map + +class ControlNetConditioningEmbedding(nn.Module): + + def __init__( + self, + conditioning_embedding_channels, + conditioning_channels=3, + block_out_channels=(16, 32, 96, 256), + ): + super().__init__() + + self.conv_in = nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1) + + self.blocks = nn.ModuleList([]) + + for i in range(len(block_out_channels) - 1): + channel_in = block_out_channels[i] + channel_out = block_out_channels[i + 1] + self.blocks.append(nn.Conv2d(channel_in, channel_in, kernel_size=3, padding=1)) + self.blocks.append(nn.Conv2d(channel_in, channel_out, kernel_size=3, padding=1, stride=2)) + + self.conv_out = nn.Conv2d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1) + + def forward(self, conditioning): + embedding = self.conv_in(conditioning) + embedding = F.silu(embedding) + + for block in self.blocks: + embedding = block(embedding) + embedding = F.silu(embedding) + + embedding = self.conv_out(embedding) + + return embedding + +class ControlNetModel(ModelMixin, ConfigMixin): + + @register_to_config + def __init__( + self, + in_channels=4, + flip_sin_to_cos=True, + freq_shift=0, + down_block_types=( + "CrossAttnDownBlock2D", + "CrossAttnDownBlock2D", + "CrossAttnDownBlock2D", + "DownBlock2D", + ), + only_cross_attention=False, + block_out_channels=(320, 640, 1280, 1280), + layers_per_block=2, + downsample_padding=1, + mid_block_scale_factor=1, + act_fn="silu", + norm_num_groups=32, + norm_eps=1e-5, + cross_attention_dim=1280, + attention_head_dim=8, + use_linear_projection=False, + upcast_attention=False, + resnet_time_scale_shift="default", + conditioning_embedding_out_channels=(16, 32, 96, 256), + **kwargs, + ): + super().__init__() + + # Check inputs + if len(block_out_channels) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." + ) + + if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}." + ) + + if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}." 
+ ) + + self._register_load_state_dict_pre_hook(linear_to_conv2d_map) + + # input + conv_in_kernel = 3 + conv_in_padding = (conv_in_kernel - 1) // 2 + self.conv_in = nn.Conv2d( + in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding + ) + + # time + time_embed_dim = block_out_channels[0] * 4 + + self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) + timestep_input_dim = block_out_channels[0] + + self.time_embedding = TimestepEmbedding( + timestep_input_dim, + time_embed_dim, + ) + + # control net conditioning embedding + self.controlnet_cond_embedding = ControlNetConditioningEmbedding( + conditioning_embedding_channels=block_out_channels[0], + block_out_channels=conditioning_embedding_out_channels, + ) + + self.down_blocks = nn.ModuleList([]) + self.controlnet_down_blocks = nn.ModuleList([]) + + if isinstance(only_cross_attention, bool): + only_cross_attention = [only_cross_attention] * len(down_block_types) + + if isinstance(attention_head_dim, int): + attention_head_dim = (attention_head_dim,) * len(down_block_types) + + # down + output_channel = block_out_channels[0] + + controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1) + self.controlnet_down_blocks.append(controlnet_block) + + for i, down_block_type in enumerate(down_block_types): + input_channel = output_channel + output_channel = block_out_channels[i] + is_final_block = i == len(block_out_channels) - 1 + + down_block = get_down_block( + down_block_type, + num_layers=layers_per_block, + in_channels=input_channel, + out_channels=output_channel, + temb_channels=time_embed_dim, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + cross_attention_dim=cross_attention_dim, + attn_num_head_channels=attention_head_dim[i], + downsample_padding=downsample_padding, + ) + self.down_blocks.append(down_block) + + for _ in range(layers_per_block): + controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1) + self.controlnet_down_blocks.append(controlnet_block) + + if not is_final_block: + controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1) + self.controlnet_down_blocks.append(controlnet_block) + + # mid + mid_block_channel = block_out_channels[-1] + + controlnet_block = nn.Conv2d(mid_block_channel, mid_block_channel, kernel_size=1) + self.controlnet_mid_block = controlnet_block + + self.mid_block = UNetMidBlock2DCrossAttn( + in_channels=mid_block_channel, + temb_channels=time_embed_dim, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + output_scale_factor=mid_block_scale_factor, + resnet_time_scale_shift=resnet_time_scale_shift, + cross_attention_dim=cross_attention_dim, + attn_num_head_channels=attention_head_dim[-1], + resnet_groups=norm_num_groups, + use_linear_projection=use_linear_projection, + upcast_attention=upcast_attention, + ) + + def get_num_residuals(self): + num_res = 2 # initial sample + mid block + for down_block in self.down_blocks: + num_res += len(down_block.resnets) + if hasattr(down_block, "downsamplers") and down_block.downsamplers is not None: + num_res += len(down_block.downsamplers) + return num_res + + def forward( + self, + sample, + timestep, + encoder_hidden_states, + controlnet_cond, + ): + # 1. time + t_emb = self.time_proj(timestep) + emb = self.time_embedding(t_emb) + + # 2. pre-process + sample = self.conv_in(sample) + + controlnet_cond = self.controlnet_cond_embedding(controlnet_cond) + + sample += controlnet_cond + + # 3. 
down + down_block_res_samples = (sample,) + for downsample_block in self.down_blocks: + if hasattr(downsample_block, "attentions") and downsample_block.attentions is not None: + sample, res_samples = downsample_block( + hidden_states=sample, + temb=emb, + encoder_hidden_states=encoder_hidden_states, + ) + else: + sample, res_samples = downsample_block(hidden_states=sample, temb=emb) + + down_block_res_samples += res_samples + + # 4. mid + if self.mid_block is not None: + sample = self.mid_block( + sample, + emb, + encoder_hidden_states=encoder_hidden_states, + ) + + # 5. Control net blocks + controlnet_down_block_res_samples = () + + for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks): + down_block_res_sample = controlnet_block(down_block_res_sample) + controlnet_down_block_res_samples += (down_block_res_sample,) + + down_block_res_samples = controlnet_down_block_res_samples + + mid_block_res_sample = self.controlnet_mid_block(sample) + + return down_block_res_samples, mid_block_res_sample \ No newline at end of file diff --git a/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/coreml_model.py b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/coreml_model.py new file mode 100644 index 00000000..ce0375b9 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/coreml_model.py @@ -0,0 +1,119 @@ +# +# For licensing see accompanying LICENSE.md file. +# Copyright (C) 2022 Apple Inc. All Rights Reserved. +# + +import coremltools as ct + +import logging + +logging.basicConfig() +logger = logging.getLogger(__name__) +logger.setLevel(logging.INFO) + +import numpy as np + +import os +import time + + +class CoreMLModel: + """ Wrapper for running CoreML models using coremltools + """ + + def __init__(self, model_path, compute_unit): + assert os.path.exists(model_path) and model_path.endswith(".mlpackage") + + logger.info(f"Loading {model_path}") + + start = time.time() + self.model = ct.models.MLModel( + model_path, compute_units=ct.ComputeUnit[compute_unit]) + load_time = time.time() - start + logger.info(f"Done. Took {load_time:.1f} seconds.") + + if load_time > LOAD_TIME_INFO_MSG_TRIGGER: + logger.info( + "Loading a CoreML model through coremltools triggers compilation every time. " + "The Swift package we provide uses precompiled Core ML models (.mlmodelc) to avoid compile-on-load." 
+ ) + + + DTYPE_MAP = { + 65552: np.float16, + 65568: np.float32, + 131104: np.int32, + } + + self.expected_inputs = { + input_tensor.name: { + "shape": tuple(input_tensor.type.multiArrayType.shape), + "dtype": DTYPE_MAP[input_tensor.type.multiArrayType.dataType], + } + for input_tensor in self.model._spec.description.input + } + + def _verify_inputs(self, **kwargs): + for k, v in kwargs.items(): + if k in self.expected_inputs: + if not isinstance(v, np.ndarray): + raise TypeError( + f"Expected numpy.ndarray, got {v} for input: {k}") + + expected_dtype = self.expected_inputs[k]["dtype"] + if not v.dtype == expected_dtype: + raise TypeError( + f"Expected dtype {expected_dtype}, got {v.dtype} for input: {k}" + ) + + expected_shape = self.expected_inputs[k]["shape"] + if not v.shape == expected_shape: + raise TypeError( + f"Expected shape {expected_shape}, got {v.shape} for input: {k}" + ) + else: + raise ValueError("Received unexpected input kwarg: {k}") + + def __call__(self, **kwargs): + self._verify_inputs(**kwargs) + return self.model.predict(kwargs) + + +LOAD_TIME_INFO_MSG_TRIGGER = 10 # seconds + + +def _load_mlpackage(submodule_name, mlpackages_dir, model_version, + compute_unit): + """ Load Core ML (mlpackage) models from disk (As exported by torch2coreml.py) + """ + logger.info(f"Loading {submodule_name} mlpackage") + + fname = f"Stable_Diffusion_version_{model_version}_{submodule_name}.mlpackage".replace( + "/", "_") + mlpackage_path = os.path.join(mlpackages_dir, fname) + + if not os.path.exists(mlpackage_path): + raise FileNotFoundError( + f"{submodule_name} CoreML model doesn't exist at {mlpackage_path}") + + return CoreMLModel(mlpackage_path, compute_unit) + +def _load_mlpackage_controlnet(mlpackages_dir, model_version, compute_unit): + """ Load Core ML (mlpackage) models from disk (As exported by torch2coreml.py) + """ + model_name = model_version.replace("/", "_") + + logger.info(f"Loading controlnet_{model_name} mlpackage") + + fname = f"ControlNet_{model_name}.mlpackage" + + mlpackage_path = os.path.join(mlpackages_dir, fname) + + if not os.path.exists(mlpackage_path): + raise FileNotFoundError( + f"controlnet_{model_name} CoreML model doesn't exist at {mlpackage_path}") + + return CoreMLModel(mlpackage_path, compute_unit) + +def get_available_compute_units(): + return tuple(cu for cu in ct.ComputeUnit._member_names_) diff --git a/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/layer_norm.py b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/layer_norm.py new file mode 100644 index 00000000..677758e1 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/layer_norm.py @@ -0,0 +1,80 @@ +# +# For licensing see accompanying LICENSE.md file. +# Copyright (C) 2022 Apple Inc. All Rights Reserved. +# + +import torch +import torch.nn as nn + + +# Reference: https://github.com/apple/ml-ane-transformers/blob/main/ane_transformers/reference/layer_norm.py +class LayerNormANE(nn.Module): + """ LayerNorm optimized for Apple Neural Engine (ANE) execution + + Note: This layer only supports normalization over the final dim. It expects `num_channels` + as an argument and not `normalized_shape` which is used by `torch.nn.LayerNorm`. + """ + + def __init__(self, + num_channels, + clip_mag=None, + eps=1e-5, + elementwise_affine=True): + """ + Args: + num_channels: Number of channels (C) where the expected input data format is BC1S. S stands for sequence length. 
+ clip_mag: Optional float value to use for clamping the input range before layer norm is applied. + If specified, helps reduce risk of overflow. + eps: Small value to avoid dividing by zero + elementwise_affine: If true, adds learnable channel-wise shift (bias) and scale (weight) parameters + """ + super().__init__() + # Principle 1: Picking the Right Data Format (machinelearning.apple.com/research/apple-neural-engine) + self.expected_rank = len("BC1S") + + self.num_channels = num_channels + self.eps = eps + self.clip_mag = clip_mag + self.elementwise_affine = elementwise_affine + + if self.elementwise_affine: + self.weight = nn.Parameter(torch.Tensor(num_channels)) + self.bias = nn.Parameter(torch.Tensor(num_channels)) + + self._reset_parameters() + + def _reset_parameters(self): + if self.elementwise_affine: + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + + def forward(self, inputs): + input_rank = len(inputs.size()) + + # Principle 1: Picking the Right Data Format (machinelearning.apple.com/research/apple-neural-engine) + # Migrate the data format from BSC to BC1S (most conducive to ANE) + if input_rank == 3 and inputs.size(2) == self.num_channels: + inputs = inputs.transpose(1, 2).unsqueeze(2) + input_rank = len(inputs.size()) + + assert input_rank == self.expected_rank + assert inputs.size(1) == self.num_channels + + if self.clip_mag is not None: + inputs.clamp_(-self.clip_mag, self.clip_mag) + + channels_mean = inputs.mean(dim=1, keepdims=True) + + zero_mean = inputs - channels_mean + + zero_mean_sq = zero_mean * zero_mean + + denom = (zero_mean_sq.mean(dim=1, keepdims=True) + self.eps).rsqrt() + + out = zero_mean * denom + + if self.elementwise_affine: + out = (out + self.bias.view(1, self.num_channels, 1, 1) + ) * self.weight.view(1, self.num_channels, 1, 1) + + return out diff --git a/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/pipeline.py b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/pipeline.py new file mode 100644 index 00000000..6a5a47bc --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/pipeline.py @@ -0,0 +1,656 @@ +# +# For licensing see accompanying LICENSE.md file. +# Copyright (C) 2022 Apple Inc. All Rights Reserved. 
+# + +import argparse + +from diffusers.pipeline_utils import DiffusionPipeline +from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput +from diffusers.schedulers import ( + DDIMScheduler, + DPMSolverMultistepScheduler, + EulerAncestralDiscreteScheduler, + EulerDiscreteScheduler, + LMSDiscreteScheduler, + PNDMScheduler, +) +from diffusers.schedulers.scheduling_utils import SchedulerMixin + +import gc +import inspect + +import logging + +logging.basicConfig() +logger = logging.getLogger(__name__) +logger.setLevel(logging.INFO) + +import numpy as np +import os + +from python_coreml_stable_diffusion.coreml_model import ( + CoreMLModel, + _load_mlpackage, + _load_mlpackage_controlnet, + get_available_compute_units, +) + +import time +import torch # Only used for `torch.from_tensor` in `pipe.scheduler.step()` +from transformers import CLIPFeatureExtractor, CLIPTokenizer +from typing import List, Optional, Union +from PIL import Image + + +class CoreMLStableDiffusionPipeline(DiffusionPipeline): + """ Core ML version of + `diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline` + """ + + def __init__( + self, + text_encoder: CoreMLModel, + unet: CoreMLModel, + vae_decoder: CoreMLModel, + feature_extractor: CLIPFeatureExtractor, + safety_checker: Optional[CoreMLModel], + scheduler: Union[DDIMScheduler, + DPMSolverMultistepScheduler, + EulerAncestralDiscreteScheduler, + EulerDiscreteScheduler, + LMSDiscreteScheduler, + PNDMScheduler], + tokenizer: CLIPTokenizer, + controlnet: Optional[List[CoreMLModel]], + ): + super().__init__() + + # Register non-Core ML components of the pipeline similar to the original pipeline + self.register_modules( + tokenizer=tokenizer, + scheduler=scheduler, + feature_extractor=feature_extractor, + ) + + if safety_checker is None: + # Reproduce original warning: + # https://github.com/huggingface/diffusers/blob/v0.9.0/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py#L119 + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." 
+ ) + + # Register Core ML components of the pipeline + self.safety_checker = safety_checker + self.text_encoder = text_encoder + self.unet = unet + self.unet.in_channels = self.unet.expected_inputs["sample"]["shape"][1] + + self.controlnet = controlnet + + self.vae_decoder = vae_decoder + + VAE_DECODER_UPSAMPLE_FACTOR = 8 + + # In PyTorch, users can determine the tensor shapes dynamically by default + # In CoreML, tensors have static shapes unless flexible shapes were used during export + # See https://coremltools.readme.io/docs/flexible-inputs + latent_h, latent_w = self.unet.expected_inputs["sample"]["shape"][2:] + self.height = latent_h * VAE_DECODER_UPSAMPLE_FACTOR + self.width = latent_w * VAE_DECODER_UPSAMPLE_FACTOR + + logger.info( + f"Stable Diffusion configured to generate {self.height}x{self.width} images" + ) + + def _encode_prompt(self, prompt, num_images_per_prompt, + do_classifier_free_guidance, negative_prompt): + batch_size = len(prompt) if isinstance(prompt, list) else 1 + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + return_tensors="np", + ) + text_input_ids = text_inputs.input_ids + + if text_input_ids.shape[-1] > self.tokenizer.model_max_length: + removed_text = self.tokenizer.batch_decode( + text_input_ids[:, self.tokenizer.model_max_length:]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}") + text_input_ids = text_input_ids[:, :self.tokenizer. + model_max_length] + + text_embeddings = self.text_encoder( + input_ids=text_input_ids.astype(np.float32))["last_hidden_state"] + + if do_classifier_free_guidance: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + "`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + " {type(prompt)}.") + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] * batch_size + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`.") + else: + uncond_tokens = negative_prompt + + max_length = text_input_ids.shape[-1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="np", + ) + + uncond_embeddings = self.text_encoder( + input_ids=uncond_input.input_ids.astype( + np.float32))["last_hidden_state"] + + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + text_embeddings = np.concatenate( + [uncond_embeddings, text_embeddings]) + + text_embeddings = text_embeddings.transpose(0, 2, 1)[:, :, None, :] + + return text_embeddings + + def run_controlnet(self, + sample, + timestep, + encoder_hidden_states, + controlnet_cond, + output_dtype=np.float16): + if not self.controlnet: + raise ValueError( + "Conditions for controlnet are given but the pipeline has no controlnet modules") + + for i, (module, cond) in enumerate(zip(self.controlnet, controlnet_cond)): + module_outputs = module( + sample=sample.astype(np.float16), + timestep=timestep.astype(np.float16), + encoder_hidden_states=encoder_hidden_states.astype(np.float16), + controlnet_cond=cond.astype(np.float16), + ) + if i == 0: + outputs = module_outputs + else: + for key in outputs.keys(): + outputs[key] += module_outputs[key] + + outputs = {k: v.astype(output_dtype) for k, v in outputs.items()} + + return outputs + + def run_safety_checker(self, image): + if self.safety_checker is not None: + safety_checker_input = self.feature_extractor( + self.numpy_to_pil(image), + return_tensors="np", + ) + + safety_checker_outputs = self.safety_checker( + clip_input=safety_checker_input.pixel_values.astype( + np.float16), + images=image.astype(np.float16), + adjustment=np.array([0.]).astype( + np.float16), # defaults to 0 in original pipeline + ) + + # Unpack dict + has_nsfw_concept = safety_checker_outputs["has_nsfw_concepts"] + image = safety_checker_outputs["filtered_images"] + concept_scores = safety_checker_outputs["concept_scores"] + + logger.info( + f"Generated image has nsfw concept={has_nsfw_concept.any()}") + else: + has_nsfw_concept = None + + return image, has_nsfw_concept + + def decode_latents(self, latents): + latents = 1 / 0.18215 * latents + image = self.vae_decoder(z=latents.astype(np.float16))["image"] + image = np.clip(image / 2 + 0.5, 0, 1) + image = image.transpose((0, 2, 3, 1)) + + return image + + def prepare_latents(self, + batch_size, + num_channels_latents, + height, + width, + latents=None): + latents_shape = (batch_size, num_channels_latents, self.height // 8, + self.width // 8) + if latents is None: + latents = np.random.randn(*latents_shape).astype(np.float16) + elif latents.shape != latents_shape: + raise ValueError( + f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" + ) + + latents = latents * self.scheduler.init_noise_sigma + + return latents + + def prepare_control_cond(self, + controlnet_cond, + do_classifier_free_guidance, + batch_size, + num_images_per_prompt): + processed_cond_list = [] + for cond in controlnet_cond: + cond = np.stack([cond] * batch_size * num_images_per_prompt) + if do_classifier_free_guidance: + cond = np.concatenate([cond] * 2) + processed_cond_list.append(cond) + return processed_cond_list + + def check_inputs(self, prompt, height, width, callback_steps): + if height != self.height or width != self.width: + logger.warning( + "`height` and `width` dimensions (of the output image tensor) are fixed when exporting the Core ML models " \ + "unless flexible shapes are used during export (https://coremltools.readme.io/docs/flexible-inputs). 
" \ + "This pipeline was provided with Core ML models that generate {self.height}x{self.width} images (user requested {height}x{width})" + ) + + if not isinstance(prompt, str) and not isinstance(prompt, list): + raise ValueError( + f"`prompt` has to be of type `str` or `list` but is {type(prompt)}" + ) + + if height % 8 != 0 or width % 8 != 0: + raise ValueError( + f"`height` and `width` have to be divisible by 8 but are {height} and {width}." + ) + + if (callback_steps is None) or (callback_steps is not None and + (not isinstance(callback_steps, int) + or callback_steps <= 0)): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}.") + + def prepare_extra_step_kwargs(self, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set( + inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + return extra_step_kwargs + + def __call__( + self, + prompt, + height=512, + width=512, + num_inference_steps=50, + guidance_scale=7.5, + negative_prompt=None, + num_images_per_prompt=1, + eta=0.0, + latents=None, + output_type="pil", + return_dict=True, + callback=None, + callback_steps=1, + controlnet_cond=None, + **kwargs, + ): + # 1. Check inputs. Raise error if not correct + self.check_inputs(prompt, height, width, callback_steps) + + # 2. Define call parameters + batch_size = 1 if isinstance(prompt, str) else len(prompt) + if batch_size > 1 or num_images_per_prompt > 1: + raise NotImplementedError( + "For batched generation of multiple images and/or multiple prompts, please refer to the Swift package." + ) + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + text_embeddings = self._encode_prompt( + prompt, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + ) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables and controlnet cond + num_channels_latents = self.unet.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + latents, + ) + + if controlnet_cond: + controlnet_cond = self.prepare_control_cond( + controlnet_cond, + do_classifier_free_guidance, + batch_size, + num_images_per_prompt, + ) + + # 6. Prepare extra step kwargs + extra_step_kwargs = self.prepare_extra_step_kwargs(eta) + + # 7. 
Denoising loop + for i, t in enumerate(self.progress_bar(timesteps)): + # expand the latents if we are doing classifier free guidance + latent_model_input = np.concatenate( + [latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input( + latent_model_input, t) + + # controlnet + if controlnet_cond: + additional_residuals = self.run_controlnet( + sample=latent_model_input, + timestep=np.array([t, t]), + encoder_hidden_states=text_embeddings, + controlnet_cond=controlnet_cond, + ) + else: + additional_residuals = {} + + # predict the noise residual + noise_pred = self.unet( + sample=latent_model_input.astype(np.float16), + timestep=np.array([t, t], np.float16), + encoder_hidden_states=text_embeddings.astype(np.float16), + **additional_residuals, + )["noise_pred"] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2) + noise_pred = noise_pred_uncond + guidance_scale * ( + noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(torch.from_numpy(noise_pred), + t, + torch.from_numpy(latents), + **extra_step_kwargs, + ).prev_sample.numpy() + + # call the callback, if provided + if callback is not None and i % callback_steps == 0: + callback(i, t, latents) + + # 8. Post-processing + image = self.decode_latents(latents) + + # 9. Run safety checker + image, has_nsfw_concept = self.run_safety_checker(image) + + # 10. Convert to PIL + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput( + images=image, nsfw_content_detected=has_nsfw_concept) + + +def get_available_schedulers(): + schedulers = {} + for scheduler in [DDIMScheduler, + DPMSolverMultistepScheduler, + EulerAncestralDiscreteScheduler, + EulerDiscreteScheduler, + LMSDiscreteScheduler, + PNDMScheduler]: + schedulers[scheduler().__class__.__name__.replace("Scheduler", "")] = scheduler + return schedulers + +SCHEDULER_MAP = get_available_schedulers() + +def get_coreml_pipe(pytorch_pipe, + mlpackages_dir, + model_version, + compute_unit, + delete_original_pipe=True, + scheduler_override=None, + controlnet_models=None): + """ Initializes and returns a `CoreMLStableDiffusionPipeline` from an original + diffusers PyTorch pipeline + """ + # Ensure `scheduler_override` object is of correct type if specified + if scheduler_override is not None: + assert isinstance(scheduler_override, SchedulerMixin) + logger.warning( + "Overriding scheduler in pipeline: " + f"Default={pytorch_pipe.scheduler}, Override={scheduler_override}") + + # Gather configured tokenizer and scheduler attributes from the original pipe + coreml_pipe_kwargs = { + "tokenizer": pytorch_pipe.tokenizer, + "scheduler": pytorch_pipe.scheduler if scheduler_override is None else scheduler_override, + "feature_extractor": pytorch_pipe.feature_extractor, + } + + model_names_to_load = ["text_encoder", "unet", "vae_decoder"] + if getattr(pytorch_pipe, "safety_checker", None) is not None: + model_names_to_load.append("safety_checker") + else: + logger.warning( + f"Original diffusers pipeline for {model_version} does not have a safety_checker, " + "Core ML pipeline will mirror this behavior.") + coreml_pipe_kwargs["safety_checker"] = None + + if delete_original_pipe: + del pytorch_pipe + gc.collect() + logger.info("Removed PyTorch pipe to reduce peak memory consumption") + + if controlnet_models: + 
model_names_to_load.remove("unet") + coreml_pipe_kwargs["unet"] = _load_mlpackage( + "control-unet", + mlpackages_dir, + model_version, + compute_unit, + ) + coreml_pipe_kwargs["controlnet"] = [_load_mlpackage_controlnet( + mlpackages_dir, + model_version, + compute_unit, + ) for model_version in controlnet_models] + else: + coreml_pipe_kwargs["controlnet"] = None + + # Load Core ML models + logger.info(f"Loading Core ML models in memory from {mlpackages_dir}") + coreml_pipe_kwargs.update({ + model_name: _load_mlpackage( + model_name, + mlpackages_dir, + model_version, + compute_unit, + ) + for model_name in model_names_to_load + }) + logger.info("Done.") + + logger.info("Initializing Core ML pipe for image generation") + coreml_pipe = CoreMLStableDiffusionPipeline(**coreml_pipe_kwargs) + logger.info("Done.") + + return coreml_pipe + + +def get_image_path(args, **override_kwargs): + """ mkdir output folder and encode metadata in the filename + """ + out_folder = os.path.join(args.o, "_".join(args.prompt.replace("/", "_").rsplit(" "))) + os.makedirs(out_folder, exist_ok=True) + + out_fname = f"randomSeed_{override_kwargs.get('seed', None) or args.seed}" + out_fname += f"_computeUnit_{override_kwargs.get('compute_unit', None) or args.compute_unit}" + out_fname += f"_modelVersion_{override_kwargs.get('model_version', None) or args.model_version.replace('/', '_')}" + + if args.scheduler is not None: + out_fname += f"_customScheduler_{override_kwargs.get('scheduler', None) or args.scheduler}" + out_fname += f"_numInferenceSteps{override_kwargs.get('num_inference_steps', None) or args.num_inference_steps}" + + return os.path.join(out_folder, out_fname + ".png") + +def prepare_controlnet_cond(image_path, height, width): + image = Image.open(image_path).convert("RGB") + image = image.resize((height, width), resample=Image.LANCZOS) + image = np.array(image).transpose(2, 0, 1) / 255.0 + return image + +def main(args): + logger.info(f"Setting random seed to {args.seed}") + np.random.seed(args.seed) + + logger.info("Initializing PyTorch pipe for reference configuration") + from diffusers import StableDiffusionPipeline + pytorch_pipe = StableDiffusionPipeline.from_pretrained(args.model_version, + use_auth_token=True) + + user_specified_scheduler = None + if args.scheduler is not None: + user_specified_scheduler = SCHEDULER_MAP[ + args.scheduler].from_config(pytorch_pipe.scheduler.config) + + coreml_pipe = get_coreml_pipe(pytorch_pipe=pytorch_pipe, + mlpackages_dir=args.i, + model_version=args.model_version, + compute_unit=args.compute_unit, + scheduler_override=user_specified_scheduler, + controlnet_models=args.controlnet) + + if args.controlnet: + controlnet_cond = [] + for i, _ in enumerate(args.controlnet): + image_path = args.controlnet_inputs[i] + image = prepare_controlnet_cond(image_path, coreml_pipe.height, coreml_pipe.width) + controlnet_cond.append(image) + else: + controlnet_cond = None + + logger.info("Beginning image generation.") + image = coreml_pipe( + prompt=args.prompt, + height=coreml_pipe.height, + width=coreml_pipe.width, + num_inference_steps=args.num_inference_steps, + guidance_scale=args.guidance_scale, + controlnet_cond=controlnet_cond, + negative_prompt=args.negative_prompt, + ) + + out_path = get_image_path(args) + logger.info(f"Saving generated image to {out_path}") + image["images"][0].save(out_path) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + + parser.add_argument( + "--prompt", + required=True, + help="The text prompt to be used for 
text-to-image generation.") + parser.add_argument( + "-i", + required=True, + help=("Path to input directory with the .mlpackage files generated by " + "python_coreml_stable_diffusion.torch2coreml")) + parser.add_argument("-o", required=True) + parser.add_argument("--seed", + "-s", + default=93, + type=int, + help="Random seed to be able to reproduce results") + parser.add_argument( + "--model-version", + default="CompVis/stable-diffusion-v1-4", + help= + ("The pre-trained model checkpoint and configuration to restore. " + "For available versions: https://huggingface.co/models?search=stable-diffusion" + )) + parser.add_argument( + "--compute-unit", + choices=get_available_compute_units(), + default="ALL", + help=("The compute units to be used when executing Core ML models. " + f"Options: {get_available_compute_units()}")) + parser.add_argument( + "--scheduler", + choices=tuple(SCHEDULER_MAP.keys()), + default=None, + help=("The scheduler to use for running the reverse diffusion process. " + "If not specified, the default scheduler from the diffusers pipeline is utilized")) + parser.add_argument( + "--num-inference-steps", + default=50, + type=int, + help="The number of iterations the unet model will be executed throughout the reverse diffusion process") + parser.add_argument( + "--guidance-scale", + default=7.5, + type=float, + help="Controls the influence of the text prompt on sampling process (0=random images)") + parser.add_argument( + "--controlnet", + nargs="*", + type=str, + help=("Enables ControlNet and use control-unet instead of unet for additional inputs. " + "For Multi-Controlnet, provide the model names separated by spaces.")) + parser.add_argument( + "--controlnet-inputs", + nargs="*", + type=str, + help=("Image paths for ControlNet inputs. " + "Please enter images corresponding to each controlnet provided at --controlnet option in same order.")) + parser.add_argument( + "--negative-prompt", + default=None, + help="The negative text prompt to be used for text-to-image generation.") + + args = parser.parse_args() + main(args) diff --git a/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/torch2coreml.py b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/torch2coreml.py new file mode 100644 index 00000000..89b9f212 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/torch2coreml.py @@ -0,0 +1,1311 @@ +# +# For licensing see accompanying LICENSE.md file. +# Copyright (C) 2022 Apple Inc. All Rights Reserved. 
+#
+
+from python_coreml_stable_diffusion import unet, controlnet
+
+import argparse
+from collections import OrderedDict, defaultdict
+from copy import deepcopy
+import coremltools as ct
+from diffusers import StableDiffusionPipeline, ControlNetModel
+import gc
+
+import logging
+
+logging.basicConfig()
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+
+import numpy as np
+import os
+from python_coreml_stable_diffusion import chunk_mlprogram
+import requests
+import shutil
+import time
+import re
+import pathlib
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+torch.set_grad_enabled(False)
+
+from types import MethodType
+
+
+def _get_coreml_inputs(sample_inputs, args):
+    return [
+        ct.TensorType(
+            name=k,
+            shape=v.shape,
+            dtype=v.numpy().dtype if isinstance(v, torch.Tensor) else v.dtype,
+        ) for k, v in sample_inputs.items()
+    ]
+
+
+def compute_psnr(a, b):
+    """ Compute Peak-Signal-to-Noise-Ratio across two numpy.ndarray objects
+    """
+    max_b = np.abs(b).max()
+
+    # Root-mean-squared error across all elements
+    sumdeltasq = ((a - b) * (a - b)).sum()
+    sumdeltasq /= b.size
+    sumdeltasq = np.sqrt(sumdeltasq)
+
+    eps = 1e-5
+    eps2 = 1e-10
+    psnr = 20 * np.log10((max_b + eps) / (sumdeltasq + eps2))
+
+    return psnr
+
+
+ABSOLUTE_MIN_PSNR = 35
+
+
+def report_correctness(original_outputs, final_outputs, log_prefix):
+    """ Report PSNR values across two compatible tensors
+    """
+    original_psnr = compute_psnr(original_outputs, original_outputs)
+    final_psnr = compute_psnr(original_outputs, final_outputs)
+
+    dB_change = final_psnr - original_psnr
+    logger.info(
+        f"{log_prefix}: PSNR changed by {dB_change:.1f} dB ({original_psnr:.1f} -> {final_psnr:.1f})"
+    )
+
+    if final_psnr < ABSOLUTE_MIN_PSNR:
+        raise ValueError(f"{final_psnr:.1f} dB is too low!")
+    else:
+        logger.info(
+            f"{final_psnr:.1f} dB > {ABSOLUTE_MIN_PSNR} dB (minimum allowed) parity check passed"
+        )
+    return final_psnr
+
+def _get_out_path(args, submodule_name):
+    fname = f"Stable_Diffusion_version_{args.model_version}_{submodule_name}.mlpackage"
+    fname = fname.replace("/", "_")
+    return os.path.join(args.o, fname)
+
+
+# https://github.com/apple/coremltools/issues/1680
+def _save_mlpackage(model, output_path):
+    # First recreate the MLModel object using its in-memory spec, then save
+    ct.models.MLModel(model._spec,
+                      weights_dir=model._weights_dir,
+                      is_temp_package=True).save(output_path)
+
+
+def _convert_to_coreml(submodule_name, torchscript_module, sample_inputs,
+                       output_names, args, out_path=None):
+
+    if out_path is None:
+        out_path = _get_out_path(args, submodule_name)
+
+    if os.path.exists(out_path):
+        logger.info(f"Skipping export because {out_path} already exists")
+        logger.info(f"Loading model from {out_path}")
+
+        start = time.time()
+        # Note: each model load triggers a model compilation, which can take up to a few minutes.
+        # The Swift CLI we provide uses precompiled Core ML models (.mlmodelc), which incur compilation
+        # only upon first load, mitigating the load time in subsequent runs.
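+        # `ct.ComputeUnit[args.compute_unit]` below maps the CLI string (one of
+        # "ALL", "CPU_ONLY", "CPU_AND_GPU", "CPU_AND_NE") to the coremltools enum
+        # that constrains which hardware the Core ML runtime may dispatch to.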
+ coreml_model = ct.models.MLModel( + out_path, compute_units=ct.ComputeUnit[args.compute_unit]) + logger.info( + f"Loading {out_path} took {time.time() - start:.1f} seconds") + + coreml_model.compute_unit = ct.ComputeUnit[args.compute_unit] + else: + logger.info(f"Converting {submodule_name} to CoreML..") + coreml_model = ct.convert( + torchscript_module, + convert_to="mlprogram", + minimum_deployment_target=ct.target.macOS13, + inputs=_get_coreml_inputs(sample_inputs, args), + outputs=[ct.TensorType(name=name) for name in output_names], + compute_units=ct.ComputeUnit[args.compute_unit], + # skip_model_load=True, + ) + + del torchscript_module + gc.collect() + + coreml_model.save(out_path) + logger.info(f"Saved {submodule_name} model to {out_path}") + + return coreml_model, out_path + + +def quantize_weights_to_8bits(args): + for model_name in [ + "text_encoder", "vae_decoder", "vae_encoder", "unet", "unet_chunk1", "unet_chunk2", + "control-unet", "control-unet_chunk1", "control-unet_chunk2", "safety_checker" + ]: + out_path = _get_out_path(args, model_name) + _quantize_and_save_8bits_model(out_path, model_name) + + if args.convert_controlnet: + for controlnet_model_version in args.convert_controlnet: + controlnet_model_name = controlnet_model_version.replace("/", "_") + fname = f"ControlNet_{controlnet_model_name}.mlpackage" + out_path = os.path.join(args.o, fname) + _quantize_and_save_8bits_model(out_path, controlnet_model_name) + + +def _quantize_and_save_8bits_model(out_path, model_name): + if os.path.exists(out_path): + logger.info(f"Quantizing {model_name}") + mlmodel = ct.models.MLModel(out_path, + compute_units=ct.ComputeUnit.CPU_ONLY) + mlmodel = ct.compression_utils.affine_quantize_weights( + mlmodel, mode="linear") + mlmodel.save(out_path) + logger.info("Done") + else: + logger.info( + f"Skipped quantizing {model_name} (Not found at {out_path})") + + +def _compile_coreml_model(source_model_path, output_dir, final_name): + """ Compiles Core ML models using the coremlcompiler utility from Xcode toolchain + """ + target_path = os.path.join(output_dir, f"{final_name}.mlmodelc") + if os.path.exists(target_path): + logger.warning( + f"Found existing compiled model at {target_path}! 
Skipping..") + return target_path + + logger.info(f"Compiling {source_model_path}") + source_model_name = os.path.basename( + os.path.splitext(source_model_path)[0]) + + os.system(f"xcrun coremlcompiler compile {source_model_path} {output_dir}") + compiled_output = os.path.join(output_dir, f"{source_model_name}.mlmodelc") + shutil.move(compiled_output, target_path) + + return target_path + + +def bundle_resources_for_swift_cli(args): + """ + - Compiles Core ML models from mlpackage into mlmodelc format + - Download tokenizer resources for the text encoder + """ + resources_dir = os.path.join(args.o, "Resources") + if not os.path.exists(resources_dir): + os.makedirs(resources_dir, exist_ok=True) + logger.info(f"Created {resources_dir} for Swift CLI assets") + + # Compile model using coremlcompiler (Significantly reduces the load time for unet) + for source_name, target_name in [("text_encoder", "TextEncoder"), + ("vae_decoder", "VAEDecoder"), + ("vae_encoder", "VAEEncoder"), + ("unet", "Unet"), + ("unet_chunk1", "UnetChunk1"), + ("unet_chunk2", "UnetChunk2"), + ("control-unet", "ControlledUnet"), + ("control-unet_chunk1", "ControlledUnetChunk1"), + ("control-unet_chunk2", "ControlledUnetChunk2"), + ("safety_checker", "SafetyChecker")]: + source_path = _get_out_path(args, source_name) + if os.path.exists(source_path): + target_path = _compile_coreml_model(source_path, resources_dir, + target_name) + logger.info(f"Compiled {source_path} to {target_path}") + else: + logger.warning( + f"{source_path} not found, skipping compilation to {target_name}.mlmodelc" + ) + + if args.convert_controlnet: + for controlnet_model_version in args.convert_controlnet: + controlnet_model_name = controlnet_model_version.replace("/", "_") + fname = f"ControlNet_{controlnet_model_name}.mlpackage" + source_path = os.path.join(args.o, fname) + controlnet_dir = os.path.join(resources_dir, "controlnet") + target_name = "".join([word.title() for word in re.split('_|-', controlnet_model_name)]) + + if os.path.exists(source_path): + target_path = _compile_coreml_model(source_path, controlnet_dir, + target_name) + logger.info(f"Compiled {source_path} to {target_path}") + else: + logger.warning( + f"{source_path} not found, skipping compilation to {target_name}.mlmodelc" + ) + + # Fetch and save vocabulary JSON file for text tokenizer + logger.info("Downloading and saving tokenizer vocab.json") + with open(os.path.join(resources_dir, "vocab.json"), "wb") as f: + f.write(requests.get(args.text_encoder_vocabulary_url).content) + logger.info("Done") + + # Fetch and save merged pairs JSON file for text tokenizer + logger.info("Downloading and saving tokenizer merges.txt") + with open(os.path.join(resources_dir, "merges.txt"), "wb") as f: + f.write(requests.get(args.text_encoder_merges_url).content) + logger.info("Done") + + return resources_dir + + +def convert_text_encoder(pipe, args): + """ Converts the text encoder component of Stable Diffusion + """ + out_path = _get_out_path(args, "text_encoder") + if os.path.exists(out_path): + logger.info( + f"`text_encoder` already exists at {out_path}, skipping conversion." 
+ ) + return + + # Create sample inputs for tracing, conversion and correctness verification + text_encoder_sequence_length = pipe.tokenizer.model_max_length + text_encoder_hidden_size = pipe.text_encoder.config.hidden_size + + sample_text_encoder_inputs = { + "input_ids": + torch.randint( + pipe.text_encoder.config.vocab_size, + (1, text_encoder_sequence_length), + # https://github.com/apple/coremltools/issues/1423 + dtype=torch.float32, + ) + } + sample_text_encoder_inputs_spec = { + k: (v.shape, v.dtype) + for k, v in sample_text_encoder_inputs.items() + } + logger.info(f"Sample inputs spec: {sample_text_encoder_inputs_spec}") + + def _build_causal_attention_mask(self, bsz, seq_len, dtype): + mask = torch.ones((bsz, seq_len, seq_len), dtype=dtype) * -1e4 + mask.triu_(1) + mask = mask.unsqueeze(1) + return mask + + class TextEncoder(nn.Module): + + def __init__(self): + super().__init__() + self.text_encoder = pipe.text_encoder + setattr( + self.text_encoder.text_model, "_build_causal_attention_mask", + MethodType(_build_causal_attention_mask, + self.text_encoder.text_model)) + + def forward(self, input_ids): + return self.text_encoder(input_ids, return_dict=False) + + reference_text_encoder = TextEncoder().eval() + + logger.info("JIT tracing text_encoder..") + reference_text_encoder = torch.jit.trace( + reference_text_encoder, + (sample_text_encoder_inputs["input_ids"].to(torch.int32), ), + ) + logger.info("Done.") + + coreml_text_encoder, out_path = _convert_to_coreml( + "text_encoder", reference_text_encoder, sample_text_encoder_inputs, + ["last_hidden_state", "pooled_outputs"], args) + + # Set model metadata + coreml_text_encoder.author = f"Please refer to the Model Card available at huggingface.co/{args.model_version}" + coreml_text_encoder.license = "OpenRAIL (https://huggingface.co/spaces/CompVis/stable-diffusion-license)" + coreml_text_encoder.version = args.model_version + coreml_text_encoder.short_description = \ + "Stable Diffusion generates images conditioned on text and/or other images as input through the diffusion process. " \ + "Please refer to https://arxiv.org/abs/2112.10752 for details." + + # Set the input descriptions + coreml_text_encoder.input_description[ + "input_ids"] = "The token ids that represent the input text" + + # Set the output descriptions + coreml_text_encoder.output_description[ + "last_hidden_state"] = "The token embeddings as encoded by the Transformer model" + coreml_text_encoder.output_description[ + "pooled_outputs"] = "The version of the `last_hidden_state` output after pooling" + + _save_mlpackage(coreml_text_encoder, out_path) + + logger.info(f"Saved text_encoder into {out_path}") + + # Parity check PyTorch vs CoreML + if args.check_output_correctness: + baseline_out = pipe.text_encoder( + sample_text_encoder_inputs["input_ids"].to(torch.int32), + return_dict=False, + )[1].numpy() + + coreml_out = list( + coreml_text_encoder.predict( + {k: v.numpy() + for k, v in sample_text_encoder_inputs.items()}).values())[0] + report_correctness( + baseline_out, coreml_out, + "text_encoder baseline PyTorch to reference CoreML") + + del reference_text_encoder, coreml_text_encoder, pipe.text_encoder + gc.collect() + + +def modify_coremltools_torch_frontend_badbmm(): + """ + Modifies coremltools torch frontend for baddbmm to be robust to the `beta` argument being of non-float dtype: + e.g. 
https://github.com/huggingface/diffusers/blob/v0.8.1/src/diffusers/models/attention.py#L315 + """ + from coremltools.converters.mil import register_torch_op + from coremltools.converters.mil.mil import Builder as mb + from coremltools.converters.mil.frontend.torch.ops import _get_inputs + from coremltools.converters.mil.frontend.torch.torch_op_registry import _TORCH_OPS_REGISTRY + if "baddbmm" in _TORCH_OPS_REGISTRY: + del _TORCH_OPS_REGISTRY["baddbmm"] + + @register_torch_op + def baddbmm(context, node): + """ + baddbmm(Tensor input, Tensor batch1, Tensor batch2, Scalar beta=1, Scalar alpha=1) + output = beta * input + alpha * batch1 * batch2 + Notice that batch1 and batch2 must be 3-D tensors each containing the same number of matrices. + If batch1 is a (b×n×m) tensor, batch2 is a (b×m×p) tensor, then input must be broadcastable with a (b×n×p) tensor + and out will be a (b×n×p) tensor. + """ + assert len(node.outputs) == 1 + inputs = _get_inputs(context, node, expected=5) + bias, batch1, batch2, beta, alpha = inputs + + if beta.val != 1.0: + # Apply scaling factor beta to the bias. + if beta.val.dtype == np.int32: + beta = mb.cast(x=beta, dtype="fp32") + logger.warning( + f"Casted the `beta`(value={beta.val}) argument of `baddbmm` op " + "from int32 to float32 dtype for conversion!") + bias = mb.mul(x=beta, y=bias, name=bias.name + "_scaled") + + context.add(bias) + + if alpha.val != 1.0: + # Apply scaling factor alpha to the input. + batch1 = mb.mul(x=alpha, y=batch1, name=batch1.name + "_scaled") + context.add(batch1) + + bmm_node = mb.matmul(x=batch1, y=batch2, name=node.name + "_bmm") + context.add(bmm_node) + + baddbmm_node = mb.add(x=bias, y=bmm_node, name=node.name) + context.add(baddbmm_node) + + +def convert_vae_decoder(pipe, args): + """ Converts the VAE Decoder component of Stable Diffusion + """ + out_path = _get_out_path(args, "vae_decoder") + if os.path.exists(out_path): + logger.info( + f"`vae_decoder` already exists at {out_path}, skipping conversion." + ) + return + + if not hasattr(pipe, "unet"): + raise RuntimeError( + "convert_unet() deletes pipe.unet to save RAM. 
" + "Please use convert_vae_decoder() before convert_unet()") + + z_shape = ( + 1, # B + pipe.vae.config.latent_channels, # C + args.latent_h or pipe.unet.config.sample_size, # H + args.latent_w or pipe.unet.config.sample_size, # w + ) + + sample_vae_decoder_inputs = { + "z": torch.rand(*z_shape, dtype=torch.float16) + } + + class VAEDecoder(nn.Module): + """ Wrapper nn.Module wrapper for pipe.decode() method + """ + + def __init__(self): + super().__init__() + self.post_quant_conv = pipe.vae.post_quant_conv + self.decoder = pipe.vae.decoder + # Disable torch 2.0 scaled dot-product attention: https://github.com/apple/coremltools/issues/1823 + self.decoder.mid_block.attentions[0]._use_2_0_attn = False + + def forward(self, z): + return self.decoder(self.post_quant_conv(z)) + + baseline_decoder = VAEDecoder().eval() + + # No optimization needed for the VAE Decoder as it is a pure ConvNet + traced_vae_decoder = torch.jit.trace( + baseline_decoder, (sample_vae_decoder_inputs["z"].to(torch.float32), )) + + modify_coremltools_torch_frontend_badbmm() + coreml_vae_decoder, out_path = _convert_to_coreml( + "vae_decoder", traced_vae_decoder, sample_vae_decoder_inputs, + ["image"], args) + + # Set model metadata + coreml_vae_decoder.author = f"Please refer to the Model Card available at huggingface.co/{args.model_version}" + coreml_vae_decoder.license = "OpenRAIL (https://huggingface.co/spaces/CompVis/stable-diffusion-license)" + coreml_vae_decoder.version = args.model_version + coreml_vae_decoder.short_description = \ + "Stable Diffusion generates images conditioned on text and/or other images as input through the diffusion process. " \ + "Please refer to https://arxiv.org/abs/2112.10752 for details." + + # Set the input descriptions + coreml_vae_decoder.input_description["z"] = \ + "The denoised latent embeddings from the unet model after the last step of reverse diffusion" + + # Set the output descriptions + coreml_vae_decoder.output_description[ + "image"] = "Generated image normalized to range [-1, 1]" + + _save_mlpackage(coreml_vae_decoder, out_path) + + logger.info(f"Saved vae_decoder into {out_path}") + + # Parity check PyTorch vs CoreML + if args.check_output_correctness: + baseline_out = baseline_decoder( + z=sample_vae_decoder_inputs["z"].to(torch.float32)).numpy() + coreml_out = list( + coreml_vae_decoder.predict( + {k: v.numpy() + for k, v in sample_vae_decoder_inputs.items()}).values())[0] + report_correctness(baseline_out, coreml_out, + "vae_decoder baseline PyTorch to baseline CoreML") + + del traced_vae_decoder, pipe.vae.decoder, coreml_vae_decoder + gc.collect() + + +def convert_vae_encoder(pipe, args): + """ Converts the VAE Encoder component of Stable Diffusion + """ + out_path = _get_out_path(args, "vae_encoder") + if os.path.exists(out_path): + logger.info( + f"`vae_encoder` already exists at {out_path}, skipping conversion." + ) + return + + if not hasattr(pipe, "unet"): + raise RuntimeError( + "convert_unet() deletes pipe.unet to save RAM. 
" + "Please use convert_vae_encoder() before convert_unet()") + + height = (args.latent_h or pipe.unet.config.sample_size) * 8 + width = (args.latent_w or pipe.unet.config.sample_size) * 8 + + z_shape = ( + 1, # B + 3, # C (RGB range from -1 to 1) + height, # H + width, # w + ) + + sample_vae_encoder_inputs = { + "z": torch.rand(*z_shape, dtype=torch.float16) + } + + class VAEEncoder(nn.Module): + """ Wrapper nn.Module wrapper for pipe.encode() method + """ + + def __init__(self): + super().__init__() + self.quant_conv = pipe.vae.quant_conv + self.encoder = pipe.vae.encoder + # Disable torch 2.0 scaled dot-product attention: https://github.com/apple/coremltools/issues/1823 + self.encoder.mid_block.attentions[0]._use_2_0_attn = False + + def forward(self, z): + return self.quant_conv(self.encoder(z)) + + baseline_encoder = VAEEncoder().eval() + + # No optimization needed for the VAE Encoder as it is a pure ConvNet + traced_vae_encoder = torch.jit.trace( + baseline_encoder, (sample_vae_encoder_inputs["z"].to(torch.float32), )) + + modify_coremltools_torch_frontend_badbmm() + coreml_vae_encoder, out_path = _convert_to_coreml( + "vae_encoder", traced_vae_encoder, sample_vae_encoder_inputs, + ["latent"], args) + + # Set model metadata + coreml_vae_encoder.author = f"Please refer to the Model Card available at huggingface.co/{args.model_version}" + coreml_vae_encoder.license = "OpenRAIL (https://huggingface.co/spaces/CompVis/stable-diffusion-license)" + coreml_vae_encoder.version = args.model_version + coreml_vae_encoder.short_description = \ + "Stable Diffusion generates images conditioned on text and/or other images as input through the diffusion process. " \ + "Please refer to https://arxiv.org/abs/2112.10752 for details." + + # Set the input descriptions + coreml_vae_encoder.input_description["z"] = \ + "The input image to base the initial latents on normalized to range [-1, 1]" + + # Set the output descriptions + coreml_vae_encoder.output_description["latent"] = "The latent embeddings from the unet model from the input image." 
+ + _save_mlpackage(coreml_vae_encoder, out_path) + + logger.info(f"Saved vae_encoder into {out_path}") + + # Parity check PyTorch vs CoreML + if args.check_output_correctness: + baseline_out = baseline_encoder( + z=sample_vae_encoder_inputs["z"].to(torch.float32)).numpy() + coreml_out = list( + coreml_vae_encoder.predict( + {k: v.numpy() + for k, v in sample_vae_encoder_inputs.items()}).values())[0] + report_correctness(baseline_out, coreml_out, + "vae_encoder baseline PyTorch to baseline CoreML") + + del traced_vae_encoder, pipe.vae.encoder, coreml_vae_encoder + gc.collect() + + +def convert_unet(pipe, args): + """ Converts the UNet component of Stable Diffusion + """ + if args.unet_support_controlnet: + unet_name = "control-unet" + else: + unet_name = "unet" + + out_path = _get_out_path(args, unet_name) + + # Check if Unet was previously exported and then chunked + unet_chunks_exist = all( + os.path.exists( + out_path.replace(".mlpackage", f"_chunk{idx+1}.mlpackage")) + for idx in range(2)) + + if args.chunk_unet and unet_chunks_exist: + logger.info("`unet` chunks already exist, skipping conversion.") + del pipe.unet + gc.collect() + return + + # If original Unet does not exist, export it from PyTorch+diffusers + elif not os.path.exists(out_path): + # Prepare sample input shapes and values + batch_size = 2 # for classifier-free guidance + sample_shape = ( + batch_size, # B + pipe.unet.config.in_channels, # C + args.latent_h or pipe.unet.config.sample_size, # H + args.latent_w or pipe.unet.config.sample_size, # W + ) + + if not hasattr(pipe, "text_encoder"): + raise RuntimeError( + "convert_text_encoder() deletes pipe.text_encoder to save RAM. " + "Please use convert_unet() before convert_text_encoder()") + + encoder_hidden_states_shape = ( + batch_size, + pipe.text_encoder.config.hidden_size, + 1, + pipe.text_encoder.config.max_position_embeddings, + ) + + # Create the scheduled timesteps for downstream use + DEFAULT_NUM_INFERENCE_STEPS = 50 + pipe.scheduler.set_timesteps(DEFAULT_NUM_INFERENCE_STEPS) + + sample_unet_inputs = OrderedDict([ + ("sample", torch.rand(*sample_shape)), + ("timestep", + torch.tensor([pipe.scheduler.timesteps[0].item()] * + (batch_size)).to(torch.float32)), + ("encoder_hidden_states", torch.rand(*encoder_hidden_states_shape)) + ]) + + # Prepare inputs + baseline_sample_unet_inputs = deepcopy(sample_unet_inputs) + baseline_sample_unet_inputs[ + "encoder_hidden_states"] = baseline_sample_unet_inputs[ + "encoder_hidden_states"].squeeze(2).transpose(1, 2) + + # Initialize reference unet + reference_unet = unet.UNet2DConditionModel(**pipe.unet.config).eval() + load_state_dict_summary = reference_unet.load_state_dict( + pipe.unet.state_dict()) + + if args.unet_support_controlnet: + from .unet import calculate_conv2d_output_shape + additional_residuals_shapes = [] + + # conv_in + out_h, out_w = calculate_conv2d_output_shape( + (args.latent_h or pipe.unet.config.sample_size), + (args.latent_w or pipe.unet.config.sample_size), + reference_unet.conv_in, + ) + additional_residuals_shapes.append( + (batch_size, reference_unet.conv_in.out_channels, out_h, out_w)) + + # down_blocks + for down_block in reference_unet.down_blocks: + additional_residuals_shapes += [ + (batch_size, resnet.out_channels, out_h, out_w) for resnet in down_block.resnets + ] + if hasattr(down_block, "downsamplers") and down_block.downsamplers is not None: + for downsampler in down_block.downsamplers: + out_h, out_w = calculate_conv2d_output_shape(out_h, out_w, downsampler.conv) + 
additional_residuals_shapes.append( + (batch_size, down_block.downsamplers[-1].conv.out_channels, out_h, out_w)) + + # mid_block + additional_residuals_shapes.append( + (batch_size, reference_unet.mid_block.resnets[-1].out_channels, out_h, out_w) + ) + + baseline_sample_unet_inputs["down_block_additional_residuals"] = () + for i, shape in enumerate(additional_residuals_shapes): + sample_residual_input = torch.rand(*shape) + sample_unet_inputs[f"additional_residual_{i}"] = sample_residual_input + if i == len(additional_residuals_shapes) - 1: + baseline_sample_unet_inputs["mid_block_additional_residual"] = sample_residual_input + else: + baseline_sample_unet_inputs["down_block_additional_residuals"] += (sample_residual_input, ) + + sample_unet_inputs_spec = { + k: (v.shape, v.dtype) + for k, v in sample_unet_inputs.items() + } + logger.info(f"Sample UNet inputs spec: {sample_unet_inputs_spec}") + + # JIT trace + logger.info("JIT tracing..") + reference_unet = torch.jit.trace(reference_unet, + list(sample_unet_inputs.values())) + logger.info("Done.") + + if args.check_output_correctness: + baseline_out = pipe.unet(**baseline_sample_unet_inputs, + return_dict=False)[0].numpy() + reference_out = reference_unet(*sample_unet_inputs.values())[0].numpy() + report_correctness(baseline_out, reference_out, + "unet baseline to reference PyTorch") + + del pipe.unet + gc.collect() + + coreml_sample_unet_inputs = { + k: v.numpy().astype(np.float16) + for k, v in sample_unet_inputs.items() + } + + coreml_unet, out_path = _convert_to_coreml(unet_name, reference_unet, + coreml_sample_unet_inputs, + ["noise_pred"], args) + del reference_unet + gc.collect() + + # Set model metadata + coreml_unet.author = f"Please refer to the Model Card available at huggingface.co/{args.model_version}" + coreml_unet.license = "OpenRAIL (https://huggingface.co/spaces/CompVis/stable-diffusion-license)" + coreml_unet.version = args.model_version + coreml_unet.short_description = \ + "Stable Diffusion generates images conditioned on text or other images as input through the diffusion process. " \ + "Please refer to https://arxiv.org/abs/2112.10752 for details." + + # Set the input descriptions + coreml_unet.input_description["sample"] = \ + "The low resolution latent feature maps being denoised through reverse diffusion" + coreml_unet.input_description["timestep"] = \ + "A value emitted by the associated scheduler object to condition the model on a given noise schedule" + coreml_unet.input_description["encoder_hidden_states"] = \ + "Output embeddings from the associated text_encoder model to condition to generated image on text. " \ + "A maximum of 77 tokens (~40 words) are allowed. Longer text is truncated. " \ + "Shorter text does not reduce computation." + + # Set the output descriptions + coreml_unet.output_description["noise_pred"] = \ + "Same shape and dtype as the `sample` input. 
" \ + "The predicted noise to facilitate the reverse diffusion (denoising) process" + + _save_mlpackage(coreml_unet, out_path) + logger.info(f"Saved unet into {out_path}") + + # Parity check PyTorch vs CoreML + if args.check_output_correctness: + coreml_out = list( + coreml_unet.predict(coreml_sample_unet_inputs).values())[0] + report_correctness(baseline_out, coreml_out, + "unet baseline PyTorch to reference CoreML") + + del coreml_unet + gc.collect() + else: + del pipe.unet + gc.collect() + logger.info( + f"`unet` already exists at {out_path}, skipping conversion.") + + if args.chunk_unet and not unet_chunks_exist: + logger.info("Chunking unet in two approximately equal MLModels") + args.mlpackage_path = out_path + args.remove_original = False + chunk_mlprogram.main(args) + + +def convert_safety_checker(pipe, args): + """ Converts the Safety Checker component of Stable Diffusion + """ + if pipe.safety_checker is None: + logger.warning( + f"diffusers pipeline for {args.model_version} does not have a `safety_checker` module! " \ + "`--convert-safety-checker` will be ignored." + ) + return + + out_path = _get_out_path(args, "safety_checker") + if os.path.exists(out_path): + logger.info( + f"`safety_checker` already exists at {out_path}, skipping conversion." + ) + return + + im_h = pipe.vae.config.sample_size + im_w = pipe.vae.config.sample_size + + if args.latent_h is not None: + im_h = args.latent_h * 8 + + if args.latent_w is not None: + im_w = args.latent_w * 8 + + sample_image = np.random.randn( + 1, # B + im_h, # H + im_w, # w + 3 # C + ).astype(np.float32) + + # Note that pipe.feature_extractor is not an ML model. It simply + # preprocesses data for the pipe.safety_checker module. + safety_checker_input = pipe.feature_extractor( + pipe.numpy_to_pil(sample_image), + return_tensors="pt", + ).pixel_values.to(torch.float32) + + sample_safety_checker_inputs = OrderedDict([ + ("clip_input", safety_checker_input), + ("images", torch.from_numpy(sample_image)), + ("adjustment", torch.tensor([0]).to(torch.float32)), + ]) + + sample_safety_checker_inputs_spec = { + k: (v.shape, v.dtype) + for k, v in sample_safety_checker_inputs.items() + } + logger.info(f"Sample inputs spec: {sample_safety_checker_inputs_spec}") + + # Patch safety_checker's forward pass to be vectorized and avoid conditional blocks + # (similar to pipe.safety_checker.forward_onnx) + from diffusers.pipelines.stable_diffusion import safety_checker + + def forward_coreml(self, clip_input, images, adjustment): + """ Forward pass implementation for safety_checker + """ + + def cosine_distance(image_embeds, text_embeds): + return F.normalize(image_embeds) @ F.normalize( + text_embeds).transpose(0, 1) + + pooled_output = self.vision_model(clip_input)[1] # pooled_output + image_embeds = self.visual_projection(pooled_output) + + special_cos_dist = cosine_distance(image_embeds, + self.special_care_embeds) + cos_dist = cosine_distance(image_embeds, self.concept_embeds) + + special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment + special_care = special_scores.gt(0).float().sum(dim=1).gt(0).float() + special_adjustment = special_care * 0.01 + special_adjustment = special_adjustment.unsqueeze(1).expand( + -1, cos_dist.shape[1]) + + concept_scores = (cos_dist - + self.concept_embeds_weights) + special_adjustment + has_nsfw_concepts = concept_scores.gt(0).float().sum(dim=1).gt(0)[:, + None, + None, + None] + + has_nsfw_concepts_inds, _ = torch.broadcast_tensors( + has_nsfw_concepts, images) + 
images[has_nsfw_concepts_inds] = 0.0 # black image + + return images, has_nsfw_concepts.float(), concept_scores + + baseline_safety_checker = deepcopy(pipe.safety_checker.eval()) + setattr(baseline_safety_checker, "forward", + MethodType(forward_coreml, baseline_safety_checker)) + + # In order to parity check the actual signal, we need to override the forward pass to return `concept_scores` which is the + # output before thresholding + # Reference: https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py#L100 + def forward_extended_return(self, clip_input, images, adjustment): + + def cosine_distance(image_embeds, text_embeds): + normalized_image_embeds = F.normalize(image_embeds) + normalized_text_embeds = F.normalize(text_embeds) + return torch.mm(normalized_image_embeds, + normalized_text_embeds.t()) + + pooled_output = self.vision_model(clip_input)[1] # pooled_output + image_embeds = self.visual_projection(pooled_output) + + special_cos_dist = cosine_distance(image_embeds, + self.special_care_embeds) + cos_dist = cosine_distance(image_embeds, self.concept_embeds) + + adjustment = 0.0 + + special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment + special_care = torch.any(special_scores > 0, dim=1) + special_adjustment = special_care * 0.01 + special_adjustment = special_adjustment.unsqueeze(1).expand( + -1, cos_dist.shape[1]) + + concept_scores = (cos_dist - + self.concept_embeds_weights) + special_adjustment + has_nsfw_concepts = torch.any(concept_scores > 0, dim=1) + + images[has_nsfw_concepts] = 0.0 + + return images, has_nsfw_concepts, concept_scores + + setattr(pipe.safety_checker, "forward", + MethodType(forward_extended_return, pipe.safety_checker)) + + # Trace the safety_checker model + logger.info("JIT tracing..") + traced_safety_checker = torch.jit.trace( + baseline_safety_checker, list(sample_safety_checker_inputs.values())) + logger.info("Done.") + del baseline_safety_checker + gc.collect() + + # Cast all inputs to float16 + coreml_sample_safety_checker_inputs = { + k: v.numpy().astype(np.float16) + for k, v in sample_safety_checker_inputs.items() + } + + # Convert safety_checker model to Core ML + coreml_safety_checker, out_path = _convert_to_coreml( + "safety_checker", traced_safety_checker, + coreml_sample_safety_checker_inputs, + ["filtered_images", "has_nsfw_concepts", "concept_scores"], args) + + # Set model metadata + coreml_safety_checker.author = f"Please refer to the Model Card available at huggingface.co/{args.model_version}" + coreml_safety_checker.license = "OpenRAIL (https://huggingface.co/spaces/CompVis/stable-diffusion-license)" + coreml_safety_checker.version = args.model_version + coreml_safety_checker.short_description = \ + "Stable Diffusion generates images conditioned on text and/or other images as input through the diffusion process. " \ + "Please refer to https://arxiv.org/abs/2112.10752 for details." 
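+
+    # The `adjustment` input below biases the safety checker's scores before
+    # they are thresholded at zero, so callers can trade recall for precision
+    # without re-converting the model.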
+
+    # Set the input descriptions
+    coreml_safety_checker.input_description["clip_input"] = \
+        "The normalized image input tensor resized to (224x224) in channels-first (BCHW) format"
+    coreml_safety_checker.input_description["images"] = \
+        f"Output of the vae_decoder ({pipe.vae.config.sample_size}x{pipe.vae.config.sample_size}) in channels-last (BHWC) format"
+    coreml_safety_checker.input_description["adjustment"] = \
+        "Bias added to the concept scores to trade off increased recall for reduced precision in the safety checker classifier"
+
+    # Set the output descriptions
+    coreml_safety_checker.output_description["filtered_images"] = \
+        "Identical to the input `images`. If the safety checker detected any sensitive content, " \
+        "the corresponding image is replaced with a blank image (zeros)"
+    coreml_safety_checker.output_description["has_nsfw_concepts"] = \
+        "Indicates whether the safety checker model found any sensitive content in the given image"
+    coreml_safety_checker.output_description["concept_scores"] = \
+        "The raw scores which, thresholded at zero, yield the `has_nsfw_concepts` output. " \
+        "These scores can be used to tune the `adjustment` input"
+
+    _save_mlpackage(coreml_safety_checker, out_path)
+
+    if args.check_output_correctness:
+        baseline_out = pipe.safety_checker(
+            **sample_safety_checker_inputs)[2].numpy()
+        coreml_out = coreml_safety_checker.predict(
+            coreml_sample_safety_checker_inputs)["concept_scores"]
+        report_correctness(
+            baseline_out, coreml_out,
+            "safety_checker baseline PyTorch to reference CoreML")
+
+    del traced_safety_checker, coreml_safety_checker, pipe.safety_checker
+    gc.collect()
+
+def _get_controlnet_base_model(controlnet_model_version):
+    from huggingface_hub import model_info
+    info = model_info(controlnet_model_version)
+    return info.cardData.get("base_model", None)
+
+def convert_controlnet(pipe, args):
+    """ Converts each ControlNet for Stable Diffusion
+    """
+    if not hasattr(pipe, "unet"):
+        raise RuntimeError(
+            "convert_unet() deletes pipe.unet to save RAM. "
+            "Please use convert_controlnet() before convert_unet()")
+
+    if not hasattr(pipe, "text_encoder"):
+        raise RuntimeError(
+            "convert_text_encoder() deletes pipe.text_encoder to save RAM. "
+            "Please use convert_controlnet() before convert_text_encoder()")
+
+    for i, controlnet_model_version in enumerate(args.convert_controlnet):
+        base_model = _get_controlnet_base_model(controlnet_model_version)
+
+        if base_model is None and args.model_version != "runwayml/stable-diffusion-v1-5":
+            logger.warning(
+                f"The original ControlNet models were trained using Stable Diffusion v1.5. "
+                f"It is possible that model {args.model_version} is not compatible with controlnet.")
+        if base_model is not None and base_model != args.model_version:
+            raise RuntimeError(
+                f"ControlNet model {controlnet_model_version} was trained using "
+                f"Stable Diffusion model {base_model}.\n However, you specified "
+                f"version {args.model_version} in the command line. Please use "
+                f"--model-version {base_model} to convert this model.")
+
+        controlnet_model_name = controlnet_model_version.replace("/", "_")
+        fname = f"ControlNet_{controlnet_model_name}.mlpackage"
+        out_path = os.path.join(args.o, fname)
+
+        if os.path.exists(out_path):
+            logger.info(
+                f"`controlnet_{controlnet_model_name}` already exists at {out_path}, skipping conversion."
+ ) + continue + + if i == 0: + batch_size = 2 # for classifier-free guidance + sample_shape = ( + batch_size, # B + pipe.unet.config.in_channels, # C + (args.latent_h or pipe.unet.config.sample_size), # H + (args.latent_w or pipe.unet.config.sample_size), # W + ) + + encoder_hidden_states_shape = ( + batch_size, + pipe.text_encoder.config.hidden_size, + 1, + pipe.text_encoder.config.max_position_embeddings, + ) + + controlnet_cond_shape = ( + batch_size, # B + 3, # C + (args.latent_h or pipe.unet.config.sample_size) * 8, # H + (args.latent_w or pipe.unet.config.sample_size) * 8, # w + ) + + # Create the scheduled timesteps for downstream use + DEFAULT_NUM_INFERENCE_STEPS = 50 + pipe.scheduler.set_timesteps(DEFAULT_NUM_INFERENCE_STEPS) + + # Prepare inputs + sample_controlnet_inputs = OrderedDict([ + ("sample", torch.rand(*sample_shape)), + ("timestep", + torch.tensor([pipe.scheduler.timesteps[0].item()] * + (batch_size)).to(torch.float32)), + ("encoder_hidden_states", torch.rand(*encoder_hidden_states_shape)), + ("controlnet_cond", torch.rand(*controlnet_cond_shape)), + ]) + sample_controlnet_inputs_spec = { + k: (v.shape, v.dtype) + for k, v in sample_controlnet_inputs.items() + } + logger.info( + f"Sample ControlNet inputs spec: {sample_controlnet_inputs_spec}") + + baseline_sample_controlnet_inputs = deepcopy(sample_controlnet_inputs) + baseline_sample_controlnet_inputs[ + "encoder_hidden_states"] = baseline_sample_controlnet_inputs[ + "encoder_hidden_states"].squeeze(2).transpose(1, 2) + + # Import controlnet model and initialize reference controlnet + original_controlnet = ControlNetModel.from_pretrained( + controlnet_model_version, + use_auth_token=True + ) + reference_controlnet = controlnet.ControlNetModel(**original_controlnet.config).eval() + load_state_dict_summary = reference_controlnet.load_state_dict( + original_controlnet.state_dict()) + + num_residuals = reference_controlnet.get_num_residuals() + output_keys = [f"additional_residual_{i}" for i in range(num_residuals)] + + # JIT trace + logger.info("JIT tracing..") + reference_controlnet = torch.jit.trace(reference_controlnet, + list(sample_controlnet_inputs.values())) + logger.info("Done.") + + if args.check_output_correctness: + baseline_out = original_controlnet(**baseline_sample_controlnet_inputs, + return_dict=False) + reference_out = reference_controlnet(*sample_controlnet_inputs.values()) + + baseline_down_residuals, baseline_mid_residuals = baseline_out + baseline_out = baseline_down_residuals + (baseline_mid_residuals,) + reference_down_residuals, reference_mid_residuals = reference_out + reference_out = reference_down_residuals +(reference_mid_residuals,) + + for key, b_out, r_out in zip(output_keys, baseline_out, reference_out): + b_out = b_out.numpy() + r_out = r_out.numpy() + logger.info(f"Check {key} correctness") + report_correctness(b_out, r_out, + f"controlnet({controlnet_model_name}) baseline to reference PyTorch") + + del original_controlnet + gc.collect() + + coreml_sample_controlnet_inputs = { + k: v.numpy().astype(np.float16) + for k, v in sample_controlnet_inputs.items() + } + + coreml_controlnet, out_path = _convert_to_coreml(f"controlnet_{controlnet_model_name}", reference_controlnet, + coreml_sample_controlnet_inputs, + output_keys, args, + out_path=out_path) + + del reference_controlnet + gc.collect() + + coreml_controlnet.author = f"Please refer to the Model Card available at huggingface.co/{controlnet_model_version}" + coreml_controlnet.license = "OpenRAIL 
(https://huggingface.co/spaces/CompVis/stable-diffusion-license)" + coreml_controlnet.version = controlnet_model_version + coreml_controlnet.short_description = \ + "ControlNet is a neural network structure to control diffusion models by adding extra conditions. " \ + "Please refer to https://arxiv.org/abs/2302.05543 for details." + + # Set the input descriptions + coreml_controlnet.input_description["sample"] = \ + "The low resolution latent feature maps being denoised through reverse diffusion" + coreml_controlnet.input_description["timestep"] = \ + "A value emitted by the associated scheduler object to condition the model on a given noise schedule" + coreml_controlnet.input_description["encoder_hidden_states"] = \ + "Output embeddings from the associated text_encoder model to condition to generated image on text. " \ + "A maximum of 77 tokens (~40 words) are allowed. Longer text is truncated. " \ + "Shorter text does not reduce computation." + coreml_controlnet.input_description["controlnet_cond"] = \ + "An additional input image for ControlNet to condition the generated images." + + # Set the output descriptions + for i in range(num_residuals): + coreml_controlnet.output_description[f"additional_residual_{i}"] = \ + "One of the outputs of each downsampling block in ControlNet. " \ + "The value added to the corresponding resnet output in UNet." + + _save_mlpackage(coreml_controlnet, out_path) + logger.info(f"Saved controlnet into {out_path}") + + # Parity check PyTorch vs CoreML + if args.check_output_correctness: + coreml_out = coreml_controlnet.predict(coreml_sample_controlnet_inputs) + for key, b_out in zip(output_keys, baseline_out): + b_out = b_out.numpy() + logger.info(f"Check {key} correctness") + report_correctness(b_out, coreml_out[key], + "controlnet baseline PyTorch to reference CoreML") + + del coreml_controlnet + gc.collect() + + +def main(args): + os.makedirs(args.o, exist_ok=True) + + # Instantiate diffusers pipe as reference + logger.info( + f"Initializing StableDiffusionPipeline with {args.model_version}..") + pipe = StableDiffusionPipeline.from_pretrained(args.model_version, + use_auth_token=True) + logger.info("Done.") + + # Register the selected attention implementation globally + unet.ATTENTION_IMPLEMENTATION_IN_EFFECT = unet.AttentionImplementations[ + args.attention_implementation] + logger.info( + f"Attention implementation in effect: {unet.ATTENTION_IMPLEMENTATION_IN_EFFECT}" + ) + + # Convert models + if args.convert_vae_decoder: + logger.info("Converting vae_decoder") + convert_vae_decoder(pipe, args) + logger.info("Converted vae_decoder") + + if args.convert_vae_encoder: + logger.info("Converting vae_encoder") + convert_vae_encoder(pipe, args) + logger.info("Converted vae_encoder") + + if args.convert_controlnet: + logger.info("Converting controlnet") + convert_controlnet(pipe, args) + logger.info("Converted controlnet") + + if args.convert_unet: + logger.info("Converting unet") + convert_unet(pipe, args) + logger.info("Converted unet") + + if args.convert_text_encoder: + logger.info("Converting text_encoder") + convert_text_encoder(pipe, args) + logger.info("Converted text_encoder") + + if args.convert_safety_checker: + logger.info("Converting safety_checker") + convert_safety_checker(pipe, args) + logger.info("Converted safety_checker") + + if args.bundle_resources_for_swift_cli: + logger.info("Bundling resources for the Swift CLI") + bundle_resources_for_swift_cli(args) + logger.info("Bundled resources for the Swift CLI") + + if 
args.quantize_weights_to_8bits:
+        # Note: not recommended, as it significantly degrades generated image quality
+        logger.info("Quantizing weights to 8-bit precision")
+        quantize_weights_to_8bits(args)
+        logger.info("Quantized weights to 8-bit precision")
+
+
+def parser_spec():
+    parser = argparse.ArgumentParser()
+
+    # Select which models to export (all are needed for the text-to-image pipeline to function)
+    parser.add_argument("--convert-text-encoder", action="store_true")
+    parser.add_argument("--convert-vae-decoder", action="store_true")
+    parser.add_argument("--convert-vae-encoder", action="store_true")
+    parser.add_argument("--convert-unet", action="store_true")
+    parser.add_argument("--convert-safety-checker", action="store_true")
+    parser.add_argument(
+        "--convert-controlnet",
+        nargs="*",
+        type=str,
+        help=
+        "Converts a ControlNet model hosted on HuggingFace to Core ML format. " \
+        "To convert multiple models, provide their names separated by spaces.",
+    )
+    parser.add_argument(
+        "--model-version",
+        default="CompVis/stable-diffusion-v1-4",
+        help=
+        ("The pre-trained model checkpoint and configuration to restore. "
+         "For available versions: https://huggingface.co/models?search=stable-diffusion"
+         ))
+    parser.add_argument("--compute-unit",
+                        choices=tuple(cu
+                                      for cu in ct.ComputeUnit._member_names_),
+                        default="ALL")
+
+    parser.add_argument(
+        "--latent-h",
+        type=int,
+        default=None,
+        help=
+        "The spatial resolution (number of rows) of the latent space. Defaults to `pipe.unet.config.sample_size`",
+    )
+    parser.add_argument(
+        "--latent-w",
+        type=int,
+        default=None,
+        help=
+        "The spatial resolution (number of cols) of the latent space. Defaults to `pipe.unet.config.sample_size`",
+    )
+    parser.add_argument(
+        "--attention-implementation",
+        choices=tuple(ai
+                      for ai in unet.AttentionImplementations._member_names_),
+        default=unet.ATTENTION_IMPLEMENTATION_IN_EFFECT.name,
+        help=
+        "The enumerated implementations trade off between ANE and GPU performance",
+    )
+    parser.add_argument(
+        "-o",
+        default=os.getcwd(),
+        help="The resulting mlpackages will be saved into this directory")
+    parser.add_argument(
+        "--check-output-correctness",
+        action="store_true",
+        help=
+        "If specified, compares the outputs of the original PyTorch and final CoreML models and reports PSNR in dB. "
+        "Enabling this feature uses more memory. Disable it if your machine runs out of memory."
+    )
+    parser.add_argument(
+        "--chunk-unet",
+        action="store_true",
+        help=
+        "If specified, generates two mlpackages out of the unet model with approximately equal weight sizes. "
+        "This is required for ANE deployment on iOS and iPadOS. Not required for macOS."
+    )
+    parser.add_argument(
+        "--quantize-weights-to-8bits",
+        action="store_true",
+        help=
+        "If specified, quantizes 16-bit weights to 8-bit weights in-place for all models. "
+        "Not recommended, as generated image quality degrades significantly after 8-bit weight quantization"
+    )
+    parser.add_argument(
+        "--unet-support-controlnet",
+        action="store_true",
+        help=
+        "If specified, enables unet to receive additional inputs from controlnet. "
+        "Each input is added to the corresponding resnet output."
+    )
+
+    # Swift CLI Resource Bundling
+    parser.add_argument(
+        "--bundle-resources-for-swift-cli",
+        action="store_true",
+        help=
+        "If specified, creates a resources directory compatible with the sample Swift CLI. "
+        "It compiles the converted models and adds them to a StableDiffusionResources directory "
+        "along with a `vocab.json` and `merges.txt` for the text tokenizer")
+    parser.add_argument(
+        "--text-encoder-vocabulary-url",
+        default=
+        "https://huggingface.co/openai/clip-vit-base-patch32/resolve/main/vocab.json",
+        help="The URL to the vocabulary file used by the text tokenizer")
+    parser.add_argument(
+        "--text-encoder-merges-url",
+        default=
+        "https://huggingface.co/openai/clip-vit-base-patch32/resolve/main/merges.txt",
+        help="The URL to the merged pairs used by the text tokenizer.")
+
+    return parser
+
+
+if __name__ == "__main__":
+    parser = parser_spec()
+    args = parser.parse_args()
+
+    main(args)
diff --git a/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/unet.py b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/unet.py
new file mode 100644
index 00000000..cf5cdb39
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/python_coreml_stable_diffusion/unet.py
@@ -0,0 +1,1104 @@
+#
+# For licensing see accompanying LICENSE.md file.
+# Copyright (C) 2022 Apple Inc. All Rights Reserved.
+#
+
+from python_coreml_stable_diffusion.layer_norm import LayerNormANE
+
+from diffusers.configuration_utils import ConfigMixin, register_to_config
+from diffusers import ModelMixin
+
+from enum import Enum
+
+import logging
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+
+import math
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+# Ensure the minimum macOS version requirement is met for this particular model
+from coremltools.models.utils import _macos_version
+if not _macos_version() >= (13, 1):
+    logger.warning(
+        "!!! macOS 13.1 and newer or iOS/iPadOS 16.2 and newer is required for best performance !!!"
+    )
+
+
+class AttentionImplementations(Enum):
+    ORIGINAL = "ORIGINAL"
+    SPLIT_EINSUM = "SPLIT_EINSUM"
+
+
+ATTENTION_IMPLEMENTATION_IN_EFFECT = AttentionImplementations.SPLIT_EINSUM
+
+WARN_MSG = \
+    "This `nn.Module` is intended for Apple Silicon deployment only. 
" \ + "PyTorch-specific optimizations and training is disabled" + +class CrossAttention(nn.Module): + """ Apple Silicon friendly version of `diffusers.models.attention.CrossAttention` + """ + def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64): + super().__init__() + inner_dim = dim_head * heads + context_dim = context_dim if context_dim is not None else query_dim + + self.scale = dim_head**-0.5 + self.heads = heads + self.dim_head = dim_head + + self.to_q = nn.Conv2d(query_dim, inner_dim, kernel_size=1, bias=False) + self.to_k = nn.Conv2d(context_dim, + inner_dim, + kernel_size=1, + bias=False) + self.to_v = nn.Conv2d(context_dim, + inner_dim, + kernel_size=1, + bias=False) + self.to_out = nn.Sequential( + nn.Conv2d(inner_dim, query_dim, kernel_size=1, bias=True)) + + def forward(self, hidden_states, context=None, mask=None): + if self.training: + raise NotImplementedError(WARN_MSG) + + batch_size, dim, _, sequence_length = hidden_states.shape + + q = self.to_q(hidden_states) + context = context if context is not None else hidden_states + k = self.to_k(context) + v = self.to_v(context) + + # Validate mask + if mask is not None: + expected_mask_shape = [batch_size, sequence_length, 1, 1] + if mask.dtype == torch.bool: + mask = mask.logical_not().float() * -1e4 + elif mask.dtype == torch.int64: + mask = (1 - mask).float() * -1e4 + elif mask.dtype != torch.float32: + raise TypeError(f"Unexpected dtype for mask: {mask.dtype}") + + if len(mask.size()) == 2: + mask = mask.unsqueeze(2).unsqueeze(2) + + if list(mask.size()) != expected_mask_shape: + raise RuntimeError( + f"Invalid shape for `mask` (Expected {expected_mask_shape}, got {list(mask.size())}" + ) + + if ATTENTION_IMPLEMENTATION_IN_EFFECT == AttentionImplementations.ORIGINAL: + # This version of the attention function is recommended for high GPU core count + # devices such as the M1 Max and M1 Ultra + bs = q.size(0) + mh_q = q.view(bs, self.heads, self.dim_head, -1) + mh_k = k.view(bs, self.heads, self.dim_head, -1) + mh_v = v.view(bs, self.heads, self.dim_head, -1) + + attn_weights = torch.einsum("bhcq,bhck->bhqk", [mh_q, mh_k]) + attn_weights.mul_(self.scale) + + if mask is not None: + attn_weights = attn_weights + mask + + attn_weights = attn_weights.softmax(dim=3) + + attn = torch.einsum("bhqk,bhck->bhcq", [attn_weights, mh_v]) + attn = attn.contiguous().view(bs, self.heads * self.dim_head, 1, + -1) + + elif ATTENTION_IMPLEMENTATION_IN_EFFECT == AttentionImplementations.SPLIT_EINSUM: + # The split attention and einsum from https://machinelearning.apple.com/research/neural-engine-transformers + # are utilized to build an ANE implementation. 
This version is marginally slower on the GPU engine and is + # not recommended for Max and Ultra Mac variants + mh_q = [ + q[:, head_idx * self.dim_head:(head_idx + 1) * + self.dim_head, :, :] for head_idx in range(self.heads) + ] # (bs, dim_head, 1, max_seq_length) * heads + + k = k.transpose(1, 3) + mh_k = [ + k[:, :, :, + head_idx * self.dim_head:(head_idx + 1) * self.dim_head] + for head_idx in range(self.heads) + ] # (bs, max_seq_length, 1, dim_head) * heads + + mh_v = [ + v[:, head_idx * self.dim_head:(head_idx + 1) * + self.dim_head, :, :] for head_idx in range(self.heads) + ] # (bs, dim_head, 1, max_seq_length) * heads + + attn_weights = [ + torch.einsum("bchq,bkhc->bkhq", [qi, ki]) * self.scale + for qi, ki in zip(mh_q, mh_k) + ] # (bs, max_seq_length, 1, max_seq_length) * heads + + if mask is not None: + for head_idx in range(self.heads): + attn_weights[head_idx] = attn_weights[head_idx] + mask + + attn_weights = [ + aw.softmax(dim=1) for aw in attn_weights + ] # (bs, max_seq_length, 1, max_seq_length) * heads + attn = [ + torch.einsum("bkhq,bchk->bchq", wi, vi) + for wi, vi in zip(attn_weights, mh_v) + ] # (bs, dim_head, 1, max_seq_length) * heads + + attn = torch.cat(attn, dim=1) # (bs, dim, 1, max_seq_length) + + else: + raise ValueError(ATTENTION_IMPLEMENTATION_IN_EFFECT) + + return self.to_out(attn) + + +def linear_to_conv2d_map(state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + """ Unsqueeze twice to map nn.Linear weights to nn.Conv2d weights + """ + for k in state_dict: + is_internal_proj = all(substr in k for substr in ["to_", ".weight"]) + is_ff_proj = all(substr in k for substr in ["ff.", ".weight"]) + is_temb_proj = all(substr in k for substr in ["time_emb", ".weight"]) + is_proj_in = "proj_in.weight" in k + is_proj_out = "proj_out.weight" in k + + if is_internal_proj or is_ff_proj or is_temb_proj or is_proj_in or is_proj_out: + if len(state_dict[k].shape) == 2: + state_dict[k] = state_dict[k][:, :, None, None] + +# Note: torch.nn.LayerNorm and ane_transformers.reference.layer_norm.LayerNormANE +# apply scale and bias terms in opposite orders. In order to accurately restore a +# state_dict trained using the former into the the latter, we adjust the bias term +def correct_for_bias_scale_order_inversion(state_dict, prefix, local_metadata, + strict, missing_keys, + unexpected_keys, error_msgs): + state_dict[prefix + + "bias"] = state_dict[prefix + "bias"] / state_dict[prefix + + "weight"] + return state_dict + + +class LayerNormANE(LayerNormANE): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._register_load_state_dict_pre_hook( + correct_for_bias_scale_order_inversion) + + +# Reference: https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unet_2d_condition.py +# (modified, e.g. 
the attention implementation) +class CrossAttnUpBlock2D(nn.Module): + + def __init__( + self, + in_channels, + out_channels, + prev_output_channel, + temb_channels, + num_layers=1, + resnet_eps=1e-6, + resnet_time_scale_shift="default", + resnet_act_fn="swish", + resnet_groups=32, + attn_num_head_channels=1, + cross_attention_dim=768, + attention_type="default", + output_scale_factor=1.0, + downsample_padding=1, + add_upsample=True, + ): + super().__init__() + resnets = [] + attentions = [] + + self.attention_type = attention_type + self.attn_num_head_channels = attn_num_head_channels + + for i in range(num_layers): + res_skip_channels = in_channels if (i == num_layers - + 1) else out_channels + resnet_in_channels = prev_output_channel if i == 0 else out_channels + + resnets.append( + ResnetBlock2D( + in_channels=resnet_in_channels + res_skip_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + time_embedding_norm=resnet_time_scale_shift, + )) + attentions.append( + SpatialTransformer( + out_channels, + attn_num_head_channels, + out_channels // attn_num_head_channels, + depth=1, + context_dim=cross_attention_dim, + )) + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + self.upsamplers = None + if add_upsample: + self.upsamplers = nn.ModuleList([Upsample2D(out_channels)]) + + def forward(self, + hidden_states, + res_hidden_states_tuple, + temb=None, + encoder_hidden_states=None): + for resnet, attn in zip(self.resnets, self.attentions): + res_hidden_states = res_hidden_states_tuple[-1] + res_hidden_states_tuple = res_hidden_states_tuple[:-1] + hidden_states = torch.cat([hidden_states, res_hidden_states], + dim=1) + + hidden_states = resnet(hidden_states, temb) + hidden_states = attn(hidden_states, context=encoder_hidden_states) + + if self.upsamplers is not None: + for upsampler in self.upsamplers: + hidden_states = upsampler(hidden_states) + + return hidden_states + + +class UpBlock2D(nn.Module): + + def __init__( + self, + in_channels, + prev_output_channel, + out_channels, + temb_channels, + num_layers=1, + resnet_eps=1e-6, + resnet_time_scale_shift="default", + resnet_act_fn="swish", + resnet_groups=32, + add_upsample=True, + ): + super().__init__() + resnets = [] + + for i in range(num_layers): + res_skip_channels = in_channels if (i == num_layers - + 1) else out_channels + resnet_in_channels = prev_output_channel if i == 0 else out_channels + + resnets.append( + ResnetBlock2D( + in_channels=resnet_in_channels + res_skip_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + time_embedding_norm=resnet_time_scale_shift, + )) + + self.resnets = nn.ModuleList(resnets) + self.upsamplers = None + if add_upsample: + self.upsamplers = nn.ModuleList([Upsample2D(out_channels)]) + + def forward(self, hidden_states, res_hidden_states_tuple, temb=None): + for resnet in self.resnets: + res_hidden_states = res_hidden_states_tuple[-1] + res_hidden_states_tuple = res_hidden_states_tuple[:-1] + hidden_states = torch.cat([hidden_states, res_hidden_states], + dim=1) + + hidden_states = resnet(hidden_states, temb) + + if self.upsamplers is not None: + for upsampler in self.upsamplers: + hidden_states = upsampler(hidden_states) + + return hidden_states + + +class CrossAttnDownBlock2D(nn.Module): + + def __init__( + self, + in_channels, + out_channels, + temb_channels, + num_layers=1, + resnet_eps=1e-6, + resnet_time_scale_shift="default", + 
resnet_act_fn="swish", + resnet_groups=32, + attn_num_head_channels=1, + cross_attention_dim=768, + attention_type="default", + output_scale_factor=1.0, + downsample_padding=1, + add_downsample=True, + ): + super().__init__() + resnets = [] + attentions = [] + + self.attention_type = attention_type + self.attn_num_head_channels = attn_num_head_channels + + for i in range(num_layers): + in_channels = in_channels if i == 0 else out_channels + resnets.append( + ResnetBlock2D( + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + time_embedding_norm=resnet_time_scale_shift, + )) + attentions.append( + SpatialTransformer( + out_channels, + attn_num_head_channels, + out_channels // attn_num_head_channels, + depth=1, + context_dim=cross_attention_dim, + )) + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + if add_downsample: + self.downsamplers = nn.ModuleList([Downsample2D(out_channels)]) + else: + self.downsamplers = None + + def forward(self, hidden_states, temb=None, encoder_hidden_states=None): + output_states = () + + for resnet, attn in zip(self.resnets, self.attentions): + hidden_states = resnet(hidden_states, temb) + hidden_states = attn(hidden_states, context=encoder_hidden_states) + output_states += (hidden_states, ) + + if self.downsamplers is not None: + for downsampler in self.downsamplers: + hidden_states = downsampler(hidden_states) + + output_states += (hidden_states, ) + + return hidden_states, output_states + + +class DownBlock2D(nn.Module): + + def __init__( + self, + in_channels, + out_channels, + temb_channels, + num_layers=1, + resnet_eps=1e-6, + resnet_time_scale_shift="default", + resnet_act_fn="swish", + resnet_groups=32, + ): + super().__init__() + resnets = [] + + for i in range(num_layers): + in_channels = in_channels if i == 0 else out_channels + resnets.append( + ResnetBlock2D( + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + time_embedding_norm=resnet_time_scale_shift, + )) + + self.resnets = nn.ModuleList(resnets) + + def forward(self, hidden_states, temb=None): + output_states = () + + for resnet in self.resnets: + hidden_states = resnet(hidden_states, temb) + output_states += (hidden_states, ) + + return hidden_states, output_states + + +class ResnetBlock2D(nn.Module): + + def __init__( + self, + *, + in_channels, + out_channels=None, + conv_shortcut=False, + temb_channels=512, + groups=32, + groups_out=None, + eps=1e-6, + time_embedding_norm="default", + use_nin_shortcut=None, + ): + super().__init__() + self.in_channels = in_channels + self.out_channels = in_channels if out_channels is None else out_channels + self.use_conv_shortcut = conv_shortcut + self.time_embedding_norm = time_embedding_norm + + if groups_out is None: + groups_out = groups + + self.norm1 = torch.nn.GroupNorm(num_groups=groups, + num_channels=in_channels, + eps=eps, + affine=True) + + self.conv1 = torch.nn.Conv2d(in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1) + + if temb_channels is not None: + self.time_emb_proj = torch.nn.Conv2d(temb_channels, + out_channels, + kernel_size=1) + else: + self.time_emb_proj = None + + self.norm2 = torch.nn.GroupNorm(num_groups=groups_out, + num_channels=out_channels, + eps=eps, + affine=True) + self.conv2 = torch.nn.Conv2d(out_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1) + + self.nonlinearity = nn.SiLU() + + 
self.use_nin_shortcut = self.in_channels != self.out_channels if use_nin_shortcut is None else use_nin_shortcut + + self.conv_shortcut = None + if self.use_nin_shortcut: + self.conv_shortcut = torch.nn.Conv2d(in_channels, + out_channels, + kernel_size=1, + stride=1, + padding=0) + + def forward(self, x, temb): + hidden_states = x + hidden_states = self.norm1(hidden_states) + hidden_states = self.nonlinearity(hidden_states) + hidden_states = self.conv1(hidden_states) + + if temb is not None: + temb = self.time_emb_proj(self.nonlinearity(temb)) + hidden_states = hidden_states + temb + + hidden_states = self.norm2(hidden_states) + hidden_states = self.nonlinearity(hidden_states) + hidden_states = self.conv2(hidden_states) + + if self.conv_shortcut is not None: + x = self.conv_shortcut(x) + + out = (x + hidden_states) + + return out + + +class Upsample2D(nn.Module): + + def __init__(self, channels): + super().__init__() + self.conv = nn.Conv2d(channels, channels, 3, padding=1) + + def forward(self, x): + x = F.interpolate(x, scale_factor=2.0, mode="nearest") + return self.conv(x) + + +class Downsample2D(nn.Module): + + def __init__(self, channels): + super().__init__() + self.conv = nn.Conv2d(channels, channels, 3, stride=2, padding=1) + + def forward(self, x): + return self.conv(x) + + +class SpatialTransformer(nn.Module): + + def __init__( + self, + in_channels, + n_heads, + d_head, + depth=1, + context_dim=None, + ): + super().__init__() + self.n_heads = n_heads + self.d_head = d_head + self.in_channels = in_channels + inner_dim = n_heads * d_head + self.norm = torch.nn.GroupNorm(num_groups=32, + num_channels=in_channels, + eps=1e-6, + affine=True) + + self.proj_in = nn.Conv2d(in_channels, + inner_dim, + kernel_size=1, + stride=1, + padding=0) + + self.transformer_blocks = nn.ModuleList([ + BasicTransformerBlock(inner_dim, + n_heads, + d_head, + context_dim=context_dim) + for d in range(depth) + ]) + + self.proj_out = nn.Conv2d(inner_dim, + in_channels, + kernel_size=1, + stride=1, + padding=0) + + def forward(self, hidden_states, context=None): + batch, channel, height, weight = hidden_states.shape + residual = hidden_states + hidden_states = self.norm(hidden_states) + hidden_states = self.proj_in(hidden_states) + hidden_states = hidden_states.view(batch, channel, 1, height * weight) + for block in self.transformer_blocks: + hidden_states = block(hidden_states, context=context) + hidden_states = hidden_states.view(batch, channel, height, weight) + hidden_states = self.proj_out(hidden_states) + return hidden_states + residual + + +class BasicTransformerBlock(nn.Module): + + def __init__(self, dim, n_heads, d_head, context_dim=None, gated_ff=True): + super().__init__() + self.attn1 = CrossAttention( + query_dim=dim, + heads=n_heads, + dim_head=d_head, + ) + self.ff = FeedForward(dim, glu=gated_ff) + self.attn2 = CrossAttention( + query_dim=dim, + context_dim=context_dim, + heads=n_heads, + dim_head=d_head, + ) + self.norm1 = LayerNormANE(dim) + self.norm2 = LayerNormANE(dim) + self.norm3 = LayerNormANE(dim) + + def forward(self, hidden_states, context=None): + hidden_states = self.attn1(self.norm1(hidden_states)) + hidden_states + hidden_states = self.attn2(self.norm2(hidden_states), + context=context) + hidden_states + hidden_states = self.ff(self.norm3(hidden_states)) + hidden_states + return hidden_states + + +class FeedForward(nn.Module): + + def __init__(self, dim, dim_out=None, mult=4, glu=False): + super().__init__() + inner_dim = int(dim * mult) + self.net = nn.Sequential( + 
GEGLU(dim_in=dim, dim_out=inner_dim),
+            # nn.Identity() stands in for the dropout of the original
+            # diffusers FeedForward so state_dict key indices (e.g. `net.2`)
+            # stay aligned.
+            nn.Identity(),
+            nn.Conv2d(inner_dim,
+                      dim_out if dim_out is not None else dim,
+                      kernel_size=1))
+
+    def forward(self, hidden_states):
+        return self.net(hidden_states)
+
+
+class GEGLU(nn.Module):
+
+    def __init__(self, dim_in, dim_out):
+        super().__init__()
+        self.proj = nn.Conv2d(dim_in, dim_out * 2, kernel_size=1)
+
+    def forward(self, hidden_states):
+        # The 1x1 conv doubled the channels; the second half gates the first
+        # half (gated GELU, the "GEGLU" variant of GLU).
+        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=1)
+        return hidden_states * F.gelu(gate)
+
+
+class TimestepEmbedding(nn.Module):
+
+    def __init__(self, channel, time_embed_dim, act_fn="silu"):
+        super().__init__()
+
+        self.linear_1 = nn.Conv2d(channel, time_embed_dim, kernel_size=1)
+        self.act = None
+        if act_fn == "silu":
+            self.act = nn.SiLU()
+        self.linear_2 = nn.Conv2d(time_embed_dim,
+                                  time_embed_dim,
+                                  kernel_size=1)
+
+    def forward(self, sample):
+        if len(sample.shape) == 2:
+            sample = sample.unsqueeze(-1).unsqueeze(-1)
+        sample = self.linear_1(sample)
+
+        if self.act is not None:
+            sample = self.act(sample)
+
+        sample = self.linear_2(sample)
+        return sample
+
+
+class Timesteps(nn.Module):
+
+    def __init__(self, num_channels, flip_sin_to_cos, downscale_freq_shift):
+        super().__init__()
+        self.num_channels = num_channels
+        self.flip_sin_to_cos = flip_sin_to_cos
+        self.downscale_freq_shift = downscale_freq_shift
+
+    def forward(self, timesteps):
+        t_emb = get_timestep_embedding(
+            timesteps,
+            self.num_channels,
+            flip_sin_to_cos=self.flip_sin_to_cos,
+            downscale_freq_shift=self.downscale_freq_shift,
+        )
+        return t_emb
+
+
+def get_timestep_embedding(
+    timesteps,
+    embedding_dim,
+    flip_sin_to_cos=False,
+    downscale_freq_shift=1,
+    scale=1,
+    max_period=10000,
+):
+    assert len(timesteps.shape) == 1, "Timesteps should be a 1-D tensor"
+
+    half_dim = embedding_dim // 2
+    exponent = -math.log(max_period) * torch.arange(
+        start=0, end=half_dim, dtype=torch.float32)
+    exponent = exponent / (half_dim - downscale_freq_shift)
+
+    emb = torch.exp(exponent).to(device=timesteps.device)
+    emb = timesteps[:, None].float() * emb[None, :]
+    emb = scale * emb
+    emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=-1)
+
+    if flip_sin_to_cos:
+        emb = torch.cat([emb[:, half_dim:], emb[:, :half_dim]], dim=-1)
+
+    if embedding_dim % 2 == 1:
+        emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
+    return emb
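+
+
+# Illustrative sanity check for the embedding above (a sketch, not executed
+# as part of this module): a batch of N scalar timesteps maps to an
+# (N, embedding_dim) table of sin/cos features, which TimestepEmbedding then
+# projects with 1x1 convs. With the defaults used by UNet2DConditionModel:
+#
+#     t_emb = get_timestep_embedding(torch.tensor([0., 500., 999.]), 320,
+#                                    flip_sin_to_cos=True)
+#     assert t_emb.shape == (3, 320)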
+
+
+class UNetMidBlock2DCrossAttn(nn.Module):
+
+    def __init__(
+        self,
+        in_channels,
+        temb_channels,
+        num_layers=1,
+        resnet_eps=1e-6,
+        resnet_time_scale_shift="default",
+        resnet_act_fn="swish",
+        resnet_groups=32,
+        attn_num_head_channels=1,
+        attention_type="default",
+        cross_attention_dim=768,
+        **kwargs,
+    ):
+        super().__init__()
+
+        self.attention_type = attention_type
+        self.attn_num_head_channels = attn_num_head_channels
+        resnet_groups = resnet_groups if resnet_groups is not None else min(
+            in_channels // 4, 32)
+
+        resnets = [
+            ResnetBlock2D(
+                in_channels=in_channels,
+                out_channels=in_channels,
+                temb_channels=temb_channels,
+                eps=resnet_eps,
+                groups=resnet_groups,
+                time_embedding_norm=resnet_time_scale_shift,
+            )
+        ]
+        attentions = []
+
+        for _ in range(num_layers):
+            attentions.append(
+                SpatialTransformer(
+                    in_channels,
+                    attn_num_head_channels,
+                    in_channels // attn_num_head_channels,
+                    depth=1,
+                    context_dim=cross_attention_dim,
+                ))
+            resnets.append(
+                ResnetBlock2D(
+                    in_channels=in_channels,
+                    out_channels=in_channels,
+                    temb_channels=temb_channels,
+                    eps=resnet_eps,
+                    groups=resnet_groups,
+                    time_embedding_norm=resnet_time_scale_shift,
+                ))
+
+        self.attentions = nn.ModuleList(attentions)
+        self.resnets = nn.ModuleList(resnets)
+
+    def forward(self, hidden_states, temb=None, encoder_hidden_states=None):
+        hidden_states = self.resnets[0](hidden_states, temb)
+        for attn, resnet in zip(self.attentions, self.resnets[1:]):
+            hidden_states = attn(hidden_states, encoder_hidden_states)
+            hidden_states = resnet(hidden_states, temb)
+
+        return hidden_states
+
+
+class UNet2DConditionModel(ModelMixin, ConfigMixin):
+
+    @register_to_config
+    def __init__(
+        self,
+        sample_size=None,
+        in_channels=4,
+        out_channels=4,
+        center_input_sample=False,
+        flip_sin_to_cos=True,
+        freq_shift=0,
+        down_block_types=(
+            "CrossAttnDownBlock2D",
+            "CrossAttnDownBlock2D",
+            "CrossAttnDownBlock2D",
+            "DownBlock2D",
+        ),
+        up_block_types=("UpBlock2D", "CrossAttnUpBlock2D",
+                        "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"),
+        only_cross_attention=False,
+        block_out_channels=(320, 640, 1280, 1280),
+        layers_per_block=2,
+        downsample_padding=1,
+        mid_block_scale_factor=1,
+        act_fn="silu",
+        norm_num_groups=32,
+        norm_eps=1e-5,
+        cross_attention_dim=768,
+        attention_head_dim=8,
+        **kwargs,
+    ):
+        if kwargs.get("dual_cross_attention", None):
+            raise NotImplementedError
+        if kwargs.get("num_class_embeds", None):
+            raise NotImplementedError
+        if only_cross_attention:
+            raise NotImplementedError
+        if kwargs.get("use_linear_projection", None):
+            logger.warning("`use_linear_projection=True` is ignored!")
+
+        super().__init__()
+        self._register_load_state_dict_pre_hook(linear_to_conv2d_map)
+
+        self.sample_size = sample_size
+        time_embed_dim = block_out_channels[0] * 4
+
+        # input
+        self.conv_in = nn.Conv2d(in_channels,
+                                 block_out_channels[0],
+                                 kernel_size=3,
+                                 padding=(1, 1))
+
+        # time
+        time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos,
+                              freq_shift)
+        timestep_input_dim = block_out_channels[0]
+        time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
+
+        self.time_proj = time_proj
+        self.time_embedding = time_embedding
+
+        self.down_blocks = nn.ModuleList([])
+        self.mid_block = None
+        self.up_blocks = nn.ModuleList([])
+
+        if isinstance(only_cross_attention, bool):
+            only_cross_attention = [only_cross_attention
+                                    ] * len(down_block_types)
+
+        if isinstance(attention_head_dim, int):
+            attention_head_dim = (attention_head_dim,) * len(down_block_types)
+
+        # down
+        output_channel = block_out_channels[0]
+        for i, down_block_type in enumerate(down_block_types):
+            input_channel = output_channel
+            output_channel = block_out_channels[i]
+
+            down_block = get_down_block(
+                down_block_type,
+                num_layers=layers_per_block,
+                in_channels=input_channel,
+                out_channels=output_channel,
+                temb_channels=time_embed_dim,
+                resnet_eps=norm_eps,
+                resnet_act_fn=act_fn,
+                cross_attention_dim=cross_attention_dim,
+                attn_num_head_channels=attention_head_dim[i],
+                downsample_padding=downsample_padding,
+            )
+            self.down_blocks.append(down_block)
+
+        # mid
+        self.mid_block = UNetMidBlock2DCrossAttn(
+            in_channels=block_out_channels[-1],
+            temb_channels=time_embed_dim,
+            resnet_eps=norm_eps,
+            resnet_act_fn=act_fn,
+            output_scale_factor=mid_block_scale_factor,
+            resnet_time_scale_shift="default",
+            cross_attention_dim=cross_attention_dim,
+            attn_num_head_channels=attention_head_dim[-1],  # deepest stage
+            resnet_groups=norm_num_groups,
+        )
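+
+        # The up path mirrors the down path in reverse: `prev_output_channel`
+        # is what the previous (coarser) up block emitted, while
+        # `input_channel` follows the channel widths of the skip tensors that
+        # forward() pops off the down-block residual stack.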
+        # up
+        reversed_block_out_channels = list(reversed(block_out_channels))
+        reversed_attention_head_dim = list(reversed(attention_head_dim))
+        output_channel = reversed_block_out_channels[0]
+        for i, up_block_type in enumerate(up_block_types):
+            prev_output_channel = output_channel
+            output_channel = reversed_block_out_channels[i]
+            input_channel = reversed_block_out_channels[min(
+                i + 1,
+                len(block_out_channels) - 1)]
+
+            is_final_block = i == len(block_out_channels) - 1
+
+            up_block = get_up_block(
+                up_block_type,
+                num_layers=layers_per_block + 1,
+                in_channels=input_channel,
+                out_channels=output_channel,
+                prev_output_channel=prev_output_channel,
+                temb_channels=time_embed_dim,
+                add_upsample=not is_final_block,
+                resnet_eps=norm_eps,
+                resnet_act_fn=act_fn,
+                cross_attention_dim=cross_attention_dim,
+                attn_num_head_channels=reversed_attention_head_dim[i],
+            )
+            self.up_blocks.append(up_block)
+
+        # out
+        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0],
+                                          num_groups=norm_num_groups,
+                                          eps=norm_eps)
+        self.conv_act = nn.SiLU()
+        self.conv_out = nn.Conv2d(block_out_channels[0],
+                                  out_channels,
+                                  3,
+                                  padding=1)
+
+    def forward(
+        self,
+        sample,
+        timestep,
+        encoder_hidden_states,
+        *additional_residuals,
+    ):
+        # 0. Project (or look-up) time embeddings
+        t_emb = self.time_proj(timestep)
+        emb = self.time_embedding(t_emb)
+
+        # 1. center input if necessary
+        if self.config.center_input_sample:
+            sample = 2 * sample - 1.0
+
+        # 2. pre-process
+        sample = self.conv_in(sample)
+
+        # 3. down
+        down_block_res_samples = (sample, )
+        for downsample_block in self.down_blocks:
+            if hasattr(
+                    downsample_block,
+                    "attentions") and downsample_block.attentions is not None:
+                sample, res_samples = downsample_block(
+                    hidden_states=sample,
+                    temb=emb,
+                    encoder_hidden_states=encoder_hidden_states)
+            else:
+                sample, res_samples = downsample_block(hidden_states=sample,
+                                                       temb=emb)
+
+            down_block_res_samples += res_samples
+
+        # ControlNet-style residuals, if provided, are added to the skip
+        # tensors before the up path consumes them.
+        if additional_residuals:
+            new_down_block_res_samples = ()
+            for i, down_block_res_sample in enumerate(down_block_res_samples):
+                down_block_res_sample = down_block_res_sample + additional_residuals[i]
+                new_down_block_res_samples += (down_block_res_sample,)
+            down_block_res_samples = new_down_block_res_samples
+
+        # 4. mid
+        sample = self.mid_block(sample,
+                                emb,
+                                encoder_hidden_states=encoder_hidden_states)
+
+        if additional_residuals:
+            sample = sample + additional_residuals[-1]
+
+        # 5. up
+        for upsample_block in self.up_blocks:
+            res_samples = down_block_res_samples[-len(upsample_block.resnets):]
+            down_block_res_samples = down_block_res_samples[:-len(
+                upsample_block.resnets)]
+
+            if hasattr(upsample_block,
+                       "attentions") and upsample_block.attentions is not None:
+                sample = upsample_block(
+                    hidden_states=sample,
+                    temb=emb,
+                    res_hidden_states_tuple=res_samples,
+                    encoder_hidden_states=encoder_hidden_states,
+                )
+            else:
+                sample = upsample_block(hidden_states=sample,
+                                        temb=emb,
+                                        res_hidden_states_tuple=res_samples)
+
+        # 6. post-process
+        sample = self.conv_norm_out(sample)
+        sample = self.conv_act(sample)
+        sample = self.conv_out(sample)
+
+        return (sample, )
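+
+
+# Illustrative usage (a sketch, not executed here). Shapes assume Stable
+# Diffusion v1.x dimensions and the channels-first text-embedding layout used
+# by this port (CLIP hidden states as (batch, 768, 1, 77)):
+#
+#     unet = UNet2DConditionModel()
+#     latents = torch.randn(1, 4, 64, 64)
+#     timestep = torch.tensor([981.])
+#     context = torch.randn(1, 768, 1, 77)
+#     noise_pred = unet(latents, timestep, context)[0]  # (1, 4, 64, 64)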
+
+
+def get_down_block(
+    down_block_type,
+    num_layers,
+    in_channels,
+    out_channels,
+    temb_channels,
+    resnet_eps,
+    resnet_act_fn,
+    attn_num_head_channels,
+    cross_attention_dim=None,
+    downsample_padding=None,
+):
+    down_block_type = down_block_type[7:] if down_block_type.startswith(
+        "UNetRes") else down_block_type
+    if down_block_type == "DownBlock2D":
+        return DownBlock2D(
+            num_layers=num_layers,
+            in_channels=in_channels,
+            out_channels=out_channels,
+            temb_channels=temb_channels,
+            resnet_eps=resnet_eps,
+            resnet_act_fn=resnet_act_fn,
+        )
+    elif down_block_type == "CrossAttnDownBlock2D":
+        if cross_attention_dim is None:
+            raise ValueError(
+                "cross_attention_dim must be specified for CrossAttnDownBlock2D"
+            )
+        return CrossAttnDownBlock2D(
+            num_layers=num_layers,
+            in_channels=in_channels,
+            out_channels=out_channels,
+            temb_channels=temb_channels,
+            resnet_eps=resnet_eps,
+            resnet_act_fn=resnet_act_fn,
+            downsample_padding=downsample_padding,
+            cross_attention_dim=cross_attention_dim,
+            attn_num_head_channels=attn_num_head_channels,
+        )
+    raise ValueError(f"{down_block_type} does not exist.")
+
+
+def get_up_block(
+    up_block_type,
+    num_layers,
+    in_channels,
+    out_channels,
+    prev_output_channel,
+    temb_channels,
+    add_upsample,
+    resnet_eps,
+    resnet_act_fn,
+    attn_num_head_channels,
+    cross_attention_dim=None,
+):
+    up_block_type = up_block_type[7:] if up_block_type.startswith(
+        "UNetRes") else up_block_type
+    if up_block_type == "UpBlock2D":
+        return UpBlock2D(
+            num_layers=num_layers,
+            in_channels=in_channels,
+            out_channels=out_channels,
+            prev_output_channel=prev_output_channel,
+            temb_channels=temb_channels,
+            add_upsample=add_upsample,
+            resnet_eps=resnet_eps,
+            resnet_act_fn=resnet_act_fn,
+        )
+    elif up_block_type == "CrossAttnUpBlock2D":
+        if cross_attention_dim is None:
+            raise ValueError(
+                "cross_attention_dim must be specified for CrossAttnUpBlock2D")
+        return CrossAttnUpBlock2D(
+            num_layers=num_layers,
+            in_channels=in_channels,
+            out_channels=out_channels,
+            prev_output_channel=prev_output_channel,
+            temb_channels=temb_channels,
+            add_upsample=add_upsample,
+            resnet_eps=resnet_eps,
+            resnet_act_fn=resnet_act_fn,
+            cross_attention_dim=cross_attention_dim,
+            attn_num_head_channels=attn_num_head_channels,
+        )
+    raise ValueError(f"{up_block_type} does not exist.")
+
+
+def calculate_conv2d_output_shape(in_h, in_w, conv2d_layer):
+    """ Compute the spatial output shape of a Conv2d layer via the standard
+    formula `out = floor((in + 2 * pad - kernel) / stride) + 1`
+    (dilation is assumed to be 1).
+    """
+    k_h, k_w = conv2d_layer.kernel_size
+    pad_h, pad_w = conv2d_layer.padding
+    stride_h, stride_w = conv2d_layer.stride
+
+    out_h = math.floor((in_h + 2 * pad_h - k_h) / stride_h + 1)
+    out_w = math.floor((in_w + 2 * pad_w - k_w) / stride_w + 1)
+
+    return out_h, out_w
diff --git a/__packaged__/coreml/.python_dependencies/scipy-1.10.1.dist-info/INSTALLER b/__packaged__/coreml/.python_dependencies/scipy-1.10.1.dist-info/INSTALLER
new file mode 100644
index 00000000..a1b589e3
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy-1.10.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/__packaged__/coreml/.python_dependencies/scipy-1.10.1.dist-info/LICENSE.txt b/__packaged__/coreml/.python_dependencies/scipy-1.10.1.dist-info/LICENSE.txt
new file mode 100644
index 00000000..601c3f4b
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy-1.10.1.dist-info/LICENSE.txt
@@ -0,0 +1,819 @@
+Copyright (c) 2001-2002 Enthought, Inc. 2003-2022, SciPy Developers.
+All rights reserved.
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---- + +This binary distribution of SciPy also bundles the following software: + + +Name: GCC runtime library +Files: .dylibs/* +Description: dynamically linked to files compiled with gcc +Availability: https://gcc.gnu.org/viewcvs/gcc/ +License: GPLv3 + runtime exception + Copyright (C) 2002-2017 Free Software Foundation, Inc. + + Libgfortran is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgfortran is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . + +---- + +Full text of license texts referred to above follows (that they are +listed below does not necessarily imply the conditions apply to the +present binary release): + +---- + +GCC RUNTIME LIBRARY EXCEPTION + +Version 3.1, 31 March 2009 + +Copyright (C) 2009 Free Software Foundation, Inc. + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +This GCC Runtime Library Exception ("Exception") is an additional +permission under section 7 of the GNU General Public License, version +3 ("GPLv3"). It applies to a given file (the "Runtime Library") that +bears a notice placed by the copyright holder of the file stating that +the file is governed by GPLv3 along with this Exception. 
+ +When you use GCC to compile a program, GCC may combine portions of +certain GCC header files and runtime libraries with the compiled +program. The purpose of this Exception is to allow compilation of +non-GPL (including proprietary) programs to use, in this way, the +header files and runtime libraries covered by this Exception. + +0. Definitions. + +A file is an "Independent Module" if it either requires the Runtime +Library for execution after a Compilation Process, or makes use of an +interface provided by the Runtime Library, but is not otherwise based +on the Runtime Library. + +"GCC" means a version of the GNU Compiler Collection, with or without +modifications, governed by version 3 (or a specified later version) of +the GNU General Public License (GPL) with the option of using any +subsequent versions published by the FSF. + +"GPL-compatible Software" is software whose conditions of propagation, +modification and use would permit combination with GCC in accord with +the license of GCC. + +"Target Code" refers to output from any compiler for a real or virtual +target processor architecture, in executable form or suitable for +input to an assembler, loader, linker and/or execution +phase. Notwithstanding that, Target Code does not include data in any +format that is used as a compiler intermediate representation, or used +for producing a compiler intermediate representation. + +The "Compilation Process" transforms code entirely represented in +non-intermediate languages designed for human-written code, and/or in +Java Virtual Machine byte code, into Target Code. Thus, for example, +use of source code generators and preprocessors need not be considered +part of the Compilation Process, since the Compilation Process can be +understood as starting with the output of the generators or +preprocessors. + +A Compilation Process is "Eligible" if it is done using GCC, alone or +with other GPL-compatible software, or if it is done without using any +work based on GCC. For example, using non-GPL-compatible Software to +optimize any GCC intermediate representations would not qualify as an +Eligible Compilation Process. + +1. Grant of Additional Permission. + +You have permission to propagate a work of Target Code formed by +combining the Runtime Library with Independent Modules, even if such +propagation would otherwise violate the terms of GPLv3, provided that +all Target Code was generated by Eligible Compilation Processes. You +may then convey such a combination under terms of your choice, +consistent with the licensing of the Independent Modules. + +2. No Weakening of GCC Copyleft. + +The availability of this Exception does not imply any general +presumption that third-party software is unaffected by the copyleft +requirements of the license of GCC. + +---- + + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. 
We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. 
The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. 
+ + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. 
This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). 
+ + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". 
+ + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. 
+ + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. 
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/__packaged__/coreml/.python_dependencies/scipy-1.10.1.dist-info/METADATA b/__packaged__/coreml/.python_dependencies/scipy-1.10.1.dist-info/METADATA
new file mode 100644
index 00000000..99d9ea2b
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy-1.10.1.dist-info/METADATA
@@ -0,0 +1,952 @@
+Metadata-Version: 2.1
+Name: scipy
+Version: 1.10.1
+Summary: Fundamental algorithms for scientific computing in Python
+Home-page: https://scipy.org/
+Maintainer-Email: SciPy Developers <scipy-dev@python.org>
+License: Copyright (c) 2001-2002 Enthought, Inc. 2003-2022, SciPy Developers.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+ 3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ----
+
+ This binary distribution of SciPy also bundles the following software:
+
+
+ Name: GCC runtime library
+ Files: .dylibs/*
+ Description: dynamically linked to files compiled with gcc
+ Availability: https://gcc.gnu.org/viewcvs/gcc/
+ License: GPLv3 + runtime exception
+ Copyright (C) 2002-2017 Free Software Foundation, Inc.
+
+ Libgfortran is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ Libgfortran is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>.
+
+ ----
+
+ Full text of license texts referred to above follows (that they are
+ listed below does not necessarily imply the conditions apply to the
+ present binary release):
+
+ ----
+
+ GCC RUNTIME LIBRARY EXCEPTION
+
+ Version 3.1, 31 March 2009
+
+ Copyright (C) 2009 Free Software Foundation, Inc. <http://fsf.org/>
+
+ Everyone is permitted to copy and distribute verbatim copies of this
+ license document, but changing it is not allowed.
+
+ This GCC Runtime Library Exception ("Exception") is an additional
+ permission under section 7 of the GNU General Public License, version
+ 3 ("GPLv3"). It applies to a given file (the "Runtime Library") that
+ bears a notice placed by the copyright holder of the file stating that
+ the file is governed by GPLv3 along with this Exception.
+
+ When you use GCC to compile a program, GCC may combine portions of
+ certain GCC header files and runtime libraries with the compiled
+ program. The purpose of this Exception is to allow compilation of
+ non-GPL (including proprietary) programs to use, in this way, the
+ header files and runtime libraries covered by this Exception.
+
+ 0. Definitions.
+
+ A file is an "Independent Module" if it either requires the Runtime
+ Library for execution after a Compilation Process, or makes use of an
+ interface provided by the Runtime Library, but is not otherwise based
+ on the Runtime Library.
+
+ "GCC" means a version of the GNU Compiler Collection, with or without
+ modifications, governed by version 3 (or a specified later version) of
+ the GNU General Public License (GPL) with the option of using any
+ subsequent versions published by the FSF.
+
+ "GPL-compatible Software" is software whose conditions of propagation,
+ modification and use would permit combination with GCC in accord with
+ the license of GCC.
+
+ "Target Code" refers to output from any compiler for a real or virtual
+ target processor architecture, in executable form or suitable for
+ input to an assembler, loader, linker and/or execution
+ phase. Notwithstanding that, Target Code does not include data in any
+ format that is used as a compiler intermediate representation, or used
+ for producing a compiler intermediate representation.
+
+ The "Compilation Process" transforms code entirely represented in
+ non-intermediate languages designed for human-written code, and/or in
+ Java Virtual Machine byte code, into Target Code. Thus, for example,
+ use of source code generators and preprocessors need not be considered
+ part of the Compilation Process, since the Compilation Process can be
+ understood as starting with the output of the generators or
+ preprocessors.
+
+ A Compilation Process is "Eligible" if it is done using GCC, alone or
+ with other GPL-compatible software, or if it is done without using any
+ work based on GCC. For example, using non-GPL-compatible Software to
+ optimize any GCC intermediate representations would not qualify as an
+ Eligible Compilation Process.
+
+ 1. Grant of Additional Permission.
+
+ You have permission to propagate a work of Target Code formed by
+ combining the Runtime Library with Independent Modules, even if such
+ propagation would otherwise violate the terms of GPLv3, provided that
+ all Target Code was generated by Eligible Compilation Processes. You
+ may then convey such a combination under terms of your choice,
+ consistent with the licensing of the Independent Modules.
+
+ 2. No Weakening of GCC Copyleft.
+
+ The availability of this Exception does not imply any general
+ presumption that third-party software is unaffected by the copyleft
+ requirements of the license of GCC.
+
+ ----
+
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+ software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+ to take away your freedom to share and change the works. By contrast,
+ the GNU General Public License is intended to guarantee your freedom to
+ share and change all versions of a program--to make sure it remains free
+ software for all its users. We, the Free Software Foundation, use the
+ GNU General Public License for most of our software; it applies also to
+ any other work released this way by its authors. You can apply it to
+ your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+ price. Our General Public Licenses are designed to make sure that you
+ have the freedom to distribute copies of free software (and charge for
+ them if you wish), that you receive source code or can get it if you
+ want it, that you can change the software or use pieces of it in new
+ free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+ these rights or asking you to surrender the rights. Therefore, you have
+ certain responsibilities if you distribute copies of the software, or if
+ you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+ gratis or for a fee, you must pass on to the recipients the same
+ freedoms that you received. You must make sure that they, too, receive
+ or can get the source code. And you must show them these terms so they
+ know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+ (1) assert copyright on the software, and (2) offer you this License
+ giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+ that there is no warranty for this free software. For both users' and
+ authors' sake, the GPL requires that modified versions be marked as
+ changed, so that their problems will not be attributed erroneously to
+ authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+ modified versions of the software inside them, although the manufacturer
+ can do so. This is fundamentally incompatible with the aim of
+ protecting users' freedom to change the software. The systematic
+ pattern of such abuse occurs in the area of products for individuals to
+ use, which is precisely where it is most unacceptable. Therefore, we
+ have designed this version of the GPL to prohibit the practice for those
+ products. If such problems arise substantially in other domains, we
+ stand ready to extend this provision to those domains in future versions
+ of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+ States should not allow patents to restrict development and use of + software on general-purpose computers, but in those that do, we wish to + avoid the special danger that patents applied to a free program could + make it effectively proprietary. To prevent this, the GPL assures that + patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and + modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of + works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this + License. Each licensee is addressed as "you". "Licensees" and + "recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work + in a fashion requiring copyright permission, other than the making of an + exact copy. The resulting work is called a "modified version" of the + earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based + on the Program. + + To "propagate" a work means to do anything with it that, without + permission, would make you directly or secondarily liable for + infringement under applicable copyright law, except executing it on a + computer or modifying a private copy. Propagation includes copying, + distribution (with or without modification), making available to the + public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other + parties to make or receive copies. Mere interaction with a user through + a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" + to the extent that it includes a convenient and prominently visible + feature that (1) displays an appropriate copyright notice, and (2) + tells the user that there is no warranty for the work (except to the + extent that warranties are provided), that licensees may convey the + work under this License, and how to view a copy of this License. If + the interface presents a list of user commands or options, such as a + menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work + for making modifications to it. "Object code" means any non-source + form of a work. + + A "Standard Interface" means an interface that either is an official + standard defined by a recognized standards body, or, in the case of + interfaces specified for a particular programming language, one that + is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other + than the work as a whole, that (a) is included in the normal form of + packaging a Major Component, but which is not part of that Major + Component, and (b) serves only to enable use of the work with that + Major Component, or to implement a Standard Interface for which an + implementation is available to the public in source code form. A + "Major Component", in this context, means a major essential component + (kernel, window system, and so on) of the specific operating system + (if any) on which the executable work runs, or a compiler used to + produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all + the source code needed to generate, install, and (for an executable + work) run the object code and to modify the work, including scripts to + control those activities. However, it does not include the work's + System Libraries, or general-purpose tools or generally available free + programs which are used unmodified in performing those activities but + which are not part of the work. For example, Corresponding Source + includes interface definition files associated with source files for + the work, and the source code for shared libraries and dynamically + linked subprograms that the work is specifically designed to require, + such as by intimate data communication or control flow between those + subprograms and other parts of the work. + + The Corresponding Source need not include anything that users + can regenerate automatically from other parts of the Corresponding + Source. + + The Corresponding Source for a work in source code form is that + same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of + copyright on the Program, and are irrevocable provided the stated + conditions are met. This License explicitly affirms your unlimited + permission to run the unmodified Program. The output from running a + covered work is covered by this License only if the output, given its + content, constitutes a covered work. This License acknowledges your + rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not + convey, without conditions so long as your license otherwise remains + in force. You may convey covered works to others for the sole purpose + of having them make modifications exclusively for you, or provide you + with facilities for running those works, provided that you comply with + the terms of this License in conveying all material for which you do + not control copyright. Those thus making or running the covered works + for you must do so exclusively on your behalf, under your direction + and control, on terms that prohibit them from making any copies of + your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under + the conditions stated below. Sublicensing is not allowed; section 10 + makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological + measure under any applicable law fulfilling obligations under article + 11 of the WIPO copyright treaty adopted on 20 December 1996, or + similar laws prohibiting or restricting circumvention of such + measures. + + When you convey a covered work, you waive any legal power to forbid + circumvention of technological measures to the extent such circumvention + is effected by exercising rights under this License with respect to + the covered work, and you disclaim any intention to limit operation or + modification of the work as a means of enforcing, against the work's + users, your or third parties' legal rights to forbid circumvention of + technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you + receive it, in any medium, provided that you conspicuously and + appropriately publish on each copy an appropriate copyright notice; + keep intact all notices stating that this License and any + non-permissive terms added in accord with section 7 apply to the code; + keep intact all notices of the absence of any warranty; and give all + recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, + and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to + produce it from the Program, in the form of source code under the + terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent + works, which are not by their nature extensions of the covered work, + and which are not combined with it such as to form a larger program, + in or on a volume of a storage or distribution medium, is called an + "aggregate" if the compilation and its resulting copyright are not + used to limit the access or legal rights of the compilation's users + beyond what the individual works permit. Inclusion of a covered work + in an aggregate does not cause this License to apply to the other + parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms + of sections 4 and 5, provided that you also convey the + machine-readable Corresponding Source under the terms of this License, + in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded + from the Corresponding Source as a System Library, need not be + included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any + tangible personal property which is normally used for personal, family, + or household purposes, or (2) anything designed or sold for incorporation + into a dwelling. In determining whether a product is a consumer product, + doubtful cases shall be resolved in favor of coverage. For a particular + product received by a particular user, "normally used" refers to a + typical or common use of that class of product, regardless of the status + of the particular user or of the way in which the particular user + actually uses, or expects or is expected to use, the product. A product + is a consumer product regardless of whether the product has substantial + commercial, industrial or non-consumer uses, unless such uses represent + the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, + procedures, authorization keys, or other information required to install + and execute modified versions of a covered work in that User Product from + a modified version of its Corresponding Source. The information must + suffice to ensure that the continued functioning of the modified object + code is in no case prevented or interfered with solely because + modification has been made. 
+ + If you convey an object code work under this section in, or with, or + specifically for use in, a User Product, and the conveying occurs as + part of a transaction in which the right of possession and use of the + User Product is transferred to the recipient in perpetuity or for a + fixed term (regardless of how the transaction is characterized), the + Corresponding Source conveyed under this section must be accompanied + by the Installation Information. But this requirement does not apply + if neither you nor any third party retains the ability to install + modified object code on the User Product (for example, the work has + been installed in ROM). + + The requirement to provide Installation Information does not include a + requirement to continue to provide support service, warranty, or updates + for a work that has been modified or installed by the recipient, or for + the User Product in which it has been modified or installed. Access to a + network may be denied when the modification itself materially and + adversely affects the operation of the network or violates the rules and + protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, + in accord with this section must be in a format that is publicly + documented (and with an implementation available to the public in + source code form), and must require no special password or key for + unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this + License by making exceptions from one or more of its conditions. + Additional permissions that are applicable to the entire Program shall + be treated as though they were included in this License, to the extent + that they are valid under applicable law. If additional permissions + apply only to part of the Program, that part may be used separately + under those permissions, but the entire Program remains governed by + this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option + remove any additional permissions from that copy, or from any part of + it. (Additional permissions may be written to require their own + removal in certain cases when you modify the work.) You may place + additional permissions on material, added by you to a covered work, + for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you + add to a covered work, you may (if authorized by the copyright holders of + that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further + restrictions" within the meaning of section 10. If the Program as you + received it, or any part of it, contains a notice stating that it is + governed by this License along with a term that is a further + restriction, you may remove that term. If a license document contains + a further restriction but permits relicensing or conveying under this + License, you may add to a covered work material governed by the terms + of that license document, provided that the further restriction does + not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you + must place, in the relevant source files, a statement of the + additional terms that apply to those files, or a notice indicating + where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the + form of a separately written license, or stated as exceptions; + the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly + provided under this License. Any attempt otherwise to propagate or + modify it is void, and will automatically terminate your rights under + this License (including any patent licenses granted under the third + paragraph of section 11). + + However, if you cease all violation of this License, then your + license from a particular copyright holder is reinstated (a) + provisionally, unless and until the copyright holder explicitly and + finally terminates your license, and (b) permanently, if the copyright + holder fails to notify you of the violation by some reasonable means + prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is + reinstated permanently if the copyright holder notifies you of the + violation by some reasonable means, this is the first time you have + received notice of violation of this License (for any work) from that + copyright holder, and you cure the violation prior to 30 days after + your receipt of the notice. + + Termination of your rights under this section does not terminate the + licenses of parties who have received copies or rights from you under + this License. 
If your rights have been terminated and not permanently + reinstated, you do not qualify to receive new licenses for the same + material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or + run a copy of the Program. Ancillary propagation of a covered work + occurring solely as a consequence of using peer-to-peer transmission + to receive a copy likewise does not require acceptance. However, + nothing other than this License grants you permission to propagate or + modify any covered work. These actions infringe copyright if you do + not accept this License. Therefore, by modifying or propagating a + covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically + receives a license from the original licensors, to run, modify and + propagate that work, subject to this License. You are not responsible + for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an + organization, or substantially all assets of one, or subdividing an + organization, or merging organizations. If propagation of a covered + work results from an entity transaction, each party to that + transaction who receives a copy of the work also receives whatever + licenses to the work the party's predecessor in interest had or could + give under the previous paragraph, plus a right to possession of the + Corresponding Source of the work from the predecessor in interest, if + the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the + rights granted or affirmed under this License. For example, you may + not impose a license fee, royalty, or other charge for exercise of + rights granted under this License, and you may not initiate litigation + (including a cross-claim or counterclaim in a lawsuit) alleging that + any patent claim is infringed by making, using, selling, offering for + sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this + License of the Program or a work on which the Program is based. The + work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims + owned or controlled by the contributor, whether already acquired or + hereafter acquired, that would be infringed by some manner, permitted + by this License, of making, using, or selling its contributor version, + but do not include claims that would be infringed only as a + consequence of further modification of the contributor version. For + purposes of this definition, "control" includes the right to grant + patent sublicenses in a manner consistent with the requirements of + this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free + patent license under the contributor's essential patent claims, to + make, use, sell, offer for sale, import and otherwise run, modify and + propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express + agreement or commitment, however denominated, not to enforce a patent + (such as an express permission to practice a patent or covenant not to + sue for patent infringement). 
To "grant" such a patent license to a + party means to make such an agreement or commitment not to enforce a + patent against the party. + + If you convey a covered work, knowingly relying on a patent license, + and the Corresponding Source of the work is not available for anyone + to copy, free of charge and under the terms of this License, through a + publicly available network server or other readily accessible means, + then you must either (1) cause the Corresponding Source to be so + available, or (2) arrange to deprive yourself of the benefit of the + patent license for this particular work, or (3) arrange, in a manner + consistent with the requirements of this License, to extend the patent + license to downstream recipients. "Knowingly relying" means you have + actual knowledge that, but for the patent license, your conveying the + covered work in a country, or your recipient's use of the covered work + in a country, would infringe one or more identifiable patents in that + country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or + arrangement, you convey, or propagate by procuring conveyance of, a + covered work, and grant a patent license to some of the parties + receiving the covered work authorizing them to use, propagate, modify + or convey a specific copy of the covered work, then the patent license + you grant is automatically extended to all recipients of the covered + work and works based on it. + + A patent license is "discriminatory" if it does not include within + the scope of its coverage, prohibits the exercise of, or is + conditioned on the non-exercise of one or more of the rights that are + specifically granted under this License. You may not convey a covered + work if you are a party to an arrangement with a third party that is + in the business of distributing software, under which you make payment + to the third party based on the extent of your activity of conveying + the work, and under which the third party grants, to any of the + parties who would receive the covered work from you, a discriminatory + patent license (a) in connection with copies of the covered work + conveyed by you (or copies made from those copies), or (b) primarily + for and in connection with specific products or compilations that + contain the covered work, unless you entered into that arrangement, + or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting + any implied license or other defenses to infringement that may + otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or + otherwise) that contradict the conditions of this License, they do not + excuse you from the conditions of this License. If you cannot convey a + covered work so as to satisfy simultaneously your obligations under this + License and any other pertinent obligations, then as a consequence you may + not convey it at all. For example, if you agree to terms that obligate you + to collect a royalty for further conveying from those to whom you convey + the Program, the only way you could satisfy both those terms and this + License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have + permission to link or combine any covered work with a work licensed + under version 3 of the GNU Affero General Public License into a single + combined work, and to convey the resulting work. The terms of this + License will continue to apply to the part which is the covered work, + but the special requirements of the GNU Affero General Public License, + section 13, concerning interaction through a network will apply to the + combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of + the GNU General Public License from time to time. Such new versions will + be similar in spirit to the present version, but may differ in detail to + address new problems or concerns. + + Each version is given a distinguishing version number. If the + Program specifies that a certain numbered version of the GNU General + Public License "or any later version" applies to it, you have the + option of following the terms and conditions either of that numbered + version or of any later version published by the Free Software + Foundation. If the Program does not specify a version number of the + GNU General Public License, you may choose any version ever published + by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future + versions of the GNU General Public License can be used, that proxy's + public statement of acceptance of a version permanently authorizes you + to choose that version for the Program. + + Later license versions may give you additional or different + permissions. However, no additional obligations are imposed on any + author or copyright holder as a result of your choosing to follow a + later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY + APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT + HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY + OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, + THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM + IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF + ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING + WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS + THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY + GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE + USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF + DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD + PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), + EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF + SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided + above cannot be given local legal effect according to their terms, + reviewing courts shall apply local law that most closely approximates + an absolute waiver of all civil liability in connection with the + Program, unless a warranty or assumption of liability accompanies a + copy of the Program in return for a fee. 
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+ possible use to the public, the best way to achieve this is to make it
+ free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+ to attach them to the start of each source file to most effectively
+ state the exclusion of warranty; and each file should have at least
+ the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+ Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+ notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+ The hypothetical commands `show w' and `show c' should show the appropriate
+ parts of the General Public License. Of course, your program's commands
+ might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+ if any, to sign a "copyright disclaimer" for the program, if necessary.
+ For more information on this, and how to apply and follow the GNU GPL, see
+ <http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+ into proprietary programs. If your program is a subroutine library, you
+ may consider it more useful to permit linking proprietary applications with
+ the library. If this is what you want to do, use the GNU Lesser General
+ Public License instead of this License. But first, please read
+ <http://www.gnu.org/philosophy/why-not-lgpl.html>.
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Science/Research
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Programming Language :: C
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: Scientific/Engineering
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: POSIX :: Linux
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: Unix
+Classifier: Operating System :: MacOS
+Project-URL: Homepage, https://scipy.org/
+Project-URL: Documentation, https://docs.scipy.org/doc/scipy/
+Project-URL: Source, https://github.com/scipy/scipy
+Project-URL: Download, https://github.com/scipy/scipy/releases
+Project-URL: Tracker, https://github.com/scipy/scipy/issues
+Requires-Python: <3.12,>=3.8
+Requires-Dist: numpy<1.27.0,>=1.19.5
+Requires-Dist: pytest; extra == "test"
+Requires-Dist: pytest-cov; extra == "test"
+Requires-Dist: pytest-timeout; extra == "test"
+Requires-Dist: pytest-xdist; extra == "test"
+Requires-Dist: asv; extra == "test"
+Requires-Dist: mpmath; extra == "test"
+Requires-Dist: gmpy2; extra == "test"
+Requires-Dist: threadpoolctl; extra == "test"
+Requires-Dist: scikit-umfpack; extra == "test"
+Requires-Dist: pooch; extra == "test"
+Requires-Dist: sphinx!=4.1.0; extra == "doc"
+Requires-Dist: pydata-sphinx-theme==0.9.0; extra == "doc"
+Requires-Dist: sphinx-design>=0.2.0; extra == "doc"
+Requires-Dist: matplotlib>2; extra == "doc"
+Requires-Dist: numpydoc; extra == "doc"
+Requires-Dist: mypy; extra == "dev"
+Requires-Dist: typing_extensions; extra == "dev"
+Requires-Dist: pycodestyle; extra == "dev"
+Requires-Dist: flake8; extra == "dev"
+Requires-Dist: rich-click; extra == "dev"
+Requires-Dist: click; extra == "dev"
+Requires-Dist: doit>=0.36.0; extra == "dev"
+Requires-Dist: pydevtool; extra == "dev"
+Provides-Extra: test
+Provides-Extra: doc
+Provides-Extra: dev
+Description-Content-Type: text/x-rst
+
+.. image:: https://github.com/scipy/scipy/blob/main/doc/source/_static/logo.svg
+   :target: https://scipy.org
+   :width: 110
+   :height: 110
+   :align: left
+
+.. image:: https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A
+   :target: https://numfocus.org
+
+.. image:: https://img.shields.io/pypi/dm/scipy.svg?label=Pypi%20downloads
+   :target: https://pypi.org/project/scipy/
+
+.. image:: https://img.shields.io/conda/dn/conda-forge/scipy.svg?label=Conda%20downloads
+   :target: https://anaconda.org/conda-forge/scipy
+
+.. image:: https://img.shields.io/badge/stackoverflow-Ask%20questions-blue.svg
+   :target: https://stackoverflow.com/questions/tagged/scipy
+
+.. image:: https://img.shields.io/badge/DOI-10.1038%2Fs41592--019--0686--2-blue
+   :target: https://www.nature.com/articles/s41592-019-0686-2
+
+SciPy (pronounced "Sigh Pie") is open-source software for mathematics,
+science, and engineering. It includes modules for statistics, optimization,
+integration, linear algebra, Fourier transforms, signal and image processing,
+ODE solvers, and more.
+
+- **Website:** https://scipy.org
+- **Documentation:** https://docs.scipy.org/doc/scipy/
+- **Development version of the documentation:** https://scipy.github.io/devdocs
+- **Mailing list:** https://mail.python.org/mailman3/lists/scipy-dev.python.org/
+- **Source code:** https://github.com/scipy/scipy
+- **Contributing:** https://scipy.github.io/devdocs/dev/index.html
+- **Bug reports:** https://github.com/scipy/scipy/issues
+- **Code of Conduct:** https://docs.scipy.org/doc/scipy/dev/conduct/code_of_conduct.html
+- **Report a security vulnerability:** https://tidelift.com/docs/security
+- **Citing in your work:** https://www.scipy.org/citing-scipy/
+
+SciPy is built to work with
+NumPy arrays, and provides many user-friendly and efficient numerical routines,
+such as routines for numerical integration and optimization. Together, they
+run on all popular operating systems, are quick to install, and are free of
+charge. NumPy and SciPy are easy to use, but powerful enough to be depended
+upon by some of the world's leading scientists and engineers. If you need to
+manipulate numbers on a computer and display or publish the results, give
+SciPy a try!
+
+For the installation instructions, see `our install
+guide `__.
+
+
+Call for Contributions
+----------------------
+
+We appreciate and welcome contributions. Small improvements or fixes are always appreciated; issues labeled as "good
+first issue" may be a good starting point. Have a look at `our contributing
+guide `__.
+
+Writing code isn’t the only way to contribute to SciPy. You can also:
+
+- review pull requests
+- triage issues
+- develop tutorials, presentations, and other educational materials
+- maintain and improve `our website `__
+- develop graphic design for our brand assets and promotional materials
+- help with outreach and onboard new contributors
+- write grant proposals and help with other fundraising efforts
+
+If you’re unsure where to start or how your skills fit in, reach out! You can
+ask on the mailing list or here, on GitHub, by leaving a
+comment on a relevant issue that is already open.
+
+If you are new to contributing to open source, `this
+guide `__ helps explain why, what,
+and how to get involved.
diff --git a/__packaged__/coreml/.python_dependencies/scipy-1.10.1.dist-info/RECORD b/__packaged__/coreml/.python_dependencies/scipy-1.10.1.dist-info/RECORD new file mode 100644 index 00000000..86ae4d87 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/scipy-1.10.1.dist-info/RECORD @@ -0,0 +1,2037 @@ +scipy-1.10.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +scipy-1.10.1.dist-info/LICENSE.txt,sha256=90B8ODojKU86elJLmtrzM1tNG5GKEJNxQOYjZuA5YFc,41429 +scipy-1.10.1.dist-info/METADATA,sha256=O0g8a_iszEmeFEN7kfAIlMqS2kLICR8Aph6B9yimsd0,53928 +scipy-1.10.1.dist-info/RECORD,, +scipy-1.10.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy-1.10.1.dist-info/WHEEL,sha256=Ep2SgF6yV45nIBdxm49hl_xnVh0uKnFXRsk2oguDYE8,93 +scipy/.dylibs/libgcc_s.1.1.dylib,sha256=eMgpcUIhwd2iIQii-PWOGKdIDUZJcAtg58Jc-vx5beM,156896 +scipy/.dylibs/libgfortran.5.dylib,sha256=H3N-g4-YnOnXuaH7Q4xEHh9tfzyoY-uTeEwvDeRIPkA,1846176 +scipy/.dylibs/libopenblas.0.dylib,sha256=Lao87Mh3f70q32BxFVJXIeXOGgkbRzbBDHfiX0o8ugw,20952288 +scipy/.dylibs/libquadmath.0.dylib,sha256=w2NvO_dk1WG46nm6G9H0tB8wFqp25V_PrPvGOUQLxt8,349408 +scipy/__config__.py,sha256=QjarxZy9mtRzzblgt5H4Ep-6mr8oFrYEUqzo9kVTrY4,4409 +scipy/__init__.py,sha256=e1Kwf9PJyelCWzxN7d2m-2fGSq5OCPEiIkhn0Odf3a8,7110 +scipy/__pycache__/__config__.cpython-310.pyc,, +scipy/__pycache__/__init__.cpython-310.pyc,, +scipy/__pycache__/_distributor_init.cpython-310.pyc,, +scipy/__pycache__/conftest.cpython-310.pyc,, +scipy/__pycache__/version.cpython-310.pyc,, +scipy/_distributor_init.py,sha256=2LDC4c2QoxdDkay0RO61CkHdMYLo-TdsihTtkbjt7XA,331 +scipy/_lib/__init__.py,sha256=CXrH_YBpZ-HImHHrqXIhQt_vevp4P5NXClp7hnFMVLM,353 +scipy/_lib/__pycache__/__init__.cpython-310.pyc,, +scipy/_lib/__pycache__/_bunch.cpython-310.pyc,, +scipy/_lib/__pycache__/_ccallback.cpython-310.pyc,, +scipy/_lib/__pycache__/_disjoint_set.cpython-310.pyc,, +scipy/_lib/__pycache__/_docscrape.cpython-310.pyc,, +scipy/_lib/__pycache__/_finite_differences.cpython-310.pyc,, +scipy/_lib/__pycache__/_gcutils.cpython-310.pyc,, +scipy/_lib/__pycache__/_pep440.cpython-310.pyc,, +scipy/_lib/__pycache__/_testutils.cpython-310.pyc,, +scipy/_lib/__pycache__/_threadsafety.cpython-310.pyc,, +scipy/_lib/__pycache__/_tmpdirs.cpython-310.pyc,, +scipy/_lib/__pycache__/_util.cpython-310.pyc,, +scipy/_lib/__pycache__/decorator.cpython-310.pyc,, +scipy/_lib/__pycache__/deprecation.cpython-310.pyc,, +scipy/_lib/__pycache__/doccer.cpython-310.pyc,, +scipy/_lib/__pycache__/uarray.cpython-310.pyc,, +scipy/_lib/_bunch.py,sha256=r3I77OVyYCaMtQy_kNPEYvpfdKB_8YOeuTjHCVuGYQI,8117 +scipy/_lib/_ccallback.py,sha256=_PGPShGcultXRpGR7HuATDHMR6kJpWcxeQPzL0gOh0k,6213 +scipy/_lib/_ccallback_c.cpython-310-darwin.so,sha256=48CkXHcHYazqA5g2OV52Hl8_BI_i8gY_BbNRtAwsyps,106367 +scipy/_lib/_disjoint_set.py,sha256=NeAK884rqiJRghFrwT5s5bVij9WHv65R6CAN0CIW4F4,5483 +scipy/_lib/_docscrape.py,sha256=qRbjNYTP_efI3r3w1aVBnj0PbglqsepL07EbObgiXOo,21584 +scipy/_lib/_finite_differences.py,sha256=llaIPvCOxpE4VA8O8EycPEU8i6LHJyOD-y7Y9OvQHt0,4172 +scipy/_lib/_fpumode.cpython-310-darwin.so,sha256=O9wYQjvaHpDafKOG4YKIVm-2wOpsOAihOfggfajLgCg,50203 +scipy/_lib/_gcutils.py,sha256=hajQd-HUw9ckK7QeBaqXVRpmnxPgyXO3QqqniEh7tRk,2669 +scipy/_lib/_pep440.py,sha256=Vr7B3QsijR5p6h8YAz2LjNGUyzHUJ5gZ4v26NpZAKDc,14069 +scipy/_lib/_test_ccallback.cpython-310-darwin.so,sha256=E2691ASrwEjo6Z8ogUyKx1kuCs9JoX52sI-nIYMIDFM,53218 
+scipy/_lib/_test_deprecation_call.cpython-310-darwin.so,sha256=K-dLwjR81Pd1cnmkcI3un2TPAc8zzTwjJP0OGX5m6YA,55577 +scipy/_lib/_test_deprecation_def.cpython-310-darwin.so,sha256=gUpmabNKudY8hov3_21avYvgWi7OgjRiCyDA9FO08DI,56360 +scipy/_lib/_testutils.py,sha256=SRljev7z4zQOvviYWKL3ZMV9xq-evelO_-kU57IZ8Wc,6841 +scipy/_lib/_threadsafety.py,sha256=xuVqUS2jv46fOOQf7bcrhiYtnPVygqmrIVJc-7_LlI8,1455 +scipy/_lib/_tmpdirs.py,sha256=z3IYpzACnWdN_BMjOvqYbkTvYyUbfbQvfehq7idENSo,2374 +scipy/_lib/_uarray/LICENSE,sha256=yAw5tfzga6SJfhTgsKiLVEWDNNlR6xNhQC_60s-4Y7Q,1514 +scipy/_lib/_uarray/__init__.py,sha256=JLZP3pTSOy4i3Usw4odj4P9dtImMNFrxT4_A9dcgzQU,4493 +scipy/_lib/_uarray/__pycache__/__init__.cpython-310.pyc,, +scipy/_lib/_uarray/__pycache__/_backend.cpython-310.pyc,, +scipy/_lib/_uarray/_backend.py,sha256=E1Hzup7YGgJVpb9OVo97zx72a6wgcngxsD12sjylcD4,20498 +scipy/_lib/_uarray/_uarray.cpython-310-darwin.so,sha256=kPml_lt8I8KEOWZ3TVRtOZEokqCtbTbfbkW_gpBEgv8,121706 +scipy/_lib/_util.py,sha256=CFxZxCjAIo2fZzefbaieCRd2aJz4B63JdcpXlSETKZE,24486 +scipy/_lib/decorator.py,sha256=hxTEDkl4BUscis3hwGs8HswgI3ojTml1-UIsLlgdHHY,15059 +scipy/_lib/deprecation.py,sha256=PKadEK4--UfW_-IzAJgwW-olBYTUDsTaHSZlEanpIWg,3176 +scipy/_lib/doccer.py,sha256=shdWIi3u7QBN5CyyKwqWW99qOEsiFewB8eH10FWhYLM,8362 +scipy/_lib/messagestream.cpython-310-darwin.so,sha256=aqLLG76n3MLzcQarXsfYYUK1F3IpiSTd6wiqoldQkl0,79728 +scipy/_lib/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/_lib/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/_lib/tests/__pycache__/test__gcutils.cpython-310.pyc,, +scipy/_lib/tests/__pycache__/test__pep440.cpython-310.pyc,, +scipy/_lib/tests/__pycache__/test__testutils.cpython-310.pyc,, +scipy/_lib/tests/__pycache__/test__threadsafety.cpython-310.pyc,, +scipy/_lib/tests/__pycache__/test__util.cpython-310.pyc,, +scipy/_lib/tests/__pycache__/test_bunch.cpython-310.pyc,, +scipy/_lib/tests/__pycache__/test_ccallback.cpython-310.pyc,, +scipy/_lib/tests/__pycache__/test_deprecation.cpython-310.pyc,, +scipy/_lib/tests/__pycache__/test_import_cycles.cpython-310.pyc,, +scipy/_lib/tests/__pycache__/test_public_api.cpython-310.pyc,, +scipy/_lib/tests/__pycache__/test_scipy_version.cpython-310.pyc,, +scipy/_lib/tests/__pycache__/test_tmpdirs.cpython-310.pyc,, +scipy/_lib/tests/__pycache__/test_warnings.cpython-310.pyc,, +scipy/_lib/tests/test__gcutils.py,sha256=OdEmx9K4QVSokt0sV8VF4Uxp4bnSyJyjdFLi2F7nYcg,3416 +scipy/_lib/tests/test__pep440.py,sha256=u9hPoolK4AoIIS-Rq74Du5SJu5og2RxMwgaAvGgWvRo,2277 +scipy/_lib/tests/test__testutils.py,sha256=P4WDJpUgy19wD9tknQSjIivuQvZF7YUBGSBWlur2QRA,800 +scipy/_lib/tests/test__threadsafety.py,sha256=qSfCF5OG_5lbnSl-grmDN_QCU4QLe-fS3sqnwL04pf8,1322 +scipy/_lib/tests/test__util.py,sha256=G5lSPfcPxs7erNfBZjIn9fsUEgHrEnfAEv90zqgBrmU,13325 +scipy/_lib/tests/test_bunch.py,sha256=cQoJPEalhaGSOiwvGwMtNyv4IAVxnjJJxDiBluiS-SY,6169 +scipy/_lib/tests/test_ccallback.py,sha256=mvo9OeGktIqO-vfLLU1FPAfFwxPzX0wcYh_Lnwby7ik,5995 +scipy/_lib/tests/test_deprecation.py,sha256=a_3r_9pFx1sxJXeFgiTSV9DXYnktc4fio1hR0ITPywA,364 +scipy/_lib/tests/test_import_cycles.py,sha256=3MoMy-2qdOw1UOcjQMStTIbyoM6fdSdp6BF_Ie0lzec,1306 +scipy/_lib/tests/test_public_api.py,sha256=tRZud6CKVSFu20f_c79YIXOEAlzGTUcejmLL7E0Aa1g,9942 +scipy/_lib/tests/test_scipy_version.py,sha256=jgo-2YhCkBksXHM6xKiN_iJJZkqz0CvXqn2jVxx1djA,606 +scipy/_lib/tests/test_tmpdirs.py,sha256=jusM--qpUMscMAdbgNGkmCU23UGhytuqZM1gX76oWcE,1242 +scipy/_lib/tests/test_warnings.py,sha256=FIn3ndQMNIiR-o_CjvX5D2ZnAYFKWi8aqunLgkIappU,4295 
+scipy/_lib/uarray.py,sha256=wmH9RAWa-jXxiokMHx-nv0dazCR0UoPlitauJCWspQs,773 +scipy/cluster/__init__.py,sha256=Sco_jwP4eqTtUfElVvmub0n5Ue75puxGtCXYIbt7ZKI,871 +scipy/cluster/__pycache__/__init__.cpython-310.pyc,, +scipy/cluster/__pycache__/hierarchy.cpython-310.pyc,, +scipy/cluster/__pycache__/vq.cpython-310.pyc,, +scipy/cluster/_hierarchy.cpython-310-darwin.so,sha256=zmL_sZd-R3bgEmnHy90h2PtWh16w6JMZJy2gQe4pFU8,340013 +scipy/cluster/_optimal_leaf_ordering.cpython-310-darwin.so,sha256=-JIo2jp88ehM_Hp22IL_blEsLMqknzAHm4jI-G8mgfE,252425 +scipy/cluster/_vq.cpython-310-darwin.so,sha256=u1oAw36G-ECJ96qLHk81Qpw3vaauT_smdddtvick2OI,121718 +scipy/cluster/hierarchy.py,sha256=8xdDObqJN983cw1rOfgF3S-5oGxC60WXHi7e4bAjnSU,148455 +scipy/cluster/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/cluster/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/cluster/tests/__pycache__/hierarchy_test_data.cpython-310.pyc,, +scipy/cluster/tests/__pycache__/test_disjoint_set.cpython-310.pyc,, +scipy/cluster/tests/__pycache__/test_hierarchy.cpython-310.pyc,, +scipy/cluster/tests/__pycache__/test_vq.cpython-310.pyc,, +scipy/cluster/tests/hierarchy_test_data.py,sha256=7syUYdIaDVr7hgvMliX0CW4386utjBJn1DOgX0USXls,6850 +scipy/cluster/tests/test_disjoint_set.py,sha256=UXzuhddiuQIu9kgftWBvqOg6MCSej3NHJCdEhvcl0cY,5469 +scipy/cluster/tests/test_hierarchy.py,sha256=OScakHr3N2uDXNZ12g8WAzAPD6NtSr83aRLoodqUfSo,43629 +scipy/cluster/tests/test_vq.py,sha256=NZQd3jloOCPjMnhZ5x5nuMIZ38eX_Nkwgx0ZMjOEd34,13434 +scipy/cluster/vq.py,sha256=M6Sf9qSV4-2y6m-Dgi0FHDRNy3GcFlTfNZh2hdp8avA,29222 +scipy/conftest.py,sha256=DsaVf3409ME_kaiWcxChA2bS5f1CCzbxgPFznQBuyJM,3478 +scipy/constants/__init__.py,sha256=sRLD0haSgEtXA4TphGkVv2p2qQxTyeyVlmpzZSq0Ygg,12297 +scipy/constants/__pycache__/__init__.cpython-310.pyc,, +scipy/constants/__pycache__/_codata.cpython-310.pyc,, +scipy/constants/__pycache__/_constants.cpython-310.pyc,, +scipy/constants/__pycache__/codata.cpython-310.pyc,, +scipy/constants/__pycache__/constants.cpython-310.pyc,, +scipy/constants/_codata.py,sha256=7zEUms7rnmSfBppsh38PNx6p2c-k0SVCcVpea3QZK5k,155898 +scipy/constants/_constants.py,sha256=V9mm4Dp35Vun9RjG_DGoVywCvE2N2mWk8XfeamBVgjM,10275 +scipy/constants/codata.py,sha256=F87N9rObCx8B3y_wcoPEzFWGhZmdXJ6B0Nll7IUEfv8,1015 +scipy/constants/constants.py,sha256=k8IODtGkknZ44clDFEihVparvjJFwEDG454V2of4BpQ,2477 +scipy/constants/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/constants/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/constants/tests/__pycache__/test_codata.cpython-310.pyc,, +scipy/constants/tests/__pycache__/test_constants.cpython-310.pyc,, +scipy/constants/tests/test_codata.py,sha256=ToO_lhQOsusJlP3QjrYqa1vw7x6wTCuKH17fg87tH08,1959 +scipy/constants/tests/test_constants.py,sha256=PY1oy6bbM2zoPAPgUeBqVThnVRuu4lBt_uMmxm7Ct38,1632 +scipy/datasets/__init__.py,sha256=lO6WMYM5CbayWGLjzgcJdZoxQHUYijYbfzyHxo9Bbt0,2816 +scipy/datasets/__pycache__/__init__.cpython-310.pyc,, +scipy/datasets/__pycache__/_download_all.cpython-310.pyc,, +scipy/datasets/__pycache__/_fetchers.cpython-310.pyc,, +scipy/datasets/__pycache__/_registry.cpython-310.pyc,, +scipy/datasets/__pycache__/_utils.cpython-310.pyc,, +scipy/datasets/_download_all.py,sha256=iRPR2IUk6C3B5u2q77yOhac449MRSoRaTlCy2oCIknE,1701 +scipy/datasets/_fetchers.py,sha256=Ef8RxSZkB0KIjmF-wFoW_QX8wbXHAgOzSAp1zFgE2QU,6759 +scipy/datasets/_registry.py,sha256=br0KfyalEbh5yrQLznQ_QvBtmN4rMsm0UxOjnsJp4OQ,1072 
+scipy/datasets/_utils.py,sha256=0uGnuXK3KyLzUV4cGL76mGqQzrtEpSP8NAQxWuaw4cU,2914 +scipy/datasets/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/datasets/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/datasets/tests/__pycache__/test_data.cpython-310.pyc,, +scipy/datasets/tests/test_data.py,sha256=GelFTF2yZqiiQkgTv8ukv8sKTJBdmpsyK5fr0G6z7Ls,4064 +scipy/fft/__init__.py,sha256=aihIkaW0Nr76Ct84OInhv-8AjbV8Z9ah44KiDEYEFSM,3567 +scipy/fft/__pycache__/__init__.cpython-310.pyc,, +scipy/fft/__pycache__/_backend.cpython-310.pyc,, +scipy/fft/__pycache__/_basic.cpython-310.pyc,, +scipy/fft/__pycache__/_debug_backends.cpython-310.pyc,, +scipy/fft/__pycache__/_fftlog.cpython-310.pyc,, +scipy/fft/__pycache__/_fftlog_multimethods.cpython-310.pyc,, +scipy/fft/__pycache__/_helper.cpython-310.pyc,, +scipy/fft/__pycache__/_realtransforms.cpython-310.pyc,, +scipy/fft/_backend.py,sha256=7a3Gx0WLclcFFSeQA1RdARuC8QJ4oLpI3V9pqJvGQv4,6396 +scipy/fft/_basic.py,sha256=KXnf-LBgrD0XToSowdJ64Uvg5f0jyoinJV3-UwuklqA,62991 +scipy/fft/_debug_backends.py,sha256=RlvyunZNqaDDsI3-I6QH6GSBz_faT6EN4OONWsvMtR8,598 +scipy/fft/_fftlog.py,sha256=WVibBtg6iin0R-hBahHS-VnUxTlNZj3KTyL510D2MmE,11879 +scipy/fft/_fftlog_multimethods.py,sha256=wFwqCnjY_DH6_XVm6cQ4pIlu0LpCp76c5GeXWvOvTH0,575 +scipy/fft/_helper.py,sha256=L1WGWDlBmD0gTBpfyGWag7StqYI0KcQ5LMuNbVVDV3c,3416 +scipy/fft/_pocketfft/LICENSE.md,sha256=wlSytf0wrjyJ02ugYXMFY7l2D8oE8bdGobLDFX2ix4k,1498 +scipy/fft/_pocketfft/__init__.py,sha256=dROVDi9kRvkbSdynd3L09tp9_exzQ4QqG3xnNx78JeU,207 +scipy/fft/_pocketfft/__pycache__/__init__.cpython-310.pyc,, +scipy/fft/_pocketfft/__pycache__/basic.cpython-310.pyc,, +scipy/fft/_pocketfft/__pycache__/helper.cpython-310.pyc,, +scipy/fft/_pocketfft/__pycache__/realtransforms.cpython-310.pyc,, +scipy/fft/_pocketfft/basic.py,sha256=Wn-qvc2r1lfrU1df-rBVjhq1m0kGssOGCQWrGB2etG0,9845 +scipy/fft/_pocketfft/helper.py,sha256=CWvGbhM_ZtVz1U1y1U0-kY16Mya4lesliJFfik5jCC8,5725 +scipy/fft/_pocketfft/pypocketfft.cpython-310-darwin.so,sha256=Uh-CUyqObhE3X9nVVNOSLa7SB8Ta7Zz4M5-alfUX_9w,788350 +scipy/fft/_pocketfft/realtransforms.py,sha256=zv9ABJnBOIthx-5-eXjve3SsR3i2TgMSx_IEaiBjNxQ,3379 +scipy/fft/_pocketfft/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/fft/_pocketfft/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/fft/_pocketfft/tests/__pycache__/test_basic.cpython-310.pyc,, +scipy/fft/_pocketfft/tests/__pycache__/test_real_transforms.cpython-310.pyc,, +scipy/fft/_pocketfft/tests/test_basic.py,sha256=zwwMq-1QTwIhQXC8AqKc1bO7lNaVddGc58Dd7RPalwo,35706 +scipy/fft/_pocketfft/tests/test_real_transforms.py,sha256=a4uH-yIEXgOzNeI7NSTnB6gCSxywBxJQ0M3ojQ1xl7c,16426 +scipy/fft/_realtransforms.py,sha256=y4PJZkRhuwnJTy4-J2U7SP-Soj69dtnYXSK073Ur06Y,25280 +scipy/fft/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/fft/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/fft/tests/__pycache__/mock_backend.cpython-310.pyc,, +scipy/fft/tests/__pycache__/test_backend.cpython-310.pyc,, +scipy/fft/tests/__pycache__/test_fft_function.cpython-310.pyc,, +scipy/fft/tests/__pycache__/test_fftlog.cpython-310.pyc,, +scipy/fft/tests/__pycache__/test_helper.cpython-310.pyc,, +scipy/fft/tests/__pycache__/test_multithreading.cpython-310.pyc,, +scipy/fft/tests/__pycache__/test_numpy.cpython-310.pyc,, +scipy/fft/tests/__pycache__/test_real_transforms.cpython-310.pyc,, +scipy/fft/tests/mock_backend.py,sha256=00ZsBjrauFbGgMKB9-vh-CBvJPLsRFPVDp015PbiWjk,1769 
+scipy/fft/tests/test_backend.py,sha256=29ZzhDK9ySCXfqgazIgBfMtp1fUpQXl0xTS0IE-ccoc,4256 +scipy/fft/tests/test_fft_function.py,sha256=ZVK0wunPrwE-LkgQOxp3B4sgqcD6aLmyWcpytKvDBWE,1048 +scipy/fft/tests/test_fftlog.py,sha256=gcPRfbarV_rijIIbcU_oQuY2Y1J7s6CIShZqK8rxvQk,5819 +scipy/fft/tests/test_helper.py,sha256=STdMQCUMckqAcIIW6T1Wv2th3M8nfBvTKkPWbEfWQCE,9807 +scipy/fft/tests/test_multithreading.py,sha256=Ub0qD3_iSApPT9E71i0dvKnsKrctLiwMq95y3370POE,2132 +scipy/fft/tests/test_numpy.py,sha256=kI1Y5jjZdLXHhklOFHDtDN2FGq4xKmbl5e-lceK5Zhw,14432 +scipy/fft/tests/test_real_transforms.py,sha256=0zZDdJ0xVI1d7MwZEKg6iaoM0vc0Zm4kG_BpFlGqLbI,7592 +scipy/fftpack/__init__.py,sha256=QuxHBvLU1MJt2nMuZ7n9AuXoHVxhseHaOmxfb2G5JFU,3200 +scipy/fftpack/__pycache__/__init__.cpython-310.pyc,, +scipy/fftpack/__pycache__/_basic.cpython-310.pyc,, +scipy/fftpack/__pycache__/_helper.cpython-310.pyc,, +scipy/fftpack/__pycache__/_pseudo_diffs.cpython-310.pyc,, +scipy/fftpack/__pycache__/_realtransforms.cpython-310.pyc,, +scipy/fftpack/__pycache__/basic.cpython-310.pyc,, +scipy/fftpack/__pycache__/helper.cpython-310.pyc,, +scipy/fftpack/__pycache__/pseudo_diffs.cpython-310.pyc,, +scipy/fftpack/__pycache__/realtransforms.cpython-310.pyc,, +scipy/fftpack/_basic.py,sha256=Sk_gfswmWKb3za6wrU_mIrRVBl69qjzAu9ltznbDCKs,13098 +scipy/fftpack/_helper.py,sha256=6oIZ6ErA0Bt61s460_WjQfwmpENR0NnjNmPlO3ImhXo,3354 +scipy/fftpack/_pseudo_diffs.py,sha256=eCln0ZImNYr-wUWpOZ-SmKKIbhJsV8VBLmwT_C79RsQ,14200 +scipy/fftpack/_realtransforms.py,sha256=ledb21L13ofGnOU4pkx8uWuARCxsh3IFQrHctxTgzzw,19214 +scipy/fftpack/basic.py,sha256=DMX__JJaJK_FEPw5LhxVaiwqM8ive616PGZ1uzXBLNM,790 +scipy/fftpack/convolve.cpython-310-darwin.so,sha256=EWQFwh3d4aEix7IvqzqSyaWFUaafTqfR9DVpvYNyzww,208555 +scipy/fftpack/helper.py,sha256=RWzRMKNW8K5M2jHGRwWB7CtvYVEoWdP63LISGcGgMaI,795 +scipy/fftpack/pseudo_diffs.py,sha256=gWafKeFKkbnvaxQAtgj7Vzj_q60xwLR3ghZn3ttO3wU,901 +scipy/fftpack/realtransforms.py,sha256=79A6XfPab3kR0KN4XfkDrTzTZH41LQmW4AcMYYTnpyY,826 +scipy/fftpack/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/fftpack/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/fftpack/tests/__pycache__/test_basic.cpython-310.pyc,, +scipy/fftpack/tests/__pycache__/test_helper.cpython-310.pyc,, +scipy/fftpack/tests/__pycache__/test_import.cpython-310.pyc,, +scipy/fftpack/tests/__pycache__/test_pseudo_diffs.cpython-310.pyc,, +scipy/fftpack/tests/__pycache__/test_real_transforms.cpython-310.pyc,, +scipy/fftpack/tests/fftw_double_ref.npz,sha256=pgxklBW2RSI5JNg0LMxcCXgByGkBKHo2nlP8kln17E4,162120 +scipy/fftpack/tests/fftw_longdouble_ref.npz,sha256=pAbL1NrQTQxZ3Tj1RBb7SUJMgiKcGgdLakTsDN4gAOM,296072 +scipy/fftpack/tests/fftw_single_ref.npz,sha256=J2qRQTGOb8NuSrb_VKYbZAVO-ISbZg8XNZ5fVBtDxSY,95144 +scipy/fftpack/tests/test.npz,sha256=Nt6ASiLY_eoFRZDOSd3zyFmDi32JGTxWs7y2YMv0N5c,11968 +scipy/fftpack/tests/test_basic.py,sha256=3dFa77VxVp-DeN6y1ofmfPfqsG6Qdx3INu4PHu-lBQ4,30373 +scipy/fftpack/tests/test_helper.py,sha256=8JaPSJOwsk5XXOf1zFahJ_ktUTfNGSk2-k3R6e420XI,1675 +scipy/fftpack/tests/test_import.py,sha256=X_rM3ncYPlL78G_xvHyYU48IBhy7YuG6mU4-uoo3978,1129 +scipy/fftpack/tests/test_pseudo_diffs.py,sha256=SEVPHPDdSxDSUCC8qkwuKD7mIX8rFIx9puxGzBYd1uk,13389 +scipy/fftpack/tests/test_real_transforms.py,sha256=YoN1b8ZhPbJTK0ww6U8ZxWXL52a1-HqiY45fN9LvQgI,23941 +scipy/integrate/__init__.py,sha256=83lQ1LAVQqQGGcVn-u5BecQN99D3pyRwpsg9BvGT_W4,4003 +scipy/integrate/__pycache__/__init__.cpython-310.pyc,, +scipy/integrate/__pycache__/_bvp.cpython-310.pyc,, 
+scipy/integrate/__pycache__/_ode.cpython-310.pyc,, +scipy/integrate/__pycache__/_odepack_py.cpython-310.pyc,, +scipy/integrate/__pycache__/_quad_vec.cpython-310.pyc,, +scipy/integrate/__pycache__/_quadpack_py.cpython-310.pyc,, +scipy/integrate/__pycache__/_quadrature.cpython-310.pyc,, +scipy/integrate/__pycache__/dop.cpython-310.pyc,, +scipy/integrate/__pycache__/lsoda.cpython-310.pyc,, +scipy/integrate/__pycache__/odepack.cpython-310.pyc,, +scipy/integrate/__pycache__/quadpack.cpython-310.pyc,, +scipy/integrate/__pycache__/vode.cpython-310.pyc,, +scipy/integrate/_bvp.py,sha256=fOUu518B3bcRpitEQWNM2DfvMvszELAbJt2n8wQD5HE,41083 +scipy/integrate/_dop.cpython-310-darwin.so,sha256=nfDz8wpiHtTEaucHgBR93AzjTNR1ZCg8P6rEGoQYlKM,126000 +scipy/integrate/_ivp/__init__.py,sha256=gKFR_pPjr8fRLgAGY5sOzYKGUFu2nGX8x1RrXT-GZZc,256 +scipy/integrate/_ivp/__pycache__/__init__.cpython-310.pyc,, +scipy/integrate/_ivp/__pycache__/base.cpython-310.pyc,, +scipy/integrate/_ivp/__pycache__/bdf.cpython-310.pyc,, +scipy/integrate/_ivp/__pycache__/common.cpython-310.pyc,, +scipy/integrate/_ivp/__pycache__/dop853_coefficients.cpython-310.pyc,, +scipy/integrate/_ivp/__pycache__/ivp.cpython-310.pyc,, +scipy/integrate/_ivp/__pycache__/lsoda.cpython-310.pyc,, +scipy/integrate/_ivp/__pycache__/radau.cpython-310.pyc,, +scipy/integrate/_ivp/__pycache__/rk.cpython-310.pyc,, +scipy/integrate/_ivp/base.py,sha256=lLM1Oj1nuUUY3m_YDZkecSjAY9ovcFCG9N14lkLzkys,9550 +scipy/integrate/_ivp/bdf.py,sha256=niJPOk99OdWyeqWnuzhW4dV_eo5wcBXrzXhY7aIK_SQ,17161 +scipy/integrate/_ivp/common.py,sha256=xFX5sqbjrNEvv7bjcOVknh48K1TeJ8aOqjvkZPoiXmo,14780 +scipy/integrate/_ivp/dop853_coefficients.py,sha256=OrYvW0Hu6X7sOh37FU58gNkgC77KVpYclewv_ARGMAE,7237 +scipy/integrate/_ivp/ivp.py,sha256=_CvCn29i1deNn-LNYftbJpZ-YMXNV9a92luZYlFvGVM,28282 +scipy/integrate/_ivp/lsoda.py,sha256=dIVlRXlOcNSvYCXKbT9YYS47cTe3i3QPHww38Y3bTYk,8298 +scipy/integrate/_ivp/radau.py,sha256=flnYZBLAW0-ZRdZTdhz0oZwj6wpqlU8L5Qwd8GEVPew,19383 +scipy/integrate/_ivp/rk.py,sha256=SlUEo9QM5WN205QAT1CAGUJlyhSFWxBq3aPaQX2wcCs,22244 +scipy/integrate/_ivp/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/integrate/_ivp/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/integrate/_ivp/tests/__pycache__/test_ivp.cpython-310.pyc,, +scipy/integrate/_ivp/tests/__pycache__/test_rk.cpython-310.pyc,, +scipy/integrate/_ivp/tests/test_ivp.py,sha256=amcGZPZT8rWRTU8zvhbtfyX1BqjWcT4Bo-u1h__Suk4,34762 +scipy/integrate/_ivp/tests/test_rk.py,sha256=K9UxZghBzSL2BzmgLndPJcWOWV4Nr530TGKWakpsoeM,1326 +scipy/integrate/_lsoda.cpython-310-darwin.so,sha256=1cdq1L_l6rpmblDfSFEYYrWvdBJEJHK70akSnT2taJ0,127120 +scipy/integrate/_ode.py,sha256=bMLLW6Z2T_-kKpzDyZeoQvwcjhbyDStwvLNWn4kYIHY,47945 +scipy/integrate/_odepack.cpython-310-darwin.so,sha256=BB9F9idSg_MZw8DwJUYlnpqo6XE3FRBreqe1rFbi-xU,105808 +scipy/integrate/_odepack_py.py,sha256=aTlpXCI0qvXG0Z8ibHqiWyHTo2Q16RN8MyolfiMIVB8,10769 +scipy/integrate/_quad_vec.py,sha256=1Gu10Jyj6BKODMVlq5fQGKuJl-pJgdKGOTlAzo4nhnQ,21194 +scipy/integrate/_quadpack.cpython-310-darwin.so,sha256=GVmzTMOxVcpw4rQL824E1105M6s7Dp6YSQehQVQNN6Y,123632 +scipy/integrate/_quadpack_py.py,sha256=UB42Y4nPnIObv_nwznM3VT3Cl12S3UkMU0XjZfEX4Gg,52346 +scipy/integrate/_quadrature.py,sha256=lE3Lvo6sglaI41DzhQaT3K8dpGmdYm7rhTXRQKBK0BU,45913 +scipy/integrate/_test_multivariate.cpython-310-darwin.so,sha256=kc01VVc7Y9Uq8XkZYKmaP4-b8JRm59tsBSzqPG0iGiw,50693 +scipy/integrate/_test_odeint_banded.cpython-310-darwin.so,sha256=CxcILmKWbeQk0ARC0NxSeoUvjD_PiUQbHBJdkqgdNIg,127088 
+scipy/integrate/_vode.cpython-310-darwin.so,sha256=jQ7EmKKxFjRhhIwRQltdufo3-lEfWk64VFT8kpCpQGQ,161680 +scipy/integrate/dop.py,sha256=yx0rG-U_s77y6_cRKkuIo27IFepKhow6VnXQmYHq6vk,622 +scipy/integrate/lsoda.py,sha256=I4nTMQz101vjwrDVjO1eR7mZjwP7CJW1P5aA_Qo3394,610 +scipy/integrate/odepack.py,sha256=bGHp-nnd-dVQHYxy_PogCiY8CODz6pok9adiUtgq7zI,771 +scipy/integrate/quadpack.py,sha256=fy0Vz51sZkG5Cjdp_EXGEDfHlFGjLdOqz8EtnXdMwSY,845 +scipy/integrate/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/integrate/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/integrate/tests/__pycache__/test__quad_vec.cpython-310.pyc,, +scipy/integrate/tests/__pycache__/test_banded_ode_solvers.cpython-310.pyc,, +scipy/integrate/tests/__pycache__/test_bvp.cpython-310.pyc,, +scipy/integrate/tests/__pycache__/test_integrate.cpython-310.pyc,, +scipy/integrate/tests/__pycache__/test_odeint_jac.cpython-310.pyc,, +scipy/integrate/tests/__pycache__/test_quadpack.cpython-310.pyc,, +scipy/integrate/tests/__pycache__/test_quadrature.cpython-310.pyc,, +scipy/integrate/tests/test__quad_vec.py,sha256=HzFkvaykp7RSen7XMBP3NKL9qMfl9FkSuGjG4T-va6M,6219 +scipy/integrate/tests/test_banded_ode_solvers.py,sha256=kJWirYckJ7k4tfweg1ds-Tozp3GEhxTbuXfgSdeJw7k,6687 +scipy/integrate/tests/test_bvp.py,sha256=xWrQWFzmrpt4sq9XOFrquxh4yp57ydJMJoQ7ST5qgSU,20159 +scipy/integrate/tests/test_integrate.py,sha256=y2kHglNbcpvE6oRdGCi9UjLMV-uz5OqzEGztuO3bMVY,24335 +scipy/integrate/tests/test_odeint_jac.py,sha256=VW63bDRP3uOg6uzm-3787qJl-UQ5Wsht3Ttc6YRybnE,1820 +scipy/integrate/tests/test_quadpack.py,sha256=8MRf70VLUdg7m5Y9G45m7Aq614ondwOjs4ZNvtFWzAA,27946 +scipy/integrate/tests/test_quadrature.py,sha256=NBf9Ok9wim8HOSF487sGkBNvUHsNFnAsT6gzCYe884Y,15317 +scipy/integrate/vode.py,sha256=xv-9AX3Yh1T0w-YoIPRrpQwavFTnoak81AsWiH_HsGA,625 +scipy/interpolate/__init__.py,sha256=zdsw-3YouJ38EFdizaPWGjsaq_G0nWRd2i5vVvHqNZs,3483 +scipy/interpolate/__pycache__/__init__.cpython-310.pyc,, +scipy/interpolate/__pycache__/_bsplines.cpython-310.pyc,, +scipy/interpolate/__pycache__/_cubic.cpython-310.pyc,, +scipy/interpolate/__pycache__/_fitpack2.cpython-310.pyc,, +scipy/interpolate/__pycache__/_fitpack_impl.cpython-310.pyc,, +scipy/interpolate/__pycache__/_fitpack_py.cpython-310.pyc,, +scipy/interpolate/__pycache__/_interpnd_info.cpython-310.pyc,, +scipy/interpolate/__pycache__/_interpolate.cpython-310.pyc,, +scipy/interpolate/__pycache__/_ndgriddata.cpython-310.pyc,, +scipy/interpolate/__pycache__/_pade.cpython-310.pyc,, +scipy/interpolate/__pycache__/_polyint.cpython-310.pyc,, +scipy/interpolate/__pycache__/_rbf.cpython-310.pyc,, +scipy/interpolate/__pycache__/_rbfinterp.cpython-310.pyc,, +scipy/interpolate/__pycache__/_rgi.cpython-310.pyc,, +scipy/interpolate/__pycache__/fitpack.cpython-310.pyc,, +scipy/interpolate/__pycache__/fitpack2.cpython-310.pyc,, +scipy/interpolate/__pycache__/interpolate.cpython-310.pyc,, +scipy/interpolate/__pycache__/ndgriddata.cpython-310.pyc,, +scipy/interpolate/__pycache__/polyint.cpython-310.pyc,, +scipy/interpolate/__pycache__/rbf.cpython-310.pyc,, +scipy/interpolate/_bspl.cpython-310-darwin.so,sha256=hUckd5kz90BSl2vEONBiPuz48os5KuaBBCpuulc_egE,290888 +scipy/interpolate/_bsplines.py,sha256=5Bbuvj9ICltuswGfT-SnfIJdItJk0HSZKilFYbU09qo,69165 +scipy/interpolate/_cubic.py,sha256=VK-OgwTY_qnhP2tXkj3CrusiiZrhbTGME5VJqdQOIRA,33771 +scipy/interpolate/_fitpack.cpython-310-darwin.so,sha256=ENTXD-AhaphLEMgxkA3Bo3M_g1V6JpaT82sz5l3P--Y,138576 
+scipy/interpolate/_fitpack2.py,sha256=ke_ZIEQHfCxk8TVO-btWA8XDH1AYfzvCQGSM80xPnF0,81559 +scipy/interpolate/_fitpack_impl.py,sha256=qupBfm0JYWvQ5-A3d68tUkeRzOD4McOFjvVt2Pd8BVI,46808 +scipy/interpolate/_fitpack_py.py,sha256=qAPP7AIvJMdo3mqbL_BoJIpp5cbj6WJihqSixvPrX_Y,27540 +scipy/interpolate/_interpnd_info.py,sha256=B0E0S3ozMrYkGSJ_XTX_Qj6U9vle0U59i8dlqpTCd4g,869 +scipy/interpolate/_interpolate.py,sha256=clz2h9p7TU3FtkMgyK1mAX7bvqzz4Fe2o6GaVExDow4,87752 +scipy/interpolate/_ndgriddata.py,sha256=v8yn1kC3OkZf7hWij6lclUhAjSJ6UjS4T02Zrpxt88I,9087 +scipy/interpolate/_pade.py,sha256=OBorKWc3vCSGlsWrajoF1_7WeNd9QtdbX0wOHLdRI2A,1827 +scipy/interpolate/_polyint.py,sha256=cd1FXF4VGMe-vabBZ_PigaFrId8yRTrdQYf5vKuj2zY,25992 +scipy/interpolate/_ppoly.cpython-310-darwin.so,sha256=M5sGSDwlNNpr09AJGI2qihqHApWoPVLmUPMwrWEQiks,331241 +scipy/interpolate/_rbf.py,sha256=Ck3PXK7IK6Ac7kx5qY-DP64gLFKxVxjisW8y4PClH_I,11663 +scipy/interpolate/_rbfinterp.py,sha256=xcSiwrp4i46ZiRejBcfJYwFM45ZZRVnELYuRJwE-imk,19380 +scipy/interpolate/_rbfinterp_pythran.cpython-310-darwin.so,sha256=WqvZeJWnaWjqB3p_CG6Aa5pf2VzCadNgxl3-5WnyHPw,272789 +scipy/interpolate/_rgi.py,sha256=NaypNbXpDoD6EwioYCqzCYkTvaXWR0_nr-1wlNL9gGI,26925 +scipy/interpolate/_rgi_cython.cpython-310-darwin.so,sha256=vG0sXeIiOkjUFk1xTVz56lBLD9u342hgavmxgx_YT-s,228894 +scipy/interpolate/dfitpack.cpython-310-darwin.so,sha256=DwkYNNXdXH-KB0EompfFxjzA3sHoeTQdFTCLoLfNWiM,279264 +scipy/interpolate/fitpack.py,sha256=w__c8vjFPORQpgpmWIi6MN_PpsJGBeDPYxoxpkUOdRQ,948 +scipy/interpolate/fitpack2.py,sha256=ivHIjzk8VqI33aDkUo58-pl5eOmDrhInm8CMhgd0lJs,1195 +scipy/interpolate/interpnd.cpython-310-darwin.so,sha256=hucI-BTy1c3J2_R1pzxCLYXX6xZ4bTzmpK9vbQ_rJwc,340731 +scipy/interpolate/interpolate.py,sha256=iz2Yifiy51N7r3tgsAdiSt1swa7C4kQOkbZWPBp_9GM,1180 +scipy/interpolate/ndgriddata.py,sha256=AXW0AnerFGis7MyHWVvYBrnde7g5rBg3FeYV_NY-Xb0,912 +scipy/interpolate/polyint.py,sha256=24_OrluWJYXC0hIuLf6O7h3B0Te364bTPhqKXsV5N3M,941 +scipy/interpolate/rbf.py,sha256=X_dHEfyyCI_XSRmK1d1vnkkWwPlbx7kSHhORv9WByPk,818 +scipy/interpolate/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/interpolate/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/interpolate/tests/__pycache__/test_bsplines.cpython-310.pyc,, +scipy/interpolate/tests/__pycache__/test_fitpack.cpython-310.pyc,, +scipy/interpolate/tests/__pycache__/test_fitpack2.cpython-310.pyc,, +scipy/interpolate/tests/__pycache__/test_gil.cpython-310.pyc,, +scipy/interpolate/tests/__pycache__/test_interpnd.cpython-310.pyc,, +scipy/interpolate/tests/__pycache__/test_interpolate.cpython-310.pyc,, +scipy/interpolate/tests/__pycache__/test_ndgriddata.cpython-310.pyc,, +scipy/interpolate/tests/__pycache__/test_pade.cpython-310.pyc,, +scipy/interpolate/tests/__pycache__/test_polyint.cpython-310.pyc,, +scipy/interpolate/tests/__pycache__/test_rbf.cpython-310.pyc,, +scipy/interpolate/tests/__pycache__/test_rbfinterp.cpython-310.pyc,, +scipy/interpolate/tests/__pycache__/test_rgi.cpython-310.pyc,, +scipy/interpolate/tests/data/bug-1310.npz,sha256=jWgDwLOY8nBMI28dG56OXt4GvRZaCrsPIoKBq71FWuk,2648 +scipy/interpolate/tests/data/estimate_gradients_hang.npy,sha256=QGwQhXQX_16pjYzSiUXJ0OT1wk-SpIrQ6Pq5Vb8kd_E,35680 +scipy/interpolate/tests/data/gcvspl.npz,sha256=A86BVabLoMG_CiRBoQwigZH5Ft7DbLggcjQpgRKWu6g,3138 +scipy/interpolate/tests/test_bsplines.py,sha256=E9OaU0hxyAx73hGrBxX2Z33csh13dCCrFNgV8xnBq8E,59651 +scipy/interpolate/tests/test_fitpack.py,sha256=HV5vP86olfXEgl0uDAKsk-2vB_cTAcVnNMgt6M360Nw,14533 
+scipy/interpolate/tests/test_fitpack2.py,sha256=JrAx37unJ8nhICmovLeSX7fGonVKB390WVkjcTBS44E,58477 +scipy/interpolate/tests/test_gil.py,sha256=wt92CaxUlVgRGB-Wl2EuQxveqdARU8rZucD9IKl-pUE,1874 +scipy/interpolate/tests/test_interpnd.py,sha256=RVc-0onUWODNoc_S3_21pAGDF5U0COyEXZqfLhGwPEw,13627 +scipy/interpolate/tests/test_interpolate.py,sha256=YsknISeWB2pd648RFCiNgwMw3JSH95Yy-zOS3H4ERnE,95734 +scipy/interpolate/tests/test_ndgriddata.py,sha256=lBMoAFWMMZcKLkZlvT2g4laNH3KsWIGpB3TQOaOvMjE,9445 +scipy/interpolate/tests/test_pade.py,sha256=x5VyACjEgqIsz5e5vIOoCaIVb-ToZsFw6baxLQjRFZQ,3786 +scipy/interpolate/tests/test_polyint.py,sha256=8HuqeZIBzmHxmcNUwMJdFYZn73GsLHsLnOKDDTYqSzU,30292 +scipy/interpolate/tests/test_rbf.py,sha256=d0RoNqSChlizopvy5x6vQxhiGkc-LekNbnCYFIN_Z1g,6547 +scipy/interpolate/tests/test_rbfinterp.py,sha256=5gnAZ2c8OFiT5AV_B160XzXNmVwNoMhoGX_iDt0OQLQ,18127 +scipy/interpolate/tests/test_rgi.py,sha256=wxVGOX8sVtpg2CS1wRuV7DL333U4Mj0VV6ncVwGBCkY,41234 +scipy/io/__init__.py,sha256=71Eyk9AluJQpbnqeJq1YWU_43nTUU4AW2lT6p_JPTw4,2746 +scipy/io/__pycache__/__init__.cpython-310.pyc,, +scipy/io/__pycache__/_fortran.cpython-310.pyc,, +scipy/io/__pycache__/_idl.cpython-310.pyc,, +scipy/io/__pycache__/_mmio.cpython-310.pyc,, +scipy/io/__pycache__/_netcdf.cpython-310.pyc,, +scipy/io/__pycache__/harwell_boeing.cpython-310.pyc,, +scipy/io/__pycache__/idl.cpython-310.pyc,, +scipy/io/__pycache__/mmio.cpython-310.pyc,, +scipy/io/__pycache__/netcdf.cpython-310.pyc,, +scipy/io/__pycache__/wavfile.cpython-310.pyc,, +scipy/io/_fortran.py,sha256=3Pa-LQ2iDECm1ADv_QtjhmMZwgR_WzUEmZdd_sM9lao,10903 +scipy/io/_harwell_boeing/__init__.py,sha256=2iVxlj6ZquU8_XPA37npOdeHCXe8XbQrmMZO7k6Bzxs,574 +scipy/io/_harwell_boeing/__pycache__/__init__.cpython-310.pyc,, +scipy/io/_harwell_boeing/__pycache__/_fortran_format_parser.cpython-310.pyc,, +scipy/io/_harwell_boeing/__pycache__/hb.cpython-310.pyc,, +scipy/io/_harwell_boeing/_fortran_format_parser.py,sha256=8F5psqkhiR1M4JzOWOGH1PoQAsZUjbctTlibFpXMAFA,8922 +scipy/io/_harwell_boeing/hb.py,sha256=Y1NLBc2yoADFy28_Vx2SzI1fyhoUF2sq84RFbwB6jUw,19167 +scipy/io/_harwell_boeing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/io/_harwell_boeing/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/io/_harwell_boeing/tests/__pycache__/test_fortran_format.cpython-310.pyc,, +scipy/io/_harwell_boeing/tests/__pycache__/test_hb.cpython-310.pyc,, +scipy/io/_harwell_boeing/tests/test_fortran_format.py,sha256=0LxOjUewBj1Fwf7EOxMWZG_PdzMbVrFYMUeGgs23VII,2360 +scipy/io/_harwell_boeing/tests/test_hb.py,sha256=3eLwxTSg_Ebt2pjBLvZhpq8WUMjkFhM1lsTu_mgvDTI,2284 +scipy/io/_idl.py,sha256=cTTwTYp-ukhGKr9vQZaohOAfNSEmvl2bKvfsxDCKUzM,26930 +scipy/io/_mmio.py,sha256=yFRIpdGC8lyRf2ZMvGPUX_scTpiUZDyok8dPr8CX3Qw,33165 +scipy/io/_netcdf.py,sha256=4j56RRusPvC3TAx4gKj927ab3LqWmCLcRk0aAWX3LxM,39085 +scipy/io/_test_fortran.cpython-310-darwin.so,sha256=8f_SU_Cs25WOT2TomV2Yre4TDoE5jwD0JKKteaw0xls,91856 +scipy/io/arff/__init__.py,sha256=czaV8hvY6JnmEn2qyU3_fzcy_P55aXVT09OzGnhJT9I,805 +scipy/io/arff/__pycache__/__init__.cpython-310.pyc,, +scipy/io/arff/__pycache__/_arffread.cpython-310.pyc,, +scipy/io/arff/__pycache__/arffread.cpython-310.pyc,, +scipy/io/arff/_arffread.py,sha256=eLnxbFfejH5pmbWQCo8aXExNL26F4jYzHk3X2_E2ynU,26373 +scipy/io/arff/arffread.py,sha256=2_W-Wt0drknNg734xtup-U1AeuqGMYKQUzCE3I3CW0k,1364 +scipy/io/arff/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/io/arff/tests/__pycache__/__init__.cpython-310.pyc,, 
+scipy/io/arff/tests/__pycache__/test_arffread.cpython-310.pyc,, +scipy/io/arff/tests/data/iris.arff,sha256=fTS6VWSX6dwoM16mYoo30dvLoJChriDcLenHAy0ZkVM,7486 +scipy/io/arff/tests/data/missing.arff,sha256=ga__Te95i1Yf-yu2kmYDBVTz0xpSTemz7jS74_OfI4I,120 +scipy/io/arff/tests/data/nodata.arff,sha256=DBXdnIe28vrbf4C-ar7ZgeFIa0kGD4pDBJ4YP-z4QHQ,229 +scipy/io/arff/tests/data/quoted_nominal.arff,sha256=01mPSc-_OpcjXFy3EoIzKdHCmzWSag4oK1Ek2tUc6_U,286 +scipy/io/arff/tests/data/quoted_nominal_spaces.arff,sha256=bcMOl-E0I5uTT27E7bDTbW2mYOp9jS8Yrj0NfFjQdKU,292 +scipy/io/arff/tests/data/test1.arff,sha256=nUFDXUbV3sIkur55rL4qvvBdqUTbzSRrTiIPwmtmG8I,191 +scipy/io/arff/tests/data/test10.arff,sha256=va7cXiWX_AnHf-_yz25ychD8hOgf7-sEMJITGwQla30,199009 +scipy/io/arff/tests/data/test11.arff,sha256=G-cbOUUxuc3859vVkRDNjcLRSnUu8-T-Y8n0dSpvweo,241 +scipy/io/arff/tests/data/test2.arff,sha256=COGWCYV9peOGLqlYWhqG4ANT2UqlAtoVehbJLW6fxHw,300 +scipy/io/arff/tests/data/test3.arff,sha256=jUTWGaZbzoeGBneCmKu6V6RwsRPp9_0sJaSCdBg6tyI,72 +scipy/io/arff/tests/data/test4.arff,sha256=mtyuSFKUeiRR2o3mNlwvDCxWq4DsHEBHj_8IthNzp-M,238 +scipy/io/arff/tests/data/test5.arff,sha256=2Q_prOBCfM_ggsGRavlOaJ_qnWPFf2akFXJFz0NtTIE,365 +scipy/io/arff/tests/data/test6.arff,sha256=V8FNv-WUdurutFXKTOq8DADtNDrzfW65gyOlv-lquOU,195 +scipy/io/arff/tests/data/test7.arff,sha256=rxsqdev8WeqC_nKJNwetjVYXA1-qCzWmaHlMvSaVRGk,559 +scipy/io/arff/tests/data/test8.arff,sha256=c34srlkU8hkXYpdKXVozEutiPryR8bf_5qEmiGQBoG4,429 +scipy/io/arff/tests/data/test9.arff,sha256=ZuXQQzprgmTXxENW7we3wBJTpByBlpakrvRgG8n7fUk,311 +scipy/io/arff/tests/test_arffread.py,sha256=RaecyT864asNEteY0kiRZ2FqA_LcCMBL4zXh6htC0t8,13098 +scipy/io/harwell_boeing.py,sha256=Wdd3nB8n1bxmvbjYBUBa1_ZmWbsPaIy3AJBZt2JJQmQ,898 +scipy/io/idl.py,sha256=YhznpLgDnxrm9bwG7PP8zb6volg9oofEXYBTL86X7E0,794 +scipy/io/matlab/__init__.py,sha256=uBmpYjqjkLRikI2im0mk6SOa13aAuQpSqwHY79RsoSE,2022 +scipy/io/matlab/__pycache__/__init__.cpython-310.pyc,, +scipy/io/matlab/__pycache__/_byteordercodes.cpython-310.pyc,, +scipy/io/matlab/__pycache__/_mio.cpython-310.pyc,, +scipy/io/matlab/__pycache__/_mio4.cpython-310.pyc,, +scipy/io/matlab/__pycache__/_mio5.cpython-310.pyc,, +scipy/io/matlab/__pycache__/_mio5_params.cpython-310.pyc,, +scipy/io/matlab/__pycache__/_miobase.cpython-310.pyc,, +scipy/io/matlab/__pycache__/byteordercodes.cpython-310.pyc,, +scipy/io/matlab/__pycache__/mio.cpython-310.pyc,, +scipy/io/matlab/__pycache__/mio4.cpython-310.pyc,, +scipy/io/matlab/__pycache__/mio5.cpython-310.pyc,, +scipy/io/matlab/__pycache__/mio5_params.cpython-310.pyc,, +scipy/io/matlab/__pycache__/mio5_utils.cpython-310.pyc,, +scipy/io/matlab/__pycache__/mio_utils.cpython-310.pyc,, +scipy/io/matlab/__pycache__/miobase.cpython-310.pyc,, +scipy/io/matlab/__pycache__/streams.cpython-310.pyc,, +scipy/io/matlab/_byteordercodes.py,sha256=8RmsRKpJt_48P6bWbQw4HmwwP6g6uZhmWM3DX_dIAok,1902 +scipy/io/matlab/_mio.py,sha256=Rr89q5cFlKwH4svZy_VzHXFZ48PlXjSzcP9TpAr0MA0,12799 +scipy/io/matlab/_mio4.py,sha256=9gZ9pV_Esuh63jDaFc1w7cH0zu6_V6ee24YKPJa3ryk,20612 +scipy/io/matlab/_mio5.py,sha256=zCXqiLNVRNFHNIsKDsOkGj2RS_0EQBSlOpuXgswT0jg,33426 +scipy/io/matlab/_mio5_params.py,sha256=2NBQ0IEVRQS5GQ7_AoKY3Dl_CqzaA3kltnw8-_D1tXU,8199 +scipy/io/matlab/_mio5_utils.cpython-310-darwin.so,sha256=j3HGVZYyWSExXgNbkqZGvEbhv4u2VzbZuCd52kEMrbQ,218430 +scipy/io/matlab/_mio_utils.cpython-310-darwin.so,sha256=v4dPYSipx5Xf50x0-4pp9R7uoMdwSlrsxt7YU6OjZS8,77869 +scipy/io/matlab/_miobase.py,sha256=_3woLhBtEIy8ezRSNVK9RA65JWq9TmcxKgCP27A4vvY,12908 
+scipy/io/matlab/_streams.cpython-310-darwin.so,sha256=fmOf7YSU7xqNGRcoJ0ccUwPmcGBVVs2KzMDV62Xdv3c,128075 +scipy/io/matlab/byteordercodes.py,sha256=SjReEJ2PzTMsU5fNeZ2m3i05uX6LiJ_GLsFi-PVKXyE,849 +scipy/io/matlab/mio.py,sha256=HQSGsh4b1F6KoHWV8uEdPIgu2nBjclubz0ZaE5mwup0,894 +scipy/io/matlab/mio4.py,sha256=gsMWD_lpymj7BLAh0dwVHXMervPkdLu_79PZtABjcCM,1201 +scipy/io/matlab/mio5.py,sha256=3dgxKJjhjooruN_ch9UxlAIN1_Re_to8I5v-x_PB7TE,1435 +scipy/io/matlab/mio5_params.py,sha256=g3Jk-weBAqKSwV9IqtB-cf0DkuYrKcxsO4cojGRFwPk,1526 +scipy/io/matlab/mio5_utils.py,sha256=K_ILFiIcD5EispmZtCidJJD69_ygB4OaFI6-fiiJ9oo,899 +scipy/io/matlab/mio_utils.py,sha256=jthSqDwKuvQaNYuKx-02atSoNiQ5PD9uAVzlyWZeRIo,786 +scipy/io/matlab/miobase.py,sha256=l6sTgtB3-CGjin4L_6Xbf-gnCOGFnBvh0yCEG3_U354,988 +scipy/io/matlab/streams.py,sha256=wgX5MSEUPdAhxK0DHw9iQsLzwnHU9GIrxIDm6JdWMGg,809 +scipy/io/matlab/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/io/matlab/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/io/matlab/tests/__pycache__/test_byteordercodes.cpython-310.pyc,, +scipy/io/matlab/tests/__pycache__/test_mio.cpython-310.pyc,, +scipy/io/matlab/tests/__pycache__/test_mio5_utils.cpython-310.pyc,, +scipy/io/matlab/tests/__pycache__/test_mio_funcs.cpython-310.pyc,, +scipy/io/matlab/tests/__pycache__/test_mio_utils.cpython-310.pyc,, +scipy/io/matlab/tests/__pycache__/test_miobase.cpython-310.pyc,, +scipy/io/matlab/tests/__pycache__/test_pathological.cpython-310.pyc,, +scipy/io/matlab/tests/__pycache__/test_streams.cpython-310.pyc,, +scipy/io/matlab/tests/data/bad_miuint32.mat,sha256=CVkYHp_U4jxYKRRHSuZ5fREop4tJjnZcQ02DKfObkRA,272 +scipy/io/matlab/tests/data/bad_miutf8_array_name.mat,sha256=V-jfVMkYyy8qRGcOIsNGcoO0GCgTxchrsQUBGBnfWHE,208 +scipy/io/matlab/tests/data/big_endian.mat,sha256=2ttpiaH2B6nmHnq-gsFeMvZ2ZSLOlpzt0IJiqBTcc8M,273 +scipy/io/matlab/tests/data/broken_utf8.mat,sha256=nm8aotRl6NIxlM3IgPegKR3EeevYZoJCrYpV4Sa1T5I,216 +scipy/io/matlab/tests/data/corrupted_zlib_checksum.mat,sha256=X4dvE7K9DmGEF3D6I-48hC86W41jB54H7bD8KTXjtYA,276 +scipy/io/matlab/tests/data/corrupted_zlib_data.mat,sha256=DfE1YBH-pYw-dAaEeKA6wZcyKeo9GlEfrzZtql-fO_w,3451 +scipy/io/matlab/tests/data/japanese_utf8.txt,sha256=rgxiBH7xmEKF91ZkB3oMLrqABBXINEMHPXDKdZXNBEY,270 +scipy/io/matlab/tests/data/little_endian.mat,sha256=FQP_2MNod-FFF-JefN7ZxovQ6QLCdHQ0DPL_qBCP44Y,265 +scipy/io/matlab/tests/data/logical_sparse.mat,sha256=qujUUpYewaNsFKAwGpYS05z7kdUv9TQZTHV5_lWhRrs,208 +scipy/io/matlab/tests/data/malformed1.mat,sha256=DTuTr1-IzpLMBf8u5DPb3HXmw9xJo1aWfayA5S_3zUI,2208 +scipy/io/matlab/tests/data/miuint32_for_miint32.mat,sha256=romrBP_BS46Sl2-pKWsUnxYDad2wehyjq4wwLaVqums,272 +scipy/io/matlab/tests/data/miutf8_array_name.mat,sha256=Vo8JptFr-Kg2f2cEoDg8LtELSjVNyccdJY74WP_kqtc,208 +scipy/io/matlab/tests/data/nasty_duplicate_fieldnames.mat,sha256=bvdmj6zDDUIpOfIP8J4Klo107RYCDd5VK5gtOYx3GsU,8168 +scipy/io/matlab/tests/data/one_by_zero_char.mat,sha256=Z3QdZjTlOojjUpS0cfBP4XfNQI3GTjqU0n_pnAzgQhU,184 +scipy/io/matlab/tests/data/parabola.mat,sha256=ENWuWX_uwo4Av16dIGOwnbMReAMrShDhalkq8QUI8Rg,729 +scipy/io/matlab/tests/data/single_empty_string.mat,sha256=4uTmX0oydTjmtnhxqi9SyPWCG2I24gj_5LarS80bPik,171 +scipy/io/matlab/tests/data/some_functions.mat,sha256=JA736oG3s8PPdKhdsYK-BndLUsGrJCJAIRBseSIEZtM,1397 +scipy/io/matlab/tests/data/sqr.mat,sha256=3DtGl_V4wABKCDQ0P3He5qfOzpUTC-mINdK73MKS7AM,679 +scipy/io/matlab/tests/data/test3dmatrix_6.1_SOL2.mat,sha256=-odiBIQAbOLERg0Vg682QHGfs7C8MaA_gY77OWR8x78,232 
+scipy/io/matlab/tests/data/test3dmatrix_6.5.1_GLNX86.mat,sha256=G5siwvZ-7Uv5KJ6h7AA3OHL6eiFsd8Lnjx4IcoByzCU,232 +scipy/io/matlab/tests/data/test3dmatrix_7.1_GLNX86.mat,sha256=EVj1wPnoyWGIdTpkSj3YAwqzTAm27eqZNxCaJAs3pwU,213 +scipy/io/matlab/tests/data/test3dmatrix_7.4_GLNX86.mat,sha256=S_Sd3sxorDd8tZ5CxD5_J8vXbfcksLWzhUQY5b82L9g,213 +scipy/io/matlab/tests/data/test_empty_struct.mat,sha256=WoC7g7TyXqNr2T0d5xE3IUq5PRzatE0mxXjqoHX5Xec,173 +scipy/io/matlab/tests/data/test_mat4_le_floats.mat,sha256=2xvn3Cg4039shJl62T-bH-VeVP_bKtwdqvGfIxv8FJ4,38 +scipy/io/matlab/tests/data/test_skip_variable.mat,sha256=pJLVpdrdEb-9SMZxaDu-uryShlIi90l5LfXhvpVipJ0,20225 +scipy/io/matlab/tests/data/testbool_8_WIN64.mat,sha256=_xBw_2oZA7u9Xs6GJItUpSIEV4jVdfdcwzmLNFWM6ow,185 +scipy/io/matlab/tests/data/testcell_6.1_SOL2.mat,sha256=OWOBzNpWTyAHIcZABRytVMcABiRYgEoMyF9gDaIkFe4,536 +scipy/io/matlab/tests/data/testcell_6.5.1_GLNX86.mat,sha256=7111TN_sh1uMHmYx-bjd_v9uaAnWhJMhrQFAtAw6Nvk,536 +scipy/io/matlab/tests/data/testcell_7.1_GLNX86.mat,sha256=62p6LRW6PbM-Y16aUeGVhclTVqS5IxPUtsohe7MjrYo,283 +scipy/io/matlab/tests/data/testcell_7.4_GLNX86.mat,sha256=NkTA8UW98hIQ0t5hGx_leG-MzNroDelYwqx8MPnO63Q,283 +scipy/io/matlab/tests/data/testcellnest_6.1_SOL2.mat,sha256=AeNaog8HUDCVrIuGICAXYu9SGDsvV6qeGjgvWHrVQho,568 +scipy/io/matlab/tests/data/testcellnest_6.5.1_GLNX86.mat,sha256=Gl4QA0yYwGxjiajjgWS939WVAM-W2ahNIm9wwMaT5oc,568 +scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat,sha256=CUGtkwIU9CBa0Slx13mbaM67_ec0p-unZdu8Z4YYM3c,228 +scipy/io/matlab/tests/data/testcellnest_7.4_GLNX86.mat,sha256=TeTk5yjl5j_bcnmIkpzuYHxGGQXNu-rK6xOsN4t6lX8,228 +scipy/io/matlab/tests/data/testcomplex_4.2c_SOL2.mat,sha256=WOwauWInSVUFBuOJ1Bo3spmUQ3UWUIlsIe4tYGlrU7o,176 +scipy/io/matlab/tests/data/testcomplex_6.1_SOL2.mat,sha256=GpAEccizI8WvlrBPdvlKUv6uKbZOo_cjUK3WVVb2lo4,352 +scipy/io/matlab/tests/data/testcomplex_6.5.1_GLNX86.mat,sha256=3MEbf0zJdQGAO7x-pzFCup2QptfYJHQG59z0vVOdxl4,352 +scipy/io/matlab/tests/data/testcomplex_7.1_GLNX86.mat,sha256=VNHV2AIEkvPuhae1kKIqt5t8AMgUyr0L_CAp-ykLxt4,247 +scipy/io/matlab/tests/data/testcomplex_7.4_GLNX86.mat,sha256=8rWGf5bqY7_2mcd5w5gTYgMkXVePlLL8qT7lh8kApn0,247 +scipy/io/matlab/tests/data/testdouble_4.2c_SOL2.mat,sha256=MzT7OYPEUXHYNPBrVkyKEaG5Cas2aOA0xvrO7l4YTrQ,103 +scipy/io/matlab/tests/data/testdouble_6.1_SOL2.mat,sha256=DpB-mVKx1gsjl-3IbxfxHNuzU5dnuku-MDQCA8kALVI,272 +scipy/io/matlab/tests/data/testdouble_6.5.1_GLNX86.mat,sha256=4hY5VEubavNEv5KvcqQnd7MWWvFUzHXXpYIqUuUt-50,272 +scipy/io/matlab/tests/data/testdouble_7.1_GLNX86.mat,sha256=N2QOOIXPyy0zPZZ_qY7xIDaodMGrTq3oXNBEHZEscw0,232 +scipy/io/matlab/tests/data/testdouble_7.4_GLNX86.mat,sha256=TrkJ4Xx_dC9YrPdewlsOvYs_xag7gT3cN4HkDsJmT8I,232 +scipy/io/matlab/tests/data/testemptycell_5.3_SOL2.mat,sha256=g96Vh9FpNhkiWKsRm4U6KqeKd1hNAEyYSD7IVzdzwsU,472 +scipy/io/matlab/tests/data/testemptycell_6.5.1_GLNX86.mat,sha256=2Zw-cMv-Mjbs2HkSl0ubmh_htFUEpkn7XVHG8iM32o0,472 +scipy/io/matlab/tests/data/testemptycell_7.1_GLNX86.mat,sha256=t5Ar8EgjZ7fkTUHIVpdXg-yYWo_MBaigMDJUGWEIrmU,218 +scipy/io/matlab/tests/data/testemptycell_7.4_GLNX86.mat,sha256=5PPvfOoL-_Q5ou_2nIzIrHgeaOZGFXGxAFdYzCQuwEQ,218 +scipy/io/matlab/tests/data/testfunc_7.4_GLNX86.mat,sha256=ScTKftENe78imbMc0I5ouBlIMcEEmZgu8HVKWAMNr58,381 +scipy/io/matlab/tests/data/testhdf5_7.4_GLNX86.mat,sha256=ZoVbGk38_MCppZ0LRr6OE07HL8ZB4rHXgMj9LwUBgGg,4168 +scipy/io/matlab/tests/data/testmatrix_4.2c_SOL2.mat,sha256=14YMiKAN9JCPTqSDXxa58BK6Un7EM4hEoSGAUuwKWGQ,151 
+scipy/io/matlab/tests/data/testmatrix_6.1_SOL2.mat,sha256=ZdjNbcIE75V5Aht5EVBvJX26aabvNqbUH0Q9VBnxBS4,216 +scipy/io/matlab/tests/data/testmatrix_6.5.1_GLNX86.mat,sha256=OB82QgB6SwtsxT4t453OVSj-B777XrHGEGOMgMD1XGc,216 +scipy/io/matlab/tests/data/testmatrix_7.1_GLNX86.mat,sha256=-TYB0kREY7i7gt5x15fOYjXi410pXuDWUFxPYuMwywI,193 +scipy/io/matlab/tests/data/testmatrix_7.4_GLNX86.mat,sha256=l9psDc5K1bpxNeuFlyYIYauswLnOB6dTX6-jvelW0kU,193 +scipy/io/matlab/tests/data/testminus_4.2c_SOL2.mat,sha256=2914WYQajPc9-Guy3jDOLU3YkuE4OXC_63FUSDzJzX0,38 +scipy/io/matlab/tests/data/testminus_6.1_SOL2.mat,sha256=2X2fZKomz0ktBvibj7jvHbEvt2HRA8D6hN9qA1IDicw,200 +scipy/io/matlab/tests/data/testminus_6.5.1_GLNX86.mat,sha256=i364SgUCLSYRjQsyygvY1ArjEaO5uLip3HyU-R7zaLo,200 +scipy/io/matlab/tests/data/testminus_7.1_GLNX86.mat,sha256=gtYNC9_TciYdq8X9IwyGEjiw2f1uCVTGgiOPFOiQbJc,184 +scipy/io/matlab/tests/data/testminus_7.4_GLNX86.mat,sha256=eXcoTM8vKuh4tQnl92lwdDaqssGB6G9boSHh3FOCkng,184 +scipy/io/matlab/tests/data/testmulti_4.2c_SOL2.mat,sha256=Zhyu2KCsseSJ5NARdS00uwddCs4wmjcWNP2LJFns2-Q,240 +scipy/io/matlab/tests/data/testmulti_7.1_GLNX86.mat,sha256=KI3H58BVj6k6MFsj8icSbjy_0Z-jOesWN5cafStLPG8,276 +scipy/io/matlab/tests/data/testmulti_7.4_GLNX86.mat,sha256=Yr4YKCP27yMWlK5UOK3BAEOAyMr-m0yYGcj8v1tCx-I,276 +scipy/io/matlab/tests/data/testobject_6.1_SOL2.mat,sha256=kzLxy_1o1HclPXWyA-SX5gl6LsG1ioHuN4eS6x5iZio,800 +scipy/io/matlab/tests/data/testobject_6.5.1_GLNX86.mat,sha256=dq_6_n0v7cUz9YziXn-gZFNc9xYtNxZ8exTsziWIM7s,672 +scipy/io/matlab/tests/data/testobject_7.1_GLNX86.mat,sha256=3z-boFw0SC5142YPOLo2JqdusPItVzjCFMhXAQNaQUQ,306 +scipy/io/matlab/tests/data/testobject_7.4_GLNX86.mat,sha256=5OwLTMgCBlxsDfiEUzlVjqcSbVQG-X5mIw5JfW3wQXA,306 +scipy/io/matlab/tests/data/testonechar_4.2c_SOL2.mat,sha256=BCvppGhO19-j-vxAvbdsORIiyuJqzCuQog9Ao8V1lvA,40 +scipy/io/matlab/tests/data/testonechar_6.1_SOL2.mat,sha256=ThppTHGJFrUfal5tewS70DL00dSwk1otazuVdJrTioE,200 +scipy/io/matlab/tests/data/testonechar_6.5.1_GLNX86.mat,sha256=SBfN6e7Vz1rAdi8HLguYXcHUHk1viaXTYccdEyhhob4,200 +scipy/io/matlab/tests/data/testonechar_7.1_GLNX86.mat,sha256=m8W9GqvflfAsizkhgAfT0lLcxuegZIWCLNuHVX69Jac,184 +scipy/io/matlab/tests/data/testonechar_7.4_GLNX86.mat,sha256=t9ObKZOLy3vufnER8TlvQcUkd_wmXbJSdQoG4f3rVKY,184 +scipy/io/matlab/tests/data/testscalarcell_7.4_GLNX86.mat,sha256=5LX9sLH7Y6h_N_a1XRN2GuMgp_P7ECpPsXGDOypAJg0,194 +scipy/io/matlab/tests/data/testsimplecell.mat,sha256=Aoeh0PX2yiLDTwkxMEyZ_CNX2mJHZvyfuFJl817pA1c,220 +scipy/io/matlab/tests/data/testsparse_4.2c_SOL2.mat,sha256=dFUcB1gunfWqexgR4YDZ_Ec0w0HffM1DUE1C5PVfDDc,223 +scipy/io/matlab/tests/data/testsparse_6.1_SOL2.mat,sha256=9Sgd_SPkGNim7ZL0xgD71qml3DK0yDHYC7VSNLNQEXA,280 +scipy/io/matlab/tests/data/testsparse_6.5.1_GLNX86.mat,sha256=jp1ILNxLyV6XmCCGxAz529XoZ9dhCqGEO-ExPH70_Pg,328 +scipy/io/matlab/tests/data/testsparse_7.1_GLNX86.mat,sha256=k8QuQ_4Zu7FWTzHjRnHCVZ9Yu5vwNP0WyNzu6TuiY-4,229 +scipy/io/matlab/tests/data/testsparse_7.4_GLNX86.mat,sha256=QbZOCqIvnaK0XOH3kaSXBe-m_1_Rb33psq8E-WMSBTU,229 +scipy/io/matlab/tests/data/testsparsecomplex_4.2c_SOL2.mat,sha256=QMVoBXVyl9RBGvAjLoiW85kAXYJ-hHprUMegEG69A5w,294 +scipy/io/matlab/tests/data/testsparsecomplex_6.1_SOL2.mat,sha256=WfEroAT5YF4HGAKq3jTJxlFrKaTCh3rwlSlKu__VjwA,304 +scipy/io/matlab/tests/data/testsparsecomplex_6.5.1_GLNX86.mat,sha256=e0s6cyoKJeYMArdceHpnKDvtCVcw7XuB44OBDHpoa6U,400 +scipy/io/matlab/tests/data/testsparsecomplex_7.1_GLNX86.mat,sha256=kgHcuq-deI2y8hfkGwlMOkW7lntexdPHfuz0ar6b3jo,241 
+scipy/io/matlab/tests/data/testsparsecomplex_7.4_GLNX86.mat,sha256=rYCaWNLXK7f_jjMc6_UvZz6ZDuMCuVRmJV5RyeXiDm8,241 +scipy/io/matlab/tests/data/testsparsefloat_7.4_GLNX86.mat,sha256=hnNV6GZazEeqTXuA9vcOUo4xam_UnKRYGYH9PUGTLv8,219 +scipy/io/matlab/tests/data/teststring_4.2c_SOL2.mat,sha256=cAhec51DlqIYfDXXGaumOE3Hqb3cFWM1UsUK3K_lDP8,375 +scipy/io/matlab/tests/data/teststring_6.1_SOL2.mat,sha256=ciFzNGMO7gjYecony-E8vtOwBY4vXIUhyug6Euaz3Kg,288 +scipy/io/matlab/tests/data/teststring_6.5.1_GLNX86.mat,sha256=yrJrpLiwLvU_LI1D6rw1Pk1qJK1YlC7Cmw7lwyJVLtw,288 +scipy/io/matlab/tests/data/teststring_7.1_GLNX86.mat,sha256=zo7sh-8dMpGqhoNxLEnfz3Oc7RonxiY5j0B3lxk0e8o,224 +scipy/io/matlab/tests/data/teststring_7.4_GLNX86.mat,sha256=igL_CvtAcNEa1nxunDjQZY5wS0rJOlzsUkBiDreJssk,224 +scipy/io/matlab/tests/data/teststringarray_4.2c_SOL2.mat,sha256=pRldk-R0ig1k3ouvaR9oVtBwZsQcDW_b4RBEDYu1-Vk,156 +scipy/io/matlab/tests/data/teststringarray_6.1_SOL2.mat,sha256=B9IdaSsyb0wxjyYyHOj_GDO0laAeWDEJhoEhC9xdm1E,232 +scipy/io/matlab/tests/data/teststringarray_6.5.1_GLNX86.mat,sha256=t4tKGJg2NEg_Ar5MkOjCoQb2hVL8Q_Jdh9FF4TPL_4g,232 +scipy/io/matlab/tests/data/teststringarray_7.1_GLNX86.mat,sha256=lpYkBZX8K-c4FO5z0P9DMfYc7Y-yzyg11J6m-19uYTU,203 +scipy/io/matlab/tests/data/teststringarray_7.4_GLNX86.mat,sha256=lG-c7U-5Bo8j8xZLpd0JAsMYwewT6cAw4eJCZH5xf6E,203 +scipy/io/matlab/tests/data/teststruct_6.1_SOL2.mat,sha256=3GJbA4O7LP57J6IYzmJqTPeSJrEaiNSk-rg7h0ANR1w,608 +scipy/io/matlab/tests/data/teststruct_6.5.1_GLNX86.mat,sha256=fRbqAnzTeOU3dTQx7O24MfMVFr6pM5u594FRrPPkYJE,552 +scipy/io/matlab/tests/data/teststruct_7.1_GLNX86.mat,sha256=mCtI_Yot08NazvWHvehOZbTV4bW_I4-D5jBgJ6T9EbI,314 +scipy/io/matlab/tests/data/teststruct_7.4_GLNX86.mat,sha256=52qaF4HRCtPl1jE6ljbkEl2mofZVAPpmBxrm-J5OTTI,314 +scipy/io/matlab/tests/data/teststructarr_6.1_SOL2.mat,sha256=vneCpWBwApBGfeKzdZcybyajxjR-ZYf64j0l08_hU84,528 +scipy/io/matlab/tests/data/teststructarr_6.5.1_GLNX86.mat,sha256=gqhRpSfNNB5SR9sCp-wWrvokr5VV_heGnvco6dmfOvY,472 +scipy/io/matlab/tests/data/teststructarr_7.1_GLNX86.mat,sha256=6VDU0mtTBEG0bBHqKP1p8xq846eMhSZ_WvBZv8MzE7M,246 +scipy/io/matlab/tests/data/teststructarr_7.4_GLNX86.mat,sha256=ejtyxeeX_W1a2rNrEUUiG9txPW8_UtSgt8IaDOxE2pg,246 +scipy/io/matlab/tests/data/teststructnest_6.1_SOL2.mat,sha256=sbi0wUwOrbU-gBq3lyDwhAbvchdtOJkflOR_MU7uGKA,496 +scipy/io/matlab/tests/data/teststructnest_6.5.1_GLNX86.mat,sha256=uTkKtrYBTuz4kICVisEaG7V5C2nJDKjy92mPDswTLPE,416 +scipy/io/matlab/tests/data/teststructnest_7.1_GLNX86.mat,sha256=o4F2jOhYyNpJCo-BMg6v_ITZQvjenXfXHLq94e7iwRo,252 +scipy/io/matlab/tests/data/teststructnest_7.4_GLNX86.mat,sha256=CNXO12O6tedEuMG0jNma4qfbTgCswAbHwh49a3uE3Yk,252 +scipy/io/matlab/tests/data/testunicode_7.1_GLNX86.mat,sha256=KV97FCW-1XZiXrwXJoZPbgyAht79oIFHa917W1KFLwE,357 +scipy/io/matlab/tests/data/testunicode_7.4_GLNX86.mat,sha256=9-8xzACZleBkMjZnbr8t4Ncs9B6mbzrONDblPnteBPU,357 +scipy/io/matlab/tests/data/testvec_4_GLNX86.mat,sha256=GQzR3mBVS266_NBfrRC9X0dLgmeu8Jl4r4ZYMOrn1V0,93 +scipy/io/matlab/tests/test_byteordercodes.py,sha256=FCHBAxeQZlhvTXw-AO-ukwTWvpN7NzmncBEDJ1P4de4,938 +scipy/io/matlab/tests/test_mio.py,sha256=61iYn2ELM4i7YXQZzlLr-ifMetmOdHwVCs38kBW6YQY,43337 +scipy/io/matlab/tests/test_mio5_utils.py,sha256=4uKkvA7p6pc8ybktQGAdGZaNFzNT4yan0dyCs4ruC4A,5419 +scipy/io/matlab/tests/test_mio_funcs.py,sha256=fSDaeVPvCRBFzqjWtXR5xIv9UQ_yv6Y_Nl5D5u0HIGo,1392 +scipy/io/matlab/tests/test_mio_utils.py,sha256=GX85RuLqr2HxS5_f7ZgrxbhswJy2GPQQoQbiQYg0s14,1594 
+scipy/io/matlab/tests/test_miobase.py,sha256=xH4ZOR_b25TJLyIGqYQdeSASpTi8j-oIkRcO4D-R4us,1464 +scipy/io/matlab/tests/test_pathological.py,sha256=qir2euvFJnsXQYDzg0xAy5VUsNqCJPvI19IOLPj2T0A,1060 +scipy/io/matlab/tests/test_streams.py,sha256=-Yf5bbmFQnEdyW_zmQstHdMBkn95RYVxCzg-Cfdg9Qs,7319 +scipy/io/mmio.py,sha256=TkHUGo7h8JCkFI5se5T_rSC3Wc_Ojkb-yLhp99cmV-M,779 +scipy/io/netcdf.py,sha256=A5jSFgdrJGZHgeoFHvLuEHMFi0ZYZt76eyOErVHy04Q,1080 +scipy/io/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/io/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/io/tests/__pycache__/test_fortran.cpython-310.pyc,, +scipy/io/tests/__pycache__/test_idl.cpython-310.pyc,, +scipy/io/tests/__pycache__/test_mmio.cpython-310.pyc,, +scipy/io/tests/__pycache__/test_netcdf.cpython-310.pyc,, +scipy/io/tests/__pycache__/test_paths.cpython-310.pyc,, +scipy/io/tests/__pycache__/test_wavfile.cpython-310.pyc,, +scipy/io/tests/data/Transparent Busy.ani,sha256=vwoK3ysYo87-TwzvjerHjFjSPIGpw83jjiMDXcHPWjA,4362 +scipy/io/tests/data/array_float32_1d.sav,sha256=A_xXWkfS1sQCxP4ONezeEZvlKEXwZ1TPG2rCCFdmBNM,2628 +scipy/io/tests/data/array_float32_2d.sav,sha256=qJmN94pywXznXMHzt-L6DJgaIq_FfruVKJl_LMaI8UU,3192 +scipy/io/tests/data/array_float32_3d.sav,sha256=U7P6As7Nw6LdBY1pTOaW9C-O_NlXLXZwSgbT3H8Z8uk,13752 +scipy/io/tests/data/array_float32_4d.sav,sha256=Tl6erEw_Zq3dwVbVyPXRWqB83u_o4wkIVFOe3wQrSro,6616 +scipy/io/tests/data/array_float32_5d.sav,sha256=VmaBgCD854swYyLouDMHJf4LL6iUNgajEOQf0pUjHjg,7896 +scipy/io/tests/data/array_float32_6d.sav,sha256=lb7modI0OQDweJWbDxEV2OddffKgMgq1tvCy5EK6sOU,19416 +scipy/io/tests/data/array_float32_7d.sav,sha256=pqLWIoxev9sLCs9LLwxFlM4RCFwxHC4Q0dEEz578mpI,3288 +scipy/io/tests/data/array_float32_8d.sav,sha256=R8A004f9XLWvF6eKMNEqIrC6PGP1vLZr9sFqawqM8ZA,13656 +scipy/io/tests/data/array_float32_pointer_1d.sav,sha256=sV7qFNwHK-prG5vODa7m5HYK7HlH_lqdfsI5Y1RWDyg,2692 +scipy/io/tests/data/array_float32_pointer_2d.sav,sha256=b0brvK6xQeezoRuujmEcJNw2v6bfASLM3FSY9u5dMSg,3256 +scipy/io/tests/data/array_float32_pointer_3d.sav,sha256=a_Iyg1YjPBRh6B-N_n_BGIVjFje4K-EPibKV-bPbF7E,13816 +scipy/io/tests/data/array_float32_pointer_4d.sav,sha256=cXrkHHlPyoYstDL_OJ15-55sZOOeDNW2OJ3KWhBv-Kk,6680 +scipy/io/tests/data/array_float32_pointer_5d.sav,sha256=gRVAZ6jeqFZyIQI9JVBHed9Y0sjS-W4bLseb01rIcGs,7960 +scipy/io/tests/data/array_float32_pointer_6d.sav,sha256=9yic-CQiS0YR_ow2yUA2Nix0Nb_YCKMUsIgPhgcJT1c,19480 +scipy/io/tests/data/array_float32_pointer_7d.sav,sha256=Rp1s8RbW8eoEIRTqxba4opAyY0uhTuyy3YkwRlNspQU,3352 +scipy/io/tests/data/array_float32_pointer_8d.sav,sha256=Wk3Dd2ClAwWprXLKZon3blY7aMvMrJqz_NXzK0J5MFY,13720 +scipy/io/tests/data/example_1.nc,sha256=EkfC57dWXeljgXy5sidrJHJG12D1gmQUyPDK18WzlT4,1736 +scipy/io/tests/data/example_2.nc,sha256=wywMDspJ2QT431_sJUr_5DHqG3pt9VTvDJzfR9jeWCk,272 +scipy/io/tests/data/example_3_maskedvals.nc,sha256=P9N92jCJgKJo9VmNd7FeeJSvl4yUUFwBy6JpR4MeuME,1424 +scipy/io/tests/data/fortran-3x3d-2i.dat,sha256=oYCXgtY6qqIqLAhoh_46ob_RVQRcV4uu333pOiLKgRM,451 +scipy/io/tests/data/fortran-mixed.dat,sha256=zTi7RLEnyAat_DdC3iSEcSbyDtAu0aTKwUT-tExjasw,40 +scipy/io/tests/data/fortran-sf8-11x1x10.dat,sha256=KwaOrZOAe-wRhuxvmHIK-Wr59us40MmiA9QyWtIAUaA,888 +scipy/io/tests/data/fortran-sf8-15x10x22.dat,sha256=5ohvjjOUcIsGimSqDhpUUKwflyhVsfwKL5ElQe_SU0I,26408 +scipy/io/tests/data/fortran-sf8-1x1x1.dat,sha256=Djmoip8zn-UcxWGUPKV5wzKOYOf7pbU5L7HaR3BYlec,16 +scipy/io/tests/data/fortran-sf8-1x1x5.dat,sha256=Btgavm3w3c9md_5yFfq6Veo_5IK9KtlLF1JEPeHhZoU,48 
+scipy/io/tests/data/fortran-sf8-1x1x7.dat,sha256=L0r9yAEMbfMwYQytzYsS45COqaVk-o_hi6zRY3yIiO4,64 +scipy/io/tests/data/fortran-sf8-1x3x5.dat,sha256=c2LTocHclwTIeaR1Pm3mVMyf5Pl_imfjIFwi4Lpv0Xs,128 +scipy/io/tests/data/fortran-si4-11x1x10.dat,sha256=OesvSIGsZjpKZlZsV74PNwy0Co0KH8-3gxL9-DWoa08,448 +scipy/io/tests/data/fortran-si4-15x10x22.dat,sha256=OJcKyw-GZmhHb8REXMsHDn7W5VP5bhmxgVPIAYG-Fj4,13208 +scipy/io/tests/data/fortran-si4-1x1x1.dat,sha256=1Lbx01wZPCOJHwg99MBDuc6QZKdMnccxNgICt4omfFM,12 +scipy/io/tests/data/fortran-si4-1x1x5.dat,sha256=L1St4yiHTA3v91JjnndYfUrdKfT1bWxckwnnrscEZXc,28 +scipy/io/tests/data/fortran-si4-1x1x7.dat,sha256=Dmqt-tD1v2DiPZkghGGZ9Ss-nJGfei-3yFXPO5Acpk4,36 +scipy/io/tests/data/fortran-si4-1x3x5.dat,sha256=3vl6q93m25jEcZVKD0CuKNHmhZwZKp-rv0tfHoPVP88,68 +scipy/io/tests/data/invalid_pointer.sav,sha256=JmgoISXC4r5fSmI5FqyapvmzQ4qpYLf-9N7_Et1p1HQ,1280 +scipy/io/tests/data/null_pointer.sav,sha256=P_3a_sU614F3InwM82jSMtWycSZkvqRn1apwd8XxbtE,2180 +scipy/io/tests/data/scalar_byte.sav,sha256=dNJbcE5OVDY_wHwN_UBUtfIRd13Oqu-RBEO74g5SsBA,2076 +scipy/io/tests/data/scalar_byte_descr.sav,sha256=DNTmDgDWOuzlQnrceER6YJ0NutUUwZ9tozVMBWQmuuY,2124 +scipy/io/tests/data/scalar_complex32.sav,sha256=NGd-EvmFZgt8Ko5MP3T_TLwyby6yS0BXM_OW8197hpU,2076 +scipy/io/tests/data/scalar_complex64.sav,sha256=gFBWtxuAajazupGFSbvlWUPDYK-JdWgZcEWih2-7IYU,2084 +scipy/io/tests/data/scalar_float32.sav,sha256=EwWQw2JTwq99CHVpDAh4R20R0jWaynXABaE2aTRmXrs,2072 +scipy/io/tests/data/scalar_float64.sav,sha256=iPcDlgF1t0HoabvNLWCbSiTPIa9rvVEbOGGmE_3Ilsk,2076 +scipy/io/tests/data/scalar_heap_pointer.sav,sha256=JXZbPmntXILsNOuLIKL8qdu8gDJekYrlN9DQxAWve0E,2204 +scipy/io/tests/data/scalar_int16.sav,sha256=kDBLbPYGo2pzmZDhyl8rlDv0l6TMEWLIoLtmgJXDMkk,2072 +scipy/io/tests/data/scalar_int32.sav,sha256=IzJwLvEoqWLO5JRaHp8qChfptlauU-ll3rb0TfDDM8Y,2072 +scipy/io/tests/data/scalar_int64.sav,sha256=-aSHQRiaE3wjAxINwuLX33_8qmWl4GUkTH45elTkA-8,2076 +scipy/io/tests/data/scalar_string.sav,sha256=AQ7iZ8dKk9QfnLdP9idKv1ojz0M_SwpL7XAUmbHodDQ,2124 +scipy/io/tests/data/scalar_uint16.sav,sha256=928fmxLsQM83ue4eUS3IEnsLSEzmHBklDA59JAUvGK8,2072 +scipy/io/tests/data/scalar_uint32.sav,sha256=X3RbPhS6_e-u-1S1gMyF7s9ys7oV6ZNwPrJqJ6zIJsk,2072 +scipy/io/tests/data/scalar_uint64.sav,sha256=ffVyS2oKn9PDtWjJdOjSRT2KZzy6Mscgd4u540MPHC4,2076 +scipy/io/tests/data/struct_arrays.sav,sha256=TzH-Gf0JgbP_OgeKYbV8ZbJXvWt1VetdUr6C_ziUlzg,2580 +scipy/io/tests/data/struct_arrays_byte_idl80.sav,sha256=oOmhTnmKlE60-JMJRRMv_zfFs4zqioMN8QA0ldlgQZo,1388 +scipy/io/tests/data/struct_arrays_replicated.sav,sha256=kXU8j9QI2Q8D22DVboH9fwwDQSLVvuWMJl3iIOhUAH8,2936 +scipy/io/tests/data/struct_arrays_replicated_3d.sav,sha256=s3ZUwhT6TfiVfk4AGBSyxYR4FRzo4sZQkTxFCJbIQMI,4608 +scipy/io/tests/data/struct_inherit.sav,sha256=4YajBZcIjqMQ4CI0lRUjXpYDY3rI5vzJJzOYpjWqOJk,2404 +scipy/io/tests/data/struct_pointer_arrays.sav,sha256=fkldO6-RO2uAN_AI9hM6SEaBPrBf8TfiodFGJpViaqg,2408 +scipy/io/tests/data/struct_pointer_arrays_replicated.sav,sha256=eKVerR0LoD9CuNlpwoBcn7BIdj3-8x56VNg--Qn7Hgc,2492 +scipy/io/tests/data/struct_pointer_arrays_replicated_3d.sav,sha256=vsqhGpn3YkZEYjQuI-GoX8Jg5Dv8A2uRtP0kzQkq4lg,2872 +scipy/io/tests/data/struct_pointers.sav,sha256=Zq6d5V9ZijpocxJpimrdFTQG827GADBkMB_-6AweDYI,2268 +scipy/io/tests/data/struct_pointers_replicated.sav,sha256=aIXPBIXTfPmd4IaLpYD5W_HUoIOdL5Y3Hj7WOeRM2sA,2304 +scipy/io/tests/data/struct_pointers_replicated_3d.sav,sha256=t1jhVXmhW6VotQMNZ0fv0sDO2pkN4EutGsx5No4VJQs,2456 
+scipy/io/tests/data/struct_scalars.sav,sha256=LYICjERzGJ_VvYgtwJ_Up2svQTv8wBzNcVD3nsd_OPg,2316 +scipy/io/tests/data/struct_scalars_replicated.sav,sha256=lw3fC4kppi6BUWAd4n81h8_KgoUdiJl5UIt3CvJIuBs,2480 +scipy/io/tests/data/struct_scalars_replicated_3d.sav,sha256=xVAup6f1dSV_IsSwBQC3KVs0eLEZ6-o5EaZT9yUoDZI,3240 +scipy/io/tests/data/test-44100Hz-2ch-32bit-float-be.wav,sha256=gjv__ng9xH_sm34hyxCbCgO4AP--PZAfDOArH5omkjM,3586 +scipy/io/tests/data/test-44100Hz-2ch-32bit-float-le.wav,sha256=H0LLyv2lc2guzYGnx4DWXU6vB57JrRX-G9Dd4qGh0hM,3586 +scipy/io/tests/data/test-44100Hz-be-1ch-4bytes.wav,sha256=KKz9SXv_R3gX_AVeED2vyhYnj4BvD1uyDiKpCT3ulZ0,17720 +scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof-no-data.wav,sha256=YX1g8qdCOAG16vX9G6q4SsfCj2ZVk199jzDQ8S0zWYI,72 +scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof.wav,sha256=bFrsRqw0QXmsaDtjD6TFP8hZ5jEYMyaCmt-ka_C6GNk,1024 +scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-incomplete-chunk.wav,sha256=zMnhvZvrP4kyOWKVKfbBneyv03xvzgqXYhHNxsAxDJ4,13 +scipy/io/tests/data/test-44100Hz-le-1ch-4bytes.wav,sha256=9qTCvpgdz3raecVN1ViggHPnQjBf47xmXod9iCDsEik,17720 +scipy/io/tests/data/test-48000Hz-2ch-64bit-float-le-wavex.wav,sha256=EqYBnEgTxTKvaTAtdA5HIl47CCFIje93y4hawR6Pyu0,7792 +scipy/io/tests/data/test-8000Hz-be-3ch-5S-24bit.wav,sha256=hGYchxQFjrtvZCBo0ULi-xdZ8krqXcKdTl3NSUfqe8k,90 +scipy/io/tests/data/test-8000Hz-le-1ch-10S-20bit-extra.wav,sha256=h8CXsW5_ShKR197t_d-TUTlgDqOZ-7wK_EcVGucR-aY,74 +scipy/io/tests/data/test-8000Hz-le-1ch-1byte-ulaw.wav,sha256=BoUCDct3GiY_JJV_HoghF3mzAebT18j02c-MOn19KxU,70 +scipy/io/tests/data/test-8000Hz-le-2ch-1byteu.wav,sha256=R6EJshvQp5YVR4GB9u4Khn5HM1VMfJUj082i8tkBIJ8,1644 +scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit-inconsistent.wav,sha256=t2Mgri3h6JLQDekrwIhDBOaG46OUzHynUz0pKbvOpNU,90 +scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit.wav,sha256=yCv0uh-ux_skJsxeOjzog0YBk3ZQO_kw5HJHMqtVyI0,90 +scipy/io/tests/data/test-8000Hz-le-3ch-5S-36bit.wav,sha256=oiMVsQV9-qGBz_ZwsfAkgA9BZXNjXbH4zxCGvvdT0RY,120 +scipy/io/tests/data/test-8000Hz-le-3ch-5S-45bit.wav,sha256=e97XoPrPGJDIh8nO6mii__ViY5yVlmt4OnPQoDN1djs,134 +scipy/io/tests/data/test-8000Hz-le-3ch-5S-53bit.wav,sha256=wbonKlzvzQ_bQYyBsj-GwnihZOhn0uxfKhL_nENCGNc,150 +scipy/io/tests/data/test-8000Hz-le-3ch-5S-64bit.wav,sha256=Uu5QPQcbtnFlnxOd4zFGxpiTC4wgdp6JOoYJ2VMZIU0,164 +scipy/io/tests/data/test-8000Hz-le-4ch-9S-12bit.wav,sha256=1F67h8tr2xz0C5K21T9y9gspcGA0qnSOzsl2vjArAMs,116 +scipy/io/tests/data/test-8000Hz-le-5ch-9S-5bit.wav,sha256=TJvGU7GpgXdCrdrjzMlDtpieDMnDK-lWMMqlWjT23BY,89 +scipy/io/tests/data/various_compressed.sav,sha256=H-7pc-RCQx5y6_IbHk1hB6OfnhvuPyW6EJq4EwI9iMc,1015 +scipy/io/tests/test_fortran.py,sha256=2NZb7RoXsoH5pqh1WHCH6j0PTf4q_Lnee_vmgcmU1Xs,7572 +scipy/io/tests/test_idl.py,sha256=rQd2IH7BwOzS1X1sO6dlLax85_i3OWgjRGzZqJOyI2w,19874 +scipy/io/tests/test_mmio.py,sha256=oFKsQi8sGfj3KGPqOQ9Hz0scj-Ih0P7NZpqdN5OOHxg,26585 +scipy/io/tests/test_netcdf.py,sha256=5RMWHfw349f7Gjp-RLNTSxLpYaRlnxIjXHaa-z46M0g,19317 +scipy/io/tests/test_paths.py,sha256=3ewh_1yXujx3NIZ3deUjepFJgJDa5IHIugxupLDhHoU,3178 +scipy/io/tests/test_wavfile.py,sha256=UluHY_ZPAbAaot_5ykV2aArBmwMRlKhEdZHiTzj-JLc,15303 +scipy/io/wavfile.py,sha256=CXcu2wq38iAExx-bBeGHeYbStPxF9uhss3nA9lgyUow,26642 +scipy/linalg.pxd,sha256=M28Y_hLKRSlomUNFNm0LbL9lhYINd7mgo7maa_WiHmw,48 +scipy/linalg/__init__.py,sha256=rQCI6sMFRogHgCboA0NsCvLrnKFH0iEoDeZE72BHbS4,7604 +scipy/linalg/__pycache__/__init__.cpython-310.pyc,, +scipy/linalg/__pycache__/_basic.cpython-310.pyc,, 
+scipy/linalg/__pycache__/_decomp.cpython-310.pyc,, +scipy/linalg/__pycache__/_decomp_cholesky.cpython-310.pyc,, +scipy/linalg/__pycache__/_decomp_cossin.cpython-310.pyc,, +scipy/linalg/__pycache__/_decomp_ldl.cpython-310.pyc,, +scipy/linalg/__pycache__/_decomp_lu.cpython-310.pyc,, +scipy/linalg/__pycache__/_decomp_polar.cpython-310.pyc,, +scipy/linalg/__pycache__/_decomp_qr.cpython-310.pyc,, +scipy/linalg/__pycache__/_decomp_qz.cpython-310.pyc,, +scipy/linalg/__pycache__/_decomp_schur.cpython-310.pyc,, +scipy/linalg/__pycache__/_decomp_svd.cpython-310.pyc,, +scipy/linalg/__pycache__/_expm_frechet.cpython-310.pyc,, +scipy/linalg/__pycache__/_flinalg_py.cpython-310.pyc,, +scipy/linalg/__pycache__/_interpolative_backend.cpython-310.pyc,, +scipy/linalg/__pycache__/_matfuncs.cpython-310.pyc,, +scipy/linalg/__pycache__/_matfuncs_inv_ssq.cpython-310.pyc,, +scipy/linalg/__pycache__/_matfuncs_sqrtm.cpython-310.pyc,, +scipy/linalg/__pycache__/_misc.cpython-310.pyc,, +scipy/linalg/__pycache__/_procrustes.cpython-310.pyc,, +scipy/linalg/__pycache__/_sketches.cpython-310.pyc,, +scipy/linalg/__pycache__/_solvers.cpython-310.pyc,, +scipy/linalg/__pycache__/_special_matrices.cpython-310.pyc,, +scipy/linalg/__pycache__/_testutils.cpython-310.pyc,, +scipy/linalg/__pycache__/basic.cpython-310.pyc,, +scipy/linalg/__pycache__/blas.cpython-310.pyc,, +scipy/linalg/__pycache__/decomp.cpython-310.pyc,, +scipy/linalg/__pycache__/decomp_cholesky.cpython-310.pyc,, +scipy/linalg/__pycache__/decomp_lu.cpython-310.pyc,, +scipy/linalg/__pycache__/decomp_qr.cpython-310.pyc,, +scipy/linalg/__pycache__/decomp_schur.cpython-310.pyc,, +scipy/linalg/__pycache__/decomp_svd.cpython-310.pyc,, +scipy/linalg/__pycache__/flinalg.cpython-310.pyc,, +scipy/linalg/__pycache__/interpolative.cpython-310.pyc,, +scipy/linalg/__pycache__/lapack.cpython-310.pyc,, +scipy/linalg/__pycache__/matfuncs.cpython-310.pyc,, +scipy/linalg/__pycache__/misc.cpython-310.pyc,, +scipy/linalg/__pycache__/special_matrices.cpython-310.pyc,, +scipy/linalg/_basic.py,sha256=Sv96ztCBTc7u9hfO2Rq0nWUA3d0f1RhtX9z8YEVRVmE,64381 +scipy/linalg/_blas_subroutine_wrappers.f,sha256=pnqlE8yxj0Uh8HGug6v0JsD76QbNdRE-_5ErKUXAOxs,7757 +scipy/linalg/_blas_subroutines.h,sha256=iodn74tn1PwQFzOX-cbqOus6LjAx43ETe5YhndHhxs4,19068 +scipy/linalg/_cythonized_array_utils.cpython-310-darwin.so,sha256=kTQ6egcfye3bGd-L-gqcCA6PKix7vvH_ZbWqXT79WDU,460026 +scipy/linalg/_cythonized_array_utils.pxd,sha256=iFr-x1jLaNiZroQEErGwaMYZU09DI26lWf1h-cbhVMQ,861 +scipy/linalg/_cythonized_array_utils.pyi,sha256=7SHh1oIR-ET5or3WkUfCnwP2dGTpdaUwIsgGaQqrMYg,346 +scipy/linalg/_decomp.py,sha256=966DxNaiIrwXzsHI-zPDV0TO6znvTRvIibCtHPfIKqE,61405 +scipy/linalg/_decomp_cholesky.py,sha256=iCRl5kCijw__9VXbrE5Fdy_X1yUAAaP4vi1XBtZH9nA,11903 +scipy/linalg/_decomp_cossin.py,sha256=vtCgl-6E8AT3iwoBcMTMmVAhDvLyNXPOkg-xPRdmIsQ,9136 +scipy/linalg/_decomp_ldl.py,sha256=dUj9QPKS1o1jjKWNr37MAU3MGQE_00XyQdIIxR6lL_g,12516 +scipy/linalg/_decomp_lu.py,sha256=MUbpsSQpQtXve7y-yPnkhrVMaKSa7lhj4_LQmoKUn-c,6922 +scipy/linalg/_decomp_polar.py,sha256=arzJ40FP1-TFsRvXPCP1qdNTsT60lkBcKBHfhB2JxxY,3578 +scipy/linalg/_decomp_qr.py,sha256=E-ibpl1QdMu8HnllINd81j-FufmWCCbL35ImOoOiWAA,13727 +scipy/linalg/_decomp_qz.py,sha256=6lgUlMUdnWjJbyfzbgYaxzjFPixiyRSV-iJFpIBEmxc,16333 +scipy/linalg/_decomp_schur.py,sha256=_X7GVtoRpGB4BuBbBWUZWV0QYFpadrCl-q0CTocR3Mo,10272 +scipy/linalg/_decomp_svd.py,sha256=IqMNuweROqy1NYqj-2R3tuSf4JQbQyuI6T5bzmmFKIM,14907 
+scipy/linalg/_decomp_update.cpython-310-darwin.so,sha256=-aP-iW80NI_7FzQoh11urXRmHK_d5PP3QVpbTM5MVco,273873 +scipy/linalg/_expm_frechet.py,sha256=gJQcBbSQ_Q6OORSvHNPokB5ahvXt9LC48zA5t3jftB8,12326 +scipy/linalg/_fblas.cpython-310-darwin.so,sha256=FQS7CG33SGlzbYWv-KLoHxG4VgbYfrg0v0lPqBMDvJk,564256 +scipy/linalg/_flapack.cpython-310-darwin.so,sha256=YXWupmpwncuWL76Qpw3LSb_bQWuJ7Ge3ofDupYDnUxk,1719648 +scipy/linalg/_flinalg.cpython-310-darwin.so,sha256=y6cgkk0xh7mf5EW8ENLJVPQd2WOiPFZYuU7wfTOL-A8,109296 +scipy/linalg/_flinalg_py.py,sha256=qSJJm0OCTEkswqZyyTjQ3-WalPnECw2jgDxSymdbaSM,1658 +scipy/linalg/_interpolative.cpython-310-darwin.so,sha256=yxlmxVgTRs5eKJVy-gU_p0y6KXC-PQmwQGGV0N7mUQo,378912 +scipy/linalg/_interpolative_backend.py,sha256=yycf_ceX0dgf7Usjvtaxmkm_cT-2jmEMBuWY6tJST2g,45192 +scipy/linalg/_lapack_subroutine_wrappers.f,sha256=lSvEytuOblN5KOmcHlyfj7MVfn5CyyTllZQAp7i_woM,34384 +scipy/linalg/_lapack_subroutines.h,sha256=WOzLcinUl8EqEGuYUMDwOvEal_sviBjztpLIrTp3eZc,246836 +scipy/linalg/_matfuncs.py,sha256=bvz-QzmziCuXBHqQoSEydk6RogSnesUy13toGJx6scI,25062 +scipy/linalg/_matfuncs_expm.cpython-310-darwin.so,sha256=tfmapOGU1IkJrQsE7bCx8qQCH3tAJ_jPH7sdEvIokRU,366929 +scipy/linalg/_matfuncs_expm.pyi,sha256=zpJd5n0J2JAlPSUKSumo46MN8X3hjJZcRMkD1msZKI0,194 +scipy/linalg/_matfuncs_inv_ssq.py,sha256=ddfyRK1AB1Toub3qnM9pyhfmmYwAiYadvddSrQeLkmM,28038 +scipy/linalg/_matfuncs_sqrtm.py,sha256=VnF1JNk4h7N9bfTF66ab6mKHGoFrDLcVDNrywzs1JSY,6637 +scipy/linalg/_matfuncs_sqrtm_triu.cpython-310-darwin.so,sha256=dn5zJRQPU-50H5xtDBn3NvJqvmfUhzIBwAVSyq8tvvQ,209383 +scipy/linalg/_misc.py,sha256=3IPq-LIQcxV7ELbtcgZK8Ri60YWbhpN_y7UYe6BKEgA,6283 +scipy/linalg/_procrustes.py,sha256=bdr2I5Lcw68IPsupyRYdSC1_8-INJ2dlNtf34j73pwA,2745 +scipy/linalg/_sketches.py,sha256=n6PEJILrFpzWhdf-sKFgGN-0elEwqvBlI0Z3H54tk0c,6145 +scipy/linalg/_solve_toeplitz.cpython-310-darwin.so,sha256=C115lmElXcyZp35dWLtJ5Xd-37tZxBiLhZEmnstE8B4,231570 +scipy/linalg/_solvers.py,sha256=Q4sTmL0pn6_MlnAztXisB04dHgS3j500chK8AurpCvI,28379 +scipy/linalg/_special_matrices.py,sha256=VAGDaA1T36-YcxYb6ii73mNtwMYbyQF11eiIMlQRXG4,40052 +scipy/linalg/_testutils.py,sha256=Bbc3qTyBt3M0beyXqaqF8OI9fDeiZlXom6pL_DdWE8A,1732 +scipy/linalg/basic.py,sha256=Bm9qs8IDccT4i5ZYP148BRMRDXM5ltzS7acZ3gJwg6s,1026 +scipy/linalg/blas.py,sha256=9BFJUhgB6DkYFyfLyYMsUIm72icLVJPFm9h2e-b860M,11683 +scipy/linalg/cython_blas.cpython-310-darwin.so,sha256=iRbhuXkUbvHORUaNXUG6whHJ4EWHoH2n393fso43SEo,256288 +scipy/linalg/cython_blas.pxd,sha256=AlS8WsmUysG87D5T-hOuLiBsakmGMani_L8D_h_lzPs,14403 +scipy/linalg/cython_blas.pyx,sha256=nS4d8mvgs1Z8dMh3HMyY_wKYy2pAXxmG4omyFvrjo2I,63232 +scipy/linalg/cython_lapack.cpython-310-darwin.so,sha256=xi5S91EbrGY-Tdv4i7076iekSKPoQA5pJr-UGiGhXms,675328 +scipy/linalg/cython_lapack.pxd,sha256=P3BMEOCHBOopT4ijb3NtNXJMyYXBp_j5LiNnXAmGKZw,192579 +scipy/linalg/cython_lapack.pyx,sha256=Zkb13Sh2Yq98ul6EPQNPOdpcfk73No53Xge2JpSmtn4,688160 +scipy/linalg/decomp.py,sha256=2GO63DouH59OirrwuOdp1OsunrRrQqpUp_rh9cpzxAg,1057 +scipy/linalg/decomp_cholesky.py,sha256=GqKhTklJCBpycKC_hSTwMJ73t6HS4GdOItHbXzjLRb8,917 +scipy/linalg/decomp_lu.py,sha256=FS7TGi24g8Q33UBJagevgpbXB7i887Yk65VbKMLOF0U,856 +scipy/linalg/decomp_qr.py,sha256=wwe2u4Fn6IAOqO50_WTCQrwPKrhPzIpbxkqfPER4WpI,796 +scipy/linalg/decomp_schur.py,sha256=qTcDsoWVD4viq7eySdcIGaixEPuyvoLh0LESLzrzazI,882 +scipy/linalg/decomp_svd.py,sha256=KoBb6aFnHzFkHTi_DIbD5L3rhBFvwcZ6Tb5ROHayfhA,850 +scipy/linalg/flinalg.py,sha256=q4wlBcwHjfEYhUJ-qjI0FSpnb2-rjLPTqKFDu2fMGNw,677 
+scipy/linalg/interpolative.py,sha256=tqgLvf-x0TeAX0Nd2ipyS8fEMcXXscHXkQAuaChNmCY,32091 +scipy/linalg/lapack.py,sha256=B-sC0kfbRO7zON2Iji4WlSBzuuRpuIbWJJjiROHOBRA,15626 +scipy/linalg/matfuncs.py,sha256=yj2Xh_u2Re1ytR3kuwK5L2o590JZANtEv_10Z92loa0,1098 +scipy/linalg/misc.py,sha256=YY_fRbg979uqbgyYrsjsDTnhkyLc1MjNjKNLDHBNeCs,799 +scipy/linalg/special_matrices.py,sha256=qaR-sECUZCHGI2G4tO4OOsC0kGs_C_AOW9YWnbBWCjo,1026 +scipy/linalg/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/linalg/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_basic.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_blas.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_cython_blas.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_cython_lapack.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_cythonized_array_utils.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_decomp.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_decomp_cholesky.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_decomp_cossin.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_decomp_ldl.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_decomp_polar.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_decomp_update.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_fblas.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_interpolative.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_lapack.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_matfuncs.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_matmul_toeplitz.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_misc.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_procrustes.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_sketches.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_solve_toeplitz.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_solvers.cpython-310.pyc,, +scipy/linalg/tests/__pycache__/test_special_matrices.cpython-310.pyc,, +scipy/linalg/tests/data/carex_15_data.npz,sha256=E_PhSRqHa79Z1-oQrSnB-bWZaiq5khbzHVv81lkBLB4,34462 +scipy/linalg/tests/data/carex_18_data.npz,sha256=Wfg5Rn8nUrffb7bUCUOW7dMqWSm3ZPf_oeZmZDHmysY,161487 +scipy/linalg/tests/data/carex_19_data.npz,sha256=OOj8ewQd8LI9flyhXq0aBl5kZ2Ee-ahIzH25P4Ct_Yc,34050 +scipy/linalg/tests/data/carex_20_data.npz,sha256=FOIi00pxGMcoShZ1xv7O7ne4TflRpca6Kl7p_zBU-h0,31231 +scipy/linalg/tests/data/carex_6_data.npz,sha256=GyoHNrVB6_XEubTADW2rKB5zyfuZE8biWBp4Gze2Avk,15878 +scipy/linalg/tests/data/gendare_20170120_data.npz,sha256=o9-rRR2dXCAkPg7YXNi2yWV2afuaD4O1vhZVhXg9VbU,2164 +scipy/linalg/tests/test_basic.py,sha256=WKjiUj-WFzjujT7eGvGAHdMjmhYX4cNG-OQgkIAjxYQ,64797 +scipy/linalg/tests/test_blas.py,sha256=o6BEfT7IQLvhciT3waCSZbTZCKoIWqf51QTemINUe14,40206 +scipy/linalg/tests/test_cython_blas.py,sha256=lj8hm4wptSgUVe5969QH0AsRLxAkHox36kd3y9WPksg,4217 +scipy/linalg/tests/test_cython_lapack.py,sha256=EDhd6pmXxX0U4xxl5buBGH2ZjHj-J7LGq6rw6CZKA0k,574 +scipy/linalg/tests/test_cythonized_array_utils.py,sha256=O1EKWxsYt6k1zMWjFlQhTndQVOhHsJlSm-bHfPMny1U,3840 +scipy/linalg/tests/test_decomp.py,sha256=g19D_YQ895IaiXdnBapptZM6twDQ5ZYNoSxCS-NAElA,108164 +scipy/linalg/tests/test_decomp_cholesky.py,sha256=O8kkqod4sj46DtNpeyuZrKQfMmJeU5sSRANXuUyP6PM,7265 +scipy/linalg/tests/test_decomp_cossin.py,sha256=5PF6FGd-WisBFeWLJqKmgbbIdWedJ-skZ9NevCM5x1k,5772 +scipy/linalg/tests/test_decomp_ldl.py,sha256=8bbKacJFfH_x7RJm6AhfS434c3Plu1xI7ElW_j0uLi4,4979 
+scipy/linalg/tests/test_decomp_polar.py,sha256=5x5vz9rJE2U2nvo0kx6xMX5Z9OcnqxayPZvAd4dwsUQ,2646 +scipy/linalg/tests/test_decomp_update.py,sha256=U1333Q_d13QnUeiXcQkJsE_rBJq6olHXu-6K3nUmEHg,68486 +scipy/linalg/tests/test_fblas.py,sha256=TIdXGmuvQ_na6eMlq7k4UdCELNWWDa7VG4imiyrSC0I,18685 +scipy/linalg/tests/test_interpolative.py,sha256=cAx8lJhE9YH-mXgC-Ltf4xv4nDhq0m0jq65tRkIut1g,8956 +scipy/linalg/tests/test_lapack.py,sha256=LsvtBfQIDf_pWe_aa1J8CSfFkgMLsc1u7roYEPxI6A8,125052 +scipy/linalg/tests/test_matfuncs.py,sha256=SLToErp3Mj1ujgg9QY5JKATMtyMFvy_u0yLE1ZkMqm0,38696 +scipy/linalg/tests/test_matmul_toeplitz.py,sha256=Wd9T03zZRwX3M3ppkhYJiJbkWZ_xop4VKj57TjeozUs,3870 +scipy/linalg/tests/test_misc.py,sha256=HP9jfKohbJIaKVcBqov9hAOHYk5dZck497-V5DMHe6E,76 +scipy/linalg/tests/test_procrustes.py,sha256=WkNNarBf69izBmlOhu4-u0eWdzkSzYHQuDZh-w89fOU,6758 +scipy/linalg/tests/test_sketches.py,sha256=FVEcNV43JteZZU7GDdBjtl-_alYDimxnjgKvpmtzVsI,3960 +scipy/linalg/tests/test_solve_toeplitz.py,sha256=KuTAYh-8MRWjaHclgQuIaBBx8IBTGEzXgZnhM_gjWxo,4010 +scipy/linalg/tests/test_solvers.py,sha256=2OkwSyCRE8Z-K6UgNlNIR6n95935DA8GXsVUYw3K2kw,31074 +scipy/linalg/tests/test_special_matrices.py,sha256=9cRvhLdK5udWkN_WIYmJ5rawGIFV_vSIYwO0dSEB4AE,26710 +scipy/misc/__init__.py,sha256=CdX9k6HUYu_cqVF4l2X5h1eqd9xUCuKafO_0aIY5RNE,1726 +scipy/misc/__pycache__/__init__.cpython-310.pyc,, +scipy/misc/__pycache__/_common.cpython-310.pyc,, +scipy/misc/__pycache__/common.cpython-310.pyc,, +scipy/misc/__pycache__/doccer.cpython-310.pyc,, +scipy/misc/_common.py,sha256=ndBktpW3llbZYf6IwS3lT7wBZIqV7AZygw2m9UTqoTA,11120 +scipy/misc/ascent.dat,sha256=6KhJOUhEY6uAUa7cW0CqJiqzOpHWRYps0TxqHK1aAj0,527630 +scipy/misc/common.py,sha256=BM-V8TKsvDKG_EtDRE4iIw8Of1q4U6JOwl7WSj6-1GI,869 +scipy/misc/doccer.py,sha256=D-G2jEalH4nXXlDEfZ59Ao9aj1_1t2SIb5ZlW9NHONE,766 +scipy/misc/ecg.dat,sha256=8grTNl-5t_hF0OXEi2_mcIE3fuRmw6Igt_afNciVi68,119035 +scipy/misc/face.dat,sha256=nYsLTQgTE-K0hXSMdwRy5ale0XOBRog9hMcDBJPoKIY,1581821 +scipy/misc/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/misc/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/misc/tests/__pycache__/test_common.cpython-310.pyc,, +scipy/misc/tests/__pycache__/test_config.cpython-310.pyc,, +scipy/misc/tests/__pycache__/test_doccer.cpython-310.pyc,, +scipy/misc/tests/test_common.py,sha256=0h_qT7hwQnqx4Oc6ccvM-U79EkbXPq5LNlC3QSvR88M,833 +scipy/misc/tests/test_config.py,sha256=j1Ppp6DCZy9wMxTmBEGxq4MScvsQXTQk7268EnNnPFQ,1244 +scipy/misc/tests/test_doccer.py,sha256=V1B5Z-XfIQFiSyRNo3PXG-AQfToFmoQ1oOBGjxK2zmo,3738 +scipy/ndimage/__init__.py,sha256=WKSnd4UmzibmbEZV-Sw31c9u7qDOa6WDqB7KMVcRIOU,5155 +scipy/ndimage/__pycache__/__init__.cpython-310.pyc,, +scipy/ndimage/__pycache__/_filters.cpython-310.pyc,, +scipy/ndimage/__pycache__/_fourier.cpython-310.pyc,, +scipy/ndimage/__pycache__/_interpolation.cpython-310.pyc,, +scipy/ndimage/__pycache__/_measurements.cpython-310.pyc,, +scipy/ndimage/__pycache__/_morphology.cpython-310.pyc,, +scipy/ndimage/__pycache__/_ni_docstrings.cpython-310.pyc,, +scipy/ndimage/__pycache__/_ni_support.cpython-310.pyc,, +scipy/ndimage/__pycache__/filters.cpython-310.pyc,, +scipy/ndimage/__pycache__/fourier.cpython-310.pyc,, +scipy/ndimage/__pycache__/interpolation.cpython-310.pyc,, +scipy/ndimage/__pycache__/measurements.cpython-310.pyc,, +scipy/ndimage/__pycache__/morphology.cpython-310.pyc,, +scipy/ndimage/_ctest.cpython-310-darwin.so,sha256=jKqB8CqimxdJPdIZMp9Bq6wlh6z5Ya1JbLYtOdQsisc,51081 
+scipy/ndimage/_cytest.cpython-310-darwin.so,sha256=HyLcwCbF6pg1kx5l80ejTSWy1S1NjRB8X3BAmT76_0Q,80266 +scipy/ndimage/_filters.py,sha256=R4A5u_fxFvH_6x_Y7Sj0yNtxlX-p9tXHrPE-USD9XDs,57442 +scipy/ndimage/_fourier.py,sha256=57ONJoo_8CmvhP5vCL7ijVvaK5U-gvp7LM0fL3YZ55o,11390 +scipy/ndimage/_interpolation.py,sha256=C1WSqoNdHJCy2DBGFAQWJnOYMB6cHG63xrteSatgovU,35437 +scipy/ndimage/_measurements.py,sha256=2gQhZgxNW_6gcU7NByEmE-KEbmvBCufIOE2rxRxy20Y,55935 +scipy/ndimage/_morphology.py,sha256=Vi9jTT-rIpw8Kvi3M1HNnNHqtHHyb9SOlKlK3eT72Y0,87519 +scipy/ndimage/_nd_image.cpython-310-darwin.so,sha256=VR4XaT5AbEnHvg9mSEeOMDLAD_88zg3J6TaAlwsX160,139548 +scipy/ndimage/_ni_docstrings.py,sha256=9DSB07qpihY6Gv_czcMN3BNzKNJ0rq9zISTtFIe3LPk,8516 +scipy/ndimage/_ni_label.cpython-310-darwin.so,sha256=q8lE0JtnEIXYQSpq0niCUK460FgCGp1gYtiH8JadqgQ,342956 +scipy/ndimage/_ni_support.py,sha256=Zcl8cNKmR-InX_Vp_0GWg11RvY3CenJCEfJxkANnlHM,3827 +scipy/ndimage/filters.py,sha256=aflHOtOL7ZL3EtpYctkPPW-iqJhH2pAhN4JENdgv4kI,1217 +scipy/ndimage/fourier.py,sha256=ftajFZrIBb9HBkLjDUT8PgdnrGuUCVskcca_FeswrFc,840 +scipy/ndimage/interpolation.py,sha256=56huW77Dwa3DizXKT87dd4Jpf7Qt0ygq5dYyeOFbzuM,933 +scipy/ndimage/measurements.py,sha256=9gi9FD20M8lPdLPkI8iA8VVdUZPbYKmjhlQfLxqfoPM,1015 +scipy/ndimage/morphology.py,sha256=tufpeFNW3Amik0BGaITG9NToqtZtR6ejFtx1s75MNQM,1188 +scipy/ndimage/tests/__init__.py,sha256=P1A2R3ZwnUUvIQA8ao8JuRbIc1xwUL8z2H7goUd5hFM,427 +scipy/ndimage/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/ndimage/tests/__pycache__/test_c_api.cpython-310.pyc,, +scipy/ndimage/tests/__pycache__/test_datatypes.cpython-310.pyc,, +scipy/ndimage/tests/__pycache__/test_filters.cpython-310.pyc,, +scipy/ndimage/tests/__pycache__/test_fourier.cpython-310.pyc,, +scipy/ndimage/tests/__pycache__/test_interpolation.cpython-310.pyc,, +scipy/ndimage/tests/__pycache__/test_measurements.cpython-310.pyc,, +scipy/ndimage/tests/__pycache__/test_morphology.cpython-310.pyc,, +scipy/ndimage/tests/__pycache__/test_splines.cpython-310.pyc,, +scipy/ndimage/tests/data/label_inputs.txt,sha256=JPbEnncwUyhlAAv6grN8ysQW9w9M7ZSIn_NPopqU7z4,294 +scipy/ndimage/tests/data/label_results.txt,sha256=Cf2_l7FCWNjIkyi-XU1MaGzmLnf2J7NK2SZ_10O-8d0,4309 +scipy/ndimage/tests/data/label_strels.txt,sha256=AU2FUAg0WghfvnPDW6lhMB1kpNdfv3coCR8blcRNBJ8,252 +scipy/ndimage/tests/dots.png,sha256=sgtW-tx0ccBpTT6BSNniioPXlnusFr-IUglK_qOVBBQ,2114 +scipy/ndimage/tests/test_c_api.py,sha256=EPNsGMHzZHqd9jETd9Pw3gOQvo43On-jH5_4CJzf0S0,3476 +scipy/ndimage/tests/test_datatypes.py,sha256=UCYf_2mKXeZHxUsBRCAbadB1ojEnKimbuV499h0Jb7E,2742 +scipy/ndimage/tests/test_filters.py,sha256=tu9arp33w8c7WelWbcZerDmF6fYfQj5H8RnRDz7ofOc,84085 +scipy/ndimage/tests/test_fourier.py,sha256=5ykl99Q0o44pVqB4WUDdzmoMrEhjw0Fs9U9LemcqHAo,6668 +scipy/ndimage/tests/test_interpolation.py,sha256=6Y6hgQm6PPCKFLswWE1RCzK0HK8wgr9UvcbmQcGO2Jk,54798 +scipy/ndimage/tests/test_measurements.py,sha256=EQHm61KX66vOjQsm4TkAalYOi4PFRWdUCcNOKUhHM0I,47805 +scipy/ndimage/tests/test_morphology.py,sha256=r7PHBbFLn08tCqPvcViVsfv77TCuy7nStV-8Pg_BCM0,105591 +scipy/ndimage/tests/test_splines.py,sha256=KXQaTR1Odj45IQB4pfn8zWpWq26G2vPuFQxgc9qDYRk,2207 +scipy/odr/__init__.py,sha256=CErxMJ0yBfu_cvCoKJMu9WjqUaohLIqqf228Gm9XWJI,4325 +scipy/odr/__odrpack.cpython-310-darwin.so,sha256=GaqGXE68T4RwOSSAHuCaeRp192Ts7iNNyngJ-FJtOR0,223648 +scipy/odr/__pycache__/__init__.cpython-310.pyc,, +scipy/odr/__pycache__/_add_newdocs.cpython-310.pyc,, +scipy/odr/__pycache__/_models.cpython-310.pyc,, +scipy/odr/__pycache__/_odrpack.cpython-310.pyc,, 
+scipy/odr/__pycache__/models.cpython-310.pyc,, +scipy/odr/__pycache__/odrpack.cpython-310.pyc,, +scipy/odr/_add_newdocs.py,sha256=zX9DJ9c4fJX-6RU9xYZEJVxlO72wmNxV6_aTKSQjoGk,1090 +scipy/odr/_models.py,sha256=tfOLgqnV4LR3VKi7NAg1g1Jp_Zw8lG_PA5BHwU_pTH0,7800 +scipy/odr/_odrpack.py,sha256=B4cL2RIuMzPDpLOQ5ZqlvHF3qEUyQWBKifNY77pR_Wg,42071 +scipy/odr/models.py,sha256=EuQE3U_-9jUSMATZySrKiUXiB-WxgIBjI8kTrVHOSKw,793 +scipy/odr/odrpack.py,sha256=nWDtxoCtRhx35KJPu2-UgH7YYuI_RxlwG4VZJqS8Ngo,837 +scipy/odr/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/odr/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/odr/tests/__pycache__/test_odr.cpython-310.pyc,, +scipy/odr/tests/test_odr.py,sha256=bupweXZsix6BVprTAQmSMXoH-sVteK6fQjsPZGu0A2o,19779 +scipy/optimize.pxd,sha256=kFYBK9tveJXql1KXuOkKGvj4Fu67GmuyRP5kMVkMbyk,39 +scipy/optimize/README,sha256=q7vAotiT7affj-8xYhiy0g9r0fQBE2caLUnvjqjgSv4,3416 +scipy/optimize/__init__.py,sha256=pEIFKGBrADiF4VTbWCPjwz9pgK9ORKlTSr8XE4E2z7M,12794 +scipy/optimize/__nnls.cpython-310-darwin.so,sha256=am_FZVcFIjn-77kLwCJkIoNo1BlpGgLETKtu5E9drOU,90624 +scipy/optimize/__nnls.pyi,sha256=fyGifwzaKwkeNdCWuW7au4oghq-H3pvS3Utng4UpBrE,448 +scipy/optimize/__pycache__/__init__.cpython-310.pyc,, +scipy/optimize/__pycache__/_basinhopping.cpython-310.pyc,, +scipy/optimize/__pycache__/_cobyla_py.cpython-310.pyc,, +scipy/optimize/__pycache__/_constraints.cpython-310.pyc,, +scipy/optimize/__pycache__/_differentiable_functions.cpython-310.pyc,, +scipy/optimize/__pycache__/_differentialevolution.cpython-310.pyc,, +scipy/optimize/__pycache__/_direct_py.cpython-310.pyc,, +scipy/optimize/__pycache__/_dual_annealing.cpython-310.pyc,, +scipy/optimize/__pycache__/_hessian_update_strategy.cpython-310.pyc,, +scipy/optimize/__pycache__/_lbfgsb_py.cpython-310.pyc,, +scipy/optimize/__pycache__/_linesearch.cpython-310.pyc,, +scipy/optimize/__pycache__/_linprog.cpython-310.pyc,, +scipy/optimize/__pycache__/_linprog_doc.cpython-310.pyc,, +scipy/optimize/__pycache__/_linprog_highs.cpython-310.pyc,, +scipy/optimize/__pycache__/_linprog_ip.cpython-310.pyc,, +scipy/optimize/__pycache__/_linprog_rs.cpython-310.pyc,, +scipy/optimize/__pycache__/_linprog_simplex.cpython-310.pyc,, +scipy/optimize/__pycache__/_linprog_util.cpython-310.pyc,, +scipy/optimize/__pycache__/_milp.cpython-310.pyc,, +scipy/optimize/__pycache__/_minimize.cpython-310.pyc,, +scipy/optimize/__pycache__/_minpack_py.cpython-310.pyc,, +scipy/optimize/__pycache__/_nnls.cpython-310.pyc,, +scipy/optimize/__pycache__/_nonlin.cpython-310.pyc,, +scipy/optimize/__pycache__/_numdiff.cpython-310.pyc,, +scipy/optimize/__pycache__/_optimize.cpython-310.pyc,, +scipy/optimize/__pycache__/_qap.cpython-310.pyc,, +scipy/optimize/__pycache__/_remove_redundancy.cpython-310.pyc,, +scipy/optimize/__pycache__/_root.cpython-310.pyc,, +scipy/optimize/__pycache__/_root_scalar.cpython-310.pyc,, +scipy/optimize/__pycache__/_shgo.cpython-310.pyc,, +scipy/optimize/__pycache__/_slsqp_py.cpython-310.pyc,, +scipy/optimize/__pycache__/_spectral.cpython-310.pyc,, +scipy/optimize/__pycache__/_tnc.cpython-310.pyc,, +scipy/optimize/__pycache__/_trustregion.cpython-310.pyc,, +scipy/optimize/__pycache__/_trustregion_dogleg.cpython-310.pyc,, +scipy/optimize/__pycache__/_trustregion_exact.cpython-310.pyc,, +scipy/optimize/__pycache__/_trustregion_krylov.cpython-310.pyc,, +scipy/optimize/__pycache__/_trustregion_ncg.cpython-310.pyc,, +scipy/optimize/__pycache__/_tstutils.cpython-310.pyc,, +scipy/optimize/__pycache__/_zeros_py.cpython-310.pyc,, 
+scipy/optimize/__pycache__/cobyla.cpython-310.pyc,, +scipy/optimize/__pycache__/lbfgsb.cpython-310.pyc,, +scipy/optimize/__pycache__/linesearch.cpython-310.pyc,, +scipy/optimize/__pycache__/minpack.cpython-310.pyc,, +scipy/optimize/__pycache__/minpack2.cpython-310.pyc,, +scipy/optimize/__pycache__/moduleTNC.cpython-310.pyc,, +scipy/optimize/__pycache__/nonlin.cpython-310.pyc,, +scipy/optimize/__pycache__/optimize.cpython-310.pyc,, +scipy/optimize/__pycache__/slsqp.cpython-310.pyc,, +scipy/optimize/__pycache__/tnc.cpython-310.pyc,, +scipy/optimize/__pycache__/zeros.cpython-310.pyc,, +scipy/optimize/_basinhopping.py,sha256=E3RK0RqvqOxak8cq1Jne2BaUrmvIM0nj4pu7UUR_rtA,29911 +scipy/optimize/_bglu_dense.cpython-310-darwin.so,sha256=YpKi-IdWfCxlHHG5ImreopJJUB1D4SBFtOhmAKlXW5A,297486 +scipy/optimize/_cobyla.cpython-310-darwin.so,sha256=o2d1_jYMVQrypymaYqM-VFSnJizNizityFPlsK6N4Ek,108784 +scipy/optimize/_cobyla_py.py,sha256=dKXCUT4O7WYhk3CQ_fgB7Sr7vu4Pn_OH9xi2wKHalTw,10184 +scipy/optimize/_constraints.py,sha256=BUAwllKXWzTkLYVZChS5fY2nkoDivquONZWHgsHukOA,22017 +scipy/optimize/_differentiable_functions.py,sha256=vpi8XCbBFAYgfA2DjSO7CfGWFIQvBFN-v-9g25vfbhk,22719 +scipy/optimize/_differentialevolution.py,sha256=AsGDj8nKndqhi0Yawzx_5CHnvnK9BN3ZWI90hD7NjHw,73494 +scipy/optimize/_direct.cpython-310-darwin.so,sha256=LxuO3BoNU6vsqfRRZcWc_pGTJ0_VF-yoYTc6iaLaSOw,69082 +scipy/optimize/_direct_py.py,sha256=UewdYnj8R9_6hkS1CeGSGEGNL5UL9KO3UX-xLOELLFw,11860 +scipy/optimize/_dual_annealing.py,sha256=5NEzcHEPi7VEeiu_PljsYP_qeOj6SZWIE84exskKmmk,30149 +scipy/optimize/_group_columns.cpython-310-darwin.so,sha256=zhr4IMGE6mk5tY5wpblVMy3iKZ1COm-rtM8Pw7-I6mY,94689 +scipy/optimize/_hessian_update_strategy.py,sha256=PJcNU7ERwtm_WTMzXZzZsBowDtWia4kHAEuvzbkD8fg,15830 +scipy/optimize/_highs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/optimize/_highs/__pycache__/__init__.cpython-310.pyc,, +scipy/optimize/_highs/_highs_constants.cpython-310-darwin.so,sha256=05EdwqZFFwM-86-_qDZNIGpI61Kg414kPawp-dEENH0,59699 +scipy/optimize/_highs/_highs_wrapper.cpython-310-darwin.so,sha256=Hct6NCbb7FSuhOx4LhP3GvGGjUDu9QF7F7deuj6SZac,2972033 +scipy/optimize/_highs/src/cython/HConst.pxd,sha256=3a2mEDSx55TBAUYOuptSQYQzYTAHgP9c3anJGBlB7DE,5537 +scipy/optimize/_highs/src/cython/Highs.pxd,sha256=YKQXbDfI6gSg0X8j0iQrkBjX3HTb6J2ZIoFguGLkao8,2123 +scipy/optimize/_highs/src/cython/HighsIO.pxd,sha256=K7KXJoGm4s_cWtW4tQNByhujVF7SMIjDi86jWILzNoA,731 +scipy/optimize/_highs/src/cython/HighsInfo.pxd,sha256=RUePUSonorGWf2DYHjvpUc-ZhVQI6LDiro5eZftZlOg,761 +scipy/optimize/_highs/src/cython/HighsLp.pxd,sha256=_bcpjF7o-rK8gguxtXOF30XfVUrLjhLal90mAHumwAs,1132 +scipy/optimize/_highs/src/cython/HighsLpUtils.pxd,sha256=be2KJVDOjTNYH8AfXqDdp7HPB0EiUWL8I1RGVqCeKz4,315 +scipy/optimize/_highs/src/cython/HighsModelUtils.pxd,sha256=apvVhKle97Az82zThiHvCSdV9Bk2AsoVgx3rQdAsU2o,361 +scipy/optimize/_highs/src/cython/HighsOptions.pxd,sha256=xwBux1AmfTUtvLa3PJSdxCrdxeD851Cf4_ktkw7BnjA,3186 +scipy/optimize/_highs/src/cython/HighsRuntimeOptions.pxd,sha256=GyX_sgvBmUhzpDJBsex6LWeKesV1L3fbGdH166j6K68,287 +scipy/optimize/_highs/src/cython/HighsStatus.pxd,sha256=s4nC9ViGKPTRD4VvqbZ5dZohVCtDqQPze_5svik6GjQ,365 +scipy/optimize/_highs/src/cython/SimplexConst.pxd,sha256=I6RjfzaBahKI9Eerg52c5tYU1gS1ZA2DWfYfWefgWVE,5044 +scipy/optimize/_highs/src/cython/highs_c_api.pxd,sha256=ut0M5I_pG2D6K3fUHzU9WD67Y1VMZuQOKhLLjv0umqo,358 +scipy/optimize/_lbfgsb.cpython-310-darwin.so,sha256=kxXARF0Fd_CC74VbMU82pysuDEk5mnbY8Xhg0V-Nx-A,126304 
+scipy/optimize/_lbfgsb_py.py,sha256=uCrUiUdc_o0eW6n2np9mjnRW5L4J30AAx-Dh7XAYeXo,18643 +scipy/optimize/_linesearch.py,sha256=-DVtW9UYL3Oumt7_DP8i8wVMmz9jcpUvxxiy-SB5wU8,26582 +scipy/optimize/_linprog.py,sha256=9RAOhcMbgZtRvrRexfrYF3uKt6K0-POxAk1rvpU4UYY,29443 +scipy/optimize/_linprog_doc.py,sha256=kErL9JPKJr2hNOFomI0LA7UhxuoB6jEEzgfiiC1UScc,61967 +scipy/optimize/_linprog_highs.py,sha256=loTGHnHF_jZmEvGxCKLh1cmes025_X-OllqEJN3Jnho,17464 +scipy/optimize/_linprog_ip.py,sha256=R761IX5jLYttiMvyc9rMw7XPIVzOGye_rzby0CV7gdo,45913 +scipy/optimize/_linprog_rs.py,sha256=JruGeJ2uPVQUxN_A5A_4WuRTQfx4Mz4xoH27HNgGcFI,23149 +scipy/optimize/_linprog_simplex.py,sha256=WIlD3TQ6iBkbIeG4cBAwJW1pknMqt_mKnjBTKoTdNsg,24727 +scipy/optimize/_linprog_util.py,sha256=4UxdfkqjnsQ4bQZa59bCaVrkIXC6vox5tj_zmbcfgg4,62528 +scipy/optimize/_lsap.cpython-310-darwin.so,sha256=85oF_Ohuy3JZ_dJ10p0pl2UqdPs1ZyBxpiNETV02WKU,53384 +scipy/optimize/_lsq/__init__.py,sha256=Yk4FSVEqe1h-qPqVX7XSkQNBYDtZO2veTmMAebCxhIQ,172 +scipy/optimize/_lsq/__pycache__/__init__.cpython-310.pyc,, +scipy/optimize/_lsq/__pycache__/bvls.cpython-310.pyc,, +scipy/optimize/_lsq/__pycache__/common.cpython-310.pyc,, +scipy/optimize/_lsq/__pycache__/dogbox.cpython-310.pyc,, +scipy/optimize/_lsq/__pycache__/least_squares.cpython-310.pyc,, +scipy/optimize/_lsq/__pycache__/lsq_linear.cpython-310.pyc,, +scipy/optimize/_lsq/__pycache__/trf.cpython-310.pyc,, +scipy/optimize/_lsq/__pycache__/trf_linear.cpython-310.pyc,, +scipy/optimize/_lsq/bvls.py,sha256=7u5B8LfUbv3ZRZ8DAZKuDTSNRfDEBmTsn25VZtMMsKk,5195 +scipy/optimize/_lsq/common.py,sha256=mzYK9KDZeFtIUeBHrjrp1Gbx3NeaALwQ6Wj9-33QGRg,20606 +scipy/optimize/_lsq/dogbox.py,sha256=97htRlr-Yt-u4Ob3ks7avAMdnjJsO83uHUMjMYrhyjc,11682 +scipy/optimize/_lsq/givens_elimination.cpython-310-darwin.so,sha256=2BgprQGV5Bwm1gpTP2g-q95YjXYH9Hs6nxrWeZsfCjw,166645 +scipy/optimize/_lsq/least_squares.py,sha256=pSNhQP063G8wwQUQEVqiivlzKLrIBDWEJ_5VoneKCzM,39531 +scipy/optimize/_lsq/lsq_linear.py,sha256=j0FZ0JXhN6S4KzozB4f7PBp0WxnmWV2QvoEWSPkuM2Y,14847 +scipy/optimize/_lsq/trf.py,sha256=W0gc6j9vLIqZvDMOhM5BxubrWsd5PQpdncUJB88ol44,19479 +scipy/optimize/_lsq/trf_linear.py,sha256=jIs7WviOu_8Kpb7sTln8W7YLgkcndv0eGIP15g_mC4g,7642 +scipy/optimize/_milp.py,sha256=moAQzWyIq_y_Sbobzjayc66boGvWXWGtUwpg66yuNXc,14923 +scipy/optimize/_minimize.py,sha256=o7qAlosm9jJmPEcciDN-KLfgBODUxHMjS-BRg9dD9nY,45960 +scipy/optimize/_minpack.cpython-310-darwin.so,sha256=AOHosNK5tk1NbX8kBvfvVescSQTjMCOs-MDe5cHQofU,87515 +scipy/optimize/_minpack2.cpython-310-darwin.so,sha256=2fifxNQfTNIRPQ7qZFtWCN-Pxd9tIgJ0qx0PmDsbEuc,72812 +scipy/optimize/_minpack_py.py,sha256=oVLlB8oNkSgCvm0fD7l76DaSKuymKms267qQ5wmru6M,38340 +scipy/optimize/_moduleTNC.cpython-310-darwin.so,sha256=seGBK8K_TBOM6Ys0tDKwyh0ghooJebzcOjLmWndIYIc,135933 +scipy/optimize/_nnls.py,sha256=LSyKaugiuKDUf0Lrsep8OUaPeuUPReD5y5BbUaLAx9s,2300 +scipy/optimize/_nonlin.py,sha256=eW9U5cb0gXNxSTieb5G4c5otm-K87pRH8DaIsMyYdu4,49030 +scipy/optimize/_numdiff.py,sha256=o29kYYYEMXuqfigeXNcKNohnJaIsqfk8sA3IWWgr58g,28281 +scipy/optimize/_optimize.py,sha256=FlRWwdPH4xfOBcj0QBooS-s52sNwwvlIiyAfpHztL64,139726 +scipy/optimize/_qap.py,sha256=UkIA7YMjoaw00Lj_tdZ4u9VjSPNOmMDINPMK9GTv3MM,27658 +scipy/optimize/_remove_redundancy.py,sha256=Z-bdCyBuanuMzEhKGR-6Rs03b4L9uK7dKfWIdW1LA0E,18767 +scipy/optimize/_root.py,sha256=_V6gD8vJNPKnT3qrEyC0YYSg08TSOA9qcPIvfNFh-iU,28276 +scipy/optimize/_root_scalar.py,sha256=SiFpEx-bmQN8z8ih3chfmbt-7red0-DtwKT7_VVaEq8,18465 +scipy/optimize/_shgo.py,sha256=ZK0VYBt7IOfi2fjM273HhypMHeti5KbiYvhfAzTjiRE,60887 
+scipy/optimize/_shgo_lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/optimize/_shgo_lib/__pycache__/__init__.cpython-310.pyc,, +scipy/optimize/_shgo_lib/__pycache__/triangulation.cpython-310.pyc,, +scipy/optimize/_shgo_lib/triangulation.py,sha256=TpXFU0HDdYDxE52KakU27EkiK308kOxr7rYGabyIPuo,21439 +scipy/optimize/_slsqp.cpython-310-darwin.so,sha256=Av3ZRCQxwlFtn58DXR7J21O6pUGRfIdYA3O43REnhQY,89961 +scipy/optimize/_slsqp_py.py,sha256=VPQzFxE-L2lD-ghFTvVcm6wkPR54LiH4fb-8NUNWgvI,18767 +scipy/optimize/_spectral.py,sha256=felsd958JeNcy-mmGAw91MSt2AsuKVdMLf2h2_fmgvU,7923 +scipy/optimize/_tnc.py,sha256=Dp5GTXv81TsqnPXXGvYvSCgqLS9-52P16VJo6Dxh9nY,17304 +scipy/optimize/_trlib/__init__.py,sha256=cNGWE1VffijqhPtSaqwagtBJvjJK-XrJ6K80RURLd48,524 +scipy/optimize/_trlib/__pycache__/__init__.cpython-310.pyc,, +scipy/optimize/_trlib/_trlib.cpython-310-darwin.so,sha256=ck5d4NNiEEqevg9aFkW-HwcysB1jfTlQuxHFtGSjTL8,303536 +scipy/optimize/_trustregion.py,sha256=35raAmEKyYGWxw6QciVxLKnIXhMqho8a7fW65beLPKQ,10651 +scipy/optimize/_trustregion_constr/__init__.py,sha256=c8J2wYGQZr9WpLIT4zE4MUgEj4YNbHEWYYYsFmxAeXI,180 +scipy/optimize/_trustregion_constr/__pycache__/__init__.cpython-310.pyc,, +scipy/optimize/_trustregion_constr/__pycache__/canonical_constraint.cpython-310.pyc,, +scipy/optimize/_trustregion_constr/__pycache__/equality_constrained_sqp.cpython-310.pyc,, +scipy/optimize/_trustregion_constr/__pycache__/minimize_trustregion_constr.cpython-310.pyc,, +scipy/optimize/_trustregion_constr/__pycache__/projections.cpython-310.pyc,, +scipy/optimize/_trustregion_constr/__pycache__/qp_subproblem.cpython-310.pyc,, +scipy/optimize/_trustregion_constr/__pycache__/report.cpython-310.pyc,, +scipy/optimize/_trustregion_constr/__pycache__/tr_interior_point.cpython-310.pyc,, +scipy/optimize/_trustregion_constr/canonical_constraint.py,sha256=690VxTb7JJ9RzGwa-LN2hASKlqQPmulyEDZA7I-XyLY,12538 +scipy/optimize/_trustregion_constr/equality_constrained_sqp.py,sha256=5NiEruWnhYL2zhhgZsuLMn-yb5NOFs_bX3sm5giG7I8,8592 +scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py,sha256=iqoRHz6J31UbAbLOZ_r39sA6bzA7KXMKN_yTCfsncLU,24890 +scipy/optimize/_trustregion_constr/projections.py,sha256=2V9GysEHMzuYcE93CpnK2Q5iwQQBIc1rbtOJJBIZUZQ,13105 +scipy/optimize/_trustregion_constr/qp_subproblem.py,sha256=EtAhRcEtSnGsEeEZ2HGEzm-7r0pnXMCgl9NemKWvdzg,22592 +scipy/optimize/_trustregion_constr/report.py,sha256=8Iyb1jm3xwUbAAny3KIDfo-YDaijPEvNqU-7GmB3_mQ,1858 +scipy/optimize/_trustregion_constr/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/optimize/_trustregion_constr/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/optimize/_trustregion_constr/tests/__pycache__/test_canonical_constraint.cpython-310.pyc,, +scipy/optimize/_trustregion_constr/tests/__pycache__/test_projections.cpython-310.pyc,, +scipy/optimize/_trustregion_constr/tests/__pycache__/test_qp_subproblem.cpython-310.pyc,, +scipy/optimize/_trustregion_constr/tests/__pycache__/test_report.cpython-310.pyc,, +scipy/optimize/_trustregion_constr/tests/test_canonical_constraint.py,sha256=zVPxZDa0WkG_tw9Fm_eo_JzsQ8rQrUJyQicq4J12Nd4,9869 +scipy/optimize/_trustregion_constr/tests/test_projections.py,sha256=P4GZxs_6RJnlb6OXJX-wnvFqzFeQAgs9qHvnHxjvD4o,8820 +scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py,sha256=vrP52LzzAA3D8T5fhVzQv9Eo-G9t3F8qfrNuq7XrzQM,27719 +scipy/optimize/_trustregion_constr/tests/test_report.py,sha256=M3e3flV1TB0g2_cUViF0Fd_VWb81kuApfT0aC9bmrvU,1088 
+scipy/optimize/_trustregion_constr/tr_interior_point.py,sha256=fXuyoZ5WmIwce2EA-Gdld7S2YrM7usImXWBNk3DnURw,13802 +scipy/optimize/_trustregion_dogleg.py,sha256=HS783IZYHE-EEuF82c4rkFp9u3MNKUdCeynZ6ap8y8s,4389 +scipy/optimize/_trustregion_exact.py,sha256=VOk6Se5UrBGN3nqiRnSynQU_kftWItJLmw9uzJtw6Xw,15407 +scipy/optimize/_trustregion_krylov.py,sha256=KGdudJsoXXROXAc82aZ8ACojD3rimvyx5PYitbo4UzQ,3030 +scipy/optimize/_trustregion_ncg.py,sha256=y7b7QjFBfnB1wDtbwnvKD9DYpz7y7NqVrJ9RhNPcipw,4580 +scipy/optimize/_tstutils.py,sha256=Fk1jlD75jHH-5r9HeNCLRMm-aoYya2wkmwmdmKoAUGY,29475 +scipy/optimize/_zeros.cpython-310-darwin.so,sha256=6zkUxEGrZfbEcDkmS0e39uNtSpV1lMwOAtTRBZPhiMc,51481 +scipy/optimize/_zeros_py.py,sha256=cdMQEi1kF31zU8e5PEMfT5xuFFUVTqWQkMqtU88T_EA,51228 +scipy/optimize/cobyla.py,sha256=sJD7CvPLBZgAU1y0JsdB6BtPAJE1hBViTdAEtNyj0no,840 +scipy/optimize/cython_optimize.pxd,sha256=UQVKui1CYlTt1HS9ydLPLdgKNWH_-phR8fy4Rq2eEno,428 +scipy/optimize/cython_optimize/__init__.py,sha256=WZI65aSGqyqC3HJaxgmQuK6lljqdqTA4jw3WstIQ9HQ,4831 +scipy/optimize/cython_optimize/__pycache__/__init__.cpython-310.pyc,, +scipy/optimize/cython_optimize/_zeros.cpython-310-darwin.so,sha256=5fAwXtBdVWib2DKBSsTmQSfE_Z9qaGvBaYHsRrS17FA,102185 +scipy/optimize/cython_optimize/_zeros.pxd,sha256=wTtD2hT2XHUhSHem5glipOQNY67vExQxzxncdQPtbJ4,1194 +scipy/optimize/cython_optimize/c_zeros.pxd,sha256=9PVpBHg4R8ItYnwRX_lD_H7SHS_hSJzvtPY0E2edOHE,1109 +scipy/optimize/lbfgsb.py,sha256=9bkq6iN1Gx6yPu-VE0K7bIMm1vsDhoccQf9waNmc7vQ,929 +scipy/optimize/linesearch.py,sha256=oYmcsZxSYrEH5XDI_kIbeVywN-yVHZbsJuDaOmCndUQ,1007 +scipy/optimize/minpack.py,sha256=tjMKdQWY6z4mQQ5G7nwy8i4eXWJjPSqfqMvfIuQntqU,1277 +scipy/optimize/minpack2.py,sha256=oFSeWNLqI8ca-Aa0Kk5F0DMdNUjHdryvPLhtPo_k83o,769 +scipy/optimize/moduleTNC.py,sha256=E43jvlDbe0G4glHXWRC8GsrTdVLIaPxVMP90Ir6U6gU,746 +scipy/optimize/nonlin.py,sha256=9z4Q0LQ6mbuQBozfw98N9FgTvoOKeIPdDhf7nU7lOYY,1418 +scipy/optimize/optimize.py,sha256=tJkFXkADd5aHUyaIMIIMJMKHX5UzCjhXFH-SGsj8wtA,1524 +scipy/optimize/slsqp.py,sha256=b6vja9q2sl50Kmv-VxsMkj3bNcariO8IBL3Q1KRMhrc,1044 +scipy/optimize/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/optimize/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test__basinhopping.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test__differential_evolution.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test__dual_annealing.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test__linprog_clean_inputs.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test__numdiff.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test__remove_redundancy.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test__root.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test__shgo.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test__spectral.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_cobyla.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_constraint_conversion.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_constraints.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_cython_optimize.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_differentiable_functions.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_direct.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_hessian_update_strategy.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_lbfgsb_hessinv.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_lbfgsb_setulb.cpython-310.pyc,, 
+scipy/optimize/tests/__pycache__/test_least_squares.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_linear_assignment.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_linesearch.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_linprog.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_lsq_common.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_lsq_linear.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_milp.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_minimize_constrained.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_minpack.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_nnls.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_nonlin.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_optimize.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_quadratic_assignment.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_regression.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_slsqp.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_tnc.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_trustregion.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_trustregion_exact.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_trustregion_krylov.cpython-310.pyc,, +scipy/optimize/tests/__pycache__/test_zeros.cpython-310.pyc,, +scipy/optimize/tests/test__basinhopping.py,sha256=iERgf2vX2bhuw-7bj-5vOxA21ZZnj6pSEL9o_Y9-WUE,16708 +scipy/optimize/tests/test__differential_evolution.py,sha256=spTUBZRSW5WSidUCNY7XA57CmNorNqqCp5_Wwh-5YpI,61413 +scipy/optimize/tests/test__dual_annealing.py,sha256=LoyGUSDfpawnDHuqH7Lhqu1lLncU2DVR3BEYm_eChj0,14315 +scipy/optimize/tests/test__linprog_clean_inputs.py,sha256=MDCIrO33d3jVd6swt3Wi156x1wxWC-cxdjueqFRMJH8,11106 +scipy/optimize/tests/test__numdiff.py,sha256=d_ktsoY1KHb_xGAZB3Dgng-68nIeUjCFxJp7GYjZsDg,31338 +scipy/optimize/tests/test__remove_redundancy.py,sha256=y_JpKtO_0N1O2-Q9FE7fbewzn_cFdzBxPrrCvxtTBSM,7533 +scipy/optimize/tests/test__root.py,sha256=2whCM-nKElXilngI5EE42GuF9-z8oEzsNqqcqMtFa74,2613 +scipy/optimize/tests/test__shgo.py,sha256=PcdLagMromdRfr-BGrJBsC5hxORYwg1DSaBvid88XkU,29034 +scipy/optimize/tests/test__spectral.py,sha256=JR6rTBUdhOYdgOmRBCA6sJ4ks0JTBYZ7aI6u6aiVUag,6519 +scipy/optimize/tests/test_cobyla.py,sha256=bw-zvlr620bR4dXDUNmcrTQwyNFtUTNuVikSArb_qhk,4179 +scipy/optimize/tests/test_constraint_conversion.py,sha256=rKTA_E0KQEjGlsrDa5RltxtNhjCBQ72d0pInF8M2EDk,11746 +scipy/optimize/tests/test_constraints.py,sha256=dgSeg8h_0Y-hmlitXmVdznF6At-V9mdwLVzoLcUQD2E,8390 +scipy/optimize/tests/test_cython_optimize.py,sha256=n-HccBWoUmmBWq_OsNrAVnt4QrdssIYm4PWG29Ocias,2638 +scipy/optimize/tests/test_differentiable_functions.py,sha256=KoU2GotR94yJgb0Pf4pDgKrwNNDP0X_NSd7HbmiHLFw,26154 +scipy/optimize/tests/test_direct.py,sha256=dUfsmTx9phFmlwv93UYgjYBoHh-iuWUrdc_KBn7jGlY,13152 +scipy/optimize/tests/test_hessian_update_strategy.py,sha256=zwfXj6jxvihE0vXU5pBeOyERCpimSZu6gXSLcm-FRfo,10112 +scipy/optimize/tests/test_lbfgsb_hessinv.py,sha256=rpJbiCUfgJrjp-xVe4JiXjVNe6-l8-s8uPqzKROgmJQ,1137 +scipy/optimize/tests/test_lbfgsb_setulb.py,sha256=w1a-RPnLnZRyaKUK4tWgFks7eO1pEgEmcWH038oepcc,3172 +scipy/optimize/tests/test_least_squares.py,sha256=wSVHy0AVN_mey0ur9xVJtKVJacODvJMn1ojILkO_nno,31773 +scipy/optimize/tests/test_linear_assignment.py,sha256=84d4YHCf9RzjYDKUujQe2GbudkP8dtlSpZtMBwCf_Oc,4085 +scipy/optimize/tests/test_linesearch.py,sha256=vtWDWIdHFzvKQyZ5vSntIPOGfM7JlcMqzXFuLbJBs3k,10791 
+scipy/optimize/tests/test_linprog.py,sha256=5jLH6V0FZxye7w2YWyPHEkNjw0eAcrPDQ02luYwtWH4,95566 +scipy/optimize/tests/test_lsq_common.py,sha256=alCLPPQB4mrxLIAo_rn7eg9xrCEH7DerNBozSimOQRA,9500 +scipy/optimize/tests/test_lsq_linear.py,sha256=jk1sneQrUahqIqTyq76ZIUtrdVfVX0y0Do09KYqWJR0,10342 +scipy/optimize/tests/test_milp.py,sha256=AwnZEpbRXLdjretaY-P0BRwK4YeUWDEZslnZOtDGXpk,13866 +scipy/optimize/tests/test_minimize_constrained.py,sha256=ph9e1hOPr3p-whDdEtIJC7fL-bXnJ_3rOuTa42-dRjU,25620 +scipy/optimize/tests/test_minpack.py,sha256=GW4DV_PKy9zbOUAQ_O_dbbarkB7SfSJHUCqsSNyt4-M,36498 +scipy/optimize/tests/test_nnls.py,sha256=VHlSu-AYWWgONgkRI7oGPXNzd5XuoLu4wRhp4dyAL9M,914 +scipy/optimize/tests/test_nonlin.py,sha256=6JUMvvGR9Pe4EX-b3nw28wF4t1FLGPLNltzOMjQI398,16949 +scipy/optimize/tests/test_optimize.py,sha256=kfsl-5X0Wc6pVtLLzaQXtnagmXZ22Rpw8IiGvxrrjm8,111180 +scipy/optimize/tests/test_quadratic_assignment.py,sha256=iZ6wJDGx4T0lDM2N5mjQhXwwTdIGGWrgCCGtLiMOC14,16309 +scipy/optimize/tests/test_regression.py,sha256=CSg8X-hq6-6jW8vki6aVfEFYRUGTWOg58silM1XNXbU,1077 +scipy/optimize/tests/test_slsqp.py,sha256=IRXdyEM9Yo1c6FYJhGDAptxExNq-BkTqJUJUVTX4J4E,23194 +scipy/optimize/tests/test_tnc.py,sha256=zMau_V2C9YuCVHdC2D3HahrPavjM2rhzUo1xyXWwbiI,13168 +scipy/optimize/tests/test_trustregion.py,sha256=HJtCc8Gdjznkzyn7Ei3XByBM_10pqv7VXgXBR9kCc8k,4701 +scipy/optimize/tests/test_trustregion_exact.py,sha256=lJ0RXXFvgqbDfWrjzm-6H1PNKHDLXPVEbDltbRHezNQ,12954 +scipy/optimize/tests/test_trustregion_krylov.py,sha256=K90fBdvxYKgsdl_lvopRf28nfcBN1CgrR-N2zjVXvhQ,6587 +scipy/optimize/tests/test_zeros.py,sha256=1w_yoE2wWU5hIuF1B03Tvfo4I1kNVC_zTdJbYyC5Adw,28439 +scipy/optimize/tnc.py,sha256=7HKQvI0end6nabnkAAtVcX8jMrvSCWi8CD-tBShfHkk,1148 +scipy/optimize/zeros.py,sha256=ybE9F-jqrlzpGrXW9DLGluOkWjPqlNJGmAyJyv0qIBY,1008 +scipy/signal/__init__.py,sha256=MhQZYpEurvBpEkA-xRAm-OiJ28zE2gX0T7aGP3KxEkw,15510 +scipy/signal/__pycache__/__init__.cpython-310.pyc,, +scipy/signal/__pycache__/_arraytools.cpython-310.pyc,, +scipy/signal/__pycache__/_bsplines.cpython-310.pyc,, +scipy/signal/__pycache__/_czt.cpython-310.pyc,, +scipy/signal/__pycache__/_filter_design.cpython-310.pyc,, +scipy/signal/__pycache__/_fir_filter_design.cpython-310.pyc,, +scipy/signal/__pycache__/_lti_conversion.cpython-310.pyc,, +scipy/signal/__pycache__/_ltisys.cpython-310.pyc,, +scipy/signal/__pycache__/_max_len_seq.cpython-310.pyc,, +scipy/signal/__pycache__/_peak_finding.cpython-310.pyc,, +scipy/signal/__pycache__/_savitzky_golay.cpython-310.pyc,, +scipy/signal/__pycache__/_signaltools.cpython-310.pyc,, +scipy/signal/__pycache__/_spectral.cpython-310.pyc,, +scipy/signal/__pycache__/_spectral_py.cpython-310.pyc,, +scipy/signal/__pycache__/_upfirdn.cpython-310.pyc,, +scipy/signal/__pycache__/_waveforms.cpython-310.pyc,, +scipy/signal/__pycache__/_wavelets.cpython-310.pyc,, +scipy/signal/__pycache__/bsplines.cpython-310.pyc,, +scipy/signal/__pycache__/filter_design.cpython-310.pyc,, +scipy/signal/__pycache__/fir_filter_design.cpython-310.pyc,, +scipy/signal/__pycache__/lti_conversion.cpython-310.pyc,, +scipy/signal/__pycache__/ltisys.cpython-310.pyc,, +scipy/signal/__pycache__/signaltools.cpython-310.pyc,, +scipy/signal/__pycache__/spectral.cpython-310.pyc,, +scipy/signal/__pycache__/spline.cpython-310.pyc,, +scipy/signal/__pycache__/waveforms.cpython-310.pyc,, +scipy/signal/__pycache__/wavelets.cpython-310.pyc,, +scipy/signal/_arraytools.py,sha256=qHqX1pgjguFawwag8J81ZEQMAa2J64FBUG7ihSGGBWQ,7489 
+scipy/signal/_bsplines.py,sha256=LDC-JQ7swR5kD62AQLDaY-xf90pXJPj9XoSQqy1AIqc,19753 +scipy/signal/_czt.py,sha256=t5P1kRCM3iw3eCaL9hTgctMfQKezkqnjbghLjCkffQE,19445 +scipy/signal/_filter_design.py,sha256=qwdhF1P1758BPQmcKyB4JATJm_evMjNJrq-VYyvx4ls,185184 +scipy/signal/_fir_filter_design.py,sha256=xN4R3bVjEsqulO66Tw3Pi54fEjx8WMR_PYTYWehkB_w,49077 +scipy/signal/_lti_conversion.py,sha256=P3v4T4O01N2E5oQEQVwF9rae17YJNjWpAHxZFx3ivdA,16130 +scipy/signal/_ltisys.py,sha256=EX0uSOt2QwzqXCFJHip-mJxioTEqtIidQnL_ncuekLo,129028 +scipy/signal/_max_len_seq.py,sha256=FSOVHmSTQqBpUV3ThijyNdHYHNN7mwaTUjoDDn9m3eQ,5062 +scipy/signal/_max_len_seq_inner.cpython-310-darwin.so,sha256=wViRBV4unjvaS1THEWz41AiKOEphTBli0CyC9mcv7II,77285 +scipy/signal/_peak_finding.py,sha256=W2v4ZNjtXdRj-J6VS3KXRrr42ama-PhokFoKunAfDew,48807 +scipy/signal/_peak_finding_utils.cpython-310-darwin.so,sha256=J4sAJMfkBh-lvTSXHixfCKsm1uIsw_FVL4BI-iuiFFY,232070 +scipy/signal/_savitzky_golay.py,sha256=mnltOfknWRlNiZmNLLy-zKTCrw6nZSdJPEvpGi0kv8E,13417 +scipy/signal/_signaltools.py,sha256=oYLYGR2htJPrez1KBvB1GOyaWTMxrX8EuIGchRd_sbg,155686 +scipy/signal/_sigtools.cpython-310-darwin.so,sha256=cQwKULnu4oNOPKZMFvwcqYa_lj7XbotF3ksxti6iJb8,105084 +scipy/signal/_sosfilt.cpython-310-darwin.so,sha256=EI0hZz_LI876mNWXI20AHxuhve-VKjP0xI1THajL3FE,228379 +scipy/signal/_spectral.cpython-310-darwin.so,sha256=UkdZieNaX-EZ2tHNVFbP5OPTrgRyRy1Buwmx8Sre-8c,77532 +scipy/signal/_spectral.py,sha256=tWz_fFeYGjfkpQLNmTlKR7RVkOqUsG_jkjzzisLN_9M,1940 +scipy/signal/_spectral_py.py,sha256=tNcs0bR-6lO66mVp3KFxRRI_5QP4AnbvZ9Kk2h54VhM,76570 +scipy/signal/_spline.cpython-310-darwin.so,sha256=FfbKmlx6u89cVsO-eJ0MFVNuOGmyNLw37ZTe7qB1EHw,69562 +scipy/signal/_upfirdn.py,sha256=WsElY_Gj9RBlR8pMBqJmAU0Za-BR_Jy1SrTzKDJI5LE,7884 +scipy/signal/_upfirdn_apply.cpython-310-darwin.so,sha256=YK3TJiaxryO7rEyHoDHlswhZn1QEAXXvYZilXdBCjSw,288801 +scipy/signal/_waveforms.py,sha256=Bm5WOBhk1nXwK0A6yFVTY7tCCv6trdrUjje_xmM878Y,20523 +scipy/signal/_wavelets.py,sha256=98q-YzA3eGjhL-EF6s5EiYYd-lfB9TvyHW1t9ZjMtK0,14047 +scipy/signal/bsplines.py,sha256=RFFNZHHyfJ1WEbdgboPvqV_rif6ZpP2XcQY6yAZFMvs,1085 +scipy/signal/filter_design.py,sha256=eyU6-xkaC6gpGec_KU899jWj_J7NyHavnmH6ayjSNPE,1719 +scipy/signal/fir_filter_design.py,sha256=4YYa4xY42pwC_ROuj_GyuWRcV-qJk9-3mWGQJxNWha8,1003 +scipy/signal/lti_conversion.py,sha256=NLMKn39KEc0te0VpuQ8pi0ABrwq6T20JR9JQX_8K7NU,936 +scipy/signal/ltisys.py,sha256=6VEgR9mC1lrVrCiMUgSOnM39TxdpkKTb5Ouw9Xe0m0o,1470 +scipy/signal/signaltools.py,sha256=Ul0U0FNf6G3ifaxVu-nx66hW1JWy6bW_F2SxdNg-ME4,1401 +scipy/signal/spectral.py,sha256=AGqvyefESNmSpYkZFBKr2gu5dMvNergOOxxZjvunrL0,944 +scipy/signal/spline.py,sha256=iisoUmgbyuuEukQjBz99HM3SYao7j1ZsXXmtE-wo5cU,810 +scipy/signal/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/signal/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/signal/tests/__pycache__/mpsig.cpython-310.pyc,, +scipy/signal/tests/__pycache__/test_array_tools.cpython-310.pyc,, +scipy/signal/tests/__pycache__/test_bsplines.cpython-310.pyc,, +scipy/signal/tests/__pycache__/test_cont2discrete.cpython-310.pyc,, +scipy/signal/tests/__pycache__/test_czt.cpython-310.pyc,, +scipy/signal/tests/__pycache__/test_dltisys.cpython-310.pyc,, +scipy/signal/tests/__pycache__/test_filter_design.cpython-310.pyc,, +scipy/signal/tests/__pycache__/test_fir_filter_design.cpython-310.pyc,, +scipy/signal/tests/__pycache__/test_ltisys.cpython-310.pyc,, +scipy/signal/tests/__pycache__/test_max_len_seq.cpython-310.pyc,, 
+scipy/signal/tests/__pycache__/test_peak_finding.cpython-310.pyc,, +scipy/signal/tests/__pycache__/test_result_type.cpython-310.pyc,, +scipy/signal/tests/__pycache__/test_savitzky_golay.cpython-310.pyc,, +scipy/signal/tests/__pycache__/test_signaltools.cpython-310.pyc,, +scipy/signal/tests/__pycache__/test_spectral.cpython-310.pyc,, +scipy/signal/tests/__pycache__/test_upfirdn.cpython-310.pyc,, +scipy/signal/tests/__pycache__/test_waveforms.cpython-310.pyc,, +scipy/signal/tests/__pycache__/test_wavelets.cpython-310.pyc,, +scipy/signal/tests/__pycache__/test_windows.cpython-310.pyc,, +scipy/signal/tests/mpsig.py,sha256=DHB3eHB0KYA-E0SBebKG36YLk-T5egbwwryne3RwIHM,3308 +scipy/signal/tests/test_array_tools.py,sha256=J9Mr5DtqmhiTReWvsk3YclL6Cnv32bDuklBnw2zprJY,3632 +scipy/signal/tests/test_bsplines.py,sha256=dyiJg6ggE-UXMJcq5DlnQ_DZ8RZzlbhZTxOGIaHnlOg,13220 +scipy/signal/tests/test_cont2discrete.py,sha256=yhR7o0D0CMeCsIIpuaGdZh_rfOP25mHYqRRM4UZDDGk,14821 +scipy/signal/tests/test_czt.py,sha256=3HxxWwOWIrIc0GC-K5h6f0NRjkLrWRA5OhoB5y0zbw0,6993 +scipy/signal/tests/test_dltisys.py,sha256=f4wDe0rF_FATRWHkHddbPDOsFGV-Kv2Unz8QeOUUs-k,21558 +scipy/signal/tests/test_filter_design.py,sha256=0HnfV_hptqQw5VRvUxx69mD8g_q0ATd1H9KCePnGN04,185321 +scipy/signal/tests/test_fir_filter_design.py,sha256=mG_6Bo1NHN9Gj2LAzGHwWKqlcVwYSMic6FojcSLiIC0,28932 +scipy/signal/tests/test_ltisys.py,sha256=1FzNFb7i-1XFBADecWAZpZe7bj5M36UGpOf3AVkVCqU,47325 +scipy/signal/tests/test_max_len_seq.py,sha256=X9oyCvW0Ny8hOAVX22HmKaMgi2oioe1cZWO3PTgPOgw,3106 +scipy/signal/tests/test_peak_finding.py,sha256=ckPd0IqoaRcdCg8yJ2TzXdU1kWZPIEHw0cLdEC_VIlI,33667 +scipy/signal/tests/test_result_type.py,sha256=25ha15iRfFZxy3nDODyOuvaWequyBpA42YNiiU43iAc,1627 +scipy/signal/tests/test_savitzky_golay.py,sha256=hMD2YqRw3WypwzVQlHwAwa3s6yJHiujXd_Ccspk1yNs,12424 +scipy/signal/tests/test_signaltools.py,sha256=GgATAUyIae-wbOPiu9kiPv9rZk_Tlk-Z4BaVwleYBBM,137105 +scipy/signal/tests/test_spectral.py,sha256=cfpub2uP_6wpdbv3Mpu4O4co04oaQh-XXU21kICKRw4,59276 +scipy/signal/tests/test_upfirdn.py,sha256=i3EjQKnwS6FRRRPPzwl1B_zWsQ20Dfa_6WUUYH8I3xM,11240 +scipy/signal/tests/test_waveforms.py,sha256=sTT0DeOER5U9h8Xp54VGvGlbtcxhp_wjGNQXw1yOaGM,11975 +scipy/signal/tests/test_wavelets.py,sha256=PWe19weLoxo_iyrCQ-49oxBVZRXvYh055147ykS7vU8,5947 +scipy/signal/tests/test_windows.py,sha256=xgbGYyBhjSvLoTZDveBSJ8z1oxclr_jL2fpOff4hI8U,41019 +scipy/signal/waveforms.py,sha256=hHOTVCfrIOMD95n5v_jET4nJVTpB68SyMhnSraPTPhQ,890 +scipy/signal/wavelets.py,sha256=Xkoj6JZqZKRb0CSB_BDQRclk-gMEJFhwqPY8PgRRk4U,828 +scipy/signal/windows/__init__.py,sha256=BUSXzc_D5Agp59RacDdG6EE9QjkXXtlcfQrTop_IJwo,2119 +scipy/signal/windows/__pycache__/__init__.cpython-310.pyc,, +scipy/signal/windows/__pycache__/_windows.cpython-310.pyc,, +scipy/signal/windows/__pycache__/windows.cpython-310.pyc,, +scipy/signal/windows/_windows.py,sha256=x5gqdgq7htP9vjd5F7kg-SjWnHkSnDu_ej4NAXFAQYA,83617 +scipy/signal/windows/windows.py,sha256=a08un2az27LnmEwYno88Wwo4-yQaCUK8DogsOAcZwlE,1117 +scipy/sparse/__init__.py,sha256=PEu7Ji674DDoqIM4ONi_zbRpGe0kj6-R6r3YGfPnIw0,8636 +scipy/sparse/__pycache__/__init__.cpython-310.pyc,, +scipy/sparse/__pycache__/_arrays.cpython-310.pyc,, +scipy/sparse/__pycache__/_base.cpython-310.pyc,, +scipy/sparse/__pycache__/_bsr.cpython-310.pyc,, +scipy/sparse/__pycache__/_compressed.cpython-310.pyc,, +scipy/sparse/__pycache__/_construct.cpython-310.pyc,, +scipy/sparse/__pycache__/_coo.cpython-310.pyc,, +scipy/sparse/__pycache__/_csc.cpython-310.pyc,, 
+scipy/sparse/__pycache__/_csr.cpython-310.pyc,, +scipy/sparse/__pycache__/_data.cpython-310.pyc,, +scipy/sparse/__pycache__/_dia.cpython-310.pyc,, +scipy/sparse/__pycache__/_dok.cpython-310.pyc,, +scipy/sparse/__pycache__/_extract.cpython-310.pyc,, +scipy/sparse/__pycache__/_index.cpython-310.pyc,, +scipy/sparse/__pycache__/_lil.cpython-310.pyc,, +scipy/sparse/__pycache__/_matrix_io.cpython-310.pyc,, +scipy/sparse/__pycache__/_spfuncs.cpython-310.pyc,, +scipy/sparse/__pycache__/_sputils.cpython-310.pyc,, +scipy/sparse/__pycache__/base.cpython-310.pyc,, +scipy/sparse/__pycache__/bsr.cpython-310.pyc,, +scipy/sparse/__pycache__/compressed.cpython-310.pyc,, +scipy/sparse/__pycache__/construct.cpython-310.pyc,, +scipy/sparse/__pycache__/coo.cpython-310.pyc,, +scipy/sparse/__pycache__/csc.cpython-310.pyc,, +scipy/sparse/__pycache__/csr.cpython-310.pyc,, +scipy/sparse/__pycache__/data.cpython-310.pyc,, +scipy/sparse/__pycache__/dia.cpython-310.pyc,, +scipy/sparse/__pycache__/dok.cpython-310.pyc,, +scipy/sparse/__pycache__/extract.cpython-310.pyc,, +scipy/sparse/__pycache__/lil.cpython-310.pyc,, +scipy/sparse/__pycache__/sparsetools.cpython-310.pyc,, +scipy/sparse/__pycache__/spfuncs.cpython-310.pyc,, +scipy/sparse/__pycache__/sputils.cpython-310.pyc,, +scipy/sparse/_arrays.py,sha256=eZvXoGNqBBA5UMzeXCqUepqi8lxAXKlrOzBEs8Y_K-U,2196 +scipy/sparse/_base.py,sha256=XQ3MgGcplmQHbfMspycFwEe6HC3fiIEHaZFBAVcLlcY,44784 +scipy/sparse/_bsr.py,sha256=_1bnxR-6fBW16De1D3zoo7fzUeUm-pyf-gHskcfPT3w,25249 +scipy/sparse/_compressed.py,sha256=pCgN3XaoPeBcv7pE68xrNPzNceaV14Lwnj-EE2g5Uuo,51120 +scipy/sparse/_construct.py,sha256=t62-ISypioEFe-5H5gnHcmOfuiPw3NhSr_mFDxHOQPU,30308 +scipy/sparse/_coo.py,sha256=F4wm0cX3dCyX40cltyfb0tFC7Yv0pcU36WkBrSmaRPs,22174 +scipy/sparse/_csc.py,sha256=LzCgbMhiVvSOkyRrpf9xjWFZwpJ8elyF4EsdzFBM42I,7925 +scipy/sparse/_csparsetools.cpython-310-darwin.so,sha256=hmUCQuo18-BtmPgd0IG8cj6xVMW2YoVfntd9eIt3SPs,530208 +scipy/sparse/_csr.py,sha256=e2HdbFHQUjhgRM5nEuyTQBoFhi58UGeJN5PuvNx628o,11683 +scipy/sparse/_data.py,sha256=gbK511_hPDo0cpSSCZ1pPMvqFQnc6QsbhTnsWq6Rz_g,12891 +scipy/sparse/_dia.py,sha256=ptNC4BNfdCnD5D7H15A0b1C6g3prCxac9mZGjVNbWhY,16047 +scipy/sparse/_dok.py,sha256=0Tx47NAnzrK1BTWZpKgGvRnaZSJaW152N1SRTM4CXXY,15903 +scipy/sparse/_extract.py,sha256=Pz2B8VAcBZod80FM7ssBhjs6QNMNlZOkjNg9pjgv36I,4648 +scipy/sparse/_index.py,sha256=t9wzlSuxpj64cpoe2VI-028DmIv0kHE2RziRy9PqhyE,12928 +scipy/sparse/_lil.py,sha256=qEqH5OaAbaUN_SrifgSXYrjrvieejhgxbwExxZ4KfoQ,18296 +scipy/sparse/_matrix_io.py,sha256=KMTc-Y1jlYK3HctQ6EwvfBBFBDqziqSIKWh1LQFGbgQ,5379 +scipy/sparse/_sparsetools.cpython-310-darwin.so,sha256=MYmmkRdHamE-7zUijOlLH-3RRPyAbObQnj28NehmDJg,3671071 +scipy/sparse/_spfuncs.py,sha256=G4a1nPdxRJIxG1f1rCiJiFYZK8IFmks_EvCXcv-Opd0,1981 +scipy/sparse/_sputils.py,sha256=mYCdgcYrRMAe_oN1-5_bKubQBVWktBEz4eI7Kc0_Kgw,13136 +scipy/sparse/base.py,sha256=qxhdhbOyBKTVIhO4s5kFAZEcm7QhKBzDHBzRYUCNO9I,1016 +scipy/sparse/bsr.py,sha256=Ci4qlM-aGqtmHJD3RZgAwO0iuA7ziSnrdyfCUhjP3RE,1058 +scipy/sparse/compressed.py,sha256=aWYREJ4E9KXgqmk5-yivQx88HMNcFNRABmtomE2vUOk,1286 +scipy/sparse/construct.py,sha256=RdZSkoRKiwLOMiJxAzNOMCTTK5ZyW_m7cFkBFOa6MLk,1158 +scipy/sparse/coo.py,sha256=lZhO4RChFbyJEt9AYjQ49JMiILNIwDIku2TuW2NtdJQ,1091 +scipy/sparse/csc.py,sha256=CltVpoEbj8MM4iyNqsyLv7y2K_PUM9gFLhMiXvH1-KU,838 +scipy/sparse/csgraph/__init__.py,sha256=UzgDvD2sNSRqbF7YNIeg9UgIuJdl3huUVfCogfiy3c0,7739 +scipy/sparse/csgraph/__pycache__/__init__.cpython-310.pyc,, +scipy/sparse/csgraph/__pycache__/_laplacian.cpython-310.pyc,, 
+scipy/sparse/csgraph/__pycache__/_validation.cpython-310.pyc,, +scipy/sparse/csgraph/__pycache__/setup.cpython-310.pyc,, +scipy/sparse/csgraph/_flow.cpython-310-darwin.so,sha256=9gOMLmpMMFXuwQTyDyrFnRD2Ntoxh6Bc8TxGHKy6eK8,271928 +scipy/sparse/csgraph/_laplacian.py,sha256=6_f6BBt-WSj4aI5wYx4rQBKgx0qObiPyvmwZCm281_Y,17833 +scipy/sparse/csgraph/_matching.cpython-310-darwin.so,sha256=OssONQhZ2bWV4onASD0Fkwo9ubBMZVNO_gMYHTn6O94,269228 +scipy/sparse/csgraph/_min_spanning_tree.cpython-310-darwin.so,sha256=pujQMf0dfA_1M-33R0-wFCdG0roxIICANxCyhVI3J9o,189237 +scipy/sparse/csgraph/_reordering.cpython-310-darwin.so,sha256=hDRf7fD0MRcbVAolZdH_puBDBI8DKFTJIly0DBWT1e8,255582 +scipy/sparse/csgraph/_shortest_path.cpython-310-darwin.so,sha256=IfNgTBxgR39Ks0C05ats7STuL5YC-cfxXzWp6nmJBh8,423889 +scipy/sparse/csgraph/_tools.cpython-310-darwin.so,sha256=GwE0kXbd1lsDtaVJ7j6O2pt-KO5n4T5YZZc5KhCo01g,187961 +scipy/sparse/csgraph/_traversal.cpython-310-darwin.so,sha256=unNMBTkIaLE3MspXWtxCgI9N6OBu6him5pjfg9libmk,161725 +scipy/sparse/csgraph/_validation.py,sha256=QNT8OusAIavxH4C989ljtgnduh61H6RBzqk3xRIO8Ho,2327 +scipy/sparse/csgraph/setup.py,sha256=hAWJsFo4-YNix-AKUqEkUROyUfe7l4c7I9D-V5XOPQc,1099 +scipy/sparse/csgraph/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/sparse/csgraph/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/sparse/csgraph/tests/__pycache__/test_connected_components.cpython-310.pyc,, +scipy/sparse/csgraph/tests/__pycache__/test_conversions.cpython-310.pyc,, +scipy/sparse/csgraph/tests/__pycache__/test_flow.cpython-310.pyc,, +scipy/sparse/csgraph/tests/__pycache__/test_graph_laplacian.cpython-310.pyc,, +scipy/sparse/csgraph/tests/__pycache__/test_matching.cpython-310.pyc,, +scipy/sparse/csgraph/tests/__pycache__/test_reordering.cpython-310.pyc,, +scipy/sparse/csgraph/tests/__pycache__/test_shortest_path.cpython-310.pyc,, +scipy/sparse/csgraph/tests/__pycache__/test_spanning_tree.cpython-310.pyc,, +scipy/sparse/csgraph/tests/__pycache__/test_traversal.cpython-310.pyc,, +scipy/sparse/csgraph/tests/test_connected_components.py,sha256=DKsvhuW2BgDvMAa-MJ4GlYvyIDIOVe58QjxUhQ5yfgQ,3199 +scipy/sparse/csgraph/tests/test_conversions.py,sha256=Y48qwFRsE4tTxFYS_Bn8ndCkAwe8n0rovbaVYppCy34,1855 +scipy/sparse/csgraph/tests/test_flow.py,sha256=AWq0Gah3IuyWrZ3Lr0NReS-NIoPWiD4_avmYCBHsDAQ,7620 +scipy/sparse/csgraph/tests/test_graph_laplacian.py,sha256=9U_z12K0Ctoh0RaPCsLccz-_LpFFhcN2HN-iB7NzlUk,10623 +scipy/sparse/csgraph/tests/test_matching.py,sha256=Fj82svwQgmWKC9Fis1Lb5F5bH30RuUG72dMj7Km5v20,8532 +scipy/sparse/csgraph/tests/test_reordering.py,sha256=by-44sshHL-yaYE23lDp1EqnG-72MRbExi_HYSMJEz8,2613 +scipy/sparse/csgraph/tests/test_shortest_path.py,sha256=RmRAk_RxMo3C9do0f01DsHSPyDUVEUZXuq4h6aALrDo,14441 +scipy/sparse/csgraph/tests/test_spanning_tree.py,sha256=uyOB_TB8E1O2JFDuB16_r3kw7fizTpKy5ce81AT9XP8,2115 +scipy/sparse/csgraph/tests/test_traversal.py,sha256=bdZc-7WE4SPhyL2SLUdsKC-B_DNmscl4Z5bO9zNrh6k,2325 +scipy/sparse/csr.py,sha256=cmPYY83pa6OwO19bquQiRi4BpVkUa-uHT5yFoCWROS4,887 +scipy/sparse/data.py,sha256=dOqfmIpX9TfoosFAbq18WfFWfz10ai1a9-yhDrgvocQ,811 +scipy/sparse/dia.py,sha256=UjBrPBeMEoIShw-qEEEK5pCLRHxJk2wu8Eztw5ohxXE,936 +scipy/sparse/dok.py,sha256=5zAGkQHTx7ZOKaPcunLitFoROb4q4gyH48bva-Bg13A,980 +scipy/sparse/extract.py,sha256=O-kmKGLk118RQfbUnvo_jFUd18bxgM32oww_5DSMTfI,781 +scipy/sparse/lil.py,sha256=mWSsX2-CEsJL1DkRIghTf9GnEE8jaJ-gXVQ8-MMNlK4,981 +scipy/sparse/linalg/__init__.py,sha256=UTXDqJ3GiSh9tU5dSO9GhUmY7xwh4R4zBzdKkTq0cQ8,3717 
+scipy/sparse/linalg/__pycache__/__init__.cpython-310.pyc,, +scipy/sparse/linalg/__pycache__/_expm_multiply.cpython-310.pyc,, +scipy/sparse/linalg/__pycache__/_interface.cpython-310.pyc,, +scipy/sparse/linalg/__pycache__/_matfuncs.cpython-310.pyc,, +scipy/sparse/linalg/__pycache__/_norm.cpython-310.pyc,, +scipy/sparse/linalg/__pycache__/_onenormest.cpython-310.pyc,, +scipy/sparse/linalg/__pycache__/_svdp.cpython-310.pyc,, +scipy/sparse/linalg/__pycache__/dsolve.cpython-310.pyc,, +scipy/sparse/linalg/__pycache__/eigen.cpython-310.pyc,, +scipy/sparse/linalg/__pycache__/interface.cpython-310.pyc,, +scipy/sparse/linalg/__pycache__/isolve.cpython-310.pyc,, +scipy/sparse/linalg/__pycache__/matfuncs.cpython-310.pyc,, +scipy/sparse/linalg/_dsolve/__init__.py,sha256=mB_3u89ASCCQA48XGBS3bwRj2agYvgTuIJ0tnLnJly0,1991 +scipy/sparse/linalg/_dsolve/__pycache__/__init__.cpython-310.pyc,, +scipy/sparse/linalg/_dsolve/__pycache__/_add_newdocs.cpython-310.pyc,, +scipy/sparse/linalg/_dsolve/__pycache__/linsolve.cpython-310.pyc,, +scipy/sparse/linalg/_dsolve/_add_newdocs.py,sha256=Sjol-MfXrIch0chc7T5TeCVxaJowfFqJnsBlGnX8DZ8,3795 +scipy/sparse/linalg/_dsolve/_superlu.cpython-310-darwin.so,sha256=pQZZ76xKU18yT8sr8PyMeqZucv9pZSiXfit1NB2tA7o,341008 +scipy/sparse/linalg/_dsolve/linsolve.py,sha256=7Tvfh8DDqPdL3EgPy-nIonoOUssal1_w5UXHZ0XPlO4,25223 +scipy/sparse/linalg/_dsolve/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/sparse/linalg/_dsolve/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/sparse/linalg/_dsolve/tests/__pycache__/test_linsolve.cpython-310.pyc,, +scipy/sparse/linalg/_dsolve/tests/test_linsolve.py,sha256=5h-YDYQNsmQSN8Kz2IpwqymiXsB7my04nvykRZZF5k8,27279 +scipy/sparse/linalg/_eigen/__init__.py,sha256=SwNho3iWZu_lJvcdSomA5cQdcDU8gocKbmRnm6Bf9-0,460 +scipy/sparse/linalg/_eigen/__pycache__/__init__.cpython-310.pyc,, +scipy/sparse/linalg/_eigen/__pycache__/_svds.cpython-310.pyc,, +scipy/sparse/linalg/_eigen/__pycache__/_svds_doc.cpython-310.pyc,, +scipy/sparse/linalg/_eigen/_svds.py,sha256=yjiKIxB6fcTAjAAqjXIUlXUg3WSaVK5xPzYxenWXFeY,20730 +scipy/sparse/linalg/_eigen/_svds_doc.py,sha256=gogZuEa_0k6W1MN6xB74Uvo0nrWO0jiV0FhJ6GE3nNU,15525 +scipy/sparse/linalg/_eigen/arpack/COPYING,sha256=CSZWb59AYXjRIU-Mx5bhZrEhPdfAXgxbRhqLisnlC74,1892 +scipy/sparse/linalg/_eigen/arpack/__init__.py,sha256=zDxf9LokyPitn3_0d-PUXoBCh6tWK0eUSvsAj6nkXI0,562 +scipy/sparse/linalg/_eigen/arpack/__pycache__/__init__.cpython-310.pyc,, +scipy/sparse/linalg/_eigen/arpack/__pycache__/arpack.cpython-310.pyc,, +scipy/sparse/linalg/_eigen/arpack/_arpack.cpython-310-darwin.so,sha256=wQdzTtO19dxKCRV52_3xtxl13BEkA7kiVlmSDp_K_Ag,441232 +scipy/sparse/linalg/_eigen/arpack/arpack.py,sha256=qdhRRo9QUq0Zi66m1JmhP8h5Onjxh5SOpUnnwefPuRY,67330 +scipy/sparse/linalg/_eigen/arpack/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/sparse/linalg/_eigen/arpack/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/sparse/linalg/_eigen/arpack/tests/__pycache__/test_arpack.cpython-310.pyc,, +scipy/sparse/linalg/_eigen/arpack/tests/test_arpack.py,sha256=NlBJrPyeqJ0eMfGUhAYIxxNRRzg8ECxDJR87b-vdXGI,23895 +scipy/sparse/linalg/_eigen/lobpcg/__init__.py,sha256=E5JEPRoVz-TaLrj_rPm5LP3jCwei4XD-RxbcxYwf5lM,420 +scipy/sparse/linalg/_eigen/lobpcg/__pycache__/__init__.cpython-310.pyc,, +scipy/sparse/linalg/_eigen/lobpcg/__pycache__/lobpcg.cpython-310.pyc,, +scipy/sparse/linalg/_eigen/lobpcg/lobpcg.py,sha256=gri-uEW1qMWM7I3tNoDE2DQCnsw2O6xGxYLE8_jEo5A,37341 
+scipy/sparse/linalg/_eigen/lobpcg/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/sparse/linalg/_eigen/lobpcg/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/sparse/linalg/_eigen/lobpcg/tests/__pycache__/test_lobpcg.cpython-310.pyc,, +scipy/sparse/linalg/_eigen/lobpcg/tests/test_lobpcg.py,sha256=in4XV7JPmi2jVeCYJKzp-nNLHsQ7MN-rmUNLDj96nPg,18876 +scipy/sparse/linalg/_eigen/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/sparse/linalg/_eigen/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/sparse/linalg/_eigen/tests/__pycache__/test_svds.cpython-310.pyc,, +scipy/sparse/linalg/_eigen/tests/test_svds.py,sha256=qsfQiXGsiC3zNSycsv6bQfT_xV8K_ABpN3gG_pYEA6Q,37285 +scipy/sparse/linalg/_expm_multiply.py,sha256=UktXq-SBk_CKuakScGsvMAAjw8x5X2Wf_Gr_jTSPIWg,26145 +scipy/sparse/linalg/_interface.py,sha256=lYf3tJ3IEiVhaEtMxz0ZbSrJKI1GCKWCcgIBS2evOO8,25289 +scipy/sparse/linalg/_isolve/__init__.py,sha256=Z_eQUYbe6RWMSNi09T9TfPEWm8RsVxcIKYAlihM-U-c,479 +scipy/sparse/linalg/_isolve/__pycache__/__init__.cpython-310.pyc,, +scipy/sparse/linalg/_isolve/__pycache__/_gcrotmk.cpython-310.pyc,, +scipy/sparse/linalg/_isolve/__pycache__/iterative.cpython-310.pyc,, +scipy/sparse/linalg/_isolve/__pycache__/lgmres.cpython-310.pyc,, +scipy/sparse/linalg/_isolve/__pycache__/lsmr.cpython-310.pyc,, +scipy/sparse/linalg/_isolve/__pycache__/lsqr.cpython-310.pyc,, +scipy/sparse/linalg/_isolve/__pycache__/minres.cpython-310.pyc,, +scipy/sparse/linalg/_isolve/__pycache__/tfqmr.cpython-310.pyc,, +scipy/sparse/linalg/_isolve/__pycache__/utils.cpython-310.pyc,, +scipy/sparse/linalg/_isolve/_gcrotmk.py,sha256=CDI2Qwt-FY4-aCMM8wFP56dN9l0GcJKPW1s519ij1k0,15957 +scipy/sparse/linalg/_isolve/_iterative.cpython-310-darwin.so,sha256=XS0WhTNrOD-_zRnusHzrPi5GenXvOQ5UHAUGkSiGnBw,259872 +scipy/sparse/linalg/_isolve/iterative.py,sha256=dPp6f9WwZJedXqyl-AAPMpPu1PcpZTCqxGjUB_-1fQ4,30410 +scipy/sparse/linalg/_isolve/lgmres.py,sha256=HXHikhzZRBGJnp775MlwLbteR05C4A2KypTB4O0-kZQ,8932 +scipy/sparse/linalg/_isolve/lsmr.py,sha256=P-RPAaeSflcy_oSrOb49U3R1RCXHNaI76uqYubEBsPo,15681 +scipy/sparse/linalg/_isolve/lsqr.py,sha256=jeNMnBtkXWzUJykxUVi2YOTEnFF5IFBDBVFv7Upce9g,21242 +scipy/sparse/linalg/_isolve/minres.py,sha256=WJSdHAyuqtekVd3HWYDF55ApUvhMN8eD1e1I2lWbLLo,11425 +scipy/sparse/linalg/_isolve/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/sparse/linalg/_isolve/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/sparse/linalg/_isolve/tests/__pycache__/test_gcrotmk.cpython-310.pyc,, +scipy/sparse/linalg/_isolve/tests/__pycache__/test_iterative.cpython-310.pyc,, +scipy/sparse/linalg/_isolve/tests/__pycache__/test_lgmres.cpython-310.pyc,, +scipy/sparse/linalg/_isolve/tests/__pycache__/test_lsmr.cpython-310.pyc,, +scipy/sparse/linalg/_isolve/tests/__pycache__/test_lsqr.cpython-310.pyc,, +scipy/sparse/linalg/_isolve/tests/__pycache__/test_minres.cpython-310.pyc,, +scipy/sparse/linalg/_isolve/tests/__pycache__/test_utils.cpython-310.pyc,, +scipy/sparse/linalg/_isolve/tests/test_gcrotmk.py,sha256=7vJGIsxohY1QHMzNkQQ8T6PGNmleQUqUCntj-hdaNng,5408 +scipy/sparse/linalg/_isolve/tests/test_iterative.py,sha256=qCVU0sFS0b-Rz1-nxGIQMlCqdNvo_aTsLvnLHcUtldQ,27115 +scipy/sparse/linalg/_isolve/tests/test_lgmres.py,sha256=4I7jokIZTWBq_Zbd49JaK2QnfLUF2lYGCecWGoqHtLw,7060 +scipy/sparse/linalg/_isolve/tests/test_lsmr.py,sha256=XNkeOk-sK94sddu3dYVxJsXtQA0Lh80EgQYZ2pnSB48,7324 +scipy/sparse/linalg/_isolve/tests/test_lsqr.py,sha256=Ma1rAw3jw7nXFoP5-ZYLlTgjn2tG03tu6evCSsHaTUg,4810 
+scipy/sparse/linalg/_isolve/tests/test_minres.py,sha256=17a2ezMO2OXKtruk_Rp9-e7QtGaXZ5h2sUUJz67JwHg,2446 +scipy/sparse/linalg/_isolve/tests/test_utils.py,sha256=whURuUHl3jyNnsS-QgHSfDe342LBTwf3C_JbK7q_Ft4,247 +scipy/sparse/linalg/_isolve/tfqmr.py,sha256=blnP76yRYJuYTkAYntQoZUFEqydaXVxjpzXDC_PBHf0,6241 +scipy/sparse/linalg/_isolve/utils.py,sha256=I-Fjco_b83YKUtZPVdobTjPyY41-2SHruVvKZVOIXaU,3598 +scipy/sparse/linalg/_matfuncs.py,sha256=3eWF5bLBu6SUVxrPdBZOP9WR_nrPyT68fyJxtalKhpo,27228 +scipy/sparse/linalg/_norm.py,sha256=YRUM-eEnJ2D_8pvaz4LEeiV4MFaWHrO2jx546dw_KQ8,6062 +scipy/sparse/linalg/_onenormest.py,sha256=8Yxe16ox-G9UV41iMN4yfGb_1JQxENeq2YhN8Icwg5M,15486 +scipy/sparse/linalg/_propack/_cpropack.cpython-310-darwin.so,sha256=I7cgY1sSA89P-OSpb-W7QoCn90UVVdgzWIWtvS8sAvc,145888 +scipy/sparse/linalg/_propack/_dpropack.cpython-310-darwin.so,sha256=EirZXSghebkCd94bchV0PCrsNqguRcpGcZE_UO6Wc3U,128208 +scipy/sparse/linalg/_propack/_spropack.cpython-310-darwin.so,sha256=IyyzxJVMNU7TrwnL1q6csh018b906Y9R2KVSaLGhMik,128208 +scipy/sparse/linalg/_propack/_zpropack.cpython-310-darwin.so,sha256=ATgscBaa_U4sOIjrqzD8OBYjRPqenetvPsZg3Ty43Rg,145888 +scipy/sparse/linalg/_svdp.py,sha256=EDQ1BVBxRsCygrGKoLCQHnUKEqUGO9LS5vjq2TEvAls,11581 +scipy/sparse/linalg/dsolve.py,sha256=s0PkMvkadWLa87Zi84K4fO3S82RyuOqA6xy1ZPaQEcs,1203 +scipy/sparse/linalg/eigen.py,sha256=onUc3vZGnS8jtajnkSvYxbXA9kq4KG1Djv1EqOkuvmw,1151 +scipy/sparse/linalg/interface.py,sha256=JyH79SJ72jqeE05MgNhNxXI8S-fucoR6B4d1yc448DU,935 +scipy/sparse/linalg/isolve.py,sha256=QAcHU8MkRlKU0ZJbHEc_H99HCSY6XKop3bF7bydcg54,904 +scipy/sparse/linalg/matfuncs.py,sha256=WgjTo4WEUMVlMoZK6iQ3C-R1bDQ6cNZRbTR1LHV1VdY,948 +scipy/sparse/linalg/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/sparse/linalg/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/sparse/linalg/tests/__pycache__/test_expm_multiply.cpython-310.pyc,, +scipy/sparse/linalg/tests/__pycache__/test_interface.cpython-310.pyc,, +scipy/sparse/linalg/tests/__pycache__/test_matfuncs.cpython-310.pyc,, +scipy/sparse/linalg/tests/__pycache__/test_norm.cpython-310.pyc,, +scipy/sparse/linalg/tests/__pycache__/test_onenormest.cpython-310.pyc,, +scipy/sparse/linalg/tests/__pycache__/test_propack.cpython-310.pyc,, +scipy/sparse/linalg/tests/__pycache__/test_pydata_sparse.cpython-310.pyc,, +scipy/sparse/linalg/tests/propack_test_data.npz,sha256=v-NNmpI1Pgj0APODcTblU6jpHUQRhpE9ObWb-KYnu6M,600350 +scipy/sparse/linalg/tests/test_expm_multiply.py,sha256=pDCHrcxhGvknCrsInqi_Y37Bl4FrDVsqWwNyRJEQzG0,13919 +scipy/sparse/linalg/tests/test_interface.py,sha256=XFkDuO2dmyHxPLT1VKmojr8dps0oe50ZaRQjp9AYOkA,16519 +scipy/sparse/linalg/tests/test_matfuncs.py,sha256=67tWWI3Y3vGIa-w4k-DNr9NHw8QozESxqeBhKPneWT0,21280 +scipy/sparse/linalg/tests/test_norm.py,sha256=8waDQ-csiw4jTIQPz8qlseqgosvjY9OHfAU7lJ8yLxo,6163 +scipy/sparse/linalg/tests/test_onenormest.py,sha256=PSXSoTvGkBI2AlJy81kQaGh0qicMg89hoak919dpQ7U,9229 +scipy/sparse/linalg/tests/test_propack.py,sha256=6CL7xhQqPdAd1DGduqx0fmeo6NNn6anT5te3rl_yMkw,6284 +scipy/sparse/linalg/tests/test_pydata_sparse.py,sha256=MNBaBg4m-fnRrv4BHIPiyxsHGdRuU6iV_UphO7a2IbM,6124 +scipy/sparse/sparsetools.py,sha256=pe8yKLT3FTs7C2d3ZB6V8sZRkMbp0KKEH_teY_mks3E,2390 +scipy/sparse/spfuncs.py,sha256=-L313g_Rr1j-Gy8dqgKetxQFDGKYJu6P53l6CrYIWqg,842 +scipy/sparse/sputils.py,sha256=rMARLPcXcI1v00eEt5bOCOI9uEh-kk7pxEpbQ_ijcNM,1187 +scipy/sparse/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/sparse/tests/__pycache__/__init__.cpython-310.pyc,, 
+scipy/sparse/tests/__pycache__/test_array_api.cpython-310.pyc,, +scipy/sparse/tests/__pycache__/test_base.cpython-310.pyc,, +scipy/sparse/tests/__pycache__/test_construct.cpython-310.pyc,, +scipy/sparse/tests/__pycache__/test_csc.cpython-310.pyc,, +scipy/sparse/tests/__pycache__/test_csr.cpython-310.pyc,, +scipy/sparse/tests/__pycache__/test_extract.cpython-310.pyc,, +scipy/sparse/tests/__pycache__/test_matrix_io.cpython-310.pyc,, +scipy/sparse/tests/__pycache__/test_sparsetools.cpython-310.pyc,, +scipy/sparse/tests/__pycache__/test_spfuncs.cpython-310.pyc,, +scipy/sparse/tests/__pycache__/test_sputils.cpython-310.pyc,, +scipy/sparse/tests/data/csc_py2.npz,sha256=usJ_Gj6x_dEC2uObfdYc6D6C8JY4jjROFChQcZhNAfo,846 +scipy/sparse/tests/data/csc_py3.npz,sha256=axuEMVxwd0F-cgUS0IalpiF8KHW4GNJ3BK6bcjfGnf4,851 +scipy/sparse/tests/test_array_api.py,sha256=7d4y5GS8e1sdGfBcP2ZV9rKk6DQaiwl1nqqEy1N5eps,7480 +scipy/sparse/tests/test_base.py,sha256=xccr0CEEG-cxigJ6yuLDeZ3szYTv1eyzOx2r0qqd2zg,181600 +scipy/sparse/tests/test_construct.py,sha256=bZLWowr_WkJx8eddudGXZk6dQGYEKLW_Xfh8nnxED7o,24891 +scipy/sparse/tests/test_csc.py,sha256=5JW9c3EiAPI_lgPwKXwHtx3yYPtAn9fskbSYAcoNVEw,2902 +scipy/sparse/tests/test_csr.py,sha256=vgQ2nH5-73Qd_ujYGIaScGv9_ErAjXHxgkJHN_eN1vQ,5651 +scipy/sparse/tests/test_extract.py,sha256=NhizzkOSFkX_qSQi3coKIaDJKcDOvrJYew98VJlTyeU,1313 +scipy/sparse/tests/test_matrix_io.py,sha256=vU0N5HkcjleHZhQlDt83bk5NsUU_NePl7rUr2zmAFA0,2542 +scipy/sparse/tests/test_sparsetools.py,sha256=zA_dsqsQLDpNeRb5BZilLr2zP5H9FQ9yC4X61CAveAM,10441 +scipy/sparse/tests/test_spfuncs.py,sha256=ECs34sgYYhTBWe4hIkx357obH2lLsnJWkh7TfacjThw,3258 +scipy/sparse/tests/test_sputils.py,sha256=3mJaPEf8-EICEljDNJr8z5WqjVYpghDryO9Axmu0l8U,6835 +scipy/spatial/__init__.py,sha256=B5EiDg59_GTnvqag3eFt-CAWo-4wEGzRMcYGtBu_EF4,3636 +scipy/spatial/__pycache__/__init__.cpython-310.pyc,, +scipy/spatial/__pycache__/_geometric_slerp.cpython-310.pyc,, +scipy/spatial/__pycache__/_kdtree.cpython-310.pyc,, +scipy/spatial/__pycache__/_plotutils.cpython-310.pyc,, +scipy/spatial/__pycache__/_procrustes.cpython-310.pyc,, +scipy/spatial/__pycache__/_spherical_voronoi.cpython-310.pyc,, +scipy/spatial/__pycache__/ckdtree.cpython-310.pyc,, +scipy/spatial/__pycache__/distance.cpython-310.pyc,, +scipy/spatial/__pycache__/kdtree.cpython-310.pyc,, +scipy/spatial/__pycache__/qhull.cpython-310.pyc,, +scipy/spatial/_ckdtree.cpython-310-darwin.so,sha256=7Qh85nAi0Ypnj3UDAGysGdWf7uz1VJ81mWNjb-b8wp0,674043 +scipy/spatial/_ckdtree.pyi,sha256=xLU8bXLL7QLB54h2y7H2m805k8-k6k8fO2gUoDf_YfE,6002 +scipy/spatial/_distance_pybind.cpython-310-darwin.so,sha256=vSk_johZYiyWQs2_s1tN50Ky54RbqKDICAT8DI-TF10,315523 +scipy/spatial/_distance_wrap.cpython-310-darwin.so,sha256=5GbBRR7vNWQBbaCEoXakQe3hUGM4TnnqO9VQJnrbUGg,120769 +scipy/spatial/_geometric_slerp.py,sha256=Ix-OSGGMTibHipoTLzApaVTmjtoOvA5y-A75b6uaTfs,7945 +scipy/spatial/_hausdorff.cpython-310-darwin.so,sha256=WexJ55nkYbIKITWa2knJATq0Ez-mA-bdM83EUKjVQxs,186973 +scipy/spatial/_kdtree.py,sha256=jFcpz1pozP1KGz0hRpHiHtUAkHyEEae9oOzzoYa7pzI,33444 +scipy/spatial/_plotutils.py,sha256=3IO7u0bDFNa6t1uPM5hkmj9uJFzgI76wAdUDM5ZB5AM,7168 +scipy/spatial/_procrustes.py,sha256=So7XHpYPIZ5hhrGQkmokNTgkiZHqlvmczIgWHi8eiEc,4427 +scipy/spatial/_qhull.cpython-310-darwin.so,sha256=9qiyMIcfnJoyRuSN4MECj3qcf0qZQbidf--i0AM5dVc,1015728 +scipy/spatial/_qhull.pyi,sha256=d7r0hRuSn0EE_K3B0Dd1f2EzoeYvEFZYykGhpUI09Yc,6008 +scipy/spatial/_spherical_voronoi.py,sha256=wfA6t_JOfFAUlsbhfsjB1YnGrSYF4aazfHhkjZoQg3s,13573 
+scipy/spatial/_voronoi.cpython-310-darwin.so,sha256=Ufgaej--C9Dg-AB9O0ZgKLBLAYLo2JVHJ3kWtbo7KJg,186075 +scipy/spatial/_voronoi.pyi,sha256=O0O1J1x1rfhJwJmiBYohBD55WhL2124tVdFNNAxj-0M,136 +scipy/spatial/ckdtree.py,sha256=60uL2ynovFUeQT9NmAqPmYK_rsiT1YKwaMAW-FMeBr8,862 +scipy/spatial/distance.py,sha256=Vo39903djlfTGBgqc-G4B6sRHaYO4lVl_WYzY5SbTLw,90214 +scipy/spatial/distance.pyi,sha256=SJeY7R6v41Ej6RBOGQPNnC7hi4r8RwdZuJHCIaCnEVc,5497 +scipy/spatial/kdtree.py,sha256=L4l9CG0wUNP8ARMUagNXzNC8vA8k134tXpAjHsI3hpg,870 +scipy/spatial/qhull.py,sha256=4jL-ImgXrnmMo4zxfaNgPxE6uTAbGID_CJ22NrSWbR4,889 +scipy/spatial/qhull_src/COPYING.txt,sha256=NNsMDE-TGGHXIFVcnNei4ijRKQuimvDy7oDEG7IDivs,1635 +scipy/spatial/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/spatial/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/spatial/tests/__pycache__/test__plotutils.cpython-310.pyc,, +scipy/spatial/tests/__pycache__/test__procrustes.cpython-310.pyc,, +scipy/spatial/tests/__pycache__/test_distance.cpython-310.pyc,, +scipy/spatial/tests/__pycache__/test_hausdorff.cpython-310.pyc,, +scipy/spatial/tests/__pycache__/test_kdtree.cpython-310.pyc,, +scipy/spatial/tests/__pycache__/test_qhull.cpython-310.pyc,, +scipy/spatial/tests/__pycache__/test_slerp.cpython-310.pyc,, +scipy/spatial/tests/__pycache__/test_spherical_voronoi.cpython-310.pyc,, +scipy/spatial/tests/data/cdist-X1.txt,sha256=ULnYAgX2_AwOVF-VE7XfnW5S0pzhx7UAoocxSnXMaWs,5750 +scipy/spatial/tests/data/cdist-X2.txt,sha256=_IJVjXsp3pvd8NNPNTLmVbHOrzl_RiEXz7cb86NfvZ4,11500 +scipy/spatial/tests/data/degenerate_pointset.npz,sha256=BIq8Hd2SS_LU0fIWAVVS7ZQx-emVRvvzgnaO2lh4gXU,22548 +scipy/spatial/tests/data/iris.txt,sha256=k19QSfkqhMmByqNMzwWDmM6wf5dt6whdGyfAyUO3AW0,15000 +scipy/spatial/tests/data/pdist-boolean-inp.txt,sha256=5Z9SMsXrtmzeUwJlVmGkrPDC_Km7nVpZIbBl7p3Hdc0,50000 +scipy/spatial/tests/data/pdist-chebyshev-ml-iris.txt,sha256=Yerj1wqIzcdyULlha-q02WBNGyS2Q5o2wAr0XVEkzis,178801 +scipy/spatial/tests/data/pdist-chebyshev-ml.txt,sha256=NEd2b-DONqUMV9f8gJ2yod17C_5fXGHHZ38PeFsXkyw,3041 +scipy/spatial/tests/data/pdist-cityblock-ml-iris.txt,sha256=UCWZJeMkMajbpjeG0FW60b0q-4r1geAyguNY6Chx5bM,178801 +scipy/spatial/tests/data/pdist-cityblock-ml.txt,sha256=8Iq7cF8oMJjpqd6qsDt_mKPQK0T8Ldot2P8C5rgbGIU,3041 +scipy/spatial/tests/data/pdist-correlation-ml-iris.txt,sha256=l2kEAu0Pm3OsFJsQtHf9Qdy5jnnoOu1v3MooBISnjP0,178801 +scipy/spatial/tests/data/pdist-correlation-ml.txt,sha256=S4GY3z-rf_BGuHmsnColMvR8KwYDyE9lqEbYT_a3Qag,3041 +scipy/spatial/tests/data/pdist-cosine-ml-iris.txt,sha256=hQzzoZrmw9OXAbqkxC8eTFXtJZrbFzMgcWMLbJlOv7U,178801 +scipy/spatial/tests/data/pdist-cosine-ml.txt,sha256=P92Tm6Ie8xg4jGSP7k7bmFRAP5MfxtVR_KacS73a6PI,3041 +scipy/spatial/tests/data/pdist-double-inp.txt,sha256=0Sx5yL8D8pyYDXTIBZAoTiSsRpG_eJz8uD2ttVrklhU,50000 +scipy/spatial/tests/data/pdist-euclidean-ml-iris.txt,sha256=3-UwBM7WZa4aCgmW_ZAdRSq8KYMq2gnkIUqU73Z0OLI,178801 +scipy/spatial/tests/data/pdist-euclidean-ml.txt,sha256=rkQA2-_d7uByKmw003lFXbXNDjHrUGBplZ8nB_TU5pk,3041 +scipy/spatial/tests/data/pdist-hamming-ml.txt,sha256=IAYroplsdz6n7PZ-vIMIJ4FjG9jC1OSxc3-oVJdSFDM,3041 +scipy/spatial/tests/data/pdist-jaccard-ml.txt,sha256=Zb42SoVEnlTj_N_ndnym3_d4RNZWeHm290hTtpp_zO8,3041 +scipy/spatial/tests/data/pdist-jensenshannon-ml-iris.txt,sha256=L7STTmlRX-z-YvksmiAxEe1UoTmDnQ_lnAjZH53Szp0,172738 +scipy/spatial/tests/data/pdist-jensenshannon-ml.txt,sha256=-sZUikGMWskONojs6fJIMX8VEWpviYYg4u1vipY6Bak,2818 
+scipy/spatial/tests/data/pdist-minkowski-3.2-ml-iris.txt,sha256=N5L5CxRT5yf_vq6pFjorJ09Sr-RcnrAlH-_F3kEsyUU,178801 +scipy/spatial/tests/data/pdist-minkowski-3.2-ml.txt,sha256=DRgzqxRtvQVzFnpFAjNC9TDNgRtk2ZRkWPyAaeOx3q4,3041 +scipy/spatial/tests/data/pdist-minkowski-5.8-ml-iris.txt,sha256=jz7SGKU8GuJWASH2u428QL9c-G_-8nZvOFSOUlMdCyA,178801 +scipy/spatial/tests/data/pdist-seuclidean-ml-iris.txt,sha256=37H01o6GibccR_hKIwwbWxGX0Tuxnb-4Qc6rmDxwwUI,178801 +scipy/spatial/tests/data/pdist-seuclidean-ml.txt,sha256=YmcI7LZ6i-Wg1wjAkLVX7fmxzCj621Pc5itO3PvCm_k,3041 +scipy/spatial/tests/data/pdist-spearman-ml.txt,sha256=IrtJmDQliv4lDZ_UUjkZNso3EZyu7pMACxMB-rvHUj0,3041 +scipy/spatial/tests/data/random-bool-data.txt,sha256=MHAQdE4hPVzgu-csVVbm1DNJ80dP7XthJ1kb2In8ImM,6000 +scipy/spatial/tests/data/random-double-data.txt,sha256=GA8hYrHsTBeS864GJf0X6JRTvGlbpM8P8sJairmfnBU,75000 +scipy/spatial/tests/data/random-int-data.txt,sha256=xTUbCgoT4X8nll3kXu7S9lv-eJzZtwewwm5lFepxkdQ,10266 +scipy/spatial/tests/data/random-uint-data.txt,sha256=8IPpXhwglxzinL5PcK-PEqleZRlNKdx3zCVMoDklyrY,8711 +scipy/spatial/tests/data/selfdual-4d-polytope.txt,sha256=rkVhIL1mupGuqDrw1a5QFaODzZkdoaLMbGI_DbLLTzM,480 +scipy/spatial/tests/test__plotutils.py,sha256=vmDDeXOe4N2XPMeyw8Zx1T8b8bl3Nw5ZwT9uXx21JkU,1943 +scipy/spatial/tests/test__procrustes.py,sha256=wmmnUHRdw_oID0YLi404IEWPH6vEGhvHXSeGPY_idHo,4974 +scipy/spatial/tests/test_distance.py,sha256=OV3o042VNfGcoCsnD09hbTZwL_lT5I3OoxDxZi2pypw,83940 +scipy/spatial/tests/test_hausdorff.py,sha256=n-Qm2gVF0zc11tDSCnXBznt5Mp0E1ekTtzfWXjqG54M,7114 +scipy/spatial/tests/test_kdtree.py,sha256=cxhOBCD5tBaPcnWi3dIynKihO1ooUCExAk0Lu40wXcs,47337 +scipy/spatial/tests/test_qhull.py,sha256=C_7pd_EDYfD-9kZKV-0rggx25TQ2D0vgtfD4WB_r5Os,44147 +scipy/spatial/tests/test_slerp.py,sha256=hYH-2ROq0iswTsli4c-yBLZfACvQL0QVCKrPWTeBNls,16396 +scipy/spatial/tests/test_spherical_voronoi.py,sha256=UJU6By1eOzOhxgVYTEF5RVEkryXN70PHsXMRDG9-awQ,14361 +scipy/spatial/transform/__init__.py,sha256=vkvtowJUcu-FrMMXjEiyfnG94Cqwl000z5Nwx2F8OX0,700 +scipy/spatial/transform/__pycache__/__init__.cpython-310.pyc,, +scipy/spatial/transform/__pycache__/_rotation_groups.cpython-310.pyc,, +scipy/spatial/transform/__pycache__/_rotation_spline.cpython-310.pyc,, +scipy/spatial/transform/__pycache__/rotation.cpython-310.pyc,, +scipy/spatial/transform/_rotation.cpython-310-darwin.so,sha256=BvclmwoAhFgfDvnQebKYe00Mg1HaiQXxyeDRT9r8NEg,608284 +scipy/spatial/transform/_rotation.pyi,sha256=aCmi2IBxlGWobw_qo0LG4wMWhYjnUhcC_8miNQIzMEk,2643 +scipy/spatial/transform/_rotation_groups.py,sha256=XS-9K6xYnnwWywMMYMVznBYc1-0DPhADHQp_FIT3_f8,4422 +scipy/spatial/transform/_rotation_spline.py,sha256=M2i8qbPQwQ49D3mNtqll31gsCMqfqBJe8vOxMPRlD5M,14083 +scipy/spatial/transform/rotation.py,sha256=1c1MrrZJrKsQXLpqM0MWV-0d8XNYW9xytpcGQAVbtfk,872 +scipy/spatial/transform/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/spatial/transform/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/spatial/transform/tests/__pycache__/test_rotation.cpython-310.pyc,, +scipy/spatial/transform/tests/__pycache__/test_rotation_groups.cpython-310.pyc,, +scipy/spatial/transform/tests/__pycache__/test_rotation_spline.cpython-310.pyc,, +scipy/spatial/transform/tests/test_rotation.py,sha256=DhAKh5Yj4QjFmMtZ1Y16ICxoJInZexf7nOxBN-ceXrk,41370 +scipy/spatial/transform/tests/test_rotation_groups.py,sha256=V6DiLWvJsrdklhS-GlzcA9qEy0cTQpwaNR-7vkhBt1M,5560 +scipy/spatial/transform/tests/test_rotation_spline.py,sha256=DRNIQM5Da8xFtVnRQcI5VRJIo7DgtCeiHYn52zD0qMk,5035 
+scipy/special.pxd,sha256=h8GS4dlnM_hFchSEzjL74WPstvZWYXNMJRNAJMyFzM8,37 +scipy/special/__init__.py,sha256=KEN5FQt2nN_Ka3-RkXnGgC35W-JTvU2gpNhK-3u2W_8,29005 +scipy/special/__pycache__/__init__.cpython-310.pyc,, +scipy/special/__pycache__/_add_newdocs.cpython-310.pyc,, +scipy/special/__pycache__/_basic.cpython-310.pyc,, +scipy/special/__pycache__/_ellip_harm.cpython-310.pyc,, +scipy/special/__pycache__/_lambertw.cpython-310.pyc,, +scipy/special/__pycache__/_logsumexp.cpython-310.pyc,, +scipy/special/__pycache__/_mptestutils.cpython-310.pyc,, +scipy/special/__pycache__/_orthogonal.cpython-310.pyc,, +scipy/special/__pycache__/_sf_error.cpython-310.pyc,, +scipy/special/__pycache__/_spfun_stats.cpython-310.pyc,, +scipy/special/__pycache__/_spherical_bessel.cpython-310.pyc,, +scipy/special/__pycache__/_testutils.cpython-310.pyc,, +scipy/special/__pycache__/add_newdocs.cpython-310.pyc,, +scipy/special/__pycache__/basic.cpython-310.pyc,, +scipy/special/__pycache__/orthogonal.cpython-310.pyc,, +scipy/special/__pycache__/sf_error.cpython-310.pyc,, +scipy/special/__pycache__/specfun.cpython-310.pyc,, +scipy/special/__pycache__/spfun_stats.cpython-310.pyc,, +scipy/special/_add_newdocs.py,sha256=_sXy4AxEFRgLVduyyqQszjyg5EotjcHRN18eyNTJkR0,367144 +scipy/special/_basic.py,sha256=8PC7IIwTNWHJx0LxLKb-Wsev3eTcoH22zM1STgrmN3I,90027 +scipy/special/_comb.cpython-310-darwin.so,sha256=xxUk8eFBh1XARzSI0J4oydvUghYX_HAC669lrsIVBww,76056 +scipy/special/_ellip_harm.py,sha256=VLIdzP4XHbSHGbfbtSXdLenSZnh3c6MsYUFmED5kqhM,5272 +scipy/special/_ellip_harm_2.cpython-310-darwin.so,sha256=rkA2PfQ187sYMNZcJamYJ76RRQAcUeM41KGj71wPfTI,120224 +scipy/special/_lambertw.py,sha256=SHKKdhTcrB5dOUke16wftnSuwLpNJn1nlq6vAUohIYw,2994 +scipy/special/_logsumexp.py,sha256=YBUutkjQ35HNbJDPNvNLyhlQL2A3HqL7BJviY3DwjAY,8523 +scipy/special/_mptestutils.py,sha256=pgvlSc2cW_ZqOWb1vtR9-5NcKeTfm93XM1SYred_12I,14547 +scipy/special/_orthogonal.py,sha256=E4Cz3Ox0y0MCRP9-hxrRdJyeYcLIlIqTLpD2L0WI514,73921 +scipy/special/_orthogonal.pyi,sha256=Z92f9ZKkfz14rL1KXJdtuzuStoYK2lgMvBdi_jgICKU,8336 +scipy/special/_precompute/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/special/_precompute/__pycache__/__init__.cpython-310.pyc,, +scipy/special/_precompute/__pycache__/cosine_cdf.cpython-310.pyc,, +scipy/special/_precompute/__pycache__/expn_asy.cpython-310.pyc,, +scipy/special/_precompute/__pycache__/gammainc_asy.cpython-310.pyc,, +scipy/special/_precompute/__pycache__/gammainc_data.cpython-310.pyc,, +scipy/special/_precompute/__pycache__/lambertw.cpython-310.pyc,, +scipy/special/_precompute/__pycache__/loggamma.cpython-310.pyc,, +scipy/special/_precompute/__pycache__/struve_convergence.cpython-310.pyc,, +scipy/special/_precompute/__pycache__/utils.cpython-310.pyc,, +scipy/special/_precompute/__pycache__/wright_bessel.cpython-310.pyc,, +scipy/special/_precompute/__pycache__/wright_bessel_data.cpython-310.pyc,, +scipy/special/_precompute/__pycache__/wrightomega.cpython-310.pyc,, +scipy/special/_precompute/__pycache__/zetac.cpython-310.pyc,, +scipy/special/_precompute/cosine_cdf.py,sha256=OizpVXf6odTKmDw6vWuI87rNn88JcWr_GX8Rawh6ezk,355 +scipy/special/_precompute/expn_asy.py,sha256=o22tuPbjkwSLoJ2IJZ2I3xiV5TuXQS-eScXVaUcENbU,1333 +scipy/special/_precompute/gammainc_asy.py,sha256=P5OFRcPkkpjGQeYCaMZ8SFSUmZG_CjrEHv8OLwgcGFc,2502 +scipy/special/_precompute/gammainc_data.py,sha256=P9INHXlJgOB2cELqfoIiDggw_wf0PlXD0WC5sTGPX0c,4093 +scipy/special/_precompute/lambertw.py,sha256=YcjE3Wi1r67oTyo0hIR4TcodY6TpwTCFFL_zpqtVL6Q,1977 
+scipy/special/_precompute/loggamma.py,sha256=iq7ZBrUmk8pXYZwO_wINI4u8ENsLbL9VUShGjGO0Pt0,1094 +scipy/special/_precompute/struve_convergence.py,sha256=rYyGbATscSQvamp2TjR4UscGwJnkTXpNCDdfNRPjiM0,3432 +scipy/special/_precompute/utils.py,sha256=JXJuI07Jlm4bDHJFVtj0jHq05p-V1ofeXZB16Y05kzI,887 +scipy/special/_precompute/wright_bessel.py,sha256=2DwcOwBAs8DgdHb1I-U6RxHNYNoVG1TqoyyNl70w3NU,12882 +scipy/special/_precompute/wright_bessel_data.py,sha256=F8N4cdbzh3_2sN5rX1kyRrMjjVn4FLoKnzQiLOILI6k,5655 +scipy/special/_precompute/wrightomega.py,sha256=YpmLwtGJ4qazMDY0RXjhnQiuRAISI-Pr9MwKc7pZlhc,955 +scipy/special/_precompute/zetac.py,sha256=LmhJP7JFg7XktHvfm-DgzuiWZFtVdpvYzzLOB1ePG1Q,591 +scipy/special/_sf_error.py,sha256=q_Rbfkws1ttgTQKYLt6zFTdY6DFX2HajJe_lXiNWC0c,375 +scipy/special/_specfun.cpython-310-darwin.so,sha256=L0peldyc-eaZVQ4UQpbZoMLUgcTL8_VlyGyg4YbtLVw,333984 +scipy/special/_spfun_stats.py,sha256=Xnh6seX993udMM_6ftVaUHHwKpRuD9IopER6lPixxS0,3806 +scipy/special/_spherical_bessel.py,sha256=2gQUI5_JeJ-OZ5XCrYlYgOHk8s0E6O_Qx62PusdUWAA,10217 +scipy/special/_test_internal.cpython-310-darwin.so,sha256=BSxBY03JAQmQ-4qXAkTDu0YkNfDFSDNfio6t69vnZAo,206721 +scipy/special/_test_internal.pyi,sha256=nwSk_u-Jhkkkhtz0ePxpD4e0PaC26pQTHljEBRgZiBQ,363 +scipy/special/_testutils.py,sha256=2sGwBxdXpnwijIZrOtPM-jde8DXJ2z6Dc0XoVz-MPno,11974 +scipy/special/_ufuncs.cpython-310-darwin.so,sha256=ifmmVApoDn8ibOK3bzaM7qXLBdOHs7XhvJKfBnlpkG0,1346112 +scipy/special/_ufuncs.pyi,sha256=kfqgOV4gytlYdvYkcYUT2u8Ysd4Z1kYsf1vMapYiE8A,8809 +scipy/special/_ufuncs.pyx,sha256=3oHEVhdFctsJ8GBBkf4s_RAHrkUvPak7t3Tjoit4GcM,868083 +scipy/special/_ufuncs_cxx.cpython-310-darwin.so,sha256=EmLlNl1WgqS7AmYBHcZ7BmRkU_tRO1U6DPtCh9gPN08,370734 +scipy/special/_ufuncs_cxx.pxd,sha256=6_gtnxTzfG9FT9n1mtjp7_QPcBu9JdAr7YK37pQh5Fo,1351 +scipy/special/_ufuncs_cxx.pyx,sha256=WZV7nN09oJjJwr1tQxId6lPz7xOU6gDvCECLpDClv4o,11474 +scipy/special/_ufuncs_cxx_defs.h,sha256=ILA7eh7wHjkImCubKcRkgsGCBAfxq8qVOpelwXSyvzI,2005 +scipy/special/_ufuncs_defs.h,sha256=TPRFhAmCC1d4yHGSOpiz9XIayKPtq_g_a-2geb_1Wu0,11058 +scipy/special/add_newdocs.py,sha256=lapv7DVKDeAr2vYaZr_6lMUK9hAP6IXy-wvzx8Qifi8,644 +scipy/special/basic.py,sha256=KUTMYKk038KKpJ8fsoQoZSEJV6djRQL_Y_RgdxbXe4k,1896 +scipy/special/cython_special.cpython-310-darwin.so,sha256=HR1u1yqSYuiVFlsuAx9-od9L9P4rv_czTJkSFrCdPUM,2051200 +scipy/special/cython_special.pxd,sha256=ivthlz5-cp0SCxYhC1cM6voxvKNOYLsIETolipP8muQ,14020 +scipy/special/cython_special.pyi,sha256=BQVUCzV8lCylnmLCtnN0Yz_ttlqyzcLc-BZx2KPXPzM,58 +scipy/special/cython_special.pyx,sha256=ceO_rFKhvaRO1dIhfM7XZ81OerEaJ45upvTA5DLFAdo,157472 +scipy/special/orthogonal.py,sha256=nL0enQ_z9S6eHvkjlszRdlV1GFU8q75LEaQWnhGERKs,2053 +scipy/special/sf_error.py,sha256=He7080Os7bMgBh9v42PDsv0pLDJ8u94GWjzU-5QemTc,792 +scipy/special/specfun.py,sha256=107XC40GRWPXmx3X2Hge0K5AtNMJPIdCOCJQo2c7f9I,1059 +scipy/special/spfun_stats.py,sha256=-oa8b53MxKJrtmgb5jbwLeEyZDp9l9yvsCuv6337p7U,770 +scipy/special/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/special/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_basic.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_bdtr.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_boxcox.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_cdflib.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_cdft_asymptotic.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_cosine_distr.cpython-310.pyc,, 
+scipy/special/tests/__pycache__/test_cython_special.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_data.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_dd.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_digamma.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_ellip_harm.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_erfinv.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_exponential_integrals.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_faddeeva.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_gamma.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_gammainc.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_hyp2f1.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_hypergeometric.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_kolmogorov.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_lambertw.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_log_softmax.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_loggamma.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_logit.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_logsumexp.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_mpmath.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_nan_inputs.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_ndtr.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_ndtri_exp.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_orthogonal.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_orthogonal_eval.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_owens_t.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_pcf.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_pdtr.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_powm1.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_precompute_expn_asy.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_precompute_gammainc.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_precompute_utils.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_round.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_sf_error.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_sici.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_spence.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_spfun_stats.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_sph_harm.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_spherical_bessel.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_trig.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_wright_bessel.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_wrightomega.cpython-310.pyc,, +scipy/special/tests/__pycache__/test_zeta.cpython-310.pyc,, +scipy/special/tests/data/boost.npz,sha256=DEqbmLTura2MI3Eqrp4w43cS_EaeFSStRcWUveTRe7c,1270643 +scipy/special/tests/data/gsl.npz,sha256=ec84WYD-4TUetsmSSLVxLW918p8zeGJ-2rwUsiRV85s,51433 +scipy/special/tests/data/local.npz,sha256=GY1SrPnE6R-j7t7_akoe8nIlX13YXUWmAFlp_6-W3nA,203438 +scipy/special/tests/test_basic.py,sha256=w_zC-nn2VTjPRh79ukTlteBiV2s6hMe2mQ99DPUqk20,144186 +scipy/special/tests/test_bdtr.py,sha256=QwGyt0tnutuou25mS0u2LjRgDTYI6ohM2cbZ-He6Os4,3231 +scipy/special/tests/test_boxcox.py,sha256=gUrGF7Ql1adxiPl_YxpsGunDfg-B_WpqI9Zghzool7o,2672 +scipy/special/tests/test_cdflib.py,sha256=a1t0yM7RowZPl8RhjXhMnIzh7ZvBxItK2aMu0ud41Vw,13165 +scipy/special/tests/test_cdft_asymptotic.py,sha256=UMwy8bSxUzzcj9MkG4FHzojJRFeshe05ZqFk_32iHKA,1429 
+scipy/special/tests/test_cosine_distr.py,sha256=0NpWGY9XJ2MZw5gkDMP99IxXmhgPt3RysG_hZ5HQpO4,2691 +scipy/special/tests/test_cython_special.py,sha256=du2yZxAOZPE5mF3ANUbCVW4kTzKIY9o0Gy9sOXNcwVg,18694 +scipy/special/tests/test_data.py,sha256=bSnsxztKyPlUZWbA6mERLlOTMCTjJpLBGyBBJA4KLm0,28534 +scipy/special/tests/test_dd.py,sha256=GROHQEkzIAW6KXkj8J3nPcRDAONcf1nCoArcfx30_5s,1974 +scipy/special/tests/test_digamma.py,sha256=NlaFqc08di2L5FXArct9r0GaP_ciAxTIx-bfk-kdaks,1394 +scipy/special/tests/test_ellip_harm.py,sha256=51KiCpQjqmf2uLZEsty-Vmr0FhoABtvMUz4218WR_S0,9640 +scipy/special/tests/test_erfinv.py,sha256=fzdEHd6MxfSyzQDO93qndXukG2jWj-XNY2X4BJRIdBI,3059 +scipy/special/tests/test_exponential_integrals.py,sha256=6_iQmb3Y6G96dpgkB6z_saitaMPdJH1gLYjYwDngITQ,1868 +scipy/special/tests/test_faddeeva.py,sha256=YLY3Ylp4u_8zxTGxOb5kxNfXXEW0ld_GP2ceOR2ev_Y,2568 +scipy/special/tests/test_gamma.py,sha256=hb-ZlA2ZNz6gUGvVtMBgXFl_w30HPmthuUEAmNcz0sw,258 +scipy/special/tests/test_gammainc.py,sha256=Avv52EDQ7M8kUpiVU1BVsW_Gj5HDCzAOojLtoFojKbw,3815 +scipy/special/tests/test_hyp2f1.py,sha256=siIotfifZrhk-922XfChi9rPq_--2tsM_JZNyF2NeBk,78503 +scipy/special/tests/test_hypergeometric.py,sha256=jralqwVanO0mR0t8CI5zF0L8PqCnc_6oAtreg845Akc,5598 +scipy/special/tests/test_kolmogorov.py,sha256=nRZHg4P3GEcax-vgB-LlG9KrdlDRd36ZvGTF3oCv9po,18407 +scipy/special/tests/test_lambertw.py,sha256=A6SAKE2KBWY2YlqdOSnVRzxw1RKJ7f2ZYcBTg1-q7Bk,4556 +scipy/special/tests/test_log_softmax.py,sha256=JdiC5C1Fm16rNdQHVWRu-FGMVOv24DPWRnguDDd1zEY,3415 +scipy/special/tests/test_loggamma.py,sha256=x6kuJf-bEnn5ECdkDSgvk3An_A-9UxVsZpqa49IwAq8,1992 +scipy/special/tests/test_logit.py,sha256=PvIgcK33vQjcvHE3_3fVarKTjZ0t35-ksZnhvoqKQrA,5540 +scipy/special/tests/test_logsumexp.py,sha256=vcHdTDJQKvUfkO0I8VDRUQF4MhnF0dQi2pjDzRsggB0,6180 +scipy/special/tests/test_mpmath.py,sha256=yXaU8yhq3xT9gdiIO2uJWT5DNgbbme80-0KYT5Chbk4,75189 +scipy/special/tests/test_nan_inputs.py,sha256=1F3CRXp_DGmfUcJMr_61eW2yItjQQD9xzaIiDgbsXvI,1845 +scipy/special/tests/test_ndtr.py,sha256=-UMxTIi4CaaLoJ5-SGW9THChPIM3e1_fTY0L877ioNA,2680 +scipy/special/tests/test_ndtri_exp.py,sha256=13eabgdbfcL37RReiUH7g9amT9XMsTLOfwxFJXR_2Ww,3708 +scipy/special/tests/test_orthogonal.py,sha256=N-DdAMqe1c4o7-jHnn3aSu_gmI7ojfV-HogDLpJ251c,31295 +scipy/special/tests/test_orthogonal_eval.py,sha256=xj3-5r1s70kev3d-qiTk8m7tZ09b6JjJY9OTVxVVNx0,9319 +scipy/special/tests/test_owens_t.py,sha256=zRbiKje7KrYJ25f1ZuIBfiFSyNtK_bnkIW7dRETIqME,1792 +scipy/special/tests/test_pcf.py,sha256=RNjEWZGFS99DOGZkkPJ8HNqLULko8UkX0nEWFYX26NE,664 +scipy/special/tests/test_pdtr.py,sha256=VmupC2ezUR3p5tgZx0rqXEHAtzsikBW2YgaIxuGwO5A,1284 +scipy/special/tests/test_powm1.py,sha256=9hZeiQVKqV63J5oguYXv_vqolpnJX2XRO1JN0ouLWAM,2276 +scipy/special/tests/test_precompute_expn_asy.py,sha256=bCQikPkWbxVUeimvo79ToVPgwaudzxGC7Av-hPBgIU4,583 +scipy/special/tests/test_precompute_gammainc.py,sha256=H0UtrkFRZhqoIbyUZTcztwg81Q4j8Xkc6nOQCyuO8-8,4527 +scipy/special/tests/test_precompute_utils.py,sha256=MOvdbLbzjN5Z1JQQgtIyjwjuIMPX4s2bTc_kxaX67wc,1165 +scipy/special/tests/test_round.py,sha256=oZdjvm0Fxhv6o09IFOi8UUuLb3msbq00UdD8P_2Jwaw,421 +scipy/special/tests/test_sf_error.py,sha256=leNORk4GIa8gYQH69OK0TK_SPXOMGPvtNC-x5aJ_nT8,3521 +scipy/special/tests/test_sici.py,sha256=w4anBf8fiq2fmkwMSz3MX0uy35NLXVqfuW3Fwt2Nqek,1227 +scipy/special/tests/test_spence.py,sha256=fChPw7xncNCTPMUGb0C8BC-lDKHWoEXSz8Rb4Wv8vNo,1099 +scipy/special/tests/test_spfun_stats.py,sha256=A5SOVsQOyC12-BeIIHsi--hrog88mFmH190MOKdP4qU,1998 
+scipy/special/tests/test_sph_harm.py,sha256=PQehyslic3K2uwj8lV2g0Gh6JNVjpSYLCuVnihUlByQ,1116 +scipy/special/tests/test_spherical_bessel.py,sha256=5f2tsw0DUbs_Q4A4-BNrrDA7NzFuKEGnSJ3nwnDNWqI,14284 +scipy/special/tests/test_trig.py,sha256=WiZ-ryT7F8-kaACJKcXaA7PXSbuU4gIz_MK9Pv1gsTc,2097 +scipy/special/tests/test_wright_bessel.py,sha256=v1yLL6Ki01VuKPj5nfL-9_FaACvwdIlDsarKsm-z9EQ,4155 +scipy/special/tests/test_wrightomega.py,sha256=8afmPCC6IYN-SqbeBgqTyRgz0JfQdCs2vtxFcR_Bj9I,3550 +scipy/special/tests/test_zeta.py,sha256=IoBUdssBRj7noPjW-xs9xGFFihZ7wvQpPJidgMOFCOs,1367 +scipy/stats/__init__.py,sha256=Pyu_QRExXWdwzWfTbGTiS-UKWveKjZTM4ZlDk6PG7Gs,13702 +scipy/stats/__pycache__/__init__.cpython-310.pyc,, +scipy/stats/__pycache__/_axis_nan_policy.cpython-310.pyc,, +scipy/stats/__pycache__/_binned_statistic.cpython-310.pyc,, +scipy/stats/__pycache__/_binomtest.cpython-310.pyc,, +scipy/stats/__pycache__/_common.cpython-310.pyc,, +scipy/stats/__pycache__/_constants.cpython-310.pyc,, +scipy/stats/__pycache__/_continuous_distns.cpython-310.pyc,, +scipy/stats/__pycache__/_covariance.cpython-310.pyc,, +scipy/stats/__pycache__/_crosstab.cpython-310.pyc,, +scipy/stats/__pycache__/_discrete_distns.cpython-310.pyc,, +scipy/stats/__pycache__/_distn_infrastructure.cpython-310.pyc,, +scipy/stats/__pycache__/_distr_params.cpython-310.pyc,, +scipy/stats/__pycache__/_entropy.cpython-310.pyc,, +scipy/stats/__pycache__/_fit.cpython-310.pyc,, +scipy/stats/__pycache__/_generate_pyx.cpython-310.pyc,, +scipy/stats/__pycache__/_hypotests.cpython-310.pyc,, +scipy/stats/__pycache__/_kde.cpython-310.pyc,, +scipy/stats/__pycache__/_ksstats.cpython-310.pyc,, +scipy/stats/__pycache__/_mannwhitneyu.cpython-310.pyc,, +scipy/stats/__pycache__/_morestats.cpython-310.pyc,, +scipy/stats/__pycache__/_mstats_basic.cpython-310.pyc,, +scipy/stats/__pycache__/_mstats_extras.cpython-310.pyc,, +scipy/stats/__pycache__/_multivariate.cpython-310.pyc,, +scipy/stats/__pycache__/_odds_ratio.cpython-310.pyc,, +scipy/stats/__pycache__/_page_trend_test.cpython-310.pyc,, +scipy/stats/__pycache__/_qmc.cpython-310.pyc,, +scipy/stats/__pycache__/_relative_risk.cpython-310.pyc,, +scipy/stats/__pycache__/_resampling.cpython-310.pyc,, +scipy/stats/__pycache__/_result_classes.cpython-310.pyc,, +scipy/stats/__pycache__/_rvs_sampling.cpython-310.pyc,, +scipy/stats/__pycache__/_stats_mstats_common.cpython-310.pyc,, +scipy/stats/__pycache__/_stats_py.cpython-310.pyc,, +scipy/stats/__pycache__/_tukeylambda_stats.cpython-310.pyc,, +scipy/stats/__pycache__/_variation.cpython-310.pyc,, +scipy/stats/__pycache__/_warnings_errors.cpython-310.pyc,, +scipy/stats/__pycache__/biasedurn.cpython-310.pyc,, +scipy/stats/__pycache__/contingency.cpython-310.pyc,, +scipy/stats/__pycache__/distributions.cpython-310.pyc,, +scipy/stats/__pycache__/kde.cpython-310.pyc,, +scipy/stats/__pycache__/morestats.cpython-310.pyc,, +scipy/stats/__pycache__/mstats.cpython-310.pyc,, +scipy/stats/__pycache__/mstats_basic.cpython-310.pyc,, +scipy/stats/__pycache__/mstats_extras.cpython-310.pyc,, +scipy/stats/__pycache__/mvn.cpython-310.pyc,, +scipy/stats/__pycache__/qmc.cpython-310.pyc,, +scipy/stats/__pycache__/sampling.cpython-310.pyc,, +scipy/stats/__pycache__/statlib.cpython-310.pyc,, +scipy/stats/__pycache__/stats.cpython-310.pyc,, +scipy/stats/_axis_nan_policy.py,sha256=75c6IO_lVMWARPqyumohgNDKuysE3rtF3UxGDc6jlmE,26865 +scipy/stats/_biasedurn.cpython-310-darwin.so,sha256=fhfAGDPc9ScXTu7KeTU0KTP-hcb4ZOJlL59nQsGNfMU,251357 
+scipy/stats/_biasedurn.pxd,sha256=bQC6xG4RH1E5h2jCKXRMADfgGctiO5TgNlJegKrR7DY,1046 +scipy/stats/_binned_statistic.py,sha256=RXieYDA8LuPe_qycLZAUhFbfV4KVlqqTYbX0wQzP1yY,32710 +scipy/stats/_binomtest.py,sha256=cbmBHbwpXRap9zZElMvdYhy7ccTvH1kgi_7_iNctD1A,13043 +scipy/stats/_boost/__init__.py,sha256=e1_a5N-BBpz7qb0VeLQ7FOEURW9OfQ3tV42_fMDVkOU,1759 +scipy/stats/_boost/__pycache__/__init__.cpython-310.pyc,, +scipy/stats/_boost/beta_ufunc.cpython-310-darwin.so,sha256=WxoZFBGUj2TvLC6opL8eD4d8xB6bpChHOjeQZx2qv-g,314653 +scipy/stats/_boost/binom_ufunc.cpython-310-darwin.so,sha256=jkldO_L3Ihf5HJB5MGcO4RiY4KTpuYtR5exS25BFqI4,273710 +scipy/stats/_boost/hypergeom_ufunc.cpython-310-darwin.so,sha256=PVx8B10prncz_uQt7NcFT9iHJhq59WUB8b7jnI7vFVs,201314 +scipy/stats/_boost/invgauss_ufunc.cpython-310-darwin.so,sha256=aNYy363TzaRtjzXidcbdZi11XZWEwi_5nOCqtU03SsE,257313 +scipy/stats/_boost/nbinom_ufunc.cpython-310-darwin.so,sha256=eRY2zh_3rlm5JNNVgI5svjchTGohPHz91SppG8bEhyU,294095 +scipy/stats/_boost/ncf_ufunc.cpython-310-darwin.so,sha256=mU7v-ZdOGAzOlk1KdtBewV_uQPbP1E6UeLycvng20wk,271612 +scipy/stats/_boost/nct_ufunc.cpython-310-darwin.so,sha256=iw9Kt_7mJOcbD8Qj2n6sOP44Of15IyiTbiFNSEwLD_o,345436 +scipy/stats/_boost/ncx2_ufunc.cpython-310-darwin.so,sha256=aNhOLY4cscIFHiWW02UtCaYfKIzYMRcwgupArVqt6bc,281581 +scipy/stats/_boost/skewnorm_ufunc.cpython-310-darwin.so,sha256=F3AV8mtU97jJ5KO3rI5_6tyqy0MOr1qcuoAUg3aZw0Q,186625 +scipy/stats/_common.py,sha256=orK_lFT7s7t4Vpvqcv5NkxduZuXTdZocoPCej0iGHQ0,173 +scipy/stats/_constants.py,sha256=IsVndgfRnFz5ULWeygOfPRypaWYkUn2lAuJDypEVMNA,793 +scipy/stats/_continuous_distns.py,sha256=Dh_iHMaiXaGwlr1wqD_u3k0TVsCsRXrKhRJRrCk2dWU,322887 +scipy/stats/_covariance.py,sha256=8lQg_auysz81C9728O2ZiPsbiDhm7b0pRWqIEFr9HZA,22475 +scipy/stats/_crosstab.py,sha256=zdAePa0po_x1FwZ_j8F6ok2XNRCDBO2x17XrBr_axbY,7354 +scipy/stats/_discrete_distns.py,sha256=AYDMqBDk68gVftqNRUZrLhpXI-KGMUoY7rQwto6tz2U,54699 +scipy/stats/_distn_infrastructure.py,sha256=_pL08snbzhpBNZUiYASyPgx3RRkQRK3SQdl2xBFUcN0,149072 +scipy/stats/_distr_params.py,sha256=rblTZ_5Y1LJgitA6J3LosKwWBwyauMzAKOb8T1IL_ms,8495 +scipy/stats/_entropy.py,sha256=rvKQ2GqTfOMqHhvt9Z2FJEyNuny-xpTkslKzBQbwV0w,14362 +scipy/stats/_fit.py,sha256=GWIhw-0YtCPXXM4Ad_pNxO2BHEtCjeAmjoxk8kSJ2Ys,56992 +scipy/stats/_generate_pyx.py,sha256=U02HJYA9AUARKiSwa8S2aUpvZUOuGTA4dQvLkNpUSxk,2684 +scipy/stats/_hypotests.py,sha256=Vn0S65eR6t63PdAOktVhSuPZ4HuPpzq_XoaYfdXTtVw,78541 +scipy/stats/_kde.py,sha256=g6aGUrnpzb6sP5tfX-Al_FMRM4tDTwVg75uMH0FWGjc,24984 +scipy/stats/_ksstats.py,sha256=679ltXKXKtbr1gJagXwlxa_cyaZdMMZZKqlrrJErOuw,20086 +scipy/stats/_levy_stable/__init__.py,sha256=rDM-vHqixRhEu8eDhr8FJ0ZdUu4Vg7dlBjz5lNexQNw,43857 +scipy/stats/_levy_stable/__pycache__/__init__.cpython-310.pyc,, +scipy/stats/_levy_stable/levyst.cpython-310-darwin.so,sha256=ibzMZkH1APalK9_ZY2LaiyZzjR49ehiPpUchmTTc5SM,60153 +scipy/stats/_mannwhitneyu.py,sha256=EuSeGllPIXOLGSZXn7Bb3TNSVQmLznfQT6zccgupOvg,18955 +scipy/stats/_morestats.py,sha256=gM1Qo4ni_CFHRk_SA4r-gGbP9Czmv49QpVUlb2xaPx0,149863 +scipy/stats/_mstats_basic.py,sha256=0qHcJERJmjQlzbTpXq2RZS8NxvS5T8FYfmXIOJ4vpfs,117907 +scipy/stats/_mstats_extras.py,sha256=Eyg8-p6J5G20nZS0laPOFrLpfqcyqdK4t9Sn6z1laLs,15610 +scipy/stats/_multivariate.py,sha256=AXRSODqmzDf_8CtRqj7SrqxrdATBjqDQ9kBV6bAm7zI,189331 +scipy/stats/_mvn.cpython-310-darwin.so,sha256=_Ab1UqG-4cXrbxUHUu4nL2rPCQvEPNGDFUMTHcAPwtg,90711 +scipy/stats/_odds_ratio.py,sha256=7K-BHxttu8uS5IbvS2yWw-_LQlkedNKtDkjZ14h2Pck,16895 
+scipy/stats/_page_trend_test.py,sha256=OUkFeY8ck7DqDTUfXsrDcy5RhNpCTRXwcvGVY_CDmVY,19042 +scipy/stats/_qmc.py,sha256=wDnTo7hsUiO9vREdDCSYTvn5bYarGyJV-VL4lVmRLHU,91136 +scipy/stats/_qmc_cy.cpython-310-darwin.so,sha256=36UKDdZCqb2rbXbtcMAVxgpQtplAa5P1tI9__MGCC1E,220010 +scipy/stats/_qmc_cy.pyi,sha256=xOpTSlaG_1YDZhkJjQQtukbcgOTAR9FpcRMkU5g9mXc,1134 +scipy/stats/_rcont/__init__.py,sha256=y84KLdKD_7JU_mfLrlTDtpdQG5LgqRFb_bBUbQVxZOY,108 +scipy/stats/_rcont/__pycache__/__init__.cpython-310.pyc,, +scipy/stats/_rcont/rcont.cpython-310-darwin.so,sha256=8QVCEgpih-Xm3kPggRrDuCsdDfpC6n04EK5BRB42db8,272840 +scipy/stats/_relative_risk.py,sha256=Vj_pgUV9UkQ6li_4uPl7EXBk9Ou7r4kDUzc9G8rAA0Q,9572 +scipy/stats/_resampling.py,sha256=_h_ivvbTIVw33mPREo3gdlJ0tPzlRJ6N_3C594X_YwI,69836 +scipy/stats/_result_classes.py,sha256=z04e8P0QG0Rm7CsZjOo3mWtIp_A06adcknXZSoZQUyI,843 +scipy/stats/_rvs_sampling.py,sha256=EhgTP7HlSKWr7Jg7RwsT9k4ICzP2btnxSOYxkOvAaF0,7228 +scipy/stats/_sobol.cpython-310-darwin.so,sha256=FOVUvcAMabjfktvojdgiDuvxeoGRTKJovsnsAhp-JzU,292345 +scipy/stats/_sobol.pyi,sha256=TAywylI75AF9th9QZY8TYfHvIQ1cyM5QZi7eBOAkrbg,971 +scipy/stats/_sobol_direction_numbers.npz,sha256=SFmTEUfULORluGBcsnf5V9mLg50DGU_fBleTV5BtGTs,589334 +scipy/stats/_statlib.cpython-310-darwin.so,sha256=td7CrOEVe0yUnDE2x-0elQT-kNlx2_lM9qlHJIgSdpA,73547 +scipy/stats/_stats.cpython-310-darwin.so,sha256=rpxTrZnUYjDLXeJ38-Of_e2S5kuTDJTYC41goahLLcI,582825 +scipy/stats/_stats.pxd,sha256=7WeZIqov-BqAINcZdV2YajNlXERyeafQRPqNjMC_fhA,663 +scipy/stats/_stats_mstats_common.py,sha256=ESU0KLV8iXwxxrmHV-nwtfmBr5B1_EG1daw4N8XPhCM,18652 +scipy/stats/_stats_py.py,sha256=M-RUYxGOtIafRCe4NuFXN1sNrigmp7PSlX0qlAen5Bk,355576 +scipy/stats/_stats_pythran.cpython-310-darwin.so,sha256=wBQbAgmA8vO30tA0u5f-J6KsaQjXZJ32yAGxfocqG8Q,132865 +scipy/stats/_tukeylambda_stats.py,sha256=ZL8ifXFFS5O-5IAGVmx8nsErRwLWJK68585cvLC5khg,6869 +scipy/stats/_unuran/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/stats/_unuran/__pycache__/__init__.cpython-310.pyc,, +scipy/stats/_unuran/unuran.pxd,sha256=IIB-izI1fWQKXZ6xt7_y827d-Z6RzKiZkBJ-sJHEsEE,43357 +scipy/stats/_unuran/unuran_wrapper.cpython-310-darwin.so,sha256=AO1Rh88tfJjlgKyLWVxPTu04mckI5vEbZP1RHco4WHQ,1230177 +scipy/stats/_unuran/unuran_wrapper.pyi,sha256=-AoK2lySYcifl6GtQSs26ZkcCBw_wYpvuY4mhxRYk2U,5601 +scipy/stats/_variation.py,sha256=XqYI_QMi9Oj9CPte7k5gxr7ifFvrByAmJCz3i9D0NFU,8329 +scipy/stats/_warnings_errors.py,sha256=Dp4wSeo1NC2JmqcUMbyVfEcLeWdTEpVDWqOFTg4134g,1195 +scipy/stats/biasedurn.py,sha256=EPX7Ft1Uo28nEk1Bo_XIFNASdYfknXRbx-lXnrw4jLU,690 +scipy/stats/contingency.py,sha256=tG8X-wpLLp18_7MUSZ3a6wwfimH3UUqDDNGHmw0T9G0,14208 +scipy/stats/distributions.py,sha256=0VRiF_gbq3hZMdB9AONbXNE8bDLfO0dlQMjZIbldSTg,803 +scipy/stats/kde.py,sha256=CteJ2T0-4kFFT0wqpwwa3nhewyE_LnAUC0qlLnfoWNo,923 +scipy/stats/morestats.py,sha256=Lzo2TJSmmnO2VuujfDTx6R-1h2mc-rdutjbKXQx6bJ8,1620 +scipy/stats/mstats.py,sha256=Uqwz-P46lDBWfL7uumXpD_qhV-M-OTJfSTCBJVUnJZk,2262 +scipy/stats/mstats_basic.py,sha256=fcqdbCirE88xnXXOu2fEgFOISLDwobB9_oBKb7Ma9YI,2123 +scipy/stats/mstats_extras.py,sha256=zpvhK6MODW78ymWOpnj-QHc7bxpdPdG22Yr2Rypndw8,1001 +scipy/stats/mvn.py,sha256=lBrOC0EQSv585vPnhUCdNCSvqq4Ns5X1i7zKJDy3rXU,784 +scipy/stats/qmc.py,sha256=RTwVBoIQCDo-oCbfa_O3RN5ZhPow6-aGgulYQhu9Te8,11648 +scipy/stats/sampling.py,sha256=Ca2PMnize44Q_bUlwysc31IrKKCs5VDkGghLcx8iRNE,1196 +scipy/stats/statlib.py,sha256=KU5sYHMhlfnpCbLt5FdMASKQ57GLIDA4AZ4gZWDCK4w,776 +scipy/stats/stats.py,sha256=MiI6nrmK_b2MoC14DIAzUCe9jGyuuhEtgSpUSr61ONQ,2702 
+scipy/stats/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +scipy/stats/tests/__pycache__/__init__.cpython-310.pyc,, +scipy/stats/tests/__pycache__/common_tests.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_axis_nan_policy.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_binned_statistic.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_boost_ufuncs.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_contingency.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_continuous_basic.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_crosstab.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_discrete_basic.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_discrete_distns.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_distributions.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_entropy.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_fit.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_hypotests.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_kdeoth.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_morestats.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_mstats_basic.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_mstats_extras.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_multivariate.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_odds_ratio.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_qmc.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_rank.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_relative_risk.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_resampling.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_sampling.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_stats.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_tukeylambda_stats.cpython-310.pyc,, +scipy/stats/tests/__pycache__/test_variation.cpython-310.pyc,, +scipy/stats/tests/common_tests.py,sha256=dF0Hy7dxuTS2M51fSPOMYBQ8_4Nmu1uCYDJH3h1Uz0I,15380 +scipy/stats/tests/data/__pycache__/fisher_exact_results_from_r.cpython-310.pyc,, +scipy/stats/tests/data/fisher_exact_results_from_r.py,sha256=BKxPAi4h3IOebcZYGxCbutYuAX0tlb40P0DEkfEi918,27349 +scipy/stats/tests/data/levy_stable/stable-Z1-cdf-sample-data.npy,sha256=zxjB8tZaIyvyxxISgt8xvyqL6Cevr8TtgQ7TdFfuiYo,183728 +scipy/stats/tests/data/levy_stable/stable-Z1-pdf-sample-data.npy,sha256=_umVErq0zMZWm0e5JOSwNOHNurViT6_H4SBki9X3oSg,183688 +scipy/stats/tests/data/levy_stable/stable-loc-scale-sample-data.npy,sha256=88cZ7dVDH7nnuey20Z48p6kJUpi9GfImaFsPykDwwHM,9328 +scipy/stats/tests/data/nist_anova/AtmWtAg.dat,sha256=Qdd0i7H4cNhAABfFOZPuplhi_9SCquFpO-hNkyRcMD8,3063 +scipy/stats/tests/data/nist_anova/SiRstv.dat,sha256=x9wJ2g1qnzf4DK_w9F_WiOiDMDEg4td2z6uU77G07xM,1947 +scipy/stats/tests/data/nist_anova/SmLs01.dat,sha256=KdnJedRthF7XLA-w7XkIPIMTgzu89yBAMmZA2H4uQOQ,6055 +scipy/stats/tests/data/nist_anova/SmLs02.dat,sha256=nCPyxRk1dAoSPWiC7kG4dLaXs2GL3-KRXRt2NwgXoIA,46561 +scipy/stats/tests/data/nist_anova/SmLs03.dat,sha256=6yPHiQSk0KI4oURQOk99t-uEm-IZN-8eIPHb_y0mQ1U,451566 +scipy/stats/tests/data/nist_anova/SmLs04.dat,sha256=fI-HpgJF9cdGdBinclhVzOcWCCc5ZJZuXalUwirV-lc,6815 +scipy/stats/tests/data/nist_anova/SmLs05.dat,sha256=iJTaAWUFn7DPLTd9bQh_EMKEK1DPG0fnN8xk7BQlPRE,53799 +scipy/stats/tests/data/nist_anova/SmLs06.dat,sha256=riOkYT-LRgmJhPpCK32x7xYnD38gwnh_Eo1X8OK3eN8,523605 +scipy/stats/tests/data/nist_anova/SmLs07.dat,sha256=QtSS11d-vkVvqaIEeJ6oNwyET1CKoyQqjlfBl2sTOJA,7381 
+scipy/stats/tests/data/nist_anova/SmLs08.dat,sha256=qrxQQ0I6gnhrefygKwT48x-bz-8laD8Vpn7c81nITRg,59228 +scipy/stats/tests/data/nist_anova/SmLs09.dat,sha256=qmELOQyNlH7CWOMt8PQ0Z_yxgg9Hxc4lqZOuHZxxWuc,577633 +scipy/stats/tests/data/nist_linregress/Norris.dat,sha256=zD_RTRxfqJHVZTAAyddzLDDbhCzKSfwFGr3hwZ1nq30,2591 +scipy/stats/tests/data/studentized_range_mpmath_ref.json,sha256=icZGNBodwmJNzOyEki9MreI2lS6nQJNWfnVJiHRNRNM,29239 +scipy/stats/tests/test_axis_nan_policy.py,sha256=KXk4_PqHAh1SkQPJSxfX03d8Ly65Eg1-S0-GolyDDV4,44465 +scipy/stats/tests/test_binned_statistic.py,sha256=CCsd8CaAe5Obajj6q8hyrPEbFZJcc2OFVHiE5aRLxTk,18818 +scipy/stats/tests/test_boost_ufuncs.py,sha256=5IWVCvQ7-pD9EiXUBUMtbPePE5dbjsm5tkdZFY7zLHg,1612 +scipy/stats/tests/test_contingency.py,sha256=fMeGnTldQjLa5CSaaQ6qH90JXzrUivthVD-9DafgQm0,7706 +scipy/stats/tests/test_continuous_basic.py,sha256=K0eoTRnKzalMiQ5ZfXIsk3ew9IeT3FzIqEwuGx9WULs,41523 +scipy/stats/tests/test_crosstab.py,sha256=tvCoZGfVasNIhYxLQIe3dcdMm34s2ykxxPmCRTIOFc0,3882 +scipy/stats/tests/test_discrete_basic.py,sha256=Pw969IIdB0yf-HF9sZ5-WyUchbqlfgZTu6LnM4hV2BY,20097 +scipy/stats/tests/test_discrete_distns.py,sha256=hDZ5_6IX8Ec2tOTSMrdhxee3WMcjQi6hMpOZwgAGCuA,20066 +scipy/stats/tests/test_distributions.py,sha256=i3AzoT2RVTlm7q6_Vw08dcnQYqR9h67mOmhxXcKvNaw,299382 +scipy/stats/tests/test_entropy.py,sha256=yHMmAmQgvm7QyrQKuh5gnEXJra-NbcVDwOaI-FgCP4M,11278 +scipy/stats/tests/test_fit.py,sha256=18Bionw5I29f6EaKAgzbSO9tbSz-tj3XzqlWaGxe4R8,37232 +scipy/stats/tests/test_hypotests.py,sha256=YUJlkTWbcv1UaXYK9UUsNCirnHSb59ndyP_qVxltb00,73067 +scipy/stats/tests/test_kdeoth.py,sha256=7KOD-TJb8aDMQJ3DWnVWk9pm5LEf6LUuaqE7oy3GZHM,20331 +scipy/stats/tests/test_morestats.py,sha256=xQglIKZOdelT6vPZwPvqB-1nNDS4upctkj_KmQI9z1A,113187 +scipy/stats/tests/test_mstats_basic.py,sha256=1lUwHbHd5GkbPd1DTrheoK3md4P7kAUt3aKQj8KTQew,83139 +scipy/stats/tests/test_mstats_extras.py,sha256=miYVK6uwePW4c42pieoRv7GewL5C2WEpX99ZNPDzMIk,6066 +scipy/stats/tests/test_multivariate.py,sha256=ouctxcObCIpH04gjcbyrP8-YaWyvQZPcODiMdTILCAQ,112519 +scipy/stats/tests/test_odds_ratio.py,sha256=RIsmgnmUUH3DvynDRZUaS6llCbXm2oWIfPa48IJJ-gI,6705 +scipy/stats/tests/test_qmc.py,sha256=NcZJt5QliBs3jGJ_ApbkVrmXOwLHL6Zk_0-BPmK3R3Q,51173 +scipy/stats/tests/test_rank.py,sha256=SNomJb8wNZAZGMblTOQeonH7qt2Tv9DUTBIbRYo5gm0,11273 +scipy/stats/tests/test_relative_risk.py,sha256=oKdAXLoWNI4Wnk2N1NFH_I3kTj4jP_43kksW94WFL-4,3647 +scipy/stats/tests/test_resampling.py,sha256=hYKH9OscFqp9sSyaFJ3f2rXcXviamyFqTcDhnyAiD1Y,67172 +scipy/stats/tests/test_sampling.py,sha256=NMzyeq6R3XgH-OWWTAopz526szwNE-xB_LY4TpBPRjw,50757 +scipy/stats/tests/test_stats.py,sha256=g90ZFdlUNO-gCqC-GhO-vWeYlz2HuaxLANqPyW__FsY,337777 +scipy/stats/tests/test_tukeylambda_stats.py,sha256=eF6_VaRX71mlU3QdQnJF60uo0LcSzog-BHWFfunt_uI,3232 +scipy/stats/tests/test_variation.py,sha256=WUZAV07LhaScxztzN7Vv2OAWjV-b42FxgsGZtGf0WYI,6245 +scipy/version.py,sha256=xTEo67E68esPZSDVaIfIO7zyfB9m0OTr4fdGzYO6SyM,261 diff --git a/__packaged__/coreml/.python_dependencies/scipy-1.10.1.dist-info/REQUESTED b/__packaged__/coreml/.python_dependencies/scipy-1.10.1.dist-info/REQUESTED new file mode 100644 index 00000000..e69de29b diff --git a/__packaged__/coreml/.python_dependencies/scipy-1.10.1.dist-info/WHEEL b/__packaged__/coreml/.python_dependencies/scipy-1.10.1.dist-info/WHEEL new file mode 100644 index 00000000..5f08ebc3 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/scipy-1.10.1.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: meson +Root-Is-Purelib: 
false +Tag: cp310-cp310-macosx_12_0_arm64 \ No newline at end of file diff --git a/__packaged__/coreml/.python_dependencies/scipy/.dylibs/libgcc_s.1.1.dylib b/__packaged__/coreml/.python_dependencies/scipy/.dylibs/libgcc_s.1.1.dylib new file mode 100644 index 00000000..99a74266 Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/.dylibs/libgcc_s.1.1.dylib differ diff --git a/__packaged__/coreml/.python_dependencies/scipy/.dylibs/libgfortran.5.dylib b/__packaged__/coreml/.python_dependencies/scipy/.dylibs/libgfortran.5.dylib new file mode 100755 index 00000000..f058d1cc Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/.dylibs/libgfortran.5.dylib differ diff --git a/__packaged__/coreml/.python_dependencies/scipy/.dylibs/libopenblas.0.dylib b/__packaged__/coreml/.python_dependencies/scipy/.dylibs/libopenblas.0.dylib new file mode 100755 index 00000000..bebeee53 Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/.dylibs/libopenblas.0.dylib differ diff --git a/__packaged__/coreml/.python_dependencies/scipy/.dylibs/libquadmath.0.dylib b/__packaged__/coreml/.python_dependencies/scipy/.dylibs/libquadmath.0.dylib new file mode 100755 index 00000000..8006b8bf Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/.dylibs/libquadmath.0.dylib differ diff --git a/__packaged__/coreml/.python_dependencies/scipy/__config__.py b/__packaged__/coreml/.python_dependencies/scipy/__config__.py new file mode 100644 index 00000000..08500de5 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/scipy/__config__.py @@ -0,0 +1,147 @@ +# This file is generated by SciPy's build process +# It contains system_info results at the time of building this package. +from enum import Enum + +__all__ = ["show"] +_built_with_meson = True + + +class DisplayModes(Enum): + stdout = "stdout" + dicts = "dicts" + + +def _cleanup(d): + """ + Removes empty values in a `dict` recursively + This ensures we remove values that Meson could not provide to CONFIG + """ + if isinstance(d, dict): + return { k: _cleanup(v) for k, v in d.items() if v != '' and _cleanup(v) != '' } + else: + return d + + +CONFIG = _cleanup( + { + "Compilers": { + "c": { + "name": "clang", + "linker": "ld64", + "version": "13.1.6", + "commands": "cc", + }, + "cython": { + "name": "cython", + "linker": "cython", + "version": "0.29.33", + "commands": "cython", + }, + "c++": { + "name": "clang", + "linker": "ld64", + "version": "13.1.6", + "commands": "c++", + }, + "fortran": { + "name": "gcc", + "linker": "ld64", + "version": "12.1.0", + "commands": "gfortran", + }, + "pythran": { + "version": "0.12.1", + "include directory": r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/pip-build-env-jl8pxyjb/overlay/lib/python3.10/site-packages/pythran" + }, + }, + "Machine Information": { + "host": { + "cpu": "aarch64", + "family": "aarch64", + "endian": "little", + "system": "darwin", + }, + "build": { + "cpu": "aarch64", + "family": "aarch64", + "endian": "little", + "system": "darwin", + }, + "cross-compiled": bool("False".lower().replace('false', '')), + }, + "Build Dependencies": { + "blas": { + "name": "OpenBLAS", + "found": bool("True".lower().replace('false', '')), + "version": "0.3.18", + "detection method": "cmake", + "include directory": r"unknown", + "lib directory": r"unknown", + "openblas configuration": "unknown", + "pc file directory": r"unknown", + }, + "lapack": { + "name": "OpenBLAS", + "found": bool("True".lower().replace('false', '')), + 
"version": "0.3.18", + "detection method": "cmake", + "include directory": r"unknown", + "lib directory": r"unknown", + "openblas configuration": "unknown", + "pc file directory": r"unknown", + }, + }, + "Python Information": { + "path": r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cibw-run-s3_k_ke5/cp310-macosx_arm64/build/venv/bin/python", + "version": "3.10", + }, + } +) + + +def _check_pyyaml(): + import yaml + + return yaml + + +def show(mode=DisplayModes.stdout.value): + """ + Show libraries and system information on which SciPy was built + and is being used + + Parameters + ---------- + mode : {`'stdout'`, `'dicts'`}, optional. + Indicates how to display the config information. + `'stdout'` prints to console, `'dicts'` returns a dictionary + of the configuration. + + Returns + ------- + out : {`dict`, `None`} + If mode is `'dicts'`, a dict is returned, else None + + Notes + ----- + 1. The `'stdout'` mode will give more readable + output if ``pyyaml`` is installed + + """ + if mode == DisplayModes.stdout.value: + try: # Non-standard library, check import + yaml = _check_pyyaml() + + print(yaml.dump(CONFIG)) + except ModuleNotFoundError: + import warnings + import json + + warnings.warn("Install `pyyaml` for better output", stacklevel=1) + print(json.dumps(CONFIG, indent=2)) + elif mode == DisplayModes.dicts.value: + return CONFIG + else: + raise AttributeError( + f"Invalid `mode`, use one of: {', '.join([e.value for e in DisplayModes])}" + ) diff --git a/__packaged__/coreml/.python_dependencies/scipy/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/__init__.py new file mode 100644 index 00000000..428a14d7 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/scipy/__init__.py @@ -0,0 +1,207 @@ +""" +SciPy: A scientific computing package for Python +================================================ + +Documentation is available in the docstrings and +online at https://docs.scipy.org. + +Contents +-------- +SciPy imports all the functions from the NumPy namespace, and in +addition provides: + +Subpackages +----------- +Using any of these subpackages requires an explicit import. For example, +``import scipy.cluster``. + +:: + + cluster --- Vector Quantization / Kmeans + datasets --- Dataset methods + fft --- Discrete Fourier transforms + fftpack --- Legacy discrete Fourier transforms + integrate --- Integration routines + interpolate --- Interpolation Tools + io --- Data input and output + linalg --- Linear algebra routines + linalg.blas --- Wrappers to BLAS library + linalg.lapack --- Wrappers to LAPACK library + misc --- Various utilities that don't have + another home. 
+ ndimage --- N-D image package
+ odr --- Orthogonal Distance Regression
+ optimize --- Optimization Tools
+ signal --- Signal Processing Tools
+ signal.windows --- Window functions
+ sparse --- Sparse Matrices
+ sparse.linalg --- Sparse Linear Algebra
+ sparse.linalg.dsolve --- Linear Solvers
+ sparse.linalg.dsolve.umfpack --- Interface to the UMFPACK library
+ sparse.linalg.eigen --- Sparse Eigenvalue Solvers
+ sparse.linalg.eigen.lobpcg --- Locally Optimal Block Preconditioned
+ Conjugate Gradient Method (LOBPCG)
+ spatial --- Spatial data structures and algorithms
+ special --- Special functions
+ stats --- Statistical Functions
+
+Utility tools
+-------------
+::
+
+ test --- Run scipy unit tests
+ show_config --- Show scipy build configuration
+ show_numpy_config --- Show numpy build configuration
+ __version__ --- SciPy version string
+ __numpy_version__ --- NumPy version string
+
+"""
+
+from numpy import show_config as show_numpy_config
+if show_numpy_config is None:
+ raise ImportError(
+ "Cannot import SciPy when running from NumPy source directory.")
+from numpy import __version__ as __numpy_version__
+
+# Import numpy symbols into the scipy namespace (DEPRECATED)
+from ._lib.deprecation import _deprecated
+import numpy as np
+_msg = ('scipy.{0} is deprecated and will be removed in SciPy 2.0.0, '
+ 'use numpy.{0} instead')
+
+# deprecate callable objects from numpy, skipping classes and modules
+import types as _types  # noqa: E402
+for _key in np.__all__:
+ if _key.startswith('_'):
+ continue
+ _fun = getattr(np, _key)
+ if isinstance(_fun, _types.ModuleType):
+ continue
+ if callable(_fun) and not isinstance(_fun, type):
+ _fun = _deprecated(_msg.format(_key))(_fun)
+ globals()[_key] = _fun
+del np, _types
+
+from numpy.random import rand, randn
+_msg = ('scipy.{0} is deprecated and will be removed in SciPy 2.0.0, '
+ 'use numpy.random.{0} instead')
+rand = _deprecated(_msg.format('rand'))(rand)
+randn = _deprecated(_msg.format('randn'))(randn)
+
+# fft is especially problematic, so it was removed in SciPy 1.6.0
+from numpy.fft import ifft
+ifft = _deprecated('scipy.ifft is deprecated and will be removed in SciPy '
+ '2.0.0, use scipy.fft.ifft instead')(ifft)
+
+from numpy.lib import scimath  # noqa: E402
+_msg = ('scipy.{0} is deprecated and will be removed in SciPy 2.0.0, '
+ 'use numpy.lib.scimath.{0} instead')
+for _key in scimath.__all__:
+ _fun = getattr(scimath, _key)
+ if callable(_fun):
+ _fun = _deprecated(_msg.format(_key))(_fun)
+ globals()[_key] = _fun
+del scimath
+del _msg, _fun, _key, _deprecated
+
+# We first need to detect, in a reliable manner, whether we are being
+# called as part of the SciPy setup procedure itself.
+try:
+ __SCIPY_SETUP__
+except NameError:
+ __SCIPY_SETUP__ = False
+
+
+if __SCIPY_SETUP__:
+ import sys
+ sys.stderr.write('Running from SciPy source directory.\n')
+ del sys
+else:
+ try:
+ from scipy.__config__ import show as show_config
+ except ImportError as e:
+ msg = """Error importing SciPy: you cannot import SciPy while
+ in the SciPy source directory; please exit the source
+ tree first and relaunch your Python interpreter."""
+ raise ImportError(msg) from e
+
+ from scipy.version import version as __version__
+
+ # Allow distributors to run custom init code
+ from . import _distributor_init
+ del _distributor_init
+
+ from scipy._lib import _pep440
+ # In maintenance branch, change to np_maxversion N+3 if numpy is at N
+ # See setup.py for more details
+ np_minversion = '1.19.5'
+ np_maxversion = '1.27.0'
+ if (_pep440.parse(__numpy_version__) < _pep440.Version(np_minversion) or
+ _pep440.parse(__numpy_version__) >= _pep440.Version(np_maxversion)):
+ import warnings
+ warnings.warn(f"A NumPy version >={np_minversion} and <{np_maxversion}"
+ f" is required for this version of SciPy (detected "
+ f"version {__numpy_version__})",
+ UserWarning)
+ del _pep440
+
+ # This is the first import of an extension module within SciPy. If there's
+ # a general issue with the install, such that extension modules are missing
+ # or cannot be imported, this is where we'll get a failure - so give an
+ # informative error message.
+ try:
+ from scipy._lib._ccallback import LowLevelCallable
+ except ImportError as e:
+ msg = "The `scipy` install you are using seems to be broken " + \
+ "(extension modules cannot be imported); " + \
+ "please try reinstalling."
+ raise ImportError(msg) from e
+
+ from scipy._lib._testutils import PytestTester
+ test = PytestTester(__name__)
+ del PytestTester
+
+ submodules = [
+ 'cluster',
+ 'datasets',
+ 'fft',
+ 'fftpack',
+ 'integrate',
+ 'interpolate',
+ 'io',
+ 'linalg',
+ 'misc',
+ 'ndimage',
+ 'odr',
+ 'optimize',
+ 'signal',
+ 'sparse',
+ 'spatial',
+ 'special',
+ 'stats'
+ ]
+
+ __all__ = submodules + [
+ 'LowLevelCallable',
+ 'test',
+ 'show_config',
+ '__version__',
+ '__numpy_version__'
+ ]
+
+ def __dir__():
+ return __all__
+
+ import importlib as _importlib
+
+ def __getattr__(name):
+ if name in submodules:
+ return _importlib.import_module(f'scipy.{name}')
+ else:
+ try:
+ return globals()[name]
+ except KeyError:
+ raise AttributeError(
+ f"Module 'scipy' has no attribute '{name}'"
+ )
diff --git a/__packaged__/coreml/.python_dependencies/scipy/_distributor_init.py b/__packaged__/coreml/.python_dependencies/scipy/_distributor_init.py
new file mode 100644
index 00000000..552143c8
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/_distributor_init.py
@@ -0,0 +1,10 @@
+""" Distributor init file
+
+Distributors: you can add custom code here to support particular distributions
+of SciPy.
+
+For example, this is a good place to put any checks for hardware requirements.
+
+The SciPy standard source distribution will not put code in this file, so you
+can safely replace this file with your own version.
+"""
diff --git a/__packaged__/coreml/.python_dependencies/scipy/_lib/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/_lib/__init__.py
new file mode 100644
index 00000000..21409700
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/_lib/__init__.py
@@ -0,0 +1,14 @@
+"""
+Module containing private utility functions
+===========================================
+
+The ``scipy._lib`` namespace is empty (for now). Tests for all
+utilities in submodules of ``_lib`` can be run with::
+
+ from scipy import _lib
+ _lib.test()
+
+"""
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/__packaged__/coreml/.python_dependencies/scipy/_lib/_bunch.py b/__packaged__/coreml/.python_dependencies/scipy/_lib/_bunch.py
new file mode 100644
index 00000000..4fe15959
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/_lib/_bunch.py
@@ -0,0 +1,226 @@
+
+import sys as _sys
+from keyword import iskeyword as _iskeyword
+
+
+def _validate_names(typename, field_names, extra_field_names):
+ """
+ Ensure that all the given names are valid Python identifiers that
+ do not start with '_'. Also check that there are no duplicates
+ among field_names + extra_field_names.
+ """
+ for name in [typename] + field_names + extra_field_names:
+ if type(name) is not str:
+ raise TypeError('typename and all field names must be strings')
+ if not name.isidentifier():
+ raise ValueError('typename and all field names must be valid '
+ f'identifiers: {name!r}')
+ if _iskeyword(name):
+ raise ValueError('typename and all field names cannot be a '
+ f'keyword: {name!r}')
+
+ seen = set()
+ for name in field_names + extra_field_names:
+ if name.startswith('_'):
+ raise ValueError('Field names cannot start with an underscore: '
+ f'{name!r}')
+ if name in seen:
+ raise ValueError(f'Duplicate field name: {name!r}')
+ seen.add(name)
+
+
+# Note: This code is adapted from CPython:Lib/collections/__init__.py
+def _make_tuple_bunch(typename, field_names, extra_field_names=None,
+ module=None):
+ """
+ Create a namedtuple-like class with additional attributes.
+
+ This function creates a subclass of tuple that acts like a namedtuple
+ and that has additional attributes.
+
+ The additional attributes are listed in `extra_field_names`. The
+ values assigned to these attributes are not part of the tuple.
+
+ The reason this function exists is to allow functions in SciPy
+ that currently return a tuple or a namedtuple to return objects
+ that have additional attributes, while maintaining backwards
+ compatibility.
+
+ This should only be used to enhance *existing* functions in SciPy.
+ New functions are free to create objects as return values without
+ having to maintain backwards compatibility with an old tuple or
+ namedtuple return value.
+
+ Parameters
+ ----------
+ typename : str
+ The name of the type.
+ field_names : list of str
+ List of names of the values to be stored in the tuple. These names
+ will also be attributes of instances, so the values in the tuple
+ can be accessed by indexing or as attributes. At least one name
+ is required. See the Notes for additional restrictions.
+ extra_field_names : list of str, optional
+ List of names of values that will be stored as attributes of the
+ object. See the Notes for additional restrictions.
+
+ Returns
+ -------
+ cls : type
+ The new class.
+
+ Notes
+ -----
+ There are restrictions on the names that may be used in `field_names`
+ and `extra_field_names`:
+
+ * The names must be unique--no duplicates allowed.
+ * The names must be valid Python identifiers, and must not begin with
+ an underscore.
+ * The names must not be Python keywords (e.g. 'def', 'and', etc., are
+ not allowed).
+ + Examples + -------- + >>> from scipy._lib._bunch import _make_tuple_bunch + + Create a class that acts like a namedtuple with length 2 (with field + names `x` and `y`) that will also have the attributes `w` and `beta`: + + >>> Result = _make_tuple_bunch('Result', ['x', 'y'], ['w', 'beta']) + + `Result` is the new class. We call it with keyword arguments to create + a new instance with given values. + + >>> result1 = Result(x=1, y=2, w=99, beta=0.5) + >>> result1 + Result(x=1, y=2, w=99, beta=0.5) + + `result1` acts like a tuple of length 2: + + >>> len(result1) + 2 + >>> result1[:] + (1, 2) + + The values assigned when the instance was created are available as + attributes: + + >>> result1.y + 2 + >>> result1.beta + 0.5 + """ + if len(field_names) == 0: + raise ValueError('field_names must contain at least one name') + + if extra_field_names is None: + extra_field_names = [] + _validate_names(typename, field_names, extra_field_names) + + typename = _sys.intern(str(typename)) + field_names = tuple(map(_sys.intern, field_names)) + extra_field_names = tuple(map(_sys.intern, extra_field_names)) + + all_names = field_names + extra_field_names + arg_list = ', '.join(field_names) + full_list = ', '.join(all_names) + repr_fmt = ''.join(('(', + ', '.join(f'{name}=%({name})r' for name in all_names), + ')')) + tuple_new = tuple.__new__ + _dict, _tuple, _zip = dict, tuple, zip + + # Create all the named tuple methods to be added to the class namespace + + s = f"""\ +def __new__(_cls, {arg_list}, **extra_fields): + return _tuple_new(_cls, ({arg_list},)) + +def __init__(self, {arg_list}, **extra_fields): + for key in self._extra_fields: + if key not in extra_fields: + raise TypeError("missing keyword argument '%s'" % (key,)) + for key, val in extra_fields.items(): + if key not in self._extra_fields: + raise TypeError("unexpected keyword argument '%s'" % (key,)) + self.__dict__[key] = val + +def __setattr__(self, key, val): + if key in {repr(field_names)}: + raise AttributeError("can't set attribute %r of class %r" + % (key, self.__class__.__name__)) + else: + self.__dict__[key] = val +""" + del arg_list + namespace = {'_tuple_new': tuple_new, + '__builtins__': dict(TypeError=TypeError, + AttributeError=AttributeError), + '__name__': f'namedtuple_{typename}'} + exec(s, namespace) + __new__ = namespace['__new__'] + __new__.__doc__ = f'Create new instance of {typename}({full_list})' + __init__ = namespace['__init__'] + __init__.__doc__ = f'Instantiate instance of {typename}({full_list})' + __setattr__ = namespace['__setattr__'] + + def __repr__(self): + 'Return a nicely formatted representation string' + return self.__class__.__name__ + repr_fmt % self._asdict() + + def _asdict(self): + 'Return a new dict which maps field names to their values.' + out = _dict(_zip(self._fields, self)) + out.update(self.__dict__) + return out + + def __getnewargs_ex__(self): + 'Return self as a plain tuple. Used by copy and pickle.' 
+ return _tuple(self), self.__dict__ + + # Modify function metadata to help with introspection and debugging + for method in (__new__, __repr__, _asdict, __getnewargs_ex__): + method.__qualname__ = f'{typename}.{method.__name__}' + + # Build-up the class namespace dictionary + # and use type() to build the result class + class_namespace = { + '__doc__': f'{typename}({full_list})', + '_fields': field_names, + '__new__': __new__, + '__init__': __init__, + '__repr__': __repr__, + '__setattr__': __setattr__, + '_asdict': _asdict, + '_extra_fields': extra_field_names, + '__getnewargs_ex__': __getnewargs_ex__, + } + for index, name in enumerate(field_names): + + def _get(self, index=index): + return self[index] + class_namespace[name] = property(_get) + for name in extra_field_names: + + def _get(self, name=name): + return self.__dict__[name] + class_namespace[name] = property(_get) + + result = type(typename, (tuple,), class_namespace) + + # For pickling to work, the __module__ variable needs to be set to the + # frame where the named tuple is created. Bypass this step in environments + # where sys._getframe is not defined (Jython for example) or sys._getframe + # is not defined for arguments greater than 0 (IronPython), or where the + # user has specified a particular module. + if module is None: + try: + module = _sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + pass + if module is not None: + result.__module__ = module + __new__.__module__ = module + + return result diff --git a/__packaged__/coreml/.python_dependencies/scipy/_lib/_ccallback.py b/__packaged__/coreml/.python_dependencies/scipy/_lib/_ccallback.py new file mode 100644 index 00000000..1811d967 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/scipy/_lib/_ccallback.py @@ -0,0 +1,227 @@ +from . import _ccallback_c + +import ctypes + +PyCFuncPtr = ctypes.CFUNCTYPE(ctypes.c_void_p).__bases__[0] + +ffi = None + +class CData: + pass + +def _import_cffi(): + global ffi, CData + + if ffi is not None: + return + + try: + import cffi + ffi = cffi.FFI() + CData = ffi.CData + except ImportError: + ffi = False + + +class LowLevelCallable(tuple): + """ + Low-level callback function. + + Parameters + ---------- + function : {PyCapsule, ctypes function pointer, cffi function pointer} + Low-level callback function. + user_data : {PyCapsule, ctypes void pointer, cffi void pointer} + User data to pass on to the callback function. + signature : str, optional + Signature of the function. If omitted, determined from *function*, + if possible. + + Attributes + ---------- + function + Callback function given. + user_data + User data given. + signature + Signature of the function. + + Methods + ------- + from_cython + Class method for constructing callables from Cython C-exported + functions. + + Notes + ----- + The argument ``function`` can be one of: + + - PyCapsule, whose name contains the C function signature + - ctypes function pointer + - cffi function pointer + + The signature of the low-level callback must match one of those expected + by the routine it is passed to. + + If constructing low-level functions from a PyCapsule, the name of the + capsule must be the corresponding signature, in the format:: + + return_type (arg1_type, arg2_type, ...) + + For example:: + + "void (double)" + "double (double, int *, void *)" + + The context of a PyCapsule passed in as ``function`` is used as ``user_data``, + if an explicit value for ``user_data`` was not given. 
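+
+    As a minimal sketch, a callable can be built from a ctypes function
+    pointer (the shared library ``mylib.so`` and its exported function ``f``
+    are placeholders)::
+
+        import ctypes
+        from scipy import LowLevelCallable
+
+        lib = ctypes.CDLL('./mylib.so')
+        lib.f.restype = ctypes.c_double
+        lib.f.argtypes = (ctypes.c_double, ctypes.c_void_p)
+        callback = LowLevelCallable(lib.f)  # signature inferred from ctypes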
+ + """ + + # Make the class immutable + __slots__ = () + + def __new__(cls, function, user_data=None, signature=None): + # We need to hold a reference to the function & user data, + # to prevent them going out of scope + item = cls._parse_callback(function, user_data, signature) + return tuple.__new__(cls, (item, function, user_data)) + + def __repr__(self): + return "LowLevelCallable({!r}, {!r})".format(self.function, self.user_data) + + @property + def function(self): + return tuple.__getitem__(self, 1) + + @property + def user_data(self): + return tuple.__getitem__(self, 2) + + @property + def signature(self): + return _ccallback_c.get_capsule_signature(tuple.__getitem__(self, 0)) + + def __getitem__(self, idx): + raise ValueError() + + @classmethod + def from_cython(cls, module, name, user_data=None, signature=None): + """ + Create a low-level callback function from an exported Cython function. + + Parameters + ---------- + module : module + Cython module where the exported function resides + name : str + Name of the exported function + user_data : {PyCapsule, ctypes void pointer, cffi void pointer}, optional + User data to pass on to the callback function. + signature : str, optional + Signature of the function. If omitted, determined from *function*. + + """ + try: + function = module.__pyx_capi__[name] + except AttributeError as e: + raise ValueError("Given module is not a Cython module with __pyx_capi__ attribute") from e + except KeyError as e: + raise ValueError("No function {!r} found in __pyx_capi__ of the module".format(name)) from e + return cls(function, user_data, signature) + + @classmethod + def _parse_callback(cls, obj, user_data=None, signature=None): + _import_cffi() + + if isinstance(obj, LowLevelCallable): + func = tuple.__getitem__(obj, 0) + elif isinstance(obj, PyCFuncPtr): + func, signature = _get_ctypes_func(obj, signature) + elif isinstance(obj, CData): + func, signature = _get_cffi_func(obj, signature) + elif _ccallback_c.check_capsule(obj): + func = obj + else: + raise ValueError("Given input is not a callable or a low-level callable (pycapsule/ctypes/cffi)") + + if isinstance(user_data, ctypes.c_void_p): + context = _get_ctypes_data(user_data) + elif isinstance(user_data, CData): + context = _get_cffi_data(user_data) + elif user_data is None: + context = 0 + elif _ccallback_c.check_capsule(user_data): + context = user_data + else: + raise ValueError("Given user data is not a valid low-level void* pointer (pycapsule/ctypes/cffi)") + + return _ccallback_c.get_raw_capsule(func, signature, context) + + +# +# ctypes helpers +# + +def _get_ctypes_func(func, signature=None): + # Get function pointer + func_ptr = ctypes.cast(func, ctypes.c_void_p).value + + # Construct function signature + if signature is None: + signature = _typename_from_ctypes(func.restype) + " (" + for j, arg in enumerate(func.argtypes): + if j == 0: + signature += _typename_from_ctypes(arg) + else: + signature += ", " + _typename_from_ctypes(arg) + signature += ")" + + return func_ptr, signature + + +def _typename_from_ctypes(item): + if item is None: + return "void" + elif item is ctypes.c_void_p: + return "void *" + + name = item.__name__ + + pointer_level = 0 + while name.startswith("LP_"): + pointer_level += 1 + name = name[3:] + + if name.startswith('c_'): + name = name[2:] + + if pointer_level > 0: + name += " " + "*"*pointer_level + + return name + + +def _get_ctypes_data(data): + # Get voidp pointer + return ctypes.cast(data, ctypes.c_void_p).value + + +# +# CFFI helpers +# + +def 
_get_cffi_func(func, signature=None): + # Get function pointer + func_ptr = ffi.cast('uintptr_t', func) + + # Get signature + if signature is None: + signature = ffi.getctype(ffi.typeof(func)).replace('(*)', ' ') + + return func_ptr, signature + + +def _get_cffi_data(data): + # Get pointer + return ffi.cast('uintptr_t', data) diff --git a/__packaged__/coreml/.python_dependencies/scipy/_lib/_disjoint_set.py b/__packaged__/coreml/.python_dependencies/scipy/_lib/_disjoint_set.py new file mode 100644 index 00000000..703942a6 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/scipy/_lib/_disjoint_set.py @@ -0,0 +1,228 @@ +""" +Disjoint set data structure +""" + + +class DisjointSet: + """ Disjoint set data structure for incremental connectivity queries. + + .. versionadded:: 1.6.0 + + Attributes + ---------- + n_subsets : int + The number of subsets. + + Methods + ------- + add + merge + connected + subset + subsets + __getitem__ + + Notes + ----- + This class implements the disjoint set [1]_, also known as the *union-find* + or *merge-find* data structure. The *find* operation (implemented in + `__getitem__`) implements the *path halving* variant. The *merge* method + implements the *merge by size* variant. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Disjoint-set_data_structure + + Examples + -------- + >>> from scipy.cluster.hierarchy import DisjointSet + + Initialize a disjoint set: + + >>> disjoint_set = DisjointSet([1, 2, 3, 'a', 'b']) + + Merge some subsets: + + >>> disjoint_set.merge(1, 2) + True + >>> disjoint_set.merge(3, 'a') + True + >>> disjoint_set.merge('a', 'b') + True + >>> disjoint_set.merge('b', 'b') + False + + Find root elements: + + >>> disjoint_set[2] + 1 + >>> disjoint_set['b'] + 3 + + Test connectivity: + + >>> disjoint_set.connected(1, 2) + True + >>> disjoint_set.connected(1, 'b') + False + + List elements in disjoint set: + + >>> list(disjoint_set) + [1, 2, 3, 'a', 'b'] + + Get the subset containing 'a': + + >>> disjoint_set.subset('a') + {'a', 3, 'b'} + + Get all subsets in the disjoint set: + + >>> disjoint_set.subsets() + [{1, 2}, {'a', 3, 'b'}] + """ + def __init__(self, elements=None): + self.n_subsets = 0 + self._sizes = {} + self._parents = {} + # _nbrs is a circular linked list which links connected elements. + self._nbrs = {} + # _indices tracks the element insertion order in `__iter__`. + self._indices = {} + if elements is not None: + for x in elements: + self.add(x) + + def __iter__(self): + """Returns an iterator of the elements in the disjoint set. + + Elements are ordered by insertion order. + """ + return iter(self._indices) + + def __len__(self): + return len(self._indices) + + def __contains__(self, x): + return x in self._indices + + def __getitem__(self, x): + """Find the root element of `x`. + + Parameters + ---------- + x : hashable object + Input element. + + Returns + ------- + root : hashable object + Root element of `x`. + """ + if x not in self._indices: + raise KeyError(x) + + # find by "path halving" + parents = self._parents + while self._indices[x] != self._indices[parents[x]]: + parents[x] = parents[parents[x]] + x = parents[x] + return x + + def add(self, x): + """Add element `x` to disjoint set + """ + if x in self._indices: + return + + self._sizes[x] = 1 + self._parents[x] = x + self._nbrs[x] = x + self._indices[x] = len(self._indices) + self.n_subsets += 1 + + def merge(self, x, y): + """Merge the subsets of `x` and `y`. 
+ + The smaller subset (the child) is merged into the larger subset (the + parent). If the subsets are of equal size, the root element which was + first inserted into the disjoint set is selected as the parent. + + Parameters + ---------- + x, y : hashable object + Elements to merge. + + Returns + ------- + merged : bool + True if `x` and `y` were in disjoint sets, False otherwise. + """ + xr = self[x] + yr = self[y] + if self._indices[xr] == self._indices[yr]: + return False + + sizes = self._sizes + if (sizes[xr], self._indices[yr]) < (sizes[yr], self._indices[xr]): + xr, yr = yr, xr + self._parents[yr] = xr + self._sizes[xr] += self._sizes[yr] + self._nbrs[xr], self._nbrs[yr] = self._nbrs[yr], self._nbrs[xr] + self.n_subsets -= 1 + return True + + def connected(self, x, y): + """Test whether `x` and `y` are in the same subset. + + Parameters + ---------- + x, y : hashable object + Elements to test. + + Returns + ------- + result : bool + True if `x` and `y` are in the same set, False otherwise. + """ + return self._indices[self[x]] == self._indices[self[y]] + + def subset(self, x): + """Get the subset containing `x`. + + Parameters + ---------- + x : hashable object + Input element. + + Returns + ------- + result : set + Subset containing `x`. + """ + if x not in self._indices: + raise KeyError(x) + + result = [x] + nxt = self._nbrs[x] + while self._indices[nxt] != self._indices[x]: + result.append(nxt) + nxt = self._nbrs[nxt] + return set(result) + + def subsets(self): + """Get all the subsets in the disjoint set. + + Returns + ------- + result : list + Subsets in the disjoint set. + """ + result = [] + visited = set() + for x in self: + if x not in visited: + xset = self.subset(x) + visited.update(xset) + result.append(xset) + return result diff --git a/__packaged__/coreml/.python_dependencies/scipy/_lib/_docscrape.py b/__packaged__/coreml/.python_dependencies/scipy/_lib/_docscrape.py new file mode 100644 index 00000000..49fc9a79 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/scipy/_lib/_docscrape.py @@ -0,0 +1,680 @@ +"""Extract reference documentation from the NumPy source tree. + +""" +# copied from numpydoc/docscrape.py +import inspect +import textwrap +import re +import pydoc +from warnings import warn +from collections import namedtuple +from collections.abc import Callable, Mapping +import copy +import sys + + +def strip_blank_lines(l): # noqa + "Remove leading and trailing blank lines from a list of lines" + while l and not l[0].strip(): + del l[0] + while l and not l[-1].strip(): + del l[-1] + return l + + +class Reader(object): + """A line-based string reader. + + """ + def __init__(self, data): + """ + Parameters + ---------- + data : str + String with lines separated by '\\n'. 
+ + """ + if isinstance(data, list): + self._str = data + else: + self._str = data.split('\n') # store string as list of lines + + self.reset() + + def __getitem__(self, n): + return self._str[n] + + def reset(self): + self._l = 0 # current line nr + + def read(self): + if not self.eof(): + out = self[self._l] + self._l += 1 + return out + else: + return '' + + def seek_next_non_empty_line(self): + for l in self[self._l:]: # noqa + if l.strip(): + break + else: + self._l += 1 + + def eof(self): + return self._l >= len(self._str) + + def read_to_condition(self, condition_func): + start = self._l + for line in self[start:]: + if condition_func(line): + return self[start:self._l] + self._l += 1 + if self.eof(): + return self[start:self._l+1] + return [] + + def read_to_next_empty_line(self): + self.seek_next_non_empty_line() + + def is_empty(line): + return not line.strip() + + return self.read_to_condition(is_empty) + + def read_to_next_unindented_line(self): + def is_unindented(line): + return (line.strip() and (len(line.lstrip()) == len(line))) + return self.read_to_condition(is_unindented) + + def peek(self, n=0): + if self._l + n < len(self._str): + return self[self._l + n] + else: + return '' + + def is_empty(self): + return not ''.join(self._str).strip() + + +class ParseError(Exception): + def __str__(self): + message = self.args[0] + if hasattr(self, 'docstring'): + message = "%s in %r" % (message, self.docstring) + return message + + +Parameter = namedtuple('Parameter', ['name', 'type', 'desc']) + + +class NumpyDocString(Mapping): + """Parses a numpydoc string to an abstract representation + + Instances define a mapping from section title to structured data. + + """ + + sections = { + 'Signature': '', + 'Summary': [''], + 'Extended Summary': [], + 'Parameters': [], + 'Returns': [], + 'Yields': [], + 'Receives': [], + 'Raises': [], + 'Warns': [], + 'Other Parameters': [], + 'Attributes': [], + 'Methods': [], + 'See Also': [], + 'Notes': [], + 'Warnings': [], + 'References': '', + 'Examples': '', + 'index': {} + } + + def __init__(self, docstring, config={}): + orig_docstring = docstring + docstring = textwrap.dedent(docstring).split('\n') + + self._doc = Reader(docstring) + self._parsed_data = copy.deepcopy(self.sections) + + try: + self._parse() + except ParseError as e: + e.docstring = orig_docstring + raise + + def __getitem__(self, key): + return self._parsed_data[key] + + def __setitem__(self, key, val): + if key not in self._parsed_data: + self._error_location("Unknown section %s" % key, error=False) + else: + self._parsed_data[key] = val + + def __iter__(self): + return iter(self._parsed_data) + + def __len__(self): + return len(self._parsed_data) + + def _is_at_section(self): + self._doc.seek_next_non_empty_line() + + if self._doc.eof(): + return False + + l1 = self._doc.peek().strip() # e.g. Parameters + + if l1.startswith('.. 
index::'): + return True + + l2 = self._doc.peek(1).strip() # ---------- or ========== + return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1)) + + def _strip(self, doc): + i = 0 + j = 0 + for i, line in enumerate(doc): + if line.strip(): + break + + for j, line in enumerate(doc[::-1]): + if line.strip(): + break + + return doc[i:len(doc)-j] + + def _read_to_next_section(self): + section = self._doc.read_to_next_empty_line() + + while not self._is_at_section() and not self._doc.eof(): + if not self._doc.peek(-1).strip(): # previous line was empty + section += [''] + + section += self._doc.read_to_next_empty_line() + + return section + + def _read_sections(self): + while not self._doc.eof(): + data = self._read_to_next_section() + name = data[0].strip() + + if name.startswith('..'): # index section + yield name, data[1:] + elif len(data) < 2: + yield StopIteration + else: + yield name, self._strip(data[2:]) + + def _parse_param_list(self, content, single_element_is_type=False): + r = Reader(content) + params = [] + while not r.eof(): + header = r.read().strip() + if ' : ' in header: + arg_name, arg_type = header.split(' : ')[:2] + else: + if single_element_is_type: + arg_name, arg_type = '', header + else: + arg_name, arg_type = header, '' + + desc = r.read_to_next_unindented_line() + desc = dedent_lines(desc) + desc = strip_blank_lines(desc) + + params.append(Parameter(arg_name, arg_type, desc)) + + return params + + # See also supports the following formats. + # + # + # SPACE* COLON SPACE+ SPACE* + # ( COMMA SPACE+ )+ (COMMA | PERIOD)? SPACE* + # ( COMMA SPACE+ )* SPACE* COLON SPACE+ SPACE* + + # is one of + # + # COLON COLON BACKTICK BACKTICK + # where + # is a legal function name, and + # is any nonempty sequence of word characters. + # Examples: func_f1 :meth:`func_h1` :obj:`~baz.obj_r` :class:`class_j` + # is a string describing the function. + + _role = r":(?P\w+):" + _funcbacktick = r"`(?P(?:~\w+\.)?[a-zA-Z0-9_\.-]+)`" + _funcplain = r"(?P[a-zA-Z0-9_\.-]+)" + _funcname = r"(" + _role + _funcbacktick + r"|" + _funcplain + r")" + _funcnamenext = _funcname.replace('role', 'rolenext') + _funcnamenext = _funcnamenext.replace('name', 'namenext') + _description = r"(?P\s*:(\s+(?P\S+.*))?)?\s*$" + _func_rgx = re.compile(r"^\s*" + _funcname + r"\s*") + _line_rgx = re.compile( + r"^\s*" + + r"(?P" + # group for all function names + _funcname + + r"(?P([,]\s+" + _funcnamenext + r")*)" + + r")" + # end of "allfuncs" + # Some function lists have a trailing comma (or period) '\s*' + r"(?P[,\.])?" + + _description) + + # Empty elements are replaced with '..' + empty_description = '..' 
+ + def _parse_see_also(self, content): + """ + func_name : Descriptive text + continued text + another_func_name : Descriptive text + func_name1, func_name2, :meth:`func_name`, func_name3 + + """ + + items = [] + + def parse_item_name(text): + """Match ':role:`name`' or 'name'.""" + m = self._func_rgx.match(text) + if not m: + raise ParseError("%s is not a item name" % text) + role = m.group('role') + name = m.group('name') if role else m.group('name2') + return name, role, m.end() + + rest = [] + for line in content: + if not line.strip(): + continue + + line_match = self._line_rgx.match(line) + description = None + if line_match: + description = line_match.group('desc') + if line_match.group('trailing') and description: + self._error_location( + 'Unexpected comma or period after function list at ' + 'index %d of line "%s"' % (line_match.end('trailing'), + line), + error=False) + if not description and line.startswith(' '): + rest.append(line.strip()) + elif line_match: + funcs = [] + text = line_match.group('allfuncs') + while True: + if not text.strip(): + break + name, role, match_end = parse_item_name(text) + funcs.append((name, role)) + text = text[match_end:].strip() + if text and text[0] == ',': + text = text[1:].strip() + rest = list(filter(None, [description])) + items.append((funcs, rest)) + else: + raise ParseError("%s is not a item name" % line) + return items + + def _parse_index(self, section, content): + """ + .. index: default + :refguide: something, else, and more + + """ + def strip_each_in(lst): + return [s.strip() for s in lst] + + out = {} + section = section.split('::') + if len(section) > 1: + out['default'] = strip_each_in(section[1].split(','))[0] + for line in content: + line = line.split(':') + if len(line) > 2: + out[line[1]] = strip_each_in(line[2].split(',')) + return out + + def _parse_summary(self): + """Grab signature (if given) and summary""" + if self._is_at_section(): + return + + # If several signatures present, take the last one + while True: + summary = self._doc.read_to_next_empty_line() + summary_str = " ".join([s.strip() for s in summary]).strip() + compiled = re.compile(r'^([\w., ]+=)?\s*[\w\.]+\(.*\)$') + if compiled.match(summary_str): + self['Signature'] = summary_str + if not self._is_at_section(): + continue + break + + if summary is not None: + self['Summary'] = summary + + if not self._is_at_section(): + self['Extended Summary'] = self._read_to_next_section() + + def _parse(self): + self._doc.reset() + self._parse_summary() + + sections = list(self._read_sections()) + section_names = set([section for section, content in sections]) + + has_returns = 'Returns' in section_names + has_yields = 'Yields' in section_names + # We could do more tests, but we are not. Arbitrarily. + if has_returns and has_yields: + msg = 'Docstring contains both a Returns and Yields section.' + raise ValueError(msg) + if not has_yields and 'Receives' in section_names: + msg = 'Docstring contains a Receives section but not Yields.' 
+ raise ValueError(msg) + + for (section, content) in sections: + if not section.startswith('..'): + section = (s.capitalize() for s in section.split(' ')) + section = ' '.join(section) + if self.get(section): + self._error_location("The section %s appears twice" + % section) + + if section in ('Parameters', 'Other Parameters', 'Attributes', + 'Methods'): + self[section] = self._parse_param_list(content) + elif section in ('Returns', 'Yields', 'Raises', 'Warns', + 'Receives'): + self[section] = self._parse_param_list( + content, single_element_is_type=True) + elif section.startswith('.. index::'): + self['index'] = self._parse_index(section, content) + elif section == 'See Also': + self['See Also'] = self._parse_see_also(content) + else: + self[section] = content + + def _error_location(self, msg, error=True): + if hasattr(self, '_obj'): + # we know where the docs came from: + try: + filename = inspect.getsourcefile(self._obj) + except TypeError: + filename = None + msg = msg + (" in the docstring of %s in %s." + % (self._obj, filename)) + if error: + raise ValueError(msg) + else: + warn(msg) + + # string conversion routines + + def _str_header(self, name, symbol='-'): + return [name, len(name)*symbol] + + def _str_indent(self, doc, indent=4): + out = [] + for line in doc: + out += [' '*indent + line] + return out + + def _str_signature(self): + if self['Signature']: + return [self['Signature'].replace('*', r'\*')] + [''] + else: + return [''] + + def _str_summary(self): + if self['Summary']: + return self['Summary'] + [''] + else: + return [] + + def _str_extended_summary(self): + if self['Extended Summary']: + return self['Extended Summary'] + [''] + else: + return [] + + def _str_param_list(self, name): + out = [] + if self[name]: + out += self._str_header(name) + for param in self[name]: + parts = [] + if param.name: + parts.append(param.name) + if param.type: + parts.append(param.type) + out += [' : '.join(parts)] + if param.desc and ''.join(param.desc).strip(): + out += self._str_indent(param.desc) + out += [''] + return out + + def _str_section(self, name): + out = [] + if self[name]: + out += self._str_header(name) + out += self[name] + out += [''] + return out + + def _str_see_also(self, func_role): + if not self['See Also']: + return [] + out = [] + out += self._str_header("See Also") + out += [''] + last_had_desc = True + for funcs, desc in self['See Also']: + assert isinstance(funcs, list) + links = [] + for func, role in funcs: + if role: + link = ':%s:`%s`' % (role, func) + elif func_role: + link = ':%s:`%s`' % (func_role, func) + else: + link = "`%s`_" % func + links.append(link) + link = ', '.join(links) + out += [link] + if desc: + out += self._str_indent([' '.join(desc)]) + last_had_desc = True + else: + last_had_desc = False + out += self._str_indent([self.empty_description]) + + if last_had_desc: + out += [''] + out += [''] + return out + + def _str_index(self): + idx = self['index'] + out = [] + output_index = False + default_index = idx.get('default', '') + if default_index: + output_index = True + out += ['.. 
index:: %s' % default_index] + for section, references in idx.items(): + if section == 'default': + continue + output_index = True + out += [' :%s: %s' % (section, ', '.join(references))] + if output_index: + return out + else: + return '' + + def __str__(self, func_role=''): + out = [] + out += self._str_signature() + out += self._str_summary() + out += self._str_extended_summary() + for param_list in ('Parameters', 'Returns', 'Yields', 'Receives', + 'Other Parameters', 'Raises', 'Warns'): + out += self._str_param_list(param_list) + out += self._str_section('Warnings') + out += self._str_see_also(func_role) + for s in ('Notes', 'References', 'Examples'): + out += self._str_section(s) + for param_list in ('Attributes', 'Methods'): + out += self._str_param_list(param_list) + out += self._str_index() + return '\n'.join(out) + + +def indent(str, indent=4): # noqa + indent_str = ' '*indent + if str is None: + return indent_str + lines = str.split('\n') + return '\n'.join(indent_str + l for l in lines) # noqa + + +def dedent_lines(lines): + """Deindent a list of lines maximally""" + return textwrap.dedent("\n".join(lines)).split("\n") + + +def header(text, style='-'): + return text + '\n' + style*len(text) + '\n' + + +class FunctionDoc(NumpyDocString): + def __init__(self, func, role='func', doc=None, config={}): + self._f = func + self._role = role # e.g. "func" or "meth" + + if doc is None: + if func is None: + raise ValueError("No function or docstring given") + doc = inspect.getdoc(func) or '' + NumpyDocString.__init__(self, doc, config) + + def get_func(self): + func_name = getattr(self._f, '__name__', self.__class__.__name__) + if inspect.isclass(self._f): + func = getattr(self._f, '__call__', self._f.__init__) + else: + func = self._f + return func, func_name + + def __str__(self): + out = '' + + func, func_name = self.get_func() + + roles = {'func': 'function', + 'meth': 'method'} + + if self._role: + if self._role not in roles: + print("Warning: invalid role %s" % self._role) + out += '.. %s:: %s\n \n\n' % (roles.get(self._role, ''), + func_name) + + out += super(FunctionDoc, self).__str__(func_role=self._role) + return out + + +class ClassDoc(NumpyDocString): + + extra_public_methods = ['__call__'] + + def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc, + config={}): + if not inspect.isclass(cls) and cls is not None: + raise ValueError("Expected a class or None, but got %r" % cls) + self._cls = cls + + if 'sphinx' in sys.modules: + from sphinx.ext.autodoc import ALL + else: + ALL = object() + + self.show_inherited_members = config.get( + 'show_inherited_class_members', True) + + if modulename and not modulename.endswith('.'): + modulename += '.' 
+ self._mod = modulename + + if doc is None: + if cls is None: + raise ValueError("No class or documentation string given") + doc = pydoc.getdoc(cls) + + NumpyDocString.__init__(self, doc) + + _members = config.get('members', []) + if _members is ALL: + _members = None + _exclude = config.get('exclude-members', []) + + if config.get('show_class_members', True) and _exclude is not ALL: + def splitlines_x(s): + if not s: + return [] + else: + return s.splitlines() + for field, items in [('Methods', self.methods), + ('Attributes', self.properties)]: + if not self[field]: + doc_list = [] + for name in sorted(items): + if (name in _exclude or + (_members and name not in _members)): + continue + try: + doc_item = pydoc.getdoc(getattr(self._cls, name)) + doc_list.append( + Parameter(name, '', splitlines_x(doc_item))) + except AttributeError: + pass # method doesn't exist + self[field] = doc_list + + @property + def methods(self): + if self._cls is None: + return [] + return [name for name, func in inspect.getmembers(self._cls) + if ((not name.startswith('_') + or name in self.extra_public_methods) + and isinstance(func, Callable) + and self._is_show_member(name))] + + @property + def properties(self): + if self._cls is None: + return [] + return [name for name, func in inspect.getmembers(self._cls) + if (not name.startswith('_') and + (func is None or isinstance(func, property) or + inspect.isdatadescriptor(func)) + and self._is_show_member(name))] + + def _is_show_member(self, name): + if self.show_inherited_members: + return True # show all class members + if name not in self._cls.__dict__: + return False # class member is inherited, we do not show it + return True diff --git a/__packaged__/coreml/.python_dependencies/scipy/_lib/_finite_differences.py b/__packaged__/coreml/.python_dependencies/scipy/_lib/_finite_differences.py new file mode 100644 index 00000000..506057b4 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/scipy/_lib/_finite_differences.py @@ -0,0 +1,145 @@ +from numpy import arange, newaxis, hstack, prod, array + + +def _central_diff_weights(Np, ndiv=1): + """ + Return weights for an Np-point central derivative. + + Assumes equally-spaced function points. + + If weights are in the vector w, then + derivative is w[0] * f(x-ho*dx) + ... + w[-1] * f(x+h0*dx) + + Parameters + ---------- + Np : int + Number of points for the central derivative. + ndiv : int, optional + Number of divisions. Default is 1. + + Returns + ------- + w : ndarray + Weights for an Np-point central derivative. Its size is `Np`. + + Notes + ----- + Can be inaccurate for a large number of points. + + Examples + -------- + We can calculate a derivative value of a function. + + >>> def f(x): + ... return 2 * x**2 + 3 + >>> x = 3.0 # derivative point + >>> h = 0.1 # differential step + >>> Np = 3 # point number for central derivative + >>> weights = _central_diff_weights(Np) # weights for first derivative + >>> vals = [f(x + (i - Np/2) * h) for i in range(Np)] + >>> sum(w * v for (w, v) in zip(weights, vals))/h + 11.79999999999998 + + This value is close to the analytical solution: + f'(x) = 4x, so f'(3) = 12 + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Finite_difference + + """ + if Np < ndiv + 1: + raise ValueError( + "Number of points must be at least the derivative order + 1." 
+ ) + if Np % 2 == 0: + raise ValueError("The number of points must be odd.") + from scipy import linalg + + ho = Np >> 1 + x = arange(-ho, ho + 1.0) + x = x[:, newaxis] + X = x**0.0 + for k in range(1, Np): + X = hstack([X, x**k]) + w = prod(arange(1, ndiv + 1), axis=0) * linalg.inv(X)[ndiv] + return w + + +def _derivative(func, x0, dx=1.0, n=1, args=(), order=3): + """ + Find the nth derivative of a function at a point. + + Given a function, use a central difference formula with spacing `dx` to + compute the nth derivative at `x0`. + + Parameters + ---------- + func : function + Input function. + x0 : float + The point at which the nth derivative is found. + dx : float, optional + Spacing. + n : int, optional + Order of the derivative. Default is 1. + args : tuple, optional + Arguments + order : int, optional + Number of points to use, must be odd. + + Notes + ----- + Decreasing the step size too small can result in round-off error. + + Examples + -------- + >>> def f(x): + ... return x**3 + x**2 + >>> _derivative(f, 1.0, dx=1e-6) + 4.9999999999217337 + + """ + if order < n + 1: + raise ValueError( + "'order' (the number of points used to compute the derivative), " + "must be at least the derivative order 'n' + 1." + ) + if order % 2 == 0: + raise ValueError( + "'order' (the number of points used to compute the derivative) " + "must be odd." + ) + # pre-computed for n=1 and 2 and low-order for speed. + if n == 1: + if order == 3: + weights = array([-1, 0, 1]) / 2.0 + elif order == 5: + weights = array([1, -8, 0, 8, -1]) / 12.0 + elif order == 7: + weights = array([-1, 9, -45, 0, 45, -9, 1]) / 60.0 + elif order == 9: + weights = array([3, -32, 168, -672, 0, 672, -168, 32, -3]) / 840.0 + else: + weights = _central_diff_weights(order, 1) + elif n == 2: + if order == 3: + weights = array([1, -2.0, 1]) + elif order == 5: + weights = array([-1, 16, -30, 16, -1]) / 12.0 + elif order == 7: + weights = array([2, -27, 270, -490, 270, -27, 2]) / 180.0 + elif order == 9: + weights = ( + array([-9, 128, -1008, 8064, -14350, 8064, -1008, 128, -9]) + / 5040.0 + ) + else: + weights = _central_diff_weights(order, 2) + else: + weights = _central_diff_weights(order, n) + val = 0.0 + ho = order >> 1 + for k in range(order): + val += weights[k] * func(x0 + (k - ho) * dx, *args) + return val / prod((dx,) * n, axis=0) diff --git a/__packaged__/coreml/.python_dependencies/scipy/_lib/_gcutils.py b/__packaged__/coreml/.python_dependencies/scipy/_lib/_gcutils.py new file mode 100644 index 00000000..854ae362 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/scipy/_lib/_gcutils.py @@ -0,0 +1,105 @@ +""" +Module for testing automatic garbage collection of objects + +.. 
autosummary:: + :toctree: generated/ + + set_gc_state - enable or disable garbage collection + gc_state - context manager for given state of garbage collector + assert_deallocated - context manager to check for circular references on object + +""" +import weakref +import gc + +from contextlib import contextmanager +from platform import python_implementation + +__all__ = ['set_gc_state', 'gc_state', 'assert_deallocated'] + + +IS_PYPY = python_implementation() == 'PyPy' + + +class ReferenceError(AssertionError): + pass + + +def set_gc_state(state): + """ Set status of garbage collector """ + if gc.isenabled() == state: + return + if state: + gc.enable() + else: + gc.disable() + + +@contextmanager +def gc_state(state): + """ Context manager to set state of garbage collector to `state` + + Parameters + ---------- + state : bool + True for gc enabled, False for disabled + + Examples + -------- + >>> with gc_state(False): + ... assert not gc.isenabled() + >>> with gc_state(True): + ... assert gc.isenabled() + """ + orig_state = gc.isenabled() + set_gc_state(state) + yield + set_gc_state(orig_state) + + +@contextmanager +def assert_deallocated(func, *args, **kwargs): + """Context manager to check that object is deallocated + + This is useful for checking that an object can be freed directly by + reference counting, without requiring gc to break reference cycles. + GC is disabled inside the context manager. + + This check is not available on PyPy. + + Parameters + ---------- + func : callable + Callable to create object to check + \\*args : sequence + positional arguments to `func` in order to create object to check + \\*\\*kwargs : dict + keyword arguments to `func` in order to create object to check + + Examples + -------- + >>> class C: pass + >>> with assert_deallocated(C) as c: + ... # do something + ... del c + + >>> class C: + ... def __init__(self): + ... self._circular = self # Make circular reference + >>> with assert_deallocated(C) as c: #doctest: +IGNORE_EXCEPTION_DETAIL + ... # do something + ... del c + Traceback (most recent call last): + ... + ReferenceError: Remaining reference(s) to object + """ + if IS_PYPY: + raise RuntimeError("assert_deallocated is unavailable on PyPy") + + with gc_state(False): + obj = func(*args, **kwargs) + ref = weakref.ref(obj) + yield obj + del obj + if ref() is not None: + raise ReferenceError("Remaining reference(s) to object") diff --git a/__packaged__/coreml/.python_dependencies/scipy/_lib/_pep440.py b/__packaged__/coreml/.python_dependencies/scipy/_lib/_pep440.py new file mode 100644 index 00000000..73d0afb5 --- /dev/null +++ b/__packaged__/coreml/.python_dependencies/scipy/_lib/_pep440.py @@ -0,0 +1,487 @@ +"""Utility to compare pep440 compatible version strings. + +The LooseVersion and StrictVersion classes that distutils provides don't +work; they don't recognize anything like alpha/beta/rc/dev versions. +""" + +# Copyright (c) Donald Stufft and individual contributors. +# All rights reserved. + +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: + +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. + +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. 
+ +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +import collections +import itertools +import re + + +__all__ = [ + "parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN", +] + + +# BEGIN packaging/_structures.py + + +class Infinity: + def __repr__(self): + return "Infinity" + + def __hash__(self): + return hash(repr(self)) + + def __lt__(self, other): + return False + + def __le__(self, other): + return False + + def __eq__(self, other): + return isinstance(other, self.__class__) + + def __ne__(self, other): + return not isinstance(other, self.__class__) + + def __gt__(self, other): + return True + + def __ge__(self, other): + return True + + def __neg__(self): + return NegativeInfinity + + +Infinity = Infinity() + + +class NegativeInfinity: + def __repr__(self): + return "-Infinity" + + def __hash__(self): + return hash(repr(self)) + + def __lt__(self, other): + return True + + def __le__(self, other): + return True + + def __eq__(self, other): + return isinstance(other, self.__class__) + + def __ne__(self, other): + return not isinstance(other, self.__class__) + + def __gt__(self, other): + return False + + def __ge__(self, other): + return False + + def __neg__(self): + return Infinity + + +# BEGIN packaging/version.py + + +NegativeInfinity = NegativeInfinity() + +_Version = collections.namedtuple( + "_Version", + ["epoch", "release", "dev", "pre", "post", "local"], +) + + +def parse(version): + """ + Parse the given version string and return either a :class:`Version` object + or a :class:`LegacyVersion` object depending on if the given version is + a valid PEP 440 version or a legacy version. + """ + try: + return Version(version) + except InvalidVersion: + return LegacyVersion(version) + + +class InvalidVersion(ValueError): + """ + An invalid version was found, users should refer to PEP 440. 
+ """ + + +class _BaseVersion: + + def __hash__(self): + return hash(self._key) + + def __lt__(self, other): + return self._compare(other, lambda s, o: s < o) + + def __le__(self, other): + return self._compare(other, lambda s, o: s <= o) + + def __eq__(self, other): + return self._compare(other, lambda s, o: s == o) + + def __ge__(self, other): + return self._compare(other, lambda s, o: s >= o) + + def __gt__(self, other): + return self._compare(other, lambda s, o: s > o) + + def __ne__(self, other): + return self._compare(other, lambda s, o: s != o) + + def _compare(self, other, method): + if not isinstance(other, _BaseVersion): + return NotImplemented + + return method(self._key, other._key) + + +class LegacyVersion(_BaseVersion): + + def __init__(self, version): + self._version = str(version) + self._key = _legacy_cmpkey(self._version) + + def __str__(self): + return self._version + + def __repr__(self): + return "".format(repr(str(self))) + + @property + def public(self): + return self._version + + @property + def base_version(self): + return self._version + + @property + def local(self): + return None + + @property + def is_prerelease(self): + return False + + @property + def is_postrelease(self): + return False + + +_legacy_version_component_re = re.compile( + r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE, +) + +_legacy_version_replacement_map = { + "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@", +} + + +def _parse_version_parts(s): + for part in _legacy_version_component_re.split(s): + part = _legacy_version_replacement_map.get(part, part) + + if not part or part == ".": + continue + + if part[:1] in "0123456789": + # pad for numeric comparison + yield part.zfill(8) + else: + yield "*" + part + + # ensure that alpha/beta/candidate are before final + yield "*final" + + +def _legacy_cmpkey(version): + # We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch + # greater than or equal to 0. This will effectively put the LegacyVersion, + # which uses the defacto standard originally implemented by setuptools, + # as before all PEP 440 versions. + epoch = -1 + + # This scheme is taken from pkg_resources.parse_version setuptools prior to + # its adoption of the packaging library. + parts = [] + for part in _parse_version_parts(version.lower()): + if part.startswith("*"): + # remove "-" before a prerelease tag + if part < "*final": + while parts and parts[-1] == "*final-": + parts.pop() + + # remove trailing zeros from each series of numeric parts + while parts and parts[-1] == "00000000": + parts.pop() + + parts.append(part) + parts = tuple(parts) + + return epoch, parts + + +# Deliberately not anchored to the start and end of the string, to make it +# easier for 3rd party code to reuse +VERSION_PATTERN = r""" + v? + (?: + (?:(?P[0-9]+)!)? # epoch + (?P[0-9]+(?:\.[0-9]+)*) # release segment + (?P
<pre>                                          # pre-release
+            [-_\.]?
+            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+            [-_\.]?
+            (?P<pre_n>[0-9]+)?
+        )?
+        (?P<post>                                         # post release
+            (?:-(?P<post_n1>[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?P<post_l>post|rev|r)
+                [-_\.]?
+                (?P<post_n2>[0-9]+)?
+            )
+        )?
+        (?P<dev>                                          # dev release
+            [-_\.]?
+            (?P<dev_l>dev)
+            [-_\.]?
+            (?P<dev_n>[0-9]+)?
+        )?
+    )
+    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
+
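+# For example, this pattern (with the parsing in Version below) decomposes
+# "1!2.0.post3.dev4+ubuntu.1" into epoch=1, release=(2, 0), post=('post', 3),
+# dev=('dev', 4) and local=('ubuntu', 1), and string output is normalized:
+#
+#   >>> str(Version("1!2.0.POST3.Dev4+Ubuntu.1"))
+#   '1!2.0.post3.dev4+ubuntu.1'
+
+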
+class Version(_BaseVersion):
+
+    _regex = re.compile(
+        r"^\s*" + VERSION_PATTERN + r"\s*$",
+        re.VERBOSE | re.IGNORECASE,
+    )
+
+    def __init__(self, version):
+        # Validate the version and parse it into pieces
+        match = self._regex.search(version)
+        if not match:
+            raise InvalidVersion("Invalid version: '{0}'".format(version))
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(
+                match.group("pre_l"),
+                match.group("pre_n"),
+            ),
+            post=_parse_letter_version(
+                match.group("post_l"),
+                match.group("post_n1") or match.group("post_n2"),
+            ),
+            dev=_parse_letter_version(
+                match.group("dev_l"),
+                match.group("dev_n"),
+            ),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self):
+        return "".format(repr(str(self)))
+
+    def __str__(self):
+        parts = []
+
+        # Epoch
+        if self._version.epoch != 0:
+            parts.append("{0}!".format(self._version.epoch))
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self._version.release))
+
+        # Pre-release
+        if self._version.pre is not None:
+            parts.append("".join(str(x) for x in self._version.pre))
+
+        # Post-release
+        if self._version.post is not None:
+            parts.append(".post{0}".format(self._version.post[1]))
+
+        # Development release
+        if self._version.dev is not None:
+            parts.append(".dev{0}".format(self._version.dev[1]))
+
+        # Local version segment
+        if self._version.local is not None:
+            parts.append(
+                "+{0}".format(".".join(str(x) for x in self._version.local))
+            )
+
+        return "".join(parts)
+
+    @property
+    def public(self):
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self):
+        parts = []
+
+        # Epoch
+        if self._version.epoch != 0:
+            parts.append("{0}!".format(self._version.epoch))
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self._version.release))
+
+        return "".join(parts)
+
+    @property
+    def local(self):
+        version_string = str(self)
+        if "+" in version_string:
+            return version_string.split("+", 1)[1]
+
+    @property
+    def is_prerelease(self):
+        return bool(self._version.dev or self._version.pre)
+
+    @property
+    def is_postrelease(self):
+        return bool(self._version.post)
+
+
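+# A minimal usage sketch, assuming this module is imported as
+# ``scipy._lib._pep440``: ``parse`` falls back to LegacyVersion for strings
+# that are not valid PEP 440 versions, while Version instances order
+# according to PEP 440:
+#
+#   >>> from scipy._lib._pep440 import Version, parse
+#   >>> Version("1.0.dev0") < Version("1.0a0") < Version("1.0") < Version("1.0.post1")
+#   True
+
+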
+def _parse_letter_version(letter, number):
+    if letter:
+        # We assume there is an implicit 0 in a pre-release if there is
+        # no numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower-case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+    if not letter and number:
+        # We assume that if we are given a number but not given a letter,
+        # then this is using the implicit post release syntax (e.g., 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
+
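+# Sketch of the normalization above: alternate spellings map to a canonical
+# letter, and a missing numeral is treated as 0:
+#
+#   >>> _parse_letter_version("alpha", None)
+#   ('a', 0)
+#   >>> _parse_letter_version("rev", "3")
+#   ('post', 3)
+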
+_local_version_separators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local):
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_separators.split(local)
+        )
+
+
+def _cmpkey(epoch, release, pre, post, dev, local):
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. To do that, we reverse the list, drop the
+    # now-leading zeros until we reach something non-zero, take the rest,
+    # reverse it back into the correct order, and use the resulting tuple as
+    # our sorting key.
+    release = tuple(
+        reversed(list(
+            itertools.dropwhile(
+                lambda x: x == 0,
+                reversed(release),
+            )
+        ))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre-segment, but we _only_ want to do this
+    # if there is neither a pre-segment nor a post-segment. If we have one of
+    # those, then the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        pre = -Infinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        pre = Infinity
+
+    # Versions without a post-segment should sort before those with one.
+    if post is None:
+        post = -Infinity
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        dev = Infinity
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        local = -Infinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP440.
+        # - Alphanumeric segments sort before numeric segments
+        # - Alphanumeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        local = tuple(
+            (i, "") if isinstance(i, int) else (-Infinity, i)
+            for i in local
+        )
+
+    return epoch, release, pre, post, dev, local
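+
+# Note the effect of the trailing-zero trimming above:
+#
+#   >>> Version("1.0") == Version("1.0.0")
+#   True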
diff --git a/__packaged__/coreml/.python_dependencies/scipy/_lib/_testutils.py b/__packaged__/coreml/.python_dependencies/scipy/_lib/_testutils.py
new file mode 100644
index 00000000..42f59729
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/_lib/_testutils.py
@@ -0,0 +1,217 @@
+"""
+Generic test utilities.
+
+"""
+
+import os
+import re
+import sys
+import numpy as np
+import inspect
+
+
+__all__ = ['PytestTester', 'check_free_memory', '_TestPythranFunc']
+
+
+class FPUModeChangeWarning(RuntimeWarning):
+    """Warning about FPU mode change"""
+    pass
+
+
+class PytestTester:
+    """
+    Pytest test runner entry point.
+    """
+
+    def __init__(self, module_name):
+        self.module_name = module_name
+
+    def __call__(self, label="fast", verbose=1, extra_argv=None, doctests=False,
+                 coverage=False, tests=None, parallel=None):
+        import pytest
+
+        module = sys.modules[self.module_name]
+        module_path = os.path.abspath(module.__path__[0])
+
+        pytest_args = ['--showlocals', '--tb=short']
+
+        if doctests:
+            raise ValueError("Doctests not supported")
+
+        if extra_argv:
+            pytest_args += list(extra_argv)
+
+        if verbose and int(verbose) > 1:
+            pytest_args += ["-" + "v"*(int(verbose)-1)]
+
+        if coverage:
+            pytest_args += ["--cov=" + module_path]
+
+        if label == "fast":
+            pytest_args += ["-m", "not slow"]
+        elif label != "full":
+            pytest_args += ["-m", label]
+
+        if tests is None:
+            tests = [self.module_name]
+
+        if parallel is not None and parallel > 1:
+            if _pytest_has_xdist():
+                pytest_args += ['-n', str(parallel)]
+            else:
+                import warnings
+                warnings.warn('Could not run tests in parallel because '
+                              'pytest-xdist plugin is not available.')
+
+        pytest_args += ['--pyargs'] + list(tests)
+
+        try:
+            code = pytest.main(pytest_args)
+        except SystemExit as exc:
+            code = exc.code
+
+        return (code == 0)
+
+
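+# Usage sketch: each subpackage binds an instance as its ``test`` attribute
+# (``test = PytestTester(__name__)``), so the suite can be run as, e.g.:
+#
+#   >>> import scipy
+#   >>> scipy.test(label="fast", parallel=4)   # doctest: +SKIP
+
+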
+class _TestPythranFunc:
+    '''
+    These are situations covered by our Pythran tests:
+    - A function with multiple array arguments followed by other
+      positional and keyword arguments.
+    - A function with array-like keywords (e.g. `def somefunc(x0, x1=None)`).
+    Note: list/tuple input is not yet tested!
+
+    `self.arguments`: A dictionary whose keys are argument indices and whose
+                      values are tuples of (array value, all supported dtypes).
+    `self.partialfunc`: A function used to freeze non-array arguments that are
+                        of no interest to the original function.
+    '''
+    ALL_INTEGER = [np.int8, np.int16, np.int32, np.int64, np.intc, np.intp]
+    ALL_FLOAT = [np.float32, np.float64]
+    ALL_COMPLEX = [np.complex64, np.complex128]
+
+    def setup_method(self):
+        self.arguments = {}
+        self.partialfunc = None
+        self.expected = None
+
+    def get_optional_args(self, func):
+        # Get the optional arguments and their default values;
+        # used for testing keyword arguments.
+        signature = inspect.signature(func)
+        optional_args = {}
+        for k, v in signature.parameters.items():
+            if v.default is not inspect.Parameter.empty:
+                optional_args[k] = v.default
+        return optional_args
+
+    def get_max_dtype_list_length(self):
+        # get the max supported dtypes list length in all arguments
+        max_len = 0
+        for arg_idx in self.arguments:
+            cur_len = len(self.arguments[arg_idx][1])
+            if cur_len > max_len:
+                max_len = cur_len
+        return max_len
+
+    def get_dtype(self, dtype_list, dtype_idx):
+        # get the dtype from dtype_list via index
+        # if the index is out of range, then return the last dtype
+        if dtype_idx > len(dtype_list)-1:
+            return dtype_list[-1]
+        else:
+            return dtype_list[dtype_idx]
+
+    def test_all_dtypes(self):
+        for type_idx in range(self.get_max_dtype_list_length()):
+            args_array = []
+            for arg_idx in self.arguments:
+                new_dtype = self.get_dtype(self.arguments[arg_idx][1],
+                                           type_idx)
+                args_array.append(self.arguments[arg_idx][0].astype(new_dtype))
+            self.pythranfunc(*args_array)
+
+    def test_views(self):
+        args_array = []
+        for arg_idx in self.arguments:
+            args_array.append(self.arguments[arg_idx][0][::-1][::-1])
+        self.pythranfunc(*args_array)
+
+    def test_strided(self):
+        args_array = []
+        for arg_idx in self.arguments:
+            args_array.append(np.repeat(self.arguments[arg_idx][0],
+                                        2, axis=0)[::2])
+        self.pythranfunc(*args_array)
+
+
+def _pytest_has_xdist():
+    """
+    Check whether the pytest-xdist plugin is installed (needed for parallel tests)
+    """
+    # Check xdist exists without importing it, otherwise pytest emits warnings
+    from importlib.util import find_spec
+    return find_spec('xdist') is not None
+
+
+def check_free_memory(free_mb):
+    """
+    Check that *free_mb* MB of memory is available; otherwise, call pytest.skip
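+
+    Examples
+    --------
+    A sketch; only meaningful inside a running pytest test:
+
+    >>> check_free_memory(2000)  # doctest: +SKIP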
+    """
+    import pytest
+
+    try:
+        mem_free = _parse_size(os.environ['SCIPY_AVAILABLE_MEM'])
+        msg = '{0} MB memory required, but environment SCIPY_AVAILABLE_MEM={1}'.format(
+            free_mb, os.environ['SCIPY_AVAILABLE_MEM'])
+    except KeyError:
+        mem_free = _get_mem_available()
+        if mem_free is None:
+            pytest.skip("Could not determine available memory; set SCIPY_AVAILABLE_MEM "
+                        "variable to free memory in MB to run the test.")
+        msg = '{0} MB memory required, but {1} MB available'.format(
+            free_mb, mem_free/1e6)
+
+    if mem_free < free_mb * 1e6:
+        pytest.skip(msg)
+
+
+def _parse_size(size_str):
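+    """Parse a human-readable size string into a number of bytes.
+
+    A few illustrative values (a sketch; bare numbers are read as MB):
+
+    >>> _parse_size('10')
+    10000000.0
+    >>> _parse_size('4 Gib')
+    4294967296.0
+    """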
+    suffixes = {'': 1e6,
+                'b': 1.0,
+                'k': 1e3, 'M': 1e6, 'G': 1e9, 'T': 1e12,
+                'kb': 1e3, 'Mb': 1e6, 'Gb': 1e9, 'Tb': 1e12,
+                'kib': 1024.0, 'Mib': 1024.0**2, 'Gib': 1024.0**3, 'Tib': 1024.0**4}
+    m = re.match(r'^\s*(\d+)\s*({0})\s*$'.format('|'.join(suffixes.keys())),
+                 size_str,
+                 re.I)
+    if not m or m.group(2) not in suffixes:
+        raise ValueError("Invalid size string")
+
+    return float(m.group(1)) * suffixes[m.group(2)]
+
+
+def _get_mem_available():
+    """
+    Get information about memory available, not counting swap.
+    """
+    try:
+        import psutil
+        return psutil.virtual_memory().available
+    except (ImportError, AttributeError):
+        pass
+
+    if sys.platform.startswith('linux'):
+        info = {}
+        with open('/proc/meminfo', 'r') as f:
+            for line in f:
+                p = line.split()
+                info[p[0].strip(':').lower()] = float(p[1]) * 1e3
+
+        if 'memavailable' in info:
+            # Linux >= 3.14
+            return info['memavailable']
+        else:
+            return info['memfree'] + info['cached']
+
+    return None
diff --git a/__packaged__/coreml/.python_dependencies/scipy/_lib/_threadsafety.py b/__packaged__/coreml/.python_dependencies/scipy/_lib/_threadsafety.py
new file mode 100644
index 00000000..feea0c59
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/_lib/_threadsafety.py
@@ -0,0 +1,58 @@
+import threading
+
+import scipy._lib.decorator
+
+
+__all__ = ['ReentrancyError', 'ReentrancyLock', 'non_reentrant']
+
+
+class ReentrancyError(RuntimeError):
+    pass
+
+
+class ReentrancyLock:
+    """
+    Threading lock that raises an exception for reentrant calls.
+
+    Calls from different threads are serialized, and nested calls from the
+    same thread result in an error.
+
+    The object can be used as a context manager or to decorate functions
+    via the decorate() method.
+
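+    A minimal usage sketch:
+
+    >>> lock = ReentrancyLock("refusing re-entrant call")
+    >>> with lock:
+    ...     pass  # a nested ``with lock:`` here would raise ReentrancyError
+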
+    """
+
+    def __init__(self, err_msg):
+        self._rlock = threading.RLock()
+        self._entered = False
+        self._err_msg = err_msg
+
+    def __enter__(self):
+        self._rlock.acquire()
+        if self._entered:
+            self._rlock.release()
+            raise ReentrancyError(self._err_msg)
+        self._entered = True
+
+    def __exit__(self, type, value, traceback):
+        self._entered = False
+        self._rlock.release()
+
+    def decorate(self, func):
+        def caller(func, *a, **kw):
+            with self:
+                return func(*a, **kw)
+        return scipy._lib.decorator.decorate(func, caller)
+
+
+def non_reentrant(err_msg=None):
+    """
+    Decorate a function with a threading lock and prevent reentrant calls.
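+
+    For example (a sketch; the function body is a placeholder):
+
+    >>> @non_reentrant()
+    ... def fetch_data():
+    ...     pass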
+    """
+    def decorator(func):
+        msg = err_msg
+        if msg is None:
+            msg = "%s is not re-entrant" % func.__name__
+        lock = ReentrancyLock(msg)
+        return lock.decorate(func)
+    return decorator
diff --git a/__packaged__/coreml/.python_dependencies/scipy/_lib/_tmpdirs.py b/__packaged__/coreml/.python_dependencies/scipy/_lib/_tmpdirs.py
new file mode 100644
index 00000000..0f9fd546
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/_lib/_tmpdirs.py
@@ -0,0 +1,86 @@
+''' Contexts for *with* statement providing temporary directories
+'''
+import os
+from contextlib import contextmanager
+from shutil import rmtree
+from tempfile import mkdtemp
+
+
+@contextmanager
+def tempdir():
+    """Create and return a temporary directory. This has the same
+    behavior as mkdtemp but can be used as a context manager.
+
+    Upon exiting the context, the directory and everything contained
+    in it are removed.
+
+    Examples
+    --------
+    >>> import os
+    >>> with tempdir() as tmpdir:
+    ...     fname = os.path.join(tmpdir, 'example_file.txt')
+    ...     with open(fname, 'wt') as fobj:
+    ...         _ = fobj.write('a string\\n')
+    >>> os.path.exists(tmpdir)
+    False
+    """
+    d = mkdtemp()
+    yield d
+    rmtree(d)
+
+
+@contextmanager
+def in_tempdir():
+    ''' Create, return, and change directory to a temporary directory
+
+    Examples
+    --------
+    >>> import os
+    >>> my_cwd = os.getcwd()
+    >>> with in_tempdir() as tmpdir:
+    ...     _ = open('test.txt', 'wt').write('some text')
+    ...     assert os.path.isfile('test.txt')
+    ...     assert os.path.isfile(os.path.join(tmpdir, 'test.txt'))
+    >>> os.path.exists(tmpdir)
+    False
+    >>> os.getcwd() == my_cwd
+    True
+    '''
+    pwd = os.getcwd()
+    d = mkdtemp()
+    os.chdir(d)
+    yield d
+    os.chdir(pwd)
+    rmtree(d)
+
+
+@contextmanager
+def in_dir(dir=None):
+    """ Change directory to given directory for duration of ``with`` block
+
+    Useful when you want to use `in_tempdir` for the final test, but
+    you are still debugging. For example, you may want to do this in the end:
+
+    >>> with in_tempdir() as tmpdir:
+    ...     # do something complicated which might break
+    ...     pass
+
+    But, indeed, the complicated thing does break, and meanwhile the
+    ``in_tempdir`` context manager wipes out the directory with the
+    temporary files that you wanted for debugging. So, while debugging, you
+    replace it with something like:
+
+    >>> with in_dir() as tmpdir: # Use working directory by default
+    ...     # do something complicated which might break
+    ...     pass
+
+    You can then look at the temporary file outputs to debug what is happening,
+    fix, and finally replace ``in_dir`` with ``in_tempdir`` again.
+    """
+    cwd = os.getcwd()
+    if dir is None:
+        yield cwd
+        return
+    os.chdir(dir)
+    yield dir
+    os.chdir(cwd)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/_lib/_uarray/LICENSE b/__packaged__/coreml/.python_dependencies/scipy/_lib/_uarray/LICENSE
new file mode 100644
index 00000000..5f2b90a0
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/_lib/_uarray/LICENSE
@@ -0,0 +1,29 @@
+BSD 3-Clause License
+
+Copyright (c) 2018, Quansight-Labs
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+* Neither the name of the copyright holder nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/__packaged__/coreml/.python_dependencies/scipy/_lib/_uarray/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/_lib/_uarray/__init__.py
new file mode 100644
index 00000000..fd5c385b
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/_lib/_uarray/__init__.py
@@ -0,0 +1,116 @@
+"""
+.. note::
+    If you are looking for overrides for NumPy-specific methods, see the
+    documentation for :obj:`unumpy`. This page explains how to write
+    back-ends and multimethods.
+
+``uarray`` is built around a back-end protocol, and overridable multimethods.
+It is necessary to define multimethods for back-ends to be able to override them.
+See the documentation of :obj:`generate_multimethod` on how to write multimethods.
+
+
+
+Let's start with the simplest:
+
+``__ua_domain__`` defines the back-end *domain*. The domain is a period-
+separated string consisting of the modules you extend plus the submodule. For
+example, if a submodule ``module2.submodule`` extends ``module1``
+(i.e., it exposes dispatchables marked as types available in ``module1``),
+then the domain string should be ``"module1.module2.submodule"``.
+
+
+For the purpose of this demonstration, we'll be creating an object and setting
+its attributes directly. However, note that you can use a module or your own type
+as a backend as well.
+
+>>> class Backend: pass
+>>> be = Backend()
+>>> be.__ua_domain__ = "ua_examples"
+
+It might be useful at this point to sidetrack to the documentation of
+:obj:`generate_multimethod` to find out how to generate a multimethod
+overridable by :obj:`uarray`. Needless to say, writing a backend and
+creating multimethods are mostly orthogonal activities, and knowing
+one doesn't necessarily require knowledge of the other, although it
+is certainly helpful. We expect core API designers/specifiers to write the
+multimethods, and implementors to override them. But, as is often the case,
+similar people write both.
+
+Without further ado, here's an example multimethod:
+
+>>> import uarray as ua
+>>> from uarray import Dispatchable
+>>> def override_me(a, b):
+...   return Dispatchable(a, int),
+>>> def override_replacer(args, kwargs, dispatchables):
+...     return (dispatchables[0], args[1]), {}
+>>> overridden_me = ua.generate_multimethod(
+...     override_me, override_replacer, "ua_examples"
+... )
+
+Next comes the part about overriding the multimethod. This requires
+the ``__ua_function__`` protocol, and the ``__ua_convert__``
+protocol. The ``__ua_function__`` protocol has the signature
+``(method, args, kwargs)``, where ``method`` is the passed
+multimethod and ``args``/``kwargs`` are the call's arguments with the
+converted dispatchables already substituted back in.
+
+>>> def __ua_function__(method, args, kwargs):
+...     return method.__name__, args, kwargs
+>>> be.__ua_function__ = __ua_function__
+
+The other protocol of interest is the ``__ua_convert__`` protocol. It has the
+signature ``(dispatchables, coerce)``. When ``coerce`` is ``False``, conversion
+between the formats should ideally be an ``O(1)`` operation, which means that
+no memory copying should be involved, only views of the existing data.
+
+>>> def __ua_convert__(dispatchables, coerce):
+...     for d in dispatchables:
+...         if d.type is int:
+...             if coerce and d.coercible:
+...                 yield str(d.value)
+...             else:
+...                 yield d.value
+>>> be.__ua_convert__ = __ua_convert__
+
+Now that we have defined the backend, the next thing to do is to call the multimethod.
+
+>>> with ua.set_backend(be):
+...      overridden_me(1, "2")
+('override_me', (1, '2'), {})
+
+Note that the marked type has no effect on the actual type of the passed object.
+We can also coerce the type of the input.
+
+>>> with ua.set_backend(be, coerce=True):
+...     overridden_me(1, "2")
+...     overridden_me(1.0, "2")
+('override_me', ('1', '2'), {})
+('override_me', ('1.0', '2'), {})
+
+Another feature is that if you remove ``__ua_convert__``, the arguments are not
+converted at all and it's up to the backend to handle that.
+
+>>> del be.__ua_convert__
+>>> with ua.set_backend(be):
+...     overridden_me(1, "2")
+('override_me', (1, '2'), {})
+
+You also have the option to return ``NotImplemented``, in which case processing moves on
+to the next back-end, which in this case, doesn't exist. The same applies to
+``__ua_convert__``.
+
+>>> be.__ua_function__ = lambda *a, **kw: NotImplemented
+>>> with ua.set_backend(be):
+...     overridden_me(1, "2")
+Traceback (most recent call last):
+    ...
+uarray.BackendNotImplementedError: ...
+
+The last possibility is if we don't have ``__ua_convert__``, in which case the job is left
+up to ``__ua_function__``, but putting things back into arrays after conversion will not be
+possible.
+"""
+
+from ._backend import *
+__version__ = '0.8.8.dev0+aa94c5a4.scipy'
diff --git a/__packaged__/coreml/.python_dependencies/scipy/_lib/_uarray/_backend.py b/__packaged__/coreml/.python_dependencies/scipy/_lib/_uarray/_backend.py
new file mode 100644
index 00000000..3a38d74c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/_lib/_uarray/_backend.py
@@ -0,0 +1,703 @@
+import typing
+import types
+import inspect
+import functools
+from . import _uarray
+import copyreg
+import pickle
+import contextlib
+
+ArgumentExtractorType = typing.Callable[..., typing.Tuple["Dispatchable", ...]]
+ArgumentReplacerType = typing.Callable[
+    [typing.Tuple, typing.Dict, typing.Tuple], typing.Tuple[typing.Tuple, typing.Dict]
+]
+
+from ._uarray import (  # type: ignore
+    BackendNotImplementedError,
+    _Function,
+    _SkipBackendContext,
+    _SetBackendContext,
+    _BackendState,
+)
+
+__all__ = [
+    "set_backend",
+    "set_global_backend",
+    "skip_backend",
+    "register_backend",
+    "determine_backend",
+    "determine_backend_multi",
+    "clear_backends",
+    "create_multimethod",
+    "generate_multimethod",
+    "_Function",
+    "BackendNotImplementedError",
+    "Dispatchable",
+    "wrap_single_convertor",
+    "wrap_single_convertor_instance",
+    "all_of_type",
+    "mark_as",
+    "set_state",
+    "get_state",
+    "reset_state",
+    "_BackendState",
+    "_SkipBackendContext",
+    "_SetBackendContext",
+]
+
+
+def unpickle_function(mod_name, qname, self_):
+    import importlib
+
+    try:
+        module = importlib.import_module(mod_name)
+        qname = qname.split(".")
+        func = module
+        for q in qname:
+            func = getattr(func, q)
+
+        if self_ is not None:
+            func = types.MethodType(func, self_)
+
+        return func
+    except (ImportError, AttributeError) as e:
+        from pickle import UnpicklingError
+
+        raise UnpicklingError from e
+
+
+def pickle_function(func):
+    mod_name = getattr(func, "__module__", None)
+    qname = getattr(func, "__qualname__", None)
+    self_ = getattr(func, "__self__", None)
+
+    try:
+        test = unpickle_function(mod_name, qname, self_)
+    except pickle.UnpicklingError:
+        test = None
+
+    if test is not func:
+        raise pickle.PicklingError(
+            "Can't pickle {}: it's not the same object as {}".format(func, test)
+        )
+
+    return unpickle_function, (mod_name, qname, self_)
+
+
+def pickle_state(state):
+    return _uarray._BackendState._unpickle, state._pickle()
+
+
+def pickle_set_backend_context(ctx):
+    return _SetBackendContext, ctx._pickle()
+
+
+def pickle_skip_backend_context(ctx):
+    return _SkipBackendContext, ctx._pickle()
+
+
+copyreg.pickle(_Function, pickle_function)
+copyreg.pickle(_uarray._BackendState, pickle_state)
+copyreg.pickle(_SetBackendContext, pickle_set_backend_context)
+copyreg.pickle(_SkipBackendContext, pickle_skip_backend_context)
+
+
+def get_state():
+    """
+    Returns an opaque object containing the current state of all the backends.
+
+    Can be used for synchronization between threads/processes.
+
+    See Also
+    --------
+    set_state
+        Sets the state returned by this function.
+    """
+    return _uarray.get_state()
+
+
+@contextlib.contextmanager
+def reset_state():
+    """
+    Returns a context manager that resets all state once exited.
+
+    See Also
+    --------
+    set_state
+        Context manager that sets the backend state.
+    get_state
+        Gets a state to be set by this context manager.
+    """
+    with set_state(get_state()):
+        yield
+
+
+@contextlib.contextmanager
+def set_state(state):
+    """
+    A context manager that sets the state of the backends to one returned by :obj:`get_state`.
+
+    See Also
+    --------
+    get_state
+        Gets a state to be set by this context manager.
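+
+    Examples
+    --------
+    A sketch of capturing state in one place and applying it in another,
+    e.g. on a worker thread:
+
+    >>> state = get_state()
+    >>> with set_state(state):
+    ...     pass  # backend-dispatched calls here see the captured state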
+    """
+    old_state = get_state()
+    _uarray.set_state(state)
+    try:
+        yield
+    finally:
+        _uarray.set_state(old_state, True)
+
+
+def create_multimethod(*args, **kwargs):
+    """
+    Creates a decorator for generating multimethods.
+
+    This function creates a decorator that can be used with an argument
+    extractor in order to generate a multimethod. Other than for the
+    argument extractor, all arguments are passed on to
+    :obj:`generate_multimethod`.
+
+    See Also
+    --------
+    generate_multimethod
+        Generates a multimethod.
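+
+    Examples
+    --------
+    A sketch, mirroring the example in :obj:`generate_multimethod`:
+
+    >>> def override_replacer(args, kwargs, dispatchables):
+    ...     return (dispatchables[0], args[1]), {}
+    >>> @create_multimethod(override_replacer, "ua_examples")
+    ... def override_me(a, b):
+    ...     return Dispatchable(a, int),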
+    """
+
+    def wrapper(a):
+        return generate_multimethod(a, *args, **kwargs)
+
+    return wrapper
+
+
+def generate_multimethod(
+    argument_extractor: ArgumentExtractorType,
+    argument_replacer: ArgumentReplacerType,
+    domain: str,
+    default: typing.Optional[typing.Callable] = None,
+):
+    """
+    Generates a multimethod.
+
+    Parameters
+    ----------
+    argument_extractor : ArgumentExtractorType
+        A callable which extracts the dispatchable arguments. Extracted arguments
+        should be marked by the :obj:`Dispatchable` class. It has the same signature
+        as the desired multimethod.
+    argument_replacer : ArgumentReplacerType
+        A callable with the signature (args, kwargs, dispatchables), which should also
+        return an (args, kwargs) pair with the dispatchables replaced inside the args/kwargs.
+    domain : str
+        A string value indicating the domain of this multimethod.
+    default: Optional[Callable], optional
+        The default implementation of this multimethod, where ``None`` (the default) specifies
+        there is no default implementation.
+
+    Examples
+    --------
+    In this example, ``a`` is to be dispatched over, so we return it, while marking it as an ``int``.
+    The trailing comma is needed because the args have to be returned as an iterable.
+
+    >>> def override_me(a, b):
+    ...   return Dispatchable(a, int),
+
+    Next, we define the argument replacer that replaces the dispatchables inside args/kwargs with the
+    supplied ones.
+
+    >>> def override_replacer(args, kwargs, dispatchables):
+    ...     return (dispatchables[0], args[1]), {}
+
+    Next, we define the multimethod.
+
+    >>> overridden_me = generate_multimethod(
+    ...     override_me, override_replacer, "ua_examples"
+    ... )
+
+    Notice that there's no default implementation, unless you supply one.
+
+    >>> overridden_me(1, "a")
+    Traceback (most recent call last):
+        ...
+    uarray.BackendNotImplementedError: ...
+
+    >>> overridden_me2 = generate_multimethod(
+    ...     override_me, override_replacer, "ua_examples", default=lambda x, y: (x, y)
+    ... )
+    >>> overridden_me2(1, "a")
+    (1, 'a')
+
+    See Also
+    --------
+    uarray
+        See the module documentation for how to override the method by creating backends.
+    """
+    kw_defaults, arg_defaults, opts = get_defaults(argument_extractor)
+    ua_func = _Function(
+        argument_extractor,
+        argument_replacer,
+        domain,
+        arg_defaults,
+        kw_defaults,
+        default,
+    )
+
+    return functools.update_wrapper(ua_func, argument_extractor)
+
+
+def set_backend(backend, coerce=False, only=False):
+    """
+    A context manager that sets the preferred backend.
+
+    Parameters
+    ----------
+    backend
+        The backend to set.
+    coerce
+        Whether or not to coerce to a specific backend's types. Implies ``only``.
+    only
+        Whether or not this should be the last backend to try.
+
+    See Also
+    --------
+    skip_backend: A context manager that allows skipping of backends.
+    set_global_backend: Set a single, global backend for a domain.
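+
+    Examples
+    --------
+    A sketch with a minimal backend object (see the :obj:`uarray` module
+    docstring for the full protocol):
+
+    >>> class Backend: pass
+    >>> be = Backend()
+    >>> be.__ua_domain__ = "ua_examples"
+    >>> be.__ua_function__ = lambda f, a, kw: None
+    >>> with set_backend(be, coerce=True):
+    ...     pass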
+    """
+    try:
+        return backend.__ua_cache__["set", coerce, only]
+    except AttributeError:
+        backend.__ua_cache__ = {}
+    except KeyError:
+        pass
+
+    ctx = _SetBackendContext(backend, coerce, only)
+    backend.__ua_cache__["set", coerce, only] = ctx
+    return ctx
+
+
+def skip_backend(backend):
+    """
+    A context manager that allows one to skip a given backend from processing
+    entirely. This allows one to use another backend's code in a library that
+    is also a consumer of the same backend.
+
+    Parameters
+    ----------
+    backend
+        The backend to skip.
+
+    See Also
+    --------
+    set_backend: A context manager that allows setting of backends.
+    set_global_backend: Set a single, global backend for a domain.
+    """
+    try:
+        return backend.__ua_cache__["skip"]
+    except AttributeError:
+        backend.__ua_cache__ = {}
+    except KeyError:
+        pass
+
+    ctx = _SkipBackendContext(backend)
+    backend.__ua_cache__["skip"] = ctx
+    return ctx
+
+
+def get_defaults(f):
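+    """Extract keyword defaults, positional defaults, and the set of
+    parameter names from ``f``'s signature (a small sketch):
+
+    >>> def f(a, b=1, *, c=2): ...
+    >>> kw, pos, names = get_defaults(f)
+    >>> kw == {'b': 1, 'c': 2} and names == {'a', 'b', 'c'}
+    True
+    """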
+    sig = inspect.signature(f)
+    kw_defaults = {}
+    arg_defaults = []
+    opts = set()
+    for k, v in sig.parameters.items():
+        if v.default is not inspect.Parameter.empty:
+            kw_defaults[k] = v.default
+        if v.kind in (
+            inspect.Parameter.POSITIONAL_ONLY,
+            inspect.Parameter.POSITIONAL_OR_KEYWORD,
+        ):
+            arg_defaults.append(v.default)
+        opts.add(k)
+
+    return kw_defaults, tuple(arg_defaults), opts
+
+
+def set_global_backend(backend, coerce=False, only=False, *, try_last=False):
+    """
+    This utility method replaces the default backend for permanent use. It
+    will be tried in the list of backends automatically, unless the
+    ``only`` flag is set on a backend. This will be the first tried
+    backend outside the :obj:`set_backend` context manager.
+
+    Note that this method is not thread-safe.
+
+    .. warning::
+        We caution library authors against using this function in
+        their code. We do *not* support this use-case. This function
+        is meant to be used only by users themselves, or by a reference
+        implementation, if one exists.
+
+    Parameters
+    ----------
+    backend
+        The backend to register.
+    coerce : bool
+        Whether to coerce input types when trying this backend.
+    only : bool
+        If ``True``, no more backends will be tried if this fails.
+        Implied by ``coerce=True``.
+    try_last : bool
+        If ``True``, the global backend is tried after registered backends.
+
+    See Also
+    --------
+    set_backend: A context manager that allows setting of backends.
+    skip_backend: A context manager that allows skipping of backends.
+    """
+    _uarray.set_global_backend(backend, coerce, only, try_last)
+
+
+def register_backend(backend):
+    """
+    This utility method registers a backend for permanent use. It
+    will be tried in the list of backends automatically, unless the
+    ``only`` flag is set on a backend.
+
+    Note that this method is not thread-safe.
+
+    Parameters
+    ----------
+    backend
+        The backend to register.
+    """
+    _uarray.register_backend(backend)
+
+
+def clear_backends(domain, registered=True, globals=False):
+    """
+    This utility method clears registered backends.
+
+    .. warning::
+        We caution library authors against using this function in
+        their code. We do *not* support this use-case. This function
+        is meant to be used only by users themselves.
+
+    .. warning::
+        Do NOT use this method inside a multimethod call, or the
+        program is likely to crash.
+
+    Parameters
+    ----------
+    domain : Optional[str]
+        The domain for which to de-register backends. ``None`` means
+        de-register for all domains.
+    registered : bool
+        Whether or not to clear registered backends. See :obj:`register_backend`.
+    globals : bool
+        Whether or not to clear global backends. See :obj:`set_global_backend`.
+
+    See Also
+    --------
+    register_backend : Register a backend globally.
+    set_global_backend : Set a global backend.
+    """
+    _uarray.clear_backends(domain, registered, globals)
+
+
+class Dispatchable:
+    """
+    A utility class which marks an argument with a specific dispatch type.
+
+
+    Attributes
+    ----------
+    value
+        The value of the Dispatchable.
+
+    type
+        The type of the Dispatchable.
+
+    Examples
+    --------
+    >>> x = Dispatchable(1, str)
+    >>> x
+    <Dispatchable: type=<class 'str'>, value=1>
+
+    See Also
+    --------
+    all_of_type
+        Marks all unmarked parameters of a function.
+
+    mark_as
+        Allows one to create a utility function to mark as a given type.
+    """
+
+    def __init__(self, value, dispatch_type, coercible=True):
+        self.value = value
+        self.type = dispatch_type
+        self.coercible = coercible
+
+    def __getitem__(self, index):
+        return (self.type, self.value)[index]
+
+    def __str__(self):
+        return "<{0}: type={1!r}, value={2!r}>".format(
+            type(self).__name__, self.type, self.value
+        )
+
+    __repr__ = __str__
+
+
+def mark_as(dispatch_type):
+    """
+    Creates a utility function to mark something as a specific type.
+
+    Examples
+    --------
+    >>> mark_int = mark_as(int)
+    >>> mark_int(1)
+    <Dispatchable: type=<class 'int'>, value=1>
+    """
+    return functools.partial(Dispatchable, dispatch_type=dispatch_type)
+
+
+def all_of_type(arg_type):
+    """
+    Marks all unmarked arguments as a given type.
+
+    Examples
+    --------
+    >>> @all_of_type(str)
+    ... def f(a, b):
+    ...     return a, Dispatchable(b, int)
+    >>> f('a', 1)
+    (<Dispatchable: type=<class 'str'>, value='a'>, <Dispatchable: type=<class 'int'>, value=1>)
+    """
+
+    def outer(func):
+        @functools.wraps(func)
+        def inner(*args, **kwargs):
+            extracted_args = func(*args, **kwargs)
+            return tuple(
+                Dispatchable(arg, arg_type)
+                if not isinstance(arg, Dispatchable)
+                else arg
+                for arg in extracted_args
+            )
+
+        return inner
+
+    return outer
+
+
+def wrap_single_convertor(convert_single):
+    """
+    Wraps a ``__ua_convert__`` defined for a single element to all elements.
+    If any of them return ``NotImplemented``, the operation is assumed to be
+    undefined.
+
+    Accepts a signature of (value, type, coerce).
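+
+    For instance (a sketch; the conversion rule is arbitrary):
+
+    >>> @wrap_single_convertor
+    ... def __ua_convert__(value, dispatch_type, coerce):
+    ...     return str(value) if dispatch_type is int else NotImplemented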
+    """
+
+    @functools.wraps(convert_single)
+    def __ua_convert__(dispatchables, coerce):
+        converted = []
+        for d in dispatchables:
+            c = convert_single(d.value, d.type, coerce and d.coercible)
+
+            if c is NotImplemented:
+                return NotImplemented
+
+            converted.append(c)
+
+        return converted
+
+    return __ua_convert__
+
+
+def wrap_single_convertor_instance(convert_single):
+    """
+    Wraps a ``__ua_convert__`` defined for a single element to all elements.
+    If any of them return ``NotImplemented``, the operation is assumed to be
+    undefined.
+
+    Accepts a signature of (value, type, coerce).
+    """
+
+    @functools.wraps(convert_single)
+    def __ua_convert__(self, dispatchables, coerce):
+        converted = []
+        for d in dispatchables:
+            c = convert_single(self, d.value, d.type, coerce and d.coercible)
+
+            if c is NotImplemented:
+                return NotImplemented
+
+            converted.append(c)
+
+        return converted
+
+    return __ua_convert__
+
+
+def determine_backend(value, dispatch_type, *, domain, only=True, coerce=False):
+    """Set the backend to the first active backend that supports ``value``
+
+    This is useful for functions that call multimethods without any dispatchable
+    arguments. You can use :func:`determine_backend` to ensure the same backend
+    is used everywhere in a block of multimethod calls.
+
+    Parameters
+    ----------
+    value
+        The value being tested
+    dispatch_type
+        The dispatch type associated with ``value``, i.e. its
+        "marking".
+    domain: string
+        The domain to query for backends and set.
+    coerce: bool
+        Whether or not to allow coercion to the backend's types. Implies ``only``.
+    only: bool
+        Whether or not this should be the last backend to try.
+
+    See Also
+    --------
+    set_backend: For when you know which backend to set
+
+    Notes
+    -----
+
+    Support is determined by the ``__ua_convert__`` protocol. Backends must
+    return ``NotImplemented`` from their ``__ua_convert__`` if they don't
+    support input of that type.
+
+    Examples
+    --------
+
+    Suppose we have two backends ``BackendA`` and ``BackendB`` each supporting
+    different types, ``TypeA`` and ``TypeB``. Neither supporting the other type:
+
+    >>> with ua.set_backend(ex.BackendA):
+    ...     ex.call_multimethod(ex.TypeB(), ex.TypeB())
+    Traceback (most recent call last):
+        ...
+    uarray.BackendNotImplementedError: ...
+
+    Now consider a multimethod that creates a new object of ``TypeA``, or
+    ``TypeB`` depending on the active backend.
+
+    >>> with ua.set_backend(ex.BackendA), ua.set_backend(ex.BackendB):
+    ...         res = ex.creation_multimethod()
+    ...         ex.call_multimethod(res, ex.TypeA())
+    Traceback (most recent call last):
+        ...
+    uarray.BackendNotImplementedError: ...
+
+    ``res`` is an object of ``TypeB`` because ``BackendB`` is set in the
+    innermost with statement. So, ``call_multimethod`` fails since the types
+    don't match.
+
+    Instead, we need to first find a backend suitable for all of our objects.
+
+    >>> with ua.set_backend(ex.BackendA), ua.set_backend(ex.BackendB):
+    ...     x = ex.TypeA()
+    ...     with ua.determine_backend(x, "mark", domain="ua_examples"):
+    ...         res = ex.creation_multimethod()
+    ...         ex.call_multimethod(res, x)
+    TypeA
+
+    """
+    dispatchables = (Dispatchable(value, dispatch_type, coerce),)
+    backend = _uarray.determine_backend(domain, dispatchables, coerce)
+
+    return set_backend(backend, coerce=coerce, only=only)
+
+
+def determine_backend_multi(
+    dispatchables, *, domain, only=True, coerce=False, **kwargs
+):
+    """Set a backend supporting all ``dispatchables``
+
+    This is useful for functions that call multimethods without any dispatchable
+    arguments. You can use :func:`determine_backend_multi` to ensure the same
+    backend is used everywhere in a block of multimethod calls involving
+    multiple arrays.
+
+    Parameters
+    ----------
+    dispatchables: Sequence[Union[uarray.Dispatchable, Any]]
+        The dispatchables that must be supported
+    domain: string
+        The domain to query for backends and set.
+    coerce: bool
+        Whether or not to allow coercion to the backend's types. Implies ``only``.
+    only: bool
+        Whether or not this should be the last backend to try.
+    dispatch_type: Optional[Any]
+        The default dispatch type associated with ``dispatchables``, i.e.
+        their "marking".
+
+    See Also
+    --------
+    determine_backend: For a single dispatch value
+    set_backend: For when you know which backend to set
+
+    Notes
+    -----
+
+    Support is determined by the ``__ua_convert__`` protocol. Backends must
+    return ``NotImplemented`` from their ``__ua_convert__`` if they don't
+    support input of that type.
+
+    Examples
+    --------
+
+    :func:`determine_backend` allows the backend to be set from a single
+    object. :func:`determine_backend_multi` allows multiple objects to be
+    checked simultaneously for support in the backend. Suppose we have a
+    ``BackendAB`` which supports ``TypeA`` and ``TypeB`` in the same call,
+    and a ``BackendBC`` that doesn't support ``TypeA``.
+
+    >>> with ua.set_backend(ex.BackendAB), ua.set_backend(ex.BackendBC):
+    ...     a, b = ex.TypeA(), ex.TypeB()
+    ...     with ua.determine_backend_multi(
+    ...         [ua.Dispatchable(a, "mark"), ua.Dispatchable(b, "mark")],
+    ...         domain="ua_examples"
+    ...     ):
+    ...         res = ex.creation_multimethod()
+    ...         ex.call_multimethod(res, a, b)
+    TypeA
+
+    This won't call ``BackendBC`` because it doesn't support ``TypeA``.
+
+    We can also leave out the ``ua.Dispatchable`` if we specify the
+    default ``dispatch_type`` for the ``dispatchables`` argument.
+
+    >>> with ua.set_backend(ex.BackendAB), ua.set_backend(ex.BackendBC):
+    ...     a, b = ex.TypeA(), ex.TypeB()
+    ...     with ua.determine_backend_multi(
+    ...         [a, b], dispatch_type="mark", domain="ua_examples"
+    ...     ):
+    ...         res = ex.creation_multimethod()
+    ...         ex.call_multimethod(res, a, b)
+    TypeA
+
+    """
+    if "dispatch_type" in kwargs:
+        disp_type = kwargs.pop("dispatch_type")
+        dispatchables = tuple(
+            d if isinstance(d, Dispatchable) else Dispatchable(d, disp_type)
+            for d in dispatchables
+        )
+    else:
+        dispatchables = tuple(dispatchables)
+        if not all(isinstance(d, Dispatchable) for d in dispatchables):
+            raise TypeError("dispatchables must be instances of uarray.Dispatchable")
+
+    if len(kwargs) != 0:
+        raise TypeError("Received unexpected keyword arguments: {}".format(kwargs))
+
+    backend = _uarray.determine_backend(domain, dispatchables, coerce)
+
+    return set_backend(backend, coerce=coerce, only=only)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/_lib/_util.py b/__packaged__/coreml/.python_dependencies/scipy/_lib/_util.py
new file mode 100644
index 00000000..726ffe47
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/_lib/_util.py
@@ -0,0 +1,711 @@
+from contextlib import contextmanager
+import functools
+import operator
+import warnings
+import numbers
+from collections import namedtuple
+import inspect
+import math
+from typing import (
+    Optional,
+    Union,
+    TYPE_CHECKING,
+    TypeVar,
+)
+
+import numpy as np
+
+IntNumber = Union[int, np.integer]
+DecimalNumber = Union[float, np.floating, np.integer]
+
+# Since Generator was introduced in numpy 1.17, the following condition is needed for
+# backward compatibility
+if TYPE_CHECKING:
+    SeedType = Optional[Union[IntNumber, np.random.Generator,
+                              np.random.RandomState]]
+    GeneratorType = TypeVar("GeneratorType", bound=Union[np.random.Generator,
+                                                         np.random.RandomState])
+
+try:
+    from numpy.random import Generator as Generator
+except ImportError:
+    class Generator():  # type: ignore[no-redef]
+        pass
+
+
+def _lazywhere(cond, arrays, f, fillvalue=None, f2=None):
+    """
+    np.where(cond, x, fillvalue) always evaluates x even where cond is False.
+    This one only evaluates f(arr1[cond], arr2[cond], ...).
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a, b = np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8])
+    >>> def f(a, b):
+    ...     return a*b
+    >>> _lazywhere(a > 2, (a, b), f, np.nan)
+    array([ nan,  nan,  21.,  32.])
+
+    Note that it assumes all `arrays` are of the same shape or can be
+    broadcast together.
+
+    """
+    cond = np.asarray(cond)
+    if fillvalue is None:
+        if f2 is None:
+            raise ValueError("One of (fillvalue, f2) must be given.")
+        else:
+            fillvalue = np.nan
+    else:
+        if f2 is not None:
+            raise ValueError("Only one of (fillvalue, f2) can be given.")
+
+    args = np.broadcast_arrays(cond, *arrays)
+    cond, arrays = args[0], args[1:]
+    temp = tuple(np.extract(cond, arr) for arr in arrays)
+    tcode = np.mintypecode([a.dtype.char for a in arrays])
+    out = np.full(np.shape(arrays[0]), fill_value=fillvalue, dtype=tcode)
+    np.place(out, cond, f(*temp))
+    if f2 is not None:
+        temp = tuple(np.extract(~cond, arr) for arr in arrays)
+        np.place(out, ~cond, f2(*temp))
+
+    return out
+
+
+def _lazyselect(condlist, choicelist, arrays, default=0):
+    """
+    Mimic `np.select(condlist, choicelist)`.
+
+    Note that it assumes all `arrays` are of the same shape or can be
+    broadcast together.
+
+    All functions in `choicelist` must accept array arguments in the order
+    given in `arrays` and must return an array of the same shape as broadcasted
+    `arrays`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.arange(6)
+    >>> np.select([x < 3, x > 3], [x**2, x**3], default=0)
+    array([  0,   1,   4,   0,  64, 125])
+
+    >>> _lazyselect([x < 3, x > 3], [lambda x: x**2, lambda x: x**3], (x,))
+    array([   0.,    1.,    4.,   0.,   64.,  125.])
+
+    >>> a = -np.ones_like(x)
+    >>> _lazyselect([x < 3, x > 3],
+    ...             [lambda x, a: x**2, lambda x, a: a * x**3],
+    ...             (x, a), default=np.nan)
+    array([   0.,    1.,    4.,   nan,  -64., -125.])
+
+    """
+    arrays = np.broadcast_arrays(*arrays)
+    tcode = np.mintypecode([a.dtype.char for a in arrays])
+    out = np.full(np.shape(arrays[0]), fill_value=default, dtype=tcode)
+    for func, cond in zip(choicelist, condlist):
+        if cond is False:
+            # the condition is the scalar False, so this choice can never
+            # be selected; skip evaluating its function
+            continue
+        cond, _ = np.broadcast_arrays(cond, arrays[0])
+        temp = tuple(np.extract(cond, arr) for arr in arrays)
+        np.place(out, cond, func(*temp))
+    return out
+
+
+def _aligned_zeros(shape, dtype=float, order="C", align=None):
+    """Allocate a new ndarray with aligned memory.
+
+    Primary use case for this currently is working around a f2py issue
+    in NumPy 1.9.1, where dtype.alignment is such that np.zeros() does
+    not necessarily create arrays aligned up to it.
+
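+    A quick check of the alignment guarantee (sketch):
+
+    >>> a = _aligned_zeros((3, 3), dtype=np.float64, align=64)
+    >>> a.__array_interface__['data'][0] % 64
+    0
+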
+    """
+    dtype = np.dtype(dtype)
+    if align is None:
+        align = dtype.alignment
+    if not hasattr(shape, '__len__'):
+        shape = (shape,)
+    size = functools.reduce(operator.mul, shape) * dtype.itemsize
+    buf = np.empty(size + align + 1, np.uint8)
+    offset = buf.__array_interface__['data'][0] % align
+    if offset != 0:
+        offset = align - offset
+    # Note: slices producing 0-size arrays do not necessarily change
+    # data pointer --- so we use and allocate size+1
+    buf = buf[offset:offset+size+1][:-1]
+    data = np.ndarray(shape, dtype, buf, order=order)
+    data.fill(0)
+    return data
+
+
+def _prune_array(array):
+    """Return an array equivalent to the input array. If the input
+    array is a view of a much larger array, copy its contents to a
+    newly allocated array. Otherwise, return the input unchanged.
+    """
+    if array.base is not None and array.size < array.base.size // 2:
+        return array.copy()
+    return array
+
+
+def prod(iterable):
+    """
+    Product of a sequence of numbers.
+
+    Faster than np.prod for short lists like array shapes, and does
+    not overflow if using Python integers.
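+
+    >>> prod((2, 3, 4))
+    24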
+    """
+    product = 1
+    for x in iterable:
+        product *= x
+    return product
+
+
+def float_factorial(n: int) -> float:
+    """Compute the factorial and return as a float
+
+    Returns infinity when result is too large for a double
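+
+    >>> float_factorial(5)
+    120.0
+    >>> float_factorial(171)
+    inf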
+    """
+    return float(math.factorial(n)) if n < 171 else np.inf
+
+
+# copy-pasted from scikit-learn utils/validation.py
+# change this to scipy.stats._qmc.check_random_state once numpy 1.16 is dropped
+def check_random_state(seed):
+    """Turn `seed` into a `np.random.RandomState` instance.
+
+    Parameters
+    ----------
+    seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
+        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
+        singleton is used.
+        If `seed` is an int, a new ``RandomState`` instance is used,
+        seeded with `seed`.
+        If `seed` is already a ``Generator`` or ``RandomState`` instance then
+        that instance is used.
+
+    Returns
+    -------
+    seed : {`numpy.random.Generator`, `numpy.random.RandomState`}
+        Random number generator.
+
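+    Examples
+    --------
+    >>> rng = check_random_state(42)
+    >>> isinstance(rng, np.random.RandomState)
+    True
+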
+    """
+    if seed is None or seed is np.random:
+        return np.random.mtrand._rand
+    if isinstance(seed, (numbers.Integral, np.integer)):
+        return np.random.RandomState(seed)
+    if isinstance(seed, (np.random.RandomState, np.random.Generator)):
+        return seed
+
+    raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
+                     ' instance' % seed)
+
+
+def _asarray_validated(a, check_finite=True,
+                       sparse_ok=False, objects_ok=False, mask_ok=False,
+                       as_inexact=False):
+    """
+    Helper function for SciPy argument validation.
+
+    Many SciPy linear algebra functions do support arbitrary array-like
+    input arguments. Examples of commonly unsupported inputs include
+    matrices containing inf/nan, sparse matrix representations, and
+    matrices with complicated elements.
+
+    Parameters
+    ----------
+    a : array_like
+        The array-like input.
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+        Default: True
+    sparse_ok : bool, optional
+        True if scipy sparse matrices are allowed.
+    objects_ok : bool, optional
+        True if arrays with dtype('O') are allowed.
+    mask_ok : bool, optional
+        True if masked arrays are allowed.
+    as_inexact : bool, optional
+        True to convert the input array to a np.inexact dtype.
+
+    Returns
+    -------
+    ret : ndarray
+        The converted validated array.
+
+    """
+    if not sparse_ok:
+        import scipy.sparse
+        if scipy.sparse.issparse(a):
+            msg = ('Sparse matrices are not supported by this function. '
+                   'Perhaps one of the scipy.sparse.linalg functions '
+                   'would work instead.')
+            raise ValueError(msg)
+    if not mask_ok:
+        if np.ma.isMaskedArray(a):
+            raise ValueError('masked arrays are not supported')
+    toarray = np.asarray_chkfinite if check_finite else np.asarray
+    a = toarray(a)
+    if not objects_ok:
+        if a.dtype is np.dtype('O'):
+            raise ValueError('object arrays are not supported')
+    if as_inexact:
+        if not np.issubdtype(a.dtype, np.inexact):
+            a = toarray(a, dtype=np.float_)
+    return a
+
+
+def _validate_int(k, name, minimum=None):
+    """
+    Validate a scalar integer.
+
+    This function can be used to validate an argument to a function
+    that expects the value to be an integer.  It uses `operator.index`
+    to validate the value (so, for example, k=2.0 results in a
+    TypeError).
+
+    Parameters
+    ----------
+    k : int
+        The value to be validated.
+    name : str
+        The name of the parameter.
+    minimum : int, optional
+        An optional lower bound.
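+
+    Examples
+    --------
+    >>> _validate_int(3, 'n', minimum=0)
+    3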
+    """
+    try:
+        k = operator.index(k)
+    except TypeError:
+        raise TypeError(f'{name} must be an integer.') from None
+    if minimum is not None and k < minimum:
+        raise ValueError(f'{name} must be an integer not less '
+                         f'than {minimum}') from None
+    return k
+
+
+# Add a replacement for inspect.getfullargspec().
+# The version below is borrowed from Django,
+# https://github.com/django/django/pull/4846.
+
+# Note an inconsistency between inspect.getfullargspec(func) and
+# inspect.signature(func). If `func` is a bound method, the latter does *not*
+# list `self` as a first argument, while the former *does*.
+# Hence, cook up a common ground replacement: `getfullargspec_no_self` which
+# mimics `inspect.getfullargspec` but does not list `self`.
+#
+# This way, the caller code does not need to know whether it uses a legacy
+# .getfullargspec or a bright and shiny .signature.
+
+FullArgSpec = namedtuple('FullArgSpec',
+                         ['args', 'varargs', 'varkw', 'defaults',
+                          'kwonlyargs', 'kwonlydefaults', 'annotations'])
+
+
+def getfullargspec_no_self(func):
+    """inspect.getfullargspec replacement using inspect.signature.
+
+    If func is a bound method, do not list the 'self' parameter.
+
+    Parameters
+    ----------
+    func : callable
+        A callable to inspect
+
+    Returns
+    -------
+    fullargspec : FullArgSpec(args, varargs, varkw, defaults, kwonlyargs,
+                              kwonlydefaults, annotations)
+
+        NOTE: if the first argument of `func` is self, it is *not*, I repeat
+        *not*, included in fullargspec.args.
+        This is done for consistency between inspect.getargspec() under
+        Python 2.x, and inspect.signature() under Python 3.x.
+
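+    Examples
+    --------
+    >>> class A:
+    ...     def m(self, x, y=1):
+    ...         pass
+    >>> getfullargspec_no_self(A().m).args  # 'self' is not listed
+    ['x', 'y']
+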
+    """
+    sig = inspect.signature(func)
+    args = [
+        p.name for p in sig.parameters.values()
+        if p.kind in [inspect.Parameter.POSITIONAL_OR_KEYWORD,
+                      inspect.Parameter.POSITIONAL_ONLY]
+    ]
+    varargs = [
+        p.name for p in sig.parameters.values()
+        if p.kind == inspect.Parameter.VAR_POSITIONAL
+    ]
+    varargs = varargs[0] if varargs else None
+    varkw = [
+        p.name for p in sig.parameters.values()
+        if p.kind == inspect.Parameter.VAR_KEYWORD
+    ]
+    varkw = varkw[0] if varkw else None
+    defaults = tuple(
+        p.default for p in sig.parameters.values()
+        if (p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and
+            p.default is not p.empty)
+    ) or None
+    kwonlyargs = [
+        p.name for p in sig.parameters.values()
+        if p.kind == inspect.Parameter.KEYWORD_ONLY
+    ]
+    kwdefaults = {p.name: p.default for p in sig.parameters.values()
+                  if p.kind == inspect.Parameter.KEYWORD_ONLY and
+                  p.default is not p.empty}
+    annotations = {p.name: p.annotation for p in sig.parameters.values()
+                   if p.annotation is not p.empty}
+    return FullArgSpec(args, varargs, varkw, defaults, kwonlyargs,
+                       kwdefaults or None, annotations)
+
+
+class _FunctionWrapper:
+    """
+    Object to wrap user's function, allowing picklability
+    """
+    def __init__(self, f, args):
+        self.f = f
+        self.args = [] if args is None else args
+
+    def __call__(self, x):
+        return self.f(x, *self.args)
+
+
+class MapWrapper:
+    """
+    Parallelisation wrapper for working with map-like callables, such as
+    `multiprocessing.Pool.map`.
+
+    Parameters
+    ----------
+    pool : int or map-like callable
+        If `pool` is an integer, then it specifies the number of worker
+        processes to use for parallelization. If ``int(pool) == 1``, then no
+        parallel processing is used and the built-in map is used.
+        If ``pool == -1``, then the pool will utilize all available CPUs.
+        If `pool` is a map-like callable that follows the same
+        calling sequence as the built-in map function, then this callable is
+        used for parallelization.
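+
+    Examples
+    --------
+    A serial sketch (``pool=1`` falls back to the built-in map):
+
+    >>> with MapWrapper(pool=1) as mapper:
+    ...     list(mapper(abs, [-1, 2, -3]))
+    [1, 2, 3]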
+    """
+    def __init__(self, pool=1):
+        self.pool = None
+        self._mapfunc = map
+        self._own_pool = False
+
+        if callable(pool):
+            self.pool = pool
+            self._mapfunc = self.pool
+        else:
+            from multiprocessing import Pool
+            # user supplies a number
+            if int(pool) == -1:
+                # use as many processors as possible
+                self.pool = Pool()
+                self._mapfunc = self.pool.map
+                self._own_pool = True
+            elif int(pool) == 1:
+                pass
+            elif int(pool) > 1:
+                # use the number of processors requested
+                self.pool = Pool(processes=int(pool))
+                self._mapfunc = self.pool.map
+                self._own_pool = True
+            else:
+                raise RuntimeError("Number of workers specified must be -1,"
+                                   " an int >= 1, or an object with a 'map' "
+                                   "method")
+
+    def __enter__(self):
+        return self
+
+    def terminate(self):
+        if self._own_pool:
+            self.pool.terminate()
+
+    def join(self):
+        if self._own_pool:
+            self.pool.join()
+
+    def close(self):
+        if self._own_pool:
+            self.pool.close()
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        if self._own_pool:
+            self.pool.close()
+            self.pool.terminate()
+
+    def __call__(self, func, iterable):
+        # only accept one iterable because that's all Pool.map accepts
+        try:
+            return self._mapfunc(func, iterable)
+        except TypeError as e:
+            # wrong number of arguments
+            raise TypeError("The map-like callable must be of the"
+                            " form f(func, iterable)") from e
+
+
+def rng_integers(gen, low, high=None, size=None, dtype='int64',
+                 endpoint=False):
+    """
+    Return random integers from low (inclusive) to high (exclusive), or if
+    endpoint=True, low (inclusive) to high (inclusive). Replaces
+    `RandomState.randint` (with endpoint=False) and
+    `RandomState.random_integers` (with endpoint=True).
+
+    Return random integers from the "discrete uniform" distribution of the
+    specified dtype. If high is None (the default), then results are from
+    0 to low.
+
+    Parameters
+    ----------
+    gen : {None, np.random.RandomState, np.random.Generator}
+        Random number generator. If None, then the np.random.RandomState
+        singleton is used.
+    low : int or array-like of ints
+        Lowest (signed) integers to be drawn from the distribution (unless
+        high=None, in which case this parameter is 0 and this value is used
+        for high).
+    high : int or array-like of ints
+        If provided, one above the largest (signed) integer to be drawn from
+        the distribution (see above for behavior if high=None). If array-like,
+        must contain integer values.
+    size : array-like of ints, optional
+        Output shape. If the given shape is, e.g., (m, n, k), then m * n * k
+        samples are drawn. Default is None, in which case a single value is
+        returned.
+    dtype : {str, dtype}, optional
+        Desired dtype of the result. All dtypes are determined by their name,
+        i.e., 'int64', 'int', etc, so byteorder is not available and a specific
+        precision may have different C types depending on the platform.
+        The default value is 'int64'.
+    endpoint : bool, optional
+        If True, sample from the interval [low, high] instead of the default
+        [low, high). Defaults to False.
+
+    Returns
+    -------
+    out: int or ndarray of ints
+        size-shaped array of random integers from the appropriate distribution,
+        or a single such random int if size not provided.
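+
+    Examples
+    --------
+    A sketch (requires NumPy >= 1.17 for `default_rng`):
+
+    >>> rng = np.random.default_rng()
+    >>> int(rng_integers(rng, 0, 2)) in (0, 1)  # one draw from {0, 1}
+    True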
+    """
+    if isinstance(gen, Generator):
+        return gen.integers(low, high=high, size=size, dtype=dtype,
+                            endpoint=endpoint)
+    else:
+        if gen is None:
+            # default is RandomState singleton used by np.random.
+            gen = np.random.mtrand._rand
+        if endpoint:
+            # inclusive of endpoint
+            # remember that low and high can be arrays, so don't modify in
+            # place
+            if high is None:
+                return gen.randint(low + 1, size=size, dtype=dtype)
+            return gen.randint(low, high=high + 1, size=size, dtype=dtype)
+
+        # exclusive
+        return gen.randint(low, high=high, size=size, dtype=dtype)
+
+
+@contextmanager
+def _fixed_default_rng(seed=1638083107694713882823079058616272161):
+    """Context with a fixed np.random.default_rng seed."""
+    orig_fun = np.random.default_rng
+    np.random.default_rng = lambda seed=seed: orig_fun(seed)
+    try:
+        yield
+    finally:
+        np.random.default_rng = orig_fun
+
+
+def _argmin(a, keepdims=False, axis=None):
+    """
+    argmin with a `keepdims` parameter.
+
+    See https://github.com/numpy/numpy/issues/8710
+
+    If axis is not None, a.shape[axis] must be greater than 0.
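+
+    >>> _argmin(np.array([[3, 1], [0, 5]]), axis=1, keepdims=True)
+    array([[1],
+           [0]])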
+    """
+    res = np.argmin(a, axis=axis)
+    if keepdims and axis is not None:
+        res = np.expand_dims(res, axis=axis)
+    return res
+
+
+def _first_nonnan(a, axis):
+    """
+    Return the first non-nan value along the given axis.
+
+    If a slice is all nan, nan is returned for that slice.
+
+    The shape of the return value corresponds to ``keepdims=True``.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> nan = np.nan
+    >>> a = np.array([[ 3.,  3., nan,  3.],
+    ...               [ 1., nan,  2.,  4.],
+    ...               [nan, nan,  9., -1.],
+    ...               [nan,  5.,  4.,  3.],
+    ...               [ 2.,  2.,  2.,  2.],
+    ...               [nan, nan, nan, nan]])
+    >>> _first_nonnan(a, axis=0)
+    array([[3., 3., 2., 3.]])
+    >>> _first_nonnan(a, axis=1)
+    array([[ 3.],
+           [ 1.],
+           [ 9.],
+           [ 5.],
+           [ 2.],
+           [nan]])
+    """
+    k = _argmin(np.isnan(a), axis=axis, keepdims=True)
+    return np.take_along_axis(a, k, axis=axis)
+
+
+def _nan_allsame(a, axis, keepdims=False):
+    """
+    Determine if the values along an axis are all the same.
+
+    nan values are ignored.
+
+    `a` must be a numpy array.
+
+    `axis` is assumed to be normalized; that is, 0 <= axis < a.ndim.
+
+    For an axis of length 0, the result is True.  That is, we adopt the
+    convention that ``allsame([])`` is True. (There are no values in the
+    input that are different.)
+
+    `True` is returned for slices that are all nan--not because all the
+    values are the same, but because this is equivalent to ``allsame([])``.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> nan = np.nan
+    >>> a = np.array([[ 3.,  3., nan,  3.],
+    ...               [ 1., nan,  2.,  4.],
+    ...               [nan, nan,  9., -1.],
+    ...               [nan,  5.,  4.,  3.],
+    ...               [ 2.,  2.,  2.,  2.],
+    ...               [nan, nan, nan, nan]])
+    >>> _nan_allsame(a, axis=1, keepdims=True)
+    array([[ True],
+           [False],
+           [False],
+           [False],
+           [ True],
+           [ True]])
+    """
+    if axis is None:
+        if a.size == 0:
+            return True
+        a = a.ravel()
+        axis = 0
+    else:
+        shp = a.shape
+        if shp[axis] == 0:
+            shp = shp[:axis] + (1,)*keepdims + shp[axis + 1:]
+            return np.full(shp, fill_value=True, dtype=bool)
+    a0 = _first_nonnan(a, axis=axis)
+    return ((a0 == a) | np.isnan(a)).all(axis=axis, keepdims=keepdims)
+
+
+def _contains_nan(a, nan_policy='propagate', use_summation=True):
+    if not isinstance(a, np.ndarray):
+        use_summation = False  # some array_likes ignore nans (e.g. pandas)
+    policies = ['propagate', 'raise', 'omit']
+    if nan_policy not in policies:
+        raise ValueError("nan_policy must be one of {%s}" %
+                         ', '.join("'%s'" % s for s in policies))
+
+    if np.issubdtype(a.dtype, np.inexact):
+        # The summation method avoids creating a (potentially huge) array.
+        if use_summation:
+            with np.errstate(invalid='ignore', over='ignore'):
+                contains_nan = np.isnan(np.sum(a))
+        else:
+            contains_nan = np.isnan(a).any()
+    elif np.issubdtype(a.dtype, object):
+        contains_nan = False
+        for el in a.ravel():
+            # isnan doesn't work on non-numeric elements
+            if np.issubdtype(type(el), np.number) and np.isnan(el):
+                contains_nan = True
+                break
+    else:
+        # Only `object` and `inexact` arrays can have NaNs
+        contains_nan = False
+
+    if contains_nan and nan_policy == 'raise':
+        raise ValueError("The input contains nan values")
+
+    return contains_nan, nan_policy
+
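+# Usage sketch (illustrative only): the helper returns a
+# ``(contains_nan, nan_policy)`` pair, so callers can validate the policy
+# and test for nans in one call:
+#
+#     has_nan, policy = _contains_nan(np.array([1.0, np.nan]), 'omit')
+#     # has_nan is True, policy == 'omit'
+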
+
+def _rename_parameter(old_name, new_name, dep_version=None):
+    """
+    Generate decorator for backward-compatible keyword renaming.
+
+    Apply the decorator generated by `_rename_parameter` to functions with a
+    recently renamed parameter to maintain backward-compatibility.
+
+    After decoration, the function behaves as follows:
+    If only the new parameter is passed into the function, behave as usual.
+    If only the old parameter is passed into the function (as a keyword), emit
+    a DeprecationWarning if `dep_version` is provided, and behave as usual
+    otherwise.
+    If both old and new parameters are passed into the function, emit a
+    DeprecationWarning if `dep_version` is provided, and raise the appropriate
+    TypeError (function got multiple values for argument).
+
+    Parameters
+    ----------
+    old_name : str
+        Old name of parameter
+    new_name : str
+        New name of parameter
+    dep_version : str, optional
+        Version of SciPy in which old parameter was deprecated in the format
+        'X.Y.Z'. If supplied, the deprecation message will indicate that
+        support for the old parameter will be removed in version 'X.Y+2.Z'
+
+    Notes
+    -----
+    Untested with functions that accept *args. Probably won't work as written.
+
+    """
+    def decorator(fun):
+        @functools.wraps(fun)
+        def wrapper(*args, **kwargs):
+            if old_name in kwargs:
+                if dep_version:
+                    end_version = dep_version.split('.')
+                    end_version[1] = str(int(end_version[1]) + 2)
+                    end_version = '.'.join(end_version)
+                    message = (f"Use of keyword argument `{old_name}` is "
+                               f"deprecated and replaced by `{new_name}`.  "
+                               f"Support for `{old_name}` will be removed "
+                               f"in SciPy {end_version}.")
+                    warnings.warn(message, DeprecationWarning, stacklevel=2)
+                if new_name in kwargs:
+                    message = (f"{fun.__name__}() got multiple values for "
+                               f"argument now known as `{new_name}`")
+                    raise TypeError(message)
+                kwargs[new_name] = kwargs.pop(old_name)
+            return fun(*args, **kwargs)
+        return wrapper
+    return decorator
+
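+# Usage sketch (illustrative only): renaming keyword ``old`` to ``new``
+# while keeping the old spelling accepted (with a DeprecationWarning once
+# ``dep_version`` is given):
+#
+#     @_rename_parameter("old", "new", dep_version="1.9.0")
+#     def f(new):
+#         return new
+#
+#     f(new=1)  # silent, returns 1
+#     f(old=1)  # warns, returns 1
+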
+
+def _rng_spawn(rng, n_children):
+    # spawns independent RNGs from a parent RNG
+    bg = rng._bit_generator
+    ss = bg._seed_seq
+    child_rngs = [np.random.Generator(type(bg)(child_ss))
+                  for child_ss in ss.spawn(n_children)]
+    return child_rngs
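+
+
+# Usage sketch (illustrative only): spawning per-worker streams that are
+# statistically independent of the parent generator:
+#
+#     children = _rng_spawn(np.random.default_rng(12345), n_children=4)
+#     draws = [child.random() for child in children]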
diff --git a/__packaged__/coreml/.python_dependencies/scipy/_lib/decorator.py b/__packaged__/coreml/.python_dependencies/scipy/_lib/decorator.py
new file mode 100644
index 00000000..ce23811b
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/_lib/decorator.py
@@ -0,0 +1,399 @@
+# #########################     LICENSE     ############################ #
+
+# Copyright (c) 2005-2015, Michele Simionato
+# All rights reserved.
+
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+
+#   Redistributions of source code must retain the above copyright
+#   notice, this list of conditions and the following disclaimer.
+#   Redistributions in bytecode form must reproduce the above copyright
+#   notice, this list of conditions and the following disclaimer in
+#   the documentation and/or other materials provided with the
+#   distribution.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+# DAMAGE.
+
+"""
+Decorator module, see https://pypi.python.org/pypi/decorator
+for the documentation.
+"""
+import re
+import sys
+import inspect
+import operator
+import itertools
+import collections
+
+from inspect import getfullargspec
+
+__version__ = '4.0.5'
+
+
+def get_init(cls):
+    return cls.__init__
+
+
+# getargspec has been deprecated in Python 3.5
+ArgSpec = collections.namedtuple(
+    'ArgSpec', 'args varargs varkw defaults')
+
+
+def getargspec(f):
+    """A replacement for inspect.getargspec"""
+    spec = getfullargspec(f)
+    return ArgSpec(spec.args, spec.varargs, spec.varkw, spec.defaults)
+
+
+DEF = re.compile(r'\s*def\s*([_\w][_\w\d]*)\s*\(')
+
+
+# basic functionality
+class FunctionMaker:
+    """
+    An object with the ability to create functions with a given signature.
+    It has attributes name, doc, module, signature, defaults, dict, and
+    methods update and make.
+    """
+
+    # Atomic get-and-increment provided by the GIL
+    _compile_count = itertools.count()
+
+    def __init__(self, func=None, name=None, signature=None,
+                 defaults=None, doc=None, module=None, funcdict=None):
+        self.shortsignature = signature
+        if func:
+            # func can be a class or a callable, but not an instance method
+            self.name = func.__name__
+            if self.name == '<lambda>':  # small hack for lambda functions
+                self.name = '_lambda_'
+            self.doc = func.__doc__
+            self.module = func.__module__
+            if inspect.isfunction(func):
+                argspec = getfullargspec(func)
+                self.annotations = getattr(func, '__annotations__', {})
+                for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs',
+                          'kwonlydefaults'):
+                    setattr(self, a, getattr(argspec, a))
+                for i, arg in enumerate(self.args):
+                    setattr(self, 'arg%d' % i, arg)
+                allargs = list(self.args)
+                allshortargs = list(self.args)
+                if self.varargs:
+                    allargs.append('*' + self.varargs)
+                    allshortargs.append('*' + self.varargs)
+                elif self.kwonlyargs:
+                    allargs.append('*')  # single star syntax
+                for a in self.kwonlyargs:
+                    allargs.append('%s=None' % a)
+                    allshortargs.append('%s=%s' % (a, a))
+                if self.varkw:
+                    allargs.append('**' + self.varkw)
+                    allshortargs.append('**' + self.varkw)
+                self.signature = ', '.join(allargs)
+                self.shortsignature = ', '.join(allshortargs)
+                self.dict = func.__dict__.copy()
+        # func=None happens when decorating a caller
+        if name:
+            self.name = name
+        if signature is not None:
+            self.signature = signature
+        if defaults:
+            self.defaults = defaults
+        if doc:
+            self.doc = doc
+        if module:
+            self.module = module
+        if funcdict:
+            self.dict = funcdict
+        # check existence of required attributes
+        assert hasattr(self, 'name')
+        if not hasattr(self, 'signature'):
+            raise TypeError('You are decorating a non-function: %s' % func)
+
+    def update(self, func, **kw):
+        "Update the signature of func with the data in self"
+        func.__name__ = self.name
+        func.__doc__ = getattr(self, 'doc', None)
+        func.__dict__ = getattr(self, 'dict', {})
+        func.__defaults__ = getattr(self, 'defaults', ())
+        func.__kwdefaults__ = getattr(self, 'kwonlydefaults', None)
+        func.__annotations__ = getattr(self, 'annotations', None)
+        try:
+            frame = sys._getframe(3)
+        except AttributeError:  # for IronPython and similar implementations
+            callermodule = '?'
+        else:
+            callermodule = frame.f_globals.get('__name__', '?')
+        func.__module__ = getattr(self, 'module', callermodule)
+        func.__dict__.update(kw)
+
+    def make(self, src_templ, evaldict=None, addsource=False, **attrs):
+        "Make a new function from a given template and update the signature"
+        src = src_templ % vars(self)  # expand name and signature
+        evaldict = evaldict or {}
+        mo = DEF.match(src)
+        if mo is None:
+            raise SyntaxError('not a valid function template\n%s' % src)
+        name = mo.group(1)  # extract the function name
+        names = set([name] + [arg.strip(' *') for arg in
+                              self.shortsignature.split(',')])
+        for n in names:
+            if n in ('_func_', '_call_'):
+                raise NameError('%s is overridden in\n%s' % (n, src))
+        if not src.endswith('\n'):  # add a newline just for safety
+            src += '\n'  # this is needed in old versions of Python
+
+        # Ensure each generated function has a unique filename for profilers
+        # (such as cProfile) that depend on the tuple of (<filename>,
+        # <line number>, <function name>) being unique.
+        filename = '<decorator-gen-%d>' % (next(self._compile_count),)
+        try:
+            code = compile(src, filename, 'single')
+            exec(code, evaldict)
+        except:  # noqa: E722
+            print('Error in generated code:', file=sys.stderr)
+            print(src, file=sys.stderr)
+            raise
+        func = evaldict[name]
+        if addsource:
+            attrs['__source__'] = src
+        self.update(func, **attrs)
+        return func
+
+    @classmethod
+    def create(cls, obj, body, evaldict, defaults=None,
+               doc=None, module=None, addsource=True, **attrs):
+        """
+        Create a function from the strings name, signature, and body.
+        evaldict is the evaluation dictionary. If addsource is true, an
+        attribute __source__ is added to the result. The attributes attrs
+        are added, if any.
+        """
+        if isinstance(obj, str):  # "name(signature)"
+            name, rest = obj.strip().split('(', 1)
+            signature = rest[:-1]  # strip the trailing right parenthesis
+            func = None
+        else:  # a function
+            name = None
+            signature = None
+            func = obj
+        self = cls(func, name, signature, defaults, doc, module)
+        ibody = '\n'.join('    ' + line for line in body.splitlines())
+        return self.make('def %(name)s(%(signature)s):\n' + ibody,
+                         evaldict, addsource, **attrs)
+
+
+def decorate(func, caller):
+    """
+    decorate(func, caller) decorates a function using a caller.
+    """
+    evaldict = func.__globals__.copy()
+    evaldict['_call_'] = caller
+    evaldict['_func_'] = func
+    fun = FunctionMaker.create(
+        func, "return _call_(_func_, %(shortsignature)s)",
+        evaldict, __wrapped__=func)
+    if hasattr(func, '__qualname__'):
+        fun.__qualname__ = func.__qualname__
+    return fun
+
+
+def decorator(caller, _func=None):
+    """decorator(caller) converts a caller function into a decorator"""
+    if _func is not None:  # return a decorated function
+        # this is obsolete behavior; you should use decorate instead
+        return decorate(_func, caller)
+    # else return a decorator function
+    if inspect.isclass(caller):
+        name = caller.__name__.lower()
+        callerfunc = get_init(caller)
+        doc = 'decorator(%s) converts functions/generators into ' \
+            'factories of %s objects' % (caller.__name__, caller.__name__)
+    elif inspect.isfunction(caller):
+        if caller.__name__ == '<lambda>':
+            name = '_lambda_'
+        else:
+            name = caller.__name__
+        callerfunc = caller
+        doc = caller.__doc__
+    else:  # assume caller is an object with a __call__ method
+        name = caller.__class__.__name__.lower()
+        callerfunc = caller.__call__.__func__
+        doc = caller.__call__.__doc__
+    evaldict = callerfunc.__globals__.copy()
+    evaldict['_call_'] = caller
+    evaldict['_decorate_'] = decorate
+    return FunctionMaker.create(
+        '%s(func)' % name, 'return _decorate_(func, _call_)',
+        evaldict, doc=doc, module=caller.__module__,
+        __wrapped__=caller)
+
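+# Usage sketch (illustrative only): turning a two-level caller into a
+# signature-preserving decorator:
+#
+#     def trace(f, *args, **kw):
+#         print('calling %s' % f.__name__)
+#         return f(*args, **kw)
+#     trace = decorator(trace)
+#
+#     @trace
+#     def add(x, y):
+#         return x + y
+#     # add(1, 2) prints 'calling add' and returns 3; the wrapper keeps
+#     # add's original signature, unlike a bare *args wrapper.
+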
+
+# ####################### contextmanager ####################### #
+
+try:  # Python >= 3.2
+    from contextlib import _GeneratorContextManager
+except ImportError:  # Python >= 2.5
+    from contextlib import GeneratorContextManager as _GeneratorContextManager
+
+
+class ContextManager(_GeneratorContextManager):
+    def __call__(self, func):
+        """Context manager decorator"""
+        return FunctionMaker.create(
+            func, "with _self_: return _func_(%(shortsignature)s)",
+            dict(_self_=self, _func_=func), __wrapped__=func)
+
+
+init = getfullargspec(_GeneratorContextManager.__init__)
+n_args = len(init.args)
+if n_args == 2 and not init.varargs:  # (self, genobj) Python 2.7
+    def __init__(self, g, *a, **k):
+        return _GeneratorContextManager.__init__(self, g(*a, **k))
+    ContextManager.__init__ = __init__
+elif n_args == 2 and init.varargs:  # (self, gen, *a, **k) Python 3.4
+    pass
+elif n_args == 4:  # (self, gen, args, kwds) Python 3.5
+    def __init__(self, g, *a, **k):
+        return _GeneratorContextManager.__init__(self, g, a, k)
+    ContextManager.__init__ = __init__
+
+contextmanager = decorator(ContextManager)
+
+
+# ############################ dispatch_on ############################ #
+
+def append(a, vancestors):
+    """
+    Append ``a`` to the list of the virtual ancestors, unless it is already
+    included.
+    """
+    add = True
+    for j, va in enumerate(vancestors):
+        if issubclass(va, a):
+            add = False
+            break
+        if issubclass(a, va):
+            vancestors[j] = a
+            add = False
+    if add:
+        vancestors.append(a)
+
+
+# inspired from simplegeneric by P.J. Eby and functools.singledispatch
+def dispatch_on(*dispatch_args):
+    """
+    Factory of decorators turning a function into a generic function
+    dispatching on the given arguments.
+    """
+    assert dispatch_args, 'No dispatch args passed'
+    dispatch_str = '(%s,)' % ', '.join(dispatch_args)
+
+    def check(arguments, wrong=operator.ne, msg=''):
+        """Make sure one passes the expected number of arguments"""
+        if wrong(len(arguments), len(dispatch_args)):
+            raise TypeError('Expected %d arguments, got %d%s' %
+                            (len(dispatch_args), len(arguments), msg))
+
+    def gen_func_dec(func):
+        """Decorator turning a function into a generic function"""
+
+        # first check the dispatch arguments
+        argset = set(getfullargspec(func).args)
+        if not set(dispatch_args) <= argset:
+            raise NameError('Unknown dispatch arguments %s' % dispatch_str)
+
+        typemap = {}
+
+        def vancestors(*types):
+            """
+            Get a list of sets of virtual ancestors for the given types
+            """
+            check(types)
+            ras = [[] for _ in range(len(dispatch_args))]
+            for types_ in typemap:
+                for t, type_, ra in zip(types, types_, ras):
+                    if issubclass(t, type_) and type_ not in t.__mro__:
+                        append(type_, ra)
+            return [set(ra) for ra in ras]
+
+        def ancestors(*types):
+            """
+            Get a list of virtual MROs, one for each type
+            """
+            check(types)
+            lists = []
+            for t, vas in zip(types, vancestors(*types)):
+                n_vas = len(vas)
+                if n_vas > 1:
+                    raise RuntimeError(
+                        'Ambiguous dispatch for %s: %s' % (t, vas))
+                elif n_vas == 1:
+                    va, = vas
+                    mro = type('t', (t, va), {}).__mro__[1:]
+                else:
+                    mro = t.__mro__
+                lists.append(mro[:-1])  # discard t and object
+            return lists
+
+        def register(*types):
+            """
+            Decorator to register an implementation for the given types
+            """
+            check(types)
+
+            def dec(f):
+                check(getfullargspec(f).args, operator.lt, ' in ' + f.__name__)
+                typemap[types] = f
+                return f
+            return dec
+
+        def dispatch_info(*types):
+            """
+            A utility to introspect the dispatch algorithm
+            """
+            check(types)
+            lst = [tuple(a.__name__ for a in anc)
+                   for anc in itertools.product(*ancestors(*types))]
+            return lst
+
+        def _dispatch(dispatch_args, *args, **kw):
+            types = tuple(type(arg) for arg in dispatch_args)
+            try:  # fast path
+                f = typemap[types]
+            except KeyError:
+                pass
+            else:
+                return f(*args, **kw)
+            combinations = itertools.product(*ancestors(*types))
+            next(combinations)  # the first one has already been tried
+            for types_ in combinations:
+                f = typemap.get(types_)
+                if f is not None:
+                    return f(*args, **kw)
+
+            # else call the default implementation
+            return func(*args, **kw)
+
+        return FunctionMaker.create(
+            func, 'return _f_(%s, %%(shortsignature)s)' % dispatch_str,
+            dict(_f_=_dispatch), register=register, default=func,
+            typemap=typemap, vancestors=vancestors, ancestors=ancestors,
+            dispatch_info=dispatch_info, __wrapped__=func)
+
+    gen_func_dec.__name__ = 'dispatch_on' + dispatch_str
+    return gen_func_dec
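+
+
+# Usage sketch (illustrative only): generic dispatch on one argument, with
+# the undecorated body acting as the default implementation:
+#
+#     @dispatch_on('x')
+#     def describe(x):
+#         return 'object'
+#
+#     @describe.register(int)
+#     def describe_int(x):
+#         return 'int %d' % x
+#
+#     # describe(3) -> 'int 3'; describe('a') -> 'object'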
diff --git a/__packaged__/coreml/.python_dependencies/scipy/_lib/deprecation.py b/__packaged__/coreml/.python_dependencies/scipy/_lib/deprecation.py
new file mode 100644
index 00000000..eb7e2bc6
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/_lib/deprecation.py
@@ -0,0 +1,107 @@
+import functools
+import warnings
+
+__all__ = ["_deprecated"]
+
+
+def _deprecated(msg, stacklevel=2):
+    """Deprecate a function by emitting a warning on use."""
+    def wrap(fun):
+        if isinstance(fun, type):
+            warnings.warn(
+                "Trying to deprecate class {!r}".format(fun),
+                category=RuntimeWarning, stacklevel=2)
+            return fun
+
+        @functools.wraps(fun)
+        def call(*args, **kwargs):
+            warnings.warn(msg, category=DeprecationWarning,
+                          stacklevel=stacklevel)
+            return fun(*args, **kwargs)
+        call.__doc__ = fun.__doc__
+        return call
+
+    return wrap
+
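+# Usage sketch (illustrative only):
+#
+#     @_deprecated("`old_func` is deprecated, use `new_func` instead")
+#     def old_func():
+#         pass
+#
+#     # Calling old_func() now emits a DeprecationWarning with that message.
+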
+
+class _DeprecationHelperStr:
+    """
+    Helper class used by deprecate_cython_api
+    """
+    def __init__(self, content, message):
+        self._content = content
+        self._message = message
+
+    def __hash__(self):
+        return hash(self._content)
+
+    def __eq__(self, other):
+        res = (self._content == other)
+        if res:
+            warnings.warn(self._message, category=DeprecationWarning,
+                          stacklevel=2)
+        return res
+
+
+def deprecate_cython_api(module, routine_name, new_name=None, message=None):
+    """
+    Deprecate an exported cdef function in a public Cython API module.
+
+    Only functions can be deprecated; typedefs etc. cannot.
+
+    Parameters
+    ----------
+    module : module
+        Public Cython API module (e.g. scipy.linalg.cython_blas).
+    routine_name : str
+        Name of the routine to deprecate. May also be a fused-type
+        routine (in which case all of its specializations are deprecated).
+    new_name : str
+        New name to include in the deprecation warning message
+    message : str
+        Additional text in the deprecation warning message
+
+    Examples
+    --------
+    Usually, this function would be used in the top-level of the
+    module ``.pyx`` file:
+
+    >>> from scipy._lib.deprecation import deprecate_cython_api
+    >>> import scipy.linalg.cython_blas as mod
+    >>> deprecate_cython_api(mod, "dgemm", "dgemm_new",
+    ...                      message="Deprecated in Scipy 1.5.0")
+    >>> del deprecate_cython_api, mod
+
+    After this, Cython modules that use the deprecated function emit a
+    deprecation warning when they are imported.
+
+    """
+    old_name = "{}.{}".format(module.__name__, routine_name)
+
+    if new_name is None:
+        depdoc = "`%s` is deprecated!" % old_name
+    else:
+        depdoc = "`%s` is deprecated, use `%s` instead!" % \
+                 (old_name, new_name)
+
+    if message is not None:
+        depdoc += "\n" + message
+
+    d = module.__pyx_capi__
+
+    # Check if the function is a fused-type function with a mangled name
+    j = 0
+    has_fused = False
+    while True:
+        fused_name = "__pyx_fuse_{}{}".format(j, routine_name)
+        if fused_name in d:
+            has_fused = True
+            d[_DeprecationHelperStr(fused_name, depdoc)] = d.pop(fused_name)
+            j += 1
+        else:
+            break
+
+    # If not, apply deprecation to the named routine
+    if not has_fused:
+        d[_DeprecationHelperStr(routine_name, depdoc)] = d.pop(routine_name)
+
diff --git a/__packaged__/coreml/.python_dependencies/scipy/_lib/doccer.py b/__packaged__/coreml/.python_dependencies/scipy/_lib/doccer.py
new file mode 100644
index 00000000..707f9701
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/_lib/doccer.py
@@ -0,0 +1,275 @@
+''' Utilities to allow inserting docstring fragments for common
+parameters into function and method docstrings'''
+
+import sys
+
+__all__ = [
+    'docformat', 'inherit_docstring_from', 'indentcount_lines',
+    'filldoc', 'unindent_dict', 'unindent_string', 'extend_notes_in_docstring',
+    'replace_notes_in_docstring', 'doc_replace'
+]
+
+
+def docformat(docstring, docdict=None):
+    ''' Fill a function docstring from variables in dictionary
+
+    Adapt the indent of the inserted docs
+
+    Parameters
+    ----------
+    docstring : string
+        docstring from function, possibly with dict formatting strings
+    docdict : dict, optional
+        dictionary with keys that match the dict formatting strings
+        and values that are docstring fragments to be inserted. The
+        indentation of the inserted docstrings is set to match the
+        minimum indentation of the ``docstring`` by adding this
+        indentation to all lines of the inserted string, except the
+        first.
+
+    Returns
+    -------
+    outstring : string
+        string with requested ``docdict`` strings inserted
+
+    Examples
+    --------
+    >>> docformat(' Test string with %(value)s', {'value':'inserted value'})
+    ' Test string with inserted value'
+    >>> docstring = 'First line\\n    Second line\\n    %(value)s'
+    >>> inserted_string = "indented\\nstring"
+    >>> docdict = {'value': inserted_string}
+    >>> docformat(docstring, docdict)
+    'First line\\n    Second line\\n    indented\\n    string'
+    '''
+    if not docstring:
+        return docstring
+    if docdict is None:
+        docdict = {}
+    if not docdict:
+        return docstring
+    lines = docstring.expandtabs().splitlines()
+    # Find the minimum indent of the main docstring, after first line
+    if len(lines) < 2:
+        icount = 0
+    else:
+        icount = indentcount_lines(lines[1:])
+    indent = ' ' * icount
+    # Insert this indent to dictionary docstrings
+    indented = {}
+    for name, dstr in docdict.items():
+        lines = dstr.expandtabs().splitlines()
+        try:
+            newlines = [lines[0]]
+            for line in lines[1:]:
+                newlines.append(indent+line)
+            indented[name] = '\n'.join(newlines)
+        except IndexError:
+            indented[name] = dstr
+    return docstring % indented
+
+
+def inherit_docstring_from(cls):
+    """
+    This decorator modifies the decorated function's docstring by
+    replacing occurrences of '%(super)s' with the docstring of the
+    method of the same name from the class `cls`.
+
+    If the decorated method has no docstring, it is simply given the
+    docstring of `cls`'s method.
+
+    Parameters
+    ----------
+    cls : Python class or instance
+        A class with a method with the same name as the decorated method.
+        The docstring of the method in this class replaces '%(super)s' in the
+        docstring of the decorated method.
+
+    Returns
+    -------
+    f : function
+        The decorator function that modifies the __doc__ attribute
+        of its argument.
+
+    Examples
+    --------
+    In the following, the docstring for `Bar.func` is created using the
+    docstring of `Foo.func`.
+
+    >>> class Foo:
+    ...     def func(self):
+    ...         '''Do something useful.'''
+    ...         return
+    ...
+    >>> class Bar(Foo):
+    ...     @inherit_docstring_from(Foo)
+    ...     def func(self):
+    ...         '''%(super)s
+    ...         Do it fast.
+    ...         '''
+    ...         return
+    ...
+    >>> b = Bar()
+    >>> b.func.__doc__
+    'Do something useful.\n        Do it fast.\n        '
+
+    """
+    def _doc(func):
+        cls_docstring = getattr(cls, func.__name__).__doc__
+        func_docstring = func.__doc__
+        if func_docstring is None:
+            func.__doc__ = cls_docstring
+        else:
+            new_docstring = func_docstring % dict(super=cls_docstring)
+            func.__doc__ = new_docstring
+        return func
+    return _doc
+
+
+def extend_notes_in_docstring(cls, notes):
+    """
+    This decorator replaces the decorated function's docstring
+    with the docstring from the corresponding method in `cls`.
+    It extends the 'Notes' section of that docstring to include
+    the given `notes`.
+    """
+    def _doc(func):
+        cls_docstring = getattr(cls, func.__name__).__doc__
+        # If python is called with -OO option,
+        # there is no docstring
+        if cls_docstring is None:
+            return func
+        end_of_notes = cls_docstring.find('        References\n')
+        if end_of_notes == -1:
+            end_of_notes = cls_docstring.find('        Examples\n')
+            if end_of_notes == -1:
+                end_of_notes = len(cls_docstring)
+        func.__doc__ = (cls_docstring[:end_of_notes] + notes +
+                        cls_docstring[end_of_notes:])
+        return func
+    return _doc
+
+
+def replace_notes_in_docstring(cls, notes):
+    """
+    This decorator replaces the decorated function's docstring
+    with the docstring from the corresponding method in `cls`.
+    It replaces the 'Notes' section of that docstring with
+    the given `notes`.
+    """
+    def _doc(func):
+        cls_docstring = getattr(cls, func.__name__).__doc__
+        notes_header = '        Notes\n        -----\n'
+        # If python is called with -OO option,
+        # there is no docstring
+        if cls_docstring is None:
+            return func
+        start_of_notes = cls_docstring.find(notes_header)
+        end_of_notes = cls_docstring.find('        References\n')
+        if end_of_notes == -1:
+            end_of_notes = cls_docstring.find('        Examples\n')
+            if end_of_notes == -1:
+                end_of_notes = len(cls_docstring)
+        func.__doc__ = (cls_docstring[:start_of_notes + len(notes_header)] +
+                        notes +
+                        cls_docstring[end_of_notes:])
+        return func
+    return _doc
+
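+# Usage sketch (illustrative only; ``SomeBase`` is a stand-in for any class
+# whose same-named method carries the docstring to copy):
+#
+#     class MyDist(SomeBase):
+#         @replace_notes_in_docstring(SomeBase, notes="        New notes.\n")
+#         def pdf(self, x):
+#             ...
+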
+
+def indentcount_lines(lines):
+    ''' Minimum indent for all lines in line list
+
+    >>> lines = [' one', '  two', '   three']
+    >>> indentcount_lines(lines)
+    1
+    >>> lines = []
+    >>> indentcount_lines(lines)
+    0
+    >>> lines = [' one']
+    >>> indentcount_lines(lines)
+    1
+    >>> indentcount_lines(['    '])
+    0
+    '''
+    indentno = sys.maxsize
+    for line in lines:
+        stripped = line.lstrip()
+        if stripped:
+            indentno = min(indentno, len(line) - len(stripped))
+    if indentno == sys.maxsize:
+        return 0
+    return indentno
+
+
+def filldoc(docdict, unindent_params=True):
+    ''' Return docstring decorator using docdict variable dictionary
+
+    Parameters
+    ----------
+    docdict : dictionary
+        dictionary containing name, docstring fragment pairs
+    unindent_params : {False, True}, boolean, optional
+        If True, strip common indentation from all parameters in
+        docdict
+
+    Returns
+    -------
+    decfunc : function
+        decorator that applies dictionary to input function docstring
+
+    '''
+    if unindent_params:
+        docdict = unindent_dict(docdict)
+
+    def decorate(f):
+        f.__doc__ = docformat(f.__doc__, docdict)
+        return f
+    return decorate
+
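+# Usage sketch (illustrative only): sharing one parameter description across
+# several docstrings:
+#
+#     docdict = {'x_param': 'x : ndarray\n    Input array.'}
+#     fill = filldoc(docdict)
+#
+#     @fill
+#     def f(x):
+#         """Do a thing.
+#
+#         Parameters
+#         ----------
+#         %(x_param)s
+#         """
+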
+
+def unindent_dict(docdict):
+    ''' Unindent all strings in a docdict '''
+    can_dict = {}
+    for name, dstr in docdict.items():
+        can_dict[name] = unindent_string(dstr)
+    return can_dict
+
+
+def unindent_string(docstring):
+    ''' Set docstring to minimum indent for all lines, including first
+
+    >>> unindent_string(' two')
+    'two'
+    >>> unindent_string('  two\\n   three')
+    'two\\n three'
+    '''
+    lines = docstring.expandtabs().splitlines()
+    icount = indentcount_lines(lines)
+    if icount == 0:
+        return docstring
+    return '\n'.join([line[icount:] for line in lines])
+
+
+def doc_replace(obj, oldval, newval):
+    """Decorator to take the docstring from obj, with oldval replaced by newval
+
+    Equivalent to ``func.__doc__ = obj.__doc__.replace(oldval, newval)``
+
+    Parameters
+    ----------
+    obj : object
+        The object to take the docstring from.
+    oldval : string
+        The string to replace from the original docstring.
+    newval : string
+        The string to replace ``oldval`` with.
+    """
+    # __doc__ may be None for optimized Python (-OO)
+    doc = (obj.__doc__ or '').replace(oldval, newval)
+
+    def inner(func):
+        func.__doc__ = doc
+        return func
+
+    return inner
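+
+
+# Usage sketch (illustrative only): reusing an existing docstring with one
+# substring swapped (``np.mean`` is just an example source object):
+#
+#     @doc_replace(np.mean, 'mean', 'trimmed mean')
+#     def trimmed_mean(a):
+#         ...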
diff --git a/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test__gcutils.py b/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test__gcutils.py
new file mode 100644
index 00000000..18f508b9
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test__gcutils.py
@@ -0,0 +1,101 @@
+""" Test for assert_deallocated context manager and gc utilities
+"""
+import gc
+
+from scipy._lib._gcutils import (set_gc_state, gc_state, assert_deallocated,
+                                 ReferenceError, IS_PYPY)
+
+from numpy.testing import assert_equal
+
+import pytest
+
+
+def test_set_gc_state():
+    gc_status = gc.isenabled()
+    try:
+        for state in (True, False):
+            gc.enable()
+            set_gc_state(state)
+            assert_equal(gc.isenabled(), state)
+            gc.disable()
+            set_gc_state(state)
+            assert_equal(gc.isenabled(), state)
+    finally:
+        if gc_status:
+            gc.enable()
+
+
+def test_gc_state():
+    # Test gc_state context manager
+    gc_status = gc.isenabled()
+    try:
+        for pre_state in (True, False):
+            set_gc_state(pre_state)
+            for with_state in (True, False):
+                # Check the gc state is with_state in with block
+                with gc_state(with_state):
+                    assert_equal(gc.isenabled(), with_state)
+                # And returns to previous state outside block
+                assert_equal(gc.isenabled(), pre_state)
+                # Even if the gc state is set explicitly within the block
+                with gc_state(with_state):
+                    assert_equal(gc.isenabled(), with_state)
+                    set_gc_state(not with_state)
+                assert_equal(gc.isenabled(), pre_state)
+    finally:
+        if gc_status:
+            gc.enable()
+
+
+@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
+def test_assert_deallocated():
+    # Ordinary use
+    class C:
+        def __init__(self, arg0, arg1, name='myname'):
+            self.name = name
+    for gc_current in (True, False):
+        with gc_state(gc_current):
+            # We are deleting from with-block context, so that's OK
+            with assert_deallocated(C, 0, 2, 'another name') as c:
+                assert_equal(c.name, 'another name')
+                del c
+            # Or not using the thing in with-block context, also OK
+            with assert_deallocated(C, 0, 2, name='third name'):
+                pass
+            assert_equal(gc.isenabled(), gc_current)
+
+
+@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
+def test_assert_deallocated_nodel():
+    class C:
+        pass
+    with pytest.raises(ReferenceError):
+        # Need to delete the object after use inside the with-block context
+        # Note: assert_deallocated(C) needs to be assigned for the test
+        # to function correctly.  It is assigned to c, but c itself is
+        # not referenced in the body of the with, it is only there for
+        # the refcount.
+        with assert_deallocated(C) as c:
+            pass
+
+
+@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
+def test_assert_deallocated_circular():
+    class C:
+        def __init__(self):
+            self._circular = self
+    with pytest.raises(ReferenceError):
+        # Circular reference, no automatic garbage collection
+        with assert_deallocated(C) as c:
+            del c
+
+
+@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
+def test_assert_deallocated_circular2():
+    class C:
+        def __init__(self):
+            self._circular = self
+    with pytest.raises(ReferenceError):
+        # Still circular reference, no automatic garbage collection
+        with assert_deallocated(C):
+            pass
diff --git a/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test__pep440.py b/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test__pep440.py
new file mode 100644
index 00000000..7f5b71c8
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test__pep440.py
@@ -0,0 +1,67 @@
+from pytest import raises as assert_raises
+from scipy._lib._pep440 import Version, parse
+
+
+def test_main_versions():
+    assert Version('1.8.0') == Version('1.8.0')
+    for ver in ['1.9.0', '2.0.0', '1.8.1']:
+        assert Version('1.8.0') < Version(ver)
+
+    for ver in ['1.7.0', '1.7.1', '0.9.9']:
+        assert Version('1.8.0') > Version(ver)
+
+
+def test_version_1_point_10():
+    # regression test for gh-2998.
+    assert Version('1.9.0') < Version('1.10.0')
+    assert Version('1.11.0') < Version('1.11.1')
+    assert Version('1.11.0') == Version('1.11.0')
+    assert Version('1.99.11') < Version('1.99.12')
+
+
+def test_alpha_beta_rc():
+    assert Version('1.8.0rc1') == Version('1.8.0rc1')
+    for ver in ['1.8.0', '1.8.0rc2']:
+        assert Version('1.8.0rc1') < Version(ver)
+
+    for ver in ['1.8.0a2', '1.8.0b3', '1.7.2rc4']:
+        assert Version('1.8.0rc1') > Version(ver)
+
+    assert Version('1.8.0b1') > Version('1.8.0a2')
+
+
+def test_dev_version():
+    assert Version('1.9.0.dev+Unknown') < Version('1.9.0')
+    for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev+ffffffff', '1.9.0.dev1']:
+        assert Version('1.9.0.dev+f16acvda') < Version(ver)
+
+    assert Version('1.9.0.dev+f16acvda') == Version('1.9.0.dev+f16acvda')
+
+
+def test_dev_a_b_rc_mixed():
+    assert Version('1.9.0a2.dev+f16acvda') == Version('1.9.0a2.dev+f16acvda')
+    assert Version('1.9.0a2.dev+6acvda54') < Version('1.9.0a2')
+
+
+def test_dev0_version():
+    assert Version('1.9.0.dev0+Unknown') < Version('1.9.0')
+    for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev0+ffffffff']:
+        assert Version('1.9.0.dev0+f16acvda') < Version(ver)
+
+    assert Version('1.9.0.dev0+f16acvda') == Version('1.9.0.dev0+f16acvda')
+
+
+def test_dev0_a_b_rc_mixed():
+    assert Version('1.9.0a2.dev0+f16acvda') == Version('1.9.0a2.dev0+f16acvda')
+    assert Version('1.9.0a2.dev0+6acvda54') < Version('1.9.0a2')
+
+
+def test_raises():
+    for ver in ['1,9.0', '1.7.x']:
+        assert_raises(ValueError, Version, ver)
+
+
+def test_legacy_version():
+    # Non-PEP-440 version identifiers always compare less. For NumPy this only
+    # occurs on dev builds prior to 1.10.0 which are unsupported anyway.
+    assert parse('invalid') < Version('0.0.0')
+    assert parse('1.9.0-f16acvda') < Version('1.0.0')
diff --git a/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test__testutils.py b/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test__testutils.py
new file mode 100644
index 00000000..88db113d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test__testutils.py
@@ -0,0 +1,32 @@
+import sys
+from scipy._lib._testutils import _parse_size, _get_mem_available
+import pytest
+
+
+def test__parse_size():
+    expected = {
+        '12': 12e6,
+        '12 b': 12,
+        '12k': 12e3,
+        '  12  M  ': 12e6,
+        '  12  G  ': 12e9,
+        ' 12Tb ': 12e12,
+        '12  Mib ': 12 * 1024.0**2,
+        '12Tib': 12 * 1024.0**4,
+    }
+
+    for inp, outp in sorted(expected.items()):
+        if outp is None:
+            with pytest.raises(ValueError):
+                _parse_size(inp)
+        else:
+            assert _parse_size(inp) == outp
+
+
+def test__mem_available():
+    # May return None on non-Linux platforms
+    available = _get_mem_available()
+    if sys.platform.startswith('linux'):
+        assert available >= 0
+    else:
+        assert available is None or available >= 0
diff --git a/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test__threadsafety.py b/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test__threadsafety.py
new file mode 100644
index 00000000..87ae85ef
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test__threadsafety.py
@@ -0,0 +1,51 @@
+import threading
+import time
+import traceback
+
+from numpy.testing import assert_
+from pytest import raises as assert_raises
+
+from scipy._lib._threadsafety import ReentrancyLock, non_reentrant, ReentrancyError
+
+
+def test_parallel_threads():
+    # Check that ReentrancyLock serializes work in parallel threads.
+    #
+    # The test is not fully deterministic, and may succeed falsely if
+    # the timings go wrong.
+
+    lock = ReentrancyLock("failure")
+
+    failflag = [False]
+    exceptions_raised = []
+
+    def worker(k):
+        try:
+            with lock:
+                assert_(not failflag[0])
+                failflag[0] = True
+                time.sleep(0.1 * k)
+                assert_(failflag[0])
+                failflag[0] = False
+        except Exception:
+            exceptions_raised.append(traceback.format_exc(2))
+
+    threads = [threading.Thread(target=lambda k=k: worker(k))
+               for k in range(3)]
+    for t in threads:
+        t.start()
+    for t in threads:
+        t.join()
+
+    exceptions_raised = "\n".join(exceptions_raised)
+    assert_(not exceptions_raised, exceptions_raised)
+
+
+def test_reentering():
+    # Check that ReentrancyLock prevents re-entering from the same thread.
+
+    @non_reentrant()
+    def func(x):
+        return func(x)
+
+    assert_raises(ReentrancyError, func, 0)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test__util.py b/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test__util.py
new file mode 100644
index 00000000..e5a2d959
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test__util.py
@@ -0,0 +1,380 @@
+from multiprocessing import Pool
+from multiprocessing.pool import Pool as PWL
+import os
+import re
+import math
+from fractions import Fraction
+
+import numpy as np
+from numpy.testing import assert_equal, assert_
+import pytest
+from pytest import raises as assert_raises, deprecated_call
+
+import scipy
+from scipy._lib._util import (_aligned_zeros, check_random_state, MapWrapper,
+                              getfullargspec_no_self, FullArgSpec,
+                              rng_integers, _validate_int, _rename_parameter,
+                              _contains_nan)
+
+
+def test__aligned_zeros():
+    niter = 10
+
+    def check(shape, dtype, order, align):
+        err_msg = repr((shape, dtype, order, align))
+        x = _aligned_zeros(shape, dtype, order, align=align)
+        if align is None:
+            align = np.dtype(dtype).alignment
+        assert_equal(x.__array_interface__['data'][0] % align, 0)
+        if hasattr(shape, '__len__'):
+            assert_equal(x.shape, shape, err_msg)
+        else:
+            assert_equal(x.shape, (shape,), err_msg)
+        assert_equal(x.dtype, dtype)
+        if order == "C":
+            assert_(x.flags.c_contiguous, err_msg)
+        elif order == "F":
+            if x.size > 0:
+                # Size-0 arrays get invalid flags on NumPy 1.5
+                assert_(x.flags.f_contiguous, err_msg)
+        elif order is None:
+            assert_(x.flags.c_contiguous, err_msg)
+        else:
+            raise ValueError()
+
+    # try various alignments
+    for align in [1, 2, 3, 4, 8, 16, 32, 64, None]:
+        for n in [0, 1, 3, 11]:
+            for order in ["C", "F", None]:
+                for dtype in [np.uint8, np.float64]:
+                    for shape in [n, (1, 2, 3, n)]:
+                        for j in range(niter):
+                            check(shape, dtype, order, align)
+
+
+def test_check_random_state():
+    # If seed is None, return the RandomState singleton used by np.random.
+    # If seed is an int, return a new RandomState instance seeded with seed.
+    # If seed is already a RandomState instance, return it.
+    # Otherwise raise ValueError.
+    rsi = check_random_state(1)
+    assert_equal(type(rsi), np.random.RandomState)
+    rsi = check_random_state(rsi)
+    assert_equal(type(rsi), np.random.RandomState)
+    rsi = check_random_state(None)
+    assert_equal(type(rsi), np.random.RandomState)
+    assert_raises(ValueError, check_random_state, 'a')
+    if hasattr(np.random, 'Generator'):
+        # np.random.Generator is only available in NumPy >= 1.17
+        rg = np.random.Generator(np.random.PCG64())
+        rsi = check_random_state(rg)
+        assert_equal(type(rsi), np.random.Generator)
+
+
+def test_getfullargspec_no_self():
+    p = MapWrapper(1)
+    argspec = getfullargspec_no_self(p.__init__)
+    assert_equal(argspec, FullArgSpec(['pool'], None, None, (1,), [],
+                                      None, {}))
+    argspec = getfullargspec_no_self(p.__call__)
+    assert_equal(argspec, FullArgSpec(['func', 'iterable'], None, None, None,
+                                      [], None, {}))
+
+    class _rv_generic:
+        def _rvs(self, a, b=2, c=3, *args, size=None, **kwargs):
+            return None
+
+    rv_obj = _rv_generic()
+    argspec = getfullargspec_no_self(rv_obj._rvs)
+    assert_equal(argspec, FullArgSpec(['a', 'b', 'c'], 'args', 'kwargs',
+                                      (2, 3), ['size'], {'size': None}, {}))
+
+
+def test_mapwrapper_serial():
+    in_arg = np.arange(10.)
+    out_arg = np.sin(in_arg)
+
+    p = MapWrapper(1)
+    assert_(p._mapfunc is map)
+    assert_(p.pool is None)
+    assert_(p._own_pool is False)
+    out = list(p(np.sin, in_arg))
+    assert_equal(out, out_arg)
+
+    with assert_raises(RuntimeError):
+        p = MapWrapper(0)
+
+
+def test_pool():
+    with Pool(2) as p:
+        p.map(math.sin, [1, 2, 3, 4])
+
+
+def test_mapwrapper_parallel():
+    in_arg = np.arange(10.)
+    out_arg = np.sin(in_arg)
+
+    with MapWrapper(2) as p:
+        out = p(np.sin, in_arg)
+        assert_equal(list(out), out_arg)
+
+        assert_(p._own_pool is True)
+        assert_(isinstance(p.pool, PWL))
+        assert_(p._mapfunc is not None)
+
+    # the context manager should've closed the internal pool
+    # check that it has by asking it to calculate again.
+    with assert_raises(Exception) as excinfo:
+        p(np.sin, in_arg)
+
+    assert_(excinfo.type is ValueError)
+
+    # can also set a PoolWrapper up with a map-like callable instance
+    with Pool(2) as p:
+        q = MapWrapper(p.map)
+
+        assert_(q._own_pool is False)
+        q.close()
+
+        # closing the PoolWrapper shouldn't close the internal pool
+        # because it didn't create it
+        out = p.map(np.sin, in_arg)
+        assert_equal(list(out), out_arg)
+
+
+# get our custom ones and a few from the "import *" cases
+@pytest.mark.parametrize(
+    'key', ('ifft', 'diag', 'arccos', 'randn', 'rand', 'array'))
+def test_numpy_deprecation(key):
+    """Test that 'from numpy import *' functions are deprecated."""
+    if key in ('ifft', 'diag', 'arccos'):
+        arg = [1.0, 0.]
+    elif key == 'finfo':
+        arg = float
+    else:
+        arg = 2
+    func = getattr(scipy, key)
+    match = r'scipy\.%s is deprecated.*2\.0\.0' % key
+    with deprecated_call(match=match) as dep:
+        func(arg)  # deprecated
+    # in case we catch more than one dep warning
+    fnames = [os.path.splitext(d.filename)[0] for d in dep.list]
+    basenames = [os.path.basename(fname) for fname in fnames]
+    assert 'test__util' in basenames
+    if key in ('rand', 'randn'):
+        root = np.random
+    elif key == 'ifft':
+        root = np.fft
+    else:
+        root = np
+    func_np = getattr(root, key)
+    func_np(arg)  # not deprecated
+    assert func_np is not func
+    # classes should remain classes
+    if isinstance(func_np, type):
+        assert isinstance(func, type)
+
+
+def test_numpy_deprecation_functionality():
+    # Check that the deprecation wrappers don't break basic NumPy
+    # functionality
+    with deprecated_call():
+        x = scipy.array([1, 2, 3], dtype=scipy.float64)
+        assert x.dtype == scipy.float64
+        assert x.dtype == np.float64
+
+        x = scipy.finfo(scipy.float32)
+        assert x.eps == np.finfo(np.float32).eps
+
+        assert scipy.float64 == np.float64
+        assert issubclass(np.float64, scipy.float64)
+
+
+def test_rng_integers():
+    rng = np.random.RandomState()
+
+    # test that numbers are inclusive of high point
+    arr = rng_integers(rng, low=2, high=5, size=100, endpoint=True)
+    assert np.max(arr) == 5
+    assert np.min(arr) == 2
+    assert arr.shape == (100, )
+
+    # test that numbers are inclusive of high point
+    arr = rng_integers(rng, low=5, size=100, endpoint=True)
+    assert np.max(arr) == 5
+    assert np.min(arr) == 0
+    assert arr.shape == (100, )
+
+    # test that numbers are exclusive of high point
+    arr = rng_integers(rng, low=2, high=5, size=100, endpoint=False)
+    assert np.max(arr) == 4
+    assert np.min(arr) == 2
+    assert arr.shape == (100, )
+
+    # test that numbers are exclusive of high point
+    arr = rng_integers(rng, low=5, size=100, endpoint=False)
+    assert np.max(arr) == 4
+    assert np.min(arr) == 0
+    assert arr.shape == (100, )
+
+    # now try with np.random.Generator
+    try:
+        rng = np.random.default_rng()
+    except AttributeError:
+        return
+
+    # test that numbers are inclusive of high point
+    arr = rng_integers(rng, low=2, high=5, size=100, endpoint=True)
+    assert np.max(arr) == 5
+    assert np.min(arr) == 2
+    assert arr.shape == (100, )
+
+    # test that numbers are inclusive of high point
+    arr = rng_integers(rng, low=5, size=100, endpoint=True)
+    assert np.max(arr) == 5
+    assert np.min(arr) == 0
+    assert arr.shape == (100, )
+
+    # test that numbers are exclusive of high point
+    arr = rng_integers(rng, low=2, high=5, size=100, endpoint=False)
+    assert np.max(arr) == 4
+    assert np.min(arr) == 2
+    assert arr.shape == (100, )
+
+    # test that numbers are exclusive of high point
+    arr = rng_integers(rng, low=5, size=100, endpoint=False)
+    assert np.max(arr) == 4
+    assert np.min(arr) == 0
+    assert arr.shape == (100, )
+
+
+class TestValidateInt:
+
+    @pytest.mark.parametrize('n', [4, np.uint8(4), np.int16(4), np.array(4)])
+    def test_validate_int(self, n):
+        n = _validate_int(n, 'n')
+        assert n == 4
+
+    @pytest.mark.parametrize('n', [4.0, np.array([4]), Fraction(4, 1)])
+    def test_validate_int_bad(self, n):
+        with pytest.raises(TypeError, match='n must be an integer'):
+            _validate_int(n, 'n')
+
+    def test_validate_int_below_min(self):
+        with pytest.raises(ValueError, match='n must be an integer not '
+                                             'less than 0'):
+            _validate_int(-1, 'n', 0)
+
+
+class TestRenameParameter:
+    # check that wrapper `_rename_parameter` for backward-compatible
+    # keyword renaming works correctly
+
+    # Example method/function that still accepts keyword `old`
+    @_rename_parameter("old", "new")
+    def old_keyword_still_accepted(self, new):
+        return new
+
+    # Example method/function for which keyword `old` is deprecated
+    @_rename_parameter("old", "new", dep_version="1.9.0")
+    def old_keyword_deprecated(self, new):
+        return new
+
+    def test_old_keyword_still_accepted(self):
+        # positional argument and both keyword work identically
+        res1 = self.old_keyword_still_accepted(10)
+        res2 = self.old_keyword_still_accepted(new=10)
+        res3 = self.old_keyword_still_accepted(old=10)
+        assert res1 == res2 == res3 == 10
+
+        # unexpected keyword raises an error
+        message = re.escape("old_keyword_still_accepted() got an unexpected")
+        with pytest.raises(TypeError, match=message):
+            self.old_keyword_still_accepted(unexpected=10)
+
+        # multiple values for the same parameter raises an error
+        message = re.escape("old_keyword_still_accepted() got multiple")
+        with pytest.raises(TypeError, match=message):
+            self.old_keyword_still_accepted(10, new=10)
+        with pytest.raises(TypeError, match=message):
+            self.old_keyword_still_accepted(10, old=10)
+        with pytest.raises(TypeError, match=message):
+            self.old_keyword_still_accepted(new=10, old=10)
+
+    def test_old_keyword_deprecated(self):
+        # positional argument and both keyword work identically,
+        # but use of old keyword results in DeprecationWarning
+        dep_msg = "Use of keyword argument `old` is deprecated"
+        res1 = self.old_keyword_deprecated(10)
+        res2 = self.old_keyword_deprecated(new=10)
+        with pytest.warns(DeprecationWarning, match=dep_msg):
+            res3 = self.old_keyword_deprecated(old=10)
+        assert res1 == res2 == res3 == 10
+
+        # unexpected keyword raises an error
+        message = re.escape("old_keyword_deprecated() got an unexpected")
+        with pytest.raises(TypeError, match=message):
+            self.old_keyword_deprecated(unexpected=10)
+
+        # multiple values for the same parameter raises an error and,
+        # if old keyword is used, results in DeprecationWarning
+        message = re.escape("old_keyword_deprecated() got multiple")
+        with pytest.raises(TypeError, match=message):
+            self.old_keyword_deprecated(10, new=10)
+        with pytest.raises(TypeError, match=message), \
+                pytest.warns(DeprecationWarning, match=dep_msg):
+            self.old_keyword_deprecated(10, old=10)
+        with pytest.raises(TypeError, match=message), \
+                pytest.warns(DeprecationWarning, match=dep_msg):
+            self.old_keyword_deprecated(new=10, old=10)
+
+
+class TestContainsNaNTest:
+
+    def test_policy(self):
+        data = np.array([1, 2, 3, np.nan])
+
+        contains_nan, nan_policy = _contains_nan(data, nan_policy="propagate")
+        assert contains_nan
+        assert nan_policy == "propagate"
+
+        contains_nan, nan_policy = _contains_nan(data, nan_policy="omit")
+        assert contains_nan
+        assert nan_policy == "omit"
+
+        msg = "The input contains nan values"
+        with pytest.raises(ValueError, match=msg):
+            _contains_nan(data, nan_policy="raise")
+
+        msg = "nan_policy must be one of"
+        with pytest.raises(ValueError, match=msg):
+            _contains_nan(data, nan_policy="nan")
+
+    def test_contains_nan_1d(self):
+        data1 = np.array([1, 2, 3])
+        assert not _contains_nan(data1)[0]
+
+        data2 = np.array([1, 2, 3, np.nan])
+        assert _contains_nan(data2)[0]
+
+        data3 = np.array([np.nan, 2, 3, np.nan])
+        assert _contains_nan(data3)[0]
+
+        data4 = np.array([1, 2, "3", np.nan])  # converted to string "nan"
+        assert not _contains_nan(data4)[0]
+
+        data5 = np.array([1, 2, "3", np.nan], dtype='object')
+        assert _contains_nan(data5)[0]
+
+    def test_contains_nan_2d(self):
+        data1 = np.array([[1, 2], [3, 4]])
+        assert not _contains_nan(data1)[0]
+
+        data2 = np.array([[1, 2], [3, np.nan]])
+        assert _contains_nan(data2)[0]
+
+        data3 = np.array([["1", 2], [3, np.nan]])  # converted to string "nan"
+        assert not _contains_nan(data3)[0]
+
+        data4 = np.array([["1", 2], [3, np.nan]], dtype='object')
+        assert _contains_nan(data4)[0]
diff --git a/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test_bunch.py b/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test_bunch.py
new file mode 100644
index 00000000..1af6f2d5
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test_bunch.py
@@ -0,0 +1,163 @@
+
+import pytest
+import pickle
+from numpy.testing import assert_equal
+from scipy._lib._bunch import _make_tuple_bunch
+
+
+# `Result` is defined at the top level of the module so it can be
+# used to test pickling.
+Result = _make_tuple_bunch('Result', ['x', 'y', 'z'], ['w', 'beta'])
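+# A rough sketch of what `_make_tuple_bunch` produces (inferred from the
+# behavior exercised below, so treat it as an assumption rather than spec):
+# a tuple subclass whose first group of names ('x', 'y', 'z') form the
+# tuple elements and whose second group ('w', 'beta') are plain
+# per-instance attributes:
+#     r = Result(x=1, y=2, z=3, w=99, beta=0.5)
+#     tuple(r) == (1, 2, 3) and r.w == 99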
+
+
+class TestMakeTupleBunch:
+
+    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+    # Tests with Result
+    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+    def setup_method(self):
+        # Set up an instance of Result.
+        self.result = Result(x=1, y=2, z=3, w=99, beta=0.5)
+
+    def test_attribute_access(self):
+        assert_equal(self.result.x, 1)
+        assert_equal(self.result.y, 2)
+        assert_equal(self.result.z, 3)
+        assert_equal(self.result.w, 99)
+        assert_equal(self.result.beta, 0.5)
+
+    def test_indexing(self):
+        assert_equal(self.result[0], 1)
+        assert_equal(self.result[1], 2)
+        assert_equal(self.result[2], 3)
+        assert_equal(self.result[-1], 3)
+        with pytest.raises(IndexError, match='index out of range'):
+            self.result[3]
+
+    def test_unpacking(self):
+        x0, y0, z0 = self.result
+        assert_equal((x0, y0, z0), (1, 2, 3))
+        assert_equal(self.result, (1, 2, 3))
+
+    def test_slice(self):
+        assert_equal(self.result[1:], (2, 3))
+        assert_equal(self.result[::2], (1, 3))
+        assert_equal(self.result[::-1], (3, 2, 1))
+
+    def test_len(self):
+        assert_equal(len(self.result), 3)
+
+    def test_repr(self):
+        s = repr(self.result)
+        assert_equal(s, 'Result(x=1, y=2, z=3, w=99, beta=0.5)')
+
+    def test_hash(self):
+        assert_equal(hash(self.result), hash((1, 2, 3)))
+
+    def test_pickle(self):
+        s = pickle.dumps(self.result)
+        obj = pickle.loads(s)
+        assert isinstance(obj, Result)
+        assert_equal(obj.x, self.result.x)
+        assert_equal(obj.y, self.result.y)
+        assert_equal(obj.z, self.result.z)
+        assert_equal(obj.w, self.result.w)
+        assert_equal(obj.beta, self.result.beta)
+
+    def test_read_only_existing(self):
+        with pytest.raises(AttributeError, match="can't set attribute"):
+            self.result.x = -1
+
+    def test_read_only_new(self):
+        self.result.plate_of_shrimp = "lattice of coincidence"
+        assert self.result.plate_of_shrimp == "lattice of coincidence"
+
+    def test_constructor_missing_parameter(self):
+        with pytest.raises(TypeError, match='missing'):
+            # `w` is missing.
+            Result(x=1, y=2, z=3, beta=0.75)
+
+    def test_constructor_incorrect_parameter(self):
+        with pytest.raises(TypeError, match='unexpected'):
+            # `foo` is not an existing field.
+            Result(x=1, y=2, z=3, w=123, beta=0.75, foo=999)
+
+    def test_module(self):
+        m = 'scipy._lib.tests.test_bunch'
+        assert_equal(Result.__module__, m)
+        assert_equal(self.result.__module__, m)
+
+    def test_extra_fields_per_instance(self):
+        # This test exists to ensure that instances of the same class
+        # store their own values for the extra fields. That is, the values
+        # are stored per instance and not in the class.
+        result1 = Result(x=1, y=2, z=3, w=-1, beta=0.0)
+        result2 = Result(x=4, y=5, z=6, w=99, beta=1.0)
+        assert_equal(result1.w, -1)
+        assert_equal(result1.beta, 0.0)
+        # The rest of these checks aren't essential, but let's check
+        # them anyway.
+        assert_equal(result1[:], (1, 2, 3))
+        assert_equal(result2.w, 99)
+        assert_equal(result2.beta, 1.0)
+        assert_equal(result2[:], (4, 5, 6))
+
+    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+    # Other tests
+    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+    def test_extra_field_names_is_optional(self):
+        Square = _make_tuple_bunch('Square', ['width', 'height'])
+        sq = Square(width=1, height=2)
+        assert_equal(sq.width, 1)
+        assert_equal(sq.height, 2)
+        s = repr(sq)
+        assert_equal(s, 'Square(width=1, height=2)')
+
+    def test_tuple_like(self):
+        Tup = _make_tuple_bunch('Tup', ['a', 'b'])
+        tu = Tup(a=1, b=2)
+        assert isinstance(tu, tuple)
+        assert isinstance(tu + (1,), tuple)
+
+    def test_explicit_module(self):
+        m = 'some.module.name'
+        Foo = _make_tuple_bunch('Foo', ['x'], ['a', 'b'], module=m)
+        foo = Foo(x=1, a=355, b=113)
+        assert_equal(Foo.__module__, m)
+        assert_equal(foo.__module__, m)
+
+    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+    # Argument validation
+    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+    @pytest.mark.parametrize('args', [('123', ['a'], ['b']),
+                                      ('Foo', ['-3'], ['x']),
+                                      ('Foo', ['a'], ['+-*/'])])
+    def test_identifiers_not_allowed(self, args):
+        with pytest.raises(ValueError, match='identifiers'):
+            _make_tuple_bunch(*args)
+
+    @pytest.mark.parametrize('args', [('Foo', ['a', 'b', 'a'], ['x']),
+                                      ('Foo', ['a', 'b'], ['b', 'x'])])
+    def test_repeated_field_names(self, args):
+        with pytest.raises(ValueError, match='Duplicate'):
+            _make_tuple_bunch(*args)
+
+    @pytest.mark.parametrize('args', [('Foo', ['_a'], ['x']),
+                                      ('Foo', ['a'], ['_x'])])
+    def test_leading_underscore_not_allowed(self, args):
+        with pytest.raises(ValueError, match='underscore'):
+            _make_tuple_bunch(*args)
+
+    @pytest.mark.parametrize('args', [('Foo', ['def'], ['x']),
+                                      ('Foo', ['a'], ['or']),
+                                      ('and', ['a'], ['x'])])
+    def test_keyword_not_allowed_in_fields(self, args):
+        with pytest.raises(ValueError, match='keyword'):
+            _make_tuple_bunch(*args)
+
+    def test_at_least_one_field_name_required(self):
+        with pytest.raises(ValueError, match='at least one name'):
+            _make_tuple_bunch('Qwerty', [], ['a', 'b'])
diff --git a/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test_ccallback.py b/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test_ccallback.py
new file mode 100644
index 00000000..a35adce9
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test_ccallback.py
@@ -0,0 +1,197 @@
+from numpy.testing import assert_equal, assert_
+from pytest import raises as assert_raises
+
+import time
+import pytest
+import ctypes
+import threading
+from scipy._lib import _ccallback_c as _test_ccallback_cython
+from scipy._lib import _test_ccallback
+from scipy._lib._ccallback import LowLevelCallable
+
+try:
+    import cffi
+    HAVE_CFFI = True
+except ImportError:
+    HAVE_CFFI = False
+
+
+ERROR_VALUE = 2.0
+
+
+def callback_python(a, user_data=None):
+    if a == ERROR_VALUE:
+        raise ValueError("bad value")
+
+    if user_data is None:
+        return a + 1
+    else:
+        return a + user_data
+
+
+def _get_cffi_func(base, signature):
+    if not HAVE_CFFI:
+        pytest.skip("cffi not installed")
+
+    # Get function address
+    voidp = ctypes.cast(base, ctypes.c_void_p)
+    address = voidp.value
+
+    # Create corresponding cffi handle
+    ffi = cffi.FFI()
+    func = ffi.cast(signature, address)
+    return func
+
+
+def _get_ctypes_data():
+    value = ctypes.c_double(2.0)
+    return ctypes.cast(ctypes.pointer(value), ctypes.c_voidp)
+
+
+def _get_cffi_data():
+    if not HAVE_CFFI:
+        pytest.skip("cffi not installed")
+    ffi = cffi.FFI()
+    return ffi.new('double *', 2.0)
+
+
+CALLERS = {
+    'simple': _test_ccallback.test_call_simple,
+    'nodata': _test_ccallback.test_call_nodata,
+    'nonlocal': _test_ccallback.test_call_nonlocal,
+    'cython': _test_ccallback_cython.test_call_cython,
+}
+
+# These functions have signatures known to the callers
+FUNCS = {
+    'python': lambda: callback_python,
+    'capsule': lambda: _test_ccallback.test_get_plus1_capsule(),
+    'cython': lambda: LowLevelCallable.from_cython(_test_ccallback_cython, "plus1_cython"),
+    'ctypes': lambda: _test_ccallback_cython.plus1_ctypes,
+    'cffi': lambda: _get_cffi_func(_test_ccallback_cython.plus1_ctypes,
+                                   'double (*)(double, int *, void *)'),
+    'capsule_b': lambda: _test_ccallback.test_get_plus1b_capsule(),
+    'cython_b': lambda: LowLevelCallable.from_cython(_test_ccallback_cython, "plus1b_cython"),
+    'ctypes_b': lambda: _test_ccallback_cython.plus1b_ctypes,
+    'cffi_b': lambda: _get_cffi_func(_test_ccallback_cython.plus1b_ctypes,
+                                     'double (*)(double, double, int *, void *)'),
+}
+
+# These functions have signatures the callers don't know
+BAD_FUNCS = {
+    'capsule_bc': lambda: _test_ccallback.test_get_plus1bc_capsule(),
+    'cython_bc': lambda: LowLevelCallable.from_cython(_test_ccallback_cython, "plus1bc_cython"),
+    'ctypes_bc': lambda: _test_ccallback_cython.plus1bc_ctypes,
+    'cffi_bc': lambda: _get_cffi_func(_test_ccallback_cython.plus1bc_ctypes,
+                                      'double (*)(double, double, double, int *, void *)'),
+}
+
+USER_DATAS = {
+    'ctypes': _get_ctypes_data,
+    'cffi': _get_cffi_data,
+    'capsule': _test_ccallback.test_get_data_capsule,
+}
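+
+# Illustrative pattern (a sketch of what the tests below do): wrap a
+# low-level function, plus optional user_data, in a LowLevelCallable
+# before handing it to a caller:
+#     func2 = LowLevelCallable(FUNCS['capsule'](), USER_DATAS['capsule']())
+#     CALLERS['simple'](func2, 1.0)  # plus1 with user_data == 2.0 -> 3.0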
+
+
+def test_callbacks():
+    def check(caller, func, user_data):
+        caller = CALLERS[caller]
+        func = FUNCS[func]()
+        user_data = USER_DATAS[user_data]()
+
+        if func is callback_python:
+            func2 = lambda x: func(x, 2.0)
+        else:
+            func2 = LowLevelCallable(func, user_data)
+            func = LowLevelCallable(func)
+
+        # Test basic call
+        assert_equal(caller(func, 1.0), 2.0)
+
+        # Test 'bad' value resulting to an error
+        assert_raises(ValueError, caller, func, ERROR_VALUE)
+
+        # Test passing in user_data
+        assert_equal(caller(func2, 1.0), 3.0)
+
+    for caller in sorted(CALLERS.keys()):
+        for func in sorted(FUNCS.keys()):
+            for user_data in sorted(USER_DATAS.keys()):
+                check(caller, func, user_data)
+
+
+def test_bad_callbacks():
+    def check(caller, func, user_data):
+        caller = CALLERS[caller]
+        user_data = USER_DATAS[user_data]()
+        func = BAD_FUNCS[func]()
+
+        if func is callback_python:
+            func2 = lambda x: func(x, 2.0)
+        else:
+            func2 = LowLevelCallable(func, user_data)
+            func = LowLevelCallable(func)
+
+        # Test that basic call fails
+        assert_raises(ValueError, caller, LowLevelCallable(func), 1.0)
+
+        # Test that passing in user_data also fails
+        assert_raises(ValueError, caller, func2, 1.0)
+
+        # Test error message
+        llfunc = LowLevelCallable(func)
+        try:
+            caller(llfunc, 1.0)
+        except ValueError as err:
+            msg = str(err)
+            assert_(llfunc.signature in msg, msg)
+            assert_('double (double, double, int *, void *)' in msg, msg)
+
+    for caller in sorted(CALLERS.keys()):
+        for func in sorted(BAD_FUNCS.keys()):
+            for user_data in sorted(USER_DATAS.keys()):
+                check(caller, func, user_data)
+
+
+def test_signature_override():
+    caller = _test_ccallback.test_call_simple
+    func = _test_ccallback.test_get_plus1_capsule()
+
+    llcallable = LowLevelCallable(func, signature="bad signature")
+    assert_equal(llcallable.signature, "bad signature")
+    assert_raises(ValueError, caller, llcallable, 3)
+
+    llcallable = LowLevelCallable(func, signature="double (double, int *, void *)")
+    assert_equal(llcallable.signature, "double (double, int *, void *)")
+    assert_equal(caller(llcallable, 3), 4)
+
+
+def test_threadsafety():
+    def callback(a, caller):
+        if a <= 0:
+            return 1
+        else:
+            res = caller(lambda x: callback(x, caller), a - 1)
+            return 2*res
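+    # Each recursion level doubles the result, so invoking the caller with
+    # `count` yields 2.0 ** count once recursion bottoms out at a <= 0.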
+
+    def check(caller):
+        caller = CALLERS[caller]
+
+        results = []
+
+        count = 10
+
+        def run():
+            time.sleep(0.01)
+            r = caller(lambda x: callback(x, caller), count)
+            results.append(r)
+
+        threads = [threading.Thread(target=run) for j in range(20)]
+        for thread in threads:
+            thread.start()
+        for thread in threads:
+            thread.join()
+
+        assert_equal(results, [2.0**count]*len(threads))
+
+    for caller in CALLERS.keys():
+        check(caller)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test_deprecation.py b/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test_deprecation.py
new file mode 100644
index 00000000..7910bd56
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test_deprecation.py
@@ -0,0 +1,10 @@
+import pytest
+
+
+def test_cython_api_deprecation():
+    match = ("`scipy._lib._test_deprecation_def.foo_deprecated` "
+             "is deprecated, use `foo` instead!\n"
+             "Deprecated in Scipy 42.0.0")
+    with pytest.warns(DeprecationWarning, match=match):
+        from .. import _test_deprecation_call
+    assert _test_deprecation_call.call() == (1, 1)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test_import_cycles.py b/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test_import_cycles.py
new file mode 100644
index 00000000..f6d9419b
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test_import_cycles.py
@@ -0,0 +1,53 @@
+import sys
+import subprocess
+
+
+MODULES = [
+    "scipy.cluster",
+    "scipy.cluster.vq",
+    "scipy.cluster.hierarchy",
+    "scipy.constants",
+    "scipy.fft",
+    "scipy.fftpack",
+    "scipy.fftpack.convolve",
+    "scipy.integrate",
+    "scipy.interpolate",
+    "scipy.io",
+    "scipy.io.arff",
+    "scipy.io.harwell_boeing",
+    "scipy.io.idl",
+    "scipy.io.matlab",
+    "scipy.io.netcdf",
+    "scipy.io.wavfile",
+    "scipy.linalg",
+    "scipy.linalg.blas",
+    "scipy.linalg.cython_blas",
+    "scipy.linalg.lapack",
+    "scipy.linalg.cython_lapack",
+    "scipy.linalg.interpolative",
+    "scipy.misc",
+    "scipy.ndimage",
+    "scipy.odr",
+    "scipy.optimize",
+    "scipy.signal",
+    "scipy.signal.windows",
+    "scipy.sparse",
+    "scipy.sparse.linalg",
+    "scipy.sparse.csgraph",
+    "scipy.spatial",
+    "scipy.spatial.distance",
+    "scipy.special",
+    "scipy.stats",
+    "scipy.stats.distributions",
+    "scipy.stats.mstats",
+    "scipy.stats.contingency"
+]
+
+
+def test_modules_importable():
+    # Regression test for gh-6793.
+    # Check that all modules are importable in a new Python process.
+    # This is not necessarily true if there are import cycles present.
+    for module in MODULES:
+        cmd = 'import {}'.format(module)
+        subprocess.check_call([sys.executable, '-c', cmd])
diff --git a/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test_public_api.py b/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test_public_api.py
new file mode 100644
index 00000000..a8bfa5ec
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test_public_api.py
@@ -0,0 +1,326 @@
+"""
+This test script is adapted from:
+    https://github.com/numpy/numpy/blob/main/numpy/tests/test_public_api.py
+"""
+
+import pkgutil
+import types
+import importlib
+import warnings
+
+import scipy
+
+
+def check_dir(module, module_name=None):
+    """Returns a mapping of all objects with the wrong __module__ attribute."""
+    if module_name is None:
+        module_name = module.__name__
+    results = {}
+    for name in dir(module):
+        item = getattr(module, name)
+        if (hasattr(item, '__module__') and hasattr(item, '__name__')
+                and item.__module__ != module_name):
+            results[name] = item.__module__ + '.' + item.__name__
+    return results
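+
+# Usage sketch (illustrative): check_dir flags objects exposed in a module
+# whose __module__ attribute points elsewhere, e.g.
+#     check_dir(scipy.cluster)
+#     # -> {} when clean, else {'name': 'actual.defining.module.name', ...}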
+
+
+def test_dir_testing():
+    """Assert that output of dir has only one "testing/tester"
+    attribute without duplicate"""
+    assert len(dir(scipy)) == len(set(dir(scipy)))
+
+
+# Historically SciPy has not used leading underscores for private submodules
+# much.  This has resulted in lots of things that look like public modules
+# (i.e. things that can be imported as `import scipy.somesubmodule.somefile`),
+# but were never intended to be public.  The PUBLIC_MODULES list contains
+# modules that are either public because they were meant to be, or because they
+# contain public functions/objects that aren't present in any other namespace
+# for whatever reason and therefore should be treated as public.
+PUBLIC_MODULES = ["scipy." + s for s in [
+    "cluster",
+    "cluster.vq",
+    "cluster.hierarchy",
+    "constants",
+    "datasets",
+    "fft",
+    "fftpack",
+    "integrate",
+    "interpolate",
+    "io",
+    "io.arff",
+    "io.matlab",
+    "io.wavfile",
+    "linalg",
+    "linalg.blas",
+    "linalg.cython_blas",
+    "linalg.lapack",
+    "linalg.cython_lapack",
+    "linalg.interpolative",
+    "misc",
+    "ndimage",
+    "odr",
+    "optimize",
+    "signal",
+    "signal.windows",
+    "sparse",
+    "sparse.linalg",
+    "sparse.csgraph",
+    "spatial",
+    "spatial.distance",
+    "spatial.transform",
+    "special",
+    "stats",
+    "stats.contingency",
+    "stats.distributions",
+    "stats.mstats",
+    "stats.qmc",
+    "stats.sampling"
+]]
+
+# The PRIVATE_BUT_PRESENT_MODULES list contains modules that look public (lack
+# of underscores) but should not be used.  For many of those modules the
+# current status is fine.  For others it may make sense to work on making them
+# private, to clean up our public API and avoid confusion.
+# Support for these private modules will be removed in SciPy v2.0.0.
+PRIVATE_BUT_PRESENT_MODULES = [
+    'scipy.constants.codata',
+    'scipy.constants.constants',
+    'scipy.fftpack.basic',
+    'scipy.fftpack.convolve',
+    'scipy.fftpack.helper',
+    'scipy.fftpack.pseudo_diffs',
+    'scipy.fftpack.realtransforms',
+    'scipy.integrate.odepack',
+    'scipy.integrate.quadpack',
+    'scipy.integrate.dop',
+    'scipy.integrate.lsoda',
+    'scipy.integrate.vode',
+    'scipy.interpolate.dfitpack',
+    'scipy.interpolate.fitpack',
+    'scipy.interpolate.fitpack2',
+    'scipy.interpolate.interpnd',
+    'scipy.interpolate.interpolate',
+    'scipy.interpolate.ndgriddata',
+    'scipy.interpolate.polyint',
+    'scipy.interpolate.rbf',
+    'scipy.io.arff.arffread',
+    'scipy.io.harwell_boeing',
+    'scipy.io.idl',
+    'scipy.io.mmio',
+    'scipy.io.netcdf',
+    'scipy.io.matlab.byteordercodes',
+    'scipy.io.matlab.mio',
+    'scipy.io.matlab.mio4',
+    'scipy.io.matlab.mio5',
+    'scipy.io.matlab.mio5_params',
+    'scipy.io.matlab.mio5_utils',
+    'scipy.io.matlab.mio_utils',
+    'scipy.io.matlab.miobase',
+    'scipy.io.matlab.streams',
+    'scipy.linalg.basic',
+    'scipy.linalg.decomp',
+    'scipy.linalg.decomp_cholesky',
+    'scipy.linalg.decomp_lu',
+    'scipy.linalg.decomp_qr',
+    'scipy.linalg.decomp_schur',
+    'scipy.linalg.decomp_svd',
+    'scipy.linalg.flinalg',
+    'scipy.linalg.matfuncs',
+    'scipy.linalg.misc',
+    'scipy.linalg.special_matrices',
+    'scipy.misc.common',
+    'scipy.misc.doccer',
+    'scipy.ndimage.filters',
+    'scipy.ndimage.fourier',
+    'scipy.ndimage.interpolation',
+    'scipy.ndimage.measurements',
+    'scipy.ndimage.morphology',
+    'scipy.odr.models',
+    'scipy.odr.odrpack',
+    'scipy.optimize.cobyla',
+    'scipy.optimize.cython_optimize',
+    'scipy.optimize.lbfgsb',
+    'scipy.optimize.linesearch',
+    'scipy.optimize.minpack',
+    'scipy.optimize.minpack2',
+    'scipy.optimize.moduleTNC',
+    'scipy.optimize.nonlin',
+    'scipy.optimize.optimize',
+    'scipy.optimize.slsqp',
+    'scipy.optimize.tnc',
+    'scipy.optimize.zeros',
+    'scipy.signal.bsplines',
+    'scipy.signal.filter_design',
+    'scipy.signal.fir_filter_design',
+    'scipy.signal.lti_conversion',
+    'scipy.signal.ltisys',
+    'scipy.signal.signaltools',
+    'scipy.signal.spectral',
+    'scipy.signal.spline',
+    'scipy.signal.waveforms',
+    'scipy.signal.wavelets',
+    'scipy.signal.windows.windows',
+    'scipy.sparse.base',
+    'scipy.sparse.bsr',
+    'scipy.sparse.compressed',
+    'scipy.sparse.construct',
+    'scipy.sparse.coo',
+    'scipy.sparse.csc',
+    'scipy.sparse.csr',
+    'scipy.sparse.data',
+    'scipy.sparse.dia',
+    'scipy.sparse.dok',
+    'scipy.sparse.extract',
+    'scipy.sparse.lil',
+    'scipy.sparse.linalg.dsolve',
+    'scipy.sparse.linalg.eigen',
+    'scipy.sparse.linalg.interface',
+    'scipy.sparse.linalg.isolve',
+    'scipy.sparse.linalg.matfuncs',
+    'scipy.sparse.sparsetools',
+    'scipy.sparse.spfuncs',
+    'scipy.sparse.sputils',
+    'scipy.spatial.ckdtree',
+    'scipy.spatial.kdtree',
+    'scipy.spatial.qhull',
+    'scipy.spatial.transform.rotation',
+    'scipy.special.add_newdocs',
+    'scipy.special.basic',
+    'scipy.special.cython_special',
+    'scipy.special.orthogonal',
+    'scipy.special.sf_error',
+    'scipy.special.specfun',
+    'scipy.special.spfun_stats',
+    'scipy.stats.biasedurn',
+    'scipy.stats.kde',
+    'scipy.stats.morestats',
+    'scipy.stats.mstats_basic',
+    'scipy.stats.mstats_extras',
+    'scipy.stats.mvn',
+    'scipy.stats.statlib',
+    'scipy.stats.stats',
+]
+
+
+def is_unexpected(name):
+    """Check if this needs to be considered."""
+    if '._' in name or '.tests' in name or '.setup' in name:
+        return False
+
+    if name in PUBLIC_MODULES:
+        return False
+
+    if name in PRIVATE_BUT_PRESENT_MODULES:
+        return False
+
+    return True
+
+
+SKIP_LIST = [
+    'scipy.conftest',
+    'scipy.version',
+]
+
+
+def test_all_modules_are_expected():
+    """
+    Test that we don't add anything that looks like a new public module by
+    accident. The check is based on filenames.
+    """
+
+    modnames = []
+    for _, modname, ispkg in pkgutil.walk_packages(path=scipy.__path__,
+                                                   prefix=scipy.__name__ + '.',
+                                                   onerror=None):
+        if is_unexpected(modname) and modname not in SKIP_LIST:
+            # We have a name that is new.  If that's on purpose, add it to
+            # PUBLIC_MODULES.  We don't expect to have to add anything to
+            # PRIVATE_BUT_PRESENT_MODULES.  Use an underscore in the name!
+            modnames.append(modname)
+
+    if modnames:
+        raise AssertionError(f'Found unexpected modules: {modnames}')
+
+
+# Stuff that clearly shouldn't be in the API and is detected by the next test
+# below
+SKIP_LIST_2 = [
+    'scipy.char',
+    'scipy.rec',
+    'scipy.emath',
+    'scipy.math',
+    'scipy.random',
+    'scipy.ctypeslib',
+    'scipy.ma'
+]
+
+
+def test_all_modules_are_expected_2():
+    """
+    Check all objects in public modules. The pkgutil-based method in
+    `test_all_modules_are_expected` only looks at filenames, so it does
+    not catch imports into a namespace.
+    """
+
+    def find_unexpected_members(mod_name):
+        members = []
+        module = importlib.import_module(mod_name)
+        if hasattr(module, '__all__'):
+            objnames = module.__all__
+        else:
+            objnames = dir(module)
+
+        for objname in objnames:
+            if not objname.startswith('_'):
+                fullobjname = mod_name + '.' + objname
+                if isinstance(getattr(module, objname), types.ModuleType):
+                    if is_unexpected(fullobjname) and fullobjname not in SKIP_LIST_2:
+                        members.append(fullobjname)
+
+        return members
+
+    unexpected_members = find_unexpected_members("scipy")
+    for modname in PUBLIC_MODULES:
+        unexpected_members.extend(find_unexpected_members(modname))
+
+    if unexpected_members:
+        raise AssertionError("Found unexpected object(s) that look like "
+                             "modules: {}".format(unexpected_members))
+
+
+def test_api_importable():
+    """
+    Check that all submodules listed higher up in this file can be imported.
+
+    Note that if a PRIVATE_BUT_PRESENT_MODULES entry goes missing, it may
+    simply need to be removed from the list (deprecation may or may not be
+    needed - apply common sense).
+    """
+    def check_importable(module_name):
+        try:
+            importlib.import_module(module_name)
+        except (ImportError, AttributeError):
+            return False
+
+        return True
+
+    module_names = []
+    for module_name in PUBLIC_MODULES:
+        if not check_importable(module_name):
+            module_names.append(module_name)
+
+    if module_names:
+        raise AssertionError("Modules in the public API that cannot be "
+                             "imported: {}".format(module_names))
+
+    with warnings.catch_warnings(record=True) as w:
+        warnings.filterwarnings('always', category=DeprecationWarning)
+        warnings.filterwarnings('always', category=ImportWarning)
+        for module_name in PRIVATE_BUT_PRESENT_MODULES:
+            if not check_importable(module_name):
+                module_names.append(module_name)
+
+    if module_names:
+        raise AssertionError("Modules that are not really public but looked "
+                             "public and can not be imported: "
+                             "{}".format(module_names))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test_scipy_version.py b/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test_scipy_version.py
new file mode 100644
index 00000000..21f0e8e2
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test_scipy_version.py
@@ -0,0 +1,18 @@
+import re
+
+import scipy
+from numpy.testing import assert_
+
+
+def test_valid_scipy_version():
+    # Verify that the SciPy version is a valid one (no .post suffix or other
+    # nonsense). See NumPy issue gh-6431 for an issue caused by an invalid
+    # version.
+    version_pattern = r"^[0-9]+\.[0-9]+\.[0-9]+(|a[0-9]|b[0-9]|rc[0-9])"
+    dev_suffix = r"(\.dev0\+.+([0-9a-f]{7}|Unknown))"
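+    # Illustrative matches (assumed examples, not from the test itself):
+    # releases such as "1.9.3" or "1.10.0rc1" satisfy version_pattern;
+    # dev builds additionally need a ".dev0+..." suffix ending in 7 hex
+    # digits (e.g. "1.10.0.dev0+1234.5deadbe") or "Unknown".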
+    if scipy.version.release:
+        res = re.match(version_pattern, scipy.__version__)
+    else:
+        res = re.match(version_pattern + dev_suffix, scipy.__version__)
+
+    assert_(res is not None, scipy.__version__)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test_tmpdirs.py b/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test_tmpdirs.py
new file mode 100644
index 00000000..466f9264
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test_tmpdirs.py
@@ -0,0 +1,42 @@
+""" Test tmpdirs module """
+from os import getcwd
+from os.path import realpath, abspath, dirname, isfile, join as pjoin, exists
+
+from scipy._lib._tmpdirs import tempdir, in_tempdir, in_dir
+
+from numpy.testing import assert_, assert_equal
+
+MY_PATH = abspath(__file__)
+MY_DIR = dirname(MY_PATH)
+
+
+def test_tempdir():
+    with tempdir() as tmpdir:
+        fname = pjoin(tmpdir, 'example_file.txt')
+        with open(fname, 'wt') as fobj:
+            fobj.write('a string\n')
+    assert_(not exists(tmpdir))
+
+
+def test_in_tempdir():
+    my_cwd = getcwd()
+    with in_tempdir() as tmpdir:
+        with open('test.txt', 'wt') as f:
+            f.write('some text')
+        assert_(isfile('test.txt'))
+        assert_(isfile(pjoin(tmpdir, 'test.txt')))
+    assert_(not exists(tmpdir))
+    assert_equal(getcwd(), my_cwd)
+
+
+def test_given_directory():
+    # Test InGivenDirectory
+    cwd = getcwd()
+    with in_dir() as tmpdir:
+        assert_equal(tmpdir, abspath(cwd))
+        assert_equal(tmpdir, abspath(getcwd()))
+    with in_dir(MY_DIR) as tmpdir:
+        assert_equal(tmpdir, MY_DIR)
+        assert_equal(realpath(MY_DIR), realpath(abspath(getcwd())))
+    # We were deleting the given directory! Check not so now.
+    assert_(isfile(MY_PATH))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test_warnings.py b/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test_warnings.py
new file mode 100644
index 00000000..9f1090ad
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/_lib/tests/test_warnings.py
@@ -0,0 +1,131 @@
+"""
+Tests which scan for certain occurrences in the code. They may not find
+all of these occurrences but should catch almost all. This file was adapted
+from NumPy.
+"""
+
+
+import os
+from pathlib import Path
+import ast
+import tokenize
+
+import scipy
+
+import pytest
+
+
+class ParseCall(ast.NodeVisitor):
+    def __init__(self):
+        self.ls = []
+
+    def visit_Attribute(self, node):
+        ast.NodeVisitor.generic_visit(self, node)
+        self.ls.append(node.attr)
+
+    def visit_Name(self, node):
+        self.ls.append(node.id)
+
+class FindFuncs(ast.NodeVisitor):
+    def __init__(self, filename):
+        super().__init__()
+        self.__filename = filename
+        self.bad_filters = []
+        self.bad_stacklevels = []
+
+    def visit_Call(self, node):
+        p = ParseCall()
+        p.visit(node.func)
+        ast.NodeVisitor.generic_visit(self, node)
+
+        if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings':
+            if node.args[0].s == "ignore":
+                self.bad_filters.append(
+                    "{}:{}".format(self.__filename, node.lineno))
+
+        if p.ls[-1] == 'warn' and (
+                len(p.ls) == 1 or p.ls[-2] == 'warnings'):
+
+            if str(self.__filename) == "_lib/tests/test_warnings.py":
+                # This file
+                return
+
+            # See if stacklevel exists:
+            if len(node.args) == 3:
+                return
+            args = {kw.arg for kw in node.keywords}
+            if "stacklevel" not in args:
+                self.bad_stacklevels.append(
+                    "{}:{}".format(self.__filename, node.lineno))
+
+
+@pytest.fixture(scope="session")
+def warning_calls():
+    # Collect both "ignore" filter uses and missing stacklevels in one scan.
+    base = Path(scipy.__file__).parent
+
+    bad_filters = []
+    bad_stacklevels = []
+
+    for path in base.rglob("*.py"):
+        # use tokenize to auto-detect encoding on systems where no
+        # default encoding is defined (e.g., LANG='C')
+        with tokenize.open(str(path)) as file:
+            tree = ast.parse(file.read(), filename=str(path))
+            finder = FindFuncs(path.relative_to(base))
+            finder.visit(tree)
+            bad_filters.extend(finder.bad_filters)
+            bad_stacklevels.extend(finder.bad_stacklevels)
+
+    return bad_filters, bad_stacklevels
+
+
+@pytest.mark.slow
+def test_warning_calls_filters(warning_calls):
+    bad_filters, bad_stacklevels = warning_calls
+
+    # We try not to add filters in the code base, because those filters aren't
+    # thread-safe. We aim to only filter in tests with
+    # np.testing.suppress_warnings. However, in some cases it may prove
+    # necessary to filter out warnings, because we can't (easily) fix the root
+    # cause for them and we don't want users to see some warnings when they use
+    # SciPy correctly. So we list exceptions here.  Add new entries only if
+    # there's a good reason.
+    allowed_filters = (
+        os.path.join('datasets', '_fetchers.py'),
+        os.path.join('datasets', '__init__.py'),
+        os.path.join('optimize', '_optimize.py'),
+        os.path.join('sparse', '__init__.py'),  # np.matrix pending-deprecation
+        os.path.join('stats', '_discrete_distns.py'),  # gh-14901
+        os.path.join('stats', '_continuous_distns.py'),
+    )
+    bad_filters = [item for item in bad_filters if item.split(':')[0] not in
+                   allowed_filters]
+
+    if bad_filters:
+        raise AssertionError(
+            "warning ignore filter should not be used, instead, use\n"
+            "numpy.testing.suppress_warnings (in tests only);\n"
+            "found in:\n    {}".format(
+                "\n    ".join(bad_filters)))
+
+
+@pytest.mark.slow
+@pytest.mark.xfail(reason="stacklevels currently missing")
+def test_warning_calls_stacklevels(warning_calls):
+    bad_filters, bad_stacklevels = warning_calls
+
+    msg = ""
+
+    if bad_filters:
+        msg += ("warning ignore filter should not be used, instead, use\n"
+                "numpy.testing.suppress_warnings (in tests only);\n"
+                "found in:\n    {}".format("\n    ".join(bad_filters)))
+        msg += "\n\n"
+
+    if bad_stacklevels:
+        msg += "warnings should have an appropriate stacklevel:\n    {}".format(
+                "\n    ".join(bad_stacklevels))
+
+    if msg:
+        raise AssertionError(msg)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/_lib/uarray.py b/__packaged__/coreml/.python_dependencies/scipy/_lib/uarray.py
new file mode 100644
index 00000000..e59d0faf
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/_lib/uarray.py
@@ -0,0 +1,31 @@
+"""`uarray` provides functions for generating multimethods that dispatch to
+multiple different backends
+
+This should be imported, rather than `_uarray` so that an installed version could
+be used instead, if available. This means that users can call
+`uarray.set_backend` directly instead of going through SciPy.
+
+"""
+
+
+# Prefer an installed version of uarray, if available
+try:
+    import uarray as _uarray
+except ImportError:
+    _has_uarray = False
+else:
+    from scipy._lib._pep440 import Version as _Version
+
+    _has_uarray = _Version(_uarray.__version__) >= _Version("0.8")
+    del _uarray
+    del _Version
+
+
+if _has_uarray:
+    from uarray import *
+    from uarray import _Function
+else:
+    from ._uarray import *
+    from ._uarray import _Function
+
+del _has_uarray
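+
+# Usage sketch (hedged; `my_backend` is a hypothetical backend object):
+#     from scipy._lib import uarray
+#     with uarray.set_backend(my_backend):
+#         ...  # multimethods defined through this module now try my_backend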
diff --git a/__packaged__/coreml/.python_dependencies/scipy/cluster/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/cluster/__init__.py
new file mode 100644
index 00000000..8fe47ce9
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/cluster/__init__.py
@@ -0,0 +1,29 @@
+"""
+=========================================
+Clustering package (:mod:`scipy.cluster`)
+=========================================
+
+.. currentmodule:: scipy.cluster
+
+:mod:`scipy.cluster.vq`
+
+Clustering algorithms are useful in information theory, target detection,
+communications, compression, and other areas. The `vq` module supports
+only vector quantization and the k-means algorithm.
+
+:mod:`scipy.cluster.hierarchy`
+
+The `hierarchy` module provides functions for hierarchical and
+agglomerative clustering.  Its features include generating hierarchical
+clusters from distance matrices,
+calculating statistics on clusters, cutting linkages
+to generate flat clusters, and visualizing clusters with dendrograms.
+
+"""
+__all__ = ['vq', 'hierarchy']
+
+from . import vq, hierarchy
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
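+
+# Minimal usage sketch for the `vq` API described above (assumes only the
+# public functions `whiten` and `kmeans`):
+#     import numpy as np
+#     from scipy.cluster.vq import whiten, kmeans
+#     obs = whiten(np.random.rand(100, 2))    # normalize feature variance
+#     centroids, distortion = kmeans(obs, 3)  # find 3 cluster centers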
diff --git a/__packaged__/coreml/.python_dependencies/scipy/cluster/hierarchy.py b/__packaged__/coreml/.python_dependencies/scipy/cluster/hierarchy.py
new file mode 100644
index 00000000..b3e00ead
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/cluster/hierarchy.py
@@ -0,0 +1,4180 @@
+"""
+Hierarchical clustering (:mod:`scipy.cluster.hierarchy`)
+========================================================
+
+.. currentmodule:: scipy.cluster.hierarchy
+
+These functions cut hierarchical clusterings into flat clusterings
+or find the roots of the forest formed by a cut, returning the flat
+cluster ids of each observation.
+
+.. autosummary::
+   :toctree: generated/
+
+   fcluster
+   fclusterdata
+   leaders
+
+These are routines for agglomerative clustering.
+
+.. autosummary::
+   :toctree: generated/
+
+   linkage
+   single
+   complete
+   average
+   weighted
+   centroid
+   median
+   ward
+
+These routines compute statistics on hierarchies.
+
+.. autosummary::
+   :toctree: generated/
+
+   cophenet
+   from_mlab_linkage
+   inconsistent
+   maxinconsts
+   maxdists
+   maxRstat
+   to_mlab_linkage
+
+Routines for visualizing flat clusters.
+
+.. autosummary::
+   :toctree: generated/
+
+   dendrogram
+
+These are data structures and routines for representing hierarchies as
+tree objects.
+
+.. autosummary::
+   :toctree: generated/
+
+   ClusterNode
+   leaves_list
+   to_tree
+   cut_tree
+   optimal_leaf_ordering
+
+These are predicates for checking the validity of linkage and
+inconsistency matrices as well as for checking isomorphism of two
+flat cluster assignments.
+
+.. autosummary::
+   :toctree: generated/
+
+   is_valid_im
+   is_valid_linkage
+   is_isomorphic
+   is_monotonic
+   correspond
+   num_obs_linkage
+
+Utility routines for plotting:
+
+.. autosummary::
+   :toctree: generated/
+
+   set_link_color_palette
+
+Utility classes:
+
+.. autosummary::
+   :toctree: generated/
+
+   DisjointSet -- data structure for incremental connectivity queries
+
+"""
+# Copyright (C) Damian Eads, 2007-2008. New BSD License.
+
+# hierarchy.py (derived from cluster.py, http://scipy-cluster.googlecode.com)
+#
+# Author: Damian Eads
+# Date:   September 22, 2007
+#
+# Copyright (c) 2007, 2008, Damian Eads
+#
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#   - Redistributions of source code must retain the above
+#     copyright notice, this list of conditions and the
+#     following disclaimer.
+#   - Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer
+#     in the documentation and/or other materials provided with the
+#     distribution.
+#   - Neither the name of the author nor the names of its
+#     contributors may be used to endorse or promote products derived
+#     from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import warnings
+import bisect
+from collections import deque
+
+import numpy as np
+from . import _hierarchy, _optimal_leaf_ordering
+import scipy.spatial.distance as distance
+from scipy._lib._disjoint_set import DisjointSet
+
+
+_LINKAGE_METHODS = {'single': 0, 'complete': 1, 'average': 2, 'centroid': 3,
+                    'median': 4, 'ward': 5, 'weighted': 6}
+_EUCLIDEAN_METHODS = ('centroid', 'median', 'ward')
+
+__all__ = ['ClusterNode', 'DisjointSet', 'average', 'centroid', 'complete',
+           'cophenet', 'correspond', 'cut_tree', 'dendrogram', 'fcluster',
+           'fclusterdata', 'from_mlab_linkage', 'inconsistent',
+           'is_isomorphic', 'is_monotonic', 'is_valid_im', 'is_valid_linkage',
+           'leaders', 'leaves_list', 'linkage', 'maxRstat', 'maxdists',
+           'maxinconsts', 'median', 'num_obs_linkage', 'optimal_leaf_ordering',
+           'set_link_color_palette', 'single', 'to_mlab_linkage', 'to_tree',
+           'ward', 'weighted']
+
+
+class ClusterWarning(UserWarning):
+    pass
+
+
+def _warning(s):
+    warnings.warn('scipy.cluster: %s' % s, ClusterWarning, stacklevel=3)
+
+
+def _copy_array_if_base_present(a):
+    """
+    Copy the array if its base points to a parent array.
+    """
+    if a.base is not None:
+        return a.copy()
+    elif np.issubsctype(a, np.float32):
+        return np.array(a, dtype=np.double)
+    else:
+        return a
+
+
+def _copy_arrays_if_base_present(T):
+    """
+    Accept a tuple of arrays T. Copy the array T[i] if its base points
+    to a parent array; otherwise, just copy the reference.
+    This is useful when the arrays are being passed to a C function that
+    does not do proper striding.
+    """
+    l = [_copy_array_if_base_present(a) for a in T]
+    return l
+
+
+def _randdm(pnts):
+    """
+    Generate a random distance matrix stored in condensed form.
+
+    Parameters
+    ----------
+    pnts : int
+        The number of points in the distance matrix. Has to be at least 2.
+
+    Returns
+    -------
+    D : ndarray
+        A ``pnts * (pnts - 1) / 2`` sized vector is returned.
+    """
+    if pnts >= 2:
+        D = np.random.rand(pnts * (pnts - 1) // 2)
+    else:
+        raise ValueError("The number of points in the distance matrix "
+                         "must be at least 2.")
+    return D
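+
+# For `pnts` points the condensed form holds pnts * (pnts - 1) / 2 entries;
+# e.g. pnts = 4 gives the 6 pairwise distances (0,1), (0,2), (0,3), (1,2),
+# (1,3), (2,3), ordered row by row along the upper triangle.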
+
+
+def single(y):
+    """
+    Perform single/min/nearest linkage on the condensed distance matrix ``y``.
+
+    Parameters
+    ----------
+    y : ndarray
+        The upper triangular part of the distance matrix. The result of
+        ``pdist`` is returned in this form.
+
+    Returns
+    -------
+    Z : ndarray
+        The linkage matrix.
+
+    See Also
+    --------
+    linkage : for advanced creation of hierarchical clusterings.
+    scipy.spatial.distance.pdist : pairwise distance metrics
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import single, fcluster
+    >>> from scipy.spatial.distance import pdist
+
+    First, we need a toy dataset to play with::
+
+        x x    x x
+        x        x
+
+        x        x
+        x x    x x
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    Then, we get a condensed distance matrix from this dataset:
+
+    >>> y = pdist(X)
+
+    Finally, we can perform the clustering:
+
+    >>> Z = single(y)
+    >>> Z
+    array([[ 0.,  1.,  1.,  2.],
+           [ 2., 12.,  1.,  3.],
+           [ 3.,  4.,  1.,  2.],
+           [ 5., 14.,  1.,  3.],
+           [ 6.,  7.,  1.,  2.],
+           [ 8., 16.,  1.,  3.],
+           [ 9., 10.,  1.,  2.],
+           [11., 18.,  1.,  3.],
+           [13., 15.,  2.,  6.],
+           [17., 20.,  2.,  9.],
+           [19., 21.,  2., 12.]])
+
+    The linkage matrix ``Z`` represents a dendrogram - see
+    `scipy.cluster.hierarchy.linkage` for a detailed explanation of its
+    contents.
+
+    We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster
+    each initial point would belong given a distance threshold:
+
+    >>> fcluster(Z, 0.9, criterion='distance')
+    array([ 7,  8,  9, 10, 11, 12,  4,  5,  6,  1,  2,  3], dtype=int32)
+    >>> fcluster(Z, 1, criterion='distance')
+    array([3, 3, 3, 4, 4, 4, 2, 2, 2, 1, 1, 1], dtype=int32)
+    >>> fcluster(Z, 2, criterion='distance')
+    array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)
+
+    Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a
+    plot of the dendrogram.
+    """
+    return linkage(y, method='single', metric='euclidean')
+
+
+def complete(y):
+    """
+    Perform complete/max/farthest point linkage on a condensed distance matrix.
+
+    Parameters
+    ----------
+    y : ndarray
+        The upper triangular part of the distance matrix. The result of
+        ``pdist`` is returned in this form.
+
+    Returns
+    -------
+    Z : ndarray
+        A linkage matrix containing the hierarchical clustering. See
+        the `linkage` function documentation for more information
+        on its structure.
+
+    See Also
+    --------
+    linkage : for advanced creation of hierarchical clusterings.
+    scipy.spatial.distance.pdist : pairwise distance metrics
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import complete, fcluster
+    >>> from scipy.spatial.distance import pdist
+
+    First, we need a toy dataset to play with::
+
+        x x    x x
+        x        x
+
+        x        x
+        x x    x x
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    Then, we get a condensed distance matrix from this dataset:
+
+    >>> y = pdist(X)
+
+    Finally, we can perform the clustering:
+
+    >>> Z = complete(y)
+    >>> Z
+    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
+           [ 3.        ,  4.        ,  1.        ,  2.        ],
+           [ 6.        ,  7.        ,  1.        ,  2.        ],
+           [ 9.        , 10.        ,  1.        ,  2.        ],
+           [ 2.        , 12.        ,  1.41421356,  3.        ],
+           [ 5.        , 13.        ,  1.41421356,  3.        ],
+           [ 8.        , 14.        ,  1.41421356,  3.        ],
+           [11.        , 15.        ,  1.41421356,  3.        ],
+           [16.        , 17.        ,  4.12310563,  6.        ],
+           [18.        , 19.        ,  4.12310563,  6.        ],
+           [20.        , 21.        ,  5.65685425, 12.        ]])
+
+    The linkage matrix ``Z`` represents a dendrogram - see
+    `scipy.cluster.hierarchy.linkage` for a detailed explanation of its
+    contents.
+
+    We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster
+    each initial point would belong given a distance threshold:
+
+    >>> fcluster(Z, 0.9, criterion='distance')
+    array([ 1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12], dtype=int32)
+    >>> fcluster(Z, 1.5, criterion='distance')
+    array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)
+    >>> fcluster(Z, 4.5, criterion='distance')
+    array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2], dtype=int32)
+    >>> fcluster(Z, 6, criterion='distance')
+    array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)
+
+    Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a
+    plot of the dendrogram.
+    """
+    return linkage(y, method='complete', metric='euclidean')
+
+
+def average(y):
+    """
+    Perform average/UPGMA linkage on a condensed distance matrix.
+
+    Parameters
+    ----------
+    y : ndarray
+        The upper triangular part of the distance matrix. The result of
+        ``pdist`` is returned in this form.
+
+    Returns
+    -------
+    Z : ndarray
+        A linkage matrix containing the hierarchical clustering. See
+        `linkage` for more information on its structure.
+
+    See Also
+    --------
+    linkage : for advanced creation of hierarchical clusterings.
+    scipy.spatial.distance.pdist : pairwise distance metrics
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import average, fcluster
+    >>> from scipy.spatial.distance import pdist
+
+    First, we need a toy dataset to play with::
+
+        x x    x x
+        x        x
+
+        x        x
+        x x    x x
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    Then, we get a condensed distance matrix from this dataset:
+
+    >>> y = pdist(X)
+
+    Finally, we can perform the clustering:
+
+    >>> Z = average(y)
+    >>> Z
+    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
+           [ 3.        ,  4.        ,  1.        ,  2.        ],
+           [ 6.        ,  7.        ,  1.        ,  2.        ],
+           [ 9.        , 10.        ,  1.        ,  2.        ],
+           [ 2.        , 12.        ,  1.20710678,  3.        ],
+           [ 5.        , 13.        ,  1.20710678,  3.        ],
+           [ 8.        , 14.        ,  1.20710678,  3.        ],
+           [11.        , 15.        ,  1.20710678,  3.        ],
+           [16.        , 17.        ,  3.39675184,  6.        ],
+           [18.        , 19.        ,  3.39675184,  6.        ],
+           [20.        , 21.        ,  4.09206523, 12.        ]])
+
+    The linkage matrix ``Z`` represents a dendrogram - see
+    `scipy.cluster.hierarchy.linkage` for a detailed explanation of its
+    contents.
+
+    We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster
+    each initial point would belong given a distance threshold:
+
+    >>> fcluster(Z, 0.9, criterion='distance')
+    array([ 1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12], dtype=int32)
+    >>> fcluster(Z, 1.5, criterion='distance')
+    array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)
+    >>> fcluster(Z, 4, criterion='distance')
+    array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2], dtype=int32)
+    >>> fcluster(Z, 6, criterion='distance')
+    array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)
+
+    Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a
+    plot of the dendrogram.
+
+    """
+    return linkage(y, method='average', metric='euclidean')
+
+
+def weighted(y):
+    """
+    Perform weighted/WPGMA linkage on the condensed distance matrix.
+
+    See `linkage` for more information on the return
+    structure and algorithm.
+
+    Parameters
+    ----------
+    y : ndarray
+        The upper triangular part of the distance matrix. The result of
+        ``pdist`` is returned in this form.
+
+    Returns
+    -------
+    Z : ndarray
+        A linkage matrix containing the hierarchical clustering. See
+        `linkage` for more information on its structure.
+
+    See Also
+    --------
+    linkage : for advanced creation of hierarchical clusterings.
+    scipy.spatial.distance.pdist : pairwise distance metrics
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import weighted, fcluster
+    >>> from scipy.spatial.distance import pdist
+
+    First, we need a toy dataset to play with::
+
+        x x    x x
+        x        x
+
+        x        x
+        x x    x x
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    Then, we get a condensed distance matrix from this dataset:
+
+    >>> y = pdist(X)
+
+    Finally, we can perform the clustering:
+
+    >>> Z = weighted(y)
+    >>> Z
+    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
+           [ 6.        ,  7.        ,  1.        ,  2.        ],
+           [ 3.        ,  4.        ,  1.        ,  2.        ],
+           [ 9.        , 11.        ,  1.        ,  2.        ],
+           [ 2.        , 12.        ,  1.20710678,  3.        ],
+           [ 8.        , 13.        ,  1.20710678,  3.        ],
+           [ 5.        , 14.        ,  1.20710678,  3.        ],
+           [10.        , 15.        ,  1.20710678,  3.        ],
+           [18.        , 19.        ,  3.05595762,  6.        ],
+           [16.        , 17.        ,  3.32379407,  6.        ],
+           [20.        , 21.        ,  4.06357713, 12.        ]])
+
+    The linkage matrix ``Z`` represents a dendrogram - see
+    `scipy.cluster.hierarchy.linkage` for a detailed explanation of its
+    contents.
+
+    We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster
+    each initial point would belong given a distance threshold:
+
+    >>> fcluster(Z, 0.9, criterion='distance')
+    array([ 7,  8,  9,  1,  2,  3, 10, 11, 12,  4,  6,  5], dtype=int32)
+    >>> fcluster(Z, 1.5, criterion='distance')
+    array([3, 3, 3, 1, 1, 1, 4, 4, 4, 2, 2, 2], dtype=int32)
+    >>> fcluster(Z, 4, criterion='distance')
+    array([2, 2, 2, 1, 1, 1, 2, 2, 2, 1, 1, 1], dtype=int32)
+    >>> fcluster(Z, 6, criterion='distance')
+    array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)
+
+    Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a
+    plot of the dendrogram.
+
+    """
+    return linkage(y, method='weighted', metric='euclidean')
+
+
+def centroid(y):
+    """
+    Perform centroid/UPGMC linkage.
+
+    See `linkage` for more information on the input matrix,
+    return structure, and algorithm.
+
+    The following are common calling conventions:
+
+    1. ``Z = centroid(y)``
+
+       Performs centroid/UPGMC linkage on the condensed distance
+       matrix ``y``.
+
+    2. ``Z = centroid(X)``
+
+       Performs centroid/UPGMC linkage on the observation matrix ``X``
+       using Euclidean distance as the distance metric.
+
+    Parameters
+    ----------
+    y : ndarray
+        A condensed distance matrix. A condensed
+        distance matrix is a flat array containing the upper
+        triangular part of the distance matrix. This is the form that
+        ``pdist`` returns. Alternatively, a collection of
+        m observation vectors in n dimensions may be passed as
+        an m by n array.
+
+    Returns
+    -------
+    Z : ndarray
+        A linkage matrix containing the hierarchical clustering. See
+        the `linkage` function documentation for more information
+        on its structure.
+
+    See Also
+    --------
+    linkage : for advanced creation of hierarchical clusterings.
+    scipy.spatial.distance.pdist : pairwise distance metrics
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import centroid, fcluster
+    >>> from scipy.spatial.distance import pdist
+
+    First, we need a toy dataset to play with::
+
+        x x    x x
+        x        x
+
+        x        x
+        x x    x x
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    Then, we get a condensed distance matrix from this dataset:
+
+    >>> y = pdist(X)
+
+    Finally, we can perform the clustering:
+
+    >>> Z = centroid(y)
+    >>> Z
+    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
+           [ 3.        ,  4.        ,  1.        ,  2.        ],
+           [ 9.        , 10.        ,  1.        ,  2.        ],
+           [ 6.        ,  7.        ,  1.        ,  2.        ],
+           [ 2.        , 12.        ,  1.11803399,  3.        ],
+           [ 5.        , 13.        ,  1.11803399,  3.        ],
+           [ 8.        , 15.        ,  1.11803399,  3.        ],
+           [11.        , 14.        ,  1.11803399,  3.        ],
+           [18.        , 19.        ,  3.33333333,  6.        ],
+           [16.        , 17.        ,  3.33333333,  6.        ],
+           [20.        , 21.        ,  3.33333333, 12.        ]])
+
+    The linkage matrix ``Z`` represents a dendrogram - see
+    `scipy.cluster.hierarchy.linkage` for a detailed explanation of its
+    contents.
+
+    We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster
+    each initial point would belong given a distance threshold:
+
+    >>> fcluster(Z, 0.9, criterion='distance')
+    array([ 7,  8,  9, 10, 11, 12,  1,  2,  3,  4,  5,  6], dtype=int32)
+    >>> fcluster(Z, 1.1, criterion='distance')
+    array([5, 5, 6, 7, 7, 8, 1, 1, 2, 3, 3, 4], dtype=int32)
+    >>> fcluster(Z, 2, criterion='distance')
+    array([3, 3, 3, 4, 4, 4, 1, 1, 1, 2, 2, 2], dtype=int32)
+    >>> fcluster(Z, 4, criterion='distance')
+    array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)
+
+    Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a
+    plot of the dendrogram.
+
+    """
+    return linkage(y, method='centroid', metric='euclidean')
+
+
+def median(y):
+    """
+    Perform median/WPGMC linkage.
+
+    See `linkage` for more information on the return structure
+    and algorithm.
+
+    The following are common calling conventions:
+
+    1. ``Z = median(y)``
+
+       Performs median/WPGMC linkage on the condensed distance matrix
+       ``y``. See `linkage` for more information on the return
+       structure and algorithm.
+
+    2. ``Z = median(X)``
+
+       Performs median/WPGMC linkage on the observation matrix ``X``
+       using Euclidean distance as the distance metric. See `linkage`
+       for more information on the return structure and algorithm.
+
+    Parameters
+    ----------
+    y : ndarray
+        A condensed distance matrix. A condensed
+        distance matrix is a flat array containing the upper
+        triangular part of the distance matrix. This is the form that
+        ``pdist`` returns.  Alternatively, a collection of
+        m observation vectors in n dimensions may be passed as
+        an m by n array.
+
+    Returns
+    -------
+    Z : ndarray
+        The hierarchical clustering encoded as a linkage matrix.
+
+    See Also
+    --------
+    linkage : for advanced creation of hierarchical clusterings.
+    scipy.spatial.distance.pdist : pairwise distance metrics
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import median, fcluster
+    >>> from scipy.spatial.distance import pdist
+
+    First, we need a toy dataset to play with::
+
+        x x    x x
+        x        x
+
+        x        x
+        x x    x x
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    Then, we get a condensed distance matrix from this dataset:
+
+    >>> y = pdist(X)
+
+    Finally, we can perform the clustering:
+
+    >>> Z = median(y)
+    >>> Z
+    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
+           [ 3.        ,  4.        ,  1.        ,  2.        ],
+           [ 9.        , 10.        ,  1.        ,  2.        ],
+           [ 6.        ,  7.        ,  1.        ,  2.        ],
+           [ 2.        , 12.        ,  1.11803399,  3.        ],
+           [ 5.        , 13.        ,  1.11803399,  3.        ],
+           [ 8.        , 15.        ,  1.11803399,  3.        ],
+           [11.        , 14.        ,  1.11803399,  3.        ],
+           [18.        , 19.        ,  3.        ,  6.        ],
+           [16.        , 17.        ,  3.5       ,  6.        ],
+           [20.        , 21.        ,  3.25      , 12.        ]])
+
+    The linkage matrix ``Z`` represents a dendrogram - see
+    `scipy.cluster.hierarchy.linkage` for a detailed explanation of its
+    contents.
+
+    We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster
+    each initial point would belong given a distance threshold:
+
+    >>> fcluster(Z, 0.9, criterion='distance')
+    array([ 7,  8,  9, 10, 11, 12,  1,  2,  3,  4,  5,  6], dtype=int32)
+    >>> fcluster(Z, 1.1, criterion='distance')
+    array([5, 5, 6, 7, 7, 8, 1, 1, 2, 3, 3, 4], dtype=int32)
+    >>> fcluster(Z, 2, criterion='distance')
+    array([3, 3, 3, 4, 4, 4, 1, 1, 1, 2, 2, 2], dtype=int32)
+    >>> fcluster(Z, 4, criterion='distance')
+    array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)
+
+    Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a
+    plot of the dendrogram.
+
+    """
+    return linkage(y, method='median', metric='euclidean')
+
+
+def ward(y):
+    """
+    Perform Ward's linkage on a condensed distance matrix.
+
+    See `linkage` for more information on the return structure
+    and algorithm.
+
+    The following are common calling conventions:
+
+    1. ``Z = ward(y)``
+       Performs Ward's linkage on the condensed distance matrix ``y``.
+
+    2. ``Z = ward(X)``
+       Performs Ward's linkage on the observation matrix ``X`` using
+       Euclidean distance as the distance metric.
+
+    Parameters
+    ----------
+    y : ndarray
+        A condensed distance matrix. A condensed
+        distance matrix is a flat array containing the upper
+        triangular part of the distance matrix. This is the form that
+        ``pdist`` returns.  Alternatively, a collection of
+        m observation vectors in n dimensions may be passed as
+        an m by n array.
+
+    Returns
+    -------
+    Z : ndarray
+        The hierarchical clustering encoded as a linkage matrix. See
+        `linkage` for more information on the return structure and
+        algorithm.
+
+    See Also
+    --------
+    linkage : for advanced creation of hierarchical clusterings.
+    scipy.spatial.distance.pdist : pairwise distance metrics
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import ward, fcluster
+    >>> from scipy.spatial.distance import pdist
+
+    First, we need a toy dataset to play with::
+
+        x x    x x
+        x        x
+
+        x        x
+        x x    x x
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    Then, we get a condensed distance matrix from this dataset:
+
+    >>> y = pdist(X)
+
+    Finally, we can perform the clustering:
+
+    >>> Z = ward(y)
+    >>> Z
+    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
+           [ 3.        ,  4.        ,  1.        ,  2.        ],
+           [ 6.        ,  7.        ,  1.        ,  2.        ],
+           [ 9.        , 10.        ,  1.        ,  2.        ],
+           [ 2.        , 12.        ,  1.29099445,  3.        ],
+           [ 5.        , 13.        ,  1.29099445,  3.        ],
+           [ 8.        , 14.        ,  1.29099445,  3.        ],
+           [11.        , 15.        ,  1.29099445,  3.        ],
+           [16.        , 17.        ,  5.77350269,  6.        ],
+           [18.        , 19.        ,  5.77350269,  6.        ],
+           [20.        , 21.        ,  8.16496581, 12.        ]])
+
+    The linkage matrix ``Z`` represents a dendrogram - see
+    `scipy.cluster.hierarchy.linkage` for a detailed explanation of its
+    contents.
+
+    We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster
+    each initial point would belong given a distance threshold:
+
+    >>> fcluster(Z, 0.9, criterion='distance')
+    array([ 1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12], dtype=int32)
+    >>> fcluster(Z, 1.1, criterion='distance')
+    array([1, 1, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8], dtype=int32)
+    >>> fcluster(Z, 3, criterion='distance')
+    array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)
+    >>> fcluster(Z, 9, criterion='distance')
+    array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)
+
+    Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a
+    plot of the dendrogram.
+
+    """
+    return linkage(y, method='ward', metric='euclidean')
+
+
+def linkage(y, method='single', metric='euclidean', optimal_ordering=False):
+    """
+    Perform hierarchical/agglomerative clustering.
+
+    The input y may be either a 1-D condensed distance matrix
+    or a 2-D array of observation vectors.
+
+    If y is a 1-D condensed distance matrix,
+    then y must be a :math:`\\binom{n}{2}` sized
+    vector, where n is the number of original observations paired
+    in the distance matrix. The behavior of this function is very
+    similar to the MATLAB linkage function.
+
+    A :math:`(n-1)` by 4 matrix ``Z`` is returned. At the
+    :math:`i`-th iteration, clusters with indices ``Z[i, 0]`` and
+    ``Z[i, 1]`` are combined to form cluster :math:`n + i`. A
+    cluster with an index less than :math:`n` corresponds to one of
+    the :math:`n` original observations. The distance between
+    clusters ``Z[i, 0]`` and ``Z[i, 1]`` is given by ``Z[i, 2]``. The
+    fourth value ``Z[i, 3]`` represents the number of original
+    observations in the newly formed cluster.
+
+    The following linkage methods are used to compute the distance
+    :math:`d(s, t)` between two clusters :math:`s` and
+    :math:`t`. The algorithm begins with a forest of clusters that
+    have yet to be used in the hierarchy being formed. When two
+    clusters :math:`s` and :math:`t` from this forest are combined
+    into a single cluster :math:`u`, :math:`s` and :math:`t` are
+    removed from the forest, and :math:`u` is added to the
+    forest. When only one cluster remains in the forest, the algorithm
+    stops, and this cluster becomes the root.
+
+    A distance matrix is maintained at each iteration. The ``d[i,j]``
+    entry corresponds to the distance between cluster :math:`i` and
+    :math:`j` in the original forest.
+
+    At each iteration, the algorithm must update the distance matrix
+    to reflect the distance of the newly formed cluster u with the
+    remaining clusters in the forest.
+
+    Suppose there are :math:`|u|` original observations
+    :math:`u[0], \\ldots, u[|u|-1]` in cluster :math:`u` and
+    :math:`|v|` original objects :math:`v[0], \\ldots, v[|v|-1]` in
+    cluster :math:`v`. Recall, :math:`s` and :math:`t` are
+    combined to form cluster :math:`u`. Let :math:`v` be any
+    remaining cluster in the forest that is not :math:`u`.
+
+    The following are methods for calculating the distance between the
+    newly formed cluster :math:`u` and each :math:`v`.
+
+      * method='single' assigns
+
+        .. math::
+           d(u,v) = \\min(dist(u[i],v[j]))
+
+        for all points :math:`i` in cluster :math:`u` and
+        :math:`j` in cluster :math:`v`. This is also known as the
+        Nearest Point Algorithm.
+
+      * method='complete' assigns
+
+        .. math::
+           d(u, v) = \\max(dist(u[i],v[j]))
+
+        for all points :math:`i` in cluster :math:`u` and :math:`j` in
+        cluster :math:`v`. This is also known as the Farthest Point
+        Algorithm or Voor Hees Algorithm.
+
+      * method='average' assigns
+
+        .. math::
+           d(u,v) = \\sum_{ij} \\frac{d(u[i], v[j])}
+                                   {(|u|*|v|)}
+
+        for all points :math:`i` and :math:`j` where :math:`|u|`
+        and :math:`|v|` are the cardinalities of clusters :math:`u`
+        and :math:`v`, respectively. This is also called the UPGMA
+        algorithm.
+
+      * method='weighted' assigns
+
+        .. math::
+           d(u,v) = (dist(s,v) + dist(t,v))/2
+
+        where cluster :math:`u` was formed from clusters :math:`s` and
+        :math:`t`, and :math:`v` is a remaining cluster in the forest
+        (also called WPGMA).
+
+      * method='centroid' assigns
+
+        .. math::
+           dist(s,t) = ||c_s-c_t||_2
+
+        where :math:`c_s` and :math:`c_t` are the centroids of
+        clusters :math:`s` and :math:`t`, respectively. When two
+        clusters :math:`s` and :math:`t` are combined into a new
+        cluster :math:`u`, the new centroid is computed over all the
+        original objects in clusters :math:`s` and :math:`t`. The
+        distance then becomes the Euclidean distance between the
+        centroid of :math:`u` and the centroid of a remaining cluster
+        :math:`v` in the forest. This is also known as the UPGMC
+        algorithm.
+
+      * method='median' assigns :math:`d(s,t)` like the ``centroid``
+        method. When two clusters :math:`s` and :math:`t` are combined
+        into a new cluster :math:`u`, the average of the centroids of
+        :math:`s` and :math:`t` gives the new centroid of :math:`u`. This
+        is also known as the WPGMC algorithm.
+
+      * method='ward' uses the Ward variance minimization algorithm.
+        The new entry :math:`d(u,v)` is computed as follows,
+
+        .. math::
+
+           d(u,v) = \\sqrt{\\frac{|v|+|s|}
+                               {T}d(v,s)^2
+                        + \\frac{|v|+|t|}
+                               {T}d(v,t)^2
+                        - \\frac{|v|}
+                               {T}d(s,t)^2}
+
+        where :math:`u` is the newly joined cluster consisting of
+        clusters :math:`s` and :math:`t`, :math:`v` is an unused
+        cluster in the forest, :math:`T=|v|+|s|+|t|`, and
+        :math:`|*|` is the cardinality of its argument. This is also
+        known as the incremental algorithm.
+
+    Warning: When the minimum distance pair in the forest is chosen, there
+    may be two or more pairs with the same minimum distance. This
+    implementation may choose a different minimum than the MATLAB
+    version.
+
+    Parameters
+    ----------
+    y : ndarray
+        A condensed distance matrix. A condensed distance matrix
+        is a flat array containing the upper triangular part of the
+        distance matrix.
+        This is the form that ``pdist`` returns. Alternatively, a collection of
+        :math:`m` observation vectors in :math:`n` dimensions may be passed as
+        an :math:`m` by :math:`n` array. All elements of the condensed distance
+        matrix must be finite, i.e., no NaNs or infs.
+    method : str, optional
+        The linkage algorithm to use. See the ``Linkage Methods`` section
+        above for full descriptions.
+    metric : str or function, optional
+        The distance metric to use in the case that y is a collection of
+        observation vectors; ignored otherwise. See the ``pdist``
+        function for a list of valid distance metrics. A custom distance
+        function can also be used.
+    optimal_ordering : bool, optional
+        If True, the linkage matrix will be reordered so that the distance
+        between successive leaves is minimal. This results in a more intuitive
+        tree structure when the data are visualized. Defaults to False, because
+        this algorithm can be slow, particularly on large datasets [2]_. See
+        also the `optimal_leaf_ordering` function.
+
+        .. versionadded:: 1.0.0
+
+    Returns
+    -------
+    Z : ndarray
+        The hierarchical clustering encoded as a linkage matrix.
+
+    Notes
+    -----
+    1. For method 'single', an optimized algorithm based on minimum spanning
+       tree is implemented. It has time complexity :math:`O(n^2)`.
+       For methods 'complete', 'average', 'weighted' and 'ward', an algorithm
+       called nearest-neighbors chain is implemented. It also has time
+       complexity :math:`O(n^2)`.
+       For other methods, a naive algorithm is implemented with :math:`O(n^3)`
+       time complexity.
+       All algorithms use :math:`O(n^2)` memory.
+       Refer to [1]_ for details about the algorithms.
+    2. Methods 'centroid', 'median', and 'ward' are correctly defined only if
+       Euclidean pairwise metric is used. If `y` is passed as precomputed
+       pairwise distances, then it is the user's responsibility to assure that
+       these distances are in fact Euclidean, otherwise the produced result
+       will be incorrect.
+
+    See Also
+    --------
+    scipy.spatial.distance.pdist : pairwise distance metrics
+
+    References
+    ----------
+    .. [1] Daniel Mullner, "Modern hierarchical, agglomerative clustering
+           algorithms", :arXiv:`1109.2378v1`.
+    .. [2] Ziv Bar-Joseph, David K. Gifford, Tommi S. Jaakkola, "Fast optimal
+           leaf ordering for hierarchical clustering", 2001. Bioinformatics
+           :doi:`10.1093/bioinformatics/17.suppl_1.S22`
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import dendrogram, linkage
+    >>> from matplotlib import pyplot as plt
+    >>> X = [[i] for i in [2, 8, 0, 4, 1, 9, 9, 0]]
+
+    >>> Z = linkage(X, 'ward')
+    >>> fig = plt.figure(figsize=(25, 10))
+    >>> dn = dendrogram(Z)
+
+    >>> Z = linkage(X, 'single')
+    >>> fig = plt.figure(figsize=(25, 10))
+    >>> dn = dendrogram(Z)
+    >>> plt.show()
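+
+    The ``optimal_ordering`` flag can be combined with any method (a quick
+    sketch; only the leaf order of the dendrogram changes, not the merge
+    distances):
+
+    >>> Z = linkage(X, 'ward', optimal_ordering=True)
+    >>> fig = plt.figure(figsize=(25, 10))
+    >>> dn = dendrogram(Z)
+    >>> plt.show()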
+    """
+    if method not in _LINKAGE_METHODS:
+        raise ValueError("Invalid method: {0}".format(method))
+
+    y = _convert_to_double(np.asarray(y, order='c'))
+
+    if y.ndim == 1:
+        distance.is_valid_y(y, throw=True, name='y')
+        [y] = _copy_arrays_if_base_present([y])
+    elif y.ndim == 2:
+        if method in _EUCLIDEAN_METHODS and metric != 'euclidean':
+            raise ValueError("Method '{0}' requires the distance metric "
+                             "to be Euclidean".format(method))
+        if y.shape[0] == y.shape[1] and np.allclose(np.diag(y), 0):
+            if np.all(y >= 0) and np.allclose(y, y.T):
+                _warning('The symmetric non-negative hollow observation '
+                         'matrix looks suspiciously like an uncondensed '
+                         'distance matrix')
+        y = distance.pdist(y, metric)
+    else:
+        raise ValueError("`y` must be 1 or 2 dimensional.")
+
+    if not np.all(np.isfinite(y)):
+        raise ValueError("The condensed distance matrix must contain only "
+                         "finite values.")
+
+    n = int(distance.num_obs_y(y))
+    method_code = _LINKAGE_METHODS[method]
+
+    if method == 'single':
+        result = _hierarchy.mst_single_linkage(y, n)
+    elif method in ['complete', 'average', 'weighted', 'ward']:
+        result = _hierarchy.nn_chain(y, n, method_code)
+    else:
+        result = _hierarchy.fast_linkage(y, n, method_code)
+
+    if optimal_ordering:
+        return optimal_leaf_ordering(result, y)
+    else:
+        return result
+
+
+class ClusterNode:
+    """
+    A tree node class for representing a cluster.
+
+    Leaf nodes correspond to original observations, while non-leaf nodes
+    correspond to non-singleton clusters.
+
+    The `to_tree` function converts a matrix returned by the linkage
+    function into an easy-to-use tree representation.
+
+    All parameter names are also attributes.
+
+    Parameters
+    ----------
+    id : int
+        The node id.
+    left : ClusterNode instance, optional
+        The left child tree node.
+    right : ClusterNode instance, optional
+        The right child tree node.
+    dist : float, optional
+        Distance for this cluster in the linkage matrix.
+    count : int, optional
+        The number of samples in this cluster.
+
+    See Also
+    --------
+    to_tree : for converting a linkage matrix ``Z`` into a tree object.
+
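+    Examples
+    --------
+    A minimal sketch: `ClusterNode` objects are normally obtained from
+    `to_tree` rather than constructed directly.
+
+    >>> from scipy.cluster.hierarchy import linkage, to_tree
+    >>> root = to_tree(linkage([1.0, 2.0, 3.0]))  # 3 observations
+    >>> root.is_leaf(), root.get_count()
+    (False, 3)
+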
+    """
+
+    def __init__(self, id, left=None, right=None, dist=0, count=1):
+        if id < 0:
+            raise ValueError('The id must be non-negative.')
+        if dist < 0:
+            raise ValueError('The distance must be non-negative.')
+        if (left is None and right is not None) or \
+           (left is not None and right is None):
+            raise ValueError('Only full or proper binary trees are permitted.'
+                             '  This node has one child.')
+        if count < 1:
+            raise ValueError('A cluster must contain at least one original '
+                             'observation.')
+        self.id = id
+        self.left = left
+        self.right = right
+        self.dist = dist
+        if self.left is None:
+            self.count = count
+        else:
+            self.count = left.count + right.count
+
+    def __lt__(self, node):
+        if not isinstance(node, ClusterNode):
+            raise ValueError("Can't compare ClusterNode "
+                             "to type {}".format(type(node)))
+        return self.dist < node.dist
+
+    def __gt__(self, node):
+        if not isinstance(node, ClusterNode):
+            raise ValueError("Can't compare ClusterNode "
+                             "to type {}".format(type(node)))
+        return self.dist > node.dist
+
+    def __eq__(self, node):
+        if not isinstance(node, ClusterNode):
+            raise ValueError("Can't compare ClusterNode "
+                             "to type {}".format(type(node)))
+        return self.dist == node.dist
+
+    def get_id(self):
+        """
+        The identifier of the target node.
+
+        For ``0 <= i < n``, ``i`` corresponds to original observation ``i``.
+        For ``n <= i < 2n-1``, ``i`` corresponds to the non-singleton cluster
+        formed at iteration ``i-n``.
+
+        Returns
+        -------
+        id : int
+            The identifier of the target node.
+
+        """
+        return self.id
+
+    def get_count(self):
+        """
+        The number of leaf nodes (original observations) belonging to
+        the target cluster node. If the target node is a leaf, 1 is
+        returned.
+
+        Returns
+        -------
+        get_count : int
+            The number of leaf nodes below the target node.
+
+        """
+        return self.count
+
+    def get_left(self):
+        """
+        Return a reference to the left child tree object.
+
+        Returns
+        -------
+        left : ClusterNode
+            The left child of the target node. If the node is a leaf,
+            None is returned.
+
+        """
+        return self.left
+
+    def get_right(self):
+        """
+        Return a reference to the right child tree object.
+
+        Returns
+        -------
+        right : ClusterNode
+            The right child of the target node. If the node is a leaf,
+            None is returned.
+
+        """
+        return self.right
+
+    def is_leaf(self):
+        """
+        Return True if the target node is a leaf.
+
+        Returns
+        -------
+        leafness : bool
+            True if the target node is a leaf node.
+
+        """
+        return self.left is None
+
+    def pre_order(self, func=(lambda x: x.id)):
+        """
+        Perform pre-order traversal without recursive function calls.
+
+        When a leaf node is first encountered, ``func`` is called with
+        the leaf node as its argument, and its result is appended to
+        the list.
+
+        For example, the statement::
+
+           ids = root.pre_order(lambda x: x.id)
+
+        returns a list of the node ids corresponding to the leaf nodes
+        of the tree as they appear from left to right.
+
+        Parameters
+        ----------
+        func : function
+            Applied to each leaf ClusterNode object in the pre-order traversal.
+            Given the ``i``-th leaf node in the pre-order traversal ``n[i]``,
+            the result of ``func(n[i])`` is stored in ``L[i]``. If not
+            provided, the index of the original observation to which the node
+            corresponds is used.
+
+        Returns
+        -------
+        L : list
+            The pre-order traversal.
+
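+        Examples
+        --------
+        A minimal sketch (a root node from a three-observation linkage,
+        built via `to_tree`):
+
+        >>> from scipy.cluster.hierarchy import linkage, to_tree
+        >>> root = to_tree(linkage([1.0, 2.0, 3.0]))
+        >>> sorted(root.pre_order())
+        [0, 1, 2]
+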
+        """
+        # Do a preorder traversal, caching the result. To avoid recursion,
+        # we maintain an explicit stack of the nodes still to be visited.
+        n = self.count
+
+        curNode = [None] * (2 * n)
+        lvisited = set()
+        rvisited = set()
+        curNode[0] = self
+        k = 0
+        preorder = []
+        while k >= 0:
+            nd = curNode[k]
+            ndid = nd.id
+            if nd.is_leaf():
+                preorder.append(func(nd))
+                k = k - 1
+            else:
+                if ndid not in lvisited:
+                    curNode[k + 1] = nd.left
+                    lvisited.add(ndid)
+                    k = k + 1
+                elif ndid not in rvisited:
+                    curNode[k + 1] = nd.right
+                    rvisited.add(ndid)
+                    k = k + 1
+                # If we've visited the left and right of this non-leaf
+                # node already, go up in the tree.
+                else:
+                    k = k - 1
+
+        return preorder
+
+
+_cnode_bare = ClusterNode(0)
+_cnode_type = type(ClusterNode)
+
+
+def _order_cluster_tree(Z):
+    """
+    Return clustering nodes in bottom-up order by distance.
+
+    Parameters
+    ----------
+    Z : scipy.cluster.linkage array
+        The linkage matrix.
+
+    Returns
+    -------
+    nodes : list
+        A list of ClusterNode objects.
+    """
+    q = deque()
+    tree = to_tree(Z)
+    q.append(tree)
+    nodes = []
+
+    while q:
+        node = q.popleft()
+        if not node.is_leaf():
+            bisect.insort_left(nodes, node)
+            q.append(node.get_right())
+            q.append(node.get_left())
+    return nodes
+
+
+def cut_tree(Z, n_clusters=None, height=None):
+    """
+    Given a linkage matrix Z, return the cut tree.
+
+    Parameters
+    ----------
+    Z : scipy.cluster.linkage array
+        The linkage matrix.
+    n_clusters : array_like, optional
+        Number of clusters in the tree at the cut point.
+    height : array_like, optional
+        The height at which to cut the tree. Only possible for ultrametric
+        trees.
+
+    Returns
+    -------
+    cutree : array
+        An array indicating group membership at each agglomeration step. I.e.,
+        for a full cut tree, in the first column each data point is in its own
+        cluster. At the next step, two nodes are merged. Finally, all
+        singleton and non-singleton clusters are in one group. If `n_clusters`
+        or `height` are given, the columns correspond to the columns of
+        `n_clusters` or `height`.
+
+    Examples
+    --------
+    >>> from scipy import cluster
+    >>> import numpy as np
+    >>> from numpy.random import default_rng
+    >>> rng = default_rng()
+    >>> X = rng.random((50, 4))
+    >>> Z = cluster.hierarchy.ward(X)
+    >>> cutree = cluster.hierarchy.cut_tree(Z, n_clusters=[5, 10])
+    >>> cutree[:10]
+    array([[0, 0],
+           [1, 1],
+           [2, 2],
+           [3, 3],
+           [3, 4],
+           [2, 2],
+           [0, 0],
+           [1, 5],
+           [3, 6],
+           [4, 7]])  # random
+
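+    The tree can also be cut at given heights (a quick sketch; these cut
+    heights are arbitrary for this random data):
+
+    >>> cutree_h = cluster.hierarchy.cut_tree(Z, height=[0.5, 1.5])
+    >>> cutree_h.shape
+    (50, 2)
+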
+    """
+    nobs = num_obs_linkage(Z)
+    nodes = _order_cluster_tree(Z)
+
+    if height is not None and n_clusters is not None:
+        raise ValueError("At least one of either height or n_clusters "
+                         "must be None")
+    elif height is None and n_clusters is None:  # return the full cut tree
+        cols_idx = np.arange(nobs)
+    elif height is not None:
+        heights = np.array([x.dist for x in nodes])
+        cols_idx = np.searchsorted(heights, height)
+    else:
+        cols_idx = nobs - np.searchsorted(np.arange(nobs), n_clusters)
+
+    try:
+        n_cols = len(cols_idx)
+    except TypeError:  # scalar
+        n_cols = 1
+        cols_idx = np.array([cols_idx])
+
+    groups = np.zeros((n_cols, nobs), dtype=int)
+    last_group = np.arange(nobs)
+    if 0 in cols_idx:
+        groups[0] = last_group
+
+    for i, node in enumerate(nodes):
+        idx = node.pre_order()
+        this_group = last_group.copy()
+        this_group[idx] = last_group[idx].min()
+        this_group[this_group > last_group[idx].max()] -= 1
+        if i + 1 in cols_idx:
+            groups[np.nonzero(i + 1 == cols_idx)[0]] = this_group
+        last_group = this_group
+
+    return groups.T
+
+
+def to_tree(Z, rd=False):
+    """
+    Convert a linkage matrix into an easy-to-use tree object.
+
+    The reference to the root `ClusterNode` object is returned (by default).
+
+    Each `ClusterNode` object has a ``left``, ``right``, ``dist``, ``id``,
+    and ``count`` attribute. The left and right attributes point to
+    ClusterNode objects that were combined to generate the cluster.
+    If both are None then the `ClusterNode` object is a leaf node, its count
+    must be 1, and its distance is meaningless but set to 0.
+
+    *Note: This function is provided for the convenience of the library
+    user. ClusterNodes are not used as input to any of the functions in this
+    library.*
+
+    Parameters
+    ----------
+    Z : ndarray
+        The linkage matrix in proper form (see the `linkage`
+        function documentation).
+    rd : bool, optional
+        When False (default), a reference to the root `ClusterNode` object is
+        returned.  Otherwise, a tuple ``(r, d)`` is returned. ``r`` is a
+        reference to the root node while ``d`` is a list of `ClusterNode`
+        objects - one per original entry in the linkage matrix plus entries
+        for all clustering steps. If a cluster id is
+        less than the number of samples ``n`` in the data that the linkage
+        matrix describes, then it corresponds to a singleton cluster (leaf
+        node).
+        See `linkage` for more information on the assignment of cluster ids
+        to clusters.
+
+    Returns
+    -------
+    tree : ClusterNode or tuple (ClusterNode, list of ClusterNode)
+        If ``rd`` is False, a `ClusterNode`.
+        If ``rd`` is True, a tuple ``(r, d)``, where ``d`` is a list of
+        length ``2*n - 1``, with ``n`` the number of samples. See the
+        description of `rd` above for more details.
+
+    See Also
+    --------
+    linkage, is_valid_linkage, ClusterNode
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.cluster import hierarchy
+    >>> rng = np.random.default_rng()
+    >>> x = rng.random((5, 2))
+    >>> Z = hierarchy.linkage(x)
+    >>> hierarchy.to_tree(Z)
+    <scipy.cluster.hierarchy.ClusterNode object at ...
+    >>> rootnode, nodelist = hierarchy.to_tree(Z, rd=True)
+    >>> rootnode
+    <scipy.cluster.hierarchy.ClusterNode object at ...
+    >>> len(nodelist)
+    9
+
+    """
+    Z = np.asarray(Z, order='c')
+    is_valid_linkage(Z, throw=True, name='Z')
+
+    # Number of original objects is equal to the number of rows plus 1.
+    n = Z.shape[0] + 1
+
+    # Create a list full of None's to store the node objects
+    d = [None] * (n * 2 - 1)
+
+    # Create the nodes corresponding to the n original objects.
+    for i in range(0, n):
+        d[i] = ClusterNode(i)
+
+    nd = None
+
+    for i, row in enumerate(Z):
+        fi = int(row[0])
+        fj = int(row[1])
+        if fi > i + n:
+            raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
+                              'is used before it is formed. See row %d, '
+                              'column 0') % fi)
+        if fj > i + n:
+            raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
+                              'is used before it is formed. See row %d, '
+                              'column 1') % fj)
+
+        nd = ClusterNode(i + n, d[fi], d[fj], row[2])
+        #                ^ id   ^ left ^ right ^ dist
+        if row[3] != nd.count:
+            raise ValueError(('Corrupt matrix Z. The count Z[%d,3] is '
+                              'incorrect.') % i)
+        d[n + i] = nd
+
+    if rd:
+        return (nd, d)
+    else:
+        return nd
+
+
+def optimal_leaf_ordering(Z, y, metric='euclidean'):
+    """
+    Given a linkage matrix Z and distance, reorder the cut tree.
+
+    Parameters
+    ----------
+    Z : ndarray
+        The hierarchical clustering encoded as a linkage matrix. See
+        `linkage` for more information on the return structure and
+        algorithm.
+    y : ndarray
+        The condensed distance matrix from which Z was generated.
+        Alternatively, a collection of m observation vectors in n
+        dimensions may be passed as an m by n array.
+    metric : str or function, optional
+        The distance metric to use in the case that y is a collection of
+        observation vectors; ignored otherwise. See the ``pdist``
+        function for a list of valid distance metrics. A custom distance
+        function can also be used.
+
+    Returns
+    -------
+    Z_ordered : ndarray
+        A copy of the linkage matrix Z, reordered to minimize the distance
+        between adjacent leaves.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.cluster import hierarchy
+    >>> rng = np.random.default_rng()
+    >>> X = rng.standard_normal((10, 10))
+    >>> Z = hierarchy.ward(X)
+    >>> hierarchy.leaves_list(Z)
+    array([0, 3, 1, 9, 2, 5, 7, 4, 6, 8], dtype=int32)  # random
+    >>> hierarchy.leaves_list(hierarchy.optimal_leaf_ordering(Z, X))
+    array([3, 0, 2, 5, 7, 4, 8, 6, 9, 1], dtype=int32)  # random
+
+    """
+    Z = np.asarray(Z, order='c')
+    is_valid_linkage(Z, throw=True, name='Z')
+
+    y = _convert_to_double(np.asarray(y, order='c'))
+
+    if y.ndim == 1:
+        distance.is_valid_y(y, throw=True, name='y')
+        [y] = _copy_arrays_if_base_present([y])
+    elif y.ndim == 2:
+        if y.shape[0] == y.shape[1] and np.allclose(np.diag(y), 0):
+            if np.all(y >= 0) and np.allclose(y, y.T):
+                _warning('The symmetric non-negative hollow observation '
+                         'matrix looks suspiciously like an uncondensed '
+                         'distance matrix')
+        y = distance.pdist(y, metric)
+    else:
+        raise ValueError("`y` must be 1 or 2 dimensional.")
+
+    if not np.all(np.isfinite(y)):
+        raise ValueError("The condensed distance matrix must contain only "
+                         "finite values.")
+
+    return _optimal_leaf_ordering.optimal_leaf_ordering(Z, y)
+
+
+def _convert_to_bool(X):
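+    # Coerce X to a contiguous boolean ndarray (the C routines require
+    # contiguous memory), copying only when needed.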
+    if X.dtype != bool:
+        X = X.astype(bool)
+    if not X.flags.contiguous:
+        X = X.copy()
+    return X
+
+
+def _convert_to_double(X):
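+    # Coerce X to a contiguous double-precision ndarray (the C routines
+    # require contiguous double arrays), copying only when needed.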
+    if X.dtype != np.double:
+        X = X.astype(np.double)
+    if not X.flags.contiguous:
+        X = X.copy()
+    return X
+
+
+def cophenet(Z, Y=None):
+    """
+    Calculate the cophenetic distances between each observation in
+    the hierarchical clustering defined by the linkage ``Z``.
+
+    Suppose ``p`` and ``q`` are original observations in
+    disjoint clusters ``s`` and ``t``, respectively, and
+    ``s`` and ``t`` are joined by a direct parent cluster
+    ``u``. The cophenetic distance between observations
+    ``p`` and ``q`` is simply the distance between
+    clusters ``s`` and ``t``.
+
+    Parameters
+    ----------
+    Z : ndarray
+        The hierarchical clustering encoded as an array
+        (see `linkage` function).
+    Y : ndarray (optional)
+        Calculates the cophenetic correlation coefficient ``c`` of a
+        hierarchical clustering defined by the linkage matrix `Z`
+        of a set of :math:`n` observations in :math:`m`
+        dimensions. `Y` is the condensed distance matrix from which
+        `Z` was generated.
+
+    Returns
+    -------
+    c : ndarray
+        The cophenetic correlation coefficient (if ``Y`` is passed).
+    d : ndarray
+        The cophenetic distance matrix in condensed form. The
+        :math:`ij` th entry is the cophenetic distance between
+        original observations :math:`i` and :math:`j`.
+
+    See Also
+    --------
+    linkage :
+        for a description of what a linkage matrix is.
+    scipy.spatial.distance.squareform :
+        transforming condensed matrices into square ones.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import single, cophenet
+    >>> from scipy.spatial.distance import pdist, squareform
+
+    Given a dataset ``X`` and a linkage matrix ``Z``, the cophenetic distance
+    between two points of ``X`` is the distance between the largest two
+    distinct clusters that each of the points belongs to:
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    ``X`` corresponds to this dataset ::
+
+        x x    x x
+        x        x
+
+        x        x
+        x x    x x
+
+    >>> Z = single(pdist(X))
+    >>> Z
+    array([[ 0.,  1.,  1.,  2.],
+           [ 2., 12.,  1.,  3.],
+           [ 3.,  4.,  1.,  2.],
+           [ 5., 14.,  1.,  3.],
+           [ 6.,  7.,  1.,  2.],
+           [ 8., 16.,  1.,  3.],
+           [ 9., 10.,  1.,  2.],
+           [11., 18.,  1.,  3.],
+           [13., 15.,  2.,  6.],
+           [17., 20.,  2.,  9.],
+           [19., 21.,  2., 12.]])
+    >>> cophenet(Z)
+    array([1., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 2., 2., 2., 2., 2.,
+           2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 1., 2., 2.,
+           2., 2., 2., 2., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2.,
+           1., 1., 2., 2., 2., 1., 2., 2., 2., 2., 2., 2., 1., 1., 1.])
+
+    The output of the `scipy.cluster.hierarchy.cophenet` method is
+    represented in condensed form. We can use
+    `scipy.spatial.distance.squareform` to see the output as a
+    regular matrix (where each element ``ij`` denotes the cophenetic distance
+    between each ``i``, ``j`` pair of points in ``X``):
+
+    >>> squareform(cophenet(Z))
+    array([[0., 1., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2.],
+           [1., 0., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2.],
+           [1., 1., 0., 2., 2., 2., 2., 2., 2., 2., 2., 2.],
+           [2., 2., 2., 0., 1., 1., 2., 2., 2., 2., 2., 2.],
+           [2., 2., 2., 1., 0., 1., 2., 2., 2., 2., 2., 2.],
+           [2., 2., 2., 1., 1., 0., 2., 2., 2., 2., 2., 2.],
+           [2., 2., 2., 2., 2., 2., 0., 1., 1., 2., 2., 2.],
+           [2., 2., 2., 2., 2., 2., 1., 0., 1., 2., 2., 2.],
+           [2., 2., 2., 2., 2., 2., 1., 1., 0., 2., 2., 2.],
+           [2., 2., 2., 2., 2., 2., 2., 2., 2., 0., 1., 1.],
+           [2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 0., 1.],
+           [2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 1., 0.]])
+
+    In this example, the cophenetic distance between points of ``X`` that are
+    very close (i.e., in the same corner) is 1. For other pairs of points it
+    is 2, because those points are located in clusters at different corners,
+    so the distance between these clusters is larger.
+
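+    Passing the original condensed distances as a second argument also
+    returns the cophenetic correlation coefficient (a quick sketch of the
+    two-output form):
+
+    >>> c, coph_dists = cophenet(Z, pdist(X))
+    >>> coph_dists.shape   # condensed form, same length as ``pdist(X)``
+    (66,)
+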
+    """
+    Z = np.asarray(Z, order='c')
+    is_valid_linkage(Z, throw=True, name='Z')
+    Zs = Z.shape
+    n = Zs[0] + 1
+
+    zz = np.zeros((n * (n-1)) // 2, dtype=np.double)
+    # The C code does not support striding using strides;
+    # the dimensions are used instead.
+    Z = _convert_to_double(Z)
+
+    _hierarchy.cophenetic_distances(Z, zz, int(n))
+    if Y is None:
+        return zz
+
+    Y = np.asarray(Y, order='c')
+    distance.is_valid_y(Y, throw=True, name='Y')
+
+    z = zz.mean()
+    y = Y.mean()
+    Yy = Y - y
+    Zz = zz - z
+    numerator = (Yy * Zz)
+    denomA = Yy**2
+    denomB = Zz**2
+    c = numerator.sum() / np.sqrt((denomA.sum() * denomB.sum()))
+    return (c, zz)
+
+
+def inconsistent(Z, d=2):
+    r"""
+    Calculate inconsistency statistics on a linkage matrix.
+
+    Parameters
+    ----------
+    Z : ndarray
+        The :math:`(n-1)` by 4 matrix encoding the linkage (hierarchical
+        clustering).  See `linkage` documentation for more information on its
+        form.
+    d : int, optional
+        The number of links up to `d` levels below each non-singleton cluster.
+
+    Returns
+    -------
+    R : ndarray
+        A :math:`(n-1)` by 4 matrix where the ``i``'th row contains the link
+        statistics for the non-singleton cluster ``i``. The link statistics are
+        computed over the link heights for links :math:`d` levels below the
+        cluster ``i``. ``R[i,0]`` and ``R[i,1]`` are the mean and standard
+        deviation of the link heights, respectively; ``R[i,2]`` is the number
+        of links included in the calculation; and ``R[i,3]`` is the
+        inconsistency coefficient,
+
+        .. math:: \frac{\mathtt{Z[i,2]} - \mathtt{R[i,0]}}{\mathtt{R[i,1]}}
+
+    Notes
+    -----
+    This function behaves similarly to the MATLAB(TM) ``inconsistent``
+    function.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import inconsistent, linkage
+    >>> from matplotlib import pyplot as plt
+    >>> X = [[i] for i in [2, 8, 0, 4, 1, 9, 9, 0]]
+    >>> Z = linkage(X, 'ward')
+    >>> print(Z)
+    [[ 5.          6.          0.          2.        ]
+     [ 2.          7.          0.          2.        ]
+     [ 0.          4.          1.          2.        ]
+     [ 1.          8.          1.15470054  3.        ]
+     [ 9.         10.          2.12132034  4.        ]
+     [ 3.         12.          4.11096096  5.        ]
+     [11.         13.         14.07183949  8.        ]]
+    >>> inconsistent(Z)
+    array([[ 0.        ,  0.        ,  1.        ,  0.        ],
+           [ 0.        ,  0.        ,  1.        ,  0.        ],
+           [ 1.        ,  0.        ,  1.        ,  0.        ],
+           [ 0.57735027,  0.81649658,  2.        ,  0.70710678],
+           [ 1.04044011,  1.06123822,  3.        ,  1.01850858],
+           [ 3.11614065,  1.40688837,  2.        ,  0.70710678],
+           [ 6.44583366,  6.76770586,  3.        ,  1.12682288]])
+
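+    With ``d=1``, only each cluster's own link enters the statistics, so the
+    standard deviation and inconsistency coefficient are zero (a quick
+    sketch, shown here for the first row):
+
+    >>> inconsistent(Z, 1)[0]
+    array([0., 0., 1., 0.])
+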
+    """
+    Z = np.asarray(Z, order='c')
+
+    Zs = Z.shape
+    is_valid_linkage(Z, throw=True, name='Z')
+    if d != np.floor(d) or d < 0:
+        raise ValueError('The second argument d must be a nonnegative '
+                         'integer value.')
+
+    # The C code does not support striding using strides;
+    # the dimensions are used instead.
+    [Z] = _copy_arrays_if_base_present([Z])
+
+    n = Zs[0] + 1
+    R = np.zeros((n - 1, 4), dtype=np.double)
+
+    _hierarchy.inconsistent(Z, R, int(n), int(d))
+    return R
+
+
+def from_mlab_linkage(Z):
+    """
+    Convert a linkage matrix generated by MATLAB(TM) to a new
+    linkage matrix compatible with this module.
+
+    The conversion does two things:
+
+     * the indices are converted from ``1..N`` to ``0..(N-1)`` form,
+       and
+
+     * a fourth column ``Z[:,3]`` is added where ``Z[i,3]`` represents the
+       number of original observations (leaves) in the non-singleton
+       cluster ``i``.
+
+    This function is useful when loading in linkages from legacy data
+    files generated by MATLAB.
+
+    Parameters
+    ----------
+    Z : ndarray
+        A linkage matrix generated by MATLAB(TM).
+
+    Returns
+    -------
+    ZS : ndarray
+        A linkage matrix compatible with ``scipy.cluster.hierarchy``.
+
+    See Also
+    --------
+    linkage : for a description of what a linkage matrix is.
+    to_mlab_linkage : transform from SciPy to MATLAB format.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.cluster.hierarchy import ward, from_mlab_linkage
+
+    Given a linkage matrix in MATLAB format ``mZ``, we can use
+    `scipy.cluster.hierarchy.from_mlab_linkage` to import
+    it into SciPy format:
+
+    >>> mZ = np.array([[1, 2, 1], [4, 5, 1], [7, 8, 1],
+    ...                [10, 11, 1], [3, 13, 1.29099445],
+    ...                [6, 14, 1.29099445],
+    ...                [9, 15, 1.29099445],
+    ...                [12, 16, 1.29099445],
+    ...                [17, 18, 5.77350269],
+    ...                [19, 20, 5.77350269],
+    ...                [21, 22,  8.16496581]])
+
+    >>> Z = from_mlab_linkage(mZ)
+    >>> Z
+    array([[  0.        ,   1.        ,   1.        ,   2.        ],
+           [  3.        ,   4.        ,   1.        ,   2.        ],
+           [  6.        ,   7.        ,   1.        ,   2.        ],
+           [  9.        ,  10.        ,   1.        ,   2.        ],
+           [  2.        ,  12.        ,   1.29099445,   3.        ],
+           [  5.        ,  13.        ,   1.29099445,   3.        ],
+           [  8.        ,  14.        ,   1.29099445,   3.        ],
+           [ 11.        ,  15.        ,   1.29099445,   3.        ],
+           [ 16.        ,  17.        ,   5.77350269,   6.        ],
+           [ 18.        ,  19.        ,   5.77350269,   6.        ],
+           [ 20.        ,  21.        ,   8.16496581,  12.        ]])
+
+    As expected, the linkage matrix ``Z`` returned includes an
+    additional column counting the number of original samples in
+    each cluster. Also, all cluster indices are reduced by 1
+    (MATLAB format uses 1-indexing, whereas SciPy uses 0-indexing).
+
+    """
+    Z = np.asarray(Z, dtype=np.double, order='c')
+    Zs = Z.shape
+
+    # If it's empty, return it.
+    if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
+        return Z.copy()
+
+    if len(Zs) != 2:
+        raise ValueError("The linkage array must be rectangular.")
+
+    # If it contains no rows, return it.
+    if Zs[0] == 0:
+        return Z.copy()
+
+    Zpart = Z.copy()
+    if Zpart[:, 0:2].min() != 1.0 and Zpart[:, 0:2].max() != 2 * Zs[0]:
+        raise ValueError('The format of the indices is not 1..N')
+
+    Zpart[:, 0:2] -= 1.0
+    CS = np.zeros((Zs[0],), dtype=np.double)
+    _hierarchy.calculate_cluster_sizes(Zpart, CS, int(Zs[0]) + 1)
+    return np.hstack([Zpart, CS.reshape(Zs[0], 1)])
+
+
+def to_mlab_linkage(Z):
+    """
+    Convert a linkage matrix to a MATLAB(TM) compatible one.
+
+    Converts a linkage matrix ``Z`` generated by the linkage function
+    of this module to a MATLAB(TM) compatible one. The return linkage
+    matrix has the last column removed and the cluster indices are
+    converted to ``1..N`` indexing.
+
+    Parameters
+    ----------
+    Z : ndarray
+        A linkage matrix generated by ``scipy.cluster.hierarchy``.
+
+    Returns
+    -------
+    to_mlab_linkage : ndarray
+        A linkage matrix compatible with MATLAB(TM)'s hierarchical
+        clustering functions.
+
+        The return linkage matrix has the last column removed
+        and the cluster indices are converted to ``1..N`` indexing.
+
+    See Also
+    --------
+    linkage : for a description of what a linkage matrix is.
+    from_mlab_linkage : transform from MATLAB to SciPy format.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import ward, to_mlab_linkage
+    >>> from scipy.spatial.distance import pdist
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> Z = ward(pdist(X))
+    >>> Z
+    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
+           [ 3.        ,  4.        ,  1.        ,  2.        ],
+           [ 6.        ,  7.        ,  1.        ,  2.        ],
+           [ 9.        , 10.        ,  1.        ,  2.        ],
+           [ 2.        , 12.        ,  1.29099445,  3.        ],
+           [ 5.        , 13.        ,  1.29099445,  3.        ],
+           [ 8.        , 14.        ,  1.29099445,  3.        ],
+           [11.        , 15.        ,  1.29099445,  3.        ],
+           [16.        , 17.        ,  5.77350269,  6.        ],
+           [18.        , 19.        ,  5.77350269,  6.        ],
+           [20.        , 21.        ,  8.16496581, 12.        ]])
+
+    After a linkage matrix ``Z`` has been created, we can use
+    `scipy.cluster.hierarchy.to_mlab_linkage` to convert it
+    into MATLAB format:
+
+    >>> mZ = to_mlab_linkage(Z)
+    >>> mZ
+    array([[  1.        ,   2.        ,   1.        ],
+           [  4.        ,   5.        ,   1.        ],
+           [  7.        ,   8.        ,   1.        ],
+           [ 10.        ,  11.        ,   1.        ],
+           [  3.        ,  13.        ,   1.29099445],
+           [  6.        ,  14.        ,   1.29099445],
+           [  9.        ,  15.        ,   1.29099445],
+           [ 12.        ,  16.        ,   1.29099445],
+           [ 17.        ,  18.        ,   5.77350269],
+           [ 19.        ,  20.        ,   5.77350269],
+           [ 21.        ,  22.        ,   8.16496581]])
+
+    The new linkage matrix ``mZ`` uses 1-indexing for all the
+    clusters (instead of 0-indexing). Also, the last column of
+    the original linkage matrix has been dropped.
+
+    """
+    Z = np.asarray(Z, order='c', dtype=np.double)
+    Zs = Z.shape
+    if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
+        return Z.copy()
+    is_valid_linkage(Z, throw=True, name='Z')
+
+    ZP = Z[:, 0:3].copy()
+    ZP[:, 0:2] += 1.0
+
+    return ZP
+
+
+def is_monotonic(Z):
+    """
+    Return True if the linkage passed is monotonic.
+
+    The linkage is monotonic if for every cluster :math:`s` and :math:`t`
+    joined, the distance between them is no less than the distance
+    between any previously joined clusters.
+
+    Parameters
+    ----------
+    Z : ndarray
+        The linkage matrix to check for monotonicity.
+
+    Returns
+    -------
+    b : bool
+        A boolean indicating whether the linkage is monotonic.
+
+    See Also
+    --------
+    linkage : for a description of what a linkage matrix is.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import median, ward, is_monotonic
+    >>> from scipy.spatial.distance import pdist
+
+    By definition, some hierarchical clustering algorithms - such as
+    `scipy.cluster.hierarchy.ward` - produce monotonic assignments of
+    samples to clusters; however, this is not always true for other
+    hierarchical methods - e.g. `scipy.cluster.hierarchy.median`.
+
+    Given a linkage matrix ``Z`` (as the result of a hierarchical clustering
+    method) we can test programmatically whether it has the monotonicity
+    property or not, using `scipy.cluster.hierarchy.is_monotonic`:
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> Z = ward(pdist(X))
+    >>> Z
+    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
+           [ 3.        ,  4.        ,  1.        ,  2.        ],
+           [ 6.        ,  7.        ,  1.        ,  2.        ],
+           [ 9.        , 10.        ,  1.        ,  2.        ],
+           [ 2.        , 12.        ,  1.29099445,  3.        ],
+           [ 5.        , 13.        ,  1.29099445,  3.        ],
+           [ 8.        , 14.        ,  1.29099445,  3.        ],
+           [11.        , 15.        ,  1.29099445,  3.        ],
+           [16.        , 17.        ,  5.77350269,  6.        ],
+           [18.        , 19.        ,  5.77350269,  6.        ],
+           [20.        , 21.        ,  8.16496581, 12.        ]])
+    >>> is_monotonic(Z)
+    True
+
+    >>> Z = median(pdist(X))
+    >>> Z
+    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
+           [ 3.        ,  4.        ,  1.        ,  2.        ],
+           [ 9.        , 10.        ,  1.        ,  2.        ],
+           [ 6.        ,  7.        ,  1.        ,  2.        ],
+           [ 2.        , 12.        ,  1.11803399,  3.        ],
+           [ 5.        , 13.        ,  1.11803399,  3.        ],
+           [ 8.        , 15.        ,  1.11803399,  3.        ],
+           [11.        , 14.        ,  1.11803399,  3.        ],
+           [18.        , 19.        ,  3.        ,  6.        ],
+           [16.        , 17.        ,  3.5       ,  6.        ],
+           [20.        , 21.        ,  3.25      , 12.        ]])
+    >>> is_monotonic(Z)
+    False
+
+    Note that this method is equivalent to just verifying that the distances
+    in the third column of the linkage matrix appear in a monotonically
+    increasing order.
+
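+    For instance, the equivalent check on the (non-monotonic) ``Z`` from the
+    `median` example above:
+
+    >>> import numpy as np
+    >>> bool(np.all(Z[1:, 2] >= Z[:-1, 2]))
+    False
+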
+    """
+    Z = np.asarray(Z, order='c')
+    is_valid_linkage(Z, throw=True, name='Z')
+
+    # We expect each value to be no greater than its successor.
+    return (Z[1:, 2] >= Z[:-1, 2]).all()
+
+
+def is_valid_im(R, warning=False, throw=False, name=None):
+    """Return True if the inconsistency matrix passed is valid.
+
+    It must be an :math:`n` by 4 array of doubles. The standard
+    deviations ``R[:,1]`` must be nonnegative. The link counts
+    ``R[:,2]`` must be positive and no greater than :math:`n-1`.
+
+    Parameters
+    ----------
+    R : ndarray
+        The inconsistency matrix to check for validity.
+    warning : bool, optional
+        When True, issues a Python warning if the linkage
+        matrix passed is invalid.
+    throw : bool, optional
+        When True, throws a Python exception if the linkage
+        matrix passed is invalid.
+    name : str, optional
+        This string refers to the variable name of the invalid
+        linkage matrix.
+
+    Returns
+    -------
+    b : bool
+        True if the inconsistency matrix is valid.
+
+    See Also
+    --------
+    linkage : for a description of what a linkage matrix is.
+    inconsistent : for the creation of an inconsistency matrix.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import ward, inconsistent, is_valid_im
+    >>> from scipy.spatial.distance import pdist
+
+    Given a data set ``X``, we can apply a clustering method to obtain a
+    linkage matrix ``Z``. `scipy.cluster.hierarchy.inconsistent` can
+    be also used to obtain the inconsistency matrix ``R`` associated to
+    this clustering process:
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> Z = ward(pdist(X))
+    >>> R = inconsistent(Z)
+    >>> Z
+    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
+           [ 3.        ,  4.        ,  1.        ,  2.        ],
+           [ 6.        ,  7.        ,  1.        ,  2.        ],
+           [ 9.        , 10.        ,  1.        ,  2.        ],
+           [ 2.        , 12.        ,  1.29099445,  3.        ],
+           [ 5.        , 13.        ,  1.29099445,  3.        ],
+           [ 8.        , 14.        ,  1.29099445,  3.        ],
+           [11.        , 15.        ,  1.29099445,  3.        ],
+           [16.        , 17.        ,  5.77350269,  6.        ],
+           [18.        , 19.        ,  5.77350269,  6.        ],
+           [20.        , 21.        ,  8.16496581, 12.        ]])
+    >>> R
+    array([[1.        , 0.        , 1.        , 0.        ],
+           [1.        , 0.        , 1.        , 0.        ],
+           [1.        , 0.        , 1.        , 0.        ],
+           [1.        , 0.        , 1.        , 0.        ],
+           [1.14549722, 0.20576415, 2.        , 0.70710678],
+           [1.14549722, 0.20576415, 2.        , 0.70710678],
+           [1.14549722, 0.20576415, 2.        , 0.70710678],
+           [1.14549722, 0.20576415, 2.        , 0.70710678],
+           [2.78516386, 2.58797734, 3.        , 1.15470054],
+           [2.78516386, 2.58797734, 3.        , 1.15470054],
+           [6.57065706, 1.38071187, 3.        , 1.15470054]])
+
+    Now we can use `scipy.cluster.hierarchy.is_valid_im` to verify that
+    ``R`` is correct:
+
+    >>> is_valid_im(R)
+    True
+
+    However, if ``R`` is wrongly constructed (e.g., one of the standard
+    deviations is set to a negative value), then the check will fail:
+
+    >>> R[-1,1] = R[-1,1] * -1
+    >>> is_valid_im(R)
+    False
+
+    """
+    R = np.asarray(R, order='c')
+    valid = True
+    name_str = "%r " % name if name else ''
+    try:
+        if not isinstance(R, np.ndarray):
+            raise TypeError('Variable %spassed as inconsistency matrix is not '
+                            'a numpy array.' % name_str)
+        if R.dtype != np.double:
+            raise TypeError('Inconsistency matrix %smust contain doubles '
+                            '(double).' % name_str)
+        if len(R.shape) != 2:
+            raise ValueError('Inconsistency matrix %smust be '
+                             'two-dimensional.' % name_str)
+        if R.shape[1] != 4:
+            raise ValueError('Inconsistency matrix %smust have 4 columns.' %
+                             name_str)
+        if R.shape[0] < 1:
+            raise ValueError('Inconsistency matrix %smust have at least one '
+                             'row.' % name_str)
+        if (R[:, 0] < 0).any():
+            raise ValueError('Inconsistency matrix %scontains negative link '
+                             'height means.' % name_str)
+        if (R[:, 1] < 0).any():
+            raise ValueError('Inconsistency matrix %scontains negative link '
+                             'height standard deviations.' % name_str)
+        if (R[:, 2] < 0).any():
+            raise ValueError('Inconsistency matrix %scontains negative link '
+                             'counts.' % name_str)
+    except Exception as e:
+        if throw:
+            raise
+        if warning:
+            _warning(str(e))
+        valid = False
+
+    return valid
+
+
+def is_valid_linkage(Z, warning=False, throw=False, name=None):
+    """
+    Check the validity of a linkage matrix.
+
+    A linkage matrix is valid if it is a 2-D array (type double)
+    with :math:`n` rows and 4 columns. The first two columns must contain
+    indices between 0 and :math:`2n-1`. For a given row ``i``, the following
+    two expressions have to hold:
+
+    .. math::
+
+        0 \\leq \\mathtt{Z[i,0]} \\leq i+n-1
+
+        0 \\leq \\mathtt{Z[i,1]} \\leq i+n-1
+
+    I.e., a cluster cannot join another cluster unless the cluster being joined
+    has been generated.
+
+    Parameters
+    ----------
+    Z : array_like
+        Linkage matrix.
+    warning : bool, optional
+        When True, issues a Python warning if the linkage
+        matrix passed is invalid.
+    throw : bool, optional
+        When True, throws a Python exception if the linkage
+        matrix passed is invalid.
+    name : str, optional
+        This string refers to the variable name of the invalid
+        linkage matrix.
+
+    Returns
+    -------
+    b : bool
+        True if the linkage matrix is valid.
+
+    See Also
+    --------
+    linkage: for a description of what a linkage matrix is.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import ward, is_valid_linkage
+    >>> from scipy.spatial.distance import pdist
+
+    All linkage matrices generated by the clustering methods in this module
+    will be valid (i.e., they will have the appropriate dimensions and the two
+    required expressions will hold for all the rows).
+
+    We can check this using `scipy.cluster.hierarchy.is_valid_linkage`:
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> Z = ward(pdist(X))
+    >>> Z
+    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
+           [ 3.        ,  4.        ,  1.        ,  2.        ],
+           [ 6.        ,  7.        ,  1.        ,  2.        ],
+           [ 9.        , 10.        ,  1.        ,  2.        ],
+           [ 2.        , 12.        ,  1.29099445,  3.        ],
+           [ 5.        , 13.        ,  1.29099445,  3.        ],
+           [ 8.        , 14.        ,  1.29099445,  3.        ],
+           [11.        , 15.        ,  1.29099445,  3.        ],
+           [16.        , 17.        ,  5.77350269,  6.        ],
+           [18.        , 19.        ,  5.77350269,  6.        ],
+           [20.        , 21.        ,  8.16496581, 12.        ]])
+    >>> is_valid_linkage(Z)
+    True
+
+    However, if we create a linkage matrix in a wrong way - or if we modify
+    a valid one in a way that any of the required expressions don't hold
+    anymore, then the check will fail:
+
+    >>> Z[3][1] = 20    # the cluster number 20 is not defined at this point
+    >>> is_valid_linkage(Z)
+    False
+
+    """
+    Z = np.asarray(Z, order='c')
+    valid = True
+    name_str = "%r " % name if name else ''
+    try:
+        if type(Z) != np.ndarray:
+            raise TypeError('Passed linkage argument %sis not a valid array.' %
+                            name_str)
+        if Z.dtype != np.double:
+            raise TypeError('Linkage matrix %smust contain doubles.' % name_str)
+        if len(Z.shape) != 2:
+            raise ValueError('Linkage matrix %smust be two-dimensional.'
+                             % name_str)
+        if Z.shape[1] != 4:
+            raise ValueError('Linkage matrix %smust have 4 columns.' % name_str)
+        if Z.shape[0] == 0:
+            raise ValueError('Linkage must be computed on at least two '
+                             'observations.')
+        n = Z.shape[0]
+        if n > 1:
+            if ((Z[:, 0] < 0).any() or (Z[:, 1] < 0).any()):
+                raise ValueError('Linkage %scontains negative indices.' %
+                                 name_str)
+            if (Z[:, 2] < 0).any():
+                raise ValueError('Linkage %scontains negative distances.' %
+                                 name_str)
+            if (Z[:, 3] < 0).any():
+                raise ValueError('Linkage %scontains negative counts.' %
+                                 name_str)
+        if _check_hierarchy_uses_cluster_before_formed(Z):
+            raise ValueError('Linkage %suses non-singleton cluster before '
+                             'it is formed.' % name_str)
+        if _check_hierarchy_uses_cluster_more_than_once(Z):
+            raise ValueError('Linkage %suses the same cluster more than once.'
+                             % name_str)
+    except Exception as e:
+        if throw:
+            raise
+        if warning:
+            _warning(str(e))
+        valid = False
+
+    return valid
+
+
+def _check_hierarchy_uses_cluster_before_formed(Z):
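+    # Row i of Z creates cluster id n + i, so both children referenced in
+    # row i must already exist, i.e. have ids strictly smaller than n + i.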
+    n = Z.shape[0] + 1
+    for i in range(0, n - 1):
+        if Z[i, 0] >= n + i or Z[i, 1] >= n + i:
+            return True
+    return False
+
+
+def _check_hierarchy_uses_cluster_more_than_once(Z):
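+    # A cluster id may appear as a child in at most one row, and a row may
+    # not merge a cluster with itself.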
+    n = Z.shape[0] + 1
+    chosen = set([])
+    for i in range(0, n - 1):
+        if (Z[i, 0] in chosen) or (Z[i, 1] in chosen) or Z[i, 0] == Z[i, 1]:
+            return True
+        chosen.add(Z[i, 0])
+        chosen.add(Z[i, 1])
+    return False
+
+
+def _check_hierarchy_not_all_clusters_used(Z):
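+    # Every cluster id except the root (id 2n - 2) must appear as a child
+    # in some row of Z.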
+    n = Z.shape[0] + 1
+    chosen = set([])
+    for i in range(0, n - 1):
+        chosen.add(int(Z[i, 0]))
+        chosen.add(int(Z[i, 1]))
+    must_chosen = set(range(0, 2 * n - 2))
+    return len(must_chosen.difference(chosen)) > 0
+
+
+def num_obs_linkage(Z):
+    """
+    Return the number of original observations of the linkage matrix passed.
+
+    Parameters
+    ----------
+    Z : ndarray
+        The linkage matrix on which to perform the operation.
+
+    Returns
+    -------
+    n : int
+        The number of original observations in the linkage.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import ward, num_obs_linkage
+    >>> from scipy.spatial.distance import pdist
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> Z = ward(pdist(X))
+
+    ``Z`` is a linkage matrix obtained after using the Ward clustering method
+    with ``X``, a dataset with 12 data points.
+
+    >>> num_obs_linkage(Z)
+    12
+
+    """
+    Z = np.asarray(Z, order='c')
+    is_valid_linkage(Z, throw=True, name='Z')
+    return (Z.shape[0] + 1)
+
+
+def correspond(Z, Y):
+    """
+    Check for correspondence between linkage and condensed distance matrices.
+
+    They must have the same number of original observations for
+    the check to succeed.
+
+    This function is useful as a sanity check in algorithms that make
+    extensive use of linkage and distance matrices that must
+    correspond to the same set of original observations.
+
+    Parameters
+    ----------
+    Z : array_like
+        The linkage matrix to check for correspondence.
+    Y : array_like
+        The condensed distance matrix to check for correspondence.
+
+    Returns
+    -------
+    b : bool
+        A boolean indicating whether the linkage matrix and distance
+        matrix could possibly correspond to one another.
+
+    See Also
+    --------
+    linkage : for a description of what a linkage matrix is.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import ward, correspond
+    >>> from scipy.spatial.distance import pdist
+
+    This method can be used to check if a given linkage matrix ``Z`` has been
+    obtained from the application of a cluster method over a dataset ``X``:
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+    >>> X_condensed = pdist(X)
+    >>> Z = ward(X_condensed)
+
+    Here, we can compare ``Z`` and ``X`` (in condensed form):
+
+    >>> correspond(Z, X_condensed)
+    True
+
+    """
+    is_valid_linkage(Z, throw=True)
+    distance.is_valid_y(Y, throw=True)
+    Z = np.asarray(Z, order='c')
+    Y = np.asarray(Y, order='c')
+    return distance.num_obs_y(Y) == num_obs_linkage(Z)
+
+
+def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None):
+    """
+    Form flat clusters from the hierarchical clustering defined by
+    the given linkage matrix.
+
+    Parameters
+    ----------
+    Z : ndarray
+        The hierarchical clustering encoded with the matrix returned
+        by the `linkage` function.
+    t : scalar
+        For criteria 'inconsistent', 'distance' or 'monocrit',
+        this is the threshold to apply when forming flat clusters.
+        For 'maxclust' or 'maxclust_monocrit' criteria,
+        this would be the maximum number of clusters requested.
+    criterion : str, optional
+        The criterion to use in forming flat clusters. This can
+        be any of the following values:
+
+          ``inconsistent`` :
+              If a cluster node and all its
+              descendants have an inconsistent value less than or equal
+              to `t`, then all its leaf descendants belong to the
+              same flat cluster. When no non-singleton cluster meets
+              this criterion, every node is assigned to its own
+              cluster. (Default)
+
+          ``distance`` :
+              Forms flat clusters so that the original
+              observations in each flat cluster have a cophenetic
+              distance no greater than `t`.
+
+          ``maxclust`` :
+              Finds a minimum threshold ``r`` so that
+              the cophenetic distance between any two original
+              observations in the same flat cluster is no more than
+              ``r`` and no more than `t` flat clusters are formed.
+
+          ``monocrit`` :
+              Forms a flat cluster from a cluster node ``c``
+              with index ``i`` when ``monocrit[i] <= t``.
+
+              For example, to threshold on the maximum mean distance
+              as computed in the inconsistency matrix R with a
+              threshold of 0.8 do::
+
+                  MR = maxRstat(Z, R, 3)
+                  fcluster(Z, t=0.8, criterion='monocrit', monocrit=MR)
+
+          ``maxclust_monocrit`` :
+              Forms a flat cluster from a
+              non-singleton cluster node ``c`` when ``monocrit[i] <=
+              r`` for all cluster indices ``i`` below and including
+              ``c``. ``r`` is minimized such that no more than ``t``
+              flat clusters are formed. monocrit must be
+              monotonic. For example, to minimize the threshold t on
+              maximum inconsistency values so that no more than 3 flat
+              clusters are formed, do::
+
+                  MI = maxinconsts(Z, R)
+                  fcluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI)
+    depth : int, optional
+        The maximum depth to perform the inconsistency calculation.
+        It has no meaning for the other criteria. Default is 2.
+    R : ndarray, optional
+        The inconsistency matrix to use for the 'inconsistent'
+        criterion. This matrix is computed if not provided.
+    monocrit : ndarray, optional
+        An array of length n-1. `monocrit[i]` is the
+        statistic upon which non-singleton i is thresholded. The
+        monocrit vector must be monotonic, i.e., given a node c with
+        index i, for all node indices j corresponding to nodes
+        below c, ``monocrit[i] >= monocrit[j]``.
+
+    Returns
+    -------
+    fcluster : ndarray
+        An array of length ``n``. ``T[i]`` is the flat cluster number to
+        which original observation ``i`` belongs.
+
+    See Also
+    --------
+    linkage : for information about how hierarchical clustering methods work.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import ward, fcluster
+    >>> from scipy.spatial.distance import pdist
+
+    All cluster linkage methods - e.g., `scipy.cluster.hierarchy.ward` -
+    generate a linkage matrix ``Z`` as their output:
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> Z = ward(pdist(X))
+
+    >>> Z
+    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
+           [ 3.        ,  4.        ,  1.        ,  2.        ],
+           [ 6.        ,  7.        ,  1.        ,  2.        ],
+           [ 9.        , 10.        ,  1.        ,  2.        ],
+           [ 2.        , 12.        ,  1.29099445,  3.        ],
+           [ 5.        , 13.        ,  1.29099445,  3.        ],
+           [ 8.        , 14.        ,  1.29099445,  3.        ],
+           [11.        , 15.        ,  1.29099445,  3.        ],
+           [16.        , 17.        ,  5.77350269,  6.        ],
+           [18.        , 19.        ,  5.77350269,  6.        ],
+           [20.        , 21.        ,  8.16496581, 12.        ]])
+
+    This matrix represents a dendrogram, where the first and second elements
+    are the two clusters merged at each step, the third element is the
+    distance between these clusters, and the fourth element is the size of
+    the new cluster - the number of original data points included.
+
+    `scipy.cluster.hierarchy.fcluster` can be used to flatten the
+    dendrogram, obtaining as a result an assignment of the original data
+    points to single clusters.
+
+    This assignment mostly depends on a distance threshold ``t`` - the
+    maximum inter-cluster distance allowed:
+
+    >>> fcluster(Z, t=0.9, criterion='distance')
+    array([ 1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12], dtype=int32)
+
+    >>> fcluster(Z, t=1.1, criterion='distance')
+    array([1, 1, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8], dtype=int32)
+
+    >>> fcluster(Z, t=3, criterion='distance')
+    array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)
+
+    >>> fcluster(Z, t=9, criterion='distance')
+    array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)
+
+    In the first case, the threshold ``t`` is too small to allow any two
+    samples in the data to form a cluster, so 12 different clusters are
+    returned.
+
+    In the second case, the threshold is large enough to allow the four
+    nearest pairs of points (at distance 1) to be merged. So, here, only 8
+    clusters are returned.
+
+    The third case, with a much higher threshold, allows each group of
+    three nearby points to be merged - so 4 clusters are returned here.
+
+    Lastly, the threshold of the fourth case is large enough to allow for
+    all data points to be merged together - so a single cluster is returned.
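+
+    The ``'maxclust'`` criterion can be used instead to request a number of
+    flat clusters directly (a minimal sketch; the particular labels assigned
+    may differ from the ones shown above):
+
+    >>> T = fcluster(Z, t=4, criterion='maxclust')
+    >>> len(set(T))
+    4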
+
+    """
+    Z = np.asarray(Z, order='c')
+    is_valid_linkage(Z, throw=True, name='Z')
+
+    n = Z.shape[0] + 1
+    T = np.zeros((n,), dtype='i')
+
+    # Since the C code does not support striding using strides,
+    # the dimensions are used instead.
+    [Z] = _copy_arrays_if_base_present([Z])
+
+    if criterion == 'inconsistent':
+        if R is None:
+            R = inconsistent(Z, depth)
+        else:
+            R = np.asarray(R, order='c')
+            is_valid_im(R, throw=True, name='R')
+            # Since the C code does not support striding using strides,
+            # the dimensions are used instead.
+            [R] = _copy_arrays_if_base_present([R])
+        _hierarchy.cluster_in(Z, R, T, float(t), int(n))
+    elif criterion == 'distance':
+        _hierarchy.cluster_dist(Z, T, float(t), int(n))
+    elif criterion == 'maxclust':
+        _hierarchy.cluster_maxclust_dist(Z, T, int(n), int(t))
+    elif criterion == 'monocrit':
+        [monocrit] = _copy_arrays_if_base_present([monocrit])
+        _hierarchy.cluster_monocrit(Z, monocrit, T, float(t), int(n))
+    elif criterion == 'maxclust_monocrit':
+        [monocrit] = _copy_arrays_if_base_present([monocrit])
+        _hierarchy.cluster_maxclust_monocrit(Z, monocrit, T, int(n), int(t))
+    else:
+        raise ValueError('Invalid cluster formation criterion: %s'
+                         % str(criterion))
+    return T
+
+
+def fclusterdata(X, t, criterion='inconsistent',
+                 metric='euclidean', depth=2, method='single', R=None):
+    """
+    Cluster observation data using a given metric.
+
+    Clusters the original observations in the n-by-m data
+    matrix X (n observations in m dimensions), using the given
+    distance metric (Euclidean by default) to calculate distances
+    between original observations, performs hierarchical clustering
+    using the given linkage method (single by default), and forms flat
+    clusters using the inconsistency method with `t` as the cut-off
+    threshold.
+
+    A 1-D array ``T`` of length ``n`` is returned. ``T[i]`` is
+    the index of the flat cluster to which the original observation ``i``
+    belongs.
+
+    Parameters
+    ----------
+    X : (N, M) ndarray
+        N by M data matrix with N observations in M dimensions.
+    t : scalar
+        For criteria 'inconsistent', 'distance' or 'monocrit',
+        this is the threshold to apply when forming flat clusters.
+        For 'maxclust' or 'maxclust_monocrit' criteria,
+        this would be the maximum number of clusters requested.
+    criterion : str, optional
+        Specifies the criterion for forming flat clusters. Valid
+        values are 'inconsistent' (default), 'distance', or 'maxclust'.
+        See `fcluster` for descriptions.
+    metric : str or function, optional
+        The distance metric for calculating pairwise distances. See
+        ``distance.pdist`` for descriptions, and `linkage` to verify
+        compatibility with the linkage method.
+    depth : int, optional
+        The maximum depth for the inconsistency calculation. See
+        `inconsistent` for more information.
+    method : str, optional
+        The linkage method to use (single, complete, average,
+        weighted, median, centroid, ward). See `linkage` for more
+        information. Default is "single".
+    R : ndarray, optional
+        The inconsistency matrix. It will be computed if necessary
+        if it is not passed.
+
+    Returns
+    -------
+    fclusterdata : ndarray
+        A vector of length n. T[i] is the flat cluster number to
+        which original observation i belongs.
+
+    See Also
+    --------
+    scipy.spatial.distance.pdist : pairwise distance metrics
+
+    Notes
+    -----
+    This function is similar to the MATLAB function ``clusterdata``.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import fclusterdata
+
+    This is a convenience method that abstracts all the steps of a
+    typical SciPy hierarchical clustering workflow:
+
+    * Transform the input data into a condensed matrix with `scipy.spatial.distance.pdist`.
+
+    * Apply a clustering method.
+
+    * Obtain flat clusters at a user-defined distance threshold ``t`` using `scipy.cluster.hierarchy.fcluster`.
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> fclusterdata(X, t=1)
+    array([3, 3, 3, 4, 4, 4, 2, 2, 2, 1, 1, 1], dtype=int32)
+
+    The output here (for the dataset ``X``, distance threshold ``t``, and the
+    default settings) is four clusters with three data points each.
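+
+    The same result can be obtained by running the steps of the workflow
+    explicitly (a sketch of the equivalent manual pipeline, using the
+    default metric, linkage method, and criterion):
+
+    >>> from scipy.cluster.hierarchy import single, fcluster
+    >>> from scipy.spatial.distance import pdist
+    >>> T = fcluster(single(pdist(X)), t=1, criterion='inconsistent')
+    >>> len(set(T))
+    4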
+
+    """
+    X = np.asarray(X, order='c', dtype=np.double)
+
+    if type(X) != np.ndarray or len(X.shape) != 2:
+        raise TypeError('The observation matrix X must be an n by m numpy '
+                        'array.')
+
+    Y = distance.pdist(X, metric=metric)
+    Z = linkage(Y, method=method)
+    if R is None:
+        R = inconsistent(Z, d=depth)
+    else:
+        R = np.asarray(R, order='c')
+    T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t)
+    return T
+
+
+def leaves_list(Z):
+    """
+    Return a list of leaf node ids.
+
+    The return corresponds to the observation vector index as it appears
+    in the tree from left to right. Z is a linkage matrix.
+
+    Parameters
+    ----------
+    Z : ndarray
+        The hierarchical clustering encoded as a matrix.  `Z` is
+        a linkage matrix.  See `linkage` for more information.
+
+    Returns
+    -------
+    leaves_list : ndarray
+        The list of leaf node ids.
+
+    See Also
+    --------
+    dendrogram : for information about dendrogram structure.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import ward, dendrogram, leaves_list
+    >>> from scipy.spatial.distance import pdist
+    >>> from matplotlib import pyplot as plt
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> Z = ward(pdist(X))
+
+    The linkage matrix ``Z`` represents a dendrogram, that is, a tree that
+    encodes the structure of the clustering performed.
+    `scipy.cluster.hierarchy.leaves_list` shows the mapping between
+    indices in the ``X`` dataset and leaves in the dendrogram:
+
+    >>> leaves_list(Z)
+    array([ 2,  0,  1,  5,  3,  4,  8,  6,  7, 11,  9, 10], dtype=int32)
+
+    >>> fig = plt.figure(figsize=(25, 10))
+    >>> dn = dendrogram(Z)
+    >>> plt.show()
+
+    """
+    Z = np.asarray(Z, order='c')
+    is_valid_linkage(Z, throw=True, name='Z')
+    n = Z.shape[0] + 1
+    ML = np.zeros((n,), dtype='i')
+    [Z] = _copy_arrays_if_base_present([Z])
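+    # prelist fills ML with the leaf ids in the left-to-right order in
+    # which they appear in the dendrogram (a pre-order traversal).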
+    _hierarchy.prelist(Z, ML, int(n))
+    return ML
+
+
+# Maps number of leaves to text size.
+#
+# p <= 20, size="12"
+# 20 < p <= 30, size="10"
+# 30 < p <= 50, size="8"
+# 50 < p <= 85, size="6"
+# 85 < p, size="5"
+
+_dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, np.inf: 5}
+_drotation = {20: 0, 40: 45, np.inf: 90}
+_dtextsortedkeys = list(_dtextsizes.keys())
+_dtextsortedkeys.sort()
+_drotationsortedkeys = list(_drotation.keys())
+_drotationsortedkeys.sort()
+
+
+def _remove_dups(L):
+    """
+    Remove duplicates AND preserve the original order of the elements.
+
+    The set class is not guaranteed to do this.
+    """
+    seen_before = set([])
+    L2 = []
+    for i in L:
+        if i not in seen_before:
+            seen_before.add(i)
+            L2.append(i)
+    return L2
+
+
+def _get_tick_text_size(p):
+    for k in _dtextsortedkeys:
+        if p <= k:
+            return _dtextsizes[k]
+
+
+def _get_tick_rotation(p):
+    for k in _drotationsortedkeys:
+        if p <= k:
+            return _drotation[k]
+
+
+def _plot_dendrogram(icoords, dcoords, ivl, p, n, mh, orientation,
+                     no_labels, color_list, leaf_font_size=None,
+                     leaf_rotation=None, contraction_marks=None,
+                     ax=None, above_threshold_color='C0'):
+    # Import matplotlib here so that it's not imported unless dendrograms
+    # are plotted. Raise an informative error if importing fails.
+    try:
+        # if an axis is provided, don't use pylab at all
+        if ax is None:
+            import matplotlib.pylab
+        import matplotlib.patches
+        import matplotlib.collections
+    except ImportError as e:
+        raise ImportError("You must install the matplotlib library to plot "
+                          "the dendrogram. Use no_plot=True to calculate the "
+                          "dendrogram without plotting.") from e
+
+    if ax is None:
+        ax = matplotlib.pylab.gca()
+        # if we're using pylab, we want to trigger a draw at the end
+        trigger_redraw = True
+    else:
+        trigger_redraw = False
+
+    # Independent variable plot width
+    ivw = len(ivl) * 10
+    # Dependent variable plot height
+    dvw = mh + mh * 0.05
+
+    iv_ticks = np.arange(5, len(ivl) * 10 + 5, 10)
+    if orientation in ('top', 'bottom'):
+        if orientation == 'top':
+            ax.set_ylim([0, dvw])
+            ax.set_xlim([0, ivw])
+        else:
+            ax.set_ylim([dvw, 0])
+            ax.set_xlim([0, ivw])
+
+        xlines = icoords
+        ylines = dcoords
+        if no_labels:
+            ax.set_xticks([])
+            ax.set_xticklabels([])
+        else:
+            ax.set_xticks(iv_ticks)
+
+            if orientation == 'top':
+                ax.xaxis.set_ticks_position('bottom')
+            else:
+                ax.xaxis.set_ticks_position('top')
+
+            # Make the tick marks invisible because they cover up the links
+            for line in ax.get_xticklines():
+                line.set_visible(False)
+
+            leaf_rot = (float(_get_tick_rotation(len(ivl)))
+                        if (leaf_rotation is None) else leaf_rotation)
+            leaf_font = (float(_get_tick_text_size(len(ivl)))
+                         if (leaf_font_size is None) else leaf_font_size)
+            ax.set_xticklabels(ivl, rotation=leaf_rot, size=leaf_font)
+
+    elif orientation in ('left', 'right'):
+        if orientation == 'left':
+            ax.set_xlim([dvw, 0])
+            ax.set_ylim([0, ivw])
+        else:
+            ax.set_xlim([0, dvw])
+            ax.set_ylim([0, ivw])
+
+        xlines = dcoords
+        ylines = icoords
+        if no_labels:
+            ax.set_yticks([])
+            ax.set_yticklabels([])
+        else:
+            ax.set_yticks(iv_ticks)
+
+            if orientation == 'left':
+                ax.yaxis.set_ticks_position('right')
+            else:
+                ax.yaxis.set_ticks_position('left')
+
+            # Make the tick marks invisible because they cover up the links
+            for line in ax.get_yticklines():
+                line.set_visible(False)
+
+            leaf_font = (float(_get_tick_text_size(len(ivl)))
+                         if (leaf_font_size is None) else leaf_font_size)
+
+            if leaf_rotation is not None:
+                ax.set_yticklabels(ivl, rotation=leaf_rotation, size=leaf_font)
+            else:
+                ax.set_yticklabels(ivl, size=leaf_font)
+
+    # Let's use collections instead. This way there is a separate legend
+    # item for each tree grouping, rather than one for each line segment.
+    colors_used = _remove_dups(color_list)
+    color_to_lines = {}
+    for color in colors_used:
+        color_to_lines[color] = []
+    for (xline, yline, color) in zip(xlines, ylines, color_list):
+        color_to_lines[color].append(list(zip(xline, yline)))
+
+    colors_to_collections = {}
+    # Construct the collections.
+    for color in colors_used:
+        coll = matplotlib.collections.LineCollection(color_to_lines[color],
+                                                     colors=(color,))
+        colors_to_collections[color] = coll
+
+    # Add all the groupings below the color threshold.
+    for color in colors_used:
+        if color != above_threshold_color:
+            ax.add_collection(colors_to_collections[color])
+    # If there's a grouping of links above the color threshold, it goes last.
+    if above_threshold_color in colors_to_collections:
+        ax.add_collection(colors_to_collections[above_threshold_color])
+
+    if contraction_marks is not None:
+        Ellipse = matplotlib.patches.Ellipse
+        for (x, y) in contraction_marks:
+            if orientation in ('left', 'right'):
+                e = Ellipse((y, x), width=dvw / 100, height=1.0)
+            else:
+                e = Ellipse((x, y), width=1.0, height=dvw / 100)
+            ax.add_artist(e)
+            e.set_clip_box(ax.bbox)
+            e.set_alpha(0.5)
+            e.set_facecolor('k')
+
+    if trigger_redraw:
+        matplotlib.pylab.draw_if_interactive()
+
+
+# C0 is used for the above-threshold color
+_link_line_colors_default = ('C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9')
+_link_line_colors = list(_link_line_colors_default)
+
+
+def set_link_color_palette(palette):
+    """
+    Set list of matplotlib color codes for use by dendrogram.
+
+    Note that this palette is global (i.e., setting it once changes the colors
+    for all subsequent calls to `dendrogram`) and that it affects only the
+    colors below ``color_threshold``.
+
+    Note that `dendrogram` also accepts a custom coloring function through its
+    ``link_color_func`` keyword, which is more flexible and non-global.
+
+    Parameters
+    ----------
+    palette : list of str or None
+        A list of matplotlib color codes.  The order of the color codes is the
+        order in which the colors are cycled through when color thresholding in
+        the dendrogram.
+
+        If ``None``, resets the palette to its default (which are matplotlib
+        default colors C1 to C9).
+
+    Returns
+    -------
+    None
+
+    See Also
+    --------
+    dendrogram
+
+    Notes
+    -----
+    Ability to reset the palette with ``None`` added in SciPy 0.17.0.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.cluster import hierarchy
+    >>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268.,
+    ...                    400., 754., 564., 138., 219., 869., 669.])
+    >>> Z = hierarchy.linkage(ytdist, 'single')
+    >>> dn = hierarchy.dendrogram(Z, no_plot=True)
+    >>> dn['color_list']
+    ['C1', 'C0', 'C0', 'C0', 'C0']
+    >>> hierarchy.set_link_color_palette(['c', 'm', 'y', 'k'])
+    >>> dn = hierarchy.dendrogram(Z, no_plot=True, above_threshold_color='b')
+    >>> dn['color_list']
+    ['c', 'b', 'b', 'b', 'b']
+    >>> dn = hierarchy.dendrogram(Z, no_plot=True, color_threshold=267,
+    ...                           above_threshold_color='k')
+    >>> dn['color_list']
+    ['c', 'm', 'm', 'k', 'k']
+
+    Now, reset the color palette to its default:
+
+    >>> hierarchy.set_link_color_palette(None)
+
+    """
+    if palette is None:
+        # reset to its default
+        palette = _link_line_colors_default
+    elif type(palette) not in (list, tuple):
+        raise TypeError("palette must be a list or tuple")
+    _ptypes = [isinstance(p, str) for p in palette]
+
+    if False in _ptypes:
+        raise TypeError("all palette list elements must be color strings")
+
+    global _link_line_colors
+    _link_line_colors = palette
+
+
+def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None,
+               get_leaves=True, orientation='top', labels=None,
+               count_sort=False, distance_sort=False, show_leaf_counts=True,
+               no_plot=False, no_labels=False, leaf_font_size=None,
+               leaf_rotation=None, leaf_label_func=None,
+               show_contracted=False, link_color_func=None, ax=None,
+               above_threshold_color='C0'):
+    """
+    Plot the hierarchical clustering as a dendrogram.
+
+    The dendrogram illustrates how each cluster is
+    composed by drawing a U-shaped link between a non-singleton
+    cluster and its children. The top of the U-link indicates a
+    cluster merge. The two legs of the U-link indicate which clusters
+    were merged. The length of the two legs of the U-link represents
+    the distance between the child clusters. It is also the
+    cophenetic distance between original observations in the two
+    children clusters.
+
+    Parameters
+    ----------
+    Z : ndarray
+        The linkage matrix encoding the hierarchical clustering to
+        render as a dendrogram. See the ``linkage`` function for more
+        information on the format of ``Z``.
+    p : int, optional
+        The ``p`` parameter for ``truncate_mode``.
+    truncate_mode : str, optional
+        The dendrogram can be hard to read when the original
+        observation matrix from which the linkage is derived is
+        large. Truncation is used to condense the dendrogram. There
+        are several modes:
+
+        ``None``
+          No truncation is performed (default).
+          Note: ``'none'`` is an alias for ``None`` that's kept for
+          backward compatibility.
+
+        ``'lastp'``
+          The last ``p`` non-singleton clusters formed in the linkage are the
+          only non-leaf nodes in the linkage; they correspond to rows
+          ``Z[n-p-2:end]`` in ``Z``. All other non-singleton clusters are
+          contracted into leaf nodes.
+
+        ``'level'``
+          No more than ``p`` levels of the dendrogram tree are displayed.
+          A "level" includes all nodes with ``p`` merges from the final merge.
+
+          Note: ``'mtica'`` is an alias for ``'level'`` that's kept for
+          backward compatibility.
+
+    color_threshold : double, optional
+        For brevity, let :math:`t` be the ``color_threshold``.
+        Colors all the descendent links below a cluster node
+        :math:`k` the same color if :math:`k` is the first node below
+        the cut threshold :math:`t`. All links connecting nodes with
+        distances greater than or equal to the threshold are colored
+        with the default matplotlib color ``'C0'``. If :math:`t` is less
+        than or equal to zero, all nodes are colored ``'C0'``.
+        If ``color_threshold`` is None or 'default',
+        corresponding with MATLAB(TM) behavior, the threshold is set to
+        ``0.7*max(Z[:,2])``.
+
+    get_leaves : bool, optional
+        Includes a list ``R['leaves']=H`` in the result
+        dictionary. For each :math:`i`, ``H[i] == j`` means that cluster
+        node ``j`` appears in position ``i`` in the left-to-right traversal
+        of the leaves, where :math:`j < 2n-1` and :math:`i < n`.
+    orientation : str, optional
+        The direction to plot the dendrogram, which can be any
+        of the following strings:
+
+        ``'top'``
+          Plots the root at the top, and plots descendent links going downwards.
+          (default).
+
+        ``'bottom'``
+          Plots the root at the bottom, and plots descendent links going
+          upwards.
+
+        ``'left'``
+          Plots the root at the left, and plots descendent links going right.
+
+        ``'right'``
+          Plots the root at the right, and plots descendent links going left.
+
+    labels : ndarray, optional
+        By default, ``labels`` is None so the index of the original observation
+        is used to label the leaf nodes.  Otherwise, this is an :math:`n`-sized
+        sequence, with ``n == Z.shape[0] + 1``. The ``labels[i]`` value is the
+        text to put under the :math:`i` th leaf node only if it corresponds to
+        an original observation and not a non-singleton cluster.
+    count_sort : str or bool, optional
+        For each node n, the order (visually, from left-to-right) in which
+        n's two descendent links are plotted is determined by this
+        parameter, which can be any of the following values:
+
+        ``False``
+          Nothing is done.
+
+        ``'ascending'`` or ``True``
+          The child with the minimum number of original objects in its cluster
+          is plotted first.
+
+        ``'descending'``
+          The child with the maximum number of original objects in its cluster
+          is plotted first.
+
+        Note, ``distance_sort`` and ``count_sort`` cannot both be True.
+    distance_sort : str or bool, optional
+        For each node n, the order (visually, from left-to-right) in which
+        n's two descendent links are plotted is determined by this
+        parameter, which can be any of the following values:
+
+        ``False``
+          Nothing is done.
+
+        ``'ascending'`` or ``True``
+          The child with the minimum distance between its direct descendents is
+          plotted first.
+
+        ``'descending'``
+          The child with the maximum distance between its direct descendents is
+          plotted first.
+
+        Note ``distance_sort`` and ``count_sort`` cannot both be True.
+    show_leaf_counts : bool, optional
+         When True, leaf nodes representing :math:`k>1` original
+         observations are labeled with the number of observations they
+         contain in parentheses.
+    no_plot : bool, optional
+        When True, the final rendering is not performed. This is
+        useful if only the data structures computed for the rendering
+        are needed or if matplotlib is not available.
+    no_labels : bool, optional
+        When True, no labels appear next to the leaf nodes in the
+        rendering of the dendrogram.
+    leaf_rotation : double, optional
+        Specifies the angle (in degrees) to rotate the leaf
+        labels. When unspecified, the rotation is based on the number of
+        nodes in the dendrogram (default is 0).
+    leaf_font_size : int, optional
+        Specifies the font size (in points) of the leaf labels. When
+        unspecified, the size is based on the number of nodes in the
+        dendrogram.
+    leaf_label_func : lambda or function, optional
+        When ``leaf_label_func`` is a callable function, it is called
+        for each leaf with cluster index :math:`k < 2n-1`, and is
+        expected to return a string with the label for the
+        leaf.
+
+        Indices :math:`k < n` correspond to original observations
+        while indices :math:`k \\geq n` correspond to non-singleton
+        clusters.
+
+        For example, to label singletons with their node id and
+        non-singletons with their id, count, and inconsistency
+        coefficient, simply do::
+
+            # First define the leaf label function.
+            def llf(id):
+                if id < n:
+                    return str(id)
+                else:
+                    return '[%d %d %1.2f]' % (id, count, R[n-id,3])
+
+            # The text for the leaf nodes is going to be big so force
+            # a rotation of 90 degrees.
+            dendrogram(Z, leaf_label_func=llf, leaf_rotation=90)
+
+            # leaf_label_func can also be used together with the
+            # ``truncate_mode`` parameter, in which case you will get your
+            # leaves labeled after truncation:
+            dendrogram(Z, leaf_label_func=llf, leaf_rotation=90,
+                       truncate_mode='level', p=2)
+
+    show_contracted : bool, optional
+        When True the heights of non-singleton nodes contracted
+        into a leaf node are plotted as crosses along the link
+        connecting that leaf node.  This really is only useful when
+        truncation is used (see ``truncate_mode`` parameter).
+    link_color_func : callable, optional
+        If given, `link_color_func` is called with each non-singleton id
+        corresponding to each U-shaped link it will paint. The function is
+        expected to return the color to paint the link, encoded as a matplotlib
+        color string code. For example::
+
+            dendrogram(Z, link_color_func=lambda k: colors[k])
+
+        colors the direct links below each untruncated non-singleton node
+        ``k`` using ``colors[k]``.
+    ax : matplotlib Axes instance, optional
+        If None and `no_plot` is not True, the dendrogram will be plotted
+        on the current axes.  Otherwise if `no_plot` is not True the
+        dendrogram will be plotted on the given ``Axes`` instance. This can be
+        useful if the dendrogram is part of a more complex figure.
+    above_threshold_color : str, optional
+        This matplotlib color string sets the color of the links above the
+        color_threshold. The default is ``'C0'``.
+
+    Returns
+    -------
+    R : dict
+        A dictionary of data structures computed to render the
+        dendrogram. It has the following keys:
+
+        ``'color_list'``
+          A list of color names. The k'th element represents the color of the
+          k'th link.
+
+        ``'icoord'`` and ``'dcoord'``
+          Each of them is a list of lists. Let ``icoord = [I1, I2, ..., Ip]``
+          where ``Ik = [xk1, xk2, xk3, xk4]`` and ``dcoord = [D1, D2, ..., Dp]``
+          where ``Dk = [yk1, yk2, yk3, yk4]``, then the k'th link painted is
+          ``(xk1, yk1)`` - ``(xk2, yk2)`` - ``(xk3, yk3)`` - ``(xk4, yk4)``.
+
+        ``'ivl'``
+          A list of labels corresponding to the leaf nodes.
+
+        ``'leaves'``
+          For each i, ``H[i] == j`` means that cluster node ``j`` appears in
+          position ``i`` in the left-to-right traversal of the leaves, where
+          :math:`j < 2n-1` and :math:`i < n`. If ``j`` is less than ``n``, the
+          ``i``-th leaf node corresponds to an original observation.
+          Otherwise, it corresponds to a non-singleton cluster.
+
+        ``'leaves_color_list'``
+          A list of color names. The k'th element represents the color of the
+          k'th leaf.
+
+    See Also
+    --------
+    linkage, set_link_color_palette
+
+    Notes
+    -----
+    It is expected that the distances in ``Z[:,2]`` be monotonic, otherwise
+    crossings appear in the dendrogram.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.cluster import hierarchy
+    >>> import matplotlib.pyplot as plt
+
+    A very basic example:
+
+    >>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268.,
+    ...                    400., 754., 564., 138., 219., 869., 669.])
+    >>> Z = hierarchy.linkage(ytdist, 'single')
+    >>> plt.figure()
+    >>> dn = hierarchy.dendrogram(Z)
+
+    Now, plot in given axes, improve the color scheme and use both vertical and
+    horizontal orientations:
+
+    >>> hierarchy.set_link_color_palette(['m', 'c', 'y', 'k'])
+    >>> fig, axes = plt.subplots(1, 2, figsize=(8, 3))
+    >>> dn1 = hierarchy.dendrogram(Z, ax=axes[0], above_threshold_color='y',
+    ...                            orientation='top')
+    >>> dn2 = hierarchy.dendrogram(Z, ax=axes[1],
+    ...                            above_threshold_color='#bcbddc',
+    ...                            orientation='right')
+    >>> hierarchy.set_link_color_palette(None)  # reset to default after use
+    >>> plt.show()
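+
+    Truncation can make large dendrograms easier to read. As a minimal
+    sketch, ``truncate_mode='lastp'`` with ``p=3`` contracts everything
+    below the last merges into leaf nodes, so only 3 leaves remain:
+
+    >>> dn3 = hierarchy.dendrogram(Z, truncate_mode='lastp', p=3,
+    ...                            no_plot=True)
+    >>> len(dn3['ivl'])
+    3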
+
+    """
+    # This feature was thought about but never implemented (still useful?):
+    #
+    #         ... = dendrogram(..., leaves_order=None)
+    #
+    #         Plots the leaves in the order specified by a vector of
+    #         original observation indices. If the vector contains duplicates
+    #         or results in a crossing, an exception will be thrown. Passing
+    #         None orders leaf nodes based on the order they appear in the
+    #         pre-order traversal.
+    Z = np.asarray(Z, order='c')
+
+    if orientation not in ["top", "left", "bottom", "right"]:
+        raise ValueError("orientation must be one of 'top', 'left', "
+                         "'bottom', or 'right'")
+
+    if labels is not None and Z.shape[0] + 1 != len(labels):
+        raise ValueError("Dimensions of Z and labels must be consistent.")
+
+    is_valid_linkage(Z, throw=True, name='Z')
+    Zs = Z.shape
+    n = Zs[0] + 1
+    if type(p) in (int, float):
+        p = int(p)
+    else:
+        raise TypeError('The second argument must be a number')
+
+    if truncate_mode not in ('lastp', 'mtica', 'level', 'none', None):
+        # 'mtica' is kept working for backwards compat.
+        raise ValueError('Invalid truncation mode.')
+
+    if truncate_mode == 'lastp':
+        if p > n or p == 0:
+            p = n
+
+    if truncate_mode == 'mtica':
+        # 'mtica' is an alias
+        truncate_mode = 'level'
+
+    if truncate_mode == 'level':
+        if p <= 0:
+            p = np.inf
+
+    if get_leaves:
+        lvs = []
+    else:
+        lvs = None
+
+    icoord_list = []
+    dcoord_list = []
+    color_list = []
+    current_color = [0]
+    currently_below_threshold = [False]
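+    # current_color and currently_below_threshold are single-element lists
+    # so the recursive _dendrogram_calculate_info calls can mutate this
+    # shared state in place.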
+    ivl = []  # list of leaves
+
+    if color_threshold is None or (isinstance(color_threshold, str) and
+                                   color_threshold == 'default'):
+        color_threshold = max(Z[:, 2]) * 0.7
+
+    R = {'icoord': icoord_list, 'dcoord': dcoord_list, 'ivl': ivl,
+         'leaves': lvs, 'color_list': color_list}
+
+    # Empty list will be filled in _dendrogram_calculate_info
+    contraction_marks = [] if show_contracted else None
+
+    _dendrogram_calculate_info(
+        Z=Z, p=p,
+        truncate_mode=truncate_mode,
+        color_threshold=color_threshold,
+        get_leaves=get_leaves,
+        orientation=orientation,
+        labels=labels,
+        count_sort=count_sort,
+        distance_sort=distance_sort,
+        show_leaf_counts=show_leaf_counts,
+        i=2*n - 2,
+        iv=0.0,
+        ivl=ivl,
+        n=n,
+        icoord_list=icoord_list,
+        dcoord_list=dcoord_list,
+        lvs=lvs,
+        current_color=current_color,
+        color_list=color_list,
+        currently_below_threshold=currently_below_threshold,
+        leaf_label_func=leaf_label_func,
+        contraction_marks=contraction_marks,
+        link_color_func=link_color_func,
+        above_threshold_color=above_threshold_color)
+
+    if not no_plot:
+        mh = max(Z[:, 2])
+        _plot_dendrogram(icoord_list, dcoord_list, ivl, p, n, mh, orientation,
+                         no_labels, color_list,
+                         leaf_font_size=leaf_font_size,
+                         leaf_rotation=leaf_rotation,
+                         contraction_marks=contraction_marks,
+                         ax=ax,
+                         above_threshold_color=above_threshold_color)
+
+    R["leaves_color_list"] = _get_leaves_color_list(R)
+
+    return R
+
+
+def _get_leaves_color_list(R):
+    leaves_color_list = [None] * len(R['leaves'])
+    for link_x, link_y, link_color in zip(R['icoord'],
+                                          R['dcoord'],
+                                          R['color_list']):
+        for (xi, yi) in zip(link_x, link_y):
+            if yi == 0.0 and (xi % 5 == 0 and xi % 2 == 1):
+                # if yi is 0.0 and xi is divisible by 5 and odd,
+                # the point is a leaf
+                # xi of leaves are      5, 15, 25, 35, ... (see `iv_ticks`)
+                # index of leaves are   0,  1,  2,  3, ... as below
+                leaf_index = (int(xi) - 5) // 10
+                # each leaf has the same color as its link.
+                leaves_color_list[leaf_index] = link_color
+    return leaves_color_list
+
+
+def _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
+                                i, labels):
+    # If the leaf id structure is not None and is a list then the caller
+    # to dendrogram has indicated that cluster id's corresponding to the
+    # leaf nodes should be recorded.
+
+    if lvs is not None:
+        lvs.append(int(i))
+
+    # If leaf node labels are to be displayed...
+    if ivl is not None:
+        # If a leaf_label_func has been provided, the label comes from the
+        # string returned from the leaf_label_func, which is a function
+        # passed to dendrogram.
+        if leaf_label_func:
+            ivl.append(leaf_label_func(int(i)))
+        else:
+            # Otherwise, if the dendrogram caller has passed a labels list
+            # for the leaf nodes, use it.
+            if labels is not None:
+                ivl.append(labels[int(i - n)])
+            else:
+                # Otherwise, use the id as the label for the leaf.
+                ivl.append(str(int(i)))
+
+
+def _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
+                                   i, labels, show_leaf_counts):
+    # If the leaf id structure is not None and is a list then the caller
+    # to dendrogram has indicated that cluster id's corresponding to the
+    # leaf nodes should be recorded.
+
+    if lvs is not None:
+        lvs.append(int(i))
+    if ivl is not None:
+        if leaf_label_func:
+            ivl.append(leaf_label_func(int(i)))
+        else:
+            if show_leaf_counts:
+                ivl.append("(" + str(int(Z[i - n, 3])) + ")")
+            else:
+                ivl.append("")
+
+
+def _append_contraction_marks(Z, iv, i, n, contraction_marks):
+    _append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
+    _append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
+
+
+def _append_contraction_marks_sub(Z, iv, i, n, contraction_marks):
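+    # Record the merge height of every non-singleton cluster contracted
+    # into the leaf drawn at independent-variable position iv, recursing
+    # into its children.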
+    if i >= n:
+        contraction_marks.append((iv, Z[i - n, 2]))
+        _append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
+        _append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
+
+
+def _dendrogram_calculate_info(Z, p, truncate_mode,
+                               color_threshold=np.inf, get_leaves=True,
+                               orientation='top', labels=None,
+                               count_sort=False, distance_sort=False,
+                               show_leaf_counts=False, i=-1, iv=0.0,
+                               ivl=[], n=0, icoord_list=[], dcoord_list=[],
+                               lvs=None, mhr=False,
+                               current_color=[], color_list=[],
+                               currently_below_threshold=[],
+                               leaf_label_func=None, level=0,
+                               contraction_marks=None,
+                               link_color_func=None,
+                               above_threshold_color='C0'):
+    """
+    Calculate the endpoints of the links as well as the labels for
+    the dendrogram rooted at the node with index i. iv is the independent
+    variable value to plot the left-most leaf node below the root node i
+    (if orientation='top', this would be the left-most x value where the
+    plotting of this root node i and its descendents should begin).
+
+    ivl is a list to store the labels of the leaf nodes. The leaf_label_func
+    is called whenever ivl != None, labels == None, and
+    leaf_label_func != None. When ivl != None and labels != None, the
+    labels list is used only for labeling the leaf nodes. When
+    ivl == None, no labels are generated for leaf nodes.
+
+    When get_leaves==True, a list of leaves is built as they are visited
+    in the dendrogram.
+
+    Returns a tuple with l being the independent variable coordinate that
+    corresponds to the midpoint of cluster to the left of cluster i if
+    i is non-singleton, otherwise the independent coordinate of the leaf
+    node if i is a leaf node.
+
+    Returns
+    -------
+    A tuple (left, w, h, md), where:
+        * left is the independent variable coordinate of the center of
+          the U of the subtree
+
+        * w is the amount of space used for the subtree (in independent
+          variable units)
+
+        * h is the height of the subtree in dependent variable units
+
+        * md is the ``max(Z[*,2])`` for all nodes ``*`` below and including
+          the target node.
+
+    """
+    if n == 0:
+        raise ValueError("Invalid singleton cluster count n.")
+
+    if i == -1:
+        raise ValueError("Invalid root cluster index i.")
+
+    if truncate_mode == 'lastp':
+        # If the node is a leaf node but corresponds to a non-singleton
+        # cluster, its label is either the empty string or the number of
+        # original observations belonging to cluster i.
+        if 2*n - p > i >= n:
+            d = Z[i - n, 2]
+            _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
+                                           leaf_label_func, i, labels,
+                                           show_leaf_counts)
+            if contraction_marks is not None:
+                _append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
+            return (iv + 5.0, 10.0, 0.0, d)
+        elif i < n:
+            _append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
+                                        leaf_label_func, i, labels)
+            return (iv + 5.0, 10.0, 0.0, 0.0)
+    elif truncate_mode == 'level':
+        if i > n and level > p:
+            d = Z[i - n, 2]
+            _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
+                                           leaf_label_func, i, labels,
+                                           show_leaf_counts)
+            if contraction_marks is not None:
+                _append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
+            return (iv + 5.0, 10.0, 0.0, d)
+        elif i < n:
+            _append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
+                                        leaf_label_func, i, labels)
+            return (iv + 5.0, 10.0, 0.0, 0.0)
+
+    # Otherwise, only truncate if we have a leaf node.
+    #
+    # Only place leaves if they correspond to original observations.
+    if i < n:
+        _append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
+                                    leaf_label_func, i, labels)
+        return (iv + 5.0, 10.0, 0.0, 0.0)
+
+    # !!! Otherwise, we don't have a leaf node, so work on plotting a
+    # non-leaf node.
+    # Actual indices of a and b
+    aa = int(Z[i - n, 0])
+    ab = int(Z[i - n, 1])
+    if aa >= n:
+        # The number of singletons below cluster a
+        na = Z[aa - n, 3]
+        # The distance between a's two direct children.
+        da = Z[aa - n, 2]
+    else:
+        na = 1
+        da = 0.0
+    if ab >= n:
+        nb = Z[ab - n, 3]
+        db = Z[ab - n, 2]
+    else:
+        nb = 1
+        db = 0.0
+
+    if count_sort == 'ascending' or count_sort == True:
+        # If a has a count greater than b, it and its descendents should
+        # be drawn to the right. Otherwise, to the left.
+        if na > nb:
+            # The cluster index to draw to the left (ua) will be ab
+            # and the one to draw to the right (ub) will be aa
+            ua = ab
+            ub = aa
+        else:
+            ua = aa
+            ub = ab
+    elif count_sort == 'descending':
+        # If a has a count less than or equal to b, it and its
+        # descendents should be drawn to the left. Otherwise, to
+        # the right.
+        if na > nb:
+            ua = aa
+            ub = ab
+        else:
+            ua = ab
+            ub = aa
+    elif distance_sort == 'ascending' or distance_sort == True:
+        # If a has a distance greater than b, it and its descendents should
+        # be drawn to the right. Otherwise, to the left.
+        if da > db:
+            ua = ab
+            ub = aa
+        else:
+            ua = aa
+            ub = ab
+    elif distance_sort == 'descending':
+        # If a has a distance less than or equal to b, it and its
+        # descendents should be drawn to the left. Otherwise, to
+        # the right.
+        if da > db:
+            ua = aa
+            ub = ab
+        else:
+            ua = ab
+            ub = aa
+    else:
+        ua = aa
+        ub = ab
+
+    # Updated iv variable and the amount of space used.
+    (uiva, uwa, uah, uamd) = \
+        _dendrogram_calculate_info(
+            Z=Z, p=p,
+            truncate_mode=truncate_mode,
+            color_threshold=color_threshold,
+            get_leaves=get_leaves,
+            orientation=orientation,
+            labels=labels,
+            count_sort=count_sort,
+            distance_sort=distance_sort,
+            show_leaf_counts=show_leaf_counts,
+            i=ua, iv=iv, ivl=ivl, n=n,
+            icoord_list=icoord_list,
+            dcoord_list=dcoord_list, lvs=lvs,
+            current_color=current_color,
+            color_list=color_list,
+            currently_below_threshold=currently_below_threshold,
+            leaf_label_func=leaf_label_func,
+            level=level + 1, contraction_marks=contraction_marks,
+            link_color_func=link_color_func,
+            above_threshold_color=above_threshold_color)
+
+    h = Z[i - n, 2]
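+    # Links at or above the color threshold use the above-threshold color;
+    # when such a link follows a run of below-threshold links, the palette
+    # index advances so the next below-threshold subtree gets a new color.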
+    if h >= color_threshold or color_threshold <= 0:
+        c = above_threshold_color
+
+        if currently_below_threshold[0]:
+            current_color[0] = (current_color[0] + 1) % len(_link_line_colors)
+        currently_below_threshold[0] = False
+    else:
+        currently_below_threshold[0] = True
+        c = _link_line_colors[current_color[0]]
+
+    (uivb, uwb, ubh, ubmd) = \
+        _dendrogram_calculate_info(
+            Z=Z, p=p,
+            truncate_mode=truncate_mode,
+            color_threshold=color_threshold,
+            get_leaves=get_leaves,
+            orientation=orientation,
+            labels=labels,
+            count_sort=count_sort,
+            distance_sort=distance_sort,
+            show_leaf_counts=show_leaf_counts,
+            i=ub, iv=iv + uwa, ivl=ivl, n=n,
+            icoord_list=icoord_list,
+            dcoord_list=dcoord_list, lvs=lvs,
+            current_color=current_color,
+            color_list=color_list,
+            currently_below_threshold=currently_below_threshold,
+            leaf_label_func=leaf_label_func,
+            level=level + 1, contraction_marks=contraction_marks,
+            link_color_func=link_color_func,
+            above_threshold_color=above_threshold_color)
+
+    max_dist = max(uamd, ubmd, h)
+
+    icoord_list.append([uiva, uiva, uivb, uivb])
+    dcoord_list.append([uah, h, h, ubh])
+    if link_color_func is not None:
+        v = link_color_func(int(i))
+        if not isinstance(v, str):
+            raise TypeError("link_color_func must return a matplotlib "
+                            "color string!")
+        color_list.append(v)
+    else:
+        color_list.append(c)
+
+    return (((uiva + uivb) / 2), uwa + uwb, h, max_dist)
+
+
+def is_isomorphic(T1, T2):
+    """
+    Determine if two different cluster assignments are equivalent.
+
+    Parameters
+    ----------
+    T1 : array_like
+        An assignment of singleton cluster ids to flat cluster ids.
+    T2 : array_like
+        An assignment of singleton cluster ids to flat cluster ids.
+
+    Returns
+    -------
+    b : bool
+        Whether the flat cluster assignments `T1` and `T2` are
+        equivalent.
+
+    See Also
+    --------
+    linkage : for a description of what a linkage matrix is.
+    fcluster : for the creation of flat cluster assignments.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import fcluster, is_isomorphic
+    >>> from scipy.cluster.hierarchy import single, complete
+    >>> from scipy.spatial.distance import pdist
+
+    Two flat cluster assignments are isomorphic if they represent the same
+    cluster assignment, up to a relabeling of the clusters.
+
+    For example, we can use the `scipy.cluster.hierarchy.single` method
+    and flatten the output to four clusters:
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> Z = single(pdist(X))
+    >>> T = fcluster(Z, 1, criterion='distance')
+    >>> T
+    array([3, 3, 3, 4, 4, 4, 2, 2, 2, 1, 1, 1], dtype=int32)
+
+    We can then do the same using the
+    `scipy.cluster.hierarchy.complete` method:
+
+    >>> Z = complete(pdist(X))
+    >>> T_ = fcluster(Z, 1.5, criterion='distance')
+    >>> T_
+    array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)
+
+    As we can see, in both cases we obtain four clusters and all the data
+    points are distributed in the same way - the only things that change
+    are the flat cluster labels (3 => 1, 4 => 2, 2 => 3 and 1 => 4), so both
+    cluster assignments are isomorphic:
+
+    >>> is_isomorphic(T, T_)
+    True
+
+    """
+    T1 = np.asarray(T1, order='c')
+    T2 = np.asarray(T2, order='c')
+
+    if type(T1) != np.ndarray:
+        raise TypeError('T1 must be a numpy array.')
+    if type(T2) != np.ndarray:
+        raise TypeError('T2 must be a numpy array.')
+
+    T1S = T1.shape
+    T2S = T2.shape
+
+    if len(T1S) != 1:
+        raise ValueError('T1 must be one-dimensional.')
+    if len(T2S) != 1:
+        raise ValueError('T2 must be one-dimensional.')
+    if T1S[0] != T2S[0]:
+        raise ValueError('T1 and T2 must have the same number of elements.')
+    n = T1S[0]
+    d1 = {}
+    d2 = {}
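+    # Build forward (T1 -> T2) and reverse (T2 -> T1) label maps in a single
+    # pass; the two assignments are isomorphic iff this mapping is a
+    # bijection between the label sets.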
+    for i in range(0, n):
+        if T1[i] in d1:
+            if not T2[i] in d2:
+                return False
+            if d1[T1[i]] != T2[i] or d2[T2[i]] != T1[i]:
+                return False
+        elif T2[i] in d2:
+            return False
+        else:
+            d1[T1[i]] = T2[i]
+            d2[T2[i]] = T1[i]
+    return True
+
+
+def maxdists(Z):
+    """
+    Return the maximum distance within each non-singleton cluster.
+
+    Parameters
+    ----------
+    Z : ndarray
+        The hierarchical clustering encoded as a matrix. See
+        ``linkage`` for more information.
+
+    Returns
+    -------
+    maxdists : ndarray
+        An ``(n-1)``-sized numpy array of doubles; ``MD[i]`` represents
+        the maximum distance between any cluster (including
+        singletons) below and including the node with index i. More
+        specifically, ``MD[i] = Z[Q(i)-n, 2].max()`` where ``Q(i)`` is the
+        set of all node indices below and including node i.
+
+    See Also
+    --------
+    linkage : for a description of what a linkage matrix is.
+    is_monotonic : for testing for monotonicity of a linkage matrix.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import median, maxdists
+    >>> from scipy.spatial.distance import pdist
+
+    Given a linkage matrix ``Z``, `scipy.cluster.hierarchy.maxdists`
+    computes, for each new cluster generated (i.e., for each row of the
+    linkage matrix), the maximum distance between any two of its child
+    clusters.
+
+    Due to the nature of hierarchical clustering, in many cases this is
+    simply the distance between the two child clusters that were merged
+    to form the current one - that is, ``Z[:, 2]``.
+
+    However, for non-monotonic cluster assignments such as
+    `scipy.cluster.hierarchy.median` clustering this is not always the
+    case: there may be cluster formations where the distance between the two
+    clusters merged is smaller than the distance between their children.
+
+    We can see this in an example:
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> Z = median(pdist(X))
+    >>> Z
+    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
+           [ 3.        ,  4.        ,  1.        ,  2.        ],
+           [ 9.        , 10.        ,  1.        ,  2.        ],
+           [ 6.        ,  7.        ,  1.        ,  2.        ],
+           [ 2.        , 12.        ,  1.11803399,  3.        ],
+           [ 5.        , 13.        ,  1.11803399,  3.        ],
+           [ 8.        , 15.        ,  1.11803399,  3.        ],
+           [11.        , 14.        ,  1.11803399,  3.        ],
+           [18.        , 19.        ,  3.        ,  6.        ],
+           [16.        , 17.        ,  3.5       ,  6.        ],
+           [20.        , 21.        ,  3.25      , 12.        ]])
+    >>> maxdists(Z)
+    array([1.        , 1.        , 1.        , 1.        , 1.11803399,
+           1.11803399, 1.11803399, 1.11803399, 3.        , 3.5       ,
+           3.5       ])
+
+    Note that while the distance between the two clusters merged when creating the
+    last cluster is 3.25, there are two children (clusters 16 and 17) whose distance
+    is larger (3.5). Thus, `scipy.cluster.hierarchy.maxdists` returns 3.5 in
+    this case.
+
+    """
+    Z = np.asarray(Z, order='c', dtype=np.double)
+    is_valid_linkage(Z, throw=True, name='Z')
+
+    n = Z.shape[0] + 1
+    MD = np.zeros((n - 1,))
+    [Z] = _copy_arrays_if_base_present([Z])
+    _hierarchy.get_max_dist_for_each_cluster(Z, MD, int(n))
+    return MD
+
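+
+# A minimal editorial sketch (an illustration, not part of scipy's API) of
+# the quantity maxdists computes, assuming a valid linkage matrix whose rows
+# always list child clusters before their parents:
+def _naive_maxdists_sketch(Z):
+    n = Z.shape[0] + 1
+    md = np.zeros(n - 1)
+    for k in range(n - 1):
+        md[k] = Z[k, 2]
+        for child in (int(Z[k, 0]), int(Z[k, 1])):
+            if child >= n:  # non-singleton child: reuse its subtree maximum
+                md[k] = max(md[k], md[child - n])
+    return md
+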
+
+def maxinconsts(Z, R):
+    """
+    Return the maximum inconsistency coefficient for each
+    non-singleton cluster and its children.
+
+    Parameters
+    ----------
+    Z : ndarray
+        The hierarchical clustering encoded as a matrix. See
+        `linkage` for more information.
+    R : ndarray
+        The inconsistency matrix.
+
+    Returns
+    -------
+    MI : ndarray
+        A monotonic ``(n-1)``-sized numpy array of doubles.
+
+    See Also
+    --------
+    linkage : for a description of what a linkage matrix is.
+    inconsistent : for the creation of an inconsistency matrix.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import median, inconsistent, maxinconsts
+    >>> from scipy.spatial.distance import pdist
+
+    Given a data set ``X``, we can apply a clustering method to obtain a
+    linkage matrix ``Z``. `scipy.cluster.hierarchy.inconsistent` can
+    also be used to obtain the inconsistency matrix ``R`` associated
+    with this clustering process:
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> Z = median(pdist(X))
+    >>> R = inconsistent(Z)
+    >>> Z
+    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
+           [ 3.        ,  4.        ,  1.        ,  2.        ],
+           [ 9.        , 10.        ,  1.        ,  2.        ],
+           [ 6.        ,  7.        ,  1.        ,  2.        ],
+           [ 2.        , 12.        ,  1.11803399,  3.        ],
+           [ 5.        , 13.        ,  1.11803399,  3.        ],
+           [ 8.        , 15.        ,  1.11803399,  3.        ],
+           [11.        , 14.        ,  1.11803399,  3.        ],
+           [18.        , 19.        ,  3.        ,  6.        ],
+           [16.        , 17.        ,  3.5       ,  6.        ],
+           [20.        , 21.        ,  3.25      , 12.        ]])
+    >>> R
+    array([[1.        , 0.        , 1.        , 0.        ],
+           [1.        , 0.        , 1.        , 0.        ],
+           [1.        , 0.        , 1.        , 0.        ],
+           [1.        , 0.        , 1.        , 0.        ],
+           [1.05901699, 0.08346263, 2.        , 0.70710678],
+           [1.05901699, 0.08346263, 2.        , 0.70710678],
+           [1.05901699, 0.08346263, 2.        , 0.70710678],
+           [1.05901699, 0.08346263, 2.        , 0.70710678],
+           [1.74535599, 1.08655358, 3.        , 1.15470054],
+           [1.91202266, 1.37522872, 3.        , 1.15470054],
+           [3.25      , 0.25      , 3.        , 0.        ]])
+
+    Here, `scipy.cluster.hierarchy.maxinconsts` can be used to compute
+    the maximum value of the inconsistency statistic (the last column of
+    ``R``) for each non-singleton cluster and its children:
+
+    >>> maxinconsts(Z, R)
+    array([0.        , 0.        , 0.        , 0.        , 0.70710678,
+           0.70710678, 0.70710678, 0.70710678, 1.15470054, 1.15470054,
+           1.15470054])
+
+    """
+    Z = np.asarray(Z, order='c')
+    R = np.asarray(R, order='c')
+    is_valid_linkage(Z, throw=True, name='Z')
+    is_valid_im(R, throw=True, name='R')
+
+    n = Z.shape[0] + 1
+    if Z.shape[0] != R.shape[0]:
+        raise ValueError("The inconsistency matrix and linkage matrix each "
+                         "have a different number of rows.")
+    MI = np.zeros((n - 1,))
+    [Z, R] = _copy_arrays_if_base_present([Z, R])
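+    # Column 3 of the inconsistency matrix holds the inconsistency
+    # coefficient, so this is equivalent to maxRstat(Z, R, 3).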
+    _hierarchy.get_max_Rfield_for_each_cluster(Z, R, MI, int(n), 3)
+    return MI
+
+
+def maxRstat(Z, R, i):
+    """
+    Return the maximum statistic for each non-singleton cluster and its
+    children.
+
+    Parameters
+    ----------
+    Z : array_like
+        The hierarchical clustering encoded as a matrix. See `linkage` for more
+        information.
+    R : array_like
+        The inconsistency matrix.
+    i : int
+        The column of `R` to use as the statistic.
+
+    Returns
+    -------
+    MR : ndarray
+        The maximum statistic over the i'th column of the
+        inconsistency matrix `R` for each non-singleton cluster
+        node. ``MR[j]`` is the maximum over ``R[Q(j)-n, i]``, where
+        ``Q(j)`` is the set of all node ids corresponding to nodes below
+        and including ``j``.
+
+    See Also
+    --------
+    linkage : for a description of what a linkage matrix is.
+    inconsistent : for the creation of an inconsistency matrix.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import median, inconsistent, maxRstat
+    >>> from scipy.spatial.distance import pdist
+
+    Given a data set ``X``, we can apply a clustering method to obtain a
+    linkage matrix ``Z``. `scipy.cluster.hierarchy.inconsistent` can
+    also be used to obtain the inconsistency matrix ``R`` associated
+    with this clustering process:
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> Z = median(pdist(X))
+    >>> R = inconsistent(Z)
+    >>> R
+    array([[1.        , 0.        , 1.        , 0.        ],
+           [1.        , 0.        , 1.        , 0.        ],
+           [1.        , 0.        , 1.        , 0.        ],
+           [1.        , 0.        , 1.        , 0.        ],
+           [1.05901699, 0.08346263, 2.        , 0.70710678],
+           [1.05901699, 0.08346263, 2.        , 0.70710678],
+           [1.05901699, 0.08346263, 2.        , 0.70710678],
+           [1.05901699, 0.08346263, 2.        , 0.70710678],
+           [1.74535599, 1.08655358, 3.        , 1.15470054],
+           [1.91202266, 1.37522872, 3.        , 1.15470054],
+           [3.25      , 0.25      , 3.        , 0.        ]])
+
+    `scipy.cluster.hierarchy.maxRstat` can be used to compute
+    the maximum value of each column of ``R``, for each non-singleton
+    cluster and its children:
+
+    >>> maxRstat(Z, R, 0)
+    array([1.        , 1.        , 1.        , 1.        , 1.05901699,
+           1.05901699, 1.05901699, 1.05901699, 1.74535599, 1.91202266,
+           3.25      ])
+    >>> maxRstat(Z, R, 1)
+    array([0.        , 0.        , 0.        , 0.        , 0.08346263,
+           0.08346263, 0.08346263, 0.08346263, 1.08655358, 1.37522872,
+           1.37522872])
+    >>> maxRstat(Z, R, 3)
+    array([0.        , 0.        , 0.        , 0.        , 0.70710678,
+           0.70710678, 0.70710678, 0.70710678, 1.15470054, 1.15470054,
+           1.15470054])
+
+    """
+    Z = np.asarray(Z, order='c')
+    R = np.asarray(R, order='c')
+    is_valid_linkage(Z, throw=True, name='Z')
+    is_valid_im(R, throw=True, name='R')
+    if type(i) is not int:
+        raise TypeError('The third argument must be an integer.')
+    if i < 0 or i > 3:
+        raise ValueError('i must be an integer between 0 and 3 inclusive.')
+
+    if Z.shape[0] != R.shape[0]:
+        raise ValueError("The inconsistency matrix and linkage matrix each "
+                         "have a different number of rows.")
+
+    n = Z.shape[0] + 1
+    MR = np.zeros((n - 1,))
+    [Z, R] = _copy_arrays_if_base_present([Z, R])
+    _hierarchy.get_max_Rfield_for_each_cluster(Z, R, MR, int(n), i)
+    return MR
+
+
+def leaders(Z, T):
+    """
+    Return the root nodes in a hierarchical clustering.
+
+    Returns the root nodes in a hierarchical clustering corresponding
+    to a cut defined by a flat cluster assignment vector ``T``. See
+    the ``fcluster`` function for more information on the format of ``T``.
+
+    For each flat cluster :math:`j` of the :math:`k` flat clusters
+    represented in the n-sized flat cluster assignment vector ``T``,
+    this function finds the lowest cluster node :math:`i` in the linkage
+    tree ``Z``, such that:
+
+      * leaf descendants belong only to flat cluster :math:`j`
+        (i.e., ``T[p]==j`` for all :math:`p` in :math:`S(i)`, where
+        :math:`S(i)` is the set of leaf ids of the leaf nodes descended
+        from cluster node :math:`i`)
+
+      * there does not exist a leaf that is not a descendant of
+        :math:`i` that also belongs to cluster :math:`j`
+        (i.e., ``T[q]!=j`` for all :math:`q` not in :math:`S(i)`). If
+        this condition is violated, ``T`` is not a valid cluster
+        assignment vector, and an exception will be thrown.
+
+    Parameters
+    ----------
+    Z : ndarray
+        The hierarchical clustering encoded as a matrix. See
+        `linkage` for more information.
+    T : ndarray
+        The flat cluster assignment vector.
+
+    Returns
+    -------
+    L : ndarray
+        The leader linkage node ids stored as a k-element 1-D array,
+        where ``k`` is the number of flat clusters found in ``T``.
+
+        ``L[j]=i`` is the linkage cluster node id that is the
+        leader of the flat cluster with id ``M[j]``. If ``i < n``, ``i``
+        corresponds to an original observation, otherwise it
+        corresponds to a non-singleton cluster.
+    M : ndarray
+        The flat cluster ids stored as a k-element 1-D array, where
+        ``k`` is the number of flat clusters found in ``T``. This allows the
+        set of flat cluster ids to be any arbitrary set of ``k`` integers.
+
+        For example: if ``L[3]=2`` and ``M[3]=8``, the flat cluster with
+        id 8's leader is linkage node 2.
+
+    See Also
+    --------
+    fcluster : for the creation of flat cluster assignments.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import ward, fcluster, leaders
+    >>> from scipy.spatial.distance import pdist
+
+    Given a linkage matrix ``Z`` - obtained after applying a clustering
+    method to a dataset ``X`` - and a flat cluster assignment array ``T``:
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> Z = ward(pdist(X))
+    >>> Z
+    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
+           [ 3.        ,  4.        ,  1.        ,  2.        ],
+           [ 6.        ,  7.        ,  1.        ,  2.        ],
+           [ 9.        , 10.        ,  1.        ,  2.        ],
+           [ 2.        , 12.        ,  1.29099445,  3.        ],
+           [ 5.        , 13.        ,  1.29099445,  3.        ],
+           [ 8.        , 14.        ,  1.29099445,  3.        ],
+           [11.        , 15.        ,  1.29099445,  3.        ],
+           [16.        , 17.        ,  5.77350269,  6.        ],
+           [18.        , 19.        ,  5.77350269,  6.        ],
+           [20.        , 21.        ,  8.16496581, 12.        ]])
+
+    >>> T = fcluster(Z, 3, criterion='distance')
+    >>> T
+    array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)
+
+    `scipy.cluster.hierarchy.leaders` returns the indices of the nodes
+    in the dendrogram that are the leaders of each flat cluster:
+
+    >>> L, M = leaders(Z, T)
+    >>> L
+    array([16, 17, 18, 19], dtype=int32)
+
+    (remember that indices 0-11 point to the 12 data points in ``X``,
+    whereas indices 12-22 point to the 11 rows of ``Z``)
+
+    `scipy.cluster.hierarchy.leaders` also returns the indices of
+    the flat clusters in ``T``:
+
+    >>> M
+    array([1, 2, 3, 4], dtype=int32)
+
+    """
+    Z = np.asarray(Z, order='c')
+    T = np.asarray(T, order='c')
+    if type(T) != np.ndarray or T.dtype != 'i':
+        raise TypeError('T must be a one-dimensional numpy array of integers.')
+    is_valid_linkage(Z, throw=True, name='Z')
+    if len(T) != Z.shape[0] + 1:
+        raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.')
+
+    Cl = np.unique(T)
+    kk = len(Cl)
+    L = np.zeros((kk,), dtype='i')
+    M = np.zeros((kk,), dtype='i')
+    n = Z.shape[0] + 1
+    [Z, T] = _copy_arrays_if_base_present([Z, T])
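+    # The C routine is expected to return a negative value on success; a
+    # non-negative result is the id of the linkage node at which T was found
+    # not to be a valid assignment vector.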
+    s = _hierarchy.leaders(Z, T, L, M, int(kk), int(n))
+    if s >= 0:
+        raise ValueError(('T is not a valid assignment vector. Error found '
+                          'when examining linkage node %d (< 2n-1).') % s)
+    return (L, M)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/cluster/tests/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/cluster/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/scipy/cluster/tests/hierarchy_test_data.py b/__packaged__/coreml/.python_dependencies/scipy/cluster/tests/hierarchy_test_data.py
new file mode 100644
index 00000000..7d874ca5
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/cluster/tests/hierarchy_test_data.py
@@ -0,0 +1,145 @@
+from numpy import array
+
+
+Q_X = array([[5.26563660e-01, 3.14160190e-01, 8.00656370e-02],
+             [7.50205180e-01, 4.60299830e-01, 8.98696460e-01],
+             [6.65461230e-01, 6.94011420e-01, 9.10465700e-01],
+             [9.64047590e-01, 1.43082200e-03, 7.39874220e-01],
+             [1.08159060e-01, 5.53028790e-01, 6.63804780e-02],
+             [9.31359130e-01, 8.25424910e-01, 9.52315440e-01],
+             [6.78086960e-01, 3.41903970e-01, 5.61481950e-01],
+             [9.82730940e-01, 7.04605210e-01, 8.70978630e-02],
+             [6.14691610e-01, 4.69989230e-02, 6.02406450e-01],
+             [5.80161260e-01, 9.17354970e-01, 5.88163850e-01],
+             [1.38246310e+00, 1.96358160e+00, 1.94437880e+00],
+             [2.10675860e+00, 1.67148730e+00, 1.34854480e+00],
+             [1.39880070e+00, 1.66142050e+00, 1.32224550e+00],
+             [1.71410460e+00, 1.49176380e+00, 1.45432170e+00],
+             [1.54102340e+00, 1.84374950e+00, 1.64658950e+00],
+             [2.08512480e+00, 1.84524350e+00, 2.17340850e+00],
+             [1.30748740e+00, 1.53801650e+00, 2.16007740e+00],
+             [1.41447700e+00, 1.99329070e+00, 1.99107420e+00],
+             [1.61943490e+00, 1.47703280e+00, 1.89788160e+00],
+             [1.59880600e+00, 1.54988980e+00, 1.57563350e+00],
+             [3.37247380e+00, 2.69635310e+00, 3.39981700e+00],
+             [3.13705120e+00, 3.36528090e+00, 3.06089070e+00],
+             [3.29413250e+00, 3.19619500e+00, 2.90700170e+00],
+             [2.65510510e+00, 3.06785900e+00, 2.97198540e+00],
+             [3.30941040e+00, 2.59283970e+00, 2.57714110e+00],
+             [2.59557220e+00, 3.33477370e+00, 3.08793190e+00],
+             [2.58206180e+00, 3.41615670e+00, 3.26441990e+00],
+             [2.71127000e+00, 2.77032450e+00, 2.63466500e+00],
+             [2.79617850e+00, 3.25473720e+00, 3.41801560e+00],
+             [2.64741750e+00, 2.54538040e+00, 3.25354110e+00]])
+
+ytdist = array([662., 877., 255., 412., 996., 295., 468., 268., 400., 754.,
+                564., 138., 219., 869., 669.])
+
+linkage_ytdist_single = array([[2., 5., 138., 2.],
+                               [3., 4., 219., 2.],
+                               [0., 7., 255., 3.],
+                               [1., 8., 268., 4.],
+                               [6., 9., 295., 6.]])
+
+linkage_ytdist_complete = array([[2., 5., 138., 2.],
+                                 [3., 4., 219., 2.],
+                                 [1., 6., 400., 3.],
+                                 [0., 7., 412., 3.],
+                                 [8., 9., 996., 6.]])
+
+linkage_ytdist_average = array([[2., 5., 138., 2.],
+                                [3., 4., 219., 2.],
+                                [0., 7., 333.5, 3.],
+                                [1., 6., 347.5, 3.],
+                                [8., 9., 680.77777778, 6.]])
+
+linkage_ytdist_weighted = array([[2., 5., 138., 2.],
+                                 [3., 4., 219., 2.],
+                                 [0., 7., 333.5, 3.],
+                                 [1., 6., 347.5, 3.],
+                                 [8., 9., 670.125, 6.]])
+
+# the optimal leaf ordering of linkage_ytdist_single
+linkage_ytdist_single_olo = array([[5., 2., 138., 2.],
+                                   [4., 3., 219., 2.],
+                                   [7., 0., 255., 3.],
+                                   [1., 8., 268., 4.],
+                                   [6., 9., 295., 6.]])
+
+X = array([[1.43054825, -7.5693489],
+           [6.95887839, 6.82293382],
+           [2.87137846, -9.68248579],
+           [7.87974764, -6.05485803],
+           [8.24018364, -6.09495602],
+           [7.39020262, 8.54004355]])
+
+linkage_X_centroid = array([[3., 4., 0.36265956, 2.],
+                            [1., 5., 1.77045373, 2.],
+                            [0., 2., 2.55760419, 2.],
+                            [6., 8., 6.43614494, 4.],
+                            [7., 9., 15.17363237, 6.]])
+
+linkage_X_median = array([[3., 4., 0.36265956, 2.],
+                          [1., 5., 1.77045373, 2.],
+                          [0., 2., 2.55760419, 2.],
+                          [6., 8., 6.43614494, 4.],
+                          [7., 9., 15.17363237, 6.]])
+
+linkage_X_ward = array([[3., 4., 0.36265956, 2.],
+                        [1., 5., 1.77045373, 2.],
+                        [0., 2., 2.55760419, 2.],
+                        [6., 8., 9.10208346, 4.],
+                        [7., 9., 24.7784379, 6.]])
+
+# the optimal leaf ordering of linkage_X_ward
+linkage_X_ward_olo = array([[4., 3., 0.36265956, 2.],
+                            [5., 1., 1.77045373, 2.],
+                            [2., 0., 2.55760419, 2.],
+                            [6., 8., 9.10208346, 4.],
+                            [7., 9., 24.7784379, 6.]])
+
+inconsistent_ytdist = {
+    1: array([[138., 0., 1., 0.],
+              [219., 0., 1., 0.],
+              [255., 0., 1., 0.],
+              [268., 0., 1., 0.],
+              [295., 0., 1., 0.]]),
+    2: array([[138., 0., 1., 0.],
+              [219., 0., 1., 0.],
+              [237., 25.45584412, 2., 0.70710678],
+              [261.5, 9.19238816, 2., 0.70710678],
+              [233.66666667, 83.9424406, 3., 0.7306594]]),
+    3: array([[138., 0., 1., 0.],
+              [219., 0., 1., 0.],
+              [237., 25.45584412, 2., 0.70710678],
+              [247.33333333, 25.38372182, 3., 0.81417007],
+              [239., 69.36377537, 4., 0.80733783]]),
+    4: array([[138., 0., 1., 0.],
+              [219., 0., 1., 0.],
+              [237., 25.45584412, 2., 0.70710678],
+              [247.33333333, 25.38372182, 3., 0.81417007],
+              [235., 60.73302232, 5., 0.98793042]])}
+
+fcluster_inconsistent = {
+    0.8: array([6, 2, 2, 4, 6, 2, 3, 7, 3, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 1,
+                1, 1, 1, 1, 1, 1, 1, 1, 1]),
+    1.0: array([6, 2, 2, 4, 6, 2, 3, 7, 3, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 1,
+                1, 1, 1, 1, 1, 1, 1, 1, 1]),
+    2.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+                1, 1, 1, 1, 1, 1, 1, 1, 1])}
+
+fcluster_distance = {
+    0.6: array([4, 4, 4, 4, 4, 4, 4, 5, 4, 4, 6, 6, 6, 6, 6, 7, 6, 6, 6, 6, 3,
+                1, 1, 1, 2, 1, 1, 1, 1, 1]),
+    1.0: array([2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1,
+                1, 1, 1, 1, 1, 1, 1, 1, 1]),
+    2.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+                1, 1, 1, 1, 1, 1, 1, 1, 1])}
+
+fcluster_maxclust = {
+    8.0: array([5, 5, 5, 5, 5, 5, 5, 6, 5, 5, 7, 7, 7, 7, 7, 8, 7, 7, 7, 7, 4,
+                1, 1, 1, 3, 1, 1, 1, 1, 2]),
+    4.0: array([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2,
+                1, 1, 1, 1, 1, 1, 1, 1, 1]),
+    1.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+                1, 1, 1, 1, 1, 1, 1, 1, 1])}
diff --git a/__packaged__/coreml/.python_dependencies/scipy/cluster/tests/test_disjoint_set.py b/__packaged__/coreml/.python_dependencies/scipy/cluster/tests/test_disjoint_set.py
new file mode 100644
index 00000000..3e693e9c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/cluster/tests/test_disjoint_set.py
@@ -0,0 +1,201 @@
+import pytest
+from pytest import raises as assert_raises
+import numpy as np
+from scipy.cluster.hierarchy import DisjointSet
+import string
+
+
+def generate_random_token():
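+    # Yield an endless stream of hashable tokens of mixed type (ints, floats,
+    # letters, None, and small tuples of these) so DisjointSet is exercised
+    # with heterogeneous elements.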
+    k = len(string.ascii_letters)
+    tokens = list(np.arange(k, dtype=int))
+    tokens += list(np.arange(k, dtype=float))
+    tokens += list(string.ascii_letters)
+    tokens += [None for i in range(k)]
+    tokens = np.array(tokens, dtype=object)
+    rng = np.random.RandomState(seed=0)
+
+    while True:
+        size = rng.randint(1, 3)
+        element = rng.choice(tokens, size)
+        if size == 1:
+            yield element[0]
+        else:
+            yield tuple(element)
+
+
+def get_elements(n):
+    # a dict preserves insertion order, giving deterministic iteration
+    # without the difficulty of comparing numpy ints
+    elements = {}
+    for element in generate_random_token():
+        if element not in elements:
+            elements[element] = len(elements)
+            if len(elements) >= n:
+                break
+    return list(elements.keys())
+
+
+def test_init():
+    n = 10
+    elements = get_elements(n)
+    dis = DisjointSet(elements)
+    assert dis.n_subsets == n
+    assert list(dis) == elements
+
+
+def test_len():
+    n = 10
+    elements = get_elements(n)
+    dis = DisjointSet(elements)
+    assert len(dis) == n
+
+    dis.add("dummy")
+    assert len(dis) == n + 1
+
+
+@pytest.mark.parametrize("n", [10, 100])
+def test_contains(n):
+    elements = get_elements(n)
+    dis = DisjointSet(elements)
+    for x in elements:
+        assert x in dis
+
+    assert "dummy" not in dis
+
+
+@pytest.mark.parametrize("n", [10, 100])
+def test_add(n):
+    elements = get_elements(n)
+    dis1 = DisjointSet(elements)
+
+    dis2 = DisjointSet()
+    for i, x in enumerate(elements):
+        dis2.add(x)
+        assert len(dis2) == i + 1
+
+        # test idempotency by adding element again
+        dis2.add(x)
+        assert len(dis2) == i + 1
+
+    assert list(dis1) == list(dis2)
+
+
+def test_element_not_present():
+    elements = get_elements(n=10)
+    dis = DisjointSet(elements)
+
+    with assert_raises(KeyError):
+        dis["dummy"]
+
+    with assert_raises(KeyError):
+        dis.merge(elements[0], "dummy")
+
+    with assert_raises(KeyError):
+        dis.connected(elements[0], "dummy")
+
+
+@pytest.mark.parametrize("direction", ["forwards", "backwards"])
+@pytest.mark.parametrize("n", [10, 100])
+def test_linear_union_sequence(n, direction):
+    elements = get_elements(n)
+    dis = DisjointSet(elements)
+    assert elements == list(dis)
+
+    indices = list(range(n - 1))
+    if direction == "backwards":
+        indices = indices[::-1]
+
+    for it, i in enumerate(indices):
+        assert not dis.connected(elements[i], elements[i + 1])
+        assert dis.merge(elements[i], elements[i + 1])
+        assert dis.connected(elements[i], elements[i + 1])
+        assert dis.n_subsets == n - 1 - it
+
+    roots = [dis[i] for i in elements]
+    if direction == "forwards":
+        assert all(elements[0] == r for r in roots)
+    else:
+        assert all(elements[-2] == r for r in roots)
+    assert not dis.merge(elements[0], elements[-1])
+
+
+@pytest.mark.parametrize("n", [10, 100])
+def test_self_unions(n):
+    elements = get_elements(n)
+    dis = DisjointSet(elements)
+
+    for x in elements:
+        assert dis.connected(x, x)
+        assert not dis.merge(x, x)
+        assert dis.connected(x, x)
+    assert dis.n_subsets == len(elements)
+
+    assert elements == list(dis)
+    roots = [dis[x] for x in elements]
+    assert elements == roots
+
+
+@pytest.mark.parametrize("order", ["ab", "ba"])
+@pytest.mark.parametrize("n", [10, 100])
+def test_equal_size_ordering(n, order):
+    elements = get_elements(n)
+    dis = DisjointSet(elements)
+
+    rng = np.random.RandomState(seed=0)
+    indices = np.arange(n)
+    rng.shuffle(indices)
+
+    for i in range(0, len(indices), 2):
+        a, b = elements[indices[i]], elements[indices[i + 1]]
+        if order == "ab":
+            assert dis.merge(a, b)
+        else:
+            assert dis.merge(b, a)
+
+        expected = elements[min(indices[i], indices[i + 1])]
+        assert dis[a] == expected
+        assert dis[b] == expected
+
+
+@pytest.mark.parametrize("kmax", [5, 10])
+def test_binary_tree(kmax):
+    n = 2**kmax
+    elements = get_elements(n)
+    dis = DisjointSet(elements)
+    rng = np.random.RandomState(seed=0)
+
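+    # Merge in rounds of doubling block size: at block size k, each block of
+    # k elements is united with its right neighbour, so after kmax rounds the
+    # structure collapses into a single balanced tree.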
+    for k in 2**np.arange(kmax):
+        for i in range(0, n, 2 * k):
+            r1, r2 = rng.randint(0, k, size=2)
+            a, b = elements[i + r1], elements[i + k + r2]
+            assert not dis.connected(a, b)
+            assert dis.merge(a, b)
+            assert dis.connected(a, b)
+
+        assert elements == list(dis)
+        roots = [dis[i] for i in elements]
+        expected_indices = np.arange(n) - np.arange(n) % (2 * k)
+        expected = [elements[i] for i in expected_indices]
+        assert roots == expected
+
+
+@pytest.mark.parametrize("n", [10, 100])
+def test_subsets(n):
+    elements = get_elements(n)
+    dis = DisjointSet(elements)
+
+    rng = np.random.RandomState(seed=0)
+    for i, j in rng.randint(0, n, (n, 2)):
+        x = elements[i]
+        y = elements[j]
+
+        expected = {element for element in dis if {dis[element]} == {dis[x]}}
+        assert expected == dis.subset(x)
+
+        expected = {dis[element]: set() for element in dis}
+        for element in dis:
+            expected[dis[element]].add(element)
+        expected = list(expected.values())
+        assert expected == dis.subsets()
+
+        dis.merge(x, y)
+        assert dis.subset(x) == dis.subset(y)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/cluster/tests/test_hierarchy.py b/__packaged__/coreml/.python_dependencies/scipy/cluster/tests/test_hierarchy.py
new file mode 100644
index 00000000..cdccba66
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/cluster/tests/test_hierarchy.py
@@ -0,0 +1,1121 @@
+#
+# Author: Damian Eads
+# Date: April 17, 2008
+#
+# Copyright (C) 2008 Damian Eads
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above
+#    copyright notice, this list of conditions and the following
+#    disclaimer in the documentation and/or other materials provided
+#    with the distribution.
+#
+# 3. The name of the author may not be used to endorse or promote
+#    products derived from this software without specific prior
+#    written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import numpy as np
+from numpy.testing import assert_allclose, assert_equal, assert_, assert_warns
+import pytest
+from pytest import raises as assert_raises
+
+import scipy.cluster.hierarchy
+from scipy.cluster.hierarchy import (
+    ClusterWarning, linkage, from_mlab_linkage, to_mlab_linkage,
+    num_obs_linkage, inconsistent, cophenet, fclusterdata, fcluster,
+    is_isomorphic, single, leaders,
+    correspond, is_monotonic, maxdists, maxinconsts, maxRstat,
+    is_valid_linkage, is_valid_im, to_tree, leaves_list, dendrogram,
+    set_link_color_palette, cut_tree, optimal_leaf_ordering,
+    _order_cluster_tree, _hierarchy, _LINKAGE_METHODS)
+from scipy.spatial.distance import pdist
+from scipy.cluster._hierarchy import Heap
+
+from . import hierarchy_test_data
+
+
+# Matplotlib is not a scipy dependency but is optionally used in dendrogram, so
+# check if it's available
+try:
+    import matplotlib
+    # and set the backend to be Agg (no gui)
+    matplotlib.use('Agg')
+    # before importing pyplot
+    import matplotlib.pyplot as plt
+    have_matplotlib = True
+except Exception:
+    have_matplotlib = False
+
+
+class TestLinkage:
+    def test_linkage_non_finite_elements_in_distance_matrix(self):
+        # Tests linkage(Y) where Y contains a non-finite element (e.g. NaN or Inf).
+        # Exception expected.
+        y = np.zeros((6,))
+        y[0] = np.nan
+        assert_raises(ValueError, linkage, y)
+
+    def test_linkage_empty_distance_matrix(self):
+        # Tests linkage(Y) where Y is an empty condensed distance matrix. Exception expected.
+        y = np.zeros((0,))
+        assert_raises(ValueError, linkage, y)
+
+    def test_linkage_tdist(self):
+        for method in ['single', 'complete', 'average', 'weighted']:
+            self.check_linkage_tdist(method)
+
+    def check_linkage_tdist(self, method):
+        # Tests linkage(Y, method) on the tdist data set.
+        Z = linkage(hierarchy_test_data.ytdist, method)
+        expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_' + method)
+        assert_allclose(Z, expectedZ, atol=1e-10)
+
+    def test_linkage_X(self):
+        for method in ['centroid', 'median', 'ward']:
+            self.check_linkage_q(method)
+
+    def check_linkage_q(self, method):
+        # Tests linkage(Y, method) on the Q data set.
+        Z = linkage(hierarchy_test_data.X, method)
+        expectedZ = getattr(hierarchy_test_data, 'linkage_X_' + method)
+        assert_allclose(Z, expectedZ, atol=1e-06)
+
+        y = scipy.spatial.distance.pdist(hierarchy_test_data.X,
+                                         metric="euclidean")
+        Z = linkage(y, method)
+        assert_allclose(Z, expectedZ, atol=1e-06)
+
+    def test_compare_with_trivial(self):
+        rng = np.random.RandomState(0)
+        n = 20
+        X = rng.rand(n, 2)
+        d = pdist(X)
+
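+        # Exercise the public linkage() dispatcher against a direct call into
+        # the C-level implementation for every registered method.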
+        for method, code in _LINKAGE_METHODS.items():
+            Z_trivial = _hierarchy.linkage(d, n, code)
+            Z = linkage(d, method)
+            assert_allclose(Z_trivial, Z, rtol=1e-14, atol=1e-15)
+
+    def test_optimal_leaf_ordering(self):
+        Z = linkage(hierarchy_test_data.ytdist, optimal_ordering=True)
+        expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_single_olo')
+        assert_allclose(Z, expectedZ, atol=1e-10)
+
+
+class TestLinkageTies:
+    _expectations = {
+        'single': np.array([[0, 1, 1.41421356, 2],
+                            [2, 3, 1.41421356, 3]]),
+        'complete': np.array([[0, 1, 1.41421356, 2],
+                              [2, 3, 2.82842712, 3]]),
+        'average': np.array([[0, 1, 1.41421356, 2],
+                             [2, 3, 2.12132034, 3]]),
+        'weighted': np.array([[0, 1, 1.41421356, 2],
+                              [2, 3, 2.12132034, 3]]),
+        'centroid': np.array([[0, 1, 1.41421356, 2],
+                              [2, 3, 2.12132034, 3]]),
+        'median': np.array([[0, 1, 1.41421356, 2],
+                            [2, 3, 2.12132034, 3]]),
+        'ward': np.array([[0, 1, 1.41421356, 2],
+                          [2, 3, 2.44948974, 3]]),
+    }
+
+    def test_linkage_ties(self):
+        for method in ['single', 'complete', 'average', 'weighted', 'centroid', 'median', 'ward']:
+            self.check_linkage_ties(method)
+
+    def check_linkage_ties(self, method):
+        X = np.array([[-1, -1], [0, 0], [1, 1]])
+        Z = linkage(X, method=method)
+        expectedZ = self._expectations[method]
+        assert_allclose(Z, expectedZ, atol=1e-06)
+
+
+class TestInconsistent:
+    def test_inconsistent_tdist(self):
+        for depth in hierarchy_test_data.inconsistent_ytdist:
+            self.check_inconsistent_tdist(depth)
+
+    def check_inconsistent_tdist(self, depth):
+        Z = hierarchy_test_data.linkage_ytdist_single
+        assert_allclose(inconsistent(Z, depth),
+                        hierarchy_test_data.inconsistent_ytdist[depth])
+
+
+class TestCopheneticDistance:
+    def test_linkage_cophenet_tdist_Z(self):
+        # Tests cophenet(Z) on tdist data set.
+        expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
+                              295, 138, 219, 295, 295])
+        Z = hierarchy_test_data.linkage_ytdist_single
+        M = cophenet(Z)
+        assert_allclose(M, expectedM, atol=1e-10)
+
+    def test_linkage_cophenet_tdist_Z_Y(self):
+        # Tests cophenet(Z, Y) on tdist data set.
+        Z = hierarchy_test_data.linkage_ytdist_single
+        (c, M) = cophenet(Z, hierarchy_test_data.ytdist)
+        expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
+                              295, 138, 219, 295, 295])
+        expectedc = 0.639931296433393415057366837573
+        assert_allclose(c, expectedc, atol=1e-10)
+        assert_allclose(M, expectedM, atol=1e-10)
+
+
+class TestMLabLinkageConversion:
+    def test_mlab_linkage_conversion_empty(self):
+        # Tests from/to_mlab_linkage on empty linkage array.
+        X = np.asarray([])
+        assert_equal(from_mlab_linkage([]), X)
+        assert_equal(to_mlab_linkage([]), X)
+
+    def test_mlab_linkage_conversion_single_row(self):
+        # Tests from/to_mlab_linkage on linkage array with single row.
+        Z = np.asarray([[0., 1., 3., 2.]])
+        Zm = [[1, 2, 3]]
+        assert_equal(from_mlab_linkage(Zm), Z)
+        assert_equal(to_mlab_linkage(Z), Zm)
+
+    def test_mlab_linkage_conversion_multiple_rows(self):
+        # Tests from/to_mlab_linkage on linkage array with multiple rows.
+        Zm = np.asarray([[3, 6, 138], [4, 5, 219],
+                         [1, 8, 255], [2, 9, 268], [7, 10, 295]])
+        Z = np.array([[2., 5., 138., 2.],
+                      [3., 4., 219., 2.],
+                      [0., 7., 255., 3.],
+                      [1., 8., 268., 4.],
+                      [6., 9., 295., 6.]],
+                      dtype=np.double)
+        assert_equal(from_mlab_linkage(Zm), Z)
+        assert_equal(to_mlab_linkage(Z), Zm)
+
+
+class TestFcluster:
+    def test_fclusterdata(self):
+        for t in hierarchy_test_data.fcluster_inconsistent:
+            self.check_fclusterdata(t, 'inconsistent')
+        for t in hierarchy_test_data.fcluster_distance:
+            self.check_fclusterdata(t, 'distance')
+        for t in hierarchy_test_data.fcluster_maxclust:
+            self.check_fclusterdata(t, 'maxclust')
+
+    def check_fclusterdata(self, t, criterion):
+        # Tests fclusterdata(X, criterion=criterion, t=t) on a random 3-cluster data set.
+        expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]
+        X = hierarchy_test_data.Q_X
+        T = fclusterdata(X, criterion=criterion, t=t)
+        assert_(is_isomorphic(T, expectedT))
+
+    def test_fcluster(self):
+        for t in hierarchy_test_data.fcluster_inconsistent:
+            self.check_fcluster(t, 'inconsistent')
+        for t in hierarchy_test_data.fcluster_distance:
+            self.check_fcluster(t, 'distance')
+        for t in hierarchy_test_data.fcluster_maxclust:
+            self.check_fcluster(t, 'maxclust')
+
+    def check_fcluster(self, t, criterion):
+        # Tests fcluster(Z, criterion=criterion, t=t) on a random 3-cluster data set.
+        expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]
+        Z = single(hierarchy_test_data.Q_X)
+        T = fcluster(Z, criterion=criterion, t=t)
+        assert_(is_isomorphic(T, expectedT))
+
+    def test_fcluster_monocrit(self):
+        for t in hierarchy_test_data.fcluster_distance:
+            self.check_fcluster_monocrit(t)
+        for t in hierarchy_test_data.fcluster_maxclust:
+            self.check_fcluster_maxclust_monocrit(t)
+
+    def check_fcluster_monocrit(self, t):
+        expectedT = hierarchy_test_data.fcluster_distance[t]
+        Z = single(hierarchy_test_data.Q_X)
+        T = fcluster(Z, t, criterion='monocrit', monocrit=maxdists(Z))
+        assert_(is_isomorphic(T, expectedT))
+
+    def check_fcluster_maxclust_monocrit(self, t):
+        expectedT = hierarchy_test_data.fcluster_maxclust[t]
+        Z = single(hierarchy_test_data.Q_X)
+        T = fcluster(Z, t, criterion='maxclust_monocrit', monocrit=maxdists(Z))
+        assert_(is_isomorphic(T, expectedT))
+
+
+class TestLeaders:
+    def test_leaders_single(self):
+        # Tests leaders using a flat clustering generated by single linkage.
+        X = hierarchy_test_data.Q_X
+        Y = pdist(X)
+        Z = linkage(Y)
+        T = fcluster(Z, criterion='maxclust', t=3)
+        Lright = (np.array([53, 55, 56]), np.array([2, 3, 1]))
+        L = leaders(Z, T)
+        assert_equal(L, Lright)
+
+
+class TestIsIsomorphic:
+    def test_is_isomorphic_1(self):
+        # Tests is_isomorphic on test case #1 (one flat cluster, different labelings)
+        a = [1, 1, 1]
+        b = [2, 2, 2]
+        assert_(is_isomorphic(a, b))
+        assert_(is_isomorphic(b, a))
+
+    def test_is_isomorphic_2(self):
+        # Tests is_isomorphic on test case #2 (two flat clusters, different labelings)
+        a = [1, 7, 1]
+        b = [2, 3, 2]
+        assert_(is_isomorphic(a, b))
+        assert_(is_isomorphic(b, a))
+
+    def test_is_isomorphic_3(self):
+        # Tests is_isomorphic on test case #3 (no flat clusters)
+        a = []
+        b = []
+        assert_(is_isomorphic(a, b))
+
+    def test_is_isomorphic_4A(self):
+        # Tests is_isomorphic on test case #4A (3 flat clusters, different labelings, isomorphic)
+        a = [1, 2, 3]
+        b = [1, 3, 2]
+        assert_(is_isomorphic(a, b))
+        assert_(is_isomorphic(b, a))
+
+    def test_is_isomorphic_4B(self):
+        # Tests is_isomorphic on test case #4B (3 flat clusters, different labelings, nonisomorphic)
+        a = [1, 2, 3, 3]
+        b = [1, 3, 2, 3]
+        assert_(is_isomorphic(a, b) == False)
+        assert_(is_isomorphic(b, a) == False)
+
+    def test_is_isomorphic_4C(self):
+        # Tests is_isomorphic on test case #4C (3 flat clusters, different labelings, isomorphic)
+        a = [7, 2, 3]
+        b = [6, 3, 2]
+        assert_(is_isomorphic(a, b))
+        assert_(is_isomorphic(b, a))
+
+    def test_is_isomorphic_5(self):
+        # Tests is_isomorphic on test case #5 (1000 observations, 2/3/5 random
+        # clusters, random permutation of the labeling).
+        for nc in [2, 3, 5]:
+            self.help_is_isomorphic_randperm(1000, nc)
+
+    def test_is_isomorphic_6(self):
+        # Tests is_isomorphic on test case #5A (1000 observations, 2/3/5 random
+        # clusters, random permutation of the labeling, slightly
+        # nonisomorphic.)
+        for nc in [2, 3, 5]:
+            self.help_is_isomorphic_randperm(1000, nc, True, 5)
+
+    def test_is_isomorphic_7(self):
+        # Regression test for gh-6271
+        assert_(not is_isomorphic([1, 2, 3], [1, 1, 1]))
+
+    def help_is_isomorphic_randperm(self, nobs, nclusters, noniso=False, nerrors=0):
+        for k in range(3):
+            a = np.int_(np.random.rand(nobs) * nclusters)
+            b = np.zeros(a.size, dtype=np.int_)
+            P = np.random.permutation(nclusters)
+            for i in range(0, a.shape[0]):
+                b[i] = P[a[i]]
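+            # Optionally corrupt a few labels so that the two assignments
+            # become slightly non-isomorphic.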
+            if noniso:
+                Q = np.random.permutation(nobs)
+                b[Q[0:nerrors]] += 1
+                b[Q[0:nerrors]] %= nclusters
+            assert_(is_isomorphic(a, b) == (not noniso))
+            assert_(is_isomorphic(b, a) == (not noniso))
+
+
+class TestIsValidLinkage:
+    def test_is_valid_linkage_various_size(self):
+        for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
+                                  (1, 4, True), (2, 4, True)]:
+            self.check_is_valid_linkage_various_size(nrow, ncol, valid)
+
+    def check_is_valid_linkage_various_size(self, nrow, ncol, valid):
+        # Tests is_valid_linkage(Z) with linkage matrices of various sizes
+        Z = np.asarray([[0, 1, 3.0, 2, 5],
+                        [3, 2, 4.0, 3, 3]], dtype=np.double)
+        Z = Z[:nrow, :ncol]
+        assert_(is_valid_linkage(Z) == valid)
+        if not valid:
+            assert_raises(ValueError, is_valid_linkage, Z, throw=True)
+
+    def test_is_valid_linkage_int_type(self):
+        # Tests is_valid_linkage(Z) with integer type.
+        Z = np.asarray([[0, 1, 3.0, 2],
+                        [3, 2, 4.0, 3]], dtype=int)
+        assert_(is_valid_linkage(Z) == False)
+        assert_raises(TypeError, is_valid_linkage, Z, throw=True)
+
+    def test_is_valid_linkage_empty(self):
+        # Tests is_valid_linkage(Z) with empty linkage.
+        Z = np.zeros((0, 4), dtype=np.double)
+        assert_(is_valid_linkage(Z) == False)
+        assert_raises(ValueError, is_valid_linkage, Z, throw=True)
+
+    def test_is_valid_linkage_4_and_up(self):
+        # Tests is_valid_linkage(Z) on linkage on observation sets between
+        # sizes 4 and 15 (step size 3).
+        for i in range(4, 15, 3):
+            y = np.random.rand(i*(i-1)//2)
+            Z = linkage(y)
+            assert_(is_valid_linkage(Z) == True)
+
+    def test_is_valid_linkage_4_and_up_neg_index_left(self):
+        # Tests is_valid_linkage(Z) on linkage on observation sets between
+        # sizes 4 and 15 (step size 3) with negative indices (left).
+        for i in range(4, 15, 3):
+            y = np.random.rand(i*(i-1)//2)
+            Z = linkage(y)
+            Z[i//2,0] = -2
+            assert_(is_valid_linkage(Z) == False)
+            assert_raises(ValueError, is_valid_linkage, Z, throw=True)
+
+    def test_is_valid_linkage_4_and_up_neg_index_right(self):
+        # Tests is_valid_linkage(Z) on linkage on observation sets between
+        # sizes 4 and 15 (step size 3) with negative indices (right).
+        for i in range(4, 15, 3):
+            y = np.random.rand(i*(i-1)//2)
+            Z = linkage(y)
+            Z[i//2,1] = -2
+            assert_(is_valid_linkage(Z) == False)
+            assert_raises(ValueError, is_valid_linkage, Z, throw=True)
+
+    def test_is_valid_linkage_4_and_up_neg_dist(self):
+        # Tests is_valid_linkage(Z) on linkage on observation sets between
+        # sizes 4 and 15 (step size 3) with negative distances.
+        for i in range(4, 15, 3):
+            y = np.random.rand(i*(i-1)//2)
+            Z = linkage(y)
+            Z[i//2,2] = -0.5
+            assert_(is_valid_linkage(Z) == False)
+            assert_raises(ValueError, is_valid_linkage, Z, throw=True)
+
+    def test_is_valid_linkage_4_and_up_neg_counts(self):
+        # Tests is_valid_linkage(Z) on linkage on observation sets between
+        # sizes 4 and 15 (step size 3) with negative counts.
+        for i in range(4, 15, 3):
+            y = np.random.rand(i*(i-1)//2)
+            Z = linkage(y)
+            Z[i//2,3] = -2
+            assert_(is_valid_linkage(Z) == False)
+            assert_raises(ValueError, is_valid_linkage, Z, throw=True)
+
+
+class TestIsValidInconsistent:
+    def test_is_valid_im_int_type(self):
+        # Tests is_valid_im(R) with integer type.
+        R = np.asarray([[0, 1, 3.0, 2],
+                        [3, 2, 4.0, 3]], dtype=int)
+        assert_(is_valid_im(R) == False)
+        assert_raises(TypeError, is_valid_im, R, throw=True)
+
+    def test_is_valid_im_various_size(self):
+        for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
+                                  (1, 4, True), (2, 4, True)]:
+            self.check_is_valid_im_various_size(nrow, ncol, valid)
+
+    def check_is_valid_im_various_size(self, nrow, ncol, valid):
+        # Tests is_valid_im(R) with inconsistency matrices of various sizes
+        R = np.asarray([[0, 1, 3.0, 2, 5],
+                        [3, 2, 4.0, 3, 3]], dtype=np.double)
+        R = R[:nrow, :ncol]
+        assert_(is_valid_im(R) == valid)
+        if not valid:
+            assert_raises(ValueError, is_valid_im, R, throw=True)
+
+    def test_is_valid_im_empty(self):
+        # Tests is_valid_im(R) with empty inconsistency matrix.
+        R = np.zeros((0, 4), dtype=np.double)
+        assert_(is_valid_im(R) == False)
+        assert_raises(ValueError, is_valid_im, R, throw=True)
+
+    def test_is_valid_im_4_and_up(self):
+        # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
+        # (step size 3).
+        for i in range(4, 15, 3):
+            y = np.random.rand(i*(i-1)//2)
+            Z = linkage(y)
+            R = inconsistent(Z)
+            assert_(is_valid_im(R) == True)
+
+    def test_is_valid_im_4_and_up_neg_index_left(self):
+        # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
+        # (step size 3) with negative link height means.
+        for i in range(4, 15, 3):
+            y = np.random.rand(i*(i-1)//2)
+            Z = linkage(y)
+            R = inconsistent(Z)
+            R[i//2,0] = -2.0
+            assert_(is_valid_im(R) == False)
+            assert_raises(ValueError, is_valid_im, R, throw=True)
+
+    def test_is_valid_im_4_and_up_neg_index_right(self):
+        # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
+        # (step size 3) with negative link height standard deviations.
+        for i in range(4, 15, 3):
+            y = np.random.rand(i*(i-1)//2)
+            Z = linkage(y)
+            R = inconsistent(Z)
+            R[i//2,1] = -2.0
+            assert_(is_valid_im(R) == False)
+            assert_raises(ValueError, is_valid_im, R, throw=True)
+
+    def test_is_valid_im_4_and_up_neg_dist(self):
+        # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
+        # (step size 3) with negative link counts.
+        for i in range(4, 15, 3):
+            y = np.random.rand(i*(i-1)//2)
+            Z = linkage(y)
+            R = inconsistent(Z)
+            R[i//2,2] = -0.5
+            assert_(is_valid_im(R) == False)
+            assert_raises(ValueError, is_valid_im, R, throw=True)
+
+
+class TestNumObsLinkage:
+    def test_num_obs_linkage_empty(self):
+        # Tests num_obs_linkage(Z) with empty linkage.
+        Z = np.zeros((0, 4), dtype=np.double)
+        assert_raises(ValueError, num_obs_linkage, Z)
+
+    def test_num_obs_linkage_1x4(self):
+        # Tests num_obs_linkage(Z) on linkage over 2 observations.
+        Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
+        assert_equal(num_obs_linkage(Z), 2)
+
+    def test_num_obs_linkage_2x4(self):
+        # Tests num_obs_linkage(Z) on linkage over 3 observations.
+        Z = np.asarray([[0, 1, 3.0, 2],
+                        [3, 2, 4.0, 3]], dtype=np.double)
+        assert_equal(num_obs_linkage(Z), 3)
+
+    def test_num_obs_linkage_4_and_up(self):
+        # Tests num_obs_linkage(Z) on linkage on observation sets between sizes
+        # 4 and 15 (step size 3).
+        for i in range(4, 15, 3):
+            y = np.random.rand(i*(i-1)//2)
+            Z = linkage(y)
+            assert_equal(num_obs_linkage(Z), i)
+
+
+class TestLeavesList:
+    def test_leaves_list_1x4(self):
+        # Tests leaves_list(Z) on a 1x4 linkage.
+        Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
+        to_tree(Z)
+        assert_equal(leaves_list(Z), [0, 1])
+
+    def test_leaves_list_2x4(self):
+        # Tests leaves_list(Z) on a 2x4 linkage.
+        Z = np.asarray([[0, 1, 3.0, 2],
+                        [3, 2, 4.0, 3]], dtype=np.double)
+        to_tree(Z)
+        assert_equal(leaves_list(Z), [0, 1, 2])
+
+    def test_leaves_list_Q(self):
+        for method in ['single', 'complete', 'average', 'weighted', 'centroid',
+                       'median', 'ward']:
+            self.check_leaves_list_Q(method)
+
+    def check_leaves_list_Q(self, method):
+        # Tests leaves_list(Z) on the Q data set
+        X = hierarchy_test_data.Q_X
+        Z = linkage(X, method)
+        node = to_tree(Z)
+        assert_equal(node.pre_order(), leaves_list(Z))
+
+    def test_Q_subtree_pre_order(self):
+        # Tests that pre_order() works when called on sub-trees.
+        X = hierarchy_test_data.Q_X
+        Z = linkage(X, 'single')
+        node = to_tree(Z)
+        assert_equal(node.pre_order(), (node.get_left().pre_order()
+                                        + node.get_right().pre_order()))
+
+
+class TestCorrespond:
+    def test_correspond_empty(self):
+        # Tests correspond(Z, y) with empty linkage and condensed distance matrix.
+        y = np.zeros((0,))
+        Z = np.zeros((0,4))
+        assert_raises(ValueError, correspond, Z, y)
+
+    def test_correspond_2_and_up(self):
+        # Tests correspond(Z, y) on linkage and CDMs over observation sets of
+        # different sizes.
+        for i in range(2, 4):
+            y = np.random.rand(i*(i-1)//2)
+            Z = linkage(y)
+            assert_(correspond(Z, y))
+        for i in range(4, 15, 3):
+            y = np.random.rand(i*(i-1)//2)
+            Z = linkage(y)
+            assert_(correspond(Z, y))
+
+    def test_correspond_4_and_up(self):
+        # Tests correspond(Z, y) on linkage and CDMs over observation sets of
+        # different sizes. Correspondence should be false.
+        for (i, j) in (list(zip(list(range(2, 4)), list(range(3, 5)))) +
+                       list(zip(list(range(3, 5)), list(range(2, 4))))):
+            y = np.random.rand(i*(i-1)//2)
+            y2 = np.random.rand(j*(j-1)//2)
+            Z = linkage(y)
+            Z2 = linkage(y2)
+            assert_equal(correspond(Z, y2), False)
+            assert_equal(correspond(Z2, y), False)
+
+    def test_correspond_4_and_up_2(self):
+        # Tests correspond(Z, y) on linkage and CDMs over observation sets of
+        # different sizes. Correspondence should be false.
+        for (i, j) in (list(zip(list(range(2, 7)), list(range(16, 21)))) +
+                       list(zip(list(range(16, 21)), list(range(2, 7))))):
+            y = np.random.rand(i*(i-1)//2)
+            y2 = np.random.rand(j*(j-1)//2)
+            Z = linkage(y)
+            Z2 = linkage(y2)
+            assert_equal(correspond(Z, y2), False)
+            assert_equal(correspond(Z2, y), False)
+
+    def test_num_obs_linkage_multi_matrix(self):
+        # Tests num_obs_linkage with observation matrices of multiple sizes.
+        for n in range(2, 10):
+            X = np.random.rand(n, 4)
+            Y = pdist(X)
+            Z = linkage(Y)
+            assert_equal(num_obs_linkage(Z), n)
+
+
+class TestIsMonotonic:
+    def test_is_monotonic_empty(self):
+        # Tests is_monotonic(Z) on an empty linkage.
+        Z = np.zeros((0, 4))
+        assert_raises(ValueError, is_monotonic, Z)
+
+    def test_is_monotonic_1x4(self):
+        # Tests is_monotonic(Z) on 1x4 linkage. Expecting True.
+        Z = np.asarray([[0, 1, 0.3, 2]], dtype=np.double)
+        assert_equal(is_monotonic(Z), True)
+
+    def test_is_monotonic_2x4_T(self):
+        # Tests is_monotonic(Z) on 2x4 linkage. Expecting True.
+        Z = np.asarray([[0, 1, 0.3, 2],
+                        [2, 3, 0.4, 3]], dtype=np.double)
+        assert_equal(is_monotonic(Z), True)
+
+    def test_is_monotonic_2x4_F(self):
+        # Tests is_monotonic(Z) on 2x4 linkage. Expecting False.
+        Z = np.asarray([[0, 1, 0.4, 2],
+                        [2, 3, 0.3, 3]], dtype=np.double)
+        assert_equal(is_monotonic(Z), False)
+
+    def test_is_monotonic_3x4_T(self):
+        # Tests is_monotonic(Z) on 3x4 linkage. Expecting True.
+        Z = np.asarray([[0, 1, 0.3, 2],
+                        [2, 3, 0.4, 2],
+                        [4, 5, 0.6, 4]], dtype=np.double)
+        assert_equal(is_monotonic(Z), True)
+
+    def test_is_monotonic_3x4_F1(self):
+        # Tests is_monotonic(Z) on 3x4 linkage (case 1). Expecting False.
+        Z = np.asarray([[0, 1, 0.3, 2],
+                        [2, 3, 0.2, 2],
+                        [4, 5, 0.6, 4]], dtype=np.double)
+        assert_equal(is_monotonic(Z), False)
+
+    def test_is_monotonic_3x4_F2(self):
+        # Tests is_monotonic(Z) on 3x4 linkage (case 2). Expecting False.
+        Z = np.asarray([[0, 1, 0.8, 2],
+                        [2, 3, 0.4, 2],
+                        [4, 5, 0.6, 4]], dtype=np.double)
+        assert_equal(is_monotonic(Z), False)
+
+    def test_is_monotonic_3x4_F3(self):
+        # Tests is_monotonic(Z) on 3x4 linkage (case 3). Expecting False
+        Z = np.asarray([[0, 1, 0.3, 2],
+                        [2, 3, 0.4, 2],
+                        [4, 5, 0.2, 4]], dtype=np.double)
+        assert_equal(is_monotonic(Z), False)
+
+    def test_is_monotonic_tdist_linkage1(self):
+        # Tests is_monotonic(Z) on clustering generated by single linkage on
+        # tdist data set. Expecting True.
+        Z = linkage(hierarchy_test_data.ytdist, 'single')
+        assert_equal(is_monotonic(Z), True)
+
+    def test_is_monotonic_tdist_linkage2(self):
+        # Tests is_monotonic(Z) on clustering generated by single linkage on
+        # tdist data set. Perturbing. Expecting False.
+        Z = linkage(hierarchy_test_data.ytdist, 'single')
+        Z[2,2] = 0.0
+        assert_equal(is_monotonic(Z), False)
+
+    def test_is_monotonic_Q_linkage(self):
+        # Tests is_monotonic(Z) on clustering generated by single linkage on
+        # Q data set. Expecting True.
+        X = hierarchy_test_data.Q_X
+        Z = linkage(X, 'single')
+        assert_equal(is_monotonic(Z), True)
+
+
+class TestMaxDists:
+    def test_maxdists_empty_linkage(self):
+        # Tests maxdists(Z) on empty linkage. Expecting exception.
+        Z = np.zeros((0, 4), dtype=np.double)
+        assert_raises(ValueError, maxdists, Z)
+
+    def test_maxdists_one_cluster_linkage(self):
+        # Tests maxdists(Z) on linkage with one cluster.
+        Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
+        MD = maxdists(Z)
+        expectedMD = calculate_maximum_distances(Z)
+        assert_allclose(MD, expectedMD, atol=1e-15)
+
+    def test_maxdists_Q_linkage(self):
+        for method in ['single', 'complete', 'ward', 'centroid', 'median']:
+            self.check_maxdists_Q_linkage(method)
+
+    def check_maxdists_Q_linkage(self, method):
+        # Tests maxdists(Z) on the Q data set
+        X = hierarchy_test_data.Q_X
+        Z = linkage(X, method)
+        MD = maxdists(Z)
+        expectedMD = calculate_maximum_distances(Z)
+        assert_allclose(MD, expectedMD, atol=1e-15)
+
+
+class TestMaxInconsts:
+    def test_maxinconsts_empty_linkage(self):
+        # Tests maxinconsts(Z, R) on empty linkage. Expecting exception.
+        Z = np.zeros((0, 4), dtype=np.double)
+        R = np.zeros((0, 4), dtype=np.double)
+        assert_raises(ValueError, maxinconsts, Z, R)
+
+    def test_maxinconsts_difrow_linkage(self):
+        # Tests maxinconsts(Z, R) on linkage and inconsistency matrices with
+        # different numbers of clusters. Expecting exception.
+        Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
+        R = np.random.rand(2, 4)
+        assert_raises(ValueError, maxinconsts, Z, R)
+
+    def test_maxinconsts_one_cluster_linkage(self):
+        # Tests maxinconsts(Z, R) on linkage with one cluster.
+        Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
+        R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
+        MD = maxinconsts(Z, R)
+        expectedMD = calculate_maximum_inconsistencies(Z, R)
+        assert_allclose(MD, expectedMD, atol=1e-15)
+
+    def test_maxinconsts_Q_linkage(self):
+        for method in ['single', 'complete', 'ward', 'centroid', 'median']:
+            self.check_maxinconsts_Q_linkage(method)
+
+    def check_maxinconsts_Q_linkage(self, method):
+        # Tests maxinconsts(Z, R) on the Q data set
+        X = hierarchy_test_data.Q_X
+        Z = linkage(X, method)
+        R = inconsistent(Z)
+        MD = maxinconsts(Z, R)
+        expectedMD = calculate_maximum_inconsistencies(Z, R)
+        assert_allclose(MD, expectedMD, atol=1e-15)
+
+
+class TestMaxRStat:
+    def test_maxRstat_invalid_index(self):
+        for i in [3.3, -1, 4]:
+            self.check_maxRstat_invalid_index(i)
+
+    def check_maxRstat_invalid_index(self, i):
+        # Tests maxRstat(Z, R, i). Expecting exception.
+        Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
+        R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
+        if isinstance(i, int):
+            assert_raises(ValueError, maxRstat, Z, R, i)
+        else:
+            assert_raises(TypeError, maxRstat, Z, R, i)
+
+    def test_maxRstat_empty_linkage(self):
+        for i in range(4):
+            self.check_maxRstat_empty_linkage(i)
+
+    def check_maxRstat_empty_linkage(self, i):
+        # Tests maxRstat(Z, R, i) on empty linkage. Expecting exception.
+        Z = np.zeros((0, 4), dtype=np.double)
+        R = np.zeros((0, 4), dtype=np.double)
+        assert_raises(ValueError, maxRstat, Z, R, i)
+
+    def test_maxRstat_difrow_linkage(self):
+        for i in range(4):
+            self.check_maxRstat_difrow_linkage(i)
+
+    def check_maxRstat_difrow_linkage(self, i):
+        # Tests maxRstat(Z, R, i) on linkage and inconsistency matrices with
+        # different numbers of clusters. Expecting exception.
+        Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
+        R = np.random.rand(2, 4)
+        assert_raises(ValueError, maxRstat, Z, R, i)
+
+    def test_maxRstat_one_cluster_linkage(self):
+        for i in range(4):
+            self.check_maxRstat_one_cluster_linkage(i)
+
+    def check_maxRstat_one_cluster_linkage(self, i):
+        # Tests maxRstat(Z, R, i) on linkage with one cluster.
+        Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
+        R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
+        MD = maxRstat(Z, R, i)
+        expectedMD = calculate_maximum_inconsistencies(Z, R, i)
+        assert_allclose(MD, expectedMD, atol=1e-15)
+
+    def test_maxRstat_Q_linkage(self):
+        for method in ['single', 'complete', 'ward', 'centroid', 'median']:
+            for i in range(4):
+                self.check_maxRstat_Q_linkage(method, i)
+
+    def check_maxRstat_Q_linkage(self, method, i):
+        # Tests maxRstat(Z, R, i) on the Q data set
+        X = hierarchy_test_data.Q_X
+        Z = linkage(X, method)
+        R = inconsistent(Z)
+        MD = maxRstat(Z, R, i)
+        expectedMD = calculate_maximum_inconsistencies(Z, R, i)
+        assert_allclose(MD, expectedMD, atol=1e-15)
+
+
+class TestDendrogram:
+    def test_dendrogram_single_linkage_tdist(self):
+        # Tests dendrogram calculation on single linkage of the tdist data set.
+        Z = linkage(hierarchy_test_data.ytdist, 'single')
+        R = dendrogram(Z, no_plot=True)
+        leaves = R["leaves"]
+        assert_equal(leaves, [2, 5, 1, 0, 3, 4])
+
+    def test_valid_orientation(self):
+        Z = linkage(hierarchy_test_data.ytdist, 'single')
+        assert_raises(ValueError, dendrogram, Z, orientation="foo")
+
+    def test_labels_as_array_or_list(self):
+        # test for gh-12418
+        Z = linkage(hierarchy_test_data.ytdist, 'single')
+        labels = np.array([1, 3, 2, 6, 4, 5])
+        result1 = dendrogram(Z, labels=labels, no_plot=True)
+        result2 = dendrogram(Z, labels=labels.tolist(), no_plot=True)
+        assert result1 == result2
+
+    @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
+    def test_valid_label_size(self):
+        link = np.array([
+            [0, 1, 1.0, 4],
+            [2, 3, 1.0, 5],
+            [4, 5, 2.0, 6],
+        ])
+        plt.figure()
+        with pytest.raises(ValueError) as exc_info:
+            dendrogram(link, labels=list(range(100)))
+        assert "Dimensions of Z and labels must be consistent."\
+               in str(exc_info.value)
+
+        with pytest.raises(
+                ValueError,
+                match="Dimensions of Z and labels must be consistent."):
+            dendrogram(link, labels=[])
+
+        plt.close()
+
+    @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
+    def test_dendrogram_plot(self):
+        for orientation in ['top', 'bottom', 'left', 'right']:
+            self.check_dendrogram_plot(orientation)
+
+    def check_dendrogram_plot(self, orientation):
+        # Tests dendrogram plotting.
+        Z = linkage(hierarchy_test_data.ytdist, 'single')
+        expected = {'color_list': ['C1', 'C0', 'C0', 'C0', 'C0'],
+                    'dcoord': [[0.0, 138.0, 138.0, 0.0],
+                               [0.0, 219.0, 219.0, 0.0],
+                               [0.0, 255.0, 255.0, 219.0],
+                               [0.0, 268.0, 268.0, 255.0],
+                               [138.0, 295.0, 295.0, 268.0]],
+                    'icoord': [[5.0, 5.0, 15.0, 15.0],
+                               [45.0, 45.0, 55.0, 55.0],
+                               [35.0, 35.0, 50.0, 50.0],
+                               [25.0, 25.0, 42.5, 42.5],
+                               [10.0, 10.0, 33.75, 33.75]],
+                    'ivl': ['2', '5', '1', '0', '3', '4'],
+                    'leaves': [2, 5, 1, 0, 3, 4],
+                    'leaves_color_list': ['C1', 'C1', 'C0', 'C0', 'C0', 'C0'],
+                    }
+
+        fig = plt.figure()
+        ax = fig.add_subplot(221)
+
+        # test that dendrogram accepts ax keyword
+        R1 = dendrogram(Z, ax=ax, orientation=orientation)
+        assert_equal(R1, expected)
+
+        # test that dendrogram accepts and handles the leaf_font_size and
+        # leaf_rotation keywords
+        dendrogram(Z, ax=ax, orientation=orientation,
+                   leaf_font_size=20, leaf_rotation=90)
+        testlabel = (
+            ax.get_xticklabels()[0]
+            if orientation in ['top', 'bottom']
+            else ax.get_yticklabels()[0]
+        )
+        assert_equal(testlabel.get_rotation(), 90)
+        assert_equal(testlabel.get_size(), 20)
+        dendrogram(Z, ax=ax, orientation=orientation,
+                   leaf_rotation=90)
+        testlabel = (
+            ax.get_xticklabels()[0]
+            if orientation in ['top', 'bottom']
+            else ax.get_yticklabels()[0]
+        )
+        assert_equal(testlabel.get_rotation(), 90)
+        dendrogram(Z, ax=ax, orientation=orientation,
+                   leaf_font_size=20)
+        testlabel = (
+            ax.get_xticklabels()[0]
+            if orientation in ['top', 'bottom']
+            else ax.get_yticklabels()[0]
+        )
+        assert_equal(testlabel.get_size(), 20)
+        plt.close()
+
+        # test plotting to gca (will import pylab)
+        R2 = dendrogram(Z, orientation=orientation)
+        plt.close()
+        assert_equal(R2, expected)
+
+    @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
+    def test_dendrogram_truncate_mode(self):
+        Z = linkage(hierarchy_test_data.ytdist, 'single')
+
+        R = dendrogram(Z, 2, 'lastp', show_contracted=True)
+        plt.close()
+        assert_equal(R, {'color_list': ['C0'],
+                         'dcoord': [[0.0, 295.0, 295.0, 0.0]],
+                         'icoord': [[5.0, 5.0, 15.0, 15.0]],
+                         'ivl': ['(2)', '(4)'],
+                         'leaves': [6, 9],
+                         'leaves_color_list': ['C0', 'C0'],
+                         })
+
+        R = dendrogram(Z, 2, 'mtica', show_contracted=True)
+        plt.close()
+        assert_equal(R, {'color_list': ['C1', 'C0', 'C0', 'C0'],
+                         'dcoord': [[0.0, 138.0, 138.0, 0.0],
+                                    [0.0, 255.0, 255.0, 0.0],
+                                    [0.0, 268.0, 268.0, 255.0],
+                                    [138.0, 295.0, 295.0, 268.0]],
+                         'icoord': [[5.0, 5.0, 15.0, 15.0],
+                                    [35.0, 35.0, 45.0, 45.0],
+                                    [25.0, 25.0, 40.0, 40.0],
+                                    [10.0, 10.0, 32.5, 32.5]],
+                         'ivl': ['2', '5', '1', '0', '(2)'],
+                         'leaves': [2, 5, 1, 0, 7],
+                         'leaves_color_list': ['C1', 'C1', 'C0', 'C0', 'C0'],
+                         })
+
+    def test_dendrogram_colors(self):
+        # Tests dendrogram plots with alternate colors
+        Z = linkage(hierarchy_test_data.ytdist, 'single')
+
+        set_link_color_palette(['c', 'm', 'y', 'k'])
+        R = dendrogram(Z, no_plot=True,
+                       above_threshold_color='g', color_threshold=250)
+        set_link_color_palette(['g', 'r', 'c', 'm', 'y', 'k'])
+
+        color_list = R['color_list']
+        assert_equal(color_list, ['c', 'm', 'g', 'g', 'g'])
+
+        # reset color palette (global list)
+        set_link_color_palette(None)
+
+    def test_dendrogram_leaf_colors_zero_dist(self):
+        # tests that the colors of the leaves are correct for a tree
+        # with two identical points
+        x = np.array([[1, 0, 0],
+                      [0, 0, 1],
+                      [0, 2, 0],
+                      [0, 0, 1],
+                      [0, 1, 0],
+                      [0, 1, 0]])
+        z = linkage(x, "single")
+        d = dendrogram(z, no_plot=True)
+        exp_colors = ['C0', 'C1', 'C1', 'C0', 'C2', 'C2']
+        colors = d["leaves_color_list"]
+        assert_equal(colors, exp_colors)
+
+    def test_dendrogram_leaf_colors(self):
+        # tests that the colors are correct for a tree
+        # with two near points ((0, 0, 1.1) and (0, 0, 1))
+        x = np.array([[1, 0, 0],
+                      [0, 0, 1.1],
+                      [0, 2, 0],
+                      [0, 0, 1],
+                      [0, 1, 0],
+                      [0, 1, 0]])
+        z = linkage(x, "single")
+        d = dendrogram(z, no_plot=True)
+        exp_colors = ['C0', 'C1', 'C1', 'C0', 'C2', 'C2']
+        colors = d["leaves_color_list"]
+        assert_equal(colors, exp_colors)
+
+
+def calculate_maximum_distances(Z):
+    # Used for testing correctness of maxdists.
+    n = Z.shape[0] + 1
+    B = np.zeros((n-1,))
+    q = np.zeros((3,))
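+    # q holds the three candidates for each running maximum: the maxima of
+    # the left and right subtrees and the height of the current merge.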
+    for i in range(0, n - 1):
+        q[:] = 0.0
+        left = Z[i, 0]
+        right = Z[i, 1]
+        if left >= n:
+            q[0] = B[int(left) - n]
+        if right >= n:
+            q[1] = B[int(right) - n]
+        q[2] = Z[i, 2]
+        B[i] = q.max()
+    return B
+
+
+def calculate_maximum_inconsistencies(Z, R, k=3):
+    # Used for testing correctness of maxinconsts.
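+    # Same recursion as calculate_maximum_distances, but propagating
+    # column k of the inconsistency matrix R instead of the merge heights.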
+    n = Z.shape[0] + 1
+    B = np.zeros((n-1,))
+    q = np.zeros((3,))
+    for i in range(0, n - 1):
+        q[:] = 0.0
+        left = Z[i, 0]
+        right = Z[i, 1]
+        if left >= n:
+            q[0] = B[int(left) - n]
+        if right >= n:
+            q[1] = B[int(right) - n]
+        q[2] = R[i, k]
+        B[i] = q.max()
+    return B
+
+
+def within_tol(a, b, tol):
+    return np.abs(a - b).max() < tol
+
+
+def test_unsupported_uncondensed_distance_matrix_linkage_warning():
+    assert_warns(ClusterWarning, linkage, [[0, 1], [1, 0]])
+
+
+def test_euclidean_linkage_value_error():
+    for method in scipy.cluster.hierarchy._EUCLIDEAN_METHODS:
+        assert_raises(ValueError, linkage, [[1, 1], [1, 1]],
+                      method=method, metric='cityblock')
+
+
+def test_2x2_linkage():
+    Z1 = linkage([1], method='single', metric='euclidean')
+    Z2 = linkage([[0, 1], [0, 0]], method='single', metric='euclidean')
+    assert_allclose(Z1, Z2)
+
+
+def test_node_compare():
+    np.random.seed(23)
+    nobs = 50
+    X = np.random.randn(nobs, 4)
+    Z = scipy.cluster.hierarchy.ward(X)
+    tree = to_tree(Z)
+    assert_(tree > tree.get_left())
+    assert_(tree.get_right() > tree.get_left())
+    assert_(tree.get_right() == tree.get_right())
+    assert_(tree.get_right() != tree.get_left())
+
+
+def test_cut_tree():
+    np.random.seed(23)
+    nobs = 50
+    X = np.random.randn(nobs, 4)
+    Z = scipy.cluster.hierarchy.ward(X)
+    cutree = cut_tree(Z)
+
+    assert_equal(cutree[:, 0], np.arange(nobs))
+    assert_equal(cutree[:, -1], np.zeros(nobs))
+    assert_equal(cutree.max(0), np.arange(nobs - 1, -1, -1))
+
+    assert_equal(cutree[:, [-5]], cut_tree(Z, n_clusters=5))
+    assert_equal(cutree[:, [-5, -10]], cut_tree(Z, n_clusters=[5, 10]))
+    assert_equal(cutree[:, [-10, -5]], cut_tree(Z, n_clusters=[10, 5]))
+
+    nodes = _order_cluster_tree(Z)
+    heights = np.array([node.dist for node in nodes])
+
+    assert_equal(cutree[:, np.searchsorted(heights, [5])],
+                 cut_tree(Z, height=5))
+    assert_equal(cutree[:, np.searchsorted(heights, [5, 10])],
+                 cut_tree(Z, height=[5, 10]))
+    assert_equal(cutree[:, np.searchsorted(heights, [10, 5])],
+                 cut_tree(Z, height=[10, 5]))
+
+
+def test_optimal_leaf_ordering():
+    # test with the distance vector y
+    Z = optimal_leaf_ordering(linkage(hierarchy_test_data.ytdist),
+                              hierarchy_test_data.ytdist)
+    expectedZ = hierarchy_test_data.linkage_ytdist_single_olo
+    assert_allclose(Z, expectedZ, atol=1e-10)
+
+    # test with the observation matrix X
+    Z = optimal_leaf_ordering(linkage(hierarchy_test_data.X, 'ward'),
+                              hierarchy_test_data.X)
+    expectedZ = hierarchy_test_data.linkage_X_ward_olo
+    assert_allclose(Z, expectedZ, atol=1e-06)
+
+
+def test_Heap():
+    values = np.array([2, -1, 0, -1.5, 3])
+    heap = Heap(values)
+
+    pair = heap.get_min()
+    assert_equal(pair['key'], 3)
+    assert_equal(pair['value'], -1.5)
+
+    heap.remove_min()
+    pair = heap.get_min()
+    assert_equal(pair['key'], 1)
+    assert_equal(pair['value'], -1)
+
+    heap.change_value(1, 2.5)
+    pair = heap.get_min()
+    assert_equal(pair['key'], 2)
+    assert_equal(pair['value'], 0)
+
+    heap.remove_min()
+    heap.remove_min()
+
+    heap.change_value(1, 10)
+    pair = heap.get_min()
+    assert_equal(pair['key'], 4)
+    assert_equal(pair['value'], 3)
+
+    heap.remove_min()
+    pair = heap.get_min()
+    assert_equal(pair['key'], 1)
+    assert_equal(pair['value'], 10)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/cluster/tests/test_vq.py b/__packaged__/coreml/.python_dependencies/scipy/cluster/tests/test_vq.py
new file mode 100644
index 00000000..ab144a9c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/cluster/tests/test_vq.py
@@ -0,0 +1,336 @@
+
+import warnings
+import sys
+from copy import deepcopy
+
+import numpy as np
+from numpy.testing import (assert_array_equal, assert_array_almost_equal,
+                           assert_allclose, assert_equal, assert_,
+                           suppress_warnings)
+import pytest
+from pytest import raises as assert_raises
+
+from scipy.cluster.vq import (kmeans, kmeans2, py_vq, vq, whiten,
+                              ClusterError, _krandinit)
+from scipy.cluster import _vq
+from scipy.sparse._sputils import matrix
+
+
+TESTDATA_2D = np.array([
+    -2.2, 1.17, -1.63, 1.69, -2.04, 4.38, -3.09, 0.95, -1.7, 4.79, -1.68, 0.68,
+    -2.26, 3.34, -2.29, 2.55, -1.72, -0.72, -1.99, 2.34, -2.75, 3.43, -2.45,
+    2.41, -4.26, 3.65, -1.57, 1.87, -1.96, 4.03, -3.01, 3.86, -2.53, 1.28,
+    -4.0, 3.95, -1.62, 1.25, -3.42, 3.17, -1.17, 0.12, -3.03, -0.27, -2.07,
+    -0.55, -1.17, 1.34, -2.82, 3.08, -2.44, 0.24, -1.71, 2.48, -5.23, 4.29,
+    -2.08, 3.69, -1.89, 3.62, -2.09, 0.26, -0.92, 1.07, -2.25, 0.88, -2.25,
+    2.02, -4.31, 3.86, -2.03, 3.42, -2.76, 0.3, -2.48, -0.29, -3.42, 3.21,
+    -2.3, 1.73, -2.84, 0.69, -1.81, 2.48, -5.24, 4.52, -2.8, 1.31, -1.67,
+    -2.34, -1.18, 2.17, -2.17, 2.82, -1.85, 2.25, -2.45, 1.86, -6.79, 3.94,
+    -2.33, 1.89, -1.55, 2.08, -1.36, 0.93, -2.51, 2.74, -2.39, 3.92, -3.33,
+    2.99, -2.06, -0.9, -2.83, 3.35, -2.59, 3.05, -2.36, 1.85, -1.69, 1.8,
+    -1.39, 0.66, -2.06, 0.38, -1.47, 0.44, -4.68, 3.77, -5.58, 3.44, -2.29,
+    2.24, -1.04, -0.38, -1.85, 4.23, -2.88, 0.73, -2.59, 1.39, -1.34, 1.75,
+    -1.95, 1.3, -2.45, 3.09, -1.99, 3.41, -5.55, 5.21, -1.73, 2.52, -2.17,
+    0.85, -2.06, 0.49, -2.54, 2.07, -2.03, 1.3, -3.23, 3.09, -1.55, 1.44,
+    -0.81, 1.1, -2.99, 2.92, -1.59, 2.18, -2.45, -0.73, -3.12, -1.3, -2.83,
+    0.2, -2.77, 3.24, -1.98, 1.6, -4.59, 3.39, -4.85, 3.75, -2.25, 1.71, -3.28,
+    3.38, -1.74, 0.88, -2.41, 1.92, -2.24, 1.19, -2.48, 1.06, -1.68, -0.62,
+    -1.3, 0.39, -1.78, 2.35, -3.54, 2.44, -1.32, 0.66, -2.38, 2.76, -2.35,
+    3.95, -1.86, 4.32, -2.01, -1.23, -1.79, 2.76, -2.13, -0.13, -5.25, 3.84,
+    -2.24, 1.59, -4.85, 2.96, -2.41, 0.01, -0.43, 0.13, -3.92, 2.91, -1.75,
+    -0.53, -1.69, 1.69, -1.09, 0.15, -2.11, 2.17, -1.53, 1.22, -2.1, -0.86,
+    -2.56, 2.28, -3.02, 3.33, -1.12, 3.86, -2.18, -1.19, -3.03, 0.79, -0.83,
+    0.97, -3.19, 1.45, -1.34, 1.28, -2.52, 4.22, -4.53, 3.22, -1.97, 1.75,
+    -2.36, 3.19, -0.83, 1.53, -1.59, 1.86, -2.17, 2.3, -1.63, 2.71, -2.03,
+    3.75, -2.57, -0.6, -1.47, 1.33, -1.95, 0.7, -1.65, 1.27, -1.42, 1.09, -3.0,
+    3.87, -2.51, 3.06, -2.6, 0.74, -1.08, -0.03, -2.44, 1.31, -2.65, 2.99,
+    -1.84, 1.65, -4.76, 3.75, -2.07, 3.98, -2.4, 2.67, -2.21, 1.49, -1.21,
+    1.22, -5.29, 2.38, -2.85, 2.28, -5.6, 3.78, -2.7, 0.8, -1.81, 3.5, -3.75,
+    4.17, -1.29, 2.99, -5.92, 3.43, -1.83, 1.23, -1.24, -1.04, -2.56, 2.37,
+    -3.26, 0.39, -4.63, 2.51, -4.52, 3.04, -1.7, 0.36, -1.41, 0.04, -2.1, 1.0,
+    -1.87, 3.78, -4.32, 3.59, -2.24, 1.38, -1.99, -0.22, -1.87, 1.95, -0.84,
+    2.17, -5.38, 3.56, -1.27, 2.9, -1.79, 3.31, -5.47, 3.85, -1.44, 3.69,
+    -2.02, 0.37, -1.29, 0.33, -2.34, 2.56, -1.74, -1.27, -1.97, 1.22, -2.51,
+    -0.16, -1.64, -0.96, -2.99, 1.4, -1.53, 3.31, -2.24, 0.45, -2.46, 1.71,
+    -2.88, 1.56, -1.63, 1.46, -1.41, 0.68, -1.96, 2.76, -1.61,
+    2.11]).reshape((200, 2))
+
+
+# Global data
+X = np.array([[3.0, 3], [4, 3], [4, 2],
+              [9, 2], [5, 1], [6, 2], [9, 4],
+              [5, 2], [5, 4], [7, 4], [6, 5]])
+
+CODET1 = np.array([[3.0000, 3.0000],
+                   [6.2000, 4.0000],
+                   [5.8000, 1.8000]])
+
+CODET2 = np.array([[11.0/3, 8.0/3],
+                   [6.7500, 4.2500],
+                   [6.2500, 1.7500]])
+
+LABEL1 = np.array([0, 1, 2, 2, 2, 2, 1, 2, 1, 1, 1])
+
+
+class TestWhiten:
+    def test_whiten(self):
+        desired = np.array([[5.08738849, 2.97091878],
+                            [3.19909255, 0.69660580],
+                            [4.51041982, 0.02640918],
+                            [4.38567074, 0.95120889],
+                            [2.32191480, 1.63195503]])
+        for tp in np.array, matrix:
+            obs = tp([[0.98744510, 0.82766775],
+                      [0.62093317, 0.19406729],
+                      [0.87545741, 0.00735733],
+                      [0.85124403, 0.26499712],
+                      [0.45067590, 0.45464607]])
+            assert_allclose(whiten(obs), desired, rtol=1e-5)
+
+    def test_whiten_zero_std(self):
+        desired = np.array([[0., 1.0, 2.86666544],
+                            [0., 1.0, 1.32460034],
+                            [0., 1.0, 3.74382172]])
+        for tp in np.array, matrix:
+            obs = tp([[0., 1., 0.74109533],
+                      [0., 1., 0.34243798],
+                      [0., 1., 0.96785929]])
+            with warnings.catch_warnings(record=True) as w:
+                warnings.simplefilter('always')
+                assert_allclose(whiten(obs), desired, rtol=1e-5)
+                assert_equal(len(w), 1)
+                assert_(issubclass(w[-1].category, RuntimeWarning))
+
+    def test_whiten_not_finite(self):
+        for tp in np.array, matrix:
+            for bad_value in np.nan, np.inf, -np.inf:
+                obs = tp([[0.98744510, bad_value],
+                          [0.62093317, 0.19406729],
+                          [0.87545741, 0.00735733],
+                          [0.85124403, 0.26499712],
+                          [0.45067590, 0.45464607]])
+                assert_raises(ValueError, whiten, obs)
+
+
+class TestVq:
+    def test_py_vq(self):
+        initc = np.concatenate(([[X[0]], [X[1]], [X[2]]]))
+        for tp in np.array, matrix:
+            label1 = py_vq(tp(X), tp(initc))[0]
+            assert_array_equal(label1, LABEL1)
+
+    def test_vq(self):
+        initc = np.concatenate(([[X[0]], [X[1]], [X[2]]]))
+        for tp in np.array, matrix:
+            label1, dist = _vq.vq(tp(X), tp(initc))
+            assert_array_equal(label1, LABEL1)
+            tlabel1, tdist = vq(tp(X), tp(initc))
+
+    def test_vq_1d(self):
+        # Test special rank 1 vq algo, python implementation.
+        data = X[:, 0]
+        initc = data[:3]
+        a, b = _vq.vq(data, initc)
+        ta, tb = py_vq(data[:, np.newaxis], initc[:, np.newaxis])
+        assert_array_equal(a, ta)
+        assert_array_equal(b, tb)
+
+    def test__vq_sametype(self):
+        a = np.array([1.0, 2.0], dtype=np.float64)
+        b = a.astype(np.float32)
+        assert_raises(TypeError, _vq.vq, a, b)
+
+    def test__vq_invalid_type(self):
+        a = np.array([1, 2], dtype=int)
+        assert_raises(TypeError, _vq.vq, a, a)
+
+    def test_vq_large_nfeat(self):
+        X = np.random.rand(20, 20)
+        code_book = np.random.rand(3, 20)
+
+        codes0, dis0 = _vq.vq(X, code_book)
+        codes1, dis1 = py_vq(X, code_book)
+        assert_allclose(dis0, dis1, 1e-5)
+        assert_array_equal(codes0, codes1)
+
+        X = X.astype(np.float32)
+        code_book = code_book.astype(np.float32)
+
+        codes0, dis0 = _vq.vq(X, code_book)
+        codes1, dis1 = py_vq(X, code_book)
+        assert_allclose(dis0, dis1, 1e-5)
+        assert_array_equal(codes0, codes1)
+
+    def test_vq_large_features(self):
+        X = np.random.rand(10, 5) * 1000000
+        code_book = np.random.rand(2, 5) * 1000000
+
+        codes0, dis0 = _vq.vq(X, code_book)
+        codes1, dis1 = py_vq(X, code_book)
+        assert_allclose(dis0, dis1, 1e-5)
+        assert_array_equal(codes0, codes1)
+
+
+class TestKMean:
+    def test_large_features(self):
+        # Generate a data set with large values, and run kmeans on it
+        # (regression test for gh-1077).
+        d = 300
+        n = 100
+
+        m1 = np.random.randn(d)
+        m2 = np.random.randn(d)
+        x = 10000 * np.random.randn(n, d) - 20000 * m1
+        y = 10000 * np.random.randn(n, d) + 20000 * m2
+
+        data = np.empty((x.shape[0] + y.shape[0], d), np.double)
+        data[:x.shape[0]] = x
+        data[x.shape[0]:] = y
+
+        kmeans(data, 2)
+
+    def test_kmeans_simple(self):
+        np.random.seed(54321)
+        initc = np.concatenate(([[X[0]], [X[1]], [X[2]]]))
+        for tp in np.array, matrix:
+            code1 = kmeans(tp(X), tp(initc), iter=1)[0]
+            assert_array_almost_equal(code1, CODET2)
+
+    def test_kmeans_lost_cluster(self):
+        # This will cause kmeans to have a cluster with no points.
+        data = TESTDATA_2D
+        initk = np.array([[-1.8127404, -0.67128041],
+                         [2.04621601, 0.07401111],
+                         [-2.31149087, -0.05160469]])
+
+        kmeans(data, initk)
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning,
+                       "One of the clusters is empty. Re-run kmeans with a "
+                       "different initialization")
+            kmeans2(data, initk, missing='warn')
+
+        assert_raises(ClusterError, kmeans2, data, initk, missing='raise')
+
+    def test_kmeans2_simple(self):
+        np.random.seed(12345678)
+        initc = np.concatenate(([[X[0]], [X[1]], [X[2]]]))
+        for tp in np.array, matrix:
+            code1 = kmeans2(tp(X), tp(initc), iter=1)[0]
+            code2 = kmeans2(tp(X), tp(initc), iter=2)[0]
+
+            assert_array_almost_equal(code1, CODET1)
+            assert_array_almost_equal(code2, CODET2)
+
+    def test_kmeans2_rank1(self):
+        data = TESTDATA_2D
+        data1 = data[:, 0]
+
+        initc = data1[:3]
+        code = initc.copy()
+        kmeans2(data1, code, iter=1)[0]
+        kmeans2(data1, code, iter=2)[0]
+
+    def test_kmeans2_rank1_2(self):
+        data = TESTDATA_2D
+        data1 = data[:, 0]
+        kmeans2(data1, 2, iter=1)
+
+    def test_kmeans2_high_dim(self):
+        # test kmeans2 when the number of dimensions exceeds the number
+        # of input points
+        data = TESTDATA_2D
+        data = data.reshape((20, 20))[:10]
+        kmeans2(data, 2)
+
+    def test_kmeans2_init(self):
+        np.random.seed(12345)
+        data = TESTDATA_2D
+
+        kmeans2(data, 3, minit='points')
+        kmeans2(data[:, :1], 3, minit='points')  # special case (1-D)
+
+        kmeans2(data, 3, minit='++')
+        kmeans2(data[:, :1], 3, minit='++')  # special case (1-D)
+
+        # minit='random' can give warnings, filter those
+        with suppress_warnings() as sup:
+            sup.filter(message="One of the clusters is empty. Re-run.")
+            kmeans2(data, 3, minit='random')
+            kmeans2(data[:, :1], 3, minit='random')  # special case (1-D)
+
+    @pytest.mark.skipif(sys.platform == 'win32',
+                        reason='Fails with MemoryError in Wine.')
+    def test_krandinit(self):
+        data = TESTDATA_2D
+        datas = [data.reshape((200, 2)), data.reshape((20, 20))[:10]]
+        k = int(1e6)
+        for data in datas:
+            # check that np.random.Generator can be used (numpy >= 1.17)
+            if hasattr(np.random, 'default_rng'):
+                rng = np.random.default_rng(1234)
+            else:
+                rng = np.random.RandomState(1234)
+
+            init = _krandinit(data, k, rng)
+            orig_cov = np.cov(data, rowvar=0)
+            init_cov = np.cov(init, rowvar=0)
+            assert_allclose(orig_cov, init_cov, atol=1e-2)
+
+    def test_kmeans2_empty(self):
+        # Regression test for gh-1032.
+        assert_raises(ValueError, kmeans2, [], 2)
+
+    def test_kmeans_0k(self):
+        # Regression test for gh-1073: fail when k arg is 0.
+        assert_raises(ValueError, kmeans, X, 0)
+        assert_raises(ValueError, kmeans2, X, 0)
+        assert_raises(ValueError, kmeans2, X, np.array([]))
+
+    def test_kmeans_large_thres(self):
+        # Regression test for gh-1774
+        x = np.array([1, 2, 3, 4, 10], dtype=float)
+        res = kmeans(x, 1, thresh=1e16)
+        assert_allclose(res[0], np.array([4.]))
+        assert_allclose(res[1], 2.3999999999999999)
+
+    def test_kmeans2_kpp_low_dim(self):
+        # Regression test for gh-11462
+        prev_res = np.array([[-1.95266667, 0.898],
+                             [-3.153375, 3.3945]])
+        np.random.seed(42)
+        res, _ = kmeans2(TESTDATA_2D, 2, minit='++')
+        assert_allclose(res, prev_res)
+
+    def test_kmeans2_kpp_high_dim(self):
+        # Regression test for gh-11462
+        n_dim = 100
+        size = 10
+        centers = np.vstack([5 * np.ones(n_dim),
+                             -5 * np.ones(n_dim)])
+        np.random.seed(42)
+        data = np.vstack([
+            np.random.multivariate_normal(centers[0], np.eye(n_dim), size=size),
+            np.random.multivariate_normal(centers[1], np.eye(n_dim), size=size)
+        ])
+        res, _ = kmeans2(data, 2, minit='++')
+        assert_array_almost_equal(res, centers, decimal=0)
+
+    def test_kmeans_and_kmeans2_random_seed(self):
+
+        seed_list = [1234, np.random.RandomState(1234)]
+
+        # check that np.random.Generator can be used (numpy >= 1.17)
+        if hasattr(np.random, 'default_rng'):
+            seed_list.append(np.random.default_rng(1234))
+
+        for seed in seed_list:
+            # test for kmeans; deepcopy the seed so that instance seeds
+            # start both runs from the same state
+            res1, _ = kmeans(TESTDATA_2D, 2, seed=deepcopy(seed))
+            res2, _ = kmeans(TESTDATA_2D, 2, seed=deepcopy(seed))
+            assert_allclose(res1, res2)  # should be same results
+
+            # test for kmeans2
+            for minit in ["random", "points", "++"]:
+                res1, _ = kmeans2(TESTDATA_2D, 2, minit=minit, seed=deepcopy(seed))
+                res2, _ = kmeans2(TESTDATA_2D, 2, minit=minit, seed=deepcopy(seed))
+                assert_allclose(res1, res2)  # should be same results
diff --git a/__packaged__/coreml/.python_dependencies/scipy/cluster/vq.py b/__packaged__/coreml/.python_dependencies/scipy/cluster/vq.py
new file mode 100644
index 00000000..9a329e88
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/cluster/vq.py
@@ -0,0 +1,795 @@
+"""
+K-means clustering and vector quantization (:mod:`scipy.cluster.vq`)
+====================================================================
+
+Provides routines for k-means clustering, generating code books
+from k-means models and quantizing vectors by comparing them with
+centroids in a code book.
+
+.. autosummary::
+   :toctree: generated/
+
+   whiten -- Normalize a group of observations so each feature has unit variance
+   vq -- Calculate code book membership of a set of observation vectors
+   kmeans -- Perform k-means on a set of observation vectors forming k clusters
+   kmeans2 -- A different implementation of k-means with more methods
+           -- for initializing centroids
+
+Background information
+----------------------
+The k-means algorithm takes as input the number of clusters to
+generate, k, and a set of observation vectors to cluster. It
+returns a set of centroids, one for each of the k clusters. An
+observation vector is classified with the cluster number or
+centroid index of the centroid closest to it.
+
+A vector v belongs to cluster i if it is closer to centroid i than
+any other centroid. If v belongs to i, we say centroid i is the
+dominating centroid of v. The k-means algorithm tries to
+minimize distortion, which is defined as the sum of the squared distances
+between each observation vector and its dominating centroid.
+The minimization is achieved by iteratively reclassifying
+the observations into clusters and recalculating the centroids until
+a configuration is reached in which the centroids are stable. One can
+also define a maximum number of iterations.
+
+Since vector quantization is a natural application for k-means,
+information theory terminology is often used. The centroid index
+or cluster index is also referred to as a "code" and the table
+mapping codes to centroids and vice versa is often referred to as a
+"code book". The result of k-means, a set of centroids, can be
+used to quantize vectors. Quantization aims to find an encoding of
+vectors that reduces the expected distortion.
+
+All routines expect obs to be an M by N array, where the rows are
+the observation vectors. The codebook is a k by N array, where the
+ith row is the centroid of code word i. The observation vectors
+and centroids have the same feature dimension.
+
+As an example, suppose we wish to compress a 24-bit color image
+(each pixel is represented by one byte for red, one for blue, and
+one for green) before sending it over the web. By using a smaller
+8-bit encoding, we can reduce the amount of data by two
+thirds. Ideally, the colors for each of the 256 possible 8-bit
+encoding values should be chosen to minimize distortion of the
+color. Running k-means with k=256 generates a code book of 256
+codes, which fills up all possible 8-bit sequences. Instead of
+sending a 3-byte value for each pixel, the 8-bit centroid index
+(or code word) of the dominating centroid is transmitted. The code
+book is also sent over the wire so each 8-bit code can be
+translated back to a 24-bit pixel value representation. If the
+image of interest was of an ocean, we would expect many 24-bit
+blues to be represented by 8-bit codes. If it was an image of a
+human face, more flesh-tone colors would be represented in the
+code book.
+
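+A minimal sketch of the round trip described above (toy random data in
+place of a real image, and a small code book for brevity; the draws are
+random, so only the shape of the result is shown):
+
+>>> import numpy as np
+>>> from scipy.cluster.vq import kmeans, vq
+>>> rng = np.random.default_rng(1234)
+>>> pixels = rng.random((1000, 3))       # 1000 "24-bit" pixels in [0, 1)
+>>> code_book, _ = kmeans(pixels, 8)     # build a small code book
+>>> codes, _ = vq(pixels, code_book)     # one small code per pixel
+>>> decoded = code_book[codes]           # receiver reconstructs the pixels
+>>> decoded.shape
+(1000, 3)
+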
+"""
+import warnings
+import numpy as np
+from collections import deque
+from scipy._lib._util import _asarray_validated, check_random_state,\
+    rng_integers
+from scipy.spatial.distance import cdist
+
+from . import _vq
+
+__docformat__ = 'restructuredtext'
+
+__all__ = ['whiten', 'vq', 'kmeans', 'kmeans2']
+
+
+class ClusterError(Exception):
+    pass
+
+
+def whiten(obs, check_finite=True):
+    """
+    Normalize a group of observations on a per feature basis.
+
+    Before running k-means, it is beneficial to rescale each feature
+    dimension of the observation set by its standard deviation (i.e. "whiten"
+    it - as in "white noise" where each frequency has equal power).
+    Each feature is divided by its standard deviation across all observations
+    to give it unit variance.
+
+    Parameters
+    ----------
+    obs : ndarray
+        Each row of the array is an observation.  The
+        columns are the features seen during each observation.
+
+        >>> #         f0    f1    f2
+        >>> obs = [[  1.,   1.,   1.],  #o0
+        ...        [  2.,   2.,   2.],  #o1
+        ...        [  3.,   3.,   3.],  #o2
+        ...        [  4.,   4.,   4.]]  #o3
+
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+        Default: True
+
+    Returns
+    -------
+    result : ndarray
+        Contains the values in `obs` scaled by the standard deviation
+        of each column.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.cluster.vq import whiten
+    >>> features  = np.array([[1.9, 2.3, 1.7],
+    ...                       [1.5, 2.5, 2.2],
+    ...                       [0.8, 0.6, 1.7,]])
+    >>> whiten(features)
+    array([[ 4.17944278,  2.69811351,  7.21248917],
+           [ 3.29956009,  2.93273208,  9.33380951],
+           [ 1.75976538,  0.7038557 ,  7.21248917]])
+
+    """
+    obs = _asarray_validated(obs, check_finite=check_finite)
+    std_dev = obs.std(axis=0)
+    zero_std_mask = std_dev == 0
+    if zero_std_mask.any():
+        std_dev[zero_std_mask] = 1.0
+        warnings.warn("Some columns have standard deviation zero. "
+                      "The values of these columns will not change.",
+                      RuntimeWarning)
+    return obs / std_dev
+
+
+def vq(obs, code_book, check_finite=True):
+    """
+    Assign codes from a code book to observations.
+
+    Assigns a code from a code book to each observation. Each
+    observation vector in the 'M' by 'N' `obs` array is compared with the
+    centroids in the code book and assigned the code of the closest
+    centroid.
+
+    The features in `obs` should have unit variance, which can be
+    achieved by passing them through the whiten function. The code
+    book can be created with the k-means algorithm or a different
+    encoding algorithm.
+
+    Parameters
+    ----------
+    obs : ndarray
+        Each row of the 'M' x 'N' array is an observation. The columns are
+        the "features" seen during each observation. The features must be
+        whitened first using the whiten function or something equivalent.
+    code_book : ndarray
+        The code book is usually generated using the k-means algorithm.
+        Each row of the array holds a different code, and the columns are
+        the features of the code.
+
+         >>> #              f0    f1    f2   f3
+         >>> code_book = [
+         ...             [  1.,   2.,   3.,   4.],  #c0
+         ...             [  1.,   2.,   3.,   4.],  #c1
+         ...             [  1.,   2.,   3.,   4.]]  #c2
+
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+        Default: True
+
+    Returns
+    -------
+    code : ndarray
+        A length M array holding the code book index for each observation.
+    dist : ndarray
+        The distortion (distance) between the observation and its nearest
+        code.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.cluster.vq import vq
+    >>> code_book = np.array([[1.,1.,1.],
+    ...                       [2.,2.,2.]])
+    >>> features  = np.array([[  1.9,2.3,1.7],
+    ...                       [  1.5,2.5,2.2],
+    ...                       [  0.8,0.6,1.7]])
+    >>> vq(features,code_book)
+    (array([1, 1, 0],'i'), array([ 0.43588989,  0.73484692,  0.83066239]))
+
+    """
+    obs = _asarray_validated(obs, check_finite=check_finite)
+    code_book = _asarray_validated(code_book, check_finite=check_finite)
+    ct = np.common_type(obs, code_book)
+
+    c_obs = obs.astype(ct, copy=False)
+    c_code_book = code_book.astype(ct, copy=False)
+
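+    # Dispatch to the fast C implementation for float32/float64 inputs;
+    # fall back to the pure-Python version for any other common type.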
+    if np.issubdtype(ct, np.float64) or np.issubdtype(ct, np.float32):
+        return _vq.vq(c_obs, c_code_book)
+    return py_vq(obs, code_book, check_finite=False)
+
+
+def py_vq(obs, code_book, check_finite=True):
+    """ Python version of vq algorithm.
+
+    The algorithm computes the Euclidean distance between each
+    observation and every frame in the code_book.
+
+    Parameters
+    ----------
+    obs : ndarray
+        Expects a rank 2 array. Each row is one observation.
+    code_book : ndarray
+        Code book to use. Same format as obs. Should have the same number
+        of features (i.e., columns) as obs.
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+        Default: True
+
+    Returns
+    -------
+    code : ndarray
+        code[i] gives the label of the ith observation; its code is
+        code_book[code[i]].
+    min_dist : ndarray
+        min_dist[i] gives the distance between the ith observation and its
+        corresponding code.
+
+    Notes
+    -----
+    This function is slower than the C version but works for
+    all input types. If the inputs have the wrong types for the
+    C versions of the function, this one is called as a last resort.
+
+    It is about 20 times slower than the C version.
+
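+    Examples
+    --------
+    A minimal, hand-checkable sketch (the points are chosen so the nearest
+    centroids are obvious):
+
+    >>> import numpy as np
+    >>> from scipy.cluster.vq import py_vq
+    >>> obs = np.array([[0., 0.], [1., 1.], [4., 4.]])
+    >>> code_book = np.array([[0., 0.], [4., 4.]])
+    >>> code, min_dist = py_vq(obs, code_book)
+    >>> code
+    array([0, 0, 1])
+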
+    """
+    obs = _asarray_validated(obs, check_finite=check_finite)
+    code_book = _asarray_validated(code_book, check_finite=check_finite)
+
+    if obs.ndim != code_book.ndim:
+        raise ValueError("Observation and code_book should have the same rank")
+
+    if obs.ndim == 1:
+        obs = obs[:, np.newaxis]
+        code_book = code_book[:, np.newaxis]
+
+    dist = cdist(obs, code_book)
+    code = dist.argmin(axis=1)
+    min_dist = dist[np.arange(len(code)), code]
+    return code, min_dist
+
+
+def _kmeans(obs, guess, thresh=1e-5):
+    """ "raw" version of k-means.
+
+    Returns
+    -------
+    code_book
+        The lowest distortion codebook found.
+    avg_dist
+        The average distance an observation is from a code in the book.
+        Lower means the code_book matches the data better.
+
+    See Also
+    --------
+    kmeans : wrapper around k-means
+
+    Examples
+    --------
+    Note: not whitened in this example.
+
+    >>> import numpy as np
+    >>> from scipy.cluster.vq import _kmeans
+    >>> features  = np.array([[ 1.9,2.3],
+    ...                       [ 1.5,2.5],
+    ...                       [ 0.8,0.6],
+    ...                       [ 0.4,1.8],
+    ...                       [ 1.0,1.0]])
+    >>> book = np.array((features[0],features[2]))
+    >>> _kmeans(features,book)
+    (array([[ 1.7       ,  2.4       ],
+           [ 0.73333333,  1.13333333]]), 0.40563916697728591)
+
+    """
+
+    code_book = np.asarray(guess)
+    diff = np.inf
+    prev_avg_dists = deque([diff], maxlen=2)
+    while diff > thresh:
+        # compute membership and distances between obs and code_book
+        obs_code, distort = vq(obs, code_book, check_finite=False)
+        prev_avg_dists.append(distort.mean(axis=-1))
+        # recalc code_book as centroids of associated obs
+        code_book, has_members = _vq.update_cluster_means(obs, obs_code,
+                                                          code_book.shape[0])
+        code_book = code_book[has_members]
+        diff = prev_avg_dists[0] - prev_avg_dists[1]
+
+    return code_book, prev_avg_dists[1]
+
+
+def kmeans(obs, k_or_guess, iter=20, thresh=1e-5, check_finite=True,
+           *, seed=None):
+    """
+    Performs k-means on a set of observation vectors forming k clusters.
+
+    The k-means algorithm adjusts the classification of the observations
+    into clusters and updates the cluster centroids until the position of
+    the centroids is stable over successive iterations. In this
+    implementation of the algorithm, the stability of the centroids is
+    determined by comparing the absolute value of the change in the average
+    Euclidean distance between the observations and their corresponding
+    centroids against a threshold. This yields
+    a code book mapping centroids to codes and vice versa.
+
+    Parameters
+    ----------
+    obs : ndarray
+       Each row of the M by N array is an observation vector. The
+       columns are the features seen during each observation.
+       The features must be whitened first with the `whiten` function.
+
+    k_or_guess : int or ndarray
+       The number of centroids to generate. A code is assigned to
+       each centroid, which is also the row index of the centroid
+       in the code_book matrix generated.
+
+       The initial k centroids are chosen by randomly selecting
+       observations from the observation matrix. Alternatively,
+       passing a k by N array specifies the initial k centroids.
+
+    iter : int, optional
+       The number of times to run k-means, returning the codebook
+       with the lowest distortion. This argument is ignored if
+       initial centroids are specified with an array for the
+       ``k_or_guess`` parameter. This parameter does not represent the
+       number of iterations of the k-means algorithm.
+
+    thresh : float, optional
+       Terminates the k-means algorithm if the change in
+       distortion since the last k-means iteration is less than
+       or equal to threshold.
+
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+        Default: True
+
+    seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
+        Seed for initializing the pseudo-random number generator.
+        If `seed` is None (or `numpy.random`), the `numpy.random.RandomState`
+        singleton is used.
+        If `seed` is an int, a new ``RandomState`` instance is used,
+        seeded with `seed`.
+        If `seed` is already a ``Generator`` or ``RandomState`` instance then
+        that instance is used.
+        The default is None.
+
+    Returns
+    -------
+    codebook : ndarray
+       A k by N array of k centroids. The ith centroid
+       codebook[i] is represented with the code i. The centroids
+       and codes generated represent the lowest distortion seen,
+       not necessarily the globally minimal distortion.
+       Note that the number of centroids is not necessarily the same as the
+       ``k_or_guess`` parameter, because centroids assigned to no observations
+       are removed during iterations.
+
+    distortion : float
+       The mean (non-squared) Euclidean distance between the observations
+       passed and the centroids generated. Note the difference to the standard
+       definition of distortion in the context of the k-means algorithm, which
+       is the sum of the squared distances.
+
+    See Also
+    --------
+    kmeans2 : a different implementation of k-means clustering
+       with more methods for generating initial centroids but without
+       using a distortion change threshold as a stopping criterion.
+
+    whiten : must be called prior to passing an observation matrix
+       to kmeans.
+
+    Notes
+    -----
+    For more functionalities or optimal performance, you can use
+    `sklearn.cluster.KMeans <https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html>`_.
+    `This <https://hdbscan.readthedocs.io/en/latest/performance_and_scalability.html>`_
+    is a benchmark result of several implementations.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.cluster.vq import vq, kmeans, whiten
+    >>> import matplotlib.pyplot as plt
+    >>> features  = np.array([[ 1.9,2.3],
+    ...                       [ 1.5,2.5],
+    ...                       [ 0.8,0.6],
+    ...                       [ 0.4,1.8],
+    ...                       [ 0.1,0.1],
+    ...                       [ 0.2,1.8],
+    ...                       [ 2.0,0.5],
+    ...                       [ 0.3,1.5],
+    ...                       [ 1.0,1.0]])
+    >>> whitened = whiten(features)
+    >>> book = np.array((whitened[0],whitened[2]))
+    >>> kmeans(whitened,book)
+    (array([[ 2.3110306 ,  2.86287398],    # random
+           [ 0.93218041,  1.24398691]]), 0.85684700941625547)
+
+    >>> codes = 3
+    >>> kmeans(whitened,codes)
+    (array([[ 2.3110306 ,  2.86287398],    # random
+           [ 1.32544402,  0.65607529],
+           [ 0.40782893,  2.02786907]]), 0.5196582527686241)
+
+    >>> # Create 50 datapoints in two clusters a and b
+    >>> pts = 50
+    >>> rng = np.random.default_rng()
+    >>> a = rng.multivariate_normal([0, 0], [[4, 1], [1, 4]], size=pts)
+    >>> b = rng.multivariate_normal([30, 10],
+    ...                             [[10, 2], [2, 1]],
+    ...                             size=pts)
+    >>> features = np.concatenate((a, b))
+    >>> # Whiten data
+    >>> whitened = whiten(features)
+    >>> # Find 2 clusters in the data
+    >>> codebook, distortion = kmeans(whitened, 2)
+    >>> # Plot whitened data and cluster centers in red
+    >>> plt.scatter(whitened[:, 0], whitened[:, 1])
+    >>> plt.scatter(codebook[:, 0], codebook[:, 1], c='r')
+    >>> plt.show()
+
+    """
+    obs = _asarray_validated(obs, check_finite=check_finite)
+    if iter < 1:
+        raise ValueError("iter must be at least 1, got %s" % iter)
+
+    # Determine whether a count (scalar) or an initial guess (array) was passed.
+    if not np.isscalar(k_or_guess):
+        guess = _asarray_validated(k_or_guess, check_finite=check_finite)
+        if guess.size < 1:
+            raise ValueError("Asked for 0 clusters. Initial book was %s" %
+                             guess)
+        return _kmeans(obs, guess, thresh=thresh)
+
+    # k_or_guess is a scalar, now verify that it's an integer
+    k = int(k_or_guess)
+    if k != k_or_guess:
+        raise ValueError("If k_or_guess is a scalar, it must be an integer.")
+    if k < 1:
+        raise ValueError("Asked for %d clusters." % k)
+
+    rng = check_random_state(seed)
+
+    # initialize best distance value to a large value
+    best_dist = np.inf
+    for i in range(iter):
+        # the initial code book is randomly selected from observations
+        guess = _kpoints(obs, k, rng)
+        book, dist = _kmeans(obs, guess, thresh=thresh)
+        if dist < best_dist:
+            best_book = book
+            best_dist = dist
+    return best_book, best_dist
+
+
+def _kpoints(data, k, rng):
+    """Pick k points at random in data (one row = one observation).
+
+    Parameters
+    ----------
+    data : ndarray
+        Expect a rank 1 or 2 array. Rank 1 are assumed to describe one
+        dimensional data, rank 2 multidimensional data, in which case one
+        row is one observation.
+    k : int
+        Number of samples to generate.
+    rng : `numpy.random.Generator` or `numpy.random.RandomState`
+        Random number generator.
+
+    Returns
+    -------
+    x : ndarray
+        A 'k' by 'N' array containing the initial centroids
+
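+    Examples
+    --------
+    A minimal sketch (seeded so the draw is reproducible; rows are drawn
+    without replacement, and only the shape is shown):
+
+    >>> import numpy as np
+    >>> from scipy.cluster.vq import _kpoints
+    >>> rng = np.random.default_rng(0)
+    >>> data = np.arange(10.0).reshape(5, 2)
+    >>> _kpoints(data, 2, rng).shape
+    (2, 2)
+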
+    """
+    idx = rng.choice(data.shape[0], size=k, replace=False)
+    return data[idx]
+
+
+def _krandinit(data, k, rng):
+    """Returns k samples of a random variable whose parameters depend on data.
+
+    More precisely, it returns k observations sampled from a Gaussian random
+    variable whose mean and covariances are the ones estimated from the data.
+
+    Parameters
+    ----------
+    data : ndarray
+        Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D
+        data, rank 2 multidimensional data, in which case one
+        row is one observation.
+    k : int
+        Number of samples to generate.
+    rng : `numpy.random.Generator` or `numpy.random.RandomState`
+        Random number generator.
+
+    Returns
+    -------
+    x : ndarray
+        A 'k' by 'N' array containing the initial centroids
+
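+    Examples
+    --------
+    A minimal sketch (the draws are random, so only the shape is shown;
+    the mean and covariance of the samples follow those of `data`):
+
+    >>> import numpy as np
+    >>> from scipy.cluster.vq import _krandinit
+    >>> rng = np.random.default_rng(0)
+    >>> data = rng.standard_normal((50, 3))
+    >>> _krandinit(data, 4, rng).shape
+    (4, 3)
+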
+    """
+    mu = data.mean(axis=0)
+
+    if data.ndim == 1:
+        cov = np.cov(data)
+        x = rng.standard_normal(size=k)
+        x *= np.sqrt(cov)
+    elif data.shape[1] > data.shape[0]:
+        # More features than observations: the sample covariance matrix is
+        # rank deficient, so sample in the row space via the thin SVD
+        # instead of a Cholesky factor.
+        _, s, vh = np.linalg.svd(data - mu, full_matrices=False)
+        x = rng.standard_normal(size=(k, s.size))
+        sVh = s[:, None] * vh / np.sqrt(data.shape[0] - 1)
+        x = x.dot(sVh)
+    else:
+        cov = np.atleast_2d(np.cov(data, rowvar=False))
+
+        # k rows, d cols (one row = one obs)
+        # Generate k sample of a random variable ~ Gaussian(mu, cov)
+        x = rng.standard_normal(size=(k, mu.size))
+        x = x.dot(np.linalg.cholesky(cov).T)
+
+    x += mu
+    return x
+
+
+def _kpp(data, k, rng):
+    """ Picks k points in the data based on the kmeans++ method.
+
+    Parameters
+    ----------
+    data : ndarray
+        Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D
+        data, rank 2 multidimensional data, in which case one
+        row is one observation.
+    k : int
+        Number of samples to generate.
+    rng : `numpy.random.Generator` or `numpy.random.RandomState`
+        Random number generator.
+
+    Returns
+    -------
+    init : ndarray
+        A 'k' by 'N' array containing the initial centroids.
+
+    References
+    ----------
+    .. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of
+       careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM Symposium
+       on Discrete Algorithms, 2007.
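+
+    Examples
+    --------
+    A minimal sketch (seeded; the D^2-weighted draws are random, so only
+    the shape is shown):
+
+    >>> import numpy as np
+    >>> from scipy.cluster.vq import _kpp
+    >>> rng = np.random.default_rng(0)
+    >>> data = rng.standard_normal((20, 2))
+    >>> _kpp(data, 3, rng).shape
+    (3, 2)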
+    """
+
+    dims = data.shape[1] if len(data.shape) > 1 else 1
+    init = np.ndarray((k, dims))
+
+    for i in range(k):
+        if i == 0:
+            init[i, :] = data[rng_integers(rng, data.shape[0])]
+
+        else:
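+            # Squared distance from each observation to its nearest
+            # already-chosen centroid; the next centroid is drawn with
+            # probability proportional to D^2, via inverse-CDF sampling
+            # (searchsorted on the cumulative probabilities).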
+            D2 = cdist(init[:i,:], data, metric='sqeuclidean').min(axis=0)
+            probs = D2/D2.sum()
+            cumprobs = probs.cumsum()
+            r = rng.uniform()
+            init[i, :] = data[np.searchsorted(cumprobs, r)]
+
+    return init
+
+
+_valid_init_meth = {'random': _krandinit, 'points': _kpoints, '++': _kpp}
+
+
+def _missing_warn():
+    """Print a warning when called."""
+    warnings.warn("One of the clusters is empty. "
+                  "Re-run kmeans with a different initialization.")
+
+
+def _missing_raise():
+    """Raise a ClusterError when called."""
+    raise ClusterError("One of the clusters is empty. "
+                       "Re-run kmeans with a different initialization.")
+
+
+_valid_miss_meth = {'warn': _missing_warn, 'raise': _missing_raise}
+
+
+def kmeans2(data, k, iter=10, thresh=1e-5, minit='random',
+            missing='warn', check_finite=True, *, seed=None):
+    """
+    Classify a set of observations into k clusters using the k-means algorithm.
+
+    The algorithm attempts to minimize the Euclidean distance between
+    observations and centroids. Several initialization methods are
+    included.
+
+    Parameters
+    ----------
+    data : ndarray
+        A 'M' by 'N' array of 'M' observations in 'N' dimensions or a length
+        'M' array of 'M' 1-D observations.
+    k : int or ndarray
+        The number of clusters to form as well as the number of
+        centroids to generate. If `minit` is the string 'matrix', or if
+        an ndarray is given instead of an int, it is interpreted as the
+        initial cluster centroids to use.
+    iter : int, optional
+        Number of iterations of the k-means algorithm to run. Note
+        that this differs in meaning from the `iter` parameter of
+        the `kmeans` function.
+    thresh : float, optional
+        (not used yet)
+    minit : str, optional
+        Method for initialization. Available methods are 'random',
+        'points', '++' and 'matrix':
+
+        'random': generate k centroids from a Gaussian with mean and
+        variance estimated from the data.
+
+        'points': choose k observations (rows) at random from data for
+        the initial centroids.
+
+        '++': choose k observations according to the kmeans++ method
+        (careful seeding).
+
+        'matrix': interpret the k parameter as a k by N (or length k
+        array for 1-D data) array of initial centroids.
+    missing : str, optional
+        Method to deal with empty clusters. Available methods are
+        'warn' and 'raise':
+
+        'warn': give a warning and continue.
+
+        'raise': raise a ClusterError and terminate the algorithm.
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+        Default: True
+    seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
+        Seed for initializing the pseudo-random number generator.
+        If `seed` is None (or `numpy.random`), the `numpy.random.RandomState`
+        singleton is used.
+        If `seed` is an int, a new ``RandomState`` instance is used,
+        seeded with `seed`.
+        If `seed` is already a ``Generator`` or ``RandomState`` instance then
+        that instance is used.
+        The default is None.
+
+    Returns
+    -------
+    centroid : ndarray
+        A 'k' by 'N' array of centroids found at the last iteration of
+        k-means.
+    label : ndarray
+        label[i] is the code or index of the centroid the
+        ith observation is closest to.
+
+    See Also
+    --------
+    kmeans
+
+    References
+    ----------
+    .. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of
+       careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM Symposium
+       on Discrete Algorithms, 2007.
+
+    Examples
+    --------
+    >>> from scipy.cluster.vq import kmeans2
+    >>> import matplotlib.pyplot as plt
+    >>> import numpy as np
+
+    Create z, an array with shape (100, 2) containing a mixture of samples
+    from three multivariate normal distributions.
+
+    >>> rng = np.random.default_rng()
+    >>> a = rng.multivariate_normal([0, 6], [[2, 1], [1, 1.5]], size=45)
+    >>> b = rng.multivariate_normal([2, 0], [[1, -1], [-1, 3]], size=30)
+    >>> c = rng.multivariate_normal([6, 4], [[5, 0], [0, 1.2]], size=25)
+    >>> z = np.concatenate((a, b, c))
+    >>> rng.shuffle(z)
+
+    Compute three clusters.
+
+    >>> centroid, label = kmeans2(z, 3, minit='points')
+    >>> centroid
+    array([[ 2.22274463, -0.61666946],  # may vary
+           [ 0.54069047,  5.86541444],
+           [ 6.73846769,  4.01991898]])
+
+    How many points are in each cluster?
+
+    >>> counts = np.bincount(label)
+    >>> counts
+    array([29, 51, 20])  # may vary
+
+    Plot the clusters.
+
+    >>> w0 = z[label == 0]
+    >>> w1 = z[label == 1]
+    >>> w2 = z[label == 2]
+    >>> plt.plot(w0[:, 0], w0[:, 1], 'o', alpha=0.5, label='cluster 0')
+    >>> plt.plot(w1[:, 0], w1[:, 1], 'd', alpha=0.5, label='cluster 1')
+    >>> plt.plot(w2[:, 0], w2[:, 1], 's', alpha=0.5, label='cluster 2')
+    >>> plt.plot(centroid[:, 0], centroid[:, 1], 'k*', label='centroids')
+    >>> plt.axis('equal')
+    >>> plt.legend(shadow=True)
+    >>> plt.show()
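+
+    Explicit starting centroids can also be supplied via ``minit='matrix'``
+    (a quick sketch; any (k, N) array of initial points works):
+
+    >>> centroid2, label2 = kmeans2(z, z[:3].copy(), minit='matrix')
+    >>> centroid2.shape
+    (3, 2)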
+
+    """
+    if int(iter) < 1:
+        raise ValueError("Invalid iter (%s), "
+                         "must be a positive integer." % iter)
+    try:
+        miss_meth = _valid_miss_meth[missing]
+    except KeyError as e:
+        raise ValueError("Unknown missing method %r" % (missing,)) from e
+
+    data = _asarray_validated(data, check_finite=check_finite)
+    if data.ndim == 1:
+        d = 1
+    elif data.ndim == 2:
+        d = data.shape[1]
+    else:
+        raise ValueError("Input of rank > 2 is not supported.")
+
+    if data.size < 1:
+        raise ValueError("Empty input is not supported.")
+
+    # If k is not a single value, it should be compatible with data's shape
+    if minit == 'matrix' or not np.isscalar(k):
+        code_book = np.array(k, copy=True)
+        if data.ndim != code_book.ndim:
+            raise ValueError("k array doesn't match data rank")
+        nc = len(code_book)
+        if data.ndim > 1 and code_book.shape[1] != d:
+            raise ValueError("k array doesn't match data dimension")
+    else:
+        nc = int(k)
+
+        if nc < 1:
+            raise ValueError("Cannot ask kmeans2 for %d clusters"
+                             " (k was %s)" % (nc, k))
+        elif nc != k:
+            warnings.warn("k was not an integer, was converted.")
+
+        try:
+            init_meth = _valid_init_meth[minit]
+        except KeyError as e:
+            raise ValueError("Unknown init method %r" % (minit,)) from e
+        else:
+            rng = check_random_state(seed)
+            code_book = init_meth(data, k, rng)
+
+    for i in range(iter):
+        # Compute the nearest neighbor for each obs using the current code book
+        label = vq(data, code_book)[0]
+        # Update the code book by computing centroids
+        new_code_book, has_members = _vq.update_cluster_means(data, label, nc)
+        if not has_members.all():
+            miss_meth()
+            # Set the empty clusters to their previous positions
+            new_code_book[~has_members] = code_book[~has_members]
+        code_book = new_code_book
+
+    return code_book, label
diff --git a/__packaged__/coreml/.python_dependencies/scipy/conftest.py b/__packaged__/coreml/.python_dependencies/scipy/conftest.py
new file mode 100644
index 00000000..0563e94e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/conftest.py
@@ -0,0 +1,95 @@
+# Pytest customization
+import os
+import pytest
+import warnings
+
+import numpy as np
+import numpy.testing as npt
+from scipy._lib._fpumode import get_fpu_mode
+from scipy._lib._testutils import FPUModeChangeWarning
+from scipy._lib import _pep440
+
+
+def pytest_configure(config):
+    config.addinivalue_line("markers",
+        "slow: Tests that are very slow.")
+    config.addinivalue_line("markers",
+        "xslow: mark test as extremely slow (not run unless explicitly requested)")
+    config.addinivalue_line("markers",
+        "xfail_on_32bit: mark test as failing on 32-bit platforms")
+    try:
+        import pytest_timeout  # noqa:F401
+    except Exception:
+        config.addinivalue_line(
+            "markers", 'timeout: mark a test for a non-default timeout')
+
+
+def _get_mark(item, name):
+    if _pep440.parse(pytest.__version__) >= _pep440.Version("3.6.0"):
+        mark = item.get_closest_marker(name)
+    else:
+        mark = item.get_marker(name)
+    return mark
+
+
+def pytest_runtest_setup(item):
+    mark = _get_mark(item, "xslow")
+    if mark is not None:
+        try:
+            v = int(os.environ.get('SCIPY_XSLOW', '0'))
+        except ValueError:
+            v = False
+        if not v:
+            pytest.skip("very slow test; set environment variable SCIPY_XSLOW=1 to run it")
+    mark = _get_mark(item, 'xfail_on_32bit')
+    if mark is not None and np.intp(0).itemsize < 8:
+        pytest.xfail('Fails on our 32-bit test platform(s): %s' % (mark.args[0],))
+
+    # Older versions of threadpoolctl have an issue that may lead to this
+    # warning being emitted, see gh-14441
+    with npt.suppress_warnings() as sup:
+        sup.filter(pytest.PytestUnraisableExceptionWarning)
+
+        try:
+            from threadpoolctl import threadpool_limits
+
+            HAS_THREADPOOLCTL = True
+        except Exception:  # observed in gh-14441: (ImportError, AttributeError)
+            # Optional dependency only. All exceptions are caught, for robustness
+            HAS_THREADPOOLCTL = False
+
+        if HAS_THREADPOOLCTL:
+            # Set the number of openmp threads based on the number of workers
+            # xdist is using to prevent oversubscription. Simplified version of what
+            # sklearn does (it can rely on threadpoolctl and its builtin OpenMP helper
+            # functions)
+            try:
+                xdist_worker_count = int(os.environ['PYTEST_XDIST_WORKER_COUNT'])
+            except KeyError:
+                # raises when pytest-xdist is not installed
+                return
+
+            if not os.getenv('OMP_NUM_THREADS'):
+                max_openmp_threads = os.cpu_count() // 2  # use nr of physical cores
+                threads_per_worker = max(max_openmp_threads // xdist_worker_count, 1)
+                try:
+                    threadpool_limits(threads_per_worker, user_api='blas')
+                except Exception:
+                    # May raise AttributeError for older versions of OpenBLAS.
+                    # Catch any error for robustness.
+                    return
+
+
+@pytest.fixture(scope="function", autouse=True)
+def check_fpu_mode(request):
+    """
+    Check FPU mode was not changed during the test.
+    """
+    old_mode = get_fpu_mode()
+    yield
+    new_mode = get_fpu_mode()
+
+    if old_mode != new_mode:
+        warnings.warn("FPU mode changed from {0:#x} to {1:#x} during "
+                      "the test".format(old_mode, new_mode),
+                      category=FPUModeChangeWarning, stacklevel=0)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/constants/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/constants/__init__.py
new file mode 100644
index 00000000..437929c5
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/constants/__init__.py
@@ -0,0 +1,343 @@
+r"""
+==================================
+Constants (:mod:`scipy.constants`)
+==================================
+
+.. currentmodule:: scipy.constants
+
+Physical and mathematical constants and units.
+
+
+Mathematical constants
+======================
+
+================  =================================================================
+``pi``            Pi
+``golden``        Golden ratio
+``golden_ratio``  Golden ratio
+================  =================================================================
+
+
+Physical constants
+==================
+
+===========================  =================================================================
+``c``                        speed of light in vacuum
+``speed_of_light``           speed of light in vacuum
+``mu_0``                     the magnetic constant :math:`\mu_0`
+``epsilon_0``                the electric constant (vacuum permittivity), :math:`\epsilon_0`
+``h``                        the Planck constant :math:`h`
+``Planck``                   the Planck constant :math:`h`
+``hbar``                     :math:`\hbar = h/(2\pi)`
+``G``                        Newtonian constant of gravitation
+``gravitational_constant``   Newtonian constant of gravitation
+``g``                        standard acceleration of gravity
+``e``                        elementary charge
+``elementary_charge``        elementary charge
+``R``                        molar gas constant
+``gas_constant``             molar gas constant
+``alpha``                    fine-structure constant
+``fine_structure``           fine-structure constant
+``N_A``                      Avogadro constant
+``Avogadro``                 Avogadro constant
+``k``                        Boltzmann constant
+``Boltzmann``                Boltzmann constant
+``sigma``                    Stefan-Boltzmann constant :math:`\sigma`
+``Stefan_Boltzmann``         Stefan-Boltzmann constant :math:`\sigma`
+``Wien``                     Wien displacement law constant
+``Rydberg``                  Rydberg constant
+``m_e``                      electron mass
+``electron_mass``            electron mass
+``m_p``                      proton mass
+``proton_mass``              proton mass
+``m_n``                      neutron mass
+``neutron_mass``             neutron mass
+===========================  =================================================================
+
+
+Constants database
+------------------
+
+In addition to the above variables, :mod:`scipy.constants` also contains the
+2018 CODATA recommended values [CODATA2018]_ database containing more physical
+constants.
+
+.. autosummary::
+   :toctree: generated/
+
+   value      -- Value in physical_constants indexed by key
+   unit       -- Unit in physical_constants indexed by key
+   precision  -- Relative precision in physical_constants indexed by key
+   find       -- Return list of physical_constant keys with a given string
+   ConstantWarning -- Constant sought not in newest CODATA data set
+
+.. data:: physical_constants
+
+   Dictionary of physical constants, of the format
+   ``physical_constants[name] = (value, unit, uncertainty)``.
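+
+   For example, a quick lookup sketch::
+
+       from scipy.constants import physical_constants
+       physical_constants['electron mass']
+       # (9.1093837015e-31, 'kg', 2.8e-40)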
+
+Available constants:
+
+======================================================================  ====
+%(constant_names)s
+======================================================================  ====
+
+
+Units
+=====
+
+SI prefixes
+-----------
+
+============  =================================================================
+``yotta``     :math:`10^{24}`
+``zetta``     :math:`10^{21}`
+``exa``       :math:`10^{18}`
+``peta``      :math:`10^{15}`
+``tera``      :math:`10^{12}`
+``giga``      :math:`10^{9}`
+``mega``      :math:`10^{6}`
+``kilo``      :math:`10^{3}`
+``hecto``     :math:`10^{2}`
+``deka``      :math:`10^{1}`
+``deci``      :math:`10^{-1}`
+``centi``     :math:`10^{-2}`
+``milli``     :math:`10^{-3}`
+``micro``     :math:`10^{-6}`
+``nano``      :math:`10^{-9}`
+``pico``      :math:`10^{-12}`
+``femto``     :math:`10^{-15}`
+``atto``      :math:`10^{-18}`
+``zepto``     :math:`10^{-21}`
+``yocto``     :math:`10^{-24}`
+============  =================================================================
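+
+These prefixes are plain floats; a quick usage sketch::
+
+    from scipy.constants import kilo, micro
+    5 * kilo    # 5000.0
+    2 * micro   # 2e-06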
+
+Binary prefixes
+---------------
+
+============  =================================================================
+``kibi``      :math:`2^{10}`
+``mebi``      :math:`2^{20}`
+``gibi``      :math:`2^{30}`
+``tebi``      :math:`2^{40}`
+``pebi``      :math:`2^{50}`
+``exbi``      :math:`2^{60}`
+``zebi``      :math:`2^{70}`
+``yobi``      :math:`2^{80}`
+============  =================================================================
+
+Mass
+----
+
+=================  ============================================================
+``gram``           :math:`10^{-3}` kg
+``metric_ton``     :math:`10^{3}` kg
+``grain``          one grain in kg
+``lb``             one pound (avoirdupois) in kg
+``pound``          one pound (avoirdupois) in kg
+``blob``           one inch version of a slug in kg (added in 1.0.0)
+``slinch``         one inch version of a slug in kg (added in 1.0.0)
+``slug``           one slug in kg (added in 1.0.0)
+``oz``             one ounce in kg
+``ounce``          one ounce in kg
+``stone``          one stone in kg
+``long_ton``       one long ton in kg
+``short_ton``      one short ton in kg
+``troy_ounce``     one Troy ounce in kg
+``troy_pound``     one Troy pound in kg
+``carat``          one carat in kg
+``m_u``            atomic mass constant (in kg)
+``u``              atomic mass constant (in kg)
+``atomic_mass``    atomic mass constant (in kg)
+=================  ============================================================
+
+Angle
+-----
+
+=================  ============================================================
+``degree``         degree in radians
+``arcmin``         arc minute in radians
+``arcminute``      arc minute in radians
+``arcsec``         arc second in radians
+``arcsecond``      arc second in radians
+=================  ============================================================
+
+
+Time
+----
+
+=================  ============================================================
+``minute``         one minute in seconds
+``hour``           one hour in seconds
+``day``            one day in seconds
+``week``           one week in seconds
+``year``           one year (365 days) in seconds
+``Julian_year``    one Julian year (365.25 days) in seconds
+=================  ============================================================
+
+
+Length
+------
+
+=====================  ============================================================
+``inch``               one inch in meters
+``foot``               one foot in meters
+``yard``               one yard in meters
+``mile``               one mile in meters
+``mil``                one mil in meters
+``pt``                 one point in meters
+``point``              one point in meters
+``survey_foot``        one survey foot in meters
+``survey_mile``        one survey mile in meters
+``nautical_mile``      one nautical mile in meters
+``fermi``              one Fermi in meters
+``angstrom``           one Angstrom in meters
+``micron``             one micron in meters
+``au``                 one astronomical unit in meters
+``astronomical_unit``  one astronomical unit in meters
+``light_year``         one light year in meters
+``parsec``             one parsec in meters
+=====================  ============================================================
+
+Pressure
+--------
+
+=================  ============================================================
+``atm``            standard atmosphere in pascals
+``atmosphere``     standard atmosphere in pascals
+``bar``            one bar in pascals
+``torr``           one torr (mmHg) in pascals
+``mmHg``           one torr (mmHg) in pascals
+``psi``            one psi in pascals
+=================  ============================================================
+
+Area
+----
+
+=================  ============================================================
+``hectare``        one hectare in square meters
+``acre``           one acre in square meters
+=================  ============================================================
+
+
+Volume
+------
+
+===================    ========================================================
+``liter``              one liter in cubic meters
+``litre``              one liter in cubic meters
+``gallon``             one gallon (US) in cubic meters
+``gallon_US``          one gallon (US) in cubic meters
+``gallon_imp``         one gallon (UK) in cubic meters
+``fluid_ounce``        one fluid ounce (US) in cubic meters
+``fluid_ounce_US``     one fluid ounce (US) in cubic meters
+``fluid_ounce_imp``    one fluid ounce (UK) in cubic meters
+``bbl``                one barrel in cubic meters
+``barrel``             one barrel in cubic meters
+===================    ========================================================
+
+Speed
+-----
+
+==================    ==========================================================
+``kmh``               kilometers per hour in meters per second
+``mph``               miles per hour in meters per second
+``mach``              one Mach (approx., at 15 C, 1 atm) in meters per second
+``speed_of_sound``    one Mach (approx., at 15 C, 1 atm) in meters per second
+``knot``              one knot in meters per second
+==================    ==========================================================
+
+
+Temperature
+-----------
+
+=====================  =======================================================
+``zero_Celsius``       zero of the Celsius scale in kelvins
+``degree_Fahrenheit``  one degree Fahrenheit (temperature differences only) in kelvins
+=====================  =======================================================
+
+.. autosummary::
+   :toctree: generated/
+
+   convert_temperature
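+
+For example, a quick sketch::
+
+    from scipy.constants import convert_temperature
+    convert_temperature(300.0, 'Kelvin', 'Celsius')  # 26.85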
+
+Energy
+------
+
+====================  =======================================================
+``eV``                one electron volt in Joules
+``electron_volt``     one electron volt in Joules
+``calorie``           one calorie (thermochemical) in Joules
+``calorie_th``        one calorie (thermochemical) in Joules
+``calorie_IT``        one calorie (International Steam Table calorie, 1956) in Joules
+``erg``               one erg in Joules
+``Btu``               one British thermal unit (International Steam Table) in Joules
+``Btu_IT``            one British thermal unit (International Steam Table) in Joules
+``Btu_th``            one British thermal unit (thermochemical) in Joules
+``ton_TNT``           one ton of TNT in Joules
+====================  =======================================================
+
+Power
+-----
+
+====================  =======================================================
+``hp``                one horsepower in watts
+``horsepower``        one horsepower in watts
+====================  =======================================================
+
+Force
+-----
+
+====================  =======================================================
+``dyn``               one dyne in newtons
+``dyne``              one dyne in newtons
+``lbf``               one pound force in newtons
+``pound_force``       one pound force in newtons
+``kgf``               one kilogram force in newtons
+``kilogram_force``    one kilogram force in newtons
+====================  =======================================================
+
+Optics
+------
+
+.. autosummary::
+   :toctree: generated/
+
+   lambda2nu
+   nu2lambda
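+
+For example, the frequency of 500 nm light (a quick sketch)::
+
+    from scipy.constants import lambda2nu
+    lambda2nu(500e-9)  # ~5.996e+14 Hz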
+
+References
+==========
+
+.. [CODATA2018] CODATA Recommended Values of the Fundamental
+   Physical Constants 2018.
+
+   https://physics.nist.gov/cuu/Constants/
+
+"""
+# Modules contributed by BasSw (wegwerp@gmail.com)
+from ._codata import *
+from ._constants import *
+from ._codata import _obsolete_constants, physical_constants
+
+# Deprecated namespaces, to be removed in v2.0.0
+from . import codata, constants
+
+_constant_names_list = [(_k.lower(), _k, _v)
+                        for _k, _v in physical_constants.items()
+                        if _k not in _obsolete_constants]
+_constant_names = "\n".join(["``%s``%s  %s %s" % (_x[1], " "*(66-len(_x[1])),
+                                                  _x[2][0], _x[2][1])
+                             for _x in sorted(_constant_names_list)])
+if __doc__:
+    __doc__ = __doc__ % dict(constant_names=_constant_names)
+
+del _constant_names
+del _constant_names_list
+
+__all__ = [s for s in dir() if not s.startswith('_')]
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/__packaged__/coreml/.python_dependencies/scipy/constants/_codata.py b/__packaged__/coreml/.python_dependencies/scipy/constants/_codata.py
new file mode 100644
index 00000000..34f619a3
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/constants/_codata.py
@@ -0,0 +1,1756 @@
+# Compiled by Charles Harris, dated October 3, 2002
+# updated to 2002 values by BasSw, 2006
+# Updated to 2006 values by Vincent Davis June 2010
+# Updated to 2014 values by Joseph Booker, 2015
+# Updated to 2018 values by Jakob Jakobson, 2019
+
+"""
+Fundamental Physical Constants
+------------------------------
+
+These constants are taken from CODATA Recommended Values of the Fundamental
+Physical Constants 2018.
+
+Object
+------
+physical_constants : dict
+    A dictionary containing physical constants. Keys are the names of physical
+    constants, values are tuples (value, units, precision).
+
+Functions
+---------
+value(key):
+    Returns the value of the physical constant named by `key`.
+unit(key):
+    Returns the units of the physical constant named by `key`.
+precision(key):
+    Returns the relative precision of the physical constant named by `key`.
+find(sub):
+    Prints or returns a list of keys containing the string `sub`
+    (all keys by default).
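+
+For example, a quick sketch::
+
+    from scipy.constants import value, unit
+    value('elementary charge')   # 1.602176634e-19
+    unit('elementary charge')    # 'C'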
+
+Source
+------
+The values of the constants provided at this site are recommended for
+international use by CODATA and are the latest available. Termed the "2018
+CODATA recommended values," they are generally recognized worldwide for use in
+all fields of science and technology. The values became available on 20 May
+2019 and replaced the 2014 CODATA set. Also available is an introduction to the
+constants for non-experts at
+
+https://physics.nist.gov/cuu/Constants/introduction.html
+
+References
+----------
+The bibliography covers theoretical and experimental publications relevant to
+the fundamental constants and closely related precision measurements published
+since the mid 1980s, along with many older papers of particular interest, some
+of which date back to the 1800s. To search the bibliography, visit
+
+https://physics.nist.gov/cuu/Constants/
+
+"""
+
+from __future__ import annotations
+
+import warnings
+from math import pi, sqrt
+
+from typing import Any
+
+__all__ = ['physical_constants', 'value', 'unit', 'precision', 'find',
+           'ConstantWarning']
+
+"""
+Source:  https://physics.nist.gov/cuu/Constants/
+
+The values of the constants provided at this site are recommended for
+international use by CODATA and are the latest available. Termed the "2018
+CODATA recommended values," they are generally recognized worldwide for use in
+all fields of science and technology. The values became available on 20 May
+2019 and replaced the 2014 CODATA set.
+"""
+
+#
+# Source:  https://physics.nist.gov/cuu/Constants/
+#
+
+# Quantity                                             Value                 Uncertainty          Unit
+# ---------------------------------------------------- --------------------- -------------------- -------------
+txt2002 = """\
+Wien displacement law constant                         2.897 7685e-3         0.000 0051e-3         m K
+atomic unit of 1st hyperpolarizablity                  3.206 361 51e-53      0.000 000 28e-53      C^3 m^3 J^-2
+atomic unit of 2nd hyperpolarizablity                  6.235 3808e-65        0.000 0011e-65        C^4 m^4 J^-3
+atomic unit of electric dipole moment                  8.478 353 09e-30      0.000 000 73e-30      C m
+atomic unit of electric polarizablity                  1.648 777 274e-41     0.000 000 016e-41     C^2 m^2 J^-1
+atomic unit of electric quadrupole moment              4.486 551 24e-40      0.000 000 39e-40      C m^2
+atomic unit of magn. dipole moment                     1.854 801 90e-23      0.000 000 16e-23      J T^-1
+atomic unit of magn. flux density                      2.350 517 42e5        0.000 000 20e5        T
+deuteron magn. moment                                  0.433 073 482e-26     0.000 000 038e-26     J T^-1
+deuteron magn. moment to Bohr magneton ratio           0.466 975 4567e-3     0.000 000 0050e-3
+deuteron magn. moment to nuclear magneton ratio        0.857 438 2329        0.000 000 0092
+deuteron-electron magn. moment ratio                   -4.664 345 548e-4     0.000 000 050e-4
+deuteron-proton magn. moment ratio                     0.307 012 2084        0.000 000 0045
+deuteron-neutron magn. moment ratio                    -0.448 206 52         0.000 000 11
+electron gyromagn. ratio                               1.760 859 74e11       0.000 000 15e11       s^-1 T^-1
+electron gyromagn. ratio over 2 pi                     28 024.9532           0.0024                MHz T^-1
+electron magn. moment                                  -928.476 412e-26      0.000 080e-26         J T^-1
+electron magn. moment to Bohr magneton ratio           -1.001 159 652 1859   0.000 000 000 0038
+electron magn. moment to nuclear magneton ratio        -1838.281 971 07      0.000 000 85
+electron magn. moment anomaly                          1.159 652 1859e-3     0.000 000 0038e-3
+electron to shielded proton magn. moment ratio         -658.227 5956         0.000 0071
+electron to shielded helion magn. moment ratio         864.058 255           0.000 010
+electron-deuteron magn. moment ratio                   -2143.923 493         0.000 023
+electron-muon magn. moment ratio                       206.766 9894          0.000 0054
+electron-neutron magn. moment ratio                    960.920 50            0.000 23
+electron-proton magn. moment ratio                     -658.210 6862         0.000 0066
+magn. constant                                         12.566 370 614...e-7  0                     N A^-2
+magn. flux quantum                                     2.067 833 72e-15      0.000 000 18e-15      Wb
+muon magn. moment                                      -4.490 447 99e-26     0.000 000 40e-26      J T^-1
+muon magn. moment to Bohr magneton ratio               -4.841 970 45e-3      0.000 000 13e-3
+muon magn. moment to nuclear magneton ratio            -8.890 596 98         0.000 000 23
+muon-proton magn. moment ratio                         -3.183 345 118        0.000 000 089
+neutron gyromagn. ratio                                1.832 471 83e8        0.000 000 46e8        s^-1 T^-1
+neutron gyromagn. ratio over 2 pi                      29.164 6950           0.000 0073            MHz T^-1
+neutron magn. moment                                   -0.966 236 45e-26     0.000 000 24e-26      J T^-1
+neutron magn. moment to Bohr magneton ratio            -1.041 875 63e-3      0.000 000 25e-3
+neutron magn. moment to nuclear magneton ratio         -1.913 042 73         0.000 000 45
+neutron to shielded proton magn. moment ratio          -0.684 996 94         0.000 000 16
+neutron-electron magn. moment ratio                    1.040 668 82e-3       0.000 000 25e-3
+neutron-proton magn. moment ratio                      -0.684 979 34         0.000 000 16
+proton gyromagn. ratio                                 2.675 222 05e8        0.000 000 23e8        s^-1 T^-1
+proton gyromagn. ratio over 2 pi                       42.577 4813           0.000 0037            MHz T^-1
+proton magn. moment                                    1.410 606 71e-26      0.000 000 12e-26      J T^-1
+proton magn. moment to Bohr magneton ratio             1.521 032 206e-3      0.000 000 015e-3
+proton magn. moment to nuclear magneton ratio          2.792 847 351         0.000 000 028
+proton magn. shielding correction                      25.689e-6             0.015e-6
+proton-neutron magn. moment ratio                      -1.459 898 05         0.000 000 34
+shielded helion gyromagn. ratio                        2.037 894 70e8        0.000 000 18e8        s^-1 T^-1
+shielded helion gyromagn. ratio over 2 pi              32.434 1015           0.000 0028            MHz T^-1
+shielded helion magn. moment                           -1.074 553 024e-26    0.000 000 093e-26     J T^-1
+shielded helion magn. moment to Bohr magneton ratio    -1.158 671 474e-3     0.000 000 014e-3
+shielded helion magn. moment to nuclear magneton ratio -2.127 497 723        0.000 000 025
+shielded helion to proton magn. moment ratio           -0.761 766 562        0.000 000 012
+shielded helion to shielded proton magn. moment ratio  -0.761 786 1313       0.000 000 0033
+shielded helion gyromagn. ratio                        2.037 894 70e8        0.000 000 18e8        s^-1 T^-1
+shielded helion gyromagn. ratio over 2 pi              32.434 1015           0.000 0028            MHz T^-1
+shielded proton magn. moment                           1.410 570 47e-26      0.000 000 12e-26      J T^-1
+shielded proton magn. moment to Bohr magneton ratio    1.520 993 132e-3      0.000 000 016e-3
+shielded proton magn. moment to nuclear magneton ratio 2.792 775 604         0.000 000 030
+{220} lattice spacing of silicon                       192.015 5965e-12      0.000 0070e-12        m"""
+
+txt2006 = """\
+lattice spacing of silicon                             192.015 5762 e-12     0.000 0050 e-12       m
+alpha particle-electron mass ratio                     7294.299 5365         0.000 0031
+alpha particle mass                                    6.644 656 20 e-27     0.000 000 33 e-27     kg
+alpha particle mass energy equivalent                  5.971 919 17 e-10     0.000 000 30 e-10     J
+alpha particle mass energy equivalent in MeV           3727.379 109          0.000 093             MeV
+alpha particle mass in u                               4.001 506 179 127     0.000 000 000 062     u
+alpha particle molar mass                              4.001 506 179 127 e-3 0.000 000 000 062 e-3 kg mol^-1
+alpha particle-proton mass ratio                       3.972 599 689 51      0.000 000 000 41
+Angstrom star                                          1.000 014 98 e-10     0.000 000 90 e-10     m
+atomic mass constant                                   1.660 538 782 e-27    0.000 000 083 e-27    kg
+atomic mass constant energy equivalent                 1.492 417 830 e-10    0.000 000 074 e-10    J
+atomic mass constant energy equivalent in MeV          931.494 028           0.000 023             MeV
+atomic mass unit-electron volt relationship            931.494 028 e6        0.000 023 e6          eV
+atomic mass unit-hartree relationship                  3.423 177 7149 e7     0.000 000 0049 e7     E_h
+atomic mass unit-hertz relationship                    2.252 342 7369 e23    0.000 000 0032 e23    Hz
+atomic mass unit-inverse meter relationship            7.513 006 671 e14     0.000 000 011 e14     m^-1
+atomic mass unit-joule relationship                    1.492 417 830 e-10    0.000 000 074 e-10    J
+atomic mass unit-kelvin relationship                   1.080 9527 e13        0.000 0019 e13        K
+atomic mass unit-kilogram relationship                 1.660 538 782 e-27    0.000 000 083 e-27    kg
+atomic unit of 1st hyperpolarizability                 3.206 361 533 e-53    0.000 000 081 e-53    C^3 m^3 J^-2
+atomic unit of 2nd hyperpolarizability                 6.235 380 95 e-65     0.000 000 31 e-65     C^4 m^4 J^-3
+atomic unit of action                                  1.054 571 628 e-34    0.000 000 053 e-34    J s
+atomic unit of charge                                  1.602 176 487 e-19    0.000 000 040 e-19    C
+atomic unit of charge density                          1.081 202 300 e12     0.000 000 027 e12     C m^-3
+atomic unit of current                                 6.623 617 63 e-3      0.000 000 17 e-3      A
+atomic unit of electric dipole mom.                    8.478 352 81 e-30     0.000 000 21 e-30     C m
+atomic unit of electric field                          5.142 206 32 e11      0.000 000 13 e11      V m^-1
+atomic unit of electric field gradient                 9.717 361 66 e21      0.000 000 24 e21      V m^-2
+atomic unit of electric polarizability                 1.648 777 2536 e-41   0.000 000 0034 e-41   C^2 m^2 J^-1
+atomic unit of electric potential                      27.211 383 86         0.000 000 68          V
+atomic unit of electric quadrupole mom.                4.486 551 07 e-40     0.000 000 11 e-40     C m^2
+atomic unit of energy                                  4.359 743 94 e-18     0.000 000 22 e-18     J
+atomic unit of force                                   8.238 722 06 e-8      0.000 000 41 e-8      N
+atomic unit of length                                  0.529 177 208 59 e-10 0.000 000 000 36 e-10 m
+atomic unit of mag. dipole mom.                        1.854 801 830 e-23    0.000 000 046 e-23    J T^-1
+atomic unit of mag. flux density                       2.350 517 382 e5      0.000 000 059 e5      T
+atomic unit of magnetizability                         7.891 036 433 e-29    0.000 000 027 e-29    J T^-2
+atomic unit of mass                                    9.109 382 15 e-31     0.000 000 45 e-31     kg
+atomic unit of momentum                                1.992 851 565 e-24    0.000 000 099 e-24    kg m s^-1
+atomic unit of permittivity                            1.112 650 056... e-10 (exact)               F m^-1
+atomic unit of time                                    2.418 884 326 505 e-17 0.000 000 000 016 e-17 s
+atomic unit of velocity                                2.187 691 2541 e6     0.000 000 0015 e6     m s^-1
+Avogadro constant                                      6.022 141 79 e23      0.000 000 30 e23      mol^-1
+Bohr magneton                                          927.400 915 e-26      0.000 023 e-26        J T^-1
+Bohr magneton in eV/T                                  5.788 381 7555 e-5    0.000 000 0079 e-5    eV T^-1
+Bohr magneton in Hz/T                                  13.996 246 04 e9      0.000 000 35 e9       Hz T^-1
+Bohr magneton in inverse meters per tesla              46.686 4515           0.000 0012            m^-1 T^-1
+Bohr magneton in K/T                                   0.671 7131            0.000 0012            K T^-1
+Bohr radius                                            0.529 177 208 59 e-10 0.000 000 000 36 e-10 m
+Boltzmann constant                                     1.380 6504 e-23       0.000 0024 e-23       J K^-1
+Boltzmann constant in eV/K                             8.617 343 e-5         0.000 015 e-5         eV K^-1
+Boltzmann constant in Hz/K                             2.083 6644 e10        0.000 0036 e10        Hz K^-1
+Boltzmann constant in inverse meters per kelvin        69.503 56             0.000 12              m^-1 K^-1
+characteristic impedance of vacuum                     376.730 313 461...    (exact)               ohm
+classical electron radius                              2.817 940 2894 e-15   0.000 000 0058 e-15   m
+Compton wavelength                                     2.426 310 2175 e-12   0.000 000 0033 e-12   m
+Compton wavelength over 2 pi                           386.159 264 59 e-15   0.000 000 53 e-15     m
+conductance quantum                                    7.748 091 7004 e-5    0.000 000 0053 e-5    S
+conventional value of Josephson constant               483 597.9 e9          (exact)               Hz V^-1
+conventional value of von Klitzing constant            25 812.807            (exact)               ohm
+Cu x unit                                              1.002 076 99 e-13     0.000 000 28 e-13     m
+deuteron-electron mag. mom. ratio                      -4.664 345 537 e-4    0.000 000 039 e-4
+deuteron-electron mass ratio                           3670.482 9654         0.000 0016
+deuteron g factor                                      0.857 438 2308        0.000 000 0072
+deuteron mag. mom.                                     0.433 073 465 e-26    0.000 000 011 e-26    J T^-1
+deuteron mag. mom. to Bohr magneton ratio              0.466 975 4556 e-3    0.000 000 0039 e-3
+deuteron mag. mom. to nuclear magneton ratio           0.857 438 2308        0.000 000 0072
+deuteron mass                                          3.343 583 20 e-27     0.000 000 17 e-27     kg
+deuteron mass energy equivalent                        3.005 062 72 e-10     0.000 000 15 e-10     J
+deuteron mass energy equivalent in MeV                 1875.612 793          0.000 047             MeV
+deuteron mass in u                                     2.013 553 212 724     0.000 000 000 078     u
+deuteron molar mass                                    2.013 553 212 724 e-3 0.000 000 000 078 e-3 kg mol^-1
+deuteron-neutron mag. mom. ratio                       -0.448 206 52         0.000 000 11
+deuteron-proton mag. mom. ratio                        0.307 012 2070        0.000 000 0024
+deuteron-proton mass ratio                             1.999 007 501 08      0.000 000 000 22
+deuteron rms charge radius                             2.1402 e-15           0.0028 e-15           m
+electric constant                                      8.854 187 817... e-12 (exact)               F m^-1
+electron charge to mass quotient                       -1.758 820 150 e11    0.000 000 044 e11     C kg^-1
+electron-deuteron mag. mom. ratio                      -2143.923 498         0.000 018
+electron-deuteron mass ratio                           2.724 437 1093 e-4    0.000 000 0012 e-4
+electron g factor                                      -2.002 319 304 3622   0.000 000 000 0015
+electron gyromag. ratio                                1.760 859 770 e11     0.000 000 044 e11     s^-1 T^-1
+electron gyromag. ratio over 2 pi                      28 024.953 64         0.000 70              MHz T^-1
+electron mag. mom.                                     -928.476 377 e-26     0.000 023 e-26        J T^-1
+electron mag. mom. anomaly                             1.159 652 181 11 e-3  0.000 000 000 74 e-3
+electron mag. mom. to Bohr magneton ratio              -1.001 159 652 181 11 0.000 000 000 000 74
+electron mag. mom. to nuclear magneton ratio           -1838.281 970 92      0.000 000 80
+electron mass                                          9.109 382 15 e-31     0.000 000 45 e-31     kg
+electron mass energy equivalent                        8.187 104 38 e-14     0.000 000 41 e-14     J
+electron mass energy equivalent in MeV                 0.510 998 910         0.000 000 013         MeV
+electron mass in u                                     5.485 799 0943 e-4    0.000 000 0023 e-4    u
+electron molar mass                                    5.485 799 0943 e-7    0.000 000 0023 e-7    kg mol^-1
+electron-muon mag. mom. ratio                          206.766 9877          0.000 0052
+electron-muon mass ratio                               4.836 331 71 e-3      0.000 000 12 e-3
+electron-neutron mag. mom. ratio                       960.920 50            0.000 23
+electron-neutron mass ratio                            5.438 673 4459 e-4    0.000 000 0033 e-4
+electron-proton mag. mom. ratio                        -658.210 6848         0.000 0054
+electron-proton mass ratio                             5.446 170 2177 e-4    0.000 000 0024 e-4
+electron-tau mass ratio                                2.875 64 e-4          0.000 47 e-4
+electron to alpha particle mass ratio                  1.370 933 555 70 e-4  0.000 000 000 58 e-4
+electron to shielded helion mag. mom. ratio            864.058 257           0.000 010
+electron to shielded proton mag. mom. ratio            -658.227 5971         0.000 0072
+electron volt                                          1.602 176 487 e-19    0.000 000 040 e-19    J
+electron volt-atomic mass unit relationship            1.073 544 188 e-9     0.000 000 027 e-9     u
+electron volt-hartree relationship                     3.674 932 540 e-2     0.000 000 092 e-2     E_h
+electron volt-hertz relationship                       2.417 989 454 e14     0.000 000 060 e14     Hz
+electron volt-inverse meter relationship               8.065 544 65 e5       0.000 000 20 e5       m^-1
+electron volt-joule relationship                       1.602 176 487 e-19    0.000 000 040 e-19    J
+electron volt-kelvin relationship                      1.160 4505 e4         0.000 0020 e4         K
+electron volt-kilogram relationship                    1.782 661 758 e-36    0.000 000 044 e-36    kg
+elementary charge                                      1.602 176 487 e-19    0.000 000 040 e-19    C
+elementary charge over h                               2.417 989 454 e14     0.000 000 060 e14     A J^-1
+Faraday constant                                       96 485.3399           0.0024                C mol^-1
+Faraday constant for conventional electric current     96 485.3401           0.0048                C_90 mol^-1
+Fermi coupling constant                                1.166 37 e-5          0.000 01 e-5          GeV^-2
+fine-structure constant                                7.297 352 5376 e-3    0.000 000 0050 e-3
+first radiation constant                               3.741 771 18 e-16     0.000 000 19 e-16     W m^2
+first radiation constant for spectral radiance         1.191 042 759 e-16    0.000 000 059 e-16    W m^2 sr^-1
+hartree-atomic mass unit relationship                  2.921 262 2986 e-8    0.000 000 0042 e-8    u
+hartree-electron volt relationship                     27.211 383 86         0.000 000 68          eV
+Hartree energy                                         4.359 743 94 e-18     0.000 000 22 e-18     J
+Hartree energy in eV                                   27.211 383 86         0.000 000 68          eV
+hartree-hertz relationship                             6.579 683 920 722 e15 0.000 000 000 044 e15 Hz
+hartree-inverse meter relationship                     2.194 746 313 705 e7  0.000 000 000 015 e7  m^-1
+hartree-joule relationship                             4.359 743 94 e-18     0.000 000 22 e-18     J
+hartree-kelvin relationship                            3.157 7465 e5         0.000 0055 e5         K
+hartree-kilogram relationship                          4.850 869 34 e-35     0.000 000 24 e-35     kg
+helion-electron mass ratio                             5495.885 2765         0.000 0052
+helion mass                                            5.006 411 92 e-27     0.000 000 25 e-27     kg
+helion mass energy equivalent                          4.499 538 64 e-10     0.000 000 22 e-10     J
+helion mass energy equivalent in MeV                   2808.391 383          0.000 070             MeV
+helion mass in u                                       3.014 932 2473        0.000 000 0026        u
+helion molar mass                                      3.014 932 2473 e-3    0.000 000 0026 e-3    kg mol^-1
+helion-proton mass ratio                               2.993 152 6713        0.000 000 0026
+hertz-atomic mass unit relationship                    4.439 821 6294 e-24   0.000 000 0064 e-24   u
+hertz-electron volt relationship                       4.135 667 33 e-15     0.000 000 10 e-15     eV
+hertz-hartree relationship                             1.519 829 846 006 e-16 0.000 000 000010e-16 E_h
+hertz-inverse meter relationship                       3.335 640 951... e-9  (exact)               m^-1
+hertz-joule relationship                               6.626 068 96 e-34     0.000 000 33 e-34     J
+hertz-kelvin relationship                              4.799 2374 e-11       0.000 0084 e-11       K
+hertz-kilogram relationship                            7.372 496 00 e-51     0.000 000 37 e-51     kg
+inverse fine-structure constant                        137.035 999 679       0.000 000 094
+inverse meter-atomic mass unit relationship            1.331 025 0394 e-15   0.000 000 0019 e-15   u
+inverse meter-electron volt relationship               1.239 841 875 e-6     0.000 000 031 e-6     eV
+inverse meter-hartree relationship                     4.556 335 252 760 e-8 0.000 000 000 030 e-8 E_h
+inverse meter-hertz relationship                       299 792 458           (exact)               Hz
+inverse meter-joule relationship                       1.986 445 501 e-25    0.000 000 099 e-25    J
+inverse meter-kelvin relationship                      1.438 7752 e-2        0.000 0025 e-2        K
+inverse meter-kilogram relationship                    2.210 218 70 e-42     0.000 000 11 e-42     kg
+inverse of conductance quantum                         12 906.403 7787       0.000 0088            ohm
+Josephson constant                                     483 597.891 e9        0.012 e9              Hz V^-1
+joule-atomic mass unit relationship                    6.700 536 41 e9       0.000 000 33 e9       u
+joule-electron volt relationship                       6.241 509 65 e18      0.000 000 16 e18      eV
+joule-hartree relationship                             2.293 712 69 e17      0.000 000 11 e17      E_h
+joule-hertz relationship                               1.509 190 450 e33     0.000 000 075 e33     Hz
+joule-inverse meter relationship                       5.034 117 47 e24      0.000 000 25 e24      m^-1
+joule-kelvin relationship                              7.242 963 e22         0.000 013 e22         K
+joule-kilogram relationship                            1.112 650 056... e-17 (exact)               kg
+kelvin-atomic mass unit relationship                   9.251 098 e-14        0.000 016 e-14        u
+kelvin-electron volt relationship                      8.617 343 e-5         0.000 015 e-5         eV
+kelvin-hartree relationship                            3.166 8153 e-6        0.000 0055 e-6        E_h
+kelvin-hertz relationship                              2.083 6644 e10        0.000 0036 e10        Hz
+kelvin-inverse meter relationship                      69.503 56             0.000 12              m^-1
+kelvin-joule relationship                              1.380 6504 e-23       0.000 0024 e-23       J
+kelvin-kilogram relationship                           1.536 1807 e-40       0.000 0027 e-40       kg
+kilogram-atomic mass unit relationship                 6.022 141 79 e26      0.000 000 30 e26      u
+kilogram-electron volt relationship                    5.609 589 12 e35      0.000 000 14 e35      eV
+kilogram-hartree relationship                          2.061 486 16 e34      0.000 000 10 e34      E_h
+kilogram-hertz relationship                            1.356 392 733 e50     0.000 000 068 e50     Hz
+kilogram-inverse meter relationship                    4.524 439 15 e41      0.000 000 23 e41      m^-1
+kilogram-joule relationship                            8.987 551 787... e16  (exact)               J
+kilogram-kelvin relationship                           6.509 651 e39         0.000 011 e39         K
+lattice parameter of silicon                           543.102 064 e-12      0.000 014 e-12        m
+Loschmidt constant (273.15 K, 101.325 kPa)             2.686 7774 e25        0.000 0047 e25        m^-3
+mag. constant                                          12.566 370 614... e-7 (exact)               N A^-2
+mag. flux quantum                                      2.067 833 667 e-15    0.000 000 052 e-15    Wb
+molar gas constant                                     8.314 472             0.000 015             J mol^-1 K^-1
+molar mass constant                                    1 e-3                 (exact)               kg mol^-1
+molar mass of carbon-12                                12 e-3                (exact)               kg mol^-1
+molar Planck constant                                  3.990 312 6821 e-10   0.000 000 0057 e-10   J s mol^-1
+molar Planck constant times c                          0.119 626 564 72      0.000 000 000 17      J m mol^-1
+molar volume of ideal gas (273.15 K, 100 kPa)          22.710 981 e-3        0.000 040 e-3         m^3 mol^-1
+molar volume of ideal gas (273.15 K, 101.325 kPa)      22.413 996 e-3        0.000 039 e-3         m^3 mol^-1
+molar volume of silicon                                12.058 8349 e-6       0.000 0011 e-6        m^3 mol^-1
+Mo x unit                                              1.002 099 55 e-13     0.000 000 53 e-13     m
+muon Compton wavelength                                11.734 441 04 e-15    0.000 000 30 e-15     m
+muon Compton wavelength over 2 pi                      1.867 594 295 e-15    0.000 000 047 e-15    m
+muon-electron mass ratio                               206.768 2823          0.000 0052
+muon g factor                                          -2.002 331 8414       0.000 000 0012
+muon mag. mom.                                         -4.490 447 86 e-26    0.000 000 16 e-26     J T^-1
+muon mag. mom. anomaly                                 1.165 920 69 e-3      0.000 000 60 e-3
+muon mag. mom. to Bohr magneton ratio                  -4.841 970 49 e-3     0.000 000 12 e-3
+muon mag. mom. to nuclear magneton ratio               -8.890 597 05         0.000 000 23
+muon mass                                              1.883 531 30 e-28     0.000 000 11 e-28     kg
+muon mass energy equivalent                            1.692 833 510 e-11    0.000 000 095 e-11    J
+muon mass energy equivalent in MeV                     105.658 3668          0.000 0038            MeV
+muon mass in u                                         0.113 428 9256        0.000 000 0029        u
+muon molar mass                                        0.113 428 9256 e-3    0.000 000 0029 e-3    kg mol^-1
+muon-neutron mass ratio                                0.112 454 5167        0.000 000 0029
+muon-proton mag. mom. ratio                            -3.183 345 137        0.000 000 085
+muon-proton mass ratio                                 0.112 609 5261        0.000 000 0029
+muon-tau mass ratio                                    5.945 92 e-2          0.000 97 e-2
+natural unit of action                                 1.054 571 628 e-34    0.000 000 053 e-34    J s
+natural unit of action in eV s                         6.582 118 99 e-16     0.000 000 16 e-16     eV s
+natural unit of energy                                 8.187 104 38 e-14     0.000 000 41 e-14     J
+natural unit of energy in MeV                          0.510 998 910         0.000 000 013         MeV
+natural unit of length                                 386.159 264 59 e-15   0.000 000 53 e-15     m
+natural unit of mass                                   9.109 382 15 e-31     0.000 000 45 e-31     kg
+natural unit of momentum                               2.730 924 06 e-22     0.000 000 14 e-22     kg m s^-1
+natural unit of momentum in MeV/c                      0.510 998 910         0.000 000 013         MeV/c
+natural unit of time                                   1.288 088 6570 e-21   0.000 000 0018 e-21   s
+natural unit of velocity                               299 792 458           (exact)               m s^-1
+neutron Compton wavelength                             1.319 590 8951 e-15   0.000 000 0020 e-15   m
+neutron Compton wavelength over 2 pi                   0.210 019 413 82 e-15 0.000 000 000 31 e-15 m
+neutron-electron mag. mom. ratio                       1.040 668 82 e-3      0.000 000 25 e-3
+neutron-electron mass ratio                            1838.683 6605         0.000 0011
+neutron g factor                                       -3.826 085 45         0.000 000 90
+neutron gyromag. ratio                                 1.832 471 85 e8       0.000 000 43 e8       s^-1 T^-1
+neutron gyromag. ratio over 2 pi                       29.164 6954           0.000 0069            MHz T^-1
+neutron mag. mom.                                      -0.966 236 41 e-26    0.000 000 23 e-26     J T^-1
+neutron mag. mom. to Bohr magneton ratio               -1.041 875 63 e-3     0.000 000 25 e-3
+neutron mag. mom. to nuclear magneton ratio            -1.913 042 73         0.000 000 45
+neutron mass                                           1.674 927 211 e-27    0.000 000 084 e-27    kg
+neutron mass energy equivalent                         1.505 349 505 e-10    0.000 000 075 e-10    J
+neutron mass energy equivalent in MeV                  939.565 346           0.000 023             MeV
+neutron mass in u                                      1.008 664 915 97      0.000 000 000 43      u
+neutron molar mass                                     1.008 664 915 97 e-3  0.000 000 000 43 e-3  kg mol^-1
+neutron-muon mass ratio                                8.892 484 09          0.000 000 23
+neutron-proton mag. mom. ratio                         -0.684 979 34         0.000 000 16
+neutron-proton mass ratio                              1.001 378 419 18      0.000 000 000 46
+neutron-tau mass ratio                                 0.528 740             0.000 086
+neutron to shielded proton mag. mom. ratio             -0.684 996 94         0.000 000 16
+Newtonian constant of gravitation                      6.674 28 e-11         0.000 67 e-11         m^3 kg^-1 s^-2
+Newtonian constant of gravitation over h-bar c         6.708 81 e-39         0.000 67 e-39         (GeV/c^2)^-2
+nuclear magneton                                       5.050 783 24 e-27     0.000 000 13 e-27     J T^-1
+nuclear magneton in eV/T                               3.152 451 2326 e-8    0.000 000 0045 e-8    eV T^-1
+nuclear magneton in inverse meters per tesla           2.542 623 616 e-2     0.000 000 064 e-2     m^-1 T^-1
+nuclear magneton in K/T                                3.658 2637 e-4        0.000 0064 e-4        K T^-1
+nuclear magneton in MHz/T                              7.622 593 84          0.000 000 19          MHz T^-1
+Planck constant                                        6.626 068 96 e-34     0.000 000 33 e-34     J s
+Planck constant in eV s                                4.135 667 33 e-15     0.000 000 10 e-15     eV s
+Planck constant over 2 pi                              1.054 571 628 e-34    0.000 000 053 e-34    J s
+Planck constant over 2 pi in eV s                      6.582 118 99 e-16     0.000 000 16 e-16     eV s
+Planck constant over 2 pi times c in MeV fm            197.326 9631          0.000 0049            MeV fm
+Planck length                                          1.616 252 e-35        0.000 081 e-35        m
+Planck mass                                            2.176 44 e-8          0.000 11 e-8          kg
+Planck mass energy equivalent in GeV                   1.220 892 e19         0.000 061 e19         GeV
+Planck temperature                                     1.416 785 e32         0.000 071 e32         K
+Planck time                                            5.391 24 e-44         0.000 27 e-44         s
+proton charge to mass quotient                         9.578 833 92 e7       0.000 000 24 e7       C kg^-1
+proton Compton wavelength                              1.321 409 8446 e-15   0.000 000 0019 e-15   m
+proton Compton wavelength over 2 pi                    0.210 308 908 61 e-15 0.000 000 000 30 e-15 m
+proton-electron mass ratio                             1836.152 672 47       0.000 000 80
+proton g factor                                        5.585 694 713         0.000 000 046
+proton gyromag. ratio                                  2.675 222 099 e8      0.000 000 070 e8      s^-1 T^-1
+proton gyromag. ratio over 2 pi                        42.577 4821           0.000 0011            MHz T^-1
+proton mag. mom.                                       1.410 606 662 e-26    0.000 000 037 e-26    J T^-1
+proton mag. mom. to Bohr magneton ratio                1.521 032 209 e-3     0.000 000 012 e-3
+proton mag. mom. to nuclear magneton ratio             2.792 847 356         0.000 000 023
+proton mag. shielding correction                       25.694 e-6            0.014 e-6
+proton mass                                            1.672 621 637 e-27    0.000 000 083 e-27    kg
+proton mass energy equivalent                          1.503 277 359 e-10    0.000 000 075 e-10    J
+proton mass energy equivalent in MeV                   938.272 013           0.000 023             MeV
+proton mass in u                                       1.007 276 466 77      0.000 000 000 10      u
+proton molar mass                                      1.007 276 466 77 e-3  0.000 000 000 10 e-3  kg mol^-1
+proton-muon mass ratio                                 8.880 243 39          0.000 000 23
+proton-neutron mag. mom. ratio                         -1.459 898 06         0.000 000 34
+proton-neutron mass ratio                              0.998 623 478 24      0.000 000 000 46
+proton rms charge radius                               0.8768 e-15           0.0069 e-15           m
+proton-tau mass ratio                                  0.528 012             0.000 086
+quantum of circulation                                 3.636 947 5199 e-4    0.000 000 0050 e-4    m^2 s^-1
+quantum of circulation times 2                         7.273 895 040 e-4     0.000 000 010 e-4     m^2 s^-1
+Rydberg constant                                       10 973 731.568 527    0.000 073             m^-1
+Rydberg constant times c in Hz                         3.289 841 960 361 e15 0.000 000 000 022 e15 Hz
+Rydberg constant times hc in eV                        13.605 691 93         0.000 000 34          eV
+Rydberg constant times hc in J                         2.179 871 97 e-18     0.000 000 11 e-18     J
+Sackur-Tetrode constant (1 K, 100 kPa)                 -1.151 7047           0.000 0044
+Sackur-Tetrode constant (1 K, 101.325 kPa)             -1.164 8677           0.000 0044
+second radiation constant                              1.438 7752 e-2        0.000 0025 e-2        m K
+shielded helion gyromag. ratio                         2.037 894 730 e8      0.000 000 056 e8      s^-1 T^-1
+shielded helion gyromag. ratio over 2 pi               32.434 101 98         0.000 000 90          MHz T^-1
+shielded helion mag. mom.                              -1.074 552 982 e-26   0.000 000 030 e-26    J T^-1
+shielded helion mag. mom. to Bohr magneton ratio       -1.158 671 471 e-3    0.000 000 014 e-3
+shielded helion mag. mom. to nuclear magneton ratio    -2.127 497 718        0.000 000 025
+shielded helion to proton mag. mom. ratio              -0.761 766 558        0.000 000 011
+shielded helion to shielded proton mag. mom. ratio     -0.761 786 1313       0.000 000 0033
+shielded proton gyromag. ratio                         2.675 153 362 e8      0.000 000 073 e8      s^-1 T^-1
+shielded proton gyromag. ratio over 2 pi               42.576 3881           0.000 0012            MHz T^-1
+shielded proton mag. mom.                              1.410 570 419 e-26    0.000 000 038 e-26    J T^-1
+shielded proton mag. mom. to Bohr magneton ratio       1.520 993 128 e-3     0.000 000 017 e-3
+shielded proton mag. mom. to nuclear magneton ratio    2.792 775 598         0.000 000 030
+speed of light in vacuum                               299 792 458           (exact)               m s^-1
+standard acceleration of gravity                       9.806 65              (exact)               m s^-2
+standard atmosphere                                    101 325               (exact)               Pa
+Stefan-Boltzmann constant                              5.670 400 e-8         0.000 040 e-8         W m^-2 K^-4
+tau Compton wavelength                                 0.697 72 e-15         0.000 11 e-15         m
+tau Compton wavelength over 2 pi                       0.111 046 e-15        0.000 018 e-15        m
+tau-electron mass ratio                                3477.48               0.57
+tau mass                                               3.167 77 e-27         0.000 52 e-27         kg
+tau mass energy equivalent                             2.847 05 e-10         0.000 46 e-10         J
+tau mass energy equivalent in MeV                      1776.99               0.29                  MeV
+tau mass in u                                          1.907 68              0.000 31              u
+tau molar mass                                         1.907 68 e-3          0.000 31 e-3          kg mol^-1
+tau-muon mass ratio                                    16.8183               0.0027
+tau-neutron mass ratio                                 1.891 29              0.000 31
+tau-proton mass ratio                                  1.893 90              0.000 31
+Thomson cross section                                  0.665 245 8558 e-28   0.000 000 0027 e-28   m^2
+triton-electron mag. mom. ratio                        -1.620 514 423 e-3    0.000 000 021 e-3
+triton-electron mass ratio                             5496.921 5269         0.000 0051
+triton g factor                                        5.957 924 896         0.000 000 076
+triton mag. mom.                                       1.504 609 361 e-26    0.000 000 042 e-26    J T^-1
+triton mag. mom. to Bohr magneton ratio                1.622 393 657 e-3     0.000 000 021 e-3
+triton mag. mom. to nuclear magneton ratio             2.978 962 448         0.000 000 038
+triton mass                                            5.007 355 88 e-27     0.000 000 25 e-27     kg
+triton mass energy equivalent                          4.500 387 03 e-10     0.000 000 22 e-10     J
+triton mass energy equivalent in MeV                   2808.920 906          0.000 070             MeV
+triton mass in u                                       3.015 500 7134        0.000 000 0025        u
+triton molar mass                                      3.015 500 7134 e-3    0.000 000 0025 e-3    kg mol^-1
+triton-neutron mag. mom. ratio                         -1.557 185 53         0.000 000 37
+triton-proton mag. mom. ratio                          1.066 639 908         0.000 000 010
+triton-proton mass ratio                               2.993 717 0309        0.000 000 0025
+unified atomic mass unit                               1.660 538 782 e-27    0.000 000 083 e-27    kg
+von Klitzing constant                                  25 812.807 557        0.000 018             ohm
+weak mixing angle                                      0.222 55              0.000 56
+Wien frequency displacement law constant               5.878 933 e10         0.000 010 e10         Hz K^-1
+Wien wavelength displacement law constant              2.897 7685 e-3        0.000 0051 e-3        m K"""
+
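The txt2006 string above, like the txt2010 and txt2014 strings that follow, is a fixed-width ASCII table in the NIST CODATA layout: a name column, a value column, an uncertainty column ("(exact)" for defined values), and an optional units column, with spaces used inside numbers as digit-group separators. As a minimal sketch of how such a table can be consumed — assuming the column boundaries fall at characters 55, 77, and 99 of each line, and using a hypothetical helper name parse_codata_table rather than whatever parser this module actually ships:

    def parse_codata_table(text):
        # Sketch only; assumed fixed-width layout:
        #   [0:55) name, [55:77) value, [77:99) uncertainty, [99:) units
        constants = {}
        for line in text.split('\n'):
            name = line[:55].rstrip()
            # strip digit-group spaces and the '...' marker on exact values
            val = float(line[55:77].replace(' ', '').replace('...', ''))
            # defined constants carry '(exact)' instead of a numeric uncertainty
            uncert = float(line[77:99].replace(' ', '').replace('(exact)', '0'))
            units = line[99:].rstrip()
            constants[name] = (val, uncert, units)
        return constants

    # e.g. parse_codata_table(txt2006)['Rydberg constant']
    # -> (10973731.568527, 7.3e-05, 'm^-1')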
+txt2010 = """\
+{220} lattice spacing of silicon                       192.015 5714 e-12     0.000 0032 e-12       m
+alpha particle-electron mass ratio                     7294.299 5361         0.000 0029
+alpha particle mass                                    6.644 656 75 e-27     0.000 000 29 e-27     kg
+alpha particle mass energy equivalent                  5.971 919 67 e-10     0.000 000 26 e-10     J
+alpha particle mass energy equivalent in MeV           3727.379 240          0.000 082             MeV
+alpha particle mass in u                               4.001 506 179 125     0.000 000 000 062     u
+alpha particle molar mass                              4.001 506 179 125 e-3 0.000 000 000 062 e-3 kg mol^-1
+alpha particle-proton mass ratio                       3.972 599 689 33      0.000 000 000 36
+Angstrom star                                          1.000 014 95 e-10     0.000 000 90 e-10     m
+atomic mass constant                                   1.660 538 921 e-27    0.000 000 073 e-27    kg
+atomic mass constant energy equivalent                 1.492 417 954 e-10    0.000 000 066 e-10    J
+atomic mass constant energy equivalent in MeV          931.494 061           0.000 021             MeV
+atomic mass unit-electron volt relationship            931.494 061 e6        0.000 021 e6          eV
+atomic mass unit-hartree relationship                  3.423 177 6845 e7     0.000 000 0024 e7     E_h
+atomic mass unit-hertz relationship                    2.252 342 7168 e23    0.000 000 0016 e23    Hz
+atomic mass unit-inverse meter relationship            7.513 006 6042 e14    0.000 000 0053 e14    m^-1
+atomic mass unit-joule relationship                    1.492 417 954 e-10    0.000 000 066 e-10    J
+atomic mass unit-kelvin relationship                   1.080 954 08 e13      0.000 000 98 e13      K
+atomic mass unit-kilogram relationship                 1.660 538 921 e-27    0.000 000 073 e-27    kg
+atomic unit of 1st hyperpolarizability                 3.206 361 449 e-53    0.000 000 071 e-53    C^3 m^3 J^-2
+atomic unit of 2nd hyperpolarizability                 6.235 380 54 e-65     0.000 000 28 e-65     C^4 m^4 J^-3
+atomic unit of action                                  1.054 571 726 e-34    0.000 000 047 e-34    J s
+atomic unit of charge                                  1.602 176 565 e-19    0.000 000 035 e-19    C
+atomic unit of charge density                          1.081 202 338 e12     0.000 000 024 e12     C m^-3
+atomic unit of current                                 6.623 617 95 e-3      0.000 000 15 e-3      A
+atomic unit of electric dipole mom.                    8.478 353 26 e-30     0.000 000 19 e-30     C m
+atomic unit of electric field                          5.142 206 52 e11      0.000 000 11 e11      V m^-1
+atomic unit of electric field gradient                 9.717 362 00 e21      0.000 000 21 e21      V m^-2
+atomic unit of electric polarizability                 1.648 777 2754 e-41   0.000 000 0016 e-41   C^2 m^2 J^-1
+atomic unit of electric potential                      27.211 385 05         0.000 000 60          V
+atomic unit of electric quadrupole mom.                4.486 551 331 e-40    0.000 000 099 e-40    C m^2
+atomic unit of energy                                  4.359 744 34 e-18     0.000 000 19 e-18     J
+atomic unit of force                                   8.238 722 78 e-8      0.000 000 36 e-8      N
+atomic unit of length                                  0.529 177 210 92 e-10 0.000 000 000 17 e-10 m
+atomic unit of mag. dipole mom.                        1.854 801 936 e-23    0.000 000 041 e-23    J T^-1
+atomic unit of mag. flux density                       2.350 517 464 e5      0.000 000 052 e5      T
+atomic unit of magnetizability                         7.891 036 607 e-29    0.000 000 013 e-29    J T^-2
+atomic unit of mass                                    9.109 382 91 e-31     0.000 000 40 e-31     kg
+atomic unit of mom.um                                  1.992 851 740 e-24    0.000 000 088 e-24    kg m s^-1
+atomic unit of permittivity                            1.112 650 056... e-10 (exact)               F m^-1
+atomic unit of time                                    2.418 884 326 502e-17 0.000 000 000 012e-17 s
+atomic unit of velocity                                2.187 691 263 79 e6   0.000 000 000 71 e6   m s^-1
+Avogadro constant                                      6.022 141 29 e23      0.000 000 27 e23      mol^-1
+Bohr magneton                                          927.400 968 e-26      0.000 020 e-26        J T^-1
+Bohr magneton in eV/T                                  5.788 381 8066 e-5    0.000 000 0038 e-5    eV T^-1
+Bohr magneton in Hz/T                                  13.996 245 55 e9      0.000 000 31 e9       Hz T^-1
+Bohr magneton in inverse meters per tesla              46.686 4498           0.000 0010            m^-1 T^-1
+Bohr magneton in K/T                                   0.671 713 88          0.000 000 61          K T^-1
+Bohr radius                                            0.529 177 210 92 e-10 0.000 000 000 17 e-10 m
+Boltzmann constant                                     1.380 6488 e-23       0.000 0013 e-23       J K^-1
+Boltzmann constant in eV/K                             8.617 3324 e-5        0.000 0078 e-5        eV K^-1
+Boltzmann constant in Hz/K                             2.083 6618 e10        0.000 0019 e10        Hz K^-1
+Boltzmann constant in inverse meters per kelvin        69.503 476            0.000 063             m^-1 K^-1
+characteristic impedance of vacuum                     376.730 313 461...    (exact)               ohm
+classical electron radius                              2.817 940 3267 e-15   0.000 000 0027 e-15   m
+Compton wavelength                                     2.426 310 2389 e-12   0.000 000 0016 e-12   m
+Compton wavelength over 2 pi                           386.159 268 00 e-15   0.000 000 25 e-15     m
+conductance quantum                                    7.748 091 7346 e-5    0.000 000 0025 e-5    S
+conventional value of Josephson constant               483 597.9 e9          (exact)               Hz V^-1
+conventional value of von Klitzing constant            25 812.807            (exact)               ohm
+Cu x unit                                              1.002 076 97 e-13     0.000 000 28 e-13     m
+deuteron-electron mag. mom. ratio                      -4.664 345 537 e-4    0.000 000 039 e-4
+deuteron-electron mass ratio                           3670.482 9652         0.000 0015
+deuteron g factor                                      0.857 438 2308        0.000 000 0072
+deuteron mag. mom.                                     0.433 073 489 e-26    0.000 000 010 e-26    J T^-1
+deuteron mag. mom. to Bohr magneton ratio              0.466 975 4556 e-3    0.000 000 0039 e-3
+deuteron mag. mom. to nuclear magneton ratio           0.857 438 2308        0.000 000 0072
+deuteron mass                                          3.343 583 48 e-27     0.000 000 15 e-27     kg
+deuteron mass energy equivalent                        3.005 062 97 e-10     0.000 000 13 e-10     J
+deuteron mass energy equivalent in MeV                 1875.612 859          0.000 041             MeV
+deuteron mass in u                                     2.013 553 212 712     0.000 000 000 077     u
+deuteron molar mass                                    2.013 553 212 712 e-3 0.000 000 000 077 e-3 kg mol^-1
+deuteron-neutron mag. mom. ratio                       -0.448 206 52         0.000 000 11
+deuteron-proton mag. mom. ratio                        0.307 012 2070        0.000 000 0024
+deuteron-proton mass ratio                             1.999 007 500 97      0.000 000 000 18
+deuteron rms charge radius                             2.1424 e-15           0.0021 e-15           m
+electric constant                                      8.854 187 817... e-12 (exact)               F m^-1
+electron charge to mass quotient                       -1.758 820 088 e11    0.000 000 039 e11     C kg^-1
+electron-deuteron mag. mom. ratio                      -2143.923 498         0.000 018
+electron-deuteron mass ratio                           2.724 437 1095 e-4    0.000 000 0011 e-4
+electron g factor                                      -2.002 319 304 361 53 0.000 000 000 000 53
+electron gyromag. ratio                                1.760 859 708 e11     0.000 000 039 e11     s^-1 T^-1
+electron gyromag. ratio over 2 pi                      28 024.952 66         0.000 62              MHz T^-1
+electron-helion mass ratio                             1.819 543 0761 e-4    0.000 000 0017 e-4
+electron mag. mom.                                     -928.476 430 e-26     0.000 021 e-26        J T^-1
+electron mag. mom. anomaly                             1.159 652 180 76 e-3  0.000 000 000 27 e-3
+electron mag. mom. to Bohr magneton ratio              -1.001 159 652 180 76 0.000 000 000 000 27
+electron mag. mom. to nuclear magneton ratio           -1838.281 970 90      0.000 000 75
+electron mass                                          9.109 382 91 e-31     0.000 000 40 e-31     kg
+electron mass energy equivalent                        8.187 105 06 e-14     0.000 000 36 e-14     J
+electron mass energy equivalent in MeV                 0.510 998 928         0.000 000 011         MeV
+electron mass in u                                     5.485 799 0946 e-4    0.000 000 0022 e-4    u
+electron molar mass                                    5.485 799 0946 e-7    0.000 000 0022 e-7    kg mol^-1
+electron-muon mag. mom. ratio                          206.766 9896          0.000 0052
+electron-muon mass ratio                               4.836 331 66 e-3      0.000 000 12 e-3
+electron-neutron mag. mom. ratio                       960.920 50            0.000 23
+electron-neutron mass ratio                            5.438 673 4461 e-4    0.000 000 0032 e-4
+electron-proton mag. mom. ratio                        -658.210 6848         0.000 0054
+electron-proton mass ratio                             5.446 170 2178 e-4    0.000 000 0022 e-4
+electron-tau mass ratio                                2.875 92 e-4          0.000 26 e-4
+electron to alpha particle mass ratio                  1.370 933 555 78 e-4  0.000 000 000 55 e-4
+electron to shielded helion mag. mom. ratio            864.058 257           0.000 010
+electron to shielded proton mag. mom. ratio            -658.227 5971         0.000 0072
+electron-triton mass ratio                             1.819 200 0653 e-4    0.000 000 0017 e-4
+electron volt                                          1.602 176 565 e-19    0.000 000 035 e-19    J
+electron volt-atomic mass unit relationship            1.073 544 150 e-9     0.000 000 024 e-9     u
+electron volt-hartree relationship                     3.674 932 379 e-2     0.000 000 081 e-2     E_h
+electron volt-hertz relationship                       2.417 989 348 e14     0.000 000 053 e14     Hz
+electron volt-inverse meter relationship               8.065 544 29 e5       0.000 000 18 e5       m^-1
+electron volt-joule relationship                       1.602 176 565 e-19    0.000 000 035 e-19    J
+electron volt-kelvin relationship                      1.160 4519 e4         0.000 0011 e4         K
+electron volt-kilogram relationship                    1.782 661 845 e-36    0.000 000 039 e-36    kg
+elementary charge                                      1.602 176 565 e-19    0.000 000 035 e-19    C
+elementary charge over h                               2.417 989 348 e14     0.000 000 053 e14     A J^-1
+Faraday constant                                       96 485.3365           0.0021                C mol^-1
+Faraday constant for conventional electric current     96 485.3321           0.0043                C_90 mol^-1
+Fermi coupling constant                                1.166 364 e-5         0.000 005 e-5         GeV^-2
+fine-structure constant                                7.297 352 5698 e-3    0.000 000 0024 e-3
+first radiation constant                               3.741 771 53 e-16     0.000 000 17 e-16     W m^2
+first radiation constant for spectral radiance         1.191 042 869 e-16    0.000 000 053 e-16    W m^2 sr^-1
+hartree-atomic mass unit relationship                  2.921 262 3246 e-8    0.000 000 0021 e-8    u
+hartree-electron volt relationship                     27.211 385 05         0.000 000 60          eV
+Hartree energy                                         4.359 744 34 e-18     0.000 000 19 e-18     J
+Hartree energy in eV                                   27.211 385 05         0.000 000 60          eV
+hartree-hertz relationship                             6.579 683 920 729 e15 0.000 000 000 033 e15 Hz
+hartree-inverse meter relationship                     2.194 746 313 708 e7  0.000 000 000 011 e7  m^-1
+hartree-joule relationship                             4.359 744 34 e-18     0.000 000 19 e-18     J
+hartree-kelvin relationship                            3.157 7504 e5         0.000 0029 e5         K
+hartree-kilogram relationship                          4.850 869 79 e-35     0.000 000 21 e-35     kg
+helion-electron mass ratio                             5495.885 2754         0.000 0050
+helion g factor                                        -4.255 250 613        0.000 000 050
+helion mag. mom.                                       -1.074 617 486 e-26   0.000 000 027 e-26    J T^-1
+helion mag. mom. to Bohr magneton ratio                -1.158 740 958 e-3    0.000 000 014 e-3
+helion mag. mom. to nuclear magneton ratio             -2.127 625 306        0.000 000 025
+helion mass                                            5.006 412 34 e-27     0.000 000 22 e-27     kg
+helion mass energy equivalent                          4.499 539 02 e-10     0.000 000 20 e-10     J
+helion mass energy equivalent in MeV                   2808.391 482          0.000 062             MeV
+helion mass in u                                       3.014 932 2468        0.000 000 0025        u
+helion molar mass                                      3.014 932 2468 e-3    0.000 000 0025 e-3    kg mol^-1
+helion-proton mass ratio                               2.993 152 6707        0.000 000 0025
+hertz-atomic mass unit relationship                    4.439 821 6689 e-24   0.000 000 0031 e-24   u
+hertz-electron volt relationship                       4.135 667 516 e-15    0.000 000 091 e-15    eV
+hertz-hartree relationship                             1.519 829 8460045e-16 0.000 000 0000076e-16 E_h
+hertz-inverse meter relationship                       3.335 640 951... e-9  (exact)               m^-1
+hertz-joule relationship                               6.626 069 57 e-34     0.000 000 29 e-34     J
+hertz-kelvin relationship                              4.799 2434 e-11       0.000 0044 e-11       K
+hertz-kilogram relationship                            7.372 496 68 e-51     0.000 000 33 e-51     kg
+inverse fine-structure constant                        137.035 999 074       0.000 000 044
+inverse meter-atomic mass unit relationship            1.331 025 051 20 e-15 0.000 000 000 94 e-15 u
+inverse meter-electron volt relationship               1.239 841 930 e-6     0.000 000 027 e-6     eV
+inverse meter-hartree relationship                     4.556 335 252 755 e-8 0.000 000 000 023 e-8 E_h
+inverse meter-hertz relationship                       299 792 458           (exact)               Hz
+inverse meter-joule relationship                       1.986 445 684 e-25    0.000 000 088 e-25    J
+inverse meter-kelvin relationship                      1.438 7770 e-2        0.000 0013 e-2        K
+inverse meter-kilogram relationship                    2.210 218 902 e-42    0.000 000 098 e-42    kg
+inverse of conductance quantum                         12 906.403 7217       0.000 0042            ohm
+Josephson constant                                     483 597.870 e9        0.011 e9              Hz V^-1
+joule-atomic mass unit relationship                    6.700 535 85 e9       0.000 000 30 e9       u
+joule-electron volt relationship                       6.241 509 34 e18      0.000 000 14 e18      eV
+joule-hartree relationship                             2.293 712 48 e17      0.000 000 10 e17      E_h
+joule-hertz relationship                               1.509 190 311 e33     0.000 000 067 e33     Hz
+joule-inverse meter relationship                       5.034 117 01 e24      0.000 000 22 e24      m^-1
+joule-kelvin relationship                              7.242 9716 e22        0.000 0066 e22        K
+joule-kilogram relationship                            1.112 650 056... e-17 (exact)               kg
+kelvin-atomic mass unit relationship                   9.251 0868 e-14       0.000 0084 e-14       u
+kelvin-electron volt relationship                      8.617 3324 e-5        0.000 0078 e-5        eV
+kelvin-hartree relationship                            3.166 8114 e-6        0.000 0029 e-6        E_h
+kelvin-hertz relationship                              2.083 6618 e10        0.000 0019 e10        Hz
+kelvin-inverse meter relationship                      69.503 476            0.000 063             m^-1
+kelvin-joule relationship                              1.380 6488 e-23       0.000 0013 e-23       J
+kelvin-kilogram relationship                           1.536 1790 e-40       0.000 0014 e-40       kg
+kilogram-atomic mass unit relationship                 6.022 141 29 e26      0.000 000 27 e26      u
+kilogram-electron volt relationship                    5.609 588 85 e35      0.000 000 12 e35      eV
+kilogram-hartree relationship                          2.061 485 968 e34     0.000 000 091 e34     E_h
+kilogram-hertz relationship                            1.356 392 608 e50     0.000 000 060 e50     Hz
+kilogram-inverse meter relationship                    4.524 438 73 e41      0.000 000 20 e41      m^-1
+kilogram-joule relationship                            8.987 551 787... e16  (exact)               J
+kilogram-kelvin relationship                           6.509 6582 e39        0.000 0059 e39        K
+lattice parameter of silicon                           543.102 0504 e-12     0.000 0089 e-12       m
+Loschmidt constant (273.15 K, 100 kPa)                 2.651 6462 e25        0.000 0024 e25        m^-3
+Loschmidt constant (273.15 K, 101.325 kPa)             2.686 7805 e25        0.000 0024 e25        m^-3
+mag. constant                                          12.566 370 614... e-7 (exact)               N A^-2
+mag. flux quantum                                      2.067 833 758 e-15    0.000 000 046 e-15    Wb
+molar gas constant                                     8.314 4621            0.000 0075            J mol^-1 K^-1
+molar mass constant                                    1 e-3                 (exact)               kg mol^-1
+molar mass of carbon-12                                12 e-3                (exact)               kg mol^-1
+molar Planck constant                                  3.990 312 7176 e-10   0.000 000 0028 e-10   J s mol^-1
+molar Planck constant times c                          0.119 626 565 779     0.000 000 000 084     J m mol^-1
+molar volume of ideal gas (273.15 K, 100 kPa)          22.710 953 e-3        0.000 021 e-3         m^3 mol^-1
+molar volume of ideal gas (273.15 K, 101.325 kPa)      22.413 968 e-3        0.000 020 e-3         m^3 mol^-1
+molar volume of silicon                                12.058 833 01 e-6     0.000 000 80 e-6      m^3 mol^-1
+Mo x unit                                              1.002 099 52 e-13     0.000 000 53 e-13     m
+muon Compton wavelength                                11.734 441 03 e-15    0.000 000 30 e-15     m
+muon Compton wavelength over 2 pi                      1.867 594 294 e-15    0.000 000 047 e-15    m
+muon-electron mass ratio                               206.768 2843          0.000 0052
+muon g factor                                          -2.002 331 8418       0.000 000 0013
+muon mag. mom.                                         -4.490 448 07 e-26    0.000 000 15 e-26     J T^-1
+muon mag. mom. anomaly                                 1.165 920 91 e-3      0.000 000 63 e-3
+muon mag. mom. to Bohr magneton ratio                  -4.841 970 44 e-3     0.000 000 12 e-3
+muon mag. mom. to nuclear magneton ratio               -8.890 596 97         0.000 000 22
+muon mass                                              1.883 531 475 e-28    0.000 000 096 e-28    kg
+muon mass energy equivalent                            1.692 833 667 e-11    0.000 000 086 e-11    J
+muon mass energy equivalent in MeV                     105.658 3715          0.000 0035            MeV
+muon mass in u                                         0.113 428 9267        0.000 000 0029        u
+muon molar mass                                        0.113 428 9267 e-3    0.000 000 0029 e-3    kg mol^-1
+muon-neutron mass ratio                                0.112 454 5177        0.000 000 0028
+muon-proton mag. mom. ratio                            -3.183 345 107        0.000 000 084
+muon-proton mass ratio                                 0.112 609 5272        0.000 000 0028
+muon-tau mass ratio                                    5.946 49 e-2          0.000 54 e-2
+natural unit of action                                 1.054 571 726 e-34    0.000 000 047 e-34    J s
+natural unit of action in eV s                         6.582 119 28 e-16     0.000 000 15 e-16     eV s
+natural unit of energy                                 8.187 105 06 e-14     0.000 000 36 e-14     J
+natural unit of energy in MeV                          0.510 998 928         0.000 000 011         MeV
+natural unit of length                                 386.159 268 00 e-15   0.000 000 25 e-15     m
+natural unit of mass                                   9.109 382 91 e-31     0.000 000 40 e-31     kg
+natural unit of mom.um                                 2.730 924 29 e-22     0.000 000 12 e-22     kg m s^-1
+natural unit of mom.um in MeV/c                        0.510 998 928         0.000 000 011         MeV/c
+natural unit of time                                   1.288 088 668 33 e-21 0.000 000 000 83 e-21 s
+natural unit of velocity                               299 792 458           (exact)               m s^-1
+neutron Compton wavelength                             1.319 590 9068 e-15   0.000 000 0011 e-15   m
+neutron Compton wavelength over 2 pi                   0.210 019 415 68 e-15 0.000 000 000 17 e-15 m
+neutron-electron mag. mom. ratio                       1.040 668 82 e-3      0.000 000 25 e-3
+neutron-electron mass ratio                            1838.683 6605         0.000 0011
+neutron g factor                                       -3.826 085 45         0.000 000 90
+neutron gyromag. ratio                                 1.832 471 79 e8       0.000 000 43 e8       s^-1 T^-1
+neutron gyromag. ratio over 2 pi                       29.164 6943           0.000 0069            MHz T^-1
+neutron mag. mom.                                      -0.966 236 47 e-26    0.000 000 23 e-26     J T^-1
+neutron mag. mom. to Bohr magneton ratio               -1.041 875 63 e-3     0.000 000 25 e-3
+neutron mag. mom. to nuclear magneton ratio            -1.913 042 72         0.000 000 45
+neutron mass                                           1.674 927 351 e-27    0.000 000 074 e-27    kg
+neutron mass energy equivalent                         1.505 349 631 e-10    0.000 000 066 e-10    J
+neutron mass energy equivalent in MeV                  939.565 379           0.000 021             MeV
+neutron mass in u                                      1.008 664 916 00      0.000 000 000 43      u
+neutron molar mass                                     1.008 664 916 00 e-3  0.000 000 000 43 e-3  kg mol^-1
+neutron-muon mass ratio                                8.892 484 00          0.000 000 22
+neutron-proton mag. mom. ratio                         -0.684 979 34         0.000 000 16
+neutron-proton mass difference                         2.305 573 92 e-30     0.000 000 76 e-30
+neutron-proton mass difference energy equivalent       2.072 146 50 e-13     0.000 000 68 e-13
+neutron-proton mass difference energy equivalent in MeV 1.293 332 17          0.000 000 42
+neutron-proton mass difference in u                    0.001 388 449 19      0.000 000 000 45
+neutron-proton mass ratio                              1.001 378 419 17      0.000 000 000 45
+neutron-tau mass ratio                                 0.528 790             0.000 048
+neutron to shielded proton mag. mom. ratio             -0.684 996 94         0.000 000 16
+Newtonian constant of gravitation                      6.673 84 e-11         0.000 80 e-11         m^3 kg^-1 s^-2
+Newtonian constant of gravitation over h-bar c         6.708 37 e-39         0.000 80 e-39         (GeV/c^2)^-2
+nuclear magneton                                       5.050 783 53 e-27     0.000 000 11 e-27     J T^-1
+nuclear magneton in eV/T                               3.152 451 2605 e-8    0.000 000 0022 e-8    eV T^-1
+nuclear magneton in inverse meters per tesla           2.542 623 527 e-2     0.000 000 056 e-2     m^-1 T^-1
+nuclear magneton in K/T                                3.658 2682 e-4        0.000 0033 e-4        K T^-1
+nuclear magneton in MHz/T                              7.622 593 57          0.000 000 17          MHz T^-1
+Planck constant                                        6.626 069 57 e-34     0.000 000 29 e-34     J s
+Planck constant in eV s                                4.135 667 516 e-15    0.000 000 091 e-15    eV s
+Planck constant over 2 pi                              1.054 571 726 e-34    0.000 000 047 e-34    J s
+Planck constant over 2 pi in eV s                      6.582 119 28 e-16     0.000 000 15 e-16     eV s
+Planck constant over 2 pi times c in MeV fm            197.326 9718          0.000 0044            MeV fm
+Planck length                                          1.616 199 e-35        0.000 097 e-35        m
+Planck mass                                            2.176 51 e-8          0.000 13 e-8          kg
+Planck mass energy equivalent in GeV                   1.220 932 e19         0.000 073 e19         GeV
+Planck temperature                                     1.416 833 e32         0.000 085 e32         K
+Planck time                                            5.391 06 e-44         0.000 32 e-44         s
+proton charge to mass quotient                         9.578 833 58 e7       0.000 000 21 e7       C kg^-1
+proton Compton wavelength                              1.321 409 856 23 e-15 0.000 000 000 94 e-15 m
+proton Compton wavelength over 2 pi                    0.210 308 910 47 e-15 0.000 000 000 15 e-15 m
+proton-electron mass ratio                             1836.152 672 45       0.000 000 75
+proton g factor                                        5.585 694 713         0.000 000 046
+proton gyromag. ratio                                  2.675 222 005 e8      0.000 000 063 e8      s^-1 T^-1
+proton gyromag. ratio over 2 pi                        42.577 4806           0.000 0010            MHz T^-1
+proton mag. mom.                                       1.410 606 743 e-26    0.000 000 033 e-26    J T^-1
+proton mag. mom. to Bohr magneton ratio                1.521 032 210 e-3     0.000 000 012 e-3
+proton mag. mom. to nuclear magneton ratio             2.792 847 356         0.000 000 023
+proton mag. shielding correction                       25.694 e-6            0.014 e-6
+proton mass                                            1.672 621 777 e-27    0.000 000 074 e-27    kg
+proton mass energy equivalent                          1.503 277 484 e-10    0.000 000 066 e-10    J
+proton mass energy equivalent in MeV                   938.272 046           0.000 021             MeV
+proton mass in u                                       1.007 276 466 812     0.000 000 000 090     u
+proton molar mass                                      1.007 276 466 812 e-3 0.000 000 000 090 e-3 kg mol^-1
+proton-muon mass ratio                                 8.880 243 31          0.000 000 22
+proton-neutron mag. mom. ratio                         -1.459 898 06         0.000 000 34
+proton-neutron mass ratio                              0.998 623 478 26      0.000 000 000 45
+proton rms charge radius                               0.8775 e-15           0.0051 e-15           m
+proton-tau mass ratio                                  0.528 063             0.000 048
+quantum of circulation                                 3.636 947 5520 e-4    0.000 000 0024 e-4    m^2 s^-1
+quantum of circulation times 2                         7.273 895 1040 e-4    0.000 000 0047 e-4    m^2 s^-1
+Rydberg constant                                       10 973 731.568 539    0.000 055             m^-1
+Rydberg constant times c in Hz                         3.289 841 960 364 e15 0.000 000 000 017 e15 Hz
+Rydberg constant times hc in eV                        13.605 692 53         0.000 000 30          eV
+Rydberg constant times hc in J                         2.179 872 171 e-18    0.000 000 096 e-18    J
+Sackur-Tetrode constant (1 K, 100 kPa)                 -1.151 7078           0.000 0023
+Sackur-Tetrode constant (1 K, 101.325 kPa)             -1.164 8708           0.000 0023
+second radiation constant                              1.438 7770 e-2        0.000 0013 e-2        m K
+shielded helion gyromag. ratio                         2.037 894 659 e8      0.000 000 051 e8      s^-1 T^-1
+shielded helion gyromag. ratio over 2 pi               32.434 100 84         0.000 000 81          MHz T^-1
+shielded helion mag. mom.                              -1.074 553 044 e-26   0.000 000 027 e-26    J T^-1
+shielded helion mag. mom. to Bohr magneton ratio       -1.158 671 471 e-3    0.000 000 014 e-3
+shielded helion mag. mom. to nuclear magneton ratio    -2.127 497 718        0.000 000 025
+shielded helion to proton mag. mom. ratio              -0.761 766 558        0.000 000 011
+shielded helion to shielded proton mag. mom. ratio     -0.761 786 1313       0.000 000 0033
+shielded proton gyromag. ratio                         2.675 153 268 e8      0.000 000 066 e8      s^-1 T^-1
+shielded proton gyromag. ratio over 2 pi               42.576 3866           0.000 0010            MHz T^-1
+shielded proton mag. mom.                              1.410 570 499 e-26    0.000 000 035 e-26    J T^-1
+shielded proton mag. mom. to Bohr magneton ratio       1.520 993 128 e-3     0.000 000 017 e-3
+shielded proton mag. mom. to nuclear magneton ratio    2.792 775 598         0.000 000 030
+speed of light in vacuum                               299 792 458           (exact)               m s^-1
+standard acceleration of gravity                       9.806 65              (exact)               m s^-2
+standard atmosphere                                    101 325               (exact)               Pa
+standard-state pressure                                100 000               (exact)               Pa
+Stefan-Boltzmann constant                              5.670 373 e-8         0.000 021 e-8         W m^-2 K^-4
+tau Compton wavelength                                 0.697 787 e-15        0.000 063 e-15        m
+tau Compton wavelength over 2 pi                       0.111 056 e-15        0.000 010 e-15        m
+tau-electron mass ratio                                3477.15               0.31
+tau mass                                               3.167 47 e-27         0.000 29 e-27         kg
+tau mass energy equivalent                             2.846 78 e-10         0.000 26 e-10         J
+tau mass energy equivalent in MeV                      1776.82               0.16                  MeV
+tau mass in u                                          1.907 49              0.000 17              u
+tau molar mass                                         1.907 49 e-3          0.000 17 e-3          kg mol^-1
+tau-muon mass ratio                                    16.8167               0.0015
+tau-neutron mass ratio                                 1.891 11              0.000 17
+tau-proton mass ratio                                  1.893 72              0.000 17
+Thomson cross section                                  0.665 245 8734 e-28   0.000 000 0013 e-28   m^2
+triton-electron mass ratio                             5496.921 5267         0.000 0050
+triton g factor                                        5.957 924 896         0.000 000 076
+triton mag. mom.                                       1.504 609 447 e-26    0.000 000 038 e-26    J T^-1
+triton mag. mom. to Bohr magneton ratio                1.622 393 657 e-3     0.000 000 021 e-3
+triton mag. mom. to nuclear magneton ratio             2.978 962 448         0.000 000 038
+triton mass                                            5.007 356 30 e-27     0.000 000 22 e-27     kg
+triton mass energy equivalent                          4.500 387 41 e-10     0.000 000 20 e-10     J
+triton mass energy equivalent in MeV                   2808.921 005          0.000 062             MeV
+triton mass in u                                       3.015 500 7134        0.000 000 0025        u
+triton molar mass                                      3.015 500 7134 e-3    0.000 000 0025 e-3    kg mol^-1
+triton-proton mass ratio                               2.993 717 0308        0.000 000 0025
+unified atomic mass unit                               1.660 538 921 e-27    0.000 000 073 e-27    kg
+von Klitzing constant                                  25 812.807 4434       0.000 0084            ohm
+weak mixing angle                                      0.2223                0.0021
+Wien frequency displacement law constant               5.878 9254 e10        0.000 0053 e10        Hz K^-1
+Wien wavelength displacement law constant              2.897 7721 e-3        0.000 0026 e-3        m K"""
+
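Each edition repeats the same keys with updated values and (usually) shrinking uncertainties, so the parsed tables can be compared directly. A usage sketch, continuing the assumptions above and using a key present in both tables:

    c2010 = parse_codata_table(txt2010)
    c2014 = parse_codata_table(txt2014)
    # (value, uncertainty, units) for one key in each edition:
    # c2010['Avogadro constant'] -> (6.02214129e+23, 2.7e+16, 'mol^-1')
    # c2014['Avogadro constant'] -> (6.022140857e+23, 7.4e+15, 'mol^-1')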
+txt2014 = """\
+{220} lattice spacing of silicon                       192.015 5714 e-12     0.000 0032 e-12       m
+alpha particle-electron mass ratio                     7294.299 541 36       0.000 000 24
+alpha particle mass                                    6.644 657 230 e-27    0.000 000 082 e-27    kg
+alpha particle mass energy equivalent                  5.971 920 097 e-10    0.000 000 073 e-10    J
+alpha particle mass energy equivalent in MeV           3727.379 378          0.000 023             MeV
+alpha particle mass in u                               4.001 506 179 127     0.000 000 000 063     u
+alpha particle molar mass                              4.001 506 179 127 e-3 0.000 000 000 063 e-3 kg mol^-1
+alpha particle-proton mass ratio                       3.972 599 689 07      0.000 000 000 36
+Angstrom star                                          1.000 014 95 e-10     0.000 000 90 e-10     m
+atomic mass constant                                   1.660 539 040 e-27    0.000 000 020 e-27    kg
+atomic mass constant energy equivalent                 1.492 418 062 e-10    0.000 000 018 e-10    J
+atomic mass constant energy equivalent in MeV          931.494 0954          0.000 0057            MeV
+atomic mass unit-electron volt relationship            931.494 0954 e6       0.000 0057 e6         eV
+atomic mass unit-hartree relationship                  3.423 177 6902 e7     0.000 000 0016 e7     E_h
+atomic mass unit-hertz relationship                    2.252 342 7206 e23    0.000 000 0010 e23    Hz
+atomic mass unit-inverse meter relationship            7.513 006 6166 e14    0.000 000 0034 e14    m^-1
+atomic mass unit-joule relationship                    1.492 418 062 e-10    0.000 000 018 e-10    J
+atomic mass unit-kelvin relationship                   1.080 954 38 e13      0.000 000 62 e13      K
+atomic mass unit-kilogram relationship                 1.660 539 040 e-27    0.000 000 020 e-27    kg
+atomic unit of 1st hyperpolarizability                 3.206 361 329 e-53    0.000 000 020 e-53    C^3 m^3 J^-2
+atomic unit of 2nd hyperpolarizability                 6.235 380 085 e-65    0.000 000 077 e-65    C^4 m^4 J^-3
+atomic unit of action                                  1.054 571 800 e-34    0.000 000 013 e-34    J s
+atomic unit of charge                                  1.602 176 6208 e-19   0.000 000 0098 e-19   C
+atomic unit of charge density                          1.081 202 3770 e12    0.000 000 0067 e12    C m^-3
+atomic unit of current                                 6.623 618 183 e-3     0.000 000 041 e-3     A
+atomic unit of electric dipole mom.                    8.478 353 552 e-30    0.000 000 052 e-30    C m
+atomic unit of electric field                          5.142 206 707 e11     0.000 000 032 e11     V m^-1
+atomic unit of electric field gradient                 9.717 362 356 e21     0.000 000 060 e21     V m^-2
+atomic unit of electric polarizability                 1.648 777 2731 e-41   0.000 000 0011 e-41   C^2 m^2 J^-1
+atomic unit of electric potential                      27.211 386 02         0.000 000 17          V
+atomic unit of electric quadrupole mom.                4.486 551 484 e-40    0.000 000 028 e-40    C m^2
+atomic unit of energy                                  4.359 744 650 e-18    0.000 000 054 e-18    J
+atomic unit of force                                   8.238 723 36 e-8      0.000 000 10 e-8      N
+atomic unit of length                                  0.529 177 210 67 e-10 0.000 000 000 12 e-10 m
+atomic unit of mag. dipole mom.                        1.854 801 999 e-23    0.000 000 011 e-23    J T^-1
+atomic unit of mag. flux density                       2.350 517 550 e5      0.000 000 014 e5      T
+atomic unit of magnetizability                         7.891 036 5886 e-29   0.000 000 0090 e-29   J T^-2
+atomic unit of mass                                    9.109 383 56 e-31     0.000 000 11 e-31     kg
+atomic unit of mom.um                                  1.992 851 882 e-24    0.000 000 024 e-24    kg m s^-1
+atomic unit of permittivity                            1.112 650 056... e-10 (exact)               F m^-1
+atomic unit of time                                    2.418 884 326509e-17  0.000 000 000014e-17  s
+atomic unit of velocity                                2.187 691 262 77 e6   0.000 000 000 50 e6   m s^-1
+Avogadro constant                                      6.022 140 857 e23     0.000 000 074 e23     mol^-1
+Bohr magneton                                          927.400 9994 e-26     0.000 0057 e-26       J T^-1
+Bohr magneton in eV/T                                  5.788 381 8012 e-5    0.000 000 0026 e-5    eV T^-1
+Bohr magneton in Hz/T                                  13.996 245 042 e9     0.000 000 086 e9      Hz T^-1
+Bohr magneton in inverse meters per tesla              46.686 448 14         0.000 000 29          m^-1 T^-1
+Bohr magneton in K/T                                   0.671 714 05          0.000 000 39          K T^-1
+Bohr radius                                            0.529 177 210 67 e-10 0.000 000 000 12 e-10 m
+Boltzmann constant                                     1.380 648 52 e-23     0.000 000 79 e-23     J K^-1
+Boltzmann constant in eV/K                             8.617 3303 e-5        0.000 0050 e-5        eV K^-1
+Boltzmann constant in Hz/K                             2.083 6612 e10        0.000 0012 e10        Hz K^-1
+Boltzmann constant in inverse meters per kelvin        69.503 457            0.000 040             m^-1 K^-1
+characteristic impedance of vacuum                     376.730 313 461...    (exact)               ohm
+classical electron radius                              2.817 940 3227 e-15   0.000 000 0019 e-15   m
+Compton wavelength                                     2.426 310 2367 e-12   0.000 000 0011 e-12   m
+Compton wavelength over 2 pi                           386.159 267 64 e-15   0.000 000 18 e-15     m
+conductance quantum                                    7.748 091 7310 e-5    0.000 000 0018 e-5    S
+conventional value of Josephson constant               483 597.9 e9          (exact)               Hz V^-1
+conventional value of von Klitzing constant            25 812.807            (exact)               ohm
+Cu x unit                                              1.002 076 97 e-13     0.000 000 28 e-13     m
+deuteron-electron mag. mom. ratio                      -4.664 345 535 e-4    0.000 000 026 e-4
+deuteron-electron mass ratio                           3670.482 967 85       0.000 000 13
+deuteron g factor                                      0.857 438 2311        0.000 000 0048
+deuteron mag. mom.                                     0.433 073 5040 e-26   0.000 000 0036 e-26   J T^-1
+deuteron mag. mom. to Bohr magneton ratio              0.466 975 4554 e-3    0.000 000 0026 e-3
+deuteron mag. mom. to nuclear magneton ratio           0.857 438 2311        0.000 000 0048
+deuteron mass                                          3.343 583 719 e-27    0.000 000 041 e-27    kg
+deuteron mass energy equivalent                        3.005 063 183 e-10    0.000 000 037 e-10    J
+deuteron mass energy equivalent in MeV                 1875.612 928          0.000 012             MeV
+deuteron mass in u                                     2.013 553 212 745     0.000 000 000 040     u
+deuteron molar mass                                    2.013 553 212 745 e-3 0.000 000 000 040 e-3 kg mol^-1
+deuteron-neutron mag. mom. ratio                       -0.448 206 52         0.000 000 11
+deuteron-proton mag. mom. ratio                        0.307 012 2077        0.000 000 0015
+deuteron-proton mass ratio                             1.999 007 500 87      0.000 000 000 19
+deuteron rms charge radius                             2.1413 e-15           0.0025 e-15           m
+electric constant                                      8.854 187 817... e-12 (exact)               F m^-1
+electron charge to mass quotient                       -1.758 820 024 e11    0.000 000 011 e11     C kg^-1
+electron-deuteron mag. mom. ratio                      -2143.923 499         0.000 012
+electron-deuteron mass ratio                           2.724 437 107 484 e-4 0.000 000 000 096 e-4
+electron g factor                                      -2.002 319 304 361 82 0.000 000 000 000 52
+electron gyromag. ratio                                1.760 859 644 e11     0.000 000 011 e11     s^-1 T^-1
+electron gyromag. ratio over 2 pi                      28 024.951 64         0.000 17              MHz T^-1
+electron-helion mass ratio                             1.819 543 074 854 e-4 0.000 000 000 088 e-4
+electron mag. mom.                                     -928.476 4620 e-26    0.000 0057 e-26       J T^-1
+electron mag. mom. anomaly                             1.159 652 180 91 e-3  0.000 000 000 26 e-3
+electron mag. mom. to Bohr magneton ratio              -1.001 159 652 180 91 0.000 000 000 000 26
+electron mag. mom. to nuclear magneton ratio           -1838.281 972 34      0.000 000 17
+electron mass                                          9.109 383 56 e-31     0.000 000 11 e-31     kg
+electron mass energy equivalent                        8.187 105 65 e-14     0.000 000 10 e-14     J
+electron mass energy equivalent in MeV                 0.510 998 9461        0.000 000 0031        MeV
+electron mass in u                                     5.485 799 090 70 e-4  0.000 000 000 16 e-4  u
+electron molar mass                                    5.485 799 090 70 e-7  0.000 000 000 16 e-7  kg mol^-1
+electron-muon mag. mom. ratio                          206.766 9880          0.000 0046
+electron-muon mass ratio                               4.836 331 70 e-3      0.000 000 11 e-3
+electron-neutron mag. mom. ratio                       960.920 50            0.000 23
+electron-neutron mass ratio                            5.438 673 4428 e-4    0.000 000 0027 e-4
+electron-proton mag. mom. ratio                        -658.210 6866         0.000 0020
+electron-proton mass ratio                             5.446 170 213 52 e-4  0.000 000 000 52 e-4
+electron-tau mass ratio                                2.875 92 e-4          0.000 26 e-4
+electron to alpha particle mass ratio                  1.370 933 554 798 e-4 0.000 000 000 045 e-4
+electron to shielded helion mag. mom. ratio            864.058 257           0.000 010
+electron to shielded proton mag. mom. ratio            -658.227 5971         0.000 0072
+electron-triton mass ratio                             1.819 200 062 203 e-4 0.000 000 000 084 e-4
+electron volt                                          1.602 176 6208 e-19   0.000 000 0098 e-19   J
+electron volt-atomic mass unit relationship            1.073 544 1105 e-9    0.000 000 0066 e-9    u
+electron volt-hartree relationship                     3.674 932 248 e-2     0.000 000 023 e-2     E_h
+electron volt-hertz relationship                       2.417 989 262 e14     0.000 000 015 e14     Hz
+electron volt-inverse meter relationship               8.065 544 005 e5      0.000 000 050 e5      m^-1
+electron volt-joule relationship                       1.602 176 6208 e-19   0.000 000 0098 e-19   J
+electron volt-kelvin relationship                      1.160 452 21 e4       0.000 000 67 e4       K
+electron volt-kilogram relationship                    1.782 661 907 e-36    0.000 000 011 e-36    kg
+elementary charge                                      1.602 176 6208 e-19   0.000 000 0098 e-19   C
+elementary charge over h                               2.417 989 262 e14     0.000 000 015 e14     A J^-1
+Faraday constant                                       96 485.332 89         0.000 59              C mol^-1
+Faraday constant for conventional electric current     96 485.3251           0.0012                C_90 mol^-1
+Fermi coupling constant                                1.166 3787 e-5        0.000 0006 e-5        GeV^-2
+fine-structure constant                                7.297 352 5664 e-3    0.000 000 0017 e-3
+first radiation constant                               3.741 771 790 e-16    0.000 000 046 e-16    W m^2
+first radiation constant for spectral radiance         1.191 042 953 e-16    0.000 000 015 e-16    W m^2 sr^-1
+hartree-atomic mass unit relationship                  2.921 262 3197 e-8    0.000 000 0013 e-8    u
+hartree-electron volt relationship                     27.211 386 02         0.000 000 17          eV
+Hartree energy                                         4.359 744 650 e-18    0.000 000 054 e-18    J
+Hartree energy in eV                                   27.211 386 02         0.000 000 17          eV
+hartree-hertz relationship                             6.579 683 920 711 e15 0.000 000 000 039 e15 Hz
+hartree-inverse meter relationship                     2.194 746 313 702 e7  0.000 000 000 013 e7  m^-1
+hartree-joule relationship                             4.359 744 650 e-18    0.000 000 054 e-18    J
+hartree-kelvin relationship                            3.157 7513 e5         0.000 0018 e5         K
+hartree-kilogram relationship                          4.850 870 129 e-35    0.000 000 060 e-35    kg
+helion-electron mass ratio                             5495.885 279 22       0.000 000 27
+helion g factor                                        -4.255 250 616        0.000 000 050
+helion mag. mom.                                       -1.074 617 522 e-26   0.000 000 014 e-26    J T^-1
+helion mag. mom. to Bohr magneton ratio                -1.158 740 958 e-3    0.000 000 014 e-3
+helion mag. mom. to nuclear magneton ratio             -2.127 625 308        0.000 000 025
+helion mass                                            5.006 412 700 e-27    0.000 000 062 e-27    kg
+helion mass energy equivalent                          4.499 539 341 e-10    0.000 000 055 e-10    J
+helion mass energy equivalent in MeV                   2808.391 586          0.000 017             MeV
+helion mass in u                                       3.014 932 246 73      0.000 000 000 12      u
+helion molar mass                                      3.014 932 246 73 e-3  0.000 000 000 12 e-3  kg mol^-1
+helion-proton mass ratio                               2.993 152 670 46      0.000 000 000 29
+hertz-atomic mass unit relationship                    4.439 821 6616 e-24   0.000 000 0020 e-24   u
+hertz-electron volt relationship                       4.135 667 662 e-15    0.000 000 025 e-15    eV
+hertz-hartree relationship                             1.5198298460088 e-16  0.0000000000090e-16   E_h
+hertz-inverse meter relationship                       3.335 640 951... e-9  (exact)               m^-1
+hertz-joule relationship                               6.626 070 040 e-34    0.000 000 081 e-34    J
+hertz-kelvin relationship                              4.799 2447 e-11       0.000 0028 e-11       K
+hertz-kilogram relationship                            7.372 497 201 e-51    0.000 000 091 e-51    kg
+inverse fine-structure constant                        137.035 999 139       0.000 000 031
+inverse meter-atomic mass unit relationship            1.331 025 049 00 e-15 0.000 000 000 61 e-15 u
+inverse meter-electron volt relationship               1.239 841 9739 e-6    0.000 000 0076 e-6    eV
+inverse meter-hartree relationship                     4.556 335 252 767 e-8 0.000 000 000 027 e-8 E_h
+inverse meter-hertz relationship                       299 792 458           (exact)               Hz
+inverse meter-joule relationship                       1.986 445 824 e-25    0.000 000 024 e-25    J
+inverse meter-kelvin relationship                      1.438 777 36 e-2      0.000 000 83 e-2      K
+inverse meter-kilogram relationship                    2.210 219 057 e-42    0.000 000 027 e-42    kg
+inverse of conductance quantum                         12 906.403 7278       0.000 0029            ohm
+Josephson constant                                     483 597.8525 e9       0.0030 e9             Hz V^-1
+joule-atomic mass unit relationship                    6.700 535 363 e9      0.000 000 082 e9      u
+joule-electron volt relationship                       6.241 509 126 e18     0.000 000 038 e18     eV
+joule-hartree relationship                             2.293 712 317 e17     0.000 000 028 e17     E_h
+joule-hertz relationship                               1.509 190 205 e33     0.000 000 019 e33     Hz
+joule-inverse meter relationship                       5.034 116 651 e24     0.000 000 062 e24     m^-1
+joule-kelvin relationship                              7.242 9731 e22        0.000 0042 e22        K
+joule-kilogram relationship                            1.112 650 056... e-17 (exact)               kg
+kelvin-atomic mass unit relationship                   9.251 0842 e-14       0.000 0053 e-14       u
+kelvin-electron volt relationship                      8.617 3303 e-5        0.000 0050 e-5        eV
+kelvin-hartree relationship                            3.166 8105 e-6        0.000 0018 e-6        E_h
+kelvin-hertz relationship                              2.083 6612 e10        0.000 0012 e10        Hz
+kelvin-inverse meter relationship                      69.503 457            0.000 040             m^-1
+kelvin-joule relationship                              1.380 648 52 e-23     0.000 000 79 e-23     J
+kelvin-kilogram relationship                           1.536 178 65 e-40     0.000 000 88 e-40     kg
+kilogram-atomic mass unit relationship                 6.022 140 857 e26     0.000 000 074 e26     u
+kilogram-electron volt relationship                    5.609 588 650 e35     0.000 000 034 e35     eV
+kilogram-hartree relationship                          2.061 485 823 e34     0.000 000 025 e34     E_h
+kilogram-hertz relationship                            1.356 392 512 e50     0.000 000 017 e50     Hz
+kilogram-inverse meter relationship                    4.524 438 411 e41     0.000 000 056 e41     m^-1
+kilogram-joule relationship                            8.987 551 787... e16  (exact)               J
+kilogram-kelvin relationship                           6.509 6595 e39        0.000 0037 e39        K
+lattice parameter of silicon                           543.102 0504 e-12     0.000 0089 e-12       m
+Loschmidt constant (273.15 K, 100 kPa)                 2.651 6467 e25        0.000 0015 e25        m^-3
+Loschmidt constant (273.15 K, 101.325 kPa)             2.686 7811 e25        0.000 0015 e25        m^-3
+mag. constant                                          12.566 370 614... e-7 (exact)               N A^-2
+mag. flux quantum                                      2.067 833 831 e-15    0.000 000 013 e-15    Wb
+molar gas constant                                     8.314 4598            0.000 0048            J mol^-1 K^-1
+molar mass constant                                    1 e-3                 (exact)               kg mol^-1
+molar mass of carbon-12                                12 e-3                (exact)               kg mol^-1
+molar Planck constant                                  3.990 312 7110 e-10   0.000 000 0018 e-10   J s mol^-1
+molar Planck constant times c                          0.119 626 565 582     0.000 000 000 054     J m mol^-1
+molar volume of ideal gas (273.15 K, 100 kPa)          22.710 947 e-3        0.000 013 e-3         m^3 mol^-1
+molar volume of ideal gas (273.15 K, 101.325 kPa)      22.413 962 e-3        0.000 013 e-3         m^3 mol^-1
+molar volume of silicon                                12.058 832 14 e-6     0.000 000 61 e-6      m^3 mol^-1
+Mo x unit                                              1.002 099 52 e-13     0.000 000 53 e-13     m
+muon Compton wavelength                                11.734 441 11 e-15    0.000 000 26 e-15     m
+muon Compton wavelength over 2 pi                      1.867 594 308 e-15    0.000 000 042 e-15    m
+muon-electron mass ratio                               206.768 2826          0.000 0046
+muon g factor                                          -2.002 331 8418       0.000 000 0013
+muon mag. mom.                                         -4.490 448 26 e-26    0.000 000 10 e-26     J T^-1
+muon mag. mom. anomaly                                 1.165 920 89 e-3      0.000 000 63 e-3
+muon mag. mom. to Bohr magneton ratio                  -4.841 970 48 e-3     0.000 000 11 e-3
+muon mag. mom. to nuclear magneton ratio               -8.890 597 05         0.000 000 20
+muon mass                                              1.883 531 594 e-28    0.000 000 048 e-28    kg
+muon mass energy equivalent                            1.692 833 774 e-11    0.000 000 043 e-11    J
+muon mass energy equivalent in MeV                     105.658 3745          0.000 0024            MeV
+muon mass in u                                         0.113 428 9257        0.000 000 0025        u
+muon molar mass                                        0.113 428 9257 e-3    0.000 000 0025 e-3    kg mol^-1
+muon-neutron mass ratio                                0.112 454 5167        0.000 000 0025
+muon-proton mag. mom. ratio                            -3.183 345 142        0.000 000 071
+muon-proton mass ratio                                 0.112 609 5262        0.000 000 0025
+muon-tau mass ratio                                    5.946 49 e-2          0.000 54 e-2
+natural unit of action                                 1.054 571 800 e-34    0.000 000 013 e-34    J s
+natural unit of action in eV s                         6.582 119 514 e-16    0.000 000 040 e-16    eV s
+natural unit of energy                                 8.187 105 65 e-14     0.000 000 10 e-14     J
+natural unit of energy in MeV                          0.510 998 9461        0.000 000 0031        MeV
+natural unit of length                                 386.159 267 64 e-15   0.000 000 18 e-15     m
+natural unit of mass                                   9.109 383 56 e-31     0.000 000 11 e-31     kg
+natural unit of mom.um                                 2.730 924 488 e-22    0.000 000 034 e-22    kg m s^-1
+natural unit of mom.um in MeV/c                        0.510 998 9461        0.000 000 0031        MeV/c
+natural unit of time                                   1.288 088 667 12 e-21 0.000 000 000 58 e-21 s
+natural unit of velocity                               299 792 458           (exact)               m s^-1
+neutron Compton wavelength                             1.319 590 904 81 e-15 0.000 000 000 88 e-15 m
+neutron Compton wavelength over 2 pi                   0.210 019 415 36 e-15 0.000 000 000 14 e-15 m
+neutron-electron mag. mom. ratio                       1.040 668 82 e-3      0.000 000 25 e-3
+neutron-electron mass ratio                            1838.683 661 58       0.000 000 90
+neutron g factor                                       -3.826 085 45         0.000 000 90
+neutron gyromag. ratio                                 1.832 471 72 e8       0.000 000 43 e8       s^-1 T^-1
+neutron gyromag. ratio over 2 pi                       29.164 6933           0.000 0069            MHz T^-1
+neutron mag. mom.                                      -0.966 236 50 e-26    0.000 000 23 e-26     J T^-1
+neutron mag. mom. to Bohr magneton ratio               -1.041 875 63 e-3     0.000 000 25 e-3
+neutron mag. mom. to nuclear magneton ratio            -1.913 042 73         0.000 000 45
+neutron mass                                           1.674 927 471 e-27    0.000 000 021 e-27    kg
+neutron mass energy equivalent                         1.505 349 739 e-10    0.000 000 019 e-10    J
+neutron mass energy equivalent in MeV                  939.565 4133          0.000 0058            MeV
+neutron mass in u                                      1.008 664 915 88      0.000 000 000 49      u
+neutron molar mass                                     1.008 664 915 88 e-3  0.000 000 000 49 e-3  kg mol^-1
+neutron-muon mass ratio                                8.892 484 08          0.000 000 20
+neutron-proton mag. mom. ratio                         -0.684 979 34         0.000 000 16
+neutron-proton mass difference                         2.305 573 77 e-30     0.000 000 85 e-30
+neutron-proton mass difference energy equivalent       2.072 146 37 e-13     0.000 000 76 e-13
+neutron-proton mass difference energy equivalent in MeV 1.293 332 05         0.000 000 48
+neutron-proton mass difference in u                    0.001 388 449 00      0.000 000 000 51
+neutron-proton mass ratio                              1.001 378 418 98      0.000 000 000 51
+neutron-tau mass ratio                                 0.528 790             0.000 048
+neutron to shielded proton mag. mom. ratio             -0.684 996 94         0.000 000 16
+Newtonian constant of gravitation                      6.674 08 e-11         0.000 31 e-11         m^3 kg^-1 s^-2
+Newtonian constant of gravitation over h-bar c         6.708 61 e-39         0.000 31 e-39         (GeV/c^2)^-2
+nuclear magneton                                       5.050 783 699 e-27    0.000 000 031 e-27    J T^-1
+nuclear magneton in eV/T                               3.152 451 2550 e-8    0.000 000 0015 e-8    eV T^-1
+nuclear magneton in inverse meters per tesla           2.542 623 432 e-2     0.000 000 016 e-2     m^-1 T^-1
+nuclear magneton in K/T                                3.658 2690 e-4        0.000 0021 e-4        K T^-1
+nuclear magneton in MHz/T                              7.622 593 285         0.000 000 047         MHz T^-1
+Planck constant                                        6.626 070 040 e-34    0.000 000 081 e-34    J s
+Planck constant in eV s                                4.135 667 662 e-15    0.000 000 025 e-15    eV s
+Planck constant over 2 pi                              1.054 571 800 e-34    0.000 000 013 e-34    J s
+Planck constant over 2 pi in eV s                      6.582 119 514 e-16    0.000 000 040 e-16    eV s
+Planck constant over 2 pi times c in MeV fm            197.326 9788          0.000 0012            MeV fm
+Planck length                                          1.616 229 e-35        0.000 038 e-35        m
+Planck mass                                            2.176 470 e-8         0.000 051 e-8         kg
+Planck mass energy equivalent in GeV                   1.220 910 e19         0.000 029 e19         GeV
+Planck temperature                                     1.416 808 e32         0.000 033 e32         K
+Planck time                                            5.391 16 e-44         0.000 13 e-44         s
+proton charge to mass quotient                         9.578 833 226 e7      0.000 000 059 e7      C kg^-1
+proton Compton wavelength                              1.321 409 853 96 e-15 0.000 000 000 61 e-15 m
+proton Compton wavelength over 2 pi                    0.210 308910109e-15   0.000 000 000097e-15  m
+proton-electron mass ratio                             1836.152 673 89       0.000 000 17
+proton g factor                                        5.585 694 702         0.000 000 017
+proton gyromag. ratio                                  2.675 221 900 e8      0.000 000 018 e8      s^-1 T^-1
+proton gyromag. ratio over 2 pi                        42.577 478 92         0.000 000 29          MHz T^-1
+proton mag. mom.                                       1.410 606 7873 e-26   0.000 000 0097 e-26   J T^-1
+proton mag. mom. to Bohr magneton ratio                1.521 032 2053 e-3    0.000 000 0046 e-3
+proton mag. mom. to nuclear magneton ratio             2.792 847 3508        0.000 000 0085
+proton mag. shielding correction                       25.691 e-6            0.011 e-6
+proton mass                                            1.672 621 898 e-27    0.000 000 021 e-27    kg
+proton mass energy equivalent                          1.503 277 593 e-10    0.000 000 018 e-10    J
+proton mass energy equivalent in MeV                   938.272 0813          0.000 0058            MeV
+proton mass in u                                       1.007 276 466 879     0.000 000 000 091     u
+proton molar mass                                      1.007 276 466 879 e-3 0.000 000 000 091 e-3 kg mol^-1
+proton-muon mass ratio                                 8.880 243 38          0.000 000 20
+proton-neutron mag. mom. ratio                         -1.459 898 05         0.000 000 34
+proton-neutron mass ratio                              0.998 623 478 44      0.000 000 000 51
+proton rms charge radius                               0.8751 e-15           0.0061 e-15           m
+proton-tau mass ratio                                  0.528 063             0.000 048
+quantum of circulation                                 3.636 947 5486 e-4    0.000 000 0017 e-4    m^2 s^-1
+quantum of circulation times 2                         7.273 895 0972 e-4    0.000 000 0033 e-4    m^2 s^-1
+Rydberg constant                                       10 973 731.568 508    0.000 065             m^-1
+Rydberg constant times c in Hz                         3.289 841 960 355 e15 0.000 000 000 019 e15 Hz
+Rydberg constant times hc in eV                        13.605 693 009        0.000 000 084         eV
+Rydberg constant times hc in J                         2.179 872 325 e-18    0.000 000 027 e-18    J
+Sackur-Tetrode constant (1 K, 100 kPa)                 -1.151 7084           0.000 0014
+Sackur-Tetrode constant (1 K, 101.325 kPa)             -1.164 8714           0.000 0014
+second radiation constant                              1.438 777 36 e-2      0.000 000 83 e-2      m K
+shielded helion gyromag. ratio                         2.037 894 585 e8      0.000 000 027 e8      s^-1 T^-1
+shielded helion gyromag. ratio over 2 pi               32.434 099 66         0.000 000 43          MHz T^-1
+shielded helion mag. mom.                              -1.074 553 080 e-26   0.000 000 014 e-26    J T^-1
+shielded helion mag. mom. to Bohr magneton ratio       -1.158 671 471 e-3    0.000 000 014 e-3
+shielded helion mag. mom. to nuclear magneton ratio    -2.127 497 720        0.000 000 025
+shielded helion to proton mag. mom. ratio              -0.761 766 5603       0.000 000 0092
+shielded helion to shielded proton mag. mom. ratio     -0.761 786 1313       0.000 000 0033
+shielded proton gyromag. ratio                         2.675 153 171 e8      0.000 000 033 e8      s^-1 T^-1
+shielded proton gyromag. ratio over 2 pi               42.576 385 07         0.000 000 53          MHz T^-1
+shielded proton mag. mom.                              1.410 570 547 e-26    0.000 000 018 e-26    J T^-1
+shielded proton mag. mom. to Bohr magneton ratio       1.520 993 128 e-3     0.000 000 017 e-3
+shielded proton mag. mom. to nuclear magneton ratio    2.792 775 600         0.000 000 030
+speed of light in vacuum                               299 792 458           (exact)               m s^-1
+standard acceleration of gravity                       9.806 65              (exact)               m s^-2
+standard atmosphere                                    101 325               (exact)               Pa
+standard-state pressure                                100 000               (exact)               Pa
+Stefan-Boltzmann constant                              5.670 367 e-8         0.000 013 e-8         W m^-2 K^-4
+tau Compton wavelength                                 0.697 787 e-15        0.000 063 e-15        m
+tau Compton wavelength over 2 pi                       0.111 056 e-15        0.000 010 e-15        m
+tau-electron mass ratio                                3477.15               0.31
+tau mass                                               3.167 47 e-27         0.000 29 e-27         kg
+tau mass energy equivalent                             2.846 78 e-10         0.000 26 e-10         J
+tau mass energy equivalent in MeV                      1776.82               0.16                  MeV
+tau mass in u                                          1.907 49              0.000 17              u
+tau molar mass                                         1.907 49 e-3          0.000 17 e-3          kg mol^-1
+tau-muon mass ratio                                    16.8167               0.0015
+tau-neutron mass ratio                                 1.891 11              0.000 17
+tau-proton mass ratio                                  1.893 72              0.000 17
+Thomson cross section                                  0.665 245 871 58 e-28 0.000 000 000 91 e-28 m^2
+triton-electron mass ratio                             5496.921 535 88       0.000 000 26
+triton g factor                                        5.957 924 920         0.000 000 028
+triton mag. mom.                                       1.504 609 503 e-26    0.000 000 012 e-26    J T^-1
+triton mag. mom. to Bohr magneton ratio                1.622 393 6616 e-3    0.000 000 0076 e-3
+triton mag. mom. to nuclear magneton ratio             2.978 962 460         0.000 000 014
+triton mass                                            5.007 356 665 e-27    0.000 000 062 e-27    kg
+triton mass energy equivalent                          4.500 387 735 e-10    0.000 000 055 e-10    J
+triton mass energy equivalent in MeV                   2808.921 112          0.000 017             MeV
+triton mass in u                                       3.015 500 716 32      0.000 000 000 11      u
+triton molar mass                                      3.015 500 716 32 e-3  0.000 000 000 11 e-3  kg mol^-1
+triton-proton mass ratio                               2.993 717 033 48      0.000 000 000 22
+unified atomic mass unit                               1.660 539 040 e-27    0.000 000 020 e-27    kg
+von Klitzing constant                                  25 812.807 4555       0.000 0059            ohm
+weak mixing angle                                      0.2223                0.0021
+Wien frequency displacement law constant               5.878 9238 e10        0.000 0034 e10        Hz K^-1
+Wien wavelength displacement law constant              2.897 7729 e-3        0.000 0017 e-3        m K"""
+
+txt2018 = """\
+alpha particle-electron mass ratio                          7294.299 541 42          0.000 000 24
+alpha particle mass                                         6.644 657 3357 e-27      0.000 000 0020 e-27      kg
+alpha particle mass energy equivalent                       5.971 920 1914 e-10      0.000 000 0018 e-10      J
+alpha particle mass energy equivalent in MeV                3727.379 4066            0.000 0011               MeV
+alpha particle mass in u                                    4.001 506 179 127        0.000 000 000 063        u
+alpha particle molar mass                                   4.001 506 1777 e-3       0.000 000 0012 e-3       kg mol^-1
+alpha particle-proton mass ratio                            3.972 599 690 09         0.000 000 000 22
+alpha particle relative atomic mass                         4.001 506 179 127        0.000 000 000 063
+Angstrom star                                               1.000 014 95 e-10        0.000 000 90 e-10        m
+atomic mass constant                                        1.660 539 066 60 e-27    0.000 000 000 50 e-27    kg
+atomic mass constant energy equivalent                      1.492 418 085 60 e-10    0.000 000 000 45 e-10    J
+atomic mass constant energy equivalent in MeV               931.494 102 42           0.000 000 28             MeV
+atomic mass unit-electron volt relationship                 9.314 941 0242 e8        0.000 000 0028 e8        eV
+atomic mass unit-hartree relationship                       3.423 177 6874 e7        0.000 000 0010 e7        E_h
+atomic mass unit-hertz relationship                         2.252 342 718 71 e23     0.000 000 000 68 e23     Hz
+atomic mass unit-inverse meter relationship                 7.513 006 6104 e14       0.000 000 0023 e14       m^-1
+atomic mass unit-joule relationship                         1.492 418 085 60 e-10    0.000 000 000 45 e-10    J
+atomic mass unit-kelvin relationship                        1.080 954 019 16 e13     0.000 000 000 33 e13     K
+atomic mass unit-kilogram relationship                      1.660 539 066 60 e-27    0.000 000 000 50 e-27    kg
+atomic unit of 1st hyperpolarizability                      3.206 361 3061 e-53      0.000 000 0015 e-53      C^3 m^3 J^-2
+atomic unit of 2nd hyperpolarizability                      6.235 379 9905 e-65      0.000 000 0038 e-65      C^4 m^4 J^-3
+atomic unit of action                                       1.054 571 817... e-34    (exact)                  J s
+atomic unit of charge                                       1.602 176 634 e-19       (exact)                  C
+atomic unit of charge density                               1.081 202 384 57 e12     0.000 000 000 49 e12     C m^-3
+atomic unit of current                                      6.623 618 237 510 e-3    0.000 000 000 013 e-3    A
+atomic unit of electric dipole mom.                         8.478 353 6255 e-30      0.000 000 0013 e-30      C m
+atomic unit of electric field                               5.142 206 747 63 e11     0.000 000 000 78 e11     V m^-1
+atomic unit of electric field gradient                      9.717 362 4292 e21       0.000 000 0029 e21       V m^-2
+atomic unit of electric polarizability                      1.648 777 274 36 e-41    0.000 000 000 50 e-41    C^2 m^2 J^-1
+atomic unit of electric potential                           27.211 386 245 988       0.000 000 000 053        V
+atomic unit of electric quadrupole mom.                     4.486 551 5246 e-40      0.000 000 0014 e-40      C m^2
+atomic unit of energy                                       4.359 744 722 2071 e-18  0.000 000 000 0085 e-18  J
+atomic unit of force                                        8.238 723 4983 e-8       0.000 000 0012 e-8       N
+atomic unit of length                                       5.291 772 109 03 e-11    0.000 000 000 80 e-11    m
+atomic unit of mag. dipole mom.                             1.854 802 015 66 e-23    0.000 000 000 56 e-23    J T^-1
+atomic unit of mag. flux density                            2.350 517 567 58 e5      0.000 000 000 71 e5      T
+atomic unit of magnetizability                              7.891 036 6008 e-29      0.000 000 0048 e-29      J T^-2
+atomic unit of mass                                         9.109 383 7015 e-31      0.000 000 0028 e-31      kg
+atomic unit of momentum                                     1.992 851 914 10 e-24    0.000 000 000 30 e-24    kg m s^-1
+atomic unit of permittivity                                 1.112 650 055 45 e-10    0.000 000 000 17 e-10    F m^-1
+atomic unit of time                                         2.418 884 326 5857 e-17  0.000 000 000 0047 e-17  s
+atomic unit of velocity                                     2.187 691 263 64 e6      0.000 000 000 33 e6      m s^-1
+Avogadro constant                                           6.022 140 76 e23         (exact)                  mol^-1
+Bohr magneton                                               9.274 010 0783 e-24      0.000 000 0028 e-24      J T^-1
+Bohr magneton in eV/T                                       5.788 381 8060 e-5       0.000 000 0017 e-5       eV T^-1
+Bohr magneton in Hz/T                                       1.399 624 493 61 e10     0.000 000 000 42 e10     Hz T^-1
+Bohr magneton in inverse meter per tesla                    46.686 447 783           0.000 000 014            m^-1 T^-1
+Bohr magneton in K/T                                        0.671 713 815 63         0.000 000 000 20         K T^-1
+Bohr radius                                                 5.291 772 109 03 e-11    0.000 000 000 80 e-11    m
+Boltzmann constant                                          1.380 649 e-23           (exact)                  J K^-1
+Boltzmann constant in eV/K                                  8.617 333 262... e-5     (exact)                  eV K^-1
+Boltzmann constant in Hz/K                                  2.083 661 912... e10     (exact)                  Hz K^-1
+Boltzmann constant in inverse meter per kelvin              69.503 480 04...         (exact)                  m^-1 K^-1
+classical electron radius                                   2.817 940 3262 e-15      0.000 000 0013 e-15      m
+Compton wavelength                                          2.426 310 238 67 e-12    0.000 000 000 73 e-12    m
+conductance quantum                                         7.748 091 729... e-5     (exact)                  S
+conventional value of ampere-90                             1.000 000 088 87...      (exact)                  A
+conventional value of coulomb-90                            1.000 000 088 87...      (exact)                  C
+conventional value of farad-90                              0.999 999 982 20...      (exact)                  F
+conventional value of henry-90                              1.000 000 017 79...      (exact)                  H
+conventional value of Josephson constant                    483 597.9 e9             (exact)                  Hz V^-1
+conventional value of ohm-90                                1.000 000 017 79...      (exact)                  ohm
+conventional value of volt-90                               1.000 000 106 66...      (exact)                  V
+conventional value of von Klitzing constant                 25 812.807               (exact)                  ohm
+conventional value of watt-90                               1.000 000 195 53...      (exact)                  W
+Cu x unit                                                   1.002 076 97 e-13        0.000 000 28 e-13        m
+deuteron-electron mag. mom. ratio                           -4.664 345 551 e-4       0.000 000 012 e-4
+deuteron-electron mass ratio                                3670.482 967 88          0.000 000 13
+deuteron g factor                                           0.857 438 2338           0.000 000 0022
+deuteron mag. mom.                                          4.330 735 094 e-27       0.000 000 011 e-27       J T^-1
+deuteron mag. mom. to Bohr magneton ratio                   4.669 754 570 e-4        0.000 000 012 e-4
+deuteron mag. mom. to nuclear magneton ratio                0.857 438 2338           0.000 000 0022
+deuteron mass                                               3.343 583 7724 e-27      0.000 000 0010 e-27      kg
+deuteron mass energy equivalent                             3.005 063 231 02 e-10    0.000 000 000 91 e-10    J
+deuteron mass energy equivalent in MeV                      1875.612 942 57          0.000 000 57             MeV
+deuteron mass in u                                          2.013 553 212 745        0.000 000 000 040        u
+deuteron molar mass                                         2.013 553 212 05 e-3     0.000 000 000 61 e-3     kg mol^-1
+deuteron-neutron mag. mom. ratio                            -0.448 206 53            0.000 000 11
+deuteron-proton mag. mom. ratio                             0.307 012 209 39         0.000 000 000 79
+deuteron-proton mass ratio                                  1.999 007 501 39         0.000 000 000 11
+deuteron relative atomic mass                               2.013 553 212 745        0.000 000 000 040
+deuteron rms charge radius                                  2.127 99 e-15            0.000 74 e-15            m
+electron charge to mass quotient                            -1.758 820 010 76 e11    0.000 000 000 53 e11     C kg^-1
+electron-deuteron mag. mom. ratio                           -2143.923 4915           0.000 0056
+electron-deuteron mass ratio                                2.724 437 107 462 e-4    0.000 000 000 096 e-4
+electron g factor                                           -2.002 319 304 362 56    0.000 000 000 000 35
+electron gyromag. ratio                                     1.760 859 630 23 e11     0.000 000 000 53 e11     s^-1 T^-1
+electron gyromag. ratio in MHz/T                            28 024.951 4242          0.000 0085               MHz T^-1
+electron-helion mass ratio                                  1.819 543 074 573 e-4    0.000 000 000 079 e-4
+electron mag. mom.                                          -9.284 764 7043 e-24     0.000 000 0028 e-24      J T^-1
+electron mag. mom. anomaly                                  1.159 652 181 28 e-3     0.000 000 000 18 e-3
+electron mag. mom. to Bohr magneton ratio                   -1.001 159 652 181 28    0.000 000 000 000 18
+electron mag. mom. to nuclear magneton ratio                -1838.281 971 88         0.000 000 11
+electron mass                                               9.109 383 7015 e-31      0.000 000 0028 e-31      kg
+electron mass energy equivalent                             8.187 105 7769 e-14      0.000 000 0025 e-14      J
+electron mass energy equivalent in MeV                      0.510 998 950 00         0.000 000 000 15         MeV
+electron mass in u                                          5.485 799 090 65 e-4     0.000 000 000 16 e-4     u
+electron molar mass                                         5.485 799 0888 e-7       0.000 000 0017 e-7       kg mol^-1
+electron-muon mag. mom. ratio                               206.766 9883             0.000 0046
+electron-muon mass ratio                                    4.836 331 69 e-3         0.000 000 11 e-3
+electron-neutron mag. mom. ratio                            960.920 50               0.000 23
+electron-neutron mass ratio                                 5.438 673 4424 e-4       0.000 000 0026 e-4
+electron-proton mag. mom. ratio                             -658.210 687 89          0.000 000 20
+electron-proton mass ratio                                  5.446 170 214 87 e-4     0.000 000 000 33 e-4
+electron relative atomic mass                               5.485 799 090 65 e-4     0.000 000 000 16 e-4
+electron-tau mass ratio                                     2.875 85 e-4             0.000 19 e-4
+electron to alpha particle mass ratio                       1.370 933 554 787 e-4    0.000 000 000 045 e-4
+electron to shielded helion mag. mom. ratio                 864.058 257              0.000 010
+electron to shielded proton mag. mom. ratio                 -658.227 5971            0.000 0072
+electron-triton mass ratio                                  1.819 200 062 251 e-4    0.000 000 000 090 e-4
+electron volt                                               1.602 176 634 e-19       (exact)                  J
+electron volt-atomic mass unit relationship                 1.073 544 102 33 e-9     0.000 000 000 32 e-9     u
+electron volt-hartree relationship                          3.674 932 217 5655 e-2   0.000 000 000 0071 e-2   E_h
+electron volt-hertz relationship                            2.417 989 242... e14     (exact)                  Hz
+electron volt-inverse meter relationship                    8.065 543 937... e5      (exact)                  m^-1
+electron volt-joule relationship                            1.602 176 634 e-19       (exact)                  J
+electron volt-kelvin relationship                           1.160 451 812... e4      (exact)                  K
+electron volt-kilogram relationship                         1.782 661 921... e-36    (exact)                  kg
+elementary charge                                           1.602 176 634 e-19       (exact)                  C
+elementary charge over h-bar                                1.519 267 447... e15     (exact)                  A J^-1
+Faraday constant                                            96 485.332 12...         (exact)                  C mol^-1
+Fermi coupling constant                                     1.166 3787 e-5           0.000 0006 e-5           GeV^-2
+fine-structure constant                                     7.297 352 5693 e-3       0.000 000 0011 e-3
+first radiation constant                                    3.741 771 852... e-16    (exact)                  W m^2
+first radiation constant for spectral radiance              1.191 042 972... e-16    (exact)                  W m^2 sr^-1
+hartree-atomic mass unit relationship                       2.921 262 322 05 e-8     0.000 000 000 88 e-8     u
+hartree-electron volt relationship                          27.211 386 245 988       0.000 000 000 053        eV
+Hartree energy                                              4.359 744 722 2071 e-18  0.000 000 000 0085 e-18  J
+Hartree energy in eV                                        27.211 386 245 988       0.000 000 000 053        eV
+hartree-hertz relationship                                  6.579 683 920 502 e15    0.000 000 000 013 e15    Hz
+hartree-inverse meter relationship                          2.194 746 313 6320 e7    0.000 000 000 0043 e7    m^-1
+hartree-joule relationship                                  4.359 744 722 2071 e-18  0.000 000 000 0085 e-18  J
+hartree-kelvin relationship                                 3.157 750 248 0407 e5    0.000 000 000 0061 e5    K
+hartree-kilogram relationship                               4.850 870 209 5432 e-35  0.000 000 000 0094 e-35  kg
+helion-electron mass ratio                                  5495.885 280 07          0.000 000 24
+helion g factor                                             -4.255 250 615           0.000 000 050
+helion mag. mom.                                            -1.074 617 532 e-26      0.000 000 013 e-26       J T^-1
+helion mag. mom. to Bohr magneton ratio                     -1.158 740 958 e-3       0.000 000 014 e-3
+helion mag. mom. to nuclear magneton ratio                  -2.127 625 307           0.000 000 025
+helion mass                                                 5.006 412 7796 e-27      0.000 000 0015 e-27      kg
+helion mass energy equivalent                               4.499 539 4125 e-10      0.000 000 0014 e-10      J
+helion mass energy equivalent in MeV                        2808.391 607 43          0.000 000 85             MeV
+helion mass in u                                            3.014 932 247 175        0.000 000 000 097        u
+helion molar mass                                           3.014 932 246 13 e-3     0.000 000 000 91 e-3     kg mol^-1
+helion-proton mass ratio                                    2.993 152 671 67         0.000 000 000 13
+helion relative atomic mass                                 3.014 932 247 175        0.000 000 000 097
+helion shielding shift                                      5.996 743 e-5            0.000 010 e-5
+hertz-atomic mass unit relationship                         4.439 821 6652 e-24      0.000 000 0013 e-24      u
+hertz-electron volt relationship                            4.135 667 696... e-15    (exact)                  eV
+hertz-hartree relationship                                  1.519 829 846 0570 e-16  0.000 000 000 0029 e-16  E_h
+hertz-inverse meter relationship                            3.335 640 951... e-9     (exact)                  m^-1
+hertz-joule relationship                                    6.626 070 15 e-34        (exact)                  J
+hertz-kelvin relationship                                   4.799 243 073... e-11    (exact)                  K
+hertz-kilogram relationship                                 7.372 497 323... e-51    (exact)                  kg
+hyperfine transition frequency of Cs-133                    9 192 631 770            (exact)                  Hz
+inverse fine-structure constant                             137.035 999 084          0.000 000 021
+inverse meter-atomic mass unit relationship                 1.331 025 050 10 e-15    0.000 000 000 40 e-15    u
+inverse meter-electron volt relationship                    1.239 841 984... e-6     (exact)                  eV
+inverse meter-hartree relationship                          4.556 335 252 9120 e-8   0.000 000 000 0088 e-8   E_h
+inverse meter-hertz relationship                            299 792 458              (exact)                  Hz
+inverse meter-joule relationship                            1.986 445 857... e-25    (exact)                  J
+inverse meter-kelvin relationship                           1.438 776 877... e-2     (exact)                  K
+inverse meter-kilogram relationship                         2.210 219 094... e-42    (exact)                  kg
+inverse of conductance quantum                              12 906.403 72...         (exact)                  ohm
+Josephson constant                                          483 597.848 4... e9      (exact)                  Hz V^-1
+joule-atomic mass unit relationship                         6.700 535 2565 e9        0.000 000 0020 e9        u
+joule-electron volt relationship                            6.241 509 074... e18     (exact)                  eV
+joule-hartree relationship                                  2.293 712 278 3963 e17   0.000 000 000 0045 e17   E_h
+joule-hertz relationship                                    1.509 190 179... e33     (exact)                  Hz
+joule-inverse meter relationship                            5.034 116 567... e24     (exact)                  m^-1
+joule-kelvin relationship                                   7.242 970 516... e22     (exact)                  K
+joule-kilogram relationship                                 1.112 650 056... e-17    (exact)                  kg
+kelvin-atomic mass unit relationship                        9.251 087 3014 e-14      0.000 000 0028 e-14      u
+kelvin-electron volt relationship                           8.617 333 262... e-5     (exact)                  eV
+kelvin-hartree relationship                                 3.166 811 563 4556 e-6   0.000 000 000 0061 e-6   E_h
+kelvin-hertz relationship                                   2.083 661 912... e10     (exact)                  Hz
+kelvin-inverse meter relationship                           69.503 480 04...         (exact)                  m^-1
+kelvin-joule relationship                                   1.380 649 e-23           (exact)                  J
+kelvin-kilogram relationship                                1.536 179 187... e-40    (exact)                  kg
+kilogram-atomic mass unit relationship                      6.022 140 7621 e26       0.000 000 0018 e26       u
+kilogram-electron volt relationship                         5.609 588 603... e35     (exact)                  eV
+kilogram-hartree relationship                               2.061 485 788 7409 e34   0.000 000 000 0040 e34   E_h
+kilogram-hertz relationship                                 1.356 392 489... e50     (exact)                  Hz
+kilogram-inverse meter relationship                         4.524 438 335... e41     (exact)                  m^-1
+kilogram-joule relationship                                 8.987 551 787... e16     (exact)                  J
+kilogram-kelvin relationship                                6.509 657 260... e39     (exact)                  K
+lattice parameter of silicon                                5.431 020 511 e-10       0.000 000 089 e-10       m
+lattice spacing of ideal Si (220)                           1.920 155 716 e-10       0.000 000 032 e-10       m
+Loschmidt constant (273.15 K, 100 kPa)                      2.651 645 804... e25     (exact)                  m^-3
+Loschmidt constant (273.15 K, 101.325 kPa)                  2.686 780 111... e25     (exact)                  m^-3
+luminous efficacy                                           683                      (exact)                  lm W^-1
+mag. flux quantum                                           2.067 833 848... e-15    (exact)                  Wb
+molar gas constant                                          8.314 462 618...         (exact)                  J mol^-1 K^-1
+molar mass constant                                         0.999 999 999 65 e-3     0.000 000 000 30 e-3     kg mol^-1
+molar mass of carbon-12                                     11.999 999 9958 e-3      0.000 000 0036 e-3       kg mol^-1
+molar Planck constant                                       3.990 312 712... e-10    (exact)                  J Hz^-1 mol^-1
+molar volume of ideal gas (273.15 K, 100 kPa)               22.710 954 64... e-3     (exact)                  m^3 mol^-1
+molar volume of ideal gas (273.15 K, 101.325 kPa)           22.413 969 54... e-3     (exact)                  m^3 mol^-1
+molar volume of silicon                                     1.205 883 199 e-5        0.000 000 060 e-5        m^3 mol^-1
+Mo x unit                                                   1.002 099 52 e-13        0.000 000 53 e-13        m
+muon Compton wavelength                                     1.173 444 110 e-14       0.000 000 026 e-14       m
+muon-electron mass ratio                                    206.768 2830             0.000 0046
+muon g factor                                               -2.002 331 8418          0.000 000 0013
+muon mag. mom.                                              -4.490 448 30 e-26       0.000 000 10 e-26        J T^-1
+muon mag. mom. anomaly                                      1.165 920 89 e-3         0.000 000 63 e-3
+muon mag. mom. to Bohr magneton ratio                       -4.841 970 47 e-3        0.000 000 11 e-3
+muon mag. mom. to nuclear magneton ratio                    -8.890 597 03            0.000 000 20
+muon mass                                                   1.883 531 627 e-28       0.000 000 042 e-28       kg
+muon mass energy equivalent                                 1.692 833 804 e-11       0.000 000 038 e-11       J
+muon mass energy equivalent in MeV                          105.658 3755             0.000 0023               MeV
+muon mass in u                                              0.113 428 9259           0.000 000 0025           u
+muon molar mass                                             1.134 289 259 e-4        0.000 000 025 e-4        kg mol^-1
+muon-neutron mass ratio                                     0.112 454 5170           0.000 000 0025
+muon-proton mag. mom. ratio                                 -3.183 345 142           0.000 000 071
+muon-proton mass ratio                                      0.112 609 5264           0.000 000 0025
+muon-tau mass ratio                                         5.946 35 e-2             0.000 40 e-2
+natural unit of action                                      1.054 571 817... e-34    (exact)                  J s
+natural unit of action in eV s                              6.582 119 569... e-16    (exact)                  eV s
+natural unit of energy                                      8.187 105 7769 e-14      0.000 000 0025 e-14      J
+natural unit of energy in MeV                               0.510 998 950 00         0.000 000 000 15         MeV
+natural unit of length                                      3.861 592 6796 e-13      0.000 000 0012 e-13      m
+natural unit of mass                                        9.109 383 7015 e-31      0.000 000 0028 e-31      kg
+natural unit of momentum                                    2.730 924 530 75 e-22    0.000 000 000 82 e-22    kg m s^-1
+natural unit of momentum in MeV/c                           0.510 998 950 00         0.000 000 000 15         MeV/c
+natural unit of time                                        1.288 088 668 19 e-21    0.000 000 000 39 e-21    s
+natural unit of velocity                                    299 792 458              (exact)                  m s^-1
+neutron Compton wavelength                                  1.319 590 905 81 e-15    0.000 000 000 75 e-15    m
+neutron-electron mag. mom. ratio                            1.040 668 82 e-3         0.000 000 25 e-3
+neutron-electron mass ratio                                 1838.683 661 73          0.000 000 89
+neutron g factor                                            -3.826 085 45            0.000 000 90
+neutron gyromag. ratio                                      1.832 471 71 e8          0.000 000 43 e8          s^-1 T^-1
+neutron gyromag. ratio in MHz/T                             29.164 6931              0.000 0069               MHz T^-1
+neutron mag. mom.                                           -9.662 3651 e-27         0.000 0023 e-27          J T^-1
+neutron mag. mom. to Bohr magneton ratio                    -1.041 875 63 e-3        0.000 000 25 e-3
+neutron mag. mom. to nuclear magneton ratio                 -1.913 042 73            0.000 000 45
+neutron mass                                                1.674 927 498 04 e-27    0.000 000 000 95 e-27    kg
+neutron mass energy equivalent                              1.505 349 762 87 e-10    0.000 000 000 86 e-10    J
+neutron mass energy equivalent in MeV                       939.565 420 52           0.000 000 54             MeV
+neutron mass in u                                           1.008 664 915 95         0.000 000 000 49         u
+neutron molar mass                                          1.008 664 915 60 e-3     0.000 000 000 57 e-3     kg mol^-1
+neutron-muon mass ratio                                     8.892 484 06             0.000 000 20
+neutron-proton mag. mom. ratio                              -0.684 979 34            0.000 000 16
+neutron-proton mass difference                              2.305 574 35 e-30        0.000 000 82 e-30        kg
+neutron-proton mass difference energy equivalent            2.072 146 89 e-13        0.000 000 74 e-13        J
+neutron-proton mass difference energy equivalent in MeV     1.293 332 36             0.000 000 46             MeV
+neutron-proton mass difference in u                         1.388 449 33 e-3         0.000 000 49 e-3         u
+neutron-proton mass ratio                                   1.001 378 419 31         0.000 000 000 49
+neutron relative atomic mass                                1.008 664 915 95         0.000 000 000 49
+neutron-tau mass ratio                                      0.528 779                0.000 036
+neutron to shielded proton mag. mom. ratio                  -0.684 996 94            0.000 000 16
+Newtonian constant of gravitation                           6.674 30 e-11            0.000 15 e-11            m^3 kg^-1 s^-2
+Newtonian constant of gravitation over h-bar c              6.708 83 e-39            0.000 15 e-39            (GeV/c^2)^-2
+nuclear magneton                                            5.050 783 7461 e-27      0.000 000 0015 e-27      J T^-1
+nuclear magneton in eV/T                                    3.152 451 258 44 e-8     0.000 000 000 96 e-8     eV T^-1
+nuclear magneton in inverse meter per tesla                 2.542 623 413 53 e-2     0.000 000 000 78 e-2     m^-1 T^-1
+nuclear magneton in K/T                                     3.658 267 7756 e-4       0.000 000 0011 e-4       K T^-1
+nuclear magneton in MHz/T                                   7.622 593 2291           0.000 000 0023           MHz T^-1
+Planck constant                                             6.626 070 15 e-34        (exact)                  J Hz^-1
+Planck constant in eV/Hz                                    4.135 667 696... e-15    (exact)                  eV Hz^-1
+Planck length                                               1.616 255 e-35           0.000 018 e-35           m
+Planck mass                                                 2.176 434 e-8            0.000 024 e-8            kg
+Planck mass energy equivalent in GeV                        1.220 890 e19            0.000 014 e19            GeV
+Planck temperature                                          1.416 784 e32            0.000 016 e32            K
+Planck time                                                 5.391 247 e-44           0.000 060 e-44           s
+proton charge to mass quotient                              9.578 833 1560 e7        0.000 000 0029 e7        C kg^-1
+proton Compton wavelength                                   1.321 409 855 39 e-15    0.000 000 000 40 e-15    m
+proton-electron mass ratio                                  1836.152 673 43          0.000 000 11
+proton g factor                                             5.585 694 6893           0.000 000 0016
+proton gyromag. ratio                                       2.675 221 8744 e8        0.000 000 0011 e8        s^-1 T^-1
+proton gyromag. ratio in MHz/T                              42.577 478 518           0.000 000 018            MHz T^-1
+proton mag. mom.                                            1.410 606 797 36 e-26    0.000 000 000 60 e-26    J T^-1
+proton mag. mom. to Bohr magneton ratio                     1.521 032 202 30 e-3     0.000 000 000 46 e-3
+proton mag. mom. to nuclear magneton ratio                  2.792 847 344 63         0.000 000 000 82
+proton mag. shielding correction                            2.5689 e-5               0.0011 e-5
+proton mass                                                 1.672 621 923 69 e-27    0.000 000 000 51 e-27    kg
+proton mass energy equivalent                               1.503 277 615 98 e-10    0.000 000 000 46 e-10    J
+proton mass energy equivalent in MeV                        938.272 088 16           0.000 000 29             MeV
+proton mass in u                                            1.007 276 466 621        0.000 000 000 053        u
+proton molar mass                                           1.007 276 466 27 e-3     0.000 000 000 31 e-3     kg mol^-1
+proton-muon mass ratio                                      8.880 243 37             0.000 000 20
+proton-neutron mag. mom. ratio                              -1.459 898 05            0.000 000 34
+proton-neutron mass ratio                                   0.998 623 478 12         0.000 000 000 49
+proton relative atomic mass                                 1.007 276 466 621        0.000 000 000 053
+proton rms charge radius                                    8.414 e-16               0.019 e-16               m
+proton-tau mass ratio                                       0.528 051                0.000 036
+quantum of circulation                                      3.636 947 5516 e-4       0.000 000 0011 e-4       m^2 s^-1
+quantum of circulation times 2                              7.273 895 1032 e-4       0.000 000 0022 e-4       m^2 s^-1
+reduced Compton wavelength                                  3.861 592 6796 e-13      0.000 000 0012 e-13      m
+reduced muon Compton wavelength                             1.867 594 306 e-15       0.000 000 042 e-15       m
+reduced neutron Compton wavelength                          2.100 194 1552 e-16      0.000 000 0012 e-16      m
+reduced Planck constant                                     1.054 571 817... e-34    (exact)                  J s
+reduced Planck constant in eV s                             6.582 119 569... e-16    (exact)                  eV s
+reduced Planck constant times c in MeV fm                   197.326 980 4...         (exact)                  MeV fm
+reduced proton Compton wavelength                           2.103 089 103 36 e-16    0.000 000 000 64 e-16    m
+reduced tau Compton wavelength                              1.110 538 e-16           0.000 075 e-16           m
+Rydberg constant                                            10 973 731.568 160       0.000 021                m^-1
+Rydberg constant times c in Hz                              3.289 841 960 2508 e15   0.000 000 000 0064 e15   Hz
+Rydberg constant times hc in eV                             13.605 693 122 994       0.000 000 000 026        eV
+Rydberg constant times hc in J                              2.179 872 361 1035 e-18  0.000 000 000 0042 e-18  J
+Sackur-Tetrode constant (1 K, 100 kPa)                      -1.151 707 537 06        0.000 000 000 45
+Sackur-Tetrode constant (1 K, 101.325 kPa)                  -1.164 870 523 58        0.000 000 000 45
+second radiation constant                                   1.438 776 877... e-2     (exact)                  m K
+shielded helion gyromag. ratio                              2.037 894 569 e8         0.000 000 024 e8         s^-1 T^-1
+shielded helion gyromag. ratio in MHz/T                     32.434 099 42            0.000 000 38             MHz T^-1
+shielded helion mag. mom.                                   -1.074 553 090 e-26      0.000 000 013 e-26       J T^-1
+shielded helion mag. mom. to Bohr magneton ratio            -1.158 671 471 e-3       0.000 000 014 e-3
+shielded helion mag. mom. to nuclear magneton ratio         -2.127 497 719           0.000 000 025
+shielded helion to proton mag. mom. ratio                   -0.761 766 5618          0.000 000 0089
+shielded helion to shielded proton mag. mom. ratio          -0.761 786 1313          0.000 000 0033
+shielded proton gyromag. ratio                              2.675 153 151 e8         0.000 000 029 e8         s^-1 T^-1
+shielded proton gyromag. ratio in MHz/T                     42.576 384 74            0.000 000 46             MHz T^-1
+shielded proton mag. mom.                                   1.410 570 560 e-26       0.000 000 015 e-26       J T^-1
+shielded proton mag. mom. to Bohr magneton ratio            1.520 993 128 e-3        0.000 000 017 e-3
+shielded proton mag. mom. to nuclear magneton ratio         2.792 775 599            0.000 000 030
+shielding difference of d and p in HD                       2.0200 e-8               0.0020 e-8
+shielding difference of t and p in HT                       2.4140 e-8               0.0020 e-8
+speed of light in vacuum                                    299 792 458              (exact)                  m s^-1
+standard acceleration of gravity                            9.806 65                 (exact)                  m s^-2
+standard atmosphere                                         101 325                  (exact)                  Pa
+standard-state pressure                                     100 000                  (exact)                  Pa
+Stefan-Boltzmann constant                                   5.670 374 419... e-8     (exact)                  W m^-2 K^-4
+tau Compton wavelength                                      6.977 71 e-16            0.000 47 e-16            m
+tau-electron mass ratio                                     3477.23                  0.23
+tau energy equivalent                                       1776.86                  0.12                     MeV
+tau mass                                                    3.167 54 e-27            0.000 21 e-27            kg
+tau mass energy equivalent                                  2.846 84 e-10            0.000 19 e-10            J
+tau mass in u                                               1.907 54                 0.000 13                 u
+tau molar mass                                              1.907 54 e-3             0.000 13 e-3             kg mol^-1
+tau-muon mass ratio                                         16.8170                  0.0011
+tau-neutron mass ratio                                      1.891 15                 0.000 13
+tau-proton mass ratio                                       1.893 76                 0.000 13
+Thomson cross section                                       6.652 458 7321 e-29      0.000 000 0060 e-29      m^2
+triton-electron mass ratio                                  5496.921 535 73          0.000 000 27
+triton g factor                                             5.957 924 931            0.000 000 012
+triton mag. mom.                                            1.504 609 5202 e-26      0.000 000 0030 e-26      J T^-1
+triton mag. mom. to Bohr magneton ratio                     1.622 393 6651 e-3       0.000 000 0032 e-3
+triton mag. mom. to nuclear magneton ratio                  2.978 962 4656           0.000 000 0059
+triton mass                                                 5.007 356 7446 e-27      0.000 000 0015 e-27      kg
+triton mass energy equivalent                               4.500 387 8060 e-10      0.000 000 0014 e-10      J
+triton mass energy equivalent in MeV                        2808.921 132 98          0.000 000 85             MeV
+triton mass in u                                            3.015 500 716 21         0.000 000 000 12         u
+triton molar mass                                           3.015 500 715 17 e-3     0.000 000 000 92 e-3     kg mol^-1
+triton-proton mass ratio                                    2.993 717 034 14         0.000 000 000 15
+triton relative atomic mass                                 3.015 500 716 21         0.000 000 000 12
+triton to proton mag. mom. ratio                            1.066 639 9191           0.000 000 0021
+unified atomic mass unit                                    1.660 539 066 60 e-27    0.000 000 000 50 e-27    kg
+vacuum electric permittivity                                8.854 187 8128 e-12      0.000 000 0013 e-12      F m^-1
+vacuum mag. permeability                                    1.256 637 062 12 e-6     0.000 000 000 19 e-6     N A^-2
+von Klitzing constant                                       25 812.807 45...         (exact)                  ohm
+weak mixing angle                                           0.222 90                 0.000 30
+Wien frequency displacement law constant                    5.878 925 757... e10     (exact)                  Hz K^-1
+Wien wavelength displacement law constant                   2.897 771 955... e-3     (exact)                  m K
+W to Z mass ratio                                           0.881 53                 0.000 17                   """
+
+# -----------------------------------------------------------------------------
+
+physical_constants: dict[str, tuple[float, str, float]] = {}
+
+
+def parse_constants_2002to2014(d: str) -> dict[str, tuple[float, str, float]]:
+    constants = {}
+    for line in d.split('\n'):
+        name = line[:55].rstrip()
+        val = float(line[55:77].replace(' ', '').replace('...', ''))
+        uncert = float(line[77:99].replace(' ', '').replace('(exact)', '0'))
+        units = line[99:].rstrip()
+        constants[name] = (val, units, uncert)
+    return constants
+
+
+def parse_constants_2018toXXXX(d: str) -> dict[str, tuple[float, str, float]]:
+    constants = {}
+    for line in d.split('\n'):
+        name = line[:60].rstrip()
+        val = float(line[60:85].replace(' ', '').replace('...', ''))
+        uncert = float(line[85:110].replace(' ', '').replace('(exact)', '0'))
+        units = line[110:].rstrip()
+        constants[name] = (val, units, uncert)
+    return constants
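+
+
+# A minimal usage sketch of the fixed-width parsers above (illustrative only,
+# not part of the module API); the 60/85/110 offsets are the CODATA 2018
+# column layout:
+#
+#     >>> row = ('speed of light in vacuum'.ljust(60)
+#     ...        + '299 792 458'.ljust(25) + '(exact)'.ljust(25) + 'm s^-1')
+#     >>> parse_constants_2018toXXXX(row)
+#     {'speed of light in vacuum': (299792458.0, 'm s^-1', 0.0)}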
+
+
+_physical_constants_2002 = parse_constants_2002to2014(txt2002)
+_physical_constants_2006 = parse_constants_2002to2014(txt2006)
+_physical_constants_2010 = parse_constants_2002to2014(txt2010)
+_physical_constants_2014 = parse_constants_2002to2014(txt2014)
+_physical_constants_2018 = parse_constants_2018toXXXX(txt2018)
+
+
+physical_constants.update(_physical_constants_2002)
+physical_constants.update(_physical_constants_2006)
+physical_constants.update(_physical_constants_2010)
+physical_constants.update(_physical_constants_2014)
+physical_constants.update(_physical_constants_2018)
+_current_constants = _physical_constants_2018
+_current_codata = "CODATA 2018"
+
+# check obsolete values
+_obsolete_constants = {}
+for k in physical_constants:
+    if k not in _current_constants:
+        _obsolete_constants[k] = True
+
+# generate some additional aliases
+_aliases = {}
+for k in _physical_constants_2002:
+    if 'magn.' in k:
+        _aliases[k] = k.replace('magn.', 'mag.')
+for k in _physical_constants_2006:
+    if 'momentum' in k:
+        _aliases[k] = k.replace('momentum', 'mom.um')
+for k in _physical_constants_2018:
+    if 'momentum' in k:
+        _aliases[k] = k.replace('momentum', 'mom.um')
+
+# CODATA 2018: renamed and no longer exact; use as aliases
+_aliases['mag. constant'] = 'vacuum mag. permeability'
+_aliases['electric constant'] = 'vacuum electric permittivity'
+
+
+class ConstantWarning(DeprecationWarning):
+    """Accessing a constant no longer in current CODATA data set"""
+    pass
+
+
+def _check_obsolete(key: str) -> None:
+    if key in _obsolete_constants and key not in _aliases:
+        warnings.warn("Constant '%s' is not in current %s data set" % (
+            key, _current_codata), ConstantWarning)
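+# For example, 'magn. flux quantum' (a pre-2006 spelling) is covered by an
+# alias above and therefore does not warn; an obsolete key without an alias
+# emits a ConstantWarning on access.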
+
+
+def value(key: str) -> float:
+    """
+    Value in physical_constants indexed by key
+
+    Parameters
+    ----------
+    key : Python string
+        Key in dictionary `physical_constants`
+
+    Returns
+    -------
+    value : float
+        Value in `physical_constants` corresponding to `key`
+
+    Examples
+    --------
+    >>> from scipy import constants
+    >>> constants.value('elementary charge')
+    1.602176634e-19
+
+    """
+    _check_obsolete(key)
+    return physical_constants[key][0]
+
+
+def unit(key: str) -> str:
+    """
+    Unit in physical_constants indexed by key
+
+    Parameters
+    ----------
+    key : Python string
+        Key in dictionary `physical_constants`
+
+    Returns
+    -------
+    unit : Python string
+        Unit in `physical_constants` corresponding to `key`
+
+    Examples
+    --------
+    >>> from scipy import constants
+    >>> constants.unit('proton mass')
+    'kg'
+
+    """
+    _check_obsolete(key)
+    return physical_constants[key][1]
+
+
+def precision(key: str) -> float:
+    """
+    Relative precision in physical_constants indexed by key
+
+    Parameters
+    ----------
+    key : Python string
+        Key in dictionary `physical_constants`
+
+    Returns
+    -------
+    prec : float
+        Relative precision in `physical_constants` corresponding to `key`
+
+    Examples
+    --------
+    >>> from scipy import constants
+    >>> constants.precision('proton mass')
+    5.1e-37
+
+    """
+    _check_obsolete(key)
+    return physical_constants[key][2] / physical_constants[key][0]
+
+
+def find(sub: str | None = None, disp: bool = False) -> Any:
+    """
+    Return list of physical_constant keys containing a given string.
+
+    Parameters
+    ----------
+    sub : str
+        Sub-string to search keys for. By default, return all keys.
+    disp : bool
+        If True, print the keys that are found and return None.
+        Otherwise, return the list of keys without printing anything.
+
+    Returns
+    -------
+    keys : list or None
+        If `disp` is False, the list of keys is returned.
+        Otherwise, None is returned.
+
+    Examples
+    --------
+    >>> from scipy.constants import find, physical_constants
+
+    Which keys in the ``physical_constants`` dictionary contain 'boltzmann'?
+
+    >>> find('boltzmann')
+    ['Boltzmann constant',
+     'Boltzmann constant in Hz/K',
+     'Boltzmann constant in eV/K',
+     'Boltzmann constant in inverse meter per kelvin',
+     'Stefan-Boltzmann constant']
+
+    Get the constant called 'Boltzmann constant in Hz/K':
+
+    >>> physical_constants['Boltzmann constant in Hz/K']
+    (20836619120.0, 'Hz K^-1', 0.0)
+
+    Find constants with 'radius' in the key:
+
+    >>> find('radius')
+    ['Bohr radius',
+     'classical electron radius',
+     'deuteron rms charge radius',
+     'proton rms charge radius']
+    >>> physical_constants['classical electron radius']
+    (2.8179403262e-15, 'm', 1.3e-24)
+
+    """
+    if sub is None:
+        result = list(_current_constants.keys())
+    else:
+        result = [key for key in _current_constants
+                  if sub.lower() in key.lower()]
+
+    result.sort()
+    if disp:
+        for key in result:
+            print(key)
+        return
+    else:
+        return result
+
+
+c = value('speed of light in vacuum')
+mu0 = value('vacuum mag. permeability')
+epsilon0 = value('vacuum electric permittivity')
+
+# Table is lacking some digits for exact values: calculate from definition
+exact_values = {
+    'joule-kilogram relationship': (1 / (c * c), 'kg', 0.0),
+    'kilogram-joule relationship': (c * c, 'J', 0.0),
+    'hertz-inverse meter relationship': (1 / c, 'm^-1', 0.0),
+
+    # The following derived quantities are no longer exact (CODATA2018):
+    # specify separately
+    'characteristic impedance of vacuum': (
+        sqrt(mu0 / epsilon0), 'ohm',
+        sqrt(mu0 / epsilon0) * 0.5 * (
+            physical_constants['vacuum mag. permeability'][2] / mu0
+            + physical_constants['vacuum electric permittivity'][2] / epsilon0))
+}
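+# For Z0 = sqrt(mu0 / epsilon0), first-order error propagation gives
+# dZ0 / Z0 = (1/2) * (dmu0 / mu0 + deps0 / eps0); the expression above
+# multiplies this relative uncertainty by Z0 itself (a conservative linear
+# sum rather than a quadrature combination of the two terms).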
+
+# sanity check
+for key in exact_values:
+    val = physical_constants[key][0]
+    if abs(exact_values[key][0] - val) / val > 1e-9:
+        raise ValueError("Constants.codata: exact values too far off.")
+    if exact_values[key][2] == 0 and physical_constants[key][2] != 0:
+        raise ValueError("Constants.codata: value not exact")
+
+physical_constants.update(exact_values)
+
+_tested_keys = ['natural unit of velocity',
+                'natural unit of action',
+                'natural unit of action in eV s',
+                'natural unit of mass',
+                'natural unit of energy',
+                'natural unit of energy in MeV',
+                'natural unit of mom.um',
+                'natural unit of mom.um in MeV/c',
+                'natural unit of length',
+                'natural unit of time']
+
+# finally, insert aliases for values
+for k, v in list(_aliases.items()):
+    if v in _current_constants or v in _tested_keys:
+        physical_constants[k] = physical_constants[v]
+    else:
+        del _aliases[k]
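+
+# A usage sketch of the alias mechanism (illustrative only): renamed keys
+# resolve to their current CODATA 2018 entries, e.g.
+#
+#     >>> value('electric constant') == value('vacuum electric permittivity')
+#     True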
diff --git a/__packaged__/coreml/.python_dependencies/scipy/constants/_constants.py b/__packaged__/coreml/.python_dependencies/scipy/constants/_constants.py
new file mode 100644
index 00000000..f16b194c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/constants/_constants.py
@@ -0,0 +1,357 @@
+"""
+Collection of physical constants and conversion factors.
+
+Most constants are in SI units, so you can do:
+print('10 mile per minute is', 10*mile/minute, 'm/s or', 10*mile/(minute*knot), 'knots')
+
+The list is not meant to be comprehensive, but just convenient for everyday use.
+"""
+
+from __future__ import annotations
+
+import math as _math
+from typing import TYPE_CHECKING, Any
+
+from ._codata import value as _cd
+import numpy as _np
+
+if TYPE_CHECKING:
+    import numpy.typing as npt
+
+"""
+BasSw 2006
+physical constants: imported from CODATA
+unit conversion: see e.g., NIST special publication 811
+Use at your own risk: double-check values before calculating your Mars orbit-insertion burn.
+Some constants exist in a few variants, which are marked with suffixes.
+The ones without any suffix should be the most common ones.
+"""
+
+__all__ = [
+    'Avogadro', 'Boltzmann', 'Btu', 'Btu_IT', 'Btu_th', 'G',
+    'Julian_year', 'N_A', 'Planck', 'R', 'Rydberg',
+    'Stefan_Boltzmann', 'Wien', 'acre', 'alpha',
+    'angstrom', 'arcmin', 'arcminute', 'arcsec',
+    'arcsecond', 'astronomical_unit', 'atm',
+    'atmosphere', 'atomic_mass', 'atto', 'au', 'bar',
+    'barrel', 'bbl', 'blob', 'c', 'calorie',
+    'calorie_IT', 'calorie_th', 'carat', 'centi',
+    'convert_temperature', 'day', 'deci', 'degree',
+    'degree_Fahrenheit', 'deka', 'dyn', 'dyne', 'e',
+    'eV', 'electron_mass', 'electron_volt',
+    'elementary_charge', 'epsilon_0', 'erg',
+    'exa', 'exbi', 'femto', 'fermi', 'fine_structure',
+    'fluid_ounce', 'fluid_ounce_US', 'fluid_ounce_imp',
+    'foot', 'g', 'gallon', 'gallon_US', 'gallon_imp',
+    'gas_constant', 'gibi', 'giga', 'golden', 'golden_ratio',
+    'grain', 'gram', 'gravitational_constant', 'h', 'hbar',
+    'hectare', 'hecto', 'horsepower', 'hour', 'hp',
+    'inch', 'k', 'kgf', 'kibi', 'kilo', 'kilogram_force',
+    'kmh', 'knot', 'lambda2nu', 'lb', 'lbf',
+    'light_year', 'liter', 'litre', 'long_ton', 'm_e',
+    'm_n', 'm_p', 'm_u', 'mach', 'mebi', 'mega',
+    'metric_ton', 'micro', 'micron', 'mil', 'mile',
+    'milli', 'minute', 'mmHg', 'mph', 'mu_0', 'nano',
+    'nautical_mile', 'neutron_mass', 'nu2lambda',
+    'ounce', 'oz', 'parsec', 'pebi', 'peta',
+    'pi', 'pico', 'point', 'pound', 'pound_force',
+    'proton_mass', 'psi', 'pt', 'short_ton',
+    'sigma', 'slinch', 'slug', 'speed_of_light',
+    'speed_of_sound', 'stone', 'survey_foot',
+    'survey_mile', 'tebi', 'tera', 'ton_TNT',
+    'torr', 'troy_ounce', 'troy_pound', 'u',
+    'week', 'yard', 'year', 'yobi', 'yocto',
+    'yotta', 'zebi', 'zepto', 'zero_Celsius', 'zetta'
+]
+
+
+# mathematical constants
+pi = _math.pi
+golden = golden_ratio = (1 + _math.sqrt(5)) / 2
+
+# SI prefixes
+yotta = 1e24
+zetta = 1e21
+exa = 1e18
+peta = 1e15
+tera = 1e12
+giga = 1e9
+mega = 1e6
+kilo = 1e3
+hecto = 1e2
+deka = 1e1
+deci = 1e-1
+centi = 1e-2
+milli = 1e-3
+micro = 1e-6
+nano = 1e-9
+pico = 1e-12
+femto = 1e-15
+atto = 1e-18
+zepto = 1e-21
+yocto = 1e-24
+
+# binary prefixes
+kibi = 2**10
+mebi = 2**20
+gibi = 2**30
+tebi = 2**40
+pebi = 2**50
+exbi = 2**60
+zebi = 2**70
+yobi = 2**80
+
+# physical constants
+c = speed_of_light = _cd('speed of light in vacuum')
+mu_0 = _cd('vacuum mag. permeability')
+epsilon_0 = _cd('vacuum electric permittivity')
+h = Planck = _cd('Planck constant')
+hbar = h / (2 * pi)
+G = gravitational_constant = _cd('Newtonian constant of gravitation')
+g = _cd('standard acceleration of gravity')
+e = elementary_charge = _cd('elementary charge')
+R = gas_constant = _cd('molar gas constant')
+alpha = fine_structure = _cd('fine-structure constant')
+N_A = Avogadro = _cd('Avogadro constant')
+k = Boltzmann = _cd('Boltzmann constant')
+sigma = Stefan_Boltzmann = _cd('Stefan-Boltzmann constant')
+Wien = _cd('Wien wavelength displacement law constant')
+Rydberg = _cd('Rydberg constant')
+
+# mass in kg
+gram = 1e-3
+metric_ton = 1e3
+grain = 64.79891e-6
+lb = pound = 7000 * grain  # avoirdupois
+blob = slinch = pound * g / 0.0254  # lbf*s**2/in (added in 1.0.0)
+slug = blob / 12  # lbf*s**2/foot (added in 1.0.0)
+oz = ounce = pound / 16
+stone = 14 * pound
+long_ton = 2240 * pound
+short_ton = 2000 * pound
+
+troy_ounce = 480 * grain  # only for metals / gems
+troy_pound = 12 * troy_ounce
+carat = 200e-6
+
+m_e = electron_mass = _cd('electron mass')
+m_p = proton_mass = _cd('proton mass')
+m_n = neutron_mass = _cd('neutron mass')
+m_u = u = atomic_mass = _cd('atomic mass constant')
+
+# angle in rad
+degree = pi / 180
+arcmin = arcminute = degree / 60
+arcsec = arcsecond = arcmin / 60
+
+# time in second
+minute = 60.0
+hour = 60 * minute
+day = 24 * hour
+week = 7 * day
+year = 365 * day
+Julian_year = 365.25 * day
+
+# length in meter
+inch = 0.0254
+foot = 12 * inch
+yard = 3 * foot
+mile = 1760 * yard
+mil = inch / 1000
+pt = point = inch / 72  # typography
+survey_foot = 1200.0 / 3937
+survey_mile = 5280 * survey_foot
+nautical_mile = 1852.0
+fermi = 1e-15
+angstrom = 1e-10
+micron = 1e-6
+au = astronomical_unit = 149597870700.0
+light_year = Julian_year * c
+parsec = au / arcsec
+
+# pressure in pascal
+atm = atmosphere = _cd('standard atmosphere')
+bar = 1e5
+torr = mmHg = atm / 760
+psi = pound * g / (inch * inch)
+
+# area in meter**2
+hectare = 1e4
+acre = 43560 * foot**2
+
+# volume in meter**3
+litre = liter = 1e-3
+gallon = gallon_US = 231 * inch**3  # US
+# pint = gallon_US / 8
+fluid_ounce = fluid_ounce_US = gallon_US / 128
+bbl = barrel = 42 * gallon_US  # for oil
+
+gallon_imp = 4.54609e-3  # UK
+fluid_ounce_imp = gallon_imp / 160
+
+# speed in meter per second
+kmh = 1e3 / hour
+mph = mile / hour
+mach = speed_of_sound = 340.5  # approximate value at 15 degrees Celsius and 1 atm
+knot = nautical_mile / hour
+
+# temperature in kelvin
+zero_Celsius = 273.15
+degree_Fahrenheit = 1/1.8  # only for differences
+
+# energy in joule
+eV = electron_volt = elementary_charge  # * 1 Volt
+calorie = calorie_th = 4.184
+calorie_IT = 4.1868
+erg = 1e-7
+Btu_th = pound * degree_Fahrenheit * calorie_th / gram
+Btu = Btu_IT = pound * degree_Fahrenheit * calorie_IT / gram
+ton_TNT = 1e9 * calorie_th
+# Wh = watt_hour
+
+# power in watt
+hp = horsepower = 550 * foot * pound * g
+
+# force in newton
+dyn = dyne = 1e-5
+lbf = pound_force = pound * g
+kgf = kilogram_force = g  # * 1 kg
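+
+# These constants compose directly for everyday unit conversion; a quick
+# sketch:
+#
+#     speed_mps = 10 * mile / minute   # ~268.2 m/s
+#     atm_in_psi = atm / psi           # ~14.7 psi per standard atmosphere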
+
+# functions for conversions that are not linear
+
+
+def convert_temperature(
+    val: npt.ArrayLike,
+    old_scale: str,
+    new_scale: str,
+) -> Any:
+    """
+    Convert from a temperature scale to another one among Celsius, Kelvin,
+    Fahrenheit, and Rankine scales.
+
+    Parameters
+    ----------
+    val : array_like
+        Value(s) of the temperature(s) to be converted expressed in the
+        original scale.
+    old_scale : str
+        Specifies as a string the original scale from which the temperature
+        value(s) will be converted. Supported scales are Celsius ('Celsius',
+        'celsius', 'C' or 'c'), Kelvin ('Kelvin', 'kelvin', 'K', 'k'),
+        Fahrenheit ('Fahrenheit', 'fahrenheit', 'F' or 'f'), and Rankine
+        ('Rankine', 'rankine', 'R', 'r').
+    new_scale : str
+        Specifies as a string the new scale to which the temperature
+        value(s) will be converted. Supported scales are Celsius ('Celsius',
+        'celsius', 'C' or 'c'), Kelvin ('Kelvin', 'kelvin', 'K', 'k'),
+        Fahrenheit ('Fahrenheit', 'fahrenheit', 'F' or 'f'), and Rankine
+        ('Rankine', 'rankine', 'R', 'r').
+
+    Returns
+    -------
+    res : float or array of floats
+        Value(s) of the converted temperature(s) expressed in the new scale.
+
+    Notes
+    -----
+    .. versionadded:: 0.18.0
+
+    Examples
+    --------
+    >>> from scipy.constants import convert_temperature
+    >>> import numpy as np
+    >>> convert_temperature(np.array([-40, 40]), 'Celsius', 'Kelvin')
+    array([ 233.15,  313.15])
+
+    """
+    # Convert from `old_scale` to Kelvin
+    if old_scale.lower() in ['celsius', 'c']:
+        tempo = _np.asanyarray(val) + zero_Celsius
+    elif old_scale.lower() in ['kelvin', 'k']:
+        tempo = _np.asanyarray(val)
+    elif old_scale.lower() in ['fahrenheit', 'f']:
+        tempo = (_np.asanyarray(val) - 32) * 5 / 9 + zero_Celsius
+    elif old_scale.lower() in ['rankine', 'r']:
+        tempo = _np.asanyarray(val) * 5 / 9
+    else:
+        raise NotImplementedError("%s scale is unsupported: supported scales "
+                                  "are Celsius, Kelvin, Fahrenheit, and "
+                                  "Rankine" % old_scale)
+    # and from Kelvin to `new_scale`.
+    if new_scale.lower() in ['celsius', 'c']:
+        res = tempo - zero_Celsius
+    elif new_scale.lower() in ['kelvin', 'k']:
+        res = tempo
+    elif new_scale.lower() in ['fahrenheit', 'f']:
+        res = (tempo - zero_Celsius) * 9 / 5 + 32
+    elif new_scale.lower() in ['rankine', 'r']:
+        res = tempo * 9 / 5
+    else:
+        raise NotImplementedError("'%s' scale is unsupported: supported "
+                                  "scales are 'Celsius', 'Kelvin', "
+                                  "'Fahrenheit', and 'Rankine'" % new_scale)
+
+    return res
+
+
+# optics
+
+
+def lambda2nu(lambda_: npt.ArrayLike) -> Any:
+    """
+    Convert wavelength to optical frequency
+
+    Parameters
+    ----------
+    lambda_ : array_like
+        Wavelength(s) to be converted.
+
+    Returns
+    -------
+    nu : float or array of floats
+        Equivalent optical frequency.
+
+    Notes
+    -----
+    Computes ``nu = c / lambda`` where c = 299792458.0, i.e., the
+    (vacuum) speed of light in meters/second.
+
+    Examples
+    --------
+    >>> from scipy.constants import lambda2nu, speed_of_light
+    >>> import numpy as np
+    >>> lambda2nu(np.array((1, speed_of_light)))
+    array([  2.99792458e+08,   1.00000000e+00])
+
+    """
+    return c / _np.asanyarray(lambda_)
+
+
+def nu2lambda(nu: npt.ArrayLike) -> Any:
+    """
+    Convert optical frequency to wavelength.
+
+    Parameters
+    ----------
+    nu : array_like
+        Optical frequency to be converted.
+
+    Returns
+    -------
+    lambda : float or array of floats
+        Equivalent wavelength(s).
+
+    Notes
+    -----
+    Computes ``lambda = c / nu`` where c = 299792458.0, i.e., the
+    (vacuum) speed of light in meters/second.
+
+    Examples
+    --------
+    >>> from scipy.constants import nu2lambda, speed_of_light
+    >>> import numpy as np
+    >>> nu2lambda(np.array((1, speed_of_light)))
+    array([  2.99792458e+08,   1.00000000e+00])
+
+    """
+    return c / _np.asanyarray(nu)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/constants/codata.py b/__packaged__/coreml/.python_dependencies/scipy/constants/codata.py
new file mode 100644
index 00000000..99ff12e5
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/constants/codata.py
@@ -0,0 +1,32 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.constants` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _codata
+
+__all__ = [  # noqa: F822
+    'physical_constants', 'value', 'unit', 'precision', 'find',
+    'ConstantWarning', 'txt2002', 'txt2006', 'txt2010', 'txt2014',
+    'txt2018', 'parse_constants_2002to2014',
+    'parse_constants_2018toXXXX', 'k', 'c', 'mu0', 'epsilon0',
+    'exact_values', 'key', 'val', 'v'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.constants.codata is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.constants instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.constants` namespace, "
+                  "the `scipy.constants.codata` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_codata, name)
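+
+# A usage sketch of the shim above (illustrative only): attribute access
+# through the old namespace still resolves, but emits a DeprecationWarning.
+#
+#     >>> import warnings
+#     >>> from scipy.constants import codata
+#     >>> with warnings.catch_warnings(record=True) as w:
+#     ...     warnings.simplefilter("always")
+#     ...     _ = codata.value
+#     >>> issubclass(w[-1].category, DeprecationWarning)
+#     True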
diff --git a/__packaged__/coreml/.python_dependencies/scipy/constants/constants.py b/__packaged__/coreml/.python_dependencies/scipy/constants/constants.py
new file mode 100644
index 00000000..01ed9cd1
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/constants/constants.py
@@ -0,0 +1,61 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.constants` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _constants
+
+
+__all__ = [  # noqa: F822
+    'Avogadro', 'Boltzmann', 'Btu', 'Btu_IT', 'Btu_th', 'G',
+    'Julian_year', 'N_A', 'Planck', 'R', 'Rydberg',
+    'Stefan_Boltzmann', 'Wien', 'acre', 'alpha',
+    'angstrom', 'arcmin', 'arcminute', 'arcsec',
+    'arcsecond', 'astronomical_unit', 'atm',
+    'atmosphere', 'atomic_mass', 'atto', 'au', 'bar',
+    'barrel', 'bbl', 'blob', 'c', 'calorie',
+    'calorie_IT', 'calorie_th', 'carat', 'centi',
+    'convert_temperature', 'day', 'deci', 'degree',
+    'degree_Fahrenheit', 'deka', 'dyn', 'dyne', 'e',
+    'eV', 'electron_mass', 'electron_volt',
+    'elementary_charge', 'epsilon_0', 'erg',
+    'exa', 'exbi', 'femto', 'fermi', 'fine_structure',
+    'fluid_ounce', 'fluid_ounce_US', 'fluid_ounce_imp',
+    'foot', 'g', 'gallon', 'gallon_US', 'gallon_imp',
+    'gas_constant', 'gibi', 'giga', 'golden', 'golden_ratio',
+    'grain', 'gram', 'gravitational_constant', 'h', 'hbar',
+    'hectare', 'hecto', 'horsepower', 'hour', 'hp',
+    'inch', 'k', 'kgf', 'kibi', 'kilo', 'kilogram_force',
+    'kmh', 'knot', 'lambda2nu', 'lb', 'lbf',
+    'light_year', 'liter', 'litre', 'long_ton', 'm_e',
+    'm_n', 'm_p', 'm_u', 'mach', 'mebi', 'mega',
+    'metric_ton', 'micro', 'micron', 'mil', 'mile',
+    'milli', 'minute', 'mmHg', 'mph', 'mu_0', 'nano',
+    'nautical_mile', 'neutron_mass', 'nu2lambda',
+    'ounce', 'oz', 'parsec', 'pebi', 'peta',
+    'pi', 'pico', 'point', 'pound', 'pound_force',
+    'proton_mass', 'psi', 'pt', 'short_ton',
+    'sigma', 'slinch', 'slug', 'speed_of_light',
+    'speed_of_sound', 'stone', 'survey_foot',
+    'survey_mile', 'tebi', 'tera', 'ton_TNT',
+    'torr', 'troy_ounce', 'troy_pound', 'u',
+    'week', 'yard', 'year', 'yobi', 'yocto',
+    'yotta', 'zebi', 'zepto', 'zero_Celsius', 'zetta'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.constants.constants is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.constants instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.constants` namespace, "
+                  "the `scipy.constants.constants` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_constants, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/constants/tests/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/constants/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/scipy/constants/tests/test_codata.py b/__packaged__/coreml/.python_dependencies/scipy/constants/tests/test_codata.py
new file mode 100644
index 00000000..ec9b69aa
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/constants/tests/test_codata.py
@@ -0,0 +1,57 @@
+from scipy.constants import find, value, ConstantWarning, c, speed_of_light
+from numpy.testing import (assert_equal, assert_, assert_almost_equal,
+                           suppress_warnings)
+import scipy.constants._codata as _cd
+
+
+def test_find():
+    keys = find('weak mixing', disp=False)
+    assert_equal(keys, ['weak mixing angle'])
+
+    keys = find('qwertyuiop', disp=False)
+    assert_equal(keys, [])
+
+    keys = find('natural unit', disp=False)
+    assert_equal(keys, sorted(['natural unit of velocity',
+                               'natural unit of action',
+                               'natural unit of action in eV s',
+                               'natural unit of mass',
+                               'natural unit of energy',
+                               'natural unit of energy in MeV',
+                               'natural unit of momentum',
+                               'natural unit of momentum in MeV/c',
+                               'natural unit of length',
+                               'natural unit of time']))
+
+
+def test_basic_table_parse():
+    c_s = 'speed of light in vacuum'
+    assert_equal(value(c_s), c)
+    assert_equal(value(c_s), speed_of_light)
+
+
+def test_basic_lookup():
+    assert_equal('%d %s' % (_cd.c, _cd.unit('speed of light in vacuum')),
+                 '299792458 m s^-1')
+
+
+def test_find_all():
+    assert_(len(find(disp=False)) > 300)
+
+
+def test_find_single():
+    assert_equal(find('Wien freq', disp=False)[0],
+                 'Wien frequency displacement law constant')
+
+
+def test_2002_vs_2006():
+    assert_almost_equal(value('magn. flux quantum'),
+                        value('mag. flux quantum'))
+
+
+def test_exact_values():
+    # Check that updating stored values with exact ones worked.
+    with suppress_warnings() as sup:
+        sup.filter(ConstantWarning)
+        for key in _cd.exact_values:
+            assert_((_cd.exact_values[key][0] - value(key)) / value(key) == 0)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/constants/tests/test_constants.py b/__packaged__/coreml/.python_dependencies/scipy/constants/tests/test_constants.py
new file mode 100644
index 00000000..8d7461d9
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/constants/tests/test_constants.py
@@ -0,0 +1,35 @@
+from numpy.testing import assert_equal, assert_allclose
+import scipy.constants as sc
+
+
+def test_convert_temperature():
+    assert_equal(sc.convert_temperature(32, 'f', 'Celsius'), 0)
+    assert_equal(sc.convert_temperature([0, 0], 'celsius', 'Kelvin'),
+                 [273.15, 273.15])
+    assert_equal(sc.convert_temperature([0, 0], 'kelvin', 'c'),
+                 [-273.15, -273.15])
+    assert_equal(sc.convert_temperature([32, 32], 'f', 'k'), [273.15, 273.15])
+    assert_equal(sc.convert_temperature([273.15, 273.15], 'kelvin', 'F'),
+                 [32, 32])
+    assert_equal(sc.convert_temperature([0, 0], 'C', 'fahrenheit'), [32, 32])
+    assert_allclose(sc.convert_temperature([0, 0], 'c', 'r'), [491.67, 491.67],
+                    rtol=0., atol=1e-13)
+    assert_allclose(sc.convert_temperature([491.67, 491.67], 'Rankine', 'C'),
+                    [0., 0.], rtol=0., atol=1e-13)
+    assert_allclose(sc.convert_temperature([491.67, 491.67], 'r', 'F'),
+                    [32., 32.], rtol=0., atol=1e-13)
+    assert_allclose(sc.convert_temperature([32, 32], 'fahrenheit', 'R'),
+                    [491.67, 491.67], rtol=0., atol=1e-13)
+    assert_allclose(sc.convert_temperature([273.15, 273.15], 'K', 'R'),
+                    [491.67, 491.67], rtol=0., atol=1e-13)
+    assert_allclose(sc.convert_temperature([491.67, 0.], 'rankine', 'kelvin'),
+                    [273.15, 0.], rtol=0., atol=1e-13)
+
+
+def test_lambda_to_nu():
+    assert_equal(sc.lambda2nu([sc.speed_of_light, 1]), [1, sc.speed_of_light])
+
+
+def test_nu_to_lambda():
+    assert_equal(sc.nu2lambda([sc.speed_of_light, 1]), [1, sc.speed_of_light])
+
diff --git a/__packaged__/coreml/.python_dependencies/scipy/datasets/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/datasets/__init__.py
new file mode 100644
index 00000000..3d7790cc
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/datasets/__init__.py
@@ -0,0 +1,90 @@
+"""
+================================
+Datasets (:mod:`scipy.datasets`)
+================================
+
+.. currentmodule:: scipy.datasets
+
+Dataset Methods
+===============
+
+.. autosummary::
+   :toctree: generated/
+
+   ascent
+   face
+   electrocardiogram
+
+Utility Methods
+===============
+
+.. autosummary::
+   :toctree: generated/
+
+   download_all    -- Download all the dataset files to specified path.
+   clear_cache     -- Clear cached dataset directory.
+
+
+Usage of Datasets
+=================
+
+SciPy dataset methods can be simply called as follows: ``'<dataset-name>()'``.
+This downloads the dataset files over the network once and saves them to the
+cache before returning a `numpy.ndarray` object representing the dataset.
+
+Note that the return data structure and data type might be different for
+different dataset methods. For a more detailed example on usage, please look
+into the particular dataset method documentation above.
+
+
+How dataset retrieval and storage works
+=======================================
+
+SciPy dataset files are stored within individual GitHub repositories under the
+SciPy GitHub organization, following the naming convention
+``'dataset-<name>'``; for example, the `scipy.datasets.face` files live at
+https://github.com/scipy/dataset-face.  The `scipy.datasets` submodule utilizes
+and depends on `Pooch <https://www.fatiando.org/pooch/latest/>`_, a Python
+package built to simplify fetching data files. Pooch uses these repos to
+retrieve the respective dataset files when calling the dataset function.
+
+A registry of all the datasets, essentially a mapping of filenames to their
+SHA256 hashes and repo URLs, is maintained; Pooch uses it to handle and verify
+the downloads on function call. After a dataset is downloaded once, the files
+are saved in the system cache directory under ``'scipy-data'``.
+
+Dataset cache locations may vary on different platforms.
+
+For macOS::
+
+    '~/Library/Caches/scipy-data'
+
+For Linux and other Unix-like platforms::
+
+    '~/.cache/scipy-data'  # or the value of the XDG_CACHE_HOME env var, if defined
+
+For Windows::
+
+    'C:\\Users\\<username>\\AppData\\Local\\<AppAuthor>\\scipy-data\\Cache'
+
+
+In environments with constrained network connectivity (e.g., for security
+reasons) or on systems without a continuous internet connection, one may
+manually populate the cache by placing the contents of the dataset repo in
+the cache directory mentioned above, so that the dataset functions load the
+files from disk instead of attempting to fetch them over the network.
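+
+For instance, the files can be fetched once on a connected machine and then
+copied into the cache directory shown above (a sketch; this requires the
+optional ``pooch`` dependency)::
+
+    from scipy.datasets import download_all
+    download_all("/path/to/scipy-data")  # then copy to the offline host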
+
+"""
+
+
+from ._fetchers import face, ascent, electrocardiogram  # noqa: E402
+from ._download_all import download_all
+from ._utils import clear_cache
+
+__all__ = ['ascent', 'electrocardiogram', 'face',
+           'download_all', 'clear_cache']
+
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/__packaged__/coreml/.python_dependencies/scipy/datasets/_download_all.py b/__packaged__/coreml/.python_dependencies/scipy/datasets/_download_all.py
new file mode 100644
index 00000000..255fdcaf
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/datasets/_download_all.py
@@ -0,0 +1,57 @@
+"""
+Platform-independent script to download all the
+`scipy.datasets` module data files.
+This doesn't require a full scipy build.
+
+Run: python _download_all.py <download_dir>
+"""
+
+import argparse
+try:
+    import pooch
+except ImportError:
+    pooch = None
+
+
+if __package__ is None or __package__ == '':
+    # Running as python script, use absolute import
+    import _registry  # type: ignore
+else:
+    # Running as python module, use relative import
+    from . import _registry
+
+
+def download_all(path=None):
+    """
+    Utility method to download all the dataset files
+    for `scipy.datasets` module.
+
+    Parameters
+    ----------
+    path : str, optional
+        Directory path to download all the dataset files.
+        If None, defaults to the system cache directory detected by pooch.
+    """
+    if pooch is None:
+        raise ImportError("Missing optional dependency 'pooch' required "
+                          "for scipy.datasets module. Please use pip or "
+                          "conda to install 'pooch'.")
+    if path is None:
+        path = pooch.os_cache('scipy-data')
+    for dataset_name, dataset_hash in _registry.registry.items():
+        pooch.retrieve(url=_registry.registry_urls[dataset_name],
+                       known_hash=dataset_hash,
+                       fname=dataset_name, path=path)
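+
+
+# A usage sketch (assumes the optional pooch dependency is installed):
+#
+#     from scipy.datasets._download_all import download_all
+#     download_all()                   # default pooch cache directory
+#     download_all("/tmp/scipy-data")  # or any explicit target path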
+
+
+def main():
+    parser = argparse.ArgumentParser(description='Download SciPy data files.')
+    parser.add_argument("path", nargs='?', type=str,
+                        default=pooch.os_cache('scipy-data'),
+                        help="Directory path to download all the data files.")
+    args = parser.parse_args()
+    download_all(args.path)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/__packaged__/coreml/.python_dependencies/scipy/datasets/_fetchers.py b/__packaged__/coreml/.python_dependencies/scipy/datasets/_fetchers.py
new file mode 100644
index 00000000..96d49b98
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/datasets/_fetchers.py
@@ -0,0 +1,220 @@
+from numpy import array, frombuffer, load
+from ._registry import registry, registry_urls
+
+try:
+    import pooch
+except ImportError:
+    pooch = None
+    data_fetcher = None
+else:
+    data_fetcher = pooch.create(
+        # Use the default cache folder for the operating system
+        # Pooch uses appdirs (https://github.com/ActiveState/appdirs) to
+        # select an appropriate directory for the cache on each platform.
+        path=pooch.os_cache("scipy-data"),
+
+        # The remote data is on Github
+        # base_url is a required param, even though we override this
+        # using individual urls in the registry.
+        base_url="https://github.com/scipy/",
+        registry=registry,
+        urls=registry_urls
+    )
+
+
+def fetch_data(dataset_name, data_fetcher=data_fetcher):
+    if data_fetcher is None:
+        raise ImportError("Missing optional dependency 'pooch' required "
+                          "for scipy.datasets module. Please use pip or "
+                          "conda to install 'pooch'.")
+    # The "fetch" method returns the full path to the downloaded data file.
+    return data_fetcher.fetch(dataset_name)
+
+
+def ascent():
+    """
+    Get an 8-bit grayscale, 512 x 512 derived image for easy use in
+    demos.
+
+    The image is derived from accent-to-the-top.jpg at
+    http://www.public-domain-image.com/people-public-domain-images-pictures/
+
+    Parameters
+    ----------
+    None
+
+    Returns
+    -------
+    ascent : ndarray
+       convenient image to use for testing and demonstration
+
+    Examples
+    --------
+    >>> import scipy.datasets
+    >>> ascent = scipy.datasets.ascent()
+    >>> ascent.shape
+    (512, 512)
+    >>> ascent.max()
+    255
+
+    >>> import matplotlib.pyplot as plt
+    >>> plt.gray()
+    >>> plt.imshow(ascent)
+    >>> plt.show()
+
+    """
+    import pickle
+
+    # The file will be downloaded automatically the first time this is run,
+    # returning the path to the downloaded file. Afterwards, Pooch finds
+    # it in the local cache and doesn't repeat the download.
+    fname = fetch_data("ascent.dat")
+    # Now we just need to load it with our standard Python tools.
+    with open(fname, 'rb') as f:
+        ascent = array(pickle.load(f))
+    return ascent
+
+
+def electrocardiogram():
+    """
+    Load an electrocardiogram as an example for a 1-D signal.
+
+    The returned signal is a 5 minute long electrocardiogram (ECG), a medical
+    recording of the heart's electrical activity, sampled at 360 Hz.
+
+    Returns
+    -------
+    ecg : ndarray
+        The electrocardiogram in millivolt (mV) sampled at 360 Hz.
+
+    Notes
+    -----
+    The provided signal is an excerpt (19:35 to 24:35) from the `record 208`_
+    (lead MLII) provided by the MIT-BIH Arrhythmia Database [1]_ on
+    PhysioNet [2]_. The excerpt includes noise induced artifacts, typical
+    heartbeats as well as pathological changes.
+
+    .. _record 208: https://physionet.org/physiobank/database/html/mitdbdir/records.htm#208
+
+    .. versionadded:: 1.1.0
+
+    References
+    ----------
+    .. [1] Moody GB, Mark RG. The impact of the MIT-BIH Arrhythmia Database.
+           IEEE Eng in Med and Biol 20(3):45-50 (May-June 2001).
+           (PMID: 11446209); :doi:`10.13026/C2F305`
+    .. [2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh,
+           Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. PhysioBank,
+           PhysioToolkit, and PhysioNet: Components of a New Research Resource
+           for Complex Physiologic Signals. Circulation 101(23):e215-e220;
+           :doi:`10.1161/01.CIR.101.23.e215`
+
+    Examples
+    --------
+    >>> from scipy.datasets import electrocardiogram
+    >>> ecg = electrocardiogram()
+    >>> ecg
+    array([-0.245, -0.215, -0.185, ..., -0.405, -0.395, -0.385])
+    >>> ecg.shape, ecg.mean(), ecg.std()
+    ((108000,), -0.16510875, 0.5992473991177294)
+
+    As stated, the signal features several areas with differing morphology.
+    For example, the first few seconds show the electrical activity of a
+    heart in normal sinus rhythm, as seen below.
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> fs = 360
+    >>> time = np.arange(ecg.size) / fs
+    >>> plt.plot(time, ecg)
+    >>> plt.xlabel("time in s")
+    >>> plt.ylabel("ECG in mV")
+    >>> plt.xlim(9, 10.2)
+    >>> plt.ylim(-1, 1.5)
+    >>> plt.show()
+
+    After second 16, however, the first premature ventricular contractions,
+    also called extrasystoles, appear. These have a different morphology
+    compared to typical heartbeats. The difference can easily be observed
+    in the following plot.
+
+    >>> plt.plot(time, ecg)
+    >>> plt.xlabel("time in s")
+    >>> plt.ylabel("ECG in mV")
+    >>> plt.xlim(46.5, 50)
+    >>> plt.ylim(-2, 1.5)
+    >>> plt.show()
+
+    At several points large artifacts disturb the recording, e.g.:
+
+    >>> plt.plot(time, ecg)
+    >>> plt.xlabel("time in s")
+    >>> plt.ylabel("ECG in mV")
+    >>> plt.xlim(207, 215)
+    >>> plt.ylim(-2, 3.5)
+    >>> plt.show()
+
+    Finally, examining the power spectrum reveals that most of the biosignal is
+    made up of lower frequencies. At 60 Hz the noise induced by the mains
+    electricity can be clearly observed.
+
+    >>> from scipy.signal import welch
+    >>> f, Pxx = welch(ecg, fs=fs, nperseg=2048, scaling="spectrum")
+    >>> plt.semilogy(f, Pxx)
+    >>> plt.xlabel("Frequency in Hz")
+    >>> plt.ylabel("Power spectrum of the ECG in mV**2")
+    >>> plt.xlim(f[[0, -1]])
+    >>> plt.show()
+    """
+    fname = fetch_data("ecg.dat")
+    with load(fname) as file:
+        ecg = file["ecg"].astype(int)  # np.uint16 -> int
+    # Convert raw output of ADC to mV: (ecg - adc_zero) / adc_gain
+    ecg = (ecg - 1024) / 200.0
+    return ecg
+
+
+def face(gray=False):
+    """
+    Get a 1024 x 768, color image of a raccoon face.
+
+    raccoon-procyon-lotor.jpg at http://www.public-domain-image.com
+
+    Parameters
+    ----------
+    gray : bool, optional
+        If True return 8-bit grey-scale image, otherwise return a color image
+
+    Returns
+    -------
+    face : ndarray
+        image of a raccoon face
+
+    Examples
+    --------
+    >>> import scipy.datasets
+    >>> face = scipy.datasets.face()
+    >>> face.shape
+    (768, 1024, 3)
+    >>> face.max()
+    255
+    >>> face.dtype
+    dtype('uint8')
+
+    >>> import matplotlib.pyplot as plt
+    >>> plt.gray()
+    >>> plt.imshow(face)
+    >>> plt.show()
+
+    """
+    import bz2
+    fname = fetch_data("face.dat")
+    with open(fname, 'rb') as f:
+        rawdata = f.read()
+    face_data = bz2.decompress(rawdata)
+    face = frombuffer(face_data, dtype='uint8')
+    face.shape = (768, 1024, 3)
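+    # The 0.21/0.71/0.07 weights below approximate the Rec. 709 luma
+    # coefficients for an RGB-to-grayscale conversion.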
+    if gray is True:
+        face = (0.21 * face[:, :, 0] + 0.71 * face[:, :, 1] +
+                0.07 * face[:, :, 2]).astype('uint8')
+    return face
diff --git a/__packaged__/coreml/.python_dependencies/scipy/datasets/_registry.py b/__packaged__/coreml/.python_dependencies/scipy/datasets/_registry.py
new file mode 100644
index 00000000..969384ad
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/datasets/_registry.py
@@ -0,0 +1,26 @@
+##########################################################################
+# This file serves as the dataset registry for the SciPy datasets submodule.
+##########################################################################
+
+
+# To generate the SHA256 hash, use the command
+# openssl sha256 <filename>
+registry = {
+    "ascent.dat": "03ce124c1afc880f87b55f6b061110e2e1e939679184f5614e38dacc6c1957e2",
+    "ecg.dat": "f20ad3365fb9b7f845d0e5c48b6fe67081377ee466c3a220b7f69f35c8958baf",
+    "face.dat": "9d8b0b4d081313e2b485748c770472e5a95ed1738146883d84c7030493e82886"
+}
+
+registry_urls = {
+    "ascent.dat": "https://raw.githubusercontent.com/scipy/dataset-ascent/main/ascent.dat",
+    "ecg.dat": "https://raw.githubusercontent.com/scipy/dataset-ecg/main/ecg.dat",
+    "face.dat": "https://raw.githubusercontent.com/scipy/dataset-face/main/face.dat"
+}
+
+# Mapping of dataset methods to their associated filenames:
+# <method_name> : ["filename1", "filename2", ...]
+method_files_map = {
+    "ascent": ["ascent.dat"],
+    "electrocardiogram": ["ecg.dat"],
+    "face": ["face.dat"]
+}
diff --git a/__packaged__/coreml/.python_dependencies/scipy/datasets/_utils.py b/__packaged__/coreml/.python_dependencies/scipy/datasets/_utils.py
new file mode 100644
index 00000000..dc6fd07d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/datasets/_utils.py
@@ -0,0 +1,81 @@
+import os
+import shutil
+from ._registry import method_files_map
+
+try:
+    import appdirs
+except ImportError:
+    appdirs = None
+
+
+def _clear_cache(datasets, cache_dir=None, method_map=None):
+    if method_map is None:
+        # Use SciPy Datasets method map
+        method_map = method_files_map
+    if cache_dir is None:
+        # Use default cache_dir path
+        if appdirs is None:
+            # appdirs is a pooch dependency
+            raise ImportError("Missing optional dependency 'pooch' required "
+                              "for scipy.datasets module. Please use pip or "
+                              "conda to install 'pooch'.")
+        cache_dir = appdirs.user_cache_dir("scipy-data")
+
+    if not os.path.exists(cache_dir):
+        print(f"Cache Directory {cache_dir} doesn't exist. Nothing to clear.")
+        return
+
+    if datasets is None:
+        print(f"Cleaning the cache directory {cache_dir}!")
+        shutil.rmtree(cache_dir)
+    else:
+        if not isinstance(datasets, (list, tuple)):
+            # single dataset method passed should be converted to list
+            datasets = [datasets, ]
+        for dataset in datasets:
+            assert callable(dataset)
+            dataset_name = dataset.__name__  # Name of the dataset method
+            if dataset_name not in method_map:
+                raise ValueError(f"Dataset method {dataset_name} doesn't "
+                                 "exist. Please check if the passed dataset "
+                                 "is a subset of the following dataset "
+                                 f"methods: {list(method_map.keys())}")
+
+            data_files = method_map[dataset_name]
+            data_filepaths = [os.path.join(cache_dir, file)
+                              for file in data_files]
+            for data_filepath in data_filepaths:
+                if os.path.exists(data_filepath):
+                    print("Cleaning the file "
+                          f"{os.path.split(data_filepath)[1]} "
+                          f"for dataset {dataset_name}")
+                    os.remove(data_filepath)
+                else:
+                    print(f"Path {data_filepath} doesn't exist. "
+                          "Nothing to clear.")
+
+
+def clear_cache(datasets=None):
+    """
+    Cleans the scipy datasets cache directory.
+
+    If a scipy.datasets method or a list/tuple of the same is
+    provided, then clear_cache removes all the data files
+    associated with the passed dataset method callable(s).
+
+    By default, it removes all the cached data files.
+
+    Parameters
+    ----------
+    datasets : callable or list/tuple of callable or None
+
+    Examples
+    --------
+    >>> from scipy import datasets
+    >>> ascent_array = datasets.ascent()
+    >>> ascent_array.shape
+    (512, 512)
+    >>> datasets.clear_cache([datasets.ascent])
+    Cleaning the file ascent.dat for dataset ascent
+    """
+    _clear_cache(datasets)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/datasets/tests/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/datasets/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/scipy/datasets/tests/test_data.py b/__packaged__/coreml/.python_dependencies/scipy/datasets/tests/test_data.py
new file mode 100644
index 00000000..f94ebbe7
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/datasets/tests/test_data.py
@@ -0,0 +1,123 @@
+from scipy.datasets._registry import registry
+from scipy.datasets._fetchers import data_fetcher
+from scipy.datasets._utils import _clear_cache
+from scipy.datasets import ascent, face, electrocardiogram, download_all
+from numpy.testing import assert_equal, assert_almost_equal
+import os
+import pytest
+
+try:
+    import pooch
+except ImportError:
+    raise ImportError("Missing optional dependency 'pooch' required "
+                      "for scipy.datasets module. Please use pip or "
+                      "conda to install 'pooch'.")
+
+
+data_dir = data_fetcher.path  # type: ignore
+
+
+def _has_hash(path, expected_hash):
+    """Check if the provided path has the expected hash."""
+    if not os.path.exists(path):
+        return False
+    return pooch.file_hash(path) == expected_hash
+
+
+class TestDatasets:
+
+    @pytest.fixture(scope='module', autouse=True)
+    def test_download_all(self):
+        # This fixture requires INTERNET CONNECTION
+
+        # test_setup phase
+        download_all()
+
+        yield
+
+    def test_existence_all(self):
+        assert len(os.listdir(data_dir)) >= len(registry)
+
+    def test_ascent(self):
+        assert_equal(ascent().shape, (512, 512))
+
+        # hash check
+        assert _has_hash(os.path.join(data_dir, "ascent.dat"),
+                         registry["ascent.dat"])
+
+    def test_face(self):
+        assert_equal(face().shape, (768, 1024, 3))
+
+        # hash check
+        assert _has_hash(os.path.join(data_dir, "face.dat"),
+                         registry["face.dat"])
+
+    def test_electrocardiogram(self):
+        # Test shape, dtype and stats of signal
+        ecg = electrocardiogram()
+        assert_equal(ecg.dtype, float)
+        assert_equal(ecg.shape, (108000,))
+        assert_almost_equal(ecg.mean(), -0.16510875)
+        assert_almost_equal(ecg.std(), 0.5992473991177294)
+
+        # hash check
+        assert _has_hash(os.path.join(data_dir, "ecg.dat"),
+                         registry["ecg.dat"])
+
+
+def test_clear_cache(tmp_path):
+    # Note: `tmp_path` is a pytest fixture, it handles cleanup
+    dummy_basepath = tmp_path / "dummy_cache_dir"
+    dummy_basepath.mkdir()
+
+    # Create four dummy dataset files for dummy dataset methods
+    dummy_method_map = {}
+    for i in range(4):
+        dummy_method_map[f"data{i}"] = [f"data{i}.dat"]
+        data_filepath = dummy_basepath / f"data{i}.dat"
+        data_filepath.write_text("")
+
+    # clear files associated with the single dataset method data0;
+    # also test a callable argument instead of a list of callables
+    def data0():
+        pass
+    _clear_cache(datasets=data0, cache_dir=dummy_basepath,
+                 method_map=dummy_method_map)
+    assert not os.path.exists(dummy_basepath/"data0.dat")
+
+    # clear files associated with multiple dataset methods "data1" and "data2"
+    def data1():
+        pass
+
+    def data2():
+        pass
+    _clear_cache(datasets=[data1, data2], cache_dir=dummy_basepath,
+                 method_map=dummy_method_map)
+    assert not os.path.exists(dummy_basepath/"data1.dat")
+    assert not os.path.exists(dummy_basepath/"data2.dat")
+
+    # clear multiple dataset files "data4_0.dat" and "data4_1.dat"
+    # associated with dataset method "data4"
+    def data4():
+        pass
+    # create files
+    (dummy_basepath / "data4_0.dat").write_text("")
+    (dummy_basepath / "data4_1.dat").write_text("")
+
+    dummy_method_map["data4"] = ["data4_0.dat", "data4_1.dat"]
+    _clear_cache(datasets=[data4], cache_dir=dummy_basepath,
+                 method_map=dummy_method_map)
+    assert not os.path.exists(dummy_basepath/"data4_0.dat")
+    assert not os.path.exists(dummy_basepath/"data4_1.dat")
+
+    # wrong dataset method should raise ValueError since it
+    # doesn't exist in the dummy_method_map
+    def data5():
+        pass
+    with pytest.raises(ValueError):
+        _clear_cache(datasets=[data5], cache_dir=dummy_basepath,
+                     method_map=dummy_method_map)
+
+    # remove all dataset cache
+    _clear_cache(datasets=None, cache_dir=dummy_basepath)
+    assert not os.path.exists(dummy_basepath)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fft/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/fft/__init__.py
new file mode 100644
index 00000000..24a9173a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fft/__init__.py
@@ -0,0 +1,113 @@
+"""
+==============================================
+Discrete Fourier transforms (:mod:`scipy.fft`)
+==============================================
+
+.. currentmodule:: scipy.fft
+
+Fast Fourier Transforms (FFTs)
+==============================
+
+.. autosummary::
+   :toctree: generated/
+
+   fft - Fast (discrete) Fourier Transform (FFT)
+   ifft - Inverse FFT
+   fft2 - 2-D FFT
+   ifft2 - 2-D inverse FFT
+   fftn - N-D FFT
+   ifftn - N-D inverse FFT
+   rfft - FFT of strictly real-valued sequence
+   irfft - Inverse of rfft
+   rfft2 - 2-D FFT of real sequence
+   irfft2 - Inverse of rfft2
+   rfftn - N-D FFT of real sequence
+   irfftn - Inverse of rfftn
+   hfft - FFT of a Hermitian sequence (real spectrum)
+   ihfft - Inverse of hfft
+   hfft2 - 2-D FFT of a Hermitian sequence
+   ihfft2 - Inverse of hfft2
+   hfftn - N-D FFT of a Hermitian sequence
+   ihfftn - Inverse of hfftn
+
+Discrete Sin and Cosine Transforms (DST and DCT)
+================================================
+
+.. autosummary::
+   :toctree: generated/
+
+   dct - Discrete cosine transform
+   idct - Inverse discrete cosine transform
+   dctn - N-D Discrete cosine transform
+   idctn - N-D Inverse discrete cosine transform
+   dst - Discrete sine transform
+   idst - Inverse discrete sine transform
+   dstn - N-D Discrete sine transform
+   idstn - N-D Inverse discrete sine transform
+
+Fast Hankel Transforms
+======================
+
+.. autosummary::
+   :toctree: generated/
+
+   fht - Fast Hankel transform
+   ifht - Inverse of fht
+
+Helper functions
+================
+
+.. autosummary::
+   :toctree: generated/
+
+   fftshift - Shift the zero-frequency component to the center of the spectrum
+   ifftshift - The inverse of `fftshift`
+   fftfreq - Return the Discrete Fourier Transform sample frequencies
+   rfftfreq - DFT sample frequencies (for usage with rfft, irfft)
+   fhtoffset - Compute an optimal offset for the Fast Hankel Transform
+   next_fast_len - Find the optimal length to zero-pad an FFT for speed
+   set_workers - Context manager to set default number of workers
+   get_workers - Get the current default number of workers
+
+Backend control
+===============
+
+.. autosummary::
+   :toctree: generated/
+
+   set_backend - Context manager to set the backend within a fixed scope
+   skip_backend - Context manager to skip a backend within a fixed scope
+   set_global_backend - Sets the global fft backend
+   register_backend - Register a backend for permanent use
+
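+A quick usage sketch combining the transforms and helpers above::
+
+    import numpy as np
+    from scipy.fft import fft, set_workers
+
+    with set_workers(2):     # bound the worker pool within this block
+        y = fft(np.ones(8))  # y[0] == 8+0j, remaining bins ~0
+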
+"""
+
+from ._basic import (
+    fft, ifft, fft2, ifft2, fftn, ifftn,
+    rfft, irfft, rfft2, irfft2, rfftn, irfftn,
+    hfft, ihfft, hfft2, ihfft2, hfftn, ihfftn)
+from ._realtransforms import dct, idct, dst, idst, dctn, idctn, dstn, idstn
+from ._fftlog import fhtoffset
+from ._fftlog_multimethods import fht, ifht
+from ._helper import next_fast_len
+from ._backend import (set_backend, skip_backend, set_global_backend,
+                       register_backend)
+from numpy.fft import fftfreq, rfftfreq, fftshift, ifftshift
+from ._pocketfft.helper import set_workers, get_workers
+
+__all__ = [
+    'fft', 'ifft', 'fft2', 'ifft2', 'fftn', 'ifftn',
+    'rfft', 'irfft', 'rfft2', 'irfft2', 'rfftn', 'irfftn',
+    'hfft', 'ihfft', 'hfft2', 'ihfft2', 'hfftn', 'ihfftn',
+    'fftfreq', 'rfftfreq', 'fftshift', 'ifftshift',
+    'next_fast_len',
+    'dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn',
+    'fht', 'ifht',
+    'fhtoffset',
+    'set_backend', 'skip_backend', 'set_global_backend', 'register_backend',
+    'get_workers', 'set_workers']
+
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fft/_backend.py b/__packaged__/coreml/.python_dependencies/scipy/fft/_backend.py
new file mode 100644
index 00000000..3b812716
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fft/_backend.py
@@ -0,0 +1,191 @@
+import scipy._lib.uarray as ua
+from . import _fftlog
+from . import _pocketfft
+
+
+class _ScipyBackend:
+    """The default backend for fft calculations
+
+    Notes
+    -----
+    We use the domain ``numpy.scipy`` rather than ``scipy`` because ``uarray``
+    treats the domain as a hierarchy. This means the user can install a single
+    backend for ``numpy`` and have it implement ``numpy.scipy.fft`` as well.
+    """
+    __ua_domain__ = "numpy.scipy.fft"
+
+    @staticmethod
+    def __ua_function__(method, args, kwargs):
+
+        fn = getattr(_pocketfft, method.__name__, None)
+        if fn is None:
+            fn = getattr(_fftlog, method.__name__, None)
+        if fn is None:
+            return NotImplemented
+        return fn(*args, **kwargs)
+
+
+_named_backends = {
+    'scipy': _ScipyBackend,
+}
+
+
+def _backend_from_arg(backend):
+    """Maps strings to known backends and validates the backend"""
+
+    if isinstance(backend, str):
+        try:
+            backend = _named_backends[backend]
+        except KeyError as e:
+            raise ValueError('Unknown backend {}'.format(backend)) from e
+
+    if backend.__ua_domain__ != 'numpy.scipy.fft':
+        raise ValueError('Backend does not implement "numpy.scipy.fft"')
+
+    return backend
+
+
+def set_global_backend(backend, coerce=False, only=False, try_last=False):
+    """Sets the global fft backend
+
+    This utility method replaces the default backend for permanent use. It
+    will be tried in the list of backends automatically, unless the
+    ``only`` flag is set on a backend. This will be the first tried
+    backend outside the :obj:`set_backend` context manager.
+
+    Parameters
+    ----------
+    backend : {object, 'scipy'}
+        The backend to use.
+        Can either be a ``str`` containing the name of a known backend
+        {'scipy'} or an object that implements the uarray protocol.
+    coerce : bool
+        Whether to coerce input types when trying this backend.
+    only : bool
+        If ``True``, no more backends will be tried if this fails.
+        Implied by ``coerce=True``.
+    try_last : bool
+        If ``True``, the global backend is tried after registered backends.
+
+    Raises
+    ------
+    ValueError
+        If the backend does not implement ``numpy.scipy.fft``.
+
+    Notes
+    -----
+    This will overwrite the previously set global backend, which, by default, is
+    the SciPy implementation.
+
+    Examples
+    --------
+    We can set the global fft backend:
+
+    >>> from scipy.fft import fft, set_global_backend
+    >>> set_global_backend("scipy")  # Sets global backend. "scipy" is the default backend.
+    >>> fft([1])  # Calls the global backend
+    array([1.+0.j])
+    """
+    backend = _backend_from_arg(backend)
+    ua.set_global_backend(backend, coerce=coerce, only=only, try_last=try_last)
+
+
+def register_backend(backend):
+    """
+    Register a backend for permanent use.
+
+    Registered backends have the lowest priority and will be tried after the
+    global backend.
+
+    Parameters
+    ----------
+    backend : {object, 'scipy'}
+        The backend to use.
+        Can either be a ``str`` containing the name of a known backend
+        {'scipy'} or an object that implements the uarray protocol.
+
+    Raises
+    ------
+    ValueError
+        If the backend does not implement ``numpy.scipy.fft``.
+
+    Examples
+    --------
+    We can register a new fft backend:
+
+    >>> from scipy.fft import fft, register_backend, set_global_backend
+    >>> class NoopBackend:  # Define a backend that implements nothing
+    ...     __ua_domain__ = "numpy.scipy.fft"
+    ...     def __ua_function__(self, func, args, kwargs):
+    ...          return NotImplemented
+    >>> set_global_backend(NoopBackend())  # Set the invalid backend as global
+    >>> register_backend("scipy")  # Register a new backend
+    >>> fft([1])  # The registered backend is called because the global backend returns `NotImplemented`
+    array([1.+0.j])
+    >>> set_global_backend("scipy")  # Restore global backend to default
+
+    """
+    backend = _backend_from_arg(backend)
+    ua.register_backend(backend)
+
+
+def set_backend(backend, coerce=False, only=False):
+    """Context manager to set the backend within a fixed scope.
+
+    Upon entering the ``with`` statement, the given backend will be added to
+    the list of available backends with the highest priority. Upon exit, the
+    backend is reset to the state before entering the scope.
+
+    Parameters
+    ----------
+    backend : {object, 'scipy'}
+        The backend to use.
+        Can either be a ``str`` containing the name of a known backend
+        {'scipy'} or an object that implements the uarray protocol.
+    coerce : bool, optional
+        Whether to allow expensive conversions for the ``x`` parameter, e.g.,
+        copying a NumPy array to the GPU for a CuPy backend. Implies ``only``.
+    only : bool, optional
+        If ``only`` is ``True`` and this backend returns ``NotImplemented``,
+        then a ``BackendNotImplementedError`` will be raised immediately,
+        ignoring any lower-priority backends.
+
+    Examples
+    --------
+    >>> import scipy.fft as fft
+    >>> with fft.set_backend('scipy', only=True):
+    ...     fft.fft([1])  # Always calls the scipy implementation
+    array([1.+0.j])
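+
+    A backend object can be passed instead of a name. A minimal sketch using a
+    no-op backend that defers by returning ``NotImplemented``, so the call
+    falls through to the default SciPy implementation:
+
+    >>> class NoopBackend:
+    ...     __ua_domain__ = "numpy.scipy.fft"
+    ...     def __ua_function__(self, func, args, kwargs):
+    ...         return NotImplemented
+    >>> with fft.set_backend(NoopBackend()):
+    ...     fft.fft([1])
+    array([1.+0.j])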
+    """
+    backend = _backend_from_arg(backend)
+    return ua.set_backend(backend, coerce=coerce, only=only)
+
+
+def skip_backend(backend):
+    """Context manager to skip a backend within a fixed scope.
+
+    Within the context of a ``with`` statement, the given backend will not be
+    called. This covers backends registered both locally and globally. Upon
+    exit, the backend will again be considered.
+
+    Parameters
+    ----------
+    backend : {object, 'scipy'}
+        The backend to skip.
+        Can either be a ``str`` containing the name of a known backend
+        {'scipy'} or an object that implements the uarray protocol.
+
+    Examples
+    --------
+    >>> import scipy.fft as fft
+    >>> fft.fft([1])  # Calls default SciPy backend
+    array([1.+0.j])
+    >>> with fft.skip_backend('scipy'):  # We explicitly skip the SciPy backend
+    ...     fft.fft([1])                 # leaving no implementation available
+    Traceback (most recent call last):
+        ...
+    BackendNotImplementedError: No selected backends had an implementation ...
+    """
+    backend = _backend_from_arg(backend)
+    return ua.skip_backend(backend)
+
+
+set_global_backend('scipy', try_last=True)
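+
+# With ``try_last=True``, the default SciPy backend is consulted only after
+# any user-registered backends. Illustrative sketch (``MyBackend`` is a
+# hypothetical uarray backend): after ``register_backend(MyBackend())``, a
+# call to ``scipy.fft.fft(x)`` tries ``MyBackend`` first and falls back to
+# the SciPy implementation if it returns ``NotImplemented``.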
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fft/_basic.py b/__packaged__/coreml/.python_dependencies/scipy/fft/_basic.py
new file mode 100644
index 00000000..8a18faf2
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fft/_basic.py
@@ -0,0 +1,1629 @@
+from scipy._lib.uarray import generate_multimethod, Dispatchable
+import numpy as np
+
+
+def _x_replacer(args, kwargs, dispatchables):
+    """
+    uarray argument replacer to replace the transform input array (``x``)
+    """
+    if len(args) > 0:
+        return (dispatchables[0],) + args[1:], kwargs
+    kw = kwargs.copy()
+    kw['x'] = dispatchables[0]
+    return args, kw
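+
+# For example, a call ``fft(x, 4)`` arrives here with ``args == (x, 4)`` and
+# ``kwargs == {}``; the replacer returns ``((dispatchables[0], 4), {})``, so
+# only the array argument is swapped for its dispatchable wrapper.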
+
+
+def _dispatch(func):
+    """
+    Function annotation that creates a uarray multimethod from the function
+    """
+    return generate_multimethod(func, _x_replacer, domain="numpy.scipy.fft")
+
+
+@_dispatch
+def fft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *,
+        plan=None):
+    """
+    Compute the 1-D discrete Fourier Transform.
+
+    This function computes the 1-D *n*-point discrete Fourier
+    Transform (DFT) with the efficient Fast Fourier Transform (FFT)
+    algorithm [1]_.
+
+    Parameters
+    ----------
+    x : array_like
+        Input array, can be complex.
+    n : int, optional
+        Length of the transformed axis of the output.
+        If `n` is smaller than the length of the input, the input is cropped.
+        If it is larger, the input is padded with zeros. If `n` is not given,
+        the length of the input along the axis specified by `axis` is used.
+    axis : int, optional
+        Axis over which to compute the FFT. If not given, the last axis is
+        used.
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode. Default is "backward", meaning no normalization on
+        the forward transforms and scaling by ``1/n`` on the `ifft`.
+        "forward" instead applies the ``1/n`` factor on the forward tranform.
+        For ``norm="ortho"``, both directions are scaled by ``1/sqrt(n)``.
+
+        .. versionadded:: 1.6.0
+           ``norm={"forward", "backward"}`` options were added
+
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+        See the notes below for more details.
+    workers : int, optional
+        Maximum number of workers to use for parallel computation. If negative,
+        the value wraps around from ``os.cpu_count()``. See below for more
+        details.
+    plan : object, optional
+        This argument is reserved for passing in a precomputed plan provided
+        by downstream FFT vendors. It is currently not used in SciPy.
+
+        .. versionadded:: 1.5.0
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axis
+        indicated by `axis`, or the last one if `axis` is not specified.
+
+    Raises
+    ------
+    IndexError
+        If `axis` is larger than the last axis of `x`.
+
+    See Also
+    --------
+    ifft : The inverse of `fft`.
+    fft2 : The 2-D FFT.
+    fftn : The N-D FFT.
+    rfftn : The N-D FFT of real input.
+    fftfreq : Frequency bins for given FFT parameters.
+    next_fast_len : Size to pad input to for most efficient transforms
+
+    Notes
+    -----
+    FFT (Fast Fourier Transform) refers to a way the discrete Fourier Transform
+    (DFT) can be calculated efficiently, by using symmetries in the calculated
+    terms. The symmetry is highest when `n` is a power of 2, and the transform
+    is therefore most efficient for these sizes. For poorly factorizable sizes,
+    `scipy.fft` uses Bluestein's algorithm [2]_ and so is never worse than
+    O(`n` log `n`). Further performance improvements may be seen by zero-padding
+    the input using `next_fast_len`.
+
+    If ``x`` is a 1-D array, then the `fft` is equivalent to ::
+
+        y[k] = np.sum(x * np.exp(-2j * np.pi * k * np.arange(n)/n))
+
+    The frequency term ``f=k/n`` is found at ``y[k]``. At ``y[n/2]`` we reach
+    the Nyquist frequency and wrap around to the negative-frequency terms. So,
+    for an 8-point transform, the frequencies of the result are
+    [0, 1, 2, 3, -4, -3, -2, -1]. To rearrange the fft output so that the
+    zero-frequency component is centered, like [-4, -3, -2, -1, 0, 1, 2, 3],
+    use `fftshift`.
+
+    Transforms can be done in single, double, or extended precision (long
+    double) floating point. Half precision inputs will be converted to single
+    precision and non-floating-point inputs will be converted to double
+    precision.
+
+    If the data type of ``x`` is real, a "real FFT" algorithm is automatically
+    used, which roughly halves the computation time. To increase efficiency
+    a little further, use `rfft`, which does the same calculation, but only
+    outputs half of the symmetrical spectrum. If the data are both real and
+    symmetrical, the `dct` can again double the efficiency, by generating
+    half of the spectrum from half of the signal.
+
+    When ``overwrite_x=True`` is specified, the memory referenced by ``x`` may
+    be used by the implementation in any way. This may include reusing the
+    memory for the result, but this is in no way guaranteed. You should not
+    rely on the contents of ``x`` after the transform as this may change in
+    future without warning.
+
+    The ``workers`` argument specifies the maximum number of parallel jobs to
+    split the FFT computation into. This will execute independent 1-D
+    FFTs within ``x``. So, ``x`` must be at least 2-D and the
+    non-transformed axes must be large enough to split into chunks. If ``x`` is
+    too small, fewer jobs may be used than requested.
+
+    References
+    ----------
+    .. [1] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
+           machine calculation of complex Fourier series," *Math. Comput.*
+           19: 297-301.
+    .. [2] Bluestein, L., 1970, "A linear filtering approach to the
+           computation of discrete Fourier transform". *IEEE Transactions on
+           Audio and Electroacoustics.* 18 (4): 451-455.
+
+    Examples
+    --------
+    >>> import scipy.fft
+    >>> import numpy as np
+    >>> scipy.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
+    array([-2.33486982e-16+1.14423775e-17j,  8.00000000e+00-1.25557246e-15j,
+            2.33486982e-16+2.33486982e-16j,  0.00000000e+00+1.22464680e-16j,
+           -1.14423775e-17+2.33486982e-16j,  0.00000000e+00+5.20784380e-16j,
+            1.14423775e-17+1.14423775e-17j,  0.00000000e+00+1.22464680e-16j])
+
+    In this example, real input has an FFT which is Hermitian, i.e., symmetric
+    in the real part and anti-symmetric in the imaginary part:
+
+    >>> from scipy.fft import fft, fftfreq, fftshift
+    >>> import matplotlib.pyplot as plt
+    >>> t = np.arange(256)
+    >>> sp = fftshift(fft(np.sin(t)))
+    >>> freq = fftshift(fftfreq(t.shape[-1]))
+    >>> plt.plot(freq, sp.real, freq, sp.imag)
+    [<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.show()
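+
+    As a small numerical sketch of the ``norm`` conventions described above,
+    ``norm="ortho"`` scales the forward transform by ``1/sqrt(n)``:
+
+    >>> scipy.fft.fft(np.ones(4))
+    array([4.+0.j, 0.+0.j, 0.+0.j, 0.+0.j]) # may vary
+    >>> scipy.fft.fft(np.ones(4), norm="ortho")
+    array([2.+0.j, 0.+0.j, 0.+0.j, 0.+0.j]) # may vary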
+
+    """
+    return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def ifft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *,
+         plan=None):
+    """
+    Compute the 1-D inverse discrete Fourier Transform.
+
+    This function computes the inverse of the 1-D *n*-point
+    discrete Fourier transform computed by `fft`.  In other words,
+    ``ifft(fft(x)) == x`` to within numerical accuracy.
+
+    The input should be ordered in the same way as is returned by `fft`,
+    i.e.,
+
+    * ``x[0]`` should contain the zero frequency term,
+    * ``x[1:n//2]`` should contain the positive-frequency terms,
+    * ``x[n//2 + 1:]`` should contain the negative-frequency terms, in
+      increasing order starting from the most negative frequency.
+
+    For an even number of input points, ``x[n//2]`` represents the sum of
+    the values at the positive and negative Nyquist frequencies, as the two
+    are aliased together. See `fft` for details.
+
+    Parameters
+    ----------
+    x : array_like
+        Input array, can be complex.
+    n : int, optional
+        Length of the transformed axis of the output.
+        If `n` is smaller than the length of the input, the input is cropped.
+        If it is larger, the input is padded with zeros. If `n` is not given,
+        the length of the input along the axis specified by `axis` is used.
+        See notes about padding issues.
+    axis : int, optional
+        Axis over which to compute the inverse DFT. If not given, the last
+        axis is used.
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see `fft`). Default is "backward".
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+        See :func:`fft` for more details.
+    workers : int, optional
+        Maximum number of workers to use for parallel computation. If negative,
+        the value wraps around from ``os.cpu_count()``.
+        See :func:`~scipy.fft.fft` for more details.
+    plan : object, optional
+        This argument is reserved for passing in a precomputed plan provided
+        by downstream FFT vendors. It is currently not used in SciPy.
+
+        .. versionadded:: 1.5.0
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axis
+        indicated by `axis`, or the last one if `axis` is not specified.
+
+    Raises
+    ------
+    IndexError
+        If `axis` is larger than the last axis of `x`.
+
+    See Also
+    --------
+    fft : The 1-D (forward) FFT, of which `ifft` is the inverse.
+    ifft2 : The 2-D inverse FFT.
+    ifftn : The N-D inverse FFT.
+
+    Notes
+    -----
+    If the input parameter `n` is larger than the size of the input, the input
+    is padded by appending zeros at the end. Even though this is the common
+    approach, it might lead to surprising results. If a different padding is
+    desired, it must be performed before calling `ifft`.
+
+    If ``x`` is a 1-D array, then the `ifft` is equivalent to ::
+
+        y[k] = np.sum(x * np.exp(2j * np.pi * k * np.arange(n)/n)) / len(x)
+
+    As with `fft`, `ifft` has support for all floating point types and is
+    optimized for real input.
+
+    Examples
+    --------
+    >>> import scipy.fft
+    >>> import numpy as np
+    >>> scipy.fft.ifft([0, 4, 0, 0])
+    array([ 1.+0.j,  0.+1.j, -1.+0.j,  0.-1.j]) # may vary
+
+    Create and plot a band-limited signal with random phases:
+
+    >>> import matplotlib.pyplot as plt
+    >>> rng = np.random.default_rng()
+    >>> t = np.arange(400)
+    >>> n = np.zeros((400,), dtype=complex)
+    >>> n[40:60] = np.exp(1j*rng.uniform(0, 2*np.pi, (20,)))
+    >>> s = scipy.fft.ifft(n)
+    >>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
+    [<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.legend(('real', 'imaginary'))
+    <matplotlib.legend.Legend object at 0x...>
+    >>> plt.show()
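+
+    The round-trip property ``ifft(fft(x)) == x`` stated above can also be
+    checked numerically; a minimal sketch:
+
+    >>> x = np.array([1.0, 2.0, 3.0, 4.0])
+    >>> np.allclose(scipy.fft.ifft(scipy.fft.fft(x)), x)
+    True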
+
+    """
+    return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def rfft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *,
+         plan=None):
+    """
+    Compute the 1-D discrete Fourier Transform for real input.
+
+    This function computes the 1-D *n*-point discrete Fourier
+    Transform (DFT) of a real-valued array by means of an efficient algorithm
+    called the Fast Fourier Transform (FFT).
+
+    Parameters
+    ----------
+    x : array_like
+        Input array
+    n : int, optional
+        Number of points along transformation axis in the input to use.
+        If `n` is smaller than the length of the input, the input is cropped.
+        If it is larger, the input is padded with zeros. If `n` is not given,
+        the length of the input along the axis specified by `axis` is used.
+    axis : int, optional
+        Axis over which to compute the FFT. If not given, the last axis is
+        used.
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see `fft`). Default is "backward".
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+        See :func:`fft` for more details.
+    workers : int, optional
+        Maximum number of workers to use for parallel computation. If negative,
+        the value wraps around from ``os.cpu_count()``.
+        See :func:`~scipy.fft.fft` for more details.
+    plan : object, optional
+        This argument is reserved for passing in a precomputed plan provided
+        by downstream FFT vendors. It is currently not used in SciPy.
+
+        .. versionadded:: 1.5.0
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axis
+        indicated by `axis`, or the last one if `axis` is not specified.
+        If `n` is even, the length of the transformed axis is ``(n/2)+1``.
+        If `n` is odd, the length is ``(n+1)/2``.
+
+    Raises
+    ------
+    IndexError
+        If `axis` is larger than the last axis of `x`.
+
+    See Also
+    --------
+    irfft : The inverse of `rfft`.
+    fft : The 1-D FFT of general (complex) input.
+    fftn : The N-D FFT.
+    rfft2 : The 2-D FFT of real input.
+    rfftn : The N-D FFT of real input.
+
+    Notes
+    -----
+    When the DFT is computed for purely real input, the output is
+    Hermitian-symmetric, i.e., the negative frequency terms are just the complex
+    conjugates of the corresponding positive-frequency terms, and the
+    negative-frequency terms are therefore redundant. This function does not
+    compute the negative frequency terms, and the length of the transformed
+    axis of the output is therefore ``n//2 + 1``.
+
+    When ``X = rfft(x)`` and fs is the sampling frequency, ``X[0]`` contains
+    the zero-frequency term 0*fs, which is real due to Hermitian symmetry.
+
+    If `n` is even, ``X[-1]`` contains the term representing both positive
+    and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely
+    real. If `n` is odd, there is no term at fs/2; ``X[-1]`` contains
+    the largest positive frequency (fs/2*(n-1)/n), and is complex in the
+    general case.
+
+    If the input `x` contains an imaginary part, it is silently discarded.
+
+    Examples
+    --------
+    >>> import scipy.fft
+    >>> scipy.fft.fft([0, 1, 0, 0])
+    array([ 1.+0.j,  0.-1.j, -1.+0.j,  0.+1.j]) # may vary
+    >>> scipy.fft.rfft([0, 1, 0, 0])
+    array([ 1.+0.j,  0.-1.j, -1.+0.j]) # may vary
+
+    Notice how the final element of the `fft` output is the complex conjugate
+    of the second element, for real input. For `rfft`, this symmetry is
+    exploited to compute only the non-negative frequency terms.
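+
+    The output-length rule ``n//2 + 1`` from the Notes can be checked
+    directly:
+
+    >>> import numpy as np
+    >>> scipy.fft.rfft(np.zeros(8)).shape
+    (5,)
+    >>> scipy.fft.rfft(np.zeros(9)).shape
+    (5,)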
+
+    """
+    return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def irfft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *,
+          plan=None):
+    """
+    Computes the inverse of `rfft`.
+
+    This function computes the inverse of the 1-D *n*-point
+    discrete Fourier Transform of real input computed by `rfft`.
+    In other words, ``irfft(rfft(x), len(x)) == x`` to within numerical
+    accuracy. (See Notes below for why ``len(x)`` is necessary here.)
+
+    The input is expected to be in the form returned by `rfft`, i.e., the
+    real zero-frequency term followed by the complex positive frequency terms
+    in order of increasing frequency. Since the discrete Fourier Transform of
+    real input is Hermitian-symmetric, the negative frequency terms are taken
+    to be the complex conjugates of the corresponding positive frequency terms.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    n : int, optional
+        Length of the transformed axis of the output.
+        For `n` output points, ``n//2+1`` input points are necessary. If the
+        input is longer than this, it is cropped. If it is shorter than this,
+        it is padded with zeros. If `n` is not given, it is taken to be
+        ``2*(m-1)``, where ``m`` is the length of the input along the axis
+        specified by `axis`.
+    axis : int, optional
+        Axis over which to compute the inverse FFT. If not given, the last
+        axis is used.
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see `fft`). Default is "backward".
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+        See :func:`fft` for more details.
+    workers : int, optional
+        Maximum number of workers to use for parallel computation. If negative,
+        the value wraps around from ``os.cpu_count()``.
+        See :func:`~scipy.fft.fft` for more details.
+    plan : object, optional
+        This argument is reserved for passing in a precomputed plan provided
+        by downstream FFT vendors. It is currently not used in SciPy.
+
+        .. versionadded:: 1.5.0
+
+    Returns
+    -------
+    out : ndarray
+        The truncated or zero-padded input, transformed along the axis
+        indicated by `axis`, or the last one if `axis` is not specified.
+        The length of the transformed axis is `n`, or, if `n` is not given,
+        ``2*(m-1)`` where ``m`` is the length of the transformed axis of the
+        input. To get an odd number of output points, `n` must be specified.
+
+    Raises
+    ------
+    IndexError
+        If `axis` is larger than the last axis of `x`.
+
+    See Also
+    --------
+    rfft : The 1-D FFT of real input, of which `irfft` is inverse.
+    fft : The 1-D FFT.
+    irfft2 : The inverse of the 2-D FFT of real input.
+    irfftn : The inverse of the N-D FFT of real input.
+
+    Notes
+    -----
+    Returns the real valued `n`-point inverse discrete Fourier transform
+    of `x`, where `x` contains the non-negative frequency terms of a
+    Hermitian-symmetric sequence. `n` is the length of the result, not the
+    input.
+
+    If you specify an `n` such that `x` must be zero-padded or truncated, the
+    extra/removed values will be added/removed at high frequencies. One can
+    thus resample a series to `m` points via Fourier interpolation by:
+    ``x_resamp = irfft(rfft(x), m)``.
+
+    The default value of `n` assumes an even output length. By the Hermitian
+    symmetry, the last imaginary component must be 0 and so is ignored. To
+    avoid losing information, the correct length of the real input *must* be
+    given.
+
+    Examples
+    --------
+    >>> import scipy.fft
+    >>> scipy.fft.ifft([1, -1j, -1, 1j])
+    array([0.+0.j,  1.+0.j,  0.+0.j,  0.+0.j]) # may vary
+    >>> scipy.fft.irfft([1, -1j, -1])
+    array([0.,  1.,  0.,  0.])
+
+    Notice how the last term in the input to the ordinary `ifft` is the
+    complex conjugate of the second term, and the output has zero imaginary
+    part everywhere. When calling `irfft`, the negative frequencies are not
+    specified, and the output array is purely real.
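+
+    The Fourier-interpolation resampling mentioned in the Notes, sketched by
+    upsampling a 4-point signal to 8 points:
+
+    >>> import numpy as np
+    >>> x = np.array([0.0, 1.0, 0.0, -1.0])
+    >>> scipy.fft.irfft(scipy.fft.rfft(x), 8).shape
+    (8,)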
+
+    """
+    return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def hfft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *,
+         plan=None):
+    """
+    Compute the FFT of a signal that has Hermitian symmetry, i.e., a real
+    spectrum.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    n : int, optional
+        Length of the transformed axis of the output. For `n` output
+        points, ``n//2 + 1`` input points are necessary. If the input is
+        longer than this, it is cropped. If it is shorter than this, it is
+        padded with zeros. If `n` is not given, it is taken to be ``2*(m-1)``,
+        where ``m`` is the length of the input along the axis specified by
+        `axis`.
+    axis : int, optional
+        Axis over which to compute the FFT. If not given, the last
+        axis is used.
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see `fft`). Default is "backward".
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+        See `fft` for more details.
+    workers : int, optional
+        Maximum number of workers to use for parallel computation. If negative,
+        the value wraps around from ``os.cpu_count()``.
+        See :func:`~scipy.fft.fft` for more details.
+    plan : object, optional
+        This argument is reserved for passing in a precomputed plan provided
+        by downstream FFT vendors. It is currently not used in SciPy.
+
+        .. versionadded:: 1.5.0
+
+    Returns
+    -------
+    out : ndarray
+        The truncated or zero-padded input, transformed along the axis
+        indicated by `axis`, or the last one if `axis` is not specified.
+        The length of the transformed axis is `n`, or, if `n` is not given,
+        ``2*m - 2``, where ``m`` is the length of the transformed axis of
+        the input. To get an odd number of output points, `n` must be
+        specified, for instance, as ``2*m - 1`` in the typical case,
+
+    Raises
+    ------
+    IndexError
+        If `axis` is larger than the last axis of `x`.
+
+    See Also
+    --------
+    rfft : Compute the 1-D FFT for real input.
+    ihfft : The inverse of `hfft`.
+    hfftn : Compute the N-D FFT of a Hermitian signal.
+
+    Notes
+    -----
+    `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
+    opposite case: here the signal has Hermitian symmetry in the time
+    domain and is real in the frequency domain. So, here, it's `hfft`, for
+    which you must supply the length of the result if it is to be odd:
+
+    * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error,
+    * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error.
+
+    Examples
+    --------
+    >>> from scipy.fft import fft, hfft
+    >>> import numpy as np
+    >>> a = 2 * np.pi * np.arange(10) / 10
+    >>> signal = np.cos(a) + 3j * np.sin(3 * a)
+    >>> fft(signal).round(10)
+    array([ -0.+0.j,   5.+0.j,  -0.+0.j,  15.-0.j,   0.+0.j,   0.+0.j,
+            -0.+0.j, -15.-0.j,   0.+0.j,   5.+0.j])
+    >>> hfft(signal[:6]).round(10) # Input first half of signal
+    array([  0.,   5.,   0.,  15.,  -0.,   0.,   0., -15.,  -0.,   5.])
+    >>> hfft(signal, 10)  # Input entire signal and truncate
+    array([  0.,   5.,   0.,  15.,  -0.,   0.,   0., -15.,  -0.,   5.])
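+
+    The even-length identity from the Notes, checked numerically on a small
+    Hermitian half-signal with real endpoints:
+
+    >>> from scipy.fft import ihfft
+    >>> b = np.array([1.0, 2.0, 3.0])
+    >>> np.allclose(ihfft(hfft(b, 2*len(b) - 2)), b)
+    True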
+    """
+    return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def ihfft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *,
+          plan=None):
+    """
+    Compute the inverse FFT of a signal that has Hermitian symmetry.
+
+    Parameters
+    ----------
+    x : array_like
+        Input array.
+    n : int, optional
+        Length of the inverse FFT, the number of points along
+        transformation axis in the input to use.  If `n` is smaller than
+        the length of the input, the input is cropped. If it is larger,
+        the input is padded with zeros. If `n` is not given, the length of
+        the input along the axis specified by `axis` is used.
+    axis : int, optional
+        Axis over which to compute the inverse FFT. If not given, the last
+        axis is used.
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see `fft`). Default is "backward".
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+        See `fft` for more details.
+    workers : int, optional
+        Maximum number of workers to use for parallel computation. If negative,
+        the value wraps around from ``os.cpu_count()``.
+        See :func:`~scipy.fft.fft` for more details.
+    plan : object, optional
+        This argument is reserved for passing in a precomputed plan provided
+        by downstream FFT vendors. It is currently not used in SciPy.
+
+        .. versionadded:: 1.5.0
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axis
+        indicated by `axis`, or the last one if `axis` is not specified.
+        The length of the transformed axis is ``n//2 + 1``.
+
+    See Also
+    --------
+    hfft, irfft
+
+    Notes
+    -----
+    `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
+    opposite case: here, the signal has Hermitian symmetry in the time
+    domain and is real in the frequency domain. So, here, it's `hfft`, for
+    which you must supply the length of the result if it is to be odd:
+
+    * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error,
+    * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error.
+
+    Examples
+    --------
+    >>> from scipy.fft import ifft, ihfft
+    >>> import numpy as np
+    >>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])
+    >>> ifft(spectrum)
+    array([1.+0.j,  2.+0.j,  3.+0.j,  4.+0.j,  3.+0.j,  2.+0.j]) # may vary
+    >>> ihfft(spectrum)
+    array([ 1.-0.j,  2.-0.j,  3.-0.j,  4.-0.j]) # may vary
+    """
+    return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def fftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *,
+         plan=None):
+    """
+    Compute the N-D discrete Fourier Transform.
+
+    This function computes the N-D discrete Fourier Transform over
+    any number of axes in an M-D array by means of the Fast Fourier
+    Transform (FFT).
+
+    Parameters
+    ----------
+    x : array_like
+        Input array, can be complex.
+    s : sequence of ints, optional
+        Shape (length of each transformed axis) of the output
+        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
+        This corresponds to ``n`` for ``fft(x, n)``.
+        Along any axis, if the given shape is smaller than that of the input,
+        the input is cropped. If it is larger, the input is padded with zeros.
+        If `s` is not given, the shape of the input along the axes specified
+        by `axes` is used.
+    axes : sequence of ints, optional
+        Axes over which to compute the FFT. If not given, the last ``len(s)``
+        axes are used, or all axes if `s` is also not specified.
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see `fft`). Default is "backward".
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+        See :func:`fft` for more details.
+    workers : int, optional
+        Maximum number of workers to use for parallel computation. If negative,
+        the value wraps around from ``os.cpu_count()``.
+        See :func:`~scipy.fft.fft` for more details.
+    plan : object, optional
+        This argument is reserved for passing in a precomputed plan provided
+        by downstream FFT vendors. It is currently not used in SciPy.
+
+        .. versionadded:: 1.5.0
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axes
+        indicated by `axes`, or by a combination of `s` and `x`,
+        as explained in the parameters section above.
+
+    Raises
+    ------
+    ValueError
+        If `s` and `axes` have different length.
+    IndexError
+        If an element of `axes` is larger than the number of axes of `x`.
+
+    See Also
+    --------
+    ifftn : The inverse of `fftn`, the inverse N-D FFT.
+    fft : The 1-D FFT, with definitions and conventions used.
+    rfftn : The N-D FFT of real input.
+    fft2 : The 2-D FFT.
+    fftshift : Shifts zero-frequency terms to center of array.
+
+    Notes
+    -----
+    The output, analogously to `fft`, contains the term for zero frequency in
+    the low-order corner of all axes, the positive frequency terms in the
+    first half of all axes, the term for the Nyquist frequency in the middle
+    of all axes and the negative frequency terms in the second half of all
+    axes, in order of decreasingly negative frequency.
+
+    Examples
+    --------
+    >>> import scipy.fft
+    >>> import numpy as np
+    >>> x = np.mgrid[:3, :3, :3][0]
+    >>> scipy.fft.fftn(x, axes=(1, 2))
+    array([[[ 0.+0.j,   0.+0.j,   0.+0.j], # may vary
+            [ 0.+0.j,   0.+0.j,   0.+0.j],
+            [ 0.+0.j,   0.+0.j,   0.+0.j]],
+           [[ 9.+0.j,   0.+0.j,   0.+0.j],
+            [ 0.+0.j,   0.+0.j,   0.+0.j],
+            [ 0.+0.j,   0.+0.j,   0.+0.j]],
+           [[18.+0.j,   0.+0.j,   0.+0.j],
+            [ 0.+0.j,   0.+0.j,   0.+0.j],
+            [ 0.+0.j,   0.+0.j,   0.+0.j]]])
+    >>> scipy.fft.fftn(x, (2, 2), axes=(0, 1))
+    array([[[ 2.+0.j,  2.+0.j,  2.+0.j], # may vary
+            [ 0.+0.j,  0.+0.j,  0.+0.j]],
+           [[-2.+0.j, -2.+0.j, -2.+0.j],
+            [ 0.+0.j,  0.+0.j,  0.+0.j]]])
+
+    >>> import matplotlib.pyplot as plt
+    >>> rng = np.random.default_rng()
+    >>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
+    ...                      2 * np.pi * np.arange(200) / 34)
+    >>> S = np.sin(X) + np.cos(Y) + rng.uniform(0, 1, X.shape)
+    >>> FS = scipy.fft.fftn(S)
+    >>> plt.imshow(np.log(np.abs(scipy.fft.fftshift(FS))**2))
+    <matplotlib.image.AxesImage object at 0x...>
+    >>> plt.show()
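+
+    The N-D transform factors into repeated 1-D transforms along each axis,
+    which can be checked directly on a small array; a quick sketch:
+
+    >>> y = rng.uniform(size=(4, 4))
+    >>> np.allclose(scipy.fft.fftn(y),
+    ...             scipy.fft.fft(scipy.fft.fft(y, axis=0), axis=1))
+    True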
+
+    """
+    return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def ifftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *,
+          plan=None):
+    """
+    Compute the N-D inverse discrete Fourier Transform.
+
+    This function computes the inverse of the N-D discrete
+    Fourier Transform over any number of axes in an M-D array by
+    means of the Fast Fourier Transform (FFT).  In other words,
+    ``ifftn(fftn(x)) == x`` to within numerical accuracy.
+
+    The input, analogously to `ifft`, should be ordered in the same way as is
+    returned by `fftn`, i.e., it should have the term for zero frequency
+    in all axes in the low-order corner, the positive frequency terms in the
+    first half of all axes, the term for the Nyquist frequency in the middle
+    of all axes and the negative frequency terms in the second half of all
+    axes, in order of decreasingly negative frequency.
+
+    Parameters
+    ----------
+    x : array_like
+        Input array, can be complex.
+    s : sequence of ints, optional
+        Shape (length of each transformed axis) of the output
+        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
+        This corresponds to ``n`` for ``ifft(x, n)``.
+        Along any axis, if the given shape is smaller than that of the input,
+        the input is cropped. If it is larger, the input is padded with zeros.
+        If `s` is not given, the shape of the input along the axes specified
+        by `axes` is used. See notes for issue on `ifft` zero padding.
+    axes : sequence of ints, optional
+        Axes over which to compute the IFFT.  If not given, the last ``len(s)``
+        axes are used, or all axes if `s` is also not specified.
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see `fft`). Default is "backward".
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+        See :func:`fft` for more details.
+    workers : int, optional
+        Maximum number of workers to use for parallel computation. If negative,
+        the value wraps around from ``os.cpu_count()``.
+        See :func:`~scipy.fft.fft` for more details.
+    plan : object, optional
+        This argument is reserved for passing in a precomputed plan provided
+        by downstream FFT vendors. It is currently not used in SciPy.
+
+        .. versionadded:: 1.5.0
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axes
+        indicated by `axes`, or by a combination of `s` and `x`,
+        as explained in the parameters section above.
+
+    Raises
+    ------
+    ValueError
+        If `s` and `axes` have different length.
+    IndexError
+        If an element of `axes` is larger than the number of axes of `x`.
+
+    See Also
+    --------
+    fftn : The forward N-D FFT, of which `ifftn` is the inverse.
+    ifft : The 1-D inverse FFT.
+    ifft2 : The 2-D inverse FFT.
+    ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
+        of array.
+
+    Notes
+    -----
+    Zero-padding, analogously with `ifft`, is performed by appending zeros to
+    the input along the specified dimension. Although this is the common
+    approach, it might lead to surprising results. If another form of zero
+    padding is desired, it must be performed before `ifftn` is called.
+
+    Examples
+    --------
+    >>> import scipy.fft
+    >>> import numpy as np
+    >>> x = np.eye(4)
+    >>> scipy.fft.ifftn(scipy.fft.fftn(x, axes=(0,)), axes=(1,))
+    array([[1.+0.j,  0.+0.j,  0.+0.j,  0.+0.j], # may vary
+           [0.+0.j,  1.+0.j,  0.+0.j,  0.+0.j],
+           [0.+0.j,  0.+0.j,  1.+0.j,  0.+0.j],
+           [0.+0.j,  0.+0.j,  0.+0.j,  1.+0.j]])
+
+
+    Create and plot an image with band-limited frequency content:
+
+    >>> import matplotlib.pyplot as plt
+    >>> rng = np.random.default_rng()
+    >>> n = np.zeros((200,200), dtype=complex)
+    >>> n[60:80, 20:40] = np.exp(1j*rng.uniform(0, 2*np.pi, (20, 20)))
+    >>> im = scipy.fft.ifftn(n).real
+    >>> plt.imshow(im)
+    <matplotlib.image.AxesImage object at 0x...>
+    >>> plt.show()
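+
+    A direct check of the round-trip property ``ifftn(fftn(x)) == x`` stated
+    above:
+
+    >>> np.allclose(scipy.fft.ifftn(scipy.fft.fftn(x)), x)
+    True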
+
+    """
+    return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def fft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *,
+         plan=None):
+    """
+    Compute the 2-D discrete Fourier Transform
+
+    This function computes the N-D discrete Fourier Transform
+    over any axes in an M-D array by means of the
+    Fast Fourier Transform (FFT). By default, the transform is computed over
+    the last two axes of the input array, i.e., a 2-dimensional FFT.
+
+    Parameters
+    ----------
+    x : array_like
+        Input array, can be complex
+    s : sequence of ints, optional
+        Shape (length of each transformed axis) of the output
+        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
+        This corresponds to ``n`` for ``fft(x, n)``.
+        Along each axis, if the given shape is smaller than that of the input,
+        the input is cropped. If it is larger, the input is padded with zeros.
+        If `s` is not given, the shape of the input along the axes specified
+        by `axes` is used.
+    axes : sequence of ints, optional
+        Axes over which to compute the FFT. If not given, the last two axes are
+        used.
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see `fft`). Default is "backward".
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+        See :func:`fft` for more details.
+    workers : int, optional
+        Maximum number of workers to use for parallel computation. If negative,
+        the value wraps around from ``os.cpu_count()``.
+        See :func:`~scipy.fft.fft` for more details.
+    plan : object, optional
+        This argument is reserved for passing in a precomputed plan provided
+        by downstream FFT vendors. It is currently not used in SciPy.
+
+        .. versionadded:: 1.5.0
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axes
+        indicated by `axes`, or the last two axes if `axes` is not given.
+
+    Raises
+    ------
+    ValueError
+        If `s` and `axes` have different length, or `axes` not given and
+        ``len(s) != 2``.
+    IndexError
+        If an element of `axes` is larger than the number of axes of `x`.
+
+    See Also
+    --------
+    ifft2 : The inverse 2-D FFT.
+    fft : The 1-D FFT.
+    fftn : The N-D FFT.
+    fftshift : Shifts zero-frequency terms to the center of the array.
+        For 2-D input, swaps first and third quadrants, and second
+        and fourth quadrants.
+
+    Notes
+    -----
+    `fft2` is just `fftn` with a different default for `axes`.
+
+    The output, analogously to `fft`, contains the term for zero frequency in
+    the low-order corner of the transformed axes, the positive frequency terms
+    in the first half of these axes, the term for the Nyquist frequency in the
+    middle of the axes and the negative frequency terms in the second half of
+    the axes, in order of decreasingly negative frequency.
+
+    See `fftn` for details and a plotting example, and `fft` for
+    definitions and conventions used.
+
+
+    Examples
+    --------
+    >>> import scipy.fft
+    >>> import numpy as np
+    >>> x = np.mgrid[:5, :5][0]
+    >>> scipy.fft.fft2(x)
+    array([[ 50.  +0.j        ,   0.  +0.j        ,   0.  +0.j        , # may vary
+              0.  +0.j        ,   0.  +0.j        ],
+           [-12.5+17.20477401j,   0.  +0.j        ,   0.  +0.j        ,
+              0.  +0.j        ,   0.  +0.j        ],
+           [-12.5 +4.0614962j ,   0.  +0.j        ,   0.  +0.j        ,
+              0.  +0.j        ,   0.  +0.j        ],
+           [-12.5 -4.0614962j ,   0.  +0.j        ,   0.  +0.j        ,
+              0.  +0.j        ,   0.  +0.j        ],
+           [-12.5-17.20477401j,   0.  +0.j        ,   0.  +0.j        ,
+              0.  +0.j        ,   0.  +0.j        ]])
+
+    """
+    return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def ifft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *,
+          plan=None):
+    """
+    Compute the 2-D inverse discrete Fourier Transform.
+
+    This function computes the inverse of the 2-D discrete Fourier
+    Transform over any number of axes in an M-D array by means of
+    the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(x)) == x``
+    to within numerical accuracy. By default, the inverse transform is
+    computed over the last two axes of the input array.
+
+    The input, analogously to `ifft`, should be ordered in the same way as is
+    returned by `fft2`, i.e., it should have the term for zero frequency
+    in the low-order corner of the two axes, the positive frequency terms in
+    the first half of these axes, the term for the Nyquist frequency in the
+    middle of the axes and the negative frequency terms in the second half of
+    both axes, in order of decreasingly negative frequency.
+
+    Parameters
+    ----------
+    x : array_like
+        Input array, can be complex.
+    s : sequence of ints, optional
+        Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
+        ``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
+        Along each axis, if the given shape is smaller than that of the input,
+        the input is cropped. If it is larger, the input is padded with zeros.
+        If `s` is not given, the shape of the input along the axes specified
+        by `axes` is used.  See notes for issue on `ifft` zero padding.
+    axes : sequence of ints, optional
+        Axes over which to compute the FFT. If not given, the last two
+        axes are used.
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see `fft`). Default is "backward".
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+        See :func:`fft` for more details.
+    workers : int, optional
+        Maximum number of workers to use for parallel computation. If negative,
+        the value wraps around from ``os.cpu_count()``.
+        See :func:`~scipy.fft.fft` for more details.
+    plan : object, optional
+        This argument is reserved for passing in a precomputed plan provided
+        by downstream FFT vendors. It is currently not used in SciPy.
+
+        .. versionadded:: 1.5.0
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axes
+        indicated by `axes`, or the last two axes if `axes` is not given.
+
+    Raises
+    ------
+    ValueError
+        If `s` and `axes` have different length, or `axes` not given and
+        ``len(s) != 2``.
+    IndexError
+        If an element of `axes` is larger than the number of axes of `x`.
+
+    See Also
+    --------
+    fft2 : The forward 2-D FFT, of which `ifft2` is the inverse.
+    ifftn : The inverse of the N-D FFT.
+    fft : The 1-D FFT.
+    ifft : The 1-D inverse FFT.
+
+    Notes
+    -----
+    `ifft2` is just `ifftn` with a different default for `axes`.
+
+    See `ifftn` for details and a plotting example, and `fft` for
+    definition and conventions used.
+
+    Zero-padding, analogously with `ifft`, is performed by appending zeros to
+    the input along the specified dimension. Although this is the common
+    approach, it might lead to surprising results. If another form of zero
+    padding is desired, it must be performed before `ifft2` is called.
+
+    Examples
+    --------
+    >>> import scipy.fft
+    >>> import numpy as np
+    >>> x = 4 * np.eye(4)
+    >>> scipy.fft.ifft2(x)
+    array([[1.+0.j,  0.+0.j,  0.+0.j,  0.+0.j], # may vary
+           [0.+0.j,  0.+0.j,  0.+0.j,  1.+0.j],
+           [0.+0.j,  0.+0.j,  1.+0.j,  0.+0.j],
+           [0.+0.j,  1.+0.j,  0.+0.j,  0.+0.j]])
+
+    """
+    return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def rfftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *,
+          plan=None):
+    """
+    Compute the N-D discrete Fourier Transform for real input.
+
+    This function computes the N-D discrete Fourier Transform over
+    any number of axes in an M-D real array by means of the Fast
+    Fourier Transform (FFT). By default, all axes are transformed, with the
+    real transform performed over the last axis, while the remaining
+    transforms are complex.
+
+    Parameters
+    ----------
+    x : array_like
+        Input array, taken to be real.
+    s : sequence of ints, optional
+        Shape (length along each transformed axis) to use from the input.
+        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
+        The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
+        for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
+        Along any axis, if the given shape is smaller than that of the input,
+        the input is cropped. If it is larger, the input is padded with zeros.
+        If `s` is not given, the shape of the input along the axes specified
+        by `axes` is used.
+    axes : sequence of ints, optional
+        Axes over which to compute the FFT. If not given, the last ``len(s)``
+        axes are used, or all axes if `s` is also not specified.
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see `fft`). Default is "backward".
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+        See :func:`fft` for more details.
+    workers : int, optional
+        Maximum number of workers to use for parallel computation. If negative,
+        the value wraps around from ``os.cpu_count()``.
+        See :func:`~scipy.fft.fft` for more details.
+    plan : object, optional
+        This argument is reserved for passing in a precomputed plan provided
+        by downstream FFT vendors. It is currently not used in SciPy.
+
+        .. versionadded:: 1.5.0
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axes
+        indicated by `axes`, or by a combination of `s` and `x`,
+        as explained in the parameters section above.
+        The length of the last axis transformed will be ``s[-1]//2+1``,
+        while the remaining transformed axes will have lengths according to
+        `s`, or unchanged from the input.
+
+    Raises
+    ------
+    ValueError
+        If `s` and `axes` have different length.
+    IndexError
+        If an element of `axes` is larger than the number of axes of `x`.
+
+    See Also
+    --------
+    irfftn : The inverse of `rfftn`, i.e., the inverse of the N-D FFT
+         of real input.
+    fft : The 1-D FFT, with definitions and conventions used.
+    rfft : The 1-D FFT of real input.
+    fftn : The N-D FFT.
+    rfft2 : The 2-D FFT of real input.
+
+    Notes
+    -----
+    The transform for real input is performed over the last transformation
+    axis, as by `rfft`, then the transform over the remaining axes is
+    performed as by `fftn`. The order of the output is as for `rfft` for the
+    final transformation axis, and as for `fftn` for the remaining
+    transformation axes.
+
+    See `fft` for details, definitions and conventions used.
+
+    Examples
+    --------
+    >>> import scipy.fft
+    >>> import numpy as np
+    >>> x = np.ones((2, 2, 2))
+    >>> scipy.fft.rfftn(x)
+    array([[[8.+0.j,  0.+0.j], # may vary
+            [0.+0.j,  0.+0.j]],
+           [[0.+0.j,  0.+0.j],
+            [0.+0.j,  0.+0.j]]])
+
+    >>> scipy.fft.rfftn(x, axes=(2, 0))
+    array([[[4.+0.j,  0.+0.j], # may vary
+            [4.+0.j,  0.+0.j]],
+           [[0.+0.j,  0.+0.j],
+            [0.+0.j,  0.+0.j]]])
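+
+    As noted in the Returns section, the last transformed axis has length
+    ``s[-1]//2 + 1``:
+
+    >>> scipy.fft.rfftn(np.ones((2, 3, 8))).shape
+    (2, 3, 5)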
+
+    """
+    return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def rfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *,
+          plan=None):
+    """
+    Compute the 2-D FFT of a real array.
+
+    Parameters
+    ----------
+    x : array
+        Input array, taken to be real.
+    s : sequence of ints, optional
+        Shape of the FFT.
+    axes : sequence of ints, optional
+        Axes over which to compute the FFT.
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see `fft`). Default is "backward".
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+        See :func:`fft` for more details.
+    workers : int, optional
+        Maximum number of workers to use for parallel computation. If negative,
+        the value wraps around from ``os.cpu_count()``.
+        See :func:`~scipy.fft.fft` for more details.
+    plan : object, optional
+        This argument is reserved for passing in a precomputed plan provided
+        by downstream FFT vendors. It is currently not used in SciPy.
+
+        .. versionadded:: 1.5.0
+
+    Returns
+    -------
+    out : ndarray
+        The result of the real 2-D FFT.
+
+    See Also
+    --------
+    irfft2 : The inverse of the 2-D FFT of real input.
+    rfft : The 1-D FFT of real input.
+    rfftn : Compute the N-D discrete Fourier Transform for real
+            input.
+
+    Notes
+    -----
+    This is really just `rfftn` with different default behavior.
+    For more details see `rfftn`.
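+
+    Examples
+    --------
+    A minimal sketch of the output shape (the last axis is halved, as for
+    `rfft`):
+
+    >>> import numpy as np
+    >>> import scipy.fft
+    >>> scipy.fft.rfft2(np.ones((4, 6))).shape
+    (4, 4)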
+
+    """
+    return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def irfftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *,
+           plan=None):
+    """
+    Computes the inverse of `rfftn`
+
+    This function computes the inverse of the N-D discrete
+    Fourier Transform for real input over any number of axes in an
+    M-D array by means of the Fast Fourier Transform (FFT). In
+    other words, ``irfftn(rfftn(x), x.shape) == x`` to within numerical
+    accuracy. (The ``x.shape`` is necessary like ``len(x)`` is for `irfft`,
+    and for the same reason.)
+
+    The input should be ordered in the same way as is returned by `rfftn`,
+    i.e., as for `irfft` for the final transformation axis, and as for `ifftn`
+    along all the other axes.
+
+    Parameters
+    ----------
+    x : array_like
+        Input array.
+    s : sequence of ints, optional
+        Shape (length of each transformed axis) of the output
+        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
+        number of input points used along this axis, except for the last axis,
+        where ``s[-1]//2+1`` points of the input are used.
+        Along any axis, if the shape indicated by `s` is smaller than that of
+        the input, the input is cropped. If it is larger, the input is padded
+        with zeros. If `s` is not given, the shape of the input along the axes
+        specified by `axes` is used, except for the last axis, which is taken
+        to be ``2*(m-1)``, where ``m`` is the length of the input along that axis.
+    axes : sequence of ints, optional
+        Axes over which to compute the inverse FFT. If not given, the last
+        `len(s)` axes are used, or all axes if `s` is also not specified.
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see `fft`). Default is "backward".
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+        See :func:`fft` for more details.
+    workers : int, optional
+        Maximum number of workers to use for parallel computation. If negative,
+        the value wraps around from ``os.cpu_count()``.
+        See :func:`~scipy.fft.fft` for more details.
+    plan : object, optional
+        This argument is reserved for passing in a precomputed plan provided
+        by downstream FFT vendors. It is currently not used in SciPy.
+
+        .. versionadded:: 1.5.0
+
+    Returns
+    -------
+    out : ndarray
+        The truncated or zero-padded input, transformed along the axes
+        indicated by `axes`, or by a combination of `s` and `x`,
+        as explained in the parameters section above.
+        The length of each transformed axis is as given by the corresponding
+        element of `s`, or the length of the input in every axis except for the
+        last one if `s` is not given. In the final transformed axis the length
+        of the output when `s` is not given is ``2*(m-1)``, where ``m`` is the
+        length of the final transformed axis of the input. To get an odd
+        number of output points in the final axis, `s` must be specified.
+
+    Raises
+    ------
+    ValueError
+        If `s` and `axes` have different length.
+    IndexError
+        If an element of `axes` is larger than the number of axes of `x`.
+
+    See Also
+    --------
+    rfftn : The forward N-D FFT of real input,
+            of which `irfftn` is the inverse.
+    fft : The 1-D FFT, with definitions and conventions used.
+    irfft : The inverse of the 1-D FFT of real input.
+    irfft2 : The inverse of the 2-D FFT of real input.
+
+    Notes
+    -----
+    See `fft` for definitions and conventions used.
+
+    See `rfft` for definitions and conventions used for real input.
+
+    The default value of `s` assumes an even output length in the final
+    transformation axis. When performing the final complex to real
+    transformation, the Hermitian symmetry requires that the last imaginary
+    component along that axis must be 0 and so it is ignored. To avoid losing
+    information, the correct length of the real input *must* be given.
+
+    Examples
+    --------
+    >>> import scipy.fft
+    >>> import numpy as np
+    >>> x = np.zeros((3, 2, 2))
+    >>> x[0, 0, 0] = 3 * 2 * 2
+    >>> scipy.fft.irfftn(x)
+    array([[[1.,  1.],
+            [1.,  1.]],
+           [[1.,  1.],
+            [1.,  1.]],
+           [[1.,  1.],
+            [1.,  1.]]])
+
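+    If the final axis of the original input had odd length, `s` must be
+    given to recover it. A shape-only sketch:
+
+    >>> y = scipy.fft.rfftn(np.ones((2, 3)))
+    >>> scipy.fft.irfftn(y, s=(2, 3)).shape
+    (2, 3)
+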
+    """
+    return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def irfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *,
+           plan=None):
+    """
+    Compute the inverse of `rfft2`.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    s : sequence of ints, optional
+        Shape of the real output to the inverse FFT.
+    axes : sequence of ints, optional
+        The axes over which to compute the inverse fft.
+        Default is the last two axes.
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see `fft`). Default is "backward".
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+        See :func:`fft` for more details.
+    workers : int, optional
+        Maximum number of workers to use for parallel computation. If negative,
+        the value wraps around from ``os.cpu_count()``.
+        See :func:`~scipy.fft.fft` for more details.
+    plan : object, optional
+        This argument is reserved for passing in a precomputed plan provided
+        by downstream FFT vendors. It is currently not used in SciPy.
+
+        .. versionadded:: 1.5.0
+
+    Returns
+    -------
+    out : ndarray
+        The result of the inverse real 2-D FFT.
+
+    See Also
+    --------
+    rfft2 : The 2-D FFT of real input.
+    irfft : The inverse of the 1-D FFT of real input.
+    irfftn : The inverse of the N-D FFT of real input.
+
+    Notes
+    -----
+    This is really `irfftn` with different defaults.
+    For more details see `irfftn`.
+
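+    Examples
+    --------
+    A minimal round-trip sketch, checking shapes only:
+
+    >>> import scipy.fft
+    >>> import numpy as np
+    >>> y = scipy.fft.rfft2(np.ones((3, 4)))
+    >>> scipy.fft.irfft2(y, s=(3, 4)).shape
+    (3, 4)
+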
+    """
+    return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def hfftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *,
+          plan=None):
+    """
+    Compute the N-D FFT of Hermitian symmetric complex input, i.e., a
+    signal with a real spectrum.
+
+    This function computes the N-D discrete Fourier Transform for a
+    Hermitian symmetric complex input over any number of axes in an
+    M-D array by means of the Fast Fourier Transform (FFT). In other
+    words, ``ihfftn(hfftn(x, s)) == x`` to within numerical accuracy. (``s``
+    here is ``x.shape`` with ``s[-1] = x.shape[-1] * 2 - 1``; this is
+    necessary for the same reason ``x.shape`` would be for `irfft`.)
+
+    Parameters
+    ----------
+    x : array_like
+        Input array.
+    s : sequence of ints, optional
+        Shape (length of each transformed axis) of the output
+        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
+        number of input points used along this axis, except for the last axis,
+        where ``s[-1]//2+1`` points of the input are used.
+        Along any axis, if the shape indicated by `s` is smaller than that of
+        the input, the input is cropped. If it is larger, the input is padded
+        with zeros. If `s` is not given, the shape of the input along the axes
+        specified by `axes` is used, except for the last axis, which is taken
+        to be ``2*(m-1)``, where ``m`` is the length of the input along that
+        axis.
+    axes : sequence of ints, optional
+        Axes over which to compute the FFT. If not given, the last
+        `len(s)` axes are used, or all axes if `s` is also not specified.
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see `fft`). Default is "backward".
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+        See :func:`fft` for more details.
+    workers : int, optional
+        Maximum number of workers to use for parallel computation. If negative,
+        the value wraps around from ``os.cpu_count()``.
+        See :func:`~scipy.fft.fft` for more details.
+    plan : object, optional
+        This argument is reserved for passing in a precomputed plan provided
+        by downstream FFT vendors. It is currently not used in SciPy.
+
+        .. versionadded:: 1.5.0
+
+    Returns
+    -------
+    out : ndarray
+        The truncated or zero-padded input, transformed along the axes
+        indicated by `axes`, or by a combination of `s` and `x`,
+        as explained in the parameters section above.
+        The length of each transformed axis is as given by the corresponding
+        element of `s`, or the length of the input in every axis except for the
+        last one if `s` is not given.  In the final transformed axis the length
+        of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the
+        length of the final transformed axis of the input.  To get an odd
+        number of output points in the final axis, `s` must be specified.
+
+    Raises
+    ------
+    ValueError
+        If `s` and `axes` have different length.
+    IndexError
+        If an element of `axes` is larger than the number of axes of `x`.
+
+    See Also
+    --------
+    ihfftn : The inverse N-D FFT with real spectrum. Inverse of `hfftn`.
+    fft : The 1-D FFT, with definitions and conventions used.
+    rfft : Forward FFT of real input.
+
+    Notes
+    -----
+    For a 1-D signal ``x`` to have a real spectrum, it must satisfy
+    the Hermitian property::
+
+        x[i] == np.conj(x[-i]) for all i
+
+    This generalizes into higher dimensions by reflecting over each axis in
+    turn::
+
+        x[i, j, k, ...] == np.conj(x[-i, -j, -k, ...]) for all i, j, k, ...
+
+    This should not be confused with a Hermitian matrix, for which the
+    transpose is its own conjugate::
+
+        x[i, j] == np.conj(x[j, i]) for all i, j
+
+    The default value of `s` assumes an even output length in the final
+    transformation axis. When performing the final complex to real
+    transformation, the Hermitian symmetry requires that the last imaginary
+    component along that axis must be 0 and so it is ignored. To avoid losing
+    information, the correct length of the real input *must* be given.
+
+    Examples
+    --------
+    >>> import scipy.fft
+    >>> import numpy as np
+    >>> x = np.ones((3, 2, 2))
+    >>> scipy.fft.hfftn(x)
+    array([[[12.,  0.],
+            [ 0.,  0.]],
+           [[ 0.,  0.],
+            [ 0.,  0.]],
+           [[ 0.,  0.],
+            [ 0.,  0.]]])
+
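+    To produce an odd length in the final transformed axis, `s` must be
+    given explicitly. A shape-only sketch:
+
+    >>> scipy.fft.hfftn(np.ones((3, 2)), s=(3, 3)).shape
+    (3, 3)
+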
+    """
+    return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def hfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *,
+          plan=None):
+    """
+    Compute the 2-D FFT of a Hermitian complex array.
+
+    Parameters
+    ----------
+    x : array
+        Input array, taken to be Hermitian complex.
+    s : sequence of ints, optional
+        Shape of the real output.
+    axes : sequence of ints, optional
+        Axes over which to compute the FFT.
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see `fft`). Default is "backward".
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+        See `fft` for more details.
+    workers : int, optional
+        Maximum number of workers to use for parallel computation. If negative,
+        the value wraps around from ``os.cpu_count()``.
+        See :func:`~scipy.fft.fft` for more details.
+    plan : object, optional
+        This argument is reserved for passing in a precomputed plan provided
+        by downstream FFT vendors. It is currently not used in SciPy.
+
+        .. versionadded:: 1.5.0
+
+    Returns
+    -------
+    out : ndarray
+        The real result of the 2-D FFT of the Hermitian complex input.
+
+    See Also
+    --------
+    hfftn : Compute the N-D discrete Fourier Transform for Hermitian
+            complex input.
+
+    Notes
+    -----
+    This is really just `hfftn` with different default behavior.
+    For more details see `hfftn`.
+
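+    Examples
+    --------
+    A shape-only sketch:
+
+    >>> import scipy.fft
+    >>> import numpy as np
+    >>> scipy.fft.hfft2(np.ones((4, 3)), s=(4, 4)).shape
+    (4, 4)
+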
+    """
+    return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def ihfftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *,
+           plan=None):
+    """
+    Compute the N-D inverse discrete Fourier Transform for a real
+    spectrum.
+
+    This function computes the N-D inverse discrete Fourier Transform
+    over any number of axes in an M-D real array by means of the Fast
+    Fourier Transform (FFT). By default, all axes are transformed, with the
+    real transform performed over the last axis, while the remaining transforms
+    are complex.
+
+    Parameters
+    ----------
+    x : array_like
+        Input array, taken to be real.
+    s : sequence of ints, optional
+        Shape (length along each transformed axis) to use from the input.
+        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
+        Along any axis, if the given shape is smaller than that of the input,
+        the input is cropped. If it is larger, the input is padded with zeros.
+        If `s` is not given, the shape of the input along the axes specified
+        by `axes` is used.
+    axes : sequence of ints, optional
+        Axes over which to compute the FFT. If not given, the last ``len(s)``
+        axes are used, or all axes if `s` is also not specified.
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see `fft`). Default is "backward".
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+        See :func:`fft` for more details.
+    workers : int, optional
+        Maximum number of workers to use for parallel computation. If negative,
+        the value wraps around from ``os.cpu_count()``.
+        See :func:`~scipy.fft.fft` for more details.
+    plan : object, optional
+        This argument is reserved for passing in a precomputed plan provided
+        by downstream FFT vendors. It is currently not used in SciPy.
+
+        .. versionadded:: 1.5.0
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axes
+        indicated by `axes`, or by a combination of `s` and `x`,
+        as explained in the parameters section above.
+        The length of the last axis transformed will be ``s[-1]//2+1``,
+        while the remaining transformed axes will have lengths according to
+        `s`, or unchanged from the input.
+
+    Raises
+    ------
+    ValueError
+        If `s` and `axes` have different length.
+    IndexError
+        If an element of `axes` is larger than the number of axes of `x`.
+
+    See Also
+    --------
+    hfftn : The forward N-D FFT of Hermitian input.
+    hfft : The 1-D FFT of Hermitian input.
+    fft : The 1-D FFT, with definitions and conventions used.
+    fftn : The N-D FFT.
+    hfft2 : The 2-D FFT of Hermitian input.
+
+    Notes
+    -----
+    The transform for real input is performed over the last transformation
+    axis, as by `ihfft`, then the transform over the remaining axes is
+    performed as by `ifftn`. The order of the output is the positive part of
+    the Hermitian output signal, in the same format as `rfft`.
+
+    Examples
+    --------
+    >>> import scipy.fft
+    >>> import numpy as np
+    >>> x = np.ones((2, 2, 2))
+    >>> scipy.fft.ihfftn(x)
+    array([[[1.+0.j,  0.+0.j], # may vary
+            [0.+0.j,  0.+0.j]],
+           [[0.+0.j,  0.+0.j],
+            [0.+0.j,  0.+0.j]]])
+    >>> scipy.fft.ihfftn(x, axes=(2, 0))
+    array([[[1.+0.j,  0.+0.j], # may vary
+            [1.+0.j,  0.+0.j]],
+           [[0.+0.j,  0.+0.j],
+            [0.+0.j,  0.+0.j]]])
+
+    """
+    return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def ihfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *,
+           plan=None):
+    """
+    Compute the 2-D inverse FFT of a real spectrum.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    s : sequence of ints, optional
+        Shape of the real input to the inverse FFT.
+    axes : sequence of ints, optional
+        The axes over which to compute the inverse fft.
+        Default is the last two axes.
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see `fft`). Default is "backward".
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+        See :func:`fft` for more details.
+    workers : int, optional
+        Maximum number of workers to use for parallel computation. If negative,
+        the value wraps around from ``os.cpu_count()``.
+        See :func:`~scipy.fft.fft` for more details.
+    plan : object, optional
+        This argument is reserved for passing in a precomputed plan provided
+        by downstream FFT vendors. It is currently not used in SciPy.
+
+        .. versionadded:: 1.5.0
+
+    Returns
+    -------
+    out : ndarray
+        The result of the inverse real 2-D FFT.
+
+    See Also
+    --------
+    ihfftn : Compute the inverse of the N-D FFT of Hermitian input.
+
+    Notes
+    -----
+    This is really `ihfftn` with different defaults.
+    For more details see `ihfftn`.
+
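+    Examples
+    --------
+    A shape-only sketch (the last transformed axis is halved, as for `ihfft`):
+
+    >>> import scipy.fft
+    >>> import numpy as np
+    >>> scipy.fft.ihfft2(np.ones((4, 4))).shape
+    (4, 3)
+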
+    """
+    return (Dispatchable(x, np.ndarray),)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fft/_debug_backends.py b/__packaged__/coreml/.python_dependencies/scipy/fft/_debug_backends.py
new file mode 100644
index 00000000..c9647c5d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fft/_debug_backends.py
@@ -0,0 +1,22 @@
+import numpy as np
+
+class NumPyBackend:
+    """Backend that uses numpy.fft"""
+    __ua_domain__ = "numpy.scipy.fft"
+
+    @staticmethod
+    def __ua_function__(method, args, kwargs):
+        kwargs.pop("overwrite_x", None)
+
+        fn = getattr(np.fft, method.__name__, None)
+        return (NotImplemented if fn is None
+                else fn(*args, **kwargs))
+
+
+class EchoBackend:
+    """Backend that just prints the __ua_function__ arguments"""
+    __ua_domain__ = "numpy.scipy.fft"
+
+    @staticmethod
+    def __ua_function__(method, args, kwargs):
+        print(method, args, kwargs, sep='\n')
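+
+
+# Example usage (an illustrative sketch; both classes implement the uarray
+# backend protocol expected by ``scipy.fft.set_backend``):
+#
+#     import scipy.fft
+#     from scipy.fft._debug_backends import NumPyBackend
+#
+#     with scipy.fft.set_backend(NumPyBackend):
+#         scipy.fft.fft([1, 2, 3, 4])  # computed by numpy.fft.fft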
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fft/_fftlog.py b/__packaged__/coreml/.python_dependencies/scipy/fft/_fftlog.py
new file mode 100644
index 00000000..1ba07be3
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fft/_fftlog.py
@@ -0,0 +1,390 @@
+'''Fast Hankel transforms using the FFTLog algorithm.
+
+The implementation closely follows the Fortran code of Hamilton (2000).
+
+added: 14/11/2020 Nicolas Tessore 
+'''
+
+import numpy as np
+from warnings import warn
+from ._basic import rfft, irfft
+from ..special import loggamma, poch
+
+__all__ = [
+    'fht', 'ifht',
+    'fhtoffset',
+]
+
+
+# constants
+LN_2 = np.log(2)
+
+
+def fht(a, dln, mu, offset=0.0, bias=0.0):
+    r'''Compute the fast Hankel transform.
+
+    Computes the discrete Hankel transform of a logarithmically spaced periodic
+    sequence using the FFTLog algorithm [1]_, [2]_.
+
+    Parameters
+    ----------
+    a : array_like (..., n)
+        Real periodic input array, uniformly logarithmically spaced.  For
+        multidimensional input, the transform is performed over the last axis.
+    dln : float
+        Uniform logarithmic spacing of the input array.
+    mu : float
+        Order of the Hankel transform, any positive or negative real number.
+    offset : float, optional
+        Offset of the uniform logarithmic spacing of the output array.
+    bias : float, optional
+        Exponent of power law bias, any positive or negative real number.
+
+    Returns
+    -------
+    A : array_like (..., n)
+        The transformed output array, which is real, periodic, uniformly
+        logarithmically spaced, and of the same shape as the input array.
+
+    See Also
+    --------
+    ifht : The inverse of `fht`.
+    fhtoffset : Return an optimal offset for `fht`.
+
+    Notes
+    -----
+    This function computes a discrete version of the Hankel transform
+
+    .. math::
+
+        A(k) = \int_{0}^{\infty} \! a(r) \, J_\mu(kr) \, k \, dr \;,
+
+    where :math:`J_\mu` is the Bessel function of order :math:`\mu`.  The index
+    :math:`\mu` may be any real number, positive or negative.
+
+    The input array `a` is a periodic sequence of length :math:`n`, uniformly
+    logarithmically spaced with spacing `dln`,
+
+    .. math::
+
+        a_j = a(r_j) \;, \quad
+        r_j = r_c \exp[(j-j_c) \, \mathtt{dln}]
+
+    centred about the point :math:`r_c`.  Note that the central index
+    :math:`j_c = (n-1)/2` is half-integral if :math:`n` is even, so that
+    :math:`r_c` falls between two input elements.  Similarly, the output
+    array `A` is a periodic sequence of length :math:`n`, also uniformly
+    logarithmically spaced with spacing `dln`
+
+    .. math::
+
+       A_j = A(k_j) \;, \quad
+       k_j = k_c \exp[(j-j_c) \, \mathtt{dln}]
+
+    centred about the point :math:`k_c`.
+
+    The centre points :math:`r_c` and :math:`k_c` of the periodic intervals may
+    be chosen arbitrarily, but it would be usual to choose the product
+    :math:`k_c r_c = k_j r_{n-1-j} = k_{n-1-j} r_j` to be unity.  This can be
+    changed using the `offset` parameter, which controls the logarithmic offset
+    :math:`\log(k_c) = \mathtt{offset} - \log(r_c)` of the output array.
+    Choosing an optimal value for `offset` may reduce ringing of the discrete
+    Hankel transform.
+
+    If the `bias` parameter is nonzero, this function computes a discrete
+    version of the biased Hankel transform
+
+    .. math::
+
+        A(k) = \int_{0}^{\infty} \! a_q(r) \, (kr)^q \, J_\mu(kr) \, k \, dr
+
+    where :math:`q` is the value of `bias`, and a power law bias
+    :math:`a_q(r) = a(r) \, (kr)^{-q}` is applied to the input sequence.
+    Biasing the transform can help approximate the continuous transform of
+    :math:`a(r)` if there is a value :math:`q` such that :math:`a_q(r)` is
+    close to a periodic sequence, in which case the resulting :math:`A(k)` will
+    be close to the continuous transform.
+
+    References
+    ----------
+    .. [1] Talman J. D., 1978, J. Comp. Phys., 29, 35
+    .. [2] Hamilton A. J. S., 2000, MNRAS, 312, 257 (astro-ph/9905191)
+
+    Examples
+    --------
+
+    This example is an adapted version of ``fftlogtest.f``, which is provided
+    in [2]_. It evaluates the integral
+
+    .. math::
+
+        \int^\infty_0 r^{\mu+1} \exp(-r^2/2) \, J_\mu(kr) \, k \, dr
+        = k^{\mu+1} \exp(-k^2/2) \;.
+
+    >>> import numpy as np
+    >>> from scipy import fft
+    >>> import matplotlib.pyplot as plt
+
+    Parameters for the transform.
+
+    >>> mu = 0.0                     # Order mu of Bessel function
+    >>> r = np.logspace(-7, 1, 128)  # Input evaluation points
+    >>> dln = np.log(r[1]/r[0])      # Step size
+    >>> offset = fft.fhtoffset(dln, initial=-6*np.log(10), mu=mu)
+    >>> k = np.exp(offset)/r[::-1]   # Output evaluation points
+
+    Define the analytical function.
+
+    >>> def f(x, mu):
+    ...     """Analytical function: x^(mu+1) exp(-x^2/2)."""
+    ...     return x**(mu + 1)*np.exp(-x**2/2)
+
+    Evaluate the function at ``r`` and compute the corresponding values at
+    ``k`` using FFTLog.
+
+    >>> a_r = f(r, mu)
+    >>> fht = fft.fht(a_r, dln, mu=mu, offset=offset)
+
+    For this example we can actually compute the analytical response (which in
+    this case is the same as the input function) for comparison and compute the
+    relative error.
+
+    >>> a_k = f(k, mu)
+    >>> rel_err = abs((fht-a_k)/a_k)
+
+    Plot the result.
+
+    >>> figargs = {'sharex': True, 'sharey': True, 'constrained_layout': True}
+    >>> fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4), **figargs)
+    >>> ax1.set_title(r'$r^{\mu+1}\ \exp(-r^2/2)$')
+    >>> ax1.loglog(r, a_r, 'k', lw=2)
+    >>> ax1.set_xlabel('r')
+    >>> ax2.set_title(r'$k^{\mu+1} \exp(-k^2/2)$')
+    >>> ax2.loglog(k, a_k, 'k', lw=2, label='Analytical')
+    >>> ax2.loglog(k, fht, 'C3--', lw=2, label='FFTLog')
+    >>> ax2.set_xlabel('k')
+    >>> ax2.legend(loc=3, framealpha=1)
+    >>> ax2.set_ylim([1e-10, 1e1])
+    >>> ax2b = ax2.twinx()
+    >>> ax2b.loglog(k, rel_err, 'C0', label='Rel. Error (-)')
+    >>> ax2b.set_ylabel('Rel. Error (-)', color='C0')
+    >>> ax2b.tick_params(axis='y', labelcolor='C0')
+    >>> ax2b.legend(loc=4, framealpha=1)
+    >>> ax2b.set_ylim([1e-9, 1e-3])
+    >>> plt.show()
+
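+    The transform is invertible: `ifht` with the same ``dln``, ``mu`` and
+    ``offset`` recovers the input to numerical accuracy. A quick sketch
+    (``fht`` here is the array computed above):
+
+    >>> a_back = fft.ifht(fht, dln, mu=mu, offset=offset)
+    >>> np.allclose(a_back, a_r)
+    True
+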
+    '''
+
+    # size of transform
+    n = np.shape(a)[-1]
+
+    # bias input array
+    if bias != 0:
+        # a_q(r) = a(r) (r/r_c)^{-q}
+        j_c = (n-1)/2
+        j = np.arange(n)
+        a = a * np.exp(-bias*(j - j_c)*dln)
+
+    # compute FHT coefficients
+    u = fhtcoeff(n, dln, mu, offset=offset, bias=bias)
+
+    # transform
+    A = _fhtq(a, u)
+
+    # bias output array
+    if bias != 0:
+        # A(k) = A_q(k) (k/k_c)^{-q} (k_c r_c)^{-q}
+        A *= np.exp(-bias*((j - j_c)*dln + offset))
+
+    return A
+
+
+def ifht(A, dln, mu, offset=0.0, bias=0.0):
+    r'''Compute the inverse fast Hankel transform.
+
+    Computes the discrete inverse Hankel transform of a logarithmically spaced
+    periodic sequence. This is the inverse operation to `fht`.
+
+    Parameters
+    ----------
+    A : array_like (..., n)
+        Real periodic input array, uniformly logarithmically spaced.  For
+        multidimensional input, the transform is performed over the last axis.
+    dln : float
+        Uniform logarithmic spacing of the input array.
+    mu : float
+        Order of the Hankel transform, any positive or negative real number.
+    offset : float, optional
+        Offset of the uniform logarithmic spacing of the output array.
+    bias : float, optional
+        Exponent of power law bias, any positive or negative real number.
+
+    Returns
+    -------
+    a : array_like (..., n)
+        The transformed output array, which is real, periodic, uniformly
+        logarithmically spaced, and of the same shape as the input array.
+
+    See Also
+    --------
+    fht : Definition of the fast Hankel transform.
+    fhtoffset : Return an optimal offset for `ifht`.
+
+    Notes
+    -----
+    This function computes a discrete version of the Hankel transform
+
+    .. math::
+
+        a(r) = \int_{0}^{\infty} \! A(k) \, J_\mu(kr) \, r \, dk \;,
+
+    where :math:`J_\mu` is the Bessel function of order :math:`\mu`.  The index
+    :math:`\mu` may be any real number, positive or negative.
+
+    See `fht` for further details.
+
+    '''
+
+    # size of transform
+    n = np.shape(A)[-1]
+
+    # bias input array
+    if bias != 0:
+        # A_q(k) = A(k) (k/k_c)^{q} (k_c r_c)^{q}
+        j_c = (n-1)/2
+        j = np.arange(n)
+        A = A * np.exp(bias*((j - j_c)*dln + offset))
+
+    # compute FHT coefficients
+    u = fhtcoeff(n, dln, mu, offset=offset, bias=bias)
+
+    # transform
+    a = _fhtq(A, u, inverse=True)
+
+    # bias output array
+    if bias != 0:
+        # a(r) = a_q(r) (r/r_c)^{q}
+        a /= np.exp(-bias*(j - j_c)*dln)
+
+    return a
+
+
+def fhtcoeff(n, dln, mu, offset=0.0, bias=0.0):
+    '''Compute the coefficient array for a fast Hankel transform.
+    '''
+
+    lnkr, q = offset, bias
+
+    # Hankel transform coefficients
+    # u_m = (kr)^{-i 2m pi/(n dlnr)} U_mu(q + i 2m pi/(n dlnr))
+    # with U_mu(x) = 2^x Gamma((mu+1+x)/2)/Gamma((mu+1-x)/2)
+    xp = (mu+1+q)/2
+    xm = (mu+1-q)/2
+    y = np.linspace(0, np.pi*(n//2)/(n*dln), n//2+1)
+    u = np.empty(n//2+1, dtype=complex)
+    v = np.empty(n//2+1, dtype=complex)
+    u.imag[:] = y
+    u.real[:] = xm
+    loggamma(u, out=v)
+    u.real[:] = xp
+    loggamma(u, out=u)
+    y *= 2*(LN_2 - lnkr)
+    u.real -= v.real
+    u.real += LN_2*q
+    u.imag += v.imag
+    u.imag += y
+    np.exp(u, out=u)
+
+    # fix last coefficient to be real
+    u.imag[-1] = 0
+
+    # deal with special cases
+    if not np.isfinite(u[0]):
+        # write u_0 = 2^q Gamma(xp)/Gamma(xm) = 2^q poch(xm, xp-xm)
+        # poch() handles special cases for negative integers correctly
+        u[0] = 2**q * poch(xm, xp-xm)
+        # the coefficient may be inf or 0, meaning the transform or the
+        # inverse transform, respectively, is singular
+
+    return u
+
+
+def fhtoffset(dln, mu, initial=0.0, bias=0.0):
+    '''Return optimal offset for a fast Hankel transform.
+
+    Returns an offset close to `initial` that fulfils the low-ringing
+    condition of [1]_ for the fast Hankel transform `fht` with logarithmic
+    spacing `dln`, order `mu` and bias `bias`.
+
+    Parameters
+    ----------
+    dln : float
+        Uniform logarithmic spacing of the transform.
+    mu : float
+        Order of the Hankel transform, any positive or negative real number.
+    initial : float, optional
+        Initial value for the offset. Returns the closest value that fulfils
+        the low-ringing condition.
+    bias : float, optional
+        Exponent of power law bias, any positive or negative real number.
+
+    Returns
+    -------
+    offset : float
+        Optimal offset of the uniform logarithmic spacing of the transform that
+        fulfils a low-ringing condition.
+
+    See Also
+    --------
+    fht : Definition of the fast Hankel transform.
+
+    References
+    ----------
+    .. [1] Hamilton A. J. S., 2000, MNRAS, 312, 257 (astro-ph/9905191)
+
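+    Examples
+    --------
+    The returned offset is the value closest to `initial` that satisfies the
+    low-ringing condition, so it lies within half a grid spacing of it. A
+    quick sketch:
+
+    >>> from scipy.fft import fhtoffset
+    >>> dln, mu = 0.1, 2.0
+    >>> offset = fhtoffset(dln, mu, initial=0.0)
+    >>> abs(offset) <= dln/2
+    True
+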
+    '''
+
+    lnkr, q = initial, bias
+
+    xp = (mu+1+q)/2
+    xm = (mu+1-q)/2
+    y = np.pi/(2*dln)
+    zp = loggamma(xp + 1j*y)
+    zm = loggamma(xm + 1j*y)
+    arg = (LN_2 - lnkr)/dln + (zp.imag + zm.imag)/np.pi
+    return lnkr + (arg - np.round(arg))*dln
+
+
+def _fhtq(a, u, inverse=False):
+    '''Compute the biased fast Hankel transform.
+
+    This is the basic FFTLog routine.
+    '''
+
+    # size of transform
+    n = np.shape(a)[-1]
+
+    # check for singular transform or singular inverse transform
+    if np.isinf(u[0]) and not inverse:
+        warn('singular transform; consider changing the bias')
+        # fix coefficient to obtain (potentially correct) transform anyway
+        u = u.copy()
+        u[0] = 0
+    elif u[0] == 0 and inverse:
+        warn('singular inverse transform; consider changing the bias')
+        # fix coefficient to obtain (potentially correct) inverse anyway
+        u = u.copy()
+        u[0] = np.inf
+
+    # biased fast Hankel transform via real FFT
+    A = rfft(a, axis=-1)
+    if not inverse:
+        # forward transform
+        A *= u
+    else:
+        # backward transform
+        A /= u.conj()
+    A = irfft(A, n, axis=-1)
+    A = A[..., ::-1]
+
+    return A
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fft/_fftlog_multimethods.py b/__packaged__/coreml/.python_dependencies/scipy/fft/_fftlog_multimethods.py
new file mode 100644
index 00000000..f7bf0948
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fft/_fftlog_multimethods.py
@@ -0,0 +1,29 @@
+'''Multimethods for fast Hankel transforms.
+'''
+
+import numpy as np
+
+from ._basic import _dispatch
+from ._fftlog import fht as _fht
+from ._fftlog import ifht as _ifht
+from scipy._lib.uarray import Dispatchable
+
+
+__all__ = ['fht', 'ifht']
+
+
+@_dispatch
+def fht(a, dln, mu, offset=0.0, bias=0.0):
+    """fht multimethod."""
+    return (Dispatchable(a, np.ndarray),)
+
+
+@_dispatch
+def ifht(A, dln, mu, offset=0.0, bias=0.0):
+    """ifht multimethod."""
+    return (Dispatchable(A, np.ndarray),)
+
+
+# copy over the docstrings
+fht.__doc__ = _fht.__doc__
+ifht.__doc__ = _ifht.__doc__
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fft/_helper.py b/__packaged__/coreml/.python_dependencies/scipy/fft/_helper.py
new file mode 100644
index 00000000..33dba429
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fft/_helper.py
@@ -0,0 +1,101 @@
+from functools import update_wrapper, lru_cache
+
+from ._pocketfft import helper as _helper
+
+
+def next_fast_len(target, real=False):
+    """Find the next fast size of input data to ``fft``, for zero-padding, etc.
+
+    SciPy's FFT algorithms gain their speed by a recursive divide and conquer
+    strategy. This relies on efficient functions for small prime factors of the
+    input length. Thus, the transforms are fastest when using composites of the
+    prime factors handled by the fft implementation. If there are efficient
+    functions for all radices <= `n`, then the result will be a number `x`
+    >= ``target`` with only prime factors < `n` (also known as `n`-smooth
+    numbers).
+
+    Parameters
+    ----------
+    target : int
+        Length to start searching from. Must be a positive integer.
+    real : bool, optional
+        True if the FFT involves real input or output (e.g., `rfft` or `hfft`
+        but not `fft`). Defaults to False.
+
+    Returns
+    -------
+    out : int
+        The smallest fast length greater than or equal to ``target``.
+
+    Notes
+    -----
+    The result of this function may change in the future as performance
+    considerations change, for example, if new prime factors are added.
+
+    Calling `fft` or `ifft` with real input data performs an ``'R2C'``
+    transform internally.
+
+    Examples
+    --------
+    On a particular machine, an FFT of prime length takes 11.4 ms:
+
+    >>> from scipy import fft
+    >>> import numpy as np
+    >>> rng = np.random.default_rng()
+    >>> min_len = 93059  # prime length is worst case for speed
+    >>> a = rng.standard_normal(min_len)
+    >>> b = fft.fft(a)
+
+    Zero-padding to the next regular length reduces computation time to
+    1.6 ms, a speedup of 7.3 times:
+
+    >>> fft.next_fast_len(min_len, real=True)
+    93312
+    >>> b = fft.fft(a, 93312)
+
+    Rounding up to the next power of 2 is not optimal, taking 3.0 ms to
+    compute; 1.9 times longer than with the size given by ``next_fast_len``:
+
+    >>> b = fft.fft(a, 131072)
+
+    """
+    pass
+
+
+# Directly wrap the c-function good_size but take the docstring etc., from the
+# next_fast_len function above
+next_fast_len = update_wrapper(lru_cache()(_helper.good_size), next_fast_len)
+next_fast_len.__wrapped__ = _helper.good_size
+
+
+def _init_nd_shape_and_axes(x, shape, axes):
+    """Handle shape and axes arguments for N-D transforms.
+
+    Returns the shape and axes in a standard form, taking into account negative
+    values and checking for various potential errors.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    shape : int or array_like of ints or None
+        The shape of the result. If both `shape` and `axes` (see below) are
+        None, `shape` is ``x.shape``; if `shape` is None but `axes` is
+        not None, then `shape` is ``numpy.take(x.shape, axes, axis=0)``.
+        If `shape` is -1, the size of the corresponding dimension of `x` is
+        used.
+    axes : int or array_like of ints or None
+        Axes along which the calculation is computed.
+        The default is over all axes.
+        Negative indices are automatically converted to their positive
+        counterparts.
+
+    Returns
+    -------
+    shape : sequence of ints
+        The shape of the result; one integer per transformed axis.
+    axes : sequence of ints
+        The axes along which the transform is computed.
+
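+    Examples
+    --------
+    A normalisation sketch (the values follow from the rules above):
+
+    >>> import numpy as np
+    >>> from scipy.fft._helper import _init_nd_shape_and_axes
+    >>> x = np.zeros((4, 6))
+    >>> _init_nd_shape_and_axes(x, shape=(-1, 3), axes=None)
+    ([4, 3], range(0, 2))
+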
+    """
+    return _helper._init_nd_shape_and_axes(x, shape, axes)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fft/_pocketfft/LICENSE.md b/__packaged__/coreml/.python_dependencies/scipy/fft/_pocketfft/LICENSE.md
new file mode 100644
index 00000000..1b5163d8
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fft/_pocketfft/LICENSE.md
@@ -0,0 +1,25 @@
+Copyright (C) 2010-2019 Max-Planck-Society
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright notice, this
+  list of conditions and the following disclaimer in the documentation and/or
+  other materials provided with the distribution.
+* Neither the name of the copyright holder nor the names of its contributors may
+  be used to endorse or promote products derived from this software without
+  specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fft/_pocketfft/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/fft/_pocketfft/__init__.py
new file mode 100644
index 00000000..0671484c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fft/_pocketfft/__init__.py
@@ -0,0 +1,9 @@
+""" FFT backend using pypocketfft """
+
+from .basic import *
+from .realtransforms import *
+from .helper import *
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fft/_pocketfft/basic.py b/__packaged__/coreml/.python_dependencies/scipy/fft/_pocketfft/basic.py
new file mode 100644
index 00000000..443f6b30
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fft/_pocketfft/basic.py
@@ -0,0 +1,297 @@
+"""
+Discrete Fourier Transforms - basic.py
+"""
+import numpy as np
+import functools
+from . import pypocketfft as pfft
+from .helper import (_asfarray, _init_nd_shape_and_axes, _datacopied,
+                     _fix_shape, _fix_shape_1d, _normalization,
+                     _workers)
+
+
+def c2c(forward, x, n=None, axis=-1, norm=None, overwrite_x=False,
+        workers=None, *, plan=None):
+    """ Return discrete Fourier transform of real or complex sequence. """
+    if plan is not None:
+        raise NotImplementedError('Passing a precomputed plan is not yet '
+                                  'supported by scipy.fft functions')
+    tmp = _asfarray(x)
+    overwrite_x = overwrite_x or _datacopied(tmp, x)
+    norm = _normalization(norm, forward)
+    workers = _workers(workers)
+
+    if n is not None:
+        tmp, copied = _fix_shape_1d(tmp, n, axis)
+        overwrite_x = overwrite_x or copied
+    elif tmp.shape[axis] < 1:
+        raise ValueError("invalid number of data points ({0}) specified"
+                         .format(tmp.shape[axis]))
+
+    out = (tmp if overwrite_x and tmp.dtype.kind == 'c' else None)
+
+    return pfft.c2c(tmp, (axis,), forward, norm, out, workers)
+
+
+fft = functools.partial(c2c, True)
+fft.__name__ = 'fft'
+ifft = functools.partial(c2c, False)
+ifft.__name__ = 'ifft'
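+
+# The partials above fix only the transform direction: e.g. ``fft(x, n=8)``
+# is ``c2c(True, x, 8)`` and ``ifft(x)`` is ``c2c(False, x)``.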
+
+
+def r2c(forward, x, n=None, axis=-1, norm=None, overwrite_x=False,
+        workers=None, *, plan=None):
+    """
+    Discrete Fourier transform of a real sequence.
+    """
+    if plan is not None:
+        raise NotImplementedError('Passing a precomputed plan is not yet '
+                                  'supported by scipy.fft functions')
+    tmp = _asfarray(x)
+    norm = _normalization(norm, forward)
+    workers = _workers(workers)
+
+    if not np.isrealobj(tmp):
+        raise TypeError("x must be a real sequence")
+
+    if n is not None:
+        tmp, _ = _fix_shape_1d(tmp, n, axis)
+    elif tmp.shape[axis] < 1:
+        raise ValueError("invalid number of data points ({0}) specified"
+                         .format(tmp.shape[axis]))
+
+    # Note: overwrite_x is not utilised
+    return pfft.r2c(tmp, (axis,), forward, norm, None, workers)
+
+
+rfft = functools.partial(r2c, True)
+rfft.__name__ = 'rfft'
+ihfft = functools.partial(r2c, False)
+ihfft.__name__ = 'ihfft'
+
+
+def c2r(forward, x, n=None, axis=-1, norm=None, overwrite_x=False,
+        workers=None, *, plan=None):
+    """
+    Return inverse discrete Fourier transform of real sequence x.
+    """
+    if plan is not None:
+        raise NotImplementedError('Passing a precomputed plan is not yet '
+                                  'supported by scipy.fft functions')
+    tmp = _asfarray(x)
+    norm = _normalization(norm, forward)
+    workers = _workers(workers)
+
+    # TODO: Optimize for hermitian and real?
+    if np.isrealobj(tmp):
+        tmp = tmp + 0.j
+
+    # Last axis utilizes hermitian symmetry
+    if n is None:
+        n = (tmp.shape[axis] - 1) * 2
+        if n < 1:
+            raise ValueError("Invalid number of data points ({0}) specified"
+                             .format(n))
+    else:
+        tmp, _ = _fix_shape_1d(tmp, (n//2) + 1, axis)
+
+    # Note: overwrite_x is not utilized
+    return pfft.c2r(tmp, (axis,), n, forward, norm, None, workers)
+
+
+hfft = functools.partial(c2r, True)
+hfft.__name__ = 'hfft'
+irfft = functools.partial(c2r, False)
+irfft.__name__ = 'irfft'
+
+
+def fft2(x, s=None, axes=(-2,-1), norm=None, overwrite_x=False, workers=None,
+         *, plan=None):
+    """
+    2-D discrete Fourier transform.
+    """
+    if plan is not None:
+        raise NotImplementedError('Passing a precomputed plan is not yet '
+                                  'supported by scipy.fft functions')
+    return fftn(x, s, axes, norm, overwrite_x, workers)
+
+
+def ifft2(x, s=None, axes=(-2,-1), norm=None, overwrite_x=False, workers=None,
+          *, plan=None):
+    """
+    2-D discrete inverse Fourier transform of real or complex sequence.
+    """
+    if plan is not None:
+        raise NotImplementedError('Passing a precomputed plan is not yet '
+                                  'supported by scipy.fft functions')
+    return ifftn(x, s, axes, norm, overwrite_x, workers)
+
+
+def rfft2(x, s=None, axes=(-2,-1), norm=None, overwrite_x=False, workers=None,
+          *, plan=None):
+    """
+    2-D discrete Fourier transform of a real sequence
+    """
+    if plan is not None:
+        raise NotImplementedError('Passing a precomputed plan is not yet '
+                                  'supported by scipy.fft functions')
+    return rfftn(x, s, axes, norm, overwrite_x, workers)
+
+
+def irfft2(x, s=None, axes=(-2,-1), norm=None, overwrite_x=False, workers=None,
+           *, plan=None):
+    """
+    2-D discrete inverse Fourier transform of a real sequence
+    """
+    if plan is not None:
+        raise NotImplementedError('Passing a precomputed plan is not yet '
+                                  'supported by scipy.fft functions')
+    return irfftn(x, s, axes, norm, overwrite_x, workers)
+
+
+def hfft2(x, s=None, axes=(-2,-1), norm=None, overwrite_x=False, workers=None,
+          *, plan=None):
+    """
+    2-D discrete Fourier transform of a Hermitian sequence
+    """
+    if plan is not None:
+        raise NotImplementedError('Passing a precomputed plan is not yet '
+                                  'supported by scipy.fft functions')
+    return hfftn(x, s, axes, norm, overwrite_x, workers)
+
+
+def ihfft2(x, s=None, axes=(-2,-1), norm=None, overwrite_x=False, workers=None,
+           *, plan=None):
+    """
+    2-D discrete inverse Fourier transform of a Hermitian sequence
+    """
+    if plan is not None:
+        raise NotImplementedError('Passing a precomputed plan is not yet '
+                                  'supported by scipy.fft functions')
+    return ihfftn(x, s, axes, norm, overwrite_x, workers)
+
+
+def c2cn(forward, x, s=None, axes=None, norm=None, overwrite_x=False,
+         workers=None, *, plan=None):
+    """
+    Return multidimensional discrete Fourier transform.
+    """
+    if plan is not None:
+        raise NotImplementedError('Passing a precomputed plan is not yet '
+                                  'supported by scipy.fft functions')
+    tmp = _asfarray(x)
+
+    shape, axes = _init_nd_shape_and_axes(tmp, s, axes)
+    overwrite_x = overwrite_x or _datacopied(tmp, x)
+    workers = _workers(workers)
+
+    if len(axes) == 0:
+        return x
+
+    tmp, copied = _fix_shape(tmp, shape, axes)
+    overwrite_x = overwrite_x or copied
+
+    norm = _normalization(norm, forward)
+    out = (tmp if overwrite_x and tmp.dtype.kind == 'c' else None)
+
+    return pfft.c2c(tmp, axes, forward, norm, out, workers)
+
+
+fftn = functools.partial(c2cn, True)
+fftn.__name__ = 'fftn'
+ifftn = functools.partial(c2cn, False)
+ifftn.__name__ = 'ifftn'
+
+
+def r2cn(forward, x, s=None, axes=None, norm=None, overwrite_x=False,
+         workers=None, *, plan=None):
+    """Return multidimensional discrete Fourier transform of real input"""
+    if plan is not None:
+        raise NotImplementedError('Passing a precomputed plan is not yet '
+                                  'supported by scipy.fft functions')
+    tmp = _asfarray(x)
+
+    if not np.isrealobj(tmp):
+        raise TypeError("x must be a real sequence")
+
+    shape, axes = _init_nd_shape_and_axes(tmp, s, axes)
+    tmp, _ = _fix_shape(tmp, shape, axes)
+    norm = _normalization(norm, forward)
+    workers = _workers(workers)
+
+    if len(axes) == 0:
+        raise ValueError("at least 1 axis must be transformed")
+
+    # Note: overwrite_x is not utilized
+    return pfft.r2c(tmp, axes, forward, norm, None, workers)
+
+
+rfftn = functools.partial(r2cn, True)
+rfftn.__name__ = 'rfftn'
+ihfftn = functools.partial(r2cn, False)
+ihfftn.__name__ = 'ihfftn'
+
+
+def c2rn(forward, x, s=None, axes=None, norm=None, overwrite_x=False,
+         workers=None, *, plan=None):
+    """Multidimensional inverse discrete fourier transform with real output"""
+    if plan is not None:
+        raise NotImplementedError('Passing a precomputed plan is not yet '
+                                  'supported by scipy.fft functions')
+    tmp = _asfarray(x)
+
+    # TODO: Optimize for hermitian and real?
+    if np.isrealobj(tmp):
+        tmp = tmp + 0.j
+
+    noshape = s is None
+    shape, axes = _init_nd_shape_and_axes(tmp, s, axes)
+
+    if len(axes) == 0:
+        raise ValueError("at least 1 axis must be transformed")
+
+    if noshape:
+        shape[-1] = (tmp.shape[axes[-1]] - 1) * 2
+
+    norm = _normalization(norm, forward)
+    workers = _workers(workers)
+
+    # Last axis utilizes hermitian symmetry
+    lastsize = shape[-1]
+    shape[-1] = (shape[-1] // 2) + 1
+
+    tmp, _ = _fix_shape(tmp, shape, axes)
+
+    # Note: overwrite_x is not utilized
+    return pfft.c2r(tmp, axes, lastsize, forward, norm, None, workers)
+
+
+hfftn = functools.partial(c2rn, True)
+hfftn.__name__ = 'hfftn'
+irfftn = functools.partial(c2rn, False)
+irfftn.__name__ = 'irfftn'
+
+
+def r2r_fftpack(forward, x, n=None, axis=-1, norm=None, overwrite_x=False):
+    """FFT of a real sequence, returning fftpack half complex format"""
+    tmp = _asfarray(x)
+    overwrite_x = overwrite_x or _datacopied(tmp, x)
+    norm = _normalization(norm, forward)
+    workers = _workers(None)
+
+    if tmp.dtype.kind == 'c':
+        raise TypeError('x must be a real sequence')
+
+    if n is not None:
+        tmp, copied = _fix_shape_1d(tmp, n, axis)
+        overwrite_x = overwrite_x or copied
+    elif tmp.shape[axis] < 1:
+        raise ValueError("invalid number of data points ({0}) specified"
+                         .format(tmp.shape[axis]))
+
+    out = (tmp if overwrite_x else None)
+
+    return pfft.r2r_fftpack(tmp, (axis,), forward, forward, norm, out, workers)
+
+
+rfft_fftpack = functools.partial(r2r_fftpack, True)
+rfft_fftpack.__name__ = 'rfft_fftpack'
+irfft_fftpack = functools.partial(r2r_fftpack, False)
+irfft_fftpack.__name__ = 'irfft_fftpack'
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fft/_pocketfft/helper.py b/__packaged__/coreml/.python_dependencies/scipy/fft/_pocketfft/helper.py
new file mode 100644
index 00000000..8ba1da1f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fft/_pocketfft/helper.py
@@ -0,0 +1,216 @@
+from numbers import Number
+import operator
+import os
+import threading
+import contextlib
+
+import numpy as np
+# good_size is exposed (and used) from this import
+from .pypocketfft import good_size
+
+_config = threading.local()
+_cpu_count = os.cpu_count()
+
+
+def _iterable_of_int(x, name=None):
+    """Convert ``x`` to an iterable sequence of int
+
+    Parameters
+    ----------
+    x : value, or sequence of values, convertible to int
+    name : str, optional
+        Name of the argument being converted, only used in the error message
+
+    Returns
+    -------
+    y : ``List[int]``
+    """
+    if isinstance(x, Number):
+        x = (x,)
+
+    try:
+        x = [operator.index(a) for a in x]
+    except TypeError as e:
+        name = name or "value"
+        raise ValueError("{} must be a scalar or iterable of integers"
+                         .format(name)) from e
+
+    return x
+
+
+def _init_nd_shape_and_axes(x, shape, axes):
+    """Handles shape and axes arguments for nd transforms"""
+    noshape = shape is None
+    noaxes = axes is None
+
+    if not noaxes:
+        axes = _iterable_of_int(axes, 'axes')
+        axes = [a + x.ndim if a < 0 else a for a in axes]
+
+        if any(a >= x.ndim or a < 0 for a in axes):
+            raise ValueError("axes exceeds dimensionality of input")
+        if len(set(axes)) != len(axes):
+            raise ValueError("all axes must be unique")
+
+    if not noshape:
+        shape = _iterable_of_int(shape, 'shape')
+
+        if axes and len(axes) != len(shape):
+            raise ValueError("when given, axes and shape arguments"
+                             " have to be of the same length")
+        if noaxes:
+            if len(shape) > x.ndim:
+                raise ValueError("shape requires more axes than are present")
+            axes = range(x.ndim - len(shape), x.ndim)
+
+        shape = [x.shape[a] if s == -1 else s for s, a in zip(shape, axes)]
+    elif noaxes:
+        shape = list(x.shape)
+        axes = range(x.ndim)
+    else:
+        shape = [x.shape[a] for a in axes]
+
+    if any(s < 1 for s in shape):
+        raise ValueError(
+            "invalid number of data points ({0}) specified".format(shape))
+
+    return shape, axes
+
+
+def _asfarray(x):
+    """
+    Convert to array with floating or complex dtype.
+
+    float16 values are also promoted to float32.
+    """
+    if not hasattr(x, "dtype"):
+        x = np.asarray(x)
+
+    if x.dtype == np.float16:
+        return np.asarray(x, np.float32)
+    elif x.dtype.kind not in 'fc':
+        return np.asarray(x, np.float64)
+
+    # Require native byte order
+    dtype = x.dtype.newbyteorder('=')
+    # Always align input
+    copy = not x.flags['ALIGNED']
+    return np.array(x, dtype=dtype, copy=copy)
+
+
+def _datacopied(arr, original):
+    """
+    Strict check for `arr` not sharing any data with `original`,
+    under the assumption that arr = asarray(original)
+    """
+    if arr is original:
+        return False
+    if not isinstance(original, np.ndarray) and hasattr(original, '__array__'):
+        return False
+    return arr.base is None
+
+
+def _fix_shape(x, shape, axes):
+    """Internal auxiliary function for _raw_fft, _raw_fftnd."""
+    must_copy = False
+
+    # Build an nd slice with the dimensions to be read from x
+    index = [slice(None)]*x.ndim
+    for n, ax in zip(shape, axes):
+        if x.shape[ax] >= n:
+            index[ax] = slice(0, n)
+        else:
+            index[ax] = slice(0, x.shape[ax])
+            must_copy = True
+
+    index = tuple(index)
+
+    if not must_copy:
+        return x[index], False
+
+    s = list(x.shape)
+    for n, axis in zip(shape, axes):
+        s[axis] = n
+
+    z = np.zeros(s, x.dtype)
+    z[index] = x[index]
+    return z, True
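+
+# Illustrative: for ``x`` of shape (5,), ``_fix_shape(x, (3,), (0,))`` returns
+# a cropped view and ``False`` (no copy), while ``_fix_shape(x, (8,), (0,))``
+# zero-pads into a fresh array and returns ``True`` for the copy flag.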
+
+
+def _fix_shape_1d(x, n, axis):
+    if n < 1:
+        raise ValueError(
+            "invalid number of data points ({0}) specified".format(n))
+
+    return _fix_shape(x, (n,), (axis,))
+
+
+_NORM_MAP = {None: 0, 'backward': 0, 'ortho': 1, 'forward': 2}
+
+
+def _normalization(norm, forward):
+    """Returns the pypocketfft normalization mode from the norm argument"""
+    try:
+        inorm = _NORM_MAP[norm]
+        return inorm if forward else (2 - inorm)
+    except KeyError:
+        raise ValueError(
+            f'Invalid norm value {norm!r}, should '
+            'be "backward", "ortho" or "forward"') from None
+
+
+def _workers(workers):
+    if workers is None:
+        return getattr(_config, 'default_workers', 1)
+
+    if workers < 0:
+        if workers >= -_cpu_count:
+            workers += 1 + _cpu_count
+        else:
+            raise ValueError("workers value out of range; got {}, must not be"
+                             " less than {}".format(workers, -_cpu_count))
+    elif workers == 0:
+        raise ValueError("workers must not be zero")
+
+    return workers
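+
+# Illustrative: on a machine where os.cpu_count() == 8, _workers(-1) returns
+# -1 + 1 + 8 == 8 (all cores), while _workers(None) returns the per-context
+# default from set_workers (1 unless changed).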
+
+
+@contextlib.contextmanager
+def set_workers(workers):
+    """Context manager for the default number of workers used in `scipy.fft`
+
+    Parameters
+    ----------
+    workers : int
+        The default number of workers to use
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import fft, signal
+    >>> rng = np.random.default_rng()
+    >>> x = rng.standard_normal((128, 64))
+    >>> with fft.set_workers(4):
+    ...     y = signal.fftconvolve(x, x)
+
+    """
+    old_workers = get_workers()
+    _config.default_workers = _workers(operator.index(workers))
+    try:
+        yield
+    finally:
+        _config.default_workers = old_workers
+
+
+def get_workers():
+    """Returns the default number of workers within the current context
+
+    Examples
+    --------
+    >>> from scipy import fft
+    >>> fft.get_workers()
+    1
+    >>> with fft.set_workers(4):
+    ...     fft.get_workers()
+    4
+    """
+    return getattr(_config, 'default_workers', 1)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fft/_pocketfft/realtransforms.py b/__packaged__/coreml/.python_dependencies/scipy/fft/_pocketfft/realtransforms.py
new file mode 100644
index 00000000..903eb5c9
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fft/_pocketfft/realtransforms.py
@@ -0,0 +1,110 @@
+import numpy as np
+from . import pypocketfft as pfft
+from .helper import (_asfarray, _init_nd_shape_and_axes, _datacopied,
+                     _fix_shape, _fix_shape_1d, _normalization, _workers)
+import functools
+
+
+def _r2r(forward, transform, x, type=2, n=None, axis=-1, norm=None,
+         overwrite_x=False, workers=None, orthogonalize=None):
+    """Forward or backward 1-D DCT/DST
+
+    Parameters
+    ----------
+    forward : bool
+        Transform direction (determines type and normalisation)
+    transform : {pypocketfft.dct, pypocketfft.dst}
+        The transform to perform
+    """
+    tmp = _asfarray(x)
+    overwrite_x = overwrite_x or _datacopied(tmp, x)
+    norm = _normalization(norm, forward)
+    workers = _workers(workers)
+
+    if not forward:
+        if type == 2:
+            type = 3
+        elif type == 3:
+            type = 2
+
+    if n is not None:
+        tmp, copied = _fix_shape_1d(tmp, n, axis)
+        overwrite_x = overwrite_x or copied
+    elif tmp.shape[axis] < 1:
+        raise ValueError("invalid number of data points ({0}) specified"
+                         .format(tmp.shape[axis]))
+
+    out = (tmp if overwrite_x else None)
+
+    # For complex input, transform the real and imaginary components
+    # separately, passing orthogonalize through in both calls
+    if np.iscomplexobj(x):
+        out = np.empty_like(tmp) if out is None else out
+        transform(tmp.real, type, (axis,), norm, out.real, workers,
+                  orthogonalize)
+        transform(tmp.imag, type, (axis,), norm, out.imag, workers,
+                  orthogonalize)
+        return out
+
+    return transform(tmp, type, (axis,), norm, out, workers, orthogonalize)
+
+
+dct = functools.partial(_r2r, True, pfft.dct)
+dct.__name__ = 'dct'
+idct = functools.partial(_r2r, False, pfft.dct)
+idct.__name__ = 'idct'
+
+dst = functools.partial(_r2r, True, pfft.dst)
+dst.__name__ = 'dst'
+idst = functools.partial(_r2r, False, pfft.dst)
+idst.__name__ = 'idst'
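+
+# Note the inversion strategy used by ``_r2r`` above: for the backward
+# direction it swaps DCT/DST types 2 and 3, so e.g. ``idct(x)`` computes a
+# type-3 transform (the inverse of type 2) with inverse normalisation.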
+
+
+def _r2rn(forward, transform, x, type=2, s=None, axes=None, norm=None,
+          overwrite_x=False, workers=None, orthogonalize=None):
+    """Forward or backward nd DCT/DST
+
+    Parameters
+    ----------
+    forward : bool
+        Transform direction (determines type and normalisation)
+    transform : {pypocketfft.dct, pypocketfft.dst}
+        The transform to perform
+    """
+    tmp = _asfarray(x)
+
+    shape, axes = _init_nd_shape_and_axes(tmp, s, axes)
+    overwrite_x = overwrite_x or _datacopied(tmp, x)
+
+    if len(axes) == 0:
+        return x
+
+    tmp, copied = _fix_shape(tmp, shape, axes)
+    overwrite_x = overwrite_x or copied
+
+    if not forward:
+        if type == 2:
+            type = 3
+        elif type == 3:
+            type = 2
+
+    norm = _normalization(norm, forward)
+    workers = _workers(workers)
+    out = (tmp if overwrite_x else None)
+
+    # For complex input, transform the real and imaginary components
+    # separately, passing orthogonalize through in both calls
+    if np.iscomplexobj(x):
+        out = np.empty_like(tmp) if out is None else out
+        transform(tmp.real, type, axes, norm, out.real, workers,
+                  orthogonalize)
+        transform(tmp.imag, type, axes, norm, out.imag, workers,
+                  orthogonalize)
+        return out
+
+    return transform(tmp, type, axes, norm, out, workers, orthogonalize)
+
+
+dctn = functools.partial(_r2rn, True, pfft.dct)
+dctn.__name__ = 'dctn'
+idctn = functools.partial(_r2rn, False, pfft.dct)
+idctn.__name__ = 'idctn'
+
+dstn = functools.partial(_r2rn, True, pfft.dst)
+dstn.__name__ = 'dstn'
+idstn = functools.partial(_r2rn, False, pfft.dst)
+idstn.__name__ = 'idstn'
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fft/_pocketfft/tests/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/fft/_pocketfft/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fft/_pocketfft/tests/test_basic.py b/__packaged__/coreml/.python_dependencies/scipy/fft/_pocketfft/tests/test_basic.py
new file mode 100644
index 00000000..15af7c9d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fft/_pocketfft/tests/test_basic.py
@@ -0,0 +1,1022 @@
+# Created by Pearu Peterson, September 2002
+
+from numpy.testing import (assert_, assert_equal, assert_array_almost_equal,
+                           assert_array_almost_equal_nulp, assert_array_less,
+                           assert_allclose)
+import pytest
+from pytest import raises as assert_raises
+from scipy.fft._pocketfft import (ifft, fft, fftn, ifftn,
+                                  rfft, irfft, rfftn, irfftn, fft2,
+                                  hfft, ihfft, hfftn, ihfftn)
+
+from numpy import (arange, add, array, asarray, zeros, dot, exp, pi,
+                   swapaxes, cdouble)
+import numpy as np
+import numpy.fft
+from numpy.random import rand
+
+# "large" composite numbers supported by FFT._PYPOCKETFFT
+LARGE_COMPOSITE_SIZES = [
+    2**13,
+    2**5 * 3**5,
+    2**3 * 3**3 * 5**2,
+]
+SMALL_COMPOSITE_SIZES = [
+    2,
+    2*3*5,
+    2*2*3*3,
+]
+# prime
+LARGE_PRIME_SIZES = [
+    2011
+]
+SMALL_PRIME_SIZES = [
+    29
+]
+
+
+def _assert_close_in_norm(x, y, rtol, size, rdt):
+    # helper function for testing
+    err_msg = "size: %s  rdt: %s" % (size, rdt)
+    assert_array_less(np.linalg.norm(x - y), rtol*np.linalg.norm(x), err_msg)
+
+
+def random(size):
+    return rand(*size)
+
+def swap_byteorder(arr):
+    """Returns the same array with swapped byteorder"""
+    dtype = arr.dtype.newbyteorder('S')
+    return arr.astype(dtype)
+
+def get_mat(n):
+    data = arange(n)
+    data = add.outer(data, data)
+    return data
+
+
+def direct_dft(x):
+    x = asarray(x)
+    n = len(x)
+    y = zeros(n, dtype=cdouble)
+    w = -arange(n)*(2j*pi/n)
+    for i in range(n):
+        y[i] = dot(exp(i*w), x)
+    return y
+
+
+def direct_idft(x):
+    x = asarray(x)
+    n = len(x)
+    y = zeros(n, dtype=cdouble)
+    w = arange(n)*(2j*pi/n)
+    for i in range(n):
+        y[i] = dot(exp(i*w), x)/n
+    return y
+
+
+def direct_dftn(x):
+    x = asarray(x)
+    for axis in range(x.ndim):
+        x = fft(x, axis=axis)
+    return x
+
+
+def direct_idftn(x):
+    x = asarray(x)
+    for axis in range(x.ndim):
+        x = ifft(x, axis=axis)
+    return x
+
+
+def direct_rdft(x):
+    x = asarray(x)
+    n = len(x)
+    w = -arange(n)*(2j*pi/n)
+    y = zeros(n//2+1, dtype=cdouble)
+    for i in range(n//2+1):
+        y[i] = dot(exp(i*w), x)
+    return y
+
+
+def direct_irdft(x, n):
+    x = asarray(x)
+    x1 = zeros(n, dtype=cdouble)
+    for i in range(n//2+1):
+        x1[i] = x[i]
+        if i > 0 and 2*i < n:
+            x1[n-i] = np.conj(x[i])
+    return direct_idft(x1).real
+
+
+def direct_rdftn(x):
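+    # Real-input n-D reference: rfft along the last axis, then full complex
+    # FFTs over the remaining axes.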
+    return fftn(rfft(x), axes=range(x.ndim - 1))
+
+
+class _TestFFTBase:
+    def setup_method(self):
+        self.cdt = None
+        self.rdt = None
+        np.random.seed(1234)
+
+    def test_definition(self):
+        x = np.array([1,2,3,4+1j,1,2,3,4+2j], dtype=self.cdt)
+        y = fft(x)
+        assert_equal(y.dtype, self.cdt)
+        y1 = direct_dft(x)
+        assert_array_almost_equal(y,y1)
+        x = np.array([1,2,3,4+0j,5], dtype=self.cdt)
+        assert_array_almost_equal(fft(x),direct_dft(x))
+
+    def test_n_argument_real(self):
+        x1 = np.array([1,2,3,4], dtype=self.rdt)
+        x2 = np.array([1,2,3,4], dtype=self.rdt)
+        y = fft([x1,x2],n=4)
+        assert_equal(y.dtype, self.cdt)
+        assert_equal(y.shape,(2,4))
+        assert_array_almost_equal(y[0],direct_dft(x1))
+        assert_array_almost_equal(y[1],direct_dft(x2))
+
+    def _test_n_argument_complex(self):
+        x1 = np.array([1,2,3,4+1j], dtype=self.cdt)
+        x2 = np.array([1,2,3,4+1j], dtype=self.cdt)
+        y = fft([x1,x2],n=4)
+        assert_equal(y.dtype, self.cdt)
+        assert_equal(y.shape,(2,4))
+        assert_array_almost_equal(y[0],direct_dft(x1))
+        assert_array_almost_equal(y[1],direct_dft(x2))
+
+    def test_djbfft(self):
+        for i in range(2,14):
+            n = 2**i
+            x = np.arange(n)
+            y = fft(x.astype(complex))
+            y2 = numpy.fft.fft(x)
+            assert_array_almost_equal(y,y2)
+            y = fft(x)
+            assert_array_almost_equal(y,y2)
+
+    def test_invalid_sizes(self):
+        assert_raises(ValueError, fft, [])
+        assert_raises(ValueError, fft, [[1,1],[2,2]], -5)
+
+
+@pytest.mark.skipif(np.longdouble is np.float64,
+                    reason="Long double is aliased to double")
+class TestLongDoubleFFT(_TestFFTBase):
+    def setup_method(self):
+        self.cdt = np.longcomplex
+        self.rdt = np.longdouble
+
+
+class TestDoubleFFT(_TestFFTBase):
+    def setup_method(self):
+        self.cdt = np.cdouble
+        self.rdt = np.double
+
+
+class TestSingleFFT(_TestFFTBase):
+    def setup_method(self):
+        self.cdt = np.complex64
+        self.rdt = np.float32
+
+
+class TestFloat16FFT:
+
+    def test_1_argument_real(self):
+        x1 = np.array([1, 2, 3, 4], dtype=np.float16)
+        y = fft(x1, n=4)
+        assert_equal(y.dtype, np.complex64)
+        assert_equal(y.shape, (4, ))
+        assert_array_almost_equal(y, direct_dft(x1.astype(np.float32)))
+
+    def test_n_argument_real(self):
+        x1 = np.array([1, 2, 3, 4], dtype=np.float16)
+        x2 = np.array([1, 2, 3, 4], dtype=np.float16)
+        y = fft([x1, x2], n=4)
+        assert_equal(y.dtype, np.complex64)
+        assert_equal(y.shape, (2, 4))
+        assert_array_almost_equal(y[0], direct_dft(x1.astype(np.float32)))
+        assert_array_almost_equal(y[1], direct_dft(x2.astype(np.float32)))
+
+
+class _TestIFFTBase:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_definition(self):
+        x = np.array([1,2,3,4+1j,1,2,3,4+2j], self.cdt)
+        y = ifft(x)
+        y1 = direct_idft(x)
+        assert_equal(y.dtype, self.cdt)
+        assert_array_almost_equal(y,y1)
+
+        x = np.array([1,2,3,4+0j,5], self.cdt)
+        assert_array_almost_equal(ifft(x),direct_idft(x))
+
+    def test_definition_real(self):
+        x = np.array([1,2,3,4,1,2,3,4], self.rdt)
+        y = ifft(x)
+        assert_equal(y.dtype, self.cdt)
+        y1 = direct_idft(x)
+        assert_array_almost_equal(y,y1)
+
+        x = np.array([1,2,3,4,5], dtype=self.rdt)
+        y = ifft(x)
+        assert_equal(y.dtype, self.cdt)
+        assert_array_almost_equal(y, direct_idft(x))
+
+    def test_djbfft(self):
+        for i in range(2,14):
+            n = 2**i
+            x = np.arange(n)
+            y = ifft(x.astype(self.cdt))
+            y2 = numpy.fft.ifft(x)
+            assert_allclose(y,y2, rtol=self.rtol, atol=self.atol)
+            y = ifft(x)
+            assert_allclose(y,y2, rtol=self.rtol, atol=self.atol)
+
+    def test_random_complex(self):
+        for size in [1,51,111,100,200,64,128,256,1024]:
+            x = random([size]).astype(self.cdt)
+            x = random([size]).astype(self.cdt) + 1j*x
+            y1 = ifft(fft(x))
+            y2 = fft(ifft(x))
+            assert_equal(y1.dtype, self.cdt)
+            assert_equal(y2.dtype, self.cdt)
+            assert_array_almost_equal(y1, x)
+            assert_array_almost_equal(y2, x)
+
+    def test_random_real(self):
+        for size in [1,51,111,100,200,64,128,256,1024]:
+            x = random([size]).astype(self.rdt)
+            y1 = ifft(fft(x))
+            y2 = fft(ifft(x))
+            assert_equal(y1.dtype, self.cdt)
+            assert_equal(y2.dtype, self.cdt)
+            assert_array_almost_equal(y1, x)
+            assert_array_almost_equal(y2, x)
+
+    def test_size_accuracy(self):
+        # Sanity check for the accuracy for prime and non-prime sized inputs
+        for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:
+            np.random.seed(1234)
+            x = np.random.rand(size).astype(self.rdt)
+            y = ifft(fft(x))
+            _assert_close_in_norm(x, y, self.rtol, size, self.rdt)
+            y = fft(ifft(x))
+            _assert_close_in_norm(x, y, self.rtol, size, self.rdt)
+
+            x = (x + 1j*np.random.rand(size)).astype(self.cdt)
+            y = ifft(fft(x))
+            _assert_close_in_norm(x, y, self.rtol, size, self.rdt)
+            y = fft(ifft(x))
+            _assert_close_in_norm(x, y, self.rtol, size, self.rdt)
+
+    def test_invalid_sizes(self):
+        assert_raises(ValueError, ifft, [])
+        assert_raises(ValueError, ifft, [[1,1],[2,2]], -5)
+
+
+@pytest.mark.skipif(np.longdouble is np.float64,
+                    reason="Long double is aliased to double")
+class TestLongDoubleIFFT(_TestIFFTBase):
+    def setup_method(self):
+        self.cdt = np.longcomplex
+        self.rdt = np.longdouble
+        self.rtol = 1e-10
+        self.atol = 1e-10
+
+
+class TestDoubleIFFT(_TestIFFTBase):
+    def setup_method(self):
+        self.cdt = np.cdouble
+        self.rdt = np.double
+        self.rtol = 1e-10
+        self.atol = 1e-10
+
+
+class TestSingleIFFT(_TestIFFTBase):
+    def setup_method(self):
+        self.cdt = np.complex64
+        self.rdt = np.float32
+        self.rtol = 1e-5
+        self.atol = 1e-4
+
+
+class _TestRFFTBase:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_definition(self):
+        for t in [[1, 2, 3, 4, 1, 2, 3, 4], [1, 2, 3, 4, 1, 2, 3, 4, 5]]:
+            x = np.array(t, dtype=self.rdt)
+            y = rfft(x)
+            y1 = direct_rdft(x)
+            assert_array_almost_equal(y,y1)
+            assert_equal(y.dtype, self.cdt)
+
+    def test_djbfft(self):
+        for i in range(2,14):
+            n = 2**i
+            x = np.arange(n)
+            y1 = np.fft.rfft(x)
+            y = rfft(x)
+            assert_array_almost_equal(y,y1)
+
+    def test_invalid_sizes(self):
+        assert_raises(ValueError, rfft, [])
+        assert_raises(ValueError, rfft, [[1,1],[2,2]], -5)
+
+    def test_complex_input(self):
+        x = np.zeros(10, dtype=self.cdt)
+        with assert_raises(TypeError, match="x must be a real sequence"):
+            rfft(x)
+
+    # See gh-5790
+    class MockSeries:
+        def __init__(self, data):
+            self.data = np.asarray(data)
+
+        def __getattr__(self, item):
+            try:
+                return getattr(self.data, item)
+            except AttributeError as e:
+                raise AttributeError(("'MockSeries' object "
+                                      "has no attribute '{attr}'".
+                                      format(attr=item))) from e
+
+    def test_non_ndarray_with_dtype(self):
+        x = np.array([1., 2., 3., 4., 5.])
+        xs = _TestRFFTBase.MockSeries(x)
+
+        expected = [1, 2, 3, 4, 5]
+        rfft(xs)
+
+        # Data should not have been overwritten
+        assert_equal(x, expected)
+        assert_equal(xs.data, expected)
+
+@pytest.mark.skipif(np.longfloat is np.float64,
+                    reason="Long double is aliased to double")
+class TestRFFTLongDouble(_TestRFFTBase):
+    def setup_method(self):
+        self.cdt = np.longcomplex
+        self.rdt = np.longfloat
+
+
+class TestRFFTDouble(_TestRFFTBase):
+    def setup_method(self):
+        self.cdt = np.cdouble
+        self.rdt = np.double
+
+
+class TestRFFTSingle(_TestRFFTBase):
+    def setup_method(self):
+        self.cdt = np.complex64
+        self.rdt = np.float32
+
+
+class _TestIRFFTBase:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_definition(self):
+        x1_1 = [1,2+3j,4+1j,2+3j,4,2-3j,4-1j,2-3j]
+        x1 = x1_1[:5]
+        x2_1 = [1,2+3j,4+1j,2+3j,4+5j,4-5j,2-3j,4-1j,2-3j]
+        x2 = x2_1[:5]
+
+        def _test(x, xr):
+            y = irfft(np.array(x, dtype=self.cdt), n=len(xr))
+            y1 = direct_irdft(x, len(xr))
+            assert_equal(y.dtype, self.rdt)
+            assert_array_almost_equal(y,y1, decimal=self.ndec)
+            assert_array_almost_equal(y,ifft(xr), decimal=self.ndec)
+
+        _test(x1, x1_1)
+        _test(x2, x2_1)
+
+    def test_djbfft(self):
+        for i in range(2,14):
+            n = 2**i
+            x = np.arange(-1, n, 2) + 1j * np.arange(0, n+1, 2)
+            x[0] = 0
+            if n % 2 == 0:
+                x[-1] = np.real(x[-1])
+            y1 = np.fft.irfft(x)
+            y = irfft(x)
+            assert_array_almost_equal(y,y1)
+
+    def test_random_real(self):
+        for size in [1,51,111,100,200,64,128,256,1024]:
+            x = random([size]).astype(self.rdt)
+            y1 = irfft(rfft(x), n=size)
+            y2 = rfft(irfft(x, n=(size*2-1)))
+            assert_equal(y1.dtype, self.rdt)
+            assert_equal(y2.dtype, self.cdt)
+            assert_array_almost_equal(y1, x, decimal=self.ndec,
+                                       err_msg="size=%d" % size)
+            assert_array_almost_equal(y2, x, decimal=self.ndec,
+                                       err_msg="size=%d" % size)
+
+    def test_size_accuracy(self):
+        # Sanity check for the accuracy for prime and non-prime sized inputs
+        if self.rdt == np.float32:
+            rtol = 1e-5
+        elif self.rdt == np.float64:
+            rtol = 1e-10
+
+        for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:
+            np.random.seed(1234)
+            x = np.random.rand(size).astype(self.rdt)
+            y = irfft(rfft(x), len(x))
+            _assert_close_in_norm(x, y, rtol, size, self.rdt)
+            y = rfft(irfft(x, 2 * len(x) - 1))
+            _assert_close_in_norm(x, y, rtol, size, self.rdt)
+
+    def test_invalid_sizes(self):
+        assert_raises(ValueError, irfft, [])
+        assert_raises(ValueError, irfft, [[1,1],[2,2]], -5)
+
+
+# self.ndec is bogus; we should have an assert_array_approx_equal for the
+# number of significant digits
+
+@pytest.mark.skipif(np.longfloat is np.float64,
+                    reason="Long double is aliased to double")
+class TestIRFFTLongDouble(_TestIRFFTBase):
+    def setup_method(self):
+        self.cdt = np.cdouble
+        self.rdt = np.double
+        self.ndec = 14
+
+
+class TestIRFFTDouble(_TestIRFFTBase):
+    def setup_method(self):
+        self.cdt = np.cdouble
+        self.rdt = np.double
+        self.ndec = 14
+
+
+class TestIRFFTSingle(_TestIRFFTBase):
+    def setup_method(self):
+        self.cdt = np.complex64
+        self.rdt = np.float32
+        self.ndec = 5
+
+
+class Testfft2:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_regression_244(self):
+        """FFT returns wrong result with axes parameter."""
+        # fftn (and hence fft2) used to break when both axes and shape were
+        # used
+        x = numpy.ones((4, 4, 2))
+        y = fft2(x, s=(8, 8), axes=(-3, -2))
+        y_r = numpy.fft.fftn(x, s=(8, 8), axes=(-3, -2))
+        assert_array_almost_equal(y, y_r)
+
+    def test_invalid_sizes(self):
+        assert_raises(ValueError, fft2, [[]])
+        assert_raises(ValueError, fft2, [[1, 1], [2, 2]], (4, -3))
+
+
+class TestFftnSingle:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_definition(self):
+        x = [[1, 2, 3],
+             [4, 5, 6],
+             [7, 8, 9]]
+        y = fftn(np.array(x, np.float32))
+        assert_(y.dtype == np.complex64,
+                msg="double precision output with single precision")
+
+        y_r = np.array(fftn(x), np.complex64)
+        assert_array_almost_equal_nulp(y, y_r)
+
+    @pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES)
+    def test_size_accuracy_small(self, size):
+        x = np.random.rand(size, size) + 1j*np.random.rand(size, size)
+        y1 = fftn(x.real.astype(np.float32))
+        y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
+
+        assert_equal(y1.dtype, np.complex64)
+        assert_array_almost_equal_nulp(y1, y2, 2000)
+
+    @pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES)
+    def test_size_accuracy_large(self, size):
+        x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3)
+        y1 = fftn(x.real.astype(np.float32))
+        y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
+
+        assert_equal(y1.dtype, np.complex64)
+        assert_array_almost_equal_nulp(y1, y2, 2000)
+
+    def test_definition_float16(self):
+        x = [[1, 2, 3],
+             [4, 5, 6],
+             [7, 8, 9]]
+        y = fftn(np.array(x, np.float16))
+        assert_equal(y.dtype, np.complex64)
+        y_r = np.array(fftn(x), np.complex64)
+        assert_array_almost_equal_nulp(y, y_r)
+
+    @pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES)
+    def test_float16_input_small(self, size):
+        x = np.random.rand(size, size) + 1j*np.random.rand(size, size)
+        y1 = fftn(x.real.astype(np.float16))
+        y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
+
+        assert_equal(y1.dtype, np.complex64)
+        assert_array_almost_equal_nulp(y1, y2, 5e5)
+
+    @pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES)
+    def test_float16_input_large(self, size):
+        x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3)
+        y1 = fftn(x.real.astype(np.float16))
+        y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
+
+        assert_equal(y1.dtype, np.complex64)
+        assert_array_almost_equal_nulp(y1, y2, 2e6)
+
+
+class TestFftn:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_definition(self):
+        x = [[1, 2, 3],
+             [4, 5, 6],
+             [7, 8, 9]]
+        y = fftn(x)
+        assert_array_almost_equal(y, direct_dftn(x))
+
+        x = random((20, 26))
+        assert_array_almost_equal(fftn(x), direct_dftn(x))
+
+        x = random((5, 4, 3, 20))
+        assert_array_almost_equal(fftn(x), direct_dftn(x))
+
+    def test_axes_argument(self):
+        # plane == ji_plane, x== kji_space
+        plane1 = [[1, 2, 3],
+                  [4, 5, 6],
+                  [7, 8, 9]]
+        plane2 = [[10, 11, 12],
+                  [13, 14, 15],
+                  [16, 17, 18]]
+        plane3 = [[19, 20, 21],
+                  [22, 23, 24],
+                  [25, 26, 27]]
+        ki_plane1 = [[1, 2, 3],
+                     [10, 11, 12],
+                     [19, 20, 21]]
+        ki_plane2 = [[4, 5, 6],
+                     [13, 14, 15],
+                     [22, 23, 24]]
+        ki_plane3 = [[7, 8, 9],
+                     [16, 17, 18],
+                     [25, 26, 27]]
+        jk_plane1 = [[1, 10, 19],
+                     [4, 13, 22],
+                     [7, 16, 25]]
+        jk_plane2 = [[2, 11, 20],
+                     [5, 14, 23],
+                     [8, 17, 26]]
+        jk_plane3 = [[3, 12, 21],
+                     [6, 15, 24],
+                     [9, 18, 27]]
+        kj_plane1 = [[1, 4, 7],
+                     [10, 13, 16], [19, 22, 25]]
+        kj_plane2 = [[2, 5, 8],
+                     [11, 14, 17], [20, 23, 26]]
+        kj_plane3 = [[3, 6, 9],
+                     [12, 15, 18], [21, 24, 27]]
+        ij_plane1 = [[1, 4, 7],
+                     [2, 5, 8],
+                     [3, 6, 9]]
+        ij_plane2 = [[10, 13, 16],
+                     [11, 14, 17],
+                     [12, 15, 18]]
+        ij_plane3 = [[19, 22, 25],
+                     [20, 23, 26],
+                     [21, 24, 27]]
+        ik_plane1 = [[1, 10, 19],
+                     [2, 11, 20],
+                     [3, 12, 21]]
+        ik_plane2 = [[4, 13, 22],
+                     [5, 14, 23],
+                     [6, 15, 24]]
+        ik_plane3 = [[7, 16, 25],
+                     [8, 17, 26],
+                     [9, 18, 27]]
+        ijk_space = [jk_plane1, jk_plane2, jk_plane3]
+        ikj_space = [kj_plane1, kj_plane2, kj_plane3]
+        jik_space = [ik_plane1, ik_plane2, ik_plane3]
+        jki_space = [ki_plane1, ki_plane2, ki_plane3]
+        kij_space = [ij_plane1, ij_plane2, ij_plane3]
+        x = array([plane1, plane2, plane3])
+
+        assert_array_almost_equal(fftn(x),
+                                  fftn(x, axes=(-3, -2, -1)))  # kji_space
+        assert_array_almost_equal(fftn(x), fftn(x, axes=(0, 1, 2)))
+        assert_array_almost_equal(fftn(x, axes=(0, 2)), fftn(x, axes=(0, -1)))
+        y = fftn(x, axes=(2, 1, 0))  # ijk_space
+        assert_array_almost_equal(swapaxes(y, -1, -3), fftn(ijk_space))
+        y = fftn(x, axes=(2, 0, 1))  # ikj_space
+        assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -1, -2),
+                                  fftn(ikj_space))
+        y = fftn(x, axes=(1, 2, 0))  # jik_space
+        assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -3, -2),
+                                  fftn(jik_space))
+        y = fftn(x, axes=(1, 0, 2))  # jki_space
+        assert_array_almost_equal(swapaxes(y, -2, -3), fftn(jki_space))
+        y = fftn(x, axes=(0, 2, 1))  # kij_space
+        assert_array_almost_equal(swapaxes(y, -2, -1), fftn(kij_space))
+
+        y = fftn(x, axes=(-2, -1))  # ji_plane
+        assert_array_almost_equal(fftn(plane1), y[0])
+        assert_array_almost_equal(fftn(plane2), y[1])
+        assert_array_almost_equal(fftn(plane3), y[2])
+
+        y = fftn(x, axes=(1, 2))  # ji_plane
+        assert_array_almost_equal(fftn(plane1), y[0])
+        assert_array_almost_equal(fftn(plane2), y[1])
+        assert_array_almost_equal(fftn(plane3), y[2])
+
+        y = fftn(x, axes=(-3, -2))  # kj_plane
+        assert_array_almost_equal(fftn(x[:, :, 0]), y[:, :, 0])
+        assert_array_almost_equal(fftn(x[:, :, 1]), y[:, :, 1])
+        assert_array_almost_equal(fftn(x[:, :, 2]), y[:, :, 2])
+
+        y = fftn(x, axes=(-3, -1))  # ki_plane
+        assert_array_almost_equal(fftn(x[:, 0, :]), y[:, 0, :])
+        assert_array_almost_equal(fftn(x[:, 1, :]), y[:, 1, :])
+        assert_array_almost_equal(fftn(x[:, 2, :]), y[:, 2, :])
+
+        y = fftn(x, axes=(-1, -2))  # ij_plane
+        assert_array_almost_equal(fftn(ij_plane1), swapaxes(y[0], -2, -1))
+        assert_array_almost_equal(fftn(ij_plane2), swapaxes(y[1], -2, -1))
+        assert_array_almost_equal(fftn(ij_plane3), swapaxes(y[2], -2, -1))
+
+        y = fftn(x, axes=(-1, -3))  # ik_plane
+        assert_array_almost_equal(fftn(ik_plane1),
+                                  swapaxes(y[:, 0, :], -1, -2))
+        assert_array_almost_equal(fftn(ik_plane2),
+                                  swapaxes(y[:, 1, :], -1, -2))
+        assert_array_almost_equal(fftn(ik_plane3),
+                                  swapaxes(y[:, 2, :], -1, -2))
+
+        y = fftn(x, axes=(-2, -3))  # jk_plane
+        assert_array_almost_equal(fftn(jk_plane1),
+                                  swapaxes(y[:, :, 0], -1, -2))
+        assert_array_almost_equal(fftn(jk_plane2),
+                                  swapaxes(y[:, :, 1], -1, -2))
+        assert_array_almost_equal(fftn(jk_plane3),
+                                  swapaxes(y[:, :, 2], -1, -2))
+
+        y = fftn(x, axes=(-1,))  # i_line
+        for i in range(3):
+            for j in range(3):
+                assert_array_almost_equal(fft(x[i, j, :]), y[i, j, :])
+        y = fftn(x, axes=(-2,))  # j_line
+        for i in range(3):
+            for j in range(3):
+                assert_array_almost_equal(fft(x[i, :, j]), y[i, :, j])
+        y = fftn(x, axes=(0,))  # k_line
+        for i in range(3):
+            for j in range(3):
+                assert_array_almost_equal(fft(x[:, i, j]), y[:, i, j])
+
+        y = fftn(x, axes=())  # point
+        assert_array_almost_equal(y, x)
+
+    def test_shape_argument(self):
+        small_x = [[1, 2, 3],
+                   [4, 5, 6]]
+        large_x1 = [[1, 2, 3, 0],
+                    [4, 5, 6, 0],
+                    [0, 0, 0, 0],
+                    [0, 0, 0, 0]]
+
+        y = fftn(small_x, s=(4, 4))
+        assert_array_almost_equal(y, fftn(large_x1))
+
+        y = fftn(small_x, s=(3, 4))
+        assert_array_almost_equal(y, fftn(large_x1[:-1]))
+
+    def test_shape_axes_argument(self):
+        small_x = [[1, 2, 3],
+                   [4, 5, 6],
+                   [7, 8, 9]]
+        large_x1 = array([[1, 2, 3, 0],
+                          [4, 5, 6, 0],
+                          [7, 8, 9, 0],
+                          [0, 0, 0, 0]])
+        y = fftn(small_x, s=(4, 4), axes=(-2, -1))
+        assert_array_almost_equal(y, fftn(large_x1))
+        y = fftn(small_x, s=(4, 4), axes=(-1, -2))
+
+        assert_array_almost_equal(y, swapaxes(
+            fftn(swapaxes(large_x1, -1, -2)), -1, -2))
+
+    def test_shape_axes_argument2(self):
+        # Change shape of the last axis
+        x = numpy.random.random((10, 5, 3, 7))
+        y = fftn(x, axes=(-1,), s=(8,))
+        assert_array_almost_equal(y, fft(x, axis=-1, n=8))
+
+        # Change shape of an arbitrary axis which is not the last one
+        x = numpy.random.random((10, 5, 3, 7))
+        y = fftn(x, axes=(-2,), s=(8,))
+        assert_array_almost_equal(y, fft(x, axis=-2, n=8))
+
+        # Change shape of axes: cf #244, where shape and axes were mixed up
+        x = numpy.random.random((4, 4, 2))
+        y = fftn(x, axes=(-3, -2), s=(8, 8))
+        assert_array_almost_equal(y,
+                                  numpy.fft.fftn(x, axes=(-3, -2), s=(8, 8)))
+
+    def test_shape_argument_more(self):
+        x = zeros((4, 4, 2))
+        with assert_raises(ValueError,
+                           match="shape requires more axes than are present"):
+            fftn(x, s=(8, 8, 2, 1))
+
+    def test_invalid_sizes(self):
+        with assert_raises(ValueError,
+                           match="invalid number of data points"
+                           r" \(\[1, 0\]\) specified"):
+            fftn([[]])
+
+        with assert_raises(ValueError,
+                           match="invalid number of data points"
+                           r" \(\[4, -3\]\) specified"):
+            fftn([[1, 1], [2, 2]], (4, -3))
+
+    def test_no_axes(self):
+        x = numpy.random.random((2,2,2))
+        assert_allclose(fftn(x, axes=[]), x, atol=1e-7)
+
+
+class TestIfftn:
+    dtype = None
+    cdtype = None
+
+    def setup_method(self):
+        np.random.seed(1234)
+
+    @pytest.mark.parametrize('dtype,cdtype,maxnlp',
+                             [(np.float64, np.complex128, 2000),
+                              (np.float32, np.complex64, 3500)])
+    def test_definition(self, dtype, cdtype, maxnlp):
+        x = np.array([[1, 2, 3],
+                      [4, 5, 6],
+                      [7, 8, 9]], dtype=dtype)
+        y = ifftn(x)
+        assert_equal(y.dtype, cdtype)
+        assert_array_almost_equal_nulp(y, direct_idftn(x), maxnlp)
+
+        x = random((20, 26))
+        assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp)
+
+        x = random((5, 4, 3, 20))
+        assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp)
+
+    @pytest.mark.parametrize('maxnlp', [2000, 3500])
+    @pytest.mark.parametrize('size', [1, 2, 51, 32, 64, 92])
+    def test_random_complex(self, maxnlp, size):
+        x = random([size, size]) + 1j*random([size, size])
+        assert_array_almost_equal_nulp(ifftn(fftn(x)), x, maxnlp)
+        assert_array_almost_equal_nulp(fftn(ifftn(x)), x, maxnlp)
+
+    def test_invalid_sizes(self):
+        with assert_raises(ValueError,
+                           match="invalid number of data points"
+                           r" \(\[1, 0\]\) specified"):
+            ifftn([[]])
+
+        with assert_raises(ValueError,
+                           match="invalid number of data points"
+                           r" \(\[4, -3\]\) specified"):
+            ifftn([[1, 1], [2, 2]], (4, -3))
+
+    def test_no_axes(self):
+        x = numpy.random.random((2,2,2))
+        assert_allclose(ifftn(x, axes=[]), x, atol=1e-7)
+
+class TestRfftn:
+    dtype = None
+    cdtype = None
+
+    def setup_method(self):
+        np.random.seed(1234)
+
+    @pytest.mark.parametrize('dtype,cdtype,maxnlp',
+                             [(np.float64, np.complex128, 2000),
+                              (np.float32, np.complex64, 3500)])
+    def test_definition(self, dtype, cdtype, maxnlp):
+        x = np.array([[1, 2, 3],
+                      [4, 5, 6],
+                      [7, 8, 9]], dtype=dtype)
+        y = rfftn(x)
+        assert_equal(y.dtype, cdtype)
+        assert_array_almost_equal_nulp(y, direct_rdftn(x), maxnlp)
+
+        x = random((20, 26))
+        assert_array_almost_equal_nulp(rfftn(x), direct_rdftn(x), maxnlp)
+
+        x = random((5, 4, 3, 20))
+        assert_array_almost_equal_nulp(rfftn(x), direct_rdftn(x), maxnlp)
+
+    @pytest.mark.parametrize('size', [1, 2, 51, 32, 64, 92])
+    def test_random(self, size):
+        x = random([size, size])
+        assert_allclose(irfftn(rfftn(x), x.shape), x, atol=1e-10)
+
+    @pytest.mark.parametrize('func', [rfftn, irfftn])
+    def test_invalid_sizes(self, func):
+        with assert_raises(ValueError,
+                           match="invalid number of data points"
+                           r" \(\[1, 0\]\) specified"):
+            func([[]])
+
+        with assert_raises(ValueError,
+                           match="invalid number of data points"
+                           r" \(\[4, -3\]\) specified"):
+            func([[1, 1], [2, 2]], (4, -3))
+
+    @pytest.mark.parametrize('func', [rfftn, irfftn])
+    def test_no_axes(self, func):
+        with assert_raises(ValueError,
+                           match="at least 1 axis must be transformed"):
+            func([], axes=[])
+
+    def test_complex_input(self):
+        with assert_raises(TypeError, match="x must be a real sequence"):
+            rfftn(np.zeros(10, dtype=np.complex64))
+
+
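+# Minimal array-likes that expose their data only via __array_interface__ or
+# __array__, used to check that the FFT routines accept such inputs.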
+class FakeArray:
+    def __init__(self, data):
+        self._data = data
+        self.__array_interface__ = data.__array_interface__
+
+
+class FakeArray2:
+    def __init__(self, data):
+        self._data = data
+
+    def __array__(self):
+        return self._data
+
+# TODO: Is this test actually valuable? The behavior it's testing shouldn't be
+# relied upon by users except for overwrite_x = False
+class TestOverwrite:
+    """Check input overwrite behavior of the FFT functions."""
+
+    real_dtypes = [np.float32, np.float64, np.longfloat]
+    dtypes = real_dtypes + [np.complex64, np.complex128, np.longcomplex]
+    fftsizes = [8, 16, 32]
+
+    def _check(self, x, routine, fftsize, axis, overwrite_x, should_overwrite):
+        x2 = x.copy()
+        for fake in [lambda x: x, FakeArray, FakeArray2]:
+            routine(fake(x2), fftsize, axis, overwrite_x=overwrite_x)
+
+            sig = "%s(%s%r, %r, axis=%r, overwrite_x=%r)" % (
+                routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x)
+            if not should_overwrite:
+                assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig)
+
+    def _check_1d(self, routine, dtype, shape, axis, overwritable_dtypes,
+                  fftsize, overwrite_x):
+        np.random.seed(1234)
+        if np.issubdtype(dtype, np.complexfloating):
+            data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
+        else:
+            data = np.random.randn(*shape)
+        data = data.astype(dtype)
+
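+        # In-place overwriting is only expected when the dtype is natively
+        # supported by the transform and the FFT size fits within the input.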
+        should_overwrite = (overwrite_x
+                            and dtype in overwritable_dtypes
+                            and fftsize <= shape[axis])
+        self._check(data, routine, fftsize, axis,
+                    overwrite_x=overwrite_x,
+                    should_overwrite=should_overwrite)
+
+    @pytest.mark.parametrize('dtype', dtypes)
+    @pytest.mark.parametrize('fftsize', fftsizes)
+    @pytest.mark.parametrize('overwrite_x', [True, False])
+    @pytest.mark.parametrize('shape,axes', [((16,), -1),
+                                            ((16, 2), 0),
+                                            ((2, 16), 1)])
+    def test_fft_ifft(self, dtype, fftsize, overwrite_x, shape, axes):
+        overwritable = (np.longcomplex, np.complex128, np.complex64)
+        self._check_1d(fft, dtype, shape, axes, overwritable,
+                       fftsize, overwrite_x)
+        self._check_1d(ifft, dtype, shape, axes, overwritable,
+                       fftsize, overwrite_x)
+
+    @pytest.mark.parametrize('dtype', real_dtypes)
+    @pytest.mark.parametrize('fftsize', fftsizes)
+    @pytest.mark.parametrize('overwrite_x', [True, False])
+    @pytest.mark.parametrize('shape,axes', [((16,), -1),
+                                            ((16, 2), 0),
+                                            ((2, 16), 1)])
+    def test_rfft_irfft(self, dtype, fftsize, overwrite_x, shape, axes):
+        overwritable = self.real_dtypes
+        self._check_1d(irfft, dtype, shape, axes, overwritable,
+                       fftsize, overwrite_x)
+        self._check_1d(rfft, dtype, shape, axes, overwritable,
+                       fftsize, overwrite_x)
+
+    def _check_nd_one(self, routine, dtype, shape, axes, overwritable_dtypes,
+                      overwrite_x):
+        np.random.seed(1234)
+        if np.issubdtype(dtype, np.complexfloating):
+            data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
+        else:
+            data = np.random.randn(*shape)
+        data = data.astype(dtype)
+
+        def fftshape_iter(shp):
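+            # Yield every combination of halved, original and doubled
+            # lengths along each axis of shp.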
+            if len(shp) <= 0:
+                yield ()
+            else:
+                for j in (shp[0]//2, shp[0], shp[0]*2):
+                    for rest in fftshape_iter(shp[1:]):
+                        yield (j,) + rest
+
+        def part_shape(shape, axes):
+            if axes is None:
+                return shape
+            else:
+                return tuple(np.take(shape, axes))
+
+        def should_overwrite(data, shape, axes):
+            s = part_shape(data.shape, axes)
+            return (overwrite_x and
+                    np.prod(shape) <= np.prod(s)
+                    and dtype in overwritable_dtypes)
+
+        for fftshape in fftshape_iter(part_shape(shape, axes)):
+            self._check(data, routine, fftshape, axes,
+                        overwrite_x=overwrite_x,
+                        should_overwrite=should_overwrite(data, fftshape, axes))
+            if data.ndim > 1:
+                # check fortran order
+                self._check(data.T, routine, fftshape, axes,
+                            overwrite_x=overwrite_x,
+                            should_overwrite=should_overwrite(
+                                data.T, fftshape, axes))
+
+    @pytest.mark.parametrize('dtype', dtypes)
+    @pytest.mark.parametrize('overwrite_x', [True, False])
+    @pytest.mark.parametrize('shape,axes', [((16,), None),
+                                            ((16,), (0,)),
+                                            ((16, 2), (0,)),
+                                            ((2, 16), (1,)),
+                                            ((8, 16), None),
+                                            ((8, 16), (0, 1)),
+                                            ((8, 16, 2), (0, 1)),
+                                            ((8, 16, 2), (1, 2)),
+                                            ((8, 16, 2), (0,)),
+                                            ((8, 16, 2), (1,)),
+                                            ((8, 16, 2), (2,)),
+                                            ((8, 16, 2), None),
+                                            ((8, 16, 2), (0, 1, 2))])
+    def test_fftn_ifftn(self, dtype, overwrite_x, shape, axes):
+        overwritable = (np.longcomplex, np.complex128, np.complex64)
+        self._check_nd_one(fftn, dtype, shape, axes, overwritable,
+                           overwrite_x)
+        self._check_nd_one(ifftn, dtype, shape, axes, overwritable,
+                           overwrite_x)
+
+
+@pytest.mark.parametrize('func', [fft, ifft, fftn, ifftn,
+                                 rfft, irfft, rfftn, irfftn])
+def test_invalid_norm(func):
+    x = np.arange(10, dtype=float)
+    with assert_raises(ValueError,
+                       match='Invalid norm value \'o\', should be'
+                             ' "backward", "ortho" or "forward"'):
+        func(x, norm='o')
+
+
+@pytest.mark.parametrize('func', [fft, ifft, fftn, ifftn,
+                                   irfft, irfftn, hfft, hfftn])
+def test_swapped_byte_order_complex(func):
+    rng = np.random.RandomState(1234)
+    x = rng.rand(10) + 1j * rng.rand(10)
+    assert_allclose(func(swap_byteorder(x)), func(x))
+
+
+@pytest.mark.parametrize('func', [ihfft, ihfftn, rfft, rfftn])
+def test_swapped_byte_order_real(func):
+    rng = np.random.RandomState(1234)
+    x = rng.rand(10)
+    assert_allclose(func(swap_byteorder(x)), func(x))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fft/_pocketfft/tests/test_real_transforms.py b/__packaged__/coreml/.python_dependencies/scipy/fft/_pocketfft/tests/test_real_transforms.py
new file mode 100644
index 00000000..072651dc
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fft/_pocketfft/tests/test_real_transforms.py
@@ -0,0 +1,493 @@
+from os.path import join, dirname
+from typing import Callable, Dict, Tuple, Union, Type
+
+import numpy as np
+from numpy.testing import (
+    assert_array_almost_equal, assert_equal, assert_allclose)
+import pytest
+from pytest import raises as assert_raises
+
+from scipy.fft._pocketfft.realtransforms import (
+    dct, idct, dst, idst, dctn, idctn, dstn, idstn)
+
+fftpack_test_dir = join(dirname(__file__), '..', '..', '..', 'fftpack', 'tests')
+
+MDATA_COUNT = 8
+FFTWDATA_COUNT = 14
+
+def is_longdouble_binary_compatible():
+    try:
+        one = np.frombuffer(
+            b'\x00\x00\x00\x00\x00\x00\x00\x80\xff\x3f\x00\x00\x00\x00\x00\x00',
+            dtype='<f16')
+        return one == np.longfloat(1.)
+    except TypeError:
+        return False
+
+
+DecMapType = Dict[
+    Tuple[Callable[..., np.ndarray], Union[Type[np.floating], Type[int]], int],
+    int,
+]
+
+# (transform function, dtype, type) -> decimal precision
+dec_map: DecMapType = {
+    # DCT
+    (dct, np.double, 1): 13,
+    (dct, np.float32, 1): 6,
+
+    (dct, np.double, 2): 14,
+    (dct, np.float32, 2): 5,
+
+    (dct, np.double, 3): 14,
+    (dct, np.float32, 3): 5,
+
+    (dct, np.double, 4): 13,
+    (dct, np.float32, 4): 6,
+
+    # IDCT
+    (idct, np.double, 1): 14,
+    (idct, np.float32, 1): 6,
+
+    (idct, np.double, 2): 14,
+    (idct, np.float32, 2): 5,
+
+    (idct, np.double, 3): 14,
+    (idct, np.float32, 3): 5,
+
+    (idct, np.double, 4): 14,
+    (idct, np.float32, 4): 6,
+
+    # DST
+    (dst, np.double, 1): 13,
+    (dst, np.float32, 1): 6,
+
+    (dst, np.double, 2): 14,
+    (dst, np.float32, 2): 6,
+
+    (dst, np.double, 3): 14,
+    (dst, np.float32, 3): 7,
+
+    (dst, np.double, 4): 13,
+    (dst, np.float32, 4): 6,
+
+    # IDST
+    (idst, np.double, 1): 14,
+    (idst, np.float32, 1): 6,
+
+    (idst, np.double, 2): 14,
+    (idst, np.float32, 2): 6,
+
+    (idst, np.double, 3): 14,
+    (idst, np.float32, 3): 6,
+
+    (idst, np.double, 4): 14,
+    (idst, np.float32, 4): 6,
+}
+
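+# Extend the tolerance table: longdouble inherits the double-precision
+# decimals and plain int inputs inherit the float32 ones.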
+for k,v in dec_map.copy().items():
+    if k[1] == np.double:
+        dec_map[(k[0], np.longdouble, k[2])] = v
+    elif k[1] == np.float32:
+        dec_map[(k[0], int, k[2])] = v
+
+
+@pytest.mark.parametrize('rdt', [np.longfloat, np.double, np.float32, int])
+@pytest.mark.parametrize('type', [1, 2, 3, 4])
+class TestDCT:
+    def test_definition(self, rdt, type, fftwdata_size):
+        x, yr, dt = fftw_dct_ref(type, fftwdata_size, rdt)
+        y = dct(x, type=type)
+        assert_equal(y.dtype, dt)
+        dec = dec_map[(dct, rdt, type)]
+        assert_allclose(y, yr, rtol=0., atol=np.max(yr)*10**(-dec))
+
+    @pytest.mark.parametrize('size', [7, 8, 9, 16, 32, 64])
+    def test_axis(self, rdt, type, size):
+        nt = 2
+        dec = dec_map[(dct, rdt, type)]
+        x = np.random.randn(nt, size)
+        y = dct(x, type=type)
+        for j in range(nt):
+            assert_array_almost_equal(y[j], dct(x[j], type=type),
+                                      decimal=dec)
+
+        x = x.T
+        y = dct(x, axis=0, type=type)
+        for j in range(nt):
+            assert_array_almost_equal(y[:,j], dct(x[:,j], type=type),
+                                      decimal=dec)
+
+
+@pytest.mark.parametrize('rdt', [np.longfloat, np.double, np.float32, int])
+def test_dct1_definition_ortho(rdt, mdata_x):
+    # Test orthonormal mode.
+    dec = dec_map[(dct, rdt, 1)]
+    x = np.array(mdata_x, dtype=rdt)
+    dt = np.result_type(np.float32, rdt)
+    y = dct(x, norm='ortho', type=1)
+    y2 = naive_dct1(x, norm='ortho')
+    assert_equal(y.dtype, dt)
+    assert_allclose(y, y2, rtol=0., atol=np.max(y2)*10**(-dec))
+
+
+@pytest.mark.parametrize('rdt', [np.longfloat, np.double, np.float32, int])
+def test_dct2_definition_matlab(mdata_xy, rdt):
+    # Test correspondence with MATLAB (orthonormal mode).
+    dt = np.result_type(np.float32, rdt)
+    x = np.array(mdata_xy[0], dtype=dt)
+
+    yr = mdata_xy[1]
+    y = dct(x, norm="ortho", type=2)
+    dec = dec_map[(dct, rdt, 2)]
+    assert_equal(y.dtype, dt)
+    assert_array_almost_equal(y, yr, decimal=dec)
+
+
+@pytest.mark.parametrize('rdt', [np.longfloat, np.double, np.float32, int])
+def test_dct3_definition_ortho(mdata_x, rdt):
+    # Test orthonormal mode.
+    x = np.array(mdata_x, dtype=rdt)
+    dt = np.result_type(np.float32, rdt)
+    y = dct(x, norm='ortho', type=2)
+    xi = dct(y, norm="ortho", type=3)
+    dec = dec_map[(dct, rdt, 3)]
+    assert_equal(xi.dtype, dt)
+    assert_array_almost_equal(xi, x, decimal=dec)
+
+
+@pytest.mark.parametrize('rdt', [np.longfloat, np.double, np.float32, int])
+def test_dct4_definition_ortho(mdata_x, rdt):
+    # Test orthonormal mode.
+    x = np.array(mdata_x, dtype=rdt)
+    dt = np.result_type(np.float32, rdt)
+    y = dct(x, norm='ortho', type=4)
+    y2 = naive_dct4(x, norm='ortho')
+    dec = dec_map[(dct, rdt, 4)]
+    assert_equal(y.dtype, dt)
+    assert_allclose(y, y2, rtol=0., atol=np.max(y2)*10**(-dec))
+
+
+@pytest.mark.parametrize('rdt', [np.longfloat, np.double, np.float32, int])
+@pytest.mark.parametrize('type', [1, 2, 3, 4])
+def test_idct_definition(fftwdata_size, rdt, type):
+    xr, yr, dt = fftw_dct_ref(type, fftwdata_size, rdt)
+    x = idct(yr, type=type)
+    dec = dec_map[(idct, rdt, type)]
+    assert_equal(x.dtype, dt)
+    assert_allclose(x, xr, rtol=0., atol=np.max(xr)*10**(-dec))
+
+
+@pytest.mark.parametrize('rdt', [np.longfloat, np.double, np.float32, int])
+@pytest.mark.parametrize('type', [1, 2, 3, 4])
+def test_dst_definition(fftwdata_size, rdt, type):
+    xr, yr, dt = fftw_dst_ref(type, fftwdata_size, rdt)
+    y = dst(xr, type=type)
+    dec = dec_map[(dst, rdt, type)]
+    assert_equal(y.dtype, dt)
+    assert_allclose(y, yr, rtol=0., atol=np.max(yr)*10**(-dec))
+
+
+@pytest.mark.parametrize('rdt', [np.longfloat, np.double, np.float32, int])
+def test_dst1_definition_ortho(rdt, mdata_x):
+    # Test orthonormal mode.
+    dec = dec_map[(dst, rdt, 1)]
+    x = np.array(mdata_x, dtype=rdt)
+    dt = np.result_type(np.float32, rdt)
+    y = dst(x, norm='ortho', type=1)
+    y2 = naive_dst1(x, norm='ortho')
+    assert_equal(y.dtype, dt)
+    assert_allclose(y, y2, rtol=0., atol=np.max(y2)*10**(-dec))
+
+
+@pytest.mark.parametrize('rdt', [np.longfloat, np.double, np.float32, int])
+def test_dst4_definition_ortho(rdt, mdata_x):
+    # Test orthonormal mode.
+    dec = dec_map[(dst, rdt, 4)]
+    x = np.array(mdata_x, dtype=rdt)
+    dt = np.result_type(np.float32, rdt)
+    y = dst(x, norm='ortho', type=4)
+    y2 = naive_dst4(x, norm='ortho')
+    assert_equal(y.dtype, dt)
+    assert_array_almost_equal(y, y2, decimal=dec)
+
+
+@pytest.mark.parametrize('rdt', [np.longfloat, np.double, np.float32, int])
+@pytest.mark.parametrize('type', [1, 2, 3, 4])
+def test_idst_definition(fftwdata_size, rdt, type):
+    xr, yr, dt = fftw_dst_ref(type, fftwdata_size, rdt)
+    x = idst(yr, type=type)
+    dec = dec_map[(idst, rdt, type)]
+    assert_equal(x.dtype, dt)
+    assert_allclose(x, xr, rtol=0., atol=np.max(xr)*10**(-dec))
+
+
+@pytest.mark.parametrize('routine', [dct, dst, idct, idst])
+@pytest.mark.parametrize('dtype', [np.float32, np.float64, np.longfloat])
+@pytest.mark.parametrize('shape, axis', [
+    ((16,), -1), ((16, 2), 0), ((2, 16), 1)
+])
+@pytest.mark.parametrize('type', [1, 2, 3, 4])
+@pytest.mark.parametrize('overwrite_x', [True, False])
+@pytest.mark.parametrize('norm', [None, 'ortho'])
+def test_overwrite(routine, dtype, shape, axis, type, norm, overwrite_x):
+    # Check input overwrite behavior
+    np.random.seed(1234)
+    if np.issubdtype(dtype, np.complexfloating):
+        x = np.random.randn(*shape) + 1j*np.random.randn(*shape)
+    else:
+        x = np.random.randn(*shape)
+    x = x.astype(dtype)
+    x2 = x.copy()
+    routine(x2, type, None, axis, norm, overwrite_x=overwrite_x)
+
+    sig = "%s(%s%r, %r, axis=%r, overwrite_x=%r)" % (
+        routine.__name__, x.dtype, x.shape, None, axis, overwrite_x)
+    if not overwrite_x:
+        assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig)
+
+
+class Test_DCTN_IDCTN:
+    dec = 14
+    dct_type = [1, 2, 3, 4]
+    norms = [None, 'backward', 'ortho', 'forward']
+    rstate = np.random.RandomState(1234)
+    shape = (32, 16)
+    data = rstate.randn(*shape)
+
+    @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
+                                                   (dstn, idstn)])
+    @pytest.mark.parametrize('axes', [None,
+                                      1, (1,), [1],
+                                      0, (0,), [0],
+                                      (0, 1), [0, 1],
+                                      (-2, -1), [-2, -1]])
+    @pytest.mark.parametrize('dct_type', dct_type)
+    @pytest.mark.parametrize('norm', ['ortho'])
+    def test_axes_round_trip(self, fforward, finverse, axes, dct_type, norm):
+        tmp = fforward(self.data, type=dct_type, axes=axes, norm=norm)
+        tmp = finverse(tmp, type=dct_type, axes=axes, norm=norm)
+        assert_array_almost_equal(self.data, tmp, decimal=12)
+
+    @pytest.mark.parametrize('funcn,func', [(dctn, dct), (dstn, dst)])
+    @pytest.mark.parametrize('dct_type', dct_type)
+    @pytest.mark.parametrize('norm', norms)
+    def test_dctn_vs_2d_reference(self, funcn, func, dct_type, norm):
+        y1 = funcn(self.data, type=dct_type, axes=None, norm=norm)
+        y2 = ref_2d(func, self.data, type=dct_type, norm=norm)
+        assert_array_almost_equal(y1, y2, decimal=11)
+
+    @pytest.mark.parametrize('funcn,func', [(idctn, idct), (idstn, idst)])
+    @pytest.mark.parametrize('dct_type', dct_type)
+    @pytest.mark.parametrize('norm', norms)
+    def test_idctn_vs_2d_reference(self, funcn, func, dct_type, norm):
+        fdata = dctn(self.data, type=dct_type, norm=norm)
+        y1 = funcn(fdata, type=dct_type, norm=norm)
+        y2 = ref_2d(func, fdata, type=dct_type, norm=norm)
+        assert_array_almost_equal(y1, y2, decimal=11)
+
+    @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
+                                                   (dstn, idstn)])
+    def test_axes_and_shape(self, fforward, finverse):
+        with assert_raises(ValueError,
+                           match="when given, axes and shape arguments"
+                           " have to be of the same length"):
+            fforward(self.data, s=self.data.shape[0], axes=(0, 1))
+
+        with assert_raises(ValueError,
+                           match="when given, axes and shape arguments"
+                           " have to be of the same length"):
+            fforward(self.data, s=self.data.shape, axes=0)
+
+    @pytest.mark.parametrize('fforward', [dctn, dstn])
+    def test_shape(self, fforward):
+        tmp = fforward(self.data, s=(128, 128), axes=None)
+        assert_equal(tmp.shape, (128, 128))
+
+    @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
+                                                   (dstn, idstn)])
+    @pytest.mark.parametrize('axes', [1, (1,), [1],
+                                      0, (0,), [0]])
+    def test_shape_is_none_with_axes(self, fforward, finverse, axes):
+        tmp = fforward(self.data, s=None, axes=axes, norm='ortho')
+        tmp = finverse(tmp, s=None, axes=axes, norm='ortho')
+        assert_array_almost_equal(self.data, tmp, decimal=self.dec)
+
+
+@pytest.mark.parametrize('func', [dct, dctn, idct, idctn,
+                                  dst, dstn, idst, idstn])
+def test_swapped_byte_order(func):
+    rng = np.random.RandomState(1234)
+    x = rng.rand(10)
+    swapped_dt = x.dtype.newbyteorder('S')
+    assert_allclose(func(x.astype(swapped_dt)), func(x))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fft/_realtransforms.py b/__packaged__/coreml/.python_dependencies/scipy/fft/_realtransforms.py
new file mode 100644
index 00000000..d61a6226
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fft/_realtransforms.py
@@ -0,0 +1,693 @@
+from ._basic import _dispatch
+from scipy._lib.uarray import Dispatchable
+import numpy as np
+
+__all__ = ['dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn']
+
+
+@_dispatch
+def dctn(x, type=2, s=None, axes=None, norm=None, overwrite_x=False,
+         workers=None, *, orthogonalize=None):
+    """
+    Return multidimensional Discrete Cosine Transform along the specified axes.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    type : {1, 2, 3, 4}, optional
+        Type of the DCT (see Notes). Default type is 2.
+    s : int or array_like of ints or None, optional
+        The shape of the result. If both `s` and `axes` (see below) are None,
+        `s` is ``x.shape``; if `s` is None but `axes` is not None, then `s` is
+        ``numpy.take(x.shape, axes, axis=0)``.
+        If ``s[i] > x.shape[i]``, the ith dimension is padded with zeros.
+        If ``s[i] < x.shape[i]``, the ith dimension is truncated to length
+        ``s[i]``.
+        If any element of `s` is -1, the size of the corresponding dimension of
+        `x` is used.
+    axes : int or array_like of ints or None, optional
+        Axes over which the DCT is computed. If not given, the last ``len(s)``
+        axes are used, or all axes if `s` is also not specified.
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see Notes). Default is "backward".
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+    workers : int, optional
+        Maximum number of workers to use for parallel computation. If negative,
+        the value wraps around from ``os.cpu_count()``.
+        See :func:`~scipy.fft.fft` for more details.
+    orthogonalize : bool, optional
+        Whether to use the orthogonalized DCT variant (see Notes).
+        Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise.
+
+        .. versionadded:: 1.8.0
+
+    Returns
+    -------
+    y : ndarray of real
+        The transformed input array.
+
+    See Also
+    --------
+    idctn : Inverse multidimensional DCT
+
+    Notes
+    -----
+    For full details of the DCT types and normalization modes, as well as
+    references, see `dct`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.fft import dctn, idctn
+    >>> rng = np.random.default_rng()
+    >>> y = rng.standard_normal((16, 16))
+    >>> np.allclose(y, idctn(dctn(y)))
+    True
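+
+    The transform is separable, so it also matches applying the 1-D `dct`
+    along each axis in turn:
+
+    >>> from scipy.fft import dct
+    >>> np.allclose(dctn(y), dct(dct(y, axis=0), axis=1))
+    True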
+
+    """
+    return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def idctn(x, type=2, s=None, axes=None, norm=None, overwrite_x=False,
+          workers=None, orthogonalize=None):
+    """
+    Return multidimensional Inverse Discrete Cosine Transform along the specified axes.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    type : {1, 2, 3, 4}, optional
+        Type of the DCT (see Notes). Default type is 2.
+    s : int or array_like of ints or None, optional
+        The shape of the result.  If both `s` and `axes` (see below) are
+        None, `s` is ``x.shape``; if `s` is None but `axes` is
+        not None, then `s` is ``numpy.take(x.shape, axes, axis=0)``.
+        If ``s[i] > x.shape[i]``, the ith dimension is padded with zeros.
+        If ``s[i] < x.shape[i]``, the ith dimension is truncated to length
+        ``s[i]``.
+        If any element of `s` is -1, the size of the corresponding dimension of
+        `x` is used.
+    axes : int or array_like of ints or None, optional
+        Axes over which the IDCT is computed. If not given, the last ``len(s)``
+        axes are used, or all axes if `s` is also not specified.
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see Notes). Default is "backward".
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+    workers : int, optional
+        Maximum number of workers to use for parallel computation. If negative,
+        the value wraps around from ``os.cpu_count()``.
+        See :func:`~scipy.fft.fft` for more details.
+    orthogonalize : bool, optional
+        Whether to use the orthogonalized IDCT variant (see Notes).
+        Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise.
+
+        .. versionadded:: 1.8.0
+
+    Returns
+    -------
+    y : ndarray of real
+        The transformed input array.
+
+    See Also
+    --------
+    dctn : multidimensional DCT
+
+    Notes
+    -----
+    For full details of the IDCT types and normalization modes, as well as
+    references, see `idct`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.fft import dctn, idctn
+    >>> rng = np.random.default_rng()
+    >>> y = rng.standard_normal((16, 16))
+    >>> np.allclose(y, idctn(dctn(y)))
+    True
+
+    """
+    return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def dstn(x, type=2, s=None, axes=None, norm=None, overwrite_x=False,
+         workers=None, orthogonalize=None):
+    """
+    Return multidimensional Discrete Sine Transform along the specified axes.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    type : {1, 2, 3, 4}, optional
+        Type of the DST (see Notes). Default type is 2.
+    s : int or array_like of ints or None, optional
+        The shape of the result.  If both `s` and `axes` (see below) are None,
+        `s` is ``x.shape``; if `s` is None but `axes` is not None, then `s` is
+        ``numpy.take(x.shape, axes, axis=0)``.
+        If ``s[i] > x.shape[i]``, the ith dimension is padded with zeros.
+        If ``s[i] < x.shape[i]``, the ith dimension is truncated to length
+        ``s[i]``.
+        If any element of `s` is -1, the size of the corresponding dimension
+        of `x` is used.
+    axes : int or array_like of ints or None, optional
+        Axes over which the DST is computed. If not given, the last ``len(s)``
+        axes are used, or all axes if `s` is also not specified.
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see Notes). Default is "backward".
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+    workers : int, optional
+        Maximum number of workers to use for parallel computation. If negative,
+        the value wraps around from ``os.cpu_count()``.
+        See :func:`~scipy.fft.fft` for more details.
+    orthogonalize : bool, optional
+        Whether to use the orthogonalized DST variant (see Notes).
+        Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise.
+
+        .. versionadded:: 1.8.0
+
+    Returns
+    -------
+    y : ndarray of real
+        The transformed input array.
+
+    See Also
+    --------
+    idstn : Inverse multidimensional DST
+
+    Notes
+    -----
+    For full details of the DST types and normalization modes, as well as
+    references, see `dst`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.fft import dstn, idstn
+    >>> rng = np.random.default_rng()
+    >>> y = rng.standard_normal((16, 16))
+    >>> np.allclose(y, idstn(dstn(y)))
+    True
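+
+    Like `dctn`, the transform is separable and matches applying the 1-D
+    `dst` along each axis in turn:
+
+    >>> from scipy.fft import dst
+    >>> np.allclose(dstn(y), dst(dst(y, axis=0), axis=1))
+    True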
+
+    """
+    return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def idstn(x, type=2, s=None, axes=None, norm=None, overwrite_x=False,
+          workers=None, orthogonalize=None):
+    """
+    Return multidimensional Inverse Discrete Sine Transform along the specified axes.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    type : {1, 2, 3, 4}, optional
+        Type of the DST (see Notes). Default type is 2.
+    s : int or array_like of ints or None, optional
+        The shape of the result.  If both `s` and `axes` (see below) are None,
+        `s` is ``x.shape``; if `s` is None but `axes` is not None, then `s` is
+        ``numpy.take(x.shape, axes, axis=0)``.
+        If ``s[i] > x.shape[i]``, the ith dimension is padded with zeros.
+        If ``s[i] < x.shape[i]``, the ith dimension is truncated to length
+        ``s[i]``.
+        If any element of `s` is -1, the size of the corresponding dimension of
+        `x` is used.
+    axes : int or array_like of ints or None, optional
+        Axes over which the IDST is computed. If not given, the last ``len(s)``
+        axes are used, or all axes if `s` is also not specified.
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see Notes). Default is "backward".
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+    workers : int, optional
+        Maximum number of workers to use for parallel computation. If negative,
+        the value wraps around from ``os.cpu_count()``.
+        See :func:`~scipy.fft.fft` for more details.
+    orthogonalize : bool, optional
+        Whether to use the orthogonalized IDST variant (see Notes).
+        Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise.
+
+        .. versionadded:: 1.8.0
+
+    Returns
+    -------
+    y : ndarray of real
+        The transformed input array.
+
+    See Also
+    --------
+    dstn : multidimensional DST
+
+    Notes
+    -----
+    For full details of the IDST types and normalization modes, as well as
+    references, see `idst`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.fft import dstn, idstn
+    >>> rng = np.random.default_rng()
+    >>> y = rng.standard_normal((16, 16))
+    >>> np.allclose(y, idstn(dstn(y)))
+    True
+
+    """
+    return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def dct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False, workers=None,
+        orthogonalize=None):
+    r"""Return the Discrete Cosine Transform of arbitrary type sequence x.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    type : {1, 2, 3, 4}, optional
+        Type of the DCT (see Notes). Default type is 2.
+    n : int, optional
+        Length of the transform.  If ``n < x.shape[axis]``, `x` is
+        truncated.  If ``n > x.shape[axis]``, `x` is zero-padded. The
+        default results in ``n = x.shape[axis]``.
+    axis : int, optional
+        Axis along which the dct is computed; the default is over the
+        last axis (i.e., ``axis=-1``).
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see Notes). Default is "backward".
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+    workers : int, optional
+        Maximum number of workers to use for parallel computation. If negative,
+        the value wraps around from ``os.cpu_count()``.
+        See :func:`~scipy.fft.fft` for more details.
+    orthogonalize : bool, optional
+        Whether to use the orthogonalized DCT variant (see Notes).
+        Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise.
+
+        .. versionadded:: 1.8.0
+
+    Returns
+    -------
+    y : ndarray of real
+        The transformed input array.
+
+    See Also
+    --------
+    idct : Inverse DCT
+
+    Notes
+    -----
+    For a one-dimensional array ``x``, ``dct(x, norm='ortho')`` is equal to
+    MATLAB ``dct(x)``.
+
+    .. warning:: For ``type in {1, 2, 3}``, ``norm="ortho"`` breaks the direct
+                 correspondence with the direct Fourier transform. To recover
+                 it you must specify ``orthogonalize=False``.
+
+    For ``norm="ortho"`` both the `dct` and `idct` are scaled by the same
+    overall factor in both directions. By default, the transform is also
+    orthogonalized which for types 1, 2 and 3 means the transform definition is
+    modified to give orthogonality of the DCT matrix (see below).
+
+    For ``norm="backward"``, there is no scaling on `dct` and the `idct` is
+    scaled by ``1/N`` where ``N`` is the "logical" size of the DCT. For
+    ``norm="forward"`` the ``1/N`` normalization is applied to the forward
+    `dct` instead and the `idct` is unnormalized.
+
+    There are, theoretically, 8 types of the DCT; only the first 4 types are
+    implemented in SciPy. 'The' DCT generally refers to DCT type 2, and 'the'
+    Inverse DCT generally refers to DCT type 3.
+
+    **Type I**
+
+    There are several definitions of the DCT-I; we use the following
+    (for ``norm="backward"``)
+
+    .. math::
+
+       y_k = x_0 + (-1)^k x_{N-1} + 2 \sum_{n=1}^{N-2} x_n \cos\left(
+       \frac{\pi k n}{N-1} \right)
+
+    If ``orthogonalize=True``, ``x[0]`` and ``x[N-1]`` are multiplied by a
+    scaling factor of :math:`\sqrt{2}`, and ``y[0]`` and ``y[N-1]`` are divided
+    by :math:`\sqrt{2}`. When combined with ``norm="ortho"``, this makes the
+    corresponding matrix of coefficients orthonormal (``O @ O.T = np.eye(N)``).
+
+    .. note::
+       The DCT-I is only supported for input size > 1.
+
+    **Type II**
+
+    There are several definitions of the DCT-II; we use the following
+    (for ``norm="backward"``)
+
+    .. math::
+
+       y_k = 2 \sum_{n=0}^{N-1} x_n \cos\left(\frac{\pi k(2n+1)}{2N} \right)
+
+    If ``orthogonalize=True``, ``y[0]`` is divided by :math:`\sqrt{2}` which,
+    when combined with ``norm="ortho"``, makes the corresponding matrix of
+    coefficients orthonormal (``O @ O.T = np.eye(N)``).
+
+    **Type III**
+
+    There are several definitions, we use the following (for
+    ``norm="backward"``)
+
+    .. math::
+
+       y_k = x_0 + 2 \sum_{n=1}^{N-1} x_n \cos\left(\frac{\pi(2k+1)n}{2N}\right)
+
+    If ``orthogonalize=True``, ``x[0]`` terms are multiplied by
+    :math:`\sqrt{2}` which, when combined with ``norm="ortho"``, makes the
+    corresponding matrix of coefficients orthonormal (``O @ O.T = np.eye(N)``).
+
+    The (unnormalized) DCT-III is the inverse of the (unnormalized) DCT-II, up
+    to a factor `2N`. The orthonormalized DCT-III is exactly the inverse of
+    the orthonormalized DCT-II.
+
+    **Type IV**
+
+    There are several definitions of the DCT-IV; we use the following
+    (for ``norm="backward"``)
+
+    .. math::
+
+       y_k = 2 \sum_{n=0}^{N-1} x_n \cos\left(\frac{\pi(2k+1)(2n+1)}{4N} \right)
+
+    ``orthogonalize`` has no effect here, as the DCT-IV matrix is already
+    orthogonal up to a scale factor of ``2N``.
+
+    References
+    ----------
+    .. [1] 'A Fast Cosine Transform in One and Two Dimensions', by J.
+           Makhoul, `IEEE Transactions on acoustics, speech and signal
+           processing` vol. 28(1), pp. 27-34,
+           :doi:`10.1109/TASSP.1980.1163351` (1980).
+    .. [2] Wikipedia, "Discrete cosine transform",
+           https://en.wikipedia.org/wiki/Discrete_cosine_transform
+
+    Examples
+    --------
+    The Type 1 DCT is equivalent to the FFT (though faster) for real,
+    even-symmetrical inputs. The output is also real and even-symmetrical.
+    Half of the FFT input is used to generate half of the FFT output:
+
+    >>> from scipy.fft import fft, dct
+    >>> import numpy as np
+    >>> fft(np.array([4., 3., 5., 10., 5., 3.])).real
+    array([ 30.,  -8.,   6.,  -2.,   6.,  -8.])
+    >>> dct(np.array([4., 3., 5., 10.]), 1)
+    array([ 30.,  -8.,   6.,  -2.])
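+
+    A round trip through `idct` recovers the input; as described in the
+    Notes, matching `norm` values make the pair exact inverses (quick
+    numerical check):
+
+    >>> from scipy.fft import idct
+    >>> x = np.array([4., 3., 5., 10.])
+    >>> np.allclose(idct(dct(x, norm='ortho'), norm='ortho'), x)
+    True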
+
+    """
+    return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def idct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False,
+         workers=None, orthogonalize=None):
+    """
+    Return the Inverse Discrete Cosine Transform of an arbitrary type sequence.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    type : {1, 2, 3, 4}, optional
+        Type of the DCT (see Notes). Default type is 2.
+    n : int, optional
+        Length of the transform.  If ``n < x.shape[axis]``, `x` is
+        truncated.  If ``n > x.shape[axis]``, `x` is zero-padded. The
+        default results in ``n = x.shape[axis]``.
+    axis : int, optional
+        Axis along which the idct is computed; the default is over the
+        last axis (i.e., ``axis=-1``).
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see Notes). Default is "backward".
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+    workers : int, optional
+        Maximum number of workers to use for parallel computation. If negative,
+        the value wraps around from ``os.cpu_count()``.
+        See :func:`~scipy.fft.fft` for more details.
+    orthogonalize : bool, optional
+        Whether to use the orthogonalized IDCT variant (see Notes).
+        Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise.
+
+        .. versionadded:: 1.8.0
+
+    Returns
+    -------
+    idct : ndarray of real
+        The transformed input array.
+
+    See Also
+    --------
+    dct : Forward DCT
+
+    Notes
+    -----
+    For a one-dimensional array `x`, ``idct(x, norm='ortho')`` is equal to
+    MATLAB ``idct(x)``.
+
+    .. warning:: For ``type in {1, 2, 3}``, ``norm="ortho"`` breaks the direct
+                 correspondence with the inverse direct Fourier transform. To
+                 recover it you must specify ``orthogonalize=False``.
+
+    For ``norm="ortho"`` both the `dct` and `idct` are scaled by the same
+    overall factor in both directions. By default, the transform is also
+    orthogonalized which for types 1, 2 and 3 means the transform definition is
+    modified to give orthogonality of the IDCT matrix (see `dct` for the full
+    definitions).
+
+    'The' IDCT is the IDCT-II, which is the same as the normalized DCT-III.
+
+    The IDCT is equivalent to a normal DCT except for the normalization and
+    type. DCT types 1 and 4 are their own inverses, and DCTs 2 and 3 are each
+    other's inverses.
+
+    Examples
+    --------
+    The Type 1 DCT is equivalent to the DFT for real, even-symmetrical
+    inputs. The output is also real and even-symmetrical. Half of the IFFT
+    input is used to generate half of the IFFT output:
+
+    >>> from scipy.fft import ifft, idct
+    >>> import numpy as np
+    >>> ifft(np.array([ 30.,  -8.,   6.,  -2.,   6.,  -8.])).real
+    array([  4.,   3.,   5.,  10.,   5.,   3.])
+    >>> idct(np.array([ 30.,  -8.,   6.,  -2.]), 1)
+    array([  4.,   3.,   5.,  10.])
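+
+    The round trip also holds for the default ``norm="backward"``, where the
+    ``1/N`` scaling sits on the inverse transform (quick numerical check):
+
+    >>> from scipy.fft import dct
+    >>> x = np.array([4., 3., 5., 10.])
+    >>> np.allclose(idct(dct(x)), x)
+    True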
+
+    """
+    return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def dst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False, workers=None,
+        orthogonalize=None):
+    r"""
+    Return the Discrete Sine Transform of arbitrary type sequence x.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    type : {1, 2, 3, 4}, optional
+        Type of the DST (see Notes). Default type is 2.
+    n : int, optional
+        Length of the transform. If ``n < x.shape[axis]``, `x` is
+        truncated.  If ``n > x.shape[axis]``, `x` is zero-padded. The
+        default results in ``n = x.shape[axis]``.
+    axis : int, optional
+        Axis along which the dst is computed; the default is over the
+        last axis (i.e., ``axis=-1``).
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see Notes). Default is "backward".
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+    workers : int, optional
+        Maximum number of workers to use for parallel computation. If negative,
+        the value wraps around from ``os.cpu_count()``.
+        See :func:`~scipy.fft.fft` for more details.
+    orthogonalize : bool, optional
+        Whether to use the orthogonalized DST variant (see Notes).
+        Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise.
+
+        .. versionadded:: 1.8.0
+
+    Returns
+    -------
+    dst : ndarray of reals
+        The transformed input array.
+
+    See Also
+    --------
+    idst : Inverse DST
+
+    Notes
+    -----
+    .. warning:: For ``type in {2, 3}``, ``norm="ortho"`` breaks the direct
+                 correspondence with the direct Fourier transform. To recover
+                 it you must specify ``orthogonalize=False``.
+
+    For ``norm="ortho"`` both the `dst` and `idst` are scaled by the same
+    overall factor in both directions. By default, the transform is also
+    orthogonalized which for types 2 and 3 means the transform definition is
+    modified to give orthogonality of the DST matrix (see below).
+
+    For ``norm="backward"``, there is no scaling on the `dst` and the `idst` is
+    scaled by ``1/N`` where ``N`` is the "logical" size of the DST.
+
+    There are, theoretically, 8 types of the DST for different combinations of
+    even/odd boundary conditions and boundary offsets [1]_; only the first
+    4 types are implemented in SciPy.
+
+    **Type I**
+
+    There are several definitions of the DST-I; we use the following for
+    ``norm="backward"``. DST-I assumes the input is odd around :math:`n=-1` and
+    :math:`n=N`.
+
+    .. math::
+
+        y_k = 2 \sum_{n=0}^{N-1} x_n \sin\left(\frac{\pi(k+1)(n+1)}{N+1}\right)
+
+    Note that the DST-I is only supported for input size > 1.
+    The (unnormalized) DST-I is its own inverse, up to a factor :math:`2(N+1)`.
+    The orthonormalized DST-I is exactly its own inverse.
+
+    ``orthogonalize`` has no effect here, as the DST-I matrix is already
+    orthogonal up to a scale factor of :math:`2(N+1)`.
+
+    **Type II**
+
+    There are several definitions of the DST-II; we use the following for
+    ``norm="backward"``. DST-II assumes the input is odd around :math:`n=-1/2` and
+    :math:`n=N-1/2`; the output is odd around :math:`k=-1` and even around :math:`k=N-1`
+
+    .. math::
+
+        y_k = 2 \sum_{n=0}^{N-1} x_n \sin\left(\frac{\pi(k+1)(2n+1)}{2N}\right)
+
+    If ``orthogonalize=True``, ``y[-1]`` is divided by :math:`\sqrt{2}` which,
+    when combined with ``norm="ortho"``, makes the corresponding matrix of
+    coefficients orthonormal (``O @ O.T = np.eye(N)``).
+
+    **Type III**
+
+    There are several definitions of the DST-III, we use the following (for
+    ``norm="backward"``). DST-III assumes the input is odd around :math:`n=-1` and
+    even around :math:`n=N-1`
+
+    .. math::
+
+        y_k = (-1)^k x_{N-1} + 2 \sum_{n=0}^{N-2} x_n \sin\left(
+        \frac{\pi(2k+1)(n+1)}{2N}\right)
+
+    If ``orthogonalize=True``, ``x[-1]`` is multiplied by :math:`\sqrt{2}`
+    which, when combined with ``norm="ortho"``, makes the corresponding matrix
+    of coefficients orthonormal (``O @ O.T = np.eye(N)``).
+
+    The (unnormalized) DST-III is the inverse of the (unnormalized) DST-II, up
+    to a factor :math:`2N`. The orthonormalized DST-III is exactly the inverse of the
+    orthonormalized DST-II.
+
+    **Type IV**
+
+    There are several definitions of the DST-IV, we use the following (for
+    ``norm="backward"``). DST-IV assumes the input is odd around :math:`n=-0.5` and
+    even around :math:`n=N-0.5`
+
+    .. math::
+
+        y_k = 2 \sum_{n=0}^{N-1} x_n \sin\left(\frac{\pi(2k+1)(2n+1)}{4N}\right)
+
+    ``orthogonalize`` has no effect here, as the DST-IV matrix is already
+    orthogonal up to a scale factor of ``2N``.
+
+    The (unnormalized) DST-IV is its own inverse, up to a factor :math:`2N`. The
+    orthonormalized DST-IV is exactly its own inverse.
+
+    References
+    ----------
+    .. [1] Wikipedia, "Discrete sine transform",
+           https://en.wikipedia.org/wiki/Discrete_sine_transform
+
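+    Examples
+    --------
+    A quick numerical check of the inverse relationships described in the
+    Notes: `idst` undoes `dst`, and the orthonormalized DST-III inverts the
+    orthonormalized DST-II:
+
+    >>> import numpy as np
+    >>> from scipy.fft import dst, idst
+    >>> x = np.array([1., 2., 3., 4., 5.])
+    >>> np.allclose(idst(dst(x)), x)
+    True
+    >>> np.allclose(dst(dst(x, type=2, norm='ortho'), type=3, norm='ortho'), x)
+    True
+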
+    """
+    return (Dispatchable(x, np.ndarray),)
+
+
+@_dispatch
+def idst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False,
+         workers=None, orthogonalize=None):
+    """
+    Return the Inverse Discrete Sine Transform of an arbitrary type sequence.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    type : {1, 2, 3, 4}, optional
+        Type of the DST (see Notes). Default type is 2.
+    n : int, optional
+        Length of the transform. If ``n < x.shape[axis]``, `x` is
+        truncated.  If ``n > x.shape[axis]``, `x` is zero-padded. The
+        default results in ``n = x.shape[axis]``.
+    axis : int, optional
+        Axis along which the idst is computed; the default is over the
+        last axis (i.e., ``axis=-1``).
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see Notes). Default is "backward".
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+    workers : int, optional
+        Maximum number of workers to use for parallel computation. If negative,
+        the value wraps around from ``os.cpu_count()``.
+        See :func:`~scipy.fft.fft` for more details.
+    orthogonalize : bool, optional
+        Whether to use the orthogonalized IDST variant (see Notes).
+        Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise.
+
+        .. versionadded:: 1.8.0
+
+    Returns
+    -------
+    idst : ndarray of real
+        The transformed input array.
+
+    See Also
+    --------
+    dst : Forward DST
+
+    Notes
+    -----
+    .. warning:: For ``type in {2, 3}``, ``norm="ortho"`` breaks the direct
+                 correspondence with the inverse direct Fourier transform. To
+                 recover it you must specify ``orthogonalize=False``.
+
+    For ``norm="ortho"`` both the `dst` and `idst` are scaled by the same
+    overall factor in both directions. By default, the transform is also
+    orthogonalized which for types 2 and 3 means the transform definition is
+    modified to give orthogonality of the IDST matrix (see `dst` for the full
+    definitions).
+
+    'The' IDST is the IDST-II, which is the same as the normalized DST-III.
+
+    The IDST is equivalent to a normal DST except for the normalization and
+    type. DST types 1 and 4 are their own inverses, and DSTs 2 and 3 are each
+    other's inverses.
+
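+    Examples
+    --------
+    A quick numerical check that `idst` inverts `dst` for the default type-2
+    transform:
+
+    >>> import numpy as np
+    >>> from scipy.fft import dst, idst
+    >>> x = np.array([1., 2., 3., 4., 5.])
+    >>> np.allclose(idst(dst(x)), x)
+    True
+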
+    """
+    return (Dispatchable(x, np.ndarray),)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fft/tests/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/fft/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fft/tests/mock_backend.py b/__packaged__/coreml/.python_dependencies/scipy/fft/tests/mock_backend.py
new file mode 100644
index 00000000..fa231e87
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fft/tests/mock_backend.py
@@ -0,0 +1,59 @@
+import numpy as np
+
+class _MockFunction:
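+    # A tiny callable test double: counts invocations, remembers the last
+    # (args, kwargs) it was called with, and always returns a canned value.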
+    def __init__(self, return_value = None):
+        self.number_calls = 0
+        self.return_value = return_value
+        self.last_args = ([], {})
+
+    def __call__(self, *args, **kwargs):
+        self.number_calls += 1
+        self.last_args = (args, kwargs)
+        return self.return_value
+
+
+fft = _MockFunction(np.random.random(10))
+fft2 = _MockFunction(np.random.random(10))
+fftn = _MockFunction(np.random.random(10))
+
+ifft = _MockFunction(np.random.random(10))
+ifft2 = _MockFunction(np.random.random(10))
+ifftn = _MockFunction(np.random.random(10))
+
+rfft = _MockFunction(np.random.random(10))
+rfft2 = _MockFunction(np.random.random(10))
+rfftn = _MockFunction(np.random.random(10))
+
+irfft = _MockFunction(np.random.random(10))
+irfft2 = _MockFunction(np.random.random(10))
+irfftn = _MockFunction(np.random.random(10))
+
+hfft = _MockFunction(np.random.random(10))
+hfft2 = _MockFunction(np.random.random(10))
+hfftn = _MockFunction(np.random.random(10))
+
+ihfft = _MockFunction(np.random.random(10))
+ihfft2 = _MockFunction(np.random.random(10))
+ihfftn = _MockFunction(np.random.random(10))
+
+dct = _MockFunction(np.random.random(10))
+idct = _MockFunction(np.random.random(10))
+dctn = _MockFunction(np.random.random(10))
+idctn = _MockFunction(np.random.random(10))
+
+dst = _MockFunction(np.random.random(10))
+idst = _MockFunction(np.random.random(10))
+dstn = _MockFunction(np.random.random(10))
+idstn = _MockFunction(np.random.random(10))
+
+fht = _MockFunction(np.random.random(10))
+ifht = _MockFunction(np.random.random(10))
+
+
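+# uarray backend protocol: scipy.fft matches a backend to its multimethods via
+# ``__ua_domain__`` and calls ``__ua_function__(method, args, kwargs)`` to
+# perform the actual dispatch.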
+__ua_domain__ = "numpy.scipy.fft"
+
+
+def __ua_function__(method, args, kwargs):
+    fn = globals().get(method.__name__)
+    return (fn(*args, **kwargs) if fn is not None
+            else NotImplemented)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fft/tests/test_backend.py b/__packaged__/coreml/.python_dependencies/scipy/fft/tests/test_backend.py
new file mode 100644
index 00000000..64140c1d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fft/tests/test_backend.py
@@ -0,0 +1,98 @@
+from functools import partial
+
+import numpy as np
+import scipy.fft
+from scipy.fft import _fftlog, _pocketfft, set_backend
+from scipy.fft.tests import mock_backend
+
+from numpy.testing import assert_allclose, assert_equal
+import pytest
+
+fnames = ('fft', 'fft2', 'fftn',
+          'ifft', 'ifft2', 'ifftn',
+          'rfft', 'rfft2', 'rfftn',
+          'irfft', 'irfft2', 'irfftn',
+          'dct', 'idct', 'dctn', 'idctn',
+          'dst', 'idst', 'dstn', 'idstn',
+          'fht', 'ifht')
+
+np_funcs = (np.fft.fft, np.fft.fft2, np.fft.fftn,
+            np.fft.ifft, np.fft.ifft2, np.fft.ifftn,
+            np.fft.rfft, np.fft.rfft2, np.fft.rfftn,
+            np.fft.irfft, np.fft.irfft2, np.fft.irfftn,
+            np.fft.hfft, _pocketfft.hfft2, _pocketfft.hfftn,  # np has no hfftn
+            np.fft.ihfft, _pocketfft.ihfft2, _pocketfft.ihfftn,
+            _pocketfft.dct, _pocketfft.idct, _pocketfft.dctn, _pocketfft.idctn,
+            _pocketfft.dst, _pocketfft.idst, _pocketfft.dstn, _pocketfft.idstn,
+            # must provide required kwargs for fht, ifht
+            partial(_fftlog.fht, dln=2, mu=0.5),
+            partial(_fftlog.ifht, dln=2, mu=0.5))
+
+funcs = (scipy.fft.fft, scipy.fft.fft2, scipy.fft.fftn,
+         scipy.fft.ifft, scipy.fft.ifft2, scipy.fft.ifftn,
+         scipy.fft.rfft, scipy.fft.rfft2, scipy.fft.rfftn,
+         scipy.fft.irfft, scipy.fft.irfft2, scipy.fft.irfftn,
+         scipy.fft.hfft, scipy.fft.hfft2, scipy.fft.hfftn,
+         scipy.fft.ihfft, scipy.fft.ihfft2, scipy.fft.ihfftn,
+         scipy.fft.dct, scipy.fft.idct, scipy.fft.dctn, scipy.fft.idctn,
+         scipy.fft.dst, scipy.fft.idst, scipy.fft.dstn, scipy.fft.idstn,
+         # must provide required kwargs for fht, ifht
+         partial(scipy.fft.fht, dln=2, mu=0.5),
+         partial(scipy.fft.ifht, dln=2, mu=0.5))
+
+mocks = (mock_backend.fft, mock_backend.fft2, mock_backend.fftn,
+         mock_backend.ifft, mock_backend.ifft2, mock_backend.ifftn,
+         mock_backend.rfft, mock_backend.rfft2, mock_backend.rfftn,
+         mock_backend.irfft, mock_backend.irfft2, mock_backend.irfftn,
+         mock_backend.hfft, mock_backend.hfft2, mock_backend.hfftn,
+         mock_backend.ihfft, mock_backend.ihfft2, mock_backend.ihfftn,
+         mock_backend.dct, mock_backend.idct,
+         mock_backend.dctn, mock_backend.idctn,
+         mock_backend.dst, mock_backend.idst,
+         mock_backend.dstn, mock_backend.idstn,
+         mock_backend.fht, mock_backend.ifht)
+
+
+@pytest.mark.parametrize("func, np_func, mock", zip(funcs, np_funcs, mocks))
+def test_backend_call(func, np_func, mock):
+    x = np.arange(20).reshape((10,2))
+    answer = np_func(x)
+    assert_allclose(func(x), answer, atol=1e-10)
+
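+    # ``only=True`` forbids falling back to the default implementation, so the
+    # call below must be routed to the mock backend.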
+    with set_backend(mock_backend, only=True):
+        mock.number_calls = 0
+        y = func(x)
+        assert_equal(y, mock.return_value)
+        assert_equal(mock.number_calls, 1)
+
+    assert_allclose(func(x), answer, atol=1e-10)
+
+
+plan_funcs = (scipy.fft.fft, scipy.fft.fft2, scipy.fft.fftn,
+              scipy.fft.ifft, scipy.fft.ifft2, scipy.fft.ifftn,
+              scipy.fft.rfft, scipy.fft.rfft2, scipy.fft.rfftn,
+              scipy.fft.irfft, scipy.fft.irfft2, scipy.fft.irfftn,
+              scipy.fft.hfft, scipy.fft.hfft2, scipy.fft.hfftn,
+              scipy.fft.ihfft, scipy.fft.ihfft2, scipy.fft.ihfftn)
+
+plan_mocks = (mock_backend.fft, mock_backend.fft2, mock_backend.fftn,
+              mock_backend.ifft, mock_backend.ifft2, mock_backend.ifftn,
+              mock_backend.rfft, mock_backend.rfft2, mock_backend.rfftn,
+              mock_backend.irfft, mock_backend.irfft2, mock_backend.irfftn,
+              mock_backend.hfft, mock_backend.hfft2, mock_backend.hfftn,
+              mock_backend.ihfft, mock_backend.ihfft2, mock_backend.ihfftn)
+
+
+@pytest.mark.parametrize("func, mock", zip(plan_funcs, plan_mocks))
+def test_backend_plan(func, mock):
+    x = np.arange(20).reshape((10, 2))
+
+    with pytest.raises(NotImplementedError, match='precomputed plan'):
+        func(x, plan='foo')
+
+    with set_backend(mock_backend, only=True):
+        mock.number_calls = 0
+        y = func(x, plan='foo')
+        assert_equal(y, mock.return_value)
+        assert_equal(mock.number_calls, 1)
+        assert_equal(mock.last_args[1]['plan'], 'foo')
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fft/tests/test_fft_function.py b/__packaged__/coreml/.python_dependencies/scipy/fft/tests/test_fft_function.py
new file mode 100644
index 00000000..19d60d26
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fft/tests/test_fft_function.py
@@ -0,0 +1,43 @@
+import numpy as np
+import subprocess
+import sys
+
+TEST_BODY = r"""
+import pytest
+import numpy as np
+from numpy.testing import assert_allclose
+import scipy
+import sys
+
+np.random.seed(1234)
+x = np.random.randn(10) + 1j * np.random.randn(10)
+X = np.fft.fft(x)
+# Callable before scipy.fft is imported
+with pytest.deprecated_call(match=r'2\.0\.0'):
+    y = scipy.ifft(X)
+assert_allclose(y, x)
+
+# Callable after scipy.fft is imported
+import scipy.fft
+with pytest.deprecated_call(match=r'2\.0\.0'):
+    y = scipy.ifft(X)
+assert_allclose(y, x)
+
+"""
+
+def test_fft_function():
+    # Historically, scipy.fft was an alias for numpy.fft.fft
+    # Ensure there are no conflicts with the FFT module (gh-10253)
+
+    # Test must run in a subprocess so scipy.fft is not already imported
+    subprocess.check_call([sys.executable, '-c', TEST_BODY])
+
+    # scipy.fft is the correct module
+    from scipy import fft
+    assert not callable(fft)
+    assert fft.__name__ == 'scipy.fft'
+
+    from scipy import ifft
+    assert ifft.__wrapped__ is np.fft.ifft
+
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fft/tests/test_fftlog.py b/__packaged__/coreml/.python_dependencies/scipy/fft/tests/test_fftlog.py
new file mode 100644
index 00000000..037d0813
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fft/tests/test_fftlog.py
@@ -0,0 +1,161 @@
+import warnings
+import numpy as np
+from numpy.testing import assert_allclose
+import pytest
+
+from scipy.fft._fftlog import fht, ifht, fhtoffset
+from scipy.special import poch
+
+
+def test_fht_agrees_with_fftlog():
+    # check that fht numerically agrees with the output from Fortran FFTLog,
+    # the results were generated with the provided `fftlogtest` program,
+    # after fixing how the k array is generated (divide range by n-1, not n)
+
+    # test function, analytical Hankel transform is of the same form
+    def f(r, mu):
+        return r**(mu+1)*np.exp(-r**2/2)
+
+    r = np.logspace(-4, 4, 16)
+
+    dln = np.log(r[1]/r[0])
+    mu = 0.3
+    offset = 0.0
+    bias = 0.0
+
+    a = f(r, mu)
+
+    # test 1: compute as given
+    ours = fht(a, dln, mu, offset=offset, bias=bias)
+    theirs = [-0.1159922613593045E-02, +0.1625822618458832E-02,
+              -0.1949518286432330E-02, +0.3789220182554077E-02,
+              +0.5093959119952945E-03, +0.2785387803618774E-01,
+              +0.9944952700848897E-01, +0.4599202164586588E+00,
+              +0.3157462160881342E+00, -0.8201236844404755E-03,
+              -0.7834031308271878E-03, +0.3931444945110708E-03,
+              -0.2697710625194777E-03, +0.3568398050238820E-03,
+              -0.5554454827797206E-03, +0.8286331026468585E-03]
+    assert_allclose(ours, theirs)
+
+    # test 2: change to optimal offset
+    offset = fhtoffset(dln, mu, bias=bias)
+    ours = fht(a, dln, mu, offset=offset, bias=bias)
+    theirs = [+0.4353768523152057E-04, -0.9197045663594285E-05,
+              +0.3150140927838524E-03, +0.9149121960963704E-03,
+              +0.5808089753959363E-02, +0.2548065256377240E-01,
+              +0.1339477692089897E+00, +0.4821530509479356E+00,
+              +0.2659899781579785E+00, -0.1116475278448113E-01,
+              +0.1791441617592385E-02, -0.4181810476548056E-03,
+              +0.1314963536765343E-03, -0.5422057743066297E-04,
+              +0.3208681804170443E-04, -0.2696849476008234E-04]
+    assert_allclose(ours, theirs)
+
+    # test 3: positive bias
+    bias = 0.8
+    offset = fhtoffset(dln, mu, bias=bias)
+    ours = fht(a, dln, mu, offset=offset, bias=bias)
+    theirs = [-7.3436673558316850E+00, +0.1710271207817100E+00,
+              +0.1065374386206564E+00, -0.5121739602708132E-01,
+              +0.2636649319269470E-01, +0.1697209218849693E-01,
+              +0.1250215614723183E+00, +0.4739583261486729E+00,
+              +0.2841149874912028E+00, -0.8312764741645729E-02,
+              +0.1024233505508988E-02, -0.1644902767389120E-03,
+              +0.3305775476926270E-04, -0.7786993194882709E-05,
+              +0.1962258449520547E-05, -0.8977895734909250E-06]
+    assert_allclose(ours, theirs)
+
+    # test 4: negative bias
+    bias = -0.8
+    offset = fhtoffset(dln, mu, bias=bias)
+    ours = fht(a, dln, mu, offset=offset, bias=bias)
+    theirs = [+0.8985777068568745E-05, +0.4074898209936099E-04,
+              +0.2123969254700955E-03, +0.1009558244834628E-02,
+              +0.5131386375222176E-02, +0.2461678673516286E-01,
+              +0.1235812845384476E+00, +0.4719570096404403E+00,
+              +0.2893487490631317E+00, -0.1686570611318716E-01,
+              +0.2231398155172505E-01, -0.1480742256379873E-01,
+              +0.1692387813500801E+00, +0.3097490354365797E+00,
+              +2.7593607182401860E+00, 10.5251075070045800E+00]
+    assert_allclose(ours, theirs)
+
+
+@pytest.mark.parametrize('optimal', [True, False])
+@pytest.mark.parametrize('offset', [0.0, 1.0, -1.0])
+@pytest.mark.parametrize('bias', [0, 0.1, -0.1])
+@pytest.mark.parametrize('n', [64, 63])
+def test_fht_identity(n, bias, offset, optimal):
+    rng = np.random.RandomState(3491349965)
+
+    a = rng.standard_normal(n)
+    dln = rng.uniform(-1, 1)
+    mu = rng.uniform(-2, 2)
+
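+    # optionally snap the offset to the optimal value for (dln, mu, bias)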
+    if optimal:
+        offset = fhtoffset(dln, mu, initial=offset, bias=bias)
+
+    A = fht(a, dln, mu, offset=offset, bias=bias)
+    a_ = ifht(A, dln, mu, offset=offset, bias=bias)
+
+    assert_allclose(a, a_)
+
+
+def test_fht_special_cases():
+    rng = np.random.RandomState(3491349965)
+
+    a = rng.standard_normal(64)
+    dln = rng.uniform(-1, 1)
+
+    # let xp = (mu+1+q)/2, xm = (mu+1-q)/2, M = {0, -1, -2, ...}
+
+    # case 1: xp in M, xm in M => well-defined transform
+    mu, bias = -4.0, 1.0
+    with warnings.catch_warnings(record=True) as record:
+        fht(a, dln, mu, bias=bias)
+        assert not record, 'fht warned about a well-defined transform'
+
+    # case 2: xp not in M, xm in M => well-defined transform
+    mu, bias = -2.5, 0.5
+    with warnings.catch_warnings(record=True) as record:
+        fht(a, dln, mu, bias=bias)
+        assert not record, 'fht warned about a well-defined transform'
+
+    # case 3: xp in M, xm not in M => singular transform
+    mu, bias = -3.5, 0.5
+    with pytest.warns(Warning) as record:
+        fht(a, dln, mu, bias=bias)
+        assert record, 'fht did not warn about a singular transform'
+
+    # case 4: xp not in M, xm in M => singular inverse transform
+    mu, bias = -2.5, 0.5
+    with pytest.warns(Warning) as record:
+        ifht(a, dln, mu, bias=bias)
+        assert record, 'ifht did not warn about a singular transform'
+
+
+@pytest.mark.parametrize('n', [64, 63])
+def test_fht_exact(n):
+    rng = np.random.RandomState(3491349965)
+
+    # for a(r) a power law r^\gamma, the fast Hankel transform produces the
+    # exact continuous Hankel transform if biased with q = \gamma
+
+    mu = rng.uniform(0, 3)
+
+    # convergence of HT: -1-mu < gamma < 1/2
+    gamma = rng.uniform(-1-mu, 1/2)
+
+    r = np.logspace(-2, 2, n)
+    a = r**gamma
+
+    dln = np.log(r[1]/r[0])
+
+    offset = fhtoffset(dln, mu, initial=0.0, bias=gamma)
+
+    A = fht(a, dln, mu, offset=offset, bias=gamma)
+
+    k = np.exp(offset)/r[::-1]
+
+    # analytical result
+    At = (2/k)**gamma * poch((mu+1-gamma)/2, gamma)
+
+    assert_allclose(A, At)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fft/tests/test_helper.py b/__packaged__/coreml/.python_dependencies/scipy/fft/tests/test_helper.py
new file mode 100644
index 00000000..26f11b5e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fft/tests/test_helper.py
@@ -0,0 +1,300 @@
+from scipy.fft._helper import next_fast_len, _init_nd_shape_and_axes
+from numpy.testing import assert_equal, assert_array_equal
+from pytest import raises as assert_raises
+import pytest
+import numpy as np
+import sys
+
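+# 5-smooth numbers (all prime factors <= 5) are already "fast" FFT lengths,
+# so ``next_fast_len`` should return them unchanged.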
+_5_smooth_numbers = [
+    2, 3, 4, 5, 6, 8, 9, 10,
+    2 * 3 * 5,
+    2**3 * 3**5,
+    2**3 * 3**3 * 5**2,
+]
+
+def test_next_fast_len():
+    for n in _5_smooth_numbers:
+        assert_equal(next_fast_len(n), n)
+
+
+def _assert_n_smooth(x, n):
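+    # Check that x is n-smooth (has no prime factor greater than n): strip all
+    # factors of 2, then every odd factor up to n; the remainder must be 1.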
+    x_orig = x
+    if n < 2:
+        assert False
+
+    while True:
+        q, r = divmod(x, 2)
+        if r != 0:
+            break
+        x = q
+
+    for d in range(3, n+1, 2):
+        while True:
+            q, r = divmod(x, d)
+            if r != 0:
+                break
+            x = q
+
+    assert x == 1, \
+           'x={} is not {}-smooth, remainder={}'.format(x_orig, n, x)
+
+
+class TestNextFastLen:
+
+    def test_next_fast_len(self):
+        np.random.seed(1234)
+
+        def nums():
+            yield from range(1, 1000)
+            yield 2**5 * 3**5 * 4**5 + 1
+
+        for n in nums():
+            m = next_fast_len(n)
+            _assert_n_smooth(m, 11)
+            assert m == next_fast_len(n, False)
+
+            m = next_fast_len(n, True)
+            _assert_n_smooth(m, 5)
+
+    def test_np_integers(self):
+        ITYPES = [np.int16, np.int32, np.int64, np.uint16, np.uint32, np.uint64]
+        for ityp in ITYPES:
+            x = ityp(12345)
+            testN = next_fast_len(x)
+            assert_equal(testN, next_fast_len(int(x)))
+
+    def testnext_fast_len_small(self):
+        hams = {
+            1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 8, 8: 8, 14: 15, 15: 15,
+            16: 16, 17: 18, 1021: 1024, 1536: 1536, 51200000: 51200000
+        }
+        for x, y in hams.items():
+            assert_equal(next_fast_len(x, True), y)
+
+    @pytest.mark.xfail(sys.maxsize < 2**32,
+                       reason="Hamming Numbers too large for 32-bit",
+                       raises=ValueError, strict=True)
+    def testnext_fast_len_big(self):
+        hams = {
+            510183360: 510183360, 510183360 + 1: 512000000,
+            511000000: 512000000,
+            854296875: 854296875, 854296875 + 1: 859963392,
+            196608000000: 196608000000, 196608000000 + 1: 196830000000,
+            8789062500000: 8789062500000, 8789062500000 + 1: 8796093022208,
+            206391214080000: 206391214080000,
+            206391214080000 + 1: 206624260800000,
+            470184984576000: 470184984576000,
+            470184984576000 + 1: 470715894135000,
+            7222041363087360: 7222041363087360,
+            7222041363087360 + 1: 7230196133913600,
+            # power of 5    5**23
+            11920928955078125: 11920928955078125,
+            11920928955078125 - 1: 11920928955078125,
+            # power of 3    3**34
+            16677181699666569: 16677181699666569,
+            16677181699666569 - 1: 16677181699666569,
+            # power of 2   2**54
+            18014398509481984: 18014398509481984,
+            18014398509481984 - 1: 18014398509481984,
+            # above this, int(ceil(n)) == int(ceil(n+1))
+            19200000000000000: 19200000000000000,
+            19200000000000000 + 1: 19221679687500000,
+            288230376151711744: 288230376151711744,
+            288230376151711744 + 1: 288325195312500000,
+            288325195312500000 - 1: 288325195312500000,
+            288325195312500000: 288325195312500000,
+            288325195312500000 + 1: 288555831593533440,
+        }
+        for x, y in hams.items():
+            assert_equal(next_fast_len(x, True), y)
+
+    def test_keyword_args(self):
+        assert next_fast_len(11, real=True) == 12
+        assert next_fast_len(target=7, real=False) == 7
+
+
+class Test_init_nd_shape_and_axes:
+
+    def test_py_0d_defaults(self):
+        x = np.array(4)
+        shape = None
+        axes = None
+
+        shape_expected = np.array([])
+        axes_expected = np.array([])
+
+        shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
+
+        assert_equal(shape_res, shape_expected)
+        assert_equal(axes_res, axes_expected)
+
+    def test_np_0d_defaults(self):
+        x = np.array(7.)
+        shape = None
+        axes = None
+
+        shape_expected = np.array([])
+        axes_expected = np.array([])
+
+        shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
+
+        assert_equal(shape_res, shape_expected)
+        assert_equal(axes_res, axes_expected)
+
+    def test_py_1d_defaults(self):
+        x = np.array([1, 2, 3])
+        shape = None
+        axes = None
+
+        shape_expected = np.array([3])
+        axes_expected = np.array([0])
+
+        shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
+
+        assert_equal(shape_res, shape_expected)
+        assert_equal(axes_res, axes_expected)
+
+    def test_np_1d_defaults(self):
+        x = np.arange(0, 1, .1)
+        shape = None
+        axes = None
+
+        shape_expected = np.array([10])
+        axes_expected = np.array([0])
+
+        shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
+
+        assert_equal(shape_res, shape_expected)
+        assert_equal(axes_res, axes_expected)
+
+    def test_py_2d_defaults(self):
+        x = np.array([[1, 2, 3, 4],
+                      [5, 6, 7, 8]])
+        shape = None
+        axes = None
+
+        shape_expected = np.array([2, 4])
+        axes_expected = np.array([0, 1])
+
+        shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
+
+        assert_equal(shape_res, shape_expected)
+        assert_equal(axes_res, axes_expected)
+
+    def test_np_2d_defaults(self):
+        x = np.arange(0, 1, .1).reshape(5, 2)
+        shape = None
+        axes = None
+
+        shape_expected = np.array([5, 2])
+        axes_expected = np.array([0, 1])
+
+        shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
+
+        assert_equal(shape_res, shape_expected)
+        assert_equal(axes_res, axes_expected)
+
+    def test_np_5d_defaults(self):
+        x = np.zeros([6, 2, 5, 3, 4])
+        shape = None
+        axes = None
+
+        shape_expected = np.array([6, 2, 5, 3, 4])
+        axes_expected = np.array([0, 1, 2, 3, 4])
+
+        shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
+
+        assert_equal(shape_res, shape_expected)
+        assert_equal(axes_res, axes_expected)
+
+    def test_np_5d_set_shape(self):
+        x = np.zeros([6, 2, 5, 3, 4])
+        shape = [10, -1, -1, 1, 4]
+        axes = None
+
+        shape_expected = np.array([10, 2, 5, 1, 4])
+        axes_expected = np.array([0, 1, 2, 3, 4])
+
+        shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
+
+        assert_equal(shape_res, shape_expected)
+        assert_equal(axes_res, axes_expected)
+
+    def test_np_5d_set_axes(self):
+        x = np.zeros([6, 2, 5, 3, 4])
+        shape = None
+        axes = [4, 1, 2]
+
+        shape_expected = np.array([4, 2, 5])
+        axes_expected = np.array([4, 1, 2])
+
+        shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
+
+        assert_equal(shape_res, shape_expected)
+        assert_equal(axes_res, axes_expected)
+
+    def test_np_5d_set_shape_axes(self):
+        x = np.zeros([6, 2, 5, 3, 4])
+        shape = [10, -1, 2]
+        axes = [1, 0, 3]
+
+        shape_expected = np.array([10, 6, 2])
+        axes_expected = np.array([1, 0, 3])
+
+        shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
+
+        assert_equal(shape_res, shape_expected)
+        assert_equal(axes_res, axes_expected)
+
+    def test_shape_axes_subset(self):
+        x = np.zeros((2, 3, 4, 5))
+        shape, axes = _init_nd_shape_and_axes(x, shape=(5, 5, 5), axes=None)
+
+        assert_array_equal(shape, [5, 5, 5])
+        assert_array_equal(axes, [1, 2, 3])
+
+    def test_errors(self):
+        x = np.zeros(1)
+        with assert_raises(ValueError, match="axes must be a scalar or "
+                           "iterable of integers"):
+            _init_nd_shape_and_axes(x, shape=None, axes=[[1, 2], [3, 4]])
+
+        with assert_raises(ValueError, match="axes must be a scalar or "
+                           "iterable of integers"):
+            _init_nd_shape_and_axes(x, shape=None, axes=[1., 2., 3., 4.])
+
+        with assert_raises(ValueError,
+                           match="axes exceeds dimensionality of input"):
+            _init_nd_shape_and_axes(x, shape=None, axes=[1])
+
+        with assert_raises(ValueError,
+                           match="axes exceeds dimensionality of input"):
+            _init_nd_shape_and_axes(x, shape=None, axes=[-2])
+
+        with assert_raises(ValueError,
+                           match="all axes must be unique"):
+            _init_nd_shape_and_axes(x, shape=None, axes=[0, 0])
+
+        with assert_raises(ValueError, match="shape must be a scalar or "
+                           "iterable of integers"):
+            _init_nd_shape_and_axes(x, shape=[[1, 2], [3, 4]], axes=None)
+
+        with assert_raises(ValueError, match="shape must be a scalar or "
+                           "iterable of integers"):
+            _init_nd_shape_and_axes(x, shape=[1., 2., 3., 4.], axes=None)
+
+        with assert_raises(ValueError,
+                           match="when given, axes and shape arguments"
+                           " have to be of the same length"):
+            _init_nd_shape_and_axes(np.zeros([1, 1, 1, 1]),
+                                    shape=[1, 2, 3], axes=[1])
+
+        with assert_raises(ValueError,
+                           match="invalid number of data points"
+                           r" \(\[0\]\) specified"):
+            _init_nd_shape_and_axes(x, shape=[0], axes=None)
+
+        with assert_raises(ValueError,
+                           match="invalid number of data points"
+                           r" \(\[-2\]\) specified"):
+            _init_nd_shape_and_axes(x, shape=-2, axes=None)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fft/tests/test_multithreading.py b/__packaged__/coreml/.python_dependencies/scipy/fft/tests/test_multithreading.py
new file mode 100644
index 00000000..e771aff6
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fft/tests/test_multithreading.py
@@ -0,0 +1,83 @@
+from scipy import fft
+import numpy as np
+import pytest
+from numpy.testing import assert_allclose
+import multiprocessing
+import os
+
+
+@pytest.fixture(scope='module')
+def x():
+    return np.random.randn(512, 128)  # Must be large enough to qualify for mt
+
+
+@pytest.mark.parametrize("func", [
+    fft.fft, fft.ifft, fft.fft2, fft.ifft2, fft.fftn, fft.ifftn,
+    fft.rfft, fft.irfft, fft.rfft2, fft.irfft2, fft.rfftn, fft.irfftn,
+    fft.hfft, fft.ihfft, fft.hfft2, fft.ihfft2, fft.hfftn, fft.ihfftn,
+    fft.dct, fft.idct, fft.dctn, fft.idctn,
+    fft.dst, fft.idst, fft.dstn, fft.idstn,
+])
+@pytest.mark.parametrize("workers", [2, -1])
+def test_threaded_same(x, func, workers):
+    expected = func(x, workers=1)
+    actual = func(x, workers=workers)
+    assert_allclose(actual, expected)
+
+
+def _mt_fft(x):
+    return fft.fft(x, workers=2)
+
+
+def test_mixed_threads_processes(x):
+    # Test that the fft threadpool is safe to use before & after fork
+
+    expect = fft.fft(x, workers=2)
+
+    with multiprocessing.Pool(2) as p:
+        res = p.map(_mt_fft, [x for _ in range(4)])
+
+    for r in res:
+        assert_allclose(r, expect)
+
+    fft.fft(x, workers=2)
+
+
+def test_invalid_workers(x):
+    cpus = os.cpu_count()
+
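+    # workers=-cpus is the most negative value that wraps around to a valid
+    # worker count (1), so this call must succeed.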
+    fft.ifft([1], workers=-cpus)
+
+    with pytest.raises(ValueError, match='workers must not be zero'):
+        fft.fft(x, workers=0)
+
+    with pytest.raises(ValueError, match='workers value out of range'):
+        fft.ifft(x, workers=-cpus-1)
+
+
+def test_set_get_workers():
+    cpus = os.cpu_count()
+    assert fft.get_workers() == 1
+    with fft.set_workers(4):
+        assert fft.get_workers() == 4
+
+        with fft.set_workers(-1):
+            assert fft.get_workers() == cpus
+
+        assert fft.get_workers() == 4
+
+    assert fft.get_workers() == 1
+
+    with fft.set_workers(-cpus):
+        assert fft.get_workers() == 1
+
+
+def test_set_workers_invalid():
+
+    with pytest.raises(ValueError, match='workers must not be zero'):
+        with fft.set_workers(0):
+            pass
+
+    with pytest.raises(ValueError, match='workers value out of range'):
+        with fft.set_workers(-os.cpu_count()-1):
+            pass
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fft/tests/test_numpy.py b/__packaged__/coreml/.python_dependencies/scipy/fft/tests/test_numpy.py
new file mode 100644
index 00000000..c1e8e80f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fft/tests/test_numpy.py
@@ -0,0 +1,364 @@
+import queue
+import threading
+import multiprocessing
+import numpy as np
+import pytest
+from numpy.random import random
+from numpy.testing import (
+        assert_array_almost_equal, assert_array_equal, assert_allclose
+        )
+from pytest import raises as assert_raises
+import scipy.fft as fft
+
+def fft1(x):
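+    # Naive O(L^2) reference DFT: build the full k-by-n phase matrix and sum
+    # over n, for cross-checking the fast implementations below.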
+    L = len(x)
+    phase = -2j*np.pi*(np.arange(L)/float(L))
+    phase = np.arange(L).reshape(-1, 1) * phase
+    return np.sum(x*np.exp(phase), axis=1)
+
+
+class TestFFTShift:
+
+    def test_fft_n(self):
+        assert_raises(ValueError, fft.fft, [1, 2, 3], 0)
+
+
+class TestFFT1D:
+
+    def test_identity(self):
+        maxlen = 512
+        x = random(maxlen) + 1j*random(maxlen)
+        xr = random(maxlen)
+        for i in range(1,maxlen):
+            assert_array_almost_equal(fft.ifft(fft.fft(x[0:i])), x[0:i],
+                                      decimal=12)
+            assert_array_almost_equal(fft.irfft(fft.rfft(xr[0:i]),i),
+                                      xr[0:i], decimal=12)
+
+    def test_fft(self):
+        x = random(30) + 1j*random(30)
+        expect = fft1(x)
+        assert_array_almost_equal(expect, fft.fft(x))
+        assert_array_almost_equal(expect, fft.fft(x, norm="backward"))
+        assert_array_almost_equal(expect / np.sqrt(30),
+                                  fft.fft(x, norm="ortho"))
+        assert_array_almost_equal(expect / 30, fft.fft(x, norm="forward"))
+
+    def test_ifft(self):
+        x = random(30) + 1j*random(30)
+        assert_array_almost_equal(x, fft.ifft(fft.fft(x)))
+        for norm in ["backward", "ortho", "forward"]:
+            assert_array_almost_equal(
+                x, fft.ifft(fft.fft(x, norm=norm), norm=norm))
+
+    def test_fft2(self):
+        x = random((30, 20)) + 1j*random((30, 20))
+        expect = fft.fft(fft.fft(x, axis=1), axis=0)
+        assert_array_almost_equal(expect, fft.fft2(x))
+        assert_array_almost_equal(expect, fft.fft2(x, norm="backward"))
+        assert_array_almost_equal(expect / np.sqrt(30 * 20),
+                                  fft.fft2(x, norm="ortho"))
+        assert_array_almost_equal(expect / (30 * 20),
+                                  fft.fft2(x, norm="forward"))
+
+    def test_ifft2(self):
+        x = random((30, 20)) + 1j*random((30, 20))
+        expect = fft.ifft(fft.ifft(x, axis=1), axis=0)
+        assert_array_almost_equal(expect, fft.ifft2(x))
+        assert_array_almost_equal(expect, fft.ifft2(x, norm="backward"))
+        assert_array_almost_equal(expect * np.sqrt(30 * 20),
+                                  fft.ifft2(x, norm="ortho"))
+        assert_array_almost_equal(expect * (30 * 20),
+                                  fft.ifft2(x, norm="forward"))
+
+    def test_fftn(self):
+        x = random((30, 20, 10)) + 1j*random((30, 20, 10))
+        expect = fft.fft(fft.fft(fft.fft(x, axis=2), axis=1), axis=0)
+        assert_array_almost_equal(expect, fft.fftn(x))
+        assert_array_almost_equal(expect, fft.fftn(x, norm="backward"))
+        assert_array_almost_equal(expect / np.sqrt(30 * 20 * 10),
+                                  fft.fftn(x, norm="ortho"))
+        assert_array_almost_equal(expect / (30 * 20 * 10),
+                                  fft.fftn(x, norm="forward"))
+
+    def test_ifftn(self):
+        x = random((30, 20, 10)) + 1j*random((30, 20, 10))
+        expect = fft.ifft(fft.ifft(fft.ifft(x, axis=2), axis=1), axis=0)
+        assert_array_almost_equal(expect, fft.ifftn(x))
+        assert_array_almost_equal(expect, fft.ifftn(x, norm="backward"))
+        assert_array_almost_equal(fft.ifftn(x) * np.sqrt(30 * 20 * 10),
+                                  fft.ifftn(x, norm="ortho"))
+        assert_array_almost_equal(expect * (30 * 20 * 10),
+                                  fft.ifftn(x, norm="forward"))
+
+    def test_rfft(self):
+        x = random(29)
+        for n in [x.size, 2*x.size]:
+            for norm in [None, "backward", "ortho", "forward"]:
+                assert_array_almost_equal(
+                    fft.fft(x, n=n, norm=norm)[:(n//2 + 1)],
+                    fft.rfft(x, n=n, norm=norm))
+            assert_array_almost_equal(fft.rfft(x, n=n) / np.sqrt(n),
+                                      fft.rfft(x, n=n, norm="ortho"))
+
+    def test_irfft(self):
+        x = random(30)
+        assert_array_almost_equal(x, fft.irfft(fft.rfft(x)))
+        for norm in ["backward", "ortho", "forward"]:
+            assert_array_almost_equal(
+                x, fft.irfft(fft.rfft(x, norm=norm), norm=norm))
+
+    def test_rfft2(self):
+        x = random((30, 20))
+        expect = fft.fft2(x)[:, :11]
+        assert_array_almost_equal(expect, fft.rfft2(x))
+        assert_array_almost_equal(expect, fft.rfft2(x, norm="backward"))
+        assert_array_almost_equal(expect / np.sqrt(30 * 20),
+                                  fft.rfft2(x, norm="ortho"))
+        assert_array_almost_equal(expect / (30 * 20),
+                                  fft.rfft2(x, norm="forward"))
+
+    def test_irfft2(self):
+        x = random((30, 20))
+        assert_array_almost_equal(x, fft.irfft2(fft.rfft2(x)))
+        for norm in ["backward", "ortho", "forward"]:
+            assert_array_almost_equal(
+                x, fft.irfft2(fft.rfft2(x, norm=norm), norm=norm))
+
+    def test_rfftn(self):
+        x = random((30, 20, 10))
+        expect = fft.fftn(x)[:, :, :6]
+        assert_array_almost_equal(expect, fft.rfftn(x))
+        assert_array_almost_equal(expect, fft.rfftn(x, norm="backward"))
+        assert_array_almost_equal(expect / np.sqrt(30 * 20 * 10),
+                                  fft.rfftn(x, norm="ortho"))
+        assert_array_almost_equal(expect / (30 * 20 * 10),
+                                  fft.rfftn(x, norm="forward"))
+
+    def test_irfftn(self):
+        x = random((30, 20, 10))
+        assert_array_almost_equal(x, fft.irfftn(fft.rfftn(x)))
+        for norm in ["backward", "ortho", "forward"]:
+            assert_array_almost_equal(
+                x, fft.irfftn(fft.rfftn(x, norm=norm), norm=norm))
+
+    def test_hfft(self):
+        x = random(14) + 1j*random(14)
+        x_herm = np.concatenate((random(1), x, random(1)))
+        x = np.concatenate((x_herm, x[::-1].conj()))
+        expect = fft.fft(x)
+        assert_array_almost_equal(expect, fft.hfft(x_herm))
+        assert_array_almost_equal(expect, fft.hfft(x_herm, norm="backward"))
+        assert_array_almost_equal(expect / np.sqrt(30),
+                                  fft.hfft(x_herm, norm="ortho"))
+        assert_array_almost_equal(expect / 30,
+                                  fft.hfft(x_herm, norm="forward"))
+
+    def test_ihfft(self):
+        x = random(14) + 1j*random(14)
+        x_herm = np.concatenate((random(1), x, random(1)))
+        x = np.concatenate((x_herm, x[::-1].conj()))
+        assert_array_almost_equal(x_herm, fft.ihfft(fft.hfft(x_herm)))
+        for norm in ["backward", "ortho", "forward"]:
+            assert_array_almost_equal(
+                x_herm, fft.ihfft(fft.hfft(x_herm, norm=norm), norm=norm))
+
+    def test_hfft2(self):
+        x = random((30, 20))
+        assert_array_almost_equal(x, fft.hfft2(fft.ihfft2(x)))
+        for norm in ["backward", "ortho", "forward"]:
+            assert_array_almost_equal(
+                x, fft.hfft2(fft.ihfft2(x, norm=norm), norm=norm))
+
+    def test_ihfft2(self):
+        x = random((30, 20))
+        expect = fft.ifft2(x)[:, :11]
+        assert_array_almost_equal(expect, fft.ihfft2(x))
+        assert_array_almost_equal(expect, fft.ihfft2(x, norm="backward"))
+        assert_array_almost_equal(expect * np.sqrt(30 * 20),
+                                  fft.ihfft2(x, norm="ortho"))
+        assert_array_almost_equal(expect * (30 * 20),
+                                  fft.ihfft2(x, norm="forward"))
+
+    def test_hfftn(self):
+        x = random((30, 20, 10))
+        assert_array_almost_equal(x, fft.hfftn(fft.ihfftn(x)))
+        for norm in ["backward", "ortho", "forward"]:
+            assert_array_almost_equal(
+                x, fft.hfftn(fft.ihfftn(x, norm=norm), norm=norm))
+
+    def test_ihfftn(self):
+        x = random((30, 20, 10))
+        expect = fft.ifftn(x)[:, :, :6]
+        assert_array_almost_equal(expect, fft.ihfftn(x))
+        assert_array_almost_equal(expect, fft.ihfftn(x, norm="backward"))
+        assert_array_almost_equal(expect * np.sqrt(30 * 20 * 10),
+                                  fft.ihfftn(x, norm="ortho"))
+        assert_array_almost_equal(expect * (30 * 20 * 10),
+                                  fft.ihfftn(x, norm="forward"))
+
+    @pytest.mark.parametrize("op", [fft.fftn, fft.ifftn,
+                                    fft.rfftn, fft.irfftn,
+                                    fft.hfftn, fft.ihfftn])
+    def test_axes(self, op):
+        x = random((30, 20, 10))
+        axes = [(0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0)]
+        for a in axes:
+            op_tr = op(np.transpose(x, a))
+            tr_op = np.transpose(op(x, axes=a), a)
+            assert_array_almost_equal(op_tr, tr_op)
+
+    @pytest.mark.parametrize("op", [fft.fft2, fft.ifft2,
+                                    fft.rfft2, fft.irfft2,
+                                    fft.hfft2, fft.ihfft2,
+                                    fft.fftn, fft.ifftn,
+                                    fft.rfftn, fft.irfftn,
+                                    fft.hfftn, fft.ihfftn])
+    def test_axes_subset_with_shape(self, op):
+        x = random((16, 8, 4))
+        axes = [(0, 1, 2), (0, 2, 1), (1, 2, 0)]
+        for a in axes:
+            # different shape on the first two axes
+            shape = tuple([2*x.shape[ax] if ax in a[:2] else x.shape[ax]
+                           for ax in range(x.ndim)])
+            # transform only the first two axes
+            op_tr = op(np.transpose(x, a), s=shape[:2], axes=(0, 1))
+            tr_op = np.transpose(op(x, s=shape[:2], axes=a[:2]), a)
+            assert_array_almost_equal(op_tr, tr_op)
+
+    def test_all_1d_norm_preserving(self):
+        # verify that round-trip transforms are norm-preserving
+        x = random(30)
+        x_norm = np.linalg.norm(x)
+        n = x.size * 2
+        func_pairs = [(fft.fft, fft.ifft),
+                      (fft.rfft, fft.irfft),
+                      # hfft: order so the first function takes x.size samples
+                      #       (necessary for comparison to x_norm above)
+                      (fft.ihfft, fft.hfft),
+                      ]
+        for forw, back in func_pairs:
+            for n in [x.size, 2*x.size]:
+                for norm in ['backward', 'ortho', 'forward']:
+                    tmp = forw(x, n=n, norm=norm)
+                    tmp = back(tmp, n=n, norm=norm)
+                    assert_array_almost_equal(x_norm,
+                                              np.linalg.norm(tmp))
+
+    @pytest.mark.parametrize("dtype", [np.half, np.single, np.double,
+                                       np.longdouble])
+    def test_dtypes(self, dtype):
+        # make sure that all input precisions are accepted
+        x = random(30).astype(dtype)
+        assert_array_almost_equal(fft.ifft(fft.fft(x)), x)
+        assert_array_almost_equal(fft.irfft(fft.rfft(x)), x)
+        assert_array_almost_equal(fft.hfft(fft.ihfft(x), len(x)), x)
+
+
+@pytest.mark.parametrize(
+        "dtype",
+        [np.float32, np.float64, np.longfloat,
+         np.complex64, np.complex128, np.longcomplex])
+@pytest.mark.parametrize("order", ["F", 'non-contiguous'])
+@pytest.mark.parametrize(
+        "fft",
+        [fft.fft, fft.fft2, fft.fftn,
+         fft.ifft, fft.ifft2, fft.ifftn])
+def test_fft_with_order(dtype, order, fft):
+    # Check that FFT/IFFT produces identical results for C, Fortran and
+    # non contiguous arrays
+    rng = np.random.RandomState(42)
+    X = rng.rand(8, 7, 13).astype(dtype, copy=False)
+    if order == 'F':
+        Y = np.asfortranarray(X)
+    else:
+        # Make a non contiguous array
+        Y = X[::-1]
+        X = np.ascontiguousarray(X[::-1])
+
+    if fft.__name__.endswith('fft'):
+        for axis in range(3):
+            X_res = fft(X, axis=axis)
+            Y_res = fft(Y, axis=axis)
+            assert_array_almost_equal(X_res, Y_res)
+    elif fft.__name__.endswith(('fft2', 'fftn')):
+        axes = [(0, 1), (1, 2), (0, 2)]
+        if fft.__name__.endswith('fftn'):
+            axes.extend([(0,), (1,), (2,), None])
+        for ax in axes:
+            X_res = fft(X, axes=ax)
+            Y_res = fft(Y, axes=ax)
+            assert_array_almost_equal(X_res, Y_res)
+    else:
+        raise ValueError
+
+
+class TestFFTThreadSafe:
+    threads = 16
+    input_shape = (800, 200)
+
+    def _test_mtsame(self, func, *args):
+        def worker(args, q):
+            q.put(func(*args))
+
+        q = queue.Queue()
+        expected = func(*args)
+
+        # Spin off a bunch of threads to call the same function simultaneously
+        t = [threading.Thread(target=worker, args=(args, q))
+             for _ in range(self.threads)]
+        for thread in t:
+            thread.start()
+
+        for thread in t:
+            thread.join()
+        # Make sure all threads returned the correct value
+        for i in range(self.threads):
+            assert_array_equal(q.get(timeout=5), expected,
+                'Function returned wrong value in multithreaded context')
+
+    def test_fft(self):
+        a = np.ones(self.input_shape, dtype=np.complex128)
+        self._test_mtsame(fft.fft, a)
+
+    def test_ifft(self):
+        a = np.full(self.input_shape, 1+0j)
+        self._test_mtsame(fft.ifft, a)
+
+    def test_rfft(self):
+        a = np.ones(self.input_shape)
+        self._test_mtsame(fft.rfft, a)
+
+    def test_irfft(self):
+        a = np.full(self.input_shape, 1+0j)
+        self._test_mtsame(fft.irfft, a)
+
+    def test_hfft(self):
+        a = np.ones(self.input_shape, np.complex64)
+        self._test_mtsame(fft.hfft, a)
+
+    def test_ihfft(self):
+        a = np.ones(self.input_shape)
+        self._test_mtsame(fft.ihfft, a)
+
+
+@pytest.mark.parametrize("func", [fft.fft, fft.ifft, fft.rfft, fft.irfft])
+def test_multiprocess(func):
+    # Test that fft still works after fork (gh-10422)
+
+    with multiprocessing.Pool(2) as p:
+        res = p.map(func, [np.ones(100) for _ in range(4)])
+
+    expect = func(np.ones(100))
+    for x in res:
+        assert_allclose(x, expect)
+
+
+class TestIRFFTN:
+
+    def test_not_last_axis_success(self):
+        ar, ai = np.random.random((2, 16, 8, 32))
+        a = ar + 1j*ai
+
+        axes = (-2,)
+
+        # Should not raise error
+        fft.irfftn(a, axes=axes)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fft/tests/test_real_transforms.py b/__packaged__/coreml/.python_dependencies/scipy/fft/tests/test_real_transforms.py
new file mode 100644
index 00000000..bcfd8e3c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fft/tests/test_real_transforms.py
@@ -0,0 +1,216 @@
+
+import numpy as np
+from numpy.testing import assert_allclose, assert_array_equal
+import pytest
+
+from scipy.fft import dct, idct, dctn, idctn, dst, idst, dstn, idstn
+import scipy.fft as fft
+from scipy import fftpack
+
+import math
+SQRT_2 = math.sqrt(2)
+
+# scipy.fft wraps the fftpack versions but with normalized inverse transforms.
+# So, the forward transforms and definitions are already thoroughly tested in
+# fftpack/test_real_transforms.py
+
+
+@pytest.mark.parametrize("forward, backward", [(dct, idct), (dst, idst)])
+@pytest.mark.parametrize("type", [1, 2, 3, 4])
+@pytest.mark.parametrize("n", [2, 3, 4, 5, 10, 16])
+@pytest.mark.parametrize("axis", [0, 1])
+@pytest.mark.parametrize("norm", [None, 'backward', 'ortho', 'forward'])
+@pytest.mark.parametrize("orthogonalize", [False, True])
+def test_identity_1d(forward, backward, type, n, axis, norm, orthogonalize):
+    # Test the identity f^-1(f(x)) == x
+    x = np.random.rand(n, n)
+
+    y = forward(x, type, axis=axis, norm=norm, orthogonalize=orthogonalize)
+    z = backward(y, type, axis=axis, norm=norm, orthogonalize=orthogonalize)
+    assert_allclose(z, x)
+
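+    # Pad the transformed axis with edge values; passing n to the inverse
+    # truncates the padding away again, so the round trip still recovers x.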
+    pad = [(0, 0)] * 2
+    pad[axis] = (0, 4)
+
+    y2 = np.pad(y, pad, mode='edge')
+    z2 = backward(y2, type, n, axis, norm, orthogonalize=orthogonalize)
+    assert_allclose(z2, x)
+
+
+@pytest.mark.parametrize("forward, backward", [(dct, idct), (dst, idst)])
+@pytest.mark.parametrize("type", [1, 2, 3, 4])
+@pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64,
+                                   np.complex64, np.complex128])
+@pytest.mark.parametrize("axis", [0, 1])
+@pytest.mark.parametrize("norm", [None, 'backward', 'ortho', 'forward'])
+@pytest.mark.parametrize("overwrite_x", [True, False])
+def test_identity_1d_overwrite(forward, backward, type, dtype, axis, norm,
+                               overwrite_x):
+    # Test the identity f^-1(f(x)) == x
+    x = np.random.rand(7, 8).astype(dtype)
+    x_orig = x.copy()
+
+    y = forward(x, type, axis=axis, norm=norm, overwrite_x=overwrite_x)
+    y_orig = y.copy()
+    z = backward(y, type, axis=axis, norm=norm, overwrite_x=overwrite_x)
+    if not overwrite_x:
+        assert_allclose(z, x, rtol=1e-6, atol=1e-6)
+        assert_array_equal(x, x_orig)
+        assert_array_equal(y, y_orig)
+    else:
+        assert_allclose(z, x_orig, rtol=1e-6, atol=1e-6)
+
+
+@pytest.mark.parametrize("forward, backward", [(dctn, idctn), (dstn, idstn)])
+@pytest.mark.parametrize("type", [1, 2, 3, 4])
+@pytest.mark.parametrize("shape, axes",
+                         [
+                             ((4, 4), 0),
+                             ((4, 4), 1),
+                             ((4, 4), None),
+                             ((4, 4), (0, 1)),
+                             ((10, 12), None),
+                             ((10, 12), (0, 1)),
+                             ((4, 5, 6), None),
+                             ((4, 5, 6), 1),
+                             ((4, 5, 6), (0, 2)),
+                         ])
+@pytest.mark.parametrize("norm", [None, 'backward', 'ortho', 'forward'])
+@pytest.mark.parametrize("orthogonalize", [False, True])
+def test_identity_nd(forward, backward, type, shape, axes, norm,
+                     orthogonalize):
+    # Test the identity f^-1(f(x)) == x
+
+    x = np.random.random(shape)
+
+    if axes is not None:
+        shape = np.take(shape, axes)
+
+    y = forward(x, type, axes=axes, norm=norm, orthogonalize=orthogonalize)
+    z = backward(y, type, axes=axes, norm=norm, orthogonalize=orthogonalize)
+    assert_allclose(z, x)
+
+    if axes is None:
+        pad = [(0, 4)] * x.ndim
+    elif isinstance(axes, int):
+        pad = [(0, 0)] * x.ndim
+        pad[axes] = (0, 4)
+    else:
+        pad = [(0, 0)] * x.ndim
+
+        for a in axes:
+            pad[a] = (0, 4)
+
+    y2 = np.pad(y, pad, mode='edge')
+    z2 = backward(y2, type, shape, axes, norm, orthogonalize=orthogonalize)
+    assert_allclose(z2, x)
+
+
+@pytest.mark.parametrize("forward, backward", [(dctn, idctn), (dstn, idstn)])
+@pytest.mark.parametrize("type", [1, 2, 3, 4])
+@pytest.mark.parametrize("shape, axes",
+                         [
+                             ((4, 5), 0),
+                             ((4, 5), 1),
+                             ((4, 5), None),
+                         ])
+@pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64,
+                                   np.complex64, np.complex128])
+@pytest.mark.parametrize("norm", [None, 'backward', 'ortho', 'forward'])
+@pytest.mark.parametrize("overwrite_x", [False, True])
+def test_identity_nd_overwrite(forward, backward, type, shape, axes, dtype,
+                               norm, overwrite_x):
+    # Test the identity f^-1(f(x)) == x
+
+    x = np.random.random(shape).astype(dtype)
+    x_orig = x.copy()
+
+    if axes is not None:
+        shape = np.take(shape, axes)
+
+    y = forward(x, type, axes=axes, norm=norm)
+    y_orig = y.copy()
+    z = backward(y, type, axes=axes, norm=norm)
+    if overwrite_x:
+        assert_allclose(z, x_orig, rtol=1e-6, atol=1e-6)
+    else:
+        assert_allclose(z, x, rtol=1e-6, atol=1e-6)
+        assert_array_equal(x, x_orig)
+        assert_array_equal(y, y_orig)
+
+
+@pytest.mark.parametrize("func", ['dct', 'dst', 'dctn', 'dstn'])
+@pytest.mark.parametrize("type", [1, 2, 3, 4])
+@pytest.mark.parametrize("norm", [None, 'backward', 'ortho', 'forward'])
+def test_fftpack_equivalence(func, type, norm):
+    x = np.random.rand(8, 16)
+    fft_res = getattr(fft, func)(x, type, norm=norm)
+    fftpack_res = getattr(fftpack, func)(x, type, norm=norm)
+
+    assert_allclose(fft_res, fftpack_res)
+
+
+@pytest.mark.parametrize("func", [dct, dst, dctn, dstn])
+@pytest.mark.parametrize("type", [1, 2, 3, 4])
+def test_orthogonalize_default(func, type):
+    # Test orthogonalize is the default when norm="ortho", but not otherwise
+    x = np.random.rand(100)
+
+    for norm, ortho in [
+            ("forward", False),
+            ("backward", False),
+            ("ortho", True),
+    ]:
+        a = func(x, type=type, norm=norm, orthogonalize=ortho)
+        b = func(x, type=type, norm=norm)
+        assert_allclose(a, b)
+
+
+@pytest.mark.parametrize("norm", ["backward", "ortho", "forward"])
+@pytest.mark.parametrize("func, type", [
+    (dct, 4), (dst, 1), (dst, 4)])
+def test_orthogonalize_noop(func, type, norm):
+    # Transforms where orthogonalize is a no-op
+    x = np.random.rand(100)
+    y1 = func(x, type=type, norm=norm, orthogonalize=True)
+    y2 = func(x, type=type, norm=norm, orthogonalize=False)
+    assert_allclose(y1, y2)
+
+
+@pytest.mark.parametrize("norm", ["backward", "ortho", "forward"])
+def test_orthogonalize_dct1(norm):
+    x = np.random.rand(100)
+
+    x2 = x.copy()
+    x2[0] *= SQRT_2
+    x2[-1] *= SQRT_2
+
+    y1 = dct(x, type=1, norm=norm, orthogonalize=True)
+    y2 = dct(x2, type=1, norm=norm, orthogonalize=False)
+
+    y2[0] /= SQRT_2
+    y2[-1] /= SQRT_2
+    assert_allclose(y1, y2)
+
+
+@pytest.mark.parametrize("norm", ["backward", "ortho", "forward"])
+@pytest.mark.parametrize("func", [dct, dst])
+def test_orthogonalize_dcst2(func, norm):
+    x = np.random.rand(100)
+    y1 = func(x, type=2, norm=norm, orthogonalize=True)
+    y2 = func(x, type=2, norm=norm, orthogonalize=False)
+
+    y2[0] /= SQRT_2
+    assert_allclose(y1, y2)
+
+
+@pytest.mark.parametrize("norm", ["backward", "ortho", "forward"])
+@pytest.mark.parametrize("func", [dct, dst])
+def test_orthogonalize_dcst3(func, norm):
+    x = np.random.rand(100)
+    x2 = x.copy()
+    x2[0] *= SQRT_2
+
+    y1 = func(x, type=3, norm=norm, orthogonalize=True)
+    y2 = func(x2, type=3, norm=norm, orthogonalize=False)
+    assert_allclose(y1, y2)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fftpack/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/fftpack/__init__.py
new file mode 100644
index 00000000..db28ed1a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fftpack/__init__.py
@@ -0,0 +1,104 @@
+"""
+=========================================================
+Legacy discrete Fourier transforms (:mod:`scipy.fftpack`)
+=========================================================
+
+.. warning::
+
+   This submodule is now considered legacy, new code should use
+   :mod:`scipy.fft`.
+
+Fast Fourier Transforms (FFTs)
+==============================
+
+.. autosummary::
+   :toctree: generated/
+
+   fft - Fast (discrete) Fourier Transform (FFT)
+   ifft - Inverse FFT
+   fft2 - 2-D FFT
+   ifft2 - 2-D inverse FFT
+   fftn - N-D FFT
+   ifftn - N-D inverse FFT
+   rfft - FFT of strictly real-valued sequence
+   irfft - Inverse of rfft
+   dct - Discrete cosine transform
+   idct - Inverse discrete cosine transform
+   dctn - N-D Discrete cosine transform
+   idctn - N-D Inverse discrete cosine transform
+   dst - Discrete sine transform
+   idst - Inverse discrete sine transform
+   dstn - N-D Discrete sine transform
+   idstn - N-D Inverse discrete sine transform
+
+Differential and pseudo-differential operators
+==============================================
+
+.. autosummary::
+   :toctree: generated/
+
+   diff - Differentiation and integration of periodic sequences
+   tilbert - Tilbert transform:         cs_diff(x,h,h)
+   itilbert - Inverse Tilbert transform: sc_diff(x,h,h)
+   hilbert - Hilbert transform:         cs_diff(x,inf,inf)
+   ihilbert - Inverse Hilbert transform: sc_diff(x,inf,inf)
+   cs_diff - cosh/sinh pseudo-derivative of periodic sequences
+   sc_diff - sinh/cosh pseudo-derivative of periodic sequences
+   ss_diff - sinh/sinh pseudo-derivative of periodic sequences
+   cc_diff - cosh/cosh pseudo-derivative of periodic sequences
+   shift - Shift periodic sequences
+
+Helper functions
+================
+
+.. autosummary::
+   :toctree: generated/
+
+   fftshift - Shift the zero-frequency component to the center of the spectrum
+   ifftshift - The inverse of `fftshift`
+   fftfreq - Return the Discrete Fourier Transform sample frequencies
+   rfftfreq - DFT sample frequencies (for usage with rfft, irfft)
+   next_fast_len - Find the optimal length to zero-pad an FFT for speed
+
+Note that ``fftshift``, ``ifftshift`` and ``fftfreq`` are numpy functions
+exposed by ``fftpack``; importing them from ``numpy`` should be preferred.
+
+Convolutions (:mod:`scipy.fftpack.convolve`)
+============================================
+
+.. module:: scipy.fftpack.convolve
+
+.. autosummary::
+   :toctree: generated/
+
+   convolve
+   convolve_z
+   init_convolution_kernel
+   destroy_convolve_cache
+
+"""
+
+
+__all__ = ['fft','ifft','fftn','ifftn','rfft','irfft',
+           'fft2','ifft2',
+           'diff',
+           'tilbert','itilbert','hilbert','ihilbert',
+           'sc_diff','cs_diff','cc_diff','ss_diff',
+           'shift',
+           'fftfreq', 'rfftfreq',
+           'fftshift', 'ifftshift',
+           'next_fast_len',
+           'dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn'
+           ]
+
+from ._basic import *
+from ._pseudo_diffs import *
+from ._helper import *
+from ._realtransforms import *
+
+# Deprecated namespaces, to be removed in v2.0.0
+from . import basic, helper, pseudo_diffs, realtransforms
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fftpack/_basic.py b/__packaged__/coreml/.python_dependencies/scipy/fftpack/_basic.py
new file mode 100644
index 00000000..59c85ae4
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fftpack/_basic.py
@@ -0,0 +1,428 @@
+"""
+Discrete Fourier Transforms - _basic.py
+"""
+# Created by Pearu Peterson, August,September 2002
+__all__ = ['fft','ifft','fftn','ifftn','rfft','irfft',
+           'fft2','ifft2']
+
+from scipy.fft import _pocketfft
+from ._helper import _good_shape
+
+
+def fft(x, n=None, axis=-1, overwrite_x=False):
+    """
+    Return discrete Fourier transform of real or complex sequence.
+
+    The returned complex array contains ``y(0), y(1),..., y(n-1)``, where
+
+    ``y(j) = (x * exp(-2*pi*sqrt(-1)*j*np.arange(n)/n)).sum()``.
+
+    Parameters
+    ----------
+    x : array_like
+        Array to Fourier transform.
+    n : int, optional
+        Length of the Fourier transform. If ``n < x.shape[axis]``, `x` is
+        truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The
+        default results in ``n = x.shape[axis]``.
+    axis : int, optional
+        Axis along which the fft's are computed; the default is over the
+        last axis (i.e., ``axis=-1``).
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+
+    Returns
+    -------
+    z : complex ndarray
+        with the elements::
+
+            [y(0),y(1),..,y(n/2),y(1-n/2),...,y(-1)]        if n is even
+            [y(0),y(1),..,y((n-1)/2),y(-(n-1)/2),...,y(-1)]  if n is odd
+
+        where::
+
+            y(j) = sum[k=0..n-1] x[k] * exp(-sqrt(-1)*j*k* 2*pi/n), j = 0..n-1
+
+    See Also
+    --------
+    ifft : Inverse FFT
+    rfft : FFT of a real sequence
+
+    Notes
+    -----
+    The packing of the result is "standard": If ``A = fft(a, n)``, then
+    ``A[0]`` contains the zero-frequency term, ``A[1:n/2]`` contains the
+    positive-frequency terms, and ``A[n/2:]`` contains the negative-frequency
+    terms, in order of decreasingly negative frequency. So, for an 8-point
+    transform, the frequencies of the result are [0, 1, 2, 3, -4, -3, -2, -1].
+    To rearrange the fft output so that the zero-frequency component is
+    centered, like [-4, -3, -2, -1,  0,  1,  2,  3], use `fftshift`.
+
+    Both single and double precision routines are implemented. Half precision
+    inputs will be converted to single precision. Non-floating-point inputs
+    will be converted to double precision. Long-double precision inputs are
+    not supported.
+
+    This function is most efficient when `n` is a power of two, and least
+    efficient when `n` is prime.
+
+    Note that if ``x`` is real-valued, then ``A[j] == A[n-j].conjugate()``.
+    If ``x`` is real-valued and ``n`` is even, then ``A[n/2]`` is real.
+
+    If the data type of `x` is real, a "real FFT" algorithm is automatically
+    used, which roughly halves the computation time. To increase efficiency
+    a little further, use `rfft`, which does the same calculation, but only
+    outputs half of the symmetrical spectrum. If the data is both real and
+    symmetrical, the `dct` can again double the efficiency by generating
+    half of the spectrum from half of the signal.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.fftpack import fft, ifft
+    >>> x = np.arange(5)
+    >>> np.allclose(fft(ifft(x)), x, atol=1e-15)  # within numerical accuracy.
+    True
+
+    """
+    return _pocketfft.fft(x, n, axis, None, overwrite_x)
+
+
+def ifft(x, n=None, axis=-1, overwrite_x=False):
+    """
+    Return discrete inverse Fourier transform of real or complex sequence.
+
+    The returned complex array contains ``y(0), y(1),..., y(n-1)``, where
+
+    ``y(j) = (x * exp(2*pi*sqrt(-1)*j*np.arange(n)/n)).mean()``.
+
+    Parameters
+    ----------
+    x : array_like
+        Transformed data to invert.
+    n : int, optional
+        Length of the inverse Fourier transform.  If ``n < x.shape[axis]``,
+        `x` is truncated. If ``n > x.shape[axis]``, `x` is zero-padded.
+        The default results in ``n = x.shape[axis]``.
+    axis : int, optional
+        Axis along which the ifft's are computed; the default is over the
+        last axis (i.e., ``axis=-1``).
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+
+    Returns
+    -------
+    ifft : complex ndarray
+        The inverse discrete Fourier transform.
+
+    See Also
+    --------
+    fft : Forward FFT
+
+    Notes
+    -----
+    Both single and double precision routines are implemented. Half precision
+    inputs will be converted to single precision. Non-floating-point inputs
+    will be converted to double precision. Long-double precision inputs are
+    not supported.
+
+    This function is most efficient when `n` is a power of two, and least
+    efficient when `n` is prime.
+
+    If the data type of `x` is real, a "real IFFT" algorithm is automatically
+    used, which roughly halves the computation time.
+
+    Examples
+    --------
+    >>> from scipy.fftpack import fft, ifft
+    >>> import numpy as np
+    >>> x = np.arange(5)
+    >>> np.allclose(ifft(fft(x)), x, atol=1e-15)  # within numerical accuracy.
+    True
+
+    """
+    return _pocketfft.ifft(x, n, axis, None, overwrite_x)
+
+
+def rfft(x, n=None, axis=-1, overwrite_x=False):
+    """
+    Discrete Fourier transform of a real sequence.
+
+    Parameters
+    ----------
+    x : array_like, real-valued
+        The data to transform.
+    n : int, optional
+        Defines the length of the Fourier transform. If `n` is not specified
+        (the default) then ``n = x.shape[axis]``. If ``n < x.shape[axis]``,
+        `x` is truncated, if ``n > x.shape[axis]``, `x` is zero-padded.
+    axis : int, optional
+        The axis along which the transform is applied. The default is the
+        last axis.
+    overwrite_x : bool, optional
+        If set to true, the contents of `x` can be overwritten. Default is
+        False.
+
+    Returns
+    -------
+    z : real ndarray
+        The returned real array contains::
+
+          [y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2))]              if n is even
+          [y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2)),Im(y(n/2))]   if n is odd
+
+        where::
+
+          y(j) = sum[k=0..n-1] x[k] * exp(-sqrt(-1)*j*k*2*pi/n)
+          j = 0..n-1
+
+    See Also
+    --------
+    fft, irfft, scipy.fft.rfft
+
+    Notes
+    -----
+    Within numerical accuracy, ``y == rfft(irfft(y))``.
+
+    Both single and double precision routines are implemented. Half precision
+    inputs will be converted to single precision. Non-floating-point inputs
+    will be converted to double precision. Long-double precision inputs are
+    not supported.
+
+    To get an output with a complex datatype, consider using the newer
+    function `scipy.fft.rfft`.
+
+    Examples
+    --------
+    >>> from scipy.fftpack import fft, rfft
+    >>> a = [9, -9, 1, 3]
+    >>> fft(a)
+    array([  4. +0.j,   8.+12.j,  16. +0.j,   8.-12.j])
+    >>> rfft(a)
+    array([  4.,   8.,  12.,  16.])
+
+    """
+    return _pocketfft.rfft_fftpack(x, n, axis, None, overwrite_x)
+
+
+def irfft(x, n=None, axis=-1, overwrite_x=False):
+    """
+    Return inverse discrete Fourier transform of real sequence x.
+
+    The contents of `x` are interpreted as the output of the `rfft`
+    function.
+
+    Parameters
+    ----------
+    x : array_like
+        Transformed data to invert.
+    n : int, optional
+        Length of the inverse Fourier transform.
+        If n < x.shape[axis], x is truncated.
+        If n > x.shape[axis], x is zero-padded.
+        The default results in n = x.shape[axis].
+    axis : int, optional
+        Axis along which the ifft's are computed; the default is over
+        the last axis (i.e., axis=-1).
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+
+    Returns
+    -------
+    irfft : ndarray of floats
+        The inverse discrete Fourier transform.
+
+    See Also
+    --------
+    rfft, ifft, scipy.fft.irfft
+
+    Notes
+    -----
+    The returned real array contains::
+
+        [y(0),y(1),...,y(n-1)]
+
+    where for even n::
+
+        y(j) = 1/n (sum[k=1..n/2-1] (x[2*k-1]+sqrt(-1)*x[2*k])
+                                     * exp(sqrt(-1)*j*k* 2*pi/n)
+                    + c.c. + x[0] + (-1)**(j) x[n-1])
+
+    and for odd n::
+
+        y(j) = 1/n (sum[k=1..(n-1)/2] (x[2*k-1]+sqrt(-1)*x[2*k])
+                                     * exp(sqrt(-1)*j*k* 2*pi/n)
+                    + c.c. + x[0])
+
+    c.c. denotes complex conjugate of preceding expression.
+
+    For details on input parameters, see `rfft`.
+
+    To process (conjugate-symmetric) frequency-domain data with a complex
+    datatype, consider using the newer function `scipy.fft.irfft`.
+
+    Examples
+    --------
+    >>> from scipy.fftpack import rfft, irfft
+    >>> a = [1.0, 2.0, 3.0, 4.0, 5.0]
+    >>> irfft(a)
+    array([ 2.6       , -3.16405192,  1.24398433, -1.14955713,  1.46962473])
+    >>> irfft(rfft(a))
+    array([1., 2., 3., 4., 5.])
+
+    """
+    return _pocketfft.irfft_fftpack(x, n, axis, None, overwrite_x)
+
+
+def fftn(x, shape=None, axes=None, overwrite_x=False):
+    """
+    Return multidimensional discrete Fourier transform.
+
+    The returned array contains::
+
+      y[j_1,..,j_d] = sum[k_1=0..n_1-1, ..., k_d=0..n_d-1]
+         x[k_1,..,k_d] * prod[i=1..d] exp(-sqrt(-1)*2*pi/n_i * j_i * k_i)
+
+    where d = len(x.shape) and n = x.shape.
+
+    Parameters
+    ----------
+    x : array_like
+        The (N-D) array to transform.
+    shape : int or array_like of ints or None, optional
+        The shape of the result. If both `shape` and `axes` (see below) are
+        None, `shape` is ``x.shape``; if `shape` is None but `axes` is
+        not None, then `shape` is ``numpy.take(x.shape, axes, axis=0)``.
+        If ``shape[i] > x.shape[i]``, the ith dimension is padded with zeros.
+        If ``shape[i] < x.shape[i]``, the ith dimension is truncated to
+        length ``shape[i]``.
+        If any element of `shape` is -1, the size of the corresponding
+        dimension of `x` is used.
+    axes : int or array_like of ints or None, optional
+        The axes of `x` (`y` if `shape` is not None) along which the
+        transform is applied.
+        The default is over all axes.
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed. Default is False.
+
+    Returns
+    -------
+    y : complex-valued N-D NumPy array
+        The (N-D) DFT of the input array.
+
+    See Also
+    --------
+    ifftn
+
+    Notes
+    -----
+    If ``x`` is real-valued, then
+    ``y[..., j_i, ...] == y[..., n_i-j_i, ...].conjugate()``.
+
+    Both single and double precision routines are implemented. Half precision
+    inputs will be converted to single precision. Non-floating-point inputs
+    will be converted to double precision. Long-double precision inputs are
+    not supported.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.fftpack import fftn, ifftn
+    >>> y = (-np.arange(16), 8 - np.arange(16), np.arange(16))
+    >>> np.allclose(y, fftn(ifftn(y)))
+    True
+
+    """
+    shape = _good_shape(x, shape, axes)
+    return _pocketfft.fftn(x, shape, axes, None, overwrite_x)
+
+
+def ifftn(x, shape=None, axes=None, overwrite_x=False):
+    """
+    Return inverse multidimensional discrete Fourier transform.
+
+    The sequence can be of an arbitrary type.
+
+    The returned array contains::
+
+      y[j_1,..,j_d] = 1/p * sum[k_1=0..n_1-1, ..., k_d=0..n_d-1]
+         x[k_1,..,k_d] * prod[i=1..d] exp(sqrt(-1)*2*pi/n_i * j_i * k_i)
+
+    where ``d = len(x.shape)``, ``n = x.shape``, and ``p = prod[i=1..d] n_i``.
+
+    For description of parameters see `fftn`.
+
+    See Also
+    --------
+    fftn : for detailed information.
+
+    Examples
+    --------
+    >>> from scipy.fftpack import fftn, ifftn
+    >>> import numpy as np
+    >>> y = (-np.arange(16), 8 - np.arange(16), np.arange(16))
+    >>> np.allclose(y, ifftn(fftn(y)))
+    True
+
+    """
+    shape = _good_shape(x, shape, axes)
+    return _pocketfft.ifftn(x, shape, axes, None, overwrite_x)
+
+
+def fft2(x, shape=None, axes=(-2, -1), overwrite_x=False):
+    """
+    2-D discrete Fourier transform.
+
+    Return the 2-D discrete Fourier transform of the 2-D argument
+    `x`.
+
+    See Also
+    --------
+    fftn : for detailed information.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.fftpack import fft2, ifft2
+    >>> y = np.mgrid[:5, :5][0]
+    >>> y
+    array([[0, 0, 0, 0, 0],
+           [1, 1, 1, 1, 1],
+           [2, 2, 2, 2, 2],
+           [3, 3, 3, 3, 3],
+           [4, 4, 4, 4, 4]])
+    >>> np.allclose(y, ifft2(fft2(y)))
+    True
+    """
+    return fftn(x, shape, axes, overwrite_x)
+
+
+def ifft2(x, shape=None, axes=(-2, -1), overwrite_x=False):
+    """
+    2-D discrete inverse Fourier transform of real or complex sequence.
+
+    Return inverse 2-D discrete Fourier transform of
+    arbitrary type sequence x.
+
+    See `ifft` for more information.
+
+    See Also
+    --------
+    fft2, ifft
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.fftpack import fft2, ifft2
+    >>> y = np.mgrid[:5, :5][0]
+    >>> y
+    array([[0, 0, 0, 0, 0],
+           [1, 1, 1, 1, 1],
+           [2, 2, 2, 2, 2],
+           [3, 3, 3, 3, 3],
+           [4, 4, 4, 4, 4]])
+    >>> np.allclose(y, fft2(ifft2(y)))
+    True
+
+    """
+    return ifftn(x, shape, axes, overwrite_x)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fftpack/_helper.py b/__packaged__/coreml/.python_dependencies/scipy/fftpack/_helper.py
new file mode 100644
index 00000000..b144c276
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fftpack/_helper.py
@@ -0,0 +1,112 @@
+import operator
+from numpy.fft.helper import fftshift, ifftshift, fftfreq
+import scipy.fft._pocketfft.helper as _helper
+import numpy as np
+__all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq', 'next_fast_len']
+
+
+def rfftfreq(n, d=1.0):
+    """DFT sample frequencies (for usage with rfft, irfft).
+
+    The returned float array contains the frequency bins in
+    cycles/unit (with zero at the start) given a window length `n` and a
+    sample spacing `d`::
+
+      f = [0,1,1,2,2,...,n/2-1,n/2-1,n/2]/(d*n)   if n is even
+      f = [0,1,1,2,2,...,n/2-1,n/2-1,n/2,n/2]/(d*n)   if n is odd
+
+    Parameters
+    ----------
+    n : int
+        Window length.
+    d : scalar, optional
+        Sample spacing. Default is 1.
+
+    Returns
+    -------
+    out : ndarray
+        The array of length `n`, containing the sample frequencies.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import fftpack
+    >>> sig = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float)
+    >>> sig_fft = fftpack.rfft(sig)
+    >>> n = sig_fft.size
+    >>> timestep = 0.1
+    >>> freq = fftpack.rfftfreq(n, d=timestep)
+    >>> freq
+    array([ 0.  ,  1.25,  1.25,  2.5 ,  2.5 ,  3.75,  3.75,  5.  ])
+
+    """
+    n = operator.index(n)
+    if n < 0:
+        raise ValueError("n = %s is not valid. "
+                         "n must be a nonnegative integer." % n)
+
+    return (np.arange(1, n + 1, dtype=int) // 2) / float(n * d)
+
+
+def next_fast_len(target):
+    """
+    Find the next fast size of input data to `fft`, for zero-padding, etc.
+
+    SciPy's FFTPACK has efficient functions for radix {2, 3, 4, 5}, so this
+    returns the next composite of the prime factors 2, 3, and 5 which is
+    greater than or equal to `target`. (These are also known as 5-smooth
+    numbers, regular numbers, or Hamming numbers.)
+
+    Parameters
+    ----------
+    target : int
+        Length to start searching from. Must be a positive integer.
+
+    Returns
+    -------
+    out : int
+        The first 5-smooth number greater than or equal to `target`.
+
+    Notes
+    -----
+    .. versionadded:: 0.18.0
+
+    Examples
+    --------
+    On a particular machine, an FFT of prime length takes 133 ms:
+
+    >>> from scipy import fftpack
+    >>> import numpy as np
+    >>> rng = np.random.default_rng()
+    >>> min_len = 10007  # prime length is worst case for speed
+    >>> a = rng.standard_normal(min_len)
+    >>> b = fftpack.fft(a)
+
+    Zero-padding to the next 5-smooth length reduces computation time to
+    211 us, a speedup of 630 times:
+
+    >>> fftpack.next_fast_len(min_len)
+    10125
+    >>> b = fftpack.fft(a, 10125)
+
+    Rounding up to the next power of 2 is not optimal, taking 367 us to
+    compute, 1.7 times as long as the 5-smooth size:
+
+    >>> b = fftpack.fft(a, 16384)
+
+    """
+    # Real transforms use regular sizes so this is backwards compatible
+    return _helper.good_size(target, True)
+
+
+def _good_shape(x, shape, axes):
+    """Ensure that shape argument is valid for scipy.fftpack
+
+    scipy.fftpack does not support len(shape) < x.ndim when axes is not given.
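+
+    For example (an illustrative check of the branch below), a 2-D input
+    with a 1-D `shape` and no `axes` is rejected:
+
+    >>> import numpy as np
+    >>> _good_shape(np.zeros((4, 4)), (8,), None)
+    Traceback (most recent call last):
+        ...
+    ValueError: when given, axes and shape arguments have to be of the same length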
+    """
+    if shape is not None and axes is None:
+        shape = _helper._iterable_of_int(shape, 'shape')
+        if len(shape) != np.ndim(x):
+            raise ValueError("when given, axes and shape arguments"
+                             " have to be of the same length")
+    return shape
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fftpack/_pseudo_diffs.py b/__packaged__/coreml/.python_dependencies/scipy/fftpack/_pseudo_diffs.py
new file mode 100644
index 00000000..b8ef40ef
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fftpack/_pseudo_diffs.py
@@ -0,0 +1,551 @@
+"""
+Differential and pseudo-differential operators.
+"""
+# Created by Pearu Peterson, September 2002
+
+__all__ = ['diff',
+           'tilbert','itilbert','hilbert','ihilbert',
+           'cs_diff','cc_diff','sc_diff','ss_diff',
+           'shift']
+
+from numpy import pi, asarray, sin, cos, sinh, cosh, tanh, iscomplexobj
+from . import convolve
+
+from scipy.fft._pocketfft.helper import _datacopied
+
+
+_cache = {}
+
+
+def diff(x,order=1,period=None, _cache=_cache):
+    """
+    Return kth derivative (or integral) of a periodic sequence x.
+
+    If x_j and y_j are Fourier coefficients of periodic functions x
+    and y, respectively, then::
+
+      y_j = pow(sqrt(-1)*j*2*pi/period, order) * x_j
+      y_0 = 0 if order is not 0.
+
+    Parameters
+    ----------
+    x : array_like
+        Input array.
+    order : int, optional
+        The order of differentiation. Default order is 1. If order is
+        negative, then integration is carried out under the assumption
+        that ``x_0 == 0``.
+    period : float, optional
+        The assumed period of the sequence. Default is ``2*pi``.
+
+    Notes
+    -----
+    If ``sum(x, axis=0) == 0`` then ``diff(diff(x, k), -k) == x`` (within
+    numerical accuracy).
+
+    For odd order and even ``len(x)``, the Nyquist mode is taken zero.
+
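+    Examples
+    --------
+    A small sanity check (an illustrative addition, relying only on the
+    definition above): spectral differentiation is exact for a sine wave
+    resolved on a periodic grid, so ``diff`` of ``sin`` gives ``cos``.
+
+    >>> import numpy as np
+    >>> from scipy.fftpack import diff
+    >>> t = np.linspace(0, 2*np.pi, 16, endpoint=False)
+    >>> np.allclose(diff(np.sin(t)), np.cos(t))
+    True
+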
+    """
+    tmp = asarray(x)
+    if order == 0:
+        return tmp
+    if iscomplexobj(tmp):
+        return diff(tmp.real,order,period)+1j*diff(tmp.imag,order,period)
+    if period is not None:
+        c = 2*pi/period
+    else:
+        c = 1.0
+    n = len(x)
+    omega = _cache.get((n,order,c))
+    if omega is None:
+        if len(_cache) > 20:
+            while _cache:
+                _cache.popitem()
+
+        def kernel(k,order=order,c=c):
+            if k:
+                return pow(c*k,order)
+            return 0
+        omega = convolve.init_convolution_kernel(n,kernel,d=order,
+                                                 zero_nyquist=1)
+        _cache[(n,order,c)] = omega
+    overwrite_x = _datacopied(tmp, x)
+    return convolve.convolve(tmp,omega,swap_real_imag=order % 2,
+                             overwrite_x=overwrite_x)
+
+
+del _cache
+
+
+_cache = {}
+
+
+def tilbert(x, h, period=None, _cache=_cache):
+    """
+    Return h-Tilbert transform of a periodic sequence x.
+
+    If x_j and y_j are Fourier coefficients of periodic functions x
+    and y, respectively, then::
+
+        y_j = sqrt(-1)*coth(j*h*2*pi/period) * x_j
+        y_0 = 0
+
+    Parameters
+    ----------
+    x : array_like
+        The input array to transform.
+    h : float
+        Defines the parameter of the Tilbert transform.
+    period : float, optional
+        The assumed period of the sequence. Default period is ``2*pi``.
+
+    Returns
+    -------
+    tilbert : ndarray
+        The result of the transform.
+
+    Notes
+    -----
+    If ``sum(x, axis=0) == 0`` and ``n = len(x)`` is odd, then
+    ``tilbert(itilbert(x)) == x``.
+
+    If ``2 * pi * h / period`` is approximately 10 or larger, then
+    numerically ``tilbert == hilbert``
+    (theoretically oo-Tilbert == Hilbert).
+
+    For even ``len(x)``, the Nyquist mode of ``x`` is taken zero.
+
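+    Examples
+    --------
+    A round-trip check (an illustrative addition, using only the identity
+    stated above for a zero-sum sequence of odd length):
+
+    >>> import numpy as np
+    >>> from scipy.fftpack import tilbert, itilbert
+    >>> x = np.sin(np.linspace(0, 2*np.pi, 15, endpoint=False))
+    >>> np.allclose(tilbert(itilbert(x, 1), 1), x)
+    True
+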
+    """
+    tmp = asarray(x)
+    if iscomplexobj(tmp):
+        return tilbert(tmp.real, h, period) + \
+               1j * tilbert(tmp.imag, h, period)
+
+    if period is not None:
+        h = h * 2 * pi / period
+
+    n = len(x)
+    omega = _cache.get((n, h))
+    if omega is None:
+        if len(_cache) > 20:
+            while _cache:
+                _cache.popitem()
+
+        def kernel(k, h=h):
+            if k:
+                return 1.0/tanh(h*k)
+
+            return 0
+
+        omega = convolve.init_convolution_kernel(n, kernel, d=1)
+        _cache[(n,h)] = omega
+
+    overwrite_x = _datacopied(tmp, x)
+    return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x)
+
+
+del _cache
+
+
+_cache = {}
+
+
+def itilbert(x,h,period=None, _cache=_cache):
+    """
+    Return inverse h-Tilbert transform of a periodic sequence x.
+
+    If ``x_j`` and ``y_j`` are Fourier coefficients of periodic functions x
+    and y, respectively, then::
+
+      y_j = -sqrt(-1)*tanh(j*h*2*pi/period) * x_j
+      y_0 = 0
+
+    For more details, see `tilbert`.
+
+    """
+    tmp = asarray(x)
+    if iscomplexobj(tmp):
+        return itilbert(tmp.real,h,period) + \
+               1j*itilbert(tmp.imag,h,period)
+    if period is not None:
+        h = h*2*pi/period
+    n = len(x)
+    omega = _cache.get((n,h))
+    if omega is None:
+        if len(_cache) > 20:
+            while _cache:
+                _cache.popitem()
+
+        def kernel(k,h=h):
+            if k:
+                return -tanh(h*k)
+            return 0
+        omega = convolve.init_convolution_kernel(n,kernel,d=1)
+        _cache[(n,h)] = omega
+    overwrite_x = _datacopied(tmp, x)
+    return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x)
+
+
+del _cache
+
+
+_cache = {}
+
+
+def hilbert(x, _cache=_cache):
+    """
+    Return Hilbert transform of a periodic sequence x.
+
+    If x_j and y_j are Fourier coefficients of periodic functions x
+    and y, respectively, then::
+
+      y_j = sqrt(-1)*sign(j) * x_j
+      y_0 = 0
+
+    Parameters
+    ----------
+    x : array_like
+        The input array, should be periodic.
+    _cache : dict, optional
+        Dictionary that caches the convolution kernels used by the transform.
+
+    Returns
+    -------
+    y : ndarray
+        The transformed input.
+
+    See Also
+    --------
+    scipy.signal.hilbert : Compute the analytic signal, using the Hilbert
+                           transform.
+
+    Notes
+    -----
+    If ``sum(x, axis=0) == 0`` then ``hilbert(ihilbert(x)) == x``.
+
+    For even len(x), the Nyquist mode of x is taken zero.
+
+    The sign convention used here omits the factor of -1 that often
+    appears in definitions of the Hilbert transform. Note also that
+    `scipy.signal.hilbert` has an extra -1 factor compared to this
+    function.
+
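+    Examples
+    --------
+    A round-trip check (an illustrative addition, using the identity from
+    the Notes with a zero-sum input):
+
+    >>> import numpy as np
+    >>> from scipy.fftpack import hilbert, ihilbert
+    >>> x = np.sin(np.linspace(0, 2*np.pi, 15, endpoint=False))
+    >>> np.allclose(hilbert(ihilbert(x)), x)
+    True
+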
+    """
+    tmp = asarray(x)
+    if iscomplexobj(tmp):
+        return hilbert(tmp.real)+1j*hilbert(tmp.imag)
+    n = len(x)
+    omega = _cache.get(n)
+    if omega is None:
+        if len(_cache) > 20:
+            while _cache:
+                _cache.popitem()
+
+        def kernel(k):
+            if k > 0:
+                return 1.0
+            elif k < 0:
+                return -1.0
+            return 0.0
+        omega = convolve.init_convolution_kernel(n,kernel,d=1)
+        _cache[n] = omega
+    overwrite_x = _datacopied(tmp, x)
+    return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x)
+
+
+del _cache
+
+
+def ihilbert(x):
+    """
+    Return inverse Hilbert transform of a periodic sequence x.
+
+    If ``x_j`` and ``y_j`` are Fourier coefficients of periodic functions x
+    and y, respectively, then::
+
+      y_j = -sqrt(-1)*sign(j) * x_j
+      y_0 = 0
+
+    """
+    return -hilbert(x)
+
+
+_cache = {}
+
+
+def cs_diff(x, a, b, period=None, _cache=_cache):
+    """
+    Return (a,b)-cosh/sinh pseudo-derivative of a periodic sequence.
+
+    If ``x_j`` and ``y_j`` are Fourier coefficients of periodic functions x
+    and y, respectively, then::
+
+      y_j = -sqrt(-1)*cosh(j*a*2*pi/period)/sinh(j*b*2*pi/period) * x_j
+      y_0 = 0
+
+    Parameters
+    ----------
+    x : array_like
+        The array to take the pseudo-derivative from.
+    a, b : float
+        Defines the parameters of the cosh/sinh pseudo-differential
+        operator.
+    period : float, optional
+        The period of the sequence. Default period is ``2*pi``.
+
+    Returns
+    -------
+    cs_diff : ndarray
+        Pseudo-derivative of periodic sequence `x`.
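+    # Per the `dct` docs, orthogonalization for DCT-I scales x[0] and x[-1]
+    # by sqrt(2) on input and y[0] and y[-1] by 1/sqrt(2) on output, so
+    # applying those factors by hand should match orthogonalize=True.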
+
+    Notes
+    -----
+    For even len(`x`), the Nyquist mode of `x` is taken as zero.
+
+    """
+    tmp = asarray(x)
+    if iscomplexobj(tmp):
+        return cs_diff(tmp.real,a,b,period) + \
+               1j*cs_diff(tmp.imag,a,b,period)
+    if period is not None:
+        a = a*2*pi/period
+        b = b*2*pi/period
+    n = len(x)
+    omega = _cache.get((n,a,b))
+    if omega is None:
+        if len(_cache) > 20:
+            while _cache:
+                _cache.popitem()
+
+        def kernel(k,a=a,b=b):
+            if k:
+                return -cosh(a*k)/sinh(b*k)
+            return 0
+        omega = convolve.init_convolution_kernel(n,kernel,d=1)
+        _cache[(n,a,b)] = omega
+    overwrite_x = _datacopied(tmp, x)
+    return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x)
+
+
+del _cache
+
+
+_cache = {}
+
+
+def sc_diff(x, a, b, period=None, _cache=_cache):
+    """
+    Return (a,b)-sinh/cosh pseudo-derivative of a periodic sequence x.
+
+    If x_j and y_j are Fourier coefficients of periodic functions x
+    and y, respectively, then::
+
+      y_j = sqrt(-1)*sinh(j*a*2*pi/period)/cosh(j*b*2*pi/period) * x_j
+      y_0 = 0
+
+    Parameters
+    ----------
+    x : array_like
+        Input array.
+    a,b : float
+        Defines the parameters of the sinh/cosh pseudo-differential
+        operator.
+    period : float, optional
+        The period of the sequence x. Default is 2*pi.
+
+    Notes
+    -----
+    ``sc_diff(cs_diff(x,a,b),b,a) == x``
+
+    For even ``len(x)``, the Nyquist mode of x is taken as zero.
+
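+    Examples
+    --------
+    A round-trip check of the identity above (an illustrative addition; a
+    zero-mean input is used because both kernels zero out the constant
+    term):
+
+    >>> import numpy as np
+    >>> from scipy.fftpack import cs_diff, sc_diff
+    >>> x = np.sin(np.linspace(0, 2*np.pi, 15, endpoint=False))
+    >>> np.allclose(sc_diff(cs_diff(x, 1, 1), 1, 1), x)
+    True
+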
+    """
+    tmp = asarray(x)
+    if iscomplexobj(tmp):
+        return sc_diff(tmp.real,a,b,period) + \
+               1j*sc_diff(tmp.imag,a,b,period)
+    if period is not None:
+        a = a*2*pi/period
+        b = b*2*pi/period
+    n = len(x)
+    omega = _cache.get((n,a,b))
+    if omega is None:
+        if len(_cache) > 20:
+            while _cache:
+                _cache.popitem()
+
+        def kernel(k,a=a,b=b):
+            if k:
+                return sinh(a*k)/cosh(b*k)
+            return 0
+        omega = convolve.init_convolution_kernel(n,kernel,d=1)
+        _cache[(n,a,b)] = omega
+    overwrite_x = _datacopied(tmp, x)
+    return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x)
+
+
+del _cache
+
+
+_cache = {}
+
+
+def ss_diff(x, a, b, period=None, _cache=_cache):
+    """
+    Return (a,b)-sinh/sinh pseudo-derivative of a periodic sequence x.
+
+    If x_j and y_j are Fourier coefficients of periodic functions x
+    and y, respectively, then::
+
+      y_j = sinh(j*a*2*pi/period)/sinh(j*b*2*pi/period) * x_j
+      y_0 = a/b * x_0
+
+    Parameters
+    ----------
+    x : array_like
+        The array to take the pseudo-derivative from.
+    a, b : float
+        Defines the parameters of the sinh/sinh pseudo-differential
+        operator.
+    period : float, optional
+        The period of the sequence x. Default is ``2*pi``.
+
+    Notes
+    -----
+    ``ss_diff(ss_diff(x,a,b),b,a) == x``
+
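+    Examples
+    --------
+    A round-trip check of the identity above (an illustrative addition;
+    the zeroth mode is preserved here, so no zero-mean restriction is
+    needed):
+
+    >>> import numpy as np
+    >>> from scipy.fftpack import ss_diff
+    >>> x = np.arange(9, dtype=float)
+    >>> np.allclose(ss_diff(ss_diff(x, 1, 2), 2, 1), x)
+    True
+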
+    """
+    tmp = asarray(x)
+    if iscomplexobj(tmp):
+        return ss_diff(tmp.real,a,b,period) + \
+               1j*ss_diff(tmp.imag,a,b,period)
+    if period is not None:
+        a = a*2*pi/period
+        b = b*2*pi/period
+    n = len(x)
+    omega = _cache.get((n,a,b))
+    if omega is None:
+        if len(_cache) > 20:
+            while _cache:
+                _cache.popitem()
+
+        def kernel(k,a=a,b=b):
+            if k:
+                return sinh(a*k)/sinh(b*k)
+            return float(a)/b
+        omega = convolve.init_convolution_kernel(n,kernel)
+        _cache[(n,a,b)] = omega
+    overwrite_x = _datacopied(tmp, x)
+    return convolve.convolve(tmp,omega,overwrite_x=overwrite_x)
+
+
+del _cache
+
+
+_cache = {}
+
+
+def cc_diff(x, a, b, period=None, _cache=_cache):
+    """
+    Return (a,b)-cosh/cosh pseudo-derivative of a periodic sequence.
+
+    If x_j and y_j are Fourier coefficients of periodic functions x
+    and y, respectively, then::
+
+      y_j = cosh(j*a*2*pi/period)/cosh(j*b*2*pi/period) * x_j
+
+    Parameters
+    ----------
+    x : array_like
+        The array to take the pseudo-derivative from.
+    a, b : float
+        Defines the parameters of the cosh/cosh pseudo-differential
+        operator.
+    period : float, optional
+        The period of the sequence x. Default is ``2*pi``.
+
+    Returns
+    -------
+    cc_diff : ndarray
+        Pseudo-derivative of periodic sequence `x`.
+
+    Notes
+    -----
+    ``cc_diff(cc_diff(x,a,b),b,a) == x``
+
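+    Examples
+    --------
+    A round-trip check of the identity above (an illustrative addition):
+
+    >>> import numpy as np
+    >>> from scipy.fftpack import cc_diff
+    >>> x = np.arange(9, dtype=float)
+    >>> np.allclose(cc_diff(cc_diff(x, 1, 2), 2, 1), x)
+    True
+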
+    """
+    tmp = asarray(x)
+    if iscomplexobj(tmp):
+        return cc_diff(tmp.real,a,b,period) + \
+               1j*cc_diff(tmp.imag,a,b,period)
+    if period is not None:
+        a = a*2*pi/period
+        b = b*2*pi/period
+    n = len(x)
+    omega = _cache.get((n,a,b))
+    if omega is None:
+        if len(_cache) > 20:
+            while _cache:
+                _cache.popitem()
+
+        def kernel(k,a=a,b=b):
+            return cosh(a*k)/cosh(b*k)
+        omega = convolve.init_convolution_kernel(n,kernel)
+        _cache[(n,a,b)] = omega
+    overwrite_x = _datacopied(tmp, x)
+    return convolve.convolve(tmp,omega,overwrite_x=overwrite_x)
+
+
+del _cache
+
+
+_cache = {}
+
+
+def shift(x, a, period=None, _cache=_cache):
+    """
+    Shift periodic sequence x by a: y(u) = x(u+a).
+
+    If x_j and y_j are Fourier coefficients of periodic functions x
+    and y, respectively, then::
+
+          y_j = exp(j*a*2*pi/period*sqrt(-1)) * x_j
+
+    Parameters
+    ----------
+    x : array_like
+        The periodic sequence to shift.
+    a : float
+        Defines the amount of the shift: ``y(u) = x(u+a)``.
+    period : float, optional
+        The period of the sequences x and y. Default period is ``2*pi``.
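+
+    Examples
+    --------
+    Shifting a sampled sine by a quarter period yields a cosine (an
+    illustrative addition; exact because the sine is resolved by the
+    grid):
+
+    >>> import numpy as np
+    >>> from scipy.fftpack import shift
+    >>> t = np.linspace(0, 2*np.pi, 16, endpoint=False)
+    >>> np.allclose(shift(np.sin(t), np.pi/2), np.cos(t))
+    True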
+    """
+    tmp = asarray(x)
+    if iscomplexobj(tmp):
+        return shift(tmp.real,a,period)+1j*shift(tmp.imag,a,period)
+    if period is not None:
+        a = a*2*pi/period
+    n = len(x)
+    omega = _cache.get((n,a))
+    if omega is None:
+        if len(_cache) > 20:
+            while _cache:
+                _cache.popitem()
+
+        def kernel_real(k,a=a):
+            return cos(a*k)
+
+        def kernel_imag(k,a=a):
+            return sin(a*k)
+        omega_real = convolve.init_convolution_kernel(n,kernel_real,d=0,
+                                                      zero_nyquist=0)
+        omega_imag = convolve.init_convolution_kernel(n,kernel_imag,d=1,
+                                                      zero_nyquist=0)
+        _cache[(n,a)] = omega_real,omega_imag
+    else:
+        omega_real,omega_imag = omega
+    overwrite_x = _datacopied(tmp, x)
+    return convolve.convolve_z(tmp,omega_real,omega_imag,
+                               overwrite_x=overwrite_x)
+
+
+del _cache
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fftpack/_realtransforms.py b/__packaged__/coreml/.python_dependencies/scipy/fftpack/_realtransforms.py
new file mode 100644
index 00000000..f56f68fc
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fftpack/_realtransforms.py
@@ -0,0 +1,598 @@
+"""
+Real spectrum transforms (DCT, DST, MDCT)
+"""
+
+__all__ = ['dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn']
+
+from scipy.fft import _pocketfft
+from ._helper import _good_shape
+
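+# Types 2 and 3 are inverses of one another (up to normalization), while
+# types 1 and 4 are each their own inverse, hence this mapping.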
+_inverse_typemap = {1: 1, 2: 3, 3: 2, 4: 4}
+
+
+def dctn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False):
+    """
+    Return multidimensional Discrete Cosine Transform along the specified axes.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    type : {1, 2, 3, 4}, optional
+        Type of the DCT (see Notes). Default type is 2.
+    shape : int or array_like of ints or None, optional
+        The shape of the result. If both `shape` and `axes` (see below) are
+        None, `shape` is ``x.shape``; if `shape` is None but `axes` is
+        not None, then `shape` is ``numpy.take(x.shape, axes, axis=0)``.
+        If ``shape[i] > x.shape[i]``, the ith dimension is padded with zeros.
+        If ``shape[i] < x.shape[i]``, the ith dimension is truncated to
+        length ``shape[i]``.
+        If any element of `shape` is -1, the size of the corresponding
+        dimension of `x` is used.
+    axes : int or array_like of ints or None, optional
+        Axes along which the DCT is computed.
+        The default is over all axes.
+    norm : {None, 'ortho'}, optional
+        Normalization mode (see Notes). Default is None.
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+
+    Returns
+    -------
+    y : ndarray of real
+        The transformed input array.
+
+    See Also
+    --------
+    idctn : Inverse multidimensional DCT
+
+    Notes
+    -----
+    For full details of the DCT types and normalization modes, as well as
+    references, see `dct`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.fftpack import dctn, idctn
+    >>> rng = np.random.default_rng()
+    >>> y = rng.standard_normal((16, 16))
+    >>> np.allclose(y, idctn(dctn(y, norm='ortho'), norm='ortho'))
+    True
+
+    """
+    shape = _good_shape(x, shape, axes)
+    return _pocketfft.dctn(x, type, shape, axes, norm, overwrite_x)
+
+
+def idctn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False):
+    """
+    Return the multidimensional inverse Discrete Cosine Transform along the
+    specified axes.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    type : {1, 2, 3, 4}, optional
+        Type of the DCT (see Notes). Default type is 2.
+    shape : int or array_like of ints or None, optional
+        The shape of the result.  If both `shape` and `axes` (see below) are
+        None, `shape` is ``x.shape``; if `shape` is None but `axes` is
+        not None, then `shape` is ``numpy.take(x.shape, axes, axis=0)``.
+        If ``shape[i] > x.shape[i]``, the ith dimension is padded with zeros.
+        If ``shape[i] < x.shape[i]``, the ith dimension is truncated to
+        length ``shape[i]``.
+        If any element of `shape` is -1, the size of the corresponding
+        dimension of `x` is used.
+    axes : int or array_like of ints or None, optional
+        Axes along which the IDCT is computed.
+        The default is over all axes.
+    norm : {None, 'ortho'}, optional
+        Normalization mode (see Notes). Default is None.
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+
+    Returns
+    -------
+    y : ndarray of real
+        The transformed input array.
+
+    See Also
+    --------
+    dctn : multidimensional DCT
+
+    Notes
+    -----
+    For full details of the IDCT types and normalization modes, as well as
+    references, see `idct`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.fftpack import dctn, idctn
+    >>> rng = np.random.default_rng()
+    >>> y = rng.standard_normal((16, 16))
+    >>> np.allclose(y, idctn(dctn(y, norm='ortho'), norm='ortho'))
+    True
+
+    """
+    type = _inverse_typemap[type]
+    shape = _good_shape(x, shape, axes)
+    return _pocketfft.dctn(x, type, shape, axes, norm, overwrite_x)
+
+
+def dstn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False):
+    """
+    Return multidimensional Discrete Sine Transform along the specified axes.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    type : {1, 2, 3, 4}, optional
+        Type of the DST (see Notes). Default type is 2.
+    shape : int or array_like of ints or None, optional
+        The shape of the result.  If both `shape` and `axes` (see below) are
+        None, `shape` is ``x.shape``; if `shape` is None but `axes` is
+        not None, then `shape` is ``numpy.take(x.shape, axes, axis=0)``.
+        If ``shape[i] > x.shape[i]``, the ith dimension is padded with zeros.
+        If ``shape[i] < x.shape[i]``, the ith dimension is truncated to
+        length ``shape[i]``.
+        If any element of `shape` is -1, the size of the corresponding
+        dimension of `x` is used.
+    axes : int or array_like of ints or None, optional
+        Axes along which the DST is computed.
+        The default is over all axes.
+    norm : {None, 'ortho'}, optional
+        Normalization mode (see Notes). Default is None.
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+
+    Returns
+    -------
+    y : ndarray of real
+        The transformed input array.
+
+    See Also
+    --------
+    idstn : Inverse multidimensional DST
+
+    Notes
+    -----
+    For full details of the DST types and normalization modes, as well as
+    references, see `dst`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.fftpack import dstn, idstn
+    >>> rng = np.random.default_rng()
+    >>> y = rng.standard_normal((16, 16))
+    >>> np.allclose(y, idstn(dstn(y, norm='ortho'), norm='ortho'))
+    True
+
+    """
+    shape = _good_shape(x, shape, axes)
+    return _pocketfft.dstn(x, type, shape, axes, norm, overwrite_x)
+
+
+def idstn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False):
+    """
+    Return the multidimensional inverse Discrete Sine Transform along the
+    specified axes.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    type : {1, 2, 3, 4}, optional
+        Type of the DST (see Notes). Default type is 2.
+    shape : int or array_like of ints or None, optional
+        The shape of the result.  If both `shape` and `axes` (see below) are
+        None, `shape` is ``x.shape``; if `shape` is None but `axes` is
+        not None, then `shape` is ``numpy.take(x.shape, axes, axis=0)``.
+        If ``shape[i] > x.shape[i]``, the ith dimension is padded with zeros.
+        If ``shape[i] < x.shape[i]``, the ith dimension is truncated to
+        length ``shape[i]``.
+        If any element of `shape` is -1, the size of the corresponding
+        dimension of `x` is used.
+    axes : int or array_like of ints or None, optional
+        Axes along which the IDST is computed.
+        The default is over all axes.
+    norm : {None, 'ortho'}, optional
+        Normalization mode (see Notes). Default is None.
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+
+    Returns
+    -------
+    y : ndarray of real
+        The transformed input array.
+
+    See Also
+    --------
+    dstn : multidimensional DST
+
+    Notes
+    -----
+    For full details of the IDST types and normalization modes, as well as
+    references, see `idst`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.fftpack import dstn, idstn
+    >>> rng = np.random.default_rng()
+    >>> y = rng.standard_normal((16, 16))
+    >>> np.allclose(y, idstn(dstn(y, norm='ortho'), norm='ortho'))
+    True
+
+    """
+    type = _inverse_typemap[type]
+    shape = _good_shape(x, shape, axes)
+    return _pocketfft.dstn(x, type, shape, axes, norm, overwrite_x)
+
+
+def dct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):
+    r"""
+    Return the Discrete Cosine Transform of arbitrary type sequence x.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    type : {1, 2, 3, 4}, optional
+        Type of the DCT (see Notes). Default type is 2.
+    n : int, optional
+        Length of the transform.  If ``n < x.shape[axis]``, `x` is
+        truncated.  If ``n > x.shape[axis]``, `x` is zero-padded. The
+        default results in ``n = x.shape[axis]``.
+    axis : int, optional
+        Axis along which the dct is computed; the default is over the
+        last axis (i.e., ``axis=-1``).
+    norm : {None, 'ortho'}, optional
+        Normalization mode (see Notes). Default is None.
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+
+    Returns
+    -------
+    y : ndarray of real
+        The transformed input array.
+
+    See Also
+    --------
+    idct : Inverse DCT
+
+    Notes
+    -----
+    For a single dimension array ``x``, ``dct(x, norm='ortho')`` is equal to
+    MATLAB ``dct(x)``.
+
+    There are, theoretically, 8 types of the DCT; only the first 4 types are
+    implemented in scipy. 'The' DCT generally refers to DCT type 2, and 'the'
+    Inverse DCT generally refers to DCT type 3.
+
+    **Type I**
+
+    There are several definitions of the DCT-I; we use the following
+    (for ``norm=None``)
+
+    .. math::
+
+       y_k = x_0 + (-1)^k x_{N-1} + 2 \sum_{n=1}^{N-2} x_n \cos\left(
+       \frac{\pi k n}{N-1} \right)
+
+    If ``norm='ortho'``, ``x[0]`` and ``x[N-1]`` are multiplied by a scaling
+    factor of :math:`\sqrt{2}`, and ``y[k]`` is multiplied by a scaling factor
+    ``f``
+
+    .. math::
+
+        f = \begin{cases}
+         \frac{1}{2}\sqrt{\frac{1}{N-1}} & \text{if }k=0\text{ or }N-1, \\
+         \frac{1}{2}\sqrt{\frac{2}{N-1}} & \text{otherwise} \end{cases}
+
+    .. versionadded:: 1.2.0
+       Orthonormalization in DCT-I.
+
+    .. note::
+       The DCT-I is only supported for input size > 1.
+
+    **Type II**
+
+    There are several definitions of the DCT-II; we use the following
+    (for ``norm=None``)
+
+    .. math::
+
+       y_k = 2 \sum_{n=0}^{N-1} x_n \cos\left(\frac{\pi k(2n+1)}{2N} \right)
+
+    If ``norm='ortho'``, ``y[k]`` is multiplied by a scaling factor ``f``
+
+    .. math::
+       f = \begin{cases}
+       \sqrt{\frac{1}{4N}} & \text{if }k=0, \\
+       \sqrt{\frac{1}{2N}} & \text{otherwise} \end{cases}
+
+    which makes the corresponding matrix of coefficients orthonormal
+    (``O @ O.T = np.eye(N)``).
+
+    **Type III**
+
+    There are several definitions; we use the following (for ``norm=None``)
+
+    .. math::
+
+       y_k = x_0 + 2 \sum_{n=1}^{N-1} x_n \cos\left(\frac{\pi(2k+1)n}{2N}\right)
+
+    or, for ``norm='ortho'``
+
+    .. math::
+
+       y_k = \frac{x_0}{\sqrt{N}} + \sqrt{\frac{2}{N}} \sum_{n=1}^{N-1} x_n
+       \cos\left(\frac{\pi(2k+1)n}{2N}\right)
+
+    The (unnormalized) DCT-III is the inverse of the (unnormalized) DCT-II, up
+    to a factor `2N`. The orthonormalized DCT-III is exactly the inverse of
+    the orthonormalized DCT-II.
+
+    **Type IV**
+
+    There are several definitions of the DCT-IV; we use the following
+    (for ``norm=None``)
+
+    .. math::
+
+       y_k = 2 \sum_{n=0}^{N-1} x_n \cos\left(\frac{\pi(2k+1)(2n+1)}{4N} \right)
+
+    If ``norm='ortho'``, ``y[k]`` is multiplied by a scaling factor ``f``
+
+    .. math::
+
+        f = \frac{1}{\sqrt{2N}}
+
+    .. versionadded:: 1.2.0
+       Support for DCT-IV.
+
+    References
+    ----------
+    .. [1] 'A Fast Cosine Transform in One and Two Dimensions', by J.
+           Makhoul, `IEEE Transactions on acoustics, speech and signal
+           processing` vol. 28(1), pp. 27-34,
+           :doi:`10.1109/TASSP.1980.1163351` (1980).
+    .. [2] Wikipedia, "Discrete cosine transform",
+           https://en.wikipedia.org/wiki/Discrete_cosine_transform
+
+    Examples
+    --------
+    The Type 1 DCT is equivalent to the FFT (though faster) for real,
+    even-symmetrical inputs. The output is also real and even-symmetrical.
+    Half of the FFT input is used to generate half of the FFT output:
+
+    >>> from scipy.fftpack import fft, dct
+    >>> import numpy as np
+    >>> fft(np.array([4., 3., 5., 10., 5., 3.])).real
+    array([ 30.,  -8.,   6.,  -2.,   6.,  -8.])
+    >>> dct(np.array([4., 3., 5., 10.]), 1)
+    array([ 30.,  -8.,   6.,  -2.])
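+
+    As a further check of the Notes above, the orthonormalized DCT-III
+    exactly inverts the orthonormalized DCT-II; a minimal sketch:
+
+    >>> x = np.array([4., 3., 5., 10.])
+    >>> np.allclose(dct(dct(x, norm='ortho'), type=3, norm='ortho'), x)
+    True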
+
+    """
+    return _pocketfft.dct(x, type, n, axis, norm, overwrite_x)
+
+
+def idct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):
+    """
+    Return the Inverse Discrete Cosine Transform of an arbitrary type sequence.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    type : {1, 2, 3, 4}, optional
+        Type of the DCT (see Notes). Default type is 2.
+    n : int, optional
+        Length of the transform.  If ``n < x.shape[axis]``, `x` is
+        truncated.  If ``n > x.shape[axis]``, `x` is zero-padded. The
+        default results in ``n = x.shape[axis]``.
+    axis : int, optional
+        Axis along which the idct is computed; the default is over the
+        last axis (i.e., ``axis=-1``).
+    norm : {None, 'ortho'}, optional
+        Normalization mode (see Notes). Default is None.
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+
+    Returns
+    -------
+    idct : ndarray of real
+        The transformed input array.
+
+    See Also
+    --------
+    dct : Forward DCT
+
+    Notes
+    -----
+    For a single dimension array `x`, ``idct(x, norm='ortho')`` is equal to
+    MATLAB ``idct(x)``.
+
+    'The' IDCT is the IDCT of type 2, which is the same as DCT of type 3.
+
+    IDCT of type 1 is the DCT of type 1, IDCT of type 2 is the DCT of type
+    3, and IDCT of type 3 is the DCT of type 2. IDCT of type 4 is the DCT
+    of type 4. For the definition of these types, see `dct`.
+
+    Examples
+    --------
+    The Type 1 DCT is equivalent to the DFT for real, even-symmetrical
+    inputs. The output is also real and even-symmetrical. Half of the IFFT
+    input is used to generate half of the IFFT output:
+
+    >>> from scipy.fftpack import ifft, idct
+    >>> import numpy as np
+    >>> ifft(np.array([ 30.,  -8.,   6.,  -2.,   6.,  -8.])).real
+    array([  4.,   3.,   5.,  10.,   5.,   3.])
+    >>> idct(np.array([ 30.,  -8.,   6.,  -2.]), 1) / 6
+    array([  4.,   3.,   5.,  10.])
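+
+    With ``norm='ortho'`` the forward/inverse pair needs no manual
+    rescaling; a minimal round-trip sketch:
+
+    >>> from scipy.fftpack import dct
+    >>> x = np.array([4., 3., 5., 10.])
+    >>> np.allclose(idct(dct(x, norm='ortho'), norm='ortho'), x)
+    True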
+
+    """
+    type = _inverse_typemap[type]
+    return _pocketfft.dct(x, type, n, axis, norm, overwrite_x)
+
+
+def dst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):
+    r"""
+    Return the Discrete Sine Transform of arbitrary type sequence x.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    type : {1, 2, 3, 4}, optional
+        Type of the DST (see Notes). Default type is 2.
+    n : int, optional
+        Length of the transform.  If ``n < x.shape[axis]``, `x` is
+        truncated.  If ``n > x.shape[axis]``, `x` is zero-padded. The
+        default results in ``n = x.shape[axis]``.
+    axis : int, optional
+        Axis along which the dst is computed; the default is over the
+        last axis (i.e., ``axis=-1``).
+    norm : {None, 'ortho'}, optional
+        Normalization mode (see Notes). Default is None.
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+
+    Returns
+    -------
+    dst : ndarray of real
+        The transformed input array.
+
+    See Also
+    --------
+    idst : Inverse DST
+
+    Notes
+    -----
+    There are, theoretically, 8 types of the DST for different combinations
+    of even/odd boundary conditions and boundary offsets [1]_; only the
+    first 4 types are implemented in scipy.
+
+    **Type I**
+
+    There are several definitions of the DST-I; we use the following
+    for ``norm=None``. DST-I assumes the input is odd around `n=-1` and `n=N`.
+
+    .. math::
+
+        y_k = 2 \sum_{n=0}^{N-1} x_n \sin\left(\frac{\pi(k+1)(n+1)}{N+1}\right)
+
+    Note that the DST-I is only supported for input size > 1.
+    The (unnormalized) DST-I is its own inverse, up to a factor `2(N+1)`.
+    The orthonormalized DST-I is exactly its own inverse.
+
+    **Type II**
+
+    There are several definitions of the DST-II; we use the following for
+    ``norm=None``. DST-II assumes the input is odd around `n=-1/2` and
+    `n=N-1/2`; the output is odd around `k=-1` and even around `k=N-1`
+
+    .. math::
+
+        y_k = 2 \sum_{n=0}^{N-1} x_n \sin\left(\frac{\pi(k+1)(2n+1)}{2N}\right)
+
+    If ``norm='ortho'``, ``y[k]`` is multiplied by a scaling factor ``f``
+
+    .. math::
+
+        f = \begin{cases}
+        \sqrt{\frac{1}{4N}} & \text{if }k = 0, \\
+        \sqrt{\frac{1}{2N}} & \text{otherwise} \end{cases}
+
+    **Type III**
+
+    There are several definitions of the DST-III; we use the following (for
+    ``norm=None``). DST-III assumes the input is odd around `n=-1` and even
+    around `n=N-1`
+
+    .. math::
+
+        y_k = (-1)^k x_{N-1} + 2 \sum_{n=0}^{N-2} x_n \sin\left(
+        \frac{\pi(2k+1)(n+1)}{2N}\right)
+
+    The (unnormalized) DST-III is the inverse of the (unnormalized) DST-II, up
+    to a factor `2N`. The orthonormalized DST-III is exactly the inverse of the
+    orthonormalized DST-II.
+
+    .. versionadded:: 0.11.0
+
+    **Type IV**
+
+    There are several definitions of the DST-IV; we use the following (for
+    ``norm=None``). DST-IV assumes the input is odd around `n=-0.5` and even
+    around `n=N-0.5`
+
+    .. math::
+
+        y_k = 2 \sum_{n=0}^{N-1} x_n \sin\left(\frac{\pi(2k+1)(2n+1)}{4N}\right)
+
+    The (unnormalized) DST-IV is its own inverse, up to a factor `2N`. The
+    orthonormalized DST-IV is exactly its own inverse.
+
+    .. versionadded:: 1.2.0
+       Support for DST-IV.
+
+    References
+    ----------
+    .. [1] Wikipedia, "Discrete sine transform",
+           https://en.wikipedia.org/wiki/Discrete_sine_transform
+
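+    Examples
+    --------
+    A minimal round-trip sketch of the identity stated in the Notes: the
+    unnormalized DST-III inverts the unnormalized DST-II up to a factor
+    ``2*N``.
+
+    >>> import numpy as np
+    >>> from scipy.fftpack import dst
+    >>> x = np.array([1., 2., 3., 4.])
+    >>> np.allclose(dst(dst(x, type=2), type=3) / (2 * len(x)), x)
+    True
+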
+    """
+    return _pocketfft.dst(x, type, n, axis, norm, overwrite_x)
+
+
+def idst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):
+    """
+    Return the Inverse Discrete Sine Transform of an arbitrary type sequence.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    type : {1, 2, 3, 4}, optional
+        Type of the DST (see Notes). Default type is 2.
+    n : int, optional
+        Length of the transform.  If ``n < x.shape[axis]``, `x` is
+        truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The
+        default results in ``n = x.shape[axis]``.
+    axis : int, optional
+        Axis along which the idst is computed; the default is over the
+        last axis (i.e., ``axis=-1``).
+    norm : {None, 'ortho'}, optional
+        Normalization mode (see Notes). Default is None.
+    overwrite_x : bool, optional
+        If True, the contents of `x` can be destroyed; the default is False.
+
+    Returns
+    -------
+    idst : ndarray of real
+        The transformed input array.
+
+    See Also
+    --------
+    dst : Forward DST
+
+    Notes
+    -----
+    'The' IDST is the IDST of type 2, which is the same as DST of type 3.
+
+    IDST of type 1 is the DST of type 1, IDST of type 2 is the DST of type
+    3, and IDST of type 3 is the DST of type 2. For the definition of these
+    types, see `dst`.
+
+    .. versionadded:: 0.11.0
+
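+    Examples
+    --------
+    A quick consistency sketch (for the default type 2): with
+    ``norm='ortho'`` the DST/IDST pair inverts exactly.
+
+    >>> import numpy as np
+    >>> from scipy.fftpack import dst, idst
+    >>> x = np.array([1., 2., 3., 4.])
+    >>> np.allclose(idst(dst(x, norm='ortho'), norm='ortho'), x)
+    True
+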
+    """
+    type = _inverse_typemap[type]
+    return _pocketfft.dst(x, type, n, axis, norm, overwrite_x)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fftpack/basic.py b/__packaged__/coreml/.python_dependencies/scipy/fftpack/basic.py
new file mode 100644
index 00000000..40aaa574
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fftpack/basic.py
@@ -0,0 +1,28 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.fftpack` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _basic
+
+__all__ = [  # noqa: F822
+    'fft','ifft','fftn','ifftn','rfft','irfft',
+    'fft2','ifft2'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.fftpack.basic is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.fftpack instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.fftpack` namespace, "
+                  "the `scipy.fftpack.basic` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_basic, name)
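+
+
+# A usage sketch of the shim above: attribute access through this deprecated
+# namespace is forwarded to ``scipy.fftpack._basic`` by the module-level
+# ``__getattr__``, emitting a DeprecationWarning along the way:
+#
+#     import warnings
+#     with warnings.catch_warnings(record=True) as w:
+#         warnings.simplefilter("always")
+#         from scipy.fftpack.basic import fft  # triggers __getattr__
+#         assert issubclass(w[-1].category, DeprecationWarning)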
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fftpack/helper.py b/__packaged__/coreml/.python_dependencies/scipy/fftpack/helper.py
new file mode 100644
index 00000000..a9efc9fe
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fftpack/helper.py
@@ -0,0 +1,27 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.fftpack` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _helper
+
+__all__ = [  # noqa: F822
+    'fftshift', 'ifftshift', 'fftfreq', 'rfftfreq', 'next_fast_len'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.fftpack.helper is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.fftpack instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.fftpack` namespace, "
+                  "the `scipy.fftpack.helper` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_helper, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fftpack/pseudo_diffs.py b/__packaged__/coreml/.python_dependencies/scipy/fftpack/pseudo_diffs.py
new file mode 100644
index 00000000..21a9e5d2
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fftpack/pseudo_diffs.py
@@ -0,0 +1,30 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.fftpack` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _pseudo_diffs
+
+__all__ = [  # noqa: F822
+    'diff',
+    'tilbert', 'itilbert', 'hilbert', 'ihilbert',
+    'cs_diff', 'cc_diff', 'sc_diff', 'ss_diff',
+    'shift', 'iscomplexobj', 'convolve'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.fftpack.pseudo_diffs is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.fftpack instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.fftpack` namespace, "
+                  "the `scipy.fftpack.pseudo_diffs` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_pseudo_diffs, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fftpack/realtransforms.py b/__packaged__/coreml/.python_dependencies/scipy/fftpack/realtransforms.py
new file mode 100644
index 00000000..e99360b8
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fftpack/realtransforms.py
@@ -0,0 +1,27 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.fftpack` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _realtransforms
+
+__all__ = [  # noqa: F822
+    'dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.fftpack.realtransforms is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.fftpack instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.fftpack` namespace, "
+                  "the `scipy.fftpack.realtransforms` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_realtransforms, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fftpack/tests/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/fftpack/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fftpack/tests/fftw_double_ref.npz b/__packaged__/coreml/.python_dependencies/scipy/fftpack/tests/fftw_double_ref.npz
new file mode 100644
index 00000000..ee6dcb73
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/fftpack/tests/fftw_double_ref.npz differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fftpack/tests/fftw_longdouble_ref.npz b/__packaged__/coreml/.python_dependencies/scipy/fftpack/tests/fftw_longdouble_ref.npz
new file mode 100644
index 00000000..cc53e6a2
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/fftpack/tests/fftw_longdouble_ref.npz differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fftpack/tests/fftw_single_ref.npz b/__packaged__/coreml/.python_dependencies/scipy/fftpack/tests/fftw_single_ref.npz
new file mode 100644
index 00000000..8953d330
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/fftpack/tests/fftw_single_ref.npz differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fftpack/tests/test.npz b/__packaged__/coreml/.python_dependencies/scipy/fftpack/tests/test.npz
new file mode 100644
index 00000000..f90294b4
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/fftpack/tests/test.npz differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fftpack/tests/test_basic.py b/__packaged__/coreml/.python_dependencies/scipy/fftpack/tests/test_basic.py
new file mode 100644
index 00000000..bf5a5097
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fftpack/tests/test_basic.py
@@ -0,0 +1,877 @@
+# Created by Pearu Peterson, September 2002
+
+from numpy.testing import (assert_, assert_equal, assert_array_almost_equal,
+                           assert_array_almost_equal_nulp, assert_array_less)
+import pytest
+from pytest import raises as assert_raises
+from scipy.fftpack import ifft, fft, fftn, ifftn, rfft, irfft, fft2
+
+from numpy import (arange, add, array, asarray, zeros, dot, exp, pi,
+                   swapaxes, double, cdouble)
+import numpy as np
+import numpy.fft
+from numpy.random import rand
+
+# "large" composite numbers supported by FFTPACK
+LARGE_COMPOSITE_SIZES = [
+    2**13,
+    2**5 * 3**5,
+    2**3 * 3**3 * 5**2,
+]
+SMALL_COMPOSITE_SIZES = [
+    2,
+    2*3*5,
+    2*2*3*3,
+]
+# prime
+LARGE_PRIME_SIZES = [
+    2011
+]
+SMALL_PRIME_SIZES = [
+    29
+]
+
+
+def _assert_close_in_norm(x, y, rtol, size, rdt):
+    # helper function for testing
+    err_msg = "size: %s  rdt: %s" % (size, rdt)
+    assert_array_less(np.linalg.norm(x - y), rtol*np.linalg.norm(x), err_msg)
+
+
+def random(size):
+    return rand(*size)
+
+
+def get_mat(n):
+    data = arange(n)
+    data = add.outer(data, data)
+    return data
+
+
+def direct_dft(x):
+    x = asarray(x)
+    n = len(x)
+    y = zeros(n, dtype=cdouble)
+    w = -arange(n)*(2j*pi/n)
+    for i in range(n):
+        y[i] = dot(exp(i*w), x)
+    return y
+
+
+def direct_idft(x):
+    x = asarray(x)
+    n = len(x)
+    y = zeros(n, dtype=cdouble)
+    w = arange(n)*(2j*pi/n)
+    for i in range(n):
+        y[i] = dot(exp(i*w), x)/n
+    return y
+
+
+def direct_dftn(x):
+    x = asarray(x)
+    for axis in range(len(x.shape)):
+        x = fft(x, axis=axis)
+    return x
+
+
+def direct_idftn(x):
+    x = asarray(x)
+    for axis in range(len(x.shape)):
+        x = ifft(x, axis=axis)
+    return x
+
+
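+# direct_rdft/direct_irdft below mirror the packed real-FFT layout used by
+# scipy.fftpack.rfft: [y(0), Re(y(1)), Im(y(1)), ..., Re(y(n/2))], where the
+# imaginary part of the final (Nyquist) component is absent when n is even.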
+def direct_rdft(x):
+    x = asarray(x)
+    n = len(x)
+    w = -arange(n)*(2j*pi/n)
+    r = zeros(n, dtype=double)
+    for i in range(n//2+1):
+        y = dot(exp(i*w), x)
+        if i:
+            r[2*i-1] = y.real
+            if 2*i < n:
+                r[2*i] = y.imag
+        else:
+            r[0] = y.real
+    return r
+
+
+def direct_irdft(x):
+    x = asarray(x)
+    n = len(x)
+    x1 = zeros(n, dtype=cdouble)
+    for i in range(n//2+1):
+        if i:
+            if 2*i < n:
+                x1[i] = x[2*i-1] + 1j*x[2*i]
+                x1[n-i] = x[2*i-1] - 1j*x[2*i]
+            else:
+                x1[i] = x[2*i-1]
+        else:
+            x1[0] = x[0]
+    return direct_idft(x1).real
+
+
+class _TestFFTBase:
+    def setup_method(self):
+        self.cdt = None
+        self.rdt = None
+        np.random.seed(1234)
+
+    def test_definition(self):
+        x = np.array([1,2,3,4+1j,1,2,3,4+2j], dtype=self.cdt)
+        y = fft(x)
+        assert_equal(y.dtype, self.cdt)
+        y1 = direct_dft(x)
+        assert_array_almost_equal(y,y1)
+        x = np.array([1,2,3,4+0j,5], dtype=self.cdt)
+        assert_array_almost_equal(fft(x),direct_dft(x))
+
+    def test_n_argument_real(self):
+        x1 = np.array([1,2,3,4], dtype=self.rdt)
+        x2 = np.array([1,2,3,4], dtype=self.rdt)
+        y = fft([x1,x2],n=4)
+        assert_equal(y.dtype, self.cdt)
+        assert_equal(y.shape,(2,4))
+        assert_array_almost_equal(y[0],direct_dft(x1))
+        assert_array_almost_equal(y[1],direct_dft(x2))
+
+    def _test_n_argument_complex(self):
+        x1 = np.array([1,2,3,4+1j], dtype=self.cdt)
+        x2 = np.array([1,2,3,4+1j], dtype=self.cdt)
+        y = fft([x1,x2],n=4)
+        assert_equal(y.dtype, self.cdt)
+        assert_equal(y.shape,(2,4))
+        assert_array_almost_equal(y[0],direct_dft(x1))
+        assert_array_almost_equal(y[1],direct_dft(x2))
+
+    def test_invalid_sizes(self):
+        assert_raises(ValueError, fft, [])
+        assert_raises(ValueError, fft, [[1,1],[2,2]], -5)
+
+
+class TestDoubleFFT(_TestFFTBase):
+    def setup_method(self):
+        self.cdt = np.cdouble
+        self.rdt = np.double
+
+
+class TestSingleFFT(_TestFFTBase):
+    def setup_method(self):
+        self.cdt = np.complex64
+        self.rdt = np.float32
+
+    @pytest.mark.xfail(run=False, reason="single-precision FFT implementation "
+                       "is partially disabled, until accuracy issues with "
+                       "large prime powers are resolved")
+    def test_notice(self):
+        pass
+
+
+class TestFloat16FFT:
+
+    def test_1_argument_real(self):
+        x1 = np.array([1, 2, 3, 4], dtype=np.float16)
+        y = fft(x1, n=4)
+        assert_equal(y.dtype, np.complex64)
+        assert_equal(y.shape, (4, ))
+        assert_array_almost_equal(y, direct_dft(x1.astype(np.float32)))
+
+    def test_n_argument_real(self):
+        x1 = np.array([1, 2, 3, 4], dtype=np.float16)
+        x2 = np.array([1, 2, 3, 4], dtype=np.float16)
+        y = fft([x1, x2], n=4)
+        assert_equal(y.dtype, np.complex64)
+        assert_equal(y.shape, (2, 4))
+        assert_array_almost_equal(y[0], direct_dft(x1.astype(np.float32)))
+        assert_array_almost_equal(y[1], direct_dft(x2.astype(np.float32)))
+
+
+class _TestIFFTBase:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_definition(self):
+        x = np.array([1,2,3,4+1j,1,2,3,4+2j], self.cdt)
+        y = ifft(x)
+        y1 = direct_idft(x)
+        assert_equal(y.dtype, self.cdt)
+        assert_array_almost_equal(y,y1)
+
+        x = np.array([1,2,3,4+0j,5], self.cdt)
+        assert_array_almost_equal(ifft(x),direct_idft(x))
+
+    def test_definition_real(self):
+        x = np.array([1,2,3,4,1,2,3,4], self.rdt)
+        y = ifft(x)
+        assert_equal(y.dtype, self.cdt)
+        y1 = direct_idft(x)
+        assert_array_almost_equal(y,y1)
+
+        x = np.array([1,2,3,4,5], dtype=self.rdt)
+        y = ifft(x)
+        assert_equal(y.dtype, self.cdt)
+        assert_array_almost_equal(y, direct_idft(x))
+
+    def test_random_complex(self):
+        for size in [1,51,111,100,200,64,128,256,1024]:
+            x = random([size]).astype(self.cdt)
+            x = random([size]).astype(self.cdt) + 1j*x
+            y1 = ifft(fft(x))
+            y2 = fft(ifft(x))
+            assert_equal(y1.dtype, self.cdt)
+            assert_equal(y2.dtype, self.cdt)
+            assert_array_almost_equal(y1, x)
+            assert_array_almost_equal(y2, x)
+
+    def test_random_real(self):
+        for size in [1,51,111,100,200,64,128,256,1024]:
+            x = random([size]).astype(self.rdt)
+            y1 = ifft(fft(x))
+            y2 = fft(ifft(x))
+            assert_equal(y1.dtype, self.cdt)
+            assert_equal(y2.dtype, self.cdt)
+            assert_array_almost_equal(y1, x)
+            assert_array_almost_equal(y2, x)
+
+    def test_size_accuracy(self):
+        # Sanity check for the accuracy for prime and non-prime sized inputs
+        if self.rdt == np.float32:
+            rtol = 1e-5
+        elif self.rdt == np.float64:
+            rtol = 1e-10
+
+        for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:
+            np.random.seed(1234)
+            x = np.random.rand(size).astype(self.rdt)
+            y = ifft(fft(x))
+            _assert_close_in_norm(x, y, rtol, size, self.rdt)
+            y = fft(ifft(x))
+            _assert_close_in_norm(x, y, rtol, size, self.rdt)
+
+            x = (x + 1j*np.random.rand(size)).astype(self.cdt)
+            y = ifft(fft(x))
+            _assert_close_in_norm(x, y, rtol, size, self.rdt)
+            y = fft(ifft(x))
+            _assert_close_in_norm(x, y, rtol, size, self.rdt)
+
+    def test_invalid_sizes(self):
+        assert_raises(ValueError, ifft, [])
+        assert_raises(ValueError, ifft, [[1,1],[2,2]], -5)
+
+
+class TestDoubleIFFT(_TestIFFTBase):
+    def setup_method(self):
+        self.cdt = np.cdouble
+        self.rdt = np.double
+
+
+class TestSingleIFFT(_TestIFFTBase):
+    def setup_method(self):
+        self.cdt = np.complex64
+        self.rdt = np.float32
+
+
+class _TestRFFTBase:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_definition(self):
+        for t in [[1, 2, 3, 4, 1, 2, 3, 4], [1, 2, 3, 4, 1, 2, 3, 4, 5]]:
+            x = np.array(t, dtype=self.rdt)
+            y = rfft(x)
+            y1 = direct_rdft(x)
+            assert_array_almost_equal(y,y1)
+            assert_equal(y.dtype, self.rdt)
+
+    def test_invalid_sizes(self):
+        assert_raises(ValueError, rfft, [])
+        assert_raises(ValueError, rfft, [[1,1],[2,2]], -5)
+
+    # See gh-5790
+    class MockSeries:
+        def __init__(self, data):
+            self.data = np.asarray(data)
+
+        def __getattr__(self, item):
+            try:
+                return getattr(self.data, item)
+            except AttributeError as e:
+                raise AttributeError(("'MockSeries' object "
+                                      "has no attribute '{attr}'".
+                                      format(attr=item))) from e
+
+    def test_non_ndarray_with_dtype(self):
+        x = np.array([1., 2., 3., 4., 5.])
+        xs = _TestRFFTBase.MockSeries(x)
+
+        expected = [1, 2, 3, 4, 5]
+        rfft(xs)
+
+        # Data should not have been overwritten
+        assert_equal(x, expected)
+        assert_equal(xs.data, expected)
+
+    def test_complex_input(self):
+        assert_raises(TypeError, rfft, np.arange(4, dtype=np.complex64))
+
+
+class TestRFFTDouble(_TestRFFTBase):
+    def setup_method(self):
+        self.cdt = np.cdouble
+        self.rdt = np.double
+
+
+class TestRFFTSingle(_TestRFFTBase):
+    def setup_method(self):
+        self.cdt = np.complex64
+        self.rdt = np.float32
+
+
+class _TestIRFFTBase:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_definition(self):
+        x1 = [1,2,3,4,1,2,3,4]
+        x1_1 = [1,2+3j,4+1j,2+3j,4,2-3j,4-1j,2-3j]
+        x2 = [1,2,3,4,1,2,3,4,5]
+        x2_1 = [1,2+3j,4+1j,2+3j,4+5j,4-5j,2-3j,4-1j,2-3j]
+
+        def _test(x, xr):
+            y = irfft(np.array(x, dtype=self.rdt))
+            y1 = direct_irdft(x)
+            assert_equal(y.dtype, self.rdt)
+            assert_array_almost_equal(y,y1, decimal=self.ndec)
+            assert_array_almost_equal(y,ifft(xr), decimal=self.ndec)
+
+        _test(x1, x1_1)
+        _test(x2, x2_1)
+
+    def test_random_real(self):
+        for size in [1,51,111,100,200,64,128,256,1024]:
+            x = random([size]).astype(self.rdt)
+            y1 = irfft(rfft(x))
+            y2 = rfft(irfft(x))
+            assert_equal(y1.dtype, self.rdt)
+            assert_equal(y2.dtype, self.rdt)
+            assert_array_almost_equal(y1, x, decimal=self.ndec,
+                                       err_msg="size=%d" % size)
+            assert_array_almost_equal(y2, x, decimal=self.ndec,
+                                       err_msg="size=%d" % size)
+
+    def test_size_accuracy(self):
+        # Sanity check for the accuracy for prime and non-prime sized inputs
+        if self.rdt == np.float32:
+            rtol = 1e-5
+        elif self.rdt == np.float64:
+            rtol = 1e-10
+
+        for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:
+            np.random.seed(1234)
+            x = np.random.rand(size).astype(self.rdt)
+            y = irfft(rfft(x))
+            _assert_close_in_norm(x, y, rtol, size, self.rdt)
+            y = rfft(irfft(x))
+            _assert_close_in_norm(x, y, rtol, size, self.rdt)
+
+    def test_invalid_sizes(self):
+        assert_raises(ValueError, irfft, [])
+        assert_raises(ValueError, irfft, [[1,1],[2,2]], -5)
+
+    def test_complex_input(self):
+        assert_raises(TypeError, irfft, np.arange(4, dtype=np.complex64))
+
+
+# self.ndec is bogus; we should have an assert_array_approx_equal for the
+# number of significant digits
+
+class TestIRFFTDouble(_TestIRFFTBase):
+    def setup_method(self):
+        self.cdt = np.cdouble
+        self.rdt = np.double
+        self.ndec = 14
+
+
+class TestIRFFTSingle(_TestIRFFTBase):
+    def setup_method(self):
+        self.cdt = np.complex64
+        self.rdt = np.float32
+        self.ndec = 5
+
+
+class Testfft2:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_regression_244(self):
+        """FFT returns wrong result with axes parameter."""
+        # fftn (and hence fft2) used to break when both axes and shape were
+        # used
+        x = numpy.ones((4, 4, 2))
+        y = fft2(x, shape=(8, 8), axes=(-3, -2))
+        y_r = numpy.fft.fftn(x, s=(8, 8), axes=(-3, -2))
+        assert_array_almost_equal(y, y_r)
+
+    def test_invalid_sizes(self):
+        assert_raises(ValueError, fft2, [[]])
+        assert_raises(ValueError, fft2, [[1, 1], [2, 2]], (4, -3))
+
+
+class TestFftnSingle:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_definition(self):
+        x = [[1, 2, 3],
+             [4, 5, 6],
+             [7, 8, 9]]
+        y = fftn(np.array(x, np.float32))
+        assert_(y.dtype == np.complex64,
+                msg="double-precision output with single-precision input")
+
+        y_r = np.array(fftn(x), np.complex64)
+        assert_array_almost_equal_nulp(y, y_r)
+
+    @pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES)
+    def test_size_accuracy_small(self, size):
+        x = np.random.rand(size, size) + 1j*np.random.rand(size, size)
+        y1 = fftn(x.real.astype(np.float32))
+        y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
+
+        assert_equal(y1.dtype, np.complex64)
+        assert_array_almost_equal_nulp(y1, y2, 2000)
+
+    @pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES)
+    def test_size_accuracy_large(self, size):
+        x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3)
+        y1 = fftn(x.real.astype(np.float32))
+        y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
+
+        assert_equal(y1.dtype, np.complex64)
+        assert_array_almost_equal_nulp(y1, y2, 2000)
+
+    def test_definition_float16(self):
+        x = [[1, 2, 3],
+             [4, 5, 6],
+             [7, 8, 9]]
+        y = fftn(np.array(x, np.float16))
+        assert_equal(y.dtype, np.complex64)
+        y_r = np.array(fftn(x), np.complex64)
+        assert_array_almost_equal_nulp(y, y_r)
+
+    @pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES)
+    def test_float16_input_small(self, size):
+        x = np.random.rand(size, size) + 1j*np.random.rand(size, size)
+        y1 = fftn(x.real.astype(np.float16))
+        y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
+
+        assert_equal(y1.dtype, np.complex64)
+        assert_array_almost_equal_nulp(y1, y2, 5e5)
+
+    @pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES)
+    def test_float16_input_large(self, size):
+        x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3)
+        y1 = fftn(x.real.astype(np.float16))
+        y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
+
+        assert_equal(y1.dtype, np.complex64)
+        assert_array_almost_equal_nulp(y1, y2, 2e6)
+
+
+class TestFftn:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_definition(self):
+        x = [[1, 2, 3],
+             [4, 5, 6],
+             [7, 8, 9]]
+        y = fftn(x)
+        assert_array_almost_equal(y, direct_dftn(x))
+
+        x = random((20, 26))
+        assert_array_almost_equal(fftn(x), direct_dftn(x))
+
+        x = random((5, 4, 3, 20))
+        assert_array_almost_equal(fftn(x), direct_dftn(x))
+
+    def test_axes_argument(self):
+        # plane == ji_plane, x== kji_space
+        plane1 = [[1, 2, 3],
+                  [4, 5, 6],
+                  [7, 8, 9]]
+        plane2 = [[10, 11, 12],
+                  [13, 14, 15],
+                  [16, 17, 18]]
+        plane3 = [[19, 20, 21],
+                  [22, 23, 24],
+                  [25, 26, 27]]
+        ki_plane1 = [[1, 2, 3],
+                     [10, 11, 12],
+                     [19, 20, 21]]
+        ki_plane2 = [[4, 5, 6],
+                     [13, 14, 15],
+                     [22, 23, 24]]
+        ki_plane3 = [[7, 8, 9],
+                     [16, 17, 18],
+                     [25, 26, 27]]
+        jk_plane1 = [[1, 10, 19],
+                     [4, 13, 22],
+                     [7, 16, 25]]
+        jk_plane2 = [[2, 11, 20],
+                     [5, 14, 23],
+                     [8, 17, 26]]
+        jk_plane3 = [[3, 12, 21],
+                     [6, 15, 24],
+                     [9, 18, 27]]
+        kj_plane1 = [[1, 4, 7],
+                     [10, 13, 16], [19, 22, 25]]
+        kj_plane2 = [[2, 5, 8],
+                     [11, 14, 17], [20, 23, 26]]
+        kj_plane3 = [[3, 6, 9],
+                     [12, 15, 18], [21, 24, 27]]
+        ij_plane1 = [[1, 4, 7],
+                     [2, 5, 8],
+                     [3, 6, 9]]
+        ij_plane2 = [[10, 13, 16],
+                     [11, 14, 17],
+                     [12, 15, 18]]
+        ij_plane3 = [[19, 22, 25],
+                     [20, 23, 26],
+                     [21, 24, 27]]
+        ik_plane1 = [[1, 10, 19],
+                     [2, 11, 20],
+                     [3, 12, 21]]
+        ik_plane2 = [[4, 13, 22],
+                     [5, 14, 23],
+                     [6, 15, 24]]
+        ik_plane3 = [[7, 16, 25],
+                     [8, 17, 26],
+                     [9, 18, 27]]
+        ijk_space = [jk_plane1, jk_plane2, jk_plane3]
+        ikj_space = [kj_plane1, kj_plane2, kj_plane3]
+        jik_space = [ik_plane1, ik_plane2, ik_plane3]
+        jki_space = [ki_plane1, ki_plane2, ki_plane3]
+        kij_space = [ij_plane1, ij_plane2, ij_plane3]
+        x = array([plane1, plane2, plane3])
+
+        assert_array_almost_equal(fftn(x),
+                                  fftn(x, axes=(-3, -2, -1)))  # kji_space
+        assert_array_almost_equal(fftn(x), fftn(x, axes=(0, 1, 2)))
+        assert_array_almost_equal(fftn(x, axes=(0, 2)), fftn(x, axes=(0, -1)))
+        y = fftn(x, axes=(2, 1, 0))  # ijk_space
+        assert_array_almost_equal(swapaxes(y, -1, -3), fftn(ijk_space))
+        y = fftn(x, axes=(2, 0, 1))  # ikj_space
+        assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -1, -2),
+                                  fftn(ikj_space))
+        y = fftn(x, axes=(1, 2, 0))  # jik_space
+        assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -3, -2),
+                                  fftn(jik_space))
+        y = fftn(x, axes=(1, 0, 2))  # jki_space
+        assert_array_almost_equal(swapaxes(y, -2, -3), fftn(jki_space))
+        y = fftn(x, axes=(0, 2, 1))  # kij_space
+        assert_array_almost_equal(swapaxes(y, -2, -1), fftn(kij_space))
+
+        y = fftn(x, axes=(-2, -1))  # ji_plane
+        assert_array_almost_equal(fftn(plane1), y[0])
+        assert_array_almost_equal(fftn(plane2), y[1])
+        assert_array_almost_equal(fftn(plane3), y[2])
+
+        y = fftn(x, axes=(1, 2))  # ji_plane
+        assert_array_almost_equal(fftn(plane1), y[0])
+        assert_array_almost_equal(fftn(plane2), y[1])
+        assert_array_almost_equal(fftn(plane3), y[2])
+
+        y = fftn(x, axes=(-3, -2))  # kj_plane
+        assert_array_almost_equal(fftn(x[:, :, 0]), y[:, :, 0])
+        assert_array_almost_equal(fftn(x[:, :, 1]), y[:, :, 1])
+        assert_array_almost_equal(fftn(x[:, :, 2]), y[:, :, 2])
+
+        y = fftn(x, axes=(-3, -1))  # ki_plane
+        assert_array_almost_equal(fftn(x[:, 0, :]), y[:, 0, :])
+        assert_array_almost_equal(fftn(x[:, 1, :]), y[:, 1, :])
+        assert_array_almost_equal(fftn(x[:, 2, :]), y[:, 2, :])
+
+        y = fftn(x, axes=(-1, -2))  # ij_plane
+        assert_array_almost_equal(fftn(ij_plane1), swapaxes(y[0], -2, -1))
+        assert_array_almost_equal(fftn(ij_plane2), swapaxes(y[1], -2, -1))
+        assert_array_almost_equal(fftn(ij_plane3), swapaxes(y[2], -2, -1))
+
+        y = fftn(x, axes=(-1, -3))  # ik_plane
+        assert_array_almost_equal(fftn(ik_plane1),
+                                  swapaxes(y[:, 0, :], -1, -2))
+        assert_array_almost_equal(fftn(ik_plane2),
+                                  swapaxes(y[:, 1, :], -1, -2))
+        assert_array_almost_equal(fftn(ik_plane3),
+                                  swapaxes(y[:, 2, :], -1, -2))
+
+        y = fftn(x, axes=(-2, -3))  # jk_plane
+        assert_array_almost_equal(fftn(jk_plane1),
+                                  swapaxes(y[:, :, 0], -1, -2))
+        assert_array_almost_equal(fftn(jk_plane2),
+                                  swapaxes(y[:, :, 1], -1, -2))
+        assert_array_almost_equal(fftn(jk_plane3),
+                                  swapaxes(y[:, :, 2], -1, -2))
+
+        y = fftn(x, axes=(-1,))  # i_line
+        for i in range(3):
+            for j in range(3):
+                assert_array_almost_equal(fft(x[i, j, :]), y[i, j, :])
+        y = fftn(x, axes=(-2,))  # j_line
+        for i in range(3):
+            for j in range(3):
+                assert_array_almost_equal(fft(x[i, :, j]), y[i, :, j])
+        y = fftn(x, axes=(0,))  # k_line
+        for i in range(3):
+            for j in range(3):
+                assert_array_almost_equal(fft(x[:, i, j]), y[:, i, j])
+
+        y = fftn(x, axes=())  # point
+        assert_array_almost_equal(y, x)
+
+    def test_shape_argument(self):
+        small_x = [[1, 2, 3],
+                   [4, 5, 6]]
+        large_x1 = [[1, 2, 3, 0],
+                    [4, 5, 6, 0],
+                    [0, 0, 0, 0],
+                    [0, 0, 0, 0]]
+
+        y = fftn(small_x, shape=(4, 4))
+        assert_array_almost_equal(y, fftn(large_x1))
+
+        y = fftn(small_x, shape=(3, 4))
+        assert_array_almost_equal(y, fftn(large_x1[:-1]))
+
+    def test_shape_axes_argument(self):
+        small_x = [[1, 2, 3],
+                   [4, 5, 6],
+                   [7, 8, 9]]
+        large_x1 = array([[1, 2, 3, 0],
+                          [4, 5, 6, 0],
+                          [7, 8, 9, 0],
+                          [0, 0, 0, 0]])
+        y = fftn(small_x, shape=(4, 4), axes=(-2, -1))
+        assert_array_almost_equal(y, fftn(large_x1))
+        y = fftn(small_x, shape=(4, 4), axes=(-1, -2))
+
+        assert_array_almost_equal(y, swapaxes(
+            fftn(swapaxes(large_x1, -1, -2)), -1, -2))
+
+    def test_shape_axes_argument2(self):
+        # Change shape of the last axis
+        x = numpy.random.random((10, 5, 3, 7))
+        y = fftn(x, axes=(-1,), shape=(8,))
+        assert_array_almost_equal(y, fft(x, axis=-1, n=8))
+
+        # Change shape of an arbitrary axis which is not the last one
+        x = numpy.random.random((10, 5, 3, 7))
+        y = fftn(x, axes=(-2,), shape=(8,))
+        assert_array_almost_equal(y, fft(x, axis=-2, n=8))
+
+        # Change shape of axes: cf #244, where shape and axes were mixed up
+        x = numpy.random.random((4, 4, 2))
+        y = fftn(x, axes=(-3, -2), shape=(8, 8))
+        assert_array_almost_equal(y,
+                                  numpy.fft.fftn(x, axes=(-3, -2), s=(8, 8)))
+
+    def test_shape_argument_more(self):
+        x = zeros((4, 4, 2))
+        with assert_raises(ValueError,
+                           match="when given, axes and shape arguments"
+                           " have to be of the same length"):
+            fftn(x, shape=(8, 8, 2, 1))
+
+    def test_invalid_sizes(self):
+        with assert_raises(ValueError,
+                           match="invalid number of data points"
+                           r" \(\[1, 0\]\) specified"):
+            fftn([[]])
+
+        with assert_raises(ValueError,
+                           match="invalid number of data points"
+                           r" \(\[4, -3\]\) specified"):
+            fftn([[1, 1], [2, 2]], (4, -3))
+
+
+class TestIfftn:
+    dtype = None
+    cdtype = None
+
+    def setup_method(self):
+        np.random.seed(1234)
+
+    @pytest.mark.parametrize('dtype,cdtype,maxnlp',
+                             [(np.float64, np.complex128, 2000),
+                              (np.float32, np.complex64, 3500)])
+    def test_definition(self, dtype, cdtype, maxnlp):
+        x = np.array([[1, 2, 3],
+                      [4, 5, 6],
+                      [7, 8, 9]], dtype=dtype)
+        y = ifftn(x)
+        assert_equal(y.dtype, cdtype)
+        assert_array_almost_equal_nulp(y, direct_idftn(x), maxnlp)
+
+        x = random((20, 26))
+        assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp)
+
+        x = random((5, 4, 3, 20))
+        assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp)
+
+    @pytest.mark.parametrize('maxnlp', [2000, 3500])
+    @pytest.mark.parametrize('size', [1, 2, 51, 32, 64, 92])
+    def test_random_complex(self, maxnlp, size):
+        x = random([size, size]) + 1j*random([size, size])
+        assert_array_almost_equal_nulp(ifftn(fftn(x)), x, maxnlp)
+        assert_array_almost_equal_nulp(fftn(ifftn(x)), x, maxnlp)
+
+    def test_invalid_sizes(self):
+        with assert_raises(ValueError,
+                           match="invalid number of data points"
+                           r" \(\[1, 0\]\) specified"):
+            ifftn([[]])
+
+        with assert_raises(ValueError,
+                           match="invalid number of data points"
+                           r" \(\[4, -3\]\) specified"):
+            ifftn([[1, 1], [2, 2]], (4, -3))
+
+
+class FakeArray:
+    def __init__(self, data):
+        self._data = data
+        self.__array_interface__ = data.__array_interface__
+
+
+class FakeArray2:
+    def __init__(self, data):
+        self._data = data
+
+    def __array__(self):
+        return self._data
+
+
+class TestOverwrite:
+    """Check input overwrite behavior of the FFT functions."""
+
+    real_dtypes = (np.float32, np.float64)
+    dtypes = real_dtypes + (np.complex64, np.complex128)
+    fftsizes = [8, 16, 32]
+
+    def _check(self, x, routine, fftsize, axis, overwrite_x):
+        x2 = x.copy()
+        for fake in [lambda x: x, FakeArray, FakeArray2]:
+            routine(fake(x2), fftsize, axis, overwrite_x=overwrite_x)
+
+            sig = "%s(%s%r, %r, axis=%r, overwrite_x=%r)" % (
+                routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x)
+            if not overwrite_x:
+                assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig)
+
+    def _check_1d(self, routine, dtype, shape, axis, overwritable_dtypes,
+                  fftsize, overwrite_x):
+        np.random.seed(1234)
+        if np.issubdtype(dtype, np.complexfloating):
+            data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
+        else:
+            data = np.random.randn(*shape)
+        data = data.astype(dtype)
+
+        self._check(data, routine, fftsize, axis,
+                    overwrite_x=overwrite_x)
+
+    @pytest.mark.parametrize('dtype', dtypes)
+    @pytest.mark.parametrize('fftsize', fftsizes)
+    @pytest.mark.parametrize('overwrite_x', [True, False])
+    @pytest.mark.parametrize('shape,axes', [((16,), -1),
+                                            ((16, 2), 0),
+                                            ((2, 16), 1)])
+    def test_fft_ifft(self, dtype, fftsize, overwrite_x, shape, axes):
+        overwritable = (np.complex128, np.complex64)
+        self._check_1d(fft, dtype, shape, axes, overwritable,
+                       fftsize, overwrite_x)
+        self._check_1d(ifft, dtype, shape, axes, overwritable,
+                       fftsize, overwrite_x)
+
+    @pytest.mark.parametrize('dtype', real_dtypes)
+    @pytest.mark.parametrize('fftsize', fftsizes)
+    @pytest.mark.parametrize('overwrite_x', [True, False])
+    @pytest.mark.parametrize('shape,axes', [((16,), -1),
+                                            ((16, 2), 0),
+                                            ((2, 16), 1)])
+    def test_rfft_irfft(self, dtype, fftsize, overwrite_x, shape, axes):
+        overwritable = self.real_dtypes
+        self._check_1d(irfft, dtype, shape, axes, overwritable,
+                       fftsize, overwrite_x)
+        self._check_1d(rfft, dtype, shape, axes, overwritable,
+                       fftsize, overwrite_x)
+
+    def _check_nd_one(self, routine, dtype, shape, axes, overwritable_dtypes,
+                      overwrite_x):
+        np.random.seed(1234)
+        if np.issubdtype(dtype, np.complexfloating):
+            data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
+        else:
+            data = np.random.randn(*shape)
+        data = data.astype(dtype)
+
+        def fftshape_iter(shp):
+            if len(shp) <= 0:
+                yield ()
+            else:
+                for j in (shp[0]//2, shp[0], shp[0]*2):
+                    for rest in fftshape_iter(shp[1:]):
+                        yield (j,) + rest
+
+        if axes is None:
+            part_shape = shape
+        else:
+            part_shape = tuple(np.take(shape, axes))
+
+        for fftshape in fftshape_iter(part_shape):
+            self._check(data, routine, fftshape, axes,
+                        overwrite_x=overwrite_x)
+            if data.ndim > 1:
+                self._check(data.T, routine, fftshape, axes,
+                            overwrite_x=overwrite_x)
+
+    @pytest.mark.parametrize('dtype', dtypes)
+    @pytest.mark.parametrize('overwrite_x', [True, False])
+    @pytest.mark.parametrize('shape,axes', [((16,), None),
+                                            ((16,), (0,)),
+                                            ((16, 2), (0,)),
+                                            ((2, 16), (1,)),
+                                            ((8, 16), None),
+                                            ((8, 16), (0, 1)),
+                                            ((8, 16, 2), (0, 1)),
+                                            ((8, 16, 2), (1, 2)),
+                                            ((8, 16, 2), (0,)),
+                                            ((8, 16, 2), (1,)),
+                                            ((8, 16, 2), (2,)),
+                                            ((8, 16, 2), None),
+                                            ((8, 16, 2), (0, 1, 2))])
+    def test_fftn_ifftn(self, dtype, overwrite_x, shape, axes):
+        overwritable = (np.complex128, np.complex64)
+        self._check_nd_one(fftn, dtype, shape, axes, overwritable,
+                           overwrite_x)
+        self._check_nd_one(ifftn, dtype, shape, axes, overwritable,
+                           overwrite_x)
+
+
+@pytest.mark.parametrize('func', [fftn, ifftn, fft2])
+def test_shape_axes_ndarray(func):
+    # Test fftn and ifftn work with NumPy arrays for shape and axes arguments
+    # Regression test for gh-13342
+    a = np.random.rand(10, 10)
+
+    expect = func(a, shape=(5, 5))
+    actual = func(a, shape=np.array([5, 5]))
+    assert_equal(expect, actual)
+
+    expect = func(a, axes=(-1,))
+    actual = func(a, axes=np.array([-1,]))
+    assert_equal(expect, actual)
+
+    expect = func(a, shape=(4, 7), axes=(1, 0))
+    actual = func(a, shape=np.array([4, 7]), axes=np.array([1, 0]))
+    assert_equal(expect, actual)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fftpack/tests/test_helper.py b/__packaged__/coreml/.python_dependencies/scipy/fftpack/tests/test_helper.py
new file mode 100644
index 00000000..5e7be04f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fftpack/tests/test_helper.py
@@ -0,0 +1,54 @@
+# Created by Pearu Peterson, September 2002
+
+__usage__ = """
+Build fftpack:
+  python setup_fftpack.py build
+Run tests if scipy is installed:
+  python -c 'import scipy;scipy.fftpack.test()'
+Run tests if fftpack is not installed:
+  python tests/test_helper.py []
+"""
+
+from numpy.testing import assert_array_almost_equal
+from scipy.fftpack import fftshift, ifftshift, fftfreq, rfftfreq
+
+from numpy import pi, random
+
+class TestFFTShift:
+
+    def test_definition(self):
+        x = [0,1,2,3,4,-4,-3,-2,-1]
+        y = [-4,-3,-2,-1,0,1,2,3,4]
+        assert_array_almost_equal(fftshift(x),y)
+        assert_array_almost_equal(ifftshift(y),x)
+        x = [0,1,2,3,4,-5,-4,-3,-2,-1]
+        y = [-5,-4,-3,-2,-1,0,1,2,3,4]
+        assert_array_almost_equal(fftshift(x),y)
+        assert_array_almost_equal(ifftshift(y),x)
+
+    def test_inverse(self):
+        for n in [1,4,9,100,211]:
+            x = random.random((n,))
+            assert_array_almost_equal(ifftshift(fftshift(x)),x)
+
+
+class TestFFTFreq:
+
+    def test_definition(self):
+        x = [0,1,2,3,4,-4,-3,-2,-1]
+        assert_array_almost_equal(9*fftfreq(9),x)
+        assert_array_almost_equal(9*pi*fftfreq(9,pi),x)
+        x = [0,1,2,3,4,-5,-4,-3,-2,-1]
+        assert_array_almost_equal(10*fftfreq(10),x)
+        assert_array_almost_equal(10*pi*fftfreq(10,pi),x)
+
+
+class TestRFFTFreq:
+
+    def test_definition(self):
+        x = [0,1,1,2,2,3,3,4,4]
+        assert_array_almost_equal(9*rfftfreq(9),x)
+        assert_array_almost_equal(9*pi*rfftfreq(9,pi),x)
+        x = [0,1,1,2,2,3,3,4,4,5]
+        assert_array_almost_equal(10*rfftfreq(10),x)
+        assert_array_almost_equal(10*pi*rfftfreq(10,pi),x)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fftpack/tests/test_import.py b/__packaged__/coreml/.python_dependencies/scipy/fftpack/tests/test_import.py
new file mode 100644
index 00000000..2c46ed2d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fftpack/tests/test_import.py
@@ -0,0 +1,31 @@
+"""Test possibility of patching fftpack with pyfftw.
+
+No module source outside of scipy.fftpack should contain an import of
+the form `from scipy.fftpack import ...`, so that a simple replacement
+of scipy.fftpack by the corresponding fftw interface completely swaps
+the two FFT implementations.
+
+Because this simply inspects source files, we only need to run the test
+on one version of Python.
+"""
+
+
+from pathlib import Path
+import re
+import tokenize
+from numpy.testing import assert_
+import scipy
+
+class TestFFTPackImport:
+    def test_fftpack_import(self):
+        base = Path(scipy.__file__).parent
+        regexp = r"\s*from.+\.fftpack import .*\n"
+        for path in base.rglob("*.py"):
+            if base / "fftpack" in path.parents:
+                continue
+            # use tokenize to auto-detect encoding on systems where no
+            # default encoding is defined (e.g., LANG='C')
+            with tokenize.open(str(path)) as file:
+                assert_(all(not re.fullmatch(regexp, line)
+                            for line in file),
+                        "{0} contains an import from fftpack".format(path))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fftpack/tests/test_pseudo_diffs.py b/__packaged__/coreml/.python_dependencies/scipy/fftpack/tests/test_pseudo_diffs.py
new file mode 100644
index 00000000..cec131ca
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fftpack/tests/test_pseudo_diffs.py
@@ -0,0 +1,380 @@
+# Created by Pearu Peterson, September 2002
+
+__usage__ = """
+Build fftpack:
+  python setup_fftpack.py build
+Run tests if scipy is installed:
+  python -c 'import scipy;scipy.fftpack.test()'
+Run tests if fftpack is not installed:
+  python tests/test_pseudo_diffs.py []
+"""
+
+from numpy.testing import (assert_equal, assert_almost_equal,
+                           assert_array_almost_equal)
+from scipy.fftpack import (diff, fft, ifft, tilbert, itilbert, hilbert,
+                           ihilbert, shift, fftfreq, cs_diff, sc_diff,
+                           ss_diff, cc_diff)
+
+import numpy as np
+from numpy import arange, sin, cos, pi, exp, tanh, sum, sign
+from numpy.random import random
+
+
+def direct_diff(x,k=1,period=None):
+    fx = fft(x)
+    n = len(fx)
+    if period is None:
+        period = 2*pi
+    w = fftfreq(n)*2j*pi/period*n
+    if k < 0:
+        w = 1 / w**k
+        w[0] = 0.0
+    else:
+        w = w**k
+    if n > 2000:
+        w[250:n-250] = 0.0
+    return ifft(w*fx).real
+
+
+def direct_tilbert(x,h=1,period=None):
+    fx = fft(x)
+    n = len(fx)
+    if period is None:
+        period = 2*pi
+    w = fftfreq(n)*h*2*pi/period*n
+    w[0] = 1
+    w = 1j/tanh(w)
+    w[0] = 0j
+    return ifft(w*fx)
+
+
+def direct_itilbert(x,h=1,period=None):
+    fx = fft(x)
+    n = len(fx)
+    if period is None:
+        period = 2*pi
+    w = fftfreq(n)*h*2*pi/period*n
+    w = -1j*tanh(w)
+    return ifft(w*fx)
+
+
+def direct_hilbert(x):
+    fx = fft(x)
+    n = len(fx)
+    w = fftfreq(n)*n
+    w = 1j*sign(w)
+    return ifft(w*fx)
+
+
+def direct_ihilbert(x):
+    return -direct_hilbert(x)
+
+
+def direct_shift(x,a,period=None):
+    n = len(x)
+    if period is None:
+        k = fftfreq(n)*1j*n
+    else:
+        k = fftfreq(n)*2j*pi/period*n
+    return ifft(fft(x)*exp(k*a)).real
+
+
+class TestDiff:
+
+    def test_definition(self):
+        for n in [16,17,64,127,32]:
+            x = arange(n)*2*pi/n
+            assert_array_almost_equal(diff(sin(x)),direct_diff(sin(x)))
+            assert_array_almost_equal(diff(sin(x),2),direct_diff(sin(x),2))
+            assert_array_almost_equal(diff(sin(x),3),direct_diff(sin(x),3))
+            assert_array_almost_equal(diff(sin(x),4),direct_diff(sin(x),4))
+            assert_array_almost_equal(diff(sin(x),5),direct_diff(sin(x),5))
+            assert_array_almost_equal(diff(sin(2*x),3),direct_diff(sin(2*x),3))
+            assert_array_almost_equal(diff(sin(2*x),4),direct_diff(sin(2*x),4))
+            assert_array_almost_equal(diff(cos(x)),direct_diff(cos(x)))
+            assert_array_almost_equal(diff(cos(x),2),direct_diff(cos(x),2))
+            assert_array_almost_equal(diff(cos(x),3),direct_diff(cos(x),3))
+            assert_array_almost_equal(diff(cos(x),4),direct_diff(cos(x),4))
+            assert_array_almost_equal(diff(cos(2*x)),direct_diff(cos(2*x)))
+            assert_array_almost_equal(diff(sin(x*n/8)),direct_diff(sin(x*n/8)))
+            assert_array_almost_equal(diff(cos(x*n/8)),direct_diff(cos(x*n/8)))
+            for k in range(5):
+                assert_array_almost_equal(diff(sin(4*x),k),direct_diff(sin(4*x),k))
+                assert_array_almost_equal(diff(cos(4*x),k),direct_diff(cos(4*x),k))
+
+    def test_period(self):
+        for n in [17,64]:
+            x = arange(n)/float(n)
+            assert_array_almost_equal(diff(sin(2*pi*x),period=1),
+                                      2*pi*cos(2*pi*x))
+            assert_array_almost_equal(diff(sin(2*pi*x),3,period=1),
+                                      -(2*pi)**3*cos(2*pi*x))
+
+    def test_sin(self):
+        for n in [32,64,77]:
+            x = arange(n)*2*pi/n
+            assert_array_almost_equal(diff(sin(x)),cos(x))
+            assert_array_almost_equal(diff(cos(x)),-sin(x))
+            assert_array_almost_equal(diff(sin(x),2),-sin(x))
+            assert_array_almost_equal(diff(sin(x),4),sin(x))
+            assert_array_almost_equal(diff(sin(4*x)),4*cos(4*x))
+            assert_array_almost_equal(diff(sin(sin(x))),cos(x)*cos(sin(x)))
+
+    def test_expr(self):
+        for n in [64,77,100,128,256,512,1024,2048,4096,8192][:5]:
+            x = arange(n)*2*pi/n
+            f = sin(x)*cos(4*x)+exp(sin(3*x))
+            df = cos(x)*cos(4*x)-4*sin(x)*sin(4*x)+3*cos(3*x)*exp(sin(3*x))
+            ddf = -17*sin(x)*cos(4*x)-8*cos(x)*sin(4*x)\
+                 - 9*sin(3*x)*exp(sin(3*x))+9*cos(3*x)**2*exp(sin(3*x))
+            d1 = diff(f)
+            assert_array_almost_equal(d1,df)
+            assert_array_almost_equal(diff(df),ddf)
+            assert_array_almost_equal(diff(f,2),ddf)
+            assert_array_almost_equal(diff(ddf,-1),df)
+
+    def test_expr_large(self):
+        for n in [2048,4096]:
+            x = arange(n)*2*pi/n
+            f = sin(x)*cos(4*x)+exp(sin(3*x))
+            df = cos(x)*cos(4*x)-4*sin(x)*sin(4*x)+3*cos(3*x)*exp(sin(3*x))
+            ddf = -17*sin(x)*cos(4*x)-8*cos(x)*sin(4*x)\
+                 - 9*sin(3*x)*exp(sin(3*x))+9*cos(3*x)**2*exp(sin(3*x))
+            assert_array_almost_equal(diff(f),df)
+            assert_array_almost_equal(diff(df),ddf)
+            assert_array_almost_equal(diff(ddf,-1),df)
+            assert_array_almost_equal(diff(f,2),ddf)
+
+    def test_int(self):
+        n = 64
+        x = arange(n)*2*pi/n
+        assert_array_almost_equal(diff(sin(x),-1),-cos(x))
+        assert_array_almost_equal(diff(sin(x),-2),-sin(x))
+        assert_array_almost_equal(diff(sin(x),-4),sin(x))
+        assert_array_almost_equal(diff(2*cos(2*x),-1),sin(2*x))
+
+    def test_random_even(self):
+        for k in [0,2,4,6]:
+            for n in [60,32,64,56,55]:
+                f = random((n,))
+                af = sum(f,axis=0)/n
+                f = f-af
+                # zeroing Nyquist mode:
+                f = diff(diff(f,1),-1)
+                assert_almost_equal(sum(f,axis=0),0.0)
+                assert_array_almost_equal(diff(diff(f,k),-k),f)
+                assert_array_almost_equal(diff(diff(f,-k),k),f)
+
+    def test_random_odd(self):
+        for k in [0,1,2,3,4,5,6]:
+            for n in [33,65,55]:
+                f = random((n,))
+                af = sum(f,axis=0)/n
+                f = f-af
+                assert_almost_equal(sum(f,axis=0),0.0)
+                assert_array_almost_equal(diff(diff(f,k),-k),f)
+                assert_array_almost_equal(diff(diff(f,-k),k),f)
+
+    def test_zero_nyquist(self):
+        for k in [0,1,2,3,4,5,6]:
+            for n in [32,33,64,56,55]:
+                f = random((n,))
+                af = sum(f,axis=0)/n
+                f = f-af
+                # zeroing Nyquist mode:
+                f = diff(diff(f,1),-1)
+                assert_almost_equal(sum(f,axis=0),0.0)
+                assert_array_almost_equal(diff(diff(f,k),-k),f)
+                assert_array_almost_equal(diff(diff(f,-k),k),f)
+
+
+class TestTilbert:
+
+    def test_definition(self):
+        for h in [0.1,0.5,1,5.5,10]:
+            for n in [16,17,64,127]:
+                x = arange(n)*2*pi/n
+                y = tilbert(sin(x),h)
+                y1 = direct_tilbert(sin(x),h)
+                assert_array_almost_equal(y,y1)
+                assert_array_almost_equal(tilbert(sin(x),h),
+                                          direct_tilbert(sin(x),h))
+                assert_array_almost_equal(tilbert(sin(2*x),h),
+                                          direct_tilbert(sin(2*x),h))
+
+    def test_random_even(self):
+        for h in [0.1,0.5,1,5.5,10]:
+            for n in [32,64,56]:
+                f = random((n,))
+                af = sum(f,axis=0)/n
+                f = f-af
+                assert_almost_equal(sum(f,axis=0),0.0)
+                assert_array_almost_equal(direct_tilbert(direct_itilbert(f,h),h),f)
+
+    def test_random_odd(self):
+        for h in [0.1,0.5,1,5.5,10]:
+            for n in [33,65,55]:
+                f = random((n,))
+                af = sum(f,axis=0)/n
+                f = f-af
+                assert_almost_equal(sum(f,axis=0),0.0)
+                assert_array_almost_equal(itilbert(tilbert(f,h),h),f)
+                assert_array_almost_equal(tilbert(itilbert(f,h),h),f)
+
+
+class TestITilbert:
+
+    def test_definition(self):
+        for h in [0.1,0.5,1,5.5,10]:
+            for n in [16,17,64,127]:
+                x = arange(n)*2*pi/n
+                y = itilbert(sin(x),h)
+                y1 = direct_itilbert(sin(x),h)
+                assert_array_almost_equal(y,y1)
+                assert_array_almost_equal(itilbert(sin(x),h),
+                                          direct_itilbert(sin(x),h))
+                assert_array_almost_equal(itilbert(sin(2*x),h),
+                                          direct_itilbert(sin(2*x),h))
+
+
+class TestHilbert:
+
+    def test_definition(self):
+        for n in [16,17,64,127]:
+            x = arange(n)*2*pi/n
+            y = hilbert(sin(x))
+            y1 = direct_hilbert(sin(x))
+            assert_array_almost_equal(y,y1)
+            assert_array_almost_equal(hilbert(sin(2*x)),
+                                      direct_hilbert(sin(2*x)))
+
+    def test_tilbert_relation(self):
+        for n in [16,17,64,127]:
+            x = arange(n)*2*pi/n
+            f = sin(x)+cos(2*x)*sin(x)
+            y = hilbert(f)
+            y1 = direct_hilbert(f)
+            assert_array_almost_equal(y,y1)
+            y2 = tilbert(f,h=10)
+            assert_array_almost_equal(y,y2)
+
+    def test_random_odd(self):
+        for n in [33,65,55]:
+            f = random((n,))
+            af = sum(f,axis=0)/n
+            f = f-af
+            assert_almost_equal(sum(f,axis=0),0.0)
+            assert_array_almost_equal(ihilbert(hilbert(f)),f)
+            assert_array_almost_equal(hilbert(ihilbert(f)),f)
+
+    def test_random_even(self):
+        for n in [32,64,56]:
+            f = random((n,))
+            af = sum(f,axis=0)/n
+            f = f-af
+            # zeroing Nyquist mode:
+            f = diff(diff(f,1),-1)
+            assert_almost_equal(sum(f,axis=0),0.0)
+            assert_array_almost_equal(direct_hilbert(direct_ihilbert(f)),f)
+            assert_array_almost_equal(hilbert(ihilbert(f)),f)
+
+
+class TestIHilbert:
+
+    def test_definition(self):
+        for n in [16,17,64,127]:
+            x = arange(n)*2*pi/n
+            y = ihilbert(sin(x))
+            y1 = direct_ihilbert(sin(x))
+            assert_array_almost_equal(y,y1)
+            assert_array_almost_equal(ihilbert(sin(2*x)),
+                                      direct_ihilbert(sin(2*x)))
+
+    def test_itilbert_relation(self):
+        for n in [16,17,64,127]:
+            x = arange(n)*2*pi/n
+            f = sin(x)+cos(2*x)*sin(x)
+            y = ihilbert(f)
+            y1 = direct_ihilbert(f)
+            assert_array_almost_equal(y,y1)
+            y2 = itilbert(f,h=10)
+            assert_array_almost_equal(y,y2)
+
+
+class TestShift:
+
+    def test_definition(self):
+        for n in [18,17,64,127,32,2048,256]:
+            x = arange(n)*2*pi/n
+            for a in [0.1,3]:
+                assert_array_almost_equal(shift(sin(x),a),direct_shift(sin(x),a))
+                assert_array_almost_equal(shift(sin(x),a),sin(x+a))
+                assert_array_almost_equal(shift(cos(x),a),cos(x+a))
+                assert_array_almost_equal(shift(cos(2*x)+sin(x),a),
+                                          cos(2*(x+a))+sin(x+a))
+                assert_array_almost_equal(shift(exp(sin(x)),a),exp(sin(x+a)))
+            assert_array_almost_equal(shift(sin(x),2*pi),sin(x))
+            assert_array_almost_equal(shift(sin(x),pi),-sin(x))
+            assert_array_almost_equal(shift(sin(x),pi/2),cos(x))
+
+
+class TestOverwrite:
+    """Check input overwrite behavior """
+
+    real_dtypes = (np.float32, np.float64)
+    dtypes = real_dtypes + (np.complex64, np.complex128)
+
+    def _check(self, x, routine, *args, **kwargs):
+        x2 = x.copy()
+        routine(x2, *args, **kwargs)
+        sig = routine.__name__
+        if args:
+            sig += repr(args)
+        if kwargs:
+            sig += repr(kwargs)
+        assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig)
+
+    def _check_1d(self, routine, dtype, shape, *args, **kwargs):
+        np.random.seed(1234)
+        if np.issubdtype(dtype, np.complexfloating):
+            data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
+        else:
+            data = np.random.randn(*shape)
+        data = data.astype(dtype)
+        self._check(data, routine, *args, **kwargs)
+
+    def test_diff(self):
+        for dtype in self.dtypes:
+            self._check_1d(diff, dtype, (16,))
+
+    def test_tilbert(self):
+        for dtype in self.dtypes:
+            self._check_1d(tilbert, dtype, (16,), 1.6)
+
+    def test_itilbert(self):
+        for dtype in self.dtypes:
+            self._check_1d(itilbert, dtype, (16,), 1.6)
+
+    def test_hilbert(self):
+        for dtype in self.dtypes:
+            self._check_1d(hilbert, dtype, (16,))
+
+    def test_cs_diff(self):
+        for dtype in self.dtypes:
+            self._check_1d(cs_diff, dtype, (16,), 1.0, 4.0)
+
+    def test_sc_diff(self):
+        for dtype in self.dtypes:
+            self._check_1d(sc_diff, dtype, (16,), 1.0, 4.0)
+
+    def test_ss_diff(self):
+        for dtype in self.dtypes:
+            self._check_1d(ss_diff, dtype, (16,), 1.0, 4.0)
+
+    def test_cc_diff(self):
+        for dtype in self.dtypes:
+            self._check_1d(cc_diff, dtype, (16,), 1.0, 4.0)
+
+    def test_shift(self):
+        for dtype in self.dtypes:
+            self._check_1d(shift, dtype, (16,), 1.0)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/fftpack/tests/test_real_transforms.py b/__packaged__/coreml/.python_dependencies/scipy/fftpack/tests/test_real_transforms.py
new file mode 100644
index 00000000..e6b9ca1a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/fftpack/tests/test_real_transforms.py
@@ -0,0 +1,815 @@
+from os.path import join, dirname
+
+import numpy as np
+from numpy.testing import assert_array_almost_equal, assert_equal
+import pytest
+from pytest import raises as assert_raises
+
+from scipy.fftpack._realtransforms import (
+    dct, idct, dst, idst, dctn, idctn, dstn, idstn)
+
+# Matlab reference data
+MDATA = np.load(join(dirname(__file__), 'test.npz'))
+X = [MDATA['x%d' % i] for i in range(8)]
+Y = [MDATA['y%d' % i] for i in range(8)]
+
+# FFTW reference data: the data are organized as follows:
+#    * SIZES is an array containing all available sizes
+#    * for every type (1, 2, 3, 4) and every size, the array dct_type_size
+#    contains the output of the DCT applied to the input np.linspace(0, size-1,
+#    size)
+FFTWDATA_DOUBLE = np.load(join(dirname(__file__), 'fftw_double_ref.npz'))
+FFTWDATA_SINGLE = np.load(join(dirname(__file__), 'fftw_single_ref.npz'))
+FFTWDATA_SIZES = FFTWDATA_DOUBLE['sizes']
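+# For example, per the layout described above, FFTWDATA_DOUBLE['dct_2_16']
+# holds the type-II DCT of the input np.linspace(0, 15, 16).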
+
+
+def fftw_dct_ref(type, size, dt):
+    x = np.linspace(0, size-1, size).astype(dt)
+    dt = np.result_type(np.float32, dt)
+    if dt == np.double:
+        data = FFTWDATA_DOUBLE
+    elif dt == np.float32:
+        data = FFTWDATA_SINGLE
+    else:
+        raise ValueError()
+    y = (data['dct_%d_%d' % (type, size)]).astype(dt)
+    return x, y, dt
+
+
+def fftw_dst_ref(type, size, dt):
+    x = np.linspace(0, size-1, size).astype(dt)
+    dt = np.result_type(np.float32, dt)
+    if dt == np.double:
+        data = FFTWDATA_DOUBLE
+    elif dt == np.float32:
+        data = FFTWDATA_SINGLE
+    else:
+        raise ValueError()
+    y = (data['dst_%d_%d' % (type, size)]).astype(dt)
+    return x, y, dt
+
+
+def dct_2d_ref(x, **kwargs):
+    """Calculate reference values for testing dct2."""
+    x = np.array(x, copy=True)
+    for row in range(x.shape[0]):
+        x[row, :] = dct(x[row, :], **kwargs)
+    for col in range(x.shape[1]):
+        x[:, col] = dct(x[:, col], **kwargs)
+    return x
+
+
+def idct_2d_ref(x, **kwargs):
+    """Calculate reference values for testing idct2."""
+    x = np.array(x, copy=True)
+    for row in range(x.shape[0]):
+        x[row, :] = idct(x[row, :], **kwargs)
+    for col in range(x.shape[1]):
+        x[:, col] = idct(x[:, col], **kwargs)
+    return x
+
+
+def dst_2d_ref(x, **kwargs):
+    """Calculate reference values for testing dst2."""
+    x = np.array(x, copy=True)
+    for row in range(x.shape[0]):
+        x[row, :] = dst(x[row, :], **kwargs)
+    for col in range(x.shape[1]):
+        x[:, col] = dst(x[:, col], **kwargs)
+    return x
+
+
+def idst_2d_ref(x, **kwargs):
+    """Calculate reference values for testing idst2."""
+    x = np.array(x, copy=True)
+    for row in range(x.shape[0]):
+        x[row, :] = idst(x[row, :], **kwargs)
+    for col in range(x.shape[1]):
+        x[:, col] = idst(x[:, col], **kwargs)
+    return x
+
+
+def naive_dct1(x, norm=None):
+    """Calculate textbook definition version of DCT-I."""
+    x = np.array(x, copy=True)
+    N = len(x)
+    M = N-1
+    y = np.zeros(N)
+    m0, m = 1, 2
+    if norm == 'ortho':
+        m0 = np.sqrt(1.0/M)
+        m = np.sqrt(2.0/M)
+    for k in range(N):
+        for n in range(1, N-1):
+            y[k] += m*x[n]*np.cos(np.pi*n*k/M)
+        y[k] += m0 * x[0]
+        y[k] += m0 * x[N-1] * (1 if k % 2 == 0 else -1)
+    if norm == 'ortho':
+        y[0] *= 1/np.sqrt(2)
+        y[N-1] *= 1/np.sqrt(2)
+    return y
+
+
+def naive_dst1(x, norm=None):
+    """Calculate textbook definition version  of DST-I."""
+    x = np.array(x, copy=True)
+    N = len(x)
+    M = N+1
+    y = np.zeros(N)
+    for k in range(N):
+        for n in range(N):
+            y[k] += 2*x[n]*np.sin(np.pi*(n+1.0)*(k+1.0)/M)
+    if norm == 'ortho':
+        y *= np.sqrt(0.5/M)
+    return y
+
+
+def naive_dct4(x, norm=None):
+    """Calculate textbook definition version of DCT-IV."""
+    x = np.array(x, copy=True)
+    N = len(x)
+    y = np.zeros(N)
+    for k in range(N):
+        for n in range(N):
+            y[k] += x[n]*np.cos(np.pi*(n+0.5)*(k+0.5)/(N))
+    if norm == 'ortho':
+        y *= np.sqrt(2.0/N)
+    else:
+        y *= 2
+    return y
+
+
+def naive_dst4(x, norm=None):
+    """Calculate textbook definition version of DST-IV."""
+    x = np.array(x, copy=True)
+    N = len(x)
+    y = np.zeros(N)
+    for k in range(N):
+        for n in range(N):
+            y[k] += x[n]*np.sin(np.pi*(n+0.5)*(k+0.5)/(N))
+    if norm == 'ortho':
+        y *= np.sqrt(2.0/N)
+    else:
+        y *= 2
+    return y
+
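+# A quick sanity sketch (illustration only): the naive O(N**2) definitions
+# above should agree with the fast transforms imported at the top, e.g.
+# np.allclose(dct(np.arange(8.), type=4, norm='ortho'),
+#             naive_dct4(np.arange(8.), norm='ortho')) is expected to hold.
+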
+
+class TestComplex:
+    def test_dct_complex64(self):
+        y = dct(1j*np.arange(5, dtype=np.complex64))
+        x = 1j*dct(np.arange(5))
+        assert_array_almost_equal(x, y)
+
+    def test_dct_complex(self):
+        y = dct(np.arange(5)*1j)
+        x = 1j*dct(np.arange(5))
+        assert_array_almost_equal(x, y)
+
+    def test_idct_complex(self):
+        y = idct(np.arange(5)*1j)
+        x = 1j*idct(np.arange(5))
+        assert_array_almost_equal(x, y)
+
+    def test_dst_complex64(self):
+        y = dst(np.arange(5, dtype=np.complex64)*1j)
+        x = 1j*dst(np.arange(5))
+        assert_array_almost_equal(x, y)
+
+    def test_dst_complex(self):
+        y = dst(np.arange(5)*1j)
+        x = 1j*dst(np.arange(5))
+        assert_array_almost_equal(x, y)
+
+    def test_idst_complex(self):
+        y = idst(np.arange(5)*1j)
+        x = 1j*idst(np.arange(5))
+        assert_array_almost_equal(x, y)
+
+
+class _TestDCTBase:
+    def setup_method(self):
+        self.rdt = None
+        self.dec = 14
+        self.type = None
+
+    def test_definition(self):
+        for i in FFTWDATA_SIZES:
+            x, yr, dt = fftw_dct_ref(self.type, i, self.rdt)
+            y = dct(x, type=self.type)
+            assert_equal(y.dtype, dt)
+            # XXX: we divide by np.max(y) because the tests fail otherwise. We
+            # should really use something like assert_array_approx_equal. The
+            # difference is due to fftw using a better algorithm w.r.t. error
+            # propagation compared to the ones from fftpack.
+            assert_array_almost_equal(y / np.max(y), yr / np.max(y), decimal=self.dec,
+                    err_msg="Size %d failed" % i)
+
+    def test_axis(self):
+        nt = 2
+        for i in [7, 8, 9, 16, 32, 64]:
+            x = np.random.randn(nt, i)
+            y = dct(x, type=self.type)
+            for j in range(nt):
+                assert_array_almost_equal(y[j], dct(x[j], type=self.type),
+                        decimal=self.dec)
+
+            x = x.T
+            y = dct(x, axis=0, type=self.type)
+            for j in range(nt):
+                assert_array_almost_equal(y[:,j], dct(x[:,j], type=self.type),
+                        decimal=self.dec)
+
+
+class _TestDCTIBase(_TestDCTBase):
+    def test_definition_ortho(self):
+        # Test orthonormal mode.
+        dt = np.result_type(np.float32, self.rdt)
+        for xr in X:
+            x = np.array(xr, dtype=self.rdt)
+            y = dct(x, norm='ortho', type=1)
+            y2 = naive_dct1(x, norm='ortho')
+            assert_equal(y.dtype, dt)
+            assert_array_almost_equal(y / np.max(y), y2 / np.max(y), decimal=self.dec)
+
+class _TestDCTIIBase(_TestDCTBase):
+    def test_definition_matlab(self):
+        # Test correspondence with MATLAB (orthonormal mode).
+        dt = np.result_type(np.float32, self.rdt)
+        for xr, yr in zip(X, Y):
+            x = np.array(xr, dtype=dt)
+            y = dct(x, norm="ortho", type=2)
+            assert_equal(y.dtype, dt)
+            assert_array_almost_equal(y, yr, decimal=self.dec)
+
+
+class _TestDCTIIIBase(_TestDCTBase):
+    def test_definition_ortho(self):
+        # Test orthonormal mode.
+        dt = np.result_type(np.float32, self.rdt)
+        for xr in X:
+            x = np.array(xr, dtype=self.rdt)
+            y = dct(x, norm='ortho', type=2)
+            xi = dct(y, norm="ortho", type=3)
+            assert_equal(xi.dtype, dt)
+            assert_array_almost_equal(xi, x, decimal=self.dec)
+
+class _TestDCTIVBase(_TestDCTBase):
+    def test_definition_ortho(self):
+        # Test orthonormal mode.
+        dt = np.result_type(np.float32, self.rdt)
+        for xr in X:
+            x = np.array(xr, dtype=self.rdt)
+            y = dct(x, norm='ortho', type=4)
+            y2 = naive_dct4(x, norm='ortho')
+            assert_equal(y.dtype, dt)
+            assert_array_almost_equal(y / np.max(y), y2 / np.max(y), decimal=self.dec)
+
+
+class TestDCTIDouble(_TestDCTIBase):
+    def setup_method(self):
+        self.rdt = np.double
+        self.dec = 10
+        self.type = 1
+
+
+class TestDCTIFloat(_TestDCTIBase):
+    def setup_method(self):
+        self.rdt = np.float32
+        self.dec = 4
+        self.type = 1
+
+
+class TestDCTIInt(_TestDCTIBase):
+    def setup_method(self):
+        self.rdt = int
+        self.dec = 5
+        self.type = 1
+
+
+class TestDCTIIDouble(_TestDCTIIBase):
+    def setup_method(self):
+        self.rdt = np.double
+        self.dec = 10
+        self.type = 2
+
+
+class TestDCTIIFloat(_TestDCTIIBase):
+    def setup_method(self):
+        self.rdt = np.float32
+        self.dec = 5
+        self.type = 2
+
+
+class TestDCTIIInt(_TestDCTIIBase):
+    def setup_method(self):
+        self.rdt = int
+        self.dec = 5
+        self.type = 2
+
+
+class TestDCTIIIDouble(_TestDCTIIIBase):
+    def setup_method(self):
+        self.rdt = np.double
+        self.dec = 14
+        self.type = 3
+
+
+class TestDCTIIIFloat(_TestDCTIIIBase):
+    def setup_method(self):
+        self.rdt = np.float32
+        self.dec = 5
+        self.type = 3
+
+
+class TestDCTIIIInt(_TestDCTIIIBase):
+    def setup_method(self):
+        self.rdt = int
+        self.dec = 5
+        self.type = 3
+
+
+class TestDCTIVDouble(_TestDCTIVBase):
+    def setup_method(self):
+        self.rdt = np.double
+        self.dec = 12
+        self.type = 4
+
+
+class TestDCTIVFloat(_TestDCTIVBase):
+    def setup_method(self):
+        self.rdt = np.float32
+        self.dec = 5
+        self.type = 4
+
+
+class TestDCTIVInt(_TestDCTIVBase):
+    def setup_method(self):
+        self.rdt = int
+        self.dec = 5
+        self.type = 4
+
+
+class _TestIDCTBase:
+    def setup_method(self):
+        self.rdt = None
+        self.dec = 14
+        self.type = None
+
+    def test_definition(self):
+        for i in FFTWDATA_SIZES:
+            xr, yr, dt = fftw_dct_ref(self.type, i, self.rdt)
+            x = idct(yr, type=self.type)
+            if self.type == 1:
+                x /= 2 * (i-1)
+            else:
+                x /= 2 * i
+            assert_equal(x.dtype, dt)
+            # XXX: we divide by np.max(y) because the tests fail otherwise. We
+            # should really use something like assert_array_approx_equal. The
+            # difference is due to fftw using a better algorithm w.r.t. error
+            # propagation compared to the ones from fftpack.
+            assert_array_almost_equal(x / np.max(x), xr / np.max(x), decimal=self.dec,
+                    err_msg="Size %d failed" % i)
+
+
+class TestIDCTIDouble(_TestIDCTBase):
+    def setup_method(self):
+        self.rdt = np.double
+        self.dec = 10
+        self.type = 1
+
+
+class TestIDCTIFloat(_TestIDCTBase):
+    def setup_method(self):
+        self.rdt = np.float32
+        self.dec = 4
+        self.type = 1
+
+
+class TestIDCTIInt(_TestIDCTBase):
+    def setup_method(self):
+        self.rdt = int
+        self.dec = 4
+        self.type = 1
+
+
+class TestIDCTIIDouble(_TestIDCTBase):
+    def setup_method(self):
+        self.rdt = np.double
+        self.dec = 10
+        self.type = 2
+
+
+class TestIDCTIIFloat(_TestIDCTBase):
+    def setup_method(self):
+        self.rdt = np.float32
+        self.dec = 5
+        self.type = 2
+
+
+class TestIDCTIIInt(_TestIDCTBase):
+    def setup_method(self):
+        self.rdt = int
+        self.dec = 5
+        self.type = 2
+
+
+class TestIDCTIIIDouble(_TestIDCTBase):
+    def setup_method(self):
+        self.rdt = np.double
+        self.dec = 14
+        self.type = 3
+
+
+class TestIDCTIIIFloat(_TestIDCTBase):
+    def setup_method(self):
+        self.rdt = np.float32
+        self.dec = 5
+        self.type = 3
+
+
+class TestIDCTIIIInt(_TestIDCTBase):
+    def setup_method(self):
+        self.rdt = int
+        self.dec = 5
+        self.type = 3
+
+class TestIDCTIVDouble(_TestIDCTBase):
+    def setup_method(self):
+        self.rdt = np.double
+        self.dec = 12
+        self.type = 4
+
+
+class TestIDCTIVFloat(_TestIDCTBase):
+    def setup_method(self):
+        self.rdt = np.float32
+        self.dec = 5
+        self.type = 4
+
+
+class TestIDCTIVInt(_TestIDCTBase):
+    def setup_method(self):
+        self.rdt = int
+        self.dec = 5
+        self.type = 4
+
+class _TestDSTBase:
+    def setup_method(self):
+        self.rdt = None  # dtype
+        self.dec = None  # number of decimals to match
+        self.type = None  # dst type
+
+    def test_definition(self):
+        for i in FFTWDATA_SIZES:
+            xr, yr, dt = fftw_dst_ref(self.type, i, self.rdt)
+            y = dst(xr, type=self.type)
+            assert_equal(y.dtype, dt)
+            # XXX: we divide by np.max(y) because the tests fail otherwise. We
+            # should really use something like assert_array_approx_equal. The
+            # difference is due to fftw using a better algorithm w.r.t. error
+            # propagation compared to the ones from fftpack.
+            assert_array_almost_equal(y / np.max(y), yr / np.max(y), decimal=self.dec,
+                    err_msg="Size %d failed" % i)
+
+
+class _TestDSTIBase(_TestDSTBase):
+    def test_definition_ortho(self):
+        # Test orthonormal mode.
+        dt = np.result_type(np.float32, self.rdt)
+        for xr in X:
+            x = np.array(xr, dtype=self.rdt)
+            y = dst(x, norm='ortho', type=1)
+            y2 = naive_dst1(x, norm='ortho')
+            assert_equal(y.dtype, dt)
+            assert_array_almost_equal(y / np.max(y), y2 / np.max(y), decimal=self.dec)
+
+class _TestDSTIVBase(_TestDSTBase):
+    def test_definition_ortho(self):
+        # Test orthonormal mode.
+        dt = np.result_type(np.float32, self.rdt)
+        for xr in X:
+            x = np.array(xr, dtype=self.rdt)
+            y = dst(x, norm='ortho', type=4)
+            y2 = naive_dst4(x, norm='ortho')
+            assert_equal(y.dtype, dt)
+            assert_array_almost_equal(y, y2, decimal=self.dec)
+
+class TestDSTIDouble(_TestDSTIBase):
+    def setup_method(self):
+        self.rdt = np.double
+        self.dec = 12
+        self.type = 1
+
+
+class TestDSTIFloat(_TestDSTIBase):
+    def setup_method(self):
+        self.rdt = np.float32
+        self.dec = 4
+        self.type = 1
+
+
+class TestDSTIInt(_TestDSTIBase):
+    def setup_method(self):
+        self.rdt = int
+        self.dec = 5
+        self.type = 1
+
+
+class TestDSTIIDouble(_TestDSTBase):
+    def setup_method(self):
+        self.rdt = np.double
+        self.dec = 14
+        self.type = 2
+
+
+class TestDSTIIFloat(_TestDSTBase):
+    def setup_method(self):
+        self.rdt = np.float32
+        self.dec = 6
+        self.type = 2
+
+
+class TestDSTIIInt(_TestDSTBase):
+    def setup_method(self):
+        self.rdt = int
+        self.dec = 6
+        self.type = 2
+
+
+class TestDSTIIIDouble(_TestDSTBase):
+    def setup_method(self):
+        self.rdt = np.double
+        self.dec = 14
+        self.type = 3
+
+
+class TestDSTIIIFloat(_TestDSTBase):
+    def setup_method(self):
+        self.rdt = np.float32
+        self.dec = 7
+        self.type = 3
+
+
+class TestDSTIIIInt(_TestDSTBase):
+    def setup_method(self):
+        self.rdt = int
+        self.dec = 7
+        self.type = 3
+
+
+class TestDSTIVDouble(_TestDSTIVBase):
+    def setup_method(self):
+        self.rdt = np.double
+        self.dec = 12
+        self.type = 4
+
+
+class TestDSTIVFloat(_TestDSTIVBase):
+    def setup_method(self):
+        self.rdt = np.float32
+        self.dec = 4
+        self.type = 4
+
+
+class TestDSTIVInt(_TestDSTIVBase):
+    def setup_method(self):
+        self.rdt = int
+        self.dec = 5
+        self.type = 4
+
+
+class _TestIDSTBase:
+    def setup_method(self):
+        self.rdt = None
+        self.dec = None
+        self.type = None
+
+    def test_definition(self):
+        for i in FFTWDATA_SIZES:
+            xr, yr, dt = fftw_dst_ref(self.type, i, self.rdt)
+            x = idst(yr, type=self.type)
+            if self.type == 1:
+                x /= 2 * (i+1)
+            else:
+                x /= 2 * i
+            assert_equal(x.dtype, dt)
+            # XXX: we divide by np.max(x) because the tests fail otherwise. We
+            # should really use something like assert_array_approx_equal. The
+            # difference is due to fftw using a better algorithm w.r.t. error
+            # propagation compared to the ones from fftpack.
+            assert_array_almost_equal(x / np.max(x), xr / np.max(x), decimal=self.dec,
+                    err_msg="Size %d failed" % i)
+
+
+class TestIDSTIDouble(_TestIDSTBase):
+    def setup_method(self):
+        self.rdt = np.double
+        self.dec = 12
+        self.type = 1
+
+
+class TestIDSTIFloat(_TestIDSTBase):
+    def setup_method(self):
+        self.rdt = np.float32
+        self.dec = 4
+        self.type = 1
+
+
+class TestIDSTIInt(_TestIDSTBase):
+    def setup_method(self):
+        self.rdt = int
+        self.dec = 4
+        self.type = 1
+
+
+class TestIDSTIIDouble(_TestIDSTBase):
+    def setup_method(self):
+        self.rdt = np.double
+        self.dec = 14
+        self.type = 2
+
+
+class TestIDSTIIFloat(_TestIDSTBase):
+    def setup_method(self):
+        self.rdt = np.float32
+        self.dec = 6
+        self.type = 2
+
+
+class TestIDSTIIInt(_TestIDSTBase):
+    def setup_method(self):
+        self.rdt = int
+        self.dec = 6
+        self.type = 2
+
+
+class TestIDSTIIIDouble(_TestIDSTBase):
+    def setup_method(self):
+        self.rdt = np.double
+        self.dec = 14
+        self.type = 3
+
+
+class TestIDSTIIIFloat(_TestIDSTBase):
+    def setup_method(self):
+        self.rdt = np.float32
+        self.dec = 6
+        self.type = 3
+
+
+class TestIDSTIIIInt(_TestIDSTBase):
+    def setup_method(self):
+        self.rdt = int
+        self.dec = 6
+        self.type = 3
+
+
+class TestIDSTIVDouble(_TestIDSTBase):
+    def setup_method(self):
+        self.rdt = np.double
+        self.dec = 12
+        self.type = 4
+
+
+class TestIDSTIVFloat(_TestIDSTBase):
+    def setup_method(self):
+        self.rdt = np.float32
+        self.dec = 6
+        self.type = 4
+
+
+class TestIDSTIVInt(_TestIDSTBase):
+    def setup_method(self):
+        self.rdt = int
+        self.dec = 6
+        self.type = 4
+
+
+class TestOverwrite:
+    """Check input overwrite behavior."""
+
+    real_dtypes = [np.float32, np.float64]
+
+    def _check(self, x, routine, type, fftsize, axis, norm, overwrite_x, **kw):
+        x2 = x.copy()
+        routine(x2, type, fftsize, axis, norm, overwrite_x=overwrite_x)
+
+        sig = "%s(%s%r, %r, axis=%r, overwrite_x=%r)" % (
+            routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x)
+        if not overwrite_x:
+            assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig)
+
+    def _check_1d(self, routine, dtype, shape, axis):
+        np.random.seed(1234)
+        if np.issubdtype(dtype, np.complexfloating):
+            data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
+        else:
+            data = np.random.randn(*shape)
+        data = data.astype(dtype)
+
+        for type in [1, 2, 3, 4]:
+            for overwrite_x in [True, False]:
+                for norm in [None, 'ortho']:
+                    self._check(data, routine, type, None, axis, norm,
+                                overwrite_x)
+
+    def test_dct(self):
+        for dtype in self.real_dtypes:
+            self._check_1d(dct, dtype, (16,), -1)
+            self._check_1d(dct, dtype, (16, 2), 0)
+            self._check_1d(dct, dtype, (2, 16), 1)
+
+    def test_idct(self):
+        for dtype in self.real_dtypes:
+            self._check_1d(idct, dtype, (16,), -1)
+            self._check_1d(idct, dtype, (16, 2), 0)
+            self._check_1d(idct, dtype, (2, 16), 1)
+
+    def test_dst(self):
+        for dtype in self.real_dtypes:
+            self._check_1d(dst, dtype, (16,), -1)
+            self._check_1d(dst, dtype, (16, 2), 0)
+            self._check_1d(dst, dtype, (2, 16), 1)
+
+    def test_idst(self):
+        for dtype in self.real_dtypes:
+            self._check_1d(idst, dtype, (16,), -1)
+            self._check_1d(idst, dtype, (16, 2), 0)
+            self._check_1d(idst, dtype, (2, 16), 1)
+
+
+class Test_DCTN_IDCTN:
+    dec = 14
+    dct_type = [1, 2, 3, 4]
+    norms = [None, 'ortho']
+    rstate = np.random.RandomState(1234)
+    shape = (32, 16)
+    data = rstate.randn(*shape)
+
+    @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
+                                                   (dstn, idstn)])
+    @pytest.mark.parametrize('axes', [None,
+                                      1, (1,), [1],
+                                      0, (0,), [0],
+                                      (0, 1), [0, 1],
+                                      (-2, -1), [-2, -1]])
+    @pytest.mark.parametrize('dct_type', dct_type)
+    @pytest.mark.parametrize('norm', ['ortho'])
+    def test_axes_round_trip(self, fforward, finverse, axes, dct_type, norm):
+        tmp = fforward(self.data, type=dct_type, axes=axes, norm=norm)
+        tmp = finverse(tmp, type=dct_type, axes=axes, norm=norm)
+        assert_array_almost_equal(self.data, tmp, decimal=12)
+
+    @pytest.mark.parametrize('fforward,fforward_ref', [(dctn, dct_2d_ref),
+                                                       (dstn, dst_2d_ref)])
+    @pytest.mark.parametrize('dct_type', dct_type)
+    @pytest.mark.parametrize('norm', norms)
+    def test_dctn_vs_2d_reference(self, fforward, fforward_ref,
+                                  dct_type, norm):
+        y1 = fforward(self.data, type=dct_type, axes=None, norm=norm)
+        y2 = fforward_ref(self.data, type=dct_type, norm=norm)
+        assert_array_almost_equal(y1, y2, decimal=11)
+
+    @pytest.mark.parametrize('finverse,finverse_ref', [(idctn, idct_2d_ref),
+                                                       (idstn, idst_2d_ref)])
+    @pytest.mark.parametrize('dct_type', dct_type)
+    @pytest.mark.parametrize('norm', [None, 'ortho'])
+    def test_idctn_vs_2d_reference(self, finverse, finverse_ref,
+                                   dct_type, norm):
+        fdata = dctn(self.data, type=dct_type, norm=norm)
+        y1 = finverse(fdata, type=dct_type, norm=norm)
+        y2 = finverse_ref(fdata, type=dct_type, norm=norm)
+        assert_array_almost_equal(y1, y2, decimal=11)
+
+    @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
+                                                   (dstn, idstn)])
+    def test_axes_and_shape(self, fforward, finverse):
+        with assert_raises(ValueError,
+                           match="when given, axes and shape arguments"
+                           " have to be of the same length"):
+            fforward(self.data, shape=self.data.shape[0], axes=(0, 1))
+
+        with assert_raises(ValueError,
+                           match="when given, axes and shape arguments"
+                           " have to be of the same length"):
+            fforward(self.data, shape=self.data.shape[0], axes=None)
+
+        with assert_raises(ValueError,
+                           match="when given, axes and shape arguments"
+                           " have to be of the same length"):
+            fforward(self.data, shape=self.data.shape, axes=0)
+
+    @pytest.mark.parametrize('fforward', [dctn, dstn])
+    def test_shape(self, fforward):
+        tmp = fforward(self.data, shape=(128, 128), axes=None)
+        assert_equal(tmp.shape, (128, 128))
+
+    @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
+                                                   (dstn, idstn)])
+    @pytest.mark.parametrize('axes', [1, (1,), [1],
+                                      0, (0,), [0]])
+    def test_shape_is_none_with_axes(self, fforward, finverse, axes):
+        tmp = fforward(self.data, shape=None, axes=axes, norm='ortho')
+        tmp = finverse(tmp, shape=None, axes=axes, norm='ortho')
+        assert_array_almost_equal(self.data, tmp, decimal=self.dec)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/integrate/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/integrate/__init__.py
new file mode 100644
index 00000000..bb6cce71
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/integrate/__init__.py
@@ -0,0 +1,107 @@
+"""
+=============================================
+Integration and ODEs (:mod:`scipy.integrate`)
+=============================================
+
+.. currentmodule:: scipy.integrate
+
+Integrating functions, given function object
+============================================
+
+.. autosummary::
+   :toctree: generated/
+
+   quad          -- General purpose integration
+   quad_vec      -- General purpose integration of vector-valued functions
+   dblquad       -- General purpose double integration
+   tplquad       -- General purpose triple integration
+   nquad         -- General purpose N-D integration
+   fixed_quad    -- Integrate func(x) using Gaussian quadrature of order n
+   quadrature    -- Integrate with given tolerance using Gaussian quadrature
+   romberg       -- Integrate func using Romberg integration
+   newton_cotes  -- Weights and error coefficient for Newton-Cotes integration
+   IntegrationWarning -- Warning on issues during integration
+   AccuracyWarning  -- Warning on issues during quadrature integration
+
+Integrating functions, given fixed samples
+==========================================
+
+.. autosummary::
+   :toctree: generated/
+
+   trapezoid            -- Use trapezoidal rule to compute integral.
+   cumulative_trapezoid -- Use trapezoidal rule to cumulatively compute integral.
+   simpson              -- Use Simpson's rule to compute integral from samples.
+   romb                 -- Use Romberg Integration to compute integral from
+                        -- (2**k + 1) evenly-spaced samples.
+
+.. seealso::
+
+   :mod:`scipy.special` for orthogonal polynomials, which provide Gaussian
+   quadrature roots and weights for other weighting factors and regions.
+
+Solving initial value problems for ODE systems
+==============================================
+
+The solvers are implemented as individual classes, which can be used directly
+(low-level usage) or through a convenience function.
+
+.. autosummary::
+   :toctree: generated/
+
+   solve_ivp     -- Convenient function for ODE integration.
+   RK23          -- Explicit Runge-Kutta solver of order 3(2).
+   RK45          -- Explicit Runge-Kutta solver of order 5(4).
+   DOP853        -- Explicit Runge-Kutta solver of order 8.
+   Radau         -- Implicit Runge-Kutta solver of order 5.
+   BDF           -- Implicit multi-step variable order (1 to 5) solver.
+   LSODA         -- LSODA solver from ODEPACK Fortran package.
+   OdeSolver     -- Base class for ODE solvers.
+   DenseOutput   -- Local interpolant for computing a dense output.
+   OdeSolution   -- Class which represents a continuous ODE solution.
+
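+For example, a minimal sketch integrating dy/dt = -y with ``solve_ivp``::
+
+    from scipy.integrate import solve_ivp
+    sol = solve_ivp(lambda t, y: -y, (0, 5), [1.0])
+    # sol.y[0, -1] is close to exp(-5)
+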
+
+Old API
+-------
+
+These are the routines developed earlier for SciPy. They wrap older solvers
+implemented in Fortran (mostly ODEPACK). While the interface to them is not
+particularly convenient and certain features are missing compared to the new
+API, the solvers themselves are of good quality and work fast as compiled
+Fortran code. In some cases, it might be worth using this old API.
+
+.. autosummary::
+   :toctree: generated/
+
+   odeint        -- General integration of ordinary differential equations.
+   ode           -- Integrate ODE using VODE and ZVODE routines.
+   complex_ode   -- Convert a complex-valued ODE to real-valued and integrate.
+
+
+Solving boundary value problems for ODE systems
+===============================================
+
+.. autosummary::
+   :toctree: generated/
+
+   solve_bvp     -- Solve a boundary value problem for a system of ODEs.
+"""  # noqa: E501
+
+
+from ._quadrature import *
+from ._odepack_py import *
+from ._quadpack_py import *
+from ._ode import *
+from ._bvp import solve_bvp
+from ._ivp import (solve_ivp, OdeSolution, DenseOutput,
+                   OdeSolver, RK23, RK45, DOP853, Radau, BDF, LSODA)
+from ._quad_vec import quad_vec
+
+# Deprecated namespaces, to be removed in v2.0.0
+from . import dop, lsoda, vode, odepack, quadpack
+
+__all__ = [s for s in dir() if not s.startswith('_')]
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/__packaged__/coreml/.python_dependencies/scipy/integrate/_bvp.py b/__packaged__/coreml/.python_dependencies/scipy/integrate/_bvp.py
new file mode 100644
index 00000000..97cb6b54
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/integrate/_bvp.py
@@ -0,0 +1,1159 @@
+"""Boundary value problem solver."""
+from warnings import warn
+
+import numpy as np
+from numpy.linalg import pinv
+
+from scipy.sparse import coo_matrix, csc_matrix
+from scipy.sparse.linalg import splu
+from scipy.optimize import OptimizeResult
+
+
+EPS = np.finfo(float).eps
+
+
+def estimate_fun_jac(fun, x, y, p, f0=None):
+    """Estimate derivatives of an ODE system rhs with forward differences.
+
+    Returns
+    -------
+    df_dy : ndarray, shape (n, n, m)
+        Derivatives with respect to y. An element (i, j, q) corresponds to
+        d f_i(x_q, y_q) / d (y_q)_j.
+    df_dp : ndarray with shape (n, k, m) or None
+        Derivatives with respect to p. An element (i, j, q) corresponds to
+        d f_i(x_q, y_q, p) / d p_j. If `p` is empty, None is returned.
+    """
+    n, m = y.shape
+    if f0 is None:
+        f0 = fun(x, y, p)
+
+    dtype = y.dtype
+
+    df_dy = np.empty((n, n, m), dtype=dtype)
+    h = EPS**0.5 * (1 + np.abs(y))
+    for i in range(n):
+        y_new = y.copy()
+        y_new[i] += h[i]
+        hi = y_new[i] - y[i]
+        f_new = fun(x, y_new, p)
+        df_dy[:, i, :] = (f_new - f0) / hi
+
+    k = p.shape[0]
+    if k == 0:
+        df_dp = None
+    else:
+        df_dp = np.empty((n, k, m), dtype=dtype)
+        h = EPS**0.5 * (1 + np.abs(p))
+        for i in range(k):
+            p_new = p.copy()
+            p_new[i] += h[i]
+            hi = p_new[i] - p[i]
+            f_new = fun(x, y, p_new)
+            df_dp[:, i, :] = (f_new - f0) / hi
+
+    return df_dy, df_dp
+
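+# A minimal sketch (illustration only, using the shapes documented above):
+# for the scalar rhs f = y**2 the forward-difference estimate approximates
+# the exact df/dy = 2*y, e.g.
+#
+#   y = np.array([[1.0, 2.0, 3.0]])          # n=1 equation, m=3 nodes
+#   df_dy, df_dp = estimate_fun_jac(lambda x, y, p: y**2,
+#                                   np.zeros(3), y, np.array([]))
+#   # df_dy[0, 0] is approximately [2., 4., 6.], and df_dp is None
+#   # because p is empty.
+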
+
+def estimate_bc_jac(bc, ya, yb, p, bc0=None):
+    """Estimate derivatives of boundary conditions with forward differences.
+
+    Returns
+    -------
+    dbc_dya : ndarray, shape (n + k, n)
+        Derivatives with respect to ya. An element (i, j) corresponds to
+        d bc_i / d ya_j.
+    dbc_dyb : ndarray, shape (n + k, n)
+        Derivatives with respect to yb. An element (i, j) corresponds to
+        d bc_i / d yb_j.
+    dbc_dp : ndarray with shape (n + k, k) or None
+        Derivatives with respect to p. An element (i, j) corresponds to
+        d bc_i / d p_j. If `p` is empty, None is returned.
+    """
+    n = ya.shape[0]
+    k = p.shape[0]
+
+    if bc0 is None:
+        bc0 = bc(ya, yb, p)
+
+    dtype = ya.dtype
+
+    dbc_dya = np.empty((n, n + k), dtype=dtype)
+    h = EPS**0.5 * (1 + np.abs(ya))
+    for i in range(n):
+        ya_new = ya.copy()
+        ya_new[i] += h[i]
+        hi = ya_new[i] - ya[i]
+        bc_new = bc(ya_new, yb, p)
+        dbc_dya[i] = (bc_new - bc0) / hi
+    dbc_dya = dbc_dya.T
+
+    h = EPS**0.5 * (1 + np.abs(yb))
+    dbc_dyb = np.empty((n, n + k), dtype=dtype)
+    for i in range(n):
+        yb_new = yb.copy()
+        yb_new[i] += h[i]
+        hi = yb_new[i] - yb[i]
+        bc_new = bc(ya, yb_new, p)
+        dbc_dyb[i] = (bc_new - bc0) / hi
+    dbc_dyb = dbc_dyb.T
+
+    if k == 0:
+        dbc_dp = None
+    else:
+        h = EPS**0.5 * (1 + np.abs(p))
+        dbc_dp = np.empty((k, n + k), dtype=dtype)
+        for i in range(k):
+            p_new = p.copy()
+            p_new[i] += h[i]
+            hi = p_new[i] - p[i]
+            bc_new = bc(ya, yb, p_new)
+            dbc_dp[i] = (bc_new - bc0) / hi
+        dbc_dp = dbc_dp.T
+
+    return dbc_dya, dbc_dyb, dbc_dp
+
+
+def compute_jac_indices(n, m, k):
+    """Compute indices for the collocation system Jacobian construction.
+
+    See `construct_global_jac` for the explanation.
+    """
+    i_col = np.repeat(np.arange((m - 1) * n), n)
+    j_col = (np.tile(np.arange(n), n * (m - 1)) +
+             np.repeat(np.arange(m - 1) * n, n**2))
+
+    i_bc = np.repeat(np.arange((m - 1) * n, m * n + k), n)
+    j_bc = np.tile(np.arange(n), n + k)
+
+    i_p_col = np.repeat(np.arange((m - 1) * n), k)
+    j_p_col = np.tile(np.arange(m * n, m * n + k), (m - 1) * n)
+
+    i_p_bc = np.repeat(np.arange((m - 1) * n, m * n + k), k)
+    j_p_bc = np.tile(np.arange(m * n, m * n + k), n + k)
+
+    i = np.hstack((i_col, i_col, i_bc, i_bc, i_p_col, i_p_bc))
+    j = np.hstack((j_col, j_col + n,
+                   j_bc, j_bc + (m - 1) * n,
+                   j_p_col, j_p_bc))
+
+    return i, j
+
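+# A minimal sketch (illustration only): for the n=2, m=4, k=1 example drawn
+# in `construct_global_jac` below, compute_jac_indices(2, 4, 1) returns two
+# arrays of length 45 -- one (row, col) pair per structurally nonzero entry
+# of the 9 x 9 Jacobian (24 collocation, 12 boundary, 9 parameter entries).
+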
+
+def stacked_matmul(a, b):
+    """Stacked matrix multiply: out[i,:,:] = np.dot(a[i,:,:], b[i,:,:]).
+
+    An empirical optimization: use an outer Python loop with BLAS-backed dot
+    calls for large matrices, otherwise use a single einsum call.
+    """
+    if a.shape[1] > 50:
+        out = np.empty((a.shape[0], a.shape[1], b.shape[2]))
+        for i in range(a.shape[0]):
+            out[i] = np.dot(a[i], b[i])
+        return out
+    else:
+        return np.einsum('...ij,...jk->...ik', a, b)
+
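+# Note: for 3-D stacks both branches compute the same result as `a @ b`;
+# the split is purely a speed heuristic.
+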
+
+def construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle, df_dp,
+                         df_dp_middle, dbc_dya, dbc_dyb, dbc_dp):
+    """Construct the Jacobian of the collocation system.
+
+    There are n * m + k functions: m - 1 collocations residuals, each
+    containing n components, followed by n + k boundary condition residuals.
+
+    There are n * m + k variables: m vectors of y, each containing n
+    components, followed by k values of vector p.
+
+    For example, let m = 4, n = 2 and k = 1, then the Jacobian will have
+    the following sparsity structure:
+
+        1 1 2 2 0 0 0 0  5
+        1 1 2 2 0 0 0 0  5
+        0 0 1 1 2 2 0 0  5
+        0 0 1 1 2 2 0 0  5
+        0 0 0 0 1 1 2 2  5
+        0 0 0 0 1 1 2 2  5
+
+        3 3 0 0 0 0 4 4  6
+        3 3 0 0 0 0 4 4  6
+        3 3 0 0 0 0 4 4  6
+
+    Zeros denote identically zero values, other values denote different kinds
+    of blocks in the matrix (see below). The blank row indicates the separation
+    of collocation residuals from boundary conditions. And the blank column
+    indicates the separation of y values from p values.
+
+    Refer to [1]_ (p. 306) for the formula of n x n blocks for derivatives
+    of collocation residuals with respect to y.
+
+    Parameters
+    ----------
+    n : int
+        Number of equations in the ODE system.
+    m : int
+        Number of nodes in the mesh.
+    k : int
+        Number of the unknown parameters.
+    i_jac, j_jac : ndarray
+        Row and column indices returned by `compute_jac_indices`. They
+        represent different blocks in the Jacobian matrix in the following
+        order (see the scheme above):
+
+            * 1: m - 1 diagonal n x n blocks for the collocation residuals.
+            * 2: m - 1 off-diagonal n x n blocks for the collocation residuals.
+            * 3: (n + k) x n block for the dependency of the boundary
+              conditions on ya.
+            * 4: (n + k) x n block for the dependency of the boundary
+              conditions on yb.
+            * 5: (m - 1) * n x k block for the dependency of the collocation
+              residuals on p.
+            * 6: (n + k) x k block for the dependency of the boundary
+              conditions on p.
+
+    df_dy : ndarray, shape (n, n, m)
+        Jacobian of f with respect to y computed at the mesh nodes.
+    df_dy_middle : ndarray, shape (n, n, m - 1)
+        Jacobian of f with respect to y computed at the middle between the
+        mesh nodes.
+    df_dp : ndarray with shape (n, k, m) or None
+        Jacobian of f with respect to p computed at the mesh nodes.
+    df_dp_middle : ndarray with shape (n, k, m - 1) or None
+        Jacobian of f with respect to p computed at the middle between the
+        mesh nodes.
+    dbc_dya, dbc_dyb : ndarray, shape (n, n)
+        Jacobian of bc with respect to ya and yb.
+    dbc_dp : ndarray with shape (n, k) or None
+        Jacobian of bc with respect to p.
+
+    Returns
+    -------
+    J : csc_matrix, shape (n * m + k, n * m + k)
+        Jacobian of the collocation system in a sparse form.
+
+    References
+    ----------
+    .. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
+       Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
+       Number 3, pp. 299-316, 2001.
+    """
+    df_dy = np.transpose(df_dy, (2, 0, 1))
+    df_dy_middle = np.transpose(df_dy_middle, (2, 0, 1))
+
+    h = h[:, np.newaxis, np.newaxis]
+
+    dtype = df_dy.dtype
+
+    # Computing diagonal n x n blocks.
+    dPhi_dy_0 = np.empty((m - 1, n, n), dtype=dtype)
+    dPhi_dy_0[:] = -np.identity(n)
+    dPhi_dy_0 -= h / 6 * (df_dy[:-1] + 2 * df_dy_middle)
+    T = stacked_matmul(df_dy_middle, df_dy[:-1])
+    dPhi_dy_0 -= h**2 / 12 * T
+
+    # Computing off-diagonal n x n blocks.
+    dPhi_dy_1 = np.empty((m - 1, n, n), dtype=dtype)
+    dPhi_dy_1[:] = np.identity(n)
+    dPhi_dy_1 -= h / 6 * (df_dy[1:] + 2 * df_dy_middle)
+    T = stacked_matmul(df_dy_middle, df_dy[1:])
+    dPhi_dy_1 += h**2 / 12 * T
+
+    values = np.hstack((dPhi_dy_0.ravel(), dPhi_dy_1.ravel(), dbc_dya.ravel(),
+                        dbc_dyb.ravel()))
+
+    if k > 0:
+        df_dp = np.transpose(df_dp, (2, 0, 1))
+        df_dp_middle = np.transpose(df_dp_middle, (2, 0, 1))
+        T = stacked_matmul(df_dy_middle, df_dp[:-1] - df_dp[1:])
+        df_dp_middle += 0.125 * h * T
+        dPhi_dp = -h/6 * (df_dp[:-1] + df_dp[1:] + 4 * df_dp_middle)
+        values = np.hstack((values, dPhi_dp.ravel(), dbc_dp.ravel()))
+
+    J = coo_matrix((values, (i_jac, j_jac)))
+    return csc_matrix(J)
+
+
+def collocation_fun(fun, y, p, x, h):
+    """Evaluate collocation residuals.
+
+    This function lies in the core of the method. The solution is sought
+    as a cubic C1 continuous spline with derivatives matching the ODE rhs
+    at given nodes `x`. Collocation conditions are formed from the equality
+    of the spline derivatives and rhs of the ODE system in the middle points
+    between nodes.
+
+    This method is classified as a Lobatto IIIA method in the ODE literature.
+    Refer to [1]_ for the formula and some discussion.
+
+    Returns
+    -------
+    col_res : ndarray, shape (n, m - 1)
+        Collocation residuals at the middle points of the mesh intervals.
+    y_middle : ndarray, shape (n, m - 1)
+        Values of the cubic spline evaluated at the middle points of the mesh
+        intervals.
+    f : ndarray, shape (n, m)
+        RHS of the ODE system evaluated at the mesh nodes.
+    f_middle : ndarray, shape (n, m - 1)
+        RHS of the ODE system evaluated at the middle points of the mesh
+        intervals (and using `y_middle`).
+
+    References
+    ----------
+    .. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
+           Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
+           Number 3, pp. 299-316, 2001.
+    """
+    f = fun(x, y, p)
+    y_middle = (0.5 * (y[:, 1:] + y[:, :-1]) -
+                0.125 * h * (f[:, 1:] - f[:, :-1]))
+    f_middle = fun(x[:-1] + 0.5 * h, y_middle, p)
+    col_res = y[:, 1:] - y[:, :-1] - h / 6 * (f[:, :-1] + f[:, 1:] +
+                                              4 * f_middle)
+
+    return col_res, y_middle, f, f_middle
+
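+# A minimal sketch (illustration only): for the exact solution y = exp(x) of
+# y' = y, the collocation residuals are small, limited only by the accuracy
+# of the underlying Lobatto IIIA (Simpson-type) formula, e.g.
+#
+#   x = np.linspace(0, 1, 5)
+#   y = np.exp(x)[np.newaxis, :]
+#   res, *_ = collocation_fun(lambda x, y, p: y, y, np.array([]),
+#                             x, np.diff(x))
+#   # np.abs(res).max() is of order 1e-5; the residual shrinks as h**5.
+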
+
+def prepare_sys(n, m, k, fun, bc, fun_jac, bc_jac, x, h):
+    """Create the function and the Jacobian for the collocation system."""
+    x_middle = x[:-1] + 0.5 * h
+    i_jac, j_jac = compute_jac_indices(n, m, k)
+
+    def col_fun(y, p):
+        return collocation_fun(fun, y, p, x, h)
+
+    def sys_jac(y, p, y_middle, f, f_middle, bc0):
+        if fun_jac is None:
+            df_dy, df_dp = estimate_fun_jac(fun, x, y, p, f)
+            df_dy_middle, df_dp_middle = estimate_fun_jac(
+                fun, x_middle, y_middle, p, f_middle)
+        else:
+            df_dy, df_dp = fun_jac(x, y, p)
+            df_dy_middle, df_dp_middle = fun_jac(x_middle, y_middle, p)
+
+        if bc_jac is None:
+            dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(bc, y[:, 0], y[:, -1],
+                                                       p, bc0)
+        else:
+            dbc_dya, dbc_dyb, dbc_dp = bc_jac(y[:, 0], y[:, -1], p)
+
+        return construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy,
+                                    df_dy_middle, df_dp, df_dp_middle, dbc_dya,
+                                    dbc_dyb, dbc_dp)
+
+    return col_fun, sys_jac
+
+
+def solve_newton(n, m, h, col_fun, bc, jac, y, p, B, bvp_tol, bc_tol):
+    """Solve the nonlinear collocation system by a Newton method.
+
+    This is a simple Newton method with a backtracking line search. As
+    advised in [1]_, an affine-invariant criterion function F = ||J^-1 r||^2
+    is used, where J is the Jacobian matrix at the current iteration and r is
+    the vector of collocation residuals (values of the system lhs).
+
+    The method alternates between full Newton iterations and fixed-Jacobian
+    iterations: after a full step is accepted, the factorized Jacobian is
+    reused until the line search has to shorten a step again.
+
+    There are other tricks proposed in [1]_, but they are not used as they
+    don't seem to improve anything significantly, and even break the
+    convergence on some test problems I tried.
+
+    All important parameters of the algorithm are defined inside the function.
+
+    Parameters
+    ----------
+    n : int
+        Number of equations in the ODE system.
+    m : int
+        Number of nodes in the mesh.
+    h : ndarray, shape (m-1,)
+        Mesh intervals.
+    col_fun : callable
+        Function computing collocation residuals.
+    bc : callable
+        Function computing boundary condition residuals.
+    jac : callable
+        Function computing the Jacobian of the whole system (including
+        collocation and boundary condition residuals). It is supposed to
+        return csc_matrix.
+    y : ndarray, shape (n, m)
+        Initial guess for the function values at the mesh nodes.
+    p : ndarray, shape (k,)
+        Initial guess for the unknown parameters.
+    B : ndarray with shape (n, n) or None
+        Matrix to force the S y(a) = 0 condition for problems with a
+        singular term. If None, the singular term is assumed to be absent.
+    bvp_tol : float
+        Tolerance to which we want to solve a BVP.
+    bc_tol : float
+        Tolerance to which we want to satisfy the boundary conditions.
+
+    Returns
+    -------
+    y : ndarray, shape (n, m)
+        Final iterate for the function values at the mesh nodes.
+    p : ndarray, shape (k,)
+        Final iterate for the unknown parameters.
+    singular : bool
+        True if the LU decomposition failed because the Jacobian turned out
+        to be singular.
+
+    References
+    ----------
+    .. [1]  U. Ascher, R. Mattheij and R. Russell "Numerical Solution of
+       Boundary Value Problems for Ordinary Differential Equations"
+    """
+    # We know that the solution residuals at the middle points of the mesh
+    # are connected with the collocation residuals by
+    # r_middle = 1.5 * col_res / h. As our BVP solver tries to decrease
+    # relative residuals below a certain tolerance, it seems reasonable to
+    # terminate Newton iterations by comparing
+    # r_middle / (1 + np.abs(f_middle)) with a certain threshold, which we
+    # choose to be 1.5 orders of magnitude lower than the BVP tolerance.
+    # Rewriting the condition as col_res < tol_r * (1 + np.abs(f_middle))
+    # gives tol_r as follows:
+    tol_r = 2/3 * h * 5e-2 * bvp_tol
+
+    # Maximum allowed number of Jacobian evaluations and factorizations, in
+    # other words, the maximum number of full Newton iterations. A small value
+    # is recommended in the literature.
+    max_njev = 4
+
+    # Maximum number of iterations, considering that some of them can be
+    # performed with the fixed Jacobian. In theory, such iterations are cheap,
+    # but it's not that simple in Python.
+    max_iter = 8
+
+    # Minimum relative improvement of the criterion function to accept the
+    # step (Armijo constant).
+    sigma = 0.2
+
+    # Step size decrease factor for backtracking.
+    tau = 0.5
+
+    # Maximum number of backtracking steps, the minimum step is then
+    # tau ** n_trial.
+    n_trial = 4
+
+    col_res, y_middle, f, f_middle = col_fun(y, p)
+    bc_res = bc(y[:, 0], y[:, -1], p)
+    res = np.hstack((col_res.ravel(order='F'), bc_res))
+
+    njev = 0
+    singular = False
+    recompute_jac = True
+    for iteration in range(max_iter):
+        if recompute_jac:
+            J = jac(y, p, y_middle, f, f_middle, bc_res)
+            njev += 1
+            try:
+                LU = splu(J)
+            except RuntimeError:
+                singular = True
+                break
+
+            step = LU.solve(res)
+            cost = np.dot(step, step)
+
+        y_step = step[:m * n].reshape((n, m), order='F')
+        p_step = step[m * n:]
+
+        alpha = 1
+        for trial in range(n_trial + 1):
+            y_new = y - alpha * y_step
+            if B is not None:
+                y_new[:, 0] = np.dot(B, y_new[:, 0])
+            p_new = p - alpha * p_step
+
+            col_res, y_middle, f, f_middle = col_fun(y_new, p_new)
+            bc_res = bc(y_new[:, 0], y_new[:, -1], p_new)
+            res = np.hstack((col_res.ravel(order='F'), bc_res))
+
+            step_new = LU.solve(res)
+            cost_new = np.dot(step_new, step_new)
+            if cost_new < (1 - 2 * alpha * sigma) * cost:
+                break
+
+            if trial < n_trial:
+                alpha *= tau
+
+        y = y_new
+        p = p_new
+
+        if njev == max_njev:
+            break
+
+        if (np.all(np.abs(col_res) < tol_r * (1 + np.abs(f_middle))) and
+                np.all(np.abs(bc_res) < bc_tol)):
+            break
+
+        # If the full step was taken, then we are going to continue with
+        # the same Jacobian. This is the approach of BVP_SOLVER.
+        if alpha == 1:
+            step = step_new
+            cost = cost_new
+            recompute_jac = False
+        else:
+            recompute_jac = True
+
+    return y, p, singular
+
+
+def print_iteration_header():
+    print("{:^15}{:^15}{:^15}{:^15}{:^15}".format(
+        "Iteration", "Max residual", "Max BC residual", "Total nodes",
+        "Nodes added"))
+
+
+def print_iteration_progress(iteration, residual, bc_residual, total_nodes,
+                             nodes_added):
+    print("{:^15}{:^15.2e}{:^15.2e}{:^15}{:^15}".format(
+        iteration, residual, bc_residual, total_nodes, nodes_added))
+
+
+class BVPResult(OptimizeResult):
+    pass
+
+
+TERMINATION_MESSAGES = {
+    0: "The algorithm converged to the desired accuracy.",
+    1: "The maximum number of mesh nodes is exceeded.",
+    2: "A singular Jacobian encountered when solving the collocation system.",
+    3: "The solver was unable to satisfy boundary conditions tolerance on iteration 10."
+}
+
+
+def estimate_rms_residuals(fun, sol, x, h, p, r_middle, f_middle):
+    """Estimate rms values of collocation residuals using Lobatto quadrature.
+
+    The residuals are defined as the difference between the derivatives of
+    our solution and rhs of the ODE system. We use relative residuals, i.e.,
+    normalized by 1 + np.abs(f). RMS values are computed as sqrt from the
+    normalized integrals of the squared relative residuals over each interval.
+    Integrals are estimated using 5-point Lobatto quadrature [1]_; we use the
+    fact that residuals at the mesh nodes are identically zero.
+
+    In [2]_ the integrals are not normalized by interval lengths, which gives
+    a higher rate of convergence of the residuals by a factor of h**0.5. The
+    normalization is done here for ease of interpretation of the return
+    values as RMS estimates.
+
+    Returns
+    -------
+    rms_res : ndarray, shape (m - 1,)
+        Estimated rms values of the relative residuals over each interval.
+
+    References
+    ----------
+    .. [1] http://mathworld.wolfram.com/LobattoQuadrature.html
+    .. [2] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
+       Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
+       Number 3, pp. 299-316, 2001.
+    """
+    x_middle = x[:-1] + 0.5 * h
+    s = 0.5 * h * (3/7)**0.5
+    x1 = x_middle + s
+    x2 = x_middle - s
+    y1 = sol(x1)
+    y2 = sol(x2)
+    y1_prime = sol(x1, 1)
+    y2_prime = sol(x2, 1)
+    f1 = fun(x1, y1, p)
+    f2 = fun(x2, y2, p)
+    r1 = y1_prime - f1
+    r2 = y2_prime - f2
+
+    r_middle /= 1 + np.abs(f_middle)
+    r1 /= 1 + np.abs(f1)
+    r2 /= 1 + np.abs(f2)
+
+    r1 = np.sum(np.real(r1 * np.conj(r1)), axis=0)
+    r2 = np.sum(np.real(r2 * np.conj(r2)), axis=0)
+    r_middle = np.sum(np.real(r_middle * np.conj(r_middle)), axis=0)
+
+    return (0.5 * (32 / 45 * r_middle + 49 / 90 * (r1 + r2))) ** 0.5
+
+
+def create_spline(y, yp, x, h):
+    """Create a cubic spline given values and derivatives.
+
+    Formulas for the coefficients are taken from interpolate.CubicSpline.
+
+    Returns
+    -------
+    sol : PPoly
+        Constructed spline as a PPoly instance.
+    """
+    from scipy.interpolate import PPoly
+
+    n, m = y.shape
+    c = np.empty((4, n, m - 1), dtype=y.dtype)
+    slope = (y[:, 1:] - y[:, :-1]) / h
+    t = (yp[:, :-1] + yp[:, 1:] - 2 * slope) / h
+    c[0] = t / h
+    c[1] = (slope - yp[:, :-1]) / h - t
+    c[2] = yp[:, :-1]
+    c[3] = y[:, :-1]
+    c = np.moveaxis(c, 1, 0)
+
+    return PPoly(c, x, extrapolate=True, axis=1)
+
+
+def modify_mesh(x, insert_1, insert_2):
+    """Insert nodes into a mesh.
+
+    Node removal logic is not established; its impact on the solver is
+    presumably negligible, so only insertion is done in this function.
+
+    Parameters
+    ----------
+    x : ndarray, shape (m,)
+        Mesh nodes.
+    insert_1 : ndarray
+        Indices of intervals into each of which 1 new node is inserted in
+        the middle.
+    insert_2 : ndarray
+        Indices of intervals into each of which 2 new nodes are inserted,
+        dividing the interval into 3 equal parts.
+
+    Returns
+    -------
+    x_new : ndarray
+        New mesh nodes.
+
+    Notes
+    -----
+    `insert_1` and `insert_2` should not have common values.
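+
+    Examples
+    --------
+    For example, with ``x = [0, 1, 2]``, ``insert_1 = [0]`` and
+    ``insert_2 = [1]``, the new mesh is ``[0, 0.5, 1, 4/3, 5/3, 2]``.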
+    """
+    # Because the np.insert implementation apparently varies across NumPy
+    # versions, we use a simple and reliable approach based on sorting.
+    return np.sort(np.hstack((
+        x,
+        0.5 * (x[insert_1] + x[insert_1 + 1]),
+        (2 * x[insert_2] + x[insert_2 + 1]) / 3,
+        (x[insert_2] + 2 * x[insert_2 + 1]) / 3
+    )))
+
+
+def wrap_functions(fun, bc, fun_jac, bc_jac, k, a, S, D, dtype):
+    """Wrap functions for unified usage in the solver."""
+    if fun_jac is None:
+        fun_jac_wrapped = None
+
+    if bc_jac is None:
+        bc_jac_wrapped = None
+
+    if k == 0:
+        def fun_p(x, y, _):
+            return np.asarray(fun(x, y), dtype)
+
+        def bc_wrapped(ya, yb, _):
+            return np.asarray(bc(ya, yb), dtype)
+
+        if fun_jac is not None:
+            def fun_jac_p(x, y, _):
+                return np.asarray(fun_jac(x, y), dtype), None
+
+        if bc_jac is not None:
+            def bc_jac_wrapped(ya, yb, _):
+                dbc_dya, dbc_dyb = bc_jac(ya, yb)
+                return (np.asarray(dbc_dya, dtype),
+                        np.asarray(dbc_dyb, dtype), None)
+    else:
+        def fun_p(x, y, p):
+            return np.asarray(fun(x, y, p), dtype)
+
+        def bc_wrapped(ya, yb, p):
+            return np.asarray(bc(ya, yb, p), dtype)
+
+        if fun_jac is not None:
+            def fun_jac_p(x, y, p):
+                df_dy, df_dp = fun_jac(x, y, p)
+                return np.asarray(df_dy, dtype), np.asarray(df_dp, dtype)
+
+        if bc_jac is not None:
+            def bc_jac_wrapped(ya, yb, p):
+                dbc_dya, dbc_dyb, dbc_dp = bc_jac(ya, yb, p)
+                return (np.asarray(dbc_dya, dtype), np.asarray(dbc_dyb, dtype),
+                        np.asarray(dbc_dp, dtype))
+
+    if S is None:
+        fun_wrapped = fun_p
+    else:
+        def fun_wrapped(x, y, p):
+            f = fun_p(x, y, p)
+            if x[0] == a:
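+                # At the singular point x = a the term S * y / (x - a) is
+                # undefined; the derivative there is instead corrected with
+                # D = (I - S)^+, computed in `solve_bvp`.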
+                f[:, 0] = np.dot(D, f[:, 0])
+                f[:, 1:] += np.dot(S, y[:, 1:]) / (x[1:] - a)
+            else:
+                f += np.dot(S, y) / (x - a)
+            return f
+
+    if fun_jac is not None:
+        if S is None:
+            fun_jac_wrapped = fun_jac_p
+        else:
+            Sr = S[:, :, np.newaxis]
+
+            def fun_jac_wrapped(x, y, p):
+                df_dy, df_dp = fun_jac_p(x, y, p)
+                if x[0] == a:
+                    df_dy[:, :, 0] = np.dot(D, df_dy[:, :, 0])
+                    df_dy[:, :, 1:] += Sr / (x[1:] - a)
+                else:
+                    df_dy += Sr / (x - a)
+
+                return df_dy, df_dp
+
+    return fun_wrapped, bc_wrapped, fun_jac_wrapped, bc_jac_wrapped
+
+
+def solve_bvp(fun, bc, x, y, p=None, S=None, fun_jac=None, bc_jac=None,
+              tol=1e-3, max_nodes=1000, verbose=0, bc_tol=None):
+    """Solve a boundary value problem for a system of ODEs.
+
+    This function numerically solves a first order system of ODEs subject to
+    two-point boundary conditions::
+
+        dy / dx = f(x, y, p) + S * y / (x - a), a <= x <= b
+        bc(y(a), y(b), p) = 0
+
+    Here x is a 1-D independent variable, y(x) is an n-D
+    vector-valued function and p is a k-D vector of unknown
+    parameters which is to be found along with y(x). For the problem to be
+    determined, there must be n + k boundary conditions, i.e., bc must be an
+    (n + k)-D function.
+
+    The last singular term on the right-hand side of the system is optional.
+    It is defined by an n-by-n matrix S, such that the solution must satisfy
+    S y(a) = 0. This condition will be forced during iterations, so it must not
+    contradict boundary conditions. See [2]_ for an explanation of how this
+    term is handled when solving BVPs numerically.
+
+    Problems in a complex domain can be solved as well. In this case, y and p
+    are considered to be complex, and f and bc are assumed to be complex-valued
+    functions, but x stays real. Note that f and bc must be complex
+    differentiable (satisfy Cauchy-Riemann equations [4]_), otherwise you
+    should rewrite your problem for real and imaginary parts separately. To
+    solve a problem in a complex domain, pass an initial guess for y with a
+    complex data type (see below).
+
+    Parameters
+    ----------
+    fun : callable
+        Right-hand side of the system. The calling signature is ``fun(x, y)``,
+        or ``fun(x, y, p)`` if parameters are present. All arguments are
+        ndarray: ``x`` with shape (m,), ``y`` with shape (n, m), meaning that
+        ``y[:, i]`` corresponds to ``x[i]``, and ``p`` with shape (k,). The
+        return value must be an array with shape (n, m) and with the same
+        layout as ``y``.
+    bc : callable
+        Function evaluating residuals of the boundary conditions. The calling
+        signature is ``bc(ya, yb)``, or ``bc(ya, yb, p)`` if parameters are
+        present. All arguments are ndarray: ``ya`` and ``yb`` with shape (n,),
+        and ``p`` with shape (k,). The return value must be an array with
+        shape (n + k,).
+    x : array_like, shape (m,)
+        Initial mesh. Must be a strictly increasing sequence of real numbers
+        with ``x[0]=a`` and ``x[-1]=b``.
+    y : array_like, shape (n, m)
+        Initial guess for the function values at the mesh nodes; the ith
+        column corresponds to ``x[i]``. For problems in a complex domain pass
+        `y` with a complex data type (even if the initial guess is purely
+        real).
+    p : array_like with shape (k,) or None, optional
+        Initial guess for the unknown parameters. If None (default), it is
+        assumed that the problem doesn't depend on any parameters.
+    S : array_like with shape (n, n) or None
+        Matrix defining the singular term. If None (default), the problem is
+        solved without the singular term.
+    fun_jac : callable or None, optional
+        Function computing derivatives of f with respect to y and p. The
+        calling signature is ``fun_jac(x, y)``, or ``fun_jac(x, y, p)`` if
+        parameters are present. The return must contain 1 or 2 elements in the
+        following order:
+
+            * df_dy : array_like with shape (n, n, m), where an element
+              (i, j, q) is equal to d f_i(x_q, y_q, p) / d (y_q)_j.
+            * df_dp : array_like with shape (n, k, m), where an element
+              (i, j, q) is equal to d f_i(x_q, y_q, p) / d p_j.
+
+        Here q numbers the nodes at which x and y are defined, whereas i and j
+        number vector components. If the problem is solved without unknown
+        parameters, df_dp should not be returned.
+
+        If `fun_jac` is None (default), the derivatives will be estimated
+        by the forward finite differences.
+    bc_jac : callable or None, optional
+        Function computing derivatives of bc with respect to ya, yb, and p.
+        The calling signature is ``bc_jac(ya, yb)``, or ``bc_jac(ya, yb, p)``
+        if parameters are present. The return must contain 2 or 3 elements in
+        the following order:
+
+            * dbc_dya : array_like with shape (n, n), where an element (i, j)
+              is equal to d bc_i(ya, yb, p) / d ya_j.
+            * dbc_dyb : array_like with shape (n, n), where an element (i, j)
+              is equal to d bc_i(ya, yb, p) / d yb_j.
+            * dbc_dp : array_like with shape (n, k), where an element (i, j)
+              is equal to d bc_i(ya, yb, p) / d p_j.
+
+        If the problem is solved without unknown parameters, dbc_dp should not
+        be returned.
+
+        If `bc_jac` is None (default), the derivatives will be estimated by
+        the forward finite differences.
+    tol : float, optional
+        Desired tolerance of the solution. If we define ``r = y' - f(x, y)``,
+        where y is the found solution, then the solver tries to achieve on each
+        mesh interval ``norm(r / (1 + abs(f))) < tol``, where ``norm`` is
+        estimated in a root mean squared sense (using a numerical quadrature
+        formula). Default is 1e-3.
+    max_nodes : int, optional
+        Maximum allowed number of the mesh nodes. If exceeded, the algorithm
+        terminates. Default is 1000.
+    verbose : {0, 1, 2}, optional
+        Level of algorithm's verbosity:
+
+            * 0 (default) : work silently.
+            * 1 : display a termination report.
+            * 2 : display progress during iterations.
+    bc_tol : float, optional
+        Desired absolute tolerance for the boundary condition residuals: `bc`
+        value should satisfy ``abs(bc) < bc_tol`` component-wise.
+        Defaults to `tol`. Up to 10 iterations are allowed to achieve this
+        tolerance.
+
+    Returns
+    -------
+    Bunch object with the following fields defined:
+    sol : PPoly
+        Found solution for y as `scipy.interpolate.PPoly` instance, a C1
+        continuous cubic spline.
+    p : ndarray or None, shape (k,)
+        Found parameters. None, if the parameters were not present in the
+        problem.
+    x : ndarray, shape (m,)
+        Nodes of the final mesh.
+    y : ndarray, shape (n, m)
+        Solution values at the mesh nodes.
+    yp : ndarray, shape (n, m)
+        Solution derivatives at the mesh nodes.
+    rms_residuals : ndarray, shape (m - 1,)
+        RMS values of the relative residuals over each mesh interval (see the
+        description of `tol` parameter).
+    niter : int
+        Number of completed iterations.
+    status : int
+        Reason for algorithm termination:
+
+            * 0: The algorithm converged to the desired accuracy.
+            * 1: The maximum number of mesh nodes is exceeded.
+            * 2: A singular Jacobian encountered when solving the collocation
+              system.
+            * 3: The solver was unable to satisfy boundary conditions
+              tolerance on iteration 10.
+
+    message : string
+        Verbal description of the termination reason.
+    success : bool
+        True if the algorithm converged to the desired accuracy (``status=0``).
+
+    Notes
+    -----
+    This function implements a 4th order collocation algorithm with the
+    control of residuals similar to [1]_. A collocation system is solved
+    by a damped Newton method with an affine-invariant criterion function as
+    described in [3]_.
+
+    Note that in [1]_ integral residuals are defined without normalization
+    by interval lengths. So, their definition differs by a factor of
+    h**0.5 (h being an interval length) from the definition used here.
+
+    .. versionadded:: 0.18.0
+
+    References
+    ----------
+    .. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
+           Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
+           Number 3, pp. 299-316, 2001.
+    .. [2] L.F. Shampine, P. H. Muir and H. Xu, "A User-Friendly Fortran BVP
+           Solver".
+    .. [3] U. Ascher, R. Mattheij and R. Russell "Numerical Solution of
+           Boundary Value Problems for Ordinary Differential Equations".
+    .. [4] `Cauchy-Riemann equations
+            <https://en.wikipedia.org/wiki/Cauchy%E2%80%93Riemann_equations>`_ on
+            Wikipedia.
+
+    Examples
+    --------
+    In the first example, we solve Bratu's problem::
+
+        y'' + k * exp(y) = 0
+        y(0) = y(1) = 0
+
+    for k = 1.
+
+    We rewrite the equation as a first-order system and implement its
+    right-hand side evaluation::
+
+        y1' = y2
+        y2' = -exp(y1)
+
+    >>> import numpy as np
+    >>> def fun(x, y):
+    ...     return np.vstack((y[1], -np.exp(y[0])))
+
+    Implement evaluation of the boundary condition residuals:
+
+    >>> def bc(ya, yb):
+    ...     return np.array([ya[0], yb[0]])
+
+    Define the initial mesh with 5 nodes:
+
+    >>> x = np.linspace(0, 1, 5)
+
+    This problem is known to have two solutions. To obtain both of them, we
+    use two different initial guesses for y. We denote them by subscripts
+    a and b.
+
+    >>> y_a = np.zeros((2, x.size))
+    >>> y_b = np.zeros((2, x.size))
+    >>> y_b[0] = 3
+
+    Now we are ready to run the solver.
+
+    >>> from scipy.integrate import solve_bvp
+    >>> res_a = solve_bvp(fun, bc, x, y_a)
+    >>> res_b = solve_bvp(fun, bc, x, y_b)
+
+    Let's plot the two found solutions. We take advantage of having the
+    solution in spline form to produce a smooth plot.
+
+    >>> x_plot = np.linspace(0, 1, 100)
+    >>> y_plot_a = res_a.sol(x_plot)[0]
+    >>> y_plot_b = res_b.sol(x_plot)[0]
+    >>> import matplotlib.pyplot as plt
+    >>> plt.plot(x_plot, y_plot_a, label='y_a')
+    >>> plt.plot(x_plot, y_plot_b, label='y_b')
+    >>> plt.legend()
+    >>> plt.xlabel("x")
+    >>> plt.ylabel("y")
+    >>> plt.show()
+
+    We see that the two solutions have similar shape, but differ in scale
+    significantly.
+
+    In the second example, we solve a simple Sturm-Liouville problem::
+
+        y'' + k**2 * y = 0
+        y(0) = y(1) = 0
+
+    It is known that a non-trivial solution y = A * sin(k * x) is possible for
+    k = pi * n, where n is an integer. To establish the normalization constant
+    A = 1 we add a boundary condition::
+
+        y'(0) = k
+
+    Again, we rewrite our equation as a first-order system and implement its
+    right-hand side evaluation::
+
+        y1' = y2
+        y2' = -k**2 * y1
+
+    >>> def fun(x, y, p):
+    ...     k = p[0]
+    ...     return np.vstack((y[1], -k**2 * y[0]))
+
+    Note that parameters p are passed as a vector (with one element in our
+    case).
+
+    Implement the boundary conditions:
+
+    >>> def bc(ya, yb, p):
+    ...     k = p[0]
+    ...     return np.array([ya[0], yb[0], ya[1] - k])
+
+    Set up the initial mesh and guess for y. We aim to find the solution for
+    k = 2 * pi; to achieve that, we set values of y to approximately follow
+    sin(2 * pi * x):
+
+    >>> x = np.linspace(0, 1, 5)
+    >>> y = np.zeros((2, x.size))
+    >>> y[0, 1] = 1
+    >>> y[0, 3] = -1
+
+    Run the solver with 6 as an initial guess for k.
+
+    >>> sol = solve_bvp(fun, bc, x, y, p=[6])
+
+    We see that the found k is approximately correct:
+
+    >>> sol.p[0]
+    6.28329460046
+
+    And, finally, plot the solution to see the anticipated sinusoid:
+
+    >>> x_plot = np.linspace(0, 1, 100)
+    >>> y_plot = sol.sol(x_plot)[0]
+    >>> plt.plot(x_plot, y_plot)
+    >>> plt.xlabel("x")
+    >>> plt.ylabel("y")
+    >>> plt.show()
+    """
+    x = np.asarray(x, dtype=float)
+    if x.ndim != 1:
+        raise ValueError("`x` must be 1 dimensional.")
+    h = np.diff(x)
+    if np.any(h <= 0):
+        raise ValueError("`x` must be strictly increasing.")
+    a = x[0]
+
+    y = np.asarray(y)
+    if np.issubdtype(y.dtype, np.complexfloating):
+        dtype = complex
+    else:
+        dtype = float
+    y = y.astype(dtype, copy=False)
+
+    if y.ndim != 2:
+        raise ValueError("`y` must be 2 dimensional.")
+    if y.shape[1] != x.shape[0]:
+        raise ValueError("`y` is expected to have {} columns, but actually "
+                         "has {}.".format(x.shape[0], y.shape[1]))
+
+    if p is None:
+        p = np.array([])
+    else:
+        p = np.asarray(p, dtype=dtype)
+    if p.ndim != 1:
+        raise ValueError("`p` must be 1 dimensional.")
+
+    if tol < 100 * EPS:
+        warn("`tol` is too low, setting to {:.2e}".format(100 * EPS))
+        tol = 100 * EPS
+
+    if verbose not in [0, 1, 2]:
+        raise ValueError("`verbose` must be in [0, 1, 2].")
+
+    n = y.shape[0]
+    k = p.shape[0]
+
+    if S is not None:
+        S = np.asarray(S, dtype=dtype)
+        if S.shape != (n, n):
+            raise ValueError("`S` is expected to have shape {}, "
+                             "but actually has {}".format((n, n), S.shape))
+
+        # Compute I - S^+ S to impose necessary boundary conditions.
+        B = np.identity(n) - np.dot(pinv(S), S)
+
+        y[:, 0] = np.dot(B, y[:, 0])
+
+        # Compute (I - S)^+ to correct derivatives at x=a.
+        D = pinv(np.identity(n) - S)
+    else:
+        B = None
+        D = None
+
+    if bc_tol is None:
+        bc_tol = tol
+
+    # Maximum number of iterations
+    max_iteration = 10
+
+    fun_wrapped, bc_wrapped, fun_jac_wrapped, bc_jac_wrapped = wrap_functions(
+        fun, bc, fun_jac, bc_jac, k, a, S, D, dtype)
+
+    f = fun_wrapped(x, y, p)
+    if f.shape != y.shape:
+        raise ValueError("`fun` return is expected to have shape {}, "
+                         "but actually has {}.".format(y.shape, f.shape))
+
+    bc_res = bc_wrapped(y[:, 0], y[:, -1], p)
+    if bc_res.shape != (n + k,):
+        raise ValueError("`bc` return is expected to have shape {}, "
+                         "but actually has {}.".format((n + k,), bc_res.shape))
+
+    status = 0
+    iteration = 0
+    if verbose == 2:
+        print_iteration_header()
+
+    while True:
+        m = x.shape[0]
+
+        col_fun, jac_sys = prepare_sys(n, m, k, fun_wrapped, bc_wrapped,
+                                       fun_jac_wrapped, bc_jac_wrapped, x, h)
+        y, p, singular = solve_newton(n, m, h, col_fun, bc_wrapped, jac_sys,
+                                      y, p, B, tol, bc_tol)
+        iteration += 1
+
+        col_res, y_middle, f, f_middle = collocation_fun(fun_wrapped, y,
+                                                         p, x, h)
+        bc_res = bc_wrapped(y[:, 0], y[:, -1], p)
+        max_bc_res = np.max(abs(bc_res))
+
+        # This relation is not trivial, but can be verified.
+        r_middle = 1.5 * col_res / h
+        sol = create_spline(y, f, x, h)
+        rms_res = estimate_rms_residuals(fun_wrapped, sol, x, h, p,
+                                         r_middle, f_middle)
+        max_rms_res = np.max(rms_res)
+
+        if singular:
+            status = 2
+            break
+
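+        # Refinement policy: intervals whose residual mildly exceeds `tol`
+        # (between tol and 100 * tol) get one extra node; intervals failing
+        # badly (>= 100 * tol) get two.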
+        insert_1, = np.nonzero((rms_res > tol) & (rms_res < 100 * tol))
+        insert_2, = np.nonzero(rms_res >= 100 * tol)
+        nodes_added = insert_1.shape[0] + 2 * insert_2.shape[0]
+
+        if m + nodes_added > max_nodes:
+            status = 1
+            if verbose == 2:
+                nodes_added = "({})".format(nodes_added)
+                print_iteration_progress(iteration, max_rms_res, max_bc_res,
+                                         m, nodes_added)
+            break
+
+        if verbose == 2:
+            print_iteration_progress(iteration, max_rms_res, max_bc_res, m,
+                                     nodes_added)
+
+        if nodes_added > 0:
+            x = modify_mesh(x, insert_1, insert_2)
+            h = np.diff(x)
+            y = sol(x)
+        elif max_bc_res <= bc_tol:
+            status = 0
+            break
+        elif iteration >= max_iteration:
+            status = 3
+            break
+
+    if verbose > 0:
+        if status == 0:
+            print("Solved in {} iterations, number of nodes {}. \n"
+                  "Maximum relative residual: {:.2e} \n"
+                  "Maximum boundary residual: {:.2e}"
+                  .format(iteration, x.shape[0], max_rms_res, max_bc_res))
+        elif status == 1:
+            print("Number of nodes is exceeded after iteration {}. \n"
+                  "Maximum relative residual: {:.2e} \n"
+                  "Maximum boundary residual: {:.2e}"
+                  .format(iteration, max_rms_res, max_bc_res))
+        elif status == 2:
+            print("Singular Jacobian encountered when solving the collocation "
+                  "system on iteration {}. \n"
+                  "Maximum relative residual: {:.2e} \n"
+                  "Maximum boundary residual: {:.2e}"
+                  .format(iteration, max_rms_res, max_bc_res))
+        elif status == 3:
+            print("The solver was unable to satisfy boundary conditions "
+                  "tolerance on iteration {}. \n"
+                  "Maximum relative residual: {:.2e} \n"
+                  "Maximum boundary residual: {:.2e}"
+                  .format(iteration, max_rms_res, max_bc_res))
+
+    if p.size == 0:
+        p = None
+
+    return BVPResult(sol=sol, p=p, x=x, y=y, yp=f, rms_residuals=rms_res,
+                     niter=iteration, status=status,
+                     message=TERMINATION_MESSAGES[status], success=status == 0)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/__init__.py
new file mode 100644
index 00000000..f3c8aaa3
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/__init__.py
@@ -0,0 +1,8 @@
+"""Suite of ODE solvers implemented in Python."""
+from .ivp import solve_ivp
+from .rk import RK23, RK45, DOP853
+from .radau import Radau
+from .bdf import BDF
+from .lsoda import LSODA
+from .common import OdeSolution
+from .base import DenseOutput, OdeSolver
diff --git a/__packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/base.py b/__packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/base.py
new file mode 100644
index 00000000..ada0589d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/base.py
@@ -0,0 +1,274 @@
+import numpy as np
+
+
+def check_arguments(fun, y0, support_complex):
+    """Helper function for checking arguments common to all solvers."""
+    y0 = np.asarray(y0)
+    if np.issubdtype(y0.dtype, np.complexfloating):
+        if not support_complex:
+            raise ValueError("`y0` is complex, but the chosen solver does "
+                             "not support integration in a complex domain.")
+        dtype = complex
+    else:
+        dtype = float
+    y0 = y0.astype(dtype, copy=False)
+
+    if y0.ndim != 1:
+        raise ValueError("`y0` must be 1-dimensional.")
+
+    def fun_wrapped(t, y):
+        return np.asarray(fun(t, y), dtype=dtype)
+
+    return fun_wrapped, y0
+
+
+class OdeSolver:
+    """Base class for ODE solvers.
+
+    In order to implement a new solver you need to follow the guidelines:
+
+        1. A constructor must accept parameters presented in the base class
+           (listed below) along with any other parameters specific to a solver.
+        2. A constructor must accept arbitrary extraneous arguments
+           ``**extraneous``, but warn that these arguments are irrelevant
+           using `common.warn_extraneous` function. Do not pass these
+           arguments to the base class.
+        3. A solver must implement a private method `_step_impl(self)` which
+           propagates a solver one step further. It must return tuple
+           ``(success, message)``, where ``success`` is a boolean indicating
+           whether a step was successful, and ``message`` is a string
+           containing a description of the failure if a step failed or None
+           otherwise.
+        4. A solver must implement a private method `_dense_output_impl(self)`,
+           which returns a `DenseOutput` object covering the last successful
+           step.
+        5. A solver must have attributes listed below in Attributes section.
+           Note that ``t_old`` and ``step_size`` are updated automatically.
+        6. Use the `fun(self, t, y)` method for the system rhs evaluation;
+           this way the number of function evaluations (`nfev`) will be
+           tracked automatically.
+        7. For convenience, a base class provides `fun_single(self, t, y)` and
+           `fun_vectorized(self, t, y)` for evaluating the rhs in
+           non-vectorized and vectorized fashions respectively (regardless of
+           how `fun` from the constructor is implemented). These calls don't
+           increment `nfev`.
+        8. If a solver uses a Jacobian matrix and LU decompositions, it should
+           track the number of Jacobian evaluations (`njev`) and the number of
+           LU decompositions (`nlu`).
+        9. By convention, the function evaluations used to compute a finite
+           difference approximation of the Jacobian should not be counted in
+           `nfev`, thus use `fun_single(self, t, y)` or
+           `fun_vectorized(self, t, y)` when computing a finite difference
+           approximation of the Jacobian.
+
+    Parameters
+    ----------
+    fun : callable
+        Right-hand side of the system. The calling signature is ``fun(t, y)``.
+        Here ``t`` is a scalar and there are two options for ndarray ``y``.
+        It can either have shape (n,), then ``fun`` must return array_like with
+        shape (n,). Or, alternatively, it can have shape (n, n_points), then
+        ``fun`` must return array_like with shape (n, n_points) (each column
+        corresponds to a single column in ``y``). The choice between the two
+        options is determined by `vectorized` argument (see below).
+    t0 : float
+        Initial time.
+    y0 : array_like, shape (n,)
+        Initial state.
+    t_bound : float
+        Boundary time - the integration won't continue beyond it. It also
+        determines the direction of the integration.
+    vectorized : bool
+        Whether `fun` is implemented in a vectorized fashion.
+    support_complex : bool, optional
+        Whether integration in a complex domain should be supported.
+        Generally determined by a derived solver class capabilities.
+        Default is False.
+
+    Attributes
+    ----------
+    n : int
+        Number of equations.
+    status : string
+        Current status of the solver: 'running', 'finished' or 'failed'.
+    t_bound : float
+        Boundary time.
+    direction : float
+        Integration direction: +1 or -1.
+    t : float
+        Current time.
+    y : ndarray
+        Current state.
+    t_old : float
+        Previous time. None if no steps were made yet.
+    step_size : float
+        Size of the last successful step. None if no steps were made yet.
+    nfev : int
+        Number of the system's rhs evaluations.
+    njev : int
+        Number of the Jacobian evaluations.
+    nlu : int
+        Number of LU decompositions.
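+
+    Examples
+    --------
+    A minimal sketch of a solver following the guidelines above: a fixed-step
+    forward Euler method, for illustration only. A production solver would
+    also implement `_dense_output_impl` and some error control; here
+    `warn_extraneous` comes from the private ``common`` module of this
+    package::
+
+        from scipy.integrate._ivp.common import warn_extraneous
+
+        class Euler(OdeSolver):
+            def __init__(self, fun, t0, y0, t_bound, step=1e-3, **extraneous):
+                warn_extraneous(extraneous)
+                super().__init__(fun, t0, y0, t_bound, vectorized=False)
+                self.step_length = step
+
+            def _step_impl(self):
+                # Never step past t_bound; calling self.fun updates nfev.
+                t_new = self.t + self.direction * self.step_length
+                if self.direction * (t_new - self.t_bound) > 0:
+                    t_new = self.t_bound
+                self.y = self.y + (t_new - self.t) * self.fun(self.t, self.y)
+                self.t = t_new
+                return True, None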
+    """
+    TOO_SMALL_STEP = "Required step size is less than spacing between numbers."
+
+    def __init__(self, fun, t0, y0, t_bound, vectorized,
+                 support_complex=False):
+        self.t_old = None
+        self.t = t0
+        self._fun, self.y = check_arguments(fun, y0, support_complex)
+        self.t_bound = t_bound
+        self.vectorized = vectorized
+
+        if vectorized:
+            def fun_single(t, y):
+                return self._fun(t, y[:, None]).ravel()
+            fun_vectorized = self._fun
+        else:
+            fun_single = self._fun
+
+            def fun_vectorized(t, y):
+                f = np.empty_like(y)
+                for i, yi in enumerate(y.T):
+                    f[:, i] = self._fun(t, yi)
+                return f
+
+        def fun(t, y):
+            self.nfev += 1
+            return self.fun_single(t, y)
+
+        self.fun = fun
+        self.fun_single = fun_single
+        self.fun_vectorized = fun_vectorized
+
+        self.direction = np.sign(t_bound - t0) if t_bound != t0 else 1
+        self.n = self.y.size
+        self.status = 'running'
+
+        self.nfev = 0
+        self.njev = 0
+        self.nlu = 0
+
+    @property
+    def step_size(self):
+        if self.t_old is None:
+            return None
+        else:
+            return np.abs(self.t - self.t_old)
+
+    def step(self):
+        """Perform one integration step.
+
+        Returns
+        -------
+        message : string or None
+            Report from the solver. Typically a reason for a failure if
+            `self.status` is 'failed' after the step was taken or None
+            otherwise.
+        """
+        if self.status != 'running':
+            raise RuntimeError("Attempt to step on a failed or finished "
+                               "solver.")
+
+        if self.n == 0 or self.t == self.t_bound:
+            # Handle corner cases of empty solver or no integration.
+            self.t_old = self.t
+            self.t = self.t_bound
+            message = None
+            self.status = 'finished'
+        else:
+            t = self.t
+            success, message = self._step_impl()
+
+            if not success:
+                self.status = 'failed'
+            else:
+                self.t_old = t
+                if self.direction * (self.t - self.t_bound) >= 0:
+                    self.status = 'finished'
+
+        return message
+
+    def dense_output(self):
+        """Compute a local interpolant over the last successful step.
+
+        Returns
+        -------
+        sol : `DenseOutput`
+            Local interpolant over the last successful step.
+        """
+        if self.t_old is None:
+            raise RuntimeError("Dense output is available after a successful "
+                               "step was made.")
+
+        if self.n == 0 or self.t == self.t_old:
+            # Handle corner cases of empty solver and no integration.
+            return ConstantDenseOutput(self.t_old, self.t, self.y)
+        else:
+            return self._dense_output_impl()
+
+    def _step_impl(self):
+        raise NotImplementedError
+
+    def _dense_output_impl(self):
+        raise NotImplementedError
+
+
+class DenseOutput:
+    """Base class for local interpolant over step made by an ODE solver.
+
+    It interpolates between `t_min` and `t_max` (see Attributes below).
+    Evaluation outside this interval is not forbidden, but the accuracy is not
+    guaranteed.
+
+    Attributes
+    ----------
+    t_min, t_max : float
+        Time range of the interpolation.
+    """
+    def __init__(self, t_old, t):
+        self.t_old = t_old
+        self.t = t
+        self.t_min = min(t, t_old)
+        self.t_max = max(t, t_old)
+
+    def __call__(self, t):
+        """Evaluate the interpolant.
+
+        Parameters
+        ----------
+        t : float or array_like with shape (n_points,)
+            Points to evaluate the solution at.
+
+        Returns
+        -------
+        y : ndarray, shape (n,) or (n, n_points)
+            Computed values. Shape depends on whether `t` was a scalar or a
+            1-D array.
+        """
+        t = np.asarray(t)
+        if t.ndim > 1:
+            raise ValueError("`t` must be a float or a 1-D array.")
+        return self._call_impl(t)
+
+    def _call_impl(self, t):
+        raise NotImplementedError
+
+
+class ConstantDenseOutput(DenseOutput):
+    """Constant value interpolator.
+
+    This class is used for degenerate integration cases: equal integration
+    limits or a system with 0 equations.
+    """
+    def __init__(self, t_old, t, value):
+        super().__init__(t_old, t)
+        self.value = value
+
+    def _call_impl(self, t):
+        if t.ndim == 0:
+            return self.value
+        else:
+            ret = np.empty((self.value.shape[0], t.shape[0]))
+            ret[:] = self.value[:, None]
+            return ret
diff --git a/__packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/bdf.py b/__packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/bdf.py
new file mode 100644
index 00000000..34939d57
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/bdf.py
@@ -0,0 +1,470 @@
+import numpy as np
+from scipy.linalg import lu_factor, lu_solve
+from scipy.sparse import issparse, csc_matrix, eye
+from scipy.sparse.linalg import splu
+from scipy.optimize._numdiff import group_columns
+from .common import (validate_max_step, validate_tol, select_initial_step,
+                     norm, EPS, num_jac, validate_first_step,
+                     warn_extraneous)
+from .base import OdeSolver, DenseOutput
+
+
+MAX_ORDER = 5
+NEWTON_MAXITER = 4
+MIN_FACTOR = 0.2
+MAX_FACTOR = 10
+
+
+def compute_R(order, factor):
+    """Compute the matrix for changing the differences array."""
+    I = np.arange(1, order + 1)[:, None]
+    J = np.arange(1, order + 1)
+    M = np.zeros((order + 1, order + 1))
+    M[1:, 1:] = (I - 1 - factor * J) / I
+    M[0] = 1
+    return np.cumprod(M, axis=0)
+
+
+def change_D(D, order, factor):
+    """Change differences array in-place when step size is changed."""
+    R = compute_R(order, factor)
+    U = compute_R(order, 1)
+    RU = R.dot(U)
+    D[:order + 1] = np.dot(RU.T, D[:order + 1])
+
+
+def solve_bdf_system(fun, t_new, y_predict, c, psi, LU, solve_lu, scale, tol):
+    """Solve the algebraic system resulting from BDF method."""
+    d = 0
+    y = y_predict.copy()
+    dy_norm_old = None
+    converged = False
+    for k in range(NEWTON_MAXITER):
+        f = fun(t_new, y)
+        if not np.all(np.isfinite(f)):
+            break
+
+        dy = solve_lu(LU, c * f - psi - d)
+        dy_norm = norm(dy / scale)
+
+        if dy_norm_old is None:
+            rate = None
+        else:
+            rate = dy_norm / dy_norm_old
+
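+        # Estimate the linear convergence rate of the iteration and stop
+        # early if it diverges (rate >= 1) or if a geometric extrapolation of
+        # the remaining iterations cannot bring the error below `tol`.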
+        if (rate is not None and (rate >= 1 or
+                rate ** (NEWTON_MAXITER - k) / (1 - rate) * dy_norm > tol)):
+            break
+
+        y += dy
+        d += dy
+
+        if (dy_norm == 0 or
+                rate is not None and rate / (1 - rate) * dy_norm < tol):
+            converged = True
+            break
+
+        dy_norm_old = dy_norm
+
+    return converged, k + 1, y, d
+
+
+class BDF(OdeSolver):
+    """Implicit method based on backward-differentiation formulas.
+
+    This is a variable order method with the order varying automatically from
+    1 to 5. The general framework of the BDF algorithm is described in [1]_.
+    This class implements a quasi-constant step size as explained in [2]_.
+    The error estimation strategy for the constant-step BDF is derived in [3]_.
+    An accuracy enhancement using modified formulas (NDF) [2]_ is also implemented.
+
+    Can be applied in the complex domain.
+
+    Parameters
+    ----------
+    fun : callable
+        Right-hand side of the system. The calling signature is ``fun(t, y)``.
+        Here ``t`` is a scalar, and there are two options for the ndarray ``y``:
+        It can either have shape (n,); then ``fun`` must return array_like with
+        shape (n,). Alternatively it can have shape (n, k); then ``fun``
+        must return an array_like with shape (n, k), i.e. each column
+        corresponds to a single column in ``y``. The choice between the two
+        options is determined by `vectorized` argument (see below). The
+        vectorized implementation allows a faster approximation of the Jacobian
+        by finite differences (required for this solver).
+    t0 : float
+        Initial time.
+    y0 : array_like, shape (n,)
+        Initial state.
+    t_bound : float
+        Boundary time - the integration won't continue beyond it. It also
+        determines the direction of the integration.
+    first_step : float or None, optional
+        Initial step size. Default is ``None`` which means that the algorithm
+        should choose.
+    max_step : float, optional
+        Maximum allowed step size. Default is np.inf, i.e., the step size is not
+        bounded and determined solely by the solver.
+    rtol, atol : float and array_like, optional
+        Relative and absolute tolerances. The solver keeps the local error
+        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
+        relative accuracy (number of correct digits), while `atol` controls
+        absolute accuracy (number of correct decimal places). To achieve the
+        desired `rtol`, set `atol` to be smaller than the smallest value that
+        can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
+        allowable error. If `atol` is larger than ``rtol * abs(y)`` the
+        number of correct digits is not guaranteed. Conversely, to achieve the
+        desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
+        than `atol`. If components of y have different scales, it might be
+        beneficial to set different `atol` values for different components by
+        passing array_like with shape (n,) for `atol`. Default values are
+        1e-3 for `rtol` and 1e-6 for `atol`.
+    jac : {None, array_like, sparse_matrix, callable}, optional
+        Jacobian matrix of the right-hand side of the system with respect to y,
+        required by this method. The Jacobian matrix has shape (n, n) and its
+        element (i, j) is equal to ``d f_i / d y_j``.
+        There are three ways to define the Jacobian:
+
+            * If array_like or sparse_matrix, the Jacobian is assumed to
+              be constant.
+            * If callable, the Jacobian is assumed to depend on both
+              t and y; it will be called as ``jac(t, y)`` as necessary.
+              For the 'Radau' and 'BDF' methods, the return value might be a
+              sparse matrix.
+            * If None (default), the Jacobian will be approximated by
+              finite differences.
+
+        It is generally recommended to provide the Jacobian rather than
+        relying on a finite-difference approximation.
+    jac_sparsity : {None, array_like, sparse matrix}, optional
+        Defines a sparsity structure of the Jacobian matrix for a
+        finite-difference approximation. Its shape must be (n, n). This argument
+        is ignored if `jac` is not `None`. If the Jacobian has only few non-zero
+        elements in *each* row, providing the sparsity structure will greatly
+        speed up the computations [4]_. A zero entry means that a corresponding
+        element in the Jacobian is always zero. If None (default), the Jacobian
+        is assumed to be dense.
+    vectorized : bool, optional
+        Whether `fun` is implemented in a vectorized fashion. Default is False.
+
+    Attributes
+    ----------
+    n : int
+        Number of equations.
+    status : string
+        Current status of the solver: 'running', 'finished' or 'failed'.
+    t_bound : float
+        Boundary time.
+    direction : float
+        Integration direction: +1 or -1.
+    t : float
+        Current time.
+    y : ndarray
+        Current state.
+    t_old : float
+        Previous time. None if no steps were made yet.
+    step_size : float
+        Size of the last successful step. None if no steps were made yet.
+    nfev : int
+        Number of evaluations of the right-hand side.
+    njev : int
+        Number of evaluations of the Jacobian.
+    nlu : int
+        Number of LU decompositions.
+
+    References
+    ----------
+    .. [1] G. D. Byrne, A. C. Hindmarsh, "A Polyalgorithm for the Numerical
+           Solution of Ordinary Differential Equations", ACM Transactions on
+           Mathematical Software, Vol. 1, No. 1, pp. 71-96, March 1975.
+    .. [2] L. F. Shampine, M. W. Reichelt, "THE MATLAB ODE SUITE", SIAM J. SCI.
+           COMPUT., Vol. 18, No. 1, pp. 1-22, January 1997.
+    .. [3] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations I:
+           Nonstiff Problems", Sec. III.2.
+    .. [4] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
+           sparse Jacobian matrices", Journal of the Institute of Mathematics
+           and its Applications, 13, pp. 117-120, 1974.
+    """
+    def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
+                 rtol=1e-3, atol=1e-6, jac=None, jac_sparsity=None,
+                 vectorized=False, first_step=None, **extraneous):
+        warn_extraneous(extraneous)
+        super().__init__(fun, t0, y0, t_bound, vectorized,
+                         support_complex=True)
+        self.max_step = validate_max_step(max_step)
+        self.rtol, self.atol = validate_tol(rtol, atol, self.n)
+        f = self.fun(self.t, self.y)
+        if first_step is None:
+            self.h_abs = select_initial_step(self.fun, self.t, self.y, f,
+                                             self.direction, 1,
+                                             self.rtol, self.atol)
+        else:
+            self.h_abs = validate_first_step(first_step, t0, t_bound)
+        self.h_abs_old = None
+        self.error_norm_old = None
+
+        self.newton_tol = max(10 * EPS / rtol, min(0.03, rtol ** 0.5))
+
+        self.jac_factor = None
+        self.jac, self.J = self._validate_jac(jac, jac_sparsity)
+        if issparse(self.J):
+            def lu(A):
+                self.nlu += 1
+                return splu(A)
+
+            def solve_lu(LU, b):
+                return LU.solve(b)
+
+            I = eye(self.n, format='csc', dtype=self.y.dtype)
+        else:
+            def lu(A):
+                self.nlu += 1
+                return lu_factor(A, overwrite_a=True)
+
+            def solve_lu(LU, b):
+                return lu_solve(LU, b, overwrite_b=True)
+
+            I = np.identity(self.n, dtype=self.y.dtype)
+
+        self.lu = lu
+        self.solve_lu = solve_lu
+        self.I = I
+
+        kappa = np.array([0, -0.1850, -1/9, -0.0823, -0.0415, 0])
+        self.gamma = np.hstack((0, np.cumsum(1 / np.arange(1, MAX_ORDER + 1))))
+        self.alpha = (1 - kappa) * self.gamma
+        self.error_const = kappa * self.gamma + 1 / np.arange(1, MAX_ORDER + 2)
+
+        D = np.empty((MAX_ORDER + 3, self.n), dtype=self.y.dtype)
+        D[0] = self.y
+        D[1] = f * self.h_abs * self.direction
+        self.D = D
+
+        self.order = 1
+        self.n_equal_steps = 0
+        self.LU = None
+
+    def _validate_jac(self, jac, sparsity):
+        t0 = self.t
+        y0 = self.y
+
+        if jac is None:
+            if sparsity is not None:
+                if issparse(sparsity):
+                    sparsity = csc_matrix(sparsity)
+                groups = group_columns(sparsity)
+                sparsity = (sparsity, groups)
+
+            def jac_wrapped(t, y):
+                self.njev += 1
+                f = self.fun_single(t, y)
+                J, self.jac_factor = num_jac(self.fun_vectorized, t, y, f,
+                                             self.atol, self.jac_factor,
+                                             sparsity)
+                return J
+            J = jac_wrapped(t0, y0)
+        elif callable(jac):
+            J = jac(t0, y0)
+            self.njev += 1
+            if issparse(J):
+                J = csc_matrix(J, dtype=y0.dtype)
+
+                def jac_wrapped(t, y):
+                    self.njev += 1
+                    return csc_matrix(jac(t, y), dtype=y0.dtype)
+            else:
+                J = np.asarray(J, dtype=y0.dtype)
+
+                def jac_wrapped(t, y):
+                    self.njev += 1
+                    return np.asarray(jac(t, y), dtype=y0.dtype)
+
+            if J.shape != (self.n, self.n):
+                raise ValueError("`jac` is expected to have shape {}, but "
+                                 "actually has {}."
+                                 .format((self.n, self.n), J.shape))
+        else:
+            if issparse(jac):
+                J = csc_matrix(jac, dtype=y0.dtype)
+            else:
+                J = np.asarray(jac, dtype=y0.dtype)
+
+            if J.shape != (self.n, self.n):
+                raise ValueError("`jac` is expected to have shape {}, but "
+                                 "actually has {}."
+                                 .format((self.n, self.n), J.shape))
+            jac_wrapped = None
+
+        return jac_wrapped, J
+
+    def _step_impl(self):
+        t = self.t
+        D = self.D
+
+        max_step = self.max_step
+        min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t)
+        if self.h_abs > max_step:
+            h_abs = max_step
+            change_D(D, self.order, max_step / self.h_abs)
+            self.n_equal_steps = 0
+        elif self.h_abs < min_step:
+            h_abs = min_step
+            change_D(D, self.order, min_step / self.h_abs)
+            self.n_equal_steps = 0
+        else:
+            h_abs = self.h_abs
+
+        atol = self.atol
+        rtol = self.rtol
+        order = self.order
+
+        alpha = self.alpha
+        gamma = self.gamma
+        error_const = self.error_const
+
+        J = self.J
+        LU = self.LU
+        current_jac = self.jac is None
+
+        step_accepted = False
+        while not step_accepted:
+            if h_abs < min_step:
+                return False, self.TOO_SMALL_STEP
+
+            h = h_abs * self.direction
+            t_new = t + h
+
+            if self.direction * (t_new - self.t_bound) > 0:
+                t_new = self.t_bound
+                change_D(D, order, np.abs(t_new - t) / h_abs)
+                self.n_equal_steps = 0
+                LU = None
+
+            h = t_new - t
+            h_abs = np.abs(h)
+
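+            # Predict by extrapolating the backward differences; the BDF
+            # equation is then solved for the correction d = y - y_predict.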
+            y_predict = np.sum(D[:order + 1], axis=0)
+
+            scale = atol + rtol * np.abs(y_predict)
+            psi = np.dot(D[1: order + 1].T, gamma[1: order + 1]) / alpha[order]
+
+            converged = False
+            c = h / alpha[order]
+            while not converged:
+                if LU is None:
+                    LU = self.lu(self.I - c * J)
+
+                converged, n_iter, y_new, d = solve_bdf_system(
+                    self.fun, t_new, y_predict, c, psi, LU, self.solve_lu,
+                    scale, self.newton_tol)
+
+                if not converged:
+                    if current_jac:
+                        break
+                    J = self.jac(t_new, y_predict)
+                    LU = None
+                    current_jac = True
+
+            if not converged:
+                factor = 0.5
+                h_abs *= factor
+                change_D(D, order, factor)
+                self.n_equal_steps = 0
+                LU = None
+                continue
+
+            safety = 0.9 * (2 * NEWTON_MAXITER + 1) / (2 * NEWTON_MAXITER
+                                                       + n_iter)
+
+            scale = atol + rtol * np.abs(y_new)
+            error = error_const[order] * d
+            error_norm = norm(error / scale)
+
+            if error_norm > 1:
+                factor = max(MIN_FACTOR,
+                             safety * error_norm ** (-1 / (order + 1)))
+                h_abs *= factor
+                change_D(D, order, factor)
+                self.n_equal_steps = 0
+                # As we didn't have problems with convergence, we don't
+                # reset LU here.
+            else:
+                step_accepted = True
+
+        self.n_equal_steps += 1
+
+        self.t = t_new
+        self.y = y_new
+
+        self.h_abs = h_abs
+        self.J = J
+        self.LU = LU
+
+        # Update differences. The principal relation here is
+        # D^{j + 1} y_n = D^{j} y_n - D^{j} y_{n - 1}. Keep in mind that D
+        # contained difference for previous interpolating polynomial and
+        # d = D^{k + 1} y_n. Thus this elegant code follows.
+        D[order + 2] = d - D[order + 1]
+        D[order + 1] = d
+        for i in reversed(range(order + 1)):
+            D[i] += D[i + 1]
+
+        if self.n_equal_steps < order + 1:
+            return True, None
+
+        if order > 1:
+            error_m = error_const[order - 1] * D[order]
+            error_m_norm = norm(error_m / scale)
+        else:
+            error_m_norm = np.inf
+
+        if order < MAX_ORDER:
+            error_p = error_const[order + 1] * D[order + 2]
+            error_p_norm = norm(error_p / scale)
+        else:
+            error_p_norm = np.inf
+
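+        # Choose the order (among order - 1, order, order + 1) that allows
+        # the largest step: for candidate order q the step factor scales as
+        # error_norm ** (-1 / (q + 1)).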
+        error_norms = np.array([error_m_norm, error_norm, error_p_norm])
+        with np.errstate(divide='ignore'):
+            factors = error_norms ** (-1 / np.arange(order, order + 3))
+
+        delta_order = np.argmax(factors) - 1
+        order += delta_order
+        self.order = order
+
+        factor = min(MAX_FACTOR, safety * np.max(factors))
+        self.h_abs *= factor
+        change_D(D, order, factor)
+        self.n_equal_steps = 0
+        self.LU = None
+
+        return True, None
+
+    def _dense_output_impl(self):
+        return BdfDenseOutput(self.t_old, self.t, self.h_abs * self.direction,
+                              self.order, self.D[:self.order + 1].copy())
+
+
+class BdfDenseOutput(DenseOutput):
+    def __init__(self, t_old, t, h, order, D):
+        super().__init__(t_old, t)
+        self.order = order
+        self.t_shift = self.t - h * np.arange(self.order)
+        self.denom = h * (1 + np.arange(self.order))
+        self.D = D
+
+    def _call_impl(self, t):
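+        # Evaluate the interpolating polynomial in Newton-like form over the
+        # backward differences D:
+        # y(t) = D[0] + sum_j D[j] * prod_{i<j} (t - (t_n - i*h)) / ((i+1)*h).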
+        if t.ndim == 0:
+            x = (t - self.t_shift) / self.denom
+            p = np.cumprod(x)
+        else:
+            x = (t - self.t_shift[:, None]) / self.denom[:, None]
+            p = np.cumprod(x, axis=0)
+
+        y = np.dot(self.D[1:].T, p)
+        if y.ndim == 1:
+            y += self.D[0]
+        else:
+            y += self.D[0, :, None]
+
+        return y
diff --git a/__packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/common.py b/__packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/common.py
new file mode 100644
index 00000000..7f6d2331
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/common.py
@@ -0,0 +1,433 @@
+from itertools import groupby
+from warnings import warn
+import numpy as np
+from scipy.sparse import find, coo_matrix
+
+
+EPS = np.finfo(float).eps
+
+
+def validate_first_step(first_step, t0, t_bound):
+    """Assert that first_step is valid and return it."""
+    if first_step <= 0:
+        raise ValueError("`first_step` must be positive.")
+    if first_step > np.abs(t_bound - t0):
+        raise ValueError("`first_step` exceeds bounds.")
+    return first_step
+
+
+def validate_max_step(max_step):
+    """Assert that max_Step is valid and return it."""
+    if max_step <= 0:
+        raise ValueError("`max_step` must be positive.")
+    return max_step
+
+
+def warn_extraneous(extraneous):
+    """Display a warning for extraneous keyword arguments.
+
+    The initializer of each solver class is expected to collect keyword
+    arguments that it doesn't understand and warn about them. This function
+    prints a warning for each key in the supplied dictionary.
+
+    Parameters
+    ----------
+    extraneous : dict
+        Extraneous keyword arguments
+    """
+    if extraneous:
+        warn("The following arguments have no effect for a chosen solver: {}."
+             .format(", ".join("`{}`".format(x) for x in extraneous)))
+
+
+def validate_tol(rtol, atol, n):
+    """Validate tolerance values."""
+
+    if np.any(rtol < 100 * EPS):
+        warn("At least one element of `rtol` is too small. "
+             f"Setting `rtol = np.maximum(rtol, {100 * EPS})`.")
+        rtol = np.maximum(rtol, 100 * EPS)
+
+    atol = np.asarray(atol)
+    if atol.ndim > 0 and atol.shape != (n,):
+        raise ValueError("`atol` has wrong shape.")
+
+    if np.any(atol < 0):
+        raise ValueError("`atol` must be positive.")
+
+    return rtol, atol
+
+
+def norm(x):
+    """Compute RMS norm."""
+    return np.linalg.norm(x) / x.size ** 0.5
+
+
+def select_initial_step(fun, t0, y0, f0, direction, order, rtol, atol):
+    """Empirically select a good initial step.
+
+    The algorithm is described in [1]_.
+
+    Parameters
+    ----------
+    fun : callable
+        Right-hand side of the system.
+    t0 : float
+        Initial value of the independent variable.
+    y0 : ndarray, shape (n,)
+        Initial value of the dependent variable.
+    f0 : ndarray, shape (n,)
+        Initial value of the derivative, i.e., ``fun(t0, y0)``.
+    direction : float
+        Integration direction.
+    order : float
+        Error estimator order. It means that the error controlled by the
+        algorithm is proportional to ``step_size ** (order + 1)``.
+    rtol : float
+        Desired relative tolerance.
+    atol : float
+        Desired absolute tolerance.
+
+    Returns
+    -------
+    h_abs : float
+        Absolute value of the suggested initial step.
+
+    References
+    ----------
+    .. [1] E. Hairer, S. P. Norsett, G. Wanner, "Solving Ordinary Differential
+           Equations I: Nonstiff Problems", Sec. II.4.
+    """
+    if y0.size == 0:
+        return np.inf
+
+    scale = atol + np.abs(y0) * rtol
+    d0 = norm(y0 / scale)
+    d1 = norm(f0 / scale)
+    if d0 < 1e-5 or d1 < 1e-5:
+        h0 = 1e-6
+    else:
+        h0 = 0.01 * d0 / d1
+
+    y1 = y0 + h0 * direction * f0
+    f1 = fun(t0 + h0 * direction, y1)
+    d2 = norm((f1 - f0) / scale) / h0
+
+    if d1 <= 1e-15 and d2 <= 1e-15:
+        h1 = max(1e-6, h0 * 1e-3)
+    else:
+        h1 = (0.01 / max(d1, d2)) ** (1 / (order + 1))
+
+    return min(100 * h0, h1)
+
+
+class OdeSolution:
+    """Continuous ODE solution.
+
+    It is organized as a collection of `DenseOutput` objects which represent
+    local interpolants. It provides an algorithm to select the right
+    interpolant for each given point.
+
+    The interpolants cover the range between `t_min` and `t_max` (see
+    Attributes below). Evaluation outside this interval is not forbidden, but
+    the accuracy is not guaranteed.
+
+    When evaluating at a breakpoint (one of the values in `ts`) a segment with
+    the lower index is selected.
+
+    Parameters
+    ----------
+    ts : array_like, shape (n_segments + 1,)
+        Time instants between which local interpolants are defined. Must
+        be strictly increasing or decreasing (a zero-length segment with two
+        equal points is also allowed).
+    interpolants : list of DenseOutput with n_segments elements
+        Local interpolants. The i-th interpolant is assumed to be defined
+        between ``ts[i]`` and ``ts[i + 1]``.
+
+    Attributes
+    ----------
+    t_min, t_max : float
+        Time range of the interpolation.
+    """
+    def __init__(self, ts, interpolants):
+        ts = np.asarray(ts)
+        d = np.diff(ts)
+        # The first case covers integration over a zero-length segment.
+        if not ((ts.size == 2 and ts[0] == ts[-1])
+                or np.all(d > 0) or np.all(d < 0)):
+            raise ValueError("`ts` must be strictly increasing or decreasing.")
+
+        self.n_segments = len(interpolants)
+        if ts.shape != (self.n_segments + 1,):
+            raise ValueError("Numbers of time stamps and interpolants "
+                             "don't match.")
+
+        self.ts = ts
+        self.interpolants = interpolants
+        if ts[-1] >= ts[0]:
+            self.t_min = ts[0]
+            self.t_max = ts[-1]
+            self.ascending = True
+            self.ts_sorted = ts
+        else:
+            self.t_min = ts[-1]
+            self.t_max = ts[0]
+            self.ascending = False
+            self.ts_sorted = ts[::-1]
+
+    def _call_single(self, t):
+        # Here we preserve a certain symmetry that when t is in self.ts,
+        # then we prioritize a segment with a lower index.
+        if self.ascending:
+            ind = np.searchsorted(self.ts_sorted, t, side='left')
+        else:
+            ind = np.searchsorted(self.ts_sorted, t, side='right')
+
+        segment = min(max(ind - 1, 0), self.n_segments - 1)
+        if not self.ascending:
+            segment = self.n_segments - 1 - segment
+
+        return self.interpolants[segment](t)
+
+    def __call__(self, t):
+        """Evaluate the solution.
+
+        Parameters
+        ----------
+        t : float or array_like with shape (n_points,)
+            Points to evaluate at.
+
+        Returns
+        -------
+        y : ndarray, shape (n_states,) or (n_states, n_points)
+            Computed values. Shape depends on whether `t` is a scalar or a
+            1-D array.
+        """
+        t = np.asarray(t)
+
+        if t.ndim == 0:
+            return self._call_single(t)
+
+        order = np.argsort(t)
+        reverse = np.empty_like(order)
+        reverse[order] = np.arange(order.shape[0])
+        t_sorted = t[order]
+
+        # See comment in self._call_single.
+        if self.ascending:
+            segments = np.searchsorted(self.ts_sorted, t_sorted, side='left')
+        else:
+            segments = np.searchsorted(self.ts_sorted, t_sorted, side='right')
+        segments -= 1
+        segments[segments < 0] = 0
+        segments[segments > self.n_segments - 1] = self.n_segments - 1
+        if not self.ascending:
+            segments = self.n_segments - 1 - segments
+
+        ys = []
+        group_start = 0
+        for segment, group in groupby(segments):
+            group_end = group_start + len(list(group))
+            y = self.interpolants[segment](t_sorted[group_start:group_end])
+            ys.append(y)
+            group_start = group_end
+
+        ys = np.hstack(ys)
+        ys = ys[:, reverse]
+
+        return ys
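+
+
+# A minimal sketch (hypothetical helper, not from the original module): drive
+# the segment-selection logic with constant stand-in interpolants; in practice
+# the interpolants are `DenseOutput` objects produced by a solver.
+def _demo_ode_solution():
+    seg0 = lambda t: np.zeros((1,) + np.shape(t))  # y = 0 on [0, 1]
+    seg1 = lambda t: np.ones((1,) + np.shape(t))   # y = 1 on [1, 2]
+    sol = OdeSolution([0.0, 1.0, 2.0], [seg0, seg1])
+    # Scalar input gives shape (n_states,); a 1-D grid gives
+    # (n_states, n_points).
+    return sol(0.5), sol(np.array([0.5, 1.5]))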
+
+
+NUM_JAC_DIFF_REJECT = EPS ** 0.875
+NUM_JAC_DIFF_SMALL = EPS ** 0.75
+NUM_JAC_DIFF_BIG = EPS ** 0.25
+NUM_JAC_MIN_FACTOR = 1e3 * EPS
+NUM_JAC_FACTOR_INCREASE = 10
+NUM_JAC_FACTOR_DECREASE = 0.1
+
+
+def num_jac(fun, t, y, f, threshold, factor, sparsity=None):
+    """Finite differences Jacobian approximation tailored for ODE solvers.
+
+    This function computes finite difference approximation to the Jacobian
+    matrix of `fun` with respect to `y` using forward differences.
+    The Jacobian matrix has shape (n, n) and its element (i, j) is equal to
+    ``d f_i / d y_j``.
+
+    A special feature of this function is the ability to correct the step
+    size from iteration to iteration. The main idea is to keep the finite
+    difference significantly separated from its round-off error which
+    approximately equals ``EPS * np.abs(f)``. This reduces the possibility of a
+    huge error and assures that the estimated derivatives are reasonably close
+    to the true values (i.e., the finite difference approximation at least
+    qualitatively reflects the structure of the true Jacobian).
+
+    Parameters
+    ----------
+    fun : callable
+        Right-hand side of the system implemented in a vectorized fashion.
+    t : float
+        Current time.
+    y : ndarray, shape (n,)
+        Current state.
+    f : ndarray, shape (n,)
+        Value of the right hand side at (t, y).
+    threshold : float
+        Threshold for `y` value used for computing the step size as
+        ``factor * np.maximum(np.abs(y), threshold)``. Typically, the value of
+        absolute tolerance (atol) for a solver should be passed as `threshold`.
+    factor : ndarray with shape (n,) or None
+        Factor to use for computing the step size. Pass None for the very
+        first evaluation, then use the value returned from this function.
+    sparsity : tuple (structure, groups) or None
+        Sparsity structure of the Jacobian, `structure` must be csc_matrix.
+
+    Returns
+    -------
+    J : ndarray or csc_matrix, shape (n, n)
+        Jacobian matrix.
+    factor : ndarray, shape (n,)
+        Suggested `factor` for the next evaluation.
+    """
+    y = np.asarray(y)
+    n = y.shape[0]
+    if n == 0:
+        return np.empty((0, 0)), factor
+
+    if factor is None:
+        factor = np.full(n, EPS ** 0.5)
+    else:
+        factor = factor.copy()
+
+    # Direct the step as ODE dictates, hoping that such a step won't lead to
+    # a problematic region. For complex ODEs it makes sense to use the real
+    # part of f, as we take steps along the real axis.
+    f_sign = 2 * (np.real(f) >= 0).astype(float) - 1
+    y_scale = f_sign * np.maximum(threshold, np.abs(y))
+    h = (y + factor * y_scale) - y
+
+    # Make sure that the step is not 0 to start with. This loop is unlikely
+    # to be executed often.
+    for i in np.nonzero(h == 0)[0]:
+        while h[i] == 0:
+            factor[i] *= 10
+            h[i] = (y[i] + factor[i] * y_scale[i]) - y[i]
+
+    if sparsity is None:
+        return _dense_num_jac(fun, t, y, f, h, factor, y_scale)
+    else:
+        structure, groups = sparsity
+        return _sparse_num_jac(fun, t, y, f, h, factor, y_scale,
+                               structure, groups)
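+
+
+# A minimal sketch (hypothetical helper, not from the original module): for a
+# linear system f(t, y) = M @ y the estimated Jacobian should be close to M.
+def _demo_num_jac():
+    M = np.array([[0.0, 1.0], [-2.0, -3.0]])
+    fun = lambda t, y: M @ y  # vectorized: accepts y of shape (n,) or (n, k)
+    y = np.array([1.0, 1.0])
+    J, factor = num_jac(fun, 0.0, y, fun(0.0, y), threshold=1e-6, factor=None)
+    return J, factor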
+
+
+def _dense_num_jac(fun, t, y, f, h, factor, y_scale):
+    n = y.shape[0]
+    h_vecs = np.diag(h)
+    f_new = fun(t, y[:, None] + h_vecs)
+    diff = f_new - f[:, None]
+    max_ind = np.argmax(np.abs(diff), axis=0)
+    r = np.arange(n)
+    max_diff = np.abs(diff[max_ind, r])
+    scale = np.maximum(np.abs(f[max_ind]), np.abs(f_new[max_ind, r]))
+
+    diff_too_small = max_diff < NUM_JAC_DIFF_REJECT * scale
+    if np.any(diff_too_small):
+        ind, = np.nonzero(diff_too_small)
+        new_factor = NUM_JAC_FACTOR_INCREASE * factor[ind]
+        h_new = (y[ind] + new_factor * y_scale[ind]) - y[ind]
+        h_vecs[ind, ind] = h_new
+        f_new = fun(t, y[:, None] + h_vecs[:, ind])
+        diff_new = f_new - f[:, None]
+        max_ind = np.argmax(np.abs(diff_new), axis=0)
+        r = np.arange(ind.shape[0])
+        max_diff_new = np.abs(diff_new[max_ind, r])
+        scale_new = np.maximum(np.abs(f[max_ind]), np.abs(f_new[max_ind, r]))
+
+        update = max_diff[ind] * scale_new < max_diff_new * scale[ind]
+        if np.any(update):
+            update, = np.nonzero(update)
+            update_ind = ind[update]
+            factor[update_ind] = new_factor[update]
+            h[update_ind] = h_new[update]
+            diff[:, update_ind] = diff_new[:, update]
+            scale[update_ind] = scale_new[update]
+            max_diff[update_ind] = max_diff_new[update]
+
+    diff /= h
+
+    factor[max_diff < NUM_JAC_DIFF_SMALL * scale] *= NUM_JAC_FACTOR_INCREASE
+    factor[max_diff > NUM_JAC_DIFF_BIG * scale] *= NUM_JAC_FACTOR_DECREASE
+    factor = np.maximum(factor, NUM_JAC_MIN_FACTOR)
+
+    return diff, factor
+
+
+def _sparse_num_jac(fun, t, y, f, h, factor, y_scale, structure, groups):
+    n = y.shape[0]
+    n_groups = np.max(groups) + 1
+    h_vecs = np.empty((n_groups, n))
+    for group in range(n_groups):
+        e = np.equal(group, groups)
+        h_vecs[group] = h * e
+    h_vecs = h_vecs.T
+
+    f_new = fun(t, y[:, None] + h_vecs)
+    df = f_new - f[:, None]
+
+    i, j, _ = find(structure)
+    diff = coo_matrix((df[i, groups[j]], (i, j)), shape=(n, n)).tocsc()
+    max_ind = np.array(abs(diff).argmax(axis=0)).ravel()
+    r = np.arange(n)
+    max_diff = np.asarray(np.abs(diff[max_ind, r])).ravel()
+    scale = np.maximum(np.abs(f[max_ind]),
+                       np.abs(f_new[max_ind, groups[r]]))
+
+    diff_too_small = max_diff < NUM_JAC_DIFF_REJECT * scale
+    if np.any(diff_too_small):
+        ind, = np.nonzero(diff_too_small)
+        new_factor = NUM_JAC_FACTOR_INCREASE * factor[ind]
+        h_new = (y[ind] + new_factor * y_scale[ind]) - y[ind]
+        h_new_all = np.zeros(n)
+        h_new_all[ind] = h_new
+
+        groups_unique = np.unique(groups[ind])
+        groups_map = np.empty(n_groups, dtype=int)
+        h_vecs = np.empty((groups_unique.shape[0], n))
+        for k, group in enumerate(groups_unique):
+            e = np.equal(group, groups)
+            h_vecs[k] = h_new_all * e
+            groups_map[group] = k
+        h_vecs = h_vecs.T
+
+        f_new = fun(t, y[:, None] + h_vecs)
+        df = f_new - f[:, None]
+        i, j, _ = find(structure[:, ind])
+        diff_new = coo_matrix((df[i, groups_map[groups[ind[j]]]],
+                               (i, j)), shape=(n, ind.shape[0])).tocsc()
+
+        max_ind_new = np.array(abs(diff_new).argmax(axis=0)).ravel()
+        r = np.arange(ind.shape[0])
+        max_diff_new = np.asarray(np.abs(diff_new[max_ind_new, r])).ravel()
+        scale_new = np.maximum(
+            np.abs(f[max_ind_new]),
+            np.abs(f_new[max_ind_new, groups_map[groups[ind]]]))
+
+        update = max_diff[ind] * scale_new < max_diff_new * scale[ind]
+        if np.any(update):
+            update, = np.nonzero(update)
+            update_ind = ind[update]
+            factor[update_ind] = new_factor[update]
+            h[update_ind] = h_new[update]
+            diff[:, update_ind] = diff_new[:, update]
+            scale[update_ind] = scale_new[update]
+            max_diff[update_ind] = max_diff_new[update]
+
+    diff.data /= np.repeat(h, np.diff(diff.indptr))
+
+    factor[max_diff < NUM_JAC_DIFF_SMALL * scale] *= NUM_JAC_FACTOR_INCREASE
+    factor[max_diff > NUM_JAC_DIFF_BIG * scale] *= NUM_JAC_FACTOR_DECREASE
+    factor = np.maximum(factor, NUM_JAC_MIN_FACTOR)
+
+    return diff, factor
diff --git a/__packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/dop853_coefficients.py b/__packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/dop853_coefficients.py
new file mode 100644
index 00000000..f39f2f36
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/dop853_coefficients.py
@@ -0,0 +1,193 @@
+import numpy as np
+
+N_STAGES = 12
+N_STAGES_EXTENDED = 16
+INTERPOLATOR_POWER = 7
+
+C = np.array([0.0,
+              0.526001519587677318785587544488e-01,
+              0.789002279381515978178381316732e-01,
+              0.118350341907227396726757197510,
+              0.281649658092772603273242802490,
+              0.333333333333333333333333333333,
+              0.25,
+              0.307692307692307692307692307692,
+              0.651282051282051282051282051282,
+              0.6,
+              0.857142857142857142857142857142,
+              1.0,
+              1.0,
+              0.1,
+              0.2,
+              0.777777777777777777777777777778])
+
+A = np.zeros((N_STAGES_EXTENDED, N_STAGES_EXTENDED))
+A[1, 0] = 5.26001519587677318785587544488e-2
+
+A[2, 0] = 1.97250569845378994544595329183e-2
+A[2, 1] = 5.91751709536136983633785987549e-2
+
+A[3, 0] = 2.95875854768068491816892993775e-2
+A[3, 2] = 8.87627564304205475450678981324e-2
+
+A[4, 0] = 2.41365134159266685502369798665e-1
+A[4, 2] = -8.84549479328286085344864962717e-1
+A[4, 3] = 9.24834003261792003115737966543e-1
+
+A[5, 0] = 3.7037037037037037037037037037e-2
+A[5, 3] = 1.70828608729473871279604482173e-1
+A[5, 4] = 1.25467687566822425016691814123e-1
+
+A[6, 0] = 3.7109375e-2
+A[6, 3] = 1.70252211019544039314978060272e-1
+A[6, 4] = 6.02165389804559606850219397283e-2
+A[6, 5] = -1.7578125e-2
+
+A[7, 0] = 3.70920001185047927108779319836e-2
+A[7, 3] = 1.70383925712239993810214054705e-1
+A[7, 4] = 1.07262030446373284651809199168e-1
+A[7, 5] = -1.53194377486244017527936158236e-2
+A[7, 6] = 8.27378916381402288758473766002e-3
+
+A[8, 0] = 6.24110958716075717114429577812e-1
+A[8, 3] = -3.36089262944694129406857109825
+A[8, 4] = -8.68219346841726006818189891453e-1
+A[8, 5] = 2.75920996994467083049415600797e1
+A[8, 6] = 2.01540675504778934086186788979e1
+A[8, 7] = -4.34898841810699588477366255144e1
+
+A[9, 0] = 4.77662536438264365890433908527e-1
+A[9, 3] = -2.48811461997166764192642586468
+A[9, 4] = -5.90290826836842996371446475743e-1
+A[9, 5] = 2.12300514481811942347288949897e1
+A[9, 6] = 1.52792336328824235832596922938e1
+A[9, 7] = -3.32882109689848629194453265587e1
+A[9, 8] = -2.03312017085086261358222928593e-2
+
+A[10, 0] = -9.3714243008598732571704021658e-1
+A[10, 3] = 5.18637242884406370830023853209
+A[10, 4] = 1.09143734899672957818500254654
+A[10, 5] = -8.14978701074692612513997267357
+A[10, 6] = -1.85200656599969598641566180701e1
+A[10, 7] = 2.27394870993505042818970056734e1
+A[10, 8] = 2.49360555267965238987089396762
+A[10, 9] = -3.0467644718982195003823669022
+
+A[11, 0] = 2.27331014751653820792359768449
+A[11, 3] = -1.05344954667372501984066689879e1
+A[11, 4] = -2.00087205822486249909675718444
+A[11, 5] = -1.79589318631187989172765950534e1
+A[11, 6] = 2.79488845294199600508499808837e1
+A[11, 7] = -2.85899827713502369474065508674
+A[11, 8] = -8.87285693353062954433549289258
+A[11, 9] = 1.23605671757943030647266201528e1
+A[11, 10] = 6.43392746015763530355970484046e-1
+
+A[12, 0] = 5.42937341165687622380535766363e-2
+A[12, 5] = 4.45031289275240888144113950566
+A[12, 6] = 1.89151789931450038304281599044
+A[12, 7] = -5.8012039600105847814672114227
+A[12, 8] = 3.1116436695781989440891606237e-1
+A[12, 9] = -1.52160949662516078556178806805e-1
+A[12, 10] = 2.01365400804030348374776537501e-1
+A[12, 11] = 4.47106157277725905176885569043e-2
+
+A[13, 0] = 5.61675022830479523392909219681e-2
+A[13, 6] = 2.53500210216624811088794765333e-1
+A[13, 7] = -2.46239037470802489917441475441e-1
+A[13, 8] = -1.24191423263816360469010140626e-1
+A[13, 9] = 1.5329179827876569731206322685e-1
+A[13, 10] = 8.20105229563468988491666602057e-3
+A[13, 11] = 7.56789766054569976138603589584e-3
+A[13, 12] = -8.298e-3
+
+A[14, 0] = 3.18346481635021405060768473261e-2
+A[14, 5] = 2.83009096723667755288322961402e-2
+A[14, 6] = 5.35419883074385676223797384372e-2
+A[14, 7] = -5.49237485713909884646569340306e-2
+A[14, 10] = -1.08347328697249322858509316994e-4
+A[14, 11] = 3.82571090835658412954920192323e-4
+A[14, 12] = -3.40465008687404560802977114492e-4
+A[14, 13] = 1.41312443674632500278074618366e-1
+
+A[15, 0] = -4.28896301583791923408573538692e-1
+A[15, 5] = -4.69762141536116384314449447206
+A[15, 6] = 7.68342119606259904184240953878
+A[15, 7] = 4.06898981839711007970213554331
+A[15, 8] = 3.56727187455281109270669543021e-1
+A[15, 12] = -1.39902416515901462129418009734e-3
+A[15, 13] = 2.9475147891527723389556272149
+A[15, 14] = -9.15095847217987001081870187138
+
+
+B = A[N_STAGES, :N_STAGES]
+
+E3 = np.zeros(N_STAGES + 1)
+E3[:-1] = B.copy()
+E3[0] -= 0.244094488188976377952755905512
+E3[8] -= 0.733846688281611857341361741547
+E3[11] -= 0.220588235294117647058823529412e-1
+
+E5 = np.zeros(N_STAGES + 1)
+E5[0] = 0.1312004499419488073250102996e-1
+E5[5] = -0.1225156446376204440720569753e+1
+E5[6] = -0.4957589496572501915214079952
+E5[7] = 0.1664377182454986536961530415e+1
+E5[8] = -0.3503288487499736816886487290
+E5[9] = 0.3341791187130174790297318841
+E5[10] = 0.8192320648511571246570742613e-1
+E5[11] = -0.2235530786388629525884427845e-1
+
+# First 3 coefficients are computed separately.
+D = np.zeros((INTERPOLATOR_POWER - 3, N_STAGES_EXTENDED))
+D[0, 0] = -0.84289382761090128651353491142e+1
+D[0, 5] = 0.56671495351937776962531783590
+D[0, 6] = -0.30689499459498916912797304727e+1
+D[0, 7] = 0.23846676565120698287728149680e+1
+D[0, 8] = 0.21170345824450282767155149946e+1
+D[0, 9] = -0.87139158377797299206789907490
+D[0, 10] = 0.22404374302607882758541771650e+1
+D[0, 11] = 0.63157877876946881815570249290
+D[0, 12] = -0.88990336451333310820698117400e-1
+D[0, 13] = 0.18148505520854727256656404962e+2
+D[0, 14] = -0.91946323924783554000451984436e+1
+D[0, 15] = -0.44360363875948939664310572000e+1
+
+D[1, 0] = 0.10427508642579134603413151009e+2
+D[1, 5] = 0.24228349177525818288430175319e+3
+D[1, 6] = 0.16520045171727028198505394887e+3
+D[1, 7] = -0.37454675472269020279518312152e+3
+D[1, 8] = -0.22113666853125306036270938578e+2
+D[1, 9] = 0.77334326684722638389603898808e+1
+D[1, 10] = -0.30674084731089398182061213626e+2
+D[1, 11] = -0.93321305264302278729567221706e+1
+D[1, 12] = 0.15697238121770843886131091075e+2
+D[1, 13] = -0.31139403219565177677282850411e+2
+D[1, 14] = -0.93529243588444783865713862664e+1
+D[1, 15] = 0.35816841486394083752465898540e+2
+
+D[2, 0] = 0.19985053242002433820987653617e+2
+D[2, 5] = -0.38703730874935176555105901742e+3
+D[2, 6] = -0.18917813819516756882830838328e+3
+D[2, 7] = 0.52780815920542364900561016686e+3
+D[2, 8] = -0.11573902539959630126141871134e+2
+D[2, 9] = 0.68812326946963000169666922661e+1
+D[2, 10] = -0.10006050966910838403183860980e+1
+D[2, 11] = 0.77771377980534432092869265740
+D[2, 12] = -0.27782057523535084065932004339e+1
+D[2, 13] = -0.60196695231264120758267380846e+2
+D[2, 14] = 0.84320405506677161018159903784e+2
+D[2, 15] = 0.11992291136182789328035130030e+2
+
+D[3, 0] = -0.25693933462703749003312586129e+2
+D[3, 5] = -0.15418974869023643374053993627e+3
+D[3, 6] = -0.23152937917604549567536039109e+3
+D[3, 7] = 0.35763911791061412378285349910e+3
+D[3, 8] = 0.93405324183624310003907691704e+2
+D[3, 9] = -0.37458323136451633156875139351e+2
+D[3, 10] = 0.10409964950896230045147246184e+3
+D[3, 11] = 0.29840293426660503123344363579e+2
+D[3, 12] = -0.43533456590011143754432175058e+2
+D[3, 13] = 0.96324553959188282948394950600e+2
+D[3, 14] = -0.39177261675615439165231486172e+2
+D[3, 15] = -0.14972683625798562581422125276e+3
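+
+
+# A minimal sketch (hypothetical helper, not from the original module): quick
+# consistency checks on the tableau above. For a consistent Runge-Kutta method
+# each row of A sums to the corresponding node C[i], and the weights B sum to 1.
+def _check_tableau():
+    assert np.allclose(A.sum(axis=1), C)
+    assert np.isclose(B.sum(), 1.0)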
diff --git a/__packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/ivp.py b/__packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/ivp.py
new file mode 100644
index 00000000..3d515ac6
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/ivp.py
@@ -0,0 +1,678 @@
+import inspect
+import numpy as np
+from .bdf import BDF
+from .radau import Radau
+from .rk import RK23, RK45, DOP853
+from .lsoda import LSODA
+from scipy.optimize import OptimizeResult
+from .common import EPS, OdeSolution
+from .base import OdeSolver
+
+
+METHODS = {'RK23': RK23,
+           'RK45': RK45,
+           'DOP853': DOP853,
+           'Radau': Radau,
+           'BDF': BDF,
+           'LSODA': LSODA}
+
+
+MESSAGES = {0: "The solver successfully reached the end of the integration interval.",
+            1: "A termination event occurred."}
+
+
+class OdeResult(OptimizeResult):
+    pass
+
+
+def prepare_events(events):
+    """Standardize event functions and extract is_terminal and direction."""
+    if callable(events):
+        events = (events,)
+
+    if events is not None:
+        is_terminal = np.empty(len(events), dtype=bool)
+        direction = np.empty(len(events))
+        for i, event in enumerate(events):
+            try:
+                is_terminal[i] = event.terminal
+            except AttributeError:
+                is_terminal[i] = False
+
+            try:
+                direction[i] = event.direction
+            except AttributeError:
+                direction[i] = 0
+    else:
+        is_terminal = None
+        direction = None
+
+    return events, is_terminal, direction
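+
+
+# A minimal sketch (hypothetical helper, not from the original module):
+# `prepare_events` reads the optional `terminal` and `direction` attributes
+# off each event callable.
+def _demo_prepare_events():
+    def hit_zero(t, y):
+        return y[0]
+    hit_zero.terminal = True   # stop integration when this event fires
+    hit_zero.direction = -1    # trigger only on decreasing zero crossings
+    return prepare_events(hit_zero)  # -> ((hit_zero,), [True], [-1.])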
+
+
+def solve_event_equation(event, sol, t_old, t):
+    """Solve an equation corresponding to an ODE event.
+
+    The equation is ``event(t, y(t)) = 0``, here ``y(t)`` is known from an
+    ODE solver using some sort of interpolation. It is solved by
+    `scipy.optimize.brentq` with xtol=atol=4*EPS.
+
+    Parameters
+    ----------
+    event : callable
+        Function ``event(t, y)``.
+    sol : callable
+        Function ``sol(t)`` which evaluates an ODE solution between `t_old`
+        and  `t`.
+    t_old, t : float
+        Previous and new values of time. They will be used as a bracketing
+        interval.
+
+    Returns
+    -------
+    root : float
+        Found solution.
+    """
+    from scipy.optimize import brentq
+    return brentq(lambda t: event(t, sol(t)), t_old, t,
+                  xtol=4 * EPS, rtol=4 * EPS)
+
+
+def handle_events(sol, events, active_events, is_terminal, t_old, t):
+    """Helper function to handle events.
+
+    Parameters
+    ----------
+    sol : DenseOutput
+        Function ``sol(t)`` which evaluates an ODE solution between `t_old`
+        and  `t`.
+    events : list of callables, length n_events
+        Event functions with signatures ``event(t, y)``.
+    active_events : ndarray
+        Indices of events which occurred.
+    is_terminal : ndarray, shape (n_events,)
+        Which events are terminal.
+    t_old, t : float
+        Previous and new values of time.
+
+    Returns
+    -------
+    root_indices : ndarray
+        Indices of events whose functions vanish between `t_old` and `t`,
+        before a possible termination.
+    roots : ndarray
+        Values of t at which events occurred.
+    terminate : bool
+        Whether a terminal event occurred.
+    """
+    roots = [solve_event_equation(events[event_index], sol, t_old, t)
+             for event_index in active_events]
+
+    roots = np.asarray(roots)
+
+    if np.any(is_terminal[active_events]):
+        if t > t_old:
+            order = np.argsort(roots)
+        else:
+            order = np.argsort(-roots)
+        active_events = active_events[order]
+        roots = roots[order]
+        # Index of the first terminal event in integration order.
+        last = np.nonzero(is_terminal[active_events])[0][0]
+        active_events = active_events[:last + 1]
+        roots = roots[:last + 1]
+        terminate = True
+    else:
+        terminate = False
+
+    return active_events, roots, terminate
+
+
+def find_active_events(g, g_new, direction):
+    """Find which event occurred during an integration step.
+
+    Parameters
+    ----------
+    g, g_new : array_like, shape (n_events,)
+        Values of event functions at the current and next points.
+    direction : ndarray, shape (n_events,)
+        Event "direction" according to the definition in `solve_ivp`.
+
+    Returns
+    -------
+    active_events : ndarray
+        Indices of events which occurred during the step.
+    """
+    g, g_new = np.asarray(g), np.asarray(g_new)
+    up = (g <= 0) & (g_new >= 0)
+    down = (g >= 0) & (g_new <= 0)
+    either = up | down
+    mask = (up & (direction > 0) |
+            down & (direction < 0) |
+            either & (direction == 0))
+
+    return np.nonzero(mask)[0]
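+
+
+# A minimal sketch (hypothetical helper, not from the original module): with
+# direction 0, any sign change of an event function over a step counts.
+def _demo_find_active_events():
+    g = [1.0, -1.0]       # event values at the current point
+    g_new = [-1.0, -2.0]  # event values at the next point
+    # Only the first event changes sign, so the result is array([0]).
+    return find_active_events(g, g_new, np.array([0.0, 0.0]))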
+
+
+def solve_ivp(fun, t_span, y0, method='RK45', t_eval=None, dense_output=False,
+              events=None, vectorized=False, args=None, **options):
+    """Solve an initial value problem for a system of ODEs.
+
+    This function numerically integrates a system of ordinary differential
+    equations given an initial value::
+
+        dy / dt = f(t, y)
+        y(t0) = y0
+
+    Here t is a 1-D independent variable (time), y(t) is an
+    N-D vector-valued function (state), and an N-D
+    vector-valued function f(t, y) determines the differential equations.
+    The goal is to find y(t) approximately satisfying the differential
+    equations, given an initial value y(t0)=y0.
+
+    Some of the solvers support integration in the complex domain, but note
+    that for stiff ODE solvers, the right-hand side must be
+    complex-differentiable (satisfy Cauchy-Riemann equations [11]_).
+    To solve a problem in the complex domain, pass y0 with a complex data type.
+    Another option always available is to rewrite your problem for real and
+    imaginary parts separately.
+
+    Parameters
+    ----------
+    fun : callable
+        Right-hand side of the system. The calling signature is ``fun(t, y)``.
+        Here `t` is a scalar, and there are two options for the ndarray `y`:
+        It can either have shape (n,); then `fun` must return array_like with
+        shape (n,). Alternatively, it can have shape (n, k); then `fun`
+        must return an array_like with shape (n, k), i.e., each column
+        corresponds to a single column in `y`. The choice between the two
+        options is determined by `vectorized` argument (see below). The
+        vectorized implementation allows a faster approximation of the Jacobian
+        by finite differences (required for stiff solvers).
+    t_span : 2-member sequence
+        Interval of integration (t0, tf). The solver starts with t=t0 and
+        integrates until it reaches t=tf. Both t0 and tf must be floats
+        or values interpretable by the float conversion function.
+    y0 : array_like, shape (n,)
+        Initial state. For problems in the complex domain, pass `y0` with a
+        complex data type (even if the initial value is purely real).
+    method : string or `OdeSolver`, optional
+        Integration method to use:
+
+            * 'RK45' (default): Explicit Runge-Kutta method of order 5(4) [1]_.
+              The error is controlled assuming accuracy of the fourth-order
+              method, but steps are taken using the fifth-order accurate
+              formula (local extrapolation is done). A quartic interpolation
+              polynomial is used for the dense output [2]_. Can be applied in
+              the complex domain.
+            * 'RK23': Explicit Runge-Kutta method of order 3(2) [3]_. The error
+              is controlled assuming accuracy of the second-order method, but
+              steps are taken using the third-order accurate formula (local
+              extrapolation is done). A cubic Hermite polynomial is used for the
+              dense output. Can be applied in the complex domain.
+            * 'DOP853': Explicit Runge-Kutta method of order 8 [13]_.
+              Python implementation of the "DOP853" algorithm originally
+              written in Fortran [14]_. An interpolation polynomial accurate
+              to 7-th order is used for the dense output.
+              Can be applied in the complex domain.
+            * 'Radau': Implicit Runge-Kutta method of the Radau IIA family of
+              order 5 [4]_. The error is controlled with a third-order accurate
+              embedded formula. A cubic polynomial which satisfies the
+              collocation conditions is used for the dense output.
+            * 'BDF': Implicit multi-step variable-order (1 to 5) method based
+              on a backward differentiation formula for the derivative
+              approximation [5]_. The implementation follows the one described
+              in [6]_. A quasi-constant step scheme is used and accuracy is
+              enhanced using the NDF modification. Can be applied in the
+              complex domain.
+            * 'LSODA': Adams/BDF method with automatic stiffness detection and
+              switching [7]_, [8]_. This is a wrapper of the Fortran solver
+              from ODEPACK.
+
+        Explicit Runge-Kutta methods ('RK23', 'RK45', 'DOP853') should be used
+        for non-stiff problems and implicit methods ('Radau', 'BDF') for
+        stiff problems [9]_. Among Runge-Kutta methods, 'DOP853' is recommended
+        for solving with high precision (low values of `rtol` and `atol`).
+
+        If not sure, first try to run 'RK45'. If it makes unusually many
+        iterations, diverges, or fails, your problem is likely to be stiff and
+        you should use 'Radau' or 'BDF'. 'LSODA' can also be a good universal
+        choice, but it might be somewhat less convenient to work with as it
+        wraps old Fortran code.
+
+        You can also pass an arbitrary class derived from `OdeSolver` which
+        implements the solver.
+    t_eval : array_like or None, optional
+        Times at which to store the computed solution, must be sorted and lie
+        within `t_span`. If None (default), use points selected by the solver.
+    dense_output : bool, optional
+        Whether to compute a continuous solution. Default is False.
+    events : callable, or list of callables, optional
+        Events to track. If None (default), no events will be tracked.
+        Each event occurs at the zeros of a continuous function of time and
+        state. Each function must have the signature ``event(t, y)`` and return
+        a float. The solver will find an accurate value of `t` at which
+        ``event(t, y(t)) = 0`` using a root-finding algorithm. By default, all
+        zeros will be found. The solver looks for a sign change over each step,
+        so if multiple zero crossings occur within one step, events may be
+        missed. Additionally each `event` function might have the following
+        attributes:
+
+            terminal: bool, optional
+                Whether to terminate integration if this event occurs.
+                Implicitly False if not assigned.
+            direction: float, optional
+                Direction of a zero crossing. If `direction` is positive,
+                `event` will only trigger when going from negative to positive,
+                and vice versa if `direction` is negative. If 0, then either
+            direction will trigger the event. Implicitly 0 if not assigned.
+
+        You can assign attributes like ``event.terminal = True`` to any
+        function in Python.
+    vectorized : bool, optional
+        Whether `fun` is implemented in a vectorized fashion. Default is False.
+    args : tuple, optional
+        Additional arguments to pass to the user-defined functions.  If given,
+        the additional arguments are passed to all user-defined functions.
+        So if, for example, `fun` has the signature ``fun(t, y, a, b, c)``,
+        then `jac` (if given) and any event functions must have the same
+        signature, and `args` must be a tuple of length 3.
+    **options
+        Options passed to a chosen solver. All options available for already
+        implemented solvers are listed below.
+    first_step : float or None, optional
+        Initial step size. Default is `None` which means that the algorithm
+        should choose.
+    max_step : float, optional
+        Maximum allowed step size. Default is np.inf, i.e., the step size is not
+        bounded and determined solely by the solver.
+    rtol, atol : float or array_like, optional
+        Relative and absolute tolerances. The solver keeps the local error
+        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
+        relative accuracy (number of correct digits), while `atol` controls
+        absolute accuracy (number of correct decimal places). To achieve the
+        desired `rtol`, set `atol` to be smaller than the smallest value that
+        can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
+        allowable error. If `atol` is larger than ``rtol * abs(y)`` the
+        number of correct digits is not guaranteed. Conversely, to achieve the
+        desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
+        than `atol`. If components of y have different scales, it might be
+        beneficial to set different `atol` values for different components by
+        passing array_like with shape (n,) for `atol`. Default values are
+        1e-3 for `rtol` and 1e-6 for `atol`.
+    jac : array_like, sparse_matrix, callable or None, optional
+        Jacobian matrix of the right-hand side of the system with respect
+        to y, required by the 'Radau', 'BDF' and 'LSODA' method. The
+        Jacobian matrix has shape (n, n) and its element (i, j) is equal to
+        ``d f_i / d y_j``.  There are three ways to define the Jacobian:
+
+            * If array_like or sparse_matrix, the Jacobian is assumed to
+              be constant. Not supported by 'LSODA'.
+            * If callable, the Jacobian is assumed to depend on both
+              t and y; it will be called as ``jac(t, y)``, as necessary.
+              For 'Radau' and 'BDF' methods, the return value might be a
+              sparse matrix.
+            * If None (default), the Jacobian will be approximated by
+              finite differences.
+
+        It is generally recommended to provide the Jacobian rather than
+        relying on a finite-difference approximation.
+    jac_sparsity : array_like, sparse matrix or None, optional
+        Defines a sparsity structure of the Jacobian matrix for a finite-
+        difference approximation. Its shape must be (n, n). This argument
+        is ignored if `jac` is not `None`. If the Jacobian has only a few
+        non-zero elements in *each* row, providing the sparsity structure
+        will greatly speed up the computations [10]_. A zero entry means that
+        a corresponding element in the Jacobian is always zero. If None
+        (default), the Jacobian is assumed to be dense.
+        Not supported by 'LSODA', see `lband` and `uband` instead.
+    lband, uband : int or None, optional
+        Parameters defining the bandwidth of the Jacobian for the 'LSODA'
+        method, i.e., ``jac[i, j] != 0 only for i - lband <= j <= i + uband``.
+        Default is None. Setting these requires your jac routine to return the
+        Jacobian in the packed format: the returned array must have ``n``
+        columns and ``uband + lband + 1`` rows in which Jacobian diagonals are
+        written. Specifically ``jac_packed[uband + i - j , j] = jac[i, j]``.
+        The same format is used in `scipy.linalg.solve_banded` (check for an
+        illustration).  These parameters can be also used with ``jac=None`` to
+        reduce the number of Jacobian elements estimated by finite differences.
+    min_step : float, optional
+        The minimum allowed step size for 'LSODA' method.
+        By default `min_step` is zero.
+
+    Returns
+    -------
+    Bunch object with the following fields defined:
+    t : ndarray, shape (n_points,)
+        Time points.
+    y : ndarray, shape (n, n_points)
+        Values of the solution at `t`.
+    sol : `OdeSolution` or None
+        Found solution as `OdeSolution` instance; None if `dense_output` was
+        set to False.
+    t_events : list of ndarray or None
+        Contains, for each event type, an array of times at which an event of
+        that type was detected. None if `events` was None.
+    y_events : list of ndarray or None
+        For each value of `t_events`, the corresponding value of the solution.
+        None if `events` was None.
+    nfev : int
+        Number of evaluations of the right-hand side.
+    njev : int
+        Number of evaluations of the Jacobian.
+    nlu : int
+        Number of LU decompositions.
+    status : int
+        Reason for algorithm termination:
+
+            * -1: Integration step failed.
+            *  0: The solver successfully reached the end of `tspan`.
+            *  1: A termination event occurred.
+
+    message : string
+        Human-readable description of the termination reason.
+    success : bool
+        True if the solver reached the interval end or a termination event
+        occurred (``status >= 0``).
+
+    References
+    ----------
+    .. [1] J. R. Dormand, P. J. Prince, "A family of embedded Runge-Kutta
+           formulae", Journal of Computational and Applied Mathematics, Vol. 6,
+           No. 1, pp. 19-26, 1980.
+    .. [2] L. W. Shampine, "Some Practical Runge-Kutta Formulas", Mathematics
+           of Computation, Vol. 46, No. 173, pp. 135-150, 1986.
+    .. [3] P. Bogacki, L.F. Shampine, "A 3(2) Pair of Runge-Kutta Formulas",
+           Appl. Math. Lett. Vol. 2, No. 4. pp. 321-325, 1989.
+    .. [4] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations II:
+           Stiff and Differential-Algebraic Problems", Sec. IV.8.
+    .. [5] `Backward Differentiation Formula
+            `_
+            on Wikipedia.
+    .. [6] L. F. Shampine, M. W. Reichelt, "THE MATLAB ODE SUITE", SIAM J. SCI.
+           COMPUTE., Vol. 18, No. 1, pp. 1-22, January 1997.
+    .. [7] A. C. Hindmarsh, "ODEPACK, A Systematized Collection of ODE
+           Solvers," IMACS Transactions on Scientific Computation, Vol 1.,
+           pp. 55-64, 1983.
+    .. [8] L. Petzold, "Automatic selection of methods for solving stiff and
+           nonstiff systems of ordinary differential equations", SIAM Journal
+           on Scientific and Statistical Computing, Vol. 4, No. 1, pp. 136-148,
+           1983.
+    .. [9] `Stiff equation `_ on
+           Wikipedia.
+    .. [10] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
+            sparse Jacobian matrices", Journal of the Institute of Mathematics
+            and its Applications, 13, pp. 117-120, 1974.
+    .. [11] `Cauchy-Riemann equations
+             `_ on
+             Wikipedia.
+    .. [12] `Lotka-Volterra equations
+            `_
+            on Wikipedia.
+    .. [13] E. Hairer, S. P. Norsett, G. Wanner, "Solving Ordinary Differential
+            Equations I: Nonstiff Problems", Sec. II.
+    .. [14] `Page with original Fortran code of DOP853
+            `_.
+
+    Examples
+    --------
+    Basic exponential decay showing automatically chosen time points.
+
+    >>> import numpy as np
+    >>> from scipy.integrate import solve_ivp
+    >>> def exponential_decay(t, y): return -0.5 * y
+    >>> sol = solve_ivp(exponential_decay, [0, 10], [2, 4, 8])
+    >>> print(sol.t)
+    [ 0.          0.11487653  1.26364188  3.06061781  4.81611105  6.57445806
+      8.33328988 10.        ]
+    >>> print(sol.y)
+    [[2.         1.88836035 1.06327177 0.43319312 0.18017253 0.07483045
+      0.03107158 0.01350781]
+     [4.         3.7767207  2.12654355 0.86638624 0.36034507 0.14966091
+      0.06214316 0.02701561]
+     [8.         7.5534414  4.25308709 1.73277247 0.72069014 0.29932181
+      0.12428631 0.05403123]]
+
+    Specifying points where the solution is desired.
+
+    >>> sol = solve_ivp(exponential_decay, [0, 10], [2, 4, 8],
+    ...                 t_eval=[0, 1, 2, 4, 10])
+    >>> print(sol.t)
+    [ 0  1  2  4 10]
+    >>> print(sol.y)
+    [[2.         1.21305369 0.73534021 0.27066736 0.01350938]
+     [4.         2.42610739 1.47068043 0.54133472 0.02701876]
+     [8.         4.85221478 2.94136085 1.08266944 0.05403753]]
+
+    Cannon fired upward with terminal event upon impact. The ``terminal`` and
+    ``direction`` fields of an event are applied by monkey patching a function.
+    Here ``y[0]`` is position and ``y[1]`` is velocity. The projectile starts
+    at position 0 with velocity +10. Note that the integration never reaches
+    t=100 because the event is terminal.
+
+    >>> def upward_cannon(t, y): return [y[1], -0.5]
+    >>> def hit_ground(t, y): return y[0]
+    >>> hit_ground.terminal = True
+    >>> hit_ground.direction = -1
+    >>> sol = solve_ivp(upward_cannon, [0, 100], [0, 10], events=hit_ground)
+    >>> print(sol.t_events)
+    [array([40.])]
+    >>> print(sol.t)
+    [0.00000000e+00 9.99900010e-05 1.09989001e-03 1.10988901e-02
+     1.11088891e-01 1.11098890e+00 1.11099890e+01 4.00000000e+01]
+
+    Use `dense_output` and `events` to find the position, which is 100, at the apex
+    of the cannonball's trajectory. Apex is not defined as terminal, so both
+    apex and hit_ground are found. There is no information at t=20, so the sol
+    attribute is used to evaluate the solution. The sol attribute is returned
+    by setting ``dense_output=True``. Alternatively, the `y_events` attribute
+    can be used to access the solution at the time of the event.
+
+    >>> def apex(t, y): return y[1]
+    >>> sol = solve_ivp(upward_cannon, [0, 100], [0, 10],
+    ...                 events=(hit_ground, apex), dense_output=True)
+    >>> print(sol.t_events)
+    [array([40.]), array([20.])]
+    >>> print(sol.t)
+    [0.00000000e+00 9.99900010e-05 1.09989001e-03 1.10988901e-02
+     1.11088891e-01 1.11098890e+00 1.11099890e+01 4.00000000e+01]
+    >>> print(sol.sol(sol.t_events[1][0]))
+    [100.   0.]
+    >>> print(sol.y_events)
+    [array([[-5.68434189e-14, -1.00000000e+01]]), array([[1.00000000e+02, 1.77635684e-15]])]
+
+    As an example of a system with additional parameters, we'll implement
+    the Lotka-Volterra equations [12]_.
+
+    >>> def lotkavolterra(t, z, a, b, c, d):
+    ...     x, y = z
+    ...     return [a*x - b*x*y, -c*y + d*x*y]
+    ...
+
+    We pass in the parameter values a=1.5, b=1, c=3 and d=1 with the `args`
+    argument.
+
+    >>> sol = solve_ivp(lotkavolterra, [0, 15], [10, 5], args=(1.5, 1, 3, 1),
+    ...                 dense_output=True)
+
+    Compute a dense solution and plot it.
+
+    >>> t = np.linspace(0, 15, 300)
+    >>> z = sol.sol(t)
+    >>> import matplotlib.pyplot as plt
+    >>> plt.plot(t, z.T)
+    >>> plt.xlabel('t')
+    >>> plt.legend(['x', 'y'], shadow=True)
+    >>> plt.title('Lotka-Volterra System')
+    >>> plt.show()
+
+    """
+    if method not in METHODS and not (
+            inspect.isclass(method) and issubclass(method, OdeSolver)):
+        raise ValueError("`method` must be one of {} or OdeSolver class."
+                         .format(METHODS))
+
+    t0, tf = map(float, t_span)
+
+    if args is not None:
+        # Wrap the user's fun (and jac, if given) in lambdas to hide the
+        # additional parameters.  Pass in the original fun as a keyword
+        # argument to keep it in the scope of the lambda.
+        try:
+            _ = [*(args)]
+        except TypeError as exp:
+            suggestion_tuple = (
+                "Supplied 'args' cannot be unpacked. Please supply `args`"
+                f" as a tuple (e.g. `args=({args},)`)"
+            )
+            raise TypeError(suggestion_tuple) from exp
+
+        fun = lambda t, x, fun=fun: fun(t, x, *args)
+        jac = options.get('jac')
+        if callable(jac):
+            options['jac'] = lambda t, x: jac(t, x, *args)
+
+    if t_eval is not None:
+        t_eval = np.asarray(t_eval)
+        if t_eval.ndim != 1:
+            raise ValueError("`t_eval` must be 1-dimensional.")
+
+        if np.any(t_eval < min(t0, tf)) or np.any(t_eval > max(t0, tf)):
+            raise ValueError("Values in `t_eval` are not within `t_span`.")
+
+        d = np.diff(t_eval)
+        if tf > t0 and np.any(d <= 0) or tf < t0 and np.any(d >= 0):
+            raise ValueError("Values in `t_eval` are not properly sorted.")
+
+        if tf > t0:
+            t_eval_i = 0
+        else:
+            # Make order of t_eval decreasing to use np.searchsorted.
+            t_eval = t_eval[::-1]
+            # This will be an upper bound for slices.
+            t_eval_i = t_eval.shape[0]
+
+    if method in METHODS:
+        method = METHODS[method]
+
+    solver = method(fun, t0, y0, tf, vectorized=vectorized, **options)
+
+    if t_eval is None:
+        ts = [t0]
+        ys = [y0]
+    elif t_eval is not None and dense_output:
+        ts = []
+        ti = [t0]
+        ys = []
+    else:
+        ts = []
+        ys = []
+
+    interpolants = []
+
+    events, is_terminal, event_dir = prepare_events(events)
+
+    if events is not None:
+        if args is not None:
+            # Wrap user functions in lambdas to hide the additional parameters.
+            # The original event function is passed as a keyword argument to the
+            # lambda to keep the original function in scope (i.e., avoid the
+            # late binding closure "gotcha").
+            events = [lambda t, x, event=event: event(t, x, *args)
+                      for event in events]
+        g = [event(t0, y0) for event in events]
+        t_events = [[] for _ in range(len(events))]
+        y_events = [[] for _ in range(len(events))]
+    else:
+        t_events = None
+        y_events = None
+
+    status = None
+    while status is None:
+        message = solver.step()
+
+        if solver.status == 'finished':
+            status = 0
+        elif solver.status == 'failed':
+            status = -1
+            break
+
+        t_old = solver.t_old
+        t = solver.t
+        y = solver.y
+
+        if dense_output:
+            sol = solver.dense_output()
+            interpolants.append(sol)
+        else:
+            sol = None
+
+        if events is not None:
+            g_new = [event(t, y) for event in events]
+            active_events = find_active_events(g, g_new, event_dir)
+            if active_events.size > 0:
+                if sol is None:
+                    sol = solver.dense_output()
+
+                root_indices, roots, terminate = handle_events(
+                    sol, events, active_events, is_terminal, t_old, t)
+
+                for e, te in zip(root_indices, roots):
+                    t_events[e].append(te)
+                    y_events[e].append(sol(te))
+
+                if terminate:
+                    status = 1
+                    t = roots[-1]
+                    y = sol(t)
+
+            g = g_new
+
+        if t_eval is None:
+            ts.append(t)
+            ys.append(y)
+        else:
+            # The value in t_eval equal to t will be included.
+            if solver.direction > 0:
+                t_eval_i_new = np.searchsorted(t_eval, t, side='right')
+                t_eval_step = t_eval[t_eval_i:t_eval_i_new]
+            else:
+                t_eval_i_new = np.searchsorted(t_eval, t, side='left')
+                # It has to be done with two slice operations, because
+                # you can't slice to 0th element inclusive using backward
+                # slicing.
+                t_eval_step = t_eval[t_eval_i_new:t_eval_i][::-1]
+
+            if t_eval_step.size > 0:
+                if sol is None:
+                    sol = solver.dense_output()
+                ts.append(t_eval_step)
+                ys.append(sol(t_eval_step))
+                t_eval_i = t_eval_i_new
+
+        if t_eval is not None and dense_output:
+            ti.append(t)
+
+    message = MESSAGES.get(status, message)
+
+    if t_events is not None:
+        t_events = [np.asarray(te) for te in t_events]
+        y_events = [np.asarray(ye) for ye in y_events]
+
+    if t_eval is None:
+        ts = np.array(ts)
+        ys = np.vstack(ys).T
+    elif ts:
+        ts = np.hstack(ts)
+        ys = np.hstack(ys)
+
+    if dense_output:
+        if t_eval is None:
+            sol = OdeSolution(ts, interpolants)
+        else:
+            sol = OdeSolution(ti, interpolants)
+    else:
+        sol = None
+
+    return OdeResult(t=ts, y=ys, sol=sol, t_events=t_events, y_events=y_events,
+                     nfev=solver.nfev, njev=solver.njev, nlu=solver.nlu,
+                     status=status, message=message, success=status >= 0)
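+
+
+# A minimal sketch (hypothetical helper, not from the original module): a
+# mildly stiff linear problem of the kind for which the docstring above
+# recommends an implicit method such as 'BDF'.
+def _demo_stiff_solve():
+    M = np.array([[-1.0, 0.0], [0.0, -1000.0]])  # widely separated time scales
+    sol = solve_ivp(lambda t, y: M @ y, [0, 10], [1.0, 1.0], method='BDF')
+    return sol.status  # 0 on success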
diff --git a/__packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/lsoda.py b/__packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/lsoda.py
new file mode 100644
index 00000000..3a31efb9
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/lsoda.py
@@ -0,0 +1,192 @@
+import numpy as np
+from scipy.integrate import ode
+from .common import validate_tol, validate_first_step, warn_extraneous
+from .base import OdeSolver, DenseOutput
+
+
+class LSODA(OdeSolver):
+    """Adams/BDF method with automatic stiffness detection and switching.
+
+    This is a wrapper to the Fortran solver from ODEPACK [1]_. It switches
+    automatically between the nonstiff Adams method and the stiff BDF method.
+    The method was originally detailed in [2]_.
+
+    Parameters
+    ----------
+    fun : callable
+        Right-hand side of the system. The calling signature is ``fun(t, y)``.
+        Here ``t`` is a scalar, and there are two options for the ndarray ``y``:
+        It can either have shape (n,); then ``fun`` must return array_like with
+        shape (n,). Alternatively it can have shape (n, k); then ``fun``
+        must return an array_like with shape (n, k), i.e. each column
+        corresponds to a single column in ``y``. The choice between the two
+        options is determined by `vectorized` argument (see below). The
+        vectorized implementation allows a faster approximation of the Jacobian
+        by finite differences (required for this solver).
+    t0 : float
+        Initial time.
+    y0 : array_like, shape (n,)
+        Initial state.
+    t_bound : float
+        Boundary time - the integration won't continue beyond it. It also
+        determines the direction of the integration.
+    first_step : float or None, optional
+        Initial step size. Default is ``None`` which means that the algorithm
+        should choose.
+    min_step : float, optional
+        Minimum allowed step size. Default is 0.0, i.e., the step size is not
+        bounded and determined solely by the solver.
+    max_step : float, optional
+        Maximum allowed step size. Default is np.inf, i.e., the step size is not
+        bounded and determined solely by the solver.
+    rtol, atol : float or array_like, optional
+        Relative and absolute tolerances. The solver keeps the local error
+        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
+        relative accuracy (number of correct digits), while `atol` controls
+        absolute accuracy (number of correct decimal places). To achieve the
+        desired `rtol`, set `atol` to be smaller than the smallest value that
+        can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
+        allowable error. If `atol` is larger than ``rtol * abs(y)`` the
+        number of correct digits is not guaranteed. Conversely, to achieve the
+        desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
+        than `atol`. If components of y have different scales, it might be
+        beneficial to set different `atol` values for different components by
+        passing array_like with shape (n,) for `atol`. Default values are
+        1e-3 for `rtol` and 1e-6 for `atol`.
+    jac : None or callable, optional
+        Jacobian matrix of the right-hand side of the system with respect to
+        ``y``. The Jacobian matrix has shape (n, n) and its element (i, j) is
+        equal to ``d f_i / d y_j``. The function will be called as
+        ``jac(t, y)``. If None (default), the Jacobian will be
+        approximated by finite differences. It is generally recommended to
+        provide the Jacobian rather than relying on a finite-difference
+        approximation.
+    lband, uband : int or None
+        Parameters defining the bandwidth of the Jacobian,
+        i.e., ``jac[i, j] != 0 only for i - lband <= j <= i + uband``. Setting
+        these requires your jac routine to return the Jacobian in the packed format:
+        the returned array must have ``n`` columns and ``uband + lband + 1``
+        rows in which Jacobian diagonals are written. Specifically
+        ``jac_packed[uband + i - j , j] = jac[i, j]``. The same format is used
+        in `scipy.linalg.solve_banded` (check for an illustration).
+        These parameters can be also used with ``jac=None`` to reduce the
+        number of Jacobian elements estimated by finite differences.
+    vectorized : bool, optional
+        Whether `fun` is implemented in a vectorized fashion. A vectorized
+        implementation offers no advantages for this solver. Default is False.
+
+    Attributes
+    ----------
+    n : int
+        Number of equations.
+    status : string
+        Current status of the solver: 'running', 'finished' or 'failed'.
+    t_bound : float
+        Boundary time.
+    direction : float
+        Integration direction: +1 or -1.
+    t : float
+        Current time.
+    y : ndarray
+        Current state.
+    t_old : float
+        Previous time. None if no steps were made yet.
+    nfev : int
+        Number of evaluations of the right-hand side.
+    njev : int
+        Number of evaluations of the Jacobian.
+
+    References
+    ----------
+    .. [1] A. C. Hindmarsh, "ODEPACK, A Systematized Collection of ODE
+           Solvers," IMACS Transactions on Scientific Computation, Vol 1.,
+           pp. 55-64, 1983.
+    .. [2] L. Petzold, "Automatic selection of methods for solving stiff and
+           nonstiff systems of ordinary differential equations", SIAM Journal
+           on Scientific and Statistical Computing, Vol. 4, No. 1, pp. 136-148,
+           1983.
+    """
+    def __init__(self, fun, t0, y0, t_bound, first_step=None, min_step=0.0,
+                 max_step=np.inf, rtol=1e-3, atol=1e-6, jac=None, lband=None,
+                 uband=None, vectorized=False, **extraneous):
+        warn_extraneous(extraneous)
+        super().__init__(fun, t0, y0, t_bound, vectorized)
+
+        if first_step is None:
+            first_step = 0  # LSODA value for automatic selection.
+        else:
+            first_step = validate_first_step(first_step, t0, t_bound)
+
+        first_step *= self.direction
+
+        if max_step == np.inf:
+            max_step = 0  # LSODA value for infinity.
+        elif max_step <= 0:
+            raise ValueError("`max_step` must be positive.")
+
+        if min_step < 0:
+            raise ValueError("`min_step` must be nonnegative.")
+
+        rtol, atol = validate_tol(rtol, atol, self.n)
+
+        solver = ode(self.fun, jac)
+        solver.set_integrator('lsoda', rtol=rtol, atol=atol, max_step=max_step,
+                              min_step=min_step, first_step=first_step,
+                              lband=lband, uband=uband)
+        solver.set_initial_value(y0, t0)
+
+        # Inject t_bound into rwork array as needed for itask=5.
+        solver._integrator.rwork[0] = self.t_bound
+        solver._integrator.call_args[4] = solver._integrator.rwork
+
+        self._lsoda_solver = solver
+
+    def _step_impl(self):
+        solver = self._lsoda_solver
+        integrator = solver._integrator
+
+        # From lsoda.step and lsoda.integrate, itask=5 means take a single
+        # step and do not go past t_bound.
+        itask = integrator.call_args[2]
+        integrator.call_args[2] = 5
+        solver._y, solver.t = integrator.run(
+            solver.f, solver.jac or (lambda: None), solver._y, solver.t,
+            self.t_bound, solver.f_params, solver.jac_params)
+        integrator.call_args[2] = itask
+
+        if solver.successful():
+            self.t = solver.t
+            self.y = solver._y
+            # In the LSODA Fortran source, njev is equal to nlu.
+            self.njev = integrator.iwork[12]
+            self.nlu = integrator.iwork[12]
+            return True, None
+        else:
+            return False, 'Unexpected istate in LSODA.'
+
+    def _dense_output_impl(self):
+        iwork = self._lsoda_solver._integrator.iwork
+        rwork = self._lsoda_solver._integrator.rwork
+
+        order = iwork[14]
+        h = rwork[11]
+        yh = np.reshape(rwork[20:20 + (order + 1) * self.n],
+                        (self.n, order + 1), order='F').copy()
+
+        return LsodaDenseOutput(self.t_old, self.t, h, order, yh)
+
+
+class LsodaDenseOutput(DenseOutput):
+    def __init__(self, t_old, t, h, order, yh):
+        super().__init__(t_old, t)
+        self.h = h
+        self.yh = yh
+        self.p = np.arange(order + 1)
+
+    def _call_impl(self, t):
+        if t.ndim == 0:
+            x = ((t - self.t) / self.h) ** self.p
+        else:
+            x = ((t - self.t) / self.h) ** self.p[:, None]
+
+        return np.dot(self.yh, x)
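+
+
+# A minimal sketch (hypothetical helper, not from the original module): LSODA
+# is normally selected through `solve_ivp(..., method='LSODA')` rather than
+# instantiated directly.
+def _demo_lsoda():
+    # Imported here to avoid a circular import at module load time.
+    from scipy.integrate import solve_ivp
+    sol = solve_ivp(lambda t, y: -0.5 * y, [0, 10], [2.0], method='LSODA',
+                    dense_output=True)
+    return sol.sol(5.0)  # evaluated through LsodaDenseOutput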
diff --git a/__packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/radau.py b/__packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/radau.py
new file mode 100644
index 00000000..b8c80a19
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/radau.py
@@ -0,0 +1,565 @@
+import numpy as np
+from scipy.linalg import lu_factor, lu_solve
+from scipy.sparse import csc_matrix, issparse, eye
+from scipy.sparse.linalg import splu
+from scipy.optimize._numdiff import group_columns
+from .common import (validate_max_step, validate_tol, select_initial_step,
+                     norm, num_jac, EPS, warn_extraneous,
+                     validate_first_step)
+from .base import OdeSolver, DenseOutput
+
+S6 = 6 ** 0.5
+
+# Butcher tableau. A is not used directly, see below.
+C = np.array([(4 - S6) / 10, (4 + S6) / 10, 1])
+E = np.array([-13 - 7 * S6, -13 + 7 * S6, -1]) / 3
+
+# Eigendecomposition of A is done: A = T L T**-1. There is 1 real eigenvalue
+# and a complex conjugate pair. They are written below.
+MU_REAL = 3 + 3 ** (2 / 3) - 3 ** (1 / 3)
+MU_COMPLEX = (3 + 0.5 * (3 ** (1 / 3) - 3 ** (2 / 3))
+              - 0.5j * (3 ** (5 / 6) + 3 ** (7 / 6)))
+
+# These are transformation matrices.
+T = np.array([
+    [0.09443876248897524, -0.14125529502095421, 0.03002919410514742],
+    [0.25021312296533332, 0.20412935229379994, -0.38294211275726192],
+    [1, 1, 0]])
+TI = np.array([
+    [4.17871859155190428, 0.32768282076106237, 0.52337644549944951],
+    [-4.17871859155190428, -0.32768282076106237, 0.47662355450055044],
+    [0.50287263494578682, -2.57192694985560522, 0.59603920482822492]])
+# These linear combinations are used in the algorithm.
+TI_REAL = TI[0]
+TI_COMPLEX = TI[1] + 1j * TI[2]
+
+# Interpolator coefficients.
+P = np.array([
+    [13/3 + 7*S6/3, -23/3 - 22*S6/3, 10/3 + 5 * S6],
+    [13/3 - 7*S6/3, -23/3 + 22*S6/3, 10/3 - 5 * S6],
+    [1/3, -8/3, 10/3]])
+
+
+NEWTON_MAXITER = 6  # Maximum number of Newton iterations.
+MIN_FACTOR = 0.2  # Minimum allowed decrease in a step size.
+MAX_FACTOR = 10  # Maximum allowed increase in a step size.
+
+
+def solve_collocation_system(fun, t, y, h, Z0, scale, tol,
+                             LU_real, LU_complex, solve_lu):
+    """Solve the collocation system.
+
+    Parameters
+    ----------
+    fun : callable
+        Right-hand side of the system.
+    t : float
+        Current time.
+    y : ndarray, shape (n,)
+        Current state.
+    h : float
+        Step to try.
+    Z0 : ndarray, shape (3, n)
+        Initial guess for the solution. It determines new values of `y` at
+        ``t + h * C`` as ``y + Z0``, where ``C`` is the vector of the Radau
+        method constants.
+    scale : ndarray, shape (n)
+        Problem tolerance scale, i.e. ``rtol * abs(y) + atol``.
+    tol : float
+        Tolerance to which to solve the system. This value is compared with
+        the error normalized by `scale`.
+    LU_real, LU_complex
+        LU decompositions of the system Jacobians.
+    solve_lu : callable
+        Callable which solves a linear system given a LU decomposition. The
+        signature is ``solve_lu(LU, b)``.
+
+    Returns
+    -------
+    converged : bool
+        Whether iterations converged.
+    n_iter : int
+        Number of completed iterations.
+    Z : ndarray, shape (3, n)
+        Found solution.
+    rate : float
+        The rate of convergence.
+    """
+    n = y.shape[0]
+    M_real = MU_REAL / h
+    M_complex = MU_COMPLEX / h
+
+    W = TI.dot(Z0)
+    Z = Z0
+
+    F = np.empty((3, n))
+    ch = h * C
+
+    dW_norm_old = None
+    dW = np.empty_like(W)
+    converged = False
+    rate = None
+    for k in range(NEWTON_MAXITER):
+        for i in range(3):
+            F[i] = fun(t + ch[i], y + Z[i])
+
+        if not np.all(np.isfinite(F)):
+            break
+
+        f_real = F.T.dot(TI_REAL) - M_real * W[0]
+        f_complex = F.T.dot(TI_COMPLEX) - M_complex * (W[1] + 1j * W[2])
+
+        dW_real = solve_lu(LU_real, f_real)
+        dW_complex = solve_lu(LU_complex, f_complex)
+
+        dW[0] = dW_real
+        dW[1] = dW_complex.real
+        dW[2] = dW_complex.imag
+
+        dW_norm = norm(dW / scale)
+        if dW_norm_old is not None:
+            rate = dW_norm / dW_norm_old
+
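+        # Stop early if the iteration diverges (rate >= 1) or if,
+        # extrapolating the geometric convergence rate over the remaining
+        # iterations, the error cannot be brought below the tolerance.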
+        if (rate is not None and (rate >= 1 or
+                rate ** (NEWTON_MAXITER - k) / (1 - rate) * dW_norm > tol)):
+            break
+
+        W += dW
+        Z = T.dot(W)
+
+        if (dW_norm == 0 or
+                rate is not None and rate / (1 - rate) * dW_norm < tol):
+            converged = True
+            break
+
+        dW_norm_old = dW_norm
+
+    return converged, k + 1, Z, rate
+
+
+def predict_factor(h_abs, h_abs_old, error_norm, error_norm_old):
+    """Predict by which factor to increase/decrease the step size.
+
+    The algorithm is described in [1]_.
+
+    Parameters
+    ----------
+    h_abs, h_abs_old : float
+        Current and previous values of the step size, `h_abs_old` can be None
+        (see Notes).
+    error_norm, error_norm_old : float
+        Current and previous values of the error norm, `error_norm_old` can
+        be None (see Notes).
+
+    Returns
+    -------
+    factor : float
+        Predicted factor.
+
+    Notes
+    -----
+    If `h_abs_old` and `error_norm_old` are both not None then a two-step
+    algorithm is used, otherwise a one-step algorithm is used.
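+
+    For example, with no previous step information and
+    ``error_norm = 0.0625``, the one-step rule gives
+    ``factor = 0.0625 ** -0.25 = 2``, i.e. the step size would double before
+    the caller applies its safety multiplier.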
+
+    References
+    ----------
+    .. [1] E. Hairer, G. Wanner, "Solving Ordinary Differential
+           Equations II: Stiff and Differential-Algebraic Problems", Sec. IV.8.
+    """
+    if error_norm_old is None or h_abs_old is None or error_norm == 0:
+        multiplier = 1
+    else:
+        multiplier = h_abs / h_abs_old * (error_norm_old / error_norm) ** 0.25
+
+    with np.errstate(divide='ignore'):
+        factor = min(1, multiplier) * error_norm ** -0.25
+
+    return factor
+
+
+class Radau(OdeSolver):
+    """Implicit Runge-Kutta method of Radau IIA family of order 5.
+
+    The implementation follows [1]_. The error is controlled with a
+    third-order accurate embedded formula. A cubic polynomial which satisfies
+    the collocation conditions is used for the dense output.
+
+    Parameters
+    ----------
+    fun : callable
+        Right-hand side of the system. The calling signature is ``fun(t, y)``.
+        Here ``t`` is a scalar, and there are two options for the ndarray ``y``:
+        It can either have shape (n,); then ``fun`` must return array_like with
+        shape (n,). Alternatively it can have shape (n, k); then ``fun``
+        must return an array_like with shape (n, k), i.e., each column
+        corresponds to a single column in ``y``. The choice between the two
+        options is determined by `vectorized` argument (see below). The
+        vectorized implementation allows a faster approximation of the Jacobian
+        by finite differences (required for this solver).
+    t0 : float
+        Initial time.
+    y0 : array_like, shape (n,)
+        Initial state.
+    t_bound : float
+        Boundary time - the integration won't continue beyond it. It also
+        determines the direction of the integration.
+    first_step : float or None, optional
+        Initial step size. Default is ``None`` which means that the algorithm
+        should choose.
+    max_step : float, optional
+        Maximum allowed step size. Default is np.inf, i.e., the step size is not
+        bounded and determined solely by the solver.
+    rtol, atol : float and array_like, optional
+        Relative and absolute tolerances. The solver keeps the local error
+        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
+        relative accuracy (number of correct digits), while `atol` controls
+        absolute accuracy (number of correct decimal places). To achieve the
+        desired `rtol`, set `atol` to be smaller than the smallest value that
+        can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
+        allowable error. If `atol` is larger than ``rtol * abs(y)`` the
+        number of correct digits is not guaranteed. Conversely, to achieve the
+        desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
+        than `atol`. If components of y have different scales, it might be
+        beneficial to set different `atol` values for different components by
+        passing array_like with shape (n,) for `atol`. Default values are
+        1e-3 for `rtol` and 1e-6 for `atol`.
+    jac : {None, array_like, sparse_matrix, callable}, optional
+        Jacobian matrix of the right-hand side of the system with respect to
+        y, required by this method. The Jacobian matrix has shape (n, n) and
+        its element (i, j) is equal to ``d f_i / d y_j``.
+        There are three ways to define the Jacobian:
+
+            * If array_like or sparse_matrix, the Jacobian is assumed to
+              be constant.
+            * If callable, the Jacobian is assumed to depend on both
+              t and y; it will be called as ``jac(t, y)`` as necessary.
+              For the 'Radau' and 'BDF' methods, the return value might be a
+              sparse matrix.
+            * If None (default), the Jacobian will be approximated by
+              finite differences.
+
+        It is generally recommended to provide the Jacobian rather than
+        relying on a finite-difference approximation.
+    jac_sparsity : {None, array_like, sparse matrix}, optional
+        Defines a sparsity structure of the Jacobian matrix for a
+        finite-difference approximation. Its shape must be (n, n). This argument
+        is ignored if `jac` is not `None`. If the Jacobian has only a few
+        non-zero elements in *each* row, providing the sparsity structure
+        will greatly speed up the computations [2]_. A zero entry means that
+        a corresponding element in the Jacobian is always zero. If None
+        (default), the Jacobian is assumed to be dense.
+    vectorized : bool, optional
+        Whether `fun` is implemented in a vectorized fashion. Default is False.
+
+    Attributes
+    ----------
+    n : int
+        Number of equations.
+    status : string
+        Current status of the solver: 'running', 'finished' or 'failed'.
+    t_bound : float
+        Boundary time.
+    direction : float
+        Integration direction: +1 or -1.
+    t : float
+        Current time.
+    y : ndarray
+        Current state.
+    t_old : float
+        Previous time. None if no steps were made yet.
+    step_size : float
+        Size of the last successful step. None if no steps were made yet.
+    nfev : int
+        Number of evaluations of the right-hand side.
+    njev : int
+        Number of evaluations of the Jacobian.
+    nlu : int
+        Number of LU decompositions.
+
+    References
+    ----------
+    .. [1] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations II:
+           Stiff and Differential-Algebraic Problems", Sec. IV.8.
+    .. [2] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
+           sparse Jacobian matrices", Journal of the Institute of Mathematics
+           and its Applications, 13, pp. 117-120, 1974.
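+
+    Examples
+    --------
+    A minimal usage sketch through the public ``solve_ivp`` entry point on a
+    mildly stiff linear problem:
+
+    >>> from scipy.integrate import solve_ivp
+    >>> sol = solve_ivp(lambda t, y: -50 * (y - 1.0), [0, 1], [0.0],
+    ...                 method='Radau', rtol=1e-6, atol=1e-9)
+    >>> sol.success
+    True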
+    """
+    def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
+                 rtol=1e-3, atol=1e-6, jac=None, jac_sparsity=None,
+                 vectorized=False, first_step=None, **extraneous):
+        warn_extraneous(extraneous)
+        super().__init__(fun, t0, y0, t_bound, vectorized)
+        self.y_old = None
+        self.max_step = validate_max_step(max_step)
+        self.rtol, self.atol = validate_tol(rtol, atol, self.n)
+        self.f = self.fun(self.t, self.y)
+        # Select initial step assuming the same order which is used to control
+        # the error.
+        if first_step is None:
+            self.h_abs = select_initial_step(
+                self.fun, self.t, self.y, self.f, self.direction,
+                3, self.rtol, self.atol)
+        else:
+            self.h_abs = validate_first_step(first_step, t0, t_bound)
+        self.h_abs_old = None
+        self.error_norm_old = None
+
+        self.newton_tol = max(10 * EPS / rtol, min(0.03, rtol ** 0.5))
+        self.sol = None
+
+        self.jac_factor = None
+        self.jac, self.J = self._validate_jac(jac, jac_sparsity)
+        if issparse(self.J):
+            def lu(A):
+                self.nlu += 1
+                return splu(A)
+
+            def solve_lu(LU, b):
+                return LU.solve(b)
+
+            I = eye(self.n, format='csc')
+        else:
+            def lu(A):
+                self.nlu += 1
+                return lu_factor(A, overwrite_a=True)
+
+            def solve_lu(LU, b):
+                return lu_solve(LU, b, overwrite_b=True)
+
+            I = np.identity(self.n)
+
+        self.lu = lu
+        self.solve_lu = solve_lu
+        self.I = I
+
+        self.current_jac = True
+        self.LU_real = None
+        self.LU_complex = None
+        self.Z = None
+
+    def _validate_jac(self, jac, sparsity):
+        t0 = self.t
+        y0 = self.y
+
+        if jac is None:
+            if sparsity is not None:
+                if issparse(sparsity):
+                    sparsity = csc_matrix(sparsity)
+                groups = group_columns(sparsity)
+                sparsity = (sparsity, groups)
+
+            def jac_wrapped(t, y, f):
+                self.njev += 1
+                J, self.jac_factor = num_jac(self.fun_vectorized, t, y, f,
+                                             self.atol, self.jac_factor,
+                                             sparsity)
+                return J
+            J = jac_wrapped(t0, y0, self.f)
+        elif callable(jac):
+            J = jac(t0, y0)
+            self.njev = 1
+            if issparse(J):
+                J = csc_matrix(J)
+
+                def jac_wrapped(t, y, _=None):
+                    self.njev += 1
+                    return csc_matrix(jac(t, y), dtype=float)
+
+            else:
+                J = np.asarray(J, dtype=float)
+
+                def jac_wrapped(t, y, _=None):
+                    self.njev += 1
+                    return np.asarray(jac(t, y), dtype=float)
+
+            if J.shape != (self.n, self.n):
+                raise ValueError("`jac` is expected to have shape {}, but "
+                                 "actually has {}."
+                                 .format((self.n, self.n), J.shape))
+        else:
+            if issparse(jac):
+                J = csc_matrix(jac)
+            else:
+                J = np.asarray(jac, dtype=float)
+
+            if J.shape != (self.n, self.n):
+                raise ValueError("`jac` is expected to have shape {}, but "
+                                 "actually has {}."
+                                 .format((self.n, self.n), J.shape))
+            jac_wrapped = None
+
+        return jac_wrapped, J
+
+    def _step_impl(self):
+        t = self.t
+        y = self.y
+        f = self.f
+
+        max_step = self.max_step
+        atol = self.atol
+        rtol = self.rtol
+
+        min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t)
+        if self.h_abs > max_step:
+            h_abs = max_step
+            h_abs_old = None
+            error_norm_old = None
+        elif self.h_abs < min_step:
+            h_abs = min_step
+            h_abs_old = None
+            error_norm_old = None
+        else:
+            h_abs = self.h_abs
+            h_abs_old = self.h_abs_old
+            error_norm_old = self.error_norm_old
+
+        J = self.J
+        LU_real = self.LU_real
+        LU_complex = self.LU_complex
+
+        current_jac = self.current_jac
+        jac = self.jac
+
+        rejected = False
+        step_accepted = False
+        message = None
+        while not step_accepted:
+            if h_abs < min_step:
+                return False, self.TOO_SMALL_STEP
+
+            h = h_abs * self.direction
+            t_new = t + h
+
+            if self.direction * (t_new - self.t_bound) > 0:
+                t_new = self.t_bound
+
+            h = t_new - t
+            h_abs = np.abs(h)
+
+            if self.sol is None:
+                Z0 = np.zeros((3, y.shape[0]))
+            else:
+                Z0 = self.sol(t + h * C).T - y
+
+            scale = atol + np.abs(y) * rtol
+
+            converged = False
+            while not converged:
+                if LU_real is None or LU_complex is None:
+                    LU_real = self.lu(MU_REAL / h * self.I - J)
+                    LU_complex = self.lu(MU_COMPLEX / h * self.I - J)
+
+                converged, n_iter, Z, rate = solve_collocation_system(
+                    self.fun, t, y, h, Z0, scale, self.newton_tol,
+                    LU_real, LU_complex, self.solve_lu)
+
+                if not converged:
+                    if current_jac:
+                        break
+
+                    J = self.jac(t, y, f)
+                    current_jac = True
+                    LU_real = None
+                    LU_complex = None
+
+            if not converged:
+                h_abs *= 0.5
+                LU_real = None
+                LU_complex = None
+                continue
+
+            y_new = y + Z[-1]
+            ZE = Z.T.dot(E) / h
+            error = self.solve_lu(LU_real, f + ZE)
+            scale = atol + np.maximum(np.abs(y), np.abs(y_new)) * rtol
+            error_norm = norm(error / scale)
+            safety = 0.9 * (2 * NEWTON_MAXITER + 1) / (2 * NEWTON_MAXITER
+                                                       + n_iter)
+
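+            # On a retried step, refine the raw estimate by filtering the
+            # error through LU_real once more (see Hairer & Wanner, Sec.
+            # IV.8, on avoiding repeated rejections).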
+            if rejected and error_norm > 1:
+                error = self.solve_lu(LU_real, self.fun(t, y + error) + ZE)
+                error_norm = norm(error / scale)
+
+            if error_norm > 1:
+                factor = predict_factor(h_abs, h_abs_old,
+                                        error_norm, error_norm_old)
+                h_abs *= max(MIN_FACTOR, safety * factor)
+
+                LU_real = None
+                LU_complex = None
+                rejected = True
+            else:
+                step_accepted = True
+
+        recompute_jac = jac is not None and n_iter > 2 and rate > 1e-3
+
+        factor = predict_factor(h_abs, h_abs_old, error_norm, error_norm_old)
+        factor = min(MAX_FACTOR, safety * factor)
+
+        if not recompute_jac and factor < 1.2:
+            factor = 1
+        else:
+            LU_real = None
+            LU_complex = None
+
+        f_new = self.fun(t_new, y_new)
+        if recompute_jac:
+            J = jac(t_new, y_new, f_new)
+            current_jac = True
+        elif jac is not None:
+            current_jac = False
+
+        self.h_abs_old = self.h_abs
+        self.error_norm_old = error_norm
+
+        self.h_abs = h_abs * factor
+
+        self.y_old = y
+
+        self.t = t_new
+        self.y = y_new
+        self.f = f_new
+
+        self.Z = Z
+
+        self.LU_real = LU_real
+        self.LU_complex = LU_complex
+        self.current_jac = current_jac
+        self.J = J
+
+        self.t_old = t
+        self.sol = self._compute_dense_output()
+
+        return step_accepted, message
+
+    def _compute_dense_output(self):
+        Q = np.dot(self.Z.T, P)
+        return RadauDenseOutput(self.t_old, self.t, self.y_old, Q)
+
+    def _dense_output_impl(self):
+        return self.sol
+
+
+class RadauDenseOutput(DenseOutput):
+    def __init__(self, t_old, t, y_old, Q):
+        super().__init__(t_old, t)
+        self.h = t - t_old
+        self.Q = Q
+        self.order = Q.shape[1] - 1
+        self.y_old = y_old
+
+    def _call_impl(self, t):
+        x = (t - self.t_old) / self.h
+        if t.ndim == 0:
+            p = np.tile(x, self.order + 1)
+            p = np.cumprod(p)
+        else:
+            p = np.tile(x, (self.order + 1, 1))
+            p = np.cumprod(p, axis=0)
+        # Here we don't multiply by h, not a mistake.
+        y = np.dot(self.Q, p)
+        if y.ndim == 2:
+            y += self.y_old[:, None]
+        else:
+            y += self.y_old
+
+        return y
diff --git a/__packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/rk.py b/__packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/rk.py
new file mode 100644
index 00000000..2ff95261
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/rk.py
@@ -0,0 +1,587 @@
+import numpy as np
+from .base import OdeSolver, DenseOutput
+from .common import (validate_max_step, validate_tol, select_initial_step,
+                     norm, warn_extraneous, validate_first_step)
+from . import dop853_coefficients
+
+# Multiply steps computed from asymptotic behaviour of errors by this.
+SAFETY = 0.9
+
+MIN_FACTOR = 0.2  # Minimum allowed decrease in a step size.
+MAX_FACTOR = 10  # Maximum allowed increase in a step size.
+
+
+def rk_step(fun, t, y, f, h, A, B, C, K):
+    """Perform a single Runge-Kutta step.
+
+    This function computes a prediction of an explicit Runge-Kutta method and
+    also estimates the error of a less accurate method.
+
+    Notation for Butcher tableau is as in [1]_.
+
+    Parameters
+    ----------
+    fun : callable
+        Right-hand side of the system.
+    t : float
+        Current time.
+    y : ndarray, shape (n,)
+        Current state.
+    f : ndarray, shape (n,)
+        Current value of the derivative, i.e., ``fun(t, y)``.
+    h : float
+        Step to use.
+    A : ndarray, shape (n_stages, n_stages)
+        Coefficients for combining previous RK stages to compute the next
+        stage. For explicit methods the coefficients at and above the main
+        diagonal are zeros.
+    B : ndarray, shape (n_stages,)
+        Coefficients for combining RK stages for computing the final
+        prediction.
+    C : ndarray, shape (n_stages,)
+        Coefficients for incrementing time for consecutive RK stages.
+        The value for the first stage is always zero.
+    K : ndarray, shape (n_stages + 1, n)
+        Storage array for putting RK stages here. Stages are stored in rows.
+        The last row holds the derivative evaluated at the new point,
+        ``fun(t + h, y_new)``.
+
+    Returns
+    -------
+    y_new : ndarray, shape (n,)
+        Solution at t + h computed with a higher accuracy.
+    f_new : ndarray, shape (n,)
+        Derivative ``fun(t + h, y_new)``.
+
+    References
+    ----------
+    .. [1] E. Hairer, S. P. Norsett, G. Wanner, "Solving Ordinary Differential
+           Equations I: Nonstiff Problems", Sec. II.4.
+    """
+    K[0] = f
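+    # Each subsequent stage evaluates f at an intermediate point built from
+    # the previous stages: K[s] = f(t + C[s] * h, y + h * sum_j A[s, j] * K[j]).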
+    for s, (a, c) in enumerate(zip(A[1:], C[1:]), start=1):
+        dy = np.dot(K[:s].T, a[:s]) * h
+        K[s] = fun(t + c * h, y + dy)
+
+    y_new = y + h * np.dot(K[:-1].T, B)
+    f_new = fun(t + h, y_new)
+
+    K[-1] = f_new
+
+    return y_new, f_new
+
+
+class RungeKutta(OdeSolver):
+    """Base class for explicit Runge-Kutta methods."""
+    C: np.ndarray = NotImplemented
+    A: np.ndarray = NotImplemented
+    B: np.ndarray = NotImplemented
+    E: np.ndarray = NotImplemented
+    P: np.ndarray = NotImplemented
+    order: int = NotImplemented
+    error_estimator_order: int = NotImplemented
+    n_stages: int = NotImplemented
+
+    def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
+                 rtol=1e-3, atol=1e-6, vectorized=False,
+                 first_step=None, **extraneous):
+        warn_extraneous(extraneous)
+        super().__init__(fun, t0, y0, t_bound, vectorized,
+                         support_complex=True)
+        self.y_old = None
+        self.max_step = validate_max_step(max_step)
+        self.rtol, self.atol = validate_tol(rtol, atol, self.n)
+        self.f = self.fun(self.t, self.y)
+        if first_step is None:
+            self.h_abs = select_initial_step(
+                self.fun, self.t, self.y, self.f, self.direction,
+                self.error_estimator_order, self.rtol, self.atol)
+        else:
+            self.h_abs = validate_first_step(first_step, t0, t_bound)
+        self.K = np.empty((self.n_stages + 1, self.n), dtype=self.y.dtype)
+        self.error_exponent = -1 / (self.error_estimator_order + 1)
+        self.h_previous = None
+
+    def _estimate_error(self, K, h):
+        return np.dot(K.T, self.E) * h
+
+    def _estimate_error_norm(self, K, h, scale):
+        return norm(self._estimate_error(K, h) / scale)
+
+    def _step_impl(self):
+        t = self.t
+        y = self.y
+
+        max_step = self.max_step
+        rtol = self.rtol
+        atol = self.atol
+
+        min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t)
+
+        if self.h_abs > max_step:
+            h_abs = max_step
+        elif self.h_abs < min_step:
+            h_abs = min_step
+        else:
+            h_abs = self.h_abs
+
+        step_accepted = False
+        step_rejected = False
+
+        while not step_accepted:
+            if h_abs < min_step:
+                return False, self.TOO_SMALL_STEP
+
+            h = h_abs * self.direction
+            t_new = t + h
+
+            if self.direction * (t_new - self.t_bound) > 0:
+                t_new = self.t_bound
+
+            h = t_new - t
+            h_abs = np.abs(h)
+
+            y_new, f_new = rk_step(self.fun, t, y, self.f, h, self.A,
+                                   self.B, self.C, self.K)
+            scale = atol + np.maximum(np.abs(y), np.abs(y_new)) * rtol
+            error_norm = self._estimate_error_norm(self.K, h, scale)
+
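+            # Standard step-size controller: scale the step by
+            # SAFETY * error_norm ** (-1 / (error_estimator_order + 1)),
+            # clipped to [MIN_FACTOR, MAX_FACTOR].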
+            if error_norm < 1:
+                if error_norm == 0:
+                    factor = MAX_FACTOR
+                else:
+                    factor = min(MAX_FACTOR,
+                                 SAFETY * error_norm ** self.error_exponent)
+
+                if step_rejected:
+                    factor = min(1, factor)
+
+                h_abs *= factor
+
+                step_accepted = True
+            else:
+                h_abs *= max(MIN_FACTOR,
+                             SAFETY * error_norm ** self.error_exponent)
+                step_rejected = True
+
+        self.h_previous = h
+        self.y_old = y
+
+        self.t = t_new
+        self.y = y_new
+
+        self.h_abs = h_abs
+        self.f = f_new
+
+        return True, None
+
+    def _dense_output_impl(self):
+        Q = self.K.T.dot(self.P)
+        return RkDenseOutput(self.t_old, self.t, self.y_old, Q)
+
+
+class RK23(RungeKutta):
+    """Explicit Runge-Kutta method of order 3(2).
+
+    This uses the Bogacki-Shampine pair of formulas [1]_. The error is controlled
+    assuming accuracy of the second-order method, but steps are taken using the
+    third-order accurate formula (local extrapolation is done). A cubic Hermite
+    polynomial is used for the dense output.
+
+    Can be applied in the complex domain.
+
+    Parameters
+    ----------
+    fun : callable
+        Right-hand side of the system. The calling signature is ``fun(t, y)``.
+        Here ``t`` is a scalar and there are two options for ndarray ``y``.
+        It can either have shape (n,), then ``fun`` must return array_like with
+        shape (n,). Or alternatively it can have shape (n, k), then ``fun``
+        must return array_like with shape (n, k), i.e. each column
+        corresponds to a single column in ``y``. The choice between the two
+        options is determined by `vectorized` argument (see below).
+    t0 : float
+        Initial time.
+    y0 : array_like, shape (n,)
+        Initial state.
+    t_bound : float
+        Boundary time - the integration won't continue beyond it. It also
+        determines the direction of the integration.
+    first_step : float or None, optional
+        Initial step size. Default is ``None`` which means that the algorithm
+        should choose.
+    max_step : float, optional
+        Maximum allowed step size. Default is np.inf, i.e., the step size is not
+        bounded and determined solely by the solver.
+    rtol, atol : float and array_like, optional
+        Relative and absolute tolerances. The solver keeps the local error
+        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
+        relative accuracy (number of correct digits), while `atol` controls
+        absolute accuracy (number of correct decimal places). To achieve the
+        desired `rtol`, set `atol` to be smaller than the smallest value that
+        can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
+        allowable error. If `atol` is larger than ``rtol * abs(y)`` the
+        number of correct digits is not guaranteed. Conversely, to achieve the
+        desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
+        than `atol`. If components of y have different scales, it might be
+        beneficial to set different `atol` values for different components by
+        passing array_like with shape (n,) for `atol`. Default values are
+        1e-3 for `rtol` and 1e-6 for `atol`.
+    vectorized : bool, optional
+        Whether `fun` is implemented in a vectorized fashion. Default is False.
+
+    Attributes
+    ----------
+    n : int
+        Number of equations.
+    status : string
+        Current status of the solver: 'running', 'finished' or 'failed'.
+    t_bound : float
+        Boundary time.
+    direction : float
+        Integration direction: +1 or -1.
+    t : float
+        Current time.
+    y : ndarray
+        Current state.
+    t_old : float
+        Previous time. None if no steps were made yet.
+    step_size : float
+        Size of the last successful step. None if no steps were made yet.
+    nfev : int
+        Number of evaluations of the system's right-hand side.
+    njev : int
+        Number of evaluations of the Jacobian. Is always 0 for this solver
+        as it does not use the Jacobian.
+    nlu : int
+        Number of LU decompositions. Is always 0 for this solver.
+
+    References
+    ----------
+    .. [1] P. Bogacki, L. F. Shampine, "A 3(2) Pair of Runge-Kutta Formulas",
+           Appl. Math. Lett. Vol. 2, No. 4. pp. 321-325, 1989.
+    """
+    order = 3
+    error_estimator_order = 2
+    n_stages = 3
+    C = np.array([0, 1/2, 3/4])
+    A = np.array([
+        [0, 0, 0],
+        [1/2, 0, 0],
+        [0, 3/4, 0]
+    ])
+    B = np.array([2/9, 1/3, 4/9])
+    E = np.array([5/72, -1/12, -1/9, 1/8])
+    P = np.array([[1, -4 / 3, 5 / 9],
+                  [0, 1, -2/3],
+                  [0, 4/3, -8/9],
+                  [0, -1, 1]])
+
+
+class RK45(RungeKutta):
+    """Explicit Runge-Kutta method of order 5(4).
+
+    This uses the Dormand-Prince pair of formulas [1]_. The error is controlled
+    assuming accuracy of the fourth-order method, but steps are taken
+    using the fifth-order accurate formula (local extrapolation is done).
+    A quartic interpolation polynomial is used for the dense output [2]_.
+
+    Can be applied in the complex domain.
+
+    Parameters
+    ----------
+    fun : callable
+        Right-hand side of the system. The calling signature is ``fun(t, y)``.
+        Here ``t`` is a scalar, and there are two options for the ndarray ``y``:
+        It can either have shape (n,); then ``fun`` must return array_like with
+        shape (n,). Alternatively it can have shape (n, k); then ``fun``
+        must return an array_like with shape (n, k), i.e., each column
+        corresponds to a single column in ``y``. The choice between the two
+        options is determined by `vectorized` argument (see below).
+    t0 : float
+        Initial time.
+    y0 : array_like, shape (n,)
+        Initial state.
+    t_bound : float
+        Boundary time - the integration won't continue beyond it. It also
+        determines the direction of the integration.
+    first_step : float or None, optional
+        Initial step size. Default is ``None`` which means that the algorithm
+        should choose.
+    max_step : float, optional
+        Maximum allowed step size. Default is np.inf, i.e., the step size is not
+        bounded and determined solely by the solver.
+    rtol, atol : float and array_like, optional
+        Relative and absolute tolerances. The solver keeps the local error
+        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
+        relative accuracy (number of correct digits), while `atol` controls
+        absolute accuracy (number of correct decimal places). To achieve the
+        desired `rtol`, set `atol` to be smaller than the smallest value that
+        can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
+        allowable error. If `atol` is larger than ``rtol * abs(y)`` the
+        number of correct digits is not guaranteed. Conversely, to achieve the
+        desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
+        than `atol`. If components of y have different scales, it might be
+        beneficial to set different `atol` values for different components by
+        passing array_like with shape (n,) for `atol`. Default values are
+        1e-3 for `rtol` and 1e-6 for `atol`.
+    vectorized : bool, optional
+        Whether `fun` is implemented in a vectorized fashion. Default is False.
+
+    Attributes
+    ----------
+    n : int
+        Number of equations.
+    status : string
+        Current status of the solver: 'running', 'finished' or 'failed'.
+    t_bound : float
+        Boundary time.
+    direction : float
+        Integration direction: +1 or -1.
+    t : float
+        Current time.
+    y : ndarray
+        Current state.
+    t_old : float
+        Previous time. None if no steps were made yet.
+    step_size : float
+        Size of the last successful step. None if no steps were made yet.
+    nfev : int
+        Number of evaluations of the system's right-hand side.
+    njev : int
+        Number of evaluations of the Jacobian. Is always 0 for this solver
+        as it does not use the Jacobian.
+    nlu : int
+        Number of LU decompositions. Is always 0 for this solver.
+
+    References
+    ----------
+    .. [1] J. R. Dormand, P. J. Prince, "A family of embedded Runge-Kutta
+           formulae", Journal of Computational and Applied Mathematics, Vol. 6,
+           No. 1, pp. 19-26, 1980.
+    .. [2] L. F. Shampine, "Some Practical Runge-Kutta Formulas", Mathematics
+           of Computation, Vol. 46, No. 173, pp. 135-150, 1986.
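+
+    Examples
+    --------
+    A minimal usage sketch through ``solve_ivp``, using dense output to
+    evaluate the interpolating polynomial between the accepted steps:
+
+    >>> import numpy as np
+    >>> from scipy.integrate import solve_ivp
+    >>> sol = solve_ivp(lambda t, y: np.cos(t) * y, [0, 5], [1.0],
+    ...                 method='RK45', dense_output=True)
+    >>> y_mid = sol.sol(2.5)  # continuous solution at t = 2.5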
+    """
+    order = 5
+    error_estimator_order = 4
+    n_stages = 6
+    C = np.array([0, 1/5, 3/10, 4/5, 8/9, 1])
+    A = np.array([
+        [0, 0, 0, 0, 0],
+        [1/5, 0, 0, 0, 0],
+        [3/40, 9/40, 0, 0, 0],
+        [44/45, -56/15, 32/9, 0, 0],
+        [19372/6561, -25360/2187, 64448/6561, -212/729, 0],
+        [9017/3168, -355/33, 46732/5247, 49/176, -5103/18656]
+    ])
+    B = np.array([35/384, 0, 500/1113, 125/192, -2187/6784, 11/84])
+    E = np.array([-71/57600, 0, 71/16695, -71/1920, 17253/339200, -22/525,
+                  1/40])
+    # Corresponds to the optimum value of c_6 from [2]_.
+    P = np.array([
+        [1, -8048581381/2820520608, 8663915743/2820520608,
+         -12715105075/11282082432],
+        [0, 0, 0, 0],
+        [0, 131558114200/32700410799, -68118460800/10900136933,
+         87487479700/32700410799],
+        [0, -1754552775/470086768, 14199869525/1410260304,
+         -10690763975/1880347072],
+        [0, 127303824393/49829197408, -318862633887/49829197408,
+         701980252875 / 199316789632],
+        [0, -282668133/205662961, 2019193451/616988883, -1453857185/822651844],
+        [0, 40617522/29380423, -110615467/29380423, 69997945/29380423]])
+
+
+class DOP853(RungeKutta):
+    """Explicit Runge-Kutta method of order 8.
+
+    This is a Python implementation of "DOP853" algorithm originally written
+    in Fortran [1]_, [2]_. Note that this is not a literal translation, but
+    the algorithmic core and coefficients are the same.
+
+    Can be applied in the complex domain.
+
+    Parameters
+    ----------
+    fun : callable
+        Right-hand side of the system. The calling signature is ``fun(t, y)``.
+        Here, ``t`` is a scalar, and there are two options for the ndarray ``y``:
+        It can either have shape (n,); then ``fun`` must return array_like with
+        shape (n,). Alternatively it can have shape (n, k); then ``fun``
+        must return an array_like with shape (n, k), i.e. each column
+        corresponds to a single column in ``y``. The choice between the two
+        options is determined by `vectorized` argument (see below).
+    t0 : float
+        Initial time.
+    y0 : array_like, shape (n,)
+        Initial state.
+    t_bound : float
+        Boundary time - the integration won't continue beyond it. It also
+        determines the direction of the integration.
+    first_step : float or None, optional
+        Initial step size. Default is ``None`` which means that the algorithm
+        should choose.
+    max_step : float, optional
+        Maximum allowed step size. Default is np.inf, i.e. the step size is not
+        bounded and determined solely by the solver.
+    rtol, atol : float and array_like, optional
+        Relative and absolute tolerances. The solver keeps the local error
+        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
+        relative accuracy (number of correct digits), while `atol` controls
+        absolute accuracy (number of correct decimal places). To achieve the
+        desired `rtol`, set `atol` to be smaller than the smallest value that
+        can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
+        allowable error. If `atol` is larger than ``rtol * abs(y)`` the
+        number of correct digits is not guaranteed. Conversely, to achieve the
+        desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
+        than `atol`. If components of y have different scales, it might be
+        beneficial to set different `atol` values for different components by
+        passing array_like with shape (n,) for `atol`. Default values are
+        1e-3 for `rtol` and 1e-6 for `atol`.
+    vectorized : bool, optional
+        Whether `fun` is implemented in a vectorized fashion. Default is False.
+
+    Attributes
+    ----------
+    n : int
+        Number of equations.
+    status : string
+        Current status of the solver: 'running', 'finished' or 'failed'.
+    t_bound : float
+        Boundary time.
+    direction : float
+        Integration direction: +1 or -1.
+    t : float
+        Current time.
+    y : ndarray
+        Current state.
+    t_old : float
+        Previous time. None if no steps were made yet.
+    step_size : float
+        Size of the last successful step. None if no steps were made yet.
+    nfev : int
+        Number of evaluations of the system's right-hand side.
+    njev : int
+        Number of evaluations of the Jacobian. Is always 0 for this solver
+        as it does not use the Jacobian.
+    nlu : int
+        Number of LU decompositions. Is always 0 for this solver.
+
+    References
+    ----------
+    .. [1] E. Hairer, S. P. Norsett, G. Wanner, "Solving Ordinary Differential
+           Equations I: Nonstiff Problems", Sec. II.
+    .. [2] `Page with original Fortran code of DOP853
+            `_.
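+
+    Examples
+    --------
+    A minimal usage sketch through ``solve_ivp``, exercising the
+    complex-domain support at a tight tolerance:
+
+    >>> from scipy.integrate import solve_ivp
+    >>> sol = solve_ivp(lambda t, y: 1j * y, [0, 1], [1 + 0j],
+    ...                 method='DOP853', rtol=1e-10, atol=1e-12)
+    >>> err = abs(sol.y[0, -1]) - 1  # |exp(1j * t)| should remain 1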
+    """
+    n_stages = dop853_coefficients.N_STAGES
+    order = 8
+    error_estimator_order = 7
+    A = dop853_coefficients.A[:n_stages, :n_stages]
+    B = dop853_coefficients.B
+    C = dop853_coefficients.C[:n_stages]
+    E3 = dop853_coefficients.E3
+    E5 = dop853_coefficients.E5
+    D = dop853_coefficients.D
+
+    A_EXTRA = dop853_coefficients.A[n_stages + 1:]
+    C_EXTRA = dop853_coefficients.C[n_stages + 1:]
+
+    def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
+                 rtol=1e-3, atol=1e-6, vectorized=False,
+                 first_step=None, **extraneous):
+        super().__init__(fun, t0, y0, t_bound, max_step, rtol, atol,
+                         vectorized, first_step, **extraneous)
+        self.K_extended = np.empty((dop853_coefficients.N_STAGES_EXTENDED,
+                                    self.n), dtype=self.y.dtype)
+        self.K = self.K_extended[:self.n_stages + 1]
+
+    def _estimate_error(self, K, h):  # Left for testing purposes.
+        err5 = np.dot(K.T, self.E5)
+        err3 = np.dot(K.T, self.E3)
+        denom = np.hypot(np.abs(err5), 0.1 * np.abs(err3))
+        correction_factor = np.ones_like(err5)
+        mask = denom > 0
+        correction_factor[mask] = np.abs(err5[mask]) / denom[mask]
+        return h * err5 * correction_factor
+
+    def _estimate_error_norm(self, K, h, scale):
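+        # DOP853 combines fifth- and third-order error estimates, damping the
+        # fifth-order one by err5**2 / sqrt(err5**2 + 0.01 * err3**2) with an
+        # RMS-style normalization, as in the original Fortran implementation.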
+        err5 = np.dot(K.T, self.E5) / scale
+        err3 = np.dot(K.T, self.E3) / scale
+        err5_norm_2 = np.linalg.norm(err5)**2
+        err3_norm_2 = np.linalg.norm(err3)**2
+        if err5_norm_2 == 0 and err3_norm_2 == 0:
+            return 0.0
+        denom = err5_norm_2 + 0.01 * err3_norm_2
+        return np.abs(h) * err5_norm_2 / np.sqrt(denom * len(scale))
+
+    def _dense_output_impl(self):
+        K = self.K_extended
+        h = self.h_previous
+        for s, (a, c) in enumerate(zip(self.A_EXTRA, self.C_EXTRA),
+                                   start=self.n_stages + 1):
+            dy = np.dot(K[:s].T, a[:s]) * h
+            K[s] = self.fun(self.t_old + c * h, self.y_old + dy)
+
+        F = np.empty((dop853_coefficients.INTERPOLATOR_POWER, self.n),
+                     dtype=self.y_old.dtype)
+
+        f_old = K[0]
+        delta_y = self.y - self.y_old
+
+        F[0] = delta_y
+        F[1] = h * f_old - delta_y
+        F[2] = 2 * delta_y - h * (self.f + f_old)
+        F[3:] = h * np.dot(self.D, K)
+
+        return Dop853DenseOutput(self.t_old, self.t, self.y_old, F)
+
+
+class RkDenseOutput(DenseOutput):
+    def __init__(self, t_old, t, y_old, Q):
+        super().__init__(t_old, t)
+        self.h = t - t_old
+        self.Q = Q
+        self.order = Q.shape[1] - 1
+        self.y_old = y_old
+
+    def _call_impl(self, t):
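+        # Form the powers x, x**2, ..., x**(order + 1) of the scaled time and
+        # evaluate the interpolant as y_old + h * Q @ p.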
+        x = (t - self.t_old) / self.h
+        if t.ndim == 0:
+            p = np.tile(x, self.order + 1)
+            p = np.cumprod(p)
+        else:
+            p = np.tile(x, (self.order + 1, 1))
+            p = np.cumprod(p, axis=0)
+        y = self.h * np.dot(self.Q, p)
+        if y.ndim == 2:
+            y += self.y_old[:, None]
+        else:
+            y += self.y_old
+
+        return y
+
+
+class Dop853DenseOutput(DenseOutput):
+    def __init__(self, t_old, t, y_old, F):
+        super().__init__(t_old, t)
+        self.h = t - t_old
+        self.F = F
+        self.y_old = y_old
+
+    def _call_impl(self, t):
+        x = (t - self.t_old) / self.h
+
+        if t.ndim == 0:
+            y = np.zeros_like(self.y_old)
+        else:
+            x = x[:, None]
+            y = np.zeros((len(x), len(self.y_old)), dtype=self.y_old.dtype)
+
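+        # Evaluate the interpolant with a Horner-like scheme that alternates
+        # multiplication by x and (1 - x), mirroring the dense-output
+        # evaluation in Hairer's Fortran code.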
+        for i, f in enumerate(reversed(self.F)):
+            y += f
+            if i % 2 == 0:
+                y *= x
+            else:
+                y *= 1 - x
+        y += self.y_old
+
+        return y.T
diff --git a/__packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/tests/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/tests/test_ivp.py b/__packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/tests/test_ivp.py
new file mode 100644
index 00000000..2fb29482
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/tests/test_ivp.py
@@ -0,0 +1,1040 @@
+from itertools import product
+from numpy.testing import (assert_, assert_allclose,
+                           assert_equal, assert_no_warnings, suppress_warnings)
+import pytest
+from pytest import raises as assert_raises
+import numpy as np
+from scipy.optimize._numdiff import group_columns
+from scipy.integrate import solve_ivp, RK23, RK45, DOP853, Radau, BDF, LSODA
+from scipy.integrate import OdeSolution
+from scipy.integrate._ivp.common import num_jac
+from scipy.integrate._ivp.base import ConstantDenseOutput
+from scipy.sparse import coo_matrix, csc_matrix
+
+
+def fun_zero(t, y):
+    return np.zeros_like(y)
+
+
+def fun_linear(t, y):
+    return np.array([-y[0] - 5 * y[1], y[0] + y[1]])
+
+
+def jac_linear():
+    return np.array([[-1, -5], [1, 1]])
+
+
+def sol_linear(t):
+    return np.vstack((-5 * np.sin(2 * t),
+                      2 * np.cos(2 * t) + np.sin(2 * t)))
+
+
+def fun_rational(t, y):
+    return np.array([y[1] / t,
+                     y[1] * (y[0] + 2 * y[1] - 1) / (t * (y[0] - 1))])
+
+
+def fun_rational_vectorized(t, y):
+    return np.vstack((y[1] / t,
+                      y[1] * (y[0] + 2 * y[1] - 1) / (t * (y[0] - 1))))
+
+
+def jac_rational(t, y):
+    return np.array([
+        [0, 1 / t],
+        [-2 * y[1] ** 2 / (t * (y[0] - 1) ** 2),
+         (y[0] + 4 * y[1] - 1) / (t * (y[0] - 1))]
+    ])
+
+
+def jac_rational_sparse(t, y):
+    return csc_matrix([
+        [0, 1 / t],
+        [-2 * y[1] ** 2 / (t * (y[0] - 1) ** 2),
+         (y[0] + 4 * y[1] - 1) / (t * (y[0] - 1))]
+    ])
+
+
+def sol_rational(t):
+    return np.asarray((t / (t + 10), 10 * t / (t + 10) ** 2))
+
+
+def fun_medazko(t, y):
+    n = y.shape[0] // 2
+    k = 100
+    c = 4
+
+    phi = 2 if t <= 5 else 0
+    y = np.hstack((phi, 0, y, y[-2]))
+
+    d = 1 / n
+    j = np.arange(n) + 1
+    alpha = 2 * (j * d - 1) ** 3 / c ** 2
+    beta = (j * d - 1) ** 4 / c ** 2
+
+    j_2_p1 = 2 * j + 2
+    j_2_m3 = 2 * j - 2
+    j_2_m1 = 2 * j
+    j_2 = 2 * j + 1
+
+    f = np.empty(2 * n)
+    f[::2] = (alpha * (y[j_2_p1] - y[j_2_m3]) / (2 * d) +
+              beta * (y[j_2_m3] - 2 * y[j_2_m1] + y[j_2_p1]) / d ** 2 -
+              k * y[j_2_m1] * y[j_2])
+    f[1::2] = -k * y[j_2] * y[j_2_m1]
+
+    return f
+
+
+def medazko_sparsity(n):
+    cols = []
+    rows = []
+
+    i = np.arange(n) * 2
+
+    cols.append(i[1:])
+    rows.append(i[1:] - 2)
+
+    cols.append(i)
+    rows.append(i)
+
+    cols.append(i)
+    rows.append(i + 1)
+
+    cols.append(i[:-1])
+    rows.append(i[:-1] + 2)
+
+    i = np.arange(n) * 2 + 1
+
+    cols.append(i)
+    rows.append(i)
+
+    cols.append(i)
+    rows.append(i - 1)
+
+    cols = np.hstack(cols)
+    rows = np.hstack(rows)
+
+    return coo_matrix((np.ones_like(cols), (cols, rows)))
+
+
+def fun_complex(t, y):
+    return -y
+
+
+def jac_complex(t, y):
+    return -np.eye(y.shape[0])
+
+
+def jac_complex_sparse(t, y):
+    return csc_matrix(jac_complex(t, y))
+
+
+def sol_complex(t):
+    y = (0.5 + 1j) * np.exp(-t)
+    return y.reshape((1, -1))
+
+
+def compute_error(y, y_true, rtol, atol):
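+    # RMS norm of the error scaled by the requested tolerance; values of
+    # order one mean the solution is within the prescribed accuracy.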
+    e = (y - y_true) / (atol + rtol * np.abs(y_true))
+    return np.linalg.norm(e, axis=0) / np.sqrt(e.shape[0])
+
+
+def test_integration():
+    rtol = 1e-3
+    atol = 1e-6
+    y0 = [1/3, 2/9]
+
+    for vectorized, method, t_span, jac in product(
+            [False, True],
+            ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA'],
+            [[5, 9], [5, 1]],
+            [None, jac_rational, jac_rational_sparse]):
+
+        if vectorized:
+            fun = fun_rational_vectorized
+        else:
+            fun = fun_rational
+
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning,
+                       "The following arguments have no effect for a chosen "
+                       "solver: `jac`")
+            res = solve_ivp(fun, t_span, y0, rtol=rtol,
+                            atol=atol, method=method, dense_output=True,
+                            jac=jac, vectorized=vectorized)
+        assert_equal(res.t[0], t_span[0])
+        assert_(res.t_events is None)
+        assert_(res.y_events is None)
+        assert_(res.success)
+        assert_equal(res.status, 0)
+
+        if method == 'DOP853':
+            # DOP853 spends more function evaluations because it doesn't
+            # have enough time to develop a big enough step size.
+            assert_(res.nfev < 50)
+        else:
+            assert_(res.nfev < 40)
+
+        if method in ['RK23', 'RK45', 'DOP853', 'LSODA']:
+            assert_equal(res.njev, 0)
+            assert_equal(res.nlu, 0)
+        else:
+            assert_(0 < res.njev < 3)
+            assert_(0 < res.nlu < 10)
+
+        y_true = sol_rational(res.t)
+        e = compute_error(res.y, y_true, rtol, atol)
+        assert_(np.all(e < 5))
+
+        tc = np.linspace(*t_span)
+        yc_true = sol_rational(tc)
+        yc = res.sol(tc)
+
+        e = compute_error(yc, yc_true, rtol, atol)
+        assert_(np.all(e < 5))
+
+        tc = (t_span[0] + t_span[-1]) / 2
+        yc_true = sol_rational(tc)
+        yc = res.sol(tc)
+
+        e = compute_error(yc, yc_true, rtol, atol)
+        assert_(np.all(e < 5))
+
+        # LSODA for some reason doesn't pass the polynomial through the
+        # previous points exactly after the order change. It might be a bug
+        # in the LSODA implementation or maybe we are missing something.
+        if method != 'LSODA':
+            assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15)
+
+
+def test_integration_complex():
+    rtol = 1e-3
+    atol = 1e-6
+    y0 = [0.5 + 1j]
+    t_span = [0, 1]
+    tc = np.linspace(t_span[0], t_span[1])
+    for method, jac in product(['RK23', 'RK45', 'DOP853', 'BDF'],
+                               [None, jac_complex, jac_complex_sparse]):
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning,
+                       "The following arguments have no effect for a chosen "
+                       "solver: `jac`")
+            res = solve_ivp(fun_complex, t_span, y0, method=method,
+                            dense_output=True, rtol=rtol, atol=atol, jac=jac)
+
+        assert_equal(res.t[0], t_span[0])
+        assert_(res.t_events is None)
+        assert_(res.y_events is None)
+        assert_(res.success)
+        assert_equal(res.status, 0)
+
+        if method == 'DOP853':
+            assert res.nfev < 35
+        else:
+            assert res.nfev < 25
+
+        if method == 'BDF':
+            assert_equal(res.njev, 1)
+            assert res.nlu < 6
+        else:
+            assert res.njev == 0
+            assert res.nlu == 0
+
+        y_true = sol_complex(res.t)
+        e = compute_error(res.y, y_true, rtol, atol)
+        assert np.all(e < 5)
+
+        yc_true = sol_complex(tc)
+        yc = res.sol(tc)
+        e = compute_error(yc, yc_true, rtol, atol)
+
+        assert np.all(e < 5)
+
+
+def test_integration_sparse_difference():
+    n = 200
+    t_span = [0, 20]
+    y0 = np.zeros(2 * n)
+    y0[1::2] = 1
+    sparsity = medazko_sparsity(n)
+
+    for method in ['BDF', 'Radau']:
+        res = solve_ivp(fun_medazko, t_span, y0, method=method,
+                        jac_sparsity=sparsity)
+
+        assert_equal(res.t[0], t_span[0])
+        assert_(res.t_events is None)
+        assert_(res.y_events is None)
+        assert_(res.success)
+        assert_equal(res.status, 0)
+
+        assert_allclose(res.y[78, -1], 0.233994e-3, rtol=1e-2)
+        assert_allclose(res.y[79, -1], 0, atol=1e-3)
+        assert_allclose(res.y[148, -1], 0.359561e-3, rtol=1e-2)
+        assert_allclose(res.y[149, -1], 0, atol=1e-3)
+        assert_allclose(res.y[198, -1], 0.117374129e-3, rtol=1e-2)
+        assert_allclose(res.y[199, -1], 0.6190807e-5, atol=1e-3)
+        assert_allclose(res.y[238, -1], 0, atol=1e-3)
+        assert_allclose(res.y[239, -1], 0.9999997, rtol=1e-2)
+
+
+def test_integration_const_jac():
+    rtol = 1e-3
+    atol = 1e-6
+    y0 = [0, 2]
+    t_span = [0, 2]
+    J = jac_linear()
+    J_sparse = csc_matrix(J)
+
+    for method, jac in product(['Radau', 'BDF'], [J, J_sparse]):
+        res = solve_ivp(fun_linear, t_span, y0, rtol=rtol, atol=atol,
+                        method=method, dense_output=True, jac=jac)
+        assert_equal(res.t[0], t_span[0])
+        assert_(res.t_events is None)
+        assert_(res.y_events is None)
+        assert_(res.success)
+        assert_equal(res.status, 0)
+
+        assert_(res.nfev < 100)
+        assert_equal(res.njev, 0)
+        assert_(0 < res.nlu < 15)
+
+        y_true = sol_linear(res.t)
+        e = compute_error(res.y, y_true, rtol, atol)
+        assert_(np.all(e < 10))
+
+        tc = np.linspace(*t_span)
+        yc_true = sol_linear(tc)
+        yc = res.sol(tc)
+
+        e = compute_error(yc, yc_true, rtol, atol)
+        assert_(np.all(e < 15))
+
+        assert_allclose(res.sol(res.t), res.y, rtol=1e-14, atol=1e-14)
+
+
+@pytest.mark.slow
+@pytest.mark.parametrize('method', ['Radau', 'BDF', 'LSODA'])
+def test_integration_stiff(method):
+    rtol = 1e-6
+    atol = 1e-6
+    y0 = [1e4, 0, 0]
+    tspan = [0, 1e8]
+
+    def fun_robertson(t, state):
+        x, y, z = state
+        return [
+            -0.04 * x + 1e4 * y * z,
+            0.04 * x - 1e4 * y * z - 3e7 * y * y,
+            3e7 * y * y,
+        ]
+
+    res = solve_ivp(fun_robertson, tspan, y0, rtol=rtol,
+                    atol=atol, method=method)
+
+    # If the stiff mode is not activated correctly, these numbers will be
+    # much bigger.
+    assert res.nfev < 5000
+    assert res.njev < 200
+
+
+def test_events():
+    def event_rational_1(t, y):
+        return y[0] - y[1] ** 0.7
+
+    def event_rational_2(t, y):
+        return y[1] ** 0.6 - y[0]
+
+    def event_rational_3(t, y):
+        return t - 7.4
+
+    event_rational_3.terminal = True
+
+    for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:
+        res = solve_ivp(fun_rational, [5, 8], [1/3, 2/9], method=method,
+                        events=(event_rational_1, event_rational_2))
+        assert_equal(res.status, 0)
+        assert_equal(res.t_events[0].size, 1)
+        assert_equal(res.t_events[1].size, 1)
+        assert_(5.3 < res.t_events[0][0] < 5.7)
+        assert_(7.3 < res.t_events[1][0] < 7.7)
+
+        assert_equal(res.y_events[0].shape, (1, 2))
+        assert_equal(res.y_events[1].shape, (1, 2))
+        assert np.isclose(
+            event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)
+        assert np.isclose(
+            event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0)
+
+        event_rational_1.direction = 1
+        event_rational_2.direction = 1
+        res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method,
+                        events=(event_rational_1, event_rational_2))
+        assert_equal(res.status, 0)
+        assert_equal(res.t_events[0].size, 1)
+        assert_equal(res.t_events[1].size, 0)
+        assert_(5.3 < res.t_events[0][0] < 5.7)
+        assert_equal(res.y_events[0].shape, (1, 2))
+        assert_equal(res.y_events[1].shape, (0,))
+        assert np.isclose(
+            event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)
+
+        event_rational_1.direction = -1
+        event_rational_2.direction = -1
+        res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method,
+                        events=(event_rational_1, event_rational_2))
+        assert_equal(res.status, 0)
+        assert_equal(res.t_events[0].size, 0)
+        assert_equal(res.t_events[1].size, 1)
+        assert_(7.3 < res.t_events[1][0] < 7.7)
+        assert_equal(res.y_events[0].shape, (0,))
+        assert_equal(res.y_events[1].shape, (1, 2))
+        assert np.isclose(
+            event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0)
+
+        event_rational_1.direction = 0
+        event_rational_2.direction = 0
+
+        res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method,
+                        events=(event_rational_1, event_rational_2,
+                                event_rational_3), dense_output=True)
+        assert_equal(res.status, 1)
+        assert_equal(res.t_events[0].size, 1)
+        assert_equal(res.t_events[1].size, 0)
+        assert_equal(res.t_events[2].size, 1)
+        assert_(5.3 < res.t_events[0][0] < 5.7)
+        assert_(7.3 < res.t_events[2][0] < 7.5)
+        assert_equal(res.y_events[0].shape, (1, 2))
+        assert_equal(res.y_events[1].shape, (0,))
+        assert_equal(res.y_events[2].shape, (1, 2))
+        assert np.isclose(
+            event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)
+        assert np.isclose(
+            event_rational_3(res.t_events[2][0], res.y_events[2][0]), 0)
+
+        res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method,
+                        events=event_rational_1, dense_output=True)
+        assert_equal(res.status, 0)
+        assert_equal(res.t_events[0].size, 1)
+        assert_(5.3 < res.t_events[0][0] < 5.7)
+
+        assert_equal(res.y_events[0].shape, (1, 2))
+        assert np.isclose(
+            event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)
+
+        # Also test that termination by event doesn't break interpolants.
+        tc = np.linspace(res.t[0], res.t[-1])
+        yc_true = sol_rational(tc)
+        yc = res.sol(tc)
+        e = compute_error(yc, yc_true, 1e-3, 1e-6)
+        assert_(np.all(e < 5))
+
+        # Test that y_events matches the solution.
+        assert np.allclose(sol_rational(res.t_events[0][0]),
+                           res.y_events[0][0], rtol=1e-3, atol=1e-6)
+
+    # Test in backward direction.
+    event_rational_1.direction = 0
+    event_rational_2.direction = 0
+    for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:
+        res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method,
+                        events=(event_rational_1, event_rational_2))
+        assert_equal(res.status, 0)
+        assert_equal(res.t_events[0].size, 1)
+        assert_equal(res.t_events[1].size, 1)
+        assert_(5.3 < res.t_events[0][0] < 5.7)
+        assert_(7.3 < res.t_events[1][0] < 7.7)
+
+        assert_equal(res.y_events[0].shape, (1, 2))
+        assert_equal(res.y_events[1].shape, (1, 2))
+        assert np.isclose(
+            event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)
+        assert np.isclose(
+            event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0)
+
+        event_rational_1.direction = -1
+        event_rational_2.direction = -1
+        res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method,
+                        events=(event_rational_1, event_rational_2))
+        assert_equal(res.status, 0)
+        assert_equal(res.t_events[0].size, 1)
+        assert_equal(res.t_events[1].size, 0)
+        assert_(5.3 < res.t_events[0][0] < 5.7)
+
+        assert_equal(res.y_events[0].shape, (1, 2))
+        assert_equal(res.y_events[1].shape, (0,))
+        assert np.isclose(
+            event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)
+
+        event_rational_1.direction = 1
+        event_rational_2.direction = 1
+        res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method,
+                        events=(event_rational_1, event_rational_2))
+        assert_equal(res.status, 0)
+        assert_equal(res.t_events[0].size, 0)
+        assert_equal(res.t_events[1].size, 1)
+        assert_(7.3 < res.t_events[1][0] < 7.7)
+
+        assert_equal(res.y_events[0].shape, (0,))
+        assert_equal(res.y_events[1].shape, (1, 2))
+        assert np.isclose(
+            event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0)
+
+        event_rational_1.direction = 0
+        event_rational_2.direction = 0
+
+        res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method,
+                        events=(event_rational_1, event_rational_2,
+                                event_rational_3), dense_output=True)
+        assert_equal(res.status, 1)
+        assert_equal(res.t_events[0].size, 0)
+        assert_equal(res.t_events[1].size, 1)
+        assert_equal(res.t_events[2].size, 1)
+        assert_(7.3 < res.t_events[1][0] < 7.7)
+        assert_(7.3 < res.t_events[2][0] < 7.5)
+
+        assert_equal(res.y_events[0].shape, (0,))
+        assert_equal(res.y_events[1].shape, (1, 2))
+        assert_equal(res.y_events[2].shape, (1, 2))
+        assert np.isclose(
+            event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0)
+        assert np.isclose(
+            event_rational_3(res.t_events[2][0], res.y_events[2][0]), 0)
+
+        # Also test that termination by event doesn't break interpolants.
+        tc = np.linspace(res.t[-1], res.t[0])
+        yc_true = sol_rational(tc)
+        yc = res.sol(tc)
+        e = compute_error(yc, yc_true, 1e-3, 1e-6)
+        assert_(np.all(e < 5))
+
+        assert np.allclose(sol_rational(res.t_events[1][0]),
+                           res.y_events[1][0], rtol=1e-3, atol=1e-6)
+        assert np.allclose(sol_rational(res.t_events[2][0]),
+                           res.y_events[2][0], rtol=1e-3, atol=1e-6)
+
+
+def test_max_step():
+    rtol = 1e-3
+    atol = 1e-6
+    y0 = [1/3, 2/9]
+    for method in [RK23, RK45, DOP853, Radau, BDF, LSODA]:
+        for t_span in ([5, 9], [5, 1]):
+            res = solve_ivp(fun_rational, t_span, y0, rtol=rtol,
+                            max_step=0.5, atol=atol, method=method,
+                            dense_output=True)
+            assert_equal(res.t[0], t_span[0])
+            assert_equal(res.t[-1], t_span[-1])
+            assert_(np.all(np.abs(np.diff(res.t)) <= 0.5 + 1e-15))
+            assert_(res.t_events is None)
+            assert_(res.success)
+            assert_equal(res.status, 0)
+
+            y_true = sol_rational(res.t)
+            e = compute_error(res.y, y_true, rtol, atol)
+            assert_(np.all(e < 5))
+
+            tc = np.linspace(*t_span)
+            yc_true = sol_rational(tc)
+            yc = res.sol(tc)
+
+            e = compute_error(yc, yc_true, rtol, atol)
+            assert_(np.all(e < 5))
+
+            # See comment in test_integration.
+            if method is not LSODA:
+                assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15)
+
+            assert_raises(ValueError, method, fun_rational, t_span[0], y0,
+                          t_span[1], max_step=-1)
+
+            if method is not LSODA:
+                solver = method(fun_rational, t_span[0], y0, t_span[1],
+                                rtol=rtol, atol=atol, max_step=1e-20)
+                message = solver.step()
+
+                assert_equal(solver.status, 'failed')
+                assert_("step size is less" in message)
+                assert_raises(RuntimeError, solver.step)
+
+
+def test_first_step():
+    rtol = 1e-3
+    atol = 1e-6
+    y0 = [1/3, 2/9]
+    first_step = 0.1
+    for method in [RK23, RK45, DOP853, Radau, BDF, LSODA]:
+        for t_span in ([5, 9], [5, 1]):
+            res = solve_ivp(fun_rational, t_span, y0, rtol=rtol,
+                            max_step=0.5, atol=atol, method=method,
+                            dense_output=True, first_step=first_step)
+
+            assert_equal(res.t[0], t_span[0])
+            assert_equal(res.t[-1], t_span[-1])
+            assert_allclose(first_step, np.abs(res.t[1] - 5))
+            assert_(res.t_events is None)
+            assert_(res.success)
+            assert_equal(res.status, 0)
+
+            y_true = sol_rational(res.t)
+            e = compute_error(res.y, y_true, rtol, atol)
+            assert_(np.all(e < 5))
+
+            tc = np.linspace(*t_span)
+            yc_true = sol_rational(tc)
+            yc = res.sol(tc)
+
+            e = compute_error(yc, yc_true, rtol, atol)
+            assert_(np.all(e < 5))
+
+            # See comment in test_integration.
+            if method is not LSODA:
+                assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15)
+
+            assert_raises(ValueError, method, fun_rational, t_span[0], y0,
+                          t_span[1], first_step=-1)
+            assert_raises(ValueError, method, fun_rational, t_span[0], y0,
+                          t_span[1], first_step=5)
+
+
+def test_t_eval():
+    rtol = 1e-3
+    atol = 1e-6
+    y0 = [1/3, 2/9]
+    for t_span in ([5, 9], [5, 1]):
+        t_eval = np.linspace(t_span[0], t_span[1], 10)
+        res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol,
+                        t_eval=t_eval)
+        assert_equal(res.t, t_eval)
+        assert_(res.t_events is None)
+        assert_(res.success)
+        assert_equal(res.status, 0)
+
+        y_true = sol_rational(res.t)
+        e = compute_error(res.y, y_true, rtol, atol)
+        assert_(np.all(e < 5))
+
+    t_eval = [5, 5.01, 7, 8, 8.01, 9]
+    res = solve_ivp(fun_rational, [5, 9], y0, rtol=rtol, atol=atol,
+                    t_eval=t_eval)
+    assert_equal(res.t, t_eval)
+    assert_(res.t_events is None)
+    assert_(res.success)
+    assert_equal(res.status, 0)
+
+    y_true = sol_rational(res.t)
+    e = compute_error(res.y, y_true, rtol, atol)
+    assert_(np.all(e < 5))
+
+    t_eval = [5, 4.99, 3, 1.5, 1.1, 1.01, 1]
+    res = solve_ivp(fun_rational, [5, 1], y0, rtol=rtol, atol=atol,
+                    t_eval=t_eval)
+    assert_equal(res.t, t_eval)
+    assert_(res.t_events is None)
+    assert_(res.success)
+    assert_equal(res.status, 0)
+
+    t_eval = [5.01, 7, 8, 8.01]
+    res = solve_ivp(fun_rational, [5, 9], y0, rtol=rtol, atol=atol,
+                    t_eval=t_eval)
+    assert_equal(res.t, t_eval)
+    assert_(res.t_events is None)
+    assert_(res.success)
+    assert_equal(res.status, 0)
+
+    y_true = sol_rational(res.t)
+    e = compute_error(res.y, y_true, rtol, atol)
+    assert_(np.all(e < 5))
+
+    t_eval = [4.99, 3, 1.5, 1.1, 1.01]
+    res = solve_ivp(fun_rational, [5, 1], y0, rtol=rtol, atol=atol,
+                    t_eval=t_eval)
+    assert_equal(res.t, t_eval)
+    assert_(res.t_events is None)
+    assert_(res.success)
+    assert_equal(res.status, 0)
+
+    t_eval = [4, 6]
+    assert_raises(ValueError, solve_ivp, fun_rational, [5, 9], y0,
+                  rtol=rtol, atol=atol, t_eval=t_eval)
+
+
+def test_t_eval_dense_output():
+    rtol = 1e-3
+    atol = 1e-6
+    y0 = [1/3, 2/9]
+    t_span = [5, 9]
+    t_eval = np.linspace(t_span[0], t_span[1], 10)
+    res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol,
+                    t_eval=t_eval)
+    res_d = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol,
+                      t_eval=t_eval, dense_output=True)
+    assert_equal(res.t, t_eval)
+    assert_(res.t_events is None)
+    assert_(res.success)
+    assert_equal(res.status, 0)
+
+    assert_equal(res.t, res_d.t)
+    assert_equal(res.y, res_d.y)
+    assert_(res_d.t_events is None)
+    assert_(res_d.success)
+    assert_equal(res_d.status, 0)
+
+    # Since t and y are equal for both runs, it suffices to test one case.
+    y_true = sol_rational(res.t)
+    e = compute_error(res.y, y_true, rtol, atol)
+    assert_(np.all(e < 5))
+
+
+def test_t_eval_early_event():
+    def early_event(t, y):
+        return t - 7
+
+    early_event.terminal = True
+
+    rtol = 1e-3
+    atol = 1e-6
+    y0 = [1/3, 2/9]
+    t_span = [5, 9]
+    t_eval = np.linspace(7.5, 9, 16)
+    for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning,
+                       "The following arguments have no effect for a chosen "
+                       "solver: `jac`")
+            res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol,
+                            method=method, t_eval=t_eval, events=early_event,
+                            jac=jac_rational)
+        assert res.success
+        assert res.message == 'A termination event occurred.'
+        assert res.status == 1
+        assert not res.t and not res.y
+        assert len(res.t_events) == 1
+        assert res.t_events[0].size == 1
+        assert res.t_events[0][0] == 7
+
+
+def test_no_integration():
+    for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:
+        sol = solve_ivp(lambda t, y: -y, [4, 4], [2, 3],
+                        method=method, dense_output=True)
+        assert_equal(sol.sol(4), [2, 3])
+        assert_equal(sol.sol([4, 5, 6]), [[2, 2, 2], [3, 3, 3]])
+
+
+def test_no_integration_class():
+    for method in [RK23, RK45, DOP853, Radau, BDF, LSODA]:
+        solver = method(lambda t, y: -y, 0.0, [10.0, 0.0], 0.0)
+        solver.step()
+        assert_equal(solver.status, 'finished')
+        sol = solver.dense_output()
+        assert_equal(sol(0.0), [10.0, 0.0])
+        assert_equal(sol([0, 1, 2]), [[10, 10, 10], [0, 0, 0]])
+
+        solver = method(lambda t, y: -y, 0.0, [], np.inf)
+        solver.step()
+        assert_equal(solver.status, 'finished')
+        sol = solver.dense_output()
+        assert_equal(sol(100.0), [])
+        assert_equal(sol([0, 1, 2]), np.empty((0, 3)))
+
+
+def test_empty():
+    def fun(t, y):
+        return np.zeros((0,))
+
+    y0 = np.zeros((0,))
+
+    for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:
+        sol = assert_no_warnings(solve_ivp, fun, [0, 10], y0,
+                                 method=method, dense_output=True)
+        assert_equal(sol.sol(10), np.zeros((0,)))
+        assert_equal(sol.sol([1, 2, 3]), np.zeros((0, 3)))
+
+    for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:
+        sol = assert_no_warnings(solve_ivp, fun, [0, np.inf], y0,
+                                 method=method, dense_output=True)
+        assert_equal(sol.sol(10), np.zeros((0,)))
+        assert_equal(sol.sol([1, 2, 3]), np.zeros((0, 3)))
+
+
+def test_ConstantDenseOutput():
+    sol = ConstantDenseOutput(0, 1, np.array([1, 2]))
+    assert_allclose(sol(1.5), [1, 2])
+    assert_allclose(sol([1, 1.5, 2]), [[1, 1, 1], [2, 2, 2]])
+
+    sol = ConstantDenseOutput(0, 1, np.array([]))
+    assert_allclose(sol(1.5), np.empty(0))
+    assert_allclose(sol([1, 1.5, 2]), np.empty((0, 3)))
+
+
+def test_classes():
+    y0 = [1 / 3, 2 / 9]
+    for cls in [RK23, RK45, DOP853, Radau, BDF, LSODA]:
+        solver = cls(fun_rational, 5, y0, np.inf)
+        assert_equal(solver.n, 2)
+        assert_equal(solver.status, 'running')
+        assert_equal(solver.t_bound, np.inf)
+        assert_equal(solver.direction, 1)
+        assert_equal(solver.t, 5)
+        assert_equal(solver.y, y0)
+        assert_(solver.step_size is None)
+        if cls is not LSODA:
+            assert_(solver.nfev > 0)
+            assert_(solver.njev >= 0)
+            assert_equal(solver.nlu, 0)
+        else:
+            assert_equal(solver.nfev, 0)
+            assert_equal(solver.njev, 0)
+            assert_equal(solver.nlu, 0)
+
+        assert_raises(RuntimeError, solver.dense_output)
+
+        message = solver.step()
+        assert_equal(solver.status, 'running')
+        assert_equal(message, None)
+        assert_equal(solver.n, 2)
+        assert_equal(solver.t_bound, np.inf)
+        assert_equal(solver.direction, 1)
+        assert_(solver.t > 5)
+        assert_(not np.all(np.equal(solver.y, y0)))
+        assert_(solver.step_size > 0)
+        assert_(solver.nfev > 0)
+        assert_(solver.njev >= 0)
+        assert_(solver.nlu >= 0)
+        sol = solver.dense_output()
+        assert_allclose(sol(5), y0, rtol=1e-15, atol=0)
+
+
+def test_OdeSolution():
+    ts = np.array([0, 2, 5], dtype=float)
+    s1 = ConstantDenseOutput(ts[0], ts[1], np.array([-1]))
+    s2 = ConstantDenseOutput(ts[1], ts[2], np.array([1]))
+
+    sol = OdeSolution(ts, [s1, s2])
+
+    assert_equal(sol(-1), [-1])
+    assert_equal(sol(1), [-1])
+    assert_equal(sol(2), [-1])
+    assert_equal(sol(3), [1])
+    assert_equal(sol(5), [1])
+    assert_equal(sol(6), [1])
+
+    assert_equal(sol([0, 6, -2, 1.5, 4.5, 2.5, 5, 5.5, 2]),
+                 np.array([[-1, 1, -1, -1, 1, 1, 1, 1, -1]]))
+
+    ts = np.array([10, 4, -3])
+    s1 = ConstantDenseOutput(ts[0], ts[1], np.array([-1]))
+    s2 = ConstantDenseOutput(ts[1], ts[2], np.array([1]))
+
+    sol = OdeSolution(ts, [s1, s2])
+    assert_equal(sol(11), [-1])
+    assert_equal(sol(10), [-1])
+    assert_equal(sol(5), [-1])
+    assert_equal(sol(4), [-1])
+    assert_equal(sol(0), [1])
+    assert_equal(sol(-3), [1])
+    assert_equal(sol(-4), [1])
+
+    assert_equal(sol([12, -5, 10, -3, 6, 1, 4]),
+                 np.array([[-1, 1, -1, 1, -1, 1, -1]]))
+
+    ts = np.array([1, 1])
+    s = ConstantDenseOutput(1, 1, np.array([10]))
+    sol = OdeSolution(ts, [s])
+    assert_equal(sol(0), [10])
+    assert_equal(sol(1), [10])
+    assert_equal(sol(2), [10])
+
+    assert_equal(sol([2, 1, 0]), np.array([[10, 10, 10]]))
+
+
+def test_num_jac():
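+    # fun/jac below implement the Robertson stiff chemical-kinetics system,
+    # a standard test problem with widely separated reaction rates.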
+    def fun(t, y):
+        return np.vstack([
+            -0.04 * y[0] + 1e4 * y[1] * y[2],
+            0.04 * y[0] - 1e4 * y[1] * y[2] - 3e7 * y[1] ** 2,
+            3e7 * y[1] ** 2
+        ])
+
+    def jac(t, y):
+        return np.array([
+            [-0.04, 1e4 * y[2], 1e4 * y[1]],
+            [0.04, -1e4 * y[2] - 6e7 * y[1], -1e4 * y[1]],
+            [0, 6e7 * y[1], 0]
+        ])
+
+    t = 1
+    y = np.array([1, 0, 0])
+    J_true = jac(t, y)
+    threshold = 1e-5
+    f = fun(t, y).ravel()
+
+    J_num, factor = num_jac(fun, t, y, f, threshold, None)
+    assert_allclose(J_num, J_true, rtol=1e-5, atol=1e-5)
+
+    J_num, factor = num_jac(fun, t, y, f, threshold, factor)
+    assert_allclose(J_num, J_true, rtol=1e-5, atol=1e-5)
+
+
+def test_num_jac_sparse():
+    def fun(t, y):
+        e = y[1:]**3 - y[:-1]**2
+        z = np.zeros(y.shape[1])
+        return np.vstack((z, 3 * e)) + np.vstack((2 * e, z))
+
+    def structure(n):
+        A = np.zeros((n, n), dtype=int)
+        A[0, 0] = 1
+        A[0, 1] = 1
+        for i in range(1, n - 1):
+            A[i, i - 1: i + 2] = 1
+        A[-1, -1] = 1
+        A[-1, -2] = 1
+
+        return A
+
+    np.random.seed(0)
+    n = 20
+    y = np.random.randn(n)
+    A = structure(n)
+    groups = group_columns(A)
+
+    f = fun(0, y[:, None]).ravel()
+
+    # Compare dense and sparse results, assuming that dense implementation
+    # is correct (as it is straightforward).
+    J_num_sparse, factor_sparse = num_jac(fun, 0, y.ravel(), f, 1e-8, None,
+                                          sparsity=(A, groups))
+    J_num_dense, factor_dense = num_jac(fun, 0, y.ravel(), f, 1e-8, None)
+    assert_allclose(J_num_dense, J_num_sparse.toarray(),
+                    rtol=1e-12, atol=1e-14)
+    assert_allclose(factor_dense, factor_sparse, rtol=1e-12, atol=1e-14)
+
+    # Take small factors to trigger their recomputing inside.
+    factor = np.random.uniform(0, 1e-12, size=n)
+    J_num_sparse, factor_sparse = num_jac(fun, 0, y.ravel(), f, 1e-8, factor,
+                                          sparsity=(A, groups))
+    J_num_dense, factor_dense = num_jac(fun, 0, y.ravel(), f, 1e-8, factor)
+
+    assert_allclose(J_num_dense, J_num_sparse.toarray(),
+                    rtol=1e-12, atol=1e-14)
+    assert_allclose(factor_dense, factor_sparse, rtol=1e-12, atol=1e-14)
+
+
+def test_args():
+
+    # sys3 is actually two decoupled systems. (x, y) form a
+    # linear oscillator, while z is a nonlinear first order
+    # system with equilibria at z=0 and z=1. If k > 0, z=1
+    # is stable and z=0 is unstable.
+
+    def sys3(t, w, omega, k, zfinal):
+        x, y, z = w
+        return [-omega*y, omega*x, k*z*(1 - z)]
+
+    def sys3_jac(t, w, omega, k, zfinal):
+        x, y, z = w
+        J = np.array([[0, -omega, 0],
+                      [omega, 0, 0],
+                      [0, 0, k*(1 - 2*z)]])
+        return J
+
+    def sys3_x0decreasing(t, w, omega, k, zfinal):
+        x, y, z = w
+        return x
+
+    def sys3_y0increasing(t, w, omega, k, zfinal):
+        x, y, z = w
+        return y
+
+    def sys3_zfinal(t, w, omega, k, zfinal):
+        x, y, z = w
+        return z - zfinal
+
+    # Set the event flags for the event functions.
+    sys3_x0decreasing.direction = -1
+    sys3_y0increasing.direction = 1
+    sys3_zfinal.terminal = True
+
+    omega = 2
+    k = 4
+
+    tfinal = 5
+    zfinal = 0.99
+    # Find z0 such that when z(0) = z0, z(tfinal) = zfinal.
+    # The condition z(tfinal) = zfinal is the terminal event.
+    z0 = np.exp(-k*tfinal)/((1 - zfinal)/zfinal + np.exp(-k*tfinal))
+
+    w0 = [0, -1, z0]
+
+    # Provide the jac argument and use the Radau method to ensure that the use
+    # of the Jacobian function is exercised.
+    # If event handling is working, the solution will stop at tfinal, not tend.
+    tend = 2*tfinal
+    sol = solve_ivp(sys3, [0, tend], w0,
+                    events=[sys3_x0decreasing, sys3_y0increasing, sys3_zfinal],
+                    dense_output=True, args=(omega, k, zfinal),
+                    method='Radau', jac=sys3_jac,
+                    rtol=1e-10, atol=1e-13)
+
+    # Check that we got the expected events at the expected times.
+    x0events_t = sol.t_events[0]
+    y0events_t = sol.t_events[1]
+    zfinalevents_t = sol.t_events[2]
+    assert_allclose(x0events_t, [0.5*np.pi, 1.5*np.pi])
+    assert_allclose(y0events_t, [0.25*np.pi, 1.25*np.pi])
+    assert_allclose(zfinalevents_t, [tfinal])
+
+    # Check that the solution agrees with the known exact solution.
+    t = np.linspace(0, zfinalevents_t[0], 250)
+    w = sol.sol(t)
+    assert_allclose(w[0], np.sin(omega*t), rtol=1e-9, atol=1e-12)
+    assert_allclose(w[1], -np.cos(omega*t), rtol=1e-9, atol=1e-12)
+    assert_allclose(w[2], 1/(((1 - z0)/z0)*np.exp(-k*t) + 1),
+                    rtol=1e-9, atol=1e-12)
+
+    # Check that the state variables have the expected values at the events.
+    x0events = sol.sol(x0events_t)
+    y0events = sol.sol(y0events_t)
+    zfinalevents = sol.sol(zfinalevents_t)
+    assert_allclose(x0events[0], np.zeros_like(x0events[0]), atol=5e-14)
+    assert_allclose(x0events[1], np.ones_like(x0events[1]))
+    assert_allclose(y0events[0], np.ones_like(y0events[0]))
+    assert_allclose(y0events[1], np.zeros_like(y0events[1]), atol=5e-14)
+    assert_allclose(zfinalevents[2], [zfinal])
+
+
+def test_array_rtol():
+    # solve_ivp had a bug with array_like `rtol`; see gh-15482
+    # check that it's fixed
+    def f(t, y):
+        return y[0], y[1]
+
+    # no warning (or error) when `rtol` is array_like
+    sol = solve_ivp(f, (0, 1), [1., 1.], rtol=[1e-1, 1e-1])
+    err1 = np.abs(np.linalg.norm(sol.y[:, -1] - np.exp(1)))
+
+    # warning when an element of `rtol` is too small
+    with pytest.warns(UserWarning, match="At least one element..."):
+        sol = solve_ivp(f, (0, 1), [1., 1.], rtol=[1e-1, 1e-16])
+        err2 = np.abs(np.linalg.norm(sol.y[:, -1] - np.exp(1)))
+
+    # tighter rtol improves the error
+    assert err2 < err1
+
+
+@pytest.mark.parametrize('method',
+                         ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA'])
+def test_integration_zero_rhs(method):
+    result = solve_ivp(fun_zero, [0, 10], np.ones(3), method=method)
+    assert_(result.success)
+    assert_equal(result.status, 0)
+    assert_allclose(result.y, 1.0, rtol=1e-15)
+
+
+def test_args_single_value():
+    def fun_with_arg(t, y, a):
+        return a*y
+
+    message = "Supplied 'args' cannot be unpacked."
+    with pytest.raises(TypeError, match=message):
+        solve_ivp(fun_with_arg, (0, 0.1), [1], args=-1)
+
+    sol = solve_ivp(fun_with_arg, (0, 0.1), [1], args=(-1,))
+    assert_allclose(sol.y[0, -1], np.exp(-0.1))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/tests/test_rk.py b/__packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/tests/test_rk.py
new file mode 100644
index 00000000..33cb27d0
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/integrate/_ivp/tests/test_rk.py
@@ -0,0 +1,37 @@
+import pytest
+from numpy.testing import assert_allclose, assert_
+import numpy as np
+from scipy.integrate import RK23, RK45, DOP853
+from scipy.integrate._ivp import dop853_coefficients
+
+
+@pytest.mark.parametrize("solver", [RK23, RK45, DOP853])
+def test_coefficient_properties(solver):
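+    # Butcher-tableau consistency: the weights B sum to 1 and each row of A
+    # sums to the corresponding node C (internal first-order consistency).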
+    assert_allclose(np.sum(solver.B), 1, rtol=1e-15)
+    assert_allclose(np.sum(solver.A, axis=1), solver.C, rtol=1e-14)
+
+
+def test_coefficient_properties_dop853():
+    assert_allclose(np.sum(dop853_coefficients.B), 1, rtol=1e-15)
+    assert_allclose(np.sum(dop853_coefficients.A, axis=1),
+                    dop853_coefficients.C,
+                    rtol=1e-14)
+
+
+@pytest.mark.parametrize("solver_class", [RK23, RK45, DOP853])
+def test_error_estimation(solver_class):
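+    # For y' = y with y(0) = 1 the exact solution is exp(t), so after one
+    # step the embedded error estimate should bound the true local error.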
+    step = 0.2
+    solver = solver_class(lambda t, y: y, 0, [1], 1, first_step=step)
+    solver.step()
+    error_estimate = solver._estimate_error(solver.K, step)
+    error = solver.y - np.exp([step])
+    assert_(np.abs(error) < np.abs(error_estimate))
+
+
+@pytest.mark.parametrize("solver_class", [RK23, RK45, DOP853])
+def test_error_estimation_complex(solver_class):
+    h = 0.2
+    solver = solver_class(lambda t, y: 1j * y, 0, [1j], 1, first_step=h)
+    solver.step()
+    err_norm = solver._estimate_error_norm(solver.K, h, scale=[1])
+    assert np.isrealobj(err_norm)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/integrate/_ode.py b/__packaged__/coreml/.python_dependencies/scipy/integrate/_ode.py
new file mode 100644
index 00000000..1e4be255
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/integrate/_ode.py
@@ -0,0 +1,1372 @@
+# Authors: Pearu Peterson, Pauli Virtanen, John Travers
+"""
+First-order ODE integrators.
+
+User-friendly interface to various numerical integrators for solving a
+system of first order ODEs with prescribed initial conditions::
+
+    d y(t)[i]
+    ---------  = f(t,y(t))[i],
+       d t
+
+    y(t=0)[i] = y0[i],
+
+where::
+
+    i = 0, ..., len(y0) - 1
+
+class ode
+---------
+
+A generic interface class to numeric integrators. It has the following
+methods::
+
+    integrator = ode(f, jac=None)
+    integrator = integrator.set_integrator(name, **params)
+    integrator = integrator.set_initial_value(y0, t0=0.0)
+    integrator = integrator.set_f_params(*args)
+    integrator = integrator.set_jac_params(*args)
+    y1 = integrator.integrate(t1, step=False, relax=False)
+    flag = integrator.successful()
+
+class complex_ode
+-----------------
+
+This class has the same generic interface as ode, except it can handle complex
+f, y and Jacobians by transparently translating them into the equivalent
+real-valued system. It supports the real-valued solvers (i.e., not zvode) and is
+an alternative to ode with the zvode solver, sometimes performing better.
+"""
+# XXX: Integrators must have:
+# ===========================
+# cvode - C version of vode and vodpk with many improvements.
+#   Get it from http://www.netlib.org/ode/cvode.tar.gz.
+#   To wrap cvode to Python, one must write the extension module by
+#   hand. Its interface uses too much 'advanced C' for f2py to handle,
+#   so wrapping it that way would be too complicated (or impossible).
+#
+# How to define a new integrator:
+# ===============================
+#
+# class myodeint(IntegratorBase):
+#
+#     runner = <odeint function> or None
+#
+#     def __init__(self,...):                           # required
+#         <initialize>
+#
+#     def reset(self,n,has_jac):                        # optional
+#         # n - the size of the problem (number of equations)
+#         # has_jac - whether user has supplied its own routine for Jacobian
+#         <allocate memory, initialize further>
+#
+#     def run(self,f,jac,y0,t0,t1,f_params,jac_params): # required
+#         # this method is called to integrate from t=t0 to t=t1
+#         # with initial condition y0. f and jac are user-supplied functions
+#         # that define the problem. f_params,jac_params are additional
+#         # arguments
+#         # to these functions.
+#         <calculate y1>
+#         if <calculation was unsuccessful>:
+#             self.success = 0
+#         return y1, t1
+#
+#     # In addition, one can define step() and run_relax() methods (they
+#     # take the same arguments as run()) if the integrator can support
+#     # these features (see IntegratorBase doc strings).
+#
+# if myodeint.runner:
+#     IntegratorBase.integrator_classes.append(myodeint)
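+#
+# Example (purely illustrative; not part of SciPy): following the recipe
+# above, a minimal fixed-step Euler integrator might look like this. The
+# class name `euler` and its `nsteps` parameter are hypothetical.
+#
+# class euler(IntegratorBase):
+#
+#     runner = True  # pure Python: any truthy value marks it as available
+#
+#     def __init__(self, nsteps=1000):
+#         self.nsteps = nsteps
+#         self.success = 1
+#
+#     def run(self, f, jac, y0, t0, t1, f_params, jac_params):
+#         # March from t0 to t1 in self.nsteps equal explicit Euler steps.
+#         y, t = asarray(y0, dtype=float), t0
+#         h = (t1 - t0) / self.nsteps
+#         for _ in range(self.nsteps):
+#             y = y + h * asarray(f(t, y, *f_params))
+#             t += h
+#         # ode.integrate unpacks the return value as (y, t).
+#         return y, t1
+#
+# if euler.runner:
+#     IntegratorBase.integrator_classes.append(euler)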
+
+__all__ = ['ode', 'complex_ode']
+
+import re
+import warnings
+
+from numpy import asarray, array, zeros, isscalar, real, imag, vstack
+
+from . import _vode
+from . import _dop
+from . import _lsoda
+
+
+_dop_int_dtype = _dop.types.intvar.dtype
+_vode_int_dtype = _vode.types.intvar.dtype
+_lsoda_int_dtype = _lsoda.types.intvar.dtype
+
+
+# ------------------------------------------------------------------------------
+# User interface
+# ------------------------------------------------------------------------------
+
+
+class ode:
+    """
+    A generic interface class to numeric integrators.
+
+    Solve an equation system :math:`y'(t) = f(t,y)` with (optional) ``jac = df/dy``.
+
+    *Note*: The first two arguments of ``f(t, y, ...)`` are in the
+    opposite order of the arguments in the system definition function used
+    by `scipy.integrate.odeint`.
+
+    Parameters
+    ----------
+    f : callable ``f(t, y, *f_args)``
+        Right-hand side of the differential equation. t is a scalar,
+        ``y.shape == (n,)``.
+        ``f_args`` is set by calling ``set_f_params(*args)``.
+        `f` should return a scalar, array or list (not a tuple).
+    jac : callable ``jac(t, y, *jac_args)``, optional
+        Jacobian of the right-hand side, ``jac[i,j] = d f[i] / d y[j]``.
+        ``jac_args`` is set by calling ``set_jac_params(*args)``.
+
+    Attributes
+    ----------
+    t : float
+        Current time.
+    y : ndarray
+        Current variable values.
+
+    See also
+    --------
+    odeint : an integrator with a simpler interface based on lsoda from ODEPACK
+    quad : for finding the area under a curve
+
+    Notes
+    -----
+    Available integrators are listed below. They can be selected using
+    the `set_integrator` method.
+
+    "vode"
+
+        Real-valued Variable-coefficient Ordinary Differential Equation
+        solver, with fixed-leading-coefficient implementation. It provides
+        implicit Adams method (for non-stiff problems) and a method based on
+        backward differentiation formulas (BDF) (for stiff problems).
+
+        Source: http://www.netlib.org/ode/vode.f
+
+        .. warning::
+
+           This integrator is not re-entrant. You cannot have two `ode`
+           instances using the "vode" integrator at the same time.
+
+        This integrator accepts the following parameters in the
+        `set_integrator` method of the `ode` class:
+
+        - atol : float or sequence
+          absolute tolerance for solution
+        - rtol : float or sequence
+          relative tolerance for solution
+        - lband : None or int
+        - uband : None or int
+          Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+uband.
+          Setting these requires your jac routine to return the Jacobian
+          in packed format, jac_packed[i-j+uband, j] = jac[i,j]. The
+          dimension of the matrix must be (lband+uband+1, len(y)).
+        - method: 'adams' or 'bdf'
+          Which solver to use, Adams (non-stiff) or BDF (stiff)
+        - with_jacobian : bool
+          This option is only considered when the user has not supplied a
+          Jacobian function and has not indicated (by setting either band)
+          that the Jacobian is banded. In this case, `with_jacobian` specifies
+          whether the iteration method of the ODE solver's correction step is
+          chord iteration with an internally generated full Jacobian or
+          functional iteration with no Jacobian.
+        - nsteps : int
+          Maximum number of (internally defined) steps allowed during one
+          call to the solver.
+        - first_step : float
+        - min_step : float
+        - max_step : float
+          Limits for the step sizes used by the integrator.
+        - order : int
+          Maximum order used by the integrator,
+          order <= 12 for Adams, <= 5 for BDF.
+
+    "zvode"
+
+        Complex-valued Variable-coefficient Ordinary Differential Equation
+        solver, with fixed-leading-coefficient implementation. It provides
+        implicit Adams method (for non-stiff problems) and a method based on
+        backward differentiation formulas (BDF) (for stiff problems).
+
+        Source: http://www.netlib.org/ode/zvode.f
+
+        .. warning::
+
+           This integrator is not re-entrant. You cannot have two `ode`
+           instances using the "zvode" integrator at the same time.
+
+        This integrator accepts the same parameters in `set_integrator`
+        as the "vode" solver.
+
+        .. note::
+
+            When using ZVODE for a stiff system, it should only be used for
+            the case in which the function f is analytic, that is, when each f(i)
+            is an analytic function of each y(j). Analyticity means that the
+            partial derivative df(i)/dy(j) is a unique complex number, and this
+            fact is critical in the way ZVODE solves the dense or banded linear
+            systems that arise in the stiff case. For a complex stiff ODE system
+            in which f is not analytic, ZVODE is likely to have convergence
+            failures, and for this problem one should instead use DVODE on the
+            equivalent real system (in the real and imaginary parts of y).
+
+    "lsoda"
+
+        Real-valued Variable-coefficient Ordinary Differential Equation
+        solver, with fixed-leading-coefficient implementation. It provides
+        automatic method switching between implicit Adams method (for non-stiff
+        problems) and a method based on backward differentiation formulas (BDF)
+        (for stiff problems).
+
+        Source: http://www.netlib.org/odepack
+
+        .. warning::
+
+           This integrator is not re-entrant. You cannot have two `ode`
+           instances using the "lsoda" integrator at the same time.
+
+        This integrator accepts the following parameters in the
+        `set_integrator` method of the `ode` class:
+
+        - atol : float or sequence
+          absolute tolerance for solution
+        - rtol : float or sequence
+          relative tolerance for solution
+        - lband : None or int
+        - uband : None or int
+          Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+uband.
+          Setting these requires your jac routine to return the Jacobian
+          in packed format, jac_packed[i-j+uband, j] = jac[i,j].
+        - with_jacobian : bool
+          *Not used.*
+        - nsteps : int
+          Maximum number of (internally defined) steps allowed during one
+          call to the solver.
+        - first_step : float
+        - min_step : float
+        - max_step : float
+          Limits for the step sizes used by the integrator.
+        - max_order_ns : int
+          Maximum order used in the nonstiff case (default 12).
+        - max_order_s : int
+          Maximum order used in the stiff case (default 5).
+        - max_hnil : int
+          Maximum number of messages reporting too small step size (t + h = t)
+          (default 0)
+        - ixpr : int
+          Whether to generate extra printing at method switches (default False).
+
+    "dopri5"
+
+        This is an explicit Runge-Kutta method of order (4)5 due to Dormand &
+        Prince (with stepsize control and dense output).
+
+        Authors:
+
+            E. Hairer and G. Wanner
+            Universite de Geneve, Dept. de Mathematiques
+            CH-1211 Geneve 24, Switzerland
+            e-mail:  ernst.hairer@math.unige.ch, gerhard.wanner@math.unige.ch
+
+        This code is described in [HNW93]_.
+
+        This integrator accepts the following parameters in the
+        set_integrator() method of the ode class:
+
+        - atol : float or sequence
+          absolute tolerance for solution
+        - rtol : float or sequence
+          relative tolerance for solution
+        - nsteps : int
+          Maximum number of (internally defined) steps allowed during one
+          call to the solver.
+        - first_step : float
+        - max_step : float
+        - safety : float
+          Safety factor on new step selection (default 0.9)
+        - ifactor : float
+        - dfactor : float
+          Maximum factor to increase/decrease step size by in one step
+        - beta : float
+          Beta parameter for stabilised step size control.
+        - verbosity : int
+          Switch for printing messages (< 0 for no messages).
+
+    "dop853"
+
+        This is an explicit Runge-Kutta method of order 8(5,3) due to Dormand
+        & Prince (with stepsize control and dense output).
+
+        Options and references are the same as for "dopri5".
+
+    Examples
+    --------
+
+    A problem to integrate and the corresponding jacobian:
+
+    >>> from scipy.integrate import ode
+    >>>
+    >>> y0, t0 = [1.0j, 2.0], 0
+    >>>
+    >>> def f(t, y, arg1):
+    ...     return [1j*arg1*y[0] + y[1], -arg1*y[1]**2]
+    >>> def jac(t, y, arg1):
+    ...     return [[1j*arg1, 1], [0, -arg1*2*y[1]]]
+
+    The integration:
+
+    >>> r = ode(f, jac).set_integrator('zvode', method='bdf')
+    >>> r.set_initial_value(y0, t0).set_f_params(2.0).set_jac_params(2.0)
+    >>> t1 = 10
+    >>> dt = 1
+    >>> while r.successful() and r.t < t1:
+    ...     print(r.t+dt, r.integrate(r.t+dt))
+    1 [-0.71038232+0.23749653j  0.40000271+0.j        ]
+    2.0 [0.19098503-0.52359246j 0.22222356+0.j        ]
+    3.0 [0.47153208+0.52701229j 0.15384681+0.j        ]
+    4.0 [-0.61905937+0.30726255j  0.11764744+0.j        ]
+    5.0 [0.02340997-0.61418799j 0.09523835+0.j        ]
+    6.0 [0.58643071+0.339819j 0.08000018+0.j      ]
+    7.0 [-0.52070105+0.44525141j  0.06896565+0.j        ]
+    8.0 [-0.15986733-0.61234476j  0.06060616+0.j        ]
+    9.0 [0.64850462+0.15048982j 0.05405414+0.j        ]
+    10.0 [-0.38404699+0.56382299j  0.04878055+0.j        ]
+
+    References
+    ----------
+    .. [HNW93] E. Hairer, S.P. Norsett and G. Wanner, Solving Ordinary
+        Differential Equations i. Nonstiff Problems. 2nd edition.
+        Springer Series in Computational Mathematics,
+        Springer-Verlag (1993)
+
+    """
+
+    def __init__(self, f, jac=None):
+        self.stiff = 0
+        self.f = f
+        self.jac = jac
+        self.f_params = ()
+        self.jac_params = ()
+        self._y = []
+
+    @property
+    def y(self):
+        return self._y
+
+    def set_initial_value(self, y, t=0.0):
+        """Set initial conditions y(t) = y."""
+        if isscalar(y):
+            y = [y]
+        n_prev = len(self._y)
+        if not n_prev:
+            self.set_integrator('')  # find first available integrator
+        self._y = asarray(y, self._integrator.scalar)
+        self.t = t
+        self._integrator.reset(len(self._y), self.jac is not None)
+        return self
+
+    def set_integrator(self, name, **integrator_params):
+        """
+        Set integrator by name.
+
+        Parameters
+        ----------
+        name : str
+            Name of the integrator.
+        **integrator_params
+            Additional parameters for the integrator.
+        """
+        integrator = find_integrator(name)
+        if integrator is None:
+            # FIXME: this really should raise an exception. Will that break
+            # any code?
+            warnings.warn('No integrator name matched %r, or the integrator '
+                          'is not available.' % name)
+        else:
+            self._integrator = integrator(**integrator_params)
+            if not len(self._y):
+                self.t = 0.0
+                self._y = array([0.0], self._integrator.scalar)
+            self._integrator.reset(len(self._y), self.jac is not None)
+        return self
+
+    def integrate(self, t, step=False, relax=False):
+        """Find y=y(t), set y as an initial condition, and return y.
+
+        Parameters
+        ----------
+        t : float
+            The endpoint of the integration step.
+        step : bool
+            If True, and if the integrator supports the step method,
+            then perform a single integration step and return.
+            This parameter is provided in order to expose internals of
+            the implementation, and should not be changed from its default
+            value in most cases.
+        relax : bool
+            If True and if the integrator supports the run_relax method,
+            then integrate until t_1 >= t and return. ``relax`` is not
+            referenced if ``step=True``.
+            This parameter is provided in order to expose internals of
+            the implementation, and should not be changed from its default
+            value in most cases.
+
+        Returns
+        -------
+        y : ndarray
+            The integrated value at t.
+        """
+        if step and self._integrator.supports_step:
+            mth = self._integrator.step
+        elif relax and self._integrator.supports_run_relax:
+            mth = self._integrator.run_relax
+        else:
+            mth = self._integrator.run
+
+        try:
+            self._y, self.t = mth(self.f, self.jac or (lambda: None),
+                                  self._y, self.t, t,
+                                  self.f_params, self.jac_params)
+        except SystemError as e:
+            # f2py issue with tuple returns, see ticket 1187.
+            raise ValueError(
+                'Function to integrate must not return a tuple.'
+            ) from e
+
+        return self._y
+
+    def successful(self):
+        """Check if integration was successful."""
+        try:
+            self._integrator
+        except AttributeError:
+            self.set_integrator('')
+        return self._integrator.success == 1
+
+    def get_return_code(self):
+        """Extracts the return code for the integration to enable better control
+        if the integration fails.
+
+        In general, a return code > 0 implies success, while a return code < 0
+        implies failure.
+
+        Notes
+        -----
+        This section describes possible return codes and their meaning for the
+        available integrators that can be selected by the `set_integrator` method.
+
+        "vode"
+
+        ===========  =======
+        Return Code  Message
+        ===========  =======
+        2            Integration successful.
+        -1           Excess work done on this call. (Perhaps wrong MF.)
+        -2           Excess accuracy requested. (Tolerances too small.)
+        -3           Illegal input detected. (See printed message.)
+        -4           Repeated error test failures. (Check all input.)
+        -5           Repeated convergence failures. (Perhaps bad Jacobian
+                     supplied or wrong choice of MF or tolerances.)
+        -6           Error weight became zero during problem. (Solution
+                     component i vanished, and ATOL or ATOL(i) = 0.)
+        ===========  =======
+
+        "zvode"
+
+        ===========  =======
+        Return Code  Message
+        ===========  =======
+        2            Integration successful.
+        -1           Excess work done on this call. (Perhaps wrong MF.)
+        -2           Excess accuracy requested. (Tolerances too small.)
+        -3           Illegal input detected. (See printed message.)
+        -4           Repeated error test failures. (Check all input.)
+        -5           Repeated convergence failures. (Perhaps bad Jacobian
+                     supplied or wrong choice of MF or tolerances.)
+        -6           Error weight became zero during problem. (Solution
+                     component i vanished, and ATOL or ATOL(i) = 0.)
+        ===========  =======
+
+        "dopri5"
+
+        ===========  =======
+        Return Code  Message
+        ===========  =======
+        1            Integration successful.
+        2            Integration successful (interrupted by solout).
+        -1           Input is not consistent.
+        -2           Larger nsteps is needed.
+        -3           Step size becomes too small.
+        -4           Problem is probably stiff (interrupted).
+        ===========  =======
+
+        "dop853"
+
+        ===========  =======
+        Return Code  Message
+        ===========  =======
+        1            Integration successful.
+        2            Integration successful (interrupted by solout).
+        -1           Input is not consistent.
+        -2           Larger nsteps is needed.
+        -3           Step size becomes too small.
+        -4           Problem is probably stiff (interrupted).
+        ===========  =======
+
+        "lsoda"
+
+        ===========  =======
+        Return Code  Message
+        ===========  =======
+        2            Integration successful.
+        -1           Excess work done on this call (perhaps wrong Dfun type).
+        -2           Excess accuracy requested (tolerances too small).
+        -3           Illegal input detected (internal error).
+        -4           Repeated error test failures (internal error).
+        -5           Repeated convergence failures (perhaps bad Jacobian or tolerances).
+        -6           Error weight became zero during problem.
+        -7           Internal workspace insufficient to finish (internal error).
+        ===========  =======
+        """
+        try:
+            self._integrator
+        except AttributeError:
+            self.set_integrator('')
+        return self._integrator.istate
+
+    def set_f_params(self, *args):
+        """Set extra parameters for user-supplied function f."""
+        self.f_params = args
+        return self
+
+    def set_jac_params(self, *args):
+        """Set extra parameters for user-supplied function jac."""
+        self.jac_params = args
+        return self
+
+    def set_solout(self, solout):
+        """
+        Set callable to be called at every successful integration step.
+
+        Parameters
+        ----------
+        solout : callable
+            ``solout(t, y)`` is called at each internal integrator step;
+            t is a scalar providing the current independent position and
+            y is the current solution, with ``y.shape == (n,)``.
+            solout should return -1 to stop the integration;
+            otherwise it should return None or 0.
+
+        """
+        if self._integrator.supports_solout:
+            self._integrator.set_solout(solout)
+            if self._y is not None:
+                self._integrator.reset(len(self._y), self.jac is not None)
+        else:
+            raise ValueError("selected integrator does not support solout,"
+                             " choose another one")
+
+
+def _transform_banded_jac(bjac):
+    """
+    Convert a real matrix of the form (for example)
+
+        [0 0 A B]        [0 0 0 B]
+        [0 0 C D]        [0 0 A D]
+        [E F G H]   to   [0 F C H]
+        [I J K L]        [E J G L]
+                         [I 0 K 0]
+
+    That is, every other column is shifted up one.
+    """
+    # Shift every other column.
+    newjac = zeros((bjac.shape[0] + 1, bjac.shape[1]))
+    newjac[1:, ::2] = bjac[:, ::2]
+    newjac[:-1, 1::2] = bjac[:, 1::2]
+    return newjac
+
+
+class complex_ode(ode):
+    """
+    A wrapper of ode for complex systems.
+
+    This functions similarly to `ode`, but re-maps a complex-valued
+    equation system to a real-valued one before using the integrators.
+
+    Parameters
+    ----------
+    f : callable ``f(t, y, *f_args)``
+        Rhs of the equation. t is a scalar, ``y.shape == (n,)``.
+        ``f_args`` is set by calling ``set_f_params(*args)``.
+    jac : callable ``jac(t, y, *jac_args)``, optional
+        Jacobian of the rhs, ``jac[i,j] = d f[i] / d y[j]``.
+        ``jac_args`` is set by calling ``set_jac_params(*args)``.
+
+    Attributes
+    ----------
+    t : float
+        Current time.
+    y : ndarray
+        Current variable values.
+
+    Examples
+    --------
+    For usage examples, see `ode`.
+
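+    A minimal usage sketch (illustrative; the right-hand side ``f`` and the
+    choice of the 'dopri5' integrator are arbitrary)::
+
+        from scipy.integrate import complex_ode
+
+        def f(t, y):
+            return 1j * y
+
+        r = complex_ode(f).set_integrator('dopri5')
+        r.set_initial_value([1.0 + 0.0j], 0.0)
+        y1 = r.integrate(1.0)  # approximately [exp(1j)]
+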
+    """
+
+    def __init__(self, f, jac=None):
+        self.cf = f
+        self.cjac = jac
+        if jac is None:
+            ode.__init__(self, self._wrap, None)
+        else:
+            ode.__init__(self, self._wrap, self._wrap_jac)
+
+    def _wrap(self, t, y, *f_args):
+        f = self.cf(*((t, y[::2] + 1j * y[1::2]) + f_args))
+        # self.tmp is a real-valued array containing the interleaved
+        # real and imaginary parts of f.
+        self.tmp[::2] = real(f)
+        self.tmp[1::2] = imag(f)
+        return self.tmp
+
+    def _wrap_jac(self, t, y, *jac_args):
+        # jac is the complex Jacobian computed by the user-defined function.
+        jac = self.cjac(*((t, y[::2] + 1j * y[1::2]) + jac_args))
+
+        # jac_tmp is the real version of the complex Jacobian.  Each complex
+        # entry in jac, say 2+3j, becomes a 2x2 block of the form
+        #     [2 -3]
+        #     [3  2]
+        jac_tmp = zeros((2 * jac.shape[0], 2 * jac.shape[1]))
+        jac_tmp[1::2, 1::2] = jac_tmp[::2, ::2] = real(jac)
+        jac_tmp[1::2, ::2] = imag(jac)
+        jac_tmp[::2, 1::2] = -jac_tmp[1::2, ::2]
+
+        ml = getattr(self._integrator, 'ml', None)
+        mu = getattr(self._integrator, 'mu', None)
+        if ml is not None or mu is not None:
+            # Jacobian is banded.  The user's Jacobian function has computed
+            # the complex Jacobian in packed format.  The corresponding
+            # real-valued version has every other column shifted up.
+            jac_tmp = _transform_banded_jac(jac_tmp)
+
+        return jac_tmp
+
+    @property
+    def y(self):
+        return self._y[::2] + 1j * self._y[1::2]
+
+    def set_integrator(self, name, **integrator_params):
+        """
+        Set integrator by name.
+
+        Parameters
+        ----------
+        name : str
+            Name of the integrator
+        **integrator_params
+            Additional parameters for the integrator.
+        """
+        if name == 'zvode':
+            raise ValueError("zvode must be used with ode, not complex_ode")
+
+        lband = integrator_params.get('lband')
+        uband = integrator_params.get('uband')
+        if lband is not None or uband is not None:
+            # The Jacobian is banded.  Override the user-supplied bandwidths
+            # (which are for the complex Jacobian) with the bandwidths of
+            # the corresponding real-valued Jacobian wrapper of the complex
+            # Jacobian.
+            integrator_params['lband'] = 2 * (lband or 0) + 1
+            integrator_params['uband'] = 2 * (uband or 0) + 1
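+            # (Each complex entry expands to a 2x2 real block, so the real
+            # bandwidths are roughly doubled; the +1 accounts for the
+            # interleaving of real and imaginary components.)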
+
+        return ode.set_integrator(self, name, **integrator_params)
+
+    def set_initial_value(self, y, t=0.0):
+        """Set initial conditions y(t) = y."""
+        y = asarray(y)
+        self.tmp = zeros(y.size * 2, 'float')
+        self.tmp[::2] = real(y)
+        self.tmp[1::2] = imag(y)
+        return ode.set_initial_value(self, self.tmp, t)
+
+    def integrate(self, t, step=False, relax=False):
+        """Find y=y(t), set y as an initial condition, and return y.
+
+        Parameters
+        ----------
+        t : float
+            The endpoint of the integration step.
+        step : bool
+            If True, and if the integrator supports the step method,
+            then perform a single integration step and return.
+            This parameter is provided in order to expose internals of
+            the implementation, and should not be changed from its default
+            value in most cases.
+        relax : bool
+            If True and if the integrator supports the run_relax method,
+            then integrate until t_1 >= t and return. ``relax`` is not
+            referenced if ``step=True``.
+            This parameter is provided in order to expose internals of
+            the implementation, and should not be changed from its default
+            value in most cases.
+
+        Returns
+        -------
+        y : ndarray
+            The integrated value at t.
+        """
+        y = ode.integrate(self, t, step, relax)
+        return y[::2] + 1j * y[1::2]
+
+    def set_solout(self, solout):
+        """
+        Set callable to be called at every successful integration step.
+
+        Parameters
+        ----------
+        solout : callable
+            ``solout(t, y)`` is called at each internal integrator step;
+            t is a scalar providing the current independent position and
+            y is the current solution, with ``y.shape == (n,)``.
+            solout should return -1 to stop the integration;
+            otherwise it should return None or 0.
+
+        """
+        if self._integrator.supports_solout:
+            self._integrator.set_solout(solout, complex=True)
+        else:
+            raise TypeError("selected integrator does not support solouta,"
+                            + "choose another one")
+
+
+# ------------------------------------------------------------------------------
+# ODE integrators
+# ------------------------------------------------------------------------------
+
+def find_integrator(name):
+    for cl in IntegratorBase.integrator_classes:
+        if re.match(name, cl.__name__, re.I):
+            return cl
+    return None
+
+
+class IntegratorConcurrencyError(RuntimeError):
+    """
+    Failure due to concurrent usage of an integrator that can be used
+    only for a single problem at a time.
+
+    """
+
+    def __init__(self, name):
+        msg = ("Integrator `%s` can be used to solve only a single problem "
+               "at a time. If you want to integrate multiple problems, "
+               "consider using a different integrator "
+               "(see `ode.set_integrator`)") % name
+        RuntimeError.__init__(self, msg)
+
+
+class IntegratorBase:
+    runner = None  # runner is None => integrator is not available
+    success = None  # success==1 if integrator was called successfully
+    istate = None  # istate > 0 means success, istate < 0 means failure
+    supports_run_relax = None
+    supports_step = None
+    supports_solout = False
+    integrator_classes = []
+    scalar = float
+
+    def acquire_new_handle(self):
+        # Some of the integrators have internal state (ancient
+        # Fortran...), and so only one instance can use them at a time.
+        # We keep track of this, and fail when concurrent usage is tried.
+        self.__class__.active_global_handle += 1
+        self.handle = self.__class__.active_global_handle
+
+    def check_handle(self):
+        if self.handle is not self.__class__.active_global_handle:
+            raise IntegratorConcurrencyError(self.__class__.__name__)
+
+    def reset(self, n, has_jac):
+        """Prepare integrator for call: allocate memory, set flags, etc.
+        n - number of equations.
+        has_jac - if user has supplied function for evaluating Jacobian.
+        """
+
+    def run(self, f, jac, y0, t0, t1, f_params, jac_params):
+        """Integrate from t=t0 to t=t1 using y0 as an initial condition.
+        Return 2-tuple (y1,t1) where y1 is the result and t=t1
+        is the time at which the integration actually stopped.
+        """
+        raise NotImplementedError('all integrators must define '
+                                  'run(f, jac, t0, t1, y0, f_params, jac_params)')
+
+    def step(self, f, jac, y0, t0, t1, f_params, jac_params):
+        """Make one integration step and return (y1,t1)."""
+        raise NotImplementedError('%s does not support step() method' %
+                                  self.__class__.__name__)
+
+    def run_relax(self, f, jac, y0, t0, t1, f_params, jac_params):
+        """Integrate from t=t0 to t>=t1 and return (y1,t)."""
+        raise NotImplementedError('%s does not support run_relax() method' %
+                                  self.__class__.__name__)
+
+    # XXX: __str__ method for getting visual state of the integrator
+
+
+def _vode_banded_jac_wrapper(jacfunc, ml, jac_params):
+    """
+    Wrap a banded Jacobian function with a function that pads
+    the Jacobian with `ml` rows of zeros.
+    """
+
+    def jac_wrapper(t, y):
+        jac = asarray(jacfunc(t, y, *jac_params))
+        padded_jac = vstack((jac, zeros((ml, jac.shape[1]))))
+        return padded_jac
+
+    return jac_wrapper
+
+
+class vode(IntegratorBase):
+    runner = getattr(_vode, 'dvode', None)
+
+    messages = {-1: 'Excess work done on this call. (Perhaps wrong MF.)',
+                -2: 'Excess accuracy requested. (Tolerances too small.)',
+                -3: 'Illegal input detected. (See printed message.)',
+                -4: 'Repeated error test failures. (Check all input.)',
+                -5: 'Repeated convergence failures. (Perhaps bad'
+                    ' Jacobian supplied or wrong choice of MF or tolerances.)',
+                -6: 'Error weight became zero during problem. (Solution'
+                    ' component i vanished, and ATOL or ATOL(i) = 0.)'
+                }
+    supports_run_relax = 1
+    supports_step = 1
+    active_global_handle = 0
+
+    def __init__(self,
+                 method='adams',
+                 with_jacobian=False,
+                 rtol=1e-6, atol=1e-12,
+                 lband=None, uband=None,
+                 order=12,
+                 nsteps=500,
+                 max_step=0.0,  # corresponds to infinite
+                 min_step=0.0,
+                 first_step=0.0,  # determined by solver
+                 ):
+
+        if re.match(method, r'adams', re.I):
+            self.meth = 1
+        elif re.match(method, r'bdf', re.I):
+            self.meth = 2
+        else:
+            raise ValueError('Unknown integration method %s' % method)
+        self.with_jacobian = with_jacobian
+        self.rtol = rtol
+        self.atol = atol
+        self.mu = uband
+        self.ml = lband
+
+        self.order = order
+        self.nsteps = nsteps
+        self.max_step = max_step
+        self.min_step = min_step
+        self.first_step = first_step
+        self.success = 1
+
+        self.initialized = False
+
+    def _determine_mf_and_set_bands(self, has_jac):
+        """
+        Determine the `MF` parameter (Method Flag) for the Fortran subroutine `dvode`.
+
+        In the Fortran code, the legal values of `MF` are:
+            10, 11, 12, 13, 14, 15, 20, 21, 22, 23, 24, 25,
+            -11, -12, -14, -15, -21, -22, -24, -25
+        but this Python wrapper does not use negative values.
+
+        Returns
+
+            mf  = 10*self.meth + miter
+
+        self.meth is the linear multistep method:
+            self.meth == 1:  method="adams"
+            self.meth == 2:  method="bdf"
+
+        miter is the correction iteration method:
+            miter == 0:  Functional iteration; no Jacobian involved.
+            miter == 1:  Chord iteration with user-supplied full Jacobian.
+            miter == 2:  Chord iteration with internally computed full Jacobian.
+            miter == 3:  Chord iteration with internally computed diagonal Jacobian.
+            miter == 4:  Chord iteration with user-supplied banded Jacobian.
+            miter == 5:  Chord iteration with internally computed banded Jacobian.
+
+        Side effects: If either self.mu or self.ml is not None and the other is None,
+        then the one that is None is set to 0.
+        """
+
+        jac_is_banded = self.mu is not None or self.ml is not None
+        if jac_is_banded:
+            if self.mu is None:
+                self.mu = 0
+            if self.ml is None:
+                self.ml = 0
+
+        # has_jac is True if the user provided a Jacobian function.
+        if has_jac:
+            if jac_is_banded:
+                miter = 4
+            else:
+                miter = 1
+        else:
+            if jac_is_banded:
+                if self.ml == self.mu == 0:
+                    miter = 3  # Chord iteration with internal diagonal Jacobian.
+                else:
+                    miter = 5  # Chord iteration with internal banded Jacobian.
+            else:
+                # self.with_jacobian is set by the user in the call to ode.set_integrator.
+                if self.with_jacobian:
+                    miter = 2  # Chord iteration with internal full Jacobian.
+                else:
+                    miter = 0  # Functional iteration; no Jacobian involved.
+
+        mf = 10 * self.meth + miter
+        return mf
+
+    def reset(self, n, has_jac):
+        mf = self._determine_mf_and_set_bands(has_jac)
+
+        if mf == 10:
+            lrw = 20 + 16 * n
+        elif mf in [11, 12]:
+            lrw = 22 + 16 * n + 2 * n * n
+        elif mf == 13:
+            lrw = 22 + 17 * n
+        elif mf in [14, 15]:
+            lrw = 22 + 18 * n + (3 * self.ml + 2 * self.mu) * n
+        elif mf == 20:
+            lrw = 20 + 9 * n
+        elif mf in [21, 22]:
+            lrw = 22 + 9 * n + 2 * n * n
+        elif mf == 23:
+            lrw = 22 + 10 * n
+        elif mf in [24, 25]:
+            lrw = 22 + 11 * n + (3 * self.ml + 2 * self.mu) * n
+        else:
+            raise ValueError('Unexpected mf=%s' % mf)
+
+        if mf % 10 in [0, 3]:
+            liw = 30
+        else:
+            liw = 30 + n
+
+        rwork = zeros((lrw,), float)
+        rwork[4] = self.first_step
+        rwork[5] = self.max_step
+        rwork[6] = self.min_step
+        self.rwork = rwork
+
+        iwork = zeros((liw,), _vode_int_dtype)
+        if self.ml is not None:
+            iwork[0] = self.ml
+        if self.mu is not None:
+            iwork[1] = self.mu
+        iwork[4] = self.order
+        iwork[5] = self.nsteps
+        iwork[6] = 2  # mxhnil
+        self.iwork = iwork
+
+        self.call_args = [self.rtol, self.atol, 1, 1,
+                          self.rwork, self.iwork, mf]
+        self.success = 1
+        self.initialized = False
+
+    def run(self, f, jac, y0, t0, t1, f_params, jac_params):
+        if self.initialized:
+            self.check_handle()
+        else:
+            self.initialized = True
+            self.acquire_new_handle()
+
+        if self.ml is not None and self.ml > 0:
+            # Banded Jacobian. Wrap the user-provided function with one
+            # that pads the Jacobian array with the extra `self.ml` rows
+            # required by the f2py-generated wrapper.
+            jac = _vode_banded_jac_wrapper(jac, self.ml, jac_params)
+
+        args = ((f, jac, y0, t0, t1) + tuple(self.call_args) +
+                (f_params, jac_params))
+        y1, t, istate = self.runner(*args)
+        self.istate = istate
+        if istate < 0:
+            unexpected_istate_msg = 'Unexpected istate={:d}'.format(istate)
+            warnings.warn('{:s}: {:s}'.format(self.__class__.__name__,
+                          self.messages.get(istate, unexpected_istate_msg)))
+            self.success = 0
+        else:
+            self.call_args[3] = 2  # upgrade istate from 1 to 2
+            self.istate = 2
+        return y1, t
+
+    def step(self, *args):
+        itask = self.call_args[2]
+        self.call_args[2] = 2
+        r = self.run(*args)
+        self.call_args[2] = itask
+        return r
+
+    def run_relax(self, *args):
+        itask = self.call_args[2]
+        self.call_args[2] = 3
+        r = self.run(*args)
+        self.call_args[2] = itask
+        return r
+
+
+if vode.runner is not None:
+    IntegratorBase.integrator_classes.append(vode)
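+
+
+# Minimal usage sketch (added for illustration; not part of the original
+# module): `vode` is normally reached through the public
+# `scipy.integrate.ode` interface rather than instantiated directly.
+def _example_vode_usage():
+    from scipy.integrate import ode
+    solver = ode(lambda t, y: -0.5 * y)
+    solver.set_integrator('vode', method='bdf', rtol=1e-8, atol=1e-10)
+    solver.set_initial_value([1.0], 0.0)
+    y1 = solver.integrate(1.0)  # should be close to exp(-0.5)
+    return solver.successful(), y1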
+
+
+class zvode(vode):
+    runner = getattr(_vode, 'zvode', None)
+
+    supports_run_relax = 1
+    supports_step = 1
+    scalar = complex
+    active_global_handle = 0
+
+    def reset(self, n, has_jac):
+        mf = self._determine_mf_and_set_bands(has_jac)
+
+        if mf in (10,):
+            lzw = 15 * n
+        elif mf in (11, 12):
+            lzw = 15 * n + 2 * n ** 2
+        elif mf in (-11, -12):
+            lzw = 15 * n + n ** 2
+        elif mf in (13,):
+            lzw = 16 * n
+        elif mf in (14, 15):
+            lzw = 17 * n + (3 * self.ml + 2 * self.mu) * n
+        elif mf in (-14, -15):
+            lzw = 16 * n + (2 * self.ml + self.mu) * n
+        elif mf in (20,):
+            lzw = 8 * n
+        elif mf in (21, 22):
+            lzw = 8 * n + 2 * n ** 2
+        elif mf in (-21, -22):
+            lzw = 8 * n + n ** 2
+        elif mf in (23,):
+            lzw = 9 * n
+        elif mf in (24, 25):
+            lzw = 10 * n + (3 * self.ml + 2 * self.mu) * n
+        elif mf in (-24, -25):
+            lzw = 9 * n + (2 * self.ml + self.mu) * n
+
+        lrw = 20 + n
+
+        if mf % 10 in (0, 3):
+            liw = 30
+        else:
+            liw = 30 + n
+
+        zwork = zeros((lzw,), complex)
+        self.zwork = zwork
+
+        rwork = zeros((lrw,), float)
+        rwork[4] = self.first_step
+        rwork[5] = self.max_step
+        rwork[6] = self.min_step
+        self.rwork = rwork
+
+        iwork = zeros((liw,), _vode_int_dtype)
+        if self.ml is not None:
+            iwork[0] = self.ml
+        if self.mu is not None:
+            iwork[1] = self.mu
+        iwork[4] = self.order
+        iwork[5] = self.nsteps
+        iwork[6] = 2  # mxhnil
+        self.iwork = iwork
+
+        self.call_args = [self.rtol, self.atol, 1, 1,
+                          self.zwork, self.rwork, self.iwork, mf]
+        self.success = 1
+        self.initialized = False
+
+
+if zvode.runner is not None:
+    IntegratorBase.integrator_classes.append(zvode)
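+
+
+# Hedged illustration (not part of the original module): `zvode` is the
+# complex-valued counterpart of `vode`; the analytic solution of
+# dy/dt = 1j*y with y(0) = 1 is exp(1j*t).
+def _example_zvode_usage():
+    import numpy as np
+    from scipy.integrate import ode
+    solver = ode(lambda t, y: 1j * y)
+    solver.set_integrator('zvode', method='adams')
+    solver.set_initial_value([1.0 + 0.0j], 0.0)
+    return solver.integrate(np.pi)  # should be close to exp(1j*pi) = -1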
+
+
+class dopri5(IntegratorBase):
+    runner = getattr(_dop, 'dopri5', None)
+    name = 'dopri5'
+    supports_solout = True
+
+    messages = {1: 'computation successful',
+                2: 'computation successful (interrupted by solout)',
+                -1: 'input is not consistent',
+                -2: 'larger nsteps is needed',
+                -3: 'step size becomes too small',
+                -4: 'problem is probably stiff (interrupted)',
+                }
+
+    def __init__(self,
+                 rtol=1e-6, atol=1e-12,
+                 nsteps=500,
+                 max_step=0.0,
+                 first_step=0.0,  # determined by solver
+                 safety=0.9,
+                 ifactor=10.0,
+                 dfactor=0.2,
+                 beta=0.0,
+                 method=None,
+                 verbosity=-1,  # no messages if negative
+                 ):
+        self.rtol = rtol
+        self.atol = atol
+        self.nsteps = nsteps
+        self.max_step = max_step
+        self.first_step = first_step
+        self.safety = safety
+        self.ifactor = ifactor
+        self.dfactor = dfactor
+        self.beta = beta
+        self.verbosity = verbosity
+        self.success = 1
+        self.set_solout(None)
+
+    def set_solout(self, solout, complex=False):
+        self.solout = solout
+        self.solout_cmplx = complex
+        if solout is None:
+            self.iout = 0
+        else:
+            self.iout = 1
+
+    def reset(self, n, has_jac):
+        work = zeros((8 * n + 21,), float)
+        work[1] = self.safety
+        work[2] = self.dfactor
+        work[3] = self.ifactor
+        work[4] = self.beta
+        work[5] = self.max_step
+        work[6] = self.first_step
+        self.work = work
+        iwork = zeros((21,), _dop_int_dtype)
+        iwork[0] = self.nsteps
+        iwork[2] = self.verbosity
+        self.iwork = iwork
+        self.call_args = [self.rtol, self.atol, self._solout,
+                          self.iout, self.work, self.iwork]
+        self.success = 1
+
+    def run(self, f, jac, y0, t0, t1, f_params, jac_params):
+        x, y, iwork, istate = self.runner(*((f, t0, y0, t1) +
+                                          tuple(self.call_args) + (f_params,)))
+        self.istate = istate
+        if istate < 0:
+            unexpected_istate_msg = 'Unexpected istate={:d}'.format(istate)
+            warnings.warn('{:s}: {:s}'.format(self.__class__.__name__,
+                          self.messages.get(istate, unexpected_istate_msg)))
+            self.success = 0
+        return y, x
+
+    def _solout(self, nr, xold, x, y, nd, icomp, con):
+        if self.solout is not None:
+            if self.solout_cmplx:
+                y = y[::2] + 1j * y[1::2]
+            return self.solout(x, y)
+        else:
+            return 1
+
+
+if dopri5.runner is not None:
+    IntegratorBase.integrator_classes.append(dopri5)
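+
+
+# Illustrative sketch (not part of the original module): through the public
+# `ode` interface, `dopri5` supports a `solout` callback that is invoked
+# after every successful internal step; returning -1 from it stops the
+# integration early.
+def _example_dopri5_solout():
+    from scipy.integrate import ode
+    steps = []
+
+    def solout(t, y):
+        steps.append(t)
+        return 0  # return -1 instead to abort the integration
+
+    solver = ode(lambda t, y: -y)
+    solver.set_integrator('dopri5', rtol=1e-8)
+    solver.set_solout(solout)
+    solver.set_initial_value([1.0], 0.0)
+    solver.integrate(2.0)
+    return steps  # endpoints of the accepted steps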
+
+
+class dop853(dopri5):
+    runner = getattr(_dop, 'dop853', None)
+    name = 'dop853'
+
+    def __init__(self,
+                 rtol=1e-6, atol=1e-12,
+                 nsteps=500,
+                 max_step=0.0,
+                 first_step=0.0,  # determined by solver
+                 safety=0.9,
+                 ifactor=6.0,
+                 dfactor=0.3,
+                 beta=0.0,
+                 method=None,
+                 verbosity=-1,  # no messages if negative
+                 ):
+        super().__init__(rtol, atol, nsteps, max_step, first_step, safety,
+                         ifactor, dfactor, beta, method, verbosity)
+
+    def reset(self, n, has_jac):
+        work = zeros((11 * n + 21,), float)
+        work[1] = self.safety
+        work[2] = self.dfactor
+        work[3] = self.ifactor
+        work[4] = self.beta
+        work[5] = self.max_step
+        work[6] = self.first_step
+        self.work = work
+        iwork = zeros((21,), _dop_int_dtype)
+        iwork[0] = self.nsteps
+        iwork[2] = self.verbosity
+        self.iwork = iwork
+        self.call_args = [self.rtol, self.atol, self._solout,
+                          self.iout, self.work, self.iwork]
+        self.success = 1
+
+
+if dop853.runner is not None:
+    IntegratorBase.integrator_classes.append(dop853)
+
+
+class lsoda(IntegratorBase):
+    runner = getattr(_lsoda, 'lsoda', None)
+    active_global_handle = 0
+
+    messages = {
+        2: "Integration successful.",
+        -1: "Excess work done on this call (perhaps wrong Dfun type).",
+        -2: "Excess accuracy requested (tolerances too small).",
+        -3: "Illegal input detected (internal error).",
+        -4: "Repeated error test failures (internal error).",
+        -5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).",
+        -6: "Error weight became zero during problem.",
+        -7: "Internal workspace insufficient to finish (internal error)."
+    }
+
+    def __init__(self,
+                 with_jacobian=False,
+                 rtol=1e-6, atol=1e-12,
+                 lband=None, uband=None,
+                 nsteps=500,
+                 max_step=0.0,  # corresponds to infinite
+                 min_step=0.0,
+                 first_step=0.0,  # determined by solver
+                 ixpr=0,
+                 max_hnil=0,
+                 max_order_ns=12,
+                 max_order_s=5,
+                 method=None
+                 ):
+
+        self.with_jacobian = with_jacobian
+        self.rtol = rtol
+        self.atol = atol
+        self.mu = uband
+        self.ml = lband
+
+        self.max_order_ns = max_order_ns
+        self.max_order_s = max_order_s
+        self.nsteps = nsteps
+        self.max_step = max_step
+        self.min_step = min_step
+        self.first_step = first_step
+        self.ixpr = ixpr
+        self.max_hnil = max_hnil
+        self.success = 1
+
+        self.initialized = False
+
+    def reset(self, n, has_jac):
+        # Calculate parameters for the Fortran subroutine lsoda.
+        if has_jac:
+            if self.mu is None and self.ml is None:
+                jt = 1
+            else:
+                if self.mu is None:
+                    self.mu = 0
+                if self.ml is None:
+                    self.ml = 0
+                jt = 4
+        else:
+            if self.mu is None and self.ml is None:
+                jt = 2
+            else:
+                if self.mu is None:
+                    self.mu = 0
+                if self.ml is None:
+                    self.ml = 0
+                jt = 5
+        lrn = 20 + (self.max_order_ns + 4) * n
+        if jt in [1, 2]:
+            lrs = 22 + (self.max_order_s + 4) * n + n * n
+        elif jt in [4, 5]:
+            lrs = 22 + (self.max_order_s + 5 + 2 * self.ml + self.mu) * n
+        else:
+            raise ValueError('Unexpected jt=%s' % jt)
+        lrw = max(lrn, lrs)
+        liw = 20 + n
+        rwork = zeros((lrw,), float)
+        rwork[4] = self.first_step
+        rwork[5] = self.max_step
+        rwork[6] = self.min_step
+        self.rwork = rwork
+        iwork = zeros((liw,), _lsoda_int_dtype)
+        if self.ml is not None:
+            iwork[0] = self.ml
+        if self.mu is not None:
+            iwork[1] = self.mu
+        iwork[4] = self.ixpr
+        iwork[5] = self.nsteps
+        iwork[6] = self.max_hnil
+        iwork[7] = self.max_order_ns
+        iwork[8] = self.max_order_s
+        self.iwork = iwork
+        self.call_args = [self.rtol, self.atol, 1, 1,
+                          self.rwork, self.iwork, jt]
+        self.success = 1
+        self.initialized = False
+
+    def run(self, f, jac, y0, t0, t1, f_params, jac_params):
+        if self.initialized:
+            self.check_handle()
+        else:
+            self.initialized = True
+            self.acquire_new_handle()
+        args = [f, y0, t0, t1] + self.call_args[:-1] + \
+               [jac, self.call_args[-1], f_params, 0, jac_params]
+        y1, t, istate = self.runner(*args)
+        self.istate = istate
+        if istate < 0:
+            unexpected_istate_msg = 'Unexpected istate={:d}'.format(istate)
+            warnings.warn('{:s}: {:s}'.format(self.__class__.__name__,
+                          self.messages.get(istate, unexpected_istate_msg)))
+            self.success = 0
+        else:
+            self.call_args[3] = 2  # upgrade istate from 1 to 2
+            self.istate = 2
+        return y1, t
+
+    def step(self, *args):
+        itask = self.call_args[2]
+        self.call_args[2] = 2
+        r = self.run(*args)
+        self.call_args[2] = itask
+        return r
+
+    def run_relax(self, *args):
+        itask = self.call_args[2]
+        self.call_args[2] = 3
+        r = self.run(*args)
+        self.call_args[2] = itask
+        return r
+
+
+if lsoda.runner is not None:
+    IntegratorBase.integrator_classes.append(lsoda)
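+
+
+# Hedged usage sketch (not part of the original module): 'lsoda' switches
+# automatically between the nonstiff Adams method and the stiff BDF method,
+# so it suits problems whose stiffness is unknown in advance.
+def _example_lsoda_usage():
+    import numpy as np
+    from scipy.integrate import ode
+    # A mildly stiff linear problem: y' = -1000*(y - cos(t)).
+    solver = ode(lambda t, y: -1000.0 * (y - np.cos(t)))
+    solver.set_integrator('lsoda', rtol=1e-6, atol=1e-10)
+    solver.set_initial_value([0.0], 0.0)
+    return solver.integrate(1.0)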
diff --git a/__packaged__/coreml/.python_dependencies/scipy/integrate/_odepack_py.py b/__packaged__/coreml/.python_dependencies/scipy/integrate/_odepack_py.py
new file mode 100644
index 00000000..bfb806aa
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/integrate/_odepack_py.py
@@ -0,0 +1,260 @@
+# Author: Travis Oliphant
+
+__all__ = ['odeint']
+
+import numpy as np
+from . import _odepack
+from copy import copy
+import warnings
+
+
+class ODEintWarning(Warning):
+    pass
+
+
+_msgs = {2: "Integration successful.",
+         1: "Nothing was done; the integration time was 0.",
+         -1: "Excess work done on this call (perhaps wrong Dfun type).",
+         -2: "Excess accuracy requested (tolerances too small).",
+         -3: "Illegal input detected (internal error).",
+         -4: "Repeated error test failures (internal error).",
+         -5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).",
+         -6: "Error weight became zero during problem.",
+         -7: "Internal workspace insufficient to finish (internal error).",
+         -8: "Run terminated (internal error)."
+         }
+
+
+def odeint(func, y0, t, args=(), Dfun=None, col_deriv=0, full_output=0,
+           ml=None, mu=None, rtol=None, atol=None, tcrit=None, h0=0.0,
+           hmax=0.0, hmin=0.0, ixpr=0, mxstep=0, mxhnil=0, mxordn=12,
+           mxords=5, printmessg=0, tfirst=False):
+    """
+    Integrate a system of ordinary differential equations.
+
+    .. note:: For new code, use `scipy.integrate.solve_ivp` to solve a
+              differential equation.
+
+    Solve a system of ordinary differential equations using lsoda from the
+    FORTRAN library odepack.
+
+    Solves the initial value problem for stiff or non-stiff systems
+    of first order ode-s::
+
+        dy/dt = func(y, t, ...)  [or func(t, y, ...)]
+
+    where y can be a vector.
+
+    .. note:: By default, the required order of the first two arguments of
+              `func` are in the opposite order of the arguments in the system
+              definition function used by the `scipy.integrate.ode` class and
+              the function `scipy.integrate.solve_ivp`. To use a function with
+              the signature ``func(t, y, ...)``, the argument `tfirst` must be
+              set to ``True``.
+
+    Parameters
+    ----------
+    func : callable(y, t, ...) or callable(t, y, ...)
+        Computes the derivative of y at t.
+        If the signature is ``callable(t, y, ...)``, then the argument
+        `tfirst` must be set ``True``.
+    y0 : array
+        Initial condition on y (can be a vector).
+    t : array
+        A sequence of time points for which to solve for y. The initial
+        value point should be the first element of this sequence.
+        This sequence must be monotonically increasing or monotonically
+        decreasing; repeated values are allowed.
+    args : tuple, optional
+        Extra arguments to pass to function.
+    Dfun : callable(y, t, ...) or callable(t, y, ...)
+        Gradient (Jacobian) of `func`.
+        If the signature is ``callable(t, y, ...)``, then the argument
+        `tfirst` must be set ``True``.
+    col_deriv : bool, optional
+        True if `Dfun` defines derivatives down columns (faster),
+        otherwise `Dfun` should define derivatives across rows.
+    full_output : bool, optional
+        If True, return a dictionary of optional outputs as the second output.
+    printmessg : bool, optional
+        Whether to print the convergence message
+    tfirst : bool, optional
+        If True, the first two arguments of `func` (and `Dfun`, if given)
+        must be ``t, y`` instead of the default ``y, t``.
+
+        .. versionadded:: 1.1.0
+
+    Returns
+    -------
+    y : array, shape (len(t), len(y0))
+        Array containing the value of y for each desired time in t,
+        with the initial value `y0` in the first row.
+    infodict : dict, only returned if full_output == True
+        Dictionary containing additional output information
+
+        =======  ============================================================
+        key      meaning
+        =======  ============================================================
+        'hu'     vector of step sizes successfully used for each time step
+        'tcur'   vector with the value of t reached for each time step
+                 (will always be at least as large as the input times)
+        'tolsf'  vector of tolerance scale factors, greater than 1.0,
+                 computed when a request for too much accuracy was detected
+        'tsw'    value of t at the time of the last method switch
+                 (given for each time step)
+        'nst'    cumulative number of time steps
+        'nfe'    cumulative number of function evaluations for each time step
+        'nje'    cumulative number of jacobian evaluations for each time step
+        'nqu'    a vector of method orders for each successful step
+        'imxer'  index of the component of largest magnitude in the
+                 weighted local error vector (e / ewt) on an error return, -1
+                 otherwise
+        'lenrw'  the length of the double work array required
+        'leniw'  the length of integer work array required
+        'mused'  a vector of method indicators for each successful time step:
+                 1: adams (nonstiff), 2: bdf (stiff)
+        =======  ============================================================
+
+    Other Parameters
+    ----------------
+    ml, mu : int, optional
+        If either of these is not None, the Jacobian is assumed to be
+        banded. These give the number of lower and upper non-zero
+        diagonals in this banded matrix.
+        For the banded case, `Dfun` should return a matrix whose
+        rows contain the non-zero bands (starting with the lowest diagonal).
+        Thus, the return matrix `jac` from `Dfun` should have shape
+        ``(ml + mu + 1, len(y0))`` when ``ml >= 0`` or ``mu >= 0``.
+        The data in `jac` must be stored such that ``jac[i - j + mu, j]``
+        holds the derivative of the `i`th equation with respect to the `j`th
+        state variable.  If `col_deriv` is True, the transpose of this
+        `jac` must be returned.
+    rtol, atol : float, optional
+        The input parameters `rtol` and `atol` determine the error
+        control performed by the solver.  The solver will control the
+        vector, e, of estimated local errors in y, according to an
+        inequality of the form ``max-norm of (e / ewt) <= 1``,
+        where ewt is a vector of positive error weights computed as
+        ``ewt = rtol * abs(y) + atol``.
+        rtol and atol can be either vectors the same length as y or scalars.
+        Defaults to 1.49012e-8.
+    tcrit : ndarray, optional
+        Vector of critical points (e.g., singularities) where integration
+        care should be taken.
+    h0 : float, (0: solver-determined), optional
+        The step size to be attempted on the first step.
+    hmax : float, (0: solver-determined), optional
+        The maximum absolute step size allowed.
+    hmin : float, (0: solver-determined), optional
+        The minimum absolute step size allowed.
+    ixpr : bool, optional
+        Whether to generate extra printing at method switches.
+    mxstep : int, (0: solver-determined), optional
+        Maximum number of (internally defined) steps allowed for each
+        integration point in t.
+    mxhnil : int, (0: solver-determined), optional
+        Maximum number of messages printed.
+    mxordn : int, (0: solver-determined), optional
+        Maximum order to be allowed for the non-stiff (Adams) method.
+    mxords : int, (0: solver-determined), optional
+        Maximum order to be allowed for the stiff (BDF) method.
+
+    See Also
+    --------
+    solve_ivp : solve an initial value problem for a system of ODEs
+    ode : a more object-oriented integrator based on VODE
+    quad : for finding the area under a curve
+
+    Examples
+    --------
+    The second order differential equation for the angle `theta` of a
+    pendulum acted on by gravity with friction can be written::
+
+        theta''(t) + b*theta'(t) + c*sin(theta(t)) = 0
+
+    where `b` and `c` are positive constants, and a prime (') denotes a
+    derivative. To solve this equation with `odeint`, we must first convert
+    it to a system of first order equations. By defining the angular
+    velocity ``omega(t) = theta'(t)``, we obtain the system::
+
+        theta'(t) = omega(t)
+        omega'(t) = -b*omega(t) - c*sin(theta(t))
+
+    Let `y` be the vector [`theta`, `omega`]. We implement this system
+    in Python as:
+
+    >>> import numpy as np
+    >>> def pend(y, t, b, c):
+    ...     theta, omega = y
+    ...     dydt = [omega, -b*omega - c*np.sin(theta)]
+    ...     return dydt
+    ...
+
+    We assume the constants are `b` = 0.25 and `c` = 5.0:
+
+    >>> b = 0.25
+    >>> c = 5.0
+
+    For initial conditions, we assume the pendulum is nearly vertical
+    with `theta(0)` = `pi` - 0.1, and is initially at rest, so
+    `omega(0)` = 0.  Then the vector of initial conditions is
+
+    >>> y0 = [np.pi - 0.1, 0.0]
+
+    We will generate a solution at 101 evenly spaced samples in the interval
+    0 <= `t` <= 10.  So our array of times is:
+
+    >>> t = np.linspace(0, 10, 101)
+
+    Call `odeint` to generate the solution. To pass the parameters
+    `b` and `c` to `pend`, we give them to `odeint` using the `args`
+    argument.
+
+    >>> from scipy.integrate import odeint
+    >>> sol = odeint(pend, y0, t, args=(b, c))
+
+    The solution is an array with shape (101, 2). The first column
+    is `theta(t)`, and the second is `omega(t)`. The following code
+    plots both components.
+
+    >>> import matplotlib.pyplot as plt
+    >>> plt.plot(t, sol[:, 0], 'b', label='theta(t)')
+    >>> plt.plot(t, sol[:, 1], 'g', label='omega(t)')
+    >>> plt.legend(loc='best')
+    >>> plt.xlabel('t')
+    >>> plt.grid()
+    >>> plt.show()
+    """
+
+    if ml is None:
+        ml = -1  # changed to zero inside function call
+    if mu is None:
+        mu = -1  # changed to zero inside function call
+
+    dt = np.diff(t)
+    if not ((dt >= 0).all() or (dt <= 0).all()):
+        raise ValueError("The values in t must be monotonically increasing "
+                         "or monotonically decreasing; repeated values are "
+                         "allowed.")
+
+    t = copy(t)
+    y0 = copy(y0)
+    output = _odepack.odeint(func, y0, t, args, Dfun, col_deriv, ml, mu,
+                             full_output, rtol, atol, tcrit, h0, hmax, hmin,
+                             ixpr, mxstep, mxhnil, mxordn, mxords,
+                             int(bool(tfirst)))
+    if output[-1] < 0:
+        warning_msg = _msgs[output[-1]] + " Run with full_output = 1 to get quantitative information."
+        warnings.warn(warning_msg, ODEintWarning)
+    elif printmessg:
+        warning_msg = _msgs[output[-1]]
+        warnings.warn(warning_msg, ODEintWarning)
+
+    if full_output:
+        output[1]['message'] = _msgs[output[-1]]
+
+    output = output[:-1]
+    if len(output) == 1:
+        return output[0]
+    else:
+        return output
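+
+
+# Hedged usage sketch (added for illustration; not part of the original
+# module): supplying an analytic Jacobian via `Dfun` and inspecting the
+# `full_output` dictionary.
+def _example_odeint_full_output():
+    import numpy as np
+    f = lambda y, t: -2.0 * y
+    jac = lambda y, t: np.array([[-2.0]])
+    t = np.linspace(0.0, 1.0, 11)
+    sol, info = odeint(f, [1.0], t, Dfun=jac, full_output=True)
+    # info['mused'] reports the method used per step: 1 = Adams, 2 = BDF.
+    return sol, info['mused']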
diff --git a/__packaged__/coreml/.python_dependencies/scipy/integrate/_quad_vec.py b/__packaged__/coreml/.python_dependencies/scipy/integrate/_quad_vec.py
new file mode 100644
index 00000000..fac8193d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/integrate/_quad_vec.py
@@ -0,0 +1,653 @@
+import sys
+import copy
+import heapq
+import collections
+import functools
+
+import numpy as np
+
+from scipy._lib._util import MapWrapper, _FunctionWrapper
+
+
+class LRUDict(collections.OrderedDict):
+    def __init__(self, max_size):
+        self.__max_size = max_size
+
+    def __setitem__(self, key, value):
+        existing_key = (key in self)
+        super().__setitem__(key, value)
+        if existing_key:
+            self.move_to_end(key)
+        elif len(self) > self.__max_size:
+            self.popitem(last=False)
+
+    def update(self, other):
+        # Not needed below
+        raise NotImplementedError()
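+
+
+# Behavioral sketch (added for illustration; not part of the original
+# module): LRUDict keeps at most `max_size` entries and evicts the least
+# recently set/updated key first.
+def _example_lrudict():
+    cache = LRUDict(max_size=2)
+    cache['a'] = 1
+    cache['b'] = 2
+    cache['a'] = 10  # refreshes 'a', so 'b' is now the oldest entry
+    cache['c'] = 3   # exceeds max_size, evicting 'b'
+    assert 'b' not in cache and set(cache) == {'a', 'c'}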
+
+
+class SemiInfiniteFunc:
+    """
+    Argument transform from (start, +-oo) to (0, 1)
+    """
+    def __init__(self, func, start, infty):
+        self._func = func
+        self._start = start
+        self._sgn = -1 if infty < 0 else 1
+
+        # Overflow threshold for the 1/t**2 factor
+        self._tmin = sys.float_info.min**0.5
+
+    def get_t(self, x):
+        z = self._sgn * (x - self._start) + 1
+        if z == 0:
+            # Can happen only if point not in range
+            return np.inf
+        return 1 / z
+
+    def __call__(self, t):
+        if t < self._tmin:
+            return 0.0
+        else:
+            x = self._start + self._sgn * (1 - t) / t
+            f = self._func(x)
+            return self._sgn * (f / t) / t
+
+
+class DoubleInfiniteFunc:
+    """
+    Argument transform from (-oo, oo) to (-1, 1)
+    """
+    def __init__(self, func):
+        self._func = func
+
+        # Overflow threshold for the 1/t**2 factor
+        self._tmin = sys.float_info.min**0.5
+
+    def get_t(self, x):
+        s = -1 if x < 0 else 1
+        return s / (abs(x) + 1)
+
+    def __call__(self, t):
+        if abs(t) < self._tmin:
+            return 0.0
+        else:
+            x = (1 - abs(t)) / t
+            f = self._func(x)
+            return (f / t) / t
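+
+
+# Numerical sanity check (added for illustration; not part of the original
+# module): the transforms above are what let `quad_vec` below handle
+# infinite limits; the Gaussian integral over (-inf, inf) is sqrt(pi).
+def _example_infinite_interval():
+    import numpy as np
+    res, err = quad_vec(lambda x: np.exp(-x ** 2), -np.inf, np.inf)
+    assert abs(res - np.sqrt(np.pi)) < 1e-6
+    return res, err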
+
+
+def _max_norm(x):
+    return np.amax(abs(x))
+
+
+def _get_sizeof(obj):
+    try:
+        return sys.getsizeof(obj)
+    except TypeError:
+        # occurs on pypy
+        if hasattr(obj, '__sizeof__'):
+            return int(obj.__sizeof__())
+        return 64
+
+
+class _Bunch:
+    def __init__(self, **kwargs):
+        self.__keys = kwargs.keys()
+        self.__dict__.update(**kwargs)
+
+    def __repr__(self):
+        return "_Bunch({})".format(", ".join("{}={}".format(k, repr(self.__dict__[k]))
+                                             for k in self.__keys))
+
+
+def quad_vec(f, a, b, epsabs=1e-200, epsrel=1e-8, norm='2', cache_size=100e6, limit=10000,
+             workers=1, points=None, quadrature=None, full_output=False,
+             *, args=()):
+    r"""Adaptive integration of a vector-valued function.
+
+    Parameters
+    ----------
+    f : callable
+        Vector-valued function f(x) to integrate.
+    a : float
+        Initial point.
+    b : float
+        Final point.
+    epsabs : float, optional
+        Absolute tolerance.
+    epsrel : float, optional
+        Relative tolerance.
+    norm : {'max', '2'}, optional
+        Vector norm to use for error estimation.
+    cache_size : int, optional
+        Number of bytes to use for memoization.
+    limit : float or int, optional
+        An upper bound on the number of subintervals used in the adaptive
+        algorithm.
+    workers : int or map-like callable, optional
+        If `workers` is an integer, part of the computation is done in
+        parallel subdivided to this many tasks (using
+        :class:`python:multiprocessing.pool.Pool`).
+        Supply `-1` to use all cores available to the process.
+        Alternatively, supply a map-like callable, such as
+        :meth:`python:multiprocessing.pool.Pool.map` for evaluating the
+        population in parallel.
+        This evaluation is carried out as ``workers(func, iterable)``.
+    points : list, optional
+        List of additional breakpoints.
+    quadrature : {'gk21', 'gk15', 'trapezoid'}, optional
+        Quadrature rule to use on subintervals.
+        Options: 'gk21' (Gauss-Kronrod 21-point rule),
+        'gk15' (Gauss-Kronrod 15-point rule),
+        'trapezoid' (composite trapezoid rule).
+        Default: 'gk21' for finite intervals and 'gk15' for (semi-)infinite
+        intervals.
+    full_output : bool, optional
+        Return an additional ``info`` dictionary.
+    args : tuple, optional
+        Extra arguments to pass to function, if any.
+
+        .. versionadded:: 1.8.0
+
+    Returns
+    -------
+    res : {float, array-like}
+        Estimate for the result
+    err : float
+        Error estimate for the result in the given norm
+    info : dict
+        Returned only when ``full_output=True``.
+        Info dictionary, an object with the attributes:
+
+            success : bool
+                Whether integration reached target precision.
+            status : int
+                Indicator for convergence, success (0),
+                failure (1), and failure due to rounding error (2).
+            neval : int
+                Number of function evaluations.
+            intervals : ndarray, shape (num_intervals, 2)
+                Start and end points of subdivision intervals.
+            integrals : ndarray, shape (num_intervals, ...)
+                Integral for each interval.
+                Note that at most ``cache_size`` values are recorded,
+                and the array may contain *nan* for missing items.
+            errors : ndarray, shape (num_intervals,)
+                Estimated integration error for each interval.
+
+    Notes
+    -----
+    The algorithm mainly follows the implementation of QUADPACK's
+    DQAG* algorithms, implementing global error control and adaptive
+    subdivision.
+
+    The algorithm here has some differences to the QUADPACK approach:
+
+    Instead of subdividing one interval at a time, the algorithm
+    subdivides N intervals with largest errors at once. This enables
+    (partial) parallelization of the integration.
+
+    The logic of subdividing "next largest" intervals first is then
+    not implemented, and we rely on the above extension to avoid
+    concentrating on "small" intervals only.
+
+    The Wynn epsilon table extrapolation is not used (QUADPACK uses it
+    for infinite intervals). This is because the algorithm here is
+    supposed to work on vector-valued functions, in a user-specified
+    norm, and the extension of the epsilon algorithm to this case does
+    not appear to be widely agreed. For max-norm, elementwise Wynn
+    epsilon would be possible, but it is not done here, in the hope
+    that epsilon extrapolation is mainly useful in special cases.
+
+    References
+    ----------
+    [1] R. Piessens, E. de Doncker, QUADPACK (1983).
+
+    Examples
+    --------
+    We can compute integrations of a vector-valued function:
+
+    >>> from scipy.integrate import quad_vec
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> alpha = np.linspace(0.0, 2.0, num=30)
+    >>> f = lambda x: x**alpha
+    >>> x0, x1 = 0, 2
+    >>> y, err = quad_vec(f, x0, x1)
+    >>> plt.plot(alpha, y)
+    >>> plt.xlabel(r"$\alpha$")
+    >>> plt.ylabel(r"$\int_{0}^{2} x^\alpha dx$")
+    >>> plt.show()
+
+    """
+    a = float(a)
+    b = float(b)
+
+    if args:
+        if not isinstance(args, tuple):
+            args = (args,)
+
+        # create a wrapped function to allow the use of map and Pool.map
+        f = _FunctionWrapper(f, args)
+
+    # Use simple transformations to deal with integrals over infinite
+    # intervals.
+    kwargs = dict(epsabs=epsabs,
+                  epsrel=epsrel,
+                  norm=norm,
+                  cache_size=cache_size,
+                  limit=limit,
+                  workers=workers,
+                  points=points,
+                  quadrature='gk15' if quadrature is None else quadrature,
+                  full_output=full_output)
+    if np.isfinite(a) and np.isinf(b):
+        f2 = SemiInfiniteFunc(f, start=a, infty=b)
+        if points is not None:
+            kwargs['points'] = tuple(f2.get_t(xp) for xp in points)
+        return quad_vec(f2, 0, 1, **kwargs)
+    elif np.isfinite(b) and np.isinf(a):
+        f2 = SemiInfiniteFunc(f, start=b, infty=a)
+        if points is not None:
+            kwargs['points'] = tuple(f2.get_t(xp) for xp in points)
+        res = quad_vec(f2, 0, 1, **kwargs)
+        return (-res[0],) + res[1:]
+    elif np.isinf(a) and np.isinf(b):
+        sgn = -1 if b < a else 1
+
+        # NB. explicitly split integral at t=0, which separates
+        # the positive and negative sides
+        f2 = DoubleInfiniteFunc(f)
+        if points is not None:
+            kwargs['points'] = (0,) + tuple(f2.get_t(xp) for xp in points)
+        else:
+            kwargs['points'] = (0,)
+
+        if a != b:
+            res = quad_vec(f2, -1, 1, **kwargs)
+        else:
+            res = quad_vec(f2, 1, 1, **kwargs)
+
+        return (res[0]*sgn,) + res[1:]
+    elif not (np.isfinite(a) and np.isfinite(b)):
+        raise ValueError("invalid integration bounds a={}, b={}".format(a, b))
+
+    norm_funcs = {
+        None: _max_norm,
+        'max': _max_norm,
+        '2': np.linalg.norm
+    }
+    if callable(norm):
+        norm_func = norm
+    else:
+        norm_func = norm_funcs[norm]
+
+    parallel_count = 128
+    min_intervals = 2
+
+    try:
+        _quadrature = {None: _quadrature_gk21,
+                       'gk21': _quadrature_gk21,
+                       'gk15': _quadrature_gk15,
+                       'trapz': _quadrature_trapezoid,  # alias for backcompat
+                       'trapezoid': _quadrature_trapezoid}[quadrature]
+    except KeyError as e:
+        raise ValueError("unknown quadrature {!r}".format(quadrature)) from e
+
+    # Initial interval set
+    if points is None:
+        initial_intervals = [(a, b)]
+    else:
+        prev = a
+        initial_intervals = []
+        for p in sorted(points):
+            p = float(p)
+            if not (a < p < b) or p == prev:
+                continue
+            initial_intervals.append((prev, p))
+            prev = p
+        initial_intervals.append((prev, b))
+
+    global_integral = None
+    global_error = None
+    rounding_error = None
+    interval_cache = None
+    intervals = []
+    neval = 0
+
+    for x1, x2 in initial_intervals:
+        ig, err, rnd = _quadrature(x1, x2, f, norm_func)
+        neval += _quadrature.num_eval
+
+        if global_integral is None:
+            if isinstance(ig, (float, complex)):
+                # Specialize for scalars
+                if norm_func in (_max_norm, np.linalg.norm):
+                    norm_func = abs
+
+            global_integral = ig
+            global_error = float(err)
+            rounding_error = float(rnd)
+
+            cache_count = cache_size // _get_sizeof(ig)
+            interval_cache = LRUDict(cache_count)
+        else:
+            global_integral += ig
+            global_error += err
+            rounding_error += rnd
+
+        interval_cache[(x1, x2)] = copy.copy(ig)
+        intervals.append((-err, x1, x2))
+
+    heapq.heapify(intervals)
+
+    CONVERGED = 0
+    NOT_CONVERGED = 1
+    ROUNDING_ERROR = 2
+    NOT_A_NUMBER = 3
+
+    status_msg = {
+        CONVERGED: "Target precision reached.",
+        NOT_CONVERGED: "Target precision not reached.",
+        ROUNDING_ERROR: "Target precision could not be reached due to rounding error.",
+        NOT_A_NUMBER: "Non-finite values encountered."
+    }
+
+    # Process intervals
+    with MapWrapper(workers) as mapwrapper:
+        ier = NOT_CONVERGED
+
+        while intervals and len(intervals) < limit:
+            # Select intervals with largest errors for subdivision
+            tol = max(epsabs, epsrel*norm_func(global_integral))
+
+            to_process = []
+            err_sum = 0
+
+            for j in range(parallel_count):
+                if not intervals:
+                    break
+
+                if j > 0 and err_sum > global_error - tol/8:
+                    # avoid unnecessary parallel splitting
+                    break
+
+                interval = heapq.heappop(intervals)
+
+                neg_old_err, a, b = interval
+                old_int = interval_cache.pop((a, b), None)
+                to_process.append(((-neg_old_err, a, b, old_int), f, norm_func, _quadrature))
+                err_sum += -neg_old_err
+
+            # Subdivide intervals
+            for dint, derr, dround_err, subint, dneval in mapwrapper(_subdivide_interval, to_process):
+                neval += dneval
+                global_integral += dint
+                global_error += derr
+                rounding_error += dround_err
+                for x in subint:
+                    x1, x2, ig, err = x
+                    interval_cache[(x1, x2)] = ig
+                    heapq.heappush(intervals, (-err, x1, x2))
+
+            # Termination check
+            if len(intervals) >= min_intervals:
+                tol = max(epsabs, epsrel*norm_func(global_integral))
+                if global_error < tol/8:
+                    ier = CONVERGED
+                    break
+                if global_error < rounding_error:
+                    ier = ROUNDING_ERROR
+                    break
+
+            if not (np.isfinite(global_error) and np.isfinite(rounding_error)):
+                ier = NOT_A_NUMBER
+                break
+
+    res = global_integral
+    err = global_error + rounding_error
+
+    if full_output:
+        res_arr = np.asarray(res)
+        dummy = np.full(res_arr.shape, np.nan, dtype=res_arr.dtype)
+        integrals = np.array([interval_cache.get((z[1], z[2]), dummy)
+                              for z in intervals], dtype=res_arr.dtype)
+        errors = np.array([-z[0] for z in intervals])
+        intervals = np.array([[z[1], z[2]] for z in intervals])
+
+        info = _Bunch(neval=neval,
+                      success=(ier == CONVERGED),
+                      status=ier,
+                      message=status_msg[ier],
+                      intervals=intervals,
+                      integrals=integrals,
+                      errors=errors)
+        return (res, err, info)
+    else:
+        return (res, err)
+
+
+def _subdivide_interval(args):
+    interval, f, norm_func, _quadrature = args
+    old_err, a, b, old_int = interval
+
+    c = 0.5 * (a + b)
+
+    # Left-hand side
+    if getattr(_quadrature, 'cache_size', 0) > 0:
+        f = functools.lru_cache(_quadrature.cache_size)(f)
+
+    s1, err1, round1 = _quadrature(a, c, f, norm_func)
+    dneval = _quadrature.num_eval
+    s2, err2, round2 = _quadrature(c, b, f, norm_func)
+    dneval += _quadrature.num_eval
+    if old_int is None:
+        old_int, _, _ = _quadrature(a, b, f, norm_func)
+        dneval += _quadrature.num_eval
+
+    if getattr(_quadrature, 'cache_size', 0) > 0:
+        dneval = f.cache_info().misses
+
+    dint = s1 + s2 - old_int
+    derr = err1 + err2 - old_err
+    dround_err = round1 + round2
+
+    subintervals = ((a, c, s1, err1), (c, b, s2, err2))
+    return dint, derr, dround_err, subintervals, dneval
+
+
+def _quadrature_trapezoid(x1, x2, f, norm_func):
+    """
+    Composite trapezoid quadrature
+    """
+    x3 = 0.5*(x1 + x2)
+    f1 = f(x1)
+    f2 = f(x2)
+    f3 = f(x3)
+
+    s2 = 0.25 * (x2 - x1) * (f1 + 2*f3 + f2)
+
+    round_err = 0.25 * abs(x2 - x1) * (float(norm_func(f1))
+                                       + 2*float(norm_func(f3))
+                                       + float(norm_func(f2))) * 2e-16
+
+    s1 = 0.5 * (x2 - x1) * (f1 + f2)
+    err = 1/3 * float(norm_func(s1 - s2))
+    return s2, err, round_err
+
+
+_quadrature_trapezoid.cache_size = 3 * 3
+_quadrature_trapezoid.num_eval = 3
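+
+
+# Worked check (added for illustration; not part of the original module):
+# for f(x) = x**2 on [0, 1], the one-panel trapezoid rule gives s1 = 0.5 and
+# the two-panel rule s2 = 0.375, so the Richardson-style estimate
+# |s1 - s2| / 3 coincides with the true error of s2 (both equal 1/24).
+def _example_trapezoid_error():
+    s2, err, round_err = _quadrature_trapezoid(0.0, 1.0, lambda x: x * x, abs)
+    assert abs(s2 - 0.375) < 1e-15
+    assert abs(err - abs(0.375 - 1.0 / 3.0)) < 1e-15
+    return s2, err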
+
+
+def _quadrature_gk(a, b, f, norm_func, x, w, v):
+    """
+    Generic Gauss-Kronrod quadrature
+    """
+
+    fv = [0.0]*len(x)
+
+    c = 0.5 * (a + b)
+    h = 0.5 * (b - a)
+
+    # Gauss-Kronrod
+    s_k = 0.0
+    s_k_abs = 0.0
+    for i in range(len(x)):
+        ff = f(c + h*x[i])
+        fv[i] = ff
+
+        vv = v[i]
+
+        # \int f(x)
+        s_k += vv * ff
+        # \int |f(x)|
+        s_k_abs += vv * abs(ff)
+
+    # Gauss
+    s_g = 0.0
+    for i in range(len(w)):
+        s_g += w[i] * fv[2*i + 1]
+
+    # Quadrature of abs-deviation from average
+    s_k_dabs = 0.0
+    y0 = s_k / 2.0
+    for i in range(len(x)):
+        # \int |f(x) - y0|
+        s_k_dabs += v[i] * abs(fv[i] - y0)
+
+    # Use similar error estimation as quadpack
+    err = float(norm_func((s_k - s_g) * h))
+    dabs = float(norm_func(s_k_dabs * h))
+    if dabs != 0 and err != 0:
+        err = dabs * min(1.0, (200 * err / dabs)**1.5)
+
+    eps = sys.float_info.epsilon
+    round_err = float(norm_func(50 * eps * h * s_k_abs))
+
+    if round_err > sys.float_info.min:
+        err = max(err, round_err)
+
+    return h * s_k, err, round_err
+
+
+def _quadrature_gk21(a, b, f, norm_func):
+    """
+    Gauss-Kronrod 21 quadrature with error estimate
+    """
+    # Gauss-Kronrod points
+    x = (0.995657163025808080735527280689003,
+         0.973906528517171720077964012084452,
+         0.930157491355708226001207180059508,
+         0.865063366688984510732096688423493,
+         0.780817726586416897063717578345042,
+         0.679409568299024406234327365114874,
+         0.562757134668604683339000099272694,
+         0.433395394129247190799265943165784,
+         0.294392862701460198131126603103866,
+         0.148874338981631210884826001129720,
+         0,
+         -0.148874338981631210884826001129720,
+         -0.294392862701460198131126603103866,
+         -0.433395394129247190799265943165784,
+         -0.562757134668604683339000099272694,
+         -0.679409568299024406234327365114874,
+         -0.780817726586416897063717578345042,
+         -0.865063366688984510732096688423493,
+         -0.930157491355708226001207180059508,
+         -0.973906528517171720077964012084452,
+         -0.995657163025808080735527280689003)
+
+    # 10-point weights
+    w = (0.066671344308688137593568809893332,
+         0.149451349150580593145776339657697,
+         0.219086362515982043995534934228163,
+         0.269266719309996355091226921569469,
+         0.295524224714752870173892994651338,
+         0.295524224714752870173892994651338,
+         0.269266719309996355091226921569469,
+         0.219086362515982043995534934228163,
+         0.149451349150580593145776339657697,
+         0.066671344308688137593568809893332)
+
+    # 21-point weights
+    v = (0.011694638867371874278064396062192,
+         0.032558162307964727478818972459390,
+         0.054755896574351996031381300244580,
+         0.075039674810919952767043140916190,
+         0.093125454583697605535065465083366,
+         0.109387158802297641899210590325805,
+         0.123491976262065851077958109831074,
+         0.134709217311473325928054001771707,
+         0.142775938577060080797094273138717,
+         0.147739104901338491374841515972068,
+         0.149445554002916905664936468389821,
+         0.147739104901338491374841515972068,
+         0.142775938577060080797094273138717,
+         0.134709217311473325928054001771707,
+         0.123491976262065851077958109831074,
+         0.109387158802297641899210590325805,
+         0.093125454583697605535065465083366,
+         0.075039674810919952767043140916190,
+         0.054755896574351996031381300244580,
+         0.032558162307964727478818972459390,
+         0.011694638867371874278064396062192)
+
+    return _quadrature_gk(a, b, f, norm_func, x, w, v)
+
+
+_quadrature_gk21.num_eval = 21
+
+
+def _quadrature_gk15(a, b, f, norm_func):
+    """
+    Gauss-Kronrod 15 quadrature with error estimate
+    """
+    # Gauss-Kronrod points
+    x = (0.991455371120812639206854697526329,
+         0.949107912342758524526189684047851,
+         0.864864423359769072789712788640926,
+         0.741531185599394439863864773280788,
+         0.586087235467691130294144838258730,
+         0.405845151377397166906606412076961,
+         0.207784955007898467600689403773245,
+         0.000000000000000000000000000000000,
+         -0.207784955007898467600689403773245,
+         -0.405845151377397166906606412076961,
+         -0.586087235467691130294144838258730,
+         -0.741531185599394439863864773280788,
+         -0.864864423359769072789712788640926,
+         -0.949107912342758524526189684047851,
+         -0.991455371120812639206854697526329)
+
+    # 7-point weights
+    w = (0.129484966168869693270611432679082,
+         0.279705391489276667901467771423780,
+         0.381830050505118944950369775488975,
+         0.417959183673469387755102040816327,
+         0.381830050505118944950369775488975,
+         0.279705391489276667901467771423780,
+         0.129484966168869693270611432679082)
+
+    # 15-point weights
+    v = (0.022935322010529224963732008058970,
+         0.063092092629978553290700663189204,
+         0.104790010322250183839876322541518,
+         0.140653259715525918745189590510238,
+         0.169004726639267902826583426598550,
+         0.190350578064785409913256402421014,
+         0.204432940075298892414161999234649,
+         0.209482141084727828012999174891714,
+         0.204432940075298892414161999234649,
+         0.190350578064785409913256402421014,
+         0.169004726639267902826583426598550,
+         0.140653259715525918745189590510238,
+         0.104790010322250183839876322541518,
+         0.063092092629978553290700663189204,
+         0.022935322010529224963732008058970)
+
+    return _quadrature_gk(a, b, f, norm_func, x, w, v)
+
+
+_quadrature_gk15.num_eval = 15
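+
+
+# Sanity check (added for illustration; not part of the original module):
+# a 15-point Gauss-Kronrod rule integrates low-order polynomials essentially
+# exactly, so both the result and the error estimate should be tiny here.
+def _example_gk15_exactness():
+    res, err, round_err = _quadrature_gk15(0.0, 2.0, lambda x: x ** 3, abs)
+    assert abs(res - 4.0) < 1e-12  # integral of x**3 over [0, 2] is 4
+    assert err < 1e-10
+    return res, err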
diff --git a/__packaged__/coreml/.python_dependencies/scipy/integrate/_quadpack_py.py b/__packaged__/coreml/.python_dependencies/scipy/integrate/_quadpack_py.py
new file mode 100644
index 00000000..8d167456
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/integrate/_quadpack_py.py
@@ -0,0 +1,1244 @@
+# Author: Travis Oliphant 2001
+# Author: Nathan Woods 2013 (nquad &c)
+import sys
+import warnings
+from functools import partial
+
+from . import _quadpack
+import numpy as np
+from numpy import Inf
+
+__all__ = ["quad", "dblquad", "tplquad", "nquad", "IntegrationWarning"]
+
+
+error = _quadpack.error
+
+class IntegrationWarning(UserWarning):
+    """
+    Warning on issues during integration.
+    """
+    pass
+
+
+def quad(func, a, b, args=(), full_output=0, epsabs=1.49e-8, epsrel=1.49e-8,
+         limit=50, points=None, weight=None, wvar=None, wopts=None, maxp1=50,
+         limlst=50, complex_func=False):
+    """
+    Compute a definite integral.
+
+    Integrate func from `a` to `b` (possibly infinite interval) using a
+    technique from the Fortran library QUADPACK.
+
+    Parameters
+    ----------
+    func : {function, scipy.LowLevelCallable}
+        A Python function or method to integrate. If `func` takes many
+        arguments, it is integrated along the axis corresponding to the
+        first argument.
+
+        If the user desires improved integration performance, then `func` may
+        be a `scipy.LowLevelCallable` with one of the signatures::
+
+            double func(double x)
+            double func(double x, void *user_data)
+            double func(int n, double *xx)
+            double func(int n, double *xx, void *user_data)
+
+        The ``user_data`` is the data contained in the `scipy.LowLevelCallable`.
+        In the call forms with ``xx``,  ``n`` is the length of the ``xx``
+        array which contains ``xx[0] == x`` and the rest of the items are
+        numbers contained in the ``args`` argument of quad.
+
+        In addition, certain ctypes call signatures are supported for
+        backward compatibility, but those should not be used in new code.
+    a : float
+        Lower limit of integration (use -numpy.inf for -infinity).
+    b : float
+        Upper limit of integration (use numpy.inf for +infinity).
+    args : tuple, optional
+        Extra arguments to pass to `func`.
+    full_output : int, optional
+        Non-zero to return a dictionary of integration information.
+        If non-zero, warning messages are also suppressed and the
+        message is appended to the output tuple.
+    complex_func : bool, optional
+        Indicate if the function's (`func`) return type is real
+        (``complex_func=False``: default) or complex (``complex_func=True``).
+        In both cases, the function's argument is real.
+        If full_output is also non-zero, the `infodict`, `message`, and
+        `explain` for the real and complex components are returned in
+        a dictionary with keys "real output" and "imag output".
+
+    Returns
+    -------
+    y : float
+        The integral of func from `a` to `b`.
+    abserr : float
+        An estimate of the absolute error in the result.
+    infodict : dict
+        A dictionary containing additional information.
+    message
+        A convergence message.
+    explain
+        Appended only with 'cos' or 'sin' weighting and infinite
+        integration limits, it contains an explanation of the codes in
+        infodict['ierlst']
+
+    Other Parameters
+    ----------------
+    epsabs : float or int, optional
+        Absolute error tolerance. Default is 1.49e-8. `quad` tries to obtain
+        an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``
+        where ``i`` = integral of `func` from `a` to `b`, and ``result`` is the
+        numerical approximation. See `epsrel` below.
+    epsrel : float or int, optional
+        Relative error tolerance. Default is 1.49e-8.
+        If ``epsabs <= 0``, `epsrel` must be greater than both 5e-29
+        and ``50 * (machine epsilon)``. See `epsabs` above.
+    limit : float or int, optional
+        An upper bound on the number of subintervals used in the adaptive
+        algorithm.
+    points : sequence of floats or ints, optional
+        A sequence of break points in the bounded integration interval
+        where local difficulties of the integrand may occur (e.g.,
+        singularities, discontinuities). The sequence does not have
+        to be sorted. Note that this option cannot be used in conjunction
+        with ``weight``.
+    weight : float or int, optional
+        String indicating weighting function. Full explanation for this
+        and the remaining arguments can be found below.
+    wvar : optional
+        Variables for use with weighting functions.
+    wopts : optional
+        Optional input for reusing Chebyshev moments.
+    maxp1 : float or int, optional
+        An upper bound on the number of Chebyshev moments.
+    limlst : int, optional
+        Upper bound on the number of cycles (>=3) for use with a sinusoidal
+        weighting and an infinite end-point.
+
+    See Also
+    --------
+    dblquad : double integral
+    tplquad : triple integral
+    nquad : n-dimensional integrals (uses `quad` recursively)
+    fixed_quad : fixed-order Gaussian quadrature
+    quadrature : adaptive Gaussian quadrature
+    odeint : ODE integrator
+    ode : ODE integrator
+    simpson : integrator for sampled data
+    romb : integrator for sampled data
+    scipy.special : for coefficients and roots of orthogonal polynomials
+
+    Notes
+    -----
+
+    **Extra information for quad() inputs and outputs**
+
+    If full_output is non-zero, then the third output argument
+    (infodict) is a dictionary with entries as tabulated below. For
+    infinite limits, the range is transformed to (0,1) and the
+    optional outputs are given with respect to this transformed range.
+    Let M be the input argument limit and let K be infodict['last'].
+    The entries are:
+
+    'neval'
+        The number of function evaluations.
+    'last'
+        The number, K, of subintervals produced in the subdivision process.
+    'alist'
+        A rank-1 array of length M, the first K elements of which are the
+        left end points of the subintervals in the partition of the
+        integration range.
+    'blist'
+        A rank-1 array of length M, the first K elements of which are the
+        right end points of the subintervals.
+    'rlist'
+        A rank-1 array of length M, the first K elements of which are the
+        integral approximations on the subintervals.
+    'elist'
+        A rank-1 array of length M, the first K elements of which are the
+        moduli of the absolute error estimates on the subintervals.
+    'iord'
+        A rank-1 integer array of length M, the first L elements of
+        which are pointers to the error estimates over the subintervals
+        with ``L=K`` if ``K<=M/2+2`` or ``L=M+1-K`` otherwise. Let I be the
+        sequence ``infodict['iord']`` and let E be the sequence
+        ``infodict['elist']``.  Then ``E[I[1]], ..., E[I[L]]`` forms a
+        decreasing sequence.
+
+    If the input argument points is provided (i.e., it is not None),
+    the following additional outputs are placed in the output
+    dictionary. Assume the points sequence is of length P.
+
+    'pts'
+        A rank-1 array of length P+2 containing the integration limits
+        and the break points of the intervals in ascending order.
+        This is an array giving the subintervals over which integration
+        will occur.
+    'level'
+        A rank-1 integer array of length M (=limit), containing the
+        subdivision levels of the subintervals, i.e., if (aa,bb) is a
+        subinterval of ``(pts[1], pts[2])`` where ``pts[0]`` and ``pts[2]``
+        are adjacent elements of ``infodict['pts']``, then (aa,bb) has level l
+        if ``|bb-aa| = |pts[2]-pts[1]| * 2**(-l)``.
+    'ndin'
+        A rank-1 integer array of length P+2. After the first integration
+        over the intervals (pts[1], pts[2]), the error estimates over some
+        of the intervals may have been increased artificially in order to
+        put their subdivision forward. This array has ones in slots
+        corresponding to the subintervals for which this happens.
+
+    **Weighting the integrand**
+
+    The input variables, *weight* and *wvar*, are used to weight the
+    integrand by a select list of functions. Different integration
+    methods are used to compute the integral with these weighting
+    functions, and these do not support specifying break points. The
+    possible values of weight and the corresponding weighting functions are:
+
+    ==========  ===================================   =====================
+    ``weight``  Weight function used                  ``wvar``
+    ==========  ===================================   =====================
+    'cos'       cos(w*x)                              wvar = w
+    'sin'       sin(w*x)                              wvar = w
+    'alg'       g(x) = ((x-a)**alpha)*((b-x)**beta)   wvar = (alpha, beta)
+    'alg-loga'  g(x)*log(x-a)                         wvar = (alpha, beta)
+    'alg-logb'  g(x)*log(b-x)                         wvar = (alpha, beta)
+    'alg-log'   g(x)*log(x-a)*log(b-x)                wvar = (alpha, beta)
+    'cauchy'    1/(x-c)                               wvar = c
+    ==========  ===================================   =====================
+
+    wvar holds the parameter w, (alpha, beta), or c depending on the weight
+    selected. In these expressions, a and b are the integration limits.
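+
+    For instance, a minimal sketch of a sine-weighted integral,
+    :math:`\\int^{\\pi}_0 x \\sin(2 x) dx` (which equals :math:`-\\pi/2`),
+    using the ``weight`` and ``wvar`` names from the table above:
+
+    >>> import numpy as np
+    >>> from scipy import integrate
+    >>> res, err = integrate.quad(lambda x: x, 0, np.pi, weight='sin', wvar=2)
+    >>> bool(np.isclose(res, -np.pi / 2))
+    True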
+
+    For the 'cos' and 'sin' weighting, additional inputs and outputs are
+    available.
+
+    For finite integration limits, the integration is performed using a
+    Clenshaw-Curtis method which uses Chebyshev moments. For repeated
+    calculations, these moments are saved in the output dictionary:
+
+    'momcom'
+        The maximum level of Chebyshev moments that have been computed,
+        i.e., if ``M_c`` is ``infodict['momcom']`` then the moments have been
+        computed for intervals of length ``|b-a| * 2**(-l)``,
+        ``l=0,1,...,M_c``.
+    'nnlog'
+        A rank-1 integer array of length M(=limit), containing the
+        subdivision levels of the subintervals, i.e., an element of this
+        array is equal to l if the corresponding subinterval is
+        ``|b-a|* 2**(-l)``.
+    'chebmo'
+        A rank-2 array of shape (25, maxp1) containing the computed
+        Chebyshev moments. These can be passed on to an integration
+        over the same interval by passing this array as the second
+        element of the sequence wopts and passing infodict['momcom'] as
+        the first element.
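+
+    As a minimal sketch of this reuse, following the description above (the
+    integrand and the ``wvar`` value here are arbitrary placeholders):
+
+    >>> import numpy as np
+    >>> from scipy import integrate
+    >>> res, err, info = integrate.quad(np.cos, 0, 1, weight='cos', wvar=3,
+    ...                                 full_output=True)
+    >>> res2, err2 = integrate.quad(np.cos, 0, 1, weight='cos', wvar=3,
+    ...                             wopts=(info['momcom'], info['chebmo']))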
+
+    If one of the integration limits is infinite, then a Fourier integral is
+    computed (assuming :math:`w \\neq 0`). If full_output is 1 and a numerical error
+    is encountered, besides the error message attached to the output tuple,
+    a dictionary is also appended to the output tuple which translates the
+    error codes in the array ``info['ierlst']`` to English messages. The
+    output information dictionary contains the following entries instead of
+    'last', 'alist', 'blist', 'rlist', and 'elist':
+
+    'lst'
+        The number of subintervals needed for the integration (call it ``K_f``).
+    'rslst'
+        A rank-1 array of length M_f=limlst, whose first ``K_f`` elements
+        contain the integral contribution over the interval
+        ``(a+(k-1)c, a+kc)`` where ``c = (2*floor(|w|) + 1) * pi / |w|``
+        and ``k=1,2,...,K_f``.
+    'erlst'
+        A rank-1 array of length ``M_f`` containing the error estimate
+        corresponding to the interval in the same position in
+        ``infodict['rslst']``.
+    'ierlst'
+        A rank-1 integer array of length ``M_f`` containing an error flag
+        corresponding to the interval in the same position in
+        ``infodict['rslst']``.  See the explanation dictionary (last entry
+        in the output tuple) for the meaning of the codes.
+
+
+    **Details of QUADPACK level routines**
+
+    `quad` calls routines from the FORTRAN library QUADPACK. This section
+    provides details on the conditions for each routine to be called and a
+    short description of each routine. The routine called depends on
+    `weight`, `points` and the integration limits `a` and `b`.
+
+    ================  ==============  ==========  =====================
+    QUADPACK routine  `weight`        `points`    infinite bounds
+    ================  ==============  ==========  =====================
+    qagse             None            No          No
+    qagie             None            No          Yes
+    qagpe             None            Yes         No
+    qawoe             'sin', 'cos'    No          No
+    qawfe             'sin', 'cos'    No          either `a` or `b`
+    qawse             'alg*'          No          No
+    qawce             'cauchy'        No          No
+    ================  ==============  ==========  =====================
+
+    The following provides a short description from [1]_ for each
+    routine.
+
+    qagse
+        is an integrator based on globally adaptive interval
+        subdivision in connection with extrapolation, which will
+        eliminate the effects of integrand singularities of
+        several types.
+    qagie
+        handles integration over infinite intervals. The infinite range is
+        mapped onto a finite interval and subsequently the same strategy as
+        in ``QAGS`` is applied.
+    qagpe
+        serves the same purposes as QAGS, but also allows the
+        user to provide explicit information about the location
+        and type of trouble-spots i.e. the abscissae of internal
+        singularities, discontinuities and other difficulties of
+        the integrand function.
+    qawoe
+        is an integrator for the evaluation of
+        :math:`\\int^b_a \\cos(\\omega x)f(x)dx` or
+        :math:`\\int^b_a \\sin(\\omega x)f(x)dx`
+        over a finite interval [a,b], where :math:`\\omega` and :math:`f`
+        are specified by the user. The rule evaluation component is based
+        on the modified Clenshaw-Curtis technique.
+
+        An adaptive subdivision scheme is used in connection
+        with an extrapolation procedure, which is a modification
+        of that in ``QAGS`` and allows the algorithm to deal with
+        singularities in :math:`f(x)`.
+    qawfe
+        calculates the Fourier transform
+        :math:`\\int^\\infty_a \\cos(\\omega x)f(x)dx` or
+        :math:`\\int^\\infty_a \\sin(\\omega x)f(x)dx`
+        for user-provided :math:`\\omega` and :math:`f`. The procedure of
+        ``QAWO`` is applied on successive finite intervals, and convergence
+        acceleration by means of the :math:`\\varepsilon`-algorithm is applied
+        to the series of integral approximations.
+    qawse
+        approximates :math:`\\int^b_a w(x)f(x)dx`, with :math:`a < b` where
+        :math:`w(x) = (x-a)^{\\alpha}(b-x)^{\\beta}v(x)` with
+        :math:`\\alpha,\\beta > -1`, where :math:`v(x)` may be one of the
+        following functions: :math:`1`, :math:`\\log(x-a)`, :math:`\\log(b-x)`,
+        :math:`\\log(x-a)\\log(b-x)`.
+
+        The user specifies :math:`\\alpha`, :math:`\\beta` and the type of the
+        function :math:`v`. A globally adaptive subdivision strategy is
+        applied, with modified Clenshaw-Curtis integration on those
+        subintervals which contain `a` or `b`.
+    qawce
+        computes :math:`\\int^b_a f(x) / (x-c)dx` where the integral must be
+        interpreted as a Cauchy principal value integral, for user specified
+        :math:`c` and :math:`f`. The strategy is globally adaptive. Modified
+        Clenshaw-Curtis integration is used on those intervals containing the
+        point :math:`x = c`.
+
+    **Integration of Complex Function of a Real Variable**
+
+    A complex valued function, :math:`f`, of a real variable can be written as
+    :math:`f = g + ih`.  Similarly, the integral of :math:`f` can be
+    written as
+
+    .. math::
+        \\int_a^b f(x) dx = \\int_a^b g(x) dx + i\\int_a^b h(x) dx
+
+    assuming that the integrals of :math:`g` and :math:`h` exist
+    over the interval :math:`[a,b]` [2]_. Therefore, ``quad`` integrates
+    complex-valued functions by integrating the real and imaginary components
+    separately.
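+
+    For example, with the ``complex_func`` argument (a sketch; the analytic
+    value of :math:`\\int^{\\pi}_0 e^{ix} dx` is :math:`2i`):
+
+    >>> import numpy as np
+    >>> from scipy import integrate
+    >>> res, err = integrate.quad(lambda x: np.exp(1j * x), 0, np.pi,
+    ...                           complex_func=True)
+    >>> bool(np.isclose(res, 2j))
+    True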
+
+
+    References
+    ----------
+
+    .. [1] Piessens, Robert; de Doncker-Kapenga, Elise;
+           Überhuber, Christoph W.; Kahaner, David (1983).
+           QUADPACK: A subroutine package for automatic integration.
+           Springer-Verlag.
+           ISBN 978-3-540-12553-2.
+
+    .. [2] McCullough, Thomas; Phillips, Keith (1973).
+           Foundations of Analysis in the Complex Plane.
+           Holt Rinehart Winston.
+           ISBN 0-03-086370-8
+
+    Examples
+    --------
+    Calculate :math:`\\int^4_0 x^2 dx` and compare with an analytic result
+
+    >>> from scipy import integrate
+    >>> import numpy as np
+    >>> x2 = lambda x: x**2
+    >>> integrate.quad(x2, 0, 4)
+    (21.333333333333332, 2.3684757858670003e-13)
+    >>> print(4**3 / 3.)  # analytical result
+    21.3333333333
+
+    Calculate :math:`\\int^\\infty_0 e^{-x} dx`
+
+    >>> invexp = lambda x: np.exp(-x)
+    >>> integrate.quad(invexp, 0, np.inf)
+    (1.0, 5.842605999138044e-11)
+
+    Calculate :math:`\\int^1_0 a x \\,dx` for :math:`a = 1, 3`
+
+    >>> f = lambda x, a: a*x
+    >>> y, err = integrate.quad(f, 0, 1, args=(1,))
+    >>> y
+    0.5
+    >>> y, err = integrate.quad(f, 0, 1, args=(3,))
+    >>> y
+    1.5
+
+    Calculate :math:`\\int^1_0 (x^2 + y^2) \\,dx` with ctypes, holding
+    the ``y`` parameter fixed at 1::
+
+        testlib.c =>
+            double func(int n, double args[n]){
+                return args[0]*args[0] + args[1]*args[1];}
+        compile to library testlib.*
+
+    ::
+
+       from scipy import integrate
+       import ctypes
+       lib = ctypes.CDLL('/home/.../testlib.*') #use absolute path
+       lib.func.restype = ctypes.c_double
+       lib.func.argtypes = (ctypes.c_int,ctypes.c_double)
+       integrate.quad(lib.func,0,1,(1))
+       #(1.3333333333333333, 1.4802973661668752e-14)
+       print((1.0**3/3.0 + 1.0) - (0.0**3/3.0 + 0.0)) #Analytic result
+       # 1.3333333333333333
+
+    Be aware that pulse shapes and other sharp features as compared to the
+    size of the integration interval may not be integrated correctly using
+    this method. A simplified example of this limitation is integrating a
+    y-axis reflected step function with many zero values within the
+    integral's bounds.
+
+    >>> y = lambda x: 1 if x<=0 else 0
+    >>> integrate.quad(y, -1, 1)
+    (1.0, 1.1102230246251565e-14)
+    >>> integrate.quad(y, -1, 100)
+    (1.0000000002199108, 1.0189464580163188e-08)
+    >>> integrate.quad(y, -1, 10000)
+    (0.0, 0.0)
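+
+    One remedy, since the location of the sharp feature is known here, is to
+    supply it as a break point via the `points` argument described above:
+
+    >>> res, err = integrate.quad(y, -1, 10000, points=[0])
+    >>> bool(np.isclose(res, 1.0))
+    True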
+
+    """
+    if not isinstance(args, tuple):
+        args = (args,)
+
+    # check the limits of integration: \int_a^b, expect a < b
+    flip, a, b = b < a, min(a, b), max(a, b)
+
+    if complex_func:
+        def imfunc(x, *args):
+            return np.imag(func(x, *args))
+
+        def refunc(x, *args):
+            return np.real(func(x, *args))
+
+        re_retval = quad(refunc, a, b, args, full_output, epsabs,
+                         epsrel, limit, points, weight, wvar, wopts,
+                         maxp1, limlst, complex_func=False)
+        im_retval = quad(imfunc, a, b, args, full_output, epsabs,
+                         epsrel, limit, points, weight, wvar, wopts,
+                         maxp1, limlst, complex_func=False)
+        integral = re_retval[0] + 1j*im_retval[0]
+        error_estimate = re_retval[1] + 1j*im_retval[1]
+        retval = integral, error_estimate
+        if full_output:
+            msgexp = {}
+            msgexp["real"] = re_retval[2:]
+            msgexp["imag"] = im_retval[2:]
+            retval = retval + (msgexp,)
+
+        return retval
+
+    if weight is None:
+        retval = _quad(func, a, b, args, full_output, epsabs, epsrel, limit,
+                       points)
+    else:
+        if points is not None:
+            msg = ("Break points cannot be specified when using weighted integrand.\n"
+                   "Continuing, ignoring specified points.")
+            warnings.warn(msg, IntegrationWarning, stacklevel=2)
+        retval = _quad_weight(func, a, b, args, full_output, epsabs, epsrel,
+                              limlst, limit, maxp1, weight, wvar, wopts)
+
+    if flip:
+        retval = (-retval[0],) + retval[1:]
+
+    ier = retval[-1]
+    if ier == 0:
+        return retval[:-1]
+
+    msgs = {80: "A Python error occurred possibly while calling the function.",
+             1: "The maximum number of subdivisions (%d) has been achieved.\n  If increasing the limit yields no improvement it is advised to analyze \n  the integrand in order to determine the difficulties.  If the position of a \n  local difficulty can be determined (singularity, discontinuity) one will \n  probably gain from splitting up the interval and calling the integrator \n  on the subranges.  Perhaps a special-purpose integrator should be used." % limit,
+             2: "The occurrence of roundoff error is detected, which prevents \n  the requested tolerance from being achieved.  The error may be \n  underestimated.",
+             3: "Extremely bad integrand behavior occurs at some points of the\n  integration interval.",
+             4: "The algorithm does not converge.  Roundoff error is detected\n  in the extrapolation table.  It is assumed that the requested tolerance\n  cannot be achieved, and that the returned result (if full_output = 1) is \n  the best which can be obtained.",
+             5: "The integral is probably divergent, or slowly convergent.",
+             6: "The input is invalid.",
+             7: "Abnormal termination of the routine.  The estimates for result\n  and error are less reliable.  It is assumed that the requested accuracy\n  has not been achieved.",
+            'unknown': "Unknown error."}
+
+    if weight in ['cos','sin'] and (b == Inf or a == -Inf):
+        msgs[1] = "The maximum number of cycles allowed has been achieved., e.e.\n  of subintervals (a+(k-1)c, a+kc) where c = (2*int(abs(omega)+1))\n  *pi/abs(omega), for k = 1, 2, ..., lst.  One can allow more cycles by increasing the value of limlst.  Look at info['ierlst'] with full_output=1."
+        msgs[4] = "The extrapolation table constructed for convergence acceleration\n  of the series formed by the integral contributions over the cycles, \n  does not converge to within the requested accuracy.  Look at \n  info['ierlst'] with full_output=1."
+        msgs[7] = "Bad integrand behavior occurs within one or more of the cycles.\n  Location and type of the difficulty involved can be determined from \n  the vector info['ierlist'] obtained with full_output=1."
+        explain = {1: "The maximum number of subdivisions (= limit) has been \n  achieved on this cycle.",
+                   2: "The occurrence of roundoff error is detected and prevents\n  the tolerance imposed on this cycle from being achieved.",
+                   3: "Extremely bad integrand behavior occurs at some points of\n  this cycle.",
+                   4: "The integral over this cycle does not converge (to within the required accuracy) due to roundoff in the extrapolation procedure invoked on this cycle.  It is assumed that the result on this interval is the best which can be obtained.",
+                   5: "The integral over this cycle is probably divergent or slowly convergent."}
+
+    try:
+        msg = msgs[ier]
+    except KeyError:
+        msg = msgs['unknown']
+
+    if ier in [1,2,3,4,5,7]:
+        if full_output:
+            if weight in ['cos', 'sin'] and (b == Inf or a == -Inf):
+                return retval[:-1] + (msg, explain)
+            else:
+                return retval[:-1] + (msg,)
+        else:
+            warnings.warn(msg, IntegrationWarning, stacklevel=2)
+            return retval[:-1]
+
+    elif ier == 6:  # Forensic decision tree when QUADPACK throws ier=6
+        if epsabs <= 0:  # Small error tolerance - applies to all methods
+            if epsrel < max(50 * sys.float_info.epsilon, 5e-29):
+                msg = ("If 'epsabs'<=0, 'epsrel' must be greater than both"
+                       " 5e-29 and 50*(machine epsilon).")
+            elif weight in ['sin', 'cos'] and (abs(a) + abs(b) == Inf):
+                msg = ("Sine or cosine weighted intergals with infinite domain"
+                       " must have 'epsabs'>0.")
+
+        elif weight is None:
+            if points is None:  # QAGSE/QAGIE
+                msg = ("Invalid 'limit' argument. There must be"
+                       " at least one subinterval")
+            else:  # QAGPE
+                if not (min(a, b) <= min(points) <= max(points) <= max(a, b)):
+                    msg = ("All break points in 'points' must lie within the"
+                           " integration limits.")
+                elif len(points) >= limit:
+                    msg = ("Number of break points ({:d})"
+                           " must be less than subinterval"
+                           " limit ({:d})").format(len(points), limit)
+
+        else:
+            if maxp1 < 1:
+                msg = "Chebyshev moment limit maxp1 must be >=1."
+
+            elif weight in ('cos', 'sin') and abs(a+b) == Inf:  # QAWFE
+                msg = "Cycle limit limlst must be >=3."
+
+            elif weight.startswith('alg'):  # QAWSE
+                if min(wvar) < -1:
+                    msg = "wvar parameters (alpha, beta) must both be >= -1."
+                if b < a:
+                    msg = "Integration limits a, b must satistfy a>> import numpy as np
+    >>> from scipy import integrate
+    >>> f = lambda y, x: x*y**2
+    >>> integrate.dblquad(f, 0, 2, 0, 1)
+    (0.6666666666666667, 7.401486830834377e-15)
+
+    Calculate :math:`\\int^{x=\\pi/4}_{x=0} \\int^{y=\\cos(x)}_{y=\\sin(x)} 1
+    \\,dy \\,dx`.
+
+    >>> f = lambda y, x: 1
+    >>> integrate.dblquad(f, 0, np.pi/4, np.sin, np.cos)
+    (0.41421356237309503, 1.1083280054755938e-14)
+
+    Calculate :math:`\\int^{x=1}_{x=0} \\int^{y=2-x}_{y=x} a x y \\,dy \\,dx`
+    for :math:`a=1, 3`.
+
+    >>> f = lambda y, x, a: a*x*y
+    >>> integrate.dblquad(f, 0, 1, lambda x: x, lambda x: 2-x, args=(1,))
+    (0.33333333333333337, 5.551115123125783e-15)
+    >>> integrate.dblquad(f, 0, 1, lambda x: x, lambda x: 2-x, args=(3,))
+    (0.9999999999999999, 1.6653345369377348e-14)
+
+    Compute the two-dimensional Gaussian Integral, which is the integral of the
+    Gaussian function :math:`f(x,y) = e^{-(x^{2} + y^{2})}`, over
+    :math:`(-\\infty,+\\infty)`. That is, compute the integral
+    :math:`\\iint^{+\\infty}_{-\\infty} e^{-(x^{2} + y^{2})} \\,dy\\,dx`.
+
+    >>> f = lambda x, y: np.exp(-(x ** 2 + y ** 2))
+    >>> integrate.dblquad(f, -np.inf, np.inf, -np.inf, np.inf)
+    (3.141592653589777, 2.5173086737433208e-08)
+
+    """
+
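+    # nquad expects each range to be a callable of the outer integration
+    # variables; wrap gfun/hfun so constant and callable bounds share one path.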
+    def temp_ranges(*args):
+        return [gfun(args[0]) if callable(gfun) else gfun,
+                hfun(args[0]) if callable(hfun) else hfun]
+
+    return nquad(func, [temp_ranges, [a, b]], args=args,
+            opts={"epsabs": epsabs, "epsrel": epsrel})
+
+
+def tplquad(func, a, b, gfun, hfun, qfun, rfun, args=(), epsabs=1.49e-8,
+            epsrel=1.49e-8):
+    """
+    Compute a triple (definite) integral.
+
+    Return the triple integral of ``func(z, y, x)`` from ``x = a..b``,
+    ``y = gfun(x)..hfun(x)``, and ``z = qfun(x,y)..rfun(x,y)``.
+
+    Parameters
+    ----------
+    func : function
+        A Python function or method of at least three variables in the
+        order (z, y, x).
+    a, b : float
+        The limits of integration in x: `a` < `b`
+    gfun : function or float
+        The lower boundary curve in y which is a function taking a single
+        floating point argument (x) and returning a floating point result
+        or a float indicating a constant boundary curve.
+    hfun : function or float
+        The upper boundary curve in y (same requirements as `gfun`).
+    qfun : function or float
+        The lower boundary surface in z.  It must be a function that takes
+        two floats in the order (x, y) and returns a float or a float
+        indicating a constant boundary surface.
+    rfun : function or float
+        The upper boundary surface in z. (Same requirements as `qfun`.)
+    args : tuple, optional
+        Extra arguments to pass to `func`.
+    epsabs : float, optional
+        Absolute tolerance passed directly to the innermost 1-D quadrature
+        integration. Default is 1.49e-8.
+    epsrel : float, optional
+        Relative tolerance of the innermost 1-D integrals. Default is 1.49e-8.
+
+    Returns
+    -------
+    y : float
+        The resultant integral.
+    abserr : float
+        An estimate of the error.
+
+    See Also
+    --------
+    quad : Adaptive quadrature using QUADPACK
+    quadrature : Adaptive Gaussian quadrature
+    fixed_quad : Fixed-order Gaussian quadrature
+    dblquad : Double integrals
+    nquad : N-dimensional integrals
+    romb : Integrators for sampled data
+    simpson : Integrators for sampled data
+    ode : ODE integrators
+    odeint : ODE integrators
+    scipy.special : For coefficients and roots of orthogonal polynomials
+
+    Notes
+    -----
+
+    **Details of QUADPACK level routines**
+
+    `quad` calls routines from the FORTRAN library QUADPACK. This section
+    provides details on the conditions for each routine to be called and a
+    short description of each routine. For each level of integration, ``qagse``
+    is used for finite limits, and ``qagie`` is used if either limit (or both)
+    is infinite. The following provides a short description from [1]_ for each
+    routine.
+
+    qagse
+        is an integrator based on globally adaptive interval
+        subdivision in connection with extrapolation, which will
+        eliminate the effects of integrand singularities of
+        several types.
+    qagie
+        handles integration over infinite intervals. The infinite range is
+        mapped onto a finite interval and subsequently the same strategy as
+        in ``QAGS`` is applied.
+
+    References
+    ----------
+
+    .. [1] Piessens, Robert; de Doncker-Kapenga, Elise;
+           Überhuber, Christoph W.; Kahaner, David (1983).
+           QUADPACK: A subroutine package for automatic integration.
+           Springer-Verlag.
+           ISBN 978-3-540-12553-2.
+
+    Examples
+    --------
+    Compute the triple integral of ``x * y * z``, over ``x`` ranging
+    from 1 to 2, ``y`` ranging from 2 to 3, ``z`` ranging from 0 to 1.
+    That is, :math:`\\int^{x=2}_{x=1} \\int^{y=3}_{y=2} \\int^{z=1}_{z=0} x y z
+    \\,dz \\,dy \\,dx`.
+
+    >>> import numpy as np
+    >>> from scipy import integrate
+    >>> f = lambda z, y, x: x*y*z
+    >>> integrate.tplquad(f, 1, 2, 2, 3, 0, 1)
+    (1.8749999999999998, 3.3246447942574074e-14)
+
+    Calculate :math:`\\int^{x=1}_{x=0} \\int^{y=1-2x}_{y=0}
+    \\int^{z=1-x-2y}_{z=0} x y z \\,dz \\,dy \\,dx`.
+    Note: `qfun`/`rfun` take arguments in the order (x, y), even though ``f``
+    takes arguments in the order (z, y, x).
+
+    >>> f = lambda z, y, x: x*y*z
+    >>> integrate.tplquad(f, 0, 1, 0, lambda x: 1-2*x, 0, lambda x, y: 1-x-2*y)
+    (0.05416666666666668, 2.1774196738157757e-14)
+
+    Calculate :math:`\\int^{x=1}_{x=0} \\int^{y=1}_{y=0} \\int^{z=1}_{z=0}
+    a x y z \\,dz \\,dy \\,dx` for :math:`a=1, 3`.
+
+    >>> f = lambda z, y, x, a: a*x*y*z
+    >>> integrate.tplquad(f, 0, 1, 0, 1, 0, 1, args=(1,))
+    (0.125, 5.527033708952211e-15)
+    >>> integrate.tplquad(f, 0, 1, 0, 1, 0, 1, args=(3,))
+    (0.375, 1.6581101126856635e-14)
+
+    Compute the three-dimensional Gaussian Integral, which is the integral of
+    the Gaussian function :math:`f(x,y,z) = e^{-(x^{2} + y^{2} + z^{2})}`, over
+    :math:`(-\\infty,+\\infty)`. That is, compute the integral
+    :math:`\\iiint^{+\\infty}_{-\\infty} e^{-(x^{2} + y^{2} + z^{2})} \\,dz
+    \\,dy\\,dx`.
+
+    >>> f = lambda x, y, z: np.exp(-(x ** 2 + y ** 2 + z ** 2))
+    >>> integrate.tplquad(f, -np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf)
+    (5.568327996830833, 4.4619078828029765e-08)
+
+    """
+    # f(z, y, x)
+    # qfun/rfun(x, y)
+    # gfun/hfun(x)
+    # nquad will hand (y, x, t0, ...) to ranges0
+    # nquad will hand (x, t0, ...) to ranges1
+    # Only qfun / rfun is different API...
+
+    def ranges0(*args):
+        return [qfun(args[1], args[0]) if callable(qfun) else qfun,
+                rfun(args[1], args[0]) if callable(rfun) else rfun]
+
+    def ranges1(*args):
+        return [gfun(args[0]) if callable(gfun) else gfun,
+                hfun(args[0]) if callable(hfun) else hfun]
+
+    ranges = [ranges0, ranges1, [a, b]]
+    return nquad(func, ranges, args=args,
+            opts={"epsabs": epsabs, "epsrel": epsrel})
+
+
+def nquad(func, ranges, args=None, opts=None, full_output=False):
+    r"""
+    Integration over multiple variables.
+
+    Wraps `quad` to enable integration over multiple variables.
+    Various options allow improved integration of discontinuous functions, as
+    well as the use of weighted integration, and generally finer control of the
+    integration process.
+
+    Parameters
+    ----------
+    func : {callable, scipy.LowLevelCallable}
+        The function to be integrated. Has arguments of ``x0, ... xn``,
+        ``t0, ... tm``, where integration is carried out over ``x0, ... xn``,
+        which must be floats. ``t0, ... tm`` are extra arguments
+        passed in `args`.
+        Function signature should be ``func(x0, x1, ..., xn, t0, t1, ..., tm)``.
+        Integration is carried out in order.  That is, integration over ``x0``
+        is the innermost integral, and ``xn`` is the outermost.
+
+        If the user desires improved integration performance, then `func` may
+        be a `scipy.LowLevelCallable` with one of the signatures::
+
+            double func(int n, double *xx)
+            double func(int n, double *xx, void *user_data)
+
+        where ``n`` is the total number of variables plus extra
+        arguments. The ``xx`` array
+        contains the coordinates and extra arguments. ``user_data`` is the data
+        contained in the `scipy.LowLevelCallable`.
+    ranges : iterable object
+        Each element of ranges may be either a sequence of 2 numbers, or else
+        a callable that returns such a sequence. ``ranges[0]`` corresponds to
+        integration over x0, and so on. If an element of ranges is a callable,
+        then it will be called with all of the integration arguments available,
+        as well as any parametric arguments. e.g., if
+        ``func = f(x0, x1, x2, t0, t1)``, then ``ranges[0]`` may be defined as
+        either ``(a, b)`` or else as ``(a, b) = range0(x1, x2, t0, t1)``.
+    args : iterable object, optional
+        Additional arguments ``t0, ... tm``, required by ``func``, ``ranges``,
+        and ``opts``.
+    opts : iterable object or dict, optional
+        Options to be passed to `quad`. May be empty, a dict, or
+        a sequence of dicts or functions that return a dict. If empty, the
+        default options from scipy.integrate.quad are used. If a dict, the same
+        options are used for all levels of integration. If a sequence, then each
+        element of the sequence corresponds to a particular integration. e.g.,
+        ``opts[0]`` corresponds to integration over ``x0``, and so on. If a
+        callable, the signature must be the same as for ``ranges``. The
+        available options together with their default values are:
+
+          - epsabs = 1.49e-08
+          - epsrel = 1.49e-08
+          - limit  = 50
+          - points = None
+          - weight = None
+          - wvar   = None
+          - wopts  = None
+
+        For more information on these options, see `quad`.
+
+    full_output : bool, optional
+        Partial implementation of ``full_output`` from scipy.integrate.quad.
+        The number of integrand function evaluations ``neval`` can be obtained
+        by setting ``full_output=True`` when calling nquad.
+
+    Returns
+    -------
+    result : float
+        The result of the integration.
+    abserr : float
+        The maximum of the estimates of the absolute error in the various
+        integration results.
+    out_dict : dict, optional
+        A dict containing additional information on the integration.
+
+    See Also
+    --------
+    quad : 1-D numerical integration
+    dblquad, tplquad : double and triple integrals
+    fixed_quad : fixed-order Gaussian quadrature
+    quadrature : adaptive Gaussian quadrature
+
+    Notes
+    -----
+
+    **Details of QUADPACK level routines**
+
+    `nquad` calls routines from the FORTRAN library QUADPACK. This section
+    provides details on the conditions for each routine to be called and a
+    short description of each routine. The routine called depends on
+    `weight`, `points` and the integration limits `a` and `b`.
+
+    ================  ==============  ==========  =====================
+    QUADPACK routine  `weight`        `points`    infinite bounds
+    ================  ==============  ==========  =====================
+    qagse             None            No          No
+    qagie             None            No          Yes
+    qagpe             None            Yes         No
+    qawoe             'sin', 'cos'    No          No
+    qawfe             'sin', 'cos'    No          either `a` or `b`
+    qawse             'alg*'          No          No
+    qawce             'cauchy'        No          No
+    ================  ==============  ==========  =====================
+
+    The following provides a short description from [1]_ for each
+    routine.
+
+    qagse
+        is an integrator based on globally adaptive interval
+        subdivision in connection with extrapolation, which will
+        eliminate the effects of integrand singularities of
+        several types.
+    qagie
+        handles integration over infinite intervals. The infinite range is
+        mapped onto a finite interval and subsequently the same strategy as
+        in ``QAGS`` is applied.
+    qagpe
+        serves the same purposes as QAGS, but also allows the
+        user to provide explicit information about the location
+        and type of trouble-spots i.e. the abscissae of internal
+        singularities, discontinuities and other difficulties of
+        the integrand function.
+    qawoe
+        is an integrator for the evaluation of
+        :math:`\int^b_a \cos(\omega x)f(x)dx` or
+        :math:`\int^b_a \sin(\omega x)f(x)dx`
+        over a finite interval [a,b], where :math:`\omega` and :math:`f`
+        are specified by the user. The rule evaluation component is based
+        on the modified Clenshaw-Curtis technique.
+
+        An adaptive subdivision scheme is used in connection
+        with an extrapolation procedure, which is a modification
+        of that in ``QAGS`` and allows the algorithm to deal with
+        singularities in :math:`f(x)`.
+    qawfe
+        calculates the Fourier transform
+        :math:`\int^\infty_a \cos(\omega x)f(x)dx` or
+        :math:`\int^\infty_a \sin(\omega x)f(x)dx`
+        for user-provided :math:`\omega` and :math:`f`. The procedure of
+        ``QAWO`` is applied on successive finite intervals, and convergence
+        acceleration by means of the :math:`\varepsilon`-algorithm is applied
+        to the series of integral approximations.
+    qawse
+        approximates :math:`\int^b_a w(x)f(x)dx`, with :math:`a < b` where
+        :math:`w(x) = (x-a)^{\alpha}(b-x)^{\beta}v(x)` with
+        :math:`\alpha,\beta > -1`, where :math:`v(x)` may be one of the
+        following functions: :math:`1`, :math:`\log(x-a)`, :math:`\log(b-x)`,
+        :math:`\log(x-a)\log(b-x)`.
+
+        The user specifies :math:`\alpha`, :math:`\beta` and the type of the
+        function :math:`v`. A globally adaptive subdivision strategy is
+        applied, with modified Clenshaw-Curtis integration on those
+        subintervals which contain `a` or `b`.
+    qawce
+        computes :math:`\int^b_a f(x) / (x-c)dx` where the integral must be
+        interpreted as a Cauchy principal value integral, for user specified
+        :math:`c` and :math:`f`. The strategy is globally adaptive. Modified
+        Clenshaw-Curtis integration is used on those intervals containing the
+        point :math:`x = c`.
+
+    References
+    ----------
+
+    .. [1] Piessens, Robert; de Doncker-Kapenga, Elise;
+           Überhuber, Christoph W.; Kahaner, David (1983).
+           QUADPACK: A subroutine package for automatic integration.
+           Springer-Verlag.
+           ISBN 978-3-540-12553-2.
+
+    Examples
+    --------
+    Calculate
+
+    .. math::
+
+        \int^{1}_{-0.15} \int^{0.8}_{0.13} \int^{1}_{-1} \int^{1}_{0}
+        f(x_0, x_1, x_2, x_3) \,dx_0 \,dx_1 \,dx_2 \,dx_3 ,
+
+    where
+
+    .. math::
+
+        f(x_0, x_1, x_2, x_3) = \begin{cases}
+          x_0^2+x_1 x_2-x_3^3+ \sin{x_0}+1 & (x_0-0.2 x_3-0.5-0.25 x_1 > 0) \\
+          x_0^2+x_1 x_2-x_3^3+ \sin{x_0}+0 & (x_0-0.2 x_3-0.5-0.25 x_1 \leq 0)
+        \end{cases} .
+
+    >>> import numpy as np
+    >>> from scipy import integrate
+    >>> func = lambda x0,x1,x2,x3 : x0**2 + x1*x2 - x3**3 + np.sin(x0) + (
+    ...                                 1 if (x0-.2*x3-.5-.25*x1>0) else 0)
+    >>> def opts0(*args, **kwargs):
+    ...     return {'points':[0.2*args[2] + 0.5 + 0.25*args[0]]}
+    >>> integrate.nquad(func, [[0,1], [-1,1], [.13,.8], [-.15,1]],
+    ...                 opts=[opts0,{},{},{}], full_output=True)
+    (1.5267454070738633, 2.9437360001402324e-14, {'neval': 388962})
+
+    Calculate
+
+    .. math::
+
+        \int^{t_0+t_1+1}_{t_0+t_1-1}
+        \int^{x_2+t_0^2 t_1^3+1}_{x_2+t_0^2 t_1^3-1}
+        \int^{t_0 x_1+t_1 x_2+1}_{t_0 x_1+t_1 x_2-1}
+        f(x_0,x_1, x_2,t_0,t_1)
+        \,dx_0 \,dx_1 \,dx_2,
+
+    where
+
+    .. math::
+
+        f(x_0, x_1, x_2, t_0, t_1) = \begin{cases}
+          x_0 x_2^2 + \sin{x_1}+2 & (x_0+t_1 x_1-t_0 > 0) \\
+          x_0 x_2^2 +\sin{x_1}+1 & (x_0+t_1 x_1-t_0 \leq 0)
+        \end{cases}
+
+    and :math:`(t_0, t_1) = (0, 1)` .
+
+    >>> def func2(x0, x1, x2, t0, t1):
+    ...     return x0*x2**2 + np.sin(x1) + 1 + (1 if x0+t1*x1-t0>0 else 0)
+    >>> def lim0(x1, x2, t0, t1):
+    ...     return [t0*x1 + t1*x2 - 1, t0*x1 + t1*x2 + 1]
+    >>> def lim1(x2, t0, t1):
+    ...     return [x2 + t0**2*t1**3 - 1, x2 + t0**2*t1**3 + 1]
+    >>> def lim2(t0, t1):
+    ...     return [t0 + t1 - 1, t0 + t1 + 1]
+    >>> def opts0(x1, x2, t0, t1):
+    ...     return {'points' : [t0 - t1*x1]}
+    >>> def opts1(x2, t0, t1):
+    ...     return {}
+    >>> def opts2(t0, t1):
+    ...     return {}
+    >>> integrate.nquad(func2, [lim0, lim1, lim2], args=(0,1),
+    ...                 opts=[opts0, opts1, opts2])
+    (36.099919226771625, 1.8546948553373528e-07)
+
+    """
+    depth = len(ranges)
+    ranges = [rng if callable(rng) else _RangeFunc(rng) for rng in ranges]
+    if args is None:
+        args = ()
+    if opts is None:
+        opts = [dict([])] * depth
+
+    if isinstance(opts, dict):
+        opts = [_OptFunc(opts)] * depth
+    else:
+        opts = [opt if callable(opt) else _OptFunc(opt) for opt in opts]
+    return _NQuad(func, ranges, opts, full_output).integrate(*args)
+
+
+class _RangeFunc:
+    def __init__(self, range_):
+        self.range_ = range_
+
+    def __call__(self, *args):
+        """Return stored value.
+
+        *args needed because range_ can be float or func, and is called with
+        variable number of parameters.
+        """
+        return self.range_
+
+
+class _OptFunc:
+    def __init__(self, opt):
+        self.opt = opt
+
+    def __call__(self, *args):
+        """Return stored dict."""
+        return self.opt
+
+
+class _NQuad:
+    def __init__(self, func, ranges, opts, full_output):
+        self.abserr = 0
+        self.func = func
+        self.ranges = ranges
+        self.opts = opts
+        self.maxdepth = len(ranges)
+        self.full_output = full_output
+        if self.full_output:
+            self.out_dict = {'neval': 0}
+
+    def integrate(self, *args, **kwargs):
+        depth = kwargs.pop('depth', 0)
+        if kwargs:
+            raise ValueError('unexpected kwargs')
+
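+        # Depth 0 is the outermost integral; ranges and opts are indexed
+        # from the end of the list, so ranges[-1] is consumed first.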
+        # Get the integration range and options for this depth.
+        ind = -(depth + 1)
+        fn_range = self.ranges[ind]
+        low, high = fn_range(*args)
+        fn_opt = self.opts[ind]
+        opt = dict(fn_opt(*args))
+
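+        # Keep only the user-supplied break points that fall inside the
+        # current (possibly argument-dependent) limits; quad rejects others.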
+        if 'points' in opt:
+            opt['points'] = [x for x in opt['points'] if low <= x <= high]
+        if depth + 1 == self.maxdepth:
+            f = self.func
+        else:
+            f = partial(self.integrate, depth=depth+1)
+        quad_r = quad(f, low, high, args=args, full_output=self.full_output,
+                      **opt)
+        value = quad_r[0]
+        abserr = quad_r[1]
+        if self.full_output:
+            infodict = quad_r[2]
+            # The 'neval' parameter in full_output returns the total
+            # number of times the integrand function was evaluated.
+            # Therefore, only the innermost integration loop counts.
+            if depth + 1 == self.maxdepth:
+                self.out_dict['neval'] += infodict['neval']
+        self.abserr = max(self.abserr, abserr)
+        if depth > 0:
+            return value
+        else:
+            # Final result of N-D integration with error
+            if self.full_output:
+                return value, self.abserr, self.out_dict
+            else:
+                return value, self.abserr
diff --git a/__packaged__/coreml/.python_dependencies/scipy/integrate/_quadrature.py b/__packaged__/coreml/.python_dependencies/scipy/integrate/_quadrature.py
new file mode 100644
index 00000000..1fe46e5c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/integrate/_quadrature.py
@@ -0,0 +1,1360 @@
+from __future__ import annotations
+from typing import TYPE_CHECKING, Callable, Dict, Tuple, Any, cast
+import functools
+import numpy as np
+import math
+import types
+import warnings
+from collections import namedtuple
+
+from scipy.special import roots_legendre
+from scipy.special import gammaln, logsumexp
+from scipy._lib._util import _rng_spawn
+
+
+__all__ = ['fixed_quad', 'quadrature', 'romberg', 'romb',
+           'trapezoid', 'trapz', 'simps', 'simpson',
+           'cumulative_trapezoid', 'cumtrapz', 'newton_cotes',
+           'AccuracyWarning']
+
+
+def trapezoid(y, x=None, dx=1.0, axis=-1):
+    r"""
+    Integrate along the given axis using the composite trapezoidal rule.
+
+    If `x` is provided, the integration happens in sequence along its
+    elements - they are not sorted.
+
+    Integrate `y` (`x`) along each 1d slice on the given axis, compute
+    :math:`\int y(x) dx`.
+    When `x` is specified, this integrates along the parametric curve,
+    computing :math:`\int_t y(t) dt =
+    \int_t y(t) \left.\frac{dx}{dt}\right|_{x=x(t)} dt`.
+
+    Parameters
+    ----------
+    y : array_like
+        Input array to integrate.
+    x : array_like, optional
+        The sample points corresponding to the `y` values. If `x` is None,
+        the sample points are assumed to be evenly spaced `dx` apart. The
+        default is None.
+    dx : scalar, optional
+        The spacing between sample points when `x` is None. The default is 1.
+    axis : int, optional
+        The axis along which to integrate.
+
+    Returns
+    -------
+    trapezoid : float or ndarray
+        Definite integral of `y`, approximated along a single axis by the
+        trapezoidal rule. If `y` is a 1-dimensional array, the result is a
+        float; if `y` is n-dimensional, the result is an (n-1)-dimensional
+        array.
+
+    See Also
+    --------
+    cumulative_trapezoid, simpson, romb
+
+    Notes
+    -----
+    Image [2]_ illustrates the trapezoidal rule: y-axis locations of points
+    are taken from the `y` array, and by default the x-axis distances between
+    points are 1.0; alternatively they can be provided via the `x` array or
+    the `dx` scalar. The return value equals the combined area under
+    the red lines.
+
+    References
+    ----------
+    .. [1] Wikipedia page: https://en.wikipedia.org/wiki/Trapezoidal_rule
+
+    .. [2] Illustration image:
+           https://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
+
+    Examples
+    --------
+    Use the trapezoidal rule on evenly spaced points:
+
+    >>> import numpy as np
+    >>> from scipy import integrate
+    >>> integrate.trapezoid([1, 2, 3])
+    4.0
+
+    The spacing between sample points can be selected by either the
+    ``x`` or ``dx`` arguments:
+
+    >>> integrate.trapezoid([1, 2, 3], x=[4, 6, 8])
+    8.0
+    >>> integrate.trapezoid([1, 2, 3], dx=2)
+    8.0
+
+    Using a decreasing ``x`` corresponds to integrating in reverse:
+
+    >>> integrate.trapezoid([1, 2, 3], x=[8, 6, 4])
+    -8.0
+
+    More generally ``x`` is used to integrate along a parametric curve. We can
+    estimate the integral :math:`\int_0^1 x^2 \,dx = 1/3` using:
+
+    >>> x = np.linspace(0, 1, num=50)
+    >>> y = x**2
+    >>> integrate.trapezoid(y, x)
+    0.33340274885464394
+
+    Or estimate the area of a circle, noting we repeat the sample which closes
+    the curve:
+
+    >>> theta = np.linspace(0, 2 * np.pi, num=1000, endpoint=True)
+    >>> integrate.trapezoid(np.cos(theta), x=np.sin(theta))
+    3.141571941375841
+
+    ``trapezoid`` can be applied along a specified axis to do multiple
+    computations in one call:
+
+    >>> a = np.arange(6).reshape(2, 3)
+    >>> a
+    array([[0, 1, 2],
+           [3, 4, 5]])
+    >>> integrate.trapezoid(a, axis=0)
+    array([1.5, 2.5, 3.5])
+    >>> integrate.trapezoid(a, axis=1)
+    array([2.,  8.])
+    """
+    # Future-proofing, in case NumPy moves from trapz to trapezoid for the same
+    # reasons as SciPy
+    if hasattr(np, 'trapezoid'):
+        return np.trapezoid(y, x=x, dx=dx, axis=axis)
+    else:
+        return np.trapz(y, x=x, dx=dx, axis=axis)
+
+
+# Note: alias kept for backwards compatibility. Rename was done
+# because trapz is a slur in colloquial English (see gh-12924).
+def trapz(y, x=None, dx=1.0, axis=-1):
+    """An alias of `trapezoid`.
+
+    `trapz` is kept for backwards compatibility. For new code, prefer
+    `trapezoid` instead.
+    """
+    return trapezoid(y, x=x, dx=dx, axis=axis)
+
+
+class AccuracyWarning(Warning):
+    pass
+
+
+if TYPE_CHECKING:
+    # workaround for mypy function attributes see:
+    # https://github.com/python/mypy/issues/2087#issuecomment-462726600
+    from typing import Protocol
+
+    class CacheAttributes(Protocol):
+        cache: Dict[int, Tuple[Any, Any]]
+else:
+    CacheAttributes = Callable
+
+
+def cache_decorator(func: Callable) -> CacheAttributes:
+    return cast(CacheAttributes, func)
+
+
+@cache_decorator
+def _cached_roots_legendre(n):
+    """
+    Cache roots_legendre results to speed up calls of the fixed_quad
+    function.
+    """
+    if n in _cached_roots_legendre.cache:
+        return _cached_roots_legendre.cache[n]
+
+    _cached_roots_legendre.cache[n] = roots_legendre(n)
+    return _cached_roots_legendre.cache[n]
+
+
+_cached_roots_legendre.cache = dict()
+
+
+def fixed_quad(func, a, b, args=(), n=5):
+    """
+    Compute a definite integral using fixed-order Gaussian quadrature.
+
+    Integrate `func` from `a` to `b` using Gaussian quadrature of
+    order `n`.
+
+    Parameters
+    ----------
+    func : callable
+        A Python function or method to integrate (must accept vector inputs).
+        If integrating a vector-valued function, the returned array must have
+        shape ``(..., len(x))``.
+    a : float
+        Lower limit of integration.
+    b : float
+        Upper limit of integration.
+    args : tuple, optional
+        Extra arguments to pass to function, if any.
+    n : int, optional
+        Order of quadrature integration. Default is 5.
+
+    Returns
+    -------
+    val : float
+        Gaussian quadrature approximation to the integral
+    none : None
+        Statically returned value of None
+
+    See Also
+    --------
+    quad : adaptive quadrature using QUADPACK
+    dblquad : double integrals
+    tplquad : triple integrals
+    romberg : adaptive Romberg quadrature
+    quadrature : adaptive Gaussian quadrature
+    romb : integrators for sampled data
+    simpson : integrators for sampled data
+    cumulative_trapezoid : cumulative integration for sampled data
+    ode : ODE integrator
+    odeint : ODE integrator
+
+    Examples
+    --------
+    >>> from scipy import integrate
+    >>> import numpy as np
+    >>> f = lambda x: x**8
+    >>> integrate.fixed_quad(f, 0.0, 1.0, n=4)
+    (0.1110884353741496, None)
+    >>> integrate.fixed_quad(f, 0.0, 1.0, n=5)
+    (0.11111111111111102, None)
+    >>> print(1/9.0)  # analytical result
+    0.1111111111111111
+
+    >>> integrate.fixed_quad(np.cos, 0.0, np.pi/2, n=4)
+    (0.9999999771971152, None)
+    >>> integrate.fixed_quad(np.cos, 0.0, np.pi/2, n=5)
+    (1.000000000039565, None)
+    >>> np.sin(np.pi/2)-np.sin(0)  # analytical result
+    1.0
+
+    """
+    x, w = _cached_roots_legendre(n)
+    x = np.real(x)
+    if np.isinf(a) or np.isinf(b):
+        raise ValueError("Gaussian quadrature is only available for "
+                         "finite limits.")
+    y = (b-a)*(x+1)/2.0 + a
+    return (b-a)/2.0 * np.sum(w*func(y, *args), axis=-1), None
+
+
+def vectorize1(func, args=(), vec_func=False):
+    """Vectorize the call to a function.
+
+    This is an internal utility function used by `romberg` and
+    `quadrature` to create a vectorized version of a function.
+
+    If `vec_func` is True, the function `func` is assumed to take vector
+    arguments.
+
+    Parameters
+    ----------
+    func : callable
+        User defined function.
+    args : tuple, optional
+        Extra arguments for the function.
+    vec_func : bool, optional
+        True if the function func takes vector arguments.
+
+    Returns
+    -------
+    vfunc : callable
+        A function that will take a vector argument and return the
+        result.
+
+    """
+    if vec_func:
+        def vfunc(x):
+            return func(x, *args)
+    else:
+        def vfunc(x):
+            if np.isscalar(x):
+                return func(x, *args)
+            x = np.asarray(x)
+            # call with first point to get output type
+            y0 = func(x[0], *args)
+            n = len(x)
+            dtype = getattr(y0, 'dtype', type(y0))
+            output = np.empty((n,), dtype=dtype)
+            output[0] = y0
+            for i in range(1, n):
+                output[i] = func(x[i], *args)
+            return output
+    return vfunc
+
+
+def quadrature(func, a, b, args=(), tol=1.49e-8, rtol=1.49e-8, maxiter=50,
+               vec_func=True, miniter=1):
+    """
+    Compute a definite integral using fixed-tolerance Gaussian quadrature.
+
+    Integrate `func` from `a` to `b` using Gaussian quadrature
+    with absolute tolerance `tol`.
+
+    Parameters
+    ----------
+    func : function
+        A Python function or method to integrate.
+    a : float
+        Lower limit of integration.
+    b : float
+        Upper limit of integration.
+    args : tuple, optional
+        Extra arguments to pass to function.
+    tol, rtol : float, optional
+        Iteration stops when error between last two iterates is less than
+        `tol` OR the relative change is less than `rtol`.
+    maxiter : int, optional
+        Maximum order of Gaussian quadrature.
+    vec_func : bool, optional
+        True or False if func handles arrays as arguments (is
+        a "vector" function). Default is True.
+    miniter : int, optional
+        Minimum order of Gaussian quadrature.
+
+    Returns
+    -------
+    val : float
+        Gaussian quadrature approximation (within tolerance) to integral.
+    err : float
+        Difference between last two estimates of the integral.
+
+    See Also
+    --------
+    romberg : adaptive Romberg quadrature
+    fixed_quad : fixed-order Gaussian quadrature
+    quad : adaptive quadrature using QUADPACK
+    dblquad : double integrals
+    tplquad : triple integrals
+    romb : integrator for sampled data
+    simpson : integrator for sampled data
+    cumulative_trapezoid : cumulative integration for sampled data
+    ode : ODE integrator
+    odeint : ODE integrator
+
+    Examples
+    --------
+    >>> from scipy import integrate
+    >>> import numpy as np
+    >>> f = lambda x: x**8
+    >>> integrate.quadrature(f, 0.0, 1.0)
+    (0.11111111111111106, 4.163336342344337e-17)
+    >>> print(1/9.0)  # analytical result
+    0.1111111111111111
+
+    >>> integrate.quadrature(np.cos, 0.0, np.pi/2)
+    (0.9999999999999536, 3.9611425250996035e-11)
+    >>> np.sin(np.pi/2)-np.sin(0)  # analytical result
+    1.0
+
+    """
+    if not isinstance(args, tuple):
+        args = (args,)
+    vfunc = vectorize1(func, args, vec_func=vec_func)
+    val = np.inf
+    err = np.inf
+    maxiter = max(miniter+1, maxiter)
+    for n in range(miniter, maxiter+1):
+        newval = fixed_quad(vfunc, a, b, (), n)[0]
+        err = abs(newval-val)
+        val = newval
+
+        if err < tol or err < rtol*abs(val):
+            break
+    else:
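+        # The for/else branch runs only when the loop finished without
+        # breaking, i.e. the tolerances were never reached.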
+        warnings.warn(
+            "maxiter (%d) exceeded. Latest difference = %e" % (maxiter, err),
+            AccuracyWarning)
+    return val, err
+
+
+def tupleset(t, i, value):
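+    # Return a copy of tuple `t` with element `i` replaced by `value`
+    # (tuples are immutable, so round-trip through a list).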
+    l = list(t)
+    l[i] = value
+    return tuple(l)
+
+
+# Note: alias kept for backwards compatibility. Rename was done
+# because cumtrapz is a slur in colloquial English (see gh-12924).
+def cumtrapz(y, x=None, dx=1.0, axis=-1, initial=None):
+    """An alias of `cumulative_trapezoid`.
+
+    `cumtrapz` is kept for backwards compatibility. For new code, prefer
+    `cumulative_trapezoid` instead.
+    """
+    return cumulative_trapezoid(y, x=x, dx=dx, axis=axis, initial=initial)
+
+
+def cumulative_trapezoid(y, x=None, dx=1.0, axis=-1, initial=None):
+    """
+    Cumulatively integrate y(x) using the composite trapezoidal rule.
+
+    Parameters
+    ----------
+    y : array_like
+        Values to integrate.
+    x : array_like, optional
+        The coordinate to integrate along. If None (default), use spacing `dx`
+        between consecutive elements in `y`.
+    dx : float, optional
+        Spacing between elements of `y`. Only used if `x` is None.
+    axis : int, optional
+        Specifies the axis to cumulate. Default is -1 (last axis).
+    initial : scalar, optional
+        If given, insert this value at the beginning of the returned result.
+        Typically this value should be 0. Default is None, which means no
+        value at ``x[0]`` is returned and `res` has one element less than `y`
+        along the axis of integration.
+
+    Returns
+    -------
+    res : ndarray
+        The result of cumulative integration of `y` along `axis`.
+        If `initial` is None, the shape is such that the axis of integration
+        has one less value than `y`. If `initial` is given, the shape is equal
+        to that of `y`.
+
+    See Also
+    --------
+    numpy.cumsum, numpy.cumprod
+    quad : adaptive quadrature using QUADPACK
+    romberg : adaptive Romberg quadrature
+    quadrature : adaptive Gaussian quadrature
+    fixed_quad : fixed-order Gaussian quadrature
+    dblquad : double integrals
+    tplquad : triple integrals
+    romb : integrators for sampled data
+    ode : ODE integrators
+    odeint : ODE integrators
+
+    Examples
+    --------
+    >>> from scipy import integrate
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+
+    >>> x = np.linspace(-2, 2, num=20)
+    >>> y = x
+    >>> y_int = integrate.cumulative_trapezoid(y, x, initial=0)
+    >>> plt.plot(x, y_int, 'ro', x, y[0] + 0.5 * x**2, 'b-')
+    >>> plt.show()
+
+    """
+    y = np.asarray(y)
+    if x is None:
+        d = dx
+    else:
+        x = np.asarray(x)
+        if x.ndim == 1:
+            d = np.diff(x)
+            # reshape to correct shape
+            shape = [1] * y.ndim
+            shape[axis] = -1
+            d = d.reshape(shape)
+        elif len(x.shape) != len(y.shape):
+            raise ValueError("If given, shape of x must be 1-D or the "
+                             "same as y.")
+        else:
+            d = np.diff(x, axis=axis)
+
+        if d.shape[axis] != y.shape[axis] - 1:
+            raise ValueError("If given, length of x along axis must be the "
+                             "same as y.")
+
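+    # Pair each sample with its successor along `axis` and accumulate the
+    # trapezoid areas: cumsum(d * (y[1:] + y[:-1]) / 2).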
+    nd = len(y.shape)
+    slice1 = tupleset((slice(None),)*nd, axis, slice(1, None))
+    slice2 = tupleset((slice(None),)*nd, axis, slice(None, -1))
+    res = np.cumsum(d * (y[slice1] + y[slice2]) / 2.0, axis=axis)
+
+    if initial is not None:
+        if not np.isscalar(initial):
+            raise ValueError("`initial` parameter should be a scalar.")
+
+        shape = list(res.shape)
+        shape[axis] = 1
+        res = np.concatenate([np.full(shape, initial, dtype=res.dtype), res],
+                             axis=axis)
+
+    return res
+
+
+def _basic_simpson(y, start, stop, x, dx, axis):
+    nd = len(y.shape)
+    if start is None:
+        start = 0
+    step = 2
+    slice_all = (slice(None),)*nd
+    slice0 = tupleset(slice_all, axis, slice(start, stop, step))
+    slice1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
+    slice2 = tupleset(slice_all, axis, slice(start+2, stop+2, step))
+
+    if x is None:  # Even-spaced Simpson's rule.
+        result = np.sum(y[slice0] + 4.0*y[slice1] + y[slice2], axis=axis)
+        result *= dx / 3.0
+    else:
+        # Account for possibly different spacings.
+        #    Simpson's rule changes a bit.
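+        # For adjacent interval widths h0, h1 (hsum = h0 + h1), the sample
+        # weights become (hsum/6)*(2 - h1/h0), (hsum/6)*(hsum**2/(h0*h1)),
+        # and (hsum/6)*(2 - h0/h1) for y0, y1, y2 respectively.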
+        h = np.diff(x, axis=axis)
+        sl0 = tupleset(slice_all, axis, slice(start, stop, step))
+        sl1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
+        h0 = np.float64(h[sl0])
+        h1 = np.float64(h[sl1])
+        hsum = h0 + h1
+        hprod = h0 * h1
+        h0divh1 = np.true_divide(h0, h1, out=np.zeros_like(h0), where=h1 != 0)
+        tmp = hsum/6.0 * (y[slice0] *
+                          (2.0 - np.true_divide(1.0, h0divh1,
+                                                out=np.zeros_like(h0divh1),
+                                                where=h0divh1 != 0)) +
+                          y[slice1] * (hsum *
+                                       np.true_divide(hsum, hprod,
+                                                      out=np.zeros_like(hsum),
+                                                      where=hprod != 0)) +
+                          y[slice2] * (2.0 - h0divh1))
+        result = np.sum(tmp, axis=axis)
+    return result
+
+
+# Note: alias kept for backwards compatibility. simps was renamed to simpson
+# because the former is a slur in colloquial English (see gh-12924).
+def simps(y, x=None, dx=1.0, axis=-1, even='avg'):
+    """An alias of `simpson`.
+
+    `simps` is kept for backwards compatibility. For new code, prefer
+    `simpson` instead.
+    """
+    return simpson(y, x=x, dx=dx, axis=axis, even=even)
+
+
+def simpson(y, x=None, dx=1.0, axis=-1, even='avg'):
+    """
+    Integrate y(x) using samples along the given axis and the composite
+    Simpson's rule. If x is None, spacing of dx is assumed.
+
+    If there are an even number of samples, N, then there are an odd
+    number of intervals (N-1), but Simpson's rule requires an even number
+    of intervals. The parameter 'even' controls how this is handled.
+
+    Parameters
+    ----------
+    y : array_like
+        Array to be integrated.
+    x : array_like, optional
+        If given, the points at which `y` is sampled.
+    dx : float, optional
+        Spacing of integration points along axis of `x`. Only used when
+        `x` is None. Default is 1.
+    axis : int, optional
+        Axis along which to integrate. Default is the last axis.
+    even : str {'avg', 'first', 'last'}, optional
+        'avg' : Average two results: 1) use Simpson's rule on the first
+                N-2 intervals with a trapezoidal rule on the last interval,
+                and 2) use Simpson's rule on the last N-2 intervals with a
+                trapezoidal rule on the first interval.
+
+        'first' : Use Simpson's rule for the first N-2 intervals with
+                a trapezoidal rule on the last interval.
+
+        'last' : Use Simpson's rule for the last N-2 intervals with a
+               trapezoidal rule on the first interval.
+
+    Returns
+    -------
+    float
+        The estimated integral computed with the composite Simpson's rule.
+
+    See Also
+    --------
+    quad : adaptive quadrature using QUADPACK
+    romberg : adaptive Romberg quadrature
+    quadrature : adaptive Gaussian quadrature
+    fixed_quad : fixed-order Gaussian quadrature
+    dblquad : double integrals
+    tplquad : triple integrals
+    romb : integrators for sampled data
+    cumulative_trapezoid : cumulative integration for sampled data
+    ode : ODE integrators
+    odeint : ODE integrators
+
+    Notes
+    -----
+    For an odd number of samples that are equally spaced the result is
+    exact if the function is a polynomial of order 3 or less. If
+    the samples are not equally spaced, then the result is exact only
+    if the function is a polynomial of order 2 or less.
+
+    Examples
+    --------
+    >>> from scipy import integrate
+    >>> import numpy as np
+    >>> x = np.arange(0, 10)
+    >>> y = np.arange(0, 10)
+
+    >>> integrate.simpson(y, x)
+    40.5
+
+    >>> y = np.power(x, 3)
+    >>> integrate.simpson(y, x)
+    1642.5
+    >>> integrate.quad(lambda x: x**3, 0, 9)[0]
+    1640.25
+
+    >>> integrate.simpson(y, x, even='first')
+    1644.5
+
+    """
+    y = np.asarray(y)
+    nd = len(y.shape)
+    N = y.shape[axis]
+    last_dx = dx
+    first_dx = dx
+    returnshape = 0
+    if x is not None:
+        x = np.asarray(x)
+        if len(x.shape) == 1:
+            shapex = [1] * nd
+            shapex[axis] = x.shape[0]
+            saveshape = x.shape
+            returnshape = 1
+            x = x.reshape(tuple(shapex))
+        elif len(x.shape) != len(y.shape):
+            raise ValueError("If given, shape of x must be 1-D or the "
+                             "same as y.")
+        if x.shape[axis] != N:
+            raise ValueError("If given, length of x along axis must be the "
+                             "same as y.")
+    if N % 2 == 0:
+        val = 0.0
+        result = 0.0
+        slice1 = (slice(None),)*nd
+        slice2 = (slice(None),)*nd
+        if even not in ['avg', 'last', 'first']:
+            raise ValueError("Parameter 'even' must be "
+                             "'avg', 'last', or 'first'.")
+        # Compute using Simpson's rule on first intervals
+        if even in ['avg', 'first']:
+            slice1 = tupleset(slice1, axis, -1)
+            slice2 = tupleset(slice2, axis, -2)
+            if x is not None:
+                last_dx = x[slice1] - x[slice2]
+            val += 0.5*last_dx*(y[slice1]+y[slice2])
+            result = _basic_simpson(y, 0, N-3, x, dx, axis)
+        # Compute using Simpson's rule on last set of intervals
+        if even in ['avg', 'last']:
+            slice1 = tupleset(slice1, axis, 0)
+            slice2 = tupleset(slice2, axis, 1)
+            if x is not None:
+                first_dx = x[tuple(slice2)] - x[tuple(slice1)]
+            val += 0.5*first_dx*(y[slice2]+y[slice1])
+            result += _basic_simpson(y, 1, N-2, x, dx, axis)
+        if even == 'avg':
+            val /= 2.0
+            result /= 2.0
+        result = result + val
+    else:
+        result = _basic_simpson(y, 0, N-2, x, dx, axis)
+    if returnshape:
+        x = x.reshape(saveshape)
+    return result
+
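+# Editor's sketch (not part of the upstream source): as the code above shows,
+# for an even number of samples the even='avg' result is exactly the mean of
+# the even='first' and even='last' results.
+#
+# >>> import numpy as np
+# >>> from scipy.integrate import simpson
+# >>> x = np.linspace(0, 1, 8)          # even number of samples
+# >>> y = np.exp(x)
+# >>> r_first = simpson(y, x, even='first')
+# >>> r_last = simpson(y, x, even='last')
+# >>> np.isclose(simpson(y, x, even='avg'), (r_first + r_last) / 2)
+# True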
+
+def romb(y, dx=1.0, axis=-1, show=False):
+    """
+    Romberg integration using samples of a function.
+
+    Parameters
+    ----------
+    y : array_like
+        A vector of ``2**k + 1`` equally-spaced samples of a function.
+    dx : float, optional
+        The sample spacing. Default is 1.
+    axis : int, optional
+        The axis along which to integrate. Default is -1 (last axis).
+    show : bool, optional
+        When `y` is a single 1-D array and this argument is True, print the
+        table showing Richardson extrapolation from the samples. Default is
+        False.
+
+    Returns
+    -------
+    romb : ndarray
+        The integrated result for `axis`.
+
+    See Also
+    --------
+    quad : adaptive quadrature using QUADPACK
+    romberg : adaptive Romberg quadrature
+    quadrature : adaptive Gaussian quadrature
+    fixed_quad : fixed-order Gaussian quadrature
+    dblquad : double integrals
+    tplquad : triple integrals
+    simpson : integrators for sampled data
+    cumulative_trapezoid : cumulative integration for sampled data
+    ode : ODE integrators
+    odeint : ODE integrators
+
+    Examples
+    --------
+    >>> from scipy import integrate
+    >>> import numpy as np
+    >>> x = np.arange(10, 14.25, 0.25)
+    >>> y = np.arange(3, 12)
+
+    >>> integrate.romb(y)
+    56.0
+
+    >>> y = np.sin(np.power(x, 2.5))
+    >>> integrate.romb(y)
+    -0.742561336672229
+
+    >>> integrate.romb(y, show=True)
+    Richardson Extrapolation Table for Romberg Integration
+    ======================================================
+    -0.81576
+     4.63862  6.45674
+    -1.10581 -3.02062 -3.65245
+    -2.57379 -3.06311 -3.06595 -3.05664
+    -1.34093 -0.92997 -0.78776 -0.75160 -0.74256
+    ======================================================
+    -0.742561336672229  # may vary
+
+    """
+    y = np.asarray(y)
+    nd = len(y.shape)
+    Nsamps = y.shape[axis]
+    Ninterv = Nsamps-1
+    n = 1
+    k = 0
+    while n < Ninterv:
+        n <<= 1
+        k += 1
+    if n != Ninterv:
+        raise ValueError("Number of samples must be one plus a "
+                         "non-negative power of 2.")
+
+    R = {}
+    slice_all = (slice(None),) * nd
+    slice0 = tupleset(slice_all, axis, 0)
+    slicem1 = tupleset(slice_all, axis, -1)
+    h = Ninterv * np.asarray(dx, dtype=float)
+    R[(0, 0)] = (y[slice0] + y[slicem1])/2.0*h
+    slice_R = slice_all
+    start = stop = step = Ninterv
+    for i in range(1, k+1):
+        start >>= 1
+        slice_R = tupleset(slice_R, axis, slice(start, stop, step))
+        step >>= 1
+        R[(i, 0)] = 0.5*(R[(i-1, 0)] + h*y[slice_R].sum(axis=axis))
+        for j in range(1, i+1):
+            prev = R[(i, j-1)]
+            R[(i, j)] = prev + (prev-R[(i-1, j-1)]) / ((1 << (2*j))-1)
+        h /= 2.0
+
+    if show:
+        if not np.isscalar(R[(0, 0)]):
+            print("*** Printing table only supported for integrals" +
+                  " of a single data set.")
+        else:
+            try:
+                precis = show[0]
+            except (TypeError, IndexError):
+                precis = 5
+            try:
+                width = show[1]
+            except (TypeError, IndexError):
+                width = 8
+            formstr = "%%%d.%df" % (width, precis)
+
+            title = "Richardson Extrapolation Table for Romberg Integration"
+            print(title, "=" * len(title), sep="\n", end="\n")
+            for i in range(k+1):
+                for j in range(i+1):
+                    print(formstr % R[(i, j)], end=" ")
+                print()
+            print("=" * len(title))
+
+    return R[(k, k)]
+
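+# Editor's sketch (not part of the upstream source): romb requires
+# ``2**k + 1`` samples; with that constraint satisfied, it converges quickly
+# on smooth integrands.
+#
+# >>> import numpy as np
+# >>> from scipy.integrate import romb
+# >>> x = np.linspace(0, 1, 2**4 + 1)   # 17 = 2**4 + 1 samples
+# >>> np.allclose(romb(np.exp(x), dx=x[1] - x[0]), np.e - 1)
+# True
+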
+# Romberg quadratures for numeric integration.
+#
+# Written by Scott M. Ransom 
+# last revision: 14 Nov 98
+#
+# Cosmetic changes by Konrad Hinsen 
+# last revision: 1999-7-21
+#
+# Adapted to SciPy by Travis Oliphant 
+# last revision: Dec 2001
+
+
+def _difftrap(function, interval, numtraps):
+    """
+    Perform part of the trapezoidal rule to integrate a function.
+    Assumes that difftrap has already been called with all lower
+    powers-of-2, starting with 1. Calling difftrap only returns the summation
+    of the new ordinates. It does _not_ multiply by the width
+    of the trapezoids. This must be performed by the caller.
+        'function' is the function to evaluate (must accept vector arguments).
+        'interval' is a sequence with lower and upper limits
+                   of integration.
+        'numtraps' is the number of trapezoids to use (must be a
+                   power-of-2).
+    """
+    if numtraps <= 0:
+        raise ValueError("numtraps must be > 0 in difftrap().")
+    elif numtraps == 1:
+        return 0.5*(function(interval[0])+function(interval[1]))
+    else:
+        numtosum = numtraps/2
+        h = float(interval[1]-interval[0])/numtosum
+        lox = interval[0] + 0.5 * h
+        points = lox + h * np.arange(numtosum)
+        s = np.sum(function(points), axis=0)
+        return s
+
+
+def _romberg_diff(b, c, k):
+    """
+    Compute the differences for the Romberg quadrature corrections.
+    See Forman Acton's "Real Computing Made Real," p 143.
+    """
+    tmp = 4.0**k
+    return (tmp * c - b)/(tmp - 1.0)
+
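+# Editor's note (added for clarity): _romberg_diff implements one step of
+# Richardson extrapolation; in the usual Romberg notation,
+#   R(i, j) = (4**j * R(i, j-1) - R(i-1, j-1)) / (4**j - 1),
+# with b = R(i-1, j-1), c = R(i, j-1) and k = j in the code above.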
+
+def _printresmat(function, interval, resmat):
+    # Print the Romberg result matrix.
+    i = j = 0
+    print('Romberg integration of', repr(function), end=' ')
+    print('from', interval)
+    print('')
+    print('%6s %9s %9s' % ('Steps', 'StepSize', 'Results'))
+    for i in range(len(resmat)):
+        print('%6d %9f' % (2**i, (interval[1]-interval[0])/(2.**i)), end=' ')
+        for j in range(i+1):
+            print('%9f' % (resmat[i][j]), end=' ')
+        print('')
+    print('')
+    print('The final result is', resmat[i][j], end=' ')
+    print('after', 2**(len(resmat)-1)+1, 'function evaluations.')
+
+
+def romberg(function, a, b, args=(), tol=1.48e-8, rtol=1.48e-8, show=False,
+            divmax=10, vec_func=False):
+    """
+    Romberg integration of a callable function or method.
+
+    Returns the integral of `function` (a function of one variable)
+    over the interval (`a`, `b`).
+
+    If `show` is 1, the triangular array of the intermediate results
+    will be printed. If `vec_func` is True (default is False), then
+    `function` is assumed to support vector arguments.
+
+    Parameters
+    ----------
+    function : callable
+        Function to be integrated.
+    a : float
+        Lower limit of integration.
+    b : float
+        Upper limit of integration.
+
+    Returns
+    -------
+    results : float
+        Result of the integration.
+
+    Other Parameters
+    ----------------
+    args : tuple, optional
+        Extra arguments to pass to `function`. Each element of `args` will
+        be passed as a single argument to `function`. Default is to pass no
+        extra arguments.
+    tol, rtol : float, optional
+        The desired absolute and relative tolerances. Defaults are 1.48e-8.
+    show : bool, optional
+        Whether to print the results. Default is False.
+    divmax : int, optional
+        Maximum order of extrapolation. Default is 10.
+    vec_func : bool, optional
+        Whether `function` handles arrays as arguments (i.e., whether it is a
+        "vector" function). Default is False.
+
+    See Also
+    --------
+    fixed_quad : Fixed-order Gaussian quadrature.
+    quad : Adaptive quadrature using QUADPACK.
+    dblquad : Double integrals.
+    tplquad : Triple integrals.
+    romb : Integrators for sampled data.
+    simpson : Integrators for sampled data.
+    cumulative_trapezoid : Cumulative integration for sampled data.
+    ode : ODE integrator.
+    odeint : ODE integrator.
+
+    References
+    ----------
+    .. [1] 'Romberg's method' https://en.wikipedia.org/wiki/Romberg%27s_method
+
+    Examples
+    --------
+    Integrate a Gaussian from 0 to 1 and compare to the error function.
+
+    >>> from scipy import integrate
+    >>> from scipy.special import erf
+    >>> import numpy as np
+    >>> gaussian = lambda x: 1/np.sqrt(np.pi) * np.exp(-x**2)
+    >>> result = integrate.romberg(gaussian, 0, 1, show=True)
+    Romberg integration of <function vfunc at ...> from [0, 1]
+
+    ::
+
+       Steps  StepSize  Results
+           1  1.000000  0.385872
+           2  0.500000  0.412631  0.421551
+           4  0.250000  0.419184  0.421368  0.421356
+           8  0.125000  0.420810  0.421352  0.421350  0.421350
+          16  0.062500  0.421215  0.421350  0.421350  0.421350  0.421350
+          32  0.031250  0.421317  0.421350  0.421350  0.421350  0.421350  0.421350
+
+    The final result is 0.421350396475 after 33 function evaluations.
+
+    >>> print("%g %g" % (2*result, erf(1)))
+    0.842701 0.842701
+
+    """
+    if np.isinf(a) or np.isinf(b):
+        raise ValueError("Romberg integration only available "
+                         "for finite limits.")
+    vfunc = vectorize1(function, args, vec_func=vec_func)
+    n = 1
+    interval = [a, b]
+    intrange = b - a
+    ordsum = _difftrap(vfunc, interval, n)
+    result = intrange * ordsum
+    resmat = [[result]]
+    err = np.inf
+    last_row = resmat[0]
+    for i in range(1, divmax+1):
+        n *= 2
+        ordsum += _difftrap(vfunc, interval, n)
+        row = [intrange * ordsum / n]
+        for k in range(i):
+            row.append(_romberg_diff(last_row[k], row[k], k+1))
+        result = row[i]
+        lastresult = last_row[i-1]
+        if show:
+            resmat.append(row)
+        err = abs(result - lastresult)
+        if err < tol or err < rtol * abs(result):
+            break
+        last_row = row
+    else:
+        warnings.warn(
+            "divmax (%d) exceeded. Latest difference = %e" % (divmax, err),
+            AccuracyWarning)
+
+    if show:
+        _printresmat(vfunc, interval, resmat)
+    return result
+
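+# Editor's sketch (not part of the upstream source): with vec_func=True the
+# integrand is handed whole arrays of points, so a ufunc like np.exp can be
+# used directly.
+#
+# >>> import numpy as np
+# >>> from scipy.integrate import romberg
+# >>> np.allclose(romberg(np.exp, 0, 1, vec_func=True), np.e - 1)
+# True
+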
+
+# Coefficients for Newton-Cotes quadrature
+#
+# These are the sample points used to construct the local interpolating
+#  polynomial: a are the weights for Newton-Cotes integration and B is
+#  the error coefficient. The error in these coefficients grows as N
+#  gets larger, or as the samples get closer together.
+
+# You can use maxima to find these rational coefficients
+#  for equally spaced data using the commands
+#  a(i,N) := integrate(product(r-j,j,0,i-1) * product(r-j,j,i+1,N),r,0,N) / ((N-i)! * i!) * (-1)^(N-i);
+#  Be(N) := N^(N+2)/(N+2)! * (N/(N+3) - sum((i/N)^(N+2)*a(i,N),i,0,N));
+#  Bo(N) := N^(N+1)/(N+1)! * (N/(N+2) - sum((i/N)^(N+1)*a(i,N),i,0,N));
+#  B(N) := (if (mod(N,2)=0) then Be(N) else Bo(N));
+#
+# pre-computed for equally-spaced weights
+#
+# num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N]
+#
+#  a = num_a*array(int_a)/den_a
+#  B = num_B*1.0 / den_B
+#
+#  integrate(f(x),x,x_0,x_N) = dx*sum(a*f(x_i)) + B*(dx)^(2k+3) f^(2k+2)(x*)
+#    where k = N // 2
+#
+_builtincoeffs = {
+    1: (1,2,[1,1],-1,12),
+    2: (1,3,[1,4,1],-1,90),
+    3: (3,8,[1,3,3,1],-3,80),
+    4: (2,45,[7,32,12,32,7],-8,945),
+    5: (5,288,[19,75,50,50,75,19],-275,12096),
+    6: (1,140,[41,216,27,272,27,216,41],-9,1400),
+    7: (7,17280,[751,3577,1323,2989,2989,1323,3577,751],-8183,518400),
+    8: (4,14175,[989,5888,-928,10496,-4540,10496,-928,5888,989],
+        -2368,467775),
+    9: (9,89600,[2857,15741,1080,19344,5778,5778,19344,1080,
+                 15741,2857], -4671, 394240),
+    10: (5,299376,[16067,106300,-48525,272400,-260550,427368,
+                   -260550,272400,-48525,106300,16067],
+         -673175, 163459296),
+    11: (11,87091200,[2171465,13486539,-3237113, 25226685,-9595542,
+                      15493566,15493566,-9595542,25226685,-3237113,
+                      13486539,2171465], -2224234463, 237758976000),
+    12: (1, 5255250, [1364651,9903168,-7587864,35725120,-51491295,
+                      87516288,-87797136,87516288,-51491295,35725120,
+                      -7587864,9903168,1364651], -3012, 875875),
+    13: (13, 402361344000,[8181904909, 56280729661, -31268252574,
+                           156074417954,-151659573325,206683437987,
+                           -43111992612,-43111992612,206683437987,
+                           -151659573325,156074417954,-31268252574,
+                           56280729661,8181904909], -2639651053,
+         344881152000),
+    14: (7, 2501928000, [90241897,710986864,-770720657,3501442784,
+                         -6625093363,12630121616,-16802270373,19534438464,
+                         -16802270373,12630121616,-6625093363,3501442784,
+                         -770720657,710986864,90241897], -3740727473,
+         1275983280000)
+    }
+
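+# Editor's sketch (not part of the upstream source): reconstructing the
+# Simpson (N=2) rule from the table above, following the recipe in the
+# comment preceding _builtincoeffs.
+#
+# >>> import numpy as np
+# >>> num_a, den_a, int_a, num_B, den_B = _builtincoeffs[2]
+# >>> num_a * np.array(int_a) / den_a    # weights a: [1/3, 4/3, 1/3]
+# array([0.33333333, 1.33333333, 0.33333333])
+# >>> num_B * 1.0 / den_B                # error coefficient B = -1/90
+# -0.011111111111111112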
+
+def newton_cotes(rn, equal=0):
+    r"""
+    Return weights and error coefficient for Newton-Cotes integration.
+
+    Suppose we have (N+1) samples of f at the positions
+    x_0, x_1, ..., x_N. Then an N-point Newton-Cotes formula for the
+    integral between x_0 and x_N is:
+
+    :math:`\int_{x_0}^{x_N} f(x)dx = \Delta x \sum_{i=0}^{N} a_i f(x_i)
+    + B_N (\Delta x)^{N+2} f^{N+1} (\xi)`
+
+    where :math:`\xi \in [x_0,x_N]`
+    and :math:`\Delta x = \frac{x_N-x_0}{N}` is the average sample spacing.
+
+    If the samples are equally-spaced and N is even, then the error
+    term is :math:`B_N (\Delta x)^{N+3} f^{N+2}(\xi)`.
+
+    Parameters
+    ----------
+    rn : int
+        The integer order for equally-spaced data or the relative positions of
+        the samples with the first sample at 0 and the last at N, where N+1 is
+        the length of `rn`. N is the order of the Newton-Cotes integration.
+    equal : int, optional
+        Set to 1 to enforce equally spaced data.
+
+    Returns
+    -------
+    an : ndarray
+        1-D array of weights to apply to the function at the provided sample
+        positions.
+    B : float
+        Error coefficient.
+
+    Notes
+    -----
+    Normally, the Newton-Cotes rules are used on smaller integration
+    regions and a composite rule is used to return the total integral.
+
+    Examples
+    --------
+    Compute the integral of sin(x) in [0, :math:`\pi`]:
+
+    >>> from scipy.integrate import newton_cotes
+    >>> import numpy as np
+    >>> def f(x):
+    ...     return np.sin(x)
+    >>> a = 0
+    >>> b = np.pi
+    >>> exact = 2
+    >>> for N in [2, 4, 6, 8, 10]:
+    ...     x = np.linspace(a, b, N + 1)
+    ...     an, B = newton_cotes(N, 1)
+    ...     dx = (b - a) / N
+    ...     quad = dx * np.sum(an * f(x))
+    ...     error = abs(quad - exact)
+    ...     print('{:2d}  {:10.9f}  {:.5e}'.format(N, quad, error))
+    ...
+     2   2.094395102   9.43951e-02
+     4   1.998570732   1.42927e-03
+     6   2.000017814   1.78136e-05
+     8   1.999999835   1.64725e-07
+    10   2.000000001   1.14677e-09
+
+    """
+    try:
+        N = len(rn)-1
+        if equal:
+            rn = np.arange(N+1)
+        elif np.all(np.diff(rn) == 1):
+            equal = 1
+    except Exception:
+        N = rn
+        rn = np.arange(N+1)
+        equal = 1
+
+    if equal and N in _builtincoeffs:
+        na, da, vi, nb, db = _builtincoeffs[N]
+        an = na * np.array(vi, dtype=float) / da
+        return an, float(nb)/db
+
+    if (rn[0] != 0) or (rn[-1] != N):
+        raise ValueError("The sample positions must start at 0"
+                         " and end at N")
+    yi = rn / float(N)
+    ti = 2 * yi - 1
+    nvec = np.arange(N+1)
+    C = ti ** nvec[:, np.newaxis]
+    Cinv = np.linalg.inv(C)
+    # improve precision of result
+    for i in range(2):
+        Cinv = 2*Cinv - Cinv.dot(C).dot(Cinv)
+    vec = 2.0 / (nvec[::2]+1)
+    ai = Cinv[:, ::2].dot(vec) * (N / 2.)
+
+    if (N % 2 == 0) and equal:
+        BN = N/(N+3.)
+        power = N+2
+    else:
+        BN = N/(N+2.)
+        power = N+1
+
+    BN = BN - np.dot(yi**power, ai)
+    p1 = power+1
+    fac = power*math.log(N) - gammaln(p1)
+    fac = math.exp(fac)
+    return ai, BN*fac
+
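+# Editor's sketch (not part of the upstream source) of the composite use
+# described in the Notes of newton_cotes; composite_nc and n_panels are
+# illustrative names only.
+#
+# >>> import numpy as np
+# >>> def composite_nc(f, a, b, order=4, n_panels=8):
+# ...     an, _ = newton_cotes(order, 1)
+# ...     edges = np.linspace(a, b, n_panels + 1)
+# ...     total = 0.0
+# ...     for lo, hi in zip(edges[:-1], edges[1:]):
+# ...         xs = np.linspace(lo, hi, order + 1)
+# ...         total += (hi - lo) / order * np.sum(an * f(xs))
+# ...     return total
+# >>> np.allclose(composite_nc(np.sin, 0, np.pi), 2.0)
+# True
+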
+
+def _qmc_quad_iv(func, a, b, n_points, n_estimates, qrng, log):
+
+    # lazy import to avoid issues with partially-initialized submodule
+    if not hasattr(_qmc_quad, 'stats'):
+        from scipy import stats
+        _qmc_quad.stats = stats
+    else:
+        stats = _qmc_quad.stats
+
+    if not callable(func):
+        message = "`func` must be callable."
+        raise TypeError(message)
+
+    # a, b will be modified, so copy. Oh well if it's copied twice.
+    a = np.atleast_1d(a).copy()
+    b = np.atleast_1d(b).copy()
+    a, b = np.broadcast_arrays(a, b)
+    dim = a.shape[0]
+
+    try:
+        func((a + b) / 2)
+    except Exception as e:
+        message = ("`func` must evaluate the integrand at points within "
+                   "the integration range; e.g. `func( (a + b) / 2)` "
+                   "must return the integrand at the centroid of the "
+                   "integration volume.")
+        raise ValueError(message) from e
+
+    try:
+        func(np.array([a, b]))
+        vfunc = func
+    except Exception as e:
+        message = ("Exception encountered when attempting vectorized call to "
+                   f"`func`: {e}. `func` should accept two-dimensional array "
+                   "with shape `(n_points, len(a))` and return an array with "
+                   "the integrand value at each of the `n_points` for better "
+                   "performance.")
+        warnings.warn(message, stacklevel=3)
+
+        def vfunc(x):
+            return np.apply_along_axis(func, axis=-1, arr=x)
+
+    n_points_int = np.int64(n_points)
+    if n_points != n_points_int:
+        message = "`n_points` must be an integer."
+        raise TypeError(message)
+
+    n_estimates_int = np.int64(n_estimates)
+    if n_estimates != n_estimates_int:
+        message = "`n_estimates` must be an integer."
+        raise TypeError(message)
+
+    if qrng is None:
+        qrng = stats.qmc.Halton(dim)
+    elif not isinstance(qrng, stats.qmc.QMCEngine):
+        message = "`qrng` must be an instance of scipy.stats.qmc.QMCEngine."
+        raise TypeError(message)
+
+    if qrng.d != a.shape[0]:
+        message = ("`qrng` must be initialized with dimensionality equal to "
+                   "the number of variables in `a`, i.e., "
+                   "`qrng.random().shape[-1]` must equal `a.shape[0]`.")
+        raise ValueError(message)
+
+    rng_seed = getattr(qrng, 'rng_seed', None)
+    rng = stats._qmc.check_random_state(rng_seed)
+
+    if log not in {True, False}:
+        message = "`log` must be boolean (`True` or `False`)."
+        raise TypeError(message)
+
+    return (vfunc, a, b, n_points_int, n_estimates_int, qrng, rng, log, stats)
+
+
+QMCQuadResult = namedtuple('QMCQuadResult', ['integral', 'standard_error'])
+
+
+def _qmc_quad(func, a, b, *, n_points=1024, n_estimates=8, qrng=None,
+              log=False, args=None):
+    """
+    Compute an integral in N-dimensions using Quasi-Monte Carlo quadrature.
+
+    Parameters
+    ----------
+    func : callable
+        The integrand. Must accept a single argument `x`, an array which
+        specifies the point at which to evaluate the integrand. For efficiency,
+        the function should be vectorized to compute the integrand for each
+        element of an array of shape ``(n_points, n)``, where ``n`` is the
+        number of variables.
+    a, b : array-like
+        One-dimensional arrays specifying the lower and upper integration
+        limits, respectively, of each of the ``n`` variables.
+    n_points, n_estimates : int, optional
+        One QMC sample of `n_points` (default: 1024) points will be generated
+        by `qrng`, and `n_estimates` (default: 8) statistically independent
+        estimates of the integral will be produced. The total number of points
+        at which the integrand `func` will be evaluated is
+        ``n_points * n_estimates``. See Notes for details.
+    qrng : `~scipy.stats.qmc.QMCEngine`, optional
+        An instance of the QMCEngine from which to sample QMC points.
+        The QMCEngine must be initialized to a number of dimensions
+        corresponding with the number of variables ``x0, ..., xn`` passed to
+        `func`.
+        The provided QMCEngine is used to produce the first integral estimate.
+        If `n_estimates` is greater than one, additional QMCEngines are
+        spawned from the first (with scrambling enabled, if it is an option.)
+        If a QMCEngine is not provided, the default `scipy.stats.qmc.Halton`
+        will be initialized with the number of dimensions determined from
+        `a`.
+    log : boolean, default: False
+        When set to True, `func` returns the log of the integrand, and
+        the result object contains the log of the integral.
+
+    Returns
+    -------
+    result : object
+        A result object with attributes:
+
+        integral : float
+            The estimate of the integral.
+        standard_error : float
+            The error estimate. See Notes for interpretation.
+
+    Notes
+    -----
+    Values of the integrand at each of the `n_points` points of a QMC sample
+    are used to produce an estimate of the integral. This estimate is drawn
+    from a population of possible estimates of the integral, and the value
+    we obtain depends on the particular points at which the integrand was
+    evaluated. We perform this process `n_estimates` times, each time
+    evaluating the integrand at different scrambled QMC points, effectively
+    drawing i.i.d. random samples from the population of integral estimates.
+    The sample mean :math:`m` of these integral estimates is an
+    unbiased estimator of the true value of the integral, and the standard
+    error of the mean :math:`s` of these estimates may be used to generate
+    confidence intervals using the t distribution with ``n_estimates - 1``
+    degrees of freedom. Perhaps counter-intuitively, increasing `n_points`
+    while keeping the total number of function evaluation points
+    ``n_points * n_estimates`` fixed tends to reduce the actual error, whereas
+    increasing `n_estimates` tends to decrease the error estimate.
+
+    Examples
+    --------
+    QMC quadrature is particularly useful for computing integrals in higher
+    dimensions. An example integrand is the probability density function
+    of a multivariate normal distribution.
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> dim = 8
+    >>> mean = np.zeros(dim)
+    >>> cov = np.eye(dim)
+    >>> def func(x):
+    ...     return stats.multivariate_normal.pdf(x, mean, cov)
+
+    To compute the integral over the unit hypercube:
+
+    >>> from scipy.integrate import qmc_quad
+    >>> a = np.zeros(dim)
+    >>> b = np.ones(dim)
+    >>> rng = np.random.default_rng()
+    >>> qrng = stats.qmc.Halton(d=dim, seed=rng)
+    >>> n_estimates = 8
+    >>> res = qmc_quad(func, a, b, n_estimates=n_estimates, qrng=qrng)
+    >>> res.integral, res.standard_error
+    (0.00018441088533413305, 1.1255608140911588e-07)
+
+    A two-sided, 99% confidence interval for the integral may be estimated
+    as:
+
+    >>> t = stats.t(df=n_estimates-1, loc=res.integral,
+    ...             scale=res.standard_error)
+    >>> t.interval(0.99)
+    (0.00018401699720722663, 0.00018480477346103947)
+
+    Indeed, the value reported by `scipy.stats.multivariate_normal` is
+    within this range.
+
+    >>> stats.multivariate_normal.cdf(b, mean, cov, lower_limit=a)
+    0.00018430867675187443
+
+    """
+    args = _qmc_quad_iv(func, a, b, n_points, n_estimates, qrng, log)
+    func, a, b, n_points, n_estimates, qrng, rng, log, stats = args
+
+    # The sign of the integral depends on the order of the limits. Fix this by
+    # ensuring that lower bounds are indeed lower and setting sign of resulting
+    # integral manually
+    if np.any(a == b):
+        message = ("A lower limit was equal to an upper limit, so the value "
+                   "of the integral is zero by definition.")
+        warnings.warn(message, stacklevel=2)
+        return QMCQuadResult(-np.inf if log else 0, 0)
+
+    i_swap = b < a
+    sign = (-1)**(i_swap.sum(axis=-1))  # odd # of swaps -> negative
+    a[i_swap], b[i_swap] = b[i_swap], a[i_swap]
+
+    A = np.prod(b - a)
+    dA = A / n_points
+
+    estimates = np.zeros(n_estimates)
+    rngs = _rng_spawn(qrng.rng, n_estimates)
+    for i in range(n_estimates):
+        # Generate integral estimate
+        sample = qrng.random(n_points)
+        x = stats.qmc.scale(sample, a, b)
+        integrands = func(x)
+        if log:
+            estimate = logsumexp(integrands) + np.log(dA)
+        else:
+            estimate = np.sum(integrands * dA)
+        estimates[i] = estimate
+
+        # Get a new, independently-scrambled QRNG for next time
+        qrng = type(qrng)(seed=rngs[i], **qrng._init_quad)
+
+    integral = np.mean(estimates)
+    integral = integral + np.pi*1j if (log and sign < 0) else integral*sign
+    standard_error = stats.sem(estimates)
+    return QMCQuadResult(integral, standard_error)
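+
+
+# Editor's note (added for clarity): when log=True, the per-sample estimate
+# computed in the loop above is
+#     logsumexp(f(x)) + log(dA) == log( sum_i exp(f(x_i)) * dA ),
+# i.e. the log of the plain-space estimate, so the plain and log modes agree
+# up to exp/log.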
diff --git a/__packaged__/coreml/.python_dependencies/scipy/integrate/dop.py b/__packaged__/coreml/.python_dependencies/scipy/integrate/dop.py
new file mode 100644
index 00000000..6aa9b20b
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/integrate/dop.py
@@ -0,0 +1,28 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+
+
+import warnings
+from . import _dop  # type: ignore
+
+
+__all__ = [  # noqa: F822
+    'dopri5',
+    'dop853'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.integrate.dop is deprecated and has no attribute "
+            f"{name}")
+
+    warnings.warn("The `scipy.integrate.dop` namespace is deprecated "
+                  "and will be removed in SciPy v2.0.0.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_dop, name)
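+
+
+# Editor's note (illustrative, not part of the file): the module-level
+# __getattr__ above is the PEP 562 deprecation-shim pattern; accessing
+# dop.dopri5 or dop.dop853 still works but emits a DeprecationWarning,
+# while any other attribute raises AttributeError.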
diff --git a/__packaged__/coreml/.python_dependencies/scipy/integrate/lsoda.py b/__packaged__/coreml/.python_dependencies/scipy/integrate/lsoda.py
new file mode 100644
index 00000000..2219b272
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/integrate/lsoda.py
@@ -0,0 +1,25 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+
+
+import warnings
+from . import _lsoda  # type: ignore
+
+
+__all__ = ['lsoda']  # noqa: F822
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.integrate.lsoda is deprecated and has no attribute "
+            f"{name}.")
+
+    warnings.warn("The `scipy.integrate.lsoda` namespace is deprecated "
+                  "and will be removed in SciPy v2.0.0.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_lsoda, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/integrate/odepack.py b/__packaged__/coreml/.python_dependencies/scipy/integrate/odepack.py
new file mode 100644
index 00000000..454f0bfc
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/integrate/odepack.py
@@ -0,0 +1,25 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.integrate` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _odepack_py
+
+__all__ = ['odeint', 'ODEintWarning']  # noqa: F822
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.integrate.odepack is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.integrate instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.integrate` namespace, "
+                  "the `scipy.integrate.odepack` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_odepack_py, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/integrate/quadpack.py b/__packaged__/coreml/.python_dependencies/scipy/integrate/quadpack.py
new file mode 100644
index 00000000..ba873556
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/integrate/quadpack.py
@@ -0,0 +1,32 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.integrate` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _quadpack_py
+
+__all__ = [  # noqa: F822
+    "quad",
+    "dblquad",
+    "tplquad",
+    "nquad",
+    "IntegrationWarning",
+    "error",
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.integrate.quadpack is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.integrate instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.integrate` namespace, "
+                  "the `scipy.integrate.quadpack` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_quadpack_py, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/integrate/tests/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/integrate/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/scipy/integrate/tests/test__quad_vec.py b/__packaged__/coreml/.python_dependencies/scipy/integrate/tests/test__quad_vec.py
new file mode 100644
index 00000000..01f37de9
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/integrate/tests/test__quad_vec.py
@@ -0,0 +1,204 @@
+import pytest
+
+import numpy as np
+from numpy.testing import assert_allclose
+
+from scipy.integrate import quad_vec
+
+from multiprocessing.dummy import Pool
+
+
+quadrature_params = pytest.mark.parametrize(
+    'quadrature', [None, "gk15", "gk21", "trapezoid"])
+
+
+@quadrature_params
+def test_quad_vec_simple(quadrature):
+    n = np.arange(10)
+    f = lambda x: x**n
+    for epsabs in [0.1, 1e-3, 1e-6]:
+        if quadrature == 'trapezoid' and epsabs < 1e-4:
+            # slow: skip
+            continue
+
+        kwargs = dict(epsabs=epsabs, quadrature=quadrature)
+
+        exact = 2**(n+1)/(n + 1)
+
+        res, err = quad_vec(f, 0, 2, norm='max', **kwargs)
+        assert_allclose(res, exact, rtol=0, atol=epsabs)
+
+        res, err = quad_vec(f, 0, 2, norm='2', **kwargs)
+        assert np.linalg.norm(res - exact) < epsabs
+
+        res, err = quad_vec(f, 0, 2, norm='max', points=(0.5, 1.0), **kwargs)
+        assert_allclose(res, exact, rtol=0, atol=epsabs)
+
+        res, err, *rest = quad_vec(f, 0, 2, norm='max',
+                                   epsrel=1e-8,
+                                   full_output=True,
+                                   limit=10000,
+                                   **kwargs)
+        assert_allclose(res, exact, rtol=0, atol=epsabs)
+
+
+@quadrature_params
+def test_quad_vec_simple_inf(quadrature):
+    f = lambda x: 1 / (1 + np.float64(x)**2)
+
+    for epsabs in [0.1, 1e-3, 1e-6]:
+        if quadrature == 'trapezoid' and epsabs < 1e-4:
+            # slow: skip
+            continue
+
+        kwargs = dict(norm='max', epsabs=epsabs, quadrature=quadrature)
+
+        res, err = quad_vec(f, 0, np.inf, **kwargs)
+        assert_allclose(res, np.pi/2, rtol=0, atol=max(epsabs, err))
+
+        res, err = quad_vec(f, 0, -np.inf, **kwargs)
+        assert_allclose(res, -np.pi/2, rtol=0, atol=max(epsabs, err))
+
+        res, err = quad_vec(f, -np.inf, 0, **kwargs)
+        assert_allclose(res, np.pi/2, rtol=0, atol=max(epsabs, err))
+
+        res, err = quad_vec(f, np.inf, 0, **kwargs)
+        assert_allclose(res, -np.pi/2, rtol=0, atol=max(epsabs, err))
+
+        res, err = quad_vec(f, -np.inf, np.inf, **kwargs)
+        assert_allclose(res, np.pi, rtol=0, atol=max(epsabs, err))
+
+        res, err = quad_vec(f, np.inf, -np.inf, **kwargs)
+        assert_allclose(res, -np.pi, rtol=0, atol=max(epsabs, err))
+
+        res, err = quad_vec(f, np.inf, np.inf, **kwargs)
+        assert_allclose(res, 0, rtol=0, atol=max(epsabs, err))
+
+        res, err = quad_vec(f, -np.inf, -np.inf, **kwargs)
+        assert_allclose(res, 0, rtol=0, atol=max(epsabs, err))
+
+        res, err = quad_vec(f, 0, np.inf, points=(1.0, 2.0), **kwargs)
+        assert_allclose(res, np.pi/2, rtol=0, atol=max(epsabs, err))
+
+    f = lambda x: np.sin(x + 2) / (1 + x**2)
+    exact = np.pi / np.e * np.sin(2)
+    epsabs = 1e-5
+
+    res, err, info = quad_vec(f, -np.inf, np.inf, limit=1000, norm='max', epsabs=epsabs,
+                              quadrature=quadrature, full_output=True)
+    assert info.status == 1
+    assert_allclose(res, exact, rtol=0, atol=max(epsabs, 1.5 * err))
+
+
+def test_quad_vec_args():
+    f = lambda x, a: x * (x + a) * np.arange(3)
+    a = 2
+    exact = np.array([0, 4/3, 8/3])
+
+    res, err = quad_vec(f, 0, 1, args=(a,))
+    assert_allclose(res, exact, rtol=0, atol=1e-4)
+
+
+def _lorenzian(x):
+    return 1 / (1 + x**2)
+
+
+def test_quad_vec_pool():
+    f = _lorenzian
+    res, err = quad_vec(f, -np.inf, np.inf, norm='max', epsabs=1e-4, workers=4)
+    assert_allclose(res, np.pi, rtol=0, atol=1e-4)
+
+    with Pool(10) as pool:
+        f = lambda x: 1 / (1 + x**2)
+        res, err = quad_vec(f, -np.inf, np.inf, norm='max', epsabs=1e-4, workers=pool.map)
+        assert_allclose(res, np.pi, rtol=0, atol=1e-4)
+
+
+def _func_with_args(x, a):
+    return x * (x + a) * np.arange(3)
+
+
+@pytest.mark.parametrize('extra_args', [2, (2,)])
+@pytest.mark.parametrize('workers', [1, 10])
+def test_quad_vec_pool_args(extra_args, workers):
+    f = _func_with_args
+    exact = np.array([0, 4/3, 8/3])
+
+    res, err = quad_vec(f, 0, 1, args=extra_args, workers=workers)
+    assert_allclose(res, exact, rtol=0, atol=1e-4)
+
+    with Pool(workers) as pool:
+        res, err = quad_vec(f, 0, 1, args=extra_args, workers=pool.map)
+        assert_allclose(res, exact, rtol=0, atol=1e-4)
+
+
+@quadrature_params
+def test_num_eval(quadrature):
+    def f(x):
+        count[0] += 1
+        return x**5
+
+    count = [0]
+    res = quad_vec(f, 0, 1, norm='max', full_output=True, quadrature=quadrature)
+    assert res[2].neval == count[0]
+
+
+def test_info():
+    def f(x):
+        return np.ones((3, 2, 1))
+
+    res, err, info = quad_vec(f, 0, 1, norm='max', full_output=True)
+
+    assert info.success
+    assert info.status == 0
+    assert info.message == 'Target precision reached.'
+    assert info.neval > 0
+    assert info.intervals.shape[1] == 2
+    assert info.integrals.shape == (info.intervals.shape[0], 3, 2, 1)
+    assert info.errors.shape == (info.intervals.shape[0],)
+
+
+def test_nan_inf():
+    def f_nan(x):
+        return np.nan
+
+    def f_inf(x):
+        return np.inf if x < 0.1 else 1/x
+
+    res, err, info = quad_vec(f_nan, 0, 1, full_output=True)
+    assert info.status == 3
+
+    res, err, info = quad_vec(f_inf, 0, 1, full_output=True)
+    assert info.status == 3
+
+
+@pytest.mark.parametrize('a,b', [(0, 1), (0, np.inf), (np.inf, 0),
+                                 (-np.inf, np.inf), (np.inf, -np.inf)])
+def test_points(a, b):
+    # Check that initial interval splitting is done according to
+    # `points`, by checking that each consecutive set of 15 (for gk15)
+    # function evaluations lies between consecutive `points`
+
+    points = (0, 0.25, 0.5, 0.75, 1.0)
+    points += tuple(-x for x in points)
+
+    quadrature_points = 15
+    interval_sets = []
+    count = 0
+
+    def f(x):
+        nonlocal count
+
+        if count % quadrature_points == 0:
+            interval_sets.append(set())
+
+        count += 1
+        interval_sets[-1].add(float(x))
+        return 0.0
+
+    quad_vec(f, a, b, points=points, quadrature='gk15', limit=0)
+
+    # Check that all point sets lie in a single `points` interval
+    for p in interval_sets:
+        j = np.searchsorted(sorted(points), tuple(p))
+        assert np.all(j == j[0])
diff --git a/__packaged__/coreml/.python_dependencies/scipy/integrate/tests/test_banded_ode_solvers.py b/__packaged__/coreml/.python_dependencies/scipy/integrate/tests/test_banded_ode_solvers.py
new file mode 100644
index 00000000..f34d45d9
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/integrate/tests/test_banded_ode_solvers.py
@@ -0,0 +1,218 @@
+import itertools
+import numpy as np
+from numpy.testing import assert_allclose
+from scipy.integrate import ode
+
+
+def _band_count(a):
+    """Returns ml and mu, the lower and upper band sizes of a."""
+    nrows, ncols = a.shape
+    ml = 0
+    for k in range(-nrows+1, 0):
+        if np.diag(a, k).any():
+            ml = -k
+            break
+    mu = 0
+    for k in range(nrows-1, 0, -1):
+        if np.diag(a, k).any():
+            mu = k
+            break
+    return ml, mu
+
+
+def _linear_func(t, y, a):
+    """Linear system dy/dt = a * y"""
+    return a.dot(y)
+
+
+def _linear_jac(t, y, a):
+    """Jacobian of a * y is a."""
+    return a
+
+
+def _linear_banded_jac(t, y, a):
+    """Banded Jacobian."""
+    ml, mu = _band_count(a)
+    bjac = [np.r_[[0] * k, np.diag(a, k)] for k in range(mu, 0, -1)]
+    bjac.append(np.diag(a))
+    for k in range(-1, -ml-1, -1):
+        bjac.append(np.r_[np.diag(a, k), [0] * (-k)])
+    return bjac
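+
+
+# Editor's note (added for clarity): _linear_banded_jac packs `a` into the
+# LSODA/VODE banded layout, where row k of bjac holds diagonal (mu - k) of
+# `a`, zero-padded so column j of bjac lines up with column j of `a`. For a
+# 3x3 `a` with ml = 1, mu = 1:
+#
+#     bjac = [[0,   a01, a12],
+#             [a00, a11, a22],
+#             [a10, a21, 0  ]]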
+
+
+def _solve_linear_sys(a, y0, tend=1, dt=0.1,
+                      solver=None, method='bdf', use_jac=True,
+                      with_jacobian=False, banded=False):
+    """Use scipy.integrate.ode to solve a linear system of ODEs.
+
+    a : square ndarray
+        Matrix of the linear system to be solved.
+    y0 : ndarray
+        Initial condition
+    tend : float
+        Stop time.
+    dt : float
+        Step size of the output.
+    solver : str
+        If not None, this must be "vode", "lsoda" or "zvode".
+    method : str
+        Either "bdf" or "adams".
+    use_jac : bool
+        Determines if the jacobian function is passed to ode().
+    with_jacobian : bool
+        Passed to ode.set_integrator().
+    banded : bool
+        Determines whether a banded or full jacobian is used.
+        If `banded` is True, `lband` and `uband` are determined by the
+        values in `a`.
+    """
+    if banded:
+        lband, uband = _band_count(a)
+    else:
+        lband = None
+        uband = None
+
+    if use_jac:
+        if banded:
+            r = ode(_linear_func, _linear_banded_jac)
+        else:
+            r = ode(_linear_func, _linear_jac)
+    else:
+        r = ode(_linear_func)
+
+    if solver is None:
+        if np.iscomplexobj(a):
+            solver = "zvode"
+        else:
+            solver = "vode"
+
+    r.set_integrator(solver,
+                     with_jacobian=with_jacobian,
+                     method=method,
+                     lband=lband, uband=uband,
+                     rtol=1e-9, atol=1e-10,
+                     )
+    t0 = 0
+    r.set_initial_value(y0, t0)
+    r.set_f_params(a)
+    r.set_jac_params(a)
+
+    t = [t0]
+    y = [y0]
+    while r.successful() and r.t < tend:
+        r.integrate(r.t + dt)
+        t.append(r.t)
+        y.append(r.y)
+
+    t = np.array(t)
+    y = np.array(y)
+    return t, y
+
+
+def _analytical_solution(a, y0, t):
+    """
+    Analytical solution to the linear differential equations dy/dt = a*y.
+
+    The solution is only valid if `a` is diagonalizable.
+
+    Returns a 2-D array with shape (len(t), len(y0)).
+    """
+    lam, v = np.linalg.eig(a)
+    c = np.linalg.solve(v, y0)
+    e = c * np.exp(lam * t.reshape(-1, 1))
+    sol = e.dot(v.T)
+    return sol
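+
+
+# Editor's note (added for clarity): _analytical_solution is the standard
+# eigendecomposition solution of dy/dt = a @ y,
+#     y(t) = V @ diag(exp(lam * t)) @ inv(V) @ y0,
+# computed with c = inv(V) @ y0 so each mode scales independently in time.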
+
+
+def test_banded_ode_solvers():
+    # Test the "lsoda", "vode" and "zvode" solvers of the `ode` class
+    # with a system that has a banded Jacobian matrix.
+
+    t_exact = np.linspace(0, 1.0, 5)
+
+    # --- Real arrays for testing the "lsoda" and "vode" solvers ---
+
+    # lband = 2, uband = 1:
+    a_real = np.array([[-0.6, 0.1, 0.0, 0.0, 0.0],
+                       [0.2, -0.5, 0.9, 0.0, 0.0],
+                       [0.1, 0.1, -0.4, 0.1, 0.0],
+                       [0.0, 0.3, -0.1, -0.9, -0.3],
+                       [0.0, 0.0, 0.1, 0.1, -0.7]])
+
+    # lband = 0, uband = 1:
+    a_real_upper = np.triu(a_real)
+
+    # lband = 2, uband = 0:
+    a_real_lower = np.tril(a_real)
+
+    # lband = 0, uband = 0:
+    a_real_diag = np.triu(a_real_lower)
+
+    real_matrices = [a_real, a_real_upper, a_real_lower, a_real_diag]
+    real_solutions = []
+
+    for a in real_matrices:
+        y0 = np.arange(1, a.shape[0] + 1)
+        y_exact = _analytical_solution(a, y0, t_exact)
+        real_solutions.append((y0, t_exact, y_exact))
+
+    def check_real(idx, solver, meth, use_jac, with_jac, banded):
+        a = real_matrices[idx]
+        y0, t_exact, y_exact = real_solutions[idx]
+        t, y = _solve_linear_sys(a, y0,
+                                 tend=t_exact[-1],
+                                 dt=t_exact[1] - t_exact[0],
+                                 solver=solver,
+                                 method=meth,
+                                 use_jac=use_jac,
+                                 with_jacobian=with_jac,
+                                 banded=banded)
+        assert_allclose(t, t_exact)
+        assert_allclose(y, y_exact)
+
+    for idx in range(len(real_matrices)):
+        p = [['vode', 'lsoda'],  # solver
+             ['bdf', 'adams'],   # method
+             [False, True],      # use_jac
+             [False, True],      # with_jacobian
+             [False, True]]      # banded
+        for solver, meth, use_jac, with_jac, banded in itertools.product(*p):
+            check_real(idx, solver, meth, use_jac, with_jac, banded)
+
+    # --- Complex arrays for testing the "zvode" solver ---
+
+    # complex, lband = 2, uband = 1:
+    a_complex = a_real - 0.5j * a_real
+
+    # complex, lband = 0, uband = 0:
+    a_complex_diag = np.diag(np.diag(a_complex))
+
+    complex_matrices = [a_complex, a_complex_diag]
+    complex_solutions = []
+
+    for a in complex_matrices:
+        y0 = np.arange(1, a.shape[0] + 1) + 1j
+        y_exact = _analytical_solution(a, y0, t_exact)
+        complex_solutions.append((y0, t_exact, y_exact))
+
+    def check_complex(idx, solver, meth, use_jac, with_jac, banded):
+        a = complex_matrices[idx]
+        y0, t_exact, y_exact = complex_solutions[idx]
+        t, y = _solve_linear_sys(a, y0,
+                                 tend=t_exact[-1],
+                                 dt=t_exact[1] - t_exact[0],
+                                 solver=solver,
+                                 method=meth,
+                                 use_jac=use_jac,
+                                 with_jacobian=with_jac,
+                                 banded=banded)
+        assert_allclose(t, t_exact)
+        assert_allclose(y, y_exact)
+
+    for idx in range(len(complex_matrices)):
+        p = [['bdf', 'adams'],   # method
+             [False, True],      # use_jac
+             [False, True],      # with_jacobian
+             [False, True]]      # banded
+        for meth, use_jac, with_jac, banded in itertools.product(*p):
+            check_complex(idx, "zvode", meth, use_jac, with_jac, banded)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/integrate/tests/test_bvp.py b/__packaged__/coreml/.python_dependencies/scipy/integrate/tests/test_bvp.py
new file mode 100644
index 00000000..811845f5
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/integrate/tests/test_bvp.py
@@ -0,0 +1,709 @@
+import sys
+
+try:
+    from StringIO import StringIO
+except ImportError:
+    from io import StringIO
+
+import numpy as np
+from numpy.testing import (assert_, assert_array_equal, assert_allclose,
+                           assert_equal)
+from pytest import raises as assert_raises
+
+from scipy.sparse import coo_matrix
+from scipy.special import erf
+from scipy.integrate._bvp import (modify_mesh, estimate_fun_jac,
+                                  estimate_bc_jac, compute_jac_indices,
+                                  construct_global_jac, solve_bvp)
+
+
+def exp_fun(x, y):
+    return np.vstack((y[1], y[0]))
+
+
+def exp_fun_jac(x, y):
+    df_dy = np.empty((2, 2, x.shape[0]))
+    df_dy[0, 0] = 0
+    df_dy[0, 1] = 1
+    df_dy[1, 0] = 1
+    df_dy[1, 1] = 0
+    return df_dy
+
+
+def exp_bc(ya, yb):
+    return np.hstack((ya[0] - 1, yb[0]))
+
+
+def exp_bc_complex(ya, yb):
+    return np.hstack((ya[0] - 1 - 1j, yb[0]))
+
+
+def exp_bc_jac(ya, yb):
+    dbc_dya = np.array([
+        [1, 0],
+        [0, 0]
+    ])
+    dbc_dyb = np.array([
+        [0, 0],
+        [1, 0]
+    ])
+    return dbc_dya, dbc_dyb
+
+
+def exp_sol(x):
+    return (np.exp(-x) - np.exp(x - 2)) / (1 - np.exp(-2))
+
+
+def sl_fun(x, y, p):
+    return np.vstack((y[1], -p[0]**2 * y[0]))
+
+
+def sl_fun_jac(x, y, p):
+    n, m = y.shape
+    df_dy = np.empty((n, 2, m))
+    df_dy[0, 0] = 0
+    df_dy[0, 1] = 1
+    df_dy[1, 0] = -p[0]**2
+    df_dy[1, 1] = 0
+
+    df_dp = np.empty((n, 1, m))
+    df_dp[0, 0] = 0
+    df_dp[1, 0] = -2 * p[0] * y[0]
+
+    return df_dy, df_dp
+
+
+def sl_bc(ya, yb, p):
+    return np.hstack((ya[0], yb[0], ya[1] - p[0]))
+
+
+def sl_bc_jac(ya, yb, p):
+    dbc_dya = np.zeros((3, 2))
+    dbc_dya[0, 0] = 1
+    dbc_dya[2, 1] = 1
+
+    dbc_dyb = np.zeros((3, 2))
+    dbc_dyb[1, 0] = 1
+
+    dbc_dp = np.zeros((3, 1))
+    dbc_dp[2, 0] = -1
+
+    return dbc_dya, dbc_dyb, dbc_dp
+
+
+def sl_sol(x, p):
+    return np.sin(p[0] * x)
+
+
+def emden_fun(x, y):
+    return np.vstack((y[1], -y[0]**5))
+
+
+def emden_fun_jac(x, y):
+    df_dy = np.empty((2, 2, x.shape[0]))
+    df_dy[0, 0] = 0
+    df_dy[0, 1] = 1
+    df_dy[1, 0] = -5 * y[0]**4
+    df_dy[1, 1] = 0
+    return df_dy
+
+
+def emden_bc(ya, yb):
+    return np.array([ya[1], yb[0] - (3/4)**0.5])
+
+
+def emden_bc_jac(ya, yb):
+    dbc_dya = np.array([
+        [0, 1],
+        [0, 0]
+    ])
+    dbc_dyb = np.array([
+        [0, 0],
+        [1, 0]
+    ])
+    return dbc_dya, dbc_dyb
+
+
+def emden_sol(x):
+    return (1 + x**2/3)**-0.5
+
+
+def undefined_fun(x, y):
+    return np.zeros_like(y)
+
+
+def undefined_bc(ya, yb):
+    return np.array([ya[0], yb[0] - 1])
+
+
+def big_fun(x, y):
+    f = np.zeros_like(y)
+    f[::2] = y[1::2]
+    return f
+
+
+def big_bc(ya, yb):
+    return np.hstack((ya[::2], yb[::2] - 1))
+
+
+def big_sol(x, n):
+    y = np.ones((2 * n, x.size))
+    y[::2] = x
+    return x
+
+
+def big_fun_with_parameters(x, y, p):
+    """ Big version of sl_fun, with two parameters.
+
+    The two differential equations represented by sl_fun are repeated to fill
+    the rows of y, alternating between the parameters p[0] and p[1].
+    Here are the differential equations:
+
+        dy[0]/dt = y[1]
+        dy[1]/dt = -p[0]**2 * y[0]
+        dy[2]/dt = y[3]
+        dy[3]/dt = -p[1]**2 * y[2]
+        dy[4]/dt = y[5]
+        dy[5]/dt = -p[0]**2 * y[4]
+        dy[6]/dt = y[7]
+        dy[7]/dt = -p[1]**2 * y[6]
+        .
+        .
+        .
+
+    """
+    f = np.zeros_like(y)
+    f[::2] = y[1::2]
+    f[1::4] = -p[0]**2 * y[::4]
+    f[3::4] = -p[1]**2 * y[2::4]
+    return f
+
+
+def big_fun_with_parameters_jac(x, y, p):
+    # big version of sl_fun_jac, with two parameters
+    n, m = y.shape
+    df_dy = np.zeros((n, n, m))
+    df_dy[range(0, n, 2), range(1, n, 2)] = 1
+    df_dy[range(1, n, 4), range(0, n, 4)] = -p[0]**2
+    df_dy[range(3, n, 4), range(2, n, 4)] = -p[1]**2
+
+    df_dp = np.zeros((n, 2, m))
+    df_dp[range(1, n, 4), 0] = -2 * p[0] * y[range(0, n, 4)]
+    df_dp[range(3, n, 4), 1] = -2 * p[1] * y[range(2, n, 4)]
+
+    return df_dy, df_dp
+
+
+def big_bc_with_parameters(ya, yb, p):
+    # big version of sl_bc, with two parameters
+    return np.hstack((ya[::2], yb[::2], ya[1] - p[0], ya[3] - p[1]))
+
+
+def big_bc_with_parameters_jac(ya, yb, p):
+    # big version of sl_bc_jac, with two parameters
+    n = ya.shape[0]
+    dbc_dya = np.zeros((n + 2, n))
+    dbc_dyb = np.zeros((n + 2, n))
+
+    dbc_dya[range(n // 2), range(0, n, 2)] = 1
+    dbc_dyb[range(n // 2, n), range(0, n, 2)] = 1
+
+    dbc_dp = np.zeros((n + 2, 2))
+    dbc_dp[n, 0] = -1
+    dbc_dya[n, 1] = 1
+    dbc_dp[n + 1, 1] = -1
+    dbc_dya[n + 1, 3] = 1
+
+    return dbc_dya, dbc_dyb, dbc_dp
+
+
+def big_sol_with_parameters(x, p):
+    # big version of sl_sol, with two parameters
+    return np.vstack((np.sin(p[0] * x), np.sin(p[1] * x)))
+
+
+def shock_fun(x, y):
+    eps = 1e-3
+    return np.vstack((
+        y[1],
+        -(x * y[1] + eps * np.pi**2 * np.cos(np.pi * x) +
+          np.pi * x * np.sin(np.pi * x)) / eps
+    ))
+
+
+def shock_bc(ya, yb):
+    return np.array([ya[0] + 2, yb[0]])
+
+
+def shock_sol(x):
+    eps = 1e-3
+    k = np.sqrt(2 * eps)
+    return np.cos(np.pi * x) + erf(x / k) / erf(1 / k)
+
+
+def nonlin_bc_fun(x, y):
+    # Laplace equation.
+    return np.stack([y[1], np.zeros_like(x)])
+
+
+def nonlin_bc_bc(ya, yb):
+    phiA, phipA = ya
+    phiC, phipC = yb
+
+    kappa, ioA, ioC, V, f = 1.64, 0.01, 1.0e-4, 0.5, 38.9
+
+    # Butler-Volmer Kinetics at Anode
+    hA = 0.0-phiA-0.0
+    iA = ioA * (np.exp(f*hA) - np.exp(-f*hA))
+    res0 = iA + kappa * phipA
+
+    # Butler-Volmer Kinetics at Cathode
+    hC = V - phiC - 1.0
+    iC = ioC * (np.exp(f*hC) - np.exp(-f*hC))
+    res1 = iC - kappa*phipC
+
+    return np.array([res0, res1])
+
+
+def nonlin_bc_sol(x):
+    return -0.13426436116763119 - 1.1308709 * x
+
+
+def test_modify_mesh():
+    x = np.array([0, 1, 3, 9], dtype=float)
+    x_new = modify_mesh(x, np.array([0]), np.array([2]))
+    assert_array_equal(x_new, np.array([0, 0.5, 1, 3, 5, 7, 9]))
+
+    x = np.array([-6, -3, 0, 3, 6], dtype=float)
+    x_new = modify_mesh(x, np.array([1], dtype=int), np.array([0, 2, 3]))
+    assert_array_equal(x_new, [-6, -5, -4, -3, -1.5, 0, 1, 2, 3, 4, 5, 6])
+
+
+def test_compute_fun_jac():
+    x = np.linspace(0, 1, 5)
+    y = np.empty((2, x.shape[0]))
+    y[0] = 0.01
+    y[1] = 0.02
+    p = np.array([])
+    df_dy, df_dp = estimate_fun_jac(lambda x, y, p: exp_fun(x, y), x, y, p)
+    df_dy_an = exp_fun_jac(x, y)
+    assert_allclose(df_dy, df_dy_an)
+    assert_(df_dp is None)
+
+    x = np.linspace(0, np.pi, 5)
+    y = np.empty((2, x.shape[0]))
+    y[0] = np.sin(x)
+    y[1] = np.cos(x)
+    p = np.array([1.0])
+    df_dy, df_dp = estimate_fun_jac(sl_fun, x, y, p)
+    df_dy_an, df_dp_an = sl_fun_jac(x, y, p)
+    assert_allclose(df_dy, df_dy_an)
+    assert_allclose(df_dp, df_dp_an)
+
+    x = np.linspace(0, 1, 10)
+    y = np.empty((2, x.shape[0]))
+    y[0] = (3/4)**0.5
+    y[1] = 1e-4
+    p = np.array([])
+    df_dy, df_dp = estimate_fun_jac(lambda x, y, p: emden_fun(x, y), x, y, p)
+    df_dy_an = emden_fun_jac(x, y)
+    assert_allclose(df_dy, df_dy_an)
+    assert_(df_dp is None)
+
+
+def test_compute_bc_jac():
+    ya = np.array([-1.0, 2])
+    yb = np.array([0.5, 3])
+    p = np.array([])
+    dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(
+        lambda ya, yb, p: exp_bc(ya, yb), ya, yb, p)
+    dbc_dya_an, dbc_dyb_an = exp_bc_jac(ya, yb)
+    assert_allclose(dbc_dya, dbc_dya_an)
+    assert_allclose(dbc_dyb, dbc_dyb_an)
+    assert_(dbc_dp is None)
+
+    ya = np.array([0.0, 1])
+    yb = np.array([0.0, -1])
+    p = np.array([0.5])
+    dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(sl_bc, ya, yb, p)
+    dbc_dya_an, dbc_dyb_an, dbc_dp_an = sl_bc_jac(ya, yb, p)
+    assert_allclose(dbc_dya, dbc_dya_an)
+    assert_allclose(dbc_dyb, dbc_dyb_an)
+    assert_allclose(dbc_dp, dbc_dp_an)
+
+    ya = np.array([0.5, 100])
+    yb = np.array([-1000, 10.5])
+    p = np.array([])
+    dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(
+        lambda ya, yb, p: emden_bc(ya, yb), ya, yb, p)
+    dbc_dya_an, dbc_dyb_an = emden_bc_jac(ya, yb)
+    assert_allclose(dbc_dya, dbc_dya_an)
+    assert_allclose(dbc_dyb, dbc_dyb_an)
+    assert_(dbc_dp is None)
+
+
+def test_compute_jac_indices():
+    n = 2
+    m = 4
+    k = 2
+    i, j = compute_jac_indices(n, m, k)
+    s = coo_matrix((np.ones_like(i), (i, j))).toarray()
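+    # Expected sparsity: m - 1 = 3 collocation blocks of n rows, each
+    # coupling two adjacent nodes (2*n columns); the last n + k rows come
+    # from the boundary conditions and touch the first node, the last
+    # node, and the k parameter columns, which are dense throughout.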
+    s_true = np.array([
+        [1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
+        [1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
+        [0, 0, 1, 1, 1, 1, 0, 0, 1, 1],
+        [0, 0, 1, 1, 1, 1, 0, 0, 1, 1],
+        [0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
+        [0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
+        [1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
+        [1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
+        [1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
+        [1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
+    ])
+    assert_array_equal(s, s_true)
+
+
+def test_compute_global_jac():
+    n = 2
+    m = 5
+    k = 1
+    i_jac, j_jac = compute_jac_indices(2, 5, 1)
+    x = np.linspace(0, 1, 5)
+    h = np.diff(x)
+    y = np.vstack((np.sin(np.pi * x), np.pi * np.cos(np.pi * x)))
+    p = np.array([3.0])
+
+    f = sl_fun(x, y, p)
+
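+    # Midpoint abscissae and the cubic-collocation estimate of y there:
+    # y_middle = (y_i + y_{i+1})/2 - h/8 * (f_{i+1} - f_i).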
+    x_middle = x[:-1] + 0.5 * h
+    y_middle = 0.5 * (y[:, :-1] + y[:, 1:]) - h/8 * (f[:, 1:] - f[:, :-1])
+
+    df_dy, df_dp = sl_fun_jac(x, y, p)
+    df_dy_middle, df_dp_middle = sl_fun_jac(x_middle, y_middle, p)
+    dbc_dya, dbc_dyb, dbc_dp = sl_bc_jac(y[:, 0], y[:, -1], p)
+
+    J = construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle,
+                             df_dp, df_dp_middle, dbc_dya, dbc_dyb, dbc_dp)
+    J = J.toarray()
+
+    def J_block(h, p):
+        return np.array([
+            [h**2*p**2/12 - 1, -0.5*h, -h**2*p**2/12 + 1, -0.5*h],
+            [0.5*h*p**2, h**2*p**2/12 - 1, 0.5*h*p**2, 1 - h**2*p**2/12]
+        ])
+
+    J_true = np.zeros((m * n + k, m * n + k))
+    for i in range(m - 1):
+        J_true[i * n: (i + 1) * n, i * n: (i + 2) * n] = J_block(h[i], p[0])
+
+    J_true[:(m - 1) * n:2, -1] = p * h**2/6 * (y[0, :-1] - y[0, 1:])
+    J_true[1:(m - 1) * n:2, -1] = p * (h * (y[0, :-1] + y[0, 1:]) +
+                                       h**2/6 * (y[1, :-1] - y[1, 1:]))
+
+    J_true[8, 0] = 1
+    J_true[9, 8] = 1
+    J_true[10, 1] = 1
+    J_true[10, 10] = -1
+
+    assert_allclose(J, J_true, rtol=1e-10)
+
+    df_dy, df_dp = estimate_fun_jac(sl_fun, x, y, p)
+    df_dy_middle, df_dp_middle = estimate_fun_jac(sl_fun, x_middle, y_middle, p)
+    dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(sl_bc, y[:, 0], y[:, -1], p)
+    J = construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle,
+                             df_dp, df_dp_middle, dbc_dya, dbc_dyb, dbc_dp)
+    J = J.toarray()
+    assert_allclose(J, J_true, rtol=2e-8, atol=2e-8)
+
+
+def test_parameter_validation():
+    x = [0, 1, 0.5]
+    y = np.zeros((2, 3))
+    assert_raises(ValueError, solve_bvp, exp_fun, exp_bc, x, y)
+
+    x = np.linspace(0, 1, 5)
+    y = np.zeros((2, 4))
+    assert_raises(ValueError, solve_bvp, exp_fun, exp_bc, x, y)
+
+    fun = lambda x, y, p: exp_fun(x, y)
+    bc = lambda ya, yb, p: exp_bc(ya, yb)
+
+    y = np.zeros((2, x.shape[0]))
+    assert_raises(ValueError, solve_bvp, fun, bc, x, y, p=[1])
+
+    def wrong_shape_fun(x, y):
+        return np.zeros(3)
+
+    assert_raises(ValueError, solve_bvp, wrong_shape_fun, bc, x, y)
+
+    S = np.array([[0, 0]])
+    assert_raises(ValueError, solve_bvp, exp_fun, exp_bc, x, y, S=S)
+
+
+def test_no_params():
+    x = np.linspace(0, 1, 5)
+    x_test = np.linspace(0, 1, 100)
+    y = np.zeros((2, x.shape[0]))
+    for fun_jac in [None, exp_fun_jac]:
+        for bc_jac in [None, exp_bc_jac]:
+            sol = solve_bvp(exp_fun, exp_bc, x, y, fun_jac=fun_jac,
+                            bc_jac=bc_jac)
+
+            assert_equal(sol.status, 0)
+            assert_(sol.success)
+
+            assert_equal(sol.x.size, 5)
+
+            sol_test = sol.sol(x_test)
+
+            assert_allclose(sol_test[0], exp_sol(x_test), atol=1e-5)
+
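+            # Residual check on the interpolant: its derivative should
+            # match the ODE right-hand side, with the mismatch measured
+            # as r / (1 + |f|).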
+            f_test = exp_fun(x_test, sol_test)
+            r = sol.sol(x_test, 1) - f_test
+            rel_res = r / (1 + np.abs(f_test))
+            norm_res = np.sum(rel_res**2, axis=0)**0.5
+            assert_(np.all(norm_res < 1e-3))
+
+            assert_(np.all(sol.rms_residuals < 1e-3))
+            assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
+            assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
+
+
+def test_with_params():
+    x = np.linspace(0, np.pi, 5)
+    x_test = np.linspace(0, np.pi, 100)
+    y = np.ones((2, x.shape[0]))
+
+    for fun_jac in [None, sl_fun_jac]:
+        for bc_jac in [None, sl_bc_jac]:
+            sol = solve_bvp(sl_fun, sl_bc, x, y, p=[0.5], fun_jac=fun_jac,
+                            bc_jac=bc_jac)
+
+            assert_equal(sol.status, 0)
+            assert_(sol.success)
+
+            assert_(sol.x.size < 10)
+
+            assert_allclose(sol.p, [1], rtol=1e-4)
+
+            sol_test = sol.sol(x_test)
+
+            assert_allclose(sol_test[0], sl_sol(x_test, [1]),
+                            rtol=1e-4, atol=1e-4)
+
+            f_test = sl_fun(x_test, sol_test, [1])
+            r = sol.sol(x_test, 1) - f_test
+            rel_res = r / (1 + np.abs(f_test))
+            norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5
+            assert_(np.all(norm_res < 1e-3))
+
+            assert_(np.all(sol.rms_residuals < 1e-3))
+            assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
+            assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
+
+
+def test_singular_term():
+    x = np.linspace(0, 1, 10)
+    x_test = np.linspace(0.05, 1, 100)
+    y = np.empty((2, 10))
+    y[0] = (3/4)**0.5
+    y[1] = 1e-4
+    S = np.array([[0, 0], [0, -2]])
+
+    for fun_jac in [None, emden_fun_jac]:
+        for bc_jac in [None, emden_bc_jac]:
+            sol = solve_bvp(emden_fun, emden_bc, x, y, S=S, fun_jac=fun_jac,
+                            bc_jac=bc_jac)
+
+            assert_equal(sol.status, 0)
+            assert_(sol.success)
+
+            assert_equal(sol.x.size, 10)
+
+            sol_test = sol.sol(x_test)
+            assert_allclose(sol_test[0], emden_sol(x_test), atol=1e-5)
+
+            f_test = emden_fun(x_test, sol_test) + S.dot(sol_test) / x_test
+            r = sol.sol(x_test, 1) - f_test
+            rel_res = r / (1 + np.abs(f_test))
+            norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5
+
+            assert_(np.all(norm_res < 1e-3))
+            assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
+            assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
+
+
+def test_complex():
+    # The test is essentially the same as test_no_params, but the
+    # boundary conditions are complex-valued.
+    x = np.linspace(0, 1, 5)
+    x_test = np.linspace(0, 1, 100)
+    y = np.zeros((2, x.shape[0]), dtype=complex)
+    for fun_jac in [None, exp_fun_jac]:
+        for bc_jac in [None, exp_bc_jac]:
+            sol = solve_bvp(exp_fun, exp_bc_complex, x, y, fun_jac=fun_jac,
+                            bc_jac=bc_jac)
+
+            assert_equal(sol.status, 0)
+            assert_(sol.success)
+
+            sol_test = sol.sol(x_test)
+
+            assert_allclose(sol_test[0].real, exp_sol(x_test), atol=1e-5)
+            assert_allclose(sol_test[0].imag, exp_sol(x_test), atol=1e-5)
+
+            f_test = exp_fun(x_test, sol_test)
+            r = sol.sol(x_test, 1) - f_test
+            rel_res = r / (1 + np.abs(f_test))
+            norm_res = np.sum(np.real(rel_res * np.conj(rel_res)),
+                              axis=0) ** 0.5
+            assert_(np.all(norm_res < 1e-3))
+
+            assert_(np.all(sol.rms_residuals < 1e-3))
+            assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
+            assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
+
+
+def test_failures():
+    x = np.linspace(0, 1, 2)
+    y = np.zeros((2, x.size))
+    res = solve_bvp(exp_fun, exp_bc, x, y, tol=1e-5, max_nodes=5)
+    assert_equal(res.status, 1)
+    assert_(not res.success)
+
+    x = np.linspace(0, 1, 5)
+    y = np.zeros((2, x.size))
+    res = solve_bvp(undefined_fun, undefined_bc, x, y)
+    assert_equal(res.status, 2)
+    assert_(not res.success)
+
+
+def test_big_problem():
+    n = 30
+    x = np.linspace(0, 1, 5)
+    y = np.zeros((2 * n, x.size))
+    sol = solve_bvp(big_fun, big_bc, x, y)
+
+    assert_equal(sol.status, 0)
+    assert_(sol.success)
+
+    sol_test = sol.sol(x)
+
+    assert_allclose(sol_test[0], big_sol(x, n))
+
+    f_test = big_fun(x, sol_test)
+    r = sol.sol(x, 1) - f_test
+    rel_res = r / (1 + np.abs(f_test))
+    norm_res = np.sum(np.real(rel_res * np.conj(rel_res)), axis=0) ** 0.5
+    assert_(np.all(norm_res < 1e-3))
+
+    assert_(np.all(sol.rms_residuals < 1e-3))
+    assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
+    assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
+
+
+def test_big_problem_with_parameters():
+    n = 30
+    x = np.linspace(0, np.pi, 5)
+    x_test = np.linspace(0, np.pi, 100)
+    y = np.ones((2 * n, x.size))
+
+    for fun_jac in [None, big_fun_with_parameters_jac]:
+        for bc_jac in [None, big_bc_with_parameters_jac]:
+            sol = solve_bvp(big_fun_with_parameters, big_bc_with_parameters, x,
+                            y, p=[0.5, 0.5], fun_jac=fun_jac, bc_jac=bc_jac)
+
+            assert_equal(sol.status, 0)
+            assert_(sol.success)
+
+            assert_allclose(sol.p, [1, 1], rtol=1e-4)
+
+            sol_test = sol.sol(x_test)
+
+            for isol in range(0, n, 4):
+                assert_allclose(sol_test[isol],
+                                big_sol_with_parameters(x_test, [1, 1])[0],
+                                rtol=1e-4, atol=1e-4)
+                assert_allclose(sol_test[isol + 2],
+                                big_sol_with_parameters(x_test, [1, 1])[1],
+                                rtol=1e-4, atol=1e-4)
+
+            f_test = big_fun_with_parameters(x_test, sol_test, [1, 1])
+            r = sol.sol(x_test, 1) - f_test
+            rel_res = r / (1 + np.abs(f_test))
+            norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5
+            assert_(np.all(norm_res < 1e-3))
+
+            assert_(np.all(sol.rms_residuals < 1e-3))
+            assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
+            assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
+
+
+def test_shock_layer():
+    x = np.linspace(-1, 1, 5)
+    x_test = np.linspace(-1, 1, 100)
+    y = np.zeros((2, x.size))
+    sol = solve_bvp(shock_fun, shock_bc, x, y)
+
+    assert_equal(sol.status, 0)
+    assert_(sol.success)
+
+    assert_(sol.x.size < 110)
+
+    sol_test = sol.sol(x_test)
+    assert_allclose(sol_test[0], shock_sol(x_test), rtol=1e-5, atol=1e-5)
+
+    f_test = shock_fun(x_test, sol_test)
+    r = sol.sol(x_test, 1) - f_test
+    rel_res = r / (1 + np.abs(f_test))
+    norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5
+
+    assert_(np.all(norm_res < 1e-3))
+    assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
+    assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
+
+
+def test_nonlin_bc():
+    x = np.linspace(0, 0.1, 5)
+    x_test = x
+    y = np.zeros([2, x.size])
+    sol = solve_bvp(nonlin_bc_fun, nonlin_bc_bc, x, y)
+
+    assert_equal(sol.status, 0)
+    assert_(sol.success)
+
+    assert_(sol.x.size < 8)
+
+    sol_test = sol.sol(x_test)
+    assert_allclose(sol_test[0], nonlin_bc_sol(x_test), rtol=1e-5, atol=1e-5)
+
+    f_test = nonlin_bc_fun(x_test, sol_test)
+    r = sol.sol(x_test, 1) - f_test
+    rel_res = r / (1 + np.abs(f_test))
+    norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5
+
+    assert_(np.all(norm_res < 1e-3))
+    assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
+    assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
+
+
+def test_verbose():
+    # Smoke test: check that each verbosity level prints what it
+    # should and does not crash.
+    x = np.linspace(0, 1, 5)
+    y = np.zeros((2, x.shape[0]))
+    for verbose in [0, 1, 2]:
+        old_stdout = sys.stdout
+        sys.stdout = StringIO()
+        try:
+            sol = solve_bvp(exp_fun, exp_bc, x, y, verbose=verbose)
+            text = sys.stdout.getvalue()
+        finally:
+            sys.stdout = old_stdout
+
+        assert_(sol.success)
+        if verbose == 0:
+            assert_(not text, text)
+        if verbose >= 1:
+            assert_("Solved in" in text, text)
+        if verbose >= 2:
+            assert_("Max residual" in text, text)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/integrate/tests/test_integrate.py b/__packaged__/coreml/.python_dependencies/scipy/integrate/tests/test_integrate.py
new file mode 100644
index 00000000..dabe0a58
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/integrate/tests/test_integrate.py
@@ -0,0 +1,830 @@
+# Authors: Nils Wagner, Ed Schofield, Pauli Virtanen, John Travers
+"""
+Tests for numerical integration.
+"""
+import numpy as np
+from numpy import (arange, zeros, array, dot, sqrt, cos, sin, eye, pi, exp,
+                   allclose)
+
+from numpy.testing import (
+    assert_, assert_array_almost_equal,
+    assert_allclose, assert_array_equal, assert_equal, assert_warns)
+from pytest import raises as assert_raises
+from scipy.integrate import odeint, ode, complex_ode
+
+#------------------------------------------------------------------------------
+# Test ODE integrators
+#------------------------------------------------------------------------------
+
+
+class TestOdeint:
+    # Check integrate.odeint
+
+    def _do_problem(self, problem):
+        t = arange(0.0, problem.stop_t, 0.05)
+
+        # Basic case
+        z, infodict = odeint(problem.f, problem.z0, t, full_output=True)
+        assert_(problem.verify(z, t))
+
+        # Use tfirst=True
+        z, infodict = odeint(lambda t, y: problem.f(y, t), problem.z0, t,
+                             full_output=True, tfirst=True)
+        assert_(problem.verify(z, t))
+
+        if hasattr(problem, 'jac'):
+            # Use Dfun
+            z, infodict = odeint(problem.f, problem.z0, t, Dfun=problem.jac,
+                                 full_output=True)
+            assert_(problem.verify(z, t))
+
+            # Use Dfun and tfirst=True
+            z, infodict = odeint(lambda t, y: problem.f(y, t), problem.z0, t,
+                                 Dfun=lambda t, y: problem.jac(y, t),
+                                 full_output=True, tfirst=True)
+            assert_(problem.verify(z, t))
+
+    def test_odeint(self):
+        for problem_cls in PROBLEMS:
+            problem = problem_cls()
+            if problem.cmplx:
+                continue
+            self._do_problem(problem)
+
+
+class TestODEClass:
+
+    ode_class = None   # Set in subclass.
+
+    def _do_problem(self, problem, integrator, method='adams'):
+
+        # ode's callbacks take (t, y), the reverse of odeint's (y, t) order
+        f = lambda t, z: problem.f(z, t)
+        jac = None
+        if hasattr(problem, 'jac'):
+            jac = lambda t, z: problem.jac(z, t)
+
+        integrator_params = {}
+        if problem.lband is not None or problem.uband is not None:
+            integrator_params['uband'] = problem.uband
+            integrator_params['lband'] = problem.lband
+
+        ig = self.ode_class(f, jac)
+        ig.set_integrator(integrator,
+                          atol=problem.atol/10,
+                          rtol=problem.rtol/10,
+                          method=method,
+                          **integrator_params)
+
+        ig.set_initial_value(problem.z0, t=0.0)
+        z = ig.integrate(problem.stop_t)
+
+        assert_array_equal(z, ig.y)
+        assert_(ig.successful(), (problem, method))
+        assert_(ig.get_return_code() > 0, (problem, method))
+        assert_(problem.verify(array([z]), problem.stop_t), (problem, method))
+
+
+class TestOde(TestODEClass):
+
+    ode_class = ode
+
+    def test_vode(self):
+        # Check the vode solver
+        for problem_cls in PROBLEMS:
+            problem = problem_cls()
+            if problem.cmplx:
+                continue
+            if not problem.stiff:
+                self._do_problem(problem, 'vode', 'adams')
+            self._do_problem(problem, 'vode', 'bdf')
+
+    def test_zvode(self):
+        # Check the zvode solver
+        for problem_cls in PROBLEMS:
+            problem = problem_cls()
+            if not problem.stiff:
+                self._do_problem(problem, 'zvode', 'adams')
+            self._do_problem(problem, 'zvode', 'bdf')
+
+    def test_lsoda(self):
+        # Check the lsoda solver
+        for problem_cls in PROBLEMS:
+            problem = problem_cls()
+            if problem.cmplx:
+                continue
+            self._do_problem(problem, 'lsoda')
+
+    def test_dopri5(self):
+        # Check the dopri5 solver
+        for problem_cls in PROBLEMS:
+            problem = problem_cls()
+            if problem.cmplx:
+                continue
+            if problem.stiff:
+                continue
+            if hasattr(problem, 'jac'):
+                continue
+            self._do_problem(problem, 'dopri5')
+
+    def test_dop853(self):
+        # Check the dop853 solver
+        for problem_cls in PROBLEMS:
+            problem = problem_cls()
+            if problem.cmplx:
+                continue
+            if problem.stiff:
+                continue
+            if hasattr(problem, 'jac'):
+                continue
+            self._do_problem(problem, 'dop853')
+
+    def test_concurrent_fail(self):
+        for sol in ('vode', 'zvode', 'lsoda'):
+            f = lambda t, y: 1.0
+
+            r = ode(f).set_integrator(sol)
+            r.set_initial_value(0, 0)
+
+            r2 = ode(f).set_integrator(sol)
+            r2.set_initial_value(0, 0)
+
+            r.integrate(r.t + 0.1)
+            r2.integrate(r2.t + 0.1)
+
+            assert_raises(RuntimeError, r.integrate, r.t + 0.1)
+
+    def test_concurrent_ok(self):
+        f = lambda t, y: 1.0
+
+        for k in range(3):
+            for sol in ('vode', 'zvode', 'lsoda', 'dopri5', 'dop853'):
+                r = ode(f).set_integrator(sol)
+                r.set_initial_value(0, 0)
+
+                r2 = ode(f).set_integrator(sol)
+                r2.set_initial_value(0, 0)
+
+                r.integrate(r.t + 0.1)
+                r2.integrate(r2.t + 0.1)
+                r2.integrate(r2.t + 0.1)
+
+                assert_allclose(r.y, 0.1)
+                assert_allclose(r2.y, 0.2)
+
+            for sol in ('dopri5', 'dop853'):
+                r = ode(f).set_integrator(sol)
+                r.set_initial_value(0, 0)
+
+                r2 = ode(f).set_integrator(sol)
+                r2.set_initial_value(0, 0)
+
+                r.integrate(r.t + 0.1)
+                r.integrate(r.t + 0.1)
+                r2.integrate(r2.t + 0.1)
+                r.integrate(r.t + 0.1)
+                r2.integrate(r2.t + 0.1)
+
+                assert_allclose(r.y, 0.3)
+                assert_allclose(r2.y, 0.2)
+
+
+class TestComplexOde(TestODEClass):
+
+    ode_class = complex_ode
+
+    def test_vode(self):
+        # Check the vode solver
+        for problem_cls in PROBLEMS:
+            problem = problem_cls()
+            if not problem.stiff:
+                self._do_problem(problem, 'vode', 'adams')
+            else:
+                self._do_problem(problem, 'vode', 'bdf')
+
+    def test_lsoda(self):
+        # Check the lsoda solver
+        for problem_cls in PROBLEMS:
+            problem = problem_cls()
+            self._do_problem(problem, 'lsoda')
+
+    def test_dopri5(self):
+        # Check the dopri5 solver
+        for problem_cls in PROBLEMS:
+            problem = problem_cls()
+            if problem.stiff:
+                continue
+            if hasattr(problem, 'jac'):
+                continue
+            self._do_problem(problem, 'dopri5')
+
+    def test_dop853(self):
+        # Check the dop853 solver
+        for problem_cls in PROBLEMS:
+            problem = problem_cls()
+            if problem.stiff:
+                continue
+            if hasattr(problem, 'jac'):
+                continue
+            self._do_problem(problem, 'dop853')
+
+
+class TestSolout:
+    # Check integrate.ode correctly handles solout for dopri5 and dop853
+    def _run_solout_test(self, integrator):
+        # Check correct usage of solout
+        ts = []
+        ys = []
+        t0 = 0.0
+        tend = 10.0
+        y0 = [1.0, 2.0]
+
+        def solout(t, y):
+            ts.append(t)
+            ys.append(y.copy())
+
+        def rhs(t, y):
+            return [y[0] + y[1], -y[1]**2]
+
+        ig = ode(rhs).set_integrator(integrator)
+        ig.set_solout(solout)
+        ig.set_initial_value(y0, t0)
+        ret = ig.integrate(tend)
+        assert_array_equal(ys[0], y0)
+        assert_array_equal(ys[-1], ret)
+        assert_equal(ts[0], t0)
+        assert_equal(ts[-1], tend)
+
+    def test_solout(self):
+        for integrator in ('dopri5', 'dop853'):
+            self._run_solout_test(integrator)
+
+    def _run_solout_after_initial_test(self, integrator):
+        # Check if solout works even if it is set after the initial value.
+        ts = []
+        ys = []
+        t0 = 0.0
+        tend = 10.0
+        y0 = [1.0, 2.0]
+
+        def solout(t, y):
+            ts.append(t)
+            ys.append(y.copy())
+
+        def rhs(t, y):
+            return [y[0] + y[1], -y[1]**2]
+
+        ig = ode(rhs).set_integrator(integrator)
+        ig.set_initial_value(y0, t0)
+        ig.set_solout(solout)
+        ret = ig.integrate(tend)
+        assert_array_equal(ys[0], y0)
+        assert_array_equal(ys[-1], ret)
+        assert_equal(ts[0], t0)
+        assert_equal(ts[-1], tend)
+
+    def test_solout_after_initial(self):
+        for integrator in ('dopri5', 'dop853'):
+            self._run_solout_after_initial_test(integrator)
+
+    def _run_solout_break_test(self, integrator):
+        # Check correct usage of stopping via solout
+        ts = []
+        ys = []
+        t0 = 0.0
+        tend = 10.0
+        y0 = [1.0, 2.0]
+
+        def solout(t, y):
+            ts.append(t)
+            ys.append(y.copy())
+            if t > tend/2.0:
+                return -1
+
+        def rhs(t, y):
+            return [y[0] + y[1], -y[1]**2]
+
+        ig = ode(rhs).set_integrator(integrator)
+        ig.set_solout(solout)
+        ig.set_initial_value(y0, t0)
+        ret = ig.integrate(tend)
+        assert_array_equal(ys[0], y0)
+        assert_array_equal(ys[-1], ret)
+        assert_equal(ts[0], t0)
+        assert_(ts[-1] > tend/2.0)
+        assert_(ts[-1] < tend)
+
+    def test_solout_break(self):
+        for integrator in ('dopri5', 'dop853'):
+            self._run_solout_break_test(integrator)
+
+
+class TestComplexSolout:
+    # Check integrate.ode correctly handles solout for dopri5 and dop853
+    def _run_solout_test(self, integrator):
+        # Check correct usage of solout
+        ts = []
+        ys = []
+        t0 = 0.0
+        tend = 20.0
+        y0 = [0.0]
+
+        def solout(t, y):
+            ts.append(t)
+            ys.append(y.copy())
+
+        def rhs(t, y):
+            return [1.0/(t - 10.0 - 1j)]
+
+        ig = complex_ode(rhs).set_integrator(integrator)
+        ig.set_solout(solout)
+        ig.set_initial_value(y0, t0)
+        ret = ig.integrate(tend)
+        assert_array_equal(ys[0], y0)
+        assert_array_equal(ys[-1], ret)
+        assert_equal(ts[0], t0)
+        assert_equal(ts[-1], tend)
+
+    def test_solout(self):
+        for integrator in ('dopri5', 'dop853'):
+            self._run_solout_test(integrator)
+
+    def _run_solout_break_test(self, integrator):
+        # Check correct usage of stopping via solout
+        ts = []
+        ys = []
+        t0 = 0.0
+        tend = 20.0
+        y0 = [0.0]
+
+        def solout(t, y):
+            ts.append(t)
+            ys.append(y.copy())
+            if t > tend/2.0:
+                return -1
+
+        def rhs(t, y):
+            return [1.0/(t - 10.0 - 1j)]
+
+        ig = complex_ode(rhs).set_integrator(integrator)
+        ig.set_solout(solout)
+        ig.set_initial_value(y0, t0)
+        ret = ig.integrate(tend)
+        assert_array_equal(ys[0], y0)
+        assert_array_equal(ys[-1], ret)
+        assert_equal(ts[0], t0)
+        assert_(ts[-1] > tend/2.0)
+        assert_(ts[-1] < tend)
+
+    def test_solout_break(self):
+        for integrator in ('dopri5', 'dop853'):
+            self._run_solout_break_test(integrator)
+
+
+#------------------------------------------------------------------------------
+# Test problems
+#------------------------------------------------------------------------------
+
+
+class ODE:
+    """
+    ODE problem
+    """
+    stiff = False
+    cmplx = False
+    stop_t = 1
+    z0 = []
+
+    lband = None
+    uband = None
+
+    atol = 1e-6
+    rtol = 1e-5
+
+
+class SimpleOscillator(ODE):
+    r"""
+    Free vibration of a simple oscillator::
+        m \ddot{u} + k u = 0, u(0) = u_0, \dot{u}(0) = \dot{u}_0
+    Solution::
+        u(t) = u_0*cos(sqrt(k/m)*t)+\dot{u}_0*sin(sqrt(k/m)*t)/sqrt(k/m)
+    """
+    stop_t = 1 + 0.09
+    z0 = array([1.0, 0.1], float)
+
+    k = 4.0
+    m = 1.0
+
+    def f(self, z, t):
+        tmp = zeros((2, 2), float)
+        tmp[0, 1] = 1.0
+        tmp[1, 0] = -self.k / self.m
+        return dot(tmp, z)
+
+    def verify(self, zs, t):
+        omega = sqrt(self.k / self.m)
+        u = self.z0[0]*cos(omega*t) + self.z0[1]*sin(omega*t)/omega
+        return allclose(u, zs[:, 0], atol=self.atol, rtol=self.rtol)
+
+
+class ComplexExp(ODE):
+    r"""The equation :lm:`\dot u = i u`"""
+    stop_t = 1.23*pi
+    z0 = exp([1j, 2j, 3j, 4j, 5j])
+    cmplx = True
+
+    def f(self, z, t):
+        return 1j*z
+
+    def jac(self, z, t):
+        return 1j*eye(5)
+
+    def verify(self, zs, t):
+        u = self.z0 * exp(1j*t)
+        return allclose(u, zs, atol=self.atol, rtol=self.rtol)
+
+
+class Pi(ODE):
+    r"""Integrate 1/(t + 1j) from t=-10 to t=10"""
+    stop_t = 20
+    z0 = [0]
+    cmplx = True
+
+    def f(self, z, t):
+        return array([1./(t - 10 + 1j)])
+
+    def verify(self, zs, t):
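+        # Exact value: log(10 + 1j) - log(-10 + 1j) = -2j*arctan(10).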
+        u = -2j * np.arctan(10)
+        return allclose(u, zs[-1, :], atol=self.atol, rtol=self.rtol)
+
+
+class CoupledDecay(ODE):
+    r"""
+    Three coupled decays suited to banded treatment
+    (banded mode becomes necessary when N >> 3)
+    """
+
+    stiff = True
+    stop_t = 0.5
+    z0 = [5.0, 7.0, 13.0]
+    lband = 1
+    uband = 0
+
+    lmbd = [0.17, 0.23, 0.29]  # fictitious decay constants
+
+    def f(self, z, t):
+        lmbd = self.lmbd
+        return np.array([-lmbd[0]*z[0],
+                         -lmbd[1]*z[1] + lmbd[0]*z[0],
+                         -lmbd[2]*z[2] + lmbd[1]*z[1]])
+
+    def jac(self, z, t):
+        # The full Jacobian is
+        #
+        #    [-lmbd[0]      0         0   ]
+        #    [ lmbd[0]  -lmbd[1]      0   ]
+        #    [    0      lmbd[1]  -lmbd[2]]
+        #
+        # The lower and upper bandwidths are lband=1 and uband=0, resp.
+        # The representation of this array in packed format is
+        #
+        #    [-lmbd[0]  -lmbd[1]  -lmbd[2]]
+        #    [ lmbd[0]   lmbd[1]      0   ]
+
+        lmbd = self.lmbd
+        j = np.zeros((self.lband + self.uband + 1, 3), order='F')
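+        # Packed banded storage maps full-matrix entry (ri, ci) to
+        # j[uband + ri - ci, ci]; set_j below applies that rule.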
+
+        def set_j(ri, ci, val):
+            j[self.uband + ri - ci, ci] = val
+        set_j(0, 0, -lmbd[0])
+        set_j(1, 0, lmbd[0])
+        set_j(1, 1, -lmbd[1])
+        set_j(2, 1, lmbd[1])
+        set_j(2, 2, -lmbd[2])
+        return j
+
+    def verify(self, zs, t):
+        # Formulae derived by hand
+        lmbd = np.array(self.lmbd)
+        d10 = lmbd[1] - lmbd[0]
+        d21 = lmbd[2] - lmbd[1]
+        d20 = lmbd[2] - lmbd[0]
+        e0 = np.exp(-lmbd[0] * t)
+        e1 = np.exp(-lmbd[1] * t)
+        e2 = np.exp(-lmbd[2] * t)
+        u = np.vstack((
+            self.z0[0] * e0,
+            self.z0[1] * e1 + self.z0[0] * lmbd[0] / d10 * (e0 - e1),
+            self.z0[2] * e2 + self.z0[1] * lmbd[1] / d21 * (e1 - e2) +
+            lmbd[1] * lmbd[0] * self.z0[0] / d10 *
+            (1 / d20 * (e0 - e2) - 1 / d21 * (e1 - e2)))).transpose()
+        return allclose(u, zs, atol=self.atol, rtol=self.rtol)
+
+
+PROBLEMS = [SimpleOscillator, ComplexExp, Pi, CoupledDecay]
+
+#------------------------------------------------------------------------------
+
+
+def f(t, x):
+    dxdt = [x[1], -x[0]]
+    return dxdt
+
+
+def jac(t, x):
+    j = array([[0.0, 1.0],
+               [-1.0, 0.0]])
+    return j
+
+
+def f1(t, x, omega):
+    dxdt = [omega*x[1], -omega*x[0]]
+    return dxdt
+
+
+def jac1(t, x, omega):
+    j = array([[0.0, omega],
+               [-omega, 0.0]])
+    return j
+
+
+def f2(t, x, omega1, omega2):
+    dxdt = [omega1*x[1], -omega2*x[0]]
+    return dxdt
+
+
+def jac2(t, x, omega1, omega2):
+    j = array([[0.0, omega1],
+               [-omega2, 0.0]])
+    return j
+
+
+def fv(t, x, omega):
+    dxdt = [omega[0]*x[1], -omega[1]*x[0]]
+    return dxdt
+
+
+def jacv(t, x, omega):
+    j = array([[0.0, omega[0]],
+               [-omega[1], 0.0]])
+    return j
+
+
+class ODECheckParameterUse:
+    """Call an ode-class solver with several cases of parameter use."""
+
+    # Set these in subclasses; solver_name must be set before the tests
+    # can run.
+    solver_name = ''
+    solver_uses_jac = False
+
+    def _get_solver(self, f, jac):
+        solver = ode(f, jac)
+        if self.solver_uses_jac:
+            solver.set_integrator(self.solver_name, atol=1e-9, rtol=1e-7,
+                                  with_jacobian=self.solver_uses_jac)
+        else:
+            # XXX Shouldn't set_integrator *always* accept the keyword arg
+            # 'with_jacobian', and perhaps raise an exception if it is set
+            # to True if the solver can't actually use it?
+            solver.set_integrator(self.solver_name, atol=1e-9, rtol=1e-7)
+        return solver
+
+    def _check_solver(self, solver):
+        ic = [1.0, 0.0]
+        solver.set_initial_value(ic, 0.0)
+        solver.integrate(pi)
+        assert_array_almost_equal(solver.y, [-1.0, 0.0])
+
+    def test_no_params(self):
+        solver = self._get_solver(f, jac)
+        self._check_solver(solver)
+
+    def test_one_scalar_param(self):
+        solver = self._get_solver(f1, jac1)
+        omega = 1.0
+        solver.set_f_params(omega)
+        if self.solver_uses_jac:
+            solver.set_jac_params(omega)
+        self._check_solver(solver)
+
+    def test_two_scalar_params(self):
+        solver = self._get_solver(f2, jac2)
+        omega1 = 1.0
+        omega2 = 1.0
+        solver.set_f_params(omega1, omega2)
+        if self.solver_uses_jac:
+            solver.set_jac_params(omega1, omega2)
+        self._check_solver(solver)
+
+    def test_vector_param(self):
+        solver = self._get_solver(fv, jacv)
+        omega = [1.0, 1.0]
+        solver.set_f_params(omega)
+        if self.solver_uses_jac:
+            solver.set_jac_params(omega)
+        self._check_solver(solver)
+
+    def test_warns_on_failure(self):
+        # Set nsteps small to ensure failure
+        solver = self._get_solver(f, jac)
+        solver.set_integrator(self.solver_name, nsteps=1)
+        ic = [1.0, 0.0]
+        solver.set_initial_value(ic, 0.0)
+        assert_warns(UserWarning, solver.integrate, pi)
+
+
+class TestDOPRI5CheckParameterUse(ODECheckParameterUse):
+    solver_name = 'dopri5'
+    solver_uses_jac = False
+
+
+class TestDOP853CheckParameterUse(ODECheckParameterUse):
+    solver_name = 'dop853'
+    solver_uses_jac = False
+
+
+class TestVODECheckParameterUse(ODECheckParameterUse):
+    solver_name = 'vode'
+    solver_uses_jac = True
+
+
+class TestZVODECheckParameterUse(ODECheckParameterUse):
+    solver_name = 'zvode'
+    solver_uses_jac = True
+
+
+class TestLSODACheckParameterUse(ODECheckParameterUse):
+    solver_name = 'lsoda'
+    solver_uses_jac = True
+
+
+def test_odeint_trivial_time():
+    # Test that odeint succeeds when given a single time point
+    # and full_output=True.  This is a regression test for gh-4282.
+    y0 = 1
+    t = [0]
+    y, info = odeint(lambda y, t: -y, y0, t, full_output=True)
+    assert_array_equal(y, np.array([[y0]]))
+
+
+def test_odeint_banded_jacobian():
+    # Test the use of the `Dfun`, `ml` and `mu` options of odeint.
+
+    def func(y, t, c):
+        return c.dot(y)
+
+    def jac(y, t, c):
+        return c
+
+    def jac_transpose(y, t, c):
+        return c.T.copy(order='C')
+
+    def bjac_rows(y, t, c):
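+        # Row-packed banded Jacobian for odeint with ml=2, mu=1: row 0
+        # holds the superdiagonal (left-padded with a zero), row 1 the
+        # main diagonal, rows 2 and 3 the first and second subdiagonals
+        # (right-padded).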
+        jac = np.row_stack((np.r_[0, np.diag(c, 1)],
+                            np.diag(c),
+                            np.r_[np.diag(c, -1), 0],
+                            np.r_[np.diag(c, -2), 0, 0]))
+        return jac
+
+    def bjac_cols(y, t, c):
+        return bjac_rows(y, t, c).T.copy(order='C')
+
+    c = array([[-205, 0.01, 0.00, 0.0],
+               [0.1, -2.50, 0.02, 0.0],
+               [1e-3, 0.01, -2.0, 0.01],
+               [0.00, 0.00, 0.1, -1.0]])
+
+    y0 = np.ones(4)
+    t = np.array([0, 5, 10, 100])
+
+    # Use the full Jacobian.
+    sol1, info1 = odeint(func, y0, t, args=(c,), full_output=True,
+                         atol=1e-13, rtol=1e-11, mxstep=10000,
+                         Dfun=jac)
+
+    # Use the transposed full Jacobian, with col_deriv=True.
+    sol2, info2 = odeint(func, y0, t, args=(c,), full_output=True,
+                         atol=1e-13, rtol=1e-11, mxstep=10000,
+                         Dfun=jac_transpose, col_deriv=True)
+
+    # Use the banded Jacobian.
+    sol3, info3 = odeint(func, y0, t, args=(c,), full_output=True,
+                         atol=1e-13, rtol=1e-11, mxstep=10000,
+                         Dfun=bjac_rows, ml=2, mu=1)
+
+    # Use the transposed banded Jacobian, with col_deriv=True.
+    sol4, info4 = odeint(func, y0, t, args=(c,), full_output=True,
+                         atol=1e-13, rtol=1e-11, mxstep=10000,
+                         Dfun=bjac_cols, ml=2, mu=1, col_deriv=True)
+
+    assert_allclose(sol1, sol2, err_msg="sol1 != sol2")
+    assert_allclose(sol1, sol3, atol=1e-12, err_msg="sol1 != sol3")
+    assert_allclose(sol3, sol4, err_msg="sol3 != sol4")
+
+    # Verify that the number of jacobian evaluations was the same for the
+    # calls of odeint with a full jacobian and with a banded jacobian. This is
+    # a regression test--there was a bug in the handling of banded jacobians
+    # that resulted in an incorrect jacobian matrix being passed to the LSODA
+    # code.  That would cause errors or excessive jacobian evaluations.
+    assert_array_equal(info1['nje'], info2['nje'])
+    assert_array_equal(info3['nje'], info4['nje'])
+
+    # Test the use of tfirst
+    sol1ty, info1ty = odeint(lambda t, y, c: func(y, t, c), y0, t, args=(c,),
+                             full_output=True, atol=1e-13, rtol=1e-11,
+                             mxstep=10000,
+                             Dfun=lambda t, y, c: jac(y, t, c), tfirst=True)
+    # The code should execute the exact same sequence of floating point
+    # calculations, so these should be exactly equal. We'll be safe and use
+    # a small tolerance.
+    assert_allclose(sol1, sol1ty, rtol=1e-12, err_msg="sol1 != sol1ty")
+
+
+def test_odeint_errors():
+    def sys1d(x, t):
+        return -100*x
+
+    def bad1(x, t):
+        return 1.0/0
+
+    def bad2(x, t):
+        return "foo"
+
+    def bad_jac1(x, t):
+        return 1.0/0
+
+    def bad_jac2(x, t):
+        return [["foo"]]
+
+    def sys2d(x, t):
+        return [-100*x[0], -0.1*x[1]]
+
+    def sys2d_bad_jac(x, t):
+        return [[1.0/0, 0], [0, -0.1]]
+
+    assert_raises(ZeroDivisionError, odeint, bad1, 1.0, [0, 1])
+    assert_raises(ValueError, odeint, bad2, 1.0, [0, 1])
+
+    assert_raises(ZeroDivisionError, odeint, sys1d, 1.0, [0, 1], Dfun=bad_jac1)
+    assert_raises(ValueError, odeint, sys1d, 1.0, [0, 1], Dfun=bad_jac2)
+
+    assert_raises(ZeroDivisionError, odeint, sys2d, [1.0, 1.0], [0, 1],
+                  Dfun=sys2d_bad_jac)
+
+
+def test_odeint_bad_shapes():
+    # Tests of some errors that can occur with odeint.
+
+    def badrhs(x, t):
+        return [1, -1]
+
+    def sys1(x, t):
+        return -100*x
+
+    def badjac(x, t):
+        return [[0, 0, 0]]
+
+    # y0 must be at most 1-d.
+    bad_y0 = [[0, 0], [0, 0]]
+    assert_raises(ValueError, odeint, sys1, bad_y0, [0, 1])
+
+    # t must be at most 1-d.
+    bad_t = [[0, 1], [2, 3]]
+    assert_raises(ValueError, odeint, sys1, [10.0], bad_t)
+
+    # y0 is 10, but badrhs(x, t) returns [1, -1].
+    assert_raises(RuntimeError, odeint, badrhs, 10, [0, 1])
+
+    # shape of array returned by badjac(x, t) is not correct.
+    assert_raises(RuntimeError, odeint, sys1, [10, 10], [0, 1], Dfun=badjac)
+
+
+def test_repeated_t_values():
+    """Regression test for gh-8217."""
+
+    def func(x, t):
+        return -0.25*x
+
+    t = np.zeros(10)
+    sol = odeint(func, [1.], t)
+    assert_array_equal(sol, np.ones((len(t), 1)))
+
+    tau = 4*np.log(2)
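+    # tau is the half-life of dx/dt = -x/4: exp(-tau/4) = 1/2.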
+    t = [0]*9 + [tau, 2*tau, 2*tau, 3*tau]
+    sol = odeint(func, [1, 2], t, rtol=1e-12, atol=1e-12)
+    expected_sol = np.array([[1.0, 2.0]]*9 +
+                            [[0.5, 1.0],
+                             [0.25, 0.5],
+                             [0.25, 0.5],
+                             [0.125, 0.25]])
+    assert_allclose(sol, expected_sol)
+
+    # Edge case: empty t sequence.
+    sol = odeint(func, [1.], [])
+    assert_array_equal(sol, np.array([], dtype=np.float64).reshape((0, 1)))
+
+    # t values are not monotonic.
+    assert_raises(ValueError, odeint, func, [1.], [0, 1, 0.5, 0])
+    assert_raises(ValueError, odeint, func, [1, 2, 3], [0, -1, -2, 3])
diff --git a/__packaged__/coreml/.python_dependencies/scipy/integrate/tests/test_odeint_jac.py b/__packaged__/coreml/.python_dependencies/scipy/integrate/tests/test_odeint_jac.py
new file mode 100644
index 00000000..ef148900
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/integrate/tests/test_odeint_jac.py
@@ -0,0 +1,75 @@
+
+import numpy as np
+from numpy.testing import assert_equal, assert_allclose
+from scipy.integrate import odeint
+import scipy.integrate._test_odeint_banded as banded5x5
+
+
+def rhs(y, t):
+    dydt = np.zeros_like(y)
+    banded5x5.banded5x5(t, y, dydt)
+    return dydt
+
+
+def jac(y, t):
+    n = len(y)
+    jac = np.zeros((n, n), order='F')
+    banded5x5.banded5x5_jac(t, y, 1, 1, jac)
+    return jac
+
+
+def bjac(y, t):
+    n = len(y)
+    bjac = np.zeros((4, n), order='F')
+    banded5x5.banded5x5_bjac(t, y, 1, 1, bjac)
+    return bjac
+
+
+JACTYPE_FULL = 1
+JACTYPE_BANDED = 4
+
+
+def check_odeint(jactype):
+    if jactype == JACTYPE_FULL:
+        ml = None
+        mu = None
+        jacobian = jac
+    elif jactype == JACTYPE_BANDED:
+        ml = 2
+        mu = 1
+        jacobian = bjac
+    else:
+        raise ValueError("invalid jactype: %r" % (jactype,))
+
+    y0 = np.arange(1.0, 6.0)
+    # These tolerances must match the tolerances used in banded5x5.f.
+    rtol = 1e-11
+    atol = 1e-13
+    dt = 0.125
+    nsteps = 64
+    t = dt * np.arange(nsteps+1)
+
+    sol, info = odeint(rhs, y0, t,
+                       Dfun=jacobian, ml=ml, mu=mu,
+                       atol=atol, rtol=rtol, full_output=True)
+    yfinal = sol[-1]
+    odeint_nst = info['nst'][-1]
+    odeint_nfe = info['nfe'][-1]
+    odeint_nje = info['nje'][-1]
+
+    y1 = y0.copy()
+    # Pure Fortran solution. y1 is modified in-place.
+    nst, nfe, nje = banded5x5.banded5x5_solve(y1, nsteps, dt, jactype)
+
+    # It is likely that yfinal and y1 are *exactly* the same, but
+    # we'll be cautious and use assert_allclose.
+    assert_allclose(yfinal, y1, rtol=1e-12)
+    assert_equal((odeint_nst, odeint_nfe, odeint_nje), (nst, nfe, nje))
+
+
+def test_odeint_full_jac():
+    check_odeint(JACTYPE_FULL)
+
+
+def test_odeint_banded_jac():
+    check_odeint(JACTYPE_BANDED)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/integrate/tests/test_quadpack.py b/__packaged__/coreml/.python_dependencies/scipy/integrate/tests/test_quadpack.py
new file mode 100644
index 00000000..0d2ea4bc
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/integrate/tests/test_quadpack.py
@@ -0,0 +1,675 @@
+import sys
+import math
+import numpy as np
+from numpy import sqrt, cos, sin, arctan, exp, log, pi, Inf
+from numpy.testing import (assert_,
+        assert_allclose, assert_array_less, assert_almost_equal)
+import pytest
+
+from scipy.integrate import quad, dblquad, tplquad, nquad
+from scipy.special import erf, erfc
+from scipy._lib._ccallback import LowLevelCallable
+
+import ctypes
+import ctypes.util
+from scipy._lib._ccallback_c import sine_ctypes
+
+import scipy.integrate._test_multivariate as clib_test
+
+
+def assert_quad(value_and_err, tabled_value, error_tolerance=1.5e-8):
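+    # Compare a quadrature result against a reference value, using the
+    # reported error estimate as the tolerance; optionally also bound
+    # the estimate itself.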
+    value, err = value_and_err
+    assert_allclose(value, tabled_value, atol=err, rtol=0)
+    if error_tolerance is not None:
+        assert_array_less(err, error_tolerance)
+
+
+def get_clib_test_routine(name, restype, *argtypes):
+    ptr = getattr(clib_test, name)
+    return ctypes.cast(ptr, ctypes.CFUNCTYPE(restype, *argtypes))
+
+
+class TestCtypesQuad:
+    def setup_method(self):
+        if sys.platform == 'win32':
+            files = ['api-ms-win-crt-math-l1-1-0.dll']
+        elif sys.platform == 'darwin':
+            files = ['libm.dylib']
+        else:
+            files = ['libm.so', 'libm.so.6']
+
+        for file in files:
+            try:
+                self.lib = ctypes.CDLL(file)
+                break
+            except OSError:
+                pass
+        else:
+            # This test doesn't work on some Linux platforms (Fedora for
+            # example) that put an ld script in libm.so - see gh-5370
+            pytest.skip("Ctypes can't import libm.so")
+
+        restype = ctypes.c_double
+        argtypes = (ctypes.c_double,)
+        for name in ['sin', 'cos', 'tan']:
+            func = getattr(self.lib, name)
+            func.restype = restype
+            func.argtypes = argtypes
+
+    def test_typical(self):
+        assert_quad(quad(self.lib.sin, 0, 5), quad(math.sin, 0, 5)[0])
+        assert_quad(quad(self.lib.cos, 0, 5), quad(math.cos, 0, 5)[0])
+        assert_quad(quad(self.lib.tan, 0, 1), quad(math.tan, 0, 1)[0])
+
+    def test_ctypes_sine(self):
+        quad(LowLevelCallable(sine_ctypes), 0, 1)
+
+    def test_ctypes_variants(self):
+        sin_0 = get_clib_test_routine('_sin_0', ctypes.c_double,
+                                      ctypes.c_double, ctypes.c_void_p)
+
+        sin_1 = get_clib_test_routine('_sin_1', ctypes.c_double,
+                                      ctypes.c_int, ctypes.POINTER(ctypes.c_double),
+                                      ctypes.c_void_p)
+
+        sin_2 = get_clib_test_routine('_sin_2', ctypes.c_double,
+                                      ctypes.c_double)
+
+        sin_3 = get_clib_test_routine('_sin_3', ctypes.c_double,
+                                      ctypes.c_int, ctypes.POINTER(ctypes.c_double))
+
+        sin_4 = get_clib_test_routine('_sin_3', ctypes.c_double,
+                                      ctypes.c_int, ctypes.c_double)
+
+        all_sigs = [sin_0, sin_1, sin_2, sin_3, sin_4]
+        legacy_sigs = [sin_2, sin_4]
+        legacy_only_sigs = [sin_4]
+
+        # LowLevelCallables work for new signatures
+        for j, func in enumerate(all_sigs):
+            callback = LowLevelCallable(func)
+            if func in legacy_only_sigs:
+                pytest.raises(ValueError, quad, callback, 0, pi)
+            else:
+                assert_allclose(quad(callback, 0, pi)[0], 2.0)
+
+        # Plain ctypes items work only for legacy signatures
+        for j, func in enumerate(legacy_sigs):
+            if func in legacy_sigs:
+                assert_allclose(quad(func, 0, pi)[0], 2.0)
+            else:
+                pytest.raises(ValueError, quad, func, 0, pi)
+
+
+class TestMultivariateCtypesQuad:
+    def setup_method(self):
+        restype = ctypes.c_double
+        argtypes = (ctypes.c_int, ctypes.c_double)
+        for name in ['_multivariate_typical', '_multivariate_indefinite',
+                     '_multivariate_sin']:
+            func = get_clib_test_routine(name, restype, *argtypes)
+            setattr(self, name, func)
+
+    def test_typical(self):
+        # 1) Typical function with two extra arguments:
+        assert_quad(quad(self._multivariate_typical, 0, pi, (2, 1.8)),
+                    0.30614353532540296487)
+
+    def test_indefinite(self):
+        # 2) Infinite integration limits --- Euler's constant
+        assert_quad(quad(self._multivariate_indefinite, 0, Inf),
+                    0.577215664901532860606512)
+
+    def test_threadsafety(self):
+        # Ensure multivariate ctypes are threadsafe
+        def threadsafety(y):
+            return y + quad(self._multivariate_sin, 0, 1)[0]
+        assert_quad(quad(threadsafety, 0, 1), 0.9596976941318602)
+
+
+class TestQuad:
+    def test_typical(self):
+        # 1) Typical function with two extra arguments:
+        def myfunc(x, n, z):       # Bessel function integrand
+            return cos(n*x-z*sin(x))/pi
+        assert_quad(quad(myfunc, 0, pi, (2, 1.8)), 0.30614353532540296487)
+
+    def test_indefinite(self):
+        # 2) Infinite integration limits --- Euler's constant
+        def myfunc(x):           # Euler's constant integrand
+            return -exp(-x)*log(x)
+        assert_quad(quad(myfunc, 0, Inf), 0.577215664901532860606512)
+
+    def test_singular(self):
+        # 3) Singular points in region of integration.
+        def myfunc(x):
+            if 0 < x < 2.5:
+                return sin(x)
+            elif 2.5 <= x <= 5.0:
+                return exp(-x)
+            else:
+                return 0.0
+
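+        # Exact value: integral of sin(x) over [0, 2.5] plus integral of
+        # exp(-x) over [2.5, 5]; 'points' marks the integrand's
+        # breakpoints.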
+        assert_quad(quad(myfunc, 0, 10, points=[2.5, 5.0]),
+                    1 - cos(2.5) + exp(-2.5) - exp(-5.0))
+
+    def test_sine_weighted_finite(self):
+        # 4) Sine weighted integral (finite limits)
+        def myfunc(x, a):
+            return exp(a*(x-1))
+
+        ome = 2.0**3.4
+        assert_quad(quad(myfunc, 0, 1, args=20, weight='sin', wvar=ome),
+                    (20*sin(ome)-ome*cos(ome)+ome*exp(-20))/(20**2 + ome**2))
+
+    def test_sine_weighted_infinite(self):
+        # 5) Sine weighted integral (infinite limits)
+        def myfunc(x, a):
+            return exp(-x*a)
+
+        a = 4.0
+        ome = 3.0
+        assert_quad(quad(myfunc, 0, Inf, args=a, weight='sin', wvar=ome),
+                    ome/(a**2 + ome**2))
+
+    def test_cosine_weighted_infinite(self):
+        # 6) Cosine weighted integral (negative infinite limits)
+        def myfunc(x, a):
+            return exp(x*a)
+
+        a = 2.5
+        ome = 2.3
+        assert_quad(quad(myfunc, -Inf, 0, args=a, weight='cos', wvar=ome),
+                    a/(a**2 + ome**2))
+
+    def test_algebraic_log_weight(self):
+        # 7) Algebraic-logarithmic weight.
+        def myfunc(x, a):
+            return 1/(1+x+2**(-a))
+
+        a = 1.5
+        assert_quad(quad(myfunc, -1, 1, args=a, weight='alg',
+                         wvar=(-0.5, -0.5)),
+                    pi/sqrt((1+2**(-a))**2 - 1))
+
+    def test_cauchypv_weight(self):
+        # 8) Cauchy principal value weighting w(x) = 1/(x-c)
+        def myfunc(x, a):
+            return 2.0**(-a)/((x-1)**2+4.0**(-a))
+
+        a = 0.4
+        tabledValue = ((2.0**(-0.4)*log(1.5) -
+                        2.0**(-1.4)*log((4.0**(-a)+16) / (4.0**(-a)+1)) -
+                        arctan(2.0**(a+2)) -
+                        arctan(2.0**a)) /
+                       (4.0**(-a) + 1))
+        assert_quad(quad(myfunc, 0, 5, args=0.4, weight='cauchy', wvar=2.0),
+                    tabledValue, error_tolerance=1.9e-8)
+
+    def test_b_less_than_a(self):
+        def f(x, p, q):
+            return p * np.exp(-q*x)
+
+        val_1, err_1 = quad(f, 0, np.inf, args=(2, 3))
+        val_2, err_2 = quad(f, np.inf, 0, args=(2, 3))
+        assert_allclose(val_1, -val_2, atol=max(err_1, err_2))
+
+    def test_b_less_than_a_2(self):
+        def f(x, s):
+            return np.exp(-x**2 / 2 / s) / np.sqrt(2.*s)
+
+        val_1, err_1 = quad(f, -np.inf, np.inf, args=(2,))
+        val_2, err_2 = quad(f, np.inf, -np.inf, args=(2,))
+        assert_allclose(val_1, -val_2, atol=max(err_1, err_2))
+
+    def test_b_less_than_a_3(self):
+        def f(x):
+            return 1.0
+
+        val_1, err_1 = quad(f, 0, 1, weight='alg', wvar=(0, 0))
+        val_2, err_2 = quad(f, 1, 0, weight='alg', wvar=(0, 0))
+        assert_allclose(val_1, -val_2, atol=max(err_1, err_2))
+
+    def test_b_less_than_a_full_output(self):
+        def f(x):
+            return 1.0
+
+        res_1 = quad(f, 0, 1, weight='alg', wvar=(0, 0), full_output=True)
+        res_2 = quad(f, 1, 0, weight='alg', wvar=(0, 0), full_output=True)
+        err = max(res_1[1], res_2[1])
+        assert_allclose(res_1[0], -res_2[0], atol=err)
+
+    def test_double_integral(self):
+        # 9) Double Integral test
+        def simpfunc(y, x):       # Note order of arguments.
+            return x+y
+
+        a, b = 1.0, 2.0
+        assert_quad(dblquad(simpfunc, a, b, lambda x: x, lambda x: 2*x),
+                    5/6.0 * (b**3.0-a**3.0))
+
+    def test_double_integral2(self):
+        def func(x0, x1, t0, t1):
+            return x0 + x1 + t0 + t1
+        g = lambda x: x
+        h = lambda x: 2 * x
+        args = 1, 2
+        assert_quad(dblquad(func, 1, 2, g, h, args=args), 35./6 + 9*.5)
+
+    def test_double_integral3(self):
+        def func(x0, x1):
+            return x0 + x1 + 1 + 2
+        assert_quad(dblquad(func, 1, 2, 1, 2), 6.)
+
+    @pytest.mark.parametrize(
+        "x_lower, x_upper, y_lower, y_upper, expected",
+        [
+            # Multiple integration of a function in n = 2 variables: f(x, y, z)
+            # over domain D = [-inf, 0] for all n.
+            (-np.inf, 0, -np.inf, 0, np.pi / 4),
+            # Multiple integration of a function in n = 2 variables: f(x, y, z)
+            # over domain D = [-inf, -1] for each n (one at a time).
+            (-np.inf, -1, -np.inf, 0, np.pi / 4 * erfc(1)),
+            (-np.inf, 0, -np.inf, -1, np.pi / 4 * erfc(1)),
+            # Multiple integration of a function in n = 2 variables: f(x, y, z)
+            # over domain D = [-inf, -1] for all n.
+            (-np.inf, -1, -np.inf, -1, np.pi / 4 * (erfc(1) ** 2)),
+            # Multiple integration of a function in n = 2 variables: f(x, y, z)
+            # over domain D = [-inf, 1] for each n (one at a time).
+            (-np.inf, 1, -np.inf, 0, np.pi / 4 * (erf(1) + 1)),
+            (-np.inf, 0, -np.inf, 1, np.pi / 4 * (erf(1) + 1)),
+            # Multiple integration of a function in n = 2 variables: f(x, y, z)
+            # over domain D = [-inf, 1] for all n.
+            (-np.inf, 1, -np.inf, 1, np.pi / 4 * ((erf(1) + 1) ** 2)),
+            # Multiple integration of a function in n = 2 variables: f(x, y, z)
+            # over domain Dx = [-inf, -1] and Dy = [-inf, 1].
+            (-np.inf, -1, -np.inf, 1, np.pi / 4 * ((erf(1) + 1) * erfc(1))),
+            # Multiple integration of a function in n = 2 variables: f(x, y, z)
+            # over domain Dx = [-inf, 1] and Dy = [-inf, -1].
+            (-np.inf, 1, -np.inf, -1, np.pi / 4 * ((erf(1) + 1) * erfc(1))),
+            # Multiple integration of a function in n = 2 variables: f(x, y, z)
+            # over domain D = [0, inf] for all n.
+            (0, np.inf, 0, np.inf, np.pi / 4),
+            # Multiple integration of a function in n = 2 variables: f(x, y, z)
+            # over domain D = [1, inf] for each n (one at a time).
+            (1, np.inf, 0, np.inf, np.pi / 4 * erfc(1)),
+            (0, np.inf, 1, np.inf, np.pi / 4 * erfc(1)),
+            # Multiple integration of a function in n = 2 variables: f(x, y, z)
+            # over domain D = [1, inf] for all n.
+            (1, np.inf, 1, np.inf, np.pi / 4 * (erfc(1) ** 2)),
+            # Multiple integration of a function in n = 2 variables: f(x, y, z)
+            # over domain D = [-1, inf] for each n (one at a time).
+            (-1, np.inf, 0, np.inf, np.pi / 4 * (erf(1) + 1)),
+            (0, np.inf, -1, np.inf, np.pi / 4 * (erf(1) + 1)),
+            # Multiple integration of a function in n = 2 variables: f(x, y, z)
+            # over domain D = [-1, inf] for all n.
+            (-1, np.inf, -1, np.inf, np.pi / 4 * ((erf(1) + 1) ** 2)),
+            # Multiple integration of a function in n = 2 variables: f(x, y, z)
+            # over domain Dx = [-1, inf] and Dy = [1, inf].
+            (-1, np.inf, 1, np.inf, np.pi / 4 * ((erf(1) + 1) * erfc(1))),
+            # Multiple integration of a function in n = 2 variables: f(x, y, z)
+            # over domain Dx = [1, inf] and Dy = [-1, inf].
+            (1, np.inf, -1, np.inf, np.pi / 4 * ((erf(1) + 1) * erfc(1))),
+            # Multiple integration of a function in n = 2 variables: f(x, y, z)
+            # over domain D = [-inf, inf] for all n.
+            (-np.inf, np.inf, -np.inf, np.inf, np.pi)
+        ]
+    )
+    def test_double_integral_improper(
+            self, x_lower, x_upper, y_lower, y_upper, expected
+    ):
+        # The Gaussian Integral.
+        def f(x, y):
+            return np.exp(-x ** 2 - y ** 2)
+
+        assert_quad(
+            dblquad(f, x_lower, x_upper, y_lower, y_upper),
+            expected,
+            error_tolerance=3e-8
+        )
+
+    def test_triple_integral(self):
+        # 10) Triple Integral test
+        def simpfunc(z, y, x, t):      # Note order of arguments.
+            return (x+y+z)*t
+
+        a, b = 1.0, 2.0
+        assert_quad(tplquad(simpfunc, a, b,
+                            lambda x: x, lambda x: 2*x,
+                            lambda x, y: x - y, lambda x, y: x + y,
+                            (2.,)),
+                     2*8/3.0 * (b**4.0 - a**4.0))
+
+    @pytest.mark.parametrize(
+        "x_lower, x_upper, y_lower, y_upper, z_lower, z_upper, expected",
+        [
+            # Multiple integration of a function in n = 3 variables: f(x, y, z)
+            # over domain D = [-inf, 0] for all n.
+            (-np.inf, 0, -np.inf, 0, -np.inf, 0, (np.pi ** (3 / 2)) / 8),
+            # Multiple integration of a function in n = 3 variables: f(x, y, z)
+            # over domain D = [-inf, -1] for each n (one at a time).
+            (-np.inf, -1, -np.inf, 0, -np.inf, 0,
+             (np.pi ** (3 / 2)) / 8 * erfc(1)),
+            (-np.inf, 0, -np.inf, -1, -np.inf, 0,
+             (np.pi ** (3 / 2)) / 8 * erfc(1)),
+            (-np.inf, 0, -np.inf, 0, -np.inf, -1,
+             (np.pi ** (3 / 2)) / 8 * erfc(1)),
+            # Multiple integration of a function in n = 3 variables: f(x, y, z)
+            # over domain D = [-inf, -1] for each n (two at a time).
+            (-np.inf, -1, -np.inf, -1, -np.inf, 0,
+             (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)),
+            (-np.inf, -1, -np.inf, 0, -np.inf, -1,
+             (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)),
+            (-np.inf, 0, -np.inf, -1, -np.inf, -1,
+             (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)),
+            # Multiple integration of a function in n = 3 variables: f(x, y, z)
+            # over domain D = [-inf, -1] for all n.
+            (-np.inf, -1, -np.inf, -1, -np.inf, -1,
+             (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 3)),
+            # Multiple integration of a function in n = 3 variables: f(x, y, z)
+            # over domain Dx = [-inf, -1] and Dy = Dz = [-inf, 1].
+            (-np.inf, -1, -np.inf, 1, -np.inf, 1,
+             (np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))),
+            # Multiple integration of a function in n = 3 variables: f(x, y, z)
+            # over domain Dx = Dy = [-inf, -1] and Dz = [-inf, 1].
+            (-np.inf, -1, -np.inf, -1, -np.inf, 1,
+             (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))),
+            # Multiple integration of a function in n = 3 variables: f(x, y, z)
+            # over domain Dx = Dz = [-inf, -1] and Dy = [-inf, 1].
+            (-np.inf, -1, -np.inf, 1, -np.inf, -1,
+             (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))),
+            # Multiple integration of a function in n = 3 variables: f(x, y, z)
+            # over domain Dx = [-inf, 1] and Dy = Dz = [-inf, -1].
+            (-np.inf, 1, -np.inf, -1, -np.inf, -1,
+             (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))),
+            # Multiple integration of a function in n = 3 variables: f(x, y, z)
+            # over domain Dx = Dy = [-inf, 1] and Dz = [-inf, -1].
+            (-np.inf, 1, -np.inf, 1, -np.inf, -1,
+             (np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))),
+            # Multiple integration of a function in n = 3 variables: f(x, y, z)
+            # over domain Dx = Dz = [-inf, 1] and Dy = [-inf, -1].
+            (-np.inf, 1, -np.inf, -1, -np.inf, 1,
+             (np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))),
+            # Multiple integration of a function in n = 3 variables: f(x, y, z)
+            # over domain D = [-inf, 1] for each n (one at a time).
+            (-np.inf, 1, -np.inf, 0, -np.inf, 0,
+             (np.pi ** (3 / 2)) / 8 * (erf(1) + 1)),
+            (-np.inf, 0, -np.inf, 1, -np.inf, 0,
+             (np.pi ** (3 / 2)) / 8 * (erf(1) + 1)),
+            (-np.inf, 0, -np.inf, 0, -np.inf, 1,
+             (np.pi ** (3 / 2)) / 8 * (erf(1) + 1)),
+            # Multiple integration of a function in n = 3 variables: f(x, y, z)
+            # over domain D = [-inf, 1] for each n (two at a time).
+            (-np.inf, 1, -np.inf, 1, -np.inf, 0,
+             (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)),
+            (-np.inf, 1, -np.inf, 0, -np.inf, 1,
+             (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)),
+            (-np.inf, 0, -np.inf, 1, -np.inf, 1,
+             (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)),
+            # Multiple integration of a function in n = 3 variables: f(x, y, z)
+            # over domain D = [-inf, 1] for all n.
+            (-np.inf, 1, -np.inf, 1, -np.inf, 1,
+             (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 3)),
+            # Multiple integration of a function in n = 3 variables: f(x, y, z)
+            # over domain D = [0, inf] for all n.
+            (0, np.inf, 0, np.inf, 0, np.inf, (np.pi ** (3 / 2)) / 8),
+            # Multiple integration of a function in n = 3 variables: f(x, y, z)
+            # over domain D = [1, inf] for each n (one at a time).
+            (1, np.inf, 0, np.inf, 0, np.inf,
+             (np.pi ** (3 / 2)) / 8 * erfc(1)),
+            (0, np.inf, 1, np.inf, 0, np.inf,
+             (np.pi ** (3 / 2)) / 8 * erfc(1)),
+            (0, np.inf, 0, np.inf, 1, np.inf,
+             (np.pi ** (3 / 2)) / 8 * erfc(1)),
+            # Multiple integration of a function in n = 3 variables: f(x, y, z)
+            # over domain D = [1, inf] for each n (two at a time).
+            (1, np.inf, 1, np.inf, 0, np.inf,
+             (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)),
+            (1, np.inf, 0, np.inf, 1, np.inf,
+             (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)),
+            (0, np.inf, 1, np.inf, 1, np.inf,
+             (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)),
+            # Multiple integration of a function in n = 3 variables: f(x, y, z)
+            # over domain D = [1, inf] for all n.
+            (1, np.inf, 1, np.inf, 1, np.inf,
+             (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 3)),
+            # Multiple integration of a function in n = 3 variables: f(x, y, z)
+            # over domain D = [-1, inf] for each n (one at a time).
+            (-1, np.inf, 0, np.inf, 0, np.inf,
+             (np.pi ** (3 / 2)) / 8 * (erf(1) + 1)),
+            (0, np.inf, -1, np.inf, 0, np.inf,
+             (np.pi ** (3 / 2)) / 8 * (erf(1) + 1)),
+            (0, np.inf, 0, np.inf, -1, np.inf,
+             (np.pi ** (3 / 2)) / 8 * (erf(1) + 1)),
+            # Multiple integration of a function in n = 3 variables: f(x, y, z)
+            # over domain D = [-1, inf] for each n (two at a time).
+            (-1, np.inf, -1, np.inf, 0, np.inf,
+             (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)),
+            (-1, np.inf, 0, np.inf, -1, np.inf,
+             (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)),
+            (0, np.inf, -1, np.inf, -1, np.inf,
+             (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)),
+            # Multiple integration of a function in n = 3 variables: f(x, y, z)
+            # over domain D = [-1, inf] for all n.
+            (-1, np.inf, -1, np.inf, -1, np.inf,
+             (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 3)),
+            # Multiple integration of a function in n = 3 variables: f(x, y, z)
+            # over domain Dx = [1, inf] and Dy = Dz = [-1, inf].
+            (1, np.inf, -1, np.inf, -1, np.inf,
+             (np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))),
+            # Multiple integration of a function in n = 3 variables: f(x, y, z)
+            # over domain Dx = Dy = [1, inf] and Dz = [-1, inf].
+            (1, np.inf, 1, np.inf, -1, np.inf,
+             (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))),
+            # Multiple integration of a function in n = 3 variables: f(x, y, z)
+            # over domain Dx = Dz = [1, inf] and Dy = [-1, inf].
+            (1, np.inf, -1, np.inf, 1, np.inf,
+             (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))),
+            # Multiple integration of a function in n = 3 variables: f(x, y, z)
+            # over domain Dx = [-1, inf] and Dy = Dz = [1, inf].
+            (-1, np.inf, 1, np.inf, 1, np.inf,
+             (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))),
+            # Multiple integration of a function in n = 3 variables: f(x, y, z)
+            # over domain Dx = Dy = [-1, inf] and Dz = [1, inf].
+            (-1, np.inf, -1, np.inf, 1, np.inf,
+             (np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))),
+            # Multiple integration of a function in n = 3 variables: f(x, y, z)
+            # over domain Dx = Dz = [-1, inf] and Dy = [1, inf].
+            (-1, np.inf, 1, np.inf, -1, np.inf,
+             (np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))),
+            # Multiple integration of a function in n = 3 variables: f(x, y, z)
+            # over domain D = [-inf, inf] for all n.
+            (-np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf,
+             np.pi ** (3 / 2)),
+        ],
+    )
+    def test_triple_integral_improper(
+            self,
+            x_lower,
+            x_upper,
+            y_lower,
+            y_upper,
+            z_lower,
+            z_upper,
+            expected
+    ):
+        # The Gaussian Integral.
+        def f(x, y, z):
+            return np.exp(-x ** 2 - y ** 2 - z ** 2)
+
+        assert_quad(
+            tplquad(f, x_lower, x_upper, y_lower, y_upper, z_lower, z_upper),
+            expected,
+            error_tolerance=6e-8
+        )
+
+    def test_complex(self):
+        def tfunc(x):
+            return np.exp(1j*x)
+
+        assert np.allclose(
+                    quad(tfunc, 0, np.pi/2, complex_func=True)[0],
+                    1+1j)
+
+        # We consider a divergent case in order to force quadpack
+        # to return an error message.  The output is compared
+        # against what is returned by explicit integration
+        # of the parts.
+        kwargs = {'a': 0, 'b': np.inf, 'full_output': True,
+                  'weight': 'cos', 'wvar': 1}
+        res_c = quad(tfunc, complex_func=True, **kwargs)
+        res_r = quad(lambda x: np.real(np.exp(1j*x)),
+                     complex_func=False,
+                     **kwargs)
+        res_i = quad(lambda x: np.imag(np.exp(1j*x)),
+                     complex_func=False,
+                     **kwargs)
+
+        np.testing.assert_equal(res_c[0], res_r[0] + 1j*res_i[0])
+        np.testing.assert_equal(res_c[1], res_r[1] + 1j*res_i[1])
+
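+        # (Added note: with full_output=True and complex_func=True, the info
+        # dict exposes 'real' and 'imag' entries mirroring the extra outputs
+        # of the two real-valued integrations, as checked below.)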
+        assert len(res_c[2]['real']) == len(res_r[2:]) == 3
+        assert res_c[2]['real'][2] == res_r[4]
+        assert res_c[2]['real'][1] == res_r[3]
+        assert res_c[2]['real'][0]['lst'] == res_r[2]['lst']
+
+        assert len(res_c[2]['imag']) == len(res_i[2:]) == 1
+        assert res_c[2]['imag'][0]['lst'] == res_i[2]['lst']
+
+
+class TestNQuad:
+    def test_fixed_limits(self):
+        def func1(x0, x1, x2, x3):
+            val = (x0**2 + x1*x2 - x3**3 + np.sin(x0) +
+                   (1 if (x0 - 0.2*x3 - 0.5 - 0.25*x1 > 0) else 0))
+            return val
+
+        def opts_basic(*args):
+            return {'points': [0.2*args[2] + 0.5 + 0.25*args[0]]}
+
+        res = nquad(func1, [[0, 1], [-1, 1], [.13, .8], [-.15, 1]],
+                    opts=[opts_basic, {}, {}, {}], full_output=True)
+        assert_quad(res[:-1], 1.5267454070738635)
+        assert_(res[-1]['neval'] > 0 and res[-1]['neval'] < 4e5)
+
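+    # Sketch of the ``points`` option used above (an illustrative addition,
+    # not in the original suite): telling the integrator where the integrand
+    # jumps lets it split the interval at the discontinuity.  A minimal 1-D
+    # analogue with plain ``quad``:
+    def test_points_option_sketch(self):
+        def step(x):
+            return 1.0 if x > 0.5 else 0.0
+
+        assert_quad(quad(step, 0, 1, points=[0.5]), 0.5)
+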
+    def test_variable_limits(self):
+        scale = .1
+
+        def func2(x0, x1, x2, x3, t0, t1):
+            val = (x0*x1*x3**2 + np.sin(x2) + 1 +
+                   (1 if x0 + t1*x1 - t0 > 0 else 0))
+            return val
+
+        def lim0(x1, x2, x3, t0, t1):
+            return [scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) - 1,
+                    scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) + 1]
+
+        def lim1(x2, x3, t0, t1):
+            return [scale * (t0*x2 + t1*x3) - 1,
+                    scale * (t0*x2 + t1*x3) + 1]
+
+        def lim2(x3, t0, t1):
+            return [scale * (x3 + t0**2*t1**3) - 1,
+                    scale * (x3 + t0**2*t1**3) + 1]
+
+        def lim3(t0, t1):
+            return [scale * (t0 + t1) - 1, scale * (t0 + t1) + 1]
+
+        def opts0(x1, x2, x3, t0, t1):
+            return {'points': [t0 - t1*x1]}
+
+        def opts1(x2, x3, t0, t1):
+            return {}
+
+        def opts2(x3, t0, t1):
+            return {}
+
+        def opts3(t0, t1):
+            return {}
+
+        res = nquad(func2, [lim0, lim1, lim2, lim3], args=(0, 0),
+                    opts=[opts0, opts1, opts2, opts3])
+        assert_quad(res, 25.066666666666663)
+
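+    # (Convention note, added for clarity: ``nquad`` integrates the first
+    # argument of ``func`` innermost, and each range/opts callable receives
+    # the remaining outer variables followed by ``args`` -- hence the
+    # ``lim0(x1, x2, x3, t0, t1)`` signature above.)
+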
+    def test_square_separate_ranges_and_opts(self):
+        def f(y, x):
+            return 1.0
+
+        assert_quad(nquad(f, [[-1, 1], [-1, 1]], opts=[{}, {}]), 4.0)
+
+    def test_square_aliased_ranges_and_opts(self):
+        def f(y, x):
+            return 1.0
+
+        r = [-1, 1]
+        opt = {}
+        assert_quad(nquad(f, [r, r], opts=[opt, opt]), 4.0)
+
+    def test_square_separate_fn_ranges_and_opts(self):
+        def f(y, x):
+            return 1.0
+
+        def fn_range0(*args):
+            return (-1, 1)
+
+        def fn_range1(*args):
+            return (-1, 1)
+
+        def fn_opt0(*args):
+            return {}
+
+        def fn_opt1(*args):
+            return {}
+
+        ranges = [fn_range0, fn_range1]
+        opts = [fn_opt0, fn_opt1]
+        assert_quad(nquad(f, ranges, opts=opts), 4.0)
+
+    def test_square_aliased_fn_ranges_and_opts(self):
+        def f(y, x):
+            return 1.0
+
+        def fn_range(*args):
+            return (-1, 1)
+
+        def fn_opt(*args):
+            return {}
+
+        ranges = [fn_range, fn_range]
+        opts = [fn_opt, fn_opt]
+        assert_quad(nquad(f, ranges, opts=opts), 4.0)
+
+    def test_matching_quad(self):
+        def func(x):
+            return x**2 + 1
+
+        res, reserr = quad(func, 0, 4)
+        res2, reserr2 = nquad(func, ranges=[[0, 4]])
+        assert_almost_equal(res, res2)
+        assert_almost_equal(reserr, reserr2)
+
+    def test_matching_dblquad(self):
+        def func2d(x0, x1):
+            return x0**2 + x1**3 - x0 * x1 + 1
+
+        res, reserr = dblquad(func2d, -2, 2, lambda x: -3, lambda x: 3)
+        res2, reserr2 = nquad(func2d, [[-3, 3], (-2, 2)])
+        assert_almost_equal(res, res2)
+        assert_almost_equal(reserr, reserr2)
+
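+    # (Range-order note, added for clarity: ``nquad`` integrates ``x0``
+    # innermost, so its first range [-3, 3] corresponds to ``dblquad``'s
+    # inner ``gfun``/``hfun`` limits and (-2, 2) to the outer ``a``/``b``.)
+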
+    def test_matching_tplquad(self):
+        def func3d(x0, x1, x2, c0, c1):
+            return x0**2 + c0 * x1**3 - x0 * x1 + 1 + c1 * np.sin(x2)
+
+        res = tplquad(func3d, -1, 2, lambda x: -2, lambda x: 2,
+                      lambda x, y: -np.pi, lambda x, y: np.pi,
+                      args=(2, 3))
+        res2 = nquad(func3d, [[-np.pi, np.pi], [-2, 2], (-1, 2)], args=(2, 3))
+        assert_almost_equal(res, res2)
+
+    def test_dict_as_opts(self):
+        try:
+            nquad(lambda x, y: x * y, [[0, 1], [0, 1]], opts={'epsrel': 0.0001})
+        except TypeError:
+            assert False
+
diff --git a/__packaged__/coreml/.python_dependencies/scipy/integrate/tests/test_quadrature.py b/__packaged__/coreml/.python_dependencies/scipy/integrate/tests/test_quadrature.py
new file mode 100644
index 00000000..00f31c88
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/integrate/tests/test_quadrature.py
@@ -0,0 +1,397 @@
+import pytest
+import numpy as np
+from numpy import cos, sin, pi
+from numpy.testing import (assert_equal, assert_almost_equal, assert_allclose,
+                           assert_, suppress_warnings)
+
+from scipy.integrate import (quadrature, romberg, romb, newton_cotes,
+                             cumulative_trapezoid, cumtrapz, trapz, trapezoid,
+                             quad, simpson, simps, fixed_quad, AccuracyWarning)
+from scipy.integrate._quadrature import _qmc_quad as qmc_quad
+from scipy import stats, special as sc
+
+
+class TestFixedQuad:
+    def test_scalar(self):
+        n = 4
+        expected = 1/(2*n)
+        got, _ = fixed_quad(lambda x: x**(2*n - 1), 0, 1, n=n)
+        # quadrature exact for this input
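+        # (Added note: n-point Gauss-Legendre quadrature is exact for
+        # polynomials of degree <= 2*n - 1, and the integrand here has
+        # exactly that borderline degree.)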
+        assert_allclose(got, expected, rtol=1e-12)
+
+    def test_vector(self):
+        n = 4
+        p = np.arange(1, 2*n)
+        expected = 1/(p + 1)
+        got, _ = fixed_quad(lambda x: x**p[:, None], 0, 1, n=n)
+        assert_allclose(got, expected, rtol=1e-12)
+
+
+class TestQuadrature:
+    def quad(self, x, a, b, args):
+        raise NotImplementedError
+
+    def test_quadrature(self):
+        # Typical function with two extra arguments:
+        def myfunc(x, n, z):       # Bessel function integrand
+            return cos(n*x-z*sin(x))/pi
+        val, err = quadrature(myfunc, 0, pi, (2, 1.8))
+        table_val = 0.30614353532540296487
+        assert_almost_equal(val, table_val, decimal=7)
+
+    def test_quadrature_rtol(self):
+        def myfunc(x, n, z):       # Bessel function integrand
+            return 1e90 * cos(n*x-z*sin(x))/pi
+        val, err = quadrature(myfunc, 0, pi, (2, 1.8), rtol=1e-10)
+        table_val = 1e90 * 0.30614353532540296487
+        assert_allclose(val, table_val, rtol=1e-10)
+
+    def test_quadrature_miniter(self):
+        # Typical function with two extra arguments:
+        def myfunc(x, n, z):       # Bessel function integrand
+            return cos(n*x-z*sin(x))/pi
+        table_val = 0.30614353532540296487
+        for miniter in [5, 52]:
+            val, err = quadrature(myfunc, 0, pi, (2, 1.8), miniter=miniter)
+            assert_almost_equal(val, table_val, decimal=7)
+            assert_(err < 1.0)
+
+    def test_quadrature_single_args(self):
+        def myfunc(x, n):
+            return 1e90 * cos(n*x-1.8*sin(x))/pi
+        val, err = quadrature(myfunc, 0, pi, args=2, rtol=1e-10)
+        table_val = 1e90 * 0.30614353532540296487
+        assert_allclose(val, table_val, rtol=1e-10)
+
+    def test_romberg(self):
+        # Typical function with two extra arguments:
+        def myfunc(x, n, z):       # Bessel function integrand
+            return cos(n*x-z*sin(x))/pi
+        val = romberg(myfunc, 0, pi, args=(2, 1.8))
+        table_val = 0.30614353532540296487
+        assert_almost_equal(val, table_val, decimal=7)
+
+    def test_romberg_rtol(self):
+        # Typical function with two extra arguments:
+        def myfunc(x, n, z):       # Bessel function integrand
+            return 1e19*cos(n*x-z*sin(x))/pi
+        val = romberg(myfunc, 0, pi, args=(2, 1.8), rtol=1e-10)
+        table_val = 1e19*0.30614353532540296487
+        assert_allclose(val, table_val, rtol=1e-10)
+
+    def test_romb(self):
+        assert_equal(romb(np.arange(17)), 128)
+
+    def test_romb_gh_3731(self):
+        # Check that romb makes maximal use of data points
+        x = np.arange(2**4+1)
+        y = np.cos(0.2*x)
+        val = romb(y)
+        val2, err = quad(lambda x: np.cos(0.2*x), x.min(), x.max())
+        assert_allclose(val, val2, rtol=1e-8, atol=0)
+
+        # should be equal to romb with 2**k+1 samples
+        with suppress_warnings() as sup:
+            sup.filter(AccuracyWarning, "divmax .4. exceeded")
+            val3 = romberg(lambda x: np.cos(0.2*x), x.min(), x.max(), divmax=4)
+        assert_allclose(val, val3, rtol=1e-12, atol=0)
+
+    def test_non_dtype(self):
+        # Check that we work fine with functions returning float
+        import math
+        valmath = romberg(math.sin, 0, 1)
+        expected_val = 0.45969769413185085
+        assert_almost_equal(valmath, expected_val, decimal=7)
+
+    def test_newton_cotes(self):
+        """Test the first few degrees, for evenly spaced points."""
+        n = 1
+        wts, errcoff = newton_cotes(n, 1)
+        assert_equal(wts, n*np.array([0.5, 0.5]))
+        assert_almost_equal(errcoff, -n**3/12.0)
+
+        n = 2
+        wts, errcoff = newton_cotes(n, 1)
+        assert_almost_equal(wts, n*np.array([1.0, 4.0, 1.0])/6.0)
+        assert_almost_equal(errcoff, -n**5/2880.0)
+
+        n = 3
+        wts, errcoff = newton_cotes(n, 1)
+        assert_almost_equal(wts, n*np.array([1.0, 3.0, 3.0, 1.0])/8.0)
+        assert_almost_equal(errcoff, -n**5/6480.0)
+
+        n = 4
+        wts, errcoff = newton_cotes(n, 1)
+        assert_almost_equal(wts, n*np.array([7.0, 32.0, 12.0, 32.0, 7.0])/90.0)
+        assert_almost_equal(errcoff, -n**7/1935360.0)
+
+    def test_newton_cotes2(self):
+        """Test newton_cotes with points that are not evenly spaced."""
+
+        x = np.array([0.0, 1.5, 2.0])
+        y = x**2
+        wts, errcoff = newton_cotes(x)
+        exact_integral = 8.0/3
+        numeric_integral = np.dot(wts, y)
+        assert_almost_equal(numeric_integral, exact_integral)
+
+        x = np.array([0.0, 1.4, 2.1, 3.0])
+        y = x**2
+        wts, errcoff = newton_cotes(x)
+        exact_integral = 9.0
+        numeric_integral = np.dot(wts, y)
+        assert_almost_equal(numeric_integral, exact_integral)
+
+    def test_simpson(self):
+        y = np.arange(17)
+        assert_equal(simpson(y), 128)
+        assert_equal(simpson(y, dx=0.5), 64)
+        assert_equal(simpson(y, x=np.linspace(0, 4, 17)), 32)
+
+        y = np.arange(4)
+        x = 2**y
+        assert_equal(simpson(y, x=x, even='avg'), 13.875)
+        assert_equal(simpson(y, x=x, even='first'), 13.75)
+        assert_equal(simpson(y, x=x, even='last'), 14)
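+        # ('avg' is the mean of the 'first' and 'last' estimates:
+        #  13.875 == (13.75 + 14) / 2.)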
+
+        # Tests for checking base case
+        x = np.array([3])
+        y = np.power(x, 2)
+        assert_equal(simpson(y, x=x, axis=0), 0.0)
+        assert_equal(simpson(y, x=x, axis=-1), 0.0)
+
+        x = np.array([3, 3, 3, 3])
+        y = np.power(x, 2)
+        assert_equal(simpson(y, x=x, axis=0), 0.0)
+        assert_equal(simpson(y, x=x, axis=-1), 0.0)
+
+        x = np.array([[1, 2, 4, 8], [1, 2, 4, 8], [1, 2, 4, 8]])
+        y = np.power(x, 2)
+        zero_axis = [0.0, 0.0, 0.0, 0.0]
+        default_axis = [175.75, 175.75, 175.75]
+        assert_equal(simpson(y, x=x, axis=0), zero_axis)
+        assert_equal(simpson(y, x=x, axis=-1), default_axis)
+
+        x = np.array([[1, 2, 4, 8], [1, 2, 4, 8], [1, 8, 16, 32]])
+        y = np.power(x, 2)
+        zero_axis = [0.0, 136.0, 1088.0, 8704.0]
+        default_axis = [175.75, 175.75, 11292.25]
+        assert_equal(simpson(y, x=x, axis=0), zero_axis)
+        assert_equal(simpson(y, x=x, axis=-1), default_axis)
+
+    @pytest.mark.parametrize('droplast', [False, True])
+    def test_simpson_2d_integer_no_x(self, droplast):
+        # The inputs are 2d integer arrays.  The results should be
+        # identical to the results when the inputs are floating point.
+        y = np.array([[2, 2, 4, 4, 8, 8, -4, 5],
+                      [4, 4, 2, -4, 10, 22, -2, 10]])
+        if droplast:
+            y = y[:, :-1]
+        result = simpson(y, axis=-1)
+        expected = simpson(np.array(y, dtype=np.float64), axis=-1)
+        assert_equal(result, expected)
+
+    def test_simps(self):
+        # Basic coverage test for the alias
+        y = np.arange(4)
+        x = 2**y
+        assert_equal(simpson(y, x=x, dx=0.5, even='first'),
+                     simps(y, x=x, dx=0.5, even='first'))
+
+
+class TestCumulative_trapezoid:
+    def test_1d(self):
+        x = np.linspace(-2, 2, num=5)
+        y = x
+        y_int = cumulative_trapezoid(y, x, initial=0)
+        y_expected = [0., -1.5, -2., -1.5, 0.]
+        assert_allclose(y_int, y_expected)
+
+        y_int = cumulative_trapezoid(y, x, initial=None)
+        assert_allclose(y_int, y_expected[1:])
+
+    def test_y_nd_x_nd(self):
+        x = np.arange(3 * 2 * 4).reshape(3, 2, 4)
+        y = x
+        y_int = cumulative_trapezoid(y, x, initial=0)
+        y_expected = np.array([[[0., 0.5, 2., 4.5],
+                                [0., 4.5, 10., 16.5]],
+                               [[0., 8.5, 18., 28.5],
+                                [0., 12.5, 26., 40.5]],
+                               [[0., 16.5, 34., 52.5],
+                                [0., 20.5, 42., 64.5]]])
+
+        assert_allclose(y_int, y_expected)
+
+        # Try with all axes
+        shapes = [(2, 2, 4), (3, 1, 4), (3, 2, 3)]
+        for axis, shape in zip([0, 1, 2], shapes):
+            y_int = cumulative_trapezoid(y, x, initial=3.45, axis=axis)
+            assert_equal(y_int.shape, (3, 2, 4))
+            y_int = cumulative_trapezoid(y, x, initial=None, axis=axis)
+            assert_equal(y_int.shape, shape)
+
+    def test_y_nd_x_1d(self):
+        y = np.arange(3 * 2 * 4).reshape(3, 2, 4)
+        x = np.arange(4)**2
+        # Try with all axes
+        ys_expected = (
+            np.array([[[4., 5., 6., 7.],
+                       [8., 9., 10., 11.]],
+                      [[40., 44., 48., 52.],
+                       [56., 60., 64., 68.]]]),
+            np.array([[[2., 3., 4., 5.]],
+                      [[10., 11., 12., 13.]],
+                      [[18., 19., 20., 21.]]]),
+            np.array([[[0.5, 5., 17.5],
+                       [4.5, 21., 53.5]],
+                      [[8.5, 37., 89.5],
+                       [12.5, 53., 125.5]],
+                      [[16.5, 69., 161.5],
+                       [20.5, 85., 197.5]]]))
+
+        for axis, y_expected in zip([0, 1, 2], ys_expected):
+            y_int = cumulative_trapezoid(y, x=x[:y.shape[axis]], axis=axis,
+                                         initial=None)
+            assert_allclose(y_int, y_expected)
+
+    def test_x_none(self):
+        y = np.linspace(-2, 2, num=5)
+
+        y_int = cumulative_trapezoid(y)
+        y_expected = [-1.5, -2., -1.5, 0.]
+        assert_allclose(y_int, y_expected)
+
+        y_int = cumulative_trapezoid(y, initial=1.23)
+        y_expected = [1.23, -1.5, -2., -1.5, 0.]
+        assert_allclose(y_int, y_expected)
+
+        y_int = cumulative_trapezoid(y, dx=3)
+        y_expected = [-4.5, -6., -4.5, 0.]
+        assert_allclose(y_int, y_expected)
+
+        y_int = cumulative_trapezoid(y, dx=3, initial=1.23)
+        y_expected = [1.23, -4.5, -6., -4.5, 0.]
+        assert_allclose(y_int, y_expected)
+
+    def test_cumtrapz(self):
+        # Basic coverage test for the alias
+        x = np.arange(3 * 2 * 4).reshape(3, 2, 4)
+        y = x
+        assert_allclose(cumulative_trapezoid(y, x, dx=0.5, axis=0, initial=0),
+                        cumtrapz(y, x, dx=0.5, axis=0, initial=0),
+                        rtol=1e-14)
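+
+    # Consistency sketch (an illustrative addition, not in the original
+    # suite): the final entry of the cumulative integral equals the plain
+    # trapezoid integral over the same samples.
+    def test_matches_trapezoid_sketch(self):
+        x = np.linspace(-2, 2, num=5)
+        y = x ** 2
+        assert_allclose(cumulative_trapezoid(y, x)[-1], trapezoid(y, x))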
+
+
+class TestTrapezoid():
+    """This function is tested in NumPy more extensive, just do some
+    basic due diligence here."""
+    def test_trapezoid(self):
+        y = np.arange(17)
+        assert_equal(trapezoid(y), 128)
+        assert_equal(trapezoid(y, dx=0.5), 64)
+        assert_equal(trapezoid(y, x=np.linspace(0, 4, 17)), 32)
+
+        y = np.arange(4)
+        x = 2**y
+        assert_equal(trapezoid(y, x=x, dx=0.1), 13.5)
+
+    def test_trapz(self):
+        # Basic coverage test for the alias
+        y = np.arange(4)
+        x = 2**y
+        assert_equal(trapezoid(y, x=x, dx=0.5, axis=0),
+                     trapz(y, x=x, dx=0.5, axis=0))
+
+
+class TestQMCQuad():
+    def test_input_validation(self):
+        message = "`func` must be callable."
+        with pytest.raises(TypeError, match=message):
+            qmc_quad("a duck", [0, 0], [1, 1])
+
+        message = "`func` must evaluate the integrand at points..."
+        with pytest.raises(ValueError, match=message):
+            qmc_quad(lambda: 1, [0, 0], [1, 1])
+
+        def func(x):
+            assert x.ndim == 1
+            return np.sum(x)
+        message = "Exception encountered when attempting vectorized call..."
+        with pytest.warns(UserWarning, match=message):
+            qmc_quad(func, [0, 0], [1, 1])
+
+        message = "`n_points` must be an integer."
+        with pytest.raises(TypeError, match=message):
+            qmc_quad(lambda x: 1, [0, 0], [1, 1], n_points=1024.5)
+
+        message = "`n_estimates` must be an integer."
+        with pytest.raises(TypeError, match=message):
+            qmc_quad(lambda x: 1, [0, 0], [1, 1], n_estimates=8.5)
+
+        message = "`qrng` must be an instance of scipy.stats.qmc.QMCEngine."
+        with pytest.raises(TypeError, match=message):
+            qmc_quad(lambda x: 1, [0, 0], [1, 1], qrng="a duck")
+
+        message = "`qrng` must be initialized with dimensionality equal to "
+        with pytest.raises(ValueError, match=message):
+            qmc_quad(lambda x: 1, [0, 0], [1, 1], qrng=stats.qmc.Sobol(1))
+
+        message = r"`log` must be boolean \(`True` or `False`\)."
+        with pytest.raises(TypeError, match=message):
+            qmc_quad(lambda x: 1, [0, 0], [1, 1], log=10)
+
+    def basic_test(self, n_points=2**8, n_estimates=8, signs=np.ones(2)):
+
+        ndim = 2
+        mean = np.zeros(ndim)
+        cov = np.eye(ndim)
+
+        def func(x):
+            return stats.multivariate_normal.pdf(x, mean, cov)
+
+        rng = np.random.default_rng(2879434385674690281)
+        qrng = stats.qmc.Sobol(ndim, seed=rng)
+        a = np.zeros(ndim)
+        b = np.ones(ndim) * signs
+        res = qmc_quad(func, a, b, n_points=n_points,
+                       n_estimates=n_estimates, args=(mean, cov), qrng=qrng)
+        ref = stats.multivariate_normal.cdf(b, mean, cov, lower_limit=a)
+        atol = sc.stdtrit(n_estimates-1, 0.995) * res.standard_error  # 99% CI
+        assert_allclose(res.integral, ref, atol=atol)
+        assert np.prod(signs)*res.integral > 0
+
+        rng = np.random.default_rng(2879434385674690281)
+        qrng = stats.qmc.Sobol(ndim, seed=rng)
+        logres = qmc_quad(lambda *args: np.log(func(*args)), a, b,
+                          n_points=n_points, n_estimates=n_estimates,
+                          args=(mean, cov), log=True, qrng=qrng)
+        assert_allclose(np.exp(logres.integral), res.integral)
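+        # (Added note: with log=True the result is log(integral), so a
+        # negative integral shows up as a complex log whose imaginary part
+        # is pi -- checked below.)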
+        assert np.imag(logres.integral) == (np.pi if np.prod(signs) < 0 else 0)
+
+    @pytest.mark.parametrize("n_points", [2**8, 2**12])
+    @pytest.mark.parametrize("n_estimates", [8, 16])
+    def test_basic(self, n_points, n_estimates):
+        self.basic_test(n_points, n_estimates)
+
+    @pytest.mark.parametrize("signs", [[1, 1], [-1, -1], [-1, 1], [1, -1]])
+    def test_sign(self, signs):
+        self.basic_test(signs=signs)
+
+    @pytest.mark.parametrize("log", [False, True])
+    def test_zero(self, log):
+        message = "A lower limit was equal to an upper limit, so"
+        with pytest.warns(UserWarning, match=message):
+            res = qmc_quad(lambda x: 1, [0, 0], [0, 1], log=log)
+        assert res.integral == (-np.inf if log else 0)
+        assert res.standard_error == 0
+
+    def test_flexible_input(self):
+        # check that qrng is not required
+        # also checks that for 1d problems, a and b can be scalars
+        def func(x):
+            return stats.norm.pdf(x, scale=2)
+
+        res = qmc_quad(func, 0, 1)
+        ref = stats.norm.cdf(1, scale=2) - stats.norm.cdf(0, scale=2)
+        assert_allclose(res.integral, ref, 1e-2)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/integrate/vode.py b/__packaged__/coreml/.python_dependencies/scipy/integrate/vode.py
new file mode 100644
index 00000000..03a02562
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/integrate/vode.py
@@ -0,0 +1,28 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+
+
+import warnings
+from . import _vode  # type: ignore
+
+
+__all__ = [  # noqa: F822
+    'dvode',
+    'zvode'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.integrate.vode is deprecated and has no attribute "
+            f"{name}.")
+
+    warnings.warn("The `scipy.integrate.vode` namespace is deprecated "
+                  "and will be removed in SciPy v2.0.0.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_vode, name)
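+
+
+# Usage sketch (illustrative comment, not part of the shipped module):
+# accessing any public name through this namespace first emits the
+# DeprecationWarning above and then resolves against the private module:
+#
+#     import warnings
+#     from scipy.integrate import vode
+#     with warnings.catch_warnings(record=True) as w:
+#         warnings.simplefilter("always")
+#         vode.dvode  # triggers __getattr__
+#     assert issubclass(w[-1].category, DeprecationWarning)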
diff --git a/__packaged__/coreml/.python_dependencies/scipy/interpolate/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/interpolate/__init__.py
new file mode 100644
index 00000000..cc10acce
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/interpolate/__init__.py
@@ -0,0 +1,199 @@
+"""
+========================================
+Interpolation (:mod:`scipy.interpolate`)
+========================================
+
+.. currentmodule:: scipy.interpolate
+
+Sub-package for objects used in interpolation.
+
+As listed below, this sub-package contains spline functions and classes,
+1-D and multidimensional (univariate and multivariate)
+interpolation classes, Lagrange and Taylor polynomial interpolators, and
+wrappers for `FITPACK <http://www.netlib.org/dierckx/>`__
+and DFITPACK functions.
+
+Univariate interpolation
+========================
+
+.. autosummary::
+   :toctree: generated/
+
+   interp1d
+   BarycentricInterpolator
+   KroghInterpolator
+   barycentric_interpolate
+   krogh_interpolate
+   pchip_interpolate
+   CubicHermiteSpline
+   PchipInterpolator
+   Akima1DInterpolator
+   CubicSpline
+   PPoly
+   BPoly
+
+
+Multivariate interpolation
+==========================
+
+Unstructured data:
+
+.. autosummary::
+   :toctree: generated/
+
+   griddata
+   LinearNDInterpolator
+   NearestNDInterpolator
+   CloughTocher2DInterpolator
+   RBFInterpolator
+   Rbf
+   interp2d
+
+For data on a grid:
+
+.. autosummary::
+   :toctree: generated/
+
+   interpn
+   RegularGridInterpolator
+   RectBivariateSpline
+
+.. seealso::
+
+    `scipy.ndimage.map_coordinates`
+
+Tensor product polynomials:
+
+.. autosummary::
+   :toctree: generated/
+
+   NdPPoly
+
+
+1-D Splines
+===========
+
+.. autosummary::
+   :toctree: generated/
+
+   BSpline
+   make_interp_spline
+   make_lsq_spline
+   make_smoothing_spline
+
+Functional interface to FITPACK routines:
+
+.. autosummary::
+   :toctree: generated/
+
+   splrep
+   splprep
+   splev
+   splint
+   sproot
+   spalde
+   splder
+   splantider
+   insert
+
+Object-oriented FITPACK interface:
+
+.. autosummary::
+   :toctree: generated/
+
+   UnivariateSpline
+   InterpolatedUnivariateSpline
+   LSQUnivariateSpline
+
+
+
+2-D Splines
+===========
+
+For data on a grid:
+
+.. autosummary::
+   :toctree: generated/
+
+   RectBivariateSpline
+   RectSphereBivariateSpline
+
+For unstructured data:
+
+.. autosummary::
+   :toctree: generated/
+
+   BivariateSpline
+   SmoothBivariateSpline
+   SmoothSphereBivariateSpline
+   LSQBivariateSpline
+   LSQSphereBivariateSpline
+
+Low-level interface to FITPACK functions:
+
+.. autosummary::
+   :toctree: generated/
+
+   bisplrep
+   bisplev
+
+Additional tools
+================
+
+.. autosummary::
+   :toctree: generated/
+
+   lagrange
+   approximate_taylor_polynomial
+   pade
+
+.. seealso::
+
+   `scipy.ndimage.map_coordinates`,
+   `scipy.ndimage.spline_filter`,
+   `scipy.signal.resample`,
+   `scipy.signal.bspline`,
+   `scipy.signal.gauss_spline`,
+   `scipy.signal.qspline1d`,
+   `scipy.signal.cspline1d`,
+   `scipy.signal.qspline1d_eval`,
+   `scipy.signal.cspline1d_eval`,
+   `scipy.signal.qspline2d`,
+   `scipy.signal.cspline2d`.
+
+``pchip`` is an alias of `PchipInterpolator` for backward compatibility
+(should not be used in new code).
+"""
+from ._interpolate import *
+from ._fitpack_py import *
+
+# New interface to fitpack library:
+from ._fitpack2 import *
+
+from ._rbf import Rbf
+
+from ._rbfinterp import *
+
+from ._polyint import *
+
+from ._cubic import *
+
+from ._ndgriddata import *
+
+from ._bsplines import *
+
+from ._pade import *
+
+from ._rgi import *
+
+# Deprecated namespaces, to be removed in v2.0.0
+from . import fitpack, fitpack2, interpolate, ndgriddata, polyint, rbf
+
+__all__ = [s for s in dir() if not s.startswith('_')]
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
+
+# Backward compatibility
+pchip = PchipInterpolator
diff --git a/__packaged__/coreml/.python_dependencies/scipy/interpolate/_bsplines.py b/__packaged__/coreml/.python_dependencies/scipy/interpolate/_bsplines.py
new file mode 100644
index 00000000..24506fb1
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/interpolate/_bsplines.py
@@ -0,0 +1,2030 @@
+import operator
+
+import numpy as np
+from numpy.core.multiarray import normalize_axis_index
+from scipy.linalg import (get_lapack_funcs, LinAlgError,
+                          cholesky_banded, cho_solve_banded,
+                          solve, solve_banded)
+from scipy.optimize import minimize_scalar
+from . import _bspl
+from . import _fitpack_impl
+from scipy._lib._util import prod
+from scipy.sparse import csr_array
+from scipy.special import poch
+from itertools import combinations
+
+__all__ = ["BSpline", "make_interp_spline", "make_lsq_spline",
+           "make_smoothing_spline"]
+
+
+def _get_dtype(dtype):
+    """Return np.complex128 for complex dtypes, np.float64 otherwise."""
+    if np.issubdtype(dtype, np.complexfloating):
+        return np.complex_
+    else:
+        return np.float_
+
+
+def _as_float_array(x, check_finite=False):
+    """Convert the input into a C contiguous float array.
+
+    NB: Upcasts half- and single-precision floats to double precision.
+    """
+    x = np.ascontiguousarray(x)
+    dtyp = _get_dtype(x.dtype)
+    x = x.astype(dtyp, copy=False)
+    if check_finite and not np.isfinite(x).all():
+        raise ValueError("Array must not contain infs or nans.")
+    return x
+
+
+def _dual_poly(j, k, t, y):
+    """
+    Dual polynomial of the B-spline B_{j,k,t} -
+    polynomial which is associated with B_{j,k,t}:
+    $p_{j,k}(y) = (y - t_{j+1})(y - t_{j+2})...(y - t_{j+k})$
+    """
+    if k == 0:
+        return 1
+    return np.prod([(y - t[j + i]) for i in range(1, k + 1)])
+
+
+def _diff_dual_poly(j, k, y, d, t):
+    """
+    d-th derivative of the dual polynomial $p_{j,k}(y)$
+    """
+    if d == 0:
+        return _dual_poly(j, k, t, y)
+    if d == k:
+        return poch(1, k)
+    comb = list(combinations(range(j + 1, j + k + 1), d))
+    res = 0
+    for i in range(len(comb) * len(comb[0])):
+        res += np.prod([(y - t[j + p]) for p in range(1, k + 1)
+                        if (j + p) not in comb[i//d]])
+    return res
+
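+# Worked example (added for clarity): for k = 2 and d = 1,
+# p_{j,2}(y) = (y - t[j+1]) * (y - t[j+2]) has derivative
+# (y - t[j+1]) + (y - t[j+2]); each term drops exactly one factor, which is
+# what the ``combinations`` loop in ``_diff_dual_poly`` enumerates.
+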
+
+class BSpline:
+    r"""Univariate spline in the B-spline basis.
+
+    .. math::
+
+        S(x) = \sum_{j=0}^{n-1} c_j  B_{j, k; t}(x)
+
+    where :math:`B_{j, k; t}` are B-spline basis functions of degree `k`
+    and knots `t`.
+
+    Parameters
+    ----------
+    t : ndarray, shape (n+k+1,)
+        knots
+    c : ndarray, shape (>=n, ...)
+        spline coefficients
+    k : int
+        B-spline degree
+    extrapolate : bool or 'periodic', optional
+        whether to extrapolate beyond the base interval, ``t[k] .. t[n]``,
+        or to return nans.
+        If True, extrapolates the first and last polynomial pieces of b-spline
+        functions active on the base interval.
+        If 'periodic', periodic extrapolation is used.
+        Default is True.
+    axis : int, optional
+        Interpolation axis. Default is zero.
+
+    Attributes
+    ----------
+    t : ndarray
+        knot vector
+    c : ndarray
+        spline coefficients
+    k : int
+        spline degree
+    extrapolate : bool
+        If True, extrapolates the first and last polynomial pieces of b-spline
+        functions active on the base interval.
+    axis : int
+        Interpolation axis.
+    tck : tuple
+        A read-only equivalent of ``(self.t, self.c, self.k)``
+
+    Methods
+    -------
+    __call__
+    basis_element
+    derivative
+    antiderivative
+    integrate
+    construct_fast
+    design_matrix
+    from_power_basis
+
+    Notes
+    -----
+    B-spline basis elements are defined via
+
+    .. math::
+
+        B_{i, 0}(x) = 1, \textrm{if $t_i \le x < t_{i+1}$, otherwise $0$,}
+
+        B_{i, k}(x) = \frac{x - t_i}{t_{i+k} - t_i} B_{i, k-1}(x)
+                 + \frac{t_{i+k+1} - x}{t_{i+k+1} - t_{i+1}} B_{i+1, k-1}(x)
+
+    **Implementation details**
+
+    - At least ``k+1`` coefficients are required for a spline of degree `k`,
+      so that ``n >= k+1``. Additional coefficients, ``c[j]`` with
+      ``j > n``, are ignored.
+
+    - B-spline basis elements of degree `k` form a partition of unity on the
+      *base interval*, ``t[k] <= x <= t[n]``.
+
+
+    Examples
+    --------
+
+    Translating the recursive definition of B-splines into Python code, we have:
+
+    >>> def B(x, k, i, t):
+    ...    if k == 0:
+    ...       return 1.0 if t[i] <= x < t[i+1] else 0.0
+    ...    if t[i+k] == t[i]:
+    ...       c1 = 0.0
+    ...    else:
+    ...       c1 = (x - t[i])/(t[i+k] - t[i]) * B(x, k-1, i, t)
+    ...    if t[i+k+1] == t[i+1]:
+    ...       c2 = 0.0
+    ...    else:
+    ...       c2 = (t[i+k+1] - x)/(t[i+k+1] - t[i+1]) * B(x, k-1, i+1, t)
+    ...    return c1 + c2
+
+    >>> def bspline(x, t, c, k):
+    ...    n = len(t) - k - 1
+    ...    assert (n >= k+1) and (len(c) >= n)
+    ...    return sum(c[i] * B(x, k, i, t) for i in range(n))
+
+    Note that this is an inefficient (if straightforward) way to
+    evaluate B-splines --- this spline class does it in an equivalent,
+    but much more efficient way.
+
+    Here we construct a quadratic spline function on the base interval
+    ``2 <= x <= 4`` and compare with the naive way of evaluating the spline:
+
+    >>> from scipy.interpolate import BSpline
+    >>> k = 2
+    >>> t = [0, 1, 2, 3, 4, 5, 6]
+    >>> c = [-1, 2, 0, -1]
+    >>> spl = BSpline(t, c, k)
+    >>> spl(2.5)
+    array(1.375)
+    >>> bspline(2.5, t, c, k)
+    1.375
+
+    Note that outside of the base interval results differ. This is because
+    `BSpline` extrapolates the first and last polynomial pieces of B-spline
+    functions active on the base interval.
+
+    >>> import matplotlib.pyplot as plt
+    >>> import numpy as np
+    >>> fig, ax = plt.subplots()
+    >>> xx = np.linspace(1.5, 4.5, 50)
+    >>> ax.plot(xx, [bspline(x, t, c, k) for x in xx], 'r-', lw=3, label='naive')
+    >>> ax.plot(xx, spl(xx), 'b-', lw=4, alpha=0.7, label='BSpline')
+    >>> ax.grid(True)
+    >>> ax.legend(loc='best')
+    >>> plt.show()
+
+
+    References
+    ----------
+    .. [1] Tom Lyche and Knut Morken, Spline methods,
+        http://www.uio.no/studier/emner/matnat/ifi/INF-MAT5340/v05/undervisningsmateriale/
+    .. [2] Carl de Boor, A practical guide to splines, Springer, 2001.
+
+    """
+
+    def __init__(self, t, c, k, extrapolate=True, axis=0):
+        super().__init__()
+
+        self.k = operator.index(k)
+        self.c = np.asarray(c)
+        self.t = np.ascontiguousarray(t, dtype=np.float64)
+
+        if extrapolate == 'periodic':
+            self.extrapolate = extrapolate
+        else:
+            self.extrapolate = bool(extrapolate)
+
+        n = self.t.shape[0] - self.k - 1
+
+        axis = normalize_axis_index(axis, self.c.ndim)
+
+        # Note that the normalized axis is stored in the object.
+        self.axis = axis
+        if axis != 0:
+            # roll the interpolation axis to be the first one in self.c
+            # More specifically, the target shape for self.c is (n, ...),
+            # and axis !=0 means that we have c.shape (..., n, ...)
+            #                                               ^
+            #                                              axis
+            self.c = np.moveaxis(self.c, axis, 0)
+
+        if k < 0:
+            raise ValueError("Spline order cannot be negative.")
+        if self.t.ndim != 1:
+            raise ValueError("Knot vector must be one-dimensional.")
+        if n < self.k + 1:
+            raise ValueError("Need at least %d knots for degree %d" %
+                    (2*k + 2, k))
+        if (np.diff(self.t) < 0).any():
+            raise ValueError("Knots must be in a non-decreasing order.")
+        if len(np.unique(self.t[k:n+1])) < 2:
+            raise ValueError("Need at least two internal knots.")
+        if not np.isfinite(self.t).all():
+            raise ValueError("Knots should not have nans or infs.")
+        if self.c.ndim < 1:
+            raise ValueError("Coefficients must be at least 1-dimensional.")
+        if self.c.shape[0] < n:
+            raise ValueError("Knots, coefficients and degree are inconsistent.")
+
+        dt = _get_dtype(self.c.dtype)
+        self.c = np.ascontiguousarray(self.c, dtype=dt)
+
+    @classmethod
+    def construct_fast(cls, t, c, k, extrapolate=True, axis=0):
+        """Construct a spline without making checks.
+
+        Accepts the same parameters as the regular constructor. Input arrays
+        `t` and `c` must be of correct shape and dtype.
+        """
+        self = object.__new__(cls)
+        self.t, self.c, self.k = t, c, k
+        self.extrapolate = extrapolate
+        self.axis = axis
+        return self
+
+    @property
+    def tck(self):
+        """Equivalent to ``(self.t, self.c, self.k)`` (read-only).
+        """
+        return self.t, self.c, self.k
+
+    @classmethod
+    def basis_element(cls, t, extrapolate=True):
+        """Return a B-spline basis element ``B(x | t[0], ..., t[k+1])``.
+
+        Parameters
+        ----------
+        t : ndarray, shape (k+2,)
+            internal knots
+        extrapolate : bool or 'periodic', optional
+            whether to extrapolate beyond the base interval, ``t[0] .. t[k+1]``,
+            or to return nans.
+            If 'periodic', periodic extrapolation is used.
+            Default is True.
+
+        Returns
+        -------
+        basis_element : callable
+            A callable representing a B-spline basis element for the knot
+            vector `t`.
+
+        Notes
+        -----
+        The degree of the B-spline, `k`, is inferred from the length of `t` as
+        ``len(t)-2``. The knot vector is constructed by appending and prepending
+        ``k`` elements on each side of the internal knots `t`.
+
+        Examples
+        --------
+
+        Construct a cubic B-spline:
+
+        >>> import numpy as np
+        >>> from scipy.interpolate import BSpline
+        >>> b = BSpline.basis_element([0, 1, 2, 3, 4])
+        >>> k = b.k
+        >>> b.t[k:-k]
+        array([ 0.,  1.,  2.,  3.,  4.])
+        >>> k
+        3
+
+        Construct a quadratic B-spline on ``[0, 1, 1, 2]``, and compare
+        to its explicit form:
+
+        >>> t = [0, 1, 1, 2]
+        >>> b = BSpline.basis_element(t)
+        >>> def f(x):
+        ...     return np.where(x < 1, x*x, (2. - x)**2)
+
+        >>> import matplotlib.pyplot as plt
+        >>> fig, ax = plt.subplots()
+        >>> x = np.linspace(0, 2, 51)
+        >>> ax.plot(x, b(x), 'g', lw=3)
+        >>> ax.plot(x, f(x), 'r', lw=8, alpha=0.4)
+        >>> ax.grid(True)
+        >>> plt.show()
+
+        """
+        k = len(t) - 2
+        t = _as_float_array(t)
+        t = np.r_[(t[0]-1,) * k, t, (t[-1]+1,) * k]
+        c = np.zeros_like(t)
+        c[k] = 1.
+        return cls.construct_fast(t, c, k, extrapolate)
+
+    @classmethod
+    def design_matrix(cls, x, t, k, extrapolate=False):
+        """
+        Returns a design matrix as a CSR format sparse array.
+
+        Parameters
+        ----------
+        x : array_like, shape (n,)
+            Points to evaluate the spline at.
+        t : array_like, shape (nt,)
+            Sorted 1D array of knots.
+        k : int
+            B-spline degree.
+        extrapolate : bool or 'periodic', optional
+            Whether to extrapolate based on the first and last intervals
+            or raise an error. If 'periodic', periodic extrapolation is used.
+            Default is False.
+
+            .. versionadded:: 1.10.0
+
+        Returns
+        -------
+        design_matrix : `csr_array` object
+            Sparse matrix in CSR format where each row contains all the basis
+            elements of the input row (first row = basis elements of x[0],
+            ..., last row = basis elements of x[-1]).
+
+        Examples
+        --------
+        Construct a design matrix for a B-spline
+
+        >>> from scipy.interpolate import make_interp_spline, BSpline
+        >>> import numpy as np
+        >>> x = np.linspace(0, np.pi * 2, 4)
+        >>> y = np.sin(x)
+        >>> k = 3
+        >>> bspl = make_interp_spline(x, y, k=k)
+        >>> design_matrix = bspl.design_matrix(x, bspl.t, k)
+        >>> design_matrix.toarray()
+        [[1.        , 0.        , 0.        , 0.        ],
+        [0.2962963 , 0.44444444, 0.22222222, 0.03703704],
+        [0.03703704, 0.22222222, 0.44444444, 0.2962963 ],
+        [0.        , 0.        , 0.        , 1.        ]]
+
+        Construct a design matrix for some vector of knots
+
+        >>> k = 2
+        >>> t = [-1, 0, 1, 2, 3, 4, 5, 6]
+        >>> x = [1, 2, 3, 4]
+        >>> design_matrix = BSpline.design_matrix(x, t, k).toarray()
+        >>> design_matrix
+        [[0.5, 0.5, 0. , 0. , 0. ],
+        [0. , 0.5, 0.5, 0. , 0. ],
+        [0. , 0. , 0.5, 0.5, 0. ],
+        [0. , 0. , 0. , 0.5, 0.5]]
+
+        This result is equivalent to the one created in the sparse format
+
+        >>> c = np.eye(len(t) - k - 1)
+        >>> design_matrix_gh = BSpline(t, c, k)(x)
+        >>> np.allclose(design_matrix, design_matrix_gh, atol=1e-14)
+        True
+
+        Notes
+        -----
+        .. versionadded:: 1.8.0
+
+        In each row of the design matrix all the basis elements are evaluated
+        at the corresponding point (first row - x[0], ..., last row - x[-1]).
+
+        `nt` is the length of the vector of knots: since there are
+        `nt - k - 1` basis elements, `nt` must be at least `2 * k + 2`
+        to have at least `k + 1` basis elements.
+
+        Out of bounds `x` raises a ValueError.
+        """
+        x = _as_float_array(x, True)
+        t = _as_float_array(t, True)
+
+        if extrapolate != 'periodic':
+            extrapolate = bool(extrapolate)
+
+        if k < 0:
+            raise ValueError("Spline order cannot be negative.")
+        if t.ndim != 1 or np.any(t[1:] < t[:-1]):
+            raise ValueError(f"Expect t to be a 1-D sorted array_like, but "
+                             f"got t={t}.")
+        # There are `nt - k - 1` basis elements in a BSpline built on the
+        # vector of knots with length `nt`, so to have at least `k + 1` basis
+        # elements we need to have at least `2 * k + 2` elements in the vector
+        # of knots.
+        if len(t) < 2 * k + 2:
+            raise ValueError(f"Length t is not enough for k={k}.")
+
+        if extrapolate == 'periodic':
+            # With periodic extrapolation we map x to the segment
+            # [t[k], t[n]].
+            n = t.size - k - 1
+            x = t[k] + (x - t[k]) % (t[n] - t[k])
+            extrapolate = False
+        elif not extrapolate and (
+            (min(x) < t[k]) or (max(x) > t[t.shape[0] - k - 1])
+        ):
+            # Checks from `find_interval` function
+            raise ValueError(f'Out of bounds w/ x = {x}.')
+
+        # Compute number of non-zeros of final CSR array in order to determine
+        # the dtype of indices and indptr of the CSR array.
+        n = x.shape[0]
+        nnz = n * (k + 1)
+        if nnz < np.iinfo(np.int32).max:
+            int_dtype = np.int32
+        else:
+            int_dtype = np.int64
+        # Preallocate indptr and indices
+        indices = np.empty(n * (k + 1), dtype=int_dtype)
+        indptr = np.arange(0, (n + 1) * (k + 1), k + 1, dtype=int_dtype)
+
+        # indptr is not passed to Cython as it is already fully computed
+        data, indices = _bspl._make_design_matrix(
+            x, t, k, extrapolate, indices
+        )
+        return csr_array(
+            (data, indices, indptr),
+            shape=(x.shape[0], t.shape[0] - k - 1)
+        )
+
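+    # Usage sketch (illustrative, not part of the class): when the number of
+    # evaluation points equals the number of basis elements, the design
+    # matrix turns interpolation into a sparse linear solve, e.g.::
+    #
+    #     from scipy.sparse.linalg import spsolve
+    #     A = BSpline.design_matrix(x, t, k)
+    #     c = spsolve(A.tocsc(), y)
+    #     spl = BSpline(t, c, k)   # spl(x) ~= y
+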
+    def __call__(self, x, nu=0, extrapolate=None):
+        """
+        Evaluate a spline function.
+
+        Parameters
+        ----------
+        x : array_like
+            points to evaluate the spline at.
+        nu : int, optional
+            derivative to evaluate (default is 0).
+        extrapolate : bool or 'periodic', optional
+            whether to extrapolate based on the first and last intervals
+            or return nans. If 'periodic', periodic extrapolation is used.
+            Default is `self.extrapolate`.
+
+        Returns
+        -------
+        y : array_like
+            Shape is determined by replacing the interpolation axis
+            in the coefficient array with the shape of `x`.
+
+        """
+        if extrapolate is None:
+            extrapolate = self.extrapolate
+        x = np.asarray(x)
+        x_shape, x_ndim = x.shape, x.ndim
+        x = np.ascontiguousarray(x.ravel(), dtype=np.float_)
+
+        # With periodic extrapolation we map x to the segment
+        # [self.t[k], self.t[n]].
+        if extrapolate == 'periodic':
+            n = self.t.size - self.k - 1
+            x = self.t[self.k] + (x - self.t[self.k]) % (self.t[n] -
+                                                         self.t[self.k])
+            extrapolate = False
+
+        out = np.empty((len(x), prod(self.c.shape[1:])), dtype=self.c.dtype)
+        self._ensure_c_contiguous()
+        self._evaluate(x, nu, extrapolate, out)
+        out = out.reshape(x_shape + self.c.shape[1:])
+        if self.axis != 0:
+            # transpose to move the calculated values to the interpolation axis
+            l = list(range(out.ndim))
+            l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:]
+            out = out.transpose(l)
+        return out
+
+    def _evaluate(self, xp, nu, extrapolate, out):
+        _bspl.evaluate_spline(self.t, self.c.reshape(self.c.shape[0], -1),
+                self.k, xp, nu, extrapolate, out)
+
+    def _ensure_c_contiguous(self):
+        """
+        c and t may be modified by the user. The Cython code expects
+        that they are C contiguous.
+
+        """
+        if not self.t.flags.c_contiguous:
+            self.t = self.t.copy()
+        if not self.c.flags.c_contiguous:
+            self.c = self.c.copy()
+
+    def derivative(self, nu=1):
+        """Return a B-spline representing the derivative.
+
+        Parameters
+        ----------
+        nu : int, optional
+            Derivative order.
+            Default is 1.
+
+        Returns
+        -------
+        b : BSpline object
+            A new instance representing the derivative.
+
+        See Also
+        --------
+        splder, splantider
+
+        """
+        c = self.c
+        # pad the c array if needed
+        ct = len(self.t) - len(c)
+        if ct > 0:
+            c = np.r_[c, np.zeros((ct,) + c.shape[1:])]
+        tck = _fitpack_impl.splder((self.t, c, self.k), nu)
+        return self.construct_fast(*tck, extrapolate=self.extrapolate,
+                                    axis=self.axis)
+
+    def antiderivative(self, nu=1):
+        """Return a B-spline representing the antiderivative.
+
+        Parameters
+        ----------
+        nu : int, optional
+            Antiderivative order. Default is 1.
+
+        Returns
+        -------
+        b : BSpline object
+            A new instance representing the antiderivative.
+
+        Notes
+        -----
+        If antiderivative is computed and ``self.extrapolate='periodic'``,
+        it will be set to False for the returned instance. This is done because
+        the antiderivative is no longer periodic and its correct evaluation
+        outside of the initially given x interval is difficult.
+
+        See Also
+        --------
+        splder, splantider
+
+        """
+        c = self.c
+        # pad the c array if needed
+        ct = len(self.t) - len(c)
+        if ct > 0:
+            c = np.r_[c, np.zeros((ct,) + c.shape[1:])]
+        tck = _fitpack_impl.splantider((self.t, c, self.k), nu)
+
+        if self.extrapolate == 'periodic':
+            extrapolate = False
+        else:
+            extrapolate = self.extrapolate
+
+        return self.construct_fast(*tck, extrapolate=extrapolate,
+                                   axis=self.axis)
+
+    def integrate(self, a, b, extrapolate=None):
+        """Compute a definite integral of the spline.
+
+        Parameters
+        ----------
+        a : float
+            Lower limit of integration.
+        b : float
+            Upper limit of integration.
+        extrapolate : bool or 'periodic', optional
+            whether to extrapolate beyond the base interval,
+            ``t[k] .. t[-k-1]``, or take the spline to be zero outside of the
+            base interval. If 'periodic', periodic extrapolation is used.
+            If None (default), use `self.extrapolate`.
+
+        Returns
+        -------
+        I : array_like
+            Definite integral of the spline over the interval ``[a, b]``.
+
+        Examples
+        --------
+        Construct the linear spline ``x if x < 1 else 2 - x`` on the base
+        interval :math:`[0, 2]`, and integrate it
+
+        >>> from scipy.interpolate import BSpline
+        >>> b = BSpline.basis_element([0, 1, 2])
+        >>> b.integrate(0, 1)
+        array(0.5)
+
+        If the integration limits are outside of the base interval, the result
+        is controlled by the `extrapolate` parameter
+
+        >>> b.integrate(-1, 1)
+        array(0.0)
+        >>> b.integrate(-1, 1, extrapolate=False)
+        array(0.5)
+
+        >>> import matplotlib.pyplot as plt
+        >>> fig, ax = plt.subplots()
+        >>> ax.grid(True)
+        >>> ax.axvline(0, c='r', lw=5, alpha=0.5)  # base interval
+        >>> ax.axvline(2, c='r', lw=5, alpha=0.5)
+        >>> xx = [-1, 1, 2]
+        >>> ax.plot(xx, b(xx))
+        >>> plt.show()
+
+        """
+        if extrapolate is None:
+            extrapolate = self.extrapolate
+
+        # Prepare self.t and self.c.
+        self._ensure_c_contiguous()
+
+        # Swap integration bounds if needed.
+        sign = 1
+        if b < a:
+            a, b = b, a
+            sign = -1
+        n = self.t.size - self.k - 1
+
+        if extrapolate != "periodic" and not extrapolate:
+            # Shrink the integration interval, if needed.
+            a = max(a, self.t[self.k])
+            b = min(b, self.t[n])
+
+            if self.c.ndim == 1:
+                # Fast path: use FITPACK's routine
+                # (cf _fitpack_impl.splint).
+                integral = _fitpack_impl.splint(a, b, self.tck)
+                return integral * sign
+
+        out = np.empty((2, prod(self.c.shape[1:])), dtype=self.c.dtype)
+
+        # Compute the antiderivative.
+        c = self.c
+        ct = len(self.t) - len(c)
+        if ct > 0:
+            c = np.r_[c, np.zeros((ct,) + c.shape[1:])]
+        ta, ca, ka = _fitpack_impl.splantider((self.t, c, self.k), 1)
+
+        if extrapolate == 'periodic':
+            # Split the integral into the part over period (can be several
+            # of them) and the remaining part.
+
+            ts, te = self.t[self.k], self.t[n]
+            period = te - ts
+            interval = b - a
+            n_periods, left = divmod(interval, period)
+
+            if n_periods > 0:
+                # Evaluate the difference of antiderivatives.
+                x = np.asarray([ts, te], dtype=np.float_)
+                _bspl.evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
+                                      ka, x, 0, False, out)
+                integral = out[1] - out[0]
+                integral *= n_periods
+            else:
+                integral = np.zeros((1, prod(self.c.shape[1:])),
+                                    dtype=self.c.dtype)
+
+            # Map a to [ts, te], b is always a + left.
+            a = ts + (a - ts) % period
+            b = a + left
+
+            # If b <= te then we need to integrate over [a, b], otherwise
+            # over [a, te] and then from ts over what remains.
+            if b <= te:
+                x = np.asarray([a, b], dtype=np.float_)
+                _bspl.evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
+                                      ka, x, 0, False, out)
+                integral += out[1] - out[0]
+            else:
+                x = np.asarray([a, te], dtype=np.float_)
+                _bspl.evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
+                                      ka, x, 0, False, out)
+                integral += out[1] - out[0]
+
+                x = np.asarray([ts, ts + b - te], dtype=np.float_)
+                _bspl.evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
+                                      ka, x, 0, False, out)
+                integral += out[1] - out[0]
+        else:
+            # Evaluate the difference of antiderivatives.
+            x = np.asarray([a, b], dtype=np.float_)
+            _bspl.evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
+                                  ka, x, 0, extrapolate, out)
+            integral = out[1] - out[0]
+
+        integral *= sign
+        return integral.reshape(ca.shape[1:])
+
+    @classmethod
+    def from_power_basis(cls, pp, bc_type='not-a-knot'):
+        r"""
+        Construct a polynomial in the B-spline basis
+        from a piecewise polynomial in the power basis.
+
+        For now, accepts ``CubicSpline`` instances only.
+
+        Parameters
+        ----------
+        pp : CubicSpline
+            A piecewise polynomial in the power basis, as created
+            by ``CubicSpline``
+        bc_type : string, optional
+            Boundary condition type as in ``CubicSpline``: one of
+            ``not-a-knot``, ``natural``, ``clamped``, or ``periodic``.
+            Necessary for constructing an instance of the ``BSpline`` class.
+            Default is ``not-a-knot``.
+
+        Returns
+        -------
+        b : BSpline object
+            A new instance representing the initial polynomial
+            in the B-spline basis.
+
+        Notes
+        -----
+        .. versionadded:: 1.8.0
+
+        Accepts only ``CubicSpline`` instances for now.
+
+        The algorithm is based on differentiating Marsden's identity [1]:
+        each coefficient of the spline interpolation function in the
+        B-spline basis is computed as follows:
+
+        .. math::
+
+            c_j = \sum_{m=0}^{k} \frac{(k-m)!}{k!}
+                       c_{m,i} (-1)^{k-m} D^m p_{j,k}(x_i)
+
+        where :math:`c_{m, i}` is a coefficient of the ``CubicSpline``, and
+        :math:`D^m p_{j, k}(x_i)` is the m-th derivative of a dual polynomial
+        at :math:`x_i`.
+
+        ``k`` always equals 3 for now.
+
+        The first ``n - 2`` coefficients are computed at :math:`x_i = x_j`, e.g.
+
+        .. math::
+
+            c_1 = \sum_{m=0}^{k} \frac{(k-m)!}{k!} c_{m,1} (-1)^{k-m} D^m p_{j,3}(x_1)
+
+        The last ``nod + 2`` coefficients are computed at ``x[-2]``, where
+        ``nod`` is the number of derivatives at the ends.
+
+        For example, consider :math:`x = [0, 1, 2, 3, 4]`,
+        :math:`y = [1, 1, 1, 1, 1]` and bc_type = ``natural``
+
+        The coefficients of CubicSpline in the power basis:
+
+        :math:`[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0],
+        [0, 0, 0, 0, 0], [1, 1, 1, 1, 1]]`
+
+        The knot vector: :math:`t = [0, 0, 0, 0, 1, 2, 3, 4, 4, 4, 4]`
+
+        In this case
+
+        .. math::
+
+            c_j = \frac{0!}{k!} c_{3, i} k! = c_{3, i} = 1,~j = 0, ..., 6
+
+        References
+        ----------
+        .. [1] Tom Lyche and Knut Morken, Spline Methods, 2005, Section 3.1.2
+
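+        Examples
+        --------
+        A minimal round-trip sketch (illustrative): convert a ``CubicSpline``
+        and check that the B-spline representation agrees with it at the
+        data sites.
+
+        >>> import numpy as np
+        >>> from scipy.interpolate import CubicSpline, BSpline
+        >>> x = np.arange(5, dtype=float)
+        >>> y = x**3 - 2. * x
+        >>> cs = CubicSpline(x, y)
+        >>> b = BSpline.from_power_basis(cs)
+        >>> np.allclose(b(x), cs(x))
+        True
+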
+        """
+        from ._cubic import CubicSpline
+        if not isinstance(pp, CubicSpline):
+            raise NotImplementedError("Only CubicSpline objects are accepted"
+                                      "for now. Got %s instead." % type(pp))
+        x = pp.x
+        coef = pp.c
+        k = pp.c.shape[0] - 1
+        n = x.shape[0]
+
+        if bc_type == 'not-a-knot':
+            t = _not_a_knot(x, k)
+        elif bc_type == 'natural' or bc_type == 'clamped':
+            t = _augknt(x, k)
+        elif bc_type == 'periodic':
+            t = _periodic_knots(x, k)
+        else:
+            raise TypeError('Unknown boundary condition: %s' % bc_type)
+
+        nod = t.shape[0] - (n + k + 1)  # number of derivatives at the ends
+        c = np.zeros(n + nod, dtype=pp.c.dtype)
+        for m in range(k + 1):
+            for i in range(n - 2):
+                c[i] += poch(k + 1, -m) * coef[m, i]\
+                        * np.power(-1, k - m)\
+                        * _diff_dual_poly(i, k, x[i], m, t)
+            for j in range(n - 2, n + nod):
+                c[j] += poch(k + 1, -m) * coef[m, n - 2]\
+                        * np.power(-1, k - m)\
+                        * _diff_dual_poly(j, k, x[n - 2], m, t)
+        return cls.construct_fast(t, c, k, pp.extrapolate, pp.axis)
+
+
+#################################
+#  Interpolating spline helpers #
+#################################
+
+def _not_a_knot(x, k):
+    """Given data x, construct the knot vector w/ not-a-knot BC.
+    cf de Boor, XIII(12)."""
+    x = np.asarray(x)
+    if k % 2 != 1:
+        raise ValueError("Odd degree for now only. Got %s." % k)
+
+    m = (k - 1) // 2
+    t = x[m+1:-m-1]
+    t = np.r_[(x[0],)*(k+1), t, (x[-1],)*(k+1)]
+    return t
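+
+# For example (an illustrative sketch): with x = [0, 1, 2, 3, 4] and k = 3,
+# _not_a_knot(x, k) gives t = [0, 0, 0, 0, 2, 4, 4, 4, 4]: the second and
+# second-to-last data sites are omitted from the interior knots.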
+
+
+def _augknt(x, k):
+    """Construct a knot vector appropriate for the order-k interpolation."""
+    return np.r_[(x[0],)*k, x, (x[-1],)*k]
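+
+# For example (an illustrative sketch): _augknt([0., 1., 2.], 2) gives
+# [0., 0., 0., 1., 2., 2., 2.]: each boundary knot is repeated k more
+# times, for a total end multiplicity of k + 1.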
+
+
+def _convert_string_aliases(deriv, target_shape):
+    if isinstance(deriv, str):
+        if deriv == "clamped":
+            deriv = [(1, np.zeros(target_shape))]
+        elif deriv == "natural":
+            deriv = [(2, np.zeros(target_shape))]
+        else:
+            raise ValueError("Unknown boundary condition : %s" % deriv)
+    return deriv
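+
+# For example (an illustrative sketch): _convert_string_aliases("clamped", ())
+# returns [(1, array(0.))], i.e. a zero first-derivative condition of the
+# requested trailing shape at that edge.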
+
+
+def _process_deriv_spec(deriv):
+    if deriv is not None:
+        try:
+            ords, vals = zip(*deriv)
+        except TypeError as e:
+            msg = ("Derivatives, `bc_type`, should be specified as a pair of "
+                   "iterables of pairs of (order, value).")
+            raise ValueError(msg) from e
+    else:
+        ords, vals = [], []
+    return np.atleast_1d(ords, vals)
+
+def _woodbury_algorithm(A, ur, ll, b, k):
+    '''
+    Solve a cyclic banded linear system with upper right
+    and lower blocks of size ``(k-1) / 2`` using
+    the Woodbury formula
+
+    Parameters
+    ----------
+    A : 2-D array, shape(k, n)
+        Matrix of diagonals of the original matrix (see
+        ``solve_banded`` documentation).
+    ur : 2-D array, shape(bs, bs)
+        Upper right block matrix.
+    ll : 2-D array, shape(bs, bs)
+        Lower left block matrix.
+    b : 1-D array, shape(n,)
+        Vector of constant terms of the system of linear equations.
+    k : int
+        B-spline degree.
+
+    Returns
+    -------
+    c : 1-D array, shape(n,)
+        Solution of the original system of linear equations.
+
+    Notes
+    -----
+    This algorithm works only for systems with a banded matrix A plus
+    a correction term U @ V.T, where the matrix U @ V.T gives the upper
+    right and lower left blocks of A.
+    The system is solved with the following steps:
+        1.  New systems of linear equations are constructed:
+            A @ z_i = u_i,
+            u_i - the i-th column vector of U,
+            i = 1, ..., k - 1
+        2.  Matrix Z is formed from vectors z_i:
+            Z = [ z_1 | z_2 | ... | z_{k - 1} ]
+        3.  Matrix H = (1 + V.T @ Z)^{-1}
+        4.  The system A @ y = b is solved
+        5.  x = y - Z @ (H @ V.T @ y)
+    Also, ``n`` should be greater than ``k``, otherwise corner block
+    elements will intersect with diagonals.
+
+    Examples
+    --------
+    Consider the case of n = 8, k = 5 (size of blocks - 2 x 2).
+    The matrix of a system:       U:          V:
+      x  x  x  *  *  a  b         a b 0 0     0 0 1 0
+      x  x  x  x  *  *  c         0 c 0 0     0 0 0 1
+      x  x  x  x  x  *  *         0 0 0 0     0 0 0 0
+      *  x  x  x  x  x  *         0 0 0 0     0 0 0 0
+      *  *  x  x  x  x  x         0 0 0 0     0 0 0 0
+      d  *  *  x  x  x  x         0 0 d 0     1 0 0 0
+      e  f  *  *  x  x  x         0 0 e f     0 1 0 0
+
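+    A small numerical sketch (illustrative): solve a 4 x 4 cyclic
+    tridiagonal system with unit corner entries (k = 3, so the corner
+    blocks are 1 x 1) and check the residual.
+
+    >>> import numpy as np
+    >>> M = np.array([[3., 1., 0., 1.],
+    ...               [1., 3., 1., 0.],
+    ...               [0., 1., 3., 1.],
+    ...               [1., 0., 1., 3.]])
+    >>> A = np.array([[0., 1., 1., 1.],   # upper diagonal
+    ...               [3., 3., 3., 3.],   # main diagonal
+    ...               [1., 1., 1., 0.]])  # lower diagonal
+    >>> b = np.array([1., 2., 3., 4.])
+    >>> c = _woodbury_algorithm(A, np.array([[1.]]), np.array([[1.]]), b, 3)
+    >>> np.allclose(M @ c, b)
+    True
+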
+    References
+    ----------
+    .. [1] William H. Press, Saul A. Teukolsky, William T. Vetterling
+           and Brian P. Flannery, Numerical Recipes, 2007, Section 2.7.3
+
+    '''
+    k_mod = k - k % 2
+    bs = int((k - 1) / 2) + (k + 1) % 2
+
+    n = A.shape[1] + 1
+    U = np.zeros((n - 1, k_mod))
+    VT = np.zeros((k_mod, n - 1))  # V transpose
+
+    # upper right block
+    U[:bs, :bs] = ur
+    VT[np.arange(bs), np.arange(bs) - bs] = 1
+
+    # lower left block
+    U[-bs:, -bs:] = ll
+    VT[np.arange(bs) - bs, np.arange(bs)] = 1
+
+    Z = solve_banded((bs, bs), A, U)
+
+    H = solve(np.identity(k_mod) + VT @ Z, np.identity(k_mod))
+
+    y = solve_banded((bs, bs), A, b)
+    c = y - Z @ (H @ (VT @ y))
+
+    return c
+
+def _periodic_knots(x, k):
+    '''
+    Returns a vector of knots taken on a circle.
+    '''
+    xc = np.copy(x)
+    n = len(xc)
+    if k % 2 == 0:
+        dx = np.diff(xc)
+        xc[1: -1] -= dx[:-1] / 2
+    dx = np.diff(xc)
+    t = np.zeros(n + 2 * k)
+    t[k: -k] = xc
+    for i in range(0, k):
+        # filling first `k` elements in descending order
+        t[k - i - 1] = t[k - i] - dx[-(i % (n - 1)) - 1]
+        # filling last `k` elements in ascending order
+        t[-k + i] = t[-k + i - 1] + dx[i % (n - 1)]
+    return t
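+
+# For example (an illustrative sketch): _periodic_knots([0, 1, 3], 3) gives
+# t = [-5, -3, -2, 0, 1, 3, 4, 6, 7], i.e. the spacing pattern dx = [1, 2]
+# of the data continued cyclically on both sides.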
+
+
+def _make_interp_per_full_matr(x, y, t, k):
+    '''
+    Returns a solution of a system for B-spline interpolation with periodic
+    boundary conditions. The first ``k - 1`` rows of the matrix are
+    conditions of periodicity (continuity of ``k - 1`` derivatives at the
+    boundary points). The last ``n`` rows are interpolation conditions.
+    RHS is ``k - 1`` zeros and ``n`` ordinates in this case.
+
+    Parameters
+    ----------
+    x : 1-D array, shape (n,)
+        x-coordinates of a given set of points.
+    y : 1-D array, shape (n,)
+        y-coordinates of a given set of points.
+    t : 1-D array, shape(n+2*k,)
+        Vector of knots.
+    k : int
+        B-spline degree.
+
+    Returns
+    -------
+    c : 1-D array, shape (n+k-1,)
+        B-spline coefficients
+
+    Notes
+    -----
+    ``t`` is supposed to be taken on a circle.
+
+    '''
+
+    x, y, t = map(np.asarray, (x, y, t))
+
+    n = x.size
+    # LHS: the collocation matrix + derivatives at edges
+    matr = np.zeros((n + k - 1, n + k - 1))
+
+    # derivatives at x[0] and x[-1]:
+    for i in range(k - 1):
+        bb = _bspl.evaluate_all_bspl(t, k, x[0], k, nu=i + 1)
+        matr[i, : k + 1] += bb
+        bb = _bspl.evaluate_all_bspl(t, k, x[-1], n + k - 1, nu=i + 1)[:-1]
+        matr[i, -k:] -= bb
+
+    # collocation matrix
+    for i in range(n):
+        xval = x[i]
+        # find interval
+        if xval == t[k]:
+            left = k
+        else:
+            left = np.searchsorted(t, xval) - 1
+
+        # fill a row
+        bb = _bspl.evaluate_all_bspl(t, k, xval, left)
+        matr[i + k - 1, left-k:left+1] = bb
+
+    # RHS
+    b = np.r_[[0] * (k - 1), y]
+
+    c = solve(matr, b)
+    return c
+
+def _make_periodic_spline(x, y, t, k, axis):
+    '''
+    Compute the (coefficients of) interpolating B-spline with periodic
+    boundary conditions.
+
+    Parameters
+    ----------
+    x : array_like, shape (n,)
+        Abscissas.
+    y : array_like, shape (n,)
+        Ordinates.
+    k : int
+        B-spline degree.
+    t : array_like, shape (n + 2 * k,).
+        Knots taken on a circle, ``k`` on the left and ``k`` on the right
+        of the vector ``x``.
+
+    Returns
+    -------
+    b : a BSpline object of the degree ``k`` and with knots ``t``.
+
+    Notes
+    -----
+    The original system consists of ``n + k - 1`` equations, where the first
+    ``k - 1`` enforce continuity of the ``k - 1`` derivatives at the edges,
+    while the remaining equations are interpolation conditions (matching all
+    the input points). Due to the special form of the knot vector, it can be
+    proved that in the original system the first and last ``k`` coefficients
+    of the spline are pairwise equal. This follows from the fact that all
+    ``k - 1`` derivatives are equal term by term at the ends and that the
+    matrix of the original system of linear equations is non-degenerate.
+    Hence, the number of equations can be reduced to ``n - 1`` (the first
+    ``k - 1`` equations are eliminated). Another trick of this implementation
+    is a cyclic shift of the B-spline values, made possible by the equality
+    of the ``k`` unknown coefficients. The result is a matrix of the system
+    with upper right and lower left blocks plus ``k`` diagonals, which makes
+    it possible to use the Woodbury formula to speed up the computations.
+
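+    Examples
+    --------
+    A minimal sketch (illustrative), via the public wrapper
+    ``make_interp_spline``: the first ``k - 1`` derivatives match at the
+    two ends.
+
+    >>> import numpy as np
+    >>> from scipy.interpolate import make_interp_spline
+    >>> x = np.linspace(0, 2 * np.pi, 10)
+    >>> y = np.sin(x)
+    >>> b = make_interp_spline(x, y, k=3, bc_type='periodic')
+    >>> np.allclose([b(x[0], 1), b(x[0], 2)], [b(x[-1], 1), b(x[-1], 2)])
+    True
+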
+    '''
+    n = y.shape[0]
+
+    extradim = prod(y.shape[1:])
+    y_new = y.reshape(n, extradim)
+    c = np.zeros((n + k - 1, extradim))
+
+    # n <= k case is solved with full matrix
+    if n <= k:
+        for i in range(extradim):
+            c[:, i] = _make_interp_per_full_matr(x, y_new[:, i], t, k)
+        c = np.ascontiguousarray(c.reshape((n + k - 1,) + y.shape[1:]))
+        return BSpline.construct_fast(t, c, k, extrapolate='periodic', axis=axis)
+
+    nt = len(t) - k - 1
+
+    # size of block elements
+    kul = int(k / 2)
+
+    # kl = ku = k
+    ab = np.zeros((3 * k + 1, nt), dtype=np.float_, order='F')
+
+    # upper right and lower left blocks
+    ur = np.zeros((kul, kul))
+    ll = np.zeros_like(ur)
+
+    # `offset` is used to shift all the non-zero elements to the end of the
+    # matrix
+    _bspl._colloc(x, t, k, ab, offset=k)
+
+    # remove zeros before the matrix
+    ab = ab[-k - (k + 1) % 2:, :]
+
+    # The least elements in rows (except repetitions) are diagonals
+    # of block matrices. Upper right matrix is an upper triangular
+    # matrix while lower left is a lower triangular one.
+    for i in range(kul):
+        ur += np.diag(ab[-i - 1, i: kul], k=i)
+        ll += np.diag(ab[i, -kul - (k % 2): n - 1 + 2 * kul - i], k=-i)
+
+    # remove elements that occur in the last point
+    # (first and last points are equivalent)
+    A = ab[:, kul: -k + kul]
+
+    for i in range(extradim):
+        cc = _woodbury_algorithm(A, ur, ll, y_new[:, i][:-1], k)
+        c[:, i] = np.concatenate((cc[-kul:], cc, cc[:kul + k % 2]))
+    c = np.ascontiguousarray(c.reshape((n + k - 1,) + y.shape[1:]))
+    return BSpline.construct_fast(t, c, k, extrapolate='periodic', axis=axis)
+
+def make_interp_spline(x, y, k=3, t=None, bc_type=None, axis=0,
+                       check_finite=True):
+    """Compute the (coefficients of) interpolating B-spline.
+
+    Parameters
+    ----------
+    x : array_like, shape (n,)
+        Abscissas.
+    y : array_like, shape (n, ...)
+        Ordinates.
+    k : int, optional
+        B-spline degree. Default is cubic, ``k = 3``.
+    t : array_like, shape (nt + k + 1,), optional.
+        Knots.
+        The number of knots needs to agree with the number of data points and
+        the number of derivatives at the edges. Specifically, ``nt - n`` must
+        equal ``len(deriv_l) + len(deriv_r)``.
+    bc_type : 2-tuple or None
+        Boundary conditions.
+        Default is None, which means choosing the boundary conditions
+        automatically. Otherwise, it must be a length-two tuple where the first
+        element (``deriv_l``) sets the boundary conditions at ``x[0]`` and
+        the second element (``deriv_r``) sets the boundary conditions at
+        ``x[-1]``. Each of these must be an iterable of pairs
+        ``(order, value)`` which gives the values of derivatives of specified
+        orders at the given edge of the interpolation interval.
+        Alternatively, the following string aliases are recognized:
+
+        * ``"clamped"``: The first derivatives at the ends are zero. This is
+          equivalent to ``bc_type=([(1, 0.0)], [(1, 0.0)])``.
+        * ``"natural"``: The second derivatives at ends are zero. This is
+          equivalent to ``bc_type=([(2, 0.0)], [(2, 0.0)])``.
+        * ``"not-a-knot"`` (default): The first and second segments are the
+          same polynomial. This is equivalent to having ``bc_type=None``.
+        * ``"periodic"``: The values and the first ``k-1`` derivatives at the
+          ends are equivalent.
+
+    axis : int, optional
+        Interpolation axis. Default is 0.
+    check_finite : bool, optional
+        Whether to check that the input arrays contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+        Default is True.
+
+    Returns
+    -------
+    b : a BSpline object of the degree ``k`` and with knots ``t``.
+
+    Examples
+    --------
+
+    Use cubic interpolation on Chebyshev nodes:
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> def cheb_nodes(N):
+    ...     jj = 2.*np.arange(N) + 1
+    ...     x = np.cos(np.pi * jj / 2 / N)[::-1]
+    ...     return x
+
+    >>> x = cheb_nodes(20)
+    >>> y = np.sqrt(1 - x**2)
+
+    >>> from scipy.interpolate import BSpline, make_interp_spline
+    >>> b = make_interp_spline(x, y)
+    >>> np.allclose(b(x), y)
+    True
+
+    Note that the default is a cubic spline with a not-a-knot boundary condition
+
+    >>> b.k
+    3
+
+    Here we use a 'natural' spline, with zero 2nd derivatives at edges:
+
+    >>> l, r = [(2, 0.0)], [(2, 0.0)]
+    >>> b_n = make_interp_spline(x, y, bc_type=(l, r))  # or, bc_type="natural"
+    >>> np.allclose(b_n(x), y)
+    True
+    >>> x0, x1 = x[0], x[-1]
+    >>> np.allclose([b_n(x0, 2), b_n(x1, 2)], [0, 0])
+    True
+
+    Interpolation of parametric curves is also supported. As an example, we
+    compute a discretization of a snail curve in polar coordinates
+
+    >>> phi = np.linspace(0, 2.*np.pi, 40)
+    >>> r = 0.3 + np.cos(phi)
+    >>> x, y = r*np.cos(phi), r*np.sin(phi)  # convert to Cartesian coordinates
+
+    Build an interpolating curve, parameterizing it by the angle
+
+    >>> spl = make_interp_spline(phi, np.c_[x, y])
+
+    Evaluate the interpolant on a finer grid (note that we transpose the result
+    to unpack it into a pair of x- and y-arrays)
+
+    >>> phi_new = np.linspace(0, 2.*np.pi, 100)
+    >>> x_new, y_new = spl(phi_new).T
+
+    Plot the result
+
+    >>> plt.plot(x, y, 'o')
+    >>> plt.plot(x_new, y_new, '-')
+    >>> plt.show()
+
+    Build a B-spline curve with 2 dimensional y
+
+    >>> x = np.linspace(0, 2*np.pi, 10)
+    >>> y = np.array([np.sin(x), np.cos(x)])
+
+    The periodic condition is satisfied because the y coordinates at the
+    ends are equal
+
+    >>> ax = plt.axes(projection='3d')
+    >>> xx = np.linspace(0, 2*np.pi, 100)
+    >>> bspl = make_interp_spline(x, y, k=5, bc_type='periodic', axis=1)
+    >>> ax.plot3D(xx, *bspl(xx))
+    >>> ax.scatter3D(x, *y, color='red')
+    >>> plt.show()
+
+    See Also
+    --------
+    BSpline : base class representing the B-spline objects
+    CubicSpline : a cubic spline in the polynomial basis
+    make_lsq_spline : a similar factory function for spline fitting
+    UnivariateSpline : a wrapper over FITPACK spline fitting routines
+    splrep : a wrapper over FITPACK spline fitting routines
+
+    """
+    # convert string aliases for the boundary conditions
+    if bc_type is None or bc_type == 'not-a-knot' or bc_type == 'periodic':
+        deriv_l, deriv_r = None, None
+    elif isinstance(bc_type, str):
+        deriv_l, deriv_r = bc_type, bc_type
+    else:
+        try:
+            deriv_l, deriv_r = bc_type
+        except TypeError as e:
+            raise ValueError("Unknown boundary condition: %s" % bc_type) from e
+
+    y = np.asarray(y)
+
+    axis = normalize_axis_index(axis, y.ndim)
+
+    x = _as_float_array(x, check_finite)
+    y = _as_float_array(y, check_finite)
+
+    y = np.moveaxis(y, axis, 0)    # now internally interp axis is zero
+
+    # sanity check the input
+    if bc_type == 'periodic' and not np.allclose(y[0], y[-1], atol=1e-15):
+        raise ValueError("First and last points does not match while "
+                         "periodic case expected")
+    if x.size != y.shape[0]:
+        raise ValueError('Shapes of x {} and y {} are incompatible'
+                         .format(x.shape, y.shape))
+    if np.any(x[1:] == x[:-1]):
+        raise ValueError("Expect x to not have duplicates")
+    if x.ndim != 1 or np.any(x[1:] < x[:-1]):
+        raise ValueError("Expect x to be a 1D strictly increasing sequence.")
+
+    # special-case k=0 right away
+    if k == 0:
+        if any(_ is not None for _ in (t, deriv_l, deriv_r)):
+            raise ValueError("Too much info for k=0: t and bc_type can only "
+                             "be None.")
+        t = np.r_[x, x[-1]]
+        c = np.asarray(y)
+        c = np.ascontiguousarray(c, dtype=_get_dtype(c.dtype))
+        return BSpline.construct_fast(t, c, k, axis=axis)
+
+    # special-case k=1 (e.g., Lyche and Morken, Eq.(2.16))
+    if k == 1 and t is None:
+        if not (deriv_l is None and deriv_r is None):
+            raise ValueError("Too much info for k=1: bc_type can only be None.")
+        t = np.r_[x[0], x, x[-1]]
+        c = np.asarray(y)
+        c = np.ascontiguousarray(c, dtype=_get_dtype(c.dtype))
+        return BSpline.construct_fast(t, c, k, axis=axis)
+
+    k = operator.index(k)
+
+    if bc_type == 'periodic' and t is not None:
+        raise NotImplementedError("For periodic case t is constructed "
+                         "automatically and can not be passed manually")
+
+    # come up with a sensible knot vector, if needed
+    if t is None:
+        if deriv_l is None and deriv_r is None:
+            if bc_type == 'periodic':
+                t = _periodic_knots(x, k)
+            elif k == 2:
+                # OK, it's a bit ad hoc: Greville sites + omit
+                # 2nd and 2nd-to-last points, a la not-a-knot
+                t = (x[1:] + x[:-1]) / 2.
+                t = np.r_[(x[0],)*(k+1),
+                           t[1:-1],
+                           (x[-1],)*(k+1)]
+            else:
+                t = _not_a_knot(x, k)
+        else:
+            t = _augknt(x, k)
+
+    t = _as_float_array(t, check_finite)
+
+    if k < 0:
+        raise ValueError("Expect non-negative k.")
+    if t.ndim != 1 or np.any(t[1:] < t[:-1]):
+        raise ValueError("Expect t to be a 1-D sorted array_like.")
+    if t.size < x.size + k + 1:
+        raise ValueError('Got %d knots, need at least %d.' %
+                         (t.size, x.size + k + 1))
+    if (x[0] < t[k]) or (x[-1] > t[-k]):
+        raise ValueError('Out of bounds w/ x = %s.' % x)
+
+    if bc_type == 'periodic':
+        return _make_periodic_spline(x, y, t, k, axis)
+
+    # Here : deriv_l, r = [(nu, value), ...]
+    deriv_l = _convert_string_aliases(deriv_l, y.shape[1:])
+    deriv_l_ords, deriv_l_vals = _process_deriv_spec(deriv_l)
+    nleft = deriv_l_ords.shape[0]
+
+    deriv_r = _convert_string_aliases(deriv_r, y.shape[1:])
+    deriv_r_ords, deriv_r_vals = _process_deriv_spec(deriv_r)
+    nright = deriv_r_ords.shape[0]
+
+    # have `n` conditions for `nt` coefficients; need nt-n derivatives
+    n = x.size
+    nt = t.size - k - 1
+
+    if nt - n != nleft + nright:
+        raise ValueError("The number of derivatives at boundaries does not "
+                         "match: expected %s, got %s+%s" % (nt-n, nleft, nright))
+
+    # bail out if the `y` array is zero-sized
+    if y.size == 0:
+        c = np.zeros((nt,) + y.shape[1:], dtype=float)
+        return BSpline.construct_fast(t, c, k, axis=axis)
+
+    # set up the LHS: the collocation matrix + derivatives at boundaries
+    kl = ku = k
+    ab = np.zeros((2*kl + ku + 1, nt), dtype=np.float_, order='F')
+    _bspl._colloc(x, t, k, ab, offset=nleft)
+    if nleft > 0:
+        _bspl._handle_lhs_derivatives(t, k, x[0], ab, kl, ku, deriv_l_ords)
+    if nright > 0:
+        _bspl._handle_lhs_derivatives(t, k, x[-1], ab, kl, ku, deriv_r_ords,
+                                offset=nt-nright)
+
+    # set up the RHS: values to interpolate (+ derivative values, if any)
+    extradim = prod(y.shape[1:])
+    rhs = np.empty((nt, extradim), dtype=y.dtype)
+    if nleft > 0:
+        rhs[:nleft] = deriv_l_vals.reshape(-1, extradim)
+    rhs[nleft:nt - nright] = y.reshape(-1, extradim)
+    if nright > 0:
+        rhs[nt - nright:] = deriv_r_vals.reshape(-1, extradim)
+
+    # solve Ab @ x = rhs; this is the relevant part of linalg.solve_banded
+    if check_finite:
+        ab, rhs = map(np.asarray_chkfinite, (ab, rhs))
+    gbsv, = get_lapack_funcs(('gbsv',), (ab, rhs))
+    lu, piv, c, info = gbsv(kl, ku, ab, rhs,
+            overwrite_ab=True, overwrite_b=True)
+
+    if info > 0:
+        raise LinAlgError("Collocation matrix is singular.")
+    elif info < 0:
+        raise ValueError('illegal value in %d-th argument of internal gbsv' % -info)
+
+    c = np.ascontiguousarray(c.reshape((nt,) + y.shape[1:]))
+    return BSpline.construct_fast(t, c, k, axis=axis)
+
+
+def make_lsq_spline(x, y, t, k=3, w=None, axis=0, check_finite=True):
+    r"""Compute the (coefficients of) an LSQ (Least SQuared) based
+    fitting B-spline.
+
+    The result is a linear combination
+
+    .. math::
+
+            S(x) = \sum_j c_j B_j(x; t)
+
+    of the B-spline basis elements, :math:`B_j(x; t)`, which minimizes
+
+    .. math::
+
+        \sum_{j} \left( w_j \times (S(x_j) - y_j) \right)^2
+
+    Parameters
+    ----------
+    x : array_like, shape (m,)
+        Abscissas.
+    y : array_like, shape (m, ...)
+        Ordinates.
+    t : array_like, shape (n + k + 1,).
+        Knots.
+        Knots and data points must satisfy Schoenberg-Whitney conditions.
+    k : int, optional
+        B-spline degree. Default is cubic, ``k = 3``.
+    w : array_like, shape (m,), optional
+        Weights for spline fitting. Must be positive. If ``None``,
+        then weights are all equal.
+        Default is ``None``.
+    axis : int, optional
+        Interpolation axis. Default is zero.
+    check_finite : bool, optional
+        Whether to check that the input arrays contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+        Default is True.
+
+    Returns
+    -------
+    b : a BSpline object of the degree ``k`` with knots ``t``.
+
+    Notes
+    -----
+    The number of data points must be larger than the spline degree ``k``.
+
+    Knots ``t`` must satisfy the Schoenberg-Whitney conditions,
+    i.e., there must be a subset of data points ``x[j]`` such that
+    ``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
+
+    Examples
+    --------
+    Generate some noisy data:
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> rng = np.random.default_rng()
+    >>> x = np.linspace(-3, 3, 50)
+    >>> y = np.exp(-x**2) + 0.1 * rng.standard_normal(50)
+
+    Now fit a smoothing cubic spline with pre-defined internal knots.
+    Here we make the knot vector (k+1)-regular by adding boundary knots:
+
+    >>> from scipy.interpolate import make_lsq_spline, BSpline
+    >>> t = [-1, 0, 1]
+    >>> k = 3
+    >>> t = np.r_[(x[0],)*(k+1),
+    ...           t,
+    ...           (x[-1],)*(k+1)]
+    >>> spl = make_lsq_spline(x, y, t, k)
+
+    For comparison, we also construct an interpolating spline for the same
+    set of data:
+
+    >>> from scipy.interpolate import make_interp_spline
+    >>> spl_i = make_interp_spline(x, y)
+
+    Plot both:
+
+    >>> xs = np.linspace(-3, 3, 100)
+    >>> plt.plot(x, y, 'ro', ms=5)
+    >>> plt.plot(xs, spl(xs), 'g-', lw=3, label='LSQ spline')
+    >>> plt.plot(xs, spl_i(xs), 'b-', lw=3, alpha=0.7, label='interp spline')
+    >>> plt.legend(loc='best')
+    >>> plt.show()
+
+    **NaN handling**: If the input arrays contain ``nan`` values, the result is
+    not useful since the underlying spline fitting routines cannot deal with
+    ``nan``. A workaround is to use zero weights for not-a-number data points:
+
+    >>> y[8] = np.nan
+    >>> w = np.isnan(y)
+    >>> y[w] = 0.
+    >>> tck = make_lsq_spline(x, y, t, w=~w)
+
+    Notice the need to replace a ``nan`` by a numerical value (precise value
+    does not matter as long as the corresponding weight is zero.)
+
+    See Also
+    --------
+    BSpline : base class representing the B-spline objects
+    make_interp_spline : a similar factory function for interpolating splines
+    LSQUnivariateSpline : a FITPACK-based spline fitting routine
+    splrep : a FITPACK-based fitting routine
+
+    """
+    x = _as_float_array(x, check_finite)
+    y = _as_float_array(y, check_finite)
+    t = _as_float_array(t, check_finite)
+    if w is not None:
+        w = _as_float_array(w, check_finite)
+    else:
+        w = np.ones_like(x)
+    k = operator.index(k)
+
+    axis = normalize_axis_index(axis, y.ndim)
+
+    y = np.moveaxis(y, axis, 0)    # now internally interp axis is zero
+
+    if x.ndim != 1 or np.any(x[1:] - x[:-1] <= 0):
+        raise ValueError("Expect x to be a 1-D sorted array_like.")
+    if x.shape[0] < k+1:
+        raise ValueError("Need more x points.")
+    if k < 0:
+        raise ValueError("Expect non-negative k.")
+    if t.ndim != 1 or np.any(t[1:] - t[:-1] < 0):
+        raise ValueError("Expect t to be a 1-D sorted array_like.")
+    if x.size != y.shape[0]:
+        raise ValueError('Shapes of x {} and y {} are incompatible'
+                         .format(x.shape, y.shape))
+    if k > 0 and np.any((x < t[k]) | (x > t[-k])):
+        raise ValueError('Out of bounds w/ x = %s.' % x)
+    if x.size != w.size:
+        raise ValueError('Shapes of x {} and w {} are incompatible'
+                         .format(x.shape, w.shape))
+
+    # number of coefficients
+    n = t.size - k - 1
+
+    # construct A.T @ A and rhs with A the collocation matrix, and
+    # rhs = A.T @ y for solving the LSQ problem  ``A.T @ A @ c = A.T @ y``
+    lower = True
+    extradim = prod(y.shape[1:])
+    ab = np.zeros((k+1, n), dtype=np.float_, order='F')
+    rhs = np.zeros((n, extradim), dtype=y.dtype, order='F')
+    _bspl._norm_eq_lsq(x, t, k,
+                      y.reshape(-1, extradim),
+                      w,
+                      ab, rhs)
+    rhs = rhs.reshape((n,) + y.shape[1:])
+
+    # have observation matrix & rhs, can solve the LSQ problem
+    cho_decomp = cholesky_banded(ab, overwrite_ab=True, lower=lower,
+                                 check_finite=check_finite)
+    c = cho_solve_banded((cho_decomp, lower), rhs, overwrite_b=True,
+                         check_finite=check_finite)
+
+    c = np.ascontiguousarray(c)
+    return BSpline.construct_fast(t, c, k, axis=axis)
+
+
+#############################
+#  Smoothing spline helpers #
+#############################
+
+def _compute_optimal_gcv_parameter(X, wE, y, w):
+    """
+    Returns an optimal regularization parameter from the GCV criteria [1].
+
+    Parameters
+    ----------
+    X : array, shape (5, n)
+        5 bands of the design matrix ``X`` stored in LAPACK banded storage.
+    wE : array, shape (5, n)
+        5 bands of the penalty matrix :math:`W^{-1} E` stored in LAPACK banded
+        storage.
+    y : array, shape (n,)
+        Ordinates.
+    w : array, shape (n,)
+        Vector of weights.
+
+    Returns
+    -------
+    lam : float
+        A regularization parameter that is optimal from the point of view
+        of the GCV criteria.
+
+    Notes
+    -----
+    No checks are performed.
+
+    References
+    ----------
+    .. [1] G. Wahba, "Estimating the smoothing parameter" in Spline models
+        for observational data, Philadelphia, Pennsylvania: Society for
+        Industrial and Applied Mathematics, 1990, pp. 45-65.
+        :doi:`10.1137/1.9781611970128`
+
+    """
+
+    def compute_banded_symmetric_XT_W_Y(X, w, Y):
+        """
+        Assuming that the product :math:`X^T W Y` is symmetric and both ``X``
+        and ``Y`` are 5-banded, compute the unique bands of the product.
+
+        Parameters
+        ----------
+        X : array, shape (5, n)
+            5 bands of the matrix ``X`` stored in LAPACK banded storage.
+        w : array, shape (n,)
+            Array of weights
+        Y : array, shape (5, n)
+            5 bands of the matrix ``Y`` stored in LAPACK banded storage.
+
+        Returns
+        -------
+        res : array, shape (4, n)
+            The result of the product :math:`X^T W Y` stored in the banded way.
+
+        Notes
+        -----
+        Since the matrices ``X`` and ``Y`` are 5-banded, their product
+        :math:`X^T W Y` is 7-banded. It is also symmetric, so we can store
+        only the unique diagonals.
+
+        """
+        # compute W Y
+        W_Y = np.copy(Y)
+
+        W_Y[2] *= w
+        for i in range(2):
+            W_Y[i, 2 - i:] *= w[:-2 + i]
+            W_Y[3 + i, :-1 - i] *= w[1 + i:]
+
+        n = X.shape[1]
+        res = np.zeros((4, n))
+        for i in range(n):
+            for j in range(min(n-i, 4)):
+                res[-j-1, i + j] = sum(X[j:, i] * W_Y[:5-j, i + j])
+        return res
+
+    def compute_b_inv(A):
+        """
+        Invert the 3 central bands of the matrix :math:`A = U^T D^{-1} U`,
+        assuming that ``U`` is a unit upper triangular banded matrix, using
+        an algorithm proposed in [1].
+
+        Parameters
+        ----------
+        A : array, shape (4, n)
+            Matrix to inverse, stored in LAPACK banded storage.
+
+        Returns
+        -------
+        B : array, shape (4, n)
+            3 unique bands of the symmetric matrix that is an inverse to ``A``.
+            The first row is filled with zeros.
+
+        Notes
+        -----
+        The algorithm is based on the Cholesky decomposition and, therefore,
+        if the matrix ``A`` is close to not being positive definite, the
+        function raises a LinAlgError.
+
+        Both matrices ``A`` and ``B`` are stored in LAPACK banded storage.
+
+        References
+        ----------
+        .. [1] M. F. Hutchinson and F. R. de Hoog, "Smoothing noisy data with
+            spline functions," Numerische Mathematik, vol. 47, no. 1,
+            pp. 99-106, 1985.
+            :doi:`10.1007/BF01389878`
+
+        """
+
+        def find_b_inv_elem(i, j, U, D, B):
+            rng = min(3, n - i - 1)
+            rng_sum = 0.
+            if j == 0:
+                # use 2-nd formula from [1]
+                for k in range(1, rng + 1):
+                    rng_sum -= U[-k - 1, i + k] * B[-k - 1, i + k]
+                rng_sum += D[i]
+                B[-1, i] = rng_sum
+            else:
+                # use 1-st formula from [1]
+                for k in range(1, rng + 1):
+                    diag = abs(k - j)
+                    ind = i + min(k, j)
+                    rng_sum -= U[-k - 1, i + k] * B[-diag - 1, ind + diag]
+                B[-j - 1, i + j] = rng_sum
+
+        U = cholesky_banded(A)
+        for i in range(2, 5):
+            U[-i, i-1:] /= U[-1, :-i+1]
+        D = 1. / (U[-1])**2
+        U[-1] /= U[-1]
+
+        n = U.shape[1]
+
+        B = np.zeros(shape=(4, n))
+        for i in range(n - 1, -1, -1):
+            for j in range(min(3, n - i - 1), -1, -1):
+                find_b_inv_elem(i, j, U, D, B)
+        # the first row contains garbage and should be removed
+        B[0] = [0.] * n
+        return B
+
+    def _gcv(lam, X, XtWX, wE, XtE):
+        r"""
+        Computes the generalized cross-validation criteria [1].
+
+        Parameters
+        ----------
+        lam : float, (:math:`\lambda \geq 0`)
+            Regularization parameter.
+        X : array, shape (5, n)
+            Matrix is stored in LAPACK banded storage.
+        XtWX : array, shape (4, n)
+            Product :math:`X^T W X` stored in LAPACK banded storage.
+        wE : array, shape (5, n)
+            Matrix :math:`W^{-1} E` stored in LAPACK banded storage.
+        XtE : array, shape (4, n)
+            Product :math:`X^T E` stored in LAPACK banded storage.
+
+        Returns
+        -------
+        res : float
+            Value of the GCV criteria with the regularization parameter
+            :math:`\lambda`.
+
+        Notes
+        -----
+        Criteria is computed from the formula (1.3.2) [3]:
+
+        .. math::
+
+            GCV(\lambda) = \dfrac{1}{n} \sum\limits_{k = 1}^{n}
+            \dfrac{\left( y_k - f_{\lambda}(x_k) \right)^2}
+            {\left( 1 - \operatorname{tr}(A)/n \right)^2}
+
+        The criteria is discussed in section 1.3 [3].
+
+        The numerator is computed using (2.2.4) [3] and the denominator is
+        computed using an algorithm from [2] (see in the ``compute_b_inv``
+        function).
+
+        References
+        ----------
+        .. [1] G. Wahba, "Estimating the smoothing parameter" in Spline models
+            for observational data, Philadelphia, Pennsylvania: Society for
+            Industrial and Applied Mathematics, 1990, pp. 45-65.
+            :doi:`10.1137/1.9781611970128`
+        .. [2] M. F. Hutchinson and F. R. de Hoog, "Smoothing noisy data with
+            spline functions," Numerische Mathematik, vol. 47, no. 1,
+            pp. 99-106, 1985.
+            :doi:`10.1007/BF01389878`
+        .. [3] E. Zemlyanoy, "Generalized cross-validation smoothing splines",
+            BSc thesis, 2022 (in Russian).
+
+        """
+        # Compute the numerator from (2.2.4) [3]
+        n = X.shape[1]
+        c = solve_banded((2, 2), X + lam * wE, y)
+        res = np.zeros(n)
+        # compute ``W^{-1} E c`` with respect to banded-storage of ``E``
+        tmp = wE * c
+        for i in range(n):
+            for j in range(max(0, i - n + 3), min(5, i + 3)):
+                res[i] += tmp[j, i + 2 - j]
+        numer = np.linalg.norm(lam * res)**2 / n
+
+        # compute the denominator
+        lhs = XtWX + lam * XtE
+        try:
+            b_banded = compute_b_inv(lhs)
+            # compute the trace of the product b_banded @ XtWX
+            tr = b_banded * XtWX
+            tr[:-1] *= 2
+            # find the denominator
+            denom = (1 - sum(sum(tr)) / n)**2
+        except LinAlgError as e:
+            # the Cholesky decomposition cannot be performed
+            raise ValueError('Seems like the problem is ill-posed') from e
+
+        res = numer / denom
+
+        return res
+
+    n = X.shape[1]
+
+    XtWX = compute_banded_symmetric_XT_W_Y(X, w, X)
+    XtE = compute_banded_symmetric_XT_W_Y(X, w, wE)
+
+    def fun(lam):
+        return _gcv(lam, X, XtWX, wE, XtE)
+
+    gcv_est = minimize_scalar(fun, bounds=(0, n), method='Bounded')
+    if gcv_est.success:
+        return gcv_est.x
+    raise ValueError(f"Unable to find minimum of the GCV "
+                     f"function: {gcv_est.message}")
+
+
+def _coeff_of_divided_diff(x):
+    """
+    Returns the coefficients of the divided difference.
+
+    Parameters
+    ----------
+    x : array, shape (n,)
+        Array which is used for the computation of divided difference.
+
+    Returns
+    -------
+    res : array_like, shape (n,)
+        Coefficients of the divided difference.
+
+    Notes
+    -----
+    Vector ``x`` should have unique elements, otherwise a division-by-zero
+    error might be raised.
+
+    No checks are performed.
+
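+    Examples
+    --------
+    A small sketch (illustrative): the returned weights are the classical
+    divided-difference coefficients ``1 / prod_{k != i} (x_i - x_k)``.
+
+    >>> import numpy as np
+    >>> np.allclose(_coeff_of_divided_diff(np.array([0., 1., 3.])),
+    ...             [1/3, -1/2, 1/6])
+    True
+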
+    """
+    n = x.shape[0]
+    res = np.zeros(n)
+    for i in range(n):
+        pp = 1.
+        for k in range(n):
+            if k != i:
+                pp *= (x[i] - x[k])
+        res[i] = 1. / pp
+    return res
+
+
+def make_smoothing_spline(x, y, w=None, lam=None):
+    r"""
+    Compute the (coefficients of) a smoothing cubic spline function using
+    ``lam`` to control the tradeoff between the amount of smoothness of the
+    curve and its proximity to the data. If ``lam`` is None, the GCV
+    criteria [1] is used to find it.
+
+    A smoothing spline is found as a solution to the regularized weighted
+    linear regression problem:
+
+    .. math::
+
+        \sum\limits_{i=1}^n w_i\lvert y_i - f(x_i) \rvert^2 +
+        \lambda\int\limits_{x_1}^{x_n} (f^{(2)}(u))^2 d u
+
+    where :math:`f` is a spline function, :math:`w` is a vector of weights and
+    :math:`\lambda` is a regularization parameter.
+
+    If ``lam`` is None, we use the GCV criteria to find an optimal
+    regularization parameter; otherwise, we solve the regularized weighted
+    linear regression problem with the given parameter. The parameter controls
+    the tradeoff in the following way: the larger the parameter becomes, the
+    smoother the function gets.
+
+    Parameters
+    ----------
+    x : array_like, shape (n,)
+        Abscissas.
+    y : array_like, shape (n,)
+        Ordinates.
+    w : array_like, shape (n,), optional
+        Vector of weights. Default is ``np.ones_like(x)``.
+    lam : float, (:math:`\lambda \geq 0`), optional
+        Regularization parameter. If ``lam`` is None, then it is found from
+        the GCV criteria. Default is None.
+
+    Returns
+    -------
+    func : a BSpline object.
+        A callable representing a spline in the B-spline basis
+        as a solution of the problem of smoothing splines using
+        the GCV criteria [1] in case ``lam`` is None, otherwise using the
+        given parameter ``lam``.
+
+    Notes
+    -----
+    This algorithm is a clean room reimplementation of the algorithm
+    introduced by Woltring in FORTRAN [2]. The original version cannot be used
+    in SciPy source code because of license issues. The details of the
+    reimplementation are discussed in [4] (available only in Russian).
+
+    If the vector of weights ``w`` is None, we assume that all the points are
+    equal in terms of weights, and the vector of weights is a vector of ones.
+
+    Note that in weighted residual sum of squares, weights are not squared:
+    :math:`\sum\limits_{i=1}^n w_i\lvert y_i - f(x_i) \rvert^2` while in
+    ``splrep`` the sum is built from the squared weights.
+
+    In cases when the initial problem is ill-posed (for example, the product
+    :math:`X^T W X`, where :math:`X` is the design matrix, is not positive
+    definite), a ValueError is raised.
+
+    References
+    ----------
+    .. [1] G. Wahba, "Estimating the smoothing parameter" in Spline models for
+        observational data, Philadelphia, Pennsylvania: Society for Industrial
+        and Applied Mathematics, 1990, pp. 45-65.
+        :doi:`10.1137/1.9781611970128`
+    .. [2] H. J. Woltring, A Fortran package for generalized, cross-validatory
+        spline smoothing and differentiation, Advances in Engineering
+        Software, vol. 8, no. 2, pp. 104-113, 1986.
+        :doi:`10.1016/0141-1195(86)90098-7`
+    .. [3] T. Hastie, J. Friedman, and R. Tisbshirani, "Smoothing Splines" in
+        The elements of Statistical Learning: Data Mining, Inference, and
+        prediction, New York: Springer, 2017, pp. 241-249.
+        :doi:`10.1007/978-0-387-84858-7`
+    .. [4] E. Zemlyanoy, "Generalized cross-validation smoothing splines",
+        BSc thesis, 2022 (in Russian).
+
+    Examples
+    --------
+    Generate some noisy data
+
+    >>> import numpy as np
+    >>> np.random.seed(1234)
+    >>> n = 200
+    >>> def func(x):
+    ...    return x**3 + x**2 * np.sin(4 * x)
+    >>> x = np.sort(np.random.random_sample(n) * 4 - 2)
+    >>> y = func(x) + np.random.normal(scale=1.5, size=n)
+
+    Make a smoothing spline function
+
+    >>> from scipy.interpolate import make_smoothing_spline
+    >>> spl = make_smoothing_spline(x, y)
+
+    Plot both
+
+    >>> import matplotlib.pyplot as plt
+    >>> grid = np.linspace(x[0], x[-1], 400)
+    >>> plt.plot(grid, spl(grid), label='Spline')
+    >>> plt.plot(grid, func(grid), label='Original function')
+    >>> plt.scatter(x, y, marker='.')
+    >>> plt.legend(loc='best')
+    >>> plt.show()
+
+    """
+
+    x = np.ascontiguousarray(x, dtype=float)
+    y = np.ascontiguousarray(y, dtype=float)
+
+    if any(x[1:] - x[:-1] <= 0):
+        raise ValueError('``x`` should be an ascending array')
+
+    if x.ndim != 1 or y.ndim != 1 or x.shape[0] != y.shape[0]:
+        raise ValueError('``x`` and ``y`` should be one dimensional and the'
+                         ' same size')
+
+    if w is None:
+        w = np.ones(len(x))
+    else:
+        w = np.ascontiguousarray(w)
+        if any(w <= 0):
+            raise ValueError('Invalid vector of weights')
+
+    t = np.r_[[x[0]] * 3, x, [x[-1]] * 3]
+    n = x.shape[0]
+
+    # It is known that the solution to the stated minimization problem exists
+    # and is a natural cubic spline with vector of knots equal to the unique
+    # elements of ``x`` [3], so we will solve the problem in the basis of
+    # natural splines.
+
+    # create design matrix in the B-spline basis
+    X_bspl = BSpline.design_matrix(x, t, 3)
+    # move from B-spline basis to the basis of natural splines using equations
+    # (2.1.7) [4]
+    # central elements
+    X = np.zeros((5, n))
+    for i in range(1, 4):
+        X[i, 2: -2] = X_bspl[i: i - 4, 3: -3][np.diag_indices(n - 4)]
+
+    # first elements
+    X[1, 1] = X_bspl[0, 0]
+    X[2, :2] = ((x[2] + x[1] - 2 * x[0]) * X_bspl[0, 0],
+                X_bspl[1, 1] + X_bspl[1, 2])
+    X[3, :2] = ((x[2] - x[0]) * X_bspl[1, 1], X_bspl[2, 2])
+
+    # last elements
+    X[1, -2:] = (X_bspl[-3, -3], (x[-1] - x[-3]) * X_bspl[-2, -2])
+    X[2, -2:] = (X_bspl[-2, -3] + X_bspl[-2, -2],
+                 (2 * x[-1] - x[-2] - x[-3]) * X_bspl[-1, -1])
+    X[3, -2] = X_bspl[-1, -1]
+
+    # create penalty matrix and divide it by vector of weights: W^{-1} E
+    wE = np.zeros((5, n))
+    wE[2:, 0] = _coeff_of_divided_diff(x[:3]) / w[:3]
+    wE[1:, 1] = _coeff_of_divided_diff(x[:4]) / w[:4]
+    for j in range(2, n - 2):
+        wE[:, j] = (x[j+2] - x[j-2]) * _coeff_of_divided_diff(x[j-2:j+3])\
+                   / w[j-2: j+3]
+
+    wE[:-1, -2] = -_coeff_of_divided_diff(x[-4:]) / w[-4:]
+    wE[:-2, -1] = _coeff_of_divided_diff(x[-3:]) / w[-3:]
+    wE *= 6
+
+    if lam is None:
+        lam = _compute_optimal_gcv_parameter(X, wE, y, w)
+    elif lam < 0.:
+        raise ValueError('Regularization parameter should be non-negative')
+
+    # solve the initial problem in the basis of natural splines
+    c = solve_banded((2, 2), X + lam * wE, y)
+    # move back to B-spline basis using equations (2.2.10) [4]
+    c_ = np.r_[c[0] * (t[5] + t[4] - 2 * t[3]) + c[1],
+               c[0] * (t[5] - t[3]) + c[1],
+               c[1: -1],
+               c[-1] * (t[-4] - t[-6]) + c[-2],
+               c[-1] * (2 * t[-4] - t[-5] - t[-6]) + c[-2]]
+
+    return BSpline.construct_fast(t, c_, 3)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/interpolate/_cubic.py b/__packaged__/coreml/.python_dependencies/scipy/interpolate/_cubic.py
new file mode 100644
index 00000000..63f3ac2e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/interpolate/_cubic.py
@@ -0,0 +1,864 @@
+"""Interpolation algorithms using piecewise cubic polynomials."""
+
+import numpy as np
+
+from . import PPoly
+from ._polyint import _isscalar
+from scipy.linalg import solve_banded, solve
+
+
+__all__ = ["CubicHermiteSpline", "PchipInterpolator", "pchip_interpolate",
+           "Akima1DInterpolator", "CubicSpline"]
+
+
+def prepare_input(x, y, axis, dydx=None):
+    """Prepare input for cubic spline interpolators.
+
+    All data are converted to numpy arrays and checked for correctness.
+    Axes equal to `axis` of arrays `y` and `dydx` are moved to be the 0th
+    axis. The value of `axis` is converted to lie in
+    [0, number of dimensions of `y`).
+    """
+
+    x, y = map(np.asarray, (x, y))
+    if np.issubdtype(x.dtype, np.complexfloating):
+        raise ValueError("`x` must contain real values.")
+    x = x.astype(float)
+
+    if np.issubdtype(y.dtype, np.complexfloating):
+        dtype = complex
+    else:
+        dtype = float
+
+    if dydx is not None:
+        dydx = np.asarray(dydx)
+        if y.shape != dydx.shape:
+            raise ValueError("The shapes of `y` and `dydx` must be identical.")
+        if np.issubdtype(dydx.dtype, np.complexfloating):
+            dtype = complex
+        dydx = dydx.astype(dtype, copy=False)
+
+    y = y.astype(dtype, copy=False)
+    axis = axis % y.ndim
+    if x.ndim != 1:
+        raise ValueError("`x` must be 1-dimensional.")
+    if x.shape[0] < 2:
+        raise ValueError("`x` must contain at least 2 elements.")
+    if x.shape[0] != y.shape[axis]:
+        raise ValueError("The length of `y` along `axis`={0} doesn't "
+                         "match the length of `x`".format(axis))
+
+    if not np.all(np.isfinite(x)):
+        raise ValueError("`x` must contain only finite values.")
+    if not np.all(np.isfinite(y)):
+        raise ValueError("`y` must contain only finite values.")
+
+    if dydx is not None and not np.all(np.isfinite(dydx)):
+        raise ValueError("`dydx` must contain only finite values.")
+
+    dx = np.diff(x)
+    if np.any(dx <= 0):
+        raise ValueError("`x` must be strictly increasing sequence.")
+
+    y = np.moveaxis(y, axis, 0)
+    if dydx is not None:
+        dydx = np.moveaxis(dydx, axis, 0)
+
+    return x, dx, y, axis, dydx
+
+
+class CubicHermiteSpline(PPoly):
+    """Piecewise-cubic interpolator matching values and first derivatives.
+
+    The result is represented as a `PPoly` instance.
+
+    Parameters
+    ----------
+    x : array_like, shape (n,)
+        1-D array containing values of the independent variable.
+        Values must be real, finite and in strictly increasing order.
+    y : array_like
+        Array containing values of the dependent variable. It can have
+        arbitrary number of dimensions, but the length along ``axis``
+        (see below) must match the length of ``x``. Values must be finite.
+    dydx : array_like
+        Array containing derivatives of the dependent variable. It can have
+        arbitrary number of dimensions, but the length along ``axis``
+        (see below) must match the length of ``x``. Values must be finite.
+    axis : int, optional
+        Axis along which `y` is assumed to be varying. Meaning that for
+        ``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
+        Default is 0.
+    extrapolate : {bool, 'periodic', None}, optional
+        If bool, determines whether to extrapolate to out-of-bounds points
+        based on first and last intervals, or to return NaNs. If 'periodic',
+        periodic extrapolation is used. If None (default), it is set to True.
+
+    Attributes
+    ----------
+    x : ndarray, shape (n,)
+        Breakpoints. The same ``x`` which was passed to the constructor.
+    c : ndarray, shape (4, n-1, ...)
+        Coefficients of the polynomials on each segment. The trailing
+        dimensions match the dimensions of `y`, excluding ``axis``.
+        For example, if `y` is 1-D, then ``c[k, i]`` is a coefficient for
+        ``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``.
+    axis : int
+        Interpolation axis. The same axis which was passed to the
+        constructor.
+
+    Methods
+    -------
+    __call__
+    derivative
+    antiderivative
+    integrate
+    roots
+
+    See Also
+    --------
+    Akima1DInterpolator : Akima 1D interpolator.
+    PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
+    CubicSpline : Cubic spline data interpolator.
+    PPoly : Piecewise polynomial in terms of coefficients and breakpoints
+
+    Notes
+    -----
+    If you want to create a higher-order spline matching higher-order
+    derivatives, use `BPoly.from_derivatives`.
+
+    References
+    ----------
+    .. [1] `Cubic Hermite spline
+            <https://en.wikipedia.org/wiki/Cubic_Hermite_spline>`_
+            on Wikipedia.
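+
+    Examples
+    --------
+    A minimal sketch (illustrative): Hermite data taken from ``y = x**2``
+    is reproduced exactly, since the parabola lies in the cubic space.
+
+    >>> import numpy as np
+    >>> from scipy.interpolate import CubicHermiteSpline
+    >>> x = np.array([0., 1., 2.])
+    >>> y = x**2
+    >>> dydx = 2. * x
+    >>> spl = CubicHermiteSpline(x, y, dydx)
+    >>> np.allclose(spl([0.5, 1.5]), [0.25, 2.25])
+    True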
+    """
+
+    def __init__(self, x, y, dydx, axis=0, extrapolate=None):
+        if extrapolate is None:
+            extrapolate = True
+
+        x, dx, y, axis, dydx = prepare_input(x, y, axis, dydx)
+
+        dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1))
+        slope = np.diff(y, axis=0) / dxr
+        t = (dydx[:-1] + dydx[1:] - 2 * slope) / dxr
+
+        c = np.empty((4, len(x) - 1) + y.shape[1:], dtype=t.dtype)
+        c[0] = t / dxr
+        c[1] = (slope - dydx[:-1]) / dxr - t
+        c[2] = dydx[:-1]
+        c[3] = y[:-1]
+
+        super().__init__(c, x, extrapolate=extrapolate)
+        self.axis = axis
+
+
+class PchipInterpolator(CubicHermiteSpline):
+    r"""PCHIP 1-D monotonic cubic interpolation.
+
+    ``x`` and ``y`` are arrays of values used to approximate some function f,
+    with ``y = f(x)``. The interpolant uses monotonic cubic splines
+    to find the value of new points. (PCHIP stands for Piecewise Cubic
+    Hermite Interpolating Polynomial).
+
+    Parameters
+    ----------
+    x : ndarray
+        A 1-D array of monotonically increasing real values. ``x`` cannot
+        include duplicate values (otherwise f is overspecified)
+    y : ndarray
+        A 1-D array of real values. ``y``'s length along the interpolation
+        axis must be equal to the length of ``x``. If N-D array, use ``axis``
+        parameter to select correct axis.
+    axis : int, optional
+        Axis in the y array corresponding to the x-coordinate values.
+    extrapolate : bool, optional
+        Whether to extrapolate to out-of-bounds points based on first
+        and last intervals, or to return NaNs.
+
+    Methods
+    -------
+    __call__
+    derivative
+    antiderivative
+    roots
+
+    See Also
+    --------
+    CubicHermiteSpline : Piecewise-cubic interpolator.
+    Akima1DInterpolator : Akima 1D interpolator.
+    CubicSpline : Cubic spline data interpolator.
+    PPoly : Piecewise polynomial in terms of coefficients and breakpoints.
+
+    Notes
+    -----
+    The interpolator preserves monotonicity in the interpolation data and does
+    not overshoot if the data is not smooth.
+
+    The first derivatives are guaranteed to be continuous, but the second
+    derivatives may jump at :math:`x_k`.
+
+    The derivatives at the points :math:`x_k`, :math:`f'_k`, are determined
+    using the PCHIP algorithm [1]_.
+
+    Let :math:`h_k = x_{k+1} - x_k`, and  :math:`d_k = (y_{k+1} - y_k) / h_k`
+    are the slopes at internal points :math:`x_k`.
+    If the signs of :math:`d_k` and :math:`d_{k-1}` are different or either of
+    them equals zero, then :math:`f'_k = 0`. Otherwise, it is given by the
+    weighted harmonic mean
+
+    .. math::
+
+        \frac{w_1 + w_2}{f'_k} = \frac{w_1}{d_{k-1}} + \frac{w_2}{d_k}
+
+    where :math:`w_1 = 2 h_k + h_{k-1}` and :math:`w_2 = h_k + 2 h_{k-1}`.
+
+    The end slopes are set using a one-sided scheme [2]_.
+
+
+    References
+    ----------
+    .. [1] F. N. Fritsch and J. Butland,
+           A method for constructing local
+           monotone piecewise cubic interpolants,
+           SIAM J. Sci. Comput., 5(2), 300-304 (1984).
+           :doi:`10.1137/0905021`.
+    .. [2] see, e.g., C. Moler, Numerical Computing with Matlab, 2004.
+           :doi:`10.1137/1.9780898717952`
+
+
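+    Examples
+    --------
+    A small sketch (illustrative): monotone data produces a monotone
+    interpolant with no overshoot between the data sites.
+
+    >>> import numpy as np
+    >>> from scipy.interpolate import PchipInterpolator
+    >>> x = np.array([0., 1., 2., 3.])
+    >>> y = np.array([0., 0., 1., 1.])
+    >>> p = PchipInterpolator(x, y)
+    >>> xx = np.linspace(0, 3, 31)
+    >>> np.all(np.diff(p(xx)) >= 0)
+    True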
+    """
+
+    def __init__(self, x, y, axis=0, extrapolate=None):
+        x, _, y, axis, _ = prepare_input(x, y, axis)
+        xp = x.reshape((x.shape[0],) + (1,)*(y.ndim-1))
+        dk = self._find_derivatives(xp, y)
+        super().__init__(x, y, dk, axis=0, extrapolate=extrapolate)
+        self.axis = axis
+
+    @staticmethod
+    def _edge_case(h0, h1, m0, m1):
+        # one-sided three-point estimate for the derivative
+        d = ((2*h0 + h1)*m0 - h0*m1) / (h0 + h1)
+
+        # try to preserve shape
+        mask = np.sign(d) != np.sign(m0)
+        mask2 = (np.sign(m0) != np.sign(m1)) & (np.abs(d) > 3.*np.abs(m0))
+        mmm = (~mask) & mask2
+
+        d[mask] = 0.
+        d[mmm] = 3.*m0[mmm]
+
+        return d
+
+    @staticmethod
+    def _find_derivatives(x, y):
+        # Determine the derivatives d_k at the points x_k using the
+        # PCHIP algorithm:
+        # Let m_k be the slope of the kth segment (between k and k+1).
+        # If m_k = 0 or m_{k-1} = 0 or sgn(m_k) != sgn(m_{k-1}), then d_k = 0;
+        # else use the weighted harmonic mean:
+        #   w_1 = 2h_k + h_{k-1}, w_2 = h_k + 2h_{k-1}
+        #   1/d_k = 1/(w_1 + w_2)*(w_1 / m_{k-1} + w_2 / m_k)
+        #   where h_k is the spacing between x_k and x_{k+1}
+        y_shape = y.shape
+        if y.ndim == 1:
+            # So that _edge_case doesn't end up assigning to scalars
+            x = x[:, None]
+            y = y[:, None]
+
+        hk = x[1:] - x[:-1]
+        mk = (y[1:] - y[:-1]) / hk
+
+        if y.shape[0] == 2:
+            # edge case: only have two points, use linear interpolation
+            dk = np.zeros_like(y)
+            dk[0] = mk
+            dk[1] = mk
+            return dk.reshape(y_shape)
+
+        smk = np.sign(mk)
+        condition = (smk[1:] != smk[:-1]) | (mk[1:] == 0) | (mk[:-1] == 0)
+
+        w1 = 2*hk[1:] + hk[:-1]
+        w2 = hk[1:] + 2*hk[:-1]
+
+        # values where division by zero occurs will be excluded
+        # by 'condition' afterwards
+        with np.errstate(divide='ignore', invalid='ignore'):
+            whmean = (w1/mk[:-1] + w2/mk[1:]) / (w1 + w2)
+
+        dk = np.zeros_like(y)
+        dk[1:-1][condition] = 0.0
+        dk[1:-1][~condition] = 1.0 / whmean[~condition]
+
+        # special case endpoints, as suggested in
+        # Cleve Moler, Numerical Computing with MATLAB, Chap 3.6 (pchiptx.m)
+        dk[0] = PchipInterpolator._edge_case(hk[0], hk[1], mk[0], mk[1])
+        dk[-1] = PchipInterpolator._edge_case(hk[-1], hk[-2], mk[-1], mk[-2])
+
+        return dk.reshape(y_shape)
+
+
+def pchip_interpolate(xi, yi, x, der=0, axis=0):
+    """
+    Convenience function for pchip interpolation.
+
+    xi and yi are arrays of values used to approximate some function f,
+    with ``yi = f(xi)``. The interpolant uses monotonic cubic splines
+    to find the value of new points x and the derivatives there.
+
+    See `scipy.interpolate.PchipInterpolator` for details.
+
+    Parameters
+    ----------
+    xi : array_like
+        A sorted list of x-coordinates, of length N.
+    yi : array_like
+        A 1-D array of real values. `yi`'s length along the interpolation
+        axis must be equal to the length of `xi`. If N-D array, use axis
+        parameter to select correct axis.
+    x : scalar or array_like
+        Of length M.
+    der : int or list, optional
+        Derivatives to extract. The 0th derivative can be included to
+        return the function value.
+    axis : int, optional
+        Axis in the yi array corresponding to the x-coordinate values.
+
+    See Also
+    --------
+    PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
+
+    Returns
+    -------
+    y : scalar or array_like
+        The interpolated values and/or derivatives: an array of length M for
+        a single derivative order, or a list of such arrays (one per entry
+        of `der`) when `der` is a list.
+
+    Examples
+    --------
+    We can interpolate 2D observed data using pchip interpolation:
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.interpolate import pchip_interpolate
+    >>> x_observed = np.linspace(0.0, 10.0, 11)
+    >>> y_observed = np.sin(x_observed)
+    >>> x = np.linspace(min(x_observed), max(x_observed), num=100)
+    >>> y = pchip_interpolate(x_observed, y_observed, x)
+    >>> plt.plot(x_observed, y_observed, "o", label="observation")
+    >>> plt.plot(x, y, label="pchip interpolation")
+    >>> plt.legend()
+    >>> plt.show()
+
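+    A list of derivative orders returns a list of arrays, one per order;
+    a minimal sketch reusing the arrays defined above, extracting the
+    interpolated values and the first derivative:
+
+    >>> y, dydx = pchip_interpolate(x_observed, y_observed, x, der=[0, 1])
+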
+    """
+    P = PchipInterpolator(xi, yi, axis=axis)
+
+    if der == 0:
+        return P(x)
+    elif _isscalar(der):
+        return P.derivative(der)(x)
+    else:
+        return [P.derivative(nu)(x) for nu in der]
+
+
+class Akima1DInterpolator(CubicHermiteSpline):
+    """
+    Akima interpolator
+
+    Fit piecewise cubic polynomials, given vectors x and y. The interpolation
+    method by Akima uses a continuously differentiable sub-spline built from
+    piecewise cubic polynomials. The resultant curve passes through the given
+    data points and will appear smooth and natural.
+
+    Parameters
+    ----------
+    x : ndarray, shape (m, )
+        1-D array of monotonically increasing real values.
+    y : ndarray, shape (m, ...)
+        N-D array of real values. The length of ``y`` along the first axis
+        must be equal to the length of ``x``.
+    axis : int, optional
+        Specifies the axis of ``y`` along which to interpolate. Interpolation
+        defaults to the first axis of ``y``.
+
+    Methods
+    -------
+    __call__
+    derivative
+    antiderivative
+    roots
+
+    See Also
+    --------
+    PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
+    CubicSpline : Cubic spline data interpolator.
+    PPoly : Piecewise polynomial in terms of coefficients and breakpoints
+
+    Notes
+    -----
+    .. versionadded:: 0.14
+
+    Use only for precise data, as the fitted curve passes through the given
+    points exactly. This routine is useful for plotting a pleasingly smooth
+    curve through a few given points.
+
+    References
+    ----------
+    .. [1] Hiroshi Akima, A new method of interpolation and smooth curve
+           fitting based on local procedures, J. ACM, 17(4), 589-602
+           (October 1970).
+
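+    Examples
+    --------
+    A minimal sketch: the fitted curve passes through the data exactly.
+
+    >>> import numpy as np
+    >>> from scipy.interpolate import Akima1DInterpolator
+    >>> x = np.arange(6)
+    >>> y = np.array([0., 0., 1., 2., 2., 2.])
+    >>> akima = Akima1DInterpolator(x, y)
+    >>> np.allclose(akima(x), y)
+    True
+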
+    """
+
+    def __init__(self, x, y, axis=0):
+        # Original implementation in MATLAB by N. Shamsundar (BSD licensed), see
+        # https://www.mathworks.com/matlabcentral/fileexchange/1814-akima-interpolation
+        x, dx, y, axis, _ = prepare_input(x, y, axis)
+
+        # determine slopes between breakpoints
+        m = np.empty((x.size + 3, ) + y.shape[1:])
+        dx = dx[(slice(None), ) + (None, ) * (y.ndim - 1)]
+        m[2:-2] = np.diff(y, axis=0) / dx
+
+        # add two additional points on the left ...
+        m[1] = 2. * m[2] - m[3]
+        m[0] = 2. * m[1] - m[2]
+        # ... and on the right
+        m[-2] = 2. * m[-3] - m[-4]
+        m[-1] = 2. * m[-2] - m[-3]
+
+        # if m1 == m2 != m3 == m4, the slope at the breakpoint is not
+        # defined. This is the fill value:
+        t = .5 * (m[3:] + m[:-3])
+        # get the denominator of the slope t
+        dm = np.abs(np.diff(m, axis=0))
+        f1 = dm[2:]
+        f2 = dm[:-2]
+        f12 = f1 + f2
+        # This is the mask of where the slope at a breakpoint is defined:
+        ind = np.nonzero(f12 > 1e-9 * np.max(f12, initial=-np.inf))
+        x_ind, y_ind = ind[0], ind[1:]
+        # Set the slope at breakpoint
+        t[ind] = (f1[ind] * m[(x_ind + 1,) + y_ind] +
+                  f2[ind] * m[(x_ind + 2,) + y_ind]) / f12[ind]
+
+        super().__init__(x, y, t, axis=0, extrapolate=False)
+        self.axis = axis
+
+    def extend(self, c, x, right=True):
+        raise NotImplementedError("Extending a 1-D Akima interpolator is not "
+                                  "yet implemented")
+
+    # These are inherited from PPoly, but they do not produce an Akima
+    # interpolator. Hence stub them out.
+    @classmethod
+    def from_spline(cls, tck, extrapolate=None):
+        raise NotImplementedError("This method does not make sense for "
+                                  "an Akima interpolator.")
+
+    @classmethod
+    def from_bernstein_basis(cls, bp, extrapolate=None):
+        raise NotImplementedError("This method does not make sense for "
+                                  "an Akima interpolator.")
+
+
+class CubicSpline(CubicHermiteSpline):
+    """Cubic spline data interpolator.
+
+    Interpolate data with a piecewise cubic polynomial which is twice
+    continuously differentiable [1]_. The result is represented as a `PPoly`
+    instance with breakpoints matching the given data.
+
+    Parameters
+    ----------
+    x : array_like, shape (n,)
+        1-D array containing values of the independent variable.
+        Values must be real, finite and in strictly increasing order.
+    y : array_like
+        Array containing values of the dependent variable. It can have
+        arbitrary number of dimensions, but the length along ``axis``
+        (see below) must match the length of ``x``. Values must be finite.
+    axis : int, optional
+        Axis along which `y` is assumed to be varying, meaning that for
+        ``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
+        Default is 0.
+    bc_type : string or 2-tuple, optional
+        Boundary condition type. Two additional equations, given by the
+        boundary conditions, are required to determine all coefficients of
+        polynomials on each segment [2]_.
+
+        If `bc_type` is a string, then the specified condition will be applied
+        at both ends of a spline. Available conditions are:
+
+        * 'not-a-knot' (default): The first and second segment at a curve end
+          are the same polynomial. It is a good default when there is no
+          information on boundary conditions.
+        * 'periodic': The interpolated function is assumed to be periodic
+          with period ``x[-1] - x[0]``. The first and last values of `y` must
+          be identical: ``y[0] == y[-1]``. This boundary condition will result
+          in ``y'[0] == y'[-1]`` and ``y''[0] == y''[-1]``.
+        * 'clamped': The first derivatives at the curve ends are zero.
+          Assuming a 1D `y`, ``bc_type=((1, 0.0), (1, 0.0))`` is the same
+          condition.
+        * 'natural': The second derivatives at the curve ends are zero.
+          Assuming a 1D `y`, ``bc_type=((2, 0.0), (2, 0.0))`` is the same
+          condition.
+
+        If `bc_type` is a 2-tuple, the first and the second value will be
+        applied at the curve start and end respectively. The tuple values can
+        be one of the previously mentioned strings (except 'periodic') or a
+        tuple ``(order, deriv_value)``, which allows one to specify arbitrary
+        derivatives at the curve ends:
+
+        * `order`: the derivative order, 1 or 2.
+        * `deriv_value`: array_like containing derivative values, shape must
+          be the same as `y`, excluding ``axis`` dimension. For example, if
+          `y` is 1-D, then `deriv_value` must be a scalar. If `y` is 3-D with
+          the shape (n0, n1, n2) and axis=2, then `deriv_value` must be 2-D
+          and have the shape (n0, n1).
+    extrapolate : {bool, 'periodic', None}, optional
+        If bool, determines whether to extrapolate to out-of-bounds points
+        based on first and last intervals, or to return NaNs. If 'periodic',
+        periodic extrapolation is used. If None (default), ``extrapolate`` is
+        set to 'periodic' for ``bc_type='periodic'`` and to True otherwise.
+
+    Attributes
+    ----------
+    x : ndarray, shape (n,)
+        Breakpoints. The same ``x`` which was passed to the constructor.
+    c : ndarray, shape (4, n-1, ...)
+        Coefficients of the polynomials on each segment. The trailing
+        dimensions match the dimensions of `y`, excluding ``axis``.
+        For example, if `y` is 1-d, then ``c[k, i]`` is a coefficient for
+        ``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``.
+    axis : int
+        Interpolation axis. The same axis which was passed to the
+        constructor.
+
+    Methods
+    -------
+    __call__
+    derivative
+    antiderivative
+    integrate
+    roots
+
+    See Also
+    --------
+    Akima1DInterpolator : Akima 1D interpolator.
+    PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
+    PPoly : Piecewise polynomial in terms of coefficients and breakpoints.
+
+    Notes
+    -----
+    Parameters `bc_type` and ``extrapolate`` work independently, i.e. the
+    former controls only construction of a spline, and the latter only
+    evaluation.
+
+    When a boundary condition is 'not-a-knot' and n = 2, it is replaced by
+    a condition that the first derivative is equal to the linear interpolant
+    slope. When both boundary conditions are 'not-a-knot' and n = 3, the
+    solution is sought as a parabola passing through given points.
+
+    When the 'not-a-knot' boundary condition is applied at both ends, the
+    resulting spline will be the same as returned by `splrep` (with ``s=0``)
+    and `InterpolatedUnivariateSpline`, but these two methods use a
+    representation in B-spline basis.
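+
+    A quick sketch of this equivalence:
+
+    >>> import numpy as np
+    >>> from scipy.interpolate import CubicSpline, splrep, splev
+    >>> xk = np.arange(10)
+    >>> cs = CubicSpline(xk, np.sin(xk))
+    >>> tck = splrep(xk, np.sin(xk), s=0)
+    >>> np.allclose(cs(2.5), splev(2.5, tck))
+    True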
+
+    .. versionadded:: 0.18.0
+
+    Examples
+    --------
+    In this example the cubic spline is used to interpolate a sampled sinusoid.
+    You can see that the spline continuity property holds for the first and
+    second derivatives, and is violated only for the third derivative.
+
+    >>> import numpy as np
+    >>> from scipy.interpolate import CubicSpline
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.arange(10)
+    >>> y = np.sin(x)
+    >>> cs = CubicSpline(x, y)
+    >>> xs = np.arange(-0.5, 9.6, 0.1)
+    >>> fig, ax = plt.subplots(figsize=(6.5, 4))
+    >>> ax.plot(x, y, 'o', label='data')
+    >>> ax.plot(xs, np.sin(xs), label='true')
+    >>> ax.plot(xs, cs(xs), label="S")
+    >>> ax.plot(xs, cs(xs, 1), label="S'")
+    >>> ax.plot(xs, cs(xs, 2), label="S''")
+    >>> ax.plot(xs, cs(xs, 3), label="S'''")
+    >>> ax.set_xlim(-0.5, 9.5)
+    >>> ax.legend(loc='lower left', ncol=2)
+    >>> plt.show()
+
+    In the second example, the unit circle is interpolated with a spline. A
+    periodic boundary condition is used. You can see that the first derivative
+    values, ds/dx=0, ds/dy=1 at the periodic point (1, 0) are correctly
+    computed. Note that a circle cannot be exactly represented by a cubic
+    spline. To increase precision, more breakpoints would be required.
+
+    >>> theta = 2 * np.pi * np.linspace(0, 1, 5)
+    >>> y = np.c_[np.cos(theta), np.sin(theta)]
+    >>> cs = CubicSpline(theta, y, bc_type='periodic')
+    >>> print("ds/dx={:.1f} ds/dy={:.1f}".format(cs(0, 1)[0], cs(0, 1)[1]))
+    ds/dx=0.0 ds/dy=1.0
+    >>> xs = 2 * np.pi * np.linspace(0, 1, 100)
+    >>> fig, ax = plt.subplots(figsize=(6.5, 4))
+    >>> ax.plot(y[:, 0], y[:, 1], 'o', label='data')
+    >>> ax.plot(np.cos(xs), np.sin(xs), label='true')
+    >>> ax.plot(cs(xs)[:, 0], cs(xs)[:, 1], label='spline')
+    >>> ax.axes.set_aspect('equal')
+    >>> ax.legend(loc='center')
+    >>> plt.show()
+
+    The third example is the interpolation of a polynomial y = x**3 on the
+    interval 0 <= x <= 1. A cubic spline can represent this function exactly.
+    To achieve that we need to specify values and first derivatives at
+    endpoints of the interval. Note that y' = 3 * x**2 and thus y'(0) = 0 and
+    y'(1) = 3.
+
+    >>> cs = CubicSpline([0, 1], [0, 1], bc_type=((1, 0), (1, 3)))
+    >>> x = np.linspace(0, 1)
+    >>> np.allclose(x**3, cs(x))
+    True
+
+    References
+    ----------
+    .. [1] `Cubic Spline Interpolation
+            <https://en.wikiversity.org/wiki/Cubic_Spline_Interpolation>`_
+            on Wikiversity.
+    .. [2] Carl de Boor, "A Practical Guide to Splines", Springer-Verlag, 1978.
+    """
+
+    def __init__(self, x, y, axis=0, bc_type='not-a-knot', extrapolate=None):
+        x, dx, y, axis, _ = prepare_input(x, y, axis)
+        n = len(x)
+
+        bc, y = self._validate_bc(bc_type, y, y.shape[1:], axis)
+
+        if extrapolate is None:
+            if bc[0] == 'periodic':
+                extrapolate = 'periodic'
+            else:
+                extrapolate = True
+
+        if y.size == 0:
+            # bail out early for zero-sized arrays
+            s = np.zeros_like(y)
+        else:
+            dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1))
+            slope = np.diff(y, axis=0) / dxr
+
+            # If bc is 'not-a-knot' this change is just a convention.
+            # If bc is 'periodic' then we already checked that y[0] == y[-1],
+            # and the spline is just a constant, we handle this case in the
+            # same way by setting the first derivatives to slope, which is 0.
+            if n == 2:
+                if bc[0] in ['not-a-knot', 'periodic']:
+                    bc[0] = (1, slope[0])
+                if bc[1] in ['not-a-knot', 'periodic']:
+                    bc[1] = (1, slope[0])
+
+            # This is a special case, when both conditions are 'not-a-knot'
+            # and n == 3. In this case 'not-a-knot' can't be handled regularly
+            # as the both conditions are identical. We handle this case by
+            # constructing a parabola passing through given points.
+            if n == 3 and bc[0] == 'not-a-knot' and bc[1] == 'not-a-knot':
+                A = np.zeros((3, 3))  # This is a standard matrix.
+                b = np.empty((3,) + y.shape[1:], dtype=y.dtype)
+
+                A[0, 0] = 1
+                A[0, 1] = 1
+                A[1, 0] = dx[1]
+                A[1, 1] = 2 * (dx[0] + dx[1])
+                A[1, 2] = dx[0]
+                A[2, 1] = 1
+                A[2, 2] = 1
+
+                b[0] = 2 * slope[0]
+                b[1] = 3 * (dxr[0] * slope[1] + dxr[1] * slope[0])
+                b[2] = 2 * slope[1]
+
+                s = solve(A, b, overwrite_a=True, overwrite_b=True,
+                          check_finite=False)
+            elif n == 3 and bc[0] == 'periodic':
+                # In case when number of points is 3 we compute the derivatives
+                # manually
+                s = np.empty((n,) + y.shape[1:], dtype=y.dtype)
+                t = (slope / dxr).sum() / (1. / dxr).sum()
+                s.fill(t)
+            else:
+                # Find derivative values at each x[i] by solving a tridiagonal
+                # system.
+                A = np.zeros((3, n))  # This is a banded matrix representation.
+                b = np.empty((n,) + y.shape[1:], dtype=y.dtype)
+
+                # Filling the system for i=1..n-2
+                #                         (x[i-1] - x[i]) * s[i-1] +\
+                # 2 * ((x[i] - x[i-1]) + (x[i+1] - x[i])) * s[i]   +\
+                #                         (x[i] - x[i-1]) * s[i+1] =\
+                #       3 * ((x[i+1] - x[i])*(y[i] - y[i-1])/(x[i] - x[i-1]) +\
+                #           (x[i] - x[i-1])*(y[i+1] - y[i])/(x[i+1] - x[i]))
+
+                A[1, 1:-1] = 2 * (dx[:-1] + dx[1:])  # The diagonal
+                A[0, 2:] = dx[:-1]                   # The upper diagonal
+                A[-1, :-2] = dx[1:]                  # The lower diagonal
+
+                b[1:-1] = 3 * (dxr[1:] * slope[:-1] + dxr[:-1] * slope[1:])
+
+                bc_start, bc_end = bc
+
+                if bc_start == 'periodic':
+                    # Due to the periodicity, and because y[-1] = y[0], the
+                    # linear system has (n-1) unknowns/equations instead of n:
+                    A = A[:, 0:-1]
+                    A[1, 0] = 2 * (dx[-1] + dx[0])
+                    A[0, 1] = dx[-1]
+
+                    b = b[:-1]
+
+                    # Also, due to the periodicity, the system is not tri-diagonal.
+                    # We need to compute a "condensed" matrix of shape (n-2, n-2).
+                    # See https://web.archive.org/web/20151220180652/http://www.cfm.brown.edu/people/gk/chap6/node14.html
+                    # for more explanations.
+                    # The condensed matrix is obtained by removing the last column
+                    # and last row of the (n-1, n-1) system matrix. The removed
+                    # values are saved in scalar variables with the (n-1, n-1)
+                    # system matrix indices forming their names:
+                    a_m1_0 = dx[-2]  # lower left corner value: A[-1, 0]
+                    a_m1_m2 = dx[-1]
+                    a_m1_m1 = 2 * (dx[-1] + dx[-2])
+                    a_m2_m1 = dx[-3]
+                    a_0_m1 = dx[0]
+
+                    b[0] = 3 * (dxr[0] * slope[-1] + dxr[-1] * slope[0])
+                    b[-1] = 3 * (dxr[-1] * slope[-2] + dxr[-2] * slope[-1])
+
+                    Ac = A[:, :-1]
+                    b1 = b[:-1]
+                    b2 = np.zeros_like(b1)
+                    b2[0] = -a_0_m1
+                    b2[-1] = -a_m2_m1
+
+                    # s1 and s2 are the solutions of (n-2, n-2) system
+                    s1 = solve_banded((1, 1), Ac, b1, overwrite_ab=False,
+                                      overwrite_b=False, check_finite=False)
+
+                    s2 = solve_banded((1, 1), Ac, b2, overwrite_ab=False,
+                                      overwrite_b=False, check_finite=False)
+
+                    # computing the s[n-2] solution:
+                    s_m1 = ((b[-1] - a_m1_0 * s1[0] - a_m1_m2 * s1[-1]) /
+                            (a_m1_m1 + a_m1_0 * s2[0] + a_m1_m2 * s2[-1]))
+
+                    # s is the solution of the (n, n) system:
+                    s = np.empty((n,) + y.shape[1:], dtype=y.dtype)
+                    s[:-2] = s1 + s_m1 * s2
+                    s[-2] = s_m1
+                    s[-1] = s[0]
+                else:
+                    if bc_start == 'not-a-knot':
+                        A[1, 0] = dx[1]
+                        A[0, 1] = x[2] - x[0]
+                        d = x[2] - x[0]
+                        b[0] = ((dxr[0] + 2*d) * dxr[1] * slope[0] +
+                                dxr[0]**2 * slope[1]) / d
+                    elif bc_start[0] == 1:
+                        A[1, 0] = 1
+                        A[0, 1] = 0
+                        b[0] = bc_start[1]
+                    elif bc_start[0] == 2:
+                        A[1, 0] = 2 * dx[0]
+                        A[0, 1] = dx[0]
+                        b[0] = -0.5 * bc_start[1] * dx[0]**2 + 3 * (y[1] - y[0])
+
+                    if bc_end == 'not-a-knot':
+                        A[1, -1] = dx[-2]
+                        A[-1, -2] = x[-1] - x[-3]
+                        d = x[-1] - x[-3]
+                        b[-1] = ((dxr[-1]**2*slope[-2] +
+                                 (2*d + dxr[-1])*dxr[-2]*slope[-1]) / d)
+                    elif bc_end[0] == 1:
+                        A[1, -1] = 1
+                        A[-1, -2] = 0
+                        b[-1] = bc_end[1]
+                    elif bc_end[0] == 2:
+                        A[1, -1] = 2 * dx[-1]
+                        A[-1, -2] = dx[-1]
+                        b[-1] = 0.5 * bc_end[1] * dx[-1]**2 + 3 * (y[-1] - y[-2])
+
+                    s = solve_banded((1, 1), A, b, overwrite_ab=True,
+                                     overwrite_b=True, check_finite=False)
+
+        super().__init__(x, y, s, axis=0, extrapolate=extrapolate)
+        self.axis = axis
+
+    @staticmethod
+    def _validate_bc(bc_type, y, expected_deriv_shape, axis):
+        """Validate and prepare boundary conditions.
+
+        Returns
+        -------
+        validated_bc : 2-tuple
+            Boundary conditions for a curve start and end.
+        y : ndarray
+            `y` cast to a complex dtype if one of the boundary conditions
+            has complex dtype.
+        """
+        if isinstance(bc_type, str):
+            if bc_type == 'periodic':
+                if not np.allclose(y[0], y[-1], rtol=1e-15, atol=1e-15):
+                    raise ValueError(
+                        "The first and last `y` point along axis {} must "
+                        "be identical (within machine precision) when "
+                        "bc_type='periodic'.".format(axis))
+
+            bc_type = (bc_type, bc_type)
+
+        else:
+            if len(bc_type) != 2:
+                raise ValueError("`bc_type` must contain 2 elements to "
+                                 "specify start and end conditions.")
+
+            if 'periodic' in bc_type:
+                raise ValueError("'periodic' `bc_type` is defined for both "
+                                 "curve ends and cannot be used with other "
+                                 "boundary conditions.")
+
+        validated_bc = []
+        for bc in bc_type:
+            if isinstance(bc, str):
+                if bc == 'clamped':
+                    validated_bc.append((1, np.zeros(expected_deriv_shape)))
+                elif bc == 'natural':
+                    validated_bc.append((2, np.zeros(expected_deriv_shape)))
+                elif bc in ['not-a-knot', 'periodic']:
+                    validated_bc.append(bc)
+                else:
+                    raise ValueError("bc_type={} is not allowed.".format(bc))
+            else:
+                try:
+                    deriv_order, deriv_value = bc
+                except Exception as e:
+                    raise ValueError(
+                        "A specified derivative value must be "
+                        "given in the form (order, value)."
+                    ) from e
+
+                if deriv_order not in [1, 2]:
+                    raise ValueError("The specified derivative order must "
+                                     "be 1 or 2.")
+
+                deriv_value = np.asarray(deriv_value)
+                if deriv_value.shape != expected_deriv_shape:
+                    raise ValueError(
+                        "`deriv_value` shape {} is not the expected one {}."
+                        .format(deriv_value.shape, expected_deriv_shape))
+
+                if np.issubdtype(deriv_value.dtype, np.complexfloating):
+                    y = y.astype(complex, copy=False)
+
+                validated_bc.append((deriv_order, deriv_value))
+
+        return validated_bc, y
diff --git a/__packaged__/coreml/.python_dependencies/scipy/interpolate/_fitpack2.py b/__packaged__/coreml/.python_dependencies/scipy/interpolate/_fitpack2.py
new file mode 100644
index 00000000..d155b1a9
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/interpolate/_fitpack2.py
@@ -0,0 +1,2187 @@
+"""
+fitpack --- curve and surface fitting with splines
+
+fitpack is based on a collection of Fortran routines DIERCKX
+by P. Dierckx (see http://www.netlib.org/dierckx/) transformed
+to double routines by Pearu Peterson.
+"""
+# Created by Pearu Peterson, June,August 2003
+__all__ = [
+    'UnivariateSpline',
+    'InterpolatedUnivariateSpline',
+    'LSQUnivariateSpline',
+    'BivariateSpline',
+    'LSQBivariateSpline',
+    'SmoothBivariateSpline',
+    'LSQSphereBivariateSpline',
+    'SmoothSphereBivariateSpline',
+    'RectBivariateSpline',
+    'RectSphereBivariateSpline']
+
+
+import warnings
+
+from numpy import zeros, concatenate, ravel, diff, array, ones
+import numpy as np
+
+from . import _fitpack_impl
+from . import dfitpack
+
+
+dfitpack_int = dfitpack.types.intvar.dtype
+
+
+# ############### Univariate spline ####################
+
+_curfit_messages = {1: """
+The required storage space exceeds the available storage space, as
+specified by the parameter nest: nest too small. If nest is already
+large (say nest > m/2), it may also indicate that s is too small.
+The approximation returned is the weighted least-squares spline
+according to the knots t[0],t[1],...,t[n-1]. (n=nest) the parameter fp
+gives the corresponding weighted sum of squared residuals (fp>s).
+""",
+                    2: """
+A theoretically impossible result was found during the iteration
+process for finding a smoothing spline with fp = s: s too small.
+There is an approximation returned but the corresponding weighted sum
+of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
+                    3: """
+The maximal number of iterations maxit (set to 20 by the program)
+allowed for finding a smoothing spline with fp=s has been reached: s
+too small.
+There is an approximation returned but the corresponding weighted sum
+of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
+                    10: """
+Error on entry, no approximation returned. The following conditions
+must hold:
+xb<=x[0]<x[1]<...<x[m-1]<=xe, w[i]>0, i=0..m-1
+if iopt=-1:
+  xb<t[k+1]<t[k+2]<...<t[n-k-2]<xe"""
+                    }
+
+
+# UnivariateSpline, ext parameter can be an int or a string
+_extrap_modes = {0: 0, 'extrapolate': 0,
+                 1: 1, 'zeros': 1,
+                 2: 2, 'raise': 2,
+                 3: 3, 'const': 3}
+
+
+class UnivariateSpline:
+    """
+    1-D smoothing spline fit to a given set of data points.
+
+    Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data.
+    `s` specifies the number of knots by specifying a smoothing condition.
+
+    Parameters
+    ----------
+    x : (N,) array_like
+        1-D array of independent input data. Must be increasing;
+        must be strictly increasing if `s` is 0.
+    y : (N,) array_like
+        1-D array of dependent input data, of the same length as `x`.
+    w : (N,) array_like, optional
+        Weights for spline fitting. Must be positive. If None (default),
+        weights are all 1.
+    bbox : (2,) array_like, optional
+        2-sequence specifying the boundary of the approximation interval. If
+        None (default), ``bbox=[x[0], x[-1]]``.
+    k : int, optional
+        Degree of the smoothing spline. Must be ``1 <= k <= 5``. Default is
+        ``k = 3``, a cubic spline.
+    s : float or None, optional
+        Positive smoothing factor used to choose the number of knots. The
+        number of knots will be increased until the smoothing condition is
+        satisfied::
+
+            sum((w[i] * (y[i] - spl(x[i])))**2, axis=0) <= s
+
+        If `s` is None, ``s = len(w)`` which should be a good value if
+        ``1/w[i]`` is an estimate of the standard deviation of ``y[i]``.
+        If 0, the spline will interpolate through all data points. Default
+        is None.
+    ext : int or str, optional
+        Controls the extrapolation mode for elements
+        not in the interval defined by the knot sequence.
+
+        * if ext=0 or 'extrapolate', return the extrapolated value.
+        * if ext=1 or 'zeros', return 0
+        * if ext=2 or 'raise', raise a ValueError
+        * if ext=3 or 'const', return the boundary value.
+
+        The default value is 0.
+
+    check_finite : bool, optional
+        Whether to check that the input arrays contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination or non-sensical results) if the inputs
+        do contain infinities or NaNs.
+        Default is False.
+
+    See Also
+    --------
+    BivariateSpline :
+        a base class for bivariate splines.
+    InterpolatedUnivariateSpline :
+        a spline that interpolates a given set of data points exactly
+    LSQUnivariateSpline :
+        a spline for which knots are user-selected
+    splrep :
+        a function to find the B-spline representation of a 1-D curve
+    splev :
+        a function to evaluate a B-spline or its derivatives
+
+    Notes
+    -----
+    The number of data points must be larger than the spline degree `k`.
+
+    **NaN handling**: If the input arrays contain ``nan`` values, the result
+    is not useful, since the underlying spline fitting routines cannot deal
+    with ``nan``. A workaround is to use zero weights for not-a-number
+    data points:
+
+    >>> import numpy as np
+    >>> from scipy.interpolate import UnivariateSpline
+    >>> x, y = np.array([1, 2, 3, 4]), np.array([1, np.nan, 3, 4])
+    >>> w = np.isnan(y)
+    >>> y[w] = 0.
+    >>> spl = UnivariateSpline(x, y, w=~w)
+
+    Notice the need to replace a ``nan`` by a numerical value (precise value
+    does not matter as long as the corresponding weight is zero.)
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.interpolate import UnivariateSpline
+    >>> rng = np.random.default_rng()
+    >>> x = np.linspace(-3, 3, 50)
+    >>> y = np.exp(-x**2) + 0.1 * rng.standard_normal(50)
+    >>> plt.plot(x, y, 'ro', ms=5)
+
+    Use the default value for the smoothing parameter:
+
+    >>> spl = UnivariateSpline(x, y)
+    >>> xs = np.linspace(-3, 3, 1000)
+    >>> plt.plot(xs, spl(xs), 'g', lw=3)
+
+    Manually change the amount of smoothing:
+
+    >>> spl.set_smoothing_factor(0.5)
+    >>> plt.plot(xs, spl(xs), 'b', lw=3)
+    >>> plt.show()
+
+    """
+
+    def __init__(self, x, y, w=None, bbox=[None]*2, k=3, s=None,
+                 ext=0, check_finite=False):
+
+        x, y, w, bbox, self.ext = self.validate_input(x, y, w, bbox, k, s, ext,
+                                                      check_finite)
+
+        # _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
+        data = dfitpack.fpcurf0(x, y, k, w=w, xb=bbox[0],
+                                xe=bbox[1], s=s)
+        if data[-1] == 1:
+            # nest too small, setting to maximum bound
+            data = self._reset_nest(data)
+        self._data = data
+        self._reset_class()
+
+    @staticmethod
+    def validate_input(x, y, w, bbox, k, s, ext, check_finite):
+        x, y, bbox = np.asarray(x), np.asarray(y), np.asarray(bbox)
+        if w is not None:
+            w = np.asarray(w)
+        if check_finite:
+            w_finite = np.isfinite(w).all() if w is not None else True
+            if (not np.isfinite(x).all() or not np.isfinite(y).all() or
+                    not w_finite):
+                raise ValueError("x and y array must not contain "
+                                 "NaNs or infs.")
+        if s is None or s > 0:
+            if not np.all(diff(x) >= 0.0):
+                raise ValueError("x must be increasing if s > 0")
+        else:
+            if not np.all(diff(x) > 0.0):
+                raise ValueError("x must be strictly increasing if s = 0")
+        if x.size != y.size:
+            raise ValueError("x and y should have a same length")
+        elif w is not None and not x.size == y.size == w.size:
+            raise ValueError("x, y, and w should have a same length")
+        elif bbox.shape != (2,):
+            raise ValueError("bbox shape should be (2,)")
+        elif not (1 <= k <= 5):
+            raise ValueError("k should be 1 <= k <= 5")
+        elif s is not None and not s >= 0.0:
+            raise ValueError("s should be s >= 0.0")
+
+        try:
+            ext = _extrap_modes[ext]
+        except KeyError as e:
+            raise ValueError("Unknown extrapolation mode %s." % ext) from e
+
+        return x, y, w, bbox, ext
+
+    @classmethod
+    def _from_tck(cls, tck, ext=0):
+        """Construct a spline object from given tck"""
+        self = cls.__new__(cls)
+        t, c, k = tck
+        self._eval_args = tck
+        # _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
+        self._data = (None, None, None, None, None, k, None, len(t), t,
+                      c, None, None, None, None)
+        self.ext = ext
+        return self
+
+    def _reset_class(self):
+        data = self._data
+        n, t, c, k, ier = data[7], data[8], data[9], data[5], data[-1]
+        self._eval_args = t[:n], c[:n], k
+        if ier == 0:
+            # the spline returned has a residual sum of squares fp
+            # such that abs(fp-s)/s <= tol with tol a relative
+            # tolerance set to 0.001 by the program
+            pass
+        elif ier == -1:
+            # the spline returned is an interpolating spline
+            self._set_class(InterpolatedUnivariateSpline)
+        elif ier == -2:
+            # the spline returned is the weighted least-squares
+            # polynomial of degree k. In this extreme case fp gives
+            # the upper bound fp0 for the smoothing factor s.
+            self._set_class(LSQUnivariateSpline)
+        else:
+            # error
+            if ier == 1:
+                self._set_class(LSQUnivariateSpline)
+            message = _curfit_messages.get(ier, 'ier=%s' % (ier))
+            warnings.warn(message)
+
+    def _set_class(self, cls):
+        self._spline_class = cls
+        if self.__class__ in (UnivariateSpline, InterpolatedUnivariateSpline,
+                              LSQUnivariateSpline):
+            self.__class__ = cls
+        else:
+            # It's an unknown subclass -- don't change class. cf. #731
+            pass
+
+    def _reset_nest(self, data, nest=None):
+        n = data[10]
+        if nest is None:
+            k, m = data[5], len(data[0])
+            nest = m+k+1  # this is the maximum bound for nest
+        else:
+            if not n <= nest:
+                raise ValueError("`nest` can only be increased")
+        t, c, fpint, nrdata = [np.resize(data[j], nest) for j in
+                               [8, 9, 11, 12]]
+
+        args = data[:8] + (t, c, n, fpint, nrdata, data[13])
+        data = dfitpack.fpcurf1(*args)
+        return data
+
+    def set_smoothing_factor(self, s):
+        """ Continue spline computation with the given smoothing
+        factor s and with the knots found at the last call.
+
+        This routine modifies the spline in place.
+
+        """
+        data = self._data
+        if data[6] == -1:
+            warnings.warn('smoothing factor unchanged for '
+                          'LSQ spline with fixed knots')
+            return
+        args = data[:6] + (s,) + data[7:]
+        data = dfitpack.fpcurf1(*args)
+        if data[-1] == 1:
+            # nest too small, setting to maximum bound
+            data = self._reset_nest(data)
+        self._data = data
+        self._reset_class()
+
+    def __call__(self, x, nu=0, ext=None):
+        """
+        Evaluate spline (or its nu-th derivative) at positions x.
+
+        Parameters
+        ----------
+        x : array_like
+            A 1-D array of points at which to return the value of the smoothed
+            spline or its derivatives. Note: `x` can be unordered but the
+            evaluation is more efficient if `x` is (partially) ordered.
+        nu  : int
+            The order of derivative of the spline to compute.
+        ext : int
+            Controls the value returned for elements of `x` not in the
+            interval defined by the knot sequence.
+
+            * if ext=0 or 'extrapolate', return the extrapolated value.
+            * if ext=1 or 'zeros', return 0
+            * if ext=2 or 'raise', raise a ValueError
+            * if ext=3 or 'const', return the boundary value.
+
+            The default value is 0, passed from the initialization of
+            UnivariateSpline.
+
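+        A minimal sketch of the modes, using a degree-1 interpolating
+        spline through samples of ``y = x**2``:
+
+        >>> from scipy.interpolate import UnivariateSpline
+        >>> spl = UnivariateSpline([0, 1, 2, 3], [0, 1, 4, 9], k=1, s=0)
+        >>> float(spl(5))                 # ext=0: extrapolate linearly
+        19.0
+        >>> float(spl(5, ext='const'))    # ext=3: clamp to boundary value
+        9.0
+        >>> float(spl(5, ext='zeros'))    # ext=1: zero outside the interval
+        0.0
+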
+        """
+        x = np.asarray(x)
+        # empty input yields empty output
+        if x.size == 0:
+            return array([])
+        if ext is None:
+            ext = self.ext
+        else:
+            try:
+                ext = _extrap_modes[ext]
+            except KeyError as e:
+                raise ValueError("Unknown extrapolation mode %s." % ext) from e
+        return _fitpack_impl.splev(x, self._eval_args, der=nu, ext=ext)
+
+    def get_knots(self):
+        """ Return positions of interior knots of the spline.
+
+        Internally, the knot vector contains ``2*k`` additional boundary knots.
+        """
+        data = self._data
+        k, n = data[5], data[7]
+        return data[8][k:n-k]
+
+    def get_coeffs(self):
+        """Return spline coefficients."""
+        data = self._data
+        k, n = data[5], data[7]
+        return data[9][:n-k-1]
+
+    def get_residual(self):
+        """Return weighted sum of squared residuals of the spline approximation.
+
+           This is equivalent to::
+
+                sum((w[i] * (y[i]-spl(x[i])))**2, axis=0)
+
+        """
+        return self._data[10]
+
+    def integral(self, a, b):
+        """ Return definite integral of the spline between two given points.
+
+        Parameters
+        ----------
+        a : float
+            Lower limit of integration.
+        b : float
+            Upper limit of integration.
+
+        Returns
+        -------
+        integral : float
+            The value of the definite integral of the spline between limits.
+
+        Examples
+        --------
+        >>> import numpy as np
+        >>> from scipy.interpolate import UnivariateSpline
+        >>> x = np.linspace(0, 3, 11)
+        >>> y = x**2
+        >>> spl = UnivariateSpline(x, y)
+        >>> spl.integral(0, 3)
+        9.0
+
+        which agrees with :math:`\\int x^2 dx = x^3 / 3` between the limits
+        of 0 and 3.
+
+        A caveat is that this routine assumes the spline to be zero outside of
+        the data limits:
+
+        >>> spl.integral(-1, 4)
+        9.0
+        >>> spl.integral(-1, 0)
+        0.0
+
+        """
+        return _fitpack_impl.splint(a, b, self._eval_args)
+
+    def derivatives(self, x):
+        """ Return all derivatives of the spline at the point x.
+
+        Parameters
+        ----------
+        x : float
+            The point to evaluate the derivatives at.
+
+        Returns
+        -------
+        der : ndarray, shape(k+1,)
+            Derivatives of the orders 0 to k.
+
+        Examples
+        --------
+        >>> import numpy as np
+        >>> from scipy.interpolate import UnivariateSpline
+        >>> x = np.linspace(0, 3, 11)
+        >>> y = x**2
+        >>> spl = UnivariateSpline(x, y)
+        >>> spl.derivatives(1.5)
+        array([2.25, 3.0, 2.0, 0])
+
+        """
+        return _fitpack_impl.spalde(x, self._eval_args)
+
+    def roots(self):
+        """ Return the zeros of the spline.
+
+        Notes
+        -----
+        Restriction: only cubic splines are supported by FITPACK. For
+        non-cubic splines, use `PPoly.roots` (see below for an example).
+
+        Examples
+        --------
+
+        For some data, this method may miss a root. This happens when one of
+        the spline knots (which FITPACK places automatically) happens to
+        coincide with the true root. A workaround is to convert to `PPoly`,
+        which uses a different root-finding algorithm.
+
+        For example,
+
+        >>> x = [1.96, 1.97, 1.98, 1.99, 2.00, 2.01, 2.02, 2.03, 2.04, 2.05]
+        >>> y = [-6.365470e-03, -4.790580e-03, -3.204320e-03, -1.607270e-03,
+        ...      4.440892e-16,  1.616930e-03,  3.243000e-03,  4.877670e-03,
+        ...      6.520430e-03,  8.170770e-03]
+        >>> from scipy.interpolate import UnivariateSpline
+        >>> spl = UnivariateSpline(x, y, s=0)
+        >>> spl.roots()
+        array([], dtype=float64)
+
+        Converting to a PPoly object does find the root at ``x=2``:
+
+        >>> from scipy.interpolate import splrep, PPoly
+        >>> tck = splrep(x, y, s=0)
+        >>> ppoly = PPoly.from_spline(tck)
+        >>> ppoly.roots(extrapolate=False)
+        array([2.])
+
+        See Also
+        --------
+        sproot
+        PPoly.roots
+
+        """
+        k = self._data[5]
+        if k == 3:
+            return _fitpack_impl.sproot(self._eval_args)
+        raise NotImplementedError('finding roots unsupported for '
+                                  'non-cubic splines')
+
+    def derivative(self, n=1):
+        """
+        Construct a new spline representing the derivative of this spline.
+
+        Parameters
+        ----------
+        n : int, optional
+            Order of derivative to evaluate. Default: 1
+
+        Returns
+        -------
+        spline : UnivariateSpline
+            Spline of order k2=k-n representing the derivative of this
+            spline.
+
+        See Also
+        --------
+        splder, antiderivative
+
+        Notes
+        -----
+
+        .. versionadded:: 0.13.0
+
+        Examples
+        --------
+        This can be used for finding maxima of a curve:
+
+        >>> import numpy as np
+        >>> from scipy.interpolate import UnivariateSpline
+        >>> x = np.linspace(0, 10, 70)
+        >>> y = np.sin(x)
+        >>> spl = UnivariateSpline(x, y, k=4, s=0)
+
+        Now, differentiate the spline and find the zeros of the
+        derivative. (NB: `sproot` only works for order 3 splines, so we
+        fit an order 4 spline):
+
+        >>> spl.derivative().roots() / np.pi
+        array([ 0.50000001,  1.5       ,  2.49999998])
+
+        This agrees well with roots :math:`\\pi/2 + n\\pi` of
+        :math:`\\cos(x) = \\sin'(x)`.
+
+        """
+        tck = _fitpack_impl.splder(self._eval_args, n)
+        # if self.ext is 'const', derivative.ext will be 'zeros'
+        ext = 1 if self.ext == 3 else self.ext
+        return UnivariateSpline._from_tck(tck, ext=ext)
+
+    def antiderivative(self, n=1):
+        """
+        Construct a new spline representing the antiderivative of this spline.
+
+        Parameters
+        ----------
+        n : int, optional
+            Order of antiderivative to evaluate. Default: 1
+
+        Returns
+        -------
+        spline : UnivariateSpline
+            Spline of order k2=k+n representing the antiderivative of this
+            spline.
+
+        Notes
+        -----
+
+        .. versionadded:: 0.13.0
+
+        See Also
+        --------
+        splantider, derivative
+
+        Examples
+        --------
+        >>> import numpy as np
+        >>> from scipy.interpolate import UnivariateSpline
+        >>> x = np.linspace(0, np.pi/2, 70)
+        >>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
+        >>> spl = UnivariateSpline(x, y, s=0)
+
+        The derivative is the inverse operation of the antiderivative,
+        although some floating point error accumulates:
+
+        >>> spl(1.7), spl.antiderivative().derivative()(1.7)
+        (array(2.1565429877197317), array(2.1565429877201865))
+
+        Antiderivative can be used to evaluate definite integrals:
+
+        >>> ispl = spl.antiderivative()
+        >>> ispl(np.pi/2) - ispl(0)
+        2.2572053588768486
+
+        This is indeed an approximation to the complete elliptic integral
+        :math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:
+
+        >>> from scipy.special import ellipk
+        >>> ellipk(0.8)
+        2.2572053268208538
+
+        """
+        tck = _fitpack_impl.splantider(self._eval_args, n)
+        return UnivariateSpline._from_tck(tck, self.ext)
+
+
+class InterpolatedUnivariateSpline(UnivariateSpline):
+    """
+    1-D interpolating spline for a given set of data points.
+
+    Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data.
+    Spline function passes through all provided points. Equivalent to
+    `UnivariateSpline` with  `s` = 0.
+
+    Parameters
+    ----------
+    x : (N,) array_like
+        Input dimension of data points -- must be strictly increasing
+    y : (N,) array_like
+        input dimension of data points
+    w : (N,) array_like, optional
+        Weights for spline fitting.  Must be positive.  If None (default),
+        weights are all 1.
+    bbox : (2,) array_like, optional
+        2-sequence specifying the boundary of the approximation interval. If
+        None (default), ``bbox=[x[0], x[-1]]``.
+    k : int, optional
+        Degree of the smoothing spline.  Must be ``1 <= k <= 5``. Default is
+        ``k = 3``, a cubic spline.
+    ext : int or str, optional
+        Controls the extrapolation mode for elements
+        not in the interval defined by the knot sequence.
+
+        * if ext=0 or 'extrapolate', return the extrapolated value.
+        * if ext=1 or 'zeros', return 0
+        * if ext=2 or 'raise', raise a ValueError
+        * if ext=3 or 'const', return the boundary value.
+
+        The default value is 0.
+
+    check_finite : bool, optional
+        Whether to check that the input arrays contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination or non-sensical results) if the inputs
+        do contain infinities or NaNs.
+        Default is False.
+
+    See Also
+    --------
+    UnivariateSpline :
+        a smooth univariate spline to fit a given set of data points.
+    LSQUnivariateSpline :
+        a spline for which knots are user-selected
+    SmoothBivariateSpline :
+        a smoothing bivariate spline through the given points
+    LSQBivariateSpline :
+        a bivariate spline using weighted least-squares fitting
+    splrep :
+        a function to find the B-spline representation of a 1-D curve
+    splev :
+        a function to evaluate a B-spline or its derivatives
+    sproot :
+        a function to find the roots of a cubic B-spline
+    splint :
+        a function to evaluate the definite integral of a B-spline between two
+        given points
+    spalde :
+        a function to evaluate all derivatives of a B-spline
+
+    Notes
+    -----
+    The number of data points must be larger than the spline degree `k`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.interpolate import InterpolatedUnivariateSpline
+    >>> rng = np.random.default_rng()
+    >>> x = np.linspace(-3, 3, 50)
+    >>> y = np.exp(-x**2) + 0.1 * rng.standard_normal(50)
+    >>> spl = InterpolatedUnivariateSpline(x, y)
+    >>> plt.plot(x, y, 'ro', ms=5)
+    >>> xs = np.linspace(-3, 3, 1000)
+    >>> plt.plot(xs, spl(xs), 'g', lw=3, alpha=0.7)
+    >>> plt.show()
+
+    Notice that ``spl(x)`` interpolates `y`:
+
+    >>> spl.get_residual()
+    0.0
+
+    """
+
+    def __init__(self, x, y, w=None, bbox=[None]*2, k=3,
+                 ext=0, check_finite=False):
+
+        x, y, w, bbox, self.ext = self.validate_input(x, y, w, bbox, k, None,
+                                            ext, check_finite)
+        if not np.all(diff(x) > 0.0):
+            raise ValueError('x must be strictly increasing')
+
+        # _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
+        self._data = dfitpack.fpcurf0(x, y, k, w=w, xb=bbox[0],
+                                      xe=bbox[1], s=0)
+        self._reset_class()
+
+
+_fpchec_error_string = """The input parameters have been rejected by fpchec. \
+This means that at least one of the following conditions is violated:
+
+1) k+1 <= n-k-1 <= m
+2) t(1) <= t(2) <= ... <= t(k+1)
+   t(n-k) <= t(n-k+1) <= ... <= t(n)
+3) t(k+1) < t(k+2) < ... < t(n-k)
+4) t(k+1) <= x(i) <= t(n-k)
+5) The conditions specified by Schoenberg and Whitney must hold
+   for at least one subset of data points, i.e., there must be a
+   subset of data points x(j) such that
+       t(j) < x(j) < t(j+k+1), j=1,2,...,n-k-1
+"""
+
+
+class LSQUnivariateSpline(UnivariateSpline):
+    """
+    1-D spline with explicit internal knots.
+
+    Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data.  `t`
+    specifies the internal knots of the spline
+
+    Parameters
+    ----------
+    x : (N,) array_like
+        Input dimension of data points -- must be increasing
+    y : (N,) array_like
+        Input dimension of data points
+    t : (M,) array_like
+        interior knots of the spline.  Must be in ascending order and::
+
+            bbox[0] < t[0] < ... < t[-1] < bbox[-1]
+
+    w : (N,) array_like, optional
+        weights for spline fitting. Must be positive. If None (default),
+        weights are all 1.
+    bbox : (2,) array_like, optional
+        2-sequence specifying the boundary of the approximation interval. If
+        None (default), ``bbox = [x[0], x[-1]]``.
+    k : int, optional
+        Degree of the smoothing spline.  Must be 1 <= `k` <= 5.
+        Default is `k` = 3, a cubic spline.
+    ext : int or str, optional
+        Controls the extrapolation mode for elements
+        not in the interval defined by the knot sequence.
+
+        * if ext=0 or 'extrapolate', return the extrapolated value.
+        * if ext=1 or 'zeros', return 0
+        * if ext=2 or 'raise', raise a ValueError
+        * if ext=3 or 'const', return the boundary value.
+
+        The default value is 0.
+
+    check_finite : bool, optional
+        Whether to check that the input arrays contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination or non-sensical results) if the inputs
+        do contain infinities or NaNs.
+        Default is False.
+
+    Raises
+    ------
+    ValueError
+        If the interior knots do not satisfy the Schoenberg-Whitney conditions
+
+    See Also
+    --------
+    UnivariateSpline :
+        a smooth univariate spline to fit a given set of data points.
+    InterpolatedUnivariateSpline :
+        an interpolating univariate spline for a given set of data points.
+    splrep :
+        a function to find the B-spline representation of a 1-D curve
+    splev :
+        a function to evaluate a B-spline or its derivatives
+    sproot :
+        a function to find the roots of a cubic B-spline
+    splint :
+        a function to evaluate the definite integral of a B-spline between two
+        given points
+    spalde :
+        a function to evaluate all derivatives of a B-spline
+
+    Notes
+    -----
+    The number of data points must be larger than the spline degree `k`.
+
+    Knots `t` must satisfy the Schoenberg-Whitney conditions,
+    i.e., there must be a subset of data points ``x[j]`` such that
+    ``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.interpolate import LSQUnivariateSpline, UnivariateSpline
+    >>> import matplotlib.pyplot as plt
+    >>> rng = np.random.default_rng()
+    >>> x = np.linspace(-3, 3, 50)
+    >>> y = np.exp(-x**2) + 0.1 * rng.standard_normal(50)
+
+    Fit a smoothing spline with pre-defined internal knots:
+
+    >>> t = [-1, 0, 1]
+    >>> spl = LSQUnivariateSpline(x, y, t)
+
+    >>> xs = np.linspace(-3, 3, 1000)
+    >>> plt.plot(x, y, 'ro', ms=5)
+    >>> plt.plot(xs, spl(xs), 'g-', lw=3)
+    >>> plt.show()
+
+    Check the knot vector:
+
+    >>> spl.get_knots()
+    array([-3., -1., 0., 1., 3.])
+
+    Constructing an LSQ spline using the knots from another spline:
+
+    >>> x = np.arange(10)
+    >>> s = UnivariateSpline(x, x, s=0)
+    >>> s.get_knots()
+    array([ 0.,  2.,  3.,  4.,  5.,  6.,  7.,  9.])
+    >>> knt = s.get_knots()
+    >>> s1 = LSQUnivariateSpline(x, x, knt[1:-1])    # Chop 1st and last knot
+    >>> s1.get_knots()
+    array([ 0.,  2.,  3.,  4.,  5.,  6.,  7.,  9.])
+
+    """
+
+    def __init__(self, x, y, t, w=None, bbox=[None]*2, k=3,
+                 ext=0, check_finite=False):
+
+        x, y, w, bbox, self.ext = self.validate_input(x, y, w, bbox, k, None,
+                                                      ext, check_finite)
+        if not np.all(diff(x) >= 0.0):
+            raise ValueError('x must be increasing')
+
+        # _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
+        xb = bbox[0]
+        xe = bbox[1]
+        if xb is None:
+            xb = x[0]
+        if xe is None:
+            xe = x[-1]
+        t = concatenate(([xb]*(k+1), t, [xe]*(k+1)))
+        n = len(t)
+        if not np.all(t[k+1:n-k]-t[k:n-k-1] > 0, axis=0):
+            raise ValueError('Interior knots t must satisfy '
+                             'Schoenberg-Whitney conditions')
+        if not dfitpack.fpchec(x, t, k) == 0:
+            raise ValueError(_fpchec_error_string)
+        data = dfitpack.fpcurfm1(x, y, k, t, w=w, xb=xb, xe=xe)
+        self._data = data[:-3] + (None, None, data[-1])
+        self._reset_class()
+
+
+# ############### Bivariate spline ####################
+
+class _BivariateSplineBase:
+    """ Base class for Bivariate spline s(x,y) interpolation on the rectangle
+    [xb,xe] x [yb, ye] calculated from a given set of data points
+    (x,y,z).
+
+    See Also
+    --------
+    bisplrep :
+        a function to find a bivariate B-spline representation of a surface
+    bisplev :
+        a function to evaluate a bivariate B-spline and its derivatives
+    BivariateSpline :
+        a base class for bivariate splines.
+    SphereBivariateSpline :
+        a bivariate spline on a spherical grid
+    """
+
+    @classmethod
+    def _from_tck(cls, tck):
+        """Construct a spline object from given tck and degree"""
+        self = cls.__new__(cls)
+        if len(tck) != 5:
+            raise ValueError("tck should be a 5 element tuple of tx,"
+                             " ty, c, kx, ky")
+        self.tck = tck[:3]
+        self.degrees = tck[3:]
+        return self
+
+    def get_residual(self):
+        """ Return weighted sum of squared residuals of the spline
+        approximation: sum ((w[i]*(z[i]-s(x[i],y[i])))**2,axis=0)
+        """
+        return self.fp
+
+    def get_knots(self):
+        """ Return a tuple (tx,ty) where tx,ty contain knots positions
+        of the spline with respect to x-, y-variable, respectively.
+        The position of interior and additional knots are given as
+        t[k+1:-k-1] and t[:k+1]=b, t[-k-1:]=e, respectively.
+        """
+        return self.tck[:2]
+
+    def get_coeffs(self):
+        """ Return spline coefficients."""
+        return self.tck[2]
+
+    def __call__(self, x, y, dx=0, dy=0, grid=True):
+        """
+        Evaluate the spline or its derivatives at given positions.
+
+        Parameters
+        ----------
+        x, y : array_like
+            Input coordinates.
+
+            If `grid` is False, evaluate the spline at points ``(x[i],
+            y[i]), i=0, ..., len(x)-1``.  Standard Numpy broadcasting
+            is obeyed.
+
+            If `grid` is True: evaluate spline at the grid points
+            defined by the coordinate arrays x, y. The arrays must be
+            sorted to increasing order.
+
+            Note that the axis ordering is inverted relative to
+            the output of meshgrid.
+        dx : int
+            Order of x-derivative
+
+            .. versionadded:: 0.14.0
+        dy : int
+            Order of y-derivative
+
+            .. versionadded:: 0.14.0
+        grid : bool
+            Whether to evaluate the results on a grid spanned by the
+            input arrays, or at points specified by the input arrays.
+
+            .. versionadded:: 0.14.0
+
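+        Examples
+        --------
+        A minimal sketch of the two modes, using `RectBivariateSpline`
+        (defined later in this module) on samples of ``z = x * y``:
+
+        >>> import numpy as np
+        >>> from scipy.interpolate import RectBivariateSpline
+        >>> x = y = np.linspace(0., 4., 5)
+        >>> spl = RectBivariateSpline(x, y, np.outer(x, y))
+        >>> spl([1., 2.], [2., 3.]).shape              # grid=True: 2x2 grid
+        (2, 2)
+        >>> spl([1., 2.], [2., 3.], grid=False).shape  # two single points
+        (2,)
+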
+        """
+        x = np.asarray(x)
+        y = np.asarray(y)
+
+        tx, ty, c = self.tck[:3]
+        kx, ky = self.degrees
+        if grid:
+            if x.size == 0 or y.size == 0:
+                return np.zeros((x.size, y.size), dtype=self.tck[2].dtype)
+
+            if (x.size >= 2) and (not np.all(np.diff(x) >= 0.0)):
+                raise ValueError("x must be strictly increasing when `grid` is True")
+            if (y.size >= 2) and (not np.all(np.diff(y) >= 0.0)):
+                raise ValueError("y must be strictly increasing when `grid` is True")
+
+            if dx or dy:
+                z, ier = dfitpack.parder(tx, ty, c, kx, ky, dx, dy, x, y)
+                if not ier == 0:
+                    raise ValueError("Error code returned by parder: %s" % ier)
+            else:
+                z, ier = dfitpack.bispev(tx, ty, c, kx, ky, x, y)
+                if not ier == 0:
+                    raise ValueError("Error code returned by bispev: %s" % ier)
+        else:
+            # standard Numpy broadcasting
+            if x.shape != y.shape:
+                x, y = np.broadcast_arrays(x, y)
+
+            shape = x.shape
+            x = x.ravel()
+            y = y.ravel()
+
+            if x.size == 0 or y.size == 0:
+                return np.zeros(shape, dtype=self.tck[2].dtype)
+
+            if dx or dy:
+                z, ier = dfitpack.pardeu(tx, ty, c, kx, ky, dx, dy, x, y)
+                if not ier == 0:
+                    raise ValueError("Error code returned by pardeu: %s" % ier)
+            else:
+                z, ier = dfitpack.bispeu(tx, ty, c, kx, ky, x, y)
+                if not ier == 0:
+                    raise ValueError("Error code returned by bispeu: %s" % ier)
+
+            z = z.reshape(shape)
+        return z
+
+    def partial_derivative(self, dx, dy):
+        """Construct a new spline representing a partial derivative of this
+        spline.
+
+        Parameters
+        ----------
+        dx, dy : int
+            Orders of the derivative in x and y respectively. They must be
+            non-negative integers and less than the respective degree of the
+            original spline (self) in that direction (``kx``, ``ky``).
+
+        Returns
+        -------
+        spline :
+            A new spline of degrees (``kx - dx``, ``ky - dy``) representing the
+            derivative of this spline.
+
+        Notes
+        -----
+
+        .. versionadded:: 1.9.0
+
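+        Examples
+        --------
+        A minimal sketch (illustrative values): for ``z = x**3 * y`` the
+        x-derivative of the interpolating spline is ``3 * x**2 * y``.
+
+        >>> import numpy as np
+        >>> from scipy.interpolate import RectBivariateSpline
+        >>> x = y = np.linspace(0., 3., 11)
+        >>> z = x[:, None] ** 3 * y[None, :]
+        >>> spl = RectBivariateSpline(x, y, z)
+        >>> dspl = spl.partial_derivative(1, 0)
+        >>> dspl.degrees                    # (kx - dx, ky - dy)
+        (2, 3)
+        >>> np.allclose(dspl(2., 1.), 12.)  # 3 * 2**2 * 1
+        True
+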
+        """
+        if dx == 0 and dy == 0:
+            return self
+        else:
+            kx, ky = self.degrees
+            if not (dx >= 0 and dy >= 0):
+                raise ValueError("order of derivative must be positive or"
+                                 " zero")
+            if not (dx < kx and dy < ky):
+                raise ValueError("order of derivative must be less than"
+                                 " degree of spline")
+            tx, ty, c = self.tck[:3]
+            newc, ier = dfitpack.pardtc(tx, ty, c, kx, ky, dx, dy)
+            if ier != 0:
+                # This should not happen under normal conditions.
+                raise ValueError("Unexpected error code returned by"
+                                 " pardtc: %d" % ier)
+            nx = len(tx)
+            ny = len(ty)
+            newtx = tx[dx:nx - dx]
+            newty = ty[dy:ny - dy]
+            newkx, newky = kx - dx, ky - dy
+            newclen = (nx - dx - kx - 1) * (ny - dy - ky - 1)
+            return _DerivedBivariateSpline._from_tck((newtx, newty,
+                                                      newc[:newclen],
+                                                      newkx, newky))
+
+
+_surfit_messages = {1: """
+The required storage space exceeds the available storage space: nxest
+or nyest too small, or s too small.
+The weighted least-squares spline corresponds to the current set of
+knots.""",
+                    2: """
+A theoretically impossible result was found during the iteration
+process for finding a smoothing spline with fp = s: s too small or
+badly chosen eps.
+Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
+                    3: """
+the maximal number of iterations maxit (set to 20 by the program)
+allowed for finding a smoothing spline with fp=s has been reached:
+s too small.
+Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
+                    4: """
+No more knots can be added because the number of b-spline coefficients
+(nx-kx-1)*(ny-ky-1) already exceeds the number of data points m:
+either s or m too small.
+The weighted least-squares spline corresponds to the current set of
+knots.""",
+                    5: """
+No more knots can be added because the additional knot would (quasi)
+coincide with an old one: s too small or too large a weight to an
+inaccurate data point.
+The weighted least-squares spline corresponds to the current set of
+knots.""",
+                    10: """
+Error on entry, no approximation returned. The following conditions
+must hold:
+xb<=x[i]<=xe, yb<=y[i]<=ye, w[i]>0, i=0..m-1
+If iopt==-1, then
+  xb<tx[kx+1]<tx[kx+2]<...<tx[nx-kx-2]<xe
+  yb<ty[ky+1]<ty[ky+2]<...<ty[ny-ky-2]<ye""",
+                    -3: """
+The coefficients of the spline returned have been computed as the
+minimal norm least-squares solution of a (numerically) rank deficient
+system (deficiency=%i). If deficiency is large, the results may be
+inaccurate. Deficiency may strongly depend on the value of eps."""
+                    }
+
+
+class BivariateSpline(_BivariateSplineBase):
+    """
+    Base class for bivariate splines.
+
+    This describes a spline ``s(x, y)`` of degrees ``kx`` and ``ky`` on
+    the rectangle ``[xb, xe] * [yb, ye]`` calculated from a given set
+    of data points ``(x, y, z)``.
+
+    This class is meant to be subclassed, not instantiated directly.
+    To construct these splines, call either `SmoothBivariateSpline` or
+    `LSQBivariateSpline` or `RectBivariateSpline`.
+    """
+
+    def ev(self, xi, yi, dx=0, dy=0):
+        """
+        Evaluate the spline at points
+
+        Returns the interpolated value at ``(xi[i], yi[i]),
+        i=0,...,len(xi)-1``.
+
+        Parameters
+        ----------
+        xi, yi : array_like
+            Input coordinates. Standard Numpy broadcasting is obeyed.
+        dx : int, optional
+            Order of x-derivative
+
+            .. versionadded:: 0.14.0
+        dy : int, optional
+            Order of y-derivative
+
+            .. versionadded:: 0.14.0
+        """
+        return self.__call__(xi, yi, dx=dx, dy=dy, grid=False)
+
+    @staticmethod
+    def _validate_input(x, y, z, w, kx, ky, eps):
+        x, y, z = np.asarray(x), np.asarray(y), np.asarray(z)
+        if not x.size == y.size == z.size:
+            raise ValueError('x, y, and z should have a same size')
+        x, y, z = x.ravel(), y.ravel(), z.ravel()
+        if w is not None:
+            w = np.asarray(w)
+            if x.size != w.size:
+                raise ValueError('x, y, z, and w should have a same size')
+            w = w.ravel()
+            if not np.all(w >= 0.0):
+                raise ValueError('w should be positive')
+        if (eps is not None) and (not 0.0 < eps < 1.0):
+            raise ValueError('eps should be between (0, 1)')
+        if not x.size >= (kx + 1) * (ky + 1):
+            raise ValueError('The length of x, y and z should be at least'
+                             ' (kx+1) * (ky+1)')
+        return x, y, z, w
+
+
+class _DerivedBivariateSpline(_BivariateSplineBase):
+    """Bivariate spline constructed from the coefficients and knots of another
+    spline.
+
+    Notes
+    -----
+    The class is not meant to be instantiated directly from the data to be
+    interpolated or smoothed. As a result, its ``fp`` attribute and
+    ``get_residual`` method are inherited but overridden; ``AttributeError`` is
+    raised when they are accessed.
+
+    The other inherited attributes can be used as usual.
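+
+    A minimal illustrative sketch:
+
+    >>> import numpy as np
+    >>> from scipy.interpolate import RectBivariateSpline
+    >>> x = y = np.linspace(0., 4., 9)
+    >>> spl = RectBivariateSpline(x, y, np.outer(x, y))
+    >>> dspl = spl.partial_derivative(1, 1)   # a _DerivedBivariateSpline
+    >>> hasattr(dspl, 'fp')                   # no residual for derived splines
+    False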
+    """
+    _invalid_why = ("is unavailable, because _DerivedBivariateSpline"
+                    " instance is not constructed from data that are to be"
+                    " interpolated or smoothed, but derived from the"
+                    " underlying knots and coefficients of another spline"
+                    " object")
+
+    @property
+    def fp(self):
+        raise AttributeError("attribute \"fp\" %s" % self._invalid_why)
+
+    def get_residual(self):
+        raise AttributeError("method \"get_residual\" %s" % self._invalid_why)
+
+
+class SmoothBivariateSpline(BivariateSpline):
+    """
+    Smooth bivariate spline approximation.
+
+    Parameters
+    ----------
+    x, y, z : array_like
+        1-D sequences of data points (order is not important).
+    w : array_like, optional
+        Positive 1-D sequence of weights, of same length as `x`, `y` and `z`.
+    bbox : array_like, optional
+        Sequence of length 4 specifying the boundary of the rectangular
+        approximation domain.  By default,
+        ``bbox=[min(x), max(x), min(y), max(y)]``.
+    kx, ky : ints, optional
+        Degrees of the bivariate spline. Default is 3.
+    s : float, optional
+        Positive smoothing factor defined for estimation condition:
+        ``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
+        Default is ``s=len(w)``, which should be a good value if ``1/w[i]``
+        is an estimate of the standard deviation of ``z[i]``.
+    eps : float, optional
+        A threshold for determining the effective rank of an over-determined
+        linear system of equations. `eps` should have a value within the open
+        interval ``(0, 1)``, the default is 1e-16.
+
+    See Also
+    --------
+    BivariateSpline :
+        a base class for bivariate splines.
+    UnivariateSpline :
+        a smooth univariate spline to fit a given set of data points.
+    LSQBivariateSpline :
+        a bivariate spline using weighted least-squares fitting
+    RectSphereBivariateSpline :
+        a bivariate spline over a rectangular mesh on a sphere
+    SmoothSphereBivariateSpline :
+        a smoothing bivariate spline in spherical coordinates
+    LSQSphereBivariateSpline :
+        a bivariate spline in spherical coordinates using weighted
+        least-squares fitting
+    RectBivariateSpline :
+        a bivariate spline over a rectangular mesh
+    bisplrep :
+        a function to find a bivariate B-spline representation of a surface
+    bisplev :
+        a function to evaluate a bivariate B-spline and its derivatives
+
+    Notes
+    -----
+    The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.
+
+    If the input data is such that input dimensions have incommensurate
+    units and differ by many orders of magnitude, the interpolant may have
+    numerical artifacts. Consider rescaling the data before interpolating.
+
+    This routine constructs spline knot vectors automatically via the FITPACK
+    algorithm. The spline knots may be placed away from the data points. For
+    some data sets, this routine may fail to construct an interpolating spline,
+    even if one is requested via ``s=0`` parameter. In such situations, it is
+    recommended to use `bisplrep` / `bisplev` directly instead of this routine
+    and, if needed, increase the values of ``nxest`` and ``nyest`` parameters
+    of `bisplrep`.
+
+    For linear interpolation, prefer `LinearNDInterpolator`.
+    See ``https://gist.github.com/ev-br/8544371b40f414b7eaf3fe6217209bff``
+    for discussion.
+
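+    Examples
+    --------
+    A minimal sketch on scattered data (illustrative values):
+
+    >>> import numpy as np
+    >>> from scipy.interpolate import SmoothBivariateSpline
+    >>> rng = np.random.default_rng(1234)
+    >>> x = rng.uniform(0., 3., 100)
+    >>> y = rng.uniform(0., 3., 100)
+    >>> z = x * y
+    >>> spl = SmoothBivariateSpline(x, y, z, s=len(x))
+    >>> spl(np.linspace(0.5, 2.5, 5), np.linspace(0.5, 2.5, 4)).shape
+    (5, 4)
+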
+    """
+
+    def __init__(self, x, y, z, w=None, bbox=[None] * 4, kx=3, ky=3, s=None,
+                 eps=1e-16):
+
+        x, y, z, w = self._validate_input(x, y, z, w, kx, ky, eps)
+        bbox = ravel(bbox)
+        if not bbox.shape == (4,):
+            raise ValueError('bbox shape should be (4,)')
+        if s is not None and not s >= 0.0:
+            raise ValueError("s should be >= 0.0")
+
+        xb, xe, yb, ye = bbox
+        nx, tx, ny, ty, c, fp, wrk1, ier = dfitpack.surfit_smth(x, y, z, w,
+                                                                xb, xe, yb,
+                                                                ye, kx, ky,
+                                                                s=s, eps=eps,
+                                                                lwrk2=1)
+        if ier > 10:          # lwrk2 was too small, re-run
+            nx, tx, ny, ty, c, fp, wrk1, ier = dfitpack.surfit_smth(x, y, z, w,
+                                                                    xb, xe, yb,
+                                                                    ye, kx, ky,
+                                                                    s=s,
+                                                                    eps=eps,
+                                                                    lwrk2=ier)
+        if ier in [0, -1, -2]:  # normal return
+            pass
+        else:
+            message = _surfit_messages.get(ier, 'ier=%s' % (ier))
+            warnings.warn(message)
+
+        self.fp = fp
+        self.tck = tx[:nx], ty[:ny], c[:(nx-kx-1)*(ny-ky-1)]
+        self.degrees = kx, ky
+
+
+class LSQBivariateSpline(BivariateSpline):
+    """
+    Weighted least-squares bivariate spline approximation.
+
+    Parameters
+    ----------
+    x, y, z : array_like
+        1-D sequences of data points (order is not important).
+    tx, ty : array_like
+        Strictly ordered 1-D sequences of knots coordinates.
+    w : array_like, optional
+        Positive 1-D array of weights, of the same length as `x`, `y` and `z`.
+    bbox : (4,) array_like, optional
+        Sequence of length 4 specifying the boundary of the rectangular
+        approximation domain.  By default,
+        ``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
+    kx, ky : ints, optional
+        Degrees of the bivariate spline. Default is 3.
+    eps : float, optional
+        A threshold for determining the effective rank of an over-determined
+        linear system of equations. `eps` should have a value within the open
+        interval ``(0, 1)``, the default is 1e-16.
+
+    See Also
+    --------
+    BivariateSpline :
+        a base class for bivariate splines.
+    UnivariateSpline :
+        a smooth univariate spline to fit a given set of data points.
+    SmoothBivariateSpline :
+        a smoothing bivariate spline through the given points
+    RectSphereBivariateSpline :
+        a bivariate spline over a rectangular mesh on a sphere
+    SmoothSphereBivariateSpline :
+        a smoothing bivariate spline in spherical coordinates
+    LSQSphereBivariateSpline :
+        a bivariate spline in spherical coordinates using weighted
+        least-squares fitting
+    RectBivariateSpline :
+        a bivariate spline over a rectangular mesh.
+    bisplrep :
+        a function to find a bivariate B-spline representation of a surface
+    bisplev :
+        a function to evaluate a bivariate B-spline and its derivatives
+
+    Notes
+    -----
+    The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.
+
+    If the input data is such that input dimensions have incommensurate
+    units and differ by many orders of magnitude, the interpolant may have
+    numerical artifacts. Consider rescaling the data before interpolating.
+
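+    Examples
+    --------
+    A minimal sketch with a few interior knots (illustrative values):
+
+    >>> import numpy as np
+    >>> from scipy.interpolate import LSQBivariateSpline
+    >>> rng = np.random.default_rng(1234)
+    >>> x = rng.uniform(0., 3., 200)
+    >>> y = rng.uniform(0., 3., 200)
+    >>> z = x * y
+    >>> tx = ty = np.array([1., 1.5, 2.])   # interior knots only
+    >>> spl = LSQBivariateSpline(x, y, z, tx, ty)
+    >>> np.allclose(spl(2., 2., grid=False), 4.)   # z = x * y is recovered
+    True
+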
+    """
+
+    def __init__(self, x, y, z, tx, ty, w=None, bbox=[None]*4, kx=3, ky=3,
+                 eps=None):
+
+        x, y, z, w = self._validate_input(x, y, z, w, kx, ky, eps)
+        bbox = ravel(bbox)
+        if not bbox.shape == (4,):
+            raise ValueError('bbox shape should be (4,)')
+
+        nx = 2*kx+2+len(tx)
+        ny = 2*ky+2+len(ty)
+        # The Fortran subroutine "surfit" (called as dfitpack.surfit_lsq)
+        # requires that the knot arrays passed as input should be "real
+        # array(s) of dimension nmax" where "nmax" refers to the greater of nx
+        # and ny. We pad the tx1/ty1 arrays here so that this is satisfied, and
+        # slice them to the desired sizes upon return.
+        nmax = max(nx, ny)
+        tx1 = zeros((nmax,), float)
+        ty1 = zeros((nmax,), float)
+        tx1[kx+1:nx-kx-1] = tx
+        ty1[ky+1:ny-ky-1] = ty
+
+        xb, xe, yb, ye = bbox
+        tx1, ty1, c, fp, ier = dfitpack.surfit_lsq(x, y, z, nx, tx1, ny, ty1,
+                                                   w, xb, xe, yb, ye,
+                                                   kx, ky, eps, lwrk2=1)
+        if ier > 10:
+            tx1, ty1, c, fp, ier = dfitpack.surfit_lsq(x, y, z,
+                                                       nx, tx1, ny, ty1, w,
+                                                       xb, xe, yb, ye,
+                                                       kx, ky, eps, lwrk2=ier)
+        if ier in [0, -1, -2]:  # normal return
+            pass
+        else:
+            if ier < -2:
+                deficiency = (nx-kx-1)*(ny-ky-1)+ier
+                message = _surfit_messages.get(-3) % (deficiency)
+            else:
+                message = _surfit_messages.get(ier, 'ier=%s' % (ier))
+            warnings.warn(message)
+        self.fp = fp
+        self.tck = tx1[:nx], ty1[:ny], c
+        self.degrees = kx, ky
+
+
+class RectBivariateSpline(BivariateSpline):
+    """
+    Bivariate spline approximation over a rectangular mesh.
+
+    Can be used for both smoothing and interpolating data.
+
+    Parameters
+    ----------
+    x,y : array_like
+        1-D arrays of coordinates in strictly ascending order.
+        Evaluated points outside the data range will be extrapolated.
+    z : array_like
+        2-D array of data with shape (x.size,y.size).
+    bbox : array_like, optional
+        Sequence of length 4 specifying the boundary of the rectangular
+        approximation domain, which means the start and end spline knots of
+        each dimension are set by these values. By default,
+        ``bbox=[min(x), max(x), min(y), max(y)]``.
+    kx, ky : ints, optional
+        Degrees of the bivariate spline. Default is 3.
+    s : float, optional
+        Positive smoothing factor defined for estimation condition:
+        ``sum((z[i]-f(x[i], y[i]))**2, axis=0) <= s`` where f is a spline
+        function. Default is ``s=0``, which is for interpolation.
+
+    See Also
+    --------
+    BivariateSpline :
+        a base class for bivariate splines.
+    UnivariateSpline :
+        a smooth univariate spline to fit a given set of data points.
+    SmoothBivariateSpline :
+        a smoothing bivariate spline through the given points
+    LSQBivariateSpline :
+        a bivariate spline using weighted least-squares fitting
+    RectSphereBivariateSpline :
+        a bivariate spline over a rectangular mesh on a sphere
+    SmoothSphereBivariateSpline :
+        a smoothing bivariate spline in spherical coordinates
+    LSQSphereBivariateSpline :
+        a bivariate spline in spherical coordinates using weighted
+        least-squares fitting
+    bisplrep :
+        a function to find a bivariate B-spline representation of a surface
+    bisplev :
+        a function to evaluate a bivariate B-spline and its derivatives
+
+    Notes
+    -----
+
+    If the input data is such that input dimensions have incommensurate
+    units and differ by many orders of magnitude, the interpolant may have
+    numerical artifacts. Consider rescaling the data before interpolating.
+
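+    Examples
+    --------
+    A minimal interpolation sketch (illustrative values):
+
+    >>> import numpy as np
+    >>> from scipy.interpolate import RectBivariateSpline
+    >>> x = np.linspace(0., 4., 9)
+    >>> y = np.linspace(0., 4., 11)
+    >>> z = np.cos(x)[:, None] * np.sin(y)[None, :]
+    >>> spl = RectBivariateSpline(x, y, z)     # s=0: interpolation
+    >>> np.allclose(spl(x, y), z)              # the data are reproduced
+    True
+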
+    """
+
+    def __init__(self, x, y, z, bbox=[None] * 4, kx=3, ky=3, s=0):
+        x, y, bbox = ravel(x), ravel(y), ravel(bbox)
+        z = np.asarray(z)
+        if not np.all(diff(x) > 0.0):
+            raise ValueError('x must be strictly increasing')
+        if not np.all(diff(y) > 0.0):
+            raise ValueError('y must be strictly increasing')
+        if not x.size == z.shape[0]:
+            raise ValueError('x dimension of z must have same number of '
+                             'elements as x')
+        if not y.size == z.shape[1]:
+            raise ValueError('y dimension of z must have same number of '
+                             'elements as y')
+        if not bbox.shape == (4,):
+            raise ValueError('bbox shape should be (4,)')
+        if s is not None and not s >= 0.0:
+            raise ValueError("s should be >= 0.0")
+
+        z = ravel(z)
+        xb, xe, yb, ye = bbox
+        nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(x, y, z, xb, xe, yb,
+                                                          ye, kx, ky, s)
+
+        if ier not in [0, -1, -2]:
+            msg = _surfit_messages.get(ier, 'ier=%s' % (ier))
+            raise ValueError(msg)
+
+        self.fp = fp
+        self.tck = tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)]
+        self.degrees = kx, ky
+
+
+_spherefit_messages = _surfit_messages.copy()
+_spherefit_messages[10] = """
+ERROR. On entry, the input data are controlled on validity. The following
+       restrictions must be satisfied:
+            -1<=iopt<=1,  m>=2, ntest>=8 ,npest >=8, 0<eps<1,
+            0<=teta(i)<=pi, 0<=phi(i)<=2*pi, w(i)>0, i=1,...,m
+            lwrk1 >= 185+52*v+10*u+14*u*v+8*(u-1)*v**2+8*m
+            kwrk >= m+(ntest-7)*(npest-7)
+            if iopt=-1: 8<=nt<=ntest , 9<=np<=npest
+                        0<tt(5)<tt(6)<...<tt(nt-4)<pi
+                        0<tp(5)<tp(6)<...<tp(np-4)<2*pi
+            if iopt>=0: s>=0
+            if one of these conditions is found to be violated,control
+            is immediately repassed to the calling program. in that
+            case there is no approximation returned."""
+_spherefit_messages[-3] = """
+WARNING. The coefficients of the spline returned have been computed as the
+         minimal norm least-squares solution of a (numerically) rank
+         deficient system (deficiency=%i, rank=%i). Especially if the rank
+         deficiency, which is computed by 6+(nt-8)*(np-7)+ier, is large,
+         the results may be inaccurate. They could also seriously depend on
+         the value of eps."""
+
+
+class SphereBivariateSpline(_BivariateSplineBase):
+    """
+    Bivariate spline s(x,y) of degrees 3 on a sphere, calculated from a
+    given set of data points (theta,phi,r).
+
+    .. versionadded:: 0.11.0
+
+    See Also
+    --------
+    bisplrep :
+        a function to find a bivariate B-spline representation of a surface
+    bisplev :
+        a function to evaluate a bivariate B-spline and its derivatives
+    UnivariateSpline :
+        a smooth univariate spline to fit a given set of data points.
+    SmoothBivariateSpline :
+        a smoothing bivariate spline through the given points
+    LSQUnivariateSpline :
+        a univariate spline using weighted least-squares fitting
+    """
+
+    def __call__(self, theta, phi, dtheta=0, dphi=0, grid=True):
+        """
+        Evaluate the spline or its derivatives at given positions.
+
+        Parameters
+        ----------
+        theta, phi : array_like
+            Input coordinates.
+
+            If `grid` is False, evaluate the spline at points
+            ``(theta[i], phi[i]), i=0, ..., len(x)-1``.  Standard
+            Numpy broadcasting is obeyed.
+
+            If `grid` is True: evaluate spline at the grid points
+            defined by the coordinate arrays theta, phi. The arrays
+            must be sorted to increasing order.
+        dtheta : int, optional
+            Order of theta-derivative
+
+            .. versionadded:: 0.14.0
+        dphi : int
+            Order of phi-derivative
+
+            .. versionadded:: 0.14.0
+        grid : bool
+            Whether to evaluate the results on a grid spanned by the
+            input arrays, or at points specified by the input arrays.
+
+            .. versionadded:: 0.14.0
+
+        """
+        theta = np.asarray(theta)
+        phi = np.asarray(phi)
+
+        if theta.size > 0 and (theta.min() < 0. or theta.max() > np.pi):
+            raise ValueError("requested theta out of bounds.")
+
+        return _BivariateSplineBase.__call__(self, theta, phi,
+                                             dx=dtheta, dy=dphi, grid=grid)
+
+    def ev(self, theta, phi, dtheta=0, dphi=0):
+        """
+        Evaluate the spline at points
+
+        Returns the interpolated value at ``(theta[i], phi[i]),
+        i=0,...,len(theta)-1``.
+
+        Parameters
+        ----------
+        theta, phi : array_like
+            Input coordinates. Standard Numpy broadcasting is obeyed.
+        dtheta : int, optional
+            Order of theta-derivative
+
+            .. versionadded:: 0.14.0
+        dphi : int, optional
+            Order of phi-derivative
+
+            .. versionadded:: 0.14.0
+        """
+        return self.__call__(theta, phi, dtheta=dtheta, dphi=dphi, grid=False)
+
+
+class SmoothSphereBivariateSpline(SphereBivariateSpline):
+    """
+    Smooth bivariate spline approximation in spherical coordinates.
+
+    .. versionadded:: 0.11.0
+
+    Parameters
+    ----------
+    theta, phi, r : array_like
+        1-D sequences of data points (order is not important). Coordinates
+        must be given in radians. Theta must lie within the interval
+        ``[0, pi]``, and phi must lie within the interval ``[0, 2pi]``.
+    w : array_like, optional
+        Positive 1-D sequence of weights.
+    s : float, optional
+        Positive smoothing factor defined for estimation condition:
+        ``sum((w(i)*(r(i) - s(theta(i), phi(i))))**2, axis=0) <= s``
+        Default is ``s=len(w)``, which should be a good value if ``1/w[i]``
+        is an estimate of the standard deviation of ``r[i]``.
+    eps : float, optional
+        A threshold for determining the effective rank of an over-determined
+        linear system of equations. `eps` should have a value within the open
+        interval ``(0, 1)``, the default is 1e-16.
+
+    See Also
+    --------
+    BivariateSpline :
+        a base class for bivariate splines.
+    UnivariateSpline :
+        a smooth univariate spline to fit a given set of data points.
+    SmoothBivariateSpline :
+        a smoothing bivariate spline through the given points
+    LSQBivariateSpline :
+        a bivariate spline using weighted least-squares fitting
+    RectSphereBivariateSpline :
+        a bivariate spline over a rectangular mesh on a sphere
+    LSQSphereBivariateSpline :
+        a bivariate spline in spherical coordinates using weighted
+        least-squares fitting
+    RectBivariateSpline :
+        a bivariate spline over a rectangular mesh.
+    bisplrep :
+        a function to find a bivariate B-spline representation of a surface
+    bisplev :
+        a function to evaluate a bivariate B-spline and its derivatives
+
+    Notes
+    -----
+    For more information, see the FITPACK_ site about this function.
+
+    .. _FITPACK: http://www.netlib.org/dierckx/sphere.f
+
+    Examples
+    --------
+    Suppose we have global data on a coarse grid (the input data does not
+    have to be on a grid):
+
+    >>> import numpy as np
+    >>> theta = np.linspace(0., np.pi, 7)
+    >>> phi = np.linspace(0., 2*np.pi, 9)
+    >>> data = np.empty((theta.shape[0], phi.shape[0]))
+    >>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
+    >>> data[1:-1,1], data[1:-1,-1] = 1., 1.
+    >>> data[1,1:-1], data[-2,1:-1] = 1., 1.
+    >>> data[2:-2,2], data[2:-2,-2] = 2., 2.
+    >>> data[2,2:-2], data[-3,2:-2] = 2., 2.
+    >>> data[3,3:-2] = 3.
+    >>> data = np.roll(data, 4, 1)
+
+    We need to set up the interpolator object
+
+    >>> lats, lons = np.meshgrid(theta, phi)
+    >>> from scipy.interpolate import SmoothSphereBivariateSpline
+    >>> lut = SmoothSphereBivariateSpline(lats.ravel(), lons.ravel(),
+    ...                                   data.T.ravel(), s=3.5)
+
+    As a first test, we'll see what the algorithm returns when run on the
+    input coordinates
+
+    >>> data_orig = lut(theta, phi)
+
+    Finally we interpolate the data to a finer grid
+
+    >>> fine_lats = np.linspace(0., np.pi, 70)
+    >>> fine_lons = np.linspace(0., 2 * np.pi, 90)
+
+    >>> data_smth = lut(fine_lats, fine_lons)
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig = plt.figure()
+    >>> ax1 = fig.add_subplot(131)
+    >>> ax1.imshow(data, interpolation='nearest')
+    >>> ax2 = fig.add_subplot(132)
+    >>> ax2.imshow(data_orig, interpolation='nearest')
+    >>> ax3 = fig.add_subplot(133)
+    >>> ax3.imshow(data_smth, interpolation='nearest')
+    >>> plt.show()
+
+    """
+
+    def __init__(self, theta, phi, r, w=None, s=0., eps=1E-16):
+
+        theta, phi, r = np.asarray(theta), np.asarray(phi), np.asarray(r)
+
+        # input validation
+        if not ((0.0 <= theta).all() and (theta <= np.pi).all()):
+            raise ValueError('theta should be between [0, pi]')
+        if not ((0.0 <= phi).all() and (phi <= 2.0 * np.pi).all()):
+            raise ValueError('phi should be between [0, 2pi]')
+        if w is not None:
+            w = np.asarray(w)
+            if not (w >= 0.0).all():
+                raise ValueError('w should be positive')
+        if not s >= 0.0:
+            raise ValueError('s should be non-negative')
+        if not 0.0 < eps < 1.0:
+            raise ValueError('eps should be between (0, 1)')
+
+        if isinstance(w, float):  # a scalar weight applies to every point
+            w = ones(len(theta)) * w
+        nt_, tt_, np_, tp_, c, fp, ier = dfitpack.spherfit_smth(theta, phi,
+                                                                r, w=w, s=s,
+                                                                eps=eps)
+        if ier not in [0, -1, -2]:
+            message = _spherefit_messages.get(ier, 'ier=%s' % (ier))
+            raise ValueError(message)
+
+        self.fp = fp
+        self.tck = tt_[:nt_], tp_[:np_], c[:(nt_ - 4) * (np_ - 4)]
+        self.degrees = (3, 3)
+
+    def __call__(self, theta, phi, dtheta=0, dphi=0, grid=True):
+
+        theta = np.asarray(theta)
+        phi = np.asarray(phi)
+
+        if phi.size > 0 and (phi.min() < 0. or phi.max() > 2. * np.pi):
+            raise ValueError("requested phi out of bounds.")
+
+        return SphereBivariateSpline.__call__(self, theta, phi, dtheta=dtheta,
+                                              dphi=dphi, grid=grid)
+
+
+class LSQSphereBivariateSpline(SphereBivariateSpline):
+    """
+    Weighted least-squares bivariate spline approximation in spherical
+    coordinates.
+
+    Determines a smoothing bicubic spline according to a given
+    set of knots in the `theta` and `phi` directions.
+
+    .. versionadded:: 0.11.0
+
+    Parameters
+    ----------
+    theta, phi, r : array_like
+        1-D sequences of data points (order is not important). Coordinates
+        must be given in radians. Theta must lie within the interval
+        ``[0, pi]``, and phi must lie within the interval ``[0, 2pi]``.
+    tt, tp : array_like
+        Strictly ordered 1-D sequences of knots coordinates.
+        Coordinates must satisfy ``0 < tt[i] < pi``, ``0 < tp[i] < 2*pi``.
+    w : array_like, optional
+        Positive 1-D sequence of weights, of the same length as `theta`, `phi`
+        and `r`.
+    eps : float, optional
+        A threshold for determining the effective rank of an over-determined
+        linear system of equations. `eps` should have a value within the
+        open interval ``(0, 1)``, the default is 1e-16.
+
+    See Also
+    --------
+    BivariateSpline :
+        a base class for bivariate splines.
+    UnivariateSpline :
+        a smooth univariate spline to fit a given set of data points.
+    SmoothBivariateSpline :
+        a smoothing bivariate spline through the given points
+    LSQBivariateSpline :
+        a bivariate spline using weighted least-squares fitting
+    RectSphereBivariateSpline :
+        a bivariate spline over a rectangular mesh on a sphere
+    SmoothSphereBivariateSpline :
+        a smoothing bivariate spline in spherical coordinates
+    RectBivariateSpline :
+        a bivariate spline over a rectangular mesh.
+    bisplrep :
+        a function to find a bivariate B-spline representation of a surface
+    bisplev :
+        a function to evaluate a bivariate B-spline and its derivatives
+
+    Notes
+    -----
+    For more information, see the FITPACK_ site about this function.
+
+    .. _FITPACK: http://www.netlib.org/dierckx/sphere.f
+
+    Examples
+    --------
+    Suppose we have global data on a coarse grid (the input data does not
+    have to be on a grid):
+
+    >>> from scipy.interpolate import LSQSphereBivariateSpline
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+
+    >>> theta = np.linspace(0, np.pi, num=7)
+    >>> phi = np.linspace(0, 2*np.pi, num=9)
+    >>> data = np.empty((theta.shape[0], phi.shape[0]))
+    >>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
+    >>> data[1:-1,1], data[1:-1,-1] = 1., 1.
+    >>> data[1,1:-1], data[-2,1:-1] = 1., 1.
+    >>> data[2:-2,2], data[2:-2,-2] = 2., 2.
+    >>> data[2,2:-2], data[-3,2:-2] = 2., 2.
+    >>> data[3,3:-2] = 3.
+    >>> data = np.roll(data, 4, 1)
+
+    We need to set up the interpolator object. Here, we must also specify the
+    coordinates of the knots to use.
+
+    >>> lats, lons = np.meshgrid(theta, phi)
+    >>> knotst, knotsp = theta.copy(), phi.copy()
+    >>> knotst[0] += .0001
+    >>> knotst[-1] -= .0001
+    >>> knotsp[0] += .0001
+    >>> knotsp[-1] -= .0001
+    >>> lut = LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
+    ...                                data.T.ravel(), knotst, knotsp)
+
+    As a first test, we'll see what the algorithm returns when run on the
+    input coordinates
+
+    >>> data_orig = lut(theta, phi)
+
+    Finally we interpolate the data to a finer grid
+
+    >>> fine_lats = np.linspace(0., np.pi, 70)
+    >>> fine_lons = np.linspace(0., 2*np.pi, 90)
+    >>> data_lsq = lut(fine_lats, fine_lons)
+
+    >>> fig = plt.figure()
+    >>> ax1 = fig.add_subplot(131)
+    >>> ax1.imshow(data, interpolation='nearest')
+    >>> ax2 = fig.add_subplot(132)
+    >>> ax2.imshow(data_orig, interpolation='nearest')
+    >>> ax3 = fig.add_subplot(133)
+    >>> ax3.imshow(data_lsq, interpolation='nearest')
+    >>> plt.show()
+
+    """
+
+    def __init__(self, theta, phi, r, tt, tp, w=None, eps=1E-16):
+
+        theta, phi, r = np.asarray(theta), np.asarray(phi), np.asarray(r)
+        tt, tp = np.asarray(tt), np.asarray(tp)
+
+        if not ((0.0 <= theta).all() and (theta <= np.pi).all()):
+            raise ValueError('theta should be between [0, pi]')
+        if not ((0.0 <= phi).all() and (phi <= 2*np.pi).all()):
+            raise ValueError('phi should be between [0, 2pi]')
+        if not ((0.0 < tt).all() and (tt < np.pi).all()):
+            raise ValueError('tt should be between (0, pi)')
+        if not ((0.0 < tp).all() and (tp < 2*np.pi).all()):
+            raise ValueError('tp should be between (0, 2pi)')
+        if w is not None:
+            w = np.asarray(w)
+            if not (w >= 0.0).all():
+                raise ValueError('w should be positive')
+        if not 0.0 < eps < 1.0:
+            raise ValueError('eps should be between (0, 1)')
+
+        if isinstance(w, float):  # a scalar weight applies to every point
+            w = ones(len(theta)) * w
+        nt_, np_ = 8 + len(tt), 8 + len(tp)
+        tt_, tp_ = zeros((nt_,), float), zeros((np_,), float)
+        tt_[4:-4], tp_[4:-4] = tt, tp
+        tt_[-4:], tp_[-4:] = np.pi, 2. * np.pi
+        tt_, tp_, c, fp, ier = dfitpack.spherfit_lsq(theta, phi, r, tt_, tp_,
+                                                     w=w, eps=eps)
+        if ier > 0:
+            message = _spherefit_messages.get(ier, 'ier=%s' % (ier))
+            raise ValueError(message)
+
+        self.fp = fp
+        self.tck = tt_, tp_, c
+        self.degrees = (3, 3)
+
+    def __call__(self, theta, phi, dtheta=0, dphi=0, grid=True):
+
+        theta = np.asarray(theta)
+        phi = np.asarray(phi)
+
+        if phi.size > 0 and (phi.min() < 0. or phi.max() > 2. * np.pi):
+            raise ValueError("requested phi out of bounds.")
+
+        return SphereBivariateSpline.__call__(self, theta, phi, dtheta=dtheta,
+                                              dphi=dphi, grid=grid)
+
+
+_spfit_messages = _surfit_messages.copy()
+_spfit_messages[10] = """
+ERROR: on entry, the input data are controlled on validity
+       the following restrictions must be satisfied.
+          -1<=iopt(1)<=1, 0<=iopt(2)<=1, 0<=iopt(3)<=1,
+          -1<=ider(1)<=1, 0<=ider(2)<=1, ider(2)=0 if iopt(2)=0.
+          -1<=ider(3)<=1, 0<=ider(4)<=1, ider(4)=0 if iopt(3)=0.
+          mu >= mumin (see above), mv >= 4, nuest >=8, nvest >= 8,
+          kwrk>=5+mu+mv+nuest+nvest,
+          lwrk >= 12+nuest*(mv+nvest+3)+nvest*24+4*mu+8*mv+max(nuest,mv+nvest)
+          0< u(i-1)<u(i)< pi, i=2,...,mu,
+          -pi<=v(1)< pi, v(1)<v(i-1)<v(i)<v(1)+2*pi, i=3,...,mv
+          if iopt(1)=-1: 8<=nu<=min(nuest,mu+6+iopt(2)+iopt(3))
+                         0<tu(5)<tu(6)<...<tu(nu-4)< pi
+                         8<=nv<=min(nvest,mv+7)
+                         v(1)<tv(5)<tv(6)<...<tv(nv-4)<v(1)+2*pi
+          if iopt(1)>=0: s>=0
+          if s=0: nuest>=mu+6+iopt(2)+iopt(3), nvest>=mv+7
+       if one of these conditions is found to be violated,control is
+       immediately repassed to the calling program. in that case there is no
+       approximation returned."""
+
+
+class RectSphereBivariateSpline(SphereBivariateSpline):
+    """
+    Bivariate spline approximation over a rectangular mesh on a sphere.
+
+    Can be used for smoothing data.
+
+    .. versionadded:: 0.11.0
+
+    Parameters
+    ----------
+    u : array_like
+        1-D array of colatitude coordinates in strictly ascending order.
+        Coordinates must be given in radians and lie within the open interval
+        ``(0, pi)``.
+    v : array_like
+        1-D array of longitude coordinates in strictly ascending order.
+        Coordinates must be given in radians. First element (``v[0]``) must lie
+        within the interval ``[-pi, pi)``. Last element (``v[-1]``) must satisfy
+        ``v[-1] <= v[0] + 2*pi``.
+    r : array_like
+        2-D array of data with shape ``(u.size, v.size)``.
+    s : float, optional
+        Positive smoothing factor defined for estimation condition
+        (``s=0`` is for interpolation).
+    pole_continuity : bool or (bool, bool), optional
+        Order of continuity at the poles ``u=0`` (``pole_continuity[0]``) and
+        ``u=pi`` (``pole_continuity[1]``).  The order of continuity at the pole
+        will be 1 or 0 when this is True or False, respectively.
+        Defaults to False.
+    pole_values : float or (float, float), optional
+        Data values at the poles ``u=0`` and ``u=pi``.  Either the whole
+        parameter or each individual element can be None.  Defaults to None.
+    pole_exact : bool or (bool, bool), optional
+        Data value exactness at the poles ``u=0`` and ``u=pi``.  If True, the
+        value is considered to be the right function value, and it will be
+        fitted exactly. If False, the value will be considered to be a data
+        value just like the other data values.  Defaults to False.
+    pole_flat : bool or (bool, bool), optional
+        For the poles at ``u=0`` and ``u=pi``, specify whether or not the
+        approximation has vanishing derivatives.  Defaults to False.
+
+    See Also
+    --------
+    BivariateSpline :
+        a base class for bivariate splines.
+    UnivariateSpline :
+        a smooth univariate spline to fit a given set of data points.
+    SmoothBivariateSpline :
+        a smoothing bivariate spline through the given points
+    LSQBivariateSpline :
+        a bivariate spline using weighted least-squares fitting
+    SmoothSphereBivariateSpline :
+        a smoothing bivariate spline in spherical coordinates
+    LSQSphereBivariateSpline :
+        a bivariate spline in spherical coordinates using weighted
+        least-squares fitting
+    RectBivariateSpline :
+        a bivariate spline over a rectangular mesh.
+    bisplrep :
+        a function to find a bivariate B-spline representation of a surface
+    bisplev :
+        a function to evaluate a bivariate B-spline and its derivatives
+
+    Notes
+    -----
+    Currently, only the smoothing spline approximation (``iopt[0] = 0`` and
+    ``iopt[0] = 1`` in the FITPACK routine) is supported.  The exact
+    least-squares spline approximation is not implemented yet.
+
+    When actually performing the interpolation, the requested `v` values must
+    lie within the same length 2pi interval that the original `v` values were
+    chosen from.
+
+    For more information, see the FITPACK_ site about this function.
+
+    .. _FITPACK: http://www.netlib.org/dierckx/spgrid.f
+
+    Examples
+    --------
+    Suppose we have global data on a coarse grid
+
+    >>> import numpy as np
+    >>> lats = np.linspace(10, 170, 9) * np.pi / 180.
+    >>> lons = np.linspace(0, 350, 18) * np.pi / 180.
+    >>> data = np.dot(np.atleast_2d(90. - np.linspace(-80., 80., 18)).T,
+    ...               np.atleast_2d(180. - np.abs(np.linspace(0., 350., 9)))).T
+
+    We want to interpolate it to a global one-degree grid
+
+    >>> new_lats = np.linspace(1, 180, 180) * np.pi / 180
+    >>> new_lons = np.linspace(1, 360, 360) * np.pi / 180
+    >>> new_lats, new_lons = np.meshgrid(new_lats, new_lons)
+
+    We need to set up the interpolator object
+
+    >>> from scipy.interpolate import RectSphereBivariateSpline
+    >>> lut = RectSphereBivariateSpline(lats, lons, data)
+
+    Finally we interpolate the data.  The `RectSphereBivariateSpline` object
+    only takes 1-D arrays as input, therefore we need to do some reshaping.
+
+    >>> data_interp = lut.ev(new_lats.ravel(),
+    ...                      new_lons.ravel()).reshape((360, 180)).T
+
+    Looking at the original and the interpolated data, one can see that the
+    interpolant reproduces the original data very well:
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig = plt.figure()
+    >>> ax1 = fig.add_subplot(211)
+    >>> ax1.imshow(data, interpolation='nearest')
+    >>> ax2 = fig.add_subplot(212)
+    >>> ax2.imshow(data_interp, interpolation='nearest')
+    >>> plt.show()
+
+    Choosing the optimal value of ``s`` can be a delicate task. Recommended
+    values for ``s`` depend on the accuracy of the data values.  If the user
+    has an idea of the statistical errors on the data, she can also find a
+    proper estimate for ``s``. By assuming that, if she specifies the
+    right ``s``, the interpolator will use a spline ``f(u,v)`` which exactly
+    reproduces the function underlying the data, she can evaluate
+    ``sum((r(i,j)-s(u(i),v(j)))**2)`` to find a good estimate for this ``s``.
+    For example, if she knows that the statistical errors on her
+    ``r(i,j)``-values are not greater than 0.1, she may expect that a good
+    ``s`` should have a value not larger than ``u.size * v.size * (0.1)**2``.
+
+    If nothing is known about the statistical error in ``r(i,j)``, ``s`` must
+    be determined by trial and error.  The best is then to start with a very
+    large value of ``s`` (to determine the least-squares polynomial and the
+    corresponding upper bound ``fp0`` for ``s``) and then to progressively
+    decrease the value of ``s`` (say by a factor 10 in the beginning, i.e.
+    ``s = fp0 / 10, fp0 / 100, ...``  and more carefully as the approximation
+    shows more detail) to obtain closer fits.
+
+    The interpolation results for different values of ``s`` give some insight
+    into this process:
+
+    >>> fig2 = plt.figure()
+    >>> s = [3e9, 2e9, 1e9, 1e8]
+    >>> for idx, sval in enumerate(s, 1):
+    ...     lut = RectSphereBivariateSpline(lats, lons, data, s=sval)
+    ...     data_interp = lut.ev(new_lats.ravel(),
+    ...                          new_lons.ravel()).reshape((360, 180)).T
+    ...     ax = fig2.add_subplot(2, 2, idx)
+    ...     ax.imshow(data_interp, interpolation='nearest')
+    ...     ax.set_title(f"s = {sval:g}")
+    >>> plt.show()
+
+    """
+
+    def __init__(self, u, v, r, s=0., pole_continuity=False, pole_values=None,
+                 pole_exact=False, pole_flat=False):
+        iopt = np.array([0, 0, 0], dtype=dfitpack_int)
+        ider = np.array([-1, 0, -1, 0], dtype=dfitpack_int)
+        if pole_values is None:
+            pole_values = (None, None)
+        elif isinstance(pole_values, (float, np.float32, np.float64)):
+            pole_values = (pole_values, pole_values)
+        if isinstance(pole_continuity, bool):
+            pole_continuity = (pole_continuity, pole_continuity)
+        if isinstance(pole_exact, bool):
+            pole_exact = (pole_exact, pole_exact)
+        if isinstance(pole_flat, bool):
+            pole_flat = (pole_flat, pole_flat)
+
+        r0, r1 = pole_values
+        iopt[1:] = pole_continuity
+        if r0 is None:
+            ider[0] = -1
+        else:
+            ider[0] = pole_exact[0]
+
+        if r1 is None:
+            ider[2] = -1
+        else:
+            ider[2] = pole_exact[1]
+
+        ider[1], ider[3] = pole_flat
+
+        u, v = np.ravel(u), np.ravel(v)
+        r = np.asarray(r)
+
+        if not (0.0 < u[0] and u[-1] < np.pi):
+            raise ValueError('u should be between (0, pi)')
+        if not -np.pi <= v[0] < np.pi:
+            raise ValueError('v[0] should be between [-pi, pi)')
+        if not v[-1] <= v[0] + 2*np.pi:
+            raise ValueError('v[-1] should be v[0] + 2pi or less')
+
+        if not np.all(np.diff(u) > 0.0):
+            raise ValueError('u must be strictly increasing')
+        if not np.all(np.diff(v) > 0.0):
+            raise ValueError('v must be strictly increasing')
+
+        if not u.size == r.shape[0]:
+            raise ValueError('u dimension of r must have same number of '
+                             'elements as u')
+        if not v.size == r.shape[1]:
+            raise ValueError('v dimension of r must have same number of '
+                             'elements as v')
+
+        if pole_continuity[1] is False and pole_flat[1] is True:
+            raise ValueError('if pole_continuity is False, so must be '
+                             'pole_flat')
+        if pole_continuity[0] is False and pole_flat[0] is True:
+            raise ValueError('if pole_continuity is False, so must be '
+                             'pole_flat')
+
+        if not s >= 0.0:
+            raise ValueError('s should be non-negative')
+
+        r = np.ravel(r)
+        nu, tu, nv, tv, c, fp, ier = dfitpack.regrid_smth_spher(iopt, ider,
+                                                                u.copy(),
+                                                                v.copy(),
+                                                                r.copy(),
+                                                                r0, r1, s)
+
+        if ier not in [0, -1, -2]:
+            msg = _spfit_messages.get(ier, 'ier=%s' % (ier))
+            raise ValueError(msg)
+
+        self.fp = fp
+        self.tck = tu[:nu], tv[:nv], c[:(nu - 4) * (nv-4)]
+        self.degrees = (3, 3)
+        self.v0 = v[0]
+
+    def __call__(self, theta, phi, dtheta=0, dphi=0, grid=True):
+
+        theta = np.asarray(theta)
+        phi = np.asarray(phi)
+
+        return SphereBivariateSpline.__call__(self, theta, phi, dtheta=dtheta,
+                                              dphi=dphi, grid=grid)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/interpolate/_fitpack_impl.py b/__packaged__/coreml/.python_dependencies/scipy/interpolate/_fitpack_impl.py
new file mode 100644
index 00000000..06e3a69d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/interpolate/_fitpack_impl.py
@@ -0,0 +1,1314 @@
+"""
+fitpack (dierckx in netlib) --- A Python-C wrapper to FITPACK (by P. Dierckx).
+        FITPACK is a collection of FORTRAN programs for curve and surface
+        fitting with splines and tensor product splines.
+
+See
+ https://web.archive.org/web/20010524124604/http://www.cs.kuleuven.ac.be:80/cwis/research/nalag/research/topics/fitpack.html
+or
+ http://www.netlib.org/dierckx/
+
+Copyright 2002 Pearu Peterson all rights reserved,
+Pearu Peterson 
+Permission to use, modify, and distribute this software is given under the
+terms of the SciPy (BSD style) license. See LICENSE.txt that came with
+this distribution for specifics.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED.  USE AT YOUR OWN RISK.
+
+TODO: Make interfaces to the following fitpack functions:
+    For univariate splines: cocosp, concon, fourco, insert
+    For bivariate splines: profil, regrid, parsur, surev
+"""
+
+__all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde',
+           'bisplrep', 'bisplev', 'insert', 'splder', 'splantider']
+
+import warnings
+import numpy as np
+from . import _fitpack
+from numpy import (atleast_1d, array, ones, zeros, sqrt, ravel, transpose,
+                   empty, iinfo, asarray)
+
+# Try to replace _fitpack interface with
+#  f2py-generated version
+from . import dfitpack
+
+
+dfitpack_int = dfitpack.types.intvar.dtype
+
+
+def _int_overflow(x, msg=None):
+    """Cast the value to an dfitpack_int and raise an OverflowError if the value
+    cannot fit.
+    """
+    if x > iinfo(dfitpack_int).max:
+        if msg is None:
+            msg = '%r cannot fit into an %r' % (x, dfitpack_int)
+        raise OverflowError(msg)
+    return dfitpack_int.type(x)
+
+
+_iermess = {
+    0: ["The spline has a residual sum of squares fp such that "
+        "abs(fp-s)/s<=0.001", None],
+    -1: ["The spline is an interpolating spline (fp=0)", None],
+    -2: ["The spline is weighted least-squares polynomial of degree k.\n"
+         "fp gives the upper bound fp0 for the smoothing factor s", None],
+    1: ["The required storage space exceeds the available storage space.\n"
+        "Probable causes: data (x,y) size is too small or smoothing parameter"
+        "\ns is too small (fp>s).", ValueError],
+    2: ["A theoretically impossible result when finding a smoothing spline\n"
+        "with fp = s. Probable cause: s too small. (abs(fp-s)/s>0.001)",
+        ValueError],
+    3: ["The maximal number of iterations (20) allowed for finding smoothing\n"
+        "spline with fp=s has been reached. Probable cause: s too small.\n"
+        "(abs(fp-s)/s>0.001)", ValueError],
+    10: ["Error on input data", ValueError],
+    'unknown': ["An error occurred", TypeError]
+}
+
+_iermess2 = {
+    0: ["The spline has a residual sum of squares fp such that "
+        "abs(fp-s)/s<=0.001", None],
+    -1: ["The spline is an interpolating spline (fp=0)", None],
+    -2: ["The spline is weighted least-squares polynomial of degree kx and ky."
+         "\nfp gives the upper bound fp0 for the smoothing factor s", None],
+    -3: ["Warning. The coefficients of the spline have been computed as the\n"
+         "minimal norm least-squares solution of a rank deficient system.",
+         None],
+    1: ["The required storage space exceeds the available storage space.\n"
+        "Probable causes: nxest or nyest too small or s is too small. (fp>s)",
+        ValueError],
+    2: ["A theoretically impossible result when finding a smoothing spline\n"
+        "with fp = s. Probable causes: s too small or badly chosen eps.\n"
+        "(abs(fp-s)/s>0.001)", ValueError],
+    3: ["The maximal number of iterations (20) allowed for finding smoothing\n"
+        "spline with fp=s has been reached. Probable cause: s too small.\n"
+        "(abs(fp-s)/s>0.001)", ValueError],
+    4: ["No more knots can be added because the number of B-spline\n"
+        "coefficients already exceeds the number of data points m.\n"
+        "Probable causes: either s or m too small. (fp>s)", ValueError],
+    5: ["No more knots can be added because the additional knot would\n"
+        "coincide with an old one. Probable cause: s too small or too large\n"
+        "a weight to an inaccurate data point. (fp>s)", ValueError],
+    10: ["Error on input data", ValueError],
+    11: ["rwrk2 too small, i.e., there is not enough workspace for computing\n"
+         "the minimal least-squares solution of a rank deficient system of\n"
+         "linear equations.", ValueError],
+    'unknown': ["An error occurred", TypeError]
+}
+
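+# Module-level workspace cache for splprep: a call with task=1 re-uses the
+# knots and work arrays stored here by a previous task=0 or task=1 call.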
+_parcur_cache = {'t': array([], float), 'wrk': array([], float),
+                 'iwrk': array([], dfitpack_int), 'u': array([], float),
+                 'ub': 0, 'ue': 1}
+
+
+def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None,
+            full_output=0, nest=None, per=0, quiet=1):
+    """
+    Find the B-spline representation of an N-D curve.
+
+    Given a list of N rank-1 arrays, `x`, which represent a curve in
+    N-dimensional space parametrized by `u`, find a smooth approximating
+    spline curve g(`u`). Uses the FORTRAN routine parcur from FITPACK.
+
+    Parameters
+    ----------
+    x : array_like
+        A list of sample vector arrays representing the curve.
+    w : array_like, optional
+        Strictly positive rank-1 array of weights the same length as `x[0]`.
+        The weights are used in computing the weighted least-squares spline
+        fit. If the errors in the `x` values have standard-deviation given by
+        the vector d, then `w` should be 1/d. Default is ``ones(len(x[0]))``.
+    u : array_like, optional
+        An array of parameter values. If not given, these values are
+        calculated automatically, with ``M = len(x[0])``, as:
+
+            v[0] = 0
+
+            v[i] = v[i-1] + distance(`x[i]`, `x[i-1]`)
+
+            u[i] = v[i] / v[M-1]
+
+    ub, ue : int, optional
+        The end-points of the parameters interval. Defaults to
+        u[0] and u[-1].
+    k : int, optional
+        Degree of the spline. Cubic splines are recommended.
+        Even values of `k` should be avoided especially with a small s-value.
+        ``1 <= k <= 5``, default is 3.
+    task : int, optional
+        If task==0 (default), find t and c for a given smoothing factor, s.
+        If task==1, find t and c for another value of the smoothing factor, s.
+        There must have been a previous call with task=0 or task=1
+        for the same set of data.
+        If task=-1 find the weighted least-squares spline for a given set of
+        knots, t.
+    s : float, optional
+        A smoothing condition. The amount of smoothness is determined by
+        satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s``,
+        where g(x) is the smoothed interpolation of (x,y).  The user can
+        use `s` to control the trade-off between closeness and smoothness
+        of fit. Larger `s` means more smoothing while smaller values of `s`
+        indicate less smoothing. Recommended values of `s` depend on the
+        weights, w.  If the weights represent the inverse of the
+        standard-deviation of y, then a good `s` value should be found in
+        the range ``(m-sqrt(2*m),m+sqrt(2*m))``, where m is the number of
+        data points in x, y, and w.
+    t : array_like, optional
+        The knots needed for task=-1.
+    full_output : int, optional
+        If non-zero, then return optional outputs.
+    nest : int, optional
+        An over-estimate of the total number of knots of the spline to
+        help in determining the storage space. By default nest=m/2.
+        ``nest=m+k+1`` is always large enough.
+    per : int, optional
+       If non-zero, data points are considered periodic with period
+       ``x[m-1] - x[0]`` and a smooth periodic spline approximation is
+       returned.  Values of ``y[m-1]`` and ``w[m-1]`` are not used.
+    quiet : int, optional
+         Non-zero to suppress messages.
+
+    Returns
+    -------
+    tck : tuple
+        A tuple (t,c,k) containing the vector of knots, the B-spline
+        coefficients, and the degree of the spline.
+    u : array
+        An array of the values of the parameter.
+    fp : float
+        The weighted sum of squared residuals of the spline approximation.
+    ier : int
+        An integer flag about splrep success.  Success is indicated
+        if ier<=0. If ier in [1,2,3] an error occurred but was not raised.
+        Otherwise an error is raised.
+    msg : str
+        A message corresponding to the integer flag, ier.
+
+    See Also
+    --------
+    splrep, splev, sproot, spalde, splint,
+    bisplrep, bisplev
+    UnivariateSpline, BivariateSpline
+
+    Notes
+    -----
+    See `splev` for evaluation of the spline and its derivatives.
+    The number of dimensions N must be smaller than 11.
+
+    References
+    ----------
+    .. [1] P. Dierckx, "Algorithms for smoothing data with periodic and
+        parametric splines, Computer Graphics and Image Processing",
+        20 (1982) 171-184.
+    .. [2] P. Dierckx, "Algorithms for smoothing data with periodic and
+        parametric splines", report tw55, Dept. Computer Science,
+        K.U.Leuven, 1981.
+    .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs on
+        Numerical Analysis, Oxford University Press, 1993.
+
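+    Examples
+    --------
+    A minimal sketch fitting a closed-curve sample with an interpolating
+    (``s=0``) parametric spline (illustrative values):
+
+    >>> import numpy as np
+    >>> from scipy.interpolate import splprep, splev
+    >>> t = np.linspace(0., 2. * np.pi, 40)
+    >>> pts = [np.cos(t), np.sin(t)]
+    >>> tck, u = splprep(pts, s=0)
+    >>> xi, yi = splev(u, tck)
+    >>> np.allclose(xi, pts[0]) and np.allclose(yi, pts[1])
+    True
+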
+    """
+    # Bind the module-level cache; without this, the assignment below would
+    # make `_parcur_cache` function-local and task=1 calls would fail with
+    # UnboundLocalError instead of re-using the cached values.
+    global _parcur_cache
+    if task <= 0:
+        _parcur_cache = {'t': array([], float), 'wrk': array([], float),
+                         'iwrk': array([], dfitpack_int), 'u': array([], float),
+                         'ub': 0, 'ue': 1}
+    x = atleast_1d(x)
+    idim, m = x.shape
+    if per:
+        for i in range(idim):
+            if x[i][0] != x[i][-1]:
+                if not quiet:
+                    warnings.warn(RuntimeWarning('Setting x[%d][%d]=x[%d][0]' %
+                                                 (i, m - 1, i)))
+                x[i][-1] = x[i][0]
+    if not 0 < idim < 11:
+        raise TypeError('0 < idim < 11 must hold')
+    if w is None:
+        w = ones(m, float)
+    else:
+        w = atleast_1d(w)
+    ipar = (u is not None)
+    if ipar:
+        _parcur_cache['u'] = u
+        if ub is None:
+            _parcur_cache['ub'] = u[0]
+        else:
+            _parcur_cache['ub'] = ub
+        if ue is None:
+            _parcur_cache['ue'] = u[-1]
+        else:
+            _parcur_cache['ue'] = ue
+    else:
+        _parcur_cache['u'] = zeros(m, float)
+    if not (1 <= k <= 5):
+        raise TypeError('1 <= k <= 5 must hold (got k=%d)' % k)
+    if not (-1 <= task <= 1):
+        raise TypeError('task must be -1, 0 or 1')
+    if (not len(w) == m) or (ipar == 1 and (not len(u) == m)):
+        raise TypeError('Mismatch of input dimensions')
+    if s is None:
+        s = m - sqrt(2*m)
+    if t is None and task == -1:
+        raise TypeError('Knots must be given for task=-1')
+    if t is not None:
+        _parcur_cache['t'] = atleast_1d(t)
+    n = len(_parcur_cache['t'])
+    if task == -1 and n < 2*k + 2:
+        raise TypeError('There must be at least 2*k+2 knots for task=-1')
+    if m <= k:
+        raise TypeError('m > k must hold')
+    if nest is None:
+        nest = m + 2*k
+
+    if (task >= 0 and s == 0) or (nest < 0):
+        if per:
+            nest = m + 2*k
+        else:
+            nest = m + k + 1
+    nest = max(nest, 2*k + 3)
+    u = _parcur_cache['u']
+    ub = _parcur_cache['ub']
+    ue = _parcur_cache['ue']
+    t = _parcur_cache['t']
+    wrk = _parcur_cache['wrk']
+    iwrk = _parcur_cache['iwrk']
+    t, c, o = _fitpack._parcur(ravel(transpose(x)), w, u, ub, ue, k,
+                               task, ipar, s, t, nest, wrk, iwrk, per)
+    _parcur_cache['u'] = o['u']
+    _parcur_cache['ub'] = o['ub']
+    _parcur_cache['ue'] = o['ue']
+    _parcur_cache['t'] = t
+    _parcur_cache['wrk'] = o['wrk']
+    _parcur_cache['iwrk'] = o['iwrk']
+    ier = o['ier']
+    fp = o['fp']
+    n = len(t)
+    u = o['u']
+    c.shape = idim, n - k - 1
+    tcku = [t, list(c), k], u
+    if ier <= 0 and not quiet:
+        warnings.warn(RuntimeWarning(_iermess[ier][0] +
+                                     "\tk=%d n=%d m=%d fp=%f s=%f" %
+                                     (k, len(t), m, fp, s)))
+    if ier > 0 and not full_output:
+        if ier in [1, 2, 3]:
+            warnings.warn(RuntimeWarning(_iermess[ier][0]))
+        else:
+            try:
+                raise _iermess[ier][1](_iermess[ier][0])
+            except KeyError as e:
+                raise _iermess['unknown'][1](_iermess['unknown'][0]) from e
+    if full_output:
+        try:
+            return tcku, fp, ier, _iermess[ier][0]
+        except KeyError:
+            return tcku, fp, ier, _iermess['unknown'][0]
+    else:
+        return tcku
+
+
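+# Module-level workspace cache for splrep (curfit), re-used when task=1.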
+_curfit_cache = {'t': array([], float), 'wrk': array([], float),
+                 'iwrk': array([], dfitpack_int)}
+
+
+def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None,
+           full_output=0, per=0, quiet=1):
+    """
+    Find the B-spline representation of 1-D curve.
+
+    Given the set of data points ``(x[i], y[i])`` determine a smooth spline
+    approximation of degree k on the interval ``xb <= x <= xe``.
+
+    Parameters
+    ----------
+    x, y : array_like
+        The data points defining a curve y = f(x).
+    w : array_like, optional
+        Strictly positive rank-1 array of weights the same length as x and y.
+        The weights are used in computing the weighted least-squares spline
+        fit. If the errors in the y values have standard-deviation given by the
+        vector d, then w should be 1/d. Default is ones(len(x)).
+    xb, xe : float, optional
+        The interval to fit.  If None, these default to x[0] and x[-1]
+        respectively.
+    k : int, optional
+        The degree of the spline fit. It is recommended to use cubic splines.
+        Even values of k should be avoided especially with small s values.
+        1 <= k <= 5
+    task : {1, 0, -1}, optional
+        If task==0 find t and c for a given smoothing factor, s.
+
+        If task==1 find t and c for another value of the smoothing factor, s.
+        There must have been a previous call with task=0 or task=1 for the same
+        set of data (t will be stored and used internally)
+
+        If task=-1 find the weighted least square spline for a given set of
+        knots, t. These should be interior knots as knots on the ends will be
+        added automatically.
+    s : float, optional
+        A smoothing condition. The amount of smoothness is determined by
+        satisfying the conditions: sum((w * (y - g))**2,axis=0) <= s, where g(x)
+        is the smoothed interpolation of (x,y). The user can use s to control
+        the tradeoff between closeness and smoothness of fit. Larger s means
+        more smoothing while smaller values of s indicate less smoothing.
+        Recommended values of s depend on the weights, w. If the weights
+        represent the inverse of the standard-deviation of y, then a good s
+        value should be found in the range (m-sqrt(2*m),m+sqrt(2*m)) where m is
+        the number of datapoints in x, y, and w. default : s=m-sqrt(2*m) if
+        weights are supplied. s = 0.0 (interpolating) if no weights are
+        supplied.
+    t : array_like, optional
+        The knots needed for task=-1. If given then task is automatically set
+        to -1.
+    full_output : bool, optional
+        If non-zero, then return optional outputs.
+    per : bool, optional
+        If non-zero, data points are considered periodic with period x[m-1] -
+        x[0] and a smooth periodic spline approximation is returned. Values of
+        y[m-1] and w[m-1] are not used.
+    quiet : bool, optional
+        Non-zero to suppress messages.
+
+    Returns
+    -------
+    tck : tuple
+        (t,c,k) a tuple containing the vector of knots, the B-spline
+        coefficients, and the degree of the spline.
+    fp : array, optional
+        The weighted sum of squared residuals of the spline approximation.
+    ier : int, optional
+        An integer flag about splrep success. Success is indicated if ier<=0.
+        If ier in [1,2,3] an error occurred but was not raised. Otherwise an
+        error is raised.
+    msg : str, optional
+        A message corresponding to the integer flag, ier.
+
+    See Also
+    --------
+    UnivariateSpline, BivariateSpline
+    splprep, splev, sproot, spalde, splint
+    bisplrep, bisplev
+
+    Notes
+    -----
+    See splev for evaluation of the spline and its derivatives. Uses the
+    FORTRAN routine curfit from FITPACK.
+
+    The user is responsible for assuring that the values of *x* are unique.
+    Otherwise, *splrep* will not return sensible results.
+
+    If provided, knots `t` must satisfy the Schoenberg-Whitney conditions,
+    i.e., there must be a subset of data points ``x[j]`` such that
+    ``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
+
+    References
+    ----------
+    Based on algorithms described in [1]_, [2]_, [3]_, and [4]_:
+
+    .. [1] P. Dierckx, "An algorithm for smoothing, differentiation and
+       integration of experimental data using spline functions",
+       J.Comp.Appl.Maths 1 (1975) 165-184.
+    .. [2] P. Dierckx, "A fast algorithm for smoothing data on a rectangular
+       grid while using spline functions", SIAM J.Numer.Anal. 19 (1982)
+       1286-1304.
+    .. [3] P. Dierckx, "An improved algorithm for curve fitting with spline
+       functions", report tw54, Dept. Computer Science,K.U. Leuven, 1981.
+    .. [4] P. Dierckx, "Curve and surface fitting with splines", Monographs on
+       Numerical Analysis, Oxford University Press, 1993.
+
+    Examples
+    --------
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.interpolate import splev, splrep
+    >>> x = np.linspace(0, 10, 10)
+    >>> y = np.sin(x)
+    >>> tck = splrep(x, y)
+    >>> x2 = np.linspace(0, 10, 200)
+    >>> y2 = splev(x2, tck)
+    >>> plt.plot(x, y, 'o', x2, y2)
+    >>> plt.show()
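+
+    An illustrative check (added here, not part of the original docstring):
+    with no weights the default is ``s=0``, so the spline interpolates the
+    data exactly.
+
+    >>> tck0 = splrep(x, y, s=0)
+    >>> float(np.round(abs(splev(x[3], tck0) - y[3]), 6))
+    0.0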
+
+    """
+    global _curfit_cache
+    if task <= 0:
+        _curfit_cache = {}
+    x, y = map(atleast_1d, [x, y])
+    m = len(x)
+    if w is None:
+        w = ones(m, float)
+        if s is None:
+            s = 0.0
+    else:
+        w = atleast_1d(w)
+        if s is None:
+            s = m - sqrt(2*m)
+    if not len(w) == m:
+        raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))
+    if (m != len(y)) or (m != len(w)):
+        raise TypeError('Lengths of the first three arguments (x,y,w) must '
+                        'be equal')
+    if not (1 <= k <= 5):
+        raise TypeError('Given degree of the spline (k=%d) is not supported. '
+                        '(1<=k<=5)' % k)
+    if m <= k:
+        raise TypeError('m > k must hold')
+    if xb is None:
+        xb = x[0]
+    if xe is None:
+        xe = x[-1]
+    if not (-1 <= task <= 1):
+        raise TypeError('task must be -1, 0 or 1')
+    if t is not None:
+        task = -1
+    if task == -1:
+        if t is None:
+            raise TypeError('Knots must be given for task=-1')
+        numknots = len(t)
+        _curfit_cache['t'] = empty((numknots + 2*k + 2,), float)
+        _curfit_cache['t'][k+1:-k-1] = t
+        nest = len(_curfit_cache['t'])
+    elif task == 0:
+        if per:
+            nest = max(m + 2*k, 2*k + 3)
+        else:
+            nest = max(m + k + 1, 2*k + 3)
+        t = empty((nest,), float)
+        _curfit_cache['t'] = t
+    if task <= 0:
+        if per:
+            _curfit_cache['wrk'] = empty((m*(k + 1) + nest*(8 + 5*k),), float)
+        else:
+            _curfit_cache['wrk'] = empty((m*(k + 1) + nest*(7 + 3*k),), float)
+        _curfit_cache['iwrk'] = empty((nest,), dfitpack_int)
+    try:
+        t = _curfit_cache['t']
+        wrk = _curfit_cache['wrk']
+        iwrk = _curfit_cache['iwrk']
+    except KeyError as e:
+        raise TypeError("must call with task=1 only after"
+                        " call with task=0,-1") from e
+    if not per:
+        n, c, fp, ier = dfitpack.curfit(task, x, y, w, t, wrk, iwrk,
+                                        xb, xe, k, s)
+    else:
+        n, c, fp, ier = dfitpack.percur(task, x, y, w, t, wrk, iwrk, k, s)
+    tck = (t[:n], c[:n], k)
+    if ier <= 0 and not quiet:
+        _mess = (_iermess[ier][0] + "\tk=%d n=%d m=%d fp=%f s=%f" %
+                 (k, len(t), m, fp, s))
+        warnings.warn(RuntimeWarning(_mess))
+    if ier > 0 and not full_output:
+        if ier in [1, 2, 3]:
+            warnings.warn(RuntimeWarning(_iermess[ier][0]))
+        else:
+            try:
+                raise _iermess[ier][1](_iermess[ier][0])
+            except KeyError as e:
+                raise _iermess['unknown'][1](_iermess['unknown'][0]) from e
+    if full_output:
+        try:
+            return tck, fp, ier, _iermess[ier][0]
+        except KeyError:
+            return tck, fp, ier, _iermess['unknown'][0]
+    else:
+        return tck
+
+
+def splev(x, tck, der=0, ext=0):
+    """
+    Evaluate a B-spline or its derivatives.
+
+    Given the knots and coefficients of a B-spline representation, evaluate
+    the value of the smoothing polynomial and its derivatives. This is a
+    wrapper around the FORTRAN routines splev and splder of FITPACK.
+
+    Parameters
+    ----------
+    x : array_like
+        An array of points at which to return the value of the smoothed
+        spline or its derivatives. If `tck` was returned from `splprep`,
+        then the parameter values, u should be given.
+    tck : tuple
+        A sequence of length 3 returned by `splrep` or `splprep` containing
+        the knots, coefficients, and degree of the spline.
+    der : int, optional
+        The order of derivative of the spline to compute (must be less than
+        or equal to k).
+    ext : int, optional
+        Controls the value returned for elements of ``x`` not in the
+        interval defined by the knot sequence.
+
+        * if ext=0, return the extrapolated value.
+        * if ext=1, return 0
+        * if ext=2, raise a ValueError
+        * if ext=3, return the boundary value.
+
+        The default value is 0.
+
+    Returns
+    -------
+    y : ndarray or list of ndarrays
+        An array of values representing the spline function evaluated at
+        the points in ``x``.  If `tck` was returned from `splprep`, then this
+        is a list of arrays representing the curve in N-D space.
+
+    See Also
+    --------
+    splprep, splrep, sproot, spalde, splint
+    bisplrep, bisplev
+
+    References
+    ----------
+    .. [1] C. de Boor, "On calculating with b-splines", J. Approximation
+        Theory, 6, p.50-62, 1972.
+    .. [2] M.G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
+        Applics, 10, p.134-149, 1972.
+    .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
+        on Numerical Analysis, Oxford University Press, 1993.
+
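+    Examples
+    --------
+    A minimal usage sketch (added for illustration, not from the original
+    source): evaluate an interpolating spline of ``sin(x)`` at new points.
+
+    >>> import numpy as np
+    >>> from scipy.interpolate import splrep, splev
+    >>> x = np.linspace(0, 2*np.pi, 30)
+    >>> tck = splrep(x, np.sin(x))
+    >>> np.allclose(splev([1.0, 2.0], tck), np.sin([1.0, 2.0]), atol=1e-4)
+    True
+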
+    """
+    t, c, k = tck
+    try:
+        c[0][0]
+        parametric = True
+    except Exception:
+        parametric = False
+    if parametric:
+        return list(map(lambda c, x=x, t=t, k=k, der=der:
+                        splev(x, [t, c, k], der, ext), c))
+    else:
+        if not (0 <= der <= k):
+            raise ValueError("0<=der=%d<=k=%d must hold" % (der, k))
+        if ext not in (0, 1, 2, 3):
+            raise ValueError("ext = %s not in (0, 1, 2, 3) " % ext)
+
+        x = asarray(x)
+        shape = x.shape
+        x = atleast_1d(x).ravel()
+        y, ier = _fitpack._spl_(x, der, t, c, k, ext)
+
+        if ier == 10:
+            raise ValueError("Invalid input data")
+        if ier == 1:
+            raise ValueError("Found x value not in the domain")
+        if ier:
+            raise TypeError("An error occurred")
+
+        return y.reshape(shape)
+
+
+def splint(a, b, tck, full_output=0):
+    """
+    Evaluate the definite integral of a B-spline.
+
+    Given the knots and coefficients of a B-spline, evaluate the definite
+    integral of the smoothing polynomial between two given points.
+
+    Parameters
+    ----------
+    a, b : float
+        The end-points of the integration interval.
+    tck : tuple
+        A tuple (t,c,k) containing the vector of knots, the B-spline
+        coefficients, and the degree of the spline (see `splev`).
+    full_output : int, optional
+        Non-zero to return optional output.
+
+    Returns
+    -------
+    integral : float
+        The resulting integral.
+    wrk : ndarray
+        An array containing the integrals of the normalized B-splines
+        defined on the set of knots.
+
+    Notes
+    -----
+    splint silently assumes that the spline function is zero outside the data
+    interval (a, b).
+
+    See Also
+    --------
+    splprep, splrep, sproot, spalde, splev
+    bisplrep, bisplev
+    UnivariateSpline, BivariateSpline
+
+    References
+    ----------
+    .. [1] P.W. Gaffney, "The calculation of indefinite integrals of b-splines",
+        J. Inst. Maths Applics, 17, p.37-41, 1976.
+    .. [2] P. Dierckx, "Curve and surface fitting with splines", Monographs
+        on Numerical Analysis, Oxford University Press, 1993.
+
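+    Examples
+    --------
+    A short sketch (added for illustration): linear data are reproduced
+    exactly by the interpolating spline, so the integral of a spline
+    through ``2*x`` over [0, 1] is 1.
+
+    >>> import numpy as np
+    >>> from scipy.interpolate import splrep, splint
+    >>> x = np.linspace(0, 1, 20)
+    >>> tck = splrep(x, 2*x)
+    >>> float(np.round(splint(0, 1, tck), 6))
+    1.0
+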
+    """
+    t, c, k = tck
+    try:
+        c[0][0]
+        parametric = True
+    except Exception:
+        parametric = False
+    if parametric:
+        return list(map(lambda c, a=a, b=b, t=t, k=k:
+                        splint(a, b, [t, c, k]), c))
+    else:
+        aint, wrk = dfitpack.splint(t, c, k, a, b)
+        if full_output:
+            return aint, wrk
+        else:
+            return aint
+
+
+def sproot(tck, mest=10):
+    """
+    Find the roots of a cubic B-spline.
+
+    Given the knots (>=8) and coefficients of a cubic B-spline return the
+    roots of the spline.
+
+    Parameters
+    ----------
+    tck : tuple
+        A tuple (t,c,k) containing the vector of knots,
+        the B-spline coefficients, and the degree of the spline.
+        The number of knots must be >= 8, and the degree must be 3.
+        The knots must be a monotonically increasing sequence.
+    mest : int, optional
+        An estimate of the number of zeros (Default is 10).
+
+    Returns
+    -------
+    zeros : ndarray
+        An array giving the roots of the spline.
+
+    See Also
+    --------
+    splprep, splrep, splint, spalde, splev
+    bisplrep, bisplev
+    UnivariateSpline, BivariateSpline
+
+
+    References
+    ----------
+    .. [1] C. de Boor, "On calculating with b-splines", J. Approximation
+        Theory, 6, p.50-62, 1972.
+    .. [2] M.G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
+        Applics, 10, p.134-149, 1972.
+    .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
+        on Numerical Analysis, Oxford University Press, 1993.
+
+    """
+    t, c, k = tck
+    if k != 3:
+        raise ValueError("sproot works only for cubic (k=3) splines")
+    try:
+        c[0][0]
+        parametric = True
+    except Exception:
+        parametric = False
+    if parametric:
+        return list(map(lambda c, t=t, k=k, mest=mest:
+                        sproot([t, c, k], mest), c))
+    else:
+        if len(t) < 8:
+            raise TypeError("The number of knots must be >= 8 (got %d)" % len(t))
+        z, m, ier = dfitpack.sproot(t, c, mest)
+        if ier == 10:
+            raise TypeError("Invalid input data. "
+                            "t1<=..<=t4<t5<..<tn-3<=..<=tn must hold.")
+        if ier == 0:
+            return z[:m]
+        if ier == 1:
+            warnings.warn(RuntimeWarning("The number of zeros exceeds mest"))
+            return z[:m]
+        raise TypeError("Unknown error")
+
+
+def spalde(x, tck):
+    """
+    Evaluate all derivatives of a B-spline.
+
+    Given the knots and coefficients of a cubic B-spline compute all
+    derivatives up to order k at a point (or set of points).
+
+    Parameters
+    ----------
+    x : array_like
+        A point or a set of points at which to evaluate the derivatives.
+        Note that ``t(k) <= x <= t(n-k+1)`` must hold for each `x`.
+    tck : tuple
+        A tuple (t,c,k) containing the vector of knots,
+        the B-spline coefficients, and the degree of the spline.
+
+    Returns
+    -------
+    results : {ndarray, list of ndarrays}
+        An array (or a list of arrays) containing all derivatives
+        up to order k inclusive for each point `x`.
+
+    See Also
+    --------
+    splprep, splrep, splint, sproot, splev, bisplrep, bisplev
+    UnivariateSpline, BivariateSpline
+
+    References
+    ----------
+    .. [1] C. de Boor, "On calculating with b-splines", J. Approximation
+        Theory, 6, p.50-62, 1972.
+    .. [2] M.G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
+        Applics, 10, p.134-149, 1972.
+    .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
+        on Numerical Analysis, Oxford University Press, 1993.
+
+    """
+    t, c, k = tck
+    try:
+        c[0][0]
+        parametric = True
+    except Exception:
+        parametric = False
+    if parametric:
+        return list(map(lambda c, x=x, t=t, k=k:
+                        spalde(x, [t, c, k]), c))
+    else:
+        x = atleast_1d(x)
+        if len(x) > 1:
+            return list(map(lambda x, tck=tck: spalde(x, tck), x))
+        d, ier = dfitpack.spalde(t, c, k+1, x[0])
+        if ier == 0:
+            return d
+        if ier == 10:
+            raise TypeError("Invalid input data. t(k)<=x<=t(n-k+1) must hold.")
+        raise TypeError("Unknown error")
+
+# def _curfit(x,y,w=None,xb=None,xe=None,k=3,task=0,s=None,t=None,
+#           full_output=0,nest=None,per=0,quiet=1):
+
+
+_surfit_cache = {'tx': array([], float), 'ty': array([], float),
+                 'wrk': array([], float), 'iwrk': array([], dfitpack_int)}
+
+
+def bisplrep(x, y, z, w=None, xb=None, xe=None, yb=None, ye=None,
+             kx=3, ky=3, task=0, s=None, eps=1e-16, tx=None, ty=None,
+             full_output=0, nxest=None, nyest=None, quiet=1):
+    """
+    Find a bivariate B-spline representation of a surface.
+
+    Given a set of data points (x[i], y[i], z[i]) representing a surface
+    z=f(x,y), compute a B-spline representation of the surface. Based on
+    the routine SURFIT from FITPACK.
+
+    Parameters
+    ----------
+    x, y, z : ndarray
+        Rank-1 arrays of data points.
+    w : ndarray, optional
+        Rank-1 array of weights. By default ``w=np.ones(len(x))``.
+    xb, xe : float, optional
+        End points of approximation interval in `x`.
+        By default ``xb = x.min(), xe=x.max()``.
+    yb, ye : float, optional
+        End points of approximation interval in `y`.
+        By default ``yb=y.min(), ye = y.max()``.
+    kx, ky : int, optional
+        The degrees of the spline (1 <= kx, ky <= 5).
+        Third order (kx=ky=3) is recommended.
+    task : int, optional
+        If task=0, find knots in x and y and coefficients for a given
+        smoothing factor, s.
+        If task=1, find knots and coefficients for another value of the
+        smoothing factor, s.  bisplrep must have been previously called
+        with task=0 or task=1.
+        If task=-1, find coefficients for a given set of knots tx, ty.
+    s : float, optional
+        A non-negative smoothing factor. If weights correspond
+        to the inverse of the standard-deviation of the errors in z,
+        then a good s-value should be found in the range
+        ``(m-sqrt(2*m),m+sqrt(2*m))`` where m=len(x).
+    eps : float, optional
+        A threshold for determining the effective rank of an
+        over-determined linear system of equations (0 < eps < 1).
+        `eps` is not likely to need changing.
+    tx, ty : ndarray, optional
+        Rank-1 arrays of the knots of the spline for task=-1
+    full_output : int, optional
+        Non-zero to return optional outputs.
+    nxest, nyest : int, optional
+        Over-estimates of the total number of knots. If None then
+        ``nxest = max(kx+sqrt(m/2),2*kx+3)``,
+        ``nyest = max(ky+sqrt(m/2),2*ky+3)``.
+    quiet : int, optional
+        Non-zero to suppress printing of messages.
+
+    Returns
+    -------
+    tck : array_like
+        A list [tx, ty, c, kx, ky] containing the knots (tx, ty) and
+        coefficients (c) of the bivariate B-spline representation of the
+        surface along with the degree of the spline.
+    fp : ndarray
+        The weighted sum of squared residuals of the spline approximation.
+    ier : int
+        An integer flag about splrep success. Success is indicated if
+        ier<=0. If ier in [1,2,3] an error occurred but was not raised.
+        Otherwise an error is raised.
+    msg : str
+        A message corresponding to the integer flag, ier.
+
+    See Also
+    --------
+    splprep, splrep, splint, sproot, splev
+    UnivariateSpline, BivariateSpline
+
+    Notes
+    -----
+    See `bisplev` to evaluate the value of the B-spline given its tck
+    representation.
+
+    If the input data is such that input dimensions have incommensurate
+    units and differ by many orders of magnitude, the interpolant may have
+    numerical artifacts. Consider rescaling the data before interpolation.
+
+    References
+    ----------
+    .. [1] Dierckx P.: An algorithm for surface fitting with spline functions,
+       Ima J. Numer. Anal. 1 (1981) 267-283.
+    .. [2] Dierckx P.: An algorithm for surface fitting with spline functions,
+       report tw50, Dept. Computer Science, K.U.Leuven, 1980.
+    .. [3] Dierckx P.:Curve and surface fitting with splines, Monographs on
+       Numerical Analysis, Oxford University Press, 1993.
+
+    Examples
+    --------
+    Examples are given :ref:`in the tutorial <tutorial-interpolate_2d_spline>`.
+
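+    A hedged sketch (added here, not part of the original docstring): fit a
+    smoothed surface to samples of a Gaussian bump and evaluate it on a
+    coarse grid.
+
+    >>> import numpy as np
+    >>> from scipy.interpolate import bisplrep, bisplev
+    >>> xg, yg = np.mgrid[-1:1:20j, -1:1:20j]
+    >>> z = np.exp(-(xg**2 + yg**2))
+    >>> tck = bisplrep(xg.ravel(), yg.ravel(), z.ravel(), s=0.1)
+    >>> bisplev(np.linspace(-1, 1, 5), np.linspace(-1, 1, 5), tck).shape
+    (5, 5)
+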
+    """
+    x, y, z = map(ravel, [x, y, z])  # ensure 1-d arrays.
+    m = len(x)
+    if not (m == len(y) == len(z)):
+        raise TypeError('len(x)==len(y)==len(z) must hold.')
+    if w is None:
+        w = ones(m, float)
+    else:
+        w = atleast_1d(w)
+    if not len(w) == m:
+        raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))
+    if xb is None:
+        xb = x.min()
+    if xe is None:
+        xe = x.max()
+    if yb is None:
+        yb = y.min()
+    if ye is None:
+        ye = y.max()
+    if not (-1 <= task <= 1):
+        raise TypeError('task must be -1, 0 or 1')
+    if s is None:
+        s = m - sqrt(2*m)
+    if tx is None and task == -1:
+        raise TypeError('Knots_x must be given for task=-1')
+    if tx is not None:
+        _surfit_cache['tx'] = atleast_1d(tx)
+    nx = len(_surfit_cache['tx'])
+    if ty is None and task == -1:
+        raise TypeError('Knots_y must be given for task=-1')
+    if ty is not None:
+        _surfit_cache['ty'] = atleast_1d(ty)
+    ny = len(_surfit_cache['ty'])
+    if task == -1 and nx < 2*kx+2:
+        raise TypeError('There must be at least 2*kx+2 knots_x for task=-1')
+    if task == -1 and ny < 2*ky+2:
+        raise TypeError('There must be at least 2*ky+2 knots_y for task=-1')
+    if not ((1 <= kx <= 5) and (1 <= ky <= 5)):
+        raise TypeError('Given degree of the spline (kx,ky=%d,%d) is not '
+                        'supported. (1<=k<=5)' % (kx, ky))
+    if m < (kx + 1)*(ky + 1):
+        raise TypeError('m >= (kx+1)(ky+1) must hold')
+    if nxest is None:
+        nxest = int(kx + sqrt(m/2))
+    if nyest is None:
+        nyest = int(ky + sqrt(m/2))
+    nxest, nyest = max(nxest, 2*kx + 3), max(nyest, 2*ky + 3)
+    if task >= 0 and s == 0:
+        nxest = int(kx + sqrt(3*m))
+        nyest = int(ky + sqrt(3*m))
+    if task == -1:
+        _surfit_cache['tx'] = atleast_1d(tx)
+        _surfit_cache['ty'] = atleast_1d(ty)
+    tx, ty = _surfit_cache['tx'], _surfit_cache['ty']
+    wrk = _surfit_cache['wrk']
+    u = nxest - kx - 1
+    v = nyest - ky - 1
+    km = max(kx, ky) + 1
+    ne = max(nxest, nyest)
+    bx, by = kx*v + ky + 1, ky*u + kx + 1
+    b1, b2 = bx, bx + v - ky
+    if bx > by:
+        b1, b2 = by, by + u - kx
+    msg = "Too many data points to interpolate"
+    lwrk1 = _int_overflow(u*v*(2 + b1 + b2) +
+                          2*(u + v + km*(m + ne) + ne - kx - ky) + b2 + 1,
+                          msg=msg)
+    lwrk2 = _int_overflow(u*v*(b2 + 1) + b2, msg=msg)
+    tx, ty, c, o = _fitpack._surfit(x, y, z, w, xb, xe, yb, ye, kx, ky,
+                                    task, s, eps, tx, ty, nxest, nyest,
+                                    wrk, lwrk1, lwrk2)
+    _surfit_cache['tx'] = tx
+    _surfit_cache['ty'] = ty
+    _surfit_cache['wrk'] = o['wrk']
+    ier, fp = o['ier'], o['fp']
+    tck = [tx, ty, c, kx, ky]
+
+    ierm = min(11, max(-3, ier))
+    if ierm <= 0 and not quiet:
+        _mess = (_iermess2[ierm][0] +
+                 "\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" %
+                 (kx, ky, len(tx), len(ty), m, fp, s))
+        warnings.warn(RuntimeWarning(_mess))
+    if ierm > 0 and not full_output:
+        if ier in [1, 2, 3, 4, 5]:
+            _mess = ("\n\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" %
+                     (kx, ky, len(tx), len(ty), m, fp, s))
+            warnings.warn(RuntimeWarning(_iermess2[ierm][0] + _mess))
+        else:
+            try:
+                raise _iermess2[ierm][1](_iermess2[ierm][0])
+            except KeyError as e:
+                raise _iermess2['unknown'][1](_iermess2['unknown'][0]) from e
+    if full_output:
+        try:
+            return tck, fp, ier, _iermess2[ierm][0]
+        except KeyError:
+            return tck, fp, ier, _iermess2['unknown'][0]
+    else:
+        return tck
+
+
+def bisplev(x, y, tck, dx=0, dy=0):
+    """
+    Evaluate a bivariate B-spline and its derivatives.
+
+    Return a rank-2 array of spline function values (or spline derivative
+    values) at points given by the cross-product of the rank-1 arrays `x` and
+    `y`.  In special cases, return an array or just a float if either `x` or
+    `y` or both are floats.  Based on BISPEV and PARDER from FITPACK.
+
+    Parameters
+    ----------
+    x, y : ndarray
+        Rank-1 arrays specifying the domain over which to evaluate the
+        spline or its derivative.
+    tck : tuple
+        A sequence of length 5 returned by `bisplrep` containing the knot
+        locations, the coefficients, and the degree of the spline:
+        [tx, ty, c, kx, ky].
+    dx, dy : int, optional
+        The orders of the partial derivatives in `x` and `y` respectively.
+
+    Returns
+    -------
+    vals : ndarray
+        The B-spline or its derivative evaluated over the set formed by
+        the cross-product of `x` and `y`.
+
+    See Also
+    --------
+    splprep, splrep, splint, sproot, splev
+    UnivariateSpline, BivariateSpline
+
+    Notes
+    -----
+        See `bisplrep` to generate the `tck` representation.
+
+    References
+    ----------
+    .. [1] Dierckx P. : An algorithm for surface fitting
+       with spline functions
+       Ima J. Numer. Anal. 1 (1981) 267-283.
+    .. [2] Dierckx P. : An algorithm for surface fitting
+       with spline functions
+       report tw50, Dept. Computer Science,K.U.Leuven, 1980.
+    .. [3] Dierckx P. : Curve and surface fitting with splines,
+       Monographs on Numerical Analysis, Oxford University Press, 1993.
+
+    Examples
+    --------
+    Examples are given :ref:`in the tutorial <tutorial-interpolate_2d_spline>`.
+
+    """
+    tx, ty, c, kx, ky = tck
+    if not (0 <= dx < kx):
+        raise ValueError("0 <= dx = %d < kx = %d must hold" % (dx, kx))
+    if not (0 <= dy < ky):
+        raise ValueError("0 <= dy = %d < ky = %d must hold" % (dy, ky))
+    x, y = map(atleast_1d, [x, y])
+    if (len(x.shape) != 1) or (len(y.shape) != 1):
+        raise ValueError("First two entries should be rank-1 arrays.")
+    z, ier = _fitpack._bispev(tx, ty, c, kx, ky, x, y, dx, dy)
+    if ier == 10:
+        raise ValueError("Invalid input data")
+    if ier:
+        raise TypeError("An error occurred")
+    z.shape = len(x), len(y)
+    if len(z) > 1:
+        return z
+    if len(z[0]) > 1:
+        return z[0]
+    return z[0][0]
+
+
+def dblint(xa, xb, ya, yb, tck):
+    """Evaluate the integral of a spline over area [xa,xb] x [ya,yb].
+
+    Parameters
+    ----------
+    xa, xb : float
+        The end-points of the x integration interval.
+    ya, yb : float
+        The end-points of the y integration interval.
+    tck : list [tx, ty, c, kx, ky]
+        A sequence of length 5 returned by bisplrep containing the knot
+        locations tx, ty, the coefficients c, and the degrees kx, ky
+        of the spline.
+
+    Returns
+    -------
+    integ : float
+        The value of the resulting integral.
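+
+    Examples
+    --------
+    A small sketch (added for illustration; the import path assumes this
+    private module): a spline fit of the plane ``z = x + y`` integrates
+    to 1 over the unit square.
+
+    >>> import numpy as np
+    >>> from scipy.interpolate import bisplrep
+    >>> from scipy.interpolate._fitpack_impl import dblint
+    >>> xg, yg = np.mgrid[0:1:10j, 0:1:10j]
+    >>> tck = bisplrep(xg.ravel(), yg.ravel(), (xg + yg).ravel(), s=0)
+    >>> float(np.round(dblint(0, 1, 0, 1, tck), 6))
+    1.0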
+    """
+    tx, ty, c, kx, ky = tck
+    return dfitpack.dblint(tx, ty, c, kx, ky, xa, xb, ya, yb)
+
+
+def insert(x, tck, m=1, per=0):
+    """
+    Insert knots into a B-spline.
+
+    Given the knots and coefficients of a B-spline representation, create a
+    new B-spline with a knot inserted `m` times at point `x`.
+    This is a wrapper around the FORTRAN routine insert of FITPACK.
+
+    Parameters
+    ----------
+    x (u) : array_like
+        A 1-D point at which to insert a new knot(s).  If `tck` was returned
+        from ``splprep``, then the parameter values, u should be given.
+    tck : tuple
+        A tuple (t,c,k) returned by ``splrep`` or ``splprep`` containing
+        the vector of knots, the B-spline coefficients,
+        and the degree of the spline.
+    m : int, optional
+        The number of times to insert the given knot (its multiplicity).
+        Default is 1.
+    per : int, optional
+        If non-zero, the input spline is considered periodic.
+
+    Returns
+    -------
+    tck : tuple
+        A tuple (t,c,k) containing the vector of knots, the B-spline
+        coefficients, and the degree of the new spline.
+        ``t(k+1) <= x <= t(n-k)``, where k is the degree of the spline.
+        In case of a periodic spline (``per != 0``) there must be
+        either at least k interior knots t(j) satisfying ``t(k+1)<t(j)<=x``
+        or at least k interior knots t(j) satisfying ``x<=t(j)<t(n-k)``.
+
+    Notes
+    -----
+    Based on algorithms from [1]_ and [2]_.
+
+    References
+    ----------
+    .. [1] W. Boehm, "Inserting new knots into b-spline curves.",
+        Computer Aided Design, 12, p.199-201, 1980.
+    .. [2] P. Dierckx, "Curve and surface fitting with splines, Monographs on
+        Numerical Analysis", Oxford University Press, 1993.
+
+    """
+    t, c, k = tck
+    try:
+        c[0][0]
+        parametric = True
+    except Exception:
+        parametric = False
+    if parametric:
+        cc = []
+        for c_vals in c:
+            tt, cc_val, kk = insert(x, [t, c_vals, k], m)
+            cc.append(cc_val)
+        return (tt, cc, kk)
+    else:
+        tt, cc, ier = _fitpack._insert(per, t, c, k, x, m)
+        if ier == 10:
+            raise ValueError("Invalid input data")
+        if ier:
+            raise TypeError("An error occurred")
+        return (tt, cc, k)
+
+
+def splder(tck, n=1):
+    """
+    Compute the spline representation of the derivative of a given spline
+
+    Parameters
+    ----------
+    tck : tuple of (t, c, k)
+        Spline whose derivative to compute
+    n : int, optional
+        Order of derivative to evaluate. Default: 1
+
+    Returns
+    -------
+    tck_der : tuple of (t2, c2, k2)
+        Spline of order k2=k-n representing the derivative
+        of the input spline.
+
+    See Also
+    --------
+    splantider, splev, spalde
+
+    Notes
+    -----
+    .. versionadded:: 0.13.0
+
+    Examples
+    --------
+    This can be used for finding maxima of a curve:
+
+    >>> import numpy as np
+    >>> from scipy.interpolate import splrep, splder, sproot
+    >>> x = np.linspace(0, 10, 70)
+    >>> y = np.sin(x)
+    >>> spl = splrep(x, y, k=4)
+
+    Now, differentiate the spline and find the zeros of the
+    derivative. (NB: `sproot` only works for order 3 splines, so we
+    fit an order 4 spline):
+
+    >>> dspl = splder(spl)
+    >>> sproot(dspl) / np.pi
+    array([ 0.50000001,  1.5       ,  2.49999998])
+
+    This agrees well with roots :math:`\\pi/2 + n\\pi` of
+    :math:`\\cos(x) = \\sin'(x)`.
+
+    """
+    if n < 0:
+        return splantider(tck, -n)
+
+    t, c, k = tck
+
+    if n > k:
+        raise ValueError(("Order of derivative (n = %r) must be <= "
+                          "order of spline (k = %r)") % (n, tck[2]))
+
+    # Extra axes for the trailing dims of the `c` array:
+    sh = (slice(None),) + ((None,)*len(c.shape[1:]))
+
+    with np.errstate(invalid='raise', divide='raise'):
+        try:
+            for j in range(n):
+                # See e.g. Schumaker, Spline Functions: Basic Theory, Chapter 5
+
+                # Compute the denominator in the differentiation formula.
+                # (and append trailing dims, if necessary)
+                dt = t[k+1:-1] - t[1:-k-1]
+                dt = dt[sh]
+                # Compute the new coefficients
+                c = (c[1:-1-k] - c[:-2-k]) * k / dt
+                # Pad coefficient array to same size as knots (FITPACK
+                # convention)
+                c = np.r_[c, np.zeros((k,) + c.shape[1:])]
+                # Adjust knots
+                t = t[1:-1]
+                k -= 1
+        except FloatingPointError as e:
+            raise ValueError(("The spline has internal repeated knots "
+                              "and is not differentiable %d times") % n) from e
+
+    return t, c, k
+
+
+def splantider(tck, n=1):
+    """
+    Compute the spline for the antiderivative (integral) of a given spline.
+
+    Parameters
+    ----------
+    tck : tuple of (t, c, k)
+        Spline whose antiderivative to compute
+    n : int, optional
+        Order of antiderivative to evaluate. Default: 1
+
+    Returns
+    -------
+    tck_ader : tuple of (t2, c2, k2)
+        Spline of order k2=k+n representing the antiderivative of the input
+        spline.
+
+    See Also
+    --------
+    splder, splev, spalde
+
+    Notes
+    -----
+    The `splder` function is the inverse operation of this function.
+    Namely, ``splder(splantider(tck))`` is identical to `tck`, modulo
+    rounding error.
+
+    .. versionadded:: 0.13.0
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.interpolate import splrep, splder, splantider, splev
+    >>> x = np.linspace(0, np.pi/2, 70)
+    >>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
+    >>> spl = splrep(x, y)
+
+    The derivative is the inverse operation of the antiderivative,
+    although some floating point error accumulates:
+
+    >>> splev(1.7, spl), splev(1.7, splder(splantider(spl)))
+    (array(2.1565429877197317), array(2.1565429877201865))
+
+    Antiderivative can be used to evaluate definite integrals:
+
+    >>> ispl = splantider(spl)
+    >>> splev(np.pi/2, ispl) - splev(0, ispl)
+    2.2572053588768486
+
+    This is indeed an approximation to the complete elliptic integral
+    :math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:
+
+    >>> from scipy.special import ellipk
+    >>> ellipk(0.8)
+    2.2572053268208538
+
+    """
+    if n < 0:
+        return splder(tck, -n)
+
+    t, c, k = tck
+
+    # Extra axes for the trailing dims of the `c` array:
+    sh = (slice(None),) + (None,)*len(c.shape[1:])
+
+    for j in range(n):
+        # This is the inverse set of operations to splder.
+
+        # Compute the multiplier in the antiderivative formula.
+        dt = t[k+1:] - t[:-k-1]
+        dt = dt[sh]
+        # Compute the new coefficients
+        c = np.cumsum(c[:-k-1] * dt, axis=0) / (k + 1)
+        c = np.r_[np.zeros((1,) + c.shape[1:]),
+                  c,
+                  [c[-1]] * (k+2)]
+        # New knots
+        t = np.r_[t[0], t, t[-1]]
+        k += 1
+
+    return t, c, k
diff --git a/__packaged__/coreml/.python_dependencies/scipy/interpolate/_fitpack_py.py b/__packaged__/coreml/.python_dependencies/scipy/interpolate/_fitpack_py.py
new file mode 100644
index 00000000..a6fb8bd0
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/interpolate/_fitpack_py.py
@@ -0,0 +1,788 @@
+__all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde',
+           'bisplrep', 'bisplev', 'insert', 'splder', 'splantider']
+
+
+import warnings
+
+import numpy as np
+
+# These are in the API for fitpack even if not used in fitpack.py itself.
+from ._fitpack_impl import bisplrep, bisplev, dblint
+from . import _fitpack_impl as _impl
+from ._bsplines import BSpline
+
+
+def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None,
+            full_output=0, nest=None, per=0, quiet=1):
+    """
+    Find the B-spline representation of an N-D curve.
+
+    Given a list of N rank-1 arrays, `x`, which represent a curve in
+    N-D space parametrized by `u`, find a smooth approximating
+    spline curve g(`u`). Uses the FORTRAN routine parcur from FITPACK.
+
+    Parameters
+    ----------
+    x : array_like
+        A list of sample vector arrays representing the curve.
+    w : array_like, optional
+        Strictly positive rank-1 array of weights the same length as `x[0]`.
+        The weights are used in computing the weighted least-squares spline
+        fit. If the errors in the `x` values have standard-deviation given by
+        the vector d, then `w` should be 1/d. Default is ``ones(len(x[0]))``.
+    u : array_like, optional
+        An array of parameter values. If not given, these values are
+        calculated automatically as ``M = len(x[0])``, where
+
+            v[0] = 0
+
+            v[i] = v[i-1] + distance(`x[i]`, `x[i-1]`)
+
+            u[i] = v[i] / v[M-1]
+
+    ub, ue : float, optional
+        The end-points of the parameters interval.  Defaults to
+        u[0] and u[-1].
+    k : int, optional
+        Degree of the spline. Cubic splines are recommended.
+        Even values of `k` should be avoided especially with a small s-value.
+        ``1 <= k <= 5``, default is 3.
+    task : int, optional
+        If task==0 (default), find t and c for a given smoothing factor, s.
+        If task==1, find t and c for another value of the smoothing factor, s.
+        There must have been a previous call with task=0 or task=1
+        for the same set of data.
+        If task=-1 find the weighted least square spline for a given set of
+        knots, t.
+    s : float, optional
+        A smoothing condition.  The amount of smoothness is determined by
+        satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s``,
+        where g(x) is the smoothed interpolation of (x,y).  The user can
+        use `s` to control the trade-off between closeness and smoothness
+        of fit.  Larger `s` means more smoothing while smaller values of `s`
+        indicate less smoothing. Recommended values of `s` depend on the
+        weights, w.  If the weights represent the inverse of the
+        standard-deviation of y, then a good `s` value should be found in
+        the range ``(m-sqrt(2*m),m+sqrt(2*m))``, where m is the number of
+        data points in x, y, and w.
+    t : array_like, optional
+        The knots needed for task=-1.
+    full_output : int, optional
+        If non-zero, then return optional outputs.
+    nest : int, optional
+        An over-estimate of the total number of knots of the spline to
+        help in determining the storage space.  By default nest=m/2;
+        nest=m+k+1 is always large enough.
+    per : int, optional
+       If non-zero, data points are considered periodic with period
+       ``x[m-1] - x[0]`` and a smooth periodic spline approximation is
+       returned.  Values of ``y[m-1]`` and ``w[m-1]`` are not used.
+    quiet : int, optional
+         Non-zero to suppress messages.
+
+    Returns
+    -------
+    tck : tuple
+        (t,c,k) a tuple containing the vector of knots, the B-spline
+        coefficients, and the degree of the spline.
+    u : array
+        An array of the values of the parameter.
+    fp : float
+        The weighted sum of squared residuals of the spline approximation.
+    ier : int
+        An integer flag about splrep success.  Success is indicated
+        if ier<=0. If ier in [1,2,3] an error occurred but was not raised.
+        Otherwise an error is raised.
+    msg : str
+        A message corresponding to the integer flag, ier.
+
+    See Also
+    --------
+    splrep, splev, sproot, spalde, splint,
+    bisplrep, bisplev
+    UnivariateSpline, BivariateSpline
+    BSpline
+    make_interp_spline
+
+    Notes
+    -----
+    See `splev` for evaluation of the spline and its derivatives.
+    The number of dimensions N must be smaller than 11.
+
+    The number of coefficients in the `c` array is ``k+1`` less than the number
+    of knots, ``len(t)``. This is in contrast with `splrep`, which zero-pads
+    the array of coefficients to have the same length as the array of knots.
+    These additional coefficients are ignored by evaluation routines, `splev`
+    and `BSpline`.
+
+    References
+    ----------
+    .. [1] P. Dierckx, "Algorithms for smoothing data with periodic and
+        parametric splines, Computer Graphics and Image Processing",
+        20 (1982) 171-184.
+    .. [2] P. Dierckx, "Algorithms for smoothing data with periodic and
+        parametric splines", report tw55, Dept. Computer Science,
+        K.U.Leuven, 1981.
+    .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs on
+        Numerical Analysis, Oxford University Press, 1993.
+
+    Examples
+    --------
+    Generate a discretization of a limacon curve in the polar coordinates:
+
+    >>> import numpy as np
+    >>> phi = np.linspace(0, 2.*np.pi, 40)
+    >>> r = 0.5 + np.cos(phi)         # polar coords
+    >>> x, y = r * np.cos(phi), r * np.sin(phi)    # convert to cartesian
+
+    And interpolate:
+
+    >>> from scipy.interpolate import splprep, splev
+    >>> tck, u = splprep([x, y], s=0)
+    >>> new_points = splev(u, tck)
+
+    Notice that (i) we force interpolation by using `s=0`,
+    (ii) the parameterization, ``u``, is generated automatically.
+    Now plot the result:
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots()
+    >>> ax.plot(x, y, 'ro')
+    >>> ax.plot(new_points[0], new_points[1], 'r-')
+    >>> plt.show()
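+
+    As the Notes above describe (a property check added for illustration),
+    `splprep` does not zero-pad the coefficient arrays, so each one has
+    ``k + 1`` fewer entries than the knot array:
+
+    >>> len(tck[1][0]) == len(tck[0]) - (3 + 1)
+    True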
+
+    """
+    res = _impl.splprep(x, w, u, ub, ue, k, task, s, t, full_output, nest, per,
+                        quiet)
+    return res
+
+
+def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None,
+           full_output=0, per=0, quiet=1):
+    """
+    Find the B-spline representation of a 1-D curve.
+
+    Given the set of data points ``(x[i], y[i])`` determine a smooth spline
+    approximation of degree k on the interval ``xb <= x <= xe``.
+
+    Parameters
+    ----------
+    x, y : array_like
+        The data points defining a curve y = f(x).
+    w : array_like, optional
+        Strictly positive rank-1 array of weights the same length as x and y.
+        The weights are used in computing the weighted least-squares spline
+        fit. If the errors in the y values have standard-deviation given by the
+        vector d, then w should be 1/d. Default is ones(len(x)).
+    xb, xe : float, optional
+        The interval to fit.  If None, these default to x[0] and x[-1]
+        respectively.
+    k : int, optional
+        The degree of the spline fit. It is recommended to use cubic splines.
+        Even values of k should be avoided especially with small s values.
+        1 <= k <= 5
+    task : {1, 0, -1}, optional
+        If task==0 find t and c for a given smoothing factor, s.
+
+        If task==1 find t and c for another value of the smoothing factor, s.
+        There must have been a previous call with task=0 or task=1 for the same
+        set of data (t will be stored and used internally)
+
+        If task=-1 find the weighted least square spline for a given set of
+        knots, t. These should be interior knots as knots on the ends will be
+        added automatically.
+    s : float, optional
+        A smoothing condition. The amount of smoothness is determined by
+        satisfying the conditions: sum((w * (y - g))**2,axis=0) <= s where g(x)
+        is the smoothed interpolation of (x,y). The user can use s to control
+        the tradeoff between closeness and smoothness of fit. Larger s means
+        more smoothing while smaller values of s indicate less smoothing.
+        Recommended values of s depend on the weights, w. If the weights
+        represent the inverse of the standard-deviation of y, then a good s
+        value should be found in the range (m-sqrt(2*m),m+sqrt(2*m)) where m is
+        the number of datapoints in x, y, and w. default : s=m-sqrt(2*m) if
+        weights are supplied. s = 0.0 (interpolating) if no weights are
+        supplied.
+    t : array_like, optional
+        The knots needed for task=-1. If given then task is automatically set
+        to -1.
+    full_output : bool, optional
+        If non-zero, then return optional outputs.
+    per : bool, optional
+        If non-zero, data points are considered periodic with period x[m-1] -
+        x[0] and a smooth periodic spline approximation is returned. Values of
+        y[m-1] and w[m-1] are not used.
+    quiet : bool, optional
+        Non-zero to suppress messages.
+
+    Returns
+    -------
+    tck : tuple
+        A tuple (t,c,k) containing the vector of knots, the B-spline
+        coefficients, and the degree of the spline.
+    fp : array, optional
+        The weighted sum of squared residuals of the spline approximation.
+    ier : int, optional
+        An integer flag about splrep success. Success is indicated if ier<=0.
+        If ier in [1,2,3] an error occurred but was not raised. Otherwise an
+        error is raised.
+    msg : str, optional
+        A message corresponding to the integer flag, ier.
+
+    See Also
+    --------
+    UnivariateSpline, BivariateSpline
+    splprep, splev, sproot, spalde, splint
+    bisplrep, bisplev
+    BSpline
+    make_interp_spline
+
+    Notes
+    -----
+    See `splev` for evaluation of the spline and its derivatives. Uses the
+    FORTRAN routine ``curfit`` from FITPACK.
+
+    The user is responsible for assuring that the values of `x` are unique.
+    Otherwise, `splrep` will not return sensible results.
+
+    If provided, knots `t` must satisfy the Schoenberg-Whitney conditions,
+    i.e., there must be a subset of data points ``x[j]`` such that
+    ``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
+
+    This routine zero-pads the coefficients array ``c`` to have the same length
+    as the array of knots ``t`` (the trailing ``k + 1`` coefficients are ignored
+    by the evaluation routines, `splev` and `BSpline`.) This is in contrast with
+    `splprep`, which does not zero-pad the coefficients.
+
+    References
+    ----------
+    Based on algorithms described in [1]_, [2]_, [3]_, and [4]_:
+
+    .. [1] P. Dierckx, "An algorithm for smoothing, differentiation and
+       integration of experimental data using spline functions",
+       J.Comp.Appl.Maths 1 (1975) 165-184.
+    .. [2] P. Dierckx, "A fast algorithm for smoothing data on a rectangular
+       grid while using spline functions", SIAM J.Numer.Anal. 19 (1982)
+       1286-1304.
+    .. [3] P. Dierckx, "An improved algorithm for curve fitting with spline
+       functions", report tw54, Dept. Computer Science,K.U. Leuven, 1981.
+    .. [4] P. Dierckx, "Curve and surface fitting with splines", Monographs on
+       Numerical Analysis, Oxford University Press, 1993.
+
+    Examples
+    --------
+    You can interpolate 1-D points with a B-spline curve.
+    Further examples are given
+    :ref:`in the tutorial <tutorial-interpolate_splXXX>`.
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.interpolate import splev, splrep
+    >>> x = np.linspace(0, 10, 10)
+    >>> y = np.sin(x)
+    >>> spl = splrep(x, y)
+    >>> x2 = np.linspace(0, 10, 200)
+    >>> y2 = splev(x2, spl)
+    >>> plt.plot(x, y, 'o', x2, y2)
+    >>> plt.show()
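+
+    As noted above (a property check added for illustration), the
+    coefficient array is zero-padded to the length of the knot array:
+
+    >>> len(spl[0]) == len(spl[1])
+    True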
+
+    """
+    res = _impl.splrep(x, y, w, xb, xe, k, task, s, t, full_output, per, quiet)
+    return res
+
+
+def splev(x, tck, der=0, ext=0):
+    """
+    Evaluate a B-spline or its derivatives.
+
+    Given the knots and coefficients of a B-spline representation, evaluate
+    the value of the smoothing polynomial and its derivatives. This is a
+    wrapper around the FORTRAN routines splev and splder of FITPACK.
+
+    Parameters
+    ----------
+    x : array_like
+        An array of points at which to return the value of the smoothed
+        spline or its derivatives. If `tck` was returned from `splprep`,
+        then the parameter values, u should be given.
+    tck : 3-tuple or a BSpline object
+        If a tuple, then it should be a sequence of length 3 returned by
+        `splrep` or `splprep` containing the knots, coefficients, and degree
+        of the spline. (Also see Notes.)
+    der : int, optional
+        The order of derivative of the spline to compute (must be less than
+        or equal to k, the degree of the spline).
+    ext : int, optional
+        Controls the value returned for elements of ``x`` not in the
+        interval defined by the knot sequence.
+
+        * if ext=0, return the extrapolated value.
+        * if ext=1, return 0
+        * if ext=2, raise a ValueError
+        * if ext=3, return the boundary value.
+
+        The default value is 0.
+
+    Returns
+    -------
+    y : ndarray or list of ndarrays
+        An array of values representing the spline function evaluated at
+        the points in `x`.  If `tck` was returned from `splprep`, then this
+        is a list of arrays representing the curve in an N-D space.
+
+    Notes
+    -----
+    Manipulating the tck-tuples directly is not recommended. In new code,
+    prefer using `BSpline` objects.
+
+    See Also
+    --------
+    splprep, splrep, sproot, spalde, splint
+    bisplrep, bisplev
+    BSpline
+
+    References
+    ----------
+    .. [1] C. de Boor, "On calculating with b-splines", J. Approximation
+        Theory, 6, p.50-62, 1972.
+    .. [2] M. G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
+        Applics, 10, p.134-149, 1972.
+    .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
+        on Numerical Analysis, Oxford University Press, 1993.
+
+    Examples
+    --------
+    Examples are given :ref:`in the tutorial <tutorial-interpolate_splXXX>`.
+
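+    A minimal sketch (added here, not from the original docs): the
+    interpolating spline of a cubic reproduces it exactly, so the first
+    derivative of ``x**3`` at 1.0 evaluates to 3.
+
+    >>> import numpy as np
+    >>> from scipy.interpolate import splrep, splev
+    >>> x = np.linspace(0, 2, 20)
+    >>> tck = splrep(x, x**3)
+    >>> float(np.round(splev(1.0, tck, der=1), 6))
+    3.0
+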
+    """
+    if isinstance(tck, BSpline):
+        if tck.c.ndim > 1:
+            mesg = ("Calling splev() with BSpline objects with c.ndim > 1 is "
+                    "not allowed. Use BSpline.__call__(x) instead.")
+            raise ValueError(mesg)
+
+        # remap the out-of-bounds behavior
+        try:
+            extrapolate = {0: True}[ext]
+        except KeyError as e:
+            raise ValueError("Extrapolation mode %s is not supported "
+                             "by BSpline." % ext) from e
+
+        return tck(x, der, extrapolate=extrapolate)
+    else:
+        return _impl.splev(x, tck, der, ext)
+
+
+def splint(a, b, tck, full_output=0):
+    """
+    Evaluate the definite integral of a B-spline between two given points.
+
+    Parameters
+    ----------
+    a, b : float
+        The end-points of the integration interval.
+    tck : tuple or a BSpline instance
+        If a tuple, then it should be a sequence of length 3, containing the
+        vector of knots, the B-spline coefficients, and the degree of the
+        spline (see `splev`).
+    full_output : int, optional
+        Non-zero to return optional output.
+
+    Returns
+    -------
+    integral : float
+        The resulting integral.
+    wrk : ndarray
+        An array containing the integrals of the normalized B-splines
+        defined on the set of knots.
+        (Only returned if `full_output` is non-zero)
+
+    Notes
+    -----
+    `splint` silently assumes that the spline function is zero outside the data
+    interval (`a`, `b`).
+
+    Manipulating the tck-tuples directly is not recommended. In new code,
+    prefer using the `BSpline` objects.
+
+    See Also
+    --------
+    splprep, splrep, sproot, spalde, splev
+    bisplrep, bisplev
+    BSpline
+
+    References
+    ----------
+    .. [1] P.W. Gaffney, "The calculation of indefinite integrals of b-splines",
+        J. Inst. Maths Applics, 17, p.37-41, 1976.
+    .. [2] P. Dierckx, "Curve and surface fitting with splines", Monographs
+        on Numerical Analysis, Oxford University Press, 1993.
+
+    Examples
+    --------
+    Examples are given :ref:`in the tutorial <tutorial-interpolate_splXXX>`.
+
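+    A short sketch (added for illustration): quadratics are reproduced
+    exactly by the interpolating spline, so a spline through ``3*x**2``
+    integrates to 1 over [0, 1].
+
+    >>> import numpy as np
+    >>> from scipy.interpolate import splrep, splint
+    >>> x = np.linspace(0, 1, 20)
+    >>> tck = splrep(x, 3*x**2)
+    >>> float(np.round(splint(0, 1, tck), 6))
+    1.0
+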
+    """
+    if isinstance(tck, BSpline):
+        if tck.c.ndim > 1:
+            mesg = ("Calling splint() with BSpline objects with c.ndim > 1 is "
+                    "not allowed. Use BSpline.integrate() instead.")
+            raise ValueError(mesg)
+
+        if full_output != 0:
+            mesg = ("full_output = %s is not supported. Proceeding as if "
+                    "full_output = 0" % full_output)
+            warnings.warn(mesg)
+
+        return tck.integrate(a, b, extrapolate=False)
+    else:
+        return _impl.splint(a, b, tck, full_output)
+
+
+def sproot(tck, mest=10):
+    """
+    Find the roots of a cubic B-spline.
+
+    Given the knots (>=8) and coefficients of a cubic B-spline return the
+    roots of the spline.
+
+    Parameters
+    ----------
+    tck : tuple or a BSpline object
+        If a tuple, then it should be a sequence of length 3, containing the
+        vector of knots, the B-spline coefficients, and the degree of the
+        spline.
+        The number of knots must be >= 8, and the degree must be 3.
+        The knots must be a monotonically increasing sequence.
+    mest : int, optional
+        An estimate of the number of zeros (Default is 10).
+
+    Returns
+    -------
+    zeros : ndarray
+        An array giving the roots of the spline.
+
+    Notes
+    -----
+    Manipulating the tck-tuples directly is not recommended. In new code,
+    prefer using the `BSpline` objects.
+
+    See Also
+    --------
+    splprep, splrep, splint, spalde, splev
+    bisplrep, bisplev
+    BSpline
+
+
+    References
+    ----------
+    .. [1] C. de Boor, "On calculating with b-splines", J. Approximation
+        Theory, 6, p.50-62, 1972.
+    .. [2] M. G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
+        Applics, 10, p.134-149, 1972.
+    .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
+        on Numerical Analysis, Oxford University Press, 1993.
+
+    Examples
+    --------
+
+    For some data, this method may miss a root. This happens when one of
+    the spline knots (which FITPACK places automatically) happens to
+    coincide with the true root. A workaround is to convert to `PPoly`,
+    which uses a different root-finding algorithm.
+
+    For example,
+
+    >>> x = [1.96, 1.97, 1.98, 1.99, 2.00, 2.01, 2.02, 2.03, 2.04, 2.05]
+    >>> y = [-6.365470e-03, -4.790580e-03, -3.204320e-03, -1.607270e-03,
+    ...      4.440892e-16,  1.616930e-03,  3.243000e-03,  4.877670e-03,
+    ...      6.520430e-03,  8.170770e-03]
+    >>> from scipy.interpolate import splrep, sproot, PPoly
+    >>> tck = splrep(x, y, s=0)
+    >>> sproot(tck)
+    array([], dtype=float64)
+
+    Converting to a PPoly object does find the roots at `x=2`:
+
+    >>> ppoly = PPoly.from_spline(tck)
+    >>> ppoly.roots(extrapolate=False)
+    array([2.])
+
+
+    Further examples are given :ref:`in the tutorial
+    <tutorial-interpolate_splXXX>`.
+
+    """
+    if isinstance(tck, BSpline):
+        if tck.c.ndim > 1:
+            mesg = ("Calling sproot() with BSpline objects with c.ndim > 1 is "
+                    "not allowed.")
+            raise ValueError(mesg)
+
+        t, c, k = tck.tck
+
+        # _impl.sproot expects the interpolation axis to be last, so roll it.
+        # NB: This transpose is a no-op if c is 1D.
+        sh = tuple(range(c.ndim))
+        c = c.transpose(sh[1:] + (0,))
+        return _impl.sproot((t, c, k), mest)
+    else:
+        return _impl.sproot(tck, mest)
+
+
+def spalde(x, tck):
+    """
+    Evaluate all derivatives of a B-spline.
+
+    Given the knots and coefficients of a cubic B-spline compute all
+    derivatives up to order k at a point (or set of points).
+
+    Parameters
+    ----------
+    x : array_like
+        A point or a set of points at which to evaluate the derivatives.
+        Note that ``t(k) <= x <= t(n-k+1)`` must hold for each `x`.
+    tck : tuple
+        A tuple ``(t, c, k)``, containing the vector of knots, the B-spline
+        coefficients, and the degree of the spline (see `splev`).
+
+    Returns
+    -------
+    results : {ndarray, list of ndarrays}
+        An array (or a list of arrays) containing all derivatives
+        up to order k inclusive for each point `x`.
+
+    See Also
+    --------
+    splprep, splrep, splint, sproot, splev, bisplrep, bisplev,
+    BSpline
+
+    References
+    ----------
+    .. [1] C. de Boor: On calculating with b-splines, J. Approximation Theory
+       6 (1972) 50-62.
+    .. [2] M. G. Cox : The numerical evaluation of b-splines, J. Inst. Maths
+       applics 10 (1972) 134-149.
+    .. [3] P. Dierckx : Curve and surface fitting with splines, Monographs on
+       Numerical Analysis, Oxford University Press, 1993.
+
+    Examples
+    --------
+    Examples are given :ref:`in the tutorial <tutorial-interpolate_splXXX>`.
+
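+    A minimal sketch (added for illustration): the value and first two
+    derivatives of the interpolating spline of ``x**2`` at ``x = 1``.
+
+    >>> import numpy as np
+    >>> from scipy.interpolate import splrep, spalde
+    >>> x = np.linspace(0, 2, 20)
+    >>> tck = splrep(x, x**2)
+    >>> np.round(spalde(1.0, tck)[:3], 6)
+    array([1., 2., 2.])
+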
+    """
+    if isinstance(tck, BSpline):
+        raise TypeError("spalde does not accept BSpline instances.")
+    else:
+        return _impl.spalde(x, tck)
+
+
+def insert(x, tck, m=1, per=0):
+    """
+    Insert knots into a B-spline.
+
+    Given the knots and coefficients of a B-spline representation, create a
+    new B-spline with a knot inserted `m` times at point `x`.
+    This is a wrapper around the FORTRAN routine insert of FITPACK.
+
+    Parameters
+    ----------
+    x (u) : array_like
+        A 1-D point at which to insert a new knot(s).  If `tck` was returned
+        from ``splprep``, then the parameter values, u should be given.
+    tck : a `BSpline` instance or a tuple
+        If tuple, then it is expected to be a tuple (t,c,k) containing
+        the vector of knots, the B-spline coefficients, and the degree of
+        the spline.
+    m : int, optional
+        The number of times to insert the given knot (its multiplicity).
+        Default is 1.
+    per : int, optional
+        If non-zero, the input spline is considered periodic.
+
+    Returns
+    -------
+    BSpline instance or a tuple
+        A new B-spline with knots t, coefficients c, and degree k.
+        ``t(k+1) <= x <= t(n-k)``, where k is the degree of the spline.
+        In case of a periodic spline (``per != 0``) there must be
+        either at least k interior knots t(j) satisfying ``t(k+1)<t(j)<=x``
+        or at least k interior knots t(j) satisfying ``x<=t(j)<t(n-k)``.
+        A tuple is returned iff the input argument `tck` is a tuple, otherwise
+        a BSpline object is constructed and returned.
+
+    Notes
+    -----
+    Based on algorithms from [1]_ and [2]_.
+
+    Manipulating the tck-tuples directly is not recommended. In new code,
+    prefer using the `BSpline` objects.
+
+    References
+    ----------
+    .. [1] W. Boehm, "Inserting new knots into b-spline curves.",
+        Computer Aided Design, 12, p.199-201, 1980.
+    .. [2] P. Dierckx, "Curve and surface fitting with splines, Monographs on
+        Numerical Analysis", Oxford University Press, 1993.
+
+    Examples
+    --------
+    You can insert knots into a B-spline:
+
+    >>> from scipy.interpolate import splrep, insert
+    >>> import numpy as np
+    >>> x = np.linspace(0, 10, 5)
+    >>> y = np.sin(x)
+    >>> tck = splrep(x, y)
+    >>> tck[0]
+    array([ 0.,  0.,  0.,  0.,  5., 10., 10., 10., 10.])
+
+    A knot is inserted:
+
+    >>> tck_inserted = insert(3, tck)
+    >>> tck_inserted[0]
+    array([ 0.,  0.,  0.,  0.,  3.,  5., 10., 10., 10., 10.])
+
+    Some knots are inserted:
+
+    >>> tck_inserted2 = insert(8, tck, m=3)
+    >>> tck_inserted2[0]
+    array([ 0.,  0.,  0.,  0.,  5.,  8.,  8.,  8., 10., 10., 10., 10.])
+
+    """
+    if isinstance(tck, BSpline):
+
+        t, c, k = tck.tck
+
+        # FITPACK expects the interpolation axis to be last, so roll it over
+        # NB: if c array is 1D, transposes are no-ops
+        sh = tuple(range(c.ndim))
+        c = c.transpose(sh[1:] + (0,))
+        t_, c_, k_ = _impl.insert(x, (t, c, k), m, per)
+
+        # and roll the last axis back
+        c_ = np.asarray(c_)
+        c_ = c_.transpose((sh[-1],) + sh[:-1])
+        return BSpline(t_, c_, k_)
+    else:
+        return _impl.insert(x, tck, m, per)
+
+
+def splder(tck, n=1):
+    """
+    Compute the spline representation of the derivative of a given spline
+
+    Parameters
+    ----------
+    tck : BSpline instance or a tuple of (t, c, k)
+        Spline whose derivative to compute
+    n : int, optional
+        Order of derivative to evaluate. Default: 1
+
+    Returns
+    -------
+    `BSpline` instance or tuple
+        Spline of order k2=k-n representing the derivative
+        of the input spline.
+        A tuple is returned iff the input argument `tck` is a tuple, otherwise
+        a BSpline object is constructed and returned.
+
+    Notes
+    -----
+
+    .. versionadded:: 0.13.0
+
+    See Also
+    --------
+    splantider, splev, spalde
+    BSpline
+
+    Examples
+    --------
+    This can be used for finding maxima of a curve:
+
+    >>> from scipy.interpolate import splrep, splder, sproot
+    >>> import numpy as np
+    >>> x = np.linspace(0, 10, 70)
+    >>> y = np.sin(x)
+    >>> spl = splrep(x, y, k=4)
+
+    Now, differentiate the spline and find the zeros of the
+    derivative. (NB: `sproot` only works for order 3 splines, so we
+    fit an order 4 spline):
+
+    >>> dspl = splder(spl)
+    >>> sproot(dspl) / np.pi
+    array([ 0.50000001,  1.5       ,  2.49999998])
+
+    This agrees well with roots :math:`\\pi/2 + n\\pi` of
+    :math:`\\cos(x) = \\sin'(x)`.
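+
+    As a minimal sketch (reusing ``spl`` from above), a `BSpline` input
+    returns a `BSpline` derivative instead of a tuple:
+
+    >>> from scipy.interpolate import BSpline
+    >>> isinstance(splder(BSpline(*spl)), BSpline)
+    True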
+
+    """
+    if isinstance(tck, BSpline):
+        return tck.derivative(n)
+    else:
+        return _impl.splder(tck, n)
+
+
+def splantider(tck, n=1):
+    """
+    Compute the spline for the antiderivative (integral) of a given spline.
+
+    Parameters
+    ----------
+    tck : BSpline instance or a tuple of (t, c, k)
+        Spline whose antiderivative to compute
+    n : int, optional
+        Order of antiderivative to evaluate. Default: 1
+
+    Returns
+    -------
+    BSpline instance or a tuple of (t2, c2, k2)
+        Spline of order k2=k+n representing the antiderivative of the input
+        spline.
+        A tuple is returned iff the input argument `tck` is a tuple, otherwise
+        a BSpline object is constructed and returned.
+
+    See Also
+    --------
+    splder, splev, spalde
+    BSpline
+
+    Notes
+    -----
+    The `splder` function is the inverse operation of this function.
+    Namely, ``splder(splantider(tck))`` is identical to `tck`, modulo
+    rounding error.
+
+    .. versionadded:: 0.13.0
+
+    Examples
+    --------
+    >>> from scipy.interpolate import splrep, splder, splantider, splev
+    >>> import numpy as np
+    >>> x = np.linspace(0, np.pi/2, 70)
+    >>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
+    >>> spl = splrep(x, y)
+
+    The derivative is the inverse operation of the antiderivative,
+    although some floating point error accumulates:
+
+    >>> splev(1.7, spl), splev(1.7, splder(splantider(spl)))
+    (array(2.1565429877197317), array(2.1565429877201865))
+
+    Antiderivative can be used to evaluate definite integrals:
+
+    >>> ispl = splantider(spl)
+    >>> splev(np.pi/2, ispl) - splev(0, ispl)
+    2.2572053588768486
+
+    This is indeed an approximation to the complete elliptic integral
+    :math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:
+
+    >>> from scipy.special import ellipk
+    >>> ellipk(0.8)
+    2.2572053268208538
+
+    """
+    if isinstance(tck, BSpline):
+        return tck.antiderivative(n)
+    else:
+        return _impl.splantider(tck, n)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/interpolate/_interpnd_info.py b/__packaged__/coreml/.python_dependencies/scipy/interpolate/_interpnd_info.py
new file mode 100644
index 00000000..3537c95e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/interpolate/_interpnd_info.py
@@ -0,0 +1,37 @@
+"""
+Here we perform some symbolic computations required for the N-D
+interpolation routines in `interpnd.pyx`.
+
+"""
+from sympy import symbols, binomial, Matrix
+
+
+def _estimate_gradients_2d_global():
+
+    # Symbolically derive the 2x2 linear system used by the global gradient
+    # estimation: build a cubic Bernstein polynomial w on [0, 1] with
+    # endpoint values f1, f2 and endpoint-derivative parameters df1, df2,
+    # expand the roughness measure int_0^1 w''(x)**2 dx as a quadratic form
+    # in (df1, df2), and print the matrix A, right-hand side B, and solution.
+
+    f1, f2, df1, df2, x = symbols(['f1', 'f2', 'df1', 'df2', 'x'])
+    c = [f1, (df1 + 3*f1)/3, (df2 + 3*f2)/3, f2]
+
+    w = 0
+    for k in range(4):
+        w += binomial(3, k) * c[k] * x**k*(1-x)**(3-k)
+
+    wpp = w.diff(x, 2).expand()
+    intwpp2 = (wpp**2).integrate((x, 0, 1)).expand()
+
+    A = Matrix([[intwpp2.coeff(df1**2), intwpp2.coeff(df1*df2)/2],
+                [intwpp2.coeff(df1*df2)/2, intwpp2.coeff(df2**2)]])
+
+    B = Matrix([[intwpp2.coeff(df1).subs(df2, 0)],
+                [intwpp2.coeff(df2).subs(df1, 0)]]) / 2
+
+    print("A")
+    print(A)
+    print("B")
+    print(B)
+    print("solution")
+    print(A.inv() * B)
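+
+
+if __name__ == "__main__":
+    # Hypothetical convenience entry point (an assumption, not part of the
+    # upstream module): run the derivation and print A, B, and A.inv() * B.
+    _estimate_gradients_2d_global()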
diff --git a/__packaged__/coreml/.python_dependencies/scipy/interpolate/_interpolate.py b/__packaged__/coreml/.python_dependencies/scipy/interpolate/_interpolate.py
new file mode 100644
index 00000000..ca5ad798
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/interpolate/_interpolate.py
@@ -0,0 +1,2462 @@
+__all__ = ['interp1d', 'interp2d', 'lagrange', 'PPoly', 'BPoly', 'NdPPoly']
+
+
+import numpy as np
+from numpy import (array, transpose, searchsorted, atleast_1d, atleast_2d,
+                   ravel, poly1d, asarray, intp)
+
+import scipy.special as spec
+from scipy.special import comb
+from scipy._lib._util import prod
+
+from . import _fitpack_py
+from . import dfitpack
+from . import _fitpack
+from ._polyint import _Interpolator1D
+from . import _ppoly
+from ._fitpack2 import RectBivariateSpline
+from .interpnd import _ndim_coords_from_arrays
+from ._bsplines import make_interp_spline, BSpline
+
+
+def lagrange(x, w):
+    r"""
+    Return a Lagrange interpolating polynomial.
+
+    Given two 1-D arrays `x` and `w`, returns the Lagrange interpolating
+    polynomial through the points ``(x, w)``.
+
+    Warning: This implementation is numerically unstable. Do not expect to
+    be able to use more than about 20 points even if they are chosen optimally.
+
+    Parameters
+    ----------
+    x : array_like
+        `x` represents the x-coordinates of a set of datapoints.
+    w : array_like
+        `w` represents the y-coordinates of a set of datapoints, i.e., f(`x`).
+
+    Returns
+    -------
+    lagrange : `numpy.poly1d` instance
+        The Lagrange interpolating polynomial.
+
+    Examples
+    --------
+    Interpolate :math:`f(x) = x^3` by 3 points.
+
+    >>> import numpy as np
+    >>> from scipy.interpolate import lagrange
+    >>> x = np.array([0, 1, 2])
+    >>> y = x**3
+    >>> poly = lagrange(x, y)
+
+    Since there are only 3 points, the Lagrange polynomial has degree 2.
+    Explicitly, it is given by
+
+    .. math::
+
+        \begin{aligned}
+            L(x) &= 1\times \frac{x (x - 2)}{-1} + 8\times \frac{x (x-1)}{2} \\
+                 &= x (-2 + 3x)
+        \end{aligned}
+
+    >>> from numpy.polynomial.polynomial import Polynomial
+    >>> Polynomial(poly.coef[::-1]).coef
+    array([ 0., -2.,  3.])
+
+    >>> import matplotlib.pyplot as plt
+    >>> x_new = np.arange(0, 2.1, 0.1)
+    >>> plt.scatter(x, y, label='data')
+    >>> plt.plot(x_new, Polynomial(poly.coef[::-1])(x_new), label='Polynomial')
+    >>> plt.plot(x_new, 3*x_new**2 - 2*x_new + 0*x_new,
+    ...          label=r"$3 x^2 - 2 x$", linestyle='-.')
+    >>> plt.legend()
+    >>> plt.show()
+
+    """
+
+    M = len(x)
+    p = poly1d(0.0)
+    for j in range(M):
+        pt = poly1d(w[j])
+        for k in range(M):
+            if k == j:
+                continue
+            fac = x[j]-x[k]
+            pt *= poly1d([1.0, -x[k]])/fac
+        p += pt
+    return p
+
+
+# !! Need to find argument for keeping initialize. If it isn't
+# !! found, get rid of it!
+
+
+dep_mesg = """\
+`interp2d` is deprecated in SciPy 1.10 and will be removed in SciPy 1.12.0.
+
+For legacy code, nearly bug-for-bug compatible replacements are
+`RectBivariateSpline` on regular grids, and `bisplrep`/`bisplev` for
+scattered 2D data.
+
+In new code, for regular grids use `RegularGridInterpolator` instead.
+For scattered data, prefer `LinearNDInterpolator` or
+`CloughTocher2DInterpolator`.
+
+For more details see
+`https://gist.github.com/ev-br/8544371b40f414b7eaf3fe6217209bff`
+"""
+
+class interp2d:
+    """
+    interp2d(x, y, z, kind='linear', copy=True, bounds_error=False,
+             fill_value=None)
+
+    .. deprecated:: 1.10.0
+
+        `interp2d` is deprecated in SciPy 1.10 and will be removed in SciPy
+        1.12.0.
+
+        For legacy code, nearly bug-for-bug compatible replacements are
+        `RectBivariateSpline` on regular grids, and `bisplrep`/`bisplev` for
+        scattered 2D data.
+
+        In new code, for regular grids use `RegularGridInterpolator` instead.
+        For scattered data, prefer `LinearNDInterpolator` or
+        `CloughTocher2DInterpolator`.
+
+        For more details see
+        `https://gist.github.com/ev-br/8544371b40f414b7eaf3fe6217209bff`
+
+
+    Interpolate over a 2-D grid.
+
+    `x`, `y` and `z` are arrays of values used to approximate some function
+    f: ``z = f(x, y)`` which returns a scalar value `z`. This class returns a
+    function whose call method uses spline interpolation to find the value
+    of new points.
+
+    If `x` and `y` represent a regular grid, consider using
+    `RectBivariateSpline`.
+
+    If `z` is a vector value, consider using `interpn`.
+
+    Note that calling `interp2d` with NaNs present in input values, or with
+    decreasing values in `x` and `y`, results in undefined behaviour.
+
+    Methods
+    -------
+    __call__
+
+    Parameters
+    ----------
+    x, y : array_like
+        Arrays defining the data point coordinates.
+        The data point coordinates need to be sorted in increasing order.
+
+        If the points lie on a regular grid, `x` can specify the column
+        coordinates and `y` the row coordinates, for example::
+
+          >>> x = [0,1,2];  y = [0,3]; z = [[1,2,3], [4,5,6]]
+
+        Otherwise, `x` and `y` must specify the full coordinates for each
+        point, for example::
+
+          >>> x = [0,1,2,0,1,2];  y = [0,0,0,3,3,3]; z = [1,4,2,5,3,6]
+
+        If `x` and `y` are multidimensional, they are flattened before use.
+    z : array_like
+        The values of the function to interpolate at the data points. If
+        `z` is a multidimensional array, it is flattened before use assuming
+        Fortran-ordering (order='F').  The length of a flattened `z` array
+        is either len(`x`)*len(`y`) if `x` and `y` specify the column and
+        row coordinates or ``len(z) == len(x) == len(y)`` if `x` and `y`
+        specify coordinates for each point.
+    kind : {'linear', 'cubic', 'quintic'}, optional
+        The kind of spline interpolation to use. Default is 'linear'.
+    copy : bool, optional
+        If True, the class makes internal copies of x, y and z.
+        If False, references may be used. The default is to copy.
+    bounds_error : bool, optional
+        If True, when interpolated values are requested outside of the
+        domain of the input data (x,y), a ValueError is raised.
+        If False, then `fill_value` is used.
+    fill_value : number, optional
+        If provided, the value to use for points outside of the
+        interpolation domain. If omitted (None), values outside
+        the domain are extrapolated via nearest-neighbor extrapolation.
+
+    See Also
+    --------
+    RectBivariateSpline :
+        Much faster 2-D interpolation if your input data is on a grid
+    bisplrep, bisplev :
+        Spline interpolation based on FITPACK
+    BivariateSpline : a more recent wrapper of the FITPACK routines
+    interp1d : 1-D version of this function
+    RegularGridInterpolator : interpolation on a regular or rectilinear grid
+        in arbitrary dimensions.
+    interpn : Multidimensional interpolation on regular grids (wraps
+        `RegularGridInterpolator` and `RectBivariateSpline`).
+
+    Notes
+    -----
+    The minimum number of data points required along the interpolation
+    axis is ``(k+1)**2``, with k=1 for linear, k=3 for cubic and k=5 for
+    quintic interpolation.
+
+    The interpolator is constructed by `bisplrep`, with a smoothing factor
+    of 0. If more control over smoothing is needed, `bisplrep` should be
+    used directly.
+
+    The coordinates of the data points to interpolate `xnew` and `ynew`
+    have to be sorted in ascending order.
+
+    `interp2d` is legacy and is not recommended for use in new code. New
+    code should use `RegularGridInterpolator` instead.
+
+    Examples
+    --------
+    Construct a 2-D grid and interpolate on it:
+
+    >>> import numpy as np
+    >>> from scipy import interpolate
+    >>> x = np.arange(-5.01, 5.01, 0.25)
+    >>> y = np.arange(-5.01, 5.01, 0.25)
+    >>> xx, yy = np.meshgrid(x, y)
+    >>> z = np.sin(xx**2+yy**2)
+    >>> f = interpolate.interp2d(x, y, z, kind='cubic')
+
+    Now use the obtained interpolation function and plot the result:
+
+    >>> import matplotlib.pyplot as plt
+    >>> xnew = np.arange(-5.01, 5.01, 1e-2)
+    >>> ynew = np.arange(-5.01, 5.01, 1e-2)
+    >>> znew = f(xnew, ynew)
+    >>> plt.plot(x, z[0, :], 'ro-', xnew, znew[0, :], 'b-')
+    >>> plt.show()
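+
+    A sketch of the replacement recommended in the deprecation note (an
+    illustration, not part of the original example): `RegularGridInterpolator`
+    expects the values indexed as ``z[i, j]`` for ``x[i]``, ``y[j]``, hence
+    the transpose, and its default method is linear:
+
+    >>> from scipy.interpolate import RegularGridInterpolator
+    >>> r = RegularGridInterpolator((x, y), z.T, bounds_error=False)
+    >>> X, Y = np.meshgrid(xnew, ynew, indexing='ij')
+    >>> pts = np.stack([X.ravel(), Y.ravel()], axis=-1)
+    >>> znew_r = r(pts).reshape(X.shape)   # shape (len(xnew), len(ynew))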
+    """
+
+    @np.deprecate(old_name='interp2d', message=dep_mesg)
+    def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False,
+                 fill_value=None):
+        x = ravel(x)
+        y = ravel(y)
+        z = asarray(z)
+
+        rectangular_grid = (z.size == len(x) * len(y))
+        if rectangular_grid:
+            if z.ndim == 2:
+                if z.shape != (len(y), len(x)):
+                    raise ValueError("When on a regular grid with x.size = m "
+                                     "and y.size = n, if z.ndim == 2, then z "
+                                     "must have shape (n, m)")
+            if not np.all(x[1:] >= x[:-1]):
+                j = np.argsort(x)
+                x = x[j]
+                z = z[:, j]
+            if not np.all(y[1:] >= y[:-1]):
+                j = np.argsort(y)
+                y = y[j]
+                z = z[j, :]
+            z = ravel(z.T)
+        else:
+            z = ravel(z)
+            if len(x) != len(y):
+                raise ValueError(
+                    "x and y must have equal lengths for non-rectangular grid")
+            if len(z) != len(x):
+                raise ValueError(
+                    "Invalid length for input z for non-rectangular grid")
+
+        interpolation_types = {'linear': 1, 'cubic': 3, 'quintic': 5}
+        try:
+            kx = ky = interpolation_types[kind]
+        except KeyError as e:
+            raise ValueError(
+                f"Unsupported interpolation type {repr(kind)}, must be "
+                f"either of {', '.join(map(repr, interpolation_types))}."
+            ) from e
+
+        if not rectangular_grid:
+            # TODO: surfit is really not meant for interpolation!
+            self.tck = _fitpack_py.bisplrep(x, y, z, kx=kx, ky=ky, s=0.0)
+        else:
+            nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(
+                x, y, z, None, None, None, None,
+                kx=kx, ky=ky, s=0.0)
+            self.tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)],
+                        kx, ky)
+
+        self.bounds_error = bounds_error
+        self.fill_value = fill_value
+        self.x, self.y, self.z = [array(a, copy=copy) for a in (x, y, z)]
+
+        self.x_min, self.x_max = np.amin(x), np.amax(x)
+        self.y_min, self.y_max = np.amin(y), np.amax(y)
+
+    @np.deprecate(old_name='interp2d', message=dep_mesg)
+    def __call__(self, x, y, dx=0, dy=0, assume_sorted=False):
+        """Interpolate the function.
+
+        Parameters
+        ----------
+        x : 1-D array
+            x-coordinates of the mesh on which to interpolate.
+        y : 1-D array
+            y-coordinates of the mesh on which to interpolate.
+        dx : int >= 0, < kx
+            Order of partial derivatives in x.
+        dy : int >= 0, < ky
+            Order of partial derivatives in y.
+        assume_sorted : bool, optional
+            If False, values of `x` and `y` can be in any order and they are
+            sorted first.
+            If True, `x` and `y` have to be arrays of monotonically
+            increasing values.
+
+        Returns
+        -------
+        z : 2-D array with shape (len(y), len(x))
+            The interpolated values.
+        """
+
+        x = atleast_1d(x)
+        y = atleast_1d(y)
+
+        if x.ndim != 1 or y.ndim != 1:
+            raise ValueError("x and y should both be 1-D arrays")
+
+        if not assume_sorted:
+            x = np.sort(x, kind="mergesort")
+            y = np.sort(y, kind="mergesort")
+
+        if self.bounds_error or self.fill_value is not None:
+            out_of_bounds_x = (x < self.x_min) | (x > self.x_max)
+            out_of_bounds_y = (y < self.y_min) | (y > self.y_max)
+
+            any_out_of_bounds_x = np.any(out_of_bounds_x)
+            any_out_of_bounds_y = np.any(out_of_bounds_y)
+
+        if self.bounds_error and (any_out_of_bounds_x or any_out_of_bounds_y):
+            raise ValueError("Values out of range; x must be in %r, y in %r"
+                             % ((self.x_min, self.x_max),
+                                (self.y_min, self.y_max)))
+
+        z = _fitpack_py.bisplev(x, y, self.tck, dx, dy)
+        z = atleast_2d(z)
+        z = transpose(z)
+
+        if self.fill_value is not None:
+            if any_out_of_bounds_x:
+                z[:, out_of_bounds_x] = self.fill_value
+            if any_out_of_bounds_y:
+                z[out_of_bounds_y, :] = self.fill_value
+
+        if len(z) == 1:
+            z = z[0]
+        return array(z)
+
+
+def _check_broadcast_up_to(arr_from, shape_to, name):
+    """Helper to check that arr_from broadcasts up to shape_to"""
+    shape_from = arr_from.shape
+    if len(shape_to) >= len(shape_from):
+        for t, f in zip(shape_to[::-1], shape_from[::-1]):
+            if f != 1 and f != t:
+                break
+        else:  # all checks pass, do the upcasting that we need later
+            if arr_from.size != 1 and arr_from.shape != shape_to:
+                arr_from = np.ones(shape_to, arr_from.dtype) * arr_from
+            return arr_from.ravel()
+    # at least one check failed
+    raise ValueError('%s argument must be able to broadcast up '
+                     'to shape %s but had shape %s'
+                     % (name, shape_to, shape_from))
+
+
+def _do_extrapolate(fill_value):
+    """Helper to check if fill_value == "extrapolate" without warnings"""
+    return (isinstance(fill_value, str) and
+            fill_value == 'extrapolate')
+
+
+class interp1d(_Interpolator1D):
+    """
+    Interpolate a 1-D function.
+
+    `x` and `y` are arrays of values used to approximate some function f:
+    ``y = f(x)``. This class returns a function whose call method uses
+    interpolation to find the value of new points.
+
+    Parameters
+    ----------
+    x : (N,) array_like
+        A 1-D array of real values.
+    y : (...,N,...) array_like
+        An N-D array of real values. The length of `y` along the interpolation
+        axis must be equal to the length of `x`.
+    kind : str or int, optional
+        Specifies the kind of interpolation as a string or as an integer
+        specifying the order of the spline interpolator to use.
+        The string has to be one of 'linear', 'nearest', 'nearest-up', 'zero',
+        'slinear', 'quadratic', 'cubic', 'previous', or 'next'. 'zero',
+        'slinear', 'quadratic' and 'cubic' refer to a spline interpolation of
+        zeroth, first, second or third order; 'previous' and 'next' simply
+        return the previous or next value of the point; 'nearest-up' and
+        'nearest' differ when interpolating half-integers (e.g. 0.5, 1.5)
+        in that 'nearest-up' rounds up and 'nearest' rounds down. Default
+        is 'linear'.
+    axis : int, optional
+        Specifies the axis of `y` along which to interpolate.
+        Interpolation defaults to the last axis of `y`.
+    copy : bool, optional
+        If True, the class makes internal copies of x and y.
+        If False, references to `x` and `y` are used. The default is to copy.
+    bounds_error : bool, optional
+        If True, a ValueError is raised any time interpolation is attempted on
+        a value outside of the range of x (where extrapolation is
+        necessary). If False, out of bounds values are assigned `fill_value`.
+        By default, an error is raised unless ``fill_value="extrapolate"``.
+    fill_value : array-like or (array-like, array-like) or "extrapolate", optional
+        - if a ndarray (or float), this value will be used to fill in for
+          requested points outside of the data range. If not provided, then
+          the default is NaN. The array-like must broadcast properly to the
+          dimensions of the non-interpolation axes.
+        - If a two-element tuple, then the first element is used as a
+          fill value for ``x_new < x[0]`` and the second element is used for
+          ``x_new > x[-1]``. Anything that is not a 2-element tuple (e.g.,
+          list or ndarray, regardless of shape) is taken to be a single
+          array-like argument meant to be used for both bounds as
+          ``below, above = fill_value, fill_value``. Using a two-element tuple
+          or ndarray requires ``bounds_error=False``.
+
+          .. versionadded:: 0.17.0
+        - If "extrapolate", then points outside the data range will be
+          extrapolated.
+
+          .. versionadded:: 0.17.0
+    assume_sorted : bool, optional
+        If False, values of `x` can be in any order and they are sorted first.
+        If True, `x` has to be an array of monotonically increasing values.
+
+    Attributes
+    ----------
+    fill_value
+
+    Methods
+    -------
+    __call__
+
+    See Also
+    --------
+    splrep, splev
+        Spline interpolation/smoothing based on FITPACK.
+    UnivariateSpline : An object-oriented wrapper of the FITPACK routines.
+    interp2d : 2-D interpolation
+
+    Notes
+    -----
+    Calling `interp1d` with NaNs present in input values results in
+    undefined behaviour.
+
+    Input values `x` and `y` must be convertible to `float` values like
+    `int` or `float`.
+
+    If the values in `x` are not unique, the resulting behavior is
+    undefined and specific to the choice of `kind`, i.e., changing
+    `kind` will change the behavior for duplicates.
+
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy import interpolate
+    >>> x = np.arange(0, 10)
+    >>> y = np.exp(-x/3.0)
+    >>> f = interpolate.interp1d(x, y)
+
+    >>> xnew = np.arange(0, 9, 0.1)
+    >>> ynew = f(xnew)   # use interpolation function returned by `interp1d`
+    >>> plt.plot(x, y, 'o', xnew, ynew, '-')
+    >>> plt.show()
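+
+    A minimal sketch of out-of-bounds handling with a two-element
+    ``fill_value`` (below-range value, above-range value), which requires
+    ``bounds_error=False``:
+
+    >>> f2 = interpolate.interp1d(x, y, bounds_error=False,
+    ...                           fill_value=(-1.0, 1.0))
+    >>> f2([-5, 100])
+    array([-1.,  1.])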
+    """
+
+    def __init__(self, x, y, kind='linear', axis=-1,
+                 copy=True, bounds_error=None, fill_value=np.nan,
+                 assume_sorted=False):
+        """ Initialize a 1-D linear interpolation class."""
+        _Interpolator1D.__init__(self, x, y, axis=axis)
+
+        self.bounds_error = bounds_error  # used by fill_value setter
+        self.copy = copy
+
+        if kind in ['zero', 'slinear', 'quadratic', 'cubic']:
+            order = {'zero': 0, 'slinear': 1,
+                     'quadratic': 2, 'cubic': 3}[kind]
+            kind = 'spline'
+        elif isinstance(kind, int):
+            order = kind
+            kind = 'spline'
+        elif kind not in ('linear', 'nearest', 'nearest-up', 'previous',
+                          'next'):
+            raise NotImplementedError("%s is unsupported: Use fitpack "
+                                      "routines for other types." % kind)
+        x = array(x, copy=self.copy)
+        y = array(y, copy=self.copy)
+
+        if not assume_sorted:
+            ind = np.argsort(x, kind="mergesort")
+            x = x[ind]
+            y = np.take(y, ind, axis=axis)
+
+        if x.ndim != 1:
+            raise ValueError("the x array must have exactly one dimension.")
+        if y.ndim == 0:
+            raise ValueError("the y array must have at least one dimension.")
+
+        # Force-cast y to a floating-point type, if it's not yet one
+        if not issubclass(y.dtype.type, np.inexact):
+            y = y.astype(np.float_)
+
+        # Backward compatibility
+        self.axis = axis % y.ndim
+
+        # Interpolation goes internally along the first axis
+        self.y = y
+        self._y = self._reshape_yi(self.y)
+        self.x = x
+        del y, x  # clean up namespace to prevent misuse; use attributes
+        self._kind = kind
+
+        # Adjust to interpolation kind; store reference to *unbound*
+        # interpolation methods, in order to avoid circular references to self
+        # stored in the bound instance methods, and therefore delayed garbage
+        # collection.  See: https://docs.python.org/reference/datamodel.html
+        if kind in ('linear', 'nearest', 'nearest-up', 'previous', 'next'):
+            # Make a "view" of the y array that is rotated to the interpolation
+            # axis.
+            minval = 1
+            if kind == 'nearest':
+                # Do division before addition to prevent possible integer
+                # overflow
+                self._side = 'left'
+                self.x_bds = self.x / 2.0
+                self.x_bds = self.x_bds[1:] + self.x_bds[:-1]
+
+                self._call = self.__class__._call_nearest
+            elif kind == 'nearest-up':
+                # Do division before addition to prevent possible integer
+                # overflow
+                self._side = 'right'
+                self.x_bds = self.x / 2.0
+                self.x_bds = self.x_bds[1:] + self.x_bds[:-1]
+
+                self._call = self.__class__._call_nearest
+            elif kind == 'previous':
+                # Side for np.searchsorted and index for clipping
+                self._side = 'left'
+                self._ind = 0
+                # Move x by one floating point value to the left
+                self._x_shift = np.nextafter(self.x, -np.inf)
+                self._call = self.__class__._call_previousnext
+                if _do_extrapolate(fill_value):
+                    self._check_and_update_bounds_error_for_extrapolation()
+                    # assume y is sorted by x ascending order here.
+                    fill_value = (np.nan, np.take(self.y, -1, axis))
+            elif kind == 'next':
+                self._side = 'right'
+                self._ind = 1
+                # Move x by one floating point value to the right
+                self._x_shift = np.nextafter(self.x, np.inf)
+                self._call = self.__class__._call_previousnext
+                if _do_extrapolate(fill_value):
+                    self._check_and_update_bounds_error_for_extrapolation()
+                    # assume y is sorted by x ascending order here.
+                    fill_value = (np.take(self.y, 0, axis), np.nan)
+            else:
+                # Check if we can delegate to numpy.interp (2x-10x faster).
+                np_types = (np.float_, np.int_)
+                cond = self.x.dtype in np_types and self.y.dtype in np_types
+                cond = cond and self.y.ndim == 1
+                cond = cond and not _do_extrapolate(fill_value)
+
+                if cond:
+                    self._call = self.__class__._call_linear_np
+                else:
+                    self._call = self.__class__._call_linear
+        else:
+            minval = order + 1
+
+            rewrite_nan = False
+            xx, yy = self.x, self._y
+            if order > 1:
+                # Quadratic or cubic spline. If input contains even a single
+                # nan, then the output is all nans. We cannot just feed data
+                # with nans to make_interp_spline because it calls LAPACK.
+                # So, we make up a bogus x and y with no nans and use it
+                # to get the correct shape of the output, which we then fill
+                # with nans.
+                # For slinear or zero order spline, we just pass nans through.
+                mask = np.isnan(self.x)
+                if mask.any():
+                    sx = self.x[~mask]
+                    if sx.size == 0:
+                        raise ValueError("`x` array is all-nan")
+                    xx = np.linspace(np.nanmin(self.x),
+                                     np.nanmax(self.x),
+                                     len(self.x))
+                    rewrite_nan = True
+                if np.isnan(self._y).any():
+                    yy = np.ones_like(self._y)
+                    rewrite_nan = True
+
+            self._spline = make_interp_spline(xx, yy, k=order,
+                                              check_finite=False)
+            if rewrite_nan:
+                self._call = self.__class__._call_nan_spline
+            else:
+                self._call = self.__class__._call_spline
+
+        if len(self.x) < minval:
+            raise ValueError("x and y arrays must have at "
+                             "least %d entries" % minval)
+
+        self.fill_value = fill_value  # calls the setter, can modify bounds_err
+
+    @property
+    def fill_value(self):
+        """The fill value."""
+        # backwards compat: mimic a public attribute
+        return self._fill_value_orig
+
+    @fill_value.setter
+    def fill_value(self, fill_value):
+        # extrapolation only works for nearest neighbor and linear methods
+        if _do_extrapolate(fill_value):
+            self._check_and_update_bounds_error_for_extrapolation()
+            self._extrapolate = True
+        else:
+            broadcast_shape = (self.y.shape[:self.axis] +
+                               self.y.shape[self.axis + 1:])
+            if len(broadcast_shape) == 0:
+                broadcast_shape = (1,)
+            # it's either a pair (_below_range, _above_range) or a single value
+            # for both above and below range
+            if isinstance(fill_value, tuple) and len(fill_value) == 2:
+                below_above = [np.asarray(fill_value[0]),
+                               np.asarray(fill_value[1])]
+                names = ('fill_value (below)', 'fill_value (above)')
+                for ii in range(2):
+                    below_above[ii] = _check_broadcast_up_to(
+                        below_above[ii], broadcast_shape, names[ii])
+            else:
+                fill_value = np.asarray(fill_value)
+                below_above = [_check_broadcast_up_to(
+                    fill_value, broadcast_shape, 'fill_value')] * 2
+            self._fill_value_below, self._fill_value_above = below_above
+            self._extrapolate = False
+            if self.bounds_error is None:
+                self.bounds_error = True
+        # backwards compat: fill_value was a public attr; make it writeable
+        self._fill_value_orig = fill_value
+
+    def _check_and_update_bounds_error_for_extrapolation(self):
+        if self.bounds_error:
+            raise ValueError("Cannot extrapolate and raise "
+                             "at the same time.")
+        self.bounds_error = False
+
+    def _call_linear_np(self, x_new):
+        # Note that out-of-bounds values are taken care of in self._evaluate
+        return np.interp(x_new, self.x, self.y)
+
+    def _call_linear(self, x_new):
+        # 2. Find where in the original data, the values to interpolate
+        #    would be inserted.
+        #    Note: If x_new[n] == x[m], then m is returned by searchsorted.
+        x_new_indices = searchsorted(self.x, x_new)
+
+        # 3. Clip x_new_indices so that they are within the range of
+        #    self.x indices and at least 1. Removes mis-interpolation
+        #    of x_new[n] = x[0]
+        x_new_indices = x_new_indices.clip(1, len(self.x)-1).astype(int)
+
+        # 4. Calculate the slope of regions that each x_new value falls in.
+        lo = x_new_indices - 1
+        hi = x_new_indices
+
+        x_lo = self.x[lo]
+        x_hi = self.x[hi]
+        y_lo = self._y[lo]
+        y_hi = self._y[hi]
+
+        # Note that the following two expressions rely on the specifics of the
+        # broadcasting semantics.
+        slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]
+
+        # 5. Calculate the actual value for each entry in x_new.
+        y_new = slope*(x_new - x_lo)[:, None] + y_lo
+
+        return y_new
+
+    def _call_nearest(self, x_new):
+        """ Find nearest neighbor interpolated y_new = f(x_new)."""
+
+        # 2. Find where in the averaged data the values to interpolate
+        #    would be inserted.
+        #    Note: use side='left' (right) to searchsorted() to define the
+        #    halfway point to be nearest to the left (right) neighbor
+        x_new_indices = searchsorted(self.x_bds, x_new, side=self._side)
+
+        # 3. Clip x_new_indices so that they are within the range of x indices.
+        x_new_indices = x_new_indices.clip(0, len(self.x)-1).astype(intp)
+
+        # 4. Calculate the actual value for each entry in x_new.
+        y_new = self._y[x_new_indices]
+
+        return y_new
+
+    def _call_previousnext(self, x_new):
+        """Use previous/next neighbor of x_new, y_new = f(x_new)."""
+
+        # 1. Get index of left/right value
+        x_new_indices = searchsorted(self._x_shift, x_new, side=self._side)
+
+        # 2. Clip x_new_indices so that they are within the range of x indices.
+        x_new_indices = x_new_indices.clip(1-self._ind,
+                                           len(self.x)-self._ind).astype(intp)
+
+        # 3. Calculate the actual value for each entry in x_new.
+        y_new = self._y[x_new_indices+self._ind-1]
+
+        return y_new
+
+    def _call_spline(self, x_new):
+        return self._spline(x_new)
+
+    def _call_nan_spline(self, x_new):
+        out = self._spline(x_new)
+        out[...] = np.nan
+        return out
+
+    def _evaluate(self, x_new):
+        # 1. Handle values in x_new that are outside of x. Throw error,
+        #    or return mask arrays indicating the out-of-bounds values.
+        #    The behavior is set by the bounds_error variable.
+        x_new = asarray(x_new)
+        y_new = self._call(self, x_new)
+        if not self._extrapolate:
+            below_bounds, above_bounds = self._check_bounds(x_new)
+            if len(y_new) > 0:
+                # Note fill_value must be broadcast up to the proper size
+                # and flattened to work here
+                y_new[below_bounds] = self._fill_value_below
+                y_new[above_bounds] = self._fill_value_above
+        return y_new
+
+    def _check_bounds(self, x_new):
+        """Check the inputs for being in the bounds of the interpolated data.
+
+        Parameters
+        ----------
+        x_new : array
+
+        Returns
+        -------
+        out_of_bounds : bool array
+            The mask on x_new of values that are out of the bounds.
+        """
+
+        # If self.bounds_error is True, we raise an error if any x_new values
+        # fall outside the range of x. Otherwise, we return an array indicating
+        # which values are outside the boundary region.
+        below_bounds = x_new < self.x[0]
+        above_bounds = x_new > self.x[-1]
+
+        if self.bounds_error and below_bounds.any():
+            below_bounds_value = x_new[np.argmax(below_bounds)]
+            raise ValueError("A value ({}) in x_new is below "
+                             "the interpolation range's minimum value ({})."
+                             .format(below_bounds_value, self.x[0]))
+        if self.bounds_error and above_bounds.any():
+            above_bounds_value = x_new[np.argmax(above_bounds)]
+            raise ValueError("A value ({}) in x_new is above "
+                             "the interpolation range's maximum value ({})."
+                             .format(above_bounds_value, self.x[-1]))
+
+        # !! Should we emit a warning if some values are out of bounds?
+        # !! matlab does not.
+        return below_bounds, above_bounds
+
+
+class _PPolyBase:
+    """Base class for piecewise polynomials."""
+    __slots__ = ('c', 'x', 'extrapolate', 'axis')
+
+    def __init__(self, c, x, extrapolate=None, axis=0):
+        self.c = np.asarray(c)
+        self.x = np.ascontiguousarray(x, dtype=np.float64)
+
+        if extrapolate is None:
+            extrapolate = True
+        elif extrapolate != 'periodic':
+            extrapolate = bool(extrapolate)
+        self.extrapolate = extrapolate
+
+        if self.c.ndim < 2:
+            raise ValueError("Coefficients array must be at least "
+                             "2-dimensional.")
+
+        if not (0 <= axis < self.c.ndim - 1):
+            raise ValueError("axis=%s must be between 0 and %s" %
+                             (axis, self.c.ndim-1))
+
+        self.axis = axis
+        if axis != 0:
+            # move the interpolation axis to be the first one in self.c
+            # More specifically, the target shape for self.c is (k, m, ...),
+            # and axis !=0 means that we have c.shape (..., k, m, ...)
+            #                                               ^
+            #                                              axis
+            # So we roll two of them.
+            self.c = np.moveaxis(self.c, axis+1, 0)
+            self.c = np.moveaxis(self.c, axis+1, 0)
+
+        if self.x.ndim != 1:
+            raise ValueError("x must be 1-dimensional")
+        if self.x.size < 2:
+            raise ValueError("at least 2 breakpoints are needed")
+        if self.c.ndim < 2:
+            raise ValueError("c must have at least 2 dimensions")
+        if self.c.shape[0] == 0:
+            raise ValueError("polynomial must be at least of order 0")
+        if self.c.shape[1] != self.x.size-1:
+            raise ValueError("number of coefficients != len(x)-1")
+        dx = np.diff(self.x)
+        if not (np.all(dx >= 0) or np.all(dx <= 0)):
+            raise ValueError("`x` must be strictly increasing or decreasing.")
+
+        dtype = self._get_dtype(self.c.dtype)
+        self.c = np.ascontiguousarray(self.c, dtype=dtype)
+
+    def _get_dtype(self, dtype):
+        if np.issubdtype(dtype, np.complexfloating) \
+               or np.issubdtype(self.c.dtype, np.complexfloating):
+            return np.complex_
+        else:
+            return np.float_
+
+    @classmethod
+    def construct_fast(cls, c, x, extrapolate=None, axis=0):
+        """
+        Construct the piecewise polynomial without making checks.
+
+        Takes the same parameters as the constructor. Input arguments
+        ``c`` and ``x`` must be arrays of the correct shape and type. The
+        ``c`` array can only be of dtypes float and complex, and ``x``
+        array must have dtype float.
+        """
+        self = object.__new__(cls)
+        self.c = c
+        self.x = x
+        self.axis = axis
+        if extrapolate is None:
+            extrapolate = True
+        self.extrapolate = extrapolate
+        return self
+
+    def _ensure_c_contiguous(self):
+        """
+        c and x may be modified by the user. The Cython code expects
+        that they are C contiguous.
+        """
+        if not self.x.flags.c_contiguous:
+            self.x = self.x.copy()
+        if not self.c.flags.c_contiguous:
+            self.c = self.c.copy()
+
+    def extend(self, c, x):
+        """
+        Add additional breakpoints and coefficients to the polynomial.
+
+        Parameters
+        ----------
+        c : ndarray, size (k, m, ...)
+            Additional coefficients for polynomials in intervals. Note that
+            the first additional interval will be formed using one of the
+            ``self.x`` end points.
+        x : ndarray, size (m,)
+            Additional breakpoints. Must be sorted in the same order as
+            ``self.x`` and either to the right or to the left of the current
+            breakpoints.
+        """
+
+        c = np.asarray(c)
+        x = np.asarray(x)
+
+        if c.ndim < 2:
+            raise ValueError("invalid dimensions for c")
+        if x.ndim != 1:
+            raise ValueError("invalid dimensions for x")
+        if x.shape[0] != c.shape[1]:
+            raise ValueError("Shapes of x {} and c {} are incompatible"
+                             .format(x.shape, c.shape))
+        if c.shape[2:] != self.c.shape[2:] or c.ndim != self.c.ndim:
+            raise ValueError("Shapes of c {} and self.c {} are incompatible"
+                             .format(c.shape, self.c.shape))
+
+        if c.size == 0:
+            return
+
+        dx = np.diff(x)
+        if not (np.all(dx >= 0) or np.all(dx <= 0)):
+            raise ValueError("`x` is not sorted.")
+
+        if self.x[-1] >= self.x[0]:
+            if not x[-1] >= x[0]:
+                raise ValueError("`x` is in the different order "
+                                 "than `self.x`.")
+
+            if x[0] >= self.x[-1]:
+                action = 'append'
+            elif x[-1] <= self.x[0]:
+                action = 'prepend'
+            else:
+                raise ValueError("`x` is neither on the left or on the right "
+                                 "from `self.x`.")
+        else:
+            if not x[-1] <= x[0]:
+                raise ValueError("`x` is in the different order "
+                                 "than `self.x`.")
+
+            if x[0] <= self.x[-1]:
+                action = 'append'
+            elif x[-1] >= self.x[0]:
+                action = 'prepend'
+            else:
+                raise ValueError("`x` is neither on the left or on the right "
+                                 "from `self.x`.")
+
+        dtype = self._get_dtype(c.dtype)
+
+        k2 = max(c.shape[0], self.c.shape[0])
+        c2 = np.zeros((k2, self.c.shape[1] + c.shape[1]) + self.c.shape[2:],
+                      dtype=dtype)
+
+        if action == 'append':
+            c2[k2-self.c.shape[0]:, :self.c.shape[1]] = self.c
+            c2[k2-c.shape[0]:, self.c.shape[1]:] = c
+            self.x = np.r_[self.x, x]
+        elif action == 'prepend':
+            c2[k2-self.c.shape[0]:, :c.shape[1]] = c
+            c2[k2-c.shape[0]:, c.shape[1]:] = self.c
+            self.x = np.r_[x, self.x]
+
+        self.c = c2
+
+    def __call__(self, x, nu=0, extrapolate=None):
+        """
+        Evaluate the piecewise polynomial or its derivative.
+
+        Parameters
+        ----------
+        x : array_like
+            Points to evaluate the interpolant at.
+        nu : int, optional
+            Order of derivative to evaluate. Must be non-negative.
+        extrapolate : {bool, 'periodic', None}, optional
+            If bool, determines whether to extrapolate to out-of-bounds points
+            based on first and last intervals, or to return NaNs.
+            If 'periodic', periodic extrapolation is used.
+            If None (default), use `self.extrapolate`.
+
+        Returns
+        -------
+        y : array_like
+            Interpolated values. Shape is determined by replacing
+            the interpolation axis in the original array with the shape of x.
+
+        Notes
+        -----
+        Derivatives are evaluated piecewise for each polynomial
+        segment, even if the polynomial is not differentiable at the
+        breakpoints. The polynomial intervals are considered half-open,
+        ``[a, b)``, except for the last interval which is closed
+        ``[a, b]``.
+        """
+        if extrapolate is None:
+            extrapolate = self.extrapolate
+        x = np.asarray(x)
+        x_shape, x_ndim = x.shape, x.ndim
+        x = np.ascontiguousarray(x.ravel(), dtype=np.float_)
+
+        # With periodic extrapolation we map x to the segment
+        # [self.x[0], self.x[-1]].
+        if extrapolate == 'periodic':
+            x = self.x[0] + (x - self.x[0]) % (self.x[-1] - self.x[0])
+            extrapolate = False
+
+        out = np.empty((len(x), prod(self.c.shape[2:])), dtype=self.c.dtype)
+        self._ensure_c_contiguous()
+        self._evaluate(x, nu, extrapolate, out)
+        out = out.reshape(x_shape + self.c.shape[2:])
+        if self.axis != 0:
+            # transpose to move the calculated values to the interpolation axis
+            l = list(range(out.ndim))
+            l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:]
+            out = out.transpose(l)
+        return out
+
+
+class PPoly(_PPolyBase):
+    """
+    Piecewise polynomial in terms of coefficients and breakpoints
+
+    The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
+    local power basis::
+
+        S = sum(c[m, i] * (xp - x[i])**(k-m) for m in range(k+1))
+
+    where ``k`` is the degree of the polynomial.
+
+    Parameters
+    ----------
+    c : ndarray, shape (k, m, ...)
+        Polynomial coefficients, order `k` and `m` intervals.
+    x : ndarray, shape (m+1,)
+        Polynomial breakpoints. Must be sorted in either increasing or
+        decreasing order.
+    extrapolate : bool or 'periodic', optional
+        If bool, determines whether to extrapolate to out-of-bounds points
+        based on first and last intervals, or to return NaNs. If 'periodic',
+        periodic extrapolation is used. Default is True.
+    axis : int, optional
+        Interpolation axis. Default is zero.
+
+    Attributes
+    ----------
+    x : ndarray
+        Breakpoints.
+    c : ndarray
+        Coefficients of the polynomials. They are reshaped
+        to a 3-D array with the last dimension representing
+        the trailing dimensions of the original coefficient array.
+    axis : int
+        Interpolation axis.
+
+    Methods
+    -------
+    __call__
+    derivative
+    antiderivative
+    integrate
+    solve
+    roots
+    extend
+    from_spline
+    from_bernstein_basis
+    construct_fast
+
+    See also
+    --------
+    BPoly : piecewise polynomials in the Bernstein basis
+
+    Notes
+    -----
+    High-order polynomials in the power basis can be numerically
+    unstable. Precision problems can start to appear for orders
+    larger than 20-30.
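+
+    Examples
+    --------
+    A minimal sketch: ``S(x) = 2*(x - 0) + 1`` on ``[0, 1]`` and
+    ``S(x) = -(x - 1) + 3`` on ``[1, 2]``:
+
+    >>> import numpy as np
+    >>> from scipy.interpolate import PPoly
+    >>> pp = PPoly(np.array([[2., -1.], [1., 3.]]), np.array([0., 1., 2.]))
+    >>> pp([0.5, 1.5])
+    array([2. , 2.5])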
+    """
+
+    def _evaluate(self, x, nu, extrapolate, out):
+        _ppoly.evaluate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
+                        self.x, x, nu, bool(extrapolate), out)
+
+    def derivative(self, nu=1):
+        """
+        Construct a new piecewise polynomial representing the derivative.
+
+        Parameters
+        ----------
+        nu : int, optional
+            Order of derivative to evaluate. Default is 1, i.e., compute the
+            first derivative. If negative, the antiderivative is returned.
+
+        Returns
+        -------
+        pp : PPoly
+            Piecewise polynomial of order k2 = k - nu representing the
+            derivative of this polynomial.
+
+        Notes
+        -----
+        Derivatives are evaluated piecewise for each polynomial
+        segment, even if the polynomial is not differentiable at the
+        breakpoints. The polynomial intervals are considered half-open,
+        ``[a, b)``, except for the last interval which is closed
+        ``[a, b]``.
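+
+        Examples
+        --------
+        A minimal sketch: the derivative of ``S(x) = (x - 0)**2`` on
+        ``[0, 1]`` is ``2*(x - 0)``:
+
+        >>> import numpy as np
+        >>> from scipy.interpolate import PPoly
+        >>> pp = PPoly(np.array([[1.], [0.], [0.]]), np.array([0., 1.]))
+        >>> pp.derivative().c.ravel()
+        array([2., 0.])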
+        """
+        if nu < 0:
+            return self.antiderivative(-nu)
+
+        # reduce order
+        if nu == 0:
+            c2 = self.c.copy()
+        else:
+            c2 = self.c[:-nu, :].copy()
+
+        if c2.shape[0] == 0:
+            # derivative of order 0 is zero
+            c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
+
+        # multiply by the correct rising factorials
+        factor = spec.poch(np.arange(c2.shape[0], 0, -1), nu)
+        c2 *= factor[(slice(None),) + (None,)*(c2.ndim-1)]
+
+        # construct a compatible polynomial
+        return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
+
+    def antiderivative(self, nu=1):
+        """
+        Construct a new piecewise polynomial representing the antiderivative.
+
+        Antiderivative is also the indefinite integral of the function,
+        and derivative is its inverse operation.
+
+        Parameters
+        ----------
+        nu : int, optional
+            Order of antiderivative to evaluate. Default is 1, i.e., compute
+            the first integral. If negative, the derivative is returned.
+
+        Returns
+        -------
+        pp : PPoly
+            Piecewise polynomial of order k2 = k + nu representing
+            the antiderivative of this polynomial.
+
+        Notes
+        -----
+        The antiderivative returned by this function is continuous and
+        continuously differentiable to order nu-1, up to floating point
+        rounding error.
+
+        If antiderivative is computed and ``self.extrapolate='periodic'``,
+        it will be set to False for the returned instance. This is done because
+        the antiderivative is no longer periodic and its correct evaluation
+        outside of the initially given x interval is difficult.
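+
+        Examples
+        --------
+        A minimal sketch: the antiderivative of ``S(x) = 2*(x - 0)`` on
+        ``[0, 1]`` is ``(x - 0)**2``, with the constant term fixed by
+        continuity:
+
+        >>> import numpy as np
+        >>> from scipy.interpolate import PPoly
+        >>> pp = PPoly(np.array([[2.], [0.]]), np.array([0., 1.]))
+        >>> pp.antiderivative().c.ravel()
+        array([1., 0., 0.])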
+        """
+        if nu <= 0:
+            return self.derivative(-nu)
+
+        c = np.zeros((self.c.shape[0] + nu, self.c.shape[1]) + self.c.shape[2:],
+                     dtype=self.c.dtype)
+        c[:-nu] = self.c
+
+        # divide by the correct rising factorials
+        factor = spec.poch(np.arange(self.c.shape[0], 0, -1), nu)
+        c[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
+
+        # fix continuity of added degrees of freedom
+        self._ensure_c_contiguous()
+        _ppoly.fix_continuity(c.reshape(c.shape[0], c.shape[1], -1),
+                              self.x, nu - 1)
+
+        if self.extrapolate == 'periodic':
+            extrapolate = False
+        else:
+            extrapolate = self.extrapolate
+
+        # construct a compatible polynomial
+        return self.construct_fast(c, self.x, extrapolate, self.axis)
+
+    def integrate(self, a, b, extrapolate=None):
+        """
+        Compute a definite integral over a piecewise polynomial.
+
+        Parameters
+        ----------
+        a : float
+            Lower integration bound
+        b : float
+            Upper integration bound
+        extrapolate : {bool, 'periodic', None}, optional
+            If bool, determines whether to extrapolate to out-of-bounds points
+            based on first and last intervals, or to return NaNs.
+            If 'periodic', periodic extrapolation is used.
+            If None (default), use `self.extrapolate`.
+
+        Returns
+        -------
+        ig : array_like
+            Definite integral of the piecewise polynomial over [a, b]
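+
+        Examples
+        --------
+        A minimal sketch: integrate ``S(x) = x`` over ``[0, 2]``, written as
+        the two pieces ``x`` and ``(x - 1) + 1``:
+
+        >>> import numpy as np
+        >>> from scipy.interpolate import PPoly
+        >>> pp = PPoly(np.array([[1., 1.], [0., 1.]]), np.array([0., 1., 2.]))
+        >>> pp.integrate(0, 2)
+        array(2.)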
+        """
+        if extrapolate is None:
+            extrapolate = self.extrapolate
+
+        # Swap integration bounds if needed
+        sign = 1
+        if b < a:
+            a, b = b, a
+            sign = -1
+
+        range_int = np.empty((prod(self.c.shape[2:]),), dtype=self.c.dtype)
+        self._ensure_c_contiguous()
+
+        # Compute the integral.
+        if extrapolate == 'periodic':
+            # Split the integral into the part over period (can be several
+            # of them) and the remaining part.
+
+            xs, xe = self.x[0], self.x[-1]
+            period = xe - xs
+            interval = b - a
+            n_periods, left = divmod(interval, period)
+
+            if n_periods > 0:
+                _ppoly.integrate(
+                    self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
+                    self.x, xs, xe, False, out=range_int)
+                range_int *= n_periods
+            else:
+                range_int.fill(0)
+
+            # Map a to [xs, xe], b is always a + left.
+            a = xs + (a - xs) % period
+            b = a + left
+
+            # If b <= xe then we need to integrate over [a, b], otherwise
+            # over [a, xe] and from xs to what is remained.
+            remainder_int = np.empty_like(range_int)
+            if b <= xe:
+                _ppoly.integrate(
+                    self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
+                    self.x, a, b, False, out=remainder_int)
+                range_int += remainder_int
+            else:
+                _ppoly.integrate(
+                    self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
+                    self.x, a, xe, False, out=remainder_int)
+                range_int += remainder_int
+
+                _ppoly.integrate(
+                    self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
+                    self.x, xs, xs + left + a - xe, False, out=remainder_int)
+                range_int += remainder_int
+        else:
+            _ppoly.integrate(
+                self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
+                self.x, a, b, bool(extrapolate), out=range_int)
+
+        # Return
+        range_int *= sign
+        return range_int.reshape(self.c.shape[2:])
+
+    def solve(self, y=0., discontinuity=True, extrapolate=None):
+        """
+        Find real solutions of the equation ``pp(x) == y``.
+
+        Parameters
+        ----------
+        y : float, optional
+            Right-hand side. Default is zero.
+        discontinuity : bool, optional
+            Whether to report sign changes across discontinuities at
+            breakpoints as roots.
+        extrapolate : {bool, 'periodic', None}, optional
+            If bool, determines whether to return roots from the polynomial
+            extrapolated based on first and last intervals, 'periodic' works
+            the same as False. If None (default), use `self.extrapolate`.
+
+        Returns
+        -------
+        roots : ndarray
+            Roots of the polynomial(s).
+
+            If the PPoly object describes multiple polynomials, the
+            return value is an object array in which each element is an
+            ndarray containing the roots.
+
+        Notes
+        -----
+        This routine works only on real-valued polynomials.
+
+        If the piecewise polynomial contains sections that are
+        identically zero, the root list will contain the start point
+        of the corresponding interval, followed by a ``nan`` value.
+
+        If the polynomial is discontinuous across a breakpoint, and
+        there is a sign change across the breakpoint, this is reported
+        if the `discontinuity` parameter is True.
+
+        Examples
+        --------
+
+        Finding roots of ``[x**2 - 1, (x - 1)**2]`` defined on intervals
+        ``[-2, 1], [1, 2]``:
+
+        >>> import numpy as np
+        >>> from scipy.interpolate import PPoly
+        >>> pp = PPoly(np.array([[1, -4, 3], [1, 0, 0]]).T, [-2, 1, 2])
+        >>> pp.solve()
+        array([-1.,  1.])
+        """
+        if extrapolate is None:
+            extrapolate = self.extrapolate
+
+        self._ensure_c_contiguous()
+
+        if np.issubdtype(self.c.dtype, np.complexfloating):
+            raise ValueError("Root finding is only for "
+                             "real-valued polynomials")
+
+        y = float(y)
+        r = _ppoly.real_roots(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
+                              self.x, y, bool(discontinuity),
+                              bool(extrapolate))
+        if self.c.ndim == 2:
+            return r[0]
+        else:
+            r2 = np.empty(prod(self.c.shape[2:]), dtype=object)
+            # this for-loop is equivalent to ``r2[...] = r``, but that's broken
+            # in NumPy 1.6.0
+            for ii, root in enumerate(r):
+                r2[ii] = root
+
+            return r2.reshape(self.c.shape[2:])
+
+    def roots(self, discontinuity=True, extrapolate=None):
+        """
+        Find real roots of the piecewise polynomial.
+
+        Parameters
+        ----------
+        discontinuity : bool, optional
+            Whether to report sign changes across discontinuities at
+            breakpoints as roots.
+        extrapolate : {bool, 'periodic', None}, optional
+            If bool, determines whether to return roots from the polynomial
+            extrapolated based on first and last intervals, 'periodic' works
+            the same as False. If None (default), use `self.extrapolate`.
+
+        Returns
+        -------
+        roots : ndarray
+            Roots of the polynomial(s).
+
+            If the PPoly object describes multiple polynomials, the
+            return value is an object array in which each element is an
+            ndarray containing the roots.
+
+        See Also
+        --------
+        PPoly.solve
+        """
+        return self.solve(0, discontinuity, extrapolate)
+
+    @classmethod
+    def from_spline(cls, tck, extrapolate=None):
+        """
+        Construct a piecewise polynomial from a spline
+
+        Parameters
+        ----------
+        tck
+            A spline, as returned by `splrep` or a BSpline object.
+        extrapolate : bool or 'periodic', optional
+            If bool, determines whether to extrapolate to out-of-bounds points
+            based on first and last intervals, or to return NaNs.
+            If 'periodic', periodic extrapolation is used. Default is True.
+
+        Examples
+        --------
+        Construct an interpolating spline and convert it to a `PPoly` instance:
+
+        >>> import numpy as np
+        >>> from scipy.interpolate import splrep, PPoly
+        >>> x = np.linspace(0, 1, 11)
+        >>> y = np.sin(2*np.pi*x)
+        >>> tck = splrep(x, y, s=0)
+        >>> p = PPoly.from_spline(tck)
+        >>> isinstance(p, PPoly)
+        True
+
+        Note that this function only supports 1D splines out of the box.
+
+        If the ``tck`` object represents a parametric spline (e.g. constructed
+        by `splprep` or a `BSpline` with ``c.ndim > 1``), you will need to loop
+        over the dimensions manually.
+
+        >>> from scipy.interpolate import splprep, splev
+        >>> t = np.linspace(0, 1, 11)
+        >>> x = np.sin(2*np.pi*t)
+        >>> y = np.cos(2*np.pi*t)
+        >>> (t, c, k), u = splprep([x, y], s=0)
+
+        Note that ``c`` is a list of two arrays of length 11.
+
+        >>> unew = np.arange(0, 1.01, 0.01)
+        >>> out = splev(unew, (t, c, k))
+
+        To convert this spline to the power basis, we convert each
+        component of the list of b-spline coefficients, ``c``, into the
+        corresponding cubic polynomial.
+
+        >>> polys = [PPoly.from_spline((t, cj, k)) for cj in c]
+        >>> polys[0].c.shape
+        (4, 14)
+
+        Note that the coefficients of the polynomials `polys` are in the
+        power basis and their dimensions reflect just that: here 4 is the order
+        (degree+1), and 14 is the number of intervals---which is nothing but
+        the length of the knot array of the original `tck` minus one.
+
+        Optionally, we can stack the components into a single `PPoly` along
+        the third dimension:
+
+        >>> cc = np.dstack([p.c for p in polys])    # has shape = (4, 14, 2)
+        >>> poly = PPoly(cc, polys[0].x)
+        >>> np.allclose(poly(unew).T,     # note the transpose to match `splev`
+        ...             out, atol=1e-15)
+        True
+
+        """
+        if isinstance(tck, BSpline):
+            t, c, k = tck.tck
+            if extrapolate is None:
+                extrapolate = tck.extrapolate
+        else:
+            t, c, k = tck
+
+        cvals = np.empty((k + 1, len(t)-1), dtype=c.dtype)
+        for m in range(k, -1, -1):
+            y = _fitpack_py.splev(t[:-1], tck, der=m)
+            cvals[k - m, :] = y/spec.gamma(m+1)
+
+        return cls.construct_fast(cvals, t, extrapolate)
+
+    @classmethod
+    def from_bernstein_basis(cls, bp, extrapolate=None):
+        """
+        Construct a piecewise polynomial in the power basis
+        from a polynomial in Bernstein basis.
+
+        Parameters
+        ----------
+        bp : BPoly
+            A Bernstein basis polynomial, as created by `BPoly`.
+        extrapolate : bool or 'periodic', optional
+            If bool, determines whether to extrapolate to out-of-bounds points
+            based on first and last intervals, or to return NaNs.
+            If 'periodic', periodic extrapolation is used. Default is True.
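+
+        Examples
+        --------
+        A minimal sketch: convert the quadratic from the `BPoly` docstring
+        and check that both representations agree at a point.
+
+        >>> import numpy as np
+        >>> from scipy.interpolate import BPoly, PPoly
+        >>> bp = BPoly([[1], [2], [3]], [0, 1])
+        >>> pp = PPoly.from_bernstein_basis(bp)
+        >>> np.allclose(pp(0.5), bp(0.5))
+        True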
+        """
+        if not isinstance(bp, BPoly):
+            raise TypeError(".from_bernstein_basis only accepts BPoly instances. "
+                            "Got %s instead." % type(bp))
+
+        dx = np.diff(bp.x)
+        k = bp.c.shape[0] - 1  # polynomial order
+
+        rest = (None,)*(bp.c.ndim-2)
+
+        c = np.zeros_like(bp.c)
+        for a in range(k+1):
+            factor = (-1)**a * comb(k, a) * bp.c[a]
+            for s in range(a, k+1):
+                val = comb(k-a, s-a) * (-1)**s
+                c[k-s] += factor * val / dx[(slice(None),)+rest]**s
+
+        if extrapolate is None:
+            extrapolate = bp.extrapolate
+
+        return cls.construct_fast(c, bp.x, extrapolate, bp.axis)
+
+
+class BPoly(_PPolyBase):
+    """Piecewise polynomial in terms of coefficients and breakpoints.
+
+    The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
+    Bernstein polynomial basis::
+
+        S = sum(c[a, i] * b(a, k; x) for a in range(k+1)),
+
+    where ``k`` is the degree of the polynomial, and::
+
+        b(a, k; x) = binom(k, a) * t**a * (1 - t)**(k - a),
+
+    with ``t = (x - x[i]) / (x[i+1] - x[i])``, where ``binom`` is the binomial
+    coefficient.
+
+    Parameters
+    ----------
+    c : ndarray, shape (k, m, ...)
+        Polynomial coefficients, order `k` and `m` intervals
+    x : ndarray, shape (m+1,)
+        Polynomial breakpoints. Must be sorted in either increasing or
+        decreasing order.
+    extrapolate : bool or 'periodic', optional
+        If bool, determines whether to extrapolate to out-of-bounds points
+        based on first and last intervals, or to return NaNs. If 'periodic',
+        periodic extrapolation is used. Default is True.
+    axis : int, optional
+        Interpolation axis. Default is zero.
+
+    Attributes
+    ----------
+    x : ndarray
+        Breakpoints.
+    c : ndarray
+        Coefficients of the polynomials. They are reshaped
+        to a 3-D array with the last dimension representing
+        the trailing dimensions of the original coefficient array.
+    axis : int
+        Interpolation axis.
+
+    Methods
+    -------
+    __call__
+    extend
+    derivative
+    antiderivative
+    integrate
+    construct_fast
+    from_power_basis
+    from_derivatives
+
+    See also
+    --------
+    PPoly : piecewise polynomials in the power basis
+
+    Notes
+    -----
+    Properties of Bernstein polynomials are well documented in the literature,
+    see for example [1]_ [2]_ [3]_.
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Bernstein_polynomial
+
+    .. [2] Kenneth I. Joy, Bernstein polynomials,
+       http://www.idav.ucdavis.edu/education/CAGDNotes/Bernstein-Polynomials.pdf
+
+    .. [3] E. H. Doha, A. H. Bhrawy, and M. A. Saker, Boundary Value Problems,
+           vol 2011, article ID 829543, :doi:`10.1155/2011/829543`.
+
+    Examples
+    --------
+    >>> from scipy.interpolate import BPoly
+    >>> x = [0, 1]
+    >>> c = [[1], [2], [3]]
+    >>> bp = BPoly(c, x)
+
+    This creates a 2nd order polynomial
+
+    .. math::
+
+        B(x) = 1 \\times b_{0, 2}(x) + 2 \\times b_{1, 2}(x) + 3 \\times b_{2, 2}(x) \\\\
+             = 1 \\times (1-x)^2 + 2 \\times 2 x (1 - x) + 3 \\times x^2
+
+    """
+
+    def _evaluate(self, x, nu, extrapolate, out):
+        _ppoly.evaluate_bernstein(
+            self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
+            self.x, x, nu, bool(extrapolate), out)
+
+    def derivative(self, nu=1):
+        """
+        Construct a new piecewise polynomial representing the derivative.
+
+        Parameters
+        ----------
+        nu : int, optional
+            Order of derivative to evaluate. Default is 1, i.e., compute the
+            first derivative. If negative, the antiderivative is returned.
+
+        Returns
+        -------
+        bp : BPoly
+            Piecewise polynomial of order k - nu representing the derivative of
+            this polynomial.
+
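+        Examples
+        --------
+        A minimal sketch: the quadratic from the class docstring simplifies
+        to ``1 + 2*x`` on ``[0, 1]``, so its derivative is the constant 2.
+
+        >>> from scipy.interpolate import BPoly
+        >>> bp = BPoly([[1], [2], [3]], [0, 1])
+        >>> float(bp.derivative()(0.5))
+        2.0
+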
+        """
+        if nu < 0:
+            return self.antiderivative(-nu)
+
+        if nu > 1:
+            bp = self
+            for k in range(nu):
+                bp = bp.derivative()
+            return bp
+
+        # reduce order
+        if nu == 0:
+            c2 = self.c.copy()
+        else:
+            # For a polynomial
+            #    B(x) = \sum_{a=0}^{k} c_a b_{a, k}(x),
+            # we use the fact that
+            #   b'_{a, k} = k ( b_{a-1, k-1} - b_{a, k-1} ),
+            # which leads to
+            #   B'(x) = \sum_{a=0}^{k-1} (c_{a+1} - c_a) b_{a, k-1}
+            #
+            # finally, for an interval [y, y + dy] with dy != 1,
+            # we need to correct for an extra power of dy
+
+            rest = (None,)*(self.c.ndim-2)
+
+            k = self.c.shape[0] - 1
+            dx = np.diff(self.x)[(None, slice(None))+rest]
+            c2 = k * np.diff(self.c, axis=0) / dx
+
+        if c2.shape[0] == 0:
+            # derivative of order 0 is zero
+            c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
+
+        # construct a compatible polynomial
+        return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
+
+    def antiderivative(self, nu=1):
+        """
+        Construct a new piecewise polynomial representing the antiderivative.
+
+        Parameters
+        ----------
+        nu : int, optional
+            Order of antiderivative to evaluate. Default is 1, i.e., compute
+            the first integral. If negative, the derivative is returned.
+
+        Returns
+        -------
+        bp : BPoly
+            Piecewise polynomial of order k + nu representing the
+            antiderivative of this polynomial.
+
+        Notes
+        -----
+        If antiderivative is computed and ``self.extrapolate='periodic'``,
+        it will be set to False for the returned instance. This is done because
+        the antiderivative is no longer periodic and its correct evaluation
+        outside of the initially given x interval is difficult.
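+
+        Examples
+        --------
+        A minimal sketch: the antiderivative of the constant 2 on ``[0, 1]``
+        is ``2*x``, which vanishes at the left breakpoint.
+
+        >>> from scipy.interpolate import BPoly
+        >>> bp = BPoly([[2]], [0, 1])
+        >>> float(bp.antiderivative()(0.5))
+        1.0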
+        """
+        if nu <= 0:
+            return self.derivative(-nu)
+
+        if nu > 1:
+            bp = self
+            for k in range(nu):
+                bp = bp.antiderivative()
+            return bp
+
+        # Construct the indefinite integrals on individual intervals
+        c, x = self.c, self.x
+        k = c.shape[0]
+        c2 = np.zeros((k+1,) + c.shape[1:], dtype=c.dtype)
+
+        c2[1:, ...] = np.cumsum(c, axis=0) / k
+        delta = x[1:] - x[:-1]
+        c2 *= delta[(None, slice(None)) + (None,)*(c.ndim-2)]
+
+        # Now fix continuity: on the very first interval, take the integration
+        # constant to be zero; on an interval [x_j, x_{j+1}) with j>0,
+        # the integration constant is then equal to the jump of the `bp` at x_j.
+        # The latter is given by the coefficient of B_{n+1, n+1}
+        # *on the previous interval* (other B. polynomials are zero at the
+        # breakpoint). Finally, use the fact that BPs form a partition of unity.
+        c2[:,1:] += np.cumsum(c2[k, :], axis=0)[:-1]
+
+        if self.extrapolate == 'periodic':
+            extrapolate = False
+        else:
+            extrapolate = self.extrapolate
+
+        return self.construct_fast(c2, x, extrapolate, axis=self.axis)
+
+    def integrate(self, a, b, extrapolate=None):
+        """
+        Compute a definite integral over a piecewise polynomial.
+
+        Parameters
+        ----------
+        a : float
+            Lower integration bound
+        b : float
+            Upper integration bound
+        extrapolate : {bool, 'periodic', None}, optional
+            Whether to extrapolate to out-of-bounds points based on first
+            and last intervals, or to return NaNs. If 'periodic', periodic
+            extrapolation is used. If None (default), use `self.extrapolate`.
+
+        Returns
+        -------
+        array_like
+            Definite integral of the piecewise polynomial over [a, b]
+
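+        Examples
+        --------
+        A minimal sketch, using the quadratic from the class docstring
+        (equal to ``1 + 2*x`` on ``[0, 1]``):
+
+        >>> from scipy.interpolate import BPoly
+        >>> bp = BPoly([[1], [2], [3]], [0, 1])
+        >>> float(bp.integrate(0, 1))
+        2.0
+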
+        """
+        # XXX: can probably use instead the fact that
+        # \int_0^{1} B_{j, n}(x) \dx = 1/(n+1)
+        ib = self.antiderivative()
+        if extrapolate is None:
+            extrapolate = self.extrapolate
+
+        # ib.extrapolate shouldn't be 'periodic'; it is converted to
+        # False for 'periodic' in the antiderivative() call.
+        if extrapolate != 'periodic':
+            ib.extrapolate = extrapolate
+
+        if extrapolate == 'periodic':
+            # Split the integral into the part over period (can be several
+            # of them) and the remaining part.
+
+            # For simplicity and clarity convert to a <= b case.
+            if a <= b:
+                sign = 1
+            else:
+                a, b = b, a
+                sign = -1
+
+            xs, xe = self.x[0], self.x[-1]
+            period = xe - xs
+            interval = b - a
+            n_periods, left = divmod(interval, period)
+            res = n_periods * (ib(xe) - ib(xs))
+
+            # Map a and b to [xs, xe].
+            a = xs + (a - xs) % period
+            b = a + left
+
+            # If b <= xe then we need to integrate over [a, b], otherwise
+            # over [a, xe] and from xs to what is remained.
+            if b <= xe:
+                res += ib(b) - ib(a)
+            else:
+                res += ib(xe) - ib(a) + ib(xs + left + a - xe) - ib(xs)
+
+            return sign * res
+        else:
+            return ib(b) - ib(a)
+
+    def extend(self, c, x):
+        k = max(self.c.shape[0], c.shape[0])
+        self.c = self._raise_degree(self.c, k - self.c.shape[0])
+        c = self._raise_degree(c, k - c.shape[0])
+        return _PPolyBase.extend(self, c, x)
+    extend.__doc__ = _PPolyBase.extend.__doc__
+
+    @classmethod
+    def from_power_basis(cls, pp, extrapolate=None):
+        """
+        Construct a piecewise polynomial in Bernstein basis
+        from a power basis polynomial.
+
+        Parameters
+        ----------
+        pp : PPoly
+            A piecewise polynomial in the power basis
+        extrapolate : bool or 'periodic', optional
+            If bool, determines whether to extrapolate to out-of-bounds points
+            based on first and last intervals, or to return NaNs.
+            If 'periodic', periodic extrapolation is used. Default is True.
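+
+        Examples
+        --------
+        A minimal sketch: convert a power-basis quadratic and check that
+        both representations agree at a point.
+
+        >>> import numpy as np
+        >>> from scipy.interpolate import PPoly, BPoly
+        >>> pp = PPoly([[1.], [2.], [3.]], [0, 1])
+        >>> bp = BPoly.from_power_basis(pp)
+        >>> np.allclose(bp(0.25), pp(0.25))
+        True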
+        """
+        if not isinstance(pp, PPoly):
+            raise TypeError(".from_power_basis only accepts PPoly instances. "
+                            "Got %s instead." % type(pp))
+
+        dx = np.diff(pp.x)
+        k = pp.c.shape[0] - 1   # polynomial order
+
+        rest = (None,)*(pp.c.ndim-2)
+
+        c = np.zeros_like(pp.c)
+        for a in range(k+1):
+            factor = pp.c[a] / comb(k, k-a) * dx[(slice(None),)+rest]**(k-a)
+            for j in range(k-a, k+1):
+                c[j] += factor * comb(j, k-a)
+
+        if extrapolate is None:
+            extrapolate = pp.extrapolate
+
+        return cls.construct_fast(c, pp.x, extrapolate, pp.axis)
+
+    @classmethod
+    def from_derivatives(cls, xi, yi, orders=None, extrapolate=None):
+        """Construct a piecewise polynomial in the Bernstein basis,
+        compatible with the specified values and derivatives at breakpoints.
+
+        Parameters
+        ----------
+        xi : array_like
+            sorted 1-D array of x-coordinates
+        yi : array_like or list of array_likes
+            ``yi[i][j]`` is the ``j``th derivative known at ``xi[i]``
+        orders : None or int or array_like of ints. Default: None.
+            Specifies the degree of local polynomials. If not None, some
+            derivatives are ignored.
+        extrapolate : bool or 'periodic', optional
+            If bool, determines whether to extrapolate to out-of-bounds points
+            based on first and last intervals, or to return NaNs.
+            If 'periodic', periodic extrapolation is used. Default is True.
+
+        Notes
+        -----
+        If ``k`` derivatives are specified at a breakpoint ``x``, the
+        constructed polynomial is exactly ``k`` times continuously
+        differentiable at ``x``, unless ``orders`` is provided explicitly.
+        In the latter case, the smoothness of the polynomial at
+        the breakpoint is controlled by ``orders``.
+
+        Deduces the number of derivatives to match at each end
+        from ``orders`` and the number of derivatives available. If
+        possible it uses the same number of derivatives from
+        each end; if the number is odd it tries to take the
+        extra one from the right endpoint. In any case, if not enough
+        derivatives are available at one end or another, it draws
+        enough to make up the total from the other end.
+
+        If the order is too high and not enough derivatives are available,
+        an exception is raised.
+
+        Examples
+        --------
+
+        >>> from scipy.interpolate import BPoly
+        >>> BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]])
+
+        Creates a polynomial `f(x)` of degree 3, defined on `[0, 1]`
+        such that `f(0) = 1, df/dx(0) = 2, f(1) = 3, df/dx(1) = 4`
+
+        >>> BPoly.from_derivatives([0, 1, 2], [[0, 1], [0], [2]])
+
+        Creates a piecewise polynomial `f(x)`, such that
+        `f(0) = f(1) = 0`, `f(2) = 2`, and `df/dx(0) = 1`.
+        Based on the number of derivatives provided, the order of the
+        local polynomials is 2 on `[0, 1]` and 1 on `[1, 2]`.
+        Notice that no restriction is imposed on the derivatives at
+        ``x = 1`` and ``x = 2``.
+
+        Indeed, the explicit form of the polynomial is::
+
+            f(x) = | x * (1 - x),  0 <= x < 1
+                   | 2 * (x - 1),  1 <= x <= 2
+
+        So that f'(1-0) = -1 and f'(1+0) = 2
+
+        """
+        xi = np.asarray(xi)
+        if len(xi) != len(yi):
+            raise ValueError("xi and yi need to have the same length")
+        if np.any(xi[1:] - xi[:-1] <= 0):
+            raise ValueError("x coordinates are not in increasing order")
+
+        # number of intervals
+        m = len(xi) - 1
+
+        # global poly order is k-1, local orders are <=k and can vary
+        try:
+            k = max(len(yi[i]) + len(yi[i+1]) for i in range(m))
+        except TypeError as e:
+            raise ValueError(
+                "Using a 1-D array for y? Please .reshape(-1, 1)."
+            ) from e
+
+        if orders is None:
+            orders = [None] * m
+        else:
+            if isinstance(orders, (int, np.integer)):
+                orders = [orders] * m
+            k = max(k, max(orders))
+
+            if any(o <= 0 for o in orders):
+                raise ValueError("Orders must be positive.")
+
+        c = []
+        for i in range(m):
+            y1, y2 = yi[i], yi[i+1]
+            if orders[i] is None:
+                n1, n2 = len(y1), len(y2)
+            else:
+                n = orders[i]+1
+                n1 = min(n//2, len(y1))
+                n2 = min(n - n1, len(y2))
+                n1 = min(n - n2, len(y1))
+                if n1+n2 != n:
+                    mesg = ("Point %g has %d derivatives, point %g"
+                            " has %d derivatives, but order %d requested" % (
+                               xi[i], len(y1), xi[i+1], len(y2), orders[i]))
+                    raise ValueError(mesg)
+
+                if not (n1 <= len(y1) and n2 <= len(y2)):
+                    raise ValueError("`order` input incompatible with"
+                                     " length y1 or y2.")
+
+            b = BPoly._construct_from_derivatives(xi[i], xi[i+1],
+                                                  y1[:n1], y2[:n2])
+            if len(b) < k:
+                b = BPoly._raise_degree(b, k - len(b))
+            c.append(b)
+
+        c = np.asarray(c)
+        return cls(c.swapaxes(0, 1), xi, extrapolate)
+
+    @staticmethod
+    def _construct_from_derivatives(xa, xb, ya, yb):
+        r"""Compute the coefficients of a polynomial in the Bernstein basis
+        given the values and derivatives at the edges.
+
+        Return the coefficients of a polynomial in the Bernstein basis
+        defined on ``[xa, xb]`` and having the values and derivatives at the
+        endpoints `xa` and `xb` as specified by `ya` and `yb`.
+        The polynomial constructed is of the minimal possible degree, i.e.,
+        if the lengths of `ya` and `yb` are `na` and `nb`, the degree
+        of the polynomial is ``na + nb - 1``.
+
+        Parameters
+        ----------
+        xa : float
+            Left-hand end point of the interval
+        xb : float
+            Right-hand end point of the interval
+        ya : array_like
+            Derivatives at `xa`. `ya[0]` is the value of the function, and
+            `ya[i]` for ``i > 0`` is the value of the ``i``th derivative.
+        yb : array_like
+            Derivatives at `xb`.
+
+        Returns
+        -------
+        array
+            coefficient array of a polynomial having specified derivatives
+
+        Notes
+        -----
+        This uses several facts about Bernstein basis functions.
+        First of all,
+
+            .. math:: b'_{a, n} = n (b_{a-1, n-1} - b_{a, n-1})
+
+        If B(x) is a linear combination of the form
+
+            .. math:: B(x) = \sum_{a=0}^{n} c_a b_{a, n},
+
+        then :math:`B'(x) = n \sum_{a=0}^{n-1} (c_{a+1} - c_{a}) b_{a, n-1}`.
+        Iterating this relation, one finds for the q-th derivative
+
+            .. math:: B^{q}(x) = n!/(n-q)! \sum_{a=0}^{n-q} Q_a b_{a, n-q},
+
+        with
+
+          .. math:: Q_a = \sum_{j=0}^{q} (-1)^{j+q} \binom{q}{j} c_{j+a}
+
+        This way, only `a=0` contributes to :math:`B^{q}(x = xa)`, and
+        `c_q` are found one by one by iterating `q = 0, ..., na`.
+
+        At ``x = xb`` it's the same with ``a = n - q``.
+
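+        Examples
+        --------
+        A minimal sketch (this is a private helper): the coefficients of
+        ``x*(1 - x)`` on ``[0, 1]``, which has ``f(0) = 0``, ``f'(0) = 1``
+        and ``f(1) = 0`` as in the `from_derivatives` example, are
+        ``[0, 1/2, 0]`` since ``x*(1 - x) = b_{1, 2}(x) / 2``.
+
+        >>> import numpy as np
+        >>> from scipy.interpolate import BPoly
+        >>> c = BPoly._construct_from_derivatives(0, 1, [0, 1], [0])
+        >>> np.allclose(c, [0, 0.5, 0])
+        True
+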
+        """
+        ya, yb = np.asarray(ya), np.asarray(yb)
+        if ya.shape[1:] != yb.shape[1:]:
+            raise ValueError('Shapes of ya {} and yb {} are incompatible'
+                             .format(ya.shape, yb.shape))
+
+        dta, dtb = ya.dtype, yb.dtype
+        if (np.issubdtype(dta, np.complexfloating) or
+               np.issubdtype(dtb, np.complexfloating)):
+            dt = np.complex_
+        else:
+            dt = np.float_
+
+        na, nb = len(ya), len(yb)
+        n = na + nb
+
+        c = np.empty((na+nb,) + ya.shape[1:], dtype=dt)
+
+        # compute coefficients of a polynomial of degree na+nb-1
+        # walk left-to-right
+        for q in range(0, na):
+            c[q] = ya[q] / spec.poch(n - q, q) * (xb - xa)**q
+            for j in range(0, q):
+                c[q] -= (-1)**(j+q) * comb(q, j) * c[j]
+
+        # now walk right-to-left
+        for q in range(0, nb):
+            c[-q-1] = yb[q] / spec.poch(n - q, q) * (-1)**q * (xb - xa)**q
+            for j in range(0, q):
+                c[-q-1] -= (-1)**(j+1) * comb(q, j+1) * c[-q+j]
+
+        return c
+
+    @staticmethod
+    def _raise_degree(c, d):
+        r"""Raise a degree of a polynomial in the Bernstein basis.
+
+        Given the coefficients of a polynomial of degree `k`, return (the
+        coefficients of) the equivalent polynomial of degree `k+d`.
+
+        Parameters
+        ----------
+        c : array_like
+            coefficient array, 1-D
+        d : integer
+
+        Returns
+        -------
+        array
+            coefficient array, 1-D array of length `c.shape[0] + d`
+
+        Notes
+        -----
+        This uses the fact that a Bernstein polynomial `b_{a, k}` can be
+        identically represented as a linear combination of polynomials of
+        a higher degree `k+d`:
+
+            .. math:: b_{a, k} = comb(k, a) \sum_{j=0}^{d} b_{a+j, k+d} \
+                                 comb(d, j) / comb(k+d, a+j)
+
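+        Examples
+        --------
+        A minimal sketch (this is a private helper): the straight line
+        ``1*(1 - x) + 3*x`` re-expressed in the degree-2 basis.
+
+        >>> import numpy as np
+        >>> from scipy.interpolate import BPoly
+        >>> BPoly._raise_degree(np.array([1.0, 3.0]), 1)
+        array([1., 2., 3.])
+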
+        """
+        if d == 0:
+            return c
+
+        k = c.shape[0] - 1
+        out = np.zeros((c.shape[0] + d,) + c.shape[1:], dtype=c.dtype)
+
+        for a in range(c.shape[0]):
+            f = c[a] * comb(k, a)
+            for j in range(d+1):
+                out[a+j] += f * comb(d, j) / comb(k+d, a+j)
+        return out
+
+
+class NdPPoly:
+    """
+    Piecewise tensor product polynomial
+
+    The value at point ``xp = (x', y', z', ...)`` is evaluated by first
+    computing the interval indices `i` such that::
+
+        x[0][i[0]] <= x' < x[0][i[0]+1]
+        x[1][i[1]] <= y' < x[1][i[1]+1]
+        ...
+
+    and then computing::
+
+        S = sum(c[k0-m0-1,...,kn-mn-1,i[0],...,i[n]]
+                * (xp[0] - x[0][i[0]])**m0
+                * ...
+                * (xp[n] - x[n][i[n]])**mn
+                for m0 in range(k[0]+1)
+                ...
+                for mn in range(k[n]+1))
+
+    where ``k[j]`` is the degree of the polynomial in dimension j. This
+    representation is the piecewise multivariate power basis.
+
+    Parameters
+    ----------
+    c : ndarray, shape (k0, ..., kn, m0, ..., mn, ...)
+        Polynomial coefficients, with polynomial order `kj` and
+        `mj+1` intervals for each dimension `j`.
+    x : ndim-tuple of ndarrays, shapes (mj+1,)
+        Polynomial breakpoints for each dimension. These must be
+        sorted in increasing order.
+    extrapolate : bool, optional
+        Whether to extrapolate to out-of-bounds points based on first
+        and last intervals, or to return NaNs. Default: True.
+
+    Attributes
+    ----------
+    x : tuple of ndarrays
+        Breakpoints.
+    c : ndarray
+        Coefficients of the polynomials.
+
+    Methods
+    -------
+    __call__
+    derivative
+    antiderivative
+    integrate
+    integrate_1d
+    construct_fast
+
+    See also
+    --------
+    PPoly : piecewise polynomials in 1D
+
+    Notes
+    -----
+    High-order polynomials in the power basis can be numerically
+    unstable.
+
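+    Examples
+    --------
+    A minimal sketch (coefficients chosen for illustration): with all
+    coefficients equal to 1, a bilinear-per-cell polynomial evaluates to
+    ``(1 + dx)*(1 + dy)`` inside a cell, where ``dx`` and ``dy`` are the
+    offsets from the lower cell corner.
+
+    >>> import numpy as np
+    >>> from scipy.interpolate import NdPPoly
+    >>> breaks = np.linspace(0, 1, 3)
+    >>> c = np.ones((2, 2, 2, 2))   # (k0, k1, m0, m1)
+    >>> p = NdPPoly(c, (breaks, breaks))
+    >>> np.allclose(p((0.3, 0.4)), (1 + 0.3) * (1 + 0.4))
+    True
+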
+    """
+
+    def __init__(self, c, x, extrapolate=None):
+        self.x = tuple(np.ascontiguousarray(v, dtype=np.float64) for v in x)
+        self.c = np.asarray(c)
+        if extrapolate is None:
+            extrapolate = True
+        self.extrapolate = bool(extrapolate)
+
+        ndim = len(self.x)
+        if any(v.ndim != 1 for v in self.x):
+            raise ValueError("x arrays must all be 1-dimensional")
+        if any(v.size < 2 for v in self.x):
+            raise ValueError("x arrays must all contain at least 2 points")
+        if c.ndim < 2*ndim:
+            raise ValueError("c must have at least 2*len(x) dimensions")
+        if any(np.any(v[1:] - v[:-1] < 0) for v in self.x):
+            raise ValueError("x-coordinates are not in increasing order")
+        if any(a != b.size - 1 for a, b in zip(c.shape[ndim:2*ndim], self.x)):
+            raise ValueError("x and c do not agree on the number of intervals")
+
+        dtype = self._get_dtype(self.c.dtype)
+        self.c = np.ascontiguousarray(self.c, dtype=dtype)
+
+    @classmethod
+    def construct_fast(cls, c, x, extrapolate=None):
+        """
+        Construct the piecewise polynomial without making checks.
+
+        Takes the same parameters as the constructor. Input arguments
+        ``c`` and ``x`` must be arrays of the correct shape and type.  The
+        ``c`` array can only be of dtypes float and complex, and ``x``
+        array must have dtype float.
+
+        """
+        self = object.__new__(cls)
+        self.c = c
+        self.x = x
+        if extrapolate is None:
+            extrapolate = True
+        self.extrapolate = extrapolate
+        return self
+
+    def _get_dtype(self, dtype):
+        if np.issubdtype(dtype, np.complexfloating) \
+               or np.issubdtype(self.c.dtype, np.complexfloating):
+            return np.complex_
+        else:
+            return np.float_
+
+    def _ensure_c_contiguous(self):
+        if not self.c.flags.c_contiguous:
+            self.c = self.c.copy()
+        if not isinstance(self.x, tuple):
+            self.x = tuple(self.x)
+
+    def __call__(self, x, nu=None, extrapolate=None):
+        """
+        Evaluate the piecewise polynomial or its derivative
+
+        Parameters
+        ----------
+        x : array-like
+            Points to evaluate the interpolant at.
+        nu : tuple, optional
+            Orders of derivatives to evaluate. Each must be non-negative.
+        extrapolate : bool, optional
+            Whether to extrapolate to out-of-bounds points based on first
+            and last intervals, or to return NaNs.
+
+        Returns
+        -------
+        y : array-like
+            Interpolated values. Shape is determined by replacing
+            the interpolation axis in the original array with the shape of x.
+
+        Notes
+        -----
+        Derivatives are evaluated piecewise for each polynomial
+        segment, even if the polynomial is not differentiable at the
+        breakpoints. The polynomial intervals are considered half-open,
+        ``[a, b)``, except for the last interval which is closed
+        ``[a, b]``.
+
+        """
+        if extrapolate is None:
+            extrapolate = self.extrapolate
+        else:
+            extrapolate = bool(extrapolate)
+
+        ndim = len(self.x)
+
+        x = _ndim_coords_from_arrays(x)
+        x_shape = x.shape
+        x = np.ascontiguousarray(x.reshape(-1, x.shape[-1]), dtype=np.float_)
+
+        if nu is None:
+            nu = np.zeros((ndim,), dtype=np.intc)
+        else:
+            nu = np.asarray(nu, dtype=np.intc)
+            if nu.ndim != 1 or nu.shape[0] != ndim:
+                raise ValueError("invalid number of derivative orders nu")
+
+        dim1 = prod(self.c.shape[:ndim])
+        dim2 = prod(self.c.shape[ndim:2*ndim])
+        dim3 = prod(self.c.shape[2*ndim:])
+        ks = np.array(self.c.shape[:ndim], dtype=np.intc)
+
+        out = np.empty((x.shape[0], dim3), dtype=self.c.dtype)
+        self._ensure_c_contiguous()
+
+        _ppoly.evaluate_nd(self.c.reshape(dim1, dim2, dim3),
+                           self.x,
+                           ks,
+                           x,
+                           nu,
+                           bool(extrapolate),
+                           out)
+
+        return out.reshape(x_shape[:-1] + self.c.shape[2*ndim:])
+
+    def _derivative_inplace(self, nu, axis):
+        """
+        Compute 1-D derivative along a selected dimension in-place
+        May result in a non-contiguous c array.
+        """
+        if nu < 0:
+            return self._antiderivative_inplace(-nu, axis)
+
+        ndim = len(self.x)
+        axis = axis % ndim
+
+        # reduce order
+        if nu == 0:
+            # noop
+            return
+        else:
+            sl = [slice(None)]*ndim
+            sl[axis] = slice(None, -nu, None)
+            c2 = self.c[tuple(sl)]
+
+        if c2.shape[axis] == 0:
+            # derivative of order 0 is zero
+            shp = list(c2.shape)
+            shp[axis] = 1
+            c2 = np.zeros(shp, dtype=c2.dtype)
+
+        # multiply by the correct rising factorials
+        factor = spec.poch(np.arange(c2.shape[axis], 0, -1), nu)
+        sl = [None]*c2.ndim
+        sl[axis] = slice(None)
+        c2 *= factor[tuple(sl)]
+
+        self.c = c2
+
+    def _antiderivative_inplace(self, nu, axis):
+        """
+        Compute 1-D antiderivative along a selected dimension
+        May result in a non-contiguous c array.
+        """
+        if nu <= 0:
+            return self._derivative_inplace(-nu, axis)
+
+        ndim = len(self.x)
+        axis = axis % ndim
+
+        perm = list(range(ndim))
+        perm[0], perm[axis] = perm[axis], perm[0]
+        perm = perm + list(range(ndim, self.c.ndim))
+
+        c = self.c.transpose(perm)
+
+        c2 = np.zeros((c.shape[0] + nu,) + c.shape[1:],
+                     dtype=c.dtype)
+        c2[:-nu] = c
+
+        # divide by the correct rising factorials
+        factor = spec.poch(np.arange(c.shape[0], 0, -1), nu)
+        c2[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
+
+        # fix continuity of added degrees of freedom
+        perm2 = list(range(c2.ndim))
+        perm2[1], perm2[ndim+axis] = perm2[ndim+axis], perm2[1]
+
+        c2 = c2.transpose(perm2)
+        c2 = c2.copy()
+        _ppoly.fix_continuity(c2.reshape(c2.shape[0], c2.shape[1], -1),
+                              self.x[axis], nu-1)
+
+        c2 = c2.transpose(perm2)
+        c2 = c2.transpose(perm)
+
+        # Done
+        self.c = c2
+
+    def derivative(self, nu):
+        """
+        Construct a new piecewise polynomial representing the derivative.
+
+        Parameters
+        ----------
+        nu : ndim-tuple of int
+            Order of derivatives to evaluate for each dimension.
+            If negative, the antiderivative is returned.
+
+        Returns
+        -------
+        pp : NdPPoly
+            Piecewise polynomial of orders (k[0] - nu[0], ..., k[n] - nu[n])
+            representing the derivative of this polynomial.
+
+        Notes
+        -----
+        Derivatives are evaluated piecewise for each polynomial
+        segment, even if the polynomial is not differentiable at the
+        breakpoints. The polynomial intervals in each dimension are
+        considered half-open, ``[a, b)``, except for the last interval
+        which is closed ``[a, b]``.
+
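+        Examples
+        --------
+        A minimal sketch, reusing the all-ones coefficients from the class
+        docstring, for which the value inside a cell is ``(1 + dx)*(1 + dy)``
+        and hence the x-derivative is ``1 + dy``.
+
+        >>> import numpy as np
+        >>> from scipy.interpolate import NdPPoly
+        >>> breaks = np.linspace(0, 1, 3)
+        >>> p = NdPPoly(np.ones((2, 2, 2, 2)), (breaks, breaks))
+        >>> dp = p.derivative((1, 0))
+        >>> np.allclose(dp((0.3, 0.4)), 1 + 0.4)
+        True
+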
+        """
+        p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)
+
+        for axis, n in enumerate(nu):
+            p._derivative_inplace(n, axis)
+
+        p._ensure_c_contiguous()
+        return p
+
+    def antiderivative(self, nu):
+        """
+        Construct a new piecewise polynomial representing the antiderivative.
+
+        The antiderivative is the indefinite integral of the function;
+        differentiation is its inverse operation.
+
+        Parameters
+        ----------
+        nu : ndim-tuple of int
+            Order of derivatives to evaluate for each dimension.
+            If negative, the derivative is returned.
+
+        Returns
+        -------
+        pp : NdPPoly
+            Piecewise polynomial of orders (k[0] + nu[0], ..., k[n] + nu[n])
+            representing the antiderivative of this polynomial.
+
+        Notes
+        -----
+        The antiderivative returned by this function is continuous and
+        continuously differentiable to order n-1, up to floating point
+        rounding error.
+
+        """
+        p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)
+
+        for axis, n in enumerate(nu):
+            p._antiderivative_inplace(n, axis)
+
+        p._ensure_c_contiguous()
+        return p
+
+    def integrate_1d(self, a, b, axis, extrapolate=None):
+        r"""
+        Compute NdPPoly representation for one dimensional definite integral
+
+        The result is a piecewise polynomial representing the integral:
+
+        .. math::
+
+           p(y, z, ...) = \int_a^b dx\, p(x, y, z, ...)
+
+        where the dimension integrated over is specified with the
+        `axis` parameter.
+
+        Parameters
+        ----------
+        a, b : float
+            Lower and upper bound for integration.
+        axis : int
+            Dimension over which to compute the 1-D integrals
+        extrapolate : bool, optional
+            Whether to extrapolate to out-of-bounds points based on first
+            and last intervals, or to return NaNs.
+
+        Returns
+        -------
+        ig : NdPPoly or array-like
+            Definite integral of the piecewise polynomial over [a, b].
+            If the polynomial was 1D, an array is returned,
+            otherwise, an NdPPoly object.
+
+        """
+        if extrapolate is None:
+            extrapolate = self.extrapolate
+        else:
+            extrapolate = bool(extrapolate)
+
+        ndim = len(self.x)
+        axis = int(axis) % ndim
+
+        # reuse 1-D integration routines
+        c = self.c
+        swap = list(range(c.ndim))
+        swap.insert(0, swap[axis])
+        del swap[axis + 1]
+        swap.insert(1, swap[ndim + axis])
+        del swap[ndim + axis + 1]
+
+        c = c.transpose(swap)
+        p = PPoly.construct_fast(c.reshape(c.shape[0], c.shape[1], -1),
+                                 self.x[axis],
+                                 extrapolate=extrapolate)
+        out = p.integrate(a, b, extrapolate=extrapolate)
+
+        # Construct result
+        if ndim == 1:
+            return out.reshape(c.shape[2:])
+        else:
+            c = out.reshape(c.shape[2:])
+            x = self.x[:axis] + self.x[axis+1:]
+            return self.construct_fast(c, x, extrapolate=extrapolate)
+
+    def integrate(self, ranges, extrapolate=None):
+        """
+        Compute a definite integral over a piecewise polynomial.
+
+        Parameters
+        ----------
+        ranges : ndim-tuple of 2-tuples float
+            Sequence of lower and upper bounds for each dimension,
+            ``[(a[0], b[0]), ..., (a[ndim-1], b[ndim-1])]``
+        extrapolate : bool, optional
+            Whether to extrapolate to out-of-bounds points based on first
+            and last intervals, or to return NaNs.
+
+        Returns
+        -------
+        ig : array_like
+            Definite integral of the piecewise polynomial over
+            [a[0], b[0]] x ... x [a[ndim-1], b[ndim-1]]
+
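+        Examples
+        --------
+        A minimal sketch, reusing the all-ones coefficients from the class
+        docstring: each 1-D factor integrates to 1.25 over ``[0, 1]``, so
+        the double integral is ``1.25**2``.
+
+        >>> import numpy as np
+        >>> from scipy.interpolate import NdPPoly
+        >>> breaks = np.linspace(0, 1, 3)
+        >>> p = NdPPoly(np.ones((2, 2, 2, 2)), (breaks, breaks))
+        >>> float(p.integrate([(0, 1), (0, 1)]))
+        1.5625
+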
+        """
+
+        ndim = len(self.x)
+
+        if extrapolate is None:
+            extrapolate = self.extrapolate
+        else:
+            extrapolate = bool(extrapolate)
+
+        if not hasattr(ranges, '__len__') or len(ranges) != ndim:
+            raise ValueError("Range not a sequence of correct length")
+
+        self._ensure_c_contiguous()
+
+        # Reuse 1D integration routine
+        c = self.c
+        for n, (a, b) in enumerate(ranges):
+            swap = list(range(c.ndim))
+            swap.insert(1, swap[ndim - n])
+            del swap[ndim - n + 1]
+
+            c = c.transpose(swap)
+
+            p = PPoly.construct_fast(c, self.x[n], extrapolate=extrapolate)
+            out = p.integrate(a, b, extrapolate=extrapolate)
+            c = out.reshape(c.shape[2:])
+
+        return c
diff --git a/__packaged__/coreml/.python_dependencies/scipy/interpolate/_ndgriddata.py b/__packaged__/coreml/.python_dependencies/scipy/interpolate/_ndgriddata.py
new file mode 100644
index 00000000..85ad1c49
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/interpolate/_ndgriddata.py
@@ -0,0 +1,273 @@
+"""
+Convenience interface to N-D interpolation
+
+.. versionadded:: 0.9
+
+"""
+import numpy as np
+from .interpnd import LinearNDInterpolator, NDInterpolatorBase, \
+     CloughTocher2DInterpolator, _ndim_coords_from_arrays
+from scipy.spatial import cKDTree
+
+__all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',
+           'CloughTocher2DInterpolator']
+
+#------------------------------------------------------------------------------
+# Nearest-neighbor interpolation
+#------------------------------------------------------------------------------
+
+
+class NearestNDInterpolator(NDInterpolatorBase):
+    """NearestNDInterpolator(x, y).
+
+    Nearest-neighbor interpolation in N > 1 dimensions.
+
+    .. versionadded:: 0.9
+
+    Methods
+    -------
+    __call__
+
+    Parameters
+    ----------
+    x : (Npoints, Ndims) ndarray of floats
+        Data point coordinates.
+    y : (Npoints,) ndarray of float or complex
+        Data values.
+    rescale : boolean, optional
+        Rescale points to unit cube before performing interpolation.
+        This is useful if some of the input dimensions have
+        incommensurable units and differ by many orders of magnitude.
+
+        .. versionadded:: 0.14.0
+    tree_options : dict, optional
+        Options passed to the underlying ``cKDTree``.
+
+        .. versionadded:: 0.17.0
+
+
+    Notes
+    -----
+    Uses ``scipy.spatial.cKDTree``
+
+    Examples
+    --------
+    We can interpolate values on a 2D plane:
+
+    >>> from scipy.interpolate import NearestNDInterpolator
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> rng = np.random.default_rng()
+    >>> x = rng.random(10) - 0.5
+    >>> y = rng.random(10) - 0.5
+    >>> z = np.hypot(x, y)
+    >>> X = np.linspace(min(x), max(x))
+    >>> Y = np.linspace(min(y), max(y))
+    >>> X, Y = np.meshgrid(X, Y)  # 2D grid for interpolation
+    >>> interp = NearestNDInterpolator(list(zip(x, y)), z)
+    >>> Z = interp(X, Y)
+    >>> plt.pcolormesh(X, Y, Z, shading='auto')
+    >>> plt.plot(x, y, "ok", label="input point")
+    >>> plt.legend()
+    >>> plt.colorbar()
+    >>> plt.axis("equal")
+    >>> plt.show()
+
+    See also
+    --------
+    griddata :
+        Interpolate unstructured D-D data.
+    LinearNDInterpolator :
+        Piecewise linear interpolant in N dimensions.
+    CloughTocher2DInterpolator :
+        Piecewise cubic, C1 smooth, curvature-minimizing interpolant in 2D.
+
+    """
+
+    def __init__(self, x, y, rescale=False, tree_options=None):
+        NDInterpolatorBase.__init__(self, x, y, rescale=rescale,
+                                    need_contiguous=False,
+                                    need_values=False)
+        if tree_options is None:
+            tree_options = dict()
+        self.tree = cKDTree(self.points, **tree_options)
+        self.values = np.asarray(y)
+
+    def __call__(self, *args):
+        """
+        Evaluate interpolator at given points.
+
+        Parameters
+        ----------
+        x1, x2, ... xn : array-like of float
+            Points at which to interpolate data.
+            x1, x2, ... xn can be array-like of float with broadcastable shape,
+            or x1 can be array-like of float with shape ``(..., ndim)``.
+
+        """
+        xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])
+        xi = self._check_call_shape(xi)
+        xi = self._scale_x(xi)
+        dist, i = self.tree.query(xi)
+        return self.values[i]
+
+
+#------------------------------------------------------------------------------
+# Convenience interface function
+#------------------------------------------------------------------------------
+
+def griddata(points, values, xi, method='linear', fill_value=np.nan,
+             rescale=False):
+    """
+    Interpolate unstructured D-D data.
+
+    Parameters
+    ----------
+    points : 2-D ndarray of floats with shape (n, D), or length D tuple of 1-D ndarrays with shape (n,).
+        Data point coordinates.
+    values : ndarray of float or complex, shape (n,)
+        Data values.
+    xi : 2-D ndarray of floats with shape (m, D), or length D tuple of ndarrays broadcastable to the same shape.
+        Points at which to interpolate data.
+    method : {'linear', 'nearest', 'cubic'}, optional
+        Method of interpolation. One of
+
+        ``nearest``
+          return the value at the data point closest to
+          the point of interpolation. See `NearestNDInterpolator` for
+          more details.
+
+        ``linear``
+          tessellate the input point set to N-D
+          simplices, and interpolate linearly on each simplex. See
+          `LinearNDInterpolator` for more details.
+
+        ``cubic`` (1-D)
+          return the value determined from a cubic
+          spline.
+
+        ``cubic`` (2-D)
+          return the value determined from a
+          piecewise cubic, continuously differentiable (C1), and
+          approximately curvature-minimizing polynomial surface. See
+          `CloughTocher2DInterpolator` for more details.
+    fill_value : float, optional
+        Value used to fill in for requested points outside of the
+        convex hull of the input points. If not provided, then the
+        default is ``nan``. This option has no effect for the
+        'nearest' method.
+    rescale : bool, optional
+        Rescale points to unit cube before performing interpolation.
+        This is useful if some of the input dimensions have
+        incommensurable units and differ by many orders of magnitude.
+
+        .. versionadded:: 0.14.0
+
+    Returns
+    -------
+    ndarray
+        Array of interpolated values.
+
+    Notes
+    -----
+
+    .. versionadded:: 0.9
+
+    For data on a regular grid use `interpn` instead.
+
+    Examples
+    --------
+
+    Suppose we want to interpolate the 2-D function
+
+    >>> import numpy as np
+    >>> def func(x, y):
+    ...     return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
+
+    on a grid in [0, 1]x[0, 1]
+
+    >>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
+
+    but we only know its values at 1000 data points:
+
+    >>> rng = np.random.default_rng()
+    >>> points = rng.random((1000, 2))
+    >>> values = func(points[:,0], points[:,1])
+
+    This can be done with `griddata` -- below we try out all of the
+    interpolation methods:
+
+    >>> from scipy.interpolate import griddata
+    >>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')
+    >>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')
+    >>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')
+
+    One can see that the exact result is reproduced by all of the
+    methods to some degree, but for this smooth function the piecewise
+    cubic interpolant gives the best results:
+
+    >>> import matplotlib.pyplot as plt
+    >>> plt.subplot(221)
+    >>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')
+    >>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)
+    >>> plt.title('Original')
+    >>> plt.subplot(222)
+    >>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')
+    >>> plt.title('Nearest')
+    >>> plt.subplot(223)
+    >>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')
+    >>> plt.title('Linear')
+    >>> plt.subplot(224)
+    >>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')
+    >>> plt.title('Cubic')
+    >>> plt.gcf().set_size_inches(6, 6)
+    >>> plt.show()
+
+    See Also
+    --------
+    LinearNDInterpolator :
+        Piecewise linear interpolant in N dimensions.
+    NearestNDInterpolator :
+        Nearest-neighbor interpolation in N dimensions.
+    CloughTocher2DInterpolator :
+        Piecewise cubic, C1 smooth, curvature-minimizing interpolant in 2D.
+
+    """
+
+    points = _ndim_coords_from_arrays(points)
+
+    if points.ndim < 2:
+        ndim = points.ndim
+    else:
+        ndim = points.shape[-1]
+
+    if ndim == 1 and method in ('nearest', 'linear', 'cubic'):
+        from ._interpolate import interp1d
+        points = points.ravel()
+        if isinstance(xi, tuple):
+            if len(xi) != 1:
+                raise ValueError("invalid number of dimensions in xi")
+            xi, = xi
+        # Sort points/values together, necessary as input for interp1d
+        idx = np.argsort(points)
+        points = points[idx]
+        values = values[idx]
+        if method == 'nearest':
+            fill_value = 'extrapolate'
+        ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,
+                      fill_value=fill_value)
+        return ip(xi)
+    elif method == 'nearest':
+        ip = NearestNDInterpolator(points, values, rescale=rescale)
+        return ip(xi)
+    elif method == 'linear':
+        ip = LinearNDInterpolator(points, values, fill_value=fill_value,
+                                  rescale=rescale)
+        return ip(xi)
+    elif method == 'cubic' and ndim == 2:
+        ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value,
+                                        rescale=rescale)
+        return ip(xi)
+    else:
+        raise ValueError("Unknown interpolation method %r for "
+                         "%d dimensional data" % (method, ndim))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/interpolate/_pade.py b/__packaged__/coreml/.python_dependencies/scipy/interpolate/_pade.py
new file mode 100644
index 00000000..387ef11d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/interpolate/_pade.py
@@ -0,0 +1,67 @@
+from numpy import zeros, asarray, eye, poly1d, hstack, r_
+from scipy import linalg
+
+__all__ = ["pade"]
+
+def pade(an, m, n=None):
+    """
+    Return Pade approximation to a polynomial as the ratio of two polynomials.
+
+    Parameters
+    ----------
+    an : (N,) array_like
+        Taylor series coefficients.
+    m : int
+        The order of the returned approximating polynomial `q`.
+    n : int, optional
+        The order of the returned approximating polynomial `p`. By default,
+        the order is ``len(an)-1-m``.
+
+    Returns
+    -------
+    p, q : Polynomial class
+        The Pade approximation of the polynomial defined by `an` is
+        ``p(x)/q(x)``.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.interpolate import pade
+    >>> e_exp = [1.0, 1.0, 1.0/2.0, 1.0/6.0, 1.0/24.0, 1.0/120.0]
+    >>> p, q = pade(e_exp, 2)
+
+    >>> e_exp.reverse()
+    >>> e_poly = np.poly1d(e_exp)
+
+    Compare ``e_poly(x)`` and the Pade approximation ``p(x)/q(x)``
+
+    >>> e_poly(1)
+    2.7166666666666668
+
+    >>> p(1)/q(1)
+    2.7179487179487181
+
+    """
+    an = asarray(an)
+    if n is None:
+        n = len(an) - 1 - m
+        if n < 0:
+            raise ValueError("Order of q  must be smaller than len(an)-1.")
+    if n < 0:
+        raise ValueError("Order of p  must be greater than 0.")
+    N = m + n
+    if N > len(an)-1:
+        raise ValueError("Order of q+p  must be smaller than len(an).")
+    an = an[:N+1]
+    Akj = eye(N+1, n+1, dtype=an.dtype)
+    Bkj = zeros((N+1, m), dtype=an.dtype)
+    for row in range(1, m+1):
+        Bkj[row,:row] = -(an[:row])[::-1]
+    for row in range(m+1, N+1):
+        Bkj[row,:] = -(an[row-m:row])[::-1]
+    C = hstack((Akj, Bkj))
+    pq = linalg.solve(C, an)
+    p = pq[:n+1]
+    q = r_[1.0, pq[n+1:]]
+    return poly1d(p[::-1]), poly1d(q[::-1])
+
diff --git a/__packaged__/coreml/.python_dependencies/scipy/interpolate/_polyint.py b/__packaged__/coreml/.python_dependencies/scipy/interpolate/_polyint.py
new file mode 100644
index 00000000..64cfdd4c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/interpolate/_polyint.py
@@ -0,0 +1,743 @@
+import warnings
+
+import numpy as np
+from scipy.special import factorial
+from scipy._lib._util import _asarray_validated, float_factorial
+
+
+__all__ = ["KroghInterpolator", "krogh_interpolate", "BarycentricInterpolator",
+           "barycentric_interpolate", "approximate_taylor_polynomial"]
+
+
+def _isscalar(x):
+    """Check whether x is if a scalar type, or 0-dim"""
+    return np.isscalar(x) or hasattr(x, 'shape') and x.shape == ()
+
+
+class _Interpolator1D:
+    """
+    Common features in univariate interpolation
+
+    Deal with input data type and interpolation axis rolling. The
+    actual interpolator can assume the y-data is of shape (n, r) where
+    `n` is the number of x-points, and `r` the number of variables,
+    and use self.dtype as the y-data type.
+
+    Attributes
+    ----------
+    _y_axis
+        Axis along which the interpolation goes in the original array
+    _y_extra_shape
+        Additional trailing shape of the input arrays, excluding
+        the interpolation axis.
+    dtype
+        Dtype of the y-data arrays. Can be set via _set_dtype, which
+        forces it to be float or complex.
+
+    Methods
+    -------
+    __call__
+    _prepare_x
+    _finish_y
+    _reshape_yi
+    _set_yi
+    _set_dtype
+    _evaluate
+
+    """
+
+    __slots__ = ('_y_axis', '_y_extra_shape', 'dtype')
+
+    def __init__(self, xi=None, yi=None, axis=None):
+        self._y_axis = axis
+        self._y_extra_shape = None
+        self.dtype = None
+        if yi is not None:
+            self._set_yi(yi, xi=xi, axis=axis)
+
+    def __call__(self, x):
+        """
+        Evaluate the interpolant
+
+        Parameters
+        ----------
+        x : array_like
+            Points to evaluate the interpolant at.
+
+        Returns
+        -------
+        y : array_like
+            Interpolated values. Shape is determined by replacing
+            the interpolation axis in the original array with the shape of x.
+
+        Notes
+        -----
+        Input values `x` must be convertible to `float` values like `int`
+        or `float`.
+
+        """
+        x, x_shape = self._prepare_x(x)
+        y = self._evaluate(x)
+        return self._finish_y(y, x_shape)
+
+    def _evaluate(self, x):
+        """
+        Actually evaluate the value of the interpolator.
+        """
+        raise NotImplementedError()
+
+    def _prepare_x(self, x):
+        """Reshape input x array to 1-D"""
+        x = _asarray_validated(x, check_finite=False, as_inexact=True)
+        x_shape = x.shape
+        return x.ravel(), x_shape
+
+    def _finish_y(self, y, x_shape):
+        """Reshape interpolated y back to an N-D array similar to initial y"""
+        y = y.reshape(x_shape + self._y_extra_shape)
+        if self._y_axis != 0 and x_shape != ():
+            nx = len(x_shape)
+            ny = len(self._y_extra_shape)
+            s = (list(range(nx, nx + self._y_axis))
+                 + list(range(nx)) + list(range(nx+self._y_axis, nx+ny)))
+            y = y.transpose(s)
+        return y
+
+    def _reshape_yi(self, yi, check=False):
+        yi = np.moveaxis(np.asarray(yi), self._y_axis, 0)
+        if check and yi.shape[1:] != self._y_extra_shape:
+            ok_shape = "%r + (N,) + %r" % (self._y_extra_shape[-self._y_axis:],
+                                           self._y_extra_shape[:-self._y_axis])
+            raise ValueError("Data must be of shape %s" % ok_shape)
+        return yi.reshape((yi.shape[0], -1))
+
+    def _set_yi(self, yi, xi=None, axis=None):
+        if axis is None:
+            axis = self._y_axis
+        if axis is None:
+            raise ValueError("no interpolation axis specified")
+
+        yi = np.asarray(yi)
+
+        shape = yi.shape
+        if shape == ():
+            shape = (1,)
+        if xi is not None and shape[axis] != len(xi):
+            raise ValueError("x and y arrays must be equal in length along "
+                             "interpolation axis.")
+
+        self._y_axis = (axis % yi.ndim)
+        self._y_extra_shape = yi.shape[:self._y_axis]+yi.shape[self._y_axis+1:]
+        self.dtype = None
+        self._set_dtype(yi.dtype)
+
+    def _set_dtype(self, dtype, union=False):
+        if np.issubdtype(dtype, np.complexfloating) \
+               or np.issubdtype(self.dtype, np.complexfloating):
+            self.dtype = np.complex_
+        else:
+            if not union or self.dtype != np.complex_:
+                self.dtype = np.float_
+
+
+class _Interpolator1DWithDerivatives(_Interpolator1D):
+    def derivatives(self, x, der=None):
+        """
+        Evaluate many derivatives of the polynomial at the point x
+
+        Produce an array of all derivative values at the point x.
+
+        Parameters
+        ----------
+        x : array_like
+            Point or points at which to evaluate the derivatives
+        der : int or None, optional
+            How many derivatives to extract; None for all potentially
+            nonzero derivatives (that is a number equal to the number
+            of points). This number includes the function value as 0th
+            derivative.
+
+        Returns
+        -------
+        d : ndarray
+            Array with derivatives; d[j] contains the jth derivative.
+            Shape of d[j] is determined by replacing the interpolation
+            axis in the original array with the shape of x.
+
+        Examples
+        --------
+        >>> from scipy.interpolate import KroghInterpolator
+        >>> KroghInterpolator([0,0,0],[1,2,3]).derivatives(0)
+        array([1., 2., 3.])
+        >>> KroghInterpolator([0,0,0],[1,2,3]).derivatives([0,0])
+        array([[1., 1.],
+               [2., 2.],
+               [3., 3.]])
+
+        """
+        x, x_shape = self._prepare_x(x)
+        y = self._evaluate_derivatives(x, der)
+
+        y = y.reshape((y.shape[0],) + x_shape + self._y_extra_shape)
+        if self._y_axis != 0 and x_shape != ():
+            nx = len(x_shape)
+            ny = len(self._y_extra_shape)
+            s = ([0] + list(range(nx+1, nx + self._y_axis+1))
+                 + list(range(1, nx+1)) +
+                 list(range(nx+1+self._y_axis, nx+ny+1)))
+            y = y.transpose(s)
+        return y
+
+    def derivative(self, x, der=1):
+        """
+        Evaluate one derivative of the polynomial at the point x
+
+        Parameters
+        ----------
+        x : array_like
+            Point or points at which to evaluate the derivatives
+
+        der : integer, optional
+            Which derivative to extract. This number includes the
+            function value as 0th derivative.
+
+        Returns
+        -------
+        d : ndarray
+            Derivative interpolated at the x-points. Shape of d is
+            determined by replacing the interpolation axis in the
+            original array with the shape of x.
+
+        Notes
+        -----
+        This is computed by evaluating all derivatives up to the desired
+        one (using self.derivatives()) and then discarding the rest.
+
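+        Examples
+        --------
+        A minimal sketch, reusing the data from the `derivatives` example,
+        where the second derivative at 0 is 3:
+
+        >>> from scipy.interpolate import KroghInterpolator
+        >>> k = KroghInterpolator([0, 0, 0], [1, 2, 3])
+        >>> float(k.derivative(0, der=2))
+        3.0
+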
+        """
+        x, x_shape = self._prepare_x(x)
+        y = self._evaluate_derivatives(x, der+1)
+        return self._finish_y(y[der], x_shape)
+
+
+class KroghInterpolator(_Interpolator1DWithDerivatives):
+    """
+    Interpolating polynomial for a set of points.
+
+    The polynomial passes through all the pairs (xi,yi). One may
+    additionally specify a number of derivatives at each point xi;
+    this is done by repeating the value xi and specifying the
+    derivatives as successive yi values.
+
+    Allows evaluation of the polynomial and all its derivatives.
+    For reasons of numerical stability, this function does not compute
+    the coefficients of the polynomial, although they can be obtained
+    by evaluating all the derivatives.
+
+    Parameters
+    ----------
+    xi : array_like, length N
+        Known x-coordinates. Must be sorted in increasing order.
+    yi : array_like
+        Known y-coordinates. When an xi occurs two or more times in
+        a row, the corresponding yi's represent derivative values.
+    axis : int, optional
+        Axis in the yi array corresponding to the x-coordinate values.
+
+    Notes
+    -----
+    Be aware that the algorithms implemented here are not necessarily
+    the most numerically stable known. Moreover, even in a world of
+    exact computation, unless the x coordinates are chosen very
+    carefully - Chebyshev zeros (e.g., cos(i*pi/n)) are a good choice -
+    polynomial interpolation itself is a very ill-conditioned process
+    due to the Runge phenomenon. In general, even with well-chosen
+    x values, degrees higher than about thirty cause problems with
+    numerical instability in this code.
+
+    Based on [1]_.
+
+    References
+    ----------
+    .. [1] Krogh, "Efficient Algorithms for Polynomial Interpolation
+        and Numerical Differentiation", 1970.
+
+    Examples
+    --------
+    To produce a polynomial that is zero at 0 and 1 and has
+    derivative 2 at 0, call
+
+    >>> from scipy.interpolate import KroghInterpolator
+    >>> KroghInterpolator([0,0,1],[0,2,0])
+
+    This constructs the quadratic 2*X-2*X**2. The derivative condition
+    is indicated by the repeated zero in the xi array; the corresponding
+    yi values are 0, the function value, and 2, the derivative value.
+
+    For another example, given xi, yi, and a derivative ypi for each
+    point, appropriate arrays can be constructed as:
+
+    >>> import numpy as np
+    >>> rng = np.random.default_rng()
+    >>> xi = np.linspace(0, 1, 5)
+    >>> yi, ypi = rng.random((2, 5))
+    >>> xi_k, yi_k = np.repeat(xi, 2), np.ravel(np.dstack((yi,ypi)))
+    >>> KroghInterpolator(xi_k, yi_k)
+
+    To produce a vector-valued polynomial, supply a higher-dimensional
+    array for yi:
+
+    >>> KroghInterpolator([0,1],[[2,3],[4,5]])
+
+    This constructs a linear polynomial giving (2,3) at 0 and (4,5) at 1.
+
+    """
+
+    def __init__(self, xi, yi, axis=0):
+        _Interpolator1DWithDerivatives.__init__(self, xi, yi, axis)
+
+        self.xi = np.asarray(xi)
+        self.yi = self._reshape_yi(yi)
+        self.n, self.r = self.yi.shape
+
+        if (deg := self.xi.size) > 30:
+            warnings.warn(f"{deg} degrees provided, degrees higher than about"
+                          " thirty cause problems with numerical instability "
+                          "with 'KroghInterpolator'", stacklevel=2)
+
+        c = np.zeros((self.n+1, self.r), dtype=self.dtype)
+        c[0] = self.yi[0]
+        Vk = np.zeros((self.n, self.r), dtype=self.dtype)
+        for k in range(1, self.n):
+            s = 0
+            while s <= k and xi[k-s] == xi[k]:
+                s += 1
+            s -= 1
+            Vk[0] = self.yi[k]/float_factorial(s)
+            for i in range(k-s):
+                if xi[i] == xi[k]:
+                    raise ValueError("Elements if `xi` can't be equal.")
+                if s == 0:
+                    Vk[i+1] = (c[i]-Vk[i])/(xi[i]-xi[k])
+                else:
+                    Vk[i+1] = (Vk[i+1]-Vk[i])/(xi[i]-xi[k])
+            c[k] = Vk[k-s]
+        self.c = c
+
+    def _evaluate(self, x):
+        pi = 1
+        p = np.zeros((len(x), self.r), dtype=self.dtype)
+        p += self.c[0,np.newaxis,:]
+        for k in range(1, self.n):
+            w = x - self.xi[k-1]
+            pi = w*pi
+            p += pi[:,np.newaxis] * self.c[k]
+        return p
+
+    def _evaluate_derivatives(self, x, der=None):
+        n = self.n
+        r = self.r
+
+        if der is None:
+            der = self.n
+        pi = np.zeros((n, len(x)))
+        w = np.zeros((n, len(x)))
+        pi[0] = 1
+        p = np.zeros((len(x), self.r), dtype=self.dtype)
+        p += self.c[0, np.newaxis, :]
+
+        for k in range(1, n):
+            w[k-1] = x - self.xi[k-1]
+            pi[k] = w[k-1] * pi[k-1]
+            p += pi[k, :, np.newaxis] * self.c[k]
+
+        cn = np.zeros((max(der, n+1), len(x), r), dtype=self.dtype)
+        cn[:n+1, :, :] += self.c[:n+1, np.newaxis, :]
+        cn[0] = p
+        for k in range(1, n):
+            for i in range(1, n-k+1):
+                pi[i] = w[k+i-1]*pi[i-1] + pi[i]
+                cn[k] = cn[k] + pi[i, :, np.newaxis]*cn[k+i]
+            cn[k] *= float_factorial(k)
+
+        cn[n, :, :] = 0
+        return cn[:der]
+
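As a quick numerical check of the derivative conditions described in the docstring above, the following sketch (values worked out by hand) evaluates the quadratic encoded by the repeated node:

```python
# The repeated node 0 encodes p(0) = 0 and p'(0) = 2; the node 1 encodes
# p(1) = 0. The resulting quadratic is p(x) = 2*x - 2*x**2.
import numpy as np
from scipy.interpolate import KroghInterpolator

p = KroghInterpolator([0, 0, 1], [0, 2, 0])
p.derivatives(0.0)            # array([ 0.,  2., -4.]): p(0), p'(0), p''(0)
p(np.array([0.0, 0.5, 1.0]))  # array([0. , 0.5, 0. ])
```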
+
+def krogh_interpolate(xi, yi, x, der=0, axis=0):
+    """
+    Convenience function for polynomial interpolation.
+
+    See `KroghInterpolator` for more details.
+
+    Parameters
+    ----------
+    xi : array_like
+        Known x-coordinates.
+    yi : array_like
+        Known y-coordinates, of shape ``(xi.size, R)``. Interpreted as
+        vectors of length R, or scalars if R=1.
+    x : array_like
+        Point or points at which to evaluate the derivatives.
+    der : int or list, optional
+        How many derivatives to extract; None for all potentially
+        nonzero derivatives (that is a number equal to the number
+        of points), or a list of derivatives to extract. This number
+        includes the function value as 0th derivative.
+    axis : int, optional
+        Axis in the yi array corresponding to the x-coordinate values.
+
+    Returns
+    -------
+    d : ndarray
+        If the interpolator's values are R-D then the
+        returned array will be the number of derivatives by N by R.
+        If `x` is a scalar, the middle dimension will be dropped; if
+        the `yi` are scalars then the last dimension will be dropped.
+
+    See Also
+    --------
+    KroghInterpolator : Krogh interpolator
+
+    Notes
+    -----
+    Construction of the interpolating polynomial is a relatively expensive
+    process. If you want to evaluate it repeatedly consider using the class
+    KroghInterpolator (which is what this function uses).
+
+    Examples
+    --------
+    We can interpolate 2D observed data using Krogh interpolation:
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.interpolate import krogh_interpolate
+    >>> x_observed = np.linspace(0.0, 10.0, 11)
+    >>> y_observed = np.sin(x_observed)
+    >>> x = np.linspace(min(x_observed), max(x_observed), num=100)
+    >>> y = krogh_interpolate(x_observed, y_observed, x)
+    >>> plt.plot(x_observed, y_observed, "o", label="observation")
+    >>> plt.plot(x, y, label="krogh interpolation")
+    >>> plt.legend()
+    >>> plt.show()
+
+    """
+    P = KroghInterpolator(xi, yi, axis=axis)
+    if der == 0:
+        return P(x)
+    elif _isscalar(der):
+        return P.derivative(x,der=der)
+    else:
+        return P.derivatives(x,der=np.amax(der)+1)[der]
+
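`der` may also be a list of derivative orders, in which case the selected rows of the full derivative array are returned. A minimal sketch, using data that the interpolant reproduces exactly:

```python
# yi equals xi**2 exactly, so the degree-2 interpolant is x**2 and its
# first derivative at 1.5 is 3.0.
import numpy as np
from scipy.interpolate import krogh_interpolate

xi = np.array([0.0, 1.0, 2.0])
yi = xi**2
krogh_interpolate(xi, yi, 1.5, der=[0, 1])  # array([2.25, 3.  ])
```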
+
+def approximate_taylor_polynomial(f,x,degree,scale,order=None):
+    """
+    Estimate the Taylor polynomial of f at x by polynomial fitting.
+
+    Parameters
+    ----------
+    f : callable
+        The function whose Taylor polynomial is sought. Should accept
+        a vector of `x` values.
+    x : scalar
+        The point at which the polynomial is to be evaluated.
+    degree : int
+        The degree of the Taylor polynomial
+    scale : scalar
+        The width of the interval to use to evaluate the Taylor polynomial.
+        Function values spread over a range this wide are used to fit the
+        polynomial. Must be chosen carefully.
+    order : int or None, optional
+        The order of the polynomial to be used in the fitting; `f` will be
+        evaluated ``order+1`` times. If None, use `degree`.
+
+    Returns
+    -------
+    p : poly1d instance
+        The Taylor polynomial (translated to the origin, so that
+        for example p(0)=f(x)).
+
+    Notes
+    -----
+    The appropriate choice of "scale" is a trade-off; too large and the
+    function differs from its Taylor polynomial too much to get a good
+    answer, too small and round-off errors overwhelm the higher-order terms.
+    The algorithm used becomes numerically unstable around order 30 even
+    under ideal circumstances.
+
+    Choosing order somewhat larger than degree may improve the higher-order
+    terms.
+
+    Examples
+    --------
+    We can calculate Taylor approximation polynomials of sin function with
+    various degrees:
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.interpolate import approximate_taylor_polynomial
+    >>> x = np.linspace(-10.0, 10.0, num=100)
+    >>> plt.plot(x, np.sin(x), label="sin curve")
+    >>> for degree in np.arange(1, 15, step=2):
+    ...     sin_taylor = approximate_taylor_polynomial(np.sin, 0, degree, 1,
+    ...                                                order=degree + 2)
+    ...     plt.plot(x, sin_taylor(x), label=f"degree={degree}")
+    >>> plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left',
+    ...            borderaxespad=0.0, shadow=True)
+    >>> plt.tight_layout()
+    >>> plt.axis([-10, 10, -10, 10])
+    >>> plt.show()
+
+    """
+    if order is None:
+        order = degree
+
+    n = order+1
+    # Choose n points that cluster near the endpoints of the interval in
+    # a way that avoids the Runge phenomenon. Ensure, by including the
+    # endpoint or not as appropriate, that one point always falls at x
+    # exactly.
+    xs = scale*np.cos(np.linspace(0,np.pi,n,endpoint=n % 1)) + x
+
+    P = KroghInterpolator(xs, f(xs))
+    d = P.derivatives(x,der=degree+1)
+
+    return np.poly1d((d/factorial(np.arange(degree+1)))[::-1])
+
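A sanity check of the fitted coefficients against a known series; `np.poly1d` stores coefficients highest degree first, so for exp at 0 they should be close to 1/k! in reverse order:

```python
# Fit a degree-3 Taylor polynomial of exp at 0; with order=5 the function
# is sampled 6 times and the higher-order terms improve slightly.
import numpy as np
from scipy.interpolate import approximate_taylor_polynomial

p = approximate_taylor_polynomial(np.exp, 0, degree=3, scale=0.5, order=5)
p.coeffs              # approximately [1/6, 1/2, 1, 1]
p(0.1), np.exp(0.1)   # both close to 1.10517
```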
+
+class BarycentricInterpolator(_Interpolator1D):
+    """The interpolating polynomial for a set of points
+
+    Constructs a polynomial that passes through a given set of points.
+    Allows evaluation of the polynomial, efficient changing of the y
+    values to be interpolated, and updating by adding more x values.
+    For reasons of numerical stability, this function does not compute
+    the coefficients of the polynomial.
+
+    The values yi need to be provided before the function is
+    evaluated, but none of the preprocessing depends on them, so rapid
+    updates are possible.
+
+    Parameters
+    ----------
+    xi : array_like
+        1-D array of x coordinates of the points the polynomial
+        should pass through
+    yi : array_like, optional
+        The y coordinates of the points the polynomial should pass through.
+        If None, the y values will be supplied later via the `set_y` method.
+    axis : int, optional
+        Axis in the yi array corresponding to the x-coordinate values.
+
+    Notes
+    -----
+    This class uses a "barycentric interpolation" method that treats
+    the problem as a special case of rational function interpolation.
+    This algorithm is quite stable, numerically, but even in a world of
+    exact computation, unless the x coordinates are chosen very
+    carefully - Chebyshev zeros (e.g., cos(i*pi/n)) are a good choice -
+    polynomial interpolation itself is a very ill-conditioned process
+    due to the Runge phenomenon.
+
+    Based on Berrut and Trefethen 2004, "Barycentric Lagrange Interpolation".
+
+    """
+
+    def __init__(self, xi, yi=None, axis=0):
+        _Interpolator1D.__init__(self, xi, yi, axis)
+
+        self.xi = np.asfarray(xi)
+        self.set_yi(yi)
+        self.n = len(self.xi)
+
+        # See page 510 of Berrut and Trefethen 2004 for an explanation of the
+        # capacity scaling and the suggestion of using a random permutation of
+        # the input factors.
+        # At the moment, the permutation is not performed for xi that are
+        # appended later through the add_xi interface. It's not clear to me how
+        # to implement that and it seems that most situations that require
+        # these numerical stability improvements will be able to provide all
+        # the points to the constructor.
+        self._inv_capacity = 4.0 / (np.max(self.xi) - np.min(self.xi))
+        permute = np.random.permutation(self.n)
+        inv_permute = np.zeros(self.n, dtype=np.int32)
+        inv_permute[permute] = np.arange(self.n)
+
+        self.wi = np.zeros(self.n)
+        for i in range(self.n):
+            dist = self._inv_capacity * (self.xi[i] - self.xi[permute])
+            dist[inv_permute[i]] = 1.0
+            self.wi[i] = 1.0 / np.prod(dist)
+
+    def set_yi(self, yi, axis=None):
+        """
+        Update the y values to be interpolated
+
+        The barycentric interpolation algorithm requires the calculation
+        of weights, but these depend only on the xi. The yi can be changed
+        at any time.
+
+        Parameters
+        ----------
+        yi : array_like
+            The y coordinates of the points the polynomial should pass through.
+            If None, the y values will be supplied later.
+        axis : int, optional
+            Axis in the yi array corresponding to the x-coordinate values.
+
+        """
+        if yi is None:
+            self.yi = None
+            return
+        self._set_yi(yi, xi=self.xi, axis=axis)
+        self.yi = self._reshape_yi(yi)
+        self.n, self.r = self.yi.shape
+
+    def add_xi(self, xi, yi=None):
+        """
+        Add more x values to the set to be interpolated
+
+        The barycentric interpolation algorithm allows easy updating by
+        adding more points for the polynomial to pass through.
+
+        Parameters
+        ----------
+        xi : array_like
+            The x coordinates of the points that the polynomial should pass
+            through.
+        yi : array_like, optional
+            The y coordinates of the points the polynomial should pass through.
+            Should have shape ``(xi.size, R)``; if R > 1 then the polynomial is
+            vector-valued.
+            If `yi` is not given, the y values will be supplied later. `yi`
+            should be given if and only if the interpolator has y values
+            specified.
+
+        """
+        if yi is not None:
+            if self.yi is None:
+                raise ValueError("No previous yi value to update!")
+            yi = self._reshape_yi(yi, check=True)
+            self.yi = np.vstack((self.yi,yi))
+        else:
+            if self.yi is not None:
+                raise ValueError("No update to yi provided!")
+        old_n = self.n
+        self.xi = np.concatenate((self.xi,xi))
+        self.n = len(self.xi)
+        self.wi **= -1
+        old_wi = self.wi
+        self.wi = np.zeros(self.n)
+        self.wi[:old_n] = old_wi
+        for j in range(old_n, self.n):
+            self.wi[:j] *= self._inv_capacity * (self.xi[j]-self.xi[:j])
+            self.wi[j] = np.multiply.reduce(
+                self._inv_capacity * (self.xi[:j]-self.xi[j])
+            )
+        self.wi **= -1
+
+    def __call__(self, x):
+        """Evaluate the interpolating polynomial at the points x
+
+        Parameters
+        ----------
+        x : array_like
+            Points to evaluate the interpolant at.
+
+        Returns
+        -------
+        y : array_like
+            Interpolated values. Shape is determined by replacing
+            the interpolation axis in the original array with the shape of x.
+
+        Notes
+        -----
+        Currently the code computes an outer product between x and the
+        weights, that is, it constructs an intermediate array of size
+        N by len(x), where N is the degree of the polynomial.
+        """
+        return _Interpolator1D.__call__(self, x)
+
+    def _evaluate(self, x):
+        if x.size == 0:
+            p = np.zeros((0, self.r), dtype=self.dtype)
+        else:
+            c = x[..., np.newaxis] - self.xi
+            z = c == 0
+            c[z] = 1
+            c = self.wi/c
+            with np.errstate(divide='ignore'):
+                p = np.dot(c, self.yi) / np.sum(c, axis=-1)[..., np.newaxis]
+            # Now fix where x==some xi
+            r = np.nonzero(z)
+            if len(r) == 1:  # evaluation at a scalar
+                if len(r[0]) > 0:  # equals one of the points
+                    p = self.yi[r[0][0]]
+            else:
+                p[r[:-1]] = self.yi[r[-1]]
+        return p
+
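The class docstring stresses that the weights depend only on `xi`; a short sketch of the rapid-update pattern this enables:

```python
# The barycentric weights are computed once from xi; set_yi() swaps in new
# data without recomputing them.
import numpy as np
from scipy.interpolate import BarycentricInterpolator

xi = np.linspace(0, 1, 9)
P = BarycentricInterpolator(xi, np.sin(xi))
P(0.5)                 # interpolates sin
P.set_yi(np.cos(xi))   # reuse the same weights for new y values
P(0.5)                 # now interpolates cos
```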
+
+def barycentric_interpolate(xi, yi, x, axis=0):
+    """
+    Convenience function for polynomial interpolation.
+
+    Constructs a polynomial that passes through a given set of points,
+    then evaluates the polynomial. For reasons of numerical stability,
+    this function does not compute the coefficients of the polynomial.
+
+    This function uses a "barycentric interpolation" method that treats
+    the problem as a special case of rational function interpolation.
+    This algorithm is quite stable, numerically, but even in a world of
+    exact computation, unless the `x` coordinates are chosen very
+    carefully - Chebyshev zeros (e.g., cos(i*pi/n)) are a good choice -
+    polynomial interpolation itself is a very ill-conditioned process
+    due to the Runge phenomenon.
+
+    Parameters
+    ----------
+    xi : array_like
+        1-D array of x coordinates of the points the polynomial should
+        pass through
+    yi : array_like
+        The y coordinates of the points the polynomial should pass through.
+    x : scalar or array_like
+        Points to evaluate the interpolator at.
+    axis : int, optional
+        Axis in the yi array corresponding to the x-coordinate values.
+
+    Returns
+    -------
+    y : scalar or array_like
+        Interpolated values. Shape is determined by replacing
+        the interpolation axis in the original array with the shape of x.
+
+    See Also
+    --------
+    BarycentricInterpolator : Barycentric interpolator
+
+    Notes
+    -----
+    Construction of the interpolation weights is a relatively slow process.
+    If you want to call this many times with the same xi (but possibly
+    varying yi or x) you should use the class `BarycentricInterpolator`.
+    This is what this function uses internally.
+
+    Examples
+    --------
+    We can interpolate 2D observed data using barycentric interpolation:
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.interpolate import barycentric_interpolate
+    >>> x_observed = np.linspace(0.0, 10.0, 11)
+    >>> y_observed = np.sin(x_observed)
+    >>> x = np.linspace(min(x_observed), max(x_observed), num=100)
+    >>> y = barycentric_interpolate(x_observed, y_observed, x)
+    >>> plt.plot(x_observed, y_observed, "o", label="observation")
+    >>> plt.plot(x, y, label="barycentric interpolation")
+    >>> plt.legend()
+    >>> plt.show()
+
+    """
+    return BarycentricInterpolator(xi, yi, axis=axis)(x)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/interpolate/_rbf.py b/__packaged__/coreml/.python_dependencies/scipy/interpolate/_rbf.py
new file mode 100644
index 00000000..85864502
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/interpolate/_rbf.py
@@ -0,0 +1,289 @@
+"""rbf - Radial basis functions for interpolation/smoothing scattered N-D data.
+
+Written by John Travers, February 2007
+Based closely on Matlab code by Alex Chirokov
+Additional, large, improvements by Robert Hetland
+Some additional alterations by Travis Oliphant
+Interpolation with multi-dimensional target domain by Josua Sassen
+
+Permission to use, modify, and distribute this software is given under the
+terms of the SciPy (BSD style) license. See LICENSE.txt that came with
+this distribution for specifics.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+
+Copyright (c) 2006-2007, Robert Hetland 
+Copyright (c) 2007, John Travers 
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+       notice, this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above
+       copyright notice, this list of conditions and the following
+       disclaimer in the documentation and/or other materials provided
+       with the distribution.
+
+    * Neither the name of Robert Hetland nor the names of any
+       contributors may be used to endorse or promote products derived
+       from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+"""
+import numpy as np
+
+from scipy import linalg
+from scipy.special import xlogy
+from scipy.spatial.distance import cdist, pdist, squareform
+
+__all__ = ['Rbf']
+
+
+class Rbf:
+    """
+    Rbf(*args, **kwargs)
+
+    A class for radial basis function interpolation of functions from
+    N-D scattered data to an M-D domain.
+
+    .. note::
+        `Rbf` is legacy code, for new usage please use `RBFInterpolator`
+        instead.
+
+    Parameters
+    ----------
+    *args : arrays
+        x, y, z, ..., d, where x, y, z, ... are the coordinates of the nodes
+        and d is the array of values at the nodes
+    function : str or callable, optional
+        The radial basis function, based on the radius, r, given by the norm
+        (default is Euclidean distance); the default is 'multiquadric'::
+
+            'multiquadric': sqrt((r/self.epsilon)**2 + 1)
+            'inverse': 1.0/sqrt((r/self.epsilon)**2 + 1)
+            'gaussian': exp(-(r/self.epsilon)**2)
+            'linear': r
+            'cubic': r**3
+            'quintic': r**5
+            'thin_plate': r**2 * log(r)
+
+        If callable, then it must take 2 arguments (self, r). The epsilon
+        parameter will be available as self.epsilon. Other keyword
+        arguments passed in will be available as well.
+
+    epsilon : float, optional
+        Adjustable constant for gaussian or multiquadrics functions
+        - defaults to approximate average distance between nodes (which is
+        a good start).
+    smooth : float, optional
+        Values greater than zero increase the smoothness of the
+        approximation. 0 is for interpolation (default), the function will
+        always go through the nodal points in this case.
+    norm : str, callable, optional
+        A function that returns the 'distance' between two points, with
+        inputs as arrays of positions (x, y, z, ...), and an output as an
+        array of distance. E.g., the default: 'euclidean', such that the result
+        is a matrix of the distances from each point in ``x1`` to each point in
+        ``x2``. For more options, see documentation of
+        `scipy.spatial.distance.cdist`.
+    mode : str, optional
+        Mode of the interpolation, can be '1-D' (default) or 'N-D'. When it is
+        '1-D' the data `d` will be considered as 1-D and flattened
+        internally. When it is 'N-D' the data `d` is assumed to be an array of
+        shape (n_samples, m), where m is the dimension of the target domain.
+
+
+    Attributes
+    ----------
+    N : int
+        The number of data points (as determined by the input arrays).
+    di : ndarray
+        The 1-D array of data values at each of the data coordinates `xi`.
+    xi : ndarray
+        The 2-D array of data coordinates.
+    function : str or callable
+        The radial basis function. See description under Parameters.
+    epsilon : float
+        Parameter used by gaussian or multiquadrics functions. See Parameters.
+    smooth : float
+        Smoothing parameter. See description under Parameters.
+    norm : str or callable
+        The distance function. See description under Parameters.
+    mode : str
+        Mode of the interpolation. See description under Parameters.
+    nodes : ndarray
+        A 1-D array of node values for the interpolation.
+    A : internal property, do not use
+
+    See Also
+    --------
+    RBFInterpolator
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.interpolate import Rbf
+    >>> rng = np.random.default_rng()
+    >>> x, y, z, d = rng.random((4, 50))
+    >>> rbfi = Rbf(x, y, z, d)  # radial basis function interpolator instance
+    >>> xi = yi = zi = np.linspace(0, 1, 20)
+    >>> di = rbfi(xi, yi, zi)   # interpolated values
+    >>> di.shape
+    (20,)
+
+    """
+    # Available radial basis functions that can be selected as strings;
+    # they all start with _h_ (self._init_function relies on that)
+    def _h_multiquadric(self, r):
+        return np.sqrt((1.0/self.epsilon*r)**2 + 1)
+
+    def _h_inverse_multiquadric(self, r):
+        return 1.0/np.sqrt((1.0/self.epsilon*r)**2 + 1)
+
+    def _h_gaussian(self, r):
+        return np.exp(-(1.0/self.epsilon*r)**2)
+
+    def _h_linear(self, r):
+        return r
+
+    def _h_cubic(self, r):
+        return r**3
+
+    def _h_quintic(self, r):
+        return r**5
+
+    def _h_thin_plate(self, r):
+        return xlogy(r**2, r)
+
+    # Setup self._function and do smoke test on initial r
+    def _init_function(self, r):
+        if isinstance(self.function, str):
+            self.function = self.function.lower()
+            _mapped = {'inverse': 'inverse_multiquadric',
+                       'inverse multiquadric': 'inverse_multiquadric',
+                       'thin-plate': 'thin_plate'}
+            if self.function in _mapped:
+                self.function = _mapped[self.function]
+
+            func_name = "_h_" + self.function
+            if hasattr(self, func_name):
+                self._function = getattr(self, func_name)
+            else:
+                functionlist = [x[3:] for x in dir(self)
+                                if x.startswith('_h_')]
+                raise ValueError("function must be a callable or one of " +
+                                 ", ".join(functionlist))
+            self._function = getattr(self, "_h_"+self.function)
+        elif callable(self.function):
+            allow_one = False
+            if hasattr(self.function, 'func_code') or \
+               hasattr(self.function, '__code__'):
+                val = self.function
+                allow_one = True
+            elif hasattr(self.function, "__call__"):
+                val = self.function.__call__.__func__
+            else:
+                raise ValueError("Cannot determine number of arguments to "
+                                 "function")
+
+            argcount = val.__code__.co_argcount
+            if allow_one and argcount == 1:
+                self._function = self.function
+            elif argcount == 2:
+                self._function = self.function.__get__(self, Rbf)
+            else:
+                raise ValueError("Function argument must take 1 or 2 "
+                                 "arguments.")
+
+        a0 = self._function(r)
+        if a0.shape != r.shape:
+            raise ValueError("Callable must take array and return array of "
+                             "the same shape")
+        return a0
+
+    def __init__(self, *args, **kwargs):
+        # `args` can be a variable number of arrays; we flatten them and store
+        # them as a single 2-D array `xi` of shape (n_args-1, array_size),
+        # plus a 1-D array `di` for the values.
+        # All arrays must have the same number of elements
+        self.xi = np.asarray([np.asarray(a, dtype=np.float_).flatten()
+                              for a in args[:-1]])
+        self.N = self.xi.shape[-1]
+
+        self.mode = kwargs.pop('mode', '1-D')
+
+        if self.mode == '1-D':
+            self.di = np.asarray(args[-1]).flatten()
+            self._target_dim = 1
+        elif self.mode == 'N-D':
+            self.di = np.asarray(args[-1])
+            self._target_dim = self.di.shape[-1]
+        else:
+            raise ValueError("Mode has to be 1-D or N-D.")
+
+        if not all([x.size == self.di.shape[0] for x in self.xi]):
+            raise ValueError("All arrays must be equal length.")
+
+        self.norm = kwargs.pop('norm', 'euclidean')
+        self.epsilon = kwargs.pop('epsilon', None)
+        if self.epsilon is None:
+            # default epsilon is "the average distance between nodes" based
+            # on a bounding hypercube
+            ximax = np.amax(self.xi, axis=1)
+            ximin = np.amin(self.xi, axis=1)
+            edges = ximax - ximin
+            edges = edges[np.nonzero(edges)]
+            self.epsilon = np.power(np.prod(edges)/self.N, 1.0/edges.size)
+
+        self.smooth = kwargs.pop('smooth', 0.0)
+        self.function = kwargs.pop('function', 'multiquadric')
+
+        # attach anything left in kwargs to self for use by any user-callable
+        # function or to save on the object returned.
+        for item, value in kwargs.items():
+            setattr(self, item, value)
+
+        # Compute weights
+        if self._target_dim > 1:  # If we have more than one target dimension,
+            # we first factorize the matrix
+            self.nodes = np.zeros((self.N, self._target_dim), dtype=self.di.dtype)
+            lu, piv = linalg.lu_factor(self.A)
+            for i in range(self._target_dim):
+                self.nodes[:, i] = linalg.lu_solve((lu, piv), self.di[:, i])
+        else:
+            self.nodes = linalg.solve(self.A, self.di)
+
+    @property
+    def A(self):
+        # this only exists for backwards compatibility: self.A was available
+        # and, at least technically, public.
+        r = squareform(pdist(self.xi.T, self.norm))  # Pairwise norm
+        return self._init_function(r) - np.eye(self.N)*self.smooth
+
+    def _call_norm(self, x1, x2):
+        return cdist(x1.T, x2.T, self.norm)
+
+    def __call__(self, *args):
+        args = [np.asarray(x) for x in args]
+        if not all([x.shape == y.shape for x in args for y in args]):
+            raise ValueError("Array lengths must be equal")
+        if self._target_dim > 1:
+            shp = args[0].shape + (self._target_dim,)
+        else:
+            shp = args[0].shape
+        xa = np.asarray([a.flatten() for a in args], dtype=np.float_)
+        r = self._call_norm(xa, self.xi)
+        return np.dot(self._function(r), self.nodes).reshape(shp)
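The `mode='N-D'` option documented above has no accompanying example; a minimal sketch of a vector-valued interpolant (array shapes are illustrative):

```python
# With mode='N-D', d has shape (n_samples, m) and the interpolant maps the
# input space to R^m; here m = 3.
import numpy as np
from scipy.interpolate import Rbf

rng = np.random.default_rng()
x, y = rng.random((2, 50))
d = rng.random((50, 3))            # 3-D value at each node
rbfi = Rbf(x, y, d, mode='N-D')
xi = yi = np.linspace(0, 1, 20)
rbfi(xi, yi).shape                 # (20, 3)
```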
diff --git a/__packaged__/coreml/.python_dependencies/scipy/interpolate/_rbfinterp.py b/__packaged__/coreml/.python_dependencies/scipy/interpolate/_rbfinterp.py
new file mode 100644
index 00000000..5404c7c6
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/interpolate/_rbfinterp.py
@@ -0,0 +1,546 @@
+"""Module for RBF interpolation."""
+import warnings
+from itertools import combinations_with_replacement
+
+import numpy as np
+from numpy.linalg import LinAlgError
+from scipy.spatial import KDTree
+from scipy.special import comb
+from scipy.linalg.lapack import dgesv  # type: ignore[attr-defined]
+
+from ._rbfinterp_pythran import (_build_system,
+                                 _build_evaluation_coefficients,
+                                 _polynomial_matrix)
+
+
+__all__ = ["RBFInterpolator"]
+
+
+# These RBFs are implemented.
+_AVAILABLE = {
+    "linear",
+    "thin_plate_spline",
+    "cubic",
+    "quintic",
+    "multiquadric",
+    "inverse_multiquadric",
+    "inverse_quadratic",
+    "gaussian"
+    }
+
+
+# The shape parameter does not need to be specified when using these RBFs.
+_SCALE_INVARIANT = {"linear", "thin_plate_spline", "cubic", "quintic"}
+
+
+# For RBFs that are conditionally positive definite of order m, the interpolant
+# should include polynomial terms with degree >= m - 1. Define the minimum
+# degrees here. These values are from Chapter 8 of Fasshauer's "Meshfree
+# Approximation Methods with MATLAB". The RBFs that are not in this dictionary
+# are positive definite and do not need polynomial terms.
+_NAME_TO_MIN_DEGREE = {
+    "multiquadric": 0,
+    "linear": 0,
+    "thin_plate_spline": 1,
+    "cubic": 1,
+    "quintic": 2
+    }
+
+
+def _monomial_powers(ndim, degree):
+    """Return the powers for each monomial in a polynomial.
+
+    Parameters
+    ----------
+    ndim : int
+        Number of variables in the polynomial.
+    degree : int
+        Degree of the polynomial.
+
+    Returns
+    -------
+    (nmonos, ndim) int ndarray
+        Array where each row contains the powers for each variable in a
+        monomial.
+
+    """
+    nmonos = comb(degree + ndim, ndim, exact=True)
+    out = np.zeros((nmonos, ndim), dtype=int)
+    count = 0
+    for deg in range(degree + 1):
+        for mono in combinations_with_replacement(range(ndim), deg):
+            # `mono` is a tuple of variables in the current monomial with
+            # multiplicity indicating power (e.g., (0, 1, 1) represents x*y**2)
+            for var in mono:
+                out[count, var] += 1
+
+            count += 1
+
+    return out
+
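A standalone sketch of what the enumeration above produces for `ndim=2, degree=2`; each combination with replacement of variable indices is one monomial:

```python
from itertools import combinations_with_replacement

import numpy as np
from scipy.special import comb

ndim, degree = 2, 2
out = np.zeros((comb(degree + ndim, ndim, exact=True), ndim), dtype=int)
count = 0
for deg in range(degree + 1):
    for mono in combinations_with_replacement(range(ndim), deg):
        for var in mono:
            out[count, var] += 1
        count += 1
# out -> [[0, 0], [1, 0], [0, 1], [2, 0], [1, 1], [0, 2]]
# i.e. the monomials 1, x, y, x**2, x*y, y**2
```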
+
+def _build_and_solve_system(y, d, smoothing, kernel, epsilon, powers):
+    """Build and solve the RBF interpolation system of equations.
+
+    Parameters
+    ----------
+    y : (P, N) float ndarray
+        Data point coordinates.
+    d : (P, S) float ndarray
+        Data values at `y`.
+    smoothing : (P,) float ndarray
+        Smoothing parameter for each data point.
+    kernel : str
+        Name of the RBF.
+    epsilon : float
+        Shape parameter.
+    powers : (R, N) int ndarray
+        The exponents for each monomial in the polynomial.
+
+    Returns
+    -------
+    coeffs : (P + R, S) float ndarray
+        Coefficients for each RBF and monomial.
+    shift : (N,) float ndarray
+        Domain shift used to create the polynomial matrix.
+    scale : (N,) float ndarray
+        Domain scaling used to create the polynomial matrix.
+
+    """
+    lhs, rhs, shift, scale = _build_system(
+        y, d, smoothing, kernel, epsilon, powers
+        )
+    _, _, coeffs, info = dgesv(lhs, rhs, overwrite_a=True, overwrite_b=True)
+    if info < 0:
+        raise ValueError(f"The {-info}-th argument had an illegal value.")
+    elif info > 0:
+        msg = "Singular matrix."
+        nmonos = powers.shape[0]
+        if nmonos > 0:
+            pmat = _polynomial_matrix((y - shift)/scale, powers)
+            rank = np.linalg.matrix_rank(pmat)
+            if rank < nmonos:
+                msg = (
+                    "Singular matrix. The matrix of monomials evaluated at "
+                    "the data point coordinates does not have full column "
+                    f"rank ({rank}/{nmonos})."
+                    )
+
+        raise LinAlgError(msg)
+
+    return shift, scale, coeffs
+
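A sketch of the failure mode this diagnostic is written for, using the public `RBFInterpolator` entry point (illustrative only): repeated data sites with zero smoothing produce exactly duplicated rows in the LHS, and with the default degree-1 polynomial tail the monomial block is also rank-deficient, so the detailed rank message should be raised:

```python
import numpy as np
from scipy.interpolate import RBFInterpolator

y = np.array([[0.0, 0.0], [0.0, 0.0], [1.0, 1.0]])  # duplicated site
try:
    RBFInterpolator(y, [1.0, 2.0, 3.0])
except np.linalg.LinAlgError as exc:
    print(exc)  # "Singular matrix. The matrix of monomials ..."
```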
+
+class RBFInterpolator:
+    """Radial basis function (RBF) interpolation in N dimensions.
+
+    Parameters
+    ----------
+    y : (P, N) array_like
+        Data point coordinates.
+    d : (P, ...) array_like
+        Data values at `y`.
+    neighbors : int, optional
+        If specified, the value of the interpolant at each evaluation point
+        will be computed using only this many nearest data points. All the data
+        points are used by default.
+    smoothing : float or (P,) array_like, optional
+        Smoothing parameter. The interpolant perfectly fits the data when this
+        is set to 0. For large values, the interpolant approaches a least
+        squares fit of a polynomial with the specified degree. Default is 0.
+    kernel : str, optional
+        Type of RBF. This should be one of
+
+            - 'linear'               : ``-r``
+            - 'thin_plate_spline'    : ``r**2 * log(r)``
+            - 'cubic'                : ``r**3``
+            - 'quintic'              : ``-r**5``
+            - 'multiquadric'         : ``-sqrt(1 + r**2)``
+            - 'inverse_multiquadric' : ``1/sqrt(1 + r**2)``
+            - 'inverse_quadratic'    : ``1/(1 + r**2)``
+            - 'gaussian'             : ``exp(-r**2)``
+
+        Default is 'thin_plate_spline'.
+    epsilon : float, optional
+        Shape parameter that scales the input to the RBF. If `kernel` is
+        'linear', 'thin_plate_spline', 'cubic', or 'quintic', this defaults to
+        1 and can be ignored because it has the same effect as scaling the
+        smoothing parameter. Otherwise, this must be specified.
+    degree : int, optional
+        Degree of the added polynomial. For some RBFs the interpolant may not
+        be well-posed if the polynomial degree is too small. Those RBFs and
+        their corresponding minimum degrees are
+
+            - 'multiquadric'      : 0
+            - 'linear'            : 0
+            - 'thin_plate_spline' : 1
+            - 'cubic'             : 1
+            - 'quintic'           : 2
+
+        The default value is the minimum degree for `kernel` or 0 if there is
+        no minimum degree. Set this to -1 for no added polynomial.
+
+    Notes
+    -----
+    An RBF is a scalar valued function in N-dimensional space whose value at
+    :math:`x` can be expressed in terms of :math:`r=||x - c||`, where :math:`c`
+    is the center of the RBF.
+
+    An RBF interpolant for the vector of data values :math:`d`, which are from
+    locations :math:`y`, is a linear combination of RBFs centered at :math:`y`
+    plus a polynomial with a specified degree. The RBF interpolant is written
+    as
+
+    .. math::
+        f(x) = K(x, y) a + P(x) b,
+
+    where :math:`K(x, y)` is a matrix of RBFs with centers at :math:`y`
+    evaluated at the points :math:`x`, and :math:`P(x)` is a matrix of
+    monomials, which span polynomials with the specified degree, evaluated at
+    :math:`x`. The coefficients :math:`a` and :math:`b` are the solution to the
+    linear equations
+
+    .. math::
+        (K(y, y) + \\lambda I) a + P(y) b = d
+
+    and
+
+    .. math::
+        P(y)^T a = 0,
+
+    where :math:`\\lambda` is a non-negative smoothing parameter that controls
+    how well we want to fit the data. The data are fit exactly when the
+    smoothing parameter is 0.
+
+    The above system is uniquely solvable if the following requirements are
+    met:
+
+        - :math:`P(y)` must have full column rank. :math:`P(y)` always has full
+          column rank when `degree` is -1 or 0. When `degree` is 1,
+          :math:`P(y)` has full column rank if the data point locations are not
+          all collinear (N=2), coplanar (N=3), etc.
+        - If `kernel` is 'multiquadric', 'linear', 'thin_plate_spline',
+          'cubic', or 'quintic', then `degree` must not be lower than the
+          minimum value listed above.
+        - If `smoothing` is 0, then each data point location must be distinct.
+
+    When using an RBF that is not scale invariant ('multiquadric',
+    'inverse_multiquadric', 'inverse_quadratic', or 'gaussian'), an appropriate
+    shape parameter must be chosen (e.g., through cross validation). Smaller
+    values for the shape parameter correspond to wider RBFs. The problem can
+    become ill-conditioned or singular when the shape parameter is too small.
+
+    The memory required to solve for the RBF interpolation coefficients
+    increases quadratically with the number of data points, which can become
+    impractical when interpolating more than about a thousand data points.
+    To overcome memory limitations for large interpolation problems, the
+    `neighbors` argument can be specified to compute an RBF interpolant for
+    each evaluation point using only the nearest data points.
+
+    .. versionadded:: 1.7.0
+
+    See Also
+    --------
+    NearestNDInterpolator
+    LinearNDInterpolator
+    CloughTocher2DInterpolator
+
+    References
+    ----------
+    .. [1] Fasshauer, G., 2007. Meshfree Approximation Methods with Matlab.
+        World Scientific Publishing Co.
+
+    .. [2] http://amadeus.math.iit.edu/~fass/603_ch3.pdf
+
+    .. [3] Wahba, G., 1990. Spline Models for Observational Data. SIAM.
+
+    .. [4] http://pages.stat.wisc.edu/~wahba/stat860public/lect/lect8/lect8.pdf
+
+    Examples
+    --------
+    Demonstrate interpolating scattered data to a grid in 2-D.
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.interpolate import RBFInterpolator
+    >>> from scipy.stats.qmc import Halton
+
+    >>> rng = np.random.default_rng()
+    >>> xobs = 2*Halton(2, seed=rng).random(100) - 1
+    >>> yobs = np.sum(xobs, axis=1)*np.exp(-6*np.sum(xobs**2, axis=1))
+
+    >>> xgrid = np.mgrid[-1:1:50j, -1:1:50j]
+    >>> xflat = xgrid.reshape(2, -1).T
+    >>> yflat = RBFInterpolator(xobs, yobs)(xflat)
+    >>> ygrid = yflat.reshape(50, 50)
+
+    >>> fig, ax = plt.subplots()
+    >>> ax.pcolormesh(*xgrid, ygrid, vmin=-0.25, vmax=0.25, shading='gouraud')
+    >>> p = ax.scatter(*xobs.T, c=yobs, s=50, ec='k', vmin=-0.25, vmax=0.25)
+    >>> fig.colorbar(p)
+    >>> plt.show()
+
+    """
+
+    def __init__(self, y, d,
+                 neighbors=None,
+                 smoothing=0.0,
+                 kernel="thin_plate_spline",
+                 epsilon=None,
+                 degree=None):
+        y = np.asarray(y, dtype=float, order="C")
+        if y.ndim != 2:
+            raise ValueError("`y` must be a 2-dimensional array.")
+
+        ny, ndim = y.shape
+
+        d_dtype = complex if np.iscomplexobj(d) else float
+        d = np.asarray(d, dtype=d_dtype, order="C")
+        if d.shape[0] != ny:
+            raise ValueError(
+                f"Expected the first axis of `d` to have length {ny}."
+                )
+
+        d_shape = d.shape[1:]
+        d = d.reshape((ny, -1))
+        # If `d` is complex, convert it to a float array with twice as many
+        # columns. Otherwise, the LHS matrix would need to be converted to
+        # complex and take up 2x more memory than necessary.
+        d = d.view(float)
+
+        if np.isscalar(smoothing):
+            smoothing = np.full(ny, smoothing, dtype=float)
+        else:
+            smoothing = np.asarray(smoothing, dtype=float, order="C")
+            if smoothing.shape != (ny,):
+                raise ValueError(
+                    "Expected `smoothing` to be a scalar or have shape "
+                    f"({ny},)."
+                    )
+
+        kernel = kernel.lower()
+        if kernel not in _AVAILABLE:
+            raise ValueError(f"`kernel` must be one of {_AVAILABLE}.")
+
+        if epsilon is None:
+            if kernel in _SCALE_INVARIANT:
+                epsilon = 1.0
+            else:
+                raise ValueError(
+                    "`epsilon` must be specified if `kernel` is not one of "
+                    f"{_SCALE_INVARIANT}."
+                    )
+        else:
+            epsilon = float(epsilon)
+
+        min_degree = _NAME_TO_MIN_DEGREE.get(kernel, -1)
+        if degree is None:
+            degree = max(min_degree, 0)
+        else:
+            degree = int(degree)
+            if degree < -1:
+                raise ValueError("`degree` must be at least -1.")
+            elif degree < min_degree:
+                warnings.warn(
+                    f"`degree` should not be below {min_degree} when `kernel` "
+                    f"is '{kernel}'. The interpolant may not be uniquely "
+                    "solvable, and the smoothing parameter may have an "
+                    "unintuitive effect.",
+                    UserWarning
+                    )
+
+        if neighbors is None:
+            nobs = ny
+        else:
+            # Make sure the number of nearest neighbors used for interpolation
+            # does not exceed the number of observations.
+            neighbors = int(min(neighbors, ny))
+            nobs = neighbors
+
+        powers = _monomial_powers(ndim, degree)
+        # The polynomial matrix must have full column rank in order for the
+        # interpolant to be well-posed, which is not possible if there are
+        # fewer observations than monomials.
+        if powers.shape[0] > nobs:
+            raise ValueError(
+                f"At least {powers.shape[0]} data points are required when "
+                f"`degree` is {degree} and the number of dimensions is {ndim}."
+                )
+
+        if neighbors is None:
+            shift, scale, coeffs = _build_and_solve_system(
+                y, d, smoothing, kernel, epsilon, powers
+                )
+
+            # Make these attributes private since they do not always exist.
+            self._shift = shift
+            self._scale = scale
+            self._coeffs = coeffs
+
+        else:
+            self._tree = KDTree(y)
+
+        self.y = y
+        self.d = d
+        self.d_shape = d_shape
+        self.d_dtype = d_dtype
+        self.neighbors = neighbors
+        self.smoothing = smoothing
+        self.kernel = kernel
+        self.epsilon = epsilon
+        self.powers = powers
+
+    def _chunk_evaluator(
+            self,
+            x,
+            y,
+            shift,
+            scale,
+            coeffs,
+            memory_budget=1000000
+    ):
+        """
+        Evaluate the interpolation while controlling memory consumption.
+        We chunk the input if we need more memory than specified.
+
+        Parameters
+        ----------
+        x : (Q, N) float ndarray
+            Array of points at which to evaluate.
+        y : (P, N) float ndarray
+            Array of points at which the function values are known.
+        shift : (N,) float ndarray
+            Domain shift used to create the polynomial matrix.
+        scale : (N,) float ndarray
+            Domain scaling used to create the polynomial matrix.
+        coeffs : (P + R, S) float ndarray
+            Coefficients in front of basis functions.
+        memory_budget : int
+            Total amount of memory (in units of sizeof(float)) we wish
+            to devote to storing the array of coefficients for
+            interpolated points. If we need more memory than that, we
+            chunk the input.
+
+        Returns
+        -------
+        (Q, S) float ndarray
+            Interpolated array.
+        """
+        nx, ndim = x.shape
+        if self.neighbors is None:
+            nnei = len(y)
+        else:
+            nnei = self.neighbors
+        # in each chunk we consume the same space we already occupy
+        chunksize = memory_budget // (self.powers.shape[0] + nnei) + 1
+        if chunksize <= nx:
+            out = np.empty((nx, self.d.shape[1]), dtype=float)
+            for i in range(0, nx, chunksize):
+                vec = _build_evaluation_coefficients(
+                    x[i:i + chunksize, :],
+                    y,
+                    self.kernel,
+                    self.epsilon,
+                    self.powers,
+                    shift,
+                    scale)
+                out[i:i + chunksize, :] = np.dot(vec, coeffs)
+        else:
+            vec = _build_evaluation_coefficients(
+                x,
+                y,
+                self.kernel,
+                self.epsilon,
+                self.powers,
+                shift,
+                scale)
+            out = np.dot(vec, coeffs)
+        return out
+
+    def __call__(self, x):
+        """Evaluate the interpolant at `x`.
+
+        Parameters
+        ----------
+        x : (Q, N) array_like
+            Evaluation point coordinates.
+
+        Returns
+        -------
+        (Q, ...) ndarray
+            Values of the interpolant at `x`.
+
+        """
+        x = np.asarray(x, dtype=float, order="C")
+        if x.ndim != 2:
+            raise ValueError("`x` must be a 2-dimensional array.")
+
+        nx, ndim = x.shape
+        if ndim != self.y.shape[1]:
+            raise ValueError("Expected the second axis of `x` to have length "
+                             f"{self.y.shape[1]}.")
+
+        # Our memory budget for storing RBF coefficients is
+        # based on how many floats in memory we already occupy
+        # If this number is below 1e6 we just use 1e6
+        # This memory budget is used to decide how we chunk
+        # the inputs
+        memory_budget = max(x.size + self.y.size + self.d.size, 1000000)
+
+        if self.neighbors is None:
+            out = self._chunk_evaluator(
+                x,
+                self.y,
+                self._shift,
+                self._scale,
+                self._coeffs,
+                memory_budget=memory_budget)
+        else:
+            # Get the indices of the k nearest observation points to each
+            # evaluation point.
+            _, yindices = self._tree.query(x, self.neighbors)
+            if self.neighbors == 1:
+                # `KDTree` squeezes the output when neighbors=1.
+                yindices = yindices[:, None]
+
+            # Multiple evaluation points may have the same neighborhood of
+            # observation points. Make the neighborhoods unique so that we only
+            # compute the interpolation coefficients once for each
+            # neighborhood.
+            yindices = np.sort(yindices, axis=1)
+            yindices, inv = np.unique(yindices, return_inverse=True, axis=0)
+            # `inv` tells us which neighborhood will be used by each evaluation
+            # point. Now we find which evaluation points will be using each
+            # neighborhood.
+            xindices = [[] for _ in range(len(yindices))]
+            for i, j in enumerate(inv):
+                xindices[j].append(i)
+
+            out = np.empty((nx, self.d.shape[1]), dtype=float)
+            for xidx, yidx in zip(xindices, yindices):
+                # `yidx` are the indices of the observations in this
+                # neighborhood. `xidx` are the indices of the evaluation points
+                # that are using this neighborhood.
+                xnbr = x[xidx]
+                ynbr = self.y[yidx]
+                dnbr = self.d[yidx]
+                snbr = self.smoothing[yidx]
+                shift, scale, coeffs = _build_and_solve_system(
+                    ynbr,
+                    dnbr,
+                    snbr,
+                    self.kernel,
+                    self.epsilon,
+                    self.powers,
+                )
+                out[xidx] = self._chunk_evaluator(
+                    xnbr,
+                    ynbr,
+                    shift,
+                    scale,
+                    coeffs,
+                    memory_budget=memory_budget)
+
+        out = out.view(self.d_dtype)
+        out = out.reshape((nx, ) + self.d_shape)
+        return out
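For reference, a pure-NumPy sketch of the linear system described in the `RBFInterpolator` notes, for the `thin_plate_spline` kernel with a degree-1 polynomial tail and smoothing `lam`. It omits the domain shift/scale normalization and the compiled pythran kernels the module actually uses; names are illustrative:

```python
import numpy as np
from scipy.special import xlogy

rng = np.random.default_rng(0)
y = rng.random((20, 2))                 # data sites, shape (P, N)
d = np.sin(3*y[:, 0]) + y[:, 1]         # data values, shape (P,)
lam = 0.0                               # smoothing parameter

r = np.linalg.norm(y[:, None] - y[None, :], axis=-1)
K = xlogy(r**2, r)                      # thin_plate_spline: r**2 * log(r)
P = np.hstack([np.ones((len(y), 1)), y])          # monomials 1, x1, x2
Z = np.zeros((P.shape[1], P.shape[1]))

# Block system from the notes: [[K + lam*I, P], [P.T, 0]] [a; b] = [d; 0]
lhs = np.block([[K + lam*np.eye(len(y)), P], [P.T, Z]])
rhs = np.concatenate([d, np.zeros(P.shape[1])])
coeffs = np.linalg.solve(lhs, rhs)
a, b = coeffs[:len(y)], coeffs[len(y):]

def interpolant(x):
    rx = np.linalg.norm(x[:, None] - y[None, :], axis=-1)
    return xlogy(rx**2, rx) @ a + np.hstack([np.ones((len(x), 1)), x]) @ b

np.allclose(interpolant(y), d)          # True: exact fit when lam == 0
```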
diff --git a/__packaged__/coreml/.python_dependencies/scipy/interpolate/_rgi.py b/__packaged__/coreml/.python_dependencies/scipy/interpolate/_rgi.py
new file mode 100644
index 00000000..204bc3ea
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/interpolate/_rgi.py
@@ -0,0 +1,675 @@
+__all__ = ['RegularGridInterpolator', 'interpn']
+
+import itertools
+
+import numpy as np
+
+from .interpnd import _ndim_coords_from_arrays
+from ._cubic import PchipInterpolator
+from ._rgi_cython import evaluate_linear_2d, find_indices
+from ._bsplines import make_interp_spline
+from ._fitpack2 import RectBivariateSpline
+
+
+def _check_points(points):
+    descending_dimensions = []
+    grid = []
+    for i, p in enumerate(points):
+        # early make points float
+        # see https://github.com/scipy/scipy/pull/17230
+        p = np.asarray(p, dtype=float)
+        if not np.all(p[1:] > p[:-1]):
+            if np.all(p[1:] < p[:-1]):
+                # input is descending, so make it ascending
+                descending_dimensions.append(i)
+                p = np.flip(p)
+            else:
+                raise ValueError(
+                    "The points in dimension %d must be strictly "
+                    "ascending or descending" % i)
+        # see https://github.com/scipy/scipy/issues/17716
+        p = np.ascontiguousarray(p)
+        grid.append(p)
+    return tuple(grid), tuple(descending_dimensions)
+
+
+def _check_dimensionality(points, values):
+    if len(points) > values.ndim:
+        raise ValueError("There are %d point arrays, but values has %d "
+                         "dimensions" % (len(points), values.ndim))
+    for i, p in enumerate(points):
+        if not np.asarray(p).ndim == 1:
+            raise ValueError("The points in dimension %d must be "
+                             "1-dimensional" % i)
+        if not values.shape[i] == len(p):
+            raise ValueError("There are %d points and %d values in "
+                             "dimension %d" % (len(p), values.shape[i], i))
+
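Because `_check_points` flips strictly descending axes (and the interpolator flips the corresponding `values` axes to match), descending grids are accepted transparently by the class defined below. A small sketch with illustrative values:

```python
import numpy as np
from scipy.interpolate import RegularGridInterpolator

x_desc = np.linspace(4, 1, 11)        # strictly descending
y = np.linspace(4, 7, 22)
data = np.add.outer(x_desc**2, y)     # data[i, j] = x_desc[i]**2 + y[j]
interp = RegularGridInterpolator((x_desc, y), data)
interp([[2.5, 5.5]])                  # approximately array([11.75])
```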
+
+class RegularGridInterpolator:
+    """
+    Interpolation on a regular or rectilinear grid in arbitrary dimensions.
+
+    The data must be defined on a rectilinear grid; that is, a rectangular
+    grid with even or uneven spacing. Linear, nearest-neighbor, spline
+    interpolations are supported. After setting up the interpolator object,
+    the interpolation method may be chosen at each evaluation.
+
+    Parameters
+    ----------
+    points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
+        The points defining the regular grid in n dimensions. The points in
+        each dimension (i.e., every element of the points tuple) must be
+        strictly ascending or descending.
+
+    values : array_like, shape (m1, ..., mn, ...)
+        The data on the regular grid in n dimensions. Complex data is
+        accepted.
+
+    method : str, optional
+        The method of interpolation to perform. Supported are "linear",
+        "nearest", "slinear", "cubic", "quintic" and "pchip". This
+        parameter will become the default for the object's ``__call__``
+        method. Default is "linear".
+
+    bounds_error : bool, optional
+        If True, when interpolated values are requested outside of the
+        domain of the input data, a ValueError is raised.
+        If False, then `fill_value` is used.
+        Default is True.
+
+    fill_value : float or None, optional
+        The value to use for points outside of the interpolation domain.
+        If None, values outside the domain are extrapolated.
+        Default is ``np.nan``.
+
+    Methods
+    -------
+    __call__
+
+    Attributes
+    ----------
+    grid : tuple of ndarrays
+        The points defining the regular grid in n dimensions.
+        This tuple defines the full grid via
+        ``np.meshgrid(*grid, indexing='ij')``
+    values : ndarray
+        Data values at the grid.
+    method : str
+        Interpolation method.
+    fill_value : float or ``None``
+        Use this value for out-of-bounds arguments to `__call__`.
+    bounds_error : bool
+        If ``True``, out-of-bounds argument raise a ``ValueError``.
+
+    Notes
+    -----
+    Contrary to `LinearNDInterpolator` and `NearestNDInterpolator`, this class
+    avoids expensive triangulation of the input data by taking advantage of the
+    regular grid structure.
+
+    In other words, this class assumes that the data is defined on a
+    *rectilinear* grid.
+
+    .. versionadded:: 0.14
+
+    The 'slinear' (k=1), 'cubic' (k=3), and 'quintic' (k=5) methods are
+    tensor-product spline interpolators, where `k` is the spline degree.
+    If any dimension has fewer points than ``k + 1``, an error will be raised.
+
+    .. versionadded:: 1.9
+
+    If the input data is such that dimensions have incommensurate
+    units and differ by many orders of magnitude, the interpolant may have
+    numerical artifacts. Consider rescaling the data before interpolating.
+
+    Examples
+    --------
+    **Evaluate a function on the points of a 3-D grid**
+
+    As a first example, we evaluate a simple example function on the points of
+    a 3-D grid:
+
+    >>> from scipy.interpolate import RegularGridInterpolator
+    >>> import numpy as np
+    >>> def f(x, y, z):
+    ...     return 2 * x**3 + 3 * y**2 - z
+    >>> x = np.linspace(1, 4, 11)
+    >>> y = np.linspace(4, 7, 22)
+    >>> z = np.linspace(7, 9, 33)
+    >>> xg, yg, zg = np.meshgrid(x, y, z, indexing='ij', sparse=True)
+    >>> data = f(xg, yg, zg)
+
+    ``data`` is now a 3-D array with ``data[i, j, k] = f(x[i], y[j], z[k])``.
+    Next, define an interpolating function from this data:
+
+    >>> interp = RegularGridInterpolator((x, y, z), data)
+
+    Evaluate the interpolating function at the two points
+    ``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``:
+
+    >>> pts = np.array([[2.1, 6.2, 8.3],
+    ...                 [3.3, 5.2, 7.1]])
+    >>> interp(pts)
+    array([ 125.80469388,  146.30069388])
+
+    which is indeed a close approximation to
+
+    >>> f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)
+    (125.54200000000002, 145.894)
+
+    **Interpolate and extrapolate a 2D dataset**
+
+    As a second example, we interpolate and extrapolate a 2D data set:
+
+    >>> x, y = np.array([-2, 0, 4]), np.array([-2, 0, 2, 5])
+    >>> def ff(x, y):
+    ...     return x**2 + y**2
+
+    >>> xg, yg = np.meshgrid(x, y, indexing='ij')
+    >>> data = ff(xg, yg)
+    >>> interp = RegularGridInterpolator((x, y), data,
+    ...                                  bounds_error=False, fill_value=None)
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig = plt.figure()
+    >>> ax = fig.add_subplot(projection='3d')
+    >>> ax.scatter(xg.ravel(), yg.ravel(), data.ravel(),
+    ...            s=60, c='k', label='data')
+
+    Evaluate and plot the interpolator on a finer grid
+
+    >>> xx = np.linspace(-4, 9, 31)
+    >>> yy = np.linspace(-4, 9, 31)
+    >>> X, Y = np.meshgrid(xx, yy, indexing='ij')
+
+    >>> # interpolator
+    >>> ax.plot_wireframe(X, Y, interp((X, Y)), rstride=3, cstride=3,
+    ...                   alpha=0.4, color='m', label='linear interp')
+
+    >>> # ground truth
+    >>> ax.plot_wireframe(X, Y, ff(X, Y), rstride=3, cstride=3,
+    ...                   alpha=0.4, label='ground truth')
+    >>> plt.legend()
+    >>> plt.show()
+
+    Other examples are given in the interpolation tutorial.
+
+    See Also
+    --------
+    NearestNDInterpolator : Nearest neighbor interpolation on *unstructured*
+                            data in N dimensions
+
+    LinearNDInterpolator : Piecewise linear interpolant on *unstructured* data
+                           in N dimensions
+
+    interpn : a convenience function which wraps `RegularGridInterpolator`
+
+    scipy.ndimage.map_coordinates : interpolation on grids with equal spacing
+                                    (suitable for e.g., N-D image resampling)
+
+    References
+    ----------
+    .. [1] Python package *regulargrid* by Johannes Buchner, see
+           https://pypi.python.org/pypi/regulargrid/
+    .. [2] Wikipedia, "Trilinear interpolation",
+           https://en.wikipedia.org/wiki/Trilinear_interpolation
+    .. [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise linear
+           and multilinear table interpolation in many dimensions." MATH.
+           COMPUT. 50.181 (1988): 189-196.
+           https://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf
+           :doi:`10.1090/S0025-5718-1988-0917826-0`
+
+    """
+    # this class is based on code originally programmed by Johannes Buchner,
+    # see https://github.com/JohannesBuchner/regulargrid
+
+    _SPLINE_DEGREE_MAP = {"slinear": 1, "cubic": 3, "quintic": 5, "pchip": 3}
+    _SPLINE_METHODS = list(_SPLINE_DEGREE_MAP.keys())
+    _ALL_METHODS = ["linear", "nearest"] + _SPLINE_METHODS
+
+    def __init__(self, points, values, method="linear", bounds_error=True,
+                 fill_value=np.nan):
+        if method not in self._ALL_METHODS:
+            raise ValueError("Method '%s' is not defined" % method)
+        elif method in self._SPLINE_METHODS:
+            self._validate_grid_dimensions(points, method)
+        self.method = method
+        self.bounds_error = bounds_error
+        self.grid, self._descending_dimensions = _check_points(points)
+        self.values = self._check_values(values)
+        self._check_dimensionality(self.grid, self.values)
+        self.fill_value = self._check_fill_value(self.values, fill_value)
+        if self._descending_dimensions:
+            # flip the validated array so that every grid dimension ascends
+            self.values = np.flip(self.values, axis=self._descending_dimensions)
+
+    def _check_dimensionality(self, grid, values):
+        _check_dimensionality(grid, values)
+
+    def _check_points(self, points):
+        return _check_points(points)
+
+    def _check_values(self, values):
+        if not hasattr(values, 'ndim'):
+            # allow reasonable duck-typed values
+            values = np.asarray(values)
+
+        if hasattr(values, 'dtype') and hasattr(values, 'astype'):
+            if not np.issubdtype(values.dtype, np.inexact):
+                values = values.astype(float)
+
+        return values
+
+    def _check_fill_value(self, values, fill_value):
+        if fill_value is not None:
+            fill_value_dtype = np.asarray(fill_value).dtype
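+            # 'same_kind' casting accepts e.g. float32 -> float64 but
+            # rejects a complex fill_value for real-valued data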
+            if (hasattr(values, 'dtype') and not
+                    np.can_cast(fill_value_dtype, values.dtype,
+                                casting='same_kind')):
+                raise ValueError("fill_value must be either 'None' or "
+                                 "of a type compatible with values")
+        return fill_value
+
+    def __call__(self, xi, method=None):
+        """
+        Interpolation at coordinates.
+
+        Parameters
+        ----------
+        xi : ndarray of shape (..., ndim)
+            The coordinates to evaluate the interpolator at.
+
+        method : str, optional
+            The method of interpolation to perform. Supported are "linear",
+            "nearest", "slinear", "cubic", "quintic" and "pchip". Default is
+            the method chosen when the interpolator was created.
+
+        Returns
+        -------
+        values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
+            Interpolated values at `xi`. See notes for behaviour when
+            ``xi.ndim == 1``.
+
+        Notes
+        -----
+        In the case that ``xi.ndim == 1`` a new axis is inserted into
+        the 0 position of the returned array, values_x, so its shape is
+        instead ``(1,) + values.shape[ndim:]``.
+
+        Examples
+        --------
+        Here we define a nearest-neighbor interpolator of a simple function
+
+        >>> import numpy as np
+        >>> x, y = np.array([0, 1, 2]), np.array([1, 3, 7])
+        >>> def f(x, y):
+        ...     return x**2 + y**2
+        >>> data = f(*np.meshgrid(x, y, indexing='ij', sparse=True))
+        >>> from scipy.interpolate import RegularGridInterpolator
+        >>> interp = RegularGridInterpolator((x, y), data, method='nearest')
+
+        By construction, the interpolator uses the nearest-neighbor
+        interpolation
+
+        >>> interp([[1.5, 1.3], [0.3, 4.5]])
+        array([2., 9.])
+
+        We can however evaluate the linear interpolant by overriding the
+        `method` parameter
+
+        >>> interp([[1.5, 1.3], [0.3, 4.5]], method='linear')
+        array([ 4.7, 24.3])
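+
+        By default (``bounds_error=True``), querying outside the grid
+        raises; for instance the point ``(10, 10)`` lies beyond ``x``:
+
+        >>> interp([[10., 10.]])
+        Traceback (most recent call last):
+            ...
+        ValueError: One of the requested xi is out of bounds in dimension 0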
+        """
+        # a method passed at call time may differ from the one set at
+        # construction; spline methods then need their grid re-validated
+        is_method_changed = method is not None and method != self.method
+        method = self.method if method is None else method
+        if method not in self._ALL_METHODS:
+            raise ValueError("Method '%s' is not defined" % method)
+
+        xi, xi_shape, ndim, nans, out_of_bounds = self._prepare_xi(xi)
+
+        if method == "linear":
+            indices, norm_distances = self._find_indices(xi.T)
+            if (ndim == 2 and hasattr(self.values, 'dtype') and
+                    self.values.ndim == 2 and self.values.flags.writeable and
+                    self.values.dtype in (np.float64, np.complex128) and
+                    self.values.dtype.byteorder == '='):
+                # a fast path; until Cython supports const fused types it
+                # cannot accept non-writeable values
+                out = np.empty(indices.shape[1], dtype=self.values.dtype)
+                result = evaluate_linear_2d(self.values,
+                                            indices,
+                                            norm_distances,
+                                            self.grid,
+                                            out)
+            else:
+                result = self._evaluate_linear(indices, norm_distances)
+        elif method == "nearest":
+            indices, norm_distances = self._find_indices(xi.T)
+            result = self._evaluate_nearest(indices, norm_distances)
+        elif method in self._SPLINE_METHODS:
+            if is_method_changed:
+                self._validate_grid_dimensions(self.grid, method)
+            result = self._evaluate_spline(xi, method)
+
+        if not self.bounds_error and self.fill_value is not None:
+            result[out_of_bounds] = self.fill_value
+
+        # f(nan) = nan, if any
+        if np.any(nans):
+            result[nans] = np.nan
+        return result.reshape(xi_shape[:-1] + self.values.shape[ndim:])
+
+    def _prepare_xi(self, xi):
+        ndim = len(self.grid)
+        xi = _ndim_coords_from_arrays(xi, ndim=ndim)
+        if xi.shape[-1] != len(self.grid):
+            raise ValueError("The requested sample points xi have dimension "
+                             f"{xi.shape[-1]} but this "
+                             f"RegularGridInterpolator has dimension {ndim}")
+
+        xi_shape = xi.shape
+        xi = xi.reshape(-1, xi_shape[-1])
+        xi = np.asarray(xi, dtype=float)
+
+        # find nans in input
+        nans = np.any(np.isnan(xi), axis=-1)
+
+        if self.bounds_error:
+            for i, p in enumerate(xi.T):
+                if not np.logical_and(np.all(self.grid[i][0] <= p),
+                                      np.all(p <= self.grid[i][-1])):
+                    raise ValueError("One of the requested xi is out of bounds "
+                                     "in dimension %d" % i)
+            out_of_bounds = None
+        else:
+            out_of_bounds = self._find_out_of_bounds(xi.T)
+
+        return xi, xi_shape, ndim, nans, out_of_bounds
+
+    def _evaluate_linear(self, indices, norm_distances):
+        # slice for broadcasting over trailing dimensions in self.values
+        vslice = (slice(None),) + (None,)*(self.values.ndim - len(indices))
+
+        # Compute shifting up front before zipping everything together
+        shift_norm_distances = [1 - yi for yi in norm_distances]
+        shift_indices = [i + 1 for i in indices]
+
+        # The formula for linear interpolation in 2d takes the form:
+        # values = self.values[(i0, i1)] * (1 - y0) * (1 - y1) + \
+        #          self.values[(i0, i1 + 1)] * (1 - y0) * y1 + \
+        #          self.values[(i0 + 1, i1)] * y0 * (1 - y1) + \
+        #          self.values[(i0 + 1, i1 + 1)] * y0 * y1
+        # We pair i with 1 - yi (zipped1) and i + 1 with yi (zipped2)
+        zipped1 = zip(indices, shift_norm_distances)
+        zipped2 = zip(shift_indices, norm_distances)
+
+        # Take all products of zipped1 and zipped2 and iterate over them
+        # to get the terms in the above formula. This corresponds to iterating
+        # over the vertices of a hypercube.
+        hypercube = itertools.product(*zip(zipped1, zipped2))
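+        # the product has 2**ndim terms, one per vertex of the hypercube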
+        value = np.array([0.])
+        for h in hypercube:
+            edge_indices, weights = zip(*h)
+            weight = np.array([1.])
+            for w in weights:
+                weight = weight * w
+            term = np.asarray(self.values[edge_indices]) * weight[vslice]
+            value = value + term   # cannot use += because broadcasting
+        return value
+
+    def _evaluate_nearest(self, indices, norm_distances):
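+        # snap each coordinate to the closest grid point; exact midpoints
+        # (normalized distance 0.5) resolve to the lower index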
+        idx_res = [np.where(yi <= .5, i, i + 1)
+                   for i, yi in zip(indices, norm_distances)]
+        return self.values[tuple(idx_res)]
+
+    def _validate_grid_dimensions(self, points, method):
+        k = self._SPLINE_DEGREE_MAP[method]
+        for i, point in enumerate(points):
+            ndim = len(np.atleast_1d(point))
+            if ndim <= k:
+                raise ValueError(f"There are {ndim} points in dimension {i},"
+                                 f" but method {method} requires at least "
+                                 f" {k+1} points per dimension.")
+
+    def _evaluate_spline(self, xi, method):
+        # ensure xi is a 2D array of points to evaluate (`m` is the number
+        # of points and `n` is the number of interpolation dimensions,
+        # ``n == len(self.grid)``)
+        if xi.ndim == 1:
+            xi = xi.reshape((1, xi.size))
+        m, n = xi.shape
+
+        # Reorder the axes: n-dimensional process iterates over the
+        # interpolation axes from the last axis downwards: E.g. for a 4D grid
+        # the order of axes is 3, 2, 1, 0. Each 1D interpolation works along
+        # the 0th axis of its argument array (for 1D routine it's its ``y``
+        # array). Thus permute the interpolation axes of `values` *and keep
+        # trailing dimensions trailing*.
+        axes = tuple(range(self.values.ndim))
+        axx = axes[:n][::-1] + axes[n:]
+        values = self.values.transpose(axx)
+
+        if method == 'pchip':
+            _eval_func = self._do_pchip
+        else:
+            _eval_func = self._do_spline_fit
+        k = self._SPLINE_DEGREE_MAP[method]
+
+        # Non-stationary procedure: it is difficult to vectorize this part
+        # entirely into numpy-level operations, so we fall back to an
+        # explicit loop over each point in xi.
+
+        # can at least vectorize the first pass across all points in the
+        # last variable of xi.
+        last_dim = n - 1
+        first_values = _eval_func(self.grid[last_dim],
+                                  values,
+                                  xi[:, last_dim],
+                                  k)
+
+        # the rest of the dimensions have to be on a per point-in-xi basis
+        shape = (m, *self.values.shape[n:])
+        result = np.empty(shape, dtype=self.values.dtype)
+        for j in range(m):
+            # Main process: Apply 1D interpolate in each dimension
+            # sequentially, starting with the last dimension.
+            # These are then "folded" into the next dimension in-place.
+            folded_values = first_values[j, ...]
+            for i in range(last_dim-1, -1, -1):
+                # Interpolate for each 1D from the last dimensions.
+                # This collapses each 1D sequence into a scalar.
+                folded_values = _eval_func(self.grid[i],
+                                           folded_values,
+                                           xi[j, i],
+                                           k)
+            result[j, ...] = folded_values
+
+        return result
+
+    @staticmethod
+    def _do_spline_fit(x, y, pt, k):
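+        # fit a degree-k interpolating spline along axis 0 and evaluate it
+        # at the query point(s)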
+        local_interp = make_interp_spline(x, y, k=k, axis=0)
+        values = local_interp(pt)
+        return values
+
+    @staticmethod
+    def _do_pchip(x, y, pt, k):
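+        # `k` is unused here; kept for signature parity with _do_spline_fit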
+        local_interp = PchipInterpolator(x, y, axis=0)
+        values = local_interp(pt)
+        return values
+
+    def _find_indices(self, xi):
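+        # per dimension: index of the lower grid point bracketing each xi
+        # component, plus the normalized distance from that point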
+        return find_indices(self.grid, xi)
+
+    def _find_out_of_bounds(self, xi):
+        # check for out of bounds xi
+        out_of_bounds = np.zeros((xi.shape[1]), dtype=bool)
+        # iterate through dimensions
+        for x, grid in zip(xi, self.grid):
+            out_of_bounds += x < grid[0]
+            out_of_bounds += x > grid[-1]
+        return out_of_bounds
+
+
+def interpn(points, values, xi, method="linear", bounds_error=True,
+            fill_value=np.nan):
+    """
+    Multidimensional interpolation on regular or rectilinear grids.
+
+    Strictly speaking, not all regular grids are supported - this function
+    works on *rectilinear* grids, that is, a rectangular grid with even or
+    uneven spacing.
+
+    Parameters
+    ----------
+    points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
+        The points defining the regular grid in n dimensions. The points in
+        each dimension (i.e. every element of the points tuple) must be
+        strictly ascending or descending.
+
+    values : array_like, shape (m1, ..., mn, ...)
+        The data on the regular grid in n dimensions. Complex data is
+        accepted.
+
+    xi : ndarray of shape (..., ndim)
+        The coordinates to sample the gridded data at.
+
+    method : str, optional
+        The method of interpolation to perform. Supported are "linear",
+        "nearest", "slinear", "cubic", "quintic", "pchip", and "splinef2d".
+        "splinef2d" is only supported for 2-dimensional data.
+
+    bounds_error : bool, optional
+        If True, when interpolated values are requested outside of the
+        domain of the input data, a ValueError is raised.
+        If False, then `fill_value` is used.
+
+    fill_value : number, optional
+        If provided, the value to use for points outside of the
+        interpolation domain. If None, values outside
+        the domain are extrapolated.  Extrapolation is not supported by method
+        "splinef2d".
+
+    Returns
+    -------
+    values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
+        Interpolated values at `xi`. See notes for behaviour when
+        ``xi.ndim == 1``.
+
+    Notes
+    -----
+
+    .. versionadded:: 0.14
+
+    In the case that ``xi.ndim == 1`` a new axis is inserted into
+    the 0 position of the returned array, values_x, so its shape is
+    instead ``(1,) + values.shape[ndim:]``.
+
+    If the input data is such that input dimensions have incommensurate
+    units and differ by many orders of magnitude, the interpolant may have
+    numerical artifacts. Consider rescaling the data before interpolation.
+
+    Examples
+    --------
+    Evaluate a simple example function on the points of a regular 3-D grid:
+
+    >>> import numpy as np
+    >>> from scipy.interpolate import interpn
+    >>> def value_func_3d(x, y, z):
+    ...     return 2 * x + 3 * y - z
+    >>> x = np.linspace(0, 4, 5)
+    >>> y = np.linspace(0, 5, 6)
+    >>> z = np.linspace(0, 6, 7)
+    >>> points = (x, y, z)
+    >>> values = value_func_3d(*np.meshgrid(*points, indexing='ij'))
+
+    Evaluate the interpolating function at a point
+
+    >>> point = np.array([2.21, 3.12, 1.15])
+    >>> print(interpn(points, values, point))
+    [12.63]
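+
+    With ``method="nearest"`` the same query snaps to the closest grid
+    point ``(2, 3, 1)``:
+
+    >>> print(interpn(points, values, point, method="nearest"))
+    [12.]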
+
+    See Also
+    --------
+    NearestNDInterpolator : Nearest neighbor interpolation on unstructured
+                            data in N dimensions
+
+    LinearNDInterpolator : Piecewise linear interpolant on unstructured data
+                           in N dimensions
+
+    RegularGridInterpolator : interpolation on a regular or rectilinear grid
+                              in arbitrary dimensions (`interpn` wraps this
+                              class).
+
+    RectBivariateSpline : Bivariate spline approximation over a rectangular mesh
+
+    scipy.ndimage.map_coordinates : interpolation on grids with equal spacing
+                                    (suitable for e.g., N-D image resampling)
+
+    """
+    # sanity check 'method' kwarg
+    if method not in ["linear", "nearest", "cubic", "quintic", "pchip",
+                      "splinef2d", "slinear"]:
+        raise ValueError("interpn only understands the methods 'linear', "
+                         "'nearest', 'slinear', 'cubic', 'quintic', 'pchip', "
+                         f"and 'splinef2d'. You provided {method}.")
+
+    if not hasattr(values, 'ndim'):
+        values = np.asarray(values)
+
+    ndim = values.ndim
+    if ndim > 2 and method == "splinef2d":
+        raise ValueError("The method splinef2d can only be used for "
+                         "2-dimensional input data")
+    if not bounds_error and fill_value is None and method == "splinef2d":
+        raise ValueError("The method splinef2d does not support extrapolation.")
+
+    # sanity check consistency of input dimensions
+    if len(points) > ndim:
+        raise ValueError("There are %d point arrays, but values has %d "
+                         "dimensions" % (len(points), ndim))
+    if len(points) != ndim and method == 'splinef2d':
+        raise ValueError("The method splinef2d can only be used for "
+                         "scalar data with one point per coordinate")
+
+    grid, descending_dimensions = _check_points(points)
+    _check_dimensionality(grid, values)
+
+    # sanity check requested xi
+    xi = _ndim_coords_from_arrays(xi, ndim=len(grid))
+    if xi.shape[-1] != len(grid):
+        raise ValueError("The requested sample points xi have dimension "
+                         "%d, but this RegularGridInterpolator has "
+                         "dimension %d" % (xi.shape[-1], len(grid)))
+
+    if bounds_error:
+        for i, p in enumerate(xi.T):
+            if not np.logical_and(np.all(grid[i][0] <= p),
+                                  np.all(p <= grid[i][-1])):
+                raise ValueError("One of the requested xi is out of bounds "
+                                 "in dimension %d" % i)
+
+    # perform interpolation
+    if method in ["linear", "nearest", "slinear", "cubic", "quintic", "pchip"]:
+        interp = RegularGridInterpolator(points, values, method=method,
+                                         bounds_error=bounds_error,
+                                         fill_value=fill_value)
+        return interp(xi)
+    elif method == "splinef2d":
+        xi_shape = xi.shape
+        xi = xi.reshape(-1, xi.shape[-1])
+
+        # RectBivariateSpline doesn't support fill_value; we need to wrap here
+        idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1],
+                            grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]),
+                           axis=0)
+        result = np.empty_like(xi[:, 0])
+
+        # values[:] is a basic slice: a view (not a copy) for ndarrays
+        interp = RectBivariateSpline(points[0], points[1], values[:])
+        result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1])
+        result[np.logical_not(idx_valid)] = fill_value
+
+        return result.reshape(xi_shape[:-1])
diff --git a/__packaged__/coreml/.python_dependencies/scipy/interpolate/fitpack.py b/__packaged__/coreml/.python_dependencies/scipy/interpolate/fitpack.py
new file mode 100644
index 00000000..cf50218a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/interpolate/fitpack.py
@@ -0,0 +1,40 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.interpolate` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _fitpack_py
+
+
+__all__ = [  # noqa: F822
+    'BSpline',
+    'bisplev',
+    'bisplrep',
+    'dblint',
+    'insert',
+    'spalde',
+    'splantider',
+    'splder',
+    'splev',
+    'splint',
+    'splprep',
+    'splrep',
+    'sproot',
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.interpolate.fitpack is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.interpolate instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.interpolate` namespace, "
+                  "the `scipy.interpolate.fitpack` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_fitpack_py, name)
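+
+
+# A minimal sketch of the shim in action: accessing a listed name emits a
+# DeprecationWarning and resolves to the private implementation.
+#
+#     import warnings
+#     from scipy.interpolate import fitpack
+#     with warnings.catch_warnings(record=True) as w:
+#         warnings.simplefilter("always")
+#         fitpack.splev                  # resolved via __getattr__ above
+#     assert issubclass(w[-1].category, DeprecationWarning)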
diff --git a/__packaged__/coreml/.python_dependencies/scipy/interpolate/fitpack2.py b/__packaged__/coreml/.python_dependencies/scipy/interpolate/fitpack2.py
new file mode 100644
index 00000000..7dcbb04e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/interpolate/fitpack2.py
@@ -0,0 +1,46 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.interpolate` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _fitpack2
+
+
+__all__ = [  # noqa: F822
+    'BivariateSpline',
+    'InterpolatedUnivariateSpline',
+    'LSQBivariateSpline',
+    'LSQSphereBivariateSpline',
+    'LSQUnivariateSpline',
+    'RectBivariateSpline',
+    'RectSphereBivariateSpline',
+    'SmoothBivariateSpline',
+    'SmoothSphereBivariateSpline',
+    'SphereBivariateSpline',
+    'UnivariateSpline',
+    'array',
+    'concatenate',
+    'dfitpack',
+    'dfitpack_int',
+    'diff',
+    'ones',
+    'ravel',
+    'zeros',
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.interpolate.fitpack2 is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.interpolate instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.interpolate` namespace, "
+                  "the `scipy.interpolate.fitpack2` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_fitpack2, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/interpolate/interpolate.py b/__packaged__/coreml/.python_dependencies/scipy/interpolate/interpolate.py
new file mode 100644
index 00000000..b655eb1b
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/interpolate/interpolate.py
@@ -0,0 +1,52 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.interpolate` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _interpolate
+
+
+__all__ = [  # noqa: F822
+    'BPoly',
+    'BSpline',
+    'NdPPoly',
+    'PPoly',
+    'RectBivariateSpline',
+    'RegularGridInterpolator',
+    'array',
+    'asarray',
+    'atleast_1d',
+    'atleast_2d',
+    'comb',
+    'dfitpack',
+    'interp1d',
+    'interp2d',
+    'interpn',
+    'intp',
+    'itertools',
+    'lagrange',
+    'make_interp_spline',
+    'poly1d',
+    'prod',
+    'ravel',
+    'searchsorted',
+    'spec',
+    'transpose',
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.interpolate.interpolate is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.interpolate instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.interpolate` namespace, "
+                  "the `scipy.interpolate.interpolate` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_interpolate, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/interpolate/ndgriddata.py b/__packaged__/coreml/.python_dependencies/scipy/interpolate/ndgriddata.py
new file mode 100644
index 00000000..2e5f7a64
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/interpolate/ndgriddata.py
@@ -0,0 +1,33 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.interpolate` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _ndgriddata
+
+
+__all__ = [  # noqa: F822
+    'CloughTocher2DInterpolator',
+    'LinearNDInterpolator',
+    'NDInterpolatorBase',
+    'NearestNDInterpolator',
+    'cKDTree',
+    'griddata',
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.interpolate.ndgriddata is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.interpolate instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.interpolate` namespace, "
+                  "the `scipy.interpolate.ndgriddata` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_ndgriddata, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/interpolate/polyint.py b/__packaged__/coreml/.python_dependencies/scipy/interpolate/polyint.py
new file mode 100644
index 00000000..141bb5d2
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/interpolate/polyint.py
@@ -0,0 +1,34 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.interpolate` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _polyint
+
+
+__all__ = [  # noqa: F822
+    'BarycentricInterpolator',
+    'KroghInterpolator',
+    'approximate_taylor_polynomial',
+    'barycentric_interpolate',
+    'factorial',
+    'float_factorial',
+    'krogh_interpolate',
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.interpolate.polyint is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.interpolate instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.interpolate` namespace, "
+                  "the `scipy.interpolate.polyint` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_polyint, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/interpolate/rbf.py b/__packaged__/coreml/.python_dependencies/scipy/interpolate/rbf.py
new file mode 100644
index 00000000..76cfde58
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/interpolate/rbf.py
@@ -0,0 +1,33 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.interpolate` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _rbf
+
+
+__all__ = [  # noqa: F822
+    'Rbf',
+    'cdist',
+    'linalg',
+    'pdist',
+    'squareform',
+    'xlogy',
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.interpolate.rbf is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.interpolate instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.interpolate` namespace, "
+                  "the `scipy.interpolate.rbf` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_rbf, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/data/bug-1310.npz b/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/data/bug-1310.npz
new file mode 100644
index 00000000..8dc93c71
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/data/bug-1310.npz differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/data/estimate_gradients_hang.npy b/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/data/estimate_gradients_hang.npy
new file mode 100644
index 00000000..79e1b094
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/data/estimate_gradients_hang.npy differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/data/gcvspl.npz b/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/data/gcvspl.npz
new file mode 100644
index 00000000..dfd0642f
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/data/gcvspl.npz differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_bsplines.py b/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_bsplines.py
new file mode 100644
index 00000000..578d2e24
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_bsplines.py
@@ -0,0 +1,1639 @@
+import numpy as np
+from numpy.testing import assert_equal, assert_allclose, assert_
+from pytest import raises as assert_raises
+import pytest
+
+from scipy.interpolate import (BSpline, BPoly, PPoly, make_interp_spline,
+                               make_lsq_spline, _bspl, splev, splrep, splprep,
+                               splder, splantider, sproot, splint, insert,
+                               CubicSpline, make_smoothing_spline)
+import scipy.linalg as sl
+
+from scipy.interpolate._bsplines import (_not_a_knot, _augknt,
+                                        _woodbury_algorithm, _periodic_knots,
+                                         _make_interp_per_full_matr)
+import scipy.interpolate._fitpack_impl as _impl
+import os
+
+
+class TestBSpline:
+
+    def test_ctor(self):
+        # knots should be an ordered 1-D array of finite real numbers
+        assert_raises((TypeError, ValueError), BSpline,
+                **dict(t=[1, 1.j], c=[1.], k=0))
+        with np.errstate(invalid='ignore'):
+            assert_raises(ValueError, BSpline, **dict(t=[1, np.nan], c=[1.], k=0))
+        assert_raises(ValueError, BSpline, **dict(t=[1, np.inf], c=[1.], k=0))
+        assert_raises(ValueError, BSpline, **dict(t=[1, -1], c=[1.], k=0))
+        assert_raises(ValueError, BSpline, **dict(t=[[1], [1]], c=[1.], k=0))
+
+        # for n+k+1 knots and degree k need at least n coefficients
+        assert_raises(ValueError, BSpline, **dict(t=[0, 1, 2], c=[1], k=0))
+        assert_raises(ValueError, BSpline,
+                **dict(t=[0, 1, 2, 3, 4], c=[1., 1.], k=2))
+
+        # non-integer orders
+        assert_raises(TypeError, BSpline,
+                **dict(t=[0., 0., 1., 2., 3., 4.], c=[1., 1., 1.], k="cubic"))
+        assert_raises(TypeError, BSpline,
+                **dict(t=[0., 0., 1., 2., 3., 4.], c=[1., 1., 1.], k=2.5))
+
+        # basic interval cannot have measure zero (here: [1..1])
+        assert_raises(ValueError, BSpline,
+                **dict(t=[0., 0, 1, 1, 2, 3], c=[1., 1, 1], k=2))
+
+        # tck vs self.tck
+        n, k = 11, 3
+        t = np.arange(n+k+1)
+        c = np.random.random(n)
+        b = BSpline(t, c, k)
+
+        assert_allclose(t, b.t)
+        assert_allclose(c, b.c)
+        assert_equal(k, b.k)
+
+    def test_tck(self):
+        b = _make_random_spline()
+        tck = b.tck
+
+        assert_allclose(b.t, tck[0], atol=1e-15, rtol=1e-15)
+        assert_allclose(b.c, tck[1], atol=1e-15, rtol=1e-15)
+        assert_equal(b.k, tck[2])
+
+        # b.tck is read-only
+        with pytest.raises(AttributeError):
+            b.tck = 'foo'
+
+    def test_degree_0(self):
+        xx = np.linspace(0, 1, 10)
+
+        b = BSpline(t=[0, 1], c=[3.], k=0)
+        assert_allclose(b(xx), 3)
+
+        b = BSpline(t=[0, 0.35, 1], c=[3, 4], k=0)
+        assert_allclose(b(xx), np.where(xx < 0.35, 3, 4))
+
+    def test_degree_1(self):
+        t = [0, 1, 2, 3, 4]
+        c = [1, 2, 3]
+        k = 1
+        b = BSpline(t, c, k)
+
+        x = np.linspace(1, 3, 50)
+        assert_allclose(c[0]*B_012(x) + c[1]*B_012(x-1) + c[2]*B_012(x-2),
+                        b(x), atol=1e-14)
+        assert_allclose(splev(x, (t, c, k)), b(x), atol=1e-14)
+
+    def test_bernstein(self):
+        # a special knot vector: Bernstein polynomials
+        k = 3
+        t = np.asarray([0]*(k+1) + [1]*(k+1))
+        c = np.asarray([1., 2., 3., 4.])
+        bp = BPoly(c.reshape(-1, 1), [0, 1])
+        bspl = BSpline(t, c, k)
+
+        xx = np.linspace(-1., 2., 10)
+        assert_allclose(bp(xx, extrapolate=True),
+                        bspl(xx, extrapolate=True), atol=1e-14)
+        assert_allclose(splev(xx, (t, c, k)),
+                        bspl(xx), atol=1e-14)
+
+    def test_rndm_naive_eval(self):
+        # test random coefficient spline *on the base interval*,
+        # t[k] <= x < t[-k-1]
+        b = _make_random_spline()
+        t, c, k = b.tck
+        xx = np.linspace(t[k], t[-k-1], 50)
+        y_b = b(xx)
+
+        y_n = [_naive_eval(x, t, c, k) for x in xx]
+        assert_allclose(y_b, y_n, atol=1e-14)
+
+        y_n2 = [_naive_eval_2(x, t, c, k) for x in xx]
+        assert_allclose(y_b, y_n2, atol=1e-14)
+
+    def test_rndm_splev(self):
+        b = _make_random_spline()
+        t, c, k = b.tck
+        xx = np.linspace(t[k], t[-k-1], 50)
+        assert_allclose(b(xx), splev(xx, (t, c, k)), atol=1e-14)
+
+    def test_rndm_splrep(self):
+        np.random.seed(1234)
+        x = np.sort(np.random.random(20))
+        y = np.random.random(20)
+
+        tck = splrep(x, y)
+        b = BSpline(*tck)
+
+        t, k = b.t, b.k
+        xx = np.linspace(t[k], t[-k-1], 80)
+        assert_allclose(b(xx), splev(xx, tck), atol=1e-14)
+
+    def test_rndm_unity(self):
+        b = _make_random_spline()
+        b.c = np.ones_like(b.c)
+        xx = np.linspace(b.t[b.k], b.t[-b.k-1], 100)
+        assert_allclose(b(xx), 1.)
+
+    def test_vectorization(self):
+        n, k = 22, 3
+        t = np.sort(np.random.random(n))
+        c = np.random.random(size=(n, 6, 7))
+        b = BSpline(t, c, k)
+        tm, tp = t[k], t[-k-1]
+        xx = tm + (tp - tm) * np.random.random((3, 4, 5))
+        assert_equal(b(xx).shape, (3, 4, 5, 6, 7))
+
+    def test_len_c(self):
+        # for n+k+1 knots, only first n coefs are used.
+        # and BTW this is consistent with FITPACK
+        n, k = 33, 3
+        t = np.sort(np.random.random(n+k+1))
+        c = np.random.random(n)
+
+        # pad coefficients with random garbage
+        c_pad = np.r_[c, np.random.random(k+1)]
+
+        b, b_pad = BSpline(t, c, k), BSpline(t, c_pad, k)
+
+        dt = t[-1] - t[0]
+        xx = np.linspace(t[0] - dt, t[-1] + dt, 50)
+        assert_allclose(b(xx), b_pad(xx), atol=1e-14)
+        assert_allclose(b(xx), splev(xx, (t, c, k)), atol=1e-14)
+        assert_allclose(b(xx), splev(xx, (t, c_pad, k)), atol=1e-14)
+
+    def test_endpoints(self):
+        # base interval is closed
+        b = _make_random_spline()
+        t, _, k = b.tck
+        tm, tp = t[k], t[-k-1]
+        for extrap in (True, False):
+            assert_allclose(b([tm, tp], extrap),
+                            b([tm + 1e-10, tp - 1e-10], extrap), atol=1e-9)
+
+    def test_continuity(self):
+        # assert continuity at internal knots
+        b = _make_random_spline()
+        t, _, k = b.tck
+        assert_allclose(b(t[k+1:-k-1] - 1e-10), b(t[k+1:-k-1] + 1e-10),
+                atol=1e-9)
+
+    def test_extrap(self):
+        b = _make_random_spline()
+        t, c, k = b.tck
+        dt = t[-1] - t[0]
+        xx = np.linspace(t[k] - dt, t[-k-1] + dt, 50)
+        mask = (t[k] < xx) & (xx < t[-k-1])
+
+        # extrap has no effect within the base interval
+        assert_allclose(b(xx[mask], extrapolate=True),
+                        b(xx[mask], extrapolate=False))
+
+        # extrapolated values agree with FITPACK
+        assert_allclose(b(xx, extrapolate=True),
+                splev(xx, (t, c, k), ext=0))
+
+    def test_default_extrap(self):
+        # BSpline defaults to extrapolate=True
+        b = _make_random_spline()
+        t, _, k = b.tck
+        xx = [t[0] - 1, t[-1] + 1]
+        yy = b(xx)
+        assert_(not np.all(np.isnan(yy)))
+
+    def test_periodic_extrap(self):
+        np.random.seed(1234)
+        t = np.sort(np.random.random(8))
+        c = np.random.random(4)
+        k = 3
+        b = BSpline(t, c, k, extrapolate='periodic')
+        n = t.size - (k + 1)
+
+        dt = t[-1] - t[0]
+        xx = np.linspace(t[k] - dt, t[n] + dt, 50)
+        xy = t[k] + (xx - t[k]) % (t[n] - t[k])
+        assert_allclose(b(xx), splev(xy, (t, c, k)))
+
+        # Direct check
+        xx = [-1, 0, 0.5, 1]
+        xy = t[k] + (xx - t[k]) % (t[n] - t[k])
+        assert_equal(b(xx, extrapolate='periodic'), b(xy, extrapolate=True))
+
+    def test_ppoly(self):
+        b = _make_random_spline()
+        t, c, k = b.tck
+        pp = PPoly.from_spline((t, c, k))
+
+        xx = np.linspace(t[k], t[-k], 100)
+        assert_allclose(b(xx), pp(xx), atol=1e-14, rtol=1e-14)
+
+    def test_derivative_rndm(self):
+        b = _make_random_spline()
+        t, c, k = b.tck
+        xx = np.linspace(t[0], t[-1], 50)
+        xx = np.r_[xx, t]
+
+        for der in range(1, k+1):
+            yd = splev(xx, (t, c, k), der=der)
+            assert_allclose(yd, b(xx, nu=der), atol=1e-14)
+
+        # higher derivatives all vanish
+        assert_allclose(b(xx, nu=k+1), 0, atol=1e-14)
+
+    def test_derivative_jumps(self):
+        # example from de Boor, Chap IX, example (24)
+        # NB: knots are augmented & the corresponding coefs are zeroed out
+        # in agreement with the convention (29)
+        k = 2
+        t = [-1, -1, 0, 1, 1, 3, 4, 6, 6, 6, 7, 7]
+        np.random.seed(1234)
+        c = np.r_[0, 0, np.random.random(5), 0, 0]
+        b = BSpline(t, c, k)
+
+        # b is continuous at x != 6 (triple knot)
+        x = np.asarray([1, 3, 4, 6])
+        assert_allclose(b(x[x != 6] - 1e-10),
+                        b(x[x != 6] + 1e-10))
+        assert_(not np.allclose(b(6.-1e-10), b(6+1e-10)))
+
+        # 1st derivative jumps at double knots, 1 & 6:
+        x0 = np.asarray([3, 4])
+        assert_allclose(b(x0 - 1e-10, nu=1),
+                        b(x0 + 1e-10, nu=1))
+        x1 = np.asarray([1, 6])
+        assert_(not np.all(np.allclose(b(x1 - 1e-10, nu=1),
+                                       b(x1 + 1e-10, nu=1))))
+
+        # 2nd derivative is not guaranteed to be continuous either
+        assert_(not np.all(np.allclose(b(x - 1e-10, nu=2),
+                                       b(x + 1e-10, nu=2))))
+
+    def test_basis_element_quadratic(self):
+        xx = np.linspace(-1, 4, 20)
+        b = BSpline.basis_element(t=[0, 1, 2, 3])
+        assert_allclose(b(xx),
+                        splev(xx, (b.t, b.c, b.k)), atol=1e-14)
+        assert_allclose(b(xx),
+                        B_0123(xx), atol=1e-14)
+
+        b = BSpline.basis_element(t=[0, 1, 1, 2])
+        xx = np.linspace(0, 2, 10)
+        assert_allclose(b(xx),
+                np.where(xx < 1, xx*xx, (2.-xx)**2), atol=1e-14)
+
+    def test_basis_element_rndm(self):
+        b = _make_random_spline()
+        t, c, k = b.tck
+        xx = np.linspace(t[k], t[-k-1], 20)
+        assert_allclose(b(xx), _sum_basis_elements(xx, t, c, k), atol=1e-14)
+
+    def test_cmplx(self):
+        b = _make_random_spline()
+        t, c, k = b.tck
+        cc = c * (1. + 3.j)
+
+        b = BSpline(t, cc, k)
+        b_re = BSpline(t, b.c.real, k)
+        b_im = BSpline(t, b.c.imag, k)
+
+        xx = np.linspace(t[k], t[-k-1], 20)
+        assert_allclose(b(xx).real, b_re(xx), atol=1e-14)
+        assert_allclose(b(xx).imag, b_im(xx), atol=1e-14)
+
+    def test_nan(self):
+        # nan in, nan out.
+        b = BSpline.basis_element([0, 1, 1, 2])
+        assert_(np.isnan(b(np.nan)))
+
+    def test_derivative_method(self):
+        b = _make_random_spline(k=5)
+        t, c, k = b.tck
+        b0 = BSpline(t, c, k)
+        xx = np.linspace(t[k], t[-k-1], 20)
+        for j in range(1, k):
+            b = b.derivative()
+            assert_allclose(b0(xx, j), b(xx), atol=1e-12, rtol=1e-12)
+
+    def test_antiderivative_method(self):
+        b = _make_random_spline()
+        t, c, k = b.tck
+        xx = np.linspace(t[k], t[-k-1], 20)
+        assert_allclose(b.antiderivative().derivative()(xx),
+                        b(xx), atol=1e-14, rtol=1e-14)
+
+        # repeat with N-D array for c
+        c = np.c_[c, c, c]
+        c = np.dstack((c, c))
+        b = BSpline(t, c, k)
+        assert_allclose(b.antiderivative().derivative()(xx),
+                        b(xx), atol=1e-14, rtol=1e-14)
+
+    def test_integral(self):
+        b = BSpline.basis_element([0, 1, 2])  # x for x < 1 else 2 - x
+        assert_allclose(b.integrate(0, 1), 0.5)
+        assert_allclose(b.integrate(1, 0), -1 * 0.5)
+        assert_allclose(b.integrate(1, 0), -0.5)
+
+        # outside of [0, 2], either extrapolate or return zeros;
+        # the default is to extrapolate
+        assert_allclose(b.integrate(-1, 1), 0)
+        assert_allclose(b.integrate(-1, 1, extrapolate=True), 0)
+        assert_allclose(b.integrate(-1, 1, extrapolate=False), 0.5)
+        assert_allclose(b.integrate(1, -1, extrapolate=False), -1 * 0.5)
+
+        # Test ``_fitpack._splint()``
+        assert_allclose(b.integrate(1, -1, extrapolate=False),
+                        _impl.splint(1, -1, b.tck))
+
+        # Test ``extrapolate='periodic'``.
+        b.extrapolate = 'periodic'
+        i = b.antiderivative()
+        period_int = i(2) - i(0)
+
+        assert_allclose(b.integrate(0, 2), period_int)
+        assert_allclose(b.integrate(2, 0), -1 * period_int)
+        assert_allclose(b.integrate(-9, -7), period_int)
+        assert_allclose(b.integrate(-8, -4), 2 * period_int)
+
+        assert_allclose(b.integrate(0.5, 1.5), i(1.5) - i(0.5))
+        assert_allclose(b.integrate(1.5, 3), i(1) - i(0) + i(2) - i(1.5))
+        assert_allclose(b.integrate(1.5 + 12, 3 + 12),
+                        i(1) - i(0) + i(2) - i(1.5))
+        assert_allclose(b.integrate(1.5, 3 + 12),
+                        i(1) - i(0) + i(2) - i(1.5) + 6 * period_int)
+
+        assert_allclose(b.integrate(0, -1), i(0) - i(1))
+        assert_allclose(b.integrate(-9, -10), i(0) - i(1))
+        assert_allclose(b.integrate(0, -9), i(1) - i(2) - 4 * period_int)
+
+    def test_integrate_ppoly(self):
+        # test .integrate method to be consistent with PPoly.integrate
+        x = [0, 1, 2, 3, 4]
+        b = make_interp_spline(x, x)
+        b.extrapolate = 'periodic'
+        p = PPoly.from_spline(b)
+
+        for x0, x1 in [(-5, 0.5), (0.5, 5), (-4, 13)]:
+            assert_allclose(b.integrate(x0, x1),
+                            p.integrate(x0, x1))
+
+    def test_subclassing(self):
+        # classmethods should not decay to the base class
+        class B(BSpline):
+            pass
+
+        b = B.basis_element([0, 1, 2, 2])
+        assert_equal(b.__class__, B)
+        assert_equal(b.derivative().__class__, B)
+        assert_equal(b.antiderivative().__class__, B)
+
+    @pytest.mark.parametrize('axis', range(-4, 4))
+    def test_axis(self, axis):
+        n, k = 22, 3
+        t = np.linspace(0, 1, n + k + 1)
+        sh = [6, 7, 8]
+        # We need the positive axis for some of the indexing and slices used
+        # in this test.
+        pos_axis = axis % 4
+        sh.insert(pos_axis, n)   # [22, 6, 7, 8] etc
+        c = np.random.random(size=sh)
+        b = BSpline(t, c, k, axis=axis)
+        assert_equal(b.c.shape,
+                     [sh[pos_axis],] + sh[:pos_axis] + sh[pos_axis+1:])
+
+        xp = np.random.random((3, 4, 5))
+        assert_equal(b(xp).shape,
+                     sh[:pos_axis] + list(xp.shape) + sh[pos_axis+1:])
+
+        # -c.ndim <= axis < c.ndim
+        for ax in [-c.ndim - 1, c.ndim]:
+            assert_raises(np.AxisError, BSpline,
+                          **dict(t=t, c=c, k=k, axis=ax))
+
+        # derivative, antiderivative keeps the axis
+        for b1 in [BSpline(t, c, k, axis=axis).derivative(),
+                   BSpline(t, c, k, axis=axis).derivative(2),
+                   BSpline(t, c, k, axis=axis).antiderivative(),
+                   BSpline(t, c, k, axis=axis).antiderivative(2)]:
+            assert_equal(b1.axis, b.axis)
+
+    def test_neg_axis(self):
+        k = 2
+        t = [0, 1, 2, 3, 4, 5, 6]
+        c = np.array([[-1, 2, 0, -1], [2, 0, -3, 1]])
+
+        spl = BSpline(t, c, k, axis=-1)
+        spl0 = BSpline(t, c[0], k)
+        spl1 = BSpline(t, c[1], k)
+        assert_equal(spl(2.5), [spl0(2.5), spl1(2.5)])
+
+    def test_design_matrix_bc_types(self):
+        '''
+        Splines with different boundary conditions are built on different
+        types of knot vectors. Since the design matrix depends only on the
+        knot vector, `k`, and `x`, it is useful to run the tests for
+        different boundary conditions (and hence for different knot
+        vectors).
+        '''
+        def run_design_matrix_tests(n, k, bc_type):
+            '''
+            Helper to avoid repeating the test body for each ``bc_type``.
+            '''
+            np.random.seed(1234)
+            x = np.sort(np.random.random_sample(n) * 40 - 20)
+            y = np.random.random_sample(n) * 40 - 20
+            if bc_type == "periodic":
+                y[0] = y[-1]
+
+            bspl = make_interp_spline(x, y, k=k, bc_type=bc_type)
+
+            c = np.eye(len(bspl.t) - k - 1)
+            des_matr_def = BSpline(bspl.t, c, k)(x)
+            des_matr_csr = BSpline.design_matrix(x,
+                                                 bspl.t,
+                                                 k).toarray()
+            assert_allclose(des_matr_csr @ bspl.c, y, atol=1e-14)
+            assert_allclose(des_matr_def, des_matr_csr, atol=1e-14)
+
+        # "clamped" and "natural" work only with `k = 3`
+        n = 11
+        k = 3
+        for bc in ["clamped", "natural"]:
+            run_design_matrix_tests(n, k, bc)
+
+        # "not-a-knot" works with odd `k`
+        for k in range(3, 8, 2):
+            run_design_matrix_tests(n, k, "not-a-knot")
+
+        # "periodic" works with any `k` (even more than `n`)
+        n = 5  # smaller `n` to test `k > n` case
+        for k in range(2, 7):
+            run_design_matrix_tests(n, k, "periodic")
+
+    @pytest.mark.parametrize('extrapolate', [False, True, 'periodic'])
+    @pytest.mark.parametrize('degree', range(5))
+    def test_design_matrix_same_as_BSpline_call(self, extrapolate, degree):
+        """Test that design_matrix(x) is equivalent to BSpline(..)(x)."""
+        np.random.seed(1234)
+        x = np.random.random_sample(10 * (degree + 1))
+        xmin, xmax = np.amin(x), np.amax(x)
+        k = degree
+        t = np.r_[np.linspace(xmin - 2, xmin - 1, degree),
+                  np.linspace(xmin, xmax, 2 * (degree + 1)),
+                  np.linspace(xmax + 1, xmax + 2, degree)]
+        c = np.eye(len(t) - k - 1)
+        bspline = BSpline(t, c, k, extrapolate)
+        assert_allclose(
+            bspline(x), BSpline.design_matrix(x, t, k, extrapolate).toarray()
+        )
+
+        # extrapolation regime
+        x = np.array([xmin - 10, xmin - 1, xmax + 1.5, xmax + 10])
+        if not extrapolate:
+            with pytest.raises(ValueError):
+                BSpline.design_matrix(x, t, k, extrapolate)
+        else:
+            assert_allclose(
+                bspline(x),
+                BSpline.design_matrix(x, t, k, extrapolate).toarray()
+            )
+
+    def test_design_matrix_x_shapes(self):
+        # test for different `x` shapes
+        np.random.seed(1234)
+        n = 10
+        k = 3
+        x = np.sort(np.random.random_sample(n) * 40 - 20)
+        y = np.random.random_sample(n) * 40 - 20
+
+        bspl = make_interp_spline(x, y, k=k)
+        for i in range(1, 4):
+            xc = x[:i]
+            yc = y[:i]
+            des_matr_csr = BSpline.design_matrix(xc,
+                                                 bspl.t,
+                                                 k).toarray()
+            assert_allclose(des_matr_csr @ bspl.c, yc, atol=1e-14)
+
+    def test_design_matrix_t_shapes(self):
+        # test for minimal possible `t` shape
+        t = [1., 1., 1., 2., 3., 4., 4., 4.]
+        des_matr = BSpline.design_matrix(2., t, 3).toarray()
+        assert_allclose(des_matr,
+                        [[0.25, 0.58333333, 0.16666667, 0.]],
+                        atol=1e-14)
+
+    def test_design_matrix_asserts(self):
+        np.random.seed(1234)
+        n = 10
+        k = 3
+        x = np.sort(np.random.random_sample(n) * 40 - 20)
+        y = np.random.random_sample(n) * 40 - 20
+        bspl = make_interp_spline(x, y, k=k)
+        # invalid vector of knots (should be a 1D non-descending array)
+        # here the actual vector of knots is reversed, so it is invalid
+        with assert_raises(ValueError):
+            BSpline.design_matrix(x, bspl.t[::-1], k)
+        k = 2
+        t = [0., 1., 2., 3., 4., 5.]
+        x = [1., 2., 3., 4.]
+        # out of bounds
+        with assert_raises(ValueError):
+            BSpline.design_matrix(x, t, k)
+
+    @pytest.mark.parametrize('bc_type', ['natural', 'clamped',
+                                         'periodic', 'not-a-knot'])
+    def test_from_power_basis(self, bc_type):
+        np.random.seed(1234)
+        x = np.sort(np.random.random(20))
+        y = np.random.random(20)
+        if bc_type == 'periodic':
+            y[-1] = y[0]
+        cb = CubicSpline(x, y, bc_type=bc_type)
+        bspl = BSpline.from_power_basis(cb, bc_type=bc_type)
+        xx = np.linspace(0, 1, 20)
+        assert_allclose(cb(xx), bspl(xx), atol=1e-15)
+        bspl_new = make_interp_spline(x, y, bc_type=bc_type)
+        assert_allclose(bspl.c, bspl_new.c, atol=1e-15)
+
+    @pytest.mark.parametrize('bc_type', ['natural', 'clamped',
+                                         'periodic', 'not-a-knot'])
+    def test_from_power_basis_complex(self, bc_type):
+        np.random.seed(1234)
+        x = np.sort(np.random.random(20))
+        y = np.random.random(20) + np.random.random(20) * 1j
+        if bc_type == 'periodic':
+            y[-1] = y[0]
+        cb = CubicSpline(x, y, bc_type=bc_type)
+        bspl = BSpline.from_power_basis(cb, bc_type=bc_type)
+        bspl_new_real = make_interp_spline(x, y.real, bc_type=bc_type)
+        bspl_new_imag = make_interp_spline(x, y.imag, bc_type=bc_type)
+        assert_equal(bspl.c.dtype, (bspl_new_real.c
+                                    + 1j * bspl_new_imag.c).dtype)
+        assert_allclose(bspl.c, bspl_new_real.c
+                        + 1j * bspl_new_imag.c, atol=1e-15)
+
+    def test_from_power_basis_exmp(self):
+        '''
+        For x = [0, 1, 2, 3, 4] and y = [1, 1, 1, 1, 1]
+        the coefficients of the cubic spline in the power basis are:
+
+            [[0, 0, 0, 0, 0],
+             [0, 0, 0, 0, 0],
+             [0, 0, 0, 0, 0],
+             [1, 1, 1, 1, 1]]
+
+        It can be shown explicitly that the coefficients of the
+        interpolating function in the B-spline basis are
+        c = [1, 1, 1, 1, 1, 1, 1].
+        '''
+        x = np.array([0, 1, 2, 3, 4])
+        y = np.array([1, 1, 1, 1, 1])
+        bspl = BSpline.from_power_basis(CubicSpline(x, y, bc_type='natural'),
+                                        bc_type='natural')
+        assert_allclose(bspl.c, [1, 1, 1, 1, 1, 1, 1], atol=1e-15)
+
+
+def test_knots_multiplicity():
+    # Take a spline w/ random coefficients, throw in knots of varying
+    # multiplicity.
+
+    def check_splev(b, j, der=0, atol=1e-14, rtol=1e-14):
+        # check evaluations against FITPACK, incl extrapolations
+        t, c, k = b.tck
+        x = np.unique(t)
+        # midpoints between unique knots, plus points outside the base interval
+        x = np.r_[t[0]-0.1, 0.5*(x[1:] + x[:-1]), t[-1]+0.1]
+        assert_allclose(splev(x, (t, c, k), der), b(x, der),
+                atol=atol, rtol=rtol, err_msg='der = %s  k = %s' % (der, b.k))
+
+    # test loop itself
+    # [the index `j` is for interpreting the traceback in case of a failure]
+    for k in [1, 2, 3, 4, 5]:
+        b = _make_random_spline(k=k)
+        for j, b1 in enumerate(_make_multiples(b)):
+            check_splev(b1, j)
+            for der in range(1, k+1):
+                check_splev(b1, j, der, 1e-12, 1e-12)
+
+
+### stolen from @pv, verbatim
+def _naive_B(x, k, i, t):
+    """
+    Naive way to compute B-spline basis functions. Useful only for testing!
+    computes B(x; t[i],..., t[i+k+1])
+    """
+    if k == 0:
+        return 1.0 if t[i] <= x < t[i+1] else 0.0
+    if t[i+k] == t[i]:
+        c1 = 0.0
+    else:
+        c1 = (x - t[i])/(t[i+k] - t[i]) * _naive_B(x, k-1, i, t)
+    if t[i+k+1] == t[i+1]:
+        c2 = 0.0
+    else:
+        c2 = (t[i+k+1] - x)/(t[i+k+1] - t[i+1]) * _naive_B(x, k-1, i+1, t)
+    return (c1 + c2)
+
+
+### stolen from @pv, verbatim
+def _naive_eval(x, t, c, k):
+    """
+    Naive B-spline evaluation. Useful only for testing!
+    """
+    if x == t[k]:
+        i = k
+    else:
+        i = np.searchsorted(t, x) - 1
+    assert t[i] <= x <= t[i+1]
+    assert i >= k and i < len(t) - k
+    return sum(c[i-j] * _naive_B(x, k, i-j, t) for j in range(0, k+1))
+
+
+def _naive_eval_2(x, t, c, k):
+    """Naive B-spline evaluation, another way."""
+    n = len(t) - (k+1)
+    assert n >= k+1
+    assert len(c) >= n
+    assert t[k] <= x <= t[n]
+    return sum(c[i] * _naive_B(x, k, i, t) for i in range(n))
+
+
+def _sum_basis_elements(x, t, c, k):
+    n = len(t) - (k+1)
+    assert n >= k+1
+    assert len(c) >= n
+    s = 0.
+    for i in range(n):
+        b = BSpline.basis_element(t[i:i+k+2], extrapolate=False)(x)
+        s += c[i] * np.nan_to_num(b)   # zero out out-of-bounds elements
+    return s
+
+
+def B_012(x):
+    """ A linear B-spline function B(x | 0, 1, 2)."""
+    x = np.atleast_1d(x)
+    return np.piecewise(x, [(x < 0) | (x > 2),
+                            (x >= 0) & (x < 1),
+                            (x >= 1) & (x <= 2)],
+                           [lambda x: 0., lambda x: x, lambda x: 2.-x])
+
+
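+# Aside (an added sketch, not from the upstream suite): B_012 should
+# coincide with the library's own basis element on [0, 2]:
+def _check_B_012():
+    b = BSpline.basis_element([0, 1, 2], extrapolate=False)
+    xx = np.linspace(0, 2, 9)
+    assert_allclose(B_012(xx), b(xx), atol=1e-14)
+
+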
+def B_0123(x, der=0):
+    """A quadratic B-spline function B(x | 0, 1, 2, 3)."""
+    x = np.atleast_1d(x)
+    conds = [x < 1, (x > 1) & (x < 2), x > 2]
+    if der == 0:
+        funcs = [lambda x: x*x/2.,
+                 lambda x: 3./4 - (x-3./2)**2,
+                 lambda x: (3.-x)**2 / 2]
+    elif der == 2:
+        funcs = [lambda x: 1.,
+                 lambda x: -2.,
+                 lambda x: 1.]
+    else:
+        raise ValueError('never be here: der=%s' % der)
+    pieces = np.piecewise(x, conds, funcs)
+    return pieces
+
+
+def _make_random_spline(n=35, k=3):
+    np.random.seed(123)
+    t = np.sort(np.random.random(n+k+1))
+    c = np.random.random(n)
+    return BSpline.construct_fast(t, c, k)
+
+
+def _make_multiples(b):
+    """Increase knot multiplicity."""
+    c, k = b.c, b.k
+
+    t1 = b.t.copy()
+    t1[17:19] = t1[17]
+    t1[22] = t1[21]
+    yield BSpline(t1, c, k)
+
+    t1 = b.t.copy()
+    t1[:k+1] = t1[0]
+    yield BSpline(t1, c, k)
+
+    t1 = b.t.copy()
+    t1[-k-1:] = t1[-1]
+    yield BSpline(t1, c, k)
+
+
+class TestInterop:
+    #
+    # Test that FITPACK-based spl* functions can deal with BSpline objects
+    #
+    def setup_method(self):
+        xx = np.linspace(0, 4.*np.pi, 41)
+        yy = np.cos(xx)
+        b = make_interp_spline(xx, yy)
+        self.tck = (b.t, b.c, b.k)
+        self.xx, self.yy, self.b = xx, yy, b
+
+        self.xnew = np.linspace(0, 4.*np.pi, 21)
+
+        c2 = np.c_[b.c, b.c, b.c]
+        self.c2 = np.dstack((c2, c2))
+        self.b2 = BSpline(b.t, self.c2, b.k)
+
+    def test_splev(self):
+        xnew, b, b2 = self.xnew, self.b, self.b2
+
+        # check that splev works with 1-D array of coefficients
+        # for array and scalar `x`
+        assert_allclose(splev(xnew, b),
+                        b(xnew), atol=1e-15, rtol=1e-15)
+        assert_allclose(splev(xnew, b.tck),
+                        b(xnew), atol=1e-15, rtol=1e-15)
+        assert_allclose([splev(x, b) for x in xnew],
+                        b(xnew), atol=1e-15, rtol=1e-15)
+
+        # With N-D coefficients, there's a quirk: splev(x, BSpline) would
+        # be equivalent to BSpline(x), so splev refuses and asks for a
+        # direct BSpline call instead
+        with assert_raises(ValueError, match="Calling splev.. with BSpline"):
+            splev(xnew, b2)
+
+        # However, splev(x, BSpline.tck) needs some transposes. This is because
+        # BSpline interpolates along the first axis, while the legacy FITPACK
+        # wrapper does list(map(...)) which effectively interpolates along the
+        # last axis. Like so:
+        sh = tuple(range(1, b2.c.ndim)) + (0,)   # sh = (1, 2, 0)
+        cc = b2.c.transpose(sh)
+        tck = (b2.t, cc, b2.k)
+        assert_allclose(splev(xnew, tck),
+                        b2(xnew).transpose(sh), atol=1e-15, rtol=1e-15)
+
+    def test_splrep(self):
+        x, y = self.xx, self.yy
+        # test that "new" splrep is equivalent to _impl.splrep
+        tck = splrep(x, y)
+        t, c, k = _impl.splrep(x, y)
+        assert_allclose(tck[0], t, atol=1e-15)
+        assert_allclose(tck[1], c, atol=1e-15)
+        assert_equal(tck[2], k)
+
+        # also cover the `full_output=True` branch
+        tck_f, _, _, _ = splrep(x, y, full_output=True)
+        assert_allclose(tck_f[0], t, atol=1e-15)
+        assert_allclose(tck_f[1], c, atol=1e-15)
+        assert_equal(tck_f[2], k)
+
+        # test that the result of splrep roundtrips with splev:
+        # evaluate the spline on the original `x` points
+        yy = splev(x, tck)
+        assert_allclose(y, yy, atol=1e-15)
+
+        # ... and also it roundtrips if wrapped in a BSpline
+        b = BSpline(*tck)
+        assert_allclose(y, b(x), atol=1e-15)
+
+    def test_splrep_errors(self):
+        # test that both "old" and "new" splrep raise for an N-D ``y`` array
+        # with n > 1
+        x, y = self.xx, self.yy
+        y2 = np.c_[y, y]
+        with assert_raises(ValueError):
+            splrep(x, y2)
+        with assert_raises(ValueError):
+            _impl.splrep(x, y2)
+
+        # input below minimum size
+        with assert_raises(TypeError, match="m > k must hold"):
+            splrep(x[:3], y[:3])
+        with assert_raises(TypeError, match="m > k must hold"):
+            _impl.splrep(x[:3], y[:3])
+
+    def test_splprep(self):
+        x = np.arange(15).reshape((3, 5))
+        b, u = splprep(x)
+        tck, u1 = _impl.splprep(x)
+
+        # test the roundtrip with splev for both "old" and "new" output
+        assert_allclose(u, u1, atol=1e-15)
+        assert_allclose(splev(u, b), x, atol=1e-15)
+        assert_allclose(splev(u, tck), x, atol=1e-15)
+
+        # cover the ``full_output=True`` branch
+        (b_f, u_f), _, _, _ = splprep(x, s=0, full_output=True)
+        assert_allclose(u, u_f, atol=1e-15)
+        assert_allclose(splev(u_f, b_f), x, atol=1e-15)
+
+    def test_splprep_errors(self):
+        # test that both "old" and "new" code paths raise for x.ndim > 2
+        x = np.arange(3*4*5).reshape((3, 4, 5))
+        with assert_raises(ValueError, match="too many values to unpack"):
+            splprep(x)
+        with assert_raises(ValueError, match="too many values to unpack"):
+            _impl.splprep(x)
+
+        # input below minimum size
+        x = np.linspace(0, 40, num=3)
+        with assert_raises(TypeError, match="m > k must hold"):
+            splprep([x])
+        with assert_raises(TypeError, match="m > k must hold"):
+            _impl.splprep([x])
+
+        # automatically calculated parameters are non-increasing
+        # see gh-7589
+        x = [-50.49072266, -50.49072266, -54.49072266, -54.49072266]
+        with assert_raises(ValueError, match="Invalid inputs"):
+            splprep([x])
+        with assert_raises(ValueError, match="Invalid inputs"):
+            _impl.splprep([x])
+
+        # given non-increasing parameter values u
+        x = [1, 3, 2, 4]
+        u = [0, 0.3, 0.2, 1]
+        with assert_raises(ValueError, match="Invalid inputs"):
+            splprep(*[[x], None, u])
+
+    def test_sproot(self):
+        b, b2 = self.b, self.b2
+        roots = np.array([0.5, 1.5, 2.5, 3.5])*np.pi
+        # sproot accepts a BSpline obj w/ 1-D coef array
+        assert_allclose(sproot(b), roots, atol=1e-7, rtol=1e-7)
+        assert_allclose(sproot((b.t, b.c, b.k)), roots, atol=1e-7, rtol=1e-7)
+
+        # ... and refuses BSpline objects with N-D coefficient arrays
+        with assert_raises(ValueError, match="Calling sproot.. with BSpline"):
+            sproot(b2, mest=50)
+
+        # and legacy behavior is preserved for a tck tuple w/ N-D coef
+        c2r = b2.c.transpose(1, 2, 0)
+        rr = np.asarray(sproot((b2.t, c2r, b2.k), mest=50))
+        assert_equal(rr.shape, (3, 2, 4))
+        assert_allclose(rr - roots, 0, atol=1e-12)
+
+    def test_splint(self):
+        # test that splint accepts BSpline objects
+        b, b2 = self.b, self.b2
+        assert_allclose(splint(0, 1, b),
+                        splint(0, 1, b.tck), atol=1e-14)
+        assert_allclose(splint(0, 1, b),
+                        b.integrate(0, 1), atol=1e-14)
+
+        # ... and refuses BSpline objects with N-D coefficient arrays
+        with assert_raises(ValueError, match="Calling splint.. with BSpline"):
+            splint(0, 1, b2)
+
+        # and the legacy behavior is preserved for a tck tuple w/ N-D coef
+        c2r = b2.c.transpose(1, 2, 0)
+        integr = np.asarray(splint(0, 1, (b2.t, c2r, b2.k)))
+        assert_equal(integr.shape, (3, 2))
+        assert_allclose(integr,
+                        splint(0, 1, b), atol=1e-14)
+
+    def test_splder(self):
+        for b in [self.b, self.b2]:
+            # pad the c array (FITPACK convention)
+            ct = len(b.t) - len(b.c)
+            if ct > 0:
+                b.c = np.r_[b.c, np.zeros((ct,) + b.c.shape[1:])]
+
+            for n in [1, 2, 3]:
+                bd = splder(b)
+                tck_d = _impl.splder((b.t, b.c, b.k))
+                assert_allclose(bd.t, tck_d[0], atol=1e-15)
+                assert_allclose(bd.c, tck_d[1], atol=1e-15)
+                assert_equal(bd.k, tck_d[2])
+                assert_(isinstance(bd, BSpline))
+                assert_(isinstance(tck_d, tuple))  # back-compat: tck in and out
+
+    def test_splantider(self):
+        for b in [self.b, self.b2]:
+            # pad the c array (FITPACK convention)
+            ct = len(b.t) - len(b.c)
+            if ct > 0:
+                b.c = np.r_[b.c, np.zeros((ct,) + b.c.shape[1:])]
+
+            for n in [1, 2, 3]:
+                bd = splantider(b)
+                tck_d = _impl.splantider((b.t, b.c, b.k))
+                assert_allclose(bd.t, tck_d[0], atol=1e-15)
+                assert_allclose(bd.c, tck_d[1], atol=1e-15)
+                assert_equal(bd.k, tck_d[2])
+                assert_(isinstance(bd, BSpline))
+                assert_(isinstance(tck_d, tuple))  # back-compat: tck in and out
+
+    def test_insert(self):
+        b, b2, xx = self.b, self.b2, self.xx
+
+        j = b.t.size // 2
+        tn = 0.5*(b.t[j] + b.t[j+1])
+
+        bn, tck_n = insert(tn, b), insert(tn, (b.t, b.c, b.k))
+        assert_allclose(splev(xx, bn),
+                        splev(xx, tck_n), atol=1e-15)
+        assert_(isinstance(bn, BSpline))
+        assert_(isinstance(tck_n, tuple))   # back-compat: tck in, tck out
+
+        # for an N-D array of coefficients, BSpline.c needs to be transposed;
+        # after that, the results are equivalent.
+        sh = tuple(range(b2.c.ndim))
+        c_ = b2.c.transpose(sh[1:] + (0,))
+        tck_n2 = insert(tn, (b2.t, c_, b2.k))
+
+        bn2 = insert(tn, b2)
+
+        # need a transpose for comparing the results, cf test_splev
+        assert_allclose(np.asarray(splev(xx, tck_n2)).transpose(2, 0, 1),
+                        bn2(xx), atol=1e-15)
+        assert_(isinstance(bn2, BSpline))
+        assert_(isinstance(tck_n2, tuple))   # back-compat: tck in, tck out
+
+
+class TestInterp:
+    #
+    # Test basic ways of constructing interpolating splines.
+    #
+    xx = np.linspace(0., 2.*np.pi)
+    yy = np.sin(xx)
+
+    def test_non_int_order(self):
+        with assert_raises(TypeError):
+            make_interp_spline(self.xx, self.yy, k=2.5)
+
+    def test_order_0(self):
+        b = make_interp_spline(self.xx, self.yy, k=0)
+        assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+        b = make_interp_spline(self.xx, self.yy, k=0, axis=-1)
+        assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+
+    def test_linear(self):
+        b = make_interp_spline(self.xx, self.yy, k=1)
+        assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+        b = make_interp_spline(self.xx, self.yy, k=1, axis=-1)
+        assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+
+    @pytest.mark.parametrize('k', [0, 1, 2, 3])
+    def test_incompatible_x_y(self, k):
+        x = [0, 1, 2, 3, 4, 5]
+        y = [0, 1, 2, 3, 4, 5, 6, 7]
+        with assert_raises(ValueError, match="Shapes of x"):
+            make_interp_spline(x, y, k=k)
+
+    @pytest.mark.parametrize('k', [0, 1, 2, 3])
+    def test_broken_x(self, k):
+        x = [0, 1, 1, 2, 3, 4]      # duplicates
+        y = [0, 1, 2, 3, 4, 5]
+        with assert_raises(ValueError, match="x to not have duplicates"):
+            make_interp_spline(x, y, k=k)
+
+        x = [0, 2, 1, 3, 4, 5]      # unsorted
+        with assert_raises(ValueError, match="Expect x to be a 1D strictly"):
+            make_interp_spline(x, y, k=k)
+
+        x = [0, 1, 2, 3, 4, 5]
+        x = np.asarray(x).reshape((1, -1))     # x is 2-D here, not 1-D
+        with assert_raises(ValueError, match="Expect x to be a 1D strictly"):
+            make_interp_spline(x, y, k=k)
+
+    def test_not_a_knot(self):
+        for k in [3, 5]:
+            b = make_interp_spline(self.xx, self.yy, k)
+            assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+
+    def test_periodic(self):
+        # k = 5 here for more derivatives
+        b = make_interp_spline(self.xx, self.yy, k=5, bc_type='periodic')
+        assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+        # in the periodic case, the first k-1 derivatives are expected
+        # to be equal at the boundaries
+        for i in range(1, 5):
+            assert_allclose(b(self.xx[0], nu=i), b(self.xx[-1], nu=i), atol=1e-11)
+        # tests for axis=-1
+        b = make_interp_spline(self.xx, self.yy, k=5, bc_type='periodic', axis=-1)
+        assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+        for i in range(1, 5):
+            assert_allclose(b(self.xx[0], nu=i), b(self.xx[-1], nu=i), atol=1e-11)
+
+    @pytest.mark.parametrize('k', [2, 3, 4, 5, 6, 7])
+    def test_periodic_random(self, k):
+        # tests for both cases (k > n and k <= n)
+        n = 5
+        np.random.seed(1234)
+        x = np.sort(np.random.random_sample(n) * 10)
+        y = np.random.random_sample(n) * 100
+        y[0] = y[-1]
+        b = make_interp_spline(x, y, k=k, bc_type='periodic')
+        assert_allclose(b(x), y, atol=1e-14)
+
+    def test_periodic_axis(self):
+        n = self.xx.shape[0]
+        np.random.seed(1234)
+        x = np.random.random_sample(n) * 2 * np.pi
+        x = np.sort(x)
+        x[0] = 0.
+        x[-1] = 2 * np.pi
+        y = np.zeros((2, n))
+        y[0] = np.sin(x)
+        y[1] = np.cos(x)
+        b = make_interp_spline(x, y, k=5, bc_type='periodic', axis=1)
+        for i in range(n):
+            assert_allclose(b(x[i]), y[:, i], atol=1e-14)
+        assert_allclose(b(x[0]), b(x[-1]), atol=1e-14)
+
+    def test_periodic_points_exception(self):
+        # first and last points should match when the periodic case is expected
+        np.random.seed(1234)
+        k = 5
+        n = 8
+        x = np.sort(np.random.random_sample(n))
+        y = np.random.random_sample(n)
+        y[0] = y[-1] - 1  # to be sure that they are not equal
+        with assert_raises(ValueError):
+            make_interp_spline(x, y, k=k, bc_type='periodic')
+
+    def test_periodic_knots_exception(self):
+        # the `periodic` case does not accept a user-supplied vector of knots
+        np.random.seed(1234)
+        k = 3
+        n = 7
+        x = np.sort(np.random.random_sample(n))
+        y = np.random.random_sample(n)
+        t = np.zeros(n + 2 * k)
+        with assert_raises(ValueError):
+            make_interp_spline(x, y, k, t, 'periodic')
+
+    @pytest.mark.parametrize('k', [2, 3, 4, 5])
+    def test_periodic_splev(self, k):
+        # compare values of the periodic b-spline with splev
+        b = make_interp_spline(self.xx, self.yy, k=k, bc_type='periodic')
+        tck = splrep(self.xx, self.yy, per=True, k=k)
+        spl = splev(self.xx, tck)
+        assert_allclose(spl, b(self.xx), atol=1e-14)
+
+        # compare derivatives of the periodic b-spline with splev
+        for i in range(1, k):
+            spl = splev(self.xx, tck, der=i)
+            assert_allclose(spl, b(self.xx, nu=i), atol=1e-10)
+
+    def test_periodic_cubic(self):
+        # compare values of the cubic periodic b-spline with CubicSpline
+        b = make_interp_spline(self.xx, self.yy, k=3, bc_type='periodic')
+        cub = CubicSpline(self.xx, self.yy, bc_type='periodic')
+        assert_allclose(b(self.xx), cub(self.xx), atol=1e-14)
+
+        # edge case: Cubic interpolation on 3 points
+        n = 3
+        x = np.sort(np.random.random_sample(n) * 10)
+        y = np.random.random_sample(n) * 100
+        y[0] = y[-1]
+        b = make_interp_spline(x, y, k=3, bc_type='periodic')
+        cub = CubicSpline(x, y, bc_type='periodic')
+        assert_allclose(b(x), cub(x), atol=1e-14)
+
+    def test_periodic_full_matrix(self):
+        # compare values of the cubic periodic b-spline with the
+        # solution of the system using a full matrix
+        k = 3
+        b = make_interp_spline(self.xx, self.yy, k=k, bc_type='periodic')
+        t = _periodic_knots(self.xx, k)
+        c = _make_interp_per_full_matr(self.xx, self.yy, t, k)
+        b1 = np.vectorize(lambda x: _naive_eval(x, t, c, k))
+        assert_allclose(b(self.xx), b1(self.xx), atol=1e-14)
+
+    def test_quadratic_deriv(self):
+        der = [(1, 8.)]  # order, value: f'(x) = 8.
+
+        # derivative at right-hand edge
+        b = make_interp_spline(self.xx, self.yy, k=2, bc_type=(None, der))
+        assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+        assert_allclose(b(self.xx[-1], 1), der[0][1], atol=1e-14, rtol=1e-14)
+
+        # derivative at left-hand edge
+        b = make_interp_spline(self.xx, self.yy, k=2, bc_type=(der, None))
+        assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+        assert_allclose(b(self.xx[0], 1), der[0][1], atol=1e-14, rtol=1e-14)
+
+    def test_cubic_deriv(self):
+        k = 3
+
+        # first derivatives at left & right edges:
+        der_l, der_r = [(1, 3.)], [(1, 4.)]
+        b = make_interp_spline(self.xx, self.yy, k, bc_type=(der_l, der_r))
+        assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+        assert_allclose([b(self.xx[0], 1), b(self.xx[-1], 1)],
+                        [der_l[0][1], der_r[0][1]], atol=1e-14, rtol=1e-14)
+
+        # 'natural' cubic spline, zero out 2nd derivatives at the boundaries
+        der_l, der_r = [(2, 0)], [(2, 0)]
+        b = make_interp_spline(self.xx, self.yy, k, bc_type=(der_l, der_r))
+        assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+
+    def test_quintic_derivs(self):
+        k, n = 5, 7
+        x = np.arange(n).astype(np.float_)
+        y = np.sin(x)
+        der_l = [(1, -12.), (2, 1)]
+        der_r = [(1, 8.), (2, 3.)]
+        b = make_interp_spline(x, y, k=k, bc_type=(der_l, der_r))
+        assert_allclose(b(x), y, atol=1e-14, rtol=1e-14)
+        assert_allclose([b(x[0], 1), b(x[0], 2)],
+                        [val for (nu, val) in der_l])
+        assert_allclose([b(x[-1], 1), b(x[-1], 2)],
+                        [val for (nu, val) in der_r])
+
+    @pytest.mark.xfail(reason='unstable')
+    def test_cubic_deriv_unstable(self):
+        # 1st and 2nd derivative at x[0], no derivative information at x[-1]
+        # The problem is not that it fails [who would use this anyway],
+        # the problem is that it fails *silently*, and I've no idea
+        # how to detect this sort of instability.
+        # In this particular case: it's OK for len(t) < 20, goes haywire
+        # at larger `len(t)`.
+        k = 3
+        t = _augknt(self.xx, k)
+
+        der_l = [(1, 3.), (2, 4.)]
+        b = make_interp_spline(self.xx, self.yy, k, t, bc_type=(der_l, None))
+        assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+
+    def test_knots_not_data_sites(self):
+        # Knots need not coincide with the data sites: use a quadratic
+        # spline with knots at the data averages; two additional
+        # constraints are zero 2nd derivatives at the edges
+        k = 2
+        t = np.r_[(self.xx[0],)*(k+1),
+                  (self.xx[1:] + self.xx[:-1]) / 2.,
+                  (self.xx[-1],)*(k+1)]
+        b = make_interp_spline(self.xx, self.yy, k, t,
+                               bc_type=([(2, 0)], [(2, 0)]))
+
+        assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
+        assert_allclose([b(self.xx[0], 2), b(self.xx[-1], 2)], [0., 0.],
+                atol=1e-14)
+
+    def test_minimum_points_and_deriv(self):
+        # interpolation of f(x) = x**3 between 0 and 1. f'(x) = 3 * x**2,
+        # so f'(0) = 0 and f'(1) = 3.
+        k = 3
+        x = [0., 1.]
+        y = [0., 1.]
+        b = make_interp_spline(x, y, k, bc_type=([(1, 0.)], [(1, 3.)]))
+
+        xx = np.linspace(0., 1.)
+        yy = xx**3
+        assert_allclose(b(xx), yy, atol=1e-14, rtol=1e-14)
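+        # (Added note: two function values plus one end-slope on each side
+        # give four conditions, matching the four degrees of freedom of a
+        # single cubic, so the interpolant reproduces x**3 exactly.)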
+
+    def test_deriv_spec(self):
+        # If one of the derivatives is omitted, the spline definition is
+        # incomplete.
+        x = y = [1.0, 2, 3, 4, 5, 6]
+
+        with assert_raises(ValueError):
+            make_interp_spline(x, y, bc_type=([(1, 0.)], None))
+
+        with assert_raises(ValueError):
+            make_interp_spline(x, y, bc_type=(1, 0.))
+
+        with assert_raises(ValueError):
+            make_interp_spline(x, y, bc_type=[(1, 0.)])
+
+        with assert_raises(ValueError):
+            make_interp_spline(x, y, bc_type=42)
+
+        # CubicSpline expects `bc_type=(left_pair, right_pair)`, while
+        # here we expect `bc_type=(iterable, iterable)`.
+        l, r = (1, 0.0), (1, 0.0)
+        with assert_raises(ValueError):
+            make_interp_spline(x, y, bc_type=(l, r))
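+
+        # For reference (an added, illustrative aside): wrapping each
+        # (order, value) pair in an iterable gives the accepted spelling:
+        b = make_interp_spline(x, y, bc_type=([l], [r]))
+        assert_allclose(b(x), y, atol=1e-14)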
+
+    def test_complex(self):
+        k = 3
+        xx = self.xx
+        yy = self.yy + 1.j*self.yy
+
+        # first derivatives at left & right edges:
+        der_l, der_r = [(1, 3.j)], [(1, 4.+2.j)]
+        b = make_interp_spline(xx, yy, k, bc_type=(der_l, der_r))
+        assert_allclose(b(xx), yy, atol=1e-14, rtol=1e-14)
+        assert_allclose([b(xx[0], 1), b(xx[-1], 1)],
+                        [der_l[0][1], der_r[0][1]], atol=1e-14, rtol=1e-14)
+
+        # also test zero and first order
+        for k in (0, 1):
+            b = make_interp_spline(xx, yy, k=k)
+            assert_allclose(b(xx), yy, atol=1e-14, rtol=1e-14)
+
+    def test_int_xy(self):
+        x = np.arange(10).astype(np.int_)
+        y = np.arange(10).astype(np.int_)
+
+        # Cython chokes on "buffer type mismatch" (construction) or
+        # "no matching signature found" (evaluation)
+        for k in (0, 1, 2, 3):
+            b = make_interp_spline(x, y, k=k)
+            b(x)
+
+    def test_sliced_input(self):
+        # Cython code chokes on non-C-contiguous arrays
+        xx = np.linspace(-1, 1, 100)
+
+        x = xx[::5]
+        y = xx[::5]
+
+        for k in (0, 1, 2, 3):
+            make_interp_spline(x, y, k=k)
+
+    def test_check_finite(self):
+        # check_finite defaults to True; nans and such trigger a ValueError
+        x = np.arange(10).astype(float)
+        y = x**2
+
+        for z in [np.nan, np.inf, -np.inf]:
+            y[-1] = z
+            assert_raises(ValueError, make_interp_spline, x, y)
+
+    @pytest.mark.parametrize('k', [1, 2, 3, 5])
+    def test_list_input(self, k):
+        # regression test for gh-8714: TypeError for x, y being lists and k=2
+        x = list(range(10))
+        y = [a**2 for a in x]
+        make_interp_spline(x, y, k=k)
+
+    def test_multiple_rhs(self):
+        yy = np.c_[np.sin(self.xx), np.cos(self.xx)]
+        der_l = [(1, [1., 2.])]
+        der_r = [(1, [3., 4.])]
+
+        b = make_interp_spline(self.xx, yy, k=3, bc_type=(der_l, der_r))
+        assert_allclose(b(self.xx), yy, atol=1e-14, rtol=1e-14)
+        assert_allclose(b(self.xx[0], 1), der_l[0][1], atol=1e-14, rtol=1e-14)
+        assert_allclose(b(self.xx[-1], 1), der_r[0][1], atol=1e-14, rtol=1e-14)
+
+    def test_shapes(self):
+        np.random.seed(1234)
+        k, n = 3, 22
+        x = np.sort(np.random.random(size=n))
+        y = np.random.random(size=(n, 5, 6, 7))
+
+        b = make_interp_spline(x, y, k)
+        assert_equal(b.c.shape, (n, 5, 6, 7))
+
+        # now throw in some derivatives
+        d_l = [(1, np.random.random((5, 6, 7)))]
+        d_r = [(1, np.random.random((5, 6, 7)))]
+        b = make_interp_spline(x, y, k, bc_type=(d_l, d_r))
+        assert_equal(b.c.shape, (n + k - 1, 5, 6, 7))
+
+    def test_string_aliases(self):
+        yy = np.sin(self.xx)
+
+        # a single string is duplicated
+        b1 = make_interp_spline(self.xx, yy, k=3, bc_type='natural')
+        b2 = make_interp_spline(self.xx, yy, k=3, bc_type=([(2, 0)], [(2, 0)]))
+        assert_allclose(b1.c, b2.c, atol=1e-15)
+
+        # two strings are handled
+        b1 = make_interp_spline(self.xx, yy, k=3,
+                                bc_type=('natural', 'clamped'))
+        b2 = make_interp_spline(self.xx, yy, k=3,
+                                bc_type=([(2, 0)], [(1, 0)]))
+        assert_allclose(b1.c, b2.c, atol=1e-15)
+
+        # one-sided BCs are OK
+        b1 = make_interp_spline(self.xx, yy, k=2, bc_type=(None, 'clamped'))
+        b2 = make_interp_spline(self.xx, yy, k=2, bc_type=(None, [(1, 0.0)]))
+        assert_allclose(b1.c, b2.c, atol=1e-15)
+
+        # 'not-a-knot' is equivalent to None
+        b1 = make_interp_spline(self.xx, yy, k=3, bc_type='not-a-knot')
+        b2 = make_interp_spline(self.xx, yy, k=3, bc_type=None)
+        assert_allclose(b1.c, b2.c, atol=1e-15)
+
+        # unknown strings do not pass
+        with assert_raises(ValueError):
+            make_interp_spline(self.xx, yy, k=3, bc_type='typo')
+
+        # string aliases are handled for 2D values
+        yy = np.c_[np.sin(self.xx), np.cos(self.xx)]
+        der_l = [(1, [0., 0.])]
+        der_r = [(2, [0., 0.])]
+        b2 = make_interp_spline(self.xx, yy, k=3, bc_type=(der_l, der_r))
+        b1 = make_interp_spline(self.xx, yy, k=3,
+                                bc_type=('clamped', 'natural'))
+        assert_allclose(b1.c, b2.c, atol=1e-15)
+
+        # ... and for N-D values:
+        np.random.seed(1234)
+        k, n = 3, 22
+        x = np.sort(np.random.random(size=n))
+        y = np.random.random(size=(n, 5, 6, 7))
+
+        # now throw in some derivatives
+        d_l = [(1, np.zeros((5, 6, 7)))]
+        d_r = [(1, np.zeros((5, 6, 7)))]
+        b1 = make_interp_spline(x, y, k, bc_type=(d_l, d_r))
+        b2 = make_interp_spline(x, y, k, bc_type='clamped')
+        assert_allclose(b1.c, b2.c, atol=1e-15)
+
+    def test_full_matrix(self):
+        np.random.seed(1234)
+        k, n = 3, 7
+        x = np.sort(np.random.random(size=n))
+        y = np.random.random(size=n)
+        t = _not_a_knot(x, k)
+
+        b = make_interp_spline(x, y, k, t)
+        cf = make_interp_full_matr(x, y, t, k)
+        assert_allclose(b.c, cf, atol=1e-14, rtol=1e-14)
+
+    def test_woodbury(self):
+        '''
+        Check the Woodbury algorithm implementation on banded matrices
+        with random elements and dense blocks in the lower-left and
+        upper-right corners.
+        '''
+        np.random.seed(1234)
+        n = 201
+        for k in range(3, 32, 2):
+            offset = int((k - 1) / 2)
+            a = np.diagflat(np.random.random((1, n)))
+            for i in range(1, offset + 1):
+                a[:-i, i:] += np.diagflat(np.random.random((1, n - i)))
+                a[i:, :-i] += np.diagflat(np.random.random((1, n - i)))
+            ur = np.random.random((offset, offset))
+            a[:offset, -offset:] = ur
+            ll = np.random.random((offset, offset))
+            a[-offset:, :offset] = ll
+            d = np.zeros((k, n))
+            for i, j in enumerate(range(offset, -offset - 1, -1)):
+                if j < 0:
+                    d[i, :j] = np.diagonal(a, offset=j)
+                else:
+                    d[i, j:] = np.diagonal(a, offset=j)
+            b = np.random.random(n)
+            assert_allclose(_woodbury_algorithm(d, ur, ll, b, k),
+                            np.linalg.solve(a, b), atol=1e-14)
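+
+    # Aside (an added, illustrative sketch -- not upstream code): the identity
+    # exercised above is the Woodbury formula
+    #   (A + U V^T)^{-1} b
+    #       = A^{-1} b - A^{-1} U (I + V^T A^{-1} U)^{-1} V^T A^{-1} b,
+    # which lets a "banded plus corner blocks" system be solved with a banded
+    # solve plus a small dense correction.  In dense numpy:
+    def test_woodbury_identity_sketch(self):
+        np.random.seed(5678)
+        n, r = 12, 2
+        A = 4.*np.eye(n) + np.diag(np.ones(n-1), 1) + np.diag(np.ones(n-1), -1)
+        U = np.random.random((n, r))
+        V = np.random.random((n, r))
+        b = np.random.random(n)
+        Ainv = np.linalg.inv(A)
+        small = np.linalg.inv(np.eye(r) + V.T @ Ainv @ U)
+        x_w = Ainv @ b - Ainv @ U @ small @ V.T @ Ainv @ b
+        assert_allclose(x_w, np.linalg.solve(A + U @ V.T, b), atol=1e-10)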
+
+
+def make_interp_full_matr(x, y, t, k):
+    """Assemble an spline order k with knots t to interpolate
+    y(x) using full matrices.
+    Not-a-knot BC only.
+
+    This routine is here for testing only (even though it's functional).
+    """
+    assert x.size == y.size
+    assert t.size == x.size + k + 1
+    n = x.size
+
+    A = np.zeros((n, n), dtype=np.float_)
+
+    for j in range(n):
+        xval = x[j]
+        if xval == t[k]:
+            left = k
+        else:
+            left = np.searchsorted(t, xval) - 1
+
+        # fill a row
+        bb = _bspl.evaluate_all_bspl(t, k, xval, left)
+        A[j, left-k:left+1] = bb
+
+    c = sl.solve(A, y)
+    return c
+
+
+def make_lsq_full_matrix(x, y, t, k=3):
+    """Make the least-square spline, full matrices."""
+    x, y, t = map(np.asarray, (x, y, t))
+    m = x.size
+    n = t.size - k - 1
+
+    A = np.zeros((m, n), dtype=np.float_)
+
+    for j in range(m):
+        xval = x[j]
+        # find interval
+        if xval == t[k]:
+            left = k
+        else:
+            left = np.searchsorted(t, xval) - 1
+
+        # fill a row
+        bb = _bspl.evaluate_all_bspl(t, k, xval, left)
+        A[j, left-k:left+1] = bb
+
+    # with the observation matrix in hand, solve the LSQ problem
+    B = np.dot(A.T, A)
+    Y = np.dot(A.T, y)
+    c = sl.solve(B, Y)
+
+    return c, (A, Y)
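+
+# (Added note: forming the normal equations B = A^T A squares the condition
+# number of the problem; TestLSQ.test_lstsq below cross-checks the result
+# against np.linalg.lstsq, which solves the same problem more stably.)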
+
+
+class TestLSQ:
+    #
+    # Test make_lsq_spline
+    #
+    np.random.seed(1234)
+    n, k = 13, 3
+    x = np.sort(np.random.random(n))
+    y = np.random.random(n)
+    t = _augknt(np.linspace(x[0], x[-1], 7), k)
+
+    def test_lstsq(self):
+        # check LSQ construction vs a full matrix version
+        x, y, t, k = self.x, self.y, self.t, self.k
+
+        c0, AY = make_lsq_full_matrix(x, y, t, k)
+        b = make_lsq_spline(x, y, t, k)
+
+        assert_allclose(b.c, c0)
+        assert_equal(b.c.shape, (t.size - k - 1,))
+
+        # also check against numpy.lstsq
+        aa, yy = AY
+        c1, _, _, _ = np.linalg.lstsq(aa, y, rcond=-1)
+        assert_allclose(b.c, c1)
+
+    def test_weights(self):
+        # weights = 1 is the same as None
+        x, y, t, k = self.x, self.y, self.t, self.k
+        w = np.ones_like(x)
+
+        b = make_lsq_spline(x, y, t, k)
+        b_w = make_lsq_spline(x, y, t, k, w=w)
+
+        assert_allclose(b.t, b_w.t, atol=1e-14)
+        assert_allclose(b.c, b_w.c, atol=1e-14)
+        assert_equal(b.k, b_w.k)
+
+    def test_multiple_rhs(self):
+        x, t, k, n = self.x, self.t, self.k, self.n
+        y = np.random.random(size=(n, 5, 6, 7))
+
+        b = make_lsq_spline(x, y, t, k)
+        assert_equal(b.c.shape, (t.size-k-1, 5, 6, 7))
+
+    def test_complex(self):
+        # complex-valued `y`
+        x, t, k = self.x, self.t, self.k
+        yc = self.y * (1. + 2.j)
+
+        b = make_lsq_spline(x, yc, t, k)
+        b_re = make_lsq_spline(x, yc.real, t, k)
+        b_im = make_lsq_spline(x, yc.imag, t, k)
+
+        assert_allclose(b(x), b_re(x) + 1.j*b_im(x), atol=1e-15, rtol=1e-15)
+
+    def test_int_xy(self):
+        x = np.arange(10).astype(np.int_)
+        y = np.arange(10).astype(np.int_)
+        t = _augknt(x, k=1)
+        # Cython chokes on "buffer type mismatch"
+        make_lsq_spline(x, y, t, k=1)
+
+    def test_sliced_input(self):
+        # Cython code chokes on non-C-contiguous arrays
+        xx = np.linspace(-1, 1, 100)
+
+        x = xx[::3]
+        y = xx[::3]
+        t = _augknt(x, 1)
+        make_lsq_spline(x, y, t, k=1)
+
+    def test_checkfinite(self):
+        # check_finite defaults to True; nans and such trigger a ValueError
+        x = np.arange(12).astype(float)
+        y = x**2
+        t = _augknt(x, 3)
+
+        for z in [np.nan, np.inf, -np.inf]:
+            y[-1] = z
+            assert_raises(ValueError, make_lsq_spline, x, y, t)
+
+
+def data_file(basename):
+    return os.path.join(os.path.abspath(os.path.dirname(__file__)),
+                        'data', basename)
+
+
+class TestSmoothingSpline:
+    #
+    # test make_smoothing_spline
+    #
+    def test_invalid_input(self):
+        np.random.seed(1234)
+        n = 100
+        x = np.sort(np.random.random_sample(n) * 4 - 2)
+        y = x**2 * np.sin(4 * x) + x**3 + np.random.normal(0., 1.5, n)
+
+        # ``x`` and ``y`` should have the same shape (1-D arrays)
+        with assert_raises(ValueError):
+            make_smoothing_spline(x, y[1:])
+        with assert_raises(ValueError):
+            make_smoothing_spline(x[1:], y)
+        with assert_raises(ValueError):
+            make_smoothing_spline(x.reshape(1, n), y)
+
+        # ``x`` should be an ascending array
+        with assert_raises(ValueError):
+            make_smoothing_spline(x[::-1], y)
+
+        x_dupl = np.copy(x)
+        x_dupl[0] = x_dupl[1]
+
+        with assert_raises(ValueError):
+            make_smoothing_spline(x_dupl, y)
+
+    def test_compare_with_GCVSPL(self):
+        """
+        Data is generated in the following way:
+        >>> np.random.seed(1234)
+        >>> n = 100
+        >>> x = np.sort(np.random.random_sample(n) * 4 - 2)
+        >>> y = np.sin(x) + np.random.normal(scale=.5, size=n)
+        >>> np.savetxt('x.csv', x)
+        >>> np.savetxt('y.csv', y)
+
+        We obtain the reference result by running the GCV smoothing
+        splines package (by Woltring, gcvspl) on the sample data points,
+        using its version for Octave (https://github.com/srkuberski/gcvspl).
+        In order to use this implementation, one should clone the repository
+        and open the folder in Octave.
+        In Octave, we load up ``x`` and ``y`` (generated from Python code
+        above):
+
+        >>> x = csvread('x.csv');
+        >>> y = csvread('y.csv');
+
+        Then, in order to access the implementation, we compile gcvspl files in
+        Octave:
+
+        >>> mex gcvsplmex.c gcvspl.c
+        >>> mex spldermex.c gcvspl.c
+
+        The first function computes the vector of unknowns from the dataset
+        (x, y), while the second one evaluates the spline at given points
+        using a known vector of coefficients.
+
+        >>> c = gcvsplmex( x, y, 2 );
+        >>> y0 = spldermex( x, c, 2, x, 0 );
+
+        To compare against the results of the gcvspl code, we save
+        ``y0`` in a csv file:
+
+        >>> csvwrite('y0.csv', y0);
+
+        """
+        # load the data sample
+        data = np.load(data_file('gcvspl.npz'))
+        # data points
+        x = data['x']
+        y = data['y']
+
+        y_GCVSPL = data['y_GCVSPL']
+        y_compr = make_smoothing_spline(x, y)(x)
+
+        # the tolerance is loose because the spline is built using an
+        # iterative algorithm for minimizing the GCV criterion; these
+        # algorithms may vary in their details, so only rough agreement
+        # can be expected.
+        assert_allclose(y_compr, y_GCVSPL, atol=1e-4, rtol=1e-4)
+
+    def test_non_regularized_case(self):
+        """
+        If the regularization parameter is 0, the resulting spline
+        is an interpolation spline with natural boundary conditions.
+        """
+        # create data sample
+        np.random.seed(1234)
+        n = 100
+        x = np.sort(np.random.random_sample(n) * 4 - 2)
+        y = x**2 * np.sin(4 * x) + x**3 + np.random.normal(0., 1.5, n)
+
+        spline_GCV = make_smoothing_spline(x, y, lam=0.)
+        spline_interp = make_interp_spline(x, y, 3, bc_type='natural')
+
+        grid = np.linspace(x[0], x[-1], 2 * n)
+        assert_allclose(spline_GCV(grid),
+                        spline_interp(grid),
+                        atol=1e-15)
+
+    def test_weighted_smoothing_spline(self):
+        # create data sample
+        np.random.seed(1234)
+        n = 100
+        x = np.sort(np.random.random_sample(n) * 4 - 2)
+        y = x**2 * np.sin(4 * x) + x**3 + np.random.normal(0., 1.5, n)
+
+        spl = make_smoothing_spline(x, y)
+
+        # in order not to iterate over all of the indices, we select 10 of
+        # them randomly
+        for ind in np.random.choice(range(100), size=10):
+            w = np.ones(n)
+            w[ind] = 30.
+            spl_w = make_smoothing_spline(x, y, w)
+            # check that the spline with a large weight at a given point
+            # is closer to that point than the unweighted one
+            orig = abs(spl(x[ind]) - y[ind])
+            weighted = abs(spl_w(x[ind]) - y[ind])
+
+            if orig < weighted:
+                raise ValueError(f'Spline with weights should be closer to the'
+                                 f' points than the original one: {orig:.4} < '
+                                 f'{weighted:.4}')
diff --git a/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_fitpack.py b/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_fitpack.py
new file mode 100644
index 00000000..fcd5e561
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_fitpack.py
@@ -0,0 +1,462 @@
+import itertools
+import os
+
+import numpy as np
+from numpy.testing import (assert_equal, assert_allclose, assert_,
+                           assert_almost_equal, assert_array_almost_equal)
+from pytest import raises as assert_raises
+import pytest
+from scipy._lib._testutils import check_free_memory
+
+from scipy.interpolate import RectBivariateSpline
+
+from scipy.interpolate._fitpack_py import (splrep, splev, bisplrep, bisplev,
+     sproot, splprep, splint, spalde, splder, splantider, insert, dblint)
+from scipy.interpolate.dfitpack import regrid_smth
+from scipy.interpolate._fitpack2 import dfitpack_int
+
+
+def data_file(basename):
+    return os.path.join(os.path.abspath(os.path.dirname(__file__)),
+                        'data', basename)
+
+
+def norm2(x):
+    return np.sqrt(np.dot(x.T, x))
+
+
+def f1(x, d=0):
+    """Derivatives of sin->cos->-sin->-cos."""
+    if d % 4 == 0:
+        return np.sin(x)
+    if d % 4 == 1:
+        return np.cos(x)
+    if d % 4 == 2:
+        return -np.sin(x)
+    if d % 4 == 3:
+        return -np.cos(x)
+
+
+def makepairs(x, y):
+    """Helper function to create an array of pairs of x and y."""
+    xy = np.array(list(itertools.product(np.asarray(x), np.asarray(y))))
+    return xy.T
+
+
+class TestSmokeTests:
+    """
+    Smoke tests (with a few asserts) for fitpack routines -- mostly
+    check that they are runnable
+    """
+    def check_1(self, per=0, s=0, a=0, b=2*np.pi, at_nodes=False,
+                xb=None, xe=None):
+        if xb is None:
+            xb = a
+        if xe is None:
+            xe = b
+
+        N = 20
+        # nodes, and points in between them
+        x = np.linspace(a, b, N + 1)
+        x1 = a + (b - a) * np.arange(1, N, dtype=float) / float(N - 1)
+        v = f1(x)
+
+        def err_est(k, d):
+            # Assume f has all derivatives < 1
+            h = 1.0 / N
+            tol = 5 * h**(.75*(k-d))
+            if s > 0:
+                tol += 1e5*s
+            return tol
+
+        for k in range(1, 6):
+            tck = splrep(x, v, s=s, per=per, k=k, xe=xe)
+            tt = tck[0][k:-k] if at_nodes else x1
+
+            nd = []
+            for d in range(k+1):
+                tol = err_est(k, d)
+                err = norm2(f1(tt, d) - splev(tt, tck, d)) / norm2(f1(tt, d))
+                assert err < tol
+
+    def check_2(self, per=0, N=20, ia=0, ib=2*np.pi):
+        a, b, dx = 0, 2*np.pi, 0.2*np.pi
+        x = np.linspace(a, b, N+1)    # nodes
+        v = np.sin(x)
+
+        def err_est(k, d):
+            # Assume f has all derivatives < 1
+            h = 1.0 / N
+            tol = 5 * h**(.75*(k-d))
+            return tol
+
+        nk = []
+        for k in range(1, 6):
+            tck = splrep(x, v, s=0, per=per, k=k, xe=b)
+            nk.append([splint(ia, ib, tck), spalde(dx, tck)])
+
+        k = 1
+        for r in nk:
+            d = 0
+            for dr in r[1]:
+                tol = err_est(k, d)
+                assert_allclose(dr, f1(dx, d), atol=0, rtol=tol)
+                d = d+1
+            k = k+1
+
+    def test_smoke_splrep_splev(self):
+        self.check_1(s=1e-6)
+        self.check_1(b=1.5*np.pi)
+        self.check_1(b=1.5*np.pi, xe=2*np.pi, per=1, s=1e-1)
+
+    @pytest.mark.parametrize('per', [0, 1])
+    @pytest.mark.parametrize('at_nodes', [True, False])
+    def test_smoke_splrep_splev_2(self, per, at_nodes):
+        self.check_1(per=per, at_nodes=at_nodes)
+
+    @pytest.mark.parametrize('N', [20, 50])
+    @pytest.mark.parametrize('per', [0, 1])
+    def test_smoke_splint_spalde(self, N, per):
+        self.check_2(per=per, N=N)
+
+    @pytest.mark.parametrize('N', [20, 50])
+    @pytest.mark.parametrize('per', [0, 1])
+    def test_smoke_splint_spalde_iaib(self, N, per):
+        self.check_2(ia=0.2*np.pi, ib=np.pi, N=N, per=per)
+
+    def test_smoke_sproot(self):
+        # sproot is only implemented for k=3
+        a, b = 0.1, 15
+        x = np.linspace(a, b, 20)
+        v = np.sin(x)
+
+        for k in [1, 2, 4, 5]:
+            tck = splrep(x, v, s=0, per=0, k=k, xe=b)
+            with assert_raises(ValueError):
+                sproot(tck)
+
+        k = 3
+        tck = splrep(x, v, s=0, k=3)
+        roots = sproot(tck)
+        assert_allclose(splev(roots, tck), 0, atol=1e-10, rtol=1e-10)
+        assert_allclose(roots, np.pi * np.array([1, 2, 3, 4]), rtol=1e-3)
+
+    @pytest.mark.parametrize('N', [20, 50])
+    @pytest.mark.parametrize('k', [1, 2, 3, 4, 5])
+    def test_smoke_splprep_splrep_splev(self, N, k):
+        a, b, dx = 0, 2.*np.pi, 0.2*np.pi
+        x = np.linspace(a, b, N+1)    # nodes
+        v = np.sin(x)
+
+        tckp, u = splprep([x, v], s=0, per=0, k=k, nest=-1)
+        uv = splev(dx, tckp)
+        err1 = abs(uv[1] - np.sin(uv[0]))
+        assert err1 < 1e-2
+
+        tck = splrep(x, v, s=0, per=0, k=k)
+        err2 = abs(splev(uv[0], tck) - np.sin(uv[0]))
+        assert err2 < 1e-2
+
+        # Derivatives of parametric cubic spline at u (first function)
+        if k == 3:
+            tckp, u = splprep([x, v], s=0, per=0, k=k, nest=-1)
+            for d in range(1, k+1):
+                uv = splev(dx, tckp, d)
+
+    def test_smoke_bisplrep_bisplev(self):
+        xb, xe = 0, 2.*np.pi
+        yb, ye = 0, 2.*np.pi
+        kx, ky = 3, 3
+        Nx, Ny = 20, 20
+
+        def f2(x, y):
+            return np.sin(x+y)
+
+        x = np.linspace(xb, xe, Nx + 1)
+        y = np.linspace(yb, ye, Ny + 1)
+        xy = makepairs(x, y)
+        tck = bisplrep(xy[0], xy[1], f2(xy[0], xy[1]), s=0, kx=kx, ky=ky)
+
+        tt = [tck[0][kx:-kx], tck[1][ky:-ky]]
+        t2 = makepairs(tt[0], tt[1])
+        v1 = bisplev(tt[0], tt[1], tck)
+        v2 = f2(t2[0], t2[1])
+        v2.shape = len(tt[0]), len(tt[1])
+
+        assert norm2(np.ravel(v1 - v2)) < 1e-2
+
+
+class TestSplev:
+    def test_1d_shape(self):
+        x = [1,2,3,4,5]
+        y = [4,5,6,7,8]
+        tck = splrep(x, y)
+        z = splev([1], tck)
+        assert_equal(z.shape, (1,))
+        z = splev(1, tck)
+        assert_equal(z.shape, ())
+
+    def test_2d_shape(self):
+        x = [1, 2, 3, 4, 5]
+        y = [4, 5, 6, 7, 8]
+        tck = splrep(x, y)
+        t = np.array([[1.0, 1.5, 2.0, 2.5],
+                      [3.0, 3.5, 4.0, 4.5]])
+        z = splev(t, tck)
+        z0 = splev(t[0], tck)
+        z1 = splev(t[1], tck)
+        assert_equal(z, np.row_stack((z0, z1)))
+
+    def test_extrapolation_modes(self):
+        # test extrapolation modes
+        #    * if ext=0, return the extrapolated value.
+        #    * if ext=1, return 0
+        #    * if ext=2, raise a ValueError
+        #    * if ext=3, return the boundary value.
+        x = [1,2,3]
+        y = [0,2,4]
+        tck = splrep(x, y, k=1)
+
+        rstl = [[-2, 6], [0, 0], None, [0, 4]]
+        for ext in (0, 1, 3):
+            assert_array_almost_equal(splev([0, 4], tck, ext=ext), rstl[ext])
+
+        assert_raises(ValueError, splev, [0, 4], tck, ext=2)
+
+
+class TestSplder:
+    def setup_method(self):
+        # non-uniform grid, just to be sure
+        x = np.linspace(0, 1, 100)**3
+        y = np.sin(20 * x)
+        self.spl = splrep(x, y)
+
+        # double check that knots are non-uniform
+        assert_(np.diff(self.spl[0]).ptp() > 0)
+
+    def test_inverse(self):
+        # Check that antiderivative + derivative is identity.
+        for n in range(5):
+            spl2 = splantider(self.spl, n)
+            spl3 = splder(spl2, n)
+            assert_allclose(self.spl[0], spl3[0])
+            assert_allclose(self.spl[1], spl3[1])
+            assert_equal(self.spl[2], spl3[2])
+
+    def test_splder_vs_splev(self):
+        # Check derivative vs. FITPACK
+
+        for n in range(3+1):
+            # Also extrapolation!
+            xx = np.linspace(-1, 2, 2000)
+            if n == 3:
+                # ... except that FITPACK extrapolates strangely for
+                # order 0, so let's not check that.
+                xx = xx[(xx >= 0) & (xx <= 1)]
+
+            dy = splev(xx, self.spl, n)
+            spl2 = splder(self.spl, n)
+            dy2 = splev(xx, spl2)
+            if n == 1:
+                assert_allclose(dy, dy2, rtol=2e-6)
+            else:
+                assert_allclose(dy, dy2)
+
+    def test_splantider_vs_splint(self):
+        # Check antiderivative vs. FITPACK
+        spl2 = splantider(self.spl)
+
+        # no extrapolation, splint assumes function is zero outside
+        # range
+        xx = np.linspace(0, 1, 20)
+
+        for x1 in xx:
+            for x2 in xx:
+                y1 = splint(x1, x2, self.spl)
+                y2 = splev(x2, spl2) - splev(x1, spl2)
+                assert_allclose(y1, y2)
+
+    def test_order0_diff(self):
+        assert_raises(ValueError, splder, self.spl, 4)
+
+    def test_kink(self):
+        # Should refuse to differentiate splines with kinks
+
+        spl2 = insert(0.5, self.spl, m=2)
+        splder(spl2, 2)  # Should work
+        assert_raises(ValueError, splder, spl2, 3)
+
+        spl2 = insert(0.5, self.spl, m=3)
+        splder(spl2, 1)  # Should work
+        assert_raises(ValueError, splder, spl2, 2)
+
+        spl2 = insert(0.5, self.spl, m=4)
+        assert_raises(ValueError, splder, spl2, 1)
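+
+        # (Added note: inserting an interior knot m times leaves a C^(k-m)
+        # spline, so with k = 3 a triple knot gives a kink (C^0) and a
+        # quadruple knot a jump discontinuity -- hence the pattern above.)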
+
+    def test_multidim(self):
+        # c can have trailing dims
+        for n in range(3):
+            t, c, k = self.spl
+            c2 = np.c_[c, c, c]
+            c2 = np.dstack((c2, c2))
+
+            spl2 = splantider((t, c2, k), n)
+            spl3 = splder(spl2, n)
+
+            assert_allclose(t, spl3[0])
+            assert_allclose(c2, spl3[1])
+            assert_equal(k, spl3[2])
+
+
+class TestSplint:
+    def test_len_c(self):
+        n, k = 7, 3
+        x = np.arange(n)
+        y = x**3
+        t, c, k = splrep(x, y, s=0)
+
+        # note that len(c) == len(t) == 11 (== len(x) + 2*(k-1))
+        assert len(t) == len(c) == n + 2*(k-1)
+
+        # integrate directly: $\int_0^6 x^3 dx = 6^4 / 4$
+        res = splint(0, 6, (t, c, k))
+        assert_allclose(res, 6**4 / 4, atol=1e-15)
+
+        # check that the coefficients past len(t) - k - 1 are ignored
+        c0 = c.copy()
+        c0[len(t)-k-1:] = np.nan
+        res0 = splint(0, 6, (t, c0, k))
+        assert_allclose(res0, 6**4 / 4, atol=1e-15)
+
+        # however, all other coefficients *are* used
+        c0[6] = np.nan
+        assert np.isnan(splint(0, 6, (t, c0, k)))
+
+        # check that the coefficient array can have length `len(t) - k - 1`
+        c1 = c[:len(t) - k - 1]
+        res1 = splint(0, 6, (t, c1, k))
+        assert_allclose(res1, 6**4 / 4, atol=1e-15)
+
+        # however, shorter c arrays raise. The error from f2py is a
+        # `dfitpack.error`, which is an Exception but not a ValueError etc.
+        with assert_raises(Exception, match=r">=n-k-1"):
+            splint(0, 1, (np.ones(10), np.ones(5), 3))
+
+
+class TestBisplrep:
+    def test_overflow(self):
+        from numpy.lib.stride_tricks import as_strided
+        if dfitpack_int.itemsize == 8:
+            size = 1500000**2
+        else:
+            size = 400**2
+        # Don't allocate a real array, as it's very big, but rely on
+        # the fact that it is never referenced
+        x = as_strided(np.zeros(()), shape=(size,))
+        assert_raises(OverflowError, bisplrep, x, x, x, w=x,
+                      xb=0, xe=1, yb=0, ye=1, s=0)
+
+    def test_regression_1310(self):
+        # Regression test for gh-1310
+        data = np.load(data_file('bug-1310.npz'))['data']
+
+        # Shouldn't crash -- the input data triggers work array sizes
+        # that previously caused some data to not be aligned on
+        # sizeof(double) boundaries in memory, which made the Fortran
+        # code crash when compiled with -O3
+        bisplrep(data[:,0], data[:,1], data[:,2], kx=3, ky=3, s=0,
+                 full_output=True)
+
+    @pytest.mark.skipif(dfitpack_int != np.int64, reason="needs ilp64 fitpack")
+    def test_ilp64_bisplrep(self):
+        check_free_memory(28000)  # VM size, doesn't actually use the pages
+        x = np.linspace(0, 1, 400)
+        y = np.linspace(0, 1, 400)
+        x, y = np.meshgrid(x, y)
+        z = np.zeros_like(x)
+        tck = bisplrep(x, y, z, kx=3, ky=3, s=0)
+        assert_allclose(bisplev(0.5, 0.5, tck), 0.0)
+
+
+def test_dblint():
+    # Basic test to see it runs and gives the correct result on a trivial
+    # problem. Note that `dblint` is not exposed in the interpolate namespace.
+    x = np.linspace(0, 1)
+    y = np.linspace(0, 1)
+    xx, yy = np.meshgrid(x, y)
+    rect = RectBivariateSpline(x, y, 4 * xx * yy)
+    tck = list(rect.tck)
+    tck.extend(rect.degrees)
+
+    assert_almost_equal(dblint(0, 1, 0, 1, tck), 1)
+    assert_almost_equal(dblint(0, 0.5, 0, 1, tck), 0.25)
+    assert_almost_equal(dblint(0.5, 1, 0, 1, tck), 0.75)
+    assert_almost_equal(dblint(-100, 100, -100, 100, tck), 1)
+
+
+def test_splev_der_k():
+    # regression test for gh-2188: splev(x, tck, der=k) gives garbage or crashes
+    # for x outside of knot range
+
+    # test case from gh-2188
+    tck = (np.array([0., 0., 2.5, 2.5]),
+           np.array([-1.56679978, 2.43995873, 0., 0.]),
+           1)
+    t, c, k = tck
+    x = np.array([-3, 0, 2.5, 3])
+
+    # an explicit form of the linear spline
+    assert_allclose(splev(x, tck), c[0] + (c[1] - c[0]) * x/t[2])
+    assert_allclose(splev(x, tck, 1), (c[1]-c[0]) / t[2])
+
+    # now check a random spline vs splder
+    np.random.seed(1234)
+    x = np.sort(np.random.random(30))
+    y = np.random.random(30)
+    t, c, k = splrep(x, y)
+
+    x = [t[0] - 1., t[-1] + 1.]
+    tck2 = splder((t, c, k), k)
+    assert_allclose(splev(x, (t, c, k), k), splev(x, tck2))
+
+
+def test_splprep_segfault():
+    # regression test for gh-3847: splprep segfaults if knots are specified
+    # for task=-1
+    t = np.arange(0, 1.1, 0.1)
+    x = np.sin(2*np.pi*t)
+    y = np.cos(2*np.pi*t)
+    tck, u = splprep([x, y], s=0)
+    unew = np.arange(0, 1.01, 0.01)
+
+    uknots = tck[0]  # using the knots from the previous fitting
+    tck, u = splprep([x, y], task=-1, t=uknots)  # here is the crash
+
+
+def test_bisplev_integer_overflow():
+    np.random.seed(1)
+
+    x = np.linspace(0, 1, 11)
+    y = x
+    z = np.random.randn(11, 11).ravel()
+    kx = 1
+    ky = 1
+
+    nx, tx, ny, ty, c, fp, ier = regrid_smth(
+        x, y, z, None, None, None, None, kx=kx, ky=ky, s=0.0)
+    tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)], kx, ky)
+
+    xp = np.zeros([2621440])
+    yp = np.zeros([2621440])
+
+    assert_raises((RuntimeError, MemoryError), bisplev, xp, yp, tck)
+
+
+def test_spalde_scalar_input():
+    # Ticket #629
+    x = np.linspace(0, 10)
+    y = x**3
+    tck = splrep(x, y, k=3, t=[5])
+    res = spalde(np.float64(1), tck)
+    des = np.array([1., 3., 6., 6.])
+    assert_almost_equal(res, des)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_fitpack2.py b/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_fitpack2.py
new file mode 100644
index 00000000..c51f2882
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_fitpack2.py
@@ -0,0 +1,1347 @@
+# Created by Pearu Peterson, June 2003
+import itertools
+import numpy as np
+from numpy.testing import (assert_equal, assert_almost_equal, assert_array_equal,
+        assert_array_almost_equal, assert_allclose, suppress_warnings)
+from pytest import raises as assert_raises
+
+from numpy import array, diff, linspace, meshgrid, ones, pi, shape
+from scipy.interpolate._fitpack_py import bisplrep, bisplev, splrep, spalde
+from scipy.interpolate._fitpack2 import (UnivariateSpline,
+        LSQUnivariateSpline, InterpolatedUnivariateSpline,
+        LSQBivariateSpline, SmoothBivariateSpline, RectBivariateSpline,
+        LSQSphereBivariateSpline, SmoothSphereBivariateSpline,
+        RectSphereBivariateSpline)
+
+
+class TestUnivariateSpline:
+    def test_linear_constant(self):
+        x = [1,2,3]
+        y = [3,3,3]
+        lut = UnivariateSpline(x,y,k=1)
+        assert_array_almost_equal(lut.get_knots(),[1,3])
+        assert_array_almost_equal(lut.get_coeffs(),[3,3])
+        assert_almost_equal(lut.get_residual(),0.0)
+        assert_array_almost_equal(lut([1,1.5,2]),[3,3,3])
+
+    def test_preserve_shape(self):
+        x = [1, 2, 3]
+        y = [0, 2, 4]
+        lut = UnivariateSpline(x, y, k=1)
+        arg = 2
+        assert_equal(shape(arg), shape(lut(arg)))
+        assert_equal(shape(arg), shape(lut(arg, nu=1)))
+        arg = [1.5, 2, 2.5]
+        assert_equal(shape(arg), shape(lut(arg)))
+        assert_equal(shape(arg), shape(lut(arg, nu=1)))
+
+    def test_linear_1d(self):
+        x = [1,2,3]
+        y = [0,2,4]
+        lut = UnivariateSpline(x,y,k=1)
+        assert_array_almost_equal(lut.get_knots(),[1,3])
+        assert_array_almost_equal(lut.get_coeffs(),[0,4])
+        assert_almost_equal(lut.get_residual(),0.0)
+        assert_array_almost_equal(lut([1,1.5,2]),[0,1,2])
+
+    def test_subclassing(self):
+        # See #731
+
+        class ZeroSpline(UnivariateSpline):
+            def __call__(self, x):
+                return 0*array(x)
+
+        sp = ZeroSpline([1,2,3,4,5], [3,2,3,2,3], k=2)
+        assert_array_equal(sp([1.5, 2.5]), [0., 0.])
+
+    def test_empty_input(self):
+        # Test whether empty input returns an empty output. Ticket 1014
+        x = [1,3,5,7,9]
+        y = [0,4,9,12,21]
+        spl = UnivariateSpline(x, y, k=3)
+        assert_array_equal(spl([]), array([]))
+
+    def test_roots(self):
+        x = [1, 3, 5, 7, 9]
+        y = [0, 4, 9, 12, 21]
+        spl = UnivariateSpline(x, y, k=3)
+        assert_almost_equal(spl.roots()[0], 1.050290639101332)
+
+    def test_derivatives(self):
+        x = [1, 3, 5, 7, 9]
+        y = [0, 4, 9, 12, 21]
+        spl = UnivariateSpline(x, y, k=3)
+        assert_almost_equal(spl.derivatives(3.5),
+                            [5.5152902, 1.7146577, -0.1830357, 0.3125])
+
+    def test_derivatives_2(self):
+        x = np.arange(8)
+        y = x**3 + 2.*x**2
+
+        tck = splrep(x, y, s=0)
+        ders = spalde(3, tck)
+        assert_allclose(ders, [45.,   # 3**3 + 2*(3)**2
+                               39.,   # 3*(3)**2 + 4*(3)
+                               22.,   # 6*(3) + 4
+                               6.],   # 6*3**0
+                        atol=1e-15)
+        spl = UnivariateSpline(x, y, s=0, k=3)
+        assert_allclose(spl.derivatives(3),
+                        ders,
+                        atol=1e-15)
+
+    def test_resize_regression(self):
+        """Regression test for #1375."""
+        x = [-1., -0.65016502, -0.58856235, -0.26903553, -0.17370892,
+             -0.10011001, 0., 0.10011001, 0.17370892, 0.26903553, 0.58856235,
+             0.65016502, 1.]
+        y = [1.,0.62928599, 0.5797223, 0.39965815, 0.36322694, 0.3508061,
+             0.35214793, 0.3508061, 0.36322694, 0.39965815, 0.5797223,
+             0.62928599, 1.]
+        w = [1.00000000e+12, 6.88875973e+02, 4.89314737e+02, 4.26864807e+02,
+             6.07746770e+02, 4.51341444e+02, 3.17480210e+02, 4.51341444e+02,
+             6.07746770e+02, 4.26864807e+02, 4.89314737e+02, 6.88875973e+02,
+             1.00000000e+12]
+        spl = UnivariateSpline(x=x, y=y, w=w, s=None)
+        desired = array([0.35100374, 0.51715855, 0.87789547, 0.98719344])
+        assert_allclose(spl([0.1, 0.5, 0.9, 0.99]), desired, atol=5e-4)
+
+    def test_out_of_range_regression(self):
+        # Test different extrapolation modes. See ticket 3557
+        x = np.arange(5, dtype=float)
+        y = x**3
+
+        xp = linspace(-8, 13, 100)
+        xp_zeros = xp.copy()
+        xp_zeros[np.logical_or(xp_zeros < 0., xp_zeros > 4.)] = 0
+        xp_clip = xp.copy()
+        xp_clip[xp_clip < x[0]] = x[0]
+        xp_clip[xp_clip > x[-1]] = x[-1]
+
+        for cls in [UnivariateSpline, InterpolatedUnivariateSpline]:
+            spl = cls(x=x, y=y)
+            for ext in [0, 'extrapolate']:
+                assert_allclose(spl(xp, ext=ext), xp**3, atol=1e-16)
+                assert_allclose(cls(x, y, ext=ext)(xp), xp**3, atol=1e-16)
+            for ext in [1, 'zeros']:
+                assert_allclose(spl(xp, ext=ext), xp_zeros**3, atol=1e-16)
+                assert_allclose(cls(x, y, ext=ext)(xp), xp_zeros**3, atol=1e-16)
+            for ext in [2, 'raise']:
+                assert_raises(ValueError, spl, xp, **dict(ext=ext))
+            for ext in [3, 'const']:
+                assert_allclose(spl(xp, ext=ext), xp_clip**3, atol=1e-16)
+                assert_allclose(cls(x, y, ext=ext)(xp), xp_clip**3, atol=1e-16)
+
+        # also test LSQUnivariateSpline [which needs explicit knots]
+        t = spl.get_knots()[3:4]  # interior knots w/ default k=3
+        spl = LSQUnivariateSpline(x, y, t)
+        assert_allclose(spl(xp, ext=0), xp**3, atol=1e-16)
+        assert_allclose(spl(xp, ext=1), xp_zeros**3, atol=1e-16)
+        assert_raises(ValueError, spl, xp, **dict(ext=2))
+        assert_allclose(spl(xp, ext=3), xp_clip**3, atol=1e-16)
+
+        # also make sure that unknown values for `ext` are caught early
+        for ext in [-1, 'unknown']:
+            spl = UnivariateSpline(x, y)
+            assert_raises(ValueError, spl, xp, **dict(ext=ext))
+            assert_raises(ValueError, UnivariateSpline,
+                    **dict(x=x, y=y, ext=ext))
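+
+    # A minimal sketch (not part of the upstream suite) of the four `ext`
+    # modes accepted at evaluation time, for any fitted spline `spl`:
+    #   spl(xs, ext=0)  # or 'extrapolate': evaluate beyond the data range
+    #   spl(xs, ext=1)  # or 'zeros': return 0.0 outside [x[0], x[-1]]
+    #   spl(xs, ext=2)  # or 'raise': raise ValueError for out-of-range xs
+    #   spl(xs, ext=3)  # or 'const': clamp to the boundary values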
+
+    def test_lsq_fpchec(self):
+        xs = np.arange(100) * 1.
+        ys = np.arange(100) * 1.
+        knots = np.linspace(0, 99, 10)
+        bbox = (-1, 101)
+        assert_raises(ValueError, LSQUnivariateSpline, xs, ys, knots,
+                      bbox=bbox)
+
+    def test_derivative_and_antiderivative(self):
+        # Thin wrappers around splder/splantider, so only a light smoke test.
+        x = np.linspace(0, 1, 70)**3
+        y = np.cos(x)
+
+        spl = UnivariateSpline(x, y, s=0)
+        spl2 = spl.antiderivative(2).derivative(2)
+        assert_allclose(spl(0.3), spl2(0.3))
+
+        spl2 = spl.antiderivative(1)
+        assert_allclose(spl2(0.6) - spl2(0.2),
+                        spl.integral(0.2, 0.6))
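+
+    # The identities smoke-tested above, for any UnivariateSpline `spl`:
+    #   spl.antiderivative(n).derivative(n)(x) ~= spl(x)
+    #   F = spl.antiderivative(1);  F(b) - F(a) == spl.integral(a, b)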
+
+    def test_derivative_extrapolation(self):
+        # Regression test for gh-10195: the derivative of a spline with
+        # 'const' extrapolation evaluates to zero outside the data range.
+        x_values = [1, 2, 4, 6, 8.5]
+        y_values = [0.5, 0.8, 1.3, 2.5, 5]
+        f = UnivariateSpline(x_values, y_values, ext='const', k=3)
+
+        x = [-1, 0, -0.5, 9, 9.5, 10]
+        assert_allclose(f.derivative()(x), 0, atol=1e-15)
+
+    def test_integral_out_of_bounds(self):
+        # Regression test for gh-7906: .integral(a, b) was wrong when both
+        # a and b were out of bounds.
+        x = np.linspace(0., 1., 7)
+        for ext in range(4):
+            f = UnivariateSpline(x, x, s=0, ext=ext)
+            for (a, b) in [(1, 1), (1, 5), (2, 5),
+                           (0, 0), (-2, 0), (-2, -1)]:
+                assert_allclose(f.integral(a, b), 0, atol=1e-15)
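+
+    # Per the gh-7906 fix (as we read it), the integration limits are
+    # clamped to the data support, so an interval lying entirely outside
+    # of it integrates to exactly zero for every `ext` mode.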
+
+    def test_nan(self):
+        # Bail out early if the input data contains NaNs or infs.
+        x = np.arange(10, dtype=float)
+        y = x**3
+        w = np.ones_like(x)
+        # also test LSQUnivariateSpline [which needs explicit knots]
+        spl = UnivariateSpline(x, y, check_finite=True)
+        t = spl.get_knots()[3:4]  # interior knots w/ default k=3
+        y_end = y[-1]
+        for z in [np.nan, np.inf, -np.inf]:
+            y[-1] = z
+            assert_raises(ValueError, UnivariateSpline,
+                    **dict(x=x, y=y, check_finite=True))
+            assert_raises(ValueError, InterpolatedUnivariateSpline,
+                    **dict(x=x, y=y, check_finite=True))
+            assert_raises(ValueError, LSQUnivariateSpline,
+                    **dict(x=x, y=y, t=t, check_finite=True))
+            y[-1] = y_end  # check valid y but invalid w
+            w[-1] = z
+            assert_raises(ValueError, UnivariateSpline,
+                    **dict(x=x, y=y, w=w, check_finite=True))
+            assert_raises(ValueError, InterpolatedUnivariateSpline,
+                    **dict(x=x, y=y, w=w, check_finite=True))
+            assert_raises(ValueError, LSQUnivariateSpline,
+                    **dict(x=x, y=y, t=t, w=w, check_finite=True))
+
+    def test_strictly_increasing_x(self):
+        # Test that x is required to be strictly increasing for
+        # UnivariateSpline if s=0 and for InterpolatedUnivariateSpline,
+        # but merely increasing for UnivariateSpline if s>0
+        # and for LSQUnivariateSpline; see gh-8535
+        xx = np.arange(10, dtype=float)
+        yy = xx**3
+        x = np.arange(10, dtype=float)
+        x[1] = x[0]
+        y = x**3
+        w = np.ones_like(x)
+        # also test LSQUnivariateSpline [which needs explicit knots]
+        spl = UnivariateSpline(xx, yy, check_finite=True)
+        t = spl.get_knots()[3:4]  # interior knots w/ default k=3
+        # these fits only require non-decreasing x, so they must not raise
+        UnivariateSpline(x=x, y=y, w=w, s=1, check_finite=True)
+        LSQUnivariateSpline(x=x, y=y, t=t, w=w, check_finite=True)
+        assert_raises(ValueError, UnivariateSpline,
+                **dict(x=x, y=y, s=0, check_finite=True))
+        assert_raises(ValueError, InterpolatedUnivariateSpline,
+                **dict(x=x, y=y, check_finite=True))
+
+    def test_increasing_x(self):
+        # Test that x is required to be increasing, see gh-8535
+        xx = np.arange(10, dtype=float)
+        yy = xx**3
+        x = np.arange(10, dtype=float)
+        x[1] = x[0] - 1.0
+        y = x**3
+        w = np.ones_like(x)
+        # also test LSQUnivariateSpline [which needs explicit knots]
+        spl = UnivariateSpline(xx, yy, check_finite=True)
+        t = spl.get_knots()[3:4]  # interior knots w/ default k=3
+        assert_raises(ValueError, UnivariateSpline,
+                **dict(x=x, y=y, check_finite=True))
+        assert_raises(ValueError, InterpolatedUnivariateSpline,
+                **dict(x=x, y=y, check_finite=True))
+        assert_raises(ValueError, LSQUnivariateSpline,
+                **dict(x=x, y=y, t=t, w=w, check_finite=True))
+
+    def test_invalid_input_for_univariate_spline(self):
+
+        with assert_raises(ValueError) as info:
+            x_values = [1, 2, 4, 6, 8.5]
+            y_values = [0.5, 0.8, 1.3, 2.5]
+            UnivariateSpline(x_values, y_values)
+        assert "x and y should have a same length" in str(info.value)
+
+        with assert_raises(ValueError) as info:
+            x_values = [1, 2, 4, 6, 8.5]
+            y_values = [0.5, 0.8, 1.3, 2.5, 2.8]
+            w_values = [-1.0, 1.0, 1.0, 1.0]
+            UnivariateSpline(x_values, y_values, w=w_values)
+        assert "x, y, and w should have a same length" in str(info.value)
+
+        with assert_raises(ValueError) as info:
+            bbox = (-1,)
+            UnivariateSpline(x_values, y_values, bbox=bbox)
+        assert "bbox shape should be (2,)" in str(info.value)
+
+        with assert_raises(ValueError) as info:
+            UnivariateSpline(x_values, y_values, k=6)
+        assert "k should be 1 <= k <= 5" in str(info.value)
+
+        with assert_raises(ValueError) as info:
+            UnivariateSpline(x_values, y_values, s=-1.0)
+        assert "s should be s >= 0.0" in str(info.value)
+
+    def test_invalid_input_for_interpolated_univariate_spline(self):
+
+        with assert_raises(ValueError) as info:
+            x_values = [1, 2, 4, 6, 8.5]
+            y_values = [0.5, 0.8, 1.3, 2.5]
+            InterpolatedUnivariateSpline(x_values, y_values)
+        assert "x and y should have a same length" in str(info.value)
+
+        with assert_raises(ValueError) as info:
+            x_values = [1, 2, 4, 6, 8.5]
+            y_values = [0.5, 0.8, 1.3, 2.5, 2.8]
+            w_values = [-1.0, 1.0, 1.0, 1.0]
+            InterpolatedUnivariateSpline(x_values, y_values, w=w_values)
+        assert "x, y, and w should have a same length" in str(info.value)
+
+        with assert_raises(ValueError) as info:
+            bbox = (-1,)
+            InterpolatedUnivariateSpline(x_values, y_values, bbox=bbox)
+        assert "bbox shape should be (2,)" in str(info.value)
+
+        with assert_raises(ValueError) as info:
+            InterpolatedUnivariateSpline(x_values, y_values, k=6)
+        assert "k should be 1 <= k <= 5" in str(info.value)
+
+    def test_invalid_input_for_lsq_univariate_spline(self):
+
+        x_values = [1, 2, 4, 6, 8.5]
+        y_values = [0.5, 0.8, 1.3, 2.5, 2.8]
+        spl = UnivariateSpline(x_values, y_values, check_finite=True)
+        t_values = spl.get_knots()[3:4]  # interior knots w/ default k=3
+
+        with assert_raises(ValueError) as info:
+            x_values = [1, 2, 4, 6, 8.5]
+            y_values = [0.5, 0.8, 1.3, 2.5]
+            LSQUnivariateSpline(x_values, y_values, t_values)
+        assert "x and y should have a same length" in str(info.value)
+
+        with assert_raises(ValueError) as info:
+            x_values = [1, 2, 4, 6, 8.5]
+            y_values = [0.5, 0.8, 1.3, 2.5, 2.8]
+            w_values = [1.0, 1.0, 1.0, 1.0]
+            LSQUnivariateSpline(x_values, y_values, t_values, w=w_values)
+        assert "x, y, and w should have a same length" in str(info.value)
+
+        with assert_raises(ValueError) as info:
+            bbox = (100, -100)
+            LSQUnivariateSpline(x_values, y_values, t_values, bbox=bbox)
+        assert "Interior knots t must satisfy Schoenberg-Whitney conditions" in str(info.value)
+
+        with assert_raises(ValueError) as info:
+            bbox = (-1,)
+            LSQUnivariateSpline(x_values, y_values, t_values, bbox=bbox)
+        assert "bbox shape should be (2,)" in str(info.value)
+
+        with assert_raises(ValueError) as info:
+            LSQUnivariateSpline(x_values, y_values, t_values, k=6)
+        assert "k should be 1 <= k <= 5" in str(info.value)
+
+    def test_array_like_input(self):
+        x_values = np.array([1, 2, 4, 6, 8.5])
+        y_values = np.array([0.5, 0.8, 1.3, 2.5, 2.8])
+        w_values = np.array([1.0, 1.0, 1.0, 1.0, 1.0])
+        bbox = np.array([-100, 100])
+        # np.array input
+        spl1 = UnivariateSpline(x=x_values, y=y_values, w=w_values,
+                                bbox=bbox)
+        # list input
+        spl2 = UnivariateSpline(x=x_values.tolist(), y=y_values.tolist(),
+                                w=w_values.tolist(), bbox=bbox.tolist())
+
+        assert_allclose(spl1([0.1, 0.5, 0.9, 0.99]),
+                        spl2([0.1, 0.5, 0.9, 0.99]))
+
+    def test_fpknot_oob_crash(self):
+        # https://github.com/scipy/scipy/issues/3691
+        x = range(109)
+        y = [0., 0., 0., 0., 0., 10.9, 0., 11., 0.,
+             0., 0., 10.9, 0., 0., 0., 0., 0., 0.,
+             10.9, 0., 0., 0., 11., 0., 0., 0., 10.9,
+             0., 0., 0., 10.5, 0., 0., 0., 10.7, 0.,
+             0., 0., 11., 0., 0., 0., 0., 0., 0.,
+             10.9, 0., 0., 10.7, 0., 0., 0., 10.6, 0.,
+             0., 0., 10.5, 0., 0., 10.7, 0., 0., 10.5,
+             0., 0., 11.5, 0., 0., 0., 10.7, 0., 0.,
+             10.7, 0., 0., 10.9, 0., 0., 10.8, 0., 0.,
+             0., 10.7, 0., 0., 10.6, 0., 0., 0., 10.4,
+             0., 0., 10.6, 0., 0., 10.5, 0., 0., 0.,
+             10.7, 0., 0., 0., 10.4, 0., 0., 0., 10.8, 0.]
+        with suppress_warnings() as sup:
+            r = sup.record(
+                UserWarning,
+                r"""
+The maximal number of iterations maxit \(set to 20 by the program\)
+allowed for finding a smoothing spline with fp=s has been reached: s
+too small.
+There is an approximation returned but the corresponding weighted sum
+of squared residuals does not satisfy the condition abs\(fp-s\)/s < tol.""")
+            UnivariateSpline(x, y, k=1)
+            assert_equal(len(r), 1)
+
+
+class TestLSQBivariateSpline:
+    # NOTE: The systems in this test class are rank-deficient
+    def test_linear_constant(self):
+        x = [1,1,1,2,2,2,3,3,3]
+        y = [1,2,3,1,2,3,1,2,3]
+        z = [3,3,3,3,3,3,3,3,3]
+        s = 0.1
+        tx = [1+s,3-s]
+        ty = [1+s,3-s]
+        with suppress_warnings() as sup:
+            r = sup.record(UserWarning, "\nThe coefficients of the spline")
+            lut = LSQBivariateSpline(x,y,z,tx,ty,kx=1,ky=1)
+            assert_equal(len(r), 1)
+
+        assert_almost_equal(lut(2,2), 3.)
+
+    def test_bilinearity(self):
+        x = [1,1,1,2,2,2,3,3,3]
+        y = [1,2,3,1,2,3,1,2,3]
+        z = [0,7,8,3,4,7,1,3,4]
+        s = 0.1
+        tx = [1+s,3-s]
+        ty = [1+s,3-s]
+        with suppress_warnings() as sup:
+            # The fit ends with ier=1 (see ticket 1642), hence the warning.
+            sup.filter(UserWarning, "\nThe coefficients of the spline")
+            lut = LSQBivariateSpline(x,y,z,tx,ty,kx=1,ky=1)
+
+        tx, ty = lut.get_knots()
+        for xa, xb in zip(tx[:-1], tx[1:]):
+            for ya, yb in zip(ty[:-1], ty[1:]):
+                for t in [0.1, 0.5, 0.9]:
+                    for s in [0.3, 0.4, 0.7]:
+                        xp = xa*(1-t) + xb*t
+                        yp = ya*(1-s) + yb*s
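+                        # bilinear (tensor-product linear) interpolation
+                        # from the four corners of the knot cell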
+                        zp = (+ lut(xa, ya)*(1-t)*(1-s)
+                              + lut(xb, ya)*t*(1-s)
+                              + lut(xa, yb)*(1-t)*s
+                              + lut(xb, yb)*t*s)
+                        assert_almost_equal(lut(xp,yp), zp)
+
+    def test_integral(self):
+        x = [1,1,1,2,2,2,8,8,8]
+        y = [1,2,3,1,2,3,1,2,3]
+        z = array([0,7,8,3,4,7,1,3,4])
+
+        s = 0.1
+        tx = [1+s,3-s]
+        ty = [1+s,3-s]
+        with suppress_warnings() as sup:
+            r = sup.record(UserWarning, "\nThe coefficients of the spline")
+            lut = LSQBivariateSpline(x, y, z, tx, ty, kx=1, ky=1)
+            assert_equal(len(r), 1)
+        tx, ty = lut.get_knots()
+        tz = lut(tx, ty)
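+        # 2-D trapezoidal rule on the knot grid: each cell's area times the
+        # average of its four corner values, summed over all cells.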
+        trpz = .25*(diff(tx)[:,None]*diff(ty)[None,:]
+                    * (tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum()
+
+        assert_almost_equal(lut.integral(tx[0], tx[-1], ty[0], ty[-1]),
+                            trpz)
+
+    def test_empty_input(self):
+        # Test whether empty input returns an empty output. Ticket 1014
+        x = [1,1,1,2,2,2,3,3,3]
+        y = [1,2,3,1,2,3,1,2,3]
+        z = [3,3,3,3,3,3,3,3,3]
+        s = 0.1
+        tx = [1+s,3-s]
+        ty = [1+s,3-s]
+        with suppress_warnings() as sup:
+            r = sup.record(UserWarning, "\nThe coefficients of the spline")
+            lut = LSQBivariateSpline(x, y, z, tx, ty, kx=1, ky=1)
+            assert_equal(len(r), 1)
+
+        assert_array_equal(lut([], []), np.zeros((0,0)))
+        assert_array_equal(lut([], [], grid=False), np.zeros((0,)))
+
+    def test_invalid_input(self):
+        s = 0.1
+        tx = [1 + s, 3 - s]
+        ty = [1 + s, 3 - s]
+
+        with assert_raises(ValueError) as info:
+            x = np.linspace(1.0, 10.0)
+            y = np.linspace(1.0, 10.0)
+            z = np.linspace(1.0, 10.0, num=10)
+            LSQBivariateSpline(x, y, z, tx, ty)
+        assert "x, y, and z should have a same length" in str(info.value)
+
+        with assert_raises(ValueError) as info:
+            x = np.linspace(1.0, 10.0)
+            y = np.linspace(1.0, 10.0)
+            z = np.linspace(1.0, 10.0)
+            w = np.linspace(1.0, 10.0, num=20)
+            LSQBivariateSpline(x, y, z, tx, ty, w=w)
+        assert "x, y, z, and w should have a same length" in str(info.value)
+
+        with assert_raises(ValueError) as info:
+            w = np.linspace(-1.0, 10.0)
+            LSQBivariateSpline(x, y, z, tx, ty, w=w)
+        assert "w should be positive" in str(info.value)
+
+        with assert_raises(ValueError) as info:
+            bbox = (-100, 100, -100)
+            LSQBivariateSpline(x, y, z, tx, ty, bbox=bbox)
+        assert "bbox shape should be (4,)" in str(info.value)
+
+        with assert_raises(ValueError) as info:
+            LSQBivariateSpline(x, y, z, tx, ty, kx=10, ky=10)
+        assert "The length of x, y and z should be at least (kx+1) * (ky+1)" in \
+               str(info.value)
+
+        with assert_raises(ValueError) as exc_info:
+            LSQBivariateSpline(x, y, z, tx, ty, eps=0.0)
+        assert "eps should be between (0, 1)" in str(exc_info.value)
+
+        with assert_raises(ValueError) as exc_info:
+            LSQBivariateSpline(x, y, z, tx, ty, eps=1.0)
+        assert "eps should be between (0, 1)" in str(exc_info.value)
+
+    def test_array_like_input(self):
+        s = 0.1
+        tx = np.array([1 + s, 3 - s])
+        ty = np.array([1 + s, 3 - s])
+        x = np.linspace(1.0, 10.0)
+        y = np.linspace(1.0, 10.0)
+        z = np.linspace(1.0, 10.0)
+        w = np.linspace(1.0, 10.0)
+        bbox = np.array([1.0, 10.0, 1.0, 10.0])
+
+        with suppress_warnings() as sup:
+            r = sup.record(UserWarning, "\nThe coefficients of the spline")
+            # np.array input
+            spl1 = LSQBivariateSpline(x, y, z, tx, ty, w=w, bbox=bbox)
+            # list input
+            spl2 = LSQBivariateSpline(x.tolist(), y.tolist(), z.tolist(),
+                                      tx.tolist(), ty.tolist(), w=w.tolist(),
+                                      bbox=bbox)
+            assert_allclose(spl1(2.0, 2.0), spl2(2.0, 2.0))
+            assert_equal(len(r), 2)
+
+    def test_unequal_length_of_knots(self):
+        """Test for the case when the input knot-location arrays in x and y are
+        of different lengths.
+        """
+        x, y = np.mgrid[0:100, 0:100]
+        x = x.ravel()
+        y = y.ravel()
+        z = 3.0 * np.ones_like(x)
+        tx = np.linspace(0.1, 98.0, 29)
+        ty = np.linspace(0.1, 98.0, 33)
+        with suppress_warnings() as sup:
+            r = sup.record(UserWarning, "\nThe coefficients of the spline")
+            lut = LSQBivariateSpline(x,y,z,tx,ty)
+            assert_equal(len(r), 1)
+
+        assert_almost_equal(lut(x, y, grid=False), z)
+
+
+class TestSmoothBivariateSpline:
+    def test_linear_constant(self):
+        x = [1,1,1,2,2,2,3,3,3]
+        y = [1,2,3,1,2,3,1,2,3]
+        z = [3,3,3,3,3,3,3,3,3]
+        lut = SmoothBivariateSpline(x,y,z,kx=1,ky=1)
+        assert_array_almost_equal(lut.get_knots(),([1,1,3,3],[1,1,3,3]))
+        assert_array_almost_equal(lut.get_coeffs(),[3,3,3,3])
+        assert_almost_equal(lut.get_residual(),0.0)
+        assert_array_almost_equal(lut([1,1.5,2],[1,1.5]),[[3,3],[3,3],[3,3]])
+
+    def test_linear_1d(self):
+        x = [1,1,1,2,2,2,3,3,3]
+        y = [1,2,3,1,2,3,1,2,3]
+        z = [0,0,0,2,2,2,4,4,4]
+        lut = SmoothBivariateSpline(x,y,z,kx=1,ky=1)
+        assert_array_almost_equal(lut.get_knots(),([1,1,3,3],[1,1,3,3]))
+        assert_array_almost_equal(lut.get_coeffs(),[0,0,4,4])
+        assert_almost_equal(lut.get_residual(),0.0)
+        assert_array_almost_equal(lut([1,1.5,2],[1,1.5]),[[0,0],[1,1],[2,2]])
+
+    def test_integral(self):
+        x = [1,1,1,2,2,2,4,4,4]
+        y = [1,2,3,1,2,3,1,2,3]
+        z = array([0,7,8,3,4,7,1,3,4])
+
+        with suppress_warnings() as sup:
+            # The fit ends with ier=1 (see ticket 1642), hence the warning.
+            sup.filter(UserWarning, "\nThe required storage space")
+            lut = SmoothBivariateSpline(x, y, z, kx=1, ky=1, s=0)
+
+        tx = [1,2,4]
+        ty = [1,2,3]
+
+        tz = lut(tx, ty)
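+        # Reference value: 2-D trapezoidal rule (cell area times the mean
+        # of the four corner values, summed over the grid cells).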
+        trpz = .25*(diff(tx)[:,None]*diff(ty)[None,:]
+                    * (tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum()
+        assert_almost_equal(lut.integral(tx[0], tx[-1], ty[0], ty[-1]), trpz)
+
+        lut2 = SmoothBivariateSpline(x, y, z, kx=2, ky=2, s=0)
+        assert_almost_equal(lut2.integral(tx[0], tx[-1], ty[0], ty[-1]), trpz,
+                            decimal=0)  # the quadratures give 23.75 and 23.85
+
+        tz = lut(tx[:-1], ty[:-1])
+        trpz = .25*(diff(tx[:-1])[:,None]*diff(ty[:-1])[None,:]
+                    * (tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum()
+        assert_almost_equal(lut.integral(tx[0], tx[-2], ty[0], ty[-2]), trpz)
+
+    def test_rerun_lwrk2_too_small(self):
+        # In this setting, lwrk2 is too small on the default run. Check for
+        # equality with the bisplrep/bisplev output because there an
+        # automatic re-run of the spline fit is done if ier > 10.
+        x = np.linspace(-2, 2, 80)
+        y = np.linspace(-2, 2, 80)
+        z = x + y
+        xi = np.linspace(-1, 1, 100)
+        yi = np.linspace(-2, 2, 100)
+        tck = bisplrep(x, y, z)
+        res1 = bisplev(xi, yi, tck)
+        interp_ = SmoothBivariateSpline(x, y, z)
+        res2 = interp_(xi, yi)
+        assert_almost_equal(res1, res2)
+
+    def test_invalid_input(self):
+
+        with assert_raises(ValueError) as info:
+            x = np.linspace(1.0, 10.0)
+            y = np.linspace(1.0, 10.0)
+            z = np.linspace(1.0, 10.0, num=10)
+            SmoothBivariateSpline(x, y, z)
+        assert "x, y, and z should have a same length" in str(info.value)
+
+        with assert_raises(ValueError) as info:
+            x = np.linspace(1.0, 10.0)
+            y = np.linspace(1.0, 10.0)
+            z = np.linspace(1.0, 10.0)
+            w = np.linspace(1.0, 10.0, num=20)
+            SmoothBivariateSpline(x, y, z, w=w)
+        assert "x, y, z, and w should have a same length" in str(info.value)
+
+        with assert_raises(ValueError) as info:
+            w = np.linspace(-1.0, 10.0)
+            SmoothBivariateSpline(x, y, z, w=w)
+        assert "w should be positive" in str(info.value)
+
+        with assert_raises(ValueError) as info:
+            bbox = (-100, 100, -100)
+            SmoothBivariateSpline(x, y, z, bbox=bbox)
+        assert "bbox shape should be (4,)" in str(info.value)
+
+        with assert_raises(ValueError) as info:
+            SmoothBivariateSpline(x, y, z, kx=10, ky=10)
+        assert "The length of x, y and z should be at least (kx+1) * (ky+1)" in\
+               str(info.value)
+
+        with assert_raises(ValueError) as info:
+            SmoothBivariateSpline(x, y, z, s=-1.0)
+        assert "s should be s >= 0.0" in str(info.value)
+
+        with assert_raises(ValueError) as exc_info:
+            SmoothBivariateSpline(x, y, z, eps=0.0)
+        assert "eps should be between (0, 1)" in str(exc_info.value)
+
+        with assert_raises(ValueError) as exc_info:
+            SmoothBivariateSpline(x, y, z, eps=1.0)
+        assert "eps should be between (0, 1)" in str(exc_info.value)
+
+    def test_array_like_input(self):
+        x = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3])
+        y = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3])
+        z = np.array([3, 3, 3, 3, 3, 3, 3, 3, 3])
+        w = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1])
+        bbox = np.array([1.0, 3.0, 1.0, 3.0])
+        # np.array input
+        spl1 = SmoothBivariateSpline(x, y, z, w=w, bbox=bbox, kx=1, ky=1)
+        # list input
+        spl2 = SmoothBivariateSpline(x.tolist(), y.tolist(), z.tolist(),
+                                     bbox=bbox.tolist(), w=w.tolist(),
+                                     kx=1, ky=1)
+        assert_allclose(spl1(0.1, 0.5), spl2(0.1, 0.5))
+
+
+class TestLSQSphereBivariateSpline:
+    def setup_method(self):
+        # define the input data and coordinates
+        ntheta, nphi = 70, 90
+        theta = linspace(0.5/(ntheta - 1), 1 - 0.5/(ntheta - 1), ntheta) * pi
+        phi = linspace(0.5/(nphi - 1), 1 - 0.5/(nphi - 1), nphi) * 2. * pi
+        data = ones((theta.shape[0], phi.shape[0]))
+        # define knots and extract data values at the knots
+        knotst = theta[::5]
+        knotsp = phi[::5]
+        knotdata = data[::5, ::5]
+        # calculate spline coefficients
+        lats, lons = meshgrid(theta, phi)
+        lut_lsq = LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
+                                           data.T.ravel(), knotst, knotsp)
+        self.lut_lsq = lut_lsq
+        self.data = knotdata
+        self.new_lons, self.new_lats = knotsp, knotst
+
+    def test_linear_constant(self):
+        assert_almost_equal(self.lut_lsq.get_residual(), 0.0)
+        assert_array_almost_equal(self.lut_lsq(self.new_lats, self.new_lons),
+                                  self.data)
+
+    def test_empty_input(self):
+        assert_array_almost_equal(self.lut_lsq([], []), np.zeros((0,0)))
+        assert_array_almost_equal(self.lut_lsq([], [], grid=False), np.zeros((0,)))
+
+    def test_invalid_input(self):
+        ntheta, nphi = 70, 90
+        theta = linspace(0.5 / (ntheta - 1), 1 - 0.5 / (ntheta - 1),
+                         ntheta) * pi
+        phi = linspace(0.5 / (nphi - 1), 1 - 0.5 / (nphi - 1), nphi) * 2. * pi
+        data = ones((theta.shape[0], phi.shape[0]))
+        # define knots and extract data values at the knots
+        knotst = theta[::5]
+        knotsp = phi[::5]
+
+        with assert_raises(ValueError) as exc_info:
+            invalid_theta = linspace(-0.1, 1.0, num=ntheta) * pi
+            invalid_lats, lons = meshgrid(invalid_theta, phi)
+            LSQSphereBivariateSpline(invalid_lats.ravel(), lons.ravel(),
+                                     data.T.ravel(), knotst, knotsp)
+        assert "theta should be between [0, pi]" in str(exc_info.value)
+
+        with assert_raises(ValueError) as exc_info:
+            invalid_theta = linspace(0.1, 1.1, num=ntheta) * pi
+            invalid_lats, lons = meshgrid(invalid_theta, phi)
+            LSQSphereBivariateSpline(invalid_lats.ravel(), lons.ravel(),
+                                     data.T.ravel(), knotst, knotsp)
+        assert "theta should be between [0, pi]" in str(exc_info.value)
+
+        with assert_raises(ValueError) as exc_info:
+            invalid_phi = linspace(-0.1, 1.0, num=ntheta) * 2.0 * pi
+            lats, invalid_lons = meshgrid(theta, invalid_phi)
+            LSQSphereBivariateSpline(lats.ravel(), invalid_lons.ravel(),
+                                     data.T.ravel(), knotst, knotsp)
+        assert "phi should be between [0, 2pi]" in str(exc_info.value)
+
+        with assert_raises(ValueError) as exc_info:
+            invalid_phi = linspace(0.0, 1.1, num=ntheta) * 2.0 * pi
+            lats, invalid_lons = meshgrid(theta, invalid_phi)
+            LSQSphereBivariateSpline(lats.ravel(), invalid_lons.ravel(),
+                                     data.T.ravel(), knotst, knotsp)
+        assert "phi should be between [0, 2pi]" in str(exc_info.value)
+
+        lats, lons = meshgrid(theta, phi)
+
+        with assert_raises(ValueError) as exc_info:
+            invalid_knotst = np.copy(knotst)
+            invalid_knotst[0] = -0.1
+            LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
+                                     data.T.ravel(), invalid_knotst, knotsp)
+        assert "tt should be between (0, pi)" in str(exc_info.value)
+
+        with assert_raises(ValueError) as exc_info:
+            invalid_knotst = np.copy(knotst)
+            invalid_knotst[0] = pi
+            LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
+                                     data.T.ravel(), invalid_knotst, knotsp)
+        assert "tt should be between (0, pi)" in str(exc_info.value)
+
+        with assert_raises(ValueError) as exc_info:
+            invalid_knotsp = np.copy(knotsp)
+            invalid_knotsp[0] = -0.1
+            LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
+                                     data.T.ravel(), knotst, invalid_knotsp)
+        assert "tp should be between (0, 2pi)" in str(exc_info.value)
+
+        with assert_raises(ValueError) as exc_info:
+            invalid_knotsp = np.copy(knotsp)
+            invalid_knotsp[0] = 2 * pi
+            LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
+                                     data.T.ravel(), knotst, invalid_knotsp)
+        assert "tp should be between (0, 2pi)" in str(exc_info.value)
+
+        with assert_raises(ValueError) as exc_info:
+            invalid_w = array([-1.0, 1.0, 1.5, 0.5, 1.0, 1.5, 0.5, 1.0, 1.0])
+            LSQSphereBivariateSpline(lats.ravel(), lons.ravel(), data.T.ravel(),
+                                     knotst, knotsp, w=invalid_w)
+        assert "w should be positive" in str(exc_info.value)
+
+        with assert_raises(ValueError) as exc_info:
+            LSQSphereBivariateSpline(lats.ravel(), lons.ravel(), data.T.ravel(),
+                                     knotst, knotsp, eps=0.0)
+        assert "eps should be between (0, 1)" in str(exc_info.value)
+
+        with assert_raises(ValueError) as exc_info:
+            LSQSphereBivariateSpline(lats.ravel(), lons.ravel(), data.T.ravel(),
+                                     knotst, knotsp, eps=1.0)
+        assert "eps should be between (0, 1)" in str(exc_info.value)
+
+    def test_array_like_input(self):
+        ntheta, nphi = 70, 90
+        theta = linspace(0.5 / (ntheta - 1), 1 - 0.5 / (ntheta - 1),
+                         ntheta) * pi
+        phi = linspace(0.5 / (nphi - 1), 1 - 0.5 / (nphi - 1),
+                       nphi) * 2. * pi
+        lats, lons = meshgrid(theta, phi)
+        data = ones((theta.shape[0], phi.shape[0]))
+        # define knots and extract data values at the knots
+        knotst = theta[::5]
+        knotsp = phi[::5]
+        w = ones((lats.ravel().shape[0]))
+
+        # np.array input
+        spl1 = LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
+                                        data.T.ravel(), knotst, knotsp, w=w)
+        # list input
+        spl2 = LSQSphereBivariateSpline(lats.ravel().tolist(),
+                                        lons.ravel().tolist(),
+                                        data.T.ravel().tolist(),
+                                        knotst.tolist(),
+                                        knotsp.tolist(), w=w.tolist())
+        assert_array_almost_equal(spl1(1.0, 1.0), spl2(1.0, 1.0))
+
+
+class TestSmoothSphereBivariateSpline:
+    def setup_method(self):
+        theta = array([.25*pi, .25*pi, .25*pi, .5*pi, .5*pi, .5*pi, .75*pi,
+                       .75*pi, .75*pi])
+        phi = array([.5 * pi, pi, 1.5 * pi, .5 * pi, pi, 1.5 * pi, .5 * pi, pi,
+                     1.5 * pi])
+        r = array([3, 3, 3, 3, 3, 3, 3, 3, 3])
+        self.lut = SmoothSphereBivariateSpline(theta, phi, r, s=1E10)
+
+    def test_linear_constant(self):
+        assert_almost_equal(self.lut.get_residual(), 0.)
+        assert_array_almost_equal(self.lut([1, 1.5, 2],[1, 1.5]),
+                                  [[3, 3], [3, 3], [3, 3]])
+
+    def test_empty_input(self):
+        assert_array_almost_equal(self.lut([], []), np.zeros((0,0)))
+        assert_array_almost_equal(self.lut([], [], grid=False), np.zeros((0,)))
+
+    def test_invalid_input(self):
+        theta = array([.25 * pi, .25 * pi, .25 * pi, .5 * pi, .5 * pi, .5 * pi,
+                       .75 * pi, .75 * pi, .75 * pi])
+        phi = array([.5 * pi, pi, 1.5 * pi, .5 * pi, pi, 1.5 * pi, .5 * pi, pi,
+                     1.5 * pi])
+        r = array([3, 3, 3, 3, 3, 3, 3, 3, 3])
+
+        with assert_raises(ValueError) as exc_info:
+            invalid_theta = array([-0.1 * pi, .25 * pi, .25 * pi, .5 * pi,
+                                   .5 * pi, .5 * pi, .75 * pi, .75 * pi,
+                                   .75 * pi])
+            SmoothSphereBivariateSpline(invalid_theta, phi, r, s=1E10)
+        assert "theta should be between [0, pi]" in str(exc_info.value)
+
+        with assert_raises(ValueError) as exc_info:
+            invalid_theta = array([.25 * pi, .25 * pi, .25 * pi, .5 * pi,
+                                   .5 * pi, .5 * pi, .75 * pi, .75 * pi,
+                                   1.1 * pi])
+            SmoothSphereBivariateSpline(invalid_theta, phi, r, s=1E10)
+        assert "theta should be between [0, pi]" in str(exc_info.value)
+
+        with assert_raises(ValueError) as exc_info:
+            invalid_phi = array([-.1 * pi, pi, 1.5 * pi, .5 * pi, pi, 1.5 * pi,
+                                 .5 * pi, pi, 1.5 * pi])
+            SmoothSphereBivariateSpline(theta, invalid_phi, r, s=1E10)
+        assert "phi should be between [0, 2pi]" in str(exc_info.value)
+
+        with assert_raises(ValueError) as exc_info:
+            invalid_phi = array([1.0 * pi, pi, 1.5 * pi, .5 * pi, pi, 1.5 * pi,
+                                 .5 * pi, pi, 2.1 * pi])
+            SmoothSphereBivariateSpline(theta, invalid_phi, r, s=1E10)
+        assert "phi should be between [0, 2pi]" in str(exc_info.value)
+
+        with assert_raises(ValueError) as exc_info:
+            invalid_w = array([-1.0, 1.0, 1.5, 0.5, 1.0, 1.5, 0.5, 1.0, 1.0])
+            SmoothSphereBivariateSpline(theta, phi, r, w=invalid_w, s=1E10)
+        assert "w should be positive" in str(exc_info.value)
+
+        with assert_raises(ValueError) as exc_info:
+            SmoothSphereBivariateSpline(theta, phi, r, s=-1.0)
+        assert "s should be positive" in str(exc_info.value)
+
+        with assert_raises(ValueError) as exc_info:
+            SmoothSphereBivariateSpline(theta, phi, r, eps=-1.0)
+        assert "eps should be between (0, 1)" in str(exc_info.value)
+
+        with assert_raises(ValueError) as exc_info:
+            SmoothSphereBivariateSpline(theta, phi, r, eps=1.0)
+        assert "eps should be between (0, 1)" in str(exc_info.value)
+
+    def test_array_like_input(self):
+        theta = np.array([.25 * pi, .25 * pi, .25 * pi, .5 * pi, .5 * pi,
+                          .5 * pi, .75 * pi, .75 * pi, .75 * pi])
+        phi = np.array([.5 * pi, pi, 1.5 * pi, .5 * pi, pi, 1.5 * pi, .5 * pi,
+                        pi, 1.5 * pi])
+        r = np.array([3, 3, 3, 3, 3, 3, 3, 3, 3])
+        w = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
+
+        # np.array input
+        spl1 = SmoothSphereBivariateSpline(theta, phi, r, w=w, s=1E10)
+
+        # list input
+        spl2 = SmoothSphereBivariateSpline(theta.tolist(), phi.tolist(),
+                                           r.tolist(), w=w.tolist(), s=1E10)
+        assert_array_almost_equal(spl1(1.0, 1.0), spl2(1.0, 1.0))
+
+
+class TestRectBivariateSpline:
+    def test_defaults(self):
+        x = array([1,2,3,4,5])
+        y = array([1,2,3,4,5])
+        z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
+        lut = RectBivariateSpline(x,y,z)
+        assert_array_almost_equal(lut(x,y),z)
+
+    def test_evaluate(self):
+        x = array([1,2,3,4,5])
+        y = array([1,2,3,4,5])
+        z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
+        lut = RectBivariateSpline(x,y,z)
+
+        xi = [1, 2.3, 5.3, 0.5, 3.3, 1.2, 3]
+        yi = [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]
+        zi = lut.ev(xi, yi)
+        zi2 = array([lut(xp, yp)[0,0] for xp, yp in zip(xi, yi)])
+
+        assert_almost_equal(zi, zi2)
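+
+    # The two evaluation conventions exercised above, for a fitted
+    # RectBivariateSpline `lut`: lut.ev(xi, yi) evaluates pointwise at the
+    # pairs (xi[k], yi[k]), while lut(xp, yp) evaluates on the outer-product
+    # grid, so a scalar pair yields a 1x1 array -- hence the [0, 0] index.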
+
+    def test_derivatives_grid(self):
+        x = array([1,2,3,4,5])
+        y = array([1,2,3,4,5])
+        z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
+        dx = array([[0,0,-20,0,0],[0,0,13,0,0],[0,0,4,0,0],
+            [0,0,-11,0,0],[0,0,4,0,0]])/6.
+        dy = array([[4,-1,0,1,-4],[4,-1,0,1,-4],[0,1.5,0,-1.5,0],
+            [2,.25,0,-.25,-2],[4,-1,0,1,-4]])
+        dxdy = array([[40,-25,0,25,-40],[-26,16.25,0,-16.25,26],
+            [-8,5,0,-5,8],[22,-13.75,0,13.75,-22],[-8,5,0,-5,8]])/6.
+        lut = RectBivariateSpline(x,y,z)
+        assert_array_almost_equal(lut(x,y,dx=1),dx)
+        assert_array_almost_equal(lut(x,y,dy=1),dy)
+        assert_array_almost_equal(lut(x,y,dx=1,dy=1),dxdy)
+
+    def test_derivatives(self):
+        x = array([1,2,3,4,5])
+        y = array([1,2,3,4,5])
+        z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
+        dx = array([0,0,2./3,0,0])
+        dy = array([4,-1,0,-.25,-4])
+        dxdy = array([160,65,0,55,32])/24.
+        lut = RectBivariateSpline(x,y,z)
+        assert_array_almost_equal(lut(x,y,dx=1,grid=False),dx)
+        assert_array_almost_equal(lut(x,y,dy=1,grid=False),dy)
+        assert_array_almost_equal(lut(x,y,dx=1,dy=1,grid=False),dxdy)
+
+    def test_partial_derivative_method_grid(self):
+        x = array([1, 2, 3, 4, 5])
+        y = array([1, 2, 3, 4, 5])
+        z = array([[1, 2, 1, 2, 1],
+                   [1, 2, 1, 2, 1],
+                   [1, 2, 3, 2, 1],
+                   [1, 2, 2, 2, 1],
+                   [1, 2, 1, 2, 1]])
+        dx = array([[0, 0, -20, 0, 0],
+                    [0, 0, 13, 0, 0],
+                    [0, 0, 4, 0, 0],
+                    [0, 0, -11, 0, 0],
+                    [0, 0, 4, 0, 0]]) / 6.
+        dy = array([[4, -1, 0, 1, -4],
+                    [4, -1, 0, 1, -4],
+                    [0, 1.5, 0, -1.5, 0],
+                    [2, .25, 0, -.25, -2],
+                    [4, -1, 0, 1, -4]])
+        dxdy = array([[40, -25, 0, 25, -40],
+                      [-26, 16.25, 0, -16.25, 26],
+                      [-8, 5, 0, -5, 8],
+                      [22, -13.75, 0, 13.75, -22],
+                      [-8, 5, 0, -5, 8]]) / 6.
+        lut = RectBivariateSpline(x, y, z)
+        assert_array_almost_equal(lut.partial_derivative(1, 0)(x, y), dx)
+        assert_array_almost_equal(lut.partial_derivative(0, 1)(x, y), dy)
+        assert_array_almost_equal(lut.partial_derivative(1, 1)(x, y), dxdy)
+
+    def test_partial_derivative_method(self):
+        x = array([1, 2, 3, 4, 5])
+        y = array([1, 2, 3, 4, 5])
+        z = array([[1, 2, 1, 2, 1],
+                   [1, 2, 1, 2, 1],
+                   [1, 2, 3, 2, 1],
+                   [1, 2, 2, 2, 1],
+                   [1, 2, 1, 2, 1]])
+        dx = array([0, 0, 2./3, 0, 0])
+        dy = array([4, -1, 0, -.25, -4])
+        dxdy = array([160, 65, 0, 55, 32]) / 24.
+        lut = RectBivariateSpline(x, y, z)
+        assert_array_almost_equal(lut.partial_derivative(1, 0)(x, y,
+                                                               grid=False),
+                                  dx)
+        assert_array_almost_equal(lut.partial_derivative(0, 1)(x, y,
+                                                               grid=False),
+                                  dy)
+        assert_array_almost_equal(lut.partial_derivative(1, 1)(x, y,
+                                                               grid=False),
+                                  dxdy)
+
+    def test_partial_derivative_order_too_large(self):
+        x = array([0, 1, 2, 3, 4], dtype=float)
+        y = x.copy()
+        z = ones((x.size, y.size))
+        lut = RectBivariateSpline(x, y, z)
+        with assert_raises(ValueError):
+            lut.partial_derivative(4, 1)
+
+    def test_broadcast(self):
+        x = array([1,2,3,4,5])
+        y = array([1,2,3,4,5])
+        z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
+        lut = RectBivariateSpline(x,y,z)
+        assert_allclose(lut(x, y), lut(x[:,None], y[None,:], grid=False))
+
+    def test_invalid_input(self):
+
+        with assert_raises(ValueError) as info:
+            x = array([6, 2, 3, 4, 5])
+            y = array([1, 2, 3, 4, 5])
+            z = array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
+                       [1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
+            RectBivariateSpline(x, y, z)
+        assert "x must be strictly increasing" in str(info.value)
+
+        with assert_raises(ValueError) as info:
+            x = array([1, 2, 3, 4, 5])
+            y = array([2, 2, 3, 4, 5])
+            z = array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
+                       [1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
+            RectBivariateSpline(x, y, z)
+        assert "y must be strictly increasing" in str(info.value)
+
+        with assert_raises(ValueError) as info:
+            x = array([1, 2, 3, 4, 5])
+            y = array([1, 2, 3, 4, 5])
+            z = array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
+                       [1, 2, 2, 2, 1]])
+            RectBivariateSpline(x, y, z)
+        assert "x dimension of z must have same number of elements as x"\
+               in str(info.value)
+
+        with assert_raises(ValueError) as info:
+            x = array([1, 2, 3, 4, 5])
+            y = array([1, 2, 3, 4, 5])
+            z = array([[1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 3, 2],
+                       [1, 2, 2, 2], [1, 2, 1, 2]])
+            RectBivariateSpline(x, y, z)
+        assert "y dimension of z must have same number of elements as y"\
+               in str(info.value)
+
+        with assert_raises(ValueError) as info:
+            x = array([1, 2, 3, 4, 5])
+            y = array([1, 2, 3, 4, 5])
+            z = array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
+                       [1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
+            bbox = (-100, 100, -100)
+            RectBivariateSpline(x, y, z, bbox=bbox)
+        assert "bbox shape should be (4,)" in str(info.value)
+
+        with assert_raises(ValueError) as info:
+            RectBivariateSpline(x, y, z, s=-1.0)
+        assert "s should be s >= 0.0" in str(info.value)
+
+    def test_array_like_input(self):
+        x = array([1, 2, 3, 4, 5])
+        y = array([1, 2, 3, 4, 5])
+        z = array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
+                   [1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
+        bbox = array([1, 5, 1, 5])
+
+        spl1 = RectBivariateSpline(x, y, z, bbox=bbox)
+        spl2 = RectBivariateSpline(x.tolist(), y.tolist(), z.tolist(),
+                                   bbox=bbox.tolist())
+        assert_array_almost_equal(spl1(1.0, 1.0), spl2(1.0, 1.0))
+
+    def test_not_increasing_input(self):
+        # gh-8565
+        NSamp = 20
+        Theta = np.random.uniform(0, np.pi, NSamp)
+        Phi = np.random.uniform(0, 2 * np.pi, NSamp)
+        Data = np.ones(NSamp)
+
+        Interpolator = SmoothSphereBivariateSpline(Theta, Phi, Data, s=3.5)
+
+        NLon = 6
+        NLat = 3
+        GridPosLats = np.arange(NLat) / NLat * np.pi
+        GridPosLons = np.arange(NLon) / NLon * 2 * np.pi
+
+        # Strictly increasing grids should evaluate without error.
+        Interpolator(GridPosLats, GridPosLons)
+
+        nonGridPosLats = GridPosLats.copy()
+        nonGridPosLats[2] = 0.001
+        with assert_raises(ValueError) as exc_info:
+            Interpolator(nonGridPosLats, GridPosLons)
+        assert "x must be strictly increasing" in str(exc_info.value)
+
+        nonGridPosLons = GridPosLons.copy()
+        nonGridPosLons[2] = 0.001
+        with assert_raises(ValueError) as exc_info:
+            Interpolator(GridPosLats, nonGridPosLons)
+        assert "y must be strictly increasing" in str(exc_info.value)
+
+
+class TestRectSphereBivariateSpline:
+    def test_defaults(self):
+        y = linspace(0.01, 2*pi-0.01, 7)
+        x = linspace(0.01, pi-0.01, 7)
+        z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1],
+                   [1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1],
+                   [1,2,1,2,1,2,1]])
+        lut = RectSphereBivariateSpline(x,y,z)
+        assert_array_almost_equal(lut(x,y),z)
+
+    def test_evaluate(self):
+        y = linspace(0.01, 2*pi-0.01, 7)
+        x = linspace(0.01, pi-0.01, 7)
+        z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1],
+                   [1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1],
+                   [1,2,1,2,1,2,1]])
+        lut = RectSphereBivariateSpline(x,y,z)
+        yi = [0.2, 1, 2.3, 2.35, 3.0, 3.99, 5.25]
+        xi = [1.5, 0.4, 1.1, 0.45, 0.2345, 1., 0.0001]
+        zi = lut.ev(xi, yi)
+        zi2 = array([lut(xp, yp)[0,0] for xp, yp in zip(xi, yi)])
+        assert_almost_equal(zi, zi2)
+
+    def test_invalid_input(self):
+        data = np.dot(np.atleast_2d(90. - np.linspace(-80., 80., 18)).T,
+                      np.atleast_2d(180. - np.abs(np.linspace(0., 350., 9)))).T
+
+        with assert_raises(ValueError) as exc_info:
+            lats = np.linspace(-1, 170, 9) * np.pi / 180.
+            lons = np.linspace(0, 350, 18) * np.pi / 180.
+            RectSphereBivariateSpline(lats, lons, data)
+        assert "u should be between (0, pi)" in str(exc_info.value)
+
+        with assert_raises(ValueError) as exc_info:
+            lats = np.linspace(10, 181, 9) * np.pi / 180.
+            lons = np.linspace(0, 350, 18) * np.pi / 180.
+            RectSphereBivariateSpline(lats, lons, data)
+        assert "u should be between (0, pi)" in str(exc_info.value)
+
+        with assert_raises(ValueError) as exc_info:
+            lats = np.linspace(10, 170, 9) * np.pi / 180.
+            lons = np.linspace(-181, 10, 18) * np.pi / 180.
+            RectSphereBivariateSpline(lats, lons, data)
+        assert "v[0] should be between [-pi, pi)" in str(exc_info.value)
+
+        with assert_raises(ValueError) as exc_info:
+            lats = np.linspace(10, 170, 9) * np.pi / 180.
+            lons = np.linspace(-10, 360, 18) * np.pi / 180.
+            RectSphereBivariateSpline(lats, lons, data)
+        assert "v[-1] should be v[0] + 2pi or less" in str(exc_info.value)
+
+        with assert_raises(ValueError) as exc_info:
+            lats = np.linspace(10, 170, 9) * np.pi / 180.
+            lons = np.linspace(10, 350, 18) * np.pi / 180.
+            RectSphereBivariateSpline(lats, lons, data, s=-1)
+        assert "s should be positive" in str(exc_info.value)
+
+    def test_derivatives_grid(self):
+        y = linspace(0.01, 2*pi-0.01, 7)
+        x = linspace(0.01, pi-0.01, 7)
+        z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1],
+                   [1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1],
+                   [1,2,1,2,1,2,1]])
+
+        lut = RectSphereBivariateSpline(x,y,z)
+
+        y = linspace(0.02, 2*pi-0.02, 7)
+        x = linspace(0.02, pi-0.02, 7)
+
+        assert_allclose(lut(x, y, dtheta=1), _numdiff_2d(lut, x, y, dx=1),
+                        rtol=1e-4, atol=1e-4)
+        assert_allclose(lut(x, y, dphi=1), _numdiff_2d(lut, x, y, dy=1),
+                        rtol=1e-4, atol=1e-4)
+        assert_allclose(lut(x, y, dtheta=1, dphi=1), _numdiff_2d(lut, x, y, dx=1, dy=1, eps=1e-6),
+                        rtol=1e-3, atol=1e-3)
+
+        assert_array_equal(lut(x, y, dtheta=1),
+                           lut.partial_derivative(1, 0)(x, y))
+        assert_array_equal(lut(x, y, dphi=1),
+                           lut.partial_derivative(0, 1)(x, y))
+        assert_array_equal(lut(x, y, dtheta=1, dphi=1),
+                           lut.partial_derivative(1, 1)(x, y))
+
+        assert_array_equal(lut(x, y, dtheta=1, grid=False),
+                           lut.partial_derivative(1, 0)(x, y, grid=False))
+        assert_array_equal(lut(x, y, dphi=1, grid=False),
+                           lut.partial_derivative(0, 1)(x, y, grid=False))
+        assert_array_equal(lut(x, y, dtheta=1, dphi=1, grid=False),
+                           lut.partial_derivative(1, 1)(x, y, grid=False))
+
+    def test_derivatives(self):
+        y = linspace(0.01, 2*pi-0.01, 7)
+        x = linspace(0.01, pi-0.01, 7)
+        z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1],
+                   [1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1],
+                   [1,2,1,2,1,2,1]])
+
+        lut = RectSphereBivariateSpline(x,y,z)
+
+        y = linspace(0.02, 2*pi-0.02, 7)
+        x = linspace(0.02, pi-0.02, 7)
+
+        assert_equal(lut(x, y, dtheta=1, grid=False).shape, x.shape)
+        assert_allclose(lut(x, y, dtheta=1, grid=False),
+                        _numdiff_2d(lambda x,y: lut(x,y,grid=False), x, y, dx=1),
+                        rtol=1e-4, atol=1e-4)
+        assert_allclose(lut(x, y, dphi=1, grid=False),
+                        _numdiff_2d(lambda x,y: lut(x,y,grid=False), x, y, dy=1),
+                        rtol=1e-4, atol=1e-4)
+        assert_allclose(lut(x, y, dtheta=1, dphi=1, grid=False),
+                        _numdiff_2d(lambda x,y: lut(x,y,grid=False), x, y, dx=1, dy=1, eps=1e-6),
+                        rtol=1e-3, atol=1e-3)
+
+    def test_invalid_input_2(self):
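+        # Same checks as test_invalid_input, but probing the boundary
+        # values themselves: u must lie strictly inside (0, pi), so grids
+        # that start at 0 or end at pi are rejected as well.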
+        data = np.dot(np.atleast_2d(90. - np.linspace(-80., 80., 18)).T,
+                      np.atleast_2d(180. - np.abs(np.linspace(0., 350., 9)))).T
+
+        with assert_raises(ValueError) as exc_info:
+            lats = np.linspace(0, 170, 9) * np.pi / 180.
+            lons = np.linspace(0, 350, 18) * np.pi / 180.
+            RectSphereBivariateSpline(lats, lons, data)
+        assert "u should be between (0, pi)" in str(exc_info.value)
+
+        with assert_raises(ValueError) as exc_info:
+            lats = np.linspace(10, 180, 9) * np.pi / 180.
+            lons = np.linspace(0, 350, 18) * np.pi / 180.
+            RectSphereBivariateSpline(lats, lons, data)
+        assert "u should be between (0, pi)" in str(exc_info.value)
+
+        with assert_raises(ValueError) as exc_info:
+            lats = np.linspace(10, 170, 9) * np.pi / 180.
+            lons = np.linspace(-181, 10, 18) * np.pi / 180.
+            RectSphereBivariateSpline(lats, lons, data)
+        assert "v[0] should be between [-pi, pi)" in str(exc_info.value)
+
+        with assert_raises(ValueError) as exc_info:
+            lats = np.linspace(10, 170, 9) * np.pi / 180.
+            lons = np.linspace(-10, 360, 18) * np.pi / 180.
+            RectSphereBivariateSpline(lats, lons, data)
+        assert "v[-1] should be v[0] + 2pi or less" in str(exc_info.value)
+
+        with assert_raises(ValueError) as exc_info:
+            lats = np.linspace(10, 170, 9) * np.pi / 180.
+            lons = np.linspace(10, 350, 18) * np.pi / 180.
+            RectSphereBivariateSpline(lats, lons, data, s=-1)
+        assert "s should be positive" in str(exc_info.value)
+
+    def test_array_like_input(self):
+        y = linspace(0.01, 2 * pi - 0.01, 7)
+        x = linspace(0.01, pi - 0.01, 7)
+        z = array([[1, 2, 1, 2, 1, 2, 1], [1, 2, 1, 2, 1, 2, 1],
+                   [1, 2, 3, 2, 1, 2, 1],
+                   [1, 2, 2, 2, 1, 2, 1], [1, 2, 1, 2, 1, 2, 1],
+                   [1, 2, 2, 2, 1, 2, 1],
+                   [1, 2, 1, 2, 1, 2, 1]])
+        # np.array input
+        spl1 = RectSphereBivariateSpline(x, y, z)
+        # list input
+        spl2 = RectSphereBivariateSpline(x.tolist(), y.tolist(), z.tolist())
+        assert_array_almost_equal(spl1(x, y), spl2(x, y))
+
+    def test_negative_evaluation(self):
+        lats = np.array([25, 30, 35, 40, 45])
+        lons = np.array([-90, -85, -80, -75, 70])
+        mesh = np.meshgrid(lats, lons)
+        data = mesh[0] + mesh[1]  # lon + lat value
+        lat_r = np.radians(lats)
+        lon_r = np.radians(lons)
+        interpolator = RectSphereBivariateSpline(lat_r, lon_r, data)
+        query_lat = np.radians(np.array([35, 37.5]))
+        query_lon = np.radians(np.array([-80, -77.5]))
+        data_interp = interpolator(query_lat, query_lon)
+        ans = np.array([[-45.0, -42.480862],
+                        [-49.0625, -46.54315]])
+        assert_array_almost_equal(data_interp, ans)
+
+    def test_pole_continuity_gh_14591(self):
+        # regression test for https://github.com/scipy/scipy/issues/14591
+        # with pole_continuity=(True, True), the internal work array size
+        # was too small, leading to a FITPACK data validation error.
+
+        # The reproducer in gh-14591 was using a NetCDF4 file with
+        # 361x507 arrays, so here we trivialize array sizes to a minimum
+        # which still demonstrates the issue.
+        u = np.arange(1, 10) * np.pi / 10
+        v = np.arange(1, 10) * np.pi / 10
+        r = np.zeros((9, 9))
+        for p in [(True, True), (True, False), (False, False)]:
+            RectSphereBivariateSpline(u, v, r, s=0, pole_continuity=p)
+
+
+def _numdiff_2d(func, x, y, dx=0, dy=0, eps=1e-8):
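+    # Second-order central differences, used only to cross-check the
+    # analytic spline derivatives:
+    #   df/dx     ~= (f(x+e, y) - f(x-e, y)) / (2e)
+    #   d2f/dxdy  ~= (f(x+e, y+e) - f(x-e, y+e)
+    #                 - f(x+e, y-e) + f(x-e, y-e)) / (2e)**2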
+    if dx == 0 and dy == 0:
+        return func(x, y)
+    elif dx == 1 and dy == 0:
+        return (func(x + eps, y) - func(x - eps, y)) / (2*eps)
+    elif dx == 0 and dy == 1:
+        return (func(x, y + eps) - func(x, y - eps)) / (2*eps)
+    elif dx == 1 and dy == 1:
+        return (func(x + eps, y + eps) - func(x - eps, y + eps)
+                - func(x + eps, y - eps) + func(x - eps, y - eps)) / (2*eps)**2
+    else:
+        raise ValueError("invalid derivative order")
+
+
+class Test_DerivedBivariateSpline:
+    """Test the creation, usage, and attribute access of the (private)
+    _DerivedBivariateSpline class.
+    """
+    def setup_method(self):
+        x = np.concatenate(list(zip(range(10), range(10))))
+        y = np.concatenate(list(zip(range(10), range(1, 11))))
+        z = np.concatenate((np.linspace(3, 1, 10), np.linspace(1, 3, 10)))
+        with suppress_warnings() as sup:
+            sup.record(UserWarning, "\nThe coefficients of the spline")
+            self.lut_lsq = LSQBivariateSpline(x, y, z,
+                                              linspace(0.5, 19.5, 4),
+                                              linspace(1.5, 20.5, 4),
+                                              eps=1e-2)
+        self.lut_smooth = SmoothBivariateSpline(x, y, z)
+        xx = linspace(0, 1, 20)
+        yy = xx + 1.0
+        zz = array([np.roll(z, i) for i in range(z.size)])
+        self.lut_rect = RectBivariateSpline(xx, yy, zz)
+        self.orders = list(itertools.product(range(3), range(3)))
+
+    def test_creation_from_LSQ(self):
+        for nux, nuy in self.orders:
+            lut_der = self.lut_lsq.partial_derivative(nux, nuy)
+            a = lut_der(3.5, 3.5, grid=False)
+            b = self.lut_lsq(3.5, 3.5, dx=nux, dy=nuy, grid=False)
+            assert_equal(a, b)
+
+    def test_creation_from_Smooth(self):
+        for nux, nuy in self.orders:
+            lut_der = self.lut_smooth.partial_derivative(nux, nuy)
+            a = lut_der(5.5, 5.5, grid=False)
+            b = self.lut_smooth(5.5, 5.5, dx=nux, dy=nuy, grid=False)
+            assert_equal(a, b)
+
+    def test_creation_from_Rect(self):
+        for nux, nuy in self.orders:
+            lut_der = self.lut_rect.partial_derivative(nux, nuy)
+            a = lut_der(0.5, 1.5, grid=False)
+            b = self.lut_rect(0.5, 1.5, dx=nux, dy=nuy, grid=False)
+            assert_equal(a, b)
+
+    def test_invalid_attribute_fp(self):
+        der = self.lut_rect.partial_derivative(1, 1)
+        with assert_raises(AttributeError):
+            der.fp
+
+    def test_invalid_attribute_get_residual(self):
+        der = self.lut_smooth.partial_derivative(1, 1)
+        with assert_raises(AttributeError):
+            der.get_residual()
diff --git a/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_gil.py b/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_gil.py
new file mode 100644
index 00000000..0902308f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_gil.py
@@ -0,0 +1,65 @@
+import itertools
+import threading
+import time
+
+import numpy as np
+from numpy.testing import assert_equal
+import pytest
+import scipy.interpolate
+
+
+class TestGIL:
+    """Check if the GIL is properly released by scipy.interpolate functions."""
+
+    def setup_method(self):
+        self.messages = []
+
+    def log(self, message):
+        self.messages.append(message)
+
+    def make_worker_thread(self, target, args):
+        log = self.log
+
+        class WorkerThread(threading.Thread):
+            def run(self):
+                log('interpolation started')
+                target(*args)
+                log('interpolation complete')
+
+        return WorkerThread()
+
+    @pytest.mark.slow
+    @pytest.mark.xfail(reason='race conditions, may depend on system load')
+    def test_rectbivariatespline(self):
+        def generate_params(n_points):
+            x = y = np.linspace(0, 1000, n_points)
+            x_grid, y_grid = np.meshgrid(x, y)
+            z = x_grid * y_grid
+            return x, y, z
+
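+        # Grow the grid until a single fit takes longer than requested_time,
+        # so the main thread can log 'working' while the worker computes;
+        # the interleaving asserted below is only possible if the fit
+        # releases the GIL.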
+        def calibrate_delay(requested_time):
+            for n_points in itertools.count(5000, 1000):
+                args = generate_params(n_points)
+                time_started = time.time()
+                interpolate(*args)
+                if time.time() - time_started > requested_time:
+                    return args
+
+        def interpolate(x, y, z):
+            scipy.interpolate.RectBivariateSpline(x, y, z)
+
+        args = calibrate_delay(requested_time=3)
+        worker_thread = self.make_worker_thread(interpolate, args)
+        worker_thread.start()
+        for i in range(3):
+            time.sleep(0.5)
+            self.log('working')
+        worker_thread.join()
+        assert_equal(self.messages, [
+            'interpolation started',
+            'working',
+            'working',
+            'working',
+            'interpolation complete',
+        ])
+
diff --git a/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_interpnd.py b/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_interpnd.py
new file mode 100644
index 00000000..008b4518
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_interpnd.py
@@ -0,0 +1,386 @@
+import os
+
+import numpy as np
+from numpy.testing import (assert_equal, assert_allclose, assert_almost_equal,
+                           suppress_warnings)
+from pytest import raises as assert_raises
+import pytest
+
+import scipy.interpolate.interpnd as interpnd
+import scipy.spatial._qhull as qhull
+
+import pickle
+
+
+def data_file(basename):
+    return os.path.join(os.path.abspath(os.path.dirname(__file__)),
+                        'data', basename)
+
+
+class TestLinearNDInterpolation:
+    def test_smoketest(self):
+        # Test at single points
+        x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+                     dtype=np.double)
+        y = np.arange(x.shape[0], dtype=np.double)
+
+        yi = interpnd.LinearNDInterpolator(x, y)(x)
+        assert_almost_equal(y, yi)
+
+    def test_smoketest_alternate(self):
+        # Test at single points, alternate calling convention
+        x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+                     dtype=np.double)
+        y = np.arange(x.shape[0], dtype=np.double)
+
+        yi = interpnd.LinearNDInterpolator((x[:,0], x[:,1]), y)(x[:,0], x[:,1])
+        assert_almost_equal(y, yi)
+
+    def test_complex_smoketest(self):
+        # Test at single points
+        x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+                     dtype=np.double)
+        y = np.arange(x.shape[0], dtype=np.double)
+        y = y - 3j*y
+
+        yi = interpnd.LinearNDInterpolator(x, y)(x)
+        assert_almost_equal(y, yi)
+
+    def test_tri_input(self):
+        # Test at single points
+        x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+                     dtype=np.double)
+        y = np.arange(x.shape[0], dtype=np.double)
+        y = y - 3j*y
+
+        tri = qhull.Delaunay(x)
+        yi = interpnd.LinearNDInterpolator(tri, y)(x)
+        assert_almost_equal(y, yi)
+
+    def test_square(self):
+        # Test barycentric interpolation on a square against a manual
+        # implementation
+
+        points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.double)
+        values = np.array([1., 2., -3., 5.], dtype=np.double)
+
+        # NB: assume triangles (0, 1, 3) and (1, 2, 3)
+        #
+        #  1----2
+        #  | \  |
+        #  |  \ |
+        #  0----3
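+        #
+        # On triangle (0, 1, 3) the barycentric weights of a point (x, y)
+        # are (1 - x - y, y, x); on triangle (1, 2, 3) they are
+        # (1 - x, x + y - 1, 1 - y), which is what ip() computes below.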
+
+        def ip(x, y):
+            t1 = (x + y <= 1)
+            t2 = ~t1
+
+            x1 = x[t1]
+            y1 = y[t1]
+
+            x2 = x[t2]
+            y2 = y[t2]
+
+            z = 0*x
+
+            z[t1] = (values[0]*(1 - x1 - y1)
+                     + values[1]*y1
+                     + values[3]*x1)
+
+            z[t2] = (values[2]*(x2 + y2 - 1)
+                     + values[1]*(1 - x2)
+                     + values[3]*(1 - y2))
+            return z
+
+        xx, yy = np.broadcast_arrays(np.linspace(0, 1, 14)[:,None],
+                                     np.linspace(0, 1, 14)[None,:])
+        xx = xx.ravel()
+        yy = yy.ravel()
+
+        xi = np.array([xx, yy]).T.copy()
+        zi = interpnd.LinearNDInterpolator(points, values)(xi)
+
+        assert_almost_equal(zi, ip(xx, yy))
+
+    def test_smoketest_rescale(self):
+        # Test at single points
+        x = np.array([(0, 0), (-5, -5), (-5, 5), (5, 5), (2.5, 3)],
+                     dtype=np.double)
+        y = np.arange(x.shape[0], dtype=np.double)
+
+        yi = interpnd.LinearNDInterpolator(x, y, rescale=True)(x)
+        assert_almost_equal(y, yi)
+
+    def test_square_rescale(self):
+        # Test barycentric interpolation on a rectangle with rescaling
+        # against the same implementation without rescaling
+
+        points = np.array([(0,0), (0,100), (10,100), (10,0)], dtype=np.double)
+        values = np.array([1., 2., -3., 5.], dtype=np.double)
+
+        xx, yy = np.broadcast_arrays(np.linspace(0, 10, 14)[:,None],
+                                     np.linspace(0, 100, 14)[None,:])
+        xx = xx.ravel()
+        yy = yy.ravel()
+        xi = np.array([xx, yy]).T.copy()
+        zi = interpnd.LinearNDInterpolator(points, values)(xi)
+        zi_rescaled = interpnd.LinearNDInterpolator(points, values,
+                rescale=True)(xi)
+
+        assert_almost_equal(zi, zi_rescaled)
+
+    def test_tripoints_input_rescale(self):
+        # Test at single points
+        x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],
+                     dtype=np.double)
+        y = np.arange(x.shape[0], dtype=np.double)
+        y = y - 3j*y
+
+        tri = qhull.Delaunay(x)
+        yi = interpnd.LinearNDInterpolator(tri.points, y)(x)
+        yi_rescale = interpnd.LinearNDInterpolator(tri.points, y,
+                rescale=True)(x)
+        assert_almost_equal(yi, yi_rescale)
+
+    def test_tri_input_rescale(self):
+        # Test at single points
+        x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],
+                     dtype=np.double)
+        y = np.arange(x.shape[0], dtype=np.double)
+        y = y - 3j*y
+
+        tri = qhull.Delaunay(x)
+        match = ("Rescaling is not supported when passing a "
+                 "Delaunay triangulation as ``points``.")
+        with pytest.raises(ValueError, match=match):
+            interpnd.LinearNDInterpolator(tri, y, rescale=True)(x)
+
+    def test_pickle(self):
+        # Test at single points
+        np.random.seed(1234)
+        x = np.random.rand(30, 2)
+        y = np.random.rand(30) + 1j*np.random.rand(30)
+
+        ip = interpnd.LinearNDInterpolator(x, y)
+        ip2 = pickle.loads(pickle.dumps(ip))
+
+        assert_almost_equal(ip(0.5, 0.5), ip2(0.5, 0.5))
+
+
+class TestEstimateGradients2DGlobal:
+    def test_smoketest(self):
+        x = np.array([(0, 0), (0, 2),
+                      (1, 0), (1, 2), (0.25, 0.75), (0.6, 0.8)], dtype=float)
+        tri = qhull.Delaunay(x)
+
+        # Should be exact for linear functions, independent of triangulation
+
+        funcs = [
+            (lambda x, y: 0*x + 1, (0, 0)),
+            (lambda x, y: 0 + x, (1, 0)),
+            (lambda x, y: -2 + y, (0, 1)),
+            (lambda x, y: 3 + 3*x + 14.15*y, (3, 14.15))
+        ]
+
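+        # The gradient of a + b*x + c*y is (b, c) everywhere, so each row of
+        # the estimate dz should equal the corresponding (b, c) pair.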
+        for j, (func, grad) in enumerate(funcs):
+            z = func(x[:,0], x[:,1])
+            dz = interpnd.estimate_gradients_2d_global(tri, z, tol=1e-6)
+
+            assert_equal(dz.shape, (6, 2))
+            assert_allclose(dz, np.array(grad)[None,:] + 0*dz,
+                            rtol=1e-5, atol=1e-5, err_msg="item %d" % j)
+
+    def test_regression_2359(self):
+        # Check regression --- for certain point sets, gradient
+        # estimation could end up in an infinite loop
+        points = np.load(data_file('estimate_gradients_hang.npy'))
+        values = np.random.rand(points.shape[0])
+        tri = qhull.Delaunay(points)
+
+        # This should not hang
+        with suppress_warnings() as sup:
+            sup.filter(interpnd.GradientEstimationWarning,
+                       "Gradient estimation did not converge")
+            interpnd.estimate_gradients_2d_global(tri, values, maxiter=1)
+
+
+class TestCloughTocher2DInterpolator:
+
+    def _check_accuracy(self, func, x=None, tol=1e-6, alternate=False, rescale=False, **kw):
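+        # Helper: fit a CloughTocher2DInterpolator to func sampled at the
+        # points x (optionally via the (xs, ys) calling convention and/or
+        # with rescaling) and compare it with func at 50 random query points.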
+        np.random.seed(1234)
+        if x is None:
+            x = np.array([(0, 0), (0, 1),
+                          (1, 0), (1, 1), (0.25, 0.75), (0.6, 0.8),
+                          (0.5, 0.2)],
+                         dtype=float)
+
+        if not alternate:
+            ip = interpnd.CloughTocher2DInterpolator(x, func(x[:,0], x[:,1]),
+                                                     tol=1e-6, rescale=rescale)
+        else:
+            ip = interpnd.CloughTocher2DInterpolator((x[:,0], x[:,1]),
+                                                     func(x[:,0], x[:,1]),
+                                                     tol=1e-6, rescale=rescale)
+
+        p = np.random.rand(50, 2)
+
+        if not alternate:
+            a = ip(p)
+        else:
+            a = ip(p[:,0], p[:,1])
+        b = func(p[:,0], p[:,1])
+
+        try:
+            assert_allclose(a, b, **kw)
+        except AssertionError:
+            print("_check_accuracy: abs(a-b):", abs(a - b))
+            print("ip.grad:", ip.grad)
+            raise
+
+    def test_linear_smoketest(self):
+        # Should be exact for linear functions, independent of triangulation
+        funcs = [
+            lambda x, y: 0*x + 1,
+            lambda x, y: 0 + x,
+            lambda x, y: -2 + y,
+            lambda x, y: 3 + 3*x + 14.15*y,
+        ]
+
+        for j, func in enumerate(funcs):
+            self._check_accuracy(func, tol=1e-13, atol=1e-7, rtol=1e-7,
+                                 err_msg="Function %d" % j)
+            self._check_accuracy(func, tol=1e-13, atol=1e-7, rtol=1e-7,
+                                 alternate=True,
+                                 err_msg="Function (alternate) %d" % j)
+            # check rescaling
+            self._check_accuracy(func, tol=1e-13, atol=1e-7, rtol=1e-7,
+                                 err_msg="Function (rescaled) %d" % j, rescale=True)
+            self._check_accuracy(func, tol=1e-13, atol=1e-7, rtol=1e-7,
+                                 alternate=True, rescale=True,
+                                 err_msg="Function (alternate, rescaled) %d" % j)
+
+    def test_quadratic_smoketest(self):
+        # Should be reasonably accurate for quadratic functions
+        funcs = [
+            lambda x, y: x**2,
+            lambda x, y: y**2,
+            lambda x, y: x**2 - y**2,
+            lambda x, y: x*y,
+        ]
+
+        for j, func in enumerate(funcs):
+            self._check_accuracy(func, tol=1e-9, atol=0.22, rtol=0,
+                                 err_msg="Function %d" % j)
+            self._check_accuracy(func, tol=1e-9, atol=0.22, rtol=0,
+                                 err_msg="Function %d" % j, rescale=True)
+
+    def test_tri_input(self):
+        # Test at single points
+        x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+                     dtype=np.double)
+        y = np.arange(x.shape[0], dtype=np.double)
+        y = y - 3j*y
+
+        tri = qhull.Delaunay(x)
+        yi = interpnd.CloughTocher2DInterpolator(tri, y)(x)
+        assert_almost_equal(y, yi)
+
+    def test_tri_input_rescale(self):
+        # Test at single points
+        x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],
+                     dtype=np.double)
+        y = np.arange(x.shape[0], dtype=np.double)
+        y = y - 3j*y
+
+        tri = qhull.Delaunay(x)
+        match = ("Rescaling is not supported when passing a "
+                 "Delaunay triangulation as ``points``.")
+        with pytest.raises(ValueError, match=match):
+            interpnd.CloughTocher2DInterpolator(tri, y, rescale=True)(x)
+
+    def test_tripoints_input_rescale(self):
+        # Test at single points
+        x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],
+                     dtype=np.double)
+        y = np.arange(x.shape[0], dtype=np.double)
+        y = y - 3j*y
+
+        tri = qhull.Delaunay(x)
+        yi = interpnd.CloughTocher2DInterpolator(tri.points, y)(x)
+        yi_rescale = interpnd.CloughTocher2DInterpolator(tri.points, y, rescale=True)(x)
+        assert_almost_equal(yi, yi_rescale)
+
+    def test_dense(self):
+        # Should be more accurate for dense meshes
+        funcs = [
+            lambda x, y: x**2,
+            lambda x, y: y**2,
+            lambda x, y: x**2 - y**2,
+            lambda x, y: x*y,
+            lambda x, y: np.cos(2*np.pi*x)*np.sin(2*np.pi*y)
+        ]
+
+        np.random.seed(4321)  # use a different seed than the check!
+        grid = np.r_[np.array([(0,0), (0,1), (1,0), (1,1)], dtype=float),
+                     np.random.rand(30*30, 2)]
+
+        for j, func in enumerate(funcs):
+            self._check_accuracy(func, x=grid, tol=1e-9, atol=5e-3, rtol=1e-2,
+                                 err_msg="Function %d" % j)
+            self._check_accuracy(func, x=grid, tol=1e-9, atol=5e-3, rtol=1e-2,
+                                 err_msg="Function %d" % j, rescale=True)
+
+    def test_wrong_ndim(self):
+        x = np.random.randn(30, 3)
+        y = np.random.randn(30)
+        assert_raises(ValueError, interpnd.CloughTocher2DInterpolator, x, y)
+
+    def test_pickle(self):
+        # Test at single points
+        np.random.seed(1234)
+        x = np.random.rand(30, 2)
+        y = np.random.rand(30) + 1j*np.random.rand(30)
+
+        ip = interpnd.CloughTocher2DInterpolator(x, y)
+        ip2 = pickle.loads(pickle.dumps(ip))
+
+        assert_almost_equal(ip(0.5, 0.5), ip2(0.5, 0.5))
+
+    def test_boundary_tri_symmetry(self):
+        # Interpolation in triangles that have no neighbours should be
+        # symmetric under mirroring of the triangle.
+
+        # Equilateral triangle
+        points = np.array([(0, 0), (1, 0), (0.5, np.sqrt(3)/2)])
+        values = np.array([1, 0, 0])
+
+        ip = interpnd.CloughTocher2DInterpolator(points, values)
+
+        # Set gradient to zero at vertices
+        ip.grad[...] = 0
+
+        # Interpolation should be symmetric vs. bisector
+        alpha = 0.3
+        p1 = np.array([0.5 * np.cos(alpha), 0.5 * np.sin(alpha)])
+        p2 = np.array([0.5 * np.cos(np.pi/3 - alpha), 0.5 * np.sin(np.pi/3 - alpha)])
+
+        v1 = ip(p1)
+        v2 = ip(p2)
+        assert_allclose(v1, v2)
+
+        # ... and affine invariant
+        np.random.seed(1)
+        A = np.random.randn(2, 2)
+        b = np.random.randn(2)
+
+        points = A.dot(points.T).T + b[None,:]
+        p1 = A.dot(p1) + b
+        p2 = A.dot(p2) + b
+
+        ip = interpnd.CloughTocher2DInterpolator(points, values)
+        ip.grad[...] = 0
+
+        w1 = ip(p1)
+        w2 = ip(p2)
+        assert_allclose(w1, v1)
+        assert_allclose(w2, v2)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_interpolate.py b/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_interpolate.py
new file mode 100644
index 00000000..97e9e07c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_interpolate.py
@@ -0,0 +1,2545 @@
+from numpy.testing import (assert_, assert_equal, assert_almost_equal,
+                           assert_array_almost_equal, assert_array_equal,
+                           assert_allclose, suppress_warnings)
+from pytest import raises as assert_raises
+import pytest
+
+from numpy import mgrid, pi, sin, ogrid, poly1d, linspace
+import numpy as np
+
+from scipy.interpolate import (interp1d, interp2d, lagrange, PPoly, BPoly,
+        splrep, splev, splantider, splint, sproot, Akima1DInterpolator,
+        NdPPoly, BSpline)
+
+from scipy.special import poch, gamma
+
+from scipy.interpolate import _ppoly
+
+from scipy._lib._gcutils import assert_deallocated, IS_PYPY
+
+from scipy.integrate import nquad
+
+from scipy.special import binom
+
+
+class TestInterp2D:
+    def test_interp2d(self):
+        y, x = mgrid[0:2:20j, 0:pi:21j]
+        z = sin(x+0.5*y)
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning)
+            II = interp2d(x, y, z)
+            assert_almost_equal(II(1.0, 2.0), sin(2.0), decimal=2)
+
+            v, u = ogrid[0:2:24j, 0:pi:25j]
+            assert_almost_equal(II(u.ravel(), v.ravel()),
+                                sin(u+0.5*v), decimal=2)
+
+    def test_interp2d_meshgrid_input(self):
+        # Ticket #703
+        x = linspace(0, 2, 16)
+        y = linspace(0, pi, 21)
+        z = sin(x[None, :] + y[:, None]/2.)
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning)
+            II = interp2d(x, y, z)
+            assert_almost_equal(II(1.0, 2.0), sin(2.0), decimal=2)
+
+    def test_interp2d_meshgrid_input_unsorted(self):
+        np.random.seed(1234)
+        x = linspace(0, 2, 16)
+        y = linspace(0, pi, 21)
+
+        z = sin(x[None, :] + y[:, None] / 2.)
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning)
+            ip1 = interp2d(x.copy(), y.copy(), z, kind='cubic')
+
+            np.random.shuffle(x)
+            z = sin(x[None, :] + y[:, None]/2.)
+            ip2 = interp2d(x.copy(), y.copy(), z, kind='cubic')
+
+            np.random.shuffle(x)
+            np.random.shuffle(y)
+            z = sin(x[None, :] + y[:, None] / 2.)
+            ip3 = interp2d(x, y, z, kind='cubic')
+
+            x = linspace(0, 2, 31)
+            y = linspace(0, pi, 30)
+
+            assert_equal(ip1(x, y), ip2(x, y))
+            assert_equal(ip1(x, y), ip3(x, y))
+
+    def test_interp2d_eval_unsorted(self):
+        y, x = mgrid[0:2:20j, 0:pi:21j]
+        z = sin(x + 0.5*y)
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning)
+            func = interp2d(x, y, z)
+
+            xe = np.array([3, 4, 5])
+            ye = np.array([5.3, 7.1])
+            assert_allclose(func(xe, ye), func(xe, ye[::-1]))
+
+            assert_raises(ValueError, func, xe, ye[::-1], 0, 0, True)
+
+    def test_interp2d_linear(self):
+        # Ticket #898
+        a = np.zeros([5, 5])
+        a[2, 2] = 1.0
+        x = y = np.arange(5)
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning)
+            b = interp2d(x, y, a, 'linear')
+            assert_almost_equal(b(2.0, 1.5), np.array([0.5]), decimal=2)
+            assert_almost_equal(b(2.0, 2.5), np.array([0.5]), decimal=2)
+
+    def test_interp2d_bounds(self):
+        x = np.linspace(0, 1, 5)
+        y = np.linspace(0, 2, 7)
+        z = x[None, :]**2 + y[:, None]
+
+        ix = np.linspace(-1, 3, 31)
+        iy = np.linspace(-1, 3, 33)
+
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning)
+
+            b = interp2d(x, y, z, bounds_error=True)
+            assert_raises(ValueError, b, ix, iy)
+
+            b = interp2d(x, y, z, fill_value=np.nan)
+            iz = b(ix, iy)
+            mx = (ix < 0) | (ix > 1)
+            my = (iy < 0) | (iy > 2)
+            assert_(np.isnan(iz[my, :]).all())
+            assert_(np.isnan(iz[:, mx]).all())
+            assert_(np.isfinite(iz[~my, :][:, ~mx]).all())
+
+
+class TestInterp1D:
+
+    def setup_method(self):
+        self.x5 = np.arange(5.)
+        self.x10 = np.arange(10.)
+        self.y10 = np.arange(10.)
+        self.x25 = self.x10.reshape((2,5))
+        self.x2 = np.arange(2.)
+        self.y2 = np.arange(2.)
+        self.x1 = np.array([0.])
+        self.y1 = np.array([0.])
+
+        self.y210 = np.arange(20.).reshape((2, 10))
+        self.y102 = np.arange(20.).reshape((10, 2))
+        self.y225 = np.arange(20.).reshape((2, 2, 5))
+        self.y25 = np.arange(10.).reshape((2, 5))
+        self.y235 = np.arange(30.).reshape((2, 3, 5))
+        self.y325 = np.arange(30.).reshape((3, 2, 5))
+
+        # Edge-updated test matrix 1
+        # array([[ 30,   1,   2,   3,   4,   5,   6,   7,   8, -30],
+        #        [ 30,  11,  12,  13,  14,  15,  16,  17,  18, -30]])
+        self.y210_edge_updated = np.arange(20.).reshape((2, 10))
+        self.y210_edge_updated[:, 0] = 30
+        self.y210_edge_updated[:, -1] = -30
+
+        # Edge-updated test matrix 2
+        # array([[ 30,  30],
+        #       [  2,   3],
+        #       [  4,   5],
+        #       [  6,   7],
+        #       [  8,   9],
+        #       [ 10,  11],
+        #       [ 12,  13],
+        #       [ 14,  15],
+        #       [ 16,  17],
+        #       [-30, -30]])
+        self.y102_edge_updated = np.arange(20.).reshape((10, 2))
+        self.y102_edge_updated[0, :] = 30
+        self.y102_edge_updated[-1, :] = -30
+
+        self.fill_value = -100.0
+
+    def test_validation(self):
+        # Make sure that appropriate exceptions are raised when invalid values
+        # are given to the constructor.
+
+        # These should all work.
+        for kind in ('nearest', 'nearest-up', 'zero', 'linear', 'slinear',
+                     'quadratic', 'cubic', 'previous', 'next'):
+            interp1d(self.x10, self.y10, kind=kind)
+            interp1d(self.x10, self.y10, kind=kind, fill_value="extrapolate")
+        interp1d(self.x10, self.y10, kind='linear', fill_value=(-1, 1))
+        interp1d(self.x10, self.y10, kind='linear',
+                 fill_value=np.array([-1]))
+        interp1d(self.x10, self.y10, kind='linear',
+                 fill_value=(-1,))
+        interp1d(self.x10, self.y10, kind='linear',
+                 fill_value=-1)
+        interp1d(self.x10, self.y10, kind='linear',
+                 fill_value=(-1, -1))
+        interp1d(self.x10, self.y10, kind=0)
+        interp1d(self.x10, self.y10, kind=1)
+        interp1d(self.x10, self.y10, kind=2)
+        interp1d(self.x10, self.y10, kind=3)
+        interp1d(self.x10, self.y210, kind='linear', axis=-1,
+                 fill_value=(-1, -1))
+        interp1d(self.x2, self.y210, kind='linear', axis=0,
+                 fill_value=np.ones(10))
+        interp1d(self.x2, self.y210, kind='linear', axis=0,
+                 fill_value=(np.ones(10), np.ones(10)))
+        interp1d(self.x2, self.y210, kind='linear', axis=0,
+                 fill_value=(np.ones(10), -1))
+
+        # x array must be 1D.
+        assert_raises(ValueError, interp1d, self.x25, self.y10)
+
+        # y array cannot be a scalar.
+        assert_raises(ValueError, interp1d, self.x10, np.array(0))
+
+        # Check for x and y arrays having the same length.
+        assert_raises(ValueError, interp1d, self.x10, self.y2)
+        assert_raises(ValueError, interp1d, self.x2, self.y10)
+        assert_raises(ValueError, interp1d, self.x10, self.y102)
+        interp1d(self.x10, self.y210)
+        interp1d(self.x10, self.y102, axis=0)
+
+        # Check for x and y having at least 1 element.
+        assert_raises(ValueError, interp1d, self.x1, self.y10)
+        assert_raises(ValueError, interp1d, self.x10, self.y1)
+
+        # Bad fill values
+        assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
+                      fill_value=(-1, -1, -1))  # doesn't broadcast
+        assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
+                      fill_value=[-1, -1, -1])  # doesn't broadcast
+        assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
+                      fill_value=np.array((-1, -1, -1)))  # doesn't broadcast
+        assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
+                      fill_value=[[-1]])  # doesn't broadcast
+        assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
+                      fill_value=[-1, -1])  # doesn't broadcast
+        assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
+                      fill_value=np.array([]))  # doesn't broadcast
+        assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
+                      fill_value=())  # doesn't broadcast
+        assert_raises(ValueError, interp1d, self.x2, self.y210, kind='linear',
+                      axis=0, fill_value=[-1, -1])  # doesn't broadcast
+        assert_raises(ValueError, interp1d, self.x2, self.y210, kind='linear',
+                      axis=0, fill_value=(0., [-1, -1]))  # 'above' doesn't broadcast
+
+    def test_init(self):
+        # Check that the attributes are initialized appropriately by the
+        # constructor.
+        assert_(interp1d(self.x10, self.y10).copy)
+        assert_(not interp1d(self.x10, self.y10, copy=False).copy)
+        assert_(interp1d(self.x10, self.y10).bounds_error)
+        assert_(not interp1d(self.x10, self.y10, bounds_error=False).bounds_error)
+        assert_(np.isnan(interp1d(self.x10, self.y10).fill_value))
+        assert_equal(interp1d(self.x10, self.y10, fill_value=3.0).fill_value,
+                     3.0)
+        assert_equal(interp1d(self.x10, self.y10, fill_value=(1.0, 2.0)).fill_value,
+                     (1.0, 2.0))
+        assert_equal(interp1d(self.x10, self.y10).axis, 0)
+        assert_equal(interp1d(self.x10, self.y210).axis, 1)
+        assert_equal(interp1d(self.x10, self.y102, axis=0).axis, 0)
+        assert_array_equal(interp1d(self.x10, self.y10).x, self.x10)
+        assert_array_equal(interp1d(self.x10, self.y10).y, self.y10)
+        assert_array_equal(interp1d(self.x10, self.y210).y, self.y210)
+
+    def test_assume_sorted(self):
+        # Check for unsorted arrays
+        interp10 = interp1d(self.x10, self.y10)
+        interp10_unsorted = interp1d(self.x10[::-1], self.y10[::-1])
+
+        assert_array_almost_equal(interp10_unsorted(self.x10), self.y10)
+        assert_array_almost_equal(interp10_unsorted(1.2), np.array([1.2]))
+        assert_array_almost_equal(interp10_unsorted([2.4, 5.6, 6.0]),
+                                  interp10([2.4, 5.6, 6.0]))
+
+        # Check assume_sorted keyword (defaults to False)
+        interp10_assume_kw = interp1d(self.x10[::-1], self.y10[::-1],
+                                      assume_sorted=False)
+        assert_array_almost_equal(interp10_assume_kw(self.x10), self.y10)
+
+        interp10_assume_kw2 = interp1d(self.x10[::-1], self.y10[::-1],
+                                       assume_sorted=True)
+        # Should raise an error for unsorted input if assume_sorted=True
+        assert_raises(ValueError, interp10_assume_kw2, self.x10)
+
+        # Check that if y is a 2-D array, things are still consistent
+        interp10_y_2d = interp1d(self.x10, self.y210)
+        interp10_y_2d_unsorted = interp1d(self.x10[::-1], self.y210[:, ::-1])
+        assert_array_almost_equal(interp10_y_2d(self.x10),
+                                  interp10_y_2d_unsorted(self.x10))
+
+    def test_linear(self):
+        for kind in ['linear', 'slinear']:
+            self._check_linear(kind)
+
+    def _check_linear(self, kind):
+        # Check the actual implementation of linear interpolation.
+        interp10 = interp1d(self.x10, self.y10, kind=kind)
+        assert_array_almost_equal(interp10(self.x10), self.y10)
+        assert_array_almost_equal(interp10(1.2), np.array([1.2]))
+        assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
+                                  np.array([2.4, 5.6, 6.0]))
+
+        # test fill_value="extrapolate"
+        extrapolator = interp1d(self.x10, self.y10, kind=kind,
+                                fill_value='extrapolate')
+        assert_allclose(extrapolator([-1., 0, 9, 11]),
+                        [-1, 0, 9, 11], rtol=1e-14)
+
+        opts = dict(kind=kind,
+                    fill_value='extrapolate',
+                    bounds_error=True)
+        assert_raises(ValueError, interp1d, self.x10, self.y10, **opts)
+
+    def test_linear_dtypes(self):
+        # regression test for gh-5898, where 1D linear interpolation was
+        # delegated to numpy.interp for all float dtypes, and the latter did
+        # not handle e.g. np.float128.
+        for dtyp in np.sctypes["float"]:
+            x = np.arange(8, dtype=dtyp)
+            y = x
+            yp = interp1d(x, y, kind='linear')(x)
+            assert_equal(yp.dtype, dtyp)
+            assert_allclose(yp, y, atol=1e-15)
+
+        # regression test for gh-14531, where 1D linear interpolation was
+        # extended to delegate to numpy.interp for integer dtypes
+        x = [0, 1, 2]
+        y = [np.nan, 0, 1]
+        yp = interp1d(x, y)(x)
+        assert_allclose(yp, y, atol=1e-15)
+
+    def test_slinear_dtypes(self):
+        # regression test for gh-7273: 1D slinear interpolation fails with
+        # float32 inputs
+        dt_r = [np.float16, np.float32, np.float64]
+        dt_rc = dt_r + [np.complex64, np.complex128]
+        spline_kinds = ['slinear', 'zero', 'quadratic', 'cubic']
+        for dtx in dt_r:
+            x = np.arange(0, 10, dtype=dtx)
+            for dty in dt_rc:
+                y = np.exp(-x/3.0).astype(dty)
+                for dtn in dt_r:
+                    xnew = x.astype(dtn)
+                    for kind in spline_kinds:
+                        f = interp1d(x, y, kind=kind, bounds_error=False)
+                        assert_allclose(f(xnew), y, atol=1e-7,
+                                        err_msg="%s, %s %s" % (dtx, dty, dtn))
+
+    def test_cubic(self):
+        # Check the actual implementation of spline interpolation.
+        interp10 = interp1d(self.x10, self.y10, kind='cubic')
+        assert_array_almost_equal(interp10(self.x10), self.y10)
+        assert_array_almost_equal(interp10(1.2), np.array([1.2]))
+        assert_array_almost_equal(interp10(1.5), np.array([1.5]))
+        assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
+                                  np.array([2.4, 5.6, 6.0]),)
+
+    def test_nearest(self):
+        # Check the actual implementation of nearest-neighbour interpolation.
+        # Nearest asserts that half-integer case (1.5) rounds down to 1
+        interp10 = interp1d(self.x10, self.y10, kind='nearest')
+        assert_array_almost_equal(interp10(self.x10), self.y10)
+        assert_array_almost_equal(interp10(1.2), np.array(1.))
+        assert_array_almost_equal(interp10(1.5), np.array(1.))
+        assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
+                                  np.array([2., 6., 6.]),)
+
+        # test fill_value="extrapolate"
+        extrapolator = interp1d(self.x10, self.y10, kind='nearest',
+                                fill_value='extrapolate')
+        assert_allclose(extrapolator([-1., 0, 9, 11]),
+                        [0, 0, 9, 9], rtol=1e-14)
+
+        opts = dict(kind='nearest',
+                    fill_value='extrapolate',
+                    bounds_error=True)
+        assert_raises(ValueError, interp1d, self.x10, self.y10, **opts)
+
+    def test_nearest_up(self):
+        # Check the actual implementation of nearest-neighbour interpolation.
+        # Nearest-up asserts that half-integer case (1.5) rounds up to 2
+        interp10 = interp1d(self.x10, self.y10, kind='nearest-up')
+        assert_array_almost_equal(interp10(self.x10), self.y10)
+        assert_array_almost_equal(interp10(1.2), np.array(1.))
+        assert_array_almost_equal(interp10(1.5), np.array(2.))
+        assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
+                                  np.array([2., 6., 6.]),)
+
+        # test fill_value="extrapolate"
+        extrapolator = interp1d(self.x10, self.y10, kind='nearest-up',
+                                fill_value='extrapolate')
+        assert_allclose(extrapolator([-1., 0, 9, 11]),
+                        [0, 0, 9, 9], rtol=1e-14)
+
+        opts = dict(kind='nearest-up',
+                    fill_value='extrapolate',
+                    bounds_error=True)
+        assert_raises(ValueError, interp1d, self.x10, self.y10, **opts)
+
+    def test_previous(self):
+        # Check the actual implementation of previous interpolation.
+        interp10 = interp1d(self.x10, self.y10, kind='previous')
+        assert_array_almost_equal(interp10(self.x10), self.y10)
+        assert_array_almost_equal(interp10(1.2), np.array(1.))
+        assert_array_almost_equal(interp10(1.5), np.array(1.))
+        assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
+                                  np.array([2., 5., 6.]),)
+
+        # test fill_value="extrapolate"
+        extrapolator = interp1d(self.x10, self.y10, kind='previous',
+                                fill_value='extrapolate')
+        assert_allclose(extrapolator([-1., 0, 9, 11]),
+                        [np.nan, 0, 9, 9], rtol=1e-14)
+
+        # Tests for gh-9591
+        interpolator1D = interp1d(self.x10, self.y10, kind="previous",
+                                  fill_value='extrapolate')
+        assert_allclose(interpolator1D([-1, -2, 5, 8, 12, 25]),
+                        [np.nan, np.nan, 5, 8, 9, 9])
+
+        interpolator2D = interp1d(self.x10, self.y210, kind="previous",
+                                  fill_value='extrapolate')
+        assert_allclose(interpolator2D([-1, -2, 5, 8, 12, 25]),
+                        [[np.nan, np.nan, 5, 8, 9, 9],
+                         [np.nan, np.nan, 15, 18, 19, 19]])
+
+        interpolator2DAxis0 = interp1d(self.x10, self.y102, kind="previous",
+                                       axis=0, fill_value='extrapolate')
+        assert_allclose(interpolator2DAxis0([-2, 5, 12]),
+                        [[np.nan, np.nan],
+                         [10, 11],
+                         [18, 19]])
+
+        opts = dict(kind='previous',
+                    fill_value='extrapolate',
+                    bounds_error=True)
+        assert_raises(ValueError, interp1d, self.x10, self.y10, **opts)
+
+        # Tests for gh-16813
+        interpolator1D = interp1d([0, 1, 2],
+                                  [0, 1, -1], kind="previous",
+                                  fill_value='extrapolate',
+                                  assume_sorted=True)
+        assert_allclose(interpolator1D([-2, -1, 0, 1, 2, 3, 5]),
+                        [np.nan, np.nan, 0, 1, -1, -1, -1])
+
+        interpolator1D = interp1d([2, 0, 1],  # x is not ascending
+                                  [-1, 0, 1], kind="previous",
+                                  fill_value='extrapolate',
+                                  assume_sorted=False)
+        assert_allclose(interpolator1D([-2, -1, 0, 1, 2, 3, 5]),
+                        [np.nan, np.nan, 0, 1, -1, -1, -1])
+
+        interpolator2D = interp1d(self.x10, self.y210_edge_updated,
+                                  kind="previous",
+                                  fill_value='extrapolate')
+        assert_allclose(interpolator2D([-1, -2, 5, 8, 12, 25]),
+                        [[np.nan, np.nan, 5, 8, -30, -30],
+                         [np.nan, np.nan, 15, 18, -30, -30]])
+
+        interpolator2DAxis0 = interp1d(self.x10, self.y102_edge_updated,
+                                       kind="previous",
+                                       axis=0, fill_value='extrapolate')
+        assert_allclose(interpolator2DAxis0([-2, 5, 12]),
+                        [[np.nan, np.nan],
+                         [10, 11],
+                         [-30, -30]])
+
+    def test_next(self):
+        # Check the actual implementation of next interpolation.
+        interp10 = interp1d(self.x10, self.y10, kind='next')
+        assert_array_almost_equal(interp10(self.x10), self.y10)
+        assert_array_almost_equal(interp10(1.2), np.array(2.))
+        assert_array_almost_equal(interp10(1.5), np.array(2.))
+        assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
+                                  np.array([3., 6., 6.]),)
+
+        # test fill_value="extrapolate"
+        extrapolator = interp1d(self.x10, self.y10, kind='next',
+                                fill_value='extrapolate')
+        assert_allclose(extrapolator([-1., 0, 9, 11]),
+                        [0, 0, 9, np.nan], rtol=1e-14)
+
+        # Tests for gh-9591
+        interpolator1D = interp1d(self.x10, self.y10, kind="next",
+                                  fill_value='extrapolate')
+        assert_allclose(interpolator1D([-1, -2, 5, 8, 12, 25]),
+                        [0, 0, 5, 8, np.nan, np.nan])
+
+        interpolator2D = interp1d(self.x10, self.y210, kind="next",
+                                  fill_value='extrapolate')
+        assert_allclose(interpolator2D([-1, -2, 5, 8, 12, 25]),
+                        [[0, 0, 5, 8, np.nan, np.nan],
+                         [10, 10, 15, 18, np.nan, np.nan]])
+
+        interpolator2DAxis0 = interp1d(self.x10, self.y102, kind="next",
+                                       axis=0, fill_value='extrapolate')
+        assert_allclose(interpolator2DAxis0([-2, 5, 12]),
+                        [[0, 1],
+                         [10, 11],
+                         [np.nan, np.nan]])
+
+        opts = dict(kind='next',
+                    fill_value='extrapolate',
+                    bounds_error=True)
+        assert_raises(ValueError, interp1d, self.x10, self.y10, **opts)
+
+        # Tests for gh-16813
+        interpolator1D = interp1d([0, 1, 2],
+                                  [0, 1, -1], kind="next",
+                                  fill_value='extrapolate',
+                                  assume_sorted=True)
+        assert_allclose(interpolator1D([-2, -1, 0, 1, 2, 3, 5]),
+                        [0, 0, 0, 1, -1, np.nan, np.nan])
+
+        interpolator1D = interp1d([2, 0, 1],  # x is not ascending
+                                  [-1, 0, 1], kind="next",
+                                  fill_value='extrapolate',
+                                  assume_sorted=False)
+        assert_allclose(interpolator1D([-2, -1, 0, 1, 2, 3, 5]),
+                        [0, 0, 0, 1, -1, np.nan, np.nan])
+
+        interpolator2D = interp1d(self.x10, self.y210_edge_updated,
+                                  kind="next",
+                                  fill_value='extrapolate')
+        assert_allclose(interpolator2D([-1, -2, 5, 8, 12, 25]),
+                        [[30, 30, 5, 8, np.nan, np.nan],
+                         [30, 30, 15, 18, np.nan, np.nan]])
+
+        interpolator2DAxis0 = interp1d(self.x10, self.y102_edge_updated,
+                                       kind="next",
+                                       axis=0, fill_value='extrapolate')
+        assert_allclose(interpolator2DAxis0([-2, 5, 12]),
+                        [[30, 30],
+                         [10, 11],
+                         [np.nan, np.nan]])
+
+    def test_zero(self):
+        # Check the actual implementation of zero-order spline interpolation.
+        interp10 = interp1d(self.x10, self.y10, kind='zero')
+        assert_array_almost_equal(interp10(self.x10), self.y10)
+        assert_array_almost_equal(interp10(1.2), np.array(1.))
+        assert_array_almost_equal(interp10(1.5), np.array(1.))
+        assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
+                                  np.array([2., 5., 6.]))
+
+    def bounds_check_helper(self, interpolant, test_array, fail_value):
+        # Asserts that a ValueError is raised and that the error message
+        # contains the value causing this exception.
+        assert_raises(ValueError, interpolant, test_array)
+        try:
+            interpolant(test_array)
+        except ValueError as err:
+            assert ("{}".format(fail_value) in str(err))
+
+    def _bounds_check(self, kind='linear'):
+        # Test that our handling of out-of-bounds input is correct.
+        extrap10 = interp1d(self.x10, self.y10, fill_value=self.fill_value,
+                            bounds_error=False, kind=kind)
+
+        assert_array_equal(extrap10(11.2), np.array(self.fill_value))
+        assert_array_equal(extrap10(-3.4), np.array(self.fill_value))
+        assert_array_equal(extrap10([[[11.2], [-3.4], [12.6], [19.3]]]),
+                           np.array(self.fill_value),)
+        assert_array_equal(extrap10._check_bounds(
+                               np.array([-1.0, 0.0, 5.0, 9.0, 11.0])),
+                           np.array([[True, False, False, False, False],
+                                     [False, False, False, False, True]]))
+
+        raises_bounds_error = interp1d(self.x10, self.y10, bounds_error=True,
+                                       kind=kind)
+
+        self.bounds_check_helper(raises_bounds_error, -1.0, -1.0)
+        self.bounds_check_helper(raises_bounds_error, 11.0, 11.0)
+        self.bounds_check_helper(raises_bounds_error, [0.0, -1.0, 0.0], -1.0)
+        self.bounds_check_helper(raises_bounds_error, [0.0, 1.0, 21.0], 21.0)
+
+        raises_bounds_error([0.0, 5.0, 9.0])
+
+    def _bounds_check_int_nan_fill(self, kind='linear'):
+        x = np.arange(10).astype(np.int_)
+        y = np.arange(10).astype(np.int_)
+        c = interp1d(x, y, kind=kind, fill_value=np.nan, bounds_error=False)
+        yi = c(x - 1)
+        assert_(np.isnan(yi[0]))
+        assert_array_almost_equal(yi, np.r_[np.nan, y[:-1]])
+
+    def test_bounds(self):
+        for kind in ('linear', 'cubic', 'nearest', 'previous', 'next',
+                     'slinear', 'zero', 'quadratic'):
+            self._bounds_check(kind)
+            self._bounds_check_int_nan_fill(kind)
+
+    def _check_fill_value(self, kind):
+        interp = interp1d(self.x10, self.y10, kind=kind,
+                          fill_value=(-100, 100), bounds_error=False)
+        assert_array_almost_equal(interp(10), 100)
+        assert_array_almost_equal(interp(-10), -100)
+        assert_array_almost_equal(interp([-10, 10]), [-100, 100])
+
+        # Proper broadcasting:
+        #    interp along axis of length 5
+        # other dim=(2, 3), (3, 2), (2, 2), or (2,)
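+        #
+        # fill_value must broadcast against the shape of y with the
+        # interpolation axis removed; a (below, above) tuple sets the two
+        # out-of-bounds sides independently.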
+
+        # one singleton fill_value (works for all)
+        for y in (self.y235, self.y325, self.y225, self.y25):
+            interp = interp1d(self.x5, y, kind=kind, axis=-1,
+                              fill_value=100, bounds_error=False)
+            assert_array_almost_equal(interp(10), 100)
+            assert_array_almost_equal(interp(-10), 100)
+            assert_array_almost_equal(interp([-10, 10]), 100)
+
+            # singleton lower, singleton upper
+            interp = interp1d(self.x5, y, kind=kind, axis=-1,
+                              fill_value=(-100, 100), bounds_error=False)
+            assert_array_almost_equal(interp(10), 100)
+            assert_array_almost_equal(interp(-10), -100)
+            if y.ndim == 3:
+                result = [[[-100, 100]] * y.shape[1]] * y.shape[0]
+            else:
+                result = [[-100, 100]] * y.shape[0]
+            assert_array_almost_equal(interp([-10, 10]), result)
+
+        # one broadcastable (3,) fill_value
+        fill_value = [100, 200, 300]
+        for y in (self.y325, self.y225):
+            assert_raises(ValueError, interp1d, self.x5, y, kind=kind,
+                          axis=-1, fill_value=fill_value, bounds_error=False)
+        interp = interp1d(self.x5, self.y235, kind=kind, axis=-1,
+                          fill_value=fill_value, bounds_error=False)
+        assert_array_almost_equal(interp(10), [[100, 200, 300]] * 2)
+        assert_array_almost_equal(interp(-10), [[100, 200, 300]] * 2)
+        assert_array_almost_equal(interp([-10, 10]), [[[100, 100],
+                                                       [200, 200],
+                                                       [300, 300]]] * 2)
+
+        # one broadcastable (2,) fill_value
+        fill_value = [100, 200]
+        assert_raises(ValueError, interp1d, self.x5, self.y235, kind=kind,
+                      axis=-1, fill_value=fill_value, bounds_error=False)
+        for y in (self.y225, self.y325, self.y25):
+            interp = interp1d(self.x5, y, kind=kind, axis=-1,
+                              fill_value=fill_value, bounds_error=False)
+            result = [100, 200]
+            if y.ndim == 3:
+                result = [result] * y.shape[0]
+            assert_array_almost_equal(interp(10), result)
+            assert_array_almost_equal(interp(-10), result)
+            result = [[100, 100], [200, 200]]
+            if y.ndim == 3:
+                result = [result] * y.shape[0]
+            assert_array_almost_equal(interp([-10, 10]), result)
+
+        # broadcastable (3,) lower, singleton upper
+        fill_value = (np.array([-100, -200, -300]), 100)
+        for y in (self.y325, self.y225):
+            assert_raises(ValueError, interp1d, self.x5, y, kind=kind,
+                          axis=-1, fill_value=fill_value, bounds_error=False)
+        interp = interp1d(self.x5, self.y235, kind=kind, axis=-1,
+                          fill_value=fill_value, bounds_error=False)
+        assert_array_almost_equal(interp(10), 100)
+        assert_array_almost_equal(interp(-10), [[-100, -200, -300]] * 2)
+        assert_array_almost_equal(interp([-10, 10]), [[[-100, 100],
+                                                       [-200, 100],
+                                                       [-300, 100]]] * 2)
+
+        # broadcastable (2,) lower, singleton upper
+        fill_value = (np.array([-100, -200]), 100)
+        assert_raises(ValueError, interp1d, self.x5, self.y235, kind=kind,
+                      axis=-1, fill_value=fill_value, bounds_error=False)
+        for y in (self.y225, self.y325, self.y25):
+            interp = interp1d(self.x5, y, kind=kind, axis=-1,
+                              fill_value=fill_value, bounds_error=False)
+            assert_array_almost_equal(interp(10), 100)
+            result = [-100, -200]
+            if y.ndim == 3:
+                result = [result] * y.shape[0]
+            assert_array_almost_equal(interp(-10), result)
+            result = [[-100, 100], [-200, 100]]
+            if y.ndim == 3:
+                result = [result] * y.shape[0]
+            assert_array_almost_equal(interp([-10, 10]), result)
+
+        # broadcastable (3,) lower, broadcastable (3,) upper
+        fill_value = ([-100, -200, -300], [100, 200, 300])
+        for y in (self.y325, self.y225):
+            assert_raises(ValueError, interp1d, self.x5, y, kind=kind,
+                          axis=-1, fill_value=fill_value, bounds_error=False)
+        for ii in range(2):  # check ndarray as well as list here
+            if ii == 1:
+                fill_value = tuple(np.array(f) for f in fill_value)
+            interp = interp1d(self.x5, self.y235, kind=kind, axis=-1,
+                              fill_value=fill_value, bounds_error=False)
+            assert_array_almost_equal(interp(10), [[100, 200, 300]] * 2)
+            assert_array_almost_equal(interp(-10), [[-100, -200, -300]] * 2)
+            assert_array_almost_equal(interp([-10, 10]), [[[-100, 100],
+                                                           [-200, 200],
+                                                           [-300, 300]]] * 2)
+        # broadcastable (2,) lower, broadcastable (2,) upper
+        fill_value = ([-100, -200], [100, 200])
+        assert_raises(ValueError, interp1d, self.x5, self.y235, kind=kind,
+                      axis=-1, fill_value=fill_value, bounds_error=False)
+        for y in (self.y325, self.y225, self.y25):
+            interp = interp1d(self.x5, y, kind=kind, axis=-1,
+                              fill_value=fill_value, bounds_error=False)
+            result = [100, 200]
+            if y.ndim == 3:
+                result = [result] * y.shape[0]
+            assert_array_almost_equal(interp(10), result)
+            result = [-100, -200]
+            if y.ndim == 3:
+                result = [result] * y.shape[0]
+            assert_array_almost_equal(interp(-10), result)
+            result = [[-100, 100], [-200, 200]]
+            if y.ndim == 3:
+                result = [result] * y.shape[0]
+            assert_array_almost_equal(interp([-10, 10]), result)
+
+        # one broadcastable (2, 2) array-like
+        fill_value = [[100, 200], [1000, 2000]]
+        for y in (self.y235, self.y325, self.y25):
+            assert_raises(ValueError, interp1d, self.x5, y, kind=kind,
+                          axis=-1, fill_value=fill_value, bounds_error=False)
+        for ii in range(2):
+            if ii == 1:
+                fill_value = np.array(fill_value)
+            interp = interp1d(self.x5, self.y225, kind=kind, axis=-1,
+                              fill_value=fill_value, bounds_error=False)
+            assert_array_almost_equal(interp(10), [[100, 200], [1000, 2000]])
+            assert_array_almost_equal(interp(-10), [[100, 200], [1000, 2000]])
+            assert_array_almost_equal(interp([-10, 10]), [[[100, 100],
+                                                           [200, 200]],
+                                                          [[1000, 1000],
+                                                           [2000, 2000]]])
+
+        # broadcastable (2, 2) lower, broadcastable (2, 2) upper
+        fill_value = ([[-100, -200], [-1000, -2000]],
+                      [[100, 200], [1000, 2000]])
+        for y in (self.y235, self.y325, self.y25):
+            assert_raises(ValueError, interp1d, self.x5, y, kind=kind,
+                          axis=-1, fill_value=fill_value, bounds_error=False)
+        for ii in range(2):
+            if ii == 1:
+                fill_value = (np.array(fill_value[0]), np.array(fill_value[1]))
+            interp = interp1d(self.x5, self.y225, kind=kind, axis=-1,
+                              fill_value=fill_value, bounds_error=False)
+            assert_array_almost_equal(interp(10), [[100, 200], [1000, 2000]])
+            assert_array_almost_equal(interp(-10), [[-100, -200],
+                                                    [-1000, -2000]])
+            assert_array_almost_equal(interp([-10, 10]), [[[-100, 100],
+                                                           [-200, 200]],
+                                                          [[-1000, 1000],
+                                                           [-2000, 2000]]])
+
+    def test_fill_value(self):
+        # test that two-element fill value works
+        for kind in ('linear', 'nearest', 'cubic', 'slinear', 'quadratic',
+                     'zero', 'previous', 'next'):
+            self._check_fill_value(kind)
+
+    def test_fill_value_writeable(self):
+        # backwards compat: fill_value is a public writeable attribute
+        interp = interp1d(self.x10, self.y10, fill_value=123.0)
+        assert_equal(interp.fill_value, 123.0)
+        interp.fill_value = 321.0
+        assert_equal(interp.fill_value, 321.0)
+
+    def _nd_check_interp(self, kind='linear'):
+        # Check the behavior when the inputs and outputs are multidimensional.
+
+        # Multidimensional input.
+        interp10 = interp1d(self.x10, self.y10, kind=kind)
+        assert_array_almost_equal(interp10(np.array([[3., 5.], [2., 7.]])),
+                                  np.array([[3., 5.], [2., 7.]]))
+
+        # Scalar input -> 0-dim scalar array output
+        assert_(isinstance(interp10(1.2), np.ndarray))
+        assert_equal(interp10(1.2).shape, ())
+
+        # Multidimensional outputs.
+        interp210 = interp1d(self.x10, self.y210, kind=kind)
+        assert_array_almost_equal(interp210(1.), np.array([1., 11.]))
+        assert_array_almost_equal(interp210(np.array([1., 2.])),
+                                  np.array([[1., 2.], [11., 12.]]))
+
+        interp102 = interp1d(self.x10, self.y102, axis=0, kind=kind)
+        assert_array_almost_equal(interp102(1.), np.array([2.0, 3.0]))
+        assert_array_almost_equal(interp102(np.array([1., 3.])),
+                                  np.array([[2., 3.], [6., 7.]]))
+
+        # Both at the same time!
+        x_new = np.array([[3., 5.], [2., 7.]])
+        assert_array_almost_equal(interp210(x_new),
+                                  np.array([[[3., 5.], [2., 7.]],
+                                            [[13., 15.], [12., 17.]]]))
+        assert_array_almost_equal(interp102(x_new),
+                                  np.array([[[6., 7.], [10., 11.]],
+                                            [[4., 5.], [14., 15.]]]))
+
+    def _nd_check_shape(self, kind='linear'):
+        # Check large N-D output shape
+        a = [4, 5, 6, 7]
+        y = np.arange(np.prod(a)).reshape(*a)
+        for n, s in enumerate(a):
+            x = np.arange(s)
+            z = interp1d(x, y, axis=n, kind=kind)
+            assert_array_almost_equal(z(x), y, err_msg=kind)
+
+            x2 = np.arange(2*3*1).reshape((2,3,1)) / 12.
+            b = list(a)
+            b[n:n+1] = [2,3,1]
+            assert_array_almost_equal(z(x2).shape, b, err_msg=kind)
+
+    def test_nd(self):
+        for kind in ('linear', 'cubic', 'slinear', 'quadratic', 'nearest',
+                     'zero', 'previous', 'next'):
+            self._nd_check_interp(kind)
+            self._nd_check_shape(kind)
+
+    def _check_complex(self, dtype=np.complex_, kind='linear'):
+        x = np.array([1, 2.5, 3, 3.1, 4, 6.4, 7.9, 8.0, 9.5, 10])
+        y = x * x ** (1 + 2j)
+        y = y.astype(dtype)
+
+        # simple test
+        c = interp1d(x, y, kind=kind)
+        assert_array_almost_equal(y[:-1], c(x)[:-1])
+
+        # check against interpolating real+imag separately
+        xi = np.linspace(1, 10, 31)
+        cr = interp1d(x, y.real, kind=kind)
+        ci = interp1d(x, y.imag, kind=kind)
+        assert_array_almost_equal(c(xi).real, cr(xi))
+        assert_array_almost_equal(c(xi).imag, ci(xi))
+
+    def test_complex(self):
+        for kind in ('linear', 'nearest', 'cubic', 'slinear', 'quadratic',
+                     'zero', 'previous', 'next'):
+            self._check_complex(np.complex64, kind)
+            self._check_complex(np.complex128, kind)
+
+    @pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
+    def test_circular_refs(self):
+        # Test interp1d can be automatically garbage collected
+        x = np.linspace(0, 1)
+        y = np.linspace(0, 1)
+        # Confirm interp can be released from memory after use
+        with assert_deallocated(interp1d, x, y) as interp:
+            interp([0.1, 0.2])
+            del interp
+
+    def test_overflow_nearest(self):
+        # Test that the x range doesn't overflow when given integers as input
+        for kind in ('nearest', 'previous', 'next'):
+            x = np.array([0, 50, 127], dtype=np.int8)
+            ii = interp1d(x, x, kind=kind)
+            assert_array_almost_equal(ii(x), x)
+
+    def test_local_nans(self):
+        # check that for local interpolation kinds (slinear, zero) a single nan
+        # only affects its local neighborhood
+        x = np.arange(10).astype(float)
+        y = x.copy()
+        y[6] = np.nan
+        for kind in ('zero', 'slinear'):
+            ir = interp1d(x, y, kind=kind)
+            vals = ir([4.9, 7.0])
+            assert_(np.isfinite(vals).all())
+
+    def test_spline_nans(self):
+        # Backwards compat: a single nan makes the whole spline interpolation
+        # return nans in an array of the correct shape; it does not raise,
+        # it just produces quiet nans.
+        x = np.arange(8).astype(float)
+        y = x.copy()
+        yn = y.copy()
+        yn[3] = np.nan
+
+        for kind in ['quadratic', 'cubic']:
+            ir = interp1d(x, y, kind=kind)
+            irn = interp1d(x, yn, kind=kind)
+            for xnew in (6, [1, 6], [[1, 6], [3, 5]]):
+                xnew = np.asarray(xnew)
+                out, outn = ir(x), irn(x)
+                assert_(np.isnan(outn).all())
+                assert_equal(out.shape, outn.shape)
+
+    def test_all_nans(self):
+        # regression test for gh-11637: interp1d core dumps with all-nan `x`
+        x = np.ones(10) * np.nan
+        y = np.arange(10)
+        with assert_raises(ValueError):
+            interp1d(x, y, kind='cubic')
+
+    def test_read_only(self):
+        x = np.arange(0, 10)
+        y = np.exp(-x / 3.0)
+        xnew = np.arange(0, 9, 0.1)
+        # Check both read-only and not read-only:
+        for xnew_writeable in (True, False):
+            xnew.flags.writeable = xnew_writeable
+            x.flags.writeable = False
+            for kind in ('linear', 'nearest', 'zero', 'slinear', 'quadratic',
+                         'cubic'):
+                f = interp1d(x, y, kind=kind)
+                vals = f(xnew)
+                assert_(np.isfinite(vals).all())
+
+    @pytest.mark.parametrize(
+        "kind", ("linear", "nearest", "nearest-up", "previous", "next")
+    )
+    def test_single_value(self, kind):
+        # https://github.com/scipy/scipy/issues/4043
+        f = interp1d([1.5], [6], kind=kind, bounds_error=False,
+                     fill_value=(2, 10))
+        assert_array_equal(f([1, 1.5, 2]), [2, 6, 10])
+        # check still error if bounds_error=True
+        f = interp1d([1.5], [6], kind=kind, bounds_error=True)
+        with assert_raises(ValueError, match="x_new is above"):
+            f(2.0)
+
+
+class TestLagrange:
+
+    def test_lagrange(self):
+        p = poly1d([5, 2, 1, 4, 3])
+        xs = np.arange(len(p.coeffs))
+        ys = p(xs)
+        pl = lagrange(xs, ys)
+        assert_array_almost_equal(p.coeffs, pl.coeffs)
+
+
+class TestAkima1DInterpolator:
+    def test_eval(self):
+        x = np.arange(0., 11.)
+        y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
+        ak = Akima1DInterpolator(x, y)
+        xi = np.array([0., 0.5, 1., 1.5, 2.5, 3.5, 4.5, 5.1, 6.5, 7.2,
+                       8.6, 9.9, 10.])
+        yi = np.array([0., 1.375, 2., 1.5, 1.953125, 2.484375,
+                       4.1363636363636366866103344,
+                       5.9803623910336236590978842,
+                       5.5067291516462386624652936,
+                       5.2031367459745245795943447,
+                       4.1796554159017080820603951,
+                       3.4110386597938129327189927,
+                       3.])
+        assert_allclose(ak(xi), yi)
+
+    def test_eval_2d(self):
+        x = np.arange(0., 11.)
+        y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
+        y = np.column_stack((y, 2. * y))
+        ak = Akima1DInterpolator(x, y)
+        xi = np.array([0., 0.5, 1., 1.5, 2.5, 3.5, 4.5, 5.1, 6.5, 7.2,
+                       8.6, 9.9, 10.])
+        yi = np.array([0., 1.375, 2., 1.5, 1.953125, 2.484375,
+                       4.1363636363636366866103344,
+                       5.9803623910336236590978842,
+                       5.5067291516462386624652936,
+                       5.2031367459745245795943447,
+                       4.1796554159017080820603951,
+                       3.4110386597938129327189927, 3.])
+        yi = np.column_stack((yi, 2. * yi))
+        assert_allclose(ak(xi), yi)
+
+    def test_eval_3d(self):
+        x = np.arange(0., 11.)
+        y_ = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
+        y = np.empty((11, 2, 2))
+        y[:, 0, 0] = y_
+        y[:, 1, 0] = 2. * y_
+        y[:, 0, 1] = 3. * y_
+        y[:, 1, 1] = 4. * y_
+        ak = Akima1DInterpolator(x, y)
+        xi = np.array([0., 0.5, 1., 1.5, 2.5, 3.5, 4.5, 5.1, 6.5, 7.2,
+                       8.6, 9.9, 10.])
+        yi = np.empty((13, 2, 2))
+        yi_ = np.array([0., 1.375, 2., 1.5, 1.953125, 2.484375,
+                        4.1363636363636366866103344,
+                        5.9803623910336236590978842,
+                        5.5067291516462386624652936,
+                        5.2031367459745245795943447,
+                        4.1796554159017080820603951,
+                        3.4110386597938129327189927, 3.])
+        yi[:, 0, 0] = yi_
+        yi[:, 1, 0] = 2. * yi_
+        yi[:, 0, 1] = 3. * yi_
+        yi[:, 1, 1] = 4. * yi_
+        assert_allclose(ak(xi), yi)
+
+    def test_degenerate_case_multidimensional(self):
+        # This test is for issue #5683.
+        x = np.array([0, 1, 2])
+        y = np.vstack((x, x**2)).T
+        ak = Akima1DInterpolator(x, y)
+        x_eval = np.array([0.5, 1.5])
+        y_eval = ak(x_eval)
+        assert_allclose(y_eval, np.vstack((x_eval, x_eval**2)).T)
+
+    def test_extend(self):
+        x = np.arange(0., 11.)
+        y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
+        ak = Akima1DInterpolator(x, y)
+        match = "Extending a 1-D Akima interpolator is not yet implemented"
+        with pytest.raises(NotImplementedError, match=match):
+            ak.extend(None, None)
+
+
+class TestPPolyCommon:
+    # test basic functionality for PPoly and BPoly
+    def test_sort_check(self):
+        c = np.array([[1, 4], [2, 5], [3, 6]])
+        x = np.array([0, 1, 0.5])
+        assert_raises(ValueError, PPoly, c, x)
+        assert_raises(ValueError, BPoly, c, x)
+
+    def test_ctor_c(self):
+        # wrong shape: `c` must be at least 2D
+        with assert_raises(ValueError):
+            PPoly([1, 2], [0, 1])
+
+    def test_extend(self):
+        # Test adding new points to the piecewise polynomial
+        np.random.seed(1234)
+
+        order = 3
+        x = np.unique(np.r_[0, 10 * np.random.rand(30), 10])
+        c = 2*np.random.rand(order+1, len(x)-1, 2, 3) - 1
+
+        for cls in (PPoly, BPoly):
+            pp = cls(c[:,:9], x[:10])
+            pp.extend(c[:,9:], x[10:])
+
+            pp2 = cls(c[:, 10:], x[10:])
+            pp2.extend(c[:, :10], x[:10])
+
+            pp3 = cls(c, x)
+
+            assert_array_equal(pp.c, pp3.c)
+            assert_array_equal(pp.x, pp3.x)
+            assert_array_equal(pp2.c, pp3.c)
+            assert_array_equal(pp2.x, pp3.x)
+
+    def test_extend_diff_orders(self):
+        # Test extending a polynomial with one of a different order
+        np.random.seed(1234)
+
+        x = np.linspace(0, 1, 6)
+        c = np.random.rand(2, 5)
+
+        x2 = np.linspace(1, 2, 6)
+        c2 = np.random.rand(4, 5)
+
+        for cls in (PPoly, BPoly):
+            pp1 = cls(c, x)
+            pp2 = cls(c2, x2)
+
+            pp_comb = cls(c, x)
+            pp_comb.extend(c2, x2[1:])
+
+            # NB. doesn't match to pp1 at the endpoint, because pp1 is not
+            #     continuous with pp2 as we took random coefs.
+            xi1 = np.linspace(0, 1, 300, endpoint=False)
+            xi2 = np.linspace(1, 2, 300)
+
+            assert_allclose(pp1(xi1), pp_comb(xi1))
+            assert_allclose(pp2(xi2), pp_comb(xi2))
+
+    def test_extend_descending(self):
+        np.random.seed(0)
+
+        order = 3
+        x = np.sort(np.random.uniform(0, 10, 20))
+        c = np.random.rand(order + 1, x.shape[0] - 1, 2, 3)
+
+        for cls in (PPoly, BPoly):
+            p = cls(c, x)
+
+            p1 = cls(c[:, :9], x[:10])
+            p1.extend(c[:, 9:], x[10:])
+
+            p2 = cls(c[:, 10:], x[10:])
+            p2.extend(c[:, :10], x[:10])
+
+            assert_array_equal(p1.c, p.c)
+            assert_array_equal(p1.x, p.x)
+            assert_array_equal(p2.c, p.c)
+            assert_array_equal(p2.x, p.x)
+
+    def test_shape(self):
+        np.random.seed(1234)
+        c = np.random.rand(8, 12, 5, 6, 7)
+        x = np.sort(np.random.rand(13))
+        xp = np.random.rand(3, 4)
+        for cls in (PPoly, BPoly):
+            p = cls(c, x)
+            assert_equal(p(xp).shape, (3, 4, 5, 6, 7))
+
+        # 'scalars'
+        for cls in (PPoly, BPoly):
+            p = cls(c[..., 0, 0, 0], x)
+
+            assert_equal(np.shape(p(0.5)), ())
+            assert_equal(np.shape(p(np.array(0.5))), ())
+
+            assert_raises(ValueError, p, np.array([[0.1, 0.2], [0.4]], dtype=object))
+
+    def test_complex_coef(self):
+        np.random.seed(12345)
+        x = np.sort(np.random.random(13))
+        c = np.random.random((8, 12)) * (1. + 0.3j)
+        c_re, c_im = c.real, c.imag
+        xp = np.random.random(5)
+        for cls in (PPoly, BPoly):
+            p, p_re, p_im = cls(c, x), cls(c_re, x), cls(c_im, x)
+            for nu in [0, 1, 2]:
+                assert_allclose(p(xp, nu).real, p_re(xp, nu))
+                assert_allclose(p(xp, nu).imag, p_im(xp, nu))
+
+    def test_axis(self):
+        np.random.seed(12345)
+        c = np.random.rand(3, 4, 5, 6, 7, 8)
+        c_s = c.shape
+        xp = np.random.random((1, 2))
+        for axis in (0, 1, 2, 3):
+            m = c.shape[axis+1]
+            x = np.sort(np.random.rand(m+1))
+            for cls in (PPoly, BPoly):
+                p = cls(c, x, axis=axis)
+                assert_equal(p.c.shape,
+                             c_s[axis:axis+2] + c_s[:axis] + c_s[axis+2:])
+                res = p(xp)
+                targ_shape = c_s[:axis] + xp.shape + c_s[2+axis:]
+                assert_equal(res.shape, targ_shape)
+
+                # deriv/antideriv does not drop the axis
+                for p1 in [cls(c, x, axis=axis).derivative(),
+                           cls(c, x, axis=axis).derivative(2),
+                           cls(c, x, axis=axis).antiderivative(),
+                           cls(c, x, axis=axis).antiderivative(2)]:
+                    assert_equal(p1.axis, p.axis)
+
+        # c array needs two axes for the coefficients and intervals, so
+        # 0 <= axis < c.ndim-1; raise otherwise
+        for axis in (-1, 4, 5, 6):
+            for cls in (BPoly, PPoly):
+                assert_raises(ValueError, cls, **dict(c=c, x=x, axis=axis))
+
+
+class TestPolySubclassing:
+    class P(PPoly):
+        pass
+
+    class B(BPoly):
+        pass
+
+    def _make_polynomials(self):
+        np.random.seed(1234)
+        x = np.sort(np.random.random(3))
+        c = np.random.random((4, 2))
+        return self.P(c, x), self.B(c, x)
+
+    def test_derivative(self):
+        pp, bp = self._make_polynomials()
+        for p in (pp, bp):
+            pd = p.derivative()
+            assert_equal(p.__class__, pd.__class__)
+
+        ppa = pp.antiderivative()
+        assert_equal(pp.__class__, ppa.__class__)
+
+    def test_from_spline(self):
+        np.random.seed(1234)
+        x = np.sort(np.r_[0, np.random.rand(11), 1])
+        y = np.random.rand(len(x))
+
+        spl = splrep(x, y, s=0)
+        pp = self.P.from_spline(spl)
+        assert_equal(pp.__class__, self.P)
+
+    def test_conversions(self):
+        pp, bp = self._make_polynomials()
+
+        pp1 = self.P.from_bernstein_basis(bp)
+        assert_equal(pp1.__class__, self.P)
+
+        bp1 = self.B.from_power_basis(pp)
+        assert_equal(bp1.__class__, self.B)
+
+    def test_from_derivatives(self):
+        x = [0, 1, 2]
+        y = [[1], [2], [3]]
+        bp = self.B.from_derivatives(x, y)
+        assert_equal(bp.__class__, self.B)
+
+
+class TestPPoly:
+    def test_simple(self):
+        c = np.array([[1, 4], [2, 5], [3, 6]])
+        x = np.array([0, 0.5, 1])
+        p = PPoly(c, x)
+        assert_allclose(p(0.3), 1*0.3**2 + 2*0.3 + 3)
+        assert_allclose(p(0.7), 4*(0.7-0.5)**2 + 5*(0.7-0.5) + 6)
+
+    def test_periodic(self):
+        c = np.array([[1, 4], [2, 5], [3, 6]])
+        x = np.array([0, 0.5, 1])
+        p = PPoly(c, x, extrapolate='periodic')
+
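+        # the period is x[-1] - x[0] = 1, so 1.3 wraps to 0.3 and
+        # -0.3 wraps to 0.7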
+        assert_allclose(p(1.3), 1 * 0.3 ** 2 + 2 * 0.3 + 3)
+        assert_allclose(p(-0.3), 4 * (0.7 - 0.5) ** 2 + 5 * (0.7 - 0.5) + 6)
+
+        assert_allclose(p(1.3, 1), 2 * 0.3 + 2)
+        assert_allclose(p(-0.3, 1), 8 * (0.7 - 0.5) + 5)
+
+    def test_read_only(self):
+        c = np.array([[1, 4], [2, 5], [3, 6]])
+        x = np.array([0, 0.5, 1])
+        xnew = np.array([0, 0.1, 0.2])
+        PPoly(c, x, extrapolate='periodic')
+
+        for writeable in (True, False):
+            x.flags.writeable = writeable
+            f = PPoly(c, x)
+            vals = f(xnew)
+            assert_(np.isfinite(vals).all())
+
+    def test_descending(self):
+        def binom_matrix(power):
+            n = np.arange(power + 1).reshape(-1, 1)
+            k = np.arange(power + 1)
+            B = binom(n, k)
+            return B[::-1, ::-1]
+
+        np.random.seed(0)
+
+        power = 3
+        for m in [10, 20, 30]:
+            x = np.sort(np.random.uniform(0, 10, m + 1))
+            ca = np.random.uniform(-2, 2, size=(power + 1, m))
+
+            h = np.diff(x)
+            h_powers = h[None, :] ** np.arange(power + 1)[::-1, None]
+            B = binom_matrix(power)
+            cap = ca * h_powers
+            cdp = np.dot(B.T, cap)
+            cd = cdp / h_powers
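+            # in effect, re-expand each local polynomial about the opposite
+            # endpoint of its interval: rescale to the unit interval, apply
+            # the binomial matrix, then undo the rescaling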
+
+            pa = PPoly(ca, x, extrapolate=True)
+            pd = PPoly(cd[:, ::-1], x[::-1], extrapolate=True)
+
+            x_test = np.random.uniform(-10, 20, 100)
+            assert_allclose(pa(x_test), pd(x_test), rtol=1e-13)
+            assert_allclose(pa(x_test, 1), pd(x_test, 1), rtol=1e-13)
+
+            pa_d = pa.derivative()
+            pd_d = pd.derivative()
+
+            assert_allclose(pa_d(x_test), pd_d(x_test), rtol=1e-13)
+
+            # Antiderivatives won't be equal because fixing continuity is
+            # done in the reverse order, but the definite integrals (the
+            # differences of antiderivative values) should agree.
+            pa_i = pa.antiderivative()
+            pd_i = pd.antiderivative()
+            for a, b in np.random.uniform(-10, 20, (5, 2)):
+                int_a = pa.integrate(a, b)
+                int_d = pd.integrate(a, b)
+                assert_allclose(int_a, int_d, rtol=1e-13)
+                assert_allclose(pa_i(b) - pa_i(a), pd_i(b) - pd_i(a),
+                                rtol=1e-13)
+
+            roots_d = pd.roots()
+            roots_a = pa.roots()
+            assert_allclose(roots_a, np.sort(roots_d), rtol=1e-12)
+
+    def test_multi_shape(self):
+        c = np.random.rand(6, 2, 1, 2, 3)
+        x = np.array([0, 0.5, 1])
+        p = PPoly(c, x)
+        assert_equal(p.x.shape, x.shape)
+        assert_equal(p.c.shape, c.shape)
+        assert_equal(p(0.3).shape, c.shape[2:])
+
+        assert_equal(p(np.random.rand(5, 6)).shape, (5, 6) + c.shape[2:])
+
+        dp = p.derivative()
+        assert_equal(dp.c.shape, (5, 2, 1, 2, 3))
+        ip = p.antiderivative()
+        assert_equal(ip.c.shape, (7, 2, 1, 2, 3))
+
+    def test_construct_fast(self):
+        np.random.seed(1234)
+        c = np.array([[1, 4], [2, 5], [3, 6]], dtype=float)
+        x = np.array([0, 0.5, 1])
+        p = PPoly.construct_fast(c, x)
+        assert_allclose(p(0.3), 1*0.3**2 + 2*0.3 + 3)
+        assert_allclose(p(0.7), 4*(0.7-0.5)**2 + 5*(0.7-0.5) + 6)
+
+    def test_vs_alternative_implementations(self):
+        np.random.seed(1234)
+        c = np.random.rand(3, 12, 22)
+        x = np.sort(np.r_[0, np.random.rand(11), 1])
+
+        p = PPoly(c, x)
+
+        xp = np.r_[0.3, 0.5, 0.33, 0.6]
+        expected = _ppoly_eval_1(c, x, xp)
+        assert_allclose(p(xp), expected)
+
+        expected = _ppoly_eval_2(c[:,:,0], x, xp)
+        assert_allclose(p(xp)[:,0], expected)
+
+    def test_from_spline(self):
+        np.random.seed(1234)
+        x = np.sort(np.r_[0, np.random.rand(11), 1])
+        y = np.random.rand(len(x))
+
+        spl = splrep(x, y, s=0)
+        pp = PPoly.from_spline(spl)
+
+        xi = np.linspace(0, 1, 200)
+        assert_allclose(pp(xi), splev(xi, spl))
+
+        # make sure .from_spline accepts BSpline objects
+        b = BSpline(*spl)
+        ppp = PPoly.from_spline(b)
+        assert_allclose(ppp(xi), b(xi))
+
+        # BSpline's extrapolate attribute propagates unless overridden
+        t, c, k = spl
+        for extrap in (None, True, False):
+            b = BSpline(t, c, k, extrapolate=extrap)
+            p = PPoly.from_spline(b)
+            assert_equal(p.extrapolate, b.extrapolate)
+
+    def test_derivative_simple(self):
+        np.random.seed(1234)
+        c = np.array([[4, 3, 2, 1]]).T
+        dc = np.array([[3*4, 2*3, 2]]).T
+        ddc = np.array([[2*3*4, 1*2*3]]).T
+        x = np.array([0, 1])
+
+        pp = PPoly(c, x)
+        dpp = PPoly(dc, x)
+        ddpp = PPoly(ddc, x)
+
+        assert_allclose(pp.derivative().c, dpp.c)
+        assert_allclose(pp.derivative(2).c, ddpp.c)
+
+    def test_derivative_eval(self):
+        np.random.seed(1234)
+        x = np.sort(np.r_[0, np.random.rand(11), 1])
+        y = np.random.rand(len(x))
+
+        spl = splrep(x, y, s=0)
+        pp = PPoly.from_spline(spl)
+
+        xi = np.linspace(0, 1, 200)
+        for dx in range(0, 3):
+            assert_allclose(pp(xi, dx), splev(xi, spl, dx))
+
+    def test_derivative(self):
+        np.random.seed(1234)
+        x = np.sort(np.r_[0, np.random.rand(11), 1])
+        y = np.random.rand(len(x))
+
+        spl = splrep(x, y, s=0, k=5)
+        pp = PPoly.from_spline(spl)
+
+        xi = np.linspace(0, 1, 200)
+        for dx in range(0, 10):
+            assert_allclose(pp(xi, dx), pp.derivative(dx)(xi),
+                            err_msg="dx=%d" % (dx,))
+
+    def test_antiderivative_of_constant(self):
+        # https://github.com/scipy/scipy/issues/4216
+        p = PPoly([[1.]], [0, 1])
+        assert_equal(p.antiderivative().c, PPoly([[1], [0]], [0, 1]).c)
+        assert_equal(p.antiderivative().x, PPoly([[1], [0]], [0, 1]).x)
+
+    def test_antiderivative_regression_4355(self):
+        # https://github.com/scipy/scipy/issues/4355
+        p = PPoly([[1., 0.5]], [0, 1, 2])
+        q = p.antiderivative()
+        assert_equal(q.c, [[1, 0.5], [0, 1]])
+        assert_equal(q.x, [0, 1, 2])
+        assert_allclose(p.integrate(0, 2), 1.5)
+        assert_allclose(q(2) - q(0), 1.5)
+
+    def test_antiderivative_simple(self):
+        np.random.seed(1234)
+        # [ p1(x) = 3*x**2 + 2*x + 1,
+        #   p2(x) = 1.6875]
+        c = np.array([[3, 2, 1], [0, 0, 1.6875]]).T
+        # [ pp1(x) = x**3 + x**2 + x,
+        #   pp2(x) = 1.6875*(x - 0.25) + pp1(0.25)]
+        ic = np.array([[1, 1, 1, 0], [0, 0, 1.6875, 0.328125]]).T
+        # [ ppp1(x) = (1/4)*x**4 + (1/3)*x**3 + (1/2)*x**2,
+        #   ppp2(x) = (1.6875/2)*(x - 0.25)**2 + pp1(0.25)*x + ppp1(0.25)]
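+        # (constants: pp1(0.25) = 0.25**3 + 0.25**2 + 0.25 = 0.328125 and
+        #  ppp1(0.25) = 0.25**4/4 + 0.25**3/3 + 0.25**2/2
+        #            = 0.037434895833333336)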
+        iic = np.array([[1/4, 1/3, 1/2, 0, 0],
+                        [0, 0, 1.6875/2, 0.328125, 0.037434895833333336]]).T
+        x = np.array([0, 0.25, 1])
+
+        pp = PPoly(c, x)
+        ipp = pp.antiderivative()
+        iipp = pp.antiderivative(2)
+        iipp2 = ipp.antiderivative()
+
+        assert_allclose(ipp.x, x)
+        assert_allclose(ipp.c.T, ic.T)
+        assert_allclose(iipp.c.T, iic.T)
+        assert_allclose(iipp2.c.T, iic.T)
+
+    def test_antiderivative_vs_derivative(self):
+        np.random.seed(1234)
+        x = np.linspace(0, 1, 30)**2
+        y = np.random.rand(len(x))
+        spl = splrep(x, y, s=0, k=5)
+        pp = PPoly.from_spline(spl)
+
+        for dx in range(0, 10):
+            ipp = pp.antiderivative(dx)
+
+            # check that derivative is inverse op
+            pp2 = ipp.derivative(dx)
+            assert_allclose(pp.c, pp2.c)
+
+            # check continuity
+            for k in range(dx):
+                pp2 = ipp.derivative(k)
+
+                r = 1e-13
+                endpoint = r*pp2.x[:-1] + (1 - r)*pp2.x[1:]
+
+                assert_allclose(pp2(pp2.x[1:]), pp2(endpoint),
+                                rtol=1e-7, err_msg="dx=%d k=%d" % (dx, k))
+
+    def test_antiderivative_vs_spline(self):
+        np.random.seed(1234)
+        x = np.sort(np.r_[0, np.random.rand(11), 1])
+        y = np.random.rand(len(x))
+
+        spl = splrep(x, y, s=0, k=5)
+        pp = PPoly.from_spline(spl)
+
+        for dx in range(0, 10):
+            pp2 = pp.antiderivative(dx)
+            spl2 = splantider(spl, dx)
+
+            xi = np.linspace(0, 1, 200)
+            assert_allclose(pp2(xi), splev(xi, spl2),
+                            rtol=1e-7)
+
+    def test_antiderivative_continuity(self):
+        c = np.array([[2, 1, 2, 2], [2, 1, 3, 3]]).T
+        x = np.array([0, 0.5, 1])
+
+        p = PPoly(c, x)
+        ip = p.antiderivative()
+
+        # check continuity
+        assert_allclose(ip(0.5 - 1e-9), ip(0.5 + 1e-9), rtol=1e-8)
+
+        # check that only lowest order coefficients were changed
+        p2 = ip.derivative()
+        assert_allclose(p2.c, p.c)
+
+    def test_integrate(self):
+        np.random.seed(1234)
+        x = np.sort(np.r_[0, np.random.rand(11), 1])
+        y = np.random.rand(len(x))
+
+        spl = splrep(x, y, s=0, k=5)
+        pp = PPoly.from_spline(spl)
+
+        a, b = 0.3, 0.9
+        ig = pp.integrate(a, b)
+
+        ipp = pp.antiderivative()
+        assert_allclose(ig, ipp(b) - ipp(a))
+        assert_allclose(ig, splint(a, b, spl))
+
+        a, b = -0.3, 0.9
+        ig = pp.integrate(a, b, extrapolate=True)
+        assert_allclose(ig, ipp(b) - ipp(a))
+
+        assert_(np.isnan(pp.integrate(a, b, extrapolate=False)).all())
+
+    def test_integrate_readonly(self):
+        x = np.array([1, 2, 4])
+        c = np.array([[0., 0.], [-1., -1.], [2., -0.], [1., 2.]])
+
+        for writeable in (True, False):
+            x.flags.writeable = writeable
+
+            P = PPoly(c, x)
+            vals = P.integrate(1, 4)
+
+            assert_(np.isfinite(vals).all())
+
+    def test_integrate_periodic(self):
+        x = np.array([1, 2, 4])
+        c = np.array([[0., 0.], [-1., -1.], [2., -0.], [1., 2.]])
+
+        P = PPoly(c, x, extrapolate='periodic')
+        I = P.antiderivative()
+
+        period_int = I(4) - I(1)
+
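+        # integrals over any whole number of periods are multiples of
+        # period_int; partial ranges fold back into the base interval [1, 4]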
+        assert_allclose(P.integrate(1, 4), period_int)
+        assert_allclose(P.integrate(-10, -7), period_int)
+        assert_allclose(P.integrate(-10, -4), 2 * period_int)
+
+        assert_allclose(P.integrate(1.5, 2.5), I(2.5) - I(1.5))
+        assert_allclose(P.integrate(3.5, 5), I(2) - I(1) + I(4) - I(3.5))
+        assert_allclose(P.integrate(3.5 + 12, 5 + 12),
+                        I(2) - I(1) + I(4) - I(3.5))
+        assert_allclose(P.integrate(3.5, 5 + 12),
+                        I(2) - I(1) + I(4) - I(3.5) + 4 * period_int)
+
+        assert_allclose(P.integrate(0, -1), I(2) - I(3))
+        assert_allclose(P.integrate(-9, -10), I(2) - I(3))
+        assert_allclose(P.integrate(0, -10), I(2) - I(3) - 3 * period_int)
+
+    def test_roots(self):
+        x = np.linspace(0, 1, 31)**2
+        y = np.sin(30*x)
+
+        spl = splrep(x, y, s=0, k=3)
+        pp = PPoly.from_spline(spl)
+
+        r = pp.roots()
+        r = r[(r >= 0 - 1e-15) & (r <= 1 + 1e-15)]
+        assert_allclose(r, sproot(spl), atol=1e-15)
+
+    def test_roots_idzero(self):
+        # Roots for piecewise polynomials with identically zero
+        # sections: such a section is reported as its start point
+        # followed by a nan.
+        c = np.array([[-1, 0.25], [0, 0], [-1, 0.25]]).T
+        x = np.array([0, 0.4, 0.6, 1.0])
+
+        pp = PPoly(c, x)
+        assert_array_equal(pp.roots(),
+                           [0.25, 0.4, np.nan, 0.6 + 0.25])
+
+        # ditto for p.solve(const) with sections identically equal const
+        const = 2.
+        c1 = c.copy()
+        c1[1, :] += const
+        pp1 = PPoly(c1, x)
+
+        assert_array_equal(pp1.solve(const),
+                           [0.25, 0.4, np.nan, 0.6 + 0.25])
+
+    def test_roots_all_zero(self):
+        # test the code path for the polynomial being identically zero everywhere
+        c = [[0], [0]]
+        x = [0, 1]
+        p = PPoly(c, x)
+        assert_array_equal(p.roots(), [0, np.nan])
+        assert_array_equal(p.solve(0), [0, np.nan])
+        assert_array_equal(p.solve(1), [])
+
+        c = [[0, 0], [0, 0]]
+        x = [0, 1, 2]
+        p = PPoly(c, x)
+        assert_array_equal(p.roots(), [0, np.nan, 1, np.nan])
+        assert_array_equal(p.solve(0), [0, np.nan, 1, np.nan])
+        assert_array_equal(p.solve(1), [])
+
+    def test_roots_repeated(self):
+        # Check roots repeated in multiple sections are reported only
+        # once.
+
+        # [(x + 1)**2 - 1, -x**2] ; x == 0 is a repeated root
+        c = np.array([[1, 0, -1], [-1, 0, 0]]).T
+        x = np.array([-1, 0, 1])
+
+        pp = PPoly(c, x)
+        assert_array_equal(pp.roots(), [-2, 0])
+        assert_array_equal(pp.roots(extrapolate=False), [0])
+
+    def test_roots_discont(self):
+        # Check that a discontinuity across zero is reported as root
+        c = np.array([[1], [-1]]).T
+        x = np.array([0, 0.5, 1])
+        pp = PPoly(c, x)
+        assert_array_equal(pp.roots(), [0.5])
+        assert_array_equal(pp.roots(discontinuity=False), [])
+
+        # ditto for a discontinuity across y:
+        assert_array_equal(pp.solve(0.5), [0.5])
+        assert_array_equal(pp.solve(0.5, discontinuity=False), [])
+
+        assert_array_equal(pp.solve(1.5), [])
+        assert_array_equal(pp.solve(1.5, discontinuity=False), [])
+
+    def test_roots_random(self):
+        # Check high-order polynomials with random coefficients
+        np.random.seed(1234)
+
+        num = 0
+
+        for extrapolate in (True, False):
+            for order in range(0, 20):
+                x = np.unique(np.r_[0, 10 * np.random.rand(30), 10])
+                c = 2*np.random.rand(order+1, len(x)-1, 2, 3) - 1
+
+                pp = PPoly(c, x)
+                for y in [0, np.random.random()]:
+                    r = pp.solve(y, discontinuity=False, extrapolate=extrapolate)
+
+                    for i in range(2):
+                        for j in range(3):
+                            rr = r[i,j]
+                            if rr.size > 0:
+                                # Check that the reported roots indeed are roots
+                                num += rr.size
+                                val = pp(rr, extrapolate=extrapolate)[:,i,j]
+                                cmpval = pp(rr, nu=1,
+                                            extrapolate=extrapolate)[:,i,j]
+                                msg = "(%r) r = %s" % (extrapolate, repr(rr),)
+                                assert_allclose((val-y) / cmpval, 0, atol=1e-7,
+                                                err_msg=msg)
+
+        # Check that we checked a number of roots
+        assert_(num > 100, repr(num))
+
+    def test_roots_croots(self):
+        # Test the complex root finding algorithm
+        np.random.seed(1234)
+
+        for k in range(1, 15):
+            c = np.random.rand(k, 1, 130)
+
+            if k == 3:
+                # add a case with zero discriminant
+                c[:,0,0] = 1, 2, 1
+
+            for y in [0, np.random.random()]:
+                w = np.empty(c.shape, dtype=complex)
+                _ppoly._croots_poly1(c, w)
+
+                if k == 1:
+                    assert_(np.isnan(w).all())
+                    continue
+
+                res = 0
+                cres = 0
+                for i in range(k):
+                    res += c[i,None] * w**(k-1-i)
+                    cres += abs(c[i,None] * w**(k-1-i))
+                with np.errstate(invalid='ignore'):
+                    res /= cres
+                res = res.ravel()
+                res = res[~np.isnan(res)]
+                assert_allclose(res, 0, atol=1e-10)
+
+    def test_extrapolate_attr(self):
+        # [ 1 - x**2 ]
+        c = np.array([[-1, 0, 1]]).T
+        x = np.array([0, 1])
+
+        for extrapolate in [True, False, None]:
+            pp = PPoly(c, x, extrapolate=extrapolate)
+            pp_d = pp.derivative()
+            pp_i = pp.antiderivative()
+
+            if extrapolate is False:
+                assert_(np.isnan(pp([-0.1, 1.1])).all())
+                assert_(np.isnan(pp_i([-0.1, 1.1])).all())
+                assert_(np.isnan(pp_d([-0.1, 1.1])).all())
+                assert_equal(pp.roots(), [1])
+            else:
+                assert_allclose(pp([-0.1, 1.1]), [1-0.1**2, 1-1.1**2])
+                assert_(not np.isnan(pp_i([-0.1, 1.1])).any())
+                assert_(not np.isnan(pp_d([-0.1, 1.1])).any())
+                assert_allclose(pp.roots(), [1, -1])
+
+
+class TestBPoly:
+    def test_simple(self):
+        x = [0, 1]
+        c = [[3]]
+        bp = BPoly(c, x)
+        assert_allclose(bp(0.1), 3.)
+
+    def test_simple2(self):
+        x = [0, 1]
+        c = [[3], [1]]
+        bp = BPoly(c, x)   # 3*(1-x) + 1*x
+        assert_allclose(bp(0.1), 3*0.9 + 1.*0.1)
+
+    def test_simple3(self):
+        x = [0, 1]
+        c = [[3], [1], [4]]
+        bp = BPoly(c, x)   # 3 * (1-x)**2 + 1 * 2*x*(1-x) + 4 * x**2
+        assert_allclose(bp(0.2),
+                        3 * 0.8*0.8 + 1 * 2*0.2*0.8 + 4 * 0.2*0.2)
+
+    def test_simple4(self):
+        x = [0, 1]
+        c = [[1], [1], [1], [2]]
+        bp = BPoly(c, x)
+        assert_allclose(bp(0.3), 0.7**3 +
+                                 3 * 0.7**2 * 0.3 +
+                                 3 * 0.7 * 0.3**2 +
+                                 2 * 0.3**3)
+
+    def test_simple5(self):
+        x = [0, 1]
+        c = [[1], [1], [8], [2], [1]]
+        bp = BPoly(c, x)
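+        # bp(x) = sum_a c[a] * binom(4, a) * x**a * (1-x)**(4-a)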
+        assert_allclose(bp(0.3), 0.7**4 +
+                                 4 * 0.7**3 * 0.3 +
+                                 8 * 6 * 0.7**2 * 0.3**2 +
+                                 2 * 4 * 0.7 * 0.3**3 +
+                                 0.3**4)
+
+    def test_periodic(self):
+        x = [0, 1, 3]
+        c = [[3, 0], [0, 0], [0, 2]]
+        # [3*(1-x)**2, 2*((x-1)/2)**2]
+        bp = BPoly(c, x, extrapolate='periodic')
+
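+        # the period is x[-1] - x[0] = 3, so 3.4 wraps to 0.4 and
+        # -1.3 wraps to 1.7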
+        assert_allclose(bp(3.4), 3 * 0.6**2)
+        assert_allclose(bp(-1.3), 2 * (0.7/2)**2)
+
+        assert_allclose(bp(3.4, 1), -6 * 0.6)
+        assert_allclose(bp(-1.3, 1), 2 * (0.7/2))
+
+    def test_descending(self):
+        np.random.seed(0)
+
+        power = 3
+        for m in [10, 20, 30]:
+            x = np.sort(np.random.uniform(0, 10, m + 1))
+            ca = np.random.uniform(-0.1, 0.1, size=(power + 1, m))
+            # We only need to flip the coefficients to get it right!
+            cd = ca[::-1].copy()
+
+            pa = BPoly(ca, x, extrapolate=True)
+            pd = BPoly(cd[:, ::-1], x[::-1], extrapolate=True)
+
+            x_test = np.random.uniform(-10, 20, 100)
+            assert_allclose(pa(x_test), pd(x_test), rtol=1e-13)
+            assert_allclose(pa(x_test, 1), pd(x_test, 1), rtol=1e-13)
+
+            pa_d = pa.derivative()
+            pd_d = pd.derivative()
+
+            assert_allclose(pa_d(x_test), pd_d(x_test), rtol=1e-13)
+
+            # Antiderivatives won't be equal because fixing continuity is
+            # done in the reverse order, but the definite integrals (the
+            # differences of antiderivative values) should agree.
+            pa_i = pa.antiderivative()
+            pd_i = pd.antiderivative()
+            for a, b in np.random.uniform(-10, 20, (5, 2)):
+                int_a = pa.integrate(a, b)
+                int_d = pd.integrate(a, b)
+                assert_allclose(int_a, int_d, rtol=1e-12)
+                assert_allclose(pa_i(b) - pa_i(a), pd_i(b) - pd_i(a),
+                                rtol=1e-12)
+
+    def test_multi_shape(self):
+        c = np.random.rand(6, 2, 1, 2, 3)
+        x = np.array([0, 0.5, 1])
+        p = BPoly(c, x)
+        assert_equal(p.x.shape, x.shape)
+        assert_equal(p.c.shape, c.shape)
+        assert_equal(p(0.3).shape, c.shape[2:])
+        assert_equal(p(np.random.rand(5,6)).shape,
+                     (5,6)+c.shape[2:])
+
+        dp = p.derivative()
+        assert_equal(dp.c.shape, (5, 2, 1, 2, 3))
+
+    def test_interval_length(self):
+        x = [0, 2]
+        c = [[3], [1], [4]]
+        bp = BPoly(c, x)
+        xval = 0.1
+        s = xval / 2  # s = (x - xa) / (xb - xa)
+        assert_allclose(bp(xval), 3 * (1-s)*(1-s) + 1 * 2*s*(1-s) + 4 * s*s)
+
+    def test_two_intervals(self):
+        x = [0, 1, 3]
+        c = [[3, 0], [0, 0], [0, 2]]
+        bp = BPoly(c, x)  # [3*(1-x)**2, 2*((x-1)/2)**2]
+
+        assert_allclose(bp(0.4), 3 * 0.6*0.6)
+        assert_allclose(bp(1.7), 2 * (0.7/2)**2)
+
+    def test_extrapolate_attr(self):
+        x = [0, 2]
+        c = [[3], [1], [4]]
+        bp = BPoly(c, x)
+
+        for extrapolate in (True, False, None):
+            bp = BPoly(c, x, extrapolate=extrapolate)
+            bp_d = bp.derivative()
+            if extrapolate is False:
+                assert_(np.isnan(bp([-0.1, 2.1])).all())
+                assert_(np.isnan(bp_d([-0.1, 2.1])).all())
+            else:
+                assert_(not np.isnan(bp([-0.1, 2.1])).any())
+                assert_(not np.isnan(bp_d([-0.1, 2.1])).any())
+
+
+class TestBPolyCalculus:
+    def test_derivative(self):
+        x = [0, 1, 3]
+        c = [[3, 0], [0, 0], [0, 2]]
+        bp = BPoly(c, x)  # [3*(1-x)**2, 2*((x-1)/2)**2]
+        bp_der = bp.derivative()
+        assert_allclose(bp_der(0.4), -6*(0.6))
+        assert_allclose(bp_der(1.7), 0.7)
+
+        # derivatives in-place
+        assert_allclose([bp(0.4, nu=1), bp(0.4, nu=2), bp(0.4, nu=3)],
+                        [-6*(1-0.4), 6., 0.])
+        assert_allclose([bp(1.7, nu=1), bp(1.7, nu=2), bp(1.7, nu=3)],
+                        [0.7, 1., 0])
+
+    def test_derivative_ppoly(self):
+        # make sure it's consistent w/ power basis
+        np.random.seed(1234)
+        m, k = 5, 8   # number of intervals, order
+        x = np.sort(np.random.random(m))
+        c = np.random.random((k, m-1))
+        bp = BPoly(c, x)
+        pp = PPoly.from_bernstein_basis(bp)
+
+        for d in range(k):
+            bp = bp.derivative()
+            pp = pp.derivative()
+            xp = np.linspace(x[0], x[-1], 21)
+            assert_allclose(bp(xp), pp(xp))
+
+    def test_deriv_inplace(self):
+        np.random.seed(1234)
+        m, k = 5, 8   # number of intervals, order
+        x = np.sort(np.random.random(m))
+        c = np.random.random((k, m-1))
+
+        # test both real and complex coefficients
+        for cc in [c.copy(), c*(1. + 2.j)]:
+            bp = BPoly(cc, x)
+            xp = np.linspace(x[0], x[-1], 21)
+            for i in range(k):
+                assert_allclose(bp(xp, i), bp.derivative(i)(xp))
+
+    def test_antiderivative_simple(self):
+        # f(x) = x        for x \in [0, 1),
+        #        (x-1)/2  for x \in [1, 3]
+        #
+        # antiderivative is then
+        # F(x) = x**2 / 2            for x \in [0, 1),
+        #        0.5*x*(x/2 - 1) + A  for x \in [1, 3]
+        # where A = 3/4 for continuity at x = 1.
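+        # (left limit at x = 1: 1**2/2 = 1/2; right limit:
+        #  0.5*1*(1/2 - 1) + A = A - 1/4, hence A = 3/4)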
+        x = [0, 1, 3]
+        c = [[0, 0], [1, 1]]
+
+        bp = BPoly(c, x)
+        bi = bp.antiderivative()
+
+        xx = np.linspace(0, 3, 11)
+        assert_allclose(bi(xx),
+                        np.where(xx < 1, xx**2 / 2.,
+                                         0.5 * xx * (xx/2. - 1) + 3./4),
+                        atol=1e-12, rtol=1e-12)
+
+    def test_der_antider(self):
+        np.random.seed(1234)
+        x = np.sort(np.random.random(11))
+        c = np.random.random((4, 10, 2, 3))
+        bp = BPoly(c, x)
+
+        xx = np.linspace(x[0], x[-1], 100)
+        assert_allclose(bp.antiderivative().derivative()(xx),
+                        bp(xx), atol=1e-12, rtol=1e-12)
+
+    def test_antider_ppoly(self):
+        np.random.seed(1234)
+        x = np.sort(np.random.random(11))
+        c = np.random.random((4, 10, 2, 3))
+        bp = BPoly(c, x)
+        pp = PPoly.from_bernstein_basis(bp)
+
+        xx = np.linspace(x[0], x[-1], 10)
+
+        assert_allclose(bp.antiderivative(2)(xx),
+                        pp.antiderivative(2)(xx), atol=1e-12, rtol=1e-12)
+
+    def test_antider_continuous(self):
+        np.random.seed(1234)
+        x = np.sort(np.random.random(11))
+        c = np.random.random((4, 10))
+        bp = BPoly(c, x).antiderivative()
+
+        xx = bp.x[1:-1]
+        assert_allclose(bp(xx - 1e-14),
+                        bp(xx + 1e-14), atol=1e-12, rtol=1e-12)
+
+    def test_integrate(self):
+        np.random.seed(1234)
+        x = np.sort(np.random.random(11))
+        c = np.random.random((4, 10))
+        bp = BPoly(c, x)
+        pp = PPoly.from_bernstein_basis(bp)
+        assert_allclose(bp.integrate(0, 1),
+                        pp.integrate(0, 1), atol=1e-12, rtol=1e-12)
+
+    def test_integrate_extrap(self):
+        c = [[1]]
+        x = [0, 1]
+        b = BPoly(c, x)
+
+        # default is extrapolate=True
+        assert_allclose(b.integrate(0, 2), 2., atol=1e-14)
+
+        # .integrate argument overrides self.extrapolate
+        b1 = BPoly(c, x, extrapolate=False)
+        assert_(np.isnan(b1.integrate(0, 2)))
+        assert_allclose(b1.integrate(0, 2, extrapolate=True), 2., atol=1e-14)
+
+    def test_integrate_periodic(self):
+        x = np.array([1, 2, 4])
+        c = np.array([[0., 0.], [-1., -1.], [2., -0.], [1., 2.]])
+
+        P = BPoly.from_power_basis(PPoly(c, x), extrapolate='periodic')
+        I = P.antiderivative()
+
+        period_int = I(4) - I(1)
+
+        assert_allclose(P.integrate(1, 4), period_int)
+        assert_allclose(P.integrate(-10, -7), period_int)
+        assert_allclose(P.integrate(-10, -4), 2 * period_int)
+
+        assert_allclose(P.integrate(1.5, 2.5), I(2.5) - I(1.5))
+        assert_allclose(P.integrate(3.5, 5), I(2) - I(1) + I(4) - I(3.5))
+        assert_allclose(P.integrate(3.5 + 12, 5 + 12),
+                        I(2) - I(1) + I(4) - I(3.5))
+        assert_allclose(P.integrate(3.5, 5 + 12),
+                        I(2) - I(1) + I(4) - I(3.5) + 4 * period_int)
+
+        assert_allclose(P.integrate(0, -1), I(2) - I(3))
+        assert_allclose(P.integrate(-9, -10), I(2) - I(3))
+        assert_allclose(P.integrate(0, -10), I(2) - I(3) - 3 * period_int)
+
+    def test_antider_neg(self):
+        # .derivative(-nu) ==> .antiderivative(nu) and vice versa
+        c = [[1]]
+        x = [0, 1]
+        b = BPoly(c, x)
+
+        xx = np.linspace(0, 1, 21)
+
+        assert_allclose(b.derivative(-1)(xx), b.antiderivative()(xx),
+                        atol=1e-12, rtol=1e-12)
+        assert_allclose(b.derivative(1)(xx), b.antiderivative(-1)(xx),
+                        atol=1e-12, rtol=1e-12)
+
+
+class TestPolyConversions:
+    def test_bp_from_pp(self):
+        x = [0, 1, 3]
+        c = [[3, 2], [1, 8], [4, 3]]
+        pp = PPoly(c, x)
+        bp = BPoly.from_power_basis(pp)
+        pp1 = PPoly.from_bernstein_basis(bp)
+
+        xp = [0.1, 1.4]
+        assert_allclose(pp(xp), bp(xp))
+        assert_allclose(pp(xp), pp1(xp))
+
+    def test_bp_from_pp_random(self):
+        np.random.seed(1234)
+        m, k = 5, 8   # number of intervals, order
+        x = np.sort(np.random.random(m))
+        c = np.random.random((k, m-1))
+        pp = PPoly(c, x)
+        bp = BPoly.from_power_basis(pp)
+        pp1 = PPoly.from_bernstein_basis(bp)
+
+        xp = np.linspace(x[0], x[-1], 21)
+        assert_allclose(pp(xp), bp(xp))
+        assert_allclose(pp(xp), pp1(xp))
+
+    def test_pp_from_bp(self):
+        x = [0, 1, 3]
+        c = [[3, 3], [1, 1], [4, 2]]
+        bp = BPoly(c, x)
+        pp = PPoly.from_bernstein_basis(bp)
+        bp1 = BPoly.from_power_basis(pp)
+
+        xp = [0.1, 1.4]
+        assert_allclose(bp(xp), pp(xp))
+        assert_allclose(bp(xp), bp1(xp))
+
+    def test_broken_conversions(self):
+        # regression test for gh-10597: from_power_basis and
+        # from_bernstein_basis must reject instances of the wrong class
+        x = [0, 1, 3]
+        c = [[3, 3], [1, 1], [4, 2]]
+        pp = PPoly(c, x)
+        with assert_raises(TypeError):
+            PPoly.from_bernstein_basis(pp)
+
+        bp = BPoly(c, x)
+        with assert_raises(TypeError):
+            BPoly.from_power_basis(bp)
+
+
+class TestBPolyFromDerivatives:
+    def test_make_poly_1(self):
+        c1 = BPoly._construct_from_derivatives(0, 1, [2], [3])
+        assert_allclose(c1, [2., 3.])
+
+    def test_make_poly_2(self):
+        c1 = BPoly._construct_from_derivatives(0, 1, [1, 0], [1])
+        assert_allclose(c1, [1., 1., 1.])
+
+        # f'(0) = 3
+        c2 = BPoly._construct_from_derivatives(0, 1, [2, 3], [1])
+        assert_allclose(c2, [2., 7./2, 1.])
+
+        # f'(1) = 3
+        c3 = BPoly._construct_from_derivatives(0, 1, [2], [1, 3])
+        assert_allclose(c3, [2., -0.5, 1.])
+
+    def test_make_poly_3(self):
+        # f'(0)=2, f''(0)=3
+        c1 = BPoly._construct_from_derivatives(0, 1, [1, 2, 3], [4])
+        assert_allclose(c1, [1., 5./3, 17./6, 4.])
+
+        # f'(1)=2, f''(1)=3
+        c2 = BPoly._construct_from_derivatives(0, 1, [1], [4, 2, 3])
+        assert_allclose(c2, [1., 19./6, 10./3, 4.])
+
+        # f'(0)=2, f'(1)=3
+        c3 = BPoly._construct_from_derivatives(0, 1, [1, 2], [4, 3])
+        assert_allclose(c3, [1., 5./3, 3., 4.])
+
+    def test_make_poly_12(self):
+        np.random.seed(12345)
+        ya = np.r_[0, np.random.random(5)]
+        yb = np.r_[0, np.random.random(5)]
+
+        c = BPoly._construct_from_derivatives(0, 1, ya, yb)
+        pp = BPoly(c[:, None], [0, 1])
+        for j in range(6):
+            assert_allclose([pp(0.), pp(1.)], [ya[j], yb[j]])
+            pp = pp.derivative()
+
+    def test_raise_degree(self):
+        np.random.seed(12345)
+        x = [0, 1]
+        k, d = 8, 5
+        c = np.random.random((k, 1, 2, 3, 4))
+        bp = BPoly(c, x)
+
+        c1 = BPoly._raise_degree(c, d)
+        bp1 = BPoly(c1, x)
+
+        xp = np.linspace(0, 1, 11)
+        assert_allclose(bp(xp), bp1(xp))
+
+    def test_xi_yi(self):
+        assert_raises(ValueError, BPoly.from_derivatives, [0, 1], [0])
+
+    def test_coords_order(self):
+        xi = [0, 0, 1]
+        yi = [[0], [0], [0]]
+        assert_raises(ValueError, BPoly.from_derivatives, xi, yi)
+
+    def test_zeros(self):
+        xi = [0, 1, 2, 3]
+        yi = [[0, 0], [0], [0, 0], [0, 0]]  # NB: will have to raise the degree
+        pp = BPoly.from_derivatives(xi, yi)
+        assert_(pp.c.shape == (4, 3))
+
+        ppd = pp.derivative()
+        for xp in [0., 0.1, 1., 1.1, 1.9, 2., 2.5]:
+            assert_allclose([pp(xp), ppd(xp)], [0., 0.])
+
+    def _make_random_mk(self, m, k):
+        # k derivatives at each breakpoint
+        np.random.seed(1234)
+        xi = np.asarray([1. * j**2 for j in range(m+1)])
+        yi = [np.random.random(k) for j in range(m+1)]
+        return xi, yi
+
+    def test_random_12(self):
+        m, k = 5, 12
+        xi, yi = self._make_random_mk(m, k)
+        pp = BPoly.from_derivatives(xi, yi)
+
+        for order in range(k//2):
+            assert_allclose(pp(xi), [yy[order] for yy in yi])
+            pp = pp.derivative()
+
+    def test_order_zero(self):
+        m, k = 5, 12
+        xi, yi = self._make_random_mk(m, k)
+        assert_raises(ValueError, BPoly.from_derivatives,
+                **dict(xi=xi, yi=yi, orders=0))
+
+    def test_orders_too_high(self):
+        m, k = 5, 12
+        xi, yi = self._make_random_mk(m, k)
+
+        BPoly.from_derivatives(xi, yi, orders=2*k-1)   # this is still ok
+        assert_raises(ValueError, BPoly.from_derivatives,   # but this is not
+                **dict(xi=xi, yi=yi, orders=2*k))
+
+    def test_orders_global(self):
+        m, k = 5, 12
+        xi, yi = self._make_random_mk(m, k)
+
+        # Local polynomials will be of order 5, which means that
+        # derivatives up to the 2nd will be matched at each breakpoint
+        order = 5
+        pp = BPoly.from_derivatives(xi, yi, orders=order)
+
+        for j in range(order//2+1):
+            assert_allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12))
+            pp = pp.derivative()
+        assert_(not np.allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12)))
+
+        # now repeat with `order` being even: on each interval, it uses
+        # order//2 derivatives at the right-hand endpoint and
+        # order//2 + 1 derivatives at the left-hand endpoint
+        order = 6
+        pp = BPoly.from_derivatives(xi, yi, orders=order)
+        for j in range(order//2):
+            assert_allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12))
+            pp = pp.derivative()
+        assert_(not np.allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12)))
+
+    def test_orders_local(self):
+        m, k = 7, 12
+        xi, yi = self._make_random_mk(m, k)
+
+        orders = [o + 1 for o in range(m)]
+        for i, x in enumerate(xi[1:-1]):
+            pp = BPoly.from_derivatives(xi, yi, orders=orders)
+            for j in range(orders[i] // 2 + 1):
+                assert_allclose(pp(x - 1e-12), pp(x + 1e-12))
+                pp = pp.derivative()
+            assert_(not np.allclose(pp(x - 1e-12), pp(x + 1e-12)))
+
+    def test_yi_trailing_dims(self):
+        m, k = 7, 5
+        xi = np.sort(np.random.random(m+1))
+        yi = np.random.random((m+1, k, 6, 7, 8))
+        pp = BPoly.from_derivatives(xi, yi)
+        assert_equal(pp.c.shape, (2*k, m, 6, 7, 8))
+
+    def test_gh_5430(self):
+        # At least one of these raises an error unless gh-5430 is
+        # fixed. In py2k an int was implemented using a C long, so
+        # which one failed depended on your system. In py3k there is only
+        # one arbitrary-precision integer type, so both would fail
+        # without the fix.
+        orders = np.int32(1)
+        p = BPoly.from_derivatives([0, 1], [[0], [0]], orders=orders)
+        assert_almost_equal(p(0), 0)
+        orders = np.int64(1)
+        p = BPoly.from_derivatives([0, 1], [[0], [0]], orders=orders)
+        assert_almost_equal(p(0), 0)
+        orders = 1
+        # This worked before; make sure it still works
+        p = BPoly.from_derivatives([0, 1], [[0], [0]], orders=orders)
+        assert_almost_equal(p(0), 0)
+
+
+class TestNdPPoly:
+    def test_simple_1d(self):
+        np.random.seed(1234)
+
+        c = np.random.rand(4, 5)
+        x = np.linspace(0, 1, 5+1)
+
+        xi = np.random.rand(200)
+
+        p = NdPPoly(c, (x,))
+        v1 = p((xi,))
+
+        v2 = _ppoly_eval_1(c[:,:,None], x, xi).ravel()
+        assert_allclose(v1, v2)
+
+    def test_simple_2d(self):
+        np.random.seed(1234)
+
+        c = np.random.rand(4, 5, 6, 7)
+        x = np.linspace(0, 1, 6+1)
+        y = np.linspace(0, 1, 7+1)**2
+
+        xi = np.random.rand(200)
+        yi = np.random.rand(200)
+
+        v1 = np.empty([len(xi), 1], dtype=c.dtype)
+        v1.fill(np.nan)
+        _ppoly.evaluate_nd(c.reshape(4*5, 6*7, 1),
+                           (x, y),
+                           np.array([4, 5], dtype=np.intc),
+                           np.c_[xi, yi],
+                           np.array([0, 0], dtype=np.intc),
+                           1,
+                           v1)
+        v1 = v1.ravel()
+        v2 = _ppoly2d_eval(c, (x, y), xi, yi)
+        assert_allclose(v1, v2)
+
+        p = NdPPoly(c, (x, y))
+        for nu in (None, (0, 0), (0, 1), (1, 0), (2, 3), (9, 2)):
+            v1 = p(np.c_[xi, yi], nu=nu)
+            v2 = _ppoly2d_eval(c, (x, y), xi, yi, nu=nu)
+            assert_allclose(v1, v2, err_msg=repr(nu))
+
+    def test_simple_3d(self):
+        np.random.seed(1234)
+
+        c = np.random.rand(4, 5, 6, 7, 8, 9)
+        x = np.linspace(0, 1, 7+1)
+        y = np.linspace(0, 1, 8+1)**2
+        z = np.linspace(0, 1, 9+1)**3
+
+        xi = np.random.rand(40)
+        yi = np.random.rand(40)
+        zi = np.random.rand(40)
+
+        p = NdPPoly(c, (x, y, z))
+
+        for nu in (None, (0, 0, 0), (0, 1, 0), (1, 0, 0), (2, 3, 0),
+                   (6, 0, 2)):
+            v1 = p((xi, yi, zi), nu=nu)
+            v2 = _ppoly3d_eval(c, (x, y, z), xi, yi, zi, nu=nu)
+            assert_allclose(v1, v2, err_msg=repr(nu))
+
+    def test_simple_4d(self):
+        np.random.seed(1234)
+
+        c = np.random.rand(4, 5, 6, 7, 8, 9, 10, 11)
+        x = np.linspace(0, 1, 8+1)
+        y = np.linspace(0, 1, 9+1)**2
+        z = np.linspace(0, 1, 10+1)**3
+        u = np.linspace(0, 1, 11+1)**4
+
+        xi = np.random.rand(20)
+        yi = np.random.rand(20)
+        zi = np.random.rand(20)
+        ui = np.random.rand(20)
+
+        p = NdPPoly(c, (x, y, z, u))
+        v1 = p((xi, yi, zi, ui))
+
+        v2 = _ppoly4d_eval(c, (x, y, z, u), xi, yi, zi, ui)
+        assert_allclose(v1, v2)
+
+    def test_deriv_1d(self):
+        np.random.seed(1234)
+
+        c = np.random.rand(4, 5)
+        x = np.linspace(0, 1, 5+1)
+
+        p = NdPPoly(c, (x,))
+
+        # derivative
+        dp = p.derivative(nu=[1])
+        p1 = PPoly(c, x)
+        dp1 = p1.derivative()
+        assert_allclose(dp.c, dp1.c)
+
+        # antiderivative
+        dp = p.antiderivative(nu=[2])
+        p1 = PPoly(c, x)
+        dp1 = p1.antiderivative(2)
+        assert_allclose(dp.c, dp1.c)
+
+    def test_deriv_3d(self):
+        np.random.seed(1234)
+
+        c = np.random.rand(4, 5, 6, 7, 8, 9)
+        x = np.linspace(0, 1, 7+1)
+        y = np.linspace(0, 1, 8+1)**2
+        z = np.linspace(0, 1, 9+1)**3
+
+        p = NdPPoly(c, (x, y, z))
+
+        # differentiate vs x
+        p1 = PPoly(c.transpose(0, 3, 1, 2, 4, 5), x)
+        dp = p.derivative(nu=[2])
+        dp1 = p1.derivative(2)
+        assert_allclose(dp.c,
+                        dp1.c.transpose(0, 2, 3, 1, 4, 5))
+
+        # antidifferentiate vs y
+        p1 = PPoly(c.transpose(1, 4, 0, 2, 3, 5), y)
+        dp = p.antiderivative(nu=[0, 1, 0])
+        dp1 = p1.antiderivative(1)
+        assert_allclose(dp.c,
+                        dp1.c.transpose(2, 0, 3, 4, 1, 5))
+
+        # differentiate vs z
+        p1 = PPoly(c.transpose(2, 5, 0, 1, 3, 4), z)
+        dp = p.derivative(nu=[0, 0, 3])
+        dp1 = p1.derivative(3)
+        assert_allclose(dp.c,
+                        dp1.c.transpose(2, 3, 0, 4, 5, 1))
+
+    def test_deriv_3d_simple(self):
+        # Integrate the constant 1 to obtain the function
+        # x * y**2 * z**4 / (2! 4!); gamma(3) == 2! and gamma(5) == 4! below.
+
+        c = np.ones((1, 1, 1, 3, 4, 5))
+        x = np.linspace(0, 1, 3+1)**1
+        y = np.linspace(0, 1, 4+1)**2
+        z = np.linspace(0, 1, 5+1)**3
+
+        p = NdPPoly(c, (x, y, z))
+        ip = p.antiderivative((1, 0, 4))
+        ip = ip.antiderivative((0, 2, 0))
+
+        xi = np.random.rand(20)
+        yi = np.random.rand(20)
+        zi = np.random.rand(20)
+
+        assert_allclose(ip((xi, yi, zi)),
+                        xi * yi**2 * zi**4 / (gamma(3)*gamma(5)))
+
+    def test_integrate_2d(self):
+        np.random.seed(1234)
+        c = np.random.rand(4, 5, 16, 17)
+        x = np.linspace(0, 1, 16+1)**1
+        y = np.linspace(0, 1, 17+1)**2
+
+        # make continuously differentiable so that nquad() has an
+        # easier time
+        c = c.transpose(0, 2, 1, 3)
+        cx = c.reshape(c.shape[0], c.shape[1], -1).copy()
+        _ppoly.fix_continuity(cx, x, 2)
+        c = cx.reshape(c.shape)
+        c = c.transpose(0, 2, 1, 3)
+        c = c.transpose(1, 3, 0, 2)
+        cx = c.reshape(c.shape[0], c.shape[1], -1).copy()
+        _ppoly.fix_continuity(cx, y, 2)
+        c = cx.reshape(c.shape)
+        c = c.transpose(2, 0, 3, 1).copy()
+
+        # Check integration
+        p = NdPPoly(c, (x, y))
+
+        for ranges in [[(0, 1), (0, 1)],
+                       [(0, 0.5), (0, 1)],
+                       [(0, 1), (0, 0.5)],
+                       [(0.3, 0.7), (0.6, 0.2)]]:
+
+            ig = p.integrate(ranges)
+            ig2, err2 = nquad(lambda x, y: p((x, y)), ranges,
+                              opts=[dict(epsrel=1e-5, epsabs=1e-5)]*2)
+            assert_allclose(ig, ig2, rtol=1e-5, atol=1e-5,
+                            err_msg=repr(ranges))
+
+    def test_integrate_1d(self):
+        np.random.seed(1234)
+        c = np.random.rand(4, 5, 6, 16, 17, 18)
+        x = np.linspace(0, 1, 16+1)**1
+        y = np.linspace(0, 1, 17+1)**2
+        z = np.linspace(0, 1, 18+1)**3
+
+        # Check 1-D integration
+        p = NdPPoly(c, (x, y, z))
+
+        u = np.random.rand(200)
+        v = np.random.rand(200)
+        a, b = 0.2, 0.7
+
+        px = p.integrate_1d(a, b, axis=0)
+        pax = p.antiderivative((1, 0, 0))
+        assert_allclose(px((u, v)), pax((b, u, v)) - pax((a, u, v)))
+
+        py = p.integrate_1d(a, b, axis=1)
+        pay = p.antiderivative((0, 1, 0))
+        assert_allclose(py((u, v)), pay((u, b, v)) - pay((u, a, v)))
+
+        pz = p.integrate_1d(a, b, axis=2)
+        paz = p.antiderivative((0, 0, 1))
+        assert_allclose(pz((u, v)), paz((u, v, b)) - paz((u, v, a)))
+
+
+def _ppoly_eval_1(c, x, xps):
+    """Evaluate piecewise polynomial manually"""
+    out = np.zeros((len(xps), c.shape[2]))
+    for i, xp in enumerate(xps):
+        if xp < 0 or xp > 1:
+            out[i,:] = np.nan
+            continue
+        j = np.searchsorted(x, xp) - 1
+        d = xp - x[j]
+        assert_(x[j] <= xp < x[j+1])
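+        # PPoly convention: coefficients come in decreasing powers of the
+        # local variable d = xp - x[j]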
+        r = sum(c[k,j] * d**(c.shape[0]-k-1)
+                for k in range(c.shape[0]))
+        out[i,:] = r
+    return out
+
+
+def _ppoly_eval_2(coeffs, breaks, xnew, fill=np.nan):
+    """Evaluate piecewise polynomial manually (another way)"""
+    a = breaks[0]
+    b = breaks[-1]
+    K = coeffs.shape[0]
+
+    saveshape = np.shape(xnew)
+    xnew = np.ravel(xnew)
+    res = np.empty_like(xnew)
+    mask = (xnew >= a) & (xnew <= b)
+    res[~mask] = fill
+    xx = xnew.compress(mask)
+    indxs = np.searchsorted(breaks, xx)-1
+    indxs = indxs.clip(0, len(breaks))
+    pp = coeffs
+    diff = xx - breaks.take(indxs)
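+    # np.vander defaults to decreasing powers, so row k of V is
+    # [diff[k]**(K-1), ..., diff[k], 1] and the dot product below evaluates
+    # the local polynomial with coefficients pp[:, indxs[k]]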
+    V = np.vander(diff, N=K)
+    values = np.array([np.dot(V[k, :], pp[:, indxs[k]]) for k in range(len(xx))])
+    res[mask] = values
+    res.shape = saveshape
+    return res
+
+
+def _dpow(x, y, n):
+    """
+    d^n (x**y) / dx^n
+    """
+    if n < 0:
+        raise ValueError("invalid derivative order")
+    elif n > y:
+        return 0
+    else:
+        return poch(y - n + 1, n) * x**(y - n)
+
+
+def _ppoly2d_eval(c, xs, xnew, ynew, nu=None):
+    """
+    Straightforward evaluation of 2-D piecewise polynomial
+    """
+    if nu is None:
+        nu = (0, 0)
+
+    out = np.empty((len(xnew),), dtype=c.dtype)
+
+    nx, ny = c.shape[:2]
+
+    for jout, (x, y) in enumerate(zip(xnew, ynew)):
+        if not ((xs[0][0] <= x <= xs[0][-1]) and
+                (xs[1][0] <= y <= xs[1][-1])):
+            out[jout] = np.nan
+            continue
+
+        j1 = np.searchsorted(xs[0], x) - 1
+        j2 = np.searchsorted(xs[1], y) - 1
+
+        s1 = x - xs[0][j1]
+        s2 = y - xs[1][j2]
+
+        val = 0
+
+        for k1 in range(c.shape[0]):
+            for k2 in range(c.shape[1]):
+                val += (c[nx-k1-1,ny-k2-1,j1,j2]
+                        * _dpow(s1, k1, nu[0])
+                        * _dpow(s2, k2, nu[1]))
+
+        out[jout] = val
+
+    return out
+
+
+def _ppoly3d_eval(c, xs, xnew, ynew, znew, nu=None):
+    """
+    Straightforward evaluation of 3-D piecewise polynomial
+    """
+    if nu is None:
+        nu = (0, 0, 0)
+
+    out = np.empty((len(xnew),), dtype=c.dtype)
+
+    nx, ny, nz = c.shape[:3]
+
+    for jout, (x, y, z) in enumerate(zip(xnew, ynew, znew)):
+        if not ((xs[0][0] <= x <= xs[0][-1]) and
+                (xs[1][0] <= y <= xs[1][-1]) and
+                (xs[2][0] <= z <= xs[2][-1])):
+            out[jout] = np.nan
+            continue
+
+        j1 = np.searchsorted(xs[0], x) - 1
+        j2 = np.searchsorted(xs[1], y) - 1
+        j3 = np.searchsorted(xs[2], z) - 1
+
+        s1 = x - xs[0][j1]
+        s2 = y - xs[1][j2]
+        s3 = z - xs[2][j3]
+
+        val = 0
+        for k1 in range(c.shape[0]):
+            for k2 in range(c.shape[1]):
+                for k3 in range(c.shape[2]):
+                    val += (c[nx-k1-1,ny-k2-1,nz-k3-1,j1,j2,j3]
+                            * _dpow(s1, k1, nu[0])
+                            * _dpow(s2, k2, nu[1])
+                            * _dpow(s3, k3, nu[2]))
+
+        out[jout] = val
+
+    return out
+
+
+def _ppoly4d_eval(c, xs, xnew, ynew, znew, unew, nu=None):
+    """
+    Straightforward evaluation of 4-D piecewise polynomial
+    """
+    if nu is None:
+        nu = (0, 0, 0, 0)
+
+    out = np.empty((len(xnew),), dtype=c.dtype)
+
+    mx, my, mz, mu = c.shape[:4]
+
+    for jout, (x, y, z, u) in enumerate(zip(xnew, ynew, znew, unew)):
+        if not ((xs[0][0] <= x <= xs[0][-1]) and
+                (xs[1][0] <= y <= xs[1][-1]) and
+                (xs[2][0] <= z <= xs[2][-1]) and
+                (xs[3][0] <= u <= xs[3][-1])):
+            out[jout] = np.nan
+            continue
+
+        j1 = np.searchsorted(xs[0], x) - 1
+        j2 = np.searchsorted(xs[1], y) - 1
+        j3 = np.searchsorted(xs[2], z) - 1
+        j4 = np.searchsorted(xs[3], u) - 1
+
+        s1 = x - xs[0][j1]
+        s2 = y - xs[1][j2]
+        s3 = z - xs[2][j3]
+        s4 = u - xs[3][j4]
+
+        val = 0
+        for k1 in range(c.shape[0]):
+            for k2 in range(c.shape[1]):
+                for k3 in range(c.shape[2]):
+                    for k4 in range(c.shape[3]):
+                        val += (c[mx-k1-1,my-k2-1,mz-k3-1,mu-k4-1,j1,j2,j3,j4]
+                                * _dpow(s1, k1, nu[0])
+                                * _dpow(s2, k2, nu[1])
+                                * _dpow(s3, k3, nu[2])
+                                * _dpow(s4, k4, nu[3]))
+
+        out[jout] = val
+
+    return out
diff --git a/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_ndgriddata.py b/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_ndgriddata.py
new file mode 100644
index 00000000..42b500d5
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_ndgriddata.py
@@ -0,0 +1,246 @@
+import numpy as np
+from numpy.testing import assert_equal, assert_array_equal, assert_allclose
+import pytest
+from pytest import raises as assert_raises
+
+from scipy.interpolate import (griddata, NearestNDInterpolator,
+                               LinearNDInterpolator,
+                               CloughTocher2DInterpolator)
+
+
+parametrize_interpolators = pytest.mark.parametrize(
+    "interpolator", [NearestNDInterpolator, LinearNDInterpolator,
+                     CloughTocher2DInterpolator]
+)
+
+class TestGriddata:
+    def test_fill_value(self):
+        x = [(0,0), (0,1), (1,0)]
+        y = [1, 2, 3]
+
+        yi = griddata(x, y, [(1,1), (1,2), (0,0)], fill_value=-1)
+        assert_array_equal(yi, [-1., -1, 1])
+
+        yi = griddata(x, y, [(1,1), (1,2), (0,0)])
+        assert_array_equal(yi, [np.nan, np.nan, 1])
+
+    def test_alternative_call(self):
+        x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+                     dtype=np.double)
+        y = (np.arange(x.shape[0], dtype=np.double)[:,None]
+             + np.array([0,1])[None,:])
+
+        for method in ('nearest', 'linear', 'cubic'):
+            for rescale in (True, False):
+                msg = repr((method, rescale))
+                yi = griddata((x[:,0], x[:,1]), y, (x[:,0], x[:,1]), method=method,
+                              rescale=rescale)
+                assert_allclose(y, yi, atol=1e-14, err_msg=msg)
+
+    def test_multivalue_2d(self):
+        x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+                     dtype=np.double)
+        y = (np.arange(x.shape[0], dtype=np.double)[:,None]
+             + np.array([0,1])[None,:])
+
+        for method in ('nearest', 'linear', 'cubic'):
+            for rescale in (True, False):
+                msg = repr((method, rescale))
+                yi = griddata(x, y, x, method=method, rescale=rescale)
+                assert_allclose(y, yi, atol=1e-14, err_msg=msg)
+
+    def test_multipoint_2d(self):
+        x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+                     dtype=np.double)
+        y = np.arange(x.shape[0], dtype=np.double)
+
+        xi = x[:,None,:] + np.array([0,0,0])[None,:,None]
+
+        for method in ('nearest', 'linear', 'cubic'):
+            for rescale in (True, False):
+                msg = repr((method, rescale))
+                yi = griddata(x, y, xi, method=method, rescale=rescale)
+
+                assert_equal(yi.shape, (5, 3), err_msg=msg)
+                assert_allclose(yi, np.tile(y[:,None], (1, 3)),
+                                atol=1e-14, err_msg=msg)
+
+    def test_complex_2d(self):
+        x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+                     dtype=np.double)
+        y = np.arange(x.shape[0], dtype=np.double)
+        y = y - 2j*y[::-1]
+
+        xi = x[:,None,:] + np.array([0,0,0])[None,:,None]
+
+        for method in ('nearest', 'linear', 'cubic'):
+            for rescale in (True, False):
+                msg = repr((method, rescale))
+                yi = griddata(x, y, xi, method=method, rescale=rescale)
+
+                assert_equal(yi.shape, (5, 3), err_msg=msg)
+                assert_allclose(yi, np.tile(y[:,None], (1, 3)),
+                                atol=1e-14, err_msg=msg)
+
+    def test_1d(self):
+        x = np.array([1, 2.5, 3, 4.5, 5, 6])
+        y = np.array([1, 2, 0, 3.9, 2, 1])
+
+        for method in ('nearest', 'linear', 'cubic'):
+            assert_allclose(griddata(x, y, x, method=method), y,
+                            err_msg=method, atol=1e-14)
+            assert_allclose(griddata(x.reshape(6, 1), y, x, method=method), y,
+                            err_msg=method, atol=1e-14)
+            assert_allclose(griddata((x,), y, (x,), method=method), y,
+                            err_msg=method, atol=1e-14)
+
+    def test_1d_borders(self):
+        # Test the nearest-neighbor case with xi outside
+        # the range of the data points.
+        x = np.array([1, 2.5, 3, 4.5, 5, 6])
+        y = np.array([1, 2, 0, 3.9, 2, 1])
+        xi = np.array([0.9, 6.5])
+        yi_should = np.array([1.0, 1.0])
+
+        method = 'nearest'
+        assert_allclose(griddata(x, y, xi,
+                                 method=method), yi_should,
+                        err_msg=method,
+                        atol=1e-14)
+        assert_allclose(griddata(x.reshape(6, 1), y, xi,
+                                 method=method), yi_should,
+                        err_msg=method,
+                        atol=1e-14)
+        assert_allclose(griddata((x, ), y, (xi, ),
+                                 method=method), yi_should,
+                        err_msg=method,
+                        atol=1e-14)
+
+    def test_1d_unsorted(self):
+        x = np.array([2.5, 1, 4.5, 5, 6, 3])
+        y = np.array([1, 2, 0, 3.9, 2, 1])
+
+        for method in ('nearest', 'linear', 'cubic'):
+            assert_allclose(griddata(x, y, x, method=method), y,
+                            err_msg=method, atol=1e-10)
+            assert_allclose(griddata(x.reshape(6, 1), y, x, method=method), y,
+                            err_msg=method, atol=1e-10)
+            assert_allclose(griddata((x,), y, (x,), method=method), y,
+                            err_msg=method, atol=1e-10)
+
+    def test_square_rescale_manual(self):
+        points = np.array([(0,0), (0,100), (10,100), (10,0), (1, 5)], dtype=np.double)
+        points_rescaled = np.array([(0,0), (0,1), (1,1), (1,0), (0.1, 0.05)], dtype=np.double)
+        values = np.array([1., 2., -3., 5., 9.], dtype=np.double)
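+        # rescale=True should be equivalent to mapping the points onto the
+        # unit square by hand before triangulating; points_rescaled encodes
+        # exactly that mapping (x/10, y/100).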
+
+        xx, yy = np.broadcast_arrays(np.linspace(0, 10, 14)[:,None],
+                                     np.linspace(0, 100, 14)[None,:])
+        xx = xx.ravel()
+        yy = yy.ravel()
+        xi = np.array([xx, yy]).T.copy()
+
+        for method in ('nearest', 'linear', 'cubic'):
+            msg = method
+            zi = griddata(points_rescaled, values, xi/np.array([10, 100.]),
+                          method=method)
+            zi_rescaled = griddata(points, values, xi, method=method,
+                                   rescale=True)
+            assert_allclose(zi, zi_rescaled, err_msg=msg,
+                            atol=1e-12)
+
+    def test_xi_1d(self):
+        # Check that 1-D xi is interpreted as a coordinate
+        x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
+                     dtype=np.double)
+        y = np.arange(x.shape[0], dtype=np.double)
+        y = y - 2j*y[::-1]
+
+        xi = np.array([0.5, 0.5])
+
+        for method in ('nearest', 'linear', 'cubic'):
+            p1 = griddata(x, y, xi, method=method)
+            p2 = griddata(x, y, xi[None,:], method=method)
+            assert_allclose(p1, p2, err_msg=method)
+
+            xi1 = np.array([0.5])
+            xi3 = np.array([0.5, 0.5, 0.5])
+            assert_raises(ValueError, griddata, x, y, xi1,
+                          method=method)
+            assert_raises(ValueError, griddata, x, y, xi3,
+                          method=method)
+
+
+class TestNearestNDInterpolator:
+    def test_nearest_options(self):
+        # Smoke test that NearestNDInterpolator accepts cKDTree options.
+        npts, nd = 4, 3
+        x = np.arange(npts*nd).reshape((npts, nd))
+        y = np.arange(npts)
+        nndi = NearestNDInterpolator(x, y)
+
+        opts = {'balanced_tree': False, 'compact_nodes': False}
+        nndi_o = NearestNDInterpolator(x, y, tree_options=opts)
+        assert_allclose(nndi(x), nndi_o(x), atol=1e-14)
+
+    def test_nearest_list_argument(self):
+        nd = np.array([[0, 0, 0, 0, 1, 0, 1],
+                       [0, 0, 0, 0, 0, 1, 1],
+                       [0, 0, 0, 0, 1, 1, 2]])
+        d = nd[:, 3:]
+
+        # z is np.array
+        NI = NearestNDInterpolator((d[0], d[1]), d[2])
+        assert_array_equal(NI([0.1, 0.9], [0.1, 0.9]), [0, 2])
+
+        # z is list
+        NI = NearestNDInterpolator((d[0], d[1]), list(d[2]))
+        assert_array_equal(NI([0.1, 0.9], [0.1, 0.9]), [0, 2])
+
+
+class TestNDInterpolators:
+    @parametrize_interpolators
+    def test_broadcastable_input(self, interpolator):
+        # input data
+        np.random.seed(0)
+        x = np.random.random(10)
+        y = np.random.random(10)
+        z = np.hypot(x, y)
+
+        # x-y grid for interpolation
+        X = np.linspace(min(x), max(x))
+        Y = np.linspace(min(y), max(y))
+        X, Y = np.meshgrid(X, Y)
+        XY = np.vstack((X.ravel(), Y.ravel())).T
+        interp = interpolator(list(zip(x, y)), z)
+        # single array input
+        interp_points0 = interp(XY)
+        # tuple input
+        interp_points1 = interp((X, Y))
+        interp_points2 = interp((X, 0.0))
+        # broadcastable input
+        interp_points3 = interp(X, Y)
+        interp_points4 = interp(X, 0.0)
+
+        assert_equal(interp_points0.size ==
+                     interp_points1.size ==
+                     interp_points2.size ==
+                     interp_points3.size ==
+                     interp_points4.size, True)
+
+    @parametrize_interpolators
+    def test_read_only(self, interpolator):
+        # input data
+        np.random.seed(0)
+        xy = np.random.random((10, 2))
+        x, y = xy[:, 0], xy[:, 1]
+        z = np.hypot(x, y)
+
+        # interpolation points
+        XY = np.random.random((50, 2))
+
+        xy.setflags(write=False)
+        z.setflags(write=False)
+        XY.setflags(write=False)
+
+        interp = interpolator(xy, z)
+        interp(XY)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_pade.py b/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_pade.py
new file mode 100644
index 00000000..5c3e03e2
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_pade.py
@@ -0,0 +1,101 @@
+from numpy.testing import (assert_array_equal, assert_array_almost_equal)
+from scipy.interpolate import pade
+
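+# For reference: scipy.interpolate.pade(an, m, n=None) returns a pair of
+# np.poly1d objects (p, q) with deg(q) == m and deg(p) == n (defaulting to
+# len(an) - m - 1), such that p(x)/q(x) matches the Taylor coefficients `an`
+# through order m + n.
+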
+def test_pade_trivial():
+    nump, denomp = pade([1.0], 0)
+    assert_array_equal(nump.c, [1.0])
+    assert_array_equal(denomp.c, [1.0])
+
+    nump, denomp = pade([1.0], 0, 0)
+    assert_array_equal(nump.c, [1.0])
+    assert_array_equal(denomp.c, [1.0])
+
+
+def test_pade_4term_exp():
+    # First four Taylor coefficients of exp(x).
+    # Unlike poly1d, the first array element is the zero-order term.
+    an = [1.0, 1.0, 0.5, 1.0/6]
+
+    nump, denomp = pade(an, 0)
+    assert_array_almost_equal(nump.c, [1.0/6, 0.5, 1.0, 1.0])
+    assert_array_almost_equal(denomp.c, [1.0])
+
+    nump, denomp = pade(an, 1)
+    assert_array_almost_equal(nump.c, [1.0/6, 2.0/3, 1.0])
+    assert_array_almost_equal(denomp.c, [-1.0/3, 1.0])
+
+    nump, denomp = pade(an, 2)
+    assert_array_almost_equal(nump.c, [1.0/3, 1.0])
+    assert_array_almost_equal(denomp.c, [1.0/6, -2.0/3, 1.0])
+
+    nump, denomp = pade(an, 3)
+    assert_array_almost_equal(nump.c, [1.0])
+    assert_array_almost_equal(denomp.c, [-1.0/6, 0.5, -1.0, 1.0])
+
+    # Testing inclusion of optional parameter
+    nump, denomp = pade(an, 0, 3)
+    assert_array_almost_equal(nump.c, [1.0/6, 0.5, 1.0, 1.0])
+    assert_array_almost_equal(denomp.c, [1.0])
+
+    nump, denomp = pade(an, 1, 2)
+    assert_array_almost_equal(nump.c, [1.0/6, 2.0/3, 1.0])
+    assert_array_almost_equal(denomp.c, [-1.0/3, 1.0])
+
+    nump, denomp = pade(an, 2, 1)
+    assert_array_almost_equal(nump.c, [1.0/3, 1.0])
+    assert_array_almost_equal(denomp.c, [1.0/6, -2.0/3, 1.0])
+
+    nump, denomp = pade(an, 3, 0)
+    assert_array_almost_equal(nump.c, [1.0])
+    assert_array_almost_equal(denomp.c, [-1.0/6, 0.5, -1.0, 1.0])
+
+    # Test reduced-order approximations (m + n + 1 < len(an)), which use
+    # only the leading Taylor coefficients.
+    nump, denomp = pade(an, 0, 2)
+    assert_array_almost_equal(nump.c, [0.5, 1.0, 1.0])
+    assert_array_almost_equal(denomp.c, [1.0])
+
+    nump, denomp = pade(an, 1, 1)
+    assert_array_almost_equal(nump.c, [1.0/2, 1.0])
+    assert_array_almost_equal(denomp.c, [-1.0/2, 1.0])
+
+    nump, denomp = pade(an, 2, 0)
+    assert_array_almost_equal(nump.c, [1.0])
+    assert_array_almost_equal(denomp.c, [1.0/2, -1.0, 1.0])
+
+
+def test_pade_ints():
+    # Simple test sequences (one of ints, one of floats).
+    an_int = [1, 2, 3, 4]
+    an_flt = [1.0, 2.0, 3.0, 4.0]
+
+    # Make sure integer arrays give the same result as float arrays with the
+    # same values.
+    for i in range(0, len(an_int)):
+        for j in range(0, len(an_int) - i):
+
+            # Create float and int pade approximation for given order.
+            nump_int, denomp_int = pade(an_int, i, j)
+            nump_flt, denomp_flt = pade(an_flt, i, j)
+
+            # Check that they are the same.
+            assert_array_equal(nump_int.c, nump_flt.c)
+            assert_array_equal(denomp_int.c, denomp_flt.c)
+
+
+def test_pade_complex():
+    # Test sequence with known solutions - see page 6 of 10.1109/PESGM.2012.6344759.
+    # Variable x is a parameter; these tests should work with any complex number.
+    x = 0.2 + 0.6j
+    an = [1.0, x, -x*x.conjugate(), x.conjugate()*(x**2) + x*(x.conjugate()**2),
+          -(x**3)*x.conjugate() - 3*(x*x.conjugate())**2 - x*(x.conjugate()**3)]
+
+    nump, denomp = pade(an, 1, 1)
+    assert_array_almost_equal(nump.c, [x + x.conjugate(), 1.0])
+    assert_array_almost_equal(denomp.c, [x.conjugate(), 1.0])
+
+    nump, denomp = pade(an, 1, 2)
+    assert_array_almost_equal(nump.c, [x**2, 2*x + x.conjugate(), 1.0])
+    assert_array_almost_equal(denomp.c, [x + x.conjugate(), 1.0])
+
+    nump, denomp = pade(an, 2, 2)
+    assert_array_almost_equal(nump.c, [x**2 + x*x.conjugate() + x.conjugate()**2, 2*(x + x.conjugate()), 1.0])
+    assert_array_almost_equal(denomp.c, [x.conjugate()**2, x + 2*x.conjugate(), 1.0])
diff --git a/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_polyint.py b/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_polyint.py
new file mode 100644
index 00000000..af78f1e0
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_polyint.py
@@ -0,0 +1,808 @@
+import warnings
+import io
+import numpy as np
+
+from numpy.testing import (
+    assert_almost_equal, assert_array_equal, assert_array_almost_equal,
+    assert_allclose, assert_equal, assert_)
+from pytest import raises as assert_raises
+import pytest
+
+from scipy.interpolate import (
+    KroghInterpolator, krogh_interpolate,
+    BarycentricInterpolator, barycentric_interpolate,
+    approximate_taylor_polynomial, CubicHermiteSpline, pchip,
+    PchipInterpolator, pchip_interpolate, Akima1DInterpolator, CubicSpline,
+    make_interp_spline)
+
+
+def check_shape(interpolator_cls, x_shape, y_shape, deriv_shape=None, axis=0,
+                extra_args={}):
+    np.random.seed(1234)
+
+    x = [-1, 0, 1, 2, 3, 4]
+    s = list(range(1, len(y_shape)+1))
+    s.insert(axis % (len(y_shape)+1), 0)
+    y = np.random.rand(*((6,) + y_shape)).transpose(s)
+
+    xi = np.zeros(x_shape)
+    if interpolator_cls is CubicHermiteSpline:
+        dydx = np.random.rand(*((6,) + y_shape)).transpose(s)
+        yi = interpolator_cls(x, y, dydx, axis=axis, **extra_args)(xi)
+    else:
+        yi = interpolator_cls(x, y, axis=axis, **extra_args)(xi)
+
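+    # Evaluating at xi replaces the interpolation axis of y with xi's shape;
+    # callables that return derivatives prepend deriv_shape in front.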
+    target_shape = ((deriv_shape or ()) + y.shape[:axis]
+                    + x_shape + y.shape[axis:][1:])
+    assert_equal(yi.shape, target_shape)
+
+    # check it works also with lists
+    if x_shape and y.size > 0:
+        if interpolator_cls is CubicHermiteSpline:
+            interpolator_cls(list(x), list(y), list(dydx), axis=axis,
+                             **extra_args)(list(xi))
+        else:
+            interpolator_cls(list(x), list(y), axis=axis,
+                             **extra_args)(list(xi))
+
+    # check also values
+    if xi.size > 0 and deriv_shape is None:
+        bs_shape = y.shape[:axis] + (1,)*len(x_shape) + y.shape[axis:][1:]
+        yv = y[((slice(None,),)*(axis % y.ndim)) + (1,)]
+        yv = yv.reshape(bs_shape)
+
+        yi, y = np.broadcast_arrays(yi, yv)
+        assert_allclose(yi, y)
+
+
+SHAPES = [(), (0,), (1,), (6, 2, 5)]
+
+
+def test_shapes():
+
+    def spl_interp(x, y, axis):
+        return make_interp_spline(x, y, axis=axis)
+
+    for ip in [KroghInterpolator, BarycentricInterpolator, CubicHermiteSpline,
+               pchip, Akima1DInterpolator, CubicSpline, spl_interp]:
+        for s1 in SHAPES:
+            for s2 in SHAPES:
+                for axis in range(-len(s2), len(s2)):
+                    if ip != CubicSpline:
+                        check_shape(ip, s1, s2, None, axis)
+                    else:
+                        for bc in ['natural', 'clamped']:
+                            extra = {'bc_type': bc}
+                            check_shape(ip, s1, s2, None, axis, extra)
+
+def test_derivs_shapes():
+    def krogh_derivs(x, y, axis=0):
+        return KroghInterpolator(x, y, axis).derivatives
+
+    for s1 in SHAPES:
+        for s2 in SHAPES:
+            for axis in range(-len(s2), len(s2)):
+                check_shape(krogh_derivs, s1, s2, (6,), axis)
+
+
+def test_deriv_shapes():
+    def krogh_deriv(x, y, axis=0):
+        return KroghInterpolator(x, y, axis).derivative
+
+    def pchip_deriv(x, y, axis=0):
+        return pchip(x, y, axis).derivative()
+
+    def pchip_deriv2(x, y, axis=0):
+        return pchip(x, y, axis).derivative(2)
+
+    def pchip_antideriv(x, y, axis=0):
+        return pchip(x, y, axis).antiderivative()
+
+    def pchip_antideriv2(x, y, axis=0):
+        return pchip(x, y, axis).antiderivative(2)
+
+    def pchip_deriv_inplace(x, y, axis=0):
+        class P(PchipInterpolator):
+            def __call__(self, x):
+                return PchipInterpolator.__call__(self, x, 1)
+        return P(x, y, axis)
+
+    def akima_deriv(x, y, axis=0):
+        return Akima1DInterpolator(x, y, axis).derivative()
+
+    def akima_antideriv(x, y, axis=0):
+        return Akima1DInterpolator(x, y, axis).antiderivative()
+
+    def cspline_deriv(x, y, axis=0):
+        return CubicSpline(x, y, axis).derivative()
+
+    def cspline_antideriv(x, y, axis=0):
+        return CubicSpline(x, y, axis).antiderivative()
+
+    def bspl_deriv(x, y, axis=0):
+        return make_interp_spline(x, y, axis=axis).derivative()
+
+    def bspl_antideriv(x, y, axis=0):
+        return make_interp_spline(x, y, axis=axis).antiderivative()
+
+    for ip in [krogh_deriv, pchip_deriv, pchip_deriv2, pchip_deriv_inplace,
+               pchip_antideriv, pchip_antideriv2, akima_deriv, akima_antideriv,
+               cspline_deriv, cspline_antideriv, bspl_deriv, bspl_antideriv]:
+        for s1 in SHAPES:
+            for s2 in SHAPES:
+                for axis in range(-len(s2), len(s2)):
+                    check_shape(ip, s1, s2, (), axis)
+
+
+def test_complex():
+    x = [1, 2, 3, 4]
+    y = [1, 2, 1j, 3]
+
+    for ip in [KroghInterpolator, BarycentricInterpolator, pchip, CubicSpline]:
+        p = ip(x, y)
+        assert_allclose(y, p(x))
+
+    dydx = [0, -1j, 2, 3j]
+    p = CubicHermiteSpline(x, y, dydx)
+    assert_allclose(y, p(x))
+    assert_allclose(dydx, p(x, 1))
+
+
+class TestKrogh:
+    def setup_method(self):
+        self.true_poly = np.poly1d([-2,3,1,5,-4])
+        self.test_xs = np.linspace(-1,1,100)
+        self.xs = np.linspace(-1,1,5)
+        self.ys = self.true_poly(self.xs)
+
+    def test_lagrange(self):
+        P = KroghInterpolator(self.xs,self.ys)
+        assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))
+
+    def test_scalar(self):
+        P = KroghInterpolator(self.xs,self.ys)
+        assert_almost_equal(self.true_poly(7),P(7))
+        assert_almost_equal(self.true_poly(np.array(7)), P(np.array(7)))
+
+    def test_derivatives(self):
+        P = KroghInterpolator(self.xs,self.ys)
+        D = P.derivatives(self.test_xs)
+        for i in range(D.shape[0]):
+            assert_almost_equal(self.true_poly.deriv(i)(self.test_xs),
+                                D[i])
+
+    def test_low_derivatives(self):
+        P = KroghInterpolator(self.xs,self.ys)
+        D = P.derivatives(self.test_xs,len(self.xs)+2)
+        for i in range(D.shape[0]):
+            assert_almost_equal(self.true_poly.deriv(i)(self.test_xs),
+                                D[i])
+
+    def test_derivative(self):
+        P = KroghInterpolator(self.xs,self.ys)
+        m = 10
+        r = P.derivatives(self.test_xs,m)
+        for i in range(m):
+            assert_almost_equal(P.derivative(self.test_xs,i),r[i])
+
+    def test_high_derivative(self):
+        P = KroghInterpolator(self.xs,self.ys)
+        for i in range(len(self.xs), 2*len(self.xs)):
+            assert_almost_equal(P.derivative(self.test_xs,i),
+                                np.zeros(len(self.test_xs)))
+
+    def test_hermite(self):
+        P = KroghInterpolator(self.xs,self.ys)
+        assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))
+
+    def test_vector(self):
+        xs = [0, 1, 2]
+        ys = np.array([[0,1],[1,0],[2,1]])
+        P = KroghInterpolator(xs,ys)
+        Pi = [KroghInterpolator(xs,ys[:,i]) for i in range(ys.shape[1])]
+        test_xs = np.linspace(-1,3,100)
+        assert_almost_equal(P(test_xs),
+                            np.asarray([p(test_xs) for p in Pi]).T)
+        assert_almost_equal(P.derivatives(test_xs),
+                np.transpose(np.asarray([p.derivatives(test_xs) for p in Pi]),
+                    (1,2,0)))
+
+    def test_empty(self):
+        P = KroghInterpolator(self.xs,self.ys)
+        assert_array_equal(P([]), [])
+
+    def test_shapes_scalarvalue(self):
+        P = KroghInterpolator(self.xs,self.ys)
+        assert_array_equal(np.shape(P(0)), ())
+        assert_array_equal(np.shape(P(np.array(0))), ())
+        assert_array_equal(np.shape(P([0])), (1,))
+        assert_array_equal(np.shape(P([0,1])), (2,))
+
+    def test_shapes_scalarvalue_derivative(self):
+        P = KroghInterpolator(self.xs,self.ys)
+        n = P.n
+        assert_array_equal(np.shape(P.derivatives(0)), (n,))
+        assert_array_equal(np.shape(P.derivatives(np.array(0))), (n,))
+        assert_array_equal(np.shape(P.derivatives([0])), (n,1))
+        assert_array_equal(np.shape(P.derivatives([0,1])), (n,2))
+
+    def test_shapes_vectorvalue(self):
+        P = KroghInterpolator(self.xs,np.outer(self.ys,np.arange(3)))
+        assert_array_equal(np.shape(P(0)), (3,))
+        assert_array_equal(np.shape(P([0])), (1,3))
+        assert_array_equal(np.shape(P([0,1])), (2,3))
+
+    def test_shapes_1d_vectorvalue(self):
+        P = KroghInterpolator(self.xs,np.outer(self.ys,[1]))
+        assert_array_equal(np.shape(P(0)), (1,))
+        assert_array_equal(np.shape(P([0])), (1,1))
+        assert_array_equal(np.shape(P([0,1])), (2,1))
+
+    def test_shapes_vectorvalue_derivative(self):
+        P = KroghInterpolator(self.xs,np.outer(self.ys,np.arange(3)))
+        n = P.n
+        assert_array_equal(np.shape(P.derivatives(0)), (n,3))
+        assert_array_equal(np.shape(P.derivatives([0])), (n,1,3))
+        assert_array_equal(np.shape(P.derivatives([0,1])), (n,2,3))
+
+    def test_wrapper(self):
+        P = KroghInterpolator(self.xs, self.ys)
+        ki = krogh_interpolate
+        assert_almost_equal(P(self.test_xs), ki(self.xs, self.ys, self.test_xs))
+        assert_almost_equal(P.derivative(self.test_xs, 2),
+                            ki(self.xs, self.ys, self.test_xs, der=2))
+        assert_almost_equal(P.derivatives(self.test_xs, 2),
+                            ki(self.xs, self.ys, self.test_xs, der=[0, 1]))
+
+    def test_int_inputs(self):
+        # Check input args are cast correctly to floats, gh-3669
+        x = [0, 234, 468, 702, 936, 1170, 1404, 2340, 3744, 6084, 8424,
+             13104, 60000]
+        offset_cdf = np.array([-0.95, -0.86114777, -0.8147762, -0.64072425,
+                               -0.48002351, -0.34925329, -0.26503107,
+                               -0.13148093, -0.12988833, -0.12979296,
+                               -0.12973574, -0.08582937, 0.05])
+        f = KroghInterpolator(x, offset_cdf)
+
+        assert_allclose(abs((f(x) - offset_cdf) / f.derivative(x, 1)),
+                        0, atol=1e-10)
+
+    def test_derivatives_complex(self):
+        # regression test for gh-7381: krogh.derivatives(0) fails for complex y
+        x, y = np.array([-1, -1, 0, 1, 1]), np.array([1, 1.0j, 0, -1, 1.0j])
+        func = KroghInterpolator(x, y)
+        cmplx = func.derivatives(0)
+
+        cmplx2 = (KroghInterpolator(x, y.real).derivatives(0) +
+                  1j*KroghInterpolator(x, y.imag).derivatives(0))
+        assert_allclose(cmplx, cmplx2, atol=1e-15)
+
+    def test_high_degree_warning(self):
+        with pytest.warns(UserWarning, match="40 degrees provided,"):
+            KroghInterpolator(np.arange(40), np.ones(40))
+
+
+class TestTaylor:
+    def test_exponential(self):
+        degree = 5
+        p = approximate_taylor_polynomial(np.exp, 0, degree, 1, 15)
+        for i in range(degree+1):
+            assert_almost_equal(p(0),1)
+            p = p.deriv()
+        assert_almost_equal(p(0),0)
+
+
+class TestBarycentric:
+    def setup_method(self):
+        self.true_poly = np.poly1d([-2, 3, 1, 5, -4])
+        self.test_xs = np.linspace(-1, 1, 100)
+        self.xs = np.linspace(-1, 1, 5)
+        self.ys = self.true_poly(self.xs)
+
+    def test_lagrange(self):
+        P = BarycentricInterpolator(self.xs, self.ys)
+        assert_almost_equal(self.true_poly(self.test_xs), P(self.test_xs))
+
+    def test_scalar(self):
+        P = BarycentricInterpolator(self.xs, self.ys)
+        assert_almost_equal(self.true_poly(7), P(7))
+        assert_almost_equal(self.true_poly(np.array(7)), P(np.array(7)))
+
+    def test_delayed(self):
+        P = BarycentricInterpolator(self.xs)
+        P.set_yi(self.ys)
+        assert_almost_equal(self.true_poly(self.test_xs), P(self.test_xs))
+
+    def test_append(self):
+        P = BarycentricInterpolator(self.xs[:3], self.ys[:3])
+        P.add_xi(self.xs[3:], self.ys[3:])
+        assert_almost_equal(self.true_poly(self.test_xs), P(self.test_xs))
+
+    def test_vector(self):
+        xs = [0, 1, 2]
+        ys = np.array([[0, 1], [1, 0], [2, 1]])
+        BI = BarycentricInterpolator
+        P = BI(xs, ys)
+        Pi = [BI(xs, ys[:, i]) for i in range(ys.shape[1])]
+        test_xs = np.linspace(-1, 3, 100)
+        assert_almost_equal(P(test_xs),
+                            np.asarray([p(test_xs) for p in Pi]).T)
+
+    def test_shapes_scalarvalue(self):
+        P = BarycentricInterpolator(self.xs, self.ys)
+        assert_array_equal(np.shape(P(0)), ())
+        assert_array_equal(np.shape(P(np.array(0))), ())
+        assert_array_equal(np.shape(P([0])), (1,))
+        assert_array_equal(np.shape(P([0, 1])), (2,))
+
+    def test_shapes_vectorvalue(self):
+        P = BarycentricInterpolator(self.xs, np.outer(self.ys, np.arange(3)))
+        assert_array_equal(np.shape(P(0)), (3,))
+        assert_array_equal(np.shape(P([0])), (1, 3))
+        assert_array_equal(np.shape(P([0, 1])), (2, 3))
+
+    def test_shapes_1d_vectorvalue(self):
+        P = BarycentricInterpolator(self.xs, np.outer(self.ys, [1]))
+        assert_array_equal(np.shape(P(0)), (1,))
+        assert_array_equal(np.shape(P([0])), (1, 1))
+        assert_array_equal(np.shape(P([0,1])), (2, 1))
+
+    def test_wrapper(self):
+        P = BarycentricInterpolator(self.xs, self.ys)
+        values = barycentric_interpolate(self.xs, self.ys, self.test_xs)
+        assert_almost_equal(P(self.test_xs), values)
+
+    def test_int_input(self):
+        x = 1000 * np.arange(1, 11)  # np.prod(x[-1] - x[:-1]) overflows
+        y = np.arange(1, 11)
+        value = barycentric_interpolate(x, y, 1000 * 9.5)
+        assert_almost_equal(value, 9.5)
+
+    def test_large_chebyshev(self):
+        # The barycentric weights for Chebyshev points of the second kind
+        # are known analytically. Naive calculation of barycentric weights
+        # fails for large N because of numerical underflow and overflow. We
+        # test correctness for large N against the analytical Chebyshev
+        # weights.
+
+        # Without capacity scaling or permutation, n=800 fails,
+        # With just capacity scaling, n=1097 fails
+        # With both capacity scaling and random permutation, n=30000 succeeds
+        n = 800
+        j = np.arange(n + 1).astype(np.float64)
+        x = np.cos(j * np.pi / n)
+
+        # See page 506 of Berrut and Trefethen 2004 for this formula
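+        # (namely w_j = (-1)**j * delta_j, with delta_j = 1/2 at the two
+        # endpoints and delta_j = 1 otherwise)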
+        w = (-1) ** j
+        w[0] *= 0.5
+        w[-1] *= 0.5
+
+        P = BarycentricInterpolator(x)
+
+        # It's okay to have a constant scaling factor in the weights because it
+        # cancels out in the evaluation of the polynomial.
+        factor = P.wi[0]
+        assert_almost_equal(P.wi / (2 * factor), w)
+
+    def test_warning(self):
+        # Test that the divide-by-zero warning is properly suppressed when
+        # the evaluation points coincide with the interpolation nodes.
+        P = BarycentricInterpolator([0, 1], [1, 2])
+        with np.errstate(divide='raise'):
+            yi = P(P.xi)
+
+        # Additionally check that the interpolated values equal the node values.
+        assert_almost_equal(yi, P.yi.ravel())
+
+
+class TestPCHIP:
+    def _make_random(self, npts=20):
+        np.random.seed(1234)
+        xi = np.sort(np.random.random(npts))
+        yi = np.random.random(npts)
+        return pchip(xi, yi), xi, yi
+
+    def test_overshoot(self):
+        # PCHIP should not overshoot
+        p, xi, yi = self._make_random()
+        for i in range(len(xi)-1):
+            x1, x2 = xi[i], xi[i+1]
+            y1, y2 = yi[i], yi[i+1]
+            if y1 > y2:
+                y1, y2 = y2, y1
+            xp = np.linspace(x1, x2, 10)
+            yp = p(xp)
+            assert_(((y1 <= yp + 1e-15) & (yp <= y2 + 1e-15)).all())
+
+    def test_monotone(self):
+        # PCHIP should preserve monotonicity
+        p, xi, yi = self._make_random()
+        for i in range(len(xi)-1):
+            x1, x2 = xi[i], xi[i+1]
+            y1, y2 = yi[i], yi[i+1]
+            xp = np.linspace(x1, x2, 10)
+            yp = p(xp)
+            # consecutive differences must have the same sign as y2 - y1
+            assert_(((y2-y1) * (yp[1:] - yp[:-1]) > 0).all())
+
+    def test_cast(self):
+        # regression test for integer input data, see gh-3453
+        data = np.array([[0, 4, 12, 27, 47, 60, 79, 87, 99, 100],
+                         [-33, -33, -19, -2, 12, 26, 38, 45, 53, 55]])
+        xx = np.arange(100)
+        curve = pchip(data[0], data[1])(xx)
+
+        data1 = data * 1.0
+        curve1 = pchip(data1[0], data1[1])(xx)
+
+        assert_allclose(curve, curve1, atol=1e-14, rtol=1e-14)
+
+    def test_nag(self):
+        # Example from NAG C implementation,
+        # http://nag.com/numeric/cl/nagdoc_cl25/html/e01/e01bec.html
+        # suggested in gh-5326 as a smoke test for the way the derivatives
+        # are computed (see also gh-3453)
+        dataStr = '''
+          7.99   0.00000E+0
+          8.09   0.27643E-4
+          8.19   0.43750E-1
+          8.70   0.16918E+0
+          9.20   0.46943E+0
+         10.00   0.94374E+0
+         12.00   0.99864E+0
+         15.00   0.99992E+0
+         20.00   0.99999E+0
+        '''
+        data = np.loadtxt(io.StringIO(dataStr))
+        pch = pchip(data[:,0], data[:,1])
+
+        resultStr = '''
+           7.9900       0.0000
+           9.1910       0.4640
+          10.3920       0.9645
+          11.5930       0.9965
+          12.7940       0.9992
+          13.9950       0.9998
+          15.1960       0.9999
+          16.3970       1.0000
+          17.5980       1.0000
+          18.7990       1.0000
+          20.0000       1.0000
+        '''
+        result = np.loadtxt(io.StringIO(resultStr))
+        assert_allclose(result[:,1], pch(result[:,0]), rtol=0., atol=5e-5)
+
+    def test_endslopes(self):
+        # this is a smoke test for gh-3453: PCHIP interpolator should not
+        # set edge slopes to zero if the data do not suggest zero edge derivatives
+        x = np.array([0.0, 0.1, 0.25, 0.35])
+        y1 = np.array([279.35, 0.5e3, 1.0e3, 2.5e3])
+        y2 = np.array([279.35, 2.5e3, 1.50e3, 1.0e3])
+        for pp in (pchip(x, y1), pchip(x, y2)):
+            for t in (x[0], x[-1]):
+                assert_(pp(t, 1) != 0)
+
+    def test_all_zeros(self):
+        x = np.arange(10)
+        y = np.zeros_like(x)
+
+        # this should work and not generate any warnings
+        with warnings.catch_warnings():
+            warnings.filterwarnings('error')
+            pch = pchip(x, y)
+
+        xx = np.linspace(0, 9, 101)
+        assert_equal(pch(xx), 0.)
+
+    def test_two_points(self):
+        # regression test for gh-6222: pchip([0, 1], [0, 1]) fails because
+        # it tries to use a three-point scheme to estimate edge derivatives,
+        # while there are only two points available.
+        # Instead, it should construct a linear interpolator.
+        x = np.linspace(0, 1, 11)
+        p = pchip([0, 1], [0, 2])
+        assert_allclose(p(x), 2*x, atol=1e-15)
+
+    def test_pchip_interpolate(self):
+        assert_array_almost_equal(
+            pchip_interpolate([1,2,3], [4,5,6], [0.5], der=1),
+            [1.])
+
+        assert_array_almost_equal(
+            pchip_interpolate([1,2,3], [4,5,6], [0.5], der=0),
+            [3.5])
+
+        assert_array_almost_equal(
+            pchip_interpolate([1,2,3], [4,5,6], [0.5], der=[0, 1]),
+            [[3.5], [1]])
+
+    def test_roots(self):
+        # regression test for gh-6357: .roots method should work
+        p = pchip([0, 1], [-1, 1])
+        r = p.roots()
+        assert_allclose(r, 0.5)
+
+
+class TestCubicSpline:
+    @staticmethod
+    def check_correctness(S, bc_start='not-a-knot', bc_end='not-a-knot',
+                          tol=1e-14):
+        """Check that spline coefficients satisfy the continuity and boundary
+        conditions."""
+        x = S.x
+        c = S.c
+        dx = np.diff(x)
+        dx = dx.reshape([dx.shape[0]] + [1] * (c.ndim - 2))
+        dxi = dx[:-1]
+
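+        # PPoly convention: on [x[i], x[i+1]] the spline is
+        #     S_i(t) = c[0, i]*t**3 + c[1, i]*t**2 + c[2, i]*t + c[3, i]
+        # with t = x - x[i], so C2 continuity means the value, slope and
+        # half the second derivative of S_i at its right edge must equal
+        # c[3, i+1], c[2, i+1] and c[1, i+1] respectively.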
+        # Check C2 continuity.
+        assert_allclose(c[3, 1:], c[0, :-1] * dxi**3 + c[1, :-1] * dxi**2 +
+                        c[2, :-1] * dxi + c[3, :-1], rtol=tol, atol=tol)
+        assert_allclose(c[2, 1:], 3 * c[0, :-1] * dxi**2 +
+                        2 * c[1, :-1] * dxi + c[2, :-1], rtol=tol, atol=tol)
+        assert_allclose(c[1, 1:], 3 * c[0, :-1] * dxi + c[1, :-1],
+                        rtol=tol, atol=tol)
+
+        # Check that we found a parabola, the third derivative is 0.
+        if x.size == 3 and bc_start == 'not-a-knot' and bc_end == 'not-a-knot':
+            assert_allclose(c[0], 0, rtol=tol, atol=tol)
+            return
+
+        # Check periodic boundary conditions.
+        if bc_start == 'periodic':
+            assert_allclose(S(x[0], 0), S(x[-1], 0), rtol=tol, atol=tol)
+            assert_allclose(S(x[0], 1), S(x[-1], 1), rtol=tol, atol=tol)
+            assert_allclose(S(x[0], 2), S(x[-1], 2), rtol=tol, atol=tol)
+            return
+
+        # Check other boundary conditions.
+        if bc_start == 'not-a-knot':
+            if x.size == 2:
+                slope = (S(x[1]) - S(x[0])) / dx[0]
+                assert_allclose(S(x[0], 1), slope, rtol=tol, atol=tol)
+            else:
+                assert_allclose(c[0, 0], c[0, 1], rtol=tol, atol=tol)
+        elif bc_start == 'clamped':
+            assert_allclose(S(x[0], 1), 0, rtol=tol, atol=tol)
+        elif bc_start == 'natural':
+            assert_allclose(S(x[0], 2), 0, rtol=tol, atol=tol)
+        else:
+            order, value = bc_start
+            assert_allclose(S(x[0], order), value, rtol=tol, atol=tol)
+
+        if bc_end == 'not-a-knot':
+            if x.size == 2:
+                slope = (S(x[1]) - S(x[0])) / dx[0]
+                assert_allclose(S(x[1], 1), slope, rtol=tol, atol=tol)
+            else:
+                assert_allclose(c[0, -1], c[0, -2], rtol=tol, atol=tol)
+        elif bc_end == 'clamped':
+            assert_allclose(S(x[-1], 1), 0, rtol=tol, atol=tol)
+        elif bc_end == 'natural':
+            assert_allclose(S(x[-1], 2), 0, rtol=2*tol, atol=2*tol)
+        else:
+            order, value = bc_end
+            assert_allclose(S(x[-1], order), value, rtol=tol, atol=tol)
+
+    def check_all_bc(self, x, y, axis):
+        deriv_shape = list(y.shape)
+        del deriv_shape[axis]
+        first_deriv = np.empty(deriv_shape)
+        first_deriv.fill(2)
+        second_deriv = np.empty(deriv_shape)
+        second_deriv.fill(-1)
+        bc_all = [
+            'not-a-knot',
+            'natural',
+            'clamped',
+            (1, first_deriv),
+            (2, second_deriv)
+        ]
+        for bc in bc_all[:3]:
+            S = CubicSpline(x, y, axis=axis, bc_type=bc)
+            self.check_correctness(S, bc, bc)
+
+        for bc_start in bc_all:
+            for bc_end in bc_all:
+                S = CubicSpline(x, y, axis=axis, bc_type=(bc_start, bc_end))
+                self.check_correctness(S, bc_start, bc_end, tol=2e-14)
+
+    def test_general(self):
+        x = np.array([-1, 0, 0.5, 2, 4, 4.5, 5.5, 9])
+        y = np.array([0, -0.5, 2, 3, 2.5, 1, 1, 0.5])
+        for n in [2, 3, x.size]:
+            self.check_all_bc(x[:n], y[:n], 0)
+
+            Y = np.empty((2, n, 2))
+            Y[0, :, 0] = y[:n]
+            Y[0, :, 1] = y[:n] - 1
+            Y[1, :, 0] = y[:n] + 2
+            Y[1, :, 1] = y[:n] + 3
+            self.check_all_bc(x[:n], Y, 1)
+
+    def test_periodic(self):
+        for n in [2, 3, 5]:
+            x = np.linspace(0, 2 * np.pi, n)
+            y = np.cos(x)
+            S = CubicSpline(x, y, bc_type='periodic')
+            self.check_correctness(S, 'periodic', 'periodic')
+
+            Y = np.empty((2, n, 2))
+            Y[0, :, 0] = y
+            Y[0, :, 1] = y + 2
+            Y[1, :, 0] = y - 1
+            Y[1, :, 1] = y + 5
+            S = CubicSpline(x, Y, axis=1, bc_type='periodic')
+            self.check_correctness(S, 'periodic', 'periodic')
+
+    def test_periodic_eval(self):
+        x = np.linspace(0, 2 * np.pi, 10)
+        y = np.cos(x)
+        S = CubicSpline(x, y, bc_type='periodic')
+        assert_almost_equal(S(1), S(1 + 2 * np.pi), decimal=15)
+
+    def test_second_derivative_continuity_gh_11758(self):
+        # gh-11758: C2 continuity fail
+        x = np.array([0.9, 1.3, 1.9, 2.1, 2.6, 3.0, 3.9, 4.4, 4.7, 5.0, 6.0,
+                      7.0, 8.0, 9.2, 10.5, 11.3, 11.6, 12.0, 12.6, 13.0, 13.3])
+        y = np.array([1.3, 1.5, 1.85, 2.1, 2.6, 2.7, 2.4, 2.15, 2.05, 2.1,
+                      2.25, 2.3, 2.25, 1.95, 1.4, 0.9, 0.7, 0.6, 0.5, 0.4, 1.3])
+        S = CubicSpline(x, y, bc_type='periodic', extrapolate='periodic')
+        self.check_correctness(S, 'periodic', 'periodic')
+
+    def test_three_points(self):
+        # gh-11758: Fails computing a_m2_m1
+        # In this case the first derivatives s can be found manually by
+        # solving a system of 2 linear equations, which gives
+        # s[i] = (h1*m2 + h2*m1) / (h1 + h2), where h1 = x[1] - x[0],
+        # h2 = x[2] - x[1], m1 = (y[1] - y[0]) / h1, m2 = (y[2] - y[1]) / h2.
+        x = np.array([1.0, 2.75, 3.0])
+        y = np.array([1.0, 15.0, 1.0])
+        S = CubicSpline(x, y, bc_type='periodic')
+        self.check_correctness(S, 'periodic', 'periodic')
+        assert_allclose(S.derivative(1)(x), np.array([-48.0, -48.0, -48.0]))
+
+    def test_dtypes(self):
+        x = np.array([0, 1, 2, 3], dtype=int)
+        y = np.array([-5, 2, 3, 1], dtype=int)
+        S = CubicSpline(x, y)
+        self.check_correctness(S)
+
+        y = np.array([-1+1j, 0.0, 1-1j, 0.5-1.5j])
+        S = CubicSpline(x, y)
+        self.check_correctness(S)
+
+        S = CubicSpline(x, x ** 3, bc_type=("natural", (1, 2j)))
+        self.check_correctness(S, "natural", (1, 2j))
+
+        y = np.array([-5, 2, 3, 1])
+        S = CubicSpline(x, y, bc_type=[(1, 2 + 0.5j), (2, 0.5 - 1j)])
+        self.check_correctness(S, (1, 2 + 0.5j), (2, 0.5 - 1j))
+
+    def test_small_dx(self):
+        rng = np.random.RandomState(0)
+        x = np.sort(rng.uniform(size=100))
+        y = 1e4 + rng.uniform(size=100)
+        S = CubicSpline(x, y)
+        self.check_correctness(S, tol=1e-13)
+
+    def test_incorrect_inputs(self):
+        x = np.array([1, 2, 3, 4])
+        y = np.array([1, 2, 3, 4])
+        xc = np.array([1 + 1j, 2, 3, 4])
+        xn = np.array([np.nan, 2, 3, 4])
+        xo = np.array([2, 1, 3, 4])
+        yn = np.array([np.nan, 2, 3, 4])
+        y3 = [1, 2, 3]
+        x1 = [1]
+        y1 = [1]
+
+        assert_raises(ValueError, CubicSpline, xc, y)
+        assert_raises(ValueError, CubicSpline, xn, y)
+        assert_raises(ValueError, CubicSpline, x, yn)
+        assert_raises(ValueError, CubicSpline, xo, y)
+        assert_raises(ValueError, CubicSpline, x, y3)
+        assert_raises(ValueError, CubicSpline, x[:, np.newaxis], y)
+        assert_raises(ValueError, CubicSpline, x1, y1)
+
+        wrong_bc = [('periodic', 'clamped'),
+                    ((2, 0), (3, 10)),
+                    ((1, 0), ),
+                    (0., 0.),
+                    'not-a-typo']
+
+        for bc_type in wrong_bc:
+            assert_raises(ValueError, CubicSpline, x, y, 0, bc_type, True)
+
+        # Shapes mismatch when giving arbitrary derivative values:
+        Y = np.c_[y, y]
+        bc1 = ('clamped', (1, 0))
+        bc2 = ('clamped', (1, [0, 0, 0]))
+        bc3 = ('clamped', (1, [[0, 0]]))
+        assert_raises(ValueError, CubicSpline, x, Y, 0, bc1, True)
+        assert_raises(ValueError, CubicSpline, x, Y, 0, bc2, True)
+        assert_raises(ValueError, CubicSpline, x, Y, 0, bc3, True)
+
+        # periodic condition, y[-1] must be equal to y[0]:
+        assert_raises(ValueError, CubicSpline, x, y, 0, 'periodic', True)
+
+
+def test_CubicHermiteSpline_correctness():
+    x = [0, 2, 7]
+    y = [-1, 2, 3]
+    dydx = [0, 3, 7]
+    s = CubicHermiteSpline(x, y, dydx)
+    assert_allclose(s(x), y, rtol=1e-15)
+    assert_allclose(s(x, 1), dydx, rtol=1e-15)
+
+
+def test_CubicHermiteSpline_error_handling():
+    x = [1, 2, 3]
+    y = [0, 3, 5]
+    dydx = [1, -1, 2, 3]
+    assert_raises(ValueError, CubicHermiteSpline, x, y, dydx)
+
+    dydx_with_nan = [1, 0, np.nan]
+    assert_raises(ValueError, CubicHermiteSpline, x, y, dydx_with_nan)
+
+
+def test_roots_extrapolate_gh_11185():
+    x = np.array([0.001, 0.002])
+    y = np.array([1.66066935e-06, 1.10410807e-06])
+    dy = np.array([-1.60061854, -1.600619])
+    p = CubicHermiteSpline(x, y, dy)
+
+    # roots(extrapolate=True) for a polynomial with a single interval
+    # should return all three real roots
+    r = p.roots(extrapolate=True)
+    assert_equal(p.c.shape[1], 1)
+    assert_equal(r.size, 3)
+
+
+class TestZeroSizeArrays:
+    # regression tests for gh-17241: CubicSpline et al. must not segfault
+    # when y.size == 0
+    # The two methods below are _almost_ the same, but not quite:
+    # one is for objects which have the `bc_type` argument (CubicSpline)
+    # and the other one is for those which do not (Pchip, Akima1D)
+
+    @pytest.mark.parametrize('y', [np.zeros((10, 0, 5)),
+                                   np.zeros((10, 5, 0))])
+    @pytest.mark.parametrize('bc_type',
+                             ['not-a-knot', 'periodic', 'natural', 'clamped'])
+    @pytest.mark.parametrize('axis', [0, 1, 2])
+    @pytest.mark.parametrize('cls', [make_interp_spline, CubicSpline])
+    def test_zero_size(self, cls, y, bc_type, axis):
+        x = np.arange(10)
+        xval = np.arange(3)
+
+        obj = cls(x, y, bc_type=bc_type)
+        assert obj(xval).size == 0
+        assert obj(xval).shape == xval.shape + y.shape[1:]
+
+        # Also check with an explicit non-default axis
+        yt = np.moveaxis(y, 0, axis)  # (10, 0, 5) --> (0, 10, 5) if axis=1 etc
+
+        obj = cls(x, yt, bc_type=bc_type, axis=axis)
+        sh = yt.shape[:axis] + (xval.size, ) + yt.shape[axis+1:]
+        assert obj(xval).size == 0
+        assert obj(xval).shape == sh
+
+    @pytest.mark.parametrize('y', [np.zeros((10, 0, 5)),
+                                   np.zeros((10, 5, 0))])
+    @pytest.mark.parametrize('axis', [0, 1, 2])
+    @pytest.mark.parametrize('cls', [PchipInterpolator, Akima1DInterpolator])
+    def test_zero_size_2(self, cls, y, axis):
+        x = np.arange(10)
+        xval = np.arange(3)
+
+        obj = cls(x, y)
+        assert obj(xval).size == 0
+        assert obj(xval).shape == xval.shape + y.shape[1:]
+
+        # Also check with an explicit non-default axis
+        yt = np.moveaxis(y, 0, axis)  # (10, 0, 5) --> (0, 10, 5) if axis=1 etc
+
+        obj = cls(x, yt, axis=axis)
+        sh = yt.shape[:axis] + (xval.size, ) + yt.shape[axis+1:]
+        assert obj(xval).size == 0
+        assert obj(xval).shape == sh
diff --git a/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_rbf.py b/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_rbf.py
new file mode 100644
index 00000000..ed7714a2
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_rbf.py
@@ -0,0 +1,221 @@
+# Created by John Travers, Robert Hetland, 2007
+""" Test functions for rbf module """
+
+import numpy as np
+from numpy.testing import (assert_, assert_array_almost_equal,
+                           assert_almost_equal)
+from numpy import linspace, sin, cos, random, exp, allclose
+from scipy.interpolate._rbf import Rbf
+
+FUNCTIONS = ('multiquadric', 'inverse multiquadric', 'gaussian',
+             'cubic', 'quintic', 'thin-plate', 'linear')
+
+
+def check_rbf1d_interpolation(function):
+    # Check that the Rbf function interpolates through the nodes (1D)
+    x = linspace(0,10,9)
+    y = sin(x)
+    rbf = Rbf(x, y, function=function)
+    yi = rbf(x)
+    assert_array_almost_equal(y, yi)
+    assert_almost_equal(rbf(float(x[0])), y[0])
+
+
+def check_rbf2d_interpolation(function):
+    # Check that the Rbf function interpolates through the nodes (2D).
+    x = random.rand(50,1)*4-2
+    y = random.rand(50,1)*4-2
+    z = x*exp(-x**2-1j*y**2)
+    rbf = Rbf(x, y, z, epsilon=2, function=function)
+    zi = rbf(x, y)
+    zi.shape = x.shape
+    assert_array_almost_equal(z, zi)
+
+
+def check_rbf3d_interpolation(function):
+    # Check that the Rbf function interpolates through the nodes (3D).
+    x = random.rand(50, 1)*4 - 2
+    y = random.rand(50, 1)*4 - 2
+    z = random.rand(50, 1)*4 - 2
+    d = x*exp(-x**2 - y**2)
+    rbf = Rbf(x, y, z, d, epsilon=2, function=function)
+    di = rbf(x, y, z)
+    di.shape = x.shape
+    assert_array_almost_equal(di, d)
+
+
+def test_rbf_interpolation():
+    for function in FUNCTIONS:
+        check_rbf1d_interpolation(function)
+        check_rbf2d_interpolation(function)
+        check_rbf3d_interpolation(function)
+
+
+def check_2drbf1d_interpolation(function):
+    # Check that the 2-D Rbf function interpolates through the nodes (1D)
+    x = linspace(0, 10, 9)
+    y0 = sin(x)
+    y1 = cos(x)
+    y = np.vstack([y0, y1]).T
+    rbf = Rbf(x, y, function=function, mode='N-D')
+    yi = rbf(x)
+    assert_array_almost_equal(y, yi)
+    assert_almost_equal(rbf(float(x[0])), y[0])
+
+
+def check_2drbf2d_interpolation(function):
+    # Check that the 2-D Rbf function interpolates through the nodes (2D).
+    x = random.rand(50, ) * 4 - 2
+    y = random.rand(50, ) * 4 - 2
+    z0 = x * exp(-x ** 2 - 1j * y ** 2)
+    z1 = y * exp(-y ** 2 - 1j * x ** 2)
+    z = np.vstack([z0, z1]).T
+    rbf = Rbf(x, y, z, epsilon=2, function=function, mode='N-D')
+    zi = rbf(x, y)
+    zi.shape = z.shape
+    assert_array_almost_equal(z, zi)
+
+
+def check_2drbf3d_interpolation(function):
+    # Check that the 2-D Rbf function interpolates through the nodes (3D).
+    x = random.rand(50, ) * 4 - 2
+    y = random.rand(50, ) * 4 - 2
+    z = random.rand(50, ) * 4 - 2
+    d0 = x * exp(-x ** 2 - y ** 2)
+    d1 = y * exp(-y ** 2 - x ** 2)
+    d = np.vstack([d0, d1]).T
+    rbf = Rbf(x, y, z, d, epsilon=2, function=function, mode='N-D')
+    di = rbf(x, y, z)
+    di.shape = d.shape
+    assert_array_almost_equal(di, d)
+
+
+def test_2drbf_interpolation():
+    for function in FUNCTIONS:
+        check_2drbf1d_interpolation(function)
+        check_2drbf2d_interpolation(function)
+        check_2drbf3d_interpolation(function)
+
+
+def check_rbf1d_regularity(function, atol):
+    # Check that the Rbf function approximates a smooth function well away
+    # from the nodes.
+    x = linspace(0, 10, 9)
+    y = sin(x)
+    rbf = Rbf(x, y, function=function)
+    xi = linspace(0, 10, 100)
+    yi = rbf(xi)
+    msg = "abs-diff: %f" % abs(yi - sin(xi)).max()
+    assert_(allclose(yi, sin(xi), atol=atol), msg)
+
+
+def test_rbf_regularity():
+    tolerances = {
+        'multiquadric': 0.1,
+        'inverse multiquadric': 0.15,
+        'gaussian': 0.15,
+        'cubic': 0.15,
+        'quintic': 0.1,
+        'thin-plate': 0.1,
+        'linear': 0.2
+    }
+    for function in FUNCTIONS:
+        check_rbf1d_regularity(function, tolerances.get(function, 1e-2))
+
+
+def check_2drbf1d_regularity(function, atol):
+    # Check that the 2-D Rbf function approximates a smooth function well away
+    # from the nodes.
+    x = linspace(0, 10, 9)
+    y0 = sin(x)
+    y1 = cos(x)
+    y = np.vstack([y0, y1]).T
+    rbf = Rbf(x, y, function=function, mode='N-D')
+    xi = linspace(0, 10, 100)
+    yi = rbf(xi)
+    msg = "abs-diff: %f" % abs(yi - np.vstack([sin(xi), cos(xi)]).T).max()
+    assert_(allclose(yi, np.vstack([sin(xi), cos(xi)]).T, atol=atol), msg)
+
+
+def test_2drbf_regularity():
+    tolerances = {
+        'multiquadric': 0.1,
+        'inverse multiquadric': 0.15,
+        'gaussian': 0.15,
+        'cubic': 0.15,
+        'quintic': 0.1,
+        'thin-plate': 0.15,
+        'linear': 0.2
+    }
+    for function in FUNCTIONS:
+        check_2drbf1d_regularity(function, tolerances.get(function, 1e-2))
+
+
+def check_rbf1d_stability(function):
+    # Check that the Rbf function with default epsilon is not subject
+    # to overshoot. Regression for issue #4523.
+    #
+    # Generate some data (fixed random seed hence deterministic)
+    np.random.seed(1234)
+    x = np.linspace(0, 10, 50)
+    z = x + 4.0 * np.random.randn(len(x))
+
+    rbf = Rbf(x, z, function=function)
+    xi = np.linspace(0, 10, 1000)
+    yi = rbf(xi)
+
+    # subtract the linear trend and make sure there are no spikes
+    assert_(np.abs(yi-xi).max() / np.abs(z-x).max() < 1.1)
+
+def test_rbf_stability():
+    for function in FUNCTIONS:
+        check_rbf1d_stability(function)
+
+
+def test_default_construction():
+    # Check that the Rbf class can be constructed with the default
+    # multiquadric basis function. Regression test for ticket #1228.
+    x = linspace(0,10,9)
+    y = sin(x)
+    rbf = Rbf(x, y)
+    yi = rbf(x)
+    assert_array_almost_equal(y, yi)
+
+
+def test_function_is_callable():
+    # Check that the Rbf class can be constructed with function=callable.
+    x = linspace(0,10,9)
+    y = sin(x)
+    linfunc = lambda x:x
+    rbf = Rbf(x, y, function=linfunc)
+    yi = rbf(x)
+    assert_array_almost_equal(y, yi)
+
+
+def test_two_arg_function_is_callable():
+    # Check that the Rbf class can be constructed with a two argument
+    # function=callable.
+    def _func(self, r):
+        return self.epsilon + r
+
+    x = linspace(0,10,9)
+    y = sin(x)
+    rbf = Rbf(x, y, function=_func)
+    yi = rbf(x)
+    assert_array_almost_equal(y, yi)
+
+
+def test_rbf_epsilon_none():
+    x = linspace(0, 10, 9)
+    y = sin(x)
+    Rbf(x, y, epsilon=None)
+
+
+def test_rbf_epsilon_none_collinear():
+    # Check that collinear points in one dimension don't cause an error
+    # due to epsilon = 0
+    x = [1, 2, 3]
+    y = [4, 4, 4]
+    z = [5, 6, 7]
+    rbf = Rbf(x, y, z, epsilon=None)
+    assert_(rbf.epsilon > 0)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_rbfinterp.py b/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_rbfinterp.py
new file mode 100644
index 00000000..04cfc800
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_rbfinterp.py
@@ -0,0 +1,507 @@
+import pickle
+import pytest
+import numpy as np
+from numpy.linalg import LinAlgError
+from numpy.testing import assert_allclose, assert_array_equal
+from scipy.stats.qmc import Halton
+from scipy.spatial import cKDTree
+from scipy.interpolate._rbfinterp import (
+    _AVAILABLE, _SCALE_INVARIANT, _NAME_TO_MIN_DEGREE, _monomial_powers,
+    RBFInterpolator
+    )
+from scipy.interpolate import _rbfinterp_pythran
+
+
+def _vandermonde(x, degree):
+    # Returns a matrix of monomials that span polynomials with the specified
+    # degree evaluated at x.
+    powers = _monomial_powers(x.shape[1], degree)
+    return _rbfinterp_pythran._polynomial_matrix(x, powers)
+
+
+def _1d_test_function(x):
+    # Test function used in Wahba's "Spline Models for Observational Data".
+    # domain ~= (0, 3), range ~= (-1.0, 0.2)
+    x = x[:, 0]
+    y = 4.26*(np.exp(-x) - 4*np.exp(-2*x) + 3*np.exp(-3*x))
+    return y
+
+
+def _2d_test_function(x):
+    # Franke's test function.
+    # domain ~= (0, 1) X (0, 1), range ~= (0.0, 1.2)
+    x1, x2 = x[:, 0], x[:, 1]
+    term1 = 0.75 * np.exp(-(9*x1-2)**2/4 - (9*x2-2)**2/4)
+    term2 = 0.75 * np.exp(-(9*x1+1)**2/49 - (9*x2+1)/10)
+    term3 = 0.5 * np.exp(-(9*x1-7)**2/4 - (9*x2-3)**2/4)
+    term4 = -0.2 * np.exp(-(9*x1-4)**2 - (9*x2-7)**2)
+    y = term1 + term2 + term3 + term4
+    return y
+
+
+def _is_conditionally_positive_definite(kernel, m):
+    # Tests whether the kernel is conditionally positive definite of order m.
+    # See chapter 7 of Fasshauer's "Meshfree Approximation Methods with
+    # MATLAB".
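+    # Equivalently, c.T @ A @ c > 0 must hold for every c != 0 satisfying
+    # P.T @ c = 0, where A is the kernel matrix and P is a polynomial
+    # (Vandermonde) matrix of degree m - 1 at the same points.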
+    nx = 10
+    ntests = 100
+    for ndim in [1, 2, 3, 4, 5]:
+        # Generate sample points with a Halton sequence to avoid samples that
+        # are too close to eachother, which can make the matrix singular.
+        seq = Halton(ndim, scramble=False, seed=np.random.RandomState())
+        for _ in range(ntests):
+            x = 2*seq.random(nx) - 1
+            A = _rbfinterp_pythran._kernel_matrix(x, kernel)
+            P = _vandermonde(x, m - 1)
+            Q, R = np.linalg.qr(P, mode='complete')
+            # Q2 forms a basis spanning the space where P.T.dot(x) = 0. Project
+            # A onto this space, and then see if it is positive definite using
+            # the Cholesky decomposition. If not, then the kernel is not c.p.d.
+            # of order m.
+            Q2 = Q[:, P.shape[1]:]
+            B = Q2.T.dot(A).dot(Q2)
+            try:
+                np.linalg.cholesky(B)
+            except np.linalg.LinAlgError:
+                return False
+
+    return True
+
+
+# Sorting the parametrize arguments is necessary to avoid a parallelization
+# issue described here: https://github.com/pytest-dev/pytest-xdist/issues/432.
+@pytest.mark.parametrize('kernel', sorted(_AVAILABLE))
+def test_conditionally_positive_definite(kernel):
+    # Test if each kernel in _AVAILABLE is conditionally positive definite of
+    # order m, where m comes from _NAME_TO_MIN_DEGREE. This is a necessary
+    # condition for the smoothed RBF interpolant to be well-posed in general.
+    m = _NAME_TO_MIN_DEGREE.get(kernel, -1) + 1
+    assert _is_conditionally_positive_definite(kernel, m)
+
+
+class _TestRBFInterpolator:
+    @pytest.mark.parametrize('kernel', sorted(_SCALE_INVARIANT))
+    def test_scale_invariance_1d(self, kernel):
+        # Verify that the functions in _SCALE_INVARIANT are insensitive to the
+        # shape parameter (when smoothing == 0) in 1d.
+        seq = Halton(1, scramble=False, seed=np.random.RandomState())
+        x = 3*seq.random(50)
+        y = _1d_test_function(x)
+        xitp = 3*seq.random(50)
+        yitp1 = self.build(x, y, epsilon=1.0, kernel=kernel)(xitp)
+        yitp2 = self.build(x, y, epsilon=2.0, kernel=kernel)(xitp)
+        assert_allclose(yitp1, yitp2, atol=1e-8)
+
+    @pytest.mark.parametrize('kernel', sorted(_SCALE_INVARIANT))
+    def test_scale_invariance_2d(self, kernel):
+        # Verify that the kernels in _SCALE_INVARIANT are insensitive to the
+        # shape parameter `epsilon` (when smoothing == 0) in 2d.
+        seq = Halton(2, scramble=False, seed=np.random.RandomState())
+        x = seq.random(100)
+        y = _2d_test_function(x)
+        xitp = seq.random(100)
+        yitp1 = self.build(x, y, epsilon=1.0, kernel=kernel)(xitp)
+        yitp2 = self.build(x, y, epsilon=2.0, kernel=kernel)(xitp)
+        assert_allclose(yitp1, yitp2, atol=1e-8)
+
+    @pytest.mark.parametrize('kernel', sorted(_AVAILABLE))
+    def test_extreme_domains(self, kernel):
+        # Make sure the interpolant remains numerically stable for very
+        # large/small domains.
+        seq = Halton(2, scramble=False, seed=np.random.RandomState())
+        scale = 1e50
+        shift = 1e55
+
+        x = seq.random(100)
+        y = _2d_test_function(x)
+        xitp = seq.random(100)
+
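+        # Scale-invariant kernels need no shape parameter, so the
+        # interpolant should be unaffected by the affine map of the domain;
+        # for the other kernels, dividing epsilon by `scale` compensates
+        # for stretching the domain, so the two fits should agree.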
+        if kernel in _SCALE_INVARIANT:
+            yitp1 = self.build(x, y, kernel=kernel)(xitp)
+            yitp2 = self.build(
+                x*scale + shift, y,
+                kernel=kernel
+                )(xitp*scale + shift)
+        else:
+            yitp1 = self.build(x, y, epsilon=5.0, kernel=kernel)(xitp)
+            yitp2 = self.build(
+                x*scale + shift, y,
+                epsilon=5.0/scale,
+                kernel=kernel
+                )(xitp*scale + shift)
+
+        assert_allclose(yitp1, yitp2, atol=1e-8)
+
+    def test_polynomial_reproduction(self):
+        # If the observed data comes from a polynomial, then the interpolant
+        # should be able to reproduce the polynomial exactly, provided that
+        # `degree` is sufficiently high.
+        rng = np.random.RandomState(0)
+        seq = Halton(2, scramble=False, seed=rng)
+        degree = 3
+
+        x = seq.random(50)
+        xitp = seq.random(50)
+
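+        # In 2-D there are C(3+2, 2) = 10 monomials of degree <= 3, so P
+        # and Pitp below each have 10 columns.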
+        P = _vandermonde(x, degree)
+        Pitp = _vandermonde(xitp, degree)
+
+        poly_coeffs = rng.normal(0.0, 1.0, P.shape[1])
+
+        y = P.dot(poly_coeffs)
+        yitp1 = Pitp.dot(poly_coeffs)
+        yitp2 = self.build(x, y, degree=degree)(xitp)
+
+        assert_allclose(yitp1, yitp2, atol=1e-8)
+
+    @pytest.mark.slow
+    def test_chunking(self, monkeypatch):
+        # As in test_polynomial_reproduction, the interpolant should
+        # reproduce polynomial data exactly; here the evaluation is
+        # additionally forced through the chunked code path.
+        rng = np.random.RandomState(0)
+        seq = Halton(2, scramble=False, seed=rng)
+        degree = 3
+
+        # use a number of evaluation points large enough that the chunked
+        # evaluation path of RBFInterpolator is actually exercised
+        largeN = 1000 + 33
+        x = seq.random(50)
+        xitp = seq.random(largeN)
+
+        P = _vandermonde(x, degree)
+        Pitp = _vandermonde(xitp, degree)
+
+        poly_coeffs = rng.normal(0.0, 1.0, P.shape[1])
+
+        y = P.dot(poly_coeffs)
+        yitp1 = Pitp.dot(poly_coeffs)
+        interp = self.build(x, y, degree=degree)
+        ce_real = interp._chunk_evaluator
+
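+        # Wrap the real _chunk_evaluator and force a tiny memory budget so
+        # that the 1033 evaluation points must be processed in several
+        # chunks rather than in a single pass.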
+        def _chunk_evaluator(*args, **kwargs):
+            kwargs.update(memory_budget=100)
+            return ce_real(*args, **kwargs)
+
+        monkeypatch.setattr(interp, '_chunk_evaluator', _chunk_evaluator)
+        yitp2 = interp(xitp)
+        assert_allclose(yitp1, yitp2, atol=1e-8)
+
+    def test_vector_data(self):
+        # Make sure interpolating a vector field is the same as interpolating
+        # each component separately.
+        seq = Halton(2, scramble=False, seed=np.random.RandomState())
+
+        x = seq.random(100)
+        xitp = seq.random(100)
+
+        y = np.array([_2d_test_function(x),
+                      _2d_test_function(x[:, ::-1])]).T
+
+        yitp1 = self.build(x, y)(xitp)
+        yitp2 = self.build(x, y[:, 0])(xitp)
+        yitp3 = self.build(x, y[:, 1])(xitp)
+
+        assert_allclose(yitp1[:, 0], yitp2)
+        assert_allclose(yitp1[:, 1], yitp3)
+
+    def test_complex_data(self):
+        # Interpolating complex input should be the same as interpolating
+        # the real and imaginary components separately.
+        seq = Halton(2, scramble=False, seed=np.random.RandomState())
+
+        x = seq.random(100)
+        xitp = seq.random(100)
+
+        y = _2d_test_function(x) + 1j*_2d_test_function(x[:, ::-1])
+
+        yitp1 = self.build(x, y)(xitp)
+        yitp2 = self.build(x, y.real)(xitp)
+        yitp3 = self.build(x, y.imag)(xitp)
+
+        assert_allclose(yitp1.real, yitp2)
+        assert_allclose(yitp1.imag, yitp3)
+
+    @pytest.mark.parametrize('kernel', sorted(_AVAILABLE))
+    def test_interpolation_misfit_1d(self, kernel):
+        # Make sure that each kernel, with its default `degree` and an
+        # appropriate `epsilon`, does a good job at interpolation in 1d.
+        seq = Halton(1, scramble=False, seed=np.random.RandomState())
+
+        x = 3*seq.random(50)
+        xitp = 3*seq.random(50)
+
+        y = _1d_test_function(x)
+        ytrue = _1d_test_function(xitp)
+        yitp = self.build(x, y, epsilon=5.0, kernel=kernel)(xitp)
+
+        mse = np.mean((yitp - ytrue)**2)
+        assert mse < 1.0e-4
+
+    @pytest.mark.parametrize('kernel', sorted(_AVAILABLE))
+    def test_interpolation_misfit_2d(self, kernel):
+        # Make sure that each kernel, with its default `degree` and an
+        # appropriate `epsilon`, does a good job at interpolation in 2d.
+        seq = Halton(2, scramble=False, seed=np.random.RandomState())
+
+        x = seq.random(100)
+        xitp = seq.random(100)
+
+        y = _2d_test_function(x)
+        ytrue = _2d_test_function(xitp)
+        yitp = self.build(x, y, epsilon=5.0, kernel=kernel)(xitp)
+
+        mse = np.mean((yitp - ytrue)**2)
+        assert mse < 2.0e-4
+
+    @pytest.mark.parametrize('kernel', sorted(_AVAILABLE))
+    def test_smoothing_misfit(self, kernel):
+        # Make sure we can find a smoothing parameter for each kernel that
+        # removes a sufficient amount of noise.
+        rng = np.random.RandomState(0)
+        seq = Halton(1, scramble=False, seed=rng)
+
+        noise = 0.2
+        rmse_tol = 0.1
+        smoothing_range = 10**np.linspace(-4, 1, 20)
+
+        x = 3*seq.random(100)
+        y = _1d_test_function(x) + rng.normal(0.0, noise, (100,))
+        ytrue = _1d_test_function(x)
+        rmse_within_tol = False
+        for smoothing in smoothing_range:
+            ysmooth = self.build(
+                x, y,
+                epsilon=1.0,
+                smoothing=smoothing,
+                kernel=kernel)(x)
+            rmse = np.sqrt(np.mean((ysmooth - ytrue)**2))
+            if rmse < rmse_tol:
+                rmse_within_tol = True
+                break
+
+        assert rmse_within_tol
+
+    def test_array_smoothing(self):
+        # Test using an array for `smoothing` to give less weight to a known
+        # outlier.
+        rng = np.random.RandomState(0)
+        seq = Halton(1, scramble=False, seed=rng)
+        degree = 2
+
+        x = seq.random(50)
+        P = _vandermonde(x, degree)
+        poly_coeffs = rng.normal(0.0, 1.0, P.shape[1])
+        y = P.dot(poly_coeffs)
+        y_with_outlier = np.copy(y)
+        y_with_outlier[10] += 1.0
+        smoothing = np.zeros((50,))
+        smoothing[10] = 1000.0
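+        # The large smoothing value at index 10 lets the fit effectively
+        # ignore the outlier while still matching the remaining points.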
+        yitp = self.build(x, y_with_outlier, smoothing=smoothing)(x)
+        # Should be able to reproduce the uncorrupted data almost exactly.
+        assert_allclose(yitp, y, atol=1e-4)
+
+    def test_inconsistent_x_dimensions_error(self):
+        # ValueError should be raised if the observation points and evaluation
+        # points have a different number of dimensions.
+        y = Halton(2, scramble=False, seed=np.random.RandomState()).random(10)
+        d = _2d_test_function(y)
+        x = Halton(1, scramble=False, seed=np.random.RandomState()).random(10)
+        match = 'Expected the second axis of `x`'
+        with pytest.raises(ValueError, match=match):
+            self.build(y, d)(x)
+
+    def test_inconsistent_d_length_error(self):
+        y = np.linspace(0, 1, 5)[:, None]
+        d = np.zeros(1)
+        match = 'Expected the first axis of `d`'
+        with pytest.raises(ValueError, match=match):
+            self.build(y, d)
+
+    def test_y_not_2d_error(self):
+        y = np.linspace(0, 1, 5)
+        d = np.zeros(5)
+        match = '`y` must be a 2-dimensional array.'
+        with pytest.raises(ValueError, match=match):
+            self.build(y, d)
+
+    def test_inconsistent_smoothing_length_error(self):
+        y = np.linspace(0, 1, 5)[:, None]
+        d = np.zeros(5)
+        smoothing = np.ones(1)
+        match = 'Expected `smoothing` to be'
+        with pytest.raises(ValueError, match=match):
+            self.build(y, d, smoothing=smoothing)
+
+    def test_invalid_kernel_name_error(self):
+        y = np.linspace(0, 1, 5)[:, None]
+        d = np.zeros(5)
+        match = '`kernel` must be one of'
+        with pytest.raises(ValueError, match=match):
+            self.build(y, d, kernel='test')
+
+    def test_epsilon_not_specified_error(self):
+        y = np.linspace(0, 1, 5)[:, None]
+        d = np.zeros(5)
+        for kernel in _AVAILABLE:
+            if kernel in _SCALE_INVARIANT:
+                continue
+
+            match = '`epsilon` must be specified'
+            with pytest.raises(ValueError, match=match):
+                self.build(y, d, kernel=kernel)
+
+    def test_x_not_2d_error(self):
+        y = np.linspace(0, 1, 5)[:, None]
+        x = np.linspace(0, 1, 5)
+        d = np.zeros(5)
+        match = '`x` must be a 2-dimensional array.'
+        with pytest.raises(ValueError, match=match):
+            self.build(y, d)(x)
+
+    def test_not_enough_observations_error(self):
+        y = np.linspace(0, 1, 1)[:, None]
+        d = np.zeros(1)
+        match = 'At least 2 data points are required'
+        with pytest.raises(ValueError, match=match):
+            self.build(y, d, kernel='thin_plate_spline')
+
+    def test_degree_warning(self):
+        y = np.linspace(0, 1, 5)[:, None]
+        d = np.zeros(5)
+        for kernel, deg in _NAME_TO_MIN_DEGREE.items():
+            match = f'`degree` should not be below {deg}'
+            with pytest.warns(Warning, match=match):
+                self.build(y, d, epsilon=1.0, kernel=kernel, degree=deg-1)
+
+    def test_rank_error(self):
+        # An error should be raised when `kernel` is "thin_plate_spline" and
+        # observations are 2-D and collinear.
+        y = np.array([[2.0, 0.0], [1.0, 0.0], [0.0, 0.0]])
+        d = np.array([0.0, 0.0, 0.0])
+        match = 'does not have full column rank'
+        with pytest.raises(LinAlgError, match=match):
+            self.build(y, d, kernel='thin_plate_spline')(y)
+
+    def test_single_point(self):
+        # Make sure interpolation still works with only one point (in 1, 2, and
+        # 3 dimensions).
+        for dim in [1, 2, 3]:
+            y = np.zeros((1, dim))
+            d = np.ones((1,))
+            f = self.build(y, d, kernel='linear')(y)
+            assert_allclose(d, f)
+
+    def test_pickleable(self):
+        # Make sure we can pickle and unpickle the interpolant without any
+        # change in behavior.
+        seq = Halton(1, scramble=False, seed=np.random.RandomState(2305982309))
+
+        x = 3*seq.random(50)
+        xitp = 3*seq.random(50)
+
+        y = _1d_test_function(x)
+
+        interp = self.build(x, y)
+
+        yitp1 = interp(xitp)
+        yitp2 = pickle.loads(pickle.dumps(interp))(xitp)
+
+        assert_array_equal(yitp1, yitp2)
+
+
+class TestRBFInterpolatorNeighborsNone(_TestRBFInterpolator):
+    def build(self, *args, **kwargs):
+        return RBFInterpolator(*args, **kwargs)
+
+    def test_smoothing_limit_1d(self):
+        # For large smoothing parameters, the interpolant should approach a
+        # least squares fit of a polynomial with the specified degree.
+        seq = Halton(1, scramble=False, seed=np.random.RandomState())
+
+        degree = 3
+        smoothing = 1e8
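+        # With such a large smoothing parameter the kernel term is
+        # suppressed and only the degree-3 polynomial part of the model
+        # remains, which is exactly a least squares polynomial fit.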
+
+        x = 3*seq.random(50)
+        xitp = 3*seq.random(50)
+
+        y = _1d_test_function(x)
+
+        yitp1 = self.build(
+            x, y,
+            degree=degree,
+            smoothing=smoothing
+            )(xitp)
+
+        P = _vandermonde(x, degree)
+        Pitp = _vandermonde(xitp, degree)
+        yitp2 = Pitp.dot(np.linalg.lstsq(P, y, rcond=None)[0])
+
+        assert_allclose(yitp1, yitp2, atol=1e-8)
+
+    def test_smoothing_limit_2d(self):
+        # For large smoothing parameters, the interpolant should approach a
+        # least squares fit of a polynomial with the specified degree.
+        seq = Halton(2, scramble=False, seed=np.random.RandomState())
+
+        degree = 3
+        smoothing = 1e8
+
+        x = seq.random(100)
+        xitp = seq.random(100)
+
+        y = _2d_test_function(x)
+
+        yitp1 = self.build(
+            x, y,
+            degree=degree,
+            smoothing=smoothing
+            )(xitp)
+
+        P = _vandermonde(x, degree)
+        Pitp = _vandermonde(xitp, degree)
+        yitp2 = Pitp.dot(np.linalg.lstsq(P, y, rcond=None)[0])
+
+        assert_allclose(yitp1, yitp2, atol=1e-8)
+
+
+class TestRBFInterpolatorNeighbors20(_TestRBFInterpolator):
+    # RBFInterpolator using 20 nearest neighbors.
+    def build(self, *args, **kwargs):
+        return RBFInterpolator(*args, **kwargs, neighbors=20)
+
+    def test_equivalent_to_rbf_interpolator(self):
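+        # With neighbors=20, each evaluation should match a full
+        # RBFInterpolator built from only the 20 observation points nearest
+        # to that evaluation point, found here with a KD-tree.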
+        seq = Halton(2, scramble=False, seed=np.random.RandomState())
+
+        x = seq.random(100)
+        xitp = seq.random(100)
+
+        y = _2d_test_function(x)
+
+        yitp1 = self.build(x, y)(xitp)
+
+        yitp2 = []
+        tree = cKDTree(x)
+        for xi in xitp:
+            _, nbr = tree.query(xi, 20)
+            yitp2.append(RBFInterpolator(x[nbr], y[nbr])(xi[None])[0])
+
+        assert_allclose(yitp1, yitp2, atol=1e-8)
+
+
+class TestRBFInterpolatorNeighborsInf(TestRBFInterpolatorNeighborsNone):
+    # RBFInterpolator using neighbors=np.inf. This should give exactly the same
+    # results as neighbors=None, but it will be slower.
+    def build(self, *args, **kwargs):
+        return RBFInterpolator(*args, **kwargs, neighbors=np.inf)
+
+    def test_equivalent_to_rbf_interpolator(self):
+        seq = Halton(1, scramble=False, seed=np.random.RandomState())
+
+        x = 3*seq.random(50)
+        xitp = 3*seq.random(50)
+
+        y = _1d_test_function(x)
+        yitp1 = self.build(x, y)(xitp)
+        yitp2 = RBFInterpolator(x, y)(xitp)
+
+        assert_allclose(yitp1, yitp2, atol=1e-8)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_rgi.py b/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_rgi.py
new file mode 100644
index 00000000..5ff52547
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/interpolate/tests/test_rgi.py
@@ -0,0 +1,1019 @@
+import itertools
+
+import pytest
+import numpy as np
+
+from numpy.testing import (assert_allclose, assert_equal, assert_warns,
+                           assert_array_almost_equal, assert_array_equal)
+from pytest import raises as assert_raises
+
+from scipy.interpolate import (RegularGridInterpolator, interpn,
+                               RectBivariateSpline,
+                               NearestNDInterpolator, LinearNDInterpolator)
+
+from scipy.sparse._sputils import matrix
+
+parametrize_rgi_interp_methods = pytest.mark.parametrize(
+    "method", ['linear', 'nearest', 'slinear', 'cubic', 'quintic', 'pchip']
+)
+
+class TestRegularGridInterpolator:
+    def _get_sample_4d(self):
+        # create a 4-D grid of 3 points in each dimension
+        points = [(0., .5, 1.)] * 4
+        values = np.asarray([0., .5, 1.])
+        values0 = values[:, np.newaxis, np.newaxis, np.newaxis]
+        values1 = values[np.newaxis, :, np.newaxis, np.newaxis]
+        values2 = values[np.newaxis, np.newaxis, :, np.newaxis]
+        values3 = values[np.newaxis, np.newaxis, np.newaxis, :]
+        values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000)
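+        # values equals x0 + 10*x1 + 100*x2 + 1000*x3 on the grid, so e.g.
+        # linear interpolation at (0.1, 0.1, 1.0, 0.9) should give
+        # 0.1 + 1 + 100 + 900 = 1001.1 (cf. test_linear_xi3d below).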
+        return points, values
+
+    def _get_sample_4d_2(self):
+        # create another 4-D grid of 3 points in each dimension
+        points = [(0., .5, 1.)] * 2 + [(0., 5., 10.)] * 2
+        values = np.asarray([0., .5, 1.])
+        values0 = values[:, np.newaxis, np.newaxis, np.newaxis]
+        values1 = values[np.newaxis, :, np.newaxis, np.newaxis]
+        values2 = values[np.newaxis, np.newaxis, :, np.newaxis]
+        values3 = values[np.newaxis, np.newaxis, np.newaxis, :]
+        values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000)
+        return points, values
+
+    def _get_sample_4d_3(self):
+        # create another 4-D grid of 7 points in each dimension
+        points = [(0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0)] * 4
+        values = np.asarray([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0])
+        values0 = values[:, np.newaxis, np.newaxis, np.newaxis]
+        values1 = values[np.newaxis, :, np.newaxis, np.newaxis]
+        values2 = values[np.newaxis, np.newaxis, :, np.newaxis]
+        values3 = values[np.newaxis, np.newaxis, np.newaxis, :]
+        values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000)
+        return points, values
+
+    def _get_sample_4d_4(self):
+        # create another 4-D grid of 2 points in each dimension
+        points = [(0.0, 1.0)] * 4
+        values = np.asarray([0.0, 1.0])
+        values0 = values[:, np.newaxis, np.newaxis, np.newaxis]
+        values1 = values[np.newaxis, :, np.newaxis, np.newaxis]
+        values2 = values[np.newaxis, np.newaxis, :, np.newaxis]
+        values3 = values[np.newaxis, np.newaxis, np.newaxis, :]
+        values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000)
+        return points, values
+
+    @parametrize_rgi_interp_methods
+    def test_list_input(self, method):
+        points, values = self._get_sample_4d_3()
+
+        sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
+                             [0.5, 0.5, .5, .5]])
+
+        interp = RegularGridInterpolator(points,
+                                         values.tolist(),
+                                         method=method)
+        v1 = interp(sample.tolist())
+        interp = RegularGridInterpolator(points,
+                                         values,
+                                         method=method)
+        v2 = interp(sample)
+        assert_allclose(v1, v2)
+
+    @pytest.mark.parametrize('method', ['cubic', 'quintic', 'pchip'])
+    def test_spline_dim_error(self, method):
+        points, values = self._get_sample_4d_4()
+        match = "points in dimension"
+
+        # Check that the error is raised when creating the interpolator
+        with pytest.raises(ValueError, match=match):
+            RegularGridInterpolator(points, values, method=method)
+
+        # Check that the error is also raised when calling an interpolator
+        # that was created with the default method
+        interp = RegularGridInterpolator(points, values)
+        sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
+                             [0.5, 0.5, .5, .5]])
+        with pytest.raises(ValueError, match=match):
+            interp(sample, method=method)
+
+    @pytest.mark.parametrize(
+        "points_values, sample",
+        [
+            (
+                _get_sample_4d,
+                np.asarray(
+                    [[0.1, 0.1, 1.0, 0.9],
+                     [0.2, 0.1, 0.45, 0.8],
+                     [0.5, 0.5, 0.5, 0.5]]
+                ),
+            ),
+            (_get_sample_4d_2, np.asarray([0.1, 0.1, 10.0, 9.0])),
+        ],
+    )
+    def test_linear_and_slinear_close(self, points_values, sample):
+        points, values = points_values(self)
+        interp = RegularGridInterpolator(points, values, method="linear")
+        v1 = interp(sample)
+        interp = RegularGridInterpolator(points, values, method="slinear")
+        v2 = interp(sample)
+        assert_allclose(v1, v2)
+
+    @parametrize_rgi_interp_methods
+    def test_complex(self, method):
+        points, values = self._get_sample_4d_3()
+        values = values - 2j*values
+        sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
+                             [0.5, 0.5, .5, .5]])
+
+        interp = RegularGridInterpolator(points, values, method=method)
+        rinterp = RegularGridInterpolator(points, values.real, method=method)
+        iinterp = RegularGridInterpolator(points, values.imag, method=method)
+
+        v1 = interp(sample)
+        v2 = rinterp(sample) + 1j*iinterp(sample)
+        assert_allclose(v1, v2)
+
+    def test_cubic_vs_pchip(self):
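+        # 'cubic' and 'pchip' are genuinely different schemes, so away from
+        # the grid points their results should not coincide.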
+        x, y = [1, 2, 3, 4], [1, 2, 3, 4]
+        xg, yg = np.meshgrid(x, y, indexing='ij')
+
+        values = (lambda x, y: x**4 * y**4)(xg, yg)
+        cubic = RegularGridInterpolator((x, y), values, method='cubic')
+        pchip = RegularGridInterpolator((x, y), values, method='pchip')
+
+        vals_cubic = cubic([1.5, 2])
+        vals_pchip = pchip([1.5, 2])
+        assert not np.allclose(vals_cubic, vals_pchip, atol=1e-14, rtol=0)
+
+    def test_linear_xi1d(self):
+        points, values = self._get_sample_4d_2()
+        interp = RegularGridInterpolator(points, values)
+        sample = np.asarray([0.1, 0.1, 10., 9.])
+        wanted = 1001.1
+        assert_array_almost_equal(interp(sample), wanted)
+
+    def test_linear_xi3d(self):
+        points, values = self._get_sample_4d()
+        interp = RegularGridInterpolator(points, values)
+        sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
+                             [0.5, 0.5, .5, .5]])
+        wanted = np.asarray([1001.1, 846.2, 555.5])
+        assert_array_almost_equal(interp(sample), wanted)
+
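+    # Expected values below: snap each coordinate to its nearest grid point
+    # in {0, 0.5, 1} and evaluate x0 + 10*x1 + 100*x2 + 1000*x3; e.g.
+    # (0.1, 0.4, 0.6, 0.9) -> (0, 0.5, 0.5, 1) -> 0 + 5 + 50 + 1000 = 1055.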
+    @pytest.mark.parametrize(
+        "sample, wanted",
+        [
+            (np.asarray([0.1, 0.1, 0.9, 0.9]), 1100.0),
+            (np.asarray([0.1, 0.1, 0.1, 0.1]), 0.0),
+            (np.asarray([0.0, 0.0, 0.0, 0.0]), 0.0),
+            (np.asarray([1.0, 1.0, 1.0, 1.0]), 1111.0),
+            (np.asarray([0.1, 0.4, 0.6, 0.9]), 1055.0),
+        ],
+    )
+    def test_nearest(self, sample, wanted):
+        points, values = self._get_sample_4d()
+        interp = RegularGridInterpolator(points, values, method="nearest")
+        assert_array_almost_equal(interp(sample), wanted)
+
+    def test_linear_edges(self):
+        points, values = self._get_sample_4d()
+        interp = RegularGridInterpolator(points, values)
+        sample = np.asarray([[0., 0., 0., 0.], [1., 1., 1., 1.]])
+        wanted = np.asarray([0., 1111.])
+        assert_array_almost_equal(interp(sample), wanted)
+
+    def test_valid_create(self):
+        # create a 2-D grid of 3 points in each dimension
+        points = [(0., .5, 1.), (0., 1., .5)]
+        values = np.asarray([0., .5, 1.])
+        values0 = values[:, np.newaxis]
+        values1 = values[np.newaxis, :]
+        values = (values0 + values1 * 10)
+        assert_raises(ValueError, RegularGridInterpolator, points, values)
+        points = [((0., .5, 1.), ), (0., .5, 1.)]
+        assert_raises(ValueError, RegularGridInterpolator, points, values)
+        points = [(0., .5, .75, 1.), (0., .5, 1.)]
+        assert_raises(ValueError, RegularGridInterpolator, points, values)
+        points = [(0., .5, 1.), (0., .5, 1.), (0., .5, 1.)]
+        assert_raises(ValueError, RegularGridInterpolator, points, values)
+        points = [(0., .5, 1.), (0., .5, 1.)]
+        assert_raises(ValueError, RegularGridInterpolator, points, values,
+                      method="undefmethod")
+
+    def test_valid_call(self):
+        points, values = self._get_sample_4d()
+        interp = RegularGridInterpolator(points, values)
+        sample = np.asarray([[0., 0., 0., 0.], [1., 1., 1., 1.]])
+        assert_raises(ValueError, interp, sample, "undefmethod")
+        sample = np.asarray([[0., 0., 0.], [1., 1., 1.]])
+        assert_raises(ValueError, interp, sample)
+        sample = np.asarray([[0., 0., 0., 0.], [1., 1., 1., 1.1]])
+        assert_raises(ValueError, interp, sample)
+
+    def test_out_of_bounds_extrap(self):
+        points, values = self._get_sample_4d()
+        interp = RegularGridInterpolator(points, values, bounds_error=False,
+                                         fill_value=None)
+        sample = np.asarray([[-.1, -.1, -.1, -.1], [1.1, 1.1, 1.1, 1.1],
+                             [21, 2.1, -1.1, -11], [2.1, 2.1, -1.1, -1.1]])
+        wanted = np.asarray([0., 1111., 11., 11.])
+        assert_array_almost_equal(interp(sample, method="nearest"), wanted)
+        wanted = np.asarray([-111.1, 1222.1, -11068., -1186.9])
+        assert_array_almost_equal(interp(sample, method="linear"), wanted)
+
+    def test_out_of_bounds_extrap2(self):
+        points, values = self._get_sample_4d_2()
+        interp = RegularGridInterpolator(points, values, bounds_error=False,
+                                         fill_value=None)
+        sample = np.asarray([[-.1, -.1, -.1, -.1], [1.1, 1.1, 1.1, 1.1],
+                             [21, 2.1, -1.1, -11], [2.1, 2.1, -1.1, -1.1]])
+        wanted = np.asarray([0., 11., 11., 11.])
+        assert_array_almost_equal(interp(sample, method="nearest"), wanted)
+        wanted = np.asarray([-12.1, 133.1, -1069., -97.9])
+        assert_array_almost_equal(interp(sample, method="linear"), wanted)
+
+    def test_out_of_bounds_fill(self):
+        points, values = self._get_sample_4d()
+        interp = RegularGridInterpolator(points, values, bounds_error=False,
+                                         fill_value=np.nan)
+        sample = np.asarray([[-.1, -.1, -.1, -.1], [1.1, 1.1, 1.1, 1.1],
+                             [2.1, 2.1, -1.1, -1.1]])
+        wanted = np.asarray([np.nan, np.nan, np.nan])
+        assert_array_almost_equal(interp(sample, method="nearest"), wanted)
+        assert_array_almost_equal(interp(sample, method="linear"), wanted)
+        sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
+                             [0.5, 0.5, .5, .5]])
+        wanted = np.asarray([1001.1, 846.2, 555.5])
+        assert_array_almost_equal(interp(sample), wanted)
+
+    def test_nearest_compare_qhull(self):
+        points, values = self._get_sample_4d()
+        interp = RegularGridInterpolator(points, values, method="nearest")
+        points_qhull = np.asarray(list(itertools.product(*points)))
+        values_qhull = values.reshape(-1)
+        interp_qhull = NearestNDInterpolator(points_qhull, values_qhull)
+        sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
+                             [0.5, 0.5, .5, .5]])
+        assert_array_almost_equal(interp(sample), interp_qhull(sample))
+
+    def test_linear_compare_qhull(self):
+        points, values = self._get_sample_4d()
+        interp = RegularGridInterpolator(points, values)
+        points_qhull = np.asarray(list(itertools.product(*points)))
+        values_qhull = values.reshape(-1)
+        interp_qhull = LinearNDInterpolator(points_qhull, values_qhull)
+        sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
+                             [0.5, 0.5, .5, .5]])
+        assert_array_almost_equal(interp(sample), interp_qhull(sample))
+
+    @pytest.mark.parametrize("method", ["nearest", "linear"])
+    def test_duck_typed_values(self, method):
+        x = np.linspace(0, 2, 5)
+        y = np.linspace(0, 1, 7)
+
+        values = MyValue((5, 7))
+
+        interp = RegularGridInterpolator((x, y), values, method=method)
+        v1 = interp([0.4, 0.7])
+
+        interp = RegularGridInterpolator((x, y), values._v, method=method)
+        v2 = interp([0.4, 0.7])
+        assert_allclose(v1, v2)
+
+    def test_invalid_fill_value(self):
+        np.random.seed(1234)
+        x = np.linspace(0, 2, 5)
+        y = np.linspace(0, 1, 7)
+        values = np.random.rand(5, 7)
+
+        # integers can be cast to floats
+        RegularGridInterpolator((x, y), values, fill_value=1)
+
+        # complex values cannot
+        assert_raises(ValueError, RegularGridInterpolator,
+                      (x, y), values, fill_value=1+2j)
+
+    def test_fillvalue_type(self):
+        # from #3703; test that interpolator object construction succeeds
+        values = np.ones((10, 20, 30), dtype='>f4')
+        points = [np.arange(n) for n in values.shape]
+        RegularGridInterpolator(points, values)
+        RegularGridInterpolator(points, values, fill_value=0.)
+
+    def test_length_one_axis(self):
+        # gh-5890, gh-9524 : length-1 axis is legal for method='linear'.
+        # Along the axis it's linear interpolation; away from the length-1
+        # axis, it's an extrapolation, so fill_value should be used.
+        def f(x, y):
+            return x + y
+        x = np.linspace(1, 1, 1)
+        y = np.linspace(1, 10, 10)
+        data = f(*np.meshgrid(x, y, indexing="ij", sparse=True))
+
+        interp = RegularGridInterpolator((x, y), data, method="linear",
+                                         bounds_error=False, fill_value=101)
+
+        # check values at the grid
+        assert_allclose(interp(np.array([[1, 1], [1, 5], [1, 10]])),
+                        [2, 6, 11],
+                        atol=1e-14)
+
+        # check off-grid interpolation is indeed linear
+        assert_allclose(interp(np.array([[1, 1.4], [1, 5.3], [1, 10]])),
+                        [2.4, 6.3, 11],
+                        atol=1e-14)
+
+        # check extrapolation with fill_value
+        assert_allclose(interp(np.array([1.1, 2.4])),
+                        interp.fill_value,
+                        atol=1e-14)
+
+        # check extrapolation: linear along the `y` axis, const along `x`
+        interp.fill_value = None
+        assert_allclose(interp([[1, 0.3], [1, 11.5]]),
+                        [1.3, 12.5], atol=1e-15)
+
+        assert_allclose(interp([[1.5, 0.3], [1.9, 11.5]]),
+                        [1.3, 12.5], atol=1e-15)
+
+        # extrapolation with method='nearest'
+        interp = RegularGridInterpolator((x, y), data, method="nearest",
+                                         bounds_error=False, fill_value=None)
+        assert_allclose(interp([[1.5, 1.8], [-4, 5.1]]),
+                        [3, 6],
+                        atol=1e-15)
+
+    @pytest.mark.parametrize("fill_value", [None, np.nan, np.pi])
+    @pytest.mark.parametrize("method", ['linear', 'nearest'])
+    def test_length_one_axis2(self, fill_value, method):
+        options = {"fill_value": fill_value, "bounds_error": False,
+                   "method": method}
+
+        x = np.linspace(0, 2*np.pi, 20)
+        z = np.sin(x)
+
+        fa = RegularGridInterpolator((x,), z[:], **options)
+        fb = RegularGridInterpolator((x, [0]), z[:, None], **options)
+
+        x1a = np.linspace(-1, 2*np.pi+1, 100)
+        za = fa(x1a)
+
+        # evaluated at the provided y-value, fb should behave exactly as fa
+        y1b = np.zeros(100)
+        zb = fb(np.vstack([x1a, y1b]).T)
+        assert_allclose(zb, za)
+
+        # evaluated at a different y-value, fb should return fill value
+        y1b = np.ones(100)
+        zb = fb(np.vstack([x1a, y1b]).T)
+        if fill_value is None:
+            assert_allclose(zb, za)
+        else:
+            assert_allclose(zb, fill_value)
+
+    @pytest.mark.parametrize("method", ['nearest', 'linear'])
+    def test_nan_x_1d(self, method):
+        # gh-6624 : if x is nan, result should be nan
+        f = RegularGridInterpolator(([1, 2, 3],), [10, 20, 30], fill_value=1,
+                                    bounds_error=False, method=method)
+        assert np.isnan(f([np.nan]))
+
+        # test arbitrary nan pattern
+        rng = np.random.default_rng(8143215468)
+        x = rng.random(size=100)*4
+        i = rng.random(size=100) > 0.5
+        x[i] = np.nan
+        with np.errstate(invalid='ignore'):
+            # out-of-bounds comparisons, `out_of_bounds += x < grid[0]`,
+            # generate numpy warnings if `x` contains nans. These warnings
+            # should propagate to the user (since `x` is user input), so
+            # the library does not suppress them; here we filter them out
+            # only for the purpose of the test.
+            res = f(x)
+
+        assert_equal(res[i], np.nan)
+        assert_equal(res[~i], f(x[~i]))
+
+        # also test the length-one axis f(nan)
+        x = [1, 2, 3]
+        y = [1, ]
+        data = np.ones((3, 1))
+        f = RegularGridInterpolator((x, y), data, fill_value=1,
+                                    bounds_error=False, method=method)
+        assert np.isnan(f([np.nan, 1]))
+        assert np.isnan(f([1, np.nan]))
+
+    @pytest.mark.parametrize("method", ['nearest', 'linear'])
+    def test_nan_x_2d(self, method):
+        x, y = np.array([0, 1, 2]), np.array([1, 3, 7])
+
+        def f(x, y):
+            return x**2 + y**2
+
+        xg, yg = np.meshgrid(x, y, indexing='ij', sparse=True)
+        data = f(xg, yg)
+        interp = RegularGridInterpolator((x, y), data,
+                                         method=method, bounds_error=False)
+
+        with np.errstate(invalid='ignore'):
+            res = interp([[1.5, np.nan], [1, 1]])
+        assert_allclose(res[1], 2, atol=1e-14)
+        assert np.isnan(res[0])
+
+        # test arbitrary nan pattern
+        rng = np.random.default_rng(8143215468)
+        x = rng.random(size=100)*4-1
+        y = rng.random(size=100)*8
+        i1 = rng.random(size=100) > 0.5
+        i2 = rng.random(size=100) > 0.5
+        i = i1 | i2
+        x[i1] = np.nan
+        y[i2] = np.nan
+        z = np.array([x, y]).T
+        with np.errstate(invalid='ignore'):
+            # out-of-bounds comparisons, `out_of_bounds += x < grid[0]`,
+            # generate numpy warnings if `x` contains nans. These warnings
+            # should propagate to the user (since `x` is user input), so
+            # the library does not suppress them; here we filter them out
+            # only for the purpose of the test.
+            res = interp(z)
+
+        assert_equal(res[i], np.nan)
+        assert_equal(res[~i], interp(z[~i]))
+
+    @parametrize_rgi_interp_methods
+    def test_descending_points(self, method):
+        def val_func_3d(x, y, z):
+            return 2 * x ** 3 + 3 * y ** 2 - z
+
+        x = np.linspace(1, 4, 11)
+        y = np.linspace(4, 7, 22)
+        z = np.linspace(7, 9, 33)
+        points = (x, y, z)
+        values = val_func_3d(
+            *np.meshgrid(*points, indexing='ij', sparse=True))
+        my_interpolating_function = RegularGridInterpolator(points,
+                                                            values,
+                                                            method=method)
+        pts = np.array([[2.1, 6.2, 8.3], [3.3, 5.2, 7.1]])
+        correct_result = my_interpolating_function(pts)
+
+        # descending data
+        x_descending = x[::-1]
+        y_descending = y[::-1]
+        z_descending = z[::-1]
+        points_shuffled = (x_descending, y_descending, z_descending)
+        values_shuffled = val_func_3d(
+            *np.meshgrid(*points_shuffled, indexing='ij', sparse=True))
+        my_interpolating_function = RegularGridInterpolator(
+            points_shuffled, values_shuffled, method=method)
+        test_result = my_interpolating_function(pts)
+
+        assert_array_equal(correct_result, test_result)
+
+    def test_invalid_points_order(self):
+        def val_func_2d(x, y):
+            return 2 * x ** 3 + 3 * y ** 2
+
+        x = np.array([.5, 2., 0., 4., 5.5])  # not ascending or descending
+        y = np.array([.5, 2., 3., 4., 5.5])
+        points = (x, y)
+        values = val_func_2d(*np.meshgrid(*points, indexing='ij',
+                                          sparse=True))
+        match = "must be strictly ascending or descending"
+        with pytest.raises(ValueError, match=match):
+            RegularGridInterpolator(points, values)
+
+    @parametrize_rgi_interp_methods
+    def test_fill_value(self, method):
+        interp = RegularGridInterpolator([np.arange(6)], np.ones(6),
+                                         method=method, bounds_error=False)
+        assert np.isnan(interp([10]))
+
+    @parametrize_rgi_interp_methods
+    def test_nonscalar_values(self, method):
+        # Verify that non-scalar-valued `values` also work
+        points = [(0.0, 0.5, 1.0, 1.5, 2.0, 2.5)] * 2 + [
+            (0.0, 5.0, 10.0, 15.0, 20, 25.0)
+        ] * 2
+
+        rng = np.random.default_rng(1234)
+        values = rng.random((6, 6, 6, 6, 8))
+        sample = rng.random((7, 3, 4))
+
+        interp = RegularGridInterpolator(points, values, method=method,
+                                         bounds_error=False)
+        v = interp(sample)
+        assert_equal(v.shape, (7, 3, 8), err_msg=method)
+
+        vs = []
+        for j in range(8):
+            interp = RegularGridInterpolator(points, values[..., j],
+                                             method=method,
+                                             bounds_error=False)
+            vs.append(interp(sample))
+        v2 = np.array(vs).transpose(1, 2, 0)
+
+        assert_allclose(v, v2, atol=1e-14, err_msg=method)
+
+    @parametrize_rgi_interp_methods
+    @pytest.mark.parametrize("flip_points", [False, True])
+    def test_nonscalar_values_2(self, method, flip_points):
+        # Verify that non-scalar-valued `values` also work: use different
+        # axis lengths to simplify tracing the internals
+        points = [(0.0, 0.5, 1.0, 1.5, 2.0, 2.5),
+                  (0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0),
+                  (0.0, 5.0, 10.0, 15.0, 20, 25.0, 35.0, 36.0),
+                  (0.0, 5.0, 10.0, 15.0, 20, 25.0, 35.0, 36.0, 47)]
+
+        # verify that strictly descending axes also work
+        if flip_points:
+            points = [tuple(reversed(p)) for p in points]
+
+        rng = np.random.default_rng(1234)
+
+        trailing_points = (3, 2)
+        # NB: values has len(trailing_points) extra trailing dimensions
+        values = rng.random((6, 7, 8, 9, *trailing_points))
+        sample = rng.random(4)   # a single sample point
+
+        interp = RegularGridInterpolator(points, values, method=method,
+                                         bounds_error=False)
+        v = interp(sample)
+
+        # v has a single sample point *per entry in the trailing dimensions*
+        assert v.shape == (1, *trailing_points)
+
+        # check the values, too: manually loop over the trailing dimensions
+        vs = np.empty((values.shape[-2:]))
+        for i in range(values.shape[-2]):
+            for j in range(values.shape[-1]):
+                interp = RegularGridInterpolator(points, values[..., i, j],
+                                                 method=method,
+                                                 bounds_error=False)
+                vs[i, j] = interp(sample)
+        v2 = np.expand_dims(vs, axis=0)
+        assert_allclose(v, v2, atol=1e-14, err_msg=method)
+
+    def test_nonscalar_values_linear_2D(self):
+        # Verify that non-scalar values work in the 2D fast path
+        method = 'linear'
+        points = [(0.0, 0.5, 1.0, 1.5, 2.0, 2.5),
+                  (0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0), ]
+
+        rng = np.random.default_rng(1234)
+
+        trailing_points = (3, 4)
+        # NB: values has len(trailing_points) extra trailing dimensions
+        values = rng.random((6, 7, *trailing_points))
+        sample = rng.random(2)   # a single sample point
+
+        interp = RegularGridInterpolator(points, values, method=method,
+                                         bounds_error=False)
+        v = interp(sample)
+
+        # v has a single sample point *per entry in the trailing dimensions*
+        assert v.shape == (1, *trailing_points)
+
+        # check the values, too: manually loop over the trailing dimensions
+        vs = np.empty((values.shape[-2:]))
+        for i in range(values.shape[-2]):
+            for j in range(values.shape[-1]):
+                interp = RegularGridInterpolator(points, values[..., i, j],
+                                                 method=method,
+                                                 bounds_error=False)
+                vs[i, j] = interp(sample)
+        v2 = np.expand_dims(vs, axis=0)
+        assert_allclose(v, v2, atol=1e-14, err_msg=method)
+
+    @pytest.mark.parametrize(
+        "dtype",
+        [np.float32, np.float64, np.complex64, np.complex128]
+    )
+    @pytest.mark.parametrize("xi_dtype", [np.float32, np.float64])
+    def test_float32_values(self, dtype, xi_dtype):
+        # regression test for gh-17718: values.dtype=float32 fails
+        def f(x, y):
+            return 2 * x**3 + 3 * y**2
+
+        x = np.linspace(1, 4, 11)
+        y = np.linspace(4, 7, 22)
+
+        xg, yg = np.meshgrid(x, y, indexing='ij', sparse=True)
+        data = f(xg, yg)
+
+        data = data.astype(dtype)
+
+        interp = RegularGridInterpolator((x, y), data)
+
+        pts = np.array([[2.1, 6.2],
+                        [3.3, 5.2]], dtype=xi_dtype)
+
+        # the values here are just what the call returns; the test checks
+        # that the call succeeds at all, instead of failing because the
+        # Cython code lacks a float32 kernel
+        assert_allclose(interp(pts), [134.10469388, 153.40069388], atol=1e-7)
+
+
+class MyValue:
+    """
+    Minimal indexable object
+    """
+
+    def __init__(self, shape):
+        self.ndim = 2
+        self.shape = shape
+        self._v = np.arange(np.prod(shape)).reshape(shape)
+
+    def __getitem__(self, idx):
+        return self._v[idx]
+
+    def __array_interface__(self):
+        return None
+
+    def __array__(self):
+        raise RuntimeError("No array representation")
+
+
+class TestInterpN:
+    def _sample_2d_data(self):
+        x = np.array([.5, 2., 3., 4., 5.5, 6.])
+        y = np.array([.5, 2., 3., 4., 5.5, 6.])
+        z = np.array(
+            [
+                [1, 2, 1, 2, 1, 1],
+                [1, 2, 1, 2, 1, 1],
+                [1, 2, 3, 2, 1, 1],
+                [1, 2, 2, 2, 1, 1],
+                [1, 2, 1, 2, 1, 1],
+                [1, 2, 2, 2, 1, 1],
+            ]
+        )
+        return x, y, z
+
+    def test_spline_2d(self):
+        x, y, z = self._sample_2d_data()
+        lut = RectBivariateSpline(x, y, z)
+
+        xi = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
+                       [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
+        assert_array_almost_equal(interpn((x, y), z, xi, method="splinef2d"),
+                                  lut.ev(xi[:, 0], xi[:, 1]))
+
+    @parametrize_rgi_interp_methods
+    def test_list_input(self, method):
+        x, y, z = self._sample_2d_data()
+        xi = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
+                       [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
+
+        v1 = interpn((x, y), z, xi, method=method)
+        v2 = interpn(
+            (x.tolist(), y.tolist()), z.tolist(), xi.tolist(), method=method
+        )
+        assert_allclose(v1, v2, err_msg=method)
+
+    def test_spline_2d_outofbounds(self):
+        x = np.array([.5, 2., 3., 4., 5.5])
+        y = np.array([.5, 2., 3., 4., 5.5])
+        z = np.array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
+                      [1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
+        lut = RectBivariateSpline(x, y, z)
+
+        xi = np.array([[1, 2.3, 6.3, 0.5, 3.3, 1.2, 3],
+                       [1, 3.3, 1.2, -4.0, 5.0, 1.0, 3]]).T
+        actual = interpn((x, y), z, xi, method="splinef2d",
+                         bounds_error=False, fill_value=999.99)
+        expected = lut.ev(xi[:, 0], xi[:, 1])
+        expected[2:4] = 999.99
+        assert_array_almost_equal(actual, expected)
+
+        # no extrapolation for splinef2d
+        assert_raises(ValueError, interpn, (x, y), z, xi, method="splinef2d",
+                      bounds_error=False, fill_value=None)
+
+    def _sample_4d_data(self):
+        points = [(0., .5, 1.)] * 2 + [(0., 5., 10.)] * 2
+        values = np.asarray([0., .5, 1.])
+        values0 = values[:, np.newaxis, np.newaxis, np.newaxis]
+        values1 = values[np.newaxis, :, np.newaxis, np.newaxis]
+        values2 = values[np.newaxis, np.newaxis, :, np.newaxis]
+        values3 = values[np.newaxis, np.newaxis, np.newaxis, :]
+        values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000)
+        return points, values
+
+    def test_linear_4d(self):
+        # create a 4-D grid of 3 points in each dimension
+        points, values = self._sample_4d_data()
+        interp_rg = RegularGridInterpolator(points, values)
+        sample = np.asarray([[0.1, 0.1, 10., 9.]])
+        wanted = interpn(points, values, sample, method="linear")
+        assert_array_almost_equal(interp_rg(sample), wanted)
+
+    def test_4d_linear_outofbounds(self):
+        # create a 4-D grid of 3 points in each dimension
+        points, values = self._sample_4d_data()
+        sample = np.asarray([[0.1, -0.1, 10.1, 9.]])
+        wanted = 999.99
+        actual = interpn(points, values, sample, method="linear",
+                         bounds_error=False, fill_value=999.99)
+        assert_array_almost_equal(actual, wanted)
+
+    def test_nearest_4d(self):
+        # create a 4-D grid of 3 points in each dimension
+        points, values = self._sample_4d_data()
+        interp_rg = RegularGridInterpolator(points, values, method="nearest")
+        sample = np.asarray([[0.1, 0.1, 10., 9.]])
+        wanted = interpn(points, values, sample, method="nearest")
+        assert_array_almost_equal(interp_rg(sample), wanted)
+
+    def test_4d_nearest_outofbounds(self):
+        # create a 4-D grid of 3 points in each dimension
+        points, values = self._sample_4d_data()
+        sample = np.asarray([[0.1, -0.1, 10.1, 9.]])
+        wanted = 999.99
+        actual = interpn(points, values, sample, method="nearest",
+                         bounds_error=False, fill_value=999.99)
+        assert_array_almost_equal(actual, wanted)
+
+    def test_xi_1d(self):
+        # verify that 1-D xi works as expected
+        points, values = self._sample_4d_data()
+        sample = np.asarray([0.1, 0.1, 10., 9.])
+        v1 = interpn(points, values, sample, bounds_error=False)
+        v2 = interpn(points, values, sample[None, :], bounds_error=False)
+        assert_allclose(v1, v2)
+
+    def test_xi_nd(self):
+        # verify that higher-dimensional xi works as expected
+        points, values = self._sample_4d_data()
+
+        np.random.seed(1234)
+        sample = np.random.rand(2, 3, 4)
+
+        v1 = interpn(points, values, sample, method='nearest',
+                     bounds_error=False)
+        assert_equal(v1.shape, (2, 3))
+
+        v2 = interpn(points, values, sample.reshape(-1, 4),
+                     method='nearest', bounds_error=False)
+        assert_allclose(v1, v2.reshape(v1.shape))
+
+    @parametrize_rgi_interp_methods
+    def test_xi_broadcast(self, method):
+        # verify that the interpolators broadcast xi
+        x, y, values = self._sample_2d_data()
+        points = (x, y)
+
+        xi = np.linspace(0, 1, 2)
+        yi = np.linspace(0, 3, 3)
+
+        sample = (xi[:, None], yi[None, :])
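+        # a tuple of broadcastable arrays is interpreted as a grid of
+        # points, so the result should have the broadcast shape (2, 3)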
+        v1 = interpn(points, values, sample, method=method, bounds_error=False)
+        assert_equal(v1.shape, (2, 3))
+
+        xx, yy = np.meshgrid(xi, yi)
+        sample = np.c_[xx.T.ravel(), yy.T.ravel()]
+
+        v2 = interpn(points, values, sample,
+                     method=method, bounds_error=False)
+        assert_allclose(v1, v2.reshape(v1.shape))
+
+    @parametrize_rgi_interp_methods
+    def test_nonscalar_values(self, method):
+        # Verify that non-scalar-valued `values` also work
+        points = [(0.0, 0.5, 1.0, 1.5, 2.0, 2.5)] * 2 + [
+            (0.0, 5.0, 10.0, 15.0, 20, 25.0)
+        ] * 2
+
+        rng = np.random.default_rng(1234)
+        values = rng.random((6, 6, 6, 6, 8))
+        sample = rng.random((7, 3, 4))
+
+        v = interpn(points, values, sample, method=method,
+                    bounds_error=False)
+        assert_equal(v.shape, (7, 3, 8), err_msg=method)
+
+        vs = [interpn(points, values[..., j], sample, method=method,
+                      bounds_error=False) for j in range(8)]
+        v2 = np.array(vs).transpose(1, 2, 0)
+
+        assert_allclose(v, v2, atol=1e-14, err_msg=method)
+
+    @parametrize_rgi_interp_methods
+    def test_nonscalar_values_2(self, method):
+        # Verify that non-scalar-valued `values` also work: use different
+        # axis lengths to simplify tracing the internals
+        points = [(0.0, 0.5, 1.0, 1.5, 2.0, 2.5),
+                  (0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0),
+                  (0.0, 5.0, 10.0, 15.0, 20, 25.0, 35.0, 36.0),
+                  (0.0, 5.0, 10.0, 15.0, 20, 25.0, 35.0, 36.0, 47)]
+
+        rng = np.random.default_rng(1234)
+
+        trailing_points = (3, 2)
+        # NB: values has len(trailing_points) extra trailing dimensions
+        values = rng.random((6, 7, 8, 9, *trailing_points))
+        sample = rng.random(4)   # a single sample point
+
+        v = interpn(points, values, sample, method=method, bounds_error=False)
+
+        # v has a single sample point *per entry in the trailing dimensions*
+        assert v.shape == (1, *trailing_points)
+
+        # check the values, too: manually loop over the trailing dimensions
+        vs = [[
+                interpn(points, values[..., i, j], sample, method=method,
+                        bounds_error=False) for i in range(values.shape[-2])
+              ] for j in range(values.shape[-1])]
+
+        assert_allclose(v, np.asarray(vs).T, atol=1e-14, err_msg=method)
+
+    def test_non_scalar_values_splinef2d(self):
+        # Vector-valued data is not supported by the fitpack-based
+        # splinef2d method, so a ValueError should be raised
+        points, values = self._sample_4d_data()
+
+        np.random.seed(1234)
+        values = np.random.rand(3, 3, 3, 3, 6)
+        sample = np.random.rand(7, 11, 4)
+        assert_raises(ValueError, interpn, points, values, sample,
+                      method='splinef2d')
+
+    @parametrize_rgi_interp_methods
+    def test_complex(self, method):
+        x, y, values = self._sample_2d_data()
+        points = (x, y)
+        values = values - 2j*values
+
+        sample = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
+                           [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
+
+        v1 = interpn(points, values, sample, method=method)
+        v2r = interpn(points, values.real, sample, method=method)
+        v2i = interpn(points, values.imag, sample, method=method)
+        v2 = v2r + 1j*v2i
+        assert_allclose(v1, v2)
+
+    def test_complex_splinef2d(self):
+        # Complex-valued data is not supported by splinef2d
+        x, y, values = self._sample_2d_data()
+        points = (x, y)
+        values = values - 2j*values
+
+        sample = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
+                           [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
+        with assert_warns(np.ComplexWarning):
+            interpn(points, values, sample, method='splinef2d')
+
+    @pytest.mark.parametrize(
+        "method",
+        ["linear", "nearest"]
+    )
+    def test_duck_typed_values(self, method):
+        x = np.linspace(0, 2, 5)
+        y = np.linspace(0, 1, 7)
+
+        values = MyValue((5, 7))
+
+        v1 = interpn((x, y), values, [0.4, 0.7], method=method)
+        v2 = interpn((x, y), values._v, [0.4, 0.7], method=method)
+        assert_allclose(v1, v2)
+
+    @parametrize_rgi_interp_methods
+    def test_matrix_input(self, method):
+        x = np.linspace(0, 2, 6)
+        y = np.linspace(0, 1, 7)
+
+        values = matrix(np.random.rand(6, 7))
+
+        sample = np.random.rand(3, 7, 2)
+
+        v1 = interpn((x, y), values, sample, method=method)
+        v2 = interpn((x, y), np.asarray(values), sample, method=method)
+        assert_allclose(v1, v2)
+
+    def test_length_one_axis(self):
+        # gh-5890, gh-9524 : length-1 axis is legal for method='linear'.
+        # Along the axis it's linear interpolation; away from the length-1
+        # axis, it's an extrapolation, so fill_value should be used.
+
+        values = np.array([[0.1, 1, 10]])
+        xi = np.array([[1, 2.2], [1, 3.2], [1, 3.8]])
+
+        res = interpn(([1], [2, 3, 4]), values, xi)
+        wanted = [0.9*0.2 + 0.1,   # on [2, 3) it's 0.9*(x-2) + 0.1
+                  9*0.2 + 1,       # on [3, 4] it's 9*(x-3) + 1
+                  9*0.8 + 1]
+
+        assert_allclose(res, wanted, atol=1e-15)
+
+        # check extrapolation
+        xi = np.array([[1.1, 2.2], [1.5, 3.2], [-2.3, 3.8]])
+        res = interpn(([1], [2, 3, 4]), values, xi,
+                      bounds_error=False, fill_value=None)
+
+        assert_allclose(res, wanted, atol=1e-15)
+
+    def test_descending_points(self):
+        def value_func_4d(x, y, z, a):
+            return 2 * x ** 3 + 3 * y ** 2 - z - a
+
+        x1 = np.array([0, 1, 2, 3])
+        x2 = np.array([0, 10, 20, 30])
+        x3 = np.array([0, 10, 20, 30])
+        x4 = np.array([0, .1, .2, .30])
+        points = (x1, x2, x3, x4)
+        values = value_func_4d(
+            *np.meshgrid(*points, indexing='ij', sparse=True))
+        pts = (0.1, 0.3, np.transpose(np.linspace(0, 30, 4)),
+               np.linspace(0, 0.3, 4))
+        correct_result = interpn(points, values, pts)
+
+        x1_descend = x1[::-1]
+        x2_descend = x2[::-1]
+        x3_descend = x3[::-1]
+        x4_descend = x4[::-1]
+        points_shuffled = (x1_descend, x2_descend, x3_descend, x4_descend)
+        values_shuffled = value_func_4d(
+            *np.meshgrid(*points_shuffled, indexing='ij', sparse=True))
+        test_result = interpn(points_shuffled, values_shuffled, pts)
+
+        assert_array_equal(correct_result, test_result)
+
+    def test_invalid_points_order(self):
+        x = np.array([.5, 2., 0., 4., 5.5])  # not ascending or descending
+        y = np.array([.5, 2., 3., 4., 5.5])
+        z = np.array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
+                      [1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
+        xi = np.array([[1, 2.3, 6.3, 0.5, 3.3, 1.2, 3],
+                       [1, 3.3, 1.2, -4.0, 5.0, 1.0, 3]]).T
+
+        match = "must be strictly ascending or descending"
+        with pytest.raises(ValueError, match=match):
+            interpn((x, y), z, xi)
+
+    def test_invalid_xi_dimensions(self):
+        # https://github.com/scipy/scipy/issues/16519
+        points = [(0, 1)]
+        values = [0, 1]
+        xi = np.ones((1, 1, 3))
+        msg = ("The requested sample points xi have dimension 3, but this "
+               "RegularGridInterpolator has dimension 1")
+        with assert_raises(ValueError, match=msg):
+            interpn(points, values, xi)
+
+    def test_readonly_grid(self):
+        # https://github.com/scipy/scipy/issues/17716
+        x = np.linspace(0, 4, 5)
+        y = np.linspace(0, 5, 6)
+        z = np.linspace(0, 6, 7)
+        points = (x, y, z)
+        values = np.ones((5, 6, 7))
+        point = np.array([2.21, 3.12, 1.15])
+        for d in points:
+            d.flags.writeable = False
+        values.flags.writeable = False
+        point.flags.writeable = False
+        interpn(points, values, point)
+        RegularGridInterpolator(points, values)(point)
+
+    def test_2d_readonly_grid(self):
+        # https://github.com/scipy/scipy/issues/17716
+        # test special 2d case
+        x = np.linspace(0, 4, 5)
+        y = np.linspace(0, 5, 6)
+        points = (x, y)
+        values = np.ones((5, 6))
+        point = np.array([2.21, 3.12])
+        for d in points:
+            d.flags.writeable = False
+        values.flags.writeable = False
+        point.flags.writeable = False
+        interpn(points, values, point)
+        RegularGridInterpolator(points, values)(point)
+
+    def test_non_c_contiguous_grid(self):
+        # https://github.com/scipy/scipy/issues/17716
+        x = np.linspace(0, 4, 5)
+        x = np.vstack((x, np.empty_like(x))).T.copy()[:, 0]
+        assert not x.flags.c_contiguous
+        y = np.linspace(0, 5, 6)
+        z = np.linspace(0, 6, 7)
+        points = (x, y, z)
+        values = np.ones((5, 6, 7))
+        point = np.array([2.21, 3.12, 1.15])
+        interpn(points, values, point)
+        RegularGridInterpolator(points, values)(point)
+
+    @pytest.mark.parametrize("dtype", ['>f8', '`__
+
+MATLAB® files
+=============
+
+.. autosummary::
+   :toctree: generated/
+
+   loadmat - Read a MATLAB style mat file (version 4 through 7.1)
+   savemat - Write a MATLAB style mat file (version 4 through 7.1)
+   whosmat - List contents of a MATLAB style mat file (version 4 through 7.1)
+
+For low-level MATLAB reading and writing utilities, see `scipy.io.matlab`.
+
+IDL® files
+==========
+
+.. autosummary::
+   :toctree: generated/
+
+   readsav - Read an IDL 'save' file
+
+Matrix Market files
+===================
+
+.. autosummary::
+   :toctree: generated/
+
+   mminfo - Query matrix info from Matrix Market formatted file
+   mmread - Read matrix from Matrix Market formatted file
+   mmwrite - Write matrix to Matrix Market formatted file
+
+Unformatted Fortran files
+=========================
+
+.. autosummary::
+   :toctree: generated/
+
+   FortranFile - A file object for unformatted sequential Fortran files
+   FortranEOFError - Exception indicating the end of a well-formed file
+   FortranFormattingError - Exception indicating an inappropriate end
+
+Netcdf
+======
+
+.. autosummary::
+   :toctree: generated/
+
+   netcdf_file - A file object for NetCDF data
+   netcdf_variable - A data object for the netcdf module
+
+Harwell-Boeing files
+====================
+
+.. autosummary::
+   :toctree: generated/
+
+   hb_read - Read an HB-format file
+   hb_write - Write an HB-format file
+
+Wav sound files (:mod:`scipy.io.wavfile`)
+=========================================
+
+.. module:: scipy.io.wavfile
+
+.. autosummary::
+   :toctree: generated/
+
+   read
+   write
+   WavFileWarning
+
+Arff files (:mod:`scipy.io.arff`)
+=================================
+
+.. module:: scipy.io.arff
+
+.. autosummary::
+   :toctree: generated/
+
+   loadarff
+   MetaData
+   ArffError
+   ParseArffError
+"""
+# matfile read and write
+from .matlab import loadmat, savemat, whosmat
+
+# netCDF file support
+from ._netcdf import netcdf_file, netcdf_variable
+
+# Fortran file support
+from ._fortran import FortranFile, FortranEOFError, FortranFormattingError
+
+from ._mmio import mminfo, mmread, mmwrite
+from ._idl import readsav
+from ._harwell_boeing import hb_read, hb_write
+
+# Deprecated namespaces, to be removed in v2.0.0
+from . import arff, harwell_boeing, idl, mmio, netcdf, wavfile
+
+__all__ = [s for s in dir() if not s.startswith('_')]
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/_fortran.py b/__packaged__/coreml/.python_dependencies/scipy/io/_fortran.py
new file mode 100644
index 00000000..b448ed58
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/_fortran.py
@@ -0,0 +1,354 @@
+"""
+Module to read / write Fortran unformatted sequential files.
+
+This is in the spirit of code written by Neil Martinsen-Burrell and Joe Zuntz.
+
+"""
+import warnings
+import numpy as np
+
+__all__ = ['FortranFile', 'FortranEOFError', 'FortranFormattingError']
+
+
+class FortranEOFError(TypeError, OSError):
+    """Indicates that the file ended properly.
+
+    This error descends from TypeError because the code used to raise
+    TypeError (and this was the only way to know that the file had
+    ended) so users might have ``except TypeError:``.
+
+    """
+    pass
+
+
+class FortranFormattingError(TypeError, OSError):
+    """Indicates that the file ended mid-record.
+
+    Descends from TypeError for backward compatibility.
+
+    """
+    pass
+
+
+class FortranFile:
+    """
+    A file object for unformatted sequential files from Fortran code.
+
+    Parameters
+    ----------
+    filename : file or str
+        Open file object or filename.
+    mode : {'r', 'w'}, optional
+        Read-write mode, default is 'r'.
+    header_dtype : dtype, optional
+        Data type of the header. Size and endianness must match the input/output file.
+
+    Notes
+    -----
+    These files are broken up into records of unspecified types. The size of
+    each record is given at the start (although the size of this header is not
+    standard) and the data is written onto disk without any formatting. Fortran
+    compilers supporting the BACKSPACE statement will write a second copy of
+    the size to facilitate backwards seeking.
+
+    This class only supports files written with both sizes for the record.
+    It also does not support the subrecords used in Intel and gfortran compilers
+    for records which are greater than 2GB with a 4-byte header.
+
+    An example of an unformatted sequential file in Fortran would be written as::
+
+        OPEN(1, FILE=myfilename, FORM='unformatted')
+
+        WRITE(1) myvariable
+
+    Since this is a non-standard file format, whose contents depend on the
+    compiler and the endianness of the machine, caution is advised. Files from
+    gfortran 4.8.0 and gfortran 4.1.2 on x86_64 are known to work.
+
+    Consider using Fortran direct-access files or files from the newer Stream
+    I/O, which can be easily read by `numpy.fromfile`.
+
+    Examples
+    --------
+    To create an unformatted sequential Fortran file:
+
+    >>> from scipy.io import FortranFile
+    >>> import numpy as np
+    >>> f = FortranFile('test.unf', 'w')
+    >>> f.write_record(np.array([1,2,3,4,5], dtype=np.int32))
+    >>> f.write_record(np.linspace(0,1,20).reshape((5,4)).T)
+    >>> f.close()
+
+    To read this file:
+
+    >>> f = FortranFile('test.unf', 'r')
+    >>> print(f.read_ints(np.int32))
+    [1 2 3 4 5]
+    >>> print(f.read_reals(float).reshape((5,4), order="F"))
+    [[0.         0.05263158 0.10526316 0.15789474]
+     [0.21052632 0.26315789 0.31578947 0.36842105]
+     [0.42105263 0.47368421 0.52631579 0.57894737]
+     [0.63157895 0.68421053 0.73684211 0.78947368]
+     [0.84210526 0.89473684 0.94736842 1.        ]]
+    >>> f.close()
+
+    Or, in Fortran::
+
+        integer :: a(5), i
+        double precision :: b(5,4)
+        open(1, file='test.unf', form='unformatted')
+        read(1) a
+        read(1) b
+        close(1)
+        write(*,*) a
+        do i = 1, 5
+            write(*,*) b(i,:)
+        end do
+
+    """
+    def __init__(self, filename, mode='r', header_dtype=np.uint32):
+        if header_dtype is None:
+            raise ValueError('Must specify dtype')
+
+        header_dtype = np.dtype(header_dtype)
+        if header_dtype.kind != 'u':
+            warnings.warn("Given a dtype which is not unsigned.")
+
+        if mode not in 'rw' or len(mode) != 1:
+            raise ValueError('mode must be either r or w')
+
+        if hasattr(filename, 'seek'):
+            self._fp = filename
+        else:
+            self._fp = open(filename, '%sb' % mode)
+
+        self._header_dtype = header_dtype
+
+    def _read_size(self, eof_ok=False):
+        n = self._header_dtype.itemsize
+        b = self._fp.read(n)
+        if (not b) and eof_ok:
+            raise FortranEOFError("End of file occurred at end of record")
+        elif len(b) < n:
+            raise FortranFormattingError(
+                "End of file in the middle of the record size")
+        return int(np.frombuffer(b, dtype=self._header_dtype, count=1)[0])
+
+    def write_record(self, *items):
+        """
+        Write a record (including sizes) to the file.
+
+        Parameters
+        ----------
+        *items : array_like
+            The data arrays to write.
+
+        Notes
+        -----
+        Writes data items to a file::
+
+            write_record(a.T, b.T, c.T, ...)
+
+            write(1) a, b, c, ...
+
+        Note that data in multidimensional arrays is written in
+        row-major order --- to make them read correctly by Fortran
+        programs, you need to transpose the arrays yourself when
+        writing them.
+
+        """
+        items = tuple(np.asarray(item) for item in items)
+        total_size = sum(item.nbytes for item in items)
+
+        nb = np.array([total_size], dtype=self._header_dtype)
+
+        nb.tofile(self._fp)
+        for item in items:
+            item.tofile(self._fp)
+        nb.tofile(self._fp)
+
+    def read_record(self, *dtypes, **kwargs):
+        """
+        Reads a record of a given type from the file.
+
+        Parameters
+        ----------
+        *dtypes : dtypes, optional
+            Data type(s) specifying the size and endianness of the data.
+
+        Returns
+        -------
+        data : ndarray
+            A 1-D array object.
+
+        Raises
+        ------
+        FortranEOFError
+            To signal that no further records are available
+        FortranFormattingError
+            To signal that the end of the file was encountered
+            part-way through a record
+
+        Notes
+        -----
+        If the record contains a multidimensional array, you can specify
+        the size in the dtype. For example::
+
+            INTEGER var(5,4)
+
+        can be read with::
+
+            read_record('(4,5)i4').T
+
+        Note that this function does **not** assume the file data is in Fortran
+        column major order, so you need to (i) swap the order of dimensions
+        when reading and (ii) transpose the resulting array.
+
+        Alternatively, you can read the data as a 1-D array and handle the
+        ordering yourself. For example::
+
+            read_record('i4').reshape(5, 4, order='F')
+
+        For records that contain several variables or mixed types (as opposed
+        to single scalar or array types), give them as separate arguments::
+
+            double precision :: a
+            integer :: b
+            write(1) a, b
+
+            record = f.read_record('<f8', '<i4')
+
+class IntFormat:
+    @classmethod
+    def from_number(cls, n, min=None):
+        """Given an integer, returns a "reasonable" IntFormat instance to
+        represent any number between 0 and n if n > 0, -n and n if n < 0
+
+        Parameters
+        ----------
+        n : int
+            max number one wants to be able to represent
+        min : int
+            minimum number of characters to use for the format
+
+        Returns
+        -------
+        res : IntFormat
+            IntFormat instance with reasonable (see Notes) computed width
+
+        Notes
+        -----
+        Reasonable should be understood as the minimal string length necessary
+        without losing precision. For example, IntFormat.from_number(1) will
+        return an IntFormat instance of width 2: one character for the digit
+        and one reserved for a leading sign or space.
+        """
+        width = number_digits(n) + 1
+        if n < 0:
+            width += 1
+        repeat = 80 // width
+        return cls(width, min, repeat=repeat)
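+    # Worked example, matching the unit tests below: from_number(123456789)
+    # has 9 digits, so width = 10 and repeat = 80 // 10 = 8, i.e. "(8I10)";
+    # from_number(-12) reserves one extra character for the sign, giving
+    # width = 4 and repeat = 80 // 4 = 20, i.e. "(20I4)".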
+
+    def __init__(self, width, min=None, repeat=None):
+        self.width = width
+        self.repeat = repeat
+        self.min = min
+
+    def __repr__(self):
+        r = "IntFormat("
+        if self.repeat:
+            r += "%d" % self.repeat
+        r += "I%d" % self.width
+        if self.min:
+            r += ".%d" % self.min
+        return r + ")"
+
+    @property
+    def fortran_format(self):
+        r = "("
+        if self.repeat:
+            r += "%d" % self.repeat
+        r += "I%d" % self.width
+        if self.min:
+            r += ".%d" % self.min
+        return r + ")"
+
+    @property
+    def python_format(self):
+        return "%" + str(self.width) + "d"
+
+
+class ExpFormat:
+    @classmethod
+    def from_number(cls, n, min=None):
+        """Given a float number, returns a "reasonable" ExpFormat instance to
+        represent any number between -n and n.
+
+        Parameters
+        ----------
+        n : float
+            max number one wants to be able to represent
+        min : int
+            minimum number of characters to use for the format
+
+        Returns
+        -------
+        res : ExpFormat
+            ExpFormat instance with reasonable (see Notes) computed width
+
+        Notes
+        -----
+        Reasonable should be understood as the minimal string length necessary
+        to avoid losing precision.
+        """
+        # len of one number in exp format: sign + 1|0 + "." +
+        # number of digit for fractional part + 'E' + sign of exponent +
+        # len of exponent
+        finfo = np.finfo(n.dtype)
+        # Number of digits for fractional part
+        n_prec = finfo.precision + 1
+        # Number of digits for exponential part
+        n_exp = number_digits(np.max(np.abs([finfo.maxexp, finfo.minexp])))
+        width = 1 + 1 + n_prec + 1 + n_exp + 1
+        if n < 0:
+            width += 1
+        repeat = int(np.floor(80 / width))
+        return cls(width, n_prec, min, repeat=repeat)
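+    # Worked example for a float64 input, matching the unit tests below:
+    # precision is 15, so n_prec = 16; max(|1024|, |-1021|) = 1024 needs
+    # 4 digits, so width = 1 + 1 + 16 + 1 + 4 + 1 = 24 (25 for negative n)
+    # and repeat = 80 // 24 = 3, e.g. ExpFormat(24, 16, repeat=3).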
+
+    def __init__(self, width, significand, min=None, repeat=None):
+        """\
+        Parameters
+        ----------
+        width : int
+            number of characters taken by the string (includes space).
+        """
+        self.width = width
+        self.significand = significand
+        self.repeat = repeat
+        self.min = min
+
+    def __repr__(self):
+        r = "ExpFormat("
+        if self.repeat:
+            r += "%d" % self.repeat
+        r += "E%d.%d" % (self.width, self.significand)
+        if self.min:
+            r += "E%d" % self.min
+        return r + ")"
+
+    @property
+    def fortran_format(self):
+        r = "("
+        if self.repeat:
+            r += "%d" % self.repeat
+        r += "E%d.%d" % (self.width, self.significand)
+        if self.min:
+            r += "E%d" % self.min
+        return r + ")"
+
+    @property
+    def python_format(self):
+        return "%" + str(self.width-1) + "." + str(self.significand) + "E"
+
+
+class Token:
+    def __init__(self, type, value, pos):
+        self.type = type
+        self.value = value
+        self.pos = pos
+
+    def __str__(self):
+        return """Token('%s', "%s")""" % (self.type, self.value)
+
+    def __repr__(self):
+        return self.__str__()
+
+
+class Tokenizer:
+    def __init__(self):
+        self.tokens = list(TOKENS.keys())
+        self.res = [re.compile(TOKENS[i]) for i in self.tokens]
+
+    def input(self, s):
+        self.data = s
+        self.curpos = 0
+        self.len = len(s)
+
+    def next_token(self):
+        curpos = self.curpos
+
+        while curpos < self.len:
+            for i, r in enumerate(self.res):
+                m = r.match(self.data, curpos)
+                if m is None:
+                    continue
+                else:
+                    self.curpos = m.end()
+                    return Token(self.tokens[i], m.group(), self.curpos)
+            raise SyntaxError("Unknown character at position %d (%s)"
+                              % (self.curpos, self.data[curpos]))
+
+
+# Grammar for fortran format:
+# format            : LPAR format_string RPAR
+# format_string     : repeated | simple
+# repeated          : repeat simple
+# simple            : int_fmt | exp_fmt
+# int_fmt           : INT_ID width
+# exp_fmt           : simple_exp_fmt
+# simple_exp_fmt    : EXP_ID width DOT significand
+# extended_exp_fmt  : EXP_ID width DOT significand EXP_ID ndigits
+# repeat            : INT
+# width             : INT
+# significand       : INT
+# ndigits           : INT
+
+# Naive fortran formatter - parser is hand-made
+class FortranFormatParser:
+    """Parser for Fortran format strings. The parse method returns a *Format
+    instance.
+
+    Notes
+    -----
+    Only ExpFormat (exponential format for floating values) and IntFormat
+    (integer format) are supported for now.
+    """
+    def __init__(self):
+        self.tokenizer = Tokenizer()
+
+    def parse(self, s):
+        self.tokenizer.input(s)
+
+        tokens = []
+
+        try:
+            while True:
+                t = self.tokenizer.next_token()
+                if t is None:
+                    break
+                else:
+                    tokens.append(t)
+            return self._parse_format(tokens)
+        except SyntaxError as e:
+            raise BadFortranFormat(str(e)) from e
+
+    def _get_min(self, tokens):
+        next = tokens.pop(0)
+        if not next.type == "DOT":
+            raise SyntaxError()
+        next = tokens.pop(0)
+        return next.value
+
+    def _expect(self, token, tp):
+        if not token.type == tp:
+            raise SyntaxError()
+
+    def _parse_format(self, tokens):
+        if not tokens[0].type == "LPAR":
+            raise SyntaxError("Expected left parenthesis at position "
+                              "%d (got '%s')" % (0, tokens[0].value))
+        elif not tokens[-1].type == "RPAR":
+            raise SyntaxError("Expected right parenthesis at position "
+                              "%d (got '%s')" % (len(tokens), tokens[-1].value))
+
+        tokens = tokens[1:-1]
+        types = [t.type for t in tokens]
+        if types[0] == "INT":
+            repeat = int(tokens.pop(0).value)
+        else:
+            repeat = None
+
+        next = tokens.pop(0)
+        if next.type == "INT_ID":
+            next = self._next(tokens, "INT")
+            width = int(next.value)
+            if tokens:
+                min = int(self._get_min(tokens))
+            else:
+                min = None
+            return IntFormat(width, min, repeat)
+        elif next.type == "EXP_ID":
+            next = self._next(tokens, "INT")
+            width = int(next.value)
+
+            next = self._next(tokens, "DOT")
+
+            next = self._next(tokens, "INT")
+            significand = int(next.value)
+
+            if tokens:
+                next = self._next(tokens, "EXP_ID")
+
+                next = self._next(tokens, "INT")
+                min = int(next.value)
+            else:
+                min = None
+            return ExpFormat(width, significand, min, repeat)
+        else:
+            raise SyntaxError("Invalid formater type %s" % next.value)
+
+    def _next(self, tokens, tp):
+        if not len(tokens) > 0:
+            raise SyntaxError()
+        next = tokens.pop(0)
+        self._expect(next, tp)
+        return next
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/_harwell_boeing/hb.py b/__packaged__/coreml/.python_dependencies/scipy/io/_harwell_boeing/hb.py
new file mode 100644
index 00000000..f2630ce4
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/_harwell_boeing/hb.py
@@ -0,0 +1,570 @@
+"""
+Implementation of Harwell-Boeing read/write.
+
+At the moment, the full Harwell-Boeing format is not supported. Supported
+features are:
+
+    - assembled, non-symmetric, real matrices
+    - integer for pointer/indices
+    - exponential format for float values, and int format
+
+"""
+# TODO:
+#   - Add more support (symmetric/complex matrices, non-assembled matrices ?)
+
+# XXX: reading is reasonably efficient (>= 85 % is in numpy.fromstring), but
+# takes a lot of memory. Being faster would require compiled code.
+# write is not efficient. Although not a terribly exciting task,
+# having reusable facilities to efficiently read/write fortran-formatted files
+# would be useful outside this module.
+
+import warnings
+
+import numpy as np
+from scipy.sparse import csc_matrix
+from ._fortran_format_parser import FortranFormatParser, IntFormat, ExpFormat
+
+__all__ = ["MalformedHeader", "hb_read", "hb_write", "HBInfo", "HBFile",
+           "HBMatrixType"]
+
+
+class MalformedHeader(Exception):
+    pass
+
+
+class LineOverflow(Warning):
+    pass
+
+
+def _nbytes_full(fmt, nlines):
+    """Return the number of bytes to read to get every full lines for the
+    given parsed fortran format."""
+    return (fmt.repeat * fmt.width + 1) * (nlines - 1)
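+# For example, with the "(26I3)" format used in the test fixtures, repeat=26
+# and width=3, so every full line occupies 26*3 + 1 = 79 bytes including the
+# newline; the final, possibly partial, line is read separately.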
+
+
+class HBInfo:
+    @classmethod
+    def from_data(cls, m, title="Default title", key="0", mxtype=None, fmt=None):
+        """Create a HBInfo instance from an existing sparse matrix.
+
+        Parameters
+        ----------
+        m : sparse matrix
+            the HBInfo instance will derive its parameters from m
+        title : str
+            Title to put in the HB header
+        key : str
+            Key
+        mxtype : HBMatrixType
+            type of the input matrix
+        fmt : dict
+            not implemented
+
+        Returns
+        -------
+        hb_info : HBInfo instance
+        """
+        m = m.tocsc(copy=False)
+
+        pointer = m.indptr
+        indices = m.indices
+        values = m.data
+
+        nrows, ncols = m.shape
+        nnon_zeros = m.nnz
+
+        if fmt is None:
+            # +1 because HB uses one-based indexing (Fortran), and we write
+            # the indices/pointer as such
+            pointer_fmt = IntFormat.from_number(np.max(pointer+1))
+            indices_fmt = IntFormat.from_number(np.max(indices+1))
+
+            if values.dtype.kind in np.typecodes["AllFloat"]:
+                values_fmt = ExpFormat.from_number(-np.max(np.abs(values)))
+            elif values.dtype.kind in np.typecodes["AllInteger"]:
+                values_fmt = IntFormat.from_number(-np.max(np.abs(values)))
+            else:
+                raise NotImplementedError("type %s not implemented yet" % values.dtype.kind)
+        else:
+            raise NotImplementedError("fmt argument not supported yet.")
+
+        if mxtype is None:
+            if not np.isrealobj(values):
+                raise ValueError("Complex values not supported yet")
+            if values.dtype.kind in np.typecodes["AllInteger"]:
+                tp = "integer"
+            elif values.dtype.kind in np.typecodes["AllFloat"]:
+                tp = "real"
+            else:
+                raise NotImplementedError("type %s for values not implemented"
+                                          % values.dtype)
+            mxtype = HBMatrixType(tp, "unsymmetric", "assembled")
+        else:
+            raise ValueError("mxtype argument not handled yet.")
+
+        def _nlines(fmt, size):
+            nlines = size // fmt.repeat
+            if nlines * fmt.repeat != size:
+                nlines += 1
+            return nlines
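+        # _nlines is ceil(size / fmt.repeat): the number of text lines needed
+        # to hold `size` items at `fmt.repeat` items per line.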
+
+        pointer_nlines = _nlines(pointer_fmt, pointer.size)
+        indices_nlines = _nlines(indices_fmt, indices.size)
+        values_nlines = _nlines(values_fmt, values.size)
+
+        total_nlines = pointer_nlines + indices_nlines + values_nlines
+
+        return cls(title, key,
+            total_nlines, pointer_nlines, indices_nlines, values_nlines,
+            mxtype, nrows, ncols, nnon_zeros,
+            pointer_fmt.fortran_format, indices_fmt.fortran_format,
+            values_fmt.fortran_format)
+
+    @classmethod
+    def from_file(cls, fid):
+        """Create a HBInfo instance from a file object containing a matrix in the
+        HB format.
+
+        Parameters
+        ----------
+        fid : file-like
+            File or file-like object containing a matrix in the HB format.
+
+        Returns
+        -------
+        hb_info : HBInfo instance
+        """
+        # First line
+        line = fid.readline().strip("\n")
+        if not len(line) > 72:
+            raise ValueError("Expected at least 72 characters for first line, "
+                             "got: \n%s" % line)
+        title = line[:72]
+        key = line[72:]
+
+        # Second line
+        line = fid.readline().strip("\n")
+        if not len(line.rstrip()) >= 56:
+            raise ValueError("Expected at least 56 characters for second line, "
+                             "got: \n%s" % line)
+        total_nlines = _expect_int(line[:14])
+        pointer_nlines = _expect_int(line[14:28])
+        indices_nlines = _expect_int(line[28:42])
+        values_nlines = _expect_int(line[42:56])
+
+        rhs_nlines = line[56:72].strip()
+        if rhs_nlines == '':
+            rhs_nlines = 0
+        else:
+            rhs_nlines = _expect_int(rhs_nlines)
+        if not rhs_nlines == 0:
+            raise ValueError("Only files without right hand side supported for "
+                             "now.")
+
+        # Third line
+        line = fid.readline().strip("\n")
+        if not len(line) >= 70:
+            raise ValueError("Expected at least 72 character for third line, got:\n"
+                             "%s" % line)
+
+        mxtype_s = line[:3].upper()
+        if not len(mxtype_s) == 3:
+            raise ValueError("mxtype expected to be 3 characters long")
+
+        mxtype = HBMatrixType.from_fortran(mxtype_s)
+        if mxtype.value_type not in ["real", "integer"]:
+            raise ValueError("Only real or integer matrices supported for "
+                             "now (detected %s)" % mxtype)
+        if not mxtype.structure == "unsymmetric":
+            raise ValueError("Only unsymmetric matrices supported for "
+                             "now (detected %s)" % mxtype)
+        if not mxtype.storage == "assembled":
+            raise ValueError("Only assembled matrices supported for now")
+
+        if not line[3:14] == " " * 11:
+            raise ValueError("Malformed data for third line: %s" % line)
+
+        nrows = _expect_int(line[14:28])
+        ncols = _expect_int(line[28:42])
+        nnon_zeros = _expect_int(line[42:56])
+        nelementals = _expect_int(line[56:70])
+        if not nelementals == 0:
+            raise ValueError("Unexpected value %d for nltvl (last entry of line 3)"
+                             % nelementals)
+
+        # Fourth line
+        line = fid.readline().strip("\n")
+
+        ct = line.split()
+        if not len(ct) == 3:
+            raise ValueError("Expected 3 formats, got %s" % ct)
+
+        return cls(title, key,
+                   total_nlines, pointer_nlines, indices_nlines, values_nlines,
+                   mxtype, nrows, ncols, nnon_zeros,
+                   ct[0], ct[1], ct[2],
+                   rhs_nlines, nelementals)
+
+    def __init__(self, title, key,
+            total_nlines, pointer_nlines, indices_nlines, values_nlines,
+            mxtype, nrows, ncols, nnon_zeros,
+            pointer_format_str, indices_format_str, values_format_str,
+            right_hand_sides_nlines=0, nelementals=0):
+        """Do not use this directly, but the class ctrs (from_* functions)."""
+        if title is None:
+            title = "No Title"
+        if len(title) > 72:
+            raise ValueError("title cannot be > 72 characters")
+
+        if key is None:
+            key = "|No Key"
+        if len(key) > 8:
+            warnings.warn("key is > 8 characters (key is %s)" % key, LineOverflow)
+
+        # Assign only after the defaults above, so dump() never sees None.
+        self.title = title
+        self.key = key
+
+        self.total_nlines = total_nlines
+        self.pointer_nlines = pointer_nlines
+        self.indices_nlines = indices_nlines
+        self.values_nlines = values_nlines
+
+        parser = FortranFormatParser()
+        pointer_format = parser.parse(pointer_format_str)
+        if not isinstance(pointer_format, IntFormat):
+            raise ValueError("Expected int format for pointer format, got %s"
+                             % pointer_format)
+
+        indices_format = parser.parse(indices_format_str)
+        if not isinstance(indices_format, IntFormat):
+            raise ValueError("Expected int format for indices format, got %s" %
+                             indices_format)
+
+        values_format = parser.parse(values_format_str)
+        if isinstance(values_format, ExpFormat):
+            if mxtype.value_type not in ["real", "complex"]:
+                raise ValueError("Inconsistency between matrix type %s and "
+                                 "value type %s" % (mxtype, values_format))
+            values_dtype = np.float64
+        elif isinstance(values_format, IntFormat):
+            if mxtype.value_type not in ["integer"]:
+                raise ValueError("Inconsistency between matrix type %s and "
+                                 "value type %s" % (mxtype, values_format))
+            # XXX: fortran int -> dtype association ?
+            values_dtype = int
+        else:
+            raise ValueError("Unsupported format for values %r" % (values_format,))
+
+        self.pointer_format = pointer_format
+        self.indices_format = indices_format
+        self.values_format = values_format
+
+        self.pointer_dtype = np.int32
+        self.indices_dtype = np.int32
+        self.values_dtype = values_dtype
+
+        self.pointer_nlines = pointer_nlines
+        self.pointer_nbytes_full = _nbytes_full(pointer_format, pointer_nlines)
+
+        self.indices_nlines = indices_nlines
+        self.indices_nbytes_full = _nbytes_full(indices_format, indices_nlines)
+
+        self.values_nlines = values_nlines
+        self.values_nbytes_full = _nbytes_full(values_format, values_nlines)
+
+        self.nrows = nrows
+        self.ncols = ncols
+        self.nnon_zeros = nnon_zeros
+        self.nelementals = nelementals
+        self.mxtype = mxtype
+
+    def dump(self):
+        """Gives the header corresponding to this instance as a string."""
+        header = [self.title.ljust(72) + self.key.ljust(8)]
+
+        header.append("%14d%14d%14d%14d" %
+                      (self.total_nlines, self.pointer_nlines,
+                       self.indices_nlines, self.values_nlines))
+        header.append("%14s%14d%14d%14d%14d" %
+                      (self.mxtype.fortran_format.ljust(14), self.nrows,
+                       self.ncols, self.nnon_zeros, 0))
+
+        pffmt = self.pointer_format.fortran_format
+        iffmt = self.indices_format.fortran_format
+        vffmt = self.values_format.fortran_format
+        header.append("%16s%16s%20s" %
+                      (pffmt.ljust(16), iffmt.ljust(16), vffmt.ljust(20)))
+        return "\n".join(header)
+
+
+def _expect_int(value, msg=None):
+    try:
+        return int(value)
+    except ValueError as e:
+        if msg is None:
+            msg = "Expected an int, got %s"
+        raise ValueError(msg % value) from e
+
+
+def _read_hb_data(content, header):
+    # XXX: look at a way to reduce memory here (big string creation)
+    ptr_string = "".join([content.read(header.pointer_nbytes_full),
+                           content.readline()])
+    ptr = np.fromstring(ptr_string,
+            dtype=int, sep=' ')
+
+    ind_string = "".join([content.read(header.indices_nbytes_full),
+                       content.readline()])
+    ind = np.fromstring(ind_string,
+            dtype=int, sep=' ')
+
+    val_string = "".join([content.read(header.values_nbytes_full),
+                          content.readline()])
+    val = np.fromstring(val_string,
+            dtype=header.values_dtype, sep=' ')
+
+    return csc_matrix((val, ind-1, ptr-1),
+                      shape=(header.nrows, header.ncols))
+
+
+def _write_data(m, fid, header):
+    m = m.tocsc(copy=False)
+
+    def write_array(f, ar, nlines, fmt):
+        # ar_nlines is the number of full lines, n is the number of items per
+        # line, ffmt the fortran format
+        pyfmt = fmt.python_format
+        pyfmt_full = pyfmt * fmt.repeat
+
+        # for each array to write, we first write the full lines, and special
+        # case for partial line
+        full = ar[:(nlines - 1) * fmt.repeat]
+        for row in full.reshape((nlines-1, fmt.repeat)):
+            f.write(pyfmt_full % tuple(row) + "\n")
+        nremain = ar.size - full.size
+        if nremain > 0:
+            f.write((pyfmt * nremain) % tuple(ar[ar.size - nremain:]) + "\n")
+
+    fid.write(header.dump())
+    fid.write("\n")
+    # +1 is for Fortran one-based indexing
+    write_array(fid, m.indptr+1, header.pointer_nlines,
+                header.pointer_format)
+    write_array(fid, m.indices+1, header.indices_nlines,
+                header.indices_format)
+    write_array(fid, m.data, header.values_nlines,
+                header.values_format)
+
+
+class HBMatrixType:
+    """Class to hold the matrix type."""
+    # q2f* translates qualified names to Fortran character
+    _q2f_type = {
+        "real": "R",
+        "complex": "C",
+        "pattern": "P",
+        "integer": "I",
+    }
+    _q2f_structure = {
+            "symmetric": "S",
+            "unsymmetric": "U",
+            "hermitian": "H",
+            "skewsymmetric": "Z",
+            "rectangular": "R"
+    }
+    _q2f_storage = {
+        "assembled": "A",
+        "elemental": "E",
+    }
+
+    _f2q_type = {j: i for i, j in _q2f_type.items()}
+    _f2q_structure = {j: i for i, j in _q2f_structure.items()}
+    _f2q_storage = {j: i for i, j in _q2f_storage.items()}
+
+    @classmethod
+    def from_fortran(cls, fmt):
+        if not len(fmt) == 3:
+            raise ValueError("Fortran format for matrix type should be 3 "
+                             "characters long")
+        try:
+            value_type = cls._f2q_type[fmt[0]]
+            structure = cls._f2q_structure[fmt[1]]
+            storage = cls._f2q_storage[fmt[2]]
+            return cls(value_type, structure, storage)
+        except KeyError as e:
+            raise ValueError("Unrecognized format %s" % fmt) from e
+
+    def __init__(self, value_type, structure, storage="assembled"):
+        self.value_type = value_type
+        self.structure = structure
+        self.storage = storage
+
+        if value_type not in self._q2f_type:
+            raise ValueError("Unrecognized type %s" % value_type)
+        if structure not in self._q2f_structure:
+            raise ValueError("Unrecognized structure %s" % structure)
+        if storage not in self._q2f_storage:
+            raise ValueError("Unrecognized storage %s" % storage)
+
+    @property
+    def fortran_format(self):
+        return self._q2f_type[self.value_type] + \
+               self._q2f_structure[self.structure] + \
+               self._q2f_storage[self.storage]
+
+    def __repr__(self):
+        return "HBMatrixType(%s, %s, %s)" % \
+               (self.value_type, self.structure, self.storage)
+
+
+class HBFile:
+    def __init__(self, file, hb_info=None):
+        """Create a HBFile instance.
+
+        Parameters
+        ----------
+        file : file-object
+            a StringIO works as well
+        hb_info : HBInfo, optional
+            Should be given as an argument for writing, in which case the file
+            should be writable.
+        """
+        self._fid = file
+        if hb_info is None:
+            self._hb_info = HBInfo.from_file(file)
+        else:
+            #raise OSError("file %s is not writable, and hb_info "
+            #              "was given." % file)
+            self._hb_info = hb_info
+
+    @property
+    def title(self):
+        return self._hb_info.title
+
+    @property
+    def key(self):
+        return self._hb_info.key
+
+    @property
+    def type(self):
+        return self._hb_info.mxtype.value_type
+
+    @property
+    def structure(self):
+        return self._hb_info.mxtype.structure
+
+    @property
+    def storage(self):
+        return self._hb_info.mxtype.storage
+
+    def read_matrix(self):
+        return _read_hb_data(self._fid, self._hb_info)
+
+    def write_matrix(self, m):
+        return _write_data(m, self._fid, self._hb_info)
+
+
+def hb_read(path_or_open_file):
+    """Read HB-format file.
+
+    Parameters
+    ----------
+    path_or_open_file : path-like or file-like
+        If a file-like object, it is used as-is. Otherwise, it is opened
+        before reading.
+
+    Returns
+    -------
+    data : scipy.sparse.csc_matrix instance
+        The data read from the HB file as a sparse matrix.
+
+    Notes
+    -----
+    At the moment, the full Harwell-Boeing format is not supported. Supported
+    features are:
+
+        - assembled, non-symmetric, real matrices
+        - integer for pointer/indices
+        - exponential format for float values, and int format
+
+    Examples
+    --------
+    We can read and write a harwell-boeing format file:
+
+    >>> from scipy.io import hb_read, hb_write
+    >>> from scipy.sparse import csr_matrix, eye
+    >>> data = csr_matrix(eye(3))  # create a sparse matrix
+    >>> hb_write("data.hb", data)  # write a hb file
+    >>> print(hb_read("data.hb"))  # read a hb file
+      (0, 0)	1.0
+      (1, 1)	1.0
+      (2, 2)	1.0
+
+    """
+    def _get_matrix(fid):
+        hb = HBFile(fid)
+        return hb.read_matrix()
+
+    if hasattr(path_or_open_file, 'read'):
+        return _get_matrix(path_or_open_file)
+    else:
+        with open(path_or_open_file) as f:
+            return _get_matrix(f)
+
+
+def hb_write(path_or_open_file, m, hb_info=None):
+    """Write HB-format file.
+
+    Parameters
+    ----------
+    path_or_open_file : path-like or file-like
+        If a file-like object, it is used as-is. Otherwise, it is opened
+        before writing.
+    m : sparse-matrix
+        the sparse matrix to write
+    hb_info : HBInfo
+        contains the meta-data for write
+
+    Returns
+    -------
+    None
+
+    Notes
+    -----
+    At the moment, the full Harwell-Boeing format is not supported. Supported
+    features are:
+
+        - assembled, non-symmetric, real matrices
+        - integer for pointer/indices
+        - exponential format for float values, and int format
+
+    Examples
+    --------
+    We can read and write a harwell-boeing format file:
+
+    >>> from scipy.io import hb_read, hb_write
+    >>> from scipy.sparse import csr_matrix, eye
+    >>> data = csr_matrix(eye(3))  # create a sparse matrix
+    >>> hb_write("data.hb", data)  # write a hb file
+    >>> print(hb_read("data.hb"))  # read a hb file
+      (0, 0)	1.0
+      (1, 1)	1.0
+      (2, 2)	1.0
+
+    """
+    m = m.tocsc(copy=False)
+
+    if hb_info is None:
+        hb_info = HBInfo.from_data(m)
+
+    def _set_matrix(fid):
+        hb = HBFile(fid, hb_info)
+        return hb.write_matrix(m)
+
+    if hasattr(path_or_open_file, 'write'):
+        return _set_matrix(path_or_open_file)
+    else:
+        with open(path_or_open_file, 'w') as f:
+            return _set_matrix(f)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/_harwell_boeing/tests/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/io/_harwell_boeing/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/_harwell_boeing/tests/test_fortran_format.py b/__packaged__/coreml/.python_dependencies/scipy/io/_harwell_boeing/tests/test_fortran_format.py
new file mode 100644
index 00000000..53384ca0
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/_harwell_boeing/tests/test_fortran_format.py
@@ -0,0 +1,74 @@
+import numpy as np
+
+from numpy.testing import assert_equal
+from pytest import raises as assert_raises
+
+from scipy.io._harwell_boeing import (
+        FortranFormatParser, IntFormat, ExpFormat, BadFortranFormat)
+
+
+class TestFortranFormatParser:
+    def setup_method(self):
+        self.parser = FortranFormatParser()
+
+    def _test_equal(self, format, ref):
+        ret = self.parser.parse(format)
+        assert_equal(ret.__dict__, ref.__dict__)
+
+    def test_simple_int(self):
+        self._test_equal("(I4)", IntFormat(4))
+
+    def test_simple_repeated_int(self):
+        self._test_equal("(3I4)", IntFormat(4, repeat=3))
+
+    def test_simple_exp(self):
+        self._test_equal("(E4.3)", ExpFormat(4, 3))
+
+    def test_exp_exp(self):
+        self._test_equal("(E8.3E3)", ExpFormat(8, 3, 3))
+
+    def test_repeat_exp(self):
+        self._test_equal("(2E4.3)", ExpFormat(4, 3, repeat=2))
+
+    def test_repeat_exp_exp(self):
+        self._test_equal("(2E8.3E3)", ExpFormat(8, 3, 3, repeat=2))
+
+    def test_wrong_formats(self):
+        def _test_invalid(bad_format):
+            assert_raises(BadFortranFormat, lambda: self.parser.parse(bad_format))
+        _test_invalid("I4")
+        _test_invalid("(E4)")
+        _test_invalid("(E4.)")
+        _test_invalid("(E4.E3)")
+
+
+class TestIntFormat:
+    def test_to_fortran(self):
+        f = [IntFormat(10), IntFormat(12, 10), IntFormat(12, 10, 3)]
+        res = ["(I10)", "(I12.10)", "(3I12.10)"]
+
+        for i, j in zip(f, res):
+            assert_equal(i.fortran_format, j)
+
+    def test_from_number(self):
+        f = [10, -12, 123456789]
+        r_f = [IntFormat(3, repeat=26), IntFormat(4, repeat=20),
+               IntFormat(10, repeat=8)]
+        for i, j in zip(f, r_f):
+            assert_equal(IntFormat.from_number(i).__dict__, j.__dict__)
+
+
+class TestExpFormat:
+    def test_to_fortran(self):
+        f = [ExpFormat(10, 5), ExpFormat(12, 10), ExpFormat(12, 10, min=3),
+             ExpFormat(10, 5, repeat=3)]
+        res = ["(E10.5)", "(E12.10)", "(E12.10E3)", "(3E10.5)"]
+
+        for i, j in zip(f, res):
+            assert_equal(i.fortran_format, j)
+
+    def test_from_number(self):
+        f = np.array([1.0, -1.2])
+        r_f = [ExpFormat(24, 16, repeat=3), ExpFormat(25, 16, repeat=3)]
+        for i, j in zip(f, r_f):
+            assert_equal(ExpFormat.from_number(i).__dict__, j.__dict__)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/_harwell_boeing/tests/test_hb.py b/__packaged__/coreml/.python_dependencies/scipy/io/_harwell_boeing/tests/test_hb.py
new file mode 100644
index 00000000..a4cf8823
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/_harwell_boeing/tests/test_hb.py
@@ -0,0 +1,65 @@
+from io import StringIO
+import tempfile
+
+import numpy as np
+
+from numpy.testing import assert_equal, \
+    assert_array_almost_equal_nulp
+
+from scipy.sparse import coo_matrix, csc_matrix, rand
+
+from scipy.io import hb_read, hb_write
+
+
+SIMPLE = """\
+No Title                                                                |No Key
+             9             4             1             4
+RUA                      100           100            10             0
+(26I3)          (26I3)          (3E23.15)
+1  2  2  2  2  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3
+3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3  3
+3  3  3  3  3  3  3  4  4  4  6  6  6  6  6  6  6  6  6  6  6  8  9  9  9  9
+9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9  9 11
+37 71 89 18 30 45 70 19 25 52
+2.971243799687726e-01  3.662366682877375e-01  4.786962174699534e-01
+6.490068647991184e-01  6.617490424831662e-02  8.870370343191623e-01
+4.196478590163001e-01  5.649603072111251e-01  9.934423887087086e-01
+6.912334991524289e-01
+"""
+
+SIMPLE_MATRIX = coo_matrix(
+    ((0.297124379969, 0.366236668288, 0.47869621747, 0.649006864799,
+      0.0661749042483, 0.887037034319, 0.419647859016,
+      0.564960307211, 0.993442388709, 0.691233499152,),
+     (np.array([[36, 70, 88, 17, 29, 44, 69, 18, 24, 51],
+                [0, 4, 58, 61, 61, 72, 72, 73, 99, 99]]))))
+
+
+def assert_csc_almost_equal(r, l):
+    r = csc_matrix(r)
+    l = csc_matrix(l)
+    assert_equal(r.indptr, l.indptr)
+    assert_equal(r.indices, l.indices)
+    assert_array_almost_equal_nulp(r.data, l.data, 10000)
+
+
+class TestHBReader:
+    def test_simple(self):
+        m = hb_read(StringIO(SIMPLE))
+        assert_csc_almost_equal(m, SIMPLE_MATRIX)
+
+
+class TestHBReadWrite:
+
+    def check_save_load(self, value):
+        with tempfile.NamedTemporaryFile(mode='w+t') as file:
+            hb_write(file, value)
+            file.file.seek(0)
+            value_loaded = hb_read(file)
+        assert_csc_almost_equal(value, value_loaded)
+
+    def test_simple(self):
+        random_matrix = rand(10, 100, 0.1)
+        for matrix_format in ('coo', 'csc', 'csr', 'bsr', 'dia', 'dok', 'lil'):
+            matrix = random_matrix.asformat(matrix_format, copy=False)
+            self.check_save_load(matrix)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/_idl.py b/__packaged__/coreml/.python_dependencies/scipy/io/_idl.py
new file mode 100644
index 00000000..529a772e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/_idl.py
@@ -0,0 +1,914 @@
+# IDLSave - a python module to read IDL 'save' files
+# Copyright (c) 2010 Thomas P. Robitaille
+
+# Many thanks to Craig Markwardt for publishing the Unofficial Format
+# Specification for IDL .sav files, without which this Python module would not
+# exist (http://cow.physics.wisc.edu/~craigm/idl/savefmt).
+
+# This code was developed with permission from ITT Visual Information
+# Systems. IDL(r) is a registered trademark of ITT Visual Information Systems,
+# Inc. for their Interactive Data Language software.
+
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+
+__all__ = ['readsav']
+
+import struct
+import numpy as np
+import tempfile
+import zlib
+import warnings
+
+# Define the different data types that can be found in an IDL save file
+DTYPE_DICT = {1: '>u1',
+              2: '>i2',
+              3: '>i4',
+              4: '>f4',
+              5: '>f8',
+              6: '>c8',
+              7: '|O',
+              8: '|O',
+              9: '>c16',
+              10: '|O',
+              11: '|O',
+              12: '>u2',
+              13: '>u4',
+              14: '>i8',
+              15: '>u8'}
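+# All multi-byte entries are big-endian ('>'): IDL save files store their
+# data most-significant byte first.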
+
+# Define the different record types that can be found in an IDL save file
+RECTYPE_DICT = {0: "START_MARKER",
+                1: "COMMON_VARIABLE",
+                2: "VARIABLE",
+                3: "SYSTEM_VARIABLE",
+                6: "END_MARKER",
+                10: "TIMESTAMP",
+                12: "COMPILED",
+                13: "IDENTIFICATION",
+                14: "VERSION",
+                15: "HEAP_HEADER",
+                16: "HEAP_DATA",
+                17: "PROMOTE64",
+                19: "NOTICE",
+                20: "DESCRIPTION"}
+
+# Define a dictionary to contain structure definitions
+STRUCT_DICT = {}
+
+
+def _align_32(f):
+    '''Align to the next 32-bit position in a file'''
+
+    pos = f.tell()
+    if pos % 4 != 0:
+        f.seek(pos + 4 - pos % 4)
+    return
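+# For example, a position of 5, 6 or 7 is advanced to 8; positions that are
+# already multiples of 4 are left unchanged.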
+
+
+def _skip_bytes(f, n):
+    '''Skip `n` bytes'''
+    f.read(n)
+    return
+
+
+def _read_bytes(f, n):
+    '''Read the next `n` bytes'''
+    return f.read(n)
+
+
+def _read_byte(f):
+    '''Read a single byte'''
+    return np.uint8(struct.unpack('>B', f.read(4)[:1])[0])
+
+
+def _read_long(f):
+    '''Read a signed 32-bit integer'''
+    return np.int32(struct.unpack('>l', f.read(4))[0])
+
+
+def _read_int16(f):
+    '''Read a signed 16-bit integer'''
+    return np.int16(struct.unpack('>h', f.read(4)[2:4])[0])
+
+
+def _read_int32(f):
+    '''Read a signed 32-bit integer'''
+    return np.int32(struct.unpack('>i', f.read(4))[0])
+
+
+def _read_int64(f):
+    '''Read a signed 64-bit integer'''
+    return np.int64(struct.unpack('>q', f.read(8))[0])
+
+
+def _read_uint16(f):
+    '''Read an unsigned 16-bit integer'''
+    return np.uint16(struct.unpack('>H', f.read(4)[2:4])[0])
+
+
+def _read_uint32(f):
+    '''Read an unsigned 32-bit integer'''
+    return np.uint32(struct.unpack('>I', f.read(4))[0])
+
+
+def _read_uint64(f):
+    '''Read an unsigned 64-bit integer'''
+    return np.uint64(struct.unpack('>Q', f.read(8))[0])
+
+
+def _read_float32(f):
+    '''Read a 32-bit float'''
+    return np.float32(struct.unpack('>f', f.read(4))[0])
+
+
+def _read_float64(f):
+    '''Read a 64-bit float'''
+    return np.float64(struct.unpack('>d', f.read(8))[0])
+
+
+class Pointer:
+    '''Class used to define pointers'''
+
+    def __init__(self, index):
+        self.index = index
+        return
+
+
+class ObjectPointer(Pointer):
+    '''Class used to define object pointers'''
+    pass
+
+
+def _read_string(f):
+    '''Read a string'''
+    length = _read_long(f)
+    if length > 0:
+        chars = _read_bytes(f, length).decode('latin1')
+        _align_32(f)
+    else:
+        chars = ''
+    return chars
+
+
+def _read_string_data(f):
+    '''Read a data string (length is specified twice)'''
+    length = _read_long(f)
+    if length > 0:
+        length = _read_long(f)
+        string_data = _read_bytes(f, length)
+        _align_32(f)
+    else:
+        string_data = ''
+    return string_data
+
+
+def _read_data(f, dtype):
+    '''Read a variable with a specified data type'''
+    if dtype == 1:
+        if _read_int32(f) != 1:
+            raise Exception("Error occurred while reading byte variable")
+        return _read_byte(f)
+    elif dtype == 2:
+        return _read_int16(f)
+    elif dtype == 3:
+        return _read_int32(f)
+    elif dtype == 4:
+        return _read_float32(f)
+    elif dtype == 5:
+        return _read_float64(f)
+    elif dtype == 6:
+        real = _read_float32(f)
+        imag = _read_float32(f)
+        return np.complex64(real + imag * 1j)
+    elif dtype == 7:
+        return _read_string_data(f)
+    elif dtype == 8:
+        raise Exception("Should not be here - please report this")
+    elif dtype == 9:
+        real = _read_float64(f)
+        imag = _read_float64(f)
+        return np.complex128(real + imag * 1j)
+    elif dtype == 10:
+        return Pointer(_read_int32(f))
+    elif dtype == 11:
+        return ObjectPointer(_read_int32(f))
+    elif dtype == 12:
+        return _read_uint16(f)
+    elif dtype == 13:
+        return _read_uint32(f)
+    elif dtype == 14:
+        return _read_int64(f)
+    elif dtype == 15:
+        return _read_uint64(f)
+    else:
+        raise Exception("Unknown IDL type: %i - please report this" % dtype)
+
+
+def _read_structure(f, array_desc, struct_desc):
+    '''
+    Read a structure, with the array and structure descriptors given as
+    `array_desc` and `structure_desc` respectively.
+    '''
+
+    nrows = array_desc['nelements']
+    columns = struct_desc['tagtable']
+
+    dtype = []
+    for col in columns:
+        if col['structure'] or col['array']:
+            dtype.append(((col['name'].lower(), col['name']), np.object_))
+        else:
+            if col['typecode'] in DTYPE_DICT:
+                dtype.append(((col['name'].lower(), col['name']),
+                                    DTYPE_DICT[col['typecode']]))
+            else:
+                raise Exception("Variable type %i not implemented" %
+                                                            col['typecode'])
+
+    structure = np.recarray((nrows, ), dtype=dtype)
+
+    for i in range(nrows):
+        for col in columns:
+            dtype = col['typecode']
+            if col['structure']:
+                structure[col['name']][i] = _read_structure(f,
+                                      struct_desc['arrtable'][col['name']],
+                                      struct_desc['structtable'][col['name']])
+            elif col['array']:
+                structure[col['name']][i] = _read_array(f, dtype,
+                                      struct_desc['arrtable'][col['name']])
+            else:
+                structure[col['name']][i] = _read_data(f, dtype)
+
+    # Reshape structure if needed
+    if array_desc['ndims'] > 1:
+        dims = array_desc['dims'][:int(array_desc['ndims'])]
+        dims.reverse()
+        structure = structure.reshape(dims)
+
+    return structure
+
+
+def _read_array(f, typecode, array_desc):
+    '''
+    Read an array of type `typecode`, with the array descriptor given as
+    `array_desc`.
+    '''
+
+    if typecode in [1, 3, 4, 5, 6, 9, 13, 14, 15]:
+
+        if typecode == 1:
+            nbytes = _read_int32(f)
+            if nbytes != array_desc['nbytes']:
+                warnings.warn("Not able to verify number of bytes from header")
+
+        # Read bytes as numpy array
+        array = np.frombuffer(f.read(array_desc['nbytes']),
+                              dtype=DTYPE_DICT[typecode])
+
+    elif typecode in [2, 12]:
+
+        # These are 2 byte types, need to skip every two as they are not packed
+
+        array = np.frombuffer(f.read(array_desc['nbytes']*2),
+                              dtype=DTYPE_DICT[typecode])[1::2]
+
+    else:
+
+        # Read bytes into list
+        array = []
+        for i in range(array_desc['nelements']):
+            dtype = typecode
+            data = _read_data(f, dtype)
+            array.append(data)
+
+        array = np.array(array, dtype=np.object_)
+
+    # Reshape array if needed
+    if array_desc['ndims'] > 1:
+        dims = array_desc['dims'][:int(array_desc['ndims'])]
+        dims.reverse()
+        array = array.reshape(dims)
+
+    # Go to next alignment position
+    _align_32(f)
+
+    return array
+
+
+def _read_record(f):
+    '''Function to read in a full record'''
+
+    record = {'rectype': _read_long(f)}
+
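+    # The offset of the next record is stored as two unsigned 32-bit words,
+    # low word first, which are combined into a single 64-bit file position.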
+    nextrec = _read_uint32(f)
+    nextrec += _read_uint32(f) * 2**32
+
+    _skip_bytes(f, 4)
+
+    if not record['rectype'] in RECTYPE_DICT:
+        raise Exception("Unknown RECTYPE: %i" % record['rectype'])
+
+    record['rectype'] = RECTYPE_DICT[record['rectype']]
+
+    if record['rectype'] in ["VARIABLE", "HEAP_DATA"]:
+
+        if record['rectype'] == "VARIABLE":
+            record['varname'] = _read_string(f)
+        else:
+            record['heap_index'] = _read_long(f)
+            _skip_bytes(f, 4)
+
+        rectypedesc = _read_typedesc(f)
+
+        if rectypedesc['typecode'] == 0:
+
+            if nextrec == f.tell():
+                record['data'] = None  # Indicates NULL value
+            else:
+                raise ValueError("Unexpected type code: 0")
+
+        else:
+
+            varstart = _read_long(f)
+            if varstart != 7:
+                raise Exception("VARSTART is not 7")
+
+            if rectypedesc['structure']:
+                record['data'] = _read_structure(f, rectypedesc['array_desc'],
+                                                    rectypedesc['struct_desc'])
+            elif rectypedesc['array']:
+                record['data'] = _read_array(f, rectypedesc['typecode'],
+                                                rectypedesc['array_desc'])
+            else:
+                dtype = rectypedesc['typecode']
+                record['data'] = _read_data(f, dtype)
+
+    elif record['rectype'] == "TIMESTAMP":
+
+        _skip_bytes(f, 4*256)
+        record['date'] = _read_string(f)
+        record['user'] = _read_string(f)
+        record['host'] = _read_string(f)
+
+    elif record['rectype'] == "VERSION":
+
+        record['format'] = _read_long(f)
+        record['arch'] = _read_string(f)
+        record['os'] = _read_string(f)
+        record['release'] = _read_string(f)
+
+    elif record['rectype'] == "IDENTIFICATON":
+
+        record['author'] = _read_string(f)
+        record['title'] = _read_string(f)
+        record['idcode'] = _read_string(f)
+
+    elif record['rectype'] == "NOTICE":
+
+        record['notice'] = _read_string(f)
+
+    elif record['rectype'] == "DESCRIPTION":
+
+        record['description'] = _read_string_data(f)
+
+    elif record['rectype'] == "HEAP_HEADER":
+
+        record['nvalues'] = _read_long(f)
+        record['indices'] = [_read_long(f) for _ in range(record['nvalues'])]
+
+    elif record['rectype'] == "COMMONBLOCK":
+
+        record['nvars'] = _read_long(f)
+        record['name'] = _read_string(f)
+        record['varnames'] = [_read_string(f) for _ in range(record['nvars'])]
+
+    elif record['rectype'] == "END_MARKER":
+
+        record['end'] = True
+
+    elif record['rectype'] == "UNKNOWN":
+
+        warnings.warn("Skipping UNKNOWN record")
+
+    elif record['rectype'] == "SYSTEM_VARIABLE":
+
+        warnings.warn("Skipping SYSTEM_VARIABLE record")
+
+    else:
+
+        raise Exception("record['rectype']=%s not implemented" %
+                                                            record['rectype'])
+
+    f.seek(nextrec)
+
+    return record
+
+
+def _read_typedesc(f):
+    '''Function to read in a type descriptor'''
+
+    typedesc = {'typecode': _read_long(f), 'varflags': _read_long(f)}
+
+    if typedesc['varflags'] & 2 == 2:
+        raise Exception("System variables not implemented")
+
+    typedesc['array'] = typedesc['varflags'] & 4 == 4
+    typedesc['structure'] = typedesc['varflags'] & 32 == 32
+
+    if typedesc['structure']:
+        typedesc['array_desc'] = _read_arraydesc(f)
+        typedesc['struct_desc'] = _read_structdesc(f)
+    elif typedesc['array']:
+        typedesc['array_desc'] = _read_arraydesc(f)
+
+    return typedesc
+
+
+def _read_arraydesc(f):
+    '''Function to read in an array descriptor'''
+
+    arraydesc = {'arrstart': _read_long(f)}
+
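+    # ARRSTART selects the descriptor layout: 8 is the standard 32-bit
+    # descriptor, 18 the (experimental) 64-bit variant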
+    if arraydesc['arrstart'] == 8:
+
+        _skip_bytes(f, 4)
+
+        arraydesc['nbytes'] = _read_long(f)
+        arraydesc['nelements'] = _read_long(f)
+        arraydesc['ndims'] = _read_long(f)
+
+        _skip_bytes(f, 8)
+
+        arraydesc['nmax'] = _read_long(f)
+
+        arraydesc['dims'] = [_read_long(f) for _ in range(arraydesc['nmax'])]
+
+    elif arraydesc['arrstart'] == 18:
+
+        warnings.warn("Using experimental 64-bit array read")
+
+        _skip_bytes(f, 8)
+
+        arraydesc['nbytes'] = _read_uint64(f)
+        arraydesc['nelements'] = _read_uint64(f)
+        arraydesc['ndims'] = _read_long(f)
+
+        _skip_bytes(f, 8)
+
+        arraydesc['nmax'] = 8
+
+        arraydesc['dims'] = []
+        for d in range(arraydesc['nmax']):
+            v = _read_long(f)
+            if v != 0:
+                raise Exception("Expected a zero in ARRAY_DESC")
+            arraydesc['dims'].append(_read_long(f))
+
+    else:
+
+        raise Exception("Unknown ARRSTART: %i" % arraydesc['arrstart'])
+
+    return arraydesc
+
+
+def _read_structdesc(f):
+    '''Function to read in a structure descriptor'''
+
+    structdesc = {}
+
+    structstart = _read_long(f)
+    if structstart != 9:
+        raise Exception("STRUCTSTART should be 9")
+
+    structdesc['name'] = _read_string(f)
+    predef = _read_long(f)
+    structdesc['ntags'] = _read_long(f)
+    structdesc['nbytes'] = _read_long(f)
+
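+    # PREDEF is a bitmask: 1 = predefined structure, 2 = inherits,
+    # 4 = is a superclass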
+    structdesc['predef'] = predef & 1
+    structdesc['inherits'] = predef & 2
+    structdesc['is_super'] = predef & 4
+
+    if not structdesc['predef']:
+
+        structdesc['tagtable'] = [_read_tagdesc(f)
+                                  for _ in range(structdesc['ntags'])]
+
+        for tag in structdesc['tagtable']:
+            tag['name'] = _read_string(f)
+
+        structdesc['arrtable'] = {tag['name']: _read_arraydesc(f)
+                                  for tag in structdesc['tagtable']
+                                  if tag['array']}
+
+        structdesc['structtable'] = {tag['name']: _read_structdesc(f)
+                                     for tag in structdesc['tagtable']
+                                     if tag['structure']}
+
+        if structdesc['inherits'] or structdesc['is_super']:
+            structdesc['classname'] = _read_string(f)
+            structdesc['nsupclasses'] = _read_long(f)
+            structdesc['supclassnames'] = [
+                _read_string(f) for _ in range(structdesc['nsupclasses'])]
+            structdesc['supclasstable'] = [
+                _read_structdesc(f) for _ in range(structdesc['nsupclasses'])]
+
+        STRUCT_DICT[structdesc['name']] = structdesc
+
+    else:
+
+        if structdesc['name'] not in STRUCT_DICT:
+            raise Exception("PREDEF=1 but can't find definition")
+
+        structdesc = STRUCT_DICT[structdesc['name']]
+
+    return structdesc
+
+
+def _read_tagdesc(f):
+    '''Function to read in a tag descriptor'''
+
+    tagdesc = {'offset': _read_long(f)}
+
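+    # An offset of -1 flags a 64-bit offset stored in the following 8 bytes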
+    if tagdesc['offset'] == -1:
+        tagdesc['offset'] = _read_uint64(f)
+
+    tagdesc['typecode'] = _read_long(f)
+    tagflags = _read_long(f)
+
+    tagdesc['array'] = tagflags & 4 == 4
+    tagdesc['structure'] = tagflags & 32 == 32
+    tagdesc['scalar'] = tagdesc['typecode'] in DTYPE_DICT
+    # Assume '10'x is scalar
+
+    return tagdesc
+
+
+def _replace_heap(variable, heap):
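+    '''
+    Recursively dereference Pointer objects using the heap dictionary.
+
+    Returns a (replace, value) tuple, where `replace` indicates whether the
+    caller should substitute `value` for the original variable.
+    '''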
+
+    if isinstance(variable, Pointer):
+
+        while isinstance(variable, Pointer):
+
+            if variable.index == 0:
+                variable = None
+            else:
+                if variable.index in heap:
+                    variable = heap[variable.index]
+                else:
+                    warnings.warn("Variable referenced by pointer not found "
+                                  "in heap: variable will be set to None")
+                    variable = None
+
+        replace, new = _replace_heap(variable, heap)
+
+        if replace:
+            variable = new
+
+        return True, variable
+
+    elif isinstance(variable, np.core.records.recarray):
+
+        # Loop over records
+        for ir, record in enumerate(variable):
+
+            replace, new = _replace_heap(record, heap)
+
+            if replace:
+                variable[ir] = new
+
+        return False, variable
+
+    elif isinstance(variable, np.core.records.record):
+
+        # Loop over values
+        for iv, value in enumerate(variable):
+
+            replace, new = _replace_heap(value, heap)
+
+            if replace:
+                variable[iv] = new
+
+        return False, variable
+
+    elif isinstance(variable, np.ndarray):
+
+        # Loop over values if type is np.object_
+        if variable.dtype.type is np.object_:
+
+            for iv in range(variable.size):
+
+                replace, new = _replace_heap(variable.item(iv), heap)
+
+                if replace:
+                    variable.itemset(iv, new)
+
+        return False, variable
+
+    else:
+
+        return False, variable
+
+
+class AttrDict(dict):
+    '''
+    A case-insensitive dictionary with access via item, attribute, and call
+    notations:
+
+        >>> d = AttrDict()
+        >>> d['Variable'] = 123
+        >>> d['Variable']
+        123
+        >>> d.Variable
+        123
+        >>> d.variable
+        123
+        >>> d('VARIABLE')
+        123
+        >>> d['missing']
+        Traceback (most recent call last):
+        ...
+        KeyError: 'missing'
+        >>> d.missing
+        Traceback (most recent call last):
+        ...
+        AttributeError: 'AttrDict' object has no attribute 'missing'
+    '''
+
+    def __init__(self, init={}):
+        dict.__init__(self, init)
+
+    def __getitem__(self, name):
+        return super().__getitem__(name.lower())
+
+    def __setitem__(self, key, value):
+        return super().__setitem__(key.lower(), value)
+
+    def __getattr__(self, name):
+        try:
+            return self.__getitem__(name)
+        except KeyError:
+            raise AttributeError(
+                f"'{type(self)}' object has no attribute '{name}'") from None
+
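+    # Attribute assignment and call notation reuse the case-insensitive
+    # item access defined above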
+    __setattr__ = __setitem__
+    __call__ = __getitem__
+
+
+def readsav(file_name, idict=None, python_dict=False,
+            uncompressed_file_name=None, verbose=False):
+    """
+    Read an IDL .sav file.
+
+    Parameters
+    ----------
+    file_name : str
+        Name of the IDL save file.
+    idict : dict, optional
+        Dictionary in which to insert .sav file variables.
+    python_dict : bool, optional
+        By default, the object returned is not a Python dictionary, but a
+        case-insensitive dictionary with item, attribute, and call access
+        to variables. To get a standard Python dictionary, set this option
+        to True.
+    uncompressed_file_name : str, optional
+        This option only has an effect for .sav files written with the
+        /compress option. If a file name is specified, compressed .sav
+        files are uncompressed to this file. Otherwise, readsav will use
+        the `tempfile` module to determine a temporary filename
+        automatically, and will remove the temporary file upon successfully
+        reading it in.
+    verbose : bool, optional
+        Whether to print out information about the save file, including
+        the records read, and available variables.
+
+    Returns
+    -------
+    idl_dict : AttrDict or dict
+        If `python_dict` is set to False (default), this function returns a
+        case-insensitive dictionary with item, attribute, and call access
+        to variables. If `python_dict` is set to True, this function
+        returns a Python dictionary with all variable names in lowercase.
+        If `idict` was specified, then variables are written to the
+        dictionary specified, and the updated dictionary is returned.
+
+    Examples
+    --------
+    >>> from os.path import dirname, join as pjoin
+    >>> import scipy.io as sio
+    >>> from scipy.io import readsav
+
+    Get the filename for an example .sav file from the tests/data directory.
+
+    >>> data_dir = pjoin(dirname(sio.__file__), 'tests', 'data')
+    >>> sav_fname = pjoin(data_dir, 'array_float32_1d.sav')
+
+    Load the .sav file contents.
+
+    >>> sav_data = readsav(sav_fname)
+
+    Get keys of the .sav file contents.
+
+    >>> print(sav_data.keys())
+    dict_keys(['array1d'])
+
+    Access an item with a key.
+
+    >>> print(sav_data['array1d'])
+    [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
+     0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
+     0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
+     0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
+     0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
+     0. 0. 0.]
+
+    """
+
+    # Initialize record and variable holders
+    records = []
+    if python_dict or idict:
+        variables = {}
+    else:
+        variables = AttrDict()
+
+    # Open the IDL file
+    f = open(file_name, 'rb')
+
+    # Read the signature, which should be 'SR'
+    signature = _read_bytes(f, 2)
+    if signature != b'SR':
+        raise Exception("Invalid SIGNATURE: %s" % signature)
+
+    # Next, the record format, which is '\x00\x04' for normal .sav
+    # files, and '\x00\x06' for compressed .sav files.
+    recfmt = _read_bytes(f, 2)
+
+    if recfmt == b'\x00\x04':
+        pass
+
+    elif recfmt == b'\x00\x06':
+
+        if verbose:
+            print("IDL Save file is compressed")
+
+        if uncompressed_file_name:
+            fout = open(uncompressed_file_name, 'w+b')
+        else:
+            fout = tempfile.NamedTemporaryFile(suffix='.sav')
+
+        if verbose:
+            print(" -> expanding to %s" % fout.name)
+
+        # Write header
+        fout.write(b'SR\x00\x04')
+
+        # Cycle through records
+        while True:
+
+            # Read record type
+            rectype = _read_long(f)
+            fout.write(struct.pack('>l', int(rectype)))
+
+            # Read position of next record and return as int
+            nextrec = _read_uint32(f)
+            nextrec += _read_uint32(f) * 2**32
+
+            # Read the unknown 4 bytes
+            unknown = f.read(4)
+
+            # Check if the end of the file has been reached
+            if RECTYPE_DICT[rectype] == 'END_MARKER':
+                modval = np.int64(2**32)
+                fout.write(struct.pack('>I', int(nextrec) % modval))
+                fout.write(struct.pack('>I', int((nextrec - (nextrec % modval)) / modval)))
+                fout.write(unknown)
+                break
+
+            # Find current position
+            pos = f.tell()
+
+            # Decompress record
+            rec_string = zlib.decompress(f.read(nextrec-pos))
+
+            # Find new position of next record
+            nextrec = fout.tell() + len(rec_string) + 12
+
+            # Write out record
+            fout.write(struct.pack('>I', int(nextrec % 2**32)))
+            fout.write(struct.pack('>I', int((nextrec - (nextrec % 2**32)) / 2**32)))
+            fout.write(unknown)
+            fout.write(rec_string)
+
+        # Close the original compressed file
+        f.close()
+
+        # Set f to be the decompressed file, and skip the first four bytes
+        f = fout
+        f.seek(4)
+
+    else:
+        raise Exception("Invalid RECFMT: %s" % recfmt)
+
+    # Loop through records, and add them to the list
+    while True:
+        r = _read_record(f)
+        records.append(r)
+        if r.get('end'):
+            break
+
+    # Close the file
+    f.close()
+
+    # Find heap data variables
+    heap = {}
+    for r in records:
+        if r['rectype'] == "HEAP_DATA":
+            heap[r['heap_index']] = r['data']
+
+    # Find all variables
+    for r in records:
+        if r['rectype'] == "VARIABLE":
+            replace, new = _replace_heap(r['data'], heap)
+            if replace:
+                r['data'] = new
+            variables[r['varname'].lower()] = r['data']
+
+    if verbose:
+
+        # Print out timestamp info about the file
+        for record in records:
+            if record['rectype'] == "TIMESTAMP":
+                print("-"*50)
+                print("Date: %s" % record['date'])
+                print("User: %s" % record['user'])
+                print("Host: %s" % record['host'])
+                break
+
+        # Print out version info about the file
+        for record in records:
+            if record['rectype'] == "VERSION":
+                print("-"*50)
+                print("Format: %s" % record['format'])
+                print("Architecture: %s" % record['arch'])
+                print("Operating System: %s" % record['os'])
+                print("IDL Version: %s" % record['release'])
+                break
+
+        # Print out identification info about the file
+        for record in records:
+            if record['rectype'] == "IDENTIFICATON":
+                print("-"*50)
+                print("Author: %s" % record['author'])
+                print("Title: %s" % record['title'])
+                print("ID Code: %s" % record['idcode'])
+                break
+
+        # Print out descriptions saved with the file
+        for record in records:
+            if record['rectype'] == "DESCRIPTION":
+                print("-"*50)
+                print("Description: %s" % record['description'])
+                break
+
+        print("-"*50)
+        print("Successfully read %i records of which:" %
+                                            (len(records)))
+
+        # Create convenience list of record types
+        rectypes = [r['rectype'] for r in records]
+
+        for rt in set(rectypes):
+            if rt != 'END_MARKER':
+                print(" - %i are of type %s" % (rectypes.count(rt), rt))
+        print("-"*50)
+
+        if 'VARIABLE' in rectypes:
+            print("Available variables:")
+            for var in variables:
+                print(" - %s [%s]" % (var, type(variables[var])))
+            print("-"*50)
+
+    if idict:
+        for var in variables:
+            idict[var] = variables[var]
+        return idict
+    else:
+        return variables
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/_mmio.py b/__packaged__/coreml/.python_dependencies/scipy/io/_mmio.py
new file mode 100644
index 00000000..75d544a1
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/_mmio.py
@@ -0,0 +1,996 @@
+"""
+  Matrix Market I/O in Python.
+  See http://math.nist.gov/MatrixMarket/formats.html
+  for information about the Matrix Market format.
+"""
+#
+# Author: Pearu Peterson 
+# Created: October, 2004
+#
+# References:
+#  http://math.nist.gov/MatrixMarket/
+#
+import os
+
+import numpy as np
+from numpy import (asarray, real, imag, conj, zeros, ndarray, concatenate,
+                   ones, can_cast)
+
+from scipy.sparse import coo_matrix, isspmatrix
+
+__all__ = ['mminfo', 'mmread', 'mmwrite', 'MMFile']
+
+
+# -----------------------------------------------------------------------------
+def asstr(s):
+    if isinstance(s, bytes):
+        return s.decode('latin1')
+    return str(s)
+
+
+def mminfo(source):
+    """
+    Return size and storage parameters from Matrix Market file-like 'source'.
+
+    Parameters
+    ----------
+    source : str or file-like
+        Matrix Market filename (extension .mtx) or open file-like object
+
+    Returns
+    -------
+    rows : int
+        Number of matrix rows.
+    cols : int
+        Number of matrix columns.
+    entries : int
+        Number of non-zero entries of a sparse matrix
+        or rows*cols for a dense matrix.
+    format : str
+        Either 'coordinate' or 'array'.
+    field : str
+        Either 'real', 'complex', 'pattern', or 'integer'.
+    symmetry : str
+        Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'.
+
+    Examples
+    --------
+    >>> from io import StringIO
+    >>> from scipy.io import mminfo
+
+    >>> text = '''%%MatrixMarket matrix coordinate real general
+    ...  5 5 7
+    ...  2 3 1.0
+    ...  3 4 2.0
+    ...  3 5 3.0
+    ...  4 1 4.0
+    ...  4 2 5.0
+    ...  4 3 6.0
+    ...  4 4 7.0
+    ... '''
+
+    ``mminfo(source)`` returns the number of rows, number of columns, number
+    of entries, format, field type and symmetry attribute of the source file.
+
+    >>> mminfo(StringIO(text))
+    (5, 5, 7, 'coordinate', 'real', 'general')
+    """
+    return MMFile.info(source)
+
+# -----------------------------------------------------------------------------
+
+
+def mmread(source):
+    """
+    Reads the contents of a Matrix Market file-like 'source' into a matrix.
+
+    Parameters
+    ----------
+    source : str or file-like
+        Matrix Market filename (extensions .mtx, .mtz.gz)
+        or open file-like object.
+
+    Returns
+    -------
+    a : ndarray or coo_matrix
+        Dense or sparse matrix depending on the matrix format in the
+        Matrix Market file.
+
+    Examples
+    --------
+    >>> from io import StringIO
+    >>> from scipy.io import mmread
+
+    >>> text = '''%%MatrixMarket matrix coordinate real general
+    ...  5 5 7
+    ...  2 3 1.0
+    ...  3 4 2.0
+    ...  3 5 3.0
+    ...  4 1 4.0
+    ...  4 2 5.0
+    ...  4 3 6.0
+    ...  4 4 7.0
+    ... '''
+
+    ``mmread(source)`` returns the data as sparse matrix in COO format.
+
+    >>> m = mmread(StringIO(text))
+    >>> m
+    <5x5 sparse matrix of type '<class 'numpy.float64'>'
+    with 7 stored elements in COOrdinate format>
+    >>> m.A
+    array([[0., 0., 0., 0., 0.],
+           [0., 0., 1., 0., 0.],
+           [0., 0., 0., 2., 3.],
+           [4., 5., 6., 7., 0.],
+           [0., 0., 0., 0., 0.]])
+    """
+    return MMFile().read(source)
+
+# -----------------------------------------------------------------------------
+
+
+def mmwrite(target, a, comment='', field=None, precision=None, symmetry=None):
+    r"""
+    Writes the sparse or dense array `a` to Matrix Market file-like `target`.
+
+    Parameters
+    ----------
+    target : str or file-like
+        Matrix Market filename (extension .mtx) or open file-like object.
+    a : array like
+        Sparse or dense 2-D array.
+    comment : str, optional
+        Comments to be prepended to the Matrix Market file.
+    field : None or str, optional
+        Either 'real', 'complex', 'pattern', or 'integer'.
+    precision : None or int, optional
+        Number of digits to display for real or complex values.
+    symmetry : None or str, optional
+        Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'.
+        If symmetry is None the symmetry type of 'a' is determined by its
+        values.
+
+    Returns
+    -------
+    None
+
+    Examples
+    --------
+    >>> from io import BytesIO
+    >>> import numpy as np
+    >>> from scipy.sparse import coo_matrix
+    >>> from scipy.io import mmwrite
+
+    Write a small NumPy array to a matrix market file.  The file will be
+    written in the ``'array'`` format.
+
+    >>> a = np.array([[1.0, 0, 0, 0], [0, 2.5, 0, 6.25]])
+    >>> target = BytesIO()
+    >>> mmwrite(target, a)
+    >>> print(target.getvalue().decode('latin1'))
+    %%MatrixMarket matrix array real general
+    %
+    2 4
+    1.0000000000000000e+00
+    0.0000000000000000e+00
+    0.0000000000000000e+00
+    2.5000000000000000e+00
+    0.0000000000000000e+00
+    0.0000000000000000e+00
+    0.0000000000000000e+00
+    6.2500000000000000e+00
+
+    Add a comment to the output file, and set the precision to 3.
+
+    >>> target = BytesIO()
+    >>> mmwrite(target, a, comment='\n Some test data.\n', precision=3)
+    >>> print(target.getvalue().decode('latin1'))
+    %%MatrixMarket matrix array real general
+    %
+    % Some test data.
+    %
+    2 4
+    1.000e+00
+    0.000e+00
+    0.000e+00
+    2.500e+00
+    0.000e+00
+    0.000e+00
+    0.000e+00
+    6.250e+00
+
+    Convert to a sparse matrix before calling ``mmwrite``.  This will
+    result in the output format being ``'coordinate'`` rather than
+    ``'array'``.
+
+    >>> target = BytesIO()
+    >>> mmwrite(target, coo_matrix(a), precision=3)
+    >>> print(target.getvalue().decode('latin1'))
+    %%MatrixMarket matrix coordinate real general
+    %
+    2 4 3
+    1 1 1.00e+00
+    2 2 2.50e+00
+    2 4 6.25e+00
+
+    Write a complex Hermitian array to a matrix market file.  Note that
+    only six values are actually written to the file; the other values
+    are implied by the symmetry.
+
+    >>> z = np.array([[3, 1+2j, 4-3j], [1-2j, 1, -5j], [4+3j, 5j, 2.5]])
+    >>> z
+    array([[ 3. +0.j,  1. +2.j,  4. -3.j],
+           [ 1. -2.j,  1. +0.j, -0. -5.j],
+           [ 4. +3.j,  0. +5.j,  2.5+0.j]])
+
+    >>> target = BytesIO()
+    >>> mmwrite(target, z, precision=2)
+    >>> print(target.getvalue().decode('latin1'))
+    %%MatrixMarket matrix array complex hermitian
+    %
+    3 3
+    3.00e+00 0.00e+00
+    1.00e+00 -2.00e+00
+    4.00e+00 3.00e+00
+    1.00e+00 0.00e+00
+    0.00e+00 5.00e+00
+    2.50e+00 0.00e+00
+
+    """
+    MMFile().write(target, a, comment, field, precision, symmetry)
+
+
+###############################################################################
+class MMFile:
+    __slots__ = ('_rows',
+                 '_cols',
+                 '_entries',
+                 '_format',
+                 '_field',
+                 '_symmetry')
+
+    @property
+    def rows(self):
+        return self._rows
+
+    @property
+    def cols(self):
+        return self._cols
+
+    @property
+    def entries(self):
+        return self._entries
+
+    @property
+    def format(self):
+        return self._format
+
+    @property
+    def field(self):
+        return self._field
+
+    @property
+    def symmetry(self):
+        return self._symmetry
+
+    @property
+    def has_symmetry(self):
+        return self._symmetry in (self.SYMMETRY_SYMMETRIC,
+                                  self.SYMMETRY_SKEW_SYMMETRIC,
+                                  self.SYMMETRY_HERMITIAN)
+
+    # format values
+    FORMAT_COORDINATE = 'coordinate'
+    FORMAT_ARRAY = 'array'
+    FORMAT_VALUES = (FORMAT_COORDINATE, FORMAT_ARRAY)
+
+    @classmethod
+    def _validate_format(self, format):
+        if format not in self.FORMAT_VALUES:
+            raise ValueError('unknown format type %s, must be one of %s' %
+                             (format, self.FORMAT_VALUES))
+
+    # field values
+    FIELD_INTEGER = 'integer'
+    FIELD_UNSIGNED = 'unsigned-integer'
+    FIELD_REAL = 'real'
+    FIELD_COMPLEX = 'complex'
+    FIELD_PATTERN = 'pattern'
+    FIELD_VALUES = (FIELD_INTEGER, FIELD_UNSIGNED, FIELD_REAL, FIELD_COMPLEX,
+                    FIELD_PATTERN)
+
+    @classmethod
+    def _validate_field(self, field):
+        if field not in self.FIELD_VALUES:
+            raise ValueError('unknown field type %s, must be one of %s' %
+                             (field, self.FIELD_VALUES))
+
+    # symmetry values
+    SYMMETRY_GENERAL = 'general'
+    SYMMETRY_SYMMETRIC = 'symmetric'
+    SYMMETRY_SKEW_SYMMETRIC = 'skew-symmetric'
+    SYMMETRY_HERMITIAN = 'hermitian'
+    SYMMETRY_VALUES = (SYMMETRY_GENERAL, SYMMETRY_SYMMETRIC,
+                       SYMMETRY_SKEW_SYMMETRIC, SYMMETRY_HERMITIAN)
+
+    @classmethod
+    def _validate_symmetry(self, symmetry):
+        if symmetry not in self.SYMMETRY_VALUES:
+            raise ValueError('unknown symmetry type %s, must be one of %s' %
+                             (symmetry, self.SYMMETRY_VALUES))
+
+    DTYPES_BY_FIELD = {FIELD_INTEGER: 'intp',
+                       FIELD_UNSIGNED: 'uint64',
+                       FIELD_REAL: 'd',
+                       FIELD_COMPLEX: 'D',
+                       FIELD_PATTERN: 'd'}
+
+    # -------------------------------------------------------------------------
+    @staticmethod
+    def reader():
+        pass
+
+    # -------------------------------------------------------------------------
+    @staticmethod
+    def writer():
+        pass
+
+    # -------------------------------------------------------------------------
+    @classmethod
+    def info(self, source):
+        """
+        Return size, storage parameters from Matrix Market file-like 'source'.
+
+        Parameters
+        ----------
+        source : str or file-like
+            Matrix Market filename (extension .mtx) or open file-like object
+
+        Returns
+        -------
+        rows : int
+            Number of matrix rows.
+        cols : int
+            Number of matrix columns.
+        entries : int
+            Number of non-zero entries of a sparse matrix
+            or rows*cols for a dense matrix.
+        format : str
+            Either 'coordinate' or 'array'.
+        field : str
+            Either 'real', 'complex', 'pattern', or 'integer'.
+        symmetry : str
+            Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'.
+        """
+
+        stream, close_it = self._open(source)
+
+        try:
+
+            # read and validate header line
+            line = stream.readline()
+            mmid, matrix, format, field, symmetry = \
+                [asstr(part.strip()) for part in line.split()]
+            if not mmid.startswith('%%MatrixMarket'):
+                raise ValueError('source is not in Matrix Market format')
+            if not matrix.lower() == 'matrix':
+                raise ValueError("Problem reading file header: " + line)
+
+            # http://math.nist.gov/MatrixMarket/formats.html
+            if format.lower() == 'array':
+                format = self.FORMAT_ARRAY
+            elif format.lower() == 'coordinate':
+                format = self.FORMAT_COORDINATE
+
+            # skip comments
+            # line.startswith('%')
+            while line and line[0] in ['%', 37]:
+                line = stream.readline()
+
+            # skip empty lines
+            while not line.strip():
+                line = stream.readline()
+
+            split_line = line.split()
+            if format == self.FORMAT_ARRAY:
+                if not len(split_line) == 2:
+                    raise ValueError("Header line not of length 2: " +
+                                     line.decode('ascii'))
+                rows, cols = map(int, split_line)
+                entries = rows * cols
+            else:
+                if not len(split_line) == 3:
+                    raise ValueError("Header line not of length 3: " +
+                                     line.decode('ascii'))
+                rows, cols, entries = map(int, split_line)
+
+            return (rows, cols, entries, format, field.lower(),
+                    symmetry.lower())
+
+        finally:
+            if close_it:
+                stream.close()
+
+    # -------------------------------------------------------------------------
+    @staticmethod
+    def _open(filespec, mode='rb'):
+        """ Return an open file stream for reading based on source.
+
+        If source is a file name, open it (after trying to find it with mtx and
+        gzipped mtx extensions). Otherwise, just return source.
+
+        Parameters
+        ----------
+        filespec : str or file-like
+            String giving file name or file-like object
+        mode : str, optional
+            Mode with which to open file, if `filespec` is a file name.
+
+        Returns
+        -------
+        fobj : file-like
+            Open file-like object.
+        close_it : bool
+            True if the calling function should close this file when done,
+            false otherwise.
+        """
+        # If 'filespec' is path-like (str, pathlib.Path, os.DirEntry, other class
+        # implementing a '__fspath__' method), try to convert it to str. If this
+        # fails by throwing a 'TypeError', assume it's an open file handle and
+        # return it as-is.
+        try:
+            filespec = os.fspath(filespec)
+        except TypeError:
+            return filespec, False
+
+        # 'filespec' is definitely a str now
+
+        # open for reading
+        if mode[0] == 'r':
+
+            # determine filename plus extension
+            if not os.path.isfile(filespec):
+                if os.path.isfile(filespec+'.mtx'):
+                    filespec = filespec + '.mtx'
+                elif os.path.isfile(filespec+'.mtx.gz'):
+                    filespec = filespec + '.mtx.gz'
+                elif os.path.isfile(filespec+'.mtx.bz2'):
+                    filespec = filespec + '.mtx.bz2'
+            # open filename
+            if filespec.endswith('.gz'):
+                import gzip
+                stream = gzip.open(filespec, mode)
+            elif filespec.endswith('.bz2'):
+                import bz2
+                stream = bz2.BZ2File(filespec, 'rb')
+            else:
+                stream = open(filespec, mode)
+
+        # open for writing
+        else:
+            if filespec[-4:] != '.mtx':
+                filespec = filespec + '.mtx'
+            stream = open(filespec, mode)
+
+        return stream, True
+
+    # -------------------------------------------------------------------------
+    @staticmethod
+    def _get_symmetry(a):
+        m, n = a.shape
+        if m != n:
+            return MMFile.SYMMETRY_GENERAL
+        issymm = True
+        isskew = True
+        isherm = a.dtype.char in 'FD'
+
+        # sparse input
+        if isspmatrix(a):
+            # check if number of nonzero entries of lower and upper triangle
+            # matrix are equal
+            a = a.tocoo()
+            (row, col) = a.nonzero()
+            if (row < col).sum() != (row > col).sum():
+                return MMFile.SYMMETRY_GENERAL
+
+            # define iterator over symmetric pair entries
+            a = a.todok()
+
+            def symm_iterator():
+                for ((i, j), aij) in a.items():
+                    if i > j:
+                        aji = a[j, i]
+                        yield (aij, aji, False)
+                    elif i == j:
+                        yield (aij, aij, True)
+
+        # non-sparse input
+        else:
+            # define iterator over symmetric pair entries
+            def symm_iterator():
+                for j in range(n):
+                    for i in range(j, n):
+                        aij, aji = a[i][j], a[j][i]
+                        yield (aij, aji, i == j)
+
+        # check for symmetry
+        # yields aij, aji, is_diagonal
+        for (aij, aji, is_diagonal) in symm_iterator():
+            if isskew and is_diagonal and aij != 0:
+                isskew = False
+            else:
+                if issymm and aij != aji:
+                    issymm = False
+                with np.errstate(over="ignore"):
+                    # This can give a warning for uint dtypes, so silence that
+                    if isskew and aij != -aji:
+                        isskew = False
+                if isherm and aij != conj(aji):
+                    isherm = False
+            if not (issymm or isskew or isherm):
+                break
+
+        # return symmetry value
+        if issymm:
+            return MMFile.SYMMETRY_SYMMETRIC
+        if isskew:
+            return MMFile.SYMMETRY_SKEW_SYMMETRIC
+        if isherm:
+            return MMFile.SYMMETRY_HERMITIAN
+        return MMFile.SYMMETRY_GENERAL
+
+    # -------------------------------------------------------------------------
+    @staticmethod
+    def _field_template(field, precision):
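+        # Printf-style templates; the '%%' escapes bake the precision into
+        # the real/complex formats (e.g., precision=3 gives '%.3e\n')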
+        return {MMFile.FIELD_REAL: '%%.%ie\n' % precision,
+                MMFile.FIELD_INTEGER: '%i\n',
+                MMFile.FIELD_UNSIGNED: '%u\n',
+                MMFile.FIELD_COMPLEX: '%%.%ie %%.%ie\n' %
+                    (precision, precision)
+                }.get(field, None)
+
+    # -------------------------------------------------------------------------
+    def __init__(self, **kwargs):
+        self._init_attrs(**kwargs)
+
+    # -------------------------------------------------------------------------
+    def read(self, source):
+        """
+        Reads the contents of a Matrix Market file-like 'source' into a matrix.
+
+        Parameters
+        ----------
+        source : str or file-like
+            Matrix Market filename (extensions .mtx, .mtz.gz)
+            or open file object.
+
+        Returns
+        -------
+        a : ndarray or coo_matrix
+            Dense or sparse matrix depending on the matrix format in the
+            Matrix Market file.
+        """
+        stream, close_it = self._open(source)
+
+        try:
+            self._parse_header(stream)
+            return self._parse_body(stream)
+
+        finally:
+            if close_it:
+                stream.close()
+
+    # -------------------------------------------------------------------------
+    def write(self, target, a, comment='', field=None, precision=None,
+              symmetry=None):
+        """
+        Writes sparse or dense array `a` to Matrix Market file-like `target`.
+
+        Parameters
+        ----------
+        target : str or file-like
+            Matrix Market filename (extension .mtx) or open file-like object.
+        a : array like
+            Sparse or dense 2-D array.
+        comment : str, optional
+            Comments to be prepended to the Matrix Market file.
+        field : None or str, optional
+            Either 'real', 'complex', 'pattern', or 'integer'.
+        precision : None or int, optional
+            Number of digits to display for real or complex values.
+        symmetry : None or str, optional
+            Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'.
+            If symmetry is None the symmetry type of 'a' is determined by its
+            values.
+        """
+
+        stream, close_it = self._open(target, 'wb')
+
+        try:
+            self._write(stream, a, comment, field, precision, symmetry)
+
+        finally:
+            if close_it:
+                stream.close()
+            else:
+                stream.flush()
+
+    # -------------------------------------------------------------------------
+    def _init_attrs(self, **kwargs):
+        """
+        Initialize each attribute with the corresponding keyword argument
+        value or a default of None
+        """
+
+        attrs = self.__class__.__slots__
+        public_attrs = [attr[1:] for attr in attrs]
+        invalid_keys = set(kwargs.keys()) - set(public_attrs)
+
+        if invalid_keys:
+            raise ValueError('found %s invalid keyword arguments, please only '
+                             'use %s' % (tuple(invalid_keys), public_attrs))
+
+        for attr in attrs:
+            setattr(self, attr, kwargs.get(attr[1:], None))
+
+    # -------------------------------------------------------------------------
+    def _parse_header(self, stream):
+        rows, cols, entries, format, field, symmetry = \
+            self.__class__.info(stream)
+        self._init_attrs(rows=rows, cols=cols, entries=entries, format=format,
+                         field=field, symmetry=symmetry)
+
+    # -------------------------------------------------------------------------
+    def _parse_body(self, stream):
+        rows, cols, entries, format, field, symm = (self.rows, self.cols,
+                                                    self.entries, self.format,
+                                                    self.field, self.symmetry)
+
+        try:
+            from scipy.sparse import coo_matrix
+        except ImportError:
+            coo_matrix = None
+
+        dtype = self.DTYPES_BY_FIELD.get(field, None)
+
+        has_symmetry = self.has_symmetry
+        is_integer = field == self.FIELD_INTEGER
+        is_unsigned_integer = field == self.FIELD_UNSIGNED
+        is_complex = field == self.FIELD_COMPLEX
+        is_skew = symm == self.SYMMETRY_SKEW_SYMMETRIC
+        is_herm = symm == self.SYMMETRY_HERMITIAN
+        is_pattern = field == self.FIELD_PATTERN
+
+        if format == self.FORMAT_ARRAY:
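+            # Dense 'array' data is listed in column-major order, so the row
+            # index i varies fastest as values are read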
+            a = zeros((rows, cols), dtype=dtype)
+            line = 1
+            i, j = 0, 0
+            if is_skew:
+                a[i, j] = 0
+                if i < rows - 1:
+                    i += 1
+            while line:
+                line = stream.readline()
+                # line.startswith('%')
+                if not line or line[0] in ['%', 37] or not line.strip():
+                    continue
+                if is_integer:
+                    aij = int(line)
+                elif is_unsigned_integer:
+                    aij = int(line)
+                elif is_complex:
+                    aij = complex(*map(float, line.split()))
+                else:
+                    aij = float(line)
+                a[i, j] = aij
+                if has_symmetry and i != j:
+                    if is_skew:
+                        a[j, i] = -aij
+                    elif is_herm:
+                        a[j, i] = conj(aij)
+                    else:
+                        a[j, i] = aij
+                if i < rows-1:
+                    i = i + 1
+                else:
+                    j = j + 1
+                    if not has_symmetry:
+                        i = 0
+                    else:
+                        i = j
+                        if is_skew:
+                            a[i, j] = 0
+                            if i < rows-1:
+                                i += 1
+
+            if is_skew:
+                if not (i in [0, j] and j == cols - 1):
+                    raise ValueError("Parse error, did not read all lines.")
+            else:
+                if not (i in [0, j] and j == cols):
+                    raise ValueError("Parse error, did not read all lines.")
+
+        elif format == self.FORMAT_COORDINATE and coo_matrix is None:
+            # Read sparse matrix to dense when coo_matrix is not available.
+            a = zeros((rows, cols), dtype=dtype)
+            line = 1
+            k = 0
+            while line:
+                line = stream.readline()
+                # line.startswith('%')
+                if not line or line[0] in ['%', 37] or not line.strip():
+                    continue
+                l = line.split()
+                i, j = map(int, l[:2])
+                i, j = i-1, j-1
+                if is_integer:
+                    aij = int(l[2])
+                elif is_unsigned_integer:
+                    aij = int(l[2])
+                elif is_complex:
+                    aij = complex(*map(float, l[2:]))
+                else:
+                    aij = float(l[2])
+                a[i, j] = aij
+                if has_symmetry and i != j:
+                    if is_skew:
+                        a[j, i] = -aij
+                    elif is_herm:
+                        a[j, i] = conj(aij)
+                    else:
+                        a[j, i] = aij
+                k = k + 1
+            if k != entries:
+                raise ValueError("Did not read all entries")
+
+        elif format == self.FORMAT_COORDINATE:
+            # Read sparse COOrdinate format
+
+            if entries == 0:
+                # empty matrix
+                return coo_matrix((rows, cols), dtype=dtype)
+
+            I = zeros(entries, dtype='intc')
+            J = zeros(entries, dtype='intc')
+            if is_pattern:
+                V = ones(entries, dtype='int8')
+            elif is_integer:
+                V = zeros(entries, dtype='intp')
+            elif is_unsigned_integer:
+                V = zeros(entries, dtype='uint64')
+            elif is_complex:
+                V = zeros(entries, dtype='complex')
+            else:
+                V = zeros(entries, dtype='float')
+
+            entry_number = 0
+            for line in stream:
+                # line.startswith('%')
+                if not line or line[0] in ['%', 37] or not line.strip():
+                    continue
+
+                if entry_number+1 > entries:
+                    raise ValueError("'entries' in header is smaller than "
+                                     "number of entries")
+                l = line.split()
+                I[entry_number], J[entry_number] = map(int, l[:2])
+
+                if not is_pattern:
+                    if is_integer:
+                        V[entry_number] = int(l[2])
+                    elif is_unsigned_integer:
+                        V[entry_number] = int(l[2])
+                    elif is_complex:
+                        V[entry_number] = complex(*map(float, l[2:]))
+                    else:
+                        V[entry_number] = float(l[2])
+                entry_number += 1
+            if entry_number < entries:
+                raise ValueError("'entries' in header is larger than "
+                                 "number of entries")
+
+            I -= 1  # adjust indices (base 1 -> base 0)
+            J -= 1
+
+            if has_symmetry:
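+                # Mirror the stored triangle: append the swapped (J, I) index
+                # pairs for all off-diagonal entries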
+                mask = (I != J)       # off diagonal mask
+                od_I = I[mask]
+                od_J = J[mask]
+                od_V = V[mask]
+
+                I = concatenate((I, od_J))
+                J = concatenate((J, od_I))
+
+                if is_skew:
+                    od_V *= -1
+                elif is_herm:
+                    od_V = od_V.conjugate()
+
+                V = concatenate((V, od_V))
+
+            a = coo_matrix((V, (I, J)), shape=(rows, cols), dtype=dtype)
+        else:
+            raise NotImplementedError(format)
+
+        return a
+
+    #  ------------------------------------------------------------------------
+    def _write(self, stream, a, comment='', field=None, precision=None,
+               symmetry=None):
+        if isinstance(a, list) or isinstance(a, ndarray) or \
+           isinstance(a, tuple) or hasattr(a, '__array__'):
+            rep = self.FORMAT_ARRAY
+            a = asarray(a)
+            if len(a.shape) != 2:
+                raise ValueError('Expected 2 dimensional array')
+            rows, cols = a.shape
+
+            if field is not None:
+
+                if field == self.FIELD_INTEGER:
+                    if not can_cast(a.dtype, 'intp'):
+                        raise OverflowError("mmwrite does not support integer "
+                                            "dtypes larger than native 'intp'.")
+                    a = a.astype('intp')
+                elif field == self.FIELD_REAL:
+                    if a.dtype.char not in 'fd':
+                        a = a.astype('d')
+                elif field == self.FIELD_COMPLEX:
+                    if a.dtype.char not in 'FD':
+                        a = a.astype('D')
+
+        else:
+            if not isspmatrix(a):
+                raise ValueError('unknown matrix type: %s' % type(a))
+
+            rep = 'coordinate'
+            rows, cols = a.shape
+
+        typecode = a.dtype.char
+
+        if precision is None:
+            if typecode in 'fF':
+                precision = 8
+            else:
+                precision = 16
+        if field is None:
+            kind = a.dtype.kind
+            if kind == 'i':
+                if not can_cast(a.dtype, 'intp'):
+                    raise OverflowError("mmwrite does not support integer "
+                                        "dtypes larger than native 'intp'.")
+                field = 'integer'
+            elif kind == 'f':
+                field = 'real'
+            elif kind == 'c':
+                field = 'complex'
+            elif kind == 'u':
+                field = 'unsigned-integer'
+            else:
+                raise TypeError('unexpected dtype kind ' + kind)
+
+        if symmetry is None:
+            symmetry = self._get_symmetry(a)
+
+        # validate rep, field, and symmetry
+        self.__class__._validate_format(rep)
+        self.__class__._validate_field(field)
+        self.__class__._validate_symmetry(symmetry)
+
+        # write initial header line
+        data = f'%%MatrixMarket matrix {rep} {field} {symmetry}\n'
+        stream.write(data.encode('latin1'))
+
+        # write comments
+        for line in comment.split('\n'):
+            data = '%%%s\n' % (line)
+            stream.write(data.encode('latin1'))
+
+        template = self._field_template(field, precision)
+        # write dense format
+        if rep == self.FORMAT_ARRAY:
+            # write shape spec
+            data = '%i %i\n' % (rows, cols)
+            stream.write(data.encode('latin1'))
+
+            if field in (self.FIELD_INTEGER, self.FIELD_REAL,
+                         self.FIELD_UNSIGNED):
+                if symmetry == self.SYMMETRY_GENERAL:
+                    for j in range(cols):
+                        for i in range(rows):
+                            data = template % a[i, j]
+                            stream.write(data.encode('latin1'))
+
+                elif symmetry == self.SYMMETRY_SKEW_SYMMETRIC:
+                    for j in range(cols):
+                        for i in range(j + 1, rows):
+                            data = template % a[i, j]
+                            stream.write(data.encode('latin1'))
+
+                else:
+                    for j in range(cols):
+                        for i in range(j, rows):
+                            data = template % a[i, j]
+                            stream.write(data.encode('latin1'))
+
+            elif field == self.FIELD_COMPLEX:
+
+                if symmetry == self.SYMMETRY_GENERAL:
+                    for j in range(cols):
+                        for i in range(rows):
+                            aij = a[i, j]
+                            data = template % (real(aij), imag(aij))
+                            stream.write(data.encode('latin1'))
+                else:
+                    for j in range(cols):
+                        for i in range(j, rows):
+                            aij = a[i, j]
+                            data = template % (real(aij), imag(aij))
+                            stream.write(data.encode('latin1'))
+
+            elif field == self.FIELD_PATTERN:
+                raise ValueError('pattern type inconsistent with dense format')
+
+            else:
+                raise TypeError('Unknown field type %s' % field)
+
+        # write sparse format
+        else:
+            coo = a.tocoo()  # convert to COOrdinate format
+
+            # if symmetry format used, remove values above main diagonal
+            if symmetry != self.SYMMETRY_GENERAL:
+                lower_triangle_mask = coo.row >= coo.col
+                coo = coo_matrix((coo.data[lower_triangle_mask],
+                                 (coo.row[lower_triangle_mask],
+                                  coo.col[lower_triangle_mask])),
+                                 shape=coo.shape)
+
+            # write shape spec
+            data = '%i %i %i\n' % (rows, cols, coo.nnz)
+            stream.write(data.encode('latin1'))
+
+            template = self._field_template(field, precision-1)
+
+            if field == self.FIELD_PATTERN:
+                for r, c in zip(coo.row+1, coo.col+1):
+                    data = "%i %i\n" % (r, c)
+                    stream.write(data.encode('latin1'))
+            elif field in (self.FIELD_INTEGER, self.FIELD_REAL,
+                           self.FIELD_UNSIGNED):
+                for r, c, d in zip(coo.row+1, coo.col+1, coo.data):
+                    data = ("%i %i " % (r, c)) + (template % d)
+                    stream.write(data.encode('latin1'))
+            elif field == self.FIELD_COMPLEX:
+                for r, c, d in zip(coo.row+1, coo.col+1, coo.data):
+                    data = ("%i %i " % (r, c)) + (template % (d.real, d.imag))
+                    stream.write(data.encode('latin1'))
+            else:
+                raise TypeError('Unknown field type %s' % field)
+
+
+def _is_fromfile_compatible(stream):
+    """
+    Check whether `stream` is compatible with numpy.fromfile.
+
+    Passing a gzipped file object to ``fromfile/fromstring`` doesn't work with
+    Python 3.
+    """
+
+    bad_cls = []
+    try:
+        import gzip
+        bad_cls.append(gzip.GzipFile)
+    except ImportError:
+        pass
+    try:
+        import bz2
+        bad_cls.append(bz2.BZ2File)
+    except ImportError:
+        pass
+
+    bad_cls = tuple(bad_cls)
+    return not isinstance(stream, bad_cls)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/_netcdf.py b/__packaged__/coreml/.python_dependencies/scipy/io/_netcdf.py
new file mode 100644
index 00000000..64b64324
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/_netcdf.py
@@ -0,0 +1,1088 @@
+"""
+NetCDF reader/writer module.
+
+This module is used to read and create NetCDF files. NetCDF files are
+accessed through the `netcdf_file` object. Data written to and from NetCDF
+files are contained in `netcdf_variable` objects. Attributes are given
+as member variables of the `netcdf_file` and `netcdf_variable` objects.
+
+This module implements the Scientific.IO.NetCDF API to read and create
+NetCDF files. The same API is also used in the PyNIO and pynetcdf
+modules, allowing these modules to be used interchangeably when working
+with NetCDF files.
+
+Only NetCDF3 is supported here; for NetCDF4 see the ``netCDF4-python``
+package, which has a similar API.
+
+"""
+
+# TODO:
+# * properly implement ``_FillValue``.
+# * fix character variables.
+# * implement PAGESIZE for Python 2.6?
+
+# The Scientific.IO.NetCDF API allows attributes to be added directly to
+# instances of ``netcdf_file`` and ``netcdf_variable``. To differentiate
+# between user-set attributes and instance attributes, user-set attributes
+# are automatically stored in the ``_attributes`` attribute by overloading
+#``__setattr__``. This is the reason why the code sometimes uses
+#``obj.__dict__['key'] = value``, instead of simply ``obj.key = value``;
+# otherwise the key would be inserted into userspace attributes.
+
+
+__all__ = ['netcdf_file', 'netcdf_variable']
+
+
+import warnings
+import weakref
+from operator import mul
+from platform import python_implementation
+
+import mmap as mm
+
+import numpy as np
+from numpy import frombuffer, dtype, empty, array, asarray
+from numpy import little_endian as LITTLE_ENDIAN
+from functools import reduce
+
+
+IS_PYPY = python_implementation() == 'PyPy'
+
+ABSENT = b'\x00\x00\x00\x00\x00\x00\x00\x00'
+ZERO = b'\x00\x00\x00\x00'
+NC_BYTE = b'\x00\x00\x00\x01'
+NC_CHAR = b'\x00\x00\x00\x02'
+NC_SHORT = b'\x00\x00\x00\x03'
+NC_INT = b'\x00\x00\x00\x04'
+NC_FLOAT = b'\x00\x00\x00\x05'
+NC_DOUBLE = b'\x00\x00\x00\x06'
+NC_DIMENSION = b'\x00\x00\x00\n'
+NC_VARIABLE = b'\x00\x00\x00\x0b'
+NC_ATTRIBUTE = b'\x00\x00\x00\x0c'
+FILL_BYTE = b'\x81'
+FILL_CHAR = b'\x00'
+FILL_SHORT = b'\x80\x01'
+FILL_INT = b'\x80\x00\x00\x01'
+FILL_FLOAT = b'\x7C\xF0\x00\x00'
+FILL_DOUBLE = b'\x47\x9E\x00\x00\x00\x00\x00\x00'
+
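+# Map each NetCDF type tag to its (numpy dtype character, size in bytes) pair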
+TYPEMAP = {NC_BYTE: ('b', 1),
+           NC_CHAR: ('c', 1),
+           NC_SHORT: ('h', 2),
+           NC_INT: ('i', 4),
+           NC_FLOAT: ('f', 4),
+           NC_DOUBLE: ('d', 8)}
+
+FILLMAP = {NC_BYTE: FILL_BYTE,
+           NC_CHAR: FILL_CHAR,
+           NC_SHORT: FILL_SHORT,
+           NC_INT: FILL_INT,
+           NC_FLOAT: FILL_FLOAT,
+           NC_DOUBLE: FILL_DOUBLE}
+
+REVERSE = {('b', 1): NC_BYTE,
+           ('B', 1): NC_CHAR,
+           ('c', 1): NC_CHAR,
+           ('h', 2): NC_SHORT,
+           ('i', 4): NC_INT,
+           ('f', 4): NC_FLOAT,
+           ('d', 8): NC_DOUBLE,
+
+           # these come from asarray(1).dtype.char and asarray('foo').dtype.char,
+           # used when getting the types from generic attributes.
+           ('l', 4): NC_INT,
+           ('S', 1): NC_CHAR}
+
+
+class netcdf_file:
+    """
+    A file object for NetCDF data.
+
+    A `netcdf_file` object has two standard attributes: `dimensions` and
+    `variables`. The values of both are dictionaries, mapping dimension
+    names to their associated lengths and variable names to variables,
+    respectively. Application programs should never modify these
+    dictionaries.
+
+    All other attributes correspond to global attributes defined in the
+    NetCDF file. Global file attributes are created by assigning to an
+    attribute of the `netcdf_file` object.
+
+    Parameters
+    ----------
+    filename : string or file-like
+        string -> filename
+    mode : {'r', 'w', 'a'}, optional
+        read-write-append mode, default is 'r'
+    mmap : None or bool, optional
+        Whether to mmap `filename` when reading.  Default is True
+        when `filename` is a file name, False when `filename` is a
+        file-like object. Note that when mmap is in use, data arrays
+        returned refer directly to the mmapped data on disk, and the
+        file cannot be closed as long as references to it exist.
+    version : {1, 2}, optional
+        version of netcdf to read / write, where 1 means *Classic
+        format* and 2 means *64-bit offset format*.  Default is 1.  See the
+        NetCDF documentation for more info.
+    maskandscale : bool, optional
+        Whether to automatically scale and/or mask data based on attributes.
+        Default is False.
+
+    Notes
+    -----
+    The major advantage of this module over other modules is that it doesn't
+    require the code to be linked to the NetCDF libraries. This module is
+    derived from the ``pupynere`` package.
+
+    NetCDF files are a self-describing binary data format. The file contains
+    metadata that describes the dimensions and variables in the file. More
+    details about NetCDF files can be found in the NetCDF documentation.
+    There are three main sections to a NetCDF data structure:
+
+    1. Dimensions
+    2. Variables
+    3. Attributes
+
+    The dimensions section records the name and length of each dimension used
+    by the variables. Each variable then indicates which dimensions it uses
+    and any attributes such as data units, and contains the data values for
+    the variable. It is good practice to include a variable with the same
+    name as a dimension to provide the values for that axis. Lastly, the
+    attributes section contains additional information such as the name of
+    the file creator or the instrument used to collect the data.
+
+    When writing data to a NetCDF file, there is often the need to indicate the
+    'record dimension'. A record dimension is the unbounded dimension for a
+    variable. For example, a temperature variable may have dimensions of
+    latitude, longitude and time. If one wants to add more temperature data to
+    the NetCDF file as time progresses, then the temperature variable should
+    have the time dimension flagged as the record dimension.
+
+    In addition, the NetCDF file header contains the position of the data in
+    the file, so data can be accessed efficiently without loading unnecessary
+    data into memory. This module uses the ``mmap`` module to create NumPy
+    arrays mapped to the data on disk, for the same purpose.
+
+    Note that when `netcdf_file` is used to open a file with mmap=True
+    (default for read-only), arrays returned by it refer to data
+    directly on the disk. While such arrays are alive, the file should not be
+    closed, and cannot be cleanly closed when asked. You may want to copy data
+    arrays obtained from a mmapped NetCDF file if they are to be processed
+    after the file is closed; see the example below.
+
+    Examples
+    --------
+    To create a NetCDF file:
+
+    >>> from scipy.io import netcdf_file
+    >>> import numpy as np
+    >>> f = netcdf_file('simple.nc', 'w')
+    >>> f.history = 'Created for a test'
+    >>> f.createDimension('time', 10)
+    >>> time = f.createVariable('time', 'i', ('time',))
+    >>> time[:] = np.arange(10)
+    >>> time.units = 'days since 2008-01-01'
+    >>> f.close()
+
+    Note the assignment of ``arange(10)`` to ``time[:]``.  Exposing the slice
+    of the time variable allows for the data to be set in the object, rather
+    than letting ``arange(10)`` overwrite the ``time`` variable.
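+
+    To create a record (i.e., unlimited) dimension, pass ``None`` as its
+    length; in this implementation only the first dimension may be
+    unlimited. A minimal sketch, using a hypothetical file name:
+
+    >>> f = netcdf_file('rec.nc', 'w')
+    >>> f.createDimension('time', None)
+    >>> v = f.createVariable('v', 'i', ('time',))
+    >>> v[:3] = np.arange(3)
+    >>> f.close()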
+
+    To read the NetCDF file we just created:
+
+    >>> from scipy.io import netcdf_file
+    >>> f = netcdf_file('simple.nc', 'r')
+    >>> print(f.history)
+    b'Created for a test'
+    >>> time = f.variables['time']
+    >>> print(time.units)
+    b'days since 2008-01-01'
+    >>> print(time.shape)
+    (10,)
+    >>> print(time[-1])
+    9
+
+    NetCDF files, when opened read-only, return arrays that refer
+    directly to memory-mapped data on disk:
+
+    >>> data = time[:]
+
+    If the data is to be processed after the file is closed, it needs
+    to be copied to main memory:
+
+    >>> data = time[:].copy()
+    >>> f.close()
+    >>> data.mean()
+    4.5
+
+    A NetCDF file can also be used as context manager:
+
+    >>> from scipy.io import netcdf_file
+    >>> with netcdf_file('simple.nc', 'r') as f:
+    ...     print(f.history)
+    b'Created for a test'
+
+    """
+    def __init__(self, filename, mode='r', mmap=None, version=1,
+                 maskandscale=False):
+        """Initialize netcdf_file from fileobj (str or file-like)."""
+        if mode not in 'rwa':
+            raise ValueError("Mode must be either 'r', 'w' or 'a'.")
+
+        if hasattr(filename, 'seek'):  # file-like
+            self.fp = filename
+            self.filename = 'None'
+            if mmap is None:
+                mmap = False
+            elif mmap and not hasattr(filename, 'fileno'):
+                raise ValueError('Cannot use file object for mmap')
+        else:  # maybe it's a string
+            self.filename = filename
+            omode = 'r+' if mode == 'a' else mode
+            self.fp = open(self.filename, '%sb' % omode)
+            if mmap is None:
+                # Mmapped files on PyPy usually cannot be closed
+                # before the GC runs, so it's better to use mmap=False
+                # as the default.
+                mmap = (not IS_PYPY)
+
+        if mode != 'r':
+            # Cannot read write-only files
+            mmap = False
+
+        self.use_mmap = mmap
+        self.mode = mode
+        self.version_byte = version
+        self.maskandscale = maskandscale
+
+        self.dimensions = {}
+        self.variables = {}
+
+        self._dims = []
+        self._recs = 0
+        self._recsize = 0
+
+        self._mm = None
+        self._mm_buf = None
+        if self.use_mmap:
+            self._mm = mm.mmap(self.fp.fileno(), 0, access=mm.ACCESS_READ)
+            self._mm_buf = np.frombuffer(self._mm, dtype=np.int8)
+
+        self._attributes = {}
+
+        if mode in 'ra':
+            self._read()
+
+    def __setattr__(self, attr, value):
+        # Store user defined attributes in a separate dict,
+        # so we can save them to file later.
+        try:
+            self._attributes[attr] = value
+        except AttributeError:
+            pass
+        self.__dict__[attr] = value
+
+    def close(self):
+        """Closes the NetCDF file."""
+        if hasattr(self, 'fp') and not self.fp.closed:
+            try:
+                self.flush()
+            finally:
+                self.variables = {}
+                if self._mm_buf is not None:
+                    ref = weakref.ref(self._mm_buf)
+                    self._mm_buf = None
+                    if ref() is None:
+                        # self._mm_buf is gc'd, and we can close the mmap
+                        self._mm.close()
+                    else:
+                        # we cannot close self._mm, since self._mm_buf is
+                        # alive and there may still be arrays referring to it
+                        warnings.warn((
+                            "Cannot close a netcdf_file opened with mmap=True, when "
+                            "netcdf_variables or arrays referring to its data still exist. "
+                            "All data arrays obtained from such files refer directly to "
+                            "data on disk, and must be copied before the file can be cleanly "
+                            "closed. (See netcdf_file docstring for more information on mmap.)"
+                        ), category=RuntimeWarning)
+                self._mm = None
+                self.fp.close()
+    __del__ = close
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, type, value, traceback):
+        self.close()
+
+    def createDimension(self, name, length):
+        """
+        Adds a dimension to the Dimension section of the NetCDF data structure.
+
+        Note that this function merely adds a new dimension that the variables can
+        reference. The values for the dimension, if desired, should be added as
+        a variable using `createVariable`, referring to this dimension.
+
+        Parameters
+        ----------
+        name : str
+            Name of the dimension (Eg, 'lat' or 'time').
+        length : int
+            Length of the dimension.
+
+        See Also
+        --------
+        createVariable
+
+        """
+        if length is None and self._dims:
+            raise ValueError("Only first dimension may be unlimited!")
+
+        self.dimensions[name] = length
+        self._dims.append(name)
+
+    def createVariable(self, name, type, dimensions):
+        """
+        Create an empty variable for the `netcdf_file` object, specifying its data
+        type and the dimensions it uses.
+
+        Parameters
+        ----------
+        name : str
+            Name of the new variable.
+        type : dtype or str
+            Data type of the variable.
+        dimensions : sequence of str
+            List of the dimension names used by the variable, in the desired order.
+
+        Returns
+        -------
+        variable : netcdf_variable
+            The newly created ``netcdf_variable`` object.
+            This object has also been added to the `netcdf_file` object as well.
+
+        See Also
+        --------
+        createDimension
+
+        Notes
+        -----
+        Any dimensions to be used by the variable should already exist in the
+        NetCDF data structure or should be created by `createDimension` prior to
+        creating the NetCDF variable.
+
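+        Examples
+        --------
+        A minimal sketch, assuming ``f`` is a `netcdf_file` open for
+        writing that already has a 'lat' dimension of length 73:
+
+        >>> lat = f.createVariable('lat', 'f', ('lat',))  # doctest: +SKIP
+        >>> lat[:] = np.linspace(-90, 90, 73)             # doctest: +SKIP
+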
+        """
+        shape = tuple([self.dimensions[dim] for dim in dimensions])
+        shape_ = tuple([dim or 0 for dim in shape])  # replace None with 0 for NumPy
+
+        type = dtype(type)
+        typecode, size = type.char, type.itemsize
+        if (typecode, size) not in REVERSE:
+            raise ValueError("NetCDF 3 does not support type %s" % type)
+
+        data = empty(shape_, dtype=type.newbyteorder("B"))  # convert to big endian always for NetCDF 3
+        self.variables[name] = netcdf_variable(
+                data, typecode, size, shape, dimensions,
+                maskandscale=self.maskandscale)
+        return self.variables[name]
+
+    def flush(self):
+        """
+        Perform a sync-to-disk flush if the `netcdf_file` object is in write mode.
+
+        See Also
+        --------
+        sync : Identical function
+
+        """
+        if hasattr(self, 'mode') and self.mode in 'wa':
+            self._write()
+    sync = flush
+
+    def _write(self):
+        self.fp.seek(0)
+        self.fp.write(b'CDF')
+        self.fp.write(array(self.version_byte, '>b').tobytes())
+
+        # Write headers and data.
+        self._write_numrecs()
+        self._write_dim_array()
+        self._write_gatt_array()
+        self._write_var_array()
+
+    def _write_numrecs(self):
+        # Get highest record count from all record variables.
+        for var in self.variables.values():
+            if var.isrec and len(var.data) > self._recs:
+                self.__dict__['_recs'] = len(var.data)
+        self._pack_int(self._recs)
+
+    def _write_dim_array(self):
+        if self.dimensions:
+            self.fp.write(NC_DIMENSION)
+            self._pack_int(len(self.dimensions))
+            for name in self._dims:
+                self._pack_string(name)
+                length = self.dimensions[name]
+                self._pack_int(length or 0)  # replace None with 0 for record dimension
+        else:
+            self.fp.write(ABSENT)
+
+    def _write_gatt_array(self):
+        self._write_att_array(self._attributes)
+
+    def _write_att_array(self, attributes):
+        if attributes:
+            self.fp.write(NC_ATTRIBUTE)
+            self._pack_int(len(attributes))
+            for name, values in attributes.items():
+                self._pack_string(name)
+                self._write_att_values(values)
+        else:
+            self.fp.write(ABSENT)
+
+    def _write_var_array(self):
+        if self.variables:
+            self.fp.write(NC_VARIABLE)
+            self._pack_int(len(self.variables))
+
+            # Sort variable names non-recs first, then recs.
+            def sortkey(n):
+                v = self.variables[n]
+                if v.isrec:
+                    return (-1,)
+                return v._shape
+            variables = sorted(self.variables, key=sortkey, reverse=True)
+
+            # Set the metadata for all variables.
+            for name in variables:
+                self._write_var_metadata(name)
+            # Now that we have the metadata, we know the vsize of
+            # each record variable, so we can calculate recsize.
+            self.__dict__['_recsize'] = sum([
+                    var._vsize for var in self.variables.values()
+                    if var.isrec])
+            # Set the data for all variables.
+            for name in variables:
+                self._write_var_data(name)
+        else:
+            self.fp.write(ABSENT)
+
+    def _write_var_metadata(self, name):
+        var = self.variables[name]
+
+        self._pack_string(name)
+        self._pack_int(len(var.dimensions))
+        for dimname in var.dimensions:
+            dimid = self._dims.index(dimname)
+            self._pack_int(dimid)
+
+        self._write_att_array(var._attributes)
+
+        nc_type = REVERSE[var.typecode(), var.itemsize()]
+        self.fp.write(nc_type)
+
+        if not var.isrec:
+            vsize = var.data.size * var.data.itemsize
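+            # round vsize up to the next multiple of 4 bytes (netCDF-3 alignment)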
+            vsize += -vsize % 4
+        else:  # record variable
+            try:
+                vsize = var.data[0].size * var.data.itemsize
+            except IndexError:
+                vsize = 0
+            rec_vars = len([v for v in self.variables.values()
+                            if v.isrec])
+            if rec_vars > 1:
+                vsize += -vsize % 4
+        self.variables[name].__dict__['_vsize'] = vsize
+        self._pack_int(vsize)
+
+        # Pack a bogus begin, and set the real value later.
+        self.variables[name].__dict__['_begin'] = self.fp.tell()
+        self._pack_begin(0)
+
+    def _write_var_data(self, name):
+        var = self.variables[name]
+
+        # Set begin in file header.
+        the_beguine = self.fp.tell()
+        self.fp.seek(var._begin)
+        self._pack_begin(the_beguine)
+        self.fp.seek(the_beguine)
+
+        # Write data.
+        if not var.isrec:
+            self.fp.write(var.data.tobytes())
+            count = var.data.size * var.data.itemsize
+            self._write_var_padding(var, var._vsize - count)
+        else:  # record variable
+            # Handle rec vars with shape[0] < nrecs.
+            if self._recs > len(var.data):
+                shape = (self._recs,) + var.data.shape[1:]
+                # Resize in-place does not always work since
+                # the array might not be single-segment
+                try:
+                    var.data.resize(shape)
+                except ValueError:
+                    var.__dict__['data'] = np.resize(var.data, shape).astype(var.data.dtype)
+
+            pos0 = pos = self.fp.tell()
+            for rec in var.data:
+                # Apparently scalars cannot be converted to big endian. If we
+                # try to convert a ``=i4`` scalar to, say, '>i4' the dtype
+                # will remain as ``=i4``.
+                if not rec.shape and (rec.dtype.byteorder == '<' or
+                        (rec.dtype.byteorder == '=' and LITTLE_ENDIAN)):
+                    rec = rec.byteswap()
+                self.fp.write(rec.tobytes())
+                # Padding
+                count = rec.size * rec.itemsize
+                self._write_var_padding(var, var._vsize - count)
+                pos += self._recsize
+                self.fp.seek(pos)
+            self.fp.seek(pos0 + var._vsize)
+
+    def _write_var_padding(self, var, size):
+        encoded_fill_value = var._get_encoded_fill_value()
+        num_fills = size // len(encoded_fill_value)
+        self.fp.write(encoded_fill_value * num_fills)
+
+    def _write_att_values(self, values):
+        if hasattr(values, 'dtype'):
+            nc_type = REVERSE[values.dtype.char, values.dtype.itemsize]
+        else:
+            types = [(int, NC_INT), (float, NC_FLOAT), (str, NC_CHAR)]
+
+            # bytes index into scalars in py3k. Check for "string" types
+            if isinstance(values, (str, bytes)):
+                sample = values
+            else:
+                try:
+                    sample = values[0]  # subscriptable?
+                except TypeError:
+                    sample = values     # scalar
+
+            for class_, nc_type in types:
+                if isinstance(sample, class_):
+                    break
+
+        typecode, size = TYPEMAP[nc_type]
+        dtype_ = '>%s' % typecode
+        # asarray() dies with bytes and '>c' in py3k. Change to 'S'
+        dtype_ = 'S' if dtype_ == '>c' else dtype_
+
+        values = asarray(values, dtype=dtype_)
+
+        self.fp.write(nc_type)
+
+        if values.dtype.char == 'S':
+            nelems = values.itemsize
+        else:
+            nelems = values.size
+        self._pack_int(nelems)
+
+        if not values.shape and (values.dtype.byteorder == '<' or
+                (values.dtype.byteorder == '=' and LITTLE_ENDIAN)):
+            values = values.byteswap()
+        self.fp.write(values.tobytes())
+        count = values.size * values.itemsize
+        self.fp.write(b'\x00' * (-count % 4))  # pad
+
+    def _read(self):
+        # Check magic bytes and version
+        magic = self.fp.read(3)
+        if not magic == b'CDF':
+            raise TypeError("Error: %s is not a valid NetCDF 3 file" %
+                            self.filename)
+        self.__dict__['version_byte'] = frombuffer(self.fp.read(1), '>b')[0]
+
+        # Read file headers and set data.
+        self._read_numrecs()
+        self._read_dim_array()
+        self._read_gatt_array()
+        self._read_var_array()
+
+    def _read_numrecs(self):
+        self.__dict__['_recs'] = self._unpack_int()
+
+    def _read_dim_array(self):
+        header = self.fp.read(4)
+        if header not in [ZERO, NC_DIMENSION]:
+            raise ValueError("Unexpected header.")
+        count = self._unpack_int()
+
+        for dim in range(count):
+            name = self._unpack_string().decode('latin1')
+            length = self._unpack_int() or None  # None for record dimension
+            self.dimensions[name] = length
+            self._dims.append(name)  # preserve order
+
+    def _read_gatt_array(self):
+        for k, v in self._read_att_array().items():
+            self.__setattr__(k, v)
+
+    def _read_att_array(self):
+        header = self.fp.read(4)
+        if header not in [ZERO, NC_ATTRIBUTE]:
+            raise ValueError("Unexpected header.")
+        count = self._unpack_int()
+
+        attributes = {}
+        for attr in range(count):
+            name = self._unpack_string().decode('latin1')
+            attributes[name] = self._read_att_values()
+        return attributes
+
+    def _read_var_array(self):
+        header = self.fp.read(4)
+        if header not in [ZERO, NC_VARIABLE]:
+            raise ValueError("Unexpected header.")
+
+        begin = 0
+        dtypes = {'names': [], 'formats': []}
+        rec_vars = []
+        count = self._unpack_int()
+        for var in range(count):
+            (name, dimensions, shape, attributes,
+             typecode, size, dtype_, begin_, vsize) = self._read_var()
+            # https://www.unidata.ucar.edu/software/netcdf/guide_toc.html
+            # Note that vsize is the product of the dimension lengths
+            # (omitting the record dimension) and the number of bytes
+            # per value (determined from the type), increased to the
+            # next multiple of 4, for each variable. If a record
+            # variable, this is the amount of space per record. The
+            # netCDF "record size" is calculated as the sum of the
+            # vsize's of all the record variables.
+            #
+            # The vsize field is actually redundant, because its value
+            # may be computed from other information in the header. The
+            # 32-bit vsize field is not large enough to contain the size
+            # of variables that require more than 2^32 - 4 bytes, so
+            # 2^32 - 1 is used in the vsize field for such variables.
+            if shape and shape[0] is None:  # record variable
+                rec_vars.append(name)
+                # The netCDF "record size" is calculated as the sum of
+                # the vsize's of all the record variables.
+                self.__dict__['_recsize'] += vsize
+                if begin == 0:
+                    begin = begin_
+                dtypes['names'].append(name)
+                dtypes['formats'].append(str(shape[1:]) + dtype_)
+
+                # Handle padding with a virtual variable.
+                if typecode in 'bch':
+                    actual_size = reduce(mul, (1,) + shape[1:]) * size
+                    padding = -actual_size % 4
+                    if padding:
+                        dtypes['names'].append('_padding_%d' % var)
+                        dtypes['formats'].append('(%d,)>b' % padding)
+
+                # Data will be set later.
+                data = None
+            else:  # not a record variable
+                # Calculate size to avoid problems with vsize (above)
+                a_size = reduce(mul, shape, 1) * size
+                if self.use_mmap:
+                    data = self._mm_buf[begin_:begin_+a_size].view(dtype=dtype_)
+                    data.shape = shape
+                else:
+                    pos = self.fp.tell()
+                    self.fp.seek(begin_)
+                    data = frombuffer(self.fp.read(a_size), dtype=dtype_
+                                      ).copy()
+                    data.shape = shape
+                    self.fp.seek(pos)
+
+            # Add variable.
+            self.variables[name] = netcdf_variable(
+                    data, typecode, size, shape, dimensions, attributes,
+                    maskandscale=self.maskandscale)
+
+        if rec_vars:
+            # Remove padding when only one record variable.
+            if len(rec_vars) == 1:
+                dtypes['names'] = dtypes['names'][:1]
+                dtypes['formats'] = dtypes['formats'][:1]
+
+            # Build rec array.
+            if self.use_mmap:
+                rec_array = self._mm_buf[begin:begin+self._recs*self._recsize].view(dtype=dtypes)
+                rec_array.shape = (self._recs,)
+            else:
+                pos = self.fp.tell()
+                self.fp.seek(begin)
+                rec_array = frombuffer(self.fp.read(self._recs*self._recsize),
+                                       dtype=dtypes).copy()
+                rec_array.shape = (self._recs,)
+                self.fp.seek(pos)
+
+            for var in rec_vars:
+                self.variables[var].__dict__['data'] = rec_array[var]
+
+    def _read_var(self):
+        name = self._unpack_string().decode('latin1')
+        dimensions = []
+        shape = []
+        dims = self._unpack_int()
+
+        for i in range(dims):
+            dimid = self._unpack_int()
+            dimname = self._dims[dimid]
+            dimensions.append(dimname)
+            dim = self.dimensions[dimname]
+            shape.append(dim)
+        dimensions = tuple(dimensions)
+        shape = tuple(shape)
+
+        attributes = self._read_att_array()
+        nc_type = self.fp.read(4)
+        vsize = self._unpack_int()
+        begin = [self._unpack_int, self._unpack_int64][self.version_byte-1]()
+
+        typecode, size = TYPEMAP[nc_type]
+        dtype_ = '>%s' % typecode
+
+        return name, dimensions, shape, attributes, typecode, size, dtype_, begin, vsize
+
+    def _read_att_values(self):
+        nc_type = self.fp.read(4)
+        n = self._unpack_int()
+
+        typecode, size = TYPEMAP[nc_type]
+
+        count = n*size
+        values = self.fp.read(int(count))
+        self.fp.read(-count % 4)  # read padding
+
+        if typecode != 'c':
+            values = frombuffer(values, dtype='>%s' % typecode).copy()
+            if values.shape == (1,):
+                values = values[0]
+        else:
+            values = values.rstrip(b'\x00')
+        return values
+
+    def _pack_begin(self, begin):
+        if self.version_byte == 1:
+            self._pack_int(begin)
+        elif self.version_byte == 2:
+            self._pack_int64(begin)
+
+    def _pack_int(self, value):
+        self.fp.write(array(value, '>i').tobytes())
+    _pack_int32 = _pack_int
+
+    def _unpack_int(self):
+        return int(frombuffer(self.fp.read(4), '>i')[0])
+    _unpack_int32 = _unpack_int
+
+    def _pack_int64(self, value):
+        self.fp.write(array(value, '>q').tobytes())
+
+    def _unpack_int64(self):
+        return frombuffer(self.fp.read(8), '>q')[0]
+
+    def _pack_string(self, s):
+        count = len(s)
+        self._pack_int(count)
+        self.fp.write(s.encode('latin1'))
+        self.fp.write(b'\x00' * (-count % 4))  # pad
+
+    def _unpack_string(self):
+        count = self._unpack_int()
+        s = self.fp.read(count).rstrip(b'\x00')
+        self.fp.read(-count % 4)  # read padding
+        return s
+
+
+class netcdf_variable:
+    """
+    A data object for netcdf files.
+
+    `netcdf_variable` objects are constructed by calling the method
+    `netcdf_file.createVariable` on the `netcdf_file` object. `netcdf_variable`
+    objects behave much like array objects defined in numpy, except that their
+    data resides in a file. Data is read by indexing and written by assigning
+    to an indexed subset; the entire array can be accessed by the index ``[:]``
+    or (for scalars) by using the methods `getValue` and `assignValue`.
+    `netcdf_variable` objects also have attribute `shape` with the same meaning
+    as for arrays, but the shape cannot be modified. There is another read-only
+    attribute `dimensions`, whose value is the tuple of dimension names.
+
+    All other attributes correspond to variable attributes defined in
+    the NetCDF file. Variable attributes are created by assigning to an
+    attribute of the `netcdf_variable` object.
+
+    Parameters
+    ----------
+    data : array_like
+        The data array that holds the values for the variable.
+        Typically, this is initialized as empty, but with the proper shape.
+    typecode : dtype character code
+        Desired data-type for the data array.
+    size : int
+        Desired element size for the data array.
+    shape : sequence of ints
+        The shape of the array. This should match the lengths of the
+        variable's dimensions.
+    dimensions : sequence of strings
+        The names of the dimensions used by the variable. Must be in the
+        same order of the dimension lengths given by `shape`.
+    attributes : dict, optional
+        Attribute values (any type) keyed by string names. These attributes
+        become attributes for the netcdf_variable object.
+    maskandscale : bool, optional
+        Whether to automatically scale and/or mask data based on attributes.
+        Default is False.
+
+
+    Attributes
+    ----------
+    dimensions : list of str
+        List of names of dimensions used by the variable object.
+    isrec, shape
+        Properties
+
+    See also
+    --------
+    isrec, shape
+
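+    Examples
+    --------
+    A sketch of typical usage, assuming ``f`` is a writable `netcdf_file`
+    with a 'time' dimension of length 10; assigning to an attribute such
+    as ``units`` creates a NetCDF variable attribute, and data is written
+    by assigning to an indexed slice:
+
+    >>> v = f.createVariable('v', 'i', ('time',))  # doctest: +SKIP
+    >>> v.units = 'm/s'                            # doctest: +SKIP
+    >>> v[:] = np.arange(10)                       # doctest: +SKIP
+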
+    """
+    def __init__(self, data, typecode, size, shape, dimensions,
+                 attributes=None,
+                 maskandscale=False):
+        self.data = data
+        self._typecode = typecode
+        self._size = size
+        self._shape = shape
+        self.dimensions = dimensions
+        self.maskandscale = maskandscale
+
+        self._attributes = attributes or {}
+        for k, v in self._attributes.items():
+            self.__dict__[k] = v
+
+    def __setattr__(self, attr, value):
+        # Store user defined attributes in a separate dict,
+        # so we can save them to file later.
+        try:
+            self._attributes[attr] = value
+        except AttributeError:
+            pass
+        self.__dict__[attr] = value
+
+    def isrec(self):
+        """Returns whether the variable has a record dimension or not.
+
+        A record dimension is a dimension along which additional data could be
+        easily appended in the netcdf data structure without much rewriting of
+        the data file. This attribute is a read-only property of the
+        `netcdf_variable`.
+
+        """
+        return bool(self.data.shape) and not self._shape[0]
+    isrec = property(isrec)
+
+    def shape(self):
+        """Returns the shape tuple of the data variable.
+
+        This is a read-only attribute and cannot be modified in the
+        same manner as other numpy arrays.
+        """
+        return self.data.shape
+    shape = property(shape)
+
+    def getValue(self):
+        """
+        Retrieve a scalar value from a `netcdf_variable` of length one.
+
+        Raises
+        ------
+        ValueError
+            If the netcdf variable is an array of length greater than one,
+            this exception will be raised.
+
+        """
+        return self.data.item()
+
+    def assignValue(self, value):
+        """
+        Assign a scalar value to a `netcdf_variable` of length one.
+
+        Parameters
+        ----------
+        value : scalar
+            Scalar value (of compatible type) to assign to a length-one netcdf
+            variable. This value will be written to file.
+
+        Raises
+        ------
+        ValueError
+            If the input is not a scalar, or if the destination is not a length-one
+            netcdf variable.
+
+        """
+        if not self.data.flags.writeable:
+            # Work-around for a bug in NumPy.  Calling itemset() on a read-only
+            # memory-mapped array causes a seg. fault.
+            # See NumPy ticket #1622, and SciPy ticket #1202.
+            # This check for `writeable` can be removed when the oldest version
+            # of NumPy still supported by scipy contains the fix for #1622.
+            raise RuntimeError("variable is not writeable")
+
+        self.data.itemset(value)
+
+    def typecode(self):
+        """
+        Return the typecode of the variable.
+
+        Returns
+        -------
+        typecode : char
+            The character typecode of the variable (e.g., 'i' for int).
+
+        """
+        return self._typecode
+
+    def itemsize(self):
+        """
+        Return the itemsize of the variable.
+
+        Returns
+        -------
+        itemsize : int
+            The element size of the variable (e.g., 8 for float64).
+
+        """
+        return self._size
+
+    def __getitem__(self, index):
+        if not self.maskandscale:
+            return self.data[index]
+
+        data = self.data[index].copy()
+        missing_value = self._get_missing_value()
+        data = self._apply_missing_value(data, missing_value)
+        scale_factor = self._attributes.get('scale_factor')
+        add_offset = self._attributes.get('add_offset')
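+        # Unpack following the netCDF attribute convention:
+        # unpacked = packed * scale_factor + add_offset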
+        if add_offset is not None or scale_factor is not None:
+            data = data.astype(np.float64)
+        if scale_factor is not None:
+            data = data * scale_factor
+        if add_offset is not None:
+            data += add_offset
+
+        return data
+
+    def __setitem__(self, index, data):
+        if self.maskandscale:
+            missing_value = (
+                    self._get_missing_value() or
+                    getattr(data, 'fill_value', 999999))
+            self._attributes.setdefault('missing_value', missing_value)
+            self._attributes.setdefault('_FillValue', missing_value)
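+            # Pack as the inverse of __getitem__:
+            # packed = (unpacked - add_offset) / scale_factor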
+            data = ((data - self._attributes.get('add_offset', 0.0)) /
+                    self._attributes.get('scale_factor', 1.0))
+            data = np.ma.asarray(data).filled(missing_value)
+            if self._typecode not in 'fd' and data.dtype.kind == 'f':
+                data = np.round(data)
+
+        # Expand data for record vars?
+        if self.isrec:
+            if isinstance(index, tuple):
+                rec_index = index[0]
+            else:
+                rec_index = index
+            if isinstance(rec_index, slice):
+                recs = (rec_index.start or 0) + len(data)
+            else:
+                recs = rec_index + 1
+            if recs > len(self.data):
+                shape = (recs,) + self._shape[1:]
+                # Resize in-place does not always work since
+                # the array might not be single-segment
+                try:
+                    self.data.resize(shape)
+                except ValueError:
+                    self.__dict__['data'] = np.resize(self.data, shape).astype(self.data.dtype)
+        self.data[index] = data
+
+    def _default_encoded_fill_value(self):
+        """
+        The default encoded fill-value for this Variable's data type.
+        """
+        nc_type = REVERSE[self.typecode(), self.itemsize()]
+        return FILLMAP[nc_type]
+
+    def _get_encoded_fill_value(self):
+        """
+        Returns the encoded fill value for this variable as bytes.
+
+        This is taken from either the _FillValue attribute, or the default fill
+        value for this variable's data type.
+        """
+        if '_FillValue' in self._attributes:
+            fill_value = np.array(self._attributes['_FillValue'],
+                                  dtype=self.data.dtype).tobytes()
+            if len(fill_value) == self.itemsize():
+                return fill_value
+            else:
+                return self._default_encoded_fill_value()
+        else:
+            return self._default_encoded_fill_value()
+
+    def _get_missing_value(self):
+        """
+        Returns the value denoting "no data" for this variable.
+
+        If this variable does not have a missing/fill value, returns None.
+
+        If both _FillValue and missing_value are given, give precedence to
+        _FillValue. The netCDF standard gives special meaning to _FillValue;
+        missing_value is  just used for compatibility with old datasets.
+        """
+
+        if '_FillValue' in self._attributes:
+            missing_value = self._attributes['_FillValue']
+        elif 'missing_value' in self._attributes:
+            missing_value = self._attributes['missing_value']
+        else:
+            missing_value = None
+
+        return missing_value
+
+    @staticmethod
+    def _apply_missing_value(data, missing_value):
+        """
+        Applies the given missing value to the data array.
+
+        Returns a numpy.ma array, with any value equal to missing_value masked
+        out (unless missing_value is None, in which case the original array is
+        returned).
+        """
+
+        if missing_value is None:
+            newdata = data
+        else:
+            try:
+                missing_value_isnan = np.isnan(missing_value)
+            except (TypeError, NotImplementedError):
+                # some data types (e.g., characters) cannot be tested for NaN
+                missing_value_isnan = False
+
+            if missing_value_isnan:
+                mymask = np.isnan(data)
+            else:
+                mymask = (data == missing_value)
+
+            newdata = np.ma.masked_where(mymask, data)
+
+        return newdata
+
+
+NetCDFFile = netcdf_file
+NetCDFVariable = netcdf_variable
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/arff/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/io/arff/__init__.py
new file mode 100644
index 00000000..dcfe1c42
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/arff/__init__.py
@@ -0,0 +1,28 @@
+"""
+Module to read ARFF files
+=========================
+ARFF is the standard data format for WEKA.
+It is a text file format which supports numerical, string and date values.
+The format can also represent missing data and sparse data.
+
+Notes
+-----
+The ARFF support in ``scipy.io`` provides file reading functionality only.
+For more extensive ARFF functionality, see `liac-arff
+`_.
+
+See the `WEKA website `_
+for more details about the ARFF format and available datasets.
+
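+Examples
+--------
+A minimal sketch, assuming an ARFF file named ``example.arff`` exists:
+
+>>> from scipy.io import arff
+>>> data, meta = arff.loadarff('example.arff')  # doctest: +SKIP
+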
+"""
+from ._arffread import *
+from . import _arffread
+
+# Deprecated namespaces, to be removed in v2.0.0
+from . import arffread
+
+__all__ = _arffread.__all__ + ['arffread']
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/arff/_arffread.py b/__packaged__/coreml/.python_dependencies/scipy/io/arff/_arffread.py
new file mode 100644
index 00000000..df1e3559
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/arff/_arffread.py
@@ -0,0 +1,905 @@
+"""A module to read arff files."""
+
+# Last Change: Mon Aug 20 08:00 PM 2007 J
+import re
+import datetime
+
+import numpy as np
+
+import csv
+import ctypes
+
+__all__ = ['MetaData', 'loadarff', 'ArffError', 'ParseArffError']
+
+# An Arff file is basically two parts:
+#   - header
+#   - data
+#
+# A header has each of its components starting with @META where META is one
+# of the keywords (attribute or relation, for now).
+
+# TODO:
+#   - both integer and reals are treated as numeric -> the integer info
+#    is lost!
+#   - Replace ValueError by ParseError or something
+
+# We can now handle the following:
+#   - numeric and nominal attributes
+#   - missing values for numeric attributes
+
+r_meta = re.compile(r'^\s*@')
+# Match a comment
+r_comment = re.compile(r'^%')
+# Match an empty line
+r_empty = re.compile(r'^\s+$')
+# Match a header line, that is a line which starts by @ + a word
+r_headerline = re.compile(r'^\s*@\S*')
+r_datameta = re.compile(r'^@[Dd][Aa][Tt][Aa]')
+r_relation = re.compile(r'^@[Rr][Ee][Ll][Aa][Tt][Ii][Oo][Nn]\s*(\S*)')
+r_attribute = re.compile(r'^\s*@[Aa][Tt][Tt][Rr][Ii][Bb][Uu][Tt][Ee]\s*(..*$)')
+
+r_nominal = re.compile(r'{(.+)}')
+r_date = re.compile(r"[Dd][Aa][Tt][Ee]\s+[\"']?(.+?)[\"']?$")
+
+# To get attribute names enclosed in quotes ('')
+r_comattrval = re.compile(r"'(..+)'\s+(..+$)")
+# To get normal attributes
+r_wcomattrval = re.compile(r"(\S+)\s+(..+$)")
+
+# ------------------------
+# Module defined exception
+# ------------------------
+
+
+class ArffError(OSError):
+    pass
+
+
+class ParseArffError(ArffError):
+    pass
+
+
+# ----------
+# Attributes
+# ----------
+class Attribute:
+
+    type_name = None
+
+    def __init__(self, name):
+        self.name = name
+        self.range = None
+        self.dtype = np.object_
+
+    @classmethod
+    def parse_attribute(cls, name, attr_string):
+        """
+        Parse the attribute line if it knows how. Returns the parsed
+        attribute, or None.
+        """
+        return None
+
+    def parse_data(self, data_str):
+        """
+        Parse a value of this type.
+        """
+        return None
+
+    def __str__(self):
+        """
+        Parse a value of this type.
+        """
+        return self.name + ',' + self.type_name
+
+
+class NominalAttribute(Attribute):
+
+    type_name = 'nominal'
+
+    def __init__(self, name, values):
+        super().__init__(name)
+        self.values = values
+        self.range = values
+        self.dtype = (np.string_, max(len(i) for i in values))
+
+    @staticmethod
+    def _get_nom_val(atrv):
+        """Given a string containing a nominal type, returns a tuple of the
+        possible values.
+
+        A nominal type is defined as something framed between braces ({}).
+
+        Parameters
+        ----------
+        atrv : str
+           Nominal type definition
+
+        Returns
+        -------
+        poss_vals : tuple
+           possible values
+
+        Examples
+        --------
+        >>> NominalAttribute._get_nom_val("{floup, bouga, fl, ratata}")
+        ('floup', 'bouga', 'fl', 'ratata')
+        """
+        m = r_nominal.match(atrv)
+        if m:
+            attrs, _ = split_data_line(m.group(1))
+            return tuple(attrs)
+        else:
+            raise ValueError("This does not look like a nominal string")
+
+    @classmethod
+    def parse_attribute(cls, name, attr_string):
+        """
+        Parse the attribute line if it knows how. Returns the parsed
+        attribute, or None.
+
+        For nominal attributes, the attribute string would be like
+        '{<attr_1>, <attr_2>, <attr_3>}'.
+        """
+        if attr_string[0] == '{':
+            values = cls._get_nom_val(attr_string)
+            return cls(name, values)
+        else:
+            return None
+
+    def parse_data(self, data_str):
+        """
+        Parse a value of this type.
+        """
+        if data_str in self.values:
+            return data_str
+        elif data_str == '?':
+            return data_str
+        else:
+            raise ValueError("%s value not in %s" % (str(data_str),
+                                                     str(self.values)))
+
+    def __str__(self):
+        msg = self.name + ",{"
+        for i in range(len(self.values)-1):
+            msg += self.values[i] + ","
+        msg += self.values[-1]
+        msg += "}"
+        return msg
+
+
+class NumericAttribute(Attribute):
+
+    def __init__(self, name):
+        super().__init__(name)
+        self.type_name = 'numeric'
+        self.dtype = np.float_
+
+    @classmethod
+    def parse_attribute(cls, name, attr_string):
+        """
+        Parse the attribute line if it knows how. Returns the parsed
+        attribute, or None.
+
+        For numeric attributes, the attribute string would be like
+        'numeric' or 'int' or 'real'.
+        """
+
+        attr_string = attr_string.lower().strip()
+
+        if (attr_string[:len('numeric')] == 'numeric' or
+           attr_string[:len('int')] == 'int' or
+           attr_string[:len('real')] == 'real'):
+            return cls(name)
+        else:
+            return None
+
+    def parse_data(self, data_str):
+        """
+        Parse a value of this type.
+
+        Parameters
+        ----------
+        data_str : str
+           string to convert
+
+        Returns
+        -------
+        f : float
+           where float can be nan
+
+        Examples
+        --------
+        >>> atr = NumericAttribute('atr')
+        >>> atr.parse_data('1')
+        1.0
+        >>> atr.parse_data('1\\n')
+        1.0
+        >>> atr.parse_data('?\\n')
+        nan
+        """
+        if '?' in data_str:
+            return np.nan
+        else:
+            return float(data_str)
+
+    def _basic_stats(self, data):
+        nbfac = data.size * 1. / (data.size - 1)
+        return (np.nanmin(data), np.nanmax(data),
+                np.mean(data), np.std(data) * nbfac)
+
+
+class StringAttribute(Attribute):
+
+    def __init__(self, name):
+        super().__init__(name)
+        self.type_name = 'string'
+
+    @classmethod
+    def parse_attribute(cls, name, attr_string):
+        """
+        Parse the attribute line if it knows how. Returns the parsed
+        attribute, or None.
+
+        For string attributes, the attribute string would be like
+        'string'.
+        """
+
+        attr_string = attr_string.lower().strip()
+
+        if attr_string[:len('string')] == 'string':
+            return cls(name)
+        else:
+            return None
+
+
+class DateAttribute(Attribute):
+
+    def __init__(self, name, date_format, datetime_unit):
+        super().__init__(name)
+        self.date_format = date_format
+        self.datetime_unit = datetime_unit
+        self.type_name = 'date'
+        self.range = date_format
+        self.dtype = np.datetime64(0, self.datetime_unit)
+
+    @staticmethod
+    def _get_date_format(atrv):
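+        """Convert a Java SimpleDateFormat pattern (as used by ARFF/WEKA
+        date attributes) to a C/strftime-style pattern plus the matching
+        numpy datetime64 unit. A sketch of the mapping this method
+        implements:
+
+        >>> DateAttribute._get_date_format("date 'yyyy-MM-dd'")
+        ('%Y-%m-%d', 'D')
+        """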
+        m = r_date.match(atrv)
+        if m:
+            pattern = m.group(1).strip()
+            # convert time pattern from Java's SimpleDateFormat to C's format
+            datetime_unit = None
+            if "yyyy" in pattern:
+                pattern = pattern.replace("yyyy", "%Y")
+                datetime_unit = "Y"
+            elif "yy":
+                pattern = pattern.replace("yy", "%y")
+                datetime_unit = "Y"
+            if "MM" in pattern:
+                pattern = pattern.replace("MM", "%m")
+                datetime_unit = "M"
+            if "dd" in pattern:
+                pattern = pattern.replace("dd", "%d")
+                datetime_unit = "D"
+            if "HH" in pattern:
+                pattern = pattern.replace("HH", "%H")
+                datetime_unit = "h"
+            if "mm" in pattern:
+                pattern = pattern.replace("mm", "%M")
+                datetime_unit = "m"
+            if "ss" in pattern:
+                pattern = pattern.replace("ss", "%S")
+                datetime_unit = "s"
+            if "z" in pattern or "Z" in pattern:
+                raise ValueError("Date type attributes with time zone not "
+                                 "supported, yet")
+
+            if datetime_unit is None:
+                raise ValueError("Invalid or unsupported date format")
+
+            return pattern, datetime_unit
+        else:
+            raise ValueError("Invalid or no date format")
+
+    @classmethod
+    def parse_attribute(cls, name, attr_string):
+        """
+        Parse the attribute line if it knows how. Returns the parsed
+        attribute, or None.
+
+        For date attributes, the attribute string would be like
+        'date <date-format>'.
+        """
+
+        attr_string_lower = attr_string.lower().strip()
+
+        if attr_string_lower[:len('date')] == 'date':
+            date_format, datetime_unit = cls._get_date_format(attr_string)
+            return cls(name, date_format, datetime_unit)
+        else:
+            return None
+
+    def parse_data(self, data_str):
+        """
+        Parse a value of this type.
+        """
+        date_str = data_str.strip().strip("'").strip('"')
+        if date_str == '?':
+            return np.datetime64('NaT', self.datetime_unit)
+        else:
+            dt = datetime.datetime.strptime(date_str, self.date_format)
+            return np.datetime64(dt).astype(
+                "datetime64[%s]" % self.datetime_unit)
+
+    def __str__(self):
+        return super().__str__() + ',' + self.date_format
+
+
+class RelationalAttribute(Attribute):
+
+    def __init__(self, name):
+        super().__init__(name)
+        self.type_name = 'relational'
+        self.dtype = np.object_
+        self.attributes = []
+        self.dialect = None
+
+    @classmethod
+    def parse_attribute(cls, name, attr_string):
+        """
+        Parse the attribute line if it knows how. Returns the parsed
+        attribute, or None.
+
+        For relational attributes, the attribute string would be like
+        'relational'.
+        """
+
+        attr_string_lower = attr_string.lower().strip()
+
+        if attr_string_lower[:len('relational')] == 'relational':
+            return cls(name)
+        else:
+            return None
+
+    def parse_data(self, data_str):
+        # Parses nested rows the same way as the data-section generator
+        # in _loadarff.
+        elems = list(range(len(self.attributes)))
+
+        escaped_string = data_str.encode().decode("unicode-escape")
+
+        row_tuples = []
+
+        for raw in escaped_string.split("\n"):
+            row, self.dialect = split_data_line(raw, self.dialect)
+
+            row_tuples.append(tuple(
+                [self.attributes[i].parse_data(row[i]) for i in elems]))
+
+        return np.array(row_tuples,
+                        [(a.name, a.dtype) for a in self.attributes])
+
+    def __str__(self):
+        return (super().__str__() + '\n\t' +
+                '\n\t'.join(str(a) for a in self.attributes))
+
+
+# -----------------
+# Various utilities
+# -----------------
+def to_attribute(name, attr_string):
+    attr_classes = (NominalAttribute, NumericAttribute, DateAttribute,
+                    StringAttribute, RelationalAttribute)
+
+    for cls in attr_classes:
+        attr = cls.parse_attribute(name, attr_string)
+        if attr is not None:
+            return attr
+
+    raise ParseArffError("unknown attribute %s" % attr_string)
+
+
+def csv_sniffer_has_bug_last_field():
+    """
+    Checks if the bug https://bugs.python.org/issue30157 is unpatched.
+    """
+
+    # We only compute this once.
+    has_bug = getattr(csv_sniffer_has_bug_last_field, "has_bug", None)
+
+    if has_bug is None:
+        dialect = csv.Sniffer().sniff("3, 'a'")
+        csv_sniffer_has_bug_last_field.has_bug = dialect.quotechar != "'"
+        has_bug = csv_sniffer_has_bug_last_field.has_bug
+
+    return has_bug
+
+
+def workaround_csv_sniffer_bug_last_field(sniff_line, dialect, delimiters):
+    """
+    Workaround for the bug https://bugs.python.org/issue30157 if it is unpatched.
+    """
+    if csv_sniffer_has_bug_last_field():
+        # Reuses code from the csv module
+        right_regex = r'(?P[^\w\n"\'])(?P ?)(?P["\']).*?(?P=quote)(?:$|\n)'
+
+        for restr in (r'(?P[^\w\n"\'])(?P ?)(?P["\']).*?(?P=quote)(?P=delim)',  # ,".*?",
+                      r'(?:^|\n)(?P["\']).*?(?P=quote)(?P[^\w\n"\'])(?P ?)',  # .*?",
+                      right_regex,  # ,".*?"
+                      r'(?:^|\n)(?P["\']).*?(?P=quote)(?:$|\n)'):  # ".*?" (no delim, no space)
+            regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
+            matches = regexp.findall(sniff_line)
+            if matches:
+                break
+
+        # If it does not match the expression that was bugged, then this bug does not apply
+        if restr != right_regex:
+            return
+
+        groupindex = regexp.groupindex
+
+        # There is only one end of the string
+        assert len(matches) == 1
+        m = matches[0]
+
+        n = groupindex['quote'] - 1
+        quote = m[n]
+
+        n = groupindex['delim'] - 1
+        delim = m[n]
+
+        n = groupindex['space'] - 1
+        space = bool(m[n])
+
+        dq_regexp = re.compile(
+            r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" %
+            {'delim': re.escape(delim), 'quote': quote}, re.MULTILINE
+        )
+
+        doublequote = bool(dq_regexp.search(sniff_line))
+
+        dialect.quotechar = quote
+        if delim in delimiters:
+            dialect.delimiter = delim
+        dialect.doublequote = doublequote
+        dialect.skipinitialspace = space
+
+
+def split_data_line(line, dialect=None):
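+    """Split a raw ARFF data line into a list of field values.
+
+    Returns ``(row, dialect)``; the sniffed csv dialect is returned so
+    callers can pass it back in for subsequent lines (sniffing only once).
+    """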
+    delimiters = ",\t"
+
+    # This cannot be done on a per-reader basis, and relational fields
+    # can be HUGE
+    csv.field_size_limit(int(ctypes.c_ulong(-1).value // 2))
+
+    # Remove the line end if any
+    if line[-1] == '\n':
+        line = line[:-1]
+
+    # Remove potential surrounding whitespace
+    line = line.strip()
+
+    sniff_line = line
+
+    # Add a delimiter if none is present, so that the csv.Sniffer
+    # does not complain for a single-field CSV.
+    if not any(d in line for d in delimiters):
+        sniff_line += ","
+
+    if dialect is None:
+        dialect = csv.Sniffer().sniff(sniff_line, delimiters=delimiters)
+        workaround_csv_sniffer_bug_last_field(sniff_line=sniff_line,
+                                              dialect=dialect,
+                                              delimiters=delimiters)
+
+    row = next(csv.reader([line], dialect))
+
+    return row, dialect
+
+
+# --------------
+# Parsing header
+# --------------
+def tokenize_attribute(iterable, attribute):
+    """Parse a raw string in header (e.g., starts by @attribute).
+
+    Given a raw string attribute, try to get the name and type of the
+    attribute. Constraints:
+
+    * The first line must start with @attribute (case insensitive, and
+      space like characters before @attribute are allowed)
+    * Works also if the attribute is spread on multilines.
+    * Works if empty lines or comments are in between
+
+    Parameters
+    ----------
+    iterable : iterator of str
+       the remaining header lines, consumed when the attribute definition
+       continues on the following line.
+    attribute : str
+       the attribute line to parse.
+
+    Returns
+    -------
+    attribute : Attribute
+       the parsed attribute object
+    next : str
+       next line to be parsed
+
+    Examples
+    --------
+    The line r"@attribute floupi real" yields a numeric attribute named
+    'floupi'; quoting allows spaces in names, so
+    r"  @attribute 'floupi 2' real   " yields one named 'floupi 2'.
+
+    """
+    sattr = attribute.strip()
+    mattr = r_attribute.match(sattr)
+    if mattr:
+        # atrv is everything after @attribute
+        atrv = mattr.group(1)
+        if r_comattrval.match(atrv):
+            name, type = tokenize_single_comma(atrv)
+            next_item = next(iterable)
+        elif r_wcomattrval.match(atrv):
+            name, type = tokenize_single_wcomma(atrv)
+            next_item = next(iterable)
+        else:
+            # Not sure we should support this, as it does not seem supported by
+            # weka.
+            raise ValueError("multi line not supported yet")
+    else:
+        raise ValueError("First line unparsable: %s" % sattr)
+
+    attribute = to_attribute(name, type)
+
+    if type.lower() == 'relational':
+        next_item = read_relational_attribute(iterable, attribute, next_item)
+
+    return attribute, next_item
+
+
+def tokenize_single_comma(val):
+    # XXX we match twice the same string (here and at the caller level). It is
+    # stupid, but it is easier for now...
+    m = r_comattrval.match(val)
+    if m:
+        try:
+            name = m.group(1).strip()
+            type = m.group(2).strip()
+        except IndexError as e:
+            raise ValueError("Error while tokenizing attribute") from e
+    else:
+        raise ValueError("Error while tokenizing single %s" % val)
+    return name, type
+
+
+def tokenize_single_wcomma(val):
+    # XXX we match twice the same string (here and at the caller level). It is
+    # stupid, but it is easier for now...
+    m = r_wcomattrval.match(val)
+    if m:
+        try:
+            name = m.group(1).strip()
+            type = m.group(2).strip()
+        except IndexError as e:
+            raise ValueError("Error while tokenizing attribute") from e
+    else:
+        raise ValueError("Error while tokenizing single %s" % val)
+    return name, type
+
+
+def read_relational_attribute(ofile, relational_attribute, i):
+    """Read the nested attributes of a relational attribute"""
+
+    r_end_relational = re.compile(r'^@[Ee][Nn][Dd]\s*' +
+                                  relational_attribute.name + r'\s*$')
+
+    while not r_end_relational.match(i):
+        m = r_headerline.match(i)
+        if m:
+            isattr = r_attribute.match(i)
+            if isattr:
+                attr, i = tokenize_attribute(ofile, i)
+                relational_attribute.attributes.append(attr)
+            else:
+                raise ValueError("Error parsing line %s" % i)
+        else:
+            i = next(ofile)
+
+    i = next(ofile)
+    return i
+
+
+def read_header(ofile):
+    """Read the header of the iterable ofile."""
+    i = next(ofile)
+
+    # Pass first comments
+    while r_comment.match(i):
+        i = next(ofile)
+
+    # Header is everything up to the @data line
+    relation = None
+    attributes = []
+    while not r_datameta.match(i):
+        m = r_headerline.match(i)
+        if m:
+            isattr = r_attribute.match(i)
+            if isattr:
+                attr, i = tokenize_attribute(ofile, i)
+                attributes.append(attr)
+            else:
+                isrel = r_relation.match(i)
+                if isrel:
+                    relation = isrel.group(1)
+                else:
+                    raise ValueError("Error parsing line %s" % i)
+                i = next(ofile)
+        else:
+            i = next(ofile)
+
+    return relation, attributes
+
+
+class MetaData:
+    """Small container to keep useful information on a ARFF dataset.
+
+    Knows about attributes names and types.
+
+    Examples
+    --------
+    ::
+
+        data, meta = loadarff('iris.arff')
+        # This will print the attributes names of the iris.arff dataset
+        for i in meta:
+            print(i)
+        # This works too
+        meta.names()
+        # Getting attribute type
+        types = meta.types()
+
+    Methods
+    -------
+    names
+    types
+
+    Notes
+    -----
+    Also maintains the list of attributes in order, i.e., iterating over
+    an instance of MetaData yields the attribute names in the order they
+    were defined.
+    """
+    def __init__(self, rel, attr):
+        self.name = rel
+        self._attributes = {a.name: a for a in attr}
+
+    def __repr__(self):
+        msg = ""
+        msg += "Dataset: %s\n" % self.name
+        for i in self._attributes:
+            msg += "\t%s's type is %s" % (i, self._attributes[i].type_name)
+            if self._attributes[i].range:
+                msg += ", range is %s" % str(self._attributes[i].range)
+            msg += '\n'
+        return msg
+
+    def __iter__(self):
+        return iter(self._attributes)
+
+    def __getitem__(self, key):
+        attr = self._attributes[key]
+
+        return (attr.type_name, attr.range)
+
+    def names(self):
+        """Return the list of attribute names.
+
+        Returns
+        -------
+        attrnames : list of str
+            The attribute names.
+        """
+        return list(self._attributes)
+
+    def types(self):
+        """Return the list of attribute types.
+
+        Returns
+        -------
+        attr_types : list of str
+            The attribute types.
+        """
+        attr_types = [self._attributes[name].type_name
+                      for name in self._attributes]
+        return attr_types
+
+
+def loadarff(f):
+    """
+    Read an arff file.
+
+    The data is returned as a record array, which can be accessed much like
+    a dictionary of NumPy arrays. For example, if one of the attributes is
+    called 'pressure', then its first 10 data points can be accessed from the
+    ``data`` record array like so: ``data['pressure'][0:10]``
+
+
+    Parameters
+    ----------
+    f : file-like or str
+       File-like object to read from, or filename to open.
+
+    Returns
+    -------
+    data : record array
+       The data of the arff file, accessible by attribute names.
+    meta : `MetaData`
+       Contains information about the arff file such as name and
+       type of attributes, the relation (name of the dataset), etc.
+
+    Raises
+    ------
+    ParseArffError
+        This is raised if the given file is not ARFF-formatted.
+    NotImplementedError
+        The ARFF file has an attribute which is not supported yet.
+
+    Notes
+    -----
+
+    This function should be able to read most arff files. Functionality
+    not implemented yet includes:
+
+    * date type attributes
+    * string type attributes
+
+    It can read files with numeric and nominal attributes. It cannot read
+    files with sparse data ({} in the file). However, this function can
+    read files with missing data (? in the file), representing the data
+    points as NaNs.
+
+    Examples
+    --------
+    >>> from scipy.io import arff
+    >>> from io import StringIO
+    >>> content = \"\"\"
+    ... @relation foo
+    ... @attribute width  numeric
+    ... @attribute height numeric
+    ... @attribute color  {red,green,blue,yellow,black}
+    ... @data
+    ... 5.0,3.25,blue
+    ... 4.5,3.75,green
+    ... 3.0,4.00,red
+    ... \"\"\"
+    >>> f = StringIO(content)
+    >>> data, meta = arff.loadarff(f)
+    >>> data
+    array([(5.0, 3.25, 'blue'), (4.5, 3.75, 'green'), (3.0, 4.0, 'red')],
+          dtype=[('width', '<f8'), ('height', '<f8'), ('color', 'S6')])
+    >>> meta
+    Dataset: foo
+    \twidth's type is numeric
+    \theight's type is numeric
+    \tcolor's type is nominal, range is ('red', 'green', 'blue', 'yellow', 'black')
+
+    """
+    if hasattr(f, 'read'):
+        ofile = f
+    else:
+        ofile = open(f, 'rt')
+    try:
+        return _loadarff(ofile)
+    finally:
+        if ofile is not f:  # only close what we opened
+            ofile.close()
+
+
+def _loadarff(ofile):
+    # Parse the header file
+    try:
+        rel, attr = read_header(ofile)
+    except ValueError as e:
+        msg = "Error while parsing header, error was: " + str(e)
+        raise ParseArffError(msg) from e
+
+    # Check whether we have a string attribute (not supported yet)
+    hasstr = any(isinstance(a, StringAttribute) for a in attr)
+
+    meta = MetaData(rel, attr)
+
+    # XXX The following code is not great
+    # Build the type descriptor descr and the list of converters used to
+    # convert each attribute to the suitable type (which should match the
+    # one in descr).
+
+    # This can be used once we want to support integers as integer values
+    # and not as numeric anymore (using masked arrays?).
+
+    if hasstr:
+        # How to support strings efficiently? Ideally, we should know the
+        # max size of the string before allocating the numpy array.
+        raise NotImplementedError("String attributes not supported yet, sorry")
+
+    ni = len(attr)
+
+    def generator(row_iter, delim=','):
+        # TODO: this is where we are spending time (~80%). I think things
+        # could be made more efficient:
+        #   - We could for example "compile" the function, because some
+        #   values do not change here.
+        #   - The function to convert a line to dtyped values could also be
+        #   generated on the fly from a string and be executed instead of
+        #   looping.
+        #   - The regexes are overkill: for comments, checking that a line
+        #   starts with % should be enough and faster, and for empty lines,
+        #   same thing --> this does not seem to change anything.
+
+        # 'compiling' the range since it does not change
+        # Note, I have already tried zipping the converters and
+        # row elements and got slightly worse performance.
+        elems = list(range(ni))
+
+        dialect = None
+        for raw in row_iter:
+            # We do not abstract skipping comments and empty lines for
+            # performance reasons.
+            if r_comment.match(raw) or r_empty.match(raw):
+                continue
+
+            row, dialect = split_data_line(raw, dialect)
+
+            yield tuple([attr[i].parse_data(row[i]) for i in elems])
+
+    a = list(generator(ofile))
+    # No error should happen here: it is a bug otherwise
+    data = np.array(a, [(attribute.name, attribute.dtype)
+                        for attribute in attr])
+    return data, meta
+
+
+# ----
+# Misc
+# ----
+def basic_stats(data):
+    # n/(n - 1) factor to correct the biased (population) np.std estimate
+    nbfac = data.size * 1. / (data.size - 1)
+    return np.nanmin(data), np.nanmax(data), np.mean(data), np.std(data) * nbfac
+
+
+def print_attribute(name, tp, data):
+    type_name = tp.type_name
+    if type_name in ('numeric', 'real', 'integer'):
+        dmin, dmax, mean, std = basic_stats(data)
+        print("%s,%s,%f,%f,%f,%f" % (name, type_name, dmin, dmax, mean, std))
+    else:
+        print(str(tp))
+
+
+def test_weka(filename):
+    data, meta = loadarff(filename)
+    print(len(data.dtype))
+    print(data.size)
+    for i in meta:
+        print_attribute(i, meta[i], data[i])
+
+
+# make sure nose does not find this as a test
+test_weka.__test__ = False
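+
+# Example command-line usage (the file path is illustrative):
+#     python _arffread.py /path/to/iris.arff
+# This prints the number of attributes, the number of rows, and a
+# "name,type,min,max,mean,std" line for each numeric attribute.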
+
+
+if __name__ == '__main__':
+    import sys
+    filename = sys.argv[1]
+    test_weka(filename)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/arff/arffread.py b/__packaged__/coreml/.python_dependencies/scipy/io/arff/arffread.py
new file mode 100644
index 00000000..832ad868
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/arff/arffread.py
@@ -0,0 +1,36 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.io.arff` namespace for importing the functions
+# included below.
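+#
+# For example, prefer
+#     from scipy.io.arff import loadarff
+# over the deprecated
+#     from scipy.io.arff.arffread import loadarff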
+
+import warnings
+from . import _arffread
+
+__all__ = [  # noqa: F822
+    'MetaData', 'loadarff', 'ArffError', 'ParseArffError',
+    'r_meta', 'r_comment', 'r_empty', 'r_headerline',
+    'r_datameta', 'r_relation', 'r_attribute', 'r_nominal',
+    'r_date', 'r_comattrval', 'r_wcomattrval', 'Attribute',
+    'NominalAttribute', 'NumericAttribute', 'StringAttribute',
+    'DateAttribute', 'RelationalAttribute', 'to_attribute',
+    'csv_sniffer_has_bug_last_field', 'workaround_csv_sniffer_bug_last_field',
+    'split_data_line', 'tokenize_attribute', 'tokenize_single_comma',
+    'tokenize_single_wcomma', 'read_relational_attribute', 'read_header',
+    'basic_stats', 'print_attribute', 'test_weka'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.io.arff.arffread is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.io.arff instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.io.arff` namespace, "
+                  "the `scipy.io.arff.arffread` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_arffread, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/iris.arff b/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/iris.arff
new file mode 100644
index 00000000..780480c7
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/iris.arff
@@ -0,0 +1,225 @@
+% 1. Title: Iris Plants Database
+% 
+% 2. Sources:
+%      (a) Creator: R.A. Fisher
+%      (b) Donor: Michael Marshall (MARSHALL%PLU@io.arc.nasa.gov)
+%      (c) Date: July, 1988
+% 
+% 3. Past Usage:
+%    - Publications: too many to mention!!!  Here are a few.
+%    1. Fisher,R.A. "The use of multiple measurements in taxonomic problems"
+%       Annual Eugenics, 7, Part II, 179-188 (1936); also in "Contributions
+%       to Mathematical Statistics" (John Wiley, NY, 1950).
+%    2. Duda,R.O., & Hart,P.E. (1973) Pattern Classification and Scene Analysis.
+%       (Q327.D83) John Wiley & Sons.  ISBN 0-471-22361-1.  See page 218.
+%    3. Dasarathy, B.V. (1980) "Nosing Around the Neighborhood: A New System
+%       Structure and Classification Rule for Recognition in Partially Exposed
+%       Environments".  IEEE Transactions on Pattern Analysis and Machine
+%       Intelligence, Vol. PAMI-2, No. 1, 67-71.
+%       -- Results:
+%          -- very low misclassification rates (0% for the setosa class)
+%    4. Gates, G.W. (1972) "The Reduced Nearest Neighbor Rule".  IEEE 
+%       Transactions on Information Theory, May 1972, 431-433.
+%       -- Results:
+%          -- very low misclassification rates again
+%    5. See also: 1988 MLC Proceedings, 54-64.  Cheeseman et al's AUTOCLASS II
+%       conceptual clustering system finds 3 classes in the data.
+% 
+% 4. Relevant Information:
+%    --- This is perhaps the best known database to be found in the pattern
+%        recognition literature.  Fisher's paper is a classic in the field
+%        and is referenced frequently to this day.  (See Duda & Hart, for
+%        example.)  The data set contains 3 classes of 50 instances each,
+%        where each class refers to a type of iris plant.  One class is
+%        linearly separable from the other 2; the latter are NOT linearly
+%        separable from each other.
+%    --- Predicted attribute: class of iris plant.
+%    --- This is an exceedingly simple domain.
+% 
+% 5. Number of Instances: 150 (50 in each of three classes)
+% 
+% 6. Number of Attributes: 4 numeric, predictive attributes and the class
+% 
+% 7. Attribute Information:
+%    1. sepal length in cm
+%    2. sepal width in cm
+%    3. petal length in cm
+%    4. petal width in cm
+%    5. class: 
+%       -- Iris Setosa
+%       -- Iris Versicolour
+%       -- Iris Virginica
+% 
+% 8. Missing Attribute Values: None
+% 
+% Summary Statistics:
+%  	           Min  Max   Mean    SD   Class Correlation
+%    sepal length: 4.3  7.9   5.84  0.83    0.7826   
+%     sepal width: 2.0  4.4   3.05  0.43   -0.4194
+%    petal length: 1.0  6.9   3.76  1.76    0.9490  (high!)
+%     petal width: 0.1  2.5   1.20  0.76    0.9565  (high!)
+% 
+% 9. Class Distribution: 33.3% for each of 3 classes.
+
+@RELATION iris
+
+@ATTRIBUTE sepallength	REAL
+@ATTRIBUTE sepalwidth 	REAL
+@ATTRIBUTE petallength 	REAL
+@ATTRIBUTE petalwidth	REAL
+@ATTRIBUTE class 	{Iris-setosa,Iris-versicolor,Iris-virginica}
+
+@DATA
+5.1,3.5,1.4,0.2,Iris-setosa
+4.9,3.0,1.4,0.2,Iris-setosa
+4.7,3.2,1.3,0.2,Iris-setosa
+4.6,3.1,1.5,0.2,Iris-setosa
+5.0,3.6,1.4,0.2,Iris-setosa
+5.4,3.9,1.7,0.4,Iris-setosa
+4.6,3.4,1.4,0.3,Iris-setosa
+5.0,3.4,1.5,0.2,Iris-setosa
+4.4,2.9,1.4,0.2,Iris-setosa
+4.9,3.1,1.5,0.1,Iris-setosa
+5.4,3.7,1.5,0.2,Iris-setosa
+4.8,3.4,1.6,0.2,Iris-setosa
+4.8,3.0,1.4,0.1,Iris-setosa
+4.3,3.0,1.1,0.1,Iris-setosa
+5.8,4.0,1.2,0.2,Iris-setosa
+5.7,4.4,1.5,0.4,Iris-setosa
+5.4,3.9,1.3,0.4,Iris-setosa
+5.1,3.5,1.4,0.3,Iris-setosa
+5.7,3.8,1.7,0.3,Iris-setosa
+5.1,3.8,1.5,0.3,Iris-setosa
+5.4,3.4,1.7,0.2,Iris-setosa
+5.1,3.7,1.5,0.4,Iris-setosa
+4.6,3.6,1.0,0.2,Iris-setosa
+5.1,3.3,1.7,0.5,Iris-setosa
+4.8,3.4,1.9,0.2,Iris-setosa
+5.0,3.0,1.6,0.2,Iris-setosa
+5.0,3.4,1.6,0.4,Iris-setosa
+5.2,3.5,1.5,0.2,Iris-setosa
+5.2,3.4,1.4,0.2,Iris-setosa
+4.7,3.2,1.6,0.2,Iris-setosa
+4.8,3.1,1.6,0.2,Iris-setosa
+5.4,3.4,1.5,0.4,Iris-setosa
+5.2,4.1,1.5,0.1,Iris-setosa
+5.5,4.2,1.4,0.2,Iris-setosa
+4.9,3.1,1.5,0.1,Iris-setosa
+5.0,3.2,1.2,0.2,Iris-setosa
+5.5,3.5,1.3,0.2,Iris-setosa
+4.9,3.1,1.5,0.1,Iris-setosa
+4.4,3.0,1.3,0.2,Iris-setosa
+5.1,3.4,1.5,0.2,Iris-setosa
+5.0,3.5,1.3,0.3,Iris-setosa
+4.5,2.3,1.3,0.3,Iris-setosa
+4.4,3.2,1.3,0.2,Iris-setosa
+5.0,3.5,1.6,0.6,Iris-setosa
+5.1,3.8,1.9,0.4,Iris-setosa
+4.8,3.0,1.4,0.3,Iris-setosa
+5.1,3.8,1.6,0.2,Iris-setosa
+4.6,3.2,1.4,0.2,Iris-setosa
+5.3,3.7,1.5,0.2,Iris-setosa
+5.0,3.3,1.4,0.2,Iris-setosa
+7.0,3.2,4.7,1.4,Iris-versicolor
+6.4,3.2,4.5,1.5,Iris-versicolor
+6.9,3.1,4.9,1.5,Iris-versicolor
+5.5,2.3,4.0,1.3,Iris-versicolor
+6.5,2.8,4.6,1.5,Iris-versicolor
+5.7,2.8,4.5,1.3,Iris-versicolor
+6.3,3.3,4.7,1.6,Iris-versicolor
+4.9,2.4,3.3,1.0,Iris-versicolor
+6.6,2.9,4.6,1.3,Iris-versicolor
+5.2,2.7,3.9,1.4,Iris-versicolor
+5.0,2.0,3.5,1.0,Iris-versicolor
+5.9,3.0,4.2,1.5,Iris-versicolor
+6.0,2.2,4.0,1.0,Iris-versicolor
+6.1,2.9,4.7,1.4,Iris-versicolor
+5.6,2.9,3.6,1.3,Iris-versicolor
+6.7,3.1,4.4,1.4,Iris-versicolor
+5.6,3.0,4.5,1.5,Iris-versicolor
+5.8,2.7,4.1,1.0,Iris-versicolor
+6.2,2.2,4.5,1.5,Iris-versicolor
+5.6,2.5,3.9,1.1,Iris-versicolor
+5.9,3.2,4.8,1.8,Iris-versicolor
+6.1,2.8,4.0,1.3,Iris-versicolor
+6.3,2.5,4.9,1.5,Iris-versicolor
+6.1,2.8,4.7,1.2,Iris-versicolor
+6.4,2.9,4.3,1.3,Iris-versicolor
+6.6,3.0,4.4,1.4,Iris-versicolor
+6.8,2.8,4.8,1.4,Iris-versicolor
+6.7,3.0,5.0,1.7,Iris-versicolor
+6.0,2.9,4.5,1.5,Iris-versicolor
+5.7,2.6,3.5,1.0,Iris-versicolor
+5.5,2.4,3.8,1.1,Iris-versicolor
+5.5,2.4,3.7,1.0,Iris-versicolor
+5.8,2.7,3.9,1.2,Iris-versicolor
+6.0,2.7,5.1,1.6,Iris-versicolor
+5.4,3.0,4.5,1.5,Iris-versicolor
+6.0,3.4,4.5,1.6,Iris-versicolor
+6.7,3.1,4.7,1.5,Iris-versicolor
+6.3,2.3,4.4,1.3,Iris-versicolor
+5.6,3.0,4.1,1.3,Iris-versicolor
+5.5,2.5,4.0,1.3,Iris-versicolor
+5.5,2.6,4.4,1.2,Iris-versicolor
+6.1,3.0,4.6,1.4,Iris-versicolor
+5.8,2.6,4.0,1.2,Iris-versicolor
+5.0,2.3,3.3,1.0,Iris-versicolor
+5.6,2.7,4.2,1.3,Iris-versicolor
+5.7,3.0,4.2,1.2,Iris-versicolor
+5.7,2.9,4.2,1.3,Iris-versicolor
+6.2,2.9,4.3,1.3,Iris-versicolor
+5.1,2.5,3.0,1.1,Iris-versicolor
+5.7,2.8,4.1,1.3,Iris-versicolor
+6.3,3.3,6.0,2.5,Iris-virginica
+5.8,2.7,5.1,1.9,Iris-virginica
+7.1,3.0,5.9,2.1,Iris-virginica
+6.3,2.9,5.6,1.8,Iris-virginica
+6.5,3.0,5.8,2.2,Iris-virginica
+7.6,3.0,6.6,2.1,Iris-virginica
+4.9,2.5,4.5,1.7,Iris-virginica
+7.3,2.9,6.3,1.8,Iris-virginica
+6.7,2.5,5.8,1.8,Iris-virginica
+7.2,3.6,6.1,2.5,Iris-virginica
+6.5,3.2,5.1,2.0,Iris-virginica
+6.4,2.7,5.3,1.9,Iris-virginica
+6.8,3.0,5.5,2.1,Iris-virginica
+5.7,2.5,5.0,2.0,Iris-virginica
+5.8,2.8,5.1,2.4,Iris-virginica
+6.4,3.2,5.3,2.3,Iris-virginica
+6.5,3.0,5.5,1.8,Iris-virginica
+7.7,3.8,6.7,2.2,Iris-virginica
+7.7,2.6,6.9,2.3,Iris-virginica
+6.0,2.2,5.0,1.5,Iris-virginica
+6.9,3.2,5.7,2.3,Iris-virginica
+5.6,2.8,4.9,2.0,Iris-virginica
+7.7,2.8,6.7,2.0,Iris-virginica
+6.3,2.7,4.9,1.8,Iris-virginica
+6.7,3.3,5.7,2.1,Iris-virginica
+7.2,3.2,6.0,1.8,Iris-virginica
+6.2,2.8,4.8,1.8,Iris-virginica
+6.1,3.0,4.9,1.8,Iris-virginica
+6.4,2.8,5.6,2.1,Iris-virginica
+7.2,3.0,5.8,1.6,Iris-virginica
+7.4,2.8,6.1,1.9,Iris-virginica
+7.9,3.8,6.4,2.0,Iris-virginica
+6.4,2.8,5.6,2.2,Iris-virginica
+6.3,2.8,5.1,1.5,Iris-virginica
+6.1,2.6,5.6,1.4,Iris-virginica
+7.7,3.0,6.1,2.3,Iris-virginica
+6.3,3.4,5.6,2.4,Iris-virginica
+6.4,3.1,5.5,1.8,Iris-virginica
+6.0,3.0,4.8,1.8,Iris-virginica
+6.9,3.1,5.4,2.1,Iris-virginica
+6.7,3.1,5.6,2.4,Iris-virginica
+6.9,3.1,5.1,2.3,Iris-virginica
+5.8,2.7,5.1,1.9,Iris-virginica
+6.8,3.2,5.9,2.3,Iris-virginica
+6.7,3.3,5.7,2.5,Iris-virginica
+6.7,3.0,5.2,2.3,Iris-virginica
+6.3,2.5,5.0,1.9,Iris-virginica
+6.5,3.0,5.2,2.0,Iris-virginica
+6.2,3.4,5.4,2.3,Iris-virginica
+5.9,3.0,5.1,1.8,Iris-virginica
+%
+%
+%
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/missing.arff b/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/missing.arff
new file mode 100644
index 00000000..dedc64c8
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/missing.arff
@@ -0,0 +1,8 @@
+% This arff file contains some missing data
+@relation missing
+@attribute yop real
+@attribute yap real
+@data
+1,5
+2,4
+?,?
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/nodata.arff b/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/nodata.arff
new file mode 100644
index 00000000..5766aeb2
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/nodata.arff
@@ -0,0 +1,11 @@
+@RELATION iris
+
+@ATTRIBUTE sepallength  REAL
+@ATTRIBUTE sepalwidth   REAL
+@ATTRIBUTE petallength  REAL
+@ATTRIBUTE petalwidth   REAL
+@ATTRIBUTE class    {Iris-setosa,Iris-versicolor,Iris-virginica}
+
+@DATA
+
+% This file has no data
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/quoted_nominal.arff b/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/quoted_nominal.arff
new file mode 100644
index 00000000..7cd16d1e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/quoted_nominal.arff
@@ -0,0 +1,13 @@
+% Regression test for issue #10232 : Exception in loadarff with quoted nominal attributes
+% Spaces between elements are stripped by the parser
+
+@relation SOME_DATA
+@attribute age numeric
+@attribute smoker {'yes', 'no'}
+@data
+18,  'no'
+24, 'yes'
+44,     'no'
+56, 'no'
+89,'yes'
+11,  'no'
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/quoted_nominal_spaces.arff b/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/quoted_nominal_spaces.arff
new file mode 100644
index 00000000..c7991278
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/quoted_nominal_spaces.arff
@@ -0,0 +1,13 @@
+% Regression test for issue #10232 : Exception in loadarff with quoted nominal attributes
+% Spaces inside quotes are NOT stripped by the parser
+
+@relation SOME_DATA
+@attribute age numeric
+@attribute smoker {'  yes', 'no  '}
+@data
+18,'no  '
+24,'  yes'
+44,'no  '
+56,'no  '
+89,'  yes'
+11,'no  '
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test1.arff b/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test1.arff
new file mode 100644
index 00000000..ccc8e0cc
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test1.arff
@@ -0,0 +1,10 @@
+@RELATION test1
+
+@ATTRIBUTE attr0	REAL
+@ATTRIBUTE attr1 	REAL
+@ATTRIBUTE attr2 	REAL
+@ATTRIBUTE attr3	REAL
+@ATTRIBUTE class 	{class0, class1, class2, class3}
+
+@DATA
+0.1, 0.2, 0.3, 0.4,class1
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test10.arff b/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test10.arff
new file mode 100644
index 00000000..094ac509
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test10.arff
@@ -0,0 +1,8 @@
+@relation test9
+
+@attribute attr_relational	    relational
+	@attribute attr_number	integer
+@end attr_relational
+
+@data
+'0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n21\n22\n23\n24\n25\n26\n27\n28\n29\n30\n31\n32\n33\n34\n35\n36\n37\n38\n39\n40\n41\n42\n43\n44\n45\n46\n47\n48\n49\n50\n51\n52\n53\n54\n55\n56\n57\n58\n59\n60\n61\n62\n63\n64\n65\n66\n67\n68\n69\n70\n71\n72\n73\n74\n75\n76\n77\n78\n79\n80\n81\n82\n83\n84\n85\n86\n87\n88\n89\n90\n91\n92\n93\n94\n95\n96\n97\n98\n99\n100\n101\n102\n103\n104\n105\n106\n107\n108\n109\n110\n111\n112\n113\n114\n115\n116\n117\n118\n119\n120\n121\n122\n123\n124\n125\n126\n127\n128\n129\n130\n131\n132\n133\n134\n135\n136\n137\n138\n139\n140\n141\n142\n143\n144\n145\n146\n147\n148\n149\n150\n151\n152\n153\n154\n155\n156\n157\n158\n159\n160\n161\n162\n163\n164\n165\n166\n167\n168\n169\n170\n171\n172\n173\n174\n175\n176\n177\n178\n179\n180\n181\n182\n183\n184\n185\n186\n187\n188\n189\n190\n191\n192\n193\n194\n195\n196\n197\n198\n199\n200\n201\n202\n203\n204\n205\n206\n207\n208\n209\n210\n211\n212\n213\n214\n215\n216\n217\n218\n219\n220\n221\n222\n223\n224\n225\n226\n227\n228\n229\n230\n231\n232\n233\n234\n235\n236\n237\n238\n239\n240\n241\n242\n243\n244\n245\n246\n247\n248\n249\n250\n251\n252\n253\n254\n255\n256\n257\n258\n259\n260\n261\n262\n263\n264\n265\n266\n267\n268\n269\n270\n271\n272\n273\n274\n275\n276\n277\n278\n279\n280\n281\n282\n283\n284\n285\n286\n287\n288\n289\n290\n291\n292\n293\n294\n295\n296\n297\n298\n299\n300\n301\n302\n303\n304\n305\n306\n307\n308\n309\n310\n311\n312\n313\n314\n315\n316\n317\n318\n319\n320\n321\n322\n323\n324\n325\n326\n327\n328\n329\n330\n331\n332\n333\n334\n335\n336\n337\n338\n339\n340\n341\n342\n343\n344\n345\n346\n347\n348\n349\n350\n351\n352\n353\n354\n355\n356\n357\n358\n359\n360\n361\n362\n363\n364\n365\n366\n367\n368\n369\n370\n371\n372\n373\n374\n375\n376\n377\n378\n379\n380\n381\n382\n383\n384\n385\n386\n387\n388\n389\n390\n391\n392\n393\n394\n395\n396\n397\n398\n399\n400\n401\n402\n403\n404\n405\n406\n407\n408\n409\n410\n411\n412\n413\n414\n415\n416\n417\n418\n419\n420\n421\n422\n423\n424\n425\n426\n427\n428\n429\n430\n431\n432\n433\n434\n435\n436\n437\n438\n439\n440\n441\n442\n443\n444\n445\n446\n447\n448\n449\n450\n451\n452\n453\n454\n455\n456\n457\n458\n459\n460\n461\n462\n463\n464\n465\n466\n467\n468\n469\n470\n471\n472\n473\n474\n475\n476\n477\n478\n479\n480\n481\n482\n483\n484\n485\n486\n487\n488\n489\n490\n491\n492\n493\n494\n495\n496\n497\n498\n499\n500\n501\n502\n503\n504\n505\n506\n507\n508\n509\n510\n511\n512\n513\n514\n515\n516\n517\n518\n519\n520\n521\n522\n523\n524\n525\n526\n527\n528\n529\n530\n531\n532\n533\n534\n535\n536\n537\n538\n539\n540\n541\n542\n543\n544\n545\n546\n547\n548\n549\n550\n551\n552\n553\n554\n555\n556\n557\n558\n559\n560\n561\n562\n563\n564\n565\n566\n567\n568\n569\n570\n571\n572\n573\n574\n575\n576\n577\n578\n579\n580\n581\n582\n583\n584\n585\n586\n587\n588\n589\n590\n591\n592\n593\n594\n595\n596\n597\n598\n599\n600\n601\n602\n603\n604\n605\n606\n607\n608\n609\n610\n611\n612\n613\n614\n615\n616\n617\n618\n619\n620\n621\n622\n623\n624\n625\n626\n627\n628\n629\n630\n631\n632\n633\n634\n635\n636\n637\n638\n639\n640\n641\n642\n643\n644\n645\n646\n647\n648\n649\n650\n651\n652\n653\n654\n655\n656\n657\n658\n659\n660\n661\n662\n663\n664\n665\n666\n667\n668\n669\n670\n671\n672\n673\n674\n675\n676\n677\n678\n679\n680\n681\n682\n683\n684\n685\n686\n687\n688\n689\n690\n691\n692\n693\n694\n695\n696\n697\n698\n699\n700\n701\n702\n703\n704\n705\n706\n707\n708\n709\n710\n711\n712\n713\n714\n715\n716\n717\n718\n719\n720\n721\n722\n723\n724\n725\n726\n727\n728\n729\n730\n731\n732
\n733\n734\n735\n736\n737\n738\n739\n740\n741\n742\n743\n744\n745\n746\n747\n748\n749\n750\n751\n752\n753\n754\n755\n756\n757\n758\n759\n760\n761\n762\n763\n764\n765\n766\n767\n768\n769\n770\n771\n772\n773\n774\n775\n776\n777\n778\n779\n780\n781\n782\n783\n784\n785\n786\n787\n788\n789\n790\n791\n792\n793\n794\n795\n796\n797\n798\n799\n800\n801\n802\n803\n804\n805\n806\n807\n808\n809\n810\n811\n812\n813\n814\n815\n816\n817\n818\n819\n820\n821\n822\n823\n824\n825\n826\n827\n828\n829\n830\n831\n832\n833\n834\n835\n836\n837\n838\n839\n840\n841\n842\n843\n844\n845\n846\n847\n848\n849\n850\n851\n852\n853\n854\n855\n856\n857\n858\n859\n860\n861\n862\n863\n864\n865\n866\n867\n868\n869\n870\n871\n872\n873\n874\n875\n876\n877\n878\n879\n880\n881\n882\n883\n884\n885\n886\n887\n888\n889\n890\n891\n892\n893\n894\n895\n896\n897\n898\n899\n900\n901\n902\n903\n904\n905\n906\n907\n908\n909\n910\n911\n912\n913\n914\n915\n916\n917\n918\n919\n920\n921\n922\n923\n924\n925\n926\n927\n928\n929\n930\n931\n932\n933\n934\n935\n936\n937\n938\n939\n940\n941\n942\n943\n944\n945\n946\n947\n948\n949\n950\n951\n952\n953\n954\n955\n956\n957\n958\n959\n960\n961\n962\n963\n964\n965\n966\n967\n968\n969\n970\n971\n972\n973\n974\n975\n976\n977\n978\n979\n980\n981\n982\n983\n984\n985\n986\n987\n988\n989\n990\n991\n992\n993\n994\n995\n996\n997\n998\n999\n1000\n1001\n1002\n1003\n1004\n1005\n1006\n1007\n1008\n1009\n1010\n1011\n1012\n1013\n1014\n1015\n1016\n1017\n1018\n1019\n1020\n1021\n1022\n1023\n1024\n1025\n1026\n1027\n1028\n1029\n1030\n1031\n1032\n1033\n1034\n1035\n1036\n1037\n1038\n1039\n1040\n1041\n1042\n1043\n1044\n1045\n1046\n1047\n1048\n1049\n1050\n1051\n1052\n1053\n1054\n1055\n1056\n1057\n1058\n1059\n1060\n1061\n1062\n1063\n1064\n1065\n1066\n1067\n1068\n1069\n1070\n1071\n1072\n1073\n1074\n1075\n1076\n1077\n1078\n1079\n1080\n1081\n1082\n1083\n1084\n1085\n1086\n1087\n1088\n1089\n1090\n1091\n1092\n1093\n1094\n1095\n1096\n1097\n1098\n1099\n1100\n1101\n1102\n1103\n1104\n1105\n1106\n1107\n1108\n1109\n1110\n1111\n1112\n1113\n1114\n1115\n1116\n1117\n1118\n1119\n1120\n1121\n1122\n1123\n1124\n1125\n1126\n1127\n1128\n1129\n1130\n1131\n1132\n1133\n1134\n1135\n1136\n1137\n1138\n1139\n1140\n1141\n1142\n1143\n1144\n1145\n1146\n1147\n1148\n1149\n1150\n1151\n1152\n1153\n1154\n1155\n1156\n1157\n1158\n1159\n1160\n1161\n1162\n1163\n1164\n1165\n1166\n1167\n1168\n1169\n1170\n1171\n1172\n1173\n1174\n1175\n1176\n1177\n1178\n1179\n1180\n1181\n1182\n1183\n1184\n1185\n1186\n1187\n1188\n1189\n1190\n1191\n1192\n1193\n1194\n1195\n1196\n1197\n1198\n1199\n1200\n1201\n1202\n1203\n1204\n1205\n1206\n1207\n1208\n1209\n1210\n1211\n1212\n1213\n1214\n1215\n1216\n1217\n1218\n1219\n1220\n1221\n1222\n1223\n1224\n1225\n1226\n1227\n1228\n1229\n1230\n1231\n1232\n1233\n1234\n1235\n1236\n1237\n1238\n1239\n1240\n1241\n1242\n1243\n1244\n1245\n1246\n1247\n1248\n1249\n1250\n1251\n1252\n1253\n1254\n1255\n1256\n1257\n1258\n1259\n1260\n1261\n1262\n1263\n1264\n1265\n1266\n1267\n1268\n1269\n1270\n1271\n1272\n1273\n1274\n1275\n1276\n1277\n1278\n1279\n1280\n1281\n1282\n1283\n1284\n1285\n1286\n1287\n1288\n1289\n1290\n1291\n1292\n1293\n1294\n1295\n1296\n1297\n1298\n1299\n1300\n1301\n1302\n1303\n1304\n1305\n1306\n1307\n1308\n1309\n1310\n1311\n1312\n1313\n1314\n1315\n1316\n1317\n1318\n1319\n1320\n1321\n1322\n1323\n1324\n1325\n1326\n1327\n1328\n1329\n1330\n1331\n1332\n1333\n1334\n1335\n1336\n1337\n1338\n1339\n1340\n1341\n1342\n1343\n1344\n1345\n1346\n1347\n1348\n1349\n1350\n1351\n1352\n1353\n1354\n1355\n1356\n1357\n1358\n1359\n1360\n1361\n1362\n1363\n1364\n1365\n1366\n1367\n1368\n1369
\n1370\n1371\n1372\n1373\n1374\n1375\n1376\n1377\n1378\n1379\n1380\n1381\n1382\n1383\n1384\n1385\n1386\n1387\n1388\n1389\n1390\n1391\n1392\n1393\n1394\n1395\n1396\n1397\n1398\n1399\n1400\n1401\n1402\n1403\n1404\n1405\n1406\n1407\n1408\n1409\n1410\n1411\n1412\n1413\n1414\n1415\n1416\n1417\n1418\n1419\n1420\n1421\n1422\n1423\n1424\n1425\n1426\n1427\n1428\n1429\n1430\n1431\n1432\n1433\n1434\n1435\n1436\n1437\n1438\n1439\n1440\n1441\n1442\n1443\n1444\n1445\n1446\n1447\n1448\n1449\n1450\n1451\n1452\n1453\n1454\n1455\n1456\n1457\n1458\n1459\n1460\n1461\n1462\n1463\n1464\n1465\n1466\n1467\n1468\n1469\n1470\n1471\n1472\n1473\n1474\n1475\n1476\n1477\n1478\n1479\n1480\n1481\n1482\n1483\n1484\n1485\n1486\n1487\n1488\n1489\n1490\n1491\n1492\n1493\n1494\n1495\n1496\n1497\n1498\n1499\n1500\n1501\n1502\n1503\n1504\n1505\n1506\n1507\n1508\n1509\n1510\n1511\n1512\n1513\n1514\n1515\n1516\n1517\n1518\n1519\n1520\n1521\n1522\n1523\n1524\n1525\n1526\n1527\n1528\n1529\n1530\n1531\n1532\n1533\n1534\n1535\n1536\n1537\n1538\n1539\n1540\n1541\n1542\n1543\n1544\n1545\n1546\n1547\n1548\n1549\n1550\n1551\n1552\n1553\n1554\n1555\n1556\n1557\n1558\n1559\n1560\n1561\n1562\n1563\n1564\n1565\n1566\n1567\n1568\n1569\n1570\n1571\n1572\n1573\n1574\n1575\n1576\n1577\n1578\n1579\n1580\n1581\n1582\n1583\n1584\n1585\n1586\n1587\n1588\n1589\n1590\n1591\n1592\n1593\n1594\n1595\n1596\n1597\n1598\n1599\n1600\n1601\n1602\n1603\n1604\n1605\n1606\n1607\n1608\n1609\n1610\n1611\n1612\n1613\n1614\n1615\n1616\n1617\n1618\n1619\n1620\n1621\n1622\n1623\n1624\n1625\n1626\n1627\n1628\n1629\n1630\n1631\n1632\n1633\n1634\n1635\n1636\n1637\n1638\n1639\n1640\n1641\n1642\n1643\n1644\n1645\n1646\n1647\n1648\n1649\n1650\n1651\n1652\n1653\n1654\n1655\n1656\n1657\n1658\n1659\n1660\n1661\n1662\n1663\n1664\n1665\n1666\n1667\n1668\n1669\n1670\n1671\n1672\n1673\n1674\n1675\n1676\n1677\n1678\n1679\n1680\n1681\n1682\n1683\n1684\n1685\n1686\n1687\n1688\n1689\n1690\n1691\n1692\n1693\n1694\n1695\n1696\n1697\n1698\n1699\n1700\n1701\n1702\n1703\n1704\n1705\n1706\n1707\n1708\n1709\n1710\n1711\n1712\n1713\n1714\n1715\n1716\n1717\n1718\n1719\n1720\n1721\n1722\n1723\n1724\n1725\n1726\n1727\n1728\n1729\n1730\n1731\n1732\n1733\n1734\n1735\n1736\n1737\n1738\n1739\n1740\n1741\n1742\n1743\n1744\n1745\n1746\n1747\n1748\n1749\n1750\n1751\n1752\n1753\n1754\n1755\n1756\n1757\n1758\n1759\n1760\n1761\n1762\n1763\n1764\n1765\n1766\n1767\n1768\n1769\n1770\n1771\n1772\n1773\n1774\n1775\n1776\n1777\n1778\n1779\n1780\n1781\n1782\n1783\n1784\n1785\n1786\n1787\n1788\n1789\n1790\n1791\n1792\n1793\n1794\n1795\n1796\n1797\n1798\n1799\n1800\n1801\n1802\n1803\n1804\n1805\n1806\n1807\n1808\n1809\n1810\n1811\n1812\n1813\n1814\n1815\n1816\n1817\n1818\n1819\n1820\n1821\n1822\n1823\n1824\n1825\n1826\n1827\n1828\n1829\n1830\n1831\n1832\n1833\n1834\n1835\n1836\n1837\n1838\n1839\n1840\n1841\n1842\n1843\n1844\n1845\n1846\n1847\n1848\n1849\n1850\n1851\n1852\n1853\n1854\n1855\n1856\n1857\n1858\n1859\n1860\n1861\n1862\n1863\n1864\n1865\n1866\n1867\n1868\n1869\n1870\n1871\n1872\n1873\n1874\n1875\n1876\n1877\n1878\n1879\n1880\n1881\n1882\n1883\n1884\n1885\n1886\n1887\n1888\n1889\n1890\n1891\n1892\n1893\n1894\n1895\n1896\n1897\n1898\n1899\n1900\n1901\n1902\n1903\n1904\n1905\n1906\n1907\n1908\n1909\n1910\n1911\n1912\n1913\n1914\n1915\n1916\n1917\n1918\n1919\n1920\n1921\n1922\n1923\n1924\n1925\n1926\n1927\n1928\n1929\n1930\n1931\n1932\n1933\n1934\n1935\n1936\n1937\n1938\n1939\n1940\n1941\n1942\n1943\n1944\n1945\n1946\n1947\n1948\n1949\n1950\n1951\n1952\n1953\n1954\n1955\n1956\n1957\n1958\n1959\n1960\n1961\n1
962\n1963\n1964\n1965\n1966\n1967\n1968\n1969\n1970\n1971\n1972\n1973\n1974\n1975\n1976\n1977\n1978\n1979\n1980\n1981\n1982\n1983\n1984\n1985\n1986\n1987\n1988\n1989\n1990\n1991\n1992\n1993\n1994\n1995\n1996\n1997\n1998\n1999\n2000\n2001\n2002\n2003\n2004\n2005\n2006\n2007\n2008\n2009\n2010\n2011\n2012\n2013\n2014\n2015\n2016\n2017\n2018\n2019\n2020\n2021\n2022\n2023\n2024\n2025\n2026\n2027\n2028\n2029\n2030\n2031\n2032\n2033\n2034\n2035\n2036\n2037\n2038\n2039\n2040\n2041\n2042\n2043\n2044\n2045\n2046\n2047\n2048\n2049\n2050\n2051\n2052\n2053\n2054\n2055\n2056\n2057\n2058\n2059\n2060\n2061\n2062\n2063\n2064\n2065\n2066\n2067\n2068\n2069\n2070\n2071\n2072\n2073\n2074\n2075\n2076\n2077\n2078\n2079\n2080\n2081\n2082\n2083\n2084\n2085\n2086\n2087\n2088\n2089\n2090\n2091\n2092\n2093\n2094\n2095\n2096\n2097\n2098\n2099\n2100\n2101\n2102\n2103\n2104\n2105\n2106\n2107\n2108\n2109\n2110\n2111\n2112\n2113\n2114\n2115\n2116\n2117\n2118\n2119\n2120\n2121\n2122\n2123\n2124\n2125\n2126\n2127\n2128\n2129\n2130\n2131\n2132\n2133\n2134\n2135\n2136\n2137\n2138\n2139\n2140\n2141\n2142\n2143\n2144\n2145\n2146\n2147\n2148\n2149\n2150\n2151\n2152\n2153\n2154\n2155\n2156\n2157\n2158\n2159\n2160\n2161\n2162\n2163\n2164\n2165\n2166\n2167\n2168\n2169\n2170\n2171\n2172\n2173\n2174\n2175\n2176\n2177\n2178\n2179\n2180\n2181\n2182\n2183\n2184\n2185\n2186\n2187\n2188\n2189\n2190\n2191\n2192\n2193\n2194\n2195\n2196\n2197\n2198\n2199\n2200\n2201\n2202\n2203\n2204\n2205\n2206\n2207\n2208\n2209\n2210\n2211\n2212\n2213\n2214\n2215\n2216\n2217\n2218\n2219\n2220\n2221\n2222\n2223\n2224\n2225\n2226\n2227\n2228\n2229\n2230\n2231\n2232\n2233\n2234\n2235\n2236\n2237\n2238\n2239\n2240\n2241\n2242\n2243\n2244\n2245\n2246\n2247\n2248\n2249\n2250\n2251\n2252\n2253\n2254\n2255\n2256\n2257\n2258\n2259\n2260\n2261\n2262\n2263\n2264\n2265\n2266\n2267\n2268\n2269\n2270\n2271\n2272\n2273\n2274\n2275\n2276\n2277\n2278\n2279\n2280\n2281\n2282\n2283\n2284\n2285\n2286\n2287\n2288\n2289\n2290\n2291\n2292\n2293\n2294\n2295\n2296\n2297\n2298\n2299\n2300\n2301\n2302\n2303\n2304\n2305\n2306\n2307\n2308\n2309\n2310\n2311\n2312\n2313\n2314\n2315\n2316\n2317\n2318\n2319\n2320\n2321\n2322\n2323\n2324\n2325\n2326\n2327\n2328\n2329\n2330\n2331\n2332\n2333\n2334\n2335\n2336\n2337\n2338\n2339\n2340\n2341\n2342\n2343\n2344\n2345\n2346\n2347\n2348\n2349\n2350\n2351\n2352\n2353\n2354\n2355\n2356\n2357\n2358\n2359\n2360\n2361\n2362\n2363\n2364\n2365\n2366\n2367\n2368\n2369\n2370\n2371\n2372\n2373\n2374\n2375\n2376\n2377\n2378\n2379\n2380\n2381\n2382\n2383\n2384\n2385\n2386\n2387\n2388\n2389\n2390\n2391\n2392\n2393\n2394\n2395\n2396\n2397\n2398\n2399\n2400\n2401\n2402\n2403\n2404\n2405\n2406\n2407\n2408\n2409\n2410\n2411\n2412\n2413\n2414\n2415\n2416\n2417\n2418\n2419\n2420\n2421\n2422\n2423\n2424\n2425\n2426\n2427\n2428\n2429\n2430\n2431\n2432\n2433\n2434\n2435\n2436\n2437\n2438\n2439\n2440\n2441\n2442\n2443\n2444\n2445\n2446\n2447\n2448\n2449\n2450\n2451\n2452\n2453\n2454\n2455\n2456\n2457\n2458\n2459\n2460\n2461\n2462\n2463\n2464\n2465\n2466\n2467\n2468\n2469\n2470\n2471\n2472\n2473\n2474\n2475\n2476\n2477\n2478\n2479\n2480\n2481\n2482\n2483\n2484\n2485\n2486\n2487\n2488\n2489\n2490\n2491\n2492\n2493\n2494\n2495\n2496\n2497\n2498\n2499\n2500\n2501\n2502\n2503\n2504\n2505\n2506\n2507\n2508\n2509\n2510\n2511\n2512\n2513\n2514\n2515\n2516\n2517\n2518\n2519\n2520\n2521\n2522\n2523\n2524\n2525\n2526\n2527\n2528\n2529\n2530\n2531\n2532\n2533\n2534\n2535\n2536\n2537\n2538\n2539\n2540\n2541\n2542\n2543\n2544\n2545\n2546\n2547\n2548\n2549\n2550\n2551\n2552\n2553\n2554
\n2555\n2556\n2557\n2558\n2559\n2560\n2561\n2562\n2563\n2564\n2565\n2566\n2567\n2568\n2569\n2570\n2571\n2572\n2573\n2574\n2575\n2576\n2577\n2578\n2579\n2580\n2581\n2582\n2583\n2584\n2585\n2586\n2587\n2588\n2589\n2590\n2591\n2592\n2593\n2594\n2595\n2596\n2597\n2598\n2599\n2600\n2601\n2602\n2603\n2604\n2605\n2606\n2607\n2608\n2609\n2610\n2611\n2612\n2613\n2614\n2615\n2616\n2617\n2618\n2619\n2620\n2621\n2622\n2623\n2624\n2625\n2626\n2627\n2628\n2629\n2630\n2631\n2632\n2633\n2634\n2635\n2636\n2637\n2638\n2639\n2640\n2641\n2642\n2643\n2644\n2645\n2646\n2647\n2648\n2649\n2650\n2651\n2652\n2653\n2654\n2655\n2656\n2657\n2658\n2659\n2660\n2661\n2662\n2663\n2664\n2665\n2666\n2667\n2668\n2669\n2670\n2671\n2672\n2673\n2674\n2675\n2676\n2677\n2678\n2679\n2680\n2681\n2682\n2683\n2684\n2685\n2686\n2687\n2688\n2689\n2690\n2691\n2692\n2693\n2694\n2695\n2696\n2697\n2698\n2699\n2700\n2701\n2702\n2703\n2704\n2705\n2706\n2707\n2708\n2709\n2710\n2711\n2712\n2713\n2714\n2715\n2716\n2717\n2718\n2719\n2720\n2721\n2722\n2723\n2724\n2725\n2726\n2727\n2728\n2729\n2730\n2731\n2732\n2733\n2734\n2735\n2736\n2737\n2738\n2739\n2740\n2741\n2742\n2743\n2744\n2745\n2746\n2747\n2748\n2749\n2750\n2751\n2752\n2753\n2754\n2755\n2756\n2757\n2758\n2759\n2760\n2761\n2762\n2763\n2764\n2765\n2766\n2767\n2768\n2769\n2770\n2771\n2772\n2773\n2774\n2775\n2776\n2777\n2778\n2779\n2780\n2781\n2782\n2783\n2784\n2785\n2786\n2787\n2788\n2789\n2790\n2791\n2792\n2793\n2794\n2795\n2796\n2797\n2798\n2799\n2800\n2801\n2802\n2803\n2804\n2805\n2806\n2807\n2808\n2809\n2810\n2811\n2812\n2813\n2814\n2815\n2816\n2817\n2818\n2819\n2820\n2821\n2822\n2823\n2824\n2825\n2826\n2827\n2828\n2829\n2830\n2831\n2832\n2833\n2834\n2835\n2836\n2837\n2838\n2839\n2840\n2841\n2842\n2843\n2844\n2845\n2846\n2847\n2848\n2849\n2850\n2851\n2852\n2853\n2854\n2855\n2856\n2857\n2858\n2859\n2860\n2861\n2862\n2863\n2864\n2865\n2866\n2867\n2868\n2869\n2870\n2871\n2872\n2873\n2874\n2875\n2876\n2877\n2878\n2879\n2880\n2881\n2882\n2883\n2884\n2885\n2886\n2887\n2888\n2889\n2890\n2891\n2892\n2893\n2894\n2895\n2896\n2897\n2898\n2899\n2900\n2901\n2902\n2903\n2904\n2905\n2906\n2907\n2908\n2909\n2910\n2911\n2912\n2913\n2914\n2915\n2916\n2917\n2918\n2919\n2920\n2921\n2922\n2923\n2924\n2925\n2926\n2927\n2928\n2929\n2930\n2931\n2932\n2933\n2934\n2935\n2936\n2937\n2938\n2939\n2940\n2941\n2942\n2943\n2944\n2945\n2946\n2947\n2948\n2949\n2950\n2951\n2952\n2953\n2954\n2955\n2956\n2957\n2958\n2959\n2960\n2961\n2962\n2963\n2964\n2965\n2966\n2967\n2968\n2969\n2970\n2971\n2972\n2973\n2974\n2975\n2976\n2977\n2978\n2979\n2980\n2981\n2982\n2983\n2984\n2985\n2986\n2987\n2988\n2989\n2990\n2991\n2992\n2993\n2994\n2995\n2996\n2997\n2998\n2999\n3000\n3001\n3002\n3003\n3004\n3005\n3006\n3007\n3008\n3009\n3010\n3011\n3012\n3013\n3014\n3015\n3016\n3017\n3018\n3019\n3020\n3021\n3022\n3023\n3024\n3025\n3026\n3027\n3028\n3029\n3030\n3031\n3032\n3033\n3034\n3035\n3036\n3037\n3038\n3039\n3040\n3041\n3042\n3043\n3044\n3045\n3046\n3047\n3048\n3049\n3050\n3051\n3052\n3053\n3054\n3055\n3056\n3057\n3058\n3059\n3060\n3061\n3062\n3063\n3064\n3065\n3066\n3067\n3068\n3069\n3070\n3071\n3072\n3073\n3074\n3075\n3076\n3077\n3078\n3079\n3080\n3081\n3082\n3083\n3084\n3085\n3086\n3087\n3088\n3089\n3090\n3091\n3092\n3093\n3094\n3095\n3096\n3097\n3098\n3099\n3100\n3101\n3102\n3103\n3104\n3105\n3106\n3107\n3108\n3109\n3110\n3111\n3112\n3113\n3114\n3115\n3116\n3117\n3118\n3119\n3120\n3121\n3122\n3123\n3124\n3125\n3126\n3127\n3128\n3129\n3130\n3131\n3132\n3133\n3134\n3135\n3136\n3137\n3138\n3139\n3140\n3141\n3142\n3143\n3144\n3145\n3146\n3
147\n3148\n3149\n3150\n3151\n3152\n3153\n3154\n3155\n3156\n3157\n3158\n3159\n3160\n3161\n3162\n3163\n3164\n3165\n3166\n3167\n3168\n3169\n3170\n3171\n3172\n3173\n3174\n3175\n3176\n3177\n3178\n3179\n3180\n3181\n3182\n3183\n3184\n3185\n3186\n3187\n3188\n3189\n3190\n3191\n3192\n3193\n3194\n3195\n3196\n3197\n3198\n3199\n3200\n3201\n3202\n3203\n3204\n3205\n3206\n3207\n3208\n3209\n3210\n3211\n3212\n3213\n3214\n3215\n3216\n3217\n3218\n3219\n3220\n3221\n3222\n3223\n3224\n3225\n3226\n3227\n3228\n3229\n3230\n3231\n3232\n3233\n3234\n3235\n3236\n3237\n3238\n3239\n3240\n3241\n3242\n3243\n3244\n3245\n3246\n3247\n3248\n3249\n3250\n3251\n3252\n3253\n3254\n3255\n3256\n3257\n3258\n3259\n3260\n3261\n3262\n3263\n3264\n3265\n3266\n3267\n3268\n3269\n3270\n3271\n3272\n3273\n3274\n3275\n3276\n3277\n3278\n3279\n3280\n3281\n3282\n3283\n3284\n3285\n3286\n3287\n3288\n3289\n3290\n3291\n3292\n3293\n3294\n3295\n3296\n3297\n3298\n3299\n3300\n3301\n3302\n3303\n3304\n3305\n3306\n3307\n3308\n3309\n3310\n3311\n3312\n3313\n3314\n3315\n3316\n3317\n3318\n3319\n3320\n3321\n3322\n3323\n3324\n3325\n3326\n3327\n3328\n3329\n3330\n3331\n3332\n3333\n3334\n3335\n3336\n3337\n3338\n3339\n3340\n3341\n3342\n3343\n3344\n3345\n3346\n3347\n3348\n3349\n3350\n3351\n3352\n3353\n3354\n3355\n3356\n3357\n3358\n3359\n3360\n3361\n3362\n3363\n3364\n3365\n3366\n3367\n3368\n3369\n3370\n3371\n3372\n3373\n3374\n3375\n3376\n3377\n3378\n3379\n3380\n3381\n3382\n3383\n3384\n3385\n3386\n3387\n3388\n3389\n3390\n3391\n3392\n3393\n3394\n3395\n3396\n3397\n3398\n3399\n3400\n3401\n3402\n3403\n3404\n3405\n3406\n3407\n3408\n3409\n3410\n3411\n3412\n3413\n3414\n3415\n3416\n3417\n3418\n3419\n3420\n3421\n3422\n3423\n3424\n3425\n3426\n3427\n3428\n3429\n3430\n3431\n3432\n3433\n3434\n3435\n3436\n3437\n3438\n3439\n3440\n3441\n3442\n3443\n3444\n3445\n3446\n3447\n3448\n3449\n3450\n3451\n3452\n3453\n3454\n3455\n3456\n3457\n3458\n3459\n3460\n3461\n3462\n3463\n3464\n3465\n3466\n3467\n3468\n3469\n3470\n3471\n3472\n3473\n3474\n3475\n3476\n3477\n3478\n3479\n3480\n3481\n3482\n3483\n3484\n3485\n3486\n3487\n3488\n3489\n3490\n3491\n3492\n3493\n3494\n3495\n3496\n3497\n3498\n3499\n3500\n3501\n3502\n3503\n3504\n3505\n3506\n3507\n3508\n3509\n3510\n3511\n3512\n3513\n3514\n3515\n3516\n3517\n3518\n3519\n3520\n3521\n3522\n3523\n3524\n3525\n3526\n3527\n3528\n3529\n3530\n3531\n3532\n3533\n3534\n3535\n3536\n3537\n3538\n3539\n3540\n3541\n3542\n3543\n3544\n3545\n3546\n3547\n3548\n3549\n3550\n3551\n3552\n3553\n3554\n3555\n3556\n3557\n3558\n3559\n3560\n3561\n3562\n3563\n3564\n3565\n3566\n3567\n3568\n3569\n3570\n3571\n3572\n3573\n3574\n3575\n3576\n3577\n3578\n3579\n3580\n3581\n3582\n3583\n3584\n3585\n3586\n3587\n3588\n3589\n3590\n3591\n3592\n3593\n3594\n3595\n3596\n3597\n3598\n3599\n3600\n3601\n3602\n3603\n3604\n3605\n3606\n3607\n3608\n3609\n3610\n3611\n3612\n3613\n3614\n3615\n3616\n3617\n3618\n3619\n3620\n3621\n3622\n3623\n3624\n3625\n3626\n3627\n3628\n3629\n3630\n3631\n3632\n3633\n3634\n3635\n3636\n3637\n3638\n3639\n3640\n3641\n3642\n3643\n3644\n3645\n3646\n3647\n3648\n3649\n3650\n3651\n3652\n3653\n3654\n3655\n3656\n3657\n3658\n3659\n3660\n3661\n3662\n3663\n3664\n3665\n3666\n3667\n3668\n3669\n3670\n3671\n3672\n3673\n3674\n3675\n3676\n3677\n3678\n3679\n3680\n3681\n3682\n3683\n3684\n3685\n3686\n3687\n3688\n3689\n3690\n3691\n3692\n3693\n3694\n3695\n3696\n3697\n3698\n3699\n3700\n3701\n3702\n3703\n3704\n3705\n3706\n3707\n3708\n3709\n3710\n3711\n3712\n3713\n3714\n3715\n3716\n3717\n3718\n3719\n3720\n3721\n3722\n3723\n3724\n3725\n3726\n3727\n3728\n3729\n3730\n3731\n3732\n3733\n3734\n3735\n3736\n3737\n3738\n3739
\n3740\n3741\n3742\n3743\n3744\n3745\n3746\n3747\n3748\n3749\n3750\n3751\n3752\n3753\n3754\n3755\n3756\n3757\n3758\n3759\n3760\n3761\n3762\n3763\n3764\n3765\n3766\n3767\n3768\n3769\n3770\n3771\n3772\n3773\n3774\n3775\n3776\n3777\n3778\n3779\n3780\n3781\n3782\n3783\n3784\n3785\n3786\n3787\n3788\n3789\n3790\n3791\n3792\n3793\n3794\n3795\n3796\n3797\n3798\n3799\n3800\n3801\n3802\n3803\n3804\n3805\n3806\n3807\n3808\n3809\n3810\n3811\n3812\n3813\n3814\n3815\n3816\n3817\n3818\n3819\n3820\n3821\n3822\n3823\n3824\n3825\n3826\n3827\n3828\n3829\n3830\n3831\n3832\n3833\n3834\n3835\n3836\n3837\n3838\n3839\n3840\n3841\n3842\n3843\n3844\n3845\n3846\n3847\n3848\n3849\n3850\n3851\n3852\n3853\n3854\n3855\n3856\n3857\n3858\n3859\n3860\n3861\n3862\n3863\n3864\n3865\n3866\n3867\n3868\n3869\n3870\n3871\n3872\n3873\n3874\n3875\n3876\n3877\n3878\n3879\n3880\n3881\n3882\n3883\n3884\n3885\n3886\n3887\n3888\n3889\n3890\n3891\n3892\n3893\n3894\n3895\n3896\n3897\n3898\n3899\n3900\n3901\n3902\n3903\n3904\n3905\n3906\n3907\n3908\n3909\n3910\n3911\n3912\n3913\n3914\n3915\n3916\n3917\n3918\n3919\n3920\n3921\n3922\n3923\n3924\n3925\n3926\n3927\n3928\n3929\n3930\n3931\n3932\n3933\n3934\n3935\n3936\n3937\n3938\n3939\n3940\n3941\n3942\n3943\n3944\n3945\n3946\n3947\n3948\n3949\n3950\n3951\n3952\n3953\n3954\n3955\n3956\n3957\n3958\n3959\n3960\n3961\n3962\n3963\n3964\n3965\n3966\n3967\n3968\n3969\n3970\n3971\n3972\n3973\n3974\n3975\n3976\n3977\n3978\n3979\n3980\n3981\n3982\n3983\n3984\n3985\n3986\n3987\n3988\n3989\n3990\n3991\n3992\n3993\n3994\n3995\n3996\n3997\n3998\n3999\n4000\n4001\n4002\n4003\n4004\n4005\n4006\n4007\n4008\n4009\n4010\n4011\n4012\n4013\n4014\n4015\n4016\n4017\n4018\n4019\n4020\n4021\n4022\n4023\n4024\n4025\n4026\n4027\n4028\n4029\n4030\n4031\n4032\n4033\n4034\n4035\n4036\n4037\n4038\n4039\n4040\n4041\n4042\n4043\n4044\n4045\n4046\n4047\n4048\n4049\n4050\n4051\n4052\n4053\n4054\n4055\n4056\n4057\n4058\n4059\n4060\n4061\n4062\n4063\n4064\n4065\n4066\n4067\n4068\n4069\n4070\n4071\n4072\n4073\n4074\n4075\n4076\n4077\n4078\n4079\n4080\n4081\n4082\n4083\n4084\n4085\n4086\n4087\n4088\n4089\n4090\n4091\n4092\n4093\n4094\n4095\n4096\n4097\n4098\n4099\n4100\n4101\n4102\n4103\n4104\n4105\n4106\n4107\n4108\n4109\n4110\n4111\n4112\n4113\n4114\n4115\n4116\n4117\n4118\n4119\n4120\n4121\n4122\n4123\n4124\n4125\n4126\n4127\n4128\n4129\n4130\n4131\n4132\n4133\n4134\n4135\n4136\n4137\n4138\n4139\n4140\n4141\n4142\n4143\n4144\n4145\n4146\n4147\n4148\n4149\n4150\n4151\n4152\n4153\n4154\n4155\n4156\n4157\n4158\n4159\n4160\n4161\n4162\n4163\n4164\n4165\n4166\n4167\n4168\n4169\n4170\n4171\n4172\n4173\n4174\n4175\n4176\n4177\n4178\n4179\n4180\n4181\n4182\n4183\n4184\n4185\n4186\n4187\n4188\n4189\n4190\n4191\n4192\n4193\n4194\n4195\n4196\n4197\n4198\n4199\n4200\n4201\n4202\n4203\n4204\n4205\n4206\n4207\n4208\n4209\n4210\n4211\n4212\n4213\n4214\n4215\n4216\n4217\n4218\n4219\n4220\n4221\n4222\n4223\n4224\n4225\n4226\n4227\n4228\n4229\n4230\n4231\n4232\n4233\n4234\n4235\n4236\n4237\n4238\n4239\n4240\n4241\n4242\n4243\n4244\n4245\n4246\n4247\n4248\n4249\n4250\n4251\n4252\n4253\n4254\n4255\n4256\n4257\n4258\n4259\n4260\n4261\n4262\n4263\n4264\n4265\n4266\n4267\n4268\n4269\n4270\n4271\n4272\n4273\n4274\n4275\n4276\n4277\n4278\n4279\n4280\n4281\n4282\n4283\n4284\n4285\n4286\n4287\n4288\n4289\n4290\n4291\n4292\n4293\n4294\n4295\n4296\n4297\n4298\n4299\n4300\n4301\n4302\n4303\n4304\n4305\n4306\n4307\n4308\n4309\n4310\n4311\n4312\n4313\n4314\n4315\n4316\n4317\n4318\n4319\n4320\n4321\n4322\n4323\n4324\n4325\n4326\n4327\n4328\n4329\n4330\n4331\n4
332\n4333\n4334\n4335\n4336\n4337\n4338\n4339\n4340\n4341\n4342\n4343\n4344\n4345\n4346\n4347\n4348\n4349\n4350\n4351\n4352\n4353\n4354\n4355\n4356\n4357\n4358\n4359\n4360\n4361\n4362\n4363\n4364\n4365\n4366\n4367\n4368\n4369\n4370\n4371\n4372\n4373\n4374\n4375\n4376\n4377\n4378\n4379\n4380\n4381\n4382\n4383\n4384\n4385\n4386\n4387\n4388\n4389\n4390\n4391\n4392\n4393\n4394\n4395\n4396\n4397\n4398\n4399\n4400\n4401\n4402\n4403\n4404\n4405\n4406\n4407\n4408\n4409\n4410\n4411\n4412\n4413\n4414\n4415\n4416\n4417\n4418\n4419\n4420\n4421\n4422\n4423\n4424\n4425\n4426\n4427\n4428\n4429\n4430\n4431\n4432\n4433\n4434\n4435\n4436\n4437\n4438\n4439\n4440\n4441\n4442\n4443\n4444\n4445\n4446\n4447\n4448\n4449\n4450\n4451\n4452\n4453\n4454\n4455\n4456\n4457\n4458\n4459\n4460\n4461\n4462\n4463\n4464\n4465\n4466\n4467\n4468\n4469\n4470\n4471\n4472\n4473\n4474\n4475\n4476\n4477\n4478\n4479\n4480\n4481\n4482\n4483\n4484\n4485\n4486\n4487\n4488\n4489\n4490\n4491\n4492\n4493\n4494\n4495\n4496\n4497\n4498\n4499\n4500\n4501\n4502\n4503\n4504\n4505\n4506\n4507\n4508\n4509\n4510\n4511\n4512\n4513\n4514\n4515\n4516\n4517\n4518\n4519\n4520\n4521\n4522\n4523\n4524\n4525\n4526\n4527\n4528\n4529\n4530\n4531\n4532\n4533\n4534\n4535\n4536\n4537\n4538\n4539\n4540\n4541\n4542\n4543\n4544\n4545\n4546\n4547\n4548\n4549\n4550\n4551\n4552\n4553\n4554\n4555\n4556\n4557\n4558\n4559\n4560\n4561\n4562\n4563\n4564\n4565\n4566\n4567\n4568\n4569\n4570\n4571\n4572\n4573\n4574\n4575\n4576\n4577\n4578\n4579\n4580\n4581\n4582\n4583\n4584\n4585\n4586\n4587\n4588\n4589\n4590\n4591\n4592\n4593\n4594\n4595\n4596\n4597\n4598\n4599\n4600\n4601\n4602\n4603\n4604\n4605\n4606\n4607\n4608\n4609\n4610\n4611\n4612\n4613\n4614\n4615\n4616\n4617\n4618\n4619\n4620\n4621\n4622\n4623\n4624\n4625\n4626\n4627\n4628\n4629\n4630\n4631\n4632\n4633\n4634\n4635\n4636\n4637\n4638\n4639\n4640\n4641\n4642\n4643\n4644\n4645\n4646\n4647\n4648\n4649\n4650\n4651\n4652\n4653\n4654\n4655\n4656\n4657\n4658\n4659\n4660\n4661\n4662\n4663\n4664\n4665\n4666\n4667\n4668\n4669\n4670\n4671\n4672\n4673\n4674\n4675\n4676\n4677\n4678\n4679\n4680\n4681\n4682\n4683\n4684\n4685\n4686\n4687\n4688\n4689\n4690\n4691\n4692\n4693\n4694\n4695\n4696\n4697\n4698\n4699\n4700\n4701\n4702\n4703\n4704\n4705\n4706\n4707\n4708\n4709\n4710\n4711\n4712\n4713\n4714\n4715\n4716\n4717\n4718\n4719\n4720\n4721\n4722\n4723\n4724\n4725\n4726\n4727\n4728\n4729\n4730\n4731\n4732\n4733\n4734\n4735\n4736\n4737\n4738\n4739\n4740\n4741\n4742\n4743\n4744\n4745\n4746\n4747\n4748\n4749\n4750\n4751\n4752\n4753\n4754\n4755\n4756\n4757\n4758\n4759\n4760\n4761\n4762\n4763\n4764\n4765\n4766\n4767\n4768\n4769\n4770\n4771\n4772\n4773\n4774\n4775\n4776\n4777\n4778\n4779\n4780\n4781\n4782\n4783\n4784\n4785\n4786\n4787\n4788\n4789\n4790\n4791\n4792\n4793\n4794\n4795\n4796\n4797\n4798\n4799\n4800\n4801\n4802\n4803\n4804\n4805\n4806\n4807\n4808\n4809\n4810\n4811\n4812\n4813\n4814\n4815\n4816\n4817\n4818\n4819\n4820\n4821\n4822\n4823\n4824\n4825\n4826\n4827\n4828\n4829\n4830\n4831\n4832\n4833\n4834\n4835\n4836\n4837\n4838\n4839\n4840\n4841\n4842\n4843\n4844\n4845\n4846\n4847\n4848\n4849\n4850\n4851\n4852\n4853\n4854\n4855\n4856\n4857\n4858\n4859\n4860\n4861\n4862\n4863\n4864\n4865\n4866\n4867\n4868\n4869\n4870\n4871\n4872\n4873\n4874\n4875\n4876\n4877\n4878\n4879\n4880\n4881\n4882\n4883\n4884\n4885\n4886\n4887\n4888\n4889\n4890\n4891\n4892\n4893\n4894\n4895\n4896\n4897\n4898\n4899\n4900\n4901\n4902\n4903\n4904\n4905\n4906\n4907\n4908\n4909\n4910\n4911\n4912\n4913\n4914\n4915\n4916\n4917\n4918\n4919\n4920\n4921\n4922\n4923\n4924
\n4925\n4926\n4927\n4928\n4929\n4930\n4931\n4932\n4933\n4934\n4935\n4936\n4937\n4938\n4939\n4940\n4941\n4942\n4943\n4944\n4945\n4946\n4947\n4948\n4949\n4950\n4951\n4952\n4953\n4954\n4955\n4956\n4957\n4958\n4959\n4960\n4961\n4962\n4963\n4964\n4965\n4966\n4967\n4968\n4969\n4970\n4971\n4972\n4973\n4974\n4975\n4976\n4977\n4978\n4979\n4980\n4981\n4982\n4983\n4984\n4985\n4986\n4987\n4988\n4989\n4990\n4991\n4992\n4993\n4994\n4995\n4996\n4997\n4998\n4999\n5000\n5001\n5002\n5003\n5004\n5005\n5006\n5007\n5008\n5009\n5010\n5011\n5012\n5013\n5014\n5015\n5016\n5017\n5018\n5019\n5020\n5021\n5022\n5023\n5024\n5025\n5026\n5027\n5028\n5029\n5030\n5031\n5032\n5033\n5034\n5035\n5036\n5037\n5038\n5039\n5040\n5041\n5042\n5043\n5044\n5045\n5046\n5047\n5048\n5049\n5050\n5051\n5052\n5053\n5054\n5055\n5056\n5057\n5058\n5059\n5060\n5061\n5062\n5063\n5064\n5065\n5066\n5067\n5068\n5069\n5070\n5071\n5072\n5073\n5074\n5075\n5076\n5077\n5078\n5079\n5080\n5081\n5082\n5083\n5084\n5085\n5086\n5087\n5088\n5089\n5090\n5091\n5092\n5093\n5094\n5095\n5096\n5097\n5098\n5099\n5100\n5101\n5102\n5103\n5104\n5105\n5106\n5107\n5108\n5109\n5110\n5111\n5112\n5113\n5114\n5115\n5116\n5117\n5118\n5119\n5120\n5121\n5122\n5123\n5124\n5125\n5126\n5127\n5128\n5129\n5130\n5131\n5132\n5133\n5134\n5135\n5136\n5137\n5138\n5139\n5140\n5141\n5142\n5143\n5144\n5145\n5146\n5147\n5148\n5149\n5150\n5151\n5152\n5153\n5154\n5155\n5156\n5157\n5158\n5159\n5160\n5161\n5162\n5163\n5164\n5165\n5166\n5167\n5168\n5169\n5170\n5171\n5172\n5173\n5174\n5175\n5176\n5177\n5178\n5179\n5180\n5181\n5182\n5183\n5184\n5185\n5186\n5187\n5188\n5189\n5190\n5191\n5192\n5193\n5194\n5195\n5196\n5197\n5198\n5199\n5200\n5201\n5202\n5203\n5204\n5205\n5206\n5207\n5208\n5209\n5210\n5211\n5212\n5213\n5214\n5215\n5216\n5217\n5218\n5219\n5220\n5221\n5222\n5223\n5224\n5225\n5226\n5227\n5228\n5229\n5230\n5231\n5232\n5233\n5234\n5235\n5236\n5237\n5238\n5239\n5240\n5241\n5242\n5243\n5244\n5245\n5246\n5247\n5248\n5249\n5250\n5251\n5252\n5253\n5254\n5255\n5256\n5257\n5258\n5259\n5260\n5261\n5262\n5263\n5264\n5265\n5266\n5267\n5268\n5269\n5270\n5271\n5272\n5273\n5274\n5275\n5276\n5277\n5278\n5279\n5280\n5281\n5282\n5283\n5284\n5285\n5286\n5287\n5288\n5289\n5290\n5291\n5292\n5293\n5294\n5295\n5296\n5297\n5298\n5299\n5300\n5301\n5302\n5303\n5304\n5305\n5306\n5307\n5308\n5309\n5310\n5311\n5312\n5313\n5314\n5315\n5316\n5317\n5318\n5319\n5320\n5321\n5322\n5323\n5324\n5325\n5326\n5327\n5328\n5329\n5330\n5331\n5332\n5333\n5334\n5335\n5336\n5337\n5338\n5339\n5340\n5341\n5342\n5343\n5344\n5345\n5346\n5347\n5348\n5349\n5350\n5351\n5352\n5353\n5354\n5355\n5356\n5357\n5358\n5359\n5360\n5361\n5362\n5363\n5364\n5365\n5366\n5367\n5368\n5369\n5370\n5371\n5372\n5373\n5374\n5375\n5376\n5377\n5378\n5379\n5380\n5381\n5382\n5383\n5384\n5385\n5386\n5387\n5388\n5389\n5390\n5391\n5392\n5393\n5394\n5395\n5396\n5397\n5398\n5399\n5400\n5401\n5402\n5403\n5404\n5405\n5406\n5407\n5408\n5409\n5410\n5411\n5412\n5413\n5414\n5415\n5416\n5417\n5418\n5419\n5420\n5421\n5422\n5423\n5424\n5425\n5426\n5427\n5428\n5429\n5430\n5431\n5432\n5433\n5434\n5435\n5436\n5437\n5438\n5439\n5440\n5441\n5442\n5443\n5444\n5445\n5446\n5447\n5448\n5449\n5450\n5451\n5452\n5453\n5454\n5455\n5456\n5457\n5458\n5459\n5460\n5461\n5462\n5463\n5464\n5465\n5466\n5467\n5468\n5469\n5470\n5471\n5472\n5473\n5474\n5475\n5476\n5477\n5478\n5479\n5480\n5481\n5482\n5483\n5484\n5485\n5486\n5487\n5488\n5489\n5490\n5491\n5492\n5493\n5494\n5495\n5496\n5497\n5498\n5499\n5500\n5501\n5502\n5503\n5504\n5505\n5506\n5507\n5508\n5509\n5510\n5511\n5512\n5513\n5514\n5515\n5516\n5
517\n5518\n5519\n5520\n5521\n5522\n5523\n5524\n5525\n5526\n5527\n5528\n5529\n5530\n5531\n5532\n5533\n5534\n5535\n5536\n5537\n5538\n5539\n5540\n5541\n5542\n5543\n5544\n5545\n5546\n5547\n5548\n5549\n5550\n5551\n5552\n5553\n5554\n5555\n5556\n5557\n5558\n5559\n5560\n5561\n5562\n5563\n5564\n5565\n5566\n5567\n5568\n5569\n5570\n5571\n5572\n5573\n5574\n5575\n5576\n5577\n5578\n5579\n5580\n5581\n5582\n5583\n5584\n5585\n5586\n5587\n5588\n5589\n5590\n5591\n5592\n5593\n5594\n5595\n5596\n5597\n5598\n5599\n5600\n5601\n5602\n5603\n5604\n5605\n5606\n5607\n5608\n5609\n5610\n5611\n5612\n5613\n5614\n5615\n5616\n5617\n5618\n5619\n5620\n5621\n5622\n5623\n5624\n5625\n5626\n5627\n5628\n5629\n5630\n5631\n5632\n5633\n5634\n5635\n5636\n5637\n5638\n5639\n5640\n5641\n5642\n5643\n5644\n5645\n5646\n5647\n5648\n5649\n5650\n5651\n5652\n5653\n5654\n5655\n5656\n5657\n5658\n5659\n5660\n5661\n5662\n5663\n5664\n5665\n5666\n5667\n5668\n5669\n5670\n5671\n5672\n5673\n5674\n5675\n5676\n5677\n5678\n5679\n5680\n5681\n5682\n5683\n5684\n5685\n5686\n5687\n5688\n5689\n5690\n5691\n5692\n5693\n5694\n5695\n5696\n5697\n5698\n5699\n5700\n5701\n5702\n5703\n5704\n5705\n5706\n5707\n5708\n5709\n5710\n5711\n5712\n5713\n5714\n5715\n5716\n5717\n5718\n5719\n5720\n5721\n5722\n5723\n5724\n5725\n5726\n5727\n5728\n5729\n5730\n5731\n5732\n5733\n5734\n5735\n5736\n5737\n5738\n5739\n5740\n5741\n5742\n5743\n5744\n5745\n5746\n5747\n5748\n5749\n5750\n5751\n5752\n5753\n5754\n5755\n5756\n5757\n5758\n5759\n5760\n5761\n5762\n5763\n5764\n5765\n5766\n5767\n5768\n5769\n5770\n5771\n5772\n5773\n5774\n5775\n5776\n5777\n5778\n5779\n5780\n5781\n5782\n5783\n5784\n5785\n5786\n5787\n5788\n5789\n5790\n5791\n5792\n5793\n5794\n5795\n5796\n5797\n5798\n5799\n5800\n5801\n5802\n5803\n5804\n5805\n5806\n5807\n5808\n5809\n5810\n5811\n5812\n5813\n5814\n5815\n5816\n5817\n5818\n5819\n5820\n5821\n5822\n5823\n5824\n5825\n5826\n5827\n5828\n5829\n5830\n5831\n5832\n5833\n5834\n5835\n5836\n5837\n5838\n5839\n5840\n5841\n5842\n5843\n5844\n5845\n5846\n5847\n5848\n5849\n5850\n5851\n5852\n5853\n5854\n5855\n5856\n5857\n5858\n5859\n5860\n5861\n5862\n5863\n5864\n5865\n5866\n5867\n5868\n5869\n5870\n5871\n5872\n5873\n5874\n5875\n5876\n5877\n5878\n5879\n5880\n5881\n5882\n5883\n5884\n5885\n5886\n5887\n5888\n5889\n5890\n5891\n5892\n5893\n5894\n5895\n5896\n5897\n5898\n5899\n5900\n5901\n5902\n5903\n5904\n5905\n5906\n5907\n5908\n5909\n5910\n5911\n5912\n5913\n5914\n5915\n5916\n5917\n5918\n5919\n5920\n5921\n5922\n5923\n5924\n5925\n5926\n5927\n5928\n5929\n5930\n5931\n5932\n5933\n5934\n5935\n5936\n5937\n5938\n5939\n5940\n5941\n5942\n5943\n5944\n5945\n5946\n5947\n5948\n5949\n5950\n5951\n5952\n5953\n5954\n5955\n5956\n5957\n5958\n5959\n5960\n5961\n5962\n5963\n5964\n5965\n5966\n5967\n5968\n5969\n5970\n5971\n5972\n5973\n5974\n5975\n5976\n5977\n5978\n5979\n5980\n5981\n5982\n5983\n5984\n5985\n5986\n5987\n5988\n5989\n5990\n5991\n5992\n5993\n5994\n5995\n5996\n5997\n5998\n5999\n6000\n6001\n6002\n6003\n6004\n6005\n6006\n6007\n6008\n6009\n6010\n6011\n6012\n6013\n6014\n6015\n6016\n6017\n6018\n6019\n6020\n6021\n6022\n6023\n6024\n6025\n6026\n6027\n6028\n6029\n6030\n6031\n6032\n6033\n6034\n6035\n6036\n6037\n6038\n6039\n6040\n6041\n6042\n6043\n6044\n6045\n6046\n6047\n6048\n6049\n6050\n6051\n6052\n6053\n6054\n6055\n6056\n6057\n6058\n6059\n6060\n6061\n6062\n6063\n6064\n6065\n6066\n6067\n6068\n6069\n6070\n6071\n6072\n6073\n6074\n6075\n6076\n6077\n6078\n6079\n6080\n6081\n6082\n6083\n6084\n6085\n6086\n6087\n6088\n6089\n6090\n6091\n6092\n6093\n6094\n6095\n6096\n6097\n6098\n6099\n6100\n6101\n6102\n6103\n6104\n6105\n6106\n6107\n6108\n6109
\n6110\n6111\n6112\n6113\n6114\n6115\n6116\n6117\n6118\n6119\n6120\n6121\n6122\n6123\n6124\n6125\n6126\n6127\n6128\n6129\n6130\n6131\n6132\n6133\n6134\n6135\n6136\n6137\n6138\n6139\n6140\n6141\n6142\n6143\n6144\n6145\n6146\n6147\n6148\n6149\n6150\n6151\n6152\n6153\n6154\n6155\n6156\n6157\n6158\n6159\n6160\n6161\n6162\n6163\n6164\n6165\n6166\n6167\n6168\n6169\n6170\n6171\n6172\n6173\n6174\n6175\n6176\n6177\n6178\n6179\n6180\n6181\n6182\n6183\n6184\n6185\n6186\n6187\n6188\n6189\n6190\n6191\n6192\n6193\n6194\n6195\n6196\n6197\n6198\n6199\n6200\n6201\n6202\n6203\n6204\n6205\n6206\n6207\n6208\n6209\n6210\n6211\n6212\n6213\n6214\n6215\n6216\n6217\n6218\n6219\n6220\n6221\n6222\n6223\n6224\n6225\n6226\n6227\n6228\n6229\n6230\n6231\n6232\n6233\n6234\n6235\n6236\n6237\n6238\n6239\n6240\n6241\n6242\n6243\n6244\n6245\n6246\n6247\n6248\n6249\n6250\n6251\n6252\n6253\n6254\n6255\n6256\n6257\n6258\n6259\n6260\n6261\n6262\n6263\n6264\n6265\n6266\n6267\n6268\n6269\n6270\n6271\n6272\n6273\n6274\n6275\n6276\n6277\n6278\n6279\n6280\n6281\n6282\n6283\n6284\n6285\n6286\n6287\n6288\n6289\n6290\n6291\n6292\n6293\n6294\n6295\n6296\n6297\n6298\n6299\n6300\n6301\n6302\n6303\n6304\n6305\n6306\n6307\n6308\n6309\n6310\n6311\n6312\n6313\n6314\n6315\n6316\n6317\n6318\n6319\n6320\n6321\n6322\n6323\n6324\n6325\n6326\n6327\n6328\n6329\n6330\n6331\n6332\n6333\n6334\n6335\n6336\n6337\n6338\n6339\n6340\n6341\n6342\n6343\n6344\n6345\n6346\n6347\n6348\n6349\n6350\n6351\n6352\n6353\n6354\n6355\n6356\n6357\n6358\n6359\n6360\n6361\n6362\n6363\n6364\n6365\n6366\n6367\n6368\n6369\n6370\n6371\n6372\n6373\n6374\n6375\n6376\n6377\n6378\n6379\n6380\n6381\n6382\n6383\n6384\n6385\n6386\n6387\n6388\n6389\n6390\n6391\n6392\n6393\n6394\n6395\n6396\n6397\n6398\n6399\n6400\n6401\n6402\n6403\n6404\n6405\n6406\n6407\n6408\n6409\n6410\n6411\n6412\n6413\n6414\n6415\n6416\n6417\n6418\n6419\n6420\n6421\n6422\n6423\n6424\n6425\n6426\n6427\n6428\n6429\n6430\n6431\n6432\n6433\n6434\n6435\n6436\n6437\n6438\n6439\n6440\n6441\n6442\n6443\n6444\n6445\n6446\n6447\n6448\n6449\n6450\n6451\n6452\n6453\n6454\n6455\n6456\n6457\n6458\n6459\n6460\n6461\n6462\n6463\n6464\n6465\n6466\n6467\n6468\n6469\n6470\n6471\n6472\n6473\n6474\n6475\n6476\n6477\n6478\n6479\n6480\n6481\n6482\n6483\n6484\n6485\n6486\n6487\n6488\n6489\n6490\n6491\n6492\n6493\n6494\n6495\n6496\n6497\n6498\n6499\n6500\n6501\n6502\n6503\n6504\n6505\n6506\n6507\n6508\n6509\n6510\n6511\n6512\n6513\n6514\n6515\n6516\n6517\n6518\n6519\n6520\n6521\n6522\n6523\n6524\n6525\n6526\n6527\n6528\n6529\n6530\n6531\n6532\n6533\n6534\n6535\n6536\n6537\n6538\n6539\n6540\n6541\n6542\n6543\n6544\n6545\n6546\n6547\n6548\n6549\n6550\n6551\n6552\n6553\n6554\n6555\n6556\n6557\n6558\n6559\n6560\n6561\n6562\n6563\n6564\n6565\n6566\n6567\n6568\n6569\n6570\n6571\n6572\n6573\n6574\n6575\n6576\n6577\n6578\n6579\n6580\n6581\n6582\n6583\n6584\n6585\n6586\n6587\n6588\n6589\n6590\n6591\n6592\n6593\n6594\n6595\n6596\n6597\n6598\n6599\n6600\n6601\n6602\n6603\n6604\n6605\n6606\n6607\n6608\n6609\n6610\n6611\n6612\n6613\n6614\n6615\n6616\n6617\n6618\n6619\n6620\n6621\n6622\n6623\n6624\n6625\n6626\n6627\n6628\n6629\n6630\n6631\n6632\n6633\n6634\n6635\n6636\n6637\n6638\n6639\n6640\n6641\n6642\n6643\n6644\n6645\n6646\n6647\n6648\n6649\n6650\n6651\n6652\n6653\n6654\n6655\n6656\n6657\n6658\n6659\n6660\n6661\n6662\n6663\n6664\n6665\n6666\n6667\n6668\n6669\n6670\n6671\n6672\n6673\n6674\n6675\n6676\n6677\n6678\n6679\n6680\n6681\n6682\n6683\n6684\n6685\n6686\n6687\n6688\n6689\n6690\n6691\n6692\n6693\n6694\n6695\n6696\n6697\n6698\n6699\n6700\n6701\n6
702\n6703\n6704\n6705\n6706\n6707\n6708\n6709\n6710\n6711\n6712\n6713\n6714\n6715\n6716\n6717\n6718\n6719\n6720\n6721\n6722\n6723\n6724\n6725\n6726\n6727\n6728\n6729\n6730\n6731\n6732\n6733\n6734\n6735\n6736\n6737\n6738\n6739\n6740\n6741\n6742\n6743\n6744\n6745\n6746\n6747\n6748\n6749\n6750\n6751\n6752\n6753\n6754\n6755\n6756\n6757\n6758\n6759\n6760\n6761\n6762\n6763\n6764\n6765\n6766\n6767\n6768\n6769\n6770\n6771\n6772\n6773\n6774\n6775\n6776\n6777\n6778\n6779\n6780\n6781\n6782\n6783\n6784\n6785\n6786\n6787\n6788\n6789\n6790\n6791\n6792\n6793\n6794\n6795\n6796\n6797\n6798\n6799\n6800\n6801\n6802\n6803\n6804\n6805\n6806\n6807\n6808\n6809\n6810\n6811\n6812\n6813\n6814\n6815\n6816\n6817\n6818\n6819\n6820\n6821\n6822\n6823\n6824\n6825\n6826\n6827\n6828\n6829\n6830\n6831\n6832\n6833\n6834\n6835\n6836\n6837\n6838\n6839\n6840\n6841\n6842\n6843\n6844\n6845\n6846\n6847\n6848\n6849\n6850\n6851\n6852\n6853\n6854\n6855\n6856\n6857\n6858\n6859\n6860\n6861\n6862\n6863\n6864\n6865\n6866\n6867\n6868\n6869\n6870\n6871\n6872\n6873\n6874\n6875\n6876\n6877\n6878\n6879\n6880\n6881\n6882\n6883\n6884\n6885\n6886\n6887\n6888\n6889\n6890\n6891\n6892\n6893\n6894\n6895\n6896\n6897\n6898\n6899\n6900\n6901\n6902\n6903\n6904\n6905\n6906\n6907\n6908\n6909\n6910\n6911\n6912\n6913\n6914\n6915\n6916\n6917\n6918\n6919\n6920\n6921\n6922\n6923\n6924\n6925\n6926\n6927\n6928\n6929\n6930\n6931\n6932\n6933\n6934\n6935\n6936\n6937\n6938\n6939\n6940\n6941\n6942\n6943\n6944\n6945\n6946\n6947\n6948\n6949\n6950\n6951\n6952\n6953\n6954\n6955\n6956\n6957\n6958\n6959\n6960\n6961\n6962\n6963\n6964\n6965\n6966\n6967\n6968\n6969\n6970\n6971\n6972\n6973\n6974\n6975\n6976\n6977\n6978\n6979\n6980\n6981\n6982\n6983\n6984\n6985\n6986\n6987\n6988\n6989\n6990\n6991\n6992\n6993\n6994\n6995\n6996\n6997\n6998\n6999\n7000\n7001\n7002\n7003\n7004\n7005\n7006\n7007\n7008\n7009\n7010\n7011\n7012\n7013\n7014\n7015\n7016\n7017\n7018\n7019\n7020\n7021\n7022\n7023\n7024\n7025\n7026\n7027\n7028\n7029\n7030\n7031\n7032\n7033\n7034\n7035\n7036\n7037\n7038\n7039\n7040\n7041\n7042\n7043\n7044\n7045\n7046\n7047\n7048\n7049\n7050\n7051\n7052\n7053\n7054\n7055\n7056\n7057\n7058\n7059\n7060\n7061\n7062\n7063\n7064\n7065\n7066\n7067\n7068\n7069\n7070\n7071\n7072\n7073\n7074\n7075\n7076\n7077\n7078\n7079\n7080\n7081\n7082\n7083\n7084\n7085\n7086\n7087\n7088\n7089\n7090\n7091\n7092\n7093\n7094\n7095\n7096\n7097\n7098\n7099\n7100\n7101\n7102\n7103\n7104\n7105\n7106\n7107\n7108\n7109\n7110\n7111\n7112\n7113\n7114\n7115\n7116\n7117\n7118\n7119\n7120\n7121\n7122\n7123\n7124\n7125\n7126\n7127\n7128\n7129\n7130\n7131\n7132\n7133\n7134\n7135\n7136\n7137\n7138\n7139\n7140\n7141\n7142\n7143\n7144\n7145\n7146\n7147\n7148\n7149\n7150\n7151\n7152\n7153\n7154\n7155\n7156\n7157\n7158\n7159\n7160\n7161\n7162\n7163\n7164\n7165\n7166\n7167\n7168\n7169\n7170\n7171\n7172\n7173\n7174\n7175\n7176\n7177\n7178\n7179\n7180\n7181\n7182\n7183\n7184\n7185\n7186\n7187\n7188\n7189\n7190\n7191\n7192\n7193\n7194\n7195\n7196\n7197\n7198\n7199\n7200\n7201\n7202\n7203\n7204\n7205\n7206\n7207\n7208\n7209\n7210\n7211\n7212\n7213\n7214\n7215\n7216\n7217\n7218\n7219\n7220\n7221\n7222\n7223\n7224\n7225\n7226\n7227\n7228\n7229\n7230\n7231\n7232\n7233\n7234\n7235\n7236\n7237\n7238\n7239\n7240\n7241\n7242\n7243\n7244\n7245\n7246\n7247\n7248\n7249\n7250\n7251\n7252\n7253\n7254\n7255\n7256\n7257\n7258\n7259\n7260\n7261\n7262\n7263\n7264\n7265\n7266\n7267\n7268\n7269\n7270\n7271\n7272\n7273\n7274\n7275\n7276\n7277\n7278\n7279\n7280\n7281\n7282\n7283\n7284\n7285\n7286\n7287\n7288\n7289\n7290\n7291\n7292\n7293\n7294
\n7295\n7296\n7297\n7298\n7299\n7300\n7301\n7302\n7303\n7304\n7305\n7306\n7307\n7308\n7309\n7310\n7311\n7312\n7313\n7314\n7315\n7316\n7317\n7318\n7319\n7320\n7321\n7322\n7323\n7324\n7325\n7326\n7327\n7328\n7329\n7330\n7331\n7332\n7333\n7334\n7335\n7336\n7337\n7338\n7339\n7340\n7341\n7342\n7343\n7344\n7345\n7346\n7347\n7348\n7349\n7350\n7351\n7352\n7353\n7354\n7355\n7356\n7357\n7358\n7359\n7360\n7361\n7362\n7363\n7364\n7365\n7366\n7367\n7368\n7369\n7370\n7371\n7372\n7373\n7374\n7375\n7376\n7377\n7378\n7379\n7380\n7381\n7382\n7383\n7384\n7385\n7386\n7387\n7388\n7389\n7390\n7391\n7392\n7393\n7394\n7395\n7396\n7397\n7398\n7399\n7400\n7401\n7402\n7403\n7404\n7405\n7406\n7407\n7408\n7409\n7410\n7411\n7412\n7413\n7414\n7415\n7416\n7417\n7418\n7419\n7420\n7421\n7422\n7423\n7424\n7425\n7426\n7427\n7428\n7429\n7430\n7431\n7432\n7433\n7434\n7435\n7436\n7437\n7438\n7439\n7440\n7441\n7442\n7443\n7444\n7445\n7446\n7447\n7448\n7449\n7450\n7451\n7452\n7453\n7454\n7455\n7456\n7457\n7458\n7459\n7460\n7461\n7462\n7463\n7464\n7465\n7466\n7467\n7468\n7469\n7470\n7471\n7472\n7473\n7474\n7475\n7476\n7477\n7478\n7479\n7480\n7481\n7482\n7483\n7484\n7485\n7486\n7487\n7488\n7489\n7490\n7491\n7492\n7493\n7494\n7495\n7496\n7497\n7498\n7499\n7500\n7501\n7502\n7503\n7504\n7505\n7506\n7507\n7508\n7509\n7510\n7511\n7512\n7513\n7514\n7515\n7516\n7517\n7518\n7519\n7520\n7521\n7522\n7523\n7524\n7525\n7526\n7527\n7528\n7529\n7530\n7531\n7532\n7533\n7534\n7535\n7536\n7537\n7538\n7539\n7540\n7541\n7542\n7543\n7544\n7545\n7546\n7547\n7548\n7549\n7550\n7551\n7552\n7553\n7554\n7555\n7556\n7557\n7558\n7559\n7560\n7561\n7562\n7563\n7564\n7565\n7566\n7567\n7568\n7569\n7570\n7571\n7572\n7573\n7574\n7575\n7576\n7577\n7578\n7579\n7580\n7581\n7582\n7583\n7584\n7585\n7586\n7587\n7588\n7589\n7590\n7591\n7592\n7593\n7594\n7595\n7596\n7597\n7598\n7599\n7600\n7601\n7602\n7603\n7604\n7605\n7606\n7607\n7608\n7609\n7610\n7611\n7612\n7613\n7614\n7615\n7616\n7617\n7618\n7619\n7620\n7621\n7622\n7623\n7624\n7625\n7626\n7627\n7628\n7629\n7630\n7631\n7632\n7633\n7634\n7635\n7636\n7637\n7638\n7639\n7640\n7641\n7642\n7643\n7644\n7645\n7646\n7647\n7648\n7649\n7650\n7651\n7652\n7653\n7654\n7655\n7656\n7657\n7658\n7659\n7660\n7661\n7662\n7663\n7664\n7665\n7666\n7667\n7668\n7669\n7670\n7671\n7672\n7673\n7674\n7675\n7676\n7677\n7678\n7679\n7680\n7681\n7682\n7683\n7684\n7685\n7686\n7687\n7688\n7689\n7690\n7691\n7692\n7693\n7694\n7695\n7696\n7697\n7698\n7699\n7700\n7701\n7702\n7703\n7704\n7705\n7706\n7707\n7708\n7709\n7710\n7711\n7712\n7713\n7714\n7715\n7716\n7717\n7718\n7719\n7720\n7721\n7722\n7723\n7724\n7725\n7726\n7727\n7728\n7729\n7730\n7731\n7732\n7733\n7734\n7735\n7736\n7737\n7738\n7739\n7740\n7741\n7742\n7743\n7744\n7745\n7746\n7747\n7748\n7749\n7750\n7751\n7752\n7753\n7754\n7755\n7756\n7757\n7758\n7759\n7760\n7761\n7762\n7763\n7764\n7765\n7766\n7767\n7768\n7769\n7770\n7771\n7772\n7773\n7774\n7775\n7776\n7777\n7778\n7779\n7780\n7781\n7782\n7783\n7784\n7785\n7786\n7787\n7788\n7789\n7790\n7791\n7792\n7793\n7794\n7795\n7796\n7797\n7798\n7799\n7800\n7801\n7802\n7803\n7804\n7805\n7806\n7807\n7808\n7809\n7810\n7811\n7812\n7813\n7814\n7815\n7816\n7817\n7818\n7819\n7820\n7821\n7822\n7823\n7824\n7825\n7826\n7827\n7828\n7829\n7830\n7831\n7832\n7833\n7834\n7835\n7836\n7837\n7838\n7839\n7840\n7841\n7842\n7843\n7844\n7845\n7846\n7847\n7848\n7849\n7850\n7851\n7852\n7853\n7854\n7855\n7856\n7857\n7858\n7859\n7860\n7861\n7862\n7863\n7864\n7865\n7866\n7867\n7868\n7869\n7870\n7871\n7872\n7873\n7874\n7875\n7876\n7877\n7878\n7879\n7880\n7881\n7882\n7883\n7884\n7885\n7886\n7
887\n7888\n7889\n7890\n7891\n7892\n7893\n7894\n7895\n7896\n7897\n7898\n7899\n7900\n7901\n7902\n7903\n7904\n7905\n7906\n7907\n7908\n7909\n7910\n7911\n7912\n7913\n7914\n7915\n7916\n7917\n7918\n7919\n7920\n7921\n7922\n7923\n7924\n7925\n7926\n7927\n7928\n7929\n7930\n7931\n7932\n7933\n7934\n7935\n7936\n7937\n7938\n7939\n7940\n7941\n7942\n7943\n7944\n7945\n7946\n7947\n7948\n7949\n7950\n7951\n7952\n7953\n7954\n7955\n7956\n7957\n7958\n7959\n7960\n7961\n7962\n7963\n7964\n7965\n7966\n7967\n7968\n7969\n7970\n7971\n7972\n7973\n7974\n7975\n7976\n7977\n7978\n7979\n7980\n7981\n7982\n7983\n7984\n7985\n7986\n7987\n7988\n7989\n7990\n7991\n7992\n7993\n7994\n7995\n7996\n7997\n7998\n7999\n8000\n8001\n8002\n8003\n8004\n8005\n8006\n8007\n8008\n8009\n8010\n8011\n8012\n8013\n8014\n8015\n8016\n8017\n8018\n8019\n8020\n8021\n8022\n8023\n8024\n8025\n8026\n8027\n8028\n8029\n8030\n8031\n8032\n8033\n8034\n8035\n8036\n8037\n8038\n8039\n8040\n8041\n8042\n8043\n8044\n8045\n8046\n8047\n8048\n8049\n8050\n8051\n8052\n8053\n8054\n8055\n8056\n8057\n8058\n8059\n8060\n8061\n8062\n8063\n8064\n8065\n8066\n8067\n8068\n8069\n8070\n8071\n8072\n8073\n8074\n8075\n8076\n8077\n8078\n8079\n8080\n8081\n8082\n8083\n8084\n8085\n8086\n8087\n8088\n8089\n8090\n8091\n8092\n8093\n8094\n8095\n8096\n8097\n8098\n8099\n8100\n8101\n8102\n8103\n8104\n8105\n8106\n8107\n8108\n8109\n8110\n8111\n8112\n8113\n8114\n8115\n8116\n8117\n8118\n8119\n8120\n8121\n8122\n8123\n8124\n8125\n8126\n8127\n8128\n8129\n8130\n8131\n8132\n8133\n8134\n8135\n8136\n8137\n8138\n8139\n8140\n8141\n8142\n8143\n8144\n8145\n8146\n8147\n8148\n8149\n8150\n8151\n8152\n8153\n8154\n8155\n8156\n8157\n8158\n8159\n8160\n8161\n8162\n8163\n8164\n8165\n8166\n8167\n8168\n8169\n8170\n8171\n8172\n8173\n8174\n8175\n8176\n8177\n8178\n8179\n8180\n8181\n8182\n8183\n8184\n8185\n8186\n8187\n8188\n8189\n8190\n8191\n8192\n8193\n8194\n8195\n8196\n8197\n8198\n8199\n8200\n8201\n8202\n8203\n8204\n8205\n8206\n8207\n8208\n8209\n8210\n8211\n8212\n8213\n8214\n8215\n8216\n8217\n8218\n8219\n8220\n8221\n8222\n8223\n8224\n8225\n8226\n8227\n8228\n8229\n8230\n8231\n8232\n8233\n8234\n8235\n8236\n8237\n8238\n8239\n8240\n8241\n8242\n8243\n8244\n8245\n8246\n8247\n8248\n8249\n8250\n8251\n8252\n8253\n8254\n8255\n8256\n8257\n8258\n8259\n8260\n8261\n8262\n8263\n8264\n8265\n8266\n8267\n8268\n8269\n8270\n8271\n8272\n8273\n8274\n8275\n8276\n8277\n8278\n8279\n8280\n8281\n8282\n8283\n8284\n8285\n8286\n8287\n8288\n8289\n8290\n8291\n8292\n8293\n8294\n8295\n8296\n8297\n8298\n8299\n8300\n8301\n8302\n8303\n8304\n8305\n8306\n8307\n8308\n8309\n8310\n8311\n8312\n8313\n8314\n8315\n8316\n8317\n8318\n8319\n8320\n8321\n8322\n8323\n8324\n8325\n8326\n8327\n8328\n8329\n8330\n8331\n8332\n8333\n8334\n8335\n8336\n8337\n8338\n8339\n8340\n8341\n8342\n8343\n8344\n8345\n8346\n8347\n8348\n8349\n8350\n8351\n8352\n8353\n8354\n8355\n8356\n8357\n8358\n8359\n8360\n8361\n8362\n8363\n8364\n8365\n8366\n8367\n8368\n8369\n8370\n8371\n8372\n8373\n8374\n8375\n8376\n8377\n8378\n8379\n8380\n8381\n8382\n8383\n8384\n8385\n8386\n8387\n8388\n8389\n8390\n8391\n8392\n8393\n8394\n8395\n8396\n8397\n8398\n8399\n8400\n8401\n8402\n8403\n8404\n8405\n8406\n8407\n8408\n8409\n8410\n8411\n8412\n8413\n8414\n8415\n8416\n8417\n8418\n8419\n8420\n8421\n8422\n8423\n8424\n8425\n8426\n8427\n8428\n8429\n8430\n8431\n8432\n8433\n8434\n8435\n8436\n8437\n8438\n8439\n8440\n8441\n8442\n8443\n8444\n8445\n8446\n8447\n8448\n8449\n8450\n8451\n8452\n8453\n8454\n8455\n8456\n8457\n8458\n8459\n8460\n8461\n8462\n8463\n8464\n8465\n8466\n8467\n8468\n8469\n8470\n8471\n8472\n8473\n8474\n8475\n8476\n8477\n8478\n8479
\n8480\n8481\n8482\n8483\n8484\n8485\n8486\n8487\n8488\n8489\n8490\n8491\n8492\n8493\n8494\n8495\n8496\n8497\n8498\n8499\n8500\n8501\n8502\n8503\n8504\n8505\n8506\n8507\n8508\n8509\n8510\n8511\n8512\n8513\n8514\n8515\n8516\n8517\n8518\n8519\n8520\n8521\n8522\n8523\n8524\n8525\n8526\n8527\n8528\n8529\n8530\n8531\n8532\n8533\n8534\n8535\n8536\n8537\n8538\n8539\n8540\n8541\n8542\n8543\n8544\n8545\n8546\n8547\n8548\n8549\n8550\n8551\n8552\n8553\n8554\n8555\n8556\n8557\n8558\n8559\n8560\n8561\n8562\n8563\n8564\n8565\n8566\n8567\n8568\n8569\n8570\n8571\n8572\n8573\n8574\n8575\n8576\n8577\n8578\n8579\n8580\n8581\n8582\n8583\n8584\n8585\n8586\n8587\n8588\n8589\n8590\n8591\n8592\n8593\n8594\n8595\n8596\n8597\n8598\n8599\n8600\n8601\n8602\n8603\n8604\n8605\n8606\n8607\n8608\n8609\n8610\n8611\n8612\n8613\n8614\n8615\n8616\n8617\n8618\n8619\n8620\n8621\n8622\n8623\n8624\n8625\n8626\n8627\n8628\n8629\n8630\n8631\n8632\n8633\n8634\n8635\n8636\n8637\n8638\n8639\n8640\n8641\n8642\n8643\n8644\n8645\n8646\n8647\n8648\n8649\n8650\n8651\n8652\n8653\n8654\n8655\n8656\n8657\n8658\n8659\n8660\n8661\n8662\n8663\n8664\n8665\n8666\n8667\n8668\n8669\n8670\n8671\n8672\n8673\n8674\n8675\n8676\n8677\n8678\n8679\n8680\n8681\n8682\n8683\n8684\n8685\n8686\n8687\n8688\n8689\n8690\n8691\n8692\n8693\n8694\n8695\n8696\n8697\n8698\n8699\n8700\n8701\n8702\n8703\n8704\n8705\n8706\n8707\n8708\n8709\n8710\n8711\n8712\n8713\n8714\n8715\n8716\n8717\n8718\n8719\n8720\n8721\n8722\n8723\n8724\n8725\n8726\n8727\n8728\n8729\n8730\n8731\n8732\n8733\n8734\n8735\n8736\n8737\n8738\n8739\n8740\n8741\n8742\n8743\n8744\n8745\n8746\n8747\n8748\n8749\n8750\n8751\n8752\n8753\n8754\n8755\n8756\n8757\n8758\n8759\n8760\n8761\n8762\n8763\n8764\n8765\n8766\n8767\n8768\n8769\n8770\n8771\n8772\n8773\n8774\n8775\n8776\n8777\n8778\n8779\n8780\n8781\n8782\n8783\n8784\n8785\n8786\n8787\n8788\n8789\n8790\n8791\n8792\n8793\n8794\n8795\n8796\n8797\n8798\n8799\n8800\n8801\n8802\n8803\n8804\n8805\n8806\n8807\n8808\n8809\n8810\n8811\n8812\n8813\n8814\n8815\n8816\n8817\n8818\n8819\n8820\n8821\n8822\n8823\n8824\n8825\n8826\n8827\n8828\n8829\n8830\n8831\n8832\n8833\n8834\n8835\n8836\n8837\n8838\n8839\n8840\n8841\n8842\n8843\n8844\n8845\n8846\n8847\n8848\n8849\n8850\n8851\n8852\n8853\n8854\n8855\n8856\n8857\n8858\n8859\n8860\n8861\n8862\n8863\n8864\n8865\n8866\n8867\n8868\n8869\n8870\n8871\n8872\n8873\n8874\n8875\n8876\n8877\n8878\n8879\n8880\n8881\n8882\n8883\n8884\n8885\n8886\n8887\n8888\n8889\n8890\n8891\n8892\n8893\n8894\n8895\n8896\n8897\n8898\n8899\n8900\n8901\n8902\n8903\n8904\n8905\n8906\n8907\n8908\n8909\n8910\n8911\n8912\n8913\n8914\n8915\n8916\n8917\n8918\n8919\n8920\n8921\n8922\n8923\n8924\n8925\n8926\n8927\n8928\n8929\n8930\n8931\n8932\n8933\n8934\n8935\n8936\n8937\n8938\n8939\n8940\n8941\n8942\n8943\n8944\n8945\n8946\n8947\n8948\n8949\n8950\n8951\n8952\n8953\n8954\n8955\n8956\n8957\n8958\n8959\n8960\n8961\n8962\n8963\n8964\n8965\n8966\n8967\n8968\n8969\n8970\n8971\n8972\n8973\n8974\n8975\n8976\n8977\n8978\n8979\n8980\n8981\n8982\n8983\n8984\n8985\n8986\n8987\n8988\n8989\n8990\n8991\n8992\n8993\n8994\n8995\n8996\n8997\n8998\n8999\n9000\n9001\n9002\n9003\n9004\n9005\n9006\n9007\n9008\n9009\n9010\n9011\n9012\n9013\n9014\n9015\n9016\n9017\n9018\n9019\n9020\n9021\n9022\n9023\n9024\n9025\n9026\n9027\n9028\n9029\n9030\n9031\n9032\n9033\n9034\n9035\n9036\n9037\n9038\n9039\n9040\n9041\n9042\n9043\n9044\n9045\n9046\n9047\n9048\n9049\n9050\n9051\n9052\n9053\n9054\n9055\n9056\n9057\n9058\n9059\n9060\n9061\n9062\n9063\n9064\n9065\n9066\n9067\n9068\n9069\n9070\n9071\n9
072\n9073\n9074\n9075\n9076\n9077\n9078\n9079\n9080\n9081\n9082\n9083\n9084\n9085\n9086\n9087\n9088\n9089\n9090\n9091\n9092\n9093\n9094\n9095\n9096\n9097\n9098\n9099\n9100\n9101\n9102\n9103\n9104\n9105\n9106\n9107\n9108\n9109\n9110\n9111\n9112\n9113\n9114\n9115\n9116\n9117\n9118\n9119\n9120\n9121\n9122\n9123\n9124\n9125\n9126\n9127\n9128\n9129\n9130\n9131\n9132\n9133\n9134\n9135\n9136\n9137\n9138\n9139\n9140\n9141\n9142\n9143\n9144\n9145\n9146\n9147\n9148\n9149\n9150\n9151\n9152\n9153\n9154\n9155\n9156\n9157\n9158\n9159\n9160\n9161\n9162\n9163\n9164\n9165\n9166\n9167\n9168\n9169\n9170\n9171\n9172\n9173\n9174\n9175\n9176\n9177\n9178\n9179\n9180\n9181\n9182\n9183\n9184\n9185\n9186\n9187\n9188\n9189\n9190\n9191\n9192\n9193\n9194\n9195\n9196\n9197\n9198\n9199\n9200\n9201\n9202\n9203\n9204\n9205\n9206\n9207\n9208\n9209\n9210\n9211\n9212\n9213\n9214\n9215\n9216\n9217\n9218\n9219\n9220\n9221\n9222\n9223\n9224\n9225\n9226\n9227\n9228\n9229\n9230\n9231\n9232\n9233\n9234\n9235\n9236\n9237\n9238\n9239\n9240\n9241\n9242\n9243\n9244\n9245\n9246\n9247\n9248\n9249\n9250\n9251\n9252\n9253\n9254\n9255\n9256\n9257\n9258\n9259\n9260\n9261\n9262\n9263\n9264\n9265\n9266\n9267\n9268\n9269\n9270\n9271\n9272\n9273\n9274\n9275\n9276\n9277\n9278\n9279\n9280\n9281\n9282\n9283\n9284\n9285\n9286\n9287\n9288\n9289\n9290\n9291\n9292\n9293\n9294\n9295\n9296\n9297\n9298\n9299\n9300\n9301\n9302\n9303\n9304\n9305\n9306\n9307\n9308\n9309\n9310\n9311\n9312\n9313\n9314\n9315\n9316\n9317\n9318\n9319\n9320\n9321\n9322\n9323\n9324\n9325\n9326\n9327\n9328\n9329\n9330\n9331\n9332\n9333\n9334\n9335\n9336\n9337\n9338\n9339\n9340\n9341\n9342\n9343\n9344\n9345\n9346\n9347\n9348\n9349\n9350\n9351\n9352\n9353\n9354\n9355\n9356\n9357\n9358\n9359\n9360\n9361\n9362\n9363\n9364\n9365\n9366\n9367\n9368\n9369\n9370\n9371\n9372\n9373\n9374\n9375\n9376\n9377\n9378\n9379\n9380\n9381\n9382\n9383\n9384\n9385\n9386\n9387\n9388\n9389\n9390\n9391\n9392\n9393\n9394\n9395\n9396\n9397\n9398\n9399\n9400\n9401\n9402\n9403\n9404\n9405\n9406\n9407\n9408\n9409\n9410\n9411\n9412\n9413\n9414\n9415\n9416\n9417\n9418\n9419\n9420\n9421\n9422\n9423\n9424\n9425\n9426\n9427\n9428\n9429\n9430\n9431\n9432\n9433\n9434\n9435\n9436\n9437\n9438\n9439\n9440\n9441\n9442\n9443\n9444\n9445\n9446\n9447\n9448\n9449\n9450\n9451\n9452\n9453\n9454\n9455\n9456\n9457\n9458\n9459\n9460\n9461\n9462\n9463\n9464\n9465\n9466\n9467\n9468\n9469\n9470\n9471\n9472\n9473\n9474\n9475\n9476\n9477\n9478\n9479\n9480\n9481\n9482\n9483\n9484\n9485\n9486\n9487\n9488\n9489\n9490\n9491\n9492\n9493\n9494\n9495\n9496\n9497\n9498\n9499\n9500\n9501\n9502\n9503\n9504\n9505\n9506\n9507\n9508\n9509\n9510\n9511\n9512\n9513\n9514\n9515\n9516\n9517\n9518\n9519\n9520\n9521\n9522\n9523\n9524\n9525\n9526\n9527\n9528\n9529\n9530\n9531\n9532\n9533\n9534\n9535\n9536\n9537\n9538\n9539\n9540\n9541\n9542\n9543\n9544\n9545\n9546\n9547\n9548\n9549\n9550\n9551\n9552\n9553\n9554\n9555\n9556\n9557\n9558\n9559\n9560\n9561\n9562\n9563\n9564\n9565\n9566\n9567\n9568\n9569\n9570\n9571\n9572\n9573\n9574\n9575\n9576\n9577\n9578\n9579\n9580\n9581\n9582\n9583\n9584\n9585\n9586\n9587\n9588\n9589\n9590\n9591\n9592\n9593\n9594\n9595\n9596\n9597\n9598\n9599\n9600\n9601\n9602\n9603\n9604\n9605\n9606\n9607\n9608\n9609\n9610\n9611\n9612\n9613\n9614\n9615\n9616\n9617\n9618\n9619\n9620\n9621\n9622\n9623\n9624\n9625\n9626\n9627\n9628\n9629\n9630\n9631\n9632\n9633\n9634\n9635\n9636\n9637\n9638\n9639\n9640\n9641\n9642\n9643\n9644\n9645\n9646\n9647\n9648\n9649\n9650\n9651\n9652\n9653\n9654\n9655\n9656\n9657\n9658\n9659\n9660\n9661\n9662\n9663\n9664
\n9665\n9666\n9667\n9668\n9669\n9670\n9671\n9672\n9673\n9674\n9675\n9676\n9677\n9678\n9679\n9680\n9681\n9682\n9683\n9684\n9685\n9686\n9687\n9688\n9689\n9690\n9691\n9692\n9693\n9694\n9695\n9696\n9697\n9698\n9699\n9700\n9701\n9702\n9703\n9704\n9705\n9706\n9707\n9708\n9709\n9710\n9711\n9712\n9713\n9714\n9715\n9716\n9717\n9718\n9719\n9720\n9721\n9722\n9723\n9724\n9725\n9726\n9727\n9728\n9729\n9730\n9731\n9732\n9733\n9734\n9735\n9736\n9737\n9738\n9739\n9740\n9741\n9742\n9743\n9744\n9745\n9746\n9747\n9748\n9749\n9750\n9751\n9752\n9753\n9754\n9755\n9756\n9757\n9758\n9759\n9760\n9761\n9762\n9763\n9764\n9765\n9766\n9767\n9768\n9769\n9770\n9771\n9772\n9773\n9774\n9775\n9776\n9777\n9778\n9779\n9780\n9781\n9782\n9783\n9784\n9785\n9786\n9787\n9788\n9789\n9790\n9791\n9792\n9793\n9794\n9795\n9796\n9797\n9798\n9799\n9800\n9801\n9802\n9803\n9804\n9805\n9806\n9807\n9808\n9809\n9810\n9811\n9812\n9813\n9814\n9815\n9816\n9817\n9818\n9819\n9820\n9821\n9822\n9823\n9824\n9825\n9826\n9827\n9828\n9829\n9830\n9831\n9832\n9833\n9834\n9835\n9836\n9837\n9838\n9839\n9840\n9841\n9842\n9843\n9844\n9845\n9846\n9847\n9848\n9849\n9850\n9851\n9852\n9853\n9854\n9855\n9856\n9857\n9858\n9859\n9860\n9861\n9862\n9863\n9864\n9865\n9866\n9867\n9868\n9869\n9870\n9871\n9872\n9873\n9874\n9875\n9876\n9877\n9878\n9879\n9880\n9881\n9882\n9883\n9884\n9885\n9886\n9887\n9888\n9889\n9890\n9891\n9892\n9893\n9894\n9895\n9896\n9897\n9898\n9899\n9900\n9901\n9902\n9903\n9904\n9905\n9906\n9907\n9908\n9909\n9910\n9911\n9912\n9913\n9914\n9915\n9916\n9917\n9918\n9919\n9920\n9921\n9922\n9923\n9924\n9925\n9926\n9927\n9928\n9929\n9930\n9931\n9932\n9933\n9934\n9935\n9936\n9937\n9938\n9939\n9940\n9941\n9942\n9943\n9944\n9945\n9946\n9947\n9948\n9949\n9950\n9951\n9952\n9953\n9954\n9955\n9956\n9957\n9958\n9959\n9960\n9961\n9962\n9963\n9964\n9965\n9966\n9967\n9968\n9969\n9970\n9971\n9972\n9973\n9974\n9975\n9976\n9977\n9978\n9979\n9980\n9981\n9982\n9983\n9984\n9985\n9986\n9987\n9988\n9989\n9990\n9991\n9992\n9993\n9994\n9995\n9996\n9997\n9998\n9999\n10000\n10001\n10002\n10003\n10004\n10005\n10006\n10007\n10008\n10009\n10010\n10011\n10012\n10013\n10014\n10015\n10016\n10017\n10018\n10019\n10020\n10021\n10022\n10023\n10024\n10025\n10026\n10027\n10028\n10029\n10030\n10031\n10032\n10033\n10034\n10035\n10036\n10037\n10038\n10039\n10040\n10041\n10042\n10043\n10044\n10045\n10046\n10047\n10048\n10049\n10050\n10051\n10052\n10053\n10054\n10055\n10056\n10057\n10058\n10059\n10060\n10061\n10062\n10063\n10064\n10065\n10066\n10067\n10068\n10069\n10070\n10071\n10072\n10073\n10074\n10075\n10076\n10077\n10078\n10079\n10080\n10081\n10082\n10083\n10084\n10085\n10086\n10087\n10088\n10089\n10090\n10091\n10092\n10093\n10094\n10095\n10096\n10097\n10098\n10099\n10100\n10101\n10102\n10103\n10104\n10105\n10106\n10107\n10108\n10109\n10110\n10111\n10112\n10113\n10114\n10115\n10116\n10117\n10118\n10119\n10120\n10121\n10122\n10123\n10124\n10125\n10126\n10127\n10128\n10129\n10130\n10131\n10132\n10133\n10134\n10135\n10136\n10137\n10138\n10139\n10140\n10141\n10142\n10143\n10144\n10145\n10146\n10147\n10148\n10149\n10150\n10151\n10152\n10153\n10154\n10155\n10156\n10157\n10158\n10159\n10160\n10161\n10162\n10163\n10164\n10165\n10166\n10167\n10168\n10169\n10170\n10171\n10172\n10173\n10174\n10175\n10176\n10177\n10178\n10179\n10180\n10181\n10182\n10183\n10184\n10185\n10186\n10187\n10188\n10189\n10190\n10191\n10192\n10193\n10194\n10195\n10196\n10197\n10198\n10199\n10200\n10201\n10202\n10203\n10204\n10205\n10206\n10207\n10208\n10209\n10210\n10211\n10212\n10213\n10214\n10215\n10216\n10217\n10218\n10219\n102
20\n10221\n10222\n10223\n10224\n10225\n10226\n10227\n10228\n10229\n10230\n10231\n10232\n10233\n10234\n10235\n10236\n10237\n10238\n10239\n10240\n10241\n10242\n10243\n10244\n10245\n10246\n10247\n10248\n10249\n10250\n10251\n10252\n10253\n10254\n10255\n10256\n10257\n10258\n10259\n10260\n10261\n10262\n10263\n10264\n10265\n10266\n10267\n10268\n10269\n10270\n10271\n10272\n10273\n10274\n10275\n10276\n10277\n10278\n10279\n10280\n10281\n10282\n10283\n10284\n10285\n10286\n10287\n10288\n10289\n10290\n10291\n10292\n10293\n10294\n10295\n10296\n10297\n10298\n10299\n10300\n10301\n10302\n10303\n10304\n10305\n10306\n10307\n10308\n10309\n10310\n10311\n10312\n10313\n10314\n10315\n10316\n10317\n10318\n10319\n10320\n10321\n10322\n10323\n10324\n10325\n10326\n10327\n10328\n10329\n10330\n10331\n10332\n10333\n10334\n10335\n10336\n10337\n10338\n10339\n10340\n10341\n10342\n10343\n10344\n10345\n10346\n10347\n10348\n10349\n10350\n10351\n10352\n10353\n10354\n10355\n10356\n10357\n10358\n10359\n10360\n10361\n10362\n10363\n10364\n10365\n10366\n10367\n10368\n10369\n10370\n10371\n10372\n10373\n10374\n10375\n10376\n10377\n10378\n10379\n10380\n10381\n10382\n10383\n10384\n10385\n10386\n10387\n10388\n10389\n10390\n10391\n10392\n10393\n10394\n10395\n10396\n10397\n10398\n10399\n10400\n10401\n10402\n10403\n10404\n10405\n10406\n10407\n10408\n10409\n10410\n10411\n10412\n10413\n10414\n10415\n10416\n10417\n10418\n10419\n10420\n10421\n10422\n10423\n10424\n10425\n10426\n10427\n10428\n10429\n10430\n10431\n10432\n10433\n10434\n10435\n10436\n10437\n10438\n10439\n10440\n10441\n10442\n10443\n10444\n10445\n10446\n10447\n10448\n10449\n10450\n10451\n10452\n10453\n10454\n10455\n10456\n10457\n10458\n10459\n10460\n10461\n10462\n10463\n10464\n10465\n10466\n10467\n10468\n10469\n10470\n10471\n10472\n10473\n10474\n10475\n10476\n10477\n10478\n10479\n10480\n10481\n10482\n10483\n10484\n10485\n10486\n10487\n10488\n10489\n10490\n10491\n10492\n10493\n10494\n10495\n10496\n10497\n10498\n10499\n10500\n10501\n10502\n10503\n10504\n10505\n10506\n10507\n10508\n10509\n10510\n10511\n10512\n10513\n10514\n10515\n10516\n10517\n10518\n10519\n10520\n10521\n10522\n10523\n10524\n10525\n10526\n10527\n10528\n10529\n10530\n10531\n10532\n10533\n10534\n10535\n10536\n10537\n10538\n10539\n10540\n10541\n10542\n10543\n10544\n10545\n10546\n10547\n10548\n10549\n10550\n10551\n10552\n10553\n10554\n10555\n10556\n10557\n10558\n10559\n10560\n10561\n10562\n10563\n10564\n10565\n10566\n10567\n10568\n10569\n10570\n10571\n10572\n10573\n10574\n10575\n10576\n10577\n10578\n10579\n10580\n10581\n10582\n10583\n10584\n10585\n10586\n10587\n10588\n10589\n10590\n10591\n10592\n10593\n10594\n10595\n10596\n10597\n10598\n10599\n10600\n10601\n10602\n10603\n10604\n10605\n10606\n10607\n10608\n10609\n10610\n10611\n10612\n10613\n10614\n10615\n10616\n10617\n10618\n10619\n10620\n10621\n10622\n10623\n10624\n10625\n10626\n10627\n10628\n10629\n10630\n10631\n10632\n10633\n10634\n10635\n10636\n10637\n10638\n10639\n10640\n10641\n10642\n10643\n10644\n10645\n10646\n10647\n10648\n10649\n10650\n10651\n10652\n10653\n10654\n10655\n10656\n10657\n10658\n10659\n10660\n10661\n10662\n10663\n10664\n10665\n10666\n10667\n10668\n10669\n10670\n10671\n10672\n10673\n10674\n10675\n10676\n10677\n10678\n10679\n10680\n10681\n10682\n10683\n10684\n10685\n10686\n10687\n10688\n10689\n10690\n10691\n10692\n10693\n10694\n10695\n10696\n10697\n10698\n10699\n10700\n10701\n10702\n10703\n10704\n10705\n10706\n10707\n10708\n10709\n10710\n10711\n10712\n10713\n10714\n10715\n10716\n10717\n10718\n10719\n10720\n10721\n10722\n10723\n10724\n10725\n10726\n10727\n10
728\n10729\n10730\n10731\n10732\n10733\n10734\n10735\n10736\n10737\n10738\n10739\n10740\n10741\n10742\n10743\n10744\n10745\n10746\n10747\n10748\n10749\n10750\n10751\n10752\n10753\n10754\n10755\n10756\n10757\n10758\n10759\n10760\n10761\n10762\n10763\n10764\n10765\n10766\n10767\n10768\n10769\n10770\n10771\n10772\n10773\n10774\n10775\n10776\n10777\n10778\n10779\n10780\n10781\n10782\n10783\n10784\n10785\n10786\n10787\n10788\n10789\n10790\n10791\n10792\n10793\n10794\n10795\n10796\n10797\n10798\n10799\n10800\n10801\n10802\n10803\n10804\n10805\n10806\n10807\n10808\n10809\n10810\n10811\n10812\n10813\n10814\n10815\n10816\n10817\n10818\n10819\n10820\n10821\n10822\n10823\n10824\n10825\n10826\n10827\n10828\n10829\n10830\n10831\n10832\n10833\n10834\n10835\n10836\n10837\n10838\n10839\n10840\n10841\n10842\n10843\n10844\n10845\n10846\n10847\n10848\n10849\n10850\n10851\n10852\n10853\n10854\n10855\n10856\n10857\n10858\n10859\n10860\n10861\n10862\n10863\n10864\n10865\n10866\n10867\n10868\n10869\n10870\n10871\n10872\n10873\n10874\n10875\n10876\n10877\n10878\n10879\n10880\n10881\n10882\n10883\n10884\n10885\n10886\n10887\n10888\n10889\n10890\n10891\n10892\n10893\n10894\n10895\n10896\n10897\n10898\n10899\n10900\n10901\n10902\n10903\n10904\n10905\n10906\n10907\n10908\n10909\n10910\n10911\n10912\n10913\n10914\n10915\n10916\n10917\n10918\n10919\n10920\n10921\n10922\n10923\n10924\n10925\n10926\n10927\n10928\n10929\n10930\n10931\n10932\n10933\n10934\n10935\n10936\n10937\n10938\n10939\n10940\n10941\n10942\n10943\n10944\n10945\n10946\n10947\n10948\n10949\n10950\n10951\n10952\n10953\n10954\n10955\n10956\n10957\n10958\n10959\n10960\n10961\n10962\n10963\n10964\n10965\n10966\n10967\n10968\n10969\n10970\n10971\n10972\n10973\n10974\n10975\n10976\n10977\n10978\n10979\n10980\n10981\n10982\n10983\n10984\n10985\n10986\n10987\n10988\n10989\n10990\n10991\n10992\n10993\n10994\n10995\n10996\n10997\n10998\n10999\n11000\n11001\n11002\n11003\n11004\n11005\n11006\n11007\n11008\n11009\n11010\n11011\n11012\n11013\n11014\n11015\n11016\n11017\n11018\n11019\n11020\n11021\n11022\n11023\n11024\n11025\n11026\n11027\n11028\n11029\n11030\n11031\n11032\n11033\n11034\n11035\n11036\n11037\n11038\n11039\n11040\n11041\n11042\n11043\n11044\n11045\n11046\n11047\n11048\n11049\n11050\n11051\n11052\n11053\n11054\n11055\n11056\n11057\n11058\n11059\n11060\n11061\n11062\n11063\n11064\n11065\n11066\n11067\n11068\n11069\n11070\n11071\n11072\n11073\n11074\n11075\n11076\n11077\n11078\n11079\n11080\n11081\n11082\n11083\n11084\n11085\n11086\n11087\n11088\n11089\n11090\n11091\n11092\n11093\n11094\n11095\n11096\n11097\n11098\n11099\n11100\n11101\n11102\n11103\n11104\n11105\n11106\n11107\n11108\n11109\n11110\n11111\n11112\n11113\n11114\n11115\n11116\n11117\n11118\n11119\n11120\n11121\n11122\n11123\n11124\n11125\n11126\n11127\n11128\n11129\n11130\n11131\n11132\n11133\n11134\n11135\n11136\n11137\n11138\n11139\n11140\n11141\n11142\n11143\n11144\n11145\n11146\n11147\n11148\n11149\n11150\n11151\n11152\n11153\n11154\n11155\n11156\n11157\n11158\n11159\n11160\n11161\n11162\n11163\n11164\n11165\n11166\n11167\n11168\n11169\n11170\n11171\n11172\n11173\n11174\n11175\n11176\n11177\n11178\n11179\n11180\n11181\n11182\n11183\n11184\n11185\n11186\n11187\n11188\n11189\n11190\n11191\n11192\n11193\n11194\n11195\n11196\n11197\n11198\n11199\n11200\n11201\n11202\n11203\n11204\n11205\n11206\n11207\n11208\n11209\n11210\n11211\n11212\n11213\n11214\n11215\n11216\n11217\n11218\n11219\n11220\n11221\n11222\n11223\n11224\n11225\n11226\n11227\n11228\n11229\n11230\n11231\n11232\n11233\n11234\n11235\n1
1236\n11237\n11238\n11239\n11240\n11241\n11242\n11243\n11244\n11245\n11246\n11247\n11248\n11249\n11250\n11251\n11252\n11253\n11254\n11255\n11256\n11257\n11258\n11259\n11260\n11261\n11262\n11263\n11264\n11265\n11266\n11267\n11268\n11269\n11270\n11271\n11272\n11273\n11274\n11275\n11276\n11277\n11278\n11279\n11280\n11281\n11282\n11283\n11284\n11285\n11286\n11287\n11288\n11289\n11290\n11291\n11292\n11293\n11294\n11295\n11296\n11297\n11298\n11299\n11300\n11301\n11302\n11303\n11304\n11305\n11306\n11307\n11308\n11309\n11310\n11311\n11312\n11313\n11314\n11315\n11316\n11317\n11318\n11319\n11320\n11321\n11322\n11323\n11324\n11325\n11326\n11327\n11328\n11329\n11330\n11331\n11332\n11333\n11334\n11335\n11336\n11337\n11338\n11339\n11340\n11341\n11342\n11343\n11344\n11345\n11346\n11347\n11348\n11349\n11350\n11351\n11352\n11353\n11354\n11355\n11356\n11357\n11358\n11359\n11360\n11361\n11362\n11363\n11364\n11365\n11366\n11367\n11368\n11369\n11370\n11371\n11372\n11373\n11374\n11375\n11376\n11377\n11378\n11379\n11380\n11381\n11382\n11383\n11384\n11385\n11386\n11387\n11388\n11389\n11390\n11391\n11392\n11393\n11394\n11395\n11396\n11397\n11398\n11399\n11400\n11401\n11402\n11403\n11404\n11405\n11406\n11407\n11408\n11409\n11410\n11411\n11412\n11413\n11414\n11415\n11416\n11417\n11418\n11419\n11420\n11421\n11422\n11423\n11424\n11425\n11426\n11427\n11428\n11429\n11430\n11431\n11432\n11433\n11434\n11435\n11436\n11437\n11438\n11439\n11440\n11441\n11442\n11443\n11444\n11445\n11446\n11447\n11448\n11449\n11450\n11451\n11452\n11453\n11454\n11455\n11456\n11457\n11458\n11459\n11460\n11461\n11462\n11463\n11464\n11465\n11466\n11467\n11468\n11469\n11470\n11471\n11472\n11473\n11474\n11475\n11476\n11477\n11478\n11479\n11480\n11481\n11482\n11483\n11484\n11485\n11486\n11487\n11488\n11489\n11490\n11491\n11492\n11493\n11494\n11495\n11496\n11497\n11498\n11499\n11500\n11501\n11502\n11503\n11504\n11505\n11506\n11507\n11508\n11509\n11510\n11511\n11512\n11513\n11514\n11515\n11516\n11517\n11518\n11519\n11520\n11521\n11522\n11523\n11524\n11525\n11526\n11527\n11528\n11529\n11530\n11531\n11532\n11533\n11534\n11535\n11536\n11537\n11538\n11539\n11540\n11541\n11542\n11543\n11544\n11545\n11546\n11547\n11548\n11549\n11550\n11551\n11552\n11553\n11554\n11555\n11556\n11557\n11558\n11559\n11560\n11561\n11562\n11563\n11564\n11565\n11566\n11567\n11568\n11569\n11570\n11571\n11572\n11573\n11574\n11575\n11576\n11577\n11578\n11579\n11580\n11581\n11582\n11583\n11584\n11585\n11586\n11587\n11588\n11589\n11590\n11591\n11592\n11593\n11594\n11595\n11596\n11597\n11598\n11599\n11600\n11601\n11602\n11603\n11604\n11605\n11606\n11607\n11608\n11609\n11610\n11611\n11612\n11613\n11614\n11615\n11616\n11617\n11618\n11619\n11620\n11621\n11622\n11623\n11624\n11625\n11626\n11627\n11628\n11629\n11630\n11631\n11632\n11633\n11634\n11635\n11636\n11637\n11638\n11639\n11640\n11641\n11642\n11643\n11644\n11645\n11646\n11647\n11648\n11649\n11650\n11651\n11652\n11653\n11654\n11655\n11656\n11657\n11658\n11659\n11660\n11661\n11662\n11663\n11664\n11665\n11666\n11667\n11668\n11669\n11670\n11671\n11672\n11673\n11674\n11675\n11676\n11677\n11678\n11679\n11680\n11681\n11682\n11683\n11684\n11685\n11686\n11687\n11688\n11689\n11690\n11691\n11692\n11693\n11694\n11695\n11696\n11697\n11698\n11699\n11700\n11701\n11702\n11703\n11704\n11705\n11706\n11707\n11708\n11709\n11710\n11711\n11712\n11713\n11714\n11715\n11716\n11717\n11718\n11719\n11720\n11721\n11722\n11723\n11724\n11725\n11726\n11727\n11728\n11729\n11730\n11731\n11732\n11733\n11734\n11735\n11736\n11737\n11738\n11739\n11740\n11741\n11742\n11743\n
11744\n11745\n11746\n11747\n11748\n11749\n11750\n11751\n11752\n11753\n11754\n11755\n11756\n11757\n11758\n11759\n11760\n11761\n11762\n11763\n11764\n11765\n11766\n11767\n11768\n11769\n11770\n11771\n11772\n11773\n11774\n11775\n11776\n11777\n11778\n11779\n11780\n11781\n11782\n11783\n11784\n11785\n11786\n11787\n11788\n11789\n11790\n11791\n11792\n11793\n11794\n11795\n11796\n11797\n11798\n11799\n11800\n11801\n11802\n11803\n11804\n11805\n11806\n11807\n11808\n11809\n11810\n11811\n11812\n11813\n11814\n11815\n11816\n11817\n11818\n11819\n11820\n11821\n11822\n11823\n11824\n11825\n11826\n11827\n11828\n11829\n11830\n11831\n11832\n11833\n11834\n11835\n11836\n11837\n11838\n11839\n11840\n11841\n11842\n11843\n11844\n11845\n11846\n11847\n11848\n11849\n11850\n11851\n11852\n11853\n11854\n11855\n11856\n11857\n11858\n11859\n11860\n11861\n11862\n11863\n11864\n11865\n11866\n11867\n11868\n11869\n11870\n11871\n11872\n11873\n11874\n11875\n11876\n11877\n11878\n11879\n11880\n11881\n11882\n11883\n11884\n11885\n11886\n11887\n11888\n11889\n11890\n11891\n11892\n11893\n11894\n11895\n11896\n11897\n11898\n11899\n11900\n11901\n11902\n11903\n11904\n11905\n11906\n11907\n11908\n11909\n11910\n11911\n11912\n11913\n11914\n11915\n11916\n11917\n11918\n11919\n11920\n11921\n11922\n11923\n11924\n11925\n11926\n11927\n11928\n11929\n11930\n11931\n11932\n11933\n11934\n11935\n11936\n11937\n11938\n11939\n11940\n11941\n11942\n11943\n11944\n11945\n11946\n11947\n11948\n11949\n11950\n11951\n11952\n11953\n11954\n11955\n11956\n11957\n11958\n11959\n11960\n11961\n11962\n11963\n11964\n11965\n11966\n11967\n11968\n11969\n11970\n11971\n11972\n11973\n11974\n11975\n11976\n11977\n11978\n11979\n11980\n11981\n11982\n11983\n11984\n11985\n11986\n11987\n11988\n11989\n11990\n11991\n11992\n11993\n11994\n11995\n11996\n11997\n11998\n11999\n12000\n12001\n12002\n12003\n12004\n12005\n12006\n12007\n12008\n12009\n12010\n12011\n12012\n12013\n12014\n12015\n12016\n12017\n12018\n12019\n12020\n12021\n12022\n12023\n12024\n12025\n12026\n12027\n12028\n12029\n12030\n12031\n12032\n12033\n12034\n12035\n12036\n12037\n12038\n12039\n12040\n12041\n12042\n12043\n12044\n12045\n12046\n12047\n12048\n12049\n12050\n12051\n12052\n12053\n12054\n12055\n12056\n12057\n12058\n12059\n12060\n12061\n12062\n12063\n12064\n12065\n12066\n12067\n12068\n12069\n12070\n12071\n12072\n12073\n12074\n12075\n12076\n12077\n12078\n12079\n12080\n12081\n12082\n12083\n12084\n12085\n12086\n12087\n12088\n12089\n12090\n12091\n12092\n12093\n12094\n12095\n12096\n12097\n12098\n12099\n12100\n12101\n12102\n12103\n12104\n12105\n12106\n12107\n12108\n12109\n12110\n12111\n12112\n12113\n12114\n12115\n12116\n12117\n12118\n12119\n12120\n12121\n12122\n12123\n12124\n12125\n12126\n12127\n12128\n12129\n12130\n12131\n12132\n12133\n12134\n12135\n12136\n12137\n12138\n12139\n12140\n12141\n12142\n12143\n12144\n12145\n12146\n12147\n12148\n12149\n12150\n12151\n12152\n12153\n12154\n12155\n12156\n12157\n12158\n12159\n12160\n12161\n12162\n12163\n12164\n12165\n12166\n12167\n12168\n12169\n12170\n12171\n12172\n12173\n12174\n12175\n12176\n12177\n12178\n12179\n12180\n12181\n12182\n12183\n12184\n12185\n12186\n12187\n12188\n12189\n12190\n12191\n12192\n12193\n12194\n12195\n12196\n12197\n12198\n12199\n12200\n12201\n12202\n12203\n12204\n12205\n12206\n12207\n12208\n12209\n12210\n12211\n12212\n12213\n12214\n12215\n12216\n12217\n12218\n12219\n12220\n12221\n12222\n12223\n12224\n12225\n12226\n12227\n12228\n12229\n12230\n12231\n12232\n12233\n12234\n12235\n12236\n12237\n12238\n12239\n12240\n12241\n12242\n12243\n12244\n12245\n12246\n12247\n12248\n12249\n12250\n12251\
n12252\n12253\n12254\n12255\n12256\n12257\n12258\n12259\n12260\n12261\n12262\n12263\n12264\n12265\n12266\n12267\n12268\n12269\n12270\n12271\n12272\n12273\n12274\n12275\n12276\n12277\n12278\n12279\n12280\n12281\n12282\n12283\n12284\n12285\n12286\n12287\n12288\n12289\n12290\n12291\n12292\n12293\n12294\n12295\n12296\n12297\n12298\n12299\n12300\n12301\n12302\n12303\n12304\n12305\n12306\n12307\n12308\n12309\n12310\n12311\n12312\n12313\n12314\n12315\n12316\n12317\n12318\n12319\n12320\n12321\n12322\n12323\n12324\n12325\n12326\n12327\n12328\n12329\n12330\n12331\n12332\n12333\n12334\n12335\n12336\n12337\n12338\n12339\n12340\n12341\n12342\n12343\n12344\n12345\n12346\n12347\n12348\n12349\n12350\n12351\n12352\n12353\n12354\n12355\n12356\n12357\n12358\n12359\n12360\n12361\n12362\n12363\n12364\n12365\n12366\n12367\n12368\n12369\n12370\n12371\n12372\n12373\n12374\n12375\n12376\n12377\n12378\n12379\n12380\n12381\n12382\n12383\n12384\n12385\n12386\n12387\n12388\n12389\n12390\n12391\n12392\n12393\n12394\n12395\n12396\n12397\n12398\n12399\n12400\n12401\n12402\n12403\n12404\n12405\n12406\n12407\n12408\n12409\n12410\n12411\n12412\n12413\n12414\n12415\n12416\n12417\n12418\n12419\n12420\n12421\n12422\n12423\n12424\n12425\n12426\n12427\n12428\n12429\n12430\n12431\n12432\n12433\n12434\n12435\n12436\n12437\n12438\n12439\n12440\n12441\n12442\n12443\n12444\n12445\n12446\n12447\n12448\n12449\n12450\n12451\n12452\n12453\n12454\n12455\n12456\n12457\n12458\n12459\n12460\n12461\n12462\n12463\n12464\n12465\n12466\n12467\n12468\n12469\n12470\n12471\n12472\n12473\n12474\n12475\n12476\n12477\n12478\n12479\n12480\n12481\n12482\n12483\n12484\n12485\n12486\n12487\n12488\n12489\n12490\n12491\n12492\n12493\n12494\n12495\n12496\n12497\n12498\n12499\n12500\n12501\n12502\n12503\n12504\n12505\n12506\n12507\n12508\n12509\n12510\n12511\n12512\n12513\n12514\n12515\n12516\n12517\n12518\n12519\n12520\n12521\n12522\n12523\n12524\n12525\n12526\n12527\n12528\n12529\n12530\n12531\n12532\n12533\n12534\n12535\n12536\n12537\n12538\n12539\n12540\n12541\n12542\n12543\n12544\n12545\n12546\n12547\n12548\n12549\n12550\n12551\n12552\n12553\n12554\n12555\n12556\n12557\n12558\n12559\n12560\n12561\n12562\n12563\n12564\n12565\n12566\n12567\n12568\n12569\n12570\n12571\n12572\n12573\n12574\n12575\n12576\n12577\n12578\n12579\n12580\n12581\n12582\n12583\n12584\n12585\n12586\n12587\n12588\n12589\n12590\n12591\n12592\n12593\n12594\n12595\n12596\n12597\n12598\n12599\n12600\n12601\n12602\n12603\n12604\n12605\n12606\n12607\n12608\n12609\n12610\n12611\n12612\n12613\n12614\n12615\n12616\n12617\n12618\n12619\n12620\n12621\n12622\n12623\n12624\n12625\n12626\n12627\n12628\n12629\n12630\n12631\n12632\n12633\n12634\n12635\n12636\n12637\n12638\n12639\n12640\n12641\n12642\n12643\n12644\n12645\n12646\n12647\n12648\n12649\n12650\n12651\n12652\n12653\n12654\n12655\n12656\n12657\n12658\n12659\n12660\n12661\n12662\n12663\n12664\n12665\n12666\n12667\n12668\n12669\n12670\n12671\n12672\n12673\n12674\n12675\n12676\n12677\n12678\n12679\n12680\n12681\n12682\n12683\n12684\n12685\n12686\n12687\n12688\n12689\n12690\n12691\n12692\n12693\n12694\n12695\n12696\n12697\n12698\n12699\n12700\n12701\n12702\n12703\n12704\n12705\n12706\n12707\n12708\n12709\n12710\n12711\n12712\n12713\n12714\n12715\n12716\n12717\n12718\n12719\n12720\n12721\n12722\n12723\n12724\n12725\n12726\n12727\n12728\n12729\n12730\n12731\n12732\n12733\n12734\n12735\n12736\n12737\n12738\n12739\n12740\n12741\n12742\n12743\n12744\n12745\n12746\n12747\n12748\n12749\n12750\n12751\n12752\n12753\n12754\n12755\n12756\n12757\n12758\n12759
\n12760\n12761\n12762\n12763\n12764\n12765\n12766\n12767\n12768\n12769\n12770\n12771\n12772\n12773\n12774\n12775\n12776\n12777\n12778\n12779\n12780\n12781\n12782\n12783\n12784\n12785\n12786\n12787\n12788\n12789\n12790\n12791\n12792\n12793\n12794\n12795\n12796\n12797\n12798\n12799\n12800\n12801\n12802\n12803\n12804\n12805\n12806\n12807\n12808\n12809\n12810\n12811\n12812\n12813\n12814\n12815\n12816\n12817\n12818\n12819\n12820\n12821\n12822\n12823\n12824\n12825\n12826\n12827\n12828\n12829\n12830\n12831\n12832\n12833\n12834\n12835\n12836\n12837\n12838\n12839\n12840\n12841\n12842\n12843\n12844\n12845\n12846\n12847\n12848\n12849\n12850\n12851\n12852\n12853\n12854\n12855\n12856\n12857\n12858\n12859\n12860\n12861\n12862\n12863\n12864\n12865\n12866\n12867\n12868\n12869\n12870\n12871\n12872\n12873\n12874\n12875\n12876\n12877\n12878\n12879\n12880\n12881\n12882\n12883\n12884\n12885\n12886\n12887\n12888\n12889\n12890\n12891\n12892\n12893\n12894\n12895\n12896\n12897\n12898\n12899\n12900\n12901\n12902\n12903\n12904\n12905\n12906\n12907\n12908\n12909\n12910\n12911\n12912\n12913\n12914\n12915\n12916\n12917\n12918\n12919\n12920\n12921\n12922\n12923\n12924\n12925\n12926\n12927\n12928\n12929\n12930\n12931\n12932\n12933\n12934\n12935\n12936\n12937\n12938\n12939\n12940\n12941\n12942\n12943\n12944\n12945\n12946\n12947\n12948\n12949\n12950\n12951\n12952\n12953\n12954\n12955\n12956\n12957\n12958\n12959\n12960\n12961\n12962\n12963\n12964\n12965\n12966\n12967\n12968\n12969\n12970\n12971\n12972\n12973\n12974\n12975\n12976\n12977\n12978\n12979\n12980\n12981\n12982\n12983\n12984\n12985\n12986\n12987\n12988\n12989\n12990\n12991\n12992\n12993\n12994\n12995\n12996\n12997\n12998\n12999\n13000\n13001\n13002\n13003\n13004\n13005\n13006\n13007\n13008\n13009\n13010\n13011\n13012\n13013\n13014\n13015\n13016\n13017\n13018\n13019\n13020\n13021\n13022\n13023\n13024\n13025\n13026\n13027\n13028\n13029\n13030\n13031\n13032\n13033\n13034\n13035\n13036\n13037\n13038\n13039\n13040\n13041\n13042\n13043\n13044\n13045\n13046\n13047\n13048\n13049\n13050\n13051\n13052\n13053\n13054\n13055\n13056\n13057\n13058\n13059\n13060\n13061\n13062\n13063\n13064\n13065\n13066\n13067\n13068\n13069\n13070\n13071\n13072\n13073\n13074\n13075\n13076\n13077\n13078\n13079\n13080\n13081\n13082\n13083\n13084\n13085\n13086\n13087\n13088\n13089\n13090\n13091\n13092\n13093\n13094\n13095\n13096\n13097\n13098\n13099\n13100\n13101\n13102\n13103\n13104\n13105\n13106\n13107\n13108\n13109\n13110\n13111\n13112\n13113\n13114\n13115\n13116\n13117\n13118\n13119\n13120\n13121\n13122\n13123\n13124\n13125\n13126\n13127\n13128\n13129\n13130\n13131\n13132\n13133\n13134\n13135\n13136\n13137\n13138\n13139\n13140\n13141\n13142\n13143\n13144\n13145\n13146\n13147\n13148\n13149\n13150\n13151\n13152\n13153\n13154\n13155\n13156\n13157\n13158\n13159\n13160\n13161\n13162\n13163\n13164\n13165\n13166\n13167\n13168\n13169\n13170\n13171\n13172\n13173\n13174\n13175\n13176\n13177\n13178\n13179\n13180\n13181\n13182\n13183\n13184\n13185\n13186\n13187\n13188\n13189\n13190\n13191\n13192\n13193\n13194\n13195\n13196\n13197\n13198\n13199\n13200\n13201\n13202\n13203\n13204\n13205\n13206\n13207\n13208\n13209\n13210\n13211\n13212\n13213\n13214\n13215\n13216\n13217\n13218\n13219\n13220\n13221\n13222\n13223\n13224\n13225\n13226\n13227\n13228\n13229\n13230\n13231\n13232\n13233\n13234\n13235\n13236\n13237\n13238\n13239\n13240\n13241\n13242\n13243\n13244\n13245\n13246\n13247\n13248\n13249\n13250\n13251\n13252\n13253\n13254\n13255\n13256\n13257\n13258\n13259\n13260\n13261\n13262\n13263\n13264\n13265\n13266\n1326
7\n13268\n13269\n13270\n13271\n13272\n13273\n13274\n13275\n13276\n13277\n13278\n13279\n13280\n13281\n13282\n13283\n13284\n13285\n13286\n13287\n13288\n13289\n13290\n13291\n13292\n13293\n13294\n13295\n13296\n13297\n13298\n13299\n13300\n13301\n13302\n13303\n13304\n13305\n13306\n13307\n13308\n13309\n13310\n13311\n13312\n13313\n13314\n13315\n13316\n13317\n13318\n13319\n13320\n13321\n13322\n13323\n13324\n13325\n13326\n13327\n13328\n13329\n13330\n13331\n13332\n13333\n13334\n13335\n13336\n13337\n13338\n13339\n13340\n13341\n13342\n13343\n13344\n13345\n13346\n13347\n13348\n13349\n13350\n13351\n13352\n13353\n13354\n13355\n13356\n13357\n13358\n13359\n13360\n13361\n13362\n13363\n13364\n13365\n13366\n13367\n13368\n13369\n13370\n13371\n13372\n13373\n13374\n13375\n13376\n13377\n13378\n13379\n13380\n13381\n13382\n13383\n13384\n13385\n13386\n13387\n13388\n13389\n13390\n13391\n13392\n13393\n13394\n13395\n13396\n13397\n13398\n13399\n13400\n13401\n13402\n13403\n13404\n13405\n13406\n13407\n13408\n13409\n13410\n13411\n13412\n13413\n13414\n13415\n13416\n13417\n13418\n13419\n13420\n13421\n13422\n13423\n13424\n13425\n13426\n13427\n13428\n13429\n13430\n13431\n13432\n13433\n13434\n13435\n13436\n13437\n13438\n13439\n13440\n13441\n13442\n13443\n13444\n13445\n13446\n13447\n13448\n13449\n13450\n13451\n13452\n13453\n13454\n13455\n13456\n13457\n13458\n13459\n13460\n13461\n13462\n13463\n13464\n13465\n13466\n13467\n13468\n13469\n13470\n13471\n13472\n13473\n13474\n13475\n13476\n13477\n13478\n13479\n13480\n13481\n13482\n13483\n13484\n13485\n13486\n13487\n13488\n13489\n13490\n13491\n13492\n13493\n13494\n13495\n13496\n13497\n13498\n13499\n13500\n13501\n13502\n13503\n13504\n13505\n13506\n13507\n13508\n13509\n13510\n13511\n13512\n13513\n13514\n13515\n13516\n13517\n13518\n13519\n13520\n13521\n13522\n13523\n13524\n13525\n13526\n13527\n13528\n13529\n13530\n13531\n13532\n13533\n13534\n13535\n13536\n13537\n13538\n13539\n13540\n13541\n13542\n13543\n13544\n13545\n13546\n13547\n13548\n13549\n13550\n13551\n13552\n13553\n13554\n13555\n13556\n13557\n13558\n13559\n13560\n13561\n13562\n13563\n13564\n13565\n13566\n13567\n13568\n13569\n13570\n13571\n13572\n13573\n13574\n13575\n13576\n13577\n13578\n13579\n13580\n13581\n13582\n13583\n13584\n13585\n13586\n13587\n13588\n13589\n13590\n13591\n13592\n13593\n13594\n13595\n13596\n13597\n13598\n13599\n13600\n13601\n13602\n13603\n13604\n13605\n13606\n13607\n13608\n13609\n13610\n13611\n13612\n13613\n13614\n13615\n13616\n13617\n13618\n13619\n13620\n13621\n13622\n13623\n13624\n13625\n13626\n13627\n13628\n13629\n13630\n13631\n13632\n13633\n13634\n13635\n13636\n13637\n13638\n13639\n13640\n13641\n13642\n13643\n13644\n13645\n13646\n13647\n13648\n13649\n13650\n13651\n13652\n13653\n13654\n13655\n13656\n13657\n13658\n13659\n13660\n13661\n13662\n13663\n13664\n13665\n13666\n13667\n13668\n13669\n13670\n13671\n13672\n13673\n13674\n13675\n13676\n13677\n13678\n13679\n13680\n13681\n13682\n13683\n13684\n13685\n13686\n13687\n13688\n13689\n13690\n13691\n13692\n13693\n13694\n13695\n13696\n13697\n13698\n13699\n13700\n13701\n13702\n13703\n13704\n13705\n13706\n13707\n13708\n13709\n13710\n13711\n13712\n13713\n13714\n13715\n13716\n13717\n13718\n13719\n13720\n13721\n13722\n13723\n13724\n13725\n13726\n13727\n13728\n13729\n13730\n13731\n13732\n13733\n13734\n13735\n13736\n13737\n13738\n13739\n13740\n13741\n13742\n13743\n13744\n13745\n13746\n13747\n13748\n13749\n13750\n13751\n13752\n13753\n13754\n13755\n13756\n13757\n13758\n13759\n13760\n13761\n13762\n13763\n13764\n13765\n13766\n13767\n13768\n13769\n13770\n13771\n13772\n13773\n13774\n137
75\n13776\n13777\n13778\n13779\n13780\n13781\n13782\n13783\n13784\n13785\n13786\n13787\n13788\n13789\n13790\n13791\n13792\n13793\n13794\n13795\n13796\n13797\n13798\n13799\n13800\n13801\n13802\n13803\n13804\n13805\n13806\n13807\n13808\n13809\n13810\n13811\n13812\n13813\n13814\n13815\n13816\n13817\n13818\n13819\n13820\n13821\n13822\n13823\n13824\n13825\n13826\n13827\n13828\n13829\n13830\n13831\n13832\n13833\n13834\n13835\n13836\n13837\n13838\n13839\n13840\n13841\n13842\n13843\n13844\n13845\n13846\n13847\n13848\n13849\n13850\n13851\n13852\n13853\n13854\n13855\n13856\n13857\n13858\n13859\n13860\n13861\n13862\n13863\n13864\n13865\n13866\n13867\n13868\n13869\n13870\n13871\n13872\n13873\n13874\n13875\n13876\n13877\n13878\n13879\n13880\n13881\n13882\n13883\n13884\n13885\n13886\n13887\n13888\n13889\n13890\n13891\n13892\n13893\n13894\n13895\n13896\n13897\n13898\n13899\n13900\n13901\n13902\n13903\n13904\n13905\n13906\n13907\n13908\n13909\n13910\n13911\n13912\n13913\n13914\n13915\n13916\n13917\n13918\n13919\n13920\n13921\n13922\n13923\n13924\n13925\n13926\n13927\n13928\n13929\n13930\n13931\n13932\n13933\n13934\n13935\n13936\n13937\n13938\n13939\n13940\n13941\n13942\n13943\n13944\n13945\n13946\n13947\n13948\n13949\n13950\n13951\n13952\n13953\n13954\n13955\n13956\n13957\n13958\n13959\n13960\n13961\n13962\n13963\n13964\n13965\n13966\n13967\n13968\n13969\n13970\n13971\n13972\n13973\n13974\n13975\n13976\n13977\n13978\n13979\n13980\n13981\n13982\n13983\n13984\n13985\n13986\n13987\n13988\n13989\n13990\n13991\n13992\n13993\n13994\n13995\n13996\n13997\n13998\n13999\n14000\n14001\n14002\n14003\n14004\n14005\n14006\n14007\n14008\n14009\n14010\n14011\n14012\n14013\n14014\n14015\n14016\n14017\n14018\n14019\n14020\n14021\n14022\n14023\n14024\n14025\n14026\n14027\n14028\n14029\n14030\n14031\n14032\n14033\n14034\n14035\n14036\n14037\n14038\n14039\n14040\n14041\n14042\n14043\n14044\n14045\n14046\n14047\n14048\n14049\n14050\n14051\n14052\n14053\n14054\n14055\n14056\n14057\n14058\n14059\n14060\n14061\n14062\n14063\n14064\n14065\n14066\n14067\n14068\n14069\n14070\n14071\n14072\n14073\n14074\n14075\n14076\n14077\n14078\n14079\n14080\n14081\n14082\n14083\n14084\n14085\n14086\n14087\n14088\n14089\n14090\n14091\n14092\n14093\n14094\n14095\n14096\n14097\n14098\n14099\n14100\n14101\n14102\n14103\n14104\n14105\n14106\n14107\n14108\n14109\n14110\n14111\n14112\n14113\n14114\n14115\n14116\n14117\n14118\n14119\n14120\n14121\n14122\n14123\n14124\n14125\n14126\n14127\n14128\n14129\n14130\n14131\n14132\n14133\n14134\n14135\n14136\n14137\n14138\n14139\n14140\n14141\n14142\n14143\n14144\n14145\n14146\n14147\n14148\n14149\n14150\n14151\n14152\n14153\n14154\n14155\n14156\n14157\n14158\n14159\n14160\n14161\n14162\n14163\n14164\n14165\n14166\n14167\n14168\n14169\n14170\n14171\n14172\n14173\n14174\n14175\n14176\n14177\n14178\n14179\n14180\n14181\n14182\n14183\n14184\n14185\n14186\n14187\n14188\n14189\n14190\n14191\n14192\n14193\n14194\n14195\n14196\n14197\n14198\n14199\n14200\n14201\n14202\n14203\n14204\n14205\n14206\n14207\n14208\n14209\n14210\n14211\n14212\n14213\n14214\n14215\n14216\n14217\n14218\n14219\n14220\n14221\n14222\n14223\n14224\n14225\n14226\n14227\n14228\n14229\n14230\n14231\n14232\n14233\n14234\n14235\n14236\n14237\n14238\n14239\n14240\n14241\n14242\n14243\n14244\n14245\n14246\n14247\n14248\n14249\n14250\n14251\n14252\n14253\n14254\n14255\n14256\n14257\n14258\n14259\n14260\n14261\n14262\n14263\n14264\n14265\n14266\n14267\n14268\n14269\n14270\n14271\n14272\n14273\n14274\n14275\n14276\n14277\n14278\n14279\n14280\n14281\n14282\n14
283\n14284\n14285\n14286\n14287\n14288\n14289\n14290\n14291\n14292\n14293\n14294\n14295\n14296\n14297\n14298\n14299\n14300\n14301\n14302\n14303\n14304\n14305\n14306\n14307\n14308\n14309\n14310\n14311\n14312\n14313\n14314\n14315\n14316\n14317\n14318\n14319\n14320\n14321\n14322\n14323\n14324\n14325\n14326\n14327\n14328\n14329\n14330\n14331\n14332\n14333\n14334\n14335\n14336\n14337\n14338\n14339\n14340\n14341\n14342\n14343\n14344\n14345\n14346\n14347\n14348\n14349\n14350\n14351\n14352\n14353\n14354\n14355\n14356\n14357\n14358\n14359\n14360\n14361\n14362\n14363\n14364\n14365\n14366\n14367\n14368\n14369\n14370\n14371\n14372\n14373\n14374\n14375\n14376\n14377\n14378\n14379\n14380\n14381\n14382\n14383\n14384\n14385\n14386\n14387\n14388\n14389\n14390\n14391\n14392\n14393\n14394\n14395\n14396\n14397\n14398\n14399\n14400\n14401\n14402\n14403\n14404\n14405\n14406\n14407\n14408\n14409\n14410\n14411\n14412\n14413\n14414\n14415\n14416\n14417\n14418\n14419\n14420\n14421\n14422\n14423\n14424\n14425\n14426\n14427\n14428\n14429\n14430\n14431\n14432\n14433\n14434\n14435\n14436\n14437\n14438\n14439\n14440\n14441\n14442\n14443\n14444\n14445\n14446\n14447\n14448\n14449\n14450\n14451\n14452\n14453\n14454\n14455\n14456\n14457\n14458\n14459\n14460\n14461\n14462\n14463\n14464\n14465\n14466\n14467\n14468\n14469\n14470\n14471\n14472\n14473\n14474\n14475\n14476\n14477\n14478\n14479\n14480\n14481\n14482\n14483\n14484\n14485\n14486\n14487\n14488\n14489\n14490\n14491\n14492\n14493\n14494\n14495\n14496\n14497\n14498\n14499\n14500\n14501\n14502\n14503\n14504\n14505\n14506\n14507\n14508\n14509\n14510\n14511\n14512\n14513\n14514\n14515\n14516\n14517\n14518\n14519\n14520\n14521\n14522\n14523\n14524\n14525\n14526\n14527\n14528\n14529\n14530\n14531\n14532\n14533\n14534\n14535\n14536\n14537\n14538\n14539\n14540\n14541\n14542\n14543\n14544\n14545\n14546\n14547\n14548\n14549\n14550\n14551\n14552\n14553\n14554\n14555\n14556\n14557\n14558\n14559\n14560\n14561\n14562\n14563\n14564\n14565\n14566\n14567\n14568\n14569\n14570\n14571\n14572\n14573\n14574\n14575\n14576\n14577\n14578\n14579\n14580\n14581\n14582\n14583\n14584\n14585\n14586\n14587\n14588\n14589\n14590\n14591\n14592\n14593\n14594\n14595\n14596\n14597\n14598\n14599\n14600\n14601\n14602\n14603\n14604\n14605\n14606\n14607\n14608\n14609\n14610\n14611\n14612\n14613\n14614\n14615\n14616\n14617\n14618\n14619\n14620\n14621\n14622\n14623\n14624\n14625\n14626\n14627\n14628\n14629\n14630\n14631\n14632\n14633\n14634\n14635\n14636\n14637\n14638\n14639\n14640\n14641\n14642\n14643\n14644\n14645\n14646\n14647\n14648\n14649\n14650\n14651\n14652\n14653\n14654\n14655\n14656\n14657\n14658\n14659\n14660\n14661\n14662\n14663\n14664\n14665\n14666\n14667\n14668\n14669\n14670\n14671\n14672\n14673\n14674\n14675\n14676\n14677\n14678\n14679\n14680\n14681\n14682\n14683\n14684\n14685\n14686\n14687\n14688\n14689\n14690\n14691\n14692\n14693\n14694\n14695\n14696\n14697\n14698\n14699\n14700\n14701\n14702\n14703\n14704\n14705\n14706\n14707\n14708\n14709\n14710\n14711\n14712\n14713\n14714\n14715\n14716\n14717\n14718\n14719\n14720\n14721\n14722\n14723\n14724\n14725\n14726\n14727\n14728\n14729\n14730\n14731\n14732\n14733\n14734\n14735\n14736\n14737\n14738\n14739\n14740\n14741\n14742\n14743\n14744\n14745\n14746\n14747\n14748\n14749\n14750\n14751\n14752\n14753\n14754\n14755\n14756\n14757\n14758\n14759\n14760\n14761\n14762\n14763\n14764\n14765\n14766\n14767\n14768\n14769\n14770\n14771\n14772\n14773\n14774\n14775\n14776\n14777\n14778\n14779\n14780\n14781\n14782\n14783\n14784\n14785\n14786\n14787\n14788\n14789\n14790\n1
4791\n14792\n14793\n14794\n14795\n14796\n14797\n14798\n14799\n14800\n14801\n14802\n14803\n14804\n14805\n14806\n14807\n14808\n14809\n14810\n14811\n14812\n14813\n14814\n14815\n14816\n14817\n14818\n14819\n14820\n14821\n14822\n14823\n14824\n14825\n14826\n14827\n14828\n14829\n14830\n14831\n14832\n14833\n14834\n14835\n14836\n14837\n14838\n14839\n14840\n14841\n14842\n14843\n14844\n14845\n14846\n14847\n14848\n14849\n14850\n14851\n14852\n14853\n14854\n14855\n14856\n14857\n14858\n14859\n14860\n14861\n14862\n14863\n14864\n14865\n14866\n14867\n14868\n14869\n14870\n14871\n14872\n14873\n14874\n14875\n14876\n14877\n14878\n14879\n14880\n14881\n14882\n14883\n14884\n14885\n14886\n14887\n14888\n14889\n14890\n14891\n14892\n14893\n14894\n14895\n14896\n14897\n14898\n14899\n14900\n14901\n14902\n14903\n14904\n14905\n14906\n14907\n14908\n14909\n14910\n14911\n14912\n14913\n14914\n14915\n14916\n14917\n14918\n14919\n14920\n14921\n14922\n14923\n14924\n14925\n14926\n14927\n14928\n14929\n14930\n14931\n14932\n14933\n14934\n14935\n14936\n14937\n14938\n14939\n14940\n14941\n14942\n14943\n14944\n14945\n14946\n14947\n14948\n14949\n14950\n14951\n14952\n14953\n14954\n14955\n14956\n14957\n14958\n14959\n14960\n14961\n14962\n14963\n14964\n14965\n14966\n14967\n14968\n14969\n14970\n14971\n14972\n14973\n14974\n14975\n14976\n14977\n14978\n14979\n14980\n14981\n14982\n14983\n14984\n14985\n14986\n14987\n14988\n14989\n14990\n14991\n14992\n14993\n14994\n14995\n14996\n14997\n14998\n14999\n15000\n15001\n15002\n15003\n15004\n15005\n15006\n15007\n15008\n15009\n15010\n15011\n15012\n15013\n15014\n15015\n15016\n15017\n15018\n15019\n15020\n15021\n15022\n15023\n15024\n15025\n15026\n15027\n15028\n15029\n15030\n15031\n15032\n15033\n15034\n15035\n15036\n15037\n15038\n15039\n15040\n15041\n15042\n15043\n15044\n15045\n15046\n15047\n15048\n15049\n15050\n15051\n15052\n15053\n15054\n15055\n15056\n15057\n15058\n15059\n15060\n15061\n15062\n15063\n15064\n15065\n15066\n15067\n15068\n15069\n15070\n15071\n15072\n15073\n15074\n15075\n15076\n15077\n15078\n15079\n15080\n15081\n15082\n15083\n15084\n15085\n15086\n15087\n15088\n15089\n15090\n15091\n15092\n15093\n15094\n15095\n15096\n15097\n15098\n15099\n15100\n15101\n15102\n15103\n15104\n15105\n15106\n15107\n15108\n15109\n15110\n15111\n15112\n15113\n15114\n15115\n15116\n15117\n15118\n15119\n15120\n15121\n15122\n15123\n15124\n15125\n15126\n15127\n15128\n15129\n15130\n15131\n15132\n15133\n15134\n15135\n15136\n15137\n15138\n15139\n15140\n15141\n15142\n15143\n15144\n15145\n15146\n15147\n15148\n15149\n15150\n15151\n15152\n15153\n15154\n15155\n15156\n15157\n15158\n15159\n15160\n15161\n15162\n15163\n15164\n15165\n15166\n15167\n15168\n15169\n15170\n15171\n15172\n15173\n15174\n15175\n15176\n15177\n15178\n15179\n15180\n15181\n15182\n15183\n15184\n15185\n15186\n15187\n15188\n15189\n15190\n15191\n15192\n15193\n15194\n15195\n15196\n15197\n15198\n15199\n15200\n15201\n15202\n15203\n15204\n15205\n15206\n15207\n15208\n15209\n15210\n15211\n15212\n15213\n15214\n15215\n15216\n15217\n15218\n15219\n15220\n15221\n15222\n15223\n15224\n15225\n15226\n15227\n15228\n15229\n15230\n15231\n15232\n15233\n15234\n15235\n15236\n15237\n15238\n15239\n15240\n15241\n15242\n15243\n15244\n15245\n15246\n15247\n15248\n15249\n15250\n15251\n15252\n15253\n15254\n15255\n15256\n15257\n15258\n15259\n15260\n15261\n15262\n15263\n15264\n15265\n15266\n15267\n15268\n15269\n15270\n15271\n15272\n15273\n15274\n15275\n15276\n15277\n15278\n15279\n15280\n15281\n15282\n15283\n15284\n15285\n15286\n15287\n15288\n15289\n15290\n15291\n15292\n15293\n15294\n15295\n15296\n15297\n15298\n
[packaged data file content: a single newline-escaped run of consecutive integers, 15299 through 24439, continuing (truncated here); individual values omitted]
40\n24441\n24442\n24443\n24444\n24445\n24446\n24447\n24448\n24449\n24450\n24451\n24452\n24453\n24454\n24455\n24456\n24457\n24458\n24459\n24460\n24461\n24462\n24463\n24464\n24465\n24466\n24467\n24468\n24469\n24470\n24471\n24472\n24473\n24474\n24475\n24476\n24477\n24478\n24479\n24480\n24481\n24482\n24483\n24484\n24485\n24486\n24487\n24488\n24489\n24490\n24491\n24492\n24493\n24494\n24495\n24496\n24497\n24498\n24499\n24500\n24501\n24502\n24503\n24504\n24505\n24506\n24507\n24508\n24509\n24510\n24511\n24512\n24513\n24514\n24515\n24516\n24517\n24518\n24519\n24520\n24521\n24522\n24523\n24524\n24525\n24526\n24527\n24528\n24529\n24530\n24531\n24532\n24533\n24534\n24535\n24536\n24537\n24538\n24539\n24540\n24541\n24542\n24543\n24544\n24545\n24546\n24547\n24548\n24549\n24550\n24551\n24552\n24553\n24554\n24555\n24556\n24557\n24558\n24559\n24560\n24561\n24562\n24563\n24564\n24565\n24566\n24567\n24568\n24569\n24570\n24571\n24572\n24573\n24574\n24575\n24576\n24577\n24578\n24579\n24580\n24581\n24582\n24583\n24584\n24585\n24586\n24587\n24588\n24589\n24590\n24591\n24592\n24593\n24594\n24595\n24596\n24597\n24598\n24599\n24600\n24601\n24602\n24603\n24604\n24605\n24606\n24607\n24608\n24609\n24610\n24611\n24612\n24613\n24614\n24615\n24616\n24617\n24618\n24619\n24620\n24621\n24622\n24623\n24624\n24625\n24626\n24627\n24628\n24629\n24630\n24631\n24632\n24633\n24634\n24635\n24636\n24637\n24638\n24639\n24640\n24641\n24642\n24643\n24644\n24645\n24646\n24647\n24648\n24649\n24650\n24651\n24652\n24653\n24654\n24655\n24656\n24657\n24658\n24659\n24660\n24661\n24662\n24663\n24664\n24665\n24666\n24667\n24668\n24669\n24670\n24671\n24672\n24673\n24674\n24675\n24676\n24677\n24678\n24679\n24680\n24681\n24682\n24683\n24684\n24685\n24686\n24687\n24688\n24689\n24690\n24691\n24692\n24693\n24694\n24695\n24696\n24697\n24698\n24699\n24700\n24701\n24702\n24703\n24704\n24705\n24706\n24707\n24708\n24709\n24710\n24711\n24712\n24713\n24714\n24715\n24716\n24717\n24718\n24719\n24720\n24721\n24722\n24723\n24724\n24725\n24726\n24727\n24728\n24729\n24730\n24731\n24732\n24733\n24734\n24735\n24736\n24737\n24738\n24739\n24740\n24741\n24742\n24743\n24744\n24745\n24746\n24747\n24748\n24749\n24750\n24751\n24752\n24753\n24754\n24755\n24756\n24757\n24758\n24759\n24760\n24761\n24762\n24763\n24764\n24765\n24766\n24767\n24768\n24769\n24770\n24771\n24772\n24773\n24774\n24775\n24776\n24777\n24778\n24779\n24780\n24781\n24782\n24783\n24784\n24785\n24786\n24787\n24788\n24789\n24790\n24791\n24792\n24793\n24794\n24795\n24796\n24797\n24798\n24799\n24800\n24801\n24802\n24803\n24804\n24805\n24806\n24807\n24808\n24809\n24810\n24811\n24812\n24813\n24814\n24815\n24816\n24817\n24818\n24819\n24820\n24821\n24822\n24823\n24824\n24825\n24826\n24827\n24828\n24829\n24830\n24831\n24832\n24833\n24834\n24835\n24836\n24837\n24838\n24839\n24840\n24841\n24842\n24843\n24844\n24845\n24846\n24847\n24848\n24849\n24850\n24851\n24852\n24853\n24854\n24855\n24856\n24857\n24858\n24859\n24860\n24861\n24862\n24863\n24864\n24865\n24866\n24867\n24868\n24869\n24870\n24871\n24872\n24873\n24874\n24875\n24876\n24877\n24878\n24879\n24880\n24881\n24882\n24883\n24884\n24885\n24886\n24887\n24888\n24889\n24890\n24891\n24892\n24893\n24894\n24895\n24896\n24897\n24898\n24899\n24900\n24901\n24902\n24903\n24904\n24905\n24906\n24907\n24908\n24909\n24910\n24911\n24912\n24913\n24914\n24915\n24916\n24917\n24918\n24919\n24920\n24921\n24922\n24923\n24924\n24925\n24926\n24927\n24928\n24929\n24930\n24931\n24932\n24933\n24934\n24935\n24936\n24937\n24938\n24939\n24940\n24941\n24942\n24943\n24944\n24945\n24946\n24947\n24
948\n24949\n24950\n24951\n24952\n24953\n24954\n24955\n24956\n24957\n24958\n24959\n24960\n24961\n24962\n24963\n24964\n24965\n24966\n24967\n24968\n24969\n24970\n24971\n24972\n24973\n24974\n24975\n24976\n24977\n24978\n24979\n24980\n24981\n24982\n24983\n24984\n24985\n24986\n24987\n24988\n24989\n24990\n24991\n24992\n24993\n24994\n24995\n24996\n24997\n24998\n24999\n25000\n25001\n25002\n25003\n25004\n25005\n25006\n25007\n25008\n25009\n25010\n25011\n25012\n25013\n25014\n25015\n25016\n25017\n25018\n25019\n25020\n25021\n25022\n25023\n25024\n25025\n25026\n25027\n25028\n25029\n25030\n25031\n25032\n25033\n25034\n25035\n25036\n25037\n25038\n25039\n25040\n25041\n25042\n25043\n25044\n25045\n25046\n25047\n25048\n25049\n25050\n25051\n25052\n25053\n25054\n25055\n25056\n25057\n25058\n25059\n25060\n25061\n25062\n25063\n25064\n25065\n25066\n25067\n25068\n25069\n25070\n25071\n25072\n25073\n25074\n25075\n25076\n25077\n25078\n25079\n25080\n25081\n25082\n25083\n25084\n25085\n25086\n25087\n25088\n25089\n25090\n25091\n25092\n25093\n25094\n25095\n25096\n25097\n25098\n25099\n25100\n25101\n25102\n25103\n25104\n25105\n25106\n25107\n25108\n25109\n25110\n25111\n25112\n25113\n25114\n25115\n25116\n25117\n25118\n25119\n25120\n25121\n25122\n25123\n25124\n25125\n25126\n25127\n25128\n25129\n25130\n25131\n25132\n25133\n25134\n25135\n25136\n25137\n25138\n25139\n25140\n25141\n25142\n25143\n25144\n25145\n25146\n25147\n25148\n25149\n25150\n25151\n25152\n25153\n25154\n25155\n25156\n25157\n25158\n25159\n25160\n25161\n25162\n25163\n25164\n25165\n25166\n25167\n25168\n25169\n25170\n25171\n25172\n25173\n25174\n25175\n25176\n25177\n25178\n25179\n25180\n25181\n25182\n25183\n25184\n25185\n25186\n25187\n25188\n25189\n25190\n25191\n25192\n25193\n25194\n25195\n25196\n25197\n25198\n25199\n25200\n25201\n25202\n25203\n25204\n25205\n25206\n25207\n25208\n25209\n25210\n25211\n25212\n25213\n25214\n25215\n25216\n25217\n25218\n25219\n25220\n25221\n25222\n25223\n25224\n25225\n25226\n25227\n25228\n25229\n25230\n25231\n25232\n25233\n25234\n25235\n25236\n25237\n25238\n25239\n25240\n25241\n25242\n25243\n25244\n25245\n25246\n25247\n25248\n25249\n25250\n25251\n25252\n25253\n25254\n25255\n25256\n25257\n25258\n25259\n25260\n25261\n25262\n25263\n25264\n25265\n25266\n25267\n25268\n25269\n25270\n25271\n25272\n25273\n25274\n25275\n25276\n25277\n25278\n25279\n25280\n25281\n25282\n25283\n25284\n25285\n25286\n25287\n25288\n25289\n25290\n25291\n25292\n25293\n25294\n25295\n25296\n25297\n25298\n25299\n25300\n25301\n25302\n25303\n25304\n25305\n25306\n25307\n25308\n25309\n25310\n25311\n25312\n25313\n25314\n25315\n25316\n25317\n25318\n25319\n25320\n25321\n25322\n25323\n25324\n25325\n25326\n25327\n25328\n25329\n25330\n25331\n25332\n25333\n25334\n25335\n25336\n25337\n25338\n25339\n25340\n25341\n25342\n25343\n25344\n25345\n25346\n25347\n25348\n25349\n25350\n25351\n25352\n25353\n25354\n25355\n25356\n25357\n25358\n25359\n25360\n25361\n25362\n25363\n25364\n25365\n25366\n25367\n25368\n25369\n25370\n25371\n25372\n25373\n25374\n25375\n25376\n25377\n25378\n25379\n25380\n25381\n25382\n25383\n25384\n25385\n25386\n25387\n25388\n25389\n25390\n25391\n25392\n25393\n25394\n25395\n25396\n25397\n25398\n25399\n25400\n25401\n25402\n25403\n25404\n25405\n25406\n25407\n25408\n25409\n25410\n25411\n25412\n25413\n25414\n25415\n25416\n25417\n25418\n25419\n25420\n25421\n25422\n25423\n25424\n25425\n25426\n25427\n25428\n25429\n25430\n25431\n25432\n25433\n25434\n25435\n25436\n25437\n25438\n25439\n25440\n25441\n25442\n25443\n25444\n25445\n25446\n25447\n25448\n25449\n25450\n25451\n25452\n25453\n25454\n25455\n2
5456\n25457\n25458\n25459\n25460\n25461\n25462\n25463\n25464\n25465\n25466\n25467\n25468\n25469\n25470\n25471\n25472\n25473\n25474\n25475\n25476\n25477\n25478\n25479\n25480\n25481\n25482\n25483\n25484\n25485\n25486\n25487\n25488\n25489\n25490\n25491\n25492\n25493\n25494\n25495\n25496\n25497\n25498\n25499\n25500\n25501\n25502\n25503\n25504\n25505\n25506\n25507\n25508\n25509\n25510\n25511\n25512\n25513\n25514\n25515\n25516\n25517\n25518\n25519\n25520\n25521\n25522\n25523\n25524\n25525\n25526\n25527\n25528\n25529\n25530\n25531\n25532\n25533\n25534\n25535\n25536\n25537\n25538\n25539\n25540\n25541\n25542\n25543\n25544\n25545\n25546\n25547\n25548\n25549\n25550\n25551\n25552\n25553\n25554\n25555\n25556\n25557\n25558\n25559\n25560\n25561\n25562\n25563\n25564\n25565\n25566\n25567\n25568\n25569\n25570\n25571\n25572\n25573\n25574\n25575\n25576\n25577\n25578\n25579\n25580\n25581\n25582\n25583\n25584\n25585\n25586\n25587\n25588\n25589\n25590\n25591\n25592\n25593\n25594\n25595\n25596\n25597\n25598\n25599\n25600\n25601\n25602\n25603\n25604\n25605\n25606\n25607\n25608\n25609\n25610\n25611\n25612\n25613\n25614\n25615\n25616\n25617\n25618\n25619\n25620\n25621\n25622\n25623\n25624\n25625\n25626\n25627\n25628\n25629\n25630\n25631\n25632\n25633\n25634\n25635\n25636\n25637\n25638\n25639\n25640\n25641\n25642\n25643\n25644\n25645\n25646\n25647\n25648\n25649\n25650\n25651\n25652\n25653\n25654\n25655\n25656\n25657\n25658\n25659\n25660\n25661\n25662\n25663\n25664\n25665\n25666\n25667\n25668\n25669\n25670\n25671\n25672\n25673\n25674\n25675\n25676\n25677\n25678\n25679\n25680\n25681\n25682\n25683\n25684\n25685\n25686\n25687\n25688\n25689\n25690\n25691\n25692\n25693\n25694\n25695\n25696\n25697\n25698\n25699\n25700\n25701\n25702\n25703\n25704\n25705\n25706\n25707\n25708\n25709\n25710\n25711\n25712\n25713\n25714\n25715\n25716\n25717\n25718\n25719\n25720\n25721\n25722\n25723\n25724\n25725\n25726\n25727\n25728\n25729\n25730\n25731\n25732\n25733\n25734\n25735\n25736\n25737\n25738\n25739\n25740\n25741\n25742\n25743\n25744\n25745\n25746\n25747\n25748\n25749\n25750\n25751\n25752\n25753\n25754\n25755\n25756\n25757\n25758\n25759\n25760\n25761\n25762\n25763\n25764\n25765\n25766\n25767\n25768\n25769\n25770\n25771\n25772\n25773\n25774\n25775\n25776\n25777\n25778\n25779\n25780\n25781\n25782\n25783\n25784\n25785\n25786\n25787\n25788\n25789\n25790\n25791\n25792\n25793\n25794\n25795\n25796\n25797\n25798\n25799\n25800\n25801\n25802\n25803\n25804\n25805\n25806\n25807\n25808\n25809\n25810\n25811\n25812\n25813\n25814\n25815\n25816\n25817\n25818\n25819\n25820\n25821\n25822\n25823\n25824\n25825\n25826\n25827\n25828\n25829\n25830\n25831\n25832\n25833\n25834\n25835\n25836\n25837\n25838\n25839\n25840\n25841\n25842\n25843\n25844\n25845\n25846\n25847\n25848\n25849\n25850\n25851\n25852\n25853\n25854\n25855\n25856\n25857\n25858\n25859\n25860\n25861\n25862\n25863\n25864\n25865\n25866\n25867\n25868\n25869\n25870\n25871\n25872\n25873\n25874\n25875\n25876\n25877\n25878\n25879\n25880\n25881\n25882\n25883\n25884\n25885\n25886\n25887\n25888\n25889\n25890\n25891\n25892\n25893\n25894\n25895\n25896\n25897\n25898\n25899\n25900\n25901\n25902\n25903\n25904\n25905\n25906\n25907\n25908\n25909\n25910\n25911\n25912\n25913\n25914\n25915\n25916\n25917\n25918\n25919\n25920\n25921\n25922\n25923\n25924\n25925\n25926\n25927\n25928\n25929\n25930\n25931\n25932\n25933\n25934\n25935\n25936\n25937\n25938\n25939\n25940\n25941\n25942\n25943\n25944\n25945\n25946\n25947\n25948\n25949\n25950\n25951\n25952\n25953\n25954\n25955\n25956\n25957\n25958\n25959\n25960\n25961\n25962\n25963\n
25964\n25965\n25966\n25967\n25968\n25969\n25970\n25971\n25972\n25973\n25974\n25975\n25976\n25977\n25978\n25979\n25980\n25981\n25982\n25983\n25984\n25985\n25986\n25987\n25988\n25989\n25990\n25991\n25992\n25993\n25994\n25995\n25996\n25997\n25998\n25999\n26000\n26001\n26002\n26003\n26004\n26005\n26006\n26007\n26008\n26009\n26010\n26011\n26012\n26013\n26014\n26015\n26016\n26017\n26018\n26019\n26020\n26021\n26022\n26023\n26024\n26025\n26026\n26027\n26028\n26029\n26030\n26031\n26032\n26033\n26034\n26035\n26036\n26037\n26038\n26039\n26040\n26041\n26042\n26043\n26044\n26045\n26046\n26047\n26048\n26049\n26050\n26051\n26052\n26053\n26054\n26055\n26056\n26057\n26058\n26059\n26060\n26061\n26062\n26063\n26064\n26065\n26066\n26067\n26068\n26069\n26070\n26071\n26072\n26073\n26074\n26075\n26076\n26077\n26078\n26079\n26080\n26081\n26082\n26083\n26084\n26085\n26086\n26087\n26088\n26089\n26090\n26091\n26092\n26093\n26094\n26095\n26096\n26097\n26098\n26099\n26100\n26101\n26102\n26103\n26104\n26105\n26106\n26107\n26108\n26109\n26110\n26111\n26112\n26113\n26114\n26115\n26116\n26117\n26118\n26119\n26120\n26121\n26122\n26123\n26124\n26125\n26126\n26127\n26128\n26129\n26130\n26131\n26132\n26133\n26134\n26135\n26136\n26137\n26138\n26139\n26140\n26141\n26142\n26143\n26144\n26145\n26146\n26147\n26148\n26149\n26150\n26151\n26152\n26153\n26154\n26155\n26156\n26157\n26158\n26159\n26160\n26161\n26162\n26163\n26164\n26165\n26166\n26167\n26168\n26169\n26170\n26171\n26172\n26173\n26174\n26175\n26176\n26177\n26178\n26179\n26180\n26181\n26182\n26183\n26184\n26185\n26186\n26187\n26188\n26189\n26190\n26191\n26192\n26193\n26194\n26195\n26196\n26197\n26198\n26199\n26200\n26201\n26202\n26203\n26204\n26205\n26206\n26207\n26208\n26209\n26210\n26211\n26212\n26213\n26214\n26215\n26216\n26217\n26218\n26219\n26220\n26221\n26222\n26223\n26224\n26225\n26226\n26227\n26228\n26229\n26230\n26231\n26232\n26233\n26234\n26235\n26236\n26237\n26238\n26239\n26240\n26241\n26242\n26243\n26244\n26245\n26246\n26247\n26248\n26249\n26250\n26251\n26252\n26253\n26254\n26255\n26256\n26257\n26258\n26259\n26260\n26261\n26262\n26263\n26264\n26265\n26266\n26267\n26268\n26269\n26270\n26271\n26272\n26273\n26274\n26275\n26276\n26277\n26278\n26279\n26280\n26281\n26282\n26283\n26284\n26285\n26286\n26287\n26288\n26289\n26290\n26291\n26292\n26293\n26294\n26295\n26296\n26297\n26298\n26299\n26300\n26301\n26302\n26303\n26304\n26305\n26306\n26307\n26308\n26309\n26310\n26311\n26312\n26313\n26314\n26315\n26316\n26317\n26318\n26319\n26320\n26321\n26322\n26323\n26324\n26325\n26326\n26327\n26328\n26329\n26330\n26331\n26332\n26333\n26334\n26335\n26336\n26337\n26338\n26339\n26340\n26341\n26342\n26343\n26344\n26345\n26346\n26347\n26348\n26349\n26350\n26351\n26352\n26353\n26354\n26355\n26356\n26357\n26358\n26359\n26360\n26361\n26362\n26363\n26364\n26365\n26366\n26367\n26368\n26369\n26370\n26371\n26372\n26373\n26374\n26375\n26376\n26377\n26378\n26379\n26380\n26381\n26382\n26383\n26384\n26385\n26386\n26387\n26388\n26389\n26390\n26391\n26392\n26393\n26394\n26395\n26396\n26397\n26398\n26399\n26400\n26401\n26402\n26403\n26404\n26405\n26406\n26407\n26408\n26409\n26410\n26411\n26412\n26413\n26414\n26415\n26416\n26417\n26418\n26419\n26420\n26421\n26422\n26423\n26424\n26425\n26426\n26427\n26428\n26429\n26430\n26431\n26432\n26433\n26434\n26435\n26436\n26437\n26438\n26439\n26440\n26441\n26442\n26443\n26444\n26445\n26446\n26447\n26448\n26449\n26450\n26451\n26452\n26453\n26454\n26455\n26456\n26457\n26458\n26459\n26460\n26461\n26462\n26463\n26464\n26465\n26466\n26467\n26468\n26469\n26470\n26471\
n26472\n26473\n26474\n26475\n26476\n26477\n26478\n26479\n26480\n26481\n26482\n26483\n26484\n26485\n26486\n26487\n26488\n26489\n26490\n26491\n26492\n26493\n26494\n26495\n26496\n26497\n26498\n26499\n26500\n26501\n26502\n26503\n26504\n26505\n26506\n26507\n26508\n26509\n26510\n26511\n26512\n26513\n26514\n26515\n26516\n26517\n26518\n26519\n26520\n26521\n26522\n26523\n26524\n26525\n26526\n26527\n26528\n26529\n26530\n26531\n26532\n26533\n26534\n26535\n26536\n26537\n26538\n26539\n26540\n26541\n26542\n26543\n26544\n26545\n26546\n26547\n26548\n26549\n26550\n26551\n26552\n26553\n26554\n26555\n26556\n26557\n26558\n26559\n26560\n26561\n26562\n26563\n26564\n26565\n26566\n26567\n26568\n26569\n26570\n26571\n26572\n26573\n26574\n26575\n26576\n26577\n26578\n26579\n26580\n26581\n26582\n26583\n26584\n26585\n26586\n26587\n26588\n26589\n26590\n26591\n26592\n26593\n26594\n26595\n26596\n26597\n26598\n26599\n26600\n26601\n26602\n26603\n26604\n26605\n26606\n26607\n26608\n26609\n26610\n26611\n26612\n26613\n26614\n26615\n26616\n26617\n26618\n26619\n26620\n26621\n26622\n26623\n26624\n26625\n26626\n26627\n26628\n26629\n26630\n26631\n26632\n26633\n26634\n26635\n26636\n26637\n26638\n26639\n26640\n26641\n26642\n26643\n26644\n26645\n26646\n26647\n26648\n26649\n26650\n26651\n26652\n26653\n26654\n26655\n26656\n26657\n26658\n26659\n26660\n26661\n26662\n26663\n26664\n26665\n26666\n26667\n26668\n26669\n26670\n26671\n26672\n26673\n26674\n26675\n26676\n26677\n26678\n26679\n26680\n26681\n26682\n26683\n26684\n26685\n26686\n26687\n26688\n26689\n26690\n26691\n26692\n26693\n26694\n26695\n26696\n26697\n26698\n26699\n26700\n26701\n26702\n26703\n26704\n26705\n26706\n26707\n26708\n26709\n26710\n26711\n26712\n26713\n26714\n26715\n26716\n26717\n26718\n26719\n26720\n26721\n26722\n26723\n26724\n26725\n26726\n26727\n26728\n26729\n26730\n26731\n26732\n26733\n26734\n26735\n26736\n26737\n26738\n26739\n26740\n26741\n26742\n26743\n26744\n26745\n26746\n26747\n26748\n26749\n26750\n26751\n26752\n26753\n26754\n26755\n26756\n26757\n26758\n26759\n26760\n26761\n26762\n26763\n26764\n26765\n26766\n26767\n26768\n26769\n26770\n26771\n26772\n26773\n26774\n26775\n26776\n26777\n26778\n26779\n26780\n26781\n26782\n26783\n26784\n26785\n26786\n26787\n26788\n26789\n26790\n26791\n26792\n26793\n26794\n26795\n26796\n26797\n26798\n26799\n26800\n26801\n26802\n26803\n26804\n26805\n26806\n26807\n26808\n26809\n26810\n26811\n26812\n26813\n26814\n26815\n26816\n26817\n26818\n26819\n26820\n26821\n26822\n26823\n26824\n26825\n26826\n26827\n26828\n26829\n26830\n26831\n26832\n26833\n26834\n26835\n26836\n26837\n26838\n26839\n26840\n26841\n26842\n26843\n26844\n26845\n26846\n26847\n26848\n26849\n26850\n26851\n26852\n26853\n26854\n26855\n26856\n26857\n26858\n26859\n26860\n26861\n26862\n26863\n26864\n26865\n26866\n26867\n26868\n26869\n26870\n26871\n26872\n26873\n26874\n26875\n26876\n26877\n26878\n26879\n26880\n26881\n26882\n26883\n26884\n26885\n26886\n26887\n26888\n26889\n26890\n26891\n26892\n26893\n26894\n26895\n26896\n26897\n26898\n26899\n26900\n26901\n26902\n26903\n26904\n26905\n26906\n26907\n26908\n26909\n26910\n26911\n26912\n26913\n26914\n26915\n26916\n26917\n26918\n26919\n26920\n26921\n26922\n26923\n26924\n26925\n26926\n26927\n26928\n26929\n26930\n26931\n26932\n26933\n26934\n26935\n26936\n26937\n26938\n26939\n26940\n26941\n26942\n26943\n26944\n26945\n26946\n26947\n26948\n26949\n26950\n26951\n26952\n26953\n26954\n26955\n26956\n26957\n26958\n26959\n26960\n26961\n26962\n26963\n26964\n26965\n26966\n26967\n26968\n26969\n26970\n26971\n26972\n26973\n26974\n26975\n26976\n26977\n26978\n26979
\n26980\n26981\n26982\n26983\n26984\n26985\n26986\n26987\n26988\n26989\n26990\n26991\n26992\n26993\n26994\n26995\n26996\n26997\n26998\n26999\n27000\n27001\n27002\n27003\n27004\n27005\n27006\n27007\n27008\n27009\n27010\n27011\n27012\n27013\n27014\n27015\n27016\n27017\n27018\n27019\n27020\n27021\n27022\n27023\n27024\n27025\n27026\n27027\n27028\n27029\n27030\n27031\n27032\n27033\n27034\n27035\n27036\n27037\n27038\n27039\n27040\n27041\n27042\n27043\n27044\n27045\n27046\n27047\n27048\n27049\n27050\n27051\n27052\n27053\n27054\n27055\n27056\n27057\n27058\n27059\n27060\n27061\n27062\n27063\n27064\n27065\n27066\n27067\n27068\n27069\n27070\n27071\n27072\n27073\n27074\n27075\n27076\n27077\n27078\n27079\n27080\n27081\n27082\n27083\n27084\n27085\n27086\n27087\n27088\n27089\n27090\n27091\n27092\n27093\n27094\n27095\n27096\n27097\n27098\n27099\n27100\n27101\n27102\n27103\n27104\n27105\n27106\n27107\n27108\n27109\n27110\n27111\n27112\n27113\n27114\n27115\n27116\n27117\n27118\n27119\n27120\n27121\n27122\n27123\n27124\n27125\n27126\n27127\n27128\n27129\n27130\n27131\n27132\n27133\n27134\n27135\n27136\n27137\n27138\n27139\n27140\n27141\n27142\n27143\n27144\n27145\n27146\n27147\n27148\n27149\n27150\n27151\n27152\n27153\n27154\n27155\n27156\n27157\n27158\n27159\n27160\n27161\n27162\n27163\n27164\n27165\n27166\n27167\n27168\n27169\n27170\n27171\n27172\n27173\n27174\n27175\n27176\n27177\n27178\n27179\n27180\n27181\n27182\n27183\n27184\n27185\n27186\n27187\n27188\n27189\n27190\n27191\n27192\n27193\n27194\n27195\n27196\n27197\n27198\n27199\n27200\n27201\n27202\n27203\n27204\n27205\n27206\n27207\n27208\n27209\n27210\n27211\n27212\n27213\n27214\n27215\n27216\n27217\n27218\n27219\n27220\n27221\n27222\n27223\n27224\n27225\n27226\n27227\n27228\n27229\n27230\n27231\n27232\n27233\n27234\n27235\n27236\n27237\n27238\n27239\n27240\n27241\n27242\n27243\n27244\n27245\n27246\n27247\n27248\n27249\n27250\n27251\n27252\n27253\n27254\n27255\n27256\n27257\n27258\n27259\n27260\n27261\n27262\n27263\n27264\n27265\n27266\n27267\n27268\n27269\n27270\n27271\n27272\n27273\n27274\n27275\n27276\n27277\n27278\n27279\n27280\n27281\n27282\n27283\n27284\n27285\n27286\n27287\n27288\n27289\n27290\n27291\n27292\n27293\n27294\n27295\n27296\n27297\n27298\n27299\n27300\n27301\n27302\n27303\n27304\n27305\n27306\n27307\n27308\n27309\n27310\n27311\n27312\n27313\n27314\n27315\n27316\n27317\n27318\n27319\n27320\n27321\n27322\n27323\n27324\n27325\n27326\n27327\n27328\n27329\n27330\n27331\n27332\n27333\n27334\n27335\n27336\n27337\n27338\n27339\n27340\n27341\n27342\n27343\n27344\n27345\n27346\n27347\n27348\n27349\n27350\n27351\n27352\n27353\n27354\n27355\n27356\n27357\n27358\n27359\n27360\n27361\n27362\n27363\n27364\n27365\n27366\n27367\n27368\n27369\n27370\n27371\n27372\n27373\n27374\n27375\n27376\n27377\n27378\n27379\n27380\n27381\n27382\n27383\n27384\n27385\n27386\n27387\n27388\n27389\n27390\n27391\n27392\n27393\n27394\n27395\n27396\n27397\n27398\n27399\n27400\n27401\n27402\n27403\n27404\n27405\n27406\n27407\n27408\n27409\n27410\n27411\n27412\n27413\n27414\n27415\n27416\n27417\n27418\n27419\n27420\n27421\n27422\n27423\n27424\n27425\n27426\n27427\n27428\n27429\n27430\n27431\n27432\n27433\n27434\n27435\n27436\n27437\n27438\n27439\n27440\n27441\n27442\n27443\n27444\n27445\n27446\n27447\n27448\n27449\n27450\n27451\n27452\n27453\n27454\n27455\n27456\n27457\n27458\n27459\n27460\n27461\n27462\n27463\n27464\n27465\n27466\n27467\n27468\n27469\n27470\n27471\n27472\n27473\n27474\n27475\n27476\n27477\n27478\n27479\n27480\n27481\n27482\n27483\n27484\n27485\n27486\n2748
7\n27488\n27489\n27490\n27491\n27492\n27493\n27494\n27495\n27496\n27497\n27498\n27499\n27500\n27501\n27502\n27503\n27504\n27505\n27506\n27507\n27508\n27509\n27510\n27511\n27512\n27513\n27514\n27515\n27516\n27517\n27518\n27519\n27520\n27521\n27522\n27523\n27524\n27525\n27526\n27527\n27528\n27529\n27530\n27531\n27532\n27533\n27534\n27535\n27536\n27537\n27538\n27539\n27540\n27541\n27542\n27543\n27544\n27545\n27546\n27547\n27548\n27549\n27550\n27551\n27552\n27553\n27554\n27555\n27556\n27557\n27558\n27559\n27560\n27561\n27562\n27563\n27564\n27565\n27566\n27567\n27568\n27569\n27570\n27571\n27572\n27573\n27574\n27575\n27576\n27577\n27578\n27579\n27580\n27581\n27582\n27583\n27584\n27585\n27586\n27587\n27588\n27589\n27590\n27591\n27592\n27593\n27594\n27595\n27596\n27597\n27598\n27599\n27600\n27601\n27602\n27603\n27604\n27605\n27606\n27607\n27608\n27609\n27610\n27611\n27612\n27613\n27614\n27615\n27616\n27617\n27618\n27619\n27620\n27621\n27622\n27623\n27624\n27625\n27626\n27627\n27628\n27629\n27630\n27631\n27632\n27633\n27634\n27635\n27636\n27637\n27638\n27639\n27640\n27641\n27642\n27643\n27644\n27645\n27646\n27647\n27648\n27649\n27650\n27651\n27652\n27653\n27654\n27655\n27656\n27657\n27658\n27659\n27660\n27661\n27662\n27663\n27664\n27665\n27666\n27667\n27668\n27669\n27670\n27671\n27672\n27673\n27674\n27675\n27676\n27677\n27678\n27679\n27680\n27681\n27682\n27683\n27684\n27685\n27686\n27687\n27688\n27689\n27690\n27691\n27692\n27693\n27694\n27695\n27696\n27697\n27698\n27699\n27700\n27701\n27702\n27703\n27704\n27705\n27706\n27707\n27708\n27709\n27710\n27711\n27712\n27713\n27714\n27715\n27716\n27717\n27718\n27719\n27720\n27721\n27722\n27723\n27724\n27725\n27726\n27727\n27728\n27729\n27730\n27731\n27732\n27733\n27734\n27735\n27736\n27737\n27738\n27739\n27740\n27741\n27742\n27743\n27744\n27745\n27746\n27747\n27748\n27749\n27750\n27751\n27752\n27753\n27754\n27755\n27756\n27757\n27758\n27759\n27760\n27761\n27762\n27763\n27764\n27765\n27766\n27767\n27768\n27769\n27770\n27771\n27772\n27773\n27774\n27775\n27776\n27777\n27778\n27779\n27780\n27781\n27782\n27783\n27784\n27785\n27786\n27787\n27788\n27789\n27790\n27791\n27792\n27793\n27794\n27795\n27796\n27797\n27798\n27799\n27800\n27801\n27802\n27803\n27804\n27805\n27806\n27807\n27808\n27809\n27810\n27811\n27812\n27813\n27814\n27815\n27816\n27817\n27818\n27819\n27820\n27821\n27822\n27823\n27824\n27825\n27826\n27827\n27828\n27829\n27830\n27831\n27832\n27833\n27834\n27835\n27836\n27837\n27838\n27839\n27840\n27841\n27842\n27843\n27844\n27845\n27846\n27847\n27848\n27849\n27850\n27851\n27852\n27853\n27854\n27855\n27856\n27857\n27858\n27859\n27860\n27861\n27862\n27863\n27864\n27865\n27866\n27867\n27868\n27869\n27870\n27871\n27872\n27873\n27874\n27875\n27876\n27877\n27878\n27879\n27880\n27881\n27882\n27883\n27884\n27885\n27886\n27887\n27888\n27889\n27890\n27891\n27892\n27893\n27894\n27895\n27896\n27897\n27898\n27899\n27900\n27901\n27902\n27903\n27904\n27905\n27906\n27907\n27908\n27909\n27910\n27911\n27912\n27913\n27914\n27915\n27916\n27917\n27918\n27919\n27920\n27921\n27922\n27923\n27924\n27925\n27926\n27927\n27928\n27929\n27930\n27931\n27932\n27933\n27934\n27935\n27936\n27937\n27938\n27939\n27940\n27941\n27942\n27943\n27944\n27945\n27946\n27947\n27948\n27949\n27950\n27951\n27952\n27953\n27954\n27955\n27956\n27957\n27958\n27959\n27960\n27961\n27962\n27963\n27964\n27965\n27966\n27967\n27968\n27969\n27970\n27971\n27972\n27973\n27974\n27975\n27976\n27977\n27978\n27979\n27980\n27981\n27982\n27983\n27984\n27985\n27986\n27987\n27988\n27989\n27990\n27991\n27992\n27993\n27994\n279
95\n27996\n27997\n27998\n27999\n28000\n28001\n28002\n28003\n28004\n28005\n28006\n28007\n28008\n28009\n28010\n28011\n28012\n28013\n28014\n28015\n28016\n28017\n28018\n28019\n28020\n28021\n28022\n28023\n28024\n28025\n28026\n28027\n28028\n28029\n28030\n28031\n28032\n28033\n28034\n28035\n28036\n28037\n28038\n28039\n28040\n28041\n28042\n28043\n28044\n28045\n28046\n28047\n28048\n28049\n28050\n28051\n28052\n28053\n28054\n28055\n28056\n28057\n28058\n28059\n28060\n28061\n28062\n28063\n28064\n28065\n28066\n28067\n28068\n28069\n28070\n28071\n28072\n28073\n28074\n28075\n28076\n28077\n28078\n28079\n28080\n28081\n28082\n28083\n28084\n28085\n28086\n28087\n28088\n28089\n28090\n28091\n28092\n28093\n28094\n28095\n28096\n28097\n28098\n28099\n28100\n28101\n28102\n28103\n28104\n28105\n28106\n28107\n28108\n28109\n28110\n28111\n28112\n28113\n28114\n28115\n28116\n28117\n28118\n28119\n28120\n28121\n28122\n28123\n28124\n28125\n28126\n28127\n28128\n28129\n28130\n28131\n28132\n28133\n28134\n28135\n28136\n28137\n28138\n28139\n28140\n28141\n28142\n28143\n28144\n28145\n28146\n28147\n28148\n28149\n28150\n28151\n28152\n28153\n28154\n28155\n28156\n28157\n28158\n28159\n28160\n28161\n28162\n28163\n28164\n28165\n28166\n28167\n28168\n28169\n28170\n28171\n28172\n28173\n28174\n28175\n28176\n28177\n28178\n28179\n28180\n28181\n28182\n28183\n28184\n28185\n28186\n28187\n28188\n28189\n28190\n28191\n28192\n28193\n28194\n28195\n28196\n28197\n28198\n28199\n28200\n28201\n28202\n28203\n28204\n28205\n28206\n28207\n28208\n28209\n28210\n28211\n28212\n28213\n28214\n28215\n28216\n28217\n28218\n28219\n28220\n28221\n28222\n28223\n28224\n28225\n28226\n28227\n28228\n28229\n28230\n28231\n28232\n28233\n28234\n28235\n28236\n28237\n28238\n28239\n28240\n28241\n28242\n28243\n28244\n28245\n28246\n28247\n28248\n28249\n28250\n28251\n28252\n28253\n28254\n28255\n28256\n28257\n28258\n28259\n28260\n28261\n28262\n28263\n28264\n28265\n28266\n28267\n28268\n28269\n28270\n28271\n28272\n28273\n28274\n28275\n28276\n28277\n28278\n28279\n28280\n28281\n28282\n28283\n28284\n28285\n28286\n28287\n28288\n28289\n28290\n28291\n28292\n28293\n28294\n28295\n28296\n28297\n28298\n28299\n28300\n28301\n28302\n28303\n28304\n28305\n28306\n28307\n28308\n28309\n28310\n28311\n28312\n28313\n28314\n28315\n28316\n28317\n28318\n28319\n28320\n28321\n28322\n28323\n28324\n28325\n28326\n28327\n28328\n28329\n28330\n28331\n28332\n28333\n28334\n28335\n28336\n28337\n28338\n28339\n28340\n28341\n28342\n28343\n28344\n28345\n28346\n28347\n28348\n28349\n28350\n28351\n28352\n28353\n28354\n28355\n28356\n28357\n28358\n28359\n28360\n28361\n28362\n28363\n28364\n28365\n28366\n28367\n28368\n28369\n28370\n28371\n28372\n28373\n28374\n28375\n28376\n28377\n28378\n28379\n28380\n28381\n28382\n28383\n28384\n28385\n28386\n28387\n28388\n28389\n28390\n28391\n28392\n28393\n28394\n28395\n28396\n28397\n28398\n28399\n28400\n28401\n28402\n28403\n28404\n28405\n28406\n28407\n28408\n28409\n28410\n28411\n28412\n28413\n28414\n28415\n28416\n28417\n28418\n28419\n28420\n28421\n28422\n28423\n28424\n28425\n28426\n28427\n28428\n28429\n28430\n28431\n28432\n28433\n28434\n28435\n28436\n28437\n28438\n28439\n28440\n28441\n28442\n28443\n28444\n28445\n28446\n28447\n28448\n28449\n28450\n28451\n28452\n28453\n28454\n28455\n28456\n28457\n28458\n28459\n28460\n28461\n28462\n28463\n28464\n28465\n28466\n28467\n28468\n28469\n28470\n28471\n28472\n28473\n28474\n28475\n28476\n28477\n28478\n28479\n28480\n28481\n28482\n28483\n28484\n28485\n28486\n28487\n28488\n28489\n28490\n28491\n28492\n28493\n28494\n28495\n28496\n28497\n28498\n28499\n28500\n28501\n28502\n28
503\n28504\n28505\n28506\n28507\n28508\n28509\n28510\n28511\n28512\n28513\n28514\n28515\n28516\n28517\n28518\n28519\n28520\n28521\n28522\n28523\n28524\n28525\n28526\n28527\n28528\n28529\n28530\n28531\n28532\n28533\n28534\n28535\n28536\n28537\n28538\n28539\n28540\n28541\n28542\n28543\n28544\n28545\n28546\n28547\n28548\n28549\n28550\n28551\n28552\n28553\n28554\n28555\n28556\n28557\n28558\n28559\n28560\n28561\n28562\n28563\n28564\n28565\n28566\n28567\n28568\n28569\n28570\n28571\n28572\n28573\n28574\n28575\n28576\n28577\n28578\n28579\n28580\n28581\n28582\n28583\n28584\n28585\n28586\n28587\n28588\n28589\n28590\n28591\n28592\n28593\n28594\n28595\n28596\n28597\n28598\n28599\n28600\n28601\n28602\n28603\n28604\n28605\n28606\n28607\n28608\n28609\n28610\n28611\n28612\n28613\n28614\n28615\n28616\n28617\n28618\n28619\n28620\n28621\n28622\n28623\n28624\n28625\n28626\n28627\n28628\n28629\n28630\n28631\n28632\n28633\n28634\n28635\n28636\n28637\n28638\n28639\n28640\n28641\n28642\n28643\n28644\n28645\n28646\n28647\n28648\n28649\n28650\n28651\n28652\n28653\n28654\n28655\n28656\n28657\n28658\n28659\n28660\n28661\n28662\n28663\n28664\n28665\n28666\n28667\n28668\n28669\n28670\n28671\n28672\n28673\n28674\n28675\n28676\n28677\n28678\n28679\n28680\n28681\n28682\n28683\n28684\n28685\n28686\n28687\n28688\n28689\n28690\n28691\n28692\n28693\n28694\n28695\n28696\n28697\n28698\n28699\n28700\n28701\n28702\n28703\n28704\n28705\n28706\n28707\n28708\n28709\n28710\n28711\n28712\n28713\n28714\n28715\n28716\n28717\n28718\n28719\n28720\n28721\n28722\n28723\n28724\n28725\n28726\n28727\n28728\n28729\n28730\n28731\n28732\n28733\n28734\n28735\n28736\n28737\n28738\n28739\n28740\n28741\n28742\n28743\n28744\n28745\n28746\n28747\n28748\n28749\n28750\n28751\n28752\n28753\n28754\n28755\n28756\n28757\n28758\n28759\n28760\n28761\n28762\n28763\n28764\n28765\n28766\n28767\n28768\n28769\n28770\n28771\n28772\n28773\n28774\n28775\n28776\n28777\n28778\n28779\n28780\n28781\n28782\n28783\n28784\n28785\n28786\n28787\n28788\n28789\n28790\n28791\n28792\n28793\n28794\n28795\n28796\n28797\n28798\n28799\n28800\n28801\n28802\n28803\n28804\n28805\n28806\n28807\n28808\n28809\n28810\n28811\n28812\n28813\n28814\n28815\n28816\n28817\n28818\n28819\n28820\n28821\n28822\n28823\n28824\n28825\n28826\n28827\n28828\n28829\n28830\n28831\n28832\n28833\n28834\n28835\n28836\n28837\n28838\n28839\n28840\n28841\n28842\n28843\n28844\n28845\n28846\n28847\n28848\n28849\n28850\n28851\n28852\n28853\n28854\n28855\n28856\n28857\n28858\n28859\n28860\n28861\n28862\n28863\n28864\n28865\n28866\n28867\n28868\n28869\n28870\n28871\n28872\n28873\n28874\n28875\n28876\n28877\n28878\n28879\n28880\n28881\n28882\n28883\n28884\n28885\n28886\n28887\n28888\n28889\n28890\n28891\n28892\n28893\n28894\n28895\n28896\n28897\n28898\n28899\n28900\n28901\n28902\n28903\n28904\n28905\n28906\n28907\n28908\n28909\n28910\n28911\n28912\n28913\n28914\n28915\n28916\n28917\n28918\n28919\n28920\n28921\n28922\n28923\n28924\n28925\n28926\n28927\n28928\n28929\n28930\n28931\n28932\n28933\n28934\n28935\n28936\n28937\n28938\n28939\n28940\n28941\n28942\n28943\n28944\n28945\n28946\n28947\n28948\n28949\n28950\n28951\n28952\n28953\n28954\n28955\n28956\n28957\n28958\n28959\n28960\n28961\n28962\n28963\n28964\n28965\n28966\n28967\n28968\n28969\n28970\n28971\n28972\n28973\n28974\n28975\n28976\n28977\n28978\n28979\n28980\n28981\n28982\n28983\n28984\n28985\n28986\n28987\n28988\n28989\n28990\n28991\n28992\n28993\n28994\n28995\n28996\n28997\n28998\n28999\n29000\n29001\n29002\n29003\n29004\n29005\n29006\n29007\n29008\n29009\n29010\n2
9011\n29012\n29013\n29014\n29015\n29016\n29017\n29018\n29019\n29020\n29021\n29022\n29023\n29024\n29025\n29026\n29027\n29028\n29029\n29030\n29031\n29032\n29033\n29034\n29035\n29036\n29037\n29038\n29039\n29040\n29041\n29042\n29043\n29044\n29045\n29046\n29047\n29048\n29049\n29050\n29051\n29052\n29053\n29054\n29055\n29056\n29057\n29058\n29059\n29060\n29061\n29062\n29063\n29064\n29065\n29066\n29067\n29068\n29069\n29070\n29071\n29072\n29073\n29074\n29075\n29076\n29077\n29078\n29079\n29080\n29081\n29082\n29083\n29084\n29085\n29086\n29087\n29088\n29089\n29090\n29091\n29092\n29093\n29094\n29095\n29096\n29097\n29098\n29099\n29100\n29101\n29102\n29103\n29104\n29105\n29106\n29107\n29108\n29109\n29110\n29111\n29112\n29113\n29114\n29115\n29116\n29117\n29118\n29119\n29120\n29121\n29122\n29123\n29124\n29125\n29126\n29127\n29128\n29129\n29130\n29131\n29132\n29133\n29134\n29135\n29136\n29137\n29138\n29139\n29140\n29141\n29142\n29143\n29144\n29145\n29146\n29147\n29148\n29149\n29150\n29151\n29152\n29153\n29154\n29155\n29156\n29157\n29158\n29159\n29160\n29161\n29162\n29163\n29164\n29165\n29166\n29167\n29168\n29169\n29170\n29171\n29172\n29173\n29174\n29175\n29176\n29177\n29178\n29179\n29180\n29181\n29182\n29183\n29184\n29185\n29186\n29187\n29188\n29189\n29190\n29191\n29192\n29193\n29194\n29195\n29196\n29197\n29198\n29199\n29200\n29201\n29202\n29203\n29204\n29205\n29206\n29207\n29208\n29209\n29210\n29211\n29212\n29213\n29214\n29215\n29216\n29217\n29218\n29219\n29220\n29221\n29222\n29223\n29224\n29225\n29226\n29227\n29228\n29229\n29230\n29231\n29232\n29233\n29234\n29235\n29236\n29237\n29238\n29239\n29240\n29241\n29242\n29243\n29244\n29245\n29246\n29247\n29248\n29249\n29250\n29251\n29252\n29253\n29254\n29255\n29256\n29257\n29258\n29259\n29260\n29261\n29262\n29263\n29264\n29265\n29266\n29267\n29268\n29269\n29270\n29271\n29272\n29273\n29274\n29275\n29276\n29277\n29278\n29279\n29280\n29281\n29282\n29283\n29284\n29285\n29286\n29287\n29288\n29289\n29290\n29291\n29292\n29293\n29294\n29295\n29296\n29297\n29298\n29299\n29300\n29301\n29302\n29303\n29304\n29305\n29306\n29307\n29308\n29309\n29310\n29311\n29312\n29313\n29314\n29315\n29316\n29317\n29318\n29319\n29320\n29321\n29322\n29323\n29324\n29325\n29326\n29327\n29328\n29329\n29330\n29331\n29332\n29333\n29334\n29335\n29336\n29337\n29338\n29339\n29340\n29341\n29342\n29343\n29344\n29345\n29346\n29347\n29348\n29349\n29350\n29351\n29352\n29353\n29354\n29355\n29356\n29357\n29358\n29359\n29360\n29361\n29362\n29363\n29364\n29365\n29366\n29367\n29368\n29369\n29370\n29371\n29372\n29373\n29374\n29375\n29376\n29377\n29378\n29379\n29380\n29381\n29382\n29383\n29384\n29385\n29386\n29387\n29388\n29389\n29390\n29391\n29392\n29393\n29394\n29395\n29396\n29397\n29398\n29399\n29400\n29401\n29402\n29403\n29404\n29405\n29406\n29407\n29408\n29409\n29410\n29411\n29412\n29413\n29414\n29415\n29416\n29417\n29418\n29419\n29420\n29421\n29422\n29423\n29424\n29425\n29426\n29427\n29428\n29429\n29430\n29431\n29432\n29433\n29434\n29435\n29436\n29437\n29438\n29439\n29440\n29441\n29442\n29443\n29444\n29445\n29446\n29447\n29448\n29449\n29450\n29451\n29452\n29453\n29454\n29455\n29456\n29457\n29458\n29459\n29460\n29461\n29462\n29463\n29464\n29465\n29466\n29467\n29468\n29469\n29470\n29471\n29472\n29473\n29474\n29475\n29476\n29477\n29478\n29479\n29480\n29481\n29482\n29483\n29484\n29485\n29486\n29487\n29488\n29489\n29490\n29491\n29492\n29493\n29494\n29495\n29496\n29497\n29498\n29499\n29500\n29501\n29502\n29503\n29504\n29505\n29506\n29507\n29508\n29509\n29510\n29511\n29512\n29513\n29514\n29515\n29516\n29517\n29518\n
29519\n29520\n29521\n29522\n29523\n29524\n29525\n29526\n29527\n29528\n29529\n29530\n29531\n29532\n29533\n29534\n29535\n29536\n29537\n29538\n29539\n29540\n29541\n29542\n29543\n29544\n29545\n29546\n29547\n29548\n29549\n29550\n29551\n29552\n29553\n29554\n29555\n29556\n29557\n29558\n29559\n29560\n29561\n29562\n29563\n29564\n29565\n29566\n29567\n29568\n29569\n29570\n29571\n29572\n29573\n29574\n29575\n29576\n29577\n29578\n29579\n29580\n29581\n29582\n29583\n29584\n29585\n29586\n29587\n29588\n29589\n29590\n29591\n29592\n29593\n29594\n29595\n29596\n29597\n29598\n29599\n29600\n29601\n29602\n29603\n29604\n29605\n29606\n29607\n29608\n29609\n29610\n29611\n29612\n29613\n29614\n29615\n29616\n29617\n29618\n29619\n29620\n29621\n29622\n29623\n29624\n29625\n29626\n29627\n29628\n29629\n29630\n29631\n29632\n29633\n29634\n29635\n29636\n29637\n29638\n29639\n29640\n29641\n29642\n29643\n29644\n29645\n29646\n29647\n29648\n29649\n29650\n29651\n29652\n29653\n29654\n29655\n29656\n29657\n29658\n29659\n29660\n29661\n29662\n29663\n29664\n29665\n29666\n29667\n29668\n29669\n29670\n29671\n29672\n29673\n29674\n29675\n29676\n29677\n29678\n29679\n29680\n29681\n29682\n29683\n29684\n29685\n29686\n29687\n29688\n29689\n29690\n29691\n29692\n29693\n29694\n29695\n29696\n29697\n29698\n29699\n29700\n29701\n29702\n29703\n29704\n29705\n29706\n29707\n29708\n29709\n29710\n29711\n29712\n29713\n29714\n29715\n29716\n29717\n29718\n29719\n29720\n29721\n29722\n29723\n29724\n29725\n29726\n29727\n29728\n29729\n29730\n29731\n29732\n29733\n29734\n29735\n29736\n29737\n29738\n29739\n29740\n29741\n29742\n29743\n29744\n29745\n29746\n29747\n29748\n29749\n29750\n29751\n29752\n29753\n29754\n29755\n29756\n29757\n29758\n29759\n29760\n29761\n29762\n29763\n29764\n29765\n29766\n29767\n29768\n29769\n29770\n29771\n29772\n29773\n29774\n29775\n29776\n29777\n29778\n29779\n29780\n29781\n29782\n29783\n29784\n29785\n29786\n29787\n29788\n29789\n29790\n29791\n29792\n29793\n29794\n29795\n29796\n29797\n29798\n29799\n29800\n29801\n29802\n29803\n29804\n29805\n29806\n29807\n29808\n29809\n29810\n29811\n29812\n29813\n29814\n29815\n29816\n29817\n29818\n29819\n29820\n29821\n29822\n29823\n29824\n29825\n29826\n29827\n29828\n29829\n29830\n29831\n29832\n29833\n29834\n29835\n29836\n29837\n29838\n29839\n29840\n29841\n29842\n29843\n29844\n29845\n29846\n29847\n29848\n29849\n29850\n29851\n29852\n29853\n29854\n29855\n29856\n29857\n29858\n29859\n29860\n29861\n29862\n29863\n29864\n29865\n29866\n29867\n29868\n29869\n29870\n29871\n29872\n29873\n29874\n29875\n29876\n29877\n29878\n29879\n29880\n29881\n29882\n29883\n29884\n29885\n29886\n29887\n29888\n29889\n29890\n29891\n29892\n29893\n29894\n29895\n29896\n29897\n29898\n29899\n29900\n29901\n29902\n29903\n29904\n29905\n29906\n29907\n29908\n29909\n29910\n29911\n29912\n29913\n29914\n29915\n29916\n29917\n29918\n29919\n29920\n29921\n29922\n29923\n29924\n29925\n29926\n29927\n29928\n29929\n29930\n29931\n29932\n29933\n29934\n29935\n29936\n29937\n29938\n29939\n29940\n29941\n29942\n29943\n29944\n29945\n29946\n29947\n29948\n29949\n29950\n29951\n29952\n29953\n29954\n29955\n29956\n29957\n29958\n29959\n29960\n29961\n29962\n29963\n29964\n29965\n29966\n29967\n29968\n29969\n29970\n29971\n29972\n29973\n29974\n29975\n29976\n29977\n29978\n29979\n29980\n29981\n29982\n29983\n29984\n29985\n29986\n29987\n29988\n29989\n29990\n29991\n29992\n29993\n29994\n29995\n29996\n29997\n29998\n29999'
\ No newline at end of file
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test11.arff b/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test11.arff
new file mode 100644
index 00000000..fadfaee8
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test11.arff
@@ -0,0 +1,11 @@
+@RELATION test11
+
+@ATTRIBUTE attr0	REAL
+@ATTRIBUTE attr1 	REAL
+@ATTRIBUTE attr2 	REAL
+@ATTRIBUTE attr3	REAL
+@ATTRIBUTE class 	{ class0, class1, class2, class3 }
+@DATA
+0.1, 0.2, 0.3, 0.4,class1
+-0.1, -0.2, -0.3, -0.4,class2
+1, 2, 3, 4,class3
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test2.arff b/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test2.arff
new file mode 100644
index 00000000..30f0dbf9
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test2.arff
@@ -0,0 +1,15 @@
+@RELATION test2
+
+@ATTRIBUTE attr0	REAL
+@ATTRIBUTE attr1 	real
+@ATTRIBUTE attr2 	integer
+@ATTRIBUTE attr3	Integer
+@ATTRIBUTE attr4 	Numeric
+@ATTRIBUTE attr5	numeric
+@ATTRIBUTE attr6 	string
+@ATTRIBUTE attr7 	STRING
+@ATTRIBUTE attr8 	{bla}
+@ATTRIBUTE attr9 	{bla, bla}
+
+@DATA
+0.1, 0.2, 0.3, 0.4,class1
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test3.arff b/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test3.arff
new file mode 100644
index 00000000..23da3b30
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test3.arff
@@ -0,0 +1,6 @@
+@RELATION test3
+
+@ATTRIBUTE attr0	crap
+
+@DATA
+0.1, 0.2, 0.3, 0.4,class1
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test4.arff b/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test4.arff
new file mode 100644
index 00000000..bf5f99ca
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test4.arff
@@ -0,0 +1,11 @@
+@RELATION test5
+
+@ATTRIBUTE attr0	REAL
+@ATTRIBUTE attr1 	REAL
+@ATTRIBUTE attr2 	REAL
+@ATTRIBUTE attr3	REAL
+@ATTRIBUTE class 	{class0, class1, class2, class3}
+@DATA
+0.1, 0.2, 0.3, 0.4,class1
+-0.1, -0.2, -0.3, -0.4,class2
+1, 2, 3, 4,class3
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test5.arff b/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test5.arff
new file mode 100644
index 00000000..0075daf0
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test5.arff
@@ -0,0 +1,26 @@
+@RELATION test4
+
+@ATTRIBUTE attr0	REAL
+@ATTRIBUTE attr1 	REAL
+@ATTRIBUTE attr2 	REAL
+@ATTRIBUTE attr3	REAL
+@ATTRIBUTE class 	{class0, class1, class2, class3}
+
+@DATA
+
+% lsdflkjhaksjdhf
+
+% lsdflkjhaksjdhf
+
+0.1, 0.2, 0.3, 0.4,class1
+% laksjdhf
+
+% lsdflkjhaksjdhf
+-0.1, -0.2, -0.3, -0.4,class2
+
+% lsdflkjhaksjdhf
+% lsdflkjhaksjdhf
+
+% lsdflkjhaksjdhf
+
+1, 2, 3, 4,class3
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test6.arff b/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test6.arff
new file mode 100644
index 00000000..b63280b0
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test6.arff
@@ -0,0 +1,12 @@
+@RELATION test6
+
+@ATTRIBUTE attr0	REAL
+@ATTRIBUTE attr1 	REAL
+@ATTRIBUTE attr2 	REAL
+@ATTRIBUTE attr3	REAL
+@ATTRIBUTE class 	{C}
+
+@DATA
+0.1, 0.2, 0.3, 0.4,C
+-0.1, -0.2, -0.3, -0.4,C
+1, 2, 3, 4,C
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test7.arff b/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test7.arff
new file mode 100644
index 00000000..38ef6c9a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test7.arff
@@ -0,0 +1,15 @@
+@RELATION test7
+
+@ATTRIBUTE attr_year	DATE yyyy
+@ATTRIBUTE attr_month	DATE yyyy-MM
+@ATTRIBUTE attr_date	DATE yyyy-MM-dd
+@ATTRIBUTE attr_datetime_local	DATE "yyyy-MM-dd HH:mm"
+@ATTRIBUTE attr_datetime_missing	DATE "yyyy-MM-dd HH:mm"
+
+@DATA
+1999,1999-01,1999-01-31,"1999-01-31 00:01",?
+2004,2004-12,2004-12-01,"2004-12-01 23:59","2004-12-01 23:59"
+1817,1817-04,1817-04-28,"1817-04-28 13:00",?
+2100,2100-09,2100-09-10,"2100-09-10 12:00",?
+2013,2013-11,2013-11-30,"2013-11-30 04:55","2013-11-30 04:55"
+1631,1631-10,1631-10-15,"1631-10-15 20:04","1631-10-15 20:04"
\ No newline at end of file
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test8.arff b/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test8.arff
new file mode 100644
index 00000000..776deb4c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test8.arff
@@ -0,0 +1,12 @@
+@RELATION test8
+
+@ATTRIBUTE attr_datetime_utc	DATE "yyyy-MM-dd HH:mm Z"
+@ATTRIBUTE attr_datetime_full	DATE "yy-MM-dd HH:mm:ss z"
+
+@DATA
+"1999-01-31 00:01 UTC","99-01-31 00:01:08 +0430"
+"2004-12-01 23:59 UTC","04-12-01 23:59:59 -0800"
+"1817-04-28 13:00 UTC","17-04-28 13:00:33 +1000"
+"2100-09-10 12:00 UTC","21-09-10 12:00:21 -0300"
+"2013-11-30 04:55 UTC","13-11-30 04:55:48 -1100"
+"1631-10-15 20:04 UTC","31-10-15 20:04:10 +0000"
\ No newline at end of file
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test9.arff b/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test9.arff
new file mode 100644
index 00000000..b3f97e32
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/data/test9.arff
@@ -0,0 +1,14 @@
+@RELATION test9
+
+@ATTRIBUTE attr_date_number	    RELATIONAL
+	@ATTRIBUTE attr_date	DATE "yyyy-MM-dd"
+	@ATTRIBUTE attr_number	INTEGER
+@END attr_date_number
+
+@DATA
+"1999-01-31	1\n1935-11-27	10"
+"2004-12-01	2\n1942-08-13	20"
+"1817-04-28	3"
+"2100-09-10	4\n1957-04-17	40\n1721-01-14	400"
+"2013-11-30	5"
+"1631-10-15	6"
\ No newline at end of file
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/test_arffread.py b/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/test_arffread.py
new file mode 100644
index 00000000..668df2de
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/arff/tests/test_arffread.py
@@ -0,0 +1,418 @@
+import datetime
+import os
+import sys
+from os.path import join as pjoin
+
+from io import StringIO
+
+import numpy as np
+
+from numpy.testing import (assert_array_almost_equal,
+                           assert_array_equal, assert_equal, assert_)
+import pytest
+from pytest import raises as assert_raises
+
+from scipy.io.arff import loadarff
+from scipy.io.arff._arffread import read_header, ParseArffError
+
+
+data_path = pjoin(os.path.dirname(__file__), 'data')
+
+test1 = pjoin(data_path, 'test1.arff')
+test2 = pjoin(data_path, 'test2.arff')
+test3 = pjoin(data_path, 'test3.arff')
+
+test4 = pjoin(data_path, 'test4.arff')
+test5 = pjoin(data_path, 'test5.arff')
+test6 = pjoin(data_path, 'test6.arff')
+test7 = pjoin(data_path, 'test7.arff')
+test8 = pjoin(data_path, 'test8.arff')
+test9 = pjoin(data_path, 'test9.arff')
+test10 = pjoin(data_path, 'test10.arff')
+test11 = pjoin(data_path, 'test11.arff')
+test_quoted_nominal = pjoin(data_path, 'quoted_nominal.arff')
+test_quoted_nominal_spaces = pjoin(data_path, 'quoted_nominal_spaces.arff')
+
+expect4_data = [(0.1, 0.2, 0.3, 0.4, 'class1'),
+                (-0.1, -0.2, -0.3, -0.4, 'class2'),
+                (1, 2, 3, 4, 'class3')]
+expected_types = ['numeric', 'numeric', 'numeric', 'numeric', 'nominal']
+
+missing = pjoin(data_path, 'missing.arff')
+expect_missing_raw = np.array([[1, 5], [2, 4], [np.nan, np.nan]])
+expect_missing = np.empty(3, [('yop', float), ('yap', float)])
+expect_missing['yop'] = expect_missing_raw[:, 0]
+expect_missing['yap'] = expect_missing_raw[:, 1]
+
+
+class TestData:
+    def test1(self):
+        # Parsing trivial file with nothing.
+        self._test(test4)
+
+    def test2(self):
+        # Parsing trivial file with some comments in the data section.
+        self._test(test5)
+
+    def test3(self):
+        # Parsing trivial file with nominal attribute of 1 character.
+        self._test(test6)
+
+    def test4(self):
+        # Parsing trivial file with trailing spaces in attribute declaration.
+        self._test(test11)
+
+    def _test(self, test_file):
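+        # Shared helper: each fixture above encodes the same three rows, so
+        # compare every numeric field against expect4_data and check the
+        # parsed attribute types.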
+        data, meta = loadarff(test_file)
+        for i in range(len(data)):
+            for j in range(4):
+                assert_array_almost_equal(expect4_data[i][j], data[i][j])
+        assert_equal(meta.types(), expected_types)
+
+    def test_filelike(self):
+        # Test reading from file-like object (StringIO)
+        with open(test1) as f1:
+            data1, meta1 = loadarff(f1)
+        with open(test1) as f2:
+            data2, meta2 = loadarff(StringIO(f2.read()))
+        assert_(data1 == data2)
+        assert_(repr(meta1) == repr(meta2))
+
+    def test_path(self):
+        # Test reading from `pathlib.Path` object
+        from pathlib import Path
+
+        with open(test1) as f1:
+            data1, meta1 = loadarff(f1)
+
+        data2, meta2 = loadarff(Path(test1))
+
+        assert_(data1 == data2)
+        assert_(repr(meta1) == repr(meta2))
+
+
+class TestMissingData:
+    def test_missing(self):
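+        # '?' entries in missing.arff should come back as NaN.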
+        data, meta = loadarff(missing)
+        for i in ['yop', 'yap']:
+            assert_array_almost_equal(data[i], expect_missing[i])
+
+
+class TestNoData:
+    def test_nodata(self):
+        # The file nodata.arff has no data in the @DATA section.
+        # Reading it should result in an array with length 0.
+        nodata_filename = os.path.join(data_path, 'nodata.arff')
+        data, meta = loadarff(nodata_filename)
+        if sys.byteorder == 'big':
+            end = '>'
+        else:
+            end = '<'
+        expected_dtype = np.dtype([('sepallength', f'{end}f8'),
+                                   ('sepalwidth', f'{end}f8'),
+                                   ('petallength', f'{end}f8'),
+                                   ('petalwidth', f'{end}f8'),
+                                   ('class', 'S15')])
+        assert_equal(data.dtype, expected_dtype)
+        assert_equal(data.size, 0)
+
+
+class TestHeader:
+    def test_type_parsing(self):
+        # Test parsing type of attribute from their value.
+        with open(test2) as ofile:
+            rel, attrs = read_header(ofile)
+
+        expected = ['numeric', 'numeric', 'numeric', 'numeric', 'numeric',
+                    'numeric', 'string', 'string', 'nominal', 'nominal']
+
+        for i in range(len(attrs)):
+            assert_(attrs[i].type_name == expected[i])
+
+    def test_badtype_parsing(self):
+        # Test parsing wrong type of attribute from their value.
+        def badtype_read():
+            with open(test3) as ofile:
+                _, _ = read_header(ofile)
+
+        assert_raises(ParseArffError, badtype_read)
+
+    def test_fullheader1(self):
+        # Parsing trivial header with nothing.
+        with open(test1) as ofile:
+            rel, attrs = read_header(ofile)
+
+        # Test relation
+        assert_(rel == 'test1')
+
+        # Test numerical attributes
+        assert_(len(attrs) == 5)
+        for i in range(4):
+            assert_(attrs[i].name == 'attr%d' % i)
+            assert_(attrs[i].type_name == 'numeric')
+
+        # Test nominal attribute
+        assert_(attrs[4].name == 'class')
+        assert_(attrs[4].values == ('class0', 'class1', 'class2', 'class3'))
+
+    def test_dateheader(self):
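+        # Java SimpleDateFormat patterns in the DATE declarations should be
+        # translated into their strptime equivalents.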
+        with open(test7) as ofile:
+            rel, attrs = read_header(ofile)
+
+        assert_(rel == 'test7')
+
+        assert_(len(attrs) == 5)
+
+        assert_(attrs[0].name == 'attr_year')
+        assert_(attrs[0].date_format == '%Y')
+
+        assert_(attrs[1].name == 'attr_month')
+        assert_(attrs[1].date_format == '%Y-%m')
+
+        assert_(attrs[2].name == 'attr_date')
+        assert_(attrs[2].date_format == '%Y-%m-%d')
+
+        assert_(attrs[3].name == 'attr_datetime_local')
+        assert_(attrs[3].date_format == '%Y-%m-%d %H:%M')
+
+        assert_(attrs[4].name == 'attr_datetime_missing')
+        assert_(attrs[4].date_format == '%Y-%m-%d %H:%M')
+
+    def test_dateheader_unsupported(self):
+        def read_dateheader_unsupported():
+            with open(test8) as ofile:
+                _, _ = read_header(ofile)
+
+        assert_raises(ValueError, read_dateheader_unsupported)
+
+
+class TestDateAttribute:
+    def setup_method(self):
+        self.data, self.meta = loadarff(test7)
+
+    def test_year_attribute(self):
+        expected = np.array([
+            '1999',
+            '2004',
+            '1817',
+            '2100',
+            '2013',
+            '1631'
+        ], dtype='datetime64[Y]')
+
+        assert_array_equal(self.data["attr_year"], expected)
+
+    def test_month_attribute(self):
+        expected = np.array([
+            '1999-01',
+            '2004-12',
+            '1817-04',
+            '2100-09',
+            '2013-11',
+            '1631-10'
+        ], dtype='datetime64[M]')
+
+        assert_array_equal(self.data["attr_month"], expected)
+
+    def test_date_attribute(self):
+        expected = np.array([
+            '1999-01-31',
+            '2004-12-01',
+            '1817-04-28',
+            '2100-09-10',
+            '2013-11-30',
+            '1631-10-15'
+        ], dtype='datetime64[D]')
+
+        assert_array_equal(self.data["attr_date"], expected)
+
+    def test_datetime_local_attribute(self):
+        expected = np.array([
+            datetime.datetime(year=1999, month=1, day=31, hour=0, minute=1),
+            datetime.datetime(year=2004, month=12, day=1, hour=23, minute=59),
+            datetime.datetime(year=1817, month=4, day=28, hour=13, minute=0),
+            datetime.datetime(year=2100, month=9, day=10, hour=12, minute=0),
+            datetime.datetime(year=2013, month=11, day=30, hour=4, minute=55),
+            datetime.datetime(year=1631, month=10, day=15, hour=20, minute=4)
+        ], dtype='datetime64[m]')
+
+        assert_array_equal(self.data["attr_datetime_local"], expected)
+
+    def test_datetime_missing(self):
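+        # '?' date values should load as NaT.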
+        expected = np.array([
+            'nat',
+            '2004-12-01T23:59',
+            'nat',
+            'nat',
+            '2013-11-30T04:55',
+            '1631-10-15T20:04'
+        ], dtype='datetime64[m]')
+
+        assert_array_equal(self.data["attr_datetime_missing"], expected)
+
+    def test_datetime_timezone(self):
+        assert_raises(ParseArffError, loadarff, test8)
+
+
+class TestRelationalAttribute:
+    def setup_method(self):
+        self.data, self.meta = loadarff(test9)
+
+    def test_attributes(self):
+        assert_equal(len(self.meta._attributes), 1)
+
+        relational = list(self.meta._attributes.values())[0]
+
+        assert_equal(relational.name, 'attr_date_number')
+        assert_equal(relational.type_name, 'relational')
+        assert_equal(len(relational.attributes), 2)
+        assert_equal(relational.attributes[0].name,
+                     'attr_date')
+        assert_equal(relational.attributes[0].type_name,
+                     'date')
+        assert_equal(relational.attributes[1].name,
+                     'attr_number')
+        assert_equal(relational.attributes[1].type_name,
+                     'numeric')
+
+    def test_data(self):
+        dtype_instance = [('attr_date', 'datetime64[D]'),
+                          ('attr_number', np.float_)]
+
+        expected = [
+            np.array([('1999-01-31', 1), ('1935-11-27', 10)],
+                     dtype=dtype_instance),
+            np.array([('2004-12-01', 2), ('1942-08-13', 20)],
+                     dtype=dtype_instance),
+            np.array([('1817-04-28', 3)],
+                     dtype=dtype_instance),
+            np.array([('2100-09-10', 4), ('1957-04-17', 40),
+                      ('1721-01-14', 400)],
+                     dtype=dtype_instance),
+            np.array([('2013-11-30', 5)],
+                     dtype=dtype_instance),
+            np.array([('1631-10-15', 6)],
+                     dtype=dtype_instance)
+        ]
+
+        for i in range(len(self.data["attr_date_number"])):
+            assert_array_equal(self.data["attr_date_number"][i],
+                               expected[i])
+
+
+class TestRelationalAttributeLong:
+    def setup_method(self):
+        self.data, self.meta = loadarff(test10)
+
+    def test_attributes(self):
+        assert_equal(len(self.meta._attributes), 1)
+
+        relational = list(self.meta._attributes.values())[0]
+
+        assert_equal(relational.name, 'attr_relational')
+        assert_equal(relational.type_name, 'relational')
+        assert_equal(len(relational.attributes), 1)
+        assert_equal(relational.attributes[0].name,
+                     'attr_number')
+        assert_equal(relational.attributes[0].type_name, 'numeric')
+
+    def test_data(self):
+        dtype_instance = [('attr_number', np.float_)]
+
+        expected = np.array([(n,) for n in range(30000)],
+                            dtype=dtype_instance)
+
+        assert_array_equal(self.data["attr_relational"][0],
+                           expected)
+
+
+class TestQuotedNominal:
+    """
+    Regression test for issue #10232 : Exception in loadarff with quoted nominal attributes.
+    """
+
+    def setup_method(self):
+        self.data, self.meta = loadarff(test_quoted_nominal)
+
+    def test_attributes(self):
+        assert_equal(len(self.meta._attributes), 2)
+
+        age, smoker = self.meta._attributes.values()
+
+        assert_equal(age.name, 'age')
+        assert_equal(age.type_name, 'numeric')
+        assert_equal(smoker.name, 'smoker')
+        assert_equal(smoker.type_name, 'nominal')
+        assert_equal(smoker.values, ['yes', 'no'])
+
+    def test_data(self):
+
+        age_dtype_instance = np.float_
+        smoker_dtype_instance = '<S3'
+
+        age_expected = np.array([18, 24, 44, 56, 89, 11],
+                                dtype=age_dtype_instance)
+        smoker_expected = np.array(['no', 'yes', 'no', 'no', 'yes', 'no'],
+                                   dtype=smoker_dtype_instance)
+
+        assert_array_equal(self.data["age"], age_expected)
+        assert_array_equal(self.data["smoker"], smoker_expected)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/_byteordercodes.py b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/_byteordercodes.py
new file mode 100644
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/_byteordercodes.py
+''' Byteorder utilities for system - numpy byteorder encoding
+
+Converts a variety of string codes for little endian, big endian,
+native, swapped to explicit numpy endian codes - one of '<' (little
+endian) or '>' (big endian)
+
+'''
+import sys
+
+__all__ = [
+    'aliases', 'native_code', 'swapped_code',
+    'sys_is_le', 'to_numpy_code'
+]
+
+sys_is_le = sys.byteorder == 'little'
+native_code = '<' if sys_is_le else '>'
+swapped_code = '>' if sys_is_le else '<'
+
+aliases = {'little': ('little', '<', 'l', 'le'),
+           'big': ('big', '>', 'b', 'be'),
+           'native': ('native', '='),
+           'swapped': ('swapped', 'S')}
+
+
+def to_numpy_code(code):
+    """
+    Convert various order codings to NumPy format.
+
+    Parameters
+    ----------
+    code : str
+        The code to convert. It is converted to lower case before parsing.
+        Legal values are:
+        'little', 'big', 'l', 'b', 'le', 'be', '<', '>', 'native', '=',
+        'swapped', 's'.
+
+    Returns
+    -------
+    out_code : {'<', '>'}
+        Here '<' is the numpy dtype code for little endian,
+        and '>' is the code for big endian.
+
+    Examples
+    --------
+    >>> import sys
+    >>> sys_is_le == (sys.byteorder == 'little')
+    True
+    >>> to_numpy_code('big')
+    '>'
+    >>> to_numpy_code('little')
+    '<'
+    >>> nc = to_numpy_code('native')
+    >>> nc == '<' if sys_is_le else nc == '>'
+    True
+    >>> sc = to_numpy_code('swapped')
+    >>> sc == '>' if sys_is_le else sc == '<'
+    True
+
+    """
+    if code is None:
+        return native_code
+    code = code.lower()
+    if code in aliases['little']:
+        return '<'
+    elif code in aliases['big']:
+        return '>'
+    elif code in aliases['native']:
+        return native_code
+    elif code in aliases['swapped']:
+        return swapped_code
+    else:
+        raise ValueError(
+            'We cannot handle byte order %s' % code)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/_mio.py b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/_mio.py
new file mode 100644
index 00000000..bd5fc61a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/_mio.py
@@ -0,0 +1,358 @@
+"""
+Module for reading and writing matlab (TM) .mat files
+"""
+# Authors: Travis Oliphant, Matthew Brett
+
+from contextlib import contextmanager
+
+from ._miobase import _get_matfile_version, docfiller
+from ._mio4 import MatFile4Reader, MatFile4Writer
+from ._mio5 import MatFile5Reader, MatFile5Writer
+
+__all__ = ['mat_reader_factory', 'loadmat', 'savemat', 'whosmat']
+
+
+@contextmanager
+def _open_file_context(file_like, appendmat, mode='rb'):
+    f, opened = _open_file(file_like, appendmat, mode)
+    try:
+        yield f
+    finally:
+        if opened:
+            f.close()
+
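+# Usage sketch (illustrative only): a path string is opened here and closed
+# on exit, while an already-open file-like object is passed through and left
+# open for the caller:
+#
+#     with _open_file_context('data.mat', appendmat=True) as f:
+#         magic = f.read(4)
+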
+
+def _open_file(file_like, appendmat, mode='rb'):
+    """
+    Open `file_like` and return as file-like object. First, check if object is
+    already file-like; if so, return it as-is. Otherwise, try to pass it
+    to open(). If that fails, and `file_like` is a string, and `appendmat` is true,
+    append '.mat' and try again.
+    """
+    reqs = {'read'} if set(mode) & set('r+') else set()
+    if set(mode) & set('wax+'):
+        reqs.add('write')
+    if reqs.issubset(dir(file_like)):
+        return file_like, False
+
+    try:
+        return open(file_like, mode), True
+    except OSError as e:
+        # Probably "not found"
+        if isinstance(file_like, str):
+            if appendmat and not file_like.endswith('.mat'):
+                file_like += '.mat'
+            return open(file_like, mode), True
+        else:
+            raise OSError(
+                'Reader needs file name or open file-like object'
+            ) from e
+
+
+@docfiller
+def mat_reader_factory(file_name, appendmat=True, **kwargs):
+    """
+    Create reader for matlab .mat format files.
+
+    Parameters
+    ----------
+    %(file_arg)s
+    %(append_arg)s
+    %(load_args)s
+    %(struct_arg)s
+
+    Returns
+    -------
+    matreader : MatFileReader object
+       Initialized instance of MatFileReader class matching the mat file
+       type detected in `filename`.
+    file_opened : bool
+       Whether the file was opened by this routine.
+
+    """
+    byte_stream, file_opened = _open_file(file_name, appendmat)
+    mjv, mnv = _get_matfile_version(byte_stream)
+    if mjv == 0:
+        return MatFile4Reader(byte_stream, **kwargs), file_opened
+    elif mjv == 1:
+        return MatFile5Reader(byte_stream, **kwargs), file_opened
+    elif mjv == 2:
+        raise NotImplementedError('Please use HDF reader for matlab v7.3 '
+                                  'files, e.g. h5py')
+    else:
+        raise TypeError('Did not recognize version %s' % mjv)
+
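+# Sketch of factory use (illustrative): when ``file_opened`` is True the
+# caller owns the handle, which is reachable as the reader's ``mat_stream``
+# attribute:
+#
+#     rdr, opened = mat_reader_factory('data.mat')
+#     try:
+#         contents = rdr.get_variables()
+#     finally:
+#         if opened:
+#             rdr.mat_stream.close()
+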
+
+@docfiller
+def loadmat(file_name, mdict=None, appendmat=True, **kwargs):
+    """
+    Load MATLAB file.
+
+    Parameters
+    ----------
+    file_name : str
+       Name of the mat file (do not need .mat extension if
+       appendmat==True). Can also pass open file-like object.
+    mdict : dict, optional
+        Dictionary in which to insert matfile variables.
+    appendmat : bool, optional
+       True to append the .mat extension to the end of the given
+       filename, if not already present. Default is True.
+    byte_order : str or None, optional
+       None by default, implying byte order guessed from mat
+       file. Otherwise can be one of ('native', '=', 'little', '<',
+       'BIG', '>').
+    mat_dtype : bool, optional
+       If True, return arrays in same dtype as would be loaded into
+       MATLAB (instead of the dtype with which they are saved).
+    squeeze_me : bool, optional
+       Whether to squeeze unit matrix dimensions or not.
+    chars_as_strings : bool, optional
+       Whether to convert char arrays to string arrays.
+    matlab_compatible : bool, optional
+       Returns matrices as would be loaded by MATLAB (implies
+       squeeze_me=False, chars_as_strings=False, mat_dtype=True,
+       struct_as_record=True).
+    struct_as_record : bool, optional
+       Whether to load MATLAB structs as NumPy record arrays, or as
+       old-style NumPy arrays with dtype=object. Setting this flag to
+       False replicates the behavior of scipy version 0.7.x (returning
+       NumPy object arrays). The default setting is True, because it
+       allows easier round-trip load and save of MATLAB files.
+    verify_compressed_data_integrity : bool, optional
+        Whether the length of compressed sequences in the MATLAB file
+        should be checked, to ensure that they are not longer than we expect.
+        It is advisable to enable this (the default) because overlong
+        compressed sequences in MATLAB files generally indicate that the
+        files have experienced some sort of corruption.
+    variable_names : None or sequence
+        If None (the default) - read all variables in file. Otherwise,
+        `variable_names` should be a sequence of strings, giving names of the
+        MATLAB variables to read from the file. The reader will skip any
+        variable with a name not in this sequence, possibly saving some read
+        processing.
+    simplify_cells : bool, optional (default: False)
+        If True, return a simplified dict structure (which is useful if the mat
+        file contains cell arrays). Note that this only affects the structure
+        of the result and not its contents (which is identical for both output
+        structures). If True, this automatically sets `struct_as_record` to
+        False and `squeeze_me` to True, which is required to simplify cells.
+
+    Returns
+    -------
+    mat_dict : dict
+       dictionary with variable names as keys, and loaded matrices as
+       values.
+
+    Notes
+    -----
+    v4 (Level 1.0), v6 and v7 to 7.2 matfiles are supported.
+
+    You will need an HDF5 Python library to read MATLAB 7.3 format mat
+    files. Because SciPy does not supply one, we do not implement the
+    HDF5 / 7.3 interface here.
+
+    Examples
+    --------
+    >>> from os.path import dirname, join as pjoin
+    >>> import scipy.io as sio
+
+    Get the filename for an example .mat file from the tests/data directory.
+
+    >>> data_dir = pjoin(dirname(sio.__file__), 'matlab', 'tests', 'data')
+    >>> mat_fname = pjoin(data_dir, 'testdouble_7.4_GLNX86.mat')
+
+    Load the .mat file contents.
+
+    >>> mat_contents = sio.loadmat(mat_fname)
+
+    The result is a dictionary, one key/value pair for each variable:
+
+    >>> sorted(mat_contents.keys())
+    ['__globals__', '__header__', '__version__', 'testdouble']
+    >>> mat_contents['testdouble']
+    array([[0.        , 0.78539816, 1.57079633, 2.35619449, 3.14159265,
+            3.92699082, 4.71238898, 5.49778714, 6.28318531]])
+
+    By default SciPy reads MATLAB structs as structured NumPy arrays where the
+    dtype fields are of type `object` and the names correspond to the MATLAB
+    struct field names. This can be disabled by setting the optional argument
+    `struct_as_record=False`.
+
+    Get the filename for an example .mat file that contains a MATLAB struct
+    called `teststruct` and load the contents.
+
+    >>> matstruct_fname = pjoin(data_dir, 'teststruct_7.4_GLNX86.mat')
+    >>> matstruct_contents = sio.loadmat(matstruct_fname)
+    >>> teststruct = matstruct_contents['teststruct']
+    >>> teststruct.dtype
+    dtype([('stringfield', 'O'), ('doublefield', 'O'), ('complexfield', 'O')])
+
+    The size of the structured array is the size of the MATLAB struct, not the
+    number of elements in any particular field. The shape defaults to 2-D
+    unless the optional argument `squeeze_me=True`, in which case all length 1
+    dimensions are removed.
+
+    >>> teststruct.size
+    1
+    >>> teststruct.shape
+    (1, 1)
+
+    Get the 'stringfield' of the first element in the MATLAB struct.
+
+    >>> teststruct[0, 0]['stringfield']
+    array(['Rats live on no evil star.'],
+      dtype='<U26')
+
+    Get the first element of the 'doublefield'.
+
+    >>> teststruct['doublefield'][0, 0]
+    array([[ 1.41421356,  2.71828183,  3.14159265]])
+
+    Load the MATLAB struct, squeezing out length 1 dimensions, and get the item
+    from the 'complexfield'.
+
+    >>> matstruct_squeezed = sio.loadmat(matstruct_fname, squeeze_me=True)
+    >>> matstruct_squeezed['teststruct'].shape
+    ()
+    >>> matstruct_squeezed['teststruct']['complexfield'].shape
+    ()
+    >>> matstruct_squeezed['teststruct']['complexfield'].item()
+    array([ 1.41421356+1.41421356j,  2.71828183+2.71828183j,
+        3.14159265+3.14159265j])
+    """
+    variable_names = kwargs.pop('variable_names', None)
+    with _open_file_context(file_name, appendmat) as f:
+        MR, _ = mat_reader_factory(f, **kwargs)
+        matfile_dict = MR.get_variables(variable_names)
+
+    if mdict is not None:
+        mdict.update(matfile_dict)
+    else:
+        mdict = matfile_dict
+
+    return mdict
+
+
+@docfiller
+def savemat(file_name, mdict,
+            appendmat=True,
+            format='5',
+            long_field_names=False,
+            do_compression=False,
+            oned_as='row'):
+    """
+    Save a dictionary of names and arrays into a MATLAB-style .mat file.
+
+    This saves the array objects in the given dictionary to a MATLAB-
+    style .mat file.
+
+    Parameters
+    ----------
+    file_name : str or file-like object
+        Name of the .mat file (.mat extension not needed if ``appendmat ==
+        True``).
+        Can also pass open file_like object.
+    mdict : dict
+        Dictionary from which to save matfile variables.
+    appendmat : bool, optional
+        True (the default) to append the .mat extension to the end of the
+        given filename, if not already present.
+    format : {'5', '4'}, string, optional
+        '5' (the default) for MATLAB 5 and up (to 7.2),
+        '4' for MATLAB 4 .mat files.
+    long_field_names : bool, optional
+        False (the default) - maximum field name length in a structure is
+        31 characters which is the documented maximum length.
+        True - maximum field name length in a structure is 63 characters
+        which works for MATLAB 7.6+.
+    do_compression : bool, optional
+        Whether or not to compress matrices on write. Default is False.
+    oned_as : {'row', 'column'}, optional
+        If 'column', write 1-D NumPy arrays as column vectors.
+        If 'row', write 1-D NumPy arrays as row vectors.
+
+    Examples
+    --------
+    >>> from scipy.io import savemat
+    >>> import numpy as np
+    >>> a = np.arange(20)
+    >>> mdic = {"a": a, "label": "experiment"}
+    >>> mdic
+    {'a': array([ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,
+        17, 18, 19]),
+    'label': 'experiment'}
+    >>> savemat("matlab_matrix.mat", mdic)
+    """
+    with _open_file_context(file_name, appendmat, 'wb') as file_stream:
+        if format == '4':
+            if long_field_names:
+                raise ValueError("Long field names are not available for version 4 files")
+            MW = MatFile4Writer(file_stream, oned_as)
+        elif format == '5':
+            MW = MatFile5Writer(file_stream,
+                                do_compression=do_compression,
+                                unicode_strings=True,
+                                long_field_names=long_field_names,
+                                oned_as=oned_as)
+        else:
+            raise ValueError("Format should be '4' or '5'")
+        MW.put_variables(mdict)
+
+
+@docfiller
+def whosmat(file_name, appendmat=True, **kwargs):
+    """
+    List variables inside a MATLAB file.
+
+    Parameters
+    ----------
+    %(file_arg)s
+    %(append_arg)s
+    %(load_args)s
+    %(struct_arg)s
+
+    Returns
+    -------
+    variables : list of tuples
+        A list of tuples, where each tuple holds the matrix name (a string),
+        its shape (tuple of ints), and its data class (a string).
+        Possible data classes are: int8, uint8, int16, uint16, int32, uint32,
+        int64, uint64, single, double, cell, struct, object, char, sparse,
+        function, opaque, logical, unknown.
+
+    Notes
+    -----
+    v4 (Level 1.0), v6 and v7 to 7.2 matfiles are supported.
+
+    You will need an HDF5 python library to read matlab 7.3 format mat
+    files (e.g. h5py). Because SciPy does not supply one, we do not implement the
+    HDF5 / 7.3 interface here.
+
+    .. versionadded:: 0.12.0
+
+    Examples
+    --------
+    >>> from io import BytesIO
+    >>> import numpy as np
+    >>> from scipy.io import savemat, whosmat
+
+    Create some arrays, and use `savemat` to write them to a ``BytesIO``
+    instance.
+
+    >>> a = np.array([[10, 20, 30], [11, 21, 31]], dtype=np.int32)
+    >>> b = np.geomspace(1, 10, 5)
+    >>> f = BytesIO()
+    >>> savemat(f, {'a': a, 'b': b})
+
+    Use `whosmat` to inspect ``f``.  Each tuple in the output list gives
+    the name, shape and data type of the array in ``f``.
+
+    >>> whosmat(f)
+    [('a', (2, 3), 'int32'), ('b', (1, 5), 'double')]
+
+    """
+    with _open_file_context(file_name, appendmat) as f:
+        ML, file_opened = mat_reader_factory(f, **kwargs)
+        variables = ML.list_variables()
+    return variables
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/_mio4.py b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/_mio4.py
new file mode 100644
index 00000000..a153b3b7
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/_mio4.py
@@ -0,0 +1,623 @@
+''' Classes for read / write of matlab (TM) 4 files
+'''
+import sys
+import warnings
+
+import numpy as np
+
+import scipy.sparse
+
+from ._miobase import (MatFileReader, docfiller, matdims, read_dtype,
+                      convert_dtypes, arr_to_chars, arr_dtype_number)
+
+from ._mio_utils import squeeze_element, chars_to_strings
+from functools import reduce
+
+
+__all__ = [
+    'MatFile4Reader', 'MatFile4Writer', 'SYS_LITTLE_ENDIAN',
+    'VarHeader4', 'VarReader4', 'VarWriter4', 'arr_to_2d', 'mclass_info',
+    'mdtypes_template', 'miDOUBLE', 'miINT16', 'miINT32', 'miSINGLE',
+    'miUINT16', 'miUINT8', 'mxCHAR_CLASS', 'mxFULL_CLASS', 'mxSPARSE_CLASS',
+    'np_to_mtypes', 'order_codes'
+]
+
+
+SYS_LITTLE_ENDIAN = sys.byteorder == 'little'
+
+miDOUBLE = 0
+miSINGLE = 1
+miINT32 = 2
+miINT16 = 3
+miUINT16 = 4
+miUINT8 = 5
+
+mdtypes_template = {
+    miDOUBLE: 'f8',
+    miSINGLE: 'f4',
+    miINT32: 'i4',
+    miINT16: 'i2',
+    miUINT16: 'u2',
+    miUINT8: 'u1',
+    'header': [('mopt', 'i4'),
+               ('mrows', 'i4'),
+               ('ncols', 'i4'),
+               ('imagf', 'i4'),
+               ('namlen', 'i4')],
+    'U1': 'U1',
+    }
+
+np_to_mtypes = {
+    'f8': miDOUBLE,
+    'c32': miDOUBLE,
+    'c24': miDOUBLE,
+    'c16': miDOUBLE,
+    'f4': miSINGLE,
+    'c8': miSINGLE,
+    'i4': miINT32,
+    'i2': miINT16,
+    'u2': miUINT16,
+    'u1': miUINT8,
+    'S1': miUINT8,
+    }
+
+# matrix classes
+mxFULL_CLASS = 0
+mxCHAR_CLASS = 1
+mxSPARSE_CLASS = 2
+
+order_codes = {
+    0: '<',
+    1: '>',
+    2: 'VAX D-float',  # !
+    3: 'VAX G-float',
+    4: 'Cray',  # !!
+    }
+
+mclass_info = {
+    mxFULL_CLASS: 'double',
+    mxCHAR_CLASS: 'char',
+    mxSPARSE_CLASS: 'sparse',
+    }
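+
+# How the constants above combine (illustrative): a Mat4 variable header
+# starts with ``mopt`` = M*1000 + O*100 + P*10 + T, where M is an order code,
+# O is reserved (always 0), P is a data type code and T a matrix class code.
+# For example, mopt == 51 means little-endian (M=0), miUINT8 data (P=5) and
+# mxCHAR_CLASS (T=1), which is what ``VarWriter4.write_char`` below emits on
+# a little-endian host.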
+
+
+class VarHeader4:
+    # Mat4 variables never logical or global
+    is_logical = False
+    is_global = False
+
+    def __init__(self,
+                 name,
+                 dtype,
+                 mclass,
+                 dims,
+                 is_complex):
+        self.name = name
+        self.dtype = dtype
+        self.mclass = mclass
+        self.dims = dims
+        self.is_complex = is_complex
+
+
+class VarReader4:
+    ''' Class to read matlab 4 variables '''
+
+    def __init__(self, file_reader):
+        self.file_reader = file_reader
+        self.mat_stream = file_reader.mat_stream
+        self.dtypes = file_reader.dtypes
+        self.chars_as_strings = file_reader.chars_as_strings
+        self.squeeze_me = file_reader.squeeze_me
+
+    def read_header(self):
+        ''' Read and return header for variable '''
+        data = read_dtype(self.mat_stream, self.dtypes['header'])
+        name = self.mat_stream.read(int(data['namlen'])).strip(b'\x00')
+        if data['mopt'] < 0 or data['mopt'] > 5000:
+            raise ValueError('Mat 4 mopt wrong format, byteswapping problem?')
+        M, rest = divmod(data['mopt'], 1000)  # order code
+        if M not in (0, 1):
+            warnings.warn("We do not support byte ordering '%s'; returned "
+                          "data may be corrupt" % order_codes[M],
+                          UserWarning)
+        O, rest = divmod(rest, 100)  # unused, should be 0
+        if O != 0:
+            raise ValueError('O in MOPT integer should be 0, wrong format?')
+        P, rest = divmod(rest, 10)  # data type code e.g miDOUBLE (see above)
+        T = rest  # matrix type code e.g., mxFULL_CLASS (see above)
+        dims = (data['mrows'], data['ncols'])
+        is_complex = data['imagf'] == 1
+        dtype = self.dtypes[P]
+        return VarHeader4(
+            name,
+            dtype,
+            T,
+            dims,
+            is_complex)
+
+    def array_from_header(self, hdr, process=True):
+        mclass = hdr.mclass
+        if mclass == mxFULL_CLASS:
+            arr = self.read_full_array(hdr)
+        elif mclass == mxCHAR_CLASS:
+            arr = self.read_char_array(hdr)
+            if process and self.chars_as_strings:
+                arr = chars_to_strings(arr)
+        elif mclass == mxSPARSE_CLASS:
+            # no current processing (below) makes sense for sparse
+            return self.read_sparse_array(hdr)
+        else:
+            raise TypeError('No reader for class code %s' % mclass)
+        if process and self.squeeze_me:
+            return squeeze_element(arr)
+        return arr
+
+    def read_sub_array(self, hdr, copy=True):
+        ''' Mat4 read using header `hdr` dtype and dims
+
+        Parameters
+        ----------
+        hdr : object
+           object with attributes ``dtype``, ``dims``. dtype is assumed to be
+           the correct endianness
+        copy : bool, optional
+           If True (the default), copy the array before returning it;
+           the underlying buffer is usually read-only.
+
+        Returns
+        -------
+        arr : ndarray
+            of dtype given by `hdr` ``dtype`` and shape given by `hdr` ``dims``
+        '''
+        dt = hdr.dtype
+        dims = hdr.dims
+        num_bytes = dt.itemsize
+        for d in dims:
+            num_bytes *= d
+        buffer = self.mat_stream.read(int(num_bytes))
+        if len(buffer) != num_bytes:
+            raise ValueError("Not enough bytes to read matrix '%s'; is this "
+                             "a badly-formed file? Consider listing matrices "
+                             "with `whosmat` and loading named matrices with "
+                             "`variable_names` kwarg to `loadmat`" % hdr.name)
+        arr = np.ndarray(shape=dims,
+                         dtype=dt,
+                         buffer=buffer,
+                         order='F')
+        if copy:
+            arr = arr.copy()
+        return arr
+
+    def read_full_array(self, hdr):
+        ''' Full (rather than sparse) matrix getter
+
+        Read matrix (array) can be real or complex
+
+        Parameters
+        ----------
+        hdr : ``VarHeader4`` instance
+
+        Returns
+        -------
+        arr : ndarray
+            complex array if ``hdr.is_complex`` is True, otherwise a real
+            numeric array
+        '''
+        if hdr.is_complex:
+            # avoid array copy to save memory
+            res = self.read_sub_array(hdr, copy=False)
+            res_j = self.read_sub_array(hdr, copy=False)
+            return res + (res_j * 1j)
+        return self.read_sub_array(hdr)
+
+    def read_char_array(self, hdr):
+        ''' latin-1 text matrix (char matrix) reader
+
+        Parameters
+        ----------
+        hdr : ``VarHeader4`` instance
+
+        Returns
+        -------
+        arr : ndarray
+            with dtype 'U1', shape given by `hdr` ``dims``
+        '''
+        arr = self.read_sub_array(hdr).astype(np.uint8)
+        S = arr.tobytes().decode('latin-1')
+        return np.ndarray(shape=hdr.dims,
+                          dtype=np.dtype('U1'),
+                          buffer=np.array(S)).copy()
+
+    def read_sparse_array(self, hdr):
+        ''' Read and return sparse matrix type
+
+        Parameters
+        ----------
+        hdr : ``VarHeader4`` instance
+
+        Returns
+        -------
+        arr : ``scipy.sparse.coo_matrix``
+            with dtype ``float`` and shape read from the sparse matrix data
+
+        Notes
+        -----
+        MATLAB 4 real sparse arrays are saved in a N+1 by 3 array format, where
+        N is the number of non-zero values. Column 1 values [0:N] are the
+    (1-based) row indices of each non-zero value, column 2 [0:N] are the
+        column indices, column 3 [0:N] are the (real) values. The last values
+        [-1,0:2] of the rows, column indices are shape[0] and shape[1]
+        respectively of the output matrix. The last value for the values column
+        is a padding 0. mrows and ncols values from the header give the shape of
+        the stored matrix, here [N+1, 3]. Complex data are saved as a 4 column
+        matrix, where the fourth column contains the imaginary component; the
+        last value is again 0. Complex sparse data do *not* have the header
+        ``imagf`` field set to True; the fact that the data are complex is only
+        detectable because there are 4 storage columns.
+        '''
+        res = self.read_sub_array(hdr)
+        tmp = res[:-1,:]
+        # All numbers are float64 in Matlab, but SciPy sparse expects int shape
+        dims = (int(res[-1,0]), int(res[-1,1]))
+        I = np.ascontiguousarray(tmp[:,0],dtype='intc')  # fixes byte order also
+        J = np.ascontiguousarray(tmp[:,1],dtype='intc')
+        I -= 1  # for 1-based indexing
+        J -= 1
+        if res.shape[1] == 3:
+            V = np.ascontiguousarray(tmp[:,2],dtype='float')
+        else:
+            V = np.ascontiguousarray(tmp[:,2],dtype='complex')
+            V.imag = tmp[:,3]
+        return scipy.sparse.coo_matrix((V,(I,J)), dims)
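+
+    # Worked example of the layout described above (illustrative): the 2x2
+    # matrix with nonzeros A[0, 0] = 1.0 and A[1, 1] = 2.0 is stored as
+    #     [[1., 1., 1.],
+    #      [2., 2., 2.],
+    #      [2., 2., 0.]]
+    # where the row/column indices are 1-based and the final row carries the
+    # shape (2, 2) plus a zero pad in the value column.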
+
+    def shape_from_header(self, hdr):
+        '''Read the shape of the array described by the header.
+        The file position after this call is unspecified.
+        '''
+        mclass = hdr.mclass
+        if mclass == mxFULL_CLASS:
+            shape = tuple(map(int, hdr.dims))
+        elif mclass == mxCHAR_CLASS:
+            shape = tuple(map(int, hdr.dims))
+            if self.chars_as_strings:
+                shape = shape[:-1]
+        elif mclass == mxSPARSE_CLASS:
+            dt = hdr.dtype
+            dims = hdr.dims
+
+            if not (len(dims) == 2 and dims[0] >= 1 and dims[1] >= 1):
+                return ()
+
+            # Read only the row and column counts
+            self.mat_stream.seek(dt.itemsize * (dims[0] - 1), 1)
+            rows = np.ndarray(shape=(), dtype=dt,
+                              buffer=self.mat_stream.read(dt.itemsize))
+            self.mat_stream.seek(dt.itemsize * (dims[0] - 1), 1)
+            cols = np.ndarray(shape=(), dtype=dt,
+                              buffer=self.mat_stream.read(dt.itemsize))
+
+            shape = (int(rows), int(cols))
+        else:
+            raise TypeError('No reader for class code %s' % mclass)
+
+        if self.squeeze_me:
+            shape = tuple([x for x in shape if x != 1])
+        return shape
+
+
+class MatFile4Reader(MatFileReader):
+    ''' Reader for Mat4 files '''
+    @docfiller
+    def __init__(self, mat_stream, *args, **kwargs):
+        ''' Initialize matlab 4 file reader
+
+    %(matstream_arg)s
+    %(load_args)s
+        '''
+        super().__init__(mat_stream, *args, **kwargs)
+        self._matrix_reader = None
+
+    def guess_byte_order(self):
+        self.mat_stream.seek(0)
+        mopt = read_dtype(self.mat_stream, np.dtype('i4'))
+        self.mat_stream.seek(0)
+        if mopt == 0:
+            return '<'
+        if mopt < 0 or mopt > 5000:
+            # Number must have been byteswapped
+            return '>' if SYS_LITTLE_ENDIAN else '<'
+        # Not byteswapped
+        return '<' if SYS_LITTLE_ENDIAN else '>'
+
+    def initialize_read(self):
+        ''' Run when beginning read of variables
+
+        Sets up readers from parameters in `self`
+        '''
+        self.dtypes = convert_dtypes(mdtypes_template, self.byte_order)
+        self._matrix_reader = VarReader4(self)
+
+    def read_var_header(self):
+        ''' Read and return header, next position
+
+        Parameters
+        ----------
+        None
+
+        Returns
+        -------
+        header : object
+           object that can be passed to self.read_var_array, and that
+           has attributes ``name`` and ``is_global``
+        next_position : int
+           position in stream of next variable
+        '''
+        hdr = self._matrix_reader.read_header()
+        n = reduce(lambda x, y: x*y, hdr.dims, 1)  # fast product
+        remaining_bytes = hdr.dtype.itemsize * n
+        if hdr.is_complex and hdr.mclass != mxSPARSE_CLASS:
+            remaining_bytes *= 2
+        next_position = self.mat_stream.tell() + remaining_bytes
+        return hdr, next_position
+
+    def read_var_array(self, header, process=True):
+        ''' Read array, given `header`
+
+        Parameters
+        ----------
+        header : header object
+           object with fields defining variable header
+        process : {True, False}, optional
+           If True, apply recursive post-processing during loading of array.
+
+        Returns
+        -------
+        arr : array
+           array with post-processing applied or not according to
+           `process`.
+        '''
+        return self._matrix_reader.array_from_header(header, process)
+
+    def get_variables(self, variable_names=None):
+        ''' get variables from stream as dictionary
+
+        Parameters
+        ----------
+        variable_names : None or str or sequence of str, optional
+            variable name, or sequence of variable names to get from Mat file /
+            file stream. If None, then get all variables in file.
+        '''
+        if isinstance(variable_names, str):
+            variable_names = [variable_names]
+        elif variable_names is not None:
+            variable_names = list(variable_names)
+        self.mat_stream.seek(0)
+        # set up variable reader
+        self.initialize_read()
+        mdict = {}
+        while not self.end_of_stream():
+            hdr, next_position = self.read_var_header()
+            name = 'None' if hdr.name is None else hdr.name.decode('latin1')
+            if variable_names is not None and name not in variable_names:
+                self.mat_stream.seek(next_position)
+                continue
+            mdict[name] = self.read_var_array(hdr)
+            self.mat_stream.seek(next_position)
+            if variable_names is not None:
+                variable_names.remove(name)
+                if len(variable_names) == 0:
+                    break
+        return mdict
+
+    def list_variables(self):
+        ''' list variables from stream '''
+        self.mat_stream.seek(0)
+        # set up variable reader
+        self.initialize_read()
+        vars = []
+        while not self.end_of_stream():
+            hdr, next_position = self.read_var_header()
+            name = 'None' if hdr.name is None else hdr.name.decode('latin1')
+            shape = self._matrix_reader.shape_from_header(hdr)
+            info = mclass_info.get(hdr.mclass, 'unknown')
+            vars.append((name, shape, info))
+
+            self.mat_stream.seek(next_position)
+        return vars
+
+
+def arr_to_2d(arr, oned_as='row'):
+    ''' Make ``arr`` exactly two dimensional
+
+    If `arr` has more than 2 dimensions, raise a ValueError
+
+    Parameters
+    ----------
+    arr : array
+    oned_as : {'row', 'column'}, optional
+       Whether to reshape 1-D vectors as row vectors or column vectors.
+       See documentation for ``matdims`` for more detail
+
+    Returns
+    -------
+    arr2d : array
+       2-D version of the array
+    '''
+    dims = matdims(arr, oned_as)
+    if len(dims) > 2:
+        raise ValueError('Matlab 4 files cannot save arrays with more than '
+                         '2 dimensions')
+    return arr.reshape(dims)
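+
+# Illustration (assuming the ``matdims`` semantics from ``_miobase``): a
+# shape-(3,) vector reshapes to (1, 3) with oned_as='row' and to (3, 1) with
+# oned_as='column'; anything with more than two dimensions raises ValueError.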
+
+
+class VarWriter4:
+    def __init__(self, file_writer):
+        self.file_stream = file_writer.file_stream
+        self.oned_as = file_writer.oned_as
+
+    def write_bytes(self, arr):
+        self.file_stream.write(arr.tobytes(order='F'))
+
+    def write_string(self, s):
+        self.file_stream.write(s)
+
+    def write_header(self, name, shape, P=miDOUBLE, T=mxFULL_CLASS, imagf=0):
+        ''' Write header for given data options
+
+        Parameters
+        ----------
+        name : str
+            name of variable
+        shape : sequence
+            Shape of array as it will be read in matlab
+        P : int, optional
+            code for mat4 data type, one of ``miDOUBLE, miSINGLE, miINT32,
+            miINT16, miUINT16, miUINT8``
+        T : int, optional
+            code for mat4 matrix class, one of ``mxFULL_CLASS, mxCHAR_CLASS,
+            mxSPARSE_CLASS``
+        imagf : int, optional
+            flag indicating complex
+        '''
+        header = np.empty((), mdtypes_template['header'])
+        M = not SYS_LITTLE_ENDIAN
+        O = 0
+        header['mopt'] = (M * 1000 +
+                          O * 100 +
+                          P * 10 +
+                          T)
+        header['mrows'] = shape[0]
+        header['ncols'] = shape[1]
+        header['imagf'] = imagf
+        header['namlen'] = len(name) + 1
+        self.write_bytes(header)
+        data = name + '\0'
+        self.write_string(data.encode('latin1'))
+
+    def write(self, arr, name):
+        ''' Write matrix `arr`, with name `name`
+
+        Parameters
+        ----------
+        arr : array_like
+           array to write
+        name : str
+           name in matlab workspace
+        '''
+        # we need to catch sparse first, because np.asarray returns
+        # an object array for scipy.sparse
+        if scipy.sparse.issparse(arr):
+            self.write_sparse(arr, name)
+            return
+        arr = np.asarray(arr)
+        dt = arr.dtype
+        if not dt.isnative:
+            arr = arr.astype(dt.newbyteorder('='))
+        dtt = dt.type
+        if dtt is np.object_:
+            raise TypeError('Cannot save object arrays in Mat4')
+        elif dtt is np.void:
+            raise TypeError('Cannot save void type arrays')
+        elif dtt in (np.unicode_, np.string_):
+            self.write_char(arr, name)
+            return
+        self.write_numeric(arr, name)
+
+    def write_numeric(self, arr, name):
+        arr = arr_to_2d(arr, self.oned_as)
+        imagf = arr.dtype.kind == 'c'
+        try:
+            P = np_to_mtypes[arr.dtype.str[1:]]
+        except KeyError:
+            if imagf:
+                arr = arr.astype('c128')
+            else:
+                arr = arr.astype('f8')
+            P = miDOUBLE
+        self.write_header(name,
+                          arr.shape,
+                          P=P,
+                          T=mxFULL_CLASS,
+                          imagf=imagf)
+        if imagf:
+            self.write_bytes(arr.real)
+            self.write_bytes(arr.imag)
+        else:
+            self.write_bytes(arr)
+
+    def write_char(self, arr, name):
+        arr = arr_to_chars(arr)
+        arr = arr_to_2d(arr, self.oned_as)
+        dims = arr.shape
+        self.write_header(
+            name,
+            dims,
+            P=miUINT8,
+            T=mxCHAR_CLASS)
+        if arr.dtype.kind == 'U':
+            # Recode unicode to latin1
+            n_chars = np.prod(dims)
+            st_arr = np.ndarray(shape=(),
+                                dtype=arr_dtype_number(arr, n_chars),
+                                buffer=arr)
+            st = st_arr.item().encode('latin-1')
+            arr = np.ndarray(shape=dims, dtype='S1', buffer=st)
+        self.write_bytes(arr)
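+
+    # Example of the recode above (illustrative): writing np.array('abc')
+    # yields a 1x3 char matrix; each unicode character is re-encoded as a
+    # single latin-1 byte, so text outside latin-1 cannot be saved in Mat4.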
+
+    def write_sparse(self, arr, name):
+        ''' Sparse matrices are 2-D
+
+        See docstring for VarReader4.read_sparse_array
+        '''
+        A = arr.tocoo()  # convert to sparse COO format (ijv)
+        imagf = A.dtype.kind == 'c'
+        ijv = np.zeros((A.nnz + 1, 3+imagf), dtype='f8')
+        ijv[:-1,0] = A.row
+        ijv[:-1,1] = A.col
+        ijv[:-1,0:2] += 1  # 1 based indexing
+        if imagf:
+            ijv[:-1,2] = A.data.real
+            ijv[:-1,3] = A.data.imag
+        else:
+            ijv[:-1,2] = A.data
+        ijv[-1,0:2] = A.shape
+        self.write_header(
+            name,
+            ijv.shape,
+            P=miDOUBLE,
+            T=mxSPARSE_CLASS)
+        self.write_bytes(ijv)
+
+
+class MatFile4Writer:
+    ''' Class for writing matlab 4 format files '''
+    def __init__(self, file_stream, oned_as=None):
+        self.file_stream = file_stream
+        if oned_as is None:
+            oned_as = 'row'
+        self.oned_as = oned_as
+        self._matrix_writer = None
+
+    def put_variables(self, mdict, write_header=None):
+        ''' Write variables in `mdict` to stream
+
+        Parameters
+        ----------
+        mdict : mapping
+           mapping with method ``items`` returning name, contents pairs,
+           where ``name`` will appear in the matlab workspace on
+           file load, and ``contents`` is something writeable to a
+           matlab file, such as a NumPy array.
+        write_header : {None, True, False}
+           If True, then write the matlab file header before writing the
+           variables. If None (the default) then write the file header
+           if we are at position 0 in the stream. By setting False
+           here, and setting the stream position to the end of the file,
+           you can append variables to a matlab file
+        '''
+        # there is no header for a matlab 4 mat file, so we ignore the
+        # ``write_header`` input argument. It's there for compatibility
+        # with the matlab 5 version of this method
+        self._matrix_writer = VarWriter4(self)
+        for name, var in mdict.items():
+            self._matrix_writer.write(var, name)
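+
+
+# Minimal round-trip sketch (illustrative; the public entry points are
+# scipy.io.savemat/loadmat with format='4'):
+#
+#     import numpy as np
+#     with open('example_v4.mat', 'wb') as f:
+#         MatFile4Writer(f).put_variables({'x': np.eye(2)})
+#     with open('example_v4.mat', 'rb') as f:
+#         x = MatFile4Reader(f).get_variables()['x']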
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/_mio5.py b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/_mio5.py
new file mode 100644
index 00000000..9b8de188
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/_mio5.py
@@ -0,0 +1,892 @@
+''' Classes for read / write of matlab (TM) 5 files
+
+The matfile specification last found here:
+
+https://www.mathworks.com/access/helpdesk/help/pdf_doc/matlab/matfile_format.pdf
+
+(as of December 5 2008)
+'''
+'''
+=================================
+ Note on functions and mat files
+=================================
+
+The document above does not give any hints as to the storage of matlab
+function handles, or anonymous function handles. I had, therefore, to
+guess the format of matlab arrays of ``mxFUNCTION_CLASS`` and
+``mxOPAQUE_CLASS`` by looking at example mat files.
+
+``mxFUNCTION_CLASS`` stores all types of matlab functions. It seems to
+contain a struct matrix with a set pattern of fields. For anonymous
+functions, a sub-fields of one of these fields seems to contain the
+well-named ``mxOPAQUE_CLASS``. This seems to contain:
+
+* array flags as for any matlab matrix
+* 3 int8 strings
+* a matrix
+
+It seems that whenever the mat file contains a ``mxOPAQUE_CLASS``
+instance, there is also an un-named matrix (name == '') at the end of
+the mat file. I'll call this the ``__function_workspace__`` matrix.
+
+When I saved two anonymous functions in a mat file, or appended another
+anonymous function to the mat file, there was still only one
+``__function_workspace__`` un-named matrix at the end, but larger than
+that for a mat file with a single anonymous function, suggesting that
+the workspaces for the two functions had been merged.
+
+The ``__function_workspace__`` matrix appears to be of double class
+(``mxCLASS_DOUBLE``), but stored as uint8, the memory for which is in
+the format of a mini .mat file, without the first 124 bytes of the file
+header (the description and the subsystem_offset), but with the version
+U2 bytes, and the S2 endian test bytes. There follow 4 zero bytes,
+presumably for 8 byte padding, and then a series of ``miMATRIX``
+entries, as in a standard mat file. The ``miMATRIX`` entries appear to
+be series of un-named (name == '') matrices, and may also contain arrays
+of this same mini-mat format.
+
+I guess that:
+
+* saving an anonymous function back to a mat file will need the
+  associated ``__function_workspace__`` matrix saved as well for the
+  anonymous function to work correctly.
+* appending to a mat file that has a ``__function_workspace__`` would
+  involve first pulling off this workspace, appending, checking whether
+  there were any more anonymous functions appended, and then somehow
+  merging the relevant workspaces, and saving at the end of the mat
+  file.
+
+The mat files I was playing with are in ``tests/data``:
+
+* sqr.mat
+* parabola.mat
+* some_functions.mat
+
+See ``tests/test_mio.py:test_mio_funcs.py`` for the debugging
+script I was working with.
+
+'''
+
+# Small fragments of current code adapted from matfile.py by Heiko
+# Henkelmann; parts of the code for simplify_cells=True adapted from
+# http://blog.nephics.com/2019/08/28/better-loadmat-for-scipy/.
+
+import os
+import time
+import sys
+import zlib
+
+from io import BytesIO
+
+import warnings
+
+import numpy as np
+
+import scipy.sparse
+
+from ._byteordercodes import native_code, swapped_code
+
+from ._miobase import (MatFileReader, docfiller, matdims, read_dtype,
+                      arr_to_chars, arr_dtype_number, MatWriteError,
+                      MatReadError, MatReadWarning)
+
+# Reader object for matlab 5 format variables
+from ._mio5_utils import VarReader5
+
+# Constants and helper objects
+from ._mio5_params import (MatlabObject, MatlabFunction, MDTYPES, NP_TO_MTYPES,
+                          NP_TO_MXTYPES, miCOMPRESSED, miMATRIX, miINT8,
+                          miUTF8, miUINT32, mxCELL_CLASS, mxSTRUCT_CLASS,
+                          mxOBJECT_CLASS, mxCHAR_CLASS, mxSPARSE_CLASS,
+                          mxDOUBLE_CLASS, mclass_info, mat_struct)
+
+from ._streams import ZlibInputStream
+
+
+def _has_struct(elem):
+    """Determine if elem is an array and if first array item is a struct."""
+    return (isinstance(elem, np.ndarray) and (elem.size > 0) and
+            isinstance(elem[0], mat_struct))
+
+
+def _inspect_cell_array(ndarray):
+    """Construct lists from cell arrays (loaded as numpy ndarrays), recursing
+    into items if they contain mat_struct objects."""
+    elem_list = []
+    for sub_elem in ndarray:
+        if isinstance(sub_elem, mat_struct):
+            elem_list.append(_matstruct_to_dict(sub_elem))
+        elif _has_struct(sub_elem):
+            elem_list.append(_inspect_cell_array(sub_elem))
+        else:
+            elem_list.append(sub_elem)
+    return elem_list
+
+
+def _matstruct_to_dict(matobj):
+    """Construct nested dicts from mat_struct objects."""
+    d = {}
+    for f in matobj._fieldnames:
+        elem = matobj.__dict__[f]
+        if isinstance(elem, mat_struct):
+            d[f] = _matstruct_to_dict(elem)
+        elif _has_struct(elem):
+            d[f] = _inspect_cell_array(elem)
+        else:
+            d[f] = elem
+    return d
+
+
+def _simplify_cells(d):
+    """Convert mat objects in dict to nested dicts."""
+    for key in d:
+        if isinstance(d[key], mat_struct):
+            d[key] = _matstruct_to_dict(d[key])
+        elif _has_struct(d[key]):
+            d[key] = _inspect_cell_array(d[key])
+    return d
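+
+# Illustration (hypothetical struct): with simplify_cells=True, a variable
+# loaded as a ``mat_struct`` with fields 'a' and 'b' comes back as the plain
+# dict {'a': ..., 'b': ...}, and cell arrays of structs become nested lists
+# of such dicts.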
+
+
+class MatFile5Reader(MatFileReader):
+    ''' Reader for Mat 5 mat files
+    Adds the following attribute to base class
+
+    uint16_codec - char codec to use for uint16 char arrays
+        (defaults to system default codec)
+
+    Uses a variable reader that has the following standard interface (see
+    abstract class in ``miobase``)::
+
+       __init__(self, file_reader)
+       read_header(self)
+       array_from_header(self)
+
+    and added interface::
+
+       set_stream(self, stream)
+       read_full_tag(self)
+
+    '''
+    @docfiller
+    def __init__(self,
+                 mat_stream,
+                 byte_order=None,
+                 mat_dtype=False,
+                 squeeze_me=False,
+                 chars_as_strings=True,
+                 matlab_compatible=False,
+                 struct_as_record=True,
+                 verify_compressed_data_integrity=True,
+                 uint16_codec=None,
+                 simplify_cells=False):
+        '''Initializer for matlab 5 file format reader
+
+    %(matstream_arg)s
+    %(load_args)s
+    %(struct_arg)s
+    uint16_codec : {None, string}
+        Set codec to use for uint16 char arrays (e.g., 'utf-8').
+        Use system default codec if None
+        '''
+        super().__init__(
+            mat_stream,
+            byte_order,
+            mat_dtype,
+            squeeze_me,
+            chars_as_strings,
+            matlab_compatible,
+            struct_as_record,
+            verify_compressed_data_integrity,
+            simplify_cells)
+        # Set uint16 codec
+        if not uint16_codec:
+            uint16_codec = sys.getdefaultencoding()
+        self.uint16_codec = uint16_codec
+        # placeholders for readers - see initialize_read method
+        self._file_reader = None
+        self._matrix_reader = None
+
+    def guess_byte_order(self):
+        ''' Guess byte order.
+        Sets stream pointer to 0'''
+        self.mat_stream.seek(126)
+        mi = self.mat_stream.read(2)
+        self.mat_stream.seek(0)
+        return '<' if mi == b'IM' else '>'
+
+    def read_file_header(self):
+        ''' Read in mat 5 file header '''
+        hdict = {}
+        hdr_dtype = MDTYPES[self.byte_order]['dtypes']['file_header']
+        hdr = read_dtype(self.mat_stream, hdr_dtype)
+        hdict['__header__'] = hdr['description'].item().strip(b' \t\n\000')
+        v_major = hdr['version'] >> 8
+        v_minor = hdr['version'] & 0xFF
+        hdict['__version__'] = '%d.%d' % (v_major, v_minor)
+        return hdict
+
+    def initialize_read(self):
+        ''' Run when beginning read of variables
+
+        Sets up readers from parameters in `self`
+        '''
+        # reader for top level stream. We need this extra top-level
+        # reader because we use the matrix_reader object to contain
+        # compressed matrices (so they have their own stream)
+        self._file_reader = VarReader5(self)
+        # reader for matrix streams
+        self._matrix_reader = VarReader5(self)
+
+    def read_var_header(self):
+        ''' Read header, return header, next position
+
+        Header has to define at least .name and .is_global
+
+        Parameters
+        ----------
+        None
+
+        Returns
+        -------
+        header : object
+           object that can be passed to self.read_var_array, and that
+           has attributes .name and .is_global
+        next_position : int
+           position in stream of next variable
+        '''
+        mdtype, byte_count = self._file_reader.read_full_tag()
+        if not byte_count > 0:
+            raise ValueError("Did not read any bytes")
+        next_pos = self.mat_stream.tell() + byte_count
+        if mdtype == miCOMPRESSED:
+            # Make new stream from compressed data
+            stream = ZlibInputStream(self.mat_stream, byte_count)
+            self._matrix_reader.set_stream(stream)
+            check_stream_limit = self.verify_compressed_data_integrity
+            mdtype, byte_count = self._matrix_reader.read_full_tag()
+        else:
+            check_stream_limit = False
+            self._matrix_reader.set_stream(self.mat_stream)
+        if mdtype != miMATRIX:
+            raise TypeError('Expecting miMATRIX type here, got %d' % mdtype)
+        header = self._matrix_reader.read_header(check_stream_limit)
+        return header, next_pos
+
+    def read_var_array(self, header, process=True):
+        ''' Read array, given `header`
+
+        Parameters
+        ----------
+        header : header object
+           object with fields defining variable header
+        process : {True, False} bool, optional
+           If True, apply recursive post-processing during loading of
+           array.
+
+        Returns
+        -------
+        arr : array
+           array with post-processing applied or not according to
+           `process`.
+        '''
+        return self._matrix_reader.array_from_header(header, process)
+
+    def get_variables(self, variable_names=None):
+        ''' get variables from stream as dictionary
+
+        variable_names   - optional list of variable names to get
+
+        If variable_names is None, then get all variables in file
+        '''
+        if isinstance(variable_names, str):
+            variable_names = [variable_names]
+        elif variable_names is not None:
+            variable_names = list(variable_names)
+
+        self.mat_stream.seek(0)
+        # Here we pass all the parameters in self to the reading objects
+        self.initialize_read()
+        mdict = self.read_file_header()
+        mdict['__globals__'] = []
+        while not self.end_of_stream():
+            hdr, next_position = self.read_var_header()
+            name = 'None' if hdr.name is None else hdr.name.decode('latin1')
+            if name in mdict:
+                warnings.warn('Duplicate variable name "%s" in stream'
+                              ' - replacing previous with new\n'
+                              'Consider mio5.varmats_from_mat to split '
+                              'file into single variable files' % name,
+                              MatReadWarning, stacklevel=2)
+            if name == '':
+                # can only be a matlab 7 function workspace
+                name = '__function_workspace__'
+                # We want to keep this raw because mat_dtype processing
+                # will break the format (uint8 as mxDOUBLE_CLASS)
+                process = False
+            else:
+                process = True
+            if variable_names is not None and name not in variable_names:
+                self.mat_stream.seek(next_position)
+                continue
+            try:
+                res = self.read_var_array(hdr, process)
+            except MatReadError as err:
+                warnings.warn(
+                    'Unreadable variable "%s", because "%s"' %
+                    (name, err),
+                    Warning, stacklevel=2)
+                res = "Read error: %s" % err
+            self.mat_stream.seek(next_position)
+            mdict[name] = res
+            if hdr.is_global:
+                mdict['__globals__'].append(name)
+            if variable_names is not None:
+                variable_names.remove(name)
+                if len(variable_names) == 0:
+                    break
+        if self.simplify_cells:
+            return _simplify_cells(mdict)
+        else:
+            return mdict
+
+    def list_variables(self):
+        ''' list variables from stream '''
+        self.mat_stream.seek(0)
+        # Here we pass all the parameters in self to the reading objects
+        self.initialize_read()
+        self.read_file_header()
+        vars = []
+        while not self.end_of_stream():
+            hdr, next_position = self.read_var_header()
+            name = 'None' if hdr.name is None else hdr.name.decode('latin1')
+            if name == '':
+                # can only be a matlab 7 function workspace
+                name = '__function_workspace__'
+
+            shape = self._matrix_reader.shape_from_header(hdr)
+            if hdr.is_logical:
+                info = 'logical'
+            else:
+                info = mclass_info.get(hdr.mclass, 'unknown')
+            vars.append((name, shape, info))
+
+            self.mat_stream.seek(next_position)
+        return vars
+
+
+def varmats_from_mat(file_obj):
+    """ Pull variables out of mat 5 file as a sequence of mat file objects
+
+    This can be useful with a difficult mat file, containing unreadable
+    variables. This routine pulls the variables out in raw form and puts them,
+    unread, back into a file stream for saving or reading. Another use is the
+    pathological case where there is more than one variable of the same name in
+    the file; this routine returns the duplicates, whereas the standard reader
+    will overwrite duplicates in the returned dictionary.
+
+    The file pointer in `file_obj` will be undefined. File pointers for the
+    returned file-like objects are set at 0.
+
+    Parameters
+    ----------
+    file_obj : file-like
+        file object containing mat file
+
+    Returns
+    -------
+    named_mats : list
+        list contains tuples of (name, BytesIO) where BytesIO is a file-like
+        object containing mat file contents as for a single variable. The
+        BytesIO contains a string with the original header and a single var. If
+        ``var_file_obj`` is an individual BytesIO instance, then save as a mat
+        file with something like ``open('test.mat',
+        'wb').write(var_file_obj.read())``
+
+    Examples
+    --------
+    >>> import scipy.io
+
+    >>> import numpy as np
+    >>> from io import BytesIO
+
+    >>> mat_fileobj = BytesIO()
+    >>> scipy.io.savemat(mat_fileobj, {'b': np.arange(10), 'a': 'a string'})
+    >>> varmats = varmats_from_mat(mat_fileobj)
+    >>> sorted([name for name, str_obj in varmats])
+    ['a', 'b']
+    """
+    rdr = MatFile5Reader(file_obj)
+    file_obj.seek(0)
+    # Raw read of top-level file header
+    hdr_len = MDTYPES[native_code]['dtypes']['file_header'].itemsize
+    raw_hdr = file_obj.read(hdr_len)
+    # Initialize variable reading
+    file_obj.seek(0)
+    rdr.initialize_read()
+    rdr.read_file_header()
+    next_position = file_obj.tell()
+    named_mats = []
+    while not rdr.end_of_stream():
+        start_position = next_position
+        hdr, next_position = rdr.read_var_header()
+        name = 'None' if hdr.name is None else hdr.name.decode('latin1')
+        # Read raw variable string
+        file_obj.seek(start_position)
+        byte_count = next_position - start_position
+        var_str = file_obj.read(byte_count)
+        # write to stringio object
+        out_obj = BytesIO()
+        out_obj.write(raw_hdr)
+        out_obj.write(var_str)
+        out_obj.seek(0)
+        named_mats.append((name, out_obj))
+    return named_mats
+
+
+class EmptyStructMarker:
+    """ Class to indicate presence of empty matlab struct on output """
+
+
+def to_writeable(source):
+    ''' Convert input object ``source`` to something we can write
+
+    Parameters
+    ----------
+    source : object
+
+    Returns
+    -------
+    arr : None or ndarray or EmptyStructMarker
+        If `source` cannot be converted to something we can write to a matfile,
+        return None.  If `source` is equivalent to an empty dictionary, return
+        ``EmptyStructMarker``.  Otherwise return `source` converted to an
+        ndarray with contents for writing to matfile.
+    '''
+    if isinstance(source, np.ndarray):
+        return source
+    if source is None:
+        return None
+    # Objects that implement mappings
+    is_mapping = (hasattr(source, 'keys') and hasattr(source, 'values') and
+                  hasattr(source, 'items'))
+    # Objects that don't implement mappings, but do have dicts
+    if isinstance(source, np.generic):
+        # NumPy scalars are never mappings (PyPy issue workaround)
+        pass
+    elif not is_mapping and hasattr(source, '__dict__'):
+        source = dict((key, value) for key, value in source.__dict__.items()
+                      if not key.startswith('_'))
+        is_mapping = True
+    if is_mapping:
+        dtype = []
+        values = []
+        for field, value in source.items():
+            if (isinstance(field, str) and
+                    field[0] not in '_0123456789'):
+                dtype.append((str(field), object))
+                values.append(value)
+        if dtype:
+            return np.array([tuple(values)], dtype)
+        else:
+            return EmptyStructMarker
+    # Next try and convert to an array
+    narr = np.asanyarray(source)
+    if narr.dtype.type in (object, np.object_) and \
+       narr.shape == () and narr == source:
+        # No interesting conversion possible
+        return None
+    return narr
+
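+# A minimal usage sketch of ``to_writeable`` (editor's illustration, not part
+# of the upstream module): mappings become one-element record arrays, an
+# empty mapping becomes the ``EmptyStructMarker`` sentinel, and objects with
+# no useful conversion map to None.
+#
+#     >>> to_writeable({'a': 1, 'b': 'x'}).dtype.names
+#     ('a', 'b')
+#     >>> to_writeable({}) is EmptyStructMarker
+#     True
+#     >>> to_writeable(object()) is None
+#     True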
+
+# Native byte ordered dtypes for convenience for writers
+NDT_FILE_HDR = MDTYPES[native_code]['dtypes']['file_header']
+NDT_TAG_FULL = MDTYPES[native_code]['dtypes']['tag_full']
+NDT_TAG_SMALL = MDTYPES[native_code]['dtypes']['tag_smalldata']
+NDT_ARRAY_FLAGS = MDTYPES[native_code]['dtypes']['array_flags']
+
+
+class VarWriter5:
+    ''' Generic matlab matrix writing class '''
+    mat_tag = np.zeros((), NDT_TAG_FULL)
+    mat_tag['mdtype'] = miMATRIX
+
+    def __init__(self, file_writer):
+        self.file_stream = file_writer.file_stream
+        self.unicode_strings = file_writer.unicode_strings
+        self.long_field_names = file_writer.long_field_names
+        self.oned_as = file_writer.oned_as
+        # These are used for top level writes, and unset after
+        self._var_name = None
+        self._var_is_global = False
+
+    def write_bytes(self, arr):
+        self.file_stream.write(arr.tobytes(order='F'))
+
+    def write_string(self, s):
+        self.file_stream.write(s)
+
+    def write_element(self, arr, mdtype=None):
+        ''' write tag and data '''
+        if mdtype is None:
+            mdtype = NP_TO_MTYPES[arr.dtype.str[1:]]
+        # Array needs to be in native byte order
+        if arr.dtype.byteorder == swapped_code:
+            arr = arr.byteswap().newbyteorder()
+        byte_count = arr.size*arr.itemsize
+        if byte_count <= 4:
+            self.write_smalldata_element(arr, mdtype, byte_count)
+        else:
+            self.write_regular_element(arr, mdtype, byte_count)
+
+    def write_smalldata_element(self, arr, mdtype, byte_count):
+        # write tag with embedded data
+        tag = np.zeros((), NDT_TAG_SMALL)
+        tag['byte_count_mdtype'] = (byte_count << 16) + mdtype
+        # If arr.tobytes() yields fewer than 4 bytes, the 'S4' field
+        # zero-pads the element as needed.
+        tag['data'] = arr.tobytes(order='F')
+        self.write_bytes(tag)
+
+    def write_regular_element(self, arr, mdtype, byte_count):
+        # write tag, data
+        tag = np.zeros((), NDT_TAG_FULL)
+        tag['mdtype'] = mdtype
+        tag['byte_count'] = byte_count
+        self.write_bytes(tag)
+        self.write_bytes(arr)
+        # pad to next 64-bit boundary
+        bc_mod_8 = byte_count % 8
+        if bc_mod_8:
+            self.file_stream.write(b'\x00' * (8-bc_mod_8))
+
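+    # Worked example of the two element layouts (editor's note): a 3-element
+    # int32 array has byte_count == 12, so it takes the regular path: an
+    # 8-byte (mdtype, byte_count) tag, 12 data bytes, then 4 padding bytes to
+    # reach the next 64-bit boundary (12 % 8 == 4, pad == 8 - 4). A single
+    # int32 (byte_count == 4) instead fits the small-data format: one 8-byte
+    # tag with the data embedded, and no padding.
+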
+    def write_header(self,
+                     shape,
+                     mclass,
+                     is_complex=False,
+                     is_logical=False,
+                     nzmax=0):
+        ''' Write header for given data options
+
+        Parameters
+        ----------
+        shape : sequence
+           array shape
+        mclass : int
+           mat5 matrix class code
+        is_complex : bool, optional
+           True if matrix is complex
+        is_logical : bool, optional
+           True if matrix is logical
+        nzmax : int, optional
+           max non-zero elements for sparse arrays
+
+        We get the name and the global flag from the object, and reset
+        them to defaults after we've used them.
+        '''
+        # get name and is_global from one-shot object store
+        name = self._var_name
+        is_global = self._var_is_global
+        # initialize the top-level matrix tag, store position
+        self._mat_tag_pos = self.file_stream.tell()
+        self.write_bytes(self.mat_tag)
+        # write array flags (complex, global, logical, class, nzmax)
+        af = np.zeros((), NDT_ARRAY_FLAGS)
+        af['data_type'] = miUINT32
+        af['byte_count'] = 8
+        flags = is_complex << 3 | is_global << 2 | is_logical << 1
+        af['flags_class'] = mclass | flags << 8
+        af['nzmax'] = nzmax
+        self.write_bytes(af)
+        # shape
+        self.write_element(np.array(shape, dtype='i4'))
+        # write name
+        name = np.asarray(name)
+        if name == '':  # empty string zero-terminated
+            self.write_smalldata_element(name, miINT8, 0)
+        else:
+            self.write_element(name, miINT8)
+        # reset the one-shot store to defaults
+        self._var_name = ''
+        self._var_is_global = False
+
+    def update_matrix_tag(self, start_pos):
+        curr_pos = self.file_stream.tell()
+        self.file_stream.seek(start_pos)
+        byte_count = curr_pos - start_pos - 8
+        if byte_count >= 2**32:
+            raise MatWriteError("Matrix too large to save with Matlab "
+                                "5 format")
+        self.mat_tag['byte_count'] = byte_count
+        self.write_bytes(self.mat_tag)
+        self.file_stream.seek(curr_pos)
+
+    def write_top(self, arr, name, is_global):
+        """ Write variable at top level of mat file
+
+        Parameters
+        ----------
+        arr : array_like
+            array-like object to create writer for
+        name : str, optional
+            name as it will appear in matlab workspace
+            default is empty string
+        is_global : {False, True}, optional
+            whether variable will be global on load into matlab
+        """
+        # these are set before the top-level header write, and unset at
+        # the end of the same write, because they do not apply for lower levels
+        self._var_is_global = is_global
+        self._var_name = name
+        # write the header and data
+        self.write(arr)
+
+    def write(self, arr):
+        ''' Write `arr` to stream at top and sub levels
+
+        Parameters
+        ----------
+        arr : array_like
+            array-like object to create writer for
+        '''
+        # store position, so we can update the matrix tag
+        mat_tag_pos = self.file_stream.tell()
+        # First check if these are sparse
+        if scipy.sparse.issparse(arr):
+            self.write_sparse(arr)
+            self.update_matrix_tag(mat_tag_pos)
+            return
+        # Try to convert things that aren't arrays
+        narr = to_writeable(arr)
+        if narr is None:
+            raise TypeError('Could not convert %s (type %s) to array'
+                            % (arr, type(arr)))
+        if isinstance(narr, MatlabObject):
+            self.write_object(narr)
+        elif isinstance(narr, MatlabFunction):
+            raise MatWriteError('Cannot write matlab functions')
+        elif narr is EmptyStructMarker:  # empty struct array
+            self.write_empty_struct()
+        elif narr.dtype.fields:  # struct array
+            self.write_struct(narr)
+        elif narr.dtype.hasobject:  # cell array
+            self.write_cells(narr)
+        elif narr.dtype.kind in ('U', 'S'):
+            if self.unicode_strings:
+                codec = 'UTF8'
+            else:
+                codec = 'ascii'
+            self.write_char(narr, codec)
+        else:
+            self.write_numeric(narr)
+        self.update_matrix_tag(mat_tag_pos)
+
+    def write_numeric(self, arr):
+        imagf = arr.dtype.kind == 'c'
+        logif = arr.dtype.kind == 'b'
+        try:
+            mclass = NP_TO_MXTYPES[arr.dtype.str[1:]]
+        except KeyError:
+            # No matching matlab type, probably complex256 / float128 / float96
+            # Cast data to complex128 / float64.
+            if imagf:
+                arr = arr.astype('c128')
+            elif logif:
+                arr = arr.astype('i1')  # Should only contain 0/1
+            else:
+                arr = arr.astype('f8')
+            mclass = mxDOUBLE_CLASS
+        self.write_header(matdims(arr, self.oned_as),
+                          mclass,
+                          is_complex=imagf,
+                          is_logical=logif)
+        if imagf:
+            self.write_element(arr.real)
+            self.write_element(arr.imag)
+        else:
+            self.write_element(arr)
+
+    def write_char(self, arr, codec='ascii'):
+        ''' Write string array `arr` with given `codec`
+        '''
+        if arr.size == 0 or np.all(arr == ''):
+            # This is an empty string array or a string array containing
+            # only empty strings. Matlab cannot distinguish between a
+            # string array that is empty, and a string array containing
+            # only empty strings, because it stores strings as arrays of
+            # char. There is no way of having an array of char that is
+            # not empty, but contains an empty string. We have to
+            # special-case the array-with-empty-strings because even
+            # empty strings have zero padding, which would otherwise
+            # appear in matlab as a string with a space.
+            shape = (0,) * np.max([arr.ndim, 2])
+            self.write_header(shape, mxCHAR_CLASS)
+            self.write_smalldata_element(arr, miUTF8, 0)
+            return
+        # non-empty string.
+        #
+        # Convert to char array
+        arr = arr_to_chars(arr)
+        # We have to write the shape directly, because we are going to
+        # recode the characters, and the resulting stream of chars
+        # may have a different length
+        shape = arr.shape
+        self.write_header(shape, mxCHAR_CLASS)
+        if arr.dtype.kind == 'U' and arr.size:
+            # Make one long string from all the characters. We need to
+            # transpose here, because we're flattening the array, before
+            # we write the bytes. The bytes have to be written in
+            # Fortran order.
+            n_chars = np.prod(shape)
+            st_arr = np.ndarray(shape=(),
+                                dtype=arr_dtype_number(arr, n_chars),
+                                buffer=arr.T.copy())  # Fortran order
+            # Recode with codec to give byte string
+            st = st_arr.item().encode(codec)
+            # Reconstruct as 1-D byte array
+            arr = np.ndarray(shape=(len(st),),
+                             dtype='S1',
+                             buffer=st)
+        self.write_element(arr, mdtype=miUTF8)
+
+    def write_sparse(self, arr):
+        ''' Sparse matrices are 2D
+        '''
+        A = arr.tocsc()  # convert to sparse CSC format
+        A.sort_indices()     # MATLAB expects sorted row indices
+        is_complex = (A.dtype.kind == 'c')
+        is_logical = (A.dtype.kind == 'b')
+        nz = A.nnz
+        self.write_header(matdims(arr, self.oned_as),
+                          mxSPARSE_CLASS,
+                          is_complex=is_complex,
+                          is_logical=is_logical,
+                          # matlab won't load file with 0 nzmax
+                          nzmax=1 if nz == 0 else nz)
+        self.write_element(A.indices.astype('i4'))
+        self.write_element(A.indptr.astype('i4'))
+        self.write_element(A.data.real)
+        if is_complex:
+            self.write_element(A.data.imag)
+
+    def write_cells(self, arr):
+        self.write_header(matdims(arr, self.oned_as),
+                          mxCELL_CLASS)
+        # loop over data, column major
+        A = np.atleast_2d(arr).flatten('F')
+        for el in A:
+            self.write(el)
+
+    def write_empty_struct(self):
+        self.write_header((1, 1), mxSTRUCT_CLASS)
+        # max field name length set to 1 in an example matlab struct
+        self.write_element(np.array(1, dtype=np.int32))
+        # Field names element is empty
+        self.write_element(np.array([], dtype=np.int8))
+
+    def write_struct(self, arr):
+        self.write_header(matdims(arr, self.oned_as),
+                          mxSTRUCT_CLASS)
+        self._write_items(arr)
+
+    def _write_items(self, arr):
+        # write fieldnames
+        fieldnames = [f[0] for f in arr.dtype.descr]
+        length = max([len(fieldname) for fieldname in fieldnames])+1
+        max_length = 64 if self.long_field_names else 32
+        if length > max_length:
+            raise ValueError("Field names are restricted to %d characters" %
+                             (max_length-1))
+        self.write_element(np.array([length], dtype='i4'))
+        self.write_element(
+            np.array(fieldnames, dtype='S%d' % (length)),
+            mdtype=miINT8)
+        A = np.atleast_2d(arr).flatten('F')
+        for el in A:
+            for f in fieldnames:
+                self.write(el[f])
+
+    def write_object(self, arr):
+        '''Same as writing structs, except different mx class, and extra
+        classname element after header
+        '''
+        self.write_header(matdims(arr, self.oned_as),
+                          mxOBJECT_CLASS)
+        self.write_element(np.array(arr.classname, dtype='S'),
+                           mdtype=miINT8)
+        self._write_items(arr)
+
+
+class MatFile5Writer:
+    ''' Class for writing mat5 files '''
+
+    @docfiller
+    def __init__(self, file_stream,
+                 do_compression=False,
+                 unicode_strings=False,
+                 global_vars=None,
+                 long_field_names=False,
+                 oned_as='row'):
+        ''' Initialize writer for matlab 5 format files
+
+        Parameters
+        ----------
+        %(do_compression)s
+        %(unicode_strings)s
+        global_vars : None or sequence of strings, optional
+            Names of variables to be marked as global for matlab
+        %(long_fields)s
+        %(oned_as)s
+        '''
+        self.file_stream = file_stream
+        self.do_compression = do_compression
+        self.unicode_strings = unicode_strings
+        if global_vars:
+            self.global_vars = global_vars
+        else:
+            self.global_vars = []
+        self.long_field_names = long_field_names
+        self.oned_as = oned_as
+        self._matrix_writer = None
+
+    def write_file_header(self):
+        # write header
+        hdr = np.zeros((), NDT_FILE_HDR)
+        hdr['description'] = 'MATLAB 5.0 MAT-file Platform: %s, Created on: %s' \
+            % (os.name, time.asctime())
+        hdr['version'] = 0x0100
+        hdr['endian_test'] = np.ndarray(shape=(),
+                                      dtype='S2',
+                                      buffer=np.uint16(0x4d49))
+        self.file_stream.write(hdr.tobytes())
+
+    def put_variables(self, mdict, write_header=None):
+        ''' Write variables in `mdict` to stream
+
+        Parameters
+        ----------
+        mdict : mapping
+           mapping whose ``items`` method returns (name, contents) pairs,
+           where ``name`` will appear in the MATLAB workspace on file load
+           and ``contents`` is something writeable to a MATLAB file, such
+           as a NumPy array.
+        write_header : {None, True, False}, optional
+           If True, then write the matlab file header before writing the
+           variables. If None (the default) then write the file header
+           if we are at position 0 in the stream. By setting False
+           here, and setting the stream position to the end of the file,
+           you can append variables to a matlab file
+        '''
+        # write header if requested, or None and start of file
+        if write_header is None:
+            write_header = self.file_stream.tell() == 0
+        if write_header:
+            self.write_file_header()
+        self._matrix_writer = VarWriter5(self)
+        for name, var in mdict.items():
+            if name[0] == '_':
+                continue
+            is_global = name in self.global_vars
+            if self.do_compression:
+                stream = BytesIO()
+                self._matrix_writer.file_stream = stream
+                self._matrix_writer.write_top(var, name.encode('latin1'), is_global)
+                out_str = zlib.compress(stream.getvalue())
+                tag = np.empty((), NDT_TAG_FULL)
+                tag['mdtype'] = miCOMPRESSED
+                tag['byte_count'] = len(out_str)
+                self.file_stream.write(tag.tobytes())
+                self.file_stream.write(out_str)
+            else:  # not compressing
+                self._matrix_writer.write_top(var, name.encode('latin1'), is_global)
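+
+# Minimal usage sketch (editor's illustration; in practice scipy.io.savemat
+# drives this class rather than user code):
+#
+#     from io import BytesIO
+#     import numpy as np
+#
+#     stream = BytesIO()
+#     writer = MatFile5Writer(stream, do_compression=True)
+#     writer.put_variables({'x': np.arange(3.0)})
+#     # stream.getvalue() now holds a loadable MATLAB 5 .mat file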
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/_mio5_params.py b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/_mio5_params.py
new file mode 100644
index 00000000..db9e5a61
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/_mio5_params.py
@@ -0,0 +1,280 @@
+''' Constants and classes for matlab 5 read and write
+
+See also mio5_utils.pyx where these same constants arise as c enums.
+
+If you make changes in this file, don't forget to change mio5_utils.pyx
+'''
+import numpy as np
+
+from ._miobase import convert_dtypes
+
+
+__all__ = [
+    'MDTYPES', 'MatlabFunction', 'MatlabObject', 'MatlabOpaque',
+    'NP_TO_MTYPES', 'NP_TO_MXTYPES', 'OPAQUE_DTYPE', 'codecs_template',
+    'mat_struct', 'mclass_dtypes_template', 'mclass_info', 'mdtypes_template',
+    'miCOMPRESSED', 'miDOUBLE', 'miINT16', 'miINT32', 'miINT64', 'miINT8',
+    'miMATRIX', 'miSINGLE', 'miUINT16', 'miUINT32', 'miUINT64', 'miUINT8',
+    'miUTF16', 'miUTF32', 'miUTF8', 'mxCELL_CLASS', 'mxCHAR_CLASS',
+    'mxDOUBLE_CLASS', 'mxFUNCTION_CLASS', 'mxINT16_CLASS', 'mxINT32_CLASS',
+    'mxINT64_CLASS', 'mxINT8_CLASS', 'mxOBJECT_CLASS',
+    'mxOBJECT_CLASS_FROM_MATRIX_H', 'mxOPAQUE_CLASS', 'mxSINGLE_CLASS',
+    'mxSPARSE_CLASS', 'mxSTRUCT_CLASS', 'mxUINT16_CLASS', 'mxUINT32_CLASS',
+    'mxUINT64_CLASS', 'mxUINT8_CLASS'
+]
+miINT8 = 1
+miUINT8 = 2
+miINT16 = 3
+miUINT16 = 4
+miINT32 = 5
+miUINT32 = 6
+miSINGLE = 7
+miDOUBLE = 9
+miINT64 = 12
+miUINT64 = 13
+miMATRIX = 14
+miCOMPRESSED = 15
+miUTF8 = 16
+miUTF16 = 17
+miUTF32 = 18
+
+mxCELL_CLASS = 1
+mxSTRUCT_CLASS = 2
+# The March 2008 edition of "Matlab 7 MAT-File Format" says that
+# mxOBJECT_CLASS = 3, whereas matrix.h says that mxLOGICAL = 3.
+# Matlab 2008a appears to save logicals as type 9, so we assume that
+# the document is correct. See type 18, below.
+mxOBJECT_CLASS = 3
+mxCHAR_CLASS = 4
+mxSPARSE_CLASS = 5
+mxDOUBLE_CLASS = 6
+mxSINGLE_CLASS = 7
+mxINT8_CLASS = 8
+mxUINT8_CLASS = 9
+mxINT16_CLASS = 10
+mxUINT16_CLASS = 11
+mxINT32_CLASS = 12
+mxUINT32_CLASS = 13
+# The following are not in the March 2008 edition of "Matlab 7
+# MAT-File Format," but were guessed from matrix.h.
+mxINT64_CLASS = 14
+mxUINT64_CLASS = 15
+mxFUNCTION_CLASS = 16
+# Not doing anything with these at the moment.
+mxOPAQUE_CLASS = 17  # This appears to be a function workspace
+# Thread 'saving/loading symbol table of anonymous functions', octave-maintainers, April-May 2007
+# https://lists.gnu.org/archive/html/octave-maintainers/2007-04/msg00031.html
+# https://lists.gnu.org/archive/html/octave-maintainers/2007-05/msg00032.html
+# (Was/Deprecated: https://www-old.cae.wisc.edu/pipermail/octave-maintainers/2007-May/002824.html)
+mxOBJECT_CLASS_FROM_MATRIX_H = 18
+
+mdtypes_template = {
+    miINT8: 'i1',
+    miUINT8: 'u1',
+    miINT16: 'i2',
+    miUINT16: 'u2',
+    miINT32: 'i4',
+    miUINT32: 'u4',
+    miSINGLE: 'f4',
+    miDOUBLE: 'f8',
+    miINT64: 'i8',
+    miUINT64: 'u8',
+    miUTF8: 'u1',
+    miUTF16: 'u2',
+    miUTF32: 'u4',
+    'file_header': [('description', 'S116'),
+                    ('subsystem_offset', 'i8'),
+                    ('version', 'u2'),
+                    ('endian_test', 'S2')],
+    'tag_full': [('mdtype', 'u4'), ('byte_count', 'u4')],
+    'tag_smalldata': [('byte_count_mdtype', 'u4'), ('data', 'S4')],
+    'array_flags': [('data_type', 'u4'),
+                    ('byte_count', 'u4'),
+                    ('flags_class', 'u4'),
+                    ('nzmax', 'u4')],
+    'U1': 'U1',
+    }
+
+mclass_dtypes_template = {
+    mxINT8_CLASS: 'i1',
+    mxUINT8_CLASS: 'u1',
+    mxINT16_CLASS: 'i2',
+    mxUINT16_CLASS: 'u2',
+    mxINT32_CLASS: 'i4',
+    mxUINT32_CLASS: 'u4',
+    mxINT64_CLASS: 'i8',
+    mxUINT64_CLASS: 'u8',
+    mxSINGLE_CLASS: 'f4',
+    mxDOUBLE_CLASS: 'f8',
+    }
+
+mclass_info = {
+    mxINT8_CLASS: 'int8',
+    mxUINT8_CLASS: 'uint8',
+    mxINT16_CLASS: 'int16',
+    mxUINT16_CLASS: 'uint16',
+    mxINT32_CLASS: 'int32',
+    mxUINT32_CLASS: 'uint32',
+    mxINT64_CLASS: 'int64',
+    mxUINT64_CLASS: 'uint64',
+    mxSINGLE_CLASS: 'single',
+    mxDOUBLE_CLASS: 'double',
+    mxCELL_CLASS: 'cell',
+    mxSTRUCT_CLASS: 'struct',
+    mxOBJECT_CLASS: 'object',
+    mxCHAR_CLASS: 'char',
+    mxSPARSE_CLASS: 'sparse',
+    mxFUNCTION_CLASS: 'function',
+    mxOPAQUE_CLASS: 'opaque',
+    }
+
+NP_TO_MTYPES = {
+    'f8': miDOUBLE,
+    'c32': miDOUBLE,
+    'c24': miDOUBLE,
+    'c16': miDOUBLE,
+    'f4': miSINGLE,
+    'c8': miSINGLE,
+    'i8': miINT64,
+    'i4': miINT32,
+    'i2': miINT16,
+    'i1': miINT8,
+    'u8': miUINT64,
+    'u4': miUINT32,
+    'u2': miUINT16,
+    'u1': miUINT8,
+    'S1': miUINT8,
+    'U1': miUTF16,
+    'b1': miUINT8,  # not standard but seems MATLAB uses this (gh-4022)
+    }
+
+
+NP_TO_MXTYPES = {
+    'f8': mxDOUBLE_CLASS,
+    'c32': mxDOUBLE_CLASS,
+    'c24': mxDOUBLE_CLASS,
+    'c16': mxDOUBLE_CLASS,
+    'f4': mxSINGLE_CLASS,
+    'c8': mxSINGLE_CLASS,
+    'i8': mxINT64_CLASS,
+    'i4': mxINT32_CLASS,
+    'i2': mxINT16_CLASS,
+    'i1': mxINT8_CLASS,
+    'u8': mxUINT64_CLASS,
+    'u4': mxUINT32_CLASS,
+    'u2': mxUINT16_CLASS,
+    'u1': mxUINT8_CLASS,
+    'S1': mxUINT8_CLASS,
+    'b1': mxUINT8_CLASS,  # not standard but seems MATLAB uses this
+    }
+
+''' Before release v7.1 (release 14) MATLAB (TM) used the system
+default character encoding scheme padded out to 16 bits. Release 14
+and later use Unicode. When saving character data, R14 checks whether
+it can be encoded in 7-bit ASCII, and saves in that format if so.'''
+
+codecs_template = {
+    miUTF8: {'codec': 'utf_8', 'width': 1},
+    miUTF16: {'codec': 'utf_16', 'width': 2},
+    miUTF32: {'codec': 'utf_32', 'width': 4},
+    }
+
+
+def _convert_codecs(template, byte_order):
+    ''' Convert codec template mapping to byte order
+
+    Set codecs not on this system to None
+
+    Parameters
+    ----------
+    template : mapping
+       key, value are respectively codec name, and root name for codec
+       (without byte order suffix)
+    byte_order : {'<', '>'}
+       code for little or big endian
+
+    Returns
+    -------
+    codecs : dict
+       key, value are name, codec (as in .encode(codec))
+    '''
+    codecs = {}
+    postfix = '_le' if byte_order == '<' else '_be'
+    for k, v in template.items():
+        codec = v['codec']
+        try:
+            " ".encode(codec)
+        except LookupError:
+            codecs[k] = None
+            continue
+        if v['width'] > 1:
+            codec += postfix
+        codecs[k] = codec
+    return codecs.copy()
+
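+# For example (editor's note), on a little-endian system
+# ``_convert_codecs(codecs_template, '<')`` maps miUTF16 to 'utf_16_le' and
+# miUTF32 to 'utf_32_le', while miUTF8 keeps the plain 'utf_8' codec because
+# its width is 1 and byte order does not apply.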
+
+MDTYPES = {}
+for _bytecode in '<>':
+    _def = {'dtypes': convert_dtypes(mdtypes_template, _bytecode),
+            'classes': convert_dtypes(mclass_dtypes_template, _bytecode),
+            'codecs': _convert_codecs(codecs_template, _bytecode)}
+    MDTYPES[_bytecode] = _def
+
+
+class mat_struct:
+    """Placeholder for holding read data from structs.
+
+    We use instances of this class when the user passes False as a value to the
+    ``struct_as_record`` parameter of the :func:`scipy.io.loadmat` function.
+    """
+    pass
+
+
+class MatlabObject(np.ndarray):
+    """Subclass of ndarray to signal this is a matlab object.
+
+    This is a simple subclass of :class:`numpy.ndarray` meant to be used
+    by :func:`scipy.io.loadmat` and should not be instantiated directly.
+    """
+
+    def __new__(cls, input_array, classname=None):
+        # Input array is an already formed ndarray instance
+        # We first cast to be our class type
+        obj = np.asarray(input_array).view(cls)
+        # add the new attribute to the created instance
+        obj.classname = classname
+        # Finally, we must return the newly created object:
+        return obj
+
+    def __array_finalize__(self, obj):
+        # reset the attribute from passed original object
+        self.classname = getattr(obj, 'classname', None)
+        # We do not need to return anything
+
+
+class MatlabFunction(np.ndarray):
+    """Subclass for a MATLAB function.
+
+    This is a simple subclass of :class:`numpy.ndarray` meant to be used
+    by :func:`scipy.io.loadmat` and should not be directly instantiated.
+    """
+
+    def __new__(cls, input_array):
+        obj = np.asarray(input_array).view(cls)
+        return obj
+
+
+class MatlabOpaque(np.ndarray):
+    """Subclass for a MATLAB opaque matrix.
+
+    This is a simple subclass of :class:`numpy.ndarray` meant to be used
+    by :func:`scipy.io.loadmat` and should not be directly instantiated.
+    """
+
+    def __new__(cls, input_array):
+        obj = np.asarray(input_array).view(cls)
+        return obj
+
+
+OPAQUE_DTYPE = np.dtype(
+    [('s0', 'O'), ('s1', 'O'), ('s2', 'O'), ('arr', 'O')])
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/_miobase.py b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/_miobase.py
new file mode 100644
index 00000000..a10ab9b5
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/_miobase.py
@@ -0,0 +1,429 @@
+# Authors: Travis Oliphant, Matthew Brett
+
+"""
+Base classes for MATLAB file stream reading.
+
+MATLAB is a registered trademark of the Mathworks inc.
+"""
+import operator
+import functools
+
+import numpy as np
+from scipy._lib import doccer
+
+from . import _byteordercodes as boc
+
+__all__ = [
+    'MatFileReader', 'MatReadError', 'MatReadWarning',
+    'MatVarReader', 'MatWriteError', 'arr_dtype_number',
+    'arr_to_chars', 'convert_dtypes', 'doc_dict',
+    'docfiller', 'get_matfile_version',
+    'matdims', 'read_dtype'
+]
+
+class MatReadError(Exception):
+    """Exception indicating a read issue."""
+
+
+class MatWriteError(Exception):
+    """Exception indicating a write issue."""
+
+
+class MatReadWarning(UserWarning):
+    """Warning class for read issues."""
+
+
+doc_dict = \
+    {'file_arg':
+         '''file_name : str
+   Name of the mat file (the .mat extension is not needed if
+   appendmat==True). Can also pass an open file-like object.''',
+     'append_arg':
+         '''appendmat : bool, optional
+   True to append the .mat extension to the end of the given
+   filename, if not already present. Default is True.''',
+     'load_args':
+         '''byte_order : str or None, optional
+   None by default, implying byte order guessed from mat
+   file. Otherwise can be one of ('native', '=', 'little', '<',
+   'BIG', '>').
+mat_dtype : bool, optional
+   If True, return arrays in same dtype as would be loaded into
+   MATLAB (instead of the dtype with which they are saved).
+squeeze_me : bool, optional
+   Whether to squeeze unit matrix dimensions or not.
+chars_as_strings : bool, optional
+   Whether to convert char arrays to string arrays.
+matlab_compatible : bool, optional
+   Returns matrices as would be loaded by MATLAB (implies
+   squeeze_me=False, chars_as_strings=False, mat_dtype=True,
+   struct_as_record=True).''',
+     'struct_arg':
+         '''struct_as_record : bool, optional
+   Whether to load MATLAB structs as NumPy record arrays, or as
+   old-style NumPy arrays with dtype=object. Setting this flag to
+   False replicates the behavior of SciPy version 0.7.x (returning
+   numpy object arrays). The default setting is True, because it
+   allows easier round-trip load and save of MATLAB files.''',
+     'matstream_arg':
+         '''mat_stream : file-like
+   Object with file API, open for reading.''',
+     'long_fields':
+         '''long_field_names : bool, optional
+   * False - maximum field name length in a structure is 31 characters
+     which is the documented maximum length. This is the default.
+   * True - maximum field name length in a structure is 63 characters
+     which works for MATLAB 7.6''',
+     'do_compression':
+         '''do_compression : bool, optional
+   Whether to compress matrices on write. Default is False.''',
+     'oned_as':
+         '''oned_as : {'row', 'column'}, optional
+   If 'column', write 1-D NumPy arrays as column vectors.
+   If 'row', write 1D NumPy arrays as row vectors.''',
+     'unicode_strings':
+         '''unicode_strings : bool, optional
+   If True, write strings as Unicode, else MATLAB usual encoding.'''}
+
+docfiller = doccer.filldoc(doc_dict)
+
+'''
+
+Note on architecture
+====================
+
+There are three sets of parameters relevant for reading files. The
+first are *file read parameters* - containing options that are common
+for reading the whole file, and therefore every variable within that
+file. At the moment these are:
+
+* mat_stream
+* dtypes (derived from byte code)
+* byte_order
+* chars_as_strings
+* squeeze_me
+* struct_as_record (MATLAB 5 files)
+* class_dtypes (derived from order code, MATLAB 5 files)
+* codecs (MATLAB 5 files)
+* uint16_codec (MATLAB 5 files)
+
+Another set of parameters are those that apply only to the current
+variable being read - the *header*:
+
+* header related variables (different for v4 and v5 mat files)
+* is_complex
+* mclass
+* var_stream
+
+With the header, we need ``next_position`` to tell us where the next
+variable in the stream is.
+
+Then, for each element in a matrix, there can be *element read
+parameters*. An element is, for example, one element in a MATLAB cell
+array. At the moment, these are:
+
+* mat_dtype
+
+The file-reading object contains the *file read parameters*. The
+*header* is passed around as a data object, or may be read and discarded
+in a single function. The *element read parameters* (here just
+``mat_dtype``) are passed into a general post-processing function; see
+``mio_utils`` for details.
+'''
+
+
+def convert_dtypes(dtype_template, order_code):
+    ''' Convert dtypes in mapping to given order
+
+    Parameters
+    ----------
+    dtype_template : mapping
+       mapping with values returning numpy dtype from ``np.dtype(val)``
+    order_code : str
+       an order code suitable for using in ``dtype.newbyteorder()``
+
+    Returns
+    -------
+    dtypes : mapping
+       mapping where values have been replaced by
+       ``np.dtype(val).newbyteorder(order_code)``
+
+    '''
+    dtypes = dtype_template.copy()
+    for k in dtypes:
+        dtypes[k] = np.dtype(dtypes[k]).newbyteorder(order_code)
+    return dtypes
+
+
+def read_dtype(mat_stream, a_dtype):
+    """
+    Generic get of byte stream data of known type
+
+    Parameters
+    ----------
+    mat_stream : file-like
+        MATLAB (tm) mat file stream
+    a_dtype : dtype
+        dtype of array to read. `a_dtype` is assumed to have the
+        correct endianness.
+
+    Returns
+    -------
+    arr : ndarray
+        Array of dtype `a_dtype` read from stream.
+
+    """
+    num_bytes = a_dtype.itemsize
+    arr = np.ndarray(shape=(),
+                     dtype=a_dtype,
+                     buffer=mat_stream.read(num_bytes),
+                     order='F')
+    return arr
+
+
+def matfile_version(file_name, *, appendmat=True):
+    """
+    Return major, minor tuple depending on apparent mat file type
+
+    Where:
+
+     #. 0,x -> version 4 format mat files
+     #. 1,x -> version 5 format mat files
+     #. 2,x -> version 7.3 format mat files (HDF format)
+
+    Parameters
+    ----------
+    file_name : str
+       Name of the mat file (do not need .mat extension if
+       appendmat==True). Can also pass open file-like object.
+    appendmat : bool, optional
+       True to append the .mat extension to the end of the given
+       filename, if not already present. Default is True.
+
+    Returns
+    -------
+    major_version : {0, 1, 2}
+        major MATLAB File format version
+    minor_version : int
+        minor MATLAB file format version
+
+    Raises
+    ------
+    MatReadError
+        If the file is empty.
+    ValueError
+        The matfile version is unknown.
+
+    Notes
+    -----
+    Has the side effect of setting the file read pointer to 0
+    """
+    from ._mio import _open_file_context
+    with _open_file_context(file_name, appendmat=appendmat) as fileobj:
+        return _get_matfile_version(fileobj)
+
+
+get_matfile_version = matfile_version
+
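+# Usage sketch (editor's illustration; 'data.mat' is a hypothetical file
+# name):
+#
+#     >>> matfile_version('data.mat')
+#     (1, 0)
+#
+# would indicate a version 5 format MAT-file, per the mapping above.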
+
+def _get_matfile_version(fileobj):
+    # Mat4 files have a zero somewhere in first 4 bytes
+    fileobj.seek(0)
+    mopt_bytes = fileobj.read(4)
+    if len(mopt_bytes) == 0:
+        raise MatReadError("Mat file appears to be empty")
+    mopt_ints = np.ndarray(shape=(4,), dtype=np.uint8, buffer=mopt_bytes)
+    if 0 in mopt_ints:
+        fileobj.seek(0)
+        return (0,0)
+    # For 5 format or 7.3 format we need to read an integer in the
+    # header. Bytes 124 through 128 contain a version integer and an
+    # endian test string
+    fileobj.seek(124)
+    tst_str = fileobj.read(4)
+    fileobj.seek(0)
+    maj_ind = int(tst_str[2] == b'I'[0])
+    maj_val = int(tst_str[maj_ind])
+    min_val = int(tst_str[1 - maj_ind])
+    ret = (maj_val, min_val)
+    if maj_val in (1, 2):
+        return ret
+    raise ValueError('Unknown mat file type, version %s, %s' % ret)
+
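+# Editor's note on the check above: a little-endian v5 writer stores the
+# version integer 0x0100 at byte 124 as 00 01 and the endian test string as
+# b'IM' at byte 126; a big-endian writer stores b'MI' there instead. The
+# position of the b'I' byte therefore selects which of the two version bytes
+# is the major version.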
+
+def matdims(arr, oned_as='column'):
+    """
+    Determine equivalent MATLAB dimensions for given array
+
+    Parameters
+    ----------
+    arr : ndarray
+        Input array
+    oned_as : {'column', 'row'}, optional
+        Whether 1-D arrays are returned as MATLAB row or column matrices.
+        Default is 'column'.
+
+    Returns
+    -------
+    dims : tuple
+        Shape tuple, in the form MATLAB expects it.
+
+    Notes
+    -----
+    We had to decide what shape a 1 dimensional array would be by
+    default. ``np.atleast_2d`` thinks it is a row vector. The
+    default for a vector in MATLAB (e.g., ``>> 1:12``) is a row vector.
+
+    Versions of scipy up to and including 0.11 resulted (accidentally)
+    in 1-D arrays being read as column vectors. For the moment, we
+    maintain the same tradition here.
+
+    Examples
+    --------
+    >>> matdims(np.array(1)) # NumPy scalar
+    (1, 1)
+    >>> matdims(np.array([1])) # 1-D array, 1 element
+    (1, 1)
+    >>> matdims(np.array([1,2])) # 1-D array, 2 elements
+    (2, 1)
+    >>> matdims(np.array([[2],[3]])) # 2-D array, column vector
+    (2, 1)
+    >>> matdims(np.array([[2,3]])) # 2-D array, row vector
+    (1, 2)
+    >>> matdims(np.array([[[2,3]]])) # 3-D array, rowish vector
+    (1, 1, 2)
+    >>> matdims(np.array([])) # empty 1-D array
+    (0, 0)
+    >>> matdims(np.array([[]])) # empty 2-D array
+    (0, 0)
+    >>> matdims(np.array([[[]]])) # empty 3-D array
+    (0, 0, 0)
+
+    Optional argument flips 1-D shape behavior.
+
+    >>> matdims(np.array([1,2]), 'row') # 1-D array, 2 elements
+    (1, 2)
+
+    The argument has to make sense though
+
+    >>> matdims(np.array([1,2]), 'bizarre')
+    Traceback (most recent call last):
+       ...
+    ValueError: 1-D option "bizarre" is strange
+
+    """
+    shape = arr.shape
+    if shape == ():  # scalar
+        return (1, 1)
+    if len(shape) == 1:  # 1D
+        if shape[0] == 0:
+            return (0, 0)
+        elif oned_as == 'column':
+            return shape + (1,)
+        elif oned_as == 'row':
+            return (1,) + shape
+        else:
+            raise ValueError('1-D option "%s" is strange'
+                             % oned_as)
+    return shape
+
+
+class MatVarReader:
+    ''' Abstract class defining required interface for var readers'''
+    def __init__(self, file_reader):
+        pass
+
+    def read_header(self):
+        ''' Returns header '''
+        pass
+
+    def array_from_header(self, header):
+        ''' Reads array given header '''
+        pass
+
+
+class MatFileReader:
+    """ Base object for reading mat files
+
+    To make this class functional, you will need to override the
+    following methods:
+
+    matrix_getter_factory   - gives object to fetch next matrix from stream
+    guess_byte_order        - guesses file byte order from file
+    """
+
+    @docfiller
+    def __init__(self, mat_stream,
+                 byte_order=None,
+                 mat_dtype=False,
+                 squeeze_me=False,
+                 chars_as_strings=True,
+                 matlab_compatible=False,
+                 struct_as_record=True,
+                 verify_compressed_data_integrity=True,
+                 simplify_cells=False):
+        '''
+        Initializer for mat file reader
+
+        mat_stream : file-like
+            object with file API, open for reading
+    %(load_args)s
+        '''
+        # Initialize stream
+        self.mat_stream = mat_stream
+        self.dtypes = {}
+        if not byte_order:
+            byte_order = self.guess_byte_order()
+        else:
+            byte_order = boc.to_numpy_code(byte_order)
+        self.byte_order = byte_order
+        self.struct_as_record = struct_as_record
+        if matlab_compatible:
+            self.set_matlab_compatible()
+        else:
+            self.squeeze_me = squeeze_me
+            self.chars_as_strings = chars_as_strings
+            self.mat_dtype = mat_dtype
+        self.verify_compressed_data_integrity = verify_compressed_data_integrity
+        self.simplify_cells = simplify_cells
+        if simplify_cells:
+            self.squeeze_me = True
+            self.struct_as_record = False
+
+    def set_matlab_compatible(self):
+        ''' Sets options to return arrays as MATLAB loads them '''
+        self.mat_dtype = True
+        self.squeeze_me = False
+        self.chars_as_strings = False
+
+    def guess_byte_order(self):
+        ''' As we do not know what file type we have, assume native '''
+        return boc.native_code
+
+    def end_of_stream(self):
+        b = self.mat_stream.read(1)
+        curpos = self.mat_stream.tell()
+        self.mat_stream.seek(curpos-1)
+        return len(b) == 0
+
+
+def arr_dtype_number(arr, num):
+    ''' Return dtype for given number of items per element'''
+    return np.dtype(arr.dtype.str[:2] + str(num))
+
+
+def arr_to_chars(arr):
+    ''' Convert string array to char array '''
+    dims = list(arr.shape)
+    if not dims:
+        dims = [1]
+    dims.append(int(arr.dtype.str[2:]))
+    arr = np.ndarray(shape=dims,
+                     dtype=arr_dtype_number(arr, 1),
+                     buffer=arr)
+    empties = [arr == np.array('', dtype=arr.dtype)]
+    if not np.any(empties):
+        return arr
+    arr = arr.copy()
+    arr[tuple(empties)] = ' '
+    return arr
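+
+# Sketch of the conversion (editor's illustration): a 1-D array of up-to-3
+# character strings becomes an (n, 3) array of single characters, with empty
+# strings padded out with spaces (dtype shown for a little-endian system):
+#
+#     >>> arr_to_chars(np.array(['abc', '']))
+#     array([['a', 'b', 'c'],
+#            [' ', ' ', ' ']], dtype='<U1')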
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/byteordercodes.py b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/byteordercodes.py
new file mode 100644
index 00000000..c2b05ae2
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/byteordercodes.py
@@ -0,0 +1,29 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.io.matlab` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _byteordercodes
+
+
+__all__ = [  # noqa: F822
+    'aliases', 'native_code', 'swapped_code',
+    'sys_is_le', 'to_numpy_code'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.io.matlab.byteordercodes is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.io.matlab instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.io.matlab` namespace, "
+                  "the `scipy.io.matlab.byteordercodes` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_byteordercodes, name)
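+
+# Editor's note: each deprecated shim module in this package follows this
+# pattern, so accessing any re-exported name emits a DeprecationWarning,
+# e.g. (sketch):
+#
+#     import warnings
+#     from scipy.io.matlab import byteordercodes
+#     with warnings.catch_warnings(record=True) as caught:
+#         warnings.simplefilter('always')
+#         byteordercodes.native_code
+#     assert caught[0].category is DeprecationWarning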
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/mio.py b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/mio.py
new file mode 100644
index 00000000..37e201d0
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/mio.py
@@ -0,0 +1,29 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.io.matlab` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _mio
+
+
+__all__ = [  # noqa: F822
+    'mat_reader_factory', 'loadmat', 'savemat', 'whosmat',
+    'contextmanager', 'docfiller',
+    'MatFile4Reader', 'MatFile4Writer', 'MatFile5Reader', 'MatFile5Writer'
+]
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.io.matlab.mio is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.io.matlab instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.io.matlab` namespace, "
+                  "the `scipy.io.matlab.mio` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_mio, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/mio4.py b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/mio4.py
new file mode 100644
index 00000000..d6f50f2a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/mio4.py
@@ -0,0 +1,33 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.io.matlab` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _mio4
+
+
+__all__ = [  # noqa: F822
+    'MatFile4Reader', 'MatFile4Writer', 'SYS_LITTLE_ENDIAN',
+    'VarHeader4', 'VarReader4', 'VarWriter4', 'arr_to_2d', 'mclass_info',
+    'mdtypes_template', 'miDOUBLE', 'miINT16', 'miINT32', 'miSINGLE',
+    'miUINT16', 'miUINT8', 'mxCHAR_CLASS', 'mxFULL_CLASS', 'mxSPARSE_CLASS',
+    'np_to_mtypes', 'order_codes', 'MatFileReader', 'docfiller',
+    'matdims', 'read_dtype', 'convert_dtypes', 'arr_to_chars',
+    'arr_dtype_number', 'squeeze_element', 'chars_to_strings'
+]
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.io.matlab.mio4 is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.io.matlab instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.io.matlab` namespace, "
+                  "the `scipy.io.matlab.mio4` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_mio4, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/mio5.py b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/mio5.py
new file mode 100644
index 00000000..4390416e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/mio5.py
@@ -0,0 +1,37 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.io.matlab` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _mio5
+
+
+__all__ = [  # noqa: F822
+    'mclass_info', 'mxCHAR_CLASS', 'mxSPARSE_CLASS',
+    'BytesIO', 'native_code',
+    'swapped_code', 'MatFileReader', 'docfiller', 'matdims',
+    'read_dtype', 'arr_to_chars', 'arr_dtype_number', 'MatWriteError',
+    'MatReadError', 'MatReadWarning', 'VarReader5', 'MatlabObject',
+    'MatlabFunction', 'MDTYPES', 'NP_TO_MTYPES', 'NP_TO_MXTYPES',
+    'miCOMPRESSED', 'miMATRIX', 'miINT8', 'miUTF8', 'miUINT32',
+    'mxCELL_CLASS', 'mxSTRUCT_CLASS', 'mxOBJECT_CLASS', 'mxDOUBLE_CLASS',
+    'mat_struct', 'ZlibInputStream', 'MatFile5Reader', 'varmats_from_mat',
+    'EmptyStructMarker', 'to_writeable', 'NDT_FILE_HDR', 'NDT_TAG_FULL',
+    'NDT_TAG_SMALL', 'NDT_ARRAY_FLAGS', 'VarWriter5', 'MatFile5Writer'
+]
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.io.matlab.mio5 is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.io.matlab instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.io.matlab` namespace, "
+                  "the `scipy.io.matlab.mio5` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_mio5, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/mio5_params.py b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/mio5_params.py
new file mode 100644
index 00000000..12445504
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/mio5_params.py
@@ -0,0 +1,37 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.io.matlab` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _mio5_params
+
+
+__all__ = [  # noqa: F822
+    'MDTYPES', 'MatlabFunction', 'MatlabObject', 'MatlabOpaque',
+    'NP_TO_MTYPES', 'NP_TO_MXTYPES', 'OPAQUE_DTYPE', 'codecs_template',
+    'mat_struct', 'mclass_dtypes_template', 'mclass_info', 'mdtypes_template',
+    'miCOMPRESSED', 'miDOUBLE', 'miINT16', 'miINT32', 'miINT64', 'miINT8',
+    'miMATRIX', 'miSINGLE', 'miUINT16', 'miUINT32', 'miUINT64', 'miUINT8',
+    'miUTF16', 'miUTF32', 'miUTF8', 'mxCELL_CLASS', 'mxCHAR_CLASS',
+    'mxDOUBLE_CLASS', 'mxFUNCTION_CLASS', 'mxINT16_CLASS', 'mxINT32_CLASS',
+    'mxINT64_CLASS', 'mxINT8_CLASS', 'mxOBJECT_CLASS',
+    'mxOBJECT_CLASS_FROM_MATRIX_H', 'mxOPAQUE_CLASS', 'mxSINGLE_CLASS',
+    'mxSPARSE_CLASS', 'mxSTRUCT_CLASS', 'mxUINT16_CLASS', 'mxUINT32_CLASS',
+    'mxUINT64_CLASS', 'mxUINT8_CLASS', 'convert_dtypes'
+]
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.io.matlab.mio5_params is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.io.matlab instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.io.matlab` namespace, "
+                  "the `scipy.io.matlab.mio5_params` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_mio5_params, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/mio5_utils.py b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/mio5_utils.py
new file mode 100644
index 00000000..b0d578bf
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/mio5_utils.py
@@ -0,0 +1,28 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.io.matlab` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _mio5_utils
+
+
+__all__ = [  # noqa: F822
+    'VarHeader5', 'VarReader5', 'byteswap_u4', 'chars_to_strings',
+    'csc_matrix', 'mio5p', 'miob', 'pycopy', 'swapped_code', 'squeeze_element'
+]
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.io.matlab.mio5_utils is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.io.matlab instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.io.matlab` namespace, "
+                  "the `scipy.io.matlab.mio5_utils` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_mio5_utils, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/mio_utils.py b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/mio_utils.py
new file mode 100644
index 00000000..c5e35084
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/mio_utils.py
@@ -0,0 +1,26 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.io.matlab` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _mio_utils
+
+
+__all__ = ['squeeze_element', 'chars_to_strings']  # noqa: F822
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.io.matlab.mio_utils is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.io.matlab instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.io.matlab` namespace, "
+                  "the `scipy.io.matlab.mio_utils` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_mio_utils, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/miobase.py b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/miobase.py
new file mode 100644
index 00000000..ee6a2b93
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/miobase.py
@@ -0,0 +1,31 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.io.matlab` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _miobase
+
+
+__all__ = [  # noqa: F822
+    'MatFileReader', 'MatReadError', 'MatReadWarning',
+    'MatVarReader', 'MatWriteError', 'arr_dtype_number',
+    'arr_to_chars', 'convert_dtypes', 'doc_dict',
+    'docfiller', 'get_matfile_version',
+    'matdims', 'read_dtype', 'doccer', 'boc'
+]
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.io.matlab.miobase is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.io.matlab instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.io.matlab` namespace, "
+                  "the `scipy.io.matlab.miobase` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_miobase, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/streams.py b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/streams.py
new file mode 100644
index 00000000..7566e9fd
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/streams.py
@@ -0,0 +1,27 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.io.matlab` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _streams
+
+
+__all__ = [  # noqa: F822
+    'BLOCK_SIZE', 'GenericStream', 'ZlibInputStream', 'make_stream'
+]
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.io.matlab.streams is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.io.matlab instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.io.matlab` namespace, "
+                  "the `scipy.io.matlab.streams` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_streams, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/bad_miuint32.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/bad_miuint32.mat
new file mode 100644
index 00000000..c9ab357e
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/bad_miuint32.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/bad_miutf8_array_name.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/bad_miutf8_array_name.mat
new file mode 100644
index 00000000..a17203fb
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/bad_miutf8_array_name.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/big_endian.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/big_endian.mat
new file mode 100644
index 00000000..2a0c982c
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/big_endian.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/broken_utf8.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/broken_utf8.mat
new file mode 100644
index 00000000..4f632387
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/broken_utf8.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/corrupted_zlib_checksum.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/corrupted_zlib_checksum.mat
new file mode 100644
index 00000000..c88cbb6f
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/corrupted_zlib_checksum.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/corrupted_zlib_data.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/corrupted_zlib_data.mat
new file mode 100644
index 00000000..45a2ef4e
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/corrupted_zlib_data.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/japanese_utf8.txt b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/japanese_utf8.txt
new file mode 100644
index 00000000..1459b6b6
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/japanese_utf8.txt
@@ -0,0 +1,5 @@
+Japanese: 
+すべての人間は、生まれながらにして自由であり、
+かつ、尊厳と権利と について平等である。
+人間は、理性と良心とを授けられており、
+互いに同胞の精神をもって行動しなければならない。
\ No newline at end of file
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/little_endian.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/little_endian.mat
new file mode 100644
index 00000000..df6db666
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/little_endian.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/logical_sparse.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/logical_sparse.mat
new file mode 100644
index 00000000..a60ad5b6
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/logical_sparse.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/malformed1.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/malformed1.mat
new file mode 100644
index 00000000..54462e27
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/malformed1.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/miuint32_for_miint32.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/miuint32_for_miint32.mat
new file mode 100644
index 00000000..fd2c4994
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/miuint32_for_miint32.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/miutf8_array_name.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/miutf8_array_name.mat
new file mode 100644
index 00000000..ccfdaa8a
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/miutf8_array_name.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/nasty_duplicate_fieldnames.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/nasty_duplicate_fieldnames.mat
new file mode 100644
index 00000000..35dcb715
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/nasty_duplicate_fieldnames.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/one_by_zero_char.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/one_by_zero_char.mat
new file mode 100644
index 00000000..07e7dca4
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/one_by_zero_char.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/parabola.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/parabola.mat
new file mode 100644
index 00000000..66350532
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/parabola.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/single_empty_string.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/single_empty_string.mat
new file mode 100644
index 00000000..293f3877
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/single_empty_string.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/some_functions.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/some_functions.mat
new file mode 100644
index 00000000..cc818593
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/some_functions.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/sqr.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/sqr.mat
new file mode 100644
index 00000000..2436d87c
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/sqr.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/test3dmatrix_6.1_SOL2.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/test3dmatrix_6.1_SOL2.mat
new file mode 100644
index 00000000..45371261
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/test3dmatrix_6.1_SOL2.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/test3dmatrix_6.5.1_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/test3dmatrix_6.5.1_GLNX86.mat
new file mode 100644
index 00000000..e04d27d3
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/test3dmatrix_6.5.1_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/test3dmatrix_7.1_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/test3dmatrix_7.1_GLNX86.mat
new file mode 100644
index 00000000..4c030303
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/test3dmatrix_7.1_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/test3dmatrix_7.4_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/test3dmatrix_7.4_GLNX86.mat
new file mode 100644
index 00000000..232a051c
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/test3dmatrix_7.4_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/test_empty_struct.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/test_empty_struct.mat
new file mode 100644
index 00000000..30c8c8ad
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/test_empty_struct.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/test_mat4_le_floats.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/test_mat4_le_floats.mat
new file mode 100644
index 00000000..6643c42d
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/test_mat4_le_floats.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/test_skip_variable.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/test_skip_variable.mat
new file mode 100644
index 00000000..efbe3fec
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/test_skip_variable.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testbool_8_WIN64.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testbool_8_WIN64.mat
new file mode 100644
index 00000000..faa30b10
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testbool_8_WIN64.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcell_6.1_SOL2.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcell_6.1_SOL2.mat
new file mode 100644
index 00000000..512f7d88
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcell_6.1_SOL2.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcell_6.5.1_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcell_6.5.1_GLNX86.mat
new file mode 100644
index 00000000..a7633104
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcell_6.5.1_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcell_7.1_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcell_7.1_GLNX86.mat
new file mode 100644
index 00000000..2ac1da15
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcell_7.1_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcell_7.4_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcell_7.4_GLNX86.mat
new file mode 100644
index 00000000..fc893f33
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcell_7.4_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcellnest_6.1_SOL2.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcellnest_6.1_SOL2.mat
new file mode 100644
index 00000000..4198a4f2
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcellnest_6.1_SOL2.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcellnest_6.5.1_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcellnest_6.5.1_GLNX86.mat
new file mode 100644
index 00000000..2c7826ee
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcellnest_6.5.1_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat
new file mode 100644
index 00000000..b3b086cc
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcellnest_7.4_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcellnest_7.4_GLNX86.mat
new file mode 100644
index 00000000..316f8894
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcellnest_7.4_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcomplex_4.2c_SOL2.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcomplex_4.2c_SOL2.mat
new file mode 100644
index 00000000..36621b25
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcomplex_4.2c_SOL2.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcomplex_6.1_SOL2.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcomplex_6.1_SOL2.mat
new file mode 100644
index 00000000..32fcd2a9
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcomplex_6.1_SOL2.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcomplex_6.5.1_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcomplex_6.5.1_GLNX86.mat
new file mode 100644
index 00000000..f3ecd203
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcomplex_6.5.1_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcomplex_7.1_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcomplex_7.1_GLNX86.mat
new file mode 100644
index 00000000..c0c08385
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcomplex_7.1_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcomplex_7.4_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcomplex_7.4_GLNX86.mat
new file mode 100644
index 00000000..6a187edb
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testcomplex_7.4_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testdouble_4.2c_SOL2.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testdouble_4.2c_SOL2.mat
new file mode 100644
index 00000000..5dbfcf17
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testdouble_4.2c_SOL2.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testdouble_6.1_SOL2.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testdouble_6.1_SOL2.mat
new file mode 100644
index 00000000..8e36c0c8
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testdouble_6.1_SOL2.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testdouble_6.5.1_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testdouble_6.5.1_GLNX86.mat
new file mode 100644
index 00000000..a003b6d8
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testdouble_6.5.1_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testdouble_7.1_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testdouble_7.1_GLNX86.mat
new file mode 100644
index 00000000..3106712e
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testdouble_7.1_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testdouble_7.4_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testdouble_7.4_GLNX86.mat
new file mode 100644
index 00000000..9097bb08
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testdouble_7.4_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testemptycell_5.3_SOL2.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testemptycell_5.3_SOL2.mat
new file mode 100644
index 00000000..e7dec3b8
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testemptycell_5.3_SOL2.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testemptycell_6.5.1_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testemptycell_6.5.1_GLNX86.mat
new file mode 100644
index 00000000..a1c93483
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testemptycell_6.5.1_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testemptycell_7.1_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testemptycell_7.1_GLNX86.mat
new file mode 100644
index 00000000..f29d4f93
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testemptycell_7.1_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testemptycell_7.4_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testemptycell_7.4_GLNX86.mat
new file mode 100644
index 00000000..8b244044
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testemptycell_7.4_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testfunc_7.4_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testfunc_7.4_GLNX86.mat
new file mode 100644
index 00000000..adb6c28e
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testfunc_7.4_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testhdf5_7.4_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testhdf5_7.4_GLNX86.mat
new file mode 100644
index 00000000..6066c1e3
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testhdf5_7.4_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testmatrix_4.2c_SOL2.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testmatrix_4.2c_SOL2.mat
new file mode 100644
index 00000000..3698c885
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testmatrix_4.2c_SOL2.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testmatrix_6.1_SOL2.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testmatrix_6.1_SOL2.mat
new file mode 100644
index 00000000..164be110
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testmatrix_6.1_SOL2.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testmatrix_6.5.1_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testmatrix_6.5.1_GLNX86.mat
new file mode 100644
index 00000000..a8735e9a
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testmatrix_6.5.1_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testmatrix_7.1_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testmatrix_7.1_GLNX86.mat
new file mode 100644
index 00000000..b6fb05bb
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testmatrix_7.1_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testmatrix_7.4_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testmatrix_7.4_GLNX86.mat
new file mode 100644
index 00000000..eb537ab1
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testmatrix_7.4_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testminus_4.2c_SOL2.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testminus_4.2c_SOL2.mat
new file mode 100644
index 00000000..cc207ed9
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testminus_4.2c_SOL2.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testminus_6.1_SOL2.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testminus_6.1_SOL2.mat
new file mode 100644
index 00000000..c2f0ba2a
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testminus_6.1_SOL2.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testminus_6.5.1_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testminus_6.5.1_GLNX86.mat
new file mode 100644
index 00000000..b4dbd152
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testminus_6.5.1_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testminus_7.1_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testminus_7.1_GLNX86.mat
new file mode 100644
index 00000000..fadcd236
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testminus_7.1_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testminus_7.4_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testminus_7.4_GLNX86.mat
new file mode 100644
index 00000000..9ce65f91
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testminus_7.4_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testmulti_4.2c_SOL2.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testmulti_4.2c_SOL2.mat
new file mode 100644
index 00000000..9c6ba793
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testmulti_4.2c_SOL2.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testmulti_7.1_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testmulti_7.1_GLNX86.mat
new file mode 100644
index 00000000..0c4729c5
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testmulti_7.1_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testmulti_7.4_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testmulti_7.4_GLNX86.mat
new file mode 100644
index 00000000..6d3e0689
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testmulti_7.4_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testobject_6.1_SOL2.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testobject_6.1_SOL2.mat
new file mode 100644
index 00000000..fc136422
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testobject_6.1_SOL2.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testobject_6.5.1_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testobject_6.5.1_GLNX86.mat
new file mode 100644
index 00000000..f68323b0
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testobject_6.5.1_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testobject_7.1_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testobject_7.1_GLNX86.mat
new file mode 100644
index 00000000..83dcad34
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testobject_7.1_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testobject_7.4_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testobject_7.4_GLNX86.mat
new file mode 100644
index 00000000..59d243c4
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testobject_7.4_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testonechar_4.2c_SOL2.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testonechar_4.2c_SOL2.mat
new file mode 100644
index 00000000..cdb4191c
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testonechar_4.2c_SOL2.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testonechar_6.1_SOL2.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testonechar_6.1_SOL2.mat
new file mode 100644
index 00000000..3b5a4285
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testonechar_6.1_SOL2.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testonechar_6.5.1_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testonechar_6.5.1_GLNX86.mat
new file mode 100644
index 00000000..8cef2dd7
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testonechar_6.5.1_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testonechar_7.1_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testonechar_7.1_GLNX86.mat
new file mode 100644
index 00000000..5ba4810a
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testonechar_7.1_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testonechar_7.4_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testonechar_7.4_GLNX86.mat
new file mode 100644
index 00000000..8964765f
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testonechar_7.4_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testscalarcell_7.4_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testscalarcell_7.4_GLNX86.mat
new file mode 100644
index 00000000..1dcd72e5
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testscalarcell_7.4_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsimplecell.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsimplecell.mat
new file mode 100644
index 00000000..2a98f489
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsimplecell.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparse_4.2c_SOL2.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparse_4.2c_SOL2.mat
new file mode 100644
index 00000000..55cbd3c1
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparse_4.2c_SOL2.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparse_6.1_SOL2.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparse_6.1_SOL2.mat
new file mode 100644
index 00000000..194ca4d7
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparse_6.1_SOL2.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparse_6.5.1_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparse_6.5.1_GLNX86.mat
new file mode 100644
index 00000000..3e1e9a1e
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparse_6.5.1_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparse_7.1_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparse_7.1_GLNX86.mat
new file mode 100644
index 00000000..55b51076
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparse_7.1_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparse_7.4_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparse_7.4_GLNX86.mat
new file mode 100644
index 00000000..bdb6ce66
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparse_7.4_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparsecomplex_4.2c_SOL2.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparsecomplex_4.2c_SOL2.mat
new file mode 100644
index 00000000..81c536d0
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparsecomplex_4.2c_SOL2.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparsecomplex_6.1_SOL2.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparsecomplex_6.1_SOL2.mat
new file mode 100644
index 00000000..520e1ced
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparsecomplex_6.1_SOL2.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparsecomplex_6.5.1_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparsecomplex_6.5.1_GLNX86.mat
new file mode 100644
index 00000000..969b7143
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparsecomplex_6.5.1_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparsecomplex_7.1_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparsecomplex_7.1_GLNX86.mat
new file mode 100644
index 00000000..9117dce3
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparsecomplex_7.1_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparsecomplex_7.4_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparsecomplex_7.4_GLNX86.mat
new file mode 100644
index 00000000..a8a615a3
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparsecomplex_7.4_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparsefloat_7.4_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparsefloat_7.4_GLNX86.mat
new file mode 100644
index 00000000..15424266
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testsparsefloat_7.4_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststring_4.2c_SOL2.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststring_4.2c_SOL2.mat
new file mode 100644
index 00000000..137561e1
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststring_4.2c_SOL2.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststring_6.1_SOL2.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststring_6.1_SOL2.mat
new file mode 100644
index 00000000..2ad75f2e
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststring_6.1_SOL2.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststring_6.5.1_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststring_6.5.1_GLNX86.mat
new file mode 100644
index 00000000..6fd12d88
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststring_6.5.1_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststring_7.1_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststring_7.1_GLNX86.mat
new file mode 100644
index 00000000..ab93994f
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststring_7.1_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststring_7.4_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststring_7.4_GLNX86.mat
new file mode 100644
index 00000000..63059b84
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststring_7.4_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststringarray_4.2c_SOL2.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststringarray_4.2c_SOL2.mat
new file mode 100644
index 00000000..fa687ee9
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststringarray_4.2c_SOL2.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststringarray_6.1_SOL2.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststringarray_6.1_SOL2.mat
new file mode 100644
index 00000000..11afb412
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststringarray_6.1_SOL2.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststringarray_6.5.1_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststringarray_6.5.1_GLNX86.mat
new file mode 100644
index 00000000..75e07a0b
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststringarray_6.5.1_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststringarray_7.1_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststringarray_7.1_GLNX86.mat
new file mode 100644
index 00000000..7d76f636
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststringarray_7.1_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststringarray_7.4_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststringarray_7.4_GLNX86.mat
new file mode 100644
index 00000000..954e39be
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststringarray_7.4_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststruct_6.1_SOL2.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststruct_6.1_SOL2.mat
new file mode 100644
index 00000000..5086bb7a
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststruct_6.1_SOL2.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststruct_6.5.1_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststruct_6.5.1_GLNX86.mat
new file mode 100644
index 00000000..6feb6e42
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststruct_6.5.1_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststruct_7.1_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststruct_7.1_GLNX86.mat
new file mode 100644
index 00000000..b2ff2226
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststruct_7.1_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststruct_7.4_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststruct_7.4_GLNX86.mat
new file mode 100644
index 00000000..028841f9
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststruct_7.4_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststructarr_6.1_SOL2.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststructarr_6.1_SOL2.mat
new file mode 100644
index 00000000..da573659
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststructarr_6.1_SOL2.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststructarr_6.5.1_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststructarr_6.5.1_GLNX86.mat
new file mode 100644
index 00000000..d1c97a7a
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststructarr_6.5.1_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststructarr_7.1_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststructarr_7.1_GLNX86.mat
new file mode 100644
index 00000000..c7ca0959
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststructarr_7.1_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststructarr_7.4_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststructarr_7.4_GLNX86.mat
new file mode 100644
index 00000000..8716f7e3
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststructarr_7.4_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststructnest_6.1_SOL2.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststructnest_6.1_SOL2.mat
new file mode 100644
index 00000000..2c34c4d8
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststructnest_6.1_SOL2.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststructnest_6.5.1_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststructnest_6.5.1_GLNX86.mat
new file mode 100644
index 00000000..c6dccc00
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststructnest_6.5.1_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststructnest_7.1_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststructnest_7.1_GLNX86.mat
new file mode 100644
index 00000000..0f6f5444
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststructnest_7.1_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststructnest_7.4_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststructnest_7.4_GLNX86.mat
new file mode 100644
index 00000000..faf9221b
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/teststructnest_7.4_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testunicode_7.1_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testunicode_7.1_GLNX86.mat
new file mode 100644
index 00000000..1b7b3d7f
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testunicode_7.1_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testunicode_7.4_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testunicode_7.4_GLNX86.mat
new file mode 100644
index 00000000..d22fb57c
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testunicode_7.4_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testvec_4_GLNX86.mat b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testvec_4_GLNX86.mat
new file mode 100644
index 00000000..76c51d01
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/data/testvec_4_GLNX86.mat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/test_byteordercodes.py b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/test_byteordercodes.py
new file mode 100644
index 00000000..535434d1
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/test_byteordercodes.py
@@ -0,0 +1,29 @@
+''' Tests for byteorder module '''
+
+import sys
+
+from numpy.testing import assert_
+from pytest import raises as assert_raises
+
+import scipy.io.matlab._byteordercodes as sibc
+
+
+def test_native():
+    native_is_le = sys.byteorder == 'little'
+    assert_(sibc.sys_is_le == native_is_le)
+
+
+def test_to_numpy():
+    if sys.byteorder == 'little':
+        assert_(sibc.to_numpy_code('native') == '<')
+        assert_(sibc.to_numpy_code('swapped') == '>')
+    else:
+        assert_(sibc.to_numpy_code('native') == '>')
+        assert_(sibc.to_numpy_code('swapped') == '<')
+    assert_(sibc.to_numpy_code('native') == sibc.to_numpy_code('='))
+    assert_(sibc.to_numpy_code('big') == '>')
+    for code in ('little', '<', 'l', 'L', 'le'):
+        assert_(sibc.to_numpy_code(code) == '<')
+    for code in ('big', '>', 'b', 'B', 'be'):
+        assert_(sibc.to_numpy_code(code) == '>')
+    assert_raises(ValueError, sibc.to_numpy_code, 'silly string')
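+
+
+# Editor's sketch (not part of the upstream test file): using the alias
+# mapping exercised above to read raw big-endian bytes. Assumes the same
+# private to_numpy_code helper.
+def _example_big_endian_read():
+    import numpy as np
+    import scipy.io.matlab._byteordercodes as sibc
+    dt = np.dtype(sibc.to_numpy_code('be') + 'u4')  # i.e. '>u4'
+    assert np.frombuffer(b'\x00\x00\x00\x01', dtype=dt)[0] == 1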
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/test_mio.py b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/test_mio.py
new file mode 100644
index 00000000..99222f69
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/test_mio.py
@@ -0,0 +1,1291 @@
+# -*- coding: utf-8 -*-
+''' Load / save / round-trip tests for MATLAB .mat file I/O.
+
+Originally written as nose test generators; now parametrized with pytest.
+
+'''
+import os
+from collections import OrderedDict
+from os.path import join as pjoin, dirname
+from glob import glob
+from io import BytesIO
+import re
+from tempfile import mkdtemp
+
+import warnings
+import shutil
+import gzip
+
+from numpy.testing import (assert_array_equal, assert_array_almost_equal,
+                           assert_equal, assert_, assert_warns)
+import pytest
+from pytest import raises as assert_raises
+
+import numpy as np
+from numpy import array
+import scipy.sparse as SP
+
+import scipy.io
+from scipy.io.matlab import MatlabOpaque, MatlabFunction, MatlabObject
+import scipy.io.matlab._byteordercodes as boc
+from scipy.io.matlab._miobase import (
+    matdims, MatWriteError, MatReadError, matfile_version)
+from scipy.io.matlab._mio import mat_reader_factory, loadmat, savemat, whosmat
+from scipy.io.matlab._mio5 import (
+    MatFile5Writer, MatFile5Reader, varmats_from_mat, to_writeable,
+    EmptyStructMarker)
+import scipy.io.matlab._mio5_params as mio5p
+
+test_data_path = pjoin(dirname(__file__), 'data')
+
+
+def mlarr(*args, **kwargs):
+    """Convenience function to return matlab-compatible 2-D array."""
+    arr = np.array(*args, **kwargs)
+    arr.shape = matdims(arr)
+    return arr
+
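+# Editor's note (illustration, not upstream code): matdims pads to at least
+# 2-D, so mlarr(1).shape == (1, 1); with matdims' default oned_as='column'
+# (an assumption here), mlarr([1, 2, 3]).shape == (3, 1).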
+
+# Define cases to test
+theta = np.pi/4*np.arange(9,dtype=float).reshape(1,9)
+case_table4 = [
+    {'name': 'double',
+     'classes': {'testdouble': 'double'},
+     'expected': {'testdouble': theta}
+     }]
+case_table4.append(
+    {'name': 'string',
+     'classes': {'teststring': 'char'},
+     'expected': {'teststring':
+                  array(['"Do nine men interpret?" "Nine men," I nod.'])}
+     })
+case_table4.append(
+    {'name': 'complex',
+     'classes': {'testcomplex': 'double'},
+     'expected': {'testcomplex': np.cos(theta) + 1j*np.sin(theta)}
+     })
+A = np.zeros((3,5))
+A[0] = list(range(1,6))
+A[:,0] = list(range(1,4))
+case_table4.append(
+    {'name': 'matrix',
+     'classes': {'testmatrix': 'double'},
+     'expected': {'testmatrix': A},
+     })
+case_table4.append(
+    {'name': 'sparse',
+     'classes': {'testsparse': 'sparse'},
+     'expected': {'testsparse': SP.coo_matrix(A)},
+     })
+B = A.astype(complex)
+B[0,0] += 1j
+case_table4.append(
+    {'name': 'sparsecomplex',
+     'classes': {'testsparsecomplex': 'sparse'},
+     'expected': {'testsparsecomplex': SP.coo_matrix(B)},
+     })
+case_table4.append(
+    {'name': 'multi',
+     'classes': {'theta': 'double', 'a': 'double'},
+     'expected': {'theta': theta, 'a': A},
+     })
+case_table4.append(
+    {'name': 'minus',
+     'classes': {'testminus': 'double'},
+     'expected': {'testminus': mlarr(-1)},
+     })
+case_table4.append(
+    {'name': 'onechar',
+     'classes': {'testonechar': 'char'},
+     'expected': {'testonechar': array(['r'])},
+     })
+# Cell arrays stored as object arrays
+CA = mlarr((  # tuple for object array creation
+        [],
+        mlarr([1]),
+        mlarr([[1,2]]),
+        mlarr([[1,2,3]])), dtype=object).reshape(1,-1)
+CA[0,0] = array(
+    ['This cell contains this string and 3 arrays of increasing length'])
+case_table5 = [
+    {'name': 'cell',
+     'classes': {'testcell': 'cell'},
+     'expected': {'testcell': CA}}]
+CAE = mlarr((  # tuple for object array creation
+    mlarr(1),
+    mlarr(2),
+    mlarr([]),
+    mlarr([]),
+    mlarr(3)), dtype=object).reshape(1,-1)
+objarr = np.empty((1,1),dtype=object)
+objarr[0,0] = mlarr(1)
+case_table5.append(
+    {'name': 'scalarcell',
+     'classes': {'testscalarcell': 'cell'},
+     'expected': {'testscalarcell': objarr}
+     })
+case_table5.append(
+    {'name': 'emptycell',
+     'classes': {'testemptycell': 'cell'},
+     'expected': {'testemptycell': CAE}})
+case_table5.append(
+    {'name': 'stringarray',
+     'classes': {'teststringarray': 'char'},
+     'expected': {'teststringarray': array(
+         ['one  ', 'two  ', 'three'])},
+     })
+case_table5.append(
+    {'name': '3dmatrix',
+     'classes': {'test3dmatrix': 'double'},
+     'expected': {
+         'test3dmatrix': np.transpose(np.reshape(list(range(1,25)), (4,3,2)))}
+     })
+st_sub_arr = array([np.sqrt(2),np.exp(1),np.pi]).reshape(1,3)
+dtype = [(n, object) for n in ['stringfield', 'doublefield', 'complexfield']]
+st1 = np.zeros((1,1), dtype)
+st1['stringfield'][0,0] = array(['Rats live on no evil star.'])
+st1['doublefield'][0,0] = st_sub_arr
+st1['complexfield'][0,0] = st_sub_arr * (1 + 1j)
+case_table5.append(
+    {'name': 'struct',
+     'classes': {'teststruct': 'struct'},
+     'expected': {'teststruct': st1}
+     })
+CN = np.zeros((1,2), dtype=object)
+CN[0,0] = mlarr(1)
+CN[0,1] = np.zeros((1,3), dtype=object)
+CN[0,1][0,0] = mlarr(2, dtype=np.uint8)
+CN[0,1][0,1] = mlarr([[3]], dtype=np.uint8)
+CN[0,1][0,2] = np.zeros((1,2), dtype=object)
+CN[0,1][0,2][0,0] = mlarr(4, dtype=np.uint8)
+CN[0,1][0,2][0,1] = mlarr(5, dtype=np.uint8)
+case_table5.append(
+    {'name': 'cellnest',
+     'classes': {'testcellnest': 'cell'},
+     'expected': {'testcellnest': CN},
+     })
+st2 = np.empty((1,1), dtype=[(n, object) for n in ['one', 'two']])
+st2[0,0]['one'] = mlarr(1)
+st2[0,0]['two'] = np.empty((1,1), dtype=[('three', object)])
+st2[0,0]['two'][0,0]['three'] = array(['number 3'])
+case_table5.append(
+    {'name': 'structnest',
+     'classes': {'teststructnest': 'struct'},
+     'expected': {'teststructnest': st2}
+     })
+a = np.empty((1,2), dtype=[(n, object) for n in ['one', 'two']])
+a[0,0]['one'] = mlarr(1)
+a[0,0]['two'] = mlarr(2)
+a[0,1]['one'] = array(['number 1'])
+a[0,1]['two'] = array(['number 2'])
+case_table5.append(
+    {'name': 'structarr',
+     'classes': {'teststructarr': 'struct'},
+     'expected': {'teststructarr': a}
+     })
+ODT = np.dtype([(n, object) for n in
+                 ['expr', 'inputExpr', 'args',
+                  'isEmpty', 'numArgs', 'version']])
+MO = MatlabObject(np.zeros((1,1), dtype=ODT), 'inline')
+m0 = MO[0,0]
+m0['expr'] = array(['x'])
+m0['inputExpr'] = array([' x = INLINE_INPUTS_{1};'])
+m0['args'] = array(['x'])
+m0['isEmpty'] = mlarr(0)
+m0['numArgs'] = mlarr(1)
+m0['version'] = mlarr(1)
+case_table5.append(
+    {'name': 'object',
+     'classes': {'testobject': 'object'},
+     'expected': {'testobject': MO}
+     })
+fp_u_str = open(pjoin(test_data_path, 'japanese_utf8.txt'), 'rb')
+u_str = fp_u_str.read().decode('utf-8')
+fp_u_str.close()
+case_table5.append(
+    {'name': 'unicode',
+     'classes': {'testunicode': 'char'},
+     'expected': {'testunicode': array([u_str])}
+     })
+case_table5.append(
+    {'name': 'sparse',
+     'classes': {'testsparse': 'sparse'},
+     'expected': {'testsparse': SP.coo_matrix(A)},
+     })
+case_table5.append(
+    {'name': 'sparsecomplex',
+     'classes': {'testsparsecomplex': 'sparse'},
+     'expected': {'testsparsecomplex': SP.coo_matrix(B)},
+     })
+case_table5.append(
+    {'name': 'bool',
+     'classes': {'testbools': 'logical'},
+     'expected': {'testbools':
+                  array([[True], [False]])},
+     })
+
+case_table5_rt = case_table5[:]
+# Inline functions can't be concatenated in MATLAB, so round-trip (RT) only
+case_table5_rt.append(
+    {'name': 'objectarray',
+     'classes': {'testobjectarray': 'object'},
+     'expected': {'testobjectarray': np.repeat(MO, 2).reshape(1,2)}})
+
+
+def types_compatible(var1, var2):
+    """Check if types are same or compatible.
+
+    0-D numpy scalars are compatible with bare python scalars.
+    """
+    type1 = type(var1)
+    type2 = type(var2)
+    if type1 is type2:
+        return True
+    if type1 is np.ndarray and var1.shape == ():
+        return type(var1.item()) is type2
+    if type2 is np.ndarray and var2.shape == ():
+        return type(var2.item()) is type1
+    return False
+
+
+def _check_level(label, expected, actual):
+    """ Check one level of a potentially nested array """
+    if SP.issparse(expected):  # allow different types of sparse matrices
+        assert_(SP.issparse(actual))
+        assert_array_almost_equal(actual.toarray(),
+                                  expected.toarray(),
+                                  err_msg=label,
+                                  decimal=5)
+        return
+    # Check types are as expected
+    assert_(types_compatible(expected, actual),
+            "Expected type %s, got %s at %s" %
+            (type(expected), type(actual), label))
+    # A field in a record array may not be an ndarray
+    # A scalar from a record array will be type np.void
+    if not isinstance(expected,
+                      (np.void, np.ndarray, MatlabObject)):
+        assert_equal(expected, actual)
+        return
+    # This is an ndarray-like thing
+    assert_(expected.shape == actual.shape,
+            msg='Expected shape %s, got %s at %s' % (expected.shape,
+                                                     actual.shape,
+                                                     label))
+    ex_dtype = expected.dtype
+    if ex_dtype.hasobject:  # array of objects
+        if isinstance(expected, MatlabObject):
+            assert_equal(expected.classname, actual.classname)
+        for i, ev in enumerate(expected):
+            level_label = "%s, [%d], " % (label, i)
+            _check_level(level_label, ev, actual[i])
+        return
+    if ex_dtype.fields:  # probably recarray
+        for fn in ex_dtype.fields:
+            level_label = "%s, field %s, " % (label, fn)
+            _check_level(level_label,
+                         expected[fn], actual[fn])
+        return
+    if ex_dtype.type in (str,  # string or bool
+                         np.unicode_,
+                         np.bool_):
+        assert_equal(actual, expected, err_msg=label)
+        return
+    # Something numeric
+    assert_array_almost_equal(actual, expected, err_msg=label, decimal=5)
+
+
+def _load_check_case(name, files, case):
+    for file_name in files:
+        matdict = loadmat(file_name, struct_as_record=True)
+        label = "test %s; file %s" % (name, file_name)
+        for k, expected in case.items():
+            k_label = "%s, variable %s" % (label, k)
+            assert_(k in matdict, "Missing key at %s" % k_label)
+            _check_level(k_label, expected, matdict[k])
+
+
+def _whos_check_case(name, files, case, classes):
+    for file_name in files:
+        label = "test %s; file %s" % (name, file_name)
+
+        whos = whosmat(file_name)
+
+        expected_whos = [
+            (k, expected.shape, classes[k]) for k, expected in case.items()]
+
+        whos.sort()
+        expected_whos.sort()
+        assert_equal(whos, expected_whos,
+                     "%s: %r != %r" % (label, whos, expected_whos)
+                     )
+
+
+# Round trip tests
+def _rt_check_case(name, expected, format):
+    mat_stream = BytesIO()
+    savemat(mat_stream, expected, format=format)
+    mat_stream.seek(0)
+    _load_check_case(name, [mat_stream], expected)
+
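+# Editor's sketch (not upstream scipy code): the minimal savemat/loadmat
+# round trip that _rt_check_case above builds on; BytesIO stands in for a
+# .mat file on disk.
+def _example_round_trip():
+    import numpy as np
+    from io import BytesIO
+    from scipy.io import savemat, loadmat  # public API
+    buf = BytesIO()
+    savemat(buf, {'a': np.eye(2)}, format='5')
+    buf.seek(0)  # rewind before reading back
+    assert (loadmat(buf)['a'] == np.eye(2)).all()
+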
+
+# generator for tests
+def _cases(version, filt='test%(name)s_*.mat'):
+    if version == '4':
+        cases = case_table4
+    elif version == '5':
+        cases = case_table5
+    else:
+        assert version == '5_rt'
+        cases = case_table5_rt
+    for case in cases:
+        name = case['name']
+        expected = case['expected']
+        if filt is None:
+            files = None
+        else:
+            use_filt = pjoin(test_data_path, filt % dict(name=name))
+            files = glob(use_filt)
+            assert len(files) > 0, \
+                "No files for test %s using filter %s" % (name, filt)
+        classes = case['classes']
+        yield name, files, expected, classes
+
+
+@pytest.mark.parametrize('version', ('4', '5'))
+def test_load(version):
+    for case in _cases(version):
+        _load_check_case(*case[:3])
+
+
+@pytest.mark.parametrize('version', ('4', '5'))
+def test_whos(version):
+    for case in _cases(version):
+        _whos_check_case(*case)
+
+
+# generator for round trip tests
+@pytest.mark.parametrize('version, fmts', [
+    ('4', ['4', '5']),
+    ('5_rt', ['5']),
+])
+def test_round_trip(version, fmts):
+    for case in _cases(version, filt=None):
+        for fmt in fmts:
+            _rt_check_case(case[0], case[2], fmt)
+
+
+def test_gzip_simple():
+    xdense = np.zeros((20,20))
+    xdense[2,3] = 2.3
+    xdense[4,5] = 4.5
+    x = SP.csc_matrix(xdense)
+
+    name = 'gzip_test'
+    expected = {'x':x}
+    format = '4'
+
+    tmpdir = mkdtemp()
+    try:
+        fname = pjoin(tmpdir,name)
+        mat_stream = gzip.open(fname, mode='wb')
+        savemat(mat_stream, expected, format=format)
+        mat_stream.close()
+
+        mat_stream = gzip.open(fname, mode='rb')
+        actual = loadmat(mat_stream, struct_as_record=True)
+        mat_stream.close()
+    finally:
+        shutil.rmtree(tmpdir)
+
+    assert_array_almost_equal(actual['x'].toarray(),
+                              expected['x'].toarray(),
+                              err_msg=repr(actual))
+
+
+def test_multiple_open():
+    # Ticket #1039, on Windows: check that files are not left open
+    tmpdir = mkdtemp()
+    try:
+        x = dict(x=np.zeros((2, 2)))
+
+        fname = pjoin(tmpdir, "a.mat")
+
+        # Check that file is not left open
+        savemat(fname, x)
+        os.unlink(fname)
+        savemat(fname, x)
+        loadmat(fname)
+        os.unlink(fname)
+
+        # Check that a stream passed in by the caller is left open
+        f = open(fname, 'wb')
+        savemat(f, x)
+        f.seek(0)
+        f.close()
+
+        f = open(fname, 'rb')
+        loadmat(f)
+        f.seek(0)
+        f.close()
+    finally:
+        shutil.rmtree(tmpdir)
+
+
+def test_mat73():
+    # Check any hdf5 files raise an error
+    filenames = glob(
+        pjoin(test_data_path, 'testhdf5*.mat'))
+    assert_(len(filenames) > 0)
+    for filename in filenames:
+        fp = open(filename, 'rb')
+        assert_raises(NotImplementedError,
+                      loadmat,
+                      fp,
+                      struct_as_record=True)
+        fp.close()
+
+
+def test_warnings():
+    # This test echoes earlier behavior, which was to raise a warning if the
+    # user triggered a search for mat files on the Python system path. The
+    # test can be removed in the release after the upcoming one (0.13).
+    fname = pjoin(test_data_path, 'testdouble_7.1_GLNX86.mat')
+    with warnings.catch_warnings():
+        warnings.simplefilter('error')
+        # This should not generate a warning
+        loadmat(fname, struct_as_record=True)
+        # Nor should this
+        loadmat(fname, struct_as_record=False)
+
+
+def test_regression_653():
+    # Saving a dictionary with only invalid keys used to raise an error. Now we
+    # save this as an empty struct in matlab space.
+    sio = BytesIO()
+    savemat(sio, {'d':{1:2}}, format='5')
+    back = loadmat(sio)['d']
+    # Check we got an empty struct equivalent
+    assert_equal(back.shape, (1,1))
+    assert_equal(back.dtype, np.dtype(object))
+    assert_(back[0,0] is None)
+
+
+def test_structname_len():
+    # Test limit for length of field names in structs
+    lim = 31
+    fldname = 'a' * lim
+    st1 = np.zeros((1,1), dtype=[(fldname, object)])
+    savemat(BytesIO(), {'longstruct': st1}, format='5')
+    fldname = 'a' * (lim+1)
+    st1 = np.zeros((1,1), dtype=[(fldname, object)])
+    assert_raises(ValueError, savemat, BytesIO(),
+                  {'longstruct': st1}, format='5')
+
+
+def test_4_and_long_field_names_incompatible():
+    # Long field names option not supported in 4
+    my_struct = np.zeros((1,1),dtype=[('my_fieldname',object)])
+    assert_raises(ValueError, savemat, BytesIO(),
+                  {'my_struct':my_struct}, format='4', long_field_names=True)
+
+
+def test_long_field_names():
+    # Test limit for length of field names in structs
+    lim = 63
+    fldname = 'a' * lim
+    st1 = np.zeros((1,1), dtype=[(fldname, object)])
+    savemat(BytesIO(), {'longstruct': st1}, format='5',long_field_names=True)
+    fldname = 'a' * (lim+1)
+    st1 = np.zeros((1,1), dtype=[(fldname, object)])
+    assert_raises(ValueError, savemat, BytesIO(),
+                  {'longstruct': st1}, format='5',long_field_names=True)
+
+
+def test_long_field_names_in_struct():
+    # Regression test - long_field_names was erased if you passed a struct
+    # within a struct
+    lim = 63
+    fldname = 'a' * lim
+    cell = np.ndarray((1,2),dtype=object)
+    st1 = np.zeros((1,1), dtype=[(fldname, object)])
+    cell[0,0] = st1
+    cell[0,1] = st1
+    savemat(BytesIO(), {'longstruct': cell}, format='5',long_field_names=True)
+    #
+    # Check to make sure it fails with long field names off
+    #
+    assert_raises(ValueError, savemat, BytesIO(),
+                  {'longstruct': cell}, format='5', long_field_names=False)
+
+
+def test_cell_with_one_thing_in_it():
+    # Regression test: a 1 x 2 cell array holding two strings always saved
+    # fine, but a 1 x 1 cell array holding a single string used to fail.
+    # Check that both now save cleanly.
+    cells = np.ndarray((1,2),dtype=object)
+    cells[0,0] = 'Hello'
+    cells[0,1] = 'World'
+    savemat(BytesIO(), {'x': cells}, format='5')
+
+    cells = np.ndarray((1,1),dtype=object)
+    cells[0,0] = 'Hello, world'
+    savemat(BytesIO(), {'x': cells}, format='5')
+
+
+def test_writer_properties():
+    # Tests getting, setting of properties of matrix writer
+    mfw = MatFile5Writer(BytesIO())
+    assert_equal(mfw.global_vars, [])
+    mfw.global_vars = ['avar']
+    assert_equal(mfw.global_vars, ['avar'])
+    assert_equal(mfw.unicode_strings, False)
+    mfw.unicode_strings = True
+    assert_equal(mfw.unicode_strings, True)
+    assert_equal(mfw.long_field_names, False)
+    mfw.long_field_names = True
+    assert_equal(mfw.long_field_names, True)
+
+
+def test_use_small_element():
+    # Test whether we're using small data element or not
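+    # (background, not from the test itself: a MAT5 "small data element"
+    # packs the tag plus up to 4 bytes of data into a single 8-byte slot,
+    # so a variable name of 4 or fewer bytes saves roughly 8 bytes over
+    # the normal 16-byte tag-plus-padded-data layout)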
+    sio = BytesIO()
+    wtr = MatFile5Writer(sio)
+    # First check size for no sde for name
+    arr = np.zeros(10)
+    wtr.put_variables({'aaaaa': arr})
+    w_sz = len(sio.getvalue())
+    # Check small name results in largish difference in size
+    sio.truncate(0)
+    sio.seek(0)
+    wtr.put_variables({'aaaa': arr})
+    assert_(w_sz - len(sio.getvalue()) > 4)
+    # Whereas increasing name size makes less difference
+    sio.truncate(0)
+    sio.seek(0)
+    wtr.put_variables({'aaaaaa': arr})
+    assert_(len(sio.getvalue()) - w_sz < 4)
+
+
+def test_save_dict():
+    # Test that both dict and OrderedDict can be saved (as recarray),
+    # loaded as matstruct, and preserve order
+    ab_exp = np.array([[(1, 2)]], dtype=[('a', object), ('b', object)])
+    for dict_type in (dict, OrderedDict):
+        # Initialize with tuples to keep order
+        d = dict_type([('a', 1), ('b', 2)])
+        stream = BytesIO()
+        savemat(stream, {'dict': d})
+        stream.seek(0)
+        vals = loadmat(stream)['dict']
+        assert_equal(vals.dtype.names, ('a', 'b'))
+        assert_array_equal(vals, ab_exp)
+
+
+def test_1d_shape():
+    # New 5 behavior is 1D -> row vector
+    arr = np.arange(5)
+    for format in ('4', '5'):
+        # Row is the default
+        stream = BytesIO()
+        savemat(stream, {'oned': arr}, format=format)
+        vals = loadmat(stream)
+        assert_equal(vals['oned'].shape, (1, 5))
+        # can be explicitly 'column' for oned_as
+        stream = BytesIO()
+        savemat(stream, {'oned':arr},
+                format=format,
+                oned_as='column')
+        vals = loadmat(stream)
+        assert_equal(vals['oned'].shape, (5,1))
+        # but different from 'row'
+        stream = BytesIO()
+        savemat(stream, {'oned':arr},
+                format=format,
+                oned_as='row')
+        vals = loadmat(stream)
+        assert_equal(vals['oned'].shape, (1,5))
+
+
+def test_compression():
+    arr = np.zeros(100).reshape((5,20))
+    arr[2,10] = 1
+    stream = BytesIO()
+    savemat(stream, {'arr':arr})
+    raw_len = len(stream.getvalue())
+    vals = loadmat(stream)
+    assert_array_equal(vals['arr'], arr)
+    stream = BytesIO()
+    savemat(stream, {'arr':arr}, do_compression=True)
+    compressed_len = len(stream.getvalue())
+    vals = loadmat(stream)
+    assert_array_equal(vals['arr'], arr)
+    assert_(raw_len > compressed_len)
+    # Concatenate, test later
+    arr2 = arr.copy()
+    arr2[0,0] = 1
+    stream = BytesIO()
+    savemat(stream, {'arr':arr, 'arr2':arr2}, do_compression=False)
+    vals = loadmat(stream)
+    assert_array_equal(vals['arr2'], arr2)
+    stream = BytesIO()
+    savemat(stream, {'arr':arr, 'arr2':arr2}, do_compression=True)
+    vals = loadmat(stream)
+    assert_array_equal(vals['arr2'], arr2)
+
+
+def test_single_object():
+    stream = BytesIO()
+    savemat(stream, {'A':np.array(1, dtype=object)})
+
+
+def test_skip_variable():
+    # Test skipping over the first of two variables in a MAT file
+    # using mat_reader_factory and get_variables to read them in.
+    #
+    # This is a regression test of a problem that's caused by
+    # using the compressed file reader seek instead of the raw file
+    # I/O seek when skipping over a compressed chunk.
+    #
+    # The problem arises when the chunk is large: this file has
+    # a 256x256 array of random (uncompressible) doubles.
+    #
+    filename = pjoin(test_data_path,'test_skip_variable.mat')
+    #
+    # Prove that it loads with loadmat
+    #
+    d = loadmat(filename, struct_as_record=True)
+    assert_('first' in d)
+    assert_('second' in d)
+    #
+    # Make the factory
+    #
+    factory, file_opened = mat_reader_factory(filename, struct_as_record=True)
+    #
+    # This is where the factory breaks with an error in MatMatrixGetter.to_next
+    #
+    d = factory.get_variables('second')
+    assert_('second' in d)
+    factory.mat_stream.close()
+
+
+def test_empty_struct():
+    # ticket 885
+    filename = pjoin(test_data_path,'test_empty_struct.mat')
+    # before ticket fix, this would crash with ValueError, empty data
+    # type
+    d = loadmat(filename, struct_as_record=True)
+    a = d['a']
+    assert_equal(a.shape, (1,1))
+    assert_equal(a.dtype, np.dtype(object))
+    assert_(a[0,0] is None)
+    stream = BytesIO()
+    arr = np.array((), dtype='U')
+    # before ticket fix, this used to give data type not understood
+    savemat(stream, {'arr':arr})
+    d = loadmat(stream)
+    a2 = d['arr']
+    assert_array_equal(a2, arr)
+
+
+def test_save_empty_dict():
+    # saving empty dict also gives empty struct
+    stream = BytesIO()
+    savemat(stream, {'arr': {}})
+    d = loadmat(stream)
+    a = d['arr']
+    assert_equal(a.shape, (1,1))
+    assert_equal(a.dtype, np.dtype(object))
+    assert_(a[0,0] is None)
+
+
+def assert_any_equal(output, alternatives):
+    """ Assert `output` is equal to at least one element in `alternatives`
+    """
+    one_equal = False
+    for expected in alternatives:
+        if np.all(output == expected):
+            one_equal = True
+            break
+    assert_(one_equal)
+
+
+def test_to_writeable():
+    # Test to_writeable function
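+    # to_writeable maps Python objects onto things the MAT5 writer can
+    # handle: dicts and objects with a __dict__ become structured arrays,
+    # keeping only keys that are strings, are valid Python identifiers,
+    # and do not start with an underscore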
+    res = to_writeable(np.array([1]))  # pass through ndarrays
+    assert_equal(res.shape, (1,))
+    assert_array_equal(res, 1)
+    # Dict fields can be written in any order
+    expected1 = np.array([(1, 2)], dtype=[('a', '|O8'), ('b', '|O8')])
+    expected2 = np.array([(2, 1)], dtype=[('b', '|O8'), ('a', '|O8')])
+    alternatives = (expected1, expected2)
+    assert_any_equal(to_writeable({'a':1,'b':2}), alternatives)
+    # Fields with underscores discarded
+    assert_any_equal(to_writeable({'a':1,'b':2, '_c':3}), alternatives)
+    # Not-string fields discarded
+    assert_any_equal(to_writeable({'a':1,'b':2, 100:3}), alternatives)
+    # String fields that are not valid Python identifiers discarded
+    assert_any_equal(to_writeable({'a':1,'b':2, '99':3}), alternatives)
+    # Object with field names is equivalent
+
+    class klass:
+        pass
+
+    c = klass
+    c.a = 1
+    c.b = 2
+    assert_any_equal(to_writeable(c), alternatives)
+    # empty list and tuple go to empty array
+    res = to_writeable([])
+    assert_equal(res.shape, (0,))
+    assert_equal(res.dtype.type, np.float64)
+    res = to_writeable(())
+    assert_equal(res.shape, (0,))
+    assert_equal(res.dtype.type, np.float64)
+    # None -> None
+    assert_(to_writeable(None) is None)
+    # String to strings
+    assert_equal(to_writeable('a string').dtype.type, np.str_)
+    # Python scalars convert to NumPy scalars
+    res = to_writeable(1)
+    assert_equal(res.shape, ())
+    assert_equal(res.dtype.type, np.array(1).dtype.type)
+    assert_array_equal(res, 1)
+    # Empty dict returns EmptyStructMarker
+    assert_(to_writeable({}) is EmptyStructMarker)
+    # Object does not have (even empty) __dict__
+    assert_(to_writeable(object()) is None)
+    # Custom object does have empty __dict__, returns EmptyStructMarker
+
+    class C:
+        pass
+
+    assert_(to_writeable(C()) is EmptyStructMarker)
+    # dict keys with legal characters are convertible
+    res = to_writeable({'a': 1})['a']
+    assert_equal(res.shape, (1,))
+    assert_equal(res.dtype.type, np.object_)
+    # If only fields with illegal names are present, fall back to EmptyStructMarker
+    assert_(to_writeable({'1':1}) is EmptyStructMarker)
+    assert_(to_writeable({'_a':1}) is EmptyStructMarker)
+    # Unless there are valid fields, in which case structured array
+    assert_equal(to_writeable({'1':1, 'f': 2}),
+                 np.array([(2,)], dtype=[('f', '|O8')]))
+
+
+def test_recarray():
+    # check roundtrip of structured array
+    dt = [('f1', 'f8'),
+          ('f2', 'S10')]
+    arr = np.zeros((2,), dtype=dt)
+    arr[0]['f1'] = 0.5
+    arr[0]['f2'] = 'python'
+    arr[1]['f1'] = 99
+    arr[1]['f2'] = 'not perl'
+    stream = BytesIO()
+    savemat(stream, {'arr': arr})
+    d = loadmat(stream, struct_as_record=False)
+    a20 = d['arr'][0,0]
+    assert_equal(a20.f1, 0.5)
+    assert_equal(a20.f2, 'python')
+    d = loadmat(stream, struct_as_record=True)
+    a20 = d['arr'][0,0]
+    assert_equal(a20['f1'], 0.5)
+    assert_equal(a20['f2'], 'python')
+    # structs always come back as object types
+    assert_equal(a20.dtype, np.dtype([('f1', 'O'),
+                                      ('f2', 'O')]))
+    a21 = d['arr'].flat[1]
+    assert_equal(a21['f1'], 99)
+    assert_equal(a21['f2'], 'not perl')
+
+
+def test_save_object():
+    class C:
+        pass
+    c = C()
+    c.field1 = 1
+    c.field2 = 'a string'
+    stream = BytesIO()
+    savemat(stream, {'c': c})
+    d = loadmat(stream, struct_as_record=False)
+    c2 = d['c'][0,0]
+    assert_equal(c2.field1, 1)
+    assert_equal(c2.field2, 'a string')
+    d = loadmat(stream, struct_as_record=True)
+    c2 = d['c'][0,0]
+    assert_equal(c2['field1'], 1)
+    assert_equal(c2['field2'], 'a string')
+
+
+def test_read_opts():
+    # tests if read is seeing option sets, at initialization and after
+    # initialization
+    arr = np.arange(6).reshape(1,6)
+    stream = BytesIO()
+    savemat(stream, {'a': arr})
+    rdr = MatFile5Reader(stream)
+    back_dict = rdr.get_variables()
+    rarr = back_dict['a']
+    assert_array_equal(rarr, arr)
+    rdr = MatFile5Reader(stream, squeeze_me=True)
+    assert_array_equal(rdr.get_variables()['a'], arr.reshape((6,)))
+    rdr.squeeze_me = False
+    assert_array_equal(rdr.get_variables()['a'], arr)
+    rdr = MatFile5Reader(stream, byte_order=boc.native_code)
+    assert_array_equal(rdr.get_variables()['a'], arr)
+    # inverted byte code leads to error on read because of swapped
+    # header etc.
+    rdr = MatFile5Reader(stream, byte_order=boc.swapped_code)
+    assert_raises(Exception, rdr.get_variables)
+    rdr.byte_order = boc.native_code
+    assert_array_equal(rdr.get_variables()['a'], arr)
+    arr = np.array(['a string'])
+    stream.truncate(0)
+    stream.seek(0)
+    savemat(stream, {'a': arr})
+    rdr = MatFile5Reader(stream)
+    assert_array_equal(rdr.get_variables()['a'], arr)
+    rdr = MatFile5Reader(stream, chars_as_strings=False)
+    carr = np.atleast_2d(np.array(list(arr.item()), dtype='U1'))
+    assert_array_equal(rdr.get_variables()['a'], carr)
+    rdr.chars_as_strings = True
+    assert_array_equal(rdr.get_variables()['a'], arr)
+
+
+def test_empty_string():
+    # make sure reading empty string does not raise error
+    estring_fname = pjoin(test_data_path, 'single_empty_string.mat')
+    fp = open(estring_fname, 'rb')
+    rdr = MatFile5Reader(fp)
+    d = rdr.get_variables()
+    fp.close()
+    assert_array_equal(d['a'], np.array([], dtype='U1'))
+    # Empty string round trip. Matlab cannot distinguish
+    # between a string array that is empty, and a string array
+    # containing a single empty string, because it stores strings as
+    # arrays of char. There is no way of having an array of char that
+    # is not empty, but contains an empty string.
+    stream = BytesIO()
+    savemat(stream, {'a': np.array([''])})
+    rdr = MatFile5Reader(stream)
+    d = rdr.get_variables()
+    assert_array_equal(d['a'], np.array([], dtype='U1'))
+    stream.truncate(0)
+    stream.seek(0)
+    savemat(stream, {'a': np.array([], dtype='U1')})
+    rdr = MatFile5Reader(stream)
+    d = rdr.get_variables()
+    assert_array_equal(d['a'], np.array([], dtype='U1'))
+    stream.close()
+
+
+def test_corrupted_data():
+    import zlib
+    for exc, fname in [(ValueError, 'corrupted_zlib_data.mat'),
+                       (zlib.error, 'corrupted_zlib_checksum.mat')]:
+        with open(pjoin(test_data_path, fname), 'rb') as fp:
+            rdr = MatFile5Reader(fp)
+            assert_raises(exc, rdr.get_variables)
+
+
+def test_corrupted_data_check_can_be_disabled():
+    with open(pjoin(test_data_path, 'corrupted_zlib_data.mat'), 'rb') as fp:
+        rdr = MatFile5Reader(fp, verify_compressed_data_integrity=False)
+        rdr.get_variables()
+
+
+def test_read_both_endian():
+    # make sure big- and little- endian data is read correctly
+    for fname in ('big_endian.mat', 'little_endian.mat'):
+        fp = open(pjoin(test_data_path, fname), 'rb')
+        rdr = MatFile5Reader(fp)
+        d = rdr.get_variables()
+        fp.close()
+        assert_array_equal(d['strings'],
+                           np.array([['hello'],
+                                     ['world']], dtype=object))
+        assert_array_equal(d['floats'],
+                           np.array([[2., 3.],
+                                     [3., 4.]], dtype=np.float32))
+
+
+def test_write_opposite_endian():
+    # We don't support writing opposite endian .mat files, but we need to behave
+    # correctly if the user supplies an other-endian NumPy array to write out.
+    float_arr = np.array([[2., 3.],
+                          [3., 4.]])
+    int_arr = np.arange(6).reshape((2, 3))
+    uni_arr = np.array(['hello', 'world'], dtype='U')
+    stream = BytesIO()
+    savemat(stream, {'floats': float_arr.byteswap().newbyteorder(),
+                     'ints': int_arr.byteswap().newbyteorder(),
+                     'uni_arr': uni_arr.byteswap().newbyteorder()})
+    rdr = MatFile5Reader(stream)
+    d = rdr.get_variables()
+    assert_array_equal(d['floats'], float_arr)
+    assert_array_equal(d['ints'], int_arr)
+    assert_array_equal(d['uni_arr'], uni_arr)
+    stream.close()
+
+
+def test_logical_array():
+    # The roundtrip test doesn't verify that we load the data up with the
+    # correct (bool) dtype
+    with open(pjoin(test_data_path, 'testbool_8_WIN64.mat'), 'rb') as fobj:
+        rdr = MatFile5Reader(fobj, mat_dtype=True)
+        d = rdr.get_variables()
+    x = np.array([[True], [False]], dtype=np.bool_)
+    assert_array_equal(d['testbools'], x)
+    assert_equal(d['testbools'].dtype, x.dtype)
+
+
+def test_logical_out_type():
+    # Confirm that a bool array is written as uint8 data with uint8 mat class
+    # See gh-4022
+    stream = BytesIO()
+    barr = np.array([False, True, False])
+    savemat(stream, {'barray': barr})
+    stream.seek(0)
+    reader = MatFile5Reader(stream)
+    reader.initialize_read()
+    reader.read_file_header()
+    hdr, _ = reader.read_var_header()
+    assert_equal(hdr.mclass, mio5p.mxUINT8_CLASS)
+    assert_equal(hdr.is_logical, True)
+    var = reader.read_var_array(hdr, False)
+    assert_equal(var.dtype.type, np.uint8)
+
+
+def test_roundtrip_zero_dimensions():
+    stream = BytesIO()
+    savemat(stream, {'d':np.empty((10, 0))})
+    d = loadmat(stream)
+    assert d['d'].shape == (10, 0)
+
+
+def test_mat4_3d():
+    # test behavior when writing 3-D arrays to matlab 4 files
+    stream = BytesIO()
+    arr = np.arange(24).reshape((2,3,4))
+    assert_raises(ValueError, savemat, stream, {'a': arr}, True, '4')
+
+
+def test_func_read():
+    func_eg = pjoin(test_data_path, 'testfunc_7.4_GLNX86.mat')
+    fp = open(func_eg, 'rb')
+    rdr = MatFile5Reader(fp)
+    d = rdr.get_variables()
+    fp.close()
+    assert isinstance(d['testfunc'], MatlabFunction)
+    stream = BytesIO()
+    wtr = MatFile5Writer(stream)
+    assert_raises(MatWriteError, wtr.put_variables, d)
+
+
+def test_mat_dtype():
+    double_eg = pjoin(test_data_path, 'testmatrix_6.1_SOL2.mat')
+    fp = open(double_eg, 'rb')
+    rdr = MatFile5Reader(fp, mat_dtype=False)
+    d = rdr.get_variables()
+    fp.close()
+    assert_equal(d['testmatrix'].dtype.kind, 'u')
+
+    fp = open(double_eg, 'rb')
+    rdr = MatFile5Reader(fp, mat_dtype=True)
+    d = rdr.get_variables()
+    fp.close()
+    assert_equal(d['testmatrix'].dtype.kind, 'f')
+
+
+def test_sparse_in_struct():
+    # reproduces bug found by DC where Cython code was insisting on
+    # ndarray return type, but getting sparse matrix
+    st = {'sparsefield': SP.coo_matrix(np.eye(4))}
+    stream = BytesIO()
+    savemat(stream, {'a':st})
+    d = loadmat(stream, struct_as_record=True)
+    assert_array_equal(d['a'][0, 0]['sparsefield'].toarray(), np.eye(4))
+
+
+def test_mat_struct_squeeze():
+    stream = BytesIO()
+    in_d = {'st':{'one':1, 'two':2}}
+    savemat(stream, in_d)
+    # no error without squeeze
+    loadmat(stream, struct_as_record=False)
+    # previous error was with squeeze, with mat_struct
+    loadmat(stream, struct_as_record=False, squeeze_me=True)
+
+
+def test_scalar_squeeze():
+    stream = BytesIO()
+    in_d = {'scalar': [[0.1]], 'string': 'my name', 'st':{'one':1, 'two':2}}
+    savemat(stream, in_d)
+    out_d = loadmat(stream, squeeze_me=True)
+    assert_(isinstance(out_d['scalar'], float))
+    assert_(isinstance(out_d['string'], str))
+    assert_(isinstance(out_d['st'], np.ndarray))
+
+
+def test_str_round():
+    # from report by Angus McMorland on mailing list 3 May 2010
+    stream = BytesIO()
+    in_arr = np.array(['Hello', 'Foob'])
+    out_arr = np.array(['Hello', 'Foob '])
+    savemat(stream, dict(a=in_arr))
+    res = loadmat(stream)
+    # resulted in ['HloolFoa', 'elWrdobr']
+    assert_array_equal(res['a'], out_arr)
+    stream.truncate(0)
+    stream.seek(0)
+    # Make Fortran ordered version of string
+    in_str = in_arr.tobytes(order='F')
+    in_from_str = np.ndarray(shape=in_arr.shape,
+                             dtype=in_arr.dtype,
+                             order='F',
+                             buffer=in_str)
+    savemat(stream, dict(a=in_from_str))
+    res = loadmat(stream)
+    assert_array_equal(res['a'], out_arr)
+    # unicode save did lead to buffer too small error
+    stream.truncate(0)
+    stream.seek(0)
+    in_arr_u = in_arr.astype('U')
+    out_arr_u = out_arr.astype('U')
+    savemat(stream, {'a': in_arr_u})
+    res = loadmat(stream)
+    assert_array_equal(res['a'], out_arr_u)
+
+
+def test_fieldnames():
+    # Check that field names are as expected
+    stream = BytesIO()
+    savemat(stream, {'a': {'a':1, 'b':2}})
+    res = loadmat(stream)
+    field_names = res['a'].dtype.names
+    assert_equal(set(field_names), set(('a', 'b')))
+
+
+def test_loadmat_varnames():
+    # Test that we can get just one variable from a mat file using loadmat
+    mat5_sys_names = ['__globals__',
+                      '__header__',
+                      '__version__']
+    for eg_file, sys_v_names in (
+        (pjoin(test_data_path, 'testmulti_4.2c_SOL2.mat'), []), (pjoin(
+            test_data_path, 'testmulti_7.4_GLNX86.mat'), mat5_sys_names)):
+        vars = loadmat(eg_file)
+        assert_equal(set(vars.keys()), set(['a', 'theta'] + sys_v_names))
+        vars = loadmat(eg_file, variable_names='a')
+        assert_equal(set(vars.keys()), set(['a'] + sys_v_names))
+        vars = loadmat(eg_file, variable_names=['a'])
+        assert_equal(set(vars.keys()), set(['a'] + sys_v_names))
+        vars = loadmat(eg_file, variable_names=['theta'])
+        assert_equal(set(vars.keys()), set(['theta'] + sys_v_names))
+        vars = loadmat(eg_file, variable_names=('theta',))
+        assert_equal(set(vars.keys()), set(['theta'] + sys_v_names))
+        vars = loadmat(eg_file, variable_names=[])
+        assert_equal(set(vars.keys()), set(sys_v_names))
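+        # loadmat should not mutate the variable_names list it was given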
+        vnames = ['theta']
+        vars = loadmat(eg_file, variable_names=vnames)
+        assert_equal(vnames, ['theta'])
+
+
+def test_round_types():
+    # Check that saving, loading preserves dtype in most cases
+    arr = np.arange(10)
+    stream = BytesIO()
+    for dts in ('f8','f4','i8','i4','i2','i1',
+                'u8','u4','u2','u1','c16','c8'):
+        stream.truncate(0)
+        stream.seek(0)  # truncate() does not rewind the stream
+        savemat(stream, {'arr': arr.astype(dts)})
+        vars = loadmat(stream)
+        assert_equal(np.dtype(dts), vars['arr'].dtype)
+
+
+def test_varmats_from_mat():
+    # Make a mat file with several variables, write it, read it back
+    names_vars = (('arr', mlarr(np.arange(10))),
+                  ('mystr', mlarr('a string')),
+                  ('mynum', mlarr(10)))
+
+    # Dict like thing to give variables in defined order
+    class C:
+        def items(self):
+            return names_vars
+    stream = BytesIO()
+    savemat(stream, C())
+    varmats = varmats_from_mat(stream)
+    assert_equal(len(varmats), 3)
+    for i in range(3):
+        name, var_stream = varmats[i]
+        exp_name, exp_res = names_vars[i]
+        assert_equal(name, exp_name)
+        res = loadmat(var_stream)
+        assert_array_equal(res[name], exp_res)
+
+
+def test_one_by_zero():
+    # Test 1x0 chars get read correctly
+    func_eg = pjoin(test_data_path, 'one_by_zero_char.mat')
+    fp = open(func_eg, 'rb')
+    rdr = MatFile5Reader(fp)
+    d = rdr.get_variables()
+    fp.close()
+    assert_equal(d['var'].shape, (0,))
+
+
+def test_load_mat4_le():
+    # We were getting byte order wrong when reading little-endian float64
+    # dense matrices on big-endian platforms
+    mat4_fname = pjoin(test_data_path, 'test_mat4_le_floats.mat')
+    vars = loadmat(mat4_fname)
+    assert_array_equal(vars['a'], [[0.1, 1.2]])
+
+
+def test_unicode_mat4():
+    # Mat4 should save unicode as latin1
+    bio = BytesIO()
+    var = {'second_cat': 'Schrödinger'}
+    savemat(bio, var, format='4')
+    var_back = loadmat(bio)
+    assert_equal(var_back['second_cat'], var['second_cat'])
+
+
+def test_logical_sparse():
+    # Test we can read logical sparse stored in mat file as bytes.
+    # See https://github.com/scipy/scipy/issues/3539.
+    # In some files saved by MATLAB, the sparse data elements (Real Part
+    # Subelement in MATLAB speak) are stored with apparent type double
+    # (miDOUBLE) but are in fact single bytes.
+    filename = pjoin(test_data_path,'logical_sparse.mat')
+    # Before fix, this would crash with:
+    # ValueError: indices and data should have the same size
+    d = loadmat(filename, struct_as_record=True)
+    log_sp = d['sp_log_5_4']
+    assert_(isinstance(log_sp, SP.csc_matrix))
+    assert_equal(log_sp.dtype.type, np.bool_)
+    assert_array_equal(log_sp.toarray(),
+                       [[True, True, True, False],
+                        [False, False, True, False],
+                        [False, False, True, False],
+                        [False, False, False, False],
+                        [False, False, False, False]])
+
+
+def test_empty_sparse():
+    # Can we read empty sparse matrices?
+    sio = BytesIO()
+    import scipy.sparse
+    empty_sparse = scipy.sparse.csr_matrix([[0,0],[0,0]])
+    savemat(sio, dict(x=empty_sparse))
+    sio.seek(0)
+    res = loadmat(sio)
+    assert_array_equal(res['x'].shape, empty_sparse.shape)
+    assert_array_equal(res['x'].toarray(), 0)
+    # Do empty sparse matrices get written with max nnz 1?
+    # See https://github.com/scipy/scipy/issues/4208
+    sio.seek(0)
+    reader = MatFile5Reader(sio)
+    reader.initialize_read()
+    reader.read_file_header()
+    hdr, _ = reader.read_var_header()
+    assert_equal(hdr.nzmax, 1)
+
+
+def test_empty_mat_error():
+    # Test we get a specific warning for an empty mat file
+    sio = BytesIO()
+    assert_raises(MatReadError, loadmat, sio)
+
+
+def test_miuint32_compromise():
+    # Reader should accept miUINT32 for miINT32, but check signs
+    # mat file with miUINT32 for miINT32, but OK values
+    filename = pjoin(test_data_path, 'miuint32_for_miint32.mat')
+    res = loadmat(filename)
+    assert_equal(res['an_array'], np.arange(10)[None, :])
+    # mat file with miUINT32 for miINT32, with negative value
+    filename = pjoin(test_data_path, 'bad_miuint32.mat')
+    with assert_raises(ValueError):
+        loadmat(filename)
+
+
+def test_miutf8_for_miint8_compromise():
+    # Check reader accepts ascii as miUTF8 for array names
+    filename = pjoin(test_data_path, 'miutf8_array_name.mat')
+    res = loadmat(filename)
+    assert_equal(res['array_name'], [[1]])
+    # mat file with non-ascii utf8 name raises error
+    filename = pjoin(test_data_path, 'bad_miutf8_array_name.mat')
+    with assert_raises(ValueError):
+        loadmat(filename)
+
+
+def test_bad_utf8():
+    # Check that reader reads bad UTF with 'replace' option
+    filename = pjoin(test_data_path,'broken_utf8.mat')
+    res = loadmat(filename)
+    assert_equal(res['bad_string'],
+                 b'\x80 am broken'.decode('utf8', 'replace'))
+
+
+def test_save_unicode_field(tmpdir):
+    filename = os.path.join(str(tmpdir), 'test.mat')
+    test_dict = {u'a':{u'b':1,u'c':'test_str'}}
+    savemat(filename, test_dict)
+
+
+def test_filenotfound():
+    # Check the correct error is thrown
+    assert_raises(OSError, loadmat, "NotExistentFile00.mat")
+    assert_raises(OSError, loadmat, "NotExistentFile00")
+
+
+def test_simplify_cells():
+    # Test output when simplify_cells=True
+    filename = pjoin(test_data_path, 'testsimplecell.mat')
+    res1 = loadmat(filename, simplify_cells=True)
+    res2 = loadmat(filename, simplify_cells=False)
+    assert_(isinstance(res1["s"], dict))
+    assert_(isinstance(res2["s"], np.ndarray))
+    assert_array_equal(res1["s"]["mycell"], np.array(["a", "b", "c"]))
+
+
+@pytest.mark.parametrize('version, filt, regex', [
+    (0, '_4*_*', None),
+    (1, '_5*_*', None),
+    (1, '_6*_*', None),
+    (1, '_7*_*', '^((?!hdf5).)*$'),  # not containing hdf5
+    (2, '_7*_*', '.*hdf5.*'),
+    (1, '8*_*', None),
+])
+def test_matfile_version(version, filt, regex):
+    use_filt = pjoin(test_data_path, 'test*%s.mat' % filt)
+    files = glob(use_filt)
+    if regex is not None:
+        files = [file for file in files if re.match(regex, file) is not None]
+    assert len(files) > 0, \
+        "No files for version %s using filter %s" % (version, filt)
+    for file in files:
+        got_version = matfile_version(file)
+        assert got_version[0] == version
+
+
+def test_opaque():
+    """Test that we can read a MatlabOpaque object."""
+    data = loadmat(pjoin(test_data_path, 'parabola.mat'))
+    assert isinstance(data['parabola'], MatlabFunction)
+    assert isinstance(data['parabola'].item()[3].item()[3], MatlabOpaque)
+
+
+def test_deprecation():
+    """Test that access to previous attributes still works."""
+    # This should be accessible immediately from scipy.io import
+    with assert_warns(DeprecationWarning):
+        scipy.io.matlab.mio5_params.MatlabOpaque  # noqa
+
+    # These should be importable but warn as well
+    with assert_warns(DeprecationWarning):
+        from scipy.io.matlab.miobase import MatReadError  # noqa
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/test_mio5_utils.py b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/test_mio5_utils.py
new file mode 100644
index 00000000..fe800750
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/test_mio5_utils.py
@@ -0,0 +1,180 @@
+""" Testing mio5_utils Cython module
+
+"""
+import sys
+
+from io import BytesIO
+cStringIO = BytesIO  # legacy alias from the Python 2 era, kept for the tests below
+
+import numpy as np
+
+from numpy.testing import assert_array_equal, assert_equal, assert_
+from pytest import raises as assert_raises
+
+import scipy.io.matlab._byteordercodes as boc
+import scipy.io.matlab._streams as streams
+import scipy.io.matlab._mio5_params as mio5p
+import scipy.io.matlab._mio5_utils as m5u
+
+
+def test_byteswap():
+    for val in (
+        1,
+        0x100,
+        0x10000):
+        a = np.array(val, dtype=np.uint32)
+        b = a.byteswap()
+        c = m5u.byteswap_u4(a)
+        assert_equal(b.item(), c)
+        d = m5u.byteswap_u4(c)
+        assert_equal(a.item(), d)
+
+
+def _make_tag(base_dt, val, mdtype, sde=False):
+    ''' Makes a simple matlab tag, full or sde '''
+    base_dt = np.dtype(base_dt)
+    bo = boc.to_numpy_code(base_dt.byteorder)
+    byte_count = base_dt.itemsize
+    if not sde:
+        udt = bo + 'u4'
+        padding = 8 - (byte_count % 8)
+        all_dt = [('mdtype', udt),
+                  ('byte_count', udt),
+                  ('val', base_dt)]
+        if padding:
+            all_dt.append(('padding', 'u1', padding))
+    else:  # is sde
+        udt = bo + 'u2'
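+        # the 32-bit SDE tag packs byte_count into its upper 16 bits and
+        # mdtype into the lower 16, so the on-disk order of the two u2
+        # fields depends on byte order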
+        padding = 4-byte_count
+        if bo == '<':  # little endian
+            all_dt = [('mdtype', udt),
+                      ('byte_count', udt),
+                      ('val', base_dt)]
+        else:  # big endian
+            all_dt = [('byte_count', udt),
+                      ('mdtype', udt),
+                      ('val', base_dt)]
+        if padding:
+            all_dt.append(('padding', 'u1', padding))
+    tag = np.zeros((1,), dtype=all_dt)
+    tag['mdtype'] = mdtype
+    tag['byte_count'] = byte_count
+    tag['val'] = val
+    return tag
+
+
+def _write_stream(stream, *strings):
+    stream.truncate(0)
+    stream.seek(0)
+    for s in strings:
+        stream.write(s)
+    stream.seek(0)
+
+
+def _make_readerlike(stream, byte_order=boc.native_code):
+    class R:
+        pass
+    r = R()
+    r.mat_stream = stream
+    r.byte_order = byte_order
+    r.struct_as_record = True
+    r.uint16_codec = sys.getdefaultencoding()
+    r.chars_as_strings = False
+    r.mat_dtype = False
+    r.squeeze_me = False
+    return r
+
+
+def test_read_tag():
+    # mainly to test errors
+    # make reader-like thing
+    str_io = BytesIO()
+    r = _make_readerlike(str_io)
+    c_reader = m5u.VarReader5(r)
+    # reading a tag from an empty stream raises OSError
+    assert_raises(OSError, c_reader.read_tag)
+    # bad SDE
+    tag = _make_tag('i4', 1, mio5p.miINT32, sde=True)
+    tag['byte_count'] = 5
+    _write_stream(str_io, tag.tobytes())
+    assert_raises(ValueError, c_reader.read_tag)
+
+
+def test_read_stream():
+    tag = _make_tag('i4', 1, mio5p.miINT32, sde=True)
+    tag_str = tag.tobytes()
+    str_io = cStringIO(tag_str)
+    st = streams.make_stream(str_io)
+    s = streams._read_into(st, tag.itemsize)
+    assert_equal(s, tag.tobytes())
+
+
+def test_read_numeric():
+    # make reader-like thing
+    str_io = cStringIO()
+    r = _make_readerlike(str_io)
+    # check simplest of tags
+    for base_dt, val, mdtype in (('u2', 30, mio5p.miUINT16),
+                                 ('i4', 1, mio5p.miINT32),
+                                 ('i2', -1, mio5p.miINT16)):
+        for byte_code in ('<', '>'):
+            r.byte_order = byte_code
+            c_reader = m5u.VarReader5(r)
+            assert_equal(c_reader.little_endian, byte_code == '<')
+            assert_equal(c_reader.is_swapped, byte_code != boc.native_code)
+            for sde_f in (False, True):
+                dt = np.dtype(base_dt).newbyteorder(byte_code)
+                a = _make_tag(dt, val, mdtype, sde_f)
+                a_str = a.tobytes()
+                _write_stream(str_io, a_str)
+                el = c_reader.read_numeric()
+                assert_equal(el, val)
+                # two sequential reads
+                _write_stream(str_io, a_str, a_str)
+                el = c_reader.read_numeric()
+                assert_equal(el, val)
+                el = c_reader.read_numeric()
+                assert_equal(el, val)
+
+
+def test_read_numeric_writeable():
+    # make reader-like thing
+    str_io = cStringIO()
+    r = _make_readerlike(str_io, '<')
+    c_reader = m5u.VarReader5(r)
+    dt = np.dtype('<u2')
+    a = _make_tag(dt, 30, mio5p.miUINT16, sde=False)
+    a_str = a.tobytes()
+    _write_stream(str_io, a_str)
+    el = c_reader.read_numeric()
+    assert_(el.flags.writeable is True)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/test_mio_utils.py b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/test_mio_utils.py
new file mode 100644
index 00000000..1d19a979
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/test_mio_utils.py
@@ -0,0 +1,45 @@
+""" Testing
+
+"""
+
+import numpy as np
+
+from numpy.testing import assert_array_equal, assert_
+
+from scipy.io.matlab._mio_utils import squeeze_element, chars_to_strings
+
+
+def test_squeeze_element():
+    a = np.zeros((1,3))
+    assert_array_equal(np.squeeze(a), squeeze_element(a))
+    # 0-D output from squeeze gives scalar
+    sq_flt = squeeze_element(np.zeros((1,1), dtype=float))
+    assert_(isinstance(sq_flt, float))
+    # Unless it's a structured array
+    sq_sa = squeeze_element(np.zeros((1,1),dtype=[('f1', 'f')]))
+    assert_(isinstance(sq_sa, np.ndarray))
+    # Squeezing empty arrays maintains their dtype.
+    sq_empty = squeeze_element(np.empty(0, np.uint8))
+    assert sq_empty.dtype == np.uint8
+
+
+def test_chars_strings():
+    # chars as strings
+    strings = ['learn ', 'python', 'fast  ', 'here  ']
+    str_arr = np.array(strings, dtype='U6')  # shape (4,)
+    chars = [list(s) for s in strings]
+    char_arr = np.array(chars, dtype='U1')  # shape (4,6)
+    assert_array_equal(chars_to_strings(char_arr), str_arr)
+    ca2d = char_arr.reshape((2,2,6))
+    sa2d = str_arr.reshape((2,2))
+    assert_array_equal(chars_to_strings(ca2d), sa2d)
+    ca3d = char_arr.reshape((1,2,2,6))
+    sa3d = str_arr.reshape((1,2,2))
+    assert_array_equal(chars_to_strings(ca3d), sa3d)
+    # Fortran ordered arrays
+    char_arrf = np.array(chars, dtype='U1', order='F')  # shape (4,6)
+    assert_array_equal(chars_to_strings(char_arrf), str_arr)
+    # empty array
+    arr = np.array([['']], dtype='U1')
+    out_arr = np.array([''], dtype='U1')
+    assert_array_equal(chars_to_strings(arr), out_arr)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/test_miobase.py b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/test_miobase.py
new file mode 100644
index 00000000..e07024f9
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/test_miobase.py
@@ -0,0 +1,32 @@
+""" Testing miobase module
+"""
+
+import numpy as np
+
+from numpy.testing import assert_equal
+from pytest import raises as assert_raises
+
+from scipy.io.matlab._miobase import matdims
+
+
+def test_matdims():
+    # Test matdims dimension finder
+    assert_equal(matdims(np.array(1)), (1, 1))  # NumPy scalar
+    assert_equal(matdims(np.array([1])), (1, 1))  # 1-D array, 1 element
+    assert_equal(matdims(np.array([1,2])), (2, 1))  # 1-D array, 2 elements
+    assert_equal(matdims(np.array([[2],[3]])), (2, 1))  # 2-D array, column vector
+    assert_equal(matdims(np.array([[2,3]])), (1, 2))  # 2-D array, row vector
+    # 3d array, rowish vector
+    assert_equal(matdims(np.array([[[2,3]]])), (1, 1, 2))
+    assert_equal(matdims(np.array([])), (0, 0))  # empty 1-D array
+    assert_equal(matdims(np.array([[]])), (1, 0))  # empty 2-D array
+    assert_equal(matdims(np.array([[[]]])), (1, 1, 0))  # empty 3-D array
+    assert_equal(matdims(np.empty((1, 0, 1))), (1, 0, 1))  # empty 3-D array
+    # Optional argument flips 1-D shape behavior.
+    assert_equal(matdims(np.array([1,2]), 'row'), (1, 2))  # 1-D array, 2 elements
+    # The argument has to make sense though
+    assert_raises(ValueError, matdims, np.array([1,2]), 'bizarre')
+    # Check empty sparse matrices get their own shape
+    from scipy.sparse import csr_matrix, csc_matrix
+    assert_equal(matdims(csr_matrix(np.zeros((3, 3)))), (3, 3))
+    assert_equal(matdims(csc_matrix(np.zeros((2, 2)))), (2, 2))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/test_pathological.py b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/test_pathological.py
new file mode 100644
index 00000000..3f4b35c9
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/test_pathological.py
@@ -0,0 +1,33 @@
+""" Test reading of files not conforming to matlab specification
+
+We try to read any file that MATLAB itself can read; the files here are examples.
+"""
+from os.path import dirname, join as pjoin
+
+from numpy.testing import assert_
+from pytest import raises as assert_raises
+
+from scipy.io.matlab._mio import loadmat
+
+TEST_DATA_PATH = pjoin(dirname(__file__), 'data')
+
+
+def test_multiple_fieldnames():
+    # Example provided by Dharhas Pothina
+    # Extracted using mio5.varmats_from_mat
+    multi_fname = pjoin(TEST_DATA_PATH, 'nasty_duplicate_fieldnames.mat')
+    vars = loadmat(multi_fname)
+    funny_names = vars['Summary'].dtype.names
+    assert_(set(['_1_Station_Q', '_2_Station_Q',
+                     '_3_Station_Q']).issubset(funny_names))
+
+
+def test_malformed1():
+    # Example from gh-6072
+    # Contains malformed header data, which previously resulted in a
+    # buffer overflow.
+    #
+    # Should raise an exception, not segfault
+    fname = pjoin(TEST_DATA_PATH, 'malformed1.mat')
+    with open(fname, 'rb') as f:
+        assert_raises(ValueError, loadmat, f)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/test_streams.py b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/test_streams.py
new file mode 100644
index 00000000..9a7c1c7c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/matlab/tests/test_streams.py
@@ -0,0 +1,229 @@
+""" Testing
+
+"""
+
+import os
+import zlib
+
+from io import BytesIO
+
+
+from tempfile import mkstemp
+from contextlib import contextmanager
+
+import numpy as np
+
+from numpy.testing import assert_, assert_equal
+from pytest import raises as assert_raises
+
+from scipy.io.matlab._streams import (make_stream,
+    GenericStream, ZlibInputStream,
+    _read_into, _read_string, BLOCK_SIZE)
+
+
+@contextmanager
+def setup_test_file():
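+    # provide the same bytes as a real file object (fs) and two BytesIO
+    # buffers (gs and cs) so each test runs against every stream flavour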
+    val = b'a\x00string'
+    fd, fname = mkstemp()
+
+    with os.fdopen(fd, 'wb') as fs:
+        fs.write(val)
+    with open(fname, 'rb') as fs:
+        gs = BytesIO(val)
+        cs = BytesIO(val)
+        yield fs, gs, cs
+    os.unlink(fname)
+
+
+def test_make_stream():
+    with setup_test_file() as (fs, gs, cs):
+        # test stream initialization
+        assert_(isinstance(make_stream(gs), GenericStream))
+
+
+def test_tell_seek():
+    with setup_test_file() as (fs, gs, cs):
+        for s in (fs, gs, cs):
+            st = make_stream(s)
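+            # these stream wrappers return 0 on success from seek
+            # (a C-style status code), not the new absolute position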
+            res = st.seek(0)
+            assert_equal(res, 0)
+            assert_equal(st.tell(), 0)
+            res = st.seek(5)
+            assert_equal(res, 0)
+            assert_equal(st.tell(), 5)
+            res = st.seek(2, 1)
+            assert_equal(res, 0)
+            assert_equal(st.tell(), 7)
+            res = st.seek(-2, 2)
+            assert_equal(res, 0)
+            assert_equal(st.tell(), 6)
+
+
+def test_read():
+    with setup_test_file() as (fs, gs, cs):
+        for s in (fs, gs, cs):
+            st = make_stream(s)
+            st.seek(0)
+            res = st.read(-1)
+            assert_equal(res, b'a\x00string')
+            st.seek(0)
+            res = st.read(4)
+            assert_equal(res, b'a\x00st')
+            # read into
+            st.seek(0)
+            res = _read_into(st, 4)
+            assert_equal(res, b'a\x00st')
+            res = _read_into(st, 4)
+            assert_equal(res, b'ring')
+            assert_raises(OSError, _read_into, st, 2)
+            # read alloc
+            st.seek(0)
+            res = _read_string(st, 4)
+            assert_equal(res, b'a\x00st')
+            res = _read_string(st, 4)
+            assert_equal(res, b'ring')
+            assert_raises(OSError, _read_string, st, 2)
+
+
+class TestZlibInputStream:
+    def _get_data(self, size):
+        data = np.random.randint(0, 256, size).astype(np.uint8).tobytes()
+        compressed_data = zlib.compress(data)
+        stream = BytesIO(compressed_data)
+        return stream, len(compressed_data), data
+
+    def test_read(self):
+        SIZES = [0, 1, 10, BLOCK_SIZE//2, BLOCK_SIZE-1,
+                 BLOCK_SIZE, BLOCK_SIZE+1, 2*BLOCK_SIZE-1]
+
+        READ_SIZES = [BLOCK_SIZE//2, BLOCK_SIZE-1,
+                      BLOCK_SIZE, BLOCK_SIZE+1]
+
+        def check(size, read_size):
+            compressed_stream, compressed_data_len, data = self._get_data(size)
+            stream = ZlibInputStream(compressed_stream, compressed_data_len)
+            data2 = b''
+            so_far = 0
+            while True:
+                block = stream.read(min(read_size,
+                                        size - so_far))
+                if not block:
+                    break
+                so_far += len(block)
+                data2 += block
+            assert_equal(data, data2)
+
+        for size in SIZES:
+            for read_size in READ_SIZES:
+                check(size, read_size)
+
+    def test_read_max_length(self):
+        size = 1234
+        data = np.random.randint(0, 256, size).astype(np.uint8).tobytes()
+        compressed_data = zlib.compress(data)
+        compressed_stream = BytesIO(compressed_data + b"abbacaca")
+        stream = ZlibInputStream(compressed_stream, len(compressed_data))
+
+        stream.read(len(data))
+        assert_equal(compressed_stream.tell(), len(compressed_data))
+
+        assert_raises(OSError, stream.read, 1)
+
+    def test_read_bad_checksum(self):
+        data = np.random.randint(0, 256, 10).astype(np.uint8).tobytes()
+        compressed_data = zlib.compress(data)
+
+        # break checksum
+        compressed_data = compressed_data[:-1] + bytes([(compressed_data[-1] + 1) & 255])
+
+        compressed_stream = BytesIO(compressed_data)
+        stream = ZlibInputStream(compressed_stream, len(compressed_data))
+
+        assert_raises(zlib.error, stream.read, len(data))
+
+    def test_seek(self):
+        compressed_stream, compressed_data_len, data = self._get_data(1024)
+
+        stream = ZlibInputStream(compressed_stream, compressed_data_len)
+
+        stream.seek(123)
+        p = 123
+        assert_equal(stream.tell(), p)
+        d1 = stream.read(11)
+        assert_equal(d1, data[p:p+11])
+
+        stream.seek(321, 1)
+        p = 123+11+321
+        assert_equal(stream.tell(), p)
+        d2 = stream.read(21)
+        assert_equal(d2, data[p:p+21])
+
+        stream.seek(641, 0)
+        p = 641
+        assert_equal(stream.tell(), p)
+        d3 = stream.read(11)
+        assert_equal(d3, data[p:p+11])
+
+        assert_raises(OSError, stream.seek, 10, 2)
+        assert_raises(OSError, stream.seek, -1, 1)
+        assert_raises(ValueError, stream.seek, 1, 123)
+
+        stream.seek(10000, 1)
+        assert_raises(OSError, stream.read, 12)
+
+    def test_seek_bad_checksum(self):
+        data = np.random.randint(0, 256, 10).astype(np.uint8).tobytes()
+        compressed_data = zlib.compress(data)
+
+        # break checksum
+        compressed_data = compressed_data[:-1] + bytes([(compressed_data[-1] + 1) & 255])
+
+        compressed_stream = BytesIO(compressed_data)
+        stream = ZlibInputStream(compressed_stream, len(compressed_data))
+
+        assert_raises(zlib.error, stream.seek, len(data))
+
+    def test_all_data_read(self):
+        compressed_stream, compressed_data_len, data = self._get_data(1024)
+        stream = ZlibInputStream(compressed_stream, compressed_data_len)
+        assert_(not stream.all_data_read())
+        stream.seek(512)
+        assert_(not stream.all_data_read())
+        stream.seek(1024)
+        assert_(stream.all_data_read())
+
+    def test_all_data_read_overlap(self):
+        COMPRESSION_LEVEL = 6
+
+        data = np.arange(33707000).astype(np.uint8).tobytes()
+        compressed_data = zlib.compress(data, COMPRESSION_LEVEL)
+        compressed_data_len = len(compressed_data)
+
+        # check that part of the checksum overlaps
+        assert_(compressed_data_len == BLOCK_SIZE + 2)
+
+        compressed_stream = BytesIO(compressed_data)
+        stream = ZlibInputStream(compressed_stream, compressed_data_len)
+        assert_(not stream.all_data_read())
+        stream.seek(len(data))
+        assert_(stream.all_data_read())
+
+    def test_all_data_read_bad_checksum(self):
+        COMPRESSION_LEVEL = 6
+
+        data = np.arange(33707000).astype(np.uint8).tobytes()
+        compressed_data = zlib.compress(data, COMPRESSION_LEVEL)
+        compressed_data_len = len(compressed_data)
+
+        # check that part of the checksum overlaps
+        assert_(compressed_data_len == BLOCK_SIZE + 2)
+
+        # break checksum
+        compressed_data = compressed_data[:-1] + bytes([(compressed_data[-1] + 1) & 255])
+
+        compressed_stream = BytesIO(compressed_data)
+        stream = ZlibInputStream(compressed_stream, compressed_data_len)
+        assert_(not stream.all_data_read())
+        stream.seek(len(data))
+
+        assert_raises(zlib.error, stream.all_data_read)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/mmio.py b/__packaged__/coreml/.python_dependencies/scipy/io/mmio.py
new file mode 100644
index 00000000..70d0e91c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/mmio.py
@@ -0,0 +1,28 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.io` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _mmio
+
+__all__ = [  # noqa: F822
+    'mminfo', 'mmread', 'mmwrite', 'MMFile',
+    'coo_matrix', 'isspmatrix', 'asstr'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.io.mmio is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.io instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.io` namespace, "
+                  "the `scipy.io.mmio` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_mmio, name)
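+
+# Illustrative usage sketch (not part of the upstream module): any name
+# listed in __all__ resolves through the module-level __getattr__ hook
+# (PEP 562) and emits a DeprecationWarning; the scipy.io.netcdf shim
+# below follows the same pattern.
+#
+#     import warnings
+#     from scipy.io import mmio
+#     with warnings.catch_warnings(record=True) as w:
+#         warnings.simplefilter("always")
+#         mmio.mmread  # old namespace still resolves, but warns
+#         assert issubclass(w[-1].category, DeprecationWarning)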
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/netcdf.py b/__packaged__/coreml/.python_dependencies/scipy/io/netcdf.py
new file mode 100644
index 00000000..7788e774
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/netcdf.py
@@ -0,0 +1,33 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.io` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _netcdf
+
+__all__ = [  # noqa: F822
+    'netcdf_file', 'netcdf_variable',
+    'array', 'LITTLE_ENDIAN', 'IS_PYPY', 'ABSENT', 'ZERO',
+    'NC_BYTE', 'NC_CHAR', 'NC_SHORT', 'NC_INT', 'NC_FLOAT',
+    'NC_DOUBLE', 'NC_DIMENSION', 'NC_VARIABLE', 'NC_ATTRIBUTE',
+    'FILL_BYTE', 'FILL_CHAR', 'FILL_SHORT', 'FILL_INT', 'FILL_FLOAT',
+    'FILL_DOUBLE', 'TYPEMAP', 'FILLMAP', 'REVERSE', 'NetCDFFile',
+    'NetCDFVariable'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.io.netcdf is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.io instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.io` namespace, "
+                  "the `scipy.io.netcdf` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_netcdf, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/io/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/Transparent Busy.ani b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/Transparent Busy.ani
new file mode 100644
index 00000000..3be50003
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/Transparent Busy.ani differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_1d.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_1d.sav
new file mode 100644
index 00000000..619a1259
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_1d.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_2d.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_2d.sav
new file mode 100644
index 00000000..804d8b1a
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_2d.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_3d.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_3d.sav
new file mode 100644
index 00000000..3fa56c45
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_3d.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_4d.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_4d.sav
new file mode 100644
index 00000000..4bb951e2
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_4d.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_5d.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_5d.sav
new file mode 100644
index 00000000..2854dbc8
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_5d.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_6d.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_6d.sav
new file mode 100644
index 00000000..91588d34
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_6d.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_7d.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_7d.sav
new file mode 100644
index 00000000..3e978fad
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_7d.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_8d.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_8d.sav
new file mode 100644
index 00000000..f699fe24
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_8d.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_pointer_1d.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_pointer_1d.sav
new file mode 100644
index 00000000..8e3a402c
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_pointer_1d.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_pointer_2d.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_pointer_2d.sav
new file mode 100644
index 00000000..dd3504f0
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_pointer_2d.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_pointer_3d.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_pointer_3d.sav
new file mode 100644
index 00000000..285da7f7
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_pointer_3d.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_pointer_4d.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_pointer_4d.sav
new file mode 100644
index 00000000..d99fa48f
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_pointer_4d.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_pointer_5d.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_pointer_5d.sav
new file mode 100644
index 00000000..de5e984e
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_pointer_5d.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_pointer_6d.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_pointer_6d.sav
new file mode 100644
index 00000000..bb76671a
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_pointer_6d.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_pointer_7d.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_pointer_7d.sav
new file mode 100644
index 00000000..995d23c6
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_pointer_7d.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_pointer_8d.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_pointer_8d.sav
new file mode 100644
index 00000000..4249ec62
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/array_float32_pointer_8d.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/example_1.nc b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/example_1.nc
new file mode 100644
index 00000000..5775622d
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/example_1.nc differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/example_2.nc b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/example_2.nc
new file mode 100644
index 00000000..07db1cd9
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/example_2.nc differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/example_3_maskedvals.nc b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/example_3_maskedvals.nc
new file mode 100644
index 00000000..57f8bf9d
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/example_3_maskedvals.nc differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-3x3d-2i.dat b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-3x3d-2i.dat
new file mode 100644
index 00000000..87731eb9
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-3x3d-2i.dat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-mixed.dat b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-mixed.dat
new file mode 100644
index 00000000..a165a7a3
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-mixed.dat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-sf8-11x1x10.dat b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-sf8-11x1x10.dat
new file mode 100644
index 00000000..c3bb9dcb
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-sf8-11x1x10.dat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-sf8-15x10x22.dat b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-sf8-15x10x22.dat
new file mode 100644
index 00000000..351801fd
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-sf8-15x10x22.dat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-sf8-1x1x1.dat b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-sf8-1x1x1.dat
new file mode 100644
index 00000000..64bf92f7
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-sf8-1x1x1.dat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-sf8-1x1x5.dat b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-sf8-1x1x5.dat
new file mode 100644
index 00000000..3d3f27f8
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-sf8-1x1x5.dat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-sf8-1x1x7.dat b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-sf8-1x1x7.dat
new file mode 100644
index 00000000..0bd68309
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-sf8-1x1x7.dat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-sf8-1x3x5.dat b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-sf8-1x3x5.dat
new file mode 100644
index 00000000..25269ff9
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-sf8-1x3x5.dat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-si4-11x1x10.dat b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-si4-11x1x10.dat
new file mode 100644
index 00000000..9850de37
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-si4-11x1x10.dat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-si4-15x10x22.dat b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-si4-15x10x22.dat
new file mode 100644
index 00000000..98c09c2d
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-si4-15x10x22.dat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-si4-1x1x1.dat b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-si4-1x1x1.dat
new file mode 100644
index 00000000..959098d2
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-si4-1x1x1.dat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-si4-1x1x5.dat b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-si4-1x1x5.dat
new file mode 100644
index 00000000..49c0ec1d
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-si4-1x1x5.dat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-si4-1x1x7.dat b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-si4-1x1x7.dat
new file mode 100644
index 00000000..bb936b87
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-si4-1x1x7.dat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-si4-1x3x5.dat b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-si4-1x3x5.dat
new file mode 100644
index 00000000..cb3e9e48
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/fortran-si4-1x3x5.dat differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/invalid_pointer.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/invalid_pointer.sav
new file mode 100644
index 00000000..d53893c6
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/invalid_pointer.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/null_pointer.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/null_pointer.sav
new file mode 100644
index 00000000..8cee5ebe
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/null_pointer.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_byte.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_byte.sav
new file mode 100644
index 00000000..e4027b3c
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_byte.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_byte_descr.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_byte_descr.sav
new file mode 100644
index 00000000..182e29bc
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_byte_descr.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_complex32.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_complex32.sav
new file mode 100644
index 00000000..593e8c62
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_complex32.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_complex64.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_complex64.sav
new file mode 100644
index 00000000..edb19d38
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_complex64.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_float32.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_float32.sav
new file mode 100644
index 00000000..be9e3877
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_float32.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_float64.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_float64.sav
new file mode 100644
index 00000000..9680b287
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_float64.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_heap_pointer.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_heap_pointer.sav
new file mode 100644
index 00000000..d02b1756
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_heap_pointer.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_int16.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_int16.sav
new file mode 100644
index 00000000..60352569
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_int16.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_int32.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_int32.sav
new file mode 100644
index 00000000..40210b88
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_int32.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_int64.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_int64.sav
new file mode 100644
index 00000000..c91cd0a5
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_int64.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_string.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_string.sav
new file mode 100644
index 00000000..ee6e69fe
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_string.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_uint16.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_uint16.sav
new file mode 100644
index 00000000..759c2e64
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_uint16.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_uint32.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_uint32.sav
new file mode 100644
index 00000000..74dec7b8
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_uint32.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_uint64.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_uint64.sav
new file mode 100644
index 00000000..fc9da579
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/scalar_uint64.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_arrays.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_arrays.sav
new file mode 100644
index 00000000..40c9cd33
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_arrays.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_arrays_byte_idl80.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_arrays_byte_idl80.sav
new file mode 100644
index 00000000..f1aa416f
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_arrays_byte_idl80.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_arrays_replicated.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_arrays_replicated.sav
new file mode 100644
index 00000000..6f01fbfd
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_arrays_replicated.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_arrays_replicated_3d.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_arrays_replicated_3d.sav
new file mode 100644
index 00000000..bac9b207
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_arrays_replicated_3d.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_inherit.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_inherit.sav
new file mode 100644
index 00000000..8babd563
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_inherit.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_pointer_arrays.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_pointer_arrays.sav
new file mode 100644
index 00000000..a3c67816
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_pointer_arrays.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_pointer_arrays_replicated.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_pointer_arrays_replicated.sav
new file mode 100644
index 00000000..38b81226
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_pointer_arrays_replicated.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_pointer_arrays_replicated_3d.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_pointer_arrays_replicated_3d.sav
new file mode 100644
index 00000000..db1c256c
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_pointer_arrays_replicated_3d.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_pointers.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_pointers.sav
new file mode 100644
index 00000000..acbb058a
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_pointers.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_pointers_replicated.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_pointers_replicated.sav
new file mode 100644
index 00000000..d16f4655
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_pointers_replicated.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_pointers_replicated_3d.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_pointers_replicated_3d.sav
new file mode 100644
index 00000000..732dd2cb
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_pointers_replicated_3d.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_scalars.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_scalars.sav
new file mode 100644
index 00000000..69d7eaf4
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_scalars.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_scalars_replicated.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_scalars_replicated.sav
new file mode 100644
index 00000000..2222391a
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_scalars_replicated.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_scalars_replicated_3d.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_scalars_replicated_3d.sav
new file mode 100644
index 00000000..a35f1acf
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/struct_scalars_replicated_3d.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-44100Hz-2ch-32bit-float-be.wav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-44100Hz-2ch-32bit-float-be.wav
new file mode 100644
index 00000000..056333e7
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-44100Hz-2ch-32bit-float-be.wav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-44100Hz-2ch-32bit-float-le.wav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-44100Hz-2ch-32bit-float-le.wav
new file mode 100644
index 00000000..57e6f178
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-44100Hz-2ch-32bit-float-le.wav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-44100Hz-be-1ch-4bytes.wav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-44100Hz-be-1ch-4bytes.wav
new file mode 100644
index 00000000..1825dfcf
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-44100Hz-be-1ch-4bytes.wav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof-no-data.wav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof-no-data.wav
new file mode 100644
index 00000000..bb86f2f3
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof-no-data.wav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof.wav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof.wav
new file mode 100644
index 00000000..d1b7065c
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof.wav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-incomplete-chunk.wav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-incomplete-chunk.wav
new file mode 100644
index 00000000..7271fdd2
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-incomplete-chunk.wav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes.wav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes.wav
new file mode 100644
index 00000000..8aae8e2c
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes.wav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-48000Hz-2ch-64bit-float-le-wavex.wav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-48000Hz-2ch-64bit-float-le-wavex.wav
new file mode 100644
index 00000000..31221b2a
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-48000Hz-2ch-64bit-float-le-wavex.wav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-be-3ch-5S-24bit.wav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-be-3ch-5S-24bit.wav
new file mode 100644
index 00000000..db596cc5
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-be-3ch-5S-24bit.wav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-1ch-10S-20bit-extra.wav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-1ch-10S-20bit-extra.wav
new file mode 100644
index 00000000..13f131e3
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-1ch-10S-20bit-extra.wav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-1ch-1byte-ulaw.wav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-1ch-1byte-ulaw.wav
new file mode 100644
index 00000000..c4fed626
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-1ch-1byte-ulaw.wav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-2ch-1byteu.wav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-2ch-1byteu.wav
new file mode 100644
index 00000000..70900819
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-2ch-1byteu.wav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit-inconsistent.wav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit-inconsistent.wav
new file mode 100644
index 00000000..8e79d54d
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit-inconsistent.wav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit.wav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit.wav
new file mode 100644
index 00000000..9c4312bc
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit.wav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-3ch-5S-36bit.wav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-3ch-5S-36bit.wav
new file mode 100644
index 00000000..5c28ed81
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-3ch-5S-36bit.wav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-3ch-5S-45bit.wav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-3ch-5S-45bit.wav
new file mode 100644
index 00000000..2d4eea22
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-3ch-5S-45bit.wav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-3ch-5S-53bit.wav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-3ch-5S-53bit.wav
new file mode 100644
index 00000000..68437dad
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-3ch-5S-53bit.wav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-3ch-5S-64bit.wav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-3ch-5S-64bit.wav
new file mode 100644
index 00000000..ef478def
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-3ch-5S-64bit.wav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-4ch-9S-12bit.wav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-4ch-9S-12bit.wav
new file mode 100644
index 00000000..9c93e132
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-4ch-9S-12bit.wav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-5ch-9S-5bit.wav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-5ch-9S-5bit.wav
new file mode 100644
index 00000000..b95bcdf3
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/test-8000Hz-le-5ch-9S-5bit.wav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/various_compressed.sav b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/various_compressed.sav
new file mode 100644
index 00000000..dcdb0b0d
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/io/tests/data/various_compressed.sav differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/test_fortran.py b/__packaged__/coreml/.python_dependencies/scipy/io/tests/test_fortran.py
new file mode 100644
index 00000000..a32b660a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/tests/test_fortran.py
@@ -0,0 +1,236 @@
+''' Tests for fortran sequential files '''
+
+import tempfile
+import shutil
+from os import path
+from glob import iglob
+import re
+
+from numpy.testing import assert_equal, assert_allclose
+import numpy as np
+import pytest
+
+from scipy.io import (FortranFile,
+                      _test_fortran,
+                      FortranEOFError,
+                      FortranFormattingError)
+
+
+DATA_PATH = path.join(path.dirname(__file__), 'data')
+
+
+def test_fortranfiles_read():
+    for filename in iglob(path.join(DATA_PATH, "fortran-*-*x*x*.dat")):
+        m = re.search(r'fortran-([^-]+)-(\d+)x(\d+)x(\d+).dat', filename, re.I)
+        if not m:
+            raise RuntimeError("Couldn't match %s filename to regex" % filename)
+
+        dims = (int(m.group(2)), int(m.group(3)), int(m.group(4)))
+
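+        # The filename encodes the element dtype: 'sf8' -> '<f8'
+        # (little-endian float64) and 'si4' -> '<i4' (little-endian int32),
+        # matching the fortran-sf8-* / fortran-si4-* data files above.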
+        dtype = m.group(1).replace('s', '<')
+
+        f = FortranFile(filename, 'r', '<u4')
+        p[a.toarray() > 0] = 1
+        info = (2, 2, 3, 'coordinate', 'pattern', 'general')
+        mmwrite(self.fn, a, field='pattern')
+        assert_equal(mminfo(self.fn), info)
+        b = mmread(self.fn)
+        assert_array_almost_equal(p, b.toarray())
+
+    def test_gh13634_non_skew_symmetric_int(self):
+        a = scipy.sparse.csr_matrix([[1, 2], [-2, 99]], dtype=np.int32)
+        self.check_exact(a, (2, 2, 4, 'coordinate', 'integer', 'general'))
+
+    def test_gh13634_non_skew_symmetric_float(self):
+        a = scipy.sparse.csr_matrix([[1, 2], [-2, 99.]], dtype=np.float32)
+        self.check(a, (2, 2, 4, 'coordinate', 'real', 'general'))
+
+
+_32bit_integer_dense_example = '''\
+%%MatrixMarket matrix array integer general
+2  2
+2147483647
+2147483646
+2147483647
+2147483646
+'''
+
+_32bit_integer_sparse_example = '''\
+%%MatrixMarket matrix coordinate integer symmetric
+2  2  2
+1  1  2147483647
+2  2  2147483646
+'''
+
+_64bit_integer_dense_example = '''\
+%%MatrixMarket matrix array integer general
+2  2
+          2147483648
+-9223372036854775806
+         -2147483648
+ 9223372036854775807
+'''
+
+_64bit_integer_sparse_general_example = '''\
+%%MatrixMarket matrix coordinate integer general
+2  2  3
+1  1           2147483648
+1  2  9223372036854775807
+2  2  9223372036854775807
+'''
+
+_64bit_integer_sparse_symmetric_example = '''\
+%%MatrixMarket matrix coordinate integer symmetric
+2  2  3
+1  1            2147483648
+1  2  -9223372036854775807
+2  2   9223372036854775807
+'''
+
+_64bit_integer_sparse_skew_example = '''\
+%%MatrixMarket matrix coordinate integer skew-symmetric
+2  2  3
+1  1            2147483648
+1  2  -9223372036854775807
+2  2   9223372036854775807
+'''
+
+_over64bit_integer_dense_example = '''\
+%%MatrixMarket matrix array integer general
+2  2
+         2147483648
+9223372036854775807
+         2147483648
+9223372036854775808
+'''
+
+_over64bit_integer_sparse_example = '''\
+%%MatrixMarket matrix coordinate integer symmetric
+2  2  2
+1  1            2147483648
+2  2  19223372036854775808
+'''
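+
+# The examples above probe the integer-width boundaries: 2147483647 is
+# 2**31 - 1 (int32 max) and 9223372036854775807 is 2**63 - 1 (int64 max);
+# the "over64bit" values exceed 2**63 - 1 and must trigger OverflowError.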
+
+
+class TestMMIOReadLargeIntegers:
+    def setup_method(self):
+        self.tmpdir = mkdtemp()
+        self.fn = os.path.join(self.tmpdir, 'testfile.mtx')
+
+    def teardown_method(self):
+        shutil.rmtree(self.tmpdir)
+
+    def check_read(self, example, a, info, dense, over32, over64):
+        with open(self.fn, 'w') as f:
+            f.write(example)
+        assert_equal(mminfo(self.fn), info)
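+        # np.intp is 4 bytes on 32-bit builds, so values above 2**31 - 1
+        # overflow there; values above 2**63 - 1 overflow on every platform.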
+        if (over32 and (np.intp(0).itemsize < 8)) or over64:
+            assert_raises(OverflowError, mmread, self.fn)
+        else:
+            b = mmread(self.fn)
+            if not dense:
+                b = b.toarray()
+            assert_equal(a, b)
+
+    def test_read_32bit_integer_dense(self):
+        a = array([[2**31-1, 2**31-1],
+                   [2**31-2, 2**31-2]], dtype=np.int64)
+        self.check_read(_32bit_integer_dense_example,
+                        a,
+                        (2, 2, 4, 'array', 'integer', 'general'),
+                        dense=True,
+                        over32=False,
+                        over64=False)
+
+    def test_read_32bit_integer_sparse(self):
+        a = array([[2**31-1, 0],
+                   [0, 2**31-2]], dtype=np.int64)
+        self.check_read(_32bit_integer_sparse_example,
+                        a,
+                        (2, 2, 2, 'coordinate', 'integer', 'symmetric'),
+                        dense=False,
+                        over32=False,
+                        over64=False)
+
+    def test_read_64bit_integer_dense(self):
+        a = array([[2**31, -2**31],
+                   [-2**63+2, 2**63-1]], dtype=np.int64)
+        self.check_read(_64bit_integer_dense_example,
+                        a,
+                        (2, 2, 4, 'array', 'integer', 'general'),
+                        dense=True,
+                        over32=True,
+                        over64=False)
+
+    def test_read_64bit_integer_sparse_general(self):
+        a = array([[2**31, 2**63-1],
+                   [0, 2**63-1]], dtype=np.int64)
+        self.check_read(_64bit_integer_sparse_general_example,
+                        a,
+                        (2, 2, 3, 'coordinate', 'integer', 'general'),
+                        dense=False,
+                        over32=True,
+                        over64=False)
+
+    def test_read_64bit_integer_sparse_symmetric(self):
+        a = array([[2**31, -2**63+1],
+                   [-2**63+1, 2**63-1]], dtype=np.int64)
+        self.check_read(_64bit_integer_sparse_symmetric_example,
+                        a,
+                        (2, 2, 3, 'coordinate', 'integer', 'symmetric'),
+                        dense=False,
+                        over32=True,
+                        over64=False)
+
+    def test_read_64bit_integer_sparse_skew(self):
+        a = array([[2**31, -2**63+1],
+                   [2**63-1, 2**63-1]], dtype=np.int64)
+        self.check_read(_64bit_integer_sparse_skew_example,
+                        a,
+                        (2, 2, 3, 'coordinate', 'integer', 'skew-symmetric'),
+                        dense=False,
+                        over32=True,
+                        over64=False)
+
+    def test_read_over64bit_integer_dense(self):
+        self.check_read(_over64bit_integer_dense_example,
+                        None,
+                        (2, 2, 4, 'array', 'integer', 'general'),
+                        dense=True,
+                        over32=True,
+                        over64=True)
+
+    def test_read_over64bit_integer_sparse(self):
+        self.check_read(_over64bit_integer_sparse_example,
+                        None,
+                        (2, 2, 2, 'coordinate', 'integer', 'symmetric'),
+                        dense=False,
+                        over32=True,
+                        over64=True)
+
+
+_general_example = '''\
+%%MatrixMarket matrix coordinate real general
+%=================================================================================
+%
+% This ASCII file represents a sparse MxN matrix with L
+% nonzeros in the following Matrix Market format:
+%
+% +----------------------------------------------+
+% |%%MatrixMarket matrix coordinate real general | <--- header line
+% |%                                             | <--+
+% |% comments                                    |    |-- 0 or more comment lines
+% |%                                             | <--+
+% |    M  N  L                                   | <--- rows, columns, entries
+% |    I1  J1  A(I1, J1)                         | <--+
+% |    I2  J2  A(I2, J2)                         |    |
+% |    I3  J3  A(I3, J3)                         |    |-- L lines
+% |        . . .                                 |    |
+% |    IL JL  A(IL, JL)                          | <--+
+% +----------------------------------------------+
+%
+% Indices are 1-based, i.e. A(1,1) is the first element.
+%
+%=================================================================================
+  5  5  8
+    1     1   1.000e+00
+    2     2   1.050e+01
+    3     3   1.500e-02
+    1     4   6.000e+00
+    4     2   2.505e+02
+    4     4  -2.800e+02
+    4     5   3.332e+01
+    5     5   1.200e+01
+'''
+
+_hermitian_example = '''\
+%%MatrixMarket matrix coordinate complex hermitian
+  5  5  7
+    1     1     1.0      0
+    2     2    10.5      0
+    4     2   250.5     22.22
+    3     3     1.5e-2   0
+    4     4    -2.8e2    0
+    5     5    12.       0
+    5     4     0       33.32
+'''
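+
+# Hermitian storage keeps only one triangle; mmread reconstructs the
+# mirrored entries as complex conjugates (exercised by test_read_hermitian).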
+
+_skew_example = '''\
+%%MatrixMarket matrix coordinate real skew-symmetric
+  5  5  7
+    1     1     1.0
+    2     2    10.5
+    4     2   250.5
+    3     3     1.5e-2
+    4     4    -2.8e2
+    5     5    12.
+    5     4     0
+'''
+
+_symmetric_example = '''\
+%%MatrixMarket matrix coordinate real symmetric
+  5  5  7
+    1     1     1.0
+    2     2    10.5
+    4     2   250.5
+    3     3     1.5e-2
+    4     4    -2.8e2
+    5     5    12.
+    5     4     8
+'''
+
+_symmetric_pattern_example = '''\
+%%MatrixMarket matrix coordinate pattern symmetric
+  5  5  7
+    1     1
+    2     2
+    4     2
+    3     3
+    4     4
+    5     5
+    5     4
+'''
+
+# example (without comment lines) from Figure 1 in
+# https://math.nist.gov/MatrixMarket/reports/MMformat.ps
+_empty_lines_example = '''\
+%%MatrixMarket  MATRIX    Coordinate    Real General
+
+   5  5         8
+
+1 1  1.0
+2 2       10.5
+3 3             1.5e-2
+4 4                     -2.8E2
+5 5                              12.
+     1      4      6
+     4      2      250.5
+     4      5      33.32
+
+'''
+
+
+class TestMMIOCoordinate:
+    def setup_method(self):
+        self.tmpdir = mkdtemp()
+        self.fn = os.path.join(self.tmpdir, 'testfile.mtx')
+
+    def teardown_method(self):
+        shutil.rmtree(self.tmpdir)
+
+    def check_read(self, example, a, info):
+        with open(self.fn, 'w') as f:
+            f.write(example)
+        assert_equal(mminfo(self.fn), info)
+        b = mmread(self.fn).toarray()
+        assert_array_almost_equal(a, b)
+
+    def test_read_general(self):
+        a = [[1, 0, 0, 6, 0],
+             [0, 10.5, 0, 0, 0],
+             [0, 0, .015, 0, 0],
+             [0, 250.5, 0, -280, 33.32],
+             [0, 0, 0, 0, 12]]
+        self.check_read(_general_example, a,
+                        (5, 5, 8, 'coordinate', 'real', 'general'))
+
+    def test_read_hermitian(self):
+        a = [[1, 0, 0, 0, 0],
+             [0, 10.5, 0, 250.5 - 22.22j, 0],
+             [0, 0, .015, 0, 0],
+             [0, 250.5 + 22.22j, 0, -280, -33.32j],
+             [0, 0, 0, 33.32j, 12]]
+        self.check_read(_hermitian_example, a,
+                        (5, 5, 7, 'coordinate', 'complex', 'hermitian'))
+
+    def test_read_skew(self):
+        a = [[1, 0, 0, 0, 0],
+             [0, 10.5, 0, -250.5, 0],
+             [0, 0, .015, 0, 0],
+             [0, 250.5, 0, -280, 0],
+             [0, 0, 0, 0, 12]]
+        self.check_read(_skew_example, a,
+                        (5, 5, 7, 'coordinate', 'real', 'skew-symmetric'))
+
+    def test_read_symmetric(self):
+        a = [[1, 0, 0, 0, 0],
+             [0, 10.5, 0, 250.5, 0],
+             [0, 0, .015, 0, 0],
+             [0, 250.5, 0, -280, 8],
+             [0, 0, 0, 8, 12]]
+        self.check_read(_symmetric_example, a,
+                        (5, 5, 7, 'coordinate', 'real', 'symmetric'))
+
+    def test_read_symmetric_pattern(self):
+        a = [[1, 0, 0, 0, 0],
+             [0, 1, 0, 1, 0],
+             [0, 0, 1, 0, 0],
+             [0, 1, 0, 1, 1],
+             [0, 0, 0, 1, 1]]
+        self.check_read(_symmetric_pattern_example, a,
+                        (5, 5, 7, 'coordinate', 'pattern', 'symmetric'))
+
+    def test_read_empty_lines(self):
+        a = [[1, 0, 0, 6, 0],
+             [0, 10.5, 0, 0, 0],
+             [0, 0, .015, 0, 0],
+             [0, 250.5, 0, -280, 33.32],
+             [0, 0, 0, 0, 12]]
+        self.check_read(_empty_lines_example, a,
+                        (5, 5, 8, 'coordinate', 'real', 'general'))
+
+    def test_empty_write_read(self):
+        # https://github.com/scipy/scipy/issues/1410 (Trac #883)
+
+        b = scipy.sparse.coo_matrix((10, 10))
+        mmwrite(self.fn, b)
+
+        assert_equal(mminfo(self.fn),
+                     (10, 10, 0, 'coordinate', 'real', 'symmetric'))
+        a = b.toarray()
+        b = mmread(self.fn).toarray()
+        assert_array_almost_equal(a, b)
+
+    def test_bzip2_py3(self):
+        # test if fix for #2152 works
+        # bz2 module isn't always built when building Python; skip if absent.
+        bz2 = pytest.importorskip("bz2")
+        I = array([0, 0, 1, 2, 3, 3, 3, 4])
+        J = array([0, 3, 1, 2, 1, 3, 4, 4])
+        V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0])
+
+        b = scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5))
+
+        mmwrite(self.fn, b)
+
+        fn_bzip2 = "%s.bz2" % self.fn
+        with open(self.fn, 'rb') as f_in, bz2.BZ2File(fn_bzip2, 'wb') as f_out:
+            f_out.write(f_in.read())
+
+        a = mmread(fn_bzip2).toarray()
+        assert_array_almost_equal(a, b.toarray())
+
+    def test_gzip_py3(self):
+        # test if fix for #2152 works
+        # gzip module can be missing from Python installation; skip if absent.
+        gzip = pytest.importorskip("gzip")
+        I = array([0, 0, 1, 2, 3, 3, 3, 4])
+        J = array([0, 3, 1, 2, 1, 3, 4, 4])
+        V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0])
+
+        b = scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5))
+
+        mmwrite(self.fn, b)
+
+        fn_gzip = "%s.gz" % self.fn
+        with open(self.fn, 'rb') as f_in, gzip.open(fn_gzip, 'wb') as f_out:
+            f_out.write(f_in.read())
+
+        a = mmread(fn_gzip).toarray()
+        assert_array_almost_equal(a, b.toarray())
+
+    def test_real_write_read(self):
+        I = array([0, 0, 1, 2, 3, 3, 3, 4])
+        J = array([0, 3, 1, 2, 1, 3, 4, 4])
+        V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0])
+
+        b = scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5))
+
+        mmwrite(self.fn, b)
+
+        assert_equal(mminfo(self.fn),
+                     (5, 5, 8, 'coordinate', 'real', 'general'))
+        a = b.toarray()
+        b = mmread(self.fn).toarray()
+        assert_array_almost_equal(a, b)
+
+    def test_complex_write_read(self):
+        I = array([0, 0, 1, 2, 3, 3, 3, 4])
+        J = array([0, 3, 1, 2, 1, 3, 4, 4])
+        V = array([1.0 + 3j, 6.0 + 2j, 10.50 + 0.9j, 0.015 + -4.4j,
+                   250.5 + 0j, -280.0 + 5j, 33.32 + 6.4j, 12.00 + 0.8j])
+
+        b = scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5))
+
+        mmwrite(self.fn, b)
+
+        assert_equal(mminfo(self.fn),
+                     (5, 5, 8, 'coordinate', 'complex', 'general'))
+        a = b.toarray()
+        b = mmread(self.fn).toarray()
+        assert_array_almost_equal(a, b)
+
+    def test_sparse_formats(self, tmp_path):
+        # Note: `tmp_path` is a pytest fixture, it handles cleanup
+        tmpdir = tmp_path / 'sparse_formats'
+        tmpdir.mkdir()
+
+        mats = []
+        I = array([0, 0, 1, 2, 3, 3, 3, 4])
+        J = array([0, 3, 1, 2, 1, 3, 4, 4])
+
+        V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0])
+        mats.append(scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5)))
+
+        V = array([1.0 + 3j, 6.0 + 2j, 10.50 + 0.9j, 0.015 + -4.4j,
+                   250.5 + 0j, -280.0 + 5j, 33.32 + 6.4j, 12.00 + 0.8j])
+        mats.append(scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5)))
+
+        for mat in mats:
+            expected = mat.toarray()
+            for fmt in ['csr', 'csc', 'coo']:
+                fname = tmpdir / (fmt + '.mtx')
+                mmwrite(fname, mat.asformat(fmt))
+                result = mmread(fname).toarray()
+                assert_array_almost_equal(result, expected)
+
+    def test_precision(self):
+        test_values = [pi] + [10**(i) for i in range(0, -10, -1)]
+        test_precisions = range(1, 10)
+        for value in test_values:
+            for precision in test_precisions:
+                # construct sparse matrix with test value at last main diagonal
+                n = 10**precision + 1
+                A = scipy.sparse.dok_matrix((n, n))
+                A[n-1, n-1] = value
+                # write matrix with test precision and read again
+                mmwrite(self.fn, A, precision=precision)
+                A = scipy.io.mmread(self.fn)
+                # check for right entries in matrix
+                assert_array_equal(A.row, [n-1])
+                assert_array_equal(A.col, [n-1])
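+                # '%%.%dg' % precision builds a format such as '%.3g', which
+                # rounds the value to the same number of significant digits
+                # mmwrite was asked to emit.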
+                assert_allclose(A.data, [float('%%.%dg' % precision % value)])
+
+    def test_bad_number_of_coordinate_header_fields(self):
+        s = """\
+            %%MatrixMarket matrix coordinate real general
+              5  5  8 999
+                1     1   1.000e+00
+                2     2   1.050e+01
+                3     3   1.500e-02
+                1     4   6.000e+00
+                4     2   2.505e+02
+                4     4  -2.800e+02
+                4     5   3.332e+01
+                5     5   1.200e+01
+            """
+        text = textwrap.dedent(s).encode('ascii')
+        with pytest.raises(ValueError, match='not of length 3'):
+            scipy.io.mmread(io.BytesIO(text))
+
+
+def test_gh11389():
+    mmread(io.StringIO("%%MatrixMarket matrix coordinate complex symmetric\n"
+                       " 1 1 1\n"
+                       "1 1 -2.1846000000000e+02  0.0000000000000e+00"))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/test_netcdf.py b/__packaged__/coreml/.python_dependencies/scipy/io/tests/test_netcdf.py
new file mode 100644
index 00000000..d1ebae82
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/tests/test_netcdf.py
@@ -0,0 +1,543 @@
+''' Tests for netcdf '''
+import os
+from os.path import join as pjoin, dirname
+import shutil
+import tempfile
+import warnings
+from io import BytesIO
+from glob import glob
+from contextlib import contextmanager
+
+import numpy as np
+from numpy.testing import (assert_, assert_allclose, assert_equal,
+                           break_cycles, suppress_warnings, IS_PYPY)
+from pytest import raises as assert_raises
+
+from scipy.io import netcdf_file
+from scipy._lib._tmpdirs import in_tempdir
+
+TEST_DATA_PATH = pjoin(dirname(__file__), 'data')
+
+N_EG_ELS = 11  # number of elements for example variable
+VARTYPE_EG = 'b'  # var type for example variable
+
+
+@contextmanager
+def make_simple(*args, **kwargs):
+    f = netcdf_file(*args, **kwargs)
+    f.history = 'Created for a test'
+    f.createDimension('time', N_EG_ELS)
+    time = f.createVariable('time', VARTYPE_EG, ('time',))
+    time[:] = np.arange(N_EG_ELS)
+    time.units = 'days since 2008-01-01'
+    f.flush()
+    yield f
+    f.close()
+
+
+def check_simple(ncfileobj):
+    '''Example fileobj tests'''
+    assert_equal(ncfileobj.history, b'Created for a test')
+    time = ncfileobj.variables['time']
+    assert_equal(time.units, b'days since 2008-01-01')
+    assert_equal(time.shape, (N_EG_ELS,))
+    assert_equal(time[-1], N_EG_ELS-1)
+
+def assert_mask_matches(arr, expected_mask):
+    '''
+    Asserts that the mask of arr is effectively the same as expected_mask.
+
+    In contrast to numpy.ma.testutils.assert_mask_equal, this function allows
+    testing the 'mask' of a standard numpy array (the mask in this case is treated
+    as all False).
+
+    Parameters
+    ----------
+    arr : ndarray or MaskedArray
+        Array to test.
+    expected_mask : array_like of booleans
+        A list giving the expected mask.
+    '''
+
+    mask = np.ma.getmaskarray(arr)
+    assert_equal(mask, expected_mask)
+
+
+def test_read_write_files():
+    # test round trip for example file
+    cwd = os.getcwd()
+    try:
+        tmpdir = tempfile.mkdtemp()
+        os.chdir(tmpdir)
+        with make_simple('simple.nc', 'w') as f:
+            pass
+        # read the file we just created in 'a' mode
+        with netcdf_file('simple.nc', 'a') as f:
+            check_simple(f)
+            # add something
+            f._attributes['appendRan'] = 1
+
+        # To read the NetCDF file we just created::
+        with netcdf_file('simple.nc') as f:
+            # Using mmap is the default (but not on pypy)
+            assert_equal(f.use_mmap, not IS_PYPY)
+            check_simple(f)
+            assert_equal(f._attributes['appendRan'], 1)
+
+        # Read it in append (and check mmap is off)
+        with netcdf_file('simple.nc', 'a') as f:
+            assert_(not f.use_mmap)
+            check_simple(f)
+            assert_equal(f._attributes['appendRan'], 1)
+
+        # Now without mmap
+        with netcdf_file('simple.nc', mmap=False) as f:
+            # Using mmap is the default
+            assert_(not f.use_mmap)
+            check_simple(f)
+
+        # To read the NetCDF file we just created, as file object, no
+        # mmap.  When n * n_bytes(var_type) is not divisible by 4, this
+        # raised an error in pupynere 1.0.12 and scipy rev 5893, because
+        # calculated vsize was rounding up in units of 4 - see
+        # https://www.unidata.ucar.edu/software/netcdf/guide_toc.html
+        with open('simple.nc', 'rb') as fobj:
+            with netcdf_file(fobj) as f:
+                # by default, don't use mmap for file-like
+                assert_(not f.use_mmap)
+                check_simple(f)
+
+        # Read file from fileobj, with mmap
+        with suppress_warnings() as sup:
+            if IS_PYPY:
+                sup.filter(RuntimeWarning,
+                           "Cannot close a netcdf_file opened with mmap=True.*")
+            with open('simple.nc', 'rb') as fobj:
+                with netcdf_file(fobj, mmap=True) as f:
+                    assert_(f.use_mmap)
+                    check_simple(f)
+
+        # Again read it in append mode (adding another att)
+        with open('simple.nc', 'r+b') as fobj:
+            with netcdf_file(fobj, 'a') as f:
+                assert_(not f.use_mmap)
+                check_simple(f)
+                f.createDimension('app_dim', 1)
+                var = f.createVariable('app_var', 'i', ('app_dim',))
+                var[:] = 42
+
+        # And... check that app_var made it in...
+        with netcdf_file('simple.nc') as f:
+            check_simple(f)
+            assert_equal(f.variables['app_var'][:], 42)
+
+    finally:
+        if IS_PYPY:
+            # windows cannot remove a dead file held by a mmap
+            # that has not been collected in PyPy
+            break_cycles()
+            break_cycles()
+        os.chdir(cwd)
+        shutil.rmtree(tmpdir)
+
+
+def test_read_write_sio():
+    eg_sio1 = BytesIO()
+    with make_simple(eg_sio1, 'w'):
+        str_val = eg_sio1.getvalue()
+
+    eg_sio2 = BytesIO(str_val)
+    with netcdf_file(eg_sio2) as f2:
+        check_simple(f2)
+
+    # Test that error is raised if attempting mmap for sio
+    eg_sio3 = BytesIO(str_val)
+    assert_raises(ValueError, netcdf_file, eg_sio3, 'r', True)
+    # Test 64-bit offset write / read
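+    # (version=2 selects the 64-bit-offset variant, written with a
+    # 'CDF\x02' magic and reported as version_byte == 2 below)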
+    eg_sio_64 = BytesIO()
+    with make_simple(eg_sio_64, 'w', version=2) as f_64:
+        str_val = eg_sio_64.getvalue()
+
+    eg_sio_64 = BytesIO(str_val)
+    with netcdf_file(eg_sio_64) as f_64:
+        check_simple(f_64)
+        assert_equal(f_64.version_byte, 2)
+    # also when version 2 explicitly specified
+    eg_sio_64 = BytesIO(str_val)
+    with netcdf_file(eg_sio_64, version=2) as f_64:
+        check_simple(f_64)
+        assert_equal(f_64.version_byte, 2)
+
+
+def test_bytes():
+    raw_file = BytesIO()
+    f = netcdf_file(raw_file, mode='w')
+    # Dataset only has a single variable, dimension and attribute to avoid
+    # any ambiguity related to order.
+    f.a = 'b'
+    f.createDimension('dim', 1)
+    var = f.createVariable('var', np.int16, ('dim',))
+    var[0] = -9999
+    var.c = 'd'
+    f.sync()
+
+    actual = raw_file.getvalue()
+
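+    # The expected stream follows the classic NetCDF (CDF-1) layout: magic
+    # 'CDF\x01', the record count, then tagged dimension (0x0a), global
+    # attribute (0x0c) and variable (0x0b) sections, and finally the data.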
+    expected = (b'CDF\x01'
+                b'\x00\x00\x00\x00'
+                b'\x00\x00\x00\x0a'
+                b'\x00\x00\x00\x01'
+                b'\x00\x00\x00\x03'
+                b'dim\x00'
+                b'\x00\x00\x00\x01'
+                b'\x00\x00\x00\x0c'
+                b'\x00\x00\x00\x01'
+                b'\x00\x00\x00\x01'
+                b'a\x00\x00\x00'
+                b'\x00\x00\x00\x02'
+                b'\x00\x00\x00\x01'
+                b'b\x00\x00\x00'
+                b'\x00\x00\x00\x0b'
+                b'\x00\x00\x00\x01'
+                b'\x00\x00\x00\x03'
+                b'var\x00'
+                b'\x00\x00\x00\x01'
+                b'\x00\x00\x00\x00'
+                b'\x00\x00\x00\x0c'
+                b'\x00\x00\x00\x01'
+                b'\x00\x00\x00\x01'
+                b'c\x00\x00\x00'
+                b'\x00\x00\x00\x02'
+                b'\x00\x00\x00\x01'
+                b'd\x00\x00\x00'
+                b'\x00\x00\x00\x03'
+                b'\x00\x00\x00\x04'
+                b'\x00\x00\x00\x78'
+                b'\xd8\xf1\x80\x01')
+
+    assert_equal(actual, expected)
+
+
+def test_encoded_fill_value():
+    with netcdf_file(BytesIO(), mode='w') as f:
+        f.createDimension('x', 1)
+        var = f.createVariable('var', 'S1', ('x',))
+        assert_equal(var._get_encoded_fill_value(), b'\x00')
+        var._FillValue = b'\x01'
+        assert_equal(var._get_encoded_fill_value(), b'\x01')
+        var._FillValue = b'\x00\x00'  # invalid, wrong size
+        assert_equal(var._get_encoded_fill_value(), b'\x00')
+
+
+def test_read_example_data():
+    # read any example data files
+    for fname in glob(pjoin(TEST_DATA_PATH, '*.nc')):
+        with netcdf_file(fname, 'r'):
+            pass
+        with netcdf_file(fname, 'r', mmap=False):
+            pass
+
+
+def test_itemset_no_segfault_on_readonly():
+    # Regression test for ticket #1202.
+    # Open the test file in read-only mode.
+
+    filename = pjoin(TEST_DATA_PATH, 'example_1.nc')
+    with suppress_warnings() as sup:
+        sup.filter(RuntimeWarning,
+                   "Cannot close a netcdf_file opened with mmap=True, when netcdf_variables or arrays referring to its data still exist")
+        with netcdf_file(filename, 'r', mmap=True) as f:
+            time_var = f.variables['time']
+
+    # time_var.assignValue(42) should raise a RuntimeError--not seg. fault!
+    assert_raises(RuntimeError, time_var.assignValue, 42)
+
+
+def test_appending_issue_gh_8625():
+    stream = BytesIO()
+
+    with make_simple(stream, mode='w') as f:
+        f.createDimension('x', 2)
+        f.createVariable('x', float, ('x',))
+        f.variables['x'][...] = 1
+        f.flush()
+        contents = stream.getvalue()
+
+    stream = BytesIO(contents)
+    with netcdf_file(stream, mode='a') as f:
+        f.variables['x'][...] = 2
+
+
+def test_write_invalid_dtype():
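+    # The classic NetCDF format has no 64-bit integer type, so
+    # createVariable must reject these dtypes.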
+    dtypes = ['int64', 'uint64']
+    if np.dtype('int').itemsize == 8:   # 64-bit machines
+        dtypes.append('int')
+    if np.dtype('uint').itemsize == 8:   # 64-bit machines
+        dtypes.append('uint')
+
+    with netcdf_file(BytesIO(), 'w') as f:
+        f.createDimension('time', N_EG_ELS)
+        for dt in dtypes:
+            assert_raises(ValueError, f.createVariable, 'time', dt, ('time',))
+
+
+def test_flush_rewind():
+    stream = BytesIO()
+    with make_simple(stream, mode='w') as f:
+        f.createDimension('x', 4)  # the 'x' dimension is used by createVariable below
+        v = f.createVariable('v', 'i2', ['x'])
+        v[:] = 1
+        f.flush()
+        len_single = len(stream.getvalue())
+        f.flush()
+        len_double = len(stream.getvalue())
+
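+    # A second flush must rewrite the file in place rather than append,
+    # leaving the stream the same length.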
+    assert_(len_single == len_double)
+
+
+def test_dtype_specifiers():
+    # Numpy 1.7.0-dev had a bug where 'i2' wouldn't work.
+    # Specifying np.int16 or similar only works from the same commit as this
+    # comment was made.
+    with make_simple(BytesIO(), mode='w') as f:
+        f.createDimension('x',4)
+        f.createVariable('v1', 'i2', ['x'])
+        f.createVariable('v2', np.int16, ['x'])
+        f.createVariable('v3', np.dtype(np.int16), ['x'])
+
+
+def test_ticket_1720():
+    io = BytesIO()
+
+    items = [0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]
+
+    with netcdf_file(io, 'w') as f:
+        f.history = 'Created for a test'
+        f.createDimension('float_var', 10)
+        float_var = f.createVariable('float_var', 'f', ('float_var',))
+        float_var[:] = items
+        float_var.units = 'metres'
+        f.flush()
+        contents = io.getvalue()
+
+    io = BytesIO(contents)
+    with netcdf_file(io, 'r') as f:
+        assert_equal(f.history, b'Created for a test')
+        float_var = f.variables['float_var']
+        assert_equal(float_var.units, b'metres')
+        assert_equal(float_var.shape, (10,))
+        assert_allclose(float_var[:], items)
+
+
+def test_mmaps_segfault():
+    filename = pjoin(TEST_DATA_PATH, 'example_1.nc')
+
+    if not IS_PYPY:
+        with warnings.catch_warnings():
+            warnings.simplefilter("error")
+            with netcdf_file(filename, mmap=True) as f:
+                x = f.variables['lat'][:]
+                # should not raise warnings
+                del x
+
+    def doit():
+        with netcdf_file(filename, mmap=True) as f:
+            return f.variables['lat'][:]
+
+    # should not crash
+    with suppress_warnings() as sup:
+        sup.filter(RuntimeWarning,
+                   "Cannot close a netcdf_file opened with mmap=True, when netcdf_variables or arrays referring to its data still exist")
+        x = doit()
+    x.sum()
+
+
+def test_zero_dimensional_var():
+    io = BytesIO()
+    with make_simple(io, 'w') as f:
+        v = f.createVariable('zerodim', 'i2', [])
+        # This is checking that .isrec returns a boolean - don't simplify it
+        # to 'assert not ...'
+        assert v.isrec is False, v.isrec
+        f.flush()
+
+
+def test_byte_gatts():
+    # Check that global "string" atts work like they did before py3k
+    # unicode and general bytes confusion
+    with in_tempdir():
+        filename = 'g_byte_atts.nc'
+        f = netcdf_file(filename, 'w')
+        f._attributes['holy'] = b'grail'
+        f._attributes['witch'] = 'floats'
+        f.close()
+        f = netcdf_file(filename, 'r')
+        assert_equal(f._attributes['holy'], b'grail')
+        assert_equal(f._attributes['witch'], b'floats')
+        f.close()
+
+
+def test_open_append():
+    # open 'w' put one attr
+    with in_tempdir():
+        filename = 'append_dat.nc'
+        f = netcdf_file(filename, 'w')
+        f._attributes['Kilroy'] = 'was here'
+        f.close()
+
+        # open again in 'a', read the att and a new one
+        f = netcdf_file(filename, 'a')
+        assert_equal(f._attributes['Kilroy'], b'was here')
+        f._attributes['naughty'] = b'Zoot'
+        f.close()
+
+        # open yet again in 'r' and check both atts
+        f = netcdf_file(filename, 'r')
+        assert_equal(f._attributes['Kilroy'], b'was here')
+        assert_equal(f._attributes['naughty'], b'Zoot')
+        f.close()
+
+
+def test_append_recordDimension():
+    dataSize = 100
+
+    with in_tempdir():
+        # Create file with record time dimension
+        with netcdf_file('withRecordDimension.nc', 'w') as f:
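+            # A dimension created with size None is the record (unlimited)
+            # dimension; variables defined along it can grow on append.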
+            f.createDimension('time', None)
+            f.createVariable('time', 'd', ('time',))
+            f.createDimension('x', dataSize)
+            x = f.createVariable('x', 'd', ('x',))
+            x[:] = np.array(range(dataSize))
+            f.createDimension('y', dataSize)
+            y = f.createVariable('y', 'd', ('y',))
+            y[:] = np.array(range(dataSize))
+            f.createVariable('testData', 'i', ('time', 'x', 'y'))
+            f.flush()
+            f.close()
+
+        for i in range(2):
+            # Open the file in append mode and add data
+            with netcdf_file('withRecordDimension.nc', 'a') as f:
+                f.variables['time'].data = np.append(f.variables["time"].data, i)
+                f.variables['testData'][i, :, :] = np.full((dataSize, dataSize), i)
+                f.flush()
+
+            # Read the file and check that append worked
+            with netcdf_file('withRecordDimension.nc') as f:
+                assert_equal(f.variables['time'][-1], i)
+                assert_equal(f.variables['testData'][-1, :, :].copy(), np.full((dataSize, dataSize), i))
+                assert_equal(f.variables['time'].data.shape[0], i+1)
+                assert_equal(f.variables['testData'].data.shape[0], i+1)
+
+        # Read the file and check that 'data' was not saved as user defined
+        # attribute of testData variable during append operation
+        with netcdf_file('withRecordDimension.nc') as f:
+            with assert_raises(KeyError) as ar:
+                f.variables['testData']._attributes['data']
+            ex = ar.value
+            assert_equal(ex.args[0], 'data')
+
+def test_maskandscale():
+    t = np.linspace(20, 30, 15)
+    t[3] = 100
+    tm = np.ma.masked_greater(t, 99)
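+    # With maskandscale=True the reader masks entries equal to missing_value
+    # and unpacks the rest as scale_factor * stored + add_offset.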
+    fname = pjoin(TEST_DATA_PATH, 'example_2.nc')
+    with netcdf_file(fname, maskandscale=True) as f:
+        Temp = f.variables['Temperature']
+        assert_equal(Temp.missing_value, 9999)
+        assert_equal(Temp.add_offset, 20)
+        assert_equal(Temp.scale_factor, np.float32(0.01))
+        found = Temp[:].compressed()
+        del Temp  # Remove ref to mmap, so file can be closed.
+        expected = np.round(tm.compressed(), 2)
+        assert_allclose(found, expected)
+
+    with in_tempdir():
+        newfname = 'ms.nc'
+        f = netcdf_file(newfname, 'w', maskandscale=True)
+        f.createDimension('Temperature', len(tm))
+        temp = f.createVariable('Temperature', 'i', ('Temperature',))
+        temp.missing_value = 9999
+        temp.scale_factor = 0.01
+        temp.add_offset = 20
+        temp[:] = tm
+        f.close()
+
+        with netcdf_file(newfname, maskandscale=True) as f:
+            Temp = f.variables['Temperature']
+            assert_equal(Temp.missing_value, 9999)
+            assert_equal(Temp.add_offset, 20)
+            assert_equal(Temp.scale_factor, np.float32(0.01))
+            expected = np.round(tm.compressed(), 2)
+            found = Temp[:].compressed()
+            del Temp
+            assert_allclose(found, expected)
+
+
+# ------------------------------------------------------------------------
+# Test reading with masked values (_FillValue / missing_value)
+# ------------------------------------------------------------------------
+
+def test_read_withValuesNearFillValue():
+    # Regression test for ticket #5626
+    fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
+    with netcdf_file(fname, maskandscale=True) as f:
+        vardata = f.variables['var1_fillval0'][:]
+        assert_mask_matches(vardata, [False, True, False])
+
+def test_read_withNoFillValue():
+    # For a variable with no fill value, reading data with maskandscale=True
+    # should return unmasked data
+    fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
+    with netcdf_file(fname, maskandscale=True) as f:
+        vardata = f.variables['var2_noFillval'][:]
+        assert_mask_matches(vardata, [False, False, False])
+        assert_equal(vardata, [1,2,3])
+
+def test_read_withFillValueAndMissingValue():
+    # For a variable with both _FillValue and missing_value, the _FillValue
+    # should be used
+    IRRELEVANT_VALUE = 9999
+    fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
+    with netcdf_file(fname, maskandscale=True) as f:
+        vardata = f.variables['var3_fillvalAndMissingValue'][:]
+        assert_mask_matches(vardata, [True, False, False])
+        assert_equal(vardata, [IRRELEVANT_VALUE, 2, 3])
+
+def test_read_withMissingValue():
+    # For a variable with missing_value but not _FillValue, the missing_value
+    # should be used
+    fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
+    with netcdf_file(fname, maskandscale=True) as f:
+        vardata = f.variables['var4_missingValue'][:]
+        assert_mask_matches(vardata, [False, True, False])
+
+def test_read_withFillValNaN():
+    fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
+    with netcdf_file(fname, maskandscale=True) as f:
+        vardata = f.variables['var5_fillvalNaN'][:]
+        assert_mask_matches(vardata, [False, True, False])
+
+def test_read_withChar():
+    fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
+    with netcdf_file(fname, maskandscale=True) as f:
+        vardata = f.variables['var6_char'][:]
+        assert_mask_matches(vardata, [False, True, False])
+
+def test_read_with2dVar():
+    fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
+    with netcdf_file(fname, maskandscale=True) as f:
+        vardata = f.variables['var7_2d'][:]
+        assert_mask_matches(vardata, [[True, False], [False, False], [False, True]])
+
+def test_read_withMaskAndScaleFalse():
+    # If a variable has a _FillValue (or missing_value) attribute, but is read
+    # with maskandscale set to False, the result should be unmasked
+    fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')
+    # Open file with mmap=False to avoid problems with closing a mmap'ed file
+    # when arrays referring to its data still exist:
+    with netcdf_file(fname, maskandscale=False, mmap=False) as f:
+        vardata = f.variables['var3_fillvalAndMissingValue'][:]
+        assert_mask_matches(vardata, [False, False, False])
+        assert_equal(vardata, [1, 2, 3])
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/test_paths.py b/__packaged__/coreml/.python_dependencies/scipy/io/tests/test_paths.py
new file mode 100644
index 00000000..84464e46
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/tests/test_paths.py
@@ -0,0 +1,93 @@
+"""
+Ensure that we can use pathlib.Path objects in all relevant IO functions.
+"""
+from pathlib import Path
+
+import numpy as np
+
+import scipy.io
+import scipy.io.wavfile
+from scipy._lib._tmpdirs import tempdir
+import scipy.sparse
+
+
+class TestPaths:
+    data = np.arange(5).astype(np.int64)
+
+    def test_savemat(self):
+        with tempdir() as temp_dir:
+            path = Path(temp_dir) / 'data.mat'
+            scipy.io.savemat(path, {'data': self.data})
+            assert path.is_file()
+
+    def test_loadmat(self):
+        # Save data with string path, load with pathlib.Path
+        with tempdir() as temp_dir:
+            path = Path(temp_dir) / 'data.mat'
+            scipy.io.savemat(str(path), {'data': self.data})
+
+            mat_contents = scipy.io.loadmat(path)
+            assert (mat_contents['data'] == self.data).all()
+
+    def test_whosmat(self):
+        # Save data with string path, load with pathlib.Path
+        with tempdir() as temp_dir:
+            path = Path(temp_dir) / 'data.mat'
+            scipy.io.savemat(str(path), {'data': self.data})
+
+            contents = scipy.io.whosmat(path)
+            assert contents[0] == ('data', (1, 5), 'int64')
+
+    def test_readsav(self):
+        path = Path(__file__).parent / 'data/scalar_string.sav'
+        scipy.io.readsav(path)
+
+    def test_hb_read(self):
+        # Save data with string path, load with pathlib.Path
+        with tempdir() as temp_dir:
+            data = scipy.sparse.csr_matrix(scipy.sparse.eye(3))
+            path = Path(temp_dir) / 'data.hb'
+            scipy.io.hb_write(str(path), data)
+
+            data_new = scipy.io.hb_read(path)
+            assert (data_new != data).nnz == 0
+
+    def test_hb_write(self):
+        with tempdir() as temp_dir:
+            data = scipy.sparse.csr_matrix(scipy.sparse.eye(3))
+            path = Path(temp_dir) / 'data.hb'
+            scipy.io.hb_write(path, data)
+            assert path.is_file()
+
+    def test_mmio_read(self):
+        # Save data with string path, load with pathlib.Path
+        with tempdir() as temp_dir:
+            data = scipy.sparse.csr_matrix(scipy.sparse.eye(3))
+            path = Path(temp_dir) / 'data.mtx'
+            scipy.io.mmwrite(str(path), data)
+
+            data_new = scipy.io.mmread(path)
+            assert (data_new != data).nnz == 0
+
+    def test_mmio_write(self):
+        with tempdir() as temp_dir:
+            data = scipy.sparse.csr_matrix(scipy.sparse.eye(3))
+            path = Path(temp_dir) / 'data.mtx'
+            scipy.io.mmwrite(path, data)
+
+    def test_netcdf_file(self):
+        path = Path(__file__).parent / 'data/example_1.nc'
+        scipy.io.netcdf_file(path)
+
+    def test_wavfile_read(self):
+        path = Path(__file__).parent / 'data/test-8000Hz-le-2ch-1byteu.wav'
+        scipy.io.wavfile.read(path)
+
+    def test_wavfile_write(self):
+        # Read from str path, write to Path
+        input_path = Path(__file__).parent / 'data/test-8000Hz-le-2ch-1byteu.wav'
+        rate, data = scipy.io.wavfile.read(str(input_path))
+
+        with tempdir() as temp_dir:
+            output_path = Path(temp_dir) / input_path.name
+            scipy.io.wavfile.write(output_path, rate, data)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/tests/test_wavfile.py b/__packaged__/coreml/.python_dependencies/scipy/io/tests/test_wavfile.py
new file mode 100644
index 00000000..95acefbb
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/tests/test_wavfile.py
@@ -0,0 +1,416 @@
+import os
+import sys
+from io import BytesIO
+
+import numpy as np
+from numpy.testing import (assert_equal, assert_, assert_array_equal,
+                           break_cycles, suppress_warnings, IS_PYPY)
+import pytest
+from pytest import raises, warns
+
+from scipy.io import wavfile
+
+
+def datafile(fn):
+    return os.path.join(os.path.dirname(__file__), 'data', fn)
+
+
+def test_read_1():
+    # 32-bit PCM (which uses extensible format)
+    for mmap in [False, True]:
+        filename = 'test-44100Hz-le-1ch-4bytes.wav'
+        rate, data = wavfile.read(datafile(filename), mmap=mmap)
+
+        assert_equal(rate, 44100)
+        assert_(np.issubdtype(data.dtype, np.int32))
+        assert_equal(data.shape, (4410,))
+
+        del data
+
+
+def test_read_2():
+    # 8-bit unsigned PCM
+    for mmap in [False, True]:
+        filename = 'test-8000Hz-le-2ch-1byteu.wav'
+        rate, data = wavfile.read(datafile(filename), mmap=mmap)
+
+        assert_equal(rate, 8000)
+        assert_(np.issubdtype(data.dtype, np.uint8))
+        assert_equal(data.shape, (800, 2))
+
+        del data
+
+
+def test_read_3():
+    # Little-endian float
+    for mmap in [False, True]:
+        filename = 'test-44100Hz-2ch-32bit-float-le.wav'
+        rate, data = wavfile.read(datafile(filename), mmap=mmap)
+
+        assert_equal(rate, 44100)
+        assert_(np.issubdtype(data.dtype, np.float32))
+        assert_equal(data.shape, (441, 2))
+
+        del data
+
+
+def test_read_4():
+    # Contains unsupported 'PEAK' chunk
+    for mmap in [False, True]:
+        with suppress_warnings() as sup:
+            sup.filter(wavfile.WavFileWarning,
+                       "Chunk .non-data. not understood, skipping it")
+            filename = 'test-48000Hz-2ch-64bit-float-le-wavex.wav'
+            rate, data = wavfile.read(datafile(filename), mmap=mmap)
+
+        assert_equal(rate, 48000)
+        assert_(np.issubdtype(data.dtype, np.float64))
+        assert_equal(data.shape, (480, 2))
+
+        del data
+
+
+def test_read_5():
+    # Big-endian float
+    for mmap in [False, True]:
+        filename = 'test-44100Hz-2ch-32bit-float-be.wav'
+        rate, data = wavfile.read(datafile(filename), mmap=mmap)
+
+        assert_equal(rate, 44100)
+        assert_(np.issubdtype(data.dtype, np.float32))
+        assert_(data.dtype.byteorder == '>' or (sys.byteorder == 'big' and
+                                                data.dtype.byteorder == '='))
+        assert_equal(data.shape, (441, 2))
+
+        del data
+
+
+def test_5_bit_odd_size_no_pad():
+    # 5-bit, 1 B container, 5 channels, 9 samples, 45 B data chunk
+    # Generated by LTspice, which incorrectly omits the pad byte, but the
+    # file should be readable anyway
+    for mmap in [False, True]:
+        filename = 'test-8000Hz-le-5ch-9S-5bit.wav'
+        rate, data = wavfile.read(datafile(filename), mmap=mmap)
+
+        assert_equal(rate, 8000)
+        assert_(np.issubdtype(data.dtype, np.uint8))
+        assert_equal(data.shape, (9, 5))
+
+        # 8-5 = 3 LSBits should be 0
+        assert_equal(data & 0b00000111, 0)
+
+        # Unsigned
+        assert_equal(data.max(), 0b11111000)  # Highest possible
+        assert_equal(data[0, 0], 128)  # Midpoint is 128 for <= 8-bit
+        assert_equal(data.min(), 0)  # Lowest possible
+
+        del data
+
+
+def test_12_bit_even_size():
+    # 12-bit, 2 B container, 4 channels, 9 samples, 72 B data chunk
+    # Generated by LTspice from 1 Vpk sine waves
+    for mmap in [False, True]:
+        filename = 'test-8000Hz-le-4ch-9S-12bit.wav'
+        rate, data = wavfile.read(datafile(filename), mmap=mmap)
+
+        assert_equal(rate, 8000)
+        assert_(np.issubdtype(data.dtype, np.int16))
+        assert_equal(data.shape, (9, 4))
+
+        # 16-12 = 4 LSBits should be 0
+        assert_equal(data & 0b00000000_00001111, 0)
+
+        # Signed
+        assert_equal(data.max(), 0b01111111_11110000)  # Highest possible
+        assert_equal(data[0, 0], 0)  # Midpoint is 0 for >= 9-bit
+        assert_equal(data.min(), -0b10000000_00000000)  # Lowest possible
+
+        del data
+
+
+def test_24_bit_odd_size_with_pad():
+    # 24-bit, 3 B container, 3 channels, 5 samples, 45 B data chunk
+    # Should not raise any warnings about the data chunk pad byte
+    filename = 'test-8000Hz-le-3ch-5S-24bit.wav'
+    rate, data = wavfile.read(datafile(filename), mmap=False)
+
+    assert_equal(rate, 8000)
+    assert_(np.issubdtype(data.dtype, np.int32))
+    assert_equal(data.shape, (5, 3))
+
+    # All LSBytes should be 0
+    assert_equal(data & 0xff, 0)
+
+    # Hand-made max/min samples under different conventions:
+    #                      2**(N-1)     2**(N-1)-1     LSB
+    assert_equal(data, [[-0x8000_0000, -0x7fff_ff00, -0x200],
+                        [-0x4000_0000, -0x3fff_ff00, -0x100],
+                        [+0x0000_0000, +0x0000_0000, +0x000],
+                        [+0x4000_0000, +0x3fff_ff00, +0x100],
+                        [+0x7fff_ff00, +0x7fff_ff00, +0x200]])
+    #                     ^ clipped
+
+
+def test_20_bit_extra_data():
+    # 20-bit, 3 B container, 1 channel, 10 samples, 30 B data chunk
+    # with extra data filling container beyond the bit depth
+    filename = 'test-8000Hz-le-1ch-10S-20bit-extra.wav'
+    rate, data = wavfile.read(datafile(filename), mmap=False)
+
+    assert_equal(rate, 1234)
+    assert_(np.issubdtype(data.dtype, np.int32))
+    assert_equal(data.shape, (10,))
+
+    # All LSBytes should still be 0, because 3 B container in 4 B dtype
+    assert_equal(data & 0xff, 0)
+
+    # But it should load the data beyond 20 bits
+    assert_((data & 0xf00).any())
+
+    # Full-scale positive/negative samples, then being halved each time
+    assert_equal(data, [+0x7ffff000,       # +full-scale 20-bit
+                        -0x7ffff000,       # -full-scale 20-bit
+                        +0x7ffff000 >> 1,  # +1/2
+                        -0x7ffff000 >> 1,  # -1/2
+                        +0x7ffff000 >> 2,  # +1/4
+                        -0x7ffff000 >> 2,  # -1/4
+                        +0x7ffff000 >> 3,  # +1/8
+                        -0x7ffff000 >> 3,  # -1/8
+                        +0x7ffff000 >> 4,  # +1/16
+                        -0x7ffff000 >> 4,  # -1/16
+                        ])
+
+
+def test_36_bit_odd_size():
+    # 36-bit, 5 B container, 3 channels, 5 samples, 75 B data chunk + pad
+    filename = 'test-8000Hz-le-3ch-5S-36bit.wav'
+    rate, data = wavfile.read(datafile(filename), mmap=False)
+
+    assert_equal(rate, 8000)
+    assert_(np.issubdtype(data.dtype, np.int64))
+    assert_equal(data.shape, (5, 3))
+
+    # 28 LSBits should be 0
+    assert_equal(data & 0xfffffff, 0)
+
+    # Hand-made max/min samples under different conventions:
+    #            Fixed-point 2**(N-1)    Full-scale 2**(N-1)-1       LSB
+    correct = [[-0x8000_0000_0000_0000, -0x7fff_ffff_f000_0000, -0x2000_0000],
+               [-0x4000_0000_0000_0000, -0x3fff_ffff_f000_0000, -0x1000_0000],
+               [+0x0000_0000_0000_0000, +0x0000_0000_0000_0000, +0x0000_0000],
+               [+0x4000_0000_0000_0000, +0x3fff_ffff_f000_0000, +0x1000_0000],
+               [+0x7fff_ffff_f000_0000, +0x7fff_ffff_f000_0000, +0x2000_0000]]
+    #              ^ clipped
+
+    assert_equal(data, correct)
+
+
+def test_45_bit_even_size():
+    # 45-bit, 6 B container, 3 channels, 5 samples, 90 B data chunk
+    filename = 'test-8000Hz-le-3ch-5S-45bit.wav'
+    rate, data = wavfile.read(datafile(filename), mmap=False)
+
+    assert_equal(rate, 8000)
+    assert_(np.issubdtype(data.dtype, np.int64))
+    assert_equal(data.shape, (5, 3))
+
+    # 19 LSBits should be 0
+    assert_equal(data & 0x7ffff, 0)
+
+    # Hand-made max/min samples under different conventions:
+    #            Fixed-point 2**(N-1)    Full-scale 2**(N-1)-1      LSB
+    correct = [[-0x8000_0000_0000_0000, -0x7fff_ffff_fff8_0000, -0x10_0000],
+               [-0x4000_0000_0000_0000, -0x3fff_ffff_fff8_0000, -0x08_0000],
+               [+0x0000_0000_0000_0000, +0x0000_0000_0000_0000, +0x00_0000],
+               [+0x4000_0000_0000_0000, +0x3fff_ffff_fff8_0000, +0x08_0000],
+               [+0x7fff_ffff_fff8_0000, +0x7fff_ffff_fff8_0000, +0x10_0000]]
+    #              ^ clipped
+
+    assert_equal(data, correct)
+
+
+def test_53_bit_odd_size():
+    # 53-bit, 7 B container, 3 channels, 5 samples, 105 B data chunk + pad
+    filename = 'test-8000Hz-le-3ch-5S-53bit.wav'
+    rate, data = wavfile.read(datafile(filename), mmap=False)
+
+    assert_equal(rate, 8000)
+    assert_(np.issubdtype(data.dtype, np.int64))
+    assert_equal(data.shape, (5, 3))
+
+    # 11 LSBits should be 0
+    assert_equal(data & 0x7ff, 0)
+
+    # Hand-made max/min samples under different conventions:
+    #            Fixed-point 2**(N-1)    Full-scale 2**(N-1)-1    LSB
+    correct = [[-0x8000_0000_0000_0000, -0x7fff_ffff_ffff_f800, -0x1000],
+               [-0x4000_0000_0000_0000, -0x3fff_ffff_ffff_f800, -0x0800],
+               [+0x0000_0000_0000_0000, +0x0000_0000_0000_0000, +0x0000],
+               [+0x4000_0000_0000_0000, +0x3fff_ffff_ffff_f800, +0x0800],
+               [+0x7fff_ffff_ffff_f800, +0x7fff_ffff_ffff_f800, +0x1000]]
+    #              ^ clipped
+
+    assert_equal(data, correct)
+
+
+def test_64_bit_even_size():
+    # 64-bit, 8 B container, 3 channels, 5 samples, 120 B data chunk
+    for mmap in [False, True]:
+        filename = 'test-8000Hz-le-3ch-5S-64bit.wav'
+        rate, data = wavfile.read(datafile(filename), mmap=mmap)
+
+        assert_equal(rate, 8000)
+        assert_(np.issubdtype(data.dtype, np.int64))
+        assert_equal(data.shape, (5, 3))
+
+        # Hand-made max/min samples under different conventions:
+        #            Fixed-point 2**(N-1)    Full-scale 2**(N-1)-1   LSB
+        correct = [[-0x8000_0000_0000_0000, -0x7fff_ffff_ffff_ffff, -0x2],
+                   [-0x4000_0000_0000_0000, -0x3fff_ffff_ffff_ffff, -0x1],
+                   [+0x0000_0000_0000_0000, +0x0000_0000_0000_0000, +0x0],
+                   [+0x4000_0000_0000_0000, +0x3fff_ffff_ffff_ffff, +0x1],
+                   [+0x7fff_ffff_ffff_ffff, +0x7fff_ffff_ffff_ffff, +0x2]]
+        #              ^ clipped
+
+        assert_equal(data, correct)
+
+        del data
+
+
+def test_unsupported_mmap():
+    # Test containers that cannot be mapped to numpy types
+    for filename in {'test-8000Hz-le-3ch-5S-24bit.wav',
+                     'test-8000Hz-le-3ch-5S-36bit.wav',
+                     'test-8000Hz-le-3ch-5S-45bit.wav',
+                     'test-8000Hz-le-3ch-5S-53bit.wav',
+                     'test-8000Hz-le-1ch-10S-20bit-extra.wav'}:
+        with raises(ValueError, match="mmap.*not compatible"):
+            rate, data = wavfile.read(datafile(filename), mmap=True)
+
+
+def test_rifx():
+    # Compare equivalent RIFX and RIFF files
+    for rifx, riff in {('test-44100Hz-be-1ch-4bytes.wav',
+                        'test-44100Hz-le-1ch-4bytes.wav'),
+                       ('test-8000Hz-be-3ch-5S-24bit.wav',
+                        'test-8000Hz-le-3ch-5S-24bit.wav')}:
+        rate1, data1 = wavfile.read(datafile(rifx), mmap=False)
+        rate2, data2 = wavfile.read(datafile(riff), mmap=False)
+        assert_equal(rate1, rate2)
+        assert_equal(data1, data2)
+
+
+def test_read_unknown_filetype_fail():
+    # Not an RIFF
+    for mmap in [False, True]:
+        filename = 'example_1.nc'
+        with open(datafile(filename), 'rb') as fp:
+            with raises(ValueError, match="CDF.*'RIFF' and 'RIFX' supported"):
+                wavfile.read(fp, mmap=mmap)
+
+
+def test_read_unknown_riff_form_type():
+    # RIFF, but not WAVE form
+    for mmap in [False, True]:
+        filename = 'Transparent Busy.ani'
+        with open(datafile(filename), 'rb') as fp:
+            with raises(ValueError, match='Not a WAV file.*ACON'):
+                wavfile.read(fp, mmap=mmap)
+
+
+def test_read_unknown_wave_format():
+    # RIFF and WAVE, but not supported format
+    for mmap in [False, True]:
+        filename = 'test-8000Hz-le-1ch-1byte-ulaw.wav'
+        with open(datafile(filename), 'rb') as fp:
+            with raises(ValueError, match='Unknown wave file format.*MULAW.*'
+                        'Supported formats'):
+                wavfile.read(fp, mmap=mmap)
+
+
+def test_read_early_eof_with_data():
+    # File ends inside 'data' chunk, but we keep incomplete data
+    for mmap in [False, True]:
+        filename = 'test-44100Hz-le-1ch-4bytes-early-eof.wav'
+        with open(datafile(filename), 'rb') as fp:
+            with warns(wavfile.WavFileWarning, match='Reached EOF'):
+                rate, data = wavfile.read(fp, mmap=mmap)
+                assert data.size > 0
+                assert rate == 44100
+                # also test writing (gh-12176)
+                data[0] = 0
+
+
+def test_read_early_eof():
+    # File ends after 'fact' chunk at boundary, no data read
+    for mmap in [False, True]:
+        filename = 'test-44100Hz-le-1ch-4bytes-early-eof-no-data.wav'
+        with open(datafile(filename), 'rb') as fp:
+            with raises(ValueError, match="Unexpected end of file."):
+                wavfile.read(fp, mmap=mmap)
+
+
+def test_read_incomplete_chunk():
+    # File ends inside 'fmt ' chunk ID, no data read
+    for mmap in [False, True]:
+        filename = 'test-44100Hz-le-1ch-4bytes-incomplete-chunk.wav'
+        with open(datafile(filename), 'rb') as fp:
+            with raises(ValueError, match="Incomplete chunk ID.*b'f'"):
+                wavfile.read(fp, mmap=mmap)
+
+
+def test_read_inconsistent_header():
+    # File header's size fields contradict each other
+    for mmap in [False, True]:
+        filename = 'test-8000Hz-le-3ch-5S-24bit-inconsistent.wav'
+        with open(datafile(filename), 'rb') as fp:
+            with raises(ValueError, match="header is invalid"):
+                wavfile.read(fp, mmap=mmap)
+
+
+# signed 8-bit integer PCM is not allowed
+# unsigned > 8-bit integer PCM is not allowed
+# 8- or 16-bit float PCM is not expected
+# g and q are platform-dependent, so not included
+@pytest.mark.parametrize("dt_str", ["i2", ">i4", ">i8", ">f4", ">f8", '|u1'])
+@pytest.mark.parametrize("channels", [1, 2, 5])
+@pytest.mark.parametrize("rate", [8000, 32000])
+@pytest.mark.parametrize("mmap", [False, True])
+@pytest.mark.parametrize("realfile", [False, True])
+def test_write_roundtrip(realfile, mmap, rate, channels, dt_str, tmpdir):
+    dtype = np.dtype(dt_str)
+    if realfile:
+        tmpfile = str(tmpdir.join('temp.wav'))
+    else:
+        tmpfile = BytesIO()
+    data = np.random.rand(100, channels)
+    if channels == 1:
+        data = data[:, 0]
+    if dtype.kind == 'f':
+        # The range of the float type should be in [-1, 1]
+        data = data.astype(dtype)
+    else:
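+        # Scale the [0, 1) floats to [0, 128) so the values fit in every
+        # parametrized integer dtype, including uint8.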
+        data = (data*128).astype(dtype)
+
+    wavfile.write(tmpfile, rate, data)
+
+    rate2, data2 = wavfile.read(tmpfile, mmap=mmap)
+
+    assert_equal(rate, rate2)
+    assert_(data2.dtype.byteorder in ('<', '=', '|'), msg=data2.dtype)
+    assert_array_equal(data, data2)
+    # also test writing (gh-12176)
+    if realfile:
+        data2[0] = 0
+    else:
+        with pytest.raises(ValueError, match='read-only'):
+            data2[0] = 0
+
+    if realfile and mmap and IS_PYPY and sys.platform == 'win32':
+        # windows cannot remove a dead file held by a mmap but not collected
+        # in PyPy; since the filename gets reused in this test, clean this up
+        break_cycles()
+        break_cycles()
diff --git a/__packaged__/coreml/.python_dependencies/scipy/io/wavfile.py b/__packaged__/coreml/.python_dependencies/scipy/io/wavfile.py
new file mode 100644
index 00000000..1ad04795
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/io/wavfile.py
@@ -0,0 +1,840 @@
+"""
+Module to read / write wav files using NumPy arrays
+
+Functions
+---------
+`read`: Return the sample rate (in samples/sec) and data from a WAV file.
+
+`write`: Write a NumPy array as a WAV file.
+
+"""
+import io
+import sys
+import numpy
+import struct
+import warnings
+from enum import IntEnum
+
+
+__all__ = [
+    'WavFileWarning',
+    'read',
+    'write'
+]
+
+
+class WavFileWarning(UserWarning):
+    pass
+
+
+class WAVE_FORMAT(IntEnum):
+    """
+    WAVE form wFormatTag IDs
+
+    Complete list is in mmreg.h in Windows 10 SDK.  ALAC and OPUS are the
+    newest additions, in v10.0.14393 2016-07
+    """
+    UNKNOWN = 0x0000
+    PCM = 0x0001
+    ADPCM = 0x0002
+    IEEE_FLOAT = 0x0003
+    VSELP = 0x0004
+    IBM_CVSD = 0x0005
+    ALAW = 0x0006
+    MULAW = 0x0007
+    DTS = 0x0008
+    DRM = 0x0009
+    WMAVOICE9 = 0x000A
+    WMAVOICE10 = 0x000B
+    OKI_ADPCM = 0x0010
+    DVI_ADPCM = 0x0011
+    IMA_ADPCM = 0x0011  # Duplicate
+    MEDIASPACE_ADPCM = 0x0012
+    SIERRA_ADPCM = 0x0013
+    G723_ADPCM = 0x0014
+    DIGISTD = 0x0015
+    DIGIFIX = 0x0016
+    DIALOGIC_OKI_ADPCM = 0x0017
+    MEDIAVISION_ADPCM = 0x0018
+    CU_CODEC = 0x0019
+    HP_DYN_VOICE = 0x001A
+    YAMAHA_ADPCM = 0x0020
+    SONARC = 0x0021
+    DSPGROUP_TRUESPEECH = 0x0022
+    ECHOSC1 = 0x0023
+    AUDIOFILE_AF36 = 0x0024
+    APTX = 0x0025
+    AUDIOFILE_AF10 = 0x0026
+    PROSODY_1612 = 0x0027
+    LRC = 0x0028
+    DOLBY_AC2 = 0x0030
+    GSM610 = 0x0031
+    MSNAUDIO = 0x0032
+    ANTEX_ADPCME = 0x0033
+    CONTROL_RES_VQLPC = 0x0034
+    DIGIREAL = 0x0035
+    DIGIADPCM = 0x0036
+    CONTROL_RES_CR10 = 0x0037
+    NMS_VBXADPCM = 0x0038
+    CS_IMAADPCM = 0x0039
+    ECHOSC3 = 0x003A
+    ROCKWELL_ADPCM = 0x003B
+    ROCKWELL_DIGITALK = 0x003C
+    XEBEC = 0x003D
+    G721_ADPCM = 0x0040
+    G728_CELP = 0x0041
+    MSG723 = 0x0042
+    INTEL_G723_1 = 0x0043
+    INTEL_G729 = 0x0044
+    SHARP_G726 = 0x0045
+    MPEG = 0x0050
+    RT24 = 0x0052
+    PAC = 0x0053
+    MPEGLAYER3 = 0x0055
+    LUCENT_G723 = 0x0059
+    CIRRUS = 0x0060
+    ESPCM = 0x0061
+    VOXWARE = 0x0062
+    CANOPUS_ATRAC = 0x0063
+    G726_ADPCM = 0x0064
+    G722_ADPCM = 0x0065
+    DSAT = 0x0066
+    DSAT_DISPLAY = 0x0067
+    VOXWARE_BYTE_ALIGNED = 0x0069
+    VOXWARE_AC8 = 0x0070
+    VOXWARE_AC10 = 0x0071
+    VOXWARE_AC16 = 0x0072
+    VOXWARE_AC20 = 0x0073
+    VOXWARE_RT24 = 0x0074
+    VOXWARE_RT29 = 0x0075
+    VOXWARE_RT29HW = 0x0076
+    VOXWARE_VR12 = 0x0077
+    VOXWARE_VR18 = 0x0078
+    VOXWARE_TQ40 = 0x0079
+    VOXWARE_SC3 = 0x007A
+    VOXWARE_SC3_1 = 0x007B
+    SOFTSOUND = 0x0080
+    VOXWARE_TQ60 = 0x0081
+    MSRT24 = 0x0082
+    G729A = 0x0083
+    MVI_MVI2 = 0x0084
+    DF_G726 = 0x0085
+    DF_GSM610 = 0x0086
+    ISIAUDIO = 0x0088
+    ONLIVE = 0x0089
+    MULTITUDE_FT_SX20 = 0x008A
+    INFOCOM_ITS_G721_ADPCM = 0x008B
+    CONVEDIA_G729 = 0x008C
+    CONGRUENCY = 0x008D
+    SBC24 = 0x0091
+    DOLBY_AC3_SPDIF = 0x0092
+    MEDIASONIC_G723 = 0x0093
+    PROSODY_8KBPS = 0x0094
+    ZYXEL_ADPCM = 0x0097
+    PHILIPS_LPCBB = 0x0098
+    PACKED = 0x0099
+    MALDEN_PHONYTALK = 0x00A0
+    RACAL_RECORDER_GSM = 0x00A1
+    RACAL_RECORDER_G720_A = 0x00A2
+    RACAL_RECORDER_G723_1 = 0x00A3
+    RACAL_RECORDER_TETRA_ACELP = 0x00A4
+    NEC_AAC = 0x00B0
+    RAW_AAC1 = 0x00FF
+    RHETOREX_ADPCM = 0x0100
+    IRAT = 0x0101
+    VIVO_G723 = 0x0111
+    VIVO_SIREN = 0x0112
+    PHILIPS_CELP = 0x0120
+    PHILIPS_GRUNDIG = 0x0121
+    DIGITAL_G723 = 0x0123
+    SANYO_LD_ADPCM = 0x0125
+    SIPROLAB_ACEPLNET = 0x0130
+    SIPROLAB_ACELP4800 = 0x0131
+    SIPROLAB_ACELP8V3 = 0x0132
+    SIPROLAB_G729 = 0x0133
+    SIPROLAB_G729A = 0x0134
+    SIPROLAB_KELVIN = 0x0135
+    VOICEAGE_AMR = 0x0136
+    G726ADPCM = 0x0140
+    DICTAPHONE_CELP68 = 0x0141
+    DICTAPHONE_CELP54 = 0x0142
+    QUALCOMM_PUREVOICE = 0x0150
+    QUALCOMM_HALFRATE = 0x0151
+    TUBGSM = 0x0155
+    MSAUDIO1 = 0x0160
+    WMAUDIO2 = 0x0161
+    WMAUDIO3 = 0x0162
+    WMAUDIO_LOSSLESS = 0x0163
+    WMASPDIF = 0x0164
+    UNISYS_NAP_ADPCM = 0x0170
+    UNISYS_NAP_ULAW = 0x0171
+    UNISYS_NAP_ALAW = 0x0172
+    UNISYS_NAP_16K = 0x0173
+    SYCOM_ACM_SYC008 = 0x0174
+    SYCOM_ACM_SYC701_G726L = 0x0175
+    SYCOM_ACM_SYC701_CELP54 = 0x0176
+    SYCOM_ACM_SYC701_CELP68 = 0x0177
+    KNOWLEDGE_ADVENTURE_ADPCM = 0x0178
+    FRAUNHOFER_IIS_MPEG2_AAC = 0x0180
+    DTS_DS = 0x0190
+    CREATIVE_ADPCM = 0x0200
+    CREATIVE_FASTSPEECH8 = 0x0202
+    CREATIVE_FASTSPEECH10 = 0x0203
+    UHER_ADPCM = 0x0210
+    ULEAD_DV_AUDIO = 0x0215
+    ULEAD_DV_AUDIO_1 = 0x0216
+    QUARTERDECK = 0x0220
+    ILINK_VC = 0x0230
+    RAW_SPORT = 0x0240
+    ESST_AC3 = 0x0241
+    GENERIC_PASSTHRU = 0x0249
+    IPI_HSX = 0x0250
+    IPI_RPELP = 0x0251
+    CS2 = 0x0260
+    SONY_SCX = 0x0270
+    SONY_SCY = 0x0271
+    SONY_ATRAC3 = 0x0272
+    SONY_SPC = 0x0273
+    TELUM_AUDIO = 0x0280
+    TELUM_IA_AUDIO = 0x0281
+    NORCOM_VOICE_SYSTEMS_ADPCM = 0x0285
+    FM_TOWNS_SND = 0x0300
+    MICRONAS = 0x0350
+    MICRONAS_CELP833 = 0x0351
+    BTV_DIGITAL = 0x0400
+    INTEL_MUSIC_CODER = 0x0401
+    INDEO_AUDIO = 0x0402
+    QDESIGN_MUSIC = 0x0450
+    ON2_VP7_AUDIO = 0x0500
+    ON2_VP6_AUDIO = 0x0501
+    VME_VMPCM = 0x0680
+    TPC = 0x0681
+    LIGHTWAVE_LOSSLESS = 0x08AE
+    OLIGSM = 0x1000
+    OLIADPCM = 0x1001
+    OLICELP = 0x1002
+    OLISBC = 0x1003
+    OLIOPR = 0x1004
+    LH_CODEC = 0x1100
+    LH_CODEC_CELP = 0x1101
+    LH_CODEC_SBC8 = 0x1102
+    LH_CODEC_SBC12 = 0x1103
+    LH_CODEC_SBC16 = 0x1104
+    NORRIS = 0x1400
+    ISIAUDIO_2 = 0x1401
+    SOUNDSPACE_MUSICOMPRESS = 0x1500
+    MPEG_ADTS_AAC = 0x1600
+    MPEG_RAW_AAC = 0x1601
+    MPEG_LOAS = 0x1602
+    NOKIA_MPEG_ADTS_AAC = 0x1608
+    NOKIA_MPEG_RAW_AAC = 0x1609
+    VODAFONE_MPEG_ADTS_AAC = 0x160A
+    VODAFONE_MPEG_RAW_AAC = 0x160B
+    MPEG_HEAAC = 0x1610
+    VOXWARE_RT24_SPEECH = 0x181C
+    SONICFOUNDRY_LOSSLESS = 0x1971
+    INNINGS_TELECOM_ADPCM = 0x1979
+    LUCENT_SX8300P = 0x1C07
+    LUCENT_SX5363S = 0x1C0C
+    CUSEEME = 0x1F03
+    NTCSOFT_ALF2CM_ACM = 0x1FC4
+    DVM = 0x2000
+    DTS2 = 0x2001
+    MAKEAVIS = 0x3313
+    DIVIO_MPEG4_AAC = 0x4143
+    NOKIA_ADAPTIVE_MULTIRATE = 0x4201
+    DIVIO_G726 = 0x4243
+    LEAD_SPEECH = 0x434C
+    LEAD_VORBIS = 0x564C
+    WAVPACK_AUDIO = 0x5756
+    OGG_VORBIS_MODE_1 = 0x674F
+    OGG_VORBIS_MODE_2 = 0x6750
+    OGG_VORBIS_MODE_3 = 0x6751
+    OGG_VORBIS_MODE_1_PLUS = 0x676F
+    OGG_VORBIS_MODE_2_PLUS = 0x6770
+    OGG_VORBIS_MODE_3_PLUS = 0x6771
+    ALAC = 0x6C61
+    _3COM_NBX = 0x7000  # Can't have leading digit
+    OPUS = 0x704F
+    FAAD_AAC = 0x706D
+    AMR_NB = 0x7361
+    AMR_WB = 0x7362
+    AMR_WP = 0x7363
+    GSM_AMR_CBR = 0x7A21
+    GSM_AMR_VBR_SID = 0x7A22
+    COMVERSE_INFOSYS_G723_1 = 0xA100
+    COMVERSE_INFOSYS_AVQSBC = 0xA101
+    COMVERSE_INFOSYS_SBC = 0xA102
+    SYMBOL_G729_A = 0xA103
+    VOICEAGE_AMR_WB = 0xA104
+    INGENIENT_G726 = 0xA105
+    MPEG4_AAC = 0xA106
+    ENCORE_G726 = 0xA107
+    ZOLL_ASAO = 0xA108
+    SPEEX_VOICE = 0xA109
+    VIANIX_MASC = 0xA10A
+    WM9_SPECTRUM_ANALYZER = 0xA10B
+    WMF_SPECTRUM_ANAYZER = 0xA10C
+    GSM_610 = 0xA10D
+    GSM_620 = 0xA10E
+    GSM_660 = 0xA10F
+    GSM_690 = 0xA110
+    GSM_ADAPTIVE_MULTIRATE_WB = 0xA111
+    POLYCOM_G722 = 0xA112
+    POLYCOM_G728 = 0xA113
+    POLYCOM_G729_A = 0xA114
+    POLYCOM_SIREN = 0xA115
+    GLOBAL_IP_ILBC = 0xA116
+    RADIOTIME_TIME_SHIFT_RADIO = 0xA117
+    NICE_ACA = 0xA118
+    NICE_ADPCM = 0xA119
+    VOCORD_G721 = 0xA11A
+    VOCORD_G726 = 0xA11B
+    VOCORD_G722_1 = 0xA11C
+    VOCORD_G728 = 0xA11D
+    VOCORD_G729 = 0xA11E
+    VOCORD_G729_A = 0xA11F
+    VOCORD_G723_1 = 0xA120
+    VOCORD_LBC = 0xA121
+    NICE_G728 = 0xA122
+    FRACE_TELECOM_G729 = 0xA123
+    CODIAN = 0xA124
+    FLAC = 0xF1AC
+    EXTENSIBLE = 0xFFFE
+    DEVELOPMENT = 0xFFFF
+
+
+KNOWN_WAVE_FORMATS = {WAVE_FORMAT.PCM, WAVE_FORMAT.IEEE_FLOAT}
+
+
+def _raise_bad_format(format_tag):
+    try:
+        format_name = WAVE_FORMAT(format_tag).name
+    except ValueError:
+        format_name = f'{format_tag:#06x}'
+    raise ValueError(f"Unknown wave file format: {format_name}. Supported "
+                     "formats: " +
+                     ', '.join(x.name for x in KNOWN_WAVE_FORMATS))
+
+
+def _read_fmt_chunk(fid, is_big_endian):
+    """
+    Returns
+    -------
+    size : int
+        size of format subchunk in bytes (minus 8 for "fmt " and itself)
+    format_tag : int
+        PCM, float, or compressed format
+    channels : int
+        number of channels
+    fs : int
+        sampling frequency in samples per second
+    bytes_per_second : int
+        overall byte rate for the file
+    block_align : int
+        bytes per sample, including all channels
+    bit_depth : int
+        bits per sample
+
+    Notes
+    -----
+    Assumes file pointer is immediately after the 'fmt ' id
+    """
+    if is_big_endian:
+        fmt = '>'
+    else:
+        fmt = '<'
+
+    size = struct.unpack(fmt+'I', fid.read(4))[0]
+
+    if size < 16:
+        raise ValueError("Binary structure of wave file is not compliant")
+
+    res = struct.unpack(fmt+'HHIIHH', fid.read(16))
+    bytes_read = 16
+
+    format_tag, channels, fs, bytes_per_second, block_align, bit_depth = res
+
+    if format_tag == WAVE_FORMAT.EXTENSIBLE and size >= (16+2):
+        ext_chunk_size = struct.unpack(fmt+'H', fid.read(2))[0]
+        bytes_read += 2
+        if ext_chunk_size >= 22:
+            extensible_chunk_data = fid.read(22)
+            bytes_read += 22
+            raw_guid = extensible_chunk_data[2+4:2+4+16]
+            # GUID template {XXXXXXXX-0000-0010-8000-00AA00389B71} (RFC-2361)
+            # MS GUID byte order: first three groups are native byte order,
+            # rest is Big Endian
+            if is_big_endian:
+                tail = b'\x00\x00\x00\x10\x80\x00\x00\xAA\x00\x38\x9B\x71'
+            else:
+                tail = b'\x00\x00\x10\x00\x80\x00\x00\xAA\x00\x38\x9B\x71'
+            if raw_guid.endswith(tail):
+                format_tag = struct.unpack(fmt+'I', raw_guid[:4])[0]
+        else:
+            raise ValueError("Binary structure of wave file is not compliant")
+
+    if format_tag not in KNOWN_WAVE_FORMATS:
+        _raise_bad_format(format_tag)
+
+    # move file pointer to next chunk
+    if size > bytes_read:
+        fid.read(size - bytes_read)
+
+    # fmt should always be 16, 18 or 40, but handle it just in case
+    _handle_pad_byte(fid, size)
+
+    if format_tag == WAVE_FORMAT.PCM:
+        if bytes_per_second != fs * block_align:
+            raise ValueError("WAV header is invalid: nAvgBytesPerSec must"
+                             " equal product of nSamplesPerSec and"
+                             " nBlockAlign, but file has nSamplesPerSec ="
+                             f" {fs}, nBlockAlign = {block_align}, and"
+                             f" nAvgBytesPerSec = {bytes_per_second}")
+
+    return (size, format_tag, channels, fs, bytes_per_second, block_align,
+            bit_depth)
+
+
+def _read_data_chunk(fid, format_tag, channels, bit_depth, is_big_endian,
+                     block_align, mmap=False):
+    """
+    Notes
+    -----
+    Assumes file pointer is immediately after the 'data' id
+
+    It's possible to not use all available bits in a container, or to store
+    samples in a container bigger than necessary, so bytes_per_sample uses
+    the actual reported container size (nBlockAlign / nChannels).  Real-world
+    examples:
+
+    Adobe Audition's "24-bit packed int (type 1, 20-bit)"
+
+        nChannels = 2, nBlockAlign = 6, wBitsPerSample = 20
+
+    http://www-mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/Samples/AFsp/M1F1-int12-AFsp.wav
+    is:
+
+        nChannels = 2, nBlockAlign = 4, wBitsPerSample = 12
+
+    http://www-mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/Docs/multichaudP.pdf
+    gives an example of:
+
+        nChannels = 2, nBlockAlign = 8, wBitsPerSample = 20
+    """
+    if is_big_endian:
+        fmt = '>'
+    else:
+        fmt = '<'
+
+    # Size of the data subchunk in bytes
+    size = struct.unpack(fmt+'I', fid.read(4))[0]
+
+    # Number of bytes per sample (sample container size)
+    bytes_per_sample = block_align // channels
+    n_samples = size // bytes_per_sample
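+    # For example, Adobe Audition's 20-bit case from the Notes above has
+    # nChannels = 2 and nBlockAlign = 6, so each sample occupies a 3-byte
+    # container even though only 20 of its bits are meaningful.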
+
+    if format_tag == WAVE_FORMAT.PCM:
+        if 1 <= bit_depth <= 8:
+            dtype = 'u1'  # WAV of 8-bit integer or less are unsigned
+        elif bytes_per_sample in {3, 5, 6, 7}:
+            # No compatible dtype.  Load as raw bytes for reshaping later.
+            dtype = 'V1'
+        elif bit_depth <= 64:
+            # Remaining bit depths can map directly to signed numpy dtypes
+            dtype = f'{fmt}i{bytes_per_sample}'
+        else:
+            raise ValueError("Unsupported bit depth: the WAV file "
+                             f"has {bit_depth}-bit integer data.")
+    elif format_tag == WAVE_FORMAT.IEEE_FLOAT:
+        if bit_depth in {32, 64}:
+            dtype = f'{fmt}f{bytes_per_sample}'
+        else:
+            raise ValueError("Unsupported bit depth: the WAV file "
+                             f"has {bit_depth}-bit floating-point data.")
+    else:
+        _raise_bad_format(format_tag)
+
+    start = fid.tell()
+    if not mmap:
+        try:
+            count = size if dtype == 'V1' else n_samples
+            data = numpy.fromfile(fid, dtype=dtype, count=count)
+        except io.UnsupportedOperation:  # not a C-like file
+            fid.seek(start, 0)  # just in case it seeked, though it shouldn't
+            data = numpy.frombuffer(fid.read(size), dtype=dtype)
+
+        if dtype == 'V1':
+            # Rearrange raw bytes into smallest compatible numpy dtype
+            dt = f'{fmt}i4' if bytes_per_sample == 3 else f'{fmt}i8'
+            a = numpy.zeros((len(data) // bytes_per_sample, numpy.dtype(dt).itemsize),
+                            dtype='V1')
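+            # Copy the raw sample bytes into the most significant byte
+            # positions of the wider dtype; the numeric view below then
+            # comes out left-justified (low bytes zero) with the sign bit
+            # of each sample preserved.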
+            if is_big_endian:
+                a[:, :bytes_per_sample] = data.reshape((-1, bytes_per_sample))
+            else:
+                a[:, -bytes_per_sample:] = data.reshape((-1, bytes_per_sample))
+            data = a.view(dt).reshape(a.shape[:-1])
+    else:
+        if bytes_per_sample in {1, 2, 4, 8}:
+            start = fid.tell()
+            data = numpy.memmap(fid, dtype=dtype, mode='c', offset=start,
+                                shape=(n_samples,))
+            fid.seek(start + size)
+        else:
+            raise ValueError("mmap=True not compatible with "
+                             f"{bytes_per_sample}-byte container size.")
+
+    _handle_pad_byte(fid, size)
+
+    if channels > 1:
+        data = data.reshape(-1, channels)
+    return data
+
+
+def _skip_unknown_chunk(fid, is_big_endian):
+    if is_big_endian:
+        fmt = '>I'
+    else:
+        fmt = '<I'
+
+    data = fid.read(4)
+    # call unpack() and seek() only if we have really read data from file;
+    # otherwise an empty read at the end of the file would trigger an
+    # endless loop
+    if data:
+        size = struct.unpack(fmt, data)[0]
+        fid.seek(size, 1)
+        _handle_pad_byte(fid, size)
+
+
+def _read_riff_chunk(fid):
+    str1 = fid.read(4)  # File signature
+    if str1 == b'RIFF':
+        is_big_endian = False
+        fmt = '<I'
+    elif str1 == b'RIFX':
+        is_big_endian = True
+        fmt = '>I'
+    else:
+        # There are also .wav files with "FFIR" or "XFIR" signatures?
+        raise ValueError(f"File format {repr(str1)} not understood. Only "
+                         "'RIFF' and 'RIFX' supported.")
+
+    # Size of entire file
+    file_size = struct.unpack(fmt, fid.read(4))[0] + 8
+
+    return file_size, is_big_endian
+
+
+def _handle_pad_byte(fid, size):
+    # "If the chunk size is an odd number of bytes, a pad byte with value
+    # zero is written after ckData." So we need to seek past this after
+    # each chunk.
+    if size % 2:
+        fid.seek(1, 1)
+
+
+def read(filename, mmap=False):
+    """
+    Open a WAV file.
+
+    Return the sample rate (in samples/sec) and data from an LPCM WAV file.
+
+    Parameters
+    ----------
+    filename : string or open file handle
+        Input WAV file.
+    mmap : bool, optional
+        Whether to read data as memory-mapped (default: False).
+        Not compatible with some bit depths; only to be used on real files.
+
+    Returns
+    -------
+    rate : int
+        Sample rate of WAV file.
+    data : numpy array
+        Data read from WAV file. Data-type is determined from the file;
+        data is 1-D for 1-channel WAV, or 2-D of shape
+        (Nsamples, Nchannels) otherwise.
+
+    Notes
+    -----
+    WAV files can specify arbitrary bit depth; this function supports
+    reading any integer PCM depth from 1 to 64 bits. Data is returned in
+    the smallest compatible numpy int type, in left-justified format.
+    8-bit and lower is unsigned, while 9-bit and higher is signed.
+
+    Examples
+    --------
+    >>> from os.path import dirname, join as pjoin
+    >>> from scipy.io import wavfile
+    >>> import scipy.io
+
+    Get the filename for an example .wav file from the tests/data directory.
+
+    >>> data_dir = pjoin(dirname(scipy.io.__file__), 'tests', 'data')
+    >>> wav_fname = pjoin(data_dir, 'test-44100Hz-2ch-32bit-float-be.wav')
+
+    Load the .wav file contents.
+
+    >>> samplerate, data = wavfile.read(wav_fname)
+    >>> print(f"number of channels = {data.shape[1]}")
+    number of channels = 2
+    >>> length = data.shape[0] / samplerate
+    >>> print(f"length = {length}s")
+    length = 0.01s
+
+    Plot the waveform.
+
+    >>> import matplotlib.pyplot as plt
+    >>> import numpy as np
+    >>> time = np.linspace(0., length, data.shape[0])
+    >>> plt.plot(time, data[:, 0], label="Left channel")
+    >>> plt.plot(time, data[:, 1], label="Right channel")
+    >>> plt.legend()
+    >>> plt.xlabel("Time [s]")
+    >>> plt.ylabel("Amplitude")
+    >>> plt.show()
+
+    """
+    if hasattr(filename, 'read'):
+        fid = filename
+        mmap = False
+    else:
+        fid = open(filename, 'rb')
+
+    try:
+        file_size, is_big_endian = _read_riff_chunk(fid)
+        fmt_chunk_received = False
+        data_chunk_received = False
+        while fid.tell() < file_size:
+            # read the next chunk
+            chunk_id = fid.read(4)
+
+            if not chunk_id:
+                if data_chunk_received:
+                    # End of file but data successfully read
+                    warnings.warn(
+                        "Reached EOF prematurely; finished at {:d} bytes, "
+                        "expected {:d} bytes from header."
+                        .format(fid.tell(), file_size),
+                        WavFileWarning, stacklevel=2)
+                    break
+                else:
+                    raise ValueError("Unexpected end of file.")
+            elif len(chunk_id) < 4:
+                msg = f"Incomplete chunk ID: {repr(chunk_id)}"
+                # If we have the data, ignore the broken chunk
+                if fmt_chunk_received and data_chunk_received:
+                    warnings.warn(msg + ", ignoring it.", WavFileWarning,
+                                  stacklevel=2)
+                else:
+                    raise ValueError(msg)
+
+            if chunk_id == b'fmt ':
+                fmt_chunk_received = True
+                fmt_chunk = _read_fmt_chunk(fid, is_big_endian)
+                format_tag, channels, fs = fmt_chunk[1:4]
+                bit_depth = fmt_chunk[6]
+                block_align = fmt_chunk[5]
+            elif chunk_id == b'fact':
+                _skip_unknown_chunk(fid, is_big_endian)
+            elif chunk_id == b'data':
+                data_chunk_received = True
+                if not fmt_chunk_received:
+                    raise ValueError("No fmt chunk before data")
+                data = _read_data_chunk(fid, format_tag, channels, bit_depth,
+                                        is_big_endian, block_align, mmap)
+            elif chunk_id == b'LIST':
+                # Someday this could be handled properly but for now skip it
+                _skip_unknown_chunk(fid, is_big_endian)
+            elif chunk_id in {b'JUNK', b'Fake'}:
+                # Skip alignment chunks without warning
+                _skip_unknown_chunk(fid, is_big_endian)
+            else:
+                warnings.warn("Chunk (non-data) not understood, skipping it.",
+                              WavFileWarning, stacklevel=2)
+                _skip_unknown_chunk(fid, is_big_endian)
+    finally:
+        if not hasattr(filename, 'read'):
+            fid.close()
+        else:
+            fid.seek(0)
+
+    return fs, data
+
+
+def write(filename, rate, data):
+    """
+    Write a NumPy array as a WAV file.
+
+    Parameters
+    ----------
+    filename : string or open file handle
+        Output wav file.
+    rate : int
+        The sample rate (in samples/sec).
+    data : ndarray
+        A 1-D or 2-D NumPy array of either integer or float data-type.
+
+    Notes
+    -----
+    * Writes a simple uncompressed WAV file.
+    * To write multiple-channels, use a 2-D array of shape
+      (Nsamples, Nchannels).
+    * The bits-per-sample and PCM/float will be determined by the data-type.
+
+    Common data types: [1]_
+
+    =====================  ===========  ===========  =============
+         WAV format            Min          Max       NumPy dtype
+    =====================  ===========  ===========  =============
+    32-bit floating-point  -1.0         +1.0         float32
+    32-bit PCM             -2147483648  +2147483647  int32
+    16-bit PCM             -32768       +32767       int16
+    8-bit PCM              0            255          uint8
+    =====================  ===========  ===========  =============
+
+    Note that 8-bit PCM is unsigned.
+
+    References
+    ----------
+    .. [1] IBM Corporation and Microsoft Corporation, "Multimedia Programming
+       Interface and Data Specifications 1.0", section "Data Format of the
+       Samples", August 1991
+       http://www.tactilemedia.com/info/MCI_Control_Info.html
+
+    Examples
+    --------
+    Create a 100Hz sine wave, sampled at 44100Hz.
+    Write to 16-bit PCM, Mono.
+
+    >>> from scipy.io.wavfile import write
+    >>> import numpy as np
+    >>> samplerate = 44100; fs = 100
+    >>> t = np.linspace(0., 1., samplerate)
+    >>> amplitude = np.iinfo(np.int16).max
+    >>> data = amplitude * np.sin(2. * np.pi * fs * t)
+    >>> write("example.wav", samplerate, data.astype(np.int16))
+
+    """
+    if hasattr(filename, 'write'):
+        fid = filename
+    else:
+        fid = open(filename, 'wb')
+
+    fs = rate
+
+    try:
+        dkind = data.dtype.kind
+        if not (dkind == 'i' or dkind == 'f' or (dkind == 'u' and
+                                                 data.dtype.itemsize == 1)):
+            raise ValueError("Unsupported data type '%s'" % data.dtype)
+
+        header_data = b''
+
+        header_data += b'RIFF'
+        header_data += b'\x00\x00\x00\x00'
+        header_data += b'WAVE'
+
+        # fmt chunk
+        header_data += b'fmt '
+        if dkind == 'f':
+            format_tag = WAVE_FORMAT.IEEE_FLOAT
+        else:
+            format_tag = WAVE_FORMAT.PCM
+        if data.ndim == 1:
+            channels = 1
+        else:
+            channels = data.shape[1]
+        bit_depth = data.dtype.itemsize * 8
+        bytes_per_second = fs*(bit_depth // 8)*channels
+        block_align = channels * (bit_depth // 8)
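+        # e.g. for 16-bit stereo at fs = 44100: block_align = 2 * 2 = 4
+        # bytes per frame and bytes_per_second = 44100 * 4 = 176400.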
+
+        fmt_chunk_data = struct.pack('<HHIIHH', format_tag, channels, fs,
+                                     bytes_per_second, block_align,
+                                     bit_depth)
+        if not (dkind == 'i' or dkind == 'u'):
+            # add cbSize field for non-PCM files
+            fmt_chunk_data += b'\x00\x00'
+
+        header_data += struct.pack('<I', len(fmt_chunk_data))
+        header_data += fmt_chunk_data
+
+        # fact chunk (non-PCM files)
+        if not (dkind == 'i' or dkind == 'u'):
+            header_data += b'fact'
+            header_data += struct.pack('<II', 4, data.shape[0])
+
+        # check data size (needs to be immediately before the data chunk)
+        if ((len(header_data)-4-4) + (4+4+data.nbytes)) > 0xFFFFFFFF:
+            raise ValueError("Data exceeds wave file size limit")
+
+        fid.write(header_data)
+
+        # data chunk
+        fid.write(b'data')
+        fid.write(struct.pack('<I', data.nbytes))
+        if data.dtype.byteorder == '>' or (data.dtype.byteorder == '=' and
+                                           sys.byteorder == 'big'):
+            data = data.byteswap()
+        _array_tofile(fid, data)
+
+        # Determine file size and place it in correct
+        #  position at start of the file.
+        size = fid.tell()
+        fid.seek(4)
+        fid.write(struct.pack('<I', size-8))
+
+    finally:
+        if not hasattr(filename, 'write'):
+            fid.close()
+        else:
+            fid.seek(0)
+
+
+if hasattr(sys, 'getrefcount'):
+    def _array_tofile(fid, data):
+        # ravel gives a c-contiguous buffer
+        fid.write(data.ravel().view('b').data)
+else:  # don't write to file-like objects directly on PyPy
+    def _array_tofile(fid, data):
+        fid.write(data.tobytes())
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/__init__.py
new file mode 100644
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/__init__.py
+"""
+====================================
+Linear algebra (:mod:`scipy.linalg`)
+====================================
+
+.. currentmodule:: scipy.linalg
+
+Linear algebra functions.
+
+.. seealso::
+
+   `numpy.linalg <https://www.numpy.org/devdocs/reference/routines.linalg.html>`__
+   for more linear algebra functions. Note that
+   although `scipy.linalg` imports most of them, identically named
+   functions from `scipy.linalg` may offer more or slightly differing
+   functionality.
+
+
+Basics
+======
+
+.. autosummary::
+   :toctree: generated/
+
+   inv - Find the inverse of a square matrix
+   solve - Solve a linear system of equations
+   solve_banded - Solve a banded linear system
+   solveh_banded - Solve a Hermitian or symmetric banded system
+   solve_circulant - Solve a circulant system
+   solve_triangular - Solve a triangular matrix
+   solve_toeplitz - Solve a Toeplitz matrix
+   matmul_toeplitz - Multiply a Toeplitz matrix with an array.
+   det - Find the determinant of a square matrix
+   norm - Matrix and vector norm
+   lstsq - Solve a linear least-squares problem
+   pinv - Pseudo-inverse (Moore-Penrose) using lstsq
+   pinvh - Pseudo-inverse of hermitian matrix
+   kron - Kronecker product of two arrays
+   khatri_rao - Khatri-Rao product of two arrays
+   tril - Construct a lower-triangular matrix from a given matrix
+   triu - Construct an upper-triangular matrix from a given matrix
+   orthogonal_procrustes - Solve an orthogonal Procrustes problem
+   matrix_balance - Balance matrix entries with a similarity transformation
+   subspace_angles - Compute the subspace angles between two matrices
+   bandwidth - Return the lower and upper bandwidth of an array
+   issymmetric - Check if a square 2D array is symmetric
+   ishermitian - Check if a square 2D array is Hermitian
+   LinAlgError
+   LinAlgWarning
+
+Eigenvalue Problems
+===================
+
+.. autosummary::
+   :toctree: generated/
+
+   eig - Find the eigenvalues and eigenvectors of a square matrix
+   eigvals - Find just the eigenvalues of a square matrix
+   eigh - Find the e-vals and e-vectors of a Hermitian or symmetric matrix
+   eigvalsh - Find just the eigenvalues of a Hermitian or symmetric matrix
+   eig_banded - Find the eigenvalues and eigenvectors of a banded matrix
+   eigvals_banded - Find just the eigenvalues of a banded matrix
+   eigh_tridiagonal - Find the eigenvalues and eigenvectors of a tridiagonal matrix
+   eigvalsh_tridiagonal - Find just the eigenvalues of a tridiagonal matrix
+
+Decompositions
+==============
+
+.. autosummary::
+   :toctree: generated/
+
+   lu - LU decomposition of a matrix
+   lu_factor - LU decomposition returning unordered matrix and pivots
+   lu_solve - Solve Ax=b using back substitution with output of lu_factor
+   svd - Singular value decomposition of a matrix
+   svdvals - Singular values of a matrix
+   diagsvd - Construct matrix of singular values from output of svd
+   orth - Construct orthonormal basis for the range of A using svd
+   null_space - Construct orthonormal basis for the null space of A using svd
+   ldl - LDL.T decomposition of a Hermitian or a symmetric matrix.
+   cholesky - Cholesky decomposition of a matrix
+   cholesky_banded - Cholesky decomp. of a sym. or Hermitian banded matrix
+   cho_factor - Cholesky decomposition for use in solving a linear system
+   cho_solve - Solve previously factored linear system
+   cho_solve_banded - Solve previously factored banded linear system
+   polar - Compute the polar decomposition.
+   qr - QR decomposition of a matrix
+   qr_multiply - QR decomposition and multiplication by Q
+   qr_update - Rank k QR update
+   qr_delete - QR downdate on row or column deletion
+   qr_insert - QR update on row or column insertion
+   rq - RQ decomposition of a matrix
+   qz - QZ decomposition of a pair of matrices
+   ordqz - QZ decomposition of a pair of matrices with reordering
+   schur - Schur decomposition of a matrix
+   rsf2csf - Real to complex Schur form
+   hessenberg - Hessenberg form of a matrix
+   cdf2rdf - Complex diagonal form to real diagonal block form
+   cossin - Cosine sine decomposition of a unitary or orthogonal matrix
+
+.. seealso::
+
+   `scipy.linalg.interpolative` -- Interpolative matrix decompositions
+
+
+Matrix Functions
+================
+
+.. autosummary::
+   :toctree: generated/
+
+   expm - Matrix exponential
+   logm - Matrix logarithm
+   cosm - Matrix cosine
+   sinm - Matrix sine
+   tanm - Matrix tangent
+   coshm - Matrix hyperbolic cosine
+   sinhm - Matrix hyperbolic sine
+   tanhm - Matrix hyperbolic tangent
+   signm - Matrix sign
+   sqrtm - Matrix square root
+   funm - Evaluating an arbitrary matrix function
+   expm_frechet - Frechet derivative of the matrix exponential
+   expm_cond - Relative condition number of expm in the Frobenius norm
+   fractional_matrix_power - Fractional matrix power
+
+
+Matrix Equation Solvers
+=======================
+
+.. autosummary::
+   :toctree: generated/
+
+   solve_sylvester - Solve the Sylvester matrix equation
+   solve_continuous_are - Solve the continuous-time algebraic Riccati equation
+   solve_discrete_are - Solve the discrete-time algebraic Riccati equation
+   solve_continuous_lyapunov - Solve the continuous-time Lyapunov equation
+   solve_discrete_lyapunov - Solve the discrete-time Lyapunov equation
+
+
+Sketches and Random Projections
+===============================
+
+.. autosummary::
+   :toctree: generated/
+
+   clarkson_woodruff_transform - Applies the Clarkson Woodruff Sketch (a.k.a CountMin Sketch)
+
+Special Matrices
+================
+
+.. autosummary::
+   :toctree: generated/
+
+   block_diag - Construct a block diagonal matrix from submatrices
+   circulant - Circulant matrix
+   companion - Companion matrix
+   convolution_matrix - Convolution matrix
+   dft - Discrete Fourier transform matrix
+   fiedler - Fiedler matrix
+   fiedler_companion - Fiedler companion matrix
+   hadamard - Hadamard matrix of order 2**n
+   hankel - Hankel matrix
+   helmert - Helmert matrix
+   hilbert - Hilbert matrix
+   invhilbert - Inverse Hilbert matrix
+   leslie - Leslie matrix
+   pascal - Pascal matrix
+   invpascal - Inverse Pascal matrix
+   toeplitz - Toeplitz matrix
+   tri - Construct a matrix filled with ones at and below a given diagonal
+
+Low-level routines
+==================
+
+.. autosummary::
+   :toctree: generated/
+
+   get_blas_funcs
+   get_lapack_funcs
+   find_best_blas_type
+
+.. seealso::
+
+   `scipy.linalg.blas` -- Low-level BLAS functions
+
+   `scipy.linalg.lapack` -- Low-level LAPACK functions
+
+   `scipy.linalg.cython_blas` -- Low-level BLAS functions for Cython
+
+   `scipy.linalg.cython_lapack` -- Low-level LAPACK functions for Cython
+
+"""  # noqa: E501
+
+from ._misc import *
+from ._cythonized_array_utils import *
+from ._basic import *
+from ._decomp import *
+from ._decomp_lu import *
+from ._decomp_ldl import *
+from ._decomp_cholesky import *
+from ._decomp_qr import *
+from ._decomp_qz import *
+from ._decomp_svd import *
+from ._decomp_schur import *
+from ._decomp_polar import *
+from ._matfuncs import *
+from .blas import *
+from .lapack import *
+from ._special_matrices import *
+from ._solvers import *
+from ._procrustes import *
+from ._decomp_update import *
+from ._sketches import *
+from ._decomp_cossin import *
+
+# Deprecated namespaces, to be removed in v2.0.0
+from . import (
+    decomp, decomp_cholesky, decomp_lu, decomp_qr, decomp_svd, decomp_schur,
+    basic, misc, special_matrices, flinalg, matfuncs
+)
+
+__all__ = [s for s in dir() if not s.startswith('_')]
+
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/_basic.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/_basic.py
new file mode 100644
index 00000000..427b94d2
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/_basic.py
@@ -0,0 +1,1815 @@
+#
+# Author: Pearu Peterson, March 2002
+#
+# w/ additions by Travis Oliphant, March 2002
+#              and Jake Vanderplas, August 2012
+
+from warnings import warn
+import numpy as np
+from numpy import atleast_1d, atleast_2d
+from ._flinalg_py import get_flinalg_funcs
+from .lapack import get_lapack_funcs, _compute_lwork
+from ._misc import LinAlgError, _datacopied, LinAlgWarning
+from ._decomp import _asarray_validated
+from . import _decomp, _decomp_svd
+from ._solve_toeplitz import levinson
+
+__all__ = ['solve', 'solve_triangular', 'solveh_banded', 'solve_banded',
+           'solve_toeplitz', 'solve_circulant', 'inv', 'det', 'lstsq',
+           'pinv', 'pinvh', 'matrix_balance', 'matmul_toeplitz']
+
+
+# Linear equations
+def _solve_check(n, info, lamch=None, rcond=None):
+    """ Check arguments during the different steps of the solution phase """
+    if info < 0:
+        raise ValueError('LAPACK reported an illegal value in {}-th argument'
+                         '.'.format(-info))
+    elif 0 < info:
+        raise LinAlgError('Matrix is singular.')
+
+    if lamch is None:
+        return
+    E = lamch('E')
+    if rcond < E:
+        warn('Ill-conditioned matrix (rcond={:.6g}): '
+             'result may not be accurate.'.format(rcond),
+             LinAlgWarning, stacklevel=3)
+
+
+def solve(a, b, sym_pos=False, lower=False, overwrite_a=False,
+          overwrite_b=False, check_finite=True, assume_a='gen',
+          transposed=False):
+    """
+    Solves the linear equation set ``a @ x == b`` for the unknown ``x``
+    for a square matrix `a`.
+
+    If the data matrix is known to be a particular type then supplying the
+    corresponding string to ``assume_a`` key chooses the dedicated solver.
+    The available options are
+
+    ===================  ========
+     generic matrix       'gen'
+     symmetric            'sym'
+     hermitian            'her'
+     positive definite    'pos'
+    ===================  ========
+
+    If omitted, ``'gen'`` is the default structure.
+
+    The datatype of the arrays defines which solver is called regardless
+    of the values. In other words, even when the complex array entries have
+    precisely zero imaginary parts, the complex solver will be called based
+    on the data type of the array.
+
+    Parameters
+    ----------
+    a : (N, N) array_like
+        Square input data
+    b : (N, NRHS) array_like
+        Input data for the right hand side.
+    sym_pos : bool, default: False, deprecated
+        Assume `a` is symmetric and positive definite.
+
+        .. deprecated:: 0.19.0
+            This keyword is deprecated and should be replaced by using
+            ``assume_a = 'pos'``. `sym_pos` will be removed in SciPy 1.11.0.
+
+    lower : bool, default: False
+        Ignored if ``assume_a == 'gen'`` (the default). If True, the
+        calculation uses only the data in the lower triangle of `a`;
+        entries above the diagonal are ignored. If False (default), the
+        calculation uses only the data in the upper triangle of `a`; entries
+        below the diagonal are ignored.
+    overwrite_a : bool, default: False
+        Allow overwriting data in `a` (may enhance performance).
+    overwrite_b : bool, default: False
+        Allow overwriting data in `b` (may enhance performance).
+    check_finite : bool, default: True
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+    assume_a : str, {'gen', 'sym', 'her', 'pos'}
+        Valid entries are explained above.
+    transposed : bool, default: False
+        If True, solve ``a.T @ x == b``. Raises `NotImplementedError`
+        for complex `a`.
+
+    Returns
+    -------
+    x : (N, NRHS) ndarray
+        The solution array.
+
+    Raises
+    ------
+    ValueError
+        If a size mismatch is detected or input a is not square.
+    LinAlgError
+        If the matrix is singular.
+    LinAlgWarning
+        If an ill-conditioned input a is detected.
+    NotImplementedError
+        If transposed is True and input a is a complex matrix.
+
+    Notes
+    -----
+    If the input b matrix is a 1-D array with N elements, when supplied
+    together with an NxN input a, it is treated as a valid column vector
+    despite the apparent size mismatch. This is compatible with the
+    numpy.dot() behavior and the returned result is still a 1-D array.
+
+    The generic, symmetric, Hermitian and positive definite solutions are
+    obtained via calling ?GESV, ?SYSV, ?HESV, and ?POSV routines of
+    LAPACK respectively.
+
+    Examples
+    --------
+    Given `a` and `b`, solve for `x`:
+
+    >>> import numpy as np
+    >>> a = np.array([[3, 2, 0], [1, -1, 0], [0, 5, 1]])
+    >>> b = np.array([2, 4, -1])
+    >>> from scipy import linalg
+    >>> x = linalg.solve(a, b)
+    >>> x
+    array([ 2., -2.,  9.])
+    >>> np.dot(a, x) == b
+    array([ True,  True,  True])
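+
+    If `a` is known to be symmetric positive definite, for example, the
+    dedicated Cholesky-based solver can be requested with ``assume_a='pos'``:
+
+    >>> a = np.array([[2., 0.], [0., 4.]])
+    >>> b = np.array([2., 8.])
+    >>> linalg.solve(a, b, assume_a='pos')
+    array([ 1.,  2.])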
+
+    """
+    # Flags for 1-D or N-D right-hand side
+    b_is_1D = False
+
+    a1 = atleast_2d(_asarray_validated(a, check_finite=check_finite))
+    b1 = atleast_1d(_asarray_validated(b, check_finite=check_finite))
+    n = a1.shape[0]
+
+    overwrite_a = overwrite_a or _datacopied(a1, a)
+    overwrite_b = overwrite_b or _datacopied(b1, b)
+
+    if a1.shape[0] != a1.shape[1]:
+        raise ValueError('Input a needs to be a square matrix.')
+
+    if n != b1.shape[0]:
+        # Last chance to catch 1x1 scalar a and 1-D b arrays
+        if not (n == 1 and b1.size != 0):
+            raise ValueError('Input b has to have same number of rows as '
+                             'input a')
+
+    # accommodate empty arrays
+    if b1.size == 0:
+        return np.asfortranarray(b1.copy())
+
+    # regularize 1-D b arrays to 2D
+    if b1.ndim == 1:
+        if n == 1:
+            b1 = b1[None, :]
+        else:
+            b1 = b1[:, None]
+        b_is_1D = True
+
+    # Backwards compatibility - old keyword.
+    if sym_pos:
+        message = ("The 'sym_pos' keyword is deprecated and should be "
+                   "replaced by using 'assume_a = \"pos\"'. 'sym_pos' will be"
+                   " removed in SciPy 1.11.0.")
+        warn(message, DeprecationWarning, stacklevel=2)
+        assume_a = 'pos'
+
+    if assume_a not in ('gen', 'sym', 'her', 'pos'):
+        raise ValueError('{} is not a recognized matrix structure'
+                         ''.format(assume_a))
+
+    # for a real matrix, describe it as "symmetric", not "hermitian"
+    # (lapack doesn't know what to do with real hermitian matrices)
+    if assume_a == 'her' and not np.iscomplexobj(a1):
+        assume_a = 'sym'
+
+    # Get the correct lamch function.
+    # The LAMCH functions only exist for S and D,
+    # so for complex values we have to convert to real/double.
+    if a1.dtype.char in 'fF':  # single precision
+        lamch = get_lapack_funcs('lamch', dtype='f')
+    else:
+        lamch = get_lapack_funcs('lamch', dtype='d')
+
+    # Currently we do not have the other forms of the norm calculators
+    #   lansy, lanpo, lanhe.
+    # However, in any case they only reduce computations slightly...
+    lange = get_lapack_funcs('lange', (a1,))
+
+    # Since the I-norm and 1-norm are the same for symmetric matrices
+    # we can collect them all in this one call
+    # Note however, that when issuing 'gen' and form!='none', then
+    # the I-norm should be used
+    if transposed:
+        trans = 1
+        norm = 'I'
+        if np.iscomplexobj(a1):
+            raise NotImplementedError('scipy.linalg.solve can currently '
+                                      'not solve a^T x = b or a^H x = b '
+                                      'for complex matrices.')
+    else:
+        trans = 0
+        norm = '1'
+
+    anorm = lange(norm, a1)
+
+    # Generalized case 'gesv'
+    if assume_a == 'gen':
+        gecon, getrf, getrs = get_lapack_funcs(('gecon', 'getrf', 'getrs'),
+                                               (a1, b1))
+        lu, ipvt, info = getrf(a1, overwrite_a=overwrite_a)
+        _solve_check(n, info)
+        x, info = getrs(lu, ipvt, b1,
+                        trans=trans, overwrite_b=overwrite_b)
+        _solve_check(n, info)
+        rcond, info = gecon(lu, anorm, norm=norm)
+    # Hermitian case 'hesv'
+    elif assume_a == 'her':
+        hecon, hesv, hesv_lw = get_lapack_funcs(('hecon', 'hesv',
+                                                 'hesv_lwork'), (a1, b1))
+        lwork = _compute_lwork(hesv_lw, n, lower)
+        lu, ipvt, x, info = hesv(a1, b1, lwork=lwork,
+                                 lower=lower,
+                                 overwrite_a=overwrite_a,
+                                 overwrite_b=overwrite_b)
+        _solve_check(n, info)
+        rcond, info = hecon(lu, ipvt, anorm)
+    # Symmetric case 'sysv'
+    elif assume_a == 'sym':
+        sycon, sysv, sysv_lw = get_lapack_funcs(('sycon', 'sysv',
+                                                 'sysv_lwork'), (a1, b1))
+        lwork = _compute_lwork(sysv_lw, n, lower)
+        lu, ipvt, x, info = sysv(a1, b1, lwork=lwork,
+                                 lower=lower,
+                                 overwrite_a=overwrite_a,
+                                 overwrite_b=overwrite_b)
+        _solve_check(n, info)
+        rcond, info = sycon(lu, ipvt, anorm)
+    # Positive definite case 'posv'
+    else:
+        pocon, posv = get_lapack_funcs(('pocon', 'posv'),
+                                       (a1, b1))
+        lu, x, info = posv(a1, b1, lower=lower,
+                           overwrite_a=overwrite_a,
+                           overwrite_b=overwrite_b)
+        _solve_check(n, info)
+        rcond, info = pocon(lu, anorm)
+
+    _solve_check(n, info, lamch, rcond)
+
+    if b_is_1D:
+        x = x.ravel()
+
+    return x
+
+
+def solve_triangular(a, b, trans=0, lower=False, unit_diagonal=False,
+                     overwrite_b=False, check_finite=True):
+    """
+    Solve the equation `a x = b` for `x`, assuming a is a triangular matrix.
+
+    Parameters
+    ----------
+    a : (M, M) array_like
+        A triangular matrix
+    b : (M,) or (M, N) array_like
+        Right-hand side matrix in `a x = b`
+    lower : bool, optional
+        Use only data contained in the lower triangle of `a`.
+        Default is to use upper triangle.
+    trans : {0, 1, 2, 'N', 'T', 'C'}, optional
+        Type of system to solve:
+
+        ========  =========
+        trans     system
+        ========  =========
+        0 or 'N'  a x  = b
+        1 or 'T'  a^T x = b
+        2 or 'C'  a^H x = b
+        ========  =========
+    unit_diagonal : bool, optional
+        If True, diagonal elements of `a` are assumed to be 1 and
+        will not be referenced.
+    overwrite_b : bool, optional
+        Allow overwriting data in `b` (may enhance performance)
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    x : (M,) or (M, N) ndarray
+        Solution to the system `a x = b`.  Shape of return matches `b`.
+
+    Raises
+    ------
+    LinAlgError
+        If `a` is singular
+
+    Notes
+    -----
+    .. versionadded:: 0.9.0
+
+    Examples
+    --------
+    Solve the lower triangular system a x = b, where::
+
+             [3  0  0  0]       [4]
+        a =  [2  1  0  0]   b = [2]
+             [1  0  1  0]       [4]
+             [1  1  1  1]       [2]
+
+    >>> import numpy as np
+    >>> from scipy.linalg import solve_triangular
+    >>> a = np.array([[3, 0, 0, 0], [2, 1, 0, 0], [1, 0, 1, 0], [1, 1, 1, 1]])
+    >>> b = np.array([4, 2, 4, 2])
+    >>> x = solve_triangular(a, b, lower=True)
+    >>> x
+    array([ 1.33333333, -0.66666667,  2.66666667, -1.33333333])
+    >>> a.dot(x)  # Check the result
+    array([ 4.,  2.,  4.,  2.])
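+
+    The transposed system ``a.T x = b`` can be solved with the same
+    triangular factor by passing ``trans='T'``:
+
+    >>> x = solve_triangular(a, b, lower=True, trans='T')
+    >>> x
+    array([ 0.,  0.,  2.,  2.])
+    >>> a.T.dot(x)  # Check the result
+    array([ 4.,  2.,  4.,  2.])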
+
+    """
+
+    a1 = _asarray_validated(a, check_finite=check_finite)
+    b1 = _asarray_validated(b, check_finite=check_finite)
+    if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
+        raise ValueError('expected square matrix')
+    if a1.shape[0] != b1.shape[0]:
+        raise ValueError('shapes of a {} and b {} are incompatible'
+                         .format(a1.shape, b1.shape))
+    overwrite_b = overwrite_b or _datacopied(b1, b)
+
+    trans = {'N': 0, 'T': 1, 'C': 2}.get(trans, trans)
+    trtrs, = get_lapack_funcs(('trtrs',), (a1, b1))
+    if a1.flags.f_contiguous or trans == 2:
+        x, info = trtrs(a1, b1, overwrite_b=overwrite_b, lower=lower,
+                        trans=trans, unitdiag=unit_diagonal)
+    else:
+        # transposed system is solved since trtrs expects Fortran ordering
+        x, info = trtrs(a1.T, b1, overwrite_b=overwrite_b, lower=not lower,
+                        trans=not trans, unitdiag=unit_diagonal)
+
+    if info == 0:
+        return x
+    if info > 0:
+        raise LinAlgError("singular matrix: resolution failed at diagonal %d" %
+                          (info-1))
+    raise ValueError('illegal value in %dth argument of internal trtrs' %
+                     (-info))
+
+
+def solve_banded(l_and_u, ab, b, overwrite_ab=False, overwrite_b=False,
+                 check_finite=True):
+    """
+    Solve the equation a x = b for x, assuming a is a banded matrix.
+
+    The matrix a is stored in `ab` using the matrix diagonal ordered form::
+
+        ab[u + i - j, j] == a[i,j]
+
+    Example of `ab` (shape of a is (6,6), ``u`` = 1, ``l`` = 2)::
+
+        *    a01  a12  a23  a34  a45
+        a00  a11  a22  a33  a44  a55
+        a10  a21  a32  a43  a54   *
+        a20  a31  a42  a53   *    *
+
+    Parameters
+    ----------
+    (l, u) : (integer, integer)
+        Number of non-zero lower and upper diagonals
+    ab : (`l` + `u` + 1, M) array_like
+        Banded matrix
+    b : (M,) or (M, K) array_like
+        Right-hand side
+    overwrite_ab : bool, optional
+        Discard data in `ab` (may enhance performance)
+    overwrite_b : bool, optional
+        Discard data in `b` (may enhance performance)
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    x : (M,) or (M, K) ndarray
+        The solution to the system a x = b. Returned shape depends on the
+        shape of `b`.
+
+    Examples
+    --------
+    Solve the banded system a x = b, where::
+
+            [5  2 -1  0  0]       [0]
+            [1  4  2 -1  0]       [1]
+        a = [0  1  3  2 -1]   b = [2]
+            [0  0  1  2  2]       [2]
+            [0  0  0  1  1]       [3]
+
+    There is one nonzero diagonal below the main diagonal (l = 1), and
+    two above (u = 2). The diagonal banded form of the matrix is::
+
+             [*  * -1 -1 -1]
+        ab = [*  2  2  2  2]
+             [5  4  3  2  1]
+             [1  1  1  1  *]
+
+    >>> import numpy as np
+    >>> from scipy.linalg import solve_banded
+    >>> ab = np.array([[0,  0, -1, -1, -1],
+    ...                [0,  2,  2,  2,  2],
+    ...                [5,  4,  3,  2,  1],
+    ...                [1,  1,  1,  1,  0]])
+    >>> b = np.array([0, 1, 2, 2, 3])
+    >>> x = solve_banded((1, 2), ab, b)
+    >>> x
+    array([-2.37288136,  3.93220339, -4.        ,  4.3559322 , -1.3559322 ])
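+
+    A tridiagonal system (``l = u = 1``) uses the same storage convention;
+    for this case the dedicated LAPACK ``gtsv`` routine is called internally:
+
+    >>> ab = np.array([[0, 1, 1],
+    ...                [2, 2, 2],
+    ...                [1, 1, 0]])
+    >>> solve_banded((1, 1), ab, np.array([1, 2, 3]))
+    array([ 0.5,  0. ,  1.5])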
+
+    """
+
+    a1 = _asarray_validated(ab, check_finite=check_finite, as_inexact=True)
+    b1 = _asarray_validated(b, check_finite=check_finite, as_inexact=True)
+    # Validate shapes.
+    if a1.shape[-1] != b1.shape[0]:
+        raise ValueError("shapes of ab and b are not compatible.")
+    (nlower, nupper) = l_and_u
+    if nlower + nupper + 1 != a1.shape[0]:
+        raise ValueError("invalid values for the number of lower and upper "
+                         "diagonals: l+u+1 (%d) does not equal ab.shape[0] "
+                         "(%d)" % (nlower + nupper + 1, a1.shape[0]))
+
+    overwrite_b = overwrite_b or _datacopied(b1, b)
+    if a1.shape[-1] == 1:
+        b2 = np.array(b1, copy=(not overwrite_b))
+        b2 /= a1[1, 0]
+        return b2
+    if nlower == nupper == 1:
+        overwrite_ab = overwrite_ab or _datacopied(a1, ab)
+        gtsv, = get_lapack_funcs(('gtsv',), (a1, b1))
+        du = a1[0, 1:]
+        d = a1[1, :]
+        dl = a1[2, :-1]
+        du2, d, du, x, info = gtsv(dl, d, du, b1, overwrite_ab, overwrite_ab,
+                                   overwrite_ab, overwrite_b)
+    else:
+        gbsv, = get_lapack_funcs(('gbsv',), (a1, b1))
+        a2 = np.zeros((2*nlower + nupper + 1, a1.shape[1]), dtype=gbsv.dtype)
+        a2[nlower:, :] = a1
+        lu, piv, x, info = gbsv(nlower, nupper, a2, b1, overwrite_ab=True,
+                                overwrite_b=overwrite_b)
+    if info == 0:
+        return x
+    if info > 0:
+        raise LinAlgError("singular matrix")
+    raise ValueError('illegal value in %d-th argument of internal '
+                     'gbsv/gtsv' % -info)
+
+
+def solveh_banded(ab, b, overwrite_ab=False, overwrite_b=False, lower=False,
+                  check_finite=True):
+    """
+    Solve equation a x = b. a is Hermitian positive-definite banded matrix.
+
+    Uses a banded Cholesky factorization (LAPACK ``?pbsv``, or ``?ptsv`` for
+    the tridiagonal case), which is more efficient than standard LU
+    factorization, but should only be used for Hermitian positive-definite
+    matrices.
+
+    The matrix ``a`` is stored in `ab` either in lower diagonal or upper
+    diagonal ordered form::
+
+        ab[u + i - j, j] == a[i,j]        (if upper form; i <= j)
+        ab[    i - j, j] == a[i,j]        (if lower form; i >= j)
+
+    Example of `ab` (shape of ``a`` is (6, 6), number of upper diagonals
+    ``u`` = 2)::
+
+        upper form:
+        *   *   a02 a13 a24 a35
+        *   a01 a12 a23 a34 a45
+        a00 a11 a22 a33 a44 a55
+
+        lower form:
+        a00 a11 a22 a33 a44 a55
+        a10 a21 a32 a43 a54 *
+        a20 a31 a42 a53 *   *
+
+    Cells marked with * are not used.
+
+    Parameters
+    ----------
+    ab : (``u`` + 1, M) array_like
+        Banded matrix
+    b : (M,) or (M, K) array_like
+        Right-hand side
+    overwrite_ab : bool, optional
+        Discard data in `ab` (may enhance performance)
+    overwrite_b : bool, optional
+        Discard data in `b` (may enhance performance)
+    lower : bool, optional
+        Whether the matrix is in the lower form (default is upper form).
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    x : (M,) or (M, K) ndarray
+        The solution to the system ``a x = b``. Shape of return matches shape
+        of `b`.
+
+    Notes
+    -----
+    In the case of a non-positive definite matrix ``a``, the solver
+    `solve_banded` may be used.
+
+    Examples
+    --------
+    Solve the banded system ``A x = b``, where::
+
+            [ 4  2 -1  0  0  0]       [1]
+            [ 2  5  2 -1  0  0]       [2]
+        A = [-1  2  6  2 -1  0]   b = [2]
+            [ 0 -1  2  7  2 -1]       [3]
+            [ 0  0 -1  2  8  2]       [3]
+            [ 0  0  0 -1  2  9]       [3]
+
+    >>> import numpy as np
+    >>> from scipy.linalg import solveh_banded
+
+    ``ab`` contains the main diagonal and the nonzero diagonals below the
+    main diagonal. That is, we use the lower form:
+
+    >>> ab = np.array([[ 4,  5,  6,  7, 8, 9],
+    ...                [ 2,  2,  2,  2, 2, 0],
+    ...                [-1, -1, -1, -1, 0, 0]])
+    >>> b = np.array([1, 2, 2, 3, 3, 3])
+    >>> x = solveh_banded(ab, b, lower=True)
+    >>> x
+    array([ 0.03431373,  0.45938375,  0.05602241,  0.47759104,  0.17577031,
+            0.34733894])
+
+
+    Solve the Hermitian banded system ``H x = b``, where::
+
+            [ 8   2-1j   0     0  ]        [ 1  ]
+        H = [2+1j  5     1j    0  ]    b = [1+1j]
+            [ 0   -1j    9   -2-1j]        [1-2j]
+            [ 0    0   -2+1j   6  ]        [ 0  ]
+
+    In this example, we put the upper diagonals in the array ``hb``:
+
+    >>> hb = np.array([[0, 2-1j, 1j, -2-1j],
+    ...                [8,  5,    9,   6  ]])
+    >>> b = np.array([1, 1+1j, 1-2j, 0])
+    >>> x = solveh_banded(hb, b)
+    >>> x
+    array([ 0.07318536-0.02939412j,  0.11877624+0.17696461j,
+            0.10077984-0.23035393j, -0.00479904-0.09358128j])
+
+    """
+    a1 = _asarray_validated(ab, check_finite=check_finite)
+    b1 = _asarray_validated(b, check_finite=check_finite)
+    # Validate shapes.
+    if a1.shape[-1] != b1.shape[0]:
+        raise ValueError("shapes of ab and b are not compatible.")
+
+    overwrite_b = overwrite_b or _datacopied(b1, b)
+    overwrite_ab = overwrite_ab or _datacopied(a1, ab)
+
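+    # A two-row `ab` describes a tridiagonal matrix: the dedicated ?ptsv
+    # solver is used for it, and the general banded ?pbsv otherwise.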
+    if a1.shape[0] == 2:
+        ptsv, = get_lapack_funcs(('ptsv',), (a1, b1))
+        if lower:
+            d = a1[0, :].real
+            e = a1[1, :-1]
+        else:
+            d = a1[1, :].real
+            e = a1[0, 1:].conj()
+        d, du, x, info = ptsv(d, e, b1, overwrite_ab, overwrite_ab,
+                              overwrite_b)
+    else:
+        pbsv, = get_lapack_funcs(('pbsv',), (a1, b1))
+        c, x, info = pbsv(a1, b1, lower=lower, overwrite_ab=overwrite_ab,
+                          overwrite_b=overwrite_b)
+    if info > 0:
+        raise LinAlgError("%dth leading minor not positive definite" % info)
+    if info < 0:
+        raise ValueError('illegal value in %dth argument of internal '
+                         'pbsv' % -info)
+    return x
+
+
+def solve_toeplitz(c_or_cr, b, check_finite=True):
+    """Solve a Toeplitz system using Levinson Recursion
+
+    The Toeplitz matrix has constant diagonals, with c as its first column
+    and r as its first row. If r is not given, ``r == conjugate(c)`` is
+    assumed.
+
+    Parameters
+    ----------
+    c_or_cr : array_like or tuple of (array_like, array_like)
+        The vector ``c``, or a tuple of arrays (``c``, ``r``). Whatever the
+        actual shape of ``c``, it will be converted to a 1-D array. If not
+        supplied, ``r = conjugate(c)`` is assumed; in this case, if c[0] is
+        real, the Toeplitz matrix is Hermitian. r[0] is ignored; the first row
+        of the Toeplitz matrix is ``[c[0], r[1:]]``. Whatever the actual shape
+        of ``r``, it will be converted to a 1-D array.
+    b : (M,) or (M, K) array_like
+        Right-hand side in ``T x = b``.
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (result entirely NaNs) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    x : (M,) or (M, K) ndarray
+        The solution to the system ``T x = b``. Shape of return matches shape
+        of `b`.
+
+    See Also
+    --------
+    toeplitz : Toeplitz matrix
+
+    Notes
+    -----
+    The solution is computed using Levinson-Durbin recursion, which is faster
+    than generic least-squares methods, but can be less numerically stable.
+
+    Examples
+    --------
+    Solve the Toeplitz system T x = b, where::
+
+            [ 1 -1 -2 -3]       [1]
+        T = [ 3  1 -1 -2]   b = [2]
+            [ 6  3  1 -1]       [2]
+            [10  6  3  1]       [5]
+
+    To specify the Toeplitz matrix, only the first column and the first
+    row are needed.
+
+    >>> import numpy as np
+    >>> c = np.array([1, 3, 6, 10])    # First column of T
+    >>> r = np.array([1, -1, -2, -3])  # First row of T
+    >>> b = np.array([1, 2, 2, 5])
+
+    >>> from scipy.linalg import solve_toeplitz, toeplitz
+    >>> x = solve_toeplitz((c, r), b)
+    >>> x
+    array([ 1.66666667, -1.        , -2.66666667,  2.33333333])
+
+    Check the result by creating the full Toeplitz matrix and
+    multiplying it by `x`.  We should get `b`.
+
+    >>> T = toeplitz(c, r)
+    >>> T.dot(x)
+    array([ 1.,  2.,  2.,  5.])
+
+    """
+    # If numerical stability of this algorithm is a problem, a future
+    # developer might consider implementing other O(N^2) Toeplitz solvers,
+    # such as GKO (https://www.jstor.org/stable/2153371) or Bareiss.
+
+    r, c, b, dtype, b_shape = _validate_args_for_toeplitz_ops(
+        c_or_cr, b, check_finite, keep_b_shape=True)
+
+    # Form a 1-D array of values to be used in the matrix, containing a
+    # reversed copy of r[1:], followed by c.
+    vals = np.concatenate((r[-1:0:-1], c))
+    if b is None:
+        raise ValueError('illegal value, `b` is a required argument')
+
+    if b.ndim == 1:
+        x, _ = levinson(vals, np.ascontiguousarray(b))
+    else:
+        x = np.column_stack([levinson(vals, np.ascontiguousarray(b[:, i]))[0]
+                             for i in range(b.shape[1])])
+        x = x.reshape(*b_shape)
+
+    return x
+
+
+def _get_axis_len(aname, a, axis):
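+    """Return a.shape[axis] where `axis` may be negative."""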
+    ax = axis
+    if ax < 0:
+        ax += a.ndim
+    if 0 <= ax < a.ndim:
+        return a.shape[ax]
+    raise ValueError("'%saxis' entry is out of bounds" % (aname,))
+
+
+def solve_circulant(c, b, singular='raise', tol=None,
+                    caxis=-1, baxis=0, outaxis=0):
+    """Solve C x = b for x, where C is a circulant matrix.
+
+    `C` is the circulant matrix associated with the vector `c`.
+
+    The system is solved by doing division in Fourier space. The
+    calculation is::
+
+        x = ifft(fft(b) / fft(c))
+
+    where `fft` and `ifft` are the fast Fourier transform and its inverse,
+    respectively. For a large vector `c`, this is *much* faster than
+    solving the system with the full circulant matrix.
+
+    Parameters
+    ----------
+    c : array_like
+        The coefficients of the circulant matrix.
+    b : array_like
+        Right-hand side matrix in ``C x = b``.
+    singular : str, optional
+        This argument controls how a near singular circulant matrix is
+        handled.  If `singular` is "raise" and the circulant matrix is
+        near singular, a `LinAlgError` is raised. If `singular` is
+        "lstsq", the least squares solution is returned. Default is "raise".
+    tol : float, optional
+        If any eigenvalue of the circulant matrix has an absolute value
+        that is less than or equal to `tol`, the matrix is considered to be
+        near singular. If not given, `tol` is set to::
+
+            tol = abs_eigs.max() * abs_eigs.size * np.finfo(np.float64).eps
+
+        where `abs_eigs` is the array of absolute values of the eigenvalues
+        of the circulant matrix.
+    caxis : int
+        When `c` has dimension greater than 1, it is viewed as a collection
+        of circulant vectors. In this case, `caxis` is the axis of `c` that
+        holds the vectors of circulant coefficients.
+    baxis : int
+        When `b` has dimension greater than 1, it is viewed as a collection
+        of vectors. In this case, `baxis` is the axis of `b` that holds the
+        right-hand side vectors.
+    outaxis : int
+        When `c` or `b` are multidimensional, the value returned by
+        `solve_circulant` is multidimensional. In this case, `outaxis` is
+        the axis of the result that holds the solution vectors.
+
+    Returns
+    -------
+    x : ndarray
+        Solution to the system ``C x = b``.
+
+    Raises
+    ------
+    LinAlgError
+        If the circulant matrix associated with `c` is near singular.
+
+    See Also
+    --------
+    circulant : circulant matrix
+
+    Notes
+    -----
+    For a 1-D vector `c` with length `m`, and an array `b`
+    with shape ``(m, ...)``,
+
+        solve_circulant(c, b)
+
+    returns the same result as
+
+        solve(circulant(c), b)
+
+    where `solve` and `circulant` are from `scipy.linalg`.
+
+    .. versionadded:: 0.16.0
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import solve_circulant, solve, circulant, lstsq
+
+    >>> c = np.array([2, 2, 4])
+    >>> b = np.array([1, 2, 3])
+    >>> solve_circulant(c, b)
+    array([ 0.75, -0.25,  0.25])
+
+    Compare that result to solving the system with `scipy.linalg.solve`:
+
+    >>> solve(circulant(c), b)
+    array([ 0.75, -0.25,  0.25])
+
+    A singular example:
+
+    >>> c = np.array([1, 1, 0, 0])
+    >>> b = np.array([1, 2, 3, 4])
+
+    Calling ``solve_circulant(c, b)`` will raise a `LinAlgError`.  For the
+    least-squares solution, use the option ``singular='lstsq'``:
+
+    >>> solve_circulant(c, b, singular='lstsq')
+    array([ 0.25,  1.25,  2.25,  1.25])
+
+    Compare to `scipy.linalg.lstsq`:
+
+    >>> x, resid, rnk, s = lstsq(circulant(c), b)
+    >>> x
+    array([ 0.25,  1.25,  2.25,  1.25])
+
+    A broadcasting example:
+
+    Suppose we have the vectors of two circulant matrices stored in an array
+    with shape (2, 5), and three `b` vectors stored in an array with shape
+    (3, 5).  For example,
+
+    >>> c = np.array([[1.5, 2, 3, 0, 0], [1, 1, 4, 3, 2]])
+    >>> b = np.arange(15).reshape(-1, 5)
+
+    We want to solve all combinations of circulant matrices and `b` vectors,
+    with the result stored in an array with shape (2, 3, 5). When we
+    disregard the axes of `c` and `b` that hold the vectors of coefficients,
+    the shapes of the collections are (2,) and (3,), respectively, which are
+    not compatible for broadcasting. To have a broadcast result with shape
+    (2, 3), we add a trivial dimension to `c`: ``c[:, np.newaxis, :]`` has
+    shape (2, 1, 5). The last dimension holds the coefficients of the
+    circulant matrices, so when we call `solve_circulant`, we can use the
+    default ``caxis=-1``. The coefficients of the `b` vectors are in the last
+    dimension of the array `b`, so we use ``baxis=-1``. If we use the
+    default `outaxis`, the result will have shape (5, 2, 3), so we'll use
+    ``outaxis=-1`` to put the solution vectors in the last dimension.
+
+    >>> x = solve_circulant(c[:, np.newaxis, :], b, baxis=-1, outaxis=-1)
+    >>> x.shape
+    (2, 3, 5)
+    >>> np.set_printoptions(precision=3)  # For compact output of numbers.
+    >>> x
+    array([[[-0.118,  0.22 ,  1.277, -0.142,  0.302],
+            [ 0.651,  0.989,  2.046,  0.627,  1.072],
+            [ 1.42 ,  1.758,  2.816,  1.396,  1.841]],
+           [[ 0.401,  0.304,  0.694, -0.867,  0.377],
+            [ 0.856,  0.758,  1.149, -0.412,  0.831],
+            [ 1.31 ,  1.213,  1.603,  0.042,  1.286]]])
+
+    Check by solving one pair of `c` and `b` vectors (cf. ``x[1, 1, :]``):
+
+    >>> solve_circulant(c[1], b[1, :])
+    array([ 0.856,  0.758,  1.149, -0.412,  0.831])
+
+    """
+    c = np.atleast_1d(c)
+    nc = _get_axis_len("c", c, caxis)
+    b = np.atleast_1d(b)
+    nb = _get_axis_len("b", b, baxis)
+    if nc != nb:
+        raise ValueError('Shapes of c {} and b {} are incompatible'
+                         .format(c.shape, b.shape))
+
+    fc = np.fft.fft(np.moveaxis(c, caxis, -1), axis=-1)
+    abs_fc = np.abs(fc)
+    if tol is None:
+        # This is the same tolerance as used in np.linalg.matrix_rank.
+        tol = abs_fc.max(axis=-1) * nc * np.finfo(np.float64).eps
+        if tol.shape != ():
+            tol.shape = tol.shape + (1,)
+        else:
+            tol = np.atleast_1d(tol)
+
+    near_zeros = abs_fc <= tol
+    is_near_singular = np.any(near_zeros)
+    if is_near_singular:
+        if singular == 'raise':
+            raise LinAlgError("near singular circulant matrix.")
+        else:
+            # Replace the small values with 1 to avoid errors in the
+            # division fb/fc below.
+            fc[near_zeros] = 1
+
+    fb = np.fft.fft(np.moveaxis(b, baxis, -1), axis=-1)
+
+    q = fb / fc
+
+    if is_near_singular:
+        # `near_zeros` is a boolean array, same shape as `c`, that is
+        # True where `fc` is (near) zero. `q` is the broadcasted result
+        # of fb / fc, so to set the values of `q` to 0 where `fc` is near
+        # zero, we use a mask that is the broadcast result of an array
+        # of True values shaped like `b` with `near_zeros`.
+        mask = np.ones_like(b, dtype=bool) & near_zeros
+        q[mask] = 0
+
+    x = np.fft.ifft(q, axis=-1)
+    if not (np.iscomplexobj(c) or np.iscomplexobj(b)):
+        x = x.real
+    if outaxis != -1:
+        x = np.moveaxis(x, -1, outaxis)
+    return x
+
+
+# matrix inversion
+def inv(a, overwrite_a=False, check_finite=True):
+    """
+    Compute the inverse of a matrix.
+
+    Parameters
+    ----------
+    a : array_like
+        Square matrix to be inverted.
+    overwrite_a : bool, optional
+        Discard data in `a` (may improve performance). Default is False.
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    ainv : ndarray
+        Inverse of the matrix `a`.
+
+    Raises
+    ------
+    LinAlgError
+        If `a` is singular.
+    ValueError
+        If `a` is not square, or not 2D.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import linalg
+    >>> a = np.array([[1., 2.], [3., 4.]])
+    >>> linalg.inv(a)
+    array([[-2. ,  1. ],
+           [ 1.5, -0.5]])
+    >>> np.dot(a, linalg.inv(a))
+    array([[ 1.,  0.],
+           [ 0.,  1.]])
+
+    """
+    a1 = _asarray_validated(a, check_finite=check_finite)
+    if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
+        raise ValueError('expected square matrix')
+    overwrite_a = overwrite_a or _datacopied(a1, a)
+    # XXX: I found no advantage or disadvantage of using finv.
+#     finv, = get_flinalg_funcs(('inv',),(a1,))
+#     if finv is not None:
+#         a_inv,info = finv(a1,overwrite_a=overwrite_a)
+#         if info==0:
+#             return a_inv
+#         if info>0: raise LinAlgError, "singular matrix"
+#         if info<0: raise ValueError('illegal value in %d-th argument of '
+#                                     'internal inv.getrf|getri'%(-info))
+    getrf, getri, getri_lwork = get_lapack_funcs(('getrf', 'getri',
+                                                  'getri_lwork'),
+                                                 (a1,))
+    lu, piv, info = getrf(a1, overwrite_a=overwrite_a)
+    if info == 0:
+        lwork = _compute_lwork(getri_lwork, a1.shape[0])
+
+        # XXX: the following line fixes curious SEGFAULT when
+        # benchmarking 500x500 matrix inverse. This seems to
+        # be a bug in LAPACK ?getri routine because if lwork is
+        # minimal (when using lwork[0] instead of lwork[1]) then
+        # all tests pass. Further investigation is required if
+        # more such SEGFAULTs occur.
+        lwork = int(1.01 * lwork)
+        inv_a, info = getri(lu, piv, lwork=lwork, overwrite_lu=1)
+    if info > 0:
+        raise LinAlgError("singular matrix")
+    if info < 0:
+        raise ValueError('illegal value in %d-th argument of internal '
+                         'getrf|getri' % -info)
+    return inv_a
+
+
+# Determinant
+
+def det(a, overwrite_a=False, check_finite=True):
+    """
+    Compute the determinant of a matrix
+
+    The determinant of a square matrix is a value derived arithmetically
+    from the coefficients of the matrix.
+
+    The determinant for a 3x3 matrix, for example, is computed as follows::
+
+        a    b    c
+        d    e    f = A
+        g    h    i
+
+        det(A) = a*e*i + b*f*g + c*d*h - c*e*g - b*d*i - a*f*h
+
+    Parameters
+    ----------
+    a : (M, M) array_like
+        A square matrix.
+    overwrite_a : bool, optional
+        Allow overwriting data in a (may enhance performance).
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    det : float or complex
+        Determinant of `a`.
+
+    Notes
+    -----
+    The determinant is computed via LU factorization, LAPACK routine z/dgetrf.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import linalg
+    >>> a = np.array([[1,2,3], [4,5,6], [7,8,9]])
+    >>> linalg.det(a)
+    0.0
+    >>> a = np.array([[0,2,3], [4,5,6], [7,8,9]])
+    >>> linalg.det(a)
+    3.0
+
+    """
+    a1 = _asarray_validated(a, check_finite=check_finite)
+    if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
+        raise ValueError('expected square matrix')
+    overwrite_a = overwrite_a or _datacopied(a1, a)
+    fdet, = get_flinalg_funcs(('det',), (a1,))
+    a_det, info = fdet(a1, overwrite_a=overwrite_a)
+    if info < 0:
+        raise ValueError('illegal value in %d-th argument of internal '
+                         'det.getrf' % -info)
+    return a_det
+
+
+# Linear Least Squares
+def lstsq(a, b, cond=None, overwrite_a=False, overwrite_b=False,
+          check_finite=True, lapack_driver=None):
+    """
+    Compute least-squares solution to equation Ax = b.
+
+    Compute a vector x such that the 2-norm ``|b - A x|`` is minimized.
+
+    Parameters
+    ----------
+    a : (M, N) array_like
+        Left-hand side array
+    b : (M,) or (M, K) array_like
+        Right-hand side array
+    cond : float, optional
+        Cutoff for 'small' singular values; used to determine effective
+        rank of a. Singular values smaller than
+        ``cond * largest_singular_value`` are considered zero.
+    overwrite_a : bool, optional
+        Discard data in `a` (may enhance performance). Default is False.
+    overwrite_b : bool, optional
+        Discard data in `b` (may enhance performance). Default is False.
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+    lapack_driver : str, optional
+        Which LAPACK driver is used to solve the least-squares problem.
+        Options are ``'gelsd'``, ``'gelsy'``, ``'gelss'``. Default
+        (``'gelsd'``) is a good choice.  However, ``'gelsy'`` can be slightly
+        faster on many problems.  ``'gelss'`` was used historically.  It is
+        generally slow but uses less memory.
+
+        .. versionadded:: 0.17.0
+
+    Returns
+    -------
+    x : (N,) or (N, K) ndarray
+        Least-squares solution.
+    residues : (K,) ndarray or float
+        Square of the 2-norm for each column in ``b - a x``, if ``M > N`` and
+        ``rank(a) == N`` (returns a scalar if ``b`` is 1-D). Otherwise a
+        (0,)-shaped array is returned.
+    rank : int
+        Effective rank of `a`.
+    s : (min(M, N),) ndarray or None
+        Singular values of `a`. The condition number of ``a`` is
+        ``s[0] / s[-1]``.
+
+    Raises
+    ------
+    LinAlgError
+        If computation does not converge.
+
+    ValueError
+        When parameters are not compatible.
+
+    See Also
+    --------
+    scipy.optimize.nnls : linear least squares with non-negativity constraint
+
+    Notes
+    -----
+    When ``'gelsy'`` is used as a driver, `residues` is set to a (0,)-shaped
+    array and `s` is always ``None``.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import lstsq
+    >>> import matplotlib.pyplot as plt
+
+    Suppose we have the following data:
+
+    >>> x = np.array([1, 2.5, 3.5, 4, 5, 7, 8.5])
+    >>> y = np.array([0.3, 1.1, 1.5, 2.0, 3.2, 6.6, 8.6])
+
+    We want to fit a quadratic polynomial of the form ``y = a + b*x**2``
+    to this data.  We first form the "design matrix" M, with a constant
+    column of 1s and a column containing ``x**2``:
+
+    >>> M = x[:, np.newaxis]**[0, 2]
+    >>> M
+    array([[  1.  ,   1.  ],
+           [  1.  ,   6.25],
+           [  1.  ,  12.25],
+           [  1.  ,  16.  ],
+           [  1.  ,  25.  ],
+           [  1.  ,  49.  ],
+           [  1.  ,  72.25]])
+
+    We want to find the least-squares solution to ``M.dot(p) = y``,
+    where ``p`` is a vector with length 2 that holds the parameters
+    ``a`` and ``b``.
+
+    >>> p, res, rnk, s = lstsq(M, y)
+    >>> p
+    array([ 0.20925829,  0.12013861])
+
+    Plot the data and the fitted curve.
+
+    >>> plt.plot(x, y, 'o', label='data')
+    >>> xx = np.linspace(0, 9, 101)
+    >>> yy = p[0] + p[1]*xx**2
+    >>> plt.plot(xx, yy, label='least squares fit, $y = a + bx^2$')
+    >>> plt.xlabel('x')
+    >>> plt.ylabel('y')
+    >>> plt.legend(framealpha=1, shadow=True)
+    >>> plt.grid(alpha=0.25)
+    >>> plt.show()
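+
+    For a full-rank problem the ``'gelsy'`` driver computes the same
+    solution, but, as noted above, returns no singular values:
+
+    >>> p2, res2, rnk2, s2 = lstsq(M, y, lapack_driver='gelsy')
+    >>> p2
+    array([ 0.20925829,  0.12013861])
+    >>> s2 is None
+    True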
+
+    """
+    a1 = _asarray_validated(a, check_finite=check_finite)
+    b1 = _asarray_validated(b, check_finite=check_finite)
+    if len(a1.shape) != 2:
+        raise ValueError('Input array a should be 2D')
+    m, n = a1.shape
+    if len(b1.shape) == 2:
+        nrhs = b1.shape[1]
+    else:
+        nrhs = 1
+    if m != b1.shape[0]:
+        raise ValueError('Shape mismatch: a and b should have the same number'
+                         ' of rows ({} != {}).'.format(m, b1.shape[0]))
+    if m == 0 or n == 0:  # Zero-sized problem, confuses LAPACK
+        x = np.zeros((n,) + b1.shape[1:], dtype=np.common_type(a1, b1))
+        if n == 0:
+            residues = np.linalg.norm(b1, axis=0)**2
+        else:
+            residues = np.empty((0,))
+        return x, residues, 0, np.empty((0,))
+
+    driver = lapack_driver
+    if driver is None:
+        driver = lstsq.default_lapack_driver
+    if driver not in ('gelsd', 'gelsy', 'gelss'):
+        raise ValueError('LAPACK driver "%s" is not found' % driver)
+
+    lapack_func, lapack_lwork = get_lapack_funcs((driver,
+                                                 '%s_lwork' % driver),
+                                                 (a1, b1))
+    real_data = (lapack_func.dtype.kind == 'f')
+
+    if m < n:
+        # need to extend b matrix as it will be filled with
+        # a larger solution matrix
+        if len(b1.shape) == 2:
+            b2 = np.zeros((n, nrhs), dtype=lapack_func.dtype)
+            b2[:m, :] = b1
+        else:
+            b2 = np.zeros(n, dtype=lapack_func.dtype)
+            b2[:m] = b1
+        b1 = b2
+
+    overwrite_a = overwrite_a or _datacopied(a1, a)
+    overwrite_b = overwrite_b or _datacopied(b1, b)
+
+    if cond is None:
+        cond = np.finfo(lapack_func.dtype).eps
+
+    if driver in ('gelss', 'gelsd'):
+        if driver == 'gelss':
+            lwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
+            v, x, s, rank, work, info = lapack_func(a1, b1, cond, lwork,
+                                                    overwrite_a=overwrite_a,
+                                                    overwrite_b=overwrite_b)
+
+        elif driver == 'gelsd':
+            if real_data:
+                lwork, iwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
+                x, s, rank, info = lapack_func(a1, b1, lwork,
+                                               iwork, cond, False, False)
+            else:  # complex data
+                lwork, rwork, iwork = _compute_lwork(lapack_lwork, m, n,
+                                                     nrhs, cond)
+                x, s, rank, info = lapack_func(a1, b1, lwork, rwork, iwork,
+                                               cond, False, False)
+        if info > 0:
+            raise LinAlgError("SVD did not converge in Linear Least Squares")
+        if info < 0:
+            raise ValueError('illegal value in %d-th argument of internal %s'
+                             % (-info, driver))
+        resids = np.asarray([], dtype=x.dtype)
+        if m > n:
+            x1 = x[:n]
+            if rank == n:
+                resids = np.sum(np.abs(x[n:])**2, axis=0)
+            x = x1
+        return x, resids, rank, s
+
+    elif driver == 'gelsy':
+        lwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
+        jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
+        v, x, j, rank, info = lapack_func(a1, b1, jptv, cond,
+                                          lwork, False, False)
+        if info < 0:
+            raise ValueError("illegal value in %d-th argument of internal "
+                             "gelsy" % -info)
+        if m > n:
+            x1 = x[:n]
+            x = x1
+        return x, np.array([], x.dtype), rank, None
+
+
+lstsq.default_lapack_driver = 'gelsd'
+
+
+def pinv(a, atol=None, rtol=None, return_rank=False, check_finite=True,
+         cond=None, rcond=None):
+    """
+    Compute the (Moore-Penrose) pseudo-inverse of a matrix.
+
+    Calculate a generalized inverse of a matrix using its singular-value
+    decomposition ``U @ S @ Vh`` in the economy mode, picking up only the
+    columns/rows that are associated with significant singular values.
+
+    If ``s`` is the maximum singular value of ``a``, then the
+    significance cut-off value is determined by ``atol + rtol * s``. Any
+    singular value below this value is assumed insignificant.
+
+    Parameters
+    ----------
+    a : (M, N) array_like
+        Matrix to be pseudo-inverted.
+    atol : float, optional
+        Absolute threshold term, default value is 0.
+
+        .. versionadded:: 1.7.0
+
+    rtol : float, optional
+        Relative threshold term, default value is ``max(M, N) * eps`` where
+        ``eps`` is the machine precision value of the datatype of ``a``.
+
+        .. versionadded:: 1.7.0
+
+    return_rank : bool, optional
+        If True, return the effective rank of the matrix.
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+    cond, rcond : float, optional
+        In older versions, these values were meant to be used as ``atol`` with
+        ``rtol=0``. If both were given, ``rcond`` overwrote ``cond`` and hence
+        the code was not correct. Using these is therefore strongly
+        discouraged, and the tolerances above are recommended instead. In
+        fact, if provided, ``atol`` and ``rtol`` take precedence over these
+        keywords.
+
+        .. versionchanged:: 1.7.0
+            Deprecated in favor of ``rtol`` and ``atol`` parameters above and
+            will be removed in future versions of SciPy.
+
+        .. versionchanged:: 1.3.0
+            Previously the default cutoff value was just ``eps*f`` where ``f``
+            was ``1e3`` for single precision and ``1e6`` for double precision.
+
+    Returns
+    -------
+    B : (N, M) ndarray
+        The pseudo-inverse of matrix `a`.
+    rank : int
+        The effective rank of the matrix. Returned if `return_rank` is True.
+
+    Raises
+    ------
+    LinAlgError
+        If SVD computation does not converge.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import linalg
+    >>> rng = np.random.default_rng()
+    >>> a = rng.standard_normal((9, 6))
+    >>> B = linalg.pinv(a)
+    >>> np.allclose(a, a @ B @ a)
+    True
+    >>> np.allclose(B, B @ a @ B)
+    True
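+
+    Singular values below the cut-off ``atol + rtol * s`` are discarded,
+    as a rank-deficient example shows:
+
+    >>> a = np.array([[1., 0.], [0., 0.]])
+    >>> B, rank = linalg.pinv(a, return_rank=True)
+    >>> B
+    array([[ 1.,  0.],
+           [ 0.,  0.]])
+    >>> rank
+    1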
+
+    """
+    a = _asarray_validated(a, check_finite=check_finite)
+    u, s, vh = _decomp_svd.svd(a, full_matrices=False, check_finite=False)
+    t = u.dtype.char.lower()
+    maxS = np.max(s)
+
+    if rcond or cond:
+        warn('Use of the "cond" and "rcond" keywords are deprecated and '
+             'will be removed in future versions of SciPy. Use "atol" and '
+             '"rtol" keywords instead', DeprecationWarning, stacklevel=2)
+
+    # backwards compatibility: only used when atol and rtol are both missing
+    if (rcond or cond) and (atol is None) and (rtol is None):
+        atol = rcond or cond
+        rtol = 0.
+
+    atol = 0. if atol is None else atol
+    rtol = max(a.shape) * np.finfo(t).eps if (rtol is None) else rtol
+
+    if (atol < 0.) or (rtol < 0.):
+        raise ValueError("atol and rtol values must be non-negative.")
+
+    val = atol + maxS * rtol
+    rank = np.sum(s > val)
+
+    u = u[:, :rank]
+    u /= s[:rank]
+    B = (u @ vh[:rank]).conj().T
+
+    if return_rank:
+        return B, rank
+    else:
+        return B
+
+
+def pinvh(a, atol=None, rtol=None, lower=True, return_rank=False,
+          check_finite=True):
+    """
+    Compute the (Moore-Penrose) pseudo-inverse of a Hermitian matrix.
+
+    Calculate a generalized inverse of a complex Hermitian/real symmetric
+    matrix using its eigenvalue decomposition and including all eigenvalues
+    with 'large' absolute value.
+
+    Parameters
+    ----------
+    a : (N, N) array_like
+        Real symmetric or complex Hermitian matrix to be pseudo-inverted
+
+    atol : float, optional
+        Absolute threshold term, default value is 0.
+
+        .. versionadded:: 1.7.0
+
+    rtol : float, optional
+        Relative threshold term, default value is ``N * eps`` where
+        ``eps`` is the machine precision value of the datatype of ``a``.
+
+        .. versionadded:: 1.7.0
+
+    lower : bool, optional
+        Whether the pertinent array data is taken from the lower or upper
+        triangle of `a`. (Default: lower)
+    return_rank : bool, optional
+        If True, return the effective rank of the matrix.
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    B : (N, N) ndarray
+        The pseudo-inverse of matrix `a`.
+    rank : int
+        The effective rank of the matrix.  Returned if `return_rank` is True.
+
+    Raises
+    ------
+    LinAlgError
+        If eigenvalue algorithm does not converge.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import pinvh
+    >>> rng = np.random.default_rng()
+    >>> a = rng.standard_normal((9, 6))
+    >>> a = np.dot(a, a.T)
+    >>> B = pinvh(a)
+    >>> np.allclose(a, a @ B @ a)
+    True
+    >>> np.allclose(B, B @ a @ B)
+    True
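+
+    A singular example makes the eigenvalue cut-off visible:
+
+    >>> B, rank = pinvh(np.diag([1., 2., 0.]), return_rank=True)
+    >>> B
+    array([[ 1. ,  0. ,  0. ],
+           [ 0. ,  0.5,  0. ],
+           [ 0. ,  0. ,  0. ]])
+    >>> rank
+    2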
+
+    """
+    a = _asarray_validated(a, check_finite=check_finite)
+    s, u = _decomp.eigh(a, lower=lower, check_finite=False)
+    t = u.dtype.char.lower()
+    maxS = np.max(np.abs(s))
+
+    atol = 0. if atol is None else atol
+    rtol = max(a.shape) * np.finfo(t).eps if (rtol is None) else rtol
+
+    if (atol < 0.) or (rtol < 0.):
+        raise ValueError("atol and rtol values must be non-negative.")
+
+    val = atol + maxS * rtol
+    above_cutoff = (abs(s) > val)
+
+    psigma_diag = 1.0 / s[above_cutoff]
+    u = u[:, above_cutoff]
+
+    B = (u * psigma_diag) @ u.conj().T
+
+    if return_rank:
+        return B, len(psigma_diag)
+    else:
+        return B
+
+
+def matrix_balance(A, permute=True, scale=True, separate=False,
+                   overwrite_a=False):
+    """
+    Compute a diagonal similarity transformation for row/column balancing.
+
+    The balancing tries to equalize the row and column 1-norms by applying
+    a similarity transformation such that the magnitude variation of the
+    matrix entries is absorbed into the scaling matrices.
+
+    Moreover, if enabled, the matrix is first permuted to isolate its upper
+    triangular parts and, if scaling is also enabled, only the remaining
+    subblocks are subjected to scaling.
+
+    The balanced matrix satisfies the following equality
+
+    .. math::
+
+                        B = T^{-1} A T
+
+    The scaling coefficients are approximated to the nearest power of 2
+    to avoid round-off errors.
+
+    Parameters
+    ----------
+    A : (n, n) array_like
+        Square data matrix for the balancing.
+    permute : bool, optional
+        The selector to define whether permutation of A is also performed
+        prior to scaling.
+    scale : bool, optional
+        The selector to turn on and off the scaling. If False, the matrix
+        will not be scaled.
+    separate : bool, optional
+        This switches from returning a full matrix of the transformation
+        to a tuple of two separate 1-D permutation and scaling arrays.
+    overwrite_a : bool, optional
+        This is passed to xGEBAL directly. Essentially, it overwrites the
+        input data with the result. It might increase the space efficiency.
+        See LAPACK manual for details. This is False by default.
+
+    Returns
+    -------
+    B : (n, n) ndarray
+        Balanced matrix
+    T : (n, n) ndarray
+        A possibly permuted diagonal matrix whose nonzero entries are
+        integer powers of 2 to avoid numerical truncation errors.
+    scale, perm : (n,) ndarray
+        If ``separate`` keyword is set to True then instead of the array
+        ``T`` above, the scaling and the permutation vectors are given
+        separately as a tuple without allocating the full array ``T``.
+
+    Notes
+    -----
+    This algorithm is particularly useful for eigenvalue and matrix
+    decompositions and in many cases it is already called by various
+    LAPACK routines.
+
+    The algorithm is based on the well-known technique of [1]_ and has
+    been modified to account for special cases. See [2]_ for details
+    which have been implemented since LAPACK v3.5.0. Before this version
+    there are corner cases where balancing can actually worsen the
+    conditioning. See [3]_ for such examples.
+
+    The code is a wrapper around LAPACK's xGEBAL routine family for matrix
+    balancing.
+
+    .. versionadded:: 0.19.0
+
+    References
+    ----------
+    .. [1] B.N. Parlett and C. Reinsch, "Balancing a Matrix for
+       Calculation of Eigenvalues and Eigenvectors", Numerische Mathematik,
+       Vol.13(4), 1969, :doi:`10.1007/BF02165404`
+    .. [2] R. James, J. Langou, B.R. Lowery, "On matrix balancing and
+       eigenvector computation", 2014, :arxiv:`1401.5766`
+    .. [3] D.S. Watkins. A case where balancing is harmful.
+       Electron. Trans. Numer. Anal, Vol.23, 2006.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import linalg
+    >>> x = np.array([[1,2,0], [9,1,0.01], [1,2,10*np.pi]])
+
+    >>> y, permscale = linalg.matrix_balance(x)
+    >>> np.abs(x).sum(axis=0) / np.abs(x).sum(axis=1)
+    array([ 3.66666667,  0.4995005 ,  0.91312162])
+
+    >>> np.abs(y).sum(axis=0) / np.abs(y).sum(axis=1)
+    array([ 1.2       ,  1.27041742,  0.92658316])  # may vary
+
+    >>> permscale  # only powers of 2 (0.5 == 2^(-1))
+    array([[  0.5,   0. ,  0. ],  # may vary
+           [  0. ,   1. ,  0. ],
+           [  0. ,   0. ,  1. ]])
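+
+    With ``separate=True``, the scaling and the permutation are returned as
+    two 1-D arrays instead of the full transformation matrix:
+
+    >>> y, (scaling, perm) = linalg.matrix_balance(x, separate=True)
+    >>> scaling
+    array([ 0.5,  1. ,  1. ])  # may vary
+    >>> perm
+    array([0, 1, 2])  # may vary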
+
+    """
+
+    A = np.atleast_2d(_asarray_validated(A, check_finite=True))
+
+    if not np.equal(*A.shape):
+        raise ValueError('The data matrix for balancing should be square.')
+
+    gebal = get_lapack_funcs('gebal', (A,))
+    B, lo, hi, ps, info = gebal(A, scale=scale, permute=permute,
+                                overwrite_a=overwrite_a)
+
+    if info < 0:
+        raise ValueError('xGEBAL exited with the internal error '
+                         '"illegal value in argument number {}.". See '
+                         'LAPACK documentation for the xGEBAL error codes.'
+                         ''.format(-info))
+
+    # Separate the permutations from the scalings and then convert to int
+    scaling = np.ones_like(ps, dtype=float)
+    scaling[lo:hi+1] = ps[lo:hi+1]
+
+    # gebal uses 1-indexing
+    ps = ps.astype(int, copy=False) - 1
+    n = A.shape[0]
+    perm = np.arange(n)
+
+    # LAPACK permutes with the ordering n --> hi, then 0 --> lo
+    if hi < n:
+        for ind, x in enumerate(ps[hi+1:][::-1], 1):
+            if n-ind == x:
+                continue
+            perm[[x, n-ind]] = perm[[n-ind, x]]
+
+    if lo > 0:
+        for ind, x in enumerate(ps[:lo]):
+            if ind == x:
+                continue
+            perm[[x, ind]] = perm[[ind, x]]
+
+    if separate:
+        return B, (scaling, perm)
+
+    # get the inverse permutation
+    iperm = np.empty_like(perm)
+    iperm[perm] = np.arange(n)
+
+    return B, np.diag(scaling)[iperm, :]
+
+
+def _validate_args_for_toeplitz_ops(c_or_cr, b, check_finite, keep_b_shape,
+                                    enforce_square=True):
+    """Validate arguments and format inputs for toeplitz functions
+
+    Parameters
+    ----------
+    c_or_cr : array_like or tuple of (array_like, array_like)
+        The vector ``c``, or a tuple of arrays (``c``, ``r``). Whatever the
+        actual shape of ``c``, it will be converted to a 1-D array. If not
+        supplied, ``r = conjugate(c)`` is assumed; in this case, if c[0] is
+        real, the Toeplitz matrix is Hermitian. r[0] is ignored; the first row
+        of the Toeplitz matrix is ``[c[0], r[1:]]``. Whatever the actual shape
+        of ``r``, it will be converted to a 1-D array.
+    b : (M,) or (M, K) array_like
+        Right-hand side in ``T x = b``.
+    check_finite : bool
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (result entirely NaNs) if the inputs do contain infinities or NaNs.
+    keep_b_shape : bool
+        Whether to convert a (M,) dimensional b into a (M, 1) dimensional
+        matrix.
+    enforce_square : bool, optional
+        If True (default), this verifies that the Toeplitz matrix is square.
+
+    Returns
+    -------
+    r : array
+        1d array corresponding to the first row of the Toeplitz matrix.
+    c : array
+        1d array corresponding to the first column of the Toeplitz matrix.
+    b : array
+        (M,), (M, 1) or (M, K) dimensional array, post validation,
+        corresponding to ``b``.
+    dtype : numpy datatype
+        ``dtype`` stores the datatype of ``r``, ``c`` and ``b``. If any of
+        ``r``, ``c`` or ``b`` are complex, ``dtype`` is ``np.complex128``,
+        otherwise, it is ``np.double``.
+    b_shape : tuple
+        Shape of ``b`` after passing it through ``_asarray_validated``.
+
+    """
+
+    if isinstance(c_or_cr, tuple):
+        c, r = c_or_cr
+        c = _asarray_validated(c, check_finite=check_finite).ravel()
+        r = _asarray_validated(r, check_finite=check_finite).ravel()
+    else:
+        c = _asarray_validated(c_or_cr, check_finite=check_finite).ravel()
+        r = c.conjugate()
+
+    if b is None:
+        raise ValueError('`b` must be an array, not None.')
+
+    b = _asarray_validated(b, check_finite=check_finite)
+    b_shape = b.shape
+
+    is_not_square = r.shape[0] != c.shape[0]
+    if (enforce_square and is_not_square) or b.shape[0] != r.shape[0]:
+        raise ValueError('Incompatible dimensions.')
+
+    is_cmplx = np.iscomplexobj(r) or np.iscomplexobj(c) or np.iscomplexobj(b)
+    dtype = np.complex128 if is_cmplx else np.double
+    r, c, b = (np.asarray(i, dtype=dtype) for i in (r, c, b))
+
+    if b.ndim == 1 and not keep_b_shape:
+        b = b.reshape(-1, 1)
+    elif b.ndim != 1:
+        b = b.reshape(b.shape[0], -1)
+
+    return r, c, b, dtype, b_shape
+
+
+def matmul_toeplitz(c_or_cr, x, check_finite=False, workers=None):
+    """Efficient Toeplitz Matrix-Matrix Multiplication using FFT
+
+    This function returns the matrix multiplication between a Toeplitz
+    matrix and a dense matrix.
+
+    The Toeplitz matrix has constant diagonals, with c as its first column
+    and r as its first row. If r is not given, ``r == conjugate(c)`` is
+    assumed.
+
+    Parameters
+    ----------
+    c_or_cr : array_like or tuple of (array_like, array_like)
+        The vector ``c``, or a tuple of arrays (``c``, ``r``). Whatever the
+        actual shape of ``c``, it will be converted to a 1-D array. If not
+        supplied, ``r = conjugate(c)`` is assumed; in this case, if c[0] is
+        real, the Toeplitz matrix is Hermitian. r[0] is ignored; the first row
+        of the Toeplitz matrix is ``[c[0], r[1:]]``. Whatever the actual shape
+        of ``r``, it will be converted to a 1-D array.
+    x : (M,) or (M, K) array_like
+        Matrix with which to multiply.
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (a result entirely of NaNs) if the inputs do contain infinities or
+        NaNs.
+    workers : int, optional
+        Passed to scipy.fft.fft and ifft. Maximum number of workers to use
+        for parallel computation. If negative, the value wraps around from
+        ``os.cpu_count()``. See scipy.fft.fft for more details.
+
+    Returns
+    -------
+    T @ x : (M,) or (M, K) ndarray
+        The result of the matrix multiplication ``T @ x``. Shape of return
+        matches shape of `x`.
+
+    See Also
+    --------
+    toeplitz : Toeplitz matrix
+    solve_toeplitz : Solve a Toeplitz system using Levinson Recursion
+
+    Notes
+    -----
+    The Toeplitz matrix is embedded in a circulant matrix and the FFT is used
+    to efficiently calculate the matrix-matrix product.
+
+    Because the computation is based on the FFT, integer inputs will
+    result in floating point outputs.  This is unlike NumPy's `matmul`,
+    which preserves the data type of the input.
+
+    This is partly based on the implementation that can be found in [1]_,
+    licensed under the MIT license. More information about the method can be
+    found in reference [2]_. References [3]_ and [4]_ have more reference
+    implementations in Python.
+
+    .. versionadded:: 1.6.0
+
+    References
+    ----------
+    .. [1] Jacob R Gardner, Geoff Pleiss, David Bindel, Kilian
+       Q Weinberger, Andrew Gordon Wilson, "GPyTorch: Blackbox Matrix-Matrix
+       Gaussian Process Inference with GPU Acceleration" with contributions
+       from Max Balandat and Ruihan Wu. Available online:
+       https://github.com/cornellius-gp/gpytorch
+
+    .. [2] J. Demmel, P. Koev, and X. Li, "A Brief Survey of Direct Linear
+       Solvers". In Z. Bai, J. Demmel, J. Dongarra, A. Ruhe, and H. van der
+       Vorst, editors. Templates for the Solution of Algebraic Eigenvalue
+       Problems: A Practical Guide. SIAM, Philadelphia, 2000. Available at:
+       http://www.netlib.org/utk/people/JackDongarra/etemplates/node384.html
+
+    .. [3] R. Scheibler, E. Bezzam, I. Dokmanic, Pyroomacoustics: A Python
+       package for audio room simulations and array processing algorithms,
+       Proc. IEEE ICASSP, Calgary, CA, 2018.
+       https://github.com/LCAV/pyroomacoustics/blob/pypi-release/
+       pyroomacoustics/adaptive/util.py
+
+    .. [4] Marano S, Edwards B, Ferrari G and Fah D (2017), "Fitting
+       Earthquake Spectra: Colored Noise and Incomplete Data", Bulletin of
+       the Seismological Society of America., January, 2017. Vol. 107(1),
+       pp. 276-291.
+
+    Examples
+    --------
+    Multiply the Toeplitz matrix T with matrix x::
+
+            [ 1 -1 -2 -3]       [1 10]
+        T = [ 3  1 -1 -2]   x = [2 11]
+            [ 6  3  1 -1]       [2 11]
+            [10  6  3  1]       [5 19]
+
+    To specify the Toeplitz matrix, only the first column and the first
+    row are needed.
+
+    >>> import numpy as np
+    >>> c = np.array([1, 3, 6, 10])    # First column of T
+    >>> r = np.array([1, -1, -2, -3])  # First row of T
+    >>> x = np.array([[1, 10], [2, 11], [2, 11], [5, 19]])
+
+    >>> from scipy.linalg import toeplitz, matmul_toeplitz
+    >>> matmul_toeplitz((c, r), x)
+    array([[-20., -80.],
+           [ -7.,  -8.],
+           [  9.,  85.],
+           [ 33., 218.]])
+
+    Check the result by creating the full Toeplitz matrix and
+    multiplying it by ``x``.
+
+    >>> toeplitz(c, r) @ x
+    array([[-20, -80],
+           [ -7,  -8],
+           [  9,  85],
+           [ 33, 218]])
+
+    The full matrix is never formed explicitly, so this routine
+    is suitable for very large Toeplitz matrices.
+
+    >>> n = 1000000
+    >>> matmul_toeplitz([1] + [0]*(n-1), np.ones(n))
+    array([1., 1., 1., ..., 1., 1., 1.])
+
+    """
+
+    from ..fft import fft, ifft, rfft, irfft
+
+    r, c, x, dtype, x_shape = _validate_args_for_toeplitz_ops(
+        c_or_cr, x, check_finite, keep_b_shape=False, enforce_square=False)
+    n, m = x.shape
+
+    T_nrows = len(c)
+    T_ncols = len(r)
+    p = T_nrows + T_ncols - 1  # equivalent to len(embedded_col)
+
+    embedded_col = np.concatenate((c, r[-1:0:-1]))
+
+    if np.iscomplexobj(embedded_col) or np.iscomplexobj(x):
+        fft_mat = fft(embedded_col, axis=0, workers=workers).reshape(-1, 1)
+        fft_x = fft(x, n=p, axis=0, workers=workers)
+
+        mat_times_x = ifft(fft_mat*fft_x, axis=0,
+                           workers=workers)[:T_nrows, :]
+    else:
+        # Real inputs; using rfft is faster
+        fft_mat = rfft(embedded_col, axis=0, workers=workers).reshape(-1, 1)
+        fft_x = rfft(x, n=p, axis=0, workers=workers)
+
+        mat_times_x = irfft(fft_mat*fft_x, axis=0,
+                            workers=workers, n=p)[:T_nrows, :]
+
+    return_shape = (T_nrows,) if len(x_shape) == 1 else (T_nrows, m)
+    return mat_times_x.reshape(*return_shape)
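+
+
+# Editorial sketch (uses only public SciPy functions; not part of this
+# module): the circulant embedding described in the Notes of
+# ``matmul_toeplitz``. The Toeplitz matrix with first column ``c`` and first
+# row ``r`` is the upper-left block of the circulant matrix built from
+# ``np.concatenate((c, r[-1:0:-1]))``, which is what allows the product to
+# be computed with FFTs:
+#
+#     import numpy as np
+#     from scipy.linalg import circulant, toeplitz
+#     c = np.array([1., 3., 6., 10.])
+#     r = np.array([1., -1., -2., -3.])
+#     emb = circulant(np.concatenate((c, r[-1:0:-1])))
+#     assert np.allclose(emb[:4, :4], toeplitz(c, r))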
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/_blas_subroutine_wrappers.f b/__packaged__/coreml/.python_dependencies/scipy/linalg/_blas_subroutine_wrappers.f
new file mode 100644
index 00000000..e4398023
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/_blas_subroutine_wrappers.f
@@ -0,0 +1,462 @@
+c     This file was generated by _generate_pyx.py.
+c     Do not edit this file directly.
+
+      subroutine cdotcwrp(
+     +    ret,
+     +    n,
+     +    cx,
+     +    incx,
+     +    cy,
+     +    incy
+     +    )
+        external wcdotc
+        complex wcdotc
+        complex ret
+        integer n
+        complex cx(n)
+        integer incx
+        complex cy(n)
+        integer incy
+        ret = wcdotc(
+     +    n,
+     +    cx,
+     +    incx,
+     +    cy,
+     +    incy
+     +    )
+      end
+
+      subroutine cdotuwrp(
+     +    ret,
+     +    n,
+     +    cx,
+     +    incx,
+     +    cy,
+     +    incy
+     +    )
+        external wcdotu
+        complex wcdotu
+        complex ret
+        integer n
+        complex cx(n)
+        integer incx
+        complex cy(n)
+        integer incy
+        ret = wcdotu(
+     +    n,
+     +    cx,
+     +    incx,
+     +    cy,
+     +    incy
+     +    )
+      end
+
+      subroutine dasumwrp(
+     +    ret,
+     +    n,
+     +    dx,
+     +    incx
+     +    )
+        external dasum
+        double precision dasum
+        double precision ret
+        integer n
+        double precision dx(n)
+        integer incx
+        ret = dasum(
+     +    n,
+     +    dx,
+     +    incx
+     +    )
+      end
+
+      subroutine dcabs1wrp(
+     +    ret,
+     +    z
+     +    )
+        external dcabs1
+        double precision dcabs1
+        double precision ret
+        complex*16 z
+        ret = dcabs1(
+     +    z
+     +    )
+      end
+
+      subroutine ddotwrp(
+     +    ret,
+     +    n,
+     +    dx,
+     +    incx,
+     +    dy,
+     +    incy
+     +    )
+        external ddot
+        double precision ddot
+        double precision ret
+        integer n
+        double precision dx(n)
+        integer incx
+        double precision dy(n)
+        integer incy
+        ret = ddot(
+     +    n,
+     +    dx,
+     +    incx,
+     +    dy,
+     +    incy
+     +    )
+      end
+
+      subroutine dnrm2wrp(
+     +    ret,
+     +    n,
+     +    x,
+     +    incx
+     +    )
+        external dnrm2
+        double precision dnrm2
+        double precision ret
+        integer n
+        double precision x(n)
+        integer incx
+        ret = dnrm2(
+     +    n,
+     +    x,
+     +    incx
+     +    )
+      end
+
+      subroutine dsdotwrp(
+     +    ret,
+     +    n,
+     +    sx,
+     +    incx,
+     +    sy,
+     +    incy
+     +    )
+        external dsdot
+        double precision dsdot
+        double precision ret
+        integer n
+        real sx(n)
+        integer incx
+        real sy(n)
+        integer incy
+        ret = dsdot(
+     +    n,
+     +    sx,
+     +    incx,
+     +    sy,
+     +    incy
+     +    )
+      end
+
+      subroutine dzasumwrp(
+     +    ret,
+     +    n,
+     +    zx,
+     +    incx
+     +    )
+        external dzasum
+        double precision dzasum
+        double precision ret
+        integer n
+        complex*16 zx(n)
+        integer incx
+        ret = dzasum(
+     +    n,
+     +    zx,
+     +    incx
+     +    )
+      end
+
+      subroutine dznrm2wrp(
+     +    ret,
+     +    n,
+     +    x,
+     +    incx
+     +    )
+        external dznrm2
+        double precision dznrm2
+        double precision ret
+        integer n
+        complex*16 x(n)
+        integer incx
+        ret = dznrm2(
+     +    n,
+     +    x,
+     +    incx
+     +    )
+      end
+
+      subroutine icamaxwrp(
+     +    ret,
+     +    n,
+     +    cx,
+     +    incx
+     +    )
+        external icamax
+        integer icamax
+        integer ret
+        integer n
+        complex cx(n)
+        integer incx
+        ret = icamax(
+     +    n,
+     +    cx,
+     +    incx
+     +    )
+      end
+
+      subroutine idamaxwrp(
+     +    ret,
+     +    n,
+     +    dx,
+     +    incx
+     +    )
+        external idamax
+        integer idamax
+        integer ret
+        integer n
+        double precision dx(n)
+        integer incx
+        ret = idamax(
+     +    n,
+     +    dx,
+     +    incx
+     +    )
+      end
+
+      subroutine isamaxwrp(
+     +    ret,
+     +    n,
+     +    sx,
+     +    incx
+     +    )
+        external isamax
+        integer isamax
+        integer ret
+        integer n
+        real sx(n)
+        integer incx
+        ret = isamax(
+     +    n,
+     +    sx,
+     +    incx
+     +    )
+      end
+
+      subroutine izamaxwrp(
+     +    ret,
+     +    n,
+     +    zx,
+     +    incx
+     +    )
+        external izamax
+        integer izamax
+        integer ret
+        integer n
+        complex*16 zx(n)
+        integer incx
+        ret = izamax(
+     +    n,
+     +    zx,
+     +    incx
+     +    )
+      end
+
+      subroutine lsamewrp(
+     +    ret,
+     +    ca,
+     +    cb
+     +    )
+        external lsame
+        logical lsame
+        logical ret
+        character ca
+        character cb
+        ret = lsame(
+     +    ca,
+     +    cb
+     +    )
+      end
+
+      subroutine sasumwrp(
+     +    ret,
+     +    n,
+     +    sx,
+     +    incx
+     +    )
+        external sasum
+        real sasum
+        real ret
+        integer n
+        real sx(n)
+        integer incx
+        ret = sasum(
+     +    n,
+     +    sx,
+     +    incx
+     +    )
+      end
+
+      subroutine scasumwrp(
+     +    ret,
+     +    n,
+     +    cx,
+     +    incx
+     +    )
+        external scasum
+        real scasum
+        real ret
+        integer n
+        complex cx(n)
+        integer incx
+        ret = scasum(
+     +    n,
+     +    cx,
+     +    incx
+     +    )
+      end
+
+      subroutine scnrm2wrp(
+     +    ret,
+     +    n,
+     +    x,
+     +    incx
+     +    )
+        external scnrm2
+        real scnrm2
+        real ret
+        integer n
+        complex x(n)
+        integer incx
+        ret = scnrm2(
+     +    n,
+     +    x,
+     +    incx
+     +    )
+      end
+
+      subroutine sdotwrp(
+     +    ret,
+     +    n,
+     +    sx,
+     +    incx,
+     +    sy,
+     +    incy
+     +    )
+        external sdot
+        real sdot
+        real ret
+        integer n
+        real sx(n)
+        integer incx
+        real sy(n)
+        integer incy
+        ret = sdot(
+     +    n,
+     +    sx,
+     +    incx,
+     +    sy,
+     +    incy
+     +    )
+      end
+
+      subroutine sdsdotwrp(
+     +    ret,
+     +    n,
+     +    sb,
+     +    sx,
+     +    incx,
+     +    sy,
+     +    incy
+     +    )
+        external sdsdot
+        real sdsdot
+        real ret
+        integer n
+        real sb
+        real sx(n)
+        integer incx
+        real sy(n)
+        integer incy
+        ret = sdsdot(
+     +    n,
+     +    sb,
+     +    sx,
+     +    incx,
+     +    sy,
+     +    incy
+     +    )
+      end
+
+      subroutine snrm2wrp(
+     +    ret,
+     +    n,
+     +    x,
+     +    incx
+     +    )
+        external snrm2
+        real snrm2
+        real ret
+        integer n
+        real x(n)
+        integer incx
+        ret = snrm2(
+     +    n,
+     +    x,
+     +    incx
+     +    )
+      end
+
+      subroutine zdotcwrp(
+     +    ret,
+     +    n,
+     +    zx,
+     +    incx,
+     +    zy,
+     +    incy
+     +    )
+        external wzdotc
+        complex*16 wzdotc
+        complex*16 ret
+        integer n
+        complex*16 zx(n)
+        integer incx
+        complex*16 zy(n)
+        integer incy
+        ret = wzdotc(
+     +    n,
+     +    zx,
+     +    incx,
+     +    zy,
+     +    incy
+     +    )
+      end
+
+      subroutine zdotuwrp(
+     +    ret,
+     +    n,
+     +    zx,
+     +    incx,
+     +    zy,
+     +    incy
+     +    )
+        external wzdotu
+        complex*16 wzdotu
+        complex*16 ret
+        integer n
+        complex*16 zx(n)
+        integer incx
+        complex*16 zy(n)
+        integer incy
+        ret = wzdotu(
+     +    n,
+     +    zx,
+     +    incx,
+     +    zy,
+     +    incy
+     +    )
+      end
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/_blas_subroutines.h b/__packaged__/coreml/.python_dependencies/scipy/linalg/_blas_subroutines.h
new file mode 100644
index 00000000..9e809195
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/_blas_subroutines.h
@@ -0,0 +1,166 @@
+/* This file was generated by _generate_pyx.py. */
+/* Do not edit this file directly. */
+
+#ifndef SCIPY_LINALG_BLAS_FORTRAN_WRAPPERS_H
+#define SCIPY_LINALG_BLAS_FORTRAN_WRAPPERS_H
+#include "fortran_defs.h"
+#include "numpy/arrayobject.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void F_FUNC(cdotcwrp, CDOTCWRP)(npy_complex64 *ret, int *n, npy_complex64 *cx, int *incx, npy_complex64 *cy, int *incy);
+void F_FUNC(cdotuwrp, CDOTUWRP)(npy_complex64 *ret, int *n, npy_complex64 *cx, int *incx, npy_complex64 *cy, int *incy);
+void F_FUNC(dasumwrp, DASUMWRP)(double *ret, int *n, double *dx, int *incx);
+void F_FUNC(dcabs1wrp, DCABS1WRP)(double *ret, npy_complex128 *z);
+void F_FUNC(ddotwrp, DDOTWRP)(double *ret, int *n, double *dx, int *incx, double *dy, int *incy);
+void F_FUNC(dnrm2wrp, DNRM2WRP)(double *ret, int *n, double *x, int *incx);
+void F_FUNC(dsdotwrp, DSDOTWRP)(double *ret, int *n, float *sx, int *incx, float *sy, int *incy);
+void F_FUNC(dzasumwrp, DZASUMWRP)(double *ret, int *n, npy_complex128 *zx, int *incx);
+void F_FUNC(dznrm2wrp, DZNRM2WRP)(double *ret, int *n, npy_complex128 *x, int *incx);
+void F_FUNC(icamaxwrp, ICAMAXWRP)(int *ret, int *n, npy_complex64 *cx, int *incx);
+void F_FUNC(idamaxwrp, IDAMAXWRP)(int *ret, int *n, double *dx, int *incx);
+void F_FUNC(isamaxwrp, ISAMAXWRP)(int *ret, int *n, float *sx, int *incx);
+void F_FUNC(izamaxwrp, IZAMAXWRP)(int *ret, int *n, npy_complex128 *zx, int *incx);
+void F_FUNC(lsamewrp, LSAMEWRP)(int *ret, char *ca, char *cb);
+void F_FUNC(sasumwrp, SASUMWRP)(float *ret, int *n, float *sx, int *incx);
+void F_FUNC(scasumwrp, SCASUMWRP)(float *ret, int *n, npy_complex64 *cx, int *incx);
+void F_FUNC(scnrm2wrp, SCNRM2WRP)(float *ret, int *n, npy_complex64 *x, int *incx);
+void F_FUNC(sdotwrp, SDOTWRP)(float *ret, int *n, float *sx, int *incx, float *sy, int *incy);
+void F_FUNC(sdsdotwrp, SDSDOTWRP)(float *ret, int *n, float *sb, float *sx, int *incx, float *sy, int *incy);
+void F_FUNC(snrm2wrp, SNRM2WRP)(float *ret, int *n, float *x, int *incx);
+void F_FUNC(zdotcwrp, ZDOTCWRP)(npy_complex128 *ret, int *n, npy_complex128 *zx, int *incx, npy_complex128 *zy, int *incy);
+void F_FUNC(zdotuwrp, ZDOTUWRP)(npy_complex128 *ret, int *n, npy_complex128 *zx, int *incx, npy_complex128 *zy, int *incy);
+
+void F_FUNC(caxpy,CAXPY)(int *n, npy_complex64 *ca, npy_complex64 *cx, int *incx, npy_complex64 *cy, int *incy);
+void F_FUNC(ccopy,CCOPY)(int *n, npy_complex64 *cx, int *incx, npy_complex64 *cy, int *incy);
+void F_FUNC(cgbmv,CGBMV)(char *trans, int *m, int *n, int *kl, int *ku, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *x, int *incx, npy_complex64 *beta, npy_complex64 *y, int *incy);
+void F_FUNC(cgemm,CGEMM)(char *transa, char *transb, int *m, int *n, int *k, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *beta, npy_complex64 *c, int *ldc);
+void F_FUNC(cgemv,CGEMV)(char *trans, int *m, int *n, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *x, int *incx, npy_complex64 *beta, npy_complex64 *y, int *incy);
+void F_FUNC(cgerc,CGERC)(int *m, int *n, npy_complex64 *alpha, npy_complex64 *x, int *incx, npy_complex64 *y, int *incy, npy_complex64 *a, int *lda);
+void F_FUNC(cgeru,CGERU)(int *m, int *n, npy_complex64 *alpha, npy_complex64 *x, int *incx, npy_complex64 *y, int *incy, npy_complex64 *a, int *lda);
+void F_FUNC(chbmv,CHBMV)(char *uplo, int *n, int *k, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *x, int *incx, npy_complex64 *beta, npy_complex64 *y, int *incy);
+void F_FUNC(chemm,CHEMM)(char *side, char *uplo, int *m, int *n, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *beta, npy_complex64 *c, int *ldc);
+void F_FUNC(chemv,CHEMV)(char *uplo, int *n, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *x, int *incx, npy_complex64 *beta, npy_complex64 *y, int *incy);
+void F_FUNC(cher,CHER)(char *uplo, int *n, float *alpha, npy_complex64 *x, int *incx, npy_complex64 *a, int *lda);
+void F_FUNC(cher2,CHER2)(char *uplo, int *n, npy_complex64 *alpha, npy_complex64 *x, int *incx, npy_complex64 *y, int *incy, npy_complex64 *a, int *lda);
+void F_FUNC(cher2k,CHER2K)(char *uplo, char *trans, int *n, int *k, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, float *beta, npy_complex64 *c, int *ldc);
+void F_FUNC(cherk,CHERK)(char *uplo, char *trans, int *n, int *k, float *alpha, npy_complex64 *a, int *lda, float *beta, npy_complex64 *c, int *ldc);
+void F_FUNC(chpmv,CHPMV)(char *uplo, int *n, npy_complex64 *alpha, npy_complex64 *ap, npy_complex64 *x, int *incx, npy_complex64 *beta, npy_complex64 *y, int *incy);
+void F_FUNC(chpr,CHPR)(char *uplo, int *n, float *alpha, npy_complex64 *x, int *incx, npy_complex64 *ap);
+void F_FUNC(chpr2,CHPR2)(char *uplo, int *n, npy_complex64 *alpha, npy_complex64 *x, int *incx, npy_complex64 *y, int *incy, npy_complex64 *ap);
+void F_FUNC(crotg,CROTG)(npy_complex64 *ca, npy_complex64 *cb, float *c, npy_complex64 *s);
+void F_FUNC(cscal,CSCAL)(int *n, npy_complex64 *ca, npy_complex64 *cx, int *incx);
+void F_FUNC(csrot,CSROT)(int *n, npy_complex64 *cx, int *incx, npy_complex64 *cy, int *incy, float *c, float *s);
+void F_FUNC(csscal,CSSCAL)(int *n, float *sa, npy_complex64 *cx, int *incx);
+void F_FUNC(cswap,CSWAP)(int *n, npy_complex64 *cx, int *incx, npy_complex64 *cy, int *incy);
+void F_FUNC(csymm,CSYMM)(char *side, char *uplo, int *m, int *n, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *beta, npy_complex64 *c, int *ldc);
+void F_FUNC(csyr2k,CSYR2K)(char *uplo, char *trans, int *n, int *k, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *beta, npy_complex64 *c, int *ldc);
+void F_FUNC(csyrk,CSYRK)(char *uplo, char *trans, int *n, int *k, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *beta, npy_complex64 *c, int *ldc);
+void F_FUNC(ctbmv,CTBMV)(char *uplo, char *trans, char *diag, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *x, int *incx);
+void F_FUNC(ctbsv,CTBSV)(char *uplo, char *trans, char *diag, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *x, int *incx);
+void F_FUNC(ctpmv,CTPMV)(char *uplo, char *trans, char *diag, int *n, npy_complex64 *ap, npy_complex64 *x, int *incx);
+void F_FUNC(ctpsv,CTPSV)(char *uplo, char *trans, char *diag, int *n, npy_complex64 *ap, npy_complex64 *x, int *incx);
+void F_FUNC(ctrmm,CTRMM)(char *side, char *uplo, char *transa, char *diag, int *m, int *n, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb);
+void F_FUNC(ctrmv,CTRMV)(char *uplo, char *trans, char *diag, int *n, npy_complex64 *a, int *lda, npy_complex64 *x, int *incx);
+void F_FUNC(ctrsm,CTRSM)(char *side, char *uplo, char *transa, char *diag, int *m, int *n, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb);
+void F_FUNC(ctrsv,CTRSV)(char *uplo, char *trans, char *diag, int *n, npy_complex64 *a, int *lda, npy_complex64 *x, int *incx);
+void F_FUNC(daxpy,DAXPY)(int *n, double *da, double *dx, int *incx, double *dy, int *incy);
+void F_FUNC(dcopy,DCOPY)(int *n, double *dx, int *incx, double *dy, int *incy);
+void F_FUNC(dgbmv,DGBMV)(char *trans, int *m, int *n, int *kl, int *ku, double *alpha, double *a, int *lda, double *x, int *incx, double *beta, double *y, int *incy);
+void F_FUNC(dgemm,DGEMM)(char *transa, char *transb, int *m, int *n, int *k, double *alpha, double *a, int *lda, double *b, int *ldb, double *beta, double *c, int *ldc);
+void F_FUNC(dgemv,DGEMV)(char *trans, int *m, int *n, double *alpha, double *a, int *lda, double *x, int *incx, double *beta, double *y, int *incy);
+void F_FUNC(dger,DGER)(int *m, int *n, double *alpha, double *x, int *incx, double *y, int *incy, double *a, int *lda);
+void F_FUNC(drot,DROT)(int *n, double *dx, int *incx, double *dy, int *incy, double *c, double *s);
+void F_FUNC(drotg,DROTG)(double *da, double *db, double *c, double *s);
+void F_FUNC(drotm,DROTM)(int *n, double *dx, int *incx, double *dy, int *incy, double *dparam);
+void F_FUNC(drotmg,DROTMG)(double *dd1, double *dd2, double *dx1, double *dy1, double *dparam);
+void F_FUNC(dsbmv,DSBMV)(char *uplo, int *n, int *k, double *alpha, double *a, int *lda, double *x, int *incx, double *beta, double *y, int *incy);
+void F_FUNC(dscal,DSCAL)(int *n, double *da, double *dx, int *incx);
+void F_FUNC(dspmv,DSPMV)(char *uplo, int *n, double *alpha, double *ap, double *x, int *incx, double *beta, double *y, int *incy);
+void F_FUNC(dspr,DSPR)(char *uplo, int *n, double *alpha, double *x, int *incx, double *ap);
+void F_FUNC(dspr2,DSPR2)(char *uplo, int *n, double *alpha, double *x, int *incx, double *y, int *incy, double *ap);
+void F_FUNC(dswap,DSWAP)(int *n, double *dx, int *incx, double *dy, int *incy);
+void F_FUNC(dsymm,DSYMM)(char *side, char *uplo, int *m, int *n, double *alpha, double *a, int *lda, double *b, int *ldb, double *beta, double *c, int *ldc);
+void F_FUNC(dsymv,DSYMV)(char *uplo, int *n, double *alpha, double *a, int *lda, double *x, int *incx, double *beta, double *y, int *incy);
+void F_FUNC(dsyr,DSYR)(char *uplo, int *n, double *alpha, double *x, int *incx, double *a, int *lda);
+void F_FUNC(dsyr2,DSYR2)(char *uplo, int *n, double *alpha, double *x, int *incx, double *y, int *incy, double *a, int *lda);
+void F_FUNC(dsyr2k,DSYR2K)(char *uplo, char *trans, int *n, int *k, double *alpha, double *a, int *lda, double *b, int *ldb, double *beta, double *c, int *ldc);
+void F_FUNC(dsyrk,DSYRK)(char *uplo, char *trans, int *n, int *k, double *alpha, double *a, int *lda, double *beta, double *c, int *ldc);
+void F_FUNC(dtbmv,DTBMV)(char *uplo, char *trans, char *diag, int *n, int *k, double *a, int *lda, double *x, int *incx);
+void F_FUNC(dtbsv,DTBSV)(char *uplo, char *trans, char *diag, int *n, int *k, double *a, int *lda, double *x, int *incx);
+void F_FUNC(dtpmv,DTPMV)(char *uplo, char *trans, char *diag, int *n, double *ap, double *x, int *incx);
+void F_FUNC(dtpsv,DTPSV)(char *uplo, char *trans, char *diag, int *n, double *ap, double *x, int *incx);
+void F_FUNC(dtrmm,DTRMM)(char *side, char *uplo, char *transa, char *diag, int *m, int *n, double *alpha, double *a, int *lda, double *b, int *ldb);
+void F_FUNC(dtrmv,DTRMV)(char *uplo, char *trans, char *diag, int *n, double *a, int *lda, double *x, int *incx);
+void F_FUNC(dtrsm,DTRSM)(char *side, char *uplo, char *transa, char *diag, int *m, int *n, double *alpha, double *a, int *lda, double *b, int *ldb);
+void F_FUNC(dtrsv,DTRSV)(char *uplo, char *trans, char *diag, int *n, double *a, int *lda, double *x, int *incx);
+void F_FUNC(saxpy,SAXPY)(int *n, float *sa, float *sx, int *incx, float *sy, int *incy);
+void F_FUNC(scopy,SCOPY)(int *n, float *sx, int *incx, float *sy, int *incy);
+void F_FUNC(sgbmv,SGBMV)(char *trans, int *m, int *n, int *kl, int *ku, float *alpha, float *a, int *lda, float *x, int *incx, float *beta, float *y, int *incy);
+void F_FUNC(sgemm,SGEMM)(char *transa, char *transb, int *m, int *n, int *k, float *alpha, float *a, int *lda, float *b, int *ldb, float *beta, float *c, int *ldc);
+void F_FUNC(sgemv,SGEMV)(char *trans, int *m, int *n, float *alpha, float *a, int *lda, float *x, int *incx, float *beta, float *y, int *incy);
+void F_FUNC(sger,SGER)(int *m, int *n, float *alpha, float *x, int *incx, float *y, int *incy, float *a, int *lda);
+void F_FUNC(srot,SROT)(int *n, float *sx, int *incx, float *sy, int *incy, float *c, float *s);
+void F_FUNC(srotg,SROTG)(float *sa, float *sb, float *c, float *s);
+void F_FUNC(srotm,SROTM)(int *n, float *sx, int *incx, float *sy, int *incy, float *sparam);
+void F_FUNC(srotmg,SROTMG)(float *sd1, float *sd2, float *sx1, float *sy1, float *sparam);
+void F_FUNC(ssbmv,SSBMV)(char *uplo, int *n, int *k, float *alpha, float *a, int *lda, float *x, int *incx, float *beta, float *y, int *incy);
+void F_FUNC(sscal,SSCAL)(int *n, float *sa, float *sx, int *incx);
+void F_FUNC(sspmv,SSPMV)(char *uplo, int *n, float *alpha, float *ap, float *x, int *incx, float *beta, float *y, int *incy);
+void F_FUNC(sspr,SSPR)(char *uplo, int *n, float *alpha, float *x, int *incx, float *ap);
+void F_FUNC(sspr2,SSPR2)(char *uplo, int *n, float *alpha, float *x, int *incx, float *y, int *incy, float *ap);
+void F_FUNC(sswap,SSWAP)(int *n, float *sx, int *incx, float *sy, int *incy);
+void F_FUNC(ssymm,SSYMM)(char *side, char *uplo, int *m, int *n, float *alpha, float *a, int *lda, float *b, int *ldb, float *beta, float *c, int *ldc);
+void F_FUNC(ssymv,SSYMV)(char *uplo, int *n, float *alpha, float *a, int *lda, float *x, int *incx, float *beta, float *y, int *incy);
+void F_FUNC(ssyr,SSYR)(char *uplo, int *n, float *alpha, float *x, int *incx, float *a, int *lda);
+void F_FUNC(ssyr2,SSYR2)(char *uplo, int *n, float *alpha, float *x, int *incx, float *y, int *incy, float *a, int *lda);
+void F_FUNC(ssyr2k,SSYR2K)(char *uplo, char *trans, int *n, int *k, float *alpha, float *a, int *lda, float *b, int *ldb, float *beta, float *c, int *ldc);
+void F_FUNC(ssyrk,SSYRK)(char *uplo, char *trans, int *n, int *k, float *alpha, float *a, int *lda, float *beta, float *c, int *ldc);
+void F_FUNC(stbmv,STBMV)(char *uplo, char *trans, char *diag, int *n, int *k, float *a, int *lda, float *x, int *incx);
+void F_FUNC(stbsv,STBSV)(char *uplo, char *trans, char *diag, int *n, int *k, float *a, int *lda, float *x, int *incx);
+void F_FUNC(stpmv,STPMV)(char *uplo, char *trans, char *diag, int *n, float *ap, float *x, int *incx);
+void F_FUNC(stpsv,STPSV)(char *uplo, char *trans, char *diag, int *n, float *ap, float *x, int *incx);
+void F_FUNC(strmm,STRMM)(char *side, char *uplo, char *transa, char *diag, int *m, int *n, float *alpha, float *a, int *lda, float *b, int *ldb);
+void F_FUNC(strmv,STRMV)(char *uplo, char *trans, char *diag, int *n, float *a, int *lda, float *x, int *incx);
+void F_FUNC(strsm,STRSM)(char *side, char *uplo, char *transa, char *diag, int *m, int *n, float *alpha, float *a, int *lda, float *b, int *ldb);
+void F_FUNC(strsv,STRSV)(char *uplo, char *trans, char *diag, int *n, float *a, int *lda, float *x, int *incx);
+void F_FUNC(zaxpy,ZAXPY)(int *n, npy_complex128 *za, npy_complex128 *zx, int *incx, npy_complex128 *zy, int *incy);
+void F_FUNC(zcopy,ZCOPY)(int *n, npy_complex128 *zx, int *incx, npy_complex128 *zy, int *incy);
+void F_FUNC(zdrot,ZDROT)(int *n, npy_complex128 *cx, int *incx, npy_complex128 *cy, int *incy, double *c, double *s);
+void F_FUNC(zdscal,ZDSCAL)(int *n, double *da, npy_complex128 *zx, int *incx);
+void F_FUNC(zgbmv,ZGBMV)(char *trans, int *m, int *n, int *kl, int *ku, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *x, int *incx, npy_complex128 *beta, npy_complex128 *y, int *incy);
+void F_FUNC(zgemm,ZGEMM)(char *transa, char *transb, int *m, int *n, int *k, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *beta, npy_complex128 *c, int *ldc);
+void F_FUNC(zgemv,ZGEMV)(char *trans, int *m, int *n, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *x, int *incx, npy_complex128 *beta, npy_complex128 *y, int *incy);
+void F_FUNC(zgerc,ZGERC)(int *m, int *n, npy_complex128 *alpha, npy_complex128 *x, int *incx, npy_complex128 *y, int *incy, npy_complex128 *a, int *lda);
+void F_FUNC(zgeru,ZGERU)(int *m, int *n, npy_complex128 *alpha, npy_complex128 *x, int *incx, npy_complex128 *y, int *incy, npy_complex128 *a, int *lda);
+void F_FUNC(zhbmv,ZHBMV)(char *uplo, int *n, int *k, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *x, int *incx, npy_complex128 *beta, npy_complex128 *y, int *incy);
+void F_FUNC(zhemm,ZHEMM)(char *side, char *uplo, int *m, int *n, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *beta, npy_complex128 *c, int *ldc);
+void F_FUNC(zhemv,ZHEMV)(char *uplo, int *n, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *x, int *incx, npy_complex128 *beta, npy_complex128 *y, int *incy);
+void F_FUNC(zher,ZHER)(char *uplo, int *n, double *alpha, npy_complex128 *x, int *incx, npy_complex128 *a, int *lda);
+void F_FUNC(zher2,ZHER2)(char *uplo, int *n, npy_complex128 *alpha, npy_complex128 *x, int *incx, npy_complex128 *y, int *incy, npy_complex128 *a, int *lda);
+void F_FUNC(zher2k,ZHER2K)(char *uplo, char *trans, int *n, int *k, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, double *beta, npy_complex128 *c, int *ldc);
+void F_FUNC(zherk,ZHERK)(char *uplo, char *trans, int *n, int *k, double *alpha, npy_complex128 *a, int *lda, double *beta, npy_complex128 *c, int *ldc);
+void F_FUNC(zhpmv,ZHPMV)(char *uplo, int *n, npy_complex128 *alpha, npy_complex128 *ap, npy_complex128 *x, int *incx, npy_complex128 *beta, npy_complex128 *y, int *incy);
+void F_FUNC(zhpr,ZHPR)(char *uplo, int *n, double *alpha, npy_complex128 *x, int *incx, npy_complex128 *ap);
+void F_FUNC(zhpr2,ZHPR2)(char *uplo, int *n, npy_complex128 *alpha, npy_complex128 *x, int *incx, npy_complex128 *y, int *incy, npy_complex128 *ap);
+void F_FUNC(zrotg,ZROTG)(npy_complex128 *ca, npy_complex128 *cb, double *c, npy_complex128 *s);
+void F_FUNC(zscal,ZSCAL)(int *n, npy_complex128 *za, npy_complex128 *zx, int *incx);
+void F_FUNC(zswap,ZSWAP)(int *n, npy_complex128 *zx, int *incx, npy_complex128 *zy, int *incy);
+void F_FUNC(zsymm,ZSYMM)(char *side, char *uplo, int *m, int *n, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *beta, npy_complex128 *c, int *ldc);
+void F_FUNC(zsyr2k,ZSYR2K)(char *uplo, char *trans, int *n, int *k, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *beta, npy_complex128 *c, int *ldc);
+void F_FUNC(zsyrk,ZSYRK)(char *uplo, char *trans, int *n, int *k, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *beta, npy_complex128 *c, int *ldc);
+void F_FUNC(ztbmv,ZTBMV)(char *uplo, char *trans, char *diag, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *x, int *incx);
+void F_FUNC(ztbsv,ZTBSV)(char *uplo, char *trans, char *diag, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *x, int *incx);
+void F_FUNC(ztpmv,ZTPMV)(char *uplo, char *trans, char *diag, int *n, npy_complex128 *ap, npy_complex128 *x, int *incx);
+void F_FUNC(ztpsv,ZTPSV)(char *uplo, char *trans, char *diag, int *n, npy_complex128 *ap, npy_complex128 *x, int *incx);
+void F_FUNC(ztrmm,ZTRMM)(char *side, char *uplo, char *transa, char *diag, int *m, int *n, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb);
+void F_FUNC(ztrmv,ZTRMV)(char *uplo, char *trans, char *diag, int *n, npy_complex128 *a, int *lda, npy_complex128 *x, int *incx);
+void F_FUNC(ztrsm,ZTRSM)(char *side, char *uplo, char *transa, char *diag, int *m, int *n, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb);
+void F_FUNC(ztrsv,ZTRSV)(char *uplo, char *trans, char *diag, int *n, npy_complex128 *a, int *lda, npy_complex128 *x, int *incx);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/_cythonized_array_utils.pxd b/__packaged__/coreml/.python_dependencies/scipy/linalg/_cythonized_array_utils.pxd
new file mode 100644
index 00000000..6263fa24
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/_cythonized_array_utils.pxd
@@ -0,0 +1,40 @@
+cimport numpy as cnp
+
+ctypedef fused lapack_t:
+    float
+    double
+    (float complex)
+    (double complex)
+
+ctypedef fused lapack_cz_t:
+    (float complex)
+    (double complex)
+
+ctypedef fused lapack_sd_t:
+    float
+    double
+
+ctypedef fused np_numeric_t:
+    cnp.int8_t
+    cnp.int16_t
+    cnp.int32_t
+    cnp.int64_t
+    cnp.uint8_t
+    cnp.uint16_t
+    cnp.uint32_t
+    cnp.uint64_t
+    cnp.float32_t
+    cnp.float64_t
+    cnp.longdouble_t
+    cnp.complex64_t
+    cnp.complex128_t
+
+ctypedef fused np_complex_numeric_t:
+    cnp.complex64_t
+    cnp.complex128_t
+
+
+cdef void swap_c_and_f_layout(lapack_t *a, lapack_t *b, int r, int c, int n) nogil
+cdef (int, int) band_check_internal_c(np_numeric_t[:, ::1]A) nogil
+cdef bint is_sym_her_real_c_internal(np_numeric_t[:, ::1]A) nogil
+cdef bint is_sym_her_complex_c_internal(np_complex_numeric_t[:, ::1]A) nogil
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/_cythonized_array_utils.pyi b/__packaged__/coreml/.python_dependencies/scipy/linalg/_cythonized_array_utils.pyi
new file mode 100644
index 00000000..3ec63ac1
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/_cythonized_array_utils.pyi
@@ -0,0 +1,16 @@
+from numpy.typing import NDArray
+from typing import Any, Tuple
+
+def bandwidth(a: NDArray[Any]) -> Tuple[int, int]: ...
+
+def issymmetric(
+    a: NDArray[Any],
+    atol: None | float = ...,
+    rtol: None | float = ...,
+) -> bool: ...
+
+def ishermitian(
+    a: NDArray[Any],
+    atol: None | float = ...,
+    rtol: None | float = ...,
+) -> bool: ...
\ No newline at end of file
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/_decomp.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/_decomp.py
new file mode 100644
index 00000000..aa9ff01f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/_decomp.py
@@ -0,0 +1,1603 @@
+# -*- coding: utf-8 -*-
+#
+# Author: Pearu Peterson, March 2002
+#
+# additions by Travis Oliphant, March 2002
+# additions by Eric Jones,      June 2002
+# additions by Johannes Loehnert, June 2006
+# additions by Bart Vandereycken, June 2006
+# additions by Andrew D Straw, May 2007
+# additions by Tiziano Zito, November 2008
+#
+# April 2010: Functions for LU, QR, SVD, Schur, and Cholesky decompositions
+# were moved to their own files. Still in this file are functions for
+# eigenstuff and for the Hessenberg form.
+
+__all__ = ['eig', 'eigvals', 'eigh', 'eigvalsh',
+           'eig_banded', 'eigvals_banded',
+           'eigh_tridiagonal', 'eigvalsh_tridiagonal', 'hessenberg', 'cdf2rdf']
+
+import warnings
+
+import numpy
+from numpy import (array, isfinite, inexact, nonzero, iscomplexobj, cast,
+                   flatnonzero, conj, asarray, argsort, empty,
+                   iscomplex, zeros, einsum, eye, inf)
+# Local imports
+from scipy._lib._util import _asarray_validated
+from ._misc import LinAlgError, _datacopied, norm
+from .lapack import get_lapack_funcs, _compute_lwork
+
+
+_I = cast['F'](1j)
+
+
+def _make_complex_eigvecs(w, vin, dtype):
+    """
+    Produce complex-valued eigenvectors from LAPACK DGGEV real-valued output
+    """
+    # - see LAPACK man page DGGEV at ALPHAI
+    v = numpy.array(vin, dtype=dtype)
+    m = (w.imag > 0)
+    m[:-1] |= (w.imag[1:] < 0)  # workaround for LAPACK bug, cf. ticket #709
+    for i in flatnonzero(m):
+        v.imag[:, i] = vin[:, i+1]
+        conj(v[:, i], v[:, i+1])
+    return v
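+
+# Editorial note (not from the original sources): LAPACK's real *GEEV/*GGEV
+# routines pack a complex conjugate eigenvector pair into two consecutive
+# real columns, the shared real part in column ``i`` and the imaginary part
+# in column ``i + 1``. The loop above rebuilds the complex pair, filling in
+# ``v[:, i]`` and then writing ``v[:, i + 1] = conj(v[:, i])``.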
+
+
+def _make_eigvals(alpha, beta, homogeneous_eigvals):
+    if homogeneous_eigvals:
+        if beta is None:
+            return numpy.vstack((alpha, numpy.ones_like(alpha)))
+        else:
+            return numpy.vstack((alpha, beta))
+    else:
+        if beta is None:
+            return alpha
+        else:
+            w = numpy.empty_like(alpha)
+            alpha_zero = (alpha == 0)
+            beta_zero = (beta == 0)
+            beta_nonzero = ~beta_zero
+            w[beta_nonzero] = alpha[beta_nonzero]/beta[beta_nonzero]
+            # Use numpy.inf for complex values too since
+            # 1/numpy.inf = 0, i.e., it correctly behaves as projective
+            # infinity.
+            w[~alpha_zero & beta_zero] = numpy.inf
+            if numpy.all(alpha.imag == 0):
+                w[alpha_zero & beta_zero] = numpy.nan
+            else:
+                w[alpha_zero & beta_zero] = complex(numpy.nan, numpy.nan)
+            return w
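+
+# Editorial illustration (not part of the module API): with
+# ``alpha = np.array([2., 1., 0.])`` and ``beta = np.array([1., 0., 0.])``,
+# ``_make_eigvals(alpha, beta, False)`` gives ``[2., inf, nan]``: a finite
+# eigenvalue, a projective infinity where only ``beta`` is zero, and NaN for
+# the indeterminate 0/0 pair. With ``homogeneous_eigvals=True`` it simply
+# returns ``np.vstack((alpha, beta))``.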
+
+
+def _geneig(a1, b1, left, right, overwrite_a, overwrite_b,
+            homogeneous_eigvals):
+    ggev, = get_lapack_funcs(('ggev',), (a1, b1))
+    cvl, cvr = left, right
+    res = ggev(a1, b1, lwork=-1)
+    lwork = res[-2][0].real.astype(numpy.int_)
+    if ggev.typecode in 'cz':
+        alpha, beta, vl, vr, work, info = ggev(a1, b1, cvl, cvr, lwork,
+                                               overwrite_a, overwrite_b)
+        w = _make_eigvals(alpha, beta, homogeneous_eigvals)
+    else:
+        alphar, alphai, beta, vl, vr, work, info = ggev(a1, b1, cvl, cvr,
+                                                        lwork, overwrite_a,
+                                                        overwrite_b)
+        alpha = alphar + _I * alphai
+        w = _make_eigvals(alpha, beta, homogeneous_eigvals)
+    _check_info(info, 'generalized eig algorithm (ggev)')
+
+    only_real = numpy.all(w.imag == 0.0)
+    if not (ggev.typecode in 'cz' or only_real):
+        t = w.dtype.char
+        if left:
+            vl = _make_complex_eigvecs(w, vl, t)
+        if right:
+            vr = _make_complex_eigvecs(w, vr, t)
+
+    # the eigenvectors returned by the lapack function are NOT normalized
+    for i in range(vr.shape[0]):
+        if right:
+            vr[:, i] /= norm(vr[:, i])
+        if left:
+            vl[:, i] /= norm(vl[:, i])
+
+    if not (left or right):
+        return w
+    if left:
+        if right:
+            return w, vl, vr
+        return w, vl
+    return w, vr
+
+
+def eig(a, b=None, left=False, right=True, overwrite_a=False,
+        overwrite_b=False, check_finite=True, homogeneous_eigvals=False):
+    """
+    Solve an ordinary or generalized eigenvalue problem of a square matrix.
+
+    Find eigenvalues w and right or left eigenvectors of a general matrix::
+
+        a   vr[:,i] = w[i]        b   vr[:,i]
+        a.H vl[:,i] = w[i].conj() b.H vl[:,i]
+
+    where ``.H`` is the Hermitian conjugation.
+
+    Parameters
+    ----------
+    a : (M, M) array_like
+        A complex or real matrix whose eigenvalues and eigenvectors
+        will be computed.
+    b : (M, M) array_like, optional
+        Right-hand side matrix in a generalized eigenvalue problem.
+        Default is None, in which case the identity matrix is assumed.
+    left : bool, optional
+        Whether to calculate and return left eigenvectors.  Default is False.
+    right : bool, optional
+        Whether to calculate and return right eigenvectors.  Default is True.
+    overwrite_a : bool, optional
+        Whether to overwrite `a`; may improve performance.  Default is False.
+    overwrite_b : bool, optional
+        Whether to overwrite `b`; may improve performance.  Default is False.
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+    homogeneous_eigvals : bool, optional
+        If True, return the eigenvalues in homogeneous coordinates.
+        In this case ``w`` is a (2, M) array so that::
+
+            w[1,i] a vr[:,i] = w[0,i] b vr[:,i]
+
+        Default is False.
+
+    Returns
+    -------
+    w : (M,) or (2, M) double or complex ndarray
+        The eigenvalues, each repeated according to its
+        multiplicity. The shape is (M,) unless
+        ``homogeneous_eigvals=True``.
+    vl : (M, M) double or complex ndarray
+        The normalized left eigenvector corresponding to the eigenvalue
+        ``w[i]`` is the column vl[:,i]. Only returned if ``left=True``.
+    vr : (M, M) double or complex ndarray
+        The normalized right eigenvector corresponding to the eigenvalue
+        ``w[i]`` is the column ``vr[:,i]``.  Only returned if ``right=True``.
+
+    Raises
+    ------
+    LinAlgError
+        If eigenvalue computation does not converge.
+
+    See Also
+    --------
+    eigvals : eigenvalues of general arrays
+    eigh : Eigenvalues and right eigenvectors for symmetric/Hermitian arrays.
+    eig_banded : eigenvalues and right eigenvectors for symmetric/Hermitian
+        band matrices
+    eigh_tridiagonal : eigenvalues and right eigenvectors for
+        symmetric/Hermitian tridiagonal matrices
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import linalg
+    >>> a = np.array([[0., -1.], [1., 0.]])
+    >>> linalg.eigvals(a)
+    array([0.+1.j, 0.-1.j])
+
+    >>> b = np.array([[0., 1.], [1., 1.]])
+    >>> linalg.eigvals(a, b)
+    array([ 1.+0.j, -1.+0.j])
+
+    >>> a = np.array([[3., 0., 0.], [0., 8., 0.], [0., 0., 7.]])
+    >>> linalg.eigvals(a, homogeneous_eigvals=True)
+    array([[3.+0.j, 8.+0.j, 7.+0.j],
+           [1.+0.j, 1.+0.j, 1.+0.j]])
+
+    >>> a = np.array([[0., -1.], [1., 0.]])
+    >>> linalg.eigvals(a) == linalg.eig(a)[0]
+    array([ True,  True])
+    >>> linalg.eig(a, left=True, right=False)[1] # normalized left eigenvector
+    array([[-0.70710678+0.j        , -0.70710678-0.j        ],
+           [-0.        +0.70710678j, -0.        -0.70710678j]])
+    >>> linalg.eig(a, left=False, right=True)[1] # normalized right eigenvector
+    array([[0.70710678+0.j        , 0.70710678-0.j        ],
+           [0.        -0.70710678j, 0.        +0.70710678j]])
+
+    """
+    a1 = _asarray_validated(a, check_finite=check_finite)
+    if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
+        raise ValueError('expected square matrix')
+    overwrite_a = overwrite_a or (_datacopied(a1, a))
+    if b is not None:
+        b1 = _asarray_validated(b, check_finite=check_finite)
+        overwrite_b = overwrite_b or _datacopied(b1, b)
+        if len(b1.shape) != 2 or b1.shape[0] != b1.shape[1]:
+            raise ValueError('expected square matrix')
+        if b1.shape != a1.shape:
+            raise ValueError('a and b must have the same shape')
+        return _geneig(a1, b1, left, right, overwrite_a, overwrite_b,
+                       homogeneous_eigvals)
+
+    geev, geev_lwork = get_lapack_funcs(('geev', 'geev_lwork'), (a1,))
+    compute_vl, compute_vr = left, right
+
+    lwork = _compute_lwork(geev_lwork, a1.shape[0],
+                           compute_vl=compute_vl,
+                           compute_vr=compute_vr)
+
+    if geev.typecode in 'cz':
+        w, vl, vr, info = geev(a1, lwork=lwork,
+                               compute_vl=compute_vl,
+                               compute_vr=compute_vr,
+                               overwrite_a=overwrite_a)
+        w = _make_eigvals(w, None, homogeneous_eigvals)
+    else:
+        wr, wi, vl, vr, info = geev(a1, lwork=lwork,
+                                    compute_vl=compute_vl,
+                                    compute_vr=compute_vr,
+                                    overwrite_a=overwrite_a)
+        t = {'f': 'F', 'd': 'D'}[wr.dtype.char]
+        w = wr + _I * wi
+        w = _make_eigvals(w, None, homogeneous_eigvals)
+
+    _check_info(info, 'eig algorithm (geev)',
+                positive='did not converge (only eigenvalues '
+                         'with order >= %d have converged)')
+
+    only_real = numpy.all(w.imag == 0.0)
+    if not (geev.typecode in 'cz' or only_real):
+        t = w.dtype.char
+        if left:
+            vl = _make_complex_eigvecs(w, vl, t)
+        if right:
+            vr = _make_complex_eigvecs(w, vr, t)
+    if not (left or right):
+        return w
+    if left:
+        if right:
+            return w, vl, vr
+        return w, vl
+    return w, vr
+
+
+def eigh(a, b=None, lower=True, eigvals_only=False, overwrite_a=False,
+         overwrite_b=False, turbo=False, eigvals=None, type=1,
+         check_finite=True, subset_by_index=None, subset_by_value=None,
+         driver=None):
+    """
+    Solve a standard or generalized eigenvalue problem for a complex
+    Hermitian or real symmetric matrix.
+
+    Find eigenvalues array ``w`` and optionally eigenvectors array ``v`` of
+    array ``a``, where ``b`` is positive definite such that for every
+    eigenvalue λ (i-th entry of w) and its eigenvector ``vi`` (i-th column of
+    ``v``) satisfies::
+
+                      a @ vi = λ * b @ vi
+        vi.conj().T @ a @ vi = λ
+        vi.conj().T @ b @ vi = 1
+
+    In the standard problem, ``b`` is assumed to be the identity matrix.
+
+    Parameters
+    ----------
+    a : (M, M) array_like
+        A complex Hermitian or real symmetric matrix whose eigenvalues and
+        eigenvectors will be computed.
+    b : (M, M) array_like, optional
+        A complex Hermitian or real symmetric positive definite matrix.
+        If omitted, the identity matrix is assumed.
+    lower : bool, optional
+        Whether the pertinent array data is taken from the lower or upper
+        triangle of ``a`` and, if applicable, ``b``. (Default: lower)
+    eigvals_only : bool, optional
+        Whether to calculate only eigenvalues and no eigenvectors.
+        (Default: both are calculated)
+    subset_by_index : iterable, optional
+        If provided, this two-element iterable defines the start and the end
+        indices of the desired eigenvalues (ascending order and 0-indexed).
+        To return only the second smallest to fifth smallest eigenvalues,
+        ``[1, 4]`` is used. ``[n-3, n-1]`` returns the largest three. Only
+        available with "evr", "evx", and "gvx" drivers. The entries are
+        directly converted to integers via ``int()``.
+    subset_by_value : iterable, optional
+        If provided, this two-element iterable defines the half-open interval
+        ``(a, b]``; only the eigenvalues between these values
+        are returned. Only available with "evr", "evx", and "gvx" drivers. Use
+        ``np.inf`` for the unconstrained ends.
+    driver : str, optional
+        Defines which LAPACK driver should be used. Valid options are "ev",
+        "evd", "evr", "evx" for standard problems and "gv", "gvd", "gvx" for
+        generalized (where b is not None) problems. See the Notes section.
+        The default for standard problems is "evr". For generalized problems,
+        "gvd" is used for full set, and "gvx" for subset requested cases.
+    type : int, optional
+        For the generalized problems, this keyword specifies the problem type
+        to be solved for ``w`` and ``v`` (only takes 1, 2, 3 as possible
+        inputs)::
+
+            1 =>     a @ v = w @ b @ v
+            2 => a @ b @ v = w @ v
+            3 => b @ a @ v = w @ v
+
+        This keyword is ignored for standard problems.
+    overwrite_a : bool, optional
+        Whether to overwrite data in ``a`` (may improve performance). Default
+        is False.
+    overwrite_b : bool, optional
+        Whether to overwrite data in ``b`` (may improve performance). Default
+        is False.
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+    turbo : bool, optional, deprecated
+            .. deprecated:: 1.5.0
+                `eigh` keyword argument `turbo` is deprecated in favour of
+                the ``driver=gvd`` keyword and will be removed in SciPy
+                1.12.0.
+    eigvals : tuple (lo, hi), optional, deprecated
+            .. deprecated:: 1.5.0
+                `eigh` keyword argument `eigvals` is deprecated in favour of
+                the `subset_by_index` keyword and will be removed in SciPy
+                1.12.0.
+
+    Returns
+    -------
+    w : (N,) ndarray
+        The N (1<=N<=M) selected eigenvalues, in ascending order, each
+        repeated according to its multiplicity.
+    v : (M, N) ndarray
+        The normalized eigenvector corresponding to the eigenvalue ``w[i]``
+        is the column ``v[:, i]``. Only returned if ``eigvals_only == False``.
+
+    Raises
+    ------
+    LinAlgError
+        If eigenvalue computation does not converge, an error occurred, or
+        the b matrix is not positive definite. Note that if the input
+        matrices are not symmetric or Hermitian, no error will be reported
+        but results will be wrong.
+
+    See Also
+    --------
+    eigvalsh : eigenvalues of symmetric or Hermitian arrays
+    eig : eigenvalues and right eigenvectors for non-symmetric arrays
+    eigh_tridiagonal : eigenvalues and right eigenvectors for
+        symmetric/Hermitian tridiagonal matrices
+
+    Notes
+    -----
+    This function does not check the input array for being Hermitian or
+    symmetric, in order to allow for representing arrays with only their
+    upper/lower triangular parts. Also note that, even though not used in
+    the computation, the finiteness check applies to the whole array and is
+    unaffected by the "lower" keyword.
+
+    This function uses LAPACK drivers for computations in all possible keyword
+    combinations, prefixed with ``sy`` if arrays are real and ``he`` if
+    complex, e.g., a float array with the "evr" driver is solved via
+    "syevr", a complex array with the "gvx" driver via "hegvx",
+    etc.
+
+    As a brief summary, the slowest and most robust driver is the
+    classical ``ev``, which uses symmetric QR. ``evr`` is seen as
+    the optimal choice for the most general cases. However, on certain
+    occasions ``evd`` computes faster at the expense of more
+    memory usage. ``evx``, while still faster than ``ev``,
+    often performs worse than the rest except when very few eigenvalues are
+    requested for large arrays, though there is still no performance
+    guarantee.
+
+    For the generalized problem, normalization with respect to the given
+    type argument::
+
+            type 1 and 3 :      v.conj().T @ a @ v = w
+            type 2       : inv(v).conj().T @ a @ inv(v) = w
+
+            type 1 or 2  :      v.conj().T @ b @ v  = I
+            type 3       : v.conj().T @ inv(b) @ v  = I
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import eigh
+    >>> A = np.array([[6, 3, 1, 5], [3, 0, 5, 1], [1, 5, 6, 2], [5, 1, 2, 2]])
+    >>> w, v = eigh(A)
+    >>> np.allclose(A @ v - v @ np.diag(w), np.zeros((4, 4)))
+    True
+
+    Request only the eigenvalues
+
+    >>> w = eigh(A, eigvals_only=True)
+
+    Request eigenvalues that are less than 10.
+
+    >>> A = np.array([[34, -4, -10, -7, 2],
+    ...               [-4, 7, 2, 12, 0],
+    ...               [-10, 2, 44, 2, -19],
+    ...               [-7, 12, 2, 79, -34],
+    ...               [2, 0, -19, -34, 29]])
+    >>> eigh(A, eigvals_only=True, subset_by_value=[-np.inf, 10])
+    array([6.69199443e-07, 9.11938152e+00])
+
+    Request the second smallest eigenvalue and its eigenvector
+
+    >>> w, v = eigh(A, subset_by_index=[1, 1])
+    >>> w
+    array([9.11938152])
+    >>> v.shape  # only a single column is returned
+    (5, 1)
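+
+    Request a specific LAPACK driver (an editorial addition to these
+    examples; any driver listed in the Notes that is compatible with the
+    problem may be named explicitly):
+
+    >>> w = eigh(A, eigvals_only=True, driver='evd')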
+
+    """
+    if turbo:
+        warnings.warn("Keyword argument 'turbo' is deprecated in favour of "
+                      "the 'driver=gvd' keyword and will be removed in "
+                      "SciPy 1.12.0.",
+                      DeprecationWarning, stacklevel=2)
+    if eigvals:
+        warnings.warn("Keyword argument 'eigvals' is deprecated in favour of "
+                      "the 'subset_by_index' keyword and will be removed "
+                      "in SciPy 1.12.0.",
+                      DeprecationWarning, stacklevel=2)
+
+    # set lower
+    uplo = 'L' if lower else 'U'
+    # Set job for Fortran routines
+    _job = 'N' if eigvals_only else 'V'
+
+    drv_str = [None, "ev", "evd", "evr", "evx", "gv", "gvd", "gvx"]
+    if driver not in drv_str:
+        raise ValueError('"{}" is unknown. Possible values are "None", "{}".'
+                         ''.format(driver, '", "'.join(drv_str[1:])))
+
+    a1 = _asarray_validated(a, check_finite=check_finite)
+    if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
+        raise ValueError('expected square "a" matrix')
+    overwrite_a = overwrite_a or (_datacopied(a1, a))
+    cplx = iscomplexobj(a1)
+    n = a1.shape[0]
+    drv_args = {'overwrite_a': overwrite_a}
+
+    if b is not None:
+        b1 = _asarray_validated(b, check_finite=check_finite)
+        overwrite_b = overwrite_b or _datacopied(b1, b)
+        if len(b1.shape) != 2 or b1.shape[0] != b1.shape[1]:
+            raise ValueError('expected square "b" matrix')
+
+        if b1.shape != a1.shape:
+            raise ValueError("wrong b dimensions {}, should "
+                             "be {}".format(b1.shape, a1.shape))
+
+        if type not in [1, 2, 3]:
+            raise ValueError('"type" keyword only accepts 1, 2, and 3.')
+
+        cplx = cplx or iscomplexobj(b1)
+        drv_args.update({'overwrite_b': overwrite_b, 'itype': type})
+
+    # backwards-compatibility handling
+    subset_by_index = subset_by_index if (eigvals is None) else eigvals
+
+    subset = (subset_by_index is not None) or (subset_by_value is not None)
+
+    # Both subsets can't be given
+    if subset_by_index and subset_by_value:
+        raise ValueError('Either index or value subset can be requested.')
+
+    # Take turbo into account if all conditions are met, otherwise ignore it
+    if turbo and b is not None:
+        driver = 'gvx' if subset else 'gvd'
+
+    # Check indices if given
+    if subset_by_index:
+        lo, hi = [int(x) for x in subset_by_index]
+        if not (0 <= lo <= hi < n):
+            raise ValueError('Requested eigenvalue indices are not valid. '
+                             'Valid range is [0, {}] and start <= end, but '
+                             'start={}, end={} is given'.format(n-1, lo, hi))
+        # fortran is 1-indexed
+        drv_args.update({'range': 'I', 'il': lo + 1, 'iu': hi + 1})
+
+    if subset_by_value:
+        lo, hi = subset_by_value
+        if not (-inf <= lo < hi <= inf):
+            raise ValueError('Requested eigenvalue bounds are not valid. '
+                             'Valid range is (-inf, inf) and low < high, but '
+                             'low={}, high={} is given'.format(lo, hi))
+
+        drv_args.update({'range': 'V', 'vl': lo, 'vu': hi})
+
+    # fix prefix for lapack routines
+    pfx = 'he' if cplx else 'sy'
+
+    # decide on the driver if not given
+    # first early exit on incompatible choice
+    if driver:
+        if b is None and (driver in ["gv", "gvd", "gvx"]):
+            raise ValueError('{} requires input b array to be supplied '
+                             'for generalized eigenvalue problems.'
+                             ''.format(driver))
+        if (b is not None) and (driver in ['ev', 'evd', 'evr', 'evx']):
+            raise ValueError('"{}" does not accept input b array '
+                             'for standard eigenvalue problems.'
+                             ''.format(driver))
+        if subset and (driver in ["ev", "evd", "gv", "gvd"]):
+            raise ValueError('"{}" cannot compute subsets of eigenvalues'
+                             ''.format(driver))
+
+    # Default driver is evr and gvd
+    else:
+        driver = "evr" if b is None else ("gvx" if subset else "gvd")
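+        # e.g. eigh(a) resolves to syevr/heevr and eigh(a, b) to
+        # sygvd/hegvd, or sygvx/hegvx when a subset was requested.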
+
+    lwork_spec = {
+                  'syevd': ['lwork', 'liwork'],
+                  'syevr': ['lwork', 'liwork'],
+                  'heevd': ['lwork', 'liwork', 'lrwork'],
+                  'heevr': ['lwork', 'lrwork', 'liwork'],
+                  }
+
+    if b is None:  # Standard problem
+        drv, drvlw = get_lapack_funcs((pfx + driver, pfx+driver+'_lwork'),
+                                      [a1])
+        clw_args = {'n': n, 'lower': lower}
+        if driver == 'evd':
+            clw_args.update({'compute_v': 0 if _job == "N" else 1})
+
+        lw = _compute_lwork(drvlw, **clw_args)
+        # Multiple lwork vars
+        if isinstance(lw, tuple):
+            lwork_args = dict(zip(lwork_spec[pfx+driver], lw))
+        else:
+            lwork_args = {'lwork': lw}
+
+        drv_args.update({'lower': lower, 'compute_v': 0 if _job == "N" else 1})
+        w, v, *other_args, info = drv(a=a1, **drv_args, **lwork_args)
+
+    else:  # Generalized problem
+        # 'gvd' doesn't have lwork query
+        if driver == "gvd":
+            drv = get_lapack_funcs(pfx + "gvd", [a1, b1])
+            lwork_args = {}
+        else:
+            drv, drvlw = get_lapack_funcs((pfx + driver, pfx+driver+'_lwork'),
+                                          [a1, b1])
+            # generalized drivers use uplo instead of lower
+            lw = _compute_lwork(drvlw, n, uplo=uplo)
+            lwork_args = {'lwork': lw}
+
+        drv_args.update({'uplo': uplo, 'jobz': _job})
+
+        w, v, *other_args, info = drv(a=a1, b=b1, **drv_args, **lwork_args)
+
+    # m is always the first extra argument
+    w = w[:other_args[0]] if subset else w
+    v = v[:, :other_args[0]] if (subset and not eigvals_only) else v
+
+    # Check if we had a successful exit
+    if info == 0:
+        if eigvals_only:
+            return w
+        else:
+            return w, v
+    else:
+        if info < -1:
+            raise LinAlgError('Illegal value in argument {} of internal {}'
+                              ''.format(-info, drv.typecode + pfx + driver))
+        elif info > n:
+            raise LinAlgError('The leading minor of order {} of B is not '
+                              'positive definite. The factorization of B '
+                              'could not be completed and no eigenvalues '
+                              'or eigenvectors were computed.'.format(info-n))
+        else:
+            drv_err = {'ev': 'The algorithm failed to converge; {} '
+                             'off-diagonal elements of an intermediate '
+                             'tridiagonal form did not converge to zero.',
+                       'evx': '{} eigenvectors failed to converge.',
+                       'evd': 'The algorithm failed to compute an eigenvalue '
+                              'while working on the submatrix lying in rows '
+                              'and columns {0}/{1} through mod({0},{1}).',
+                       'evr': 'Internal Error.'
+                       }
+            if driver in ['ev', 'gv']:
+                msg = drv_err['ev'].format(info)
+            elif driver in ['evx', 'gvx']:
+                msg = drv_err['evx'].format(info)
+            elif driver in ['evd', 'gvd']:
+                if eigvals_only:
+                    msg = drv_err['ev'].format(info)
+                else:
+                    msg = drv_err['evd'].format(info, n+1)
+            else:
+                msg = drv_err['evr']
+
+            raise LinAlgError(msg)
+
+
+_conv_dict = {0: 0, 1: 1, 2: 2,
+              'all': 0, 'value': 1, 'index': 2,
+              'a': 0, 'v': 1, 'i': 2}
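+# For example, select='v' (or 'value', or 1) maps to 1, which routes
+# _check_select below into the value-interval branch.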
+
+
+def _check_select(select, select_range, max_ev, max_len):
+    """Check that select is valid, convert to Fortran style."""
+    if isinstance(select, str):
+        select = select.lower()
+    try:
+        select = _conv_dict[select]
+    except KeyError as e:
+        raise ValueError('invalid argument for select') from e
+    vl, vu = 0., 1.
+    il = iu = 1
+    if select != 0:  # (non-all)
+        sr = asarray(select_range)
+        if sr.ndim != 1 or sr.size != 2 or sr[1] < sr[0]:
+            raise ValueError('select_range must be a 2-element array-like '
+                             'in nondecreasing order')
+        if select == 1:  # (value)
+            vl, vu = sr
+            if max_ev == 0:
+                max_ev = max_len
+        else:  # 2 (index)
+            if sr.dtype.char.lower() not in 'hilqp':
+                raise ValueError('when using select="i", select_range must '
+                                 'contain integers, got dtype %s (%s)'
+                                 % (sr.dtype, sr.dtype.char))
+            # translate Python (0 ... N-1) into Fortran (1 ... N) with + 1
+            il, iu = sr + 1
+            if min(il, iu) < 1 or max(il, iu) > max_len:
+                raise ValueError('select_range out of bounds')
+            max_ev = iu - il + 1
+    return select, vl, vu, il, iu, max_ev
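+
+# A worked example of the conversion above (editor's sketch, assuming a 5x5
+# matrix): select='i' with select_range=(1, 3) yields select == 2, il == 2,
+# iu == 4 (Fortran indices are 1-based and inclusive) and max_ev == 3.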
+
+
+def eig_banded(a_band, lower=False, eigvals_only=False, overwrite_a_band=False,
+               select='a', select_range=None, max_ev=0, check_finite=True):
+    """
+    Solve real symmetric or complex Hermitian band matrix eigenvalue problem.
+
+    Find eigenvalues w and optionally right eigenvectors v of a::
+
+        a v[:,i] = w[i] v[:,i]
+        v.H v    = identity
+
+    The matrix a is stored in a_band either in lower diagonal or upper
+    diagonal ordered form:
+
+        a_band[u + i - j, j] == a[i,j]        (if upper form; i <= j)
+        a_band[    i - j, j] == a[i,j]        (if lower form; i >= j)
+
+    where u is the number of bands above the diagonal.
+
+    Example of a_band (shape of a is (6,6), u=2)::
+
+        upper form:
+        *   *   a02 a13 a24 a35
+        *   a01 a12 a23 a34 a45
+        a00 a11 a22 a33 a44 a55
+
+        lower form:
+        a00 a11 a22 a33 a44 a55
+        a10 a21 a32 a43 a54 *
+        a20 a31 a42 a53 *   *
+
+    Cells marked with * are not used.
+
+    Parameters
+    ----------
+    a_band : (u+1, M) array_like
+        The bands of the M by M matrix a.
+    lower : bool, optional
+        Is the matrix in the lower form. (Default is upper form)
+    eigvals_only : bool, optional
+        Compute only the eigenvalues and no eigenvectors.
+        (Default: calculate also eigenvectors)
+    overwrite_a_band : bool, optional
+        Discard data in a_band (may enhance performance)
+    select : {'a', 'v', 'i'}, optional
+        Which eigenvalues to calculate
+
+        ======  ========================================
+        select  calculated
+        ======  ========================================
+        'a'     All eigenvalues
+        'v'     Eigenvalues in the interval (min, max]
+        'i'     Eigenvalues with indices min <= i <= max
+        ======  ========================================
+    select_range : (min, max), optional
+        Range of selected eigenvalues
+    max_ev : int, optional
+        For select=='v', maximum number of eigenvalues expected.
+        For other values of select, it has no meaning.
+
+        If in doubt, leave this parameter untouched.
+
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    w : (M,) ndarray
+        The eigenvalues, in ascending order, each repeated according to its
+        multiplicity.
+    v : (M, M) float or complex ndarray
+        The normalized eigenvector corresponding to the eigenvalue w[i] is
+        the column v[:,i].
+
+    Raises
+    ------
+    LinAlgError
+        If eigenvalue computation does not converge.
+
+    See Also
+    --------
+    eigvals_banded : eigenvalues for symmetric/Hermitian band matrices
+    eig : eigenvalues and right eigenvectors of general arrays.
+    eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays
+    eigh_tridiagonal : eigenvalues and right eigenvectors for
+        symmetric/Hermitian tridiagonal matrices
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import eig_banded
+    >>> A = np.array([[1, 5, 2, 0], [5, 2, 5, 2], [2, 5, 3, 5], [0, 2, 5, 4]])
+    >>> Ab = np.array([[1, 2, 3, 4], [5, 5, 5, 0], [2, 2, 0, 0]])
+    >>> w, v = eig_banded(Ab, lower=True)
+    >>> np.allclose(A @ v - v @ np.diag(w), np.zeros((4, 4)))
+    True
+    >>> w = eig_banded(Ab, lower=True, eigvals_only=True)
+    >>> w
+    array([-4.26200532, -2.22987175,  3.95222349, 12.53965359])
+
+    Request only the eigenvalues between ``[-3, 4]``
+
+    >>> w, v = eig_banded(Ab, lower=True, select='v', select_range=[-3, 4])
+    >>> w
+    array([-2.22987175,  3.95222349])
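+
+    A subset can also be requested by index; here the two smallest
+    eigenvalues (consistent with the full spectrum shown above):
+
+    >>> w, v = eig_banded(Ab, lower=True, select='i', select_range=[0, 1])
+    >>> w
+    array([-4.26200532, -2.22987175])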
+
+    """
+    if eigvals_only or overwrite_a_band:
+        a1 = _asarray_validated(a_band, check_finite=check_finite)
+        overwrite_a_band = overwrite_a_band or (_datacopied(a1, a_band))
+    else:
+        a1 = array(a_band)
+        if issubclass(a1.dtype.type, inexact) and not isfinite(a1).all():
+            raise ValueError("array must not contain infs or NaNs")
+        overwrite_a_band = 1
+
+    if len(a1.shape) != 2:
+        raise ValueError('expected a 2-D array')
+    select, vl, vu, il, iu, max_ev = _check_select(
+        select, select_range, max_ev, a1.shape[1])
+    del select_range
+    if select == 0:
+        if a1.dtype.char in 'GFD':
+            # FIXME: implement this at some point, for now go with builtin values
+            # FIXME: calc optimal lwork by calling ?hbevd(lwork=-1)
+            #        or by using calc_lwork.f ???
+            # lwork = calc_lwork.hbevd(bevd.typecode, a1.shape[0], lower)
+            internal_name = 'hbevd'
+        else:  # a1.dtype.char in 'fd':
+            # FIXME: implement this at some point, for now go with builtin values
+            #         see above
+            # lwork = calc_lwork.sbevd(bevd.typecode, a1.shape[0], lower)
+            internal_name = 'sbevd'
+        bevd, = get_lapack_funcs((internal_name,), (a1,))
+        w, v, info = bevd(a1, compute_v=not eigvals_only,
+                          lower=lower, overwrite_ab=overwrite_a_band)
+    else:  # select in [1, 2]
+        if eigvals_only:
+            max_ev = 1
+        # calculate optimal abstol for dsbevx (see manpage)
+        if a1.dtype.char in 'fF':  # single precision
+            lamch, = get_lapack_funcs(('lamch',), (array(0, dtype='f'),))
+        else:
+            lamch, = get_lapack_funcs(('lamch',), (array(0, dtype='d'),))
+        abstol = 2 * lamch('s')
+        if a1.dtype.char in 'GFD':
+            internal_name = 'hbevx'
+        else:  # a1.dtype.char in 'gfd'
+            internal_name = 'sbevx'
+        bevx, = get_lapack_funcs((internal_name,), (a1,))
+        w, v, m, ifail, info = bevx(
+            a1, vl, vu, il, iu, compute_v=not eigvals_only, mmax=max_ev,
+            range=select, lower=lower, overwrite_ab=overwrite_a_band,
+            abstol=abstol)
+        # crop off w and v
+        w = w[:m]
+        if not eigvals_only:
+            v = v[:, :m]
+    _check_info(info, internal_name)
+
+    if eigvals_only:
+        return w
+    return w, v
+
+
+def eigvals(a, b=None, overwrite_a=False, check_finite=True,
+            homogeneous_eigvals=False):
+    """
+    Compute eigenvalues from an ordinary or generalized eigenvalue problem.
+
+    Find eigenvalues of a general matrix::
+
+        a   vr[:,i] = w[i]        b   vr[:,i]
+
+    Parameters
+    ----------
+    a : (M, M) array_like
+        A complex or real matrix whose eigenvalues and eigenvectors
+        will be computed.
+    b : (M, M) array_like, optional
+        Right-hand side matrix in a generalized eigenvalue problem.
+        If omitted, identity matrix is assumed.
+    overwrite_a : bool, optional
+        Whether to overwrite data in a (may improve performance)
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities
+        or NaNs.
+    homogeneous_eigvals : bool, optional
+        If True, return the eigenvalues in homogeneous coordinates.
+        In this case ``w`` is a (2, M) array so that::
+
+            w[1,i] a vr[:,i] = w[0,i] b vr[:,i]
+
+        Default is False.
+
+    Returns
+    -------
+    w : (M,) or (2, M) double or complex ndarray
+        The eigenvalues, each repeated according to its multiplicity
+        but not in any specific order. The shape is (M,) unless
+        ``homogeneous_eigvals=True``.
+
+    Raises
+    ------
+    LinAlgError
+        If eigenvalue computation does not converge
+
+    See Also
+    --------
+    eig : eigenvalues and right eigenvectors of general arrays.
+    eigvalsh : eigenvalues of symmetric or Hermitian arrays
+    eigvals_banded : eigenvalues for symmetric/Hermitian band matrices
+    eigvalsh_tridiagonal : eigenvalues of symmetric/Hermitian tridiagonal
+        matrices
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import linalg
+    >>> a = np.array([[0., -1.], [1., 0.]])
+    >>> linalg.eigvals(a)
+    array([0.+1.j, 0.-1.j])
+
+    >>> b = np.array([[0., 1.], [1., 1.]])
+    >>> linalg.eigvals(a, b)
+    array([ 1.+0.j, -1.+0.j])
+
+    >>> a = np.array([[3., 0., 0.], [0., 8., 0.], [0., 0., 7.]])
+    >>> linalg.eigvals(a, homogeneous_eigvals=True)
+    array([[3.+0.j, 8.+0.j, 7.+0.j],
+           [1.+0.j, 1.+0.j, 1.+0.j]])
+
+    """
+    return eig(a, b=b, left=0, right=0, overwrite_a=overwrite_a,
+               check_finite=check_finite,
+               homogeneous_eigvals=homogeneous_eigvals)
+
+
+def eigvalsh(a, b=None, lower=True, overwrite_a=False,
+             overwrite_b=False, turbo=False, eigvals=None, type=1,
+             check_finite=True, subset_by_index=None, subset_by_value=None,
+             driver=None):
+    """
+    Solves a standard or generalized eigenvalue problem for a complex
+    Hermitian or real symmetric matrix.
+
+    Find the eigenvalues array ``w`` of array ``a``, where ``b`` is positive
+    definite, such that every eigenvalue λ (i-th entry of ``w``) and its
+    eigenvector ``vi`` (i-th column of ``v``) satisfy::
+
+                      a @ vi = λ * b @ vi
+        vi.conj().T @ a @ vi = λ
+        vi.conj().T @ b @ vi = 1
+
+    In the standard problem, b is assumed to be the identity matrix.
+
+    Parameters
+    ----------
+    a : (M, M) array_like
+        A complex Hermitian or real symmetric matrix whose eigenvalues will
+        be computed.
+    b : (M, M) array_like, optional
+        A complex Hermitian or real symmetric positive definite matrix.
+        If omitted, identity matrix is assumed.
+    lower : bool, optional
+        Whether the pertinent array data is taken from the lower or upper
+        triangle of ``a`` and, if applicable, ``b``. (Default: lower)
+    overwrite_a : bool, optional
+        Whether to overwrite data in ``a`` (may improve performance). Default
+        is False.
+    overwrite_b : bool, optional
+        Whether to overwrite data in ``b`` (may improve performance). Default
+        is False.
+    type : int, optional
+        For the generalized problems, this keyword specifies the problem type
+        to be solved for ``w`` and ``v`` (only takes 1, 2, 3 as possible
+        inputs)::
+
+            1 =>     a @ v = w @ b @ v
+            2 => a @ b @ v = w @ v
+            3 => b @ a @ v = w @ v
+
+        This keyword is ignored for standard problems.
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+    subset_by_index : iterable, optional
+        If provided, this two-element iterable defines the start and the end
+        indices of the desired eigenvalues (ascending order and 0-indexed).
+        To return only the second smallest to fifth smallest eigenvalues,
+        ``[1, 4]`` is used. ``[n-3, n-1]`` returns the largest three. Only
+        available with "evr", "evx", and "gvx" drivers. The entries are
+        directly converted to integers via ``int()``.
+    subset_by_value : iterable, optional
+        If provided, this two-element iterable defines the half-open interval
+        ``(a, b]``; only the eigenvalues (if any) that fall between these
+        values are returned. Only available with "evr", "evx", and "gvx"
+        drivers. Use ``np.inf`` for the unconstrained ends.
+    driver : str, optional
+        Defines which LAPACK driver should be used. Valid options are "ev",
+        "evd", "evr", "evx" for standard problems and "gv", "gvd", "gvx" for
+        generalized (where b is not None) problems. See the Notes section of
+        `scipy.linalg.eigh`.
+    turbo : bool, optional, deprecated
+        .. deprecated:: 1.5.0
+            'eigvalsh' keyword argument `turbo` is deprecated in favor of
+            the ``driver='gvd'`` option and will be removed in SciPy 1.12.0.
+
+    eigvals : tuple (lo, hi), optional
+        .. deprecated:: 1.5.0
+            'eigvalsh' keyword argument `eigvals` is deprecated in favor of
+            the `subset_by_index` option and will be removed in SciPy 1.12.0.
+
+    Returns
+    -------
+    w : (N,) ndarray
+        The ``N`` (``1<=N<=M``) selected eigenvalues, in ascending order, each
+        repeated according to its multiplicity.
+
+    Raises
+    ------
+    LinAlgError
+        If eigenvalue computation does not converge, an error occurred, or
+        the b matrix is not positive definite. Note that if the input
+        matrices are not symmetric or Hermitian, no error will be reported
+        but the results will be wrong.
+
+    See Also
+    --------
+    eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays
+    eigvals : eigenvalues of general arrays
+    eigvals_banded : eigenvalues for symmetric/Hermitian band matrices
+    eigvalsh_tridiagonal : eigenvalues of symmetric/Hermitian tridiagonal
+        matrices
+
+    Notes
+    -----
+    This function does not check the input array for being Hermitian/symmetric
+    in order to allow for representing arrays with only their upper/lower
+    triangular parts.
+
+    This function serves as a one-liner shorthand for `scipy.linalg.eigh` with
+    the option ``eigvals_only=True`` to get the eigenvalues and not the
+    eigenvectors. Here it is kept as a legacy convenience. It might be
+    beneficial to use the main function to have full control and to be a bit
+    more pythonic.
+
+    Examples
+    --------
+    For more examples see `scipy.linalg.eigh`.
+
+    >>> import numpy as np
+    >>> from scipy.linalg import eigvalsh
+    >>> A = np.array([[6, 3, 1, 5], [3, 0, 5, 1], [1, 5, 6, 2], [5, 1, 2, 2]])
+    >>> w = eigvalsh(A)
+    >>> w
+    array([-3.74637491, -0.76263923,  6.08502336, 12.42399079])
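+
+    With ``b`` set to the identity, the generalized problem reduces to the
+    standard one (an added sketch reusing ``A`` and ``w`` from above):
+
+    >>> w2 = eigvalsh(A, b=np.eye(4))
+    >>> np.allclose(w, w2)
+    True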
+
+    """
+    return eigh(a, b=b, lower=lower, eigvals_only=True,
+                overwrite_a=overwrite_a, overwrite_b=overwrite_b,
+                turbo=turbo, eigvals=eigvals, type=type,
+                check_finite=check_finite, subset_by_index=subset_by_index,
+                subset_by_value=subset_by_value, driver=driver)
+
+
+def eigvals_banded(a_band, lower=False, overwrite_a_band=False,
+                   select='a', select_range=None, check_finite=True):
+    """
+    Solve real symmetric or complex Hermitian band matrix eigenvalue problem.
+
+    Find eigenvalues w of a::
+
+        a v[:,i] = w[i] v[:,i]
+        v.H v    = identity
+
+    The matrix a is stored in a_band either in lower diagonal or upper
+    diagonal ordered form:
+
+        a_band[u + i - j, j] == a[i,j]        (if upper form; i <= j)
+        a_band[    i - j, j] == a[i,j]        (if lower form; i >= j)
+
+    where u is the number of bands above the diagonal.
+
+    Example of a_band (shape of a is (6,6), u=2)::
+
+        upper form:
+        *   *   a02 a13 a24 a35
+        *   a01 a12 a23 a34 a45
+        a00 a11 a22 a33 a44 a55
+
+        lower form:
+        a00 a11 a22 a33 a44 a55
+        a10 a21 a32 a43 a54 *
+        a20 a31 a42 a53 *   *
+
+    Cells marked with * are not used.
+
+    Parameters
+    ----------
+    a_band : (u+1, M) array_like
+        The bands of the M by M matrix a.
+    lower : bool, optional
+        Is the matrix in the lower form. (Default is upper form)
+    overwrite_a_band : bool, optional
+        Discard data in a_band (may enhance performance)
+    select : {'a', 'v', 'i'}, optional
+        Which eigenvalues to calculate
+
+        ======  ========================================
+        select  calculated
+        ======  ========================================
+        'a'     All eigenvalues
+        'v'     Eigenvalues in the interval (min, max]
+        'i'     Eigenvalues with indices min <= i <= max
+        ======  ========================================
+    select_range : (min, max), optional
+        Range of selected eigenvalues
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    w : (M,) ndarray
+        The eigenvalues, in ascending order, each repeated according to its
+        multiplicity.
+
+    Raises
+    ------
+    LinAlgError
+        If eigenvalue computation does not converge.
+
+    See Also
+    --------
+    eig_banded : eigenvalues and right eigenvectors for symmetric/Hermitian
+        band matrices
+    eigvalsh_tridiagonal : eigenvalues of symmetric/Hermitian tridiagonal
+        matrices
+    eigvals : eigenvalues of general arrays
+    eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays
+    eig : eigenvalues and right eigenvectors for non-symmetric arrays
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import eigvals_banded
+    >>> A = np.array([[1, 5, 2, 0], [5, 2, 5, 2], [2, 5, 3, 5], [0, 2, 5, 4]])
+    >>> Ab = np.array([[1, 2, 3, 4], [5, 5, 5, 0], [2, 2, 0, 0]])
+    >>> w = eigvals_banded(Ab, lower=True)
+    >>> w
+    array([-4.26200532, -2.22987175,  3.95222349, 12.53965359])
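+
+    Request only the eigenvalues between ``[-3, 4]``, mirroring the
+    `eig_banded` example above:
+
+    >>> w = eigvals_banded(Ab, lower=True, select='v', select_range=[-3, 4])
+    >>> w
+    array([-2.22987175,  3.95222349])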
+    """
+    return eig_banded(a_band, lower=lower, eigvals_only=True,
+                      overwrite_a_band=overwrite_a_band, select=select,
+                      select_range=select_range, check_finite=check_finite)
+
+
+def eigvalsh_tridiagonal(d, e, select='a', select_range=None,
+                         check_finite=True, tol=0., lapack_driver='auto'):
+    """
+    Solve eigenvalue problem for a real symmetric tridiagonal matrix.
+
+    Find eigenvalues `w` of ``a``::
+
+        a v[:,i] = w[i] v[:,i]
+        v.H v    = identity
+
+    For a real symmetric matrix ``a`` with diagonal elements `d` and
+    off-diagonal elements `e`.
+
+    Parameters
+    ----------
+    d : ndarray, shape (ndim,)
+        The diagonal elements of the array.
+    e : ndarray, shape (ndim-1,)
+        The off-diagonal elements of the array.
+    select : {'a', 'v', 'i'}, optional
+        Which eigenvalues to calculate
+
+        ======  ========================================
+        select  calculated
+        ======  ========================================
+        'a'     All eigenvalues
+        'v'     Eigenvalues in the interval (min, max]
+        'i'     Eigenvalues with indices min <= i <= max
+        ======  ========================================
+    select_range : (min, max), optional
+        Range of selected eigenvalues
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+    tol : float
+        The absolute tolerance to which each eigenvalue is required
+        (only used when ``lapack_driver='stebz'``).
+        An eigenvalue (or cluster) is considered to have converged if it
+        lies in an interval of this width. If <= 0. (default),
+        the value ``eps*|a|`` is used where eps is the machine precision,
+        and ``|a|`` is the 1-norm of the matrix ``a``.
+    lapack_driver : str
+        LAPACK function to use, can be 'auto', 'stemr', 'stebz',  'sterf',
+        or 'stev'. When 'auto' (default), it will use 'stemr' if ``select='a'``
+        and 'stebz' otherwise. 'sterf' and 'stev' can only be used when
+        ``select='a'``.
+
+    Returns
+    -------
+    w : (M,) ndarray
+        The eigenvalues, in ascending order, each repeated according to its
+        multiplicity.
+
+    Raises
+    ------
+    LinAlgError
+        If eigenvalue computation does not converge.
+
+    See Also
+    --------
+    eigh_tridiagonal : eigenvalues and right eigenvectors for
+        symmetric/Hermitian tridiagonal matrices
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import eigvalsh_tridiagonal, eigvalsh
+    >>> d = 3*np.ones(4)
+    >>> e = -1*np.ones(3)
+    >>> w = eigvalsh_tridiagonal(d, e)
+    >>> A = np.diag(d) + np.diag(e, k=1) + np.diag(e, k=-1)
+    >>> w2 = eigvalsh(A)  # Verify with other eigenvalue routines
+    >>> np.allclose(w - w2, np.zeros(4))
+    True
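+
+    A subset can be requested by index (a sketch reusing ``d``, ``e``, and
+    the ascending ``w`` from above):
+
+    >>> w_sel = eigvalsh_tridiagonal(d, e, select='i', select_range=(0, 1))
+    >>> np.allclose(w_sel, w[:2])
+    True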
+    """
+    return eigh_tridiagonal(
+        d, e, eigvals_only=True, select=select, select_range=select_range,
+        check_finite=check_finite, tol=tol, lapack_driver=lapack_driver)
+
+
+def eigh_tridiagonal(d, e, eigvals_only=False, select='a', select_range=None,
+                     check_finite=True, tol=0., lapack_driver='auto'):
+    """
+    Solve eigenvalue problem for a real symmetric tridiagonal matrix.
+
+    Find eigenvalues `w` and optionally right eigenvectors `v` of ``a``::
+
+        a v[:,i] = w[i] v[:,i]
+        v.H v    = identity
+
+    For a real symmetric matrix ``a`` with diagonal elements `d` and
+    off-diagonal elements `e`.
+
+    Parameters
+    ----------
+    d : ndarray, shape (ndim,)
+        The diagonal elements of the array.
+    e : ndarray, shape (ndim-1,)
+        The off-diagonal elements of the array.
+    select : {'a', 'v', 'i'}, optional
+        Which eigenvalues to calculate
+
+        ======  ========================================
+        select  calculated
+        ======  ========================================
+        'a'     All eigenvalues
+        'v'     Eigenvalues in the interval (min, max]
+        'i'     Eigenvalues with indices min <= i <= max
+        ======  ========================================
+    select_range : (min, max), optional
+        Range of selected eigenvalues
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+    tol : float
+        The absolute tolerance to which each eigenvalue is required
+        (only used when 'stebz' is the `lapack_driver`).
+        An eigenvalue (or cluster) is considered to have converged if it
+        lies in an interval of this width. If <= 0. (default),
+        the value ``eps*|a|`` is used where eps is the machine precision,
+        and ``|a|`` is the 1-norm of the matrix ``a``.
+    lapack_driver : str
+        LAPACK function to use, can be 'auto', 'stemr', 'stebz', 'sterf',
+        or 'stev'. When 'auto' (default), it will use 'stemr' if ``select='a'``
+        and 'stebz' otherwise. When 'stebz' is used to find the eigenvalues and
+        ``eigvals_only=False``, then a second LAPACK call (to ``?STEIN``) is
+        used to find the corresponding eigenvectors. 'sterf' can only be
+        used when ``eigvals_only=True`` and ``select='a'``. 'stev' can only
+        be used when ``select='a'``.
+
+    Returns
+    -------
+    w : (M,) ndarray
+        The eigenvalues, in ascending order, each repeated according to its
+        multiplicity.
+    v : (M, M) ndarray
+        The normalized eigenvector corresponding to the eigenvalue ``w[i]`` is
+        the column ``v[:,i]``.
+
+    Raises
+    ------
+    LinAlgError
+        If eigenvalue computation does not converge.
+
+    See Also
+    --------
+    eigvalsh_tridiagonal : eigenvalues of symmetric/Hermitian tridiagonal
+        matrices
+    eig : eigenvalues and right eigenvectors for non-symmetric arrays
+    eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays
+    eig_banded : eigenvalues and right eigenvectors for symmetric/Hermitian
+        band matrices
+
+    Notes
+    -----
+    This function makes use of LAPACK ``S/DSTEMR`` routines.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import eigh_tridiagonal
+    >>> d = 3*np.ones(4)
+    >>> e = -1*np.ones(3)
+    >>> w, v = eigh_tridiagonal(d, e)
+    >>> A = np.diag(d) + np.diag(e, k=1) + np.diag(e, k=-1)
+    >>> np.allclose(A @ v - v @ np.diag(w), np.zeros((4, 4)))
+    True
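+
+    A subset can be requested by index (a sketch reusing ``d``, ``e``, and
+    the ascending ``w`` from above):
+
+    >>> w_sel, v_sel = eigh_tridiagonal(d, e, select='i', select_range=(0, 1))
+    >>> np.allclose(w_sel, w[:2])
+    True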
+    """
+    d = _asarray_validated(d, check_finite=check_finite)
+    e = _asarray_validated(e, check_finite=check_finite)
+    for check in (d, e):
+        if check.ndim != 1:
+            raise ValueError('expected a 1-D array')
+        if check.dtype.char in 'GFD':  # complex
+            raise TypeError('Only real arrays currently supported')
+    if d.size != e.size + 1:
+        raise ValueError('d (%s) must have one more element than e (%s)'
+                         % (d.size, e.size))
+    select, vl, vu, il, iu, _ = _check_select(
+        select, select_range, 0, d.size)
+    if not isinstance(lapack_driver, str):
+        raise TypeError('lapack_driver must be str')
+    drivers = ('auto', 'stemr', 'sterf', 'stebz', 'stev')
+    if lapack_driver not in drivers:
+        raise ValueError('lapack_driver must be one of %s, got %s'
+                         % (drivers, lapack_driver))
+    if lapack_driver == 'auto':
+        lapack_driver = 'stemr' if select == 0 else 'stebz'
+    func, = get_lapack_funcs((lapack_driver,), (d, e))
+    compute_v = not eigvals_only
+    if lapack_driver == 'sterf':
+        if select != 0:
+            raise ValueError('sterf can only be used when select == "a"')
+        if not eigvals_only:
+            raise ValueError('sterf can only be used when eigvals_only is '
+                             'True')
+        w, info = func(d, e)
+        m = len(w)
+    elif lapack_driver == 'stev':
+        if select != 0:
+            raise ValueError('stev can only be used when select == "a"')
+        w, v, info = func(d, e, compute_v=compute_v)
+        m = len(w)
+    elif lapack_driver == 'stebz':
+        tol = float(tol)
+        internal_name = 'stebz'
+        stebz, = get_lapack_funcs((internal_name,), (d, e))
+        # If getting eigenvectors, needs to be block-ordered (B) instead of
+        # matrix-ordered (E), and we will reorder later
+        order = 'E' if eigvals_only else 'B'
+        m, w, iblock, isplit, info = stebz(d, e, select, vl, vu, il, iu, tol,
+                                           order)
+    else:   # 'stemr'
+        # ?STEMR annoyingly requires size N instead of N-1
+        e_ = empty(e.size+1, e.dtype)
+        e_[:-1] = e
+        stemr_lwork, = get_lapack_funcs(('stemr_lwork',), (d, e))
+        lwork, liwork, info = stemr_lwork(d, e_, select, vl, vu, il, iu,
+                                          compute_v=compute_v)
+        _check_info(info, 'stemr_lwork')
+        m, w, v, info = func(d, e_, select, vl, vu, il, iu,
+                             compute_v=compute_v, lwork=lwork, liwork=liwork)
+    _check_info(info, lapack_driver + ' (eigh_tridiagonal)')
+    w = w[:m]
+    if eigvals_only:
+        return w
+    else:
+        # Do we still need to compute the eigenvalues?
+        if lapack_driver == 'stebz':
+            func, = get_lapack_funcs(('stein',), (d, e))
+            v, info = func(d, e, w, iblock, isplit)
+            _check_info(info, 'stein (eigh_tridiagonal)',
+                        positive='%d eigenvectors failed to converge')
+            # Convert block-order to matrix-order
+            order = argsort(w)
+            w, v = w[order], v[:, order]
+        else:
+            v = v[:, :m]
+        return w, v
+
+
+def _check_info(info, driver, positive='did not converge (LAPACK info=%d)'):
+    """Check info return value."""
+    if info < 0:
+        raise ValueError('illegal value in argument %d of internal %s'
+                         % (-info, driver))
+    if info > 0 and positive:
+        raise LinAlgError(("%s " + positive) % (driver, info,))
+
+
+def hessenberg(a, calc_q=False, overwrite_a=False, check_finite=True):
+    """
+    Compute Hessenberg form of a matrix.
+
+    The Hessenberg decomposition is::
+
+        A = Q H Q^H
+
+    where `Q` is unitary/orthogonal and `H` has only zero elements below
+    the first sub-diagonal.
+
+    Parameters
+    ----------
+    a : (M, M) array_like
+        Matrix to bring into Hessenberg form.
+    calc_q : bool, optional
+        Whether to compute the transformation matrix.  Default is False.
+    overwrite_a : bool, optional
+        Whether to overwrite `a`; may improve performance.
+        Default is False.
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    H : (M, M) ndarray
+        Hessenberg form of `a`.
+    Q : (M, M) ndarray
+        Unitary/orthogonal similarity transformation matrix ``A = Q H Q^H``.
+        Only returned if ``calc_q=True``.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import hessenberg
+    >>> A = np.array([[2, 5, 8, 7], [5, 2, 2, 8], [7, 5, 6, 6], [5, 4, 4, 8]])
+    >>> H, Q = hessenberg(A, calc_q=True)
+    >>> H
+    array([[  2.        , -11.65843866,   1.42005301,   0.25349066],
+           [ -9.94987437,  14.53535354,  -5.31022304,   2.43081618],
+           [  0.        ,  -1.83299243,   0.38969961,  -0.51527034],
+           [  0.        ,   0.        ,  -3.83189513,   1.07494686]])
+    >>> np.allclose(Q @ H @ Q.conj().T - A, np.zeros((4, 4)))
+    True
+    """
+    a1 = _asarray_validated(a, check_finite=check_finite)
+    if len(a1.shape) != 2 or (a1.shape[0] != a1.shape[1]):
+        raise ValueError('expected square matrix')
+    overwrite_a = overwrite_a or (_datacopied(a1, a))
+
+    # if 2x2 or smaller: already in Hessenberg
+    if a1.shape[0] <= 2:
+        if calc_q:
+            return a1, eye(a1.shape[0])
+        return a1
+
+    gehrd, gebal, gehrd_lwork = get_lapack_funcs(('gehrd', 'gebal',
+                                                  'gehrd_lwork'), (a1,))
+    ba, lo, hi, pivscale, info = gebal(a1, permute=0, overwrite_a=overwrite_a)
+    _check_info(info, 'gebal (hessenberg)', positive=False)
+    n = len(a1)
+
+    lwork = _compute_lwork(gehrd_lwork, ba.shape[0], lo=lo, hi=hi)
+
+    hq, tau, info = gehrd(ba, lo=lo, hi=hi, lwork=lwork, overwrite_a=1)
+    _check_info(info, 'gehrd (hessenberg)', positive=False)
+    h = numpy.triu(hq, -1)
+    if not calc_q:
+        return h
+
+    # use orghr/unghr to compute q
+    orghr, orghr_lwork = get_lapack_funcs(('orghr', 'orghr_lwork'), (a1,))
+    lwork = _compute_lwork(orghr_lwork, n, lo=lo, hi=hi)
+
+    q, info = orghr(a=hq, tau=tau, lo=lo, hi=hi, lwork=lwork, overwrite_a=1)
+    _check_info(info, 'orghr (hessenberg)', positive=False)
+    return h, q
+
+
+def cdf2rdf(w, v):
+    """
+    Converts complex eigenvalues ``w`` and eigenvectors ``v`` to real
+    eigenvalues in a block diagonal form ``wr`` and the associated real
+    eigenvectors ``vr``, such that::
+
+        vr @ wr = X @ vr
+
+    continues to hold, where ``X`` is the original array for which ``w`` and
+    ``v`` are the eigenvalues and eigenvectors.
+
+    .. versionadded:: 1.1.0
+
+    Parameters
+    ----------
+    w : (..., M) array_like
+        Complex or real eigenvalues, an array or stack of arrays
+
+        Conjugate pairs must not be interleaved, else the wrong result
+        will be produced. So ``[1+1j, 1, 1-1j]`` will give a correct result,
+        but ``[1+1j, 2+1j, 1-1j, 2-1j]`` will not.
+
+    v : (..., M, M) array_like
+        Complex or real eigenvectors, a square array or stack of square arrays.
+
+    Returns
+    -------
+    wr : (..., M, M) ndarray
+        Real diagonal block form of eigenvalues
+    vr : (..., M, M) ndarray
+        Real eigenvectors associated with ``wr``
+
+    See Also
+    --------
+    eig : Eigenvalues and right eigenvectors for non-symmetric arrays
+    rsf2csf : Convert real Schur form to complex Schur form
+
+    Notes
+    -----
+    ``w``, ``v`` must be the eigenstructure for some *real* matrix ``X``.
+    For example, obtained by ``w, v = scipy.linalg.eig(X)`` or
+    ``w, v = numpy.linalg.eig(X)`` in which case ``X`` can also represent
+    stacked arrays.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> X = np.array([[1, 2, 3], [0, 4, 5], [0, -5, 4]])
+    >>> X
+    array([[ 1,  2,  3],
+           [ 0,  4,  5],
+           [ 0, -5,  4]])
+
+    >>> from scipy import linalg
+    >>> w, v = linalg.eig(X)
+    >>> w
+    array([ 1.+0.j,  4.+5.j,  4.-5.j])
+    >>> v
+    array([[ 1.00000+0.j     , -0.01906-0.40016j, -0.01906+0.40016j],
+           [ 0.00000+0.j     ,  0.00000-0.64788j,  0.00000+0.64788j],
+           [ 0.00000+0.j     ,  0.64788+0.j     ,  0.64788-0.j     ]])
+
+    >>> wr, vr = linalg.cdf2rdf(w, v)
+    >>> wr
+    array([[ 1.,  0.,  0.],
+           [ 0.,  4.,  5.],
+           [ 0., -5.,  4.]])
+    >>> vr
+    array([[ 1.     ,  0.40016, -0.01906],
+           [ 0.     ,  0.64788,  0.     ],
+           [ 0.     ,  0.     ,  0.64788]])
+
+    >>> vr @ wr
+    array([[ 1.     ,  1.69593,  1.9246 ],
+           [ 0.     ,  2.59153,  3.23942],
+           [ 0.     , -3.23942,  2.59153]])
+    >>> X @ vr
+    array([[ 1.     ,  1.69593,  1.9246 ],
+           [ 0.     ,  2.59153,  3.23942],
+           [ 0.     , -3.23942,  2.59153]])
+    """
+    w, v = _asarray_validated(w), _asarray_validated(v)
+
+    # check dimensions
+    if w.ndim < 1:
+        raise ValueError('expected w to be at least 1D')
+    if v.ndim < 2:
+        raise ValueError('expected v to be at least 2D')
+    if v.ndim != w.ndim + 1:
+        raise ValueError('expected eigenvectors array to have exactly one '
+                         'dimension more than eigenvalues array')
+
+    # check shapes
+    n = w.shape[-1]
+    M = w.shape[:-1]
+    if v.shape[-2] != v.shape[-1]:
+        raise ValueError('expected v to be a square matrix or stacked square '
+                         'matrices: v.shape[-2] = v.shape[-1]')
+    if v.shape[-1] != n:
+        raise ValueError('expected the same number of eigenvalues as '
+                         'eigenvectors')
+
+    # get indices for each first pair of complex eigenvalues
+    complex_mask = iscomplex(w)
+    n_complex = complex_mask.sum(axis=-1)
+
+    # check if all complex eigenvalues have conjugate pairs
+    if not (n_complex % 2 == 0).all():
+        raise ValueError('expected complex-conjugate pairs of eigenvalues')
+
+    # find complex indices
+    idx = nonzero(complex_mask)
+    idx_stack = idx[:-1]
+    idx_elem = idx[-1]
+
+    # filter them to conjugate indices, assuming pairs are not interleaved
+    j = idx_elem[0::2]
+    k = idx_elem[1::2]
+    stack_ind = ()
+    for i in idx_stack:
+        # should never happen, assuming nonzero orders by the last axis
+        assert (i[0::2] == i[1::2]).all(),\
+                "Conjugate pair spanned different arrays!"
+        stack_ind += (i[0::2],)
+
+    # all eigenvalues to diagonal form
+    wr = zeros(M + (n, n), dtype=w.real.dtype)
+    di = range(n)
+    wr[..., di, di] = w.real
+
+    # complex eigenvalues to real block diagonal form
+    wr[stack_ind + (j, k)] = w[stack_ind + (j,)].imag
+    wr[stack_ind + (k, j)] = w[stack_ind + (k,)].imag
+
+    # compute real eigenvectors associated with real block diagonal eigenvalues
+    u = zeros(M + (n, n), dtype=numpy.cdouble)
+    u[..., di, di] = 1.0
+    u[stack_ind + (j, j)] = 0.5j
+    u[stack_ind + (j, k)] = 0.5
+    u[stack_ind + (k, j)] = -0.5j
+    u[stack_ind + (k, k)] = 0.5
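+    # u recombines each conjugate pair of eigenvector columns into real
+    # vectors (their negated imaginary and real parts), so the product
+    # v @ u computed below is real up to rounding.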
+
+    # multiply matrices v and u (equivalent to v @ u)
+    vr = einsum('...ij,...jk->...ik', v, u).real
+
+    return wr, vr
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/_decomp_cholesky.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/_decomp_cholesky.py
new file mode 100644
index 00000000..9593bb24
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/_decomp_cholesky.py
@@ -0,0 +1,358 @@
+"""Cholesky decomposition functions."""
+
+from numpy import asarray_chkfinite, asarray, atleast_2d
+
+# Local imports
+from ._misc import LinAlgError, _datacopied
+from .lapack import get_lapack_funcs
+
+__all__ = ['cholesky', 'cho_factor', 'cho_solve', 'cholesky_banded',
+           'cho_solve_banded']
+
+
+def _cholesky(a, lower=False, overwrite_a=False, clean=True,
+              check_finite=True):
+    """Common code for cholesky() and cho_factor()."""
+
+    a1 = asarray_chkfinite(a) if check_finite else asarray(a)
+    a1 = atleast_2d(a1)
+
+    # Dimension check
+    if a1.ndim != 2:
+        raise ValueError('Input array needs to be 2D but received '
+                         'a {}d-array.'.format(a1.ndim))
+    # Squareness check
+    if a1.shape[0] != a1.shape[1]:
+        raise ValueError('Input array is expected to be square but has '
+                         'the shape: {}.'.format(a1.shape))
+
+    # Quick return for square empty array
+    if a1.size == 0:
+        return a1.copy(), lower
+
+    overwrite_a = overwrite_a or _datacopied(a1, a)
+    potrf, = get_lapack_funcs(('potrf',), (a1,))
+    c, info = potrf(a1, lower=lower, overwrite_a=overwrite_a, clean=clean)
+    if info > 0:
+        raise LinAlgError("%d-th leading minor of the array is not positive "
+                          "definite" % info)
+    if info < 0:
+        raise ValueError('LAPACK reported an illegal value in {}-th argument '
+                         'on entry to "POTRF".'.format(-info))
+    return c, lower
+
+
+def cholesky(a, lower=False, overwrite_a=False, check_finite=True):
+    """
+    Compute the Cholesky decomposition of a matrix.
+
+    Returns the Cholesky decomposition, :math:`A = L L^*` or
+    :math:`A = U^* U` of a Hermitian positive-definite matrix A.
+
+    Parameters
+    ----------
+    a : (M, M) array_like
+        Matrix to be decomposed
+    lower : bool, optional
+        Whether to compute the upper- or lower-triangular Cholesky
+        factorization.  Default is upper-triangular.
+    overwrite_a : bool, optional
+        Whether to overwrite data in `a` (may improve performance).
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    c : (M, M) ndarray
+        Upper- or lower-triangular Cholesky factor of `a`.
+
+    Raises
+    ------
+    LinAlgError : if decomposition fails.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import cholesky
+    >>> a = np.array([[1,-2j],[2j,5]])
+    >>> L = cholesky(a, lower=True)
+    >>> L
+    array([[ 1.+0.j,  0.+0.j],
+           [ 0.+2.j,  1.+0.j]])
+    >>> L @ L.T.conj()
+    array([[ 1.+0.j,  0.-2.j],
+           [ 0.+2.j,  5.+0.j]])
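+
+    The default upper-triangular factor satisfies ``U.conj().T @ U == a``:
+
+    >>> U = cholesky(a)
+    >>> np.allclose(U.conj().T @ U, a)
+    True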
+
+    """
+    c, lower = _cholesky(a, lower=lower, overwrite_a=overwrite_a, clean=True,
+                         check_finite=check_finite)
+    return c
+
+
+def cho_factor(a, lower=False, overwrite_a=False, check_finite=True):
+    """
+    Compute the Cholesky decomposition of a matrix, to use in cho_solve
+
+    Returns a matrix containing the Cholesky decomposition,
+    ``A = L L*`` or ``A = U* U`` of a Hermitian positive-definite matrix `a`.
+    The return value can be directly used as the first parameter to cho_solve.
+
+    .. warning::
+        The returned matrix also contains random data in the entries not
+        used by the Cholesky decomposition. If you need to zero these
+        entries, use the function `cholesky` instead.
+
+    Parameters
+    ----------
+    a : (M, M) array_like
+        Matrix to be decomposed
+    lower : bool, optional
+        Whether to compute the upper or lower triangular Cholesky factorization
+        (Default: upper-triangular)
+    overwrite_a : bool, optional
+        Whether to overwrite data in a (may improve performance)
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    c : (M, M) ndarray
+        Matrix whose upper or lower triangle contains the Cholesky factor
+        of `a`. Other parts of the matrix contain random data.
+    lower : bool
+        Flag indicating whether the factor is in the lower or upper triangle
+
+    Raises
+    ------
+    LinAlgError
+        Raised if decomposition fails.
+
+    See Also
+    --------
+    cho_solve : Solve a linear set of equations using the Cholesky
+                factorization of a matrix.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import cho_factor
+    >>> A = np.array([[9, 3, 1, 5], [3, 7, 5, 1], [1, 5, 9, 2], [5, 1, 2, 6]])
+    >>> c, low = cho_factor(A)
+    >>> c
+    array([[3.        , 1.        , 0.33333333, 1.66666667],
+           [3.        , 2.44948974, 1.90515869, -0.27216553],
+           [1.        , 5.        , 2.29330749, 0.8559528 ],
+           [5.        , 1.        , 2.        , 1.55418563]])
+    >>> np.allclose(np.triu(c).T @ np.triu(c) - A, np.zeros((4, 4)))
+    True
+
+    """
+    c, lower = _cholesky(a, lower=lower, overwrite_a=overwrite_a, clean=False,
+                         check_finite=check_finite)
+    return c, lower
+
+
+def cho_solve(c_and_lower, b, overwrite_b=False, check_finite=True):
+    """Solve the linear equations A x = b, given the Cholesky factorization of A.
+
+    Parameters
+    ----------
+    (c, lower) : tuple, (array, bool)
+        Cholesky factorization of a, as given by cho_factor
+    b : array
+        Right-hand side
+    overwrite_b : bool, optional
+        Whether to overwrite data in b (may improve performance)
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    x : array
+        The solution to the system A x = b
+
+    See Also
+    --------
+    cho_factor : Cholesky factorization of a matrix
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import cho_factor, cho_solve
+    >>> A = np.array([[9, 3, 1, 5], [3, 7, 5, 1], [1, 5, 9, 2], [5, 1, 2, 6]])
+    >>> c, low = cho_factor(A)
+    >>> x = cho_solve((c, low), [1, 1, 1, 1])
+    >>> np.allclose(A @ x - [1, 1, 1, 1], np.zeros(4))
+    True
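+
+    Several right-hand sides can be solved at once by passing a 2-D ``b``
+    (a sketch reusing the factorization above):
+
+    >>> X = cho_solve((c, low), np.eye(4))
+    >>> np.allclose(A @ X, np.eye(4))
+    True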
+
+    """
+    (c, lower) = c_and_lower
+    if check_finite:
+        b1 = asarray_chkfinite(b)
+        c = asarray_chkfinite(c)
+    else:
+        b1 = asarray(b)
+        c = asarray(c)
+    if c.ndim != 2 or c.shape[0] != c.shape[1]:
+        raise ValueError("The factored matrix c is not square.")
+    if c.shape[1] != b1.shape[0]:
+        raise ValueError("incompatible dimensions ({} and {})"
+                         .format(c.shape, b1.shape))
+
+    overwrite_b = overwrite_b or _datacopied(b1, b)
+
+    potrs, = get_lapack_funcs(('potrs',), (c, b1))
+    x, info = potrs(c, b1, lower=lower, overwrite_b=overwrite_b)
+    if info != 0:
+        raise ValueError('illegal value in %dth argument of internal potrs'
+                         % -info)
+    return x
+
+
+def cholesky_banded(ab, overwrite_ab=False, lower=False, check_finite=True):
+    """
+    Cholesky decompose a banded Hermitian positive-definite matrix
+
+    The matrix a is stored in ab either in lower-diagonal or upper-
+    diagonal ordered form::
+
+        ab[u + i - j, j] == a[i,j]        (if upper form; i <= j)
+        ab[    i - j, j] == a[i,j]        (if lower form; i >= j)
+
+    Example of ab (shape of a is (6,6), u=2)::
+
+        upper form:
+        *   *   a02 a13 a24 a35
+        *   a01 a12 a23 a34 a45
+        a00 a11 a22 a33 a44 a55
+
+        lower form:
+        a00 a11 a22 a33 a44 a55
+        a10 a21 a32 a43 a54 *
+        a20 a31 a42 a53 *   *
+
+    Parameters
+    ----------
+    ab : (u + 1, M) array_like
+        Banded matrix
+    overwrite_ab : bool, optional
+        Discard data in ab (may enhance performance)
+    lower : bool, optional
+        Is the matrix in the lower form. (Default is upper form)
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    c : (u + 1, M) ndarray
+        Cholesky factorization of a, in the same banded format as ab
+
+    See Also
+    --------
+    cho_solve_banded :
+        Solve a linear set of equations, given the Cholesky factorization
+        of a banded Hermitian matrix.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import cholesky_banded
+    >>> from numpy import allclose, zeros, diag
+    >>> Ab = np.array([[0, 0, 1j, 2, 3j], [0, -1, -2, 3, 4], [9, 8, 7, 6, 9]])
+    >>> A = np.diag(Ab[0,2:], k=2) + np.diag(Ab[1,1:], k=1)
+    >>> A = A + A.conj().T + np.diag(Ab[2, :])
+    >>> c = cholesky_banded(Ab)
+    >>> C = np.diag(c[0, 2:], k=2) + np.diag(c[1, 1:], k=1) + np.diag(c[2, :])
+    >>> np.allclose(C.conj().T @ C - A, np.zeros((5, 5)))
+    True
+
+    """
+    if check_finite:
+        ab = asarray_chkfinite(ab)
+    else:
+        ab = asarray(ab)
+
+    pbtrf, = get_lapack_funcs(('pbtrf',), (ab,))
+    c, info = pbtrf(ab, lower=lower, overwrite_ab=overwrite_ab)
+    if info > 0:
+        raise LinAlgError("%d-th leading minor not positive definite" % info)
+    if info < 0:
+        raise ValueError('illegal value in %d-th argument of internal pbtrf'
+                         % -info)
+    return c
+
+
+def cho_solve_banded(cb_and_lower, b, overwrite_b=False, check_finite=True):
+    """
+    Solve the linear equations ``A x = b``, given the Cholesky factorization of
+    the banded Hermitian ``A``.
+
+    Parameters
+    ----------
+    (cb, lower) : tuple, (ndarray, bool)
+        `cb` is the Cholesky factorization of A, as given by cholesky_banded.
+        `lower` must be the same value that was given to cholesky_banded.
+    b : array_like
+        Right-hand side
+    overwrite_b : bool, optional
+        If True, the function will overwrite the values in `b`.
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    x : array
+        The solution to the system A x = b
+
+    See Also
+    --------
+    cholesky_banded : Cholesky factorization of a banded matrix
+
+    Notes
+    -----
+
+    .. versionadded:: 0.8.0
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import cholesky_banded, cho_solve_banded
+    >>> Ab = np.array([[0, 0, 1j, 2, 3j], [0, -1, -2, 3, 4], [9, 8, 7, 6, 9]])
+    >>> A = np.diag(Ab[0,2:], k=2) + np.diag(Ab[1,1:], k=1)
+    >>> A = A + A.conj().T + np.diag(Ab[2, :])
+    >>> c = cholesky_banded(Ab)
+    >>> x = cho_solve_banded((c, False), np.ones(5))
+    >>> np.allclose(A @ x - np.ones(5), np.zeros(5))
+    True
+
+    """
+    (cb, lower) = cb_and_lower
+    if check_finite:
+        cb = asarray_chkfinite(cb)
+        b = asarray_chkfinite(b)
+    else:
+        cb = asarray(cb)
+        b = asarray(b)
+
+    # Validate shapes.
+    if cb.shape[-1] != b.shape[0]:
+        raise ValueError("shapes of cb and b are not compatible.")
+
+    pbtrs, = get_lapack_funcs(('pbtrs',), (cb, b))
+    x, info = pbtrs(cb, b, lower=lower, overwrite_b=overwrite_b)
+    if info > 0:
+        raise LinAlgError("%dth leading minor not positive definite" % info)
+    if info < 0:
+        raise ValueError('illegal value in %dth argument of internal pbtrs'
+                         % -info)
+    return x
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/_decomp_cossin.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/_decomp_cossin.py
new file mode 100644
index 00000000..2ec0f135
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/_decomp_cossin.py
@@ -0,0 +1,224 @@
+# -*- coding: utf-8 -*-
+from collections.abc import Iterable
+import numpy as np
+
+from scipy._lib._util import _asarray_validated
+from scipy.linalg import block_diag, LinAlgError
+from .lapack import _compute_lwork, get_lapack_funcs
+
+__all__ = ['cossin']
+
+
+def cossin(X, p=None, q=None, separate=False,
+           swap_sign=False, compute_u=True, compute_vh=True):
+    """
+    Compute the cosine-sine (CS) decomposition of an orthogonal/unitary matrix.
+
+    X is an ``(m, m)`` orthogonal/unitary matrix, partitioned as the following
+    where upper left block has the shape of ``(p, q)``::
+
+                                   ┌                   ┐
+                                   │ I  0  0 │ 0  0  0 │
+        ┌           ┐   ┌         ┐│ 0  C  0 │ 0 -S  0 │┌         ┐*
+        │ X11 │ X12 │   │ U1 │    ││ 0  0  0 │ 0  0 -I ││ V1 │    │
+        │ ────┼──── │ = │────┼────││─────────┼─────────││────┼────│
+        │ X21 │ X22 │   │    │ U2 ││ 0  0  0 │ I  0  0 ││    │ V2 │
+        └           ┘   └         ┘│ 0  S  0 │ 0  C  0 │└         ┘
+                                   │ 0  0  I │ 0  0  0 │
+                                   └                   ┘
+
+    ``U1``, ``U2``, ``V1``, ``V2`` are square orthogonal/unitary matrices of
+    dimensions ``(p,p)``, ``(m-p,m-p)``, ``(q,q)``, and ``(m-q,m-q)``
+    respectively, and ``C`` and ``S`` are ``(r, r)`` nonnegative diagonal
+    matrices satisfying ``C^2 + S^2 = I`` where ``r = min(p, m-p, q, m-q)``.
+
+    Moreover, the ranks of the identity matrices are ``min(p, q) - r``,
+    ``min(p, m - q) - r``, ``min(m - p, q) - r``, and ``min(m - p, m - q) - r``
+    respectively.
+
+    X can be supplied either as a whole, together with the block
+    specifications p, q, or as an iterable of its subblocks, from which the
+    shapes are derived. See the examples below.
+
+    Parameters
+    ----------
+    X : array_like, iterable
+        complex unitary or real orthogonal matrix to be decomposed, or iterable
+        of subblocks ``X11``, ``X12``, ``X21``, ``X22``, when ``p``, ``q`` are
+        omitted.
+    p : int, optional
+        Number of rows of the upper left block ``X11``, used only when X is
+        given as an array.
+    q : int, optional
+        Number of columns of the upper left block ``X11``, used only when X is
+        given as an array.
+    separate : bool, optional
+        if ``True``, the low level components are returned instead of the
+        matrix factors, i.e. ``(u1,u2)``, ``theta``, ``(v1h,v2h)`` instead of
+        ``u``, ``cs``, ``vh``.
+    swap_sign : bool, optional
+        if ``True``, the ``-S``, ``-I`` blocks will be in the bottom left,
+        otherwise (by default) they will be in the upper right block.
+    compute_u : bool, optional
+        if ``False``, ``u`` won't be computed and an empty array is returned.
+    compute_vh : bool, optional
+        if ``False``, ``vh`` won't be computed and an empty array is returned.
+
+    Returns
+    -------
+    u : ndarray
+        When ``compute_u=True``, contains the block diagonal orthogonal/unitary
+        matrix consisting of the blocks ``U1`` (``p`` x ``p``) and ``U2``
+        (``m-p`` x ``m-p``) orthogonal/unitary matrices. If ``separate=True``,
+        this contains the tuple of ``(U1, U2)``.
+    cs : ndarray
+        The cosine-sine factor with the structure described above.
+        If ``separate=True``, this contains the ``theta`` array containing
+        the angles in radians.
+    vh : ndarray
+        When ``compute_vh=True``, contains the block diagonal orthogonal/unitary
+        matrix consisting of the blocks ``V1H`` (``q`` x ``q``) and ``V2H``
+        (``m-q`` x ``m-q``) orthogonal/unitary matrices. If ``separate=True``,
+        this contains the tuple of ``(V1H, V2H)``.
+
+    References
+    ----------
+    .. [1] Brian D. Sutton. Computing the complete CS decomposition. Numer.
+           Algorithms, 50(1):33-65, 2009.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import cossin
+    >>> from scipy.stats import unitary_group
+    >>> x = unitary_group.rvs(4)
+    >>> u, cs, vdh = cossin(x, p=2, q=2)
+    >>> np.allclose(x, u @ cs @ vdh)
+    True
+
+    The same matrix can be supplied via its subblocks, without the need for
+    ``p`` and ``q``. Let's also skip the computation of ``u``
+
+    >>> ue, cs, vdh = cossin((x[:2, :2], x[:2, 2:], x[2:, :2], x[2:, 2:]),
+    ...                      compute_u=False)
+    >>> print(ue)
+    []
+    >>> np.allclose(x, u @ cs @ vdh)
+    True
+
+    """
+
+    if p or q:
+        p = 1 if p is None else int(p)
+        q = 1 if q is None else int(q)
+        X = _asarray_validated(X, check_finite=True)
+        if not np.equal(*X.shape):
+            raise ValueError("Cosine Sine decomposition only supports square"
+                             " matrices, got {}".format(X.shape))
+        m = X.shape[0]
+        if p >= m or p <= 0:
+            raise ValueError("invalid p={}, 0= m or q <= 0:
+            raise ValueError("invalid q={}, 0 0:
+        raise LinAlgError("{} did not converge: {}".format(method_name, info))
+
+    if separate:
+        return (u1, u2), theta, (v1h, v2h)
+
+    U = block_diag(u1, u2)
+    VDH = block_diag(v1h, v2h)
+
+    # Construct the middle factor CS
+    c = np.diag(np.cos(theta))
+    s = np.diag(np.sin(theta))
+    r = min(p, q, m - p, m - q)
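+    # n11, n12, n21, n22 are the sizes of the four identity blocks in the
+    # middle factor (the rank formulas given in the docstring).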
+    n11 = min(p, q) - r
+    n12 = min(p, m - q) - r
+    n21 = min(m - p, q) - r
+    n22 = min(m - p, m - q) - r
+    Id = np.eye(np.max([n11, n12, n21, n22, r]), dtype=theta.dtype)
+    CS = np.zeros((m, m), dtype=theta.dtype)
+
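+    # Fill CS block by block; xs:xe and ys:ye below are the row and column
+    # slice bounds of each block in the partitioned layout from the docstring.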
+    CS[:n11, :n11] = Id[:n11, :n11]
+
+    xs = n11 + r
+    xe = n11 + r + n12
+    ys = n11 + n21 + n22 + 2 * r
+    ye = n11 + n21 + n22 + 2 * r + n12
+    CS[xs: xe, ys:ye] = Id[:n12, :n12] if swap_sign else -Id[:n12, :n12]
+
+    xs = p + n22 + r
+    xe = p + n22 + r + n21
+    ys = n11 + r
+    ye = n11 + r + n21
+    CS[xs:xe, ys:ye] = -Id[:n21, :n21] if swap_sign else Id[:n21, :n21]
+
+    CS[p:p + n22, q:q + n22] = Id[:n22, :n22]
+    CS[n11:n11 + r, n11:n11 + r] = c
+    CS[p + n22:p + n22 + r, r + n21 + n22:2 * r + n21 + n22] = c
+
+    xs = n11
+    xe = n11 + r
+    ys = n11 + n21 + n22 + r
+    ye = n11 + n21 + n22 + 2 * r
+    CS[xs:xe, ys:ye] = s if swap_sign else -s
+
+    CS[p + n22:p + n22 + r, n11:n11 + r] = -s if swap_sign else s
+
+    return U, CS, VDH
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/_decomp_ldl.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/_decomp_ldl.py
new file mode 100644
index 00000000..e484e11f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/_decomp_ldl.py
@@ -0,0 +1,352 @@
+from warnings import warn
+
+import numpy as np
+from numpy import (atleast_2d, ComplexWarning, arange, zeros_like, imag, diag,
+                   iscomplexobj, tril, triu, argsort, empty_like)
+from ._decomp import _asarray_validated
+from .lapack import get_lapack_funcs, _compute_lwork
+
+__all__ = ['ldl']
+
+
+def ldl(A, lower=True, hermitian=True, overwrite_a=False, check_finite=True):
+    """ Computes the LDLt or Bunch-Kaufman factorization of a symmetric/
+    hermitian matrix.
+
+    This function returns a block diagonal matrix D consisting of blocks of
+    size at most 2x2 and also a possibly permuted unit lower triangular matrix
+    ``L`` such that the factorization ``A = L D L^H`` or ``A = L D L^T``
+    holds. If `lower` is False then (again possibly permuted) upper
+    triangular matrices are returned as outer factors.
+
+    The permutation array can be used to triangularize the outer factors
+    simply by a row shuffle, i.e., ``lu[perm, :]`` is an upper/lower
+    triangular matrix. This is also equivalent to multiplication with a
+    permutation matrix ``P.dot(lu)``, where ``P`` is a column-permuted
+    identity matrix ``I[:, perm]``.
+
+    Depending on the value of the boolean `lower`, only upper or lower
+    triangular part of the input array is referenced. Hence, a triangular
+    matrix on entry would give the same result as if the full matrix is
+    supplied.
+
+    Parameters
+    ----------
+    A : array_like
+        Square input array
+    lower : bool, optional
+        This switches between the lower and upper triangular outer factors of
+        the factorization. Lower triangular (``lower=True``) is the default.
+    hermitian : bool, optional
+        For complex-valued arrays, this defines whether ``A = A.conj().T`` or
+        ``A = A.T`` is assumed. For real-valued arrays, this switch has no
+        effect.
+    overwrite_a : bool, optional
+        Allow overwriting data in `A` (may enhance performance). The default
+        is False.
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    lu : ndarray
+        The (possibly) permuted upper/lower triangular outer factor of the
+        factorization.
+    d : ndarray
+        The block diagonal multiplier of the factorization.
+    perm : ndarray
+        The row-permutation index array that brings lu into triangular form.
+
+    Raises
+    ------
+    ValueError
+        If input array is not square.
+    ComplexWarning
+        If a complex-valued array with nonzero imaginary parts on the
+        diagonal is given and hermitian is set to True.
+
+    See Also
+    --------
+    cholesky, lu
+
+    Notes
+    -----
+    This function uses ``?SYTRF`` routines for symmetric matrices and
+    ``?HETRF`` routines for Hermitian matrices from LAPACK. See [1]_ for
+    the algorithm details.
+
+    Depending on the `lower` keyword value, only lower or upper triangular
+    part of the input array is referenced. Moreover, this keyword also defines
+    the structure of the outer factors of the factorization.
+
+    .. versionadded:: 1.1.0
+
+    References
+    ----------
+    .. [1] J.R. Bunch, L. Kaufman, Some stable methods for calculating
+       inertia and solving symmetric linear systems, Math. Comput. Vol.31,
+       1977. :doi:`10.2307/2005787`
+
+    Examples
+    --------
+    Given an upper triangular array ``a`` that represents the full symmetric
+    array with its entries, obtain ``l``, ``d`` and the permutation vector
+    ``perm``:
+
+    >>> import numpy as np
+    >>> from scipy.linalg import ldl
+    >>> a = np.array([[2, -1, 3], [0, 2, 0], [0, 0, 1]])
+    >>> lu, d, perm = ldl(a, lower=0) # Use the upper part
+    >>> lu
+    array([[ 0. ,  0. ,  1. ],
+           [ 0. ,  1. , -0.5],
+           [ 1. ,  1. ,  1.5]])
+    >>> d
+    array([[-5. ,  0. ,  0. ],
+           [ 0. ,  1.5,  0. ],
+           [ 0. ,  0. ,  2. ]])
+    >>> perm
+    array([2, 1, 0])
+    >>> lu[perm, :]
+    array([[ 1. ,  1. ,  1.5],
+           [ 0. ,  1. , -0.5],
+           [ 0. ,  0. ,  1. ]])
+    >>> lu.dot(d).dot(lu.T)
+    array([[ 2., -1.,  3.],
+           [-1.,  2.,  0.],
+           [ 3.,  0.,  1.]])
+
+    """
+    a = atleast_2d(_asarray_validated(A, check_finite=check_finite))
+    if a.shape[0] != a.shape[1]:
+        raise ValueError('The input array "a" should be square.')
+    # Return empty arrays for empty square input
+    if a.size == 0:
+        return empty_like(a), empty_like(a), np.array([], dtype=int)
+
+    n = a.shape[0]
+    r_or_c = complex if iscomplexobj(a) else float
+
+    # Get the LAPACK routine
+    if r_or_c is complex and hermitian:
+        s, sl = 'hetrf', 'hetrf_lwork'
+        if np.any(imag(diag(a))):
+            warn('scipy.linalg.ldl():\nThe imaginary parts of the diagonal '
+                 'are ignored. Use "hermitian=False" for factorization of '
+                 'complex symmetric arrays.', ComplexWarning, stacklevel=2)
+    else:
+        s, sl = 'sytrf', 'sytrf_lwork'
+
+    solver, solver_lwork = get_lapack_funcs((s, sl), (a,))
+    lwork = _compute_lwork(solver_lwork, n, lower=lower)
+    ldu, piv, info = solver(a, lwork=lwork, lower=lower,
+                            overwrite_a=overwrite_a)
+    if info < 0:
+        raise ValueError('{} exited with the internal error "illegal value '
+                         'in argument number {}". See LAPACK documentation '
+                         'for the error codes.'.format(s.upper(), -info))
+
+    swap_arr, pivot_arr = _ldl_sanitize_ipiv(piv, lower=lower)
+    d, lu = _ldl_get_d_and_l(ldu, pivot_arr, lower=lower, hermitian=hermitian)
+    lu, perm = _ldl_construct_tri_factor(lu, swap_arr, pivot_arr, lower=lower)
+
+    return lu, d, perm
+
+
+def _ldl_sanitize_ipiv(a, lower=True):
+    """
+    This helper function takes the rather strangely encoded permutation array
+    returned by the LAPACK routines ?(HE/SY)TRF and converts it into
+    regularized permutation and diagonal pivot size format.
+
+    Since FORTRAN uses 1-indexing and LAPACK uses different start points for
+    upper and lower formats there are certain offsets in the indices used
+    below.
+
+    Let's assume a result where the matrix is 6x6 and there are two 2x2
+    and two 1x1 blocks reported by the routine. To ease the coding efforts,
+    we still populate a 6-sized array and fill zeros as the following ::
+
+        pivots = [2, 0, 2, 0, 1, 1]
+
+    This denotes a diagonal matrix of the form ::
+
+        [x x        ]
+        [x x        ]
+        [    x x    ]
+        [    x x    ]
+        [        x  ]
+        [          x]
+
+    In other words, we write 2 when the 2x2 block is first encountered and
+    automatically write 0 to the next entry and skip the next spin of the
+    loop. Thus, a separate counter or array for keeping track of block
+    sizes is avoided. If needed, zeros can be filtered out later without
+    losing the block structure.
+
+    Parameters
+    ----------
+    a : ndarray
+        The permutation array ipiv returned by LAPACK
+    lower : bool, optional
+        The switch to select whether upper or lower triangle is chosen in
+        the LAPACK call.
+
+    Returns
+    -------
+    swap_ : ndarray
+        The array that defines the row/column swap operations. For example,
+        if row two is swapped with row four, the result is [0, 3, 2, 3].
+    pivots : ndarray
+        The array that defines the block diagonal structure as given above.
+
+    """
+    n = a.size
+    swap_ = arange(n)
+    pivots = zeros_like(swap_, dtype=int)
+    skip_2x2 = False
+
+    # Some upper/lower dependent offset values: x, y offset into the 2x2
+    # blocks; rs, re, ri are the traversal range (s)tart, (e)nd, (i)ncrement
+    x, y, rs, re, ri = (1, 0, 0, n, 1) if lower else (-1, -1, n-1, -1, -1)
+
+    for ind in range(rs, re, ri):
+        # If previous spin belonged already to a 2x2 block
+        if skip_2x2:
+            skip_2x2 = False
+            continue
+
+        cur_val = a[ind]
+        # do we have a 1x1 block or not?
+        if cur_val > 0:
+            if cur_val != ind+1:
+                # Index value != array value --> permutation required
+                swap_[ind] = swap_[cur_val-1]
+            pivots[ind] = 1
+        # Not.
+        elif cur_val < 0 and cur_val == a[ind+x]:
+            # first neg entry of 2x2 block identifier
+            if -cur_val != ind+2:
+                # Index value != array value --> permutation required
+                swap_[ind+x] = swap_[-cur_val-1]
+            pivots[ind+y] = 2
+            skip_2x2 = True
+        else:  # Doesn't make sense, give up
+            raise ValueError('While parsing the permutation array '
+                             'in "scipy.linalg.ldl", invalid entries '
+                             'found. The array syntax is invalid.')
+    return swap_, pivots
+
+
+def _ldl_get_d_and_l(ldu, pivs, lower=True, hermitian=True):
+    """
+    Helper function to extract the diagonal and triangular matrices for
+    LDL.T factorization.
+
+    Parameters
+    ----------
+    ldu : ndarray
+        The compact output returned by the LAPACK routine
+    pivs : ndarray
+        The sanitized array of {0, 1, 2} denoting the sizes of the pivots. For
+        every 2 there is a succeeding 0.
+    lower : bool, optional
+        If set to False, upper triangular part is considered.
+    hermitian : bool, optional
+        If set to False, a symmetric complex array is assumed.
+
+    Returns
+    -------
+    d : ndarray
+        The block diagonal matrix.
+    lu : ndarray
+        The upper/lower triangular matrix
+    """
+    is_c = iscomplexobj(ldu)
+    d = diag(diag(ldu))
+    n = d.shape[0]
+    blk_i = 0  # block index
+
+    # row/column offsets for selecting sub-, super-diagonal
+    x, y = (1, 0) if lower else (0, 1)
+
+    lu = tril(ldu, -1) if lower else triu(ldu, 1)
+    diag_inds = arange(n)
+    lu[diag_inds, diag_inds] = 1
+
+    for blk in pivs[pivs != 0]:
+        # increment the block index and check for 2s
+        # if 2 then copy the off diagonals depending on uplo
+        inc = blk_i + blk
+
+        if blk == 2:
+            d[blk_i+x, blk_i+y] = ldu[blk_i+x, blk_i+y]
+            # If Hermitian matrix is factorized, the cross-offdiagonal element
+            # should be conjugated.
+            if is_c and hermitian:
+                d[blk_i+y, blk_i+x] = ldu[blk_i+x, blk_i+y].conj()
+            else:
+                d[blk_i+y, blk_i+x] = ldu[blk_i+x, blk_i+y]
+
+            lu[blk_i+x, blk_i+y] = 0.
+        blk_i = inc
+
+    return d, lu
+
+
+def _ldl_construct_tri_factor(lu, swap_vec, pivs, lower=True):
+    """
+    Helper function to construct explicit outer factors of LDL factorization.
+
+    If lower is True the permuted factors are multiplied as L(1)*L(2)*...*L(k).
+    Otherwise, the permuted factors are multiplied as L(k)*...*L(2)*L(1). See
+    LAPACK documentation for more details.
+
+    Parameters
+    ----------
+    lu : ndarray
+        The triangular array that is extracted from LAPACK routine call with
+        ones on the diagonals.
+    swap_vec : ndarray
+        The array that defines the row swapping indices. If the kth entry is m
+        then rows k,m are swapped. Notice that the mth entry is not necessarily
+        k to avoid undoing the swapping.
+    pivs : ndarray
+        The array that defines the block diagonal structure returned by
+        _ldl_sanitize_ipiv().
+    lower : bool, optional
+        The boolean to switch between lower and upper triangular structure.
+
+    Returns
+    -------
+    lu : ndarray
+        The square outer factor that satisfies ``L * D * L.T = A``
+    perm : ndarray
+        The permutation vector that brings ``lu`` into triangular form
+
+    Notes
+    -----
+    Note that the original argument "lu" is overwritten.
+
+    """
+    n = lu.shape[0]
+    perm = arange(n)
+    # Setup the reading order of the permutation matrix for upper/lower
+    rs, re, ri = (n-1, -1, -1) if lower else (0, n, 1)
+
+    for ind in range(rs, re, ri):
+        s_ind = swap_vec[ind]
+        if s_ind != ind:
+            # Column start and end positions
+            col_s = ind if lower else 0
+            col_e = n if lower else ind+1
+
+            # If we stumble upon a 2x2 block include both cols in the perm.
+            if pivs[ind] == (0 if lower else 2):
+                col_s += -1 if lower else 0
+                col_e += 0 if lower else 1
+            lu[[s_ind, ind], col_s:col_e] = lu[[ind, s_ind], col_s:col_e]
+            perm[[s_ind, ind]] = perm[[ind, s_ind]]
+
+    return lu, argsort(perm)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/_decomp_lu.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/_decomp_lu.py
new file mode 100644
index 00000000..1f124435
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/_decomp_lu.py
@@ -0,0 +1,226 @@
+"""LU decomposition functions."""
+
+from warnings import warn
+
+from numpy import asarray, asarray_chkfinite
+
+# Local imports
+from ._misc import _datacopied, LinAlgWarning
+from .lapack import get_lapack_funcs
+from ._flinalg_py import get_flinalg_funcs
+
+__all__ = ['lu', 'lu_solve', 'lu_factor']
+
+
+def lu_factor(a, overwrite_a=False, check_finite=True):
+    """
+    Compute pivoted LU decomposition of a matrix.
+
+    The decomposition is::
+
+        A = P L U
+
+    where P is a permutation matrix, L lower triangular with unit
+    diagonal elements, and U upper triangular.
+
+    Parameters
+    ----------
+    a : (M, N) array_like
+        Matrix to decompose
+    overwrite_a : bool, optional
+        Whether to overwrite data in A (may increase performance)
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    lu : (M, N) ndarray
+        Matrix containing U in its upper triangle, and L in its lower triangle.
+        The unit diagonal elements of L are not stored.
+    piv : (N,) ndarray
+        Pivot indices representing the permutation matrix P:
+        row i of matrix was interchanged with row piv[i].
+
+    See Also
+    --------
+    lu : gives lu factorization in more user-friendly format
+    lu_solve : solve an equation system using the LU factorization of a matrix
+
+    Notes
+    -----
+    This is a wrapper to the ``*GETRF`` routines from LAPACK. Unlike
+    :func:`lu`, it outputs the L and U factors into a single array
+    and returns pivot indices instead of a permutation matrix.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import lu_factor
+    >>> A = np.array([[2, 5, 8, 7], [5, 2, 2, 8], [7, 5, 6, 6], [5, 4, 4, 8]])
+    >>> lu, piv = lu_factor(A)
+    >>> piv
+    array([2, 2, 3, 3], dtype=int32)
+
+    Convert LAPACK's ``piv`` array to NumPy index and test the permutation
+
+    >>> piv_py = [2, 0, 3, 1]
+    >>> L, U = np.tril(lu, k=-1) + np.eye(4), np.triu(lu)
+    >>> np.allclose(A[piv_py] - L @ U, np.zeros((4, 4)))
+    True
+    """
+    if check_finite:
+        a1 = asarray_chkfinite(a)
+    else:
+        a1 = asarray(a)
+    overwrite_a = overwrite_a or (_datacopied(a1, a))
+    getrf, = get_lapack_funcs(('getrf',), (a1,))
+    lu, piv, info = getrf(a1, overwrite_a=overwrite_a)
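+    # Note: piv returned here is 0-based (row i of the matrix was swapped
+    # with row piv[i], matching the docstring example), unlike LAPACK's
+    # 1-based ipiv.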
+    if info < 0:
+        raise ValueError('illegal value in %dth argument of '
+                         'internal getrf (lu_factor)' % -info)
+    if info > 0:
+        warn("Diagonal number %d is exactly zero. Singular matrix." % info,
+             LinAlgWarning, stacklevel=2)
+    return lu, piv
+
+
+def lu_solve(lu_and_piv, b, trans=0, overwrite_b=False, check_finite=True):
+    """Solve an equation system, a x = b, given the LU factorization of a
+
+    Parameters
+    ----------
+    (lu, piv)
+        Factorization of the coefficient matrix a, as given by lu_factor
+    b : array
+        Right-hand side
+    trans : {0, 1, 2}, optional
+        Type of system to solve:
+
+        =====  =========
+        trans  system
+        =====  =========
+        0      a x   = b
+        1      a^T x = b
+        2      a^H x = b
+        =====  =========
+    overwrite_b : bool, optional
+        Whether to overwrite data in b (may increase performance)
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    x : array
+        Solution to the system
+
+    See Also
+    --------
+    lu_factor : LU factorize a matrix
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import lu_factor, lu_solve
+    >>> A = np.array([[2, 5, 8, 7], [5, 2, 2, 8], [7, 5, 6, 6], [5, 4, 4, 8]])
+    >>> b = np.array([1, 1, 1, 1])
+    >>> lu, piv = lu_factor(A)
+    >>> x = lu_solve((lu, piv), b)
+    >>> np.allclose(A @ x - b, np.zeros((4,)))
+    True
+
+    """
+    (lu, piv) = lu_and_piv
+    if check_finite:
+        b1 = asarray_chkfinite(b)
+    else:
+        b1 = asarray(b)
+    overwrite_b = overwrite_b or _datacopied(b1, b)
+    if lu.shape[0] != b1.shape[0]:
+        raise ValueError("Shapes of lu {} and b {} are incompatible"
+                         .format(lu.shape, b1.shape))
+
+    getrs, = get_lapack_funcs(('getrs',), (lu, b1))
+    x, info = getrs(lu, piv, b1, trans=trans, overwrite_b=overwrite_b)
+    if info == 0:
+        return x
+    raise ValueError('illegal value in %dth argument of internal getrs'
+                     % -info)
+
+
+def lu(a, permute_l=False, overwrite_a=False, check_finite=True):
+    """
+    Compute pivoted LU decomposition of a matrix.
+
+    The decomposition is::
+
+        A = P L U
+
+    where P is a permutation matrix, L lower triangular with unit
+    diagonal elements, and U upper triangular.
+
+    Parameters
+    ----------
+    a : (M, N) array_like
+        Array to decompose
+    permute_l : bool, optional
+        Perform the multiplication P*L (Default: do not permute)
+    overwrite_a : bool, optional
+        Whether to overwrite data in a (may improve performance)
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    **(If permute_l == False)**
+
+    p : (M, M) ndarray
+        Permutation matrix
+    l : (M, K) ndarray
+        Lower triangular or trapezoidal matrix with unit diagonal.
+        K = min(M, N)
+    u : (K, N) ndarray
+        Upper triangular or trapezoidal matrix
+
+    **(If permute_l == True)**
+
+    pl : (M, K) ndarray
+        Permuted L matrix.
+        K = min(M, N)
+    u : (K, N) ndarray
+        Upper triangular or trapezoidal matrix
+
+    Notes
+    -----
+    This is an LU factorization routine written for SciPy.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import lu
+    >>> A = np.array([[2, 5, 8, 7], [5, 2, 2, 8], [7, 5, 6, 6], [5, 4, 4, 8]])
+    >>> p, l, u = lu(A)
+    >>> np.allclose(A - p @ l @ u, np.zeros((4, 4)))
+    True
+
+    """
+    if check_finite:
+        a1 = asarray_chkfinite(a)
+    else:
+        a1 = asarray(a)
+    if len(a1.shape) != 2:
+        raise ValueError('expected matrix')
+    overwrite_a = overwrite_a or (_datacopied(a1, a))
+    flu, = get_flinalg_funcs(('lu',), (a1,))
+    p, l, u, info = flu(a1, permute_l=permute_l, overwrite_a=overwrite_a)
+    if info < 0:
+        raise ValueError('illegal value in %dth argument of '
+                         'internal lu.getrf' % -info)
+    if permute_l:
+        return l, u
+    return p, l, u
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/_decomp_polar.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/_decomp_polar.py
new file mode 100644
index 00000000..2fc36528
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/_decomp_polar.py
@@ -0,0 +1,111 @@
+import numpy as np
+from scipy.linalg import svd
+
+
+__all__ = ['polar']
+
+
+def polar(a, side="right"):
+    """
+    Compute the polar decomposition.
+
+    Returns the factors of the polar decomposition [1]_ `u` and `p` such
+    that ``a = up`` (if `side` is "right") or ``a = pu`` (if `side` is
+    "left"), where `p` is positive semidefinite. Depending on the shape
+    of `a`, either the rows or columns of `u` are orthonormal. When `a`
+    is a square array, `u` is a square unitary array. When `a` is not
+    square, the "canonical polar decomposition" [2]_ is computed.
+
+    Parameters
+    ----------
+    a : (m, n) array_like
+        The array to be factored.
+    side : {'left', 'right'}, optional
+        Determines whether a right or left polar decomposition is computed.
+        If `side` is "right", then ``a = up``.  If `side` is "left",  then
+        ``a = pu``.  The default is "right".
+
+    Returns
+    -------
+    u : (m, n) ndarray
+        If `a` is square, then `u` is unitary. If m > n, then the columns
+        of `u` are orthonormal, and if m < n, then the rows of `u` are
+        orthonormal.
+    p : ndarray
+        `p` is Hermitian positive semidefinite. If `a` is nonsingular, `p`
+        is positive definite. The shape of `p` is (n, n) or (m, m), depending
+        on whether `side` is "right" or "left", respectively.
+
+    References
+    ----------
+    .. [1] R. A. Horn and C. R. Johnson, "Matrix Analysis", Cambridge
+           University Press, 1985.
+    .. [2] N. J. Higham, "Functions of Matrices: Theory and Computation",
+           SIAM, 2008.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import polar
+    >>> a = np.array([[1, -1], [2, 4]])
+    >>> u, p = polar(a)
+    >>> u
+    array([[ 0.85749293, -0.51449576],
+           [ 0.51449576,  0.85749293]])
+    >>> p
+    array([[ 1.88648444,  1.2004901 ],
+           [ 1.2004901 ,  3.94446746]])
+
+    A non-square example, with m < n:
+
+    >>> b = np.array([[0.5, 1, 2], [1.5, 3, 4]])
+    >>> u, p = polar(b)
+    >>> u
+    array([[-0.21196618, -0.42393237,  0.88054056],
+           [ 0.39378971,  0.78757942,  0.4739708 ]])
+    >>> p
+    array([[ 0.48470147,  0.96940295,  1.15122648],
+           [ 0.96940295,  1.9388059 ,  2.30245295],
+           [ 1.15122648,  2.30245295,  3.65696431]])
+    >>> u.dot(p)   # Verify the decomposition.
+    array([[ 0.5,  1. ,  2. ],
+           [ 1.5,  3. ,  4. ]])
+    >>> u.dot(u.T)   # The rows of u are orthonormal.
+    array([[  1.00000000e+00,  -2.07353665e-17],
+           [ -2.07353665e-17,   1.00000000e+00]])
+
+    Another non-square example, with m > n:
+
+    >>> c = b.T
+    >>> u, p = polar(c)
+    >>> u
+    array([[-0.21196618,  0.39378971],
+           [-0.42393237,  0.78757942],
+           [ 0.88054056,  0.4739708 ]])
+    >>> p
+    array([[ 1.23116567,  1.93241587],
+           [ 1.93241587,  4.84930602]])
+    >>> u.dot(p)   # Verify the decomposition.
+    array([[ 0.5,  1.5],
+           [ 1. ,  3. ],
+           [ 2. ,  4. ]])
+    >>> u.T.dot(u)  # The columns of u are orthonormal.
+    array([[  1.00000000e+00,  -1.26363763e-16],
+           [ -1.26363763e-16,   1.00000000e+00]])
+
+    """
+    if side not in ['right', 'left']:
+        raise ValueError("`side` must be either 'right' or 'left'")
+    a = np.asarray(a)
+    if a.ndim != 2:
+        raise ValueError("`a` must be a 2-D array.")
+
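+    # From the thin SVD a = w @ diag(s) @ vh, the polar factors follow as
+    # u = w @ vh and p = vh^H @ diag(s) @ vh (side='right') or
+    # p = w @ diag(s) @ w^H (side='left').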
+    w, s, vh = svd(a, full_matrices=False)
+    u = w.dot(vh)
+    if side == 'right':
+        # a = up
+        p = (vh.T.conj() * s).dot(vh)
+    else:
+        # a = pu
+        p = (w * s).dot(w.T.conj())
+    return u, p
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/_decomp_qr.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/_decomp_qr.py
new file mode 100644
index 00000000..18f4376c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/_decomp_qr.py
@@ -0,0 +1,429 @@
+"""QR decomposition functions."""
+import numpy
+
+# Local imports
+from .lapack import get_lapack_funcs
+from ._misc import _datacopied
+
+__all__ = ['qr', 'qr_multiply', 'rq']
+
+
+def safecall(f, name, *args, **kwargs):
+    """Call a LAPACK routine, determining lwork automatically and handling
+    error return values"""
+    lwork = kwargs.get("lwork", None)
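+    # LAPACK workspace-query convention: calling the routine with lwork=-1
+    # only computes the optimal workspace size, returned in the first
+    # element of the work array (the second-to-last output), which is then
+    # used for the actual call below.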
+    if lwork in (None, -1):
+        kwargs['lwork'] = -1
+        ret = f(*args, **kwargs)
+        kwargs['lwork'] = ret[-2][0].real.astype(numpy.int_)
+    ret = f(*args, **kwargs)
+    if ret[-1] < 0:
+        raise ValueError("illegal value in %dth argument of internal %s"
+                         % (-ret[-1], name))
+    return ret[:-2]
+
+
+def qr(a, overwrite_a=False, lwork=None, mode='full', pivoting=False,
+       check_finite=True):
+    """
+    Compute QR decomposition of a matrix.
+
+    Calculate the decomposition ``A = Q R`` where Q is unitary/orthogonal
+    and R upper triangular.
+
+    Parameters
+    ----------
+    a : (M, N) array_like
+        Matrix to be decomposed
+    overwrite_a : bool, optional
+        Whether data in `a` is overwritten (may improve performance by
+        reusing the existing input data structure rather than creating a
+        new one).
+    lwork : int, optional
+        Work array size, lwork >= a.shape[1]. If None or -1, an optimal size
+        is computed.
+    mode : {'full', 'r', 'economic', 'raw'}, optional
+        Determines what information is to be returned: either both Q and R
+        ('full', default), only R ('r') or both Q and R but computed in
+        economy-size ('economic', see Notes). The final option 'raw'
+        (added in SciPy 0.11) makes the function return two matrices
+        (Q, TAU) in the internal format used by LAPACK.
+    pivoting : bool, optional
+        Whether or not factorization should include pivoting for rank-revealing
+        qr decomposition. If pivoting, compute the decomposition
+        ``A P = Q R`` as above, but where P is chosen such that the diagonal
+        of R is non-increasing.
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    Q : float or complex ndarray
+        Of shape (M, M), or (M, K) for ``mode='economic'``. Not returned
+        if ``mode='r'``.
+    R : float or complex ndarray
+        Of shape (M, N), or (K, N) for ``mode='economic'``. ``K = min(M, N)``.
+    P : int ndarray
+        Of shape (N,) for ``pivoting=True``. Not returned if
+        ``pivoting=False``.
+
+    Raises
+    ------
+    LinAlgError
+        Raised if decomposition fails
+
+    Notes
+    -----
+    This is an interface to the LAPACK routines dgeqrf, zgeqrf,
+    dorgqr, zungqr, dgeqp3, and zgeqp3.
+
+    If ``mode='economic'``, the shapes of Q and R are (M, K) and (K, N) instead
+    of (M,M) and (M,N), with ``K=min(M,N)``.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import linalg
+    >>> rng = np.random.default_rng()
+    >>> a = rng.standard_normal((9, 6))
+
+    >>> q, r = linalg.qr(a)
+    >>> np.allclose(a, np.dot(q, r))
+    True
+    >>> q.shape, r.shape
+    ((9, 9), (9, 6))
+
+    >>> r2 = linalg.qr(a, mode='r')
+    >>> np.allclose(r, r2)
+    True
+
+    >>> q3, r3 = linalg.qr(a, mode='economic')
+    >>> q3.shape, r3.shape
+    ((9, 6), (6, 6))
+
+    >>> q4, r4, p4 = linalg.qr(a, pivoting=True)
+    >>> d = np.abs(np.diag(r4))
+    >>> np.all(d[1:] <= d[:-1])
+    True
+    >>> np.allclose(a[:, p4], np.dot(q4, r4))
+    True
+    >>> q4.shape, r4.shape, p4.shape
+    ((9, 9), (9, 6), (6,))
+
+    >>> q5, r5, p5 = linalg.qr(a, mode='economic', pivoting=True)
+    >>> q5.shape, r5.shape, p5.shape
+    ((9, 6), (6, 6), (6,))
+
+    """
+    # 'qr' was the old default, equivalent to 'full'. Neither 'full' nor
+    # 'qr' are used below.
+    # 'raw' is used internally by qr_multiply
+    if mode not in ['full', 'qr', 'r', 'economic', 'raw']:
+        raise ValueError("Mode argument should be one of ['full', 'r',"
+                         "'economic', 'raw']")
+
+    if check_finite:
+        a1 = numpy.asarray_chkfinite(a)
+    else:
+        a1 = numpy.asarray(a)
+    if len(a1.shape) != 2:
+        raise ValueError("expected a 2-D array")
+    M, N = a1.shape
+    overwrite_a = overwrite_a or (_datacopied(a1, a))
+
+    if pivoting:
+        geqp3, = get_lapack_funcs(('geqp3',), (a1,))
+        qr, jpvt, tau = safecall(geqp3, "geqp3", a1, overwrite_a=overwrite_a)
+        jpvt -= 1  # geqp3 returns a 1-based index array, so subtract 1
+    else:
+        geqrf, = get_lapack_funcs(('geqrf',), (a1,))
+        qr, tau = safecall(geqrf, "geqrf", a1, lwork=lwork,
+                           overwrite_a=overwrite_a)
+
+    if mode not in ['economic', 'raw'] or M < N:
+        R = numpy.triu(qr)
+    else:
+        R = numpy.triu(qr[:N, :])
+
+    if pivoting:
+        Rj = R, jpvt
+    else:
+        Rj = R,
+
+    if mode == 'r':
+        return Rj
+    elif mode == 'raw':
+        return ((qr, tau),) + Rj
+
+    gor_un_gqr, = get_lapack_funcs(('orgqr',), (qr,))
+
+    if M < N:
+        Q, = safecall(gor_un_gqr, "gorgqr/gungqr", qr[:, :M], tau,
+                      lwork=lwork, overwrite_a=1)
+    elif mode == 'economic':
+        Q, = safecall(gor_un_gqr, "gorgqr/gungqr", qr, tau, lwork=lwork,
+                      overwrite_a=1)
+    else:
+        t = qr.dtype.char
+        qqr = numpy.empty((M, M), dtype=t)
+        qqr[:, :N] = qr
+        Q, = safecall(gor_un_gqr, "gorgqr/gungqr", qqr, tau, lwork=lwork,
+                      overwrite_a=1)
+
+    return (Q,) + Rj
+
+
+def qr_multiply(a, c, mode='right', pivoting=False, conjugate=False,
+                overwrite_a=False, overwrite_c=False):
+    """
+    Calculate the QR decomposition and multiply Q with a matrix.
+
+    Calculate the decomposition ``A = Q R`` where Q is unitary/orthogonal
+    and R upper triangular. Multiply Q with a vector or a matrix c.
+
+    Parameters
+    ----------
+    a : (M, N), array_like
+        Input array
+    c : array_like
+        Input array to be multiplied by ``q``.
+    mode : {'left', 'right'}, optional
+        ``Q @ c`` is returned if mode is 'left', ``c @ Q`` is returned if
+        mode is 'right'.
+        The shape of c must be appropriate for the matrix multiplications,
+        if mode is 'left', ``min(a.shape) == c.shape[0]``,
+        if mode is 'right', ``a.shape[0] == c.shape[1]``.
+    pivoting : bool, optional
+        Whether or not factorization should include pivoting for rank-revealing
+        qr decomposition, see the documentation of qr.
+    conjugate : bool, optional
+        Whether Q should be complex-conjugated. This might be faster
+        than explicit conjugation.
+    overwrite_a : bool, optional
+        Whether data in a is overwritten (may improve performance)
+    overwrite_c : bool, optional
+        Whether data in c is overwritten (may improve performance).
+        If this is used, c must be big enough to keep the result,
+        i.e. ``c.shape[0]`` = ``a.shape[0]`` if mode is 'left'.
+
+    Returns
+    -------
+    CQ : ndarray
+        The product of ``Q`` and ``c``.
+    R : (K, N), ndarray
+        R array of the resulting QR factorization where ``K = min(M, N)``.
+    P : (N,) ndarray
+        Integer pivot array. Only returned when ``pivoting=True``.
+
+    Raises
+    ------
+    LinAlgError
+        Raised if QR decomposition fails.
+
+    Notes
+    -----
+    This is an interface to the LAPACK routines ``?GEQRF``, ``?ORMQR``,
+    ``?UNMQR``, and ``?GEQP3``.
+
+    .. versionadded:: 0.11.0
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import qr_multiply, qr
+    >>> A = np.array([[1, 3, 3], [2, 3, 2], [2, 3, 3], [1, 3, 2]])
+    >>> qc, r1, piv1 = qr_multiply(A, 2*np.eye(4), pivoting=1)
+    >>> qc
+    array([[-1.,  1., -1.],
+           [-1., -1.,  1.],
+           [-1., -1., -1.],
+           [-1.,  1.,  1.]])
+    >>> r1
+    array([[-6., -3., -5.            ],
+           [ 0., -1., -1.11022302e-16],
+           [ 0.,  0., -1.            ]])
+    >>> piv1
+    array([1, 0, 2], dtype=int32)
+    >>> q2, r2, piv2 = qr(A, mode='economic', pivoting=1)
+    >>> np.allclose(2*q2 - qc, np.zeros((4, 3)))
+    True
+
+    """
+    if mode not in ['left', 'right']:
+        raise ValueError("Mode argument can only be 'left' or 'right' but "
+                         "not '{}'".format(mode))
+    c = numpy.asarray_chkfinite(c)
+    if c.ndim < 2:
+        onedim = True
+        c = numpy.atleast_2d(c)
+        if mode == "left":
+            c = c.T
+    else:
+        onedim = False
+
+    a = numpy.atleast_2d(numpy.asarray(a))  # chkfinite done in qr
+    M, N = a.shape
+
+    if mode == 'left':
+        if c.shape[0] != min(M, N + overwrite_c*(M-N)):
+            raise ValueError('Array shapes are not compatible for Q @ c'
+                             ' operation: {} vs {}'.format(a.shape, c.shape))
+    else:
+        if M != c.shape[1]:
+            raise ValueError('Array shapes are not compatible for c @ Q'
+                             ' operation: {} vs {}'.format(c.shape, a.shape))
+
+    raw = qr(a, overwrite_a, None, "raw", pivoting)
+    Q, tau = raw[0]
+
+    gor_un_mqr, = get_lapack_funcs(('ormqr',), (Q,))
+    if gor_un_mqr.typecode in ('s', 'd'):
+        trans = "T"
+    else:
+        trans = "C"
+
+    Q = Q[:, :min(M, N)]
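+    # Choose the multiplication side (lr) and transpose flag (trans) for
+    # ?ORMQR/?UNMQR so that the requested product (Q @ c or c @ Q) is
+    # formed, transposing c instead of Q where that avoids an extra copy.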
+    if M > N and mode == "left" and not overwrite_c:
+        if conjugate:
+            cc = numpy.zeros((c.shape[1], M), dtype=c.dtype, order="F")
+            cc[:, :N] = c.T
+        else:
+            cc = numpy.zeros((M, c.shape[1]), dtype=c.dtype, order="F")
+            cc[:N, :] = c
+            trans = "N"
+        if conjugate:
+            lr = "R"
+        else:
+            lr = "L"
+        overwrite_c = True
+    elif c.flags["C_CONTIGUOUS"] and trans == "T" or conjugate:
+        cc = c.T
+        if mode == "left":
+            lr = "R"
+        else:
+            lr = "L"
+    else:
+        trans = "N"
+        cc = c
+        if mode == "left":
+            lr = "L"
+        else:
+            lr = "R"
+    cQ, = safecall(gor_un_mqr, "gormqr/gunmqr", lr, trans, Q, tau, cc,
+                   overwrite_c=overwrite_c)
+    if trans != "N":
+        cQ = cQ.T
+    if mode == "right":
+        cQ = cQ[:, :min(M, N)]
+    if onedim:
+        cQ = cQ.ravel()
+
+    return (cQ,) + raw[1:]
+
+
+def rq(a, overwrite_a=False, lwork=None, mode='full', check_finite=True):
+    """
+    Compute RQ decomposition of a matrix.
+
+    Calculate the decomposition ``A = R Q`` where Q is unitary/orthogonal
+    and R upper triangular.
+
+    Parameters
+    ----------
+    a : (M, N) array_like
+        Matrix to be decomposed
+    overwrite_a : bool, optional
+        Whether data in a is overwritten (may improve performance)
+    lwork : int, optional
+        Work array size, lwork >= a.shape[1]. If None or -1, an optimal size
+        is computed.
+    mode : {'full', 'r', 'economic'}, optional
+        Determines what information is to be returned: either both Q and R
+        ('full', default), only R ('r') or both Q and R but computed in
+        economy-size ('economic', see Notes).
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    R : float or complex ndarray
+        Of shape (M, N) or (M, K) for ``mode='economic'``. ``K = min(M, N)``.
+    Q : float or complex ndarray
+        Of shape (N, N) or (K, N) for ``mode='economic'``. Not returned
+        if ``mode='r'``.
+
+    Raises
+    ------
+    LinAlgError
+        If decomposition fails.
+
+    Notes
+    -----
+    This is an interface to the LAPACK routines sgerqf, dgerqf, cgerqf, zgerqf,
+    sorgrq, dorgrq, cungrq and zungrq.
+
+    If ``mode='economic'``, the shapes of Q and R are (K, N) and (M, K) instead
+    of (N,N) and (M,N), with ``K=min(M,N)``.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import linalg
+    >>> rng = np.random.default_rng()
+    >>> a = rng.standard_normal((6, 9))
+    >>> r, q = linalg.rq(a)
+    >>> np.allclose(a, r @ q)
+    True
+    >>> r.shape, q.shape
+    ((6, 9), (9, 9))
+    >>> r2 = linalg.rq(a, mode='r')
+    >>> np.allclose(r, r2)
+    True
+    >>> r3, q3 = linalg.rq(a, mode='economic')
+    >>> r3.shape, q3.shape
+    ((6, 6), (6, 9))
+
+    """
+    if mode not in ['full', 'r', 'economic']:
+        raise ValueError(
+                 "Mode argument should be one of ['full', 'r', 'economic']")
+
+    if check_finite:
+        a1 = numpy.asarray_chkfinite(a)
+    else:
+        a1 = numpy.asarray(a)
+    if len(a1.shape) != 2:
+        raise ValueError('expected matrix')
+    M, N = a1.shape
+    overwrite_a = overwrite_a or (_datacopied(a1, a))
+
+    gerqf, = get_lapack_funcs(('gerqf',), (a1,))
+    rq, tau = safecall(gerqf, 'gerqf', a1, lwork=lwork,
+                       overwrite_a=overwrite_a)
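+    # gerqf packs R into the rightmost columns of the output; the diagonal
+    # offset N-M extracts the correct (possibly trapezoidal) upper triangle.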
+    if not mode == 'economic' or N < M:
+        R = numpy.triu(rq, N-M)
+    else:
+        R = numpy.triu(rq[-M:, -M:])
+
+    if mode == 'r':
+        return R
+
+    gor_un_grq, = get_lapack_funcs(('orgrq',), (rq,))
+
+    if N < M:
+        Q, = safecall(gor_un_grq, "gorgrq/gungrq", rq[-N:], tau, lwork=lwork,
+                      overwrite_a=1)
+    elif mode == 'economic':
+        Q, = safecall(gor_un_grq, "gorgrq/gungrq", rq, tau, lwork=lwork,
+                      overwrite_a=1)
+    else:
+        rq1 = numpy.empty((N, N), dtype=rq.dtype)
+        rq1[-M:] = rq
+        Q, = safecall(gor_un_grq, "gorgrq/gungrq", rq1, tau, lwork=lwork,
+                      overwrite_a=1)
+
+    return R, Q
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/_decomp_qz.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/_decomp_qz.py
new file mode 100644
index 00000000..8030a4b7
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/_decomp_qz.py
@@ -0,0 +1,448 @@
+import warnings
+
+import numpy as np
+from numpy import asarray_chkfinite
+from ._misc import LinAlgError, _datacopied, LinAlgWarning
+from .lapack import get_lapack_funcs
+
+
+__all__ = ['qz', 'ordqz']
+
+_double_precision = ['i', 'l', 'd']
+
+
+def _select_function(sort):
+    if callable(sort):
+        # assume the user knows what they're doing
+        sfunction = sort
+    elif sort == 'lhp':
+        sfunction = _lhp
+    elif sort == 'rhp':
+        sfunction = _rhp
+    elif sort == 'iuc':
+        sfunction = _iuc
+    elif sort == 'ouc':
+        sfunction = _ouc
+    else:
+        raise ValueError("sort parameter must be None, a callable, or "
+                         "one of ('lhp','rhp','iuc','ouc')")
+
+    return sfunction
+
+
+def _lhp(x, y):
+    out = np.empty_like(x, dtype=bool)
+    nonzero = (y != 0)
+    # handles (x, y) = (0, 0) too
+    out[~nonzero] = False
+    out[nonzero] = (np.real(x[nonzero]/y[nonzero]) < 0.0)
+    return out
+
+
+def _rhp(x, y):
+    out = np.empty_like(x, dtype=bool)
+    nonzero = (y != 0)
+    # handles (x, y) = (0, 0) too
+    out[~nonzero] = False
+    out[nonzero] = (np.real(x[nonzero]/y[nonzero]) > 0.0)
+    return out
+
+
+def _iuc(x, y):
+    out = np.empty_like(x, dtype=bool)
+    nonzero = (y != 0)
+    # handles (x, y) = (0, 0) too
+    out[~nonzero] = False
+    out[nonzero] = (abs(x[nonzero]/y[nonzero]) < 1.0)
+    return out
+
+
+def _ouc(x, y):
+    out = np.empty_like(x, dtype=bool)
+    xzero = (x == 0)
+    yzero = (y == 0)
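+    # alpha != 0 with beta == 0 is an infinite eigenvalue, treated as lying
+    # outside the unit circle; (alpha, beta) == (0, 0) is excluded.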
+    out[xzero & yzero] = False
+    out[~xzero & yzero] = True
+    out[~yzero] = (abs(x[~yzero]/y[~yzero]) > 1.0)
+    return out
+
+
+def _qz(A, B, output='real', lwork=None, sort=None, overwrite_a=False,
+        overwrite_b=False, check_finite=True):
+    if sort is not None:
+        # Disabled due to segfaults on win32, see ticket 1717.
+        raise ValueError("The 'sort' input of qz() has to be None and will be "
+                         "removed in a future release. Use ordqz instead.")
+
+    if output not in ['real', 'complex', 'r', 'c']:
+        raise ValueError("argument must be 'real', or 'complex'")
+
+    if check_finite:
+        a1 = asarray_chkfinite(A)
+        b1 = asarray_chkfinite(B)
+    else:
+        a1 = np.asarray(A)
+        b1 = np.asarray(B)
+
+    a_m, a_n = a1.shape
+    b_m, b_n = b1.shape
+    if not (a_m == a_n == b_m == b_n):
+        raise ValueError("Array dimensions must be square and agree")
+
+    typa = a1.dtype.char
+    if output in ['complex', 'c'] and typa not in ['F', 'D']:
+        if typa in _double_precision:
+            a1 = a1.astype('D')
+            typa = 'D'
+        else:
+            a1 = a1.astype('F')
+            typa = 'F'
+    typb = b1.dtype.char
+    if output in ['complex', 'c'] and typb not in ['F', 'D']:
+        if typb in _double_precision:
+            b1 = b1.astype('D')
+            typb = 'D'
+        else:
+            b1 = b1.astype('F')
+            typb = 'F'
+
+    overwrite_a = overwrite_a or (_datacopied(a1, A))
+    overwrite_b = overwrite_b or (_datacopied(b1, B))
+
+    gges, = get_lapack_funcs(('gges',), (a1, b1))
+
+    if lwork is None or lwork == -1:
+        # get optimal work array size
+        result = gges(lambda x: None, a1, b1, lwork=-1)
+        lwork = result[-2][0].real.astype(np.int_)
+
+    sfunction = lambda x: None
+    result = gges(sfunction, a1, b1, lwork=lwork, overwrite_a=overwrite_a,
+                  overwrite_b=overwrite_b, sort_t=0)
+
+    info = result[-1]
+    if info < 0:
+        raise ValueError("Illegal value in argument {} of gges".format(-info))
+    elif info > 0 and info <= a_n:
+        warnings.warn("The QZ iteration failed. (a,b) are not in Schur "
+                      "form, but ALPHAR(j), ALPHAI(j), and BETA(j) should be "
+                      "correct for J={},...,N".format(info-1), LinAlgWarning,
+                      stacklevel=3)
+    elif info == a_n+1:
+        raise LinAlgError("Something other than QZ iteration failed")
+    elif info == a_n+2:
+        raise LinAlgError("After reordering, roundoff changed values of some "
+                          "complex eigenvalues so that leading eigenvalues "
+                          "in the Generalized Schur form no longer satisfy "
+                          "sort=True. This could also be due to scaling.")
+    elif info == a_n+3:
+        raise LinAlgError("Reordering failed in tgsen")
+
+    return result, gges.typecode
+
+
+def qz(A, B, output='real', lwork=None, sort=None, overwrite_a=False,
+       overwrite_b=False, check_finite=True):
+    """
+    QZ decomposition for generalized eigenvalues of a pair of matrices.
+
+    The QZ, or generalized Schur, decomposition for a pair of n-by-n
+    matrices (A,B) is::
+
+        (A,B) = (Q @ AA @ Z*, Q @ BB @ Z*)
+
+    where (AA, BB) is in generalized Schur form if BB is upper-triangular
+    with non-negative diagonal and AA is upper-triangular, or for real QZ
+    decomposition (``output='real'``) block upper triangular with 1x1
+    and 2x2 blocks. In this case, the 1x1 blocks correspond to real
+    generalized eigenvalues and 2x2 blocks are 'standardized' by making
+    the corresponding elements of BB have the form::
+
+        [ a 0 ]
+        [ 0 b ]
+
+    and the pair of corresponding 2x2 blocks in AA and BB will have a complex
+    conjugate pair of generalized eigenvalues. If ``output='complex'`` or
+    A and B are complex matrices, ``Z*`` denotes the conjugate-transpose of Z.
+    Q and Z are unitary matrices.
+
+    Parameters
+    ----------
+    A : (N, N) array_like
+        2-D array to decompose
+    B : (N, N) array_like
+        2-D array to decompose
+    output : {'real', 'complex'}, optional
+        Construct the real or complex QZ decomposition for real matrices.
+        Default is 'real'.
+    lwork : int, optional
+        Work array size. If None or -1, it is automatically computed.
+    sort : {None, callable, 'lhp', 'rhp', 'iuc', 'ouc'}, optional
+        NOTE: THIS INPUT IS DISABLED FOR NOW. Use ordqz instead.
+
+        Specifies whether the upper eigenvalues should be sorted. A callable
+        may be passed that, given an eigenvalue, returns a boolean denoting
+        whether the eigenvalue should be sorted to the top-left (True). For
+        real matrix pairs, the sort function takes three real arguments
+        (alphar, alphai, beta). The eigenvalue
+        ``x = (alphar + alphai*1j)/beta``. For complex matrix pairs or
+        output='complex', the sort function takes two complex arguments
+        (alpha, beta). The eigenvalue ``x = (alpha/beta)``.  Alternatively,
+        string parameters may be used:
+
+            - 'lhp'   Left-hand plane (x.real < 0.0)
+            - 'rhp'   Right-hand plane (x.real > 0.0)
+            - 'iuc'   Inside the unit circle (x*x.conjugate() < 1.0)
+            - 'ouc'   Outside the unit circle (x*x.conjugate() > 1.0)
+
+        Defaults to None (no sorting).
+    overwrite_a : bool, optional
+        Whether to overwrite data in a (may improve performance)
+    overwrite_b : bool, optional
+        Whether to overwrite data in b (may improve performance)
+    check_finite : bool, optional
+        If true, checks that the elements of `A` and `B` are finite numbers.
+        If false, does no checking and passes the matrices through to the
+        underlying algorithm.
+
+    Returns
+    -------
+    AA : (N, N) ndarray
+        Generalized Schur form of A.
+    BB : (N, N) ndarray
+        Generalized Schur form of B.
+    Q : (N, N) ndarray
+        The left Schur vectors.
+    Z : (N, N) ndarray
+        The right Schur vectors.
+
+    See Also
+    --------
+    ordqz
+
+    Notes
+    -----
+    Q is transposed versus the equivalent function in Matlab.
+
+    .. versionadded:: 0.11.0
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import qz
+
+    >>> A = np.array([[1, 2, -1], [5, 5, 5], [2, 4, -8]])
+    >>> B = np.array([[1, 1, -3], [3, 1, -1], [5, 6, -2]])
+
+    Compute the decomposition.  The QZ decomposition is not unique, so
+    depending on the underlying library that is used, there may be
+    differences in the signs of coefficients in the following output.
+
+    >>> AA, BB, Q, Z = qz(A, B)
+    >>> AA
+    array([[-1.36949157, -4.05459025,  7.44389431],
+           [ 0.        ,  7.65653432,  5.13476017],
+           [ 0.        , -0.65978437,  2.4186015 ]])  # may vary
+    >>> BB
+    array([[ 1.71890633, -1.64723705, -0.72696385],
+           [ 0.        ,  8.6965692 , -0.        ],
+           [ 0.        ,  0.        ,  2.27446233]])  # may vary
+    >>> Q
+    array([[-0.37048362,  0.1903278 ,  0.90912992],
+           [-0.90073232,  0.16534124, -0.40167593],
+           [ 0.22676676,  0.96769706, -0.11017818]])  # may vary
+    >>> Z
+    array([[-0.67660785,  0.63528924, -0.37230283],
+           [ 0.70243299,  0.70853819, -0.06753907],
+           [ 0.22088393, -0.30721526, -0.92565062]])  # may vary
+
+    Verify the QZ decomposition.  With real output, we only need the
+    transpose of ``Z`` in the following expressions.
+
+    >>> Q @ AA @ Z.T  # Should be A
+    array([[ 1.,  2., -1.],
+           [ 5.,  5.,  5.],
+           [ 2.,  4., -8.]])
+    >>> Q @ BB @ Z.T  # Should be B
+    array([[ 1.,  1., -3.],
+           [ 3.,  1., -1.],
+           [ 5.,  6., -2.]])
+
+    Repeat the decomposition, but with ``output='complex'``.
+
+    >>> AA, BB, Q, Z = qz(A, B, output='complex')
+
+    For conciseness in the output, we use ``np.set_printoptions()`` to set
+    the output precision of NumPy arrays to 3 and display tiny values as 0.
+
+    >>> np.set_printoptions(precision=3, suppress=True)
+    >>> AA
+    array([[-1.369+0.j   ,  2.248+4.237j,  4.861-5.022j],
+           [ 0.   +0.j   ,  7.037+2.922j,  0.794+4.932j],
+           [ 0.   +0.j   ,  0.   +0.j   ,  2.655-1.103j]])  # may vary
+    >>> BB
+    array([[ 1.719+0.j   , -1.115+1.j   , -0.763-0.646j],
+           [ 0.   +0.j   ,  7.24 +0.j   , -3.144+3.322j],
+           [ 0.   +0.j   ,  0.   +0.j   ,  2.732+0.j   ]])  # may vary
+    >>> Q
+    array([[ 0.326+0.175j, -0.273-0.029j, -0.886-0.052j],
+           [ 0.794+0.426j, -0.093+0.134j,  0.402-0.02j ],
+           [-0.2  -0.107j, -0.816+0.482j,  0.151-0.167j]])  # may vary
+    >>> Z
+    array([[ 0.596+0.32j , -0.31 +0.414j,  0.393-0.347j],
+           [-0.619-0.332j, -0.479+0.314j,  0.154-0.393j],
+           [-0.195-0.104j,  0.576+0.27j ,  0.715+0.187j]])  # may vary
+
+    With complex arrays, we must use ``Z.conj().T`` in the following
+    expressions to verify the decomposition.
+
+    >>> Q @ AA @ Z.conj().T  # Should be A
+    array([[ 1.-0.j,  2.-0.j, -1.-0.j],
+           [ 5.+0.j,  5.+0.j,  5.-0.j],
+           [ 2.+0.j,  4.+0.j, -8.+0.j]])
+    >>> Q @ BB @ Z.conj().T  # Should be B
+    array([[ 1.+0.j,  1.+0.j, -3.+0.j],
+           [ 3.-0.j,  1.-0.j, -1.+0.j],
+           [ 5.+0.j,  6.+0.j, -2.+0.j]])
+
+    """
+    # output for real
+    # AA, BB, sdim, alphar, alphai, beta, vsl, vsr, work, info
+    # output for complex
+    # AA, BB, sdim, alpha, beta, vsl, vsr, work, info
+    result, _ = _qz(A, B, output=output, lwork=lwork, sort=sort,
+                    overwrite_a=overwrite_a, overwrite_b=overwrite_b,
+                    check_finite=check_finite)
+    return result[0], result[1], result[-4], result[-3]
+
+
+def ordqz(A, B, sort='lhp', output='real', overwrite_a=False,
+          overwrite_b=False, check_finite=True):
+    """QZ decomposition for a pair of matrices with reordering.
+
+    Parameters
+    ----------
+    A : (N, N) array_like
+        2-D array to decompose
+    B : (N, N) array_like
+        2-D array to decompose
+    sort : {callable, 'lhp', 'rhp', 'iuc', 'ouc'}, optional
+        Specifies whether the upper eigenvalues should be sorted. A
+        callable may be passed that, given an ordered pair ``(alpha,
+        beta)`` representing the eigenvalue ``x = (alpha/beta)``,
+        returns a boolean denoting whether the eigenvalue should be
+        sorted to the top-left (True). For the real matrix pairs
+        ``beta`` is real while ``alpha`` can be complex, and for
+        complex matrix pairs both ``alpha`` and ``beta`` can be
+        complex. The callable must be able to accept a NumPy
+        array. Alternatively, string parameters may be used:
+
+            - 'lhp'   Left-hand plane (x.real < 0.0)
+            - 'rhp'   Right-hand plane (x.real > 0.0)
+            - 'iuc'   Inside the unit circle (x*x.conjugate() < 1.0)
+            - 'ouc'   Outside the unit circle (x*x.conjugate() > 1.0)
+
+        With the predefined sorting functions, an infinite eigenvalue
+        (i.e., ``alpha != 0`` and ``beta = 0``) is considered to lie in
+        neither the left-hand nor the right-hand plane, but it is
+        considered to lie outside the unit circle. For the eigenvalue
+        ``(alpha, beta) = (0, 0)``, the predefined sorting functions
+        all return `False`.
+    output : {'real', 'complex'}, optional
+        Construct the real or complex QZ decomposition for real matrices.
+        Default is 'real'.
+    overwrite_a : bool, optional
+        If True, the contents of A are overwritten.
+    overwrite_b : bool, optional
+        If True, the contents of B are overwritten.
+    check_finite : bool, optional
+        If true, checks that the elements of `A` and `B` are finite numbers.
+        If false, does no checking and passes the matrices through to the
+        underlying algorithm.
+
+    Returns
+    -------
+    AA : (N, N) ndarray
+        Generalized Schur form of A.
+    BB : (N, N) ndarray
+        Generalized Schur form of B.
+    alpha : (N,) ndarray
+        alpha = alphar + alphai * 1j. See notes.
+    beta : (N,) ndarray
+        See notes.
+    Q : (N, N) ndarray
+        The left Schur vectors.
+    Z : (N, N) ndarray
+        The right Schur vectors.
+
+    See Also
+    --------
+    qz
+
+    Notes
+    -----
+    On exit, ``(ALPHAR(j) + ALPHAI(j)*i)/BETA(j), j=1,...,N``, will be the
+    generalized eigenvalues. ``ALPHAR(j) + ALPHAI(j)*i`` and
+    ``BETA(j), j=1,...,N`` are the diagonals of the complex Schur form (S,T)
+    that would result if the 2-by-2 diagonal blocks of the real generalized
+    Schur form of (A,B) were further reduced to triangular form using complex
+    unitary transformations. If ``ALPHAI(j)`` is zero, then the ``j``-th
+    eigenvalue is real; if positive, then the ``j``-th and ``(j+1)``-st
+    eigenvalues are a complex conjugate pair, with ``ALPHAI(j+1)`` negative.
+
+    .. versionadded:: 0.17.0
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import ordqz
+    >>> A = np.array([[2, 5, 8, 7], [5, 2, 2, 8], [7, 5, 6, 6], [5, 4, 4, 8]])
+    >>> B = np.array([[0, 6, 0, 0], [5, 0, 2, 1], [5, 2, 6, 6], [4, 7, 7, 7]])
+    >>> AA, BB, alpha, beta, Q, Z = ordqz(A, B, sort='lhp')
+
+    Since we have sorted for left-half-plane eigenvalues, the negative ones
+    come first
+
+    >>> (alpha/beta).real < 0
+    array([ True,  True, False, False])
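+
+    As with `qz`, the factorization reconstructs the original matrices:
+
+    >>> np.allclose(Q @ AA @ Z.conj().T, A)
+    True
+    >>> np.allclose(Q @ BB @ Z.conj().T, B)
+    True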
+
+    """
+    (AA, BB, _, *ab, Q, Z, _, _), typ = _qz(A, B, output=output, sort=None,
+                                            overwrite_a=overwrite_a,
+                                            overwrite_b=overwrite_b,
+                                            check_finite=check_finite)
+
+    if typ == 's':
+        alpha, beta = ab[0] + ab[1]*np.complex64(1j), ab[2]
+    elif typ == 'd':
+        alpha, beta = ab[0] + ab[1]*1.j, ab[2]
+    else:
+        alpha, beta = ab
+
+    sfunction = _select_function(sort)
+    select = sfunction(alpha, beta)
+
+    tgsen = get_lapack_funcs('tgsen', (AA, BB))
+    # the real case needs 4n + 16 lwork
+    lwork = 4*AA.shape[0] + 16 if typ in 'sd' else 1
+    AAA, BBB, *ab, QQ, ZZ, _, _, _, _, info = tgsen(select, AA, BB, Q, Z,
+                                                    ijob=0,
+                                                    lwork=lwork, liwork=1)
+
+    # Once more for tgsen output
+    if typ == 's':
+        alpha, beta = ab[0] + ab[1]*np.complex64(1j), ab[2]
+    elif typ == 'd':
+        alpha, beta = ab[0] + ab[1]*1.j, ab[2]
+    else:
+        alpha, beta = ab
+
+    if info < 0:
+        raise ValueError(f"Illegal value in argument {-info} of tgsen")
+    elif info == 1:
+        raise ValueError("Reordering of (A, B) failed because the transformed"
+                         " matrix pair (A, B) would be too far from "
+                         "generalized Schur form; the problem is very "
+                         "ill-conditioned. (A, B) may have been partially "
+                         "reordered.")
+
+    return AAA, BBB, alpha, beta, QQ, ZZ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/_decomp_schur.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/_decomp_schur.py
new file mode 100644
index 00000000..120429df
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/_decomp_schur.py
@@ -0,0 +1,294 @@
+"""Schur decomposition functions."""
+import numpy
+from numpy import asarray_chkfinite, single, asarray, array
+from numpy.linalg import norm
+
+
+# Local imports.
+from ._misc import LinAlgError, _datacopied
+from .lapack import get_lapack_funcs
+from ._decomp import eigvals
+
+__all__ = ['schur', 'rsf2csf']
+
+_double_precision = ['i', 'l', 'd']
+
+
+def schur(a, output='real', lwork=None, overwrite_a=False, sort=None,
+          check_finite=True):
+    """
+    Compute Schur decomposition of a matrix.
+
+    The Schur decomposition is::
+
+        A = Z T Z^H
+
+    where Z is unitary and T is either upper-triangular or, for the real
+    Schur decomposition (output='real'), quasi-upper triangular. In
+    the quasi-triangular form, 2x2 blocks describing complex-valued
+    eigenvalue pairs may appear on the diagonal.
+
+    Parameters
+    ----------
+    a : (M, M) array_like
+        Matrix to decompose
+    output : {'real', 'complex'}, optional
+        Construct the real or complex Schur decomposition (for real matrices).
+    lwork : int, optional
+        Work array size. If None or -1, it is automatically computed.
+    overwrite_a : bool, optional
+        Whether to overwrite data in a (may improve performance).
+    sort : {None, callable, 'lhp', 'rhp', 'iuc', 'ouc'}, optional
+        Specifies whether the upper eigenvalues should be sorted. A callable
+        may be passed that, given an eigenvalue, returns a boolean denoting
+        whether the eigenvalue should be sorted to the top-left (True).
+        Alternatively, string parameters may be used::
+
+            'lhp'   Left-hand plane (x.real < 0.0)
+            'rhp'   Right-hand plane (x.real > 0.0)
+            'iuc'   Inside the unit circle (x*x.conjugate() <= 1.0)
+            'ouc'   Outside the unit circle (x*x.conjugate() > 1.0)
+
+        Defaults to None (no sorting).
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    T : (M, M) ndarray
+        Schur form of A. It is real-valued for the real Schur decomposition.
+    Z : (M, M) ndarray
+        A unitary Schur transformation matrix for A.
+        It is real-valued for the real Schur decomposition.
+    sdim : int
+        If and only if sorting was requested, a third return value will
+        contain the number of eigenvalues satisfying the sort condition.
+
+    Raises
+    ------
+    LinAlgError
+        Error raised under three conditions:
+
+        1. The algorithm failed due to a failure of the QR algorithm to
+           compute all eigenvalues.
+        2. If eigenvalue sorting was requested, the eigenvalues could not be
+           reordered due to a failure to separate eigenvalues, usually because
+           of poor conditioning.
+        3. If eigenvalue sorting was requested, roundoff errors caused the
+           leading eigenvalues to no longer satisfy the sorting condition.
+
+    See Also
+    --------
+    rsf2csf : Convert real Schur form to complex Schur form
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import schur, eigvals
+    >>> A = np.array([[0, 2, 2], [0, 1, 2], [1, 0, 1]])
+    >>> T, Z = schur(A)
+    >>> T
+    array([[ 2.65896708,  1.42440458, -1.92933439],
+           [ 0.        , -0.32948354, -0.49063704],
+           [ 0.        ,  1.31178921, -0.32948354]])
+    >>> Z
+    array([[0.72711591, -0.60156188, 0.33079564],
+           [0.52839428, 0.79801892, 0.28976765],
+           [0.43829436, 0.03590414, -0.89811411]])
+
+    >>> T2, Z2 = schur(A, output='complex')
+    >>> T2
+    array([[ 2.65896708, -1.22839825+1.32378589j,  0.42590089+1.51937378j],
+           [ 0.        , -0.32948354+0.80225456j, -0.59877807+0.56192146j],
+           [ 0.        ,  0.                    , -0.32948354-0.80225456j]])
+    >>> eigvals(T2)
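+    Examples
+    --------
+    >>> import numpy as np
+    >>> vec(np.array([[1, 2], [3, 4]]))  # columns stacked in order
+    array([1, 3, 2, 4])
+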
+    array([2.65896708, -0.32948354+0.80225456j, -0.32948354-0.80225456j])
+
+    An arbitrary custom eig-sorting condition, having positive imaginary part,
+    which is satisfied by only one eigenvalue
+
+    >>> T3, Z3, sdim = schur(A, output='complex', sort=lambda x: x.imag > 0)
+    >>> sdim
+    1
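+
+    Whatever the sorting, the factorization still satisfies ``A = Z T Z^H``:
+
+    >>> np.allclose(Z3 @ T3 @ Z3.conj().T, A)
+    True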
+
+    """
+    if output not in ['real', 'complex', 'r', 'c']:
+        raise ValueError("argument must be 'real' or 'complex'")
+    if check_finite:
+        a1 = asarray_chkfinite(a)
+    else:
+        a1 = asarray(a)
+    if len(a1.shape) != 2 or (a1.shape[0] != a1.shape[1]):
+        raise ValueError('expected square matrix')
+    typ = a1.dtype.char
+    if output in ['complex', 'c'] and typ not in ['F', 'D']:
+        if typ in _double_precision:
+            a1 = a1.astype('D')
+            typ = 'D'
+        else:
+            a1 = a1.astype('F')
+            typ = 'F'
+    overwrite_a = overwrite_a or (_datacopied(a1, a))
+    gees, = get_lapack_funcs(('gees',), (a1,))
+    if lwork is None or lwork == -1:
+        # get optimal work array
+        result = gees(lambda x: None, a1, lwork=-1)
+        lwork = result[-2][0].real.astype(numpy.int_)
+
+    if sort is None:
+        sort_t = 0
+        sfunction = lambda x: None
+    else:
+        sort_t = 1
+        if callable(sort):
+            sfunction = sort
+        elif sort == 'lhp':
+            sfunction = lambda x: (x.real < 0.0)
+        elif sort == 'rhp':
+            sfunction = lambda x: (x.real >= 0.0)
+        elif sort == 'iuc':
+            sfunction = lambda x: (abs(x) <= 1.0)
+        elif sort == 'ouc':
+            sfunction = lambda x: (abs(x) > 1.0)
+        else:
+            raise ValueError("'sort' parameter must either be 'None', or a "
+                             "callable, or one of ('lhp','rhp','iuc','ouc')")
+
+    result = gees(sfunction, a1, lwork=lwork, overwrite_a=overwrite_a,
+                  sort_t=sort_t)
+
+    info = result[-1]
+    if info < 0:
+        raise ValueError('illegal value in {}-th argument of internal gees'
+                         ''.format(-info))
+    elif info == a1.shape[0] + 1:
+        raise LinAlgError('Eigenvalues could not be separated for reordering.')
+    elif info == a1.shape[0] + 2:
+        raise LinAlgError('Leading eigenvalues do not satisfy sort condition.')
+    elif info > 0:
+        raise LinAlgError("Schur form not found. Possibly ill-conditioned.")
+
+    if sort_t == 0:
+        return result[0], result[-3]
+    else:
+        return result[0], result[-3], result[1]
+
+
+eps = numpy.finfo(float).eps
+feps = numpy.finfo(single).eps
+
+_array_kind = {'b': 0, 'h': 0, 'B': 0, 'i': 0, 'l': 0,
+               'f': 0, 'd': 0, 'F': 1, 'D': 1}
+_array_precision = {'i': 1, 'l': 1, 'f': 0, 'd': 1, 'F': 0, 'D': 1}
+_array_type = [['f', 'd'], ['F', 'D']]
+
+
+def _commonType(*arrays):
+    kind = 0
+    precision = 0
+    for a in arrays:
+        t = a.dtype.char
+        kind = max(kind, _array_kind[t])
+        precision = max(precision, _array_precision[t])
+    return _array_type[kind][precision]
+
+
+def _castCopy(type, *arrays):
+    cast_arrays = ()
+    for a in arrays:
+        if a.dtype.char == type:
+            cast_arrays = cast_arrays + (a.copy(),)
+        else:
+            cast_arrays = cast_arrays + (a.astype(type),)
+    if len(cast_arrays) == 1:
+        return cast_arrays[0]
+    else:
+        return cast_arrays
+
+
+def rsf2csf(T, Z, check_finite=True):
+    """
+    Convert real Schur form to complex Schur form.
+
+    Convert a quasi-diagonal real-valued Schur form to the upper-triangular
+    complex-valued Schur form.
+
+    Parameters
+    ----------
+    T : (M, M) array_like
+        Real Schur form of the original array
+    Z : (M, M) array_like
+        Schur transformation matrix
+    check_finite : bool, optional
+        Whether to check that the input arrays contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    T : (M, M) ndarray
+        Complex Schur form of the original array
+    Z : (M, M) ndarray
+        Schur transformation matrix corresponding to the complex form
+
+    See Also
+    --------
+    schur : Schur decomposition of an array
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import schur, rsf2csf
+    >>> A = np.array([[0, 2, 2], [0, 1, 2], [1, 0, 1]])
+    >>> T, Z = schur(A)
+    >>> T
+    array([[ 2.65896708,  1.42440458, -1.92933439],
+           [ 0.        , -0.32948354, -0.49063704],
+           [ 0.        ,  1.31178921, -0.32948354]])
+    >>> Z
+    array([[0.72711591, -0.60156188, 0.33079564],
+           [0.52839428, 0.79801892, 0.28976765],
+           [0.43829436, 0.03590414, -0.89811411]])
+    >>> T2 , Z2 = rsf2csf(T, Z)
+    >>> T2
+    array([[2.65896708+0.j, -1.64592781+0.743164187j, -1.21516887+1.00660462j],
+           [0.+0.j , -0.32948354+8.02254558e-01j, -0.82115218-2.77555756e-17j],
+           [0.+0.j , 0.+0.j, -0.32948354-0.802254558j]])
+    >>> Z2
+    array([[0.72711591+0.j,  0.28220393-0.31385693j,  0.51319638-0.17258824j],
+           [0.52839428+0.j,  0.24720268+0.41635578j, -0.68079517-0.15118243j],
+           [0.43829436+0.j, -0.76618703+0.01873251j, -0.03063006+0.46857912j]])
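+
+    The conversion preserves the decomposition of the original array:
+
+    >>> np.allclose(Z2 @ T2 @ Z2.conj().T, A)
+    True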
+
+    """
+    if check_finite:
+        Z, T = map(asarray_chkfinite, (Z, T))
+    else:
+        Z, T = map(asarray, (Z, T))
+
+    for ind, X in enumerate([Z, T]):
+        if X.ndim != 2 or X.shape[0] != X.shape[1]:
+            raise ValueError("Input '{}' must be square.".format('ZT'[ind]))
+
+    if T.shape[0] != Z.shape[0]:
+        raise ValueError("Input array shapes must match: Z: {} vs. T: {}"
+                         "".format(Z.shape, T.shape))
+    N = T.shape[0]
+    t = _commonType(Z, T, array([3.0], 'F'))
+    Z, T = _castCopy(t, Z, T)
+
+    for m in range(N-1, 0, -1):
+        if abs(T[m, m-1]) > eps*(abs(T[m-1, m-1]) + abs(T[m, m])):
+            mu = eigvals(T[m-1:m+1, m-1:m+1]) - T[m, m]
+            r = norm([mu[0], T[m, m-1]])
+            c = mu[0] / r
+            s = T[m, m-1] / r
+            G = array([[c.conj(), s], [-s, c]], dtype=t)
+
+            T[m-1:m+1, m-1:] = G.dot(T[m-1:m+1, m-1:])
+            T[:m+1, m-1:m+1] = T[:m+1, m-1:m+1].dot(G.conj().T)
+            Z[:, m-1:m+1] = Z[:, m-1:m+1].dot(G.conj().T)
+
+        T[m, m-1] = 0.0
+    return T, Z
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/_decomp_svd.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/_decomp_svd.py
new file mode 100644
index 00000000..8072ba10
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/_decomp_svd.py
@@ -0,0 +1,503 @@
+"""SVD decomposition functions."""
+import numpy
+from numpy import zeros, r_, diag, dot, arccos, arcsin, where, clip
+
+# Local imports.
+from ._misc import LinAlgError, _datacopied
+from .lapack import get_lapack_funcs, _compute_lwork
+from ._decomp import _asarray_validated
+
+__all__ = ['svd', 'svdvals', 'diagsvd', 'orth', 'subspace_angles', 'null_space']
+
+
+def svd(a, full_matrices=True, compute_uv=True, overwrite_a=False,
+        check_finite=True, lapack_driver='gesdd'):
+    """
+    Singular Value Decomposition.
+
+    Factorizes the matrix `a` into two unitary matrices ``U`` and ``Vh``, and
+    a 1-D array ``s`` of singular values (real, non-negative) such that
+    ``a == U @ S @ Vh``, where ``S`` is a suitably shaped matrix of zeros with
+    main diagonal ``s``.
+
+    Parameters
+    ----------
+    a : (M, N) array_like
+        Matrix to decompose.
+    full_matrices : bool, optional
+        If True (default), `U` and `Vh` are of shape ``(M, M)``, ``(N, N)``.
+        If False, the shapes are ``(M, K)`` and ``(K, N)``, where
+        ``K = min(M, N)``.
+    compute_uv : bool, optional
+        Whether to compute also ``U`` and ``Vh`` in addition to ``s``.
+        Default is True.
+    overwrite_a : bool, optional
+        Whether to overwrite `a`; may improve performance.
+        Default is False.
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+    lapack_driver : {'gesdd', 'gesvd'}, optional
+        Whether to use the more efficient divide-and-conquer approach
+        (``'gesdd'``) or general rectangular approach (``'gesvd'``)
+        to compute the SVD. MATLAB and Octave use the ``'gesvd'`` approach.
+        Default is ``'gesdd'``.
+
+        .. versionadded:: 0.18
+
+    Returns
+    -------
+    U : ndarray
+        Unitary matrix having left singular vectors as columns.
+        Of shape ``(M, M)`` or ``(M, K)``, depending on `full_matrices`.
+    s : ndarray
+        The singular values, sorted in non-increasing order.
+        Of shape (K,), with ``K = min(M, N)``.
+    Vh : ndarray
+        Unitary matrix having right singular vectors as rows.
+        Of shape ``(N, N)`` or ``(K, N)`` depending on `full_matrices`.
+
+    For ``compute_uv=False``, only ``s`` is returned.
+
+    Raises
+    ------
+    LinAlgError
+        If SVD computation does not converge.
+
+    See Also
+    --------
+    svdvals : Compute singular values of a matrix.
+    diagsvd : Construct the Sigma matrix, given the vector s.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import linalg
+    >>> rng = np.random.default_rng()
+    >>> m, n = 9, 6
+    >>> a = rng.standard_normal((m, n)) + 1.j*rng.standard_normal((m, n))
+    >>> U, s, Vh = linalg.svd(a)
+    >>> U.shape,  s.shape, Vh.shape
+    ((9, 9), (6,), (6, 6))
+
+    Reconstruct the original matrix from the decomposition:
+
+    >>> sigma = np.zeros((m, n))
+    >>> for i in range(min(m, n)):
+    ...     sigma[i, i] = s[i]
+    >>> a1 = np.dot(U, np.dot(sigma, Vh))
+    >>> np.allclose(a, a1)
+    True
+
+    Alternatively, use ``full_matrices=False`` (notice that the shape of
+    ``U`` is then ``(m, n)`` instead of ``(m, m)``):
+
+    >>> U, s, Vh = linalg.svd(a, full_matrices=False)
+    >>> U.shape, s.shape, Vh.shape
+    ((9, 6), (6,), (6, 6))
+    >>> S = np.diag(s)
+    >>> np.allclose(a, np.dot(U, np.dot(S, Vh)))
+    True
+
+    >>> s2 = linalg.svd(a, compute_uv=False)
+    >>> np.allclose(s, s2)
+    True
+
+    """
+    a1 = _asarray_validated(a, check_finite=check_finite)
+    if len(a1.shape) != 2:
+        raise ValueError('expected matrix')
+    m, n = a1.shape
+    overwrite_a = overwrite_a or (_datacopied(a1, a))
+
+    if not isinstance(lapack_driver, str):
+        raise TypeError('lapack_driver must be a string')
+    if lapack_driver not in ('gesdd', 'gesvd'):
+        raise ValueError('lapack_driver must be "gesdd" or "gesvd", not "%s"'
+                         % (lapack_driver,))
+    funcs = (lapack_driver, lapack_driver + '_lwork')
+    gesXd, gesXd_lwork = get_lapack_funcs(funcs, (a1,), ilp64='preferred')
+
+    # compute optimal lwork
+    lwork = _compute_lwork(gesXd_lwork, a1.shape[0], a1.shape[1],
+                           compute_uv=compute_uv, full_matrices=full_matrices)
+
+    # perform decomposition
+    u, s, v, info = gesXd(a1, compute_uv=compute_uv, lwork=lwork,
+                          full_matrices=full_matrices, overwrite_a=overwrite_a)
+
+    if info > 0:
+        raise LinAlgError("SVD did not converge")
+    if info < 0:
+        raise ValueError('illegal value in %dth argument of internal %s'
+                         % (-info, lapack_driver))
+    if compute_uv:
+        return u, s, v
+    else:
+        return s
+
+
+def svdvals(a, overwrite_a=False, check_finite=True):
+    """
+    Compute singular values of a matrix.
+
+    Parameters
+    ----------
+    a : (M, N) array_like
+        Matrix to decompose.
+    overwrite_a : bool, optional
+        Whether to overwrite `a`; may improve performance.
+        Default is False.
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    s : (min(M, N),) ndarray
+        The singular values, sorted in decreasing order.
+
+    Raises
+    ------
+    LinAlgError
+        If SVD computation does not converge.
+
+    See Also
+    --------
+    svd : Compute the full singular value decomposition of a matrix.
+    diagsvd : Construct the Sigma matrix, given the vector s.
+
+    Notes
+    -----
+    ``svdvals(a)`` only differs from ``svd(a, compute_uv=False)`` by its
+    handling of the edge case of empty ``a``, where it returns an
+    empty array:
+
+    >>> import numpy as np
+    >>> a = np.empty((0, 2))
+    >>> from scipy.linalg import svdvals
+    >>> svdvals(a)
+    array([], dtype=float64)
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import svdvals
+    >>> m = np.array([[1.0, 0.0],
+    ...               [2.0, 3.0],
+    ...               [1.0, 1.0],
+    ...               [0.0, 2.0],
+    ...               [1.0, 0.0]])
+    >>> svdvals(m)
+    array([ 4.28091555,  1.63516424])
+
+    We can verify the maximum singular value of `m` by computing the maximum
+    length of `m.dot(u)` over all the unit vectors `u` in the (x,y) plane.
+    We approximate "all" the unit vectors with a large sample. Because
+    of linearity, we only need the unit vectors with angles in [0, pi].
+
+    >>> t = np.linspace(0, np.pi, 2000)
+    >>> u = np.array([np.cos(t), np.sin(t)])
+    >>> np.linalg.norm(m.dot(u), axis=0).max()
+    4.2809152422538475
+
+    `p` is a projection matrix with rank 1. With exact arithmetic,
+    its singular values would be [1, 0, 0, 0].
+
+    >>> v = np.array([0.1, 0.3, 0.9, 0.3])
+    >>> p = np.outer(v, v)
+    >>> svdvals(p)
+    array([  1.00000000e+00,   2.02021698e-17,   1.56692500e-17,
+             8.15115104e-34])
+
+    The singular values of an orthogonal matrix are all 1. Here, we
+    create a random orthogonal matrix by using the `rvs()` method of
+    `scipy.stats.ortho_group`.
+
+    >>> from scipy.stats import ortho_group
+    >>> orth = ortho_group.rvs(4)
+    >>> svdvals(orth)
+    array([ 1.,  1.,  1.,  1.])
+
+    """
+    a = _asarray_validated(a, check_finite=check_finite)
+    if a.size:
+        return svd(a, compute_uv=0, overwrite_a=overwrite_a,
+                   check_finite=False)
+    elif len(a.shape) != 2:
+        raise ValueError('expected matrix')
+    else:
+        return numpy.empty(0)
+
+
+def diagsvd(s, M, N):
+    """
+    Construct the sigma matrix in SVD from singular values and size M, N.
+
+    Parameters
+    ----------
+    s : (M,) or (N,) array_like
+        Singular values
+    M : int
+        Number of rows of the matrix whose singular values are `s`.
+    N : int
+        Number of columns of the matrix whose singular values are `s`.
+
+    Returns
+    -------
+    S : (M, N) ndarray
+        The S-matrix in the singular value decomposition
+
+    See Also
+    --------
+    svd : Singular value decomposition of a matrix
+    svdvals : Compute singular values of a matrix.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import diagsvd
+    >>> vals = np.array([1, 2, 3])  # The singular values
+    >>> diagsvd(vals, 3, 4)
+    array([[1, 0, 0, 0],
+           [0, 2, 0, 0],
+           [0, 0, 3, 0]])
+    >>> diagsvd(vals, 4, 3)
+    array([[1, 0, 0],
+           [0, 2, 0],
+           [0, 0, 3],
+           [0, 0, 0]])
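+
+    Together with `svd`, `diagsvd` reconstructs the original matrix
+    (a quick round-trip check):
+
+    >>> from scipy.linalg import svd
+    >>> a = np.array([[1., 2., 0.], [3., 4., 5.]])
+    >>> U, s, Vh = svd(a)
+    >>> np.allclose(a, U @ diagsvd(s, *a.shape) @ Vh)
+    True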
+
+    """
+    part = diag(s)
+    typ = part.dtype.char
+    MorN = len(s)
+    if MorN == M:
+        return r_['-1', part, zeros((M, N-M), typ)]
+    elif MorN == N:
+        return r_[part, zeros((M-N, N), typ)]
+    else:
+        raise ValueError("Length of s must be M or N.")
+
+
+# Orthonormal decomposition
+
+def orth(A, rcond=None):
+    """
+    Construct an orthonormal basis for the range of A using SVD
+
+    Parameters
+    ----------
+    A : (M, N) array_like
+        Input array
+    rcond : float, optional
+        Relative condition number. Singular values ``s`` smaller than
+        ``rcond * max(s)`` are considered zero.
+        Default: floating point eps * max(M,N).
+
+    Returns
+    -------
+    Q : (M, K) ndarray
+        Orthonormal basis for the range of A.
+        K = effective rank of A, as determined by rcond
+
+    See Also
+    --------
+    svd : Singular value decomposition of a matrix
+    null_space : Matrix null space
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import orth
+    >>> A = np.array([[2, 0, 0], [0, 5, 0]])  # rank 2 array
+    >>> orth(A)
+    array([[0., 1.],
+           [1., 0.]])
+    >>> orth(A.T)
+    array([[0., 1.],
+           [1., 0.],
+           [0., 0.]])
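+
+    The columns of the returned basis are orthonormal:
+
+    >>> Q = orth(A)
+    >>> np.allclose(Q.T @ Q, np.eye(Q.shape[1]))
+    True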
+
+    """
+    u, s, vh = svd(A, full_matrices=False)
+    M, N = u.shape[0], vh.shape[1]
+    if rcond is None:
+        rcond = numpy.finfo(s.dtype).eps * max(M, N)
+    tol = numpy.amax(s) * rcond
+    num = numpy.sum(s > tol, dtype=int)
+    Q = u[:, :num]
+    return Q
+
+
+def null_space(A, rcond=None):
+    """
+    Construct an orthonormal basis for the null space of A using SVD
+
+    Parameters
+    ----------
+    A : (M, N) array_like
+        Input array
+    rcond : float, optional
+        Relative condition number. Singular values ``s`` smaller than
+        ``rcond * max(s)`` are considered zero.
+        Default: floating point eps * max(M,N).
+
+    Returns
+    -------
+    Z : (N, K) ndarray
+        Orthonormal basis for the null space of A.
+        K = dimension of effective null space, as determined by rcond
+
+    See Also
+    --------
+    svd : Singular value decomposition of a matrix
+    orth : Matrix range
+
+    Examples
+    --------
+    1-D null space:
+
+    >>> import numpy as np
+    >>> from scipy.linalg import null_space
+    >>> A = np.array([[1, 1], [1, 1]])
+    >>> ns = null_space(A)
+    >>> ns * np.sign(ns[0,0])  # Remove the sign ambiguity of the vector
+    array([[ 0.70710678],
+           [-0.70710678]])
+
+    2-D null space:
+
+    >>> from numpy.random import default_rng
+    >>> rng = default_rng()
+    >>> B = rng.random((3, 5))
+    >>> Z = null_space(B)
+    >>> Z.shape
+    (5, 2)
+    >>> np.allclose(B.dot(Z), 0)
+    True
+
+    The basis vectors are orthonormal (up to rounding error):
+
+    >>> Z.T.dot(Z)
+    array([[  1.00000000e+00,   6.92087741e-17],
+           [  6.92087741e-17,   1.00000000e+00]])
+
+    """
+    u, s, vh = svd(A, full_matrices=True)
+    M, N = u.shape[0], vh.shape[1]
+    if rcond is None:
+        rcond = numpy.finfo(s.dtype).eps * max(M, N)
+    tol = numpy.amax(s) * rcond
+    num = numpy.sum(s > tol, dtype=int)
+    Q = vh[num:,:].T.conj()
+    return Q
+
+
+def subspace_angles(A, B):
+    r"""
+    Compute the subspace angles between two matrices.
+
+    Parameters
+    ----------
+    A : (M, N) array_like
+        The first input array.
+    B : (M, K) array_like
+        The second input array.
+
+    Returns
+    -------
+    angles : ndarray, shape (min(N, K),)
+        The subspace angles between the column spaces of `A` and `B` in
+        descending order.
+
+    See Also
+    --------
+    orth
+    svd
+
+    Notes
+    -----
+    This computes the subspace angles according to the formula
+    provided in [1]_. For equivalence with MATLAB and Octave behavior,
+    use ``angles[0]``.
+
+    .. versionadded:: 1.0
+
+    References
+    ----------
+    .. [1] Knyazev A, Argentati M (2002) Principal Angles between Subspaces
+           in an A-Based Scalar Product: Algorithms and Perturbation
+           Estimates. SIAM J. Sci. Comput. 23:2008-2040.
+
+    Examples
+    --------
+    A Hadamard matrix has orthogonal columns, so we expect the subspace
+    angles to be :math:`\frac{\pi}{2}`:
+
+    >>> import numpy as np
+    >>> from scipy.linalg import hadamard, subspace_angles
+    >>> rng = np.random.default_rng()
+    >>> H = hadamard(4)
+    >>> print(H)
+    [[ 1  1  1  1]
+     [ 1 -1  1 -1]
+     [ 1  1 -1 -1]
+     [ 1 -1 -1  1]]
+    >>> np.rad2deg(subspace_angles(H[:, :2], H[:, 2:]))
+    array([ 90.,  90.])
+
+    And the subspace angle of a matrix to itself should be zero:
+
+    >>> subspace_angles(H[:, :2], H[:, :2]) <= 2 * np.finfo(float).eps
+    array([ True,  True])
+
+    The angles between non-orthogonal subspaces are in between these extremes:
+
+    >>> x = rng.standard_normal((4, 3))
+    >>> np.rad2deg(subspace_angles(x[:, :2], x[:, [2]]))
+    array([ 55.832])  # random
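+
+    As a check against a known value, two lines at 45 degrees in the plane
+    give an angle of :math:`\frac{\pi}{4}`:
+
+    >>> a = np.array([[1.], [0.]])
+    >>> b = np.array([[1.], [1.]])
+    >>> np.allclose(subspace_angles(a, b), np.pi / 4)
+    True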
+    """
+    # Steps here omit the U and V calculation steps from the paper
+
+    # 1. Compute orthonormal bases of column-spaces
+    A = _asarray_validated(A, check_finite=True)
+    if len(A.shape) != 2:
+        raise ValueError('expected 2D array, got shape %s' % (A.shape,))
+    QA = orth(A)
+    del A
+
+    B = _asarray_validated(B, check_finite=True)
+    if len(B.shape) != 2:
+        raise ValueError('expected 2D array, got shape %s' % (B.shape,))
+    if len(B) != len(QA):
+        raise ValueError('A and B must have the same number of rows, got '
+                         '%s and %s' % (QA.shape[0], B.shape[0]))
+    QB = orth(B)
+    del B
+
+    # 2. Compute SVD for cosine
+    QA_H_QB = dot(QA.T.conj(), QB)
+    sigma = svdvals(QA_H_QB)
+
+    # 3. Compute matrix B
+    if QA.shape[1] >= QB.shape[1]:
+        B = QB - dot(QA, QA_H_QB)
+    else:
+        B = QA - dot(QB, QA_H_QB.T.conj())
+    del QA, QB, QA_H_QB
+
+    # 4. Compute SVD for sine
+    mask = sigma ** 2 >= 0.5
+    if mask.any():
+        mu_arcsin = arcsin(clip(svdvals(B, overwrite_a=True), -1., 1.))
+    else:
+        mu_arcsin = 0.
+
+    # 5. Compute the principal angles
+    # with reverse ordering of sigma because smallest sigma belongs to largest
+    # angle theta
+    theta = where(mask, mu_arcsin, arccos(clip(sigma[::-1], -1., 1.)))
+    return theta
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/_expm_frechet.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/_expm_frechet.py
new file mode 100644
index 00000000..d46fd90c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/_expm_frechet.py
@@ -0,0 +1,413 @@
+"""Frechet derivative of the matrix exponential."""
+import numpy as np
+import scipy.linalg
+
+__all__ = ['expm_frechet', 'expm_cond']
+
+
+def expm_frechet(A, E, method=None, compute_expm=True, check_finite=True):
+    """
+    Frechet derivative of the matrix exponential of A in the direction E.
+
+    Parameters
+    ----------
+    A : (N, N) array_like
+        Matrix of which to take the matrix exponential.
+    E : (N, N) array_like
+        Matrix direction in which to take the Frechet derivative.
+    method : str, optional
+        Choice of algorithm. Should be one of
+
+        - `SPS` (default)
+        - `blockEnlarge`
+
+    compute_expm : bool, optional
+        Whether to compute also `expm_A` in addition to `expm_frechet_AE`.
+        Default is True.
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    expm_A : ndarray
+        Matrix exponential of A.
+    expm_frechet_AE : ndarray
+        Frechet derivative of the matrix exponential of A in the direction E.
+    For ``compute_expm = False``, only `expm_frechet_AE` is returned.
+
+    See Also
+    --------
+    expm : Compute the exponential of a matrix.
+
+    Notes
+    -----
+    This section describes the available implementations that can be selected
+    by the `method` parameter. The default method is *SPS*.
+
+    Method *blockEnlarge* is a naive algorithm.
+
+    Method *SPS* is Scaling-Pade-Squaring [1]_.
+    It is a sophisticated implementation which should take
+    only about 3/8 as much time as the naive implementation.
+    The asymptotics are the same.
+
+    .. versionadded:: 0.13.0
+
+    References
+    ----------
+    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2009)
+           Computing the Frechet Derivative of the Matrix Exponential,
+           with an application to Condition Number Estimation.
+           SIAM Journal On Matrix Analysis and Applications.,
+           30 (4). pp. 1639-1657. ISSN 1095-7162
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import linalg
+    >>> rng = np.random.default_rng()
+
+    >>> A = rng.standard_normal((3, 3))
+    >>> E = rng.standard_normal((3, 3))
+    >>> expm_A, expm_frechet_AE = linalg.expm_frechet(A, E)
+    >>> expm_A.shape, expm_frechet_AE.shape
+    ((3, 3), (3, 3))
+
+    Create a 6x6 matrix containing [[A, E], [0, A]]:
+
+    >>> M = np.zeros((6, 6))
+    >>> M[:3, :3] = A
+    >>> M[:3, 3:] = E
+    >>> M[3:, 3:] = A
+
+    >>> expm_M = linalg.expm(M)
+    >>> np.allclose(expm_A, expm_M[:3, :3])
+    True
+    >>> np.allclose(expm_frechet_AE, expm_M[:3, 3:])
+    True
+
+    """
+    if check_finite:
+        A = np.asarray_chkfinite(A)
+        E = np.asarray_chkfinite(E)
+    else:
+        A = np.asarray(A)
+        E = np.asarray(E)
+    if A.ndim != 2 or A.shape[0] != A.shape[1]:
+        raise ValueError('expected A to be a square matrix')
+    if E.ndim != 2 or E.shape[0] != E.shape[1]:
+        raise ValueError('expected E to be a square matrix')
+    if A.shape != E.shape:
+        raise ValueError('expected A and E to be the same shape')
+    if method is None:
+        method = 'SPS'
+    if method == 'SPS':
+        expm_A, expm_frechet_AE = expm_frechet_algo_64(A, E)
+    elif method == 'blockEnlarge':
+        expm_A, expm_frechet_AE = expm_frechet_block_enlarge(A, E)
+    else:
+        raise ValueError('Unknown implementation %s' % method)
+    if compute_expm:
+        return expm_A, expm_frechet_AE
+    else:
+        return expm_frechet_AE
+
+
+def expm_frechet_block_enlarge(A, E):
+    """
+    This is a helper function, mostly for testing and profiling.
+    Return expm(A), frechet(A, E)
+    """
+    n = A.shape[0]
+    M = np.vstack([
+        np.hstack([A, E]),
+        np.hstack([np.zeros_like(A), A])])
+    expm_M = scipy.linalg.expm(M)
+    return expm_M[:n, :n], expm_M[:n, n:]
+
+
+# Maximal values ell_m of ||2**-s A|| such that the backward error bound
+# does not exceed 2**-53.
+ell_table_61 = (
+        None,
+        # 1
+        2.11e-8,
+        3.56e-4,
+        1.08e-2,
+        6.49e-2,
+        2.00e-1,
+        4.37e-1,
+        7.83e-1,
+        1.23e0,
+        1.78e0,
+        2.42e0,
+        # 11
+        3.13e0,
+        3.90e0,
+        4.74e0,
+        5.63e0,
+        6.56e0,
+        7.52e0,
+        8.53e0,
+        9.56e0,
+        1.06e1,
+        1.17e1,
+        )
+
+
+# The b vectors and U and V are copypasted
+# from scipy.sparse.linalg.matfuncs.py.
+# M, Lu, Lv follow (6.11), (6.12), (6.13), (3.3)
+
+def _diff_pade3(A, E, ident):
+    b = (120., 60., 12., 1.)
+    A2 = A.dot(A)
+    M2 = np.dot(A, E) + np.dot(E, A)
+    U = A.dot(b[3]*A2 + b[1]*ident)
+    V = b[2]*A2 + b[0]*ident
+    Lu = A.dot(b[3]*M2) + E.dot(b[3]*A2 + b[1]*ident)
+    Lv = b[2]*M2
+    return U, V, Lu, Lv
+
+
+def _diff_pade5(A, E, ident):
+    b = (30240., 15120., 3360., 420., 30., 1.)
+    A2 = A.dot(A)
+    M2 = np.dot(A, E) + np.dot(E, A)
+    A4 = np.dot(A2, A2)
+    M4 = np.dot(A2, M2) + np.dot(M2, A2)
+    U = A.dot(b[5]*A4 + b[3]*A2 + b[1]*ident)
+    V = b[4]*A4 + b[2]*A2 + b[0]*ident
+    Lu = (A.dot(b[5]*M4 + b[3]*M2) +
+            E.dot(b[5]*A4 + b[3]*A2 + b[1]*ident))
+    Lv = b[4]*M4 + b[2]*M2
+    return U, V, Lu, Lv
+
+
+def _diff_pade7(A, E, ident):
+    b = (17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.)
+    A2 = A.dot(A)
+    M2 = np.dot(A, E) + np.dot(E, A)
+    A4 = np.dot(A2, A2)
+    M4 = np.dot(A2, M2) + np.dot(M2, A2)
+    A6 = np.dot(A2, A4)
+    M6 = np.dot(A4, M2) + np.dot(M4, A2)
+    U = A.dot(b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident)
+    V = b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident
+    Lu = (A.dot(b[7]*M6 + b[5]*M4 + b[3]*M2) +
+            E.dot(b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident))
+    Lv = b[6]*M6 + b[4]*M4 + b[2]*M2
+    return U, V, Lu, Lv
+
+
+def _diff_pade9(A, E, ident):
+    b = (17643225600., 8821612800., 2075673600., 302702400., 30270240.,
+            2162160., 110880., 3960., 90., 1.)
+    A2 = A.dot(A)
+    M2 = np.dot(A, E) + np.dot(E, A)
+    A4 = np.dot(A2, A2)
+    M4 = np.dot(A2, M2) + np.dot(M2, A2)
+    A6 = np.dot(A2, A4)
+    M6 = np.dot(A4, M2) + np.dot(M4, A2)
+    A8 = np.dot(A4, A4)
+    M8 = np.dot(A4, M4) + np.dot(M4, A4)
+    U = A.dot(b[9]*A8 + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident)
+    V = b[8]*A8 + b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident
+    Lu = (A.dot(b[9]*M8 + b[7]*M6 + b[5]*M4 + b[3]*M2) +
+            E.dot(b[9]*A8 + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident))
+    Lv = b[8]*M8 + b[6]*M6 + b[4]*M4 + b[2]*M2
+    return U, V, Lu, Lv
+
+
+def expm_frechet_algo_64(A, E):
+    n = A.shape[0]
+    s = None
+    ident = np.identity(n)
+    A_norm_1 = scipy.linalg.norm(A, 1)
+    m_pade_pairs = (
+            (3, _diff_pade3),
+            (5, _diff_pade5),
+            (7, _diff_pade7),
+            (9, _diff_pade9))
+    for m, pade in m_pade_pairs:
+        if A_norm_1 <= ell_table_61[m]:
+            U, V, Lu, Lv = pade(A, E, ident)
+            s = 0
+            break
+    if s is None:
+        # scaling
+        s = max(0, int(np.ceil(np.log2(A_norm_1 / ell_table_61[13]))))
+        A = A * 2.0**-s
+        E = E * 2.0**-s
+        # pade order 13
+        A2 = np.dot(A, A)
+        M2 = np.dot(A, E) + np.dot(E, A)
+        A4 = np.dot(A2, A2)
+        M4 = np.dot(A2, M2) + np.dot(M2, A2)
+        A6 = np.dot(A2, A4)
+        M6 = np.dot(A4, M2) + np.dot(M4, A2)
+        b = (64764752532480000., 32382376266240000., 7771770303897600.,
+                1187353796428800., 129060195264000., 10559470521600.,
+                670442572800., 33522128640., 1323241920., 40840800., 960960.,
+                16380., 182., 1.)
+        W1 = b[13]*A6 + b[11]*A4 + b[9]*A2
+        W2 = b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident
+        Z1 = b[12]*A6 + b[10]*A4 + b[8]*A2
+        Z2 = b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident
+        W = np.dot(A6, W1) + W2
+        U = np.dot(A, W)
+        V = np.dot(A6, Z1) + Z2
+        Lw1 = b[13]*M6 + b[11]*M4 + b[9]*M2
+        Lw2 = b[7]*M6 + b[5]*M4 + b[3]*M2
+        Lz1 = b[12]*M6 + b[10]*M4 + b[8]*M2
+        Lz2 = b[6]*M6 + b[4]*M4 + b[2]*M2
+        Lw = np.dot(A6, Lw1) + np.dot(M6, W1) + Lw2
+        Lu = np.dot(A, Lw) + np.dot(E, W)
+        Lv = np.dot(A6, Lz1) + np.dot(M6, Z1) + Lz2
+    # factor once and solve twice
+    lu_piv = scipy.linalg.lu_factor(-U + V)
+    R = scipy.linalg.lu_solve(lu_piv, U + V)
+    L = scipy.linalg.lu_solve(lu_piv, Lu + Lv + np.dot((Lu - Lv), R))
+    # squaring
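+    # exp(A) = exp(2**-s A)**(2**s); each squaring step updates the
+    # derivative via the product rule: d(X @ X) = X @ dX + dX @ X.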
+    for k in range(s):
+        L = np.dot(R, L) + np.dot(L, R)
+        R = np.dot(R, R)
+    return R, L
+
+
+def vec(M):
+    """
+    Stack columns of M to construct a single vector.
+
+    This is somewhat standard notation in linear algebra.
+
+    Parameters
+    ----------
+    M : 2-D array_like
+        Input matrix
+
+    Returns
+    -------
+    v : 1-D ndarray
+        Output vector
+
+    """
+    return M.T.ravel()
+
+
+def expm_frechet_kronform(A, method=None, check_finite=True):
+    """
+    Construct the Kronecker form of the Frechet derivative of expm.
+
+    Parameters
+    ----------
+    A : array_like with shape (N, N)
+        Matrix to be expm'd.
+    method : str, optional
+        Extra keyword to be passed to expm_frechet.
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    K : 2-D ndarray with shape (N*N, N*N)
+        Kronecker form of the Frechet derivative of the matrix exponential.
+
+    Notes
+    -----
+    This function is used to help compute the condition number
+    of the matrix exponential.
+
+    See Also
+    --------
+    expm : Compute a matrix exponential.
+    expm_frechet : Compute the Frechet derivative of the matrix exponential.
+    expm_cond : Compute the relative condition number of the matrix exponential
+                in the Frobenius norm.
+
+    """
+    if check_finite:
+        A = np.asarray_chkfinite(A)
+    else:
+        A = np.asarray(A)
+    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
+        raise ValueError('expected a square matrix')
+
+    n = A.shape[0]
+    ident = np.identity(n)
+    cols = []
+    for i in range(n):
+        for j in range(n):
+            E = np.outer(ident[i], ident[j])
+            F = expm_frechet(A, E,
+                    method=method, compute_expm=False, check_finite=False)
+            cols.append(vec(F))
+    return np.vstack(cols).T
+
+
+def expm_cond(A, check_finite=True):
+    """
+    Relative condition number of the matrix exponential in the Frobenius norm.
+
+    Parameters
+    ----------
+    A : 2-D array_like
+        Square input matrix with shape (N, N).
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    kappa : float
+        The relative condition number of the matrix exponential
+        in the Frobenius norm
+
+    See Also
+    --------
+    expm : Compute the exponential of a matrix.
+    expm_frechet : Compute the Frechet derivative of the matrix exponential.
+
+    Notes
+    -----
+    A faster estimate for the condition number in the 1-norm
+    has been published but is not yet implemented in SciPy.
+
+    .. versionadded:: 0.14.0
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import expm_cond
+    >>> A = np.array([[-0.3, 0.2, 0.6], [0.6, 0.3, -0.1], [-0.7, 1.2, 0.9]])
+    >>> k = expm_cond(A)
+    >>> k
+    1.7787805864469866
+
+    """
+    if check_finite:
+        A = np.asarray_chkfinite(A)
+    else:
+        A = np.asarray(A)
+    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
+        raise ValueError('expected a square matrix')
+
+    X = scipy.linalg.expm(A)
+    K = expm_frechet_kronform(A, check_finite=False)
+
+    # The following norm choices are deliberate.
+    # The norms of A and X are Frobenius norms,
+    # and the norm of K is the induced 2-norm.
+    A_norm = scipy.linalg.norm(A, 'fro')
+    X_norm = scipy.linalg.norm(X, 'fro')
+    K_norm = scipy.linalg.norm(K, 2)
+
+    kappa = (K_norm * A_norm) / X_norm
+    return kappa
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/_flinalg_py.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/_flinalg_py.py
new file mode 100644
index 00000000..0191dd21
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/_flinalg_py.py
@@ -0,0 +1,56 @@
+#
+# Author: Pearu Peterson, March 2002
+#
+
+__all__ = ['get_flinalg_funcs']
+
+# The following ensures that a possibly missing flavor (C or Fortran) is
+# replaced with the available one. If none is available, an exception
+# is raised at the first attempt to use the resources.
+try:
+    from . import _flinalg
+except ImportError:
+    _flinalg = None
+#    from numpy.distutils.misc_util import PostponedException
+#    _flinalg = PostponedException()
+#    print _flinalg.__doc__
+    has_column_major_storage = lambda a: 0
+
+
+def has_column_major_storage(arr):
+    return arr.flags['FORTRAN']
+
+
+# 'd' is the default prefix for integer types such as 'i' and 'l'.
+_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'}
+
+
+def get_flinalg_funcs(names, arrays=(), debug=0):
+    """Return optimal available _flinalg function objects for the given
+    names. Arrays are used to determine the optimal type prefix."""
+    ordering = []
+    for i, ar in enumerate(arrays):
+        t = ar.dtype.char
+        if t not in _type_conv:
+            t = 'd'
+        ordering.append((t, i))
+    if ordering:
+        ordering.sort()
+        required_prefix = _type_conv[ordering[0][0]]
+    else:
+        required_prefix = 'd'
+    # Some routines may require special treatment.
+    # Handle them here before the default lookup.
+
+    # Default lookup:
+    if ordering and has_column_major_storage(arrays[ordering[0][1]]):
+        suffix1, suffix2 = '_c', '_r'
+    else:
+        suffix1, suffix2 = '_r', '_c'
+
+    funcs = []
+    for name in names:
+        func_name = required_prefix + name
+        func = getattr(_flinalg, func_name + suffix1,
+                       getattr(_flinalg, func_name + suffix2, None))
+        funcs.append(func)
+    return tuple(funcs)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/_interpolative_backend.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/_interpolative_backend.py
new file mode 100644
index 00000000..7835314f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/_interpolative_backend.py
@@ -0,0 +1,1681 @@
+#******************************************************************************
+#   Copyright (C) 2013 Kenneth L. Ho
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions are met:
+#
+#   Redistributions of source code must retain the above copyright notice, this
+#   list of conditions and the following disclaimer. Redistributions in binary
+#   form must reproduce the above copyright notice, this list of conditions and
+#   the following disclaimer in the documentation and/or other materials
+#   provided with the distribution.
+#
+#   None of the names of the copyright holders may be used to endorse or
+#   promote products derived from this software without specific prior written
+#   permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+#   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+#   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+#   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+#   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+#   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+#   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+#   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+#   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+#   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+#   POSSIBILITY OF SUCH DAMAGE.
+#******************************************************************************
+
+"""
+Direct wrappers for Fortran `id_dist` backend.
+"""
+
+import scipy.linalg._interpolative as _id
+import numpy as np
+
+_RETCODE_ERROR = RuntimeError("nonzero return code")
+
+
+def _asfortranarray_copy(A):
+    """
+    Same as np.asfortranarray, but ensure a copy
+    """
+    A = np.asarray(A)
+    if A.flags.f_contiguous:
+        A = A.copy(order="F")
+    else:
+        A = np.asfortranarray(A)
+    return A
+
+
+#------------------------------------------------------------------------------
+# id_rand.f
+#------------------------------------------------------------------------------
+
+def id_srand(n):
+    """
+    Generate standard uniform pseudorandom numbers via a very efficient lagged
+    Fibonacci method.
+
+    :param n:
+        Number of pseudorandom numbers to generate.
+    :type n: int
+
+    :return:
+        Pseudorandom numbers.
+    :rtype: :class:`numpy.ndarray`
+    """
+    return _id.id_srand(n)
+
+
+def id_srandi(t):
+    """
+    Initialize seed values for :func:`id_srand` (any appropriately random
+    numbers will do).
+
+    :param t:
+        Array of 55 seed values.
+    :type t: :class:`numpy.ndarray`
+    """
+    t = np.asfortranarray(t)
+    _id.id_srandi(t)
+
+
+def id_srando():
+    """
+    Reset seed values to their original values.
+    """
+    _id.id_srando()
+
+
+#------------------------------------------------------------------------------
+# idd_frm.f
+#------------------------------------------------------------------------------
+
+def idd_frm(n, w, x):
+    """
+    Transform real vector via a composition of Rokhlin's random transform,
+    random subselection, and an FFT.
+
+    In contrast to :func:`idd_sfrm`, this routine works best when the length of
+    the transformed vector is the power-of-two integer output by
+    :func:`idd_frmi`, or when the length is not specified but instead
+    determined a posteriori from the output. The returned transformed vector is
+    randomly permuted.
+
+    :param n:
+        Greatest power-of-two integer satisfying `n <= x.size` as obtained from
+        :func:`idd_frmi`; `n` is also the length of the output vector.
+    :type n: int
+    :param w:
+        Initialization array constructed by :func:`idd_frmi`.
+    :type w: :class:`numpy.ndarray`
+    :param x:
+        Vector to be transformed.
+    :type x: :class:`numpy.ndarray`
+
+    :return:
+        Transformed vector.
+    :rtype: :class:`numpy.ndarray`
+    """
+    return _id.idd_frm(n, w, x)
+
+
+def idd_sfrm(l, n, w, x):
+    """
+    Transform real vector via a composition of Rokhlin's random transform,
+    random subselection, and an FFT.
+
+    In contrast to :func:`idd_frm`, this routine works best when the length of
+    the transformed vector is known a priori.
+
+    :param l:
+        Length of transformed vector, satisfying `l <= n`.
+    :type l: int
+    :param n:
+        Greatest power-of-two integer satisfying `n <= x.size` as obtained from
+        :func:`idd_sfrmi`.
+    :type n: int
+    :param w:
+        Initialization array constructed by :func:`idd_sfrmi`.
+    :type w: :class:`numpy.ndarray`
+    :param x:
+        Vector to be transformed.
+    :type x: :class:`numpy.ndarray`
+
+    :return:
+        Transformed vector.
+    :rtype: :class:`numpy.ndarray`
+    """
+    return _id.idd_sfrm(l, n, w, x)
+
+
+def idd_frmi(m):
+    """
+    Initialize data for :func:`idd_frm`.
+
+    :param m:
+        Length of vector to be transformed.
+    :type m: int
+
+    :return:
+        Greatest power-of-two integer `n` satisfying `n <= m`.
+    :rtype: int
+    :return:
+        Initialization array to be used by :func:`idd_frm`.
+    :rtype: :class:`numpy.ndarray`
+    """
+    return _id.idd_frmi(m)
+
+
+def idd_sfrmi(l, m):
+    """
+    Initialize data for :func:`idd_sfrm`.
+
+    :param l:
+        Length of output transformed vector.
+    :type l: int
+    :param m:
+        Length of the vector to be transformed.
+    :type m: int
+
+    :return:
+        Greatest power-of-two integer `n` satisfying `n <= m`.
+    :rtype: int
+    :return:
+        Initialization array to be used by :func:`idd_sfrm`.
+    :rtype: :class:`numpy.ndarray`
+    """
+    return _id.idd_sfrmi(l, m)
+
+
+#------------------------------------------------------------------------------
+# idd_id.f
+#------------------------------------------------------------------------------
+
+def iddp_id(eps, A):
+    """
+    Compute ID of a real matrix to a specified relative precision.
+
+    :param eps:
+        Relative precision.
+    :type eps: float
+    :param A:
+        Matrix.
+    :type A: :class:`numpy.ndarray`
+
+    :return:
+        Rank of ID.
+    :rtype: int
+    :return:
+        Column index array.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Interpolation coefficients.
+    :rtype: :class:`numpy.ndarray`
+    """
+    A = _asfortranarray_copy(A)
+    k, idx, rnorms = _id.iddp_id(eps, A)
+    n = A.shape[1]
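+    # The backend overwrites A in place; the interpolation coefficients
+    # occupy its leading k*(n-k) entries in Fortran (column-major) order.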
+    proj = A.T.ravel()[:k*(n-k)].reshape((k, n-k), order='F')
+    return k, idx, proj
+
+
+def iddr_id(A, k):
+    """
+    Compute ID of a real matrix to a specified rank.
+
+    :param A:
+        Matrix.
+    :type A: :class:`numpy.ndarray`
+    :param k:
+        Rank of ID.
+    :type k: int
+
+    :return:
+        Column index array.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Interpolation coefficients.
+    :rtype: :class:`numpy.ndarray`
+    """
+    A = _asfortranarray_copy(A)
+    idx, rnorms = _id.iddr_id(A, k)
+    n = A.shape[1]
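+    # As in iddp_id, the backend overwrites A in place; the interpolation
+    # coefficients occupy its leading k*(n-k) entries in Fortran order.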
+    proj = A.T.ravel()[:k*(n-k)].reshape((k, n-k), order='F')
+    return idx, proj
+
+
+def idd_reconid(B, idx, proj):
+    """
+    Reconstruct matrix from real ID.
+
+    :param B:
+        Skeleton matrix.
+    :type B: :class:`numpy.ndarray`
+    :param idx:
+        Column index array.
+    :type idx: :class:`numpy.ndarray`
+    :param proj:
+        Interpolation coefficients.
+    :type proj: :class:`numpy.ndarray`
+
+    :return:
+        Reconstructed matrix.
+    :rtype: :class:`numpy.ndarray`
+    """
+    B = np.asfortranarray(B)
+    if proj.size > 0:
+        return _id.idd_reconid(B, idx, proj)
+    else:
+        return B[:, np.argsort(idx)]
+
+
+def idd_reconint(idx, proj):
+    """
+    Reconstruct interpolation matrix from real ID.
+
+    :param idx:
+        Column index array.
+    :type idx: :class:`numpy.ndarray`
+    :param proj:
+        Interpolation coefficients.
+    :type proj: :class:`numpy.ndarray`
+
+    :return:
+        Interpolation matrix.
+    :rtype: :class:`numpy.ndarray`
+    """
+    return _id.idd_reconint(idx, proj)
+
+
+def idd_copycols(A, k, idx):
+    """
+    Reconstruct skeleton matrix from real ID.
+
+    :param A:
+        Original matrix.
+    :type A: :class:`numpy.ndarray`
+    :param k:
+        Rank of ID.
+    :type k: int
+    :param idx:
+        Column index array.
+    :type idx: :class:`numpy.ndarray`
+
+    :return:
+        Skeleton matrix.
+    :rtype: :class:`numpy.ndarray`
+    """
+    A = np.asfortranarray(A)
+    return _id.idd_copycols(A, k, idx)
+
+
+#------------------------------------------------------------------------------
+# idd_id2svd.f
+#------------------------------------------------------------------------------
+
+def idd_id2svd(B, idx, proj):
+    """
+    Convert real ID to SVD.
+
+    :param B:
+        Skeleton matrix.
+    :type B: :class:`numpy.ndarray`
+    :param idx:
+        Column index array.
+    :type idx: :class:`numpy.ndarray`
+    :param proj:
+        Interpolation coefficients.
+    :type proj: :class:`numpy.ndarray`
+
+    :return:
+        Left singular vectors.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Right singular vectors.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Singular values.
+    :rtype: :class:`numpy.ndarray`
+    """
+    B = np.asfortranarray(B)
+    U, V, S, ier = _id.idd_id2svd(B, idx, proj)
+    if ier:
+        raise _RETCODE_ERROR
+    return U, V, S
+
+
+#------------------------------------------------------------------------------
+# idd_snorm.f
+#------------------------------------------------------------------------------
+
+def idd_snorm(m, n, matvect, matvec, its=20):
+    """
+    Estimate spectral norm of a real matrix by the randomized power method.
+
+    :param m:
+        Matrix row dimension.
+    :type m: int
+    :param n:
+        Matrix column dimension.
+    :type n: int
+    :param matvect:
+        Function to apply the matrix transpose to a vector, with call signature
+        `y = matvect(x)`, where `x` and `y` are the input and output vectors,
+        respectively.
+    :type matvect: function
+    :param matvec:
+        Function to apply the matrix to a vector, with call signature
+        `y = matvec(x)`, where `x` and `y` are the input and output vectors,
+        respectively.
+    :type matvec: function
+    :param its:
+        Number of power method iterations.
+    :type its: int
+
+    :return:
+        Spectral norm estimate.
+    :rtype: float
+    """
+    snorm, v = _id.idd_snorm(m, n, matvect, matvec, its)
+    return snorm
+
+
+def idd_diffsnorm(m, n, matvect, matvect2, matvec, matvec2, its=20):
+    """
+    Estimate spectral norm of the difference of two real matrices by the
+    randomized power method.
+
+    :param m:
+        Matrix row dimension.
+    :type m: int
+    :param n:
+        Matrix column dimension.
+    :type n: int
+    :param matvect:
+        Function to apply the transpose of the first matrix to a vector, with
+        call signature `y = matvect(x)`, where `x` and `y` are the input and
+        output vectors, respectively.
+    :type matvect: function
+    :param matvect2:
+        Function to apply the transpose of the second matrix to a vector, with
+        call signature `y = matvect2(x)`, where `x` and `y` are the input and
+        output vectors, respectively.
+    :type matvect2: function
+    :param matvec:
+        Function to apply the first matrix to a vector, with call signature
+        `y = matvec(x)`, where `x` and `y` are the input and output vectors,
+        respectively.
+    :type matvec: function
+    :param matvec2:
+        Function to apply the second matrix to a vector, with call signature
+        `y = matvec2(x)`, where `x` and `y` are the input and output vectors,
+        respectively.
+    :type matvec2: function
+    :param its:
+        Number of power method iterations.
+    :type its: int
+
+    :return:
+        Spectral norm estimate of matrix difference.
+    :rtype: float
+    """
+    return _id.idd_diffsnorm(m, n, matvect, matvect2, matvec, matvec2, its)
+
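+# A minimal usage sketch (hedged): both estimators only need callables,
+# so plain closures over a dense array suffice:
+#
+#     rng = np.random.default_rng(0)
+#     A = rng.standard_normal((50, 30))
+#     B = A + 1e-6 * rng.standard_normal((50, 30))
+#     s = idd_snorm(50, 30, lambda x: A.T @ x, lambda x: A @ x)
+#     # s approximates np.linalg.norm(A, 2).
+#     d = idd_diffsnorm(50, 30,
+#                       lambda x: A.T @ x, lambda x: B.T @ x,
+#                       lambda x: A @ x, lambda x: B @ x)
+#     # d approximates np.linalg.norm(A - B, 2).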
+
+#------------------------------------------------------------------------------
+# idd_svd.f
+#------------------------------------------------------------------------------
+
+def iddr_svd(A, k):
+    """
+    Compute SVD of a real matrix to a specified rank.
+
+    :param A:
+        Matrix.
+    :type A: :class:`numpy.ndarray`
+    :param k:
+        Rank of SVD.
+    :type k: int
+
+    :return:
+        Left singular vectors.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Right singular vectors.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Singular values.
+    :rtype: :class:`numpy.ndarray`
+    """
+    A = np.asfortranarray(A)
+    U, V, S, ier = _id.iddr_svd(A, k)
+    if ier:
+        raise _RETCODE_ERROR
+    return U, V, S
+
+
+def iddp_svd(eps, A):
+    """
+    Compute SVD of a real matrix to a specified relative precision.
+
+    :param eps:
+        Relative precision.
+    :type eps: float
+    :param A:
+        Matrix.
+    :type A: :class:`numpy.ndarray`
+
+    :return:
+        Left singular vectors.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Right singular vectors.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Singular values.
+    :rtype: :class:`numpy.ndarray`
+    """
+    A = np.asfortranarray(A)
+    m, n = A.shape
+    k, iU, iV, iS, w, ier = _id.iddp_svd(eps, A)
+    if ier:
+        raise _RETCODE_ERROR
+    U = w[iU-1:iU+m*k-1].reshape((m, k), order='F')
+    V = w[iV-1:iV+n*k-1].reshape((n, k), order='F')
+    S = w[iS-1:iS+k-1]
+    return U, V, S
+
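+# A minimal usage sketch (hedged) contrasting the two SVD drivers above:
+# iddr_svd fixes the rank a priori, while iddp_svd chooses it from eps.
+#
+#     rng = np.random.default_rng(0)
+#     A = rng.standard_normal((50, 5)) @ rng.standard_normal((5, 40))
+#     U1, V1, S1 = iddr_svd(A, 5)        # rank fixed at 5
+#     U2, V2, S2 = iddp_svd(1e-10, A)    # rank chosen automatically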
+
+#------------------------------------------------------------------------------
+# iddp_aid.f
+#------------------------------------------------------------------------------
+
+def iddp_aid(eps, A):
+    """
+    Compute ID of a real matrix to a specified relative precision using random
+    sampling.
+
+    :param eps:
+        Relative precision.
+    :type eps: float
+    :param A:
+        Matrix.
+    :type A: :class:`numpy.ndarray`
+
+    :return:
+        Rank of ID.
+    :rtype: int
+    :return:
+        Column index array.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Interpolation coefficients.
+    :rtype: :class:`numpy.ndarray`
+    """
+    A = np.asfortranarray(A)
+    m, n = A.shape
+    n2, w = idd_frmi(m)
+    proj = np.empty(n*(2*n2 + 1) + n2 + 1, order='F')
+    k, idx, proj = _id.iddp_aid(eps, A, w, proj)
+    proj = proj[:k*(n-k)].reshape((k, n-k), order='F')
+    return k, idx, proj
+
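+# A minimal usage sketch (hedged): an eps-accurate ID via random sampling,
+# reconstructed with idd_reconid from earlier in this module:
+#
+#     rng = np.random.default_rng(0)
+#     A = rng.standard_normal((80, 6)) @ rng.standard_normal((6, 70))
+#     k, idx, proj = iddp_aid(1e-12, A)    # k should come out close to 6
+#     B = idd_copycols(A, k, idx)
+#     # idd_reconid(B, idx, proj) rebuilds A to the requested precision.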
+
+def idd_estrank(eps, A):
+    """
+    Estimate rank of a real matrix to a specified relative precision using
+    random sampling.
+
+    The output rank is typically about 8 higher than the actual rank.
+
+    :param eps:
+        Relative precision.
+    :type eps: float
+    :param A:
+        Matrix.
+    :type A: :class:`numpy.ndarray`
+
+    :return:
+        Rank estimate.
+    :rtype: int
+    """
+    A = np.asfortranarray(A)
+    m, n = A.shape
+    n2, w = idd_frmi(m)
+    ra = np.empty(n*n2 + (n + 1)*(n2 + 1), order='F')
+    k, ra = _id.idd_estrank(eps, A, w, ra)
+    return k
+
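+# A minimal usage sketch (hedged): for an exactly rank-10 matrix the
+# estimate should land near 10 plus the ~8 oversampling noted above.
+#
+#     rng = np.random.default_rng(0)
+#     A = rng.standard_normal((200, 10)) @ rng.standard_normal((10, 150))
+#     k = idd_estrank(1e-10, A)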
+
+#------------------------------------------------------------------------------
+# iddp_asvd.f
+#------------------------------------------------------------------------------
+
+def iddp_asvd(eps, A):
+    """
+    Compute SVD of a real matrix to a specified relative precision using random
+    sampling.
+
+    :param eps:
+        Relative precision.
+    :type eps: float
+    :param A:
+        Matrix.
+    :type A: :class:`numpy.ndarray`
+
+    :return:
+        Left singular vectors.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Right singular vectors.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Singular values.
+    :rtype: :class:`numpy.ndarray`
+    """
+    A = np.asfortranarray(A)
+    m, n = A.shape
+    n2, winit = _id.idd_frmi(m)
+    w = np.empty(
+        max((min(m, n) + 1)*(3*m + 5*n + 1) + 25*min(m, n)**2,
+            (2*n + 1)*(n2 + 1)),
+        order='F')
+    k, iU, iV, iS, w, ier = _id.iddp_asvd(eps, A, winit, w)
+    if ier:
+        raise _RETCODE_ERROR
+    U = w[iU-1:iU+m*k-1].reshape((m, k), order='F')
+    V = w[iV-1:iV+n*k-1].reshape((n, k), order='F')
+    S = w[iS-1:iS+k-1]
+    return U, V, S
+
+
+#------------------------------------------------------------------------------
+# iddp_rid.f
+#------------------------------------------------------------------------------
+
+def iddp_rid(eps, m, n, matvect):
+    """
+    Compute ID of a real matrix to a specified relative precision using random
+    matrix-vector multiplication.
+
+    :param eps:
+        Relative precision.
+    :type eps: float
+    :param m:
+        Matrix row dimension.
+    :type m: int
+    :param n:
+        Matrix column dimension.
+    :type n: int
+    :param matvect:
+        Function to apply the matrix transpose to a vector, with call signature
+        `y = matvect(x)`, where `x` and `y` are the input and output vectors,
+        respectively.
+    :type matvect: function
+
+    :return:
+        Rank of ID.
+    :rtype: int
+    :return:
+        Column index array.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Interpolation coefficients.
+    :rtype: :class:`numpy.ndarray`
+    """
+    proj = np.empty(m + 1 + 2*n*(min(m, n) + 1), order='F')
+    k, idx, proj, ier = _id.iddp_rid(eps, m, n, matvect, proj)
+    if ier != 0:
+        raise _RETCODE_ERROR
+    proj = proj[:k*(n-k)].reshape((k, n-k), order='F')
+    return k, idx, proj
+
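+# A minimal usage sketch (hedged): only transpose-times-vector access to
+# the matrix is required, so a closure works as matvect:
+#
+#     rng = np.random.default_rng(0)
+#     m, n = 100, 60
+#     A = rng.standard_normal((m, 4)) @ rng.standard_normal((4, n))
+#     k, idx, proj = iddp_rid(1e-8, m, n, lambda x: A.T @ x)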
+
+def idd_findrank(eps, m, n, matvect):
+    """
+    Estimate rank of a real matrix to a specified relative precision using
+    random matrix-vector multiplication.
+
+    :param eps:
+        Relative precision.
+    :type eps: float
+    :param m:
+        Matrix row dimension.
+    :type m: int
+    :param n:
+        Matrix column dimension.
+    :type n: int
+    :param matvect:
+        Function to apply the matrix transpose to a vector, with call signature
+        `y = matvect(x)`, where `x` and `y` are the input and output vectors,
+        respectively.
+    :type matvect: function
+
+    :return:
+        Rank estimate.
+    :rtype: int
+    """
+    k, ra, ier = _id.idd_findrank(eps, m, n, matvect)
+    if ier:
+        raise _RETCODE_ERROR
+    return k
+
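+# A minimal usage sketch (hedged):
+#
+#     rng = np.random.default_rng(0)
+#     m, n = 120, 80
+#     A = rng.standard_normal((m, 7)) @ rng.standard_normal((7, n))
+#     k = idd_findrank(1e-10, m, n, lambda x: A.T @ x)   # expect k near 7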
+
+#------------------------------------------------------------------------------
+# iddp_rsvd.f
+#------------------------------------------------------------------------------
+
+def iddp_rsvd(eps, m, n, matvect, matvec):
+    """
+    Compute SVD of a real matrix to a specified relative precision using random
+    matrix-vector multiplication.
+
+    :param eps:
+        Relative precision.
+    :type eps: float
+    :param m:
+        Matrix row dimension.
+    :type m: int
+    :param n:
+        Matrix column dimension.
+    :type n: int
+    :param matvect:
+        Function to apply the matrix transpose to a vector, with call signature
+        `y = matvect(x)`, where `x` and `y` are the input and output vectors,
+        respectively.
+    :type matvect: function
+    :param matvec:
+        Function to apply the matrix to a vector, with call signature
+        `y = matvec(x)`, where `x` and `y` are the input and output vectors,
+        respectively.
+    :type matvec: function
+
+    :return:
+        Left singular vectors.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Right singular vectors.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Singular values.
+    :rtype: :class:`numpy.ndarray`
+    """
+    k, iU, iV, iS, w, ier = _id.iddp_rsvd(eps, m, n, matvect, matvec)
+    if ier:
+        raise _RETCODE_ERROR
+    U = w[iU-1:iU+m*k-1].reshape((m, k), order='F')
+    V = w[iV-1:iV+n*k-1].reshape((n, k), order='F')
+    S = w[iS-1:iS+k-1]
+    return U, V, S
+
+
+#------------------------------------------------------------------------------
+# iddr_aid.f
+#------------------------------------------------------------------------------
+
+def iddr_aid(A, k):
+    """
+    Compute ID of a real matrix to a specified rank using random sampling.
+
+    :param A:
+        Matrix.
+    :type A: :class:`numpy.ndarray`
+    :param k:
+        Rank of ID.
+    :type k: int
+
+    :return:
+        Column index array.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Interpolation coefficients.
+    :rtype: :class:`numpy.ndarray`
+    """
+    A = np.asfortranarray(A)
+    m, n = A.shape
+    w = iddr_aidi(m, n, k)
+    idx, proj = _id.iddr_aid(A, k, w)
+    if k == n:
+        proj = np.empty((k, n-k), dtype='float64', order='F')
+    else:
+        proj = proj.reshape((k, n-k), order='F')
+    return idx, proj
+
+
+def iddr_aidi(m, n, k):
+    """
+    Initialize array for :func:`iddr_aid`.
+
+    :param m:
+        Matrix row dimension.
+    :type m: int
+    :param n:
+        Matrix column dimension.
+    :type n: int
+    :param k:
+        Rank of ID.
+    :type k: int
+
+    :return:
+        Initialization array to be used by :func:`iddr_aid`.
+    :rtype: :class:`numpy.ndarray`
+    """
+    return _id.iddr_aidi(m, n, k)
+
+
+#------------------------------------------------------------------------------
+# iddr_asvd.f
+#------------------------------------------------------------------------------
+
+def iddr_asvd(A, k):
+    """
+    Compute SVD of a real matrix to a specified rank using random sampling.
+
+    :param A:
+        Matrix.
+    :type A: :class:`numpy.ndarray`
+    :param k:
+        Rank of SVD.
+    :type k: int
+
+    :return:
+        Left singular vectors.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Right singular vectors.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Singular values.
+    :rtype: :class:`numpy.ndarray`
+    """
+    A = np.asfortranarray(A)
+    m, n = A.shape
+    w = np.empty((2*k + 28)*m + (6*k + 21)*n + 25*k**2 + 100, order='F')
+    w_ = iddr_aidi(m, n, k)
+    w[:w_.size] = w_
+    U, V, S, ier = _id.iddr_asvd(A, k, w)
+    if ier != 0:
+        raise _RETCODE_ERROR
+    return U, V, S
+
+
+#------------------------------------------------------------------------------
+# iddr_rid.f
+#------------------------------------------------------------------------------
+
+def iddr_rid(m, n, matvect, k):
+    """
+    Compute ID of a real matrix to a specified rank using random matrix-vector
+    multiplication.
+
+    :param m:
+        Matrix row dimension.
+    :type m: int
+    :param n:
+        Matrix column dimension.
+    :type n: int
+    :param matvect:
+        Function to apply the matrix transpose to a vector, with call signature
+        `y = matvect(x)`, where `x` and `y` are the input and output vectors,
+        respectively.
+    :type matvect: function
+    :param k:
+        Rank of ID.
+    :type k: int
+
+    :return:
+        Column index array.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Interpolation coefficients.
+    :rtype: :class:`numpy.ndarray`
+    """
+    idx, proj = _id.iddr_rid(m, n, matvect, k)
+    proj = proj[:k*(n-k)].reshape((k, n-k), order='F')
+    return idx, proj
+
+
+#------------------------------------------------------------------------------
+# iddr_rsvd.f
+#------------------------------------------------------------------------------
+
+def iddr_rsvd(m, n, matvect, matvec, k):
+    """
+    Compute SVD of a real matrix to a specified rank using random matrix-vector
+    multiplication.
+
+    :param m:
+        Matrix row dimension.
+    :type m: int
+    :param n:
+        Matrix column dimension.
+    :type n: int
+    :param matvect:
+        Function to apply the matrix transpose to a vector, with call signature
+        `y = matvect(x)`, where `x` and `y` are the input and output vectors,
+        respectively.
+    :type matvect: function
+    :param matvec:
+        Function to apply the matrix to a vector, with call signature
+        `y = matvec(x)`, where `x` and `y` are the input and output vectors,
+        respectively.
+    :type matvec: function
+    :param k:
+        Rank of SVD.
+    :type k: int
+
+    :return:
+        Left singular vectors.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Right singular vectors.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Singular values.
+    :rtype: :class:`numpy.ndarray`
+    """
+    U, V, S, ier = _id.iddr_rsvd(m, n, matvect, matvec, k)
+    if ier != 0:
+        raise _RETCODE_ERROR
+    return U, V, S
+
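+# A minimal usage sketch (hedged): a rank-k SVD computed purely from
+# matrix-vector products, without handing the routine the matrix itself:
+#
+#     rng = np.random.default_rng(0)
+#     m, n, k = 90, 70, 5
+#     A = rng.standard_normal((m, k)) @ rng.standard_normal((k, n))
+#     U, V, S = iddr_rsvd(m, n, lambda x: A.T @ x, lambda x: A @ x, k)
+#     # U @ np.diag(S) @ V.T approximates A.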
+
+#------------------------------------------------------------------------------
+# idz_frm.f
+#------------------------------------------------------------------------------
+
+def idz_frm(n, w, x):
+    """
+    Transform complex vector via a composition of Rokhlin's random transform,
+    random subselection, and an FFT.
+
+    In contrast to :func:`idz_sfrm`, this routine works best when the length of
+    the transformed vector is the power-of-two integer output by
+    :func:`idz_frmi`, or when the length is not specified but instead
+    determined a posteriori from the output. The returned transformed vector is
+    randomly permuted.
+
+    :param n:
+        Greatest power-of-two integer satisfying `n <= x.size` as obtained from
+        :func:`idz_frmi`; `n` is also the length of the output vector.
+    :type n: int
+    :param w:
+        Initialization array constructed by :func:`idz_frmi`.
+    :type w: :class:`numpy.ndarray`
+    :param x:
+        Vector to be transformed.
+    :type x: :class:`numpy.ndarray`
+
+    :return:
+        Transformed vector.
+    :rtype: :class:`numpy.ndarray`
+    """
+    return _id.idz_frm(n, w, x)
+
+
+def idz_sfrm(l, n, w, x):
+    """
+    Transform complex vector via a composition of Rokhlin's random transform,
+    random subselection, and an FFT.
+
+    In contrast to :func:`idz_frm`, this routine works best when the length of
+    the transformed vector is known a priori.
+
+    :param l:
+        Length of transformed vector, satisfying `l <= n`.
+    :type l: int
+    :param n:
+        Greatest power-of-two integer satisfying `n <= x.size` as obtained from
+        :func:`idz_sfrmi`.
+    :type n: int
+    :param w:
+        Initialization array constructed by :func:`idz_sfrmi`.
+    :type w: :class:`numpy.ndarray`
+    :param x:
+        Vector to be transformed.
+    :type x: :class:`numpy.ndarray`
+
+    :return:
+        Transformed vector.
+    :rtype: :class:`numpy.ndarray`
+    """
+    return _id.idz_sfrm(l, n, w, x)
+
+
+def idz_frmi(m):
+    """
+    Initialize data for :func:`idz_frm`.
+
+    :param m:
+        Length of vector to be transformed.
+    :type m: int
+
+    :return:
+        Greatest power-of-two integer `n` satisfying `n <= m`.
+    :rtype: int
+    :return:
+        Initialization array to be used by :func:`idz_frm`.
+    :rtype: :class:`numpy.ndarray`
+    """
+    return _id.idz_frmi(m)
+
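+# A minimal usage sketch (hedged): the init/transform pairing for the
+# complex random transform; idz_frmi is called once per vector length.
+#
+#     rng = np.random.default_rng(0)
+#     x = rng.standard_normal(100) + 1j * rng.standard_normal(100)
+#     n, w = idz_frmi(x.size)
+#     y = idz_frm(n, w, x)      # y has length n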
+
+def idz_sfrmi(l, m):
+    """
+    Initialize data for :func:`idz_sfrm`.
+
+    :param l:
+        Length of output transformed vector.
+    :type l: int
+    :param m:
+        Length of the vector to be transformed.
+    :type m: int
+
+    :return:
+        Greatest power-of-two integer `n` satisfying `n <= m`.
+    :rtype: int
+    :return:
+        Initialization array to be used by :func:`idz_sfrm`.
+    :rtype: :class:`numpy.ndarray`
+    """
+    return _id.idz_sfrmi(l, m)
+
+
+#------------------------------------------------------------------------------
+# idz_id.f
+#------------------------------------------------------------------------------
+
+def idzp_id(eps, A):
+    """
+    Compute ID of a complex matrix to a specified relative precision.
+
+    :param eps:
+        Relative precision.
+    :type eps: float
+    :param A:
+        Matrix.
+    :type A: :class:`numpy.ndarray`
+
+    :return:
+        Rank of ID.
+    :rtype: int
+    :return:
+        Column index array.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Interpolation coefficients.
+    :rtype: :class:`numpy.ndarray`
+    """
+    A = _asfortranarray_copy(A)
+    k, idx, rnorms = _id.idzp_id(eps, A)
+    n = A.shape[1]
+    proj = A.T.ravel()[:k*(n-k)].reshape((k, n-k), order='F')
+    return k, idx, proj
+
+
+def idzr_id(A, k):
+    """
+    Compute ID of a complex matrix to a specified rank.
+
+    :param A:
+        Matrix.
+    :type A: :class:`numpy.ndarray`
+    :param k:
+        Rank of ID.
+    :type k: int
+
+    :return:
+        Column index array.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Interpolation coefficients.
+    :rtype: :class:`numpy.ndarray`
+    """
+    A = _asfortranarray_copy(A)
+    idx, rnorms = _id.idzr_id(A, k)
+    n = A.shape[1]
+    proj = A.T.ravel()[:k*(n-k)].reshape((k, n-k), order='F')
+    return idx, proj
+
+
+def idz_reconid(B, idx, proj):
+    """
+    Reconstruct matrix from complex ID.
+
+    :param B:
+        Skeleton matrix.
+    :type B: :class:`numpy.ndarray`
+    :param idx:
+        Column index array.
+    :type idx: :class:`numpy.ndarray`
+    :param proj:
+        Interpolation coefficients.
+    :type proj: :class:`numpy.ndarray`
+
+    :return:
+        Reconstructed matrix.
+    :rtype: :class:`numpy.ndarray`
+    """
+    B = np.asfortranarray(B)
+    if proj.size > 0:
+        return _id.idz_reconid(B, idx, proj)
+    else:
+        return B[:, np.argsort(idx)]
+
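+# A minimal usage sketch (hedged): a fixed-rank complex ID and its
+# reconstruction (idz_copycols, defined just below, extracts the skeleton):
+#
+#     rng = np.random.default_rng(0)
+#     A = (rng.standard_normal((40, 3)) + 1j * rng.standard_normal((40, 3))) \
+#         @ (rng.standard_normal((3, 30)) + 1j * rng.standard_normal((3, 30)))
+#     idx, proj = idzr_id(A, 3)
+#     B = idz_copycols(A, 3, idx)
+#     # idz_reconid(B, idx, proj) reproduces A up to roundoff.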
+
+def idz_reconint(idx, proj):
+    """
+    Reconstruct interpolation matrix from complex ID.
+
+    :param idx:
+        Column index array.
+    :type idx: :class:`numpy.ndarray`
+    :param proj:
+        Interpolation coefficients.
+    :type proj: :class:`numpy.ndarray`
+
+    :return:
+        Interpolation matrix.
+    :rtype: :class:`numpy.ndarray`
+    """
+    return _id.idz_reconint(idx, proj)
+
+
+def idz_copycols(A, k, idx):
+    """
+    Reconstruct skeleton matrix from complex ID.
+
+    :param A:
+        Original matrix.
+    :type A: :class:`numpy.ndarray`
+    :param k:
+        Rank of ID.
+    :type k: int
+    :param idx:
+        Column index array.
+    :type idx: :class:`numpy.ndarray`
+
+    :return:
+        Skeleton matrix.
+    :rtype: :class:`numpy.ndarray`
+    """
+    A = np.asfortranarray(A)
+    return _id.idz_copycols(A, k, idx)
+
+
+#------------------------------------------------------------------------------
+# idz_id2svd.f
+#------------------------------------------------------------------------------
+
+def idz_id2svd(B, idx, proj):
+    """
+    Convert complex ID to SVD.
+
+    :param B:
+        Skeleton matrix.
+    :type B: :class:`numpy.ndarray`
+    :param idx:
+        Column index array.
+    :type idx: :class:`numpy.ndarray`
+    :param proj:
+        Interpolation coefficients.
+    :type proj: :class:`numpy.ndarray`
+
+    :return:
+        Left singular vectors.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Right singular vectors.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Singular values.
+    :rtype: :class:`numpy.ndarray`
+    """
+    B = np.asfortranarray(B)
+    U, V, S, ier = _id.idz_id2svd(B, idx, proj)
+    if ier:
+        raise _RETCODE_ERROR
+    return U, V, S
+
+
+#------------------------------------------------------------------------------
+# idz_snorm.f
+#------------------------------------------------------------------------------
+
+def idz_snorm(m, n, matveca, matvec, its=20):
+    """
+    Estimate spectral norm of a complex matrix by the randomized power method.
+
+    :param m:
+        Matrix row dimension.
+    :type m: int
+    :param n:
+        Matrix column dimension.
+    :type n: int
+    :param matveca:
+        Function to apply the matrix adjoint to a vector, with call signature
+        `y = matveca(x)`, where `x` and `y` are the input and output vectors,
+        respectively.
+    :type matveca: function
+    :param matvec:
+        Function to apply the matrix to a vector, with call signature
+        `y = matvec(x)`, where `x` and `y` are the input and output vectors,
+        respectively.
+    :type matvec: function
+    :param its:
+        Number of power method iterations.
+    :type its: int
+
+    :return:
+        Spectral norm estimate.
+    :rtype: float
+    """
+    snorm, v = _id.idz_snorm(m, n, matveca, matvec, its)
+    return snorm
+
+
+def idz_diffsnorm(m, n, matveca, matveca2, matvec, matvec2, its=20):
+    """
+    Estimate spectral norm of the difference of two complex matrices by the
+    randomized power method.
+
+    :param m:
+        Matrix row dimension.
+    :type m: int
+    :param n:
+        Matrix column dimension.
+    :type n: int
+    :param matveca:
+        Function to apply the adjoint of the first matrix to a vector, with
+        call signature `y = matveca(x)`, where `x` and `y` are the input and
+        output vectors, respectively.
+    :type matveca: function
+    :param matveca2:
+        Function to apply the adjoint of the second matrix to a vector, with
+        call signature `y = matveca2(x)`, where `x` and `y` are the input and
+        output vectors, respectively.
+    :type matveca2: function
+    :param matvec:
+        Function to apply the first matrix to a vector, with call signature
+        `y = matvec(x)`, where `x` and `y` are the input and output vectors,
+        respectively.
+    :type matvec: function
+    :param matvec2:
+        Function to apply the second matrix to a vector, with call signature
+        `y = matvec2(x)`, where `x` and `y` are the input and output vectors,
+        respectively.
+    :type matvec2: function
+    :param its:
+        Number of power method iterations.
+    :type its: int
+
+    :return:
+        Spectral norm estimate of matrix difference.
+    :rtype: float
+    """
+    return _id.idz_diffsnorm(m, n, matveca, matveca2, matvec, matvec2, its)
+
+
+#------------------------------------------------------------------------------
+# idz_svd.f
+#------------------------------------------------------------------------------
+
+def idzr_svd(A, k):
+    """
+    Compute SVD of a complex matrix to a specified rank.
+
+    :param A:
+        Matrix.
+    :type A: :class:`numpy.ndarray`
+    :param k:
+        Rank of SVD.
+    :type k: int
+
+    :return:
+        Left singular vectors.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Right singular vectors.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Singular values.
+    :rtype: :class:`numpy.ndarray`
+    """
+    A = np.asfortranarray(A)
+    U, V, S, ier = _id.idzr_svd(A, k)
+    if ier:
+        raise _RETCODE_ERROR
+    return U, V, S
+
+
+def idzp_svd(eps, A):
+    """
+    Compute SVD of a complex matrix to a specified relative precision.
+
+    :param eps:
+        Relative precision.
+    :type eps: float
+    :param A:
+        Matrix.
+    :type A: :class:`numpy.ndarray`
+
+    :return:
+        Left singular vectors.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Right singular vectors.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Singular values.
+    :rtype: :class:`numpy.ndarray`
+    """
+    A = np.asfortranarray(A)
+    m, n = A.shape
+    k, iU, iV, iS, w, ier = _id.idzp_svd(eps, A)
+    if ier:
+        raise _RETCODE_ERROR
+    U = w[iU-1:iU+m*k-1].reshape((m, k), order='F')
+    V = w[iV-1:iV+n*k-1].reshape((n, k), order='F')
+    S = w[iS-1:iS+k-1]
+    return U, V, S
+
+
+#------------------------------------------------------------------------------
+# idzp_aid.f
+#------------------------------------------------------------------------------
+
+def idzp_aid(eps, A):
+    """
+    Compute ID of a complex matrix to a specified relative precision using
+    random sampling.
+
+    :param eps:
+        Relative precision.
+    :type eps: float
+    :param A:
+        Matrix.
+    :type A: :class:`numpy.ndarray`
+
+    :return:
+        Rank of ID.
+    :rtype: int
+    :return:
+        Column index array.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Interpolation coefficients.
+    :rtype: :class:`numpy.ndarray`
+    """
+    A = np.asfortranarray(A)
+    m, n = A.shape
+    n2, w = idz_frmi(m)
+    proj = np.empty(n*(2*n2 + 1) + n2 + 1, dtype='complex128', order='F')
+    k, idx, proj = _id.idzp_aid(eps, A, w, proj)
+    proj = proj[:k*(n-k)].reshape((k, n-k), order='F')
+    return k, idx, proj
+
+
+def idz_estrank(eps, A):
+    """
+    Estimate rank of a complex matrix to a specified relative precision using
+    random sampling.
+
+    The output rank is typically about 8 higher than the actual rank.
+
+    :param eps:
+        Relative precision.
+    :type eps: float
+    :param A:
+        Matrix.
+    :type A: :class:`numpy.ndarray`
+
+    :return:
+        Rank estimate.
+    :rtype: int
+    """
+    A = np.asfortranarray(A)
+    m, n = A.shape
+    n2, w = idz_frmi(m)
+    ra = np.empty(n*n2 + (n + 1)*(n2 + 1), dtype='complex128', order='F')
+    k, ra = _id.idz_estrank(eps, A, w, ra)
+    return k
+
+
+#------------------------------------------------------------------------------
+# idzp_asvd.f
+#------------------------------------------------------------------------------
+
+def idzp_asvd(eps, A):
+    """
+    Compute SVD of a complex matrix to a specified relative precision using
+    random sampling.
+
+    :param eps:
+        Relative precision.
+    :type eps: float
+    :param A:
+        Matrix.
+    :type A: :class:`numpy.ndarray`
+
+    :return:
+        Left singular vectors.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Right singular vectors.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Singular values.
+    :rtype: :class:`numpy.ndarray`
+    """
+    A = np.asfortranarray(A)
+    m, n = A.shape
+    n2, winit = _id.idz_frmi(m)
+    w = np.empty(
+        max((min(m, n) + 1)*(3*m + 5*n + 11) + 8*min(m, n)**2,
+            (2*n + 1)*(n2 + 1)),
+        dtype=np.complex128, order='F')
+    k, iU, iV, iS, w, ier = _id.idzp_asvd(eps, A, winit, w)
+    if ier:
+        raise _RETCODE_ERROR
+    U = w[iU-1:iU+m*k-1].reshape((m, k), order='F')
+    V = w[iV-1:iV+n*k-1].reshape((n, k), order='F')
+    S = w[iS-1:iS+k-1]
+    return U, V, S
+
+
+#------------------------------------------------------------------------------
+# idzp_rid.f
+#------------------------------------------------------------------------------
+
+def idzp_rid(eps, m, n, matveca):
+    """
+    Compute ID of a complex matrix to a specified relative precision using
+    random matrix-vector multiplication.
+
+    :param eps:
+        Relative precision.
+    :type eps: float
+    :param m:
+        Matrix row dimension.
+    :type m: int
+    :param n:
+        Matrix column dimension.
+    :type n: int
+    :param matveca:
+        Function to apply the matrix adjoint to a vector, with call signature
+        `y = matveca(x)`, where `x` and `y` are the input and output vectors,
+        respectively.
+    :type matveca: function
+
+    :return:
+        Rank of ID.
+    :rtype: int
+    :return:
+        Column index array.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Interpolation coefficients.
+    :rtype: :class:`numpy.ndarray`
+    """
+    proj = np.empty(
+        m + 1 + 2*n*(min(m, n) + 1),
+        dtype=np.complex128, order='F')
+    k, idx, proj, ier = _id.idzp_rid(eps, m, n, matveca, proj)
+    if ier:
+        raise _RETCODE_ERROR
+    proj = proj[:k*(n-k)].reshape((k, n-k), order='F')
+    return k, idx, proj
+
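+# A minimal usage sketch (hedged): note that the complex routines take the
+# matrix *adjoint* (conjugate transpose), not the plain transpose:
+#
+#     rng = np.random.default_rng(0)
+#     m, n = 80, 50
+#     A = (rng.standard_normal((m, 4)) + 1j * rng.standard_normal((m, 4))) \
+#         @ (rng.standard_normal((4, n)) + 1j * rng.standard_normal((4, n)))
+#     k, idx, proj = idzp_rid(1e-8, m, n, lambda x: A.conj().T @ x)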
+
+def idz_findrank(eps, m, n, matveca):
+    """
+    Estimate rank of a complex matrix to a specified relative precision using
+    random matrix-vector multiplication.
+
+    :param eps:
+        Relative precision.
+    :type eps: float
+    :param m:
+        Matrix row dimension.
+    :type m: int
+    :param n:
+        Matrix column dimension.
+    :type n: int
+    :param matveca:
+        Function to apply the matrix adjoint to a vector, with call signature
+        `y = matveca(x)`, where `x` and `y` are the input and output vectors,
+        respectively.
+    :type matveca: function
+
+    :return:
+        Rank estimate.
+    :rtype: int
+    """
+    k, ra, ier = _id.idz_findrank(eps, m, n, matveca)
+    if ier:
+        raise _RETCODE_ERROR
+    return k
+
+
+#------------------------------------------------------------------------------
+# idzp_rsvd.f
+#------------------------------------------------------------------------------
+
+def idzp_rsvd(eps, m, n, matveca, matvec):
+    """
+    Compute SVD of a complex matrix to a specified relative precision using
+    random matrix-vector multiplication.
+
+    :param eps:
+        Relative precision.
+    :type eps: float
+    :param m:
+        Matrix row dimension.
+    :type m: int
+    :param n:
+        Matrix column dimension.
+    :type n: int
+    :param matveca:
+        Function to apply the matrix adjoint to a vector, with call signature
+        `y = matveca(x)`, where `x` and `y` are the input and output vectors,
+        respectively.
+    :type matveca: function
+    :param matvec:
+        Function to apply the matrix to a vector, with call signature
+        `y = matvec(x)`, where `x` and `y` are the input and output vectors,
+        respectively.
+    :type matvec: function
+
+    :return:
+        Left singular vectors.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Right singular vectors.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Singular values.
+    :rtype: :class:`numpy.ndarray`
+    """
+    k, iU, iV, iS, w, ier = _id.idzp_rsvd(eps, m, n, matveca, matvec)
+    if ier:
+        raise _RETCODE_ERROR
+    U = w[iU-1:iU+m*k-1].reshape((m, k), order='F')
+    V = w[iV-1:iV+n*k-1].reshape((n, k), order='F')
+    S = w[iS-1:iS+k-1]
+    return U, V, S
+
+
+#------------------------------------------------------------------------------
+# idzr_aid.f
+#------------------------------------------------------------------------------
+
+def idzr_aid(A, k):
+    """
+    Compute ID of a complex matrix to a specified rank using random sampling.
+
+    :param A:
+        Matrix.
+    :type A: :class:`numpy.ndarray`
+    :param k:
+        Rank of ID.
+    :type k: int
+
+    :return:
+        Column index array.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Interpolation coefficients.
+    :rtype: :class:`numpy.ndarray`
+    """
+    A = np.asfortranarray(A)
+    m, n = A.shape
+    w = idzr_aidi(m, n, k)
+    idx, proj = _id.idzr_aid(A, k, w)
+    if k == n:
+        proj = np.empty((k, n-k), dtype='complex128', order='F')
+    else:
+        proj = proj.reshape((k, n-k), order='F')
+    return idx, proj
+
+
+def idzr_aidi(m, n, k):
+    """
+    Initialize array for :func:`idzr_aid`.
+
+    :param m:
+        Matrix row dimension.
+    :type m: int
+    :param n:
+        Matrix column dimension.
+    :type n: int
+    :param k:
+        Rank of ID.
+    :type k: int
+
+    :return:
+        Initialization array to be used by :func:`idzr_aid`.
+    :rtype: :class:`numpy.ndarray`
+    """
+    return _id.idzr_aidi(m, n, k)
+
+
+#------------------------------------------------------------------------------
+# idzr_asvd.f
+#------------------------------------------------------------------------------
+
+def idzr_asvd(A, k):
+    """
+    Compute SVD of a complex matrix to a specified rank using random sampling.
+
+    :param A:
+        Matrix.
+    :type A: :class:`numpy.ndarray`
+    :param k:
+        Rank of SVD.
+    :type k: int
+
+    :return:
+        Left singular vectors.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Right singular vectors.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Singular values.
+    :rtype: :class:`numpy.ndarray`
+    """
+    A = np.asfortranarray(A)
+    m, n = A.shape
+    w = np.empty(
+        (2*k + 22)*m + (6*k + 21)*n + 8*k**2 + 10*k + 90,
+        dtype='complex128', order='F')
+    w_ = idzr_aidi(m, n, k)
+    w[:w_.size] = w_
+    U, V, S, ier = _id.idzr_asvd(A, k, w)
+    if ier:
+        raise _RETCODE_ERROR
+    return U, V, S
+
+
+#------------------------------------------------------------------------------
+# idzr_rid.f
+#------------------------------------------------------------------------------
+
+def idzr_rid(m, n, matveca, k):
+    """
+    Compute ID of a complex matrix to a specified rank using random
+    matrix-vector multiplication.
+
+    :param m:
+        Matrix row dimension.
+    :type m: int
+    :param n:
+        Matrix column dimension.
+    :type n: int
+    :param matveca:
+        Function to apply the matrix adjoint to a vector, with call signature
+        `y = matveca(x)`, where `x` and `y` are the input and output vectors,
+        respectively.
+    :type matveca: function
+    :param k:
+        Rank of ID.
+    :type k: int
+
+    :return:
+        Column index array.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Interpolation coefficients.
+    :rtype: :class:`numpy.ndarray`
+    """
+    idx, proj = _id.idzr_rid(m, n, matveca, k)
+    proj = proj[:k*(n-k)].reshape((k, n-k), order='F')
+    return idx, proj
+
+
+#------------------------------------------------------------------------------
+# idzr_rsvd.f
+#------------------------------------------------------------------------------
+
+def idzr_rsvd(m, n, matveca, matvec, k):
+    """
+    Compute SVD of a complex matrix to a specified rank using random
+    matrix-vector multiplication.
+
+    :param m:
+        Matrix row dimension.
+    :type m: int
+    :param n:
+        Matrix column dimension.
+    :type n: int
+    :param matveca:
+        Function to apply the matrix adjoint to a vector, with call signature
+        `y = matveca(x)`, where `x` and `y` are the input and output vectors,
+        respectively.
+    :type matveca: function
+    :param matvec:
+        Function to apply the matrix to a vector, with call signature
+        `y = matvec(x)`, where `x` and `y` are the input and output vectors,
+        respectively.
+    :type matvec: function
+    :param k:
+        Rank of SVD.
+    :type k: int
+
+    :return:
+        Left singular vectors.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Right singular vectors.
+    :rtype: :class:`numpy.ndarray`
+    :return:
+        Singular values.
+    :rtype: :class:`numpy.ndarray`
+    """
+    U, V, S, ier = _id.idzr_rsvd(m, n, matveca, matvec, k)
+    if ier:
+        raise _RETCODE_ERROR
+    return U, V, S
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/_lapack_subroutine_wrappers.f b/__packaged__/coreml/.python_dependencies/scipy/linalg/_lapack_subroutine_wrappers.f
new file mode 100644
index 00000000..c6af4184
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/_lapack_subroutine_wrappers.f
@@ -0,0 +1,2031 @@
+c     This file was generated by _generate_pyx.py.
+c     Do not edit this file directly.
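+c
+c     These wrappers re-expose LAPACK functions as subroutines that
+c     return the function value through their first argument.  A
+c     plausible reason (not stated in this file) is that Fortran
+c     functions returning complex or character values have
+c     compiler-dependent calling conventions, so a subroutine
+c     interface is easier to call reliably from C.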
+
+      subroutine chla_transtypewrp(
+     +    ret,
+     +    trans
+     +    )
+        external chla_transtype
+        character chla_transtype
+        character ret
+        integer trans
+        ret = chla_transtype(
+     +    trans
+     +    )
+      end
+
+      subroutine cladivwrp(
+     +    ret,
+     +    x,
+     +    y
+     +    )
+        external wcladiv
+        complex wcladiv
+        complex ret
+        complex x
+        complex y
+        ret = wcladiv(
+     +    x,
+     +    y
+     +    )
+      end
+
+      subroutine clangbwrp(
+     +    ret,
+     +    norm,
+     +    n,
+     +    kl,
+     +    ku,
+     +    ab,
+     +    ldab,
+     +    work
+     +    )
+        external clangb
+        real clangb
+        real ret
+        character norm
+        integer n
+        integer kl
+        integer ku
+        complex ab
+        integer ldab
+        real work
+        ret = clangb(
+     +    norm,
+     +    n,
+     +    kl,
+     +    ku,
+     +    ab,
+     +    ldab,
+     +    work
+     +    )
+      end
+
+      subroutine clangewrp(
+     +    ret,
+     +    norm,
+     +    m,
+     +    n,
+     +    a,
+     +    lda,
+     +    work
+     +    )
+        external clange
+        real clange
+        real ret
+        character norm
+        integer m
+        integer n
+        complex a
+        integer lda
+        real work
+        ret = clange(
+     +    norm,
+     +    m,
+     +    n,
+     +    a,
+     +    lda,
+     +    work
+     +    )
+      end
+
+      subroutine clangtwrp(
+     +    ret,
+     +    norm,
+     +    n,
+     +    dl,
+     +    d,
+     +    du
+     +    )
+        external clangt
+        real clangt
+        real ret
+        character norm
+        integer n
+        complex dl
+        complex d
+        complex du
+        ret = clangt(
+     +    norm,
+     +    n,
+     +    dl,
+     +    d,
+     +    du
+     +    )
+      end
+
+      subroutine clanhbwrp(
+     +    ret,
+     +    norm,
+     +    uplo,
+     +    n,
+     +    k,
+     +    ab,
+     +    ldab,
+     +    work
+     +    )
+        external clanhb
+        real clanhb
+        real ret
+        character norm
+        character uplo
+        integer n
+        integer k
+        complex ab
+        integer ldab
+        real work
+        ret = clanhb(
+     +    norm,
+     +    uplo,
+     +    n,
+     +    k,
+     +    ab,
+     +    ldab,
+     +    work
+     +    )
+      end
+
+      subroutine clanhewrp(
+     +    ret,
+     +    norm,
+     +    uplo,
+     +    n,
+     +    a,
+     +    lda,
+     +    work
+     +    )
+        external clanhe
+        real clanhe
+        real ret
+        character norm
+        character uplo
+        integer n
+        complex a
+        integer lda
+        real work
+        ret = clanhe(
+     +    norm,
+     +    uplo,
+     +    n,
+     +    a,
+     +    lda,
+     +    work
+     +    )
+      end
+
+      subroutine clanhfwrp(
+     +    ret,
+     +    norm,
+     +    transr,
+     +    uplo,
+     +    n,
+     +    a,
+     +    work
+     +    )
+        external clanhf
+        real clanhf
+        real ret
+        character norm
+        character transr
+        character uplo
+        integer n
+        complex a(*)
+        real work
+        ret = clanhf(
+     +    norm,
+     +    transr,
+     +    uplo,
+     +    n,
+     +    a,
+     +    work
+     +    )
+      end
+
+      subroutine clanhpwrp(
+     +    ret,
+     +    norm,
+     +    uplo,
+     +    n,
+     +    ap,
+     +    work
+     +    )
+        external clanhp
+        real clanhp
+        real ret
+        character norm
+        character uplo
+        integer n
+        complex ap
+        real work
+        ret = clanhp(
+     +    norm,
+     +    uplo,
+     +    n,
+     +    ap,
+     +    work
+     +    )
+      end
+
+      subroutine clanhswrp(
+     +    ret,
+     +    norm,
+     +    n,
+     +    a,
+     +    lda,
+     +    work
+     +    )
+        external clanhs
+        real clanhs
+        real ret
+        character norm
+        integer n
+        complex a
+        integer lda
+        real work
+        ret = clanhs(
+     +    norm,
+     +    n,
+     +    a,
+     +    lda,
+     +    work
+     +    )
+      end
+
+      subroutine clanhtwrp(
+     +    ret,
+     +    norm,
+     +    n,
+     +    d,
+     +    e
+     +    )
+        external clanht
+        real clanht
+        real ret
+        character norm
+        integer n
+        real d
+        complex e
+        ret = clanht(
+     +    norm,
+     +    n,
+     +    d,
+     +    e
+     +    )
+      end
+
+      subroutine clansbwrp(
+     +    ret,
+     +    norm,
+     +    uplo,
+     +    n,
+     +    k,
+     +    ab,
+     +    ldab,
+     +    work
+     +    )
+        external clansb
+        real clansb
+        real ret
+        character norm
+        character uplo
+        integer n
+        integer k
+        complex ab
+        integer ldab
+        real work
+        ret = clansb(
+     +    norm,
+     +    uplo,
+     +    n,
+     +    k,
+     +    ab,
+     +    ldab,
+     +    work
+     +    )
+      end
+
+      subroutine clanspwrp(
+     +    ret,
+     +    norm,
+     +    uplo,
+     +    n,
+     +    ap,
+     +    work
+     +    )
+        external clansp
+        real clansp
+        real ret
+        character norm
+        character uplo
+        integer n
+        complex ap
+        real work
+        ret = clansp(
+     +    norm,
+     +    uplo,
+     +    n,
+     +    ap,
+     +    work
+     +    )
+      end
+
+      subroutine clansywrp(
+     +    ret,
+     +    norm,
+     +    uplo,
+     +    n,
+     +    a,
+     +    lda,
+     +    work
+     +    )
+        external clansy
+        real clansy
+        real ret
+        character norm
+        character uplo
+        integer n
+        complex a
+        integer lda
+        real work
+        ret = clansy(
+     +    norm,
+     +    uplo,
+     +    n,
+     +    a,
+     +    lda,
+     +    work
+     +    )
+      end
+
+      subroutine clantbwrp(
+     +    ret,
+     +    norm,
+     +    uplo,
+     +    diag,
+     +    n,
+     +    k,
+     +    ab,
+     +    ldab,
+     +    work
+     +    )
+        external clantb
+        real clantb
+        real ret
+        character norm
+        character uplo
+        character diag
+        integer n
+        integer k
+        complex ab
+        integer ldab
+        real work
+        ret = clantb(
+     +    norm,
+     +    uplo,
+     +    diag,
+     +    n,
+     +    k,
+     +    ab,
+     +    ldab,
+     +    work
+     +    )
+      end
+
+      subroutine clantpwrp(
+     +    ret,
+     +    norm,
+     +    uplo,
+     +    diag,
+     +    n,
+     +    ap,
+     +    work
+     +    )
+        external clantp
+        real clantp
+        real ret
+        character norm
+        character uplo
+        character diag
+        integer n
+        complex ap
+        real work
+        ret = clantp(
+     +    norm,
+     +    uplo,
+     +    diag,
+     +    n,
+     +    ap,
+     +    work
+     +    )
+      end
+
+      subroutine clantrwrp(
+     +    ret,
+     +    norm,
+     +    uplo,
+     +    diag,
+     +    m,
+     +    n,
+     +    a,
+     +    lda,
+     +    work
+     +    )
+        external clantr
+        real clantr
+        real ret
+        character norm
+        character uplo
+        character diag
+        integer m
+        integer n
+        complex a
+        integer lda
+        real work
+        ret = clantr(
+     +    norm,
+     +    uplo,
+     +    diag,
+     +    m,
+     +    n,
+     +    a,
+     +    lda,
+     +    work
+     +    )
+      end
+
+      subroutine disnanwrp(
+     +    ret,
+     +    din
+     +    )
+        external disnan
+        logical disnan
+        logical ret
+        double precision din
+        ret = disnan(
+     +    din
+     +    )
+      end
+
+      subroutine dlamchwrp(
+     +    ret,
+     +    cmach
+     +    )
+        external dlamch
+        double precision dlamch
+        double precision ret
+        character cmach
+        ret = dlamch(
+     +    cmach
+     +    )
+      end
+
+      subroutine dlanegwrp(
+     +    ret,
+     +    n,
+     +    d,
+     +    lld,
+     +    sigma,
+     +    pivmin,
+     +    r
+     +    )
+        external dlaneg
+        integer dlaneg
+        integer ret
+        integer n
+        double precision d
+        double precision lld
+        double precision sigma
+        double precision pivmin
+        integer r
+        ret = dlaneg(
+     +    n,
+     +    d,
+     +    lld,
+     +    sigma,
+     +    pivmin,
+     +    r
+     +    )
+      end
+
+      subroutine dlangbwrp(
+     +    ret,
+     +    norm,
+     +    n,
+     +    kl,
+     +    ku,
+     +    ab,
+     +    ldab,
+     +    work
+     +    )
+        external dlangb
+        double precision dlangb
+        double precision ret
+        character norm
+        integer n
+        integer kl
+        integer ku
+        double precision ab
+        integer ldab
+        double precision work
+        ret = dlangb(
+     +    norm,
+     +    n,
+     +    kl,
+     +    ku,
+     +    ab,
+     +    ldab,
+     +    work
+     +    )
+      end
+
+      subroutine dlangewrp(
+     +    ret,
+     +    norm,
+     +    m,
+     +    n,
+     +    a,
+     +    lda,
+     +    work
+     +    )
+        external dlange
+        double precision dlange
+        double precision ret
+        character norm
+        integer m
+        integer n
+        double precision a
+        integer lda
+        double precision work
+        ret = dlange(
+     +    norm,
+     +    m,
+     +    n,
+     +    a,
+     +    lda,
+     +    work
+     +    )
+      end
+
+      subroutine dlangtwrp(
+     +    ret,
+     +    norm,
+     +    n,
+     +    dl,
+     +    d,
+     +    du
+     +    )
+        external dlangt
+        double precision dlangt
+        double precision ret
+        character norm
+        integer n
+        double precision dl
+        double precision d
+        double precision du
+        ret = dlangt(
+     +    norm,
+     +    n,
+     +    dl,
+     +    d,
+     +    du
+     +    )
+      end
+
+      subroutine dlanhswrp(
+     +    ret,
+     +    norm,
+     +    n,
+     +    a,
+     +    lda,
+     +    work
+     +    )
+        external dlanhs
+        double precision dlanhs
+        double precision ret
+        character norm
+        integer n
+        double precision a
+        integer lda
+        double precision work
+        ret = dlanhs(
+     +    norm,
+     +    n,
+     +    a,
+     +    lda,
+     +    work
+     +    )
+      end
+
+      subroutine dlansbwrp(
+     +    ret,
+     +    norm,
+     +    uplo,
+     +    n,
+     +    k,
+     +    ab,
+     +    ldab,
+     +    work
+     +    )
+        external dlansb
+        double precision dlansb
+        double precision ret
+        character norm
+        character uplo
+        integer n
+        integer k
+        double precision ab
+        integer ldab
+        double precision work
+        ret = dlansb(
+     +    norm,
+     +    uplo,
+     +    n,
+     +    k,
+     +    ab,
+     +    ldab,
+     +    work
+     +    )
+      end
+
+      subroutine dlansfwrp(
+     +    ret,
+     +    norm,
+     +    transr,
+     +    uplo,
+     +    n,
+     +    a,
+     +    work
+     +    )
+        external dlansf
+        double precision dlansf
+        double precision ret
+        character norm
+        character transr
+        character uplo
+        integer n
+        double precision a(*)
+        double precision work
+        ret = dlansf(
+     +    norm,
+     +    transr,
+     +    uplo,
+     +    n,
+     +    a,
+     +    work
+     +    )
+      end
+
+      subroutine dlanspwrp(
+     +    ret,
+     +    norm,
+     +    uplo,
+     +    n,
+     +    ap,
+     +    work
+     +    )
+        external dlansp
+        double precision dlansp
+        double precision ret
+        character norm
+        character uplo
+        integer n
+        double precision ap
+        double precision work
+        ret = dlansp(
+     +    norm,
+     +    uplo,
+     +    n,
+     +    ap,
+     +    work
+     +    )
+      end
+
+      subroutine dlanstwrp(
+     +    ret,
+     +    norm,
+     +    n,
+     +    d,
+     +    e
+     +    )
+        external dlanst
+        double precision dlanst
+        double precision ret
+        character norm
+        integer n
+        double precision d
+        double precision e
+        ret = dlanst(
+     +    norm,
+     +    n,
+     +    d,
+     +    e
+     +    )
+      end
+
+      subroutine dlansywrp(
+     +    ret,
+     +    norm,
+     +    uplo,
+     +    n,
+     +    a,
+     +    lda,
+     +    work
+     +    )
+        external dlansy
+        double precision dlansy
+        double precision ret
+        character norm
+        character uplo
+        integer n
+        double precision a
+        integer lda
+        double precision work
+        ret = dlansy(
+     +    norm,
+     +    uplo,
+     +    n,
+     +    a,
+     +    lda,
+     +    work
+     +    )
+      end
+
+      subroutine dlantbwrp(
+     +    ret,
+     +    norm,
+     +    uplo,
+     +    diag,
+     +    n,
+     +    k,
+     +    ab,
+     +    ldab,
+     +    work
+     +    )
+        external dlantb
+        double precision dlantb
+        double precision ret
+        character norm
+        character uplo
+        character diag
+        integer n
+        integer k
+        double precision ab
+        integer ldab
+        double precision work
+        ret = dlantb(
+     +    norm,
+     +    uplo,
+     +    diag,
+     +    n,
+     +    k,
+     +    ab,
+     +    ldab,
+     +    work
+     +    )
+      end
+
+      subroutine dlantpwrp(
+     +    ret,
+     +    norm,
+     +    uplo,
+     +    diag,
+     +    n,
+     +    ap,
+     +    work
+     +    )
+        external dlantp
+        double precision dlantp
+        double precision ret
+        character norm
+        character uplo
+        character diag
+        integer n
+        double precision ap
+        double precision work
+        ret = dlantp(
+     +    norm,
+     +    uplo,
+     +    diag,
+     +    n,
+     +    ap,
+     +    work
+     +    )
+      end
+
+      subroutine dlantrwrp(
+     +    ret,
+     +    norm,
+     +    uplo,
+     +    diag,
+     +    m,
+     +    n,
+     +    a,
+     +    lda,
+     +    work
+     +    )
+        external dlantr
+        double precision dlantr
+        double precision ret
+        character norm
+        character uplo
+        character diag
+        integer m
+        integer n
+        double precision a
+        integer lda
+        double precision work
+        ret = dlantr(
+     +    norm,
+     +    uplo,
+     +    diag,
+     +    m,
+     +    n,
+     +    a,
+     +    lda,
+     +    work
+     +    )
+      end
+
+      subroutine dlapy2wrp(
+     +    ret,
+     +    x,
+     +    y
+     +    )
+        external dlapy2
+        double precision dlapy2
+        double precision ret
+        double precision x
+        double precision y
+        ret = dlapy2(
+     +    x,
+     +    y
+     +    )
+      end
+
+      subroutine dlapy3wrp(
+     +    ret,
+     +    x,
+     +    y,
+     +    z
+     +    )
+        external dlapy3
+        double precision dlapy3
+        double precision ret
+        double precision x
+        double precision y
+        double precision z
+        ret = dlapy3(
+     +    x,
+     +    y,
+     +    z
+     +    )
+      end
+
+      subroutine dzsum1wrp(
+     +    ret,
+     +    n,
+     +    cx,
+     +    incx
+     +    )
+        external dzsum1
+        double precision dzsum1
+        double precision ret
+        integer n
+        complex*16 cx(n)
+        integer incx
+        ret = dzsum1(
+     +    n,
+     +    cx,
+     +    incx
+     +    )
+      end
+
+      subroutine icmax1wrp(
+     +    ret,
+     +    n,
+     +    cx,
+     +    incx
+     +    )
+        external icmax1
+        integer icmax1
+        integer ret
+        integer n
+        complex cx(n)
+        integer incx
+        ret = icmax1(
+     +    n,
+     +    cx,
+     +    incx
+     +    )
+      end
+
+      subroutine ieeeckwrp(
+     +    ret,
+     +    ispec,
+     +    zero,
+     +    one
+     +    )
+        external ieeeck
+        integer ieeeck
+        integer ret
+        integer ispec
+        real zero
+        real one
+        ret = ieeeck(
+     +    ispec,
+     +    zero,
+     +    one
+     +    )
+      end
+
+      subroutine ilaclcwrp(
+     +    ret,
+     +    m,
+     +    n,
+     +    a,
+     +    lda
+     +    )
+        external ilaclc
+        integer ilaclc
+        integer ret
+        integer m
+        integer n
+        complex a
+        integer lda
+        ret = ilaclc(
+     +    m,
+     +    n,
+     +    a,
+     +    lda
+     +    )
+      end
+
+      subroutine ilaclrwrp(
+     +    ret,
+     +    m,
+     +    n,
+     +    a,
+     +    lda
+     +    )
+        external ilaclr
+        integer ilaclr
+        integer ret
+        integer m
+        integer n
+        complex a
+        integer lda
+        ret = ilaclr(
+     +    m,
+     +    n,
+     +    a,
+     +    lda
+     +    )
+      end
+
+      subroutine iladiagwrp(
+     +    ret,
+     +    diag
+     +    )
+        external iladiag
+        integer iladiag
+        integer ret
+        character diag
+        ret = iladiag(
+     +    diag
+     +    )
+      end
+
+      subroutine iladlcwrp(
+     +    ret,
+     +    m,
+     +    n,
+     +    a,
+     +    lda
+     +    )
+        external iladlc
+        integer iladlc
+        integer ret
+        integer m
+        integer n
+        double precision a
+        integer lda
+        ret = iladlc(
+     +    m,
+     +    n,
+     +    a,
+     +    lda
+     +    )
+      end
+
+      subroutine iladlrwrp(
+     +    ret,
+     +    m,
+     +    n,
+     +    a,
+     +    lda
+     +    )
+        external iladlr
+        integer iladlr
+        integer ret
+        integer m
+        integer n
+        double precision a
+        integer lda
+        ret = iladlr(
+     +    m,
+     +    n,
+     +    a,
+     +    lda
+     +    )
+      end
+
+      subroutine ilaprecwrp(
+     +    ret,
+     +    prec
+     +    )
+        external ilaprec
+        integer ilaprec
+        integer ret
+        character prec
+        ret = ilaprec(
+     +    prec
+     +    )
+      end
+
+      subroutine ilaslcwrp(
+     +    ret,
+     +    m,
+     +    n,
+     +    a,
+     +    lda
+     +    )
+        external ilaslc
+        integer ilaslc
+        integer ret
+        integer m
+        integer n
+        real a
+        integer lda
+        ret = ilaslc(
+     +    m,
+     +    n,
+     +    a,
+     +    lda
+     +    )
+      end
+
+      subroutine ilaslrwrp(
+     +    ret,
+     +    m,
+     +    n,
+     +    a,
+     +    lda
+     +    )
+        external ilaslr
+        integer ilaslr
+        integer ret
+        integer m
+        integer n
+        real a
+        integer lda
+        ret = ilaslr(
+     +    m,
+     +    n,
+     +    a,
+     +    lda
+     +    )
+      end
+
+      subroutine ilatranswrp(
+     +    ret,
+     +    trans
+     +    )
+        external ilatrans
+        integer ilatrans
+        integer ret
+        character trans
+        ret = ilatrans(
+     +    trans
+     +    )
+      end
+
+      subroutine ilauplowrp(
+     +    ret,
+     +    uplo
+     +    )
+        external ilauplo
+        integer ilauplo
+        integer ret
+        character uplo
+        ret = ilauplo(
+     +    uplo
+     +    )
+      end
+
+      subroutine ilazlcwrp(
+     +    ret,
+     +    m,
+     +    n,
+     +    a,
+     +    lda
+     +    )
+        external ilazlc
+        integer ilazlc
+        integer ret
+        integer m
+        integer n
+        complex*16 a
+        integer lda
+        ret = ilazlc(
+     +    m,
+     +    n,
+     +    a,
+     +    lda
+     +    )
+      end
+
+      subroutine ilazlrwrp(
+     +    ret,
+     +    m,
+     +    n,
+     +    a,
+     +    lda
+     +    )
+        external ilazlr
+        integer ilazlr
+        integer ret
+        integer m
+        integer n
+        complex*16 a
+        integer lda
+        ret = ilazlr(
+     +    m,
+     +    n,
+     +    a,
+     +    lda
+     +    )
+      end
+
+      subroutine izmax1wrp(
+     +    ret,
+     +    n,
+     +    cx,
+     +    incx
+     +    )
+        external izmax1
+        integer izmax1
+        integer ret
+        integer n
+        complex*16 cx(n)
+        integer incx
+        ret = izmax1(
+     +    n,
+     +    cx,
+     +    incx
+     +    )
+      end
+
+      subroutine scsum1wrp(
+     +    ret,
+     +    n,
+     +    cx,
+     +    incx
+     +    )
+        external scsum1
+        real scsum1
+        real ret
+        integer n
+        complex cx(n)
+        integer incx
+        ret = scsum1(
+     +    n,
+     +    cx,
+     +    incx
+     +    )
+      end
+
+      subroutine slamchwrp(
+     +    ret,
+     +    cmach
+     +    )
+        external slamch
+        real slamch
+        real ret
+        character cmach
+        ret = slamch(
+     +    cmach
+     +    )
+      end
+
+      subroutine slangbwrp(
+     +    ret,
+     +    norm,
+     +    n,
+     +    kl,
+     +    ku,
+     +    ab,
+     +    ldab,
+     +    work
+     +    )
+        external slangb
+        real slangb
+        real ret
+        character norm
+        integer n
+        integer kl
+        integer ku
+        real ab
+        integer ldab
+        real work
+        ret = slangb(
+     +    norm,
+     +    n,
+     +    kl,
+     +    ku,
+     +    ab,
+     +    ldab,
+     +    work
+     +    )
+      end
+
+      subroutine slangewrp(
+     +    ret,
+     +    norm,
+     +    m,
+     +    n,
+     +    a,
+     +    lda,
+     +    work
+     +    )
+        external slange
+        real slange
+        real ret
+        character norm
+        integer m
+        integer n
+        real a
+        integer lda
+        real work
+        ret = slange(
+     +    norm,
+     +    m,
+     +    n,
+     +    a,
+     +    lda,
+     +    work
+     +    )
+      end
+
+      subroutine slangtwrp(
+     +    ret,
+     +    norm,
+     +    n,
+     +    dl,
+     +    d,
+     +    du
+     +    )
+        external slangt
+        real slangt
+        real ret
+        character norm
+        integer n
+        real dl
+        real d
+        real du
+        ret = slangt(
+     +    norm,
+     +    n,
+     +    dl,
+     +    d,
+     +    du
+     +    )
+      end
+
+      subroutine slanhswrp(
+     +    ret,
+     +    norm,
+     +    n,
+     +    a,
+     +    lda,
+     +    work
+     +    )
+        external slanhs
+        real slanhs
+        real ret
+        character norm
+        integer n
+        real a
+        integer lda
+        real work
+        ret = slanhs(
+     +    norm,
+     +    n,
+     +    a,
+     +    lda,
+     +    work
+     +    )
+      end
+
+      subroutine slansbwrp(
+     +    ret,
+     +    norm,
+     +    uplo,
+     +    n,
+     +    k,
+     +    ab,
+     +    ldab,
+     +    work
+     +    )
+        external slansb
+        real slansb
+        real ret
+        character norm
+        character uplo
+        integer n
+        integer k
+        real ab
+        integer ldab
+        real work
+        ret = slansb(
+     +    norm,
+     +    uplo,
+     +    n,
+     +    k,
+     +    ab,
+     +    ldab,
+     +    work
+     +    )
+      end
+
+      subroutine slansfwrp(
+     +    ret,
+     +    norm,
+     +    transr,
+     +    uplo,
+     +    n,
+     +    a,
+     +    work
+     +    )
+        external slansf
+        real slansf
+        real ret
+        character norm
+        character transr
+        character uplo
+        integer n
+        real a(*)
+        real work
+        ret = slansf(
+     +    norm,
+     +    transr,
+     +    uplo,
+     +    n,
+     +    a,
+     +    work
+     +    )
+      end
+
+      subroutine slanspwrp(
+     +    ret,
+     +    norm,
+     +    uplo,
+     +    n,
+     +    ap,
+     +    work
+     +    )
+        external slansp
+        real slansp
+        real ret
+        character norm
+        character uplo
+        integer n
+        real ap
+        real work
+        ret = slansp(
+     +    norm,
+     +    uplo,
+     +    n,
+     +    ap,
+     +    work
+     +    )
+      end
+
+      subroutine slanstwrp(
+     +    ret,
+     +    norm,
+     +    n,
+     +    d,
+     +    e
+     +    )
+        external slanst
+        real slanst
+        real ret
+        character norm
+        integer n
+        real d
+        real e
+        ret = slanst(
+     +    norm,
+     +    n,
+     +    d,
+     +    e
+     +    )
+      end
+
+      subroutine slansywrp(
+     +    ret,
+     +    norm,
+     +    uplo,
+     +    n,
+     +    a,
+     +    lda,
+     +    work
+     +    )
+        external slansy
+        real slansy
+        real ret
+        character norm
+        character uplo
+        integer n
+        real a
+        integer lda
+        real work
+        ret = slansy(
+     +    norm,
+     +    uplo,
+     +    n,
+     +    a,
+     +    lda,
+     +    work
+     +    )
+      end
+
+      subroutine slantbwrp(
+     +    ret,
+     +    norm,
+     +    uplo,
+     +    diag,
+     +    n,
+     +    k,
+     +    ab,
+     +    ldab,
+     +    work
+     +    )
+        external slantb
+        real slantb
+        real ret
+        character norm
+        character uplo
+        character diag
+        integer n
+        integer k
+        real ab
+        integer ldab
+        real work
+        ret = slantb(
+     +    norm,
+     +    uplo,
+     +    diag,
+     +    n,
+     +    k,
+     +    ab,
+     +    ldab,
+     +    work
+     +    )
+      end
+
+      subroutine slantpwrp(
+     +    ret,
+     +    norm,
+     +    uplo,
+     +    diag,
+     +    n,
+     +    ap,
+     +    work
+     +    )
+        external slantp
+        real slantp
+        real ret
+        character norm
+        character uplo
+        character diag
+        integer n
+        real ap
+        real work
+        ret = slantp(
+     +    norm,
+     +    uplo,
+     +    diag,
+     +    n,
+     +    ap,
+     +    work
+     +    )
+      end
+
+      subroutine slantrwrp(
+     +    ret,
+     +    norm,
+     +    uplo,
+     +    diag,
+     +    m,
+     +    n,
+     +    a,
+     +    lda,
+     +    work
+     +    )
+        external slantr
+        real slantr
+        real ret
+        character norm
+        character uplo
+        character diag
+        integer m
+        integer n
+        real a
+        integer lda
+        real work
+        ret = slantr(
+     +    norm,
+     +    uplo,
+     +    diag,
+     +    m,
+     +    n,
+     +    a,
+     +    lda,
+     +    work
+     +    )
+      end
+
+      subroutine slapy2wrp(
+     +    ret,
+     +    x,
+     +    y
+     +    )
+        external slapy2
+        real slapy2
+        real ret
+        real x
+        real y
+        ret = slapy2(
+     +    x,
+     +    y
+     +    )
+      end
+
+      subroutine slapy3wrp(
+     +    ret,
+     +    x,
+     +    y,
+     +    z
+     +    )
+        external slapy3
+        real slapy3
+        real ret
+        real x
+        real y
+        real z
+        ret = slapy3(
+     +    x,
+     +    y,
+     +    z
+     +    )
+      end
+
+      subroutine zladivwrp(
+     +    ret,
+     +    x,
+     +    y
+     +    )
+        external wzladiv
+        complex*16 wzladiv
+        complex*16 ret
+        complex*16 x
+        complex*16 y
+        ret = wzladiv(
+     +    x,
+     +    y
+     +    )
+      end
+
+      subroutine zlangbwrp(
+     +    ret,
+     +    norm,
+     +    n,
+     +    kl,
+     +    ku,
+     +    ab,
+     +    ldab,
+     +    work
+     +    )
+        external zlangb
+        double precision zlangb
+        double precision ret
+        character norm
+        integer n
+        integer kl
+        integer ku
+        complex*16 ab
+        integer ldab
+        double precision work
+        ret = zlangb(
+     +    norm,
+     +    n,
+     +    kl,
+     +    ku,
+     +    ab,
+     +    ldab,
+     +    work
+     +    )
+      end
+
+      subroutine zlangewrp(
+     +    ret,
+     +    norm,
+     +    m,
+     +    n,
+     +    a,
+     +    lda,
+     +    work
+     +    )
+        external zlange
+        double precision zlange
+        double precision ret
+        character norm
+        integer m
+        integer n
+        complex*16 a
+        integer lda
+        double precision work
+        ret = zlange(
+     +    norm,
+     +    m,
+     +    n,
+     +    a,
+     +    lda,
+     +    work
+     +    )
+      end
+
+      subroutine zlangtwrp(
+     +    ret,
+     +    norm,
+     +    n,
+     +    dl,
+     +    d,
+     +    du
+     +    )
+        external zlangt
+        double precision zlangt
+        double precision ret
+        character norm
+        integer n
+        complex*16 dl
+        complex*16 d
+        complex*16 du
+        ret = zlangt(
+     +    norm,
+     +    n,
+     +    dl,
+     +    d,
+     +    du
+     +    )
+      end
+
+      subroutine zlanhbwrp(
+     +    ret,
+     +    norm,
+     +    uplo,
+     +    n,
+     +    k,
+     +    ab,
+     +    ldab,
+     +    work
+     +    )
+        external zlanhb
+        double precision zlanhb
+        double precision ret
+        character norm
+        character uplo
+        integer n
+        integer k
+        complex*16 ab
+        integer ldab
+        double precision work
+        ret = zlanhb(
+     +    norm,
+     +    uplo,
+     +    n,
+     +    k,
+     +    ab,
+     +    ldab,
+     +    work
+     +    )
+      end
+
+      subroutine zlanhewrp(
+     +    ret,
+     +    norm,
+     +    uplo,
+     +    n,
+     +    a,
+     +    lda,
+     +    work
+     +    )
+        external zlanhe
+        double precision zlanhe
+        double precision ret
+        character norm
+        character uplo
+        integer n
+        complex*16 a
+        integer lda
+        double precision work
+        ret = zlanhe(
+     +    norm,
+     +    uplo,
+     +    n,
+     +    a,
+     +    lda,
+     +    work
+     +    )
+      end
+
+      subroutine zlanhfwrp(
+     +    ret,
+     +    norm,
+     +    transr,
+     +    uplo,
+     +    n,
+     +    a,
+     +    work
+     +    )
+        external zlanhf
+        double precision zlanhf
+        double precision ret
+        character norm
+        character transr
+        character uplo
+        integer n
+        complex*16 a(*)
+        double precision work
+        ret = zlanhf(
+     +    norm,
+     +    transr,
+     +    uplo,
+     +    n,
+     +    a,
+     +    work
+     +    )
+      end
+
+      subroutine zlanhpwrp(
+     +    ret,
+     +    norm,
+     +    uplo,
+     +    n,
+     +    ap,
+     +    work
+     +    )
+        external zlanhp
+        double precision zlanhp
+        double precision ret
+        character norm
+        character uplo
+        integer n
+        complex*16 ap
+        double precision work
+        ret = zlanhp(
+     +    norm,
+     +    uplo,
+     +    n,
+     +    ap,
+     +    work
+     +    )
+      end
+
+      subroutine zlanhswrp(
+     +    ret,
+     +    norm,
+     +    n,
+     +    a,
+     +    lda,
+     +    work
+     +    )
+        external zlanhs
+        double precision zlanhs
+        double precision ret
+        character norm
+        integer n
+        complex*16 a
+        integer lda
+        double precision work
+        ret = zlanhs(
+     +    norm,
+     +    n,
+     +    a,
+     +    lda,
+     +    work
+     +    )
+      end
+
+      subroutine zlanhtwrp(
+     +    ret,
+     +    norm,
+     +    n,
+     +    d,
+     +    e
+     +    )
+        external zlanht
+        double precision zlanht
+        double precision ret
+        character norm
+        integer n
+        double precision d
+        complex*16 e
+        ret = zlanht(
+     +    norm,
+     +    n,
+     +    d,
+     +    e
+     +    )
+      end
+
+      subroutine zlansbwrp(
+     +    ret,
+     +    norm,
+     +    uplo,
+     +    n,
+     +    k,
+     +    ab,
+     +    ldab,
+     +    work
+     +    )
+        external zlansb
+        double precision zlansb
+        double precision ret
+        character norm
+        character uplo
+        integer n
+        integer k
+        complex*16 ab
+        integer ldab
+        double precision work
+        ret = zlansb(
+     +    norm,
+     +    uplo,
+     +    n,
+     +    k,
+     +    ab,
+     +    ldab,
+     +    work
+     +    )
+      end
+
+      subroutine zlanspwrp(
+     +    ret,
+     +    norm,
+     +    uplo,
+     +    n,
+     +    ap,
+     +    work
+     +    )
+        external zlansp
+        double precision zlansp
+        double precision ret
+        character norm
+        character uplo
+        integer n
+        complex*16 ap
+        double precision work
+        ret = zlansp(
+     +    norm,
+     +    uplo,
+     +    n,
+     +    ap,
+     +    work
+     +    )
+      end
+
+      subroutine zlansywrp(
+     +    ret,
+     +    norm,
+     +    uplo,
+     +    n,
+     +    a,
+     +    lda,
+     +    work
+     +    )
+        external zlansy
+        double precision zlansy
+        double precision ret
+        character norm
+        character uplo
+        integer n
+        complex*16 a
+        integer lda
+        double precision work
+        ret = zlansy(
+     +    norm,
+     +    uplo,
+     +    n,
+     +    a,
+     +    lda,
+     +    work
+     +    )
+      end
+
+      subroutine zlantbwrp(
+     +    ret,
+     +    norm,
+     +    uplo,
+     +    diag,
+     +    n,
+     +    k,
+     +    ab,
+     +    ldab,
+     +    work
+     +    )
+        external zlantb
+        double precision zlantb
+        double precision ret
+        character norm
+        character uplo
+        character diag
+        integer n
+        integer k
+        complex*16 ab
+        integer ldab
+        double precision work
+        ret = zlantb(
+     +    norm,
+     +    uplo,
+     +    diag,
+     +    n,
+     +    k,
+     +    ab,
+     +    ldab,
+     +    work
+     +    )
+      end
+
+      subroutine zlantpwrp(
+     +    ret,
+     +    norm,
+     +    uplo,
+     +    diag,
+     +    n,
+     +    ap,
+     +    work
+     +    )
+        external zlantp
+        double precision zlantp
+        double precision ret
+        character norm
+        character uplo
+        character diag
+        integer n
+        complex*16 ap
+        double precision work
+        ret = zlantp(
+     +    norm,
+     +    uplo,
+     +    diag,
+     +    n,
+     +    ap,
+     +    work
+     +    )
+      end
+
+      subroutine zlantrwrp(
+     +    ret,
+     +    norm,
+     +    uplo,
+     +    diag,
+     +    m,
+     +    n,
+     +    a,
+     +    lda,
+     +    work
+     +    )
+        external zlantr
+        double precision zlantr
+        double precision ret
+        character norm
+        character uplo
+        character diag
+        integer m
+        integer n
+        complex*16 a
+        integer lda
+        double precision work
+        ret = zlantr(
+     +    norm,
+     +    uplo,
+     +    diag,
+     +    m,
+     +    n,
+     +    a,
+     +    lda,
+     +    work
+     +    )
+      end
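
The Fortran shims above all follow one mechanical pattern: each LAPACK function is re-exported as a subroutine whose first argument receives the return value, which sidesteps the compiler-specific ABI for Fortran functions returning REAL or COMPLEX results. A minimal C usage sketch, assuming the wrapper objects above are linked in and F_FUNC (from fortran_defs.h, used throughout the header below) supplies the platform's Fortran name mangling:

    #include <stdio.h>
    #include "fortran_defs.h"

    /* Prototype matches the generated header below. */
    void F_FUNC(slamchwrp, SLAMCHWRP)(float *ret, char *cmach);

    int main(void) {
        float eps;
        char cmach = 'E';  /* 'E' asks slamch for the relative machine epsilon */
        /* Fortran linkage: every argument is passed by reference. */
        F_FUNC(slamchwrp, SLAMCHWRP)(&eps, &cmach);
        printf("machine epsilon: %g\n", eps);
        return 0;
    }
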
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/_lapack_subroutines.h b/__packaged__/coreml/.python_dependencies/scipy/linalg/_lapack_subroutines.h
new file mode 100644
index 00000000..9c825172
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/_lapack_subroutines.h
@@ -0,0 +1,1523 @@
+/* This file was generated by _generate_pyx.py. */
+/* Do not edit this file directly. */
+
+#ifndef SCIPY_LINALG_LAPACK_FORTRAN_WRAPPERS_H
+#define SCIPY_LINALG_LAPACK_FORTRAN_WRAPPERS_H
+#include "fortran_defs.h"
+#include "numpy/arrayobject.h"
+
+typedef int (*_cselect1)(npy_complex64*);
+typedef int (*_cselect2)(npy_complex64*, npy_complex64*);
+typedef int (*_dselect2)(double*, double*);
+typedef int (*_dselect3)(double*, double*, double*);
+typedef int (*_sselect2)(float*, float*);
+typedef int (*_sselect3)(float*, float*, float*);
+typedef int (*_zselect1)(npy_complex128*);
+typedef int (*_zselect2)(npy_complex128*, npy_complex128*);
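+
+/* These eight typedefs are the callback types for the SELECT/SELCTG
+ * arguments of the Schur drivers prototyped below: the complex one- and
+ * two-argument forms feed ?gees/?gges (see cgees and cgges further down),
+ * while the real two- and three-argument forms take the split (wr, wi) or
+ * (alphar, alphai, beta) eigenvalue representation; int stands in for
+ * Fortran LOGICAL. As a sketch (not part of the generated file), a
+ * _dselect2 callback that, passed to dgees with SORT='S', would move
+ * eigenvalues with negative real part to the top of the Schur form:
+ *
+ *     static int select_stable(double *wr, double *wi) {
+ *         (void)wi;          // the stability test only needs the real part
+ *         return *wr < 0.0;  // nonzero return selects the eigenvalue
+ *     }
+ */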
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void F_FUNC(chla_transtypewrp, CHLA_TRANSTYPEWRP)(char *ret, int *trans);
+void F_FUNC(cladivwrp, CLADIVWRP)(npy_complex64 *ret, npy_complex64 *x, npy_complex64 *y);
+void F_FUNC(clangbwrp, CLANGBWRP)(float *ret, char *norm, int *n, int *kl, int *ku, npy_complex64 *ab, int *ldab, float *work);
+void F_FUNC(clangewrp, CLANGEWRP)(float *ret, char *norm, int *m, int *n, npy_complex64 *a, int *lda, float *work);
+void F_FUNC(clangtwrp, CLANGTWRP)(float *ret, char *norm, int *n, npy_complex64 *dl, npy_complex64 *d, npy_complex64 *du);
+void F_FUNC(clanhbwrp, CLANHBWRP)(float *ret, char *norm, char *uplo, int *n, int *k, npy_complex64 *ab, int *ldab, float *work);
+void F_FUNC(clanhewrp, CLANHEWRP)(float *ret, char *norm, char *uplo, int *n, npy_complex64 *a, int *lda, float *work);
+void F_FUNC(clanhfwrp, CLANHFWRP)(float *ret, char *norm, char *transr, char *uplo, int *n, npy_complex64 *a, float *work);
+void F_FUNC(clanhpwrp, CLANHPWRP)(float *ret, char *norm, char *uplo, int *n, npy_complex64 *ap, float *work);
+void F_FUNC(clanhswrp, CLANHSWRP)(float *ret, char *norm, int *n, npy_complex64 *a, int *lda, float *work);
+void F_FUNC(clanhtwrp, CLANHTWRP)(float *ret, char *norm, int *n, float *d, npy_complex64 *e);
+void F_FUNC(clansbwrp, CLANSBWRP)(float *ret, char *norm, char *uplo, int *n, int *k, npy_complex64 *ab, int *ldab, float *work);
+void F_FUNC(clanspwrp, CLANSPWRP)(float *ret, char *norm, char *uplo, int *n, npy_complex64 *ap, float *work);
+void F_FUNC(clansywrp, CLANSYWRP)(float *ret, char *norm, char *uplo, int *n, npy_complex64 *a, int *lda, float *work);
+void F_FUNC(clantbwrp, CLANTBWRP)(float *ret, char *norm, char *uplo, char *diag, int *n, int *k, npy_complex64 *ab, int *ldab, float *work);
+void F_FUNC(clantpwrp, CLANTPWRP)(float *ret, char *norm, char *uplo, char *diag, int *n, npy_complex64 *ap, float *work);
+void F_FUNC(clantrwrp, CLANTRWRP)(float *ret, char *norm, char *uplo, char *diag, int *m, int *n, npy_complex64 *a, int *lda, float *work);
+void F_FUNC(disnanwrp, DISNANWRP)(int *ret, double *din);
+void F_FUNC(dlamchwrp, DLAMCHWRP)(double *ret, char *cmach);
+void F_FUNC(dlanegwrp, DLANEGWRP)(int *ret, int *n, double *d, double *lld, double *sigma, double *pivmin, int *r);
+void F_FUNC(dlangbwrp, DLANGBWRP)(double *ret, char *norm, int *n, int *kl, int *ku, double *ab, int *ldab, double *work);
+void F_FUNC(dlangewrp, DLANGEWRP)(double *ret, char *norm, int *m, int *n, double *a, int *lda, double *work);
+void F_FUNC(dlangtwrp, DLANGTWRP)(double *ret, char *norm, int *n, double *dl, double *d, double *du);
+void F_FUNC(dlanhswrp, DLANHSWRP)(double *ret, char *norm, int *n, double *a, int *lda, double *work);
+void F_FUNC(dlansbwrp, DLANSBWRP)(double *ret, char *norm, char *uplo, int *n, int *k, double *ab, int *ldab, double *work);
+void F_FUNC(dlansfwrp, DLANSFWRP)(double *ret, char *norm, char *transr, char *uplo, int *n, double *a, double *work);
+void F_FUNC(dlanspwrp, DLANSPWRP)(double *ret, char *norm, char *uplo, int *n, double *ap, double *work);
+void F_FUNC(dlanstwrp, DLANSTWRP)(double *ret, char *norm, int *n, double *d, double *e);
+void F_FUNC(dlansywrp, DLANSYWRP)(double *ret, char *norm, char *uplo, int *n, double *a, int *lda, double *work);
+void F_FUNC(dlantbwrp, DLANTBWRP)(double *ret, char *norm, char *uplo, char *diag, int *n, int *k, double *ab, int *ldab, double *work);
+void F_FUNC(dlantpwrp, DLANTPWRP)(double *ret, char *norm, char *uplo, char *diag, int *n, double *ap, double *work);
+void F_FUNC(dlantrwrp, DLANTRWRP)(double *ret, char *norm, char *uplo, char *diag, int *m, int *n, double *a, int *lda, double *work);
+void F_FUNC(dlapy2wrp, DLAPY2WRP)(double *ret, double *x, double *y);
+void F_FUNC(dlapy3wrp, DLAPY3WRP)(double *ret, double *x, double *y, double *z);
+void F_FUNC(dzsum1wrp, DZSUM1WRP)(double *ret, int *n, npy_complex128 *cx, int *incx);
+void F_FUNC(icmax1wrp, ICMAX1WRP)(int *ret, int *n, npy_complex64 *cx, int *incx);
+void F_FUNC(ieeeckwrp, IEEECKWRP)(int *ret, int *ispec, float *zero, float *one);
+void F_FUNC(ilaclcwrp, ILACLCWRP)(int *ret, int *m, int *n, npy_complex64 *a, int *lda);
+void F_FUNC(ilaclrwrp, ILACLRWRP)(int *ret, int *m, int *n, npy_complex64 *a, int *lda);
+void F_FUNC(iladiagwrp, ILADIAGWRP)(int *ret, char *diag);
+void F_FUNC(iladlcwrp, ILADLCWRP)(int *ret, int *m, int *n, double *a, int *lda);
+void F_FUNC(iladlrwrp, ILADLRWRP)(int *ret, int *m, int *n, double *a, int *lda);
+void F_FUNC(ilaprecwrp, ILAPRECWRP)(int *ret, char *prec);
+void F_FUNC(ilaslcwrp, ILASLCWRP)(int *ret, int *m, int *n, float *a, int *lda);
+void F_FUNC(ilaslrwrp, ILASLRWRP)(int *ret, int *m, int *n, float *a, int *lda);
+void F_FUNC(ilatranswrp, ILATRANSWRP)(int *ret, char *trans);
+void F_FUNC(ilauplowrp, ILAUPLOWRP)(int *ret, char *uplo);
+void F_FUNC(ilazlcwrp, ILAZLCWRP)(int *ret, int *m, int *n, npy_complex128 *a, int *lda);
+void F_FUNC(ilazlrwrp, ILAZLRWRP)(int *ret, int *m, int *n, npy_complex128 *a, int *lda);
+void F_FUNC(izmax1wrp, IZMAX1WRP)(int *ret, int *n, npy_complex128 *cx, int *incx);
+void F_FUNC(scsum1wrp, SCSUM1WRP)(float *ret, int *n, npy_complex64 *cx, int *incx);
+void F_FUNC(slamchwrp, SLAMCHWRP)(float *ret, char *cmach);
+void F_FUNC(slangbwrp, SLANGBWRP)(float *ret, char *norm, int *n, int *kl, int *ku, float *ab, int *ldab, float *work);
+void F_FUNC(slangewrp, SLANGEWRP)(float *ret, char *norm, int *m, int *n, float *a, int *lda, float *work);
+void F_FUNC(slangtwrp, SLANGTWRP)(float *ret, char *norm, int *n, float *dl, float *d, float *du);
+void F_FUNC(slanhswrp, SLANHSWRP)(float *ret, char *norm, int *n, float *a, int *lda, float *work);
+void F_FUNC(slansbwrp, SLANSBWRP)(float *ret, char *norm, char *uplo, int *n, int *k, float *ab, int *ldab, float *work);
+void F_FUNC(slansfwrp, SLANSFWRP)(float *ret, char *norm, char *transr, char *uplo, int *n, float *a, float *work);
+void F_FUNC(slanspwrp, SLANSPWRP)(float *ret, char *norm, char *uplo, int *n, float *ap, float *work);
+void F_FUNC(slanstwrp, SLANSTWRP)(float *ret, char *norm, int *n, float *d, float *e);
+void F_FUNC(slansywrp, SLANSYWRP)(float *ret, char *norm, char *uplo, int *n, float *a, int *lda, float *work);
+void F_FUNC(slantbwrp, SLANTBWRP)(float *ret, char *norm, char *uplo, char *diag, int *n, int *k, float *ab, int *ldab, float *work);
+void F_FUNC(slantpwrp, SLANTPWRP)(float *ret, char *norm, char *uplo, char *diag, int *n, float *ap, float *work);
+void F_FUNC(slantrwrp, SLANTRWRP)(float *ret, char *norm, char *uplo, char *diag, int *m, int *n, float *a, int *lda, float *work);
+void F_FUNC(slapy2wrp, SLAPY2WRP)(float *ret, float *x, float *y);
+void F_FUNC(slapy3wrp, SLAPY3WRP)(float *ret, float *x, float *y, float *z);
+void F_FUNC(zladivwrp, ZLADIVWRP)(npy_complex128 *ret, npy_complex128 *x, npy_complex128 *y);
+void F_FUNC(zlangbwrp, ZLANGBWRP)(double *ret, char *norm, int *n, int *kl, int *ku, npy_complex128 *ab, int *ldab, double *work);
+void F_FUNC(zlangewrp, ZLANGEWRP)(double *ret, char *norm, int *m, int *n, npy_complex128 *a, int *lda, double *work);
+void F_FUNC(zlangtwrp, ZLANGTWRP)(double *ret, char *norm, int *n, npy_complex128 *dl, npy_complex128 *d, npy_complex128 *du);
+void F_FUNC(zlanhbwrp, ZLANHBWRP)(double *ret, char *norm, char *uplo, int *n, int *k, npy_complex128 *ab, int *ldab, double *work);
+void F_FUNC(zlanhewrp, ZLANHEWRP)(double *ret, char *norm, char *uplo, int *n, npy_complex128 *a, int *lda, double *work);
+void F_FUNC(zlanhfwrp, ZLANHFWRP)(double *ret, char *norm, char *transr, char *uplo, int *n, npy_complex128 *a, double *work);
+void F_FUNC(zlanhpwrp, ZLANHPWRP)(double *ret, char *norm, char *uplo, int *n, npy_complex128 *ap, double *work);
+void F_FUNC(zlanhswrp, ZLANHSWRP)(double *ret, char *norm, int *n, npy_complex128 *a, int *lda, double *work);
+void F_FUNC(zlanhtwrp, ZLANHTWRP)(double *ret, char *norm, int *n, double *d, npy_complex128 *e);
+void F_FUNC(zlansbwrp, ZLANSBWRP)(double *ret, char *norm, char *uplo, int *n, int *k, npy_complex128 *ab, int *ldab, double *work);
+void F_FUNC(zlanspwrp, ZLANSPWRP)(double *ret, char *norm, char *uplo, int *n, npy_complex128 *ap, double *work);
+void F_FUNC(zlansywrp, ZLANSYWRP)(double *ret, char *norm, char *uplo, int *n, npy_complex128 *a, int *lda, double *work);
+void F_FUNC(zlantbwrp, ZLANTBWRP)(double *ret, char *norm, char *uplo, char *diag, int *n, int *k, npy_complex128 *ab, int *ldab, double *work);
+void F_FUNC(zlantpwrp, ZLANTPWRP)(double *ret, char *norm, char *uplo, char *diag, int *n, npy_complex128 *ap, double *work);
+void F_FUNC(zlantrwrp, ZLANTRWRP)(double *ret, char *norm, char *uplo, char *diag, int *m, int *n, npy_complex128 *a, int *lda, double *work);
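+
+/* Usage sketch for the shims above (an illustration, not generated code):
+ * dlangewrp computes a matrix norm via LAPACK's dlange, whose WORK array
+ * is referenced only for the infinity norm, so a dummy scalar suffices
+ * for the 1-norm. Assumes the Fortran wrapper objects are linked in.
+ *
+ *     double one_norm_2x2(const double a[4]) {
+ *         extern void F_FUNC(dlangewrp, DLANGEWRP)(double *ret, char *norm,
+ *                 int *m, int *n, double *a, int *lda, double *work);
+ *         double ret, work_dummy = 0.0;  // ignored unless norm='I'
+ *         char norm = '1';
+ *         int m = 2, n = 2, lda = 2;     // column-major storage
+ *         F_FUNC(dlangewrp, DLANGEWRP)(&ret, &norm, &m, &n,
+ *                 (double *)a, &lda, &work_dummy);
+ *         return ret;
+ *     }
+ */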
+
+void F_FUNC(cbbcsd,CBBCSD)(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, int *m, int *p, int *q, float *theta, float *phi, npy_complex64 *u1, int *ldu1, npy_complex64 *u2, int *ldu2, npy_complex64 *v1t, int *ldv1t, npy_complex64 *v2t, int *ldv2t, float *b11d, float *b11e, float *b12d, float *b12e, float *b21d, float *b21e, float *b22d, float *b22e, float *rwork, int *lrwork, int *info);
+void F_FUNC(cbdsqr,CBDSQR)(char *uplo, int *n, int *ncvt, int *nru, int *ncc, float *d, float *e, npy_complex64 *vt, int *ldvt, npy_complex64 *u, int *ldu, npy_complex64 *c, int *ldc, float *rwork, int *info);
+void F_FUNC(cgbbrd,CGBBRD)(char *vect, int *m, int *n, int *ncc, int *kl, int *ku, npy_complex64 *ab, int *ldab, float *d, float *e, npy_complex64 *q, int *ldq, npy_complex64 *pt, int *ldpt, npy_complex64 *c, int *ldc, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(cgbcon,CGBCON)(char *norm, int *n, int *kl, int *ku, npy_complex64 *ab, int *ldab, int *ipiv, float *anorm, float *rcond, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(cgbequ,CGBEQU)(int *m, int *n, int *kl, int *ku, npy_complex64 *ab, int *ldab, float *r, float *c, float *rowcnd, float *colcnd, float *amax, int *info);
+void F_FUNC(cgbequb,CGBEQUB)(int *m, int *n, int *kl, int *ku, npy_complex64 *ab, int *ldab, float *r, float *c, float *rowcnd, float *colcnd, float *amax, int *info);
+void F_FUNC(cgbrfs,CGBRFS)(char *trans, int *n, int *kl, int *ku, int *nrhs, npy_complex64 *ab, int *ldab, npy_complex64 *afb, int *ldafb, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(cgbsv,CGBSV)(int *n, int *kl, int *ku, int *nrhs, npy_complex64 *ab, int *ldab, int *ipiv, npy_complex64 *b, int *ldb, int *info);
+void F_FUNC(cgbsvx,CGBSVX)(char *fact, char *trans, int *n, int *kl, int *ku, int *nrhs, npy_complex64 *ab, int *ldab, npy_complex64 *afb, int *ldafb, int *ipiv, char *equed, float *r, float *c, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *rcond, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(cgbtf2,CGBTF2)(int *m, int *n, int *kl, int *ku, npy_complex64 *ab, int *ldab, int *ipiv, int *info);
+void F_FUNC(cgbtrf,CGBTRF)(int *m, int *n, int *kl, int *ku, npy_complex64 *ab, int *ldab, int *ipiv, int *info);
+void F_FUNC(cgbtrs,CGBTRS)(char *trans, int *n, int *kl, int *ku, int *nrhs, npy_complex64 *ab, int *ldab, int *ipiv, npy_complex64 *b, int *ldb, int *info);
+void F_FUNC(cgebak,CGEBAK)(char *job, char *side, int *n, int *ilo, int *ihi, float *scale, int *m, npy_complex64 *v, int *ldv, int *info);
+void F_FUNC(cgebal,CGEBAL)(char *job, int *n, npy_complex64 *a, int *lda, int *ilo, int *ihi, float *scale, int *info);
+void F_FUNC(cgebd2,CGEBD2)(int *m, int *n, npy_complex64 *a, int *lda, float *d, float *e, npy_complex64 *tauq, npy_complex64 *taup, npy_complex64 *work, int *info);
+void F_FUNC(cgebrd,CGEBRD)(int *m, int *n, npy_complex64 *a, int *lda, float *d, float *e, npy_complex64 *tauq, npy_complex64 *taup, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(cgecon,CGECON)(char *norm, int *n, npy_complex64 *a, int *lda, float *anorm, float *rcond, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(cgeequ,CGEEQU)(int *m, int *n, npy_complex64 *a, int *lda, float *r, float *c, float *rowcnd, float *colcnd, float *amax, int *info);
+void F_FUNC(cgeequb,CGEEQUB)(int *m, int *n, npy_complex64 *a, int *lda, float *r, float *c, float *rowcnd, float *colcnd, float *amax, int *info);
+void F_FUNC(cgees,CGEES)(char *jobvs, char *sort, _cselect1 *select, int *n, npy_complex64 *a, int *lda, int *sdim, npy_complex64 *w, npy_complex64 *vs, int *ldvs, npy_complex64 *work, int *lwork, float *rwork, int *bwork, int *info);
+void F_FUNC(cgeesx,CGEESX)(char *jobvs, char *sort, _cselect1 *select, char *sense, int *n, npy_complex64 *a, int *lda, int *sdim, npy_complex64 *w, npy_complex64 *vs, int *ldvs, float *rconde, float *rcondv, npy_complex64 *work, int *lwork, float *rwork, int *bwork, int *info);
+void F_FUNC(cgeev,CGEEV)(char *jobvl, char *jobvr, int *n, npy_complex64 *a, int *lda, npy_complex64 *w, npy_complex64 *vl, int *ldvl, npy_complex64 *vr, int *ldvr, npy_complex64 *work, int *lwork, float *rwork, int *info);
+void F_FUNC(cgeevx,CGEEVX)(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, npy_complex64 *a, int *lda, npy_complex64 *w, npy_complex64 *vl, int *ldvl, npy_complex64 *vr, int *ldvr, int *ilo, int *ihi, float *scale, float *abnrm, float *rconde, float *rcondv, npy_complex64 *work, int *lwork, float *rwork, int *info);
+void F_FUNC(cgehd2,CGEHD2)(int *n, int *ilo, int *ihi, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *info);
+void F_FUNC(cgehrd,CGEHRD)(int *n, int *ilo, int *ihi, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(cgelq2,CGELQ2)(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *info);
+void F_FUNC(cgelqf,CGELQF)(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(cgels,CGELS)(char *trans, int *m, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(cgelsd,CGELSD)(int *m, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, float *s, float *rcond, int *rank, npy_complex64 *work, int *lwork, float *rwork, int *iwork, int *info);
+void F_FUNC(cgelss,CGELSS)(int *m, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, float *s, float *rcond, int *rank, npy_complex64 *work, int *lwork, float *rwork, int *info);
+void F_FUNC(cgelsy,CGELSY)(int *m, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, int *jpvt, float *rcond, int *rank, npy_complex64 *work, int *lwork, float *rwork, int *info);
+void F_FUNC(cgemqrt,CGEMQRT)(char *side, char *trans, int *m, int *n, int *k, int *nb, npy_complex64 *v, int *ldv, npy_complex64 *t, int *ldt, npy_complex64 *c, int *ldc, npy_complex64 *work, int *info);
+void F_FUNC(cgeql2,CGEQL2)(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *info);
+void F_FUNC(cgeqlf,CGEQLF)(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(cgeqp3,CGEQP3)(int *m, int *n, npy_complex64 *a, int *lda, int *jpvt, npy_complex64 *tau, npy_complex64 *work, int *lwork, float *rwork, int *info);
+void F_FUNC(cgeqr2,CGEQR2)(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *info);
+void F_FUNC(cgeqr2p,CGEQR2P)(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *info);
+void F_FUNC(cgeqrf,CGEQRF)(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(cgeqrfp,CGEQRFP)(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(cgeqrt,CGEQRT)(int *m, int *n, int *nb, npy_complex64 *a, int *lda, npy_complex64 *t, int *ldt, npy_complex64 *work, int *info);
+void F_FUNC(cgeqrt2,CGEQRT2)(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *t, int *ldt, int *info);
+void F_FUNC(cgeqrt3,CGEQRT3)(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *t, int *ldt, int *info);
+void F_FUNC(cgerfs,CGERFS)(char *trans, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *af, int *ldaf, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(cgerq2,CGERQ2)(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *info);
+void F_FUNC(cgerqf,CGERQF)(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(cgesc2,CGESC2)(int *n, npy_complex64 *a, int *lda, npy_complex64 *rhs, int *ipiv, int *jpiv, float *scale);
+void F_FUNC(cgesdd,CGESDD)(char *jobz, int *m, int *n, npy_complex64 *a, int *lda, float *s, npy_complex64 *u, int *ldu, npy_complex64 *vt, int *ldvt, npy_complex64 *work, int *lwork, float *rwork, int *iwork, int *info);
+void F_FUNC(cgesv,CGESV)(int *n, int *nrhs, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *b, int *ldb, int *info);
+void F_FUNC(cgesvd,CGESVD)(char *jobu, char *jobvt, int *m, int *n, npy_complex64 *a, int *lda, float *s, npy_complex64 *u, int *ldu, npy_complex64 *vt, int *ldvt, npy_complex64 *work, int *lwork, float *rwork, int *info);
+void F_FUNC(cgesvx,CGESVX)(char *fact, char *trans, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *af, int *ldaf, int *ipiv, char *equed, float *r, float *c, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *rcond, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(cgetc2,CGETC2)(int *n, npy_complex64 *a, int *lda, int *ipiv, int *jpiv, int *info);
+void F_FUNC(cgetf2,CGETF2)(int *m, int *n, npy_complex64 *a, int *lda, int *ipiv, int *info);
+void F_FUNC(cgetrf,CGETRF)(int *m, int *n, npy_complex64 *a, int *lda, int *ipiv, int *info);
+void F_FUNC(cgetri,CGETRI)(int *n, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(cgetrs,CGETRS)(char *trans, int *n, int *nrhs, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *b, int *ldb, int *info);
+void F_FUNC(cggbak,CGGBAK)(char *job, char *side, int *n, int *ilo, int *ihi, float *lscale, float *rscale, int *m, npy_complex64 *v, int *ldv, int *info);
+void F_FUNC(cggbal,CGGBAL)(char *job, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, int *ilo, int *ihi, float *lscale, float *rscale, float *work, int *info);
+void F_FUNC(cgges,CGGES)(char *jobvsl, char *jobvsr, char *sort, _cselect2 *selctg, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, int *sdim, npy_complex64 *alpha, npy_complex64 *beta, npy_complex64 *vsl, int *ldvsl, npy_complex64 *vsr, int *ldvsr, npy_complex64 *work, int *lwork, float *rwork, int *bwork, int *info);
+void F_FUNC(cggesx,CGGESX)(char *jobvsl, char *jobvsr, char *sort, _cselect2 *selctg, char *sense, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, int *sdim, npy_complex64 *alpha, npy_complex64 *beta, npy_complex64 *vsl, int *ldvsl, npy_complex64 *vsr, int *ldvsr, float *rconde, float *rcondv, npy_complex64 *work, int *lwork, float *rwork, int *iwork, int *liwork, int *bwork, int *info);
+void F_FUNC(cggev,CGGEV)(char *jobvl, char *jobvr, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *alpha, npy_complex64 *beta, npy_complex64 *vl, int *ldvl, npy_complex64 *vr, int *ldvr, npy_complex64 *work, int *lwork, float *rwork, int *info);
+void F_FUNC(cggevx,CGGEVX)(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *alpha, npy_complex64 *beta, npy_complex64 *vl, int *ldvl, npy_complex64 *vr, int *ldvr, int *ilo, int *ihi, float *lscale, float *rscale, float *abnrm, float *bbnrm, float *rconde, float *rcondv, npy_complex64 *work, int *lwork, float *rwork, int *iwork, int *bwork, int *info);
+void F_FUNC(cggglm,CGGGLM)(int *n, int *m, int *p, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *d, npy_complex64 *x, npy_complex64 *y, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(cgghrd,CGGHRD)(char *compq, char *compz, int *n, int *ilo, int *ihi, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *q, int *ldq, npy_complex64 *z, int *ldz, int *info);
+void F_FUNC(cgglse,CGGLSE)(int *m, int *n, int *p, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *c, npy_complex64 *d, npy_complex64 *x, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(cggqrf,CGGQRF)(int *n, int *m, int *p, npy_complex64 *a, int *lda, npy_complex64 *taua, npy_complex64 *b, int *ldb, npy_complex64 *taub, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(cggrqf,CGGRQF)(int *m, int *p, int *n, npy_complex64 *a, int *lda, npy_complex64 *taua, npy_complex64 *b, int *ldb, npy_complex64 *taub, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(cgtcon,CGTCON)(char *norm, int *n, npy_complex64 *dl, npy_complex64 *d, npy_complex64 *du, npy_complex64 *du2, int *ipiv, float *anorm, float *rcond, npy_complex64 *work, int *info);
+void F_FUNC(cgtrfs,CGTRFS)(char *trans, int *n, int *nrhs, npy_complex64 *dl, npy_complex64 *d, npy_complex64 *du, npy_complex64 *dlf, npy_complex64 *df, npy_complex64 *duf, npy_complex64 *du2, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(cgtsv,CGTSV)(int *n, int *nrhs, npy_complex64 *dl, npy_complex64 *d, npy_complex64 *du, npy_complex64 *b, int *ldb, int *info);
+void F_FUNC(cgtsvx,CGTSVX)(char *fact, char *trans, int *n, int *nrhs, npy_complex64 *dl, npy_complex64 *d, npy_complex64 *du, npy_complex64 *dlf, npy_complex64 *df, npy_complex64 *duf, npy_complex64 *du2, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *rcond, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(cgttrf,CGTTRF)(int *n, npy_complex64 *dl, npy_complex64 *d, npy_complex64 *du, npy_complex64 *du2, int *ipiv, int *info);
+void F_FUNC(cgttrs,CGTTRS)(char *trans, int *n, int *nrhs, npy_complex64 *dl, npy_complex64 *d, npy_complex64 *du, npy_complex64 *du2, int *ipiv, npy_complex64 *b, int *ldb, int *info);
+void F_FUNC(cgtts2,CGTTS2)(int *itrans, int *n, int *nrhs, npy_complex64 *dl, npy_complex64 *d, npy_complex64 *du, npy_complex64 *du2, int *ipiv, npy_complex64 *b, int *ldb);
+void F_FUNC(chbev,CHBEV)(char *jobz, char *uplo, int *n, int *kd, npy_complex64 *ab, int *ldab, float *w, npy_complex64 *z, int *ldz, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(chbevd,CHBEVD)(char *jobz, char *uplo, int *n, int *kd, npy_complex64 *ab, int *ldab, float *w, npy_complex64 *z, int *ldz, npy_complex64 *work, int *lwork, float *rwork, int *lrwork, int *iwork, int *liwork, int *info);
+void F_FUNC(chbevx,CHBEVX)(char *jobz, char *range, char *uplo, int *n, int *kd, npy_complex64 *ab, int *ldab, npy_complex64 *q, int *ldq, float *vl, float *vu, int *il, int *iu, float *abstol, int *m, float *w, npy_complex64 *z, int *ldz, npy_complex64 *work, float *rwork, int *iwork, int *ifail, int *info);
+void F_FUNC(chbgst,CHBGST)(char *vect, char *uplo, int *n, int *ka, int *kb, npy_complex64 *ab, int *ldab, npy_complex64 *bb, int *ldbb, npy_complex64 *x, int *ldx, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(chbgv,CHBGV)(char *jobz, char *uplo, int *n, int *ka, int *kb, npy_complex64 *ab, int *ldab, npy_complex64 *bb, int *ldbb, float *w, npy_complex64 *z, int *ldz, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(chbgvd,CHBGVD)(char *jobz, char *uplo, int *n, int *ka, int *kb, npy_complex64 *ab, int *ldab, npy_complex64 *bb, int *ldbb, float *w, npy_complex64 *z, int *ldz, npy_complex64 *work, int *lwork, float *rwork, int *lrwork, int *iwork, int *liwork, int *info);
+void F_FUNC(chbgvx,CHBGVX)(char *jobz, char *range, char *uplo, int *n, int *ka, int *kb, npy_complex64 *ab, int *ldab, npy_complex64 *bb, int *ldbb, npy_complex64 *q, int *ldq, float *vl, float *vu, int *il, int *iu, float *abstol, int *m, float *w, npy_complex64 *z, int *ldz, npy_complex64 *work, float *rwork, int *iwork, int *ifail, int *info);
+void F_FUNC(chbtrd,CHBTRD)(char *vect, char *uplo, int *n, int *kd, npy_complex64 *ab, int *ldab, float *d, float *e, npy_complex64 *q, int *ldq, npy_complex64 *work, int *info);
+void F_FUNC(checon,CHECON)(char *uplo, int *n, npy_complex64 *a, int *lda, int *ipiv, float *anorm, float *rcond, npy_complex64 *work, int *info);
+void F_FUNC(cheequb,CHEEQUB)(char *uplo, int *n, npy_complex64 *a, int *lda, float *s, float *scond, float *amax, npy_complex64 *work, int *info);
+void F_FUNC(cheev,CHEEV)(char *jobz, char *uplo, int *n, npy_complex64 *a, int *lda, float *w, npy_complex64 *work, int *lwork, float *rwork, int *info);
+void F_FUNC(cheevd,CHEEVD)(char *jobz, char *uplo, int *n, npy_complex64 *a, int *lda, float *w, npy_complex64 *work, int *lwork, float *rwork, int *lrwork, int *iwork, int *liwork, int *info);
+void F_FUNC(cheevr,CHEEVR)(char *jobz, char *range, char *uplo, int *n, npy_complex64 *a, int *lda, float *vl, float *vu, int *il, int *iu, float *abstol, int *m, float *w, npy_complex64 *z, int *ldz, int *isuppz, npy_complex64 *work, int *lwork, float *rwork, int *lrwork, int *iwork, int *liwork, int *info);
+void F_FUNC(cheevx,CHEEVX)(char *jobz, char *range, char *uplo, int *n, npy_complex64 *a, int *lda, float *vl, float *vu, int *il, int *iu, float *abstol, int *m, float *w, npy_complex64 *z, int *ldz, npy_complex64 *work, int *lwork, float *rwork, int *iwork, int *ifail, int *info);
+void F_FUNC(chegs2,CHEGS2)(int *itype, char *uplo, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, int *info);
+void F_FUNC(chegst,CHEGST)(int *itype, char *uplo, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, int *info);
+void F_FUNC(chegv,CHEGV)(int *itype, char *jobz, char *uplo, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, float *w, npy_complex64 *work, int *lwork, float *rwork, int *info);
+void F_FUNC(chegvd,CHEGVD)(int *itype, char *jobz, char *uplo, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, float *w, npy_complex64 *work, int *lwork, float *rwork, int *lrwork, int *iwork, int *liwork, int *info);
+void F_FUNC(chegvx,CHEGVX)(int *itype, char *jobz, char *range, char *uplo, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, float *vl, float *vu, int *il, int *iu, float *abstol, int *m, float *w, npy_complex64 *z, int *ldz, npy_complex64 *work, int *lwork, float *rwork, int *iwork, int *ifail, int *info);
+void F_FUNC(cherfs,CHERFS)(char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *af, int *ldaf, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(chesv,CHESV)(char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(chesvx,CHESVX)(char *fact, char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *af, int *ldaf, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *rcond, float *ferr, float *berr, npy_complex64 *work, int *lwork, float *rwork, int *info);
+void F_FUNC(cheswapr,CHESWAPR)(char *uplo, int *n, npy_complex64 *a, int *lda, int *i1, int *i2);
+void F_FUNC(chetd2,CHETD2)(char *uplo, int *n, npy_complex64 *a, int *lda, float *d, float *e, npy_complex64 *tau, int *info);
+void F_FUNC(chetf2,CHETF2)(char *uplo, int *n, npy_complex64 *a, int *lda, int *ipiv, int *info);
+void F_FUNC(chetrd,CHETRD)(char *uplo, int *n, npy_complex64 *a, int *lda, float *d, float *e, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(chetrf,CHETRF)(char *uplo, int *n, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(chetri,CHETRI)(char *uplo, int *n, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *work, int *info);
+void F_FUNC(chetri2,CHETRI2)(char *uplo, int *n, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(chetri2x,CHETRI2X)(char *uplo, int *n, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *work, int *nb, int *info);
+void F_FUNC(chetrs,CHETRS)(char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *b, int *ldb, int *info);
+void F_FUNC(chetrs2,CHETRS2)(char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *work, int *info);
+void F_FUNC(chfrk,CHFRK)(char *transr, char *uplo, char *trans, int *n, int *k, float *alpha, npy_complex64 *a, int *lda, float *beta, npy_complex64 *c);
+void F_FUNC(chgeqz,CHGEQZ)(char *job, char *compq, char *compz, int *n, int *ilo, int *ihi, npy_complex64 *h, int *ldh, npy_complex64 *t, int *ldt, npy_complex64 *alpha, npy_complex64 *beta, npy_complex64 *q, int *ldq, npy_complex64 *z, int *ldz, npy_complex64 *work, int *lwork, float *rwork, int *info);
+void F_FUNC(chpcon,CHPCON)(char *uplo, int *n, npy_complex64 *ap, int *ipiv, float *anorm, float *rcond, npy_complex64 *work, int *info);
+void F_FUNC(chpev,CHPEV)(char *jobz, char *uplo, int *n, npy_complex64 *ap, float *w, npy_complex64 *z, int *ldz, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(chpevd,CHPEVD)(char *jobz, char *uplo, int *n, npy_complex64 *ap, float *w, npy_complex64 *z, int *ldz, npy_complex64 *work, int *lwork, float *rwork, int *lrwork, int *iwork, int *liwork, int *info);
+void F_FUNC(chpevx,CHPEVX)(char *jobz, char *range, char *uplo, int *n, npy_complex64 *ap, float *vl, float *vu, int *il, int *iu, float *abstol, int *m, float *w, npy_complex64 *z, int *ldz, npy_complex64 *work, float *rwork, int *iwork, int *ifail, int *info);
+void F_FUNC(chpgst,CHPGST)(int *itype, char *uplo, int *n, npy_complex64 *ap, npy_complex64 *bp, int *info);
+void F_FUNC(chpgv,CHPGV)(int *itype, char *jobz, char *uplo, int *n, npy_complex64 *ap, npy_complex64 *bp, float *w, npy_complex64 *z, int *ldz, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(chpgvd,CHPGVD)(int *itype, char *jobz, char *uplo, int *n, npy_complex64 *ap, npy_complex64 *bp, float *w, npy_complex64 *z, int *ldz, npy_complex64 *work, int *lwork, float *rwork, int *lrwork, int *iwork, int *liwork, int *info);
+void F_FUNC(chpgvx,CHPGVX)(int *itype, char *jobz, char *range, char *uplo, int *n, npy_complex64 *ap, npy_complex64 *bp, float *vl, float *vu, int *il, int *iu, float *abstol, int *m, float *w, npy_complex64 *z, int *ldz, npy_complex64 *work, float *rwork, int *iwork, int *ifail, int *info);
+void F_FUNC(chprfs,CHPRFS)(char *uplo, int *n, int *nrhs, npy_complex64 *ap, npy_complex64 *afp, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(chpsv,CHPSV)(char *uplo, int *n, int *nrhs, npy_complex64 *ap, int *ipiv, npy_complex64 *b, int *ldb, int *info);
+void F_FUNC(chpsvx,CHPSVX)(char *fact, char *uplo, int *n, int *nrhs, npy_complex64 *ap, npy_complex64 *afp, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *rcond, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(chptrd,CHPTRD)(char *uplo, int *n, npy_complex64 *ap, float *d, float *e, npy_complex64 *tau, int *info);
+void F_FUNC(chptrf,CHPTRF)(char *uplo, int *n, npy_complex64 *ap, int *ipiv, int *info);
+void F_FUNC(chptri,CHPTRI)(char *uplo, int *n, npy_complex64 *ap, int *ipiv, npy_complex64 *work, int *info);
+void F_FUNC(chptrs,CHPTRS)(char *uplo, int *n, int *nrhs, npy_complex64 *ap, int *ipiv, npy_complex64 *b, int *ldb, int *info);
+void F_FUNC(chsein,CHSEIN)(char *side, char *eigsrc, char *initv, int *select, int *n, npy_complex64 *h, int *ldh, npy_complex64 *w, npy_complex64 *vl, int *ldvl, npy_complex64 *vr, int *ldvr, int *mm, int *m, npy_complex64 *work, float *rwork, int *ifaill, int *ifailr, int *info);
+void F_FUNC(chseqr,CHSEQR)(char *job, char *compz, int *n, int *ilo, int *ihi, npy_complex64 *h, int *ldh, npy_complex64 *w, npy_complex64 *z, int *ldz, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(clabrd,CLABRD)(int *m, int *n, int *nb, npy_complex64 *a, int *lda, float *d, float *e, npy_complex64 *tauq, npy_complex64 *taup, npy_complex64 *x, int *ldx, npy_complex64 *y, int *ldy);
+void F_FUNC(clacgv,CLACGV)(int *n, npy_complex64 *x, int *incx);
+void F_FUNC(clacn2,CLACN2)(int *n, npy_complex64 *v, npy_complex64 *x, float *est, int *kase, int *isave);
+void F_FUNC(clacon,CLACON)(int *n, npy_complex64 *v, npy_complex64 *x, float *est, int *kase);
+void F_FUNC(clacp2,CLACP2)(char *uplo, int *m, int *n, float *a, int *lda, npy_complex64 *b, int *ldb);
+void F_FUNC(clacpy,CLACPY)(char *uplo, int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb);
+void F_FUNC(clacrm,CLACRM)(int *m, int *n, npy_complex64 *a, int *lda, float *b, int *ldb, npy_complex64 *c, int *ldc, float *rwork);
+void F_FUNC(clacrt,CLACRT)(int *n, npy_complex64 *cx, int *incx, npy_complex64 *cy, int *incy, npy_complex64 *c, npy_complex64 *s);
+void F_FUNC(claed0,CLAED0)(int *qsiz, int *n, float *d, float *e, npy_complex64 *q, int *ldq, npy_complex64 *qstore, int *ldqs, float *rwork, int *iwork, int *info);
+void F_FUNC(claed7,CLAED7)(int *n, int *cutpnt, int *qsiz, int *tlvls, int *curlvl, int *curpbm, float *d, npy_complex64 *q, int *ldq, float *rho, int *indxq, float *qstore, int *qptr, int *prmptr, int *perm, int *givptr, int *givcol, float *givnum, npy_complex64 *work, float *rwork, int *iwork, int *info);
+void F_FUNC(claed8,CLAED8)(int *k, int *n, int *qsiz, npy_complex64 *q, int *ldq, float *d, float *rho, int *cutpnt, float *z, float *dlamda, npy_complex64 *q2, int *ldq2, float *w, int *indxp, int *indx, int *indxq, int *perm, int *givptr, int *givcol, float *givnum, int *info);
+void F_FUNC(claein,CLAEIN)(int *rightv, int *noinit, int *n, npy_complex64 *h, int *ldh, npy_complex64 *w, npy_complex64 *v, npy_complex64 *b, int *ldb, float *rwork, float *eps3, float *smlnum, int *info);
+void F_FUNC(claesy,CLAESY)(npy_complex64 *a, npy_complex64 *b, npy_complex64 *c, npy_complex64 *rt1, npy_complex64 *rt2, npy_complex64 *evscal, npy_complex64 *cs1, npy_complex64 *sn1);
+void F_FUNC(claev2,CLAEV2)(npy_complex64 *a, npy_complex64 *b, npy_complex64 *c, float *rt1, float *rt2, float *cs1, npy_complex64 *sn1);
+void F_FUNC(clag2z,CLAG2Z)(int *m, int *n, npy_complex64 *sa, int *ldsa, npy_complex128 *a, int *lda, int *info);
+void F_FUNC(clags2,CLAGS2)(int *upper, float *a1, npy_complex64 *a2, float *a3, float *b1, npy_complex64 *b2, float *b3, float *csu, npy_complex64 *snu, float *csv, npy_complex64 *snv, float *csq, npy_complex64 *snq);
+void F_FUNC(clagtm,CLAGTM)(char *trans, int *n, int *nrhs, float *alpha, npy_complex64 *dl, npy_complex64 *d, npy_complex64 *du, npy_complex64 *x, int *ldx, float *beta, npy_complex64 *b, int *ldb);
+void F_FUNC(clahef,CLAHEF)(char *uplo, int *n, int *nb, int *kb, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *w, int *ldw, int *info);
+void F_FUNC(clahqr,CLAHQR)(int *wantt, int *wantz, int *n, int *ilo, int *ihi, npy_complex64 *h, int *ldh, npy_complex64 *w, int *iloz, int *ihiz, npy_complex64 *z, int *ldz, int *info);
+void F_FUNC(clahr2,CLAHR2)(int *n, int *k, int *nb, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *t, int *ldt, npy_complex64 *y, int *ldy);
+void F_FUNC(claic1,CLAIC1)(int *job, int *j, npy_complex64 *x, float *sest, npy_complex64 *w, npy_complex64 *gamma, float *sestpr, npy_complex64 *s, npy_complex64 *c);
+void F_FUNC(clals0,CLALS0)(int *icompq, int *nl, int *nr, int *sqre, int *nrhs, npy_complex64 *b, int *ldb, npy_complex64 *bx, int *ldbx, int *perm, int *givptr, int *givcol, int *ldgcol, float *givnum, int *ldgnum, float *poles, float *difl, float *difr, float *z, int *k, float *c, float *s, float *rwork, int *info);
+void F_FUNC(clalsa,CLALSA)(int *icompq, int *smlsiz, int *n, int *nrhs, npy_complex64 *b, int *ldb, npy_complex64 *bx, int *ldbx, float *u, int *ldu, float *vt, int *k, float *difl, float *difr, float *z, float *poles, int *givptr, int *givcol, int *ldgcol, int *perm, float *givnum, float *c, float *s, float *rwork, int *iwork, int *info);
+void F_FUNC(clalsd,CLALSD)(char *uplo, int *smlsiz, int *n, int *nrhs, float *d, float *e, npy_complex64 *b, int *ldb, float *rcond, int *rank, npy_complex64 *work, float *rwork, int *iwork, int *info);
+void F_FUNC(clapll,CLAPLL)(int *n, npy_complex64 *x, int *incx, npy_complex64 *y, int *incy, float *ssmin);
+void F_FUNC(clapmr,CLAPMR)(int *forwrd, int *m, int *n, npy_complex64 *x, int *ldx, int *k);
+void F_FUNC(clapmt,CLAPMT)(int *forwrd, int *m, int *n, npy_complex64 *x, int *ldx, int *k);
+void F_FUNC(claqgb,CLAQGB)(int *m, int *n, int *kl, int *ku, npy_complex64 *ab, int *ldab, float *r, float *c, float *rowcnd, float *colcnd, float *amax, char *equed);
+void F_FUNC(claqge,CLAQGE)(int *m, int *n, npy_complex64 *a, int *lda, float *r, float *c, float *rowcnd, float *colcnd, float *amax, char *equed);
+void F_FUNC(claqhb,CLAQHB)(char *uplo, int *n, int *kd, npy_complex64 *ab, int *ldab, float *s, float *scond, float *amax, char *equed);
+void F_FUNC(claqhe,CLAQHE)(char *uplo, int *n, npy_complex64 *a, int *lda, float *s, float *scond, float *amax, char *equed);
+void F_FUNC(claqhp,CLAQHP)(char *uplo, int *n, npy_complex64 *ap, float *s, float *scond, float *amax, char *equed);
+void F_FUNC(claqp2,CLAQP2)(int *m, int *n, int *offset, npy_complex64 *a, int *lda, int *jpvt, npy_complex64 *tau, float *vn1, float *vn2, npy_complex64 *work);
+void F_FUNC(claqps,CLAQPS)(int *m, int *n, int *offset, int *nb, int *kb, npy_complex64 *a, int *lda, int *jpvt, npy_complex64 *tau, float *vn1, float *vn2, npy_complex64 *auxv, npy_complex64 *f, int *ldf);
+void F_FUNC(claqr0,CLAQR0)(int *wantt, int *wantz, int *n, int *ilo, int *ihi, npy_complex64 *h, int *ldh, npy_complex64 *w, int *iloz, int *ihiz, npy_complex64 *z, int *ldz, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(claqr1,CLAQR1)(int *n, npy_complex64 *h, int *ldh, npy_complex64 *s1, npy_complex64 *s2, npy_complex64 *v);
+void F_FUNC(claqr2,CLAQR2)(int *wantt, int *wantz, int *n, int *ktop, int *kbot, int *nw, npy_complex64 *h, int *ldh, int *iloz, int *ihiz, npy_complex64 *z, int *ldz, int *ns, int *nd, npy_complex64 *sh, npy_complex64 *v, int *ldv, int *nh, npy_complex64 *t, int *ldt, int *nv, npy_complex64 *wv, int *ldwv, npy_complex64 *work, int *lwork);
+void F_FUNC(claqr3,CLAQR3)(int *wantt, int *wantz, int *n, int *ktop, int *kbot, int *nw, npy_complex64 *h, int *ldh, int *iloz, int *ihiz, npy_complex64 *z, int *ldz, int *ns, int *nd, npy_complex64 *sh, npy_complex64 *v, int *ldv, int *nh, npy_complex64 *t, int *ldt, int *nv, npy_complex64 *wv, int *ldwv, npy_complex64 *work, int *lwork);
+void F_FUNC(claqr4,CLAQR4)(int *wantt, int *wantz, int *n, int *ilo, int *ihi, npy_complex64 *h, int *ldh, npy_complex64 *w, int *iloz, int *ihiz, npy_complex64 *z, int *ldz, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(claqr5,CLAQR5)(int *wantt, int *wantz, int *kacc22, int *n, int *ktop, int *kbot, int *nshfts, npy_complex64 *s, npy_complex64 *h, int *ldh, int *iloz, int *ihiz, npy_complex64 *z, int *ldz, npy_complex64 *v, int *ldv, npy_complex64 *u, int *ldu, int *nv, npy_complex64 *wv, int *ldwv, int *nh, npy_complex64 *wh, int *ldwh);
+void F_FUNC(claqsb,CLAQSB)(char *uplo, int *n, int *kd, npy_complex64 *ab, int *ldab, float *s, float *scond, float *amax, char *equed);
+void F_FUNC(claqsp,CLAQSP)(char *uplo, int *n, npy_complex64 *ap, float *s, float *scond, float *amax, char *equed);
+void F_FUNC(claqsy,CLAQSY)(char *uplo, int *n, npy_complex64 *a, int *lda, float *s, float *scond, float *amax, char *equed);
+void F_FUNC(clar1v,CLAR1V)(int *n, int *b1, int *bn, float *lambda, float *d, float *l, float *ld, float *lld, float *pivmin, float *gaptol, npy_complex64 *z, int *wantnc, int *negcnt, float *ztz, float *mingma, int *r, int *isuppz, float *nrminv, float *resid, float *rqcorr, float *work);
+void F_FUNC(clar2v,CLAR2V)(int *n, npy_complex64 *x, npy_complex64 *y, npy_complex64 *z, int *incx, float *c, npy_complex64 *s, int *incc);
+void F_FUNC(clarcm,CLARCM)(int *m, int *n, float *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *c, int *ldc, float *rwork);
+void F_FUNC(clarf,CLARF)(char *side, int *m, int *n, npy_complex64 *v, int *incv, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work);
+void F_FUNC(clarfb,CLARFB)(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, npy_complex64 *v, int *ldv, npy_complex64 *t, int *ldt, npy_complex64 *c, int *ldc, npy_complex64 *work, int *ldwork);
+void F_FUNC(clarfg,CLARFG)(int *n, npy_complex64 *alpha, npy_complex64 *x, int *incx, npy_complex64 *tau);
+void F_FUNC(clarfgp,CLARFGP)(int *n, npy_complex64 *alpha, npy_complex64 *x, int *incx, npy_complex64 *tau);
+void F_FUNC(clarft,CLARFT)(char *direct, char *storev, int *n, int *k, npy_complex64 *v, int *ldv, npy_complex64 *tau, npy_complex64 *t, int *ldt);
+void F_FUNC(clarfx,CLARFX)(char *side, int *m, int *n, npy_complex64 *v, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work);
+void F_FUNC(clargv,CLARGV)(int *n, npy_complex64 *x, int *incx, npy_complex64 *y, int *incy, float *c, int *incc);
+void F_FUNC(clarnv,CLARNV)(int *idist, int *iseed, int *n, npy_complex64 *x);
+void F_FUNC(clarrv,CLARRV)(int *n, float *vl, float *vu, float *d, float *l, float *pivmin, int *isplit, int *m, int *dol, int *dou, float *minrgp, float *rtol1, float *rtol2, float *w, float *werr, float *wgap, int *iblock, int *indexw, float *gers, npy_complex64 *z, int *ldz, int *isuppz, float *work, int *iwork, int *info);
+void F_FUNC(clartg,CLARTG)(npy_complex64 *f, npy_complex64 *g, float *cs, npy_complex64 *sn, npy_complex64 *r);
+void F_FUNC(clartv,CLARTV)(int *n, npy_complex64 *x, int *incx, npy_complex64 *y, int *incy, float *c, npy_complex64 *s, int *incc);
+void F_FUNC(clarz,CLARZ)(char *side, int *m, int *n, int *l, npy_complex64 *v, int *incv, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work);
+void F_FUNC(clarzb,CLARZB)(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, npy_complex64 *v, int *ldv, npy_complex64 *t, int *ldt, npy_complex64 *c, int *ldc, npy_complex64 *work, int *ldwork);
+void F_FUNC(clarzt,CLARZT)(char *direct, char *storev, int *n, int *k, npy_complex64 *v, int *ldv, npy_complex64 *tau, npy_complex64 *t, int *ldt);
+void F_FUNC(clascl,CLASCL)(char *type_bn, int *kl, int *ku, float *cfrom, float *cto, int *m, int *n, npy_complex64 *a, int *lda, int *info);
+void F_FUNC(claset,CLASET)(char *uplo, int *m, int *n, npy_complex64 *alpha, npy_complex64 *beta, npy_complex64 *a, int *lda);
+void F_FUNC(clasr,CLASR)(char *side, char *pivot, char *direct, int *m, int *n, float *c, float *s, npy_complex64 *a, int *lda);
+void F_FUNC(classq,CLASSQ)(int *n, npy_complex64 *x, int *incx, float *scale, float *sumsq);
+void F_FUNC(claswp,CLASWP)(int *n, npy_complex64 *a, int *lda, int *k1, int *k2, int *ipiv, int *incx);
+void F_FUNC(clasyf,CLASYF)(char *uplo, int *n, int *nb, int *kb, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *w, int *ldw, int *info);
+void F_FUNC(clatbs,CLATBS)(char *uplo, char *trans, char *diag, char *normin, int *n, int *kd, npy_complex64 *ab, int *ldab, npy_complex64 *x, float *scale, float *cnorm, int *info);
+void F_FUNC(clatdf,CLATDF)(int *ijob, int *n, npy_complex64 *z, int *ldz, npy_complex64 *rhs, float *rdsum, float *rdscal, int *ipiv, int *jpiv);
+void F_FUNC(clatps,CLATPS)(char *uplo, char *trans, char *diag, char *normin, int *n, npy_complex64 *ap, npy_complex64 *x, float *scale, float *cnorm, int *info);
+void F_FUNC(clatrd,CLATRD)(char *uplo, int *n, int *nb, npy_complex64 *a, int *lda, float *e, npy_complex64 *tau, npy_complex64 *w, int *ldw);
+void F_FUNC(clatrs,CLATRS)(char *uplo, char *trans, char *diag, char *normin, int *n, npy_complex64 *a, int *lda, npy_complex64 *x, float *scale, float *cnorm, int *info);
+void F_FUNC(clatrz,CLATRZ)(int *m, int *n, int *l, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work);
+void F_FUNC(clauu2,CLAUU2)(char *uplo, int *n, npy_complex64 *a, int *lda, int *info);
+void F_FUNC(clauum,CLAUUM)(char *uplo, int *n, npy_complex64 *a, int *lda, int *info);
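+/* The cpb, cpf, cpo, cpp and cpt families declared below are the Hermitian
+ * positive-definite solvers in band, RFP, full, packed and tridiagonal
+ * storage, respectively, per the standard LAPACK naming scheme. */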
+void F_FUNC(cpbcon,CPBCON)(char *uplo, int *n, int *kd, npy_complex64 *ab, int *ldab, float *anorm, float *rcond, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(cpbequ,CPBEQU)(char *uplo, int *n, int *kd, npy_complex64 *ab, int *ldab, float *s, float *scond, float *amax, int *info);
+void F_FUNC(cpbrfs,CPBRFS)(char *uplo, int *n, int *kd, int *nrhs, npy_complex64 *ab, int *ldab, npy_complex64 *afb, int *ldafb, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(cpbstf,CPBSTF)(char *uplo, int *n, int *kd, npy_complex64 *ab, int *ldab, int *info);
+void F_FUNC(cpbsv,CPBSV)(char *uplo, int *n, int *kd, int *nrhs, npy_complex64 *ab, int *ldab, npy_complex64 *b, int *ldb, int *info);
+void F_FUNC(cpbsvx,CPBSVX)(char *fact, char *uplo, int *n, int *kd, int *nrhs, npy_complex64 *ab, int *ldab, npy_complex64 *afb, int *ldafb, char *equed, float *s, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *rcond, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(cpbtf2,CPBTF2)(char *uplo, int *n, int *kd, npy_complex64 *ab, int *ldab, int *info);
+void F_FUNC(cpbtrf,CPBTRF)(char *uplo, int *n, int *kd, npy_complex64 *ab, int *ldab, int *info);
+void F_FUNC(cpbtrs,CPBTRS)(char *uplo, int *n, int *kd, int *nrhs, npy_complex64 *ab, int *ldab, npy_complex64 *b, int *ldb, int *info);
+void F_FUNC(cpftrf,CPFTRF)(char *transr, char *uplo, int *n, npy_complex64 *a, int *info);
+void F_FUNC(cpftri,CPFTRI)(char *transr, char *uplo, int *n, npy_complex64 *a, int *info);
+void F_FUNC(cpftrs,CPFTRS)(char *transr, char *uplo, int *n, int *nrhs, npy_complex64 *a, npy_complex64 *b, int *ldb, int *info);
+void F_FUNC(cpocon,CPOCON)(char *uplo, int *n, npy_complex64 *a, int *lda, float *anorm, float *rcond, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(cpoequ,CPOEQU)(int *n, npy_complex64 *a, int *lda, float *s, float *scond, float *amax, int *info);
+void F_FUNC(cpoequb,CPOEQUB)(int *n, npy_complex64 *a, int *lda, float *s, float *scond, float *amax, int *info);
+void F_FUNC(cporfs,CPORFS)(char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *af, int *ldaf, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(cposv,CPOSV)(char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, int *info);
+void F_FUNC(cposvx,CPOSVX)(char *fact, char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *af, int *ldaf, char *equed, float *s, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *rcond, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(cpotf2,CPOTF2)(char *uplo, int *n, npy_complex64 *a, int *lda, int *info);
+void F_FUNC(cpotrf,CPOTRF)(char *uplo, int *n, npy_complex64 *a, int *lda, int *info);
+void F_FUNC(cpotri,CPOTRI)(char *uplo, int *n, npy_complex64 *a, int *lda, int *info);
+void F_FUNC(cpotrs,CPOTRS)(char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, int *info);
+void F_FUNC(cppcon,CPPCON)(char *uplo, int *n, npy_complex64 *ap, float *anorm, float *rcond, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(cppequ,CPPEQU)(char *uplo, int *n, npy_complex64 *ap, float *s, float *scond, float *amax, int *info);
+void F_FUNC(cpprfs,CPPRFS)(char *uplo, int *n, int *nrhs, npy_complex64 *ap, npy_complex64 *afp, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(cppsv,CPPSV)(char *uplo, int *n, int *nrhs, npy_complex64 *ap, npy_complex64 *b, int *ldb, int *info);
+void F_FUNC(cppsvx,CPPSVX)(char *fact, char *uplo, int *n, int *nrhs, npy_complex64 *ap, npy_complex64 *afp, char *equed, float *s, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *rcond, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(cpptrf,CPPTRF)(char *uplo, int *n, npy_complex64 *ap, int *info);
+void F_FUNC(cpptri,CPPTRI)(char *uplo, int *n, npy_complex64 *ap, int *info);
+void F_FUNC(cpptrs,CPPTRS)(char *uplo, int *n, int *nrhs, npy_complex64 *ap, npy_complex64 *b, int *ldb, int *info);
+void F_FUNC(cpstf2,CPSTF2)(char *uplo, int *n, npy_complex64 *a, int *lda, int *piv, int *rank, float *tol, float *work, int *info);
+void F_FUNC(cpstrf,CPSTRF)(char *uplo, int *n, npy_complex64 *a, int *lda, int *piv, int *rank, float *tol, float *work, int *info);
+void F_FUNC(cptcon,CPTCON)(int *n, float *d, npy_complex64 *e, float *anorm, float *rcond, float *rwork, int *info);
+void F_FUNC(cpteqr,CPTEQR)(char *compz, int *n, float *d, float *e, npy_complex64 *z, int *ldz, float *work, int *info);
+void F_FUNC(cptrfs,CPTRFS)(char *uplo, int *n, int *nrhs, float *d, npy_complex64 *e, float *df, npy_complex64 *ef, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(cptsv,CPTSV)(int *n, int *nrhs, float *d, npy_complex64 *e, npy_complex64 *b, int *ldb, int *info);
+void F_FUNC(cptsvx,CPTSVX)(char *fact, int *n, int *nrhs, float *d, npy_complex64 *e, float *df, npy_complex64 *ef, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *rcond, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(cpttrf,CPTTRF)(int *n, float *d, npy_complex64 *e, int *info);
+void F_FUNC(cpttrs,CPTTRS)(char *uplo, int *n, int *nrhs, float *d, npy_complex64 *e, npy_complex64 *b, int *ldb, int *info);
+void F_FUNC(cptts2,CPTTS2)(int *iuplo, int *n, int *nrhs, float *d, npy_complex64 *e, npy_complex64 *b, int *ldb);
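+/* The crot/csp/csy prototypes below cover plane rotations and the
+ * complex-symmetric (non-Hermitian) solvers; the cst family holds the
+ * eigensolvers for real symmetric tridiagonal matrices arising from
+ * reduced Hermitian problems. */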
+void F_FUNC(crot,CROT)(int *n, npy_complex64 *cx, int *incx, npy_complex64 *cy, int *incy, float *c, npy_complex64 *s);
+void F_FUNC(cspcon,CSPCON)(char *uplo, int *n, npy_complex64 *ap, int *ipiv, float *anorm, float *rcond, npy_complex64 *work, int *info);
+void F_FUNC(cspmv,CSPMV)(char *uplo, int *n, npy_complex64 *alpha, npy_complex64 *ap, npy_complex64 *x, int *incx, npy_complex64 *beta, npy_complex64 *y, int *incy);
+void F_FUNC(cspr,CSPR)(char *uplo, int *n, npy_complex64 *alpha, npy_complex64 *x, int *incx, npy_complex64 *ap);
+void F_FUNC(csprfs,CSPRFS)(char *uplo, int *n, int *nrhs, npy_complex64 *ap, npy_complex64 *afp, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(cspsv,CSPSV)(char *uplo, int *n, int *nrhs, npy_complex64 *ap, int *ipiv, npy_complex64 *b, int *ldb, int *info);
+void F_FUNC(cspsvx,CSPSVX)(char *fact, char *uplo, int *n, int *nrhs, npy_complex64 *ap, npy_complex64 *afp, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *rcond, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(csptrf,CSPTRF)(char *uplo, int *n, npy_complex64 *ap, int *ipiv, int *info);
+void F_FUNC(csptri,CSPTRI)(char *uplo, int *n, npy_complex64 *ap, int *ipiv, npy_complex64 *work, int *info);
+void F_FUNC(csptrs,CSPTRS)(char *uplo, int *n, int *nrhs, npy_complex64 *ap, int *ipiv, npy_complex64 *b, int *ldb, int *info);
+void F_FUNC(csrscl,CSRSCL)(int *n, float *sa, npy_complex64 *sx, int *incx);
+void F_FUNC(cstedc,CSTEDC)(char *compz, int *n, float *d, float *e, npy_complex64 *z, int *ldz, npy_complex64 *work, int *lwork, float *rwork, int *lrwork, int *iwork, int *liwork, int *info);
+void F_FUNC(cstegr,CSTEGR)(char *jobz, char *range, int *n, float *d, float *e, float *vl, float *vu, int *il, int *iu, float *abstol, int *m, float *w, npy_complex64 *z, int *ldz, int *isuppz, float *work, int *lwork, int *iwork, int *liwork, int *info);
+void F_FUNC(cstein,CSTEIN)(int *n, float *d, float *e, int *m, float *w, int *iblock, int *isplit, npy_complex64 *z, int *ldz, float *work, int *iwork, int *ifail, int *info);
+void F_FUNC(cstemr,CSTEMR)(char *jobz, char *range, int *n, float *d, float *e, float *vl, float *vu, int *il, int *iu, int *m, float *w, npy_complex64 *z, int *ldz, int *nzc, int *isuppz, int *tryrac, float *work, int *lwork, int *iwork, int *liwork, int *info);
+void F_FUNC(csteqr,CSTEQR)(char *compz, int *n, float *d, float *e, npy_complex64 *z, int *ldz, float *work, int *info);
+void F_FUNC(csycon,CSYCON)(char *uplo, int *n, npy_complex64 *a, int *lda, int *ipiv, float *anorm, float *rcond, npy_complex64 *work, int *info);
+void F_FUNC(csyconv,CSYCONV)(char *uplo, char *way, int *n, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *work, int *info);
+void F_FUNC(csyequb,CSYEQUB)(char *uplo, int *n, npy_complex64 *a, int *lda, float *s, float *scond, float *amax, npy_complex64 *work, int *info);
+void F_FUNC(csymv,CSYMV)(char *uplo, int *n, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *x, int *incx, npy_complex64 *beta, npy_complex64 *y, int *incy);
+void F_FUNC(csyr,CSYR)(char *uplo, int *n, npy_complex64 *alpha, npy_complex64 *x, int *incx, npy_complex64 *a, int *lda);
+void F_FUNC(csyrfs,CSYRFS)(char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *af, int *ldaf, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(csysv,CSYSV)(char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(csysvx,CSYSVX)(char *fact, char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *af, int *ldaf, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *rcond, float *ferr, float *berr, npy_complex64 *work, int *lwork, float *rwork, int *info);
+void F_FUNC(csyswapr,CSYSWAPR)(char *uplo, int *n, npy_complex64 *a, int *lda, int *i1, int *i2);
+void F_FUNC(csytf2,CSYTF2)(char *uplo, int *n, npy_complex64 *a, int *lda, int *ipiv, int *info);
+void F_FUNC(csytrf,CSYTRF)(char *uplo, int *n, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(csytri,CSYTRI)(char *uplo, int *n, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *work, int *info);
+void F_FUNC(csytri2,CSYTRI2)(char *uplo, int *n, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(csytri2x,CSYTRI2X)(char *uplo, int *n, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *work, int *nb, int *info);
+void F_FUNC(csytrs,CSYTRS)(char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *b, int *ldb, int *info);
+void F_FUNC(csytrs2,CSYTRS2)(char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *work, int *info);
+void F_FUNC(ctbcon,CTBCON)(char *norm, char *uplo, char *diag, int *n, int *kd, npy_complex64 *ab, int *ldab, float *rcond, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(ctbrfs,CTBRFS)(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, npy_complex64 *ab, int *ldab, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(ctbtrs,CTBTRS)(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, npy_complex64 *ab, int *ldab, npy_complex64 *b, int *ldb, int *info);
+void F_FUNC(ctfsm,CTFSM)(char *transr, char *side, char *uplo, char *trans, char *diag, int *m, int *n, npy_complex64 *alpha, npy_complex64 *a, npy_complex64 *b, int *ldb);
+void F_FUNC(ctftri,CTFTRI)(char *transr, char *uplo, char *diag, int *n, npy_complex64 *a, int *info);
+void F_FUNC(ctfttp,CTFTTP)(char *transr, char *uplo, int *n, npy_complex64 *arf, npy_complex64 *ap, int *info);
+void F_FUNC(ctfttr,CTFTTR)(char *transr, char *uplo, int *n, npy_complex64 *arf, npy_complex64 *a, int *lda, int *info);
+void F_FUNC(ctgevc,CTGEVC)(char *side, char *howmny, int *select, int *n, npy_complex64 *s, int *lds, npy_complex64 *p, int *ldp, npy_complex64 *vl, int *ldvl, npy_complex64 *vr, int *ldvr, int *mm, int *m, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(ctgex2,CTGEX2)(int *wantq, int *wantz, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *q, int *ldq, npy_complex64 *z, int *ldz, int *j1, int *info);
+void F_FUNC(ctgexc,CTGEXC)(int *wantq, int *wantz, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *q, int *ldq, npy_complex64 *z, int *ldz, int *ifst, int *ilst, int *info);
+void F_FUNC(ctgsen,CTGSEN)(int *ijob, int *wantq, int *wantz, int *select, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *alpha, npy_complex64 *beta, npy_complex64 *q, int *ldq, npy_complex64 *z, int *ldz, int *m, float *pl, float *pr, float *dif, npy_complex64 *work, int *lwork, int *iwork, int *liwork, int *info);
+void F_FUNC(ctgsja,CTGSJA)(char *jobu, char *jobv, char *jobq, int *m, int *p, int *n, int *k, int *l, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, float *tola, float *tolb, float *alpha, float *beta, npy_complex64 *u, int *ldu, npy_complex64 *v, int *ldv, npy_complex64 *q, int *ldq, npy_complex64 *work, int *ncycle, int *info);
+void F_FUNC(ctgsna,CTGSNA)(char *job, char *howmny, int *select, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *vl, int *ldvl, npy_complex64 *vr, int *ldvr, float *s, float *dif, int *mm, int *m, npy_complex64 *work, int *lwork, int *iwork, int *info);
+void F_FUNC(ctgsy2,CTGSY2)(char *trans, int *ijob, int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *c, int *ldc, npy_complex64 *d, int *ldd, npy_complex64 *e, int *lde, npy_complex64 *f, int *ldf, float *scale, float *rdsum, float *rdscal, int *info);
+void F_FUNC(ctgsyl,CTGSYL)(char *trans, int *ijob, int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *c, int *ldc, npy_complex64 *d, int *ldd, npy_complex64 *e, int *lde, npy_complex64 *f, int *ldf, float *scale, float *dif, npy_complex64 *work, int *lwork, int *iwork, int *info);
+void F_FUNC(ctpcon,CTPCON)(char *norm, char *uplo, char *diag, int *n, npy_complex64 *ap, float *rcond, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(ctpmqrt,CTPMQRT)(char *side, char *trans, int *m, int *n, int *k, int *l, int *nb, npy_complex64 *v, int *ldv, npy_complex64 *t, int *ldt, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *work, int *info);
+void F_FUNC(ctpqrt,CTPQRT)(int *m, int *n, int *l, int *nb, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *t, int *ldt, npy_complex64 *work, int *info);
+void F_FUNC(ctpqrt2,CTPQRT2)(int *m, int *n, int *l, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *t, int *ldt, int *info);
+void F_FUNC(ctprfb,CTPRFB)(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, npy_complex64 *v, int *ldv, npy_complex64 *t, int *ldt, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *work, int *ldwork);
+void F_FUNC(ctprfs,CTPRFS)(char *uplo, char *trans, char *diag, int *n, int *nrhs, npy_complex64 *ap, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(ctptri,CTPTRI)(char *uplo, char *diag, int *n, npy_complex64 *ap, int *info);
+void F_FUNC(ctptrs,CTPTRS)(char *uplo, char *trans, char *diag, int *n, int *nrhs, npy_complex64 *ap, npy_complex64 *b, int *ldb, int *info);
+void F_FUNC(ctpttf,CTPTTF)(char *transr, char *uplo, int *n, npy_complex64 *ap, npy_complex64 *arf, int *info);
+void F_FUNC(ctpttr,CTPTTR)(char *uplo, int *n, npy_complex64 *ap, npy_complex64 *a, int *lda, int *info);
+void F_FUNC(ctrcon,CTRCON)(char *norm, char *uplo, char *diag, int *n, npy_complex64 *a, int *lda, float *rcond, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(ctrevc,CTREVC)(char *side, char *howmny, int *select, int *n, npy_complex64 *t, int *ldt, npy_complex64 *vl, int *ldvl, npy_complex64 *vr, int *ldvr, int *mm, int *m, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(ctrexc,CTREXC)(char *compq, int *n, npy_complex64 *t, int *ldt, npy_complex64 *q, int *ldq, int *ifst, int *ilst, int *info);
+void F_FUNC(ctrrfs,CTRRFS)(char *uplo, char *trans, char *diag, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info);
+void F_FUNC(ctrsen,CTRSEN)(char *job, char *compq, int *select, int *n, npy_complex64 *t, int *ldt, npy_complex64 *q, int *ldq, npy_complex64 *w, int *m, float *s, float *sep, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(ctrsna,CTRSNA)(char *job, char *howmny, int *select, int *n, npy_complex64 *t, int *ldt, npy_complex64 *vl, int *ldvl, npy_complex64 *vr, int *ldvr, float *s, float *sep, int *mm, int *m, npy_complex64 *work, int *ldwork, float *rwork, int *info);
+void F_FUNC(ctrsyl,CTRSYL)(char *trana, char *tranb, int *isgn, int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *c, int *ldc, float *scale, int *info);
+void F_FUNC(ctrti2,CTRTI2)(char *uplo, char *diag, int *n, npy_complex64 *a, int *lda, int *info);
+void F_FUNC(ctrtri,CTRTRI)(char *uplo, char *diag, int *n, npy_complex64 *a, int *lda, int *info);
+void F_FUNC(ctrtrs,CTRTRS)(char *uplo, char *trans, char *diag, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, int *info);
+void F_FUNC(ctrttf,CTRTTF)(char *transr, char *uplo, int *n, npy_complex64 *a, int *lda, npy_complex64 *arf, int *info);
+void F_FUNC(ctrttp,CTRTTP)(char *uplo, int *n, npy_complex64 *a, int *lda, npy_complex64 *ap, int *info);
+void F_FUNC(ctzrzf,CTZRZF)(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info);
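+/* The cun and cup families below generate and apply the unitary matrices
+ * defined by the Householder reflectors produced by the factorization and
+ * reduction routines (QR, LQ, QL, RQ, Hessenberg and tridiagonal
+ * reductions), plus the CS decomposition pair (cunbdb/cuncsd). */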
+void F_FUNC(cunbdb,CUNBDB)(char *trans, char *signs, int *m, int *p, int *q, npy_complex64 *x11, int *ldx11, npy_complex64 *x12, int *ldx12, npy_complex64 *x21, int *ldx21, npy_complex64 *x22, int *ldx22, float *theta, float *phi, npy_complex64 *taup1, npy_complex64 *taup2, npy_complex64 *tauq1, npy_complex64 *tauq2, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(cuncsd,CUNCSD)(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, char *signs, int *m, int *p, int *q, npy_complex64 *x11, int *ldx11, npy_complex64 *x12, int *ldx12, npy_complex64 *x21, int *ldx21, npy_complex64 *x22, int *ldx22, float *theta, npy_complex64 *u1, int *ldu1, npy_complex64 *u2, int *ldu2, npy_complex64 *v1t, int *ldv1t, npy_complex64 *v2t, int *ldv2t, npy_complex64 *work, int *lwork, float *rwork, int *lrwork, int *iwork, int *info);
+void F_FUNC(cung2l,CUNG2L)(int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *info);
+void F_FUNC(cung2r,CUNG2R)(int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *info);
+void F_FUNC(cungbr,CUNGBR)(char *vect, int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(cunghr,CUNGHR)(int *n, int *ilo, int *ihi, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(cungl2,CUNGL2)(int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *info);
+void F_FUNC(cunglq,CUNGLQ)(int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(cungql,CUNGQL)(int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(cungqr,CUNGQR)(int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(cungr2,CUNGR2)(int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *info);
+void F_FUNC(cungrq,CUNGRQ)(int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(cungtr,CUNGTR)(char *uplo, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(cunm2l,CUNM2L)(char *side, char *trans, int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *info);
+void F_FUNC(cunm2r,CUNM2R)(char *side, char *trans, int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *info);
+void F_FUNC(cunmbr,CUNMBR)(char *vect, char *side, char *trans, int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(cunmhr,CUNMHR)(char *side, char *trans, int *m, int *n, int *ilo, int *ihi, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(cunml2,CUNML2)(char *side, char *trans, int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *info);
+void F_FUNC(cunmlq,CUNMLQ)(char *side, char *trans, int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(cunmql,CUNMQL)(char *side, char *trans, int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(cunmqr,CUNMQR)(char *side, char *trans, int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(cunmr2,CUNMR2)(char *side, char *trans, int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *info);
+void F_FUNC(cunmr3,CUNMR3)(char *side, char *trans, int *m, int *n, int *k, int *l, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *info);
+void F_FUNC(cunmrq,CUNMRQ)(char *side, char *trans, int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(cunmrz,CUNMRZ)(char *side, char *trans, int *m, int *n, int *k, int *l, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(cunmtr,CUNMTR)(char *side, char *uplo, char *trans, int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *lwork, int *info);
+void F_FUNC(cupgtr,CUPGTR)(char *uplo, int *n, npy_complex64 *ap, npy_complex64 *tau, npy_complex64 *q, int *ldq, npy_complex64 *work, int *info);
+void F_FUNC(cupmtr,CUPMTR)(char *side, char *uplo, char *trans, int *m, int *n, npy_complex64 *ap, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *info);
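+/* End of the single-precision complex ("c"-prefixed) prototypes; the
+ * double-precision real ("d"-prefixed) routines follow. As a minimal
+ * usage sketch (an illustration only, assuming this header plus a linked
+ * LAPACK library): every argument, scalars and character flags included,
+ * is passed by address, and matrices are stored column-major, e.g. for
+ * the dgesv prototype declared below:
+ *
+ *   int n = 2, nrhs = 1, lda = 2, ldb = 2, info = 0;
+ *   int ipiv[2];
+ *   double a[4] = {4.0, 1.0, 2.0, 3.0};   // 2x2 matrix, column-major
+ *   double b[2] = {1.0, 2.0};
+ *   F_FUNC(dgesv,DGESV)(&n, &nrhs, a, &lda, ipiv, b, &ldb, &info);
+ *   // info == 0 on success; b is overwritten with the solution x.
+ */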
+void F_FUNC(dbbcsd,DBBCSD)(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, int *m, int *p, int *q, double *theta, double *phi, double *u1, int *ldu1, double *u2, int *ldu2, double *v1t, int *ldv1t, double *v2t, int *ldv2t, double *b11d, double *b11e, double *b12d, double *b12e, double *b21d, double *b21e, double *b22d, double *b22e, double *work, int *lwork, int *info);
+void F_FUNC(dbdsdc,DBDSDC)(char *uplo, char *compq, int *n, double *d, double *e, double *u, int *ldu, double *vt, int *ldvt, double *q, int *iq, double *work, int *iwork, int *info);
+void F_FUNC(dbdsqr,DBDSQR)(char *uplo, int *n, int *ncvt, int *nru, int *ncc, double *d, double *e, double *vt, int *ldvt, double *u, int *ldu, double *c, int *ldc, double *work, int *info);
+void F_FUNC(ddisna,DDISNA)(char *job, int *m, int *n, double *d, double *sep, int *info);
+void F_FUNC(dgbbrd,DGBBRD)(char *vect, int *m, int *n, int *ncc, int *kl, int *ku, double *ab, int *ldab, double *d, double *e, double *q, int *ldq, double *pt, int *ldpt, double *c, int *ldc, double *work, int *info);
+void F_FUNC(dgbcon,DGBCON)(char *norm, int *n, int *kl, int *ku, double *ab, int *ldab, int *ipiv, double *anorm, double *rcond, double *work, int *iwork, int *info);
+void F_FUNC(dgbequ,DGBEQU)(int *m, int *n, int *kl, int *ku, double *ab, int *ldab, double *r, double *c, double *rowcnd, double *colcnd, double *amax, int *info);
+void F_FUNC(dgbequb,DGBEQUB)(int *m, int *n, int *kl, int *ku, double *ab, int *ldab, double *r, double *c, double *rowcnd, double *colcnd, double *amax, int *info);
+void F_FUNC(dgbrfs,DGBRFS)(char *trans, int *n, int *kl, int *ku, int *nrhs, double *ab, int *ldab, double *afb, int *ldafb, int *ipiv, double *b, int *ldb, double *x, int *ldx, double *ferr, double *berr, double *work, int *iwork, int *info);
+void F_FUNC(dgbsv,DGBSV)(int *n, int *kl, int *ku, int *nrhs, double *ab, int *ldab, int *ipiv, double *b, int *ldb, int *info);
+void F_FUNC(dgbsvx,DGBSVX)(char *fact, char *trans, int *n, int *kl, int *ku, int *nrhs, double *ab, int *ldab, double *afb, int *ldafb, int *ipiv, char *equed, double *r, double *c, double *b, int *ldb, double *x, int *ldx, double *rcond, double *ferr, double *berr, double *work, int *iwork, int *info);
+void F_FUNC(dgbtf2,DGBTF2)(int *m, int *n, int *kl, int *ku, double *ab, int *ldab, int *ipiv, int *info);
+void F_FUNC(dgbtrf,DGBTRF)(int *m, int *n, int *kl, int *ku, double *ab, int *ldab, int *ipiv, int *info);
+void F_FUNC(dgbtrs,DGBTRS)(char *trans, int *n, int *kl, int *ku, int *nrhs, double *ab, int *ldab, int *ipiv, double *b, int *ldb, int *info);
+void F_FUNC(dgebak,DGEBAK)(char *job, char *side, int *n, int *ilo, int *ihi, double *scale, int *m, double *v, int *ldv, int *info);
+void F_FUNC(dgebal,DGEBAL)(char *job, int *n, double *a, int *lda, int *ilo, int *ihi, double *scale, int *info);
+void F_FUNC(dgebd2,DGEBD2)(int *m, int *n, double *a, int *lda, double *d, double *e, double *tauq, double *taup, double *work, int *info);
+void F_FUNC(dgebrd,DGEBRD)(int *m, int *n, double *a, int *lda, double *d, double *e, double *tauq, double *taup, double *work, int *lwork, int *info);
+void F_FUNC(dgecon,DGECON)(char *norm, int *n, double *a, int *lda, double *anorm, double *rcond, double *work, int *iwork, int *info);
+void F_FUNC(dgeequ,DGEEQU)(int *m, int *n, double *a, int *lda, double *r, double *c, double *rowcnd, double *colcnd, double *amax, int *info);
+void F_FUNC(dgeequb,DGEEQUB)(int *m, int *n, double *a, int *lda, double *r, double *c, double *rowcnd, double *colcnd, double *amax, int *info);
+void F_FUNC(dgees,DGEES)(char *jobvs, char *sort, _dselect2 *select, int *n, double *a, int *lda, int *sdim, double *wr, double *wi, double *vs, int *ldvs, double *work, int *lwork, int *bwork, int *info);
+void F_FUNC(dgeesx,DGEESX)(char *jobvs, char *sort, _dselect2 *select, char *sense, int *n, double *a, int *lda, int *sdim, double *wr, double *wi, double *vs, int *ldvs, double *rconde, double *rcondv, double *work, int *lwork, int *iwork, int *liwork, int *bwork, int *info);
+void F_FUNC(dgeev,DGEEV)(char *jobvl, char *jobvr, int *n, double *a, int *lda, double *wr, double *wi, double *vl, int *ldvl, double *vr, int *ldvr, double *work, int *lwork, int *info);
+void F_FUNC(dgeevx,DGEEVX)(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, double *a, int *lda, double *wr, double *wi, double *vl, int *ldvl, double *vr, int *ldvr, int *ilo, int *ihi, double *scale, double *abnrm, double *rconde, double *rcondv, double *work, int *lwork, int *iwork, int *info);
+void F_FUNC(dgehd2,DGEHD2)(int *n, int *ilo, int *ihi, double *a, int *lda, double *tau, double *work, int *info);
+void F_FUNC(dgehrd,DGEHRD)(int *n, int *ilo, int *ihi, double *a, int *lda, double *tau, double *work, int *lwork, int *info);
+void F_FUNC(dgejsv,DGEJSV)(char *joba, char *jobu, char *jobv, char *jobr, char *jobt, char *jobp, int *m, int *n, double *a, int *lda, double *sva, double *u, int *ldu, double *v, int *ldv, double *work, int *lwork, int *iwork, int *info);
+void F_FUNC(dgelq2,DGELQ2)(int *m, int *n, double *a, int *lda, double *tau, double *work, int *info);
+void F_FUNC(dgelqf,DGELQF)(int *m, int *n, double *a, int *lda, double *tau, double *work, int *lwork, int *info);
+void F_FUNC(dgels,DGELS)(char *trans, int *m, int *n, int *nrhs, double *a, int *lda, double *b, int *ldb, double *work, int *lwork, int *info);
+void F_FUNC(dgelsd,DGELSD)(int *m, int *n, int *nrhs, double *a, int *lda, double *b, int *ldb, double *s, double *rcond, int *rank, double *work, int *lwork, int *iwork, int *info);
+void F_FUNC(dgelss,DGELSS)(int *m, int *n, int *nrhs, double *a, int *lda, double *b, int *ldb, double *s, double *rcond, int *rank, double *work, int *lwork, int *info);
+void F_FUNC(dgelsy,DGELSY)(int *m, int *n, int *nrhs, double *a, int *lda, double *b, int *ldb, int *jpvt, double *rcond, int *rank, double *work, int *lwork, int *info);
+void F_FUNC(dgemqrt,DGEMQRT)(char *side, char *trans, int *m, int *n, int *k, int *nb, double *v, int *ldv, double *t, int *ldt, double *c, int *ldc, double *work, int *info);
+void F_FUNC(dgeql2,DGEQL2)(int *m, int *n, double *a, int *lda, double *tau, double *work, int *info);
+void F_FUNC(dgeqlf,DGEQLF)(int *m, int *n, double *a, int *lda, double *tau, double *work, int *lwork, int *info);
+void F_FUNC(dgeqp3,DGEQP3)(int *m, int *n, double *a, int *lda, int *jpvt, double *tau, double *work, int *lwork, int *info);
+void F_FUNC(dgeqr2,DGEQR2)(int *m, int *n, double *a, int *lda, double *tau, double *work, int *info);
+void F_FUNC(dgeqr2p,DGEQR2P)(int *m, int *n, double *a, int *lda, double *tau, double *work, int *info);
+void F_FUNC(dgeqrf,DGEQRF)(int *m, int *n, double *a, int *lda, double *tau, double *work, int *lwork, int *info);
+void F_FUNC(dgeqrfp,DGEQRFP)(int *m, int *n, double *a, int *lda, double *tau, double *work, int *lwork, int *info);
+void F_FUNC(dgeqrt,DGEQRT)(int *m, int *n, int *nb, double *a, int *lda, double *t, int *ldt, double *work, int *info);
+void F_FUNC(dgeqrt2,DGEQRT2)(int *m, int *n, double *a, int *lda, double *t, int *ldt, int *info);
+void F_FUNC(dgeqrt3,DGEQRT3)(int *m, int *n, double *a, int *lda, double *t, int *ldt, int *info);
+void F_FUNC(dgerfs,DGERFS)(char *trans, int *n, int *nrhs, double *a, int *lda, double *af, int *ldaf, int *ipiv, double *b, int *ldb, double *x, int *ldx, double *ferr, double *berr, double *work, int *iwork, int *info);
+void F_FUNC(dgerq2,DGERQ2)(int *m, int *n, double *a, int *lda, double *tau, double *work, int *info);
+void F_FUNC(dgerqf,DGERQF)(int *m, int *n, double *a, int *lda, double *tau, double *work, int *lwork, int *info);
+void F_FUNC(dgesc2,DGESC2)(int *n, double *a, int *lda, double *rhs, int *ipiv, int *jpiv, double *scale);
+void F_FUNC(dgesdd,DGESDD)(char *jobz, int *m, int *n, double *a, int *lda, double *s, double *u, int *ldu, double *vt, int *ldvt, double *work, int *lwork, int *iwork, int *info);
+void F_FUNC(dgesv,DGESV)(int *n, int *nrhs, double *a, int *lda, int *ipiv, double *b, int *ldb, int *info);
+void F_FUNC(dgesvd,DGESVD)(char *jobu, char *jobvt, int *m, int *n, double *a, int *lda, double *s, double *u, int *ldu, double *vt, int *ldvt, double *work, int *lwork, int *info);
+void F_FUNC(dgesvj,DGESVJ)(char *joba, char *jobu, char *jobv, int *m, int *n, double *a, int *lda, double *sva, int *mv, double *v, int *ldv, double *work, int *lwork, int *info);
+void F_FUNC(dgesvx,DGESVX)(char *fact, char *trans, int *n, int *nrhs, double *a, int *lda, double *af, int *ldaf, int *ipiv, char *equed, double *r, double *c, double *b, int *ldb, double *x, int *ldx, double *rcond, double *ferr, double *berr, double *work, int *iwork, int *info);
+void F_FUNC(dgetc2,DGETC2)(int *n, double *a, int *lda, int *ipiv, int *jpiv, int *info);
+void F_FUNC(dgetf2,DGETF2)(int *m, int *n, double *a, int *lda, int *ipiv, int *info);
+void F_FUNC(dgetrf,DGETRF)(int *m, int *n, double *a, int *lda, int *ipiv, int *info);
+void F_FUNC(dgetri,DGETRI)(int *n, double *a, int *lda, int *ipiv, double *work, int *lwork, int *info);
+void F_FUNC(dgetrs,DGETRS)(char *trans, int *n, int *nrhs, double *a, int *lda, int *ipiv, double *b, int *ldb, int *info);
+void F_FUNC(dggbak,DGGBAK)(char *job, char *side, int *n, int *ilo, int *ihi, double *lscale, double *rscale, int *m, double *v, int *ldv, int *info);
+void F_FUNC(dggbal,DGGBAL)(char *job, int *n, double *a, int *lda, double *b, int *ldb, int *ilo, int *ihi, double *lscale, double *rscale, double *work, int *info);
+void F_FUNC(dgges,DGGES)(char *jobvsl, char *jobvsr, char *sort, _dselect3 *selctg, int *n, double *a, int *lda, double *b, int *ldb, int *sdim, double *alphar, double *alphai, double *beta, double *vsl, int *ldvsl, double *vsr, int *ldvsr, double *work, int *lwork, int *bwork, int *info);
+void F_FUNC(dggesx,DGGESX)(char *jobvsl, char *jobvsr, char *sort, _dselect3 *selctg, char *sense, int *n, double *a, int *lda, double *b, int *ldb, int *sdim, double *alphar, double *alphai, double *beta, double *vsl, int *ldvsl, double *vsr, int *ldvsr, double *rconde, double *rcondv, double *work, int *lwork, int *iwork, int *liwork, int *bwork, int *info);
+void F_FUNC(dggev,DGGEV)(char *jobvl, char *jobvr, int *n, double *a, int *lda, double *b, int *ldb, double *alphar, double *alphai, double *beta, double *vl, int *ldvl, double *vr, int *ldvr, double *work, int *lwork, int *info);
+void F_FUNC(dggevx,DGGEVX)(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, double *a, int *lda, double *b, int *ldb, double *alphar, double *alphai, double *beta, double *vl, int *ldvl, double *vr, int *ldvr, int *ilo, int *ihi, double *lscale, double *rscale, double *abnrm, double *bbnrm, double *rconde, double *rcondv, double *work, int *lwork, int *iwork, int *bwork, int *info);
+void F_FUNC(dggglm,DGGGLM)(int *n, int *m, int *p, double *a, int *lda, double *b, int *ldb, double *d, double *x, double *y, double *work, int *lwork, int *info);
+void F_FUNC(dgghrd,DGGHRD)(char *compq, char *compz, int *n, int *ilo, int *ihi, double *a, int *lda, double *b, int *ldb, double *q, int *ldq, double *z, int *ldz, int *info);
+void F_FUNC(dgglse,DGGLSE)(int *m, int *n, int *p, double *a, int *lda, double *b, int *ldb, double *c, double *d, double *x, double *work, int *lwork, int *info);
+void F_FUNC(dggqrf,DGGQRF)(int *n, int *m, int *p, double *a, int *lda, double *taua, double *b, int *ldb, double *taub, double *work, int *lwork, int *info);
+void F_FUNC(dggrqf,DGGRQF)(int *m, int *p, int *n, double *a, int *lda, double *taua, double *b, int *ldb, double *taub, double *work, int *lwork, int *info);
+void F_FUNC(dgsvj0,DGSVJ0)(char *jobv, int *m, int *n, double *a, int *lda, double *d, double *sva, int *mv, double *v, int *ldv, double *eps, double *sfmin, double *tol, int *nsweep, double *work, int *lwork, int *info);
+void F_FUNC(dgsvj1,DGSVJ1)(char *jobv, int *m, int *n, int *n1, double *a, int *lda, double *d, double *sva, int *mv, double *v, int *ldv, double *eps, double *sfmin, double *tol, int *nsweep, double *work, int *lwork, int *info);
+void F_FUNC(dgtcon,DGTCON)(char *norm, int *n, double *dl, double *d, double *du, double *du2, int *ipiv, double *anorm, double *rcond, double *work, int *iwork, int *info);
+void F_FUNC(dgtrfs,DGTRFS)(char *trans, int *n, int *nrhs, double *dl, double *d, double *du, double *dlf, double *df, double *duf, double *du2, int *ipiv, double *b, int *ldb, double *x, int *ldx, double *ferr, double *berr, double *work, int *iwork, int *info);
+void F_FUNC(dgtsv,DGTSV)(int *n, int *nrhs, double *dl, double *d, double *du, double *b, int *ldb, int *info);
+void F_FUNC(dgtsvx,DGTSVX)(char *fact, char *trans, int *n, int *nrhs, double *dl, double *d, double *du, double *dlf, double *df, double *duf, double *du2, int *ipiv, double *b, int *ldb, double *x, int *ldx, double *rcond, double *ferr, double *berr, double *work, int *iwork, int *info);
+void F_FUNC(dgttrf,DGTTRF)(int *n, double *dl, double *d, double *du, double *du2, int *ipiv, int *info);
+void F_FUNC(dgttrs,DGTTRS)(char *trans, int *n, int *nrhs, double *dl, double *d, double *du, double *du2, int *ipiv, double *b, int *ldb, int *info);
+void F_FUNC(dgtts2,DGTTS2)(int *itrans, int *n, int *nrhs, double *dl, double *d, double *du, double *du2, int *ipiv, double *b, int *ldb);
+void F_FUNC(dhgeqz,DHGEQZ)(char *job, char *compq, char *compz, int *n, int *ilo, int *ihi, double *h, int *ldh, double *t, int *ldt, double *alphar, double *alphai, double *beta, double *q, int *ldq, double *z, int *ldz, double *work, int *lwork, int *info);
+void F_FUNC(dhsein,DHSEIN)(char *side, char *eigsrc, char *initv, int *select, int *n, double *h, int *ldh, double *wr, double *wi, double *vl, int *ldvl, double *vr, int *ldvr, int *mm, int *m, double *work, int *ifaill, int *ifailr, int *info);
+void F_FUNC(dhseqr,DHSEQR)(char *job, char *compz, int *n, int *ilo, int *ihi, double *h, int *ldh, double *wr, double *wi, double *z, int *ldz, double *work, int *lwork, int *info);
+void F_FUNC(dlabad,DLABAD)(double *small, double *large);
+void F_FUNC(dlabrd,DLABRD)(int *m, int *n, int *nb, double *a, int *lda, double *d, double *e, double *tauq, double *taup, double *x, int *ldx, double *y, int *ldy);
+void F_FUNC(dlacn2,DLACN2)(int *n, double *v, double *x, int *isgn, double *est, int *kase, int *isave);
+void F_FUNC(dlacon,DLACON)(int *n, double *v, double *x, int *isgn, double *est, int *kase);
+void F_FUNC(dlacpy,DLACPY)(char *uplo, int *m, int *n, double *a, int *lda, double *b, int *ldb);
+void F_FUNC(dladiv,DLADIV)(double *a, double *b, double *c, double *d, double *p, double *q);
+void F_FUNC(dlae2,DLAE2)(double *a, double *b, double *c, double *rt1, double *rt2);
+void F_FUNC(dlaebz,DLAEBZ)(int *ijob, int *nitmax, int *n, int *mmax, int *minp, int *nbmin, double *abstol, double *reltol, double *pivmin, double *d, double *e, double *e2, int *nval, double *ab, double *c, int *mout, int *nab, double *work, int *iwork, int *info);
+void F_FUNC(dlaed0,DLAED0)(int *icompq, int *qsiz, int *n, double *d, double *e, double *q, int *ldq, double *qstore, int *ldqs, double *work, int *iwork, int *info);
+void F_FUNC(dlaed1,DLAED1)(int *n, double *d, double *q, int *ldq, int *indxq, double *rho, int *cutpnt, double *work, int *iwork, int *info);
+void F_FUNC(dlaed2,DLAED2)(int *k, int *n, int *n1, double *d, double *q, int *ldq, int *indxq, double *rho, double *z, double *dlamda, double *w, double *q2, int *indx, int *indxc, int *indxp, int *coltyp, int *info);
+void F_FUNC(dlaed3,DLAED3)(int *k, int *n, int *n1, double *d, double *q, int *ldq, double *rho, double *dlamda, double *q2, int *indx, int *ctot, double *w, double *s, int *info);
+void F_FUNC(dlaed4,DLAED4)(int *n, int *i, double *d, double *z, double *delta, double *rho, double *dlam, int *info);
+void F_FUNC(dlaed5,DLAED5)(int *i, double *d, double *z, double *delta, double *rho, double *dlam);
+void F_FUNC(dlaed6,DLAED6)(int *kniter, int *orgati, double *rho, double *d, double *z, double *finit, double *tau, int *info);
+void F_FUNC(dlaed7,DLAED7)(int *icompq, int *n, int *qsiz, int *tlvls, int *curlvl, int *curpbm, double *d, double *q, int *ldq, int *indxq, double *rho, int *cutpnt, double *qstore, int *qptr, int *prmptr, int *perm, int *givptr, int *givcol, double *givnum, double *work, int *iwork, int *info);
+void F_FUNC(dlaed8,DLAED8)(int *icompq, int *k, int *n, int *qsiz, double *d, double *q, int *ldq, int *indxq, double *rho, int *cutpnt, double *z, double *dlamda, double *q2, int *ldq2, double *w, int *perm, int *givptr, int *givcol, double *givnum, int *indxp, int *indx, int *info);
+void F_FUNC(dlaed9,DLAED9)(int *k, int *kstart, int *kstop, int *n, double *d, double *q, int *ldq, double *rho, double *dlamda, double *w, double *s, int *lds, int *info);
+void F_FUNC(dlaeda,DLAEDA)(int *n, int *tlvls, int *curlvl, int *curpbm, int *prmptr, int *perm, int *givptr, int *givcol, double *givnum, double *q, int *qptr, double *z, double *ztemp, int *info);
+void F_FUNC(dlaein,DLAEIN)(int *rightv, int *noinit, int *n, double *h, int *ldh, double *wr, double *wi, double *vr, double *vi, double *b, int *ldb, double *work, double *eps3, double *smlnum, double *bignum, int *info);
+void F_FUNC(dlaev2,DLAEV2)(double *a, double *b, double *c, double *rt1, double *rt2, double *cs1, double *sn1);
+void F_FUNC(dlaexc,DLAEXC)(int *wantq, int *n, double *t, int *ldt, double *q, int *ldq, int *j1, int *n1, int *n2, double *work, int *info);
+void F_FUNC(dlag2,DLAG2)(double *a, int *lda, double *b, int *ldb, double *safmin, double *scale1, double *scale2, double *wr1, double *wr2, double *wi);
+void F_FUNC(dlag2s,DLAG2S)(int *m, int *n, double *a, int *lda, float *sa, int *ldsa, int *info);
+void F_FUNC(dlags2,DLAGS2)(int *upper, double *a1, double *a2, double *a3, double *b1, double *b2, double *b3, double *csu, double *snu, double *csv, double *snv, double *csq, double *snq);
+void F_FUNC(dlagtf,DLAGTF)(int *n, double *a, double *lambda, double *b, double *c, double *tol, double *d, int *in, int *info);
+void F_FUNC(dlagtm,DLAGTM)(char *trans, int *n, int *nrhs, double *alpha, double *dl, double *d, double *du, double *x, int *ldx, double *beta, double *b, int *ldb);
+void F_FUNC(dlagts,DLAGTS)(int *job, int *n, double *a, double *b, double *c, double *d, int *in, double *y, double *tol, int *info);
+void F_FUNC(dlagv2,DLAGV2)(double *a, int *lda, double *b, int *ldb, double *alphar, double *alphai, double *beta, double *csl, double *snl, double *csr, double *snr);
+void F_FUNC(dlahqr,DLAHQR)(int *wantt, int *wantz, int *n, int *ilo, int *ihi, double *h, int *ldh, double *wr, double *wi, int *iloz, int *ihiz, double *z, int *ldz, int *info);
+void F_FUNC(dlahr2,DLAHR2)(int *n, int *k, int *nb, double *a, int *lda, double *tau, double *t, int *ldt, double *y, int *ldy);
+void F_FUNC(dlaic1,DLAIC1)(int *job, int *j, double *x, double *sest, double *w, double *gamma, double *sestpr, double *s, double *c);
+void F_FUNC(dlaln2,DLALN2)(int *ltrans, int *na, int *nw, double *smin, double *ca, double *a, int *lda, double *d1, double *d2, double *b, int *ldb, double *wr, double *wi, double *x, int *ldx, double *scale, double *xnorm, int *info);
+void F_FUNC(dlals0,DLALS0)(int *icompq, int *nl, int *nr, int *sqre, int *nrhs, double *b, int *ldb, double *bx, int *ldbx, int *perm, int *givptr, int *givcol, int *ldgcol, double *givnum, int *ldgnum, double *poles, double *difl, double *difr, double *z, int *k, double *c, double *s, double *work, int *info);
+void F_FUNC(dlalsa,DLALSA)(int *icompq, int *smlsiz, int *n, int *nrhs, double *b, int *ldb, double *bx, int *ldbx, double *u, int *ldu, double *vt, int *k, double *difl, double *difr, double *z, double *poles, int *givptr, int *givcol, int *ldgcol, int *perm, double *givnum, double *c, double *s, double *work, int *iwork, int *info);
+void F_FUNC(dlalsd,DLALSD)(char *uplo, int *smlsiz, int *n, int *nrhs, double *d, double *e, double *b, int *ldb, double *rcond, int *rank, double *work, int *iwork, int *info);
+void F_FUNC(dlamrg,DLAMRG)(int *n1, int *n2, double *a, int *dtrd1, int *dtrd2, int *index_bn);
+void F_FUNC(dlanv2,DLANV2)(double *a, double *b, double *c, double *d, double *rt1r, double *rt1i, double *rt2r, double *rt2i, double *cs, double *sn);
+void F_FUNC(dlapll,DLAPLL)(int *n, double *x, int *incx, double *y, int *incy, double *ssmin);
+void F_FUNC(dlapmr,DLAPMR)(int *forwrd, int *m, int *n, double *x, int *ldx, int *k);
+void F_FUNC(dlapmt,DLAPMT)(int *forwrd, int *m, int *n, double *x, int *ldx, int *k);
+void F_FUNC(dlaqgb,DLAQGB)(int *m, int *n, int *kl, int *ku, double *ab, int *ldab, double *r, double *c, double *rowcnd, double *colcnd, double *amax, char *equed);
+void F_FUNC(dlaqge,DLAQGE)(int *m, int *n, double *a, int *lda, double *r, double *c, double *rowcnd, double *colcnd, double *amax, char *equed);
+void F_FUNC(dlaqp2,DLAQP2)(int *m, int *n, int *offset, double *a, int *lda, int *jpvt, double *tau, double *vn1, double *vn2, double *work);
+void F_FUNC(dlaqps,DLAQPS)(int *m, int *n, int *offset, int *nb, int *kb, double *a, int *lda, int *jpvt, double *tau, double *vn1, double *vn2, double *auxv, double *f, int *ldf);
+void F_FUNC(dlaqr0,DLAQR0)(int *wantt, int *wantz, int *n, int *ilo, int *ihi, double *h, int *ldh, double *wr, double *wi, int *iloz, int *ihiz, double *z, int *ldz, double *work, int *lwork, int *info);
+void F_FUNC(dlaqr1,DLAQR1)(int *n, double *h, int *ldh, double *sr1, double *si1, double *sr2, double *si2, double *v);
+void F_FUNC(dlaqr2,DLAQR2)(int *wantt, int *wantz, int *n, int *ktop, int *kbot, int *nw, double *h, int *ldh, int *iloz, int *ihiz, double *z, int *ldz, int *ns, int *nd, double *sr, double *si, double *v, int *ldv, int *nh, double *t, int *ldt, int *nv, double *wv, int *ldwv, double *work, int *lwork);
+void F_FUNC(dlaqr3,DLAQR3)(int *wantt, int *wantz, int *n, int *ktop, int *kbot, int *nw, double *h, int *ldh, int *iloz, int *ihiz, double *z, int *ldz, int *ns, int *nd, double *sr, double *si, double *v, int *ldv, int *nh, double *t, int *ldt, int *nv, double *wv, int *ldwv, double *work, int *lwork);
+void F_FUNC(dlaqr4,DLAQR4)(int *wantt, int *wantz, int *n, int *ilo, int *ihi, double *h, int *ldh, double *wr, double *wi, int *iloz, int *ihiz, double *z, int *ldz, double *work, int *lwork, int *info);
+void F_FUNC(dlaqr5,DLAQR5)(int *wantt, int *wantz, int *kacc22, int *n, int *ktop, int *kbot, int *nshfts, double *sr, double *si, double *h, int *ldh, int *iloz, int *ihiz, double *z, int *ldz, double *v, int *ldv, double *u, int *ldu, int *nv, double *wv, int *ldwv, int *nh, double *wh, int *ldwh);
+void F_FUNC(dlaqsb,DLAQSB)(char *uplo, int *n, int *kd, double *ab, int *ldab, double *s, double *scond, double *amax, char *equed);
+void F_FUNC(dlaqsp,DLAQSP)(char *uplo, int *n, double *ap, double *s, double *scond, double *amax, char *equed);
+void F_FUNC(dlaqsy,DLAQSY)(char *uplo, int *n, double *a, int *lda, double *s, double *scond, double *amax, char *equed);
+void F_FUNC(dlaqtr,DLAQTR)(int *ltran, int *lreal, int *n, double *t, int *ldt, double *b, double *w, double *scale, double *x, double *work, int *info);
+void F_FUNC(dlar1v,DLAR1V)(int *n, int *b1, int *bn, double *lambda, double *d, double *l, double *ld, double *lld, double *pivmin, double *gaptol, double *z, int *wantnc, int *negcnt, double *ztz, double *mingma, int *r, int *isuppz, double *nrminv, double *resid, double *rqcorr, double *work);
+void F_FUNC(dlar2v,DLAR2V)(int *n, double *x, double *y, double *z, int *incx, double *c, double *s, int *incc);
+void F_FUNC(dlarf,DLARF)(char *side, int *m, int *n, double *v, int *incv, double *tau, double *c, int *ldc, double *work);
+void F_FUNC(dlarfb,DLARFB)(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, double *v, int *ldv, double *t, int *ldt, double *c, int *ldc, double *work, int *ldwork);
+void F_FUNC(dlarfg,DLARFG)(int *n, double *alpha, double *x, int *incx, double *tau);
+void F_FUNC(dlarfgp,DLARFGP)(int *n, double *alpha, double *x, int *incx, double *tau);
+void F_FUNC(dlarft,DLARFT)(char *direct, char *storev, int *n, int *k, double *v, int *ldv, double *tau, double *t, int *ldt);
+void F_FUNC(dlarfx,DLARFX)(char *side, int *m, int *n, double *v, double *tau, double *c, int *ldc, double *work);
+void F_FUNC(dlargv,DLARGV)(int *n, double *x, int *incx, double *y, int *incy, double *c, int *incc);
+void F_FUNC(dlarnv,DLARNV)(int *idist, int *iseed, int *n, double *x);
+void F_FUNC(dlarra,DLARRA)(int *n, double *d, double *e, double *e2, double *spltol, double *tnrm, int *nsplit, int *isplit, int *info);
+void F_FUNC(dlarrb,DLARRB)(int *n, double *d, double *lld, int *ifirst, int *ilast, double *rtol1, double *rtol2, int *offset, double *w, double *wgap, double *werr, double *work, int *iwork, double *pivmin, double *spdiam, int *twist, int *info);
+void F_FUNC(dlarrc,DLARRC)(char *jobt, int *n, double *vl, double *vu, double *d, double *e, double *pivmin, int *eigcnt, int *lcnt, int *rcnt, int *info);
+void F_FUNC(dlarrd,DLARRD)(char *range, char *order, int *n, double *vl, double *vu, int *il, int *iu, double *gers, double *reltol, double *d, double *e, double *e2, double *pivmin, int *nsplit, int *isplit, int *m, double *w, double *werr, double *wl, double *wu, int *iblock, int *indexw, double *work, int *iwork, int *info);
+void F_FUNC(dlarre,DLARRE)(char *range, int *n, double *vl, double *vu, int *il, int *iu, double *d, double *e, double *e2, double *rtol1, double *rtol2, double *spltol, int *nsplit, int *isplit, int *m, double *w, double *werr, double *wgap, int *iblock, int *indexw, double *gers, double *pivmin, double *work, int *iwork, int *info);
+void F_FUNC(dlarrf,DLARRF)(int *n, double *d, double *l, double *ld, int *clstrt, int *clend, double *w, double *wgap, double *werr, double *spdiam, double *clgapl, double *clgapr, double *pivmin, double *sigma, double *dplus, double *lplus, double *work, int *info);
+void F_FUNC(dlarrj,DLARRJ)(int *n, double *d, double *e2, int *ifirst, int *ilast, double *rtol, int *offset, double *w, double *werr, double *work, int *iwork, double *pivmin, double *spdiam, int *info);
+void F_FUNC(dlarrk,DLARRK)(int *n, int *iw, double *gl, double *gu, double *d, double *e2, double *pivmin, double *reltol, double *w, double *werr, int *info);
+void F_FUNC(dlarrr,DLARRR)(int *n, double *d, double *e, int *info);
+void F_FUNC(dlarrv,DLARRV)(int *n, double *vl, double *vu, double *d, double *l, double *pivmin, int *isplit, int *m, int *dol, int *dou, double *minrgp, double *rtol1, double *rtol2, double *w, double *werr, double *wgap, int *iblock, int *indexw, double *gers, double *z, int *ldz, int *isuppz, double *work, int *iwork, int *info);
+void F_FUNC(dlartg,DLARTG)(double *f, double *g, double *cs, double *sn, double *r);
+void F_FUNC(dlartgp,DLARTGP)(double *f, double *g, double *cs, double *sn, double *r);
+void F_FUNC(dlartgs,DLARTGS)(double *x, double *y, double *sigma, double *cs, double *sn);
+void F_FUNC(dlartv,DLARTV)(int *n, double *x, int *incx, double *y, int *incy, double *c, double *s, int *incc);
+void F_FUNC(dlaruv,DLARUV)(int *iseed, int *n, double *x);
+void F_FUNC(dlarz,DLARZ)(char *side, int *m, int *n, int *l, double *v, int *incv, double *tau, double *c, int *ldc, double *work);
+void F_FUNC(dlarzb,DLARZB)(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, double *v, int *ldv, double *t, int *ldt, double *c, int *ldc, double *work, int *ldwork);
+void F_FUNC(dlarzt,DLARZT)(char *direct, char *storev, int *n, int *k, double *v, int *ldv, double *tau, double *t, int *ldt);
+void F_FUNC(dlas2,DLAS2)(double *f, double *g, double *h, double *ssmin, double *ssmax);
+void F_FUNC(dlascl,DLASCL)(char *type_bn, int *kl, int *ku, double *cfrom, double *cto, int *m, int *n, double *a, int *lda, int *info);
+void F_FUNC(dlasd0,DLASD0)(int *n, int *sqre, double *d, double *e, double *u, int *ldu, double *vt, int *ldvt, int *smlsiz, int *iwork, double *work, int *info);
+void F_FUNC(dlasd1,DLASD1)(int *nl, int *nr, int *sqre, double *d, double *alpha, double *beta, double *u, int *ldu, double *vt, int *ldvt, int *idxq, int *iwork, double *work, int *info);
+void F_FUNC(dlasd2,DLASD2)(int *nl, int *nr, int *sqre, int *k, double *d, double *z, double *alpha, double *beta, double *u, int *ldu, double *vt, int *ldvt, double *dsigma, double *u2, int *ldu2, double *vt2, int *ldvt2, int *idxp, int *idx, int *idxc, int *idxq, int *coltyp, int *info);
+void F_FUNC(dlasd3,DLASD3)(int *nl, int *nr, int *sqre, int *k, double *d, double *q, int *ldq, double *dsigma, double *u, int *ldu, double *u2, int *ldu2, double *vt, int *ldvt, double *vt2, int *ldvt2, int *idxc, int *ctot, double *z, int *info);
+void F_FUNC(dlasd4,DLASD4)(int *n, int *i, double *d, double *z, double *delta, double *rho, double *sigma, double *work, int *info);
+void F_FUNC(dlasd5,DLASD5)(int *i, double *d, double *z, double *delta, double *rho, double *dsigma, double *work);
+void F_FUNC(dlasd6,DLASD6)(int *icompq, int *nl, int *nr, int *sqre, double *d, double *vf, double *vl, double *alpha, double *beta, int *idxq, int *perm, int *givptr, int *givcol, int *ldgcol, double *givnum, int *ldgnum, double *poles, double *difl, double *difr, double *z, int *k, double *c, double *s, double *work, int *iwork, int *info);
+void F_FUNC(dlasd7,DLASD7)(int *icompq, int *nl, int *nr, int *sqre, int *k, double *d, double *z, double *zw, double *vf, double *vfw, double *vl, double *vlw, double *alpha, double *beta, double *dsigma, int *idx, int *idxp, int *idxq, int *perm, int *givptr, int *givcol, int *ldgcol, double *givnum, int *ldgnum, double *c, double *s, int *info);
+void F_FUNC(dlasd8,DLASD8)(int *icompq, int *k, double *d, double *z, double *vf, double *vl, double *difl, double *difr, int *lddifr, double *dsigma, double *work, int *info);
+void F_FUNC(dlasda,DLASDA)(int *icompq, int *smlsiz, int *n, int *sqre, double *d, double *e, double *u, int *ldu, double *vt, int *k, double *difl, double *difr, double *z, double *poles, int *givptr, int *givcol, int *ldgcol, int *perm, double *givnum, double *c, double *s, double *work, int *iwork, int *info);
+void F_FUNC(dlasdq,DLASDQ)(char *uplo, int *sqre, int *n, int *ncvt, int *nru, int *ncc, double *d, double *e, double *vt, int *ldvt, double *u, int *ldu, double *c, int *ldc, double *work, int *info);
+void F_FUNC(dlasdt,DLASDT)(int *n, int *lvl, int *nd, int *inode, int *ndiml, int *ndimr, int *msub);
+void F_FUNC(dlaset,DLASET)(char *uplo, int *m, int *n, double *alpha, double *beta, double *a, int *lda);
+void F_FUNC(dlasq1,DLASQ1)(int *n, double *d, double *e, double *work, int *info);
+void F_FUNC(dlasq2,DLASQ2)(int *n, double *z, int *info);
+void F_FUNC(dlasq3,DLASQ3)(int *i0, int *n0, double *z, int *pp, double *dmin, double *sigma, double *desig, double *qmax, int *nfail, int *iter, int *ndiv, int *ieee, int *ttype, double *dmin1, double *dmin2, double *dn, double *dn1, double *dn2, double *g, double *tau);
+void F_FUNC(dlasq4,DLASQ4)(int *i0, int *n0, double *z, int *pp, int *n0in, double *dmin, double *dmin1, double *dmin2, double *dn, double *dn1, double *dn2, double *tau, int *ttype, double *g);
+void F_FUNC(dlasq6,DLASQ6)(int *i0, int *n0, double *z, int *pp, double *dmin, double *dmin1, double *dmin2, double *dn, double *dnm1, double *dnm2);
+void F_FUNC(dlasr,DLASR)(char *side, char *pivot, char *direct, int *m, int *n, double *c, double *s, double *a, int *lda);
+void F_FUNC(dlasrt,DLASRT)(char *id, int *n, double *d, int *info);
+void F_FUNC(dlassq,DLASSQ)(int *n, double *x, int *incx, double *scale, double *sumsq);
+void F_FUNC(dlasv2,DLASV2)(double *f, double *g, double *h, double *ssmin, double *ssmax, double *snr, double *csr, double *snl, double *csl);
+void F_FUNC(dlaswp,DLASWP)(int *n, double *a, int *lda, int *k1, int *k2, int *ipiv, int *incx);
+void F_FUNC(dlasy2,DLASY2)(int *ltranl, int *ltranr, int *isgn, int *n1, int *n2, double *tl, int *ldtl, double *tr, int *ldtr, double *b, int *ldb, double *scale, double *x, int *ldx, double *xnorm, int *info);
+void F_FUNC(dlasyf,DLASYF)(char *uplo, int *n, int *nb, int *kb, double *a, int *lda, int *ipiv, double *w, int *ldw, int *info);
+void F_FUNC(dlat2s,DLAT2S)(char *uplo, int *n, double *a, int *lda, float *sa, int *ldsa, int *info);
+void F_FUNC(dlatbs,DLATBS)(char *uplo, char *trans, char *diag, char *normin, int *n, int *kd, double *ab, int *ldab, double *x, double *scale, double *cnorm, int *info);
+void F_FUNC(dlatdf,DLATDF)(int *ijob, int *n, double *z, int *ldz, double *rhs, double *rdsum, double *rdscal, int *ipiv, int *jpiv);
+void F_FUNC(dlatps,DLATPS)(char *uplo, char *trans, char *diag, char *normin, int *n, double *ap, double *x, double *scale, double *cnorm, int *info);
+void F_FUNC(dlatrd,DLATRD)(char *uplo, int *n, int *nb, double *a, int *lda, double *e, double *tau, double *w, int *ldw);
+void F_FUNC(dlatrs,DLATRS)(char *uplo, char *trans, char *diag, char *normin, int *n, double *a, int *lda, double *x, double *scale, double *cnorm, int *info);
+void F_FUNC(dlatrz,DLATRZ)(int *m, int *n, int *l, double *a, int *lda, double *tau, double *work);
+void F_FUNC(dlauu2,DLAUU2)(char *uplo, int *n, double *a, int *lda, int *info);
+void F_FUNC(dlauum,DLAUUM)(char *uplo, int *n, double *a, int *lda, int *info);
+void F_FUNC(dopgtr,DOPGTR)(char *uplo, int *n, double *ap, double *tau, double *q, int *ldq, double *work, int *info);
+void F_FUNC(dopmtr,DOPMTR)(char *side, char *uplo, char *trans, int *m, int *n, double *ap, double *tau, double *c, int *ldc, double *work, int *info);
+void F_FUNC(dorbdb,DORBDB)(char *trans, char *signs, int *m, int *p, int *q, double *x11, int *ldx11, double *x12, int *ldx12, double *x21, int *ldx21, double *x22, int *ldx22, double *theta, double *phi, double *taup1, double *taup2, double *tauq1, double *tauq2, double *work, int *lwork, int *info);
+void F_FUNC(dorcsd,DORCSD)(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, char *signs, int *m, int *p, int *q, double *x11, int *ldx11, double *x12, int *ldx12, double *x21, int *ldx21, double *x22, int *ldx22, double *theta, double *u1, int *ldu1, double *u2, int *ldu2, double *v1t, int *ldv1t, double *v2t, int *ldv2t, double *work, int *lwork, int *iwork, int *info);
+void F_FUNC(dorg2l,DORG2L)(int *m, int *n, int *k, double *a, int *lda, double *tau, double *work, int *info);
+void F_FUNC(dorg2r,DORG2R)(int *m, int *n, int *k, double *a, int *lda, double *tau, double *work, int *info);
+void F_FUNC(dorgbr,DORGBR)(char *vect, int *m, int *n, int *k, double *a, int *lda, double *tau, double *work, int *lwork, int *info);
+void F_FUNC(dorghr,DORGHR)(int *n, int *ilo, int *ihi, double *a, int *lda, double *tau, double *work, int *lwork, int *info);
+void F_FUNC(dorgl2,DORGL2)(int *m, int *n, int *k, double *a, int *lda, double *tau, double *work, int *info);
+void F_FUNC(dorglq,DORGLQ)(int *m, int *n, int *k, double *a, int *lda, double *tau, double *work, int *lwork, int *info);
+void F_FUNC(dorgql,DORGQL)(int *m, int *n, int *k, double *a, int *lda, double *tau, double *work, int *lwork, int *info);
+void F_FUNC(dorgqr,DORGQR)(int *m, int *n, int *k, double *a, int *lda, double *tau, double *work, int *lwork, int *info);
+void F_FUNC(dorgr2,DORGR2)(int *m, int *n, int *k, double *a, int *lda, double *tau, double *work, int *info);
+void F_FUNC(dorgrq,DORGRQ)(int *m, int *n, int *k, double *a, int *lda, double *tau, double *work, int *lwork, int *info);
+void F_FUNC(dorgtr,DORGTR)(char *uplo, int *n, double *a, int *lda, double *tau, double *work, int *lwork, int *info);
+void F_FUNC(dorm2l,DORM2L)(char *side, char *trans, int *m, int *n, int *k, double *a, int *lda, double *tau, double *c, int *ldc, double *work, int *info);
+void F_FUNC(dorm2r,DORM2R)(char *side, char *trans, int *m, int *n, int *k, double *a, int *lda, double *tau, double *c, int *ldc, double *work, int *info);
+void F_FUNC(dormbr,DORMBR)(char *vect, char *side, char *trans, int *m, int *n, int *k, double *a, int *lda, double *tau, double *c, int *ldc, double *work, int *lwork, int *info);
+void F_FUNC(dormhr,DORMHR)(char *side, char *trans, int *m, int *n, int *ilo, int *ihi, double *a, int *lda, double *tau, double *c, int *ldc, double *work, int *lwork, int *info);
+void F_FUNC(dorml2,DORML2)(char *side, char *trans, int *m, int *n, int *k, double *a, int *lda, double *tau, double *c, int *ldc, double *work, int *info);
+void F_FUNC(dormlq,DORMLQ)(char *side, char *trans, int *m, int *n, int *k, double *a, int *lda, double *tau, double *c, int *ldc, double *work, int *lwork, int *info);
+void F_FUNC(dormql,DORMQL)(char *side, char *trans, int *m, int *n, int *k, double *a, int *lda, double *tau, double *c, int *ldc, double *work, int *lwork, int *info);
+void F_FUNC(dormqr,DORMQR)(char *side, char *trans, int *m, int *n, int *k, double *a, int *lda, double *tau, double *c, int *ldc, double *work, int *lwork, int *info);
+void F_FUNC(dormr2,DORMR2)(char *side, char *trans, int *m, int *n, int *k, double *a, int *lda, double *tau, double *c, int *ldc, double *work, int *info);
+void F_FUNC(dormr3,DORMR3)(char *side, char *trans, int *m, int *n, int *k, int *l, double *a, int *lda, double *tau, double *c, int *ldc, double *work, int *info);
+void F_FUNC(dormrq,DORMRQ)(char *side, char *trans, int *m, int *n, int *k, double *a, int *lda, double *tau, double *c, int *ldc, double *work, int *lwork, int *info);
+void F_FUNC(dormrz,DORMRZ)(char *side, char *trans, int *m, int *n, int *k, int *l, double *a, int *lda, double *tau, double *c, int *ldc, double *work, int *lwork, int *info);
+void F_FUNC(dormtr,DORMTR)(char *side, char *uplo, char *trans, int *m, int *n, double *a, int *lda, double *tau, double *c, int *ldc, double *work, int *lwork, int *info);
+void F_FUNC(dpbcon,DPBCON)(char *uplo, int *n, int *kd, double *ab, int *ldab, double *anorm, double *rcond, double *work, int *iwork, int *info);
+void F_FUNC(dpbequ,DPBEQU)(char *uplo, int *n, int *kd, double *ab, int *ldab, double *s, double *scond, double *amax, int *info);
+void F_FUNC(dpbrfs,DPBRFS)(char *uplo, int *n, int *kd, int *nrhs, double *ab, int *ldab, double *afb, int *ldafb, double *b, int *ldb, double *x, int *ldx, double *ferr, double *berr, double *work, int *iwork, int *info);
+void F_FUNC(dpbstf,DPBSTF)(char *uplo, int *n, int *kd, double *ab, int *ldab, int *info);
+void F_FUNC(dpbsv,DPBSV)(char *uplo, int *n, int *kd, int *nrhs, double *ab, int *ldab, double *b, int *ldb, int *info);
+void F_FUNC(dpbsvx,DPBSVX)(char *fact, char *uplo, int *n, int *kd, int *nrhs, double *ab, int *ldab, double *afb, int *ldafb, char *equed, double *s, double *b, int *ldb, double *x, int *ldx, double *rcond, double *ferr, double *berr, double *work, int *iwork, int *info);
+void F_FUNC(dpbtf2,DPBTF2)(char *uplo, int *n, int *kd, double *ab, int *ldab, int *info);
+void F_FUNC(dpbtrf,DPBTRF)(char *uplo, int *n, int *kd, double *ab, int *ldab, int *info);
+void F_FUNC(dpbtrs,DPBTRS)(char *uplo, int *n, int *kd, int *nrhs, double *ab, int *ldab, double *b, int *ldb, int *info);
+void F_FUNC(dpftrf,DPFTRF)(char *transr, char *uplo, int *n, double *a, int *info);
+void F_FUNC(dpftri,DPFTRI)(char *transr, char *uplo, int *n, double *a, int *info);
+void F_FUNC(dpftrs,DPFTRS)(char *transr, char *uplo, int *n, int *nrhs, double *a, double *b, int *ldb, int *info);
+void F_FUNC(dpocon,DPOCON)(char *uplo, int *n, double *a, int *lda, double *anorm, double *rcond, double *work, int *iwork, int *info);
+void F_FUNC(dpoequ,DPOEQU)(int *n, double *a, int *lda, double *s, double *scond, double *amax, int *info);
+void F_FUNC(dpoequb,DPOEQUB)(int *n, double *a, int *lda, double *s, double *scond, double *amax, int *info);
+void F_FUNC(dporfs,DPORFS)(char *uplo, int *n, int *nrhs, double *a, int *lda, double *af, int *ldaf, double *b, int *ldb, double *x, int *ldx, double *ferr, double *berr, double *work, int *iwork, int *info);
+void F_FUNC(dposv,DPOSV)(char *uplo, int *n, int *nrhs, double *a, int *lda, double *b, int *ldb, int *info);
+void F_FUNC(dposvx,DPOSVX)(char *fact, char *uplo, int *n, int *nrhs, double *a, int *lda, double *af, int *ldaf, char *equed, double *s, double *b, int *ldb, double *x, int *ldx, double *rcond, double *ferr, double *berr, double *work, int *iwork, int *info);
+void F_FUNC(dpotf2,DPOTF2)(char *uplo, int *n, double *a, int *lda, int *info);
+void F_FUNC(dpotrf,DPOTRF)(char *uplo, int *n, double *a, int *lda, int *info);
+void F_FUNC(dpotri,DPOTRI)(char *uplo, int *n, double *a, int *lda, int *info);
+void F_FUNC(dpotrs,DPOTRS)(char *uplo, int *n, int *nrhs, double *a, int *lda, double *b, int *ldb, int *info);
+void F_FUNC(dppcon,DPPCON)(char *uplo, int *n, double *ap, double *anorm, double *rcond, double *work, int *iwork, int *info);
+void F_FUNC(dppequ,DPPEQU)(char *uplo, int *n, double *ap, double *s, double *scond, double *amax, int *info);
+void F_FUNC(dpprfs,DPPRFS)(char *uplo, int *n, int *nrhs, double *ap, double *afp, double *b, int *ldb, double *x, int *ldx, double *ferr, double *berr, double *work, int *iwork, int *info);
+void F_FUNC(dppsv,DPPSV)(char *uplo, int *n, int *nrhs, double *ap, double *b, int *ldb, int *info);
+void F_FUNC(dppsvx,DPPSVX)(char *fact, char *uplo, int *n, int *nrhs, double *ap, double *afp, char *equed, double *s, double *b, int *ldb, double *x, int *ldx, double *rcond, double *ferr, double *berr, double *work, int *iwork, int *info);
+void F_FUNC(dpptrf,DPPTRF)(char *uplo, int *n, double *ap, int *info);
+void F_FUNC(dpptri,DPPTRI)(char *uplo, int *n, double *ap, int *info);
+void F_FUNC(dpptrs,DPPTRS)(char *uplo, int *n, int *nrhs, double *ap, double *b, int *ldb, int *info);
+void F_FUNC(dpstf2,DPSTF2)(char *uplo, int *n, double *a, int *lda, int *piv, int *rank, double *tol, double *work, int *info);
+void F_FUNC(dpstrf,DPSTRF)(char *uplo, int *n, double *a, int *lda, int *piv, int *rank, double *tol, double *work, int *info);
+void F_FUNC(dptcon,DPTCON)(int *n, double *d, double *e, double *anorm, double *rcond, double *work, int *info);
+void F_FUNC(dpteqr,DPTEQR)(char *compz, int *n, double *d, double *e, double *z, int *ldz, double *work, int *info);
+void F_FUNC(dptrfs,DPTRFS)(int *n, int *nrhs, double *d, double *e, double *df, double *ef, double *b, int *ldb, double *x, int *ldx, double *ferr, double *berr, double *work, int *info);
+void F_FUNC(dptsv,DPTSV)(int *n, int *nrhs, double *d, double *e, double *b, int *ldb, int *info);
+void F_FUNC(dptsvx,DPTSVX)(char *fact, int *n, int *nrhs, double *d, double *e, double *df, double *ef, double *b, int *ldb, double *x, int *ldx, double *rcond, double *ferr, double *berr, double *work, int *info);
+void F_FUNC(dpttrf,DPTTRF)(int *n, double *d, double *e, int *info);
+void F_FUNC(dpttrs,DPTTRS)(int *n, int *nrhs, double *d, double *e, double *b, int *ldb, int *info);
+void F_FUNC(dptts2,DPTTS2)(int *n, int *nrhs, double *d, double *e, double *b, int *ldb);
+void F_FUNC(drscl,DRSCL)(int *n, double *sa, double *sx, int *incx);
+void F_FUNC(dsbev,DSBEV)(char *jobz, char *uplo, int *n, int *kd, double *ab, int *ldab, double *w, double *z, int *ldz, double *work, int *info);
+void F_FUNC(dsbevd,DSBEVD)(char *jobz, char *uplo, int *n, int *kd, double *ab, int *ldab, double *w, double *z, int *ldz, double *work, int *lwork, int *iwork, int *liwork, int *info);
+void F_FUNC(dsbevx,DSBEVX)(char *jobz, char *range, char *uplo, int *n, int *kd, double *ab, int *ldab, double *q, int *ldq, double *vl, double *vu, int *il, int *iu, double *abstol, int *m, double *w, double *z, int *ldz, double *work, int *iwork, int *ifail, int *info);
+void F_FUNC(dsbgst,DSBGST)(char *vect, char *uplo, int *n, int *ka, int *kb, double *ab, int *ldab, double *bb, int *ldbb, double *x, int *ldx, double *work, int *info);
+void F_FUNC(dsbgv,DSBGV)(char *jobz, char *uplo, int *n, int *ka, int *kb, double *ab, int *ldab, double *bb, int *ldbb, double *w, double *z, int *ldz, double *work, int *info);
+void F_FUNC(dsbgvd,DSBGVD)(char *jobz, char *uplo, int *n, int *ka, int *kb, double *ab, int *ldab, double *bb, int *ldbb, double *w, double *z, int *ldz, double *work, int *lwork, int *iwork, int *liwork, int *info);
+void F_FUNC(dsbgvx,DSBGVX)(char *jobz, char *range, char *uplo, int *n, int *ka, int *kb, double *ab, int *ldab, double *bb, int *ldbb, double *q, int *ldq, double *vl, double *vu, int *il, int *iu, double *abstol, int *m, double *w, double *z, int *ldz, double *work, int *iwork, int *ifail, int *info);
+void F_FUNC(dsbtrd,DSBTRD)(char *vect, char *uplo, int *n, int *kd, double *ab, int *ldab, double *d, double *e, double *q, int *ldq, double *work, int *info);
+void F_FUNC(dsfrk,DSFRK)(char *transr, char *uplo, char *trans, int *n, int *k, double *alpha, double *a, int *lda, double *beta, double *c);
+void F_FUNC(dsgesv,DSGESV)(int *n, int *nrhs, double *a, int *lda, int *ipiv, double *b, int *ldb, double *x, int *ldx, double *work, float *swork, int *iter, int *info);
+void F_FUNC(dspcon,DSPCON)(char *uplo, int *n, double *ap, int *ipiv, double *anorm, double *rcond, double *work, int *iwork, int *info);
+void F_FUNC(dspev,DSPEV)(char *jobz, char *uplo, int *n, double *ap, double *w, double *z, int *ldz, double *work, int *info);
+void F_FUNC(dspevd,DSPEVD)(char *jobz, char *uplo, int *n, double *ap, double *w, double *z, int *ldz, double *work, int *lwork, int *iwork, int *liwork, int *info);
+void F_FUNC(dspevx,DSPEVX)(char *jobz, char *range, char *uplo, int *n, double *ap, double *vl, double *vu, int *il, int *iu, double *abstol, int *m, double *w, double *z, int *ldz, double *work, int *iwork, int *ifail, int *info);
+void F_FUNC(dspgst,DSPGST)(int *itype, char *uplo, int *n, double *ap, double *bp, int *info);
+void F_FUNC(dspgv,DSPGV)(int *itype, char *jobz, char *uplo, int *n, double *ap, double *bp, double *w, double *z, int *ldz, double *work, int *info);
+void F_FUNC(dspgvd,DSPGVD)(int *itype, char *jobz, char *uplo, int *n, double *ap, double *bp, double *w, double *z, int *ldz, double *work, int *lwork, int *iwork, int *liwork, int *info);
+void F_FUNC(dspgvx,DSPGVX)(int *itype, char *jobz, char *range, char *uplo, int *n, double *ap, double *bp, double *vl, double *vu, int *il, int *iu, double *abstol, int *m, double *w, double *z, int *ldz, double *work, int *iwork, int *ifail, int *info);
+void F_FUNC(dsposv,DSPOSV)(char *uplo, int *n, int *nrhs, double *a, int *lda, double *b, int *ldb, double *x, int *ldx, double *work, float *swork, int *iter, int *info);
+void F_FUNC(dsprfs,DSPRFS)(char *uplo, int *n, int *nrhs, double *ap, double *afp, int *ipiv, double *b, int *ldb, double *x, int *ldx, double *ferr, double *berr, double *work, int *iwork, int *info);
+void F_FUNC(dspsv,DSPSV)(char *uplo, int *n, int *nrhs, double *ap, int *ipiv, double *b, int *ldb, int *info);
+void F_FUNC(dspsvx,DSPSVX)(char *fact, char *uplo, int *n, int *nrhs, double *ap, double *afp, int *ipiv, double *b, int *ldb, double *x, int *ldx, double *rcond, double *ferr, double *berr, double *work, int *iwork, int *info);
+void F_FUNC(dsptrd,DSPTRD)(char *uplo, int *n, double *ap, double *d, double *e, double *tau, int *info);
+void F_FUNC(dsptrf,DSPTRF)(char *uplo, int *n, double *ap, int *ipiv, int *info);
+void F_FUNC(dsptri,DSPTRI)(char *uplo, int *n, double *ap, int *ipiv, double *work, int *info);
+void F_FUNC(dsptrs,DSPTRS)(char *uplo, int *n, int *nrhs, double *ap, int *ipiv, double *b, int *ldb, int *info);
+void F_FUNC(dstebz,DSTEBZ)(char *range, char *order, int *n, double *vl, double *vu, int *il, int *iu, double *abstol, double *d, double *e, int *m, int *nsplit, double *w, int *iblock, int *isplit, double *work, int *iwork, int *info);
+void F_FUNC(dstedc,DSTEDC)(char *compz, int *n, double *d, double *e, double *z, int *ldz, double *work, int *lwork, int *iwork, int *liwork, int *info);
+void F_FUNC(dstegr,DSTEGR)(char *jobz, char *range, int *n, double *d, double *e, double *vl, double *vu, int *il, int *iu, double *abstol, int *m, double *w, double *z, int *ldz, int *isuppz, double *work, int *lwork, int *iwork, int *liwork, int *info);
+void F_FUNC(dstein,DSTEIN)(int *n, double *d, double *e, int *m, double *w, int *iblock, int *isplit, double *z, int *ldz, double *work, int *iwork, int *ifail, int *info);
+void F_FUNC(dstemr,DSTEMR)(char *jobz, char *range, int *n, double *d, double *e, double *vl, double *vu, int *il, int *iu, int *m, double *w, double *z, int *ldz, int *nzc, int *isuppz, int *tryrac, double *work, int *lwork, int *iwork, int *liwork, int *info);
+void F_FUNC(dsteqr,DSTEQR)(char *compz, int *n, double *d, double *e, double *z, int *ldz, double *work, int *info);
+void F_FUNC(dsterf,DSTERF)(int *n, double *d, double *e, int *info);
+void F_FUNC(dstev,DSTEV)(char *jobz, int *n, double *d, double *e, double *z, int *ldz, double *work, int *info);
+void F_FUNC(dstevd,DSTEVD)(char *jobz, int *n, double *d, double *e, double *z, int *ldz, double *work, int *lwork, int *iwork, int *liwork, int *info);
+void F_FUNC(dstevr,DSTEVR)(char *jobz, char *range, int *n, double *d, double *e, double *vl, double *vu, int *il, int *iu, double *abstol, int *m, double *w, double *z, int *ldz, int *isuppz, double *work, int *lwork, int *iwork, int *liwork, int *info);
+void F_FUNC(dstevx,DSTEVX)(char *jobz, char *range, int *n, double *d, double *e, double *vl, double *vu, int *il, int *iu, double *abstol, int *m, double *w, double *z, int *ldz, double *work, int *iwork, int *ifail, int *info);
+void F_FUNC(dsycon,DSYCON)(char *uplo, int *n, double *a, int *lda, int *ipiv, double *anorm, double *rcond, double *work, int *iwork, int *info);
+void F_FUNC(dsyconv,DSYCONV)(char *uplo, char *way, int *n, double *a, int *lda, int *ipiv, double *work, int *info);
+void F_FUNC(dsyequb,DSYEQUB)(char *uplo, int *n, double *a, int *lda, double *s, double *scond, double *amax, double *work, int *info);
+void F_FUNC(dsyev,DSYEV)(char *jobz, char *uplo, int *n, double *a, int *lda, double *w, double *work, int *lwork, int *info);
+void F_FUNC(dsyevd,DSYEVD)(char *jobz, char *uplo, int *n, double *a, int *lda, double *w, double *work, int *lwork, int *iwork, int *liwork, int *info);
+void F_FUNC(dsyevr,DSYEVR)(char *jobz, char *range, char *uplo, int *n, double *a, int *lda, double *vl, double *vu, int *il, int *iu, double *abstol, int *m, double *w, double *z, int *ldz, int *isuppz, double *work, int *lwork, int *iwork, int *liwork, int *info);
+void F_FUNC(dsyevx,DSYEVX)(char *jobz, char *range, char *uplo, int *n, double *a, int *lda, double *vl, double *vu, int *il, int *iu, double *abstol, int *m, double *w, double *z, int *ldz, double *work, int *lwork, int *iwork, int *ifail, int *info);
+void F_FUNC(dsygs2,DSYGS2)(int *itype, char *uplo, int *n, double *a, int *lda, double *b, int *ldb, int *info);
+void F_FUNC(dsygst,DSYGST)(int *itype, char *uplo, int *n, double *a, int *lda, double *b, int *ldb, int *info);
+void F_FUNC(dsygv,DSYGV)(int *itype, char *jobz, char *uplo, int *n, double *a, int *lda, double *b, int *ldb, double *w, double *work, int *lwork, int *info);
+void F_FUNC(dsygvd,DSYGVD)(int *itype, char *jobz, char *uplo, int *n, double *a, int *lda, double *b, int *ldb, double *w, double *work, int *lwork, int *iwork, int *liwork, int *info);
+void F_FUNC(dsygvx,DSYGVX)(int *itype, char *jobz, char *range, char *uplo, int *n, double *a, int *lda, double *b, int *ldb, double *vl, double *vu, int *il, int *iu, double *abstol, int *m, double *w, double *z, int *ldz, double *work, int *lwork, int *iwork, int *ifail, int *info);
+void F_FUNC(dsyrfs,DSYRFS)(char *uplo, int *n, int *nrhs, double *a, int *lda, double *af, int *ldaf, int *ipiv, double *b, int *ldb, double *x, int *ldx, double *ferr, double *berr, double *work, int *iwork, int *info);
+void F_FUNC(dsysv,DSYSV)(char *uplo, int *n, int *nrhs, double *a, int *lda, int *ipiv, double *b, int *ldb, double *work, int *lwork, int *info);
+void F_FUNC(dsysvx,DSYSVX)(char *fact, char *uplo, int *n, int *nrhs, double *a, int *lda, double *af, int *ldaf, int *ipiv, double *b, int *ldb, double *x, int *ldx, double *rcond, double *ferr, double *berr, double *work, int *lwork, int *iwork, int *info);
+void F_FUNC(dsyswapr,DSYSWAPR)(char *uplo, int *n, double *a, int *lda, int *i1, int *i2);
+void F_FUNC(dsytd2,DSYTD2)(char *uplo, int *n, double *a, int *lda, double *d, double *e, double *tau, int *info);
+void F_FUNC(dsytf2,DSYTF2)(char *uplo, int *n, double *a, int *lda, int *ipiv, int *info);
+void F_FUNC(dsytrd,DSYTRD)(char *uplo, int *n, double *a, int *lda, double *d, double *e, double *tau, double *work, int *lwork, int *info);
+void F_FUNC(dsytrf,DSYTRF)(char *uplo, int *n, double *a, int *lda, int *ipiv, double *work, int *lwork, int *info);
+void F_FUNC(dsytri,DSYTRI)(char *uplo, int *n, double *a, int *lda, int *ipiv, double *work, int *info);
+void F_FUNC(dsytri2,DSYTRI2)(char *uplo, int *n, double *a, int *lda, int *ipiv, double *work, int *lwork, int *info);
+void F_FUNC(dsytri2x,DSYTRI2X)(char *uplo, int *n, double *a, int *lda, int *ipiv, double *work, int *nb, int *info);
+void F_FUNC(dsytrs,DSYTRS)(char *uplo, int *n, int *nrhs, double *a, int *lda, int *ipiv, double *b, int *ldb, int *info);
+void F_FUNC(dsytrs2,DSYTRS2)(char *uplo, int *n, int *nrhs, double *a, int *lda, int *ipiv, double *b, int *ldb, double *work, int *info);
+void F_FUNC(dtbcon,DTBCON)(char *norm, char *uplo, char *diag, int *n, int *kd, double *ab, int *ldab, double *rcond, double *work, int *iwork, int *info);
+void F_FUNC(dtbrfs,DTBRFS)(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, double *ab, int *ldab, double *b, int *ldb, double *x, int *ldx, double *ferr, double *berr, double *work, int *iwork, int *info);
+void F_FUNC(dtbtrs,DTBTRS)(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, double *ab, int *ldab, double *b, int *ldb, int *info);
+void F_FUNC(dtfsm,DTFSM)(char *transr, char *side, char *uplo, char *trans, char *diag, int *m, int *n, double *alpha, double *a, double *b, int *ldb);
+void F_FUNC(dtftri,DTFTRI)(char *transr, char *uplo, char *diag, int *n, double *a, int *info);
+void F_FUNC(dtfttp,DTFTTP)(char *transr, char *uplo, int *n, double *arf, double *ap, int *info);
+void F_FUNC(dtfttr,DTFTTR)(char *transr, char *uplo, int *n, double *arf, double *a, int *lda, int *info);
+void F_FUNC(dtgevc,DTGEVC)(char *side, char *howmny, int *select, int *n, double *s, int *lds, double *p, int *ldp, double *vl, int *ldvl, double *vr, int *ldvr, int *mm, int *m, double *work, int *info);
+void F_FUNC(dtgex2,DTGEX2)(int *wantq, int *wantz, int *n, double *a, int *lda, double *b, int *ldb, double *q, int *ldq, double *z, int *ldz, int *j1, int *n1, int *n2, double *work, int *lwork, int *info);
+void F_FUNC(dtgexc,DTGEXC)(int *wantq, int *wantz, int *n, double *a, int *lda, double *b, int *ldb, double *q, int *ldq, double *z, int *ldz, int *ifst, int *ilst, double *work, int *lwork, int *info);
+void F_FUNC(dtgsen,DTGSEN)(int *ijob, int *wantq, int *wantz, int *select, int *n, double *a, int *lda, double *b, int *ldb, double *alphar, double *alphai, double *beta, double *q, int *ldq, double *z, int *ldz, int *m, double *pl, double *pr, double *dif, double *work, int *lwork, int *iwork, int *liwork, int *info);
+void F_FUNC(dtgsja,DTGSJA)(char *jobu, char *jobv, char *jobq, int *m, int *p, int *n, int *k, int *l, double *a, int *lda, double *b, int *ldb, double *tola, double *tolb, double *alpha, double *beta, double *u, int *ldu, double *v, int *ldv, double *q, int *ldq, double *work, int *ncycle, int *info);
+void F_FUNC(dtgsna,DTGSNA)(char *job, char *howmny, int *select, int *n, double *a, int *lda, double *b, int *ldb, double *vl, int *ldvl, double *vr, int *ldvr, double *s, double *dif, int *mm, int *m, double *work, int *lwork, int *iwork, int *info);
+void F_FUNC(dtgsy2,DTGSY2)(char *trans, int *ijob, int *m, int *n, double *a, int *lda, double *b, int *ldb, double *c, int *ldc, double *d, int *ldd, double *e, int *lde, double *f, int *ldf, double *scale, double *rdsum, double *rdscal, int *iwork, int *pq, int *info);
+void F_FUNC(dtgsyl,DTGSYL)(char *trans, int *ijob, int *m, int *n, double *a, int *lda, double *b, int *ldb, double *c, int *ldc, double *d, int *ldd, double *e, int *lde, double *f, int *ldf, double *scale, double *dif, double *work, int *lwork, int *iwork, int *info);
+void F_FUNC(dtpcon,DTPCON)(char *norm, char *uplo, char *diag, int *n, double *ap, double *rcond, double *work, int *iwork, int *info);
+void F_FUNC(dtpmqrt,DTPMQRT)(char *side, char *trans, int *m, int *n, int *k, int *l, int *nb, double *v, int *ldv, double *t, int *ldt, double *a, int *lda, double *b, int *ldb, double *work, int *info);
+void F_FUNC(dtpqrt,DTPQRT)(int *m, int *n, int *l, int *nb, double *a, int *lda, double *b, int *ldb, double *t, int *ldt, double *work, int *info);
+void F_FUNC(dtpqrt2,DTPQRT2)(int *m, int *n, int *l, double *a, int *lda, double *b, int *ldb, double *t, int *ldt, int *info);
+void F_FUNC(dtprfb,DTPRFB)(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, double *v, int *ldv, double *t, int *ldt, double *a, int *lda, double *b, int *ldb, double *work, int *ldwork);
+void F_FUNC(dtprfs,DTPRFS)(char *uplo, char *trans, char *diag, int *n, int *nrhs, double *ap, double *b, int *ldb, double *x, int *ldx, double *ferr, double *berr, double *work, int *iwork, int *info);
+void F_FUNC(dtptri,DTPTRI)(char *uplo, char *diag, int *n, double *ap, int *info);
+void F_FUNC(dtptrs,DTPTRS)(char *uplo, char *trans, char *diag, int *n, int *nrhs, double *ap, double *b, int *ldb, int *info);
+void F_FUNC(dtpttf,DTPTTF)(char *transr, char *uplo, int *n, double *ap, double *arf, int *info);
+void F_FUNC(dtpttr,DTPTTR)(char *uplo, int *n, double *ap, double *a, int *lda, int *info);
+void F_FUNC(dtrcon,DTRCON)(char *norm, char *uplo, char *diag, int *n, double *a, int *lda, double *rcond, double *work, int *iwork, int *info);
+void F_FUNC(dtrevc,DTREVC)(char *side, char *howmny, int *select, int *n, double *t, int *ldt, double *vl, int *ldvl, double *vr, int *ldvr, int *mm, int *m, double *work, int *info);
+void F_FUNC(dtrexc,DTREXC)(char *compq, int *n, double *t, int *ldt, double *q, int *ldq, int *ifst, int *ilst, double *work, int *info);
+void F_FUNC(dtrrfs,DTRRFS)(char *uplo, char *trans, char *diag, int *n, int *nrhs, double *a, int *lda, double *b, int *ldb, double *x, int *ldx, double *ferr, double *berr, double *work, int *iwork, int *info);
+void F_FUNC(dtrsen,DTRSEN)(char *job, char *compq, int *select, int *n, double *t, int *ldt, double *q, int *ldq, double *wr, double *wi, int *m, double *s, double *sep, double *work, int *lwork, int *iwork, int *liwork, int *info);
+void F_FUNC(dtrsna,DTRSNA)(char *job, char *howmny, int *select, int *n, double *t, int *ldt, double *vl, int *ldvl, double *vr, int *ldvr, double *s, double *sep, int *mm, int *m, double *work, int *ldwork, int *iwork, int *info);
+void F_FUNC(dtrsyl,DTRSYL)(char *trana, char *tranb, int *isgn, int *m, int *n, double *a, int *lda, double *b, int *ldb, double *c, int *ldc, double *scale, int *info);
+void F_FUNC(dtrti2,DTRTI2)(char *uplo, char *diag, int *n, double *a, int *lda, int *info);
+void F_FUNC(dtrtri,DTRTRI)(char *uplo, char *diag, int *n, double *a, int *lda, int *info);
+void F_FUNC(dtrtrs,DTRTRS)(char *uplo, char *trans, char *diag, int *n, int *nrhs, double *a, int *lda, double *b, int *ldb, int *info);
+void F_FUNC(dtrttf,DTRTTF)(char *transr, char *uplo, int *n, double *a, int *lda, double *arf, int *info);
+void F_FUNC(dtrttp,DTRTTP)(char *uplo, int *n, double *a, int *lda, double *ap, int *info);
+void F_FUNC(dtzrzf,DTZRZF)(int *m, int *n, double *a, int *lda, double *tau, double *work, int *lwork, int *info);
+void F_FUNC(ilaver,ILAVER)(int *vers_major, int *vers_minor, int *vers_patch);
+void F_FUNC(sbbcsd,SBBCSD)(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, int *m, int *p, int *q, float *theta, float *phi, float *u1, int *ldu1, float *u2, int *ldu2, float *v1t, int *ldv1t, float *v2t, int *ldv2t, float *b11d, float *b11e, float *b12d, float *b12e, float *b21d, float *b21e, float *b22d, float *b22e, float *work, int *lwork, int *info);
+void F_FUNC(sbdsdc,SBDSDC)(char *uplo, char *compq, int *n, float *d, float *e, float *u, int *ldu, float *vt, int *ldvt, float *q, int *iq, float *work, int *iwork, int *info);
+void F_FUNC(sbdsqr,SBDSQR)(char *uplo, int *n, int *ncvt, int *nru, int *ncc, float *d, float *e, float *vt, int *ldvt, float *u, int *ldu, float *c, int *ldc, float *work, int *info);
+void F_FUNC(sdisna,SDISNA)(char *job, int *m, int *n, float *d, float *sep, int *info);
+void F_FUNC(sgbbrd,SGBBRD)(char *vect, int *m, int *n, int *ncc, int *kl, int *ku, float *ab, int *ldab, float *d, float *e, float *q, int *ldq, float *pt, int *ldpt, float *c, int *ldc, float *work, int *info);
+void F_FUNC(sgbcon,SGBCON)(char *norm, int *n, int *kl, int *ku, float *ab, int *ldab, int *ipiv, float *anorm, float *rcond, float *work, int *iwork, int *info);
+void F_FUNC(sgbequ,SGBEQU)(int *m, int *n, int *kl, int *ku, float *ab, int *ldab, float *r, float *c, float *rowcnd, float *colcnd, float *amax, int *info);
+void F_FUNC(sgbequb,SGBEQUB)(int *m, int *n, int *kl, int *ku, float *ab, int *ldab, float *r, float *c, float *rowcnd, float *colcnd, float *amax, int *info);
+void F_FUNC(sgbrfs,SGBRFS)(char *trans, int *n, int *kl, int *ku, int *nrhs, float *ab, int *ldab, float *afb, int *ldafb, int *ipiv, float *b, int *ldb, float *x, int *ldx, float *ferr, float *berr, float *work, int *iwork, int *info);
+void F_FUNC(sgbsv,SGBSV)(int *n, int *kl, int *ku, int *nrhs, float *ab, int *ldab, int *ipiv, float *b, int *ldb, int *info);
+void F_FUNC(sgbsvx,SGBSVX)(char *fact, char *trans, int *n, int *kl, int *ku, int *nrhs, float *ab, int *ldab, float *afb, int *ldafb, int *ipiv, char *equed, float *r, float *c, float *b, int *ldb, float *x, int *ldx, float *rcond, float *ferr, float *berr, float *work, int *iwork, int *info);
+void F_FUNC(sgbtf2,SGBTF2)(int *m, int *n, int *kl, int *ku, float *ab, int *ldab, int *ipiv, int *info);
+void F_FUNC(sgbtrf,SGBTRF)(int *m, int *n, int *kl, int *ku, float *ab, int *ldab, int *ipiv, int *info);
+void F_FUNC(sgbtrs,SGBTRS)(char *trans, int *n, int *kl, int *ku, int *nrhs, float *ab, int *ldab, int *ipiv, float *b, int *ldb, int *info);
+void F_FUNC(sgebak,SGEBAK)(char *job, char *side, int *n, int *ilo, int *ihi, float *scale, int *m, float *v, int *ldv, int *info);
+void F_FUNC(sgebal,SGEBAL)(char *job, int *n, float *a, int *lda, int *ilo, int *ihi, float *scale, int *info);
+void F_FUNC(sgebd2,SGEBD2)(int *m, int *n, float *a, int *lda, float *d, float *e, float *tauq, float *taup, float *work, int *info);
+void F_FUNC(sgebrd,SGEBRD)(int *m, int *n, float *a, int *lda, float *d, float *e, float *tauq, float *taup, float *work, int *lwork, int *info);
+void F_FUNC(sgecon,SGECON)(char *norm, int *n, float *a, int *lda, float *anorm, float *rcond, float *work, int *iwork, int *info);
+void F_FUNC(sgeequ,SGEEQU)(int *m, int *n, float *a, int *lda, float *r, float *c, float *rowcnd, float *colcnd, float *amax, int *info);
+void F_FUNC(sgeequb,SGEEQUB)(int *m, int *n, float *a, int *lda, float *r, float *c, float *rowcnd, float *colcnd, float *amax, int *info);
+void F_FUNC(sgees,SGEES)(char *jobvs, char *sort, _sselect2 *select, int *n, float *a, int *lda, int *sdim, float *wr, float *wi, float *vs, int *ldvs, float *work, int *lwork, int *bwork, int *info);
+void F_FUNC(sgeesx,SGEESX)(char *jobvs, char *sort, _sselect2 *select, char *sense, int *n, float *a, int *lda, int *sdim, float *wr, float *wi, float *vs, int *ldvs, float *rconde, float *rcondv, float *work, int *lwork, int *iwork, int *liwork, int *bwork, int *info);
+void F_FUNC(sgeev,SGEEV)(char *jobvl, char *jobvr, int *n, float *a, int *lda, float *wr, float *wi, float *vl, int *ldvl, float *vr, int *ldvr, float *work, int *lwork, int *info);
+void F_FUNC(sgeevx,SGEEVX)(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, float *a, int *lda, float *wr, float *wi, float *vl, int *ldvl, float *vr, int *ldvr, int *ilo, int *ihi, float *scale, float *abnrm, float *rconde, float *rcondv, float *work, int *lwork, int *iwork, int *info);
+void F_FUNC(sgehd2,SGEHD2)(int *n, int *ilo, int *ihi, float *a, int *lda, float *tau, float *work, int *info);
+void F_FUNC(sgehrd,SGEHRD)(int *n, int *ilo, int *ihi, float *a, int *lda, float *tau, float *work, int *lwork, int *info);
+void F_FUNC(sgejsv,SGEJSV)(char *joba, char *jobu, char *jobv, char *jobr, char *jobt, char *jobp, int *m, int *n, float *a, int *lda, float *sva, float *u, int *ldu, float *v, int *ldv, float *work, int *lwork, int *iwork, int *info);
+void F_FUNC(sgelq2,SGELQ2)(int *m, int *n, float *a, int *lda, float *tau, float *work, int *info);
+void F_FUNC(sgelqf,SGELQF)(int *m, int *n, float *a, int *lda, float *tau, float *work, int *lwork, int *info);
+void F_FUNC(sgels,SGELS)(char *trans, int *m, int *n, int *nrhs, float *a, int *lda, float *b, int *ldb, float *work, int *lwork, int *info);
+void F_FUNC(sgelsd,SGELSD)(int *m, int *n, int *nrhs, float *a, int *lda, float *b, int *ldb, float *s, float *rcond, int *rank, float *work, int *lwork, int *iwork, int *info);
+void F_FUNC(sgelss,SGELSS)(int *m, int *n, int *nrhs, float *a, int *lda, float *b, int *ldb, float *s, float *rcond, int *rank, float *work, int *lwork, int *info);
+void F_FUNC(sgelsy,SGELSY)(int *m, int *n, int *nrhs, float *a, int *lda, float *b, int *ldb, int *jpvt, float *rcond, int *rank, float *work, int *lwork, int *info);
+void F_FUNC(sgemqrt,SGEMQRT)(char *side, char *trans, int *m, int *n, int *k, int *nb, float *v, int *ldv, float *t, int *ldt, float *c, int *ldc, float *work, int *info);
+void F_FUNC(sgeql2,SGEQL2)(int *m, int *n, float *a, int *lda, float *tau, float *work, int *info);
+void F_FUNC(sgeqlf,SGEQLF)(int *m, int *n, float *a, int *lda, float *tau, float *work, int *lwork, int *info);
+void F_FUNC(sgeqp3,SGEQP3)(int *m, int *n, float *a, int *lda, int *jpvt, float *tau, float *work, int *lwork, int *info);
+void F_FUNC(sgeqr2,SGEQR2)(int *m, int *n, float *a, int *lda, float *tau, float *work, int *info);
+void F_FUNC(sgeqr2p,SGEQR2P)(int *m, int *n, float *a, int *lda, float *tau, float *work, int *info);
+void F_FUNC(sgeqrf,SGEQRF)(int *m, int *n, float *a, int *lda, float *tau, float *work, int *lwork, int *info);
+void F_FUNC(sgeqrfp,SGEQRFP)(int *m, int *n, float *a, int *lda, float *tau, float *work, int *lwork, int *info);
+void F_FUNC(sgeqrt,SGEQRT)(int *m, int *n, int *nb, float *a, int *lda, float *t, int *ldt, float *work, int *info);
+void F_FUNC(sgeqrt2,SGEQRT2)(int *m, int *n, float *a, int *lda, float *t, int *ldt, int *info);
+void F_FUNC(sgeqrt3,SGEQRT3)(int *m, int *n, float *a, int *lda, float *t, int *ldt, int *info);
+void F_FUNC(sgerfs,SGERFS)(char *trans, int *n, int *nrhs, float *a, int *lda, float *af, int *ldaf, int *ipiv, float *b, int *ldb, float *x, int *ldx, float *ferr, float *berr, float *work, int *iwork, int *info);
+void F_FUNC(sgerq2,SGERQ2)(int *m, int *n, float *a, int *lda, float *tau, float *work, int *info);
+void F_FUNC(sgerqf,SGERQF)(int *m, int *n, float *a, int *lda, float *tau, float *work, int *lwork, int *info);
+void F_FUNC(sgesc2,SGESC2)(int *n, float *a, int *lda, float *rhs, int *ipiv, int *jpiv, float *scale);
+void F_FUNC(sgesdd,SGESDD)(char *jobz, int *m, int *n, float *a, int *lda, float *s, float *u, int *ldu, float *vt, int *ldvt, float *work, int *lwork, int *iwork, int *info);
+void F_FUNC(sgesv,SGESV)(int *n, int *nrhs, float *a, int *lda, int *ipiv, float *b, int *ldb, int *info);
+void F_FUNC(sgesvd,SGESVD)(char *jobu, char *jobvt, int *m, int *n, float *a, int *lda, float *s, float *u, int *ldu, float *vt, int *ldvt, float *work, int *lwork, int *info);
+void F_FUNC(sgesvj,SGESVJ)(char *joba, char *jobu, char *jobv, int *m, int *n, float *a, int *lda, float *sva, int *mv, float *v, int *ldv, float *work, int *lwork, int *info);
+void F_FUNC(sgesvx,SGESVX)(char *fact, char *trans, int *n, int *nrhs, float *a, int *lda, float *af, int *ldaf, int *ipiv, char *equed, float *r, float *c, float *b, int *ldb, float *x, int *ldx, float *rcond, float *ferr, float *berr, float *work, int *iwork, int *info);
+void F_FUNC(sgetc2,SGETC2)(int *n, float *a, int *lda, int *ipiv, int *jpiv, int *info);
+void F_FUNC(sgetf2,SGETF2)(int *m, int *n, float *a, int *lda, int *ipiv, int *info);
+void F_FUNC(sgetrf,SGETRF)(int *m, int *n, float *a, int *lda, int *ipiv, int *info);
+void F_FUNC(sgetri,SGETRI)(int *n, float *a, int *lda, int *ipiv, float *work, int *lwork, int *info);
+void F_FUNC(sgetrs,SGETRS)(char *trans, int *n, int *nrhs, float *a, int *lda, int *ipiv, float *b, int *ldb, int *info);
+void F_FUNC(sggbak,SGGBAK)(char *job, char *side, int *n, int *ilo, int *ihi, float *lscale, float *rscale, int *m, float *v, int *ldv, int *info);
+void F_FUNC(sggbal,SGGBAL)(char *job, int *n, float *a, int *lda, float *b, int *ldb, int *ilo, int *ihi, float *lscale, float *rscale, float *work, int *info);
+void F_FUNC(sgges,SGGES)(char *jobvsl, char *jobvsr, char *sort, _sselect3 *selctg, int *n, float *a, int *lda, float *b, int *ldb, int *sdim, float *alphar, float *alphai, float *beta, float *vsl, int *ldvsl, float *vsr, int *ldvsr, float *work, int *lwork, int *bwork, int *info);
+void F_FUNC(sggesx,SGGESX)(char *jobvsl, char *jobvsr, char *sort, _sselect3 *selctg, char *sense, int *n, float *a, int *lda, float *b, int *ldb, int *sdim, float *alphar, float *alphai, float *beta, float *vsl, int *ldvsl, float *vsr, int *ldvsr, float *rconde, float *rcondv, float *work, int *lwork, int *iwork, int *liwork, int *bwork, int *info);
+void F_FUNC(sggev,SGGEV)(char *jobvl, char *jobvr, int *n, float *a, int *lda, float *b, int *ldb, float *alphar, float *alphai, float *beta, float *vl, int *ldvl, float *vr, int *ldvr, float *work, int *lwork, int *info);
+void F_FUNC(sggevx,SGGEVX)(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, float *a, int *lda, float *b, int *ldb, float *alphar, float *alphai, float *beta, float *vl, int *ldvl, float *vr, int *ldvr, int *ilo, int *ihi, float *lscale, float *rscale, float *abnrm, float *bbnrm, float *rconde, float *rcondv, float *work, int *lwork, int *iwork, int *bwork, int *info);
+void F_FUNC(sggglm,SGGGLM)(int *n, int *m, int *p, float *a, int *lda, float *b, int *ldb, float *d, float *x, float *y, float *work, int *lwork, int *info);
+void F_FUNC(sgghrd,SGGHRD)(char *compq, char *compz, int *n, int *ilo, int *ihi, float *a, int *lda, float *b, int *ldb, float *q, int *ldq, float *z, int *ldz, int *info);
+void F_FUNC(sgglse,SGGLSE)(int *m, int *n, int *p, float *a, int *lda, float *b, int *ldb, float *c, float *d, float *x, float *work, int *lwork, int *info);
+void F_FUNC(sggqrf,SGGQRF)(int *n, int *m, int *p, float *a, int *lda, float *taua, float *b, int *ldb, float *taub, float *work, int *lwork, int *info);
+void F_FUNC(sggrqf,SGGRQF)(int *m, int *p, int *n, float *a, int *lda, float *taua, float *b, int *ldb, float *taub, float *work, int *lwork, int *info);
+void F_FUNC(sgsvj0,SGSVJ0)(char *jobv, int *m, int *n, float *a, int *lda, float *d, float *sva, int *mv, float *v, int *ldv, float *eps, float *sfmin, float *tol, int *nsweep, float *work, int *lwork, int *info);
+void F_FUNC(sgsvj1,SGSVJ1)(char *jobv, int *m, int *n, int *n1, float *a, int *lda, float *d, float *sva, int *mv, float *v, int *ldv, float *eps, float *sfmin, float *tol, int *nsweep, float *work, int *lwork, int *info);
+void F_FUNC(sgtcon,SGTCON)(char *norm, int *n, float *dl, float *d, float *du, float *du2, int *ipiv, float *anorm, float *rcond, float *work, int *iwork, int *info);
+void F_FUNC(sgtrfs,SGTRFS)(char *trans, int *n, int *nrhs, float *dl, float *d, float *du, float *dlf, float *df, float *duf, float *du2, int *ipiv, float *b, int *ldb, float *x, int *ldx, float *ferr, float *berr, float *work, int *iwork, int *info);
+void F_FUNC(sgtsv,SGTSV)(int *n, int *nrhs, float *dl, float *d, float *du, float *b, int *ldb, int *info);
+void F_FUNC(sgtsvx,SGTSVX)(char *fact, char *trans, int *n, int *nrhs, float *dl, float *d, float *du, float *dlf, float *df, float *duf, float *du2, int *ipiv, float *b, int *ldb, float *x, int *ldx, float *rcond, float *ferr, float *berr, float *work, int *iwork, int *info);
+void F_FUNC(sgttrf,SGTTRF)(int *n, float *dl, float *d, float *du, float *du2, int *ipiv, int *info);
+void F_FUNC(sgttrs,SGTTRS)(char *trans, int *n, int *nrhs, float *dl, float *d, float *du, float *du2, int *ipiv, float *b, int *ldb, int *info);
+void F_FUNC(sgtts2,SGTTS2)(int *itrans, int *n, int *nrhs, float *dl, float *d, float *du, float *du2, int *ipiv, float *b, int *ldb);
+void F_FUNC(shgeqz,SHGEQZ)(char *job, char *compq, char *compz, int *n, int *ilo, int *ihi, float *h, int *ldh, float *t, int *ldt, float *alphar, float *alphai, float *beta, float *q, int *ldq, float *z, int *ldz, float *work, int *lwork, int *info);
+void F_FUNC(shsein,SHSEIN)(char *side, char *eigsrc, char *initv, int *select, int *n, float *h, int *ldh, float *wr, float *wi, float *vl, int *ldvl, float *vr, int *ldvr, int *mm, int *m, float *work, int *ifaill, int *ifailr, int *info);
+void F_FUNC(shseqr,SHSEQR)(char *job, char *compz, int *n, int *ilo, int *ihi, float *h, int *ldh, float *wr, float *wi, float *z, int *ldz, float *work, int *lwork, int *info);
+void F_FUNC(slabad,SLABAD)(float *small, float *large);
+void F_FUNC(slabrd,SLABRD)(int *m, int *n, int *nb, float *a, int *lda, float *d, float *e, float *tauq, float *taup, float *x, int *ldx, float *y, int *ldy);
+void F_FUNC(slacn2,SLACN2)(int *n, float *v, float *x, int *isgn, float *est, int *kase, int *isave);
+void F_FUNC(slacon,SLACON)(int *n, float *v, float *x, int *isgn, float *est, int *kase);
+void F_FUNC(slacpy,SLACPY)(char *uplo, int *m, int *n, float *a, int *lda, float *b, int *ldb);
+void F_FUNC(sladiv,SLADIV)(float *a, float *b, float *c, float *d, float *p, float *q);
+void F_FUNC(slae2,SLAE2)(float *a, float *b, float *c, float *rt1, float *rt2);
+void F_FUNC(slaebz,SLAEBZ)(int *ijob, int *nitmax, int *n, int *mmax, int *minp, int *nbmin, float *abstol, float *reltol, float *pivmin, float *d, float *e, float *e2, int *nval, float *ab, float *c, int *mout, int *nab, float *work, int *iwork, int *info);
+void F_FUNC(slaed0,SLAED0)(int *icompq, int *qsiz, int *n, float *d, float *e, float *q, int *ldq, float *qstore, int *ldqs, float *work, int *iwork, int *info);
+void F_FUNC(slaed1,SLAED1)(int *n, float *d, float *q, int *ldq, int *indxq, float *rho, int *cutpnt, float *work, int *iwork, int *info);
+void F_FUNC(slaed2,SLAED2)(int *k, int *n, int *n1, float *d, float *q, int *ldq, int *indxq, float *rho, float *z, float *dlamda, float *w, float *q2, int *indx, int *indxc, int *indxp, int *coltyp, int *info);
+void F_FUNC(slaed3,SLAED3)(int *k, int *n, int *n1, float *d, float *q, int *ldq, float *rho, float *dlamda, float *q2, int *indx, int *ctot, float *w, float *s, int *info);
+void F_FUNC(slaed4,SLAED4)(int *n, int *i, float *d, float *z, float *delta, float *rho, float *dlam, int *info);
+void F_FUNC(slaed5,SLAED5)(int *i, float *d, float *z, float *delta, float *rho, float *dlam);
+void F_FUNC(slaed6,SLAED6)(int *kniter, int *orgati, float *rho, float *d, float *z, float *finit, float *tau, int *info);
+void F_FUNC(slaed7,SLAED7)(int *icompq, int *n, int *qsiz, int *tlvls, int *curlvl, int *curpbm, float *d, float *q, int *ldq, int *indxq, float *rho, int *cutpnt, float *qstore, int *qptr, int *prmptr, int *perm, int *givptr, int *givcol, float *givnum, float *work, int *iwork, int *info);
+void F_FUNC(slaed8,SLAED8)(int *icompq, int *k, int *n, int *qsiz, float *d, float *q, int *ldq, int *indxq, float *rho, int *cutpnt, float *z, float *dlamda, float *q2, int *ldq2, float *w, int *perm, int *givptr, int *givcol, float *givnum, int *indxp, int *indx, int *info);
+void F_FUNC(slaed9,SLAED9)(int *k, int *kstart, int *kstop, int *n, float *d, float *q, int *ldq, float *rho, float *dlamda, float *w, float *s, int *lds, int *info);
+void F_FUNC(slaeda,SLAEDA)(int *n, int *tlvls, int *curlvl, int *curpbm, int *prmptr, int *perm, int *givptr, int *givcol, float *givnum, float *q, int *qptr, float *z, float *ztemp, int *info);
+void F_FUNC(slaein,SLAEIN)(int *rightv, int *noinit, int *n, float *h, int *ldh, float *wr, float *wi, float *vr, float *vi, float *b, int *ldb, float *work, float *eps3, float *smlnum, float *bignum, int *info);
+void F_FUNC(slaev2,SLAEV2)(float *a, float *b, float *c, float *rt1, float *rt2, float *cs1, float *sn1);
+void F_FUNC(slaexc,SLAEXC)(int *wantq, int *n, float *t, int *ldt, float *q, int *ldq, int *j1, int *n1, int *n2, float *work, int *info);
+void F_FUNC(slag2,SLAG2)(float *a, int *lda, float *b, int *ldb, float *safmin, float *scale1, float *scale2, float *wr1, float *wr2, float *wi);
+void F_FUNC(slag2d,SLAG2D)(int *m, int *n, float *sa, int *ldsa, double *a, int *lda, int *info);
+void F_FUNC(slags2,SLAGS2)(int *upper, float *a1, float *a2, float *a3, float *b1, float *b2, float *b3, float *csu, float *snu, float *csv, float *snv, float *csq, float *snq);
+void F_FUNC(slagtf,SLAGTF)(int *n, float *a, float *lambda, float *b, float *c, float *tol, float *d, int *in, int *info);
+void F_FUNC(slagtm,SLAGTM)(char *trans, int *n, int *nrhs, float *alpha, float *dl, float *d, float *du, float *x, int *ldx, float *beta, float *b, int *ldb);
+void F_FUNC(slagts,SLAGTS)(int *job, int *n, float *a, float *b, float *c, float *d, int *in, float *y, float *tol, int *info);
+void F_FUNC(slagv2,SLAGV2)(float *a, int *lda, float *b, int *ldb, float *alphar, float *alphai, float *beta, float *csl, float *snl, float *csr, float *snr);
+void F_FUNC(slahqr,SLAHQR)(int *wantt, int *wantz, int *n, int *ilo, int *ihi, float *h, int *ldh, float *wr, float *wi, int *iloz, int *ihiz, float *z, int *ldz, int *info);
+void F_FUNC(slahr2,SLAHR2)(int *n, int *k, int *nb, float *a, int *lda, float *tau, float *t, int *ldt, float *y, int *ldy);
+void F_FUNC(slaic1,SLAIC1)(int *job, int *j, float *x, float *sest, float *w, float *gamma, float *sestpr, float *s, float *c);
+void F_FUNC(slaln2,SLALN2)(int *ltrans, int *na, int *nw, float *smin, float *ca, float *a, int *lda, float *d1, float *d2, float *b, int *ldb, float *wr, float *wi, float *x, int *ldx, float *scale, float *xnorm, int *info);
+void F_FUNC(slals0,SLALS0)(int *icompq, int *nl, int *nr, int *sqre, int *nrhs, float *b, int *ldb, float *bx, int *ldbx, int *perm, int *givptr, int *givcol, int *ldgcol, float *givnum, int *ldgnum, float *poles, float *difl, float *difr, float *z, int *k, float *c, float *s, float *work, int *info);
+void F_FUNC(slalsa,SLALSA)(int *icompq, int *smlsiz, int *n, int *nrhs, float *b, int *ldb, float *bx, int *ldbx, float *u, int *ldu, float *vt, int *k, float *difl, float *difr, float *z, float *poles, int *givptr, int *givcol, int *ldgcol, int *perm, float *givnum, float *c, float *s, float *work, int *iwork, int *info);
+void F_FUNC(slalsd,SLALSD)(char *uplo, int *smlsiz, int *n, int *nrhs, float *d, float *e, float *b, int *ldb, float *rcond, int *rank, float *work, int *iwork, int *info);
+void F_FUNC(slamrg,SLAMRG)(int *n1, int *n2, float *a, int *strd1, int *strd2, int *index_bn);
+void F_FUNC(slanv2,SLANV2)(float *a, float *b, float *c, float *d, float *rt1r, float *rt1i, float *rt2r, float *rt2i, float *cs, float *sn);
+void F_FUNC(slapll,SLAPLL)(int *n, float *x, int *incx, float *y, int *incy, float *ssmin);
+void F_FUNC(slapmr,SLAPMR)(int *forwrd, int *m, int *n, float *x, int *ldx, int *k);
+void F_FUNC(slapmt,SLAPMT)(int *forwrd, int *m, int *n, float *x, int *ldx, int *k);
+void F_FUNC(slaqgb,SLAQGB)(int *m, int *n, int *kl, int *ku, float *ab, int *ldab, float *r, float *c, float *rowcnd, float *colcnd, float *amax, char *equed);
+void F_FUNC(slaqge,SLAQGE)(int *m, int *n, float *a, int *lda, float *r, float *c, float *rowcnd, float *colcnd, float *amax, char *equed);
+void F_FUNC(slaqp2,SLAQP2)(int *m, int *n, int *offset, float *a, int *lda, int *jpvt, float *tau, float *vn1, float *vn2, float *work);
+void F_FUNC(slaqps,SLAQPS)(int *m, int *n, int *offset, int *nb, int *kb, float *a, int *lda, int *jpvt, float *tau, float *vn1, float *vn2, float *auxv, float *f, int *ldf);
+void F_FUNC(slaqr0,SLAQR0)(int *wantt, int *wantz, int *n, int *ilo, int *ihi, float *h, int *ldh, float *wr, float *wi, int *iloz, int *ihiz, float *z, int *ldz, float *work, int *lwork, int *info);
+void F_FUNC(slaqr1,SLAQR1)(int *n, float *h, int *ldh, float *sr1, float *si1, float *sr2, float *si2, float *v);
+void F_FUNC(slaqr2,SLAQR2)(int *wantt, int *wantz, int *n, int *ktop, int *kbot, int *nw, float *h, int *ldh, int *iloz, int *ihiz, float *z, int *ldz, int *ns, int *nd, float *sr, float *si, float *v, int *ldv, int *nh, float *t, int *ldt, int *nv, float *wv, int *ldwv, float *work, int *lwork);
+void F_FUNC(slaqr3,SLAQR3)(int *wantt, int *wantz, int *n, int *ktop, int *kbot, int *nw, float *h, int *ldh, int *iloz, int *ihiz, float *z, int *ldz, int *ns, int *nd, float *sr, float *si, float *v, int *ldv, int *nh, float *t, int *ldt, int *nv, float *wv, int *ldwv, float *work, int *lwork);
+void F_FUNC(slaqr4,SLAQR4)(int *wantt, int *wantz, int *n, int *ilo, int *ihi, float *h, int *ldh, float *wr, float *wi, int *iloz, int *ihiz, float *z, int *ldz, float *work, int *lwork, int *info);
+void F_FUNC(slaqr5,SLAQR5)(int *wantt, int *wantz, int *kacc22, int *n, int *ktop, int *kbot, int *nshfts, float *sr, float *si, float *h, int *ldh, int *iloz, int *ihiz, float *z, int *ldz, float *v, int *ldv, float *u, int *ldu, int *nv, float *wv, int *ldwv, int *nh, float *wh, int *ldwh);
+void F_FUNC(slaqsb,SLAQSB)(char *uplo, int *n, int *kd, float *ab, int *ldab, float *s, float *scond, float *amax, char *equed);
+void F_FUNC(slaqsp,SLAQSP)(char *uplo, int *n, float *ap, float *s, float *scond, float *amax, char *equed);
+void F_FUNC(slaqsy,SLAQSY)(char *uplo, int *n, float *a, int *lda, float *s, float *scond, float *amax, char *equed);
+void F_FUNC(slaqtr,SLAQTR)(int *ltran, int *lreal, int *n, float *t, int *ldt, float *b, float *w, float *scale, float *x, float *work, int *info);
+void F_FUNC(slar1v,SLAR1V)(int *n, int *b1, int *bn, float *lambda, float *d, float *l, float *ld, float *lld, float *pivmin, float *gaptol, float *z, int *wantnc, int *negcnt, float *ztz, float *mingma, int *r, int *isuppz, float *nrminv, float *resid, float *rqcorr, float *work);
+void F_FUNC(slar2v,SLAR2V)(int *n, float *x, float *y, float *z, int *incx, float *c, float *s, int *incc);
+void F_FUNC(slarf,SLARF)(char *side, int *m, int *n, float *v, int *incv, float *tau, float *c, int *ldc, float *work);
+void F_FUNC(slarfb,SLARFB)(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, float *v, int *ldv, float *t, int *ldt, float *c, int *ldc, float *work, int *ldwork);
+void F_FUNC(slarfg,SLARFG)(int *n, float *alpha, float *x, int *incx, float *tau);
+void F_FUNC(slarfgp,SLARFGP)(int *n, float *alpha, float *x, int *incx, float *tau);
+void F_FUNC(slarft,SLARFT)(char *direct, char *storev, int *n, int *k, float *v, int *ldv, float *tau, float *t, int *ldt);
+void F_FUNC(slarfx,SLARFX)(char *side, int *m, int *n, float *v, float *tau, float *c, int *ldc, float *work);
+void F_FUNC(slargv,SLARGV)(int *n, float *x, int *incx, float *y, int *incy, float *c, int *incc);
+void F_FUNC(slarnv,SLARNV)(int *idist, int *iseed, int *n, float *x);
+void F_FUNC(slarra,SLARRA)(int *n, float *d, float *e, float *e2, float *spltol, float *tnrm, int *nsplit, int *isplit, int *info);
+void F_FUNC(slarrb,SLARRB)(int *n, float *d, float *lld, int *ifirst, int *ilast, float *rtol1, float *rtol2, int *offset, float *w, float *wgap, float *werr, float *work, int *iwork, float *pivmin, float *spdiam, int *twist, int *info);
+void F_FUNC(slarrc,SLARRC)(char *jobt, int *n, float *vl, float *vu, float *d, float *e, float *pivmin, int *eigcnt, int *lcnt, int *rcnt, int *info);
+void F_FUNC(slarrd,SLARRD)(char *range, char *order, int *n, float *vl, float *vu, int *il, int *iu, float *gers, float *reltol, float *d, float *e, float *e2, float *pivmin, int *nsplit, int *isplit, int *m, float *w, float *werr, float *wl, float *wu, int *iblock, int *indexw, float *work, int *iwork, int *info);
+void F_FUNC(slarre,SLARRE)(char *range, int *n, float *vl, float *vu, int *il, int *iu, float *d, float *e, float *e2, float *rtol1, float *rtol2, float *spltol, int *nsplit, int *isplit, int *m, float *w, float *werr, float *wgap, int *iblock, int *indexw, float *gers, float *pivmin, float *work, int *iwork, int *info);
+void F_FUNC(slarrf,SLARRF)(int *n, float *d, float *l, float *ld, int *clstrt, int *clend, float *w, float *wgap, float *werr, float *spdiam, float *clgapl, float *clgapr, float *pivmin, float *sigma, float *dplus, float *lplus, float *work, int *info);
+void F_FUNC(slarrj,SLARRJ)(int *n, float *d, float *e2, int *ifirst, int *ilast, float *rtol, int *offset, float *w, float *werr, float *work, int *iwork, float *pivmin, float *spdiam, int *info);
+void F_FUNC(slarrk,SLARRK)(int *n, int *iw, float *gl, float *gu, float *d, float *e2, float *pivmin, float *reltol, float *w, float *werr, int *info);
+void F_FUNC(slarrr,SLARRR)(int *n, float *d, float *e, int *info);
+void F_FUNC(slarrv,SLARRV)(int *n, float *vl, float *vu, float *d, float *l, float *pivmin, int *isplit, int *m, int *dol, int *dou, float *minrgp, float *rtol1, float *rtol2, float *w, float *werr, float *wgap, int *iblock, int *indexw, float *gers, float *z, int *ldz, int *isuppz, float *work, int *iwork, int *info);
+void F_FUNC(slartg,SLARTG)(float *f, float *g, float *cs, float *sn, float *r);
+void F_FUNC(slartgp,SLARTGP)(float *f, float *g, float *cs, float *sn, float *r);
+void F_FUNC(slartgs,SLARTGS)(float *x, float *y, float *sigma, float *cs, float *sn);
+void F_FUNC(slartv,SLARTV)(int *n, float *x, int *incx, float *y, int *incy, float *c, float *s, int *incc);
+void F_FUNC(slaruv,SLARUV)(int *iseed, int *n, float *x);
+void F_FUNC(slarz,SLARZ)(char *side, int *m, int *n, int *l, float *v, int *incv, float *tau, float *c, int *ldc, float *work);
+void F_FUNC(slarzb,SLARZB)(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, float *v, int *ldv, float *t, int *ldt, float *c, int *ldc, float *work, int *ldwork);
+void F_FUNC(slarzt,SLARZT)(char *direct, char *storev, int *n, int *k, float *v, int *ldv, float *tau, float *t, int *ldt);
+void F_FUNC(slas2,SLAS2)(float *f, float *g, float *h, float *ssmin, float *ssmax);
+void F_FUNC(slascl,SLASCL)(char *type_bn, int *kl, int *ku, float *cfrom, float *cto, int *m, int *n, float *a, int *lda, int *info);
+void F_FUNC(slasd0,SLASD0)(int *n, int *sqre, float *d, float *e, float *u, int *ldu, float *vt, int *ldvt, int *smlsiz, int *iwork, float *work, int *info);
+void F_FUNC(slasd1,SLASD1)(int *nl, int *nr, int *sqre, float *d, float *alpha, float *beta, float *u, int *ldu, float *vt, int *ldvt, int *idxq, int *iwork, float *work, int *info);
+void F_FUNC(slasd2,SLASD2)(int *nl, int *nr, int *sqre, int *k, float *d, float *z, float *alpha, float *beta, float *u, int *ldu, float *vt, int *ldvt, float *dsigma, float *u2, int *ldu2, float *vt2, int *ldvt2, int *idxp, int *idx, int *idxc, int *idxq, int *coltyp, int *info);
+void F_FUNC(slasd3,SLASD3)(int *nl, int *nr, int *sqre, int *k, float *d, float *q, int *ldq, float *dsigma, float *u, int *ldu, float *u2, int *ldu2, float *vt, int *ldvt, float *vt2, int *ldvt2, int *idxc, int *ctot, float *z, int *info);
+void F_FUNC(slasd4,SLASD4)(int *n, int *i, float *d, float *z, float *delta, float *rho, float *sigma, float *work, int *info);
+void F_FUNC(slasd5,SLASD5)(int *i, float *d, float *z, float *delta, float *rho, float *dsigma, float *work);
+void F_FUNC(slasd6,SLASD6)(int *icompq, int *nl, int *nr, int *sqre, float *d, float *vf, float *vl, float *alpha, float *beta, int *idxq, int *perm, int *givptr, int *givcol, int *ldgcol, float *givnum, int *ldgnum, float *poles, float *difl, float *difr, float *z, int *k, float *c, float *s, float *work, int *iwork, int *info);
+void F_FUNC(slasd7,SLASD7)(int *icompq, int *nl, int *nr, int *sqre, int *k, float *d, float *z, float *zw, float *vf, float *vfw, float *vl, float *vlw, float *alpha, float *beta, float *dsigma, int *idx, int *idxp, int *idxq, int *perm, int *givptr, int *givcol, int *ldgcol, float *givnum, int *ldgnum, float *c, float *s, int *info);
+void F_FUNC(slasd8,SLASD8)(int *icompq, int *k, float *d, float *z, float *vf, float *vl, float *difl, float *difr, int *lddifr, float *dsigma, float *work, int *info);
+void F_FUNC(slasda,SLASDA)(int *icompq, int *smlsiz, int *n, int *sqre, float *d, float *e, float *u, int *ldu, float *vt, int *k, float *difl, float *difr, float *z, float *poles, int *givptr, int *givcol, int *ldgcol, int *perm, float *givnum, float *c, float *s, float *work, int *iwork, int *info);
+void F_FUNC(slasdq,SLASDQ)(char *uplo, int *sqre, int *n, int *ncvt, int *nru, int *ncc, float *d, float *e, float *vt, int *ldvt, float *u, int *ldu, float *c, int *ldc, float *work, int *info);
+void F_FUNC(slasdt,SLASDT)(int *n, int *lvl, int *nd, int *inode, int *ndiml, int *ndimr, int *msub);
+void F_FUNC(slaset,SLASET)(char *uplo, int *m, int *n, float *alpha, float *beta, float *a, int *lda);
+void F_FUNC(slasq1,SLASQ1)(int *n, float *d, float *e, float *work, int *info);
+void F_FUNC(slasq2,SLASQ2)(int *n, float *z, int *info);
+void F_FUNC(slasq3,SLASQ3)(int *i0, int *n0, float *z, int *pp, float *dmin, float *sigma, float *desig, float *qmax, int *nfail, int *iter, int *ndiv, int *ieee, int *ttype, float *dmin1, float *dmin2, float *dn, float *dn1, float *dn2, float *g, float *tau);
+void F_FUNC(slasq4,SLASQ4)(int *i0, int *n0, float *z, int *pp, int *n0in, float *dmin, float *dmin1, float *dmin2, float *dn, float *dn1, float *dn2, float *tau, int *ttype, float *g);
+void F_FUNC(slasq6,SLASQ6)(int *i0, int *n0, float *z, int *pp, float *dmin, float *dmin1, float *dmin2, float *dn, float *dnm1, float *dnm2);
+void F_FUNC(slasr,SLASR)(char *side, char *pivot, char *direct, int *m, int *n, float *c, float *s, float *a, int *lda);
+void F_FUNC(slasrt,SLASRT)(char *id, int *n, float *d, int *info);
+void F_FUNC(slassq,SLASSQ)(int *n, float *x, int *incx, float *scale, float *sumsq);
+void F_FUNC(slasv2,SLASV2)(float *f, float *g, float *h, float *ssmin, float *ssmax, float *snr, float *csr, float *snl, float *csl);
+void F_FUNC(slaswp,SLASWP)(int *n, float *a, int *lda, int *k1, int *k2, int *ipiv, int *incx);
+void F_FUNC(slasy2,SLASY2)(int *ltranl, int *ltranr, int *isgn, int *n1, int *n2, float *tl, int *ldtl, float *tr, int *ldtr, float *b, int *ldb, float *scale, float *x, int *ldx, float *xnorm, int *info);
+void F_FUNC(slasyf,SLASYF)(char *uplo, int *n, int *nb, int *kb, float *a, int *lda, int *ipiv, float *w, int *ldw, int *info);
+void F_FUNC(slatbs,SLATBS)(char *uplo, char *trans, char *diag, char *normin, int *n, int *kd, float *ab, int *ldab, float *x, float *scale, float *cnorm, int *info);
+void F_FUNC(slatdf,SLATDF)(int *ijob, int *n, float *z, int *ldz, float *rhs, float *rdsum, float *rdscal, int *ipiv, int *jpiv);
+void F_FUNC(slatps,SLATPS)(char *uplo, char *trans, char *diag, char *normin, int *n, float *ap, float *x, float *scale, float *cnorm, int *info);
+void F_FUNC(slatrd,SLATRD)(char *uplo, int *n, int *nb, float *a, int *lda, float *e, float *tau, float *w, int *ldw);
+void F_FUNC(slatrs,SLATRS)(char *uplo, char *trans, char *diag, char *normin, int *n, float *a, int *lda, float *x, float *scale, float *cnorm, int *info);
+void F_FUNC(slatrz,SLATRZ)(int *m, int *n, int *l, float *a, int *lda, float *tau, float *work);
+void F_FUNC(slauu2,SLAUU2)(char *uplo, int *n, float *a, int *lda, int *info);
+void F_FUNC(slauum,SLAUUM)(char *uplo, int *n, float *a, int *lda, int *info);
+void F_FUNC(sopgtr,SOPGTR)(char *uplo, int *n, float *ap, float *tau, float *q, int *ldq, float *work, int *info);
+void F_FUNC(sopmtr,SOPMTR)(char *side, char *uplo, char *trans, int *m, int *n, float *ap, float *tau, float *c, int *ldc, float *work, int *info);
+void F_FUNC(sorbdb,SORBDB)(char *trans, char *signs, int *m, int *p, int *q, float *x11, int *ldx11, float *x12, int *ldx12, float *x21, int *ldx21, float *x22, int *ldx22, float *theta, float *phi, float *taup1, float *taup2, float *tauq1, float *tauq2, float *work, int *lwork, int *info);
+void F_FUNC(sorcsd,SORCSD)(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, char *signs, int *m, int *p, int *q, float *x11, int *ldx11, float *x12, int *ldx12, float *x21, int *ldx21, float *x22, int *ldx22, float *theta, float *u1, int *ldu1, float *u2, int *ldu2, float *v1t, int *ldv1t, float *v2t, int *ldv2t, float *work, int *lwork, int *iwork, int *info);
+void F_FUNC(sorg2l,SORG2L)(int *m, int *n, int *k, float *a, int *lda, float *tau, float *work, int *info);
+void F_FUNC(sorg2r,SORG2R)(int *m, int *n, int *k, float *a, int *lda, float *tau, float *work, int *info);
+void F_FUNC(sorgbr,SORGBR)(char *vect, int *m, int *n, int *k, float *a, int *lda, float *tau, float *work, int *lwork, int *info);
+void F_FUNC(sorghr,SORGHR)(int *n, int *ilo, int *ihi, float *a, int *lda, float *tau, float *work, int *lwork, int *info);
+void F_FUNC(sorgl2,SORGL2)(int *m, int *n, int *k, float *a, int *lda, float *tau, float *work, int *info);
+void F_FUNC(sorglq,SORGLQ)(int *m, int *n, int *k, float *a, int *lda, float *tau, float *work, int *lwork, int *info);
+void F_FUNC(sorgql,SORGQL)(int *m, int *n, int *k, float *a, int *lda, float *tau, float *work, int *lwork, int *info);
+void F_FUNC(sorgqr,SORGQR)(int *m, int *n, int *k, float *a, int *lda, float *tau, float *work, int *lwork, int *info);
+void F_FUNC(sorgr2,SORGR2)(int *m, int *n, int *k, float *a, int *lda, float *tau, float *work, int *info);
+void F_FUNC(sorgrq,SORGRQ)(int *m, int *n, int *k, float *a, int *lda, float *tau, float *work, int *lwork, int *info);
+void F_FUNC(sorgtr,SORGTR)(char *uplo, int *n, float *a, int *lda, float *tau, float *work, int *lwork, int *info);
+void F_FUNC(sorm2l,SORM2L)(char *side, char *trans, int *m, int *n, int *k, float *a, int *lda, float *tau, float *c, int *ldc, float *work, int *info);
+void F_FUNC(sorm2r,SORM2R)(char *side, char *trans, int *m, int *n, int *k, float *a, int *lda, float *tau, float *c, int *ldc, float *work, int *info);
+void F_FUNC(sormbr,SORMBR)(char *vect, char *side, char *trans, int *m, int *n, int *k, float *a, int *lda, float *tau, float *c, int *ldc, float *work, int *lwork, int *info);
+void F_FUNC(sormhr,SORMHR)(char *side, char *trans, int *m, int *n, int *ilo, int *ihi, float *a, int *lda, float *tau, float *c, int *ldc, float *work, int *lwork, int *info);
+void F_FUNC(sorml2,SORML2)(char *side, char *trans, int *m, int *n, int *k, float *a, int *lda, float *tau, float *c, int *ldc, float *work, int *info);
+void F_FUNC(sormlq,SORMLQ)(char *side, char *trans, int *m, int *n, int *k, float *a, int *lda, float *tau, float *c, int *ldc, float *work, int *lwork, int *info);
+void F_FUNC(sormql,SORMQL)(char *side, char *trans, int *m, int *n, int *k, float *a, int *lda, float *tau, float *c, int *ldc, float *work, int *lwork, int *info);
+void F_FUNC(sormqr,SORMQR)(char *side, char *trans, int *m, int *n, int *k, float *a, int *lda, float *tau, float *c, int *ldc, float *work, int *lwork, int *info);
+void F_FUNC(sormr2,SORMR2)(char *side, char *trans, int *m, int *n, int *k, float *a, int *lda, float *tau, float *c, int *ldc, float *work, int *info);
+void F_FUNC(sormr3,SORMR3)(char *side, char *trans, int *m, int *n, int *k, int *l, float *a, int *lda, float *tau, float *c, int *ldc, float *work, int *info);
+void F_FUNC(sormrq,SORMRQ)(char *side, char *trans, int *m, int *n, int *k, float *a, int *lda, float *tau, float *c, int *ldc, float *work, int *lwork, int *info);
+void F_FUNC(sormrz,SORMRZ)(char *side, char *trans, int *m, int *n, int *k, int *l, float *a, int *lda, float *tau, float *c, int *ldc, float *work, int *lwork, int *info);
+void F_FUNC(sormtr,SORMTR)(char *side, char *uplo, char *trans, int *m, int *n, float *a, int *lda, float *tau, float *c, int *ldc, float *work, int *lwork, int *info);
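+/* The declarations below cover the single-precision symmetric positive-definite
+   solvers (spb*: banded, spf*: rectangular full packed, spo*: full, spp*: packed,
+   spst*: pivoted Cholesky, spt*: tridiagonal), per standard LAPACK naming. */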
+void F_FUNC(spbcon,SPBCON)(char *uplo, int *n, int *kd, float *ab, int *ldab, float *anorm, float *rcond, float *work, int *iwork, int *info);
+void F_FUNC(spbequ,SPBEQU)(char *uplo, int *n, int *kd, float *ab, int *ldab, float *s, float *scond, float *amax, int *info);
+void F_FUNC(spbrfs,SPBRFS)(char *uplo, int *n, int *kd, int *nrhs, float *ab, int *ldab, float *afb, int *ldafb, float *b, int *ldb, float *x, int *ldx, float *ferr, float *berr, float *work, int *iwork, int *info);
+void F_FUNC(spbstf,SPBSTF)(char *uplo, int *n, int *kd, float *ab, int *ldab, int *info);
+void F_FUNC(spbsv,SPBSV)(char *uplo, int *n, int *kd, int *nrhs, float *ab, int *ldab, float *b, int *ldb, int *info);
+void F_FUNC(spbsvx,SPBSVX)(char *fact, char *uplo, int *n, int *kd, int *nrhs, float *ab, int *ldab, float *afb, int *ldafb, char *equed, float *s, float *b, int *ldb, float *x, int *ldx, float *rcond, float *ferr, float *berr, float *work, int *iwork, int *info);
+void F_FUNC(spbtf2,SPBTF2)(char *uplo, int *n, int *kd, float *ab, int *ldab, int *info);
+void F_FUNC(spbtrf,SPBTRF)(char *uplo, int *n, int *kd, float *ab, int *ldab, int *info);
+void F_FUNC(spbtrs,SPBTRS)(char *uplo, int *n, int *kd, int *nrhs, float *ab, int *ldab, float *b, int *ldb, int *info);
+void F_FUNC(spftrf,SPFTRF)(char *transr, char *uplo, int *n, float *a, int *info);
+void F_FUNC(spftri,SPFTRI)(char *transr, char *uplo, int *n, float *a, int *info);
+void F_FUNC(spftrs,SPFTRS)(char *transr, char *uplo, int *n, int *nrhs, float *a, float *b, int *ldb, int *info);
+void F_FUNC(spocon,SPOCON)(char *uplo, int *n, float *a, int *lda, float *anorm, float *rcond, float *work, int *iwork, int *info);
+void F_FUNC(spoequ,SPOEQU)(int *n, float *a, int *lda, float *s, float *scond, float *amax, int *info);
+void F_FUNC(spoequb,SPOEQUB)(int *n, float *a, int *lda, float *s, float *scond, float *amax, int *info);
+void F_FUNC(sporfs,SPORFS)(char *uplo, int *n, int *nrhs, float *a, int *lda, float *af, int *ldaf, float *b, int *ldb, float *x, int *ldx, float *ferr, float *berr, float *work, int *iwork, int *info);
+void F_FUNC(sposv,SPOSV)(char *uplo, int *n, int *nrhs, float *a, int *lda, float *b, int *ldb, int *info);
+void F_FUNC(sposvx,SPOSVX)(char *fact, char *uplo, int *n, int *nrhs, float *a, int *lda, float *af, int *ldaf, char *equed, float *s, float *b, int *ldb, float *x, int *ldx, float *rcond, float *ferr, float *berr, float *work, int *iwork, int *info);
+void F_FUNC(spotf2,SPOTF2)(char *uplo, int *n, float *a, int *lda, int *info);
+void F_FUNC(spotrf,SPOTRF)(char *uplo, int *n, float *a, int *lda, int *info);
+void F_FUNC(spotri,SPOTRI)(char *uplo, int *n, float *a, int *lda, int *info);
+void F_FUNC(spotrs,SPOTRS)(char *uplo, int *n, int *nrhs, float *a, int *lda, float *b, int *ldb, int *info);
+void F_FUNC(sppcon,SPPCON)(char *uplo, int *n, float *ap, float *anorm, float *rcond, float *work, int *iwork, int *info);
+void F_FUNC(sppequ,SPPEQU)(char *uplo, int *n, float *ap, float *s, float *scond, float *amax, int *info);
+void F_FUNC(spprfs,SPPRFS)(char *uplo, int *n, int *nrhs, float *ap, float *afp, float *b, int *ldb, float *x, int *ldx, float *ferr, float *berr, float *work, int *iwork, int *info);
+void F_FUNC(sppsv,SPPSV)(char *uplo, int *n, int *nrhs, float *ap, float *b, int *ldb, int *info);
+void F_FUNC(sppsvx,SPPSVX)(char *fact, char *uplo, int *n, int *nrhs, float *ap, float *afp, char *equed, float *s, float *b, int *ldb, float *x, int *ldx, float *rcond, float *ferr, float *berr, float *work, int *iwork, int *info);
+void F_FUNC(spptrf,SPPTRF)(char *uplo, int *n, float *ap, int *info);
+void F_FUNC(spptri,SPPTRI)(char *uplo, int *n, float *ap, int *info);
+void F_FUNC(spptrs,SPPTRS)(char *uplo, int *n, int *nrhs, float *ap, float *b, int *ldb, int *info);
+void F_FUNC(spstf2,SPSTF2)(char *uplo, int *n, float *a, int *lda, int *piv, int *rank, float *tol, float *work, int *info);
+void F_FUNC(spstrf,SPSTRF)(char *uplo, int *n, float *a, int *lda, int *piv, int *rank, float *tol, float *work, int *info);
+void F_FUNC(sptcon,SPTCON)(int *n, float *d, float *e, float *anorm, float *rcond, float *work, int *info);
+void F_FUNC(spteqr,SPTEQR)(char *compz, int *n, float *d, float *e, float *z, int *ldz, float *work, int *info);
+void F_FUNC(sptrfs,SPTRFS)(int *n, int *nrhs, float *d, float *e, float *df, float *ef, float *b, int *ldb, float *x, int *ldx, float *ferr, float *berr, float *work, int *info);
+void F_FUNC(sptsv,SPTSV)(int *n, int *nrhs, float *d, float *e, float *b, int *ldb, int *info);
+void F_FUNC(sptsvx,SPTSVX)(char *fact, int *n, int *nrhs, float *d, float *e, float *df, float *ef, float *b, int *ldb, float *x, int *ldx, float *rcond, float *ferr, float *berr, float *work, int *info);
+void F_FUNC(spttrf,SPTTRF)(int *n, float *d, float *e, int *info);
+void F_FUNC(spttrs,SPTTRS)(int *n, int *nrhs, float *d, float *e, float *b, int *ldb, int *info);
+void F_FUNC(sptts2,SPTTS2)(int *n, int *nrhs, float *d, float *e, float *b, int *ldb);
+void F_FUNC(srscl,SRSCL)(int *n, float *sa, float *sx, int *incx);
+void F_FUNC(ssbev,SSBEV)(char *jobz, char *uplo, int *n, int *kd, float *ab, int *ldab, float *w, float *z, int *ldz, float *work, int *info);
+void F_FUNC(ssbevd,SSBEVD)(char *jobz, char *uplo, int *n, int *kd, float *ab, int *ldab, float *w, float *z, int *ldz, float *work, int *lwork, int *iwork, int *liwork, int *info);
+void F_FUNC(ssbevx,SSBEVX)(char *jobz, char *range, char *uplo, int *n, int *kd, float *ab, int *ldab, float *q, int *ldq, float *vl, float *vu, int *il, int *iu, float *abstol, int *m, float *w, float *z, int *ldz, float *work, int *iwork, int *ifail, int *info);
+void F_FUNC(ssbgst,SSBGST)(char *vect, char *uplo, int *n, int *ka, int *kb, float *ab, int *ldab, float *bb, int *ldbb, float *x, int *ldx, float *work, int *info);
+void F_FUNC(ssbgv,SSBGV)(char *jobz, char *uplo, int *n, int *ka, int *kb, float *ab, int *ldab, float *bb, int *ldbb, float *w, float *z, int *ldz, float *work, int *info);
+void F_FUNC(ssbgvd,SSBGVD)(char *jobz, char *uplo, int *n, int *ka, int *kb, float *ab, int *ldab, float *bb, int *ldbb, float *w, float *z, int *ldz, float *work, int *lwork, int *iwork, int *liwork, int *info);
+void F_FUNC(ssbgvx,SSBGVX)(char *jobz, char *range, char *uplo, int *n, int *ka, int *kb, float *ab, int *ldab, float *bb, int *ldbb, float *q, int *ldq, float *vl, float *vu, int *il, int *iu, float *abstol, int *m, float *w, float *z, int *ldz, float *work, int *iwork, int *ifail, int *info);
+void F_FUNC(ssbtrd,SSBTRD)(char *vect, char *uplo, int *n, int *kd, float *ab, int *ldab, float *d, float *e, float *q, int *ldq, float *work, int *info);
+void F_FUNC(ssfrk,SSFRK)(char *transr, char *uplo, char *trans, int *n, int *k, float *alpha, float *a, int *lda, float *beta, float *c);
+void F_FUNC(sspcon,SSPCON)(char *uplo, int *n, float *ap, int *ipiv, float *anorm, float *rcond, float *work, int *iwork, int *info);
+void F_FUNC(sspev,SSPEV)(char *jobz, char *uplo, int *n, float *ap, float *w, float *z, int *ldz, float *work, int *info);
+void F_FUNC(sspevd,SSPEVD)(char *jobz, char *uplo, int *n, float *ap, float *w, float *z, int *ldz, float *work, int *lwork, int *iwork, int *liwork, int *info);
+void F_FUNC(sspevx,SSPEVX)(char *jobz, char *range, char *uplo, int *n, float *ap, float *vl, float *vu, int *il, int *iu, float *abstol, int *m, float *w, float *z, int *ldz, float *work, int *iwork, int *ifail, int *info);
+void F_FUNC(sspgst,SSPGST)(int *itype, char *uplo, int *n, float *ap, float *bp, int *info);
+void F_FUNC(sspgv,SSPGV)(int *itype, char *jobz, char *uplo, int *n, float *ap, float *bp, float *w, float *z, int *ldz, float *work, int *info);
+void F_FUNC(sspgvd,SSPGVD)(int *itype, char *jobz, char *uplo, int *n, float *ap, float *bp, float *w, float *z, int *ldz, float *work, int *lwork, int *iwork, int *liwork, int *info);
+void F_FUNC(sspgvx,SSPGVX)(int *itype, char *jobz, char *range, char *uplo, int *n, float *ap, float *bp, float *vl, float *vu, int *il, int *iu, float *abstol, int *m, float *w, float *z, int *ldz, float *work, int *iwork, int *ifail, int *info);
+void F_FUNC(ssprfs,SSPRFS)(char *uplo, int *n, int *nrhs, float *ap, float *afp, int *ipiv, float *b, int *ldb, float *x, int *ldx, float *ferr, float *berr, float *work, int *iwork, int *info);
+void F_FUNC(sspsv,SSPSV)(char *uplo, int *n, int *nrhs, float *ap, int *ipiv, float *b, int *ldb, int *info);
+void F_FUNC(sspsvx,SSPSVX)(char *fact, char *uplo, int *n, int *nrhs, float *ap, float *afp, int *ipiv, float *b, int *ldb, float *x, int *ldx, float *rcond, float *ferr, float *berr, float *work, int *iwork, int *info);
+void F_FUNC(ssptrd,SSPTRD)(char *uplo, int *n, float *ap, float *d, float *e, float *tau, int *info);
+void F_FUNC(ssptrf,SSPTRF)(char *uplo, int *n, float *ap, int *ipiv, int *info);
+void F_FUNC(ssptri,SSPTRI)(char *uplo, int *n, float *ap, int *ipiv, float *work, int *info);
+void F_FUNC(ssptrs,SSPTRS)(char *uplo, int *n, int *nrhs, float *ap, int *ipiv, float *b, int *ldb, int *info);
+void F_FUNC(sstebz,SSTEBZ)(char *range, char *order, int *n, float *vl, float *vu, int *il, int *iu, float *abstol, float *d, float *e, int *m, int *nsplit, float *w, int *iblock, int *isplit, float *work, int *iwork, int *info);
+void F_FUNC(sstedc,SSTEDC)(char *compz, int *n, float *d, float *e, float *z, int *ldz, float *work, int *lwork, int *iwork, int *liwork, int *info);
+void F_FUNC(sstegr,SSTEGR)(char *jobz, char *range, int *n, float *d, float *e, float *vl, float *vu, int *il, int *iu, float *abstol, int *m, float *w, float *z, int *ldz, int *isuppz, float *work, int *lwork, int *iwork, int *liwork, int *info);
+void F_FUNC(sstein,SSTEIN)(int *n, float *d, float *e, int *m, float *w, int *iblock, int *isplit, float *z, int *ldz, float *work, int *iwork, int *ifail, int *info);
+void F_FUNC(sstemr,SSTEMR)(char *jobz, char *range, int *n, float *d, float *e, float *vl, float *vu, int *il, int *iu, int *m, float *w, float *z, int *ldz, int *nzc, int *isuppz, int *tryrac, float *work, int *lwork, int *iwork, int *liwork, int *info);
+void F_FUNC(ssteqr,SSTEQR)(char *compz, int *n, float *d, float *e, float *z, int *ldz, float *work, int *info);
+void F_FUNC(ssterf,SSTERF)(int *n, float *d, float *e, int *info);
+void F_FUNC(sstev,SSTEV)(char *jobz, int *n, float *d, float *e, float *z, int *ldz, float *work, int *info);
+void F_FUNC(sstevd,SSTEVD)(char *jobz, int *n, float *d, float *e, float *z, int *ldz, float *work, int *lwork, int *iwork, int *liwork, int *info);
+void F_FUNC(sstevr,SSTEVR)(char *jobz, char *range, int *n, float *d, float *e, float *vl, float *vu, int *il, int *iu, float *abstol, int *m, float *w, float *z, int *ldz, int *isuppz, float *work, int *lwork, int *iwork, int *liwork, int *info);
+void F_FUNC(sstevx,SSTEVX)(char *jobz, char *range, int *n, float *d, float *e, float *vl, float *vu, int *il, int *iu, float *abstol, int *m, float *w, float *z, int *ldz, float *work, int *iwork, int *ifail, int *info);
+void F_FUNC(ssycon,SSYCON)(char *uplo, int *n, float *a, int *lda, int *ipiv, float *anorm, float *rcond, float *work, int *iwork, int *info);
+void F_FUNC(ssyconv,SSYCONV)(char *uplo, char *way, int *n, float *a, int *lda, int *ipiv, float *work, int *info);
+void F_FUNC(ssyequb,SSYEQUB)(char *uplo, int *n, float *a, int *lda, float *s, float *scond, float *amax, float *work, int *info);
+void F_FUNC(ssyev,SSYEV)(char *jobz, char *uplo, int *n, float *a, int *lda, float *w, float *work, int *lwork, int *info);
+void F_FUNC(ssyevd,SSYEVD)(char *jobz, char *uplo, int *n, float *a, int *lda, float *w, float *work, int *lwork, int *iwork, int *liwork, int *info);
+void F_FUNC(ssyevr,SSYEVR)(char *jobz, char *range, char *uplo, int *n, float *a, int *lda, float *vl, float *vu, int *il, int *iu, float *abstol, int *m, float *w, float *z, int *ldz, int *isuppz, float *work, int *lwork, int *iwork, int *liwork, int *info);
+void F_FUNC(ssyevx,SSYEVX)(char *jobz, char *range, char *uplo, int *n, float *a, int *lda, float *vl, float *vu, int *il, int *iu, float *abstol, int *m, float *w, float *z, int *ldz, float *work, int *lwork, int *iwork, int *ifail, int *info);
+void F_FUNC(ssygs2,SSYGS2)(int *itype, char *uplo, int *n, float *a, int *lda, float *b, int *ldb, int *info);
+void F_FUNC(ssygst,SSYGST)(int *itype, char *uplo, int *n, float *a, int *lda, float *b, int *ldb, int *info);
+void F_FUNC(ssygv,SSYGV)(int *itype, char *jobz, char *uplo, int *n, float *a, int *lda, float *b, int *ldb, float *w, float *work, int *lwork, int *info);
+void F_FUNC(ssygvd,SSYGVD)(int *itype, char *jobz, char *uplo, int *n, float *a, int *lda, float *b, int *ldb, float *w, float *work, int *lwork, int *iwork, int *liwork, int *info);
+void F_FUNC(ssygvx,SSYGVX)(int *itype, char *jobz, char *range, char *uplo, int *n, float *a, int *lda, float *b, int *ldb, float *vl, float *vu, int *il, int *iu, float *abstol, int *m, float *w, float *z, int *ldz, float *work, int *lwork, int *iwork, int *ifail, int *info);
+void F_FUNC(ssyrfs,SSYRFS)(char *uplo, int *n, int *nrhs, float *a, int *lda, float *af, int *ldaf, int *ipiv, float *b, int *ldb, float *x, int *ldx, float *ferr, float *berr, float *work, int *iwork, int *info);
+void F_FUNC(ssysv,SSYSV)(char *uplo, int *n, int *nrhs, float *a, int *lda, int *ipiv, float *b, int *ldb, float *work, int *lwork, int *info);
+void F_FUNC(ssysvx,SSYSVX)(char *fact, char *uplo, int *n, int *nrhs, float *a, int *lda, float *af, int *ldaf, int *ipiv, float *b, int *ldb, float *x, int *ldx, float *rcond, float *ferr, float *berr, float *work, int *lwork, int *iwork, int *info);
+void F_FUNC(ssyswapr,SSYSWAPR)(char *uplo, int *n, float *a, int *lda, int *i1, int *i2);
+void F_FUNC(ssytd2,SSYTD2)(char *uplo, int *n, float *a, int *lda, float *d, float *e, float *tau, int *info);
+void F_FUNC(ssytf2,SSYTF2)(char *uplo, int *n, float *a, int *lda, int *ipiv, int *info);
+void F_FUNC(ssytrd,SSYTRD)(char *uplo, int *n, float *a, int *lda, float *d, float *e, float *tau, float *work, int *lwork, int *info);
+void F_FUNC(ssytrf,SSYTRF)(char *uplo, int *n, float *a, int *lda, int *ipiv, float *work, int *lwork, int *info);
+void F_FUNC(ssytri,SSYTRI)(char *uplo, int *n, float *a, int *lda, int *ipiv, float *work, int *info);
+void F_FUNC(ssytri2,SSYTRI2)(char *uplo, int *n, float *a, int *lda, int *ipiv, float *work, int *lwork, int *info);
+void F_FUNC(ssytri2x,SSYTRI2X)(char *uplo, int *n, float *a, int *lda, int *ipiv, float *work, int *nb, int *info);
+void F_FUNC(ssytrs,SSYTRS)(char *uplo, int *n, int *nrhs, float *a, int *lda, int *ipiv, float *b, int *ldb, int *info);
+void F_FUNC(ssytrs2,SSYTRS2)(char *uplo, int *n, int *nrhs, float *a, int *lda, int *ipiv, float *b, int *ldb, float *work, int *info);
+void F_FUNC(stbcon,STBCON)(char *norm, char *uplo, char *diag, int *n, int *kd, float *ab, int *ldab, float *rcond, float *work, int *iwork, int *info);
+void F_FUNC(stbrfs,STBRFS)(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, float *ab, int *ldab, float *b, int *ldb, float *x, int *ldx, float *ferr, float *berr, float *work, int *iwork, int *info);
+void F_FUNC(stbtrs,STBTRS)(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, float *ab, int *ldab, float *b, int *ldb, int *info);
+void F_FUNC(stfsm,STFSM)(char *transr, char *side, char *uplo, char *trans, char *diag, int *m, int *n, float *alpha, float *a, float *b, int *ldb);
+void F_FUNC(stftri,STFTRI)(char *transr, char *uplo, char *diag, int *n, float *a, int *info);
+void F_FUNC(stfttp,STFTTP)(char *transr, char *uplo, int *n, float *arf, float *ap, int *info);
+void F_FUNC(stfttr,STFTTR)(char *transr, char *uplo, int *n, float *arf, float *a, int *lda, int *info);
+void F_FUNC(stgevc,STGEVC)(char *side, char *howmny, int *select, int *n, float *s, int *lds, float *p, int *ldp, float *vl, int *ldvl, float *vr, int *ldvr, int *mm, int *m, float *work, int *info);
+void F_FUNC(stgex2,STGEX2)(int *wantq, int *wantz, int *n, float *a, int *lda, float *b, int *ldb, float *q, int *ldq, float *z, int *ldz, int *j1, int *n1, int *n2, float *work, int *lwork, int *info);
+void F_FUNC(stgexc,STGEXC)(int *wantq, int *wantz, int *n, float *a, int *lda, float *b, int *ldb, float *q, int *ldq, float *z, int *ldz, int *ifst, int *ilst, float *work, int *lwork, int *info);
+void F_FUNC(stgsen,STGSEN)(int *ijob, int *wantq, int *wantz, int *select, int *n, float *a, int *lda, float *b, int *ldb, float *alphar, float *alphai, float *beta, float *q, int *ldq, float *z, int *ldz, int *m, float *pl, float *pr, float *dif, float *work, int *lwork, int *iwork, int *liwork, int *info);
+void F_FUNC(stgsja,STGSJA)(char *jobu, char *jobv, char *jobq, int *m, int *p, int *n, int *k, int *l, float *a, int *lda, float *b, int *ldb, float *tola, float *tolb, float *alpha, float *beta, float *u, int *ldu, float *v, int *ldv, float *q, int *ldq, float *work, int *ncycle, int *info);
+void F_FUNC(stgsna,STGSNA)(char *job, char *howmny, int *select, int *n, float *a, int *lda, float *b, int *ldb, float *vl, int *ldvl, float *vr, int *ldvr, float *s, float *dif, int *mm, int *m, float *work, int *lwork, int *iwork, int *info);
+void F_FUNC(stgsy2,STGSY2)(char *trans, int *ijob, int *m, int *n, float *a, int *lda, float *b, int *ldb, float *c, int *ldc, float *d, int *ldd, float *e, int *lde, float *f, int *ldf, float *scale, float *rdsum, float *rdscal, int *iwork, int *pq, int *info);
+void F_FUNC(stgsyl,STGSYL)(char *trans, int *ijob, int *m, int *n, float *a, int *lda, float *b, int *ldb, float *c, int *ldc, float *d, int *ldd, float *e, int *lde, float *f, int *ldf, float *scale, float *dif, float *work, int *lwork, int *iwork, int *info);
+void F_FUNC(stpcon,STPCON)(char *norm, char *uplo, char *diag, int *n, float *ap, float *rcond, float *work, int *iwork, int *info);
+void F_FUNC(stpmqrt,STPMQRT)(char *side, char *trans, int *m, int *n, int *k, int *l, int *nb, float *v, int *ldv, float *t, int *ldt, float *a, int *lda, float *b, int *ldb, float *work, int *info);
+void F_FUNC(stpqrt,STPQRT)(int *m, int *n, int *l, int *nb, float *a, int *lda, float *b, int *ldb, float *t, int *ldt, float *work, int *info);
+void F_FUNC(stpqrt2,STPQRT2)(int *m, int *n, int *l, float *a, int *lda, float *b, int *ldb, float *t, int *ldt, int *info);
+void F_FUNC(stprfb,STPRFB)(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, float *v, int *ldv, float *t, int *ldt, float *a, int *lda, float *b, int *ldb, float *work, int *ldwork);
+void F_FUNC(stprfs,STPRFS)(char *uplo, char *trans, char *diag, int *n, int *nrhs, float *ap, float *b, int *ldb, float *x, int *ldx, float *ferr, float *berr, float *work, int *iwork, int *info);
+void F_FUNC(stptri,STPTRI)(char *uplo, char *diag, int *n, float *ap, int *info);
+void F_FUNC(stptrs,STPTRS)(char *uplo, char *trans, char *diag, int *n, int *nrhs, float *ap, float *b, int *ldb, int *info);
+void F_FUNC(stpttf,STPTTF)(char *transr, char *uplo, int *n, float *ap, float *arf, int *info);
+void F_FUNC(stpttr,STPTTR)(char *uplo, int *n, float *ap, float *a, int *lda, int *info);
+void F_FUNC(strcon,STRCON)(char *norm, char *uplo, char *diag, int *n, float *a, int *lda, float *rcond, float *work, int *iwork, int *info);
+void F_FUNC(strevc,STREVC)(char *side, char *howmny, int *select, int *n, float *t, int *ldt, float *vl, int *ldvl, float *vr, int *ldvr, int *mm, int *m, float *work, int *info);
+void F_FUNC(strexc,STREXC)(char *compq, int *n, float *t, int *ldt, float *q, int *ldq, int *ifst, int *ilst, float *work, int *info);
+void F_FUNC(strrfs,STRRFS)(char *uplo, char *trans, char *diag, int *n, int *nrhs, float *a, int *lda, float *b, int *ldb, float *x, int *ldx, float *ferr, float *berr, float *work, int *iwork, int *info);
+void F_FUNC(strsen,STRSEN)(char *job, char *compq, int *select, int *n, float *t, int *ldt, float *q, int *ldq, float *wr, float *wi, int *m, float *s, float *sep, float *work, int *lwork, int *iwork, int *liwork, int *info);
+void F_FUNC(strsna,STRSNA)(char *job, char *howmny, int *select, int *n, float *t, int *ldt, float *vl, int *ldvl, float *vr, int *ldvr, float *s, float *sep, int *mm, int *m, float *work, int *ldwork, int *iwork, int *info);
+void F_FUNC(strsyl,STRSYL)(char *trana, char *tranb, int *isgn, int *m, int *n, float *a, int *lda, float *b, int *ldb, float *c, int *ldc, float *scale, int *info);
+void F_FUNC(strti2,STRTI2)(char *uplo, char *diag, int *n, float *a, int *lda, int *info);
+void F_FUNC(strtri,STRTRI)(char *uplo, char *diag, int *n, float *a, int *lda, int *info);
+void F_FUNC(strtrs,STRTRS)(char *uplo, char *trans, char *diag, int *n, int *nrhs, float *a, int *lda, float *b, int *ldb, int *info);
+void F_FUNC(strttf,STRTTF)(char *transr, char *uplo, int *n, float *a, int *lda, float *arf, int *info);
+void F_FUNC(strttp,STRTTP)(char *uplo, int *n, float *a, int *lda, float *ap, int *info);
+void F_FUNC(stzrzf,STZRZF)(int *m, int *n, float *a, int *lda, float *tau, float *work, int *lwork, int *info);
+void F_FUNC(xerbla_array,XERBLA_ARRAY)(char *srname_array, int *srname_len, int *info);
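+/* Prototypes for the double-precision complex (z-prefixed) LAPACK routines
+   begin here; npy_complex128 is NumPy's 128-bit complex type, corresponding
+   to Fortran COMPLEX*16. */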
+void F_FUNC(zbbcsd,ZBBCSD)(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, int *m, int *p, int *q, double *theta, double *phi, npy_complex128 *u1, int *ldu1, npy_complex128 *u2, int *ldu2, npy_complex128 *v1t, int *ldv1t, npy_complex128 *v2t, int *ldv2t, double *b11d, double *b11e, double *b12d, double *b12e, double *b21d, double *b21e, double *b22d, double *b22e, double *rwork, int *lrwork, int *info);
+void F_FUNC(zbdsqr,ZBDSQR)(char *uplo, int *n, int *ncvt, int *nru, int *ncc, double *d, double *e, npy_complex128 *vt, int *ldvt, npy_complex128 *u, int *ldu, npy_complex128 *c, int *ldc, double *rwork, int *info);
+void F_FUNC(zcgesv,ZCGESV)(int *n, int *nrhs, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, npy_complex128 *work, npy_complex64 *swork, double *rwork, int *iter, int *info);
+void F_FUNC(zcposv,ZCPOSV)(char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, npy_complex128 *work, npy_complex64 *swork, double *rwork, int *iter, int *info);
+void F_FUNC(zdrscl,ZDRSCL)(int *n, double *sa, npy_complex128 *sx, int *incx);
+void F_FUNC(zgbbrd,ZGBBRD)(char *vect, int *m, int *n, int *ncc, int *kl, int *ku, npy_complex128 *ab, int *ldab, double *d, double *e, npy_complex128 *q, int *ldq, npy_complex128 *pt, int *ldpt, npy_complex128 *c, int *ldc, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(zgbcon,ZGBCON)(char *norm, int *n, int *kl, int *ku, npy_complex128 *ab, int *ldab, int *ipiv, double *anorm, double *rcond, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(zgbequ,ZGBEQU)(int *m, int *n, int *kl, int *ku, npy_complex128 *ab, int *ldab, double *r, double *c, double *rowcnd, double *colcnd, double *amax, int *info);
+void F_FUNC(zgbequb,ZGBEQUB)(int *m, int *n, int *kl, int *ku, npy_complex128 *ab, int *ldab, double *r, double *c, double *rowcnd, double *colcnd, double *amax, int *info);
+void F_FUNC(zgbrfs,ZGBRFS)(char *trans, int *n, int *kl, int *ku, int *nrhs, npy_complex128 *ab, int *ldab, npy_complex128 *afb, int *ldafb, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(zgbsv,ZGBSV)(int *n, int *kl, int *ku, int *nrhs, npy_complex128 *ab, int *ldab, int *ipiv, npy_complex128 *b, int *ldb, int *info);
+void F_FUNC(zgbsvx,ZGBSVX)(char *fact, char *trans, int *n, int *kl, int *ku, int *nrhs, npy_complex128 *ab, int *ldab, npy_complex128 *afb, int *ldafb, int *ipiv, char *equed, double *r, double *c, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *rcond, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(zgbtf2,ZGBTF2)(int *m, int *n, int *kl, int *ku, npy_complex128 *ab, int *ldab, int *ipiv, int *info);
+void F_FUNC(zgbtrf,ZGBTRF)(int *m, int *n, int *kl, int *ku, npy_complex128 *ab, int *ldab, int *ipiv, int *info);
+void F_FUNC(zgbtrs,ZGBTRS)(char *trans, int *n, int *kl, int *ku, int *nrhs, npy_complex128 *ab, int *ldab, int *ipiv, npy_complex128 *b, int *ldb, int *info);
+void F_FUNC(zgebak,ZGEBAK)(char *job, char *side, int *n, int *ilo, int *ihi, double *scale, int *m, npy_complex128 *v, int *ldv, int *info);
+void F_FUNC(zgebal,ZGEBAL)(char *job, int *n, npy_complex128 *a, int *lda, int *ilo, int *ihi, double *scale, int *info);
+void F_FUNC(zgebd2,ZGEBD2)(int *m, int *n, npy_complex128 *a, int *lda, double *d, double *e, npy_complex128 *tauq, npy_complex128 *taup, npy_complex128 *work, int *info);
+void F_FUNC(zgebrd,ZGEBRD)(int *m, int *n, npy_complex128 *a, int *lda, double *d, double *e, npy_complex128 *tauq, npy_complex128 *taup, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zgecon,ZGECON)(char *norm, int *n, npy_complex128 *a, int *lda, double *anorm, double *rcond, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(zgeequ,ZGEEQU)(int *m, int *n, npy_complex128 *a, int *lda, double *r, double *c, double *rowcnd, double *colcnd, double *amax, int *info);
+void F_FUNC(zgeequb,ZGEEQUB)(int *m, int *n, npy_complex128 *a, int *lda, double *r, double *c, double *rowcnd, double *colcnd, double *amax, int *info);
+void F_FUNC(zgees,ZGEES)(char *jobvs, char *sort, _zselect1 *select, int *n, npy_complex128 *a, int *lda, int *sdim, npy_complex128 *w, npy_complex128 *vs, int *ldvs, npy_complex128 *work, int *lwork, double *rwork, int *bwork, int *info);
+void F_FUNC(zgeesx,ZGEESX)(char *jobvs, char *sort, _zselect1 *select, char *sense, int *n, npy_complex128 *a, int *lda, int *sdim, npy_complex128 *w, npy_complex128 *vs, int *ldvs, double *rconde, double *rcondv, npy_complex128 *work, int *lwork, double *rwork, int *bwork, int *info);
+void F_FUNC(zgeev,ZGEEV)(char *jobvl, char *jobvr, int *n, npy_complex128 *a, int *lda, npy_complex128 *w, npy_complex128 *vl, int *ldvl, npy_complex128 *vr, int *ldvr, npy_complex128 *work, int *lwork, double *rwork, int *info);
+void F_FUNC(zgeevx,ZGEEVX)(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, npy_complex128 *a, int *lda, npy_complex128 *w, npy_complex128 *vl, int *ldvl, npy_complex128 *vr, int *ldvr, int *ilo, int *ihi, double *scale, double *abnrm, double *rconde, double *rcondv, npy_complex128 *work, int *lwork, double *rwork, int *info);
+void F_FUNC(zgehd2,ZGEHD2)(int *n, int *ilo, int *ihi, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *info);
+void F_FUNC(zgehrd,ZGEHRD)(int *n, int *ilo, int *ihi, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zgelq2,ZGELQ2)(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *info);
+void F_FUNC(zgelqf,ZGELQF)(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zgels,ZGELS)(char *trans, int *m, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zgelsd,ZGELSD)(int *m, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, double *s, double *rcond, int *rank, npy_complex128 *work, int *lwork, double *rwork, int *iwork, int *info);
+void F_FUNC(zgelss,ZGELSS)(int *m, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, double *s, double *rcond, int *rank, npy_complex128 *work, int *lwork, double *rwork, int *info);
+void F_FUNC(zgelsy,ZGELSY)(int *m, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, int *jpvt, double *rcond, int *rank, npy_complex128 *work, int *lwork, double *rwork, int *info);
+void F_FUNC(zgemqrt,ZGEMQRT)(char *side, char *trans, int *m, int *n, int *k, int *nb, npy_complex128 *v, int *ldv, npy_complex128 *t, int *ldt, npy_complex128 *c, int *ldc, npy_complex128 *work, int *info);
+void F_FUNC(zgeql2,ZGEQL2)(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *info);
+void F_FUNC(zgeqlf,ZGEQLF)(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zgeqp3,ZGEQP3)(int *m, int *n, npy_complex128 *a, int *lda, int *jpvt, npy_complex128 *tau, npy_complex128 *work, int *lwork, double *rwork, int *info);
+void F_FUNC(zgeqr2,ZGEQR2)(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *info);
+void F_FUNC(zgeqr2p,ZGEQR2P)(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *info);
+void F_FUNC(zgeqrf,ZGEQRF)(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zgeqrfp,ZGEQRFP)(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zgeqrt,ZGEQRT)(int *m, int *n, int *nb, npy_complex128 *a, int *lda, npy_complex128 *t, int *ldt, npy_complex128 *work, int *info);
+void F_FUNC(zgeqrt2,ZGEQRT2)(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *t, int *ldt, int *info);
+void F_FUNC(zgeqrt3,ZGEQRT3)(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *t, int *ldt, int *info);
+void F_FUNC(zgerfs,ZGERFS)(char *trans, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *af, int *ldaf, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(zgerq2,ZGERQ2)(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *info);
+void F_FUNC(zgerqf,ZGERQF)(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zgesc2,ZGESC2)(int *n, npy_complex128 *a, int *lda, npy_complex128 *rhs, int *ipiv, int *jpiv, double *scale);
+void F_FUNC(zgesdd,ZGESDD)(char *jobz, int *m, int *n, npy_complex128 *a, int *lda, double *s, npy_complex128 *u, int *ldu, npy_complex128 *vt, int *ldvt, npy_complex128 *work, int *lwork, double *rwork, int *iwork, int *info);
+void F_FUNC(zgesv,ZGESV)(int *n, int *nrhs, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *b, int *ldb, int *info);
+void F_FUNC(zgesvd,ZGESVD)(char *jobu, char *jobvt, int *m, int *n, npy_complex128 *a, int *lda, double *s, npy_complex128 *u, int *ldu, npy_complex128 *vt, int *ldvt, npy_complex128 *work, int *lwork, double *rwork, int *info);
+void F_FUNC(zgesvx,ZGESVX)(char *fact, char *trans, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *af, int *ldaf, int *ipiv, char *equed, double *r, double *c, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *rcond, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(zgetc2,ZGETC2)(int *n, npy_complex128 *a, int *lda, int *ipiv, int *jpiv, int *info);
+void F_FUNC(zgetf2,ZGETF2)(int *m, int *n, npy_complex128 *a, int *lda, int *ipiv, int *info);
+void F_FUNC(zgetrf,ZGETRF)(int *m, int *n, npy_complex128 *a, int *lda, int *ipiv, int *info);
+void F_FUNC(zgetri,ZGETRI)(int *n, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zgetrs,ZGETRS)(char *trans, int *n, int *nrhs, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *b, int *ldb, int *info);
+void F_FUNC(zggbak,ZGGBAK)(char *job, char *side, int *n, int *ilo, int *ihi, double *lscale, double *rscale, int *m, npy_complex128 *v, int *ldv, int *info);
+void F_FUNC(zggbal,ZGGBAL)(char *job, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, int *ilo, int *ihi, double *lscale, double *rscale, double *work, int *info);
+void F_FUNC(zgges,ZGGES)(char *jobvsl, char *jobvsr, char *sort, _zselect2 *selctg, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, int *sdim, npy_complex128 *alpha, npy_complex128 *beta, npy_complex128 *vsl, int *ldvsl, npy_complex128 *vsr, int *ldvsr, npy_complex128 *work, int *lwork, double *rwork, int *bwork, int *info);
+void F_FUNC(zggesx,ZGGESX)(char *jobvsl, char *jobvsr, char *sort, _zselect2 *selctg, char *sense, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, int *sdim, npy_complex128 *alpha, npy_complex128 *beta, npy_complex128 *vsl, int *ldvsl, npy_complex128 *vsr, int *ldvsr, double *rconde, double *rcondv, npy_complex128 *work, int *lwork, double *rwork, int *iwork, int *liwork, int *bwork, int *info);
+void F_FUNC(zggev,ZGGEV)(char *jobvl, char *jobvr, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *alpha, npy_complex128 *beta, npy_complex128 *vl, int *ldvl, npy_complex128 *vr, int *ldvr, npy_complex128 *work, int *lwork, double *rwork, int *info);
+void F_FUNC(zggevx,ZGGEVX)(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *alpha, npy_complex128 *beta, npy_complex128 *vl, int *ldvl, npy_complex128 *vr, int *ldvr, int *ilo, int *ihi, double *lscale, double *rscale, double *abnrm, double *bbnrm, double *rconde, double *rcondv, npy_complex128 *work, int *lwork, double *rwork, int *iwork, int *bwork, int *info);
+void F_FUNC(zggglm,ZGGGLM)(int *n, int *m, int *p, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *d, npy_complex128 *x, npy_complex128 *y, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zgghrd,ZGGHRD)(char *compq, char *compz, int *n, int *ilo, int *ihi, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *q, int *ldq, npy_complex128 *z, int *ldz, int *info);
+void F_FUNC(zgglse,ZGGLSE)(int *m, int *n, int *p, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *c, npy_complex128 *d, npy_complex128 *x, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zggqrf,ZGGQRF)(int *n, int *m, int *p, npy_complex128 *a, int *lda, npy_complex128 *taua, npy_complex128 *b, int *ldb, npy_complex128 *taub, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zggrqf,ZGGRQF)(int *m, int *p, int *n, npy_complex128 *a, int *lda, npy_complex128 *taua, npy_complex128 *b, int *ldb, npy_complex128 *taub, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zgtcon,ZGTCON)(char *norm, int *n, npy_complex128 *dl, npy_complex128 *d, npy_complex128 *du, npy_complex128 *du2, int *ipiv, double *anorm, double *rcond, npy_complex128 *work, int *info);
+void F_FUNC(zgtrfs,ZGTRFS)(char *trans, int *n, int *nrhs, npy_complex128 *dl, npy_complex128 *d, npy_complex128 *du, npy_complex128 *dlf, npy_complex128 *df, npy_complex128 *duf, npy_complex128 *du2, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(zgtsv,ZGTSV)(int *n, int *nrhs, npy_complex128 *dl, npy_complex128 *d, npy_complex128 *du, npy_complex128 *b, int *ldb, int *info);
+void F_FUNC(zgtsvx,ZGTSVX)(char *fact, char *trans, int *n, int *nrhs, npy_complex128 *dl, npy_complex128 *d, npy_complex128 *du, npy_complex128 *dlf, npy_complex128 *df, npy_complex128 *duf, npy_complex128 *du2, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *rcond, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(zgttrf,ZGTTRF)(int *n, npy_complex128 *dl, npy_complex128 *d, npy_complex128 *du, npy_complex128 *du2, int *ipiv, int *info);
+void F_FUNC(zgttrs,ZGTTRS)(char *trans, int *n, int *nrhs, npy_complex128 *dl, npy_complex128 *d, npy_complex128 *du, npy_complex128 *du2, int *ipiv, npy_complex128 *b, int *ldb, int *info);
+void F_FUNC(zgtts2,ZGTTS2)(int *itrans, int *n, int *nrhs, npy_complex128 *dl, npy_complex128 *d, npy_complex128 *du, npy_complex128 *du2, int *ipiv, npy_complex128 *b, int *ldb);
+void F_FUNC(zhbev,ZHBEV)(char *jobz, char *uplo, int *n, int *kd, npy_complex128 *ab, int *ldab, double *w, npy_complex128 *z, int *ldz, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(zhbevd,ZHBEVD)(char *jobz, char *uplo, int *n, int *kd, npy_complex128 *ab, int *ldab, double *w, npy_complex128 *z, int *ldz, npy_complex128 *work, int *lwork, double *rwork, int *lrwork, int *iwork, int *liwork, int *info);
+void F_FUNC(zhbevx,ZHBEVX)(char *jobz, char *range, char *uplo, int *n, int *kd, npy_complex128 *ab, int *ldab, npy_complex128 *q, int *ldq, double *vl, double *vu, int *il, int *iu, double *abstol, int *m, double *w, npy_complex128 *z, int *ldz, npy_complex128 *work, double *rwork, int *iwork, int *ifail, int *info);
+void F_FUNC(zhbgst,ZHBGST)(char *vect, char *uplo, int *n, int *ka, int *kb, npy_complex128 *ab, int *ldab, npy_complex128 *bb, int *ldbb, npy_complex128 *x, int *ldx, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(zhbgv,ZHBGV)(char *jobz, char *uplo, int *n, int *ka, int *kb, npy_complex128 *ab, int *ldab, npy_complex128 *bb, int *ldbb, double *w, npy_complex128 *z, int *ldz, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(zhbgvd,ZHBGVD)(char *jobz, char *uplo, int *n, int *ka, int *kb, npy_complex128 *ab, int *ldab, npy_complex128 *bb, int *ldbb, double *w, npy_complex128 *z, int *ldz, npy_complex128 *work, int *lwork, double *rwork, int *lrwork, int *iwork, int *liwork, int *info);
+void F_FUNC(zhbgvx,ZHBGVX)(char *jobz, char *range, char *uplo, int *n, int *ka, int *kb, npy_complex128 *ab, int *ldab, npy_complex128 *bb, int *ldbb, npy_complex128 *q, int *ldq, double *vl, double *vu, int *il, int *iu, double *abstol, int *m, double *w, npy_complex128 *z, int *ldz, npy_complex128 *work, double *rwork, int *iwork, int *ifail, int *info);
+void F_FUNC(zhbtrd,ZHBTRD)(char *vect, char *uplo, int *n, int *kd, npy_complex128 *ab, int *ldab, double *d, double *e, npy_complex128 *q, int *ldq, npy_complex128 *work, int *info);
+void F_FUNC(zhecon,ZHECON)(char *uplo, int *n, npy_complex128 *a, int *lda, int *ipiv, double *anorm, double *rcond, npy_complex128 *work, int *info);
+void F_FUNC(zheequb,ZHEEQUB)(char *uplo, int *n, npy_complex128 *a, int *lda, double *s, double *scond, double *amax, npy_complex128 *work, int *info);
+void F_FUNC(zheev,ZHEEV)(char *jobz, char *uplo, int *n, npy_complex128 *a, int *lda, double *w, npy_complex128 *work, int *lwork, double *rwork, int *info);
+void F_FUNC(zheevd,ZHEEVD)(char *jobz, char *uplo, int *n, npy_complex128 *a, int *lda, double *w, npy_complex128 *work, int *lwork, double *rwork, int *lrwork, int *iwork, int *liwork, int *info);
+void F_FUNC(zheevr,ZHEEVR)(char *jobz, char *range, char *uplo, int *n, npy_complex128 *a, int *lda, double *vl, double *vu, int *il, int *iu, double *abstol, int *m, double *w, npy_complex128 *z, int *ldz, int *isuppz, npy_complex128 *work, int *lwork, double *rwork, int *lrwork, int *iwork, int *liwork, int *info);
+void F_FUNC(zheevx,ZHEEVX)(char *jobz, char *range, char *uplo, int *n, npy_complex128 *a, int *lda, double *vl, double *vu, int *il, int *iu, double *abstol, int *m, double *w, npy_complex128 *z, int *ldz, npy_complex128 *work, int *lwork, double *rwork, int *iwork, int *ifail, int *info);
+void F_FUNC(zhegs2,ZHEGS2)(int *itype, char *uplo, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, int *info);
+void F_FUNC(zhegst,ZHEGST)(int *itype, char *uplo, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, int *info);
+void F_FUNC(zhegv,ZHEGV)(int *itype, char *jobz, char *uplo, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, double *w, npy_complex128 *work, int *lwork, double *rwork, int *info);
+void F_FUNC(zhegvd,ZHEGVD)(int *itype, char *jobz, char *uplo, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, double *w, npy_complex128 *work, int *lwork, double *rwork, int *lrwork, int *iwork, int *liwork, int *info);
+void F_FUNC(zhegvx,ZHEGVX)(int *itype, char *jobz, char *range, char *uplo, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, double *vl, double *vu, int *il, int *iu, double *abstol, int *m, double *w, npy_complex128 *z, int *ldz, npy_complex128 *work, int *lwork, double *rwork, int *iwork, int *ifail, int *info);
+void F_FUNC(zherfs,ZHERFS)(char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *af, int *ldaf, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(zhesv,ZHESV)(char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zhesvx,ZHESVX)(char *fact, char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *af, int *ldaf, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *rcond, double *ferr, double *berr, npy_complex128 *work, int *lwork, double *rwork, int *info);
+void F_FUNC(zheswapr,ZHESWAPR)(char *uplo, int *n, npy_complex128 *a, int *lda, int *i1, int *i2);
+void F_FUNC(zhetd2,ZHETD2)(char *uplo, int *n, npy_complex128 *a, int *lda, double *d, double *e, npy_complex128 *tau, int *info);
+void F_FUNC(zhetf2,ZHETF2)(char *uplo, int *n, npy_complex128 *a, int *lda, int *ipiv, int *info);
+void F_FUNC(zhetrd,ZHETRD)(char *uplo, int *n, npy_complex128 *a, int *lda, double *d, double *e, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zhetrf,ZHETRF)(char *uplo, int *n, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zhetri,ZHETRI)(char *uplo, int *n, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *work, int *info);
+void F_FUNC(zhetri2,ZHETRI2)(char *uplo, int *n, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zhetri2x,ZHETRI2X)(char *uplo, int *n, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *work, int *nb, int *info);
+void F_FUNC(zhetrs,ZHETRS)(char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *b, int *ldb, int *info);
+void F_FUNC(zhetrs2,ZHETRS2)(char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *work, int *info);
+void F_FUNC(zhfrk,ZHFRK)(char *transr, char *uplo, char *trans, int *n, int *k, double *alpha, npy_complex128 *a, int *lda, double *beta, npy_complex128 *c);
+void F_FUNC(zhgeqz,ZHGEQZ)(char *job, char *compq, char *compz, int *n, int *ilo, int *ihi, npy_complex128 *h, int *ldh, npy_complex128 *t, int *ldt, npy_complex128 *alpha, npy_complex128 *beta, npy_complex128 *q, int *ldq, npy_complex128 *z, int *ldz, npy_complex128 *work, int *lwork, double *rwork, int *info);
+void F_FUNC(zhpcon,ZHPCON)(char *uplo, int *n, npy_complex128 *ap, int *ipiv, double *anorm, double *rcond, npy_complex128 *work, int *info);
+void F_FUNC(zhpev,ZHPEV)(char *jobz, char *uplo, int *n, npy_complex128 *ap, double *w, npy_complex128 *z, int *ldz, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(zhpevd,ZHPEVD)(char *jobz, char *uplo, int *n, npy_complex128 *ap, double *w, npy_complex128 *z, int *ldz, npy_complex128 *work, int *lwork, double *rwork, int *lrwork, int *iwork, int *liwork, int *info);
+void F_FUNC(zhpevx,ZHPEVX)(char *jobz, char *range, char *uplo, int *n, npy_complex128 *ap, double *vl, double *vu, int *il, int *iu, double *abstol, int *m, double *w, npy_complex128 *z, int *ldz, npy_complex128 *work, double *rwork, int *iwork, int *ifail, int *info);
+void F_FUNC(zhpgst,ZHPGST)(int *itype, char *uplo, int *n, npy_complex128 *ap, npy_complex128 *bp, int *info);
+void F_FUNC(zhpgv,ZHPGV)(int *itype, char *jobz, char *uplo, int *n, npy_complex128 *ap, npy_complex128 *bp, double *w, npy_complex128 *z, int *ldz, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(zhpgvd,ZHPGVD)(int *itype, char *jobz, char *uplo, int *n, npy_complex128 *ap, npy_complex128 *bp, double *w, npy_complex128 *z, int *ldz, npy_complex128 *work, int *lwork, double *rwork, int *lrwork, int *iwork, int *liwork, int *info);
+void F_FUNC(zhpgvx,ZHPGVX)(int *itype, char *jobz, char *range, char *uplo, int *n, npy_complex128 *ap, npy_complex128 *bp, double *vl, double *vu, int *il, int *iu, double *abstol, int *m, double *w, npy_complex128 *z, int *ldz, npy_complex128 *work, double *rwork, int *iwork, int *ifail, int *info);
+void F_FUNC(zhprfs,ZHPRFS)(char *uplo, int *n, int *nrhs, npy_complex128 *ap, npy_complex128 *afp, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(zhpsv,ZHPSV)(char *uplo, int *n, int *nrhs, npy_complex128 *ap, int *ipiv, npy_complex128 *b, int *ldb, int *info);
+void F_FUNC(zhpsvx,ZHPSVX)(char *fact, char *uplo, int *n, int *nrhs, npy_complex128 *ap, npy_complex128 *afp, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *rcond, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(zhptrd,ZHPTRD)(char *uplo, int *n, npy_complex128 *ap, double *d, double *e, npy_complex128 *tau, int *info);
+void F_FUNC(zhptrf,ZHPTRF)(char *uplo, int *n, npy_complex128 *ap, int *ipiv, int *info);
+void F_FUNC(zhptri,ZHPTRI)(char *uplo, int *n, npy_complex128 *ap, int *ipiv, npy_complex128 *work, int *info);
+void F_FUNC(zhptrs,ZHPTRS)(char *uplo, int *n, int *nrhs, npy_complex128 *ap, int *ipiv, npy_complex128 *b, int *ldb, int *info);
+void F_FUNC(zhsein,ZHSEIN)(char *side, char *eigsrc, char *initv, int *select, int *n, npy_complex128 *h, int *ldh, npy_complex128 *w, npy_complex128 *vl, int *ldvl, npy_complex128 *vr, int *ldvr, int *mm, int *m, npy_complex128 *work, double *rwork, int *ifaill, int *ifailr, int *info);
+void F_FUNC(zhseqr,ZHSEQR)(char *job, char *compz, int *n, int *ilo, int *ihi, npy_complex128 *h, int *ldh, npy_complex128 *w, npy_complex128 *z, int *ldz, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zlabrd,ZLABRD)(int *m, int *n, int *nb, npy_complex128 *a, int *lda, double *d, double *e, npy_complex128 *tauq, npy_complex128 *taup, npy_complex128 *x, int *ldx, npy_complex128 *y, int *ldy);
+void F_FUNC(zlacgv,ZLACGV)(int *n, npy_complex128 *x, int *incx);
+void F_FUNC(zlacn2,ZLACN2)(int *n, npy_complex128 *v, npy_complex128 *x, double *est, int *kase, int *isave);
+void F_FUNC(zlacon,ZLACON)(int *n, npy_complex128 *v, npy_complex128 *x, double *est, int *kase);
+void F_FUNC(zlacp2,ZLACP2)(char *uplo, int *m, int *n, double *a, int *lda, npy_complex128 *b, int *ldb);
+void F_FUNC(zlacpy,ZLACPY)(char *uplo, int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb);
+void F_FUNC(zlacrm,ZLACRM)(int *m, int *n, npy_complex128 *a, int *lda, double *b, int *ldb, npy_complex128 *c, int *ldc, double *rwork);
+void F_FUNC(zlacrt,ZLACRT)(int *n, npy_complex128 *cx, int *incx, npy_complex128 *cy, int *incy, npy_complex128 *c, npy_complex128 *s);
+void F_FUNC(zlaed0,ZLAED0)(int *qsiz, int *n, double *d, double *e, npy_complex128 *q, int *ldq, npy_complex128 *qstore, int *ldqs, double *rwork, int *iwork, int *info);
+void F_FUNC(zlaed7,ZLAED7)(int *n, int *cutpnt, int *qsiz, int *tlvls, int *curlvl, int *curpbm, double *d, npy_complex128 *q, int *ldq, double *rho, int *indxq, double *qstore, int *qptr, int *prmptr, int *perm, int *givptr, int *givcol, double *givnum, npy_complex128 *work, double *rwork, int *iwork, int *info);
+void F_FUNC(zlaed8,ZLAED8)(int *k, int *n, int *qsiz, npy_complex128 *q, int *ldq, double *d, double *rho, int *cutpnt, double *z, double *dlamda, npy_complex128 *q2, int *ldq2, double *w, int *indxp, int *indx, int *indxq, int *perm, int *givptr, int *givcol, double *givnum, int *info);
+void F_FUNC(zlaein,ZLAEIN)(int *rightv, int *noinit, int *n, npy_complex128 *h, int *ldh, npy_complex128 *w, npy_complex128 *v, npy_complex128 *b, int *ldb, double *rwork, double *eps3, double *smlnum, int *info);
+void F_FUNC(zlaesy,ZLAESY)(npy_complex128 *a, npy_complex128 *b, npy_complex128 *c, npy_complex128 *rt1, npy_complex128 *rt2, npy_complex128 *evscal, npy_complex128 *cs1, npy_complex128 *sn1);
+void F_FUNC(zlaev2,ZLAEV2)(npy_complex128 *a, npy_complex128 *b, npy_complex128 *c, double *rt1, double *rt2, double *cs1, npy_complex128 *sn1);
+void F_FUNC(zlag2c,ZLAG2C)(int *m, int *n, npy_complex128 *a, int *lda, npy_complex64 *sa, int *ldsa, int *info);
+void F_FUNC(zlags2,ZLAGS2)(int *upper, double *a1, npy_complex128 *a2, double *a3, double *b1, npy_complex128 *b2, double *b3, double *csu, npy_complex128 *snu, double *csv, npy_complex128 *snv, double *csq, npy_complex128 *snq);
+void F_FUNC(zlagtm,ZLAGTM)(char *trans, int *n, int *nrhs, double *alpha, npy_complex128 *dl, npy_complex128 *d, npy_complex128 *du, npy_complex128 *x, int *ldx, double *beta, npy_complex128 *b, int *ldb);
+void F_FUNC(zlahef,ZLAHEF)(char *uplo, int *n, int *nb, int *kb, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *w, int *ldw, int *info);
+void F_FUNC(zlahqr,ZLAHQR)(int *wantt, int *wantz, int *n, int *ilo, int *ihi, npy_complex128 *h, int *ldh, npy_complex128 *w, int *iloz, int *ihiz, npy_complex128 *z, int *ldz, int *info);
+void F_FUNC(zlahr2,ZLAHR2)(int *n, int *k, int *nb, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *t, int *ldt, npy_complex128 *y, int *ldy);
+void F_FUNC(zlaic1,ZLAIC1)(int *job, int *j, npy_complex128 *x, double *sest, npy_complex128 *w, npy_complex128 *gamma, double *sestpr, npy_complex128 *s, npy_complex128 *c);
+void F_FUNC(zlals0,ZLALS0)(int *icompq, int *nl, int *nr, int *sqre, int *nrhs, npy_complex128 *b, int *ldb, npy_complex128 *bx, int *ldbx, int *perm, int *givptr, int *givcol, int *ldgcol, double *givnum, int *ldgnum, double *poles, double *difl, double *difr, double *z, int *k, double *c, double *s, double *rwork, int *info);
+void F_FUNC(zlalsa,ZLALSA)(int *icompq, int *smlsiz, int *n, int *nrhs, npy_complex128 *b, int *ldb, npy_complex128 *bx, int *ldbx, double *u, int *ldu, double *vt, int *k, double *difl, double *difr, double *z, double *poles, int *givptr, int *givcol, int *ldgcol, int *perm, double *givnum, double *c, double *s, double *rwork, int *iwork, int *info);
+void F_FUNC(zlalsd,ZLALSD)(char *uplo, int *smlsiz, int *n, int *nrhs, double *d, double *e, npy_complex128 *b, int *ldb, double *rcond, int *rank, npy_complex128 *work, double *rwork, int *iwork, int *info);
+void F_FUNC(zlapll,ZLAPLL)(int *n, npy_complex128 *x, int *incx, npy_complex128 *y, int *incy, double *ssmin);
+void F_FUNC(zlapmr,ZLAPMR)(int *forwrd, int *m, int *n, npy_complex128 *x, int *ldx, int *k);
+void F_FUNC(zlapmt,ZLAPMT)(int *forwrd, int *m, int *n, npy_complex128 *x, int *ldx, int *k);
+void F_FUNC(zlaqgb,ZLAQGB)(int *m, int *n, int *kl, int *ku, npy_complex128 *ab, int *ldab, double *r, double *c, double *rowcnd, double *colcnd, double *amax, char *equed);
+void F_FUNC(zlaqge,ZLAQGE)(int *m, int *n, npy_complex128 *a, int *lda, double *r, double *c, double *rowcnd, double *colcnd, double *amax, char *equed);
+void F_FUNC(zlaqhb,ZLAQHB)(char *uplo, int *n, int *kd, npy_complex128 *ab, int *ldab, double *s, double *scond, double *amax, char *equed);
+void F_FUNC(zlaqhe,ZLAQHE)(char *uplo, int *n, npy_complex128 *a, int *lda, double *s, double *scond, double *amax, char *equed);
+void F_FUNC(zlaqhp,ZLAQHP)(char *uplo, int *n, npy_complex128 *ap, double *s, double *scond, double *amax, char *equed);
+void F_FUNC(zlaqp2,ZLAQP2)(int *m, int *n, int *offset, npy_complex128 *a, int *lda, int *jpvt, npy_complex128 *tau, double *vn1, double *vn2, npy_complex128 *work);
+void F_FUNC(zlaqps,ZLAQPS)(int *m, int *n, int *offset, int *nb, int *kb, npy_complex128 *a, int *lda, int *jpvt, npy_complex128 *tau, double *vn1, double *vn2, npy_complex128 *auxv, npy_complex128 *f, int *ldf);
+void F_FUNC(zlaqr0,ZLAQR0)(int *wantt, int *wantz, int *n, int *ilo, int *ihi, npy_complex128 *h, int *ldh, npy_complex128 *w, int *iloz, int *ihiz, npy_complex128 *z, int *ldz, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zlaqr1,ZLAQR1)(int *n, npy_complex128 *h, int *ldh, npy_complex128 *s1, npy_complex128 *s2, npy_complex128 *v);
+void F_FUNC(zlaqr2,ZLAQR2)(int *wantt, int *wantz, int *n, int *ktop, int *kbot, int *nw, npy_complex128 *h, int *ldh, int *iloz, int *ihiz, npy_complex128 *z, int *ldz, int *ns, int *nd, npy_complex128 *sh, npy_complex128 *v, int *ldv, int *nh, npy_complex128 *t, int *ldt, int *nv, npy_complex128 *wv, int *ldwv, npy_complex128 *work, int *lwork);
+void F_FUNC(zlaqr3,ZLAQR3)(int *wantt, int *wantz, int *n, int *ktop, int *kbot, int *nw, npy_complex128 *h, int *ldh, int *iloz, int *ihiz, npy_complex128 *z, int *ldz, int *ns, int *nd, npy_complex128 *sh, npy_complex128 *v, int *ldv, int *nh, npy_complex128 *t, int *ldt, int *nv, npy_complex128 *wv, int *ldwv, npy_complex128 *work, int *lwork);
+void F_FUNC(zlaqr4,ZLAQR4)(int *wantt, int *wantz, int *n, int *ilo, int *ihi, npy_complex128 *h, int *ldh, npy_complex128 *w, int *iloz, int *ihiz, npy_complex128 *z, int *ldz, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zlaqr5,ZLAQR5)(int *wantt, int *wantz, int *kacc22, int *n, int *ktop, int *kbot, int *nshfts, npy_complex128 *s, npy_complex128 *h, int *ldh, int *iloz, int *ihiz, npy_complex128 *z, int *ldz, npy_complex128 *v, int *ldv, npy_complex128 *u, int *ldu, int *nv, npy_complex128 *wv, int *ldwv, int *nh, npy_complex128 *wh, int *ldwh);
+void F_FUNC(zlaqsb,ZLAQSB)(char *uplo, int *n, int *kd, npy_complex128 *ab, int *ldab, double *s, double *scond, double *amax, char *equed);
+void F_FUNC(zlaqsp,ZLAQSP)(char *uplo, int *n, npy_complex128 *ap, double *s, double *scond, double *amax, char *equed);
+void F_FUNC(zlaqsy,ZLAQSY)(char *uplo, int *n, npy_complex128 *a, int *lda, double *s, double *scond, double *amax, char *equed);
+void F_FUNC(zlar1v,ZLAR1V)(int *n, int *b1, int *bn, double *lambda, double *d, double *l, double *ld, double *lld, double *pivmin, double *gaptol, npy_complex128 *z, int *wantnc, int *negcnt, double *ztz, double *mingma, int *r, int *isuppz, double *nrminv, double *resid, double *rqcorr, double *work);
+void F_FUNC(zlar2v,ZLAR2V)(int *n, npy_complex128 *x, npy_complex128 *y, npy_complex128 *z, int *incx, double *c, npy_complex128 *s, int *incc);
+void F_FUNC(zlarcm,ZLARCM)(int *m, int *n, double *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *c, int *ldc, double *rwork);
+void F_FUNC(zlarf,ZLARF)(char *side, int *m, int *n, npy_complex128 *v, int *incv, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work);
+void F_FUNC(zlarfb,ZLARFB)(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, npy_complex128 *v, int *ldv, npy_complex128 *t, int *ldt, npy_complex128 *c, int *ldc, npy_complex128 *work, int *ldwork);
+void F_FUNC(zlarfg,ZLARFG)(int *n, npy_complex128 *alpha, npy_complex128 *x, int *incx, npy_complex128 *tau);
+void F_FUNC(zlarfgp,ZLARFGP)(int *n, npy_complex128 *alpha, npy_complex128 *x, int *incx, npy_complex128 *tau);
+void F_FUNC(zlarft,ZLARFT)(char *direct, char *storev, int *n, int *k, npy_complex128 *v, int *ldv, npy_complex128 *tau, npy_complex128 *t, int *ldt);
+void F_FUNC(zlarfx,ZLARFX)(char *side, int *m, int *n, npy_complex128 *v, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work);
+void F_FUNC(zlargv,ZLARGV)(int *n, npy_complex128 *x, int *incx, npy_complex128 *y, int *incy, double *c, int *incc);
+void F_FUNC(zlarnv,ZLARNV)(int *idist, int *iseed, int *n, npy_complex128 *x);
+void F_FUNC(zlarrv,ZLARRV)(int *n, double *vl, double *vu, double *d, double *l, double *pivmin, int *isplit, int *m, int *dol, int *dou, double *minrgp, double *rtol1, double *rtol2, double *w, double *werr, double *wgap, int *iblock, int *indexw, double *gers, npy_complex128 *z, int *ldz, int *isuppz, double *work, int *iwork, int *info);
+void F_FUNC(zlartg,ZLARTG)(npy_complex128 *f, npy_complex128 *g, double *cs, npy_complex128 *sn, npy_complex128 *r);
+void F_FUNC(zlartv,ZLARTV)(int *n, npy_complex128 *x, int *incx, npy_complex128 *y, int *incy, double *c, npy_complex128 *s, int *incc);
+void F_FUNC(zlarz,ZLARZ)(char *side, int *m, int *n, int *l, npy_complex128 *v, int *incv, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work);
+void F_FUNC(zlarzb,ZLARZB)(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, npy_complex128 *v, int *ldv, npy_complex128 *t, int *ldt, npy_complex128 *c, int *ldc, npy_complex128 *work, int *ldwork);
+void F_FUNC(zlarzt,ZLARZT)(char *direct, char *storev, int *n, int *k, npy_complex128 *v, int *ldv, npy_complex128 *tau, npy_complex128 *t, int *ldt);
+void F_FUNC(zlascl,ZLASCL)(char *type_bn, int *kl, int *ku, double *cfrom, double *cto, int *m, int *n, npy_complex128 *a, int *lda, int *info);
+void F_FUNC(zlaset,ZLASET)(char *uplo, int *m, int *n, npy_complex128 *alpha, npy_complex128 *beta, npy_complex128 *a, int *lda);
+void F_FUNC(zlasr,ZLASR)(char *side, char *pivot, char *direct, int *m, int *n, double *c, double *s, npy_complex128 *a, int *lda);
+void F_FUNC(zlassq,ZLASSQ)(int *n, npy_complex128 *x, int *incx, double *scale, double *sumsq);
+void F_FUNC(zlaswp,ZLASWP)(int *n, npy_complex128 *a, int *lda, int *k1, int *k2, int *ipiv, int *incx);
+void F_FUNC(zlasyf,ZLASYF)(char *uplo, int *n, int *nb, int *kb, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *w, int *ldw, int *info);
+void F_FUNC(zlat2c,ZLAT2C)(char *uplo, int *n, npy_complex128 *a, int *lda, npy_complex64 *sa, int *ldsa, int *info);
+void F_FUNC(zlatbs,ZLATBS)(char *uplo, char *trans, char *diag, char *normin, int *n, int *kd, npy_complex128 *ab, int *ldab, npy_complex128 *x, double *scale, double *cnorm, int *info);
+void F_FUNC(zlatdf,ZLATDF)(int *ijob, int *n, npy_complex128 *z, int *ldz, npy_complex128 *rhs, double *rdsum, double *rdscal, int *ipiv, int *jpiv);
+void F_FUNC(zlatps,ZLATPS)(char *uplo, char *trans, char *diag, char *normin, int *n, npy_complex128 *ap, npy_complex128 *x, double *scale, double *cnorm, int *info);
+void F_FUNC(zlatrd,ZLATRD)(char *uplo, int *n, int *nb, npy_complex128 *a, int *lda, double *e, npy_complex128 *tau, npy_complex128 *w, int *ldw);
+void F_FUNC(zlatrs,ZLATRS)(char *uplo, char *trans, char *diag, char *normin, int *n, npy_complex128 *a, int *lda, npy_complex128 *x, double *scale, double *cnorm, int *info);
+void F_FUNC(zlatrz,ZLATRZ)(int *m, int *n, int *l, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work);
+void F_FUNC(zlauu2,ZLAUU2)(char *uplo, int *n, npy_complex128 *a, int *lda, int *info);
+void F_FUNC(zlauum,ZLAUUM)(char *uplo, int *n, npy_complex128 *a, int *lda, int *info);
+void F_FUNC(zpbcon,ZPBCON)(char *uplo, int *n, int *kd, npy_complex128 *ab, int *ldab, double *anorm, double *rcond, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(zpbequ,ZPBEQU)(char *uplo, int *n, int *kd, npy_complex128 *ab, int *ldab, double *s, double *scond, double *amax, int *info);
+void F_FUNC(zpbrfs,ZPBRFS)(char *uplo, int *n, int *kd, int *nrhs, npy_complex128 *ab, int *ldab, npy_complex128 *afb, int *ldafb, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(zpbstf,ZPBSTF)(char *uplo, int *n, int *kd, npy_complex128 *ab, int *ldab, int *info);
+void F_FUNC(zpbsv,ZPBSV)(char *uplo, int *n, int *kd, int *nrhs, npy_complex128 *ab, int *ldab, npy_complex128 *b, int *ldb, int *info);
+void F_FUNC(zpbsvx,ZPBSVX)(char *fact, char *uplo, int *n, int *kd, int *nrhs, npy_complex128 *ab, int *ldab, npy_complex128 *afb, int *ldafb, char *equed, double *s, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *rcond, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(zpbtf2,ZPBTF2)(char *uplo, int *n, int *kd, npy_complex128 *ab, int *ldab, int *info);
+void F_FUNC(zpbtrf,ZPBTRF)(char *uplo, int *n, int *kd, npy_complex128 *ab, int *ldab, int *info);
+void F_FUNC(zpbtrs,ZPBTRS)(char *uplo, int *n, int *kd, int *nrhs, npy_complex128 *ab, int *ldab, npy_complex128 *b, int *ldb, int *info);
+void F_FUNC(zpftrf,ZPFTRF)(char *transr, char *uplo, int *n, npy_complex128 *a, int *info);
+void F_FUNC(zpftri,ZPFTRI)(char *transr, char *uplo, int *n, npy_complex128 *a, int *info);
+void F_FUNC(zpftrs,ZPFTRS)(char *transr, char *uplo, int *n, int *nrhs, npy_complex128 *a, npy_complex128 *b, int *ldb, int *info);
+void F_FUNC(zpocon,ZPOCON)(char *uplo, int *n, npy_complex128 *a, int *lda, double *anorm, double *rcond, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(zpoequ,ZPOEQU)(int *n, npy_complex128 *a, int *lda, double *s, double *scond, double *amax, int *info);
+void F_FUNC(zpoequb,ZPOEQUB)(int *n, npy_complex128 *a, int *lda, double *s, double *scond, double *amax, int *info);
+void F_FUNC(zporfs,ZPORFS)(char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *af, int *ldaf, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(zposv,ZPOSV)(char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, int *info);
+void F_FUNC(zposvx,ZPOSVX)(char *fact, char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *af, int *ldaf, char *equed, double *s, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *rcond, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(zpotf2,ZPOTF2)(char *uplo, int *n, npy_complex128 *a, int *lda, int *info);
+void F_FUNC(zpotrf,ZPOTRF)(char *uplo, int *n, npy_complex128 *a, int *lda, int *info);
+void F_FUNC(zpotri,ZPOTRI)(char *uplo, int *n, npy_complex128 *a, int *lda, int *info);
+void F_FUNC(zpotrs,ZPOTRS)(char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, int *info);
+void F_FUNC(zppcon,ZPPCON)(char *uplo, int *n, npy_complex128 *ap, double *anorm, double *rcond, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(zppequ,ZPPEQU)(char *uplo, int *n, npy_complex128 *ap, double *s, double *scond, double *amax, int *info);
+void F_FUNC(zpprfs,ZPPRFS)(char *uplo, int *n, int *nrhs, npy_complex128 *ap, npy_complex128 *afp, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(zppsv,ZPPSV)(char *uplo, int *n, int *nrhs, npy_complex128 *ap, npy_complex128 *b, int *ldb, int *info);
+void F_FUNC(zppsvx,ZPPSVX)(char *fact, char *uplo, int *n, int *nrhs, npy_complex128 *ap, npy_complex128 *afp, char *equed, double *s, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *rcond, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(zpptrf,ZPPTRF)(char *uplo, int *n, npy_complex128 *ap, int *info);
+void F_FUNC(zpptri,ZPPTRI)(char *uplo, int *n, npy_complex128 *ap, int *info);
+void F_FUNC(zpptrs,ZPPTRS)(char *uplo, int *n, int *nrhs, npy_complex128 *ap, npy_complex128 *b, int *ldb, int *info);
+void F_FUNC(zpstf2,ZPSTF2)(char *uplo, int *n, npy_complex128 *a, int *lda, int *piv, int *rank, double *tol, double *work, int *info);
+void F_FUNC(zpstrf,ZPSTRF)(char *uplo, int *n, npy_complex128 *a, int *lda, int *piv, int *rank, double *tol, double *work, int *info);
+void F_FUNC(zptcon,ZPTCON)(int *n, double *d, npy_complex128 *e, double *anorm, double *rcond, double *rwork, int *info);
+void F_FUNC(zpteqr,ZPTEQR)(char *compz, int *n, double *d, double *e, npy_complex128 *z, int *ldz, double *work, int *info);
+void F_FUNC(zptrfs,ZPTRFS)(char *uplo, int *n, int *nrhs, double *d, npy_complex128 *e, double *df, npy_complex128 *ef, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(zptsv,ZPTSV)(int *n, int *nrhs, double *d, npy_complex128 *e, npy_complex128 *b, int *ldb, int *info);
+void F_FUNC(zptsvx,ZPTSVX)(char *fact, int *n, int *nrhs, double *d, npy_complex128 *e, double *df, npy_complex128 *ef, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *rcond, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(zpttrf,ZPTTRF)(int *n, double *d, npy_complex128 *e, int *info);
+void F_FUNC(zpttrs,ZPTTRS)(char *uplo, int *n, int *nrhs, double *d, npy_complex128 *e, npy_complex128 *b, int *ldb, int *info);
+void F_FUNC(zptts2,ZPTTS2)(int *iuplo, int *n, int *nrhs, double *d, npy_complex128 *e, npy_complex128 *b, int *ldb);
+void F_FUNC(zrot,ZROT)(int *n, npy_complex128 *cx, int *incx, npy_complex128 *cy, int *incy, double *c, npy_complex128 *s);
+void F_FUNC(zspcon,ZSPCON)(char *uplo, int *n, npy_complex128 *ap, int *ipiv, double *anorm, double *rcond, npy_complex128 *work, int *info);
+void F_FUNC(zspmv,ZSPMV)(char *uplo, int *n, npy_complex128 *alpha, npy_complex128 *ap, npy_complex128 *x, int *incx, npy_complex128 *beta, npy_complex128 *y, int *incy);
+void F_FUNC(zspr,ZSPR)(char *uplo, int *n, npy_complex128 *alpha, npy_complex128 *x, int *incx, npy_complex128 *ap);
+void F_FUNC(zsprfs,ZSPRFS)(char *uplo, int *n, int *nrhs, npy_complex128 *ap, npy_complex128 *afp, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(zspsv,ZSPSV)(char *uplo, int *n, int *nrhs, npy_complex128 *ap, int *ipiv, npy_complex128 *b, int *ldb, int *info);
+void F_FUNC(zspsvx,ZSPSVX)(char *fact, char *uplo, int *n, int *nrhs, npy_complex128 *ap, npy_complex128 *afp, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *rcond, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(zsptrf,ZSPTRF)(char *uplo, int *n, npy_complex128 *ap, int *ipiv, int *info);
+void F_FUNC(zsptri,ZSPTRI)(char *uplo, int *n, npy_complex128 *ap, int *ipiv, npy_complex128 *work, int *info);
+void F_FUNC(zsptrs,ZSPTRS)(char *uplo, int *n, int *nrhs, npy_complex128 *ap, int *ipiv, npy_complex128 *b, int *ldb, int *info);
+void F_FUNC(zstedc,ZSTEDC)(char *compz, int *n, double *d, double *e, npy_complex128 *z, int *ldz, npy_complex128 *work, int *lwork, double *rwork, int *lrwork, int *iwork, int *liwork, int *info);
+void F_FUNC(zstegr,ZSTEGR)(char *jobz, char *range, int *n, double *d, double *e, double *vl, double *vu, int *il, int *iu, double *abstol, int *m, double *w, npy_complex128 *z, int *ldz, int *isuppz, double *work, int *lwork, int *iwork, int *liwork, int *info);
+void F_FUNC(zstein,ZSTEIN)(int *n, double *d, double *e, int *m, double *w, int *iblock, int *isplit, npy_complex128 *z, int *ldz, double *work, int *iwork, int *ifail, int *info);
+void F_FUNC(zstemr,ZSTEMR)(char *jobz, char *range, int *n, double *d, double *e, double *vl, double *vu, int *il, int *iu, int *m, double *w, npy_complex128 *z, int *ldz, int *nzc, int *isuppz, int *tryrac, double *work, int *lwork, int *iwork, int *liwork, int *info);
+void F_FUNC(zsteqr,ZSTEQR)(char *compz, int *n, double *d, double *e, npy_complex128 *z, int *ldz, double *work, int *info);
+void F_FUNC(zsycon,ZSYCON)(char *uplo, int *n, npy_complex128 *a, int *lda, int *ipiv, double *anorm, double *rcond, npy_complex128 *work, int *info);
+void F_FUNC(zsyconv,ZSYCONV)(char *uplo, char *way, int *n, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *work, int *info);
+void F_FUNC(zsyequb,ZSYEQUB)(char *uplo, int *n, npy_complex128 *a, int *lda, double *s, double *scond, double *amax, npy_complex128 *work, int *info);
+void F_FUNC(zsymv,ZSYMV)(char *uplo, int *n, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *x, int *incx, npy_complex128 *beta, npy_complex128 *y, int *incy);
+void F_FUNC(zsyr,ZSYR)(char *uplo, int *n, npy_complex128 *alpha, npy_complex128 *x, int *incx, npy_complex128 *a, int *lda);
+void F_FUNC(zsyrfs,ZSYRFS)(char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *af, int *ldaf, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(zsysv,ZSYSV)(char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zsysvx,ZSYSVX)(char *fact, char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *af, int *ldaf, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *rcond, double *ferr, double *berr, npy_complex128 *work, int *lwork, double *rwork, int *info);
+void F_FUNC(zsyswapr,ZSYSWAPR)(char *uplo, int *n, npy_complex128 *a, int *lda, int *i1, int *i2);
+void F_FUNC(zsytf2,ZSYTF2)(char *uplo, int *n, npy_complex128 *a, int *lda, int *ipiv, int *info);
+void F_FUNC(zsytrf,ZSYTRF)(char *uplo, int *n, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zsytri,ZSYTRI)(char *uplo, int *n, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *work, int *info);
+void F_FUNC(zsytri2,ZSYTRI2)(char *uplo, int *n, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zsytri2x,ZSYTRI2X)(char *uplo, int *n, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *work, int *nb, int *info);
+void F_FUNC(zsytrs,ZSYTRS)(char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *b, int *ldb, int *info);
+void F_FUNC(zsytrs2,ZSYTRS2)(char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *work, int *info);
+void F_FUNC(ztbcon,ZTBCON)(char *norm, char *uplo, char *diag, int *n, int *kd, npy_complex128 *ab, int *ldab, double *rcond, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(ztbrfs,ZTBRFS)(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, npy_complex128 *ab, int *ldab, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(ztbtrs,ZTBTRS)(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, npy_complex128 *ab, int *ldab, npy_complex128 *b, int *ldb, int *info);
+void F_FUNC(ztfsm,ZTFSM)(char *transr, char *side, char *uplo, char *trans, char *diag, int *m, int *n, npy_complex128 *alpha, npy_complex128 *a, npy_complex128 *b, int *ldb);
+void F_FUNC(ztftri,ZTFTRI)(char *transr, char *uplo, char *diag, int *n, npy_complex128 *a, int *info);
+void F_FUNC(ztfttp,ZTFTTP)(char *transr, char *uplo, int *n, npy_complex128 *arf, npy_complex128 *ap, int *info);
+void F_FUNC(ztfttr,ZTFTTR)(char *transr, char *uplo, int *n, npy_complex128 *arf, npy_complex128 *a, int *lda, int *info);
+void F_FUNC(ztgevc,ZTGEVC)(char *side, char *howmny, int *select, int *n, npy_complex128 *s, int *lds, npy_complex128 *p, int *ldp, npy_complex128 *vl, int *ldvl, npy_complex128 *vr, int *ldvr, int *mm, int *m, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(ztgex2,ZTGEX2)(int *wantq, int *wantz, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *q, int *ldq, npy_complex128 *z, int *ldz, int *j1, int *info);
+void F_FUNC(ztgexc,ZTGEXC)(int *wantq, int *wantz, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *q, int *ldq, npy_complex128 *z, int *ldz, int *ifst, int *ilst, int *info);
+void F_FUNC(ztgsen,ZTGSEN)(int *ijob, int *wantq, int *wantz, int *select, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *alpha, npy_complex128 *beta, npy_complex128 *q, int *ldq, npy_complex128 *z, int *ldz, int *m, double *pl, double *pr, double *dif, npy_complex128 *work, int *lwork, int *iwork, int *liwork, int *info);
+void F_FUNC(ztgsja,ZTGSJA)(char *jobu, char *jobv, char *jobq, int *m, int *p, int *n, int *k, int *l, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, double *tola, double *tolb, double *alpha, double *beta, npy_complex128 *u, int *ldu, npy_complex128 *v, int *ldv, npy_complex128 *q, int *ldq, npy_complex128 *work, int *ncycle, int *info);
+void F_FUNC(ztgsna,ZTGSNA)(char *job, char *howmny, int *select, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *vl, int *ldvl, npy_complex128 *vr, int *ldvr, double *s, double *dif, int *mm, int *m, npy_complex128 *work, int *lwork, int *iwork, int *info);
+void F_FUNC(ztgsy2,ZTGSY2)(char *trans, int *ijob, int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *c, int *ldc, npy_complex128 *d, int *ldd, npy_complex128 *e, int *lde, npy_complex128 *f, int *ldf, double *scale, double *rdsum, double *rdscal, int *info);
+void F_FUNC(ztgsyl,ZTGSYL)(char *trans, int *ijob, int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *c, int *ldc, npy_complex128 *d, int *ldd, npy_complex128 *e, int *lde, npy_complex128 *f, int *ldf, double *scale, double *dif, npy_complex128 *work, int *lwork, int *iwork, int *info);
+void F_FUNC(ztpcon,ZTPCON)(char *norm, char *uplo, char *diag, int *n, npy_complex128 *ap, double *rcond, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(ztpmqrt,ZTPMQRT)(char *side, char *trans, int *m, int *n, int *k, int *l, int *nb, npy_complex128 *v, int *ldv, npy_complex128 *t, int *ldt, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *work, int *info);
+void F_FUNC(ztpqrt,ZTPQRT)(int *m, int *n, int *l, int *nb, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *t, int *ldt, npy_complex128 *work, int *info);
+void F_FUNC(ztpqrt2,ZTPQRT2)(int *m, int *n, int *l, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *t, int *ldt, int *info);
+void F_FUNC(ztprfb,ZTPRFB)(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, npy_complex128 *v, int *ldv, npy_complex128 *t, int *ldt, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *work, int *ldwork);
+void F_FUNC(ztprfs,ZTPRFS)(char *uplo, char *trans, char *diag, int *n, int *nrhs, npy_complex128 *ap, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(ztptri,ZTPTRI)(char *uplo, char *diag, int *n, npy_complex128 *ap, int *info);
+void F_FUNC(ztptrs,ZTPTRS)(char *uplo, char *trans, char *diag, int *n, int *nrhs, npy_complex128 *ap, npy_complex128 *b, int *ldb, int *info);
+void F_FUNC(ztpttf,ZTPTTF)(char *transr, char *uplo, int *n, npy_complex128 *ap, npy_complex128 *arf, int *info);
+void F_FUNC(ztpttr,ZTPTTR)(char *uplo, int *n, npy_complex128 *ap, npy_complex128 *a, int *lda, int *info);
+void F_FUNC(ztrcon,ZTRCON)(char *norm, char *uplo, char *diag, int *n, npy_complex128 *a, int *lda, double *rcond, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(ztrevc,ZTREVC)(char *side, char *howmny, int *select, int *n, npy_complex128 *t, int *ldt, npy_complex128 *vl, int *ldvl, npy_complex128 *vr, int *ldvr, int *mm, int *m, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(ztrexc,ZTREXC)(char *compq, int *n, npy_complex128 *t, int *ldt, npy_complex128 *q, int *ldq, int *ifst, int *ilst, int *info);
+void F_FUNC(ztrrfs,ZTRRFS)(char *uplo, char *trans, char *diag, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info);
+void F_FUNC(ztrsen,ZTRSEN)(char *job, char *compq, int *select, int *n, npy_complex128 *t, int *ldt, npy_complex128 *q, int *ldq, npy_complex128 *w, int *m, double *s, double *sep, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(ztrsna,ZTRSNA)(char *job, char *howmny, int *select, int *n, npy_complex128 *t, int *ldt, npy_complex128 *vl, int *ldvl, npy_complex128 *vr, int *ldvr, double *s, double *sep, int *mm, int *m, npy_complex128 *work, int *ldwork, double *rwork, int *info);
+void F_FUNC(ztrsyl,ZTRSYL)(char *trana, char *tranb, int *isgn, int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *c, int *ldc, double *scale, int *info);
+void F_FUNC(ztrti2,ZTRTI2)(char *uplo, char *diag, int *n, npy_complex128 *a, int *lda, int *info);
+void F_FUNC(ztrtri,ZTRTRI)(char *uplo, char *diag, int *n, npy_complex128 *a, int *lda, int *info);
+void F_FUNC(ztrtrs,ZTRTRS)(char *uplo, char *trans, char *diag, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, int *info);
+void F_FUNC(ztrttf,ZTRTTF)(char *transr, char *uplo, int *n, npy_complex128 *a, int *lda, npy_complex128 *arf, int *info);
+void F_FUNC(ztrttp,ZTRTTP)(char *uplo, int *n, npy_complex128 *a, int *lda, npy_complex128 *ap, int *info);
+void F_FUNC(ztzrzf,ZTZRZF)(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zunbdb,ZUNBDB)(char *trans, char *signs, int *m, int *p, int *q, npy_complex128 *x11, int *ldx11, npy_complex128 *x12, int *ldx12, npy_complex128 *x21, int *ldx21, npy_complex128 *x22, int *ldx22, double *theta, double *phi, npy_complex128 *taup1, npy_complex128 *taup2, npy_complex128 *tauq1, npy_complex128 *tauq2, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zuncsd,ZUNCSD)(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, char *signs, int *m, int *p, int *q, npy_complex128 *x11, int *ldx11, npy_complex128 *x12, int *ldx12, npy_complex128 *x21, int *ldx21, npy_complex128 *x22, int *ldx22, double *theta, npy_complex128 *u1, int *ldu1, npy_complex128 *u2, int *ldu2, npy_complex128 *v1t, int *ldv1t, npy_complex128 *v2t, int *ldv2t, npy_complex128 *work, int *lwork, double *rwork, int *lrwork, int *iwork, int *info);
+void F_FUNC(zung2l,ZUNG2L)(int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *info);
+void F_FUNC(zung2r,ZUNG2R)(int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *info);
+void F_FUNC(zungbr,ZUNGBR)(char *vect, int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zunghr,ZUNGHR)(int *n, int *ilo, int *ihi, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zungl2,ZUNGL2)(int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *info);
+void F_FUNC(zunglq,ZUNGLQ)(int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zungql,ZUNGQL)(int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zungqr,ZUNGQR)(int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zungr2,ZUNGR2)(int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *info);
+void F_FUNC(zungrq,ZUNGRQ)(int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zungtr,ZUNGTR)(char *uplo, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zunm2l,ZUNM2L)(char *side, char *trans, int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *info);
+void F_FUNC(zunm2r,ZUNM2R)(char *side, char *trans, int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *info);
+void F_FUNC(zunmbr,ZUNMBR)(char *vect, char *side, char *trans, int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zunmhr,ZUNMHR)(char *side, char *trans, int *m, int *n, int *ilo, int *ihi, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zunml2,ZUNML2)(char *side, char *trans, int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *info);
+void F_FUNC(zunmlq,ZUNMLQ)(char *side, char *trans, int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zunmql,ZUNMQL)(char *side, char *trans, int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zunmqr,ZUNMQR)(char *side, char *trans, int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zunmr2,ZUNMR2)(char *side, char *trans, int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *info);
+void F_FUNC(zunmr3,ZUNMR3)(char *side, char *trans, int *m, int *n, int *k, int *l, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *info);
+void F_FUNC(zunmrq,ZUNMRQ)(char *side, char *trans, int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zunmrz,ZUNMRZ)(char *side, char *trans, int *m, int *n, int *k, int *l, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zunmtr,ZUNMTR)(char *side, char *uplo, char *trans, int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *lwork, int *info);
+void F_FUNC(zupgtr,ZUPGTR)(char *uplo, int *n, npy_complex128 *ap, npy_complex128 *tau, npy_complex128 *q, int *ldq, npy_complex128 *work, int *info);
+void F_FUNC(zupmtr,ZUPMTR)(char *side, char *uplo, char *trans, int *m, int *n, npy_complex128 *ap, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *info);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/_matfuncs.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/_matfuncs.py
new file mode 100644
index 00000000..45e5757b
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/_matfuncs.py
@@ -0,0 +1,881 @@
+#
+# Author: Travis Oliphant, March 2002
+#
+from itertools import product
+
+import numpy as np
+from numpy import (Inf, dot, diag, prod, logical_not, ravel, transpose,
+                   conjugate, absolute, amax, sign, isfinite)
+from numpy.lib.scimath import sqrt as csqrt
+
+# Local imports
+from scipy.linalg import LinAlgError, bandwidth
+from ._misc import norm
+from ._basic import solve, inv
+from ._special_matrices import triu
+from ._decomp_svd import svd
+from ._decomp_schur import schur, rsf2csf
+from ._expm_frechet import expm_frechet, expm_cond
+from ._matfuncs_sqrtm import sqrtm
+from ._matfuncs_expm import pick_pade_structure, pade_UV_calc
+
+__all__ = ['expm', 'cosm', 'sinm', 'tanm', 'coshm', 'sinhm', 'tanhm', 'logm',
+           'funm', 'signm', 'sqrtm', 'fractional_matrix_power', 'expm_frechet',
+           'expm_cond', 'khatri_rao']
+
+eps = np.finfo('d').eps
+feps = np.finfo('f').eps
+
+_array_precision = {'i': 1, 'l': 1, 'f': 0, 'd': 1, 'F': 0, 'D': 1}
+
+
+###############################################################################
+# Utility functions.
+
+
+def _asarray_square(A):
+    """
+    Wraps asarray with the extra requirement that the input be a square matrix.
+
+    The motivation is that the matfuncs module has real functions that have
+    been lifted to square matrix functions.
+
+    Parameters
+    ----------
+    A : array_like
+        A square matrix.
+
+    Returns
+    -------
+    out : ndarray
+        An ndarray copy or view or other representation of A.
+
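+    Examples
+    --------
+    Illustrative only (this is a private helper):
+
+    >>> _asarray_square([[1, 2], [3, 4]]).shape
+    (2, 2)
+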
+    """
+    A = np.asarray(A)
+    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
+        raise ValueError('expected square array_like input')
+    return A
+
+
+def _maybe_real(A, B, tol=None):
+    """
+    Return either B or the real part of B, depending on properties of A and B.
+
+    The motivation is that B has been computed as a complicated function of A,
+    and B may be perturbed by negligible imaginary components.
+    If A is real and B is complex with small imaginary components,
+    then return a real copy of B.  The assumption in that case would be that
+    the imaginary components of B are numerical artifacts.
+
+    Parameters
+    ----------
+    A : ndarray
+        Input array whose type is to be checked as real vs. complex.
+    B : ndarray
+        Array to be returned, possibly without its imaginary part.
+    tol : float
+        Absolute tolerance.
+
+    Returns
+    -------
+    out : real or complex array
+        Either the input array B or only the real part of the input array B.
+
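+    Examples
+    --------
+    Illustrative only (this is a private helper). With a real ``A`` and a
+    ``B`` whose imaginary part is below the tolerance, the real part is
+    returned:
+
+    >>> import numpy as np
+    >>> A = np.eye(2)
+    >>> B = np.eye(2) + 1e-14j
+    >>> _maybe_real(A, B)
+    array([[1., 0.],
+           [0., 1.]])
+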
+    """
+    # Note that booleans and integers compare as real.
+    if np.isrealobj(A) and np.iscomplexobj(B):
+        if tol is None:
+            tol = {0:feps*1e3, 1:eps*1e6}[_array_precision[B.dtype.char]]
+        if np.allclose(B.imag, 0.0, atol=tol):
+            B = B.real
+    return B
+
+
+###############################################################################
+# Matrix functions.
+
+
+def fractional_matrix_power(A, t):
+    """
+    Compute the fractional power of a matrix.
+
+    Proceeds according to the discussion in section (6) of [1]_.
+
+    Parameters
+    ----------
+    A : (N, N) array_like
+        Matrix whose fractional power to evaluate.
+    t : float
+        Fractional power.
+
+    Returns
+    -------
+    X : (N, N) array_like
+        The fractional power of the matrix.
+
+    References
+    ----------
+    .. [1] Nicholas J. Higham and Lijing Lin (2011)
+           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
+           SIAM Journal on Matrix Analysis and Applications,
+           32 (3). pp. 1056-1078. ISSN 0895-4798
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import fractional_matrix_power
+    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
+    >>> b = fractional_matrix_power(a, 0.5)
+    >>> b
+    array([[ 0.75592895,  1.13389342],
+           [ 0.37796447,  1.88982237]])
+    >>> np.dot(b, b)      # Verify square root
+    array([[ 1.,  3.],
+           [ 1.,  4.]])
+
+    """
+    # This fixes some issue with imports;
+    # this function calls onenormest which is in scipy.sparse.
+    A = _asarray_square(A)
+    import scipy.linalg._matfuncs_inv_ssq
+    return scipy.linalg._matfuncs_inv_ssq._fractional_matrix_power(A, t)
+
+
+def logm(A, disp=True):
+    """
+    Compute matrix logarithm.
+
+    The matrix logarithm is the inverse of
+    expm: expm(logm(`A`)) == `A`
+
+    Parameters
+    ----------
+    A : (N, N) array_like
+        Matrix whose logarithm to evaluate
+    disp : bool, optional
+        Print a warning if the estimated error in the result is large,
+        instead of returning the estimated error. (Default: True)
+
+    Returns
+    -------
+    logm : (N, N) ndarray
+        Matrix logarithm of `A`
+    errest : float
+        (if disp == False)
+
+        1-norm of the estimated error, ||err||_1 / ||A||_1
+
+    References
+    ----------
+    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012)
+           "Improved Inverse Scaling and Squaring Algorithms
+           for the Matrix Logarithm."
+           SIAM Journal on Scientific Computing, 34 (4). C152-C169.
+           ISSN 1095-7197
+
+    .. [2] Nicholas J. Higham (2008)
+           "Functions of Matrices: Theory and Computation"
+           ISBN 978-0-898716-46-7
+
+    .. [3] Nicholas J. Higham and Lijing Lin (2011)
+           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
+           SIAM Journal on Matrix Analysis and Applications,
+           32 (3). pp. 1056-1078. ISSN 0895-4798
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import logm, expm
+    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
+    >>> b = logm(a)
+    >>> b
+    array([[-1.02571087,  2.05142174],
+           [ 0.68380725,  1.02571087]])
+    >>> expm(b)         # Verify expm(logm(a)) returns a
+    array([[ 1.,  3.],
+           [ 1.,  4.]])
+
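+    With ``disp=False`` the relative error estimate is returned as well;
+    an illustrative check:
+
+    >>> F, errest = logm(a, disp=False)
+    >>> bool(errest < 1e-10)
+    True
+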
+    """
+    A = _asarray_square(A)
+    # Import inside the function to avoid a circular import.
+    import scipy.linalg._matfuncs_inv_ssq
+    F = scipy.linalg._matfuncs_inv_ssq._logm(A)
+    F = _maybe_real(A, F)
+    errtol = 1000*eps
+    # TODO: use a better error approximation
+    errest = norm(expm(F)-A,1) / norm(A,1)
+    if disp:
+        if not isfinite(errest) or errest >= errtol:
+            print("logm result may be inaccurate, approximate err =", errest)
+        return F
+    else:
+        return F, errest
+
+
+def expm(A):
+    """Compute the matrix exponential of an array.
+
+    Parameters
+    ----------
+    A : ndarray
+        Input with last two dimensions square, i.e., of shape ``(..., n, n)``.
+
+    Returns
+    -------
+    eA : ndarray
+        The resulting matrix exponential with the same shape as ``A``.
+
+    Notes
+    -----
+    Implements the algorithm given in [1]_, which is essentially a Pade
+    approximation with a variable order that is chosen based on the array
+    data.
+
+    For input with size ``n``, the memory usage is in the worst case in the
+    order of ``8*(n**2)``. If the input dtype is not single- or
+    double-precision real or complex, the data is copied to a new array.
+
+    For cases ``n >= 400``, the cost of an exact 1-norm computation breaks
+    even with that of 1-norm estimation, and from that point on the
+    estimation scheme given in [2]_ is used to decide on the approximation
+    order.
+
+    References
+    ----------
+    .. [1] Awad H. Al-Mohy and Nicholas J. Higham, (2009), "A New Scaling
+           and Squaring Algorithm for the Matrix Exponential", SIAM J. Matrix
+           Anal. Appl. 31(3):970-989, :doi:`10.1137/09074721X`
+
+    .. [2] Nicholas J. Higham and Francoise Tisseur (2000), "A Block Algorithm
+           for Matrix 1-Norm Estimation, with an Application to 1-Norm
+           Pseudospectra." SIAM J. Matrix Anal. Appl. 21(4):1185-1201,
+           :doi:`10.1137/S0895479899356080`
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import expm, sinm, cosm
+
+    Matrix version of the formula exp(0) = 1:
+
+    >>> expm(np.zeros((3, 2, 2)))
+    array([[[1., 0.],
+            [0., 1.]],
+    <BLANKLINE>
+           [[1., 0.],
+            [0., 1.]],
+    <BLANKLINE>
+           [[1., 0.],
+            [0., 1.]]])
+
+    Euler's identity (exp(i*theta) = cos(theta) + i*sin(theta))
+    applied to a matrix:
+
+    >>> a = np.array([[1.0, 2.0], [-1.0, 3.0]])
+    >>> expm(1j*a)
+    array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j],
+           [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]])
+    >>> cosm(a) + 1j*sinm(a)
+    array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j],
+           [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]])
+
+    """
+    a = np.asarray(A)
+    if a.size == 1 and a.ndim < 2:
+        return np.array([[np.exp(a.item())]])
+
+    if a.ndim < 2:
+        raise LinAlgError('The input array must be at least two-dimensional')
+    if a.shape[-1] != a.shape[-2]:
+        raise LinAlgError('Last 2 dimensions of the array must be square')
+    n = a.shape[-1]
+    # Empty array
+    if min(*a.shape) == 0:
+        return np.empty_like(a)
+
+    # Scalar case
+    if a.shape[-2:] == (1, 1):
+        return np.exp(a)
+
+    if not np.issubdtype(a.dtype, np.inexact):
+        a = a.astype(float)
+    elif a.dtype == np.float16:
+        a = a.astype(np.float32)
+
+    # Explicit formula for 2x2 case, formula (2.2) in [1]
+    # without Kahan's method, numerical instabilities can occur.
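+    # With mu = sqrt((a1 - a4)**2 + 4*a2*a3)/2 and sinch(x) = sinh(x)/x
+    # (sinch(0) = 1), the closed form computed below is
+    #   expm([[a1, a2], [a3, a4]]) = exp((a1 + a4)/2) *
+    #       [[cosh(mu) + (a1 - a4)/2 * sinch(mu),  a2 * sinch(mu)],
+    #        [a3 * sinch(mu),  cosh(mu) - (a1 - a4)/2 * sinch(mu)]]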
+    if a.shape[-2:] == (2, 2):
+        a1, a2, a3, a4 = (a[..., [0], [0]],
+                          a[..., [0], [1]],
+                          a[..., [1], [0]],
+                          a[..., [1], [1]])
+        mu = csqrt((a1-a4)**2 + 4*a2*a3)/2.  # csqrt slow but handles neg.vals
+
+        eApD2 = np.exp((a1+a4)/2.)
+        AmD2 = (a1 - a4)/2.
+        coshMu = np.cosh(mu)
+        sinchMu = np.ones_like(coshMu)
+        mask = mu != 0
+        sinchMu[mask] = np.sinh(mu[mask]) / mu[mask]
+        eA = np.empty((a.shape), dtype=mu.dtype)
+        eA[..., [0], [0]] = eApD2 * (coshMu + AmD2*sinchMu)
+        eA[..., [0], [1]] = eApD2 * a2 * sinchMu
+        eA[..., [1], [0]] = eApD2 * a3 * sinchMu
+        eA[..., [1], [1]] = eApD2 * (coshMu - AmD2*sinchMu)
+        if np.isrealobj(a):
+            return eA.real
+        return eA
+
+    # Larger problems, possibly with arbitrary stacked (batch) dimensions.
+    n = a.shape[-1]
+    eA = np.empty(a.shape, dtype=a.dtype)
+    # working memory to hold intermediate arrays
+    Am = np.empty((5, n, n), dtype=a.dtype)
+
+    # Main loop: iterate over the stacked slices of the ndarray and apply
+    # the expm machinery to each square slice.
+    for ind in product(*[range(x) for x in a.shape[:-2]]):
+        aw = a[ind]
+
+        lu = bandwidth(aw)
+        if not any(lu):  # aw is diagonal
+            eA[ind] = np.diag(np.exp(np.diag(aw)))
+            continue
+
+        # Generic/triangular case; copy the slice into scratch and send.
+        # Am will be mutated by pick_pade_structure
+        Am[0, :, :] = aw
+        m, s = pick_pade_structure(Am)
+
+        if s != 0:  # scaling needed
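+            # Am[:4] holds A and its even powers A**2, A**4, A**6 (filled in
+            # by pick_pade_structure); scaling A by 2**(-s) scales the k-th
+            # power by 2**(-k*s), hence the factors 2, 4, 16, 64 raised to -s.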
+            Am[:4] *= [[[2**(-s)]], [[4**(-s)]], [[16**(-s)]], [[64**(-s)]]]
+
+        pade_UV_calc(Am, n, m)
+        eAw = Am[0]
+
+        if s != 0:  # squaring needed
+
+            if (lu[1] == 0) or (lu[0] == 0):  # lower/upper triangular
+                # This branch implements Code Fragment 2.1 of [1]
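+                # During the repeated squaring below, the diagonal and the
+                # first super/sub-diagonal of the running result are known in
+                # closed form (exponentials of the scaled diagonal and a
+                # divided-difference formula), so they are overwritten exactly
+                # at each step to limit error accumulation.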
+
+                diag_aw = np.diag(aw)
+                # einsum returns a writable view
+                np.einsum('ii->i', eAw)[:] = np.exp(diag_aw * 2**(-s))
+                # super/sub diagonal
+                sd = np.diag(aw, k=-1 if lu[1] == 0 else 1)
+
+                for i in range(s-1, -1, -1):
+                    eAw = eAw @ eAw
+
+                    # diagonal
+                    np.einsum('ii->i', eAw)[:] = np.exp(diag_aw * 2.**(-i))
+                    exp_sd = _exp_sinch(diag_aw * (2.**(-i))) * (sd * 2**(-i))
+                    if lu[1] == 0:  # lower
+                        np.einsum('ii->i', eAw[1:, :-1])[:] = exp_sd
+                    else:  # upper
+                        np.einsum('ii->i', eAw[:-1, 1:])[:] = exp_sd
+
+            else:  # generic
+                for _ in range(s):
+                    eAw = eAw @ eAw
+
+        # Zero out the entries from np.empty in case of triangular input
+        if (lu[0] == 0) or (lu[1] == 0):
+            eA[ind] = np.triu(eAw) if lu[0] == 0 else np.tril(eAw)
+        else:
+            eA[ind] = eAw
+
+    return eA
+
+
+def _exp_sinch(x):
+    # Higham's formula (10.42), might overflow, see GH-11839
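+    # First-order divided differences of exp over the points x:
+    # (exp(x[i+1]) - exp(x[i])) / (x[i+1] - x[i]), with the limiting value
+    # exp(x[i]) when consecutive points coincide.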
+    lexp_diff = np.diff(np.exp(x))
+    l_diff = np.diff(x)
+    mask_z = l_diff == 0.
+    lexp_diff[~mask_z] /= l_diff[~mask_z]
+    lexp_diff[mask_z] = np.exp(x[:-1][mask_z])
+    return lexp_diff
+
+
+def cosm(A):
+    """
+    Compute the matrix cosine.
+
+    This routine uses expm to compute the matrix exponentials.
+
+    Parameters
+    ----------
+    A : (N, N) array_like
+        Input array
+
+    Returns
+    -------
+    cosm : (N, N) ndarray
+        Matrix cosine of A
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import expm, sinm, cosm
+
+    Euler's identity (exp(i*theta) = cos(theta) + i*sin(theta))
+    applied to a matrix:
+
+    >>> a = np.array([[1.0, 2.0], [-1.0, 3.0]])
+    >>> expm(1j*a)
+    array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j],
+           [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]])
+    >>> cosm(a) + 1j*sinm(a)
+    array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j],
+           [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]])
+
+    """
+    A = _asarray_square(A)
+    if np.iscomplexobj(A):
+        return 0.5*(expm(1j*A) + expm(-1j*A))
+    else:
+        return expm(1j*A).real
+
+
+def sinm(A):
+    """
+    Compute the matrix sine.
+
+    This routine uses expm to compute the matrix exponentials.
+
+    Parameters
+    ----------
+    A : (N, N) array_like
+        Input array.
+
+    Returns
+    -------
+    sinm : (N, N) ndarray
+        Matrix sine of `A`
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import expm, sinm, cosm
+
+    Euler's identity (exp(i*theta) = cos(theta) + i*sin(theta))
+    applied to a matrix:
+
+    >>> a = np.array([[1.0, 2.0], [-1.0, 3.0]])
+    >>> expm(1j*a)
+    array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j],
+           [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]])
+    >>> cosm(a) + 1j*sinm(a)
+    array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j],
+           [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]])
+
+    """
+    A = _asarray_square(A)
+    if np.iscomplexobj(A):
+        return -0.5j*(expm(1j*A) - expm(-1j*A))
+    else:
+        return expm(1j*A).imag
+
+
+def tanm(A):
+    """
+    Compute the matrix tangent.
+
+    This routine uses expm to compute the matrix exponentials.
+
+    Parameters
+    ----------
+    A : (N, N) array_like
+        Input array.
+
+    Returns
+    -------
+    tanm : (N, N) ndarray
+        Matrix tangent of `A`
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import tanm, sinm, cosm
+    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
+    >>> t = tanm(a)
+    >>> t
+    array([[ -2.00876993,  -8.41880636],
+           [ -2.80626879, -10.42757629]])
+
+    Verify tanm(a) = sinm(a).dot(inv(cosm(a)))
+
+    >>> s = sinm(a)
+    >>> c = cosm(a)
+    >>> s.dot(np.linalg.inv(c))
+    array([[ -2.00876993,  -8.41880636],
+           [ -2.80626879, -10.42757629]])
+
+    """
+    A = _asarray_square(A)
+    return _maybe_real(A, solve(cosm(A), sinm(A)))
+
+
+def coshm(A):
+    """
+    Compute the hyperbolic matrix cosine.
+
+    This routine uses expm to compute the matrix exponentials.
+
+    Parameters
+    ----------
+    A : (N, N) array_like
+        Input array.
+
+    Returns
+    -------
+    coshm : (N, N) ndarray
+        Hyperbolic matrix cosine of `A`
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import tanhm, sinhm, coshm
+    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
+    >>> c = coshm(a)
+    >>> c
+    array([[ 11.24592233,  38.76236492],
+           [ 12.92078831,  50.00828725]])
+
+    Verify tanhm(a) = sinhm(a).dot(inv(coshm(a)))
+
+    >>> t = tanhm(a)
+    >>> s = sinhm(a)
+    >>> t - s.dot(np.linalg.inv(c))
+    array([[  2.72004641e-15,   4.55191440e-15],
+           [  0.00000000e+00,  -5.55111512e-16]])
+
+    """
+    A = _asarray_square(A)
+    return _maybe_real(A, 0.5 * (expm(A) + expm(-A)))
+
+
+def sinhm(A):
+    """
+    Compute the hyperbolic matrix sine.
+
+    This routine uses expm to compute the matrix exponentials.
+
+    Parameters
+    ----------
+    A : (N, N) array_like
+        Input array.
+
+    Returns
+    -------
+    sinhm : (N, N) ndarray
+        Hyperbolic matrix sine of `A`
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import tanhm, sinhm, coshm
+    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
+    >>> s = sinhm(a)
+    >>> s
+    array([[ 10.57300653,  39.28826594],
+           [ 13.09608865,  49.86127247]])
+
+    Verify tanhm(a) = sinhm(a).dot(inv(coshm(a)))
+
+    >>> t = tanhm(a)
+    >>> c = coshm(a)
+    >>> t - s.dot(np.linalg.inv(c))
+    array([[  2.72004641e-15,   4.55191440e-15],
+           [  0.00000000e+00,  -5.55111512e-16]])
+
+    """
+    A = _asarray_square(A)
+    return _maybe_real(A, 0.5 * (expm(A) - expm(-A)))
+
+
+def tanhm(A):
+    """
+    Compute the hyperbolic matrix tangent.
+
+    This routine uses expm to compute the matrix exponentials.
+
+    Parameters
+    ----------
+    A : (N, N) array_like
+        Input array
+
+    Returns
+    -------
+    tanhm : (N, N) ndarray
+        Hyperbolic matrix tangent of `A`
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import tanhm, sinhm, coshm
+    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
+    >>> t = tanhm(a)
+    >>> t
+    array([[ 0.3428582 ,  0.51987926],
+           [ 0.17329309,  0.86273746]])
+
+    Verify tanhm(a) = sinhm(a).dot(inv(coshm(a)))
+
+    >>> s = sinhm(a)
+    >>> c = coshm(a)
+    >>> t - s.dot(np.linalg.inv(c))
+    array([[  2.72004641e-15,   4.55191440e-15],
+           [  0.00000000e+00,  -5.55111512e-16]])
+
+    """
+    A = _asarray_square(A)
+    return _maybe_real(A, solve(coshm(A), sinhm(A)))
+
+
+def funm(A, func, disp=True):
+    """
+    Evaluate a matrix function specified by a callable.
+
+    Returns the value of matrix-valued function ``f`` at `A`. The
+    function ``f`` is an extension of the scalar-valued function `func`
+    to matrices.
+
+    Parameters
+    ----------
+    A : (N, N) array_like
+        Matrix at which to evaluate the function
+    func : callable
+        Callable object that evaluates a scalar function f.
+        Must be vectorized (e.g., using ``numpy.vectorize``).
+    disp : bool, optional
+        Print a warning if the estimated error in the result is large,
+        instead of returning the estimated error. (Default: True)
+
+    Returns
+    -------
+    funm : (N, N) ndarray
+        Value of the matrix function specified by func evaluated at `A`
+    errest : float
+        (if disp == False)
+
+        1-norm of the estimated error, ||err||_1 / ||A||_1
+
+    Notes
+    -----
+    This function implements the general algorithm based on Schur decomposition
+    (Algorithm 9.1.1 in [1]_).
+
+    If the input matrix is known to be diagonalizable, then relying on the
+    eigendecomposition is likely to be faster. For example, if your matrix is
+    Hermitian, you can do
+
+    >>> from scipy.linalg import eigh
+    >>> def funm_herm(a, func, check_finite=False):
+    ...     w, v = eigh(a, check_finite=check_finite)
+    ...     ## if you further know that your matrix is positive semidefinite,
+    ...     ## you can optionally guard against precision errors by doing
+    ...     # w = np.maximum(w, 0)
+    ...     w = func(w)
+    ...     return (v * w).dot(v.conj().T)
+
+    References
+    ----------
+    .. [1] Gene H. Golub, Charles F. van Loan, Matrix Computations 4th ed.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import funm
+    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
+    >>> funm(a, lambda x: x*x)
+    array([[  4.,  15.],
+           [  5.,  19.]])
+    >>> a.dot(a)
+    array([[  4.,  15.],
+           [  5.,  19.]])
+
+    """
+    A = _asarray_square(A)
+    # Perform Schur decomposition (LAPACK ?gees)
+    T, Z = schur(A)
+    T, Z = rsf2csf(T,Z)
+    n,n = T.shape
+    F = diag(func(diag(T)))  # apply function to diagonal elements
+    F = F.astype(T.dtype.char)  # e.g., when F is real but T is complex
+
+    minden = abs(T[0,0])
+
+    # Implement Algorithm 9.1.1 (the Schur-Parlett recurrence) from Golub
+    # and Van Loan, "Matrix Computations", 4th ed.
+    for p in range(1,n):
+        for i in range(1,n-p+1):
+            j = i + p
+            s = T[i-1,j-1] * (F[j-1,j-1] - F[i-1,i-1])
+            ksl = slice(i,j-1)
+            val = dot(T[i-1,ksl],F[ksl,j-1]) - dot(F[i-1,ksl],T[ksl,j-1])
+            s = s + val
+            den = T[j-1,j-1] - T[i-1,i-1]
+            if den != 0.0:
+                s = s / den
+            F[i-1,j-1] = s
+            minden = min(minden,abs(den))
+
+    F = dot(dot(Z, F), transpose(conjugate(Z)))
+    F = _maybe_real(A, F)
+
+    tol = {0:feps, 1:eps}[_array_precision[F.dtype.char]]
+    if minden == 0.0:
+        minden = tol
+    err = min(1, max(tol,(tol/minden)*norm(triu(T,1),1)))
+    if prod(ravel(logical_not(isfinite(F))),axis=0):
+        err = Inf
+    if disp:
+        if err > 1000*tol:
+            print("funm result may be inaccurate, approximate err =", err)
+        return F
+    else:
+        return F, err
+
+
+def signm(A, disp=True):
+    """
+    Matrix sign function.
+
+    Extension of the scalar sign(x) to matrices.
+
+    Parameters
+    ----------
+    A : (N, N) array_like
+        Matrix at which to evaluate the sign function
+    disp : bool, optional
+        Print warning if error in the result is estimated large
+        instead of returning estimated error. (Default: True)
+
+    Returns
+    -------
+    signm : (N, N) ndarray
+        Value of the sign function at `A`
+    errest : float
+        (if disp == False)
+
+        1-norm of the estimated error, ||err||_1 / ||A||_1
+
+    Examples
+    --------
+    >>> from scipy.linalg import signm, eigvals
+    >>> a = [[1,2,3], [1,2,1], [1,1,1]]
+    >>> eigvals(a)
+    array([ 4.12488542+0.j, -0.76155718+0.j,  0.63667176+0.j])
+    >>> eigvals(signm(a))
+    array([-1.+0.j,  1.+0.j,  1.+0.j])
+
+    """
+    A = _asarray_square(A)
+
+    def rounded_sign(x):
+        rx = np.real(x)
+        if rx.dtype.char == 'f':
+            c = 1e3*feps*amax(x)
+        else:
+            c = 1e3*eps*amax(x)
+        return sign((absolute(rx) > c) * rx)
+    result, errest = funm(A, rounded_sign, disp=0)
+    errtol = {0:1e3*feps, 1:1e3*eps}[_array_precision[result.dtype.char]]
+    if errest < errtol:
+        return result
+
+    # Handle signm of defective matrices:
+
+    # See "E.D.Denman and J.Leyva-Ramos, Appl.Math.Comp.,
+    # 8:237-250,1981" for how to improve the following (currently a
+    # rather naive) iteration process:
+
+    # a = result # sometimes iteration converges faster but where??
+
+    # Shifting to avoid zero eigenvalues. How to ensure that shifting does
+    # not change the spectrum too much?
+    vals = svd(A, compute_uv=False)
+    max_sv = np.amax(vals)
+    # min_nonzero_sv = vals[(vals>max_sv*errtol).tolist().count(1)-1]
+    # c = 0.5/min_nonzero_sv
+    c = 0.5/max_sv
+    S0 = A + c*np.identity(A.shape[0])
+    prev_errest = errest
+    for i in range(100):
+        iS0 = inv(S0)
+        S0 = 0.5*(S0 + iS0)
+        Pp = 0.5*(dot(S0,S0)+S0)
+        errest = norm(dot(Pp,Pp)-Pp,1)
+        if errest < errtol or prev_errest == errest:
+            break
+        prev_errest = errest
+    if disp:
+        if not isfinite(errest) or errest >= errtol:
+            print("signm result may be inaccurate, approximate err =", errest)
+        return S0
+    else:
+        return S0, errest
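+
+
+def _signm_demo():
+    # Illustrative sketch added by the editor (hypothetical helper, not part
+    # of the original SciPy source): the fallback loop above is the Newton
+    # iteration S <- (S + inv(S)) / 2 for the matrix sign, whose limit is an
+    # involution, so signm(a) squared should be the identity.
+    a = np.array([[1.0, 2.0], [3.0, 4.0]])
+    s = signm(a)
+    assert np.allclose(s.dot(s), np.identity(2))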
+
+
+def khatri_rao(a, b):
+    r"""
+    Khatri-Rao product
+
+    A column-wise Kronecker product of two matrices
+
+    Parameters
+    ----------
+    a : (n, k) array_like
+        Input array
+    b : (m, k) array_like
+        Input array
+
+    Returns
+    -------
+    c : (n*m, k) ndarray
+        Khatri-Rao product of `a` and `b`.
+
+    See Also
+    --------
+    kron : Kronecker product
+
+    Notes
+    -----
+    The Khatri-Rao product is the column-wise Kronecker product:
+
+    .. math::
+
+        (A \odot B)_{:, k} = A_{:, k} \otimes B_{:, k}
+
+    that is, the Kronecker product of every pair of corresponding columns
+    of A and B, e.g.::
+
+        c = np.vstack([np.kron(a[:, k], b[:, k]) for k in range(b.shape[1])]).T
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import linalg
+    >>> a = np.array([[1, 2, 3], [4, 5, 6]])
+    >>> b = np.array([[3, 4, 5], [6, 7, 8], [2, 3, 9]])
+    >>> linalg.khatri_rao(a, b)
+    array([[ 3,  8, 15],
+           [ 6, 14, 24],
+           [ 2,  6, 27],
+           [12, 20, 30],
+           [24, 35, 48],
+           [ 8, 15, 54]])
+
+    """
+    a = np.asarray(a)
+    b = np.asarray(b)
+
+    if not (a.ndim == 2 and b.ndim == 2):
+        raise ValueError("The both arrays should be 2-dimensional.")
+
+    if not a.shape[1] == b.shape[1]:
+        raise ValueError("The number of columns for both arrays "
+                         "should be equal.")
+
+    # c = np.vstack([np.kron(a[:, k], b[:, k]) for k in range(b.shape[1])]).T
+    c = a[..., :, np.newaxis, :] * b[..., np.newaxis, :, :]
+    return c.reshape((-1,) + c.shape[2:])
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/_matfuncs_expm.pyi b/__packaged__/coreml/.python_dependencies/scipy/linalg/_matfuncs_expm.pyi
new file mode 100644
index 00000000..880c6955
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/_matfuncs_expm.pyi
@@ -0,0 +1,6 @@
+from numpy.typing import NDArray
+from typing import Any, Tuple
+
+def pick_pade_structure(a: NDArray[Any]) -> Tuple[int, int]: ...
+
+def pade_UV_calc(Am: NDArray[Any], n: int, m: int) -> None: ...
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/_matfuncs_inv_ssq.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/_matfuncs_inv_ssq.py
new file mode 100644
index 00000000..ee0db796
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/_matfuncs_inv_ssq.py
@@ -0,0 +1,886 @@
+"""
+Matrix functions that use Pade approximation with inverse scaling and squaring.
+
+"""
+import warnings
+
+import numpy as np
+
+from scipy.linalg._matfuncs_sqrtm import SqrtmError, _sqrtm_triu
+from scipy.linalg._decomp_schur import schur, rsf2csf
+from scipy.linalg._matfuncs import funm
+from scipy.linalg import svdvals, solve_triangular
+from scipy.sparse.linalg._interface import LinearOperator
+from scipy.sparse.linalg import onenormest
+import scipy.special
+
+
+class LogmRankWarning(UserWarning):
+    pass
+
+
+class LogmExactlySingularWarning(LogmRankWarning):
+    pass
+
+
+class LogmNearlySingularWarning(LogmRankWarning):
+    pass
+
+
+class LogmError(np.linalg.LinAlgError):
+    pass
+
+
+class FractionalMatrixPowerError(np.linalg.LinAlgError):
+    pass
+
+
+#TODO renovate or move this class when scipy operators are more mature
+class _MatrixM1PowerOperator(LinearOperator):
+    """
+    A representation of the linear operator (A - I)^p.
+    """
+
+    def __init__(self, A, p):
+        if A.ndim != 2 or A.shape[0] != A.shape[1]:
+            raise ValueError('expected A to be like a square matrix')
+        if p < 0 or p != int(p):
+            raise ValueError('expected p to be a non-negative integer')
+        self._A = A
+        self._p = p
+        self.ndim = A.ndim
+        self.shape = A.shape
+
+    def _matvec(self, x):
+        for i in range(self._p):
+            x = self._A.dot(x) - x
+        return x
+
+    def _rmatvec(self, x):
+        for i in range(self._p):
+            x = x.dot(self._A) - x
+        return x
+
+    def _matmat(self, X):
+        for i in range(self._p):
+            X = self._A.dot(X) - X
+        return X
+
+    def _adjoint(self):
+        return _MatrixM1PowerOperator(self._A.T, self._p)
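+
+
+def _matrix_m1_power_operator_demo():
+    # Illustrative sketch added by the editor (hypothetical helper, not part
+    # of the original SciPy source): the operator's matvec agrees with
+    # forming (A - I)**p explicitly.
+    A = np.array([[2.0, 1.0], [0.0, 3.0]])
+    op = _MatrixM1PowerOperator(A, 2)
+    explicit = np.linalg.matrix_power(A - np.identity(2), 2)
+    x = np.array([1.0, 1.0])
+    assert np.allclose(op.matvec(x), explicit.dot(x))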
+
+
+#TODO renovate or move this function when SciPy operators are more mature
+def _onenormest_m1_power(A, p,
+        t=2, itmax=5, compute_v=False, compute_w=False):
+    """
+    Efficiently estimate the 1-norm of (A - I)^p.
+
+    Parameters
+    ----------
+    A : ndarray
+        Matrix whose 1-norm of a power is to be computed.
+    p : int
+        Non-negative integer power.
+    t : int, optional
+        A positive parameter controlling the tradeoff between
+        accuracy versus time and memory usage.
+        Larger values take longer and use more memory
+        but give more accurate output.
+    itmax : int, optional
+        Use at most this many iterations.
+    compute_v : bool, optional
+        Request a norm-maximizing linear operator input vector if True.
+    compute_w : bool, optional
+        Request a norm-maximizing linear operator output vector if True.
+
+    Returns
+    -------
+    est : float
+        An underestimate of the 1-norm of the sparse matrix.
+    v : ndarray, optional
+        The vector such that ||Av||_1 == est*||v||_1.
+        It can be thought of as an input to the linear operator
+        that gives an output with particularly large norm.
+    w : ndarray, optional
+        The vector Av which has relatively large 1-norm.
+        It can be thought of as an output of the linear operator
+        that is relatively large in norm compared to the input.
+
+    """
+    return onenormest(_MatrixM1PowerOperator(A, p),
+            t=t, itmax=itmax, compute_v=compute_v, compute_w=compute_w)
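+
+
+def _onenormest_m1_power_demo():
+    # Illustrative sketch added by the editor (hypothetical helper, not part
+    # of the original SciPy source): onenormest returns an underestimate, so
+    # for a small matrix the result can be checked against the exact 1-norm
+    # of (A - I)**p formed explicitly.
+    A = np.diag([2.0, 3.0, 4.0, 5.0, 6.0]) + np.triu(np.ones((5, 5)), 1)
+    exact = np.linalg.norm(np.linalg.matrix_power(A - np.identity(5), 3), 1)
+    assert _onenormest_m1_power(A, 3) <= exact + 1e-8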
+
+
+def _unwindk(z):
+    """
+    Compute the scalar unwinding number.
+
+    Uses Eq. (5.3) in [1]_, and should be equal to (z - log(exp(z))) / (2 pi i).
+    Note that this definition differs in sign from the original definition
+    in equations (5, 6) in [2]_.  The sign convention is justified in [3]_.
+
+    Parameters
+    ----------
+    z : complex
+        A complex number.
+
+    Returns
+    -------
+    unwinding_number : integer
+        The scalar unwinding number of z.
+
+    References
+    ----------
+    .. [1] Nicholas J. Higham and Lijing Lin (2011)
+           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
+           SIAM Journal on Matrix Analysis and Applications,
+           32 (3). pp. 1056-1078. ISSN 0895-4798
+
+    .. [2] Robert M. Corless and David J. Jeffrey,
+           "The unwinding number." Newsletter ACM SIGSAM Bulletin
+           Volume 30, Issue 2, June 1996, Pages 28-35.
+
+    .. [3] Russell Bradford and Robert M. Corless and James H. Davenport and
+           David J. Jeffrey and Stephen M. Watt,
+           "Reasoning about the elementary functions of complex analysis"
+           Annals of Mathematics and Artificial Intelligence,
+           36: 303-318, 2002.
+
+    """
+    return int(np.ceil((z.imag - np.pi) / (2*np.pi)))
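+
+
+def _unwindk_demo():
+    # Illustrative sketch added by the editor (hypothetical helper, not part
+    # of the original SciPy source): the unwinding number is 0 while Im(z)
+    # lies in (-pi, pi], and changes by one each time Im(z) crosses an odd
+    # multiple of pi.
+    assert _unwindk(0.0 + 0.0j) == 0
+    assert _unwindk(0.0 + 3.5j * np.pi) == 2
+    assert _unwindk(0.0 - 3.5j * np.pi) == -2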
+
+
+def _briggs_helper_function(a, k):
+    """
+    Computes r = a^(1 / (2^k)) - 1.
+
+    This is algorithm (2) of [1]_.
+    The purpose is to avoid a danger of subtractive cancellation.
+    For more computational efficiency it should probably be cythonized.
+
+    Parameters
+    ----------
+    a : complex
+        A complex number.
+    k : integer
+        A nonnegative integer.
+
+    Returns
+    -------
+    r : complex
+        The value r = a^(1 / (2^k)) - 1 computed with less cancellation.
+
+    Notes
+    -----
+    The algorithm as formulated in the reference does not handle k=0 or k=1
+    correctly, so these are special-cased in this implementation.
+    This function is intended to not allow `a` to belong to the closed
+    negative real axis, but this constraint is relaxed.
+
+    References
+    ----------
+    .. [1] Awad H. Al-Mohy (2012)
+           "A more accurate Briggs method for the logarithm",
+           Numerical Algorithms, 59 : 393--402.
+
+    """
+    if k < 0 or int(k) != k:
+        raise ValueError('expected a nonnegative integer k')
+    if k == 0:
+        return a - 1
+    elif k == 1:
+        return np.sqrt(a) - 1
+    else:
+        k_hat = k
+        if np.angle(a) >= np.pi / 2:
+            a = np.sqrt(a)
+            k_hat = k - 1
+        z0 = a - 1
+        a = np.sqrt(a)
+        r = 1 + a
+        for j in range(1, k_hat):
+            a = np.sqrt(a)
+            r = r * (1 + a)
+        r = z0 / r
+        return r
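+
+
+def _briggs_helper_demo():
+    # Illustrative sketch added by the editor (hypothetical helper, not part
+    # of the original SciPy source): the helper agrees with the naive
+    # formula a**(1/2**k) - 1 at modest k, while being designed to avoid the
+    # naive formula's subtractive cancellation as k grows.
+    a = 1.0 + 1.0j
+    for k in range(8):
+        r = _briggs_helper_function(a, k)
+        naive = a ** (1.0 / 2.0 ** k) - 1.0
+        assert abs(r - naive) < 1e-12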
+
+
+def _fractional_power_superdiag_entry(l1, l2, t12, p):
+    """
+    Compute a superdiagonal entry of a fractional matrix power.
+
+    This is Eq. (5.6) in [1]_.
+
+    Parameters
+    ----------
+    l1 : complex
+        A diagonal entry of the matrix.
+    l2 : complex
+        A diagonal entry of the matrix.
+    t12 : complex
+        A superdiagonal entry of the matrix.
+    p : float
+        A fractional power.
+
+    Returns
+    -------
+    f12 : complex
+        A superdiagonal entry of the fractional matrix power.
+
+    Notes
+    -----
+    Care has been taken to return a real number if possible when
+    all of the inputs are real numbers.
+
+    References
+    ----------
+    .. [1] Nicholas J. Higham and Lijing Lin (2011)
+           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
+           SIAM Journal on Matrix Analysis and Applications,
+           32 (3). pp. 1056-1078. ISSN 0895-4798
+
+    """
+    if l1 == l2:
+        f12 = t12 * p * l1**(p-1)
+    elif abs(l2 - l1) > abs(l1 + l2) / 2:
+        f12 = t12 * ((l2**p) - (l1**p)) / (l2 - l1)
+    else:
+        # This is Eq. (5.5) in [1].
+        z = (l2 - l1) / (l2 + l1)
+        log_l1 = np.log(l1)
+        log_l2 = np.log(l2)
+        arctanh_z = np.arctanh(z)
+        tmp_a = t12 * np.exp((p/2)*(log_l2 + log_l1))
+        tmp_u = _unwindk(log_l2 - log_l1)
+        if tmp_u:
+            tmp_b = p * (arctanh_z + np.pi * 1j * tmp_u)
+        else:
+            tmp_b = p * arctanh_z
+        tmp_c = 2 * np.sinh(tmp_b) / (l2 - l1)
+        f12 = tmp_a * tmp_c
+    return f12
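+
+
+def _fractional_power_superdiag_demo():
+    # Illustrative sketch added by the editor (hypothetical helper, not part
+    # of the original SciPy source): for T = [[l1, t12], [0, l2]], the
+    # superdiagonal of T**p computed independently by eigendecomposition
+    # should match the formula-based entry.
+    l1, l2, t12, p = 2.0, 2.5, 1.0, 0.3
+    T = np.array([[l1, t12], [0.0, l2]])
+    w, v = np.linalg.eig(T)
+    Tp = (v * w**p).dot(np.linalg.inv(v))
+    f12 = _fractional_power_superdiag_entry(l1, l2, t12, p)
+    assert abs(Tp[0, 1] - f12) < 1e-10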
+
+
+def _logm_superdiag_entry(l1, l2, t12):
+    """
+    Compute a superdiagonal entry of a matrix logarithm.
+
+    This is like Eq. (11.28) in [1]_, except the determination of whether
+    l1 and l2 are sufficiently far apart has been modified.
+
+    Parameters
+    ----------
+    l1 : complex
+        A diagonal entry of the matrix.
+    l2 : complex
+        A diagonal entry of the matrix.
+    t12 : complex
+        A superdiagonal entry of the matrix.
+
+    Returns
+    -------
+    f12 : complex
+        A superdiagonal entry of the matrix logarithm.
+
+    Notes
+    -----
+    Care has been taken to return a real number if possible when
+    all of the inputs are real numbers.
+
+    References
+    ----------
+    .. [1] Nicholas J. Higham (2008)
+           "Functions of Matrices: Theory and Computation"
+           ISBN 978-0-898716-46-7
+
+    """
+    if l1 == l2:
+        f12 = t12 / l1
+    elif abs(l2 - l1) > abs(l1 + l2) / 2:
+        f12 = t12 * (np.log(l2) - np.log(l1)) / (l2 - l1)
+    else:
+        z = (l2 - l1) / (l2 + l1)
+        u = _unwindk(np.log(l2) - np.log(l1))
+        if u:
+            f12 = t12 * 2 * (np.arctanh(z) + np.pi*1j*u) / (l2 - l1)
+        else:
+            f12 = t12 * 2 * np.arctanh(z) / (l2 - l1)
+    return f12
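+
+
+def _logm_superdiag_demo():
+    # Illustrative sketch added by the editor (hypothetical helper, not part
+    # of the original SciPy source): for well-separated eigenvalues the
+    # entry reduces to the divided difference
+    # t12 * (log(l2) - log(l1)) / (l2 - l1).
+    l1, l2, t12 = 2.0, 3.0, 1.5
+    f12 = _logm_superdiag_entry(l1, l2, t12)
+    expected = t12 * (np.log(l2) - np.log(l1)) / (l2 - l1)
+    assert abs(f12 - expected) < 1e-12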
+
+
+def _inverse_squaring_helper(T0, theta):
+    """
+    A helper function for inverse scaling and squaring for Pade approximation.
+
+    Parameters
+    ----------
+    T0 : (N, N) array_like upper triangular
+        Matrix involved in inverse scaling and squaring.
+    theta : indexable
+        The values theta[1] .. theta[7] must be available.
+        They represent bounds related to Pade approximation, and they depend
+        on the matrix function which is being computed.
+        For example, different values of theta are required for
+        matrix logarithm than for fractional matrix power.
+
+    Returns
+    -------
+    R : (N, N) array_like upper triangular
+        Composition of zero or more matrix square roots of T0, minus I.
+    s : non-negative integer
+        Number of square roots taken.
+    m : positive integer
+        The degree of the Pade approximation.
+
+    Notes
+    -----
+    This subroutine appears as a chunk of lines within
+    a couple of published algorithms; for example it appears
+    as lines 4--35 in algorithm (3.1) of [1]_, and
+    as lines 3--34 in algorithm (4.1) of [2]_.
+    The instances of 'goto line 38' in algorithm (3.1) of [1]_
+    probably mean 'goto line 36' and have been interpreted accordingly.
+
+    References
+    ----------
+    .. [1] Nicholas J. Higham and Lijing Lin (2013)
+           "An Improved Schur-Pade Algorithm for Fractional Powers
+           of a Matrix and their Frechet Derivatives."
+
+    .. [2] Awad H. Al-Mohy and Nicholas J. Higham (2012)
+           "Improved Inverse Scaling and Squaring Algorithms
+           for the Matrix Logarithm."
+           SIAM Journal on Scientific Computing, 34 (4). C152-C169.
+           ISSN 1095-7197
+
+    """
+    if len(T0.shape) != 2 or T0.shape[0] != T0.shape[1]:
+        raise ValueError('expected an upper triangular square matrix')
+    n, n = T0.shape
+    T = T0
+
+    # Find s0, the smallest s such that the spectral radius
+    # of a certain diagonal matrix is at most theta[7].
+    # Note that because theta[7] < 1,
+    # this search will not terminate if any diagonal entry of T is zero.
+    s0 = 0
+    tmp_diag = np.diag(T)
+    if np.count_nonzero(tmp_diag) != n:
+        raise Exception('Diagonal entries of T must be nonzero')
+    while np.max(np.absolute(tmp_diag - 1)) > theta[7]:
+        tmp_diag = np.sqrt(tmp_diag)
+        s0 += 1
+
+    # Take matrix square roots of T.
+    for i in range(s0):
+        T = _sqrtm_triu(T)
+
+    # Flow control in this section is a little odd.
+    # This is because I am translating algorithm descriptions
+    # which have GOTOs in the publication.
+    s = s0
+    k = 0
+    d2 = _onenormest_m1_power(T, 2) ** (1/2)
+    d3 = _onenormest_m1_power(T, 3) ** (1/3)
+    a2 = max(d2, d3)
+    m = None
+    for i in (1, 2):
+        if a2 <= theta[i]:
+            m = i
+            break
+    while m is None:
+        if s > s0:
+            d3 = _onenormest_m1_power(T, 3) ** (1/3)
+        d4 = _onenormest_m1_power(T, 4) ** (1/4)
+        a3 = max(d3, d4)
+        if a3 <= theta[7]:
+            j1 = min(i for i in (3, 4, 5, 6, 7) if a3 <= theta[i])
+            if j1 <= 6:
+                m = j1
+                break
+            elif a3 / 2 <= theta[5] and k < 2:
+                k += 1
+                T = _sqrtm_triu(T)
+                s += 1
+                continue
+        d5 = _onenormest_m1_power(T, 5) ** (1/5)
+        a4 = max(d4, d5)
+        eta = min(a3, a4)
+        for i in (6, 7):
+            if eta <= theta[i]:
+                m = i
+                break
+        if m is not None:
+            break
+        T = _sqrtm_triu(T)
+        s += 1
+
+    # The subtraction of the identity is redundant here,
+    # because the diagonal will be replaced for improved numerical accuracy,
+    # but this formulation should help clarify the meaning of R.
+    R = T - np.identity(n)
+
+    # Replace the diagonal and first superdiagonal of T0^(1/(2^s)) - I
+    # using formulas that have less subtractive cancellation.
+    # Skip this step if the principal branch
+    # does not exist at T0; this happens when a diagonal entry of T0
+    # is negative with imaginary part 0.
+    has_principal_branch = all(x.real > 0 or x.imag != 0 for x in np.diag(T0))
+    if has_principal_branch:
+        for j in range(n):
+            a = T0[j, j]
+            r = _briggs_helper_function(a, s)
+            R[j, j] = r
+        p = np.exp2(-s)
+        for j in range(n-1):
+            l1 = T0[j, j]
+            l2 = T0[j+1, j+1]
+            t12 = T0[j, j+1]
+            f12 = _fractional_power_superdiag_entry(l1, l2, t12, p)
+            R[j, j+1] = f12
+
+    # Return the T-I matrix, the number of square roots, and the Pade degree.
+    if not np.array_equal(R, np.triu(R)):
+        raise Exception('R is not upper triangular')
+    return R, s, m
+
+
+def _fractional_power_pade_constant(i, t):
+    # A helper function for matrix fractional power.
+    if i < 1:
+        raise ValueError('expected a positive integer i')
+    if not (-1 < t < 1):
+        raise ValueError('expected -1 < t < 1')
+    if i == 1:
+        return -t
+    elif i % 2 == 0:
+        j = i // 2
+        return (-j + t) / (2 * (2*j - 1))
+    elif i % 2 == 1:
+        j = (i - 1) // 2
+        return (-j - t) / (2 * (2*j + 1))
+    else:
+        raise Exception('unexpected value of i, i = {}'.format(i))
+
+
+def _fractional_power_pade(R, t, m):
+    """
+    Evaluate the Pade approximation of a fractional matrix power.
+
+    Evaluate the degree-m Pade approximation of R
+    to the fractional matrix power t using the continued fraction
+    in bottom-up fashion using algorithm (4.1) in [1]_.
+
+    Parameters
+    ----------
+    R : (N, N) array_like
+        Upper triangular matrix whose fractional power to evaluate.
+    t : float
+        Fractional power between -1 and 1 exclusive.
+    m : positive integer
+        Degree of Pade approximation.
+
+    Returns
+    -------
+    U : (N, N) array_like
+        The degree-m Pade approximation of R to the fractional power t.
+        This matrix will be upper triangular.
+
+    References
+    ----------
+    .. [1] Nicholas J. Higham and Lijing Lin (2011)
+           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
+           SIAM Journal on Matrix Analysis and Applications,
+           32 (3). pp. 1056-1078. ISSN 0895-4798
+
+    """
+    if m < 1 or int(m) != m:
+        raise ValueError('expected a positive integer m')
+    if not (-1 < t < 1):
+        raise ValueError('expected -1 < t < 1')
+    R = np.asarray(R)
+    if len(R.shape) != 2 or R.shape[0] != R.shape[1]:
+        raise ValueError('expected an upper triangular square matrix')
+    n, n = R.shape
+    ident = np.identity(n)
+    Y = R * _fractional_power_pade_constant(2*m, t)
+    for j in range(2*m - 1, 0, -1):
+        rhs = R * _fractional_power_pade_constant(j, t)
+        Y = solve_triangular(ident + Y, rhs)
+    U = ident + Y
+    if not np.array_equal(U, np.triu(U)):
+        raise Exception('U is not upper triangular')
+    return U
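+
+
+def _fractional_power_pade_demo():
+    # Illustrative sketch added by the editor (hypothetical helper, not part
+    # of the original SciPy source): on a 1x1 matrix [[x]] the degree-m Pade
+    # approximant targets the scalar function (1 - x)**t, which is why the
+    # caller below passes the negated output of the inverse squaring helper.
+    x, t, m = 0.01, 0.5, 7
+    U = _fractional_power_pade(np.array([[x]]), t, m)
+    assert abs(U[0, 0] - (1.0 - x) ** t) < 1e-12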
+
+
+def _remainder_matrix_power_triu(T, t):
+    """
+    Compute a fractional power of an upper triangular matrix.
+
+    The fractional power is restricted to fractions -1 < t < 1.
+    This uses algorithm (3.1) of [1]_.
+    The Pade approximation itself uses algorithm (4.1) of [2]_.
+
+    Parameters
+    ----------
+    T : (N, N) array_like
+        Upper triangular matrix whose fractional power to evaluate.
+    t : float
+        Fractional power between -1 and 1 exclusive.
+
+    Returns
+    -------
+    X : (N, N) array_like
+        The fractional power of the matrix.
+
+    References
+    ----------
+    .. [1] Nicholas J. Higham and Lijing Lin (2013)
+           "An Improved Schur-Pade Algorithm for Fractional Powers
+           of a Matrix and their Frechet Derivatives."
+
+    .. [2] Nicholas J. Higham and Lijing Lin (2011)
+           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
+           SIAM Journal on Matrix Analysis and Applications,
+           32 (3). pp. 1056-1078. ISSN 0895-4798
+
+    """
+    m_to_theta = {
+            1: 1.51e-5,
+            2: 2.24e-3,
+            3: 1.88e-2,
+            4: 6.04e-2,
+            5: 1.24e-1,
+            6: 2.00e-1,
+            7: 2.79e-1,
+            }
+    n, n = T.shape
+    T0 = T
+    T0_diag = np.diag(T0)
+    if np.array_equal(T0, np.diag(T0_diag)):
+        U = np.diag(T0_diag ** t)
+    else:
+        R, s, m = _inverse_squaring_helper(T0, m_to_theta)
+
+        # Evaluate the Pade approximation.
+        # Note that this function expects the negative of the matrix
+        # returned by the inverse squaring helper.
+        U = _fractional_power_pade(-R, t, m)
+
+        # Undo the inverse scaling and squaring.
+        # Be less clever about this
+        # if the principal branch does not exist at T0;
+        # this happens when a diagonal entry of T0
+        # is negative with imaginary part 0.
+        eivals = np.diag(T0)
+        has_principal_branch = all(x.real > 0 or x.imag != 0 for x in eivals)
+        for i in range(s, -1, -1):
+            if i < s:
+                U = U.dot(U)
+            else:
+                if has_principal_branch:
+                    p = t * np.exp2(-i)
+                    U[np.diag_indices(n)] = T0_diag ** p
+                    for j in range(n-1):
+                        l1 = T0[j, j]
+                        l2 = T0[j+1, j+1]
+                        t12 = T0[j, j+1]
+                        f12 = _fractional_power_superdiag_entry(l1, l2, t12, p)
+                        U[j, j+1] = f12
+    if not np.array_equal(U, np.triu(U)):
+        raise Exception('U is not upper triangular')
+    return U
+
+
+def _remainder_matrix_power(A, t):
+    """
+    Compute the fractional power of a matrix, for fractions -1 < t < 1.
+
+    This uses algorithm (3.1) of [1]_.
+    The Pade approximation itself uses algorithm (4.1) of [2]_.
+
+    Parameters
+    ----------
+    A : (N, N) array_like
+        Matrix whose fractional power to evaluate.
+    t : float
+        Fractional power between -1 and 1 exclusive.
+
+    Returns
+    -------
+    X : (N, N) array_like
+        The fractional power of the matrix.
+
+    References
+    ----------
+    .. [1] Nicholas J. Higham and Lijing Lin (2013)
+           "An Improved Schur-Pade Algorithm for Fractional Powers
+           of a Matrix and their Frechet Derivatives."
+
+    .. [2] Nicholas J. Higham and Lijing Lin (2011)
+           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
+           SIAM Journal on Matrix Analysis and Applications,
+           32 (3). pp. 1056-1078. ISSN 0895-4798
+
+    """
+    # This code block is copied from numpy.matrix_power().
+    A = np.asarray(A)
+    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
+        raise ValueError('input must be a square array')
+
+    # Get the number of rows and columns.
+    n, n = A.shape
+
+    # Triangularize the matrix if necessary,
+    # attempting to preserve dtype if possible.
+    if np.array_equal(A, np.triu(A)):
+        Z = None
+        T = A
+    else:
+        if np.isrealobj(A):
+            T, Z = schur(A)
+            if not np.array_equal(T, np.triu(T)):
+                T, Z = rsf2csf(T, Z)
+        else:
+            T, Z = schur(A, output='complex')
+
+    # Zeros on the diagonal of the triangular matrix are forbidden,
+    # because the inverse scaling and squaring cannot deal with it.
+    T_diag = np.diag(T)
+    if np.count_nonzero(T_diag) != n:
+        raise FractionalMatrixPowerError(
+                'cannot use inverse scaling and squaring to find '
+                'the fractional matrix power of a singular matrix')
+
+    # If the triangular matrix is real and has a negative
+    # entry on the diagonal, then force the matrix to be complex.
+    if np.isrealobj(T) and np.min(T_diag) < 0:
+        T = T.astype(complex)
+
+    # Get the fractional power of the triangular matrix,
+    # and de-triangularize it if necessary.
+    U = _remainder_matrix_power_triu(T, t)
+    if Z is not None:
+        ZH = np.conjugate(Z).T
+        return Z.dot(U).dot(ZH)
+    else:
+        return U
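+
+
+def _remainder_matrix_power_demo():
+    # Illustrative sketch added by the editor (hypothetical helper, not part
+    # of the original SciPy source): the half power of a well-conditioned
+    # symmetric matrix, squared, reproduces the original matrix.
+    A = np.array([[4.0, 1.0], [1.0, 3.0]])
+    X = _remainder_matrix_power(A, 0.5)
+    assert np.allclose(X.dot(X), A)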
+
+
+def _fractional_matrix_power(A, p):
+    """
+    Compute the fractional power of a matrix.
+
+    See the fractional_matrix_power docstring in matfuncs.py for more info.
+
+    """
+    A = np.asarray(A)
+    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
+        raise ValueError('expected a square matrix')
+    if p == int(p):
+        return np.linalg.matrix_power(A, int(p))
+    # Compute singular values.
+    s = svdvals(A)
+    # Inverse scaling and squaring cannot deal with a singular matrix,
+    # because the process of repeatedly taking square roots
+    # would not converge to the identity matrix.
+    if s[-1]:
+        # Compute the condition number relative to matrix inversion,
+        # and use this to decide between floor(p) and ceil(p).
+        k2 = s[0] / s[-1]
+        p1 = p - np.floor(p)
+        p2 = p - np.ceil(p)
+        if p1 * k2 ** (1 - p1) <= -p2 * k2:
+            a = int(np.floor(p))
+            b = p1
+        else:
+            a = int(np.ceil(p))
+            b = p2
+        try:
+            R = _remainder_matrix_power(A, b)
+            Q = np.linalg.matrix_power(A, a)
+            return Q.dot(R)
+        except np.linalg.LinAlgError:
+            pass
+    # If p is negative then we are going to give up.
+    # If p is non-negative then we can fall back to generic funm.
+    if p < 0:
+        X = np.empty_like(A)
+        X.fill(np.nan)
+        return X
+    else:
+        p1 = p - np.floor(p)
+        a = int(np.floor(p))
+        b = p1
+        R, info = funm(A, lambda x: pow(x, b), disp=False)
+        Q = np.linalg.matrix_power(A, a)
+        return Q.dot(R)
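+
+
+def _fractional_matrix_power_demo():
+    # Illustrative sketch added by the editor (hypothetical helper, not part
+    # of the original SciPy source): p = 1.5 is split into the integer part
+    # a = 1 (handled by matrix_power) and the remainder b = 0.5, so A**1.5
+    # squared equals A**3.
+    A = np.array([[4.0, 1.0], [1.0, 3.0]])
+    X = _fractional_matrix_power(A, 1.5)
+    assert np.allclose(X.dot(X), np.linalg.matrix_power(A, 3))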
+
+
+def _logm_triu(T):
+    """
+    Compute matrix logarithm of an upper triangular matrix.
+
+    The matrix logarithm is the inverse of
+    expm: expm(logm(`T`)) == `T`
+
+    Parameters
+    ----------
+    T : (N, N) array_like
+        Upper triangular matrix whose logarithm to evaluate
+
+    Returns
+    -------
+    logm : (N, N) ndarray
+        Matrix logarithm of `T`
+
+    References
+    ----------
+    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012)
+           "Improved Inverse Scaling and Squaring Algorithms
+           for the Matrix Logarithm."
+           SIAM Journal on Scientific Computing, 34 (4). C152-C169.
+           ISSN 1095-7197
+
+    .. [2] Nicholas J. Higham (2008)
+           "Functions of Matrices: Theory and Computation"
+           ISBN 978-0-898716-46-7
+
+    .. [3] Nicholas J. Higham and Lijing Lin (2011)
+           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
+           SIAM Journal on Matrix Analysis and Applications,
+           32 (3). pp. 1056-1078. ISSN 0895-4798
+
+    """
+    T = np.asarray(T)
+    if len(T.shape) != 2 or T.shape[0] != T.shape[1]:
+        raise ValueError('expected an upper triangular square matrix')
+    n, n = T.shape
+
+    # Construct T0 with the appropriate type,
+    # depending on the dtype and the spectrum of T.
+    T_diag = np.diag(T)
+    keep_it_real = np.isrealobj(T) and np.min(T_diag) >= 0
+    if keep_it_real:
+        T0 = T
+    else:
+        T0 = T.astype(complex)
+
+    # Define bounds given in Table (2.1).
+    theta = (None,
+            1.59e-5, 2.31e-3, 1.94e-2, 6.21e-2,
+            1.28e-1, 2.06e-1, 2.88e-1, 3.67e-1,
+            4.39e-1, 5.03e-1, 5.60e-1, 6.09e-1,
+            6.52e-1, 6.89e-1, 7.21e-1, 7.49e-1)
+
+    R, s, m = _inverse_squaring_helper(T0, theta)
+
+    # Evaluate U = 2**s r_m(T - I) using the partial fraction expansion (1.1).
+    # This requires the nodes and weights
+    # corresponding to degree-m Gauss-Legendre quadrature.
+    # These quadrature arrays need to be transformed from the [-1, 1] interval
+    # to the [0, 1] interval.
+    nodes, weights = scipy.special.p_roots(m)
+    nodes = nodes.real
+    if nodes.shape != (m,) or weights.shape != (m,):
+        raise Exception('internal error')
+    nodes = 0.5 + 0.5 * nodes
+    weights = 0.5 * weights
+    ident = np.identity(n)
+    U = np.zeros_like(R)
+    for alpha, beta in zip(weights, nodes):
+        U += solve_triangular(ident + beta*R, alpha*R)
+    U *= np.exp2(s)
+
+    # Skip this step if the principal branch
+    # does not exist at T0; this happens when a diagonal entry of T0
+    # is negative with imaginary part 0.
+    has_principal_branch = all(x.real > 0 or x.imag != 0 for x in np.diag(T0))
+    if has_principal_branch:
+
+        # Recompute diagonal entries of U.
+        U[np.diag_indices(n)] = np.log(np.diag(T0))
+
+        # Recompute superdiagonal entries of U.
+        # The indexing in this code should be renovated
+        # when a newer np.diagonal() becomes available.
+        for i in range(n-1):
+            l1 = T0[i, i]
+            l2 = T0[i+1, i+1]
+            t12 = T0[i, i+1]
+            U[i, i+1] = _logm_superdiag_entry(l1, l2, t12)
+
+    # Return the logm of the upper triangular matrix.
+    if not np.array_equal(U, np.triu(U)):
+        raise Exception('U is not upper triangular')
+    return U
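+
+
+def _logm_triu_demo():
+    # Illustrative sketch added by the editor (hypothetical helper, not part
+    # of the original SciPy source): expm inverts _logm_triu on a
+    # well-behaved upper triangular matrix. The import is local because
+    # importing scipy.linalg at module level would be circular.
+    from scipy.linalg import expm
+    T = np.array([[2.0, 1.0], [0.0, 3.0]])
+    assert np.allclose(expm(_logm_triu(T)), T)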
+
+
+def _logm_force_nonsingular_triangular_matrix(T, inplace=False):
+    # The input matrix should be upper triangular.
+    # The eps is ad hoc and is not meant to be machine precision.
+    tri_eps = 1e-20
+    abs_diag = np.absolute(np.diag(T))
+    if np.any(abs_diag == 0):
+        exact_singularity_msg = 'The logm input matrix is exactly singular.'
+        warnings.warn(exact_singularity_msg, LogmExactlySingularWarning)
+        if not inplace:
+            T = T.copy()
+        n = T.shape[0]
+        for i in range(n):
+            if not T[i, i]:
+                T[i, i] = tri_eps
+    elif np.any(abs_diag < tri_eps):
+        near_singularity_msg = 'The logm input matrix may be nearly singular.'
+        warnings.warn(near_singularity_msg, LogmNearlySingularWarning)
+    return T
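+
+
+def _logm_force_nonsingular_demo():
+    # Illustrative sketch added by the editor (hypothetical helper, not part
+    # of the original SciPy source): an exactly zero diagonal entry triggers
+    # LogmExactlySingularWarning and is replaced by the ad hoc tri_eps,
+    # leaving the input untouched when inplace is False.
+    T = np.array([[1.0, 2.0], [0.0, 0.0]])
+    with warnings.catch_warnings(record=True) as caught:
+        warnings.simplefilter('always')
+        T2 = _logm_force_nonsingular_triangular_matrix(T)
+    assert T2[1, 1] == 1e-20 and T[1, 1] == 0.0
+    assert any(issubclass(w.category, LogmExactlySingularWarning)
+               for w in caught)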
+
+
+def _logm(A):
+    """
+    Compute the matrix logarithm.
+
+    See the logm docstring in matfuncs.py for more info.
+
+    Notes
+    -----
+    In this function we look at triangular matrices that are similar
+    to the input matrix. If any diagonal entry of such a triangular matrix
+    is exactly zero then the original matrix is singular.
+    The matrix logarithm does not exist for such matrices,
+    but in such cases we will pretend that the diagonal entries that are zero
+    are actually slightly positive by an ad-hoc amount, in the interest
+    of returning something more useful than NaN. This will cause a warning.
+
+    """
+    A = np.asarray(A)
+    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
+        raise ValueError('expected a square matrix')
+
+    # If the input matrix dtype is integer then copy to a float dtype matrix.
+    if issubclass(A.dtype.type, np.integer):
+        A = np.asarray(A, dtype=float)
+
+    keep_it_real = np.isrealobj(A)
+    try:
+        if np.array_equal(A, np.triu(A)):
+            A = _logm_force_nonsingular_triangular_matrix(A)
+            if np.min(np.diag(A)) < 0:
+                A = A.astype(complex)
+            return _logm_triu(A)
+        else:
+            if keep_it_real:
+                T, Z = schur(A)
+                if not np.array_equal(T, np.triu(T)):
+                    T, Z = rsf2csf(T, Z)
+            else:
+                T, Z = schur(A, output='complex')
+            T = _logm_force_nonsingular_triangular_matrix(T, inplace=True)
+            U = _logm_triu(T)
+            ZH = np.conjugate(Z).T
+            return Z.dot(U).dot(ZH)
+    except (SqrtmError, LogmError):
+        X = np.empty_like(A)
+        X.fill(np.nan)
+        return X
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/_matfuncs_sqrtm.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/_matfuncs_sqrtm.py
new file mode 100644
index 00000000..1a64d579
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/_matfuncs_sqrtm.py
@@ -0,0 +1,210 @@
+"""
+Matrix square root for general matrices and for upper triangular matrices.
+
+This module exists to avoid cyclic imports.
+
+"""
+__all__ = ['sqrtm']
+
+import numpy as np
+
+from scipy._lib._util import _asarray_validated
+
+
+# Local imports
+from ._misc import norm
+from .lapack import ztrsyl, dtrsyl
+from ._decomp_schur import schur, rsf2csf
+
+
+class SqrtmError(np.linalg.LinAlgError):
+    pass
+
+
+from ._matfuncs_sqrtm_triu import within_block_loop
+
+
+def _sqrtm_triu(T, blocksize=64):
+    """
+    Matrix square root of an upper triangular matrix.
+
+    This is a helper function for `sqrtm` and `logm`.
+
+    Parameters
+    ----------
+    T : (N, N) array_like upper triangular
+        Matrix whose square root to evaluate
+    blocksize : int, optional
+        If the blocksize is not degenerate with respect to the
+        size of the input array, then use a blocked algorithm. (Default: 64)
+
+    Returns
+    -------
+    sqrtm : (N, N) ndarray
+        Value of the sqrt function at `T`
+
+    References
+    ----------
+    .. [1] Edvin Deadman, Nicholas J. Higham, Rui Ralha (2013)
+           "Blocked Schur Algorithms for Computing the Matrix Square Root,
+           Lecture Notes in Computer Science, 7782. pp. 171-182.
+
+    """
+    T_diag = np.diag(T)
+    keep_it_real = np.isrealobj(T) and np.min(T_diag) >= 0
+
+    # Cast to complex as necessary + ensure double precision
+    if not keep_it_real:
+        T = np.asarray(T, dtype=np.complex128, order="C")
+        T_diag = np.asarray(T_diag, dtype=np.complex128)
+    else:
+        T = np.asarray(T, dtype=np.float64, order="C")
+        T_diag = np.asarray(T_diag, dtype=np.float64)
+
+    R = np.diag(np.sqrt(T_diag))
+
+    # Compute the number of blocks to use; use at least one block.
+    n, n = T.shape
+    nblocks = max(n // blocksize, 1)
+
+    # Compute the smaller of the two sizes of blocks that
+    # we will actually use, and compute the number of large blocks.
+    bsmall, nlarge = divmod(n, nblocks)
+    blarge = bsmall + 1
+    nsmall = nblocks - nlarge
+    if nsmall * bsmall + nlarge * blarge != n:
+        raise Exception('internal inconsistency')
+
+    # Define the index range covered by each block.
+    start_stop_pairs = []
+    start = 0
+    for count, size in ((nsmall, bsmall), (nlarge, blarge)):
+        for i in range(count):
+            start_stop_pairs.append((start, start + size))
+            start += size
+
+    # Within-block interactions (Cythonized)
+    try:
+        within_block_loop(R, T, start_stop_pairs, nblocks)
+    except RuntimeError as e:
+        raise SqrtmError(*e.args) from e
+
+    # Between-block interactions (Cython would give no significant speedup)
+    for j in range(nblocks):
+        jstart, jstop = start_stop_pairs[j]
+        for i in range(j-1, -1, -1):
+            istart, istop = start_stop_pairs[i]
+            S = T[istart:istop, jstart:jstop]
+            if j - i > 1:
+                S = S - R[istart:istop, istop:jstart].dot(R[istop:jstart,
+                                                            jstart:jstop])
+
+            # Invoke LAPACK.
+            # For more details, see the solve_sylvester implementation
+            # and the fortran dtrsyl and ztrsyl docs.
+            Rii = R[istart:istop, istart:istop]
+            Rjj = R[jstart:jstop, jstart:jstop]
+            if keep_it_real:
+                x, scale, info = dtrsyl(Rii, Rjj, S)
+            else:
+                x, scale, info = ztrsyl(Rii, Rjj, S)
+            R[istart:istop, jstart:jstop] = x * scale
+
+    # Return the matrix square root.
+    return R
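+
+
+def _sqrtm_triu_demo():
+    # Illustrative sketch added by the editor (hypothetical helper, not part
+    # of the original SciPy source): squaring the computed root recovers the
+    # triangular input; blocksize=1 forces the between-block Sylvester
+    # solves to be exercised.
+    T = np.array([[4.0, 2.0, 1.0],
+                  [0.0, 9.0, 3.0],
+                  [0.0, 0.0, 16.0]])
+    R = _sqrtm_triu(T, blocksize=1)
+    assert np.allclose(R.dot(R), T)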
+
+
+def sqrtm(A, disp=True, blocksize=64):
+    """
+    Matrix square root.
+
+    Parameters
+    ----------
+    A : (N, N) array_like
+        Matrix whose square root to evaluate
+    disp : bool, optional
+        Print warning if error in the result is estimated large
+        instead of returning estimated error. (Default: True)
+    blocksize : integer, optional
+        If the blocksize is not degenerate with respect to the
+        size of the input array, then use a blocked algorithm. (Default: 64)
+
+    Returns
+    -------
+    sqrtm : (N, N) ndarray
+        Value of the sqrt function at `A`. The dtype is float or complex.
+        The precision (data size) is determined based on the precision of
+        input `A`. When the dtype is float, the precision is the same as
+        that of `A`. When the dtype is complex, the precision is twice
+        that of `A`. The precision is clipped to the range supported by
+        each dtype.
+
+    errest : float
+        (if disp == False)
+
+        Frobenius norm of the estimated error, ||err||_F / ||A||_F
+
+    References
+    ----------
+    .. [1] Edvin Deadman, Nicholas J. Higham, Rui Ralha (2013)
+           "Blocked Schur Algorithms for Computing the Matrix Square Root,
+           Lecture Notes in Computer Science, 7782. pp. 171-182.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import sqrtm
+    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
+    >>> r = sqrtm(a)
+    >>> r
+    array([[ 0.75592895,  1.13389342],
+           [ 0.37796447,  1.88982237]])
+    >>> r.dot(r)
+    array([[ 1.,  3.],
+           [ 1.,  4.]])
+
+    """
+    byte_size = np.asarray(A).dtype.itemsize
+    A = _asarray_validated(A, check_finite=True, as_inexact=True)
+    if len(A.shape) != 2:
+        raise ValueError("Non-matrix input to matrix function.")
+    if blocksize < 1:
+        raise ValueError("The blocksize should be at least 1.")
+    keep_it_real = np.isrealobj(A)
+    if keep_it_real:
+        T, Z = schur(A)
+        if not np.array_equal(T, np.triu(T)):
+            T, Z = rsf2csf(T, Z)
+    else:
+        T, Z = schur(A, output='complex')
+    failflag = False
+    try:
+        R = _sqrtm_triu(T, blocksize=blocksize)
+        ZH = np.conjugate(Z).T
+        X = Z.dot(R).dot(ZH)
+        if not np.iscomplexobj(X):
+            # float byte size range: f2 ~ f16
+            X = X.astype(f"f{np.clip(byte_size, 2, 16)}", copy=False)
+        else:
+            # complex byte size range: c8 ~ c32.
+            # c32(complex256) might not be supported in some environments.
+            if hasattr(np, 'complex256'):
+                X = X.astype(f"c{np.clip(byte_size*2, 8, 32)}", copy=False)
+            else:
+                X = X.astype(f"c{np.clip(byte_size*2, 8, 16)}", copy=False)
+    except SqrtmError:
+        failflag = True
+        X = np.empty_like(A)
+        X.fill(np.nan)
+
+    if disp:
+        if failflag:
+            print("Failed to find a square root.")
+        return X
+    else:
+        try:
+            arg2 = norm(X.dot(X) - A, 'fro')**2 / norm(A, 'fro')
+        except ValueError:
+            # NaNs in matrix
+            arg2 = np.inf
+
+        return X, arg2
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/_misc.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/_misc.py
new file mode 100644
index 00000000..79341f78
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/_misc.py
@@ -0,0 +1,191 @@
+import numpy as np
+from numpy.linalg import LinAlgError
+from .blas import get_blas_funcs
+from .lapack import get_lapack_funcs
+
+__all__ = ['LinAlgError', 'LinAlgWarning', 'norm']
+
+
+class LinAlgWarning(RuntimeWarning):
+    """
+    The warning emitted when a linear algebra operation is close to the
+    failure conditions of the algorithm, or when loss of accuracy is
+    expected.
+    """
+    pass
+
+
+def norm(a, ord=None, axis=None, keepdims=False, check_finite=True):
+    """
+    Matrix or vector norm.
+
+    This function is able to return one of eight different matrix norms,
+    or one of an infinite number of vector norms (described below), depending
+    on the value of the ``ord`` parameter. For tensors with rank different from
+    1 or 2, only `ord=None` is supported.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array. If `axis` is None, `a` must be 1-D or 2-D, unless `ord`
+        is None. If both `axis` and `ord` are None, the 2-norm of
+        ``a.ravel`` will be returned.
+    ord : {int, inf, -inf, 'fro', 'nuc', None}, optional
+        Order of the norm (see table under ``Notes``). inf means NumPy's
+        `inf` object.
+    axis : {int, 2-tuple of ints, None}, optional
+        If `axis` is an integer, it specifies the axis of `a` along which to
+        compute the vector norms. If `axis` is a 2-tuple, it specifies the
+        axes that hold 2-D matrices, and the matrix norms of these matrices
+        are computed. If `axis` is None then either a vector norm (when `a`
+        is 1-D) or a matrix norm (when `a` is 2-D) is returned.
+    keepdims : bool, optional
+        If this is set to True, the axes which are normed over are left in the
+        result as dimensions with size one. With this option the result will
+        broadcast correctly against the original `a`.
+    check_finite : bool, optional
+        Whether to check that the input matrix contains only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    n : float or ndarray
+        Norm of the matrix or vector(s).
+
+    Notes
+    -----
+    For values of ``ord <= 0``, the result is, strictly speaking, not a
+    mathematical 'norm', but it may still be useful for various numerical
+    purposes.
+
+    The following norms can be calculated:
+
+    =====  ============================  ==========================
+    ord    norm for matrices             norm for vectors
+    =====  ============================  ==========================
+    None   Frobenius norm                2-norm
+    'fro'  Frobenius norm                --
+    'nuc'  nuclear norm                  --
+    inf    max(sum(abs(a), axis=1))      max(abs(a))
+    -inf   min(sum(abs(a), axis=1))      min(abs(a))
+    0      --                            sum(a != 0)
+    1      max(sum(abs(a), axis=0))      as below
+    -1     min(sum(abs(a), axis=0))      as below
+    2      2-norm (largest sing. value)  as below
+    -2     smallest singular value       as below
+    other  --                            sum(abs(a)**ord)**(1./ord)
+    =====  ============================  ==========================
+
+    The Frobenius norm is given by [1]_:
+
+        :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
+
+    The nuclear norm is the sum of the singular values.
+
+    Both the Frobenius and nuclear norm orders are only defined for
+    matrices.
+
+    References
+    ----------
+    .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
+           Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import norm
+    >>> a = np.arange(9) - 4.0
+    >>> a
+    array([-4., -3., -2., -1.,  0.,  1.,  2.,  3.,  4.])
+    >>> b = a.reshape((3, 3))
+    >>> b
+    array([[-4., -3., -2.],
+           [-1.,  0.,  1.],
+           [ 2.,  3.,  4.]])
+
+    >>> norm(a)
+    7.745966692414834
+    >>> norm(b)
+    7.745966692414834
+    >>> norm(b, 'fro')
+    7.745966692414834
+    >>> norm(a, np.inf)
+    4
+    >>> norm(b, np.inf)
+    9
+    >>> norm(a, -np.inf)
+    0
+    >>> norm(b, -np.inf)
+    2
+
+    >>> norm(a, 1)
+    20
+    >>> norm(b, 1)
+    7
+    >>> norm(a, -1)
+    -4.6566128774142013e-010
+    >>> norm(b, -1)
+    6
+    >>> norm(a, 2)
+    7.745966692414834
+    >>> norm(b, 2)
+    7.3484692283495345
+
+    >>> norm(a, -2)
+    0
+    >>> norm(b, -2)
+    1.8570331885190563e-016
+    >>> norm(a, 3)
+    5.8480354764257312
+    >>> norm(a, -3)
+    0
+
+    """
+    # Differs from numpy only in non-finite handling and the use of blas.
+    if check_finite:
+        a = np.asarray_chkfinite(a)
+    else:
+        a = np.asarray(a)
+
+    if a.size and a.dtype.char in 'fdFD' and axis is None and not keepdims:
+
+        if ord in (None, 2) and (a.ndim == 1):
+            # use blas for fast and stable euclidean norm
+            nrm2 = get_blas_funcs('nrm2', dtype=a.dtype, ilp64='preferred')
+            return nrm2(a)
+
+        if a.ndim == 2:
+            # Use lapack for a couple fast matrix norms.
+            # For some reason the *lange frobenius norm is slow.
+            lange_args = None
+            # Make sure this works if the user uses the axis keywords
+            # to apply the norm to the transpose.
+            if ord == 1:
+                if np.isfortran(a):
+                    lange_args = '1', a
+                elif np.isfortran(a.T):
+                    lange_args = 'i', a.T
+            elif ord == np.inf:
+                if np.isfortran(a):
+                    lange_args = 'i', a
+                elif np.isfortran(a.T):
+                    lange_args = '1', a.T
+            if lange_args:
+                lange = get_lapack_funcs('lange', dtype=a.dtype, ilp64='preferred')
+                return lange(*lange_args)
+
+    # fall back to numpy in every other case
+    return np.linalg.norm(a, ord=ord, axis=axis, keepdims=keepdims)
+
+
+def _datacopied(arr, original):
+    """
+    Strict check for `arr` not sharing any data with `original`,
+    under the assumption that arr = asarray(original)
+
+    """
+    if arr is original:
+        return False
+    if not isinstance(original, np.ndarray) and hasattr(original, '__array__'):
+        return False
+    return arr.base is None
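+
+
+def _datacopied_demo():
+    # Illustrative sketch added by the editor (hypothetical helper, not part
+    # of the original SciPy source): asarray on a list allocates new memory,
+    # while asarray on an existing ndarray returns the same object.
+    original = [1.0, 2.0]
+    arr = np.asarray(original)
+    assert _datacopied(arr, original)
+    existing = np.array([1.0, 2.0])
+    assert not _datacopied(np.asarray(existing), existing)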
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/_procrustes.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/_procrustes.py
new file mode 100644
index 00000000..1e1f0f90
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/_procrustes.py
@@ -0,0 +1,90 @@
+"""
+Solve the orthogonal Procrustes problem.
+
+"""
+import numpy as np
+from ._decomp_svd import svd
+
+
+__all__ = ['orthogonal_procrustes']
+
+
+def orthogonal_procrustes(A, B, check_finite=True):
+    """
+    Compute the matrix solution of the orthogonal Procrustes problem.
+
+    Given matrices A and B of equal shape, find an orthogonal matrix R
+    that most closely maps A to B using the algorithm given in [1]_.
+
+    Parameters
+    ----------
+    A : (M, N) array_like
+        Matrix to be mapped.
+    B : (M, N) array_like
+        Target matrix.
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    R : (N, N) ndarray
+        The matrix solution of the orthogonal Procrustes problem.
+        Minimizes the Frobenius norm of ``(A @ R) - B``, subject to
+        ``R.T @ R = I``.
+    scale : float
+        Sum of the singular values of ``A.T @ B``.
+
+    Raises
+    ------
+    ValueError
+        If the input array shapes don't match or if check_finite is True and
+        the arrays contain Inf or NaN.
+
+    Notes
+    -----
+    Note that unlike higher level Procrustes analyses of spatial data, this
+    function only uses orthogonal transformations like rotations and
+    reflections, and it does not use scaling or translation.
+
+    .. versionadded:: 0.15.0
+
+    References
+    ----------
+    .. [1] Peter H. Schonemann, "A generalized solution of the orthogonal
+           Procrustes problem", Psychometrika -- Vol. 31, No. 1, March, 1966.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import orthogonal_procrustes
+    >>> A = np.array([[ 2,  0,  1], [-2,  0,  0]])
+
+    Flip the order of columns and check for the anti-diagonal mapping
+
+    >>> R, sca = orthogonal_procrustes(A, np.fliplr(A))
+    >>> R
+    array([[-5.34384992e-17,  0.00000000e+00,  1.00000000e+00],
+           [ 0.00000000e+00,  1.00000000e+00,  0.00000000e+00],
+           [ 1.00000000e+00,  0.00000000e+00, -7.85941422e-17]])
+    >>> sca
+    9.0
+
+    """
+    if check_finite:
+        A = np.asarray_chkfinite(A)
+        B = np.asarray_chkfinite(B)
+    else:
+        A = np.asanyarray(A)
+        B = np.asanyarray(B)
+    if A.ndim != 2:
+        raise ValueError('expected ndim to be 2, but observed %s' % A.ndim)
+    if A.shape != B.shape:
+        raise ValueError('the shapes of A and B differ (%s vs %s)' % (
+            A.shape, B.shape))
+    # Be clever with transposes, with the intention to save memory.
+    u, w, vt = svd(B.T.dot(A).T)
+    R = u.dot(vt)
+    scale = w.sum()
+    return R, scale
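+
+
+def _orthogonal_procrustes_demo():
+    # Illustrative sketch added by the editor (hypothetical helper, not part
+    # of the original SciPy source): mapping A onto a rotated copy of itself
+    # recovers the rotation.
+    theta = 0.3
+    rot = np.array([[np.cos(theta), -np.sin(theta)],
+                    [np.sin(theta), np.cos(theta)]])
+    A = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
+    R, scale = orthogonal_procrustes(A, A.dot(rot))
+    assert np.allclose(R, rot)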
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/_sketches.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/_sketches.py
new file mode 100644
index 00000000..0cefc686
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/_sketches.py
@@ -0,0 +1,179 @@
+""" Sketching-based Matrix Computations """
+
+# Author: Jordi Montes 
+# August 28, 2017
+
+import numpy as np
+
+from scipy._lib._util import check_random_state, rng_integers
+from scipy.sparse import csc_matrix
+
+__all__ = ['clarkson_woodruff_transform']
+
+
+def cwt_matrix(n_rows, n_columns, seed=None):
+    r"""
+    Generate a matrix S which represents a Clarkson-Woodruff transform.
+
+    Given the desired size of matrix, the method returns a matrix S of size
+    (n_rows, n_columns) where each column has all the entries set to 0
+    except for one position which has been randomly set to +1 or -1 with
+    equal probability.
+
+    Parameters
+    ----------
+    n_rows : int
+        Number of rows of S
+    n_columns : int
+        Number of columns of S
+    seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
+        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
+        singleton is used.
+        If `seed` is an int, a new ``RandomState`` instance is used,
+        seeded with `seed`.
+        If `seed` is already a ``Generator`` or ``RandomState`` instance then
+        that instance is used.
+
+    Returns
+    -------
+    S : (n_rows, n_columns) csc_matrix
+        The returned matrix has ``n_columns`` nonzero entries.
+
+    Notes
+    -----
+    Given a matrix A, with probability at least 9/10,
+
+    .. math:: \|SA\| = (1 \pm \epsilon)\|A\|
+
+    where the error epsilon is related to the size of S.
+    """
+    rng = check_random_state(seed)
+    rows = rng_integers(rng, 0, n_rows, n_columns)
+    cols = np.arange(n_columns+1)
+    signs = rng.choice([1, -1], n_columns)
+    S = csc_matrix((signs, rows, cols), shape=(n_rows, n_columns))
+    return S
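+
+
+def _cwt_matrix_demo():
+    # Illustrative sketch added by the editor (hypothetical helper, not part
+    # of the original SciPy source): every column of S holds exactly one
+    # nonzero entry, equal to +1 or -1.
+    S = cwt_matrix(5, 20, seed=123).toarray()
+    assert S.shape == (5, 20)
+    assert all(np.count_nonzero(S[:, j]) == 1 for j in range(20))
+    assert set(np.unique(S)) <= {-1.0, 0.0, 1.0}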
+
+
+def clarkson_woodruff_transform(input_matrix, sketch_size, seed=None):
+    r"""
+    Applies a Clarkson-Woodruff Transform/sketch to the input matrix.
+
+    Given an input_matrix ``A`` of size ``(n, d)``, compute a matrix ``A'`` of
+    size (sketch_size, d) so that
+
+    .. math:: \|Ax\| \approx \|A'x\|
+
+    with high probability via the Clarkson-Woodruff Transform, otherwise
+    known as the CountSketch matrix.
+
+    Parameters
+    ----------
+    input_matrix : array_like
+        Input matrix, of shape ``(n, d)``.
+    sketch_size : int
+        Number of rows for the sketch.
+    seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
+        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
+        singleton is used.
+        If `seed` is an int, a new ``RandomState`` instance is used,
+        seeded with `seed`.
+        If `seed` is already a ``Generator`` or ``RandomState`` instance then
+        that instance is used.
+
+    Returns
+    -------
+    A' : array_like
+        Sketch of the input matrix ``A``, of size ``(sketch_size, d)``.
+
+    Notes
+    -----
+    To make the statement
+
+    .. math:: \|Ax\| \approx \|A'x\|
+
+    precise, observe the following result which is adapted from the
+    proof of Theorem 14 of [2]_ via Markov's Inequality. If we have
+    a sketch size ``sketch_size=k`` which is at least
+
+    .. math:: k \geq \frac{2}{\epsilon^2\delta}
+
+    Then for any fixed vector ``x``,
+
+    .. math:: \|Ax\| = (1\pm\epsilon)\|A'x\|
+
+    with probability at least one minus delta.
+
+    This implementation takes advantage of sparsity: computing
+    a sketch takes time proportional to ``A.nnz``. Data ``A`` which
+    is in ``scipy.sparse.csc_matrix`` format gives the quickest
+    computation time for sparse input.
+
+    >>> import numpy as np
+    >>> from scipy import linalg
+    >>> from scipy import sparse
+    >>> rng = np.random.default_rng()
+    >>> n_rows, n_columns, density, sketch_n_rows = 15000, 100, 0.01, 200
+    >>> A = sparse.rand(n_rows, n_columns, density=density, format='csc')
+    >>> B = sparse.rand(n_rows, n_columns, density=density, format='csr')
+    >>> C = sparse.rand(n_rows, n_columns, density=density, format='coo')
+    >>> D = rng.standard_normal((n_rows, n_columns))
+    >>> SA = linalg.clarkson_woodruff_transform(A, sketch_n_rows) # fastest
+    >>> SB = linalg.clarkson_woodruff_transform(B, sketch_n_rows) # fast
+    >>> SC = linalg.clarkson_woodruff_transform(C, sketch_n_rows) # slower
+    >>> SD = linalg.clarkson_woodruff_transform(D, sketch_n_rows) # slowest
+
+    That said, this method does perform well on dense inputs, just slower
+    on a relative scale.
+
+    References
+    ----------
+    .. [1] Kenneth L. Clarkson and David P. Woodruff. Low rank approximation
+           and regression in input sparsity time. In STOC, 2013.
+    .. [2] David P. Woodruff. Sketching as a tool for numerical linear algebra.
+           In Foundations and Trends in Theoretical Computer Science, 2014.
+
+    Examples
+    --------
+    Create a big dense matrix ``A`` for the example:
+
+    >>> import numpy as np
+    >>> from scipy import linalg
+    >>> n_rows, n_columns  = 15000, 100
+    >>> rng = np.random.default_rng()
+    >>> A = rng.standard_normal((n_rows, n_columns))
+
+    Apply the transform to create a new matrix with 200 rows:
+
+    >>> sketch_n_rows = 200
+    >>> sketch = linalg.clarkson_woodruff_transform(A, sketch_n_rows, seed=rng)
+    >>> sketch.shape
+    (200, 100)
+
+    Now with high probability, the true norm is close to the sketched norm
+    in absolute value.
+
+    >>> linalg.norm(A)
+    1224.2812927123198
+    >>> linalg.norm(sketch)
+    1226.518328407333
+
+    Similarly, applying the sketch approximately preserves the solution
+    to the least-squares problem :math:`\min_x \|Ax - b\|`.
+
+    >>> b = rng.standard_normal(n_rows)
+    >>> x = linalg.lstsq(A, b)[0]
+    >>> Ab = np.hstack((A, b.reshape(-1, 1)))
+    >>> SAb = linalg.clarkson_woodruff_transform(Ab, sketch_n_rows, seed=rng)
+    >>> SA, Sb = SAb[:, :-1], SAb[:, -1]
+    >>> x_sketched = linalg.lstsq(SA, Sb)[0]
+
+    As with the matrix norm example, ``linalg.norm(A @ x - b)`` is close
+    to ``linalg.norm(A @ x_sketched - b)`` with high probability.
+
+    >>> linalg.norm(A @ x - b)
+    122.83242365433877
+    >>> linalg.norm(A @ x_sketched - b)
+    166.58473879945151
+
+    """
+    S = cwt_matrix(sketch_size, input_matrix.shape[0], seed)
+    return S.dot(input_matrix)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/_solvers.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/_solvers.py
new file mode 100644
index 00000000..9d83477c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/_solvers.py
@@ -0,0 +1,847 @@
+"""Matrix equation solver routines"""
+# Author: Jeffrey Armstrong 
+# February 24, 2012
+
+# Modified: Chad Fulton 
+# June 19, 2014
+
+# Modified: Ilhan Polat 
+# September 13, 2016
+
+import warnings
+import numpy as np
+from numpy.linalg import inv, LinAlgError, norm, cond, svd
+
+from ._basic import solve, solve_triangular, matrix_balance
+from .lapack import get_lapack_funcs
+from ._decomp_schur import schur
+from ._decomp_lu import lu
+from ._decomp_qr import qr
+from ._decomp_qz import ordqz
+from ._decomp import _asarray_validated
+from ._special_matrices import kron, block_diag
+
+__all__ = ['solve_sylvester',
+           'solve_continuous_lyapunov', 'solve_discrete_lyapunov',
+           'solve_lyapunov',
+           'solve_continuous_are', 'solve_discrete_are']
+
+
+def solve_sylvester(a, b, q):
+    """
+    Computes a solution (X) to the Sylvester equation :math:`AX + XB = Q`.
+
+    Parameters
+    ----------
+    a : (M, M) array_like
+        Leading matrix of the Sylvester equation
+    b : (N, N) array_like
+        Trailing matrix of the Sylvester equation
+    q : (M, N) array_like
+        Right-hand side
+
+    Returns
+    -------
+    x : (M, N) ndarray
+        The solution to the Sylvester equation.
+
+    Raises
+    ------
+    LinAlgError
+        If solution was not found
+
+    Notes
+    -----
+    Computes a solution to the Sylvester matrix equation via the Bartels-
+    Stewart algorithm. The A and B matrices first undergo Schur
+    decompositions. The resulting matrices are used to construct an
+    alternative Sylvester equation (``RY + YS^T = F``) where the R and S
+    matrices are in quasi-triangular form (or, when R, S or F are complex,
+    triangular form). The simplified equation is then solved using
+    ``*TRSYL`` from LAPACK directly.
+
+    .. versionadded:: 0.11.0
+
+    Examples
+    --------
+    Given `a`, `b`, and `q` solve for `x`:
+
+    >>> import numpy as np
+    >>> from scipy import linalg
+    >>> a = np.array([[-3, -2, 0], [-1, -1, 3], [3, -5, -1]])
+    >>> b = np.array([[1]])
+    >>> q = np.array([[1],[2],[3]])
+    >>> x = linalg.solve_sylvester(a, b, q)
+    >>> x
+    array([[ 0.0625],
+           [-0.5625],
+           [ 0.6875]])
+    >>> np.allclose(a.dot(x) + x.dot(b), q)
+    True
+
+    """
+
+    # Compute the Schur decomposition form of a
+    r, u = schur(a, output='real')
+
+    # Compute the Schur decomposition of b
+    s, v = schur(b.conj().transpose(), output='real')
+
+    # Construct f = u'*q*v
+    f = np.dot(np.dot(u.conj().transpose(), q), v)
+
+    # Call the Sylvester equation solver
+    trsyl, = get_lapack_funcs(('trsyl',), (r, s, f))
+    if trsyl is None:
+        raise RuntimeError('LAPACK implementation does not contain a proper '
+                           'Sylvester equation solver (TRSYL)')
+    y, scale, info = trsyl(r, s, f, tranb='C')
+
+    y = scale*y
+
+    if info < 0:
+        raise LinAlgError("Illegal value encountered in "
+                          "the %d term" % (-info,))
+
+    return np.dot(np.dot(u, y), v.conj().transpose())
+
+
+def solve_continuous_lyapunov(a, q):
+    """
+    Solves the continuous Lyapunov equation :math:`AX + XA^H = Q`.
+
+    Uses the Bartels-Stewart algorithm to find :math:`X`.
+
+    Parameters
+    ----------
+    a : array_like
+        A square matrix
+
+    q : array_like
+        Right-hand side square matrix
+
+    Returns
+    -------
+    x : ndarray
+        Solution to the continuous Lyapunov equation
+
+    See Also
+    --------
+    solve_discrete_lyapunov : computes the solution to the discrete-time
+        Lyapunov equation
+    solve_sylvester : computes the solution to the Sylvester equation
+
+    Notes
+    -----
+    The continuous Lyapunov equation is a special form of the Sylvester
+    equation, hence this solver relies on LAPACK routine ?TRSYL.
+
+    .. versionadded:: 0.11.0
+
+    Examples
+    --------
+    Given `a` and `q` solve for `x`:
+
+    >>> import numpy as np
+    >>> from scipy import linalg
+    >>> a = np.array([[-3, -2, 0], [-1, -1, 0], [0, -5, -1]])
+    >>> q = np.eye(3)
+    >>> x = linalg.solve_continuous_lyapunov(a, q)
+    >>> x
+    array([[ -0.75  ,   0.875 ,  -3.75  ],
+           [  0.875 ,  -1.375 ,   5.3125],
+           [ -3.75  ,   5.3125, -27.0625]])
+    >>> np.allclose(a.dot(x) + x.dot(a.T), q)
+    True
+    """
+
+    a = np.atleast_2d(_asarray_validated(a, check_finite=True))
+    q = np.atleast_2d(_asarray_validated(q, check_finite=True))
+
+    r_or_c = float
+
+    for ind, mat in enumerate((a, q)):
+        if np.iscomplexobj(mat):
+            r_or_c = complex
+
+        if not np.equal(*mat.shape):
+            raise ValueError("Matrix {} should be square.".format("aq"[ind]))
+
+    # Shape consistency check
+    if a.shape != q.shape:
+        raise ValueError("Matrix a and q should have the same shape.")
+
+    # Compute the Schur decomposition form of a
+    r, u = schur(a, output='real')
+
+    # Construct f = u'*q*u
+    f = u.conj().T.dot(q.dot(u))
+
+    # Call the Sylvester equation solver
+    trsyl = get_lapack_funcs('trsyl', (r, f))
+
+    dtype_string = 'T' if r_or_c == float else 'C'
+    y, scale, info = trsyl(r, r, f, tranb=dtype_string)
+
+    if info < 0:
+        raise ValueError('?TRSYL exited with the internal error '
+                         '"illegal value in argument number {}.". See '
+                         'LAPACK documentation for the ?TRSYL error codes.'
+                         ''.format(-info))
+    elif info == 1:
+        warnings.warn('Input "a" has an eigenvalue pair whose sum is '
+                      'very close to or exactly zero. The solution is '
+                      'obtained via perturbing the coefficients.',
+                      RuntimeWarning)
+    y *= scale
+
+    return u.dot(y).dot(u.conj().T)
+
+
+# For backwards compatibility, keep the old name
+solve_lyapunov = solve_continuous_lyapunov
+
+
+def _solve_discrete_lyapunov_direct(a, q):
+    """
+    Solves the discrete Lyapunov equation directly.
+
+    This function is called by the `solve_discrete_lyapunov` function with
+    `method=direct`. It is not supposed to be called directly.
+    """
+
+    lhs = kron(a, a.conj())
+    lhs = np.eye(lhs.shape[0]) - lhs
+    x = solve(lhs, q.flatten())
+
+    return np.reshape(x, q.shape)
+
+
+def _solve_discrete_lyapunov_bilinear(a, q):
+    """
+    Solves the discrete Lyapunov equation using a bilinear transformation.
+
+    This function is called by the `solve_discrete_lyapunov` function with
+    `method=bilinear`. It is not supposed to be called directly.
+    """
+    eye = np.eye(a.shape[0])
+    aH = a.conj().transpose()
+    aHI_inv = inv(aH + eye)
+    b = np.dot(aH - eye, aHI_inv)
+    c = 2*np.dot(np.dot(inv(a + eye), q), aHI_inv)
+    return solve_lyapunov(b.conj().transpose(), -c)
+
+
+def solve_discrete_lyapunov(a, q, method=None):
+    """
+    Solves the discrete Lyapunov equation :math:`AXA^H - X + Q = 0`.
+
+    Parameters
+    ----------
+    a, q : (M, M) array_like
+        Square matrices corresponding to A and Q in the equation
+        above respectively. Must have the same shape.
+
+    method : {'direct', 'bilinear'}, optional
+        Type of solver.
+
+        If not given, chosen to be ``direct`` if ``M`` is less than 10 and
+        ``bilinear`` otherwise.
+
+    Returns
+    -------
+    x : ndarray
+        Solution to the discrete Lyapunov equation
+
+    See Also
+    --------
+    solve_continuous_lyapunov : computes the solution to the continuous-time
+        Lyapunov equation
+
+    Notes
+    -----
+    This section describes the available solvers that can be selected by the
+    'method' parameter. The default method is *direct* if ``M`` is less than 10
+    and *bilinear* otherwise.
+
+    Method *direct* uses a direct analytical solution to the discrete Lyapunov
+    equation. The algorithm is given in, for example, [1]_. However, it requires
+    the linear solution of a system with dimension :math:`M^2` so that
+    performance degrades rapidly for even moderately sized matrices.
+
+    Method *bilinear* uses a bilinear transformation to convert the discrete
+    Lyapunov equation to a continuous Lyapunov equation :math:`(BX+XB'=-C)`
+    where :math:`B=(A-I)(A+I)^{-1}` and
+    :math:`C=2(A' + I)^{-1} Q (A + I)^{-1}`. The continuous equation can be
+    efficiently solved since it is a special case of a Sylvester equation.
+    The transformation algorithm is from Popov (1964) as described in [2]_.
+
+    .. versionadded:: 0.11.0
+
+    References
+    ----------
+    .. [1] Hamilton, James D. Time Series Analysis, Princeton: Princeton
+       University Press, 1994.  265.  Print.
+       http://doc1.lbfl.li/aca/FLMF037168.pdf
+    .. [2] Gajic, Z., and M.T.J. Qureshi. 2008.
+       Lyapunov Matrix Equation in System Stability and Control.
+       Dover Books on Engineering Series. Dover Publications.
+
+    Examples
+    --------
+    Given `a` and `q` solve for `x`:
+
+    >>> import numpy as np
+    >>> from scipy import linalg
+    >>> a = np.array([[0.2, 0.5],[0.7, -0.9]])
+    >>> q = np.eye(2)
+    >>> x = linalg.solve_discrete_lyapunov(a, q)
+    >>> x
+    array([[ 0.70872893,  1.43518822],
+           [ 1.43518822, -2.4266315 ]])
+    >>> np.allclose(a.dot(x).dot(a.T)-x, -q)
+    True
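+
+    Both backends can also be selected explicitly; for this small system
+    they agree (a usage sketch of the ``method`` keyword described above):
+
+    >>> x_direct = linalg.solve_discrete_lyapunov(a, q, method='direct')
+    >>> x_bilinear = linalg.solve_discrete_lyapunov(a, q, method='bilinear')
+    >>> np.allclose(x_direct, x_bilinear)
+    True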
+
+    """
+    a = np.asarray(a)
+    q = np.asarray(q)
+    if method is None:
+        # Select automatically based on size of matrices
+        if a.shape[0] >= 10:
+            method = 'bilinear'
+        else:
+            method = 'direct'
+
+    meth = method.lower()
+
+    if meth == 'direct':
+        x = _solve_discrete_lyapunov_direct(a, q)
+    elif meth == 'bilinear':
+        x = _solve_discrete_lyapunov_bilinear(a, q)
+    else:
+        raise ValueError('Unknown solver %s' % method)
+
+    return x
+
+
+def solve_continuous_are(a, b, q, r, e=None, s=None, balanced=True):
+    r"""
+    Solves the continuous-time algebraic Riccati equation (CARE).
+
+    The CARE is defined as
+
+    .. math::
+
+          X A + A^H X - X B R^{-1} B^H X + Q = 0
+
+    The limitations for a solution to exist are:
+
+        * All eigenvalues of :math:`A` in the right half plane should be
+          controllable.
+
+        * The associated Hamiltonian pencil (see Notes) should have
+          eigenvalues sufficiently far from the imaginary axis.
+
+    Moreover, if ``e`` or ``s`` is not ``None``, then the
+    generalized version of CARE
+
+    .. math::
+
+          E^HXA + A^HXE - (E^HXB + S) R^{-1} (B^HXE + S^H) + Q = 0
+
+    is solved. When omitted, ``e`` is assumed to be the identity and ``s``
+    is assumed to be the zero matrix with sizes compatible with ``a`` and
+    ``b``, respectively.
+
+    Parameters
+    ----------
+    a : (M, M) array_like
+        Square matrix
+    b : (M, N) array_like
+        Input
+    q : (M, M) array_like
+        Input
+    r : (N, N) array_like
+        Nonsingular square matrix
+    e : (M, M) array_like, optional
+        Nonsingular square matrix
+    s : (M, N) array_like, optional
+        Input
+    balanced : bool, optional
+        The boolean that indicates whether a balancing step is performed
+        on the data. The default is set to True.
+
+    Returns
+    -------
+    x : (M, M) ndarray
+        Solution to the continuous-time algebraic Riccati equation.
+
+    Raises
+    ------
+    LinAlgError
+        For cases where the stable subspace of the pencil could not be
+        isolated. See Notes section and the references for details.
+
+    See Also
+    --------
+    solve_discrete_are : Solves the discrete-time algebraic Riccati equation
+
+    Notes
+    -----
+    The equation is solved by forming the extended hamiltonian matrix pencil,
+    as described in [1]_, :math:`H - \lambda J` given by the block matrices ::
+
+        [ A    0    B ]             [ E   0    0 ]
+        [-Q  -A^H  -S ] - \lambda * [ 0  E^H   0 ]
+        [ S^H B^H   R ]             [ 0   0    0 ]
+
+    and using a QZ decomposition method.
+
+    In this algorithm, the failure conditions are linked to the symmetry
+    of the product :math:`U_2 U_1^{-1}` and the condition number of
+    :math:`U_1`. Here, :math:`U` is the 2m-by-m matrix that holds the
+    eigenvectors spanning the stable subspace with 2m rows and partitioned
+    into two m-row matrices. See [1]_ and [2]_ for more details.
+
+    In order to improve the QZ decomposition accuracy, the pencil goes
+    through a balancing step where the sum of absolute values of
+    :math:`H` and :math:`J` entries (after removing the diagonal entries of
+    the sum) is balanced following the recipe given in [3]_.
+
+    .. versionadded:: 0.11.0
+
+    References
+    ----------
+    .. [1]  P. van Dooren , "A Generalized Eigenvalue Approach For Solving
+       Riccati Equations.", SIAM Journal on Scientific and Statistical
+       Computing, Vol.2(2), :doi:`10.1137/0902010`
+
+    .. [2] A.J. Laub, "A Schur Method for Solving Algebraic Riccati
+       Equations.", Massachusetts Institute of Technology. Laboratory for
+       Information and Decision Systems. LIDS-R ; 859. Available online :
+       http://hdl.handle.net/1721.1/1301
+
+    .. [3] P. Benner, "Symplectic Balancing of Hamiltonian Matrices", 2001,
+       SIAM J. Sci. Comput., 2001, Vol.22(5), :doi:`10.1137/S1064827500367993`
+
+    Examples
+    --------
+    Given `a`, `b`, `q`, and `r` solve for `x`:
+
+    >>> import numpy as np
+    >>> from scipy import linalg
+    >>> a = np.array([[4, 3], [-4.5, -3.5]])
+    >>> b = np.array([[1], [-1]])
+    >>> q = np.array([[9, 6], [6, 4.]])
+    >>> r = 1
+    >>> x = linalg.solve_continuous_are(a, b, q, r)
+    >>> x
+    array([[ 21.72792206,  14.48528137],
+           [ 14.48528137,   9.65685425]])
+    >>> np.allclose(a.T.dot(x) + x.dot(a) - x.dot(b).dot(b.T).dot(x), -q)
+    True
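+
+    The balancing step can be switched off with ``balanced=False``; for
+    well-conditioned data such as this example both settings agree (a
+    usage sketch):
+
+    >>> x_nb = linalg.solve_continuous_are(a, b, q, r, balanced=False)
+    >>> np.allclose(x, x_nb)
+    True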
+
+    """
+
+    # Validate input arguments
+    a, b, q, r, e, s, m, n, r_or_c, gen_are = _are_validate_args(
+                                                     a, b, q, r, e, s, 'care')
+
+    H = np.empty((2*m+n, 2*m+n), dtype=r_or_c)
+    H[:m, :m] = a
+    H[:m, m:2*m] = 0.
+    H[:m, 2*m:] = b
+    H[m:2*m, :m] = -q
+    H[m:2*m, m:2*m] = -a.conj().T
+    H[m:2*m, 2*m:] = 0. if s is None else -s
+    H[2*m:, :m] = 0. if s is None else s.conj().T
+    H[2*m:, m:2*m] = b.conj().T
+    H[2*m:, 2*m:] = r
+
+    if gen_are and e is not None:
+        J = block_diag(e, e.conj().T, np.zeros_like(r, dtype=r_or_c))
+    else:
+        J = block_diag(np.eye(2*m), np.zeros_like(r, dtype=r_or_c))
+
+    if balanced:
+        # xGEBAL does not remove the diagonals before scaling. Also
+        # to avoid destroying the Symplectic structure, we follow Ref.3
+        M = np.abs(H) + np.abs(J)
+        M[np.diag_indices_from(M)] = 0.
+        _, (sca, _) = matrix_balance(M, separate=1, permute=0)
+        # do we need to bother?
+        if not np.allclose(sca, np.ones_like(sca)):
+            # Now impose diag(D,inv(D)) from Benner where D is
+            # square root of s_i/s_(n+i) for i=0,....
+            sca = np.log2(sca)
+            # NOTE: np.round rounds halves to the nearest even value
+            # ("banker's rounding").
+            s = np.round((sca[m:2*m] - sca[:m])/2)
+            sca = 2 ** np.r_[s, -s, sca[2*m:]]
+            # Elementwise multiplication via broadcasting.
+            elwisescale = sca[:, None] * np.reciprocal(sca)
+            H *= elwisescale
+            J *= elwisescale
+
+    # Deflate the pencil to 2m x 2m as in Ref. [1], eq. (55)
+    q, r = qr(H[:, -n:])
+    H = q[:, n:].conj().T.dot(H[:, :2*m])
+    J = q[:2*m, n:].conj().T.dot(J[:2*m, :2*m])
+
+    # Decide on which output type is needed for QZ
+    out_str = 'real' if r_or_c == float else 'complex'
+
+    _, _, _, _, _, u = ordqz(H, J, sort='lhp', overwrite_a=True,
+                             overwrite_b=True, check_finite=False,
+                             output=out_str)
+
+    # Get the relevant parts of the stable subspace basis
+    if e is not None:
+        u, _ = qr(np.vstack((e.dot(u[:m, :m]), u[m:, :m])))
+    u00 = u[:m, :m]
+    u10 = u[m:, :m]
+
+    # Solve via back-substitution after checking the condition of u00
+    up, ul, uu = lu(u00)
+    if 1/cond(uu) < np.spacing(1.):
+        raise LinAlgError('Failed to find a finite solution.')
+
+    # Exploit the triangular structure
+    x = solve_triangular(ul.conj().T,
+                         solve_triangular(uu.conj().T,
+                                          u10.conj().T,
+                                          lower=True),
+                         unit_diagonal=True,
+                         ).conj().T.dot(up.conj().T)
+    if balanced:
+        x *= sca[:m, None] * sca[:m]
+
+    # Check the deviation from symmetry for lack of success
+    # See proof of Thm.5 item 3 in [2]
+    u_sym = u00.conj().T.dot(u10)
+    n_u_sym = norm(u_sym, 1)
+    u_sym = u_sym - u_sym.conj().T
+    sym_threshold = np.max([np.spacing(1000.), 0.1*n_u_sym])
+
+    if norm(u_sym, 1) > sym_threshold:
+        raise LinAlgError('The associated Hamiltonian pencil has eigenvalues '
+                          'too close to the imaginary axis')
+
+    return (x + x.conj().T)/2
+
+
+def solve_discrete_are(a, b, q, r, e=None, s=None, balanced=True):
+    r"""
+    Solves the discrete-time algebraic Riccati equation (DARE).
+
+    The DARE is defined as
+
+    .. math::
+
+          A^HXA - X - (A^HXB) (R + B^HXB)^{-1} (B^HXA) + Q = 0
+
+    The limitations for a solution to exist are:
+
+        * All eigenvalues of :math:`A` outside the unit disc should be
+          controllable.
+
+        * The associated symplectic pencil (see Notes) should have
+          eigenvalues sufficiently far from the unit circle.
+
+    Moreover, if ``e`` and ``s`` are not both ``None``, then the
+    generalized version of DARE
+
+    .. math::
+
+          A^HXA - E^HXE - (A^HXB+S) (R+B^HXB)^{-1} (B^HXA+S^H) + Q = 0
+
+    is solved. When omitted, ``e`` is assumed to be the identity and ``s``
+    is assumed to be the zero matrix.
+
+    Parameters
+    ----------
+    a : (M, M) array_like
+        Square matrix
+    b : (M, N) array_like
+        Input
+    q : (M, M) array_like
+        Input
+    r : (N, N) array_like
+        Square matrix
+    e : (M, M) array_like, optional
+        Nonsingular square matrix
+    s : (M, N) array_like, optional
+        Input
+    balanced : bool
+        The boolean that indicates whether a balancing step is performed
+        on the data. The default is set to True.
+
+    Returns
+    -------
+    x : (M, M) ndarray
+        Solution to the discrete algebraic Riccati equation.
+
+    Raises
+    ------
+    LinAlgError
+        For cases where the stable subspace of the pencil could not be
+        isolated. See Notes section and the references for details.
+
+    See Also
+    --------
+    solve_continuous_are : Solves the continuous algebraic Riccati equation
+
+    Notes
+    -----
+    The equation is solved by forming the extended symplectic matrix pencil,
+    as described in [1]_, :math:`H - \lambda J` given by the block matrices ::
+
+           [  A   0   B ]             [ E   0   B ]
+           [ -Q  E^H -S ] - \lambda * [ 0  A^H  0 ]
+           [ S^H  0   R ]             [ 0 -B^H  0 ]
+
+    and using a QZ decomposition method.
+
+    In this algorithm, the failure conditions are linked to the symmetry
+    of the product :math:`U_2 U_1^{-1}` and the condition number of
+    :math:`U_1`. Here, :math:`U` is the 2m-by-m matrix that holds the
+    eigenvectors spanning the stable subspace with 2m rows and partitioned
+    into two m-row matrices. See [1]_ and [2]_ for more details.
+
+    In order to improve the QZ decomposition accuracy, the pencil goes
+    through a balancing step where the sum of absolute values of
+    :math:`H` and :math:`J` rows/cols (after removing the diagonal entries)
+    is balanced following the recipe given in [3]_. If the data contain
+    small numerical noise, balancing may amplify its effects and some
+    clean-up may be required.
+
+    .. versionadded:: 0.11.0
+
+    References
+    ----------
+    .. [1]  P. van Dooren , "A Generalized Eigenvalue Approach For Solving
+       Riccati Equations.", SIAM Journal on Scientific and Statistical
+       Computing, Vol.2(2), :doi:`10.1137/0902010`
+
+    .. [2] A.J. Laub, "A Schur Method for Solving Algebraic Riccati
+       Equations.", Massachusetts Institute of Technology. Laboratory for
+       Information and Decision Systems. LIDS-R ; 859. Available online :
+       http://hdl.handle.net/1721.1/1301
+
+    .. [3] P. Benner, "Symplectic Balancing of Hamiltonian Matrices", 2001,
+       SIAM J. Sci. Comput., 2001, Vol.22(5), :doi:`10.1137/S1064827500367993`
+
+    Examples
+    --------
+    Given `a`, `b`, `q`, and `r` solve for `x`:
+
+    >>> import numpy as np
+    >>> from scipy import linalg as la
+    >>> a = np.array([[0, 1], [0, -1]])
+    >>> b = np.array([[1, 0], [2, 1]])
+    >>> q = np.array([[-4, -4], [-4, 7]])
+    >>> r = np.array([[9, 3], [3, 1]])
+    >>> x = la.solve_discrete_are(a, b, q, r)
+    >>> x
+    array([[-4., -4.],
+           [-4.,  7.]])
+    >>> R = la.solve(r + b.T.dot(x).dot(b), b.T.dot(x).dot(a))
+    >>> np.allclose(a.T.dot(x).dot(a) - x - a.T.dot(x).dot(b).dot(R), -q)
+    True
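+
+    Passing ``e`` and ``s`` explicitly as the identity and the zero matrix
+    reduces the generalized DARE to the regular one solved above (a usage
+    sketch of the generalized interface):
+
+    >>> x_gen = la.solve_discrete_are(a, b, q, r, e=np.eye(2),
+    ...                               s=np.zeros((2, 2)))
+    >>> np.allclose(x, x_gen)
+    True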
+
+    """
+
+    # Validate input arguments
+    a, b, q, r, e, s, m, n, r_or_c, gen_are = _are_validate_args(
+                                                     a, b, q, r, e, s, 'dare')
+
+    # Form the matrix pencil
+    H = np.zeros((2*m+n, 2*m+n), dtype=r_or_c)
+    H[:m, :m] = a
+    H[:m, 2*m:] = b
+    H[m:2*m, :m] = -q
+    H[m:2*m, m:2*m] = np.eye(m) if e is None else e.conj().T
+    H[m:2*m, 2*m:] = 0. if s is None else -s
+    H[2*m:, :m] = 0. if s is None else s.conj().T
+    H[2*m:, 2*m:] = r
+
+    J = np.zeros_like(H, dtype=r_or_c)
+    J[:m, :m] = np.eye(m) if e is None else e
+    J[m:2*m, m:2*m] = a.conj().T
+    J[2*m:, m:2*m] = -b.conj().T
+
+    if balanced:
+        # xGEBAL does not remove the diagonals before scaling. Also
+        # to avoid destroying the Symplectic structure, we follow Ref.3
+        M = np.abs(H) + np.abs(J)
+        M[np.diag_indices_from(M)] = 0.
+        _, (sca, _) = matrix_balance(M, separate=1, permute=0)
+        # do we need to bother?
+        if not np.allclose(sca, np.ones_like(sca)):
+            # Now impose diag(D,inv(D)) from Benner where D is
+            # square root of s_i/s_(n+i) for i=0,....
+            sca = np.log2(sca)
+            # NOTE: np.round rounds halves to the nearest even value
+            # ("banker's rounding").
+            s = np.round((sca[m:2*m] - sca[:m])/2)
+            sca = 2 ** np.r_[s, -s, sca[2*m:]]
+            # Elementwise multiplication via broadcasting.
+            elwisescale = sca[:, None] * np.reciprocal(sca)
+            H *= elwisescale
+            J *= elwisescale
+
+    # Deflate the pencil by the R column as in Ref. [1]
+    q_of_qr, _ = qr(H[:, -n:])
+    H = q_of_qr[:, n:].conj().T.dot(H[:, :2*m])
+    J = q_of_qr[:, n:].conj().T.dot(J[:, :2*m])
+
+    # Decide on which output type is needed for QZ
+    out_str = 'real' if r_or_c == float else 'complex'
+
+    _, _, _, _, _, u = ordqz(H, J, sort='iuc',
+                             overwrite_a=True,
+                             overwrite_b=True,
+                             check_finite=False,
+                             output=out_str)
+
+    # Get the relevant parts of the stable subspace basis
+    if e is not None:
+        u, _ = qr(np.vstack((e.dot(u[:m, :m]), u[m:, :m])))
+    u00 = u[:m, :m]
+    u10 = u[m:, :m]
+
+    # Solve via back-substitution after checking the condition of u00
+    up, ul, uu = lu(u00)
+
+    if 1/cond(uu) < np.spacing(1.):
+        raise LinAlgError('Failed to find a finite solution.')
+
+    # Exploit the triangular structure
+    x = solve_triangular(ul.conj().T,
+                         solve_triangular(uu.conj().T,
+                                          u10.conj().T,
+                                          lower=True),
+                         unit_diagonal=True,
+                         ).conj().T.dot(up.conj().T)
+    if balanced:
+        x *= sca[:m, None] * sca[:m]
+
+    # Check the deviation from symmetry for lack of success
+    # See proof of Thm.5 item 3 in [2]
+    u_sym = u00.conj().T.dot(u10)
+    n_u_sym = norm(u_sym, 1)
+    u_sym = u_sym - u_sym.conj().T
+    sym_threshold = np.max([np.spacing(1000.), 0.1*n_u_sym])
+
+    if norm(u_sym, 1) > sym_threshold:
+        raise LinAlgError('The associated symplectic pencil has eigenvalues '
+                          'too close to the unit circle')
+
+    return (x + x.conj().T)/2
+
+
+def _are_validate_args(a, b, q, r, e, s, eq_type='care'):
+    """
+    A helper function to validate the arguments supplied to the
+    Riccati equation solvers. Any discrepancy found in the input
+    matrices leads to a ``ValueError`` exception.
+
+    Essentially, it performs:
+
+        - a check whether the input is free of NaN and Infs
+        - a pass for the data through ``numpy.atleast_2d()``
+        - squareness check of the relevant arrays
+        - shape consistency check of the arrays
+        - singularity check of the relevant arrays
+        - symmetry check of the relevant matrices
+        - a check whether the regular or the generalized version is asked.
+
+    This function is used by ``solve_continuous_are`` and
+    ``solve_discrete_are``.
+
+    Parameters
+    ----------
+    a, b, q, r, e, s : array_like
+        Input data
+    eq_type : str
+        Accepted arguments are 'care' and 'dare'.
+
+    Returns
+    -------
+    a, b, q, r, e, s : ndarray
+        Regularized input data
+    m, n : int
+        shape of the problem
+    r_or_c : type
+        Data type of the problem, returns float or complex
+    gen_or_not : bool
+        Type of the equation, True for generalized and False for regular ARE.
+
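+    As an illustration (this is a private helper; the call below only makes
+    the return layout concrete):
+
+    >>> import numpy as np
+    >>> a, b, q, r, e, s, m, n, r_or_c, gen = _are_validate_args(
+    ...     np.eye(2), np.ones((2, 1)), np.eye(2), [[1.]], None, None, 'care')
+    >>> m, n, r_or_c, gen
+    (2, 1, <class 'float'>, False)
+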
+    """
+
+    if eq_type.lower() not in ('dare', 'care'):
+        raise ValueError("Equation type unknown. "
+                         "Only 'care' and 'dare' are understood.")
+
+    a = np.atleast_2d(_asarray_validated(a, check_finite=True))
+    b = np.atleast_2d(_asarray_validated(b, check_finite=True))
+    q = np.atleast_2d(_asarray_validated(q, check_finite=True))
+    r = np.atleast_2d(_asarray_validated(r, check_finite=True))
+
+    # Get the correct data types otherwise NumPy complains
+    # about pushing complex numbers into real arrays.
+    r_or_c = complex if np.iscomplexobj(b) else float
+
+    for ind, mat in enumerate((a, q, r)):
+        if np.iscomplexobj(mat):
+            r_or_c = complex
+
+        if not np.equal(*mat.shape):
+            raise ValueError("Matrix {} should be square.".format("aqr"[ind]))
+
+    # Shape consistency checks
+    m, n = b.shape
+    if m != a.shape[0]:
+        raise ValueError("Matrix a and b should have the same number of rows.")
+    if m != q.shape[0]:
+        raise ValueError("Matrix a and q should have the same shape.")
+    if n != r.shape[0]:
+        raise ValueError("Matrix b and r should have the same number of cols.")
+
+    # Check if the data matrices q, r are (sufficiently) hermitian
+    for ind, mat in enumerate((q, r)):
+        if norm(mat - mat.conj().T, 1) > np.spacing(norm(mat, 1))*100:
+            raise ValueError("Matrix {} should be symmetric/hermitian."
+                             "".format("qr"[ind]))
+
+    # Continuous time ARE should have a nonsingular r matrix.
+    if eq_type == 'care':
+        min_sv = svd(r, compute_uv=False)[-1]
+        if min_sv == 0. or min_sv < np.spacing(1.)*norm(r, 1):
+            raise ValueError('Matrix r is numerically singular.')
+
+    # Check if the generalized case is required with omitted arguments
+    # perform late shape checking etc.
+    generalized_case = e is not None or s is not None
+
+    if generalized_case:
+        if e is not None:
+            e = np.atleast_2d(_asarray_validated(e, check_finite=True))
+            if not np.equal(*e.shape):
+                raise ValueError("Matrix e should be square.")
+            if m != e.shape[0]:
+                raise ValueError("Matrix a and e should have the same shape.")
+            # numpy.linalg.cond doesn't check for exact zeros and
+            # emits a runtime warning. Hence the following manual check.
+            min_sv = svd(e, compute_uv=False)[-1]
+            if min_sv == 0. or min_sv < np.spacing(1.) * norm(e, 1):
+                raise ValueError('Matrix e is numerically singular.')
+            if np.iscomplexobj(e):
+                r_or_c = complex
+        if s is not None:
+            s = np.atleast_2d(_asarray_validated(s, check_finite=True))
+            if s.shape != b.shape:
+                raise ValueError("Matrix b and s should have the same shape.")
+            if np.iscomplexobj(s):
+                r_or_c = complex
+
+    return a, b, q, r, e, s, m, n, r_or_c, generalized_case
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/_special_matrices.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/_special_matrices.py
new file mode 100644
index 00000000..0447cf57
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/_special_matrices.py
@@ -0,0 +1,1379 @@
+import math
+import numpy as np
+from numpy.lib.stride_tricks import as_strided
+
+__all__ = ['tri', 'tril', 'triu', 'toeplitz', 'circulant', 'hankel',
+           'hadamard', 'leslie', 'kron', 'block_diag', 'companion',
+           'helmert', 'hilbert', 'invhilbert', 'pascal', 'invpascal', 'dft',
+           'fiedler', 'fiedler_companion', 'convolution_matrix']
+
+
+# -----------------------------------------------------------------------------
+#  matrix construction functions
+# -----------------------------------------------------------------------------
+
+#
+# *Note*: tri{,u,l} is implemented in NumPy, but an important bug was fixed
+# in 2.0.0.dev-1af2f3; the following tri{,u,l} definitions are kept here for
+# backwards compatibility.
+
+def tri(N, M=None, k=0, dtype=None):
+    """
+    Construct (N, M) matrix filled with ones at and below the kth diagonal.
+
+    The matrix has ``A[i,j] == 1`` for ``j <= i + k``.
+
+    Parameters
+    ----------
+    N : int
+        The size of the first dimension of the matrix.
+    M : int or None, optional
+        The size of the second dimension of the matrix. If `M` is None,
+        `M = N` is assumed.
+    k : int, optional
+        Number of the diagonal at and below which the matrix is filled
+        with ones. `k` = 0 is the main diagonal, `k` < 0 subdiagonal and
+        `k` > 0 superdiagonal.
+    dtype : dtype, optional
+        Data type of the matrix.
+
+    Returns
+    -------
+    tri : (N, M) ndarray
+        Tri matrix.
+
+    Examples
+    --------
+    >>> from scipy.linalg import tri
+    >>> tri(3, 5, 2, dtype=int)
+    array([[1, 1, 1, 0, 0],
+           [1, 1, 1, 1, 0],
+           [1, 1, 1, 1, 1]])
+    >>> tri(3, 5, -1, dtype=int)
+    array([[0, 0, 0, 0, 0],
+           [1, 0, 0, 0, 0],
+           [1, 1, 0, 0, 0]])
+
+    """
+    if M is None:
+        M = N
+    if isinstance(M, str):
+        # pearu: any objections to remove this feature?
+        #       As tri(N,'d') is equivalent to tri(N,dtype='d')
+        dtype = M
+        M = N
+    m = np.greater_equal.outer(np.arange(k, N+k), np.arange(M))
+    if dtype is None:
+        return m
+    else:
+        return m.astype(dtype)
+
+
+def tril(m, k=0):
+    """
+    Make a copy of a matrix with elements above the kth diagonal zeroed.
+
+    Parameters
+    ----------
+    m : array_like
+        Matrix whose elements to return
+    k : int, optional
+        Diagonal above which to zero elements.
+        `k` == 0 is the main diagonal, `k` < 0 subdiagonal and
+        `k` > 0 superdiagonal.
+
+    Returns
+    -------
+    tril : ndarray
+        Return is the same shape and type as `m`.
+
+    Examples
+    --------
+    >>> from scipy.linalg import tril
+    >>> tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
+    array([[ 0,  0,  0],
+           [ 4,  0,  0],
+           [ 7,  8,  0],
+           [10, 11, 12]])
+
+    """
+    m = np.asarray(m)
+    out = tri(m.shape[0], m.shape[1], k=k, dtype=m.dtype.char) * m
+    return out
+
+
+def triu(m, k=0):
+    """
+    Make a copy of a matrix with elements below the kth diagonal zeroed.
+
+    Parameters
+    ----------
+    m : array_like
+        Matrix whose elements to return
+    k : int, optional
+        Diagonal below which to zero elements.
+        `k` == 0 is the main diagonal, `k` < 0 subdiagonal and
+        `k` > 0 superdiagonal.
+
+    Returns
+    -------
+    triu : ndarray
+        Return matrix with zeroed elements below the kth diagonal and has
+        same shape and type as `m`.
+
+    Examples
+    --------
+    >>> from scipy.linalg import triu
+    >>> triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
+    array([[ 1,  2,  3],
+           [ 4,  5,  6],
+           [ 0,  8,  9],
+           [ 0,  0, 12]])
+
+    """
+    m = np.asarray(m)
+    out = (1 - tri(m.shape[0], m.shape[1], k - 1, m.dtype.char)) * m
+    return out
+
+
+def toeplitz(c, r=None):
+    """
+    Construct a Toeplitz matrix.
+
+    The Toeplitz matrix has constant diagonals, with c as its first column
+    and r as its first row. If r is not given, ``r == conjugate(c)`` is
+    assumed.
+
+    Parameters
+    ----------
+    c : array_like
+        First column of the matrix.  Whatever the actual shape of `c`, it
+        will be converted to a 1-D array.
+    r : array_like, optional
+        First row of the matrix. If None, ``r = conjugate(c)`` is assumed;
+        in this case, if c[0] is real, the result is a Hermitian matrix.
+        r[0] is ignored; the first row of the returned matrix is
+        ``[c[0], r[1:]]``.  Whatever the actual shape of `r`, it will be
+        converted to a 1-D array.
+
+    Returns
+    -------
+    A : (len(c), len(r)) ndarray
+        The Toeplitz matrix. Dtype is the same as ``(c[0] + r[0]).dtype``.
+
+    See Also
+    --------
+    circulant : circulant matrix
+    hankel : Hankel matrix
+    solve_toeplitz : Solve a Toeplitz system.
+
+    Notes
+    -----
+    The behavior when `c` or `r` is a scalar, or when `c` is complex and
+    `r` is None, was changed in version 0.8.0. The behavior in previous
+    versions was undocumented and is no longer supported.
+
+    Examples
+    --------
+    >>> from scipy.linalg import toeplitz
+    >>> toeplitz([1,2,3], [1,4,5,6])
+    array([[1, 4, 5, 6],
+           [2, 1, 4, 5],
+           [3, 2, 1, 4]])
+    >>> toeplitz([1.0, 2+3j, 4-1j])
+    array([[ 1.+0.j,  2.-3.j,  4.+1.j],
+           [ 2.+3.j,  1.+0.j,  2.-3.j],
+           [ 4.-1.j,  2.+3.j,  1.+0.j]])
+
+    """
+    c = np.asarray(c).ravel()
+    if r is None:
+        r = c.conjugate()
+    else:
+        r = np.asarray(r).ravel()
+    # Form a 1-D array containing a reversed c followed by r[1:] that could be
+    # strided to give us toeplitz matrix.
+    vals = np.concatenate((c[::-1], r[1:]))
+    out_shp = len(c), len(r)
+    n = vals.strides[0]
+    return as_strided(vals[len(c)-1:], shape=out_shp, strides=(-n, n)).copy()
+
+
+def circulant(c):
+    """
+    Construct a circulant matrix.
+
+    Parameters
+    ----------
+    c : (N,) array_like
+        1-D array, the first column of the matrix.
+
+    Returns
+    -------
+    A : (N, N) ndarray
+        A circulant matrix whose first column is `c`.
+
+    See Also
+    --------
+    toeplitz : Toeplitz matrix
+    hankel : Hankel matrix
+    solve_circulant : Solve a circulant system.
+
+    Notes
+    -----
+    .. versionadded:: 0.8.0
+
+    Examples
+    --------
+    >>> from scipy.linalg import circulant
+    >>> circulant([1, 2, 3])
+    array([[1, 3, 2],
+           [2, 1, 3],
+           [3, 2, 1]])
+
+    """
+    c = np.asarray(c).ravel()
+    # Form an extended array that could be strided to give circulant version
+    c_ext = np.concatenate((c[::-1], c[:0:-1]))
+    L = len(c)
+    n = c_ext.strides[0]
+    return as_strided(c_ext[L-1:], shape=(L, L), strides=(-n, n)).copy()
+
+
+def hankel(c, r=None):
+    """
+    Construct a Hankel matrix.
+
+    The Hankel matrix has constant anti-diagonals, with `c` as its
+    first column and `r` as its last row. If `r` is not given, then
+    `r = zeros_like(c)` is assumed.
+
+    Parameters
+    ----------
+    c : array_like
+        First column of the matrix. Whatever the actual shape of `c`, it
+        will be converted to a 1-D array.
+    r : array_like, optional
+        Last row of the matrix. If None, ``r = zeros_like(c)`` is assumed.
+        r[0] is ignored; the last row of the returned matrix is
+        ``[c[-1], r[1:]]``. Whatever the actual shape of `r`, it will be
+        converted to a 1-D array.
+
+    Returns
+    -------
+    A : (len(c), len(r)) ndarray
+        The Hankel matrix. Dtype is the same as ``(c[0] + r[0]).dtype``.
+
+    See Also
+    --------
+    toeplitz : Toeplitz matrix
+    circulant : circulant matrix
+
+    Examples
+    --------
+    >>> from scipy.linalg import hankel
+    >>> hankel([1, 17, 99])
+    array([[ 1, 17, 99],
+           [17, 99,  0],
+           [99,  0,  0]])
+    >>> hankel([1,2,3,4], [4,7,7,8,9])
+    array([[1, 2, 3, 4, 7],
+           [2, 3, 4, 7, 7],
+           [3, 4, 7, 7, 8],
+           [4, 7, 7, 8, 9]])
+
+    """
+    c = np.asarray(c).ravel()
+    if r is None:
+        r = np.zeros_like(c)
+    else:
+        r = np.asarray(r).ravel()
+    # Form a 1-D array of values to be used in the matrix, containing `c`
+    # followed by r[1:].
+    vals = np.concatenate((c, r[1:]))
+    # Stride on concatenated array to get hankel matrix
+    out_shp = len(c), len(r)
+    n = vals.strides[0]
+    return as_strided(vals, shape=out_shp, strides=(n, n)).copy()
+
+
+def hadamard(n, dtype=int):
+    """
+    Construct an Hadamard matrix.
+
+    Constructs an n-by-n Hadamard matrix, using Sylvester's
+    construction. `n` must be a power of 2.
+
+    Parameters
+    ----------
+    n : int
+        The order of the matrix. `n` must be a power of 2.
+    dtype : dtype, optional
+        The data type of the array to be constructed.
+
+    Returns
+    -------
+    H : (n, n) ndarray
+        The Hadamard matrix.
+
+    Notes
+    -----
+    .. versionadded:: 0.8.0
+
+    Examples
+    --------
+    >>> from scipy.linalg import hadamard
+    >>> hadamard(2, dtype=complex)
+    array([[ 1.+0.j,  1.+0.j],
+           [ 1.+0.j, -1.-0.j]])
+    >>> hadamard(4)
+    array([[ 1,  1,  1,  1],
+           [ 1, -1,  1, -1],
+           [ 1,  1, -1, -1],
+           [ 1, -1, -1,  1]])
+
+    """
+
+    # This function is a slightly modified version of the
+    # function contributed by Ivo in ticket #675.
+
+    if n < 1:
+        lg2 = 0
+    else:
+        lg2 = int(math.log(n, 2))
+    if 2 ** lg2 != n:
+        raise ValueError("n must be an positive integer, and n must be "
+                         "a power of 2")
+
+    H = np.array([[1]], dtype=dtype)
+
+    # Sylvester's construction
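+    # Each iteration doubles the order: H_{2k} = [[H_k, H_k], [H_k, -H_k]].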
+    for _ in range(lg2):
+        H = np.vstack((np.hstack((H, H)), np.hstack((H, -H))))
+
+    return H
+
+
+def leslie(f, s):
+    """
+    Create a Leslie matrix.
+
+    Given the length n array of fecundity coefficients `f` and the length
+    n-1 array of survival coefficients `s`, return the associated Leslie
+    matrix.
+
+    Parameters
+    ----------
+    f : (N,) array_like
+        The "fecundity" coefficients.
+    s : (N-1,) array_like
+        The "survival" coefficients, has to be 1-D.  The length of `s`
+        must be one less than the length of `f`, and it must be at least 1.
+
+    Returns
+    -------
+    L : (N, N) ndarray
+        The array is zero except for the first row,
+        which is `f`, and the first sub-diagonal, which is `s`.
+        The data-type of the array will be the data-type of ``f[0]+s[0]``.
+
+    Notes
+    -----
+    .. versionadded:: 0.8.0
+
+    The Leslie matrix is used to model discrete-time, age-structured
+    population growth [1]_ [2]_. In a population with `n` age classes, two sets
+    of parameters define a Leslie matrix: the `n` "fecundity coefficients",
+    which give the number of offspring per-capita produced by each age
+    class, and the `n` - 1 "survival coefficients", which give the
+    per-capita survival rate of each age class.
+
+    References
+    ----------
+    .. [1] P. H. Leslie, On the use of matrices in certain population
+           mathematics, Biometrika, Vol. 33, No. 3, 183--212 (Nov. 1945)
+    .. [2] P. H. Leslie, Some further notes on the use of matrices in
+           population mathematics, Biometrika, Vol. 35, No. 3/4, 213--245
+           (Dec. 1948)
+
+    Examples
+    --------
+    >>> from scipy.linalg import leslie
+    >>> leslie([0.1, 2.0, 1.0, 0.1], [0.2, 0.8, 0.7])
+    array([[ 0.1,  2. ,  1. ,  0.1],
+           [ 0.2,  0. ,  0. ,  0. ],
+           [ 0. ,  0.8,  0. ,  0. ],
+           [ 0. ,  0. ,  0.7,  0. ]])
+
+    """
+    f = np.atleast_1d(f)
+    s = np.atleast_1d(s)
+    if f.ndim != 1:
+        raise ValueError("Incorrect shape for f.  f must be 1D")
+    if s.ndim != 1:
+        raise ValueError("Incorrect shape for s.  s must be 1D")
+    if f.size != s.size + 1:
+        raise ValueError("Incorrect lengths for f and s.  The length"
+                         " of s must be one less than the length of f.")
+    if s.size == 0:
+        raise ValueError("The length of s must be at least 1.")
+
+    tmp = f[0] + s[0]
+    n = f.size
+    a = np.zeros((n, n), dtype=tmp.dtype)
+    a[0] = f
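+    # Integer (fancy) indexing writes `s` onto the first subdiagonal, i.e.,
+    # the entries (1, 0), (2, 1), ..., (n-1, n-2).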
+    a[list(range(1, n)), list(range(0, n - 1))] = s
+    return a
+
+
+def kron(a, b):
+    """
+    Kronecker product.
+
+    The result is the block matrix::
+
+        a[0,0]*b    a[0,1]*b  ... a[0,-1]*b
+        a[1,0]*b    a[1,1]*b  ... a[1,-1]*b
+        ...
+        a[-1,0]*b   a[-1,1]*b ... a[-1,-1]*b
+
+    Parameters
+    ----------
+    a : (M, N) ndarray
+        Input array
+    b : (P, Q) ndarray
+        Input array
+
+    Returns
+    -------
+    A : (M*P, N*Q) ndarray
+        Kronecker product of `a` and `b`.
+
+    Examples
+    --------
+    >>> from numpy import array
+    >>> from scipy.linalg import kron
+    >>> kron(array([[1,2],[3,4]]), array([[1,1,1]]))
+    array([[1, 1, 1, 2, 2, 2],
+           [3, 3, 3, 4, 4, 4]])
+
+    """
+    if not a.flags['CONTIGUOUS']:
+        a = np.reshape(a, a.shape)
+    if not b.flags['CONTIGUOUS']:
+        b = np.reshape(b, b.shape)
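+    # outer() flattens its inputs, so reshaping to a.shape + b.shape yields
+    # o[i, j] == a[i, j] * b; concatenating twice along axis 1 then stitches
+    # these blocks into the (M*P, N*Q) layout shown in the docstring.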
+    o = np.outer(a, b)
+    o = o.reshape(a.shape + b.shape)
+    return np.concatenate(np.concatenate(o, axis=1), axis=1)
+
+
+def block_diag(*arrs):
+    """
+    Create a block diagonal matrix from provided arrays.
+
+    Given the inputs `A`, `B` and `C`, the output will have these
+    arrays arranged on the diagonal::
+
+        [[A, 0, 0],
+         [0, B, 0],
+         [0, 0, C]]
+
+    Parameters
+    ----------
+    A, B, C, ... : array_like, up to 2-D
+        Input arrays.  A 1-D array or array_like sequence of length `n` is
+        treated as a 2-D array with shape ``(1,n)``.
+
+    Returns
+    -------
+    D : ndarray
+        Array with `A`, `B`, `C`, ... on the diagonal. `D` has the
+        same dtype as `A`.
+
+    Notes
+    -----
+    If all the input arrays are square, the output is known as a
+    block diagonal matrix.
+
+    Empty sequences (i.e., array-likes of zero size) will not be ignored.
+    Notably, both ``[]`` and ``[[]]`` are treated as matrices with shape
+    ``(1, 0)``.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import block_diag
+    >>> A = [[1, 0],
+    ...      [0, 1]]
+    >>> B = [[3, 4, 5],
+    ...      [6, 7, 8]]
+    >>> C = [[7]]
+    >>> P = np.zeros((2, 0), dtype='int32')
+    >>> block_diag(A, B, C)
+    array([[1, 0, 0, 0, 0, 0],
+           [0, 1, 0, 0, 0, 0],
+           [0, 0, 3, 4, 5, 0],
+           [0, 0, 6, 7, 8, 0],
+           [0, 0, 0, 0, 0, 7]])
+    >>> block_diag(A, P, B, C)
+    array([[1, 0, 0, 0, 0, 0],
+           [0, 1, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0],
+           [0, 0, 3, 4, 5, 0],
+           [0, 0, 6, 7, 8, 0],
+           [0, 0, 0, 0, 0, 7]])
+    >>> block_diag(1.0, [2, 3], [[4, 5], [6, 7]])
+    array([[ 1.,  0.,  0.,  0.,  0.],
+           [ 0.,  2.,  3.,  0.,  0.],
+           [ 0.,  0.,  0.,  4.,  5.],
+           [ 0.,  0.,  0.,  6.,  7.]])
+
+    """
+    if arrs == ():
+        arrs = ([],)
+    arrs = [np.atleast_2d(a) for a in arrs]
+
+    bad_args = [k for k in range(len(arrs)) if arrs[k].ndim > 2]
+    if bad_args:
+        raise ValueError("arguments in the following positions have dimension "
+                         "greater than 2: %s" % bad_args)
+
+    shapes = np.array([a.shape for a in arrs])
+    out_dtype = np.result_type(*[arr.dtype for arr in arrs])
+    out = np.zeros(np.sum(shapes, axis=0), dtype=out_dtype)
+
+    r, c = 0, 0
+    for i, (rr, cc) in enumerate(shapes):
+        out[r:r + rr, c:c + cc] = arrs[i]
+        r += rr
+        c += cc
+    return out
+
+
+def companion(a):
+    """
+    Create a companion matrix.
+
+    Create the companion matrix [1]_ associated with the polynomial whose
+    coefficients are given in `a`.
+
+    Parameters
+    ----------
+    a : (N,) array_like
+        1-D array of polynomial coefficients. The length of `a` must be
+        at least two, and ``a[0]`` must not be zero.
+
+    Returns
+    -------
+    c : (N-1, N-1) ndarray
+        The first row of `c` is ``-a[1:]/a[0]``, and the first
+        sub-diagonal is all ones.  The data-type of the array is the same
+        as the data-type of ``1.0*a[0]``.
+
+    Raises
+    ------
+    ValueError
+        If any of the following are true: a) ``a.ndim != 1``;
+        b) ``a.size < 2``; c) ``a[0] == 0``.
+
+    Notes
+    -----
+    .. versionadded:: 0.8.0
+
+    References
+    ----------
+    .. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*.  Cambridge, UK:
+        Cambridge University Press, 1999, pp. 146-7.
+
+    Examples
+    --------
+    >>> from scipy.linalg import companion
+    >>> companion([1, -10, 31, -30])
+    array([[ 10., -31.,  30.],
+           [  1.,   0.,   0.],
+           [  0.,   1.,   0.]])
+
+    """
+    a = np.atleast_1d(a)
+
+    if a.ndim != 1:
+        raise ValueError("Incorrect shape for `a`.  `a` must be "
+                         "one-dimensional.")
+
+    if a.size < 2:
+        raise ValueError("The length of `a` must be at least 2.")
+
+    if a[0] == 0:
+        raise ValueError("The first coefficient in `a` must not be zero.")
+
+    first_row = -a[1:] / (1.0 * a[0])
+    n = a.size
+    c = np.zeros((n - 1, n - 1), dtype=first_row.dtype)
+    c[0] = first_row
+    c[list(range(1, n - 1)), list(range(0, n - 2))] = 1
+    return c
+
+
+def helmert(n, full=False):
+    """
+    Create a Helmert matrix of order `n`.
+
+    This has applications in statistics, compositional or simplicial analysis,
+    and in Aitchison geometry.
+
+    Parameters
+    ----------
+    n : int
+        The size of the array to create.
+    full : bool, optional
+        If True the (n, n) ndarray will be returned.
+        Otherwise the submatrix that does not include the first
+        row will be returned.
+        Default: False.
+
+    Returns
+    -------
+    M : ndarray
+        The Helmert matrix.
+        The shape is (n, n) or (n-1, n) depending on the `full` argument.
+
+    Examples
+    --------
+    >>> from scipy.linalg import helmert
+    >>> helmert(5, full=True)
+    array([[ 0.4472136 ,  0.4472136 ,  0.4472136 ,  0.4472136 ,  0.4472136 ],
+           [ 0.70710678, -0.70710678,  0.        ,  0.        ,  0.        ],
+           [ 0.40824829,  0.40824829, -0.81649658,  0.        ,  0.        ],
+           [ 0.28867513,  0.28867513,  0.28867513, -0.8660254 ,  0.        ],
+           [ 0.2236068 ,  0.2236068 ,  0.2236068 ,  0.2236068 , -0.89442719]])
+
+    """
+    H = np.tril(np.ones((n, n)), -1) - np.diag(np.arange(n))
+    d = np.arange(n) * np.arange(1, n+1)
+    H[0] = 1
+    d[0] = n
+    H_full = H / np.sqrt(d)[:, np.newaxis]
+    if full:
+        return H_full
+    else:
+        return H_full[1:]
+
+
+def hilbert(n):
+    """
+    Create a Hilbert matrix of order `n`.
+
+    Returns the `n` by `n` array with entries `h[i,j] = 1 / (i + j + 1)`.
+
+    Parameters
+    ----------
+    n : int
+        The size of the array to create.
+
+    Returns
+    -------
+    h : (n, n) ndarray
+        The Hilbert matrix.
+
+    See Also
+    --------
+    invhilbert : Compute the inverse of a Hilbert matrix.
+
+    Notes
+    -----
+    .. versionadded:: 0.10.0
+
+    Examples
+    --------
+    >>> from scipy.linalg import hilbert
+    >>> hilbert(3)
+    array([[ 1.        ,  0.5       ,  0.33333333],
+           [ 0.5       ,  0.33333333,  0.25      ],
+           [ 0.33333333,  0.25      ,  0.2       ]])
+
+    """
+    values = 1.0 / (1.0 + np.arange(2 * n - 1))
+    h = hankel(values[:n], r=values[n - 1:])
+    return h
+
+
+def invhilbert(n, exact=False):
+    """
+    Compute the inverse of the Hilbert matrix of order `n`.
+
+    The entries in the inverse of a Hilbert matrix are integers. When `n`
+    is greater than 14, some entries in the inverse exceed the upper limit
+    of 64 bit integers. The `exact` argument provides two options for
+    dealing with these large integers.
+
+    Parameters
+    ----------
+    n : int
+        The order of the Hilbert matrix.
+    exact : bool, optional
+        If False, the data type of the array that is returned is np.float64,
+        and the array is an approximation of the inverse.
+        If True, the array is the exact integer inverse array. To represent
+        the exact inverse when n > 14, the returned array is an object array
+        of long integers. For n <= 14, the exact inverse is returned as an
+        array with data type np.int64.
+
+    Returns
+    -------
+    invh : (n, n) ndarray
+        The data type of the array is np.float64 if `exact` is False.
+        If `exact` is True, the data type is either np.int64 (for n <= 14)
+        or object (for n > 14). In the latter case, the objects in the
+        array will be long integers.
+
+    See Also
+    --------
+    hilbert : Create a Hilbert matrix.
+
+    Notes
+    -----
+    .. versionadded:: 0.10.0
+
+    Examples
+    --------
+    >>> from scipy.linalg import invhilbert
+    >>> invhilbert(4)
+    array([[   16.,  -120.,   240.,  -140.],
+           [ -120.,  1200., -2700.,  1680.],
+           [  240., -2700.,  6480., -4200.],
+           [ -140.,  1680., -4200.,  2800.]])
+    >>> invhilbert(4, exact=True)
+    array([[   16,  -120,   240,  -140],
+           [ -120,  1200, -2700,  1680],
+           [  240, -2700,  6480, -4200],
+           [ -140,  1680, -4200,  2800]], dtype=int64)
+    >>> invhilbert(16)[7,7]
+    4.2475099528537506e+19
+    >>> invhilbert(16, exact=True)[7,7]
+    42475099528537378560
+
+    """
+    from scipy.special import comb
+    if exact:
+        if n > 14:
+            dtype = object
+        else:
+            dtype = np.int64
+    else:
+        dtype = np.float64
+    invh = np.empty((n, n), dtype=dtype)
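+    # Fill the lower triangle with the closed-form entries (products of
+    # binomial coefficients) and mirror them, since the inverse Hilbert
+    # matrix is symmetric.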
+    for i in range(n):
+        for j in range(0, i + 1):
+            s = i + j
+            invh[i, j] = ((-1) ** s * (s + 1) *
+                          comb(n + i, n - j - 1, exact) *
+                          comb(n + j, n - i - 1, exact) *
+                          comb(s, i, exact) ** 2)
+            if i != j:
+                invh[j, i] = invh[i, j]
+    return invh
+
+
+def pascal(n, kind='symmetric', exact=True):
+    """
+    Returns the n x n Pascal matrix.
+
+    The Pascal matrix is a matrix containing the binomial coefficients as
+    its elements.
+
+    Parameters
+    ----------
+    n : int
+        The size of the matrix to create; that is, the result is an n x n
+        matrix.
+    kind : str, optional
+        Must be one of 'symmetric', 'lower', or 'upper'.
+        Default is 'symmetric'.
+    exact : bool, optional
+        If `exact` is True, the result is either an array of type
+        numpy.uint64 (if n < 35) or an object array of Python long integers.
+        If `exact` is False, the coefficients in the matrix are computed using
+        `scipy.special.comb` with `exact=False`. The result will be a floating
+        point array, and the values in the array will not be the exact
+        coefficients, but this version is much faster than `exact=True`.
+
+    Returns
+    -------
+    p : (n, n) ndarray
+        The Pascal matrix.
+
+    See Also
+    --------
+    invpascal
+
+    Notes
+    -----
+    See https://en.wikipedia.org/wiki/Pascal_matrix for more information
+    about Pascal matrices.
+
+    .. versionadded:: 0.11.0
+
+    Examples
+    --------
+    >>> from scipy.linalg import pascal
+    >>> pascal(4)
+    array([[ 1,  1,  1,  1],
+           [ 1,  2,  3,  4],
+           [ 1,  3,  6, 10],
+           [ 1,  4, 10, 20]], dtype=uint64)
+    >>> pascal(4, kind='lower')
+    array([[1, 0, 0, 0],
+           [1, 1, 0, 0],
+           [1, 2, 1, 0],
+           [1, 3, 3, 1]], dtype=uint64)
+    >>> pascal(50)[-1, -1]
+    25477612258980856902730428600
+    >>> from scipy.special import comb
+    >>> comb(98, 49, exact=True)
+    25477612258980856902730428600
+
+    """
+
+    from scipy.special import comb
+    if kind not in ['symmetric', 'lower', 'upper']:
+        raise ValueError("kind must be 'symmetric', 'lower', or 'upper'")
+
+    if exact:
+        if n >= 35:
+            L_n = np.empty((n, n), dtype=object)
+            L_n.fill(0)
+        else:
+            L_n = np.zeros((n, n), dtype=np.uint64)
+        for i in range(n):
+            for j in range(i + 1):
+                L_n[i, j] = comb(i, j, exact=True)
+    else:
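+        # Broadcasting comb over an open grid evaluates comb(i, j) for all
+        # (i, j) pairs at once, giving the lower-triangular Pascal matrix.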
+        L_n = comb(*np.ogrid[:n, :n])
+
+    if kind == 'lower':
+        p = L_n
+    elif kind == 'upper':
+        p = L_n.T
+    else:
+        p = np.dot(L_n, L_n.T)
+
+    return p
+
+
+def invpascal(n, kind='symmetric', exact=True):
+    """
+    Returns the inverse of the n x n Pascal matrix.
+
+    The Pascal matrix is a matrix containing the binomial coefficients as
+    its elements.
+
+    Parameters
+    ----------
+    n : int
+        The size of the matrix to create; that is, the result is an n x n
+        matrix.
+    kind : str, optional
+        Must be one of 'symmetric', 'lower', or 'upper'.
+        Default is 'symmetric'.
+    exact : bool, optional
+        If `exact` is True, the result is either an array of type
+        ``numpy.int64`` (if `n` < 35) or an object array of Python integers.
+        If `exact` is False, the coefficients in the matrix are computed using
+        `scipy.special.comb` with `exact=False`. The result will be a floating
+        point array, and for large `n`, the values in the array will not be the
+        exact coefficients.
+
+    Returns
+    -------
+    invp : (n, n) ndarray
+        The inverse of the Pascal matrix.
+
+    See Also
+    --------
+    pascal
+
+    Notes
+    -----
+
+    .. versionadded:: 0.16.0
+
+    References
+    ----------
+    .. [1] "Pascal matrix", https://en.wikipedia.org/wiki/Pascal_matrix
+    .. [2] Cohen, A. M., "The inverse of a Pascal matrix", Mathematical
+           Gazette, 59(408), pp. 111-112, 1975.
+
+    Examples
+    --------
+    >>> from scipy.linalg import invpascal, pascal
+    >>> invp = invpascal(5)
+    >>> invp
+    array([[  5, -10,  10,  -5,   1],
+           [-10,  30, -35,  19,  -4],
+           [ 10, -35,  46, -27,   6],
+           [ -5,  19, -27,  17,  -4],
+           [  1,  -4,   6,  -4,   1]])
+
+    >>> p = pascal(5)
+    >>> p.dot(invp)
+    array([[ 1.,  0.,  0.,  0.,  0.],
+           [ 0.,  1.,  0.,  0.,  0.],
+           [ 0.,  0.,  1.,  0.,  0.],
+           [ 0.,  0.,  0.,  1.,  0.],
+           [ 0.,  0.,  0.,  0.,  1.]])
+
+    An example of the use of `kind` and `exact`:
+
+    >>> invpascal(5, kind='lower', exact=False)
+    array([[ 1., -0.,  0., -0.,  0.],
+           [-1.,  1., -0.,  0., -0.],
+           [ 1., -2.,  1., -0.,  0.],
+           [-1.,  3., -3.,  1., -0.],
+           [ 1., -4.,  6., -4.,  1.]])
+
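+    A quick check of the triangular case (a minimal sketch assuming only
+    NumPy): the 'lower' inverse times the 'lower' Pascal matrix gives the
+    identity.
+
+    >>> import numpy as np
+    >>> pl = pascal(5, kind='lower')
+    >>> np.array_equal(invpascal(5, kind='lower') @ pl, np.eye(5))
+    True
+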
+    """
+    from scipy.special import comb
+
+    if kind not in ['symmetric', 'lower', 'upper']:
+        raise ValueError("'kind' must be 'symmetric', 'lower' or 'upper'.")
+
+    if kind == 'symmetric':
+        if exact:
+            if n > 34:
+                dt = object
+            else:
+                dt = np.int64
+        else:
+            dt = np.float64
+        invp = np.empty((n, n), dtype=dt)
+        for i in range(n):
+            for j in range(0, i + 1):
+                v = 0
+                for k in range(n - i):
+                    v += comb(i + k, k, exact=exact) * comb(i + k, i + k - j,
+                                                            exact=exact)
+                invp[i, j] = (-1)**(i - j) * v
+                if i != j:
+                    invp[j, i] = invp[i, j]
+    else:
+        # For the 'lower' and 'upper' cases, we compute the inverse by
+        # changing the sign of every other diagonal of the Pascal matrix.
+        invp = pascal(n, kind=kind, exact=exact)
+        if invp.dtype == np.uint64:
+            # This cast from np.uint64 to int64 is OK, because if `kind` is
+            # not "symmetric", the values in invp are all much less than 2**63.
+            invp = invp.view(np.int64)
+
+        # The toeplitz matrix has alternating bands of 1 and -1.
+        invp *= toeplitz((-1)**np.arange(n)).astype(invp.dtype)
+
+    return invp
+
+
+def dft(n, scale=None):
+    """
+    Discrete Fourier transform matrix.
+
+    Create the matrix that computes the discrete Fourier transform of a
+    sequence [1]_. The nth primitive root of unity used to generate the
+    matrix is exp(-2*pi*i/n), where i = sqrt(-1).
+
+    Parameters
+    ----------
+    n : int
+        Size of the matrix to create.
+    scale : str, optional
+        Must be None, 'sqrtn', or 'n'.
+        If `scale` is 'sqrtn', the matrix is divided by `sqrt(n)`.
+        If `scale` is 'n', the matrix is divided by `n`.
+        If `scale` is None (the default), the matrix is not normalized, and the
+        return value is simply the Vandermonde matrix of the roots of unity.
+
+    Returns
+    -------
+    m : (n, n) ndarray
+        The DFT matrix.
+
+    Notes
+    -----
+    When `scale` is None, multiplying a vector by the matrix returned by
+    `dft` is mathematically equivalent to (but much less efficient than)
+    the calculation performed by `scipy.fft.fft`.
+
+    .. versionadded:: 0.14.0
+
+    References
+    ----------
+    .. [1] "DFT matrix", https://en.wikipedia.org/wiki/DFT_matrix
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import dft
+    >>> np.set_printoptions(precision=2, suppress=True)  # for compact output
+    >>> m = dft(5)
+    >>> m
+    array([[ 1.  +0.j  ,  1.  +0.j  ,  1.  +0.j  ,  1.  +0.j  ,  1.  +0.j  ],
+           [ 1.  +0.j  ,  0.31-0.95j, -0.81-0.59j, -0.81+0.59j,  0.31+0.95j],
+           [ 1.  +0.j  , -0.81-0.59j,  0.31+0.95j,  0.31-0.95j, -0.81+0.59j],
+           [ 1.  +0.j  , -0.81+0.59j,  0.31-0.95j,  0.31+0.95j, -0.81-0.59j],
+           [ 1.  +0.j  ,  0.31+0.95j, -0.81+0.59j, -0.81-0.59j,  0.31-0.95j]])
+    >>> x = np.array([1, 2, 3, 0, 3])
+    >>> m @ x  # Compute the DFT of x
+    array([ 9.  +0.j  ,  0.12-0.81j, -2.12+3.44j, -2.12-3.44j,  0.12+0.81j])
+
+    Verify that ``m @ x`` is the same as ``fft(x)``.
+
+    >>> from scipy.fft import fft
+    >>> fft(x)     # Same result as m @ x
+    array([ 9.  +0.j  ,  0.12-0.81j, -2.12+3.44j, -2.12-3.44j,  0.12+0.81j])
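+
+    With ``scale='sqrtn'`` the matrix is unitary, so its product with its
+    conjugate transpose is the identity (a minimal sketch assuming only
+    NumPy):
+
+    >>> u = dft(4, scale='sqrtn')
+    >>> np.allclose(u @ u.conj().T, np.eye(4))
+    True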
+    """
+    if scale not in [None, 'sqrtn', 'n']:
+        raise ValueError("scale must be None, 'sqrtn', or 'n'; "
+                         "%r is not valid." % (scale,))
+
+    omegas = np.exp(-2j * np.pi * np.arange(n) / n).reshape(-1, 1)
+    m = omegas ** np.arange(n)
+    if scale == 'sqrtn':
+        m /= math.sqrt(n)
+    elif scale == 'n':
+        m /= n
+    return m
+
+
+def fiedler(a):
+    """Returns a symmetric Fiedler matrix
+
+    Given a sequence of numbers `a`, Fiedler matrices have the structure
+    ``F[i, j] = np.abs(a[i] - a[j])``, and hence zero diagonals and nonnegative
+    entries. A Fiedler matrix has a dominant positive eigenvalue, and all other
+    eigenvalues are negative. Although not valid in general, for certain inputs
+    the inverse and the determinant can be derived explicitly, as given in [1]_.
+
+    Parameters
+    ----------
+    a : (n,) array_like
+        coefficient array
+
+    Returns
+    -------
+    F : (n, n) ndarray
+
+    See Also
+    --------
+    circulant, toeplitz
+
+    Notes
+    -----
+
+    .. versionadded:: 1.3.0
+
+    References
+    ----------
+    .. [1] J. Todd, "Basic Numerical Mathematics: Vol.2 : Numerical Algebra",
+        1977, Birkhauser, :doi:`10.1007/978-3-0348-7286-7`
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import det, inv, fiedler
+    >>> a = [1, 4, 12, 45, 77]
+    >>> n = len(a)
+    >>> A = fiedler(a)
+    >>> A
+    array([[ 0,  3, 11, 44, 76],
+           [ 3,  0,  8, 41, 73],
+           [11,  8,  0, 33, 65],
+           [44, 41, 33,  0, 32],
+           [76, 73, 65, 32,  0]])
+
+    The explicit formulas for determinant and inverse seem to hold only for
+    monotonically increasing/decreasing arrays. Note the tridiagonal structure
+    and the corners.
+
+    >>> Ai = inv(A)
+    >>> Ai[np.abs(Ai) < 1e-12] = 0.  # clean up the numerical noise for display
+    >>> Ai
+    array([[-0.16008772,  0.16666667,  0.        ,  0.        ,  0.00657895],
+           [ 0.16666667, -0.22916667,  0.0625    ,  0.        ,  0.        ],
+           [ 0.        ,  0.0625    , -0.07765152,  0.01515152,  0.        ],
+           [ 0.        ,  0.        ,  0.01515152, -0.03077652,  0.015625  ],
+           [ 0.00657895,  0.        ,  0.        ,  0.015625  , -0.00904605]])
+    >>> det(A)
+    15409151.999999998
+    >>> (-1)**(n-1) * 2**(n-2) * np.diff(a).prod() * (a[-1] - a[0])
+    15409152
+
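+    The sign pattern of the spectrum (a minimal sketch assuming only NumPy):
+    exactly one eigenvalue is positive and the rest are negative.
+
+    >>> w = np.linalg.eigvalsh(A)
+    >>> int((w > 0).sum()), int((w < 0).sum())
+    (1, 4)
+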
+    """
+    a = np.atleast_1d(a)
+
+    if a.ndim != 1:
+        raise ValueError("Input 'a' must be a 1D array.")
+
+    if a.size == 0:
+        return np.array([], dtype=float)
+    elif a.size == 1:
+        return np.array([[0.]])
+    else:
+        return np.abs(a[:, None] - a)
+
+
+def fiedler_companion(a):
+    """ Returns a Fiedler companion matrix
+
+    Given a polynomial coefficient array ``a``, this function forms a
+    pentadiagonal matrix with a special structure whose eigenvalues coincide
+    with the roots of ``a``.
+
+    Parameters
+    ----------
+    a : (N,) array_like
+        1-D array of polynomial coefficients in descending order with a nonzero
+        leading coefficient. For ``N < 2``, an empty array is returned.
+
+    Returns
+    -------
+    c : (N-1, N-1) ndarray
+        Resulting companion matrix
+
+    See Also
+    --------
+    companion
+
+    Notes
+    -----
+    Similar to `companion`, the leading coefficient should be nonzero. If
+    the leading coefficient is not 1, the other coefficients are rescaled
+    before the array is generated. To avoid numerical issues, it is best to
+    provide a monic polynomial.
+
+    .. versionadded:: 1.3.0
+
+    References
+    ----------
+    .. [1] M. Fiedler, "A note on companion matrices", Linear Algebra and its
+        Applications, 2003, :doi:`10.1016/S0024-3795(03)00548-2`
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import fiedler_companion, eigvals
+    >>> p = np.poly(np.arange(1, 9, 2))  # [1., -16., 86., -176., 105.]
+    >>> fc = fiedler_companion(p)
+    >>> fc
+    array([[  16.,  -86.,    1.,    0.],
+           [   1.,    0.,    0.,    0.],
+           [   0.,  176.,    0., -105.],
+           [   0.,    1.,    0.,    0.]])
+    >>> eigvals(fc)
+    array([7.+0.j, 5.+0.j, 3.+0.j, 1.+0.j])
+
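+    As a cross-check (a minimal sketch assuming only NumPy), the eigenvalues
+    agree with the roots computed by ``numpy.roots``:
+
+    >>> np.allclose(np.sort(eigvals(fc).real), np.sort(np.roots(p).real))
+    True
+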
+    """
+    a = np.atleast_1d(a)
+
+    if a.ndim != 1:
+        raise ValueError("Input 'a' must be a 1-D array.")
+
+    if a.size <= 2:
+        if a.size == 2:
+            return np.array([[-(a/a[0])[-1]]])
+        return np.array([], dtype=a.dtype)
+
+    if a[0] == 0.:
+        raise ValueError('Leading coefficient is zero.')
+
+    a = a/a[0]
+    n = a.size - 1
+    c = np.zeros((n, n), dtype=a.dtype)
+    # subdiagonals
+    c[range(3, n, 2), range(1, n-2, 2)] = 1.
+    c[range(2, n, 2), range(1, n-1, 2)] = -a[3::2]
+    # superdiagonals
+    c[range(0, n-2, 2), range(2, n, 2)] = 1.
+    c[range(0, n-1, 2), range(1, n, 2)] = -a[2::2]
+    c[[0, 1], 0] = [-a[1], 1]
+
+    return c
+
+
+def convolution_matrix(a, n, mode='full'):
+    """
+    Construct a convolution matrix.
+
+    Constructs the Toeplitz matrix representing one-dimensional
+    convolution [1]_.  See the notes below for details.
+
+    Parameters
+    ----------
+    a : (m,) array_like
+        The 1-D array to convolve.
+    n : int
+        The number of columns in the resulting matrix.  It gives the length
+        of the input to be convolved with `a`.  This is analogous to the
+        length of `v` in ``numpy.convolve(a, v)``.
+    mode : str
+        This is analogous to `mode` in ``numpy.convolve(v, a, mode)``.
+        It must be one of ('full', 'valid', 'same').
+        See below for how `mode` determines the shape of the result.
+
+    Returns
+    -------
+    A : (k, n) ndarray
+        The convolution matrix whose row count `k` depends on `mode`::
+
+            =======  =========================
+             mode    k
+            =======  =========================
+            'full'   m + n - 1
+            'same'   max(m, n)
+            'valid'  max(m, n) - min(m, n) + 1
+            =======  =========================
+
+    See Also
+    --------
+    toeplitz : Toeplitz matrix
+
+    Notes
+    -----
+    The code::
+
+        A = convolution_matrix(a, n, mode)
+
+    creates a Toeplitz matrix `A` such that ``A @ v`` is equivalent to
+    using ``convolve(a, v, mode)``.  The returned array always has `n`
+    columns.  The number of rows depends on the specified `mode`, as
+    explained above.
+
+    In the default 'full' mode, the entries of `A` are given by::
+
+        A[i, j] == (a[i-j] if (0 <= (i-j) < m) else 0)
+
+    where ``m = len(a)``.  Suppose, for example, the input array is
+    ``[x, y, z]``.  The convolution matrix has the form::
+
+        [x, 0, 0, ..., 0, 0]
+        [y, x, 0, ..., 0, 0]
+        [z, y, x, ..., 0, 0]
+        ...
+        [0, 0, 0, ..., x, 0]
+        [0, 0, 0, ..., y, x]
+        [0, 0, 0, ..., z, y]
+        [0, 0, 0, ..., 0, z]
+
+    In 'valid' mode, the entries of `A` are given by::
+
+        A[i, j] == (a[i-j+m-1] if (0 <= (i-j+m-1) < m) else 0)
+
+    This corresponds to a matrix whose rows are the subset of those from
+    the 'full' case where all the coefficients in `a` are contained in the
+    row.  For input ``[x, y, z]``, this array looks like::
+
+        [z, y, x, 0, 0, ..., 0, 0, 0]
+        [0, z, y, x, 0, ..., 0, 0, 0]
+        [0, 0, z, y, x, ..., 0, 0, 0]
+        ...
+        [0, 0, 0, 0, 0, ..., x, 0, 0]
+        [0, 0, 0, 0, 0, ..., y, x, 0]
+        [0, 0, 0, 0, 0, ..., z, y, x]
+
+    In the 'same' mode, the entries of `A` are given by::
+
+        d = (m - 1) // 2
+        A[i, j] == (a[i-j+d] if (0 <= (i-j+d) < m) else 0)
+
+    The typical application of the 'same' mode is when one has a signal of
+    length `n` (with `n` greater than ``len(a)``), and the desired output
+    is a filtered signal that is still of length `n`.
+
+    For input ``[x, y, z]``, this array looks like::
+
+        [y, x, 0, 0, ..., 0, 0, 0]
+        [z, y, x, 0, ..., 0, 0, 0]
+        [0, z, y, x, ..., 0, 0, 0]
+        [0, 0, z, y, ..., 0, 0, 0]
+        ...
+        [0, 0, 0, 0, ..., y, x, 0]
+        [0, 0, 0, 0, ..., z, y, x]
+        [0, 0, 0, 0, ..., 0, z, y]
+
+    .. versionadded:: 1.5.0
+
+    References
+    ----------
+    .. [1] "Convolution", https://en.wikipedia.org/wiki/Convolution
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.linalg import convolution_matrix
+    >>> A = convolution_matrix([-1, 4, -2], 5, mode='same')
+    >>> A
+    array([[ 4, -1,  0,  0,  0],
+           [-2,  4, -1,  0,  0],
+           [ 0, -2,  4, -1,  0],
+           [ 0,  0, -2,  4, -1],
+           [ 0,  0,  0, -2,  4]])
+
+    Compare multiplication by `A` with the use of `numpy.convolve`.
+
+    >>> x = np.array([1, 2, 0, -3, 0.5])
+    >>> A @ x
+    array([  2. ,   6. ,  -1. , -12.5,   8. ])
+
+    Verify that ``A @ x`` produced the same result as applying the
+    convolution function.
+
+    >>> np.convolve([-1, 4, -2], x, mode='same')
+    array([  2. ,   6. ,  -1. , -12.5,   8. ])
+
+    For comparison to the case ``mode='same'`` shown above, here are the
+    matrices produced by ``mode='full'`` and ``mode='valid'`` for the
+    same coefficients and size.
+
+    >>> convolution_matrix([-1, 4, -2], 5, mode='full')
+    array([[-1,  0,  0,  0,  0],
+           [ 4, -1,  0,  0,  0],
+           [-2,  4, -1,  0,  0],
+           [ 0, -2,  4, -1,  0],
+           [ 0,  0, -2,  4, -1],
+           [ 0,  0,  0, -2,  4],
+           [ 0,  0,  0,  0, -2]])
+
+    >>> convolution_matrix([-1, 4, -2], 5, mode='valid')
+    array([[-2,  4, -1,  0,  0],
+           [ 0, -2,  4, -1,  0],
+           [ 0,  0, -2,  4, -1]])
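+
+    The 'full' matrix can also be built directly from the formula given in
+    the Notes (a minimal sketch assuming only NumPy):
+
+    >>> a = np.array([-1, 4, -2])
+    >>> m, n = len(a), 5
+    >>> i, j = np.ogrid[:m + n - 1, :n]
+    >>> B = np.where((i - j >= 0) & (i - j < m), a[np.clip(i - j, 0, m - 1)], 0)
+    >>> np.array_equal(B, convolution_matrix(a, n, mode='full'))
+    True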
+    """
+    if n <= 0:
+        raise ValueError('n must be a positive integer.')
+
+    a = np.asarray(a)
+    if a.ndim != 1:
+        raise ValueError('convolution_matrix expects a one-dimensional '
+                         'array as input')
+    if a.size == 0:
+        raise ValueError('len(a) must be at least 1.')
+
+    if mode not in ('full', 'valid', 'same'):
+        raise ValueError(
+            "'mode' argument must be one of ('full', 'valid', 'same')")
+
+    # create zero-padded versions of the array and its reverse
+    az = np.pad(a, (0, n-1), 'constant')
+    raz = np.pad(a[::-1], (0, n-1), 'constant')
+
+    if mode == 'same':
+        trim = min(n, len(a)) - 1
+        tb = trim//2
+        te = trim - tb
+        col0 = az[tb:len(az)-te]
+        row0 = raz[-n-tb:len(raz)-tb]
+    elif mode == 'valid':
+        tb = min(n, len(a)) - 1
+        te = tb
+        col0 = az[tb:len(az)-te]
+        row0 = raz[-n-tb:len(raz)-tb]
+    else:  # 'full'
+        col0 = az
+        row0 = raz[-n:]
+    return toeplitz(col0, row0)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/_testutils.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/_testutils.py
new file mode 100644
index 00000000..992c1c88
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/_testutils.py
@@ -0,0 +1,63 @@
+import numpy as np
+
+
+class _FakeMatrix:
+    def __init__(self, data):
+        self._data = data
+        self.__array_interface__ = data.__array_interface__
+
+
+class _FakeMatrix2:
+    def __init__(self, data):
+        self._data = data
+
+    def __array__(self):
+        return self._data
+
+
+def _get_array(shape, dtype):
+    """
+    Get a test array of given shape and data type.
+    Returned NxN matrices are posdef, and 2xN are banded-posdef.
+
+    """
+    if len(shape) == 2 and shape[0] == 2:
+        # yield a banded positive definite one
+        x = np.zeros(shape, dtype=dtype)
+        x[0, 1:] = -1
+        x[1] = 2
+        return x
+    elif len(shape) == 2 and shape[0] == shape[1]:
+        # always yield a positive definite matrix
+        x = np.zeros(shape, dtype=dtype)
+        j = np.arange(shape[0])
+        x[j, j] = 2
+        x[j[:-1], j[:-1]+1] = -1
+        x[j[:-1]+1, j[:-1]] = -1
+        return x
+    else:
+        np.random.seed(1234)
+        return np.random.randn(*shape).astype(dtype)
+
+
+def _id(x):
+    return x
+
+
+def assert_no_overwrite(call, shapes, dtypes=None):
+    """
+    Test that a call does not overwrite its input arguments
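+
+    A minimal usage sketch (`scipy.linalg.inv` is used only as an example
+    of a routine that, with its default arguments, must not modify its
+    input):
+
+    >>> from scipy.linalg import inv
+    >>> assert_no_overwrite(inv, [(4, 4)])
+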
+    """
+
+    if dtypes is None:
+        dtypes = [np.float32, np.float64, np.complex64, np.complex128]
+
+    for dtype in dtypes:
+        for order in ["C", "F"]:
+            for faker in [_id, _FakeMatrix, _FakeMatrix2]:
+                orig_inputs = [_get_array(s, dtype) for s in shapes]
+                inputs = [faker(x.copy(order)) for x in orig_inputs]
+                call(*inputs)
+                msg = "call modified inputs [%r, %r]" % (dtype, faker)
+                for a, b in zip(inputs, orig_inputs):
+                    np.testing.assert_equal(a, b, err_msg=msg)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/basic.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/basic.py
new file mode 100644
index 00000000..5eb62d4c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/basic.py
@@ -0,0 +1,31 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.linalg` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _basic
+
+__all__ = [  # noqa: F822
+    'solve', 'solve_triangular', 'solveh_banded', 'solve_banded',
+    'solve_toeplitz', 'solve_circulant', 'inv', 'det', 'lstsq',
+    'pinv', 'pinvh', 'matrix_balance', 'matmul_toeplitz',
+    'atleast_1d', 'atleast_2d', 'get_flinalg_funcs', 'get_lapack_funcs',
+    'LinAlgError', 'LinAlgWarning', 'levinson'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.linalg.basic is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.linalg instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.linalg` namespace, "
+                  "the `scipy.linalg.basic` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_basic, name)
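+
+
+# A minimal sketch of the deprecation behavior implemented above (intended
+# for an interactive session; `det` is one of the names in ``__all__``):
+#
+#     import warnings
+#     from scipy.linalg import basic
+#     with warnings.catch_warnings(record=True) as w:
+#         warnings.simplefilter("always")
+#         basic.det  # resolved through __getattr__, emits DeprecationWarning
+#     assert issubclass(w[-1].category, DeprecationWarning)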
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/blas.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/blas.py
new file mode 100644
index 00000000..b29fd304
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/blas.py
@@ -0,0 +1,484 @@
+"""
+Low-level BLAS functions (:mod:`scipy.linalg.blas`)
+===================================================
+
+This module contains low-level functions from the BLAS library.
+
+.. versionadded:: 0.12.0
+
+.. note::
+
+   The common ``overwrite_<>`` option in many routines allows the
+   input arrays to be overwritten to avoid extra memory allocation.
+   However, this requires the array to satisfy two conditions:
+   its memory order and its data type must exactly match the order
+   and the type expected by the routine.
+
+   As an example, if you pass a double precision float array to any
+   ``S....`` routine which expects single precision arguments, f2py
+   will create an intermediate array to match the argument types and
+   overwriting will be performed on that intermediate array.
+
+   Similarly, if a C-contiguous array is passed, f2py will create a
+   FORTRAN-contiguous copy internally. Please make sure that these
+   details are satisfied. More information can be found in the f2py
+   documentation.
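+
+   For instance, calling a single precision routine on a double precision
+   array leaves the original untouched, because the scaling happens on the
+   intermediate copy (a minimal sketch assuming only NumPy):
+
+   >>> import numpy as np
+   >>> from scipy.linalg import blas
+   >>> x = np.arange(3.)        # float64, but sscal is single precision
+   >>> _ = blas.sscal(2.0, x)   # f2py scales a float32 copy internally
+   >>> x                        # the original array is unchanged
+   array([0., 1., 2.])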
+
+.. warning::
+
+   These functions do little to no error checking.
+   It is possible to cause crashes by mis-using them,
+   so prefer using the higher-level routines in `scipy.linalg`.
+
+Finding functions
+-----------------
+
+.. autosummary::
+   :toctree: generated/
+
+   get_blas_funcs
+   find_best_blas_type
+
+BLAS Level 1 functions
+----------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   caxpy
+   ccopy
+   cdotc
+   cdotu
+   crotg
+   cscal
+   csrot
+   csscal
+   cswap
+   dasum
+   daxpy
+   dcopy
+   ddot
+   dnrm2
+   drot
+   drotg
+   drotm
+   drotmg
+   dscal
+   dswap
+   dzasum
+   dznrm2
+   icamax
+   idamax
+   isamax
+   izamax
+   sasum
+   saxpy
+   scasum
+   scnrm2
+   scopy
+   sdot
+   snrm2
+   srot
+   srotg
+   srotm
+   srotmg
+   sscal
+   sswap
+   zaxpy
+   zcopy
+   zdotc
+   zdotu
+   zdrot
+   zdscal
+   zrotg
+   zscal
+   zswap
+
+BLAS Level 2 functions
+----------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   sgbmv
+   sgemv
+   sger
+   ssbmv
+   sspr
+   sspr2
+   ssymv
+   ssyr
+   ssyr2
+   stbmv
+   stpsv
+   strmv
+   strsv
+   dgbmv
+   dgemv
+   dger
+   dsbmv
+   dspr
+   dspr2
+   dsymv
+   dsyr
+   dsyr2
+   dtbmv
+   dtpsv
+   dtrmv
+   dtrsv
+   cgbmv
+   cgemv
+   cgerc
+   cgeru
+   chbmv
+   chemv
+   cher
+   cher2
+   chpmv
+   chpr
+   chpr2
+   ctbmv
+   ctbsv
+   ctpmv
+   ctpsv
+   ctrmv
+   ctrsv
+   csyr
+   zgbmv
+   zgemv
+   zgerc
+   zgeru
+   zhbmv
+   zhemv
+   zher
+   zher2
+   zhpmv
+   zhpr
+   zhpr2
+   ztbmv
+   ztbsv
+   ztpmv
+   ztrmv
+   ztrsv
+   zsyr
+
+BLAS Level 3 functions
+----------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   sgemm
+   ssymm
+   ssyr2k
+   ssyrk
+   strmm
+   strsm
+   dgemm
+   dsymm
+   dsyr2k
+   dsyrk
+   dtrmm
+   dtrsm
+   cgemm
+   chemm
+   cher2k
+   cherk
+   csymm
+   csyr2k
+   csyrk
+   ctrmm
+   ctrsm
+   zgemm
+   zhemm
+   zher2k
+   zherk
+   zsymm
+   zsyr2k
+   zsyrk
+   ztrmm
+   ztrsm
+
+"""
+#
+# Author: Pearu Peterson, March 2002
+#         refactoring by Fabian Pedregosa, March 2010
+#
+
+__all__ = ['get_blas_funcs', 'find_best_blas_type']
+
+import numpy as _np
+import functools
+
+from scipy.linalg import _fblas
+try:
+    from scipy.linalg import _cblas
+except ImportError:
+    _cblas = None
+
+try:
+    from scipy.linalg import _fblas_64
+    HAS_ILP64 = True
+except ImportError:
+    HAS_ILP64 = False
+    _fblas_64 = None
+
+# Expose all functions (only fblas --- cblas is an implementation detail)
+empty_module = None
+from scipy.linalg._fblas import *
+del empty_module
+
+# All numeric dtypes '?bBhHiIlLqQefdgFDGO' that are safe to be converted to:
+
+# single precision float   : '?bBhH!!!!!!ef!!!!!!'
+# double precision float   : '?bBhHiIlLqQefdg!!!!'
+# single precision complex : '?bBhH!!!!!!ef!!F!!!'
+# double precision complex : '?bBhHiIlLqQefdgFDG!'
+
+_type_score = {x: 1 for x in '?bBhHef'}
+_type_score.update({x: 2 for x in 'iIlLqQd'})
+
+# Handle float128 ('g') and complex256 ('G') separately on non-Windows
+# systems. On Windows, the values are rewritten to the same key with the
+# same value.
+_type_score.update({'F': 3, 'D': 4, 'g': 2, 'G': 4})
+
+# Final mapping to the actual prefixes and dtypes
+_type_conv = {1: ('s', _np.dtype('float32')),
+              2: ('d', _np.dtype('float64')),
+              3: ('c', _np.dtype('complex64')),
+              4: ('z', _np.dtype('complex128'))}
+
+# some convenience alias for complex functions
+_blas_alias = {'cnrm2': 'scnrm2', 'znrm2': 'dznrm2',
+               'cdot': 'cdotc', 'zdot': 'zdotc',
+               'cger': 'cgerc', 'zger': 'zgerc',
+               'sdotc': 'sdot', 'sdotu': 'sdot',
+               'ddotc': 'ddot', 'ddotu': 'ddot'}
+
+
+def find_best_blas_type(arrays=(), dtype=None):
+    """Find best-matching BLAS/LAPACK type.
+
+    Arrays are used to determine the optimal prefix of BLAS routines.
+
+    Parameters
+    ----------
+    arrays : sequence of ndarrays, optional
+        Arrays can be given to determine optimal prefix of BLAS
+        routines. If not given, double-precision routines will be
+        used, otherwise the most generic type in arrays will be used.
+    dtype : str or dtype, optional
+        Data-type specifier. Not used if `arrays` is non-empty.
+
+    Returns
+    -------
+    prefix : str
+        BLAS/LAPACK prefix character.
+    dtype : dtype
+        Inferred NumPy data type.
+    prefer_fortran : bool
+        Whether to prefer Fortran order routines over C order.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import scipy.linalg.blas as bla
+    >>> rng = np.random.default_rng()
+    >>> a = rng.random((10,15))
+    >>> b = np.asfortranarray(a)  # Change the memory layout order
+    >>> bla.find_best_blas_type((a,))
+    ('d', dtype('float64'), False)
+    >>> bla.find_best_blas_type((a*1j,))
+    ('z', dtype('complex128'), False)
+    >>> bla.find_best_blas_type((b,))
+    ('d', dtype('float64'), True)
+
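+    Mixing a double precision real array with a single precision complex
+    array upcasts the result to the double precision complex prefix (a
+    minimal sketch reusing ``a`` from above):
+
+    >>> c = (a * 1j).astype('complex64')
+    >>> bla.find_best_blas_type((a, c))
+    ('z', dtype('complex128'), False)
+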
+    """
+    dtype = _np.dtype(dtype)
+    max_score = _type_score.get(dtype.char, 5)
+    prefer_fortran = False
+
+    if arrays:
+        # In most cases, single element is passed through, quicker route
+        if len(arrays) == 1:
+            max_score = _type_score.get(arrays[0].dtype.char, 5)
+            prefer_fortran = arrays[0].flags['FORTRAN']
+        else:
+            # use the most generic type in arrays
+            scores = [_type_score.get(x.dtype.char, 5) for x in arrays]
+            max_score = max(scores)
+            ind_max_score = scores.index(max_score)
+            # safe upcasting for mix of float64 and complex64 --> prefix 'z'
+            if max_score == 3 and (2 in scores):
+                max_score = 4
+
+            if arrays[ind_max_score].flags['FORTRAN']:
+                # prefer Fortran for leading array with column major order
+                prefer_fortran = True
+
+    # Get the BLAS prefix and the corresponding dtype; if the score is not
+    # found, fall back to 'd' and double precision float.
+    prefix, dtype = _type_conv.get(max_score, ('d', _np.dtype('float64')))
+
+    return prefix, dtype, prefer_fortran
+
+
+def _get_funcs(names, arrays, dtype,
+               lib_name, fmodule, cmodule,
+               fmodule_name, cmodule_name, alias,
+               ilp64=False):
+    """
+    Return available BLAS/LAPACK functions.
+
+    Used also in lapack.py. See get_blas_funcs for docstring.
+    """
+
+    funcs = []
+    unpack = False
+    dtype = _np.dtype(dtype)
+    module1 = (cmodule, cmodule_name)
+    module2 = (fmodule, fmodule_name)
+
+    if isinstance(names, str):
+        names = (names,)
+        unpack = True
+
+    prefix, dtype, prefer_fortran = find_best_blas_type(arrays, dtype)
+
+    if prefer_fortran:
+        module1, module2 = module2, module1
+
+    for name in names:
+        func_name = prefix + name
+        func_name = alias.get(func_name, func_name)
+        func = getattr(module1[0], func_name, None)
+        module_name = module1[1]
+        if func is None:
+            func = getattr(module2[0], func_name, None)
+            module_name = module2[1]
+        if func is None:
+            raise ValueError(
+                '%s function %s could not be found' % (lib_name, func_name))
+        func.module_name, func.typecode = module_name, prefix
+        func.dtype = dtype
+        if not ilp64:
+            func.int_dtype = _np.dtype(_np.intc)
+        else:
+            func.int_dtype = _np.dtype(_np.int64)
+        func.prefix = prefix  # Backward compatibility
+        funcs.append(func)
+
+    if unpack:
+        return funcs[0]
+    else:
+        return funcs
+
+
+def _memoize_get_funcs(func):
+    """
+    Memoized fast path for _get_funcs instances
+    """
+    memo = {}
+    func.memo = memo
+
+    @functools.wraps(func)
+    def getter(names, arrays=(), dtype=None, ilp64=False):
+        key = (names, dtype, ilp64)
+        for array in arrays:
+            # cf. find_best_blas_type
+            key += (array.dtype.char, array.flags.fortran)
+
+        try:
+            value = memo.get(key)
+        except TypeError:
+            # unhashable key etc.
+            key = None
+            value = None
+
+        if value is not None:
+            return value
+
+        value = func(names, arrays, dtype, ilp64)
+
+        if key is not None:
+            memo[key] = value
+
+        return value
+
+    return getter
+
+
+@_memoize_get_funcs
+def get_blas_funcs(names, arrays=(), dtype=None, ilp64=False):
+    """Return available BLAS function objects from names.
+
+    Arrays are used to determine the optimal prefix of BLAS routines.
+
+    Parameters
+    ----------
+    names : str or sequence of str
+        Name(s) of BLAS functions without type prefix.
+
+    arrays : sequence of ndarrays, optional
+        Arrays can be given to determine optimal prefix of BLAS
+        routines. If not given, double-precision routines will be
+        used, otherwise the most generic type in arrays will be used.
+
+    dtype : str or dtype, optional
+        Data-type specifier. Not used if `arrays` is non-empty.
+
+    ilp64 : {True, False, 'preferred'}, optional
+        Whether to return ILP64 routine variant.
+        Choosing 'preferred' returns ILP64 routine if available,
+        and otherwise the 32-bit routine. Default: False
+
+    Returns
+    -------
+    funcs : list
+        List containing the found function(s).
+
+    Notes
+    -----
+    This routine automatically chooses between Fortran/C
+    interfaces. Fortran code is used whenever possible for arrays with
+    column major order. In all other cases, C code is preferred.
+
+    In BLAS, the naming convention is that all functions start with a
+    type prefix, which depends on the type of the principal
+    matrix. These can be one of {'s', 'd', 'c', 'z'} for the NumPy
+    types {float32, float64, complex64, complex128} respectively.
+    The code and the dtype are stored in attributes `typecode` and `dtype`
+    of the returned functions.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import scipy.linalg as LA
+    >>> rng = np.random.default_rng()
+    >>> a = rng.random((3,2))
+    >>> x_gemv = LA.get_blas_funcs('gemv', (a,))
+    >>> x_gemv.typecode
+    'd'
+    >>> x_gemv = LA.get_blas_funcs('gemv',(a*1j,))
+    >>> x_gemv.typecode
+    'z'
+
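+    Repeated queries with the same arrays are memoized and return the same
+    function object (a minimal sketch reusing ``a`` from above):
+
+    >>> LA.get_blas_funcs('gemv', (a,)) is LA.get_blas_funcs('gemv', (a,))
+    True
+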
+    """
+    if isinstance(ilp64, str):
+        if ilp64 == 'preferred':
+            ilp64 = HAS_ILP64
+        else:
+            raise ValueError("Invalid value for 'ilp64'")
+
+    if not ilp64:
+        return _get_funcs(names, arrays, dtype,
+                          "BLAS", _fblas, _cblas, "fblas", "cblas",
+                          _blas_alias, ilp64=False)
+    else:
+        if not HAS_ILP64:
+            raise RuntimeError("BLAS ILP64 routine requested, but SciPy "
+                               "was compiled with 32-bit BLAS only")
+        return _get_funcs(names, arrays, dtype,
+                          "BLAS", _fblas_64, None, "fblas_64", None,
+                          _blas_alias, ilp64=True)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/cython_blas.pxd b/__packaged__/coreml/.python_dependencies/scipy/linalg/cython_blas.pxd
new file mode 100644
index 00000000..5ddaa0b7
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/cython_blas.pxd
@@ -0,0 +1,314 @@
+# This file was generated by _generate_pyx.py.
+# Do not edit this file directly.
+
+# Within SciPy, these wrappers can be used via relative or absolute cimport.
+# Examples:
+# from ..linalg cimport cython_blas
+# from scipy.linalg cimport cython_blas
+# cimport scipy.linalg.cython_blas as cython_blas
+# cimport ..linalg.cython_blas as cython_blas
+
+# Within SciPy, if BLAS functions are needed in C/C++/Fortran,
+# these wrappers should not be used.
+# The original libraries should be linked directly.
+
+ctypedef float s
+ctypedef double d
+ctypedef float complex c
+ctypedef double complex z
+
+cdef void caxpy(int *n, c *ca, c *cx, int *incx, c *cy, int *incy) nogil
+
+cdef void ccopy(int *n, c *cx, int *incx, c *cy, int *incy) nogil
+
+cdef c cdotc(int *n, c *cx, int *incx, c *cy, int *incy) nogil
+
+cdef c cdotu(int *n, c *cx, int *incx, c *cy, int *incy) nogil
+
+cdef void cgbmv(char *trans, int *m, int *n, int *kl, int *ku, c *alpha, c *a, int *lda, c *x, int *incx, c *beta, c *y, int *incy) nogil
+
+cdef void cgemm(char *transa, char *transb, int *m, int *n, int *k, c *alpha, c *a, int *lda, c *b, int *ldb, c *beta, c *c, int *ldc) nogil
+
+cdef void cgemv(char *trans, int *m, int *n, c *alpha, c *a, int *lda, c *x, int *incx, c *beta, c *y, int *incy) nogil
+
+cdef void cgerc(int *m, int *n, c *alpha, c *x, int *incx, c *y, int *incy, c *a, int *lda) nogil
+
+cdef void cgeru(int *m, int *n, c *alpha, c *x, int *incx, c *y, int *incy, c *a, int *lda) nogil
+
+cdef void chbmv(char *uplo, int *n, int *k, c *alpha, c *a, int *lda, c *x, int *incx, c *beta, c *y, int *incy) nogil
+
+cdef void chemm(char *side, char *uplo, int *m, int *n, c *alpha, c *a, int *lda, c *b, int *ldb, c *beta, c *c, int *ldc) nogil
+
+cdef void chemv(char *uplo, int *n, c *alpha, c *a, int *lda, c *x, int *incx, c *beta, c *y, int *incy) nogil
+
+cdef void cher(char *uplo, int *n, s *alpha, c *x, int *incx, c *a, int *lda) nogil
+
+cdef void cher2(char *uplo, int *n, c *alpha, c *x, int *incx, c *y, int *incy, c *a, int *lda) nogil
+
+cdef void cher2k(char *uplo, char *trans, int *n, int *k, c *alpha, c *a, int *lda, c *b, int *ldb, s *beta, c *c, int *ldc) nogil
+
+cdef void cherk(char *uplo, char *trans, int *n, int *k, s *alpha, c *a, int *lda, s *beta, c *c, int *ldc) nogil
+
+cdef void chpmv(char *uplo, int *n, c *alpha, c *ap, c *x, int *incx, c *beta, c *y, int *incy) nogil
+
+cdef void chpr(char *uplo, int *n, s *alpha, c *x, int *incx, c *ap) nogil
+
+cdef void chpr2(char *uplo, int *n, c *alpha, c *x, int *incx, c *y, int *incy, c *ap) nogil
+
+cdef void crotg(c *ca, c *cb, s *c, c *s) nogil
+
+cdef void cscal(int *n, c *ca, c *cx, int *incx) nogil
+
+cdef void csrot(int *n, c *cx, int *incx, c *cy, int *incy, s *c, s *s) nogil
+
+cdef void csscal(int *n, s *sa, c *cx, int *incx) nogil
+
+cdef void cswap(int *n, c *cx, int *incx, c *cy, int *incy) nogil
+
+cdef void csymm(char *side, char *uplo, int *m, int *n, c *alpha, c *a, int *lda, c *b, int *ldb, c *beta, c *c, int *ldc) nogil
+
+cdef void csyr2k(char *uplo, char *trans, int *n, int *k, c *alpha, c *a, int *lda, c *b, int *ldb, c *beta, c *c, int *ldc) nogil
+
+cdef void csyrk(char *uplo, char *trans, int *n, int *k, c *alpha, c *a, int *lda, c *beta, c *c, int *ldc) nogil
+
+cdef void ctbmv(char *uplo, char *trans, char *diag, int *n, int *k, c *a, int *lda, c *x, int *incx) nogil
+
+cdef void ctbsv(char *uplo, char *trans, char *diag, int *n, int *k, c *a, int *lda, c *x, int *incx) nogil
+
+cdef void ctpmv(char *uplo, char *trans, char *diag, int *n, c *ap, c *x, int *incx) nogil
+
+cdef void ctpsv(char *uplo, char *trans, char *diag, int *n, c *ap, c *x, int *incx) nogil
+
+cdef void ctrmm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, c *alpha, c *a, int *lda, c *b, int *ldb) nogil
+
+cdef void ctrmv(char *uplo, char *trans, char *diag, int *n, c *a, int *lda, c *x, int *incx) nogil
+
+cdef void ctrsm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, c *alpha, c *a, int *lda, c *b, int *ldb) nogil
+
+cdef void ctrsv(char *uplo, char *trans, char *diag, int *n, c *a, int *lda, c *x, int *incx) nogil
+
+cdef d dasum(int *n, d *dx, int *incx) nogil
+
+cdef void daxpy(int *n, d *da, d *dx, int *incx, d *dy, int *incy) nogil
+
+cdef d dcabs1(z *z) nogil
+
+cdef void dcopy(int *n, d *dx, int *incx, d *dy, int *incy) nogil
+
+cdef d ddot(int *n, d *dx, int *incx, d *dy, int *incy) nogil
+
+cdef void dgbmv(char *trans, int *m, int *n, int *kl, int *ku, d *alpha, d *a, int *lda, d *x, int *incx, d *beta, d *y, int *incy) nogil
+
+cdef void dgemm(char *transa, char *transb, int *m, int *n, int *k, d *alpha, d *a, int *lda, d *b, int *ldb, d *beta, d *c, int *ldc) nogil
+
+cdef void dgemv(char *trans, int *m, int *n, d *alpha, d *a, int *lda, d *x, int *incx, d *beta, d *y, int *incy) nogil
+
+cdef void dger(int *m, int *n, d *alpha, d *x, int *incx, d *y, int *incy, d *a, int *lda) nogil
+
+cdef d dnrm2(int *n, d *x, int *incx) nogil
+
+cdef void drot(int *n, d *dx, int *incx, d *dy, int *incy, d *c, d *s) nogil
+
+cdef void drotg(d *da, d *db, d *c, d *s) nogil
+
+cdef void drotm(int *n, d *dx, int *incx, d *dy, int *incy, d *dparam) nogil
+
+cdef void drotmg(d *dd1, d *dd2, d *dx1, d *dy1, d *dparam) nogil
+
+cdef void dsbmv(char *uplo, int *n, int *k, d *alpha, d *a, int *lda, d *x, int *incx, d *beta, d *y, int *incy) nogil
+
+cdef void dscal(int *n, d *da, d *dx, int *incx) nogil
+
+cdef d dsdot(int *n, s *sx, int *incx, s *sy, int *incy) nogil
+
+cdef void dspmv(char *uplo, int *n, d *alpha, d *ap, d *x, int *incx, d *beta, d *y, int *incy) nogil
+
+cdef void dspr(char *uplo, int *n, d *alpha, d *x, int *incx, d *ap) nogil
+
+cdef void dspr2(char *uplo, int *n, d *alpha, d *x, int *incx, d *y, int *incy, d *ap) nogil
+
+cdef void dswap(int *n, d *dx, int *incx, d *dy, int *incy) nogil
+
+cdef void dsymm(char *side, char *uplo, int *m, int *n, d *alpha, d *a, int *lda, d *b, int *ldb, d *beta, d *c, int *ldc) nogil
+
+cdef void dsymv(char *uplo, int *n, d *alpha, d *a, int *lda, d *x, int *incx, d *beta, d *y, int *incy) nogil
+
+cdef void dsyr(char *uplo, int *n, d *alpha, d *x, int *incx, d *a, int *lda) nogil
+
+cdef void dsyr2(char *uplo, int *n, d *alpha, d *x, int *incx, d *y, int *incy, d *a, int *lda) nogil
+
+cdef void dsyr2k(char *uplo, char *trans, int *n, int *k, d *alpha, d *a, int *lda, d *b, int *ldb, d *beta, d *c, int *ldc) nogil
+
+cdef void dsyrk(char *uplo, char *trans, int *n, int *k, d *alpha, d *a, int *lda, d *beta, d *c, int *ldc) nogil
+
+cdef void dtbmv(char *uplo, char *trans, char *diag, int *n, int *k, d *a, int *lda, d *x, int *incx) nogil
+
+cdef void dtbsv(char *uplo, char *trans, char *diag, int *n, int *k, d *a, int *lda, d *x, int *incx) nogil
+
+cdef void dtpmv(char *uplo, char *trans, char *diag, int *n, d *ap, d *x, int *incx) nogil
+
+cdef void dtpsv(char *uplo, char *trans, char *diag, int *n, d *ap, d *x, int *incx) nogil
+
+cdef void dtrmm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, d *alpha, d *a, int *lda, d *b, int *ldb) nogil
+
+cdef void dtrmv(char *uplo, char *trans, char *diag, int *n, d *a, int *lda, d *x, int *incx) nogil
+
+cdef void dtrsm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, d *alpha, d *a, int *lda, d *b, int *ldb) nogil
+
+cdef void dtrsv(char *uplo, char *trans, char *diag, int *n, d *a, int *lda, d *x, int *incx) nogil
+
+cdef d dzasum(int *n, z *zx, int *incx) nogil
+
+cdef d dznrm2(int *n, z *x, int *incx) nogil
+
+cdef int icamax(int *n, c *cx, int *incx) nogil
+
+cdef int idamax(int *n, d *dx, int *incx) nogil
+
+cdef int isamax(int *n, s *sx, int *incx) nogil
+
+cdef int izamax(int *n, z *zx, int *incx) nogil
+
+cdef bint lsame(char *ca, char *cb) nogil
+
+cdef s sasum(int *n, s *sx, int *incx) nogil
+
+cdef void saxpy(int *n, s *sa, s *sx, int *incx, s *sy, int *incy) nogil
+
+cdef s scasum(int *n, c *cx, int *incx) nogil
+
+cdef s scnrm2(int *n, c *x, int *incx) nogil
+
+cdef void scopy(int *n, s *sx, int *incx, s *sy, int *incy) nogil
+
+cdef s sdot(int *n, s *sx, int *incx, s *sy, int *incy) nogil
+
+cdef s sdsdot(int *n, s *sb, s *sx, int *incx, s *sy, int *incy) nogil
+
+cdef void sgbmv(char *trans, int *m, int *n, int *kl, int *ku, s *alpha, s *a, int *lda, s *x, int *incx, s *beta, s *y, int *incy) nogil
+
+cdef void sgemm(char *transa, char *transb, int *m, int *n, int *k, s *alpha, s *a, int *lda, s *b, int *ldb, s *beta, s *c, int *ldc) nogil
+
+cdef void sgemv(char *trans, int *m, int *n, s *alpha, s *a, int *lda, s *x, int *incx, s *beta, s *y, int *incy) nogil
+
+cdef void sger(int *m, int *n, s *alpha, s *x, int *incx, s *y, int *incy, s *a, int *lda) nogil
+
+cdef s snrm2(int *n, s *x, int *incx) nogil
+
+cdef void srot(int *n, s *sx, int *incx, s *sy, int *incy, s *c, s *s) nogil
+
+cdef void srotg(s *sa, s *sb, s *c, s *s) nogil
+
+cdef void srotm(int *n, s *sx, int *incx, s *sy, int *incy, s *sparam) nogil
+
+cdef void srotmg(s *sd1, s *sd2, s *sx1, s *sy1, s *sparam) nogil
+
+cdef void ssbmv(char *uplo, int *n, int *k, s *alpha, s *a, int *lda, s *x, int *incx, s *beta, s *y, int *incy) nogil
+
+cdef void sscal(int *n, s *sa, s *sx, int *incx) nogil
+
+cdef void sspmv(char *uplo, int *n, s *alpha, s *ap, s *x, int *incx, s *beta, s *y, int *incy) nogil
+
+cdef void sspr(char *uplo, int *n, s *alpha, s *x, int *incx, s *ap) nogil
+
+cdef void sspr2(char *uplo, int *n, s *alpha, s *x, int *incx, s *y, int *incy, s *ap) nogil
+
+cdef void sswap(int *n, s *sx, int *incx, s *sy, int *incy) nogil
+
+cdef void ssymm(char *side, char *uplo, int *m, int *n, s *alpha, s *a, int *lda, s *b, int *ldb, s *beta, s *c, int *ldc) nogil
+
+cdef void ssymv(char *uplo, int *n, s *alpha, s *a, int *lda, s *x, int *incx, s *beta, s *y, int *incy) nogil
+
+cdef void ssyr(char *uplo, int *n, s *alpha, s *x, int *incx, s *a, int *lda) nogil
+
+cdef void ssyr2(char *uplo, int *n, s *alpha, s *x, int *incx, s *y, int *incy, s *a, int *lda) nogil
+
+cdef void ssyr2k(char *uplo, char *trans, int *n, int *k, s *alpha, s *a, int *lda, s *b, int *ldb, s *beta, s *c, int *ldc) nogil
+
+cdef void ssyrk(char *uplo, char *trans, int *n, int *k, s *alpha, s *a, int *lda, s *beta, s *c, int *ldc) nogil
+
+cdef void stbmv(char *uplo, char *trans, char *diag, int *n, int *k, s *a, int *lda, s *x, int *incx) nogil
+
+cdef void stbsv(char *uplo, char *trans, char *diag, int *n, int *k, s *a, int *lda, s *x, int *incx) nogil
+
+cdef void stpmv(char *uplo, char *trans, char *diag, int *n, s *ap, s *x, int *incx) nogil
+
+cdef void stpsv(char *uplo, char *trans, char *diag, int *n, s *ap, s *x, int *incx) nogil
+
+cdef void strmm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, s *alpha, s *a, int *lda, s *b, int *ldb) nogil
+
+cdef void strmv(char *uplo, char *trans, char *diag, int *n, s *a, int *lda, s *x, int *incx) nogil
+
+cdef void strsm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, s *alpha, s *a, int *lda, s *b, int *ldb) nogil
+
+cdef void strsv(char *uplo, char *trans, char *diag, int *n, s *a, int *lda, s *x, int *incx) nogil
+
+cdef void zaxpy(int *n, z *za, z *zx, int *incx, z *zy, int *incy) nogil
+
+cdef void zcopy(int *n, z *zx, int *incx, z *zy, int *incy) nogil
+
+cdef z zdotc(int *n, z *zx, int *incx, z *zy, int *incy) nogil
+
+cdef z zdotu(int *n, z *zx, int *incx, z *zy, int *incy) nogil
+
+cdef void zdrot(int *n, z *cx, int *incx, z *cy, int *incy, d *c, d *s) nogil
+
+cdef void zdscal(int *n, d *da, z *zx, int *incx) nogil
+
+cdef void zgbmv(char *trans, int *m, int *n, int *kl, int *ku, z *alpha, z *a, int *lda, z *x, int *incx, z *beta, z *y, int *incy) nogil
+
+cdef void zgemm(char *transa, char *transb, int *m, int *n, int *k, z *alpha, z *a, int *lda, z *b, int *ldb, z *beta, z *c, int *ldc) nogil
+
+cdef void zgemv(char *trans, int *m, int *n, z *alpha, z *a, int *lda, z *x, int *incx, z *beta, z *y, int *incy) nogil
+
+cdef void zgerc(int *m, int *n, z *alpha, z *x, int *incx, z *y, int *incy, z *a, int *lda) nogil
+
+cdef void zgeru(int *m, int *n, z *alpha, z *x, int *incx, z *y, int *incy, z *a, int *lda) nogil
+
+cdef void zhbmv(char *uplo, int *n, int *k, z *alpha, z *a, int *lda, z *x, int *incx, z *beta, z *y, int *incy) nogil
+
+cdef void zhemm(char *side, char *uplo, int *m, int *n, z *alpha, z *a, int *lda, z *b, int *ldb, z *beta, z *c, int *ldc) nogil
+
+cdef void zhemv(char *uplo, int *n, z *alpha, z *a, int *lda, z *x, int *incx, z *beta, z *y, int *incy) nogil
+
+cdef void zher(char *uplo, int *n, d *alpha, z *x, int *incx, z *a, int *lda) nogil
+
+cdef void zher2(char *uplo, int *n, z *alpha, z *x, int *incx, z *y, int *incy, z *a, int *lda) nogil
+
+cdef void zher2k(char *uplo, char *trans, int *n, int *k, z *alpha, z *a, int *lda, z *b, int *ldb, d *beta, z *c, int *ldc) nogil
+
+cdef void zherk(char *uplo, char *trans, int *n, int *k, d *alpha, z *a, int *lda, d *beta, z *c, int *ldc) nogil
+
+cdef void zhpmv(char *uplo, int *n, z *alpha, z *ap, z *x, int *incx, z *beta, z *y, int *incy) nogil
+
+cdef void zhpr(char *uplo, int *n, d *alpha, z *x, int *incx, z *ap) nogil
+
+cdef void zhpr2(char *uplo, int *n, z *alpha, z *x, int *incx, z *y, int *incy, z *ap) nogil
+
+cdef void zrotg(z *ca, z *cb, d *c, z *s) nogil
+
+cdef void zscal(int *n, z *za, z *zx, int *incx) nogil
+
+cdef void zswap(int *n, z *zx, int *incx, z *zy, int *incy) nogil
+
+cdef void zsymm(char *side, char *uplo, int *m, int *n, z *alpha, z *a, int *lda, z *b, int *ldb, z *beta, z *c, int *ldc) nogil
+
+cdef void zsyr2k(char *uplo, char *trans, int *n, int *k, z *alpha, z *a, int *lda, z *b, int *ldb, z *beta, z *c, int *ldc) nogil
+
+cdef void zsyrk(char *uplo, char *trans, int *n, int *k, z *alpha, z *a, int *lda, z *beta, z *c, int *ldc) nogil
+
+cdef void ztbmv(char *uplo, char *trans, char *diag, int *n, int *k, z *a, int *lda, z *x, int *incx) nogil
+
+cdef void ztbsv(char *uplo, char *trans, char *diag, int *n, int *k, z *a, int *lda, z *x, int *incx) nogil
+
+cdef void ztpmv(char *uplo, char *trans, char *diag, int *n, z *ap, z *x, int *incx) nogil
+
+cdef void ztpsv(char *uplo, char *trans, char *diag, int *n, z *ap, z *x, int *incx) nogil
+
+cdef void ztrmm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, z *alpha, z *a, int *lda, z *b, int *ldb) nogil
+
+cdef void ztrmv(char *uplo, char *trans, char *diag, int *n, z *a, int *lda, z *x, int *incx) nogil
+
+cdef void ztrsm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, z *alpha, z *a, int *lda, z *b, int *ldb) nogil
+
+cdef void ztrsv(char *uplo, char *trans, char *diag, int *n, z *a, int *lda, z *x, int *incx) nogil
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/cython_blas.pyx b/__packaged__/coreml/.python_dependencies/scipy/linalg/cython_blas.pyx
new file mode 100644
index 00000000..5f2ece60
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/cython_blas.pyx
@@ -0,0 +1,1192 @@
+# This file was generated by _generate_pyx.py.
+# Do not edit this file directly.
+
+# cython: boundscheck = False
+# cython: wraparound = False
+# cython: cdivision = True
+
+"""
+BLAS Functions for Cython
+=========================
+
+Usable from Cython via::
+
+    cimport scipy.linalg.cython_blas
+
+These wrappers do not check for alignment of arrays.
+Alignment should be checked before these wrappers are used.
+
+Raw function pointers (Fortran-style pointer arguments):
+
+- caxpy
+- ccopy
+- cdotc
+- cdotu
+- cgbmv
+- cgemm
+- cgemv
+- cgerc
+- cgeru
+- chbmv
+- chemm
+- chemv
+- cher
+- cher2
+- cher2k
+- cherk
+- chpmv
+- chpr
+- chpr2
+- crotg
+- cscal
+- csrot
+- csscal
+- cswap
+- csymm
+- csyr2k
+- csyrk
+- ctbmv
+- ctbsv
+- ctpmv
+- ctpsv
+- ctrmm
+- ctrmv
+- ctrsm
+- ctrsv
+- dasum
+- daxpy
+- dcabs1
+- dcopy
+- ddot
+- dgbmv
+- dgemm
+- dgemv
+- dger
+- dnrm2
+- drot
+- drotg
+- drotm
+- drotmg
+- dsbmv
+- dscal
+- dsdot
+- dspmv
+- dspr
+- dspr2
+- dswap
+- dsymm
+- dsymv
+- dsyr
+- dsyr2
+- dsyr2k
+- dsyrk
+- dtbmv
+- dtbsv
+- dtpmv
+- dtpsv
+- dtrmm
+- dtrmv
+- dtrsm
+- dtrsv
+- dzasum
+- dznrm2
+- icamax
+- idamax
+- isamax
+- izamax
+- lsame
+- sasum
+- saxpy
+- scasum
+- scnrm2
+- scopy
+- sdot
+- sdsdot
+- sgbmv
+- sgemm
+- sgemv
+- sger
+- snrm2
+- srot
+- srotg
+- srotm
+- srotmg
+- ssbmv
+- sscal
+- sspmv
+- sspr
+- sspr2
+- sswap
+- ssymm
+- ssymv
+- ssyr
+- ssyr2
+- ssyr2k
+- ssyrk
+- stbmv
+- stbsv
+- stpmv
+- stpsv
+- strmm
+- strmv
+- strsm
+- strsv
+- zaxpy
+- zcopy
+- zdotc
+- zdotu
+- zdrot
+- zdscal
+- zgbmv
+- zgemm
+- zgemv
+- zgerc
+- zgeru
+- zhbmv
+- zhemm
+- zhemv
+- zher
+- zher2
+- zher2k
+- zherk
+- zhpmv
+- zhpr
+- zhpr2
+- zrotg
+- zscal
+- zswap
+- zsymm
+- zsyr2k
+- zsyrk
+- ztbmv
+- ztbsv
+- ztpmv
+- ztpsv
+- ztrmm
+- ztrmv
+- ztrsm
+- ztrsv
+
+
+"""
+
+# Within SciPy, these wrappers can be used via relative or absolute cimport.
+# Examples:
+# from ..linalg cimport cython_blas
+# from scipy.linalg cimport cython_blas
+# cimport scipy.linalg.cython_blas as cython_blas
+# cimport ..linalg.cython_blas as cython_blas
+
+# Within SciPy, if BLAS functions are needed in C/C++/Fortran,
+# these wrappers should not be used.
+# The original libraries should be linked directly.
+
+cdef extern from "fortran_defs.h":
+    pass
+
+from numpy cimport npy_complex64, npy_complex128
+
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_cdotc "F_FUNC(cdotcwrp, CDOTCWRP)"(c *out, int *n, npy_complex64 *cx, int *incx, npy_complex64 *cy, int *incy) nogil
+cdef c cdotc(int *n, c *cx, int *incx, c *cy, int *incy) nogil:
+    cdef c out
+    _fortran_cdotc(&out, n, cx, incx, cy, incy)
+    return out
+
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_cdotu "F_FUNC(cdotuwrp, CDOTUWRP)"(c *out, int *n, npy_complex64 *cx, int *incx, npy_complex64 *cy, int *incy) nogil
+cdef c cdotu(int *n, c *cx, int *incx, c *cy, int *incy) nogil:
+    cdef c out
+    _fortran_cdotu(&out, n, cx, incx, cy, incy)
+    return out
+
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_dasum "F_FUNC(dasumwrp, DASUMWRP)"(d *out, int *n, d *dx, int *incx) nogil
+cdef d dasum(int *n, d *dx, int *incx) nogil:
+    cdef d out
+    _fortran_dasum(&out, n, dx, incx)
+    return out
+
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_dcabs1 "F_FUNC(dcabs1wrp, DCABS1WRP)"(d *out, npy_complex128 *z) nogil
+cdef d dcabs1(z *z) nogil:
+    cdef d out
+    _fortran_dcabs1(&out, z)
+    return out
+
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_ddot "F_FUNC(ddotwrp, DDOTWRP)"(d *out, int *n, d *dx, int *incx, d *dy, int *incy) nogil
+cdef d ddot(int *n, d *dx, int *incx, d *dy, int *incy) nogil:
+    cdef d out
+    _fortran_ddot(&out, n, dx, incx, dy, incy)
+    return out
+
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_dnrm2 "F_FUNC(dnrm2wrp, DNRM2WRP)"(d *out, int *n, d *x, int *incx) nogil
+cdef d dnrm2(int *n, d *x, int *incx) nogil:
+    cdef d out
+    _fortran_dnrm2(&out, n, x, incx)
+    return out
+
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_dsdot "F_FUNC(dsdotwrp, DSDOTWRP)"(d *out, int *n, s *sx, int *incx, s *sy, int *incy) nogil
+cdef d dsdot(int *n, s *sx, int *incx, s *sy, int *incy) nogil:
+    cdef d out
+    _fortran_dsdot(&out, n, sx, incx, sy, incy)
+    return out
+
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_dzasum "F_FUNC(dzasumwrp, DZASUMWRP)"(d *out, int *n, npy_complex128 *zx, int *incx) nogil
+cdef d dzasum(int *n, z *zx, int *incx) nogil:
+    cdef d out
+    _fortran_dzasum(&out, n, zx, incx)
+    return out
+
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_dznrm2 "F_FUNC(dznrm2wrp, DZNRM2WRP)"(d *out, int *n, npy_complex128 *x, int *incx) nogil
+cdef d dznrm2(int *n, z *x, int *incx) nogil:
+    cdef d out
+    _fortran_dznrm2(&out, n, x, incx)
+    return out
+
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_icamax "F_FUNC(icamaxwrp, ICAMAXWRP)"(int *out, int *n, npy_complex64 *cx, int *incx) nogil
+cdef int icamax(int *n, c *cx, int *incx) nogil:
+    cdef int out
+    _fortran_icamax(&out, n, cx, incx)
+    return out
+
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_idamax "F_FUNC(idamaxwrp, IDAMAXWRP)"(int *out, int *n, d *dx, int *incx) nogil
+cdef int idamax(int *n, d *dx, int *incx) nogil:
+    cdef int out
+    _fortran_idamax(&out, n, dx, incx)
+    return out
+
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_isamax "F_FUNC(isamaxwrp, ISAMAXWRP)"(int *out, int *n, s *sx, int *incx) nogil
+cdef int isamax(int *n, s *sx, int *incx) nogil:
+    cdef int out
+    _fortran_isamax(&out, n, sx, incx)
+    return out
+
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_izamax "F_FUNC(izamaxwrp, IZAMAXWRP)"(int *out, int *n, npy_complex128 *zx, int *incx) nogil
+cdef int izamax(int *n, z *zx, int *incx) nogil:
+    cdef int out
+    _fortran_izamax(&out, n, zx, incx)
+    return out
+
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_lsame "F_FUNC(lsamewrp, LSAMEWRP)"(bint *out, char *ca, char *cb) nogil
+cdef bint lsame(char *ca, char *cb) nogil:
+    cdef bint out
+    _fortran_lsame(&out, ca, cb)
+    return out
+
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_sasum "F_FUNC(sasumwrp, SASUMWRP)"(s *out, int *n, s *sx, int *incx) nogil
+cdef s sasum(int *n, s *sx, int *incx) nogil:
+    cdef s out
+    _fortran_sasum(&out, n, sx, incx)
+    return out
+
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_scasum "F_FUNC(scasumwrp, SCASUMWRP)"(s *out, int *n, npy_complex64 *cx, int *incx) nogil
+cdef s scasum(int *n, c *cx, int *incx) nogil:
+    cdef s out
+    _fortran_scasum(&out, n, cx, incx)
+    return out
+
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_scnrm2 "F_FUNC(scnrm2wrp, SCNRM2WRP)"(s *out, int *n, npy_complex64 *x, int *incx) nogil
+cdef s scnrm2(int *n, c *x, int *incx) nogil:
+    cdef s out
+    _fortran_scnrm2(&out, n, x, incx)
+    return out
+
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_sdot "F_FUNC(sdotwrp, SDOTWRP)"(s *out, int *n, s *sx, int *incx, s *sy, int *incy) nogil
+cdef s sdot(int *n, s *sx, int *incx, s *sy, int *incy) nogil:
+    cdef s out
+    _fortran_sdot(&out, n, sx, incx, sy, incy)
+    return out
+
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_sdsdot "F_FUNC(sdsdotwrp, SDSDOTWRP)"(s *out, int *n, s *sb, s *sx, int *incx, s *sy, int *incy) nogil
+cdef s sdsdot(int *n, s *sb, s *sx, int *incx, s *sy, int *incy) nogil:
+    cdef s out
+    _fortran_sdsdot(&out, n, sb, sx, incx, sy, incy)
+    return out
+
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_snrm2 "F_FUNC(snrm2wrp, SNRM2WRP)"(s *out, int *n, s *x, int *incx) nogil
+cdef s snrm2(int *n, s *x, int *incx) nogil:
+    cdef s out
+    _fortran_snrm2(&out, n, x, incx)
+    return out
+
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_zdotc "F_FUNC(zdotcwrp, ZDOTCWRP)"(z *out, int *n, npy_complex128 *zx, int *incx, npy_complex128 *zy, int *incy) nogil
+cdef z zdotc(int *n, z *zx, int *incx, z *zy, int *incy) nogil:
+    cdef z out
+    _fortran_zdotc(&out, n, zx, incx, zy, incy)
+    return out
+
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_zdotu "F_FUNC(zdotuwrp, ZDOTUWRP)"(z *out, int *n, npy_complex128 *zx, int *incx, npy_complex128 *zy, int *incy) nogil
+cdef z zdotu(int *n, z *zx, int *incx, z *zy, int *incy) nogil:
+    cdef z out
+    _fortran_zdotu(&out, n, zx, incx, zy, incy)
+    return out
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_caxpy "F_FUNC(caxpy,CAXPY)"(int *n, npy_complex64 *ca, npy_complex64 *cx, int *incx, npy_complex64 *cy, int *incy) nogil
+cdef void caxpy(int *n, c *ca, c *cx, int *incx, c *cy, int *incy) nogil:
+    _fortran_caxpy(n, ca, cx, incx, cy, incy)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_ccopy "F_FUNC(ccopy,CCOPY)"(int *n, npy_complex64 *cx, int *incx, npy_complex64 *cy, int *incy) nogil
+cdef void ccopy(int *n, c *cx, int *incx, c *cy, int *incy) nogil:
+    _fortran_ccopy(n, cx, incx, cy, incy)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_cgbmv "F_FUNC(cgbmv,CGBMV)"(char *trans, int *m, int *n, int *kl, int *ku, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *x, int *incx, npy_complex64 *beta, npy_complex64 *y, int *incy) nogil
+cdef void cgbmv(char *trans, int *m, int *n, int *kl, int *ku, c *alpha, c *a, int *lda, c *x, int *incx, c *beta, c *y, int *incy) nogil:
+    _fortran_cgbmv(trans, m, n, kl, ku, alpha, a, lda, x, incx, beta, y, incy)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_cgemm "F_FUNC(cgemm,CGEMM)"(char *transa, char *transb, int *m, int *n, int *k, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *beta, npy_complex64 *c, int *ldc) nogil
+cdef void cgemm(char *transa, char *transb, int *m, int *n, int *k, c *alpha, c *a, int *lda, c *b, int *ldb, c *beta, c *c, int *ldc) nogil:
+    _fortran_cgemm(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_cgemv "F_FUNC(cgemv,CGEMV)"(char *trans, int *m, int *n, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *x, int *incx, npy_complex64 *beta, npy_complex64 *y, int *incy) nogil
+cdef void cgemv(char *trans, int *m, int *n, c *alpha, c *a, int *lda, c *x, int *incx, c *beta, c *y, int *incy) nogil:
+    _fortran_cgemv(trans, m, n, alpha, a, lda, x, incx, beta, y, incy)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_cgerc "F_FUNC(cgerc,CGERC)"(int *m, int *n, npy_complex64 *alpha, npy_complex64 *x, int *incx, npy_complex64 *y, int *incy, npy_complex64 *a, int *lda) nogil
+cdef void cgerc(int *m, int *n, c *alpha, c *x, int *incx, c *y, int *incy, c *a, int *lda) nogil:
+    _fortran_cgerc(m, n, alpha, x, incx, y, incy, a, lda)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_cgeru "F_FUNC(cgeru,CGERU)"(int *m, int *n, npy_complex64 *alpha, npy_complex64 *x, int *incx, npy_complex64 *y, int *incy, npy_complex64 *a, int *lda) nogil
+cdef void cgeru(int *m, int *n, c *alpha, c *x, int *incx, c *y, int *incy, c *a, int *lda) nogil:
+    _fortran_cgeru(m, n, alpha, x, incx, y, incy, a, lda)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_chbmv "F_FUNC(chbmv,CHBMV)"(char *uplo, int *n, int *k, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *x, int *incx, npy_complex64 *beta, npy_complex64 *y, int *incy) nogil
+cdef void chbmv(char *uplo, int *n, int *k, c *alpha, c *a, int *lda, c *x, int *incx, c *beta, c *y, int *incy) nogil:
+    _fortran_chbmv(uplo, n, k, alpha, a, lda, x, incx, beta, y, incy)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_chemm "F_FUNC(chemm,CHEMM)"(char *side, char *uplo, int *m, int *n, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *beta, npy_complex64 *c, int *ldc) nogil
+cdef void chemm(char *side, char *uplo, int *m, int *n, c *alpha, c *a, int *lda, c *b, int *ldb, c *beta, c *c, int *ldc) nogil:
+    _fortran_chemm(side, uplo, m, n, alpha, a, lda, b, ldb, beta, c, ldc)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_chemv "F_FUNC(chemv,CHEMV)"(char *uplo, int *n, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *x, int *incx, npy_complex64 *beta, npy_complex64 *y, int *incy) nogil
+cdef void chemv(char *uplo, int *n, c *alpha, c *a, int *lda, c *x, int *incx, c *beta, c *y, int *incy) nogil:
+    _fortran_chemv(uplo, n, alpha, a, lda, x, incx, beta, y, incy)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_cher "F_FUNC(cher,CHER)"(char *uplo, int *n, s *alpha, npy_complex64 *x, int *incx, npy_complex64 *a, int *lda) nogil
+cdef void cher(char *uplo, int *n, s *alpha, c *x, int *incx, c *a, int *lda) nogil:
+    _fortran_cher(uplo, n, alpha, x, incx, a, lda)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_cher2 "F_FUNC(cher2,CHER2)"(char *uplo, int *n, npy_complex64 *alpha, npy_complex64 *x, int *incx, npy_complex64 *y, int *incy, npy_complex64 *a, int *lda) nogil
+cdef void cher2(char *uplo, int *n, c *alpha, c *x, int *incx, c *y, int *incy, c *a, int *lda) nogil:
+    _fortran_cher2(uplo, n, alpha, x, incx, y, incy, a, lda)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_cher2k "F_FUNC(cher2k,CHER2K)"(char *uplo, char *trans, int *n, int *k, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, s *beta, npy_complex64 *c, int *ldc) nogil
+cdef void cher2k(char *uplo, char *trans, int *n, int *k, c *alpha, c *a, int *lda, c *b, int *ldb, s *beta, c *c, int *ldc) nogil:
+    _fortran_cher2k(uplo, trans, n, k, alpha, a, lda, b, ldb, beta, c, ldc)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_cherk "F_FUNC(cherk,CHERK)"(char *uplo, char *trans, int *n, int *k, s *alpha, npy_complex64 *a, int *lda, s *beta, npy_complex64 *c, int *ldc) nogil
+cdef void cherk(char *uplo, char *trans, int *n, int *k, s *alpha, c *a, int *lda, s *beta, c *c, int *ldc) nogil:
+    _fortran_cherk(uplo, trans, n, k, alpha, a, lda, beta, c, ldc)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_chpmv "F_FUNC(chpmv,CHPMV)"(char *uplo, int *n, npy_complex64 *alpha, npy_complex64 *ap, npy_complex64 *x, int *incx, npy_complex64 *beta, npy_complex64 *y, int *incy) nogil
+cdef void chpmv(char *uplo, int *n, c *alpha, c *ap, c *x, int *incx, c *beta, c *y, int *incy) nogil:
+    _fortran_chpmv(uplo, n, alpha, ap, x, incx, beta, y, incy)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_chpr "F_FUNC(chpr,CHPR)"(char *uplo, int *n, s *alpha, npy_complex64 *x, int *incx, npy_complex64 *ap) nogil
+cdef void chpr(char *uplo, int *n, s *alpha, c *x, int *incx, c *ap) nogil:
+    _fortran_chpr(uplo, n, alpha, x, incx, ap)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_chpr2 "F_FUNC(chpr2,CHPR2)"(char *uplo, int *n, npy_complex64 *alpha, npy_complex64 *x, int *incx, npy_complex64 *y, int *incy, npy_complex64 *ap) nogil
+cdef void chpr2(char *uplo, int *n, c *alpha, c *x, int *incx, c *y, int *incy, c *ap) nogil:
+    _fortran_chpr2(uplo, n, alpha, x, incx, y, incy, ap)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_crotg "F_FUNC(crotg,CROTG)"(npy_complex64 *ca, npy_complex64 *cb, s *c, npy_complex64 *s) nogil
+cdef void crotg(c *ca, c *cb, s *c, c *s) nogil:
+    _fortran_crotg(ca, cb, c, s)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_cscal "F_FUNC(cscal,CSCAL)"(int *n, npy_complex64 *ca, npy_complex64 *cx, int *incx) nogil
+cdef void cscal(int *n, c *ca, c *cx, int *incx) nogil:
+    _fortran_cscal(n, ca, cx, incx)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_csrot "F_FUNC(csrot,CSROT)"(int *n, npy_complex64 *cx, int *incx, npy_complex64 *cy, int *incy, s *c, s *s) nogil
+cdef void csrot(int *n, c *cx, int *incx, c *cy, int *incy, s *c, s *s) nogil:
+    _fortran_csrot(n, cx, incx, cy, incy, c, s)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_csscal "F_FUNC(csscal,CSSCAL)"(int *n, s *sa, npy_complex64 *cx, int *incx) nogil
+cdef void csscal(int *n, s *sa, c *cx, int *incx) nogil:
+    _fortran_csscal(n, sa, cx, incx)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_cswap "F_FUNC(cswap,CSWAP)"(int *n, npy_complex64 *cx, int *incx, npy_complex64 *cy, int *incy) nogil
+cdef void cswap(int *n, c *cx, int *incx, c *cy, int *incy) nogil:
+    _fortran_cswap(n, cx, incx, cy, incy)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_csymm "F_FUNC(csymm,CSYMM)"(char *side, char *uplo, int *m, int *n, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *beta, npy_complex64 *c, int *ldc) nogil
+cdef void csymm(char *side, char *uplo, int *m, int *n, c *alpha, c *a, int *lda, c *b, int *ldb, c *beta, c *c, int *ldc) nogil:
+    _fortran_csymm(side, uplo, m, n, alpha, a, lda, b, ldb, beta, c, ldc)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_csyr2k "F_FUNC(csyr2k,CSYR2K)"(char *uplo, char *trans, int *n, int *k, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *beta, npy_complex64 *c, int *ldc) nogil
+cdef void csyr2k(char *uplo, char *trans, int *n, int *k, c *alpha, c *a, int *lda, c *b, int *ldb, c *beta, c *c, int *ldc) nogil:
+    _fortran_csyr2k(uplo, trans, n, k, alpha, a, lda, b, ldb, beta, c, ldc)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_csyrk "F_FUNC(csyrk,CSYRK)"(char *uplo, char *trans, int *n, int *k, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *beta, npy_complex64 *c, int *ldc) nogil
+cdef void csyrk(char *uplo, char *trans, int *n, int *k, c *alpha, c *a, int *lda, c *beta, c *c, int *ldc) nogil:
+    _fortran_csyrk(uplo, trans, n, k, alpha, a, lda, beta, c, ldc)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_ctbmv "F_FUNC(ctbmv,CTBMV)"(char *uplo, char *trans, char *diag, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *x, int *incx) nogil
+cdef void ctbmv(char *uplo, char *trans, char *diag, int *n, int *k, c *a, int *lda, c *x, int *incx) nogil:
+    _fortran_ctbmv(uplo, trans, diag, n, k, a, lda, x, incx)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_ctbsv "F_FUNC(ctbsv,CTBSV)"(char *uplo, char *trans, char *diag, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *x, int *incx) nogil
+cdef void ctbsv(char *uplo, char *trans, char *diag, int *n, int *k, c *a, int *lda, c *x, int *incx) nogil:
+    _fortran_ctbsv(uplo, trans, diag, n, k, a, lda, x, incx)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_ctpmv "F_FUNC(ctpmv,CTPMV)"(char *uplo, char *trans, char *diag, int *n, npy_complex64 *ap, npy_complex64 *x, int *incx) nogil
+cdef void ctpmv(char *uplo, char *trans, char *diag, int *n, c *ap, c *x, int *incx) nogil:
+    _fortran_ctpmv(uplo, trans, diag, n, ap, x, incx)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_ctpsv "F_FUNC(ctpsv,CTPSV)"(char *uplo, char *trans, char *diag, int *n, npy_complex64 *ap, npy_complex64 *x, int *incx) nogil
+cdef void ctpsv(char *uplo, char *trans, char *diag, int *n, c *ap, c *x, int *incx) nogil:
+    _fortran_ctpsv(uplo, trans, diag, n, ap, x, incx)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_ctrmm "F_FUNC(ctrmm,CTRMM)"(char *side, char *uplo, char *transa, char *diag, int *m, int *n, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb) nogil
+cdef void ctrmm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, c *alpha, c *a, int *lda, c *b, int *ldb) nogil:
+    _fortran_ctrmm(side, uplo, transa, diag, m, n, alpha, a, lda, b, ldb)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_ctrmv "F_FUNC(ctrmv,CTRMV)"(char *uplo, char *trans, char *diag, int *n, npy_complex64 *a, int *lda, npy_complex64 *x, int *incx) nogil
+cdef void ctrmv(char *uplo, char *trans, char *diag, int *n, c *a, int *lda, c *x, int *incx) nogil:
+    _fortran_ctrmv(uplo, trans, diag, n, a, lda, x, incx)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_ctrsm "F_FUNC(ctrsm,CTRSM)"(char *side, char *uplo, char *transa, char *diag, int *m, int *n, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb) nogil
+cdef void ctrsm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, c *alpha, c *a, int *lda, c *b, int *ldb) nogil:
+    _fortran_ctrsm(side, uplo, transa, diag, m, n, alpha, a, lda, b, ldb)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_ctrsv "F_FUNC(ctrsv,CTRSV)"(char *uplo, char *trans, char *diag, int *n, npy_complex64 *a, int *lda, npy_complex64 *x, int *incx) nogil
+cdef void ctrsv(char *uplo, char *trans, char *diag, int *n, c *a, int *lda, c *x, int *incx) nogil:
+    _fortran_ctrsv(uplo, trans, diag, n, a, lda, x, incx)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_daxpy "F_FUNC(daxpy,DAXPY)"(int *n, d *da, d *dx, int *incx, d *dy, int *incy) nogil
+cdef void daxpy(int *n, d *da, d *dx, int *incx, d *dy, int *incy) nogil:
+    _fortran_daxpy(n, da, dx, incx, dy, incy)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_dcopy "F_FUNC(dcopy,DCOPY)"(int *n, d *dx, int *incx, d *dy, int *incy) nogil
+cdef void dcopy(int *n, d *dx, int *incx, d *dy, int *incy) nogil:
+    _fortran_dcopy(n, dx, incx, dy, incy)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_dgbmv "F_FUNC(dgbmv,DGBMV)"(char *trans, int *m, int *n, int *kl, int *ku, d *alpha, d *a, int *lda, d *x, int *incx, d *beta, d *y, int *incy) nogil
+cdef void dgbmv(char *trans, int *m, int *n, int *kl, int *ku, d *alpha, d *a, int *lda, d *x, int *incx, d *beta, d *y, int *incy) nogil:
+    _fortran_dgbmv(trans, m, n, kl, ku, alpha, a, lda, x, incx, beta, y, incy)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_dgemm "F_FUNC(dgemm,DGEMM)"(char *transa, char *transb, int *m, int *n, int *k, d *alpha, d *a, int *lda, d *b, int *ldb, d *beta, d *c, int *ldc) nogil
+cdef void dgemm(char *transa, char *transb, int *m, int *n, int *k, d *alpha, d *a, int *lda, d *b, int *ldb, d *beta, d *c, int *ldc) nogil:
+    _fortran_dgemm(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_dgemv "F_FUNC(dgemv,DGEMV)"(char *trans, int *m, int *n, d *alpha, d *a, int *lda, d *x, int *incx, d *beta, d *y, int *incy) nogil
+cdef void dgemv(char *trans, int *m, int *n, d *alpha, d *a, int *lda, d *x, int *incx, d *beta, d *y, int *incy) nogil:
+    _fortran_dgemv(trans, m, n, alpha, a, lda, x, incx, beta, y, incy)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_dger "F_FUNC(dger,DGER)"(int *m, int *n, d *alpha, d *x, int *incx, d *y, int *incy, d *a, int *lda) nogil
+cdef void dger(int *m, int *n, d *alpha, d *x, int *incx, d *y, int *incy, d *a, int *lda) nogil:
+    _fortran_dger(m, n, alpha, x, incx, y, incy, a, lda)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_drot "F_FUNC(drot,DROT)"(int *n, d *dx, int *incx, d *dy, int *incy, d *c, d *s) nogil
+cdef void drot(int *n, d *dx, int *incx, d *dy, int *incy, d *c, d *s) nogil:
+    _fortran_drot(n, dx, incx, dy, incy, c, s)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_drotg "F_FUNC(drotg,DROTG)"(d *da, d *db, d *c, d *s) nogil
+cdef void drotg(d *da, d *db, d *c, d *s) nogil:
+    _fortran_drotg(da, db, c, s)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_drotm "F_FUNC(drotm,DROTM)"(int *n, d *dx, int *incx, d *dy, int *incy, d *dparam) nogil
+cdef void drotm(int *n, d *dx, int *incx, d *dy, int *incy, d *dparam) nogil:
+    _fortran_drotm(n, dx, incx, dy, incy, dparam)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_drotmg "F_FUNC(drotmg,DROTMG)"(d *dd1, d *dd2, d *dx1, d *dy1, d *dparam) nogil
+cdef void drotmg(d *dd1, d *dd2, d *dx1, d *dy1, d *dparam) nogil:
+    _fortran_drotmg(dd1, dd2, dx1, dy1, dparam)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_dsbmv "F_FUNC(dsbmv,DSBMV)"(char *uplo, int *n, int *k, d *alpha, d *a, int *lda, d *x, int *incx, d *beta, d *y, int *incy) nogil
+cdef void dsbmv(char *uplo, int *n, int *k, d *alpha, d *a, int *lda, d *x, int *incx, d *beta, d *y, int *incy) nogil:
+    _fortran_dsbmv(uplo, n, k, alpha, a, lda, x, incx, beta, y, incy)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_dscal "F_FUNC(dscal,DSCAL)"(int *n, d *da, d *dx, int *incx) nogil
+cdef void dscal(int *n, d *da, d *dx, int *incx) nogil:
+    _fortran_dscal(n, da, dx, incx)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_dspmv "F_FUNC(dspmv,DSPMV)"(char *uplo, int *n, d *alpha, d *ap, d *x, int *incx, d *beta, d *y, int *incy) nogil
+cdef void dspmv(char *uplo, int *n, d *alpha, d *ap, d *x, int *incx, d *beta, d *y, int *incy) nogil:
+    _fortran_dspmv(uplo, n, alpha, ap, x, incx, beta, y, incy)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_dspr "F_FUNC(dspr,DSPR)"(char *uplo, int *n, d *alpha, d *x, int *incx, d *ap) nogil
+cdef void dspr(char *uplo, int *n, d *alpha, d *x, int *incx, d *ap) nogil:
+    _fortran_dspr(uplo, n, alpha, x, incx, ap)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_dspr2 "F_FUNC(dspr2,DSPR2)"(char *uplo, int *n, d *alpha, d *x, int *incx, d *y, int *incy, d *ap) nogil
+cdef void dspr2(char *uplo, int *n, d *alpha, d *x, int *incx, d *y, int *incy, d *ap) nogil:
+    _fortran_dspr2(uplo, n, alpha, x, incx, y, incy, ap)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_dswap "F_FUNC(dswap,DSWAP)"(int *n, d *dx, int *incx, d *dy, int *incy) nogil
+cdef void dswap(int *n, d *dx, int *incx, d *dy, int *incy) nogil:
+    _fortran_dswap(n, dx, incx, dy, incy)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_dsymm "F_FUNC(dsymm,DSYMM)"(char *side, char *uplo, int *m, int *n, d *alpha, d *a, int *lda, d *b, int *ldb, d *beta, d *c, int *ldc) nogil
+cdef void dsymm(char *side, char *uplo, int *m, int *n, d *alpha, d *a, int *lda, d *b, int *ldb, d *beta, d *c, int *ldc) nogil:
+    _fortran_dsymm(side, uplo, m, n, alpha, a, lda, b, ldb, beta, c, ldc)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_dsymv "F_FUNC(dsymv,DSYMV)"(char *uplo, int *n, d *alpha, d *a, int *lda, d *x, int *incx, d *beta, d *y, int *incy) nogil
+cdef void dsymv(char *uplo, int *n, d *alpha, d *a, int *lda, d *x, int *incx, d *beta, d *y, int *incy) nogil:
+    _fortran_dsymv(uplo, n, alpha, a, lda, x, incx, beta, y, incy)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_dsyr "F_FUNC(dsyr,DSYR)"(char *uplo, int *n, d *alpha, d *x, int *incx, d *a, int *lda) nogil
+cdef void dsyr(char *uplo, int *n, d *alpha, d *x, int *incx, d *a, int *lda) nogil:
+    _fortran_dsyr(uplo, n, alpha, x, incx, a, lda)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_dsyr2 "F_FUNC(dsyr2,DSYR2)"(char *uplo, int *n, d *alpha, d *x, int *incx, d *y, int *incy, d *a, int *lda) nogil
+cdef void dsyr2(char *uplo, int *n, d *alpha, d *x, int *incx, d *y, int *incy, d *a, int *lda) nogil:
+    _fortran_dsyr2(uplo, n, alpha, x, incx, y, incy, a, lda)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_dsyr2k "F_FUNC(dsyr2k,DSYR2K)"(char *uplo, char *trans, int *n, int *k, d *alpha, d *a, int *lda, d *b, int *ldb, d *beta, d *c, int *ldc) nogil
+cdef void dsyr2k(char *uplo, char *trans, int *n, int *k, d *alpha, d *a, int *lda, d *b, int *ldb, d *beta, d *c, int *ldc) nogil:
+    _fortran_dsyr2k(uplo, trans, n, k, alpha, a, lda, b, ldb, beta, c, ldc)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_dsyrk "F_FUNC(dsyrk,DSYRK)"(char *uplo, char *trans, int *n, int *k, d *alpha, d *a, int *lda, d *beta, d *c, int *ldc) nogil
+cdef void dsyrk(char *uplo, char *trans, int *n, int *k, d *alpha, d *a, int *lda, d *beta, d *c, int *ldc) nogil:
+    _fortran_dsyrk(uplo, trans, n, k, alpha, a, lda, beta, c, ldc)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_dtbmv "F_FUNC(dtbmv,DTBMV)"(char *uplo, char *trans, char *diag, int *n, int *k, d *a, int *lda, d *x, int *incx) nogil
+cdef void dtbmv(char *uplo, char *trans, char *diag, int *n, int *k, d *a, int *lda, d *x, int *incx) nogil:
+    _fortran_dtbmv(uplo, trans, diag, n, k, a, lda, x, incx)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_dtbsv "F_FUNC(dtbsv,DTBSV)"(char *uplo, char *trans, char *diag, int *n, int *k, d *a, int *lda, d *x, int *incx) nogil
+cdef void dtbsv(char *uplo, char *trans, char *diag, int *n, int *k, d *a, int *lda, d *x, int *incx) nogil:
+    _fortran_dtbsv(uplo, trans, diag, n, k, a, lda, x, incx)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_dtpmv "F_FUNC(dtpmv,DTPMV)"(char *uplo, char *trans, char *diag, int *n, d *ap, d *x, int *incx) nogil
+cdef void dtpmv(char *uplo, char *trans, char *diag, int *n, d *ap, d *x, int *incx) nogil:
+    _fortran_dtpmv(uplo, trans, diag, n, ap, x, incx)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_dtpsv "F_FUNC(dtpsv,DTPSV)"(char *uplo, char *trans, char *diag, int *n, d *ap, d *x, int *incx) nogil
+cdef void dtpsv(char *uplo, char *trans, char *diag, int *n, d *ap, d *x, int *incx) nogil:
+    _fortran_dtpsv(uplo, trans, diag, n, ap, x, incx)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_dtrmm "F_FUNC(dtrmm,DTRMM)"(char *side, char *uplo, char *transa, char *diag, int *m, int *n, d *alpha, d *a, int *lda, d *b, int *ldb) nogil
+cdef void dtrmm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, d *alpha, d *a, int *lda, d *b, int *ldb) nogil:
+    _fortran_dtrmm(side, uplo, transa, diag, m, n, alpha, a, lda, b, ldb)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_dtrmv "F_FUNC(dtrmv,DTRMV)"(char *uplo, char *trans, char *diag, int *n, d *a, int *lda, d *x, int *incx) nogil
+cdef void dtrmv(char *uplo, char *trans, char *diag, int *n, d *a, int *lda, d *x, int *incx) nogil:
+    _fortran_dtrmv(uplo, trans, diag, n, a, lda, x, incx)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_dtrsm "F_FUNC(dtrsm,DTRSM)"(char *side, char *uplo, char *transa, char *diag, int *m, int *n, d *alpha, d *a, int *lda, d *b, int *ldb) nogil
+cdef void dtrsm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, d *alpha, d *a, int *lda, d *b, int *ldb) nogil:
+    _fortran_dtrsm(side, uplo, transa, diag, m, n, alpha, a, lda, b, ldb)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_dtrsv "F_FUNC(dtrsv,DTRSV)"(char *uplo, char *trans, char *diag, int *n, d *a, int *lda, d *x, int *incx) nogil
+cdef void dtrsv(char *uplo, char *trans, char *diag, int *n, d *a, int *lda, d *x, int *incx) nogil:
+    _fortran_dtrsv(uplo, trans, diag, n, a, lda, x, incx)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_saxpy "F_FUNC(saxpy,SAXPY)"(int *n, s *sa, s *sx, int *incx, s *sy, int *incy) nogil
+cdef void saxpy(int *n, s *sa, s *sx, int *incx, s *sy, int *incy) nogil:
+    _fortran_saxpy(n, sa, sx, incx, sy, incy)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_scopy "F_FUNC(scopy,SCOPY)"(int *n, s *sx, int *incx, s *sy, int *incy) nogil
+cdef void scopy(int *n, s *sx, int *incx, s *sy, int *incy) nogil:
+    _fortran_scopy(n, sx, incx, sy, incy)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_sgbmv "F_FUNC(sgbmv,SGBMV)"(char *trans, int *m, int *n, int *kl, int *ku, s *alpha, s *a, int *lda, s *x, int *incx, s *beta, s *y, int *incy) nogil
+cdef void sgbmv(char *trans, int *m, int *n, int *kl, int *ku, s *alpha, s *a, int *lda, s *x, int *incx, s *beta, s *y, int *incy) nogil:
+    _fortran_sgbmv(trans, m, n, kl, ku, alpha, a, lda, x, incx, beta, y, incy)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_sgemm "F_FUNC(sgemm,SGEMM)"(char *transa, char *transb, int *m, int *n, int *k, s *alpha, s *a, int *lda, s *b, int *ldb, s *beta, s *c, int *ldc) nogil
+cdef void sgemm(char *transa, char *transb, int *m, int *n, int *k, s *alpha, s *a, int *lda, s *b, int *ldb, s *beta, s *c, int *ldc) nogil:
+    _fortran_sgemm(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_sgemv "F_FUNC(sgemv,SGEMV)"(char *trans, int *m, int *n, s *alpha, s *a, int *lda, s *x, int *incx, s *beta, s *y, int *incy) nogil
+cdef void sgemv(char *trans, int *m, int *n, s *alpha, s *a, int *lda, s *x, int *incx, s *beta, s *y, int *incy) nogil:
+    _fortran_sgemv(trans, m, n, alpha, a, lda, x, incx, beta, y, incy)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_sger "F_FUNC(sger,SGER)"(int *m, int *n, s *alpha, s *x, int *incx, s *y, int *incy, s *a, int *lda) nogil
+cdef void sger(int *m, int *n, s *alpha, s *x, int *incx, s *y, int *incy, s *a, int *lda) nogil:
+    _fortran_sger(m, n, alpha, x, incx, y, incy, a, lda)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_srot "F_FUNC(srot,SROT)"(int *n, s *sx, int *incx, s *sy, int *incy, s *c, s *s) nogil
+cdef void srot(int *n, s *sx, int *incx, s *sy, int *incy, s *c, s *s) nogil:
+    _fortran_srot(n, sx, incx, sy, incy, c, s)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_srotg "F_FUNC(srotg,SROTG)"(s *sa, s *sb, s *c, s *s) nogil
+cdef void srotg(s *sa, s *sb, s *c, s *s) nogil:
+    _fortran_srotg(sa, sb, c, s)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_srotm "F_FUNC(srotm,SROTM)"(int *n, s *sx, int *incx, s *sy, int *incy, s *sparam) nogil
+cdef void srotm(int *n, s *sx, int *incx, s *sy, int *incy, s *sparam) nogil:
+    _fortran_srotm(n, sx, incx, sy, incy, sparam)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_srotmg "F_FUNC(srotmg,SROTMG)"(s *sd1, s *sd2, s *sx1, s *sy1, s *sparam) nogil
+cdef void srotmg(s *sd1, s *sd2, s *sx1, s *sy1, s *sparam) nogil:
+    _fortran_srotmg(sd1, sd2, sx1, sy1, sparam)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_ssbmv "F_FUNC(ssbmv,SSBMV)"(char *uplo, int *n, int *k, s *alpha, s *a, int *lda, s *x, int *incx, s *beta, s *y, int *incy) nogil
+cdef void ssbmv(char *uplo, int *n, int *k, s *alpha, s *a, int *lda, s *x, int *incx, s *beta, s *y, int *incy) nogil:
+    _fortran_ssbmv(uplo, n, k, alpha, a, lda, x, incx, beta, y, incy)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_sscal "F_FUNC(sscal,SSCAL)"(int *n, s *sa, s *sx, int *incx) nogil
+cdef void sscal(int *n, s *sa, s *sx, int *incx) nogil:
+    _fortran_sscal(n, sa, sx, incx)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_sspmv "F_FUNC(sspmv,SSPMV)"(char *uplo, int *n, s *alpha, s *ap, s *x, int *incx, s *beta, s *y, int *incy) nogil
+cdef void sspmv(char *uplo, int *n, s *alpha, s *ap, s *x, int *incx, s *beta, s *y, int *incy) nogil:
+    _fortran_sspmv(uplo, n, alpha, ap, x, incx, beta, y, incy)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_sspr "F_FUNC(sspr,SSPR)"(char *uplo, int *n, s *alpha, s *x, int *incx, s *ap) nogil
+cdef void sspr(char *uplo, int *n, s *alpha, s *x, int *incx, s *ap) nogil:
+    _fortran_sspr(uplo, n, alpha, x, incx, ap)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_sspr2 "F_FUNC(sspr2,SSPR2)"(char *uplo, int *n, s *alpha, s *x, int *incx, s *y, int *incy, s *ap) nogil
+cdef void sspr2(char *uplo, int *n, s *alpha, s *x, int *incx, s *y, int *incy, s *ap) nogil:
+    _fortran_sspr2(uplo, n, alpha, x, incx, y, incy, ap)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_sswap "F_FUNC(sswap,SSWAP)"(int *n, s *sx, int *incx, s *sy, int *incy) nogil
+cdef void sswap(int *n, s *sx, int *incx, s *sy, int *incy) nogil:
+    _fortran_sswap(n, sx, incx, sy, incy)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_ssymm "F_FUNC(ssymm,SSYMM)"(char *side, char *uplo, int *m, int *n, s *alpha, s *a, int *lda, s *b, int *ldb, s *beta, s *c, int *ldc) nogil
+cdef void ssymm(char *side, char *uplo, int *m, int *n, s *alpha, s *a, int *lda, s *b, int *ldb, s *beta, s *c, int *ldc) nogil:
+    _fortran_ssymm(side, uplo, m, n, alpha, a, lda, b, ldb, beta, c, ldc)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_ssymv "F_FUNC(ssymv,SSYMV)"(char *uplo, int *n, s *alpha, s *a, int *lda, s *x, int *incx, s *beta, s *y, int *incy) nogil
+cdef void ssymv(char *uplo, int *n, s *alpha, s *a, int *lda, s *x, int *incx, s *beta, s *y, int *incy) nogil:
+    _fortran_ssymv(uplo, n, alpha, a, lda, x, incx, beta, y, incy)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_ssyr "F_FUNC(ssyr,SSYR)"(char *uplo, int *n, s *alpha, s *x, int *incx, s *a, int *lda) nogil
+cdef void ssyr(char *uplo, int *n, s *alpha, s *x, int *incx, s *a, int *lda) nogil:
+    _fortran_ssyr(uplo, n, alpha, x, incx, a, lda)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_ssyr2 "F_FUNC(ssyr2,SSYR2)"(char *uplo, int *n, s *alpha, s *x, int *incx, s *y, int *incy, s *a, int *lda) nogil
+cdef void ssyr2(char *uplo, int *n, s *alpha, s *x, int *incx, s *y, int *incy, s *a, int *lda) nogil:
+    _fortran_ssyr2(uplo, n, alpha, x, incx, y, incy, a, lda)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_ssyr2k "F_FUNC(ssyr2k,SSYR2K)"(char *uplo, char *trans, int *n, int *k, s *alpha, s *a, int *lda, s *b, int *ldb, s *beta, s *c, int *ldc) nogil
+cdef void ssyr2k(char *uplo, char *trans, int *n, int *k, s *alpha, s *a, int *lda, s *b, int *ldb, s *beta, s *c, int *ldc) nogil:
+    _fortran_ssyr2k(uplo, trans, n, k, alpha, a, lda, b, ldb, beta, c, ldc)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_ssyrk "F_FUNC(ssyrk,SSYRK)"(char *uplo, char *trans, int *n, int *k, s *alpha, s *a, int *lda, s *beta, s *c, int *ldc) nogil
+cdef void ssyrk(char *uplo, char *trans, int *n, int *k, s *alpha, s *a, int *lda, s *beta, s *c, int *ldc) nogil:
+    _fortran_ssyrk(uplo, trans, n, k, alpha, a, lda, beta, c, ldc)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_stbmv "F_FUNC(stbmv,STBMV)"(char *uplo, char *trans, char *diag, int *n, int *k, s *a, int *lda, s *x, int *incx) nogil
+cdef void stbmv(char *uplo, char *trans, char *diag, int *n, int *k, s *a, int *lda, s *x, int *incx) nogil:
+    _fortran_stbmv(uplo, trans, diag, n, k, a, lda, x, incx)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_stbsv "F_FUNC(stbsv,STBSV)"(char *uplo, char *trans, char *diag, int *n, int *k, s *a, int *lda, s *x, int *incx) nogil
+cdef void stbsv(char *uplo, char *trans, char *diag, int *n, int *k, s *a, int *lda, s *x, int *incx) nogil:
+    _fortran_stbsv(uplo, trans, diag, n, k, a, lda, x, incx)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_stpmv "F_FUNC(stpmv,STPMV)"(char *uplo, char *trans, char *diag, int *n, s *ap, s *x, int *incx) nogil
+cdef void stpmv(char *uplo, char *trans, char *diag, int *n, s *ap, s *x, int *incx) nogil:
+    _fortran_stpmv(uplo, trans, diag, n, ap, x, incx)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_stpsv "F_FUNC(stpsv,STPSV)"(char *uplo, char *trans, char *diag, int *n, s *ap, s *x, int *incx) nogil
+cdef void stpsv(char *uplo, char *trans, char *diag, int *n, s *ap, s *x, int *incx) nogil:
+    _fortran_stpsv(uplo, trans, diag, n, ap, x, incx)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_strmm "F_FUNC(strmm,STRMM)"(char *side, char *uplo, char *transa, char *diag, int *m, int *n, s *alpha, s *a, int *lda, s *b, int *ldb) nogil
+cdef void strmm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, s *alpha, s *a, int *lda, s *b, int *ldb) nogil:
+    _fortran_strmm(side, uplo, transa, diag, m, n, alpha, a, lda, b, ldb)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_strmv "F_FUNC(strmv,STRMV)"(char *uplo, char *trans, char *diag, int *n, s *a, int *lda, s *x, int *incx) nogil
+cdef void strmv(char *uplo, char *trans, char *diag, int *n, s *a, int *lda, s *x, int *incx) nogil:
+    _fortran_strmv(uplo, trans, diag, n, a, lda, x, incx)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_strsm "F_FUNC(strsm,STRSM)"(char *side, char *uplo, char *transa, char *diag, int *m, int *n, s *alpha, s *a, int *lda, s *b, int *ldb) nogil
+cdef void strsm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, s *alpha, s *a, int *lda, s *b, int *ldb) nogil:
+    _fortran_strsm(side, uplo, transa, diag, m, n, alpha, a, lda, b, ldb)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_strsv "F_FUNC(strsv,STRSV)"(char *uplo, char *trans, char *diag, int *n, s *a, int *lda, s *x, int *incx) nogil
+cdef void strsv(char *uplo, char *trans, char *diag, int *n, s *a, int *lda, s *x, int *incx) nogil:
+    _fortran_strsv(uplo, trans, diag, n, a, lda, x, incx)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_zaxpy "F_FUNC(zaxpy,ZAXPY)"(int *n, npy_complex128 *za, npy_complex128 *zx, int *incx, npy_complex128 *zy, int *incy) nogil
+cdef void zaxpy(int *n, z *za, z *zx, int *incx, z *zy, int *incy) nogil:
+    _fortran_zaxpy(n, za, zx, incx, zy, incy)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_zcopy "F_FUNC(zcopy,ZCOPY)"(int *n, npy_complex128 *zx, int *incx, npy_complex128 *zy, int *incy) nogil
+cdef void zcopy(int *n, z *zx, int *incx, z *zy, int *incy) nogil:
+    _fortran_zcopy(n, zx, incx, zy, incy)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_zdrot "F_FUNC(zdrot,ZDROT)"(int *n, npy_complex128 *cx, int *incx, npy_complex128 *cy, int *incy, d *c, d *s) nogil
+cdef void zdrot(int *n, z *cx, int *incx, z *cy, int *incy, d *c, d *s) nogil:
+    _fortran_zdrot(n, cx, incx, cy, incy, c, s)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_zdscal "F_FUNC(zdscal,ZDSCAL)"(int *n, d *da, npy_complex128 *zx, int *incx) nogil
+cdef void zdscal(int *n, d *da, z *zx, int *incx) nogil:
+    _fortran_zdscal(n, da, zx, incx)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_zgbmv "F_FUNC(zgbmv,ZGBMV)"(char *trans, int *m, int *n, int *kl, int *ku, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *x, int *incx, npy_complex128 *beta, npy_complex128 *y, int *incy) nogil
+cdef void zgbmv(char *trans, int *m, int *n, int *kl, int *ku, z *alpha, z *a, int *lda, z *x, int *incx, z *beta, z *y, int *incy) nogil:
+    _fortran_zgbmv(trans, m, n, kl, ku, alpha, a, lda, x, incx, beta, y, incy)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_zgemm "F_FUNC(zgemm,ZGEMM)"(char *transa, char *transb, int *m, int *n, int *k, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *beta, npy_complex128 *c, int *ldc) nogil
+cdef void zgemm(char *transa, char *transb, int *m, int *n, int *k, z *alpha, z *a, int *lda, z *b, int *ldb, z *beta, z *c, int *ldc) nogil:
+    _fortran_zgemm(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_zgemv "F_FUNC(zgemv,ZGEMV)"(char *trans, int *m, int *n, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *x, int *incx, npy_complex128 *beta, npy_complex128 *y, int *incy) nogil
+cdef void zgemv(char *trans, int *m, int *n, z *alpha, z *a, int *lda, z *x, int *incx, z *beta, z *y, int *incy) nogil:
+    _fortran_zgemv(trans, m, n, alpha, a, lda, x, incx, beta, y, incy)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_zgerc "F_FUNC(zgerc,ZGERC)"(int *m, int *n, npy_complex128 *alpha, npy_complex128 *x, int *incx, npy_complex128 *y, int *incy, npy_complex128 *a, int *lda) nogil
+cdef void zgerc(int *m, int *n, z *alpha, z *x, int *incx, z *y, int *incy, z *a, int *lda) nogil:
+    _fortran_zgerc(m, n, alpha, x, incx, y, incy, a, lda)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_zgeru "F_FUNC(zgeru,ZGERU)"(int *m, int *n, npy_complex128 *alpha, npy_complex128 *x, int *incx, npy_complex128 *y, int *incy, npy_complex128 *a, int *lda) nogil
+cdef void zgeru(int *m, int *n, z *alpha, z *x, int *incx, z *y, int *incy, z *a, int *lda) nogil:
+    _fortran_zgeru(m, n, alpha, x, incx, y, incy, a, lda)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_zhbmv "F_FUNC(zhbmv,ZHBMV)"(char *uplo, int *n, int *k, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *x, int *incx, npy_complex128 *beta, npy_complex128 *y, int *incy) nogil
+cdef void zhbmv(char *uplo, int *n, int *k, z *alpha, z *a, int *lda, z *x, int *incx, z *beta, z *y, int *incy) nogil:
+    _fortran_zhbmv(uplo, n, k, alpha, a, lda, x, incx, beta, y, incy)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_zhemm "F_FUNC(zhemm,ZHEMM)"(char *side, char *uplo, int *m, int *n, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *beta, npy_complex128 *c, int *ldc) nogil
+cdef void zhemm(char *side, char *uplo, int *m, int *n, z *alpha, z *a, int *lda, z *b, int *ldb, z *beta, z *c, int *ldc) nogil:
+    _fortran_zhemm(side, uplo, m, n, alpha, a, lda, b, ldb, beta, c, ldc)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_zhemv "F_FUNC(zhemv,ZHEMV)"(char *uplo, int *n, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *x, int *incx, npy_complex128 *beta, npy_complex128 *y, int *incy) nogil
+cdef void zhemv(char *uplo, int *n, z *alpha, z *a, int *lda, z *x, int *incx, z *beta, z *y, int *incy) nogil:
+    _fortran_zhemv(uplo, n, alpha, a, lda, x, incx, beta, y, incy)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_zher "F_FUNC(zher,ZHER)"(char *uplo, int *n, d *alpha, npy_complex128 *x, int *incx, npy_complex128 *a, int *lda) nogil
+cdef void zher(char *uplo, int *n, d *alpha, z *x, int *incx, z *a, int *lda) nogil:
+    _fortran_zher(uplo, n, alpha, x, incx, a, lda)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_zher2 "F_FUNC(zher2,ZHER2)"(char *uplo, int *n, npy_complex128 *alpha, npy_complex128 *x, int *incx, npy_complex128 *y, int *incy, npy_complex128 *a, int *lda) nogil
+cdef void zher2(char *uplo, int *n, z *alpha, z *x, int *incx, z *y, int *incy, z *a, int *lda) nogil:
+    _fortran_zher2(uplo, n, alpha, x, incx, y, incy, a, lda)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_zher2k "F_FUNC(zher2k,ZHER2K)"(char *uplo, char *trans, int *n, int *k, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, d *beta, npy_complex128 *c, int *ldc) nogil
+cdef void zher2k(char *uplo, char *trans, int *n, int *k, z *alpha, z *a, int *lda, z *b, int *ldb, d *beta, z *c, int *ldc) nogil:
+    _fortran_zher2k(uplo, trans, n, k, alpha, a, lda, b, ldb, beta, c, ldc)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_zherk "F_FUNC(zherk,ZHERK)"(char *uplo, char *trans, int *n, int *k, d *alpha, npy_complex128 *a, int *lda, d *beta, npy_complex128 *c, int *ldc) nogil
+cdef void zherk(char *uplo, char *trans, int *n, int *k, d *alpha, z *a, int *lda, d *beta, z *c, int *ldc) nogil:
+    _fortran_zherk(uplo, trans, n, k, alpha, a, lda, beta, c, ldc)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_zhpmv "F_FUNC(zhpmv,ZHPMV)"(char *uplo, int *n, npy_complex128 *alpha, npy_complex128 *ap, npy_complex128 *x, int *incx, npy_complex128 *beta, npy_complex128 *y, int *incy) nogil
+cdef void zhpmv(char *uplo, int *n, z *alpha, z *ap, z *x, int *incx, z *beta, z *y, int *incy) nogil:
+    _fortran_zhpmv(uplo, n, alpha, ap, x, incx, beta, y, incy)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_zhpr "F_FUNC(zhpr,ZHPR)"(char *uplo, int *n, d *alpha, npy_complex128 *x, int *incx, npy_complex128 *ap) nogil
+cdef void zhpr(char *uplo, int *n, d *alpha, z *x, int *incx, z *ap) nogil:
+    _fortran_zhpr(uplo, n, alpha, x, incx, ap)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_zhpr2 "F_FUNC(zhpr2,ZHPR2)"(char *uplo, int *n, npy_complex128 *alpha, npy_complex128 *x, int *incx, npy_complex128 *y, int *incy, npy_complex128 *ap) nogil
+cdef void zhpr2(char *uplo, int *n, z *alpha, z *x, int *incx, z *y, int *incy, z *ap) nogil:
+    _fortran_zhpr2(uplo, n, alpha, x, incx, y, incy, ap)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_zrotg "F_FUNC(zrotg,ZROTG)"(npy_complex128 *ca, npy_complex128 *cb, d *c, npy_complex128 *s) nogil
+cdef void zrotg(z *ca, z *cb, d *c, z *s) nogil:
+    _fortran_zrotg(ca, cb, c, s)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_zscal "F_FUNC(zscal,ZSCAL)"(int *n, npy_complex128 *za, npy_complex128 *zx, int *incx) nogil
+cdef void zscal(int *n, z *za, z *zx, int *incx) nogil:
+    _fortran_zscal(n, za, zx, incx)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_zswap "F_FUNC(zswap,ZSWAP)"(int *n, npy_complex128 *zx, int *incx, npy_complex128 *zy, int *incy) nogil
+cdef void zswap(int *n, z *zx, int *incx, z *zy, int *incy) nogil:
+    _fortran_zswap(n, zx, incx, zy, incy)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_zsymm "F_FUNC(zsymm,ZSYMM)"(char *side, char *uplo, int *m, int *n, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *beta, npy_complex128 *c, int *ldc) nogil
+cdef void zsymm(char *side, char *uplo, int *m, int *n, z *alpha, z *a, int *lda, z *b, int *ldb, z *beta, z *c, int *ldc) nogil:
+    _fortran_zsymm(side, uplo, m, n, alpha, a, lda, b, ldb, beta, c, ldc)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_zsyr2k "F_FUNC(zsyr2k,ZSYR2K)"(char *uplo, char *trans, int *n, int *k, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *beta, npy_complex128 *c, int *ldc) nogil
+cdef void zsyr2k(char *uplo, char *trans, int *n, int *k, z *alpha, z *a, int *lda, z *b, int *ldb, z *beta, z *c, int *ldc) nogil:
+    _fortran_zsyr2k(uplo, trans, n, k, alpha, a, lda, b, ldb, beta, c, ldc)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_zsyrk "F_FUNC(zsyrk,ZSYRK)"(char *uplo, char *trans, int *n, int *k, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *beta, npy_complex128 *c, int *ldc) nogil
+cdef void zsyrk(char *uplo, char *trans, int *n, int *k, z *alpha, z *a, int *lda, z *beta, z *c, int *ldc) nogil:
+    _fortran_zsyrk(uplo, trans, n, k, alpha, a, lda, beta, c, ldc)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_ztbmv "F_FUNC(ztbmv,ZTBMV)"(char *uplo, char *trans, char *diag, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *x, int *incx) nogil
+cdef void ztbmv(char *uplo, char *trans, char *diag, int *n, int *k, z *a, int *lda, z *x, int *incx) nogil:
+    _fortran_ztbmv(uplo, trans, diag, n, k, a, lda, x, incx)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_ztbsv "F_FUNC(ztbsv,ZTBSV)"(char *uplo, char *trans, char *diag, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *x, int *incx) nogil
+cdef void ztbsv(char *uplo, char *trans, char *diag, int *n, int *k, z *a, int *lda, z *x, int *incx) nogil:
+    _fortran_ztbsv(uplo, trans, diag, n, k, a, lda, x, incx)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_ztpmv "F_FUNC(ztpmv,ZTPMV)"(char *uplo, char *trans, char *diag, int *n, npy_complex128 *ap, npy_complex128 *x, int *incx) nogil
+cdef void ztpmv(char *uplo, char *trans, char *diag, int *n, z *ap, z *x, int *incx) nogil:
+    _fortran_ztpmv(uplo, trans, diag, n, ap, x, incx)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_ztpsv "F_FUNC(ztpsv,ZTPSV)"(char *uplo, char *trans, char *diag, int *n, npy_complex128 *ap, npy_complex128 *x, int *incx) nogil
+cdef void ztpsv(char *uplo, char *trans, char *diag, int *n, z *ap, z *x, int *incx) nogil:
+    _fortran_ztpsv(uplo, trans, diag, n, ap, x, incx)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_ztrmm "F_FUNC(ztrmm,ZTRMM)"(char *side, char *uplo, char *transa, char *diag, int *m, int *n, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb) nogil
+cdef void ztrmm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, z *alpha, z *a, int *lda, z *b, int *ldb) nogil:
+    _fortran_ztrmm(side, uplo, transa, diag, m, n, alpha, a, lda, b, ldb)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_ztrmv "F_FUNC(ztrmv,ZTRMV)"(char *uplo, char *trans, char *diag, int *n, npy_complex128 *a, int *lda, npy_complex128 *x, int *incx) nogil
+cdef void ztrmv(char *uplo, char *trans, char *diag, int *n, z *a, int *lda, z *x, int *incx) nogil:
+    _fortran_ztrmv(uplo, trans, diag, n, a, lda, x, incx)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_ztrsm "F_FUNC(ztrsm,ZTRSM)"(char *side, char *uplo, char *transa, char *diag, int *m, int *n, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb) nogil
+cdef void ztrsm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, z *alpha, z *a, int *lda, z *b, int *ldb) nogil:
+    _fortran_ztrsm(side, uplo, transa, diag, m, n, alpha, a, lda, b, ldb)
+
+cdef extern from "_blas_subroutines.h":
+    void _fortran_ztrsv "F_FUNC(ztrsv,ZTRSV)"(char *uplo, char *trans, char *diag, int *n, npy_complex128 *a, int *lda, npy_complex128 *x, int *incx) nogil
+cdef void ztrsv(char *uplo, char *trans, char *diag, int *n, z *a, int *lda, z *x, int *incx) nogil:
+    _fortran_ztrsv(uplo, trans, diag, n, a, lda, x, incx)
+
+
+# Python-accessible wrappers for testing:
+
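+# An axis of a memoryview is treated as contiguous when its stride equals
+# the element size, or when its extent is 1 (the stride is then irrelevant).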
+cdef inline bint _is_contiguous(double[:,:] a, int axis) nogil:
+    return (a.strides[axis] == sizeof(a[0,0]) or a.shape[axis] == 1)
+
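+# Each test wrapper below converts the memoryview's byte stride into a BLAS
+# element increment (incx = strides[0] // itemsize) and forwards raw
+# pointers to the corresponding cdef wrapper.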
+cpdef float complex _test_cdotc(float complex[:] cx, float complex[:] cy) nogil:
+    cdef:
+        int n = cx.shape[0]
+        int incx = cx.strides[0] // sizeof(cx[0])
+        int incy = cy.strides[0] // sizeof(cy[0])
+    return cdotc(&n, &cx[0], &incx, &cy[0], &incy)
+
+cpdef float complex _test_cdotu(float complex[:] cx, float complex[:] cy) nogil:
+    cdef:
+        int n = cx.shape[0]
+        int incx = cx.strides[0] // sizeof(cx[0])
+        int incy = cy.strides[0] // sizeof(cy[0])
+    return cdotu(&n, &cx[0], &incx, &cy[0], &incy)
+
+cpdef double _test_dasum(double[:] dx) nogil:
+    cdef:
+        int n = dx.shape[0]
+        int incx = dx.strides[0] // sizeof(dx[0])
+    return dasum(&n, &dx[0], &incx)
+
+cpdef double _test_ddot(double[:] dx, double[:] dy) nogil:
+    cdef:
+        int n = dx.shape[0]
+        int incx = dx.strides[0] // sizeof(dx[0])
+        int incy = dy.strides[0] // sizeof(dy[0])
+    return ddot(&n, &dx[0], &incx, &dy[0], &incy)
+
+cpdef int _test_dgemm(double alpha, double[:,:] a, double[:,:] b, double beta,
+                double[:,:] c) nogil except -1:
+    cdef:
+        char *transa
+        char *transb
+        int m, n, k, lda, ldb, ldc
+        double *a0=&a[0,0]
+        double *b0=&b[0,0]
+        double *c0=&c[0,0]
+    # In the case that c is C contiguous, swap a and b and
+    # swap whether or not each of them is transposed.
+    # This can be done because a.dot(b) = b.T.dot(a.T).T.
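+    # Concretely, for C-contiguous c the call computes c.T = b.T.dot(a.T)
+    # in Fortran order, which stores exactly a.dot(b) in C order; the
+    # leading dimensions are recovered as pointer differences between
+    # consecutive rows (or columns) of each array.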
+    if _is_contiguous(c, 1):
+        if _is_contiguous(a, 1):
+            transb = 'n'
+            ldb = (&a[1,0]) - a0 if a.shape[0] > 1 else 1
+        elif _is_contiguous(a, 0):
+            transb = 't'
+            ldb = (&a[0,1]) - a0 if a.shape[1] > 1 else 1
+        else:
+            with gil:
+                raise ValueError("Input 'a' is neither C nor Fortran contiguous.")
+        if _is_contiguous(b, 1):
+            transa = 'n'
+            lda = (&b[1,0]) - b0 if b.shape[0] > 1 else 1
+        elif _is_contiguous(b, 0):
+            transa = 't'
+            lda = (&b[0,1]) - b0 if b.shape[1] > 1 else 1
+        else:
+            with gil:
+                raise ValueError("Input 'b' is neither C nor Fortran contiguous.")
+        k = b.shape[0]
+        if k != a.shape[1]:
+            with gil:
+                raise ValueError("Shape mismatch in input arrays.")
+        m = b.shape[1]
+        n = a.shape[0]
+        if n != c.shape[0] or m != c.shape[1]:
+            with gil:
+                raise ValueError("Output array does not have the correct shape.")
+        ldc = (&c[1,0]) - c0 if c.shape[0] > 1 else 1
+        dgemm(transa, transb, &m, &n, &k, &alpha, b0, &lda, a0,
+                   &ldb, &beta, c0, &ldc)
+    elif _is_contiguous(c, 0):
+        if _is_contiguous(a, 1):
+            transa = 't'
+            lda = (&a[1,0]) - a0 if a.shape[0] > 1 else 1
+        elif _is_contiguous(a, 0):
+            transa = 'n'
+            lda = (&a[0,1]) - a0 if a.shape[1] > 1 else 1
+        else:
+            with gil:
+                raise ValueError("Input 'a' is neither C nor Fortran contiguous.")
+        if _is_contiguous(b, 1):
+            transb = 't'
+            ldb = (&b[1,0]) - b0 if b.shape[0] > 1 else 1
+        elif _is_contiguous(b, 0):
+            transb = 'n'
+            ldb = (&b[0,1]) - b0 if b.shape[1] > 1 else 1
+        else:
+            with gil:
+                raise ValueError("Input 'b' is neither C nor Fortran contiguous.")
+        m = a.shape[0]
+        k = a.shape[1]
+        if k != b.shape[0]:
+            with gil:
+                raise ValueError("Shape mismatch in input arrays.")
+        n = b.shape[1]
+        if m != c.shape[0] or n != c.shape[1]:
+            with gil:
+                raise ValueError("Output array does not have the correct shape.")
+        ldc = (&c[0,1]) - c0 if c.shape[1] > 1 else 1
+        dgemm(transa, transb, &m, &n, &k, &alpha, a0, &lda, b0,
+                   &ldb, &beta, c0, &ldc)
+    else:
+        with gil:
+            raise ValueError("Input 'c' is neither C nor Fortran contiguous.")
+    return 0
+
+cpdef double _test_dnrm2(double[:] x) nogil:
+    cdef:
+        int n = x.shape[0]
+        int incx = x.strides[0] // sizeof(x[0])
+    return dnrm2(&n, &x[0], &incx)
+
+cpdef double _test_dzasum(double complex[:] zx) nogil:
+    cdef:
+        int n = zx.shape[0]
+        int incx = zx.strides[0] // sizeof(zx[0])
+    return dzasum(&n, &zx[0], &incx)
+
+cpdef double _test_dznrm2(double complex[:] x) nogil:
+    cdef:
+        int n = x.shape[0]
+        int incx = x.strides[0] // sizeof(x[0])
+    return dznrm2(&n, &x[0], &incx)
+
+cpdef int _test_icamax(float complex[:] cx) nogil:
+    cdef:
+        int n = cx.shape[0]
+        int incx = cx.strides[0] // sizeof(cx[0])
+    return icamax(&n, &cx[0], &incx)
+
+cpdef int _test_idamax(double[:] dx) nogil:
+    cdef:
+        int n = dx.shape[0]
+        int incx = dx.strides[0] // sizeof(dx[0])
+    return idamax(&n, &dx[0], &incx)
+
+cpdef int _test_isamax(float[:] sx) nogil:
+    cdef:
+        int n = sx.shape[0]
+        int incx = sx.strides[0] // sizeof(sx[0])
+    return isamax(&n, &sx[0], &incx)
+
+cpdef int _test_izamax(double complex[:] zx) nogil:
+    cdef:
+        int n = zx.shape[0]
+        int incx = zx.strides[0] // sizeof(zx[0])
+    return izamax(&n, &zx[0], &incx)
+
+cpdef float _test_sasum(float[:] sx) nogil:
+    cdef:
+        int n = sx.shape[0]
+        int incx = sx.strides[0] // sizeof(sx[0])
+    return sasum(&n, &sx[0], &incx)
+
+cpdef float _test_scasum(float complex[:] cx) nogil:
+    cdef:
+        int n = cx.shape[0]
+        int incx = cx.strides[0] // sizeof(cx[0])
+    return scasum(&n, &cx[0], &incx)
+
+cpdef float _test_scnrm2(float complex[:] x) nogil:
+    cdef:
+        int n = x.shape[0]
+        int incx = x.strides[0] // sizeof(x[0])
+    return scnrm2(&n, &x[0], &incx)
+
+cpdef float _test_sdot(float[:] sx, float[:] sy) nogil:
+    cdef:
+        int n = sx.shape[0]
+        int incx = sx.strides[0] // sizeof(sx[0])
+        int incy = sy.strides[0] // sizeof(sy[0])
+    return sdot(&n, &sx[0], &incx, &sy[0], &incy)
+
+cpdef float _test_snrm2(float[:] x) nogil:
+    cdef:
+        int n = x.shape[0]
+        int incx = x.strides[0] // sizeof(x[0])
+    return snrm2(&n, &x[0], &incx)
+
+cpdef double complex _test_zdotc(double complex[:] zx, double complex[:] zy) nogil:
+    cdef:
+        int n = zx.shape[0]
+        int incx = zx.strides[0] // sizeof(zx[0])
+        int incy = zy.strides[0] // sizeof(zy[0])
+    return zdotc(&n, &zx[0], &incx, &zy[0], &incy)
+
+cpdef double complex _test_zdotu(double complex[:] zx, double complex[:] zy) nogil:
+    cdef:
+        int n = zx.shape[0]
+        int incx = zx.strides[0] // sizeof(zx[0])
+        int incy = zy.strides[0] // sizeof(zy[0])
+    return zdotu(&n, &zx[0], &incx, &zy[0], &incy)
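+
+# A minimal usage sketch for the test wrappers above (assumptions: this
+# module is built and importable as scipy.linalg.cython_blas, and NumPy
+# is available):
+#
+#     import numpy as np
+#     from scipy.linalg import cython_blas
+#     a = np.arange(6.0).reshape(2, 3)
+#     b = np.arange(12.0).reshape(3, 4)
+#     out = np.zeros((2, 4))
+#     cython_blas._test_dgemm(1.0, a, b, 0.0, out)  # out := 1.0*(a @ b) + 0.0*out
+#     assert np.allclose(out, a @ b)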
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/cython_lapack.pxd b/__packaged__/coreml/.python_dependencies/scipy/linalg/cython_lapack.pxd
new file mode 100644
index 00000000..7c36189d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/cython_lapack.pxd
@@ -0,0 +1,3021 @@
+# This file was generated by _generate_pyx.py.
+# Do not edit this file directly.
+
+# Within SciPy, these wrappers can be used via relative or absolute cimport.
+# Examples:
+# from ..linalg cimport cython_lapack
+# from scipy.linalg cimport cython_lapack
+# cimport scipy.linalg.cython_lapack as cython_lapack
+# cimport ..linalg.cython_lapack as cython_lapack
+
+# Within SciPy, if LAPACK functions are needed in C/C++/Fortran,
+# these wrappers should not be used.
+# The original libraries should be linked directly.
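+
+# A minimal Cython sketch of calling one of these declarations from a
+# hypothetical downstream module (LAPACK expects column-major storage):
+#
+#     from scipy.linalg.cython_lapack cimport cgetrf
+#     cdef int m = 2, n = 2, lda = 2, info = 0
+#     cdef int ipiv[2]
+#     cdef float complex a[4]   # 2x2 matrix stored column by column
+#     cgetrf(&m, &n, &a[0], &lda, &ipiv[0], &info)  # in-place LU factorization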
+
+ctypedef float s
+ctypedef double d
+ctypedef float complex c
+ctypedef double complex z
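+# The aliases follow the conventional BLAS/LAPACK type prefixes:
+# s = single real, d = double real, c = single complex, z = double complex.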
+
+# Function pointer type declarations for
+# gees and gges families of functions.
+ctypedef bint cselect1(c*)
+ctypedef bint cselect2(c*, c*)
+ctypedef bint dselect2(d*, d*)
+ctypedef bint dselect3(d*, d*, d*)
+ctypedef bint sselect2(s*, s*)
+ctypedef bint sselect3(s*, s*, s*)
+ctypedef bint zselect1(z*)
+ctypedef bint zselect2(z*, z*)
+
+cdef void cbbcsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, int *m, int *p, int *q, s *theta, s *phi, c *u1, int *ldu1, c *u2, int *ldu2, c *v1t, int *ldv1t, c *v2t, int *ldv2t, s *b11d, s *b11e, s *b12d, s *b12e, s *b21d, s *b21e, s *b22d, s *b22e, s *rwork, int *lrwork, int *info) nogil
+
+cdef void cbdsqr(char *uplo, int *n, int *ncvt, int *nru, int *ncc, s *d, s *e, c *vt, int *ldvt, c *u, int *ldu, c *c, int *ldc, s *rwork, int *info) nogil
+
+cdef void cgbbrd(char *vect, int *m, int *n, int *ncc, int *kl, int *ku, c *ab, int *ldab, s *d, s *e, c *q, int *ldq, c *pt, int *ldpt, c *c, int *ldc, c *work, s *rwork, int *info) nogil
+
+cdef void cgbcon(char *norm, int *n, int *kl, int *ku, c *ab, int *ldab, int *ipiv, s *anorm, s *rcond, c *work, s *rwork, int *info) nogil
+
+cdef void cgbequ(int *m, int *n, int *kl, int *ku, c *ab, int *ldab, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil
+
+cdef void cgbequb(int *m, int *n, int *kl, int *ku, c *ab, int *ldab, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil
+
+cdef void cgbrfs(char *trans, int *n, int *kl, int *ku, int *nrhs, c *ab, int *ldab, c *afb, int *ldafb, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void cgbsv(int *n, int *kl, int *ku, int *nrhs, c *ab, int *ldab, int *ipiv, c *b, int *ldb, int *info) nogil
+
+cdef void cgbsvx(char *fact, char *trans, int *n, int *kl, int *ku, int *nrhs, c *ab, int *ldab, c *afb, int *ldafb, int *ipiv, char *equed, s *r, s *c, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void cgbtf2(int *m, int *n, int *kl, int *ku, c *ab, int *ldab, int *ipiv, int *info) nogil
+
+cdef void cgbtrf(int *m, int *n, int *kl, int *ku, c *ab, int *ldab, int *ipiv, int *info) nogil
+
+cdef void cgbtrs(char *trans, int *n, int *kl, int *ku, int *nrhs, c *ab, int *ldab, int *ipiv, c *b, int *ldb, int *info) nogil
+
+cdef void cgebak(char *job, char *side, int *n, int *ilo, int *ihi, s *scale, int *m, c *v, int *ldv, int *info) nogil
+
+cdef void cgebal(char *job, int *n, c *a, int *lda, int *ilo, int *ihi, s *scale, int *info) nogil
+
+cdef void cgebd2(int *m, int *n, c *a, int *lda, s *d, s *e, c *tauq, c *taup, c *work, int *info) nogil
+
+cdef void cgebrd(int *m, int *n, c *a, int *lda, s *d, s *e, c *tauq, c *taup, c *work, int *lwork, int *info) nogil
+
+cdef void cgecon(char *norm, int *n, c *a, int *lda, s *anorm, s *rcond, c *work, s *rwork, int *info) nogil
+
+cdef void cgeequ(int *m, int *n, c *a, int *lda, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil
+
+cdef void cgeequb(int *m, int *n, c *a, int *lda, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil
+
+cdef void cgees(char *jobvs, char *sort, cselect1 *select, int *n, c *a, int *lda, int *sdim, c *w, c *vs, int *ldvs, c *work, int *lwork, s *rwork, bint *bwork, int *info) nogil
+
+cdef void cgeesx(char *jobvs, char *sort, cselect1 *select, char *sense, int *n, c *a, int *lda, int *sdim, c *w, c *vs, int *ldvs, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, bint *bwork, int *info) nogil
+
+cdef void cgeev(char *jobvl, char *jobvr, int *n, c *a, int *lda, c *w, c *vl, int *ldvl, c *vr, int *ldvr, c *work, int *lwork, s *rwork, int *info) nogil
+
+cdef void cgeevx(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, c *a, int *lda, c *w, c *vl, int *ldvl, c *vr, int *ldvr, int *ilo, int *ihi, s *scale, s *abnrm, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, int *info) nogil
+
+cdef void cgehd2(int *n, int *ilo, int *ihi, c *a, int *lda, c *tau, c *work, int *info) nogil
+
+cdef void cgehrd(int *n, int *ilo, int *ihi, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil
+
+cdef void cgelq2(int *m, int *n, c *a, int *lda, c *tau, c *work, int *info) nogil
+
+cdef void cgelqf(int *m, int *n, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil
+
+cdef void cgels(char *trans, int *m, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, c *work, int *lwork, int *info) nogil
+
+cdef void cgelsd(int *m, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, s *s, s *rcond, int *rank, c *work, int *lwork, s *rwork, int *iwork, int *info) nogil
+
+cdef void cgelss(int *m, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, s *s, s *rcond, int *rank, c *work, int *lwork, s *rwork, int *info) nogil
+
+cdef void cgelsy(int *m, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, int *jpvt, s *rcond, int *rank, c *work, int *lwork, s *rwork, int *info) nogil
+
+cdef void cgemqrt(char *side, char *trans, int *m, int *n, int *k, int *nb, c *v, int *ldv, c *t, int *ldt, c *c, int *ldc, c *work, int *info) nogil
+
+cdef void cgeql2(int *m, int *n, c *a, int *lda, c *tau, c *work, int *info) nogil
+
+cdef void cgeqlf(int *m, int *n, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil
+
+cdef void cgeqp3(int *m, int *n, c *a, int *lda, int *jpvt, c *tau, c *work, int *lwork, s *rwork, int *info) nogil
+
+cdef void cgeqr2(int *m, int *n, c *a, int *lda, c *tau, c *work, int *info) nogil
+
+cdef void cgeqr2p(int *m, int *n, c *a, int *lda, c *tau, c *work, int *info) nogil
+
+cdef void cgeqrf(int *m, int *n, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil
+
+cdef void cgeqrfp(int *m, int *n, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil
+
+cdef void cgeqrt(int *m, int *n, int *nb, c *a, int *lda, c *t, int *ldt, c *work, int *info) nogil
+
+cdef void cgeqrt2(int *m, int *n, c *a, int *lda, c *t, int *ldt, int *info) nogil
+
+cdef void cgeqrt3(int *m, int *n, c *a, int *lda, c *t, int *ldt, int *info) nogil
+
+cdef void cgerfs(char *trans, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void cgerq2(int *m, int *n, c *a, int *lda, c *tau, c *work, int *info) nogil
+
+cdef void cgerqf(int *m, int *n, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil
+
+cdef void cgesc2(int *n, c *a, int *lda, c *rhs, int *ipiv, int *jpiv, s *scale) nogil
+
+cdef void cgesdd(char *jobz, int *m, int *n, c *a, int *lda, s *s, c *u, int *ldu, c *vt, int *ldvt, c *work, int *lwork, s *rwork, int *iwork, int *info) nogil
+
+cdef void cgesv(int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, int *info) nogil
+
+cdef void cgesvd(char *jobu, char *jobvt, int *m, int *n, c *a, int *lda, s *s, c *u, int *ldu, c *vt, int *ldvt, c *work, int *lwork, s *rwork, int *info) nogil
+
+cdef void cgesvx(char *fact, char *trans, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, int *ipiv, char *equed, s *r, s *c, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void cgetc2(int *n, c *a, int *lda, int *ipiv, int *jpiv, int *info) nogil
+
+cdef void cgetf2(int *m, int *n, c *a, int *lda, int *ipiv, int *info) nogil
+
+cdef void cgetrf(int *m, int *n, c *a, int *lda, int *ipiv, int *info) nogil
+
+cdef void cgetri(int *n, c *a, int *lda, int *ipiv, c *work, int *lwork, int *info) nogil
+
+cdef void cgetrs(char *trans, int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, int *info) nogil
+
+cdef void cggbak(char *job, char *side, int *n, int *ilo, int *ihi, s *lscale, s *rscale, int *m, c *v, int *ldv, int *info) nogil
+
+cdef void cggbal(char *job, int *n, c *a, int *lda, c *b, int *ldb, int *ilo, int *ihi, s *lscale, s *rscale, s *work, int *info) nogil
+
+cdef void cgges(char *jobvsl, char *jobvsr, char *sort, cselect2 *selctg, int *n, c *a, int *lda, c *b, int *ldb, int *sdim, c *alpha, c *beta, c *vsl, int *ldvsl, c *vsr, int *ldvsr, c *work, int *lwork, s *rwork, bint *bwork, int *info) nogil
+
+cdef void cggesx(char *jobvsl, char *jobvsr, char *sort, cselect2 *selctg, char *sense, int *n, c *a, int *lda, c *b, int *ldb, int *sdim, c *alpha, c *beta, c *vsl, int *ldvsl, c *vsr, int *ldvsr, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, int *iwork, int *liwork, bint *bwork, int *info) nogil
+
+cdef void cggev(char *jobvl, char *jobvr, int *n, c *a, int *lda, c *b, int *ldb, c *alpha, c *beta, c *vl, int *ldvl, c *vr, int *ldvr, c *work, int *lwork, s *rwork, int *info) nogil
+
+cdef void cggevx(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, c *a, int *lda, c *b, int *ldb, c *alpha, c *beta, c *vl, int *ldvl, c *vr, int *ldvr, int *ilo, int *ihi, s *lscale, s *rscale, s *abnrm, s *bbnrm, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, int *iwork, bint *bwork, int *info) nogil
+
+cdef void cggglm(int *n, int *m, int *p, c *a, int *lda, c *b, int *ldb, c *d, c *x, c *y, c *work, int *lwork, int *info) nogil
+
+cdef void cgghrd(char *compq, char *compz, int *n, int *ilo, int *ihi, c *a, int *lda, c *b, int *ldb, c *q, int *ldq, c *z, int *ldz, int *info) nogil
+
+cdef void cgglse(int *m, int *n, int *p, c *a, int *lda, c *b, int *ldb, c *c, c *d, c *x, c *work, int *lwork, int *info) nogil
+
+cdef void cggqrf(int *n, int *m, int *p, c *a, int *lda, c *taua, c *b, int *ldb, c *taub, c *work, int *lwork, int *info) nogil
+
+cdef void cggrqf(int *m, int *p, int *n, c *a, int *lda, c *taua, c *b, int *ldb, c *taub, c *work, int *lwork, int *info) nogil
+
+cdef void cgtcon(char *norm, int *n, c *dl, c *d, c *du, c *du2, int *ipiv, s *anorm, s *rcond, c *work, int *info) nogil
+
+cdef void cgtrfs(char *trans, int *n, int *nrhs, c *dl, c *d, c *du, c *dlf, c *df, c *duf, c *du2, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void cgtsv(int *n, int *nrhs, c *dl, c *d, c *du, c *b, int *ldb, int *info) nogil
+
+cdef void cgtsvx(char *fact, char *trans, int *n, int *nrhs, c *dl, c *d, c *du, c *dlf, c *df, c *duf, c *du2, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void cgttrf(int *n, c *dl, c *d, c *du, c *du2, int *ipiv, int *info) nogil
+
+cdef void cgttrs(char *trans, int *n, int *nrhs, c *dl, c *d, c *du, c *du2, int *ipiv, c *b, int *ldb, int *info) nogil
+
+cdef void cgtts2(int *itrans, int *n, int *nrhs, c *dl, c *d, c *du, c *du2, int *ipiv, c *b, int *ldb) nogil
+
+cdef void chbev(char *jobz, char *uplo, int *n, int *kd, c *ab, int *ldab, s *w, c *z, int *ldz, c *work, s *rwork, int *info) nogil
+
+cdef void chbevd(char *jobz, char *uplo, int *n, int *kd, c *ab, int *ldab, s *w, c *z, int *ldz, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void chbevx(char *jobz, char *range, char *uplo, int *n, int *kd, c *ab, int *ldab, c *q, int *ldq, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, c *work, s *rwork, int *iwork, int *ifail, int *info) nogil
+
+cdef void chbgst(char *vect, char *uplo, int *n, int *ka, int *kb, c *ab, int *ldab, c *bb, int *ldbb, c *x, int *ldx, c *work, s *rwork, int *info) nogil
+
+cdef void chbgv(char *jobz, char *uplo, int *n, int *ka, int *kb, c *ab, int *ldab, c *bb, int *ldbb, s *w, c *z, int *ldz, c *work, s *rwork, int *info) nogil
+
+cdef void chbgvd(char *jobz, char *uplo, int *n, int *ka, int *kb, c *ab, int *ldab, c *bb, int *ldbb, s *w, c *z, int *ldz, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void chbgvx(char *jobz, char *range, char *uplo, int *n, int *ka, int *kb, c *ab, int *ldab, c *bb, int *ldbb, c *q, int *ldq, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, c *work, s *rwork, int *iwork, int *ifail, int *info) nogil
+
+cdef void chbtrd(char *vect, char *uplo, int *n, int *kd, c *ab, int *ldab, s *d, s *e, c *q, int *ldq, c *work, int *info) nogil
+
+cdef void checon(char *uplo, int *n, c *a, int *lda, int *ipiv, s *anorm, s *rcond, c *work, int *info) nogil
+
+cdef void cheequb(char *uplo, int *n, c *a, int *lda, s *s, s *scond, s *amax, c *work, int *info) nogil
+
+cdef void cheev(char *jobz, char *uplo, int *n, c *a, int *lda, s *w, c *work, int *lwork, s *rwork, int *info) nogil
+
+cdef void cheevd(char *jobz, char *uplo, int *n, c *a, int *lda, s *w, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void cheevr(char *jobz, char *range, char *uplo, int *n, c *a, int *lda, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, int *isuppz, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void cheevx(char *jobz, char *range, char *uplo, int *n, c *a, int *lda, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, c *work, int *lwork, s *rwork, int *iwork, int *ifail, int *info) nogil
+
+cdef void chegs2(int *itype, char *uplo, int *n, c *a, int *lda, c *b, int *ldb, int *info) nogil
+
+cdef void chegst(int *itype, char *uplo, int *n, c *a, int *lda, c *b, int *ldb, int *info) nogil
+
+cdef void chegv(int *itype, char *jobz, char *uplo, int *n, c *a, int *lda, c *b, int *ldb, s *w, c *work, int *lwork, s *rwork, int *info) nogil
+
+cdef void chegvd(int *itype, char *jobz, char *uplo, int *n, c *a, int *lda, c *b, int *ldb, s *w, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void chegvx(int *itype, char *jobz, char *range, char *uplo, int *n, c *a, int *lda, c *b, int *ldb, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, c *work, int *lwork, s *rwork, int *iwork, int *ifail, int *info) nogil
+
+cdef void cherfs(char *uplo, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void chesv(char *uplo, int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, c *work, int *lwork, int *info) nogil
+
+cdef void chesvx(char *fact, char *uplo, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, int *lwork, s *rwork, int *info) nogil
+
+cdef void cheswapr(char *uplo, int *n, c *a, int *lda, int *i1, int *i2) nogil
+
+cdef void chetd2(char *uplo, int *n, c *a, int *lda, s *d, s *e, c *tau, int *info) nogil
+
+cdef void chetf2(char *uplo, int *n, c *a, int *lda, int *ipiv, int *info) nogil
+
+cdef void chetrd(char *uplo, int *n, c *a, int *lda, s *d, s *e, c *tau, c *work, int *lwork, int *info) nogil
+
+cdef void chetrf(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *lwork, int *info) nogil
+
+cdef void chetri(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *info) nogil
+
+cdef void chetri2(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *lwork, int *info) nogil
+
+cdef void chetri2x(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *nb, int *info) nogil
+
+cdef void chetrs(char *uplo, int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, int *info) nogil
+
+cdef void chetrs2(char *uplo, int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, c *work, int *info) nogil
+
+cdef void chfrk(char *transr, char *uplo, char *trans, int *n, int *k, s *alpha, c *a, int *lda, s *beta, c *c) nogil
+
+cdef void chgeqz(char *job, char *compq, char *compz, int *n, int *ilo, int *ihi, c *h, int *ldh, c *t, int *ldt, c *alpha, c *beta, c *q, int *ldq, c *z, int *ldz, c *work, int *lwork, s *rwork, int *info) nogil
+
+cdef char chla_transtype(int *trans) nogil
+
+cdef void chpcon(char *uplo, int *n, c *ap, int *ipiv, s *anorm, s *rcond, c *work, int *info) nogil
+
+cdef void chpev(char *jobz, char *uplo, int *n, c *ap, s *w, c *z, int *ldz, c *work, s *rwork, int *info) nogil
+
+cdef void chpevd(char *jobz, char *uplo, int *n, c *ap, s *w, c *z, int *ldz, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void chpevx(char *jobz, char *range, char *uplo, int *n, c *ap, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, c *work, s *rwork, int *iwork, int *ifail, int *info) nogil
+
+cdef void chpgst(int *itype, char *uplo, int *n, c *ap, c *bp, int *info) nogil
+
+cdef void chpgv(int *itype, char *jobz, char *uplo, int *n, c *ap, c *bp, s *w, c *z, int *ldz, c *work, s *rwork, int *info) nogil
+
+cdef void chpgvd(int *itype, char *jobz, char *uplo, int *n, c *ap, c *bp, s *w, c *z, int *ldz, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void chpgvx(int *itype, char *jobz, char *range, char *uplo, int *n, c *ap, c *bp, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, c *work, s *rwork, int *iwork, int *ifail, int *info) nogil
+
+cdef void chprfs(char *uplo, int *n, int *nrhs, c *ap, c *afp, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void chpsv(char *uplo, int *n, int *nrhs, c *ap, int *ipiv, c *b, int *ldb, int *info) nogil
+
+cdef void chpsvx(char *fact, char *uplo, int *n, int *nrhs, c *ap, c *afp, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void chptrd(char *uplo, int *n, c *ap, s *d, s *e, c *tau, int *info) nogil
+
+cdef void chptrf(char *uplo, int *n, c *ap, int *ipiv, int *info) nogil
+
+cdef void chptri(char *uplo, int *n, c *ap, int *ipiv, c *work, int *info) nogil
+
+cdef void chptrs(char *uplo, int *n, int *nrhs, c *ap, int *ipiv, c *b, int *ldb, int *info) nogil
+
+cdef void chsein(char *side, char *eigsrc, char *initv, bint *select, int *n, c *h, int *ldh, c *w, c *vl, int *ldvl, c *vr, int *ldvr, int *mm, int *m, c *work, s *rwork, int *ifaill, int *ifailr, int *info) nogil
+
+cdef void chseqr(char *job, char *compz, int *n, int *ilo, int *ihi, c *h, int *ldh, c *w, c *z, int *ldz, c *work, int *lwork, int *info) nogil
+
+cdef void clabrd(int *m, int *n, int *nb, c *a, int *lda, s *d, s *e, c *tauq, c *taup, c *x, int *ldx, c *y, int *ldy) nogil
+
+cdef void clacgv(int *n, c *x, int *incx) nogil
+
+cdef void clacn2(int *n, c *v, c *x, s *est, int *kase, int *isave) nogil
+
+cdef void clacon(int *n, c *v, c *x, s *est, int *kase) nogil
+
+cdef void clacp2(char *uplo, int *m, int *n, s *a, int *lda, c *b, int *ldb) nogil
+
+cdef void clacpy(char *uplo, int *m, int *n, c *a, int *lda, c *b, int *ldb) nogil
+
+cdef void clacrm(int *m, int *n, c *a, int *lda, s *b, int *ldb, c *c, int *ldc, s *rwork) nogil
+
+cdef void clacrt(int *n, c *cx, int *incx, c *cy, int *incy, c *c, c *s) nogil
+
+cdef c cladiv(c *x, c *y) nogil
+
+cdef void claed0(int *qsiz, int *n, s *d, s *e, c *q, int *ldq, c *qstore, int *ldqs, s *rwork, int *iwork, int *info) nogil
+
+cdef void claed7(int *n, int *cutpnt, int *qsiz, int *tlvls, int *curlvl, int *curpbm, s *d, c *q, int *ldq, s *rho, int *indxq, s *qstore, int *qptr, int *prmptr, int *perm, int *givptr, int *givcol, s *givnum, c *work, s *rwork, int *iwork, int *info) nogil
+
+cdef void claed8(int *k, int *n, int *qsiz, c *q, int *ldq, s *d, s *rho, int *cutpnt, s *z, s *dlamda, c *q2, int *ldq2, s *w, int *indxp, int *indx, int *indxq, int *perm, int *givptr, int *givcol, s *givnum, int *info) nogil
+
+cdef void claein(bint *rightv, bint *noinit, int *n, c *h, int *ldh, c *w, c *v, c *b, int *ldb, s *rwork, s *eps3, s *smlnum, int *info) nogil
+
+cdef void claesy(c *a, c *b, c *c, c *rt1, c *rt2, c *evscal, c *cs1, c *sn1) nogil
+
+cdef void claev2(c *a, c *b, c *c, s *rt1, s *rt2, s *cs1, c *sn1) nogil
+
+cdef void clag2z(int *m, int *n, c *sa, int *ldsa, z *a, int *lda, int *info) nogil
+
+cdef void clags2(bint *upper, s *a1, c *a2, s *a3, s *b1, c *b2, s *b3, s *csu, c *snu, s *csv, c *snv, s *csq, c *snq) nogil
+
+cdef void clagtm(char *trans, int *n, int *nrhs, s *alpha, c *dl, c *d, c *du, c *x, int *ldx, s *beta, c *b, int *ldb) nogil
+
+cdef void clahef(char *uplo, int *n, int *nb, int *kb, c *a, int *lda, int *ipiv, c *w, int *ldw, int *info) nogil
+
+cdef void clahqr(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, c *h, int *ldh, c *w, int *iloz, int *ihiz, c *z, int *ldz, int *info) nogil
+
+cdef void clahr2(int *n, int *k, int *nb, c *a, int *lda, c *tau, c *t, int *ldt, c *y, int *ldy) nogil
+
+cdef void claic1(int *job, int *j, c *x, s *sest, c *w, c *gamma, s *sestpr, c *s, c *c) nogil
+
+cdef void clals0(int *icompq, int *nl, int *nr, int *sqre, int *nrhs, c *b, int *ldb, c *bx, int *ldbx, int *perm, int *givptr, int *givcol, int *ldgcol, s *givnum, int *ldgnum, s *poles, s *difl, s *difr, s *z, int *k, s *c, s *s, s *rwork, int *info) nogil
+
+cdef void clalsa(int *icompq, int *smlsiz, int *n, int *nrhs, c *b, int *ldb, c *bx, int *ldbx, s *u, int *ldu, s *vt, int *k, s *difl, s *difr, s *z, s *poles, int *givptr, int *givcol, int *ldgcol, int *perm, s *givnum, s *c, s *s, s *rwork, int *iwork, int *info) nogil
+
+cdef void clalsd(char *uplo, int *smlsiz, int *n, int *nrhs, s *d, s *e, c *b, int *ldb, s *rcond, int *rank, c *work, s *rwork, int *iwork, int *info) nogil
+
+cdef s clangb(char *norm, int *n, int *kl, int *ku, c *ab, int *ldab, s *work) nogil
+
+cdef s clange(char *norm, int *m, int *n, c *a, int *lda, s *work) nogil
+
+cdef s clangt(char *norm, int *n, c *dl, c *d, c *du) nogil
+
+cdef s clanhb(char *norm, char *uplo, int *n, int *k, c *ab, int *ldab, s *work) nogil
+
+cdef s clanhe(char *norm, char *uplo, int *n, c *a, int *lda, s *work) nogil
+
+cdef s clanhf(char *norm, char *transr, char *uplo, int *n, c *a, s *work) nogil
+
+cdef s clanhp(char *norm, char *uplo, int *n, c *ap, s *work) nogil
+
+cdef s clanhs(char *norm, int *n, c *a, int *lda, s *work) nogil
+
+cdef s clanht(char *norm, int *n, s *d, c *e) nogil
+
+cdef s clansb(char *norm, char *uplo, int *n, int *k, c *ab, int *ldab, s *work) nogil
+
+cdef s clansp(char *norm, char *uplo, int *n, c *ap, s *work) nogil
+
+cdef s clansy(char *norm, char *uplo, int *n, c *a, int *lda, s *work) nogil
+
+cdef s clantb(char *norm, char *uplo, char *diag, int *n, int *k, c *ab, int *ldab, s *work) nogil
+
+cdef s clantp(char *norm, char *uplo, char *diag, int *n, c *ap, s *work) nogil
+
+cdef s clantr(char *norm, char *uplo, char *diag, int *m, int *n, c *a, int *lda, s *work) nogil
+
+cdef void clapll(int *n, c *x, int *incx, c *y, int *incy, s *ssmin) nogil
+
+cdef void clapmr(bint *forwrd, int *m, int *n, c *x, int *ldx, int *k) nogil
+
+cdef void clapmt(bint *forwrd, int *m, int *n, c *x, int *ldx, int *k) nogil
+
+cdef void claqgb(int *m, int *n, int *kl, int *ku, c *ab, int *ldab, s *r, s *c, s *rowcnd, s *colcnd, s *amax, char *equed) nogil
+
+cdef void claqge(int *m, int *n, c *a, int *lda, s *r, s *c, s *rowcnd, s *colcnd, s *amax, char *equed) nogil
+
+cdef void claqhb(char *uplo, int *n, int *kd, c *ab, int *ldab, s *s, s *scond, s *amax, char *equed) nogil
+
+cdef void claqhe(char *uplo, int *n, c *a, int *lda, s *s, s *scond, s *amax, char *equed) nogil
+
+cdef void claqhp(char *uplo, int *n, c *ap, s *s, s *scond, s *amax, char *equed) nogil
+
+cdef void claqp2(int *m, int *n, int *offset, c *a, int *lda, int *jpvt, c *tau, s *vn1, s *vn2, c *work) nogil
+
+cdef void claqps(int *m, int *n, int *offset, int *nb, int *kb, c *a, int *lda, int *jpvt, c *tau, s *vn1, s *vn2, c *auxv, c *f, int *ldf) nogil
+
+cdef void claqr0(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, c *h, int *ldh, c *w, int *iloz, int *ihiz, c *z, int *ldz, c *work, int *lwork, int *info) nogil
+
+cdef void claqr1(int *n, c *h, int *ldh, c *s1, c *s2, c *v) nogil
+
+cdef void claqr2(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, c *h, int *ldh, int *iloz, int *ihiz, c *z, int *ldz, int *ns, int *nd, c *sh, c *v, int *ldv, int *nh, c *t, int *ldt, int *nv, c *wv, int *ldwv, c *work, int *lwork) nogil
+
+cdef void claqr3(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, c *h, int *ldh, int *iloz, int *ihiz, c *z, int *ldz, int *ns, int *nd, c *sh, c *v, int *ldv, int *nh, c *t, int *ldt, int *nv, c *wv, int *ldwv, c *work, int *lwork) nogil
+
+cdef void claqr4(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, c *h, int *ldh, c *w, int *iloz, int *ihiz, c *z, int *ldz, c *work, int *lwork, int *info) nogil
+
+cdef void claqr5(bint *wantt, bint *wantz, int *kacc22, int *n, int *ktop, int *kbot, int *nshfts, c *s, c *h, int *ldh, int *iloz, int *ihiz, c *z, int *ldz, c *v, int *ldv, c *u, int *ldu, int *nv, c *wv, int *ldwv, int *nh, c *wh, int *ldwh) nogil
+
+cdef void claqsb(char *uplo, int *n, int *kd, c *ab, int *ldab, s *s, s *scond, s *amax, char *equed) nogil
+
+cdef void claqsp(char *uplo, int *n, c *ap, s *s, s *scond, s *amax, char *equed) nogil
+
+cdef void claqsy(char *uplo, int *n, c *a, int *lda, s *s, s *scond, s *amax, char *equed) nogil
+
+cdef void clar1v(int *n, int *b1, int *bn, s *lambda_, s *d, s *l, s *ld, s *lld, s *pivmin, s *gaptol, c *z, bint *wantnc, int *negcnt, s *ztz, s *mingma, int *r, int *isuppz, s *nrminv, s *resid, s *rqcorr, s *work) nogil
+
+cdef void clar2v(int *n, c *x, c *y, c *z, int *incx, s *c, c *s, int *incc) nogil
+
+cdef void clarcm(int *m, int *n, s *a, int *lda, c *b, int *ldb, c *c, int *ldc, s *rwork) nogil
+
+cdef void clarf(char *side, int *m, int *n, c *v, int *incv, c *tau, c *c, int *ldc, c *work) nogil
+
+cdef void clarfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, c *v, int *ldv, c *t, int *ldt, c *c, int *ldc, c *work, int *ldwork) nogil
+
+cdef void clarfg(int *n, c *alpha, c *x, int *incx, c *tau) nogil
+
+cdef void clarfgp(int *n, c *alpha, c *x, int *incx, c *tau) nogil
+
+cdef void clarft(char *direct, char *storev, int *n, int *k, c *v, int *ldv, c *tau, c *t, int *ldt) nogil
+
+cdef void clarfx(char *side, int *m, int *n, c *v, c *tau, c *c, int *ldc, c *work) nogil
+
+cdef void clargv(int *n, c *x, int *incx, c *y, int *incy, s *c, int *incc) nogil
+
+cdef void clarnv(int *idist, int *iseed, int *n, c *x) nogil
+
+cdef void clarrv(int *n, s *vl, s *vu, s *d, s *l, s *pivmin, int *isplit, int *m, int *dol, int *dou, s *minrgp, s *rtol1, s *rtol2, s *w, s *werr, s *wgap, int *iblock, int *indexw, s *gers, c *z, int *ldz, int *isuppz, s *work, int *iwork, int *info) nogil
+
+cdef void clartg(c *f, c *g, s *cs, c *sn, c *r) nogil
+
+cdef void clartv(int *n, c *x, int *incx, c *y, int *incy, s *c, c *s, int *incc) nogil
+
+cdef void clarz(char *side, int *m, int *n, int *l, c *v, int *incv, c *tau, c *c, int *ldc, c *work) nogil
+
+cdef void clarzb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, c *v, int *ldv, c *t, int *ldt, c *c, int *ldc, c *work, int *ldwork) nogil
+
+cdef void clarzt(char *direct, char *storev, int *n, int *k, c *v, int *ldv, c *tau, c *t, int *ldt) nogil
+
+cdef void clascl(char *type_bn, int *kl, int *ku, s *cfrom, s *cto, int *m, int *n, c *a, int *lda, int *info) nogil
+
+cdef void claset(char *uplo, int *m, int *n, c *alpha, c *beta, c *a, int *lda) nogil
+
+cdef void clasr(char *side, char *pivot, char *direct, int *m, int *n, s *c, s *s, c *a, int *lda) nogil
+
+cdef void classq(int *n, c *x, int *incx, s *scale, s *sumsq) nogil
+
+cdef void claswp(int *n, c *a, int *lda, int *k1, int *k2, int *ipiv, int *incx) nogil
+
+cdef void clasyf(char *uplo, int *n, int *nb, int *kb, c *a, int *lda, int *ipiv, c *w, int *ldw, int *info) nogil
+
+cdef void clatbs(char *uplo, char *trans, char *diag, char *normin, int *n, int *kd, c *ab, int *ldab, c *x, s *scale, s *cnorm, int *info) nogil
+
+cdef void clatdf(int *ijob, int *n, c *z, int *ldz, c *rhs, s *rdsum, s *rdscal, int *ipiv, int *jpiv) nogil
+
+cdef void clatps(char *uplo, char *trans, char *diag, char *normin, int *n, c *ap, c *x, s *scale, s *cnorm, int *info) nogil
+
+cdef void clatrd(char *uplo, int *n, int *nb, c *a, int *lda, s *e, c *tau, c *w, int *ldw) nogil
+
+cdef void clatrs(char *uplo, char *trans, char *diag, char *normin, int *n, c *a, int *lda, c *x, s *scale, s *cnorm, int *info) nogil
+
+cdef void clatrz(int *m, int *n, int *l, c *a, int *lda, c *tau, c *work) nogil
+
+cdef void clauu2(char *uplo, int *n, c *a, int *lda, int *info) nogil
+
+cdef void clauum(char *uplo, int *n, c *a, int *lda, int *info) nogil
+
+cdef void cpbcon(char *uplo, int *n, int *kd, c *ab, int *ldab, s *anorm, s *rcond, c *work, s *rwork, int *info) nogil
+
+cdef void cpbequ(char *uplo, int *n, int *kd, c *ab, int *ldab, s *s, s *scond, s *amax, int *info) nogil
+
+cdef void cpbrfs(char *uplo, int *n, int *kd, int *nrhs, c *ab, int *ldab, c *afb, int *ldafb, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void cpbstf(char *uplo, int *n, int *kd, c *ab, int *ldab, int *info) nogil
+
+cdef void cpbsv(char *uplo, int *n, int *kd, int *nrhs, c *ab, int *ldab, c *b, int *ldb, int *info) nogil
+
+cdef void cpbsvx(char *fact, char *uplo, int *n, int *kd, int *nrhs, c *ab, int *ldab, c *afb, int *ldafb, char *equed, s *s, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void cpbtf2(char *uplo, int *n, int *kd, c *ab, int *ldab, int *info) nogil
+
+cdef void cpbtrf(char *uplo, int *n, int *kd, c *ab, int *ldab, int *info) nogil
+
+cdef void cpbtrs(char *uplo, int *n, int *kd, int *nrhs, c *ab, int *ldab, c *b, int *ldb, int *info) nogil
+
+cdef void cpftrf(char *transr, char *uplo, int *n, c *a, int *info) nogil
+
+cdef void cpftri(char *transr, char *uplo, int *n, c *a, int *info) nogil
+
+cdef void cpftrs(char *transr, char *uplo, int *n, int *nrhs, c *a, c *b, int *ldb, int *info) nogil
+
+cdef void cpocon(char *uplo, int *n, c *a, int *lda, s *anorm, s *rcond, c *work, s *rwork, int *info) nogil
+
+cdef void cpoequ(int *n, c *a, int *lda, s *s, s *scond, s *amax, int *info) nogil
+
+cdef void cpoequb(int *n, c *a, int *lda, s *s, s *scond, s *amax, int *info) nogil
+
+cdef void cporfs(char *uplo, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void cposv(char *uplo, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, int *info) nogil
+
+cdef void cposvx(char *fact, char *uplo, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, char *equed, s *s, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void cpotf2(char *uplo, int *n, c *a, int *lda, int *info) nogil
+
+cdef void cpotrf(char *uplo, int *n, c *a, int *lda, int *info) nogil
+
+cdef void cpotri(char *uplo, int *n, c *a, int *lda, int *info) nogil
+
+cdef void cpotrs(char *uplo, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, int *info) nogil
+
+cdef void cppcon(char *uplo, int *n, c *ap, s *anorm, s *rcond, c *work, s *rwork, int *info) nogil
+
+cdef void cppequ(char *uplo, int *n, c *ap, s *s, s *scond, s *amax, int *info) nogil
+
+cdef void cpprfs(char *uplo, int *n, int *nrhs, c *ap, c *afp, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void cppsv(char *uplo, int *n, int *nrhs, c *ap, c *b, int *ldb, int *info) nogil
+
+cdef void cppsvx(char *fact, char *uplo, int *n, int *nrhs, c *ap, c *afp, char *equed, s *s, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void cpptrf(char *uplo, int *n, c *ap, int *info) nogil
+
+cdef void cpptri(char *uplo, int *n, c *ap, int *info) nogil
+
+cdef void cpptrs(char *uplo, int *n, int *nrhs, c *ap, c *b, int *ldb, int *info) nogil
+
+cdef void cpstf2(char *uplo, int *n, c *a, int *lda, int *piv, int *rank, s *tol, s *work, int *info) nogil
+
+cdef void cpstrf(char *uplo, int *n, c *a, int *lda, int *piv, int *rank, s *tol, s *work, int *info) nogil
+
+cdef void cptcon(int *n, s *d, c *e, s *anorm, s *rcond, s *rwork, int *info) nogil
+
+cdef void cpteqr(char *compz, int *n, s *d, s *e, c *z, int *ldz, s *work, int *info) nogil
+
+cdef void cptrfs(char *uplo, int *n, int *nrhs, s *d, c *e, s *df, c *ef, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void cptsv(int *n, int *nrhs, s *d, c *e, c *b, int *ldb, int *info) nogil
+
+cdef void cptsvx(char *fact, int *n, int *nrhs, s *d, c *e, s *df, c *ef, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void cpttrf(int *n, s *d, c *e, int *info) nogil
+
+cdef void cpttrs(char *uplo, int *n, int *nrhs, s *d, c *e, c *b, int *ldb, int *info) nogil
+
+cdef void cptts2(int *iuplo, int *n, int *nrhs, s *d, c *e, c *b, int *ldb) nogil
+
+cdef void crot(int *n, c *cx, int *incx, c *cy, int *incy, s *c, c *s) nogil
+
+cdef void cspcon(char *uplo, int *n, c *ap, int *ipiv, s *anorm, s *rcond, c *work, int *info) nogil
+
+cdef void cspmv(char *uplo, int *n, c *alpha, c *ap, c *x, int *incx, c *beta, c *y, int *incy) nogil
+
+cdef void cspr(char *uplo, int *n, c *alpha, c *x, int *incx, c *ap) nogil
+
+cdef void csprfs(char *uplo, int *n, int *nrhs, c *ap, c *afp, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void cspsv(char *uplo, int *n, int *nrhs, c *ap, int *ipiv, c *b, int *ldb, int *info) nogil
+
+cdef void cspsvx(char *fact, char *uplo, int *n, int *nrhs, c *ap, c *afp, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void csptrf(char *uplo, int *n, c *ap, int *ipiv, int *info) nogil
+
+cdef void csptri(char *uplo, int *n, c *ap, int *ipiv, c *work, int *info) nogil
+
+cdef void csptrs(char *uplo, int *n, int *nrhs, c *ap, int *ipiv, c *b, int *ldb, int *info) nogil
+
+cdef void csrscl(int *n, s *sa, c *sx, int *incx) nogil
+
+cdef void cstedc(char *compz, int *n, s *d, s *e, c *z, int *ldz, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void cstegr(char *jobz, char *range, int *n, s *d, s *e, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, int *isuppz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void cstein(int *n, s *d, s *e, int *m, s *w, int *iblock, int *isplit, c *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil
+
+cdef void cstemr(char *jobz, char *range, int *n, s *d, s *e, s *vl, s *vu, int *il, int *iu, int *m, s *w, c *z, int *ldz, int *nzc, int *isuppz, bint *tryrac, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void csteqr(char *compz, int *n, s *d, s *e, c *z, int *ldz, s *work, int *info) nogil
+
+cdef void csycon(char *uplo, int *n, c *a, int *lda, int *ipiv, s *anorm, s *rcond, c *work, int *info) nogil
+
+cdef void csyconv(char *uplo, char *way, int *n, c *a, int *lda, int *ipiv, c *work, int *info) nogil
+
+cdef void csyequb(char *uplo, int *n, c *a, int *lda, s *s, s *scond, s *amax, c *work, int *info) nogil
+
+cdef void csymv(char *uplo, int *n, c *alpha, c *a, int *lda, c *x, int *incx, c *beta, c *y, int *incy) nogil
+
+cdef void csyr(char *uplo, int *n, c *alpha, c *x, int *incx, c *a, int *lda) nogil
+
+cdef void csyrfs(char *uplo, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void csysv(char *uplo, int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, c *work, int *lwork, int *info) nogil
+
+cdef void csysvx(char *fact, char *uplo, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, int *lwork, s *rwork, int *info) nogil
+
+cdef void csyswapr(char *uplo, int *n, c *a, int *lda, int *i1, int *i2) nogil
+
+cdef void csytf2(char *uplo, int *n, c *a, int *lda, int *ipiv, int *info) nogil
+
+cdef void csytrf(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *lwork, int *info) nogil
+
+cdef void csytri(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *info) nogil
+
+cdef void csytri2(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *lwork, int *info) nogil
+
+cdef void csytri2x(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *nb, int *info) nogil
+
+cdef void csytrs(char *uplo, int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, int *info) nogil
+
+cdef void csytrs2(char *uplo, int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, c *work, int *info) nogil
+
+cdef void ctbcon(char *norm, char *uplo, char *diag, int *n, int *kd, c *ab, int *ldab, s *rcond, c *work, s *rwork, int *info) nogil
+
+cdef void ctbrfs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, c *ab, int *ldab, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void ctbtrs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, c *ab, int *ldab, c *b, int *ldb, int *info) nogil
+
+cdef void ctfsm(char *transr, char *side, char *uplo, char *trans, char *diag, int *m, int *n, c *alpha, c *a, c *b, int *ldb) nogil
+
+cdef void ctftri(char *transr, char *uplo, char *diag, int *n, c *a, int *info) nogil
+
+cdef void ctfttp(char *transr, char *uplo, int *n, c *arf, c *ap, int *info) nogil
+
+cdef void ctfttr(char *transr, char *uplo, int *n, c *arf, c *a, int *lda, int *info) nogil
+
+cdef void ctgevc(char *side, char *howmny, bint *select, int *n, c *s, int *lds, c *p, int *ldp, c *vl, int *ldvl, c *vr, int *ldvr, int *mm, int *m, c *work, s *rwork, int *info) nogil
+
+cdef void ctgex2(bint *wantq, bint *wantz, int *n, c *a, int *lda, c *b, int *ldb, c *q, int *ldq, c *z, int *ldz, int *j1, int *info) nogil
+
+cdef void ctgexc(bint *wantq, bint *wantz, int *n, c *a, int *lda, c *b, int *ldb, c *q, int *ldq, c *z, int *ldz, int *ifst, int *ilst, int *info) nogil
+
+cdef void ctgsen(int *ijob, bint *wantq, bint *wantz, bint *select, int *n, c *a, int *lda, c *b, int *ldb, c *alpha, c *beta, c *q, int *ldq, c *z, int *ldz, int *m, s *pl, s *pr, s *dif, c *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void ctgsja(char *jobu, char *jobv, char *jobq, int *m, int *p, int *n, int *k, int *l, c *a, int *lda, c *b, int *ldb, s *tola, s *tolb, s *alpha, s *beta, c *u, int *ldu, c *v, int *ldv, c *q, int *ldq, c *work, int *ncycle, int *info) nogil
+
+cdef void ctgsna(char *job, char *howmny, bint *select, int *n, c *a, int *lda, c *b, int *ldb, c *vl, int *ldvl, c *vr, int *ldvr, s *s, s *dif, int *mm, int *m, c *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void ctgsy2(char *trans, int *ijob, int *m, int *n, c *a, int *lda, c *b, int *ldb, c *c, int *ldc, c *d, int *ldd, c *e, int *lde, c *f, int *ldf, s *scale, s *rdsum, s *rdscal, int *info) nogil
+
+cdef void ctgsyl(char *trans, int *ijob, int *m, int *n, c *a, int *lda, c *b, int *ldb, c *c, int *ldc, c *d, int *ldd, c *e, int *lde, c *f, int *ldf, s *scale, s *dif, c *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void ctpcon(char *norm, char *uplo, char *diag, int *n, c *ap, s *rcond, c *work, s *rwork, int *info) nogil
+
+cdef void ctpmqrt(char *side, char *trans, int *m, int *n, int *k, int *l, int *nb, c *v, int *ldv, c *t, int *ldt, c *a, int *lda, c *b, int *ldb, c *work, int *info) nogil
+
+cdef void ctpqrt(int *m, int *n, int *l, int *nb, c *a, int *lda, c *b, int *ldb, c *t, int *ldt, c *work, int *info) nogil
+
+cdef void ctpqrt2(int *m, int *n, int *l, c *a, int *lda, c *b, int *ldb, c *t, int *ldt, int *info) nogil
+
+cdef void ctprfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, c *v, int *ldv, c *t, int *ldt, c *a, int *lda, c *b, int *ldb, c *work, int *ldwork) nogil
+
+cdef void ctprfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, c *ap, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void ctptri(char *uplo, char *diag, int *n, c *ap, int *info) nogil
+
+cdef void ctptrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, c *ap, c *b, int *ldb, int *info) nogil
+
+cdef void ctpttf(char *transr, char *uplo, int *n, c *ap, c *arf, int *info) nogil
+
+cdef void ctpttr(char *uplo, int *n, c *ap, c *a, int *lda, int *info) nogil
+
+cdef void ctrcon(char *norm, char *uplo, char *diag, int *n, c *a, int *lda, s *rcond, c *work, s *rwork, int *info) nogil
+
+cdef void ctrevc(char *side, char *howmny, bint *select, int *n, c *t, int *ldt, c *vl, int *ldvl, c *vr, int *ldvr, int *mm, int *m, c *work, s *rwork, int *info) nogil
+
+cdef void ctrexc(char *compq, int *n, c *t, int *ldt, c *q, int *ldq, int *ifst, int *ilst, int *info) nogil
+
+cdef void ctrrfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil
+
+cdef void ctrsen(char *job, char *compq, bint *select, int *n, c *t, int *ldt, c *q, int *ldq, c *w, int *m, s *s, s *sep, c *work, int *lwork, int *info) nogil
+
+cdef void ctrsna(char *job, char *howmny, bint *select, int *n, c *t, int *ldt, c *vl, int *ldvl, c *vr, int *ldvr, s *s, s *sep, int *mm, int *m, c *work, int *ldwork, s *rwork, int *info) nogil
+
+cdef void ctrsyl(char *trana, char *tranb, int *isgn, int *m, int *n, c *a, int *lda, c *b, int *ldb, c *c, int *ldc, s *scale, int *info) nogil
+
+cdef void ctrti2(char *uplo, char *diag, int *n, c *a, int *lda, int *info) nogil
+
+cdef void ctrtri(char *uplo, char *diag, int *n, c *a, int *lda, int *info) nogil
+
+cdef void ctrtrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, int *info) nogil
+
+cdef void ctrttf(char *transr, char *uplo, int *n, c *a, int *lda, c *arf, int *info) nogil
+
+cdef void ctrttp(char *uplo, int *n, c *a, int *lda, c *ap, int *info) nogil
+
+cdef void ctzrzf(int *m, int *n, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil
+
+cdef void cunbdb(char *trans, char *signs, int *m, int *p, int *q, c *x11, int *ldx11, c *x12, int *ldx12, c *x21, int *ldx21, c *x22, int *ldx22, s *theta, s *phi, c *taup1, c *taup2, c *tauq1, c *tauq2, c *work, int *lwork, int *info) nogil
+
+cdef void cuncsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, char *signs, int *m, int *p, int *q, c *x11, int *ldx11, c *x12, int *ldx12, c *x21, int *ldx21, c *x22, int *ldx22, s *theta, c *u1, int *ldu1, c *u2, int *ldu2, c *v1t, int *ldv1t, c *v2t, int *ldv2t, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *info) nogil
+
+cdef void cung2l(int *m, int *n, int *k, c *a, int *lda, c *tau, c *work, int *info) nogil
+
+cdef void cung2r(int *m, int *n, int *k, c *a, int *lda, c *tau, c *work, int *info) nogil
+
+cdef void cungbr(char *vect, int *m, int *n, int *k, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil
+
+cdef void cunghr(int *n, int *ilo, int *ihi, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil
+
+cdef void cungl2(int *m, int *n, int *k, c *a, int *lda, c *tau, c *work, int *info) nogil
+
+cdef void cunglq(int *m, int *n, int *k, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil
+
+cdef void cungql(int *m, int *n, int *k, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil
+
+cdef void cungqr(int *m, int *n, int *k, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil
+
+cdef void cungr2(int *m, int *n, int *k, c *a, int *lda, c *tau, c *work, int *info) nogil
+
+cdef void cungrq(int *m, int *n, int *k, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil
+
+cdef void cungtr(char *uplo, int *n, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil
+
+cdef void cunm2l(char *side, char *trans, int *m, int *n, int *k, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *info) nogil
+
+cdef void cunm2r(char *side, char *trans, int *m, int *n, int *k, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *info) nogil
+
+cdef void cunmbr(char *vect, char *side, char *trans, int *m, int *n, int *k, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *lwork, int *info) nogil
+
+cdef void cunmhr(char *side, char *trans, int *m, int *n, int *ilo, int *ihi, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *lwork, int *info) nogil
+
+cdef void cunml2(char *side, char *trans, int *m, int *n, int *k, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *info) nogil
+
+cdef void cunmlq(char *side, char *trans, int *m, int *n, int *k, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *lwork, int *info) nogil
+
+cdef void cunmql(char *side, char *trans, int *m, int *n, int *k, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *lwork, int *info) nogil
+
+cdef void cunmqr(char *side, char *trans, int *m, int *n, int *k, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *lwork, int *info) nogil
+
+cdef void cunmr2(char *side, char *trans, int *m, int *n, int *k, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *info) nogil
+
+cdef void cunmr3(char *side, char *trans, int *m, int *n, int *k, int *l, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *info) nogil
+
+cdef void cunmrq(char *side, char *trans, int *m, int *n, int *k, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *lwork, int *info) nogil
+
+cdef void cunmrz(char *side, char *trans, int *m, int *n, int *k, int *l, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *lwork, int *info) nogil
+
+cdef void cunmtr(char *side, char *uplo, char *trans, int *m, int *n, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *lwork, int *info) nogil
+
+cdef void cupgtr(char *uplo, int *n, c *ap, c *tau, c *q, int *ldq, c *work, int *info) nogil
+
+cdef void cupmtr(char *side, char *uplo, char *trans, int *m, int *n, c *ap, c *tau, c *c, int *ldc, c *work, int *info) nogil
+
+cdef void dbbcsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, int *m, int *p, int *q, d *theta, d *phi, d *u1, int *ldu1, d *u2, int *ldu2, d *v1t, int *ldv1t, d *v2t, int *ldv2t, d *b11d, d *b11e, d *b12d, d *b12e, d *b21d, d *b21e, d *b22d, d *b22e, d *work, int *lwork, int *info) nogil
+
+cdef void dbdsdc(char *uplo, char *compq, int *n, d *d, d *e, d *u, int *ldu, d *vt, int *ldvt, d *q, int *iq, d *work, int *iwork, int *info) nogil
+
+cdef void dbdsqr(char *uplo, int *n, int *ncvt, int *nru, int *ncc, d *d, d *e, d *vt, int *ldvt, d *u, int *ldu, d *c, int *ldc, d *work, int *info) nogil
+
+cdef void ddisna(char *job, int *m, int *n, d *d, d *sep, int *info) nogil
+
+cdef void dgbbrd(char *vect, int *m, int *n, int *ncc, int *kl, int *ku, d *ab, int *ldab, d *d, d *e, d *q, int *ldq, d *pt, int *ldpt, d *c, int *ldc, d *work, int *info) nogil
+
+cdef void dgbcon(char *norm, int *n, int *kl, int *ku, d *ab, int *ldab, int *ipiv, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil
+
+cdef void dgbequ(int *m, int *n, int *kl, int *ku, d *ab, int *ldab, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil
+
+cdef void dgbequb(int *m, int *n, int *kl, int *ku, d *ab, int *ldab, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil
+
+cdef void dgbrfs(char *trans, int *n, int *kl, int *ku, int *nrhs, d *ab, int *ldab, d *afb, int *ldafb, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+
+cdef void dgbsv(int *n, int *kl, int *ku, int *nrhs, d *ab, int *ldab, int *ipiv, d *b, int *ldb, int *info) nogil
+
+cdef void dgbsvx(char *fact, char *trans, int *n, int *kl, int *ku, int *nrhs, d *ab, int *ldab, d *afb, int *ldafb, int *ipiv, char *equed, d *r, d *c, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+
+cdef void dgbtf2(int *m, int *n, int *kl, int *ku, d *ab, int *ldab, int *ipiv, int *info) nogil
+
+cdef void dgbtrf(int *m, int *n, int *kl, int *ku, d *ab, int *ldab, int *ipiv, int *info) nogil
+
+cdef void dgbtrs(char *trans, int *n, int *kl, int *ku, int *nrhs, d *ab, int *ldab, int *ipiv, d *b, int *ldb, int *info) nogil
+
+cdef void dgebak(char *job, char *side, int *n, int *ilo, int *ihi, d *scale, int *m, d *v, int *ldv, int *info) nogil
+
+cdef void dgebal(char *job, int *n, d *a, int *lda, int *ilo, int *ihi, d *scale, int *info) nogil
+
+cdef void dgebd2(int *m, int *n, d *a, int *lda, d *d, d *e, d *tauq, d *taup, d *work, int *info) nogil
+
+cdef void dgebrd(int *m, int *n, d *a, int *lda, d *d, d *e, d *tauq, d *taup, d *work, int *lwork, int *info) nogil
+
+cdef void dgecon(char *norm, int *n, d *a, int *lda, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil
+
+cdef void dgeequ(int *m, int *n, d *a, int *lda, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil
+
+cdef void dgeequb(int *m, int *n, d *a, int *lda, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil
+
+cdef void dgees(char *jobvs, char *sort, dselect2 *select, int *n, d *a, int *lda, int *sdim, d *wr, d *wi, d *vs, int *ldvs, d *work, int *lwork, bint *bwork, int *info) nogil
+
+cdef void dgeesx(char *jobvs, char *sort, dselect2 *select, char *sense, int *n, d *a, int *lda, int *sdim, d *wr, d *wi, d *vs, int *ldvs, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info) nogil
+
+cdef void dgeev(char *jobvl, char *jobvr, int *n, d *a, int *lda, d *wr, d *wi, d *vl, int *ldvl, d *vr, int *ldvr, d *work, int *lwork, int *info) nogil
+
+cdef void dgeevx(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, d *a, int *lda, d *wr, d *wi, d *vl, int *ldvl, d *vr, int *ldvr, int *ilo, int *ihi, d *scale, d *abnrm, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void dgehd2(int *n, int *ilo, int *ihi, d *a, int *lda, d *tau, d *work, int *info) nogil
+
+cdef void dgehrd(int *n, int *ilo, int *ihi, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+
+cdef void dgejsv(char *joba, char *jobu, char *jobv, char *jobr, char *jobt, char *jobp, int *m, int *n, d *a, int *lda, d *sva, d *u, int *ldu, d *v, int *ldv, d *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void dgelq2(int *m, int *n, d *a, int *lda, d *tau, d *work, int *info) nogil
+
+cdef void dgelqf(int *m, int *n, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+
+cdef void dgels(char *trans, int *m, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, d *work, int *lwork, int *info) nogil
+
+cdef void dgelsd(int *m, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, d *s, d *rcond, int *rank, d *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void dgelss(int *m, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, d *s, d *rcond, int *rank, d *work, int *lwork, int *info) nogil
+
+cdef void dgelsy(int *m, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, int *jpvt, d *rcond, int *rank, d *work, int *lwork, int *info) nogil
+
+cdef void dgemqrt(char *side, char *trans, int *m, int *n, int *k, int *nb, d *v, int *ldv, d *t, int *ldt, d *c, int *ldc, d *work, int *info) nogil
+
+cdef void dgeql2(int *m, int *n, d *a, int *lda, d *tau, d *work, int *info) nogil
+
+cdef void dgeqlf(int *m, int *n, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+
+cdef void dgeqp3(int *m, int *n, d *a, int *lda, int *jpvt, d *tau, d *work, int *lwork, int *info) nogil
+
+cdef void dgeqr2(int *m, int *n, d *a, int *lda, d *tau, d *work, int *info) nogil
+
+cdef void dgeqr2p(int *m, int *n, d *a, int *lda, d *tau, d *work, int *info) nogil
+
+cdef void dgeqrf(int *m, int *n, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+
+cdef void dgeqrfp(int *m, int *n, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+
+cdef void dgeqrt(int *m, int *n, int *nb, d *a, int *lda, d *t, int *ldt, d *work, int *info) nogil
+
+cdef void dgeqrt2(int *m, int *n, d *a, int *lda, d *t, int *ldt, int *info) nogil
+
+cdef void dgeqrt3(int *m, int *n, d *a, int *lda, d *t, int *ldt, int *info) nogil
+
+cdef void dgerfs(char *trans, int *n, int *nrhs, d *a, int *lda, d *af, int *ldaf, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+
+cdef void dgerq2(int *m, int *n, d *a, int *lda, d *tau, d *work, int *info) nogil
+
+cdef void dgerqf(int *m, int *n, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+
+cdef void dgesc2(int *n, d *a, int *lda, d *rhs, int *ipiv, int *jpiv, d *scale) nogil
+
+cdef void dgesdd(char *jobz, int *m, int *n, d *a, int *lda, d *s, d *u, int *ldu, d *vt, int *ldvt, d *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void dgesv(int *n, int *nrhs, d *a, int *lda, int *ipiv, d *b, int *ldb, int *info) nogil
+
+cdef void dgesvd(char *jobu, char *jobvt, int *m, int *n, d *a, int *lda, d *s, d *u, int *ldu, d *vt, int *ldvt, d *work, int *lwork, int *info) nogil
+
+cdef void dgesvj(char *joba, char *jobu, char *jobv, int *m, int *n, d *a, int *lda, d *sva, int *mv, d *v, int *ldv, d *work, int *lwork, int *info) nogil
+
+cdef void dgesvx(char *fact, char *trans, int *n, int *nrhs, d *a, int *lda, d *af, int *ldaf, int *ipiv, char *equed, d *r, d *c, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+
+cdef void dgetc2(int *n, d *a, int *lda, int *ipiv, int *jpiv, int *info) nogil
+
+cdef void dgetf2(int *m, int *n, d *a, int *lda, int *ipiv, int *info) nogil
+
+cdef void dgetrf(int *m, int *n, d *a, int *lda, int *ipiv, int *info) nogil
+
+cdef void dgetri(int *n, d *a, int *lda, int *ipiv, d *work, int *lwork, int *info) nogil
+
+cdef void dgetrs(char *trans, int *n, int *nrhs, d *a, int *lda, int *ipiv, d *b, int *ldb, int *info) nogil
+
+cdef void dggbak(char *job, char *side, int *n, int *ilo, int *ihi, d *lscale, d *rscale, int *m, d *v, int *ldv, int *info) nogil
+
+cdef void dggbal(char *job, int *n, d *a, int *lda, d *b, int *ldb, int *ilo, int *ihi, d *lscale, d *rscale, d *work, int *info) nogil
+
+cdef void dgges(char *jobvsl, char *jobvsr, char *sort, dselect3 *selctg, int *n, d *a, int *lda, d *b, int *ldb, int *sdim, d *alphar, d *alphai, d *beta, d *vsl, int *ldvsl, d *vsr, int *ldvsr, d *work, int *lwork, bint *bwork, int *info) nogil
+
+cdef void dggesx(char *jobvsl, char *jobvsr, char *sort, dselect3 *selctg, char *sense, int *n, d *a, int *lda, d *b, int *ldb, int *sdim, d *alphar, d *alphai, d *beta, d *vsl, int *ldvsl, d *vsr, int *ldvsr, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info) nogil
+
+cdef void dggev(char *jobvl, char *jobvr, int *n, d *a, int *lda, d *b, int *ldb, d *alphar, d *alphai, d *beta, d *vl, int *ldvl, d *vr, int *ldvr, d *work, int *lwork, int *info) nogil
+
+cdef void dggevx(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, d *a, int *lda, d *b, int *ldb, d *alphar, d *alphai, d *beta, d *vl, int *ldvl, d *vr, int *ldvr, int *ilo, int *ihi, d *lscale, d *rscale, d *abnrm, d *bbnrm, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, bint *bwork, int *info) nogil
+
+cdef void dggglm(int *n, int *m, int *p, d *a, int *lda, d *b, int *ldb, d *d, d *x, d *y, d *work, int *lwork, int *info) nogil
+
+cdef void dgghrd(char *compq, char *compz, int *n, int *ilo, int *ihi, d *a, int *lda, d *b, int *ldb, d *q, int *ldq, d *z, int *ldz, int *info) nogil
+
+cdef void dgglse(int *m, int *n, int *p, d *a, int *lda, d *b, int *ldb, d *c, d *d, d *x, d *work, int *lwork, int *info) nogil
+
+cdef void dggqrf(int *n, int *m, int *p, d *a, int *lda, d *taua, d *b, int *ldb, d *taub, d *work, int *lwork, int *info) nogil
+
+cdef void dggrqf(int *m, int *p, int *n, d *a, int *lda, d *taua, d *b, int *ldb, d *taub, d *work, int *lwork, int *info) nogil
+
+cdef void dgsvj0(char *jobv, int *m, int *n, d *a, int *lda, d *d, d *sva, int *mv, d *v, int *ldv, d *eps, d *sfmin, d *tol, int *nsweep, d *work, int *lwork, int *info) nogil
+
+cdef void dgsvj1(char *jobv, int *m, int *n, int *n1, d *a, int *lda, d *d, d *sva, int *mv, d *v, int *ldv, d *eps, d *sfmin, d *tol, int *nsweep, d *work, int *lwork, int *info) nogil
+
+cdef void dgtcon(char *norm, int *n, d *dl, d *d, d *du, d *du2, int *ipiv, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil
+
+cdef void dgtrfs(char *trans, int *n, int *nrhs, d *dl, d *d, d *du, d *dlf, d *df, d *duf, d *du2, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+
+cdef void dgtsv(int *n, int *nrhs, d *dl, d *d, d *du, d *b, int *ldb, int *info) nogil
+
+cdef void dgtsvx(char *fact, char *trans, int *n, int *nrhs, d *dl, d *d, d *du, d *dlf, d *df, d *duf, d *du2, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+
+cdef void dgttrf(int *n, d *dl, d *d, d *du, d *du2, int *ipiv, int *info) nogil
+
+cdef void dgttrs(char *trans, int *n, int *nrhs, d *dl, d *d, d *du, d *du2, int *ipiv, d *b, int *ldb, int *info) nogil
+
+cdef void dgtts2(int *itrans, int *n, int *nrhs, d *dl, d *d, d *du, d *du2, int *ipiv, d *b, int *ldb) nogil
+
+cdef void dhgeqz(char *job, char *compq, char *compz, int *n, int *ilo, int *ihi, d *h, int *ldh, d *t, int *ldt, d *alphar, d *alphai, d *beta, d *q, int *ldq, d *z, int *ldz, d *work, int *lwork, int *info) nogil
+
+cdef void dhsein(char *side, char *eigsrc, char *initv, bint *select, int *n, d *h, int *ldh, d *wr, d *wi, d *vl, int *ldvl, d *vr, int *ldvr, int *mm, int *m, d *work, int *ifaill, int *ifailr, int *info) nogil
+
+cdef void dhseqr(char *job, char *compz, int *n, int *ilo, int *ihi, d *h, int *ldh, d *wr, d *wi, d *z, int *ldz, d *work, int *lwork, int *info) nogil
+
+cdef bint disnan(d *din) nogil
+
+cdef void dlabad(d *small, d *large) nogil
+
+cdef void dlabrd(int *m, int *n, int *nb, d *a, int *lda, d *d, d *e, d *tauq, d *taup, d *x, int *ldx, d *y, int *ldy) nogil
+
+cdef void dlacn2(int *n, d *v, d *x, int *isgn, d *est, int *kase, int *isave) nogil
+
+cdef void dlacon(int *n, d *v, d *x, int *isgn, d *est, int *kase) nogil
+
+cdef void dlacpy(char *uplo, int *m, int *n, d *a, int *lda, d *b, int *ldb) nogil
+
+cdef void dladiv(d *a, d *b, d *c, d *d, d *p, d *q) nogil
+
+cdef void dlae2(d *a, d *b, d *c, d *rt1, d *rt2) nogil
+
+cdef void dlaebz(int *ijob, int *nitmax, int *n, int *mmax, int *minp, int *nbmin, d *abstol, d *reltol, d *pivmin, d *d, d *e, d *e2, int *nval, d *ab, d *c, int *mout, int *nab, d *work, int *iwork, int *info) nogil
+
+cdef void dlaed0(int *icompq, int *qsiz, int *n, d *d, d *e, d *q, int *ldq, d *qstore, int *ldqs, d *work, int *iwork, int *info) nogil
+
+cdef void dlaed1(int *n, d *d, d *q, int *ldq, int *indxq, d *rho, int *cutpnt, d *work, int *iwork, int *info) nogil
+
+cdef void dlaed2(int *k, int *n, int *n1, d *d, d *q, int *ldq, int *indxq, d *rho, d *z, d *dlamda, d *w, d *q2, int *indx, int *indxc, int *indxp, int *coltyp, int *info) nogil
+
+cdef void dlaed3(int *k, int *n, int *n1, d *d, d *q, int *ldq, d *rho, d *dlamda, d *q2, int *indx, int *ctot, d *w, d *s, int *info) nogil
+
+cdef void dlaed4(int *n, int *i, d *d, d *z, d *delta, d *rho, d *dlam, int *info) nogil
+
+cdef void dlaed5(int *i, d *d, d *z, d *delta, d *rho, d *dlam) nogil
+
+cdef void dlaed6(int *kniter, bint *orgati, d *rho, d *d, d *z, d *finit, d *tau, int *info) nogil
+
+cdef void dlaed7(int *icompq, int *n, int *qsiz, int *tlvls, int *curlvl, int *curpbm, d *d, d *q, int *ldq, int *indxq, d *rho, int *cutpnt, d *qstore, int *qptr, int *prmptr, int *perm, int *givptr, int *givcol, d *givnum, d *work, int *iwork, int *info) nogil
+
+cdef void dlaed8(int *icompq, int *k, int *n, int *qsiz, d *d, d *q, int *ldq, int *indxq, d *rho, int *cutpnt, d *z, d *dlamda, d *q2, int *ldq2, d *w, int *perm, int *givptr, int *givcol, d *givnum, int *indxp, int *indx, int *info) nogil
+
+cdef void dlaed9(int *k, int *kstart, int *kstop, int *n, d *d, d *q, int *ldq, d *rho, d *dlamda, d *w, d *s, int *lds, int *info) nogil
+
+cdef void dlaeda(int *n, int *tlvls, int *curlvl, int *curpbm, int *prmptr, int *perm, int *givptr, int *givcol, d *givnum, d *q, int *qptr, d *z, d *ztemp, int *info) nogil
+
+cdef void dlaein(bint *rightv, bint *noinit, int *n, d *h, int *ldh, d *wr, d *wi, d *vr, d *vi, d *b, int *ldb, d *work, d *eps3, d *smlnum, d *bignum, int *info) nogil
+
+cdef void dlaev2(d *a, d *b, d *c, d *rt1, d *rt2, d *cs1, d *sn1) nogil
+
+cdef void dlaexc(bint *wantq, int *n, d *t, int *ldt, d *q, int *ldq, int *j1, int *n1, int *n2, d *work, int *info) nogil
+
+cdef void dlag2(d *a, int *lda, d *b, int *ldb, d *safmin, d *scale1, d *scale2, d *wr1, d *wr2, d *wi) nogil
+
+cdef void dlag2s(int *m, int *n, d *a, int *lda, s *sa, int *ldsa, int *info) nogil
+
+cdef void dlags2(bint *upper, d *a1, d *a2, d *a3, d *b1, d *b2, d *b3, d *csu, d *snu, d *csv, d *snv, d *csq, d *snq) nogil
+
+cdef void dlagtf(int *n, d *a, d *lambda_, d *b, d *c, d *tol, d *d, int *in_, int *info) nogil
+
+cdef void dlagtm(char *trans, int *n, int *nrhs, d *alpha, d *dl, d *d, d *du, d *x, int *ldx, d *beta, d *b, int *ldb) nogil
+
+cdef void dlagts(int *job, int *n, d *a, d *b, d *c, d *d, int *in_, d *y, d *tol, int *info) nogil
+
+cdef void dlagv2(d *a, int *lda, d *b, int *ldb, d *alphar, d *alphai, d *beta, d *csl, d *snl, d *csr, d *snr) nogil
+
+cdef void dlahqr(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, d *h, int *ldh, d *wr, d *wi, int *iloz, int *ihiz, d *z, int *ldz, int *info) nogil
+
+cdef void dlahr2(int *n, int *k, int *nb, d *a, int *lda, d *tau, d *t, int *ldt, d *y, int *ldy) nogil
+
+cdef void dlaic1(int *job, int *j, d *x, d *sest, d *w, d *gamma, d *sestpr, d *s, d *c) nogil
+
+cdef void dlaln2(bint *ltrans, int *na, int *nw, d *smin, d *ca, d *a, int *lda, d *d1, d *d2, d *b, int *ldb, d *wr, d *wi, d *x, int *ldx, d *scale, d *xnorm, int *info) nogil
+
+cdef void dlals0(int *icompq, int *nl, int *nr, int *sqre, int *nrhs, d *b, int *ldb, d *bx, int *ldbx, int *perm, int *givptr, int *givcol, int *ldgcol, d *givnum, int *ldgnum, d *poles, d *difl, d *difr, d *z, int *k, d *c, d *s, d *work, int *info) nogil
+
+cdef void dlalsa(int *icompq, int *smlsiz, int *n, int *nrhs, d *b, int *ldb, d *bx, int *ldbx, d *u, int *ldu, d *vt, int *k, d *difl, d *difr, d *z, d *poles, int *givptr, int *givcol, int *ldgcol, int *perm, d *givnum, d *c, d *s, d *work, int *iwork, int *info) nogil
+
+cdef void dlalsd(char *uplo, int *smlsiz, int *n, int *nrhs, d *d, d *e, d *b, int *ldb, d *rcond, int *rank, d *work, int *iwork, int *info) nogil
+
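+# dlamch queries double-precision machine parameters: b"E" gives relative
+# machine epsilon, b"S" the safe minimum, b"O" the overflow threshold, etc.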
+cdef d dlamch(char *cmach) nogil
+
+cdef void dlamrg(int *n1, int *n2, d *a, int *dtrd1, int *dtrd2, int *index_bn) nogil
+
+cdef int dlaneg(int *n, d *d, d *lld, d *sigma, d *pivmin, int *r) nogil
+
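+# The dlan* family computes matrix norms (max-abs, 1-norm, infinity-norm,
+# or Frobenius, selected by the first character argument) for each storage
+# format: general banded (gb), general (ge), tridiagonal (gt/st), upper
+# Hessenberg (hs), symmetric banded/packed/RFP/full (sb/sp/sf/sy), and
+# triangular banded/packed/full (tb/tp/tr).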
+cdef d dlangb(char *norm, int *n, int *kl, int *ku, d *ab, int *ldab, d *work) nogil
+
+cdef d dlange(char *norm, int *m, int *n, d *a, int *lda, d *work) nogil
+
+cdef d dlangt(char *norm, int *n, d *dl, d *d, d *du) nogil
+
+cdef d dlanhs(char *norm, int *n, d *a, int *lda, d *work) nogil
+
+cdef d dlansb(char *norm, char *uplo, int *n, int *k, d *ab, int *ldab, d *work) nogil
+
+cdef d dlansf(char *norm, char *transr, char *uplo, int *n, d *a, d *work) nogil
+
+cdef d dlansp(char *norm, char *uplo, int *n, d *ap, d *work) nogil
+
+cdef d dlanst(char *norm, int *n, d *d, d *e) nogil
+
+cdef d dlansy(char *norm, char *uplo, int *n, d *a, int *lda, d *work) nogil
+
+cdef d dlantb(char *norm, char *uplo, char *diag, int *n, int *k, d *ab, int *ldab, d *work) nogil
+
+cdef d dlantp(char *norm, char *uplo, char *diag, int *n, d *ap, d *work) nogil
+
+cdef d dlantr(char *norm, char *uplo, char *diag, int *m, int *n, d *a, int *lda, d *work) nogil
+
+cdef void dlanv2(d *a, d *b, d *c, d *d, d *rt1r, d *rt1i, d *rt2r, d *rt2i, d *cs, d *sn) nogil
+
+cdef void dlapll(int *n, d *x, int *incx, d *y, int *incy, d *ssmin) nogil
+
+cdef void dlapmr(bint *forwrd, int *m, int *n, d *x, int *ldx, int *k) nogil
+
+cdef void dlapmt(bint *forwrd, int *m, int *n, d *x, int *ldx, int *k) nogil
+
+cdef d dlapy2(d *x, d *y) nogil
+
+cdef d dlapy3(d *x, d *y, d *z) nogil
+
+cdef void dlaqgb(int *m, int *n, int *kl, int *ku, d *ab, int *ldab, d *r, d *c, d *rowcnd, d *colcnd, d *amax, char *equed) nogil
+
+cdef void dlaqge(int *m, int *n, d *a, int *lda, d *r, d *c, d *rowcnd, d *colcnd, d *amax, char *equed) nogil
+
+cdef void dlaqp2(int *m, int *n, int *offset, d *a, int *lda, int *jpvt, d *tau, d *vn1, d *vn2, d *work) nogil
+
+cdef void dlaqps(int *m, int *n, int *offset, int *nb, int *kb, d *a, int *lda, int *jpvt, d *tau, d *vn1, d *vn2, d *auxv, d *f, int *ldf) nogil
+
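+# dlaqr0 through dlaqr5 are the small-bulge multishift QR auxiliaries that
+# power dhseqr for the real nonsymmetric eigenvalue problem.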
+cdef void dlaqr0(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, d *h, int *ldh, d *wr, d *wi, int *iloz, int *ihiz, d *z, int *ldz, d *work, int *lwork, int *info) nogil
+
+cdef void dlaqr1(int *n, d *h, int *ldh, d *sr1, d *si1, d *sr2, d *si2, d *v) nogil
+
+cdef void dlaqr2(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, d *h, int *ldh, int *iloz, int *ihiz, d *z, int *ldz, int *ns, int *nd, d *sr, d *si, d *v, int *ldv, int *nh, d *t, int *ldt, int *nv, d *wv, int *ldwv, d *work, int *lwork) nogil
+
+cdef void dlaqr3(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, d *h, int *ldh, int *iloz, int *ihiz, d *z, int *ldz, int *ns, int *nd, d *sr, d *si, d *v, int *ldv, int *nh, d *t, int *ldt, int *nv, d *wv, int *ldwv, d *work, int *lwork) nogil
+
+cdef void dlaqr4(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, d *h, int *ldh, d *wr, d *wi, int *iloz, int *ihiz, d *z, int *ldz, d *work, int *lwork, int *info) nogil
+
+cdef void dlaqr5(bint *wantt, bint *wantz, int *kacc22, int *n, int *ktop, int *kbot, int *nshfts, d *sr, d *si, d *h, int *ldh, int *iloz, int *ihiz, d *z, int *ldz, d *v, int *ldv, d *u, int *ldu, int *nv, d *wv, int *ldwv, int *nh, d *wh, int *ldwh) nogil
+
+cdef void dlaqsb(char *uplo, int *n, int *kd, d *ab, int *ldab, d *s, d *scond, d *amax, char *equed) nogil
+
+cdef void dlaqsp(char *uplo, int *n, d *ap, d *s, d *scond, d *amax, char *equed) nogil
+
+cdef void dlaqsy(char *uplo, int *n, d *a, int *lda, d *s, d *scond, d *amax, char *equed) nogil
+
+cdef void dlaqtr(bint *ltran, bint *lreal, int *n, d *t, int *ldt, d *b, d *w, d *scale, d *x, d *work, int *info) nogil
+
+cdef void dlar1v(int *n, int *b1, int *bn, d *lambda_, d *d, d *l, d *ld, d *lld, d *pivmin, d *gaptol, d *z, bint *wantnc, int *negcnt, d *ztz, d *mingma, int *r, int *isuppz, d *nrminv, d *resid, d *rqcorr, d *work) nogil
+
+cdef void dlar2v(int *n, d *x, d *y, d *z, int *incx, d *c, d *s, int *incc) nogil
+
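+# dlarf* routines generate (dlarfg/dlarfgp) and apply (dlarf/dlarfb/dlarfx)
+# elementary Householder reflectors; dlarft assembles the triangular factor
+# of a compact-WY block of reflectors.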
+cdef void dlarf(char *side, int *m, int *n, d *v, int *incv, d *tau, d *c, int *ldc, d *work) nogil
+
+cdef void dlarfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, d *v, int *ldv, d *t, int *ldt, d *c, int *ldc, d *work, int *ldwork) nogil
+
+cdef void dlarfg(int *n, d *alpha, d *x, int *incx, d *tau) nogil
+
+cdef void dlarfgp(int *n, d *alpha, d *x, int *incx, d *tau) nogil
+
+cdef void dlarft(char *direct, char *storev, int *n, int *k, d *v, int *ldv, d *tau, d *t, int *ldt) nogil
+
+cdef void dlarfx(char *side, int *m, int *n, d *v, d *tau, d *c, int *ldc, d *work) nogil
+
+cdef void dlargv(int *n, d *x, int *incx, d *y, int *incy, d *c, int *incc) nogil
+
+cdef void dlarnv(int *idist, int *iseed, int *n, d *x) nogil
+
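+# dlarra through dlarrv are the auxiliaries of the MRRR (multiple
+# relatively robust representations) algorithm behind dstemr/dstegr.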
+cdef void dlarra(int *n, d *d, d *e, d *e2, d *spltol, d *tnrm, int *nsplit, int *isplit, int *info) nogil
+
+cdef void dlarrb(int *n, d *d, d *lld, int *ifirst, int *ilast, d *rtol1, d *rtol2, int *offset, d *w, d *wgap, d *werr, d *work, int *iwork, d *pivmin, d *spdiam, int *twist, int *info) nogil
+
+cdef void dlarrc(char *jobt, int *n, d *vl, d *vu, d *d, d *e, d *pivmin, int *eigcnt, int *lcnt, int *rcnt, int *info) nogil
+
+cdef void dlarrd(char *range, char *order, int *n, d *vl, d *vu, int *il, int *iu, d *gers, d *reltol, d *d, d *e, d *e2, d *pivmin, int *nsplit, int *isplit, int *m, d *w, d *werr, d *wl, d *wu, int *iblock, int *indexw, d *work, int *iwork, int *info) nogil
+
+cdef void dlarre(char *range, int *n, d *vl, d *vu, int *il, int *iu, d *d, d *e, d *e2, d *rtol1, d *rtol2, d *spltol, int *nsplit, int *isplit, int *m, d *w, d *werr, d *wgap, int *iblock, int *indexw, d *gers, d *pivmin, d *work, int *iwork, int *info) nogil
+
+cdef void dlarrf(int *n, d *d, d *l, d *ld, int *clstrt, int *clend, d *w, d *wgap, d *werr, d *spdiam, d *clgapl, d *clgapr, d *pivmin, d *sigma, d *dplus, d *lplus, d *work, int *info) nogil
+
+cdef void dlarrj(int *n, d *d, d *e2, int *ifirst, int *ilast, d *rtol, int *offset, d *w, d *werr, d *work, int *iwork, d *pivmin, d *spdiam, int *info) nogil
+
+cdef void dlarrk(int *n, int *iw, d *gl, d *gu, d *d, d *e2, d *pivmin, d *reltol, d *w, d *werr, int *info) nogil
+
+cdef void dlarrr(int *n, d *d, d *e, int *info) nogil
+
+cdef void dlarrv(int *n, d *vl, d *vu, d *d, d *l, d *pivmin, int *isplit, int *m, int *dol, int *dou, d *minrgp, d *rtol1, d *rtol2, d *w, d *werr, d *wgap, int *iblock, int *indexw, d *gers, d *z, int *ldz, int *isuppz, d *work, int *iwork, int *info) nogil
+
+cdef void dlartg(d *f, d *g, d *cs, d *sn, d *r) nogil
+
+cdef void dlartgp(d *f, d *g, d *cs, d *sn, d *r) nogil
+
+cdef void dlartgs(d *x, d *y, d *sigma, d *cs, d *sn) nogil
+
+cdef void dlartv(int *n, d *x, int *incx, d *y, int *incy, d *c, d *s, int *incc) nogil
+
+cdef void dlaruv(int *iseed, int *n, d *x) nogil
+
+cdef void dlarz(char *side, int *m, int *n, int *l, d *v, int *incv, d *tau, d *c, int *ldc, d *work) nogil
+
+cdef void dlarzb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, d *v, int *ldv, d *t, int *ldt, d *c, int *ldc, d *work, int *ldwork) nogil
+
+cdef void dlarzt(char *direct, char *storev, int *n, int *k, d *v, int *ldv, d *tau, d *t, int *ldt) nogil
+
+cdef void dlas2(d *f, d *g, d *h, d *ssmin, d *ssmax) nogil
+
+cdef void dlascl(char *type_bn, int *kl, int *ku, d *cfrom, d *cto, int *m, int *n, d *a, int *lda, int *info) nogil
+
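+# dlasd0 through dlasda implement the divide-and-conquer bidiagonal SVD
+# used by dbdsdc and, through it, dgesdd.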
+cdef void dlasd0(int *n, int *sqre, d *d, d *e, d *u, int *ldu, d *vt, int *ldvt, int *smlsiz, int *iwork, d *work, int *info) nogil
+
+cdef void dlasd1(int *nl, int *nr, int *sqre, d *d, d *alpha, d *beta, d *u, int *ldu, d *vt, int *ldvt, int *idxq, int *iwork, d *work, int *info) nogil
+
+cdef void dlasd2(int *nl, int *nr, int *sqre, int *k, d *d, d *z, d *alpha, d *beta, d *u, int *ldu, d *vt, int *ldvt, d *dsigma, d *u2, int *ldu2, d *vt2, int *ldvt2, int *idxp, int *idx, int *idxc, int *idxq, int *coltyp, int *info) nogil
+
+cdef void dlasd3(int *nl, int *nr, int *sqre, int *k, d *d, d *q, int *ldq, d *dsigma, d *u, int *ldu, d *u2, int *ldu2, d *vt, int *ldvt, d *vt2, int *ldvt2, int *idxc, int *ctot, d *z, int *info) nogil
+
+cdef void dlasd4(int *n, int *i, d *d, d *z, d *delta, d *rho, d *sigma, d *work, int *info) nogil
+
+cdef void dlasd5(int *i, d *d, d *z, d *delta, d *rho, d *dsigma, d *work) nogil
+
+cdef void dlasd6(int *icompq, int *nl, int *nr, int *sqre, d *d, d *vf, d *vl, d *alpha, d *beta, int *idxq, int *perm, int *givptr, int *givcol, int *ldgcol, d *givnum, int *ldgnum, d *poles, d *difl, d *difr, d *z, int *k, d *c, d *s, d *work, int *iwork, int *info) nogil
+
+cdef void dlasd7(int *icompq, int *nl, int *nr, int *sqre, int *k, d *d, d *z, d *zw, d *vf, d *vfw, d *vl, d *vlw, d *alpha, d *beta, d *dsigma, int *idx, int *idxp, int *idxq, int *perm, int *givptr, int *givcol, int *ldgcol, d *givnum, int *ldgnum, d *c, d *s, int *info) nogil
+
+cdef void dlasd8(int *icompq, int *k, d *d, d *z, d *vf, d *vl, d *difl, d *difr, int *lddifr, d *dsigma, d *work, int *info) nogil
+
+cdef void dlasda(int *icompq, int *smlsiz, int *n, int *sqre, d *d, d *e, d *u, int *ldu, d *vt, int *k, d *difl, d *difr, d *z, d *poles, int *givptr, int *givcol, int *ldgcol, int *perm, d *givnum, d *c, d *s, d *work, int *iwork, int *info) nogil
+
+cdef void dlasdq(char *uplo, int *sqre, int *n, int *ncvt, int *nru, int *ncc, d *d, d *e, d *vt, int *ldvt, d *u, int *ldu, d *c, int *ldc, d *work, int *info) nogil
+
+cdef void dlasdt(int *n, int *lvl, int *nd, int *inode, int *ndiml, int *ndimr, int *msub) nogil
+
+cdef void dlaset(char *uplo, int *m, int *n, d *alpha, d *beta, d *a, int *lda) nogil
+
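+# dlasq1..dlasq4 and dlasq6 implement the dqds algorithm for bidiagonal
+# singular values; dlasq5 is not declared here, presumably because its
+# signature changed between LAPACK releases.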
+cdef void dlasq1(int *n, d *d, d *e, d *work, int *info) nogil
+
+cdef void dlasq2(int *n, d *z, int *info) nogil
+
+cdef void dlasq3(int *i0, int *n0, d *z, int *pp, d *dmin, d *sigma, d *desig, d *qmax, int *nfail, int *iter, int *ndiv, bint *ieee, int *ttype, d *dmin1, d *dmin2, d *dn, d *dn1, d *dn2, d *g, d *tau) nogil
+
+cdef void dlasq4(int *i0, int *n0, d *z, int *pp, int *n0in, d *dmin, d *dmin1, d *dmin2, d *dn, d *dn1, d *dn2, d *tau, int *ttype, d *g) nogil
+
+cdef void dlasq6(int *i0, int *n0, d *z, int *pp, d *dmin, d *dmin1, d *dmin2, d *dn, d *dnm1, d *dnm2) nogil
+
+cdef void dlasr(char *side, char *pivot, char *direct, int *m, int *n, d *c, d *s, d *a, int *lda) nogil
+
+cdef void dlasrt(char *id, int *n, d *d, int *info) nogil
+
+cdef void dlassq(int *n, d *x, int *incx, d *scale, d *sumsq) nogil
+
+cdef void dlasv2(d *f, d *g, d *h, d *ssmin, d *ssmax, d *snr, d *csr, d *snl, d *csl) nogil
+
+cdef void dlaswp(int *n, d *a, int *lda, int *k1, int *k2, int *ipiv, int *incx) nogil
+
+cdef void dlasy2(bint *ltranl, bint *ltranr, int *isgn, int *n1, int *n2, d *tl, int *ldtl, d *tr, int *ldtr, d *b, int *ldb, d *scale, d *x, int *ldx, d *xnorm, int *info) nogil
+
+cdef void dlasyf(char *uplo, int *n, int *nb, int *kb, d *a, int *lda, int *ipiv, d *w, int *ldw, int *info) nogil
+
+cdef void dlat2s(char *uplo, int *n, d *a, int *lda, s *sa, int *ldsa, int *info) nogil
+
+cdef void dlatbs(char *uplo, char *trans, char *diag, char *normin, int *n, int *kd, d *ab, int *ldab, d *x, d *scale, d *cnorm, int *info) nogil
+
+cdef void dlatdf(int *ijob, int *n, d *z, int *ldz, d *rhs, d *rdsum, d *rdscal, int *ipiv, int *jpiv) nogil
+
+cdef void dlatps(char *uplo, char *trans, char *diag, char *normin, int *n, d *ap, d *x, d *scale, d *cnorm, int *info) nogil
+
+cdef void dlatrd(char *uplo, int *n, int *nb, d *a, int *lda, d *e, d *tau, d *w, int *ldw) nogil
+
+cdef void dlatrs(char *uplo, char *trans, char *diag, char *normin, int *n, d *a, int *lda, d *x, d *scale, d *cnorm, int *info) nogil
+
+cdef void dlatrz(int *m, int *n, int *l, d *a, int *lda, d *tau, d *work) nogil
+
+cdef void dlauu2(char *uplo, int *n, d *a, int *lda, int *info) nogil
+
+cdef void dlauum(char *uplo, int *n, d *a, int *lda, int *info) nogil
+
+cdef void dopgtr(char *uplo, int *n, d *ap, d *tau, d *q, int *ldq, d *work, int *info) nogil
+
+cdef void dopmtr(char *side, char *uplo, char *trans, int *m, int *n, d *ap, d *tau, d *c, int *ldc, d *work, int *info) nogil
+
+cdef void dorbdb(char *trans, char *signs, int *m, int *p, int *q, d *x11, int *ldx11, d *x12, int *ldx12, d *x21, int *ldx21, d *x22, int *ldx22, d *theta, d *phi, d *taup1, d *taup2, d *tauq1, d *tauq2, d *work, int *lwork, int *info) nogil
+
+cdef void dorcsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, char *signs, int *m, int *p, int *q, d *x11, int *ldx11, d *x12, int *ldx12, d *x21, int *ldx21, d *x22, int *ldx22, d *theta, d *u1, int *ldu1, d *u2, int *ldu2, d *v1t, int *ldv1t, d *v2t, int *ldv2t, d *work, int *lwork, int *iwork, int *info) nogil
+
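+# dorg* routines generate the real orthogonal matrix Q defined by the
+# elementary reflectors from a QR/LQ/QL/RQ, Hessenberg (hr), or tridiagonal
+# (tr) reduction; the matching dorm* routines multiply a matrix by that Q
+# (or Q**T) without forming it explicitly.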
+cdef void dorg2l(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *info) nogil
+
+cdef void dorg2r(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *info) nogil
+
+cdef void dorgbr(char *vect, int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+
+cdef void dorghr(int *n, int *ilo, int *ihi, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+
+cdef void dorgl2(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *info) nogil
+
+cdef void dorglq(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+
+cdef void dorgql(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+
+cdef void dorgqr(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+
+cdef void dorgr2(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *info) nogil
+
+cdef void dorgrq(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+
+cdef void dorgtr(char *uplo, int *n, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+
+cdef void dorm2l(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *info) nogil
+
+cdef void dorm2r(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *info) nogil
+
+cdef void dormbr(char *vect, char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil
+
+cdef void dormhr(char *side, char *trans, int *m, int *n, int *ilo, int *ihi, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil
+
+cdef void dorml2(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *info) nogil
+
+cdef void dormlq(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil
+
+cdef void dormql(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil
+
+cdef void dormqr(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil
+
+cdef void dormr2(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *info) nogil
+
+cdef void dormr3(char *side, char *trans, int *m, int *n, int *k, int *l, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *info) nogil
+
+cdef void dormrq(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil
+
+cdef void dormrz(char *side, char *trans, int *m, int *n, int *k, int *l, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil
+
+cdef void dormtr(char *side, char *uplo, char *trans, int *m, int *n, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil
+
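+# Symmetric positive definite solvers by storage format: dpb* (banded),
+# dpf* (rectangular full packed), dpo* (full), dpp* (packed), dpt*
+# (tridiagonal). The *trf/*trs pairs factorize (Cholesky) and solve;
+# *sv and *svx are the one-call driver variants.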
+cdef void dpbcon(char *uplo, int *n, int *kd, d *ab, int *ldab, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil
+
+cdef void dpbequ(char *uplo, int *n, int *kd, d *ab, int *ldab, d *s, d *scond, d *amax, int *info) nogil
+
+cdef void dpbrfs(char *uplo, int *n, int *kd, int *nrhs, d *ab, int *ldab, d *afb, int *ldafb, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+
+cdef void dpbstf(char *uplo, int *n, int *kd, d *ab, int *ldab, int *info) nogil
+
+cdef void dpbsv(char *uplo, int *n, int *kd, int *nrhs, d *ab, int *ldab, d *b, int *ldb, int *info) nogil
+
+cdef void dpbsvx(char *fact, char *uplo, int *n, int *kd, int *nrhs, d *ab, int *ldab, d *afb, int *ldafb, char *equed, d *s, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+
+cdef void dpbtf2(char *uplo, int *n, int *kd, d *ab, int *ldab, int *info) nogil
+
+cdef void dpbtrf(char *uplo, int *n, int *kd, d *ab, int *ldab, int *info) nogil
+
+cdef void dpbtrs(char *uplo, int *n, int *kd, int *nrhs, d *ab, int *ldab, d *b, int *ldb, int *info) nogil
+
+cdef void dpftrf(char *transr, char *uplo, int *n, d *a, int *info) nogil
+
+cdef void dpftri(char *transr, char *uplo, int *n, d *a, int *info) nogil
+
+cdef void dpftrs(char *transr, char *uplo, int *n, int *nrhs, d *a, d *b, int *ldb, int *info) nogil
+
+cdef void dpocon(char *uplo, int *n, d *a, int *lda, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil
+
+cdef void dpoequ(int *n, d *a, int *lda, d *s, d *scond, d *amax, int *info) nogil
+
+cdef void dpoequb(int *n, d *a, int *lda, d *s, d *scond, d *amax, int *info) nogil
+
+cdef void dporfs(char *uplo, int *n, int *nrhs, d *a, int *lda, d *af, int *ldaf, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+
+cdef void dposv(char *uplo, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, int *info) nogil
+
+cdef void dposvx(char *fact, char *uplo, int *n, int *nrhs, d *a, int *lda, d *af, int *ldaf, char *equed, d *s, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+
+cdef void dpotf2(char *uplo, int *n, d *a, int *lda, int *info) nogil
+
+cdef void dpotrf(char *uplo, int *n, d *a, int *lda, int *info) nogil
+
+cdef void dpotri(char *uplo, int *n, d *a, int *lda, int *info) nogil
+
+cdef void dpotrs(char *uplo, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, int *info) nogil
+
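+# Caller-side sketch (hypothetical code, not part of this header): from a
+# .pyx module that cimports these declarations, dpotrf factorizes a
+# Fortran-ordered symmetric positive definite matrix in place:
+#
+#     cdef double[::1, :] a = ...   # n-by-n, Fortran order, SPD
+#     cdef int n = a.shape[0], info = 0
+#     dpotrf(b"L", &n, &a[0, 0], &n, &info)   # lower Cholesky: A = L * L**T
+#     if info != 0:
+#         raise ValueError("dpotrf failed: info=%d" % info)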
+cdef void dppcon(char *uplo, int *n, d *ap, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil
+
+cdef void dppequ(char *uplo, int *n, d *ap, d *s, d *scond, d *amax, int *info) nogil
+
+cdef void dpprfs(char *uplo, int *n, int *nrhs, d *ap, d *afp, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+
+cdef void dppsv(char *uplo, int *n, int *nrhs, d *ap, d *b, int *ldb, int *info) nogil
+
+cdef void dppsvx(char *fact, char *uplo, int *n, int *nrhs, d *ap, d *afp, char *equed, d *s, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+
+cdef void dpptrf(char *uplo, int *n, d *ap, int *info) nogil
+
+cdef void dpptri(char *uplo, int *n, d *ap, int *info) nogil
+
+cdef void dpptrs(char *uplo, int *n, int *nrhs, d *ap, d *b, int *ldb, int *info) nogil
+
+cdef void dpstf2(char *uplo, int *n, d *a, int *lda, int *piv, int *rank, d *tol, d *work, int *info) nogil
+
+cdef void dpstrf(char *uplo, int *n, d *a, int *lda, int *piv, int *rank, d *tol, d *work, int *info) nogil
+
+cdef void dptcon(int *n, d *d, d *e, d *anorm, d *rcond, d *work, int *info) nogil
+
+cdef void dpteqr(char *compz, int *n, d *d, d *e, d *z, int *ldz, d *work, int *info) nogil
+
+cdef void dptrfs(int *n, int *nrhs, d *d, d *e, d *df, d *ef, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *info) nogil
+
+cdef void dptsv(int *n, int *nrhs, d *d, d *e, d *b, int *ldb, int *info) nogil
+
+cdef void dptsvx(char *fact, int *n, int *nrhs, d *d, d *e, d *df, d *ef, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *info) nogil
+
+cdef void dpttrf(int *n, d *d, d *e, int *info) nogil
+
+cdef void dpttrs(int *n, int *nrhs, d *d, d *e, d *b, int *ldb, int *info) nogil
+
+cdef void dptts2(int *n, int *nrhs, d *d, d *e, d *b, int *ldb) nogil
+
+cdef void drscl(int *n, d *sa, d *sx, int *incx) nogil
+
+cdef void dsbev(char *jobz, char *uplo, int *n, int *kd, d *ab, int *ldab, d *w, d *z, int *ldz, d *work, int *info) nogil
+
+cdef void dsbevd(char *jobz, char *uplo, int *n, int *kd, d *ab, int *ldab, d *w, d *z, int *ldz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void dsbevx(char *jobz, char *range, char *uplo, int *n, int *kd, d *ab, int *ldab, d *q, int *ldq, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, d *work, int *iwork, int *ifail, int *info) nogil
+
+cdef void dsbgst(char *vect, char *uplo, int *n, int *ka, int *kb, d *ab, int *ldab, d *bb, int *ldbb, d *x, int *ldx, d *work, int *info) nogil
+
+cdef void dsbgv(char *jobz, char *uplo, int *n, int *ka, int *kb, d *ab, int *ldab, d *bb, int *ldbb, d *w, d *z, int *ldz, d *work, int *info) nogil
+
+cdef void dsbgvd(char *jobz, char *uplo, int *n, int *ka, int *kb, d *ab, int *ldab, d *bb, int *ldbb, d *w, d *z, int *ldz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void dsbgvx(char *jobz, char *range, char *uplo, int *n, int *ka, int *kb, d *ab, int *ldab, d *bb, int *ldbb, d *q, int *ldq, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, d *work, int *iwork, int *ifail, int *info) nogil
+
+cdef void dsbtrd(char *vect, char *uplo, int *n, int *kd, d *ab, int *ldab, d *d, d *e, d *q, int *ldq, d *work, int *info) nogil
+
+cdef void dsfrk(char *transr, char *uplo, char *trans, int *n, int *k, d *alpha, d *a, int *lda, d *beta, d *c) nogil
+
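+# dsgesv is the mixed-precision LU driver: it factorizes in single
+# precision and iteratively refines the solution to double precision
+# accuracy (dsposv below is the Cholesky analogue).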
+cdef void dsgesv(int *n, int *nrhs, d *a, int *lda, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *work, s *swork, int *iter, int *info) nogil
+
+cdef void dspcon(char *uplo, int *n, d *ap, int *ipiv, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil
+
+cdef void dspev(char *jobz, char *uplo, int *n, d *ap, d *w, d *z, int *ldz, d *work, int *info) nogil
+
+cdef void dspevd(char *jobz, char *uplo, int *n, d *ap, d *w, d *z, int *ldz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void dspevx(char *jobz, char *range, char *uplo, int *n, d *ap, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, d *work, int *iwork, int *ifail, int *info) nogil
+
+cdef void dspgst(int *itype, char *uplo, int *n, d *ap, d *bp, int *info) nogil
+
+cdef void dspgv(int *itype, char *jobz, char *uplo, int *n, d *ap, d *bp, d *w, d *z, int *ldz, d *work, int *info) nogil
+
+cdef void dspgvd(int *itype, char *jobz, char *uplo, int *n, d *ap, d *bp, d *w, d *z, int *ldz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void dspgvx(int *itype, char *jobz, char *range, char *uplo, int *n, d *ap, d *bp, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, d *work, int *iwork, int *ifail, int *info) nogil
+
+cdef void dsposv(char *uplo, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, d *x, int *ldx, d *work, s *swork, int *iter, int *info) nogil
+
+cdef void dsprfs(char *uplo, int *n, int *nrhs, d *ap, d *afp, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+
+cdef void dspsv(char *uplo, int *n, int *nrhs, d *ap, int *ipiv, d *b, int *ldb, int *info) nogil
+
+cdef void dspsvx(char *fact, char *uplo, int *n, int *nrhs, d *ap, d *afp, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+
+cdef void dsptrd(char *uplo, int *n, d *ap, d *d, d *e, d *tau, int *info) nogil
+
+cdef void dsptrf(char *uplo, int *n, d *ap, int *ipiv, int *info) nogil
+
+cdef void dsptri(char *uplo, int *n, d *ap, int *ipiv, d *work, int *info) nogil
+
+cdef void dsptrs(char *uplo, int *n, int *nrhs, d *ap, int *ipiv, d *b, int *ldb, int *info) nogil
+
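+# dst* routines solve the symmetric tridiagonal eigenproblem: dstebz
+# (bisection), dstedc (divide and conquer), dstegr/dstemr (MRRR), dstein
+# (inverse iteration), dsteqr (implicit QL/QR), and dsterf (eigenvalues
+# only, root-free QR).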
+cdef void dstebz(char *range, char *order, int *n, d *vl, d *vu, int *il, int *iu, d *abstol, d *d, d *e, int *m, int *nsplit, d *w, int *iblock, int *isplit, d *work, int *iwork, int *info) nogil
+
+cdef void dstedc(char *compz, int *n, d *d, d *e, d *z, int *ldz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void dstegr(char *jobz, char *range, int *n, d *d, d *e, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, int *isuppz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void dstein(int *n, d *d, d *e, int *m, d *w, int *iblock, int *isplit, d *z, int *ldz, d *work, int *iwork, int *ifail, int *info) nogil
+
+cdef void dstemr(char *jobz, char *range, int *n, d *d, d *e, d *vl, d *vu, int *il, int *iu, int *m, d *w, d *z, int *ldz, int *nzc, int *isuppz, bint *tryrac, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void dsteqr(char *compz, int *n, d *d, d *e, d *z, int *ldz, d *work, int *info) nogil
+
+cdef void dsterf(int *n, d *d, d *e, int *info) nogil
+
+cdef void dstev(char *jobz, int *n, d *d, d *e, d *z, int *ldz, d *work, int *info) nogil
+
+cdef void dstevd(char *jobz, int *n, d *d, d *e, d *z, int *ldz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void dstevr(char *jobz, char *range, int *n, d *d, d *e, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, int *isuppz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void dstevx(char *jobz, char *range, int *n, d *d, d *e, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, d *work, int *iwork, int *ifail, int *info) nogil
+
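+# dsy* covers real symmetric matrices: Bunch-Kaufman factorization and
+# solves (dsytrf/dsytrs/dsytri), standard eigenproblems
+# (dsyev/dsyevd/dsyevr/dsyevx), and generalized eigenproblems (dsygv*).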
+cdef void dsycon(char *uplo, int *n, d *a, int *lda, int *ipiv, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil
+
+cdef void dsyconv(char *uplo, char *way, int *n, d *a, int *lda, int *ipiv, d *work, int *info) nogil
+
+cdef void dsyequb(char *uplo, int *n, d *a, int *lda, d *s, d *scond, d *amax, d *work, int *info) nogil
+
+cdef void dsyev(char *jobz, char *uplo, int *n, d *a, int *lda, d *w, d *work, int *lwork, int *info) nogil
+
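+# Caller-side sketch of the standard two-call workspace query for dsyev
+# (hypothetical code, not part of this header; `a` is an n-by-n Fortran-
+# ordered double memoryview, `w` a length-n double memoryview for the
+# eigenvalues):
+#
+#     cdef int n = a.shape[0], lwork = -1, info = 0
+#     cdef double wkopt
+#     dsyev(b"V", b"L", &n, &a[0, 0], &n, &w[0], &wkopt, &lwork, &info)
+#     lwork = <int>wkopt      # optimal workspace size returned by the query
+#     # allocate a double buffer `work` of length lwork, then solve:
+#     dsyev(b"V", b"L", &n, &a[0, 0], &n, &w[0], &work[0], &lwork, &info)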
+cdef void dsyevd(char *jobz, char *uplo, int *n, d *a, int *lda, d *w, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void dsyevr(char *jobz, char *range, char *uplo, int *n, d *a, int *lda, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, int *isuppz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void dsyevx(char *jobz, char *range, char *uplo, int *n, d *a, int *lda, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, d *work, int *lwork, int *iwork, int *ifail, int *info) nogil
+
+cdef void dsygs2(int *itype, char *uplo, int *n, d *a, int *lda, d *b, int *ldb, int *info) nogil
+
+cdef void dsygst(int *itype, char *uplo, int *n, d *a, int *lda, d *b, int *ldb, int *info) nogil
+
+cdef void dsygv(int *itype, char *jobz, char *uplo, int *n, d *a, int *lda, d *b, int *ldb, d *w, d *work, int *lwork, int *info) nogil
+
+cdef void dsygvd(int *itype, char *jobz, char *uplo, int *n, d *a, int *lda, d *b, int *ldb, d *w, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void dsygvx(int *itype, char *jobz, char *range, char *uplo, int *n, d *a, int *lda, d *b, int *ldb, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, d *work, int *lwork, int *iwork, int *ifail, int *info) nogil
+
+cdef void dsyrfs(char *uplo, int *n, int *nrhs, d *a, int *lda, d *af, int *ldaf, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+
+cdef void dsysv(char *uplo, int *n, int *nrhs, d *a, int *lda, int *ipiv, d *b, int *ldb, d *work, int *lwork, int *info) nogil
+
+cdef void dsysvx(char *fact, char *uplo, int *n, int *nrhs, d *a, int *lda, d *af, int *ldaf, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void dsyswapr(char *uplo, int *n, d *a, int *lda, int *i1, int *i2) nogil
+
+cdef void dsytd2(char *uplo, int *n, d *a, int *lda, d *d, d *e, d *tau, int *info) nogil
+
+cdef void dsytf2(char *uplo, int *n, d *a, int *lda, int *ipiv, int *info) nogil
+
+cdef void dsytrd(char *uplo, int *n, d *a, int *lda, d *d, d *e, d *tau, d *work, int *lwork, int *info) nogil
+
+cdef void dsytrf(char *uplo, int *n, d *a, int *lda, int *ipiv, d *work, int *lwork, int *info) nogil
+
+cdef void dsytri(char *uplo, int *n, d *a, int *lda, int *ipiv, d *work, int *info) nogil
+
+cdef void dsytri2(char *uplo, int *n, d *a, int *lda, int *ipiv, d *work, int *lwork, int *info) nogil
+
+cdef void dsytri2x(char *uplo, int *n, d *a, int *lda, int *ipiv, d *work, int *nb, int *info) nogil
+
+cdef void dsytrs(char *uplo, int *n, int *nrhs, d *a, int *lda, int *ipiv, d *b, int *ldb, int *info) nogil
+
+cdef void dsytrs2(char *uplo, int *n, int *nrhs, d *a, int *lda, int *ipiv, d *b, int *ldb, d *work, int *info) nogil
+
+cdef void dtbcon(char *norm, char *uplo, char *diag, int *n, int *kd, d *ab, int *ldab, d *rcond, d *work, int *iwork, int *info) nogil
+
+cdef void dtbrfs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, d *ab, int *ldab, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+
+cdef void dtbtrs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, d *ab, int *ldab, d *b, int *ldb, int *info) nogil
+
+cdef void dtfsm(char *transr, char *side, char *uplo, char *trans, char *diag, int *m, int *n, d *alpha, d *a, d *b, int *ldb) nogil
+
+cdef void dtftri(char *transr, char *uplo, char *diag, int *n, d *a, int *info) nogil
+
+cdef void dtfttp(char *transr, char *uplo, int *n, d *arf, d *ap, int *info) nogil
+
+cdef void dtfttr(char *transr, char *uplo, int *n, d *arf, d *a, int *lda, int *info) nogil
+
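+# dtg* routines post-process the generalized (QZ) Schur form: eigenvectors
+# (dtgevc), reordering (dtgexc/dtgsen), GSVD iteration (dtgsja), condition
+# estimation (dtgsna), and generalized Sylvester equations (dtgsy2/dtgsyl).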
+cdef void dtgevc(char *side, char *howmny, bint *select, int *n, d *s, int *lds, d *p, int *ldp, d *vl, int *ldvl, d *vr, int *ldvr, int *mm, int *m, d *work, int *info) nogil
+
+cdef void dtgex2(bint *wantq, bint *wantz, int *n, d *a, int *lda, d *b, int *ldb, d *q, int *ldq, d *z, int *ldz, int *j1, int *n1, int *n2, d *work, int *lwork, int *info) nogil
+
+cdef void dtgexc(bint *wantq, bint *wantz, int *n, d *a, int *lda, d *b, int *ldb, d *q, int *ldq, d *z, int *ldz, int *ifst, int *ilst, d *work, int *lwork, int *info) nogil
+
+cdef void dtgsen(int *ijob, bint *wantq, bint *wantz, bint *select, int *n, d *a, int *lda, d *b, int *ldb, d *alphar, d *alphai, d *beta, d *q, int *ldq, d *z, int *ldz, int *m, d *pl, d *pr, d *dif, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void dtgsja(char *jobu, char *jobv, char *jobq, int *m, int *p, int *n, int *k, int *l, d *a, int *lda, d *b, int *ldb, d *tola, d *tolb, d *alpha, d *beta, d *u, int *ldu, d *v, int *ldv, d *q, int *ldq, d *work, int *ncycle, int *info) nogil
+
+cdef void dtgsna(char *job, char *howmny, bint *select, int *n, d *a, int *lda, d *b, int *ldb, d *vl, int *ldvl, d *vr, int *ldvr, d *s, d *dif, int *mm, int *m, d *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void dtgsy2(char *trans, int *ijob, int *m, int *n, d *a, int *lda, d *b, int *ldb, d *c, int *ldc, d *d, int *ldd, d *e, int *lde, d *f, int *ldf, d *scale, d *rdsum, d *rdscal, int *iwork, int *pq, int *info) nogil
+
+cdef void dtgsyl(char *trans, int *ijob, int *m, int *n, d *a, int *lda, d *b, int *ldb, d *c, int *ldc, d *d, int *ldd, d *e, int *lde, d *f, int *ldf, d *scale, d *dif, d *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void dtpcon(char *norm, char *uplo, char *diag, int *n, d *ap, d *rcond, d *work, int *iwork, int *info) nogil
+
+cdef void dtpmqrt(char *side, char *trans, int *m, int *n, int *k, int *l, int *nb, d *v, int *ldv, d *t, int *ldt, d *a, int *lda, d *b, int *ldb, d *work, int *info) nogil
+
+cdef void dtpqrt(int *m, int *n, int *l, int *nb, d *a, int *lda, d *b, int *ldb, d *t, int *ldt, d *work, int *info) nogil
+
+cdef void dtpqrt2(int *m, int *n, int *l, d *a, int *lda, d *b, int *ldb, d *t, int *ldt, int *info) nogil
+
+cdef void dtprfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, d *v, int *ldv, d *t, int *ldt, d *a, int *lda, d *b, int *ldb, d *work, int *ldwork) nogil
+
+cdef void dtprfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, d *ap, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+
+cdef void dtptri(char *uplo, char *diag, int *n, d *ap, int *info) nogil
+
+cdef void dtptrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, d *ap, d *b, int *ldb, int *info) nogil
+
+cdef void dtpttf(char *transr, char *uplo, int *n, d *ap, d *arf, int *info) nogil
+
+cdef void dtpttr(char *uplo, int *n, d *ap, d *a, int *lda, int *info) nogil
+
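+# dtr* routines operate on triangular matrices: condition estimation
+# (dtrcon), Schur-form eigenvectors (dtrevc), Schur reordering
+# (dtrexc/dtrsen), Sylvester equations (dtrsyl), inversion (dtrti2/dtrtri),
+# and solves (dtrtrs).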
+cdef void dtrcon(char *norm, char *uplo, char *diag, int *n, d *a, int *lda, d *rcond, d *work, int *iwork, int *info) nogil
+
+cdef void dtrevc(char *side, char *howmny, bint *select, int *n, d *t, int *ldt, d *vl, int *ldvl, d *vr, int *ldvr, int *mm, int *m, d *work, int *info) nogil
+
+cdef void dtrexc(char *compq, int *n, d *t, int *ldt, d *q, int *ldq, int *ifst, int *ilst, d *work, int *info) nogil
+
+cdef void dtrrfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+
+cdef void dtrsen(char *job, char *compq, bint *select, int *n, d *t, int *ldt, d *q, int *ldq, d *wr, d *wi, int *m, d *s, d *sep, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void dtrsna(char *job, char *howmny, bint *select, int *n, d *t, int *ldt, d *vl, int *ldvl, d *vr, int *ldvr, d *s, d *sep, int *mm, int *m, d *work, int *ldwork, int *iwork, int *info) nogil
+
+cdef void dtrsyl(char *trana, char *tranb, int *isgn, int *m, int *n, d *a, int *lda, d *b, int *ldb, d *c, int *ldc, d *scale, int *info) nogil
+
+cdef void dtrti2(char *uplo, char *diag, int *n, d *a, int *lda, int *info) nogil
+
+cdef void dtrtri(char *uplo, char *diag, int *n, d *a, int *lda, int *info) nogil
+
+cdef void dtrtrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, int *info) nogil
+
+cdef void dtrttf(char *transr, char *uplo, int *n, d *a, int *lda, d *arf, int *info) nogil
+
+cdef void dtrttp(char *uplo, int *n, d *a, int *lda, d *ap, int *info) nogil
+
+cdef void dtzrzf(int *m, int *n, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+
+cdef d dzsum1(int *n, z *cx, int *incx) nogil
+
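+# Integer and utility queries: icmax1/izmax1 return the index of the
+# complex element with largest absolute value, ieeeck tests IEEE arithmetic
+# support, ila*lc/ila*lr find the last nonzero column/row,
+# iladiag/ilaprec/ilatrans/ilauplo map character options to integer codes,
+# and ilaver reports the LAPACK version.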
+cdef int icmax1(int *n, c *cx, int *incx) nogil
+
+cdef int ieeeck(int *ispec, s *zero, s *one) nogil
+
+cdef int ilaclc(int *m, int *n, c *a, int *lda) nogil
+
+cdef int ilaclr(int *m, int *n, c *a, int *lda) nogil
+
+cdef int iladiag(char *diag) nogil
+
+cdef int iladlc(int *m, int *n, d *a, int *lda) nogil
+
+cdef int iladlr(int *m, int *n, d *a, int *lda) nogil
+
+cdef int ilaprec(char *prec) nogil
+
+cdef int ilaslc(int *m, int *n, s *a, int *lda) nogil
+
+cdef int ilaslr(int *m, int *n, s *a, int *lda) nogil
+
+cdef int ilatrans(char *trans) nogil
+
+cdef int ilauplo(char *uplo) nogil
+
+cdef void ilaver(int *vers_major, int *vers_minor, int *vers_patch) nogil
+
+cdef int ilazlc(int *m, int *n, z *a, int *lda) nogil
+
+cdef int ilazlr(int *m, int *n, z *a, int *lda) nogil
+
+cdef int izmax1(int *n, z *cx, int *incx) nogil
+
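+# Single precision begins here: each s-prefixed routine below mirrors the
+# d-prefixed declaration above with `d` arguments replaced by `s` (the
+# s/d/c/z ctypedefs, presumably float, double, float complex, and double
+# complex as in SciPy's cython_lapack, are defined earlier in the file).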
+cdef void sbbcsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, int *m, int *p, int *q, s *theta, s *phi, s *u1, int *ldu1, s *u2, int *ldu2, s *v1t, int *ldv1t, s *v2t, int *ldv2t, s *b11d, s *b11e, s *b12d, s *b12e, s *b21d, s *b21e, s *b22d, s *b22e, s *work, int *lwork, int *info) nogil
+
+cdef void sbdsdc(char *uplo, char *compq, int *n, s *d, s *e, s *u, int *ldu, s *vt, int *ldvt, s *q, int *iq, s *work, int *iwork, int *info) nogil
+
+cdef void sbdsqr(char *uplo, int *n, int *ncvt, int *nru, int *ncc, s *d, s *e, s *vt, int *ldvt, s *u, int *ldu, s *c, int *ldc, s *work, int *info) nogil
+
+cdef s scsum1(int *n, c *cx, int *incx) nogil
+
+cdef void sdisna(char *job, int *m, int *n, s *d, s *sep, int *info) nogil
+
+cdef void sgbbrd(char *vect, int *m, int *n, int *ncc, int *kl, int *ku, s *ab, int *ldab, s *d, s *e, s *q, int *ldq, s *pt, int *ldpt, s *c, int *ldc, s *work, int *info) nogil
+
+cdef void sgbcon(char *norm, int *n, int *kl, int *ku, s *ab, int *ldab, int *ipiv, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil
+
+cdef void sgbequ(int *m, int *n, int *kl, int *ku, s *ab, int *ldab, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil
+
+cdef void sgbequb(int *m, int *n, int *kl, int *ku, s *ab, int *ldab, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil
+
+cdef void sgbrfs(char *trans, int *n, int *kl, int *ku, int *nrhs, s *ab, int *ldab, s *afb, int *ldafb, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+
+cdef void sgbsv(int *n, int *kl, int *ku, int *nrhs, s *ab, int *ldab, int *ipiv, s *b, int *ldb, int *info) nogil
+
+cdef void sgbsvx(char *fact, char *trans, int *n, int *kl, int *ku, int *nrhs, s *ab, int *ldab, s *afb, int *ldafb, int *ipiv, char *equed, s *r, s *c, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+
+cdef void sgbtf2(int *m, int *n, int *kl, int *ku, s *ab, int *ldab, int *ipiv, int *info) nogil
+
+cdef void sgbtrf(int *m, int *n, int *kl, int *ku, s *ab, int *ldab, int *ipiv, int *info) nogil
+
+cdef void sgbtrs(char *trans, int *n, int *kl, int *ku, int *nrhs, s *ab, int *ldab, int *ipiv, s *b, int *ldb, int *info) nogil
+
+cdef void sgebak(char *job, char *side, int *n, int *ilo, int *ihi, s *scale, int *m, s *v, int *ldv, int *info) nogil
+
+cdef void sgebal(char *job, int *n, s *a, int *lda, int *ilo, int *ihi, s *scale, int *info) nogil
+
+cdef void sgebd2(int *m, int *n, s *a, int *lda, s *d, s *e, s *tauq, s *taup, s *work, int *info) nogil
+
+cdef void sgebrd(int *m, int *n, s *a, int *lda, s *d, s *e, s *tauq, s *taup, s *work, int *lwork, int *info) nogil
+
+cdef void sgecon(char *norm, int *n, s *a, int *lda, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil
+
+cdef void sgeequ(int *m, int *n, s *a, int *lda, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil
+
+cdef void sgeequb(int *m, int *n, s *a, int *lda, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil
+
+cdef void sgees(char *jobvs, char *sort, sselect2 *select, int *n, s *a, int *lda, int *sdim, s *wr, s *wi, s *vs, int *ldvs, s *work, int *lwork, bint *bwork, int *info) nogil
+
+cdef void sgeesx(char *jobvs, char *sort, sselect2 *select, char *sense, int *n, s *a, int *lda, int *sdim, s *wr, s *wi, s *vs, int *ldvs, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info) nogil
+
+cdef void sgeev(char *jobvl, char *jobvr, int *n, s *a, int *lda, s *wr, s *wi, s *vl, int *ldvl, s *vr, int *ldvr, s *work, int *lwork, int *info) nogil
+
+cdef void sgeevx(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, s *a, int *lda, s *wr, s *wi, s *vl, int *ldvl, s *vr, int *ldvr, int *ilo, int *ihi, s *scale, s *abnrm, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void sgehd2(int *n, int *ilo, int *ihi, s *a, int *lda, s *tau, s *work, int *info) nogil
+
+cdef void sgehrd(int *n, int *ilo, int *ihi, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+
+cdef void sgejsv(char *joba, char *jobu, char *jobv, char *jobr, char *jobt, char *jobp, int *m, int *n, s *a, int *lda, s *sva, s *u, int *ldu, s *v, int *ldv, s *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void sgelq2(int *m, int *n, s *a, int *lda, s *tau, s *work, int *info) nogil
+
+cdef void sgelqf(int *m, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+
+cdef void sgels(char *trans, int *m, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, s *work, int *lwork, int *info) nogil
+
+cdef void sgelsd(int *m, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, s *s, s *rcond, int *rank, s *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void sgelss(int *m, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, s *s, s *rcond, int *rank, s *work, int *lwork, int *info) nogil
+
+cdef void sgelsy(int *m, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, int *jpvt, s *rcond, int *rank, s *work, int *lwork, int *info) nogil
+
+cdef void sgemqrt(char *side, char *trans, int *m, int *n, int *k, int *nb, s *v, int *ldv, s *t, int *ldt, s *c, int *ldc, s *work, int *info) nogil
+
+cdef void sgeql2(int *m, int *n, s *a, int *lda, s *tau, s *work, int *info) nogil
+
+cdef void sgeqlf(int *m, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+
+cdef void sgeqp3(int *m, int *n, s *a, int *lda, int *jpvt, s *tau, s *work, int *lwork, int *info) nogil
+
+cdef void sgeqr2(int *m, int *n, s *a, int *lda, s *tau, s *work, int *info) nogil
+
+cdef void sgeqr2p(int *m, int *n, s *a, int *lda, s *tau, s *work, int *info) nogil
+
+cdef void sgeqrf(int *m, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+
+cdef void sgeqrfp(int *m, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+
+cdef void sgeqrt(int *m, int *n, int *nb, s *a, int *lda, s *t, int *ldt, s *work, int *info) nogil
+
+cdef void sgeqrt2(int *m, int *n, s *a, int *lda, s *t, int *ldt, int *info) nogil
+
+cdef void sgeqrt3(int *m, int *n, s *a, int *lda, s *t, int *ldt, int *info) nogil
+
+cdef void sgerfs(char *trans, int *n, int *nrhs, s *a, int *lda, s *af, int *ldaf, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+
+cdef void sgerq2(int *m, int *n, s *a, int *lda, s *tau, s *work, int *info) nogil
+
+cdef void sgerqf(int *m, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+
+cdef void sgesc2(int *n, s *a, int *lda, s *rhs, int *ipiv, int *jpiv, s *scale) nogil
+
+cdef void sgesdd(char *jobz, int *m, int *n, s *a, int *lda, s *s, s *u, int *ldu, s *vt, int *ldvt, s *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void sgesv(int *n, int *nrhs, s *a, int *lda, int *ipiv, s *b, int *ldb, int *info) nogil
+
+cdef void sgesvd(char *jobu, char *jobvt, int *m, int *n, s *a, int *lda, s *s, s *u, int *ldu, s *vt, int *ldvt, s *work, int *lwork, int *info) nogil
+
+cdef void sgesvj(char *joba, char *jobu, char *jobv, int *m, int *n, s *a, int *lda, s *sva, int *mv, s *v, int *ldv, s *work, int *lwork, int *info) nogil
+
+cdef void sgesvx(char *fact, char *trans, int *n, int *nrhs, s *a, int *lda, s *af, int *ldaf, int *ipiv, char *equed, s *r, s *c, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+
+cdef void sgetc2(int *n, s *a, int *lda, int *ipiv, int *jpiv, int *info) nogil
+
+cdef void sgetf2(int *m, int *n, s *a, int *lda, int *ipiv, int *info) nogil
+
+cdef void sgetrf(int *m, int *n, s *a, int *lda, int *ipiv, int *info) nogil
+
+cdef void sgetri(int *n, s *a, int *lda, int *ipiv, s *work, int *lwork, int *info) nogil
+
+cdef void sgetrs(char *trans, int *n, int *nrhs, s *a, int *lda, int *ipiv, s *b, int *ldb, int *info) nogil
+
+cdef void sggbak(char *job, char *side, int *n, int *ilo, int *ihi, s *lscale, s *rscale, int *m, s *v, int *ldv, int *info) nogil
+
+cdef void sggbal(char *job, int *n, s *a, int *lda, s *b, int *ldb, int *ilo, int *ihi, s *lscale, s *rscale, s *work, int *info) nogil
+
+cdef void sgges(char *jobvsl, char *jobvsr, char *sort, sselect3 *selctg, int *n, s *a, int *lda, s *b, int *ldb, int *sdim, s *alphar, s *alphai, s *beta, s *vsl, int *ldvsl, s *vsr, int *ldvsr, s *work, int *lwork, bint *bwork, int *info) nogil
+
+cdef void sggesx(char *jobvsl, char *jobvsr, char *sort, sselect3 *selctg, char *sense, int *n, s *a, int *lda, s *b, int *ldb, int *sdim, s *alphar, s *alphai, s *beta, s *vsl, int *ldvsl, s *vsr, int *ldvsr, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info) nogil
+
+cdef void sggev(char *jobvl, char *jobvr, int *n, s *a, int *lda, s *b, int *ldb, s *alphar, s *alphai, s *beta, s *vl, int *ldvl, s *vr, int *ldvr, s *work, int *lwork, int *info) nogil
+
+cdef void sggevx(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, s *a, int *lda, s *b, int *ldb, s *alphar, s *alphai, s *beta, s *vl, int *ldvl, s *vr, int *ldvr, int *ilo, int *ihi, s *lscale, s *rscale, s *abnrm, s *bbnrm, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, bint *bwork, int *info) nogil
+
+cdef void sggglm(int *n, int *m, int *p, s *a, int *lda, s *b, int *ldb, s *d, s *x, s *y, s *work, int *lwork, int *info) nogil
+
+cdef void sgghrd(char *compq, char *compz, int *n, int *ilo, int *ihi, s *a, int *lda, s *b, int *ldb, s *q, int *ldq, s *z, int *ldz, int *info) nogil
+
+cdef void sgglse(int *m, int *n, int *p, s *a, int *lda, s *b, int *ldb, s *c, s *d, s *x, s *work, int *lwork, int *info) nogil
+
+cdef void sggqrf(int *n, int *m, int *p, s *a, int *lda, s *taua, s *b, int *ldb, s *taub, s *work, int *lwork, int *info) nogil
+
+cdef void sggrqf(int *m, int *p, int *n, s *a, int *lda, s *taua, s *b, int *ldb, s *taub, s *work, int *lwork, int *info) nogil
+
+cdef void sgsvj0(char *jobv, int *m, int *n, s *a, int *lda, s *d, s *sva, int *mv, s *v, int *ldv, s *eps, s *sfmin, s *tol, int *nsweep, s *work, int *lwork, int *info) nogil
+
+cdef void sgsvj1(char *jobv, int *m, int *n, int *n1, s *a, int *lda, s *d, s *sva, int *mv, s *v, int *ldv, s *eps, s *sfmin, s *tol, int *nsweep, s *work, int *lwork, int *info) nogil
+
+cdef void sgtcon(char *norm, int *n, s *dl, s *d, s *du, s *du2, int *ipiv, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil
+
+cdef void sgtrfs(char *trans, int *n, int *nrhs, s *dl, s *d, s *du, s *dlf, s *df, s *duf, s *du2, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+
+cdef void sgtsv(int *n, int *nrhs, s *dl, s *d, s *du, s *b, int *ldb, int *info) nogil
+
+cdef void sgtsvx(char *fact, char *trans, int *n, int *nrhs, s *dl, s *d, s *du, s *dlf, s *df, s *duf, s *du2, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+
+cdef void sgttrf(int *n, s *dl, s *d, s *du, s *du2, int *ipiv, int *info) nogil
+
+cdef void sgttrs(char *trans, int *n, int *nrhs, s *dl, s *d, s *du, s *du2, int *ipiv, s *b, int *ldb, int *info) nogil
+
+cdef void sgtts2(int *itrans, int *n, int *nrhs, s *dl, s *d, s *du, s *du2, int *ipiv, s *b, int *ldb) nogil
+
+cdef void shgeqz(char *job, char *compq, char *compz, int *n, int *ilo, int *ihi, s *h, int *ldh, s *t, int *ldt, s *alphar, s *alphai, s *beta, s *q, int *ldq, s *z, int *ldz, s *work, int *lwork, int *info) nogil
+
+cdef void shsein(char *side, char *eigsrc, char *initv, bint *select, int *n, s *h, int *ldh, s *wr, s *wi, s *vl, int *ldvl, s *vr, int *ldvr, int *mm, int *m, s *work, int *ifaill, int *ifailr, int *info) nogil
+
+cdef void shseqr(char *job, char *compz, int *n, int *ilo, int *ihi, s *h, int *ldh, s *wr, s *wi, s *z, int *ldz, s *work, int *lwork, int *info) nogil
+
+cdef void slabad(s *small, s *large) nogil
+
+cdef void slabrd(int *m, int *n, int *nb, s *a, int *lda, s *d, s *e, s *tauq, s *taup, s *x, int *ldx, s *y, int *ldy) nogil
+
+cdef void slacn2(int *n, s *v, s *x, int *isgn, s *est, int *kase, int *isave) nogil
+
+cdef void slacon(int *n, s *v, s *x, int *isgn, s *est, int *kase) nogil
+
+cdef void slacpy(char *uplo, int *m, int *n, s *a, int *lda, s *b, int *ldb) nogil
+
+cdef void sladiv(s *a, s *b, s *c, s *d, s *p, s *q) nogil
+
+cdef void slae2(s *a, s *b, s *c, s *rt1, s *rt2) nogil
+
+cdef void slaebz(int *ijob, int *nitmax, int *n, int *mmax, int *minp, int *nbmin, s *abstol, s *reltol, s *pivmin, s *d, s *e, s *e2, int *nval, s *ab, s *c, int *mout, int *nab, s *work, int *iwork, int *info) nogil
+
+cdef void slaed0(int *icompq, int *qsiz, int *n, s *d, s *e, s *q, int *ldq, s *qstore, int *ldqs, s *work, int *iwork, int *info) nogil
+
+cdef void slaed1(int *n, s *d, s *q, int *ldq, int *indxq, s *rho, int *cutpnt, s *work, int *iwork, int *info) nogil
+
+cdef void slaed2(int *k, int *n, int *n1, s *d, s *q, int *ldq, int *indxq, s *rho, s *z, s *dlamda, s *w, s *q2, int *indx, int *indxc, int *indxp, int *coltyp, int *info) nogil
+
+cdef void slaed3(int *k, int *n, int *n1, s *d, s *q, int *ldq, s *rho, s *dlamda, s *q2, int *indx, int *ctot, s *w, s *s, int *info) nogil
+
+cdef void slaed4(int *n, int *i, s *d, s *z, s *delta, s *rho, s *dlam, int *info) nogil
+
+cdef void slaed5(int *i, s *d, s *z, s *delta, s *rho, s *dlam) nogil
+
+cdef void slaed6(int *kniter, bint *orgati, s *rho, s *d, s *z, s *finit, s *tau, int *info) nogil
+
+cdef void slaed7(int *icompq, int *n, int *qsiz, int *tlvls, int *curlvl, int *curpbm, s *d, s *q, int *ldq, int *indxq, s *rho, int *cutpnt, s *qstore, int *qptr, int *prmptr, int *perm, int *givptr, int *givcol, s *givnum, s *work, int *iwork, int *info) nogil
+
+cdef void slaed8(int *icompq, int *k, int *n, int *qsiz, s *d, s *q, int *ldq, int *indxq, s *rho, int *cutpnt, s *z, s *dlamda, s *q2, int *ldq2, s *w, int *perm, int *givptr, int *givcol, s *givnum, int *indxp, int *indx, int *info) nogil
+
+cdef void slaed9(int *k, int *kstart, int *kstop, int *n, s *d, s *q, int *ldq, s *rho, s *dlamda, s *w, s *s, int *lds, int *info) nogil
+
+cdef void slaeda(int *n, int *tlvls, int *curlvl, int *curpbm, int *prmptr, int *perm, int *givptr, int *givcol, s *givnum, s *q, int *qptr, s *z, s *ztemp, int *info) nogil
+
+cdef void slaein(bint *rightv, bint *noinit, int *n, s *h, int *ldh, s *wr, s *wi, s *vr, s *vi, s *b, int *ldb, s *work, s *eps3, s *smlnum, s *bignum, int *info) nogil
+
+cdef void slaev2(s *a, s *b, s *c, s *rt1, s *rt2, s *cs1, s *sn1) nogil
+
+cdef void slaexc(bint *wantq, int *n, s *t, int *ldt, s *q, int *ldq, int *j1, int *n1, int *n2, s *work, int *info) nogil
+
+cdef void slag2(s *a, int *lda, s *b, int *ldb, s *safmin, s *scale1, s *scale2, s *wr1, s *wr2, s *wi) nogil
+
+cdef void slag2d(int *m, int *n, s *sa, int *ldsa, d *a, int *lda, int *info) nogil
+
+cdef void slags2(bint *upper, s *a1, s *a2, s *a3, s *b1, s *b2, s *b3, s *csu, s *snu, s *csv, s *snv, s *csq, s *snq) nogil
+
+cdef void slagtf(int *n, s *a, s *lambda_, s *b, s *c, s *tol, s *d, int *in_, int *info) nogil
+
+cdef void slagtm(char *trans, int *n, int *nrhs, s *alpha, s *dl, s *d, s *du, s *x, int *ldx, s *beta, s *b, int *ldb) nogil
+
+cdef void slagts(int *job, int *n, s *a, s *b, s *c, s *d, int *in_, s *y, s *tol, int *info) nogil
+
+cdef void slagv2(s *a, int *lda, s *b, int *ldb, s *alphar, s *alphai, s *beta, s *csl, s *snl, s *csr, s *snr) nogil
+
+cdef void slahqr(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, s *h, int *ldh, s *wr, s *wi, int *iloz, int *ihiz, s *z, int *ldz, int *info) nogil
+
+cdef void slahr2(int *n, int *k, int *nb, s *a, int *lda, s *tau, s *t, int *ldt, s *y, int *ldy) nogil
+
+cdef void slaic1(int *job, int *j, s *x, s *sest, s *w, s *gamma, s *sestpr, s *s, s *c) nogil
+
+cdef void slaln2(bint *ltrans, int *na, int *nw, s *smin, s *ca, s *a, int *lda, s *d1, s *d2, s *b, int *ldb, s *wr, s *wi, s *x, int *ldx, s *scale, s *xnorm, int *info) nogil
+
+cdef void slals0(int *icompq, int *nl, int *nr, int *sqre, int *nrhs, s *b, int *ldb, s *bx, int *ldbx, int *perm, int *givptr, int *givcol, int *ldgcol, s *givnum, int *ldgnum, s *poles, s *difl, s *difr, s *z, int *k, s *c, s *s, s *work, int *info) nogil
+
+cdef void slalsa(int *icompq, int *smlsiz, int *n, int *nrhs, s *b, int *ldb, s *bx, int *ldbx, s *u, int *ldu, s *vt, int *k, s *difl, s *difr, s *z, s *poles, int *givptr, int *givcol, int *ldgcol, int *perm, s *givnum, s *c, s *s, s *work, int *iwork, int *info) nogil
+
+cdef void slalsd(char *uplo, int *smlsiz, int *n, int *nrhs, s *d, s *e, s *b, int *ldb, s *rcond, int *rank, s *work, int *iwork, int *info) nogil
+
+cdef s slamch(char *cmach) nogil
+
+cdef void slamrg(int *n1, int *n2, s *a, int *strd1, int *strd2, int *index_bn) nogil
+
+cdef int slaneg(int *n, s *d, s *lld, s *sigma, s *pivmin, int *r) nogil
+
+cdef s slangb(char *norm, int *n, int *kl, int *ku, s *ab, int *ldab, s *work) nogil
+
+cdef s slange(char *norm, int *m, int *n, s *a, int *lda, s *work) nogil
+
+cdef s slangt(char *norm, int *n, s *dl, s *d, s *du) nogil
+
+cdef s slanhs(char *norm, int *n, s *a, int *lda, s *work) nogil
+
+cdef s slansb(char *norm, char *uplo, int *n, int *k, s *ab, int *ldab, s *work) nogil
+
+cdef s slansf(char *norm, char *transr, char *uplo, int *n, s *a, s *work) nogil
+
+cdef s slansp(char *norm, char *uplo, int *n, s *ap, s *work) nogil
+
+cdef s slanst(char *norm, int *n, s *d, s *e) nogil
+
+cdef s slansy(char *norm, char *uplo, int *n, s *a, int *lda, s *work) nogil
+
+cdef s slantb(char *norm, char *uplo, char *diag, int *n, int *k, s *ab, int *ldab, s *work) nogil
+
+cdef s slantp(char *norm, char *uplo, char *diag, int *n, s *ap, s *work) nogil
+
+cdef s slantr(char *norm, char *uplo, char *diag, int *m, int *n, s *a, int *lda, s *work) nogil
+
+cdef void slanv2(s *a, s *b, s *c, s *d, s *rt1r, s *rt1i, s *rt2r, s *rt2i, s *cs, s *sn) nogil
+
+cdef void slapll(int *n, s *x, int *incx, s *y, int *incy, s *ssmin) nogil
+
+cdef void slapmr(bint *forwrd, int *m, int *n, s *x, int *ldx, int *k) nogil
+
+cdef void slapmt(bint *forwrd, int *m, int *n, s *x, int *ldx, int *k) nogil
+
+cdef s slapy2(s *x, s *y) nogil
+
+cdef s slapy3(s *x, s *y, s *z) nogil
+
+cdef void slaqgb(int *m, int *n, int *kl, int *ku, s *ab, int *ldab, s *r, s *c, s *rowcnd, s *colcnd, s *amax, char *equed) nogil
+
+cdef void slaqge(int *m, int *n, s *a, int *lda, s *r, s *c, s *rowcnd, s *colcnd, s *amax, char *equed) nogil
+
+cdef void slaqp2(int *m, int *n, int *offset, s *a, int *lda, int *jpvt, s *tau, s *vn1, s *vn2, s *work) nogil
+
+cdef void slaqps(int *m, int *n, int *offset, int *nb, int *kb, s *a, int *lda, int *jpvt, s *tau, s *vn1, s *vn2, s *auxv, s *f, int *ldf) nogil
+
+cdef void slaqr0(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, s *h, int *ldh, s *wr, s *wi, int *iloz, int *ihiz, s *z, int *ldz, s *work, int *lwork, int *info) nogil
+
+cdef void slaqr1(int *n, s *h, int *ldh, s *sr1, s *si1, s *sr2, s *si2, s *v) nogil
+
+cdef void slaqr2(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, s *h, int *ldh, int *iloz, int *ihiz, s *z, int *ldz, int *ns, int *nd, s *sr, s *si, s *v, int *ldv, int *nh, s *t, int *ldt, int *nv, s *wv, int *ldwv, s *work, int *lwork) nogil
+
+cdef void slaqr3(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, s *h, int *ldh, int *iloz, int *ihiz, s *z, int *ldz, int *ns, int *nd, s *sr, s *si, s *v, int *ldv, int *nh, s *t, int *ldt, int *nv, s *wv, int *ldwv, s *work, int *lwork) nogil
+
+cdef void slaqr4(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, s *h, int *ldh, s *wr, s *wi, int *iloz, int *ihiz, s *z, int *ldz, s *work, int *lwork, int *info) nogil
+
+cdef void slaqr5(bint *wantt, bint *wantz, int *kacc22, int *n, int *ktop, int *kbot, int *nshfts, s *sr, s *si, s *h, int *ldh, int *iloz, int *ihiz, s *z, int *ldz, s *v, int *ldv, s *u, int *ldu, int *nv, s *wv, int *ldwv, int *nh, s *wh, int *ldwh) nogil
+
+cdef void slaqsb(char *uplo, int *n, int *kd, s *ab, int *ldab, s *s, s *scond, s *amax, char *equed) nogil
+
+cdef void slaqsp(char *uplo, int *n, s *ap, s *s, s *scond, s *amax, char *equed) nogil
+
+cdef void slaqsy(char *uplo, int *n, s *a, int *lda, s *s, s *scond, s *amax, char *equed) nogil
+
+cdef void slaqtr(bint *ltran, bint *lreal, int *n, s *t, int *ldt, s *b, s *w, s *scale, s *x, s *work, int *info) nogil
+
+cdef void slar1v(int *n, int *b1, int *bn, s *lambda_, s *d, s *l, s *ld, s *lld, s *pivmin, s *gaptol, s *z, bint *wantnc, int *negcnt, s *ztz, s *mingma, int *r, int *isuppz, s *nrminv, s *resid, s *rqcorr, s *work) nogil
+
+cdef void slar2v(int *n, s *x, s *y, s *z, int *incx, s *c, s *s, int *incc) nogil
+
+cdef void slarf(char *side, int *m, int *n, s *v, int *incv, s *tau, s *c, int *ldc, s *work) nogil
+
+cdef void slarfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, s *v, int *ldv, s *t, int *ldt, s *c, int *ldc, s *work, int *ldwork) nogil
+
+cdef void slarfg(int *n, s *alpha, s *x, int *incx, s *tau) nogil
+
+cdef void slarfgp(int *n, s *alpha, s *x, int *incx, s *tau) nogil
+
+cdef void slarft(char *direct, char *storev, int *n, int *k, s *v, int *ldv, s *tau, s *t, int *ldt) nogil
+
+cdef void slarfx(char *side, int *m, int *n, s *v, s *tau, s *c, int *ldc, s *work) nogil
+
+cdef void slargv(int *n, s *x, int *incx, s *y, int *incy, s *c, int *incc) nogil
+
+cdef void slarnv(int *idist, int *iseed, int *n, s *x) nogil
+
+cdef void slarra(int *n, s *d, s *e, s *e2, s *spltol, s *tnrm, int *nsplit, int *isplit, int *info) nogil
+
+cdef void slarrb(int *n, s *d, s *lld, int *ifirst, int *ilast, s *rtol1, s *rtol2, int *offset, s *w, s *wgap, s *werr, s *work, int *iwork, s *pivmin, s *spdiam, int *twist, int *info) nogil
+
+cdef void slarrc(char *jobt, int *n, s *vl, s *vu, s *d, s *e, s *pivmin, int *eigcnt, int *lcnt, int *rcnt, int *info) nogil
+
+cdef void slarrd(char *range, char *order, int *n, s *vl, s *vu, int *il, int *iu, s *gers, s *reltol, s *d, s *e, s *e2, s *pivmin, int *nsplit, int *isplit, int *m, s *w, s *werr, s *wl, s *wu, int *iblock, int *indexw, s *work, int *iwork, int *info) nogil
+
+cdef void slarre(char *range, int *n, s *vl, s *vu, int *il, int *iu, s *d, s *e, s *e2, s *rtol1, s *rtol2, s *spltol, int *nsplit, int *isplit, int *m, s *w, s *werr, s *wgap, int *iblock, int *indexw, s *gers, s *pivmin, s *work, int *iwork, int *info) nogil
+
+cdef void slarrf(int *n, s *d, s *l, s *ld, int *clstrt, int *clend, s *w, s *wgap, s *werr, s *spdiam, s *clgapl, s *clgapr, s *pivmin, s *sigma, s *dplus, s *lplus, s *work, int *info) nogil
+
+cdef void slarrj(int *n, s *d, s *e2, int *ifirst, int *ilast, s *rtol, int *offset, s *w, s *werr, s *work, int *iwork, s *pivmin, s *spdiam, int *info) nogil
+
+cdef void slarrk(int *n, int *iw, s *gl, s *gu, s *d, s *e2, s *pivmin, s *reltol, s *w, s *werr, int *info) nogil
+
+cdef void slarrr(int *n, s *d, s *e, int *info) nogil
+
+cdef void slarrv(int *n, s *vl, s *vu, s *d, s *l, s *pivmin, int *isplit, int *m, int *dol, int *dou, s *minrgp, s *rtol1, s *rtol2, s *w, s *werr, s *wgap, int *iblock, int *indexw, s *gers, s *z, int *ldz, int *isuppz, s *work, int *iwork, int *info) nogil
+
+cdef void slartg(s *f, s *g, s *cs, s *sn, s *r) nogil
+
+cdef void slartgp(s *f, s *g, s *cs, s *sn, s *r) nogil
+
+cdef void slartgs(s *x, s *y, s *sigma, s *cs, s *sn) nogil
+
+cdef void slartv(int *n, s *x, int *incx, s *y, int *incy, s *c, s *s, int *incc) nogil
+
+cdef void slaruv(int *iseed, int *n, s *x) nogil
+
+cdef void slarz(char *side, int *m, int *n, int *l, s *v, int *incv, s *tau, s *c, int *ldc, s *work) nogil
+
+cdef void slarzb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, s *v, int *ldv, s *t, int *ldt, s *c, int *ldc, s *work, int *ldwork) nogil
+
+cdef void slarzt(char *direct, char *storev, int *n, int *k, s *v, int *ldv, s *tau, s *t, int *ldt) nogil
+
+cdef void slas2(s *f, s *g, s *h, s *ssmin, s *ssmax) nogil
+
+cdef void slascl(char *type_bn, int *kl, int *ku, s *cfrom, s *cto, int *m, int *n, s *a, int *lda, int *info) nogil
+
+cdef void slasd0(int *n, int *sqre, s *d, s *e, s *u, int *ldu, s *vt, int *ldvt, int *smlsiz, int *iwork, s *work, int *info) nogil
+
+cdef void slasd1(int *nl, int *nr, int *sqre, s *d, s *alpha, s *beta, s *u, int *ldu, s *vt, int *ldvt, int *idxq, int *iwork, s *work, int *info) nogil
+
+cdef void slasd2(int *nl, int *nr, int *sqre, int *k, s *d, s *z, s *alpha, s *beta, s *u, int *ldu, s *vt, int *ldvt, s *dsigma, s *u2, int *ldu2, s *vt2, int *ldvt2, int *idxp, int *idx, int *idxc, int *idxq, int *coltyp, int *info) nogil
+
+cdef void slasd3(int *nl, int *nr, int *sqre, int *k, s *d, s *q, int *ldq, s *dsigma, s *u, int *ldu, s *u2, int *ldu2, s *vt, int *ldvt, s *vt2, int *ldvt2, int *idxc, int *ctot, s *z, int *info) nogil
+
+cdef void slasd4(int *n, int *i, s *d, s *z, s *delta, s *rho, s *sigma, s *work, int *info) nogil
+
+cdef void slasd5(int *i, s *d, s *z, s *delta, s *rho, s *dsigma, s *work) nogil
+
+cdef void slasd6(int *icompq, int *nl, int *nr, int *sqre, s *d, s *vf, s *vl, s *alpha, s *beta, int *idxq, int *perm, int *givptr, int *givcol, int *ldgcol, s *givnum, int *ldgnum, s *poles, s *difl, s *difr, s *z, int *k, s *c, s *s, s *work, int *iwork, int *info) nogil
+
+cdef void slasd7(int *icompq, int *nl, int *nr, int *sqre, int *k, s *d, s *z, s *zw, s *vf, s *vfw, s *vl, s *vlw, s *alpha, s *beta, s *dsigma, int *idx, int *idxp, int *idxq, int *perm, int *givptr, int *givcol, int *ldgcol, s *givnum, int *ldgnum, s *c, s *s, int *info) nogil
+
+cdef void slasd8(int *icompq, int *k, s *d, s *z, s *vf, s *vl, s *difl, s *difr, int *lddifr, s *dsigma, s *work, int *info) nogil
+
+cdef void slasda(int *icompq, int *smlsiz, int *n, int *sqre, s *d, s *e, s *u, int *ldu, s *vt, int *k, s *difl, s *difr, s *z, s *poles, int *givptr, int *givcol, int *ldgcol, int *perm, s *givnum, s *c, s *s, s *work, int *iwork, int *info) nogil
+
+cdef void slasdq(char *uplo, int *sqre, int *n, int *ncvt, int *nru, int *ncc, s *d, s *e, s *vt, int *ldvt, s *u, int *ldu, s *c, int *ldc, s *work, int *info) nogil
+
+cdef void slasdt(int *n, int *lvl, int *nd, int *inode, int *ndiml, int *ndimr, int *msub) nogil
+
+cdef void slaset(char *uplo, int *m, int *n, s *alpha, s *beta, s *a, int *lda) nogil
+
+cdef void slasq1(int *n, s *d, s *e, s *work, int *info) nogil
+
+cdef void slasq2(int *n, s *z, int *info) nogil
+
+cdef void slasq3(int *i0, int *n0, s *z, int *pp, s *dmin, s *sigma, s *desig, s *qmax, int *nfail, int *iter, int *ndiv, bint *ieee, int *ttype, s *dmin1, s *dmin2, s *dn, s *dn1, s *dn2, s *g, s *tau) nogil
+
+cdef void slasq4(int *i0, int *n0, s *z, int *pp, int *n0in, s *dmin, s *dmin1, s *dmin2, s *dn, s *dn1, s *dn2, s *tau, int *ttype, s *g) nogil
+
+cdef void slasq6(int *i0, int *n0, s *z, int *pp, s *dmin, s *dmin1, s *dmin2, s *dn, s *dnm1, s *dnm2) nogil
+
+cdef void slasr(char *side, char *pivot, char *direct, int *m, int *n, s *c, s *s, s *a, int *lda) nogil
+
+cdef void slasrt(char *id, int *n, s *d, int *info) nogil
+
+cdef void slassq(int *n, s *x, int *incx, s *scale, s *sumsq) nogil
+
+cdef void slasv2(s *f, s *g, s *h, s *ssmin, s *ssmax, s *snr, s *csr, s *snl, s *csl) nogil
+
+cdef void slaswp(int *n, s *a, int *lda, int *k1, int *k2, int *ipiv, int *incx) nogil
+
+cdef void slasy2(bint *ltranl, bint *ltranr, int *isgn, int *n1, int *n2, s *tl, int *ldtl, s *tr, int *ldtr, s *b, int *ldb, s *scale, s *x, int *ldx, s *xnorm, int *info) nogil
+
+cdef void slasyf(char *uplo, int *n, int *nb, int *kb, s *a, int *lda, int *ipiv, s *w, int *ldw, int *info) nogil
+
+cdef void slatbs(char *uplo, char *trans, char *diag, char *normin, int *n, int *kd, s *ab, int *ldab, s *x, s *scale, s *cnorm, int *info) nogil
+
+cdef void slatdf(int *ijob, int *n, s *z, int *ldz, s *rhs, s *rdsum, s *rdscal, int *ipiv, int *jpiv) nogil
+
+cdef void slatps(char *uplo, char *trans, char *diag, char *normin, int *n, s *ap, s *x, s *scale, s *cnorm, int *info) nogil
+
+cdef void slatrd(char *uplo, int *n, int *nb, s *a, int *lda, s *e, s *tau, s *w, int *ldw) nogil
+
+cdef void slatrs(char *uplo, char *trans, char *diag, char *normin, int *n, s *a, int *lda, s *x, s *scale, s *cnorm, int *info) nogil
+
+cdef void slatrz(int *m, int *n, int *l, s *a, int *lda, s *tau, s *work) nogil
+
+cdef void slauu2(char *uplo, int *n, s *a, int *lda, int *info) nogil
+
+cdef void slauum(char *uplo, int *n, s *a, int *lda, int *info) nogil
+
+cdef void sopgtr(char *uplo, int *n, s *ap, s *tau, s *q, int *ldq, s *work, int *info) nogil
+
+cdef void sopmtr(char *side, char *uplo, char *trans, int *m, int *n, s *ap, s *tau, s *c, int *ldc, s *work, int *info) nogil
+
+cdef void sorbdb(char *trans, char *signs, int *m, int *p, int *q, s *x11, int *ldx11, s *x12, int *ldx12, s *x21, int *ldx21, s *x22, int *ldx22, s *theta, s *phi, s *taup1, s *taup2, s *tauq1, s *tauq2, s *work, int *lwork, int *info) nogil
+
+cdef void sorcsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, char *signs, int *m, int *p, int *q, s *x11, int *ldx11, s *x12, int *ldx12, s *x21, int *ldx21, s *x22, int *ldx22, s *theta, s *u1, int *ldu1, s *u2, int *ldu2, s *v1t, int *ldv1t, s *v2t, int *ldv2t, s *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void sorg2l(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *info) nogil
+
+cdef void sorg2r(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *info) nogil
+
+cdef void sorgbr(char *vect, int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+
+cdef void sorghr(int *n, int *ilo, int *ihi, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+
+cdef void sorgl2(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *info) nogil
+
+cdef void sorglq(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+
+cdef void sorgql(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+
+cdef void sorgqr(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+
+cdef void sorgr2(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *info) nogil
+
+cdef void sorgrq(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+
+cdef void sorgtr(char *uplo, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+
+cdef void sorm2l(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *info) nogil
+
+cdef void sorm2r(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *info) nogil
+
+cdef void sormbr(char *vect, char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil
+
+cdef void sormhr(char *side, char *trans, int *m, int *n, int *ilo, int *ihi, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil
+
+cdef void sorml2(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *info) nogil
+
+cdef void sormlq(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil
+
+cdef void sormql(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil
+
+cdef void sormqr(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil
+
+cdef void sormr2(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *info) nogil
+
+cdef void sormr3(char *side, char *trans, int *m, int *n, int *k, int *l, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *info) nogil
+
+cdef void sormrq(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil
+
+cdef void sormrz(char *side, char *trans, int *m, int *n, int *k, int *l, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil
+
+cdef void sormtr(char *side, char *uplo, char *trans, int *m, int *n, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil
+
+cdef void spbcon(char *uplo, int *n, int *kd, s *ab, int *ldab, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil
+
+cdef void spbequ(char *uplo, int *n, int *kd, s *ab, int *ldab, s *s, s *scond, s *amax, int *info) nogil
+
+cdef void spbrfs(char *uplo, int *n, int *kd, int *nrhs, s *ab, int *ldab, s *afb, int *ldafb, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+
+cdef void spbstf(char *uplo, int *n, int *kd, s *ab, int *ldab, int *info) nogil
+
+cdef void spbsv(char *uplo, int *n, int *kd, int *nrhs, s *ab, int *ldab, s *b, int *ldb, int *info) nogil
+
+cdef void spbsvx(char *fact, char *uplo, int *n, int *kd, int *nrhs, s *ab, int *ldab, s *afb, int *ldafb, char *equed, s *s, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+
+cdef void spbtf2(char *uplo, int *n, int *kd, s *ab, int *ldab, int *info) nogil
+
+cdef void spbtrf(char *uplo, int *n, int *kd, s *ab, int *ldab, int *info) nogil
+
+cdef void spbtrs(char *uplo, int *n, int *kd, int *nrhs, s *ab, int *ldab, s *b, int *ldb, int *info) nogil
+
+cdef void spftrf(char *transr, char *uplo, int *n, s *a, int *info) nogil
+
+cdef void spftri(char *transr, char *uplo, int *n, s *a, int *info) nogil
+
+cdef void spftrs(char *transr, char *uplo, int *n, int *nrhs, s *a, s *b, int *ldb, int *info) nogil
+
+cdef void spocon(char *uplo, int *n, s *a, int *lda, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil
+
+cdef void spoequ(int *n, s *a, int *lda, s *s, s *scond, s *amax, int *info) nogil
+
+cdef void spoequb(int *n, s *a, int *lda, s *s, s *scond, s *amax, int *info) nogil
+
+cdef void sporfs(char *uplo, int *n, int *nrhs, s *a, int *lda, s *af, int *ldaf, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+
+cdef void sposv(char *uplo, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, int *info) nogil
+
+cdef void sposvx(char *fact, char *uplo, int *n, int *nrhs, s *a, int *lda, s *af, int *ldaf, char *equed, s *s, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+
+cdef void spotf2(char *uplo, int *n, s *a, int *lda, int *info) nogil
+
+cdef void spotrf(char *uplo, int *n, s *a, int *lda, int *info) nogil
+
+cdef void spotri(char *uplo, int *n, s *a, int *lda, int *info) nogil
+
+cdef void spotrs(char *uplo, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, int *info) nogil
+
+cdef void sppcon(char *uplo, int *n, s *ap, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil
+
+cdef void sppequ(char *uplo, int *n, s *ap, s *s, s *scond, s *amax, int *info) nogil
+
+cdef void spprfs(char *uplo, int *n, int *nrhs, s *ap, s *afp, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+
+cdef void sppsv(char *uplo, int *n, int *nrhs, s *ap, s *b, int *ldb, int *info) nogil
+
+cdef void sppsvx(char *fact, char *uplo, int *n, int *nrhs, s *ap, s *afp, char *equed, s *s, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+
+cdef void spptrf(char *uplo, int *n, s *ap, int *info) nogil
+
+cdef void spptri(char *uplo, int *n, s *ap, int *info) nogil
+
+cdef void spptrs(char *uplo, int *n, int *nrhs, s *ap, s *b, int *ldb, int *info) nogil
+
+cdef void spstf2(char *uplo, int *n, s *a, int *lda, int *piv, int *rank, s *tol, s *work, int *info) nogil
+
+cdef void spstrf(char *uplo, int *n, s *a, int *lda, int *piv, int *rank, s *tol, s *work, int *info) nogil
+
+cdef void sptcon(int *n, s *d, s *e, s *anorm, s *rcond, s *work, int *info) nogil
+
+cdef void spteqr(char *compz, int *n, s *d, s *e, s *z, int *ldz, s *work, int *info) nogil
+
+cdef void sptrfs(int *n, int *nrhs, s *d, s *e, s *df, s *ef, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *info) nogil
+
+cdef void sptsv(int *n, int *nrhs, s *d, s *e, s *b, int *ldb, int *info) nogil
+
+cdef void sptsvx(char *fact, int *n, int *nrhs, s *d, s *e, s *df, s *ef, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *info) nogil
+
+cdef void spttrf(int *n, s *d, s *e, int *info) nogil
+
+cdef void spttrs(int *n, int *nrhs, s *d, s *e, s *b, int *ldb, int *info) nogil
+
+cdef void sptts2(int *n, int *nrhs, s *d, s *e, s *b, int *ldb) nogil
+
+cdef void srscl(int *n, s *sa, s *sx, int *incx) nogil
+
+cdef void ssbev(char *jobz, char *uplo, int *n, int *kd, s *ab, int *ldab, s *w, s *z, int *ldz, s *work, int *info) nogil
+
+cdef void ssbevd(char *jobz, char *uplo, int *n, int *kd, s *ab, int *ldab, s *w, s *z, int *ldz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void ssbevx(char *jobz, char *range, char *uplo, int *n, int *kd, s *ab, int *ldab, s *q, int *ldq, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil
+
+cdef void ssbgst(char *vect, char *uplo, int *n, int *ka, int *kb, s *ab, int *ldab, s *bb, int *ldbb, s *x, int *ldx, s *work, int *info) nogil
+
+cdef void ssbgv(char *jobz, char *uplo, int *n, int *ka, int *kb, s *ab, int *ldab, s *bb, int *ldbb, s *w, s *z, int *ldz, s *work, int *info) nogil
+
+cdef void ssbgvd(char *jobz, char *uplo, int *n, int *ka, int *kb, s *ab, int *ldab, s *bb, int *ldbb, s *w, s *z, int *ldz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void ssbgvx(char *jobz, char *range, char *uplo, int *n, int *ka, int *kb, s *ab, int *ldab, s *bb, int *ldbb, s *q, int *ldq, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil
+
+cdef void ssbtrd(char *vect, char *uplo, int *n, int *kd, s *ab, int *ldab, s *d, s *e, s *q, int *ldq, s *work, int *info) nogil
+
+cdef void ssfrk(char *transr, char *uplo, char *trans, int *n, int *k, s *alpha, s *a, int *lda, s *beta, s *c) nogil
+
+cdef void sspcon(char *uplo, int *n, s *ap, int *ipiv, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil
+
+cdef void sspev(char *jobz, char *uplo, int *n, s *ap, s *w, s *z, int *ldz, s *work, int *info) nogil
+
+cdef void sspevd(char *jobz, char *uplo, int *n, s *ap, s *w, s *z, int *ldz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void sspevx(char *jobz, char *range, char *uplo, int *n, s *ap, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil
+
+cdef void sspgst(int *itype, char *uplo, int *n, s *ap, s *bp, int *info) nogil
+
+cdef void sspgv(int *itype, char *jobz, char *uplo, int *n, s *ap, s *bp, s *w, s *z, int *ldz, s *work, int *info) nogil
+
+cdef void sspgvd(int *itype, char *jobz, char *uplo, int *n, s *ap, s *bp, s *w, s *z, int *ldz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void sspgvx(int *itype, char *jobz, char *range, char *uplo, int *n, s *ap, s *bp, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil
+
+cdef void ssprfs(char *uplo, int *n, int *nrhs, s *ap, s *afp, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+
+cdef void sspsv(char *uplo, int *n, int *nrhs, s *ap, int *ipiv, s *b, int *ldb, int *info) nogil
+
+cdef void sspsvx(char *fact, char *uplo, int *n, int *nrhs, s *ap, s *afp, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+
+cdef void ssptrd(char *uplo, int *n, s *ap, s *d, s *e, s *tau, int *info) nogil
+
+cdef void ssptrf(char *uplo, int *n, s *ap, int *ipiv, int *info) nogil
+
+cdef void ssptri(char *uplo, int *n, s *ap, int *ipiv, s *work, int *info) nogil
+
+cdef void ssptrs(char *uplo, int *n, int *nrhs, s *ap, int *ipiv, s *b, int *ldb, int *info) nogil
+
+cdef void sstebz(char *range, char *order, int *n, s *vl, s *vu, int *il, int *iu, s *abstol, s *d, s *e, int *m, int *nsplit, s *w, int *iblock, int *isplit, s *work, int *iwork, int *info) nogil
+
+cdef void sstedc(char *compz, int *n, s *d, s *e, s *z, int *ldz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void sstegr(char *jobz, char *range, int *n, s *d, s *e, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, int *isuppz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void sstein(int *n, s *d, s *e, int *m, s *w, int *iblock, int *isplit, s *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil
+
+cdef void sstemr(char *jobz, char *range, int *n, s *d, s *e, s *vl, s *vu, int *il, int *iu, int *m, s *w, s *z, int *ldz, int *nzc, int *isuppz, bint *tryrac, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void ssteqr(char *compz, int *n, s *d, s *e, s *z, int *ldz, s *work, int *info) nogil
+
+cdef void ssterf(int *n, s *d, s *e, int *info) nogil
+
+cdef void sstev(char *jobz, int *n, s *d, s *e, s *z, int *ldz, s *work, int *info) nogil
+
+cdef void sstevd(char *jobz, int *n, s *d, s *e, s *z, int *ldz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void sstevr(char *jobz, char *range, int *n, s *d, s *e, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, int *isuppz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void sstevx(char *jobz, char *range, int *n, s *d, s *e, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil
+
+cdef void ssycon(char *uplo, int *n, s *a, int *lda, int *ipiv, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil
+
+cdef void ssyconv(char *uplo, char *way, int *n, s *a, int *lda, int *ipiv, s *work, int *info) nogil
+
+cdef void ssyequb(char *uplo, int *n, s *a, int *lda, s *s, s *scond, s *amax, s *work, int *info) nogil
+
+cdef void ssyev(char *jobz, char *uplo, int *n, s *a, int *lda, s *w, s *work, int *lwork, int *info) nogil
+
+cdef void ssyevd(char *jobz, char *uplo, int *n, s *a, int *lda, s *w, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void ssyevr(char *jobz, char *range, char *uplo, int *n, s *a, int *lda, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, int *isuppz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void ssyevx(char *jobz, char *range, char *uplo, int *n, s *a, int *lda, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *lwork, int *iwork, int *ifail, int *info) nogil
+
+cdef void ssygs2(int *itype, char *uplo, int *n, s *a, int *lda, s *b, int *ldb, int *info) nogil
+
+cdef void ssygst(int *itype, char *uplo, int *n, s *a, int *lda, s *b, int *ldb, int *info) nogil
+
+cdef void ssygv(int *itype, char *jobz, char *uplo, int *n, s *a, int *lda, s *b, int *ldb, s *w, s *work, int *lwork, int *info) nogil
+
+cdef void ssygvd(int *itype, char *jobz, char *uplo, int *n, s *a, int *lda, s *b, int *ldb, s *w, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void ssygvx(int *itype, char *jobz, char *range, char *uplo, int *n, s *a, int *lda, s *b, int *ldb, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *lwork, int *iwork, int *ifail, int *info) nogil
+
+cdef void ssyrfs(char *uplo, int *n, int *nrhs, s *a, int *lda, s *af, int *ldaf, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+
+cdef void ssysv(char *uplo, int *n, int *nrhs, s *a, int *lda, int *ipiv, s *b, int *ldb, s *work, int *lwork, int *info) nogil
+
+cdef void ssysvx(char *fact, char *uplo, int *n, int *nrhs, s *a, int *lda, s *af, int *ldaf, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void ssyswapr(char *uplo, int *n, s *a, int *lda, int *i1, int *i2) nogil
+
+cdef void ssytd2(char *uplo, int *n, s *a, int *lda, s *d, s *e, s *tau, int *info) nogil
+
+cdef void ssytf2(char *uplo, int *n, s *a, int *lda, int *ipiv, int *info) nogil
+
+cdef void ssytrd(char *uplo, int *n, s *a, int *lda, s *d, s *e, s *tau, s *work, int *lwork, int *info) nogil
+
+cdef void ssytrf(char *uplo, int *n, s *a, int *lda, int *ipiv, s *work, int *lwork, int *info) nogil
+
+cdef void ssytri(char *uplo, int *n, s *a, int *lda, int *ipiv, s *work, int *info) nogil
+
+cdef void ssytri2(char *uplo, int *n, s *a, int *lda, int *ipiv, s *work, int *lwork, int *info) nogil
+
+cdef void ssytri2x(char *uplo, int *n, s *a, int *lda, int *ipiv, s *work, int *nb, int *info) nogil
+
+cdef void ssytrs(char *uplo, int *n, int *nrhs, s *a, int *lda, int *ipiv, s *b, int *ldb, int *info) nogil
+
+cdef void ssytrs2(char *uplo, int *n, int *nrhs, s *a, int *lda, int *ipiv, s *b, int *ldb, s *work, int *info) nogil
+
+cdef void stbcon(char *norm, char *uplo, char *diag, int *n, int *kd, s *ab, int *ldab, s *rcond, s *work, int *iwork, int *info) nogil
+
+cdef void stbrfs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, s *ab, int *ldab, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+
+cdef void stbtrs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, s *ab, int *ldab, s *b, int *ldb, int *info) nogil
+
+cdef void stfsm(char *transr, char *side, char *uplo, char *trans, char *diag, int *m, int *n, s *alpha, s *a, s *b, int *ldb) nogil
+
+cdef void stftri(char *transr, char *uplo, char *diag, int *n, s *a, int *info) nogil
+
+cdef void stfttp(char *transr, char *uplo, int *n, s *arf, s *ap, int *info) nogil
+
+cdef void stfttr(char *transr, char *uplo, int *n, s *arf, s *a, int *lda, int *info) nogil
+
+cdef void stgevc(char *side, char *howmny, bint *select, int *n, s *s, int *lds, s *p, int *ldp, s *vl, int *ldvl, s *vr, int *ldvr, int *mm, int *m, s *work, int *info) nogil
+
+cdef void stgex2(bint *wantq, bint *wantz, int *n, s *a, int *lda, s *b, int *ldb, s *q, int *ldq, s *z, int *ldz, int *j1, int *n1, int *n2, s *work, int *lwork, int *info) nogil
+
+cdef void stgexc(bint *wantq, bint *wantz, int *n, s *a, int *lda, s *b, int *ldb, s *q, int *ldq, s *z, int *ldz, int *ifst, int *ilst, s *work, int *lwork, int *info) nogil
+
+cdef void stgsen(int *ijob, bint *wantq, bint *wantz, bint *select, int *n, s *a, int *lda, s *b, int *ldb, s *alphar, s *alphai, s *beta, s *q, int *ldq, s *z, int *ldz, int *m, s *pl, s *pr, s *dif, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void stgsja(char *jobu, char *jobv, char *jobq, int *m, int *p, int *n, int *k, int *l, s *a, int *lda, s *b, int *ldb, s *tola, s *tolb, s *alpha, s *beta, s *u, int *ldu, s *v, int *ldv, s *q, int *ldq, s *work, int *ncycle, int *info) nogil
+
+cdef void stgsna(char *job, char *howmny, bint *select, int *n, s *a, int *lda, s *b, int *ldb, s *vl, int *ldvl, s *vr, int *ldvr, s *s, s *dif, int *mm, int *m, s *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void stgsy2(char *trans, int *ijob, int *m, int *n, s *a, int *lda, s *b, int *ldb, s *c, int *ldc, s *d, int *ldd, s *e, int *lde, s *f, int *ldf, s *scale, s *rdsum, s *rdscal, int *iwork, int *pq, int *info) nogil
+
+cdef void stgsyl(char *trans, int *ijob, int *m, int *n, s *a, int *lda, s *b, int *ldb, s *c, int *ldc, s *d, int *ldd, s *e, int *lde, s *f, int *ldf, s *scale, s *dif, s *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void stpcon(char *norm, char *uplo, char *diag, int *n, s *ap, s *rcond, s *work, int *iwork, int *info) nogil
+
+cdef void stpmqrt(char *side, char *trans, int *m, int *n, int *k, int *l, int *nb, s *v, int *ldv, s *t, int *ldt, s *a, int *lda, s *b, int *ldb, s *work, int *info) nogil
+
+cdef void stpqrt(int *m, int *n, int *l, int *nb, s *a, int *lda, s *b, int *ldb, s *t, int *ldt, s *work, int *info) nogil
+
+cdef void stpqrt2(int *m, int *n, int *l, s *a, int *lda, s *b, int *ldb, s *t, int *ldt, int *info) nogil
+
+cdef void stprfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, s *v, int *ldv, s *t, int *ldt, s *a, int *lda, s *b, int *ldb, s *work, int *ldwork) nogil
+
+cdef void stprfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, s *ap, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+
+cdef void stptri(char *uplo, char *diag, int *n, s *ap, int *info) nogil
+
+cdef void stptrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, s *ap, s *b, int *ldb, int *info) nogil
+
+cdef void stpttf(char *transr, char *uplo, int *n, s *ap, s *arf, int *info) nogil
+
+cdef void stpttr(char *uplo, int *n, s *ap, s *a, int *lda, int *info) nogil
+
+cdef void strcon(char *norm, char *uplo, char *diag, int *n, s *a, int *lda, s *rcond, s *work, int *iwork, int *info) nogil
+
+cdef void strevc(char *side, char *howmny, bint *select, int *n, s *t, int *ldt, s *vl, int *ldvl, s *vr, int *ldvr, int *mm, int *m, s *work, int *info) nogil
+
+cdef void strexc(char *compq, int *n, s *t, int *ldt, s *q, int *ldq, int *ifst, int *ilst, s *work, int *info) nogil
+
+cdef void strrfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+
+cdef void strsen(char *job, char *compq, bint *select, int *n, s *t, int *ldt, s *q, int *ldq, s *wr, s *wi, int *m, s *s, s *sep, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void strsna(char *job, char *howmny, bint *select, int *n, s *t, int *ldt, s *vl, int *ldvl, s *vr, int *ldvr, s *s, s *sep, int *mm, int *m, s *work, int *ldwork, int *iwork, int *info) nogil
+
+cdef void strsyl(char *trana, char *tranb, int *isgn, int *m, int *n, s *a, int *lda, s *b, int *ldb, s *c, int *ldc, s *scale, int *info) nogil
+
+cdef void strti2(char *uplo, char *diag, int *n, s *a, int *lda, int *info) nogil
+
+cdef void strtri(char *uplo, char *diag, int *n, s *a, int *lda, int *info) nogil
+
+cdef void strtrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, int *info) nogil
+
+cdef void strttf(char *transr, char *uplo, int *n, s *a, int *lda, s *arf, int *info) nogil
+
+cdef void strttp(char *uplo, int *n, s *a, int *lda, s *ap, int *info) nogil
+
+cdef void stzrzf(int *m, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+
+cdef void xerbla_array(char *srname_array, int *srname_len, int *info) nogil
+
+cdef void zbbcsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, int *m, int *p, int *q, d *theta, d *phi, z *u1, int *ldu1, z *u2, int *ldu2, z *v1t, int *ldv1t, z *v2t, int *ldv2t, d *b11d, d *b11e, d *b12d, d *b12e, d *b21d, d *b21e, d *b22d, d *b22e, d *rwork, int *lrwork, int *info) nogil
+
+cdef void zbdsqr(char *uplo, int *n, int *ncvt, int *nru, int *ncc, d *d, d *e, z *vt, int *ldvt, z *u, int *ldu, z *c, int *ldc, d *rwork, int *info) nogil
+
+cdef void zcgesv(int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, z *x, int *ldx, z *work, c *swork, d *rwork, int *iter, int *info) nogil
+
+cdef void zcposv(char *uplo, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, z *x, int *ldx, z *work, c *swork, d *rwork, int *iter, int *info) nogil
+
+cdef void zdrscl(int *n, d *sa, z *sx, int *incx) nogil
+
+cdef void zgbbrd(char *vect, int *m, int *n, int *ncc, int *kl, int *ku, z *ab, int *ldab, d *d, d *e, z *q, int *ldq, z *pt, int *ldpt, z *c, int *ldc, z *work, d *rwork, int *info) nogil
+
+cdef void zgbcon(char *norm, int *n, int *kl, int *ku, z *ab, int *ldab, int *ipiv, d *anorm, d *rcond, z *work, d *rwork, int *info) nogil
+
+cdef void zgbequ(int *m, int *n, int *kl, int *ku, z *ab, int *ldab, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil
+
+cdef void zgbequb(int *m, int *n, int *kl, int *ku, z *ab, int *ldab, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil
+
+cdef void zgbrfs(char *trans, int *n, int *kl, int *ku, int *nrhs, z *ab, int *ldab, z *afb, int *ldafb, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zgbsv(int *n, int *kl, int *ku, int *nrhs, z *ab, int *ldab, int *ipiv, z *b, int *ldb, int *info) nogil
+
+cdef void zgbsvx(char *fact, char *trans, int *n, int *kl, int *ku, int *nrhs, z *ab, int *ldab, z *afb, int *ldafb, int *ipiv, char *equed, d *r, d *c, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zgbtf2(int *m, int *n, int *kl, int *ku, z *ab, int *ldab, int *ipiv, int *info) nogil
+
+cdef void zgbtrf(int *m, int *n, int *kl, int *ku, z *ab, int *ldab, int *ipiv, int *info) nogil
+
+cdef void zgbtrs(char *trans, int *n, int *kl, int *ku, int *nrhs, z *ab, int *ldab, int *ipiv, z *b, int *ldb, int *info) nogil
+
+cdef void zgebak(char *job, char *side, int *n, int *ilo, int *ihi, d *scale, int *m, z *v, int *ldv, int *info) nogil
+
+cdef void zgebal(char *job, int *n, z *a, int *lda, int *ilo, int *ihi, d *scale, int *info) nogil
+
+cdef void zgebd2(int *m, int *n, z *a, int *lda, d *d, d *e, z *tauq, z *taup, z *work, int *info) nogil
+
+cdef void zgebrd(int *m, int *n, z *a, int *lda, d *d, d *e, z *tauq, z *taup, z *work, int *lwork, int *info) nogil
+
+cdef void zgecon(char *norm, int *n, z *a, int *lda, d *anorm, d *rcond, z *work, d *rwork, int *info) nogil
+
+cdef void zgeequ(int *m, int *n, z *a, int *lda, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil
+
+cdef void zgeequb(int *m, int *n, z *a, int *lda, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil
+
+cdef void zgees(char *jobvs, char *sort, zselect1 *select, int *n, z *a, int *lda, int *sdim, z *w, z *vs, int *ldvs, z *work, int *lwork, d *rwork, bint *bwork, int *info) nogil
+
+cdef void zgeesx(char *jobvs, char *sort, zselect1 *select, char *sense, int *n, z *a, int *lda, int *sdim, z *w, z *vs, int *ldvs, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, bint *bwork, int *info) nogil
+
+cdef void zgeev(char *jobvl, char *jobvr, int *n, z *a, int *lda, z *w, z *vl, int *ldvl, z *vr, int *ldvr, z *work, int *lwork, d *rwork, int *info) nogil
+
+cdef void zgeevx(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, z *a, int *lda, z *w, z *vl, int *ldvl, z *vr, int *ldvr, int *ilo, int *ihi, d *scale, d *abnrm, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, int *info) nogil
+
+cdef void zgehd2(int *n, int *ilo, int *ihi, z *a, int *lda, z *tau, z *work, int *info) nogil
+
+cdef void zgehrd(int *n, int *ilo, int *ihi, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil
+
+cdef void zgelq2(int *m, int *n, z *a, int *lda, z *tau, z *work, int *info) nogil
+
+cdef void zgelqf(int *m, int *n, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil
+
+cdef void zgels(char *trans, int *m, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, z *work, int *lwork, int *info) nogil
+
+cdef void zgelsd(int *m, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, d *s, d *rcond, int *rank, z *work, int *lwork, d *rwork, int *iwork, int *info) nogil
+
+cdef void zgelss(int *m, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, d *s, d *rcond, int *rank, z *work, int *lwork, d *rwork, int *info) nogil
+
+cdef void zgelsy(int *m, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, int *jpvt, d *rcond, int *rank, z *work, int *lwork, d *rwork, int *info) nogil
+
+cdef void zgemqrt(char *side, char *trans, int *m, int *n, int *k, int *nb, z *v, int *ldv, z *t, int *ldt, z *c, int *ldc, z *work, int *info) nogil
+
+cdef void zgeql2(int *m, int *n, z *a, int *lda, z *tau, z *work, int *info) nogil
+
+cdef void zgeqlf(int *m, int *n, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil
+
+cdef void zgeqp3(int *m, int *n, z *a, int *lda, int *jpvt, z *tau, z *work, int *lwork, d *rwork, int *info) nogil
+
+cdef void zgeqr2(int *m, int *n, z *a, int *lda, z *tau, z *work, int *info) nogil
+
+cdef void zgeqr2p(int *m, int *n, z *a, int *lda, z *tau, z *work, int *info) nogil
+
+cdef void zgeqrf(int *m, int *n, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil
+
+cdef void zgeqrfp(int *m, int *n, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil
+
+cdef void zgeqrt(int *m, int *n, int *nb, z *a, int *lda, z *t, int *ldt, z *work, int *info) nogil
+
+cdef void zgeqrt2(int *m, int *n, z *a, int *lda, z *t, int *ldt, int *info) nogil
+
+cdef void zgeqrt3(int *m, int *n, z *a, int *lda, z *t, int *ldt, int *info) nogil
+
+cdef void zgerfs(char *trans, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zgerq2(int *m, int *n, z *a, int *lda, z *tau, z *work, int *info) nogil
+
+cdef void zgerqf(int *m, int *n, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil
+
+cdef void zgesc2(int *n, z *a, int *lda, z *rhs, int *ipiv, int *jpiv, d *scale) nogil
+
+cdef void zgesdd(char *jobz, int *m, int *n, z *a, int *lda, d *s, z *u, int *ldu, z *vt, int *ldvt, z *work, int *lwork, d *rwork, int *iwork, int *info) nogil
+
+cdef void zgesv(int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, int *info) nogil
+
+cdef void zgesvd(char *jobu, char *jobvt, int *m, int *n, z *a, int *lda, d *s, z *u, int *ldu, z *vt, int *ldvt, z *work, int *lwork, d *rwork, int *info) nogil
+
+cdef void zgesvx(char *fact, char *trans, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, int *ipiv, char *equed, d *r, d *c, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zgetc2(int *n, z *a, int *lda, int *ipiv, int *jpiv, int *info) nogil
+
+cdef void zgetf2(int *m, int *n, z *a, int *lda, int *ipiv, int *info) nogil
+
+cdef void zgetrf(int *m, int *n, z *a, int *lda, int *ipiv, int *info) nogil
+
+cdef void zgetri(int *n, z *a, int *lda, int *ipiv, z *work, int *lwork, int *info) nogil
+
+cdef void zgetrs(char *trans, int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, int *info) nogil
+
+cdef void zggbak(char *job, char *side, int *n, int *ilo, int *ihi, d *lscale, d *rscale, int *m, z *v, int *ldv, int *info) nogil
+
+cdef void zggbal(char *job, int *n, z *a, int *lda, z *b, int *ldb, int *ilo, int *ihi, d *lscale, d *rscale, d *work, int *info) nogil
+
+cdef void zgges(char *jobvsl, char *jobvsr, char *sort, zselect2 *selctg, int *n, z *a, int *lda, z *b, int *ldb, int *sdim, z *alpha, z *beta, z *vsl, int *ldvsl, z *vsr, int *ldvsr, z *work, int *lwork, d *rwork, bint *bwork, int *info) nogil
+
+cdef void zggesx(char *jobvsl, char *jobvsr, char *sort, zselect2 *selctg, char *sense, int *n, z *a, int *lda, z *b, int *ldb, int *sdim, z *alpha, z *beta, z *vsl, int *ldvsl, z *vsr, int *ldvsr, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, int *iwork, int *liwork, bint *bwork, int *info) nogil
+
+cdef void zggev(char *jobvl, char *jobvr, int *n, z *a, int *lda, z *b, int *ldb, z *alpha, z *beta, z *vl, int *ldvl, z *vr, int *ldvr, z *work, int *lwork, d *rwork, int *info) nogil
+
+cdef void zggevx(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, z *a, int *lda, z *b, int *ldb, z *alpha, z *beta, z *vl, int *ldvl, z *vr, int *ldvr, int *ilo, int *ihi, d *lscale, d *rscale, d *abnrm, d *bbnrm, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, int *iwork, bint *bwork, int *info) nogil
+
+cdef void zggglm(int *n, int *m, int *p, z *a, int *lda, z *b, int *ldb, z *d, z *x, z *y, z *work, int *lwork, int *info) nogil
+
+cdef void zgghrd(char *compq, char *compz, int *n, int *ilo, int *ihi, z *a, int *lda, z *b, int *ldb, z *q, int *ldq, z *z, int *ldz, int *info) nogil
+
+cdef void zgglse(int *m, int *n, int *p, z *a, int *lda, z *b, int *ldb, z *c, z *d, z *x, z *work, int *lwork, int *info) nogil
+
+cdef void zggqrf(int *n, int *m, int *p, z *a, int *lda, z *taua, z *b, int *ldb, z *taub, z *work, int *lwork, int *info) nogil
+
+cdef void zggrqf(int *m, int *p, int *n, z *a, int *lda, z *taua, z *b, int *ldb, z *taub, z *work, int *lwork, int *info) nogil
+
+cdef void zgtcon(char *norm, int *n, z *dl, z *d, z *du, z *du2, int *ipiv, d *anorm, d *rcond, z *work, int *info) nogil
+
+cdef void zgtrfs(char *trans, int *n, int *nrhs, z *dl, z *d, z *du, z *dlf, z *df, z *duf, z *du2, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zgtsv(int *n, int *nrhs, z *dl, z *d, z *du, z *b, int *ldb, int *info) nogil
+
+cdef void zgtsvx(char *fact, char *trans, int *n, int *nrhs, z *dl, z *d, z *du, z *dlf, z *df, z *duf, z *du2, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zgttrf(int *n, z *dl, z *d, z *du, z *du2, int *ipiv, int *info) nogil
+
+cdef void zgttrs(char *trans, int *n, int *nrhs, z *dl, z *d, z *du, z *du2, int *ipiv, z *b, int *ldb, int *info) nogil
+
+cdef void zgtts2(int *itrans, int *n, int *nrhs, z *dl, z *d, z *du, z *du2, int *ipiv, z *b, int *ldb) nogil
+
+cdef void zhbev(char *jobz, char *uplo, int *n, int *kd, z *ab, int *ldab, d *w, z *z, int *ldz, z *work, d *rwork, int *info) nogil
+
+cdef void zhbevd(char *jobz, char *uplo, int *n, int *kd, z *ab, int *ldab, d *w, z *z, int *ldz, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void zhbevx(char *jobz, char *range, char *uplo, int *n, int *kd, z *ab, int *ldab, z *q, int *ldq, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, z *work, d *rwork, int *iwork, int *ifail, int *info) nogil
+
+cdef void zhbgst(char *vect, char *uplo, int *n, int *ka, int *kb, z *ab, int *ldab, z *bb, int *ldbb, z *x, int *ldx, z *work, d *rwork, int *info) nogil
+
+cdef void zhbgv(char *jobz, char *uplo, int *n, int *ka, int *kb, z *ab, int *ldab, z *bb, int *ldbb, d *w, z *z, int *ldz, z *work, d *rwork, int *info) nogil
+
+cdef void zhbgvd(char *jobz, char *uplo, int *n, int *ka, int *kb, z *ab, int *ldab, z *bb, int *ldbb, d *w, z *z, int *ldz, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void zhbgvx(char *jobz, char *range, char *uplo, int *n, int *ka, int *kb, z *ab, int *ldab, z *bb, int *ldbb, z *q, int *ldq, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, z *work, d *rwork, int *iwork, int *ifail, int *info) nogil
+
+cdef void zhbtrd(char *vect, char *uplo, int *n, int *kd, z *ab, int *ldab, d *d, d *e, z *q, int *ldq, z *work, int *info) nogil
+
+cdef void zhecon(char *uplo, int *n, z *a, int *lda, int *ipiv, d *anorm, d *rcond, z *work, int *info) nogil
+
+cdef void zheequb(char *uplo, int *n, z *a, int *lda, d *s, d *scond, d *amax, z *work, int *info) nogil
+
+cdef void zheev(char *jobz, char *uplo, int *n, z *a, int *lda, d *w, z *work, int *lwork, d *rwork, int *info) nogil
+
+cdef void zheevd(char *jobz, char *uplo, int *n, z *a, int *lda, d *w, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void zheevr(char *jobz, char *range, char *uplo, int *n, z *a, int *lda, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, int *isuppz, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void zheevx(char *jobz, char *range, char *uplo, int *n, z *a, int *lda, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, z *work, int *lwork, d *rwork, int *iwork, int *ifail, int *info) nogil
+
+cdef void zhegs2(int *itype, char *uplo, int *n, z *a, int *lda, z *b, int *ldb, int *info) nogil
+
+cdef void zhegst(int *itype, char *uplo, int *n, z *a, int *lda, z *b, int *ldb, int *info) nogil
+
+cdef void zhegv(int *itype, char *jobz, char *uplo, int *n, z *a, int *lda, z *b, int *ldb, d *w, z *work, int *lwork, d *rwork, int *info) nogil
+
+cdef void zhegvd(int *itype, char *jobz, char *uplo, int *n, z *a, int *lda, z *b, int *ldb, d *w, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void zhegvx(int *itype, char *jobz, char *range, char *uplo, int *n, z *a, int *lda, z *b, int *ldb, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, z *work, int *lwork, d *rwork, int *iwork, int *ifail, int *info) nogil
+
+cdef void zherfs(char *uplo, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zhesv(char *uplo, int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, z *work, int *lwork, int *info) nogil
+
+cdef void zhesvx(char *fact, char *uplo, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, int *lwork, d *rwork, int *info) nogil
+
+cdef void zheswapr(char *uplo, int *n, z *a, int *lda, int *i1, int *i2) nogil
+
+cdef void zhetd2(char *uplo, int *n, z *a, int *lda, d *d, d *e, z *tau, int *info) nogil
+
+cdef void zhetf2(char *uplo, int *n, z *a, int *lda, int *ipiv, int *info) nogil
+
+cdef void zhetrd(char *uplo, int *n, z *a, int *lda, d *d, d *e, z *tau, z *work, int *lwork, int *info) nogil
+
+cdef void zhetrf(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *lwork, int *info) nogil
+
+cdef void zhetri(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *info) nogil
+
+cdef void zhetri2(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *lwork, int *info) nogil
+
+cdef void zhetri2x(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *nb, int *info) nogil
+
+cdef void zhetrs(char *uplo, int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, int *info) nogil
+
+cdef void zhetrs2(char *uplo, int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, z *work, int *info) nogil
+
+cdef void zhfrk(char *transr, char *uplo, char *trans, int *n, int *k, d *alpha, z *a, int *lda, d *beta, z *c) nogil
+
+cdef void zhgeqz(char *job, char *compq, char *compz, int *n, int *ilo, int *ihi, z *h, int *ldh, z *t, int *ldt, z *alpha, z *beta, z *q, int *ldq, z *z, int *ldz, z *work, int *lwork, d *rwork, int *info) nogil
+
+cdef void zhpcon(char *uplo, int *n, z *ap, int *ipiv, d *anorm, d *rcond, z *work, int *info) nogil
+
+cdef void zhpev(char *jobz, char *uplo, int *n, z *ap, d *w, z *z, int *ldz, z *work, d *rwork, int *info) nogil
+
+cdef void zhpevd(char *jobz, char *uplo, int *n, z *ap, d *w, z *z, int *ldz, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void zhpevx(char *jobz, char *range, char *uplo, int *n, z *ap, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, z *work, d *rwork, int *iwork, int *ifail, int *info) nogil
+
+cdef void zhpgst(int *itype, char *uplo, int *n, z *ap, z *bp, int *info) nogil
+
+cdef void zhpgv(int *itype, char *jobz, char *uplo, int *n, z *ap, z *bp, d *w, z *z, int *ldz, z *work, d *rwork, int *info) nogil
+
+cdef void zhpgvd(int *itype, char *jobz, char *uplo, int *n, z *ap, z *bp, d *w, z *z, int *ldz, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void zhpgvx(int *itype, char *jobz, char *range, char *uplo, int *n, z *ap, z *bp, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, z *work, d *rwork, int *iwork, int *ifail, int *info) nogil
+
+cdef void zhprfs(char *uplo, int *n, int *nrhs, z *ap, z *afp, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zhpsv(char *uplo, int *n, int *nrhs, z *ap, int *ipiv, z *b, int *ldb, int *info) nogil
+
+cdef void zhpsvx(char *fact, char *uplo, int *n, int *nrhs, z *ap, z *afp, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zhptrd(char *uplo, int *n, z *ap, d *d, d *e, z *tau, int *info) nogil
+
+cdef void zhptrf(char *uplo, int *n, z *ap, int *ipiv, int *info) nogil
+
+cdef void zhptri(char *uplo, int *n, z *ap, int *ipiv, z *work, int *info) nogil
+
+cdef void zhptrs(char *uplo, int *n, int *nrhs, z *ap, int *ipiv, z *b, int *ldb, int *info) nogil
+
+cdef void zhsein(char *side, char *eigsrc, char *initv, bint *select, int *n, z *h, int *ldh, z *w, z *vl, int *ldvl, z *vr, int *ldvr, int *mm, int *m, z *work, d *rwork, int *ifaill, int *ifailr, int *info) nogil
+
+cdef void zhseqr(char *job, char *compz, int *n, int *ilo, int *ihi, z *h, int *ldh, z *w, z *z, int *ldz, z *work, int *lwork, int *info) nogil
+
+cdef void zlabrd(int *m, int *n, int *nb, z *a, int *lda, d *d, d *e, z *tauq, z *taup, z *x, int *ldx, z *y, int *ldy) nogil
+
+cdef void zlacgv(int *n, z *x, int *incx) nogil
+
+cdef void zlacn2(int *n, z *v, z *x, d *est, int *kase, int *isave) nogil
+
+cdef void zlacon(int *n, z *v, z *x, d *est, int *kase) nogil
+
+cdef void zlacp2(char *uplo, int *m, int *n, d *a, int *lda, z *b, int *ldb) nogil
+
+cdef void zlacpy(char *uplo, int *m, int *n, z *a, int *lda, z *b, int *ldb) nogil
+
+cdef void zlacrm(int *m, int *n, z *a, int *lda, d *b, int *ldb, z *c, int *ldc, d *rwork) nogil
+
+cdef void zlacrt(int *n, z *cx, int *incx, z *cy, int *incy, z *c, z *s) nogil
+
+cdef z zladiv(z *x, z *y) nogil
+
+cdef void zlaed0(int *qsiz, int *n, d *d, d *e, z *q, int *ldq, z *qstore, int *ldqs, d *rwork, int *iwork, int *info) nogil
+
+cdef void zlaed7(int *n, int *cutpnt, int *qsiz, int *tlvls, int *curlvl, int *curpbm, d *d, z *q, int *ldq, d *rho, int *indxq, d *qstore, int *qptr, int *prmptr, int *perm, int *givptr, int *givcol, d *givnum, z *work, d *rwork, int *iwork, int *info) nogil
+
+cdef void zlaed8(int *k, int *n, int *qsiz, z *q, int *ldq, d *d, d *rho, int *cutpnt, d *z, d *dlamda, z *q2, int *ldq2, d *w, int *indxp, int *indx, int *indxq, int *perm, int *givptr, int *givcol, d *givnum, int *info) nogil
+
+cdef void zlaein(bint *rightv, bint *noinit, int *n, z *h, int *ldh, z *w, z *v, z *b, int *ldb, d *rwork, d *eps3, d *smlnum, int *info) nogil
+
+cdef void zlaesy(z *a, z *b, z *c, z *rt1, z *rt2, z *evscal, z *cs1, z *sn1) nogil
+
+cdef void zlaev2(z *a, z *b, z *c, d *rt1, d *rt2, d *cs1, z *sn1) nogil
+
+cdef void zlag2c(int *m, int *n, z *a, int *lda, c *sa, int *ldsa, int *info) nogil
+
+cdef void zlags2(bint *upper, d *a1, z *a2, d *a3, d *b1, z *b2, d *b3, d *csu, z *snu, d *csv, z *snv, d *csq, z *snq) nogil
+
+cdef void zlagtm(char *trans, int *n, int *nrhs, d *alpha, z *dl, z *d, z *du, z *x, int *ldx, d *beta, z *b, int *ldb) nogil
+
+cdef void zlahef(char *uplo, int *n, int *nb, int *kb, z *a, int *lda, int *ipiv, z *w, int *ldw, int *info) nogil
+
+cdef void zlahqr(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, z *h, int *ldh, z *w, int *iloz, int *ihiz, z *z, int *ldz, int *info) nogil
+
+cdef void zlahr2(int *n, int *k, int *nb, z *a, int *lda, z *tau, z *t, int *ldt, z *y, int *ldy) nogil
+
+cdef void zlaic1(int *job, int *j, z *x, d *sest, z *w, z *gamma, d *sestpr, z *s, z *c) nogil
+
+cdef void zlals0(int *icompq, int *nl, int *nr, int *sqre, int *nrhs, z *b, int *ldb, z *bx, int *ldbx, int *perm, int *givptr, int *givcol, int *ldgcol, d *givnum, int *ldgnum, d *poles, d *difl, d *difr, d *z, int *k, d *c, d *s, d *rwork, int *info) nogil
+
+cdef void zlalsa(int *icompq, int *smlsiz, int *n, int *nrhs, z *b, int *ldb, z *bx, int *ldbx, d *u, int *ldu, d *vt, int *k, d *difl, d *difr, d *z, d *poles, int *givptr, int *givcol, int *ldgcol, int *perm, d *givnum, d *c, d *s, d *rwork, int *iwork, int *info) nogil
+
+cdef void zlalsd(char *uplo, int *smlsiz, int *n, int *nrhs, d *d, d *e, z *b, int *ldb, d *rcond, int *rank, z *work, d *rwork, int *iwork, int *info) nogil
+
+cdef d zlangb(char *norm, int *n, int *kl, int *ku, z *ab, int *ldab, d *work) nogil
+
+cdef d zlange(char *norm, int *m, int *n, z *a, int *lda, d *work) nogil
+
+cdef d zlangt(char *norm, int *n, z *dl, z *d, z *du) nogil
+
+cdef d zlanhb(char *norm, char *uplo, int *n, int *k, z *ab, int *ldab, d *work) nogil
+
+cdef d zlanhe(char *norm, char *uplo, int *n, z *a, int *lda, d *work) nogil
+
+cdef d zlanhf(char *norm, char *transr, char *uplo, int *n, z *a, d *work) nogil
+
+cdef d zlanhp(char *norm, char *uplo, int *n, z *ap, d *work) nogil
+
+cdef d zlanhs(char *norm, int *n, z *a, int *lda, d *work) nogil
+
+cdef d zlanht(char *norm, int *n, d *d, z *e) nogil
+
+cdef d zlansb(char *norm, char *uplo, int *n, int *k, z *ab, int *ldab, d *work) nogil
+
+cdef d zlansp(char *norm, char *uplo, int *n, z *ap, d *work) nogil
+
+cdef d zlansy(char *norm, char *uplo, int *n, z *a, int *lda, d *work) nogil
+
+cdef d zlantb(char *norm, char *uplo, char *diag, int *n, int *k, z *ab, int *ldab, d *work) nogil
+
+cdef d zlantp(char *norm, char *uplo, char *diag, int *n, z *ap, d *work) nogil
+
+cdef d zlantr(char *norm, char *uplo, char *diag, int *m, int *n, z *a, int *lda, d *work) nogil
+
+cdef void zlapll(int *n, z *x, int *incx, z *y, int *incy, d *ssmin) nogil
+
+cdef void zlapmr(bint *forwrd, int *m, int *n, z *x, int *ldx, int *k) nogil
+
+cdef void zlapmt(bint *forwrd, int *m, int *n, z *x, int *ldx, int *k) nogil
+
+cdef void zlaqgb(int *m, int *n, int *kl, int *ku, z *ab, int *ldab, d *r, d *c, d *rowcnd, d *colcnd, d *amax, char *equed) nogil
+
+cdef void zlaqge(int *m, int *n, z *a, int *lda, d *r, d *c, d *rowcnd, d *colcnd, d *amax, char *equed) nogil
+
+cdef void zlaqhb(char *uplo, int *n, int *kd, z *ab, int *ldab, d *s, d *scond, d *amax, char *equed) nogil
+
+cdef void zlaqhe(char *uplo, int *n, z *a, int *lda, d *s, d *scond, d *amax, char *equed) nogil
+
+cdef void zlaqhp(char *uplo, int *n, z *ap, d *s, d *scond, d *amax, char *equed) nogil
+
+cdef void zlaqp2(int *m, int *n, int *offset, z *a, int *lda, int *jpvt, z *tau, d *vn1, d *vn2, z *work) nogil
+
+cdef void zlaqps(int *m, int *n, int *offset, int *nb, int *kb, z *a, int *lda, int *jpvt, z *tau, d *vn1, d *vn2, z *auxv, z *f, int *ldf) nogil
+
+cdef void zlaqr0(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, z *h, int *ldh, z *w, int *iloz, int *ihiz, z *z, int *ldz, z *work, int *lwork, int *info) nogil
+
+cdef void zlaqr1(int *n, z *h, int *ldh, z *s1, z *s2, z *v) nogil
+
+cdef void zlaqr2(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, z *h, int *ldh, int *iloz, int *ihiz, z *z, int *ldz, int *ns, int *nd, z *sh, z *v, int *ldv, int *nh, z *t, int *ldt, int *nv, z *wv, int *ldwv, z *work, int *lwork) nogil
+
+cdef void zlaqr3(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, z *h, int *ldh, int *iloz, int *ihiz, z *z, int *ldz, int *ns, int *nd, z *sh, z *v, int *ldv, int *nh, z *t, int *ldt, int *nv, z *wv, int *ldwv, z *work, int *lwork) nogil
+
+cdef void zlaqr4(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, z *h, int *ldh, z *w, int *iloz, int *ihiz, z *z, int *ldz, z *work, int *lwork, int *info) nogil
+
+cdef void zlaqr5(bint *wantt, bint *wantz, int *kacc22, int *n, int *ktop, int *kbot, int *nshfts, z *s, z *h, int *ldh, int *iloz, int *ihiz, z *z, int *ldz, z *v, int *ldv, z *u, int *ldu, int *nv, z *wv, int *ldwv, int *nh, z *wh, int *ldwh) nogil
+
+cdef void zlaqsb(char *uplo, int *n, int *kd, z *ab, int *ldab, d *s, d *scond, d *amax, char *equed) nogil
+
+cdef void zlaqsp(char *uplo, int *n, z *ap, d *s, d *scond, d *amax, char *equed) nogil
+
+cdef void zlaqsy(char *uplo, int *n, z *a, int *lda, d *s, d *scond, d *amax, char *equed) nogil
+
+cdef void zlar1v(int *n, int *b1, int *bn, d *lambda_, d *d, d *l, d *ld, d *lld, d *pivmin, d *gaptol, z *z, bint *wantnc, int *negcnt, d *ztz, d *mingma, int *r, int *isuppz, d *nrminv, d *resid, d *rqcorr, d *work) nogil
+
+cdef void zlar2v(int *n, z *x, z *y, z *z, int *incx, d *c, z *s, int *incc) nogil
+
+cdef void zlarcm(int *m, int *n, d *a, int *lda, z *b, int *ldb, z *c, int *ldc, d *rwork) nogil
+
+cdef void zlarf(char *side, int *m, int *n, z *v, int *incv, z *tau, z *c, int *ldc, z *work) nogil
+
+cdef void zlarfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, z *v, int *ldv, z *t, int *ldt, z *c, int *ldc, z *work, int *ldwork) nogil
+
+cdef void zlarfg(int *n, z *alpha, z *x, int *incx, z *tau) nogil
+
+cdef void zlarfgp(int *n, z *alpha, z *x, int *incx, z *tau) nogil
+
+cdef void zlarft(char *direct, char *storev, int *n, int *k, z *v, int *ldv, z *tau, z *t, int *ldt) nogil
+
+cdef void zlarfx(char *side, int *m, int *n, z *v, z *tau, z *c, int *ldc, z *work) nogil
+
+cdef void zlargv(int *n, z *x, int *incx, z *y, int *incy, d *c, int *incc) nogil
+
+cdef void zlarnv(int *idist, int *iseed, int *n, z *x) nogil
+
+cdef void zlarrv(int *n, d *vl, d *vu, d *d, d *l, d *pivmin, int *isplit, int *m, int *dol, int *dou, d *minrgp, d *rtol1, d *rtol2, d *w, d *werr, d *wgap, int *iblock, int *indexw, d *gers, z *z, int *ldz, int *isuppz, d *work, int *iwork, int *info) nogil
+
+cdef void zlartg(z *f, z *g, d *cs, z *sn, z *r) nogil
+
+cdef void zlartv(int *n, z *x, int *incx, z *y, int *incy, d *c, z *s, int *incc) nogil
+
+cdef void zlarz(char *side, int *m, int *n, int *l, z *v, int *incv, z *tau, z *c, int *ldc, z *work) nogil
+
+cdef void zlarzb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, z *v, int *ldv, z *t, int *ldt, z *c, int *ldc, z *work, int *ldwork) nogil
+
+cdef void zlarzt(char *direct, char *storev, int *n, int *k, z *v, int *ldv, z *tau, z *t, int *ldt) nogil
+
+cdef void zlascl(char *type_bn, int *kl, int *ku, d *cfrom, d *cto, int *m, int *n, z *a, int *lda, int *info) nogil
+
+cdef void zlaset(char *uplo, int *m, int *n, z *alpha, z *beta, z *a, int *lda) nogil
+
+cdef void zlasr(char *side, char *pivot, char *direct, int *m, int *n, d *c, d *s, z *a, int *lda) nogil
+
+cdef void zlassq(int *n, z *x, int *incx, d *scale, d *sumsq) nogil
+
+cdef void zlaswp(int *n, z *a, int *lda, int *k1, int *k2, int *ipiv, int *incx) nogil
+
+cdef void zlasyf(char *uplo, int *n, int *nb, int *kb, z *a, int *lda, int *ipiv, z *w, int *ldw, int *info) nogil
+
+cdef void zlat2c(char *uplo, int *n, z *a, int *lda, c *sa, int *ldsa, int *info) nogil
+
+cdef void zlatbs(char *uplo, char *trans, char *diag, char *normin, int *n, int *kd, z *ab, int *ldab, z *x, d *scale, d *cnorm, int *info) nogil
+
+cdef void zlatdf(int *ijob, int *n, z *z, int *ldz, z *rhs, d *rdsum, d *rdscal, int *ipiv, int *jpiv) nogil
+
+cdef void zlatps(char *uplo, char *trans, char *diag, char *normin, int *n, z *ap, z *x, d *scale, d *cnorm, int *info) nogil
+
+cdef void zlatrd(char *uplo, int *n, int *nb, z *a, int *lda, d *e, z *tau, z *w, int *ldw) nogil
+
+cdef void zlatrs(char *uplo, char *trans, char *diag, char *normin, int *n, z *a, int *lda, z *x, d *scale, d *cnorm, int *info) nogil
+
+cdef void zlatrz(int *m, int *n, int *l, z *a, int *lda, z *tau, z *work) nogil
+
+cdef void zlauu2(char *uplo, int *n, z *a, int *lda, int *info) nogil
+
+cdef void zlauum(char *uplo, int *n, z *a, int *lda, int *info) nogil
+
+cdef void zpbcon(char *uplo, int *n, int *kd, z *ab, int *ldab, d *anorm, d *rcond, z *work, d *rwork, int *info) nogil
+
+cdef void zpbequ(char *uplo, int *n, int *kd, z *ab, int *ldab, d *s, d *scond, d *amax, int *info) nogil
+
+cdef void zpbrfs(char *uplo, int *n, int *kd, int *nrhs, z *ab, int *ldab, z *afb, int *ldafb, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zpbstf(char *uplo, int *n, int *kd, z *ab, int *ldab, int *info) nogil
+
+cdef void zpbsv(char *uplo, int *n, int *kd, int *nrhs, z *ab, int *ldab, z *b, int *ldb, int *info) nogil
+
+cdef void zpbsvx(char *fact, char *uplo, int *n, int *kd, int *nrhs, z *ab, int *ldab, z *afb, int *ldafb, char *equed, d *s, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zpbtf2(char *uplo, int *n, int *kd, z *ab, int *ldab, int *info) nogil
+
+cdef void zpbtrf(char *uplo, int *n, int *kd, z *ab, int *ldab, int *info) nogil
+
+cdef void zpbtrs(char *uplo, int *n, int *kd, int *nrhs, z *ab, int *ldab, z *b, int *ldb, int *info) nogil
+
+cdef void zpftrf(char *transr, char *uplo, int *n, z *a, int *info) nogil
+
+cdef void zpftri(char *transr, char *uplo, int *n, z *a, int *info) nogil
+
+cdef void zpftrs(char *transr, char *uplo, int *n, int *nrhs, z *a, z *b, int *ldb, int *info) nogil
+
+cdef void zpocon(char *uplo, int *n, z *a, int *lda, d *anorm, d *rcond, z *work, d *rwork, int *info) nogil
+
+cdef void zpoequ(int *n, z *a, int *lda, d *s, d *scond, d *amax, int *info) nogil
+
+cdef void zpoequb(int *n, z *a, int *lda, d *s, d *scond, d *amax, int *info) nogil
+
+cdef void zporfs(char *uplo, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zposv(char *uplo, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, int *info) nogil
+
+cdef void zposvx(char *fact, char *uplo, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, char *equed, d *s, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zpotf2(char *uplo, int *n, z *a, int *lda, int *info) nogil
+
+cdef void zpotrf(char *uplo, int *n, z *a, int *lda, int *info) nogil
+
+cdef void zpotri(char *uplo, int *n, z *a, int *lda, int *info) nogil
+
+cdef void zpotrs(char *uplo, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, int *info) nogil
+
+cdef void zppcon(char *uplo, int *n, z *ap, d *anorm, d *rcond, z *work, d *rwork, int *info) nogil
+
+cdef void zppequ(char *uplo, int *n, z *ap, d *s, d *scond, d *amax, int *info) nogil
+
+cdef void zpprfs(char *uplo, int *n, int *nrhs, z *ap, z *afp, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zppsv(char *uplo, int *n, int *nrhs, z *ap, z *b, int *ldb, int *info) nogil
+
+cdef void zppsvx(char *fact, char *uplo, int *n, int *nrhs, z *ap, z *afp, char *equed, d *s, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zpptrf(char *uplo, int *n, z *ap, int *info) nogil
+
+cdef void zpptri(char *uplo, int *n, z *ap, int *info) nogil
+
+cdef void zpptrs(char *uplo, int *n, int *nrhs, z *ap, z *b, int *ldb, int *info) nogil
+
+cdef void zpstf2(char *uplo, int *n, z *a, int *lda, int *piv, int *rank, d *tol, d *work, int *info) nogil
+
+cdef void zpstrf(char *uplo, int *n, z *a, int *lda, int *piv, int *rank, d *tol, d *work, int *info) nogil
+
+cdef void zptcon(int *n, d *d, z *e, d *anorm, d *rcond, d *rwork, int *info) nogil
+
+cdef void zpteqr(char *compz, int *n, d *d, d *e, z *z, int *ldz, d *work, int *info) nogil
+
+cdef void zptrfs(char *uplo, int *n, int *nrhs, d *d, z *e, d *df, z *ef, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zptsv(int *n, int *nrhs, d *d, z *e, z *b, int *ldb, int *info) nogil
+
+cdef void zptsvx(char *fact, int *n, int *nrhs, d *d, z *e, d *df, z *ef, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zpttrf(int *n, d *d, z *e, int *info) nogil
+
+cdef void zpttrs(char *uplo, int *n, int *nrhs, d *d, z *e, z *b, int *ldb, int *info) nogil
+
+cdef void zptts2(int *iuplo, int *n, int *nrhs, d *d, z *e, z *b, int *ldb) nogil
+
+cdef void zrot(int *n, z *cx, int *incx, z *cy, int *incy, d *c, z *s) nogil
+
+cdef void zspcon(char *uplo, int *n, z *ap, int *ipiv, d *anorm, d *rcond, z *work, int *info) nogil
+
+cdef void zspmv(char *uplo, int *n, z *alpha, z *ap, z *x, int *incx, z *beta, z *y, int *incy) nogil
+
+cdef void zspr(char *uplo, int *n, z *alpha, z *x, int *incx, z *ap) nogil
+
+cdef void zsprfs(char *uplo, int *n, int *nrhs, z *ap, z *afp, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zspsv(char *uplo, int *n, int *nrhs, z *ap, int *ipiv, z *b, int *ldb, int *info) nogil
+
+cdef void zspsvx(char *fact, char *uplo, int *n, int *nrhs, z *ap, z *afp, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zsptrf(char *uplo, int *n, z *ap, int *ipiv, int *info) nogil
+
+cdef void zsptri(char *uplo, int *n, z *ap, int *ipiv, z *work, int *info) nogil
+
+cdef void zsptrs(char *uplo, int *n, int *nrhs, z *ap, int *ipiv, z *b, int *ldb, int *info) nogil
+
+cdef void zstedc(char *compz, int *n, d *d, d *e, z *z, int *ldz, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void zstegr(char *jobz, char *range, int *n, d *d, d *e, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, int *isuppz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void zstein(int *n, d *d, d *e, int *m, d *w, int *iblock, int *isplit, z *z, int *ldz, d *work, int *iwork, int *ifail, int *info) nogil
+
+cdef void zstemr(char *jobz, char *range, int *n, d *d, d *e, d *vl, d *vu, int *il, int *iu, int *m, d *w, z *z, int *ldz, int *nzc, int *isuppz, bint *tryrac, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void zsteqr(char *compz, int *n, d *d, d *e, z *z, int *ldz, d *work, int *info) nogil
+
+cdef void zsycon(char *uplo, int *n, z *a, int *lda, int *ipiv, d *anorm, d *rcond, z *work, int *info) nogil
+
+cdef void zsyconv(char *uplo, char *way, int *n, z *a, int *lda, int *ipiv, z *work, int *info) nogil
+
+cdef void zsyequb(char *uplo, int *n, z *a, int *lda, d *s, d *scond, d *amax, z *work, int *info) nogil
+
+cdef void zsymv(char *uplo, int *n, z *alpha, z *a, int *lda, z *x, int *incx, z *beta, z *y, int *incy) nogil
+
+cdef void zsyr(char *uplo, int *n, z *alpha, z *x, int *incx, z *a, int *lda) nogil
+
+cdef void zsyrfs(char *uplo, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void zsysv(char *uplo, int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, z *work, int *lwork, int *info) nogil
+
+cdef void zsysvx(char *fact, char *uplo, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, int *lwork, d *rwork, int *info) nogil
+
+cdef void zsyswapr(char *uplo, int *n, z *a, int *lda, int *i1, int *i2) nogil
+
+cdef void zsytf2(char *uplo, int *n, z *a, int *lda, int *ipiv, int *info) nogil
+
+cdef void zsytrf(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *lwork, int *info) nogil
+
+cdef void zsytri(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *info) nogil
+
+cdef void zsytri2(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *lwork, int *info) nogil
+
+cdef void zsytri2x(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *nb, int *info) nogil
+
+cdef void zsytrs(char *uplo, int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, int *info) nogil
+
+cdef void zsytrs2(char *uplo, int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, z *work, int *info) nogil
+
+cdef void ztbcon(char *norm, char *uplo, char *diag, int *n, int *kd, z *ab, int *ldab, d *rcond, z *work, d *rwork, int *info) nogil
+
+cdef void ztbrfs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, z *ab, int *ldab, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void ztbtrs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, z *ab, int *ldab, z *b, int *ldb, int *info) nogil
+
+cdef void ztfsm(char *transr, char *side, char *uplo, char *trans, char *diag, int *m, int *n, z *alpha, z *a, z *b, int *ldb) nogil
+
+cdef void ztftri(char *transr, char *uplo, char *diag, int *n, z *a, int *info) nogil
+
+cdef void ztfttp(char *transr, char *uplo, int *n, z *arf, z *ap, int *info) nogil
+
+cdef void ztfttr(char *transr, char *uplo, int *n, z *arf, z *a, int *lda, int *info) nogil
+
+cdef void ztgevc(char *side, char *howmny, bint *select, int *n, z *s, int *lds, z *p, int *ldp, z *vl, int *ldvl, z *vr, int *ldvr, int *mm, int *m, z *work, d *rwork, int *info) nogil
+
+cdef void ztgex2(bint *wantq, bint *wantz, int *n, z *a, int *lda, z *b, int *ldb, z *q, int *ldq, z *z, int *ldz, int *j1, int *info) nogil
+
+cdef void ztgexc(bint *wantq, bint *wantz, int *n, z *a, int *lda, z *b, int *ldb, z *q, int *ldq, z *z, int *ldz, int *ifst, int *ilst, int *info) nogil
+
+cdef void ztgsen(int *ijob, bint *wantq, bint *wantz, bint *select, int *n, z *a, int *lda, z *b, int *ldb, z *alpha, z *beta, z *q, int *ldq, z *z, int *ldz, int *m, d *pl, d *pr, d *dif, z *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+
+cdef void ztgsja(char *jobu, char *jobv, char *jobq, int *m, int *p, int *n, int *k, int *l, z *a, int *lda, z *b, int *ldb, d *tola, d *tolb, d *alpha, d *beta, z *u, int *ldu, z *v, int *ldv, z *q, int *ldq, z *work, int *ncycle, int *info) nogil
+
+cdef void ztgsna(char *job, char *howmny, bint *select, int *n, z *a, int *lda, z *b, int *ldb, z *vl, int *ldvl, z *vr, int *ldvr, d *s, d *dif, int *mm, int *m, z *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void ztgsy2(char *trans, int *ijob, int *m, int *n, z *a, int *lda, z *b, int *ldb, z *c, int *ldc, z *d, int *ldd, z *e, int *lde, z *f, int *ldf, d *scale, d *rdsum, d *rdscal, int *info) nogil
+
+cdef void ztgsyl(char *trans, int *ijob, int *m, int *n, z *a, int *lda, z *b, int *ldb, z *c, int *ldc, z *d, int *ldd, z *e, int *lde, z *f, int *ldf, d *scale, d *dif, z *work, int *lwork, int *iwork, int *info) nogil
+
+cdef void ztpcon(char *norm, char *uplo, char *diag, int *n, z *ap, d *rcond, z *work, d *rwork, int *info) nogil
+
+cdef void ztpmqrt(char *side, char *trans, int *m, int *n, int *k, int *l, int *nb, z *v, int *ldv, z *t, int *ldt, z *a, int *lda, z *b, int *ldb, z *work, int *info) nogil
+
+cdef void ztpqrt(int *m, int *n, int *l, int *nb, z *a, int *lda, z *b, int *ldb, z *t, int *ldt, z *work, int *info) nogil
+
+cdef void ztpqrt2(int *m, int *n, int *l, z *a, int *lda, z *b, int *ldb, z *t, int *ldt, int *info) nogil
+
+cdef void ztprfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, z *v, int *ldv, z *t, int *ldt, z *a, int *lda, z *b, int *ldb, z *work, int *ldwork) nogil
+
+cdef void ztprfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, z *ap, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void ztptri(char *uplo, char *diag, int *n, z *ap, int *info) nogil
+
+cdef void ztptrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, z *ap, z *b, int *ldb, int *info) nogil
+
+cdef void ztpttf(char *transr, char *uplo, int *n, z *ap, z *arf, int *info) nogil
+
+cdef void ztpttr(char *uplo, int *n, z *ap, z *a, int *lda, int *info) nogil
+
+cdef void ztrcon(char *norm, char *uplo, char *diag, int *n, z *a, int *lda, d *rcond, z *work, d *rwork, int *info) nogil
+
+cdef void ztrevc(char *side, char *howmny, bint *select, int *n, z *t, int *ldt, z *vl, int *ldvl, z *vr, int *ldvr, int *mm, int *m, z *work, d *rwork, int *info) nogil
+
+cdef void ztrexc(char *compq, int *n, z *t, int *ldt, z *q, int *ldq, int *ifst, int *ilst, int *info) nogil
+
+cdef void ztrrfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil
+
+cdef void ztrsen(char *job, char *compq, bint *select, int *n, z *t, int *ldt, z *q, int *ldq, z *w, int *m, d *s, d *sep, z *work, int *lwork, int *info) nogil
+
+cdef void ztrsna(char *job, char *howmny, bint *select, int *n, z *t, int *ldt, z *vl, int *ldvl, z *vr, int *ldvr, d *s, d *sep, int *mm, int *m, z *work, int *ldwork, d *rwork, int *info) nogil
+
+cdef void ztrsyl(char *trana, char *tranb, int *isgn, int *m, int *n, z *a, int *lda, z *b, int *ldb, z *c, int *ldc, d *scale, int *info) nogil
+
+cdef void ztrti2(char *uplo, char *diag, int *n, z *a, int *lda, int *info) nogil
+
+cdef void ztrtri(char *uplo, char *diag, int *n, z *a, int *lda, int *info) nogil
+
+cdef void ztrtrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, int *info) nogil
+
+cdef void ztrttf(char *transr, char *uplo, int *n, z *a, int *lda, z *arf, int *info) nogil
+
+cdef void ztrttp(char *uplo, int *n, z *a, int *lda, z *ap, int *info) nogil
+
+cdef void ztzrzf(int *m, int *n, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil
+
+cdef void zunbdb(char *trans, char *signs, int *m, int *p, int *q, z *x11, int *ldx11, z *x12, int *ldx12, z *x21, int *ldx21, z *x22, int *ldx22, d *theta, d *phi, z *taup1, z *taup2, z *tauq1, z *tauq2, z *work, int *lwork, int *info) nogil
+
+cdef void zuncsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, char *signs, int *m, int *p, int *q, z *x11, int *ldx11, z *x12, int *ldx12, z *x21, int *ldx21, z *x22, int *ldx22, d *theta, z *u1, int *ldu1, z *u2, int *ldu2, z *v1t, int *ldv1t, z *v2t, int *ldv2t, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *info) nogil
+
+cdef void zung2l(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *info) nogil
+
+cdef void zung2r(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *info) nogil
+
+cdef void zungbr(char *vect, int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil
+
+cdef void zunghr(int *n, int *ilo, int *ihi, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil
+
+cdef void zungl2(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *info) nogil
+
+cdef void zunglq(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil
+
+cdef void zungql(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil
+
+cdef void zungqr(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil
+
+cdef void zungr2(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *info) nogil
+
+cdef void zungrq(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil
+
+cdef void zungtr(char *uplo, int *n, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil
+
+cdef void zunm2l(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *info) nogil
+
+cdef void zunm2r(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *info) nogil
+
+cdef void zunmbr(char *vect, char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil
+
+cdef void zunmhr(char *side, char *trans, int *m, int *n, int *ilo, int *ihi, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil
+
+cdef void zunml2(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *info) nogil
+
+cdef void zunmlq(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil
+
+cdef void zunmql(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil
+
+cdef void zunmqr(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil
+
+cdef void zunmr2(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *info) nogil
+
+cdef void zunmr3(char *side, char *trans, int *m, int *n, int *k, int *l, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *info) nogil
+
+cdef void zunmrq(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil
+
+cdef void zunmrz(char *side, char *trans, int *m, int *n, int *k, int *l, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil
+
+cdef void zunmtr(char *side, char *uplo, char *trans, int *m, int *n, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil
+
+cdef void zupgtr(char *uplo, int *n, z *ap, z *tau, z *q, int *ldq, z *work, int *info) nogil
+
+cdef void zupmtr(char *side, char *uplo, char *trans, int *m, int *n, z *ap, z *tau, z *c, int *ldc, z *work, int *info) nogil
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/cython_lapack.pyx b/__packaged__/coreml/.python_dependencies/scipy/linalg/cython_lapack.pyx
new file mode 100644
index 00000000..f21252b7
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/cython_lapack.pyx
@@ -0,0 +1,9293 @@
+# This file was generated by _generate_pyx.py.
+# Do not edit this file directly.
+
+"""
+LAPACK functions for Cython
+===========================
+
+Usable from Cython via::
+
+    cimport scipy.linalg.cython_lapack
+
+This module provides Cython-level wrappers for all primary routines included
+in LAPACK 3.4.0, except for ``zcgesv``, whose interface is not consistent
+across LAPACK versions 3.4.0 through 3.6.0. It also provides some of the
+fixed-API auxiliary routines.
+
+These wrappers do not check for alignment of arrays.
+Alignment should be checked before these wrappers are used.
+
+Raw function pointers (Fortran-style pointer arguments):
+
+- cbbcsd
+- cbdsqr
+- cgbbrd
+- cgbcon
+- cgbequ
+- cgbequb
+- cgbrfs
+- cgbsv
+- cgbsvx
+- cgbtf2
+- cgbtrf
+- cgbtrs
+- cgebak
+- cgebal
+- cgebd2
+- cgebrd
+- cgecon
+- cgeequ
+- cgeequb
+- cgees
+- cgeesx
+- cgeev
+- cgeevx
+- cgehd2
+- cgehrd
+- cgelq2
+- cgelqf
+- cgels
+- cgelsd
+- cgelss
+- cgelsy
+- cgemqrt
+- cgeql2
+- cgeqlf
+- cgeqp3
+- cgeqr2
+- cgeqr2p
+- cgeqrf
+- cgeqrfp
+- cgeqrt
+- cgeqrt2
+- cgeqrt3
+- cgerfs
+- cgerq2
+- cgerqf
+- cgesc2
+- cgesdd
+- cgesv
+- cgesvd
+- cgesvx
+- cgetc2
+- cgetf2
+- cgetrf
+- cgetri
+- cgetrs
+- cggbak
+- cggbal
+- cgges
+- cggesx
+- cggev
+- cggevx
+- cggglm
+- cgghrd
+- cgglse
+- cggqrf
+- cggrqf
+- cgtcon
+- cgtrfs
+- cgtsv
+- cgtsvx
+- cgttrf
+- cgttrs
+- cgtts2
+- chbev
+- chbevd
+- chbevx
+- chbgst
+- chbgv
+- chbgvd
+- chbgvx
+- chbtrd
+- checon
+- cheequb
+- cheev
+- cheevd
+- cheevr
+- cheevx
+- chegs2
+- chegst
+- chegv
+- chegvd
+- chegvx
+- cherfs
+- chesv
+- chesvx
+- cheswapr
+- chetd2
+- chetf2
+- chetrd
+- chetrf
+- chetri
+- chetri2
+- chetri2x
+- chetrs
+- chetrs2
+- chfrk
+- chgeqz
+- chla_transtype
+- chpcon
+- chpev
+- chpevd
+- chpevx
+- chpgst
+- chpgv
+- chpgvd
+- chpgvx
+- chprfs
+- chpsv
+- chpsvx
+- chptrd
+- chptrf
+- chptri
+- chptrs
+- chsein
+- chseqr
+- clabrd
+- clacgv
+- clacn2
+- clacon
+- clacp2
+- clacpy
+- clacrm
+- clacrt
+- cladiv
+- claed0
+- claed7
+- claed8
+- claein
+- claesy
+- claev2
+- clag2z
+- clags2
+- clagtm
+- clahef
+- clahqr
+- clahr2
+- claic1
+- clals0
+- clalsa
+- clalsd
+- clangb
+- clange
+- clangt
+- clanhb
+- clanhe
+- clanhf
+- clanhp
+- clanhs
+- clanht
+- clansb
+- clansp
+- clansy
+- clantb
+- clantp
+- clantr
+- clapll
+- clapmr
+- clapmt
+- claqgb
+- claqge
+- claqhb
+- claqhe
+- claqhp
+- claqp2
+- claqps
+- claqr0
+- claqr1
+- claqr2
+- claqr3
+- claqr4
+- claqr5
+- claqsb
+- claqsp
+- claqsy
+- clar1v
+- clar2v
+- clarcm
+- clarf
+- clarfb
+- clarfg
+- clarfgp
+- clarft
+- clarfx
+- clargv
+- clarnv
+- clarrv
+- clartg
+- clartv
+- clarz
+- clarzb
+- clarzt
+- clascl
+- claset
+- clasr
+- classq
+- claswp
+- clasyf
+- clatbs
+- clatdf
+- clatps
+- clatrd
+- clatrs
+- clatrz
+- clauu2
+- clauum
+- cpbcon
+- cpbequ
+- cpbrfs
+- cpbstf
+- cpbsv
+- cpbsvx
+- cpbtf2
+- cpbtrf
+- cpbtrs
+- cpftrf
+- cpftri
+- cpftrs
+- cpocon
+- cpoequ
+- cpoequb
+- cporfs
+- cposv
+- cposvx
+- cpotf2
+- cpotrf
+- cpotri
+- cpotrs
+- cppcon
+- cppequ
+- cpprfs
+- cppsv
+- cppsvx
+- cpptrf
+- cpptri
+- cpptrs
+- cpstf2
+- cpstrf
+- cptcon
+- cpteqr
+- cptrfs
+- cptsv
+- cptsvx
+- cpttrf
+- cpttrs
+- cptts2
+- crot
+- cspcon
+- cspmv
+- cspr
+- csprfs
+- cspsv
+- cspsvx
+- csptrf
+- csptri
+- csptrs
+- csrscl
+- cstedc
+- cstegr
+- cstein
+- cstemr
+- csteqr
+- csycon
+- csyconv
+- csyequb
+- csymv
+- csyr
+- csyrfs
+- csysv
+- csysvx
+- csyswapr
+- csytf2
+- csytrf
+- csytri
+- csytri2
+- csytri2x
+- csytrs
+- csytrs2
+- ctbcon
+- ctbrfs
+- ctbtrs
+- ctfsm
+- ctftri
+- ctfttp
+- ctfttr
+- ctgevc
+- ctgex2
+- ctgexc
+- ctgsen
+- ctgsja
+- ctgsna
+- ctgsy2
+- ctgsyl
+- ctpcon
+- ctpmqrt
+- ctpqrt
+- ctpqrt2
+- ctprfb
+- ctprfs
+- ctptri
+- ctptrs
+- ctpttf
+- ctpttr
+- ctrcon
+- ctrevc
+- ctrexc
+- ctrrfs
+- ctrsen
+- ctrsna
+- ctrsyl
+- ctrti2
+- ctrtri
+- ctrtrs
+- ctrttf
+- ctrttp
+- ctzrzf
+- cunbdb
+- cuncsd
+- cung2l
+- cung2r
+- cungbr
+- cunghr
+- cungl2
+- cunglq
+- cungql
+- cungqr
+- cungr2
+- cungrq
+- cungtr
+- cunm2l
+- cunm2r
+- cunmbr
+- cunmhr
+- cunml2
+- cunmlq
+- cunmql
+- cunmqr
+- cunmr2
+- cunmr3
+- cunmrq
+- cunmrz
+- cunmtr
+- cupgtr
+- cupmtr
+- dbbcsd
+- dbdsdc
+- dbdsqr
+- ddisna
+- dgbbrd
+- dgbcon
+- dgbequ
+- dgbequb
+- dgbrfs
+- dgbsv
+- dgbsvx
+- dgbtf2
+- dgbtrf
+- dgbtrs
+- dgebak
+- dgebal
+- dgebd2
+- dgebrd
+- dgecon
+- dgeequ
+- dgeequb
+- dgees
+- dgeesx
+- dgeev
+- dgeevx
+- dgehd2
+- dgehrd
+- dgejsv
+- dgelq2
+- dgelqf
+- dgels
+- dgelsd
+- dgelss
+- dgelsy
+- dgemqrt
+- dgeql2
+- dgeqlf
+- dgeqp3
+- dgeqr2
+- dgeqr2p
+- dgeqrf
+- dgeqrfp
+- dgeqrt
+- dgeqrt2
+- dgeqrt3
+- dgerfs
+- dgerq2
+- dgerqf
+- dgesc2
+- dgesdd
+- dgesv
+- dgesvd
+- dgesvj
+- dgesvx
+- dgetc2
+- dgetf2
+- dgetrf
+- dgetri
+- dgetrs
+- dggbak
+- dggbal
+- dgges
+- dggesx
+- dggev
+- dggevx
+- dggglm
+- dgghrd
+- dgglse
+- dggqrf
+- dggrqf
+- dgsvj0
+- dgsvj1
+- dgtcon
+- dgtrfs
+- dgtsv
+- dgtsvx
+- dgttrf
+- dgttrs
+- dgtts2
+- dhgeqz
+- dhsein
+- dhseqr
+- disnan
+- dlabad
+- dlabrd
+- dlacn2
+- dlacon
+- dlacpy
+- dladiv
+- dlae2
+- dlaebz
+- dlaed0
+- dlaed1
+- dlaed2
+- dlaed3
+- dlaed4
+- dlaed5
+- dlaed6
+- dlaed7
+- dlaed8
+- dlaed9
+- dlaeda
+- dlaein
+- dlaev2
+- dlaexc
+- dlag2
+- dlag2s
+- dlags2
+- dlagtf
+- dlagtm
+- dlagts
+- dlagv2
+- dlahqr
+- dlahr2
+- dlaic1
+- dlaln2
+- dlals0
+- dlalsa
+- dlalsd
+- dlamch
+- dlamrg
+- dlaneg
+- dlangb
+- dlange
+- dlangt
+- dlanhs
+- dlansb
+- dlansf
+- dlansp
+- dlanst
+- dlansy
+- dlantb
+- dlantp
+- dlantr
+- dlanv2
+- dlapll
+- dlapmr
+- dlapmt
+- dlapy2
+- dlapy3
+- dlaqgb
+- dlaqge
+- dlaqp2
+- dlaqps
+- dlaqr0
+- dlaqr1
+- dlaqr2
+- dlaqr3
+- dlaqr4
+- dlaqr5
+- dlaqsb
+- dlaqsp
+- dlaqsy
+- dlaqtr
+- dlar1v
+- dlar2v
+- dlarf
+- dlarfb
+- dlarfg
+- dlarfgp
+- dlarft
+- dlarfx
+- dlargv
+- dlarnv
+- dlarra
+- dlarrb
+- dlarrc
+- dlarrd
+- dlarre
+- dlarrf
+- dlarrj
+- dlarrk
+- dlarrr
+- dlarrv
+- dlartg
+- dlartgp
+- dlartgs
+- dlartv
+- dlaruv
+- dlarz
+- dlarzb
+- dlarzt
+- dlas2
+- dlascl
+- dlasd0
+- dlasd1
+- dlasd2
+- dlasd3
+- dlasd4
+- dlasd5
+- dlasd6
+- dlasd7
+- dlasd8
+- dlasda
+- dlasdq
+- dlasdt
+- dlaset
+- dlasq1
+- dlasq2
+- dlasq3
+- dlasq4
+- dlasq6
+- dlasr
+- dlasrt
+- dlassq
+- dlasv2
+- dlaswp
+- dlasy2
+- dlasyf
+- dlat2s
+- dlatbs
+- dlatdf
+- dlatps
+- dlatrd
+- dlatrs
+- dlatrz
+- dlauu2
+- dlauum
+- dopgtr
+- dopmtr
+- dorbdb
+- dorcsd
+- dorg2l
+- dorg2r
+- dorgbr
+- dorghr
+- dorgl2
+- dorglq
+- dorgql
+- dorgqr
+- dorgr2
+- dorgrq
+- dorgtr
+- dorm2l
+- dorm2r
+- dormbr
+- dormhr
+- dorml2
+- dormlq
+- dormql
+- dormqr
+- dormr2
+- dormr3
+- dormrq
+- dormrz
+- dormtr
+- dpbcon
+- dpbequ
+- dpbrfs
+- dpbstf
+- dpbsv
+- dpbsvx
+- dpbtf2
+- dpbtrf
+- dpbtrs
+- dpftrf
+- dpftri
+- dpftrs
+- dpocon
+- dpoequ
+- dpoequb
+- dporfs
+- dposv
+- dposvx
+- dpotf2
+- dpotrf
+- dpotri
+- dpotrs
+- dppcon
+- dppequ
+- dpprfs
+- dppsv
+- dppsvx
+- dpptrf
+- dpptri
+- dpptrs
+- dpstf2
+- dpstrf
+- dptcon
+- dpteqr
+- dptrfs
+- dptsv
+- dptsvx
+- dpttrf
+- dpttrs
+- dptts2
+- drscl
+- dsbev
+- dsbevd
+- dsbevx
+- dsbgst
+- dsbgv
+- dsbgvd
+- dsbgvx
+- dsbtrd
+- dsfrk
+- dsgesv
+- dspcon
+- dspev
+- dspevd
+- dspevx
+- dspgst
+- dspgv
+- dspgvd
+- dspgvx
+- dsposv
+- dsprfs
+- dspsv
+- dspsvx
+- dsptrd
+- dsptrf
+- dsptri
+- dsptrs
+- dstebz
+- dstedc
+- dstegr
+- dstein
+- dstemr
+- dsteqr
+- dsterf
+- dstev
+- dstevd
+- dstevr
+- dstevx
+- dsycon
+- dsyconv
+- dsyequb
+- dsyev
+- dsyevd
+- dsyevr
+- dsyevx
+- dsygs2
+- dsygst
+- dsygv
+- dsygvd
+- dsygvx
+- dsyrfs
+- dsysv
+- dsysvx
+- dsyswapr
+- dsytd2
+- dsytf2
+- dsytrd
+- dsytrf
+- dsytri
+- dsytri2
+- dsytri2x
+- dsytrs
+- dsytrs2
+- dtbcon
+- dtbrfs
+- dtbtrs
+- dtfsm
+- dtftri
+- dtfttp
+- dtfttr
+- dtgevc
+- dtgex2
+- dtgexc
+- dtgsen
+- dtgsja
+- dtgsna
+- dtgsy2
+- dtgsyl
+- dtpcon
+- dtpmqrt
+- dtpqrt
+- dtpqrt2
+- dtprfb
+- dtprfs
+- dtptri
+- dtptrs
+- dtpttf
+- dtpttr
+- dtrcon
+- dtrevc
+- dtrexc
+- dtrrfs
+- dtrsen
+- dtrsna
+- dtrsyl
+- dtrti2
+- dtrtri
+- dtrtrs
+- dtrttf
+- dtrttp
+- dtzrzf
+- dzsum1
+- icmax1
+- ieeeck
+- ilaclc
+- ilaclr
+- iladiag
+- iladlc
+- iladlr
+- ilaprec
+- ilaslc
+- ilaslr
+- ilatrans
+- ilauplo
+- ilaver
+- ilazlc
+- ilazlr
+- izmax1
+- sbbcsd
+- sbdsdc
+- sbdsqr
+- scsum1
+- sdisna
+- sgbbrd
+- sgbcon
+- sgbequ
+- sgbequb
+- sgbrfs
+- sgbsv
+- sgbsvx
+- sgbtf2
+- sgbtrf
+- sgbtrs
+- sgebak
+- sgebal
+- sgebd2
+- sgebrd
+- sgecon
+- sgeequ
+- sgeequb
+- sgees
+- sgeesx
+- sgeev
+- sgeevx
+- sgehd2
+- sgehrd
+- sgejsv
+- sgelq2
+- sgelqf
+- sgels
+- sgelsd
+- sgelss
+- sgelsy
+- sgemqrt
+- sgeql2
+- sgeqlf
+- sgeqp3
+- sgeqr2
+- sgeqr2p
+- sgeqrf
+- sgeqrfp
+- sgeqrt
+- sgeqrt2
+- sgeqrt3
+- sgerfs
+- sgerq2
+- sgerqf
+- sgesc2
+- sgesdd
+- sgesv
+- sgesvd
+- sgesvj
+- sgesvx
+- sgetc2
+- sgetf2
+- sgetrf
+- sgetri
+- sgetrs
+- sggbak
+- sggbal
+- sgges
+- sggesx
+- sggev
+- sggevx
+- sggglm
+- sgghrd
+- sgglse
+- sggqrf
+- sggrqf
+- sgsvj0
+- sgsvj1
+- sgtcon
+- sgtrfs
+- sgtsv
+- sgtsvx
+- sgttrf
+- sgttrs
+- sgtts2
+- shgeqz
+- shsein
+- shseqr
+- slabad
+- slabrd
+- slacn2
+- slacon
+- slacpy
+- sladiv
+- slae2
+- slaebz
+- slaed0
+- slaed1
+- slaed2
+- slaed3
+- slaed4
+- slaed5
+- slaed6
+- slaed7
+- slaed8
+- slaed9
+- slaeda
+- slaein
+- slaev2
+- slaexc
+- slag2
+- slag2d
+- slags2
+- slagtf
+- slagtm
+- slagts
+- slagv2
+- slahqr
+- slahr2
+- slaic1
+- slaln2
+- slals0
+- slalsa
+- slalsd
+- slamch
+- slamrg
+- slangb
+- slange
+- slangt
+- slanhs
+- slansb
+- slansf
+- slansp
+- slanst
+- slansy
+- slantb
+- slantp
+- slantr
+- slanv2
+- slapll
+- slapmr
+- slapmt
+- slapy2
+- slapy3
+- slaqgb
+- slaqge
+- slaqp2
+- slaqps
+- slaqr0
+- slaqr1
+- slaqr2
+- slaqr3
+- slaqr4
+- slaqr5
+- slaqsb
+- slaqsp
+- slaqsy
+- slaqtr
+- slar1v
+- slar2v
+- slarf
+- slarfb
+- slarfg
+- slarfgp
+- slarft
+- slarfx
+- slargv
+- slarnv
+- slarra
+- slarrb
+- slarrc
+- slarrd
+- slarre
+- slarrf
+- slarrj
+- slarrk
+- slarrr
+- slarrv
+- slartg
+- slartgp
+- slartgs
+- slartv
+- slaruv
+- slarz
+- slarzb
+- slarzt
+- slas2
+- slascl
+- slasd0
+- slasd1
+- slasd2
+- slasd3
+- slasd4
+- slasd5
+- slasd6
+- slasd7
+- slasd8
+- slasda
+- slasdq
+- slasdt
+- slaset
+- slasq1
+- slasq2
+- slasq3
+- slasq4
+- slasq6
+- slasr
+- slasrt
+- slassq
+- slasv2
+- slaswp
+- slasy2
+- slasyf
+- slatbs
+- slatdf
+- slatps
+- slatrd
+- slatrs
+- slatrz
+- slauu2
+- slauum
+- sopgtr
+- sopmtr
+- sorbdb
+- sorcsd
+- sorg2l
+- sorg2r
+- sorgbr
+- sorghr
+- sorgl2
+- sorglq
+- sorgql
+- sorgqr
+- sorgr2
+- sorgrq
+- sorgtr
+- sorm2l
+- sorm2r
+- sormbr
+- sormhr
+- sorml2
+- sormlq
+- sormql
+- sormqr
+- sormr2
+- sormr3
+- sormrq
+- sormrz
+- sormtr
+- spbcon
+- spbequ
+- spbrfs
+- spbstf
+- spbsv
+- spbsvx
+- spbtf2
+- spbtrf
+- spbtrs
+- spftrf
+- spftri
+- spftrs
+- spocon
+- spoequ
+- spoequb
+- sporfs
+- sposv
+- sposvx
+- spotf2
+- spotrf
+- spotri
+- spotrs
+- sppcon
+- sppequ
+- spprfs
+- sppsv
+- sppsvx
+- spptrf
+- spptri
+- spptrs
+- spstf2
+- spstrf
+- sptcon
+- spteqr
+- sptrfs
+- sptsv
+- sptsvx
+- spttrf
+- spttrs
+- sptts2
+- srscl
+- ssbev
+- ssbevd
+- ssbevx
+- ssbgst
+- ssbgv
+- ssbgvd
+- ssbgvx
+- ssbtrd
+- ssfrk
+- sspcon
+- sspev
+- sspevd
+- sspevx
+- sspgst
+- sspgv
+- sspgvd
+- sspgvx
+- ssprfs
+- sspsv
+- sspsvx
+- ssptrd
+- ssptrf
+- ssptri
+- ssptrs
+- sstebz
+- sstedc
+- sstegr
+- sstein
+- sstemr
+- ssteqr
+- ssterf
+- sstev
+- sstevd
+- sstevr
+- sstevx
+- ssycon
+- ssyconv
+- ssyequb
+- ssyev
+- ssyevd
+- ssyevr
+- ssyevx
+- ssygs2
+- ssygst
+- ssygv
+- ssygvd
+- ssygvx
+- ssyrfs
+- ssysv
+- ssysvx
+- ssyswapr
+- ssytd2
+- ssytf2
+- ssytrd
+- ssytrf
+- ssytri
+- ssytri2
+- ssytri2x
+- ssytrs
+- ssytrs2
+- stbcon
+- stbrfs
+- stbtrs
+- stfsm
+- stftri
+- stfttp
+- stfttr
+- stgevc
+- stgex2
+- stgexc
+- stgsen
+- stgsja
+- stgsna
+- stgsy2
+- stgsyl
+- stpcon
+- stpmqrt
+- stpqrt
+- stpqrt2
+- stprfb
+- stprfs
+- stptri
+- stptrs
+- stpttf
+- stpttr
+- strcon
+- strevc
+- strexc
+- strrfs
+- strsen
+- strsna
+- strsyl
+- strti2
+- strtri
+- strtrs
+- strttf
+- strttp
+- stzrzf
+- xerbla_array
+- zbbcsd
+- zbdsqr
+- zcposv
+- zdrscl
+- zgbbrd
+- zgbcon
+- zgbequ
+- zgbequb
+- zgbrfs
+- zgbsv
+- zgbsvx
+- zgbtf2
+- zgbtrf
+- zgbtrs
+- zgebak
+- zgebal
+- zgebd2
+- zgebrd
+- zgecon
+- zgeequ
+- zgeequb
+- zgees
+- zgeesx
+- zgeev
+- zgeevx
+- zgehd2
+- zgehrd
+- zgelq2
+- zgelqf
+- zgels
+- zgelsd
+- zgelss
+- zgelsy
+- zgemqrt
+- zgeql2
+- zgeqlf
+- zgeqp3
+- zgeqr2
+- zgeqr2p
+- zgeqrf
+- zgeqrfp
+- zgeqrt
+- zgeqrt2
+- zgeqrt3
+- zgerfs
+- zgerq2
+- zgerqf
+- zgesc2
+- zgesdd
+- zgesv
+- zgesvd
+- zgesvx
+- zgetc2
+- zgetf2
+- zgetrf
+- zgetri
+- zgetrs
+- zggbak
+- zggbal
+- zgges
+- zggesx
+- zggev
+- zggevx
+- zggglm
+- zgghrd
+- zgglse
+- zggqrf
+- zggrqf
+- zgtcon
+- zgtrfs
+- zgtsv
+- zgtsvx
+- zgttrf
+- zgttrs
+- zgtts2
+- zhbev
+- zhbevd
+- zhbevx
+- zhbgst
+- zhbgv
+- zhbgvd
+- zhbgvx
+- zhbtrd
+- zhecon
+- zheequb
+- zheev
+- zheevd
+- zheevr
+- zheevx
+- zhegs2
+- zhegst
+- zhegv
+- zhegvd
+- zhegvx
+- zherfs
+- zhesv
+- zhesvx
+- zheswapr
+- zhetd2
+- zhetf2
+- zhetrd
+- zhetrf
+- zhetri
+- zhetri2
+- zhetri2x
+- zhetrs
+- zhetrs2
+- zhfrk
+- zhgeqz
+- zhpcon
+- zhpev
+- zhpevd
+- zhpevx
+- zhpgst
+- zhpgv
+- zhpgvd
+- zhpgvx
+- zhprfs
+- zhpsv
+- zhpsvx
+- zhptrd
+- zhptrf
+- zhptri
+- zhptrs
+- zhsein
+- zhseqr
+- zlabrd
+- zlacgv
+- zlacn2
+- zlacon
+- zlacp2
+- zlacpy
+- zlacrm
+- zlacrt
+- zladiv
+- zlaed0
+- zlaed7
+- zlaed8
+- zlaein
+- zlaesy
+- zlaev2
+- zlag2c
+- zlags2
+- zlagtm
+- zlahef
+- zlahqr
+- zlahr2
+- zlaic1
+- zlals0
+- zlalsa
+- zlalsd
+- zlangb
+- zlange
+- zlangt
+- zlanhb
+- zlanhe
+- zlanhf
+- zlanhp
+- zlanhs
+- zlanht
+- zlansb
+- zlansp
+- zlansy
+- zlantb
+- zlantp
+- zlantr
+- zlapll
+- zlapmr
+- zlapmt
+- zlaqgb
+- zlaqge
+- zlaqhb
+- zlaqhe
+- zlaqhp
+- zlaqp2
+- zlaqps
+- zlaqr0
+- zlaqr1
+- zlaqr2
+- zlaqr3
+- zlaqr4
+- zlaqr5
+- zlaqsb
+- zlaqsp
+- zlaqsy
+- zlar1v
+- zlar2v
+- zlarcm
+- zlarf
+- zlarfb
+- zlarfg
+- zlarfgp
+- zlarft
+- zlarfx
+- zlargv
+- zlarnv
+- zlarrv
+- zlartg
+- zlartv
+- zlarz
+- zlarzb
+- zlarzt
+- zlascl
+- zlaset
+- zlasr
+- zlassq
+- zlaswp
+- zlasyf
+- zlat2c
+- zlatbs
+- zlatdf
+- zlatps
+- zlatrd
+- zlatrs
+- zlatrz
+- zlauu2
+- zlauum
+- zpbcon
+- zpbequ
+- zpbrfs
+- zpbstf
+- zpbsv
+- zpbsvx
+- zpbtf2
+- zpbtrf
+- zpbtrs
+- zpftrf
+- zpftri
+- zpftrs
+- zpocon
+- zpoequ
+- zpoequb
+- zporfs
+- zposv
+- zposvx
+- zpotf2
+- zpotrf
+- zpotri
+- zpotrs
+- zppcon
+- zppequ
+- zpprfs
+- zppsv
+- zppsvx
+- zpptrf
+- zpptri
+- zpptrs
+- zpstf2
+- zpstrf
+- zptcon
+- zpteqr
+- zptrfs
+- zptsv
+- zptsvx
+- zpttrf
+- zpttrs
+- zptts2
+- zrot
+- zspcon
+- zspmv
+- zspr
+- zsprfs
+- zspsv
+- zspsvx
+- zsptrf
+- zsptri
+- zsptrs
+- zstedc
+- zstegr
+- zstein
+- zstemr
+- zsteqr
+- zsycon
+- zsyconv
+- zsyequb
+- zsymv
+- zsyr
+- zsyrfs
+- zsysv
+- zsysvx
+- zsyswapr
+- zsytf2
+- zsytrf
+- zsytri
+- zsytri2
+- zsytri2x
+- zsytrs
+- zsytrs2
+- ztbcon
+- ztbrfs
+- ztbtrs
+- ztfsm
+- ztftri
+- ztfttp
+- ztfttr
+- ztgevc
+- ztgex2
+- ztgexc
+- ztgsen
+- ztgsja
+- ztgsna
+- ztgsy2
+- ztgsyl
+- ztpcon
+- ztpmqrt
+- ztpqrt
+- ztpqrt2
+- ztprfb
+- ztprfs
+- ztptri
+- ztptrs
+- ztpttf
+- ztpttr
+- ztrcon
+- ztrevc
+- ztrexc
+- ztrrfs
+- ztrsen
+- ztrsna
+- ztrsyl
+- ztrti2
+- ztrtri
+- ztrtrs
+- ztrttf
+- ztrttp
+- ztzrzf
+- zunbdb
+- zuncsd
+- zung2l
+- zung2r
+- zungbr
+- zunghr
+- zungl2
+- zunglq
+- zungql
+- zungqr
+- zungr2
+- zungrq
+- zungtr
+- zunm2l
+- zunm2r
+- zunmbr
+- zunmhr
+- zunml2
+- zunmlq
+- zunmql
+- zunmqr
+- zunmr2
+- zunmr3
+- zunmrq
+- zunmrz
+- zunmtr
+- zupgtr
+- zupmtr
+
+
+"""
+
+# Within SciPy, these wrappers can be used via relative or absolute cimport.
+# Examples:
+# from ..linalg cimport cython_lapack
+# from scipy.linalg cimport cython_lapack
+# cimport scipy.linalg.cython_lapack as cython_lapack
+# cimport ..linalg.cython_lapack as cython_lapack
+
+# Within SciPy, if LAPACK functions are needed from C/C++/Fortran, these
+# wrappers should not be used; link against the LAPACK library directly
+# instead.
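+
+# A minimal usage sketch (illustrative; the names below are not part of this
+# module): the Frobenius norm of a non-empty float64 matrix via dlange.
+# It assumes a Fortran-contiguous, suitably aligned array, per the docstring.
+#
+#     import numpy as np
+#     cimport numpy as cnp
+#     from scipy.linalg cimport cython_lapack
+#     cnp.import_array()  # initialize NumPy's C API before buffer access
+#
+#     def frobenius_norm(cnp.ndarray[cnp.float64_t, ndim=2, mode='fortran'] a):
+#         cdef int m = a.shape[0]
+#         cdef int n = a.shape[1]
+#         cdef char norm = b'F'
+#         # With norm='F', dlange never reads the workspace, so NULL is safe.
+#         return cython_lapack.dlange(&norm, &m, &n, &a[0, 0], &m, NULL)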
+
+cdef extern from "fortran_defs.h":
+    pass
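+# fortran_defs.h provides the F_FUNC name-mangling macro used in the string
+# declarations below to resolve each wrapper's platform-specific Fortran
+# symbol (upper/lower case, trailing underscore).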
+
+from numpy cimport npy_complex64, npy_complex128
+
+cdef extern from "_lapack_subroutines.h":
+    # Function pointer type declarations for
+    # gees and gges families of functions.
+    ctypedef bint _cselect1(npy_complex64*)
+    ctypedef bint _cselect2(npy_complex64*, npy_complex64*)
+    ctypedef bint _dselect2(d*, d*)
+    ctypedef bint _dselect3(d*, d*, d*)
+    ctypedef bint _sselect2(s*, s*)
+    ctypedef bint _sselect3(s*, s*, s*)
+    ctypedef bint _zselect1(npy_complex128*)
+    ctypedef bint _zselect2(npy_complex128*, npy_complex128*)
+
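+# These typedefs describe the eigenvalue-selection callbacks taken by the
+# gees/gges families. A sketch of a _zselect1-style predicate (illustrative,
+# not part of this module) that keeps eigenvalues in the open left half-plane:
+#
+#     cdef bint left_half_plane(npy_complex128 *w):
+#         return w.real < 0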
+
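+# Pattern used by every value-returning routine below: the Fortran function
+# is reached through a small subroutine shim (the *wrp symbols) that passes
+# the result back through an extra output argument, which sidesteps
+# compiler-specific ABI conventions for returning Fortran CHARACTER and
+# COMPLEX values. The extern declarations use NumPy's npy_complex64/128,
+# while the public signatures use the c/z typedefs; the two share the same
+# memory layout.
+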
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chla_transtype "F_FUNC(chla_transtypewrp, CHLA_TRANSTYPEWRP)"(char *out, int *trans) nogil
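+# chla_transtype translates a BLAST integer transpose constant into the
+# corresponding character ('N', 'T' or 'C').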
+cdef char chla_transtype(int *trans) nogil:
+    cdef char out
+    _fortran_chla_transtype(&out, trans)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cladiv "F_FUNC(cladivwrp, CLADIVWRP)"(c *out, npy_complex64 *x, npy_complex64 *y) nogil
+cdef c cladiv(c *x, c *y) nogil:
+    cdef c out
+    _fortran_cladiv(&out, x, y)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clangb "F_FUNC(clangbwrp, CLANGBWRP)"(s *out, char *norm, int *n, int *kl, int *ku, npy_complex64 *ab, int *ldab, s *work) nogil
+cdef s clangb(char *norm, int *n, int *kl, int *ku, c *ab, int *ldab, s *work) nogil:
+    cdef s out
+    _fortran_clangb(&out, norm, n, kl, ku, ab, ldab, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clange "F_FUNC(clangewrp, CLANGEWRP)"(s *out, char *norm, int *m, int *n, npy_complex64 *a, int *lda, s *work) nogil
+cdef s clange(char *norm, int *m, int *n, c *a, int *lda, s *work) nogil:
+    cdef s out
+    _fortran_clange(&out, norm, m, n, a, lda, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clangt "F_FUNC(clangtwrp, CLANGTWRP)"(s *out, char *norm, int *n, npy_complex64 *dl, npy_complex64 *d, npy_complex64 *du) nogil
+cdef s clangt(char *norm, int *n, c *dl, c *d, c *du) nogil:
+    cdef s out
+    _fortran_clangt(&out, norm, n, dl, d, du)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clanhb "F_FUNC(clanhbwrp, CLANHBWRP)"(s *out, char *norm, char *uplo, int *n, int *k, npy_complex64 *ab, int *ldab, s *work) nogil
+cdef s clanhb(char *norm, char *uplo, int *n, int *k, c *ab, int *ldab, s *work) nogil:
+    cdef s out
+    _fortran_clanhb(&out, norm, uplo, n, k, ab, ldab, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clanhe "F_FUNC(clanhewrp, CLANHEWRP)"(s *out, char *norm, char *uplo, int *n, npy_complex64 *a, int *lda, s *work) nogil
+cdef s clanhe(char *norm, char *uplo, int *n, c *a, int *lda, s *work) nogil:
+    cdef s out
+    _fortran_clanhe(&out, norm, uplo, n, a, lda, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clanhf "F_FUNC(clanhfwrp, CLANHFWRP)"(s *out, char *norm, char *transr, char *uplo, int *n, npy_complex64 *a, s *work) nogil
+cdef s clanhf(char *norm, char *transr, char *uplo, int *n, c *a, s *work) nogil:
+    cdef s out
+    _fortran_clanhf(&out, norm, transr, uplo, n, a, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clanhp "F_FUNC(clanhpwrp, CLANHPWRP)"(s *out, char *norm, char *uplo, int *n, npy_complex64 *ap, s *work) nogil
+cdef s clanhp(char *norm, char *uplo, int *n, c *ap, s *work) nogil:
+    cdef s out
+    _fortran_clanhp(&out, norm, uplo, n, ap, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clanhs "F_FUNC(clanhswrp, CLANHSWRP)"(s *out, char *norm, int *n, npy_complex64 *a, int *lda, s *work) nogil
+cdef s clanhs(char *norm, int *n, c *a, int *lda, s *work) nogil:
+    cdef s out
+    _fortran_clanhs(&out, norm, n, a, lda, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clanht "F_FUNC(clanhtwrp, CLANHTWRP)"(s *out, char *norm, int *n, s *d, npy_complex64 *e) nogil
+cdef s clanht(char *norm, int *n, s *d, c *e) nogil:
+    cdef s out
+    _fortran_clanht(&out, norm, n, d, e)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clansb "F_FUNC(clansbwrp, CLANSBWRP)"(s *out, char *norm, char *uplo, int *n, int *k, npy_complex64 *ab, int *ldab, s *work) nogil
+cdef s clansb(char *norm, char *uplo, int *n, int *k, c *ab, int *ldab, s *work) nogil:
+    cdef s out
+    _fortran_clansb(&out, norm, uplo, n, k, ab, ldab, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clansp "F_FUNC(clanspwrp, CLANSPWRP)"(s *out, char *norm, char *uplo, int *n, npy_complex64 *ap, s *work) nogil
+cdef s clansp(char *norm, char *uplo, int *n, c *ap, s *work) nogil:
+    cdef s out
+    _fortran_clansp(&out, norm, uplo, n, ap, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clansy "F_FUNC(clansywrp, CLANSYWRP)"(s *out, char *norm, char *uplo, int *n, npy_complex64 *a, int *lda, s *work) nogil
+cdef s clansy(char *norm, char *uplo, int *n, c *a, int *lda, s *work) nogil:
+    cdef s out
+    _fortran_clansy(&out, norm, uplo, n, a, lda, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clantb "F_FUNC(clantbwrp, CLANTBWRP)"(s *out, char *norm, char *uplo, char *diag, int *n, int *k, npy_complex64 *ab, int *ldab, s *work) nogil
+cdef s clantb(char *norm, char *uplo, char *diag, int *n, int *k, c *ab, int *ldab, s *work) nogil:
+    cdef s out
+    _fortran_clantb(&out, norm, uplo, diag, n, k, ab, ldab, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clantp "F_FUNC(clantpwrp, CLANTPWRP)"(s *out, char *norm, char *uplo, char *diag, int *n, npy_complex64 *ap, s *work) nogil
+cdef s clantp(char *norm, char *uplo, char *diag, int *n, c *ap, s *work) nogil:
+    cdef s out
+    _fortran_clantp(&out, norm, uplo, diag, n, ap, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clantr "F_FUNC(clantrwrp, CLANTRWRP)"(s *out, char *norm, char *uplo, char *diag, int *m, int *n, npy_complex64 *a, int *lda, s *work) nogil
+cdef s clantr(char *norm, char *uplo, char *diag, int *m, int *n, c *a, int *lda, s *work) nogil:
+    cdef s out
+    _fortran_clantr(&out, norm, uplo, diag, m, n, a, lda, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_disnan "F_FUNC(disnanwrp, DISNANWRP)"(bint *out, d *din) nogil
+cdef bint disnan(d *din) nogil:
+    cdef bint out
+    _fortran_disnan(&out, din)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlamch "F_FUNC(dlamchwrp, DLAMCHWRP)"(d *out, char *cmach) nogil
+cdef d dlamch(char *cmach) nogil:
+    cdef d out
+    _fortran_dlamch(&out, cmach)
+    return out
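+# Example (illustrative): querying machine parameters, e.g. double-precision
+# machine epsilon:
+#
+#     cdef char cmach = b'E'  # 'E' selects the relative machine epsilon
+#     cdef double eps = dlamch(&cmach)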
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlaneg "F_FUNC(dlanegwrp, DLANEGWRP)"(int *out, int *n, d *d, d *lld, d *sigma, d *pivmin, int *r) nogil
+cdef int dlaneg(int *n, d *d, d *lld, d *sigma, d *pivmin, int *r) nogil:
+    cdef int out
+    _fortran_dlaneg(&out, n, d, lld, sigma, pivmin, r)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlangb "F_FUNC(dlangbwrp, DLANGBWRP)"(d *out, char *norm, int *n, int *kl, int *ku, d *ab, int *ldab, d *work) nogil
+cdef d dlangb(char *norm, int *n, int *kl, int *ku, d *ab, int *ldab, d *work) nogil:
+    cdef d out
+    _fortran_dlangb(&out, norm, n, kl, ku, ab, ldab, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlange "F_FUNC(dlangewrp, DLANGEWRP)"(d *out, char *norm, int *m, int *n, d *a, int *lda, d *work) nogil
+cdef d dlange(char *norm, int *m, int *n, d *a, int *lda, d *work) nogil:
+    cdef d out
+    _fortran_dlange(&out, norm, m, n, a, lda, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlangt "F_FUNC(dlangtwrp, DLANGTWRP)"(d *out, char *norm, int *n, d *dl, d *d_, d *du) nogil
+cdef d dlangt(char *norm, int *n, d *dl, d *d_, d *du) nogil:
+    cdef d out
+    _fortran_dlangt(&out, norm, n, dl, d_, du)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlanhs "F_FUNC(dlanhswrp, DLANHSWRP)"(d *out, char *norm, int *n, d *a, int *lda, d *work) nogil
+cdef d dlanhs(char *norm, int *n, d *a, int *lda, d *work) nogil:
+    cdef d out
+    _fortran_dlanhs(&out, norm, n, a, lda, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlansb "F_FUNC(dlansbwrp, DLANSBWRP)"(d *out, char *norm, char *uplo, int *n, int *k, d *ab, int *ldab, d *work) nogil
+cdef d dlansb(char *norm, char *uplo, int *n, int *k, d *ab, int *ldab, d *work) nogil:
+    cdef d out
+    _fortran_dlansb(&out, norm, uplo, n, k, ab, ldab, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlansf "F_FUNC(dlansfwrp, DLANSFWRP)"(d *out, char *norm, char *transr, char *uplo, int *n, d *a, d *work) nogil
+cdef d dlansf(char *norm, char *transr, char *uplo, int *n, d *a, d *work) nogil:
+    cdef d out
+    _fortran_dlansf(&out, norm, transr, uplo, n, a, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlansp "F_FUNC(dlanspwrp, DLANSPWRP)"(d *out, char *norm, char *uplo, int *n, d *ap, d *work) nogil
+cdef d dlansp(char *norm, char *uplo, int *n, d *ap, d *work) nogil:
+    cdef d out
+    _fortran_dlansp(&out, norm, uplo, n, ap, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlanst "F_FUNC(dlanstwrp, DLANSTWRP)"(d *out, char *norm, int *n, d *d_, d *e) nogil
+cdef d dlanst(char *norm, int *n, d *d_, d *e) nogil:
+    cdef d out
+    _fortran_dlanst(&out, norm, n, d_, e)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlansy "F_FUNC(dlansywrp, DLANSYWRP)"(d *out, char *norm, char *uplo, int *n, d *a, int *lda, d *work) nogil
+cdef d dlansy(char *norm, char *uplo, int *n, d *a, int *lda, d *work) nogil:
+    cdef d out
+    _fortran_dlansy(&out, norm, uplo, n, a, lda, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlantb "F_FUNC(dlantbwrp, DLANTBWRP)"(d *out, char *norm, char *uplo, char *diag, int *n, int *k, d *ab, int *ldab, d *work) nogil
+cdef d dlantb(char *norm, char *uplo, char *diag, int *n, int *k, d *ab, int *ldab, d *work) nogil:
+    cdef d out
+    _fortran_dlantb(&out, norm, uplo, diag, n, k, ab, ldab, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlantp "F_FUNC(dlantpwrp, DLANTPWRP)"(d *out, char *norm, char *uplo, char *diag, int *n, d *ap, d *work) nogil
+cdef d dlantp(char *norm, char *uplo, char *diag, int *n, d *ap, d *work) nogil:
+    cdef d out
+    _fortran_dlantp(&out, norm, uplo, diag, n, ap, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlantr "F_FUNC(dlantrwrp, DLANTRWRP)"(d *out, char *norm, char *uplo, char *diag, int *m, int *n, d *a, int *lda, d *work) nogil
+cdef d dlantr(char *norm, char *uplo, char *diag, int *m, int *n, d *a, int *lda, d *work) nogil:
+    cdef d out
+    _fortran_dlantr(&out, norm, uplo, diag, m, n, a, lda, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlapy2 "F_FUNC(dlapy2wrp, DLAPY2WRP)"(d *out, d *x, d *y) nogil
+cdef d dlapy2(d *x, d *y) nogil:
+    cdef d out
+    _fortran_dlapy2(&out, x, y)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlapy3 "F_FUNC(dlapy3wrp, DLAPY3WRP)"(d *out, d *x, d *y, d *z) nogil
+cdef d dlapy3(d *x, d *y, d *z) nogil:
+    cdef d out
+    _fortran_dlapy3(&out, x, y, z)
+    return out
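+# dlapy2 and dlapy3 return sqrt(x**2 + y**2) and sqrt(x**2 + y**2 + z**2),
+# computed so that the intermediate squares cannot overflow unnecessarily.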
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dzsum1 "F_FUNC(dzsum1wrp, DZSUM1WRP)"(d *out, int *n, npy_complex128 *cx, int *incx) nogil
+cdef d dzsum1(int *n, z *cx, int *incx) nogil:
+    cdef d out
+    _fortran_dzsum1(&out, n, cx, incx)
+    return out
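+# dzsum1 (like its single-precision analog scsum1) sums the true complex
+# moduli |cx[i]|, unlike the BLAS routine dzasum, which sums |Re| + |Im|.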
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_icmax1 "F_FUNC(icmax1wrp, ICMAX1WRP)"(int *out, int *n, npy_complex64 *cx, int *incx) nogil
+cdef int icmax1(int *n, c *cx, int *incx) nogil:
+    cdef int out
+    _fortran_icmax1(&out, n, cx, incx)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ieeeck "F_FUNC(ieeeckwrp, IEEECKWRP)"(int *out, int *ispec, s *zero, s *one) nogil
+cdef int ieeeck(int *ispec, s *zero, s *one) nogil:
+    cdef int out
+    _fortran_ieeeck(&out, ispec, zero, one)
+    return out
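+# ieeeck is used by ilaenv to verify that Infinity and NaN arithmetic will
+# not trap; it returns 1 when the checks pass and 0 otherwise.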
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ilaclc "F_FUNC(ilaclcwrp, ILACLCWRP)"(int *out, int *m, int *n, npy_complex64 *a, int *lda) nogil
+cdef int ilaclc(int *m, int *n, c *a, int *lda) nogil:
+    cdef int out
+    _fortran_ilaclc(&out, m, n, a, lda)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ilaclr "F_FUNC(ilaclrwrp, ILACLRWRP)"(int *out, int *m, int *n, npy_complex64 *a, int *lda) nogil
+cdef int ilaclr(int *m, int *n, c *a, int *lda) nogil:
+    cdef int out
+    _fortran_ilaclr(&out, m, n, a, lda)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_iladiag "F_FUNC(iladiagwrp, ILADIAGWRP)"(int *out, char *diag) nogil
+cdef int iladiag(char *diag) nogil:
+    cdef int out
+    _fortran_iladiag(&out, diag)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_iladlc "F_FUNC(iladlcwrp, ILADLCWRP)"(int *out, int *m, int *n, d *a, int *lda) nogil
+cdef int iladlc(int *m, int *n, d *a, int *lda) nogil:
+    cdef int out
+    _fortran_iladlc(&out, m, n, a, lda)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_iladlr "F_FUNC(iladlrwrp, ILADLRWRP)"(int *out, int *m, int *n, d *a, int *lda) nogil
+cdef int iladlr(int *m, int *n, d *a, int *lda) nogil:
+    cdef int out
+    _fortran_iladlr(&out, m, n, a, lda)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ilaprec "F_FUNC(ilaprecwrp, ILAPRECWRP)"(int *out, char *prec) nogil
+cdef int ilaprec(char *prec) nogil:
+    cdef int out
+    _fortran_ilaprec(&out, prec)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ilaslc "F_FUNC(ilaslcwrp, ILASLCWRP)"(int *out, int *m, int *n, s *a, int *lda) nogil
+cdef int ilaslc(int *m, int *n, s *a, int *lda) nogil:
+    cdef int out
+    _fortran_ilaslc(&out, m, n, a, lda)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ilaslr "F_FUNC(ilaslrwrp, ILASLRWRP)"(int *out, int *m, int *n, s *a, int *lda) nogil
+cdef int ilaslr(int *m, int *n, s *a, int *lda) nogil:
+    cdef int out
+    _fortran_ilaslr(&out, m, n, a, lda)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ilatrans "F_FUNC(ilatranswrp, ILATRANSWRP)"(int *out, char *trans) nogil
+cdef int ilatrans(char *trans) nogil:
+    cdef int out
+    _fortran_ilatrans(&out, trans)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ilauplo "F_FUNC(ilauplowrp, ILAUPLOWRP)"(int *out, char *uplo) nogil
+cdef int ilauplo(char *uplo) nogil:
+    cdef int out
+    _fortran_ilauplo(&out, uplo)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ilazlc "F_FUNC(ilazlcwrp, ILAZLCWRP)"(int *out, int *m, int *n, npy_complex128 *a, int *lda) nogil
+cdef int ilazlc(int *m, int *n, z *a, int *lda) nogil:
+    cdef int out
+    _fortran_ilazlc(&out, m, n, a, lda)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ilazlr "F_FUNC(ilazlrwrp, ILAZLRWRP)"(int *out, int *m, int *n, npy_complex128 *a, int *lda) nogil
+cdef int ilazlr(int *m, int *n, z *a, int *lda) nogil:
+    cdef int out
+    _fortran_ilazlr(&out, m, n, a, lda)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_izmax1 "F_FUNC(izmax1wrp, IZMAX1WRP)"(int *out, int *n, npy_complex128 *cx, int *incx) nogil
+cdef int izmax1(int *n, z *cx, int *incx) nogil:
+    cdef int out
+    _fortran_izmax1(&out, n, cx, incx)
+    return out
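+# icmax1 and izmax1 return the 1-based index of the first element of maximum
+# true absolute value, cf. the BLAS routines icamax and izamax.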
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_scsum1 "F_FUNC(scsum1wrp, SCSUM1WRP)"(s *out, int *n, npy_complex64 *cx, int *incx) nogil
+cdef s scsum1(int *n, c *cx, int *incx) nogil:
+    cdef s out
+    _fortran_scsum1(&out, n, cx, incx)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slamch "F_FUNC(slamchwrp, SLAMCHWRP)"(s *out, char *cmach) nogil
+cdef s slamch(char *cmach) nogil:
+    cdef s out
+    _fortran_slamch(&out, cmach)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slangb "F_FUNC(slangbwrp, SLANGBWRP)"(s *out, char *norm, int *n, int *kl, int *ku, s *ab, int *ldab, s *work) nogil
+cdef s slangb(char *norm, int *n, int *kl, int *ku, s *ab, int *ldab, s *work) nogil:
+    cdef s out
+    _fortran_slangb(&out, norm, n, kl, ku, ab, ldab, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slange "F_FUNC(slangewrp, SLANGEWRP)"(s *out, char *norm, int *m, int *n, s *a, int *lda, s *work) nogil
+cdef s slange(char *norm, int *m, int *n, s *a, int *lda, s *work) nogil:
+    cdef s out
+    _fortran_slange(&out, norm, m, n, a, lda, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slangt "F_FUNC(slangtwrp, SLANGTWRP)"(s *out, char *norm, int *n, s *dl, s *d, s *du) nogil
+cdef s slangt(char *norm, int *n, s *dl, s *d, s *du) nogil:
+    cdef s out
+    _fortran_slangt(&out, norm, n, dl, d, du)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slanhs "F_FUNC(slanhswrp, SLANHSWRP)"(s *out, char *norm, int *n, s *a, int *lda, s *work) nogil
+cdef s slanhs(char *norm, int *n, s *a, int *lda, s *work) nogil:
+    cdef s out
+    _fortran_slanhs(&out, norm, n, a, lda, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slansb "F_FUNC(slansbwrp, SLANSBWRP)"(s *out, char *norm, char *uplo, int *n, int *k, s *ab, int *ldab, s *work) nogil
+cdef s slansb(char *norm, char *uplo, int *n, int *k, s *ab, int *ldab, s *work) nogil:
+    cdef s out
+    _fortran_slansb(&out, norm, uplo, n, k, ab, ldab, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slansf "F_FUNC(slansfwrp, SLANSFWRP)"(s *out, char *norm, char *transr, char *uplo, int *n, s *a, s *work) nogil
+cdef s slansf(char *norm, char *transr, char *uplo, int *n, s *a, s *work) nogil:
+    cdef s out
+    _fortran_slansf(&out, norm, transr, uplo, n, a, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slansp "F_FUNC(slanspwrp, SLANSPWRP)"(s *out, char *norm, char *uplo, int *n, s *ap, s *work) nogil
+cdef s slansp(char *norm, char *uplo, int *n, s *ap, s *work) nogil:
+    cdef s out
+    _fortran_slansp(&out, norm, uplo, n, ap, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slanst "F_FUNC(slanstwrp, SLANSTWRP)"(s *out, char *norm, int *n, s *d, s *e) nogil
+cdef s slanst(char *norm, int *n, s *d, s *e) nogil:
+    cdef s out
+    _fortran_slanst(&out, norm, n, d, e)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slansy "F_FUNC(slansywrp, SLANSYWRP)"(s *out, char *norm, char *uplo, int *n, s *a, int *lda, s *work) nogil
+cdef s slansy(char *norm, char *uplo, int *n, s *a, int *lda, s *work) nogil:
+    cdef s out
+    _fortran_slansy(&out, norm, uplo, n, a, lda, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slantb "F_FUNC(slantbwrp, SLANTBWRP)"(s *out, char *norm, char *uplo, char *diag, int *n, int *k, s *ab, int *ldab, s *work) nogil
+cdef s slantb(char *norm, char *uplo, char *diag, int *n, int *k, s *ab, int *ldab, s *work) nogil:
+    cdef s out
+    _fortran_slantb(&out, norm, uplo, diag, n, k, ab, ldab, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slantp "F_FUNC(slantpwrp, SLANTPWRP)"(s *out, char *norm, char *uplo, char *diag, int *n, s *ap, s *work) nogil
+cdef s slantp(char *norm, char *uplo, char *diag, int *n, s *ap, s *work) nogil:
+    cdef s out
+    _fortran_slantp(&out, norm, uplo, diag, n, ap, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slantr "F_FUNC(slantrwrp, SLANTRWRP)"(s *out, char *norm, char *uplo, char *diag, int *m, int *n, s *a, int *lda, s *work) nogil
+cdef s slantr(char *norm, char *uplo, char *diag, int *m, int *n, s *a, int *lda, s *work) nogil:
+    cdef s out
+    _fortran_slantr(&out, norm, uplo, diag, m, n, a, lda, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slapy2 "F_FUNC(slapy2wrp, SLAPY2WRP)"(s *out, s *x, s *y) nogil
+cdef s slapy2(s *x, s *y) nogil:
+    cdef s out
+    _fortran_slapy2(&out, x, y)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slapy3 "F_FUNC(slapy3wrp, SLAPY3WRP)"(s *out, s *x, s *y, s *z) nogil
+cdef s slapy3(s *x, s *y, s *z) nogil:
+    cdef s out
+    _fortran_slapy3(&out, x, y, z)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zladiv "F_FUNC(zladivwrp, ZLADIVWRP)"(z *out, npy_complex128 *x, npy_complex128 *y) nogil
+cdef z zladiv(z *x, z *y) nogil:
+    cdef z out
+    _fortran_zladiv(&out, x, y)
+    return out
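+# cladiv/zladiv compute the complex quotient x/y using a scaling algorithm
+# that avoids unwarranted intermediate overflow and underflow.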
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlangb "F_FUNC(zlangbwrp, ZLANGBWRP)"(d *out, char *norm, int *n, int *kl, int *ku, npy_complex128 *ab, int *ldab, d *work) nogil
+cdef d zlangb(char *norm, int *n, int *kl, int *ku, z *ab, int *ldab, d *work) nogil:
+    cdef d out
+    _fortran_zlangb(&out, norm, n, kl, ku, ab, ldab, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlange "F_FUNC(zlangewrp, ZLANGEWRP)"(d *out, char *norm, int *m, int *n, npy_complex128 *a, int *lda, d *work) nogil
+cdef d zlange(char *norm, int *m, int *n, z *a, int *lda, d *work) nogil:
+    cdef d out
+    _fortran_zlange(&out, norm, m, n, a, lda, work)
+    return out
+
+
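+# In zlangt and zlanht the diagonal argument is spelled `d_` rather than
+# LAPACK's `d`, presumably to avoid clashing with the `d` (double) typedef
+# used throughout this module.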
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlangt "F_FUNC(zlangtwrp, ZLANGTWRP)"(d *out, char *norm, int *n, npy_complex128 *dl, npy_complex128 *d_, npy_complex128 *du) nogil
+cdef d zlangt(char *norm, int *n, z *dl, z *d_, z *du) nogil:
+    cdef d out
+    _fortran_zlangt(&out, norm, n, dl, d_, du)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlanhb "F_FUNC(zlanhbwrp, ZLANHBWRP)"(d *out, char *norm, char *uplo, int *n, int *k, npy_complex128 *ab, int *ldab, d *work) nogil
+cdef d zlanhb(char *norm, char *uplo, int *n, int *k, z *ab, int *ldab, d *work) nogil:
+    cdef d out
+    _fortran_zlanhb(&out, norm, uplo, n, k, ab, ldab, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlanhe "F_FUNC(zlanhewrp, ZLANHEWRP)"(d *out, char *norm, char *uplo, int *n, npy_complex128 *a, int *lda, d *work) nogil
+cdef d zlanhe(char *norm, char *uplo, int *n, z *a, int *lda, d *work) nogil:
+    cdef d out
+    _fortran_zlanhe(&out, norm, uplo, n, a, lda, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlanhf "F_FUNC(zlanhfwrp, ZLANHFWRP)"(d *out, char *norm, char *transr, char *uplo, int *n, npy_complex128 *a, d *work) nogil
+cdef d zlanhf(char *norm, char *transr, char *uplo, int *n, z *a, d *work) nogil:
+    cdef d out
+    _fortran_zlanhf(&out, norm, transr, uplo, n, a, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlanhp "F_FUNC(zlanhpwrp, ZLANHPWRP)"(d *out, char *norm, char *uplo, int *n, npy_complex128 *ap, d *work) nogil
+cdef d zlanhp(char *norm, char *uplo, int *n, z *ap, d *work) nogil:
+    cdef d out
+    _fortran_zlanhp(&out, norm, uplo, n, ap, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlanhs "F_FUNC(zlanhswrp, ZLANHSWRP)"(d *out, char *norm, int *n, npy_complex128 *a, int *lda, d *work) nogil
+cdef d zlanhs(char *norm, int *n, z *a, int *lda, d *work) nogil:
+    cdef d out
+    _fortran_zlanhs(&out, norm, n, a, lda, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlanht "F_FUNC(zlanhtwrp, ZLANHTWRP)"(d *out, char *norm, int *n, d *d_, npy_complex128 *e) nogil
+cdef d zlanht(char *norm, int *n, d *d_, z *e) nogil:
+    cdef d out
+    _fortran_zlanht(&out, norm, n, d_, e)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlansb "F_FUNC(zlansbwrp, ZLANSBWRP)"(d *out, char *norm, char *uplo, int *n, int *k, npy_complex128 *ab, int *ldab, d *work) nogil
+cdef d zlansb(char *norm, char *uplo, int *n, int *k, z *ab, int *ldab, d *work) nogil:
+    cdef d out
+    _fortran_zlansb(&out, norm, uplo, n, k, ab, ldab, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlansp "F_FUNC(zlanspwrp, ZLANSPWRP)"(d *out, char *norm, char *uplo, int *n, npy_complex128 *ap, d *work) nogil
+cdef d zlansp(char *norm, char *uplo, int *n, z *ap, d *work) nogil:
+    cdef d out
+    _fortran_zlansp(&out, norm, uplo, n, ap, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlansy "F_FUNC(zlansywrp, ZLANSYWRP)"(d *out, char *norm, char *uplo, int *n, npy_complex128 *a, int *lda, d *work) nogil
+cdef d zlansy(char *norm, char *uplo, int *n, z *a, int *lda, d *work) nogil:
+    cdef d out
+    _fortran_zlansy(&out, norm, uplo, n, a, lda, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlantb "F_FUNC(zlantbwrp, ZLANTBWRP)"(d *out, char *norm, char *uplo, char *diag, int *n, int *k, npy_complex128 *ab, int *ldab, d *work) nogil
+cdef d zlantb(char *norm, char *uplo, char *diag, int *n, int *k, z *ab, int *ldab, d *work) nogil:
+    cdef d out
+    _fortran_zlantb(&out, norm, uplo, diag, n, k, ab, ldab, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlantp "F_FUNC(zlantpwrp, ZLANTPWRP)"(d *out, char *norm, char *uplo, char *diag, int *n, npy_complex128 *ap, d *work) nogil
+cdef d zlantp(char *norm, char *uplo, char *diag, int *n, z *ap, d *work) nogil:
+    cdef d out
+    _fortran_zlantp(&out, norm, uplo, diag, n, ap, work)
+    return out
+
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlantr "F_FUNC(zlantrwrp, ZLANTRWRP)"(d *out, char *norm, char *uplo, char *diag, int *m, int *n, npy_complex128 *a, int *lda, d *work) nogil
+cdef d zlantr(char *norm, char *uplo, char *diag, int *m, int *n, z *a, int *lda, d *work) nogil:
+    cdef d out
+    _fortran_zlantr(&out, norm, uplo, diag, m, n, a, lda, work)
+    return out
+
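+# The routines from here on are Fortran SUBROUTINEs, so no return-value shim
+# is needed: F_FUNC binds each LAPACK symbol directly (no "wrp" suffix) and
+# every Cython wrapper is a thin pass-through that preserves the exact LAPACK
+# argument order, with all scalars passed by pointer in Fortran style.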
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cbbcsd "F_FUNC(cbbcsd,CBBCSD)"(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, int *m, int *p, int *q, s *theta, s *phi, npy_complex64 *u1, int *ldu1, npy_complex64 *u2, int *ldu2, npy_complex64 *v1t, int *ldv1t, npy_complex64 *v2t, int *ldv2t, s *b11d, s *b11e, s *b12d, s *b12e, s *b21d, s *b21e, s *b22d, s *b22e, s *rwork, int *lrwork, int *info) nogil
+cdef void cbbcsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, int *m, int *p, int *q, s *theta, s *phi, c *u1, int *ldu1, c *u2, int *ldu2, c *v1t, int *ldv1t, c *v2t, int *ldv2t, s *b11d, s *b11e, s *b12d, s *b12e, s *b21d, s *b21e, s *b22d, s *b22e, s *rwork, int *lrwork, int *info) nogil:
+    _fortran_cbbcsd(jobu1, jobu2, jobv1t, jobv2t, trans, m, p, q, theta, phi, u1, ldu1, u2, ldu2, v1t, ldv1t, v2t, ldv2t, b11d, b11e, b12d, b12e, b21d, b21e, b22d, b22e, rwork, lrwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cbdsqr "F_FUNC(cbdsqr,CBDSQR)"(char *uplo, int *n, int *ncvt, int *nru, int *ncc, s *d, s *e, npy_complex64 *vt, int *ldvt, npy_complex64 *u, int *ldu, npy_complex64 *c, int *ldc, s *rwork, int *info) nogil
+cdef void cbdsqr(char *uplo, int *n, int *ncvt, int *nru, int *ncc, s *d, s *e, c *vt, int *ldvt, c *u, int *ldu, c *c, int *ldc, s *rwork, int *info) nogil:
+    _fortran_cbdsqr(uplo, n, ncvt, nru, ncc, d, e, vt, ldvt, u, ldu, c, ldc, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgbbrd "F_FUNC(cgbbrd,CGBBRD)"(char *vect, int *m, int *n, int *ncc, int *kl, int *ku, npy_complex64 *ab, int *ldab, s *d, s *e, npy_complex64 *q, int *ldq, npy_complex64 *pt, int *ldpt, npy_complex64 *c, int *ldc, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void cgbbrd(char *vect, int *m, int *n, int *ncc, int *kl, int *ku, c *ab, int *ldab, s *d, s *e, c *q, int *ldq, c *pt, int *ldpt, c *c, int *ldc, c *work, s *rwork, int *info) nogil:
+    _fortran_cgbbrd(vect, m, n, ncc, kl, ku, ab, ldab, d, e, q, ldq, pt, ldpt, c, ldc, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgbcon "F_FUNC(cgbcon,CGBCON)"(char *norm, int *n, int *kl, int *ku, npy_complex64 *ab, int *ldab, int *ipiv, s *anorm, s *rcond, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void cgbcon(char *norm, int *n, int *kl, int *ku, c *ab, int *ldab, int *ipiv, s *anorm, s *rcond, c *work, s *rwork, int *info) nogil:
+    _fortran_cgbcon(norm, n, kl, ku, ab, ldab, ipiv, anorm, rcond, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgbequ "F_FUNC(cgbequ,CGBEQU)"(int *m, int *n, int *kl, int *ku, npy_complex64 *ab, int *ldab, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil
+cdef void cgbequ(int *m, int *n, int *kl, int *ku, c *ab, int *ldab, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil:
+    _fortran_cgbequ(m, n, kl, ku, ab, ldab, r, c, rowcnd, colcnd, amax, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgbequb "F_FUNC(cgbequb,CGBEQUB)"(int *m, int *n, int *kl, int *ku, npy_complex64 *ab, int *ldab, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil
+cdef void cgbequb(int *m, int *n, int *kl, int *ku, c *ab, int *ldab, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil:
+    _fortran_cgbequb(m, n, kl, ku, ab, ldab, r, c, rowcnd, colcnd, amax, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgbrfs "F_FUNC(cgbrfs,CGBRFS)"(char *trans, int *n, int *kl, int *ku, int *nrhs, npy_complex64 *ab, int *ldab, npy_complex64 *afb, int *ldafb, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, s *ferr, s *berr, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void cgbrfs(char *trans, int *n, int *kl, int *ku, int *nrhs, c *ab, int *ldab, c *afb, int *ldafb, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil:
+    _fortran_cgbrfs(trans, n, kl, ku, nrhs, ab, ldab, afb, ldafb, ipiv, b, ldb, x, ldx, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgbsv "F_FUNC(cgbsv,CGBSV)"(int *n, int *kl, int *ku, int *nrhs, npy_complex64 *ab, int *ldab, int *ipiv, npy_complex64 *b, int *ldb, int *info) nogil
+cdef void cgbsv(int *n, int *kl, int *ku, int *nrhs, c *ab, int *ldab, int *ipiv, c *b, int *ldb, int *info) nogil:
+    _fortran_cgbsv(n, kl, ku, nrhs, ab, ldab, ipiv, b, ldb, info)
+
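+# The *svx "expert" drivers extend the plain solvers (cf. cgbsv above) with
+# optional equilibration (fact/equed and the row/column scale factors r and
+# c), a condition-number estimate (rcond) and per-solution error bounds
+# (ferr/berr).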
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgbsvx "F_FUNC(cgbsvx,CGBSVX)"(char *fact, char *trans, int *n, int *kl, int *ku, int *nrhs, npy_complex64 *ab, int *ldab, npy_complex64 *afb, int *ldafb, int *ipiv, char *equed, s *r, s *c, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, s *rcond, s *ferr, s *berr, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void cgbsvx(char *fact, char *trans, int *n, int *kl, int *ku, int *nrhs, c *ab, int *ldab, c *afb, int *ldafb, int *ipiv, char *equed, s *r, s *c, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil:
+    _fortran_cgbsvx(fact, trans, n, kl, ku, nrhs, ab, ldab, afb, ldafb, ipiv, equed, r, c, b, ldb, x, ldx, rcond, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgbtf2 "F_FUNC(cgbtf2,CGBTF2)"(int *m, int *n, int *kl, int *ku, npy_complex64 *ab, int *ldab, int *ipiv, int *info) nogil
+cdef void cgbtf2(int *m, int *n, int *kl, int *ku, c *ab, int *ldab, int *ipiv, int *info) nogil:
+    _fortran_cgbtf2(m, n, kl, ku, ab, ldab, ipiv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgbtrf "F_FUNC(cgbtrf,CGBTRF)"(int *m, int *n, int *kl, int *ku, npy_complex64 *ab, int *ldab, int *ipiv, int *info) nogil
+cdef void cgbtrf(int *m, int *n, int *kl, int *ku, c *ab, int *ldab, int *ipiv, int *info) nogil:
+    _fortran_cgbtrf(m, n, kl, ku, ab, ldab, ipiv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgbtrs "F_FUNC(cgbtrs,CGBTRS)"(char *trans, int *n, int *kl, int *ku, int *nrhs, npy_complex64 *ab, int *ldab, int *ipiv, npy_complex64 *b, int *ldb, int *info) nogil
+cdef void cgbtrs(char *trans, int *n, int *kl, int *ku, int *nrhs, c *ab, int *ldab, int *ipiv, c *b, int *ldb, int *info) nogil:
+    _fortran_cgbtrs(trans, n, kl, ku, nrhs, ab, ldab, ipiv, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgebak "F_FUNC(cgebak,CGEBAK)"(char *job, char *side, int *n, int *ilo, int *ihi, s *scale, int *m, npy_complex64 *v, int *ldv, int *info) nogil
+cdef void cgebak(char *job, char *side, int *n, int *ilo, int *ihi, s *scale, int *m, c *v, int *ldv, int *info) nogil:
+    _fortran_cgebak(job, side, n, ilo, ihi, scale, m, v, ldv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgebal "F_FUNC(cgebal,CGEBAL)"(char *job, int *n, npy_complex64 *a, int *lda, int *ilo, int *ihi, s *scale, int *info) nogil
+cdef void cgebal(char *job, int *n, c *a, int *lda, int *ilo, int *ihi, s *scale, int *info) nogil:
+    _fortran_cgebal(job, n, a, lda, ilo, ihi, scale, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgebd2 "F_FUNC(cgebd2,CGEBD2)"(int *m, int *n, npy_complex64 *a, int *lda, s *d, s *e, npy_complex64 *tauq, npy_complex64 *taup, npy_complex64 *work, int *info) nogil
+cdef void cgebd2(int *m, int *n, c *a, int *lda, s *d, s *e, c *tauq, c *taup, c *work, int *info) nogil:
+    _fortran_cgebd2(m, n, a, lda, d, e, tauq, taup, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgebrd "F_FUNC(cgebrd,CGEBRD)"(int *m, int *n, npy_complex64 *a, int *lda, s *d, s *e, npy_complex64 *tauq, npy_complex64 *taup, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void cgebrd(int *m, int *n, c *a, int *lda, s *d, s *e, c *tauq, c *taup, c *work, int *lwork, int *info) nogil:
+    _fortran_cgebrd(m, n, a, lda, d, e, tauq, taup, work, lwork, info)
+
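+# Routines carrying a work/lwork pair follow the usual LAPACK workspace-query
+# convention: a first call with lwork = -1 performs no computation and returns
+# the optimal workspace size in work[0], after which the caller allocates the
+# buffer and calls again.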
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgecon "F_FUNC(cgecon,CGECON)"(char *norm, int *n, npy_complex64 *a, int *lda, s *anorm, s *rcond, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void cgecon(char *norm, int *n, c *a, int *lda, s *anorm, s *rcond, c *work, s *rwork, int *info) nogil:
+    _fortran_cgecon(norm, n, a, lda, anorm, rcond, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgeequ "F_FUNC(cgeequ,CGEEQU)"(int *m, int *n, npy_complex64 *a, int *lda, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil
+cdef void cgeequ(int *m, int *n, c *a, int *lda, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil:
+    _fortran_cgeequ(m, n, a, lda, r, c, rowcnd, colcnd, amax, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgeequb "F_FUNC(cgeequb,CGEEQUB)"(int *m, int *n, npy_complex64 *a, int *lda, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil
+cdef void cgeequb(int *m, int *n, c *a, int *lda, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil:
+    _fortran_cgeequb(m, n, a, lda, r, c, rowcnd, colcnd, amax, info)
+
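+# cgees/cgeesx (and the cgges* family below) take a user-supplied selection
+# callback for ordering the Schur form; the public cselect1/cselect2
+# function-pointer typedefs are cast to the header-level _cselect1/_cselect2
+# types, whose signatures differ only in using npy_complex64.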
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgees "F_FUNC(cgees,CGEES)"(char *jobvs, char *sort, _cselect1 *select, int *n, npy_complex64 *a, int *lda, int *sdim, npy_complex64 *w, npy_complex64 *vs, int *ldvs, npy_complex64 *work, int *lwork, s *rwork, bint *bwork, int *info) nogil
+cdef void cgees(char *jobvs, char *sort, cselect1 *select, int *n, c *a, int *lda, int *sdim, c *w, c *vs, int *ldvs, c *work, int *lwork, s *rwork, bint *bwork, int *info) nogil:
+    _fortran_cgees(jobvs, sort, <_cselect1*>select, n, a, lda, sdim, w, vs, ldvs, work, lwork, rwork, bwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgeesx "F_FUNC(cgeesx,CGEESX)"(char *jobvs, char *sort, _cselect1 *select, char *sense, int *n, npy_complex64 *a, int *lda, int *sdim, npy_complex64 *w, npy_complex64 *vs, int *ldvs, s *rconde, s *rcondv, npy_complex64 *work, int *lwork, s *rwork, bint *bwork, int *info) nogil
+cdef void cgeesx(char *jobvs, char *sort, cselect1 *select, char *sense, int *n, c *a, int *lda, int *sdim, c *w, c *vs, int *ldvs, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, bint *bwork, int *info) nogil:
+    _fortran_cgeesx(jobvs, sort, <_cselect1*>select, sense, n, a, lda, sdim, w, vs, ldvs, rconde, rcondv, work, lwork, rwork, bwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgeev "F_FUNC(cgeev,CGEEV)"(char *jobvl, char *jobvr, int *n, npy_complex64 *a, int *lda, npy_complex64 *w, npy_complex64 *vl, int *ldvl, npy_complex64 *vr, int *ldvr, npy_complex64 *work, int *lwork, s *rwork, int *info) nogil
+cdef void cgeev(char *jobvl, char *jobvr, int *n, c *a, int *lda, c *w, c *vl, int *ldvl, c *vr, int *ldvr, c *work, int *lwork, s *rwork, int *info) nogil:
+    _fortran_cgeev(jobvl, jobvr, n, a, lda, w, vl, ldvl, vr, ldvr, work, lwork, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgeevx "F_FUNC(cgeevx,CGEEVX)"(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, npy_complex64 *a, int *lda, npy_complex64 *w, npy_complex64 *vl, int *ldvl, npy_complex64 *vr, int *ldvr, int *ilo, int *ihi, s *scale, s *abnrm, s *rconde, s *rcondv, npy_complex64 *work, int *lwork, s *rwork, int *info) nogil
+cdef void cgeevx(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, c *a, int *lda, c *w, c *vl, int *ldvl, c *vr, int *ldvr, int *ilo, int *ihi, s *scale, s *abnrm, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, int *info) nogil:
+    _fortran_cgeevx(balanc, jobvl, jobvr, sense, n, a, lda, w, vl, ldvl, vr, ldvr, ilo, ihi, scale, abnrm, rconde, rcondv, work, lwork, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgehd2 "F_FUNC(cgehd2,CGEHD2)"(int *n, int *ilo, int *ihi, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *info) nogil
+cdef void cgehd2(int *n, int *ilo, int *ihi, c *a, int *lda, c *tau, c *work, int *info) nogil:
+    _fortran_cgehd2(n, ilo, ihi, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgehrd "F_FUNC(cgehrd,CGEHRD)"(int *n, int *ilo, int *ihi, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void cgehrd(int *n, int *ilo, int *ihi, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil:
+    _fortran_cgehrd(n, ilo, ihi, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgelq2 "F_FUNC(cgelq2,CGELQ2)"(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *info) nogil
+cdef void cgelq2(int *m, int *n, c *a, int *lda, c *tau, c *work, int *info) nogil:
+    _fortran_cgelq2(m, n, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgelqf "F_FUNC(cgelqf,CGELQF)"(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void cgelqf(int *m, int *n, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil:
+    _fortran_cgelqf(m, n, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgels "F_FUNC(cgels,CGELS)"(char *trans, int *m, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void cgels(char *trans, int *m, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, c *work, int *lwork, int *info) nogil:
+    _fortran_cgels(trans, m, n, nrhs, a, lda, b, ldb, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgelsd "F_FUNC(cgelsd,CGELSD)"(int *m, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, s *s, s *rcond, int *rank, npy_complex64 *work, int *lwork, s *rwork, int *iwork, int *info) nogil
+cdef void cgelsd(int *m, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, s *s, s *rcond, int *rank, c *work, int *lwork, s *rwork, int *iwork, int *info) nogil:
+    _fortran_cgelsd(m, n, nrhs, a, lda, b, ldb, s, rcond, rank, work, lwork, rwork, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgelss "F_FUNC(cgelss,CGELSS)"(int *m, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, s *s, s *rcond, int *rank, npy_complex64 *work, int *lwork, s *rwork, int *info) nogil
+cdef void cgelss(int *m, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, s *s, s *rcond, int *rank, c *work, int *lwork, s *rwork, int *info) nogil:
+    _fortran_cgelss(m, n, nrhs, a, lda, b, ldb, s, rcond, rank, work, lwork, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgelsy "F_FUNC(cgelsy,CGELSY)"(int *m, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, int *jpvt, s *rcond, int *rank, npy_complex64 *work, int *lwork, s *rwork, int *info) nogil
+cdef void cgelsy(int *m, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, int *jpvt, s *rcond, int *rank, c *work, int *lwork, s *rwork, int *info) nogil:
+    _fortran_cgelsy(m, n, nrhs, a, lda, b, ldb, jpvt, rcond, rank, work, lwork, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgemqrt "F_FUNC(cgemqrt,CGEMQRT)"(char *side, char *trans, int *m, int *n, int *k, int *nb, npy_complex64 *v, int *ldv, npy_complex64 *t, int *ldt, npy_complex64 *c, int *ldc, npy_complex64 *work, int *info) nogil
+cdef void cgemqrt(char *side, char *trans, int *m, int *n, int *k, int *nb, c *v, int *ldv, c *t, int *ldt, c *c, int *ldc, c *work, int *info) nogil:
+    _fortran_cgemqrt(side, trans, m, n, k, nb, v, ldv, t, ldt, c, ldc, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgeql2 "F_FUNC(cgeql2,CGEQL2)"(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *info) nogil
+cdef void cgeql2(int *m, int *n, c *a, int *lda, c *tau, c *work, int *info) nogil:
+    _fortran_cgeql2(m, n, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgeqlf "F_FUNC(cgeqlf,CGEQLF)"(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void cgeqlf(int *m, int *n, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil:
+    _fortran_cgeqlf(m, n, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgeqp3 "F_FUNC(cgeqp3,CGEQP3)"(int *m, int *n, npy_complex64 *a, int *lda, int *jpvt, npy_complex64 *tau, npy_complex64 *work, int *lwork, s *rwork, int *info) nogil
+cdef void cgeqp3(int *m, int *n, c *a, int *lda, int *jpvt, c *tau, c *work, int *lwork, s *rwork, int *info) nogil:
+    _fortran_cgeqp3(m, n, a, lda, jpvt, tau, work, lwork, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgeqr2 "F_FUNC(cgeqr2,CGEQR2)"(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *info) nogil
+cdef void cgeqr2(int *m, int *n, c *a, int *lda, c *tau, c *work, int *info) nogil:
+    _fortran_cgeqr2(m, n, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgeqr2p "F_FUNC(cgeqr2p,CGEQR2P)"(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *info) nogil
+cdef void cgeqr2p(int *m, int *n, c *a, int *lda, c *tau, c *work, int *info) nogil:
+    _fortran_cgeqr2p(m, n, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgeqrf "F_FUNC(cgeqrf,CGEQRF)"(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void cgeqrf(int *m, int *n, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil:
+    _fortran_cgeqrf(m, n, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgeqrfp "F_FUNC(cgeqrfp,CGEQRFP)"(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void cgeqrfp(int *m, int *n, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil:
+    _fortran_cgeqrfp(m, n, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgeqrt "F_FUNC(cgeqrt,CGEQRT)"(int *m, int *n, int *nb, npy_complex64 *a, int *lda, npy_complex64 *t, int *ldt, npy_complex64 *work, int *info) nogil
+cdef void cgeqrt(int *m, int *n, int *nb, c *a, int *lda, c *t, int *ldt, c *work, int *info) nogil:
+    _fortran_cgeqrt(m, n, nb, a, lda, t, ldt, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgeqrt2 "F_FUNC(cgeqrt2,CGEQRT2)"(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *t, int *ldt, int *info) nogil
+cdef void cgeqrt2(int *m, int *n, c *a, int *lda, c *t, int *ldt, int *info) nogil:
+    _fortran_cgeqrt2(m, n, a, lda, t, ldt, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgeqrt3 "F_FUNC(cgeqrt3,CGEQRT3)"(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *t, int *ldt, int *info) nogil
+cdef void cgeqrt3(int *m, int *n, c *a, int *lda, c *t, int *ldt, int *info) nogil:
+    _fortran_cgeqrt3(m, n, a, lda, t, ldt, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgerfs "F_FUNC(cgerfs,CGERFS)"(char *trans, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *af, int *ldaf, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, s *ferr, s *berr, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void cgerfs(char *trans, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil:
+    _fortran_cgerfs(trans, n, nrhs, a, lda, af, ldaf, ipiv, b, ldb, x, ldx, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgerq2 "F_FUNC(cgerq2,CGERQ2)"(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *info) nogil
+cdef void cgerq2(int *m, int *n, c *a, int *lda, c *tau, c *work, int *info) nogil:
+    _fortran_cgerq2(m, n, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgerqf "F_FUNC(cgerqf,CGERQF)"(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void cgerqf(int *m, int *n, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil:
+    _fortran_cgerqf(m, n, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgesc2 "F_FUNC(cgesc2,CGESC2)"(int *n, npy_complex64 *a, int *lda, npy_complex64 *rhs, int *ipiv, int *jpiv, s *scale) nogil
+cdef void cgesc2(int *n, c *a, int *lda, c *rhs, int *ipiv, int *jpiv, s *scale) nogil:
+    _fortran_cgesc2(n, a, lda, rhs, ipiv, jpiv, scale)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgesdd "F_FUNC(cgesdd,CGESDD)"(char *jobz, int *m, int *n, npy_complex64 *a, int *lda, s *s, npy_complex64 *u, int *ldu, npy_complex64 *vt, int *ldvt, npy_complex64 *work, int *lwork, s *rwork, int *iwork, int *info) nogil
+cdef void cgesdd(char *jobz, int *m, int *n, c *a, int *lda, s *s, c *u, int *ldu, c *vt, int *ldvt, c *work, int *lwork, s *rwork, int *iwork, int *info) nogil:
+    _fortran_cgesdd(jobz, m, n, a, lda, s, u, ldu, vt, ldvt, work, lwork, rwork, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgesv "F_FUNC(cgesv,CGESV)"(int *n, int *nrhs, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *b, int *ldb, int *info) nogil
+cdef void cgesv(int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, int *info) nogil:
+    _fortran_cgesv(n, nrhs, a, lda, ipiv, b, ldb, info)
+
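+# A minimal usage sketch for cgesv above -- an illustration, not part of the
+# generated file. It assumes the public scipy.linalg.cython_lapack .pxd, which
+# re-exports these signatures to external Cython code; every argument, scalars
+# included, is passed by pointer, and arrays must be column-major:
+#
+#     from scipy.linalg.cython_lapack cimport cgesv
+#
+#     cdef int n = 2, nrhs = 1, info = 0
+#     cdef int ipiv[2]
+#     cdef float complex a[4]   # 2x2 coefficient matrix, Fortran order
+#     cdef float complex b[2]   # right-hand side, overwritten with solution
+#     # ... fill a and b ...
+#     cgesv(&n, &nrhs, &a[0], &n, &ipiv[0], &b[0], &n, &info)
+#     # info == 0 on success; a then holds the LU factors, ipiv the pivots.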
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgesvd "F_FUNC(cgesvd,CGESVD)"(char *jobu, char *jobvt, int *m, int *n, npy_complex64 *a, int *lda, s *s, npy_complex64 *u, int *ldu, npy_complex64 *vt, int *ldvt, npy_complex64 *work, int *lwork, s *rwork, int *info) nogil
+cdef void cgesvd(char *jobu, char *jobvt, int *m, int *n, c *a, int *lda, s *s, c *u, int *ldu, c *vt, int *ldvt, c *work, int *lwork, s *rwork, int *info) nogil:
+    _fortran_cgesvd(jobu, jobvt, m, n, a, lda, s, u, ldu, vt, ldvt, work, lwork, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgesvx "F_FUNC(cgesvx,CGESVX)"(char *fact, char *trans, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *af, int *ldaf, int *ipiv, char *equed, s *r, s *c, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, s *rcond, s *ferr, s *berr, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void cgesvx(char *fact, char *trans, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, int *ipiv, char *equed, s *r, s *c, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil:
+    _fortran_cgesvx(fact, trans, n, nrhs, a, lda, af, ldaf, ipiv, equed, r, c, b, ldb, x, ldx, rcond, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgetc2 "F_FUNC(cgetc2,CGETC2)"(int *n, npy_complex64 *a, int *lda, int *ipiv, int *jpiv, int *info) nogil
+cdef void cgetc2(int *n, c *a, int *lda, int *ipiv, int *jpiv, int *info) nogil:
+    _fortran_cgetc2(n, a, lda, ipiv, jpiv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgetf2 "F_FUNC(cgetf2,CGETF2)"(int *m, int *n, npy_complex64 *a, int *lda, int *ipiv, int *info) nogil
+cdef void cgetf2(int *m, int *n, c *a, int *lda, int *ipiv, int *info) nogil:
+    _fortran_cgetf2(m, n, a, lda, ipiv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgetrf "F_FUNC(cgetrf,CGETRF)"(int *m, int *n, npy_complex64 *a, int *lda, int *ipiv, int *info) nogil
+cdef void cgetrf(int *m, int *n, c *a, int *lda, int *ipiv, int *info) nogil:
+    _fortran_cgetrf(m, n, a, lda, ipiv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgetri "F_FUNC(cgetri,CGETRI)"(int *n, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void cgetri(int *n, c *a, int *lda, int *ipiv, c *work, int *lwork, int *info) nogil:
+    _fortran_cgetri(n, a, lda, ipiv, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgetrs "F_FUNC(cgetrs,CGETRS)"(char *trans, int *n, int *nrhs, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *b, int *ldb, int *info) nogil
+cdef void cgetrs(char *trans, int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, int *info) nogil:
+    _fortran_cgetrs(trans, n, nrhs, a, lda, ipiv, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cggbak "F_FUNC(cggbak,CGGBAK)"(char *job, char *side, int *n, int *ilo, int *ihi, s *lscale, s *rscale, int *m, npy_complex64 *v, int *ldv, int *info) nogil
+cdef void cggbak(char *job, char *side, int *n, int *ilo, int *ihi, s *lscale, s *rscale, int *m, c *v, int *ldv, int *info) nogil:
+    _fortran_cggbak(job, side, n, ilo, ihi, lscale, rscale, m, v, ldv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cggbal "F_FUNC(cggbal,CGGBAL)"(char *job, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, int *ilo, int *ihi, s *lscale, s *rscale, s *work, int *info) nogil
+cdef void cggbal(char *job, int *n, c *a, int *lda, c *b, int *ldb, int *ilo, int *ihi, s *lscale, s *rscale, s *work, int *info) nogil:
+    _fortran_cggbal(job, n, a, lda, b, ldb, ilo, ihi, lscale, rscale, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgges "F_FUNC(cgges,CGGES)"(char *jobvsl, char *jobvsr, char *sort, _cselect2 *selctg, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, int *sdim, npy_complex64 *alpha, npy_complex64 *beta, npy_complex64 *vsl, int *ldvsl, npy_complex64 *vsr, int *ldvsr, npy_complex64 *work, int *lwork, s *rwork, bint *bwork, int *info) nogil
+cdef void cgges(char *jobvsl, char *jobvsr, char *sort, cselect2 *selctg, int *n, c *a, int *lda, c *b, int *ldb, int *sdim, c *alpha, c *beta, c *vsl, int *ldvsl, c *vsr, int *ldvsr, c *work, int *lwork, s *rwork, bint *bwork, int *info) nogil:
+    _fortran_cgges(jobvsl, jobvsr, sort, <_cselect2*>selctg, n, a, lda, b, ldb, sdim, alpha, beta, vsl, ldvsl, vsr, ldvsr, work, lwork, rwork, bwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cggesx "F_FUNC(cggesx,CGGESX)"(char *jobvsl, char *jobvsr, char *sort, _cselect2 *selctg, char *sense, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, int *sdim, npy_complex64 *alpha, npy_complex64 *beta, npy_complex64 *vsl, int *ldvsl, npy_complex64 *vsr, int *ldvsr, s *rconde, s *rcondv, npy_complex64 *work, int *lwork, s *rwork, int *iwork, int *liwork, bint *bwork, int *info) nogil
+cdef void cggesx(char *jobvsl, char *jobvsr, char *sort, cselect2 *selctg, char *sense, int *n, c *a, int *lda, c *b, int *ldb, int *sdim, c *alpha, c *beta, c *vsl, int *ldvsl, c *vsr, int *ldvsr, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, int *iwork, int *liwork, bint *bwork, int *info) nogil:
+    _fortran_cggesx(jobvsl, jobvsr, sort, <_cselect2*>selctg, sense, n, a, lda, b, ldb, sdim, alpha, beta, vsl, ldvsl, vsr, ldvsr, rconde, rcondv, work, lwork, rwork, iwork, liwork, bwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cggev "F_FUNC(cggev,CGGEV)"(char *jobvl, char *jobvr, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *alpha, npy_complex64 *beta, npy_complex64 *vl, int *ldvl, npy_complex64 *vr, int *ldvr, npy_complex64 *work, int *lwork, s *rwork, int *info) nogil
+cdef void cggev(char *jobvl, char *jobvr, int *n, c *a, int *lda, c *b, int *ldb, c *alpha, c *beta, c *vl, int *ldvl, c *vr, int *ldvr, c *work, int *lwork, s *rwork, int *info) nogil:
+    _fortran_cggev(jobvl, jobvr, n, a, lda, b, ldb, alpha, beta, vl, ldvl, vr, ldvr, work, lwork, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cggevx "F_FUNC(cggevx,CGGEVX)"(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *alpha, npy_complex64 *beta, npy_complex64 *vl, int *ldvl, npy_complex64 *vr, int *ldvr, int *ilo, int *ihi, s *lscale, s *rscale, s *abnrm, s *bbnrm, s *rconde, s *rcondv, npy_complex64 *work, int *lwork, s *rwork, int *iwork, bint *bwork, int *info) nogil
+cdef void cggevx(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, c *a, int *lda, c *b, int *ldb, c *alpha, c *beta, c *vl, int *ldvl, c *vr, int *ldvr, int *ilo, int *ihi, s *lscale, s *rscale, s *abnrm, s *bbnrm, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, int *iwork, bint *bwork, int *info) nogil:
+    _fortran_cggevx(balanc, jobvl, jobvr, sense, n, a, lda, b, ldb, alpha, beta, vl, ldvl, vr, ldvr, ilo, ihi, lscale, rscale, abnrm, bbnrm, rconde, rcondv, work, lwork, rwork, iwork, bwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cggglm "F_FUNC(cggglm,CGGGLM)"(int *n, int *m, int *p, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *d, npy_complex64 *x, npy_complex64 *y, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void cggglm(int *n, int *m, int *p, c *a, int *lda, c *b, int *ldb, c *d, c *x, c *y, c *work, int *lwork, int *info) nogil:
+    _fortran_cggglm(n, m, p, a, lda, b, ldb, d, x, y, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgghrd "F_FUNC(cgghrd,CGGHRD)"(char *compq, char *compz, int *n, int *ilo, int *ihi, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *q, int *ldq, npy_complex64 *z, int *ldz, int *info) nogil
+cdef void cgghrd(char *compq, char *compz, int *n, int *ilo, int *ihi, c *a, int *lda, c *b, int *ldb, c *q, int *ldq, c *z, int *ldz, int *info) nogil:
+    _fortran_cgghrd(compq, compz, n, ilo, ihi, a, lda, b, ldb, q, ldq, z, ldz, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgglse "F_FUNC(cgglse,CGGLSE)"(int *m, int *n, int *p, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *c, npy_complex64 *d, npy_complex64 *x, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void cgglse(int *m, int *n, int *p, c *a, int *lda, c *b, int *ldb, c *c, c *d, c *x, c *work, int *lwork, int *info) nogil:
+    _fortran_cgglse(m, n, p, a, lda, b, ldb, c, d, x, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cggqrf "F_FUNC(cggqrf,CGGQRF)"(int *n, int *m, int *p, npy_complex64 *a, int *lda, npy_complex64 *taua, npy_complex64 *b, int *ldb, npy_complex64 *taub, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void cggqrf(int *n, int *m, int *p, c *a, int *lda, c *taua, c *b, int *ldb, c *taub, c *work, int *lwork, int *info) nogil:
+    _fortran_cggqrf(n, m, p, a, lda, taua, b, ldb, taub, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cggrqf "F_FUNC(cggrqf,CGGRQF)"(int *m, int *p, int *n, npy_complex64 *a, int *lda, npy_complex64 *taua, npy_complex64 *b, int *ldb, npy_complex64 *taub, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void cggrqf(int *m, int *p, int *n, c *a, int *lda, c *taua, c *b, int *ldb, c *taub, c *work, int *lwork, int *info) nogil:
+    _fortran_cggrqf(m, p, n, a, lda, taua, b, ldb, taub, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgtcon "F_FUNC(cgtcon,CGTCON)"(char *norm, int *n, npy_complex64 *dl, npy_complex64 *d, npy_complex64 *du, npy_complex64 *du2, int *ipiv, s *anorm, s *rcond, npy_complex64 *work, int *info) nogil
+cdef void cgtcon(char *norm, int *n, c *dl, c *d, c *du, c *du2, int *ipiv, s *anorm, s *rcond, c *work, int *info) nogil:
+    _fortran_cgtcon(norm, n, dl, d, du, du2, ipiv, anorm, rcond, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgtrfs "F_FUNC(cgtrfs,CGTRFS)"(char *trans, int *n, int *nrhs, npy_complex64 *dl, npy_complex64 *d, npy_complex64 *du, npy_complex64 *dlf, npy_complex64 *df, npy_complex64 *duf, npy_complex64 *du2, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, s *ferr, s *berr, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void cgtrfs(char *trans, int *n, int *nrhs, c *dl, c *d, c *du, c *dlf, c *df, c *duf, c *du2, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil:
+    _fortran_cgtrfs(trans, n, nrhs, dl, d, du, dlf, df, duf, du2, ipiv, b, ldb, x, ldx, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgtsv "F_FUNC(cgtsv,CGTSV)"(int *n, int *nrhs, npy_complex64 *dl, npy_complex64 *d, npy_complex64 *du, npy_complex64 *b, int *ldb, int *info) nogil
+cdef void cgtsv(int *n, int *nrhs, c *dl, c *d, c *du, c *b, int *ldb, int *info) nogil:
+    _fortran_cgtsv(n, nrhs, dl, d, du, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgtsvx "F_FUNC(cgtsvx,CGTSVX)"(char *fact, char *trans, int *n, int *nrhs, npy_complex64 *dl, npy_complex64 *d, npy_complex64 *du, npy_complex64 *dlf, npy_complex64 *df, npy_complex64 *duf, npy_complex64 *du2, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, s *rcond, s *ferr, s *berr, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void cgtsvx(char *fact, char *trans, int *n, int *nrhs, c *dl, c *d, c *du, c *dlf, c *df, c *duf, c *du2, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil:
+    _fortran_cgtsvx(fact, trans, n, nrhs, dl, d, du, dlf, df, duf, du2, ipiv, b, ldb, x, ldx, rcond, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgttrf "F_FUNC(cgttrf,CGTTRF)"(int *n, npy_complex64 *dl, npy_complex64 *d, npy_complex64 *du, npy_complex64 *du2, int *ipiv, int *info) nogil
+cdef void cgttrf(int *n, c *dl, c *d, c *du, c *du2, int *ipiv, int *info) nogil:
+    _fortran_cgttrf(n, dl, d, du, du2, ipiv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgttrs "F_FUNC(cgttrs,CGTTRS)"(char *trans, int *n, int *nrhs, npy_complex64 *dl, npy_complex64 *d, npy_complex64 *du, npy_complex64 *du2, int *ipiv, npy_complex64 *b, int *ldb, int *info) nogil
+cdef void cgttrs(char *trans, int *n, int *nrhs, c *dl, c *d, c *du, c *du2, int *ipiv, c *b, int *ldb, int *info) nogil:
+    _fortran_cgttrs(trans, n, nrhs, dl, d, du, du2, ipiv, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cgtts2 "F_FUNC(cgtts2,CGTTS2)"(int *itrans, int *n, int *nrhs, npy_complex64 *dl, npy_complex64 *d, npy_complex64 *du, npy_complex64 *du2, int *ipiv, npy_complex64 *b, int *ldb) nogil
+cdef void cgtts2(int *itrans, int *n, int *nrhs, c *dl, c *d, c *du, c *du2, int *ipiv, c *b, int *ldb) nogil:
+    _fortran_cgtts2(itrans, n, nrhs, dl, d, du, du2, ipiv, b, ldb)
+
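+# Naming note: letters two and three of each routine name encode the storage
+# scheme in standard LAPACK fashion -- gb (general banded), ge (general), gt
+# (general tridiagonal), and from here hb/he/hp (Hermitian banded, full and
+# packed) -- while the s/d/c/z prefix selects the precision.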
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chbev "F_FUNC(chbev,CHBEV)"(char *jobz, char *uplo, int *n, int *kd, npy_complex64 *ab, int *ldab, s *w, npy_complex64 *z, int *ldz, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void chbev(char *jobz, char *uplo, int *n, int *kd, c *ab, int *ldab, s *w, c *z, int *ldz, c *work, s *rwork, int *info) nogil:
+    _fortran_chbev(jobz, uplo, n, kd, ab, ldab, w, z, ldz, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chbevd "F_FUNC(chbevd,CHBEVD)"(char *jobz, char *uplo, int *n, int *kd, npy_complex64 *ab, int *ldab, s *w, npy_complex64 *z, int *ldz, npy_complex64 *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+cdef void chbevd(char *jobz, char *uplo, int *n, int *kd, c *ab, int *ldab, s *w, c *z, int *ldz, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_chbevd(jobz, uplo, n, kd, ab, ldab, w, z, ldz, work, lwork, rwork, lrwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chbevx "F_FUNC(chbevx,CHBEVX)"(char *jobz, char *range, char *uplo, int *n, int *kd, npy_complex64 *ab, int *ldab, npy_complex64 *q, int *ldq, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, npy_complex64 *z, int *ldz, npy_complex64 *work, s *rwork, int *iwork, int *ifail, int *info) nogil
+cdef void chbevx(char *jobz, char *range, char *uplo, int *n, int *kd, c *ab, int *ldab, c *q, int *ldq, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, c *work, s *rwork, int *iwork, int *ifail, int *info) nogil:
+    _fortran_chbevx(jobz, range, uplo, n, kd, ab, ldab, q, ldq, vl, vu, il, iu, abstol, m, w, z, ldz, work, rwork, iwork, ifail, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chbgst "F_FUNC(chbgst,CHBGST)"(char *vect, char *uplo, int *n, int *ka, int *kb, npy_complex64 *ab, int *ldab, npy_complex64 *bb, int *ldbb, npy_complex64 *x, int *ldx, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void chbgst(char *vect, char *uplo, int *n, int *ka, int *kb, c *ab, int *ldab, c *bb, int *ldbb, c *x, int *ldx, c *work, s *rwork, int *info) nogil:
+    _fortran_chbgst(vect, uplo, n, ka, kb, ab, ldab, bb, ldbb, x, ldx, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chbgv "F_FUNC(chbgv,CHBGV)"(char *jobz, char *uplo, int *n, int *ka, int *kb, npy_complex64 *ab, int *ldab, npy_complex64 *bb, int *ldbb, s *w, npy_complex64 *z, int *ldz, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void chbgv(char *jobz, char *uplo, int *n, int *ka, int *kb, c *ab, int *ldab, c *bb, int *ldbb, s *w, c *z, int *ldz, c *work, s *rwork, int *info) nogil:
+    _fortran_chbgv(jobz, uplo, n, ka, kb, ab, ldab, bb, ldbb, w, z, ldz, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chbgvd "F_FUNC(chbgvd,CHBGVD)"(char *jobz, char *uplo, int *n, int *ka, int *kb, npy_complex64 *ab, int *ldab, npy_complex64 *bb, int *ldbb, s *w, npy_complex64 *z, int *ldz, npy_complex64 *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+cdef void chbgvd(char *jobz, char *uplo, int *n, int *ka, int *kb, c *ab, int *ldab, c *bb, int *ldbb, s *w, c *z, int *ldz, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_chbgvd(jobz, uplo, n, ka, kb, ab, ldab, bb, ldbb, w, z, ldz, work, lwork, rwork, lrwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chbgvx "F_FUNC(chbgvx,CHBGVX)"(char *jobz, char *range, char *uplo, int *n, int *ka, int *kb, npy_complex64 *ab, int *ldab, npy_complex64 *bb, int *ldbb, npy_complex64 *q, int *ldq, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, npy_complex64 *z, int *ldz, npy_complex64 *work, s *rwork, int *iwork, int *ifail, int *info) nogil
+cdef void chbgvx(char *jobz, char *range, char *uplo, int *n, int *ka, int *kb, c *ab, int *ldab, c *bb, int *ldbb, c *q, int *ldq, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, c *work, s *rwork, int *iwork, int *ifail, int *info) nogil:
+    _fortran_chbgvx(jobz, range, uplo, n, ka, kb, ab, ldab, bb, ldbb, q, ldq, vl, vu, il, iu, abstol, m, w, z, ldz, work, rwork, iwork, ifail, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chbtrd "F_FUNC(chbtrd,CHBTRD)"(char *vect, char *uplo, int *n, int *kd, npy_complex64 *ab, int *ldab, s *d, s *e, npy_complex64 *q, int *ldq, npy_complex64 *work, int *info) nogil
+cdef void chbtrd(char *vect, char *uplo, int *n, int *kd, c *ab, int *ldab, s *d, s *e, c *q, int *ldq, c *work, int *info) nogil:
+    _fortran_chbtrd(vect, uplo, n, kd, ab, ldab, d, e, q, ldq, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_checon "F_FUNC(checon,CHECON)"(char *uplo, int *n, npy_complex64 *a, int *lda, int *ipiv, s *anorm, s *rcond, npy_complex64 *work, int *info) nogil
+cdef void checon(char *uplo, int *n, c *a, int *lda, int *ipiv, s *anorm, s *rcond, c *work, int *info) nogil:
+    _fortran_checon(uplo, n, a, lda, ipiv, anorm, rcond, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cheequb "F_FUNC(cheequb,CHEEQUB)"(char *uplo, int *n, npy_complex64 *a, int *lda, s *s, s *scond, s *amax, npy_complex64 *work, int *info) nogil
+cdef void cheequb(char *uplo, int *n, c *a, int *lda, s *s, s *scond, s *amax, c *work, int *info) nogil:
+    _fortran_cheequb(uplo, n, a, lda, s, scond, amax, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cheev "F_FUNC(cheev,CHEEV)"(char *jobz, char *uplo, int *n, npy_complex64 *a, int *lda, s *w, npy_complex64 *work, int *lwork, s *rwork, int *info) nogil
+cdef void cheev(char *jobz, char *uplo, int *n, c *a, int *lda, s *w, c *work, int *lwork, s *rwork, int *info) nogil:
+    _fortran_cheev(jobz, uplo, n, a, lda, w, work, lwork, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cheevd "F_FUNC(cheevd,CHEEVD)"(char *jobz, char *uplo, int *n, npy_complex64 *a, int *lda, s *w, npy_complex64 *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+cdef void cheevd(char *jobz, char *uplo, int *n, c *a, int *lda, s *w, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_cheevd(jobz, uplo, n, a, lda, w, work, lwork, rwork, lrwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cheevr "F_FUNC(cheevr,CHEEVR)"(char *jobz, char *range, char *uplo, int *n, npy_complex64 *a, int *lda, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, npy_complex64 *z, int *ldz, int *isuppz, npy_complex64 *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+cdef void cheevr(char *jobz, char *range, char *uplo, int *n, c *a, int *lda, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, int *isuppz, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_cheevr(jobz, range, uplo, n, a, lda, vl, vu, il, iu, abstol, m, w, z, ldz, isuppz, work, lwork, rwork, lrwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cheevx "F_FUNC(cheevx,CHEEVX)"(char *jobz, char *range, char *uplo, int *n, npy_complex64 *a, int *lda, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, npy_complex64 *z, int *ldz, npy_complex64 *work, int *lwork, s *rwork, int *iwork, int *ifail, int *info) nogil
+cdef void cheevx(char *jobz, char *range, char *uplo, int *n, c *a, int *lda, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, c *work, int *lwork, s *rwork, int *iwork, int *ifail, int *info) nogil:
+    _fortran_cheevx(jobz, range, uplo, n, a, lda, vl, vu, il, iu, abstol, m, w, z, ldz, work, lwork, rwork, iwork, ifail, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chegs2 "F_FUNC(chegs2,CHEGS2)"(int *itype, char *uplo, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, int *info) nogil
+cdef void chegs2(int *itype, char *uplo, int *n, c *a, int *lda, c *b, int *ldb, int *info) nogil:
+    _fortran_chegs2(itype, uplo, n, a, lda, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chegst "F_FUNC(chegst,CHEGST)"(int *itype, char *uplo, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, int *info) nogil
+cdef void chegst(int *itype, char *uplo, int *n, c *a, int *lda, c *b, int *ldb, int *info) nogil:
+    _fortran_chegst(itype, uplo, n, a, lda, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chegv "F_FUNC(chegv,CHEGV)"(int *itype, char *jobz, char *uplo, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, s *w, npy_complex64 *work, int *lwork, s *rwork, int *info) nogil
+cdef void chegv(int *itype, char *jobz, char *uplo, int *n, c *a, int *lda, c *b, int *ldb, s *w, c *work, int *lwork, s *rwork, int *info) nogil:
+    _fortran_chegv(itype, jobz, uplo, n, a, lda, b, ldb, w, work, lwork, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chegvd "F_FUNC(chegvd,CHEGVD)"(int *itype, char *jobz, char *uplo, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, s *w, npy_complex64 *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+cdef void chegvd(int *itype, char *jobz, char *uplo, int *n, c *a, int *lda, c *b, int *ldb, s *w, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_chegvd(itype, jobz, uplo, n, a, lda, b, ldb, w, work, lwork, rwork, lrwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chegvx "F_FUNC(chegvx,CHEGVX)"(int *itype, char *jobz, char *range, char *uplo, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, npy_complex64 *z, int *ldz, npy_complex64 *work, int *lwork, s *rwork, int *iwork, int *ifail, int *info) nogil
+cdef void chegvx(int *itype, char *jobz, char *range, char *uplo, int *n, c *a, int *lda, c *b, int *ldb, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, c *work, int *lwork, s *rwork, int *iwork, int *ifail, int *info) nogil:
+    _fortran_chegvx(itype, jobz, range, uplo, n, a, lda, b, ldb, vl, vu, il, iu, abstol, m, w, z, ldz, work, lwork, rwork, iwork, ifail, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cherfs "F_FUNC(cherfs,CHERFS)"(char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *af, int *ldaf, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, s *ferr, s *berr, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void cherfs(char *uplo, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil:
+    _fortran_cherfs(uplo, n, nrhs, a, lda, af, ldaf, ipiv, b, ldb, x, ldx, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chesv "F_FUNC(chesv,CHESV)"(char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void chesv(char *uplo, int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, c *work, int *lwork, int *info) nogil:
+    _fortran_chesv(uplo, n, nrhs, a, lda, ipiv, b, ldb, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chesvx "F_FUNC(chesvx,CHESVX)"(char *fact, char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *af, int *ldaf, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, s *rcond, s *ferr, s *berr, npy_complex64 *work, int *lwork, s *rwork, int *info) nogil
+cdef void chesvx(char *fact, char *uplo, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, int *lwork, s *rwork, int *info) nogil:
+    _fortran_chesvx(fact, uplo, n, nrhs, a, lda, af, ldaf, ipiv, b, ldb, x, ldx, rcond, ferr, berr, work, lwork, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cheswapr "F_FUNC(cheswapr,CHESWAPR)"(char *uplo, int *n, npy_complex64 *a, int *lda, int *i1, int *i2) nogil
+cdef void cheswapr(char *uplo, int *n, c *a, int *lda, int *i1, int *i2) nogil:
+    _fortran_cheswapr(uplo, n, a, lda, i1, i2)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chetd2 "F_FUNC(chetd2,CHETD2)"(char *uplo, int *n, npy_complex64 *a, int *lda, s *d, s *e, npy_complex64 *tau, int *info) nogil
+cdef void chetd2(char *uplo, int *n, c *a, int *lda, s *d, s *e, c *tau, int *info) nogil:
+    _fortran_chetd2(uplo, n, a, lda, d, e, tau, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chetf2 "F_FUNC(chetf2,CHETF2)"(char *uplo, int *n, npy_complex64 *a, int *lda, int *ipiv, int *info) nogil
+cdef void chetf2(char *uplo, int *n, c *a, int *lda, int *ipiv, int *info) nogil:
+    _fortran_chetf2(uplo, n, a, lda, ipiv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chetrd "F_FUNC(chetrd,CHETRD)"(char *uplo, int *n, npy_complex64 *a, int *lda, s *d, s *e, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void chetrd(char *uplo, int *n, c *a, int *lda, s *d, s *e, c *tau, c *work, int *lwork, int *info) nogil:
+    _fortran_chetrd(uplo, n, a, lda, d, e, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chetrf "F_FUNC(chetrf,CHETRF)"(char *uplo, int *n, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void chetrf(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *lwork, int *info) nogil:
+    _fortran_chetrf(uplo, n, a, lda, ipiv, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chetri "F_FUNC(chetri,CHETRI)"(char *uplo, int *n, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *work, int *info) nogil
+cdef void chetri(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *info) nogil:
+    _fortran_chetri(uplo, n, a, lda, ipiv, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chetri2 "F_FUNC(chetri2,CHETRI2)"(char *uplo, int *n, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void chetri2(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *lwork, int *info) nogil:
+    _fortran_chetri2(uplo, n, a, lda, ipiv, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chetri2x "F_FUNC(chetri2x,CHETRI2X)"(char *uplo, int *n, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *work, int *nb, int *info) nogil
+cdef void chetri2x(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *nb, int *info) nogil:
+    _fortran_chetri2x(uplo, n, a, lda, ipiv, work, nb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chetrs "F_FUNC(chetrs,CHETRS)"(char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *b, int *ldb, int *info) nogil
+cdef void chetrs(char *uplo, int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, int *info) nogil:
+    _fortran_chetrs(uplo, n, nrhs, a, lda, ipiv, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chetrs2 "F_FUNC(chetrs2,CHETRS2)"(char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *work, int *info) nogil
+cdef void chetrs2(char *uplo, int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, c *work, int *info) nogil:
+    _fortran_chetrs2(uplo, n, nrhs, a, lda, ipiv, b, ldb, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chfrk "F_FUNC(chfrk,CHFRK)"(char *transr, char *uplo, char *trans, int *n, int *k, s *alpha, npy_complex64 *a, int *lda, s *beta, npy_complex64 *c) nogil
+cdef void chfrk(char *transr, char *uplo, char *trans, int *n, int *k, s *alpha, c *a, int *lda, s *beta, c *c) nogil:
+    _fortran_chfrk(transr, uplo, trans, n, k, alpha, a, lda, beta, c)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chgeqz "F_FUNC(chgeqz,CHGEQZ)"(char *job, char *compq, char *compz, int *n, int *ilo, int *ihi, npy_complex64 *h, int *ldh, npy_complex64 *t, int *ldt, npy_complex64 *alpha, npy_complex64 *beta, npy_complex64 *q, int *ldq, npy_complex64 *z, int *ldz, npy_complex64 *work, int *lwork, s *rwork, int *info) nogil
+cdef void chgeqz(char *job, char *compq, char *compz, int *n, int *ilo, int *ihi, c *h, int *ldh, c *t, int *ldt, c *alpha, c *beta, c *q, int *ldq, c *z, int *ldz, c *work, int *lwork, s *rwork, int *info) nogil:
+    _fortran_chgeqz(job, compq, compz, n, ilo, ihi, h, ldh, t, ldt, alpha, beta, q, ldq, z, ldz, work, lwork, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chpcon "F_FUNC(chpcon,CHPCON)"(char *uplo, int *n, npy_complex64 *ap, int *ipiv, s *anorm, s *rcond, npy_complex64 *work, int *info) nogil
+cdef void chpcon(char *uplo, int *n, c *ap, int *ipiv, s *anorm, s *rcond, c *work, int *info) nogil:
+    _fortran_chpcon(uplo, n, ap, ipiv, anorm, rcond, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chpev "F_FUNC(chpev,CHPEV)"(char *jobz, char *uplo, int *n, npy_complex64 *ap, s *w, npy_complex64 *z, int *ldz, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void chpev(char *jobz, char *uplo, int *n, c *ap, s *w, c *z, int *ldz, c *work, s *rwork, int *info) nogil:
+    _fortran_chpev(jobz, uplo, n, ap, w, z, ldz, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chpevd "F_FUNC(chpevd,CHPEVD)"(char *jobz, char *uplo, int *n, npy_complex64 *ap, s *w, npy_complex64 *z, int *ldz, npy_complex64 *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+cdef void chpevd(char *jobz, char *uplo, int *n, c *ap, s *w, c *z, int *ldz, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_chpevd(jobz, uplo, n, ap, w, z, ldz, work, lwork, rwork, lrwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chpevx "F_FUNC(chpevx,CHPEVX)"(char *jobz, char *range, char *uplo, int *n, npy_complex64 *ap, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, npy_complex64 *z, int *ldz, npy_complex64 *work, s *rwork, int *iwork, int *ifail, int *info) nogil
+cdef void chpevx(char *jobz, char *range, char *uplo, int *n, c *ap, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, c *work, s *rwork, int *iwork, int *ifail, int *info) nogil:
+    _fortran_chpevx(jobz, range, uplo, n, ap, vl, vu, il, iu, abstol, m, w, z, ldz, work, rwork, iwork, ifail, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chpgst "F_FUNC(chpgst,CHPGST)"(int *itype, char *uplo, int *n, npy_complex64 *ap, npy_complex64 *bp, int *info) nogil
+cdef void chpgst(int *itype, char *uplo, int *n, c *ap, c *bp, int *info) nogil:
+    _fortran_chpgst(itype, uplo, n, ap, bp, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chpgv "F_FUNC(chpgv,CHPGV)"(int *itype, char *jobz, char *uplo, int *n, npy_complex64 *ap, npy_complex64 *bp, s *w, npy_complex64 *z, int *ldz, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void chpgv(int *itype, char *jobz, char *uplo, int *n, c *ap, c *bp, s *w, c *z, int *ldz, c *work, s *rwork, int *info) nogil:
+    _fortran_chpgv(itype, jobz, uplo, n, ap, bp, w, z, ldz, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chpgvd "F_FUNC(chpgvd,CHPGVD)"(int *itype, char *jobz, char *uplo, int *n, npy_complex64 *ap, npy_complex64 *bp, s *w, npy_complex64 *z, int *ldz, npy_complex64 *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+cdef void chpgvd(int *itype, char *jobz, char *uplo, int *n, c *ap, c *bp, s *w, c *z, int *ldz, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_chpgvd(itype, jobz, uplo, n, ap, bp, w, z, ldz, work, lwork, rwork, lrwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chpgvx "F_FUNC(chpgvx,CHPGVX)"(int *itype, char *jobz, char *range, char *uplo, int *n, npy_complex64 *ap, npy_complex64 *bp, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, npy_complex64 *z, int *ldz, npy_complex64 *work, s *rwork, int *iwork, int *ifail, int *info) nogil
+cdef void chpgvx(int *itype, char *jobz, char *range, char *uplo, int *n, c *ap, c *bp, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, c *work, s *rwork, int *iwork, int *ifail, int *info) nogil:
+    _fortran_chpgvx(itype, jobz, range, uplo, n, ap, bp, vl, vu, il, iu, abstol, m, w, z, ldz, work, rwork, iwork, ifail, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chprfs "F_FUNC(chprfs,CHPRFS)"(char *uplo, int *n, int *nrhs, npy_complex64 *ap, npy_complex64 *afp, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, s *ferr, s *berr, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void chprfs(char *uplo, int *n, int *nrhs, c *ap, c *afp, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil:
+    _fortran_chprfs(uplo, n, nrhs, ap, afp, ipiv, b, ldb, x, ldx, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chpsv "F_FUNC(chpsv,CHPSV)"(char *uplo, int *n, int *nrhs, npy_complex64 *ap, int *ipiv, npy_complex64 *b, int *ldb, int *info) nogil
+cdef void chpsv(char *uplo, int *n, int *nrhs, c *ap, int *ipiv, c *b, int *ldb, int *info) nogil:
+    _fortran_chpsv(uplo, n, nrhs, ap, ipiv, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chpsvx "F_FUNC(chpsvx,CHPSVX)"(char *fact, char *uplo, int *n, int *nrhs, npy_complex64 *ap, npy_complex64 *afp, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, s *rcond, s *ferr, s *berr, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void chpsvx(char *fact, char *uplo, int *n, int *nrhs, c *ap, c *afp, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil:
+    _fortran_chpsvx(fact, uplo, n, nrhs, ap, afp, ipiv, b, ldb, x, ldx, rcond, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chptrd "F_FUNC(chptrd,CHPTRD)"(char *uplo, int *n, npy_complex64 *ap, s *d, s *e, npy_complex64 *tau, int *info) nogil
+cdef void chptrd(char *uplo, int *n, c *ap, s *d, s *e, c *tau, int *info) nogil:
+    _fortran_chptrd(uplo, n, ap, d, e, tau, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chptrf "F_FUNC(chptrf,CHPTRF)"(char *uplo, int *n, npy_complex64 *ap, int *ipiv, int *info) nogil
+cdef void chptrf(char *uplo, int *n, c *ap, int *ipiv, int *info) nogil:
+    _fortran_chptrf(uplo, n, ap, ipiv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chptri "F_FUNC(chptri,CHPTRI)"(char *uplo, int *n, npy_complex64 *ap, int *ipiv, npy_complex64 *work, int *info) nogil
+cdef void chptri(char *uplo, int *n, c *ap, int *ipiv, c *work, int *info) nogil:
+    _fortran_chptri(uplo, n, ap, ipiv, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chptrs "F_FUNC(chptrs,CHPTRS)"(char *uplo, int *n, int *nrhs, npy_complex64 *ap, int *ipiv, npy_complex64 *b, int *ldb, int *info) nogil
+cdef void chptrs(char *uplo, int *n, int *nrhs, c *ap, int *ipiv, c *b, int *ldb, int *info) nogil:
+    _fortran_chptrs(uplo, n, nrhs, ap, ipiv, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chsein "F_FUNC(chsein,CHSEIN)"(char *side, char *eigsrc, char *initv, bint *select, int *n, npy_complex64 *h, int *ldh, npy_complex64 *w, npy_complex64 *vl, int *ldvl, npy_complex64 *vr, int *ldvr, int *mm, int *m, npy_complex64 *work, s *rwork, int *ifaill, int *ifailr, int *info) nogil
+cdef void chsein(char *side, char *eigsrc, char *initv, bint *select, int *n, c *h, int *ldh, c *w, c *vl, int *ldvl, c *vr, int *ldvr, int *mm, int *m, c *work, s *rwork, int *ifaill, int *ifailr, int *info) nogil:
+    _fortran_chsein(side, eigsrc, initv, select, n, h, ldh, w, vl, ldvl, vr, ldvr, mm, m, work, rwork, ifaill, ifailr, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_chseqr "F_FUNC(chseqr,CHSEQR)"(char *job, char *compz, int *n, int *ilo, int *ihi, npy_complex64 *h, int *ldh, npy_complex64 *w, npy_complex64 *z, int *ldz, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void chseqr(char *job, char *compz, int *n, int *ilo, int *ihi, c *h, int *ldh, c *w, c *z, int *ldz, c *work, int *lwork, int *info) nogil:
+    _fortran_chseqr(job, compz, n, ilo, ihi, h, ldh, w, z, ldz, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clabrd "F_FUNC(clabrd,CLABRD)"(int *m, int *n, int *nb, npy_complex64 *a, int *lda, s *d, s *e, npy_complex64 *tauq, npy_complex64 *taup, npy_complex64 *x, int *ldx, npy_complex64 *y, int *ldy) nogil
+cdef void clabrd(int *m, int *n, int *nb, c *a, int *lda, s *d, s *e, c *tauq, c *taup, c *x, int *ldx, c *y, int *ldy) nogil:
+    _fortran_clabrd(m, n, nb, a, lda, d, e, tauq, taup, x, ldx, y, ldy)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clacgv "F_FUNC(clacgv,CLACGV)"(int *n, npy_complex64 *x, int *incx) nogil
+cdef void clacgv(int *n, c *x, int *incx) nogil:
+    _fortran_clacgv(n, x, incx)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clacn2 "F_FUNC(clacn2,CLACN2)"(int *n, npy_complex64 *v, npy_complex64 *x, s *est, int *kase, int *isave) nogil
+cdef void clacn2(int *n, c *v, c *x, s *est, int *kase, int *isave) nogil:
+    _fortran_clacn2(n, v, x, est, kase, isave)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clacon "F_FUNC(clacon,CLACON)"(int *n, npy_complex64 *v, npy_complex64 *x, s *est, int *kase) nogil
+cdef void clacon(int *n, c *v, c *x, s *est, int *kase) nogil:
+    _fortran_clacon(n, v, x, est, kase)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clacp2 "F_FUNC(clacp2,CLACP2)"(char *uplo, int *m, int *n, s *a, int *lda, npy_complex64 *b, int *ldb) nogil
+cdef void clacp2(char *uplo, int *m, int *n, s *a, int *lda, c *b, int *ldb) nogil:
+    _fortran_clacp2(uplo, m, n, a, lda, b, ldb)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clacpy "F_FUNC(clacpy,CLACPY)"(char *uplo, int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb) nogil
+cdef void clacpy(char *uplo, int *m, int *n, c *a, int *lda, c *b, int *ldb) nogil:
+    _fortran_clacpy(uplo, m, n, a, lda, b, ldb)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clacrm "F_FUNC(clacrm,CLACRM)"(int *m, int *n, npy_complex64 *a, int *lda, s *b, int *ldb, npy_complex64 *c, int *ldc, s *rwork) nogil
+cdef void clacrm(int *m, int *n, c *a, int *lda, s *b, int *ldb, c *c, int *ldc, s *rwork) nogil:
+    _fortran_clacrm(m, n, a, lda, b, ldb, c, ldc, rwork)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clacrt "F_FUNC(clacrt,CLACRT)"(int *n, npy_complex64 *cx, int *incx, npy_complex64 *cy, int *incy, npy_complex64 *c, npy_complex64 *s) nogil
+cdef void clacrt(int *n, c *cx, int *incx, c *cy, int *incy, c *c, c *s) nogil:
+    _fortran_clacrt(n, cx, incx, cy, incy, c, s)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_claed0 "F_FUNC(claed0,CLAED0)"(int *qsiz, int *n, s *d, s *e, npy_complex64 *q, int *ldq, npy_complex64 *qstore, int *ldqs, s *rwork, int *iwork, int *info) nogil
+cdef void claed0(int *qsiz, int *n, s *d, s *e, c *q, int *ldq, c *qstore, int *ldqs, s *rwork, int *iwork, int *info) nogil:
+    _fortran_claed0(qsiz, n, d, e, q, ldq, qstore, ldqs, rwork, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_claed7 "F_FUNC(claed7,CLAED7)"(int *n, int *cutpnt, int *qsiz, int *tlvls, int *curlvl, int *curpbm, s *d, npy_complex64 *q, int *ldq, s *rho, int *indxq, s *qstore, int *qptr, int *prmptr, int *perm, int *givptr, int *givcol, s *givnum, npy_complex64 *work, s *rwork, int *iwork, int *info) nogil
+cdef void claed7(int *n, int *cutpnt, int *qsiz, int *tlvls, int *curlvl, int *curpbm, s *d, c *q, int *ldq, s *rho, int *indxq, s *qstore, int *qptr, int *prmptr, int *perm, int *givptr, int *givcol, s *givnum, c *work, s *rwork, int *iwork, int *info) nogil:
+    _fortran_claed7(n, cutpnt, qsiz, tlvls, curlvl, curpbm, d, q, ldq, rho, indxq, qstore, qptr, prmptr, perm, givptr, givcol, givnum, work, rwork, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_claed8 "F_FUNC(claed8,CLAED8)"(int *k, int *n, int *qsiz, npy_complex64 *q, int *ldq, s *d, s *rho, int *cutpnt, s *z, s *dlamda, npy_complex64 *q2, int *ldq2, s *w, int *indxp, int *indx, int *indxq, int *perm, int *givptr, int *givcol, s *givnum, int *info) nogil
+cdef void claed8(int *k, int *n, int *qsiz, c *q, int *ldq, s *d, s *rho, int *cutpnt, s *z, s *dlamda, c *q2, int *ldq2, s *w, int *indxp, int *indx, int *indxq, int *perm, int *givptr, int *givcol, s *givnum, int *info) nogil:
+    _fortran_claed8(k, n, qsiz, q, ldq, d, rho, cutpnt, z, dlamda, q2, ldq2, w, indxp, indx, indxq, perm, givptr, givcol, givnum, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_claein "F_FUNC(claein,CLAEIN)"(bint *rightv, bint *noinit, int *n, npy_complex64 *h, int *ldh, npy_complex64 *w, npy_complex64 *v, npy_complex64 *b, int *ldb, s *rwork, s *eps3, s *smlnum, int *info) nogil
+cdef void claein(bint *rightv, bint *noinit, int *n, c *h, int *ldh, c *w, c *v, c *b, int *ldb, s *rwork, s *eps3, s *smlnum, int *info) nogil:
+    _fortran_claein(rightv, noinit, n, h, ldh, w, v, b, ldb, rwork, eps3, smlnum, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_claesy "F_FUNC(claesy,CLAESY)"(npy_complex64 *a, npy_complex64 *b, npy_complex64 *c, npy_complex64 *rt1, npy_complex64 *rt2, npy_complex64 *evscal, npy_complex64 *cs1, npy_complex64 *sn1) nogil
+cdef void claesy(c *a, c *b, c *c, c *rt1, c *rt2, c *evscal, c *cs1, c *sn1) nogil:
+    _fortran_claesy(a, b, c, rt1, rt2, evscal, cs1, sn1)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_claev2 "F_FUNC(claev2,CLAEV2)"(npy_complex64 *a, npy_complex64 *b, npy_complex64 *c, s *rt1, s *rt2, s *cs1, npy_complex64 *sn1) nogil
+cdef void claev2(c *a, c *b, c *c, s *rt1, s *rt2, s *cs1, c *sn1) nogil:
+    _fortran_claev2(a, b, c, rt1, rt2, cs1, sn1)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clag2z "F_FUNC(clag2z,CLAG2Z)"(int *m, int *n, npy_complex64 *sa, int *ldsa, npy_complex128 *a, int *lda, int *info) nogil
+cdef void clag2z(int *m, int *n, c *sa, int *ldsa, z *a, int *lda, int *info) nogil:
+    _fortran_clag2z(m, n, sa, ldsa, a, lda, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clags2 "F_FUNC(clags2,CLAGS2)"(bint *upper, s *a1, npy_complex64 *a2, s *a3, s *b1, npy_complex64 *b2, s *b3, s *csu, npy_complex64 *snu, s *csv, npy_complex64 *snv, s *csq, npy_complex64 *snq) nogil
+cdef void clags2(bint *upper, s *a1, c *a2, s *a3, s *b1, c *b2, s *b3, s *csu, c *snu, s *csv, c *snv, s *csq, c *snq) nogil:
+    _fortran_clags2(upper, a1, a2, a3, b1, b2, b3, csu, snu, csv, snv, csq, snq)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clagtm "F_FUNC(clagtm,CLAGTM)"(char *trans, int *n, int *nrhs, s *alpha, npy_complex64 *dl, npy_complex64 *d, npy_complex64 *du, npy_complex64 *x, int *ldx, s *beta, npy_complex64 *b, int *ldb) nogil
+cdef void clagtm(char *trans, int *n, int *nrhs, s *alpha, c *dl, c *d, c *du, c *x, int *ldx, s *beta, c *b, int *ldb) nogil:
+    _fortran_clagtm(trans, n, nrhs, alpha, dl, d, du, x, ldx, beta, b, ldb)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clahef "F_FUNC(clahef,CLAHEF)"(char *uplo, int *n, int *nb, int *kb, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *w, int *ldw, int *info) nogil
+cdef void clahef(char *uplo, int *n, int *nb, int *kb, c *a, int *lda, int *ipiv, c *w, int *ldw, int *info) nogil:
+    _fortran_clahef(uplo, n, nb, kb, a, lda, ipiv, w, ldw, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clahqr "F_FUNC(clahqr,CLAHQR)"(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, npy_complex64 *h, int *ldh, npy_complex64 *w, int *iloz, int *ihiz, npy_complex64 *z, int *ldz, int *info) nogil
+cdef void clahqr(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, c *h, int *ldh, c *w, int *iloz, int *ihiz, c *z, int *ldz, int *info) nogil:
+    _fortran_clahqr(wantt, wantz, n, ilo, ihi, h, ldh, w, iloz, ihiz, z, ldz, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clahr2 "F_FUNC(clahr2,CLAHR2)"(int *n, int *k, int *nb, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *t, int *ldt, npy_complex64 *y, int *ldy) nogil
+cdef void clahr2(int *n, int *k, int *nb, c *a, int *lda, c *tau, c *t, int *ldt, c *y, int *ldy) nogil:
+    _fortran_clahr2(n, k, nb, a, lda, tau, t, ldt, y, ldy)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_claic1 "F_FUNC(claic1,CLAIC1)"(int *job, int *j, npy_complex64 *x, s *sest, npy_complex64 *w, npy_complex64 *gamma, s *sestpr, npy_complex64 *s, npy_complex64 *c) nogil
+cdef void claic1(int *job, int *j, c *x, s *sest, c *w, c *gamma, s *sestpr, c *s, c *c) nogil:
+    _fortran_claic1(job, j, x, sest, w, gamma, sestpr, s, c)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clals0 "F_FUNC(clals0,CLALS0)"(int *icompq, int *nl, int *nr, int *sqre, int *nrhs, npy_complex64 *b, int *ldb, npy_complex64 *bx, int *ldbx, int *perm, int *givptr, int *givcol, int *ldgcol, s *givnum, int *ldgnum, s *poles, s *difl, s *difr, s *z, int *k, s *c, s *s, s *rwork, int *info) nogil
+cdef void clals0(int *icompq, int *nl, int *nr, int *sqre, int *nrhs, c *b, int *ldb, c *bx, int *ldbx, int *perm, int *givptr, int *givcol, int *ldgcol, s *givnum, int *ldgnum, s *poles, s *difl, s *difr, s *z, int *k, s *c, s *s, s *rwork, int *info) nogil:
+    _fortran_clals0(icompq, nl, nr, sqre, nrhs, b, ldb, bx, ldbx, perm, givptr, givcol, ldgcol, givnum, ldgnum, poles, difl, difr, z, k, c, s, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clalsa "F_FUNC(clalsa,CLALSA)"(int *icompq, int *smlsiz, int *n, int *nrhs, npy_complex64 *b, int *ldb, npy_complex64 *bx, int *ldbx, s *u, int *ldu, s *vt, int *k, s *difl, s *difr, s *z, s *poles, int *givptr, int *givcol, int *ldgcol, int *perm, s *givnum, s *c, s *s, s *rwork, int *iwork, int *info) nogil
+cdef void clalsa(int *icompq, int *smlsiz, int *n, int *nrhs, c *b, int *ldb, c *bx, int *ldbx, s *u, int *ldu, s *vt, int *k, s *difl, s *difr, s *z, s *poles, int *givptr, int *givcol, int *ldgcol, int *perm, s *givnum, s *c, s *s, s *rwork, int *iwork, int *info) nogil:
+    _fortran_clalsa(icompq, smlsiz, n, nrhs, b, ldb, bx, ldbx, u, ldu, vt, k, difl, difr, z, poles, givptr, givcol, ldgcol, perm, givnum, c, s, rwork, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clalsd "F_FUNC(clalsd,CLALSD)"(char *uplo, int *smlsiz, int *n, int *nrhs, s *d, s *e, npy_complex64 *b, int *ldb, s *rcond, int *rank, npy_complex64 *work, s *rwork, int *iwork, int *info) nogil
+cdef void clalsd(char *uplo, int *smlsiz, int *n, int *nrhs, s *d, s *e, c *b, int *ldb, s *rcond, int *rank, c *work, s *rwork, int *iwork, int *info) nogil:
+    _fortran_clalsd(uplo, smlsiz, n, nrhs, d, e, b, ldb, rcond, rank, work, rwork, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clapll "F_FUNC(clapll,CLAPLL)"(int *n, npy_complex64 *x, int *incx, npy_complex64 *y, int *incy, s *ssmin) nogil
+cdef void clapll(int *n, c *x, int *incx, c *y, int *incy, s *ssmin) nogil:
+    _fortran_clapll(n, x, incx, y, incy, ssmin)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clapmr "F_FUNC(clapmr,CLAPMR)"(bint *forwrd, int *m, int *n, npy_complex64 *x, int *ldx, int *k) nogil
+cdef void clapmr(bint *forwrd, int *m, int *n, c *x, int *ldx, int *k) nogil:
+    _fortran_clapmr(forwrd, m, n, x, ldx, k)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clapmt "F_FUNC(clapmt,CLAPMT)"(bint *forwrd, int *m, int *n, npy_complex64 *x, int *ldx, int *k) nogil
+cdef void clapmt(bint *forwrd, int *m, int *n, c *x, int *ldx, int *k) nogil:
+    _fortran_clapmt(forwrd, m, n, x, ldx, k)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_claqgb "F_FUNC(claqgb,CLAQGB)"(int *m, int *n, int *kl, int *ku, npy_complex64 *ab, int *ldab, s *r, s *c, s *rowcnd, s *colcnd, s *amax, char *equed) nogil
+cdef void claqgb(int *m, int *n, int *kl, int *ku, c *ab, int *ldab, s *r, s *c, s *rowcnd, s *colcnd, s *amax, char *equed) nogil:
+    _fortran_claqgb(m, n, kl, ku, ab, ldab, r, c, rowcnd, colcnd, amax, equed)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_claqge "F_FUNC(claqge,CLAQGE)"(int *m, int *n, npy_complex64 *a, int *lda, s *r, s *c, s *rowcnd, s *colcnd, s *amax, char *equed) nogil
+cdef void claqge(int *m, int *n, c *a, int *lda, s *r, s *c, s *rowcnd, s *colcnd, s *amax, char *equed) nogil:
+    _fortran_claqge(m, n, a, lda, r, c, rowcnd, colcnd, amax, equed)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_claqhb "F_FUNC(claqhb,CLAQHB)"(char *uplo, int *n, int *kd, npy_complex64 *ab, int *ldab, s *s, s *scond, s *amax, char *equed) nogil
+cdef void claqhb(char *uplo, int *n, int *kd, c *ab, int *ldab, s *s, s *scond, s *amax, char *equed) nogil:
+    _fortran_claqhb(uplo, n, kd, ab, ldab, s, scond, amax, equed)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_claqhe "F_FUNC(claqhe,CLAQHE)"(char *uplo, int *n, npy_complex64 *a, int *lda, s *s, s *scond, s *amax, char *equed) nogil
+cdef void claqhe(char *uplo, int *n, c *a, int *lda, s *s, s *scond, s *amax, char *equed) nogil:
+    _fortran_claqhe(uplo, n, a, lda, s, scond, amax, equed)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_claqhp "F_FUNC(claqhp,CLAQHP)"(char *uplo, int *n, npy_complex64 *ap, s *s, s *scond, s *amax, char *equed) nogil
+cdef void claqhp(char *uplo, int *n, c *ap, s *s, s *scond, s *amax, char *equed) nogil:
+    _fortran_claqhp(uplo, n, ap, s, scond, amax, equed)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_claqp2 "F_FUNC(claqp2,CLAQP2)"(int *m, int *n, int *offset, npy_complex64 *a, int *lda, int *jpvt, npy_complex64 *tau, s *vn1, s *vn2, npy_complex64 *work) nogil
+cdef void claqp2(int *m, int *n, int *offset, c *a, int *lda, int *jpvt, c *tau, s *vn1, s *vn2, c *work) nogil:
+    _fortran_claqp2(m, n, offset, a, lda, jpvt, tau, vn1, vn2, work)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_claqps "F_FUNC(claqps,CLAQPS)"(int *m, int *n, int *offset, int *nb, int *kb, npy_complex64 *a, int *lda, int *jpvt, npy_complex64 *tau, s *vn1, s *vn2, npy_complex64 *auxv, npy_complex64 *f, int *ldf) nogil
+cdef void claqps(int *m, int *n, int *offset, int *nb, int *kb, c *a, int *lda, int *jpvt, c *tau, s *vn1, s *vn2, c *auxv, c *f, int *ldf) nogil:
+    _fortran_claqps(m, n, offset, nb, kb, a, lda, jpvt, tau, vn1, vn2, auxv, f, ldf)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_claqr0 "F_FUNC(claqr0,CLAQR0)"(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, npy_complex64 *h, int *ldh, npy_complex64 *w, int *iloz, int *ihiz, npy_complex64 *z, int *ldz, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void claqr0(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, c *h, int *ldh, c *w, int *iloz, int *ihiz, c *z, int *ldz, c *work, int *lwork, int *info) nogil:
+    _fortran_claqr0(wantt, wantz, n, ilo, ihi, h, ldh, w, iloz, ihiz, z, ldz, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_claqr1 "F_FUNC(claqr1,CLAQR1)"(int *n, npy_complex64 *h, int *ldh, npy_complex64 *s1, npy_complex64 *s2, npy_complex64 *v) nogil
+cdef void claqr1(int *n, c *h, int *ldh, c *s1, c *s2, c *v) nogil:
+    _fortran_claqr1(n, h, ldh, s1, s2, v)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_claqr2 "F_FUNC(claqr2,CLAQR2)"(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, npy_complex64 *h, int *ldh, int *iloz, int *ihiz, npy_complex64 *z, int *ldz, int *ns, int *nd, npy_complex64 *sh, npy_complex64 *v, int *ldv, int *nh, npy_complex64 *t, int *ldt, int *nv, npy_complex64 *wv, int *ldwv, npy_complex64 *work, int *lwork) nogil
+cdef void claqr2(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, c *h, int *ldh, int *iloz, int *ihiz, c *z, int *ldz, int *ns, int *nd, c *sh, c *v, int *ldv, int *nh, c *t, int *ldt, int *nv, c *wv, int *ldwv, c *work, int *lwork) nogil:
+    _fortran_claqr2(wantt, wantz, n, ktop, kbot, nw, h, ldh, iloz, ihiz, z, ldz, ns, nd, sh, v, ldv, nh, t, ldt, nv, wv, ldwv, work, lwork)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_claqr3 "F_FUNC(claqr3,CLAQR3)"(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, npy_complex64 *h, int *ldh, int *iloz, int *ihiz, npy_complex64 *z, int *ldz, int *ns, int *nd, npy_complex64 *sh, npy_complex64 *v, int *ldv, int *nh, npy_complex64 *t, int *ldt, int *nv, npy_complex64 *wv, int *ldwv, npy_complex64 *work, int *lwork) nogil
+cdef void claqr3(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, c *h, int *ldh, int *iloz, int *ihiz, c *z, int *ldz, int *ns, int *nd, c *sh, c *v, int *ldv, int *nh, c *t, int *ldt, int *nv, c *wv, int *ldwv, c *work, int *lwork) nogil:
+    _fortran_claqr3(wantt, wantz, n, ktop, kbot, nw, h, ldh, iloz, ihiz, z, ldz, ns, nd, sh, v, ldv, nh, t, ldt, nv, wv, ldwv, work, lwork)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_claqr4 "F_FUNC(claqr4,CLAQR4)"(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, npy_complex64 *h, int *ldh, npy_complex64 *w, int *iloz, int *ihiz, npy_complex64 *z, int *ldz, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void claqr4(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, c *h, int *ldh, c *w, int *iloz, int *ihiz, c *z, int *ldz, c *work, int *lwork, int *info) nogil:
+    _fortran_claqr4(wantt, wantz, n, ilo, ihi, h, ldh, w, iloz, ihiz, z, ldz, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_claqr5 "F_FUNC(claqr5,CLAQR5)"(bint *wantt, bint *wantz, int *kacc22, int *n, int *ktop, int *kbot, int *nshfts, npy_complex64 *s, npy_complex64 *h, int *ldh, int *iloz, int *ihiz, npy_complex64 *z, int *ldz, npy_complex64 *v, int *ldv, npy_complex64 *u, int *ldu, int *nv, npy_complex64 *wv, int *ldwv, int *nh, npy_complex64 *wh, int *ldwh) nogil
+cdef void claqr5(bint *wantt, bint *wantz, int *kacc22, int *n, int *ktop, int *kbot, int *nshfts, c *s, c *h, int *ldh, int *iloz, int *ihiz, c *z, int *ldz, c *v, int *ldv, c *u, int *ldu, int *nv, c *wv, int *ldwv, int *nh, c *wh, int *ldwh) nogil:
+    _fortran_claqr5(wantt, wantz, kacc22, n, ktop, kbot, nshfts, s, h, ldh, iloz, ihiz, z, ldz, v, ldv, u, ldu, nv, wv, ldwv, nh, wh, ldwh)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_claqsb "F_FUNC(claqsb,CLAQSB)"(char *uplo, int *n, int *kd, npy_complex64 *ab, int *ldab, s *s, s *scond, s *amax, char *equed) nogil
+cdef void claqsb(char *uplo, int *n, int *kd, c *ab, int *ldab, s *s, s *scond, s *amax, char *equed) nogil:
+    _fortran_claqsb(uplo, n, kd, ab, ldab, s, scond, amax, equed)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_claqsp "F_FUNC(claqsp,CLAQSP)"(char *uplo, int *n, npy_complex64 *ap, s *s, s *scond, s *amax, char *equed) nogil
+cdef void claqsp(char *uplo, int *n, c *ap, s *s, s *scond, s *amax, char *equed) nogil:
+    _fortran_claqsp(uplo, n, ap, s, scond, amax, equed)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_claqsy "F_FUNC(claqsy,CLAQSY)"(char *uplo, int *n, npy_complex64 *a, int *lda, s *s, s *scond, s *amax, char *equed) nogil
+cdef void claqsy(char *uplo, int *n, c *a, int *lda, s *s, s *scond, s *amax, char *equed) nogil:
+    _fortran_claqsy(uplo, n, a, lda, s, scond, amax, equed)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clar1v "F_FUNC(clar1v,CLAR1V)"(int *n, int *b1, int *bn, s *lambda_, s *d, s *l, s *ld, s *lld, s *pivmin, s *gaptol, npy_complex64 *z, bint *wantnc, int *negcnt, s *ztz, s *mingma, int *r, int *isuppz, s *nrminv, s *resid, s *rqcorr, s *work) nogil
+cdef void clar1v(int *n, int *b1, int *bn, s *lambda_, s *d, s *l, s *ld, s *lld, s *pivmin, s *gaptol, c *z, bint *wantnc, int *negcnt, s *ztz, s *mingma, int *r, int *isuppz, s *nrminv, s *resid, s *rqcorr, s *work) nogil:
+    _fortran_clar1v(n, b1, bn, lambda_, d, l, ld, lld, pivmin, gaptol, z, wantnc, negcnt, ztz, mingma, r, isuppz, nrminv, resid, rqcorr, work)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clar2v "F_FUNC(clar2v,CLAR2V)"(int *n, npy_complex64 *x, npy_complex64 *y, npy_complex64 *z, int *incx, s *c, npy_complex64 *s, int *incc) nogil
+cdef void clar2v(int *n, c *x, c *y, c *z, int *incx, s *c, c *s, int *incc) nogil:
+    _fortran_clar2v(n, x, y, z, incx, c, s, incc)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clarcm "F_FUNC(clarcm,CLARCM)"(int *m, int *n, s *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *c, int *ldc, s *rwork) nogil
+cdef void clarcm(int *m, int *n, s *a, int *lda, c *b, int *ldb, c *c, int *ldc, s *rwork) nogil:
+    _fortran_clarcm(m, n, a, lda, b, ldb, c, ldc, rwork)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clarf "F_FUNC(clarf,CLARF)"(char *side, int *m, int *n, npy_complex64 *v, int *incv, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work) nogil
+cdef void clarf(char *side, int *m, int *n, c *v, int *incv, c *tau, c *c, int *ldc, c *work) nogil:
+    _fortran_clarf(side, m, n, v, incv, tau, c, ldc, work)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clarfb "F_FUNC(clarfb,CLARFB)"(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, npy_complex64 *v, int *ldv, npy_complex64 *t, int *ldt, npy_complex64 *c, int *ldc, npy_complex64 *work, int *ldwork) nogil
+cdef void clarfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, c *v, int *ldv, c *t, int *ldt, c *c, int *ldc, c *work, int *ldwork) nogil:
+    _fortran_clarfb(side, trans, direct, storev, m, n, k, v, ldv, t, ldt, c, ldc, work, ldwork)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clarfg "F_FUNC(clarfg,CLARFG)"(int *n, npy_complex64 *alpha, npy_complex64 *x, int *incx, npy_complex64 *tau) nogil
+cdef void clarfg(int *n, c *alpha, c *x, int *incx, c *tau) nogil:
+    _fortran_clarfg(n, alpha, x, incx, tau)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clarfgp "F_FUNC(clarfgp,CLARFGP)"(int *n, npy_complex64 *alpha, npy_complex64 *x, int *incx, npy_complex64 *tau) nogil
+cdef void clarfgp(int *n, c *alpha, c *x, int *incx, c *tau) nogil:
+    _fortran_clarfgp(n, alpha, x, incx, tau)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clarft "F_FUNC(clarft,CLARFT)"(char *direct, char *storev, int *n, int *k, npy_complex64 *v, int *ldv, npy_complex64 *tau, npy_complex64 *t, int *ldt) nogil
+cdef void clarft(char *direct, char *storev, int *n, int *k, c *v, int *ldv, c *tau, c *t, int *ldt) nogil:
+    _fortran_clarft(direct, storev, n, k, v, ldv, tau, t, ldt)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clarfx "F_FUNC(clarfx,CLARFX)"(char *side, int *m, int *n, npy_complex64 *v, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work) nogil
+cdef void clarfx(char *side, int *m, int *n, c *v, c *tau, c *c, int *ldc, c *work) nogil:
+    _fortran_clarfx(side, m, n, v, tau, c, ldc, work)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clargv "F_FUNC(clargv,CLARGV)"(int *n, npy_complex64 *x, int *incx, npy_complex64 *y, int *incy, s *c, int *incc) nogil
+cdef void clargv(int *n, c *x, int *incx, c *y, int *incy, s *c, int *incc) nogil:
+    _fortran_clargv(n, x, incx, y, incy, c, incc)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clarnv "F_FUNC(clarnv,CLARNV)"(int *idist, int *iseed, int *n, npy_complex64 *x) nogil
+cdef void clarnv(int *idist, int *iseed, int *n, c *x) nogil:
+    _fortran_clarnv(idist, iseed, n, x)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clarrv "F_FUNC(clarrv,CLARRV)"(int *n, s *vl, s *vu, s *d, s *l, s *pivmin, int *isplit, int *m, int *dol, int *dou, s *minrgp, s *rtol1, s *rtol2, s *w, s *werr, s *wgap, int *iblock, int *indexw, s *gers, npy_complex64 *z, int *ldz, int *isuppz, s *work, int *iwork, int *info) nogil
+cdef void clarrv(int *n, s *vl, s *vu, s *d, s *l, s *pivmin, int *isplit, int *m, int *dol, int *dou, s *minrgp, s *rtol1, s *rtol2, s *w, s *werr, s *wgap, int *iblock, int *indexw, s *gers, c *z, int *ldz, int *isuppz, s *work, int *iwork, int *info) nogil:
+    _fortran_clarrv(n, vl, vu, d, l, pivmin, isplit, m, dol, dou, minrgp, rtol1, rtol2, w, werr, wgap, iblock, indexw, gers, z, ldz, isuppz, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clartg "F_FUNC(clartg,CLARTG)"(npy_complex64 *f, npy_complex64 *g, s *cs, npy_complex64 *sn, npy_complex64 *r) nogil
+cdef void clartg(c *f, c *g, s *cs, c *sn, c *r) nogil:
+    _fortran_clartg(f, g, cs, sn, r)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clartv "F_FUNC(clartv,CLARTV)"(int *n, npy_complex64 *x, int *incx, npy_complex64 *y, int *incy, s *c, npy_complex64 *s, int *incc) nogil
+cdef void clartv(int *n, c *x, int *incx, c *y, int *incy, s *c, c *s, int *incc) nogil:
+    _fortran_clartv(n, x, incx, y, incy, c, s, incc)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clarz "F_FUNC(clarz,CLARZ)"(char *side, int *m, int *n, int *l, npy_complex64 *v, int *incv, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work) nogil
+cdef void clarz(char *side, int *m, int *n, int *l, c *v, int *incv, c *tau, c *c, int *ldc, c *work) nogil:
+    _fortran_clarz(side, m, n, l, v, incv, tau, c, ldc, work)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clarzb "F_FUNC(clarzb,CLARZB)"(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, npy_complex64 *v, int *ldv, npy_complex64 *t, int *ldt, npy_complex64 *c, int *ldc, npy_complex64 *work, int *ldwork) nogil
+cdef void clarzb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, c *v, int *ldv, c *t, int *ldt, c *c, int *ldc, c *work, int *ldwork) nogil:
+    _fortran_clarzb(side, trans, direct, storev, m, n, k, l, v, ldv, t, ldt, c, ldc, work, ldwork)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clarzt "F_FUNC(clarzt,CLARZT)"(char *direct, char *storev, int *n, int *k, npy_complex64 *v, int *ldv, npy_complex64 *tau, npy_complex64 *t, int *ldt) nogil
+cdef void clarzt(char *direct, char *storev, int *n, int *k, c *v, int *ldv, c *tau, c *t, int *ldt) nogil:
+    _fortran_clarzt(direct, storev, n, k, v, ldv, tau, t, ldt)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clascl "F_FUNC(clascl,CLASCL)"(char *type_bn, int *kl, int *ku, s *cfrom, s *cto, int *m, int *n, npy_complex64 *a, int *lda, int *info) nogil
+cdef void clascl(char *type_bn, int *kl, int *ku, s *cfrom, s *cto, int *m, int *n, c *a, int *lda, int *info) nogil:
+    _fortran_clascl(type_bn, kl, ku, cfrom, cto, m, n, a, lda, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_claset "F_FUNC(claset,CLASET)"(char *uplo, int *m, int *n, npy_complex64 *alpha, npy_complex64 *beta, npy_complex64 *a, int *lda) nogil
+cdef void claset(char *uplo, int *m, int *n, c *alpha, c *beta, c *a, int *lda) nogil:
+    _fortran_claset(uplo, m, n, alpha, beta, a, lda)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clasr "F_FUNC(clasr,CLASR)"(char *side, char *pivot, char *direct, int *m, int *n, s *c, s *s, npy_complex64 *a, int *lda) nogil
+cdef void clasr(char *side, char *pivot, char *direct, int *m, int *n, s *c, s *s, c *a, int *lda) nogil:
+    _fortran_clasr(side, pivot, direct, m, n, c, s, a, lda)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_classq "F_FUNC(classq,CLASSQ)"(int *n, npy_complex64 *x, int *incx, s *scale, s *sumsq) nogil
+cdef void classq(int *n, c *x, int *incx, s *scale, s *sumsq) nogil:
+    _fortran_classq(n, x, incx, scale, sumsq)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_claswp "F_FUNC(claswp,CLASWP)"(int *n, npy_complex64 *a, int *lda, int *k1, int *k2, int *ipiv, int *incx) nogil
+cdef void claswp(int *n, c *a, int *lda, int *k1, int *k2, int *ipiv, int *incx) nogil:
+    _fortran_claswp(n, a, lda, k1, k2, ipiv, incx)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clasyf "F_FUNC(clasyf,CLASYF)"(char *uplo, int *n, int *nb, int *kb, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *w, int *ldw, int *info) nogil
+cdef void clasyf(char *uplo, int *n, int *nb, int *kb, c *a, int *lda, int *ipiv, c *w, int *ldw, int *info) nogil:
+    _fortran_clasyf(uplo, n, nb, kb, a, lda, ipiv, w, ldw, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clatbs "F_FUNC(clatbs,CLATBS)"(char *uplo, char *trans, char *diag, char *normin, int *n, int *kd, npy_complex64 *ab, int *ldab, npy_complex64 *x, s *scale, s *cnorm, int *info) nogil
+cdef void clatbs(char *uplo, char *trans, char *diag, char *normin, int *n, int *kd, c *ab, int *ldab, c *x, s *scale, s *cnorm, int *info) nogil:
+    _fortran_clatbs(uplo, trans, diag, normin, n, kd, ab, ldab, x, scale, cnorm, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clatdf "F_FUNC(clatdf,CLATDF)"(int *ijob, int *n, npy_complex64 *z, int *ldz, npy_complex64 *rhs, s *rdsum, s *rdscal, int *ipiv, int *jpiv) nogil
+cdef void clatdf(int *ijob, int *n, c *z, int *ldz, c *rhs, s *rdsum, s *rdscal, int *ipiv, int *jpiv) nogil:
+    _fortran_clatdf(ijob, n, z, ldz, rhs, rdsum, rdscal, ipiv, jpiv)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clatps "F_FUNC(clatps,CLATPS)"(char *uplo, char *trans, char *diag, char *normin, int *n, npy_complex64 *ap, npy_complex64 *x, s *scale, s *cnorm, int *info) nogil
+cdef void clatps(char *uplo, char *trans, char *diag, char *normin, int *n, c *ap, c *x, s *scale, s *cnorm, int *info) nogil:
+    _fortran_clatps(uplo, trans, diag, normin, n, ap, x, scale, cnorm, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clatrd "F_FUNC(clatrd,CLATRD)"(char *uplo, int *n, int *nb, npy_complex64 *a, int *lda, s *e, npy_complex64 *tau, npy_complex64 *w, int *ldw) nogil
+cdef void clatrd(char *uplo, int *n, int *nb, c *a, int *lda, s *e, c *tau, c *w, int *ldw) nogil:
+    _fortran_clatrd(uplo, n, nb, a, lda, e, tau, w, ldw)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clatrs "F_FUNC(clatrs,CLATRS)"(char *uplo, char *trans, char *diag, char *normin, int *n, npy_complex64 *a, int *lda, npy_complex64 *x, s *scale, s *cnorm, int *info) nogil
+cdef void clatrs(char *uplo, char *trans, char *diag, char *normin, int *n, c *a, int *lda, c *x, s *scale, s *cnorm, int *info) nogil:
+    _fortran_clatrs(uplo, trans, diag, normin, n, a, lda, x, scale, cnorm, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clatrz "F_FUNC(clatrz,CLATRZ)"(int *m, int *n, int *l, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work) nogil
+cdef void clatrz(int *m, int *n, int *l, c *a, int *lda, c *tau, c *work) nogil:
+    _fortran_clatrz(m, n, l, a, lda, tau, work)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clauu2 "F_FUNC(clauu2,CLAUU2)"(char *uplo, int *n, npy_complex64 *a, int *lda, int *info) nogil
+cdef void clauu2(char *uplo, int *n, c *a, int *lda, int *info) nogil:
+    _fortran_clauu2(uplo, n, a, lda, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_clauum "F_FUNC(clauum,CLAUUM)"(char *uplo, int *n, npy_complex64 *a, int *lda, int *info) nogil
+cdef void clauum(char *uplo, int *n, c *a, int *lda, int *info) nogil:
+    _fortran_clauum(uplo, n, a, lda, info)
+
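+# The routines from cpbcon through cptts2 below cover LAPACK's Hermitian
+# positive-definite families, distinguished by storage scheme: cpb* (band),
+# cpf* (rectangular full packed), cpo* (full), cpp* (packed), cps* (pivoted
+# Cholesky for the semidefinite case), and cpt* (tridiagonal).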
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cpbcon "F_FUNC(cpbcon,CPBCON)"(char *uplo, int *n, int *kd, npy_complex64 *ab, int *ldab, s *anorm, s *rcond, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void cpbcon(char *uplo, int *n, int *kd, c *ab, int *ldab, s *anorm, s *rcond, c *work, s *rwork, int *info) nogil:
+    _fortran_cpbcon(uplo, n, kd, ab, ldab, anorm, rcond, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cpbequ "F_FUNC(cpbequ,CPBEQU)"(char *uplo, int *n, int *kd, npy_complex64 *ab, int *ldab, s *s, s *scond, s *amax, int *info) nogil
+cdef void cpbequ(char *uplo, int *n, int *kd, c *ab, int *ldab, s *s, s *scond, s *amax, int *info) nogil:
+    _fortran_cpbequ(uplo, n, kd, ab, ldab, s, scond, amax, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cpbrfs "F_FUNC(cpbrfs,CPBRFS)"(char *uplo, int *n, int *kd, int *nrhs, npy_complex64 *ab, int *ldab, npy_complex64 *afb, int *ldafb, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, s *ferr, s *berr, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void cpbrfs(char *uplo, int *n, int *kd, int *nrhs, c *ab, int *ldab, c *afb, int *ldafb, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil:
+    _fortran_cpbrfs(uplo, n, kd, nrhs, ab, ldab, afb, ldafb, b, ldb, x, ldx, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cpbstf "F_FUNC(cpbstf,CPBSTF)"(char *uplo, int *n, int *kd, npy_complex64 *ab, int *ldab, int *info) nogil
+cdef void cpbstf(char *uplo, int *n, int *kd, c *ab, int *ldab, int *info) nogil:
+    _fortran_cpbstf(uplo, n, kd, ab, ldab, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cpbsv "F_FUNC(cpbsv,CPBSV)"(char *uplo, int *n, int *kd, int *nrhs, npy_complex64 *ab, int *ldab, npy_complex64 *b, int *ldb, int *info) nogil
+cdef void cpbsv(char *uplo, int *n, int *kd, int *nrhs, c *ab, int *ldab, c *b, int *ldb, int *info) nogil:
+    _fortran_cpbsv(uplo, n, kd, nrhs, ab, ldab, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cpbsvx "F_FUNC(cpbsvx,CPBSVX)"(char *fact, char *uplo, int *n, int *kd, int *nrhs, npy_complex64 *ab, int *ldab, npy_complex64 *afb, int *ldafb, char *equed, s *s, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, s *rcond, s *ferr, s *berr, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void cpbsvx(char *fact, char *uplo, int *n, int *kd, int *nrhs, c *ab, int *ldab, c *afb, int *ldafb, char *equed, s *s, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil:
+    _fortran_cpbsvx(fact, uplo, n, kd, nrhs, ab, ldab, afb, ldafb, equed, s, b, ldb, x, ldx, rcond, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cpbtf2 "F_FUNC(cpbtf2,CPBTF2)"(char *uplo, int *n, int *kd, npy_complex64 *ab, int *ldab, int *info) nogil
+cdef void cpbtf2(char *uplo, int *n, int *kd, c *ab, int *ldab, int *info) nogil:
+    _fortran_cpbtf2(uplo, n, kd, ab, ldab, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cpbtrf "F_FUNC(cpbtrf,CPBTRF)"(char *uplo, int *n, int *kd, npy_complex64 *ab, int *ldab, int *info) nogil
+cdef void cpbtrf(char *uplo, int *n, int *kd, c *ab, int *ldab, int *info) nogil:
+    _fortran_cpbtrf(uplo, n, kd, ab, ldab, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cpbtrs "F_FUNC(cpbtrs,CPBTRS)"(char *uplo, int *n, int *kd, int *nrhs, npy_complex64 *ab, int *ldab, npy_complex64 *b, int *ldb, int *info) nogil
+cdef void cpbtrs(char *uplo, int *n, int *kd, int *nrhs, c *ab, int *ldab, c *b, int *ldb, int *info) nogil:
+    _fortran_cpbtrs(uplo, n, kd, nrhs, ab, ldab, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cpftrf "F_FUNC(cpftrf,CPFTRF)"(char *transr, char *uplo, int *n, npy_complex64 *a, int *info) nogil
+cdef void cpftrf(char *transr, char *uplo, int *n, c *a, int *info) nogil:
+    _fortran_cpftrf(transr, uplo, n, a, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cpftri "F_FUNC(cpftri,CPFTRI)"(char *transr, char *uplo, int *n, npy_complex64 *a, int *info) nogil
+cdef void cpftri(char *transr, char *uplo, int *n, c *a, int *info) nogil:
+    _fortran_cpftri(transr, uplo, n, a, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cpftrs "F_FUNC(cpftrs,CPFTRS)"(char *transr, char *uplo, int *n, int *nrhs, npy_complex64 *a, npy_complex64 *b, int *ldb, int *info) nogil
+cdef void cpftrs(char *transr, char *uplo, int *n, int *nrhs, c *a, c *b, int *ldb, int *info) nogil:
+    _fortran_cpftrs(transr, uplo, n, nrhs, a, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cpocon "F_FUNC(cpocon,CPOCON)"(char *uplo, int *n, npy_complex64 *a, int *lda, s *anorm, s *rcond, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void cpocon(char *uplo, int *n, c *a, int *lda, s *anorm, s *rcond, c *work, s *rwork, int *info) nogil:
+    _fortran_cpocon(uplo, n, a, lda, anorm, rcond, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cpoequ "F_FUNC(cpoequ,CPOEQU)"(int *n, npy_complex64 *a, int *lda, s *s, s *scond, s *amax, int *info) nogil
+cdef void cpoequ(int *n, c *a, int *lda, s *s, s *scond, s *amax, int *info) nogil:
+    _fortran_cpoequ(n, a, lda, s, scond, amax, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cpoequb "F_FUNC(cpoequb,CPOEQUB)"(int *n, npy_complex64 *a, int *lda, s *s, s *scond, s *amax, int *info) nogil
+cdef void cpoequb(int *n, c *a, int *lda, s *s, s *scond, s *amax, int *info) nogil:
+    _fortran_cpoequb(n, a, lda, s, scond, amax, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cporfs "F_FUNC(cporfs,CPORFS)"(char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *af, int *ldaf, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, s *ferr, s *berr, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void cporfs(char *uplo, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil:
+    _fortran_cporfs(uplo, n, nrhs, a, lda, af, ldaf, b, ldb, x, ldx, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cposv "F_FUNC(cposv,CPOSV)"(char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, int *info) nogil
+cdef void cposv(char *uplo, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, int *info) nogil:
+    _fortran_cposv(uplo, n, nrhs, a, lda, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cposvx "F_FUNC(cposvx,CPOSVX)"(char *fact, char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *af, int *ldaf, char *equed, s *s, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, s *rcond, s *ferr, s *berr, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void cposvx(char *fact, char *uplo, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, char *equed, s *s, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil:
+    _fortran_cposvx(fact, uplo, n, nrhs, a, lda, af, ldaf, equed, s, b, ldb, x, ldx, rcond, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cpotf2 "F_FUNC(cpotf2,CPOTF2)"(char *uplo, int *n, npy_complex64 *a, int *lda, int *info) nogil
+cdef void cpotf2(char *uplo, int *n, c *a, int *lda, int *info) nogil:
+    _fortran_cpotf2(uplo, n, a, lda, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cpotrf "F_FUNC(cpotrf,CPOTRF)"(char *uplo, int *n, npy_complex64 *a, int *lda, int *info) nogil
+cdef void cpotrf(char *uplo, int *n, c *a, int *lda, int *info) nogil:
+    _fortran_cpotrf(uplo, n, a, lda, info)
+
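+# A minimal, hypothetical usage sketch for the wrapper above: cpotrf computes
+# an in-place Cholesky factorization of a Hermitian positive-definite
+# complex64 matrix stored in Fortran (column-major) order. The memoryview `a`
+# and its initialization are assumed, not part of this file.
+#
+#     cdef float complex[::1, :] a = ...   # n-by-n, F-contiguous (assumed)
+#     cdef char uplo = b'L'                # factor the lower triangle
+#     cdef int n = a.shape[0], lda = n, info = 0
+#     cpotrf(&uplo, &n, &a[0, 0], &lda, &info)
+#     if info != 0:   # info > 0 means the matrix is not positive definite
+#         raise ValueError("cpotrf failed with info=%d" % info)
+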
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cpotri "F_FUNC(cpotri,CPOTRI)"(char *uplo, int *n, npy_complex64 *a, int *lda, int *info) nogil
+cdef void cpotri(char *uplo, int *n, c *a, int *lda, int *info) nogil:
+    _fortran_cpotri(uplo, n, a, lda, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cpotrs "F_FUNC(cpotrs,CPOTRS)"(char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, int *info) nogil
+cdef void cpotrs(char *uplo, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, int *info) nogil:
+    _fortran_cpotrs(uplo, n, nrhs, a, lda, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cppcon "F_FUNC(cppcon,CPPCON)"(char *uplo, int *n, npy_complex64 *ap, s *anorm, s *rcond, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void cppcon(char *uplo, int *n, c *ap, s *anorm, s *rcond, c *work, s *rwork, int *info) nogil:
+    _fortran_cppcon(uplo, n, ap, anorm, rcond, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cppequ "F_FUNC(cppequ,CPPEQU)"(char *uplo, int *n, npy_complex64 *ap, s *s, s *scond, s *amax, int *info) nogil
+cdef void cppequ(char *uplo, int *n, c *ap, s *s, s *scond, s *amax, int *info) nogil:
+    _fortran_cppequ(uplo, n, ap, s, scond, amax, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cpprfs "F_FUNC(cpprfs,CPPRFS)"(char *uplo, int *n, int *nrhs, npy_complex64 *ap, npy_complex64 *afp, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, s *ferr, s *berr, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void cpprfs(char *uplo, int *n, int *nrhs, c *ap, c *afp, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil:
+    _fortran_cpprfs(uplo, n, nrhs, ap, afp, b, ldb, x, ldx, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cppsv "F_FUNC(cppsv,CPPSV)"(char *uplo, int *n, int *nrhs, npy_complex64 *ap, npy_complex64 *b, int *ldb, int *info) nogil
+cdef void cppsv(char *uplo, int *n, int *nrhs, c *ap, c *b, int *ldb, int *info) nogil:
+    _fortran_cppsv(uplo, n, nrhs, ap, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cppsvx "F_FUNC(cppsvx,CPPSVX)"(char *fact, char *uplo, int *n, int *nrhs, npy_complex64 *ap, npy_complex64 *afp, char *equed, s *s, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, s *rcond, s *ferr, s *berr, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void cppsvx(char *fact, char *uplo, int *n, int *nrhs, c *ap, c *afp, char *equed, s *s, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil:
+    _fortran_cppsvx(fact, uplo, n, nrhs, ap, afp, equed, s, b, ldb, x, ldx, rcond, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cpptrf "F_FUNC(cpptrf,CPPTRF)"(char *uplo, int *n, npy_complex64 *ap, int *info) nogil
+cdef void cpptrf(char *uplo, int *n, c *ap, int *info) nogil:
+    _fortran_cpptrf(uplo, n, ap, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cpptri "F_FUNC(cpptri,CPPTRI)"(char *uplo, int *n, npy_complex64 *ap, int *info) nogil
+cdef void cpptri(char *uplo, int *n, c *ap, int *info) nogil:
+    _fortran_cpptri(uplo, n, ap, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cpptrs "F_FUNC(cpptrs,CPPTRS)"(char *uplo, int *n, int *nrhs, npy_complex64 *ap, npy_complex64 *b, int *ldb, int *info) nogil
+cdef void cpptrs(char *uplo, int *n, int *nrhs, c *ap, c *b, int *ldb, int *info) nogil:
+    _fortran_cpptrs(uplo, n, nrhs, ap, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cpstf2 "F_FUNC(cpstf2,CPSTF2)"(char *uplo, int *n, npy_complex64 *a, int *lda, int *piv, int *rank, s *tol, s *work, int *info) nogil
+cdef void cpstf2(char *uplo, int *n, c *a, int *lda, int *piv, int *rank, s *tol, s *work, int *info) nogil:
+    _fortran_cpstf2(uplo, n, a, lda, piv, rank, tol, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cpstrf "F_FUNC(cpstrf,CPSTRF)"(char *uplo, int *n, npy_complex64 *a, int *lda, int *piv, int *rank, s *tol, s *work, int *info) nogil
+cdef void cpstrf(char *uplo, int *n, c *a, int *lda, int *piv, int *rank, s *tol, s *work, int *info) nogil:
+    _fortran_cpstrf(uplo, n, a, lda, piv, rank, tol, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cptcon "F_FUNC(cptcon,CPTCON)"(int *n, s *d, npy_complex64 *e, s *anorm, s *rcond, s *rwork, int *info) nogil
+cdef void cptcon(int *n, s *d, c *e, s *anorm, s *rcond, s *rwork, int *info) nogil:
+    _fortran_cptcon(n, d, e, anorm, rcond, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cpteqr "F_FUNC(cpteqr,CPTEQR)"(char *compz, int *n, s *d, s *e, npy_complex64 *z, int *ldz, s *work, int *info) nogil
+cdef void cpteqr(char *compz, int *n, s *d, s *e, c *z, int *ldz, s *work, int *info) nogil:
+    _fortran_cpteqr(compz, n, d, e, z, ldz, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cptrfs "F_FUNC(cptrfs,CPTRFS)"(char *uplo, int *n, int *nrhs, s *d, npy_complex64 *e, s *df, npy_complex64 *ef, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, s *ferr, s *berr, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void cptrfs(char *uplo, int *n, int *nrhs, s *d, c *e, s *df, c *ef, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil:
+    _fortran_cptrfs(uplo, n, nrhs, d, e, df, ef, b, ldb, x, ldx, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cptsv "F_FUNC(cptsv,CPTSV)"(int *n, int *nrhs, s *d, npy_complex64 *e, npy_complex64 *b, int *ldb, int *info) nogil
+cdef void cptsv(int *n, int *nrhs, s *d, c *e, c *b, int *ldb, int *info) nogil:
+    _fortran_cptsv(n, nrhs, d, e, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cptsvx "F_FUNC(cptsvx,CPTSVX)"(char *fact, int *n, int *nrhs, s *d, npy_complex64 *e, s *df, npy_complex64 *ef, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, s *rcond, s *ferr, s *berr, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void cptsvx(char *fact, int *n, int *nrhs, s *d, c *e, s *df, c *ef, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil:
+    _fortran_cptsvx(fact, n, nrhs, d, e, df, ef, b, ldb, x, ldx, rcond, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cpttrf "F_FUNC(cpttrf,CPTTRF)"(int *n, s *d, npy_complex64 *e, int *info) nogil
+cdef void cpttrf(int *n, s *d, c *e, int *info) nogil:
+    _fortran_cpttrf(n, d, e, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cpttrs "F_FUNC(cpttrs,CPTTRS)"(char *uplo, int *n, int *nrhs, s *d, npy_complex64 *e, npy_complex64 *b, int *ldb, int *info) nogil
+cdef void cpttrs(char *uplo, int *n, int *nrhs, s *d, c *e, c *b, int *ldb, int *info) nogil:
+    _fortran_cpttrs(uplo, n, nrhs, d, e, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cptts2 "F_FUNC(cptts2,CPTTS2)"(int *iuplo, int *n, int *nrhs, s *d, npy_complex64 *e, npy_complex64 *b, int *ldb) nogil
+cdef void cptts2(int *iuplo, int *n, int *nrhs, s *d, c *e, c *b, int *ldb) nogil:
+    _fortran_cptts2(iuplo, n, nrhs, d, e, b, ldb)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_crot "F_FUNC(crot,CROT)"(int *n, npy_complex64 *cx, int *incx, npy_complex64 *cy, int *incy, s *c, npy_complex64 *s) nogil
+cdef void crot(int *n, c *cx, int *incx, c *cy, int *incy, s *c, c *s) nogil:
+    _fortran_crot(n, cx, incx, cy, incy, c, s)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cspcon "F_FUNC(cspcon,CSPCON)"(char *uplo, int *n, npy_complex64 *ap, int *ipiv, s *anorm, s *rcond, npy_complex64 *work, int *info) nogil
+cdef void cspcon(char *uplo, int *n, c *ap, int *ipiv, s *anorm, s *rcond, c *work, int *info) nogil:
+    _fortran_cspcon(uplo, n, ap, ipiv, anorm, rcond, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cspmv "F_FUNC(cspmv,CSPMV)"(char *uplo, int *n, npy_complex64 *alpha, npy_complex64 *ap, npy_complex64 *x, int *incx, npy_complex64 *beta, npy_complex64 *y, int *incy) nogil
+cdef void cspmv(char *uplo, int *n, c *alpha, c *ap, c *x, int *incx, c *beta, c *y, int *incy) nogil:
+    _fortran_cspmv(uplo, n, alpha, ap, x, incx, beta, y, incy)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cspr "F_FUNC(cspr,CSPR)"(char *uplo, int *n, npy_complex64 *alpha, npy_complex64 *x, int *incx, npy_complex64 *ap) nogil
+cdef void cspr(char *uplo, int *n, c *alpha, c *x, int *incx, c *ap) nogil:
+    _fortran_cspr(uplo, n, alpha, x, incx, ap)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_csprfs "F_FUNC(csprfs,CSPRFS)"(char *uplo, int *n, int *nrhs, npy_complex64 *ap, npy_complex64 *afp, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, s *ferr, s *berr, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void csprfs(char *uplo, int *n, int *nrhs, c *ap, c *afp, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil:
+    _fortran_csprfs(uplo, n, nrhs, ap, afp, ipiv, b, ldb, x, ldx, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cspsv "F_FUNC(cspsv,CSPSV)"(char *uplo, int *n, int *nrhs, npy_complex64 *ap, int *ipiv, npy_complex64 *b, int *ldb, int *info) nogil
+cdef void cspsv(char *uplo, int *n, int *nrhs, c *ap, int *ipiv, c *b, int *ldb, int *info) nogil:
+    _fortran_cspsv(uplo, n, nrhs, ap, ipiv, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cspsvx "F_FUNC(cspsvx,CSPSVX)"(char *fact, char *uplo, int *n, int *nrhs, npy_complex64 *ap, npy_complex64 *afp, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, s *rcond, s *ferr, s *berr, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void cspsvx(char *fact, char *uplo, int *n, int *nrhs, c *ap, c *afp, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil:
+    _fortran_cspsvx(fact, uplo, n, nrhs, ap, afp, ipiv, b, ldb, x, ldx, rcond, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_csptrf "F_FUNC(csptrf,CSPTRF)"(char *uplo, int *n, npy_complex64 *ap, int *ipiv, int *info) nogil
+cdef void csptrf(char *uplo, int *n, c *ap, int *ipiv, int *info) nogil:
+    _fortran_csptrf(uplo, n, ap, ipiv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_csptri "F_FUNC(csptri,CSPTRI)"(char *uplo, int *n, npy_complex64 *ap, int *ipiv, npy_complex64 *work, int *info) nogil
+cdef void csptri(char *uplo, int *n, c *ap, int *ipiv, c *work, int *info) nogil:
+    _fortran_csptri(uplo, n, ap, ipiv, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_csptrs "F_FUNC(csptrs,CSPTRS)"(char *uplo, int *n, int *nrhs, npy_complex64 *ap, int *ipiv, npy_complex64 *b, int *ldb, int *info) nogil
+cdef void csptrs(char *uplo, int *n, int *nrhs, c *ap, int *ipiv, c *b, int *ldb, int *info) nogil:
+    _fortran_csptrs(uplo, n, nrhs, ap, ipiv, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_csrscl "F_FUNC(csrscl,CSRSCL)"(int *n, s *sa, npy_complex64 *sx, int *incx) nogil
+cdef void csrscl(int *n, s *sa, c *sx, int *incx) nogil:
+    _fortran_csrscl(n, sa, sx, incx)
+
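+# cst*: eigensolvers for the symmetric tridiagonal problem with complex
+# eigenvector storage -- divide and conquer (cstedc), MRRR (cstegr/cstemr),
+# inverse iteration (cstein) and implicit QL/QR (csteqr).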
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cstedc "F_FUNC(cstedc,CSTEDC)"(char *compz, int *n, s *d, s *e, npy_complex64 *z, int *ldz, npy_complex64 *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+cdef void cstedc(char *compz, int *n, s *d, s *e, c *z, int *ldz, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_cstedc(compz, n, d, e, z, ldz, work, lwork, rwork, lrwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cstegr "F_FUNC(cstegr,CSTEGR)"(char *jobz, char *range, int *n, s *d, s *e, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, npy_complex64 *z, int *ldz, int *isuppz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+cdef void cstegr(char *jobz, char *range, int *n, s *d, s *e, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, int *isuppz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_cstegr(jobz, range, n, d, e, vl, vu, il, iu, abstol, m, w, z, ldz, isuppz, work, lwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cstein "F_FUNC(cstein,CSTEIN)"(int *n, s *d, s *e, int *m, s *w, int *iblock, int *isplit, npy_complex64 *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil
+cdef void cstein(int *n, s *d, s *e, int *m, s *w, int *iblock, int *isplit, c *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil:
+    _fortran_cstein(n, d, e, m, w, iblock, isplit, z, ldz, work, iwork, ifail, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cstemr "F_FUNC(cstemr,CSTEMR)"(char *jobz, char *range, int *n, s *d, s *e, s *vl, s *vu, int *il, int *iu, int *m, s *w, npy_complex64 *z, int *ldz, int *nzc, int *isuppz, bint *tryrac, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+cdef void cstemr(char *jobz, char *range, int *n, s *d, s *e, s *vl, s *vu, int *il, int *iu, int *m, s *w, c *z, int *ldz, int *nzc, int *isuppz, bint *tryrac, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_cstemr(jobz, range, n, d, e, vl, vu, il, iu, m, w, z, ldz, nzc, isuppz, tryrac, work, lwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_csteqr "F_FUNC(csteqr,CSTEQR)"(char *compz, int *n, s *d, s *e, npy_complex64 *z, int *ldz, s *work, int *info) nogil
+cdef void csteqr(char *compz, int *n, s *d, s *e, c *z, int *ldz, s *work, int *info) nogil:
+    _fortran_csteqr(compz, n, d, e, z, ldz, work, info)
+
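+# csy*: full-storage counterparts of the csp* family for complex symmetric
+# matrices, including equilibration (csyequb), factorization (csytf2/csytrf),
+# inversion (csytri and its blocked variants) and solves
+# (csysv/csysvx/csytrs/csytrs2).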
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_csycon "F_FUNC(csycon,CSYCON)"(char *uplo, int *n, npy_complex64 *a, int *lda, int *ipiv, s *anorm, s *rcond, npy_complex64 *work, int *info) nogil
+cdef void csycon(char *uplo, int *n, c *a, int *lda, int *ipiv, s *anorm, s *rcond, c *work, int *info) nogil:
+    _fortran_csycon(uplo, n, a, lda, ipiv, anorm, rcond, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_csyconv "F_FUNC(csyconv,CSYCONV)"(char *uplo, char *way, int *n, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *work, int *info) nogil
+cdef void csyconv(char *uplo, char *way, int *n, c *a, int *lda, int *ipiv, c *work, int *info) nogil:
+    _fortran_csyconv(uplo, way, n, a, lda, ipiv, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_csyequb "F_FUNC(csyequb,CSYEQUB)"(char *uplo, int *n, npy_complex64 *a, int *lda, s *s, s *scond, s *amax, npy_complex64 *work, int *info) nogil
+cdef void csyequb(char *uplo, int *n, c *a, int *lda, s *s, s *scond, s *amax, c *work, int *info) nogil:
+    _fortran_csyequb(uplo, n, a, lda, s, scond, amax, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_csymv "F_FUNC(csymv,CSYMV)"(char *uplo, int *n, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *x, int *incx, npy_complex64 *beta, npy_complex64 *y, int *incy) nogil
+cdef void csymv(char *uplo, int *n, c *alpha, c *a, int *lda, c *x, int *incx, c *beta, c *y, int *incy) nogil:
+    _fortran_csymv(uplo, n, alpha, a, lda, x, incx, beta, y, incy)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_csyr "F_FUNC(csyr,CSYR)"(char *uplo, int *n, npy_complex64 *alpha, npy_complex64 *x, int *incx, npy_complex64 *a, int *lda) nogil
+cdef void csyr(char *uplo, int *n, c *alpha, c *x, int *incx, c *a, int *lda) nogil:
+    _fortran_csyr(uplo, n, alpha, x, incx, a, lda)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_csyrfs "F_FUNC(csyrfs,CSYRFS)"(char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *af, int *ldaf, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, s *ferr, s *berr, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void csyrfs(char *uplo, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil:
+    _fortran_csyrfs(uplo, n, nrhs, a, lda, af, ldaf, ipiv, b, ldb, x, ldx, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_csysv "F_FUNC(csysv,CSYSV)"(char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void csysv(char *uplo, int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, c *work, int *lwork, int *info) nogil:
+    _fortran_csysv(uplo, n, nrhs, a, lda, ipiv, b, ldb, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_csysvx "F_FUNC(csysvx,CSYSVX)"(char *fact, char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *af, int *ldaf, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, s *rcond, s *ferr, s *berr, npy_complex64 *work, int *lwork, s *rwork, int *info) nogil
+cdef void csysvx(char *fact, char *uplo, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, int *lwork, s *rwork, int *info) nogil:
+    _fortran_csysvx(fact, uplo, n, nrhs, a, lda, af, ldaf, ipiv, b, ldb, x, ldx, rcond, ferr, berr, work, lwork, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_csyswapr "F_FUNC(csyswapr,CSYSWAPR)"(char *uplo, int *n, npy_complex64 *a, int *lda, int *i1, int *i2) nogil
+cdef void csyswapr(char *uplo, int *n, c *a, int *lda, int *i1, int *i2) nogil:
+    _fortran_csyswapr(uplo, n, a, lda, i1, i2)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_csytf2 "F_FUNC(csytf2,CSYTF2)"(char *uplo, int *n, npy_complex64 *a, int *lda, int *ipiv, int *info) nogil
+cdef void csytf2(char *uplo, int *n, c *a, int *lda, int *ipiv, int *info) nogil:
+    _fortran_csytf2(uplo, n, a, lda, ipiv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_csytrf "F_FUNC(csytrf,CSYTRF)"(char *uplo, int *n, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void csytrf(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *lwork, int *info) nogil:
+    _fortran_csytrf(uplo, n, a, lda, ipiv, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_csytri "F_FUNC(csytri,CSYTRI)"(char *uplo, int *n, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *work, int *info) nogil
+cdef void csytri(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *info) nogil:
+    _fortran_csytri(uplo, n, a, lda, ipiv, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_csytri2 "F_FUNC(csytri2,CSYTRI2)"(char *uplo, int *n, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void csytri2(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *lwork, int *info) nogil:
+    _fortran_csytri2(uplo, n, a, lda, ipiv, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_csytri2x "F_FUNC(csytri2x,CSYTRI2X)"(char *uplo, int *n, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *work, int *nb, int *info) nogil
+cdef void csytri2x(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *nb, int *info) nogil:
+    _fortran_csytri2x(uplo, n, a, lda, ipiv, work, nb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_csytrs "F_FUNC(csytrs,CSYTRS)"(char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *b, int *ldb, int *info) nogil
+cdef void csytrs(char *uplo, int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, int *info) nogil:
+    _fortran_csytrs(uplo, n, nrhs, a, lda, ipiv, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_csytrs2 "F_FUNC(csytrs2,CSYTRS2)"(char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *work, int *info) nogil
+cdef void csytrs2(char *uplo, int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, c *work, int *info) nogil:
+    _fortran_csytrs2(uplo, n, nrhs, a, lda, ipiv, b, ldb, work, info)
+
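+# ctb*: condition estimation, refinement and solves for triangular band
+# matrices.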
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctbcon "F_FUNC(ctbcon,CTBCON)"(char *norm, char *uplo, char *diag, int *n, int *kd, npy_complex64 *ab, int *ldab, s *rcond, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void ctbcon(char *norm, char *uplo, char *diag, int *n, int *kd, c *ab, int *ldab, s *rcond, c *work, s *rwork, int *info) nogil:
+    _fortran_ctbcon(norm, uplo, diag, n, kd, ab, ldab, rcond, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctbrfs "F_FUNC(ctbrfs,CTBRFS)"(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, npy_complex64 *ab, int *ldab, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, s *ferr, s *berr, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void ctbrfs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, c *ab, int *ldab, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil:
+    _fortran_ctbrfs(uplo, trans, diag, n, kd, nrhs, ab, ldab, b, ldb, x, ldx, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctbtrs "F_FUNC(ctbtrs,CTBTRS)"(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, npy_complex64 *ab, int *ldab, npy_complex64 *b, int *ldb, int *info) nogil
+cdef void ctbtrs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, c *ab, int *ldab, c *b, int *ldb, int *info) nogil:
+    _fortran_ctbtrs(uplo, trans, diag, n, kd, nrhs, ab, ldab, b, ldb, info)
+
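+# ctf*: routines for the RFP (rectangular full packed) triangular format --
+# a trsm-style solve (ctfsm), inversion (ctftri) and conversions to packed
+# (ctfttp) and full (ctfttr) storage.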
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctfsm "F_FUNC(ctfsm,CTFSM)"(char *transr, char *side, char *uplo, char *trans, char *diag, int *m, int *n, npy_complex64 *alpha, npy_complex64 *a, npy_complex64 *b, int *ldb) nogil
+cdef void ctfsm(char *transr, char *side, char *uplo, char *trans, char *diag, int *m, int *n, c *alpha, c *a, c *b, int *ldb) nogil:
+    _fortran_ctfsm(transr, side, uplo, trans, diag, m, n, alpha, a, b, ldb)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctftri "F_FUNC(ctftri,CTFTRI)"(char *transr, char *uplo, char *diag, int *n, npy_complex64 *a, int *info) nogil
+cdef void ctftri(char *transr, char *uplo, char *diag, int *n, c *a, int *info) nogil:
+    _fortran_ctftri(transr, uplo, diag, n, a, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctfttp "F_FUNC(ctfttp,CTFTTP)"(char *transr, char *uplo, int *n, npy_complex64 *arf, npy_complex64 *ap, int *info) nogil
+cdef void ctfttp(char *transr, char *uplo, int *n, c *arf, c *ap, int *info) nogil:
+    _fortran_ctfttp(transr, uplo, n, arf, ap, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctfttr "F_FUNC(ctfttr,CTFTTR)"(char *transr, char *uplo, int *n, npy_complex64 *arf, npy_complex64 *a, int *lda, int *info) nogil
+cdef void ctfttr(char *transr, char *uplo, int *n, c *arf, c *a, int *lda, int *info) nogil:
+    _fortran_ctfttr(transr, uplo, n, arf, a, lda, info)
+
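+# ctg*: generalized (matrix-pencil) problems -- eigenvectors from a
+# generalized Schur form (ctgevc), Schur reordering (ctgex2/ctgexc/ctgsen),
+# the GSVD kernel (ctgsja), condition numbers (ctgsna) and the generalized
+# Sylvester equation (ctgsy2/ctgsyl).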
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctgevc "F_FUNC(ctgevc,CTGEVC)"(char *side, char *howmny, bint *select, int *n, npy_complex64 *s, int *lds, npy_complex64 *p, int *ldp, npy_complex64 *vl, int *ldvl, npy_complex64 *vr, int *ldvr, int *mm, int *m, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void ctgevc(char *side, char *howmny, bint *select, int *n, c *s, int *lds, c *p, int *ldp, c *vl, int *ldvl, c *vr, int *ldvr, int *mm, int *m, c *work, s *rwork, int *info) nogil:
+    _fortran_ctgevc(side, howmny, select, n, s, lds, p, ldp, vl, ldvl, vr, ldvr, mm, m, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctgex2 "F_FUNC(ctgex2,CTGEX2)"(bint *wantq, bint *wantz, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *q, int *ldq, npy_complex64 *z, int *ldz, int *j1, int *info) nogil
+cdef void ctgex2(bint *wantq, bint *wantz, int *n, c *a, int *lda, c *b, int *ldb, c *q, int *ldq, c *z, int *ldz, int *j1, int *info) nogil:
+    _fortran_ctgex2(wantq, wantz, n, a, lda, b, ldb, q, ldq, z, ldz, j1, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctgexc "F_FUNC(ctgexc,CTGEXC)"(bint *wantq, bint *wantz, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *q, int *ldq, npy_complex64 *z, int *ldz, int *ifst, int *ilst, int *info) nogil
+cdef void ctgexc(bint *wantq, bint *wantz, int *n, c *a, int *lda, c *b, int *ldb, c *q, int *ldq, c *z, int *ldz, int *ifst, int *ilst, int *info) nogil:
+    _fortran_ctgexc(wantq, wantz, n, a, lda, b, ldb, q, ldq, z, ldz, ifst, ilst, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctgsen "F_FUNC(ctgsen,CTGSEN)"(int *ijob, bint *wantq, bint *wantz, bint *select, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *alpha, npy_complex64 *beta, npy_complex64 *q, int *ldq, npy_complex64 *z, int *ldz, int *m, s *pl, s *pr, s *dif, npy_complex64 *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+cdef void ctgsen(int *ijob, bint *wantq, bint *wantz, bint *select, int *n, c *a, int *lda, c *b, int *ldb, c *alpha, c *beta, c *q, int *ldq, c *z, int *ldz, int *m, s *pl, s *pr, s *dif, c *work, int *lwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_ctgsen(ijob, wantq, wantz, select, n, a, lda, b, ldb, alpha, beta, q, ldq, z, ldz, m, pl, pr, dif, work, lwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctgsja "F_FUNC(ctgsja,CTGSJA)"(char *jobu, char *jobv, char *jobq, int *m, int *p, int *n, int *k, int *l, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, s *tola, s *tolb, s *alpha, s *beta, npy_complex64 *u, int *ldu, npy_complex64 *v, int *ldv, npy_complex64 *q, int *ldq, npy_complex64 *work, int *ncycle, int *info) nogil
+cdef void ctgsja(char *jobu, char *jobv, char *jobq, int *m, int *p, int *n, int *k, int *l, c *a, int *lda, c *b, int *ldb, s *tola, s *tolb, s *alpha, s *beta, c *u, int *ldu, c *v, int *ldv, c *q, int *ldq, c *work, int *ncycle, int *info) nogil:
+    _fortran_ctgsja(jobu, jobv, jobq, m, p, n, k, l, a, lda, b, ldb, tola, tolb, alpha, beta, u, ldu, v, ldv, q, ldq, work, ncycle, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctgsna "F_FUNC(ctgsna,CTGSNA)"(char *job, char *howmny, bint *select, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *vl, int *ldvl, npy_complex64 *vr, int *ldvr, s *s, s *dif, int *mm, int *m, npy_complex64 *work, int *lwork, int *iwork, int *info) nogil
+cdef void ctgsna(char *job, char *howmny, bint *select, int *n, c *a, int *lda, c *b, int *ldb, c *vl, int *ldvl, c *vr, int *ldvr, s *s, s *dif, int *mm, int *m, c *work, int *lwork, int *iwork, int *info) nogil:
+    _fortran_ctgsna(job, howmny, select, n, a, lda, b, ldb, vl, ldvl, vr, ldvr, s, dif, mm, m, work, lwork, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctgsy2 "F_FUNC(ctgsy2,CTGSY2)"(char *trans, int *ijob, int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *c, int *ldc, npy_complex64 *d, int *ldd, npy_complex64 *e, int *lde, npy_complex64 *f, int *ldf, s *scale, s *rdsum, s *rdscal, int *info) nogil
+cdef void ctgsy2(char *trans, int *ijob, int *m, int *n, c *a, int *lda, c *b, int *ldb, c *c, int *ldc, c *d, int *ldd, c *e, int *lde, c *f, int *ldf, s *scale, s *rdsum, s *rdscal, int *info) nogil:
+    _fortran_ctgsy2(trans, ijob, m, n, a, lda, b, ldb, c, ldc, d, ldd, e, lde, f, ldf, scale, rdsum, rdscal, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctgsyl "F_FUNC(ctgsyl,CTGSYL)"(char *trans, int *ijob, int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *c, int *ldc, npy_complex64 *d, int *ldd, npy_complex64 *e, int *lde, npy_complex64 *f, int *ldf, s *scale, s *dif, npy_complex64 *work, int *lwork, int *iwork, int *info) nogil
+cdef void ctgsyl(char *trans, int *ijob, int *m, int *n, c *a, int *lda, c *b, int *ldb, c *c, int *ldc, c *d, int *ldd, c *e, int *lde, c *f, int *ldf, s *scale, s *dif, c *work, int *lwork, int *iwork, int *info) nogil:
+    _fortran_ctgsyl(trans, ijob, m, n, a, lda, b, ldb, c, ldc, d, ldd, e, lde, f, ldf, scale, dif, work, lwork, iwork, info)
+
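+# ctp*: packed triangular condition/refine/invert/solve routines, plus the
+# triangular-pentagonal QR kernels (ctpqrt/ctpqrt2/ctpmqrt/ctprfb) and
+# conversions out of packed storage (ctpttf/ctpttr).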
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctpcon "F_FUNC(ctpcon,CTPCON)"(char *norm, char *uplo, char *diag, int *n, npy_complex64 *ap, s *rcond, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void ctpcon(char *norm, char *uplo, char *diag, int *n, c *ap, s *rcond, c *work, s *rwork, int *info) nogil:
+    _fortran_ctpcon(norm, uplo, diag, n, ap, rcond, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctpmqrt "F_FUNC(ctpmqrt,CTPMQRT)"(char *side, char *trans, int *m, int *n, int *k, int *l, int *nb, npy_complex64 *v, int *ldv, npy_complex64 *t, int *ldt, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *work, int *info) nogil
+cdef void ctpmqrt(char *side, char *trans, int *m, int *n, int *k, int *l, int *nb, c *v, int *ldv, c *t, int *ldt, c *a, int *lda, c *b, int *ldb, c *work, int *info) nogil:
+    _fortran_ctpmqrt(side, trans, m, n, k, l, nb, v, ldv, t, ldt, a, lda, b, ldb, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctpqrt "F_FUNC(ctpqrt,CTPQRT)"(int *m, int *n, int *l, int *nb, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *t, int *ldt, npy_complex64 *work, int *info) nogil
+cdef void ctpqrt(int *m, int *n, int *l, int *nb, c *a, int *lda, c *b, int *ldb, c *t, int *ldt, c *work, int *info) nogil:
+    _fortran_ctpqrt(m, n, l, nb, a, lda, b, ldb, t, ldt, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctpqrt2 "F_FUNC(ctpqrt2,CTPQRT2)"(int *m, int *n, int *l, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *t, int *ldt, int *info) nogil
+cdef void ctpqrt2(int *m, int *n, int *l, c *a, int *lda, c *b, int *ldb, c *t, int *ldt, int *info) nogil:
+    _fortran_ctpqrt2(m, n, l, a, lda, b, ldb, t, ldt, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctprfb "F_FUNC(ctprfb,CTPRFB)"(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, npy_complex64 *v, int *ldv, npy_complex64 *t, int *ldt, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *work, int *ldwork) nogil
+cdef void ctprfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, c *v, int *ldv, c *t, int *ldt, c *a, int *lda, c *b, int *ldb, c *work, int *ldwork) nogil:
+    _fortran_ctprfb(side, trans, direct, storev, m, n, k, l, v, ldv, t, ldt, a, lda, b, ldb, work, ldwork)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctprfs "F_FUNC(ctprfs,CTPRFS)"(char *uplo, char *trans, char *diag, int *n, int *nrhs, npy_complex64 *ap, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, s *ferr, s *berr, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void ctprfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, c *ap, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil:
+    _fortran_ctprfs(uplo, trans, diag, n, nrhs, ap, b, ldb, x, ldx, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctptri "F_FUNC(ctptri,CTPTRI)"(char *uplo, char *diag, int *n, npy_complex64 *ap, int *info) nogil
+cdef void ctptri(char *uplo, char *diag, int *n, c *ap, int *info) nogil:
+    _fortran_ctptri(uplo, diag, n, ap, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctptrs "F_FUNC(ctptrs,CTPTRS)"(char *uplo, char *trans, char *diag, int *n, int *nrhs, npy_complex64 *ap, npy_complex64 *b, int *ldb, int *info) nogil
+cdef void ctptrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, c *ap, c *b, int *ldb, int *info) nogil:
+    _fortran_ctptrs(uplo, trans, diag, n, nrhs, ap, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctpttf "F_FUNC(ctpttf,CTPTTF)"(char *transr, char *uplo, int *n, npy_complex64 *ap, npy_complex64 *arf, int *info) nogil
+cdef void ctpttf(char *transr, char *uplo, int *n, c *ap, c *arf, int *info) nogil:
+    _fortran_ctpttf(transr, uplo, n, ap, arf, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctpttr "F_FUNC(ctpttr,CTPTTR)"(char *uplo, int *n, npy_complex64 *ap, npy_complex64 *a, int *lda, int *info) nogil
+cdef void ctpttr(char *uplo, int *n, c *ap, c *a, int *lda, int *info) nogil:
+    _fortran_ctpttr(uplo, n, ap, a, lda, info)
+
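+# ctr*: dense triangular routines, including eigenvectors of a Schur factor
+# (ctrevc), Schur reordering and conditioning (ctrexc/ctrsen/ctrsna), the
+# Sylvester equation (ctrsyl) and triangular inversion/solves.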
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctrcon "F_FUNC(ctrcon,CTRCON)"(char *norm, char *uplo, char *diag, int *n, npy_complex64 *a, int *lda, s *rcond, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void ctrcon(char *norm, char *uplo, char *diag, int *n, c *a, int *lda, s *rcond, c *work, s *rwork, int *info) nogil:
+    _fortran_ctrcon(norm, uplo, diag, n, a, lda, rcond, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctrevc "F_FUNC(ctrevc,CTREVC)"(char *side, char *howmny, bint *select, int *n, npy_complex64 *t, int *ldt, npy_complex64 *vl, int *ldvl, npy_complex64 *vr, int *ldvr, int *mm, int *m, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void ctrevc(char *side, char *howmny, bint *select, int *n, c *t, int *ldt, c *vl, int *ldvl, c *vr, int *ldvr, int *mm, int *m, c *work, s *rwork, int *info) nogil:
+    _fortran_ctrevc(side, howmny, select, n, t, ldt, vl, ldvl, vr, ldvr, mm, m, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctrexc "F_FUNC(ctrexc,CTREXC)"(char *compq, int *n, npy_complex64 *t, int *ldt, npy_complex64 *q, int *ldq, int *ifst, int *ilst, int *info) nogil
+cdef void ctrexc(char *compq, int *n, c *t, int *ldt, c *q, int *ldq, int *ifst, int *ilst, int *info) nogil:
+    _fortran_ctrexc(compq, n, t, ldt, q, ldq, ifst, ilst, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctrrfs "F_FUNC(ctrrfs,CTRRFS)"(char *uplo, char *trans, char *diag, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, s *ferr, s *berr, npy_complex64 *work, s *rwork, int *info) nogil
+cdef void ctrrfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil:
+    _fortran_ctrrfs(uplo, trans, diag, n, nrhs, a, lda, b, ldb, x, ldx, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctrsen "F_FUNC(ctrsen,CTRSEN)"(char *job, char *compq, bint *select, int *n, npy_complex64 *t, int *ldt, npy_complex64 *q, int *ldq, npy_complex64 *w, int *m, s *s, s *sep, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void ctrsen(char *job, char *compq, bint *select, int *n, c *t, int *ldt, c *q, int *ldq, c *w, int *m, s *s, s *sep, c *work, int *lwork, int *info) nogil:
+    _fortran_ctrsen(job, compq, select, n, t, ldt, q, ldq, w, m, s, sep, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctrsna "F_FUNC(ctrsna,CTRSNA)"(char *job, char *howmny, bint *select, int *n, npy_complex64 *t, int *ldt, npy_complex64 *vl, int *ldvl, npy_complex64 *vr, int *ldvr, s *s, s *sep, int *mm, int *m, npy_complex64 *work, int *ldwork, s *rwork, int *info) nogil
+cdef void ctrsna(char *job, char *howmny, bint *select, int *n, c *t, int *ldt, c *vl, int *ldvl, c *vr, int *ldvr, s *s, s *sep, int *mm, int *m, c *work, int *ldwork, s *rwork, int *info) nogil:
+    _fortran_ctrsna(job, howmny, select, n, t, ldt, vl, ldvl, vr, ldvr, s, sep, mm, m, work, ldwork, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctrsyl "F_FUNC(ctrsyl,CTRSYL)"(char *trana, char *tranb, int *isgn, int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *c, int *ldc, s *scale, int *info) nogil
+cdef void ctrsyl(char *trana, char *tranb, int *isgn, int *m, int *n, c *a, int *lda, c *b, int *ldb, c *c, int *ldc, s *scale, int *info) nogil:
+    _fortran_ctrsyl(trana, tranb, isgn, m, n, a, lda, b, ldb, c, ldc, scale, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctrti2 "F_FUNC(ctrti2,CTRTI2)"(char *uplo, char *diag, int *n, npy_complex64 *a, int *lda, int *info) nogil
+cdef void ctrti2(char *uplo, char *diag, int *n, c *a, int *lda, int *info) nogil:
+    _fortran_ctrti2(uplo, diag, n, a, lda, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctrtri "F_FUNC(ctrtri,CTRTRI)"(char *uplo, char *diag, int *n, npy_complex64 *a, int *lda, int *info) nogil
+cdef void ctrtri(char *uplo, char *diag, int *n, c *a, int *lda, int *info) nogil:
+    _fortran_ctrtri(uplo, diag, n, a, lda, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctrtrs "F_FUNC(ctrtrs,CTRTRS)"(char *uplo, char *trans, char *diag, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, int *info) nogil
+cdef void ctrtrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, int *info) nogil:
+    _fortran_ctrtrs(uplo, trans, diag, n, nrhs, a, lda, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctrttf "F_FUNC(ctrttf,CTRTTF)"(char *transr, char *uplo, int *n, npy_complex64 *a, int *lda, npy_complex64 *arf, int *info) nogil
+cdef void ctrttf(char *transr, char *uplo, int *n, c *a, int *lda, c *arf, int *info) nogil:
+    _fortran_ctrttf(transr, uplo, n, a, lda, arf, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctrttp "F_FUNC(ctrttp,CTRTTP)"(char *uplo, int *n, npy_complex64 *a, int *lda, npy_complex64 *ap, int *info) nogil
+cdef void ctrttp(char *uplo, int *n, c *a, int *lda, c *ap, int *info) nogil:
+    _fortran_ctrttp(uplo, n, a, lda, ap, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ctzrzf "F_FUNC(ctzrzf,CTZRZF)"(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void ctzrzf(int *m, int *n, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil:
+    _fortran_ctzrzf(m, n, a, lda, tau, work, lwork, info)
+
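+# cun*: unitary-matrix machinery -- the CS decomposition (cunbdb/cuncsd),
+# generation of Q from elementary reflectors (cung*), application of Q to
+# another matrix (cunm*), and the packed-storage variants cupgtr/cupmtr.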
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cunbdb "F_FUNC(cunbdb,CUNBDB)"(char *trans, char *signs, int *m, int *p, int *q, npy_complex64 *x11, int *ldx11, npy_complex64 *x12, int *ldx12, npy_complex64 *x21, int *ldx21, npy_complex64 *x22, int *ldx22, s *theta, s *phi, npy_complex64 *taup1, npy_complex64 *taup2, npy_complex64 *tauq1, npy_complex64 *tauq2, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void cunbdb(char *trans, char *signs, int *m, int *p, int *q, c *x11, int *ldx11, c *x12, int *ldx12, c *x21, int *ldx21, c *x22, int *ldx22, s *theta, s *phi, c *taup1, c *taup2, c *tauq1, c *tauq2, c *work, int *lwork, int *info) nogil:
+    _fortran_cunbdb(trans, signs, m, p, q, x11, ldx11, x12, ldx12, x21, ldx21, x22, ldx22, theta, phi, taup1, taup2, tauq1, tauq2, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cuncsd "F_FUNC(cuncsd,CUNCSD)"(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, char *signs, int *m, int *p, int *q, npy_complex64 *x11, int *ldx11, npy_complex64 *x12, int *ldx12, npy_complex64 *x21, int *ldx21, npy_complex64 *x22, int *ldx22, s *theta, npy_complex64 *u1, int *ldu1, npy_complex64 *u2, int *ldu2, npy_complex64 *v1t, int *ldv1t, npy_complex64 *v2t, int *ldv2t, npy_complex64 *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *info) nogil
+cdef void cuncsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, char *signs, int *m, int *p, int *q, c *x11, int *ldx11, c *x12, int *ldx12, c *x21, int *ldx21, c *x22, int *ldx22, s *theta, c *u1, int *ldu1, c *u2, int *ldu2, c *v1t, int *ldv1t, c *v2t, int *ldv2t, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *info) nogil:
+    _fortran_cuncsd(jobu1, jobu2, jobv1t, jobv2t, trans, signs, m, p, q, x11, ldx11, x12, ldx12, x21, ldx21, x22, ldx22, theta, u1, ldu1, u2, ldu2, v1t, ldv1t, v2t, ldv2t, work, lwork, rwork, lrwork, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cung2l "F_FUNC(cung2l,CUNG2L)"(int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *info) nogil
+cdef void cung2l(int *m, int *n, int *k, c *a, int *lda, c *tau, c *work, int *info) nogil:
+    _fortran_cung2l(m, n, k, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cung2r "F_FUNC(cung2r,CUNG2R)"(int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *info) nogil
+cdef void cung2r(int *m, int *n, int *k, c *a, int *lda, c *tau, c *work, int *info) nogil:
+    _fortran_cung2r(m, n, k, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cungbr "F_FUNC(cungbr,CUNGBR)"(char *vect, int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void cungbr(char *vect, int *m, int *n, int *k, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil:
+    _fortran_cungbr(vect, m, n, k, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cunghr "F_FUNC(cunghr,CUNGHR)"(int *n, int *ilo, int *ihi, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void cunghr(int *n, int *ilo, int *ihi, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil:
+    _fortran_cunghr(n, ilo, ihi, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cungl2 "F_FUNC(cungl2,CUNGL2)"(int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *info) nogil
+cdef void cungl2(int *m, int *n, int *k, c *a, int *lda, c *tau, c *work, int *info) nogil:
+    _fortran_cungl2(m, n, k, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cunglq "F_FUNC(cunglq,CUNGLQ)"(int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void cunglq(int *m, int *n, int *k, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil:
+    _fortran_cunglq(m, n, k, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cungql "F_FUNC(cungql,CUNGQL)"(int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void cungql(int *m, int *n, int *k, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil:
+    _fortran_cungql(m, n, k, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cungqr "F_FUNC(cungqr,CUNGQR)"(int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void cungqr(int *m, int *n, int *k, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil:
+    _fortran_cungqr(m, n, k, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cungr2 "F_FUNC(cungr2,CUNGR2)"(int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *info) nogil
+cdef void cungr2(int *m, int *n, int *k, c *a, int *lda, c *tau, c *work, int *info) nogil:
+    _fortran_cungr2(m, n, k, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cungrq "F_FUNC(cungrq,CUNGRQ)"(int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void cungrq(int *m, int *n, int *k, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil:
+    _fortran_cungrq(m, n, k, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cungtr "F_FUNC(cungtr,CUNGTR)"(char *uplo, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void cungtr(char *uplo, int *n, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil:
+    _fortran_cungtr(uplo, n, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cunm2l "F_FUNC(cunm2l,CUNM2L)"(char *side, char *trans, int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *info) nogil
+cdef void cunm2l(char *side, char *trans, int *m, int *n, int *k, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *info) nogil:
+    _fortran_cunm2l(side, trans, m, n, k, a, lda, tau, c, ldc, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cunm2r "F_FUNC(cunm2r,CUNM2R)"(char *side, char *trans, int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *info) nogil
+cdef void cunm2r(char *side, char *trans, int *m, int *n, int *k, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *info) nogil:
+    _fortran_cunm2r(side, trans, m, n, k, a, lda, tau, c, ldc, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cunmbr "F_FUNC(cunmbr,CUNMBR)"(char *vect, char *side, char *trans, int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void cunmbr(char *vect, char *side, char *trans, int *m, int *n, int *k, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *lwork, int *info) nogil:
+    _fortran_cunmbr(vect, side, trans, m, n, k, a, lda, tau, c, ldc, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cunmhr "F_FUNC(cunmhr,CUNMHR)"(char *side, char *trans, int *m, int *n, int *ilo, int *ihi, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void cunmhr(char *side, char *trans, int *m, int *n, int *ilo, int *ihi, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *lwork, int *info) nogil:
+    _fortran_cunmhr(side, trans, m, n, ilo, ihi, a, lda, tau, c, ldc, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cunml2 "F_FUNC(cunml2,CUNML2)"(char *side, char *trans, int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *info) nogil
+cdef void cunml2(char *side, char *trans, int *m, int *n, int *k, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *info) nogil:
+    _fortran_cunml2(side, trans, m, n, k, a, lda, tau, c, ldc, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cunmlq "F_FUNC(cunmlq,CUNMLQ)"(char *side, char *trans, int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void cunmlq(char *side, char *trans, int *m, int *n, int *k, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *lwork, int *info) nogil:
+    _fortran_cunmlq(side, trans, m, n, k, a, lda, tau, c, ldc, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cunmql "F_FUNC(cunmql,CUNMQL)"(char *side, char *trans, int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void cunmql(char *side, char *trans, int *m, int *n, int *k, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *lwork, int *info) nogil:
+    _fortran_cunmql(side, trans, m, n, k, a, lda, tau, c, ldc, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cunmqr "F_FUNC(cunmqr,CUNMQR)"(char *side, char *trans, int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void cunmqr(char *side, char *trans, int *m, int *n, int *k, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *lwork, int *info) nogil:
+    _fortran_cunmqr(side, trans, m, n, k, a, lda, tau, c, ldc, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cunmr2 "F_FUNC(cunmr2,CUNMR2)"(char *side, char *trans, int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *info) nogil
+cdef void cunmr2(char *side, char *trans, int *m, int *n, int *k, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *info) nogil:
+    _fortran_cunmr2(side, trans, m, n, k, a, lda, tau, c, ldc, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cunmr3 "F_FUNC(cunmr3,CUNMR3)"(char *side, char *trans, int *m, int *n, int *k, int *l, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *info) nogil
+cdef void cunmr3(char *side, char *trans, int *m, int *n, int *k, int *l, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *info) nogil:
+    _fortran_cunmr3(side, trans, m, n, k, l, a, lda, tau, c, ldc, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cunmrq "F_FUNC(cunmrq,CUNMRQ)"(char *side, char *trans, int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void cunmrq(char *side, char *trans, int *m, int *n, int *k, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *lwork, int *info) nogil:
+    _fortran_cunmrq(side, trans, m, n, k, a, lda, tau, c, ldc, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cunmrz "F_FUNC(cunmrz,CUNMRZ)"(char *side, char *trans, int *m, int *n, int *k, int *l, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void cunmrz(char *side, char *trans, int *m, int *n, int *k, int *l, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *lwork, int *info) nogil:
+    _fortran_cunmrz(side, trans, m, n, k, l, a, lda, tau, c, ldc, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cunmtr "F_FUNC(cunmtr,CUNMTR)"(char *side, char *uplo, char *trans, int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *lwork, int *info) nogil
+cdef void cunmtr(char *side, char *uplo, char *trans, int *m, int *n, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *lwork, int *info) nogil:
+    _fortran_cunmtr(side, uplo, trans, m, n, a, lda, tau, c, ldc, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cupgtr "F_FUNC(cupgtr,CUPGTR)"(char *uplo, int *n, npy_complex64 *ap, npy_complex64 *tau, npy_complex64 *q, int *ldq, npy_complex64 *work, int *info) nogil
+cdef void cupgtr(char *uplo, int *n, c *ap, c *tau, c *q, int *ldq, c *work, int *info) nogil:
+    _fortran_cupgtr(uplo, n, ap, tau, q, ldq, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_cupmtr "F_FUNC(cupmtr,CUPMTR)"(char *side, char *uplo, char *trans, int *m, int *n, npy_complex64 *ap, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *info) nogil
+cdef void cupmtr(char *side, char *uplo, char *trans, int *m, int *n, c *ap, c *tau, c *c, int *ldc, c *work, int *info) nogil:
+    _fortran_cupmtr(side, uplo, trans, m, n, ap, tau, c, ldc, work, info)
+
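+# From here on the bindings switch to the double-precision real (d-prefix)
+# routines, beginning with the bidiagonal/band kernels (dbbcsd, dbdsdc,
+# dbdsqr, dgb*) and the general dense dge* family.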
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dbbcsd "F_FUNC(dbbcsd,DBBCSD)"(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, int *m, int *p, int *q, d *theta, d *phi, d *u1, int *ldu1, d *u2, int *ldu2, d *v1t, int *ldv1t, d *v2t, int *ldv2t, d *b11d, d *b11e, d *b12d, d *b12e, d *b21d, d *b21e, d *b22d, d *b22e, d *work, int *lwork, int *info) nogil
+cdef void dbbcsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, int *m, int *p, int *q, d *theta, d *phi, d *u1, int *ldu1, d *u2, int *ldu2, d *v1t, int *ldv1t, d *v2t, int *ldv2t, d *b11d, d *b11e, d *b12d, d *b12e, d *b21d, d *b21e, d *b22d, d *b22e, d *work, int *lwork, int *info) nogil:
+    _fortran_dbbcsd(jobu1, jobu2, jobv1t, jobv2t, trans, m, p, q, theta, phi, u1, ldu1, u2, ldu2, v1t, ldv1t, v2t, ldv2t, b11d, b11e, b12d, b12e, b21d, b21e, b22d, b22e, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dbdsdc "F_FUNC(dbdsdc,DBDSDC)"(char *uplo, char *compq, int *n, d *d, d *e, d *u, int *ldu, d *vt, int *ldvt, d *q, int *iq, d *work, int *iwork, int *info) nogil
+cdef void dbdsdc(char *uplo, char *compq, int *n, d *d, d *e, d *u, int *ldu, d *vt, int *ldvt, d *q, int *iq, d *work, int *iwork, int *info) nogil:
+    _fortran_dbdsdc(uplo, compq, n, d, e, u, ldu, vt, ldvt, q, iq, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dbdsqr "F_FUNC(dbdsqr,DBDSQR)"(char *uplo, int *n, int *ncvt, int *nru, int *ncc, d *d, d *e, d *vt, int *ldvt, d *u, int *ldu, d *c, int *ldc, d *work, int *info) nogil
+cdef void dbdsqr(char *uplo, int *n, int *ncvt, int *nru, int *ncc, d *d, d *e, d *vt, int *ldvt, d *u, int *ldu, d *c, int *ldc, d *work, int *info) nogil:
+    _fortran_dbdsqr(uplo, n, ncvt, nru, ncc, d, e, vt, ldvt, u, ldu, c, ldc, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ddisna "F_FUNC(ddisna,DDISNA)"(char *job, int *m, int *n, d *d, d *sep, int *info) nogil
+cdef void ddisna(char *job, int *m, int *n, d *d, d *sep, int *info) nogil:
+    _fortran_ddisna(job, m, n, d, sep, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgbbrd "F_FUNC(dgbbrd,DGBBRD)"(char *vect, int *m, int *n, int *ncc, int *kl, int *ku, d *ab, int *ldab, d *d, d *e, d *q, int *ldq, d *pt, int *ldpt, d *c, int *ldc, d *work, int *info) nogil
+cdef void dgbbrd(char *vect, int *m, int *n, int *ncc, int *kl, int *ku, d *ab, int *ldab, d *d, d *e, d *q, int *ldq, d *pt, int *ldpt, d *c, int *ldc, d *work, int *info) nogil:
+    _fortran_dgbbrd(vect, m, n, ncc, kl, ku, ab, ldab, d, e, q, ldq, pt, ldpt, c, ldc, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgbcon "F_FUNC(dgbcon,DGBCON)"(char *norm, int *n, int *kl, int *ku, d *ab, int *ldab, int *ipiv, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil
+cdef void dgbcon(char *norm, int *n, int *kl, int *ku, d *ab, int *ldab, int *ipiv, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil:
+    _fortran_dgbcon(norm, n, kl, ku, ab, ldab, ipiv, anorm, rcond, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgbequ "F_FUNC(dgbequ,DGBEQU)"(int *m, int *n, int *kl, int *ku, d *ab, int *ldab, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil
+cdef void dgbequ(int *m, int *n, int *kl, int *ku, d *ab, int *ldab, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil:
+    _fortran_dgbequ(m, n, kl, ku, ab, ldab, r, c, rowcnd, colcnd, amax, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgbequb "F_FUNC(dgbequb,DGBEQUB)"(int *m, int *n, int *kl, int *ku, d *ab, int *ldab, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil
+cdef void dgbequb(int *m, int *n, int *kl, int *ku, d *ab, int *ldab, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil:
+    _fortran_dgbequb(m, n, kl, ku, ab, ldab, r, c, rowcnd, colcnd, amax, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgbrfs "F_FUNC(dgbrfs,DGBRFS)"(char *trans, int *n, int *kl, int *ku, int *nrhs, d *ab, int *ldab, d *afb, int *ldafb, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+cdef void dgbrfs(char *trans, int *n, int *kl, int *ku, int *nrhs, d *ab, int *ldab, d *afb, int *ldafb, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil:
+    _fortran_dgbrfs(trans, n, kl, ku, nrhs, ab, ldab, afb, ldafb, ipiv, b, ldb, x, ldx, ferr, berr, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgbsv "F_FUNC(dgbsv,DGBSV)"(int *n, int *kl, int *ku, int *nrhs, d *ab, int *ldab, int *ipiv, d *b, int *ldb, int *info) nogil
+cdef void dgbsv(int *n, int *kl, int *ku, int *nrhs, d *ab, int *ldab, int *ipiv, d *b, int *ldb, int *info) nogil:
+    _fortran_dgbsv(n, kl, ku, nrhs, ab, ldab, ipiv, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgbsvx "F_FUNC(dgbsvx,DGBSVX)"(char *fact, char *trans, int *n, int *kl, int *ku, int *nrhs, d *ab, int *ldab, d *afb, int *ldafb, int *ipiv, char *equed, d *r, d *c, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+cdef void dgbsvx(char *fact, char *trans, int *n, int *kl, int *ku, int *nrhs, d *ab, int *ldab, d *afb, int *ldafb, int *ipiv, char *equed, d *r, d *c, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *iwork, int *info) nogil:
+    _fortran_dgbsvx(fact, trans, n, kl, ku, nrhs, ab, ldab, afb, ldafb, ipiv, equed, r, c, b, ldb, x, ldx, rcond, ferr, berr, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgbtf2 "F_FUNC(dgbtf2,DGBTF2)"(int *m, int *n, int *kl, int *ku, d *ab, int *ldab, int *ipiv, int *info) nogil
+cdef void dgbtf2(int *m, int *n, int *kl, int *ku, d *ab, int *ldab, int *ipiv, int *info) nogil:
+    _fortran_dgbtf2(m, n, kl, ku, ab, ldab, ipiv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgbtrf "F_FUNC(dgbtrf,DGBTRF)"(int *m, int *n, int *kl, int *ku, d *ab, int *ldab, int *ipiv, int *info) nogil
+cdef void dgbtrf(int *m, int *n, int *kl, int *ku, d *ab, int *ldab, int *ipiv, int *info) nogil:
+    _fortran_dgbtrf(m, n, kl, ku, ab, ldab, ipiv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgbtrs "F_FUNC(dgbtrs,DGBTRS)"(char *trans, int *n, int *kl, int *ku, int *nrhs, d *ab, int *ldab, int *ipiv, d *b, int *ldb, int *info) nogil
+cdef void dgbtrs(char *trans, int *n, int *kl, int *ku, int *nrhs, d *ab, int *ldab, int *ipiv, d *b, int *ldb, int *info) nogil:
+    _fortran_dgbtrs(trans, n, kl, ku, nrhs, ab, ldab, ipiv, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgebak "F_FUNC(dgebak,DGEBAK)"(char *job, char *side, int *n, int *ilo, int *ihi, d *scale, int *m, d *v, int *ldv, int *info) nogil
+cdef void dgebak(char *job, char *side, int *n, int *ilo, int *ihi, d *scale, int *m, d *v, int *ldv, int *info) nogil:
+    _fortran_dgebak(job, side, n, ilo, ihi, scale, m, v, ldv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgebal "F_FUNC(dgebal,DGEBAL)"(char *job, int *n, d *a, int *lda, int *ilo, int *ihi, d *scale, int *info) nogil
+cdef void dgebal(char *job, int *n, d *a, int *lda, int *ilo, int *ihi, d *scale, int *info) nogil:
+    _fortran_dgebal(job, n, a, lda, ilo, ihi, scale, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgebd2 "F_FUNC(dgebd2,DGEBD2)"(int *m, int *n, d *a, int *lda, d *d, d *e, d *tauq, d *taup, d *work, int *info) nogil
+cdef void dgebd2(int *m, int *n, d *a, int *lda, d *d, d *e, d *tauq, d *taup, d *work, int *info) nogil:
+    _fortran_dgebd2(m, n, a, lda, d, e, tauq, taup, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgebrd "F_FUNC(dgebrd,DGEBRD)"(int *m, int *n, d *a, int *lda, d *d, d *e, d *tauq, d *taup, d *work, int *lwork, int *info) nogil
+cdef void dgebrd(int *m, int *n, d *a, int *lda, d *d, d *e, d *tauq, d *taup, d *work, int *lwork, int *info) nogil:
+    _fortran_dgebrd(m, n, a, lda, d, e, tauq, taup, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgecon "F_FUNC(dgecon,DGECON)"(char *norm, int *n, d *a, int *lda, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil
+cdef void dgecon(char *norm, int *n, d *a, int *lda, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil:
+    _fortran_dgecon(norm, n, a, lda, anorm, rcond, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgeequ "F_FUNC(dgeequ,DGEEQU)"(int *m, int *n, d *a, int *lda, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil
+cdef void dgeequ(int *m, int *n, d *a, int *lda, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil:
+    _fortran_dgeequ(m, n, a, lda, r, c, rowcnd, colcnd, amax, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgeequb "F_FUNC(dgeequb,DGEEQUB)"(int *m, int *n, d *a, int *lda, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil
+cdef void dgeequb(int *m, int *n, d *a, int *lda, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil:
+    _fortran_dgeequb(m, n, a, lda, r, c, rowcnd, colcnd, amax, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgees "F_FUNC(dgees,DGEES)"(char *jobvs, char *sort, _dselect2 *select, int *n, d *a, int *lda, int *sdim, d *wr, d *wi, d *vs, int *ldvs, d *work, int *lwork, bint *bwork, int *info) nogil
+cdef void dgees(char *jobvs, char *sort, dselect2 *select, int *n, d *a, int *lda, int *sdim, d *wr, d *wi, d *vs, int *ldvs, d *work, int *lwork, bint *bwork, int *info) nogil:
+    _fortran_dgees(jobvs, sort, <_dselect2*>select, n, a, lda, sdim, wr, wi, vs, ldvs, work, lwork, bwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgeesx "F_FUNC(dgeesx,DGEESX)"(char *jobvs, char *sort, _dselect2 *select, char *sense, int *n, d *a, int *lda, int *sdim, d *wr, d *wi, d *vs, int *ldvs, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info) nogil
+cdef void dgeesx(char *jobvs, char *sort, dselect2 *select, char *sense, int *n, d *a, int *lda, int *sdim, d *wr, d *wi, d *vs, int *ldvs, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info) nogil:
+    _fortran_dgeesx(jobvs, sort, <_dselect2*>select, sense, n, a, lda, sdim, wr, wi, vs, ldvs, rconde, rcondv, work, lwork, iwork, liwork, bwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgeev "F_FUNC(dgeev,DGEEV)"(char *jobvl, char *jobvr, int *n, d *a, int *lda, d *wr, d *wi, d *vl, int *ldvl, d *vr, int *ldvr, d *work, int *lwork, int *info) nogil
+cdef void dgeev(char *jobvl, char *jobvr, int *n, d *a, int *lda, d *wr, d *wi, d *vl, int *ldvl, d *vr, int *ldvr, d *work, int *lwork, int *info) nogil:
+    _fortran_dgeev(jobvl, jobvr, n, a, lda, wr, wi, vl, ldvl, vr, ldvr, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgeevx "F_FUNC(dgeevx,DGEEVX)"(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, d *a, int *lda, d *wr, d *wi, d *vl, int *ldvl, d *vr, int *ldvr, int *ilo, int *ihi, d *scale, d *abnrm, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, int *info) nogil
+cdef void dgeevx(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, d *a, int *lda, d *wr, d *wi, d *vl, int *ldvl, d *vr, int *ldvr, int *ilo, int *ihi, d *scale, d *abnrm, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, int *info) nogil:
+    _fortran_dgeevx(balanc, jobvl, jobvr, sense, n, a, lda, wr, wi, vl, ldvl, vr, ldvr, ilo, ihi, scale, abnrm, rconde, rcondv, work, lwork, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgehd2 "F_FUNC(dgehd2,DGEHD2)"(int *n, int *ilo, int *ihi, d *a, int *lda, d *tau, d *work, int *info) nogil
+cdef void dgehd2(int *n, int *ilo, int *ihi, d *a, int *lda, d *tau, d *work, int *info) nogil:
+    _fortran_dgehd2(n, ilo, ihi, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgehrd "F_FUNC(dgehrd,DGEHRD)"(int *n, int *ilo, int *ihi, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+cdef void dgehrd(int *n, int *ilo, int *ihi, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil:
+    _fortran_dgehrd(n, ilo, ihi, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgejsv "F_FUNC(dgejsv,DGEJSV)"(char *joba, char *jobu, char *jobv, char *jobr, char *jobt, char *jobp, int *m, int *n, d *a, int *lda, d *sva, d *u, int *ldu, d *v, int *ldv, d *work, int *lwork, int *iwork, int *info) nogil
+cdef void dgejsv(char *joba, char *jobu, char *jobv, char *jobr, char *jobt, char *jobp, int *m, int *n, d *a, int *lda, d *sva, d *u, int *ldu, d *v, int *ldv, d *work, int *lwork, int *iwork, int *info) nogil:
+    _fortran_dgejsv(joba, jobu, jobv, jobr, jobt, jobp, m, n, a, lda, sva, u, ldu, v, ldv, work, lwork, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgelq2 "F_FUNC(dgelq2,DGELQ2)"(int *m, int *n, d *a, int *lda, d *tau, d *work, int *info) nogil
+cdef void dgelq2(int *m, int *n, d *a, int *lda, d *tau, d *work, int *info) nogil:
+    _fortran_dgelq2(m, n, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgelqf "F_FUNC(dgelqf,DGELQF)"(int *m, int *n, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+cdef void dgelqf(int *m, int *n, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil:
+    _fortran_dgelqf(m, n, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgels "F_FUNC(dgels,DGELS)"(char *trans, int *m, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, d *work, int *lwork, int *info) nogil
+cdef void dgels(char *trans, int *m, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, d *work, int *lwork, int *info) nogil:
+    _fortran_dgels(trans, m, n, nrhs, a, lda, b, ldb, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgelsd "F_FUNC(dgelsd,DGELSD)"(int *m, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, d *s, d *rcond, int *rank, d *work, int *lwork, int *iwork, int *info) nogil
+cdef void dgelsd(int *m, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, d *s, d *rcond, int *rank, d *work, int *lwork, int *iwork, int *info) nogil:
+    _fortran_dgelsd(m, n, nrhs, a, lda, b, ldb, s, rcond, rank, work, lwork, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgelss "F_FUNC(dgelss,DGELSS)"(int *m, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, d *s, d *rcond, int *rank, d *work, int *lwork, int *info) nogil
+cdef void dgelss(int *m, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, d *s, d *rcond, int *rank, d *work, int *lwork, int *info) nogil:
+    _fortran_dgelss(m, n, nrhs, a, lda, b, ldb, s, rcond, rank, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgelsy "F_FUNC(dgelsy,DGELSY)"(int *m, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, int *jpvt, d *rcond, int *rank, d *work, int *lwork, int *info) nogil
+cdef void dgelsy(int *m, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, int *jpvt, d *rcond, int *rank, d *work, int *lwork, int *info) nogil:
+    _fortran_dgelsy(m, n, nrhs, a, lda, b, ldb, jpvt, rcond, rank, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgemqrt "F_FUNC(dgemqrt,DGEMQRT)"(char *side, char *trans, int *m, int *n, int *k, int *nb, d *v, int *ldv, d *t, int *ldt, d *c, int *ldc, d *work, int *info) nogil
+cdef void dgemqrt(char *side, char *trans, int *m, int *n, int *k, int *nb, d *v, int *ldv, d *t, int *ldt, d *c, int *ldc, d *work, int *info) nogil:
+    _fortran_dgemqrt(side, trans, m, n, k, nb, v, ldv, t, ldt, c, ldc, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgeql2 "F_FUNC(dgeql2,DGEQL2)"(int *m, int *n, d *a, int *lda, d *tau, d *work, int *info) nogil
+cdef void dgeql2(int *m, int *n, d *a, int *lda, d *tau, d *work, int *info) nogil:
+    _fortran_dgeql2(m, n, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgeqlf "F_FUNC(dgeqlf,DGEQLF)"(int *m, int *n, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+cdef void dgeqlf(int *m, int *n, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil:
+    _fortran_dgeqlf(m, n, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgeqp3 "F_FUNC(dgeqp3,DGEQP3)"(int *m, int *n, d *a, int *lda, int *jpvt, d *tau, d *work, int *lwork, int *info) nogil
+cdef void dgeqp3(int *m, int *n, d *a, int *lda, int *jpvt, d *tau, d *work, int *lwork, int *info) nogil:
+    _fortran_dgeqp3(m, n, a, lda, jpvt, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgeqr2 "F_FUNC(dgeqr2,DGEQR2)"(int *m, int *n, d *a, int *lda, d *tau, d *work, int *info) nogil
+cdef void dgeqr2(int *m, int *n, d *a, int *lda, d *tau, d *work, int *info) nogil:
+    _fortran_dgeqr2(m, n, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgeqr2p "F_FUNC(dgeqr2p,DGEQR2P)"(int *m, int *n, d *a, int *lda, d *tau, d *work, int *info) nogil
+cdef void dgeqr2p(int *m, int *n, d *a, int *lda, d *tau, d *work, int *info) nogil:
+    _fortran_dgeqr2p(m, n, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgeqrf "F_FUNC(dgeqrf,DGEQRF)"(int *m, int *n, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+cdef void dgeqrf(int *m, int *n, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil:
+    _fortran_dgeqrf(m, n, a, lda, tau, work, lwork, info)
+
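+# Workspace-query sketch (editorial comment, not generated code): LAPACK
+# routines that take a ``work``/``lwork`` pair, such as dgeqrf above, report
+# their optimal workspace size when called with lwork = -1:
+#
+#     cdef int lwork = -1, info = 0
+#     cdef d query
+#     dgeqrf(&m, &n, &a[0, 0], &lda, &tau[0], &query, &lwork, &info)
+#     lwork = <int>query          # optimal size is returned in work[0]
+#
+# The caller then allocates ``lwork`` doubles and repeats the call.
+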
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgeqrfp "F_FUNC(dgeqrfp,DGEQRFP)"(int *m, int *n, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+cdef void dgeqrfp(int *m, int *n, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil:
+    _fortran_dgeqrfp(m, n, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgeqrt "F_FUNC(dgeqrt,DGEQRT)"(int *m, int *n, int *nb, d *a, int *lda, d *t, int *ldt, d *work, int *info) nogil
+cdef void dgeqrt(int *m, int *n, int *nb, d *a, int *lda, d *t, int *ldt, d *work, int *info) nogil:
+    _fortran_dgeqrt(m, n, nb, a, lda, t, ldt, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgeqrt2 "F_FUNC(dgeqrt2,DGEQRT2)"(int *m, int *n, d *a, int *lda, d *t, int *ldt, int *info) nogil
+cdef void dgeqrt2(int *m, int *n, d *a, int *lda, d *t, int *ldt, int *info) nogil:
+    _fortran_dgeqrt2(m, n, a, lda, t, ldt, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgeqrt3 "F_FUNC(dgeqrt3,DGEQRT3)"(int *m, int *n, d *a, int *lda, d *t, int *ldt, int *info) nogil
+cdef void dgeqrt3(int *m, int *n, d *a, int *lda, d *t, int *ldt, int *info) nogil:
+    _fortran_dgeqrt3(m, n, a, lda, t, ldt, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgerfs "F_FUNC(dgerfs,DGERFS)"(char *trans, int *n, int *nrhs, d *a, int *lda, d *af, int *ldaf, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+cdef void dgerfs(char *trans, int *n, int *nrhs, d *a, int *lda, d *af, int *ldaf, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil:
+    _fortran_dgerfs(trans, n, nrhs, a, lda, af, ldaf, ipiv, b, ldb, x, ldx, ferr, berr, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgerq2 "F_FUNC(dgerq2,DGERQ2)"(int *m, int *n, d *a, int *lda, d *tau, d *work, int *info) nogil
+cdef void dgerq2(int *m, int *n, d *a, int *lda, d *tau, d *work, int *info) nogil:
+    _fortran_dgerq2(m, n, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgerqf "F_FUNC(dgerqf,DGERQF)"(int *m, int *n, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+cdef void dgerqf(int *m, int *n, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil:
+    _fortran_dgerqf(m, n, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgesc2 "F_FUNC(dgesc2,DGESC2)"(int *n, d *a, int *lda, d *rhs, int *ipiv, int *jpiv, d *scale) nogil
+cdef void dgesc2(int *n, d *a, int *lda, d *rhs, int *ipiv, int *jpiv, d *scale) nogil:
+    _fortran_dgesc2(n, a, lda, rhs, ipiv, jpiv, scale)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgesdd "F_FUNC(dgesdd,DGESDD)"(char *jobz, int *m, int *n, d *a, int *lda, d *s, d *u, int *ldu, d *vt, int *ldvt, d *work, int *lwork, int *iwork, int *info) nogil
+cdef void dgesdd(char *jobz, int *m, int *n, d *a, int *lda, d *s, d *u, int *ldu, d *vt, int *ldvt, d *work, int *lwork, int *iwork, int *info) nogil:
+    _fortran_dgesdd(jobz, m, n, a, lda, s, u, ldu, vt, ldvt, work, lwork, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgesv "F_FUNC(dgesv,DGESV)"(int *n, int *nrhs, d *a, int *lda, int *ipiv, d *b, int *ldb, int *info) nogil
+cdef void dgesv(int *n, int *nrhs, d *a, int *lda, int *ipiv, d *b, int *ldb, int *info) nogil:
+    _fortran_dgesv(n, nrhs, a, lda, ipiv, b, ldb, info)
+
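+# Usage sketch (editorial comment; the function name ``solve`` is
+# hypothetical, not part of this module): user Cython code can cimport the
+# wrapper above and call it on Fortran-ordered buffers:
+#
+#     from scipy.linalg.cython_lapack cimport dgesv
+#
+#     def solve(double[::1, :] a, double[::1, :] b, int[::1] ipiv):
+#         cdef int n = a.shape[0], nrhs = b.shape[1], info = 0
+#         dgesv(&n, &nrhs, &a[0, 0], &n, &ipiv[0], &b[0, 0], &n, &info)
+#         return info             # 0 on success; info > 0 flags a singular U
+#
+# All arguments are passed by pointer, matching the Fortran calling
+# convention declared in _lapack_subroutines.h.
+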
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgesvd "F_FUNC(dgesvd,DGESVD)"(char *jobu, char *jobvt, int *m, int *n, d *a, int *lda, d *s, d *u, int *ldu, d *vt, int *ldvt, d *work, int *lwork, int *info) nogil
+cdef void dgesvd(char *jobu, char *jobvt, int *m, int *n, d *a, int *lda, d *s, d *u, int *ldu, d *vt, int *ldvt, d *work, int *lwork, int *info) nogil:
+    _fortran_dgesvd(jobu, jobvt, m, n, a, lda, s, u, ldu, vt, ldvt, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgesvj "F_FUNC(dgesvj,DGESVJ)"(char *joba, char *jobu, char *jobv, int *m, int *n, d *a, int *lda, d *sva, int *mv, d *v, int *ldv, d *work, int *lwork, int *info) nogil
+cdef void dgesvj(char *joba, char *jobu, char *jobv, int *m, int *n, d *a, int *lda, d *sva, int *mv, d *v, int *ldv, d *work, int *lwork, int *info) nogil:
+    _fortran_dgesvj(joba, jobu, jobv, m, n, a, lda, sva, mv, v, ldv, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgesvx "F_FUNC(dgesvx,DGESVX)"(char *fact, char *trans, int *n, int *nrhs, d *a, int *lda, d *af, int *ldaf, int *ipiv, char *equed, d *r, d *c, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+cdef void dgesvx(char *fact, char *trans, int *n, int *nrhs, d *a, int *lda, d *af, int *ldaf, int *ipiv, char *equed, d *r, d *c, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *iwork, int *info) nogil:
+    _fortran_dgesvx(fact, trans, n, nrhs, a, lda, af, ldaf, ipiv, equed, r, c, b, ldb, x, ldx, rcond, ferr, berr, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgetc2 "F_FUNC(dgetc2,DGETC2)"(int *n, d *a, int *lda, int *ipiv, int *jpiv, int *info) nogil
+cdef void dgetc2(int *n, d *a, int *lda, int *ipiv, int *jpiv, int *info) nogil:
+    _fortran_dgetc2(n, a, lda, ipiv, jpiv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgetf2 "F_FUNC(dgetf2,DGETF2)"(int *m, int *n, d *a, int *lda, int *ipiv, int *info) nogil
+cdef void dgetf2(int *m, int *n, d *a, int *lda, int *ipiv, int *info) nogil:
+    _fortran_dgetf2(m, n, a, lda, ipiv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgetrf "F_FUNC(dgetrf,DGETRF)"(int *m, int *n, d *a, int *lda, int *ipiv, int *info) nogil
+cdef void dgetrf(int *m, int *n, d *a, int *lda, int *ipiv, int *info) nogil:
+    _fortran_dgetrf(m, n, a, lda, ipiv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgetri "F_FUNC(dgetri,DGETRI)"(int *n, d *a, int *lda, int *ipiv, d *work, int *lwork, int *info) nogil
+cdef void dgetri(int *n, d *a, int *lda, int *ipiv, d *work, int *lwork, int *info) nogil:
+    _fortran_dgetri(n, a, lda, ipiv, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgetrs "F_FUNC(dgetrs,DGETRS)"(char *trans, int *n, int *nrhs, d *a, int *lda, int *ipiv, d *b, int *ldb, int *info) nogil
+cdef void dgetrs(char *trans, int *n, int *nrhs, d *a, int *lda, int *ipiv, d *b, int *ldb, int *info) nogil:
+    _fortran_dgetrs(trans, n, nrhs, a, lda, ipiv, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dggbak "F_FUNC(dggbak,DGGBAK)"(char *job, char *side, int *n, int *ilo, int *ihi, d *lscale, d *rscale, int *m, d *v, int *ldv, int *info) nogil
+cdef void dggbak(char *job, char *side, int *n, int *ilo, int *ihi, d *lscale, d *rscale, int *m, d *v, int *ldv, int *info) nogil:
+    _fortran_dggbak(job, side, n, ilo, ihi, lscale, rscale, m, v, ldv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dggbal "F_FUNC(dggbal,DGGBAL)"(char *job, int *n, d *a, int *lda, d *b, int *ldb, int *ilo, int *ihi, d *lscale, d *rscale, d *work, int *info) nogil
+cdef void dggbal(char *job, int *n, d *a, int *lda, d *b, int *ldb, int *ilo, int *ihi, d *lscale, d *rscale, d *work, int *info) nogil:
+    _fortran_dggbal(job, n, a, lda, b, ldb, ilo, ihi, lscale, rscale, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgges "F_FUNC(dgges,DGGES)"(char *jobvsl, char *jobvsr, char *sort, _dselect3 *selctg, int *n, d *a, int *lda, d *b, int *ldb, int *sdim, d *alphar, d *alphai, d *beta, d *vsl, int *ldvsl, d *vsr, int *ldvsr, d *work, int *lwork, bint *bwork, int *info) nogil
+cdef void dgges(char *jobvsl, char *jobvsr, char *sort, dselect3 *selctg, int *n, d *a, int *lda, d *b, int *ldb, int *sdim, d *alphar, d *alphai, d *beta, d *vsl, int *ldvsl, d *vsr, int *ldvsr, d *work, int *lwork, bint *bwork, int *info) nogil:
+    _fortran_dgges(jobvsl, jobvsr, sort, <_dselect3*>selctg, n, a, lda, b, ldb, sdim, alphar, alphai, beta, vsl, ldvsl, vsr, ldvsr, work, lwork, bwork, info)
+
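+# The ``sort`` callbacks of the Schur-form drivers (the dgeesx call above,
+# dgges here and dggesx below) are function pointers: the public wrappers
+# accept the ``dselect2``/``dselect3`` typedefs and cast them to the
+# header's ``_dselect2``/``_dselect3`` types before forwarding, as in
+# <_dselect3*>selctg.
+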
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dggesx "F_FUNC(dggesx,DGGESX)"(char *jobvsl, char *jobvsr, char *sort, _dselect3 *selctg, char *sense, int *n, d *a, int *lda, d *b, int *ldb, int *sdim, d *alphar, d *alphai, d *beta, d *vsl, int *ldvsl, d *vsr, int *ldvsr, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info) nogil
+cdef void dggesx(char *jobvsl, char *jobvsr, char *sort, dselect3 *selctg, char *sense, int *n, d *a, int *lda, d *b, int *ldb, int *sdim, d *alphar, d *alphai, d *beta, d *vsl, int *ldvsl, d *vsr, int *ldvsr, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info) nogil:
+    _fortran_dggesx(jobvsl, jobvsr, sort, <_dselect3*>selctg, sense, n, a, lda, b, ldb, sdim, alphar, alphai, beta, vsl, ldvsl, vsr, ldvsr, rconde, rcondv, work, lwork, iwork, liwork, bwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dggev "F_FUNC(dggev,DGGEV)"(char *jobvl, char *jobvr, int *n, d *a, int *lda, d *b, int *ldb, d *alphar, d *alphai, d *beta, d *vl, int *ldvl, d *vr, int *ldvr, d *work, int *lwork, int *info) nogil
+cdef void dggev(char *jobvl, char *jobvr, int *n, d *a, int *lda, d *b, int *ldb, d *alphar, d *alphai, d *beta, d *vl, int *ldvl, d *vr, int *ldvr, d *work, int *lwork, int *info) nogil:
+    _fortran_dggev(jobvl, jobvr, n, a, lda, b, ldb, alphar, alphai, beta, vl, ldvl, vr, ldvr, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dggevx "F_FUNC(dggevx,DGGEVX)"(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, d *a, int *lda, d *b, int *ldb, d *alphar, d *alphai, d *beta, d *vl, int *ldvl, d *vr, int *ldvr, int *ilo, int *ihi, d *lscale, d *rscale, d *abnrm, d *bbnrm, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, bint *bwork, int *info) nogil
+cdef void dggevx(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, d *a, int *lda, d *b, int *ldb, d *alphar, d *alphai, d *beta, d *vl, int *ldvl, d *vr, int *ldvr, int *ilo, int *ihi, d *lscale, d *rscale, d *abnrm, d *bbnrm, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, bint *bwork, int *info) nogil:
+    _fortran_dggevx(balanc, jobvl, jobvr, sense, n, a, lda, b, ldb, alphar, alphai, beta, vl, ldvl, vr, ldvr, ilo, ihi, lscale, rscale, abnrm, bbnrm, rconde, rcondv, work, lwork, iwork, bwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dggglm "F_FUNC(dggglm,DGGGLM)"(int *n, int *m, int *p, d *a, int *lda, d *b, int *ldb, d *d, d *x, d *y, d *work, int *lwork, int *info) nogil
+cdef void dggglm(int *n, int *m, int *p, d *a, int *lda, d *b, int *ldb, d *d, d *x, d *y, d *work, int *lwork, int *info) nogil:
+    _fortran_dggglm(n, m, p, a, lda, b, ldb, d, x, y, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgghrd "F_FUNC(dgghrd,DGGHRD)"(char *compq, char *compz, int *n, int *ilo, int *ihi, d *a, int *lda, d *b, int *ldb, d *q, int *ldq, d *z, int *ldz, int *info) nogil
+cdef void dgghrd(char *compq, char *compz, int *n, int *ilo, int *ihi, d *a, int *lda, d *b, int *ldb, d *q, int *ldq, d *z, int *ldz, int *info) nogil:
+    _fortran_dgghrd(compq, compz, n, ilo, ihi, a, lda, b, ldb, q, ldq, z, ldz, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgglse "F_FUNC(dgglse,DGGLSE)"(int *m, int *n, int *p, d *a, int *lda, d *b, int *ldb, d *c, d *d, d *x, d *work, int *lwork, int *info) nogil
+cdef void dgglse(int *m, int *n, int *p, d *a, int *lda, d *b, int *ldb, d *c, d *d, d *x, d *work, int *lwork, int *info) nogil:
+    _fortran_dgglse(m, n, p, a, lda, b, ldb, c, d, x, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dggqrf "F_FUNC(dggqrf,DGGQRF)"(int *n, int *m, int *p, d *a, int *lda, d *taua, d *b, int *ldb, d *taub, d *work, int *lwork, int *info) nogil
+cdef void dggqrf(int *n, int *m, int *p, d *a, int *lda, d *taua, d *b, int *ldb, d *taub, d *work, int *lwork, int *info) nogil:
+    _fortran_dggqrf(n, m, p, a, lda, taua, b, ldb, taub, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dggrqf "F_FUNC(dggrqf,DGGRQF)"(int *m, int *p, int *n, d *a, int *lda, d *taua, d *b, int *ldb, d *taub, d *work, int *lwork, int *info) nogil
+cdef void dggrqf(int *m, int *p, int *n, d *a, int *lda, d *taua, d *b, int *ldb, d *taub, d *work, int *lwork, int *info) nogil:
+    _fortran_dggrqf(m, p, n, a, lda, taua, b, ldb, taub, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgsvj0 "F_FUNC(dgsvj0,DGSVJ0)"(char *jobv, int *m, int *n, d *a, int *lda, d *d, d *sva, int *mv, d *v, int *ldv, d *eps, d *sfmin, d *tol, int *nsweep, d *work, int *lwork, int *info) nogil
+cdef void dgsvj0(char *jobv, int *m, int *n, d *a, int *lda, d *d, d *sva, int *mv, d *v, int *ldv, d *eps, d *sfmin, d *tol, int *nsweep, d *work, int *lwork, int *info) nogil:
+    _fortran_dgsvj0(jobv, m, n, a, lda, d, sva, mv, v, ldv, eps, sfmin, tol, nsweep, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgsvj1 "F_FUNC(dgsvj1,DGSVJ1)"(char *jobv, int *m, int *n, int *n1, d *a, int *lda, d *d, d *sva, int *mv, d *v, int *ldv, d *eps, d *sfmin, d *tol, int *nsweep, d *work, int *lwork, int *info) nogil
+cdef void dgsvj1(char *jobv, int *m, int *n, int *n1, d *a, int *lda, d *d, d *sva, int *mv, d *v, int *ldv, d *eps, d *sfmin, d *tol, int *nsweep, d *work, int *lwork, int *info) nogil:
+    _fortran_dgsvj1(jobv, m, n, n1, a, lda, d, sva, mv, v, ldv, eps, sfmin, tol, nsweep, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgtcon "F_FUNC(dgtcon,DGTCON)"(char *norm, int *n, d *dl, d *d, d *du, d *du2, int *ipiv, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil
+cdef void dgtcon(char *norm, int *n, d *dl, d *d, d *du, d *du2, int *ipiv, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil:
+    _fortran_dgtcon(norm, n, dl, d, du, du2, ipiv, anorm, rcond, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgtrfs "F_FUNC(dgtrfs,DGTRFS)"(char *trans, int *n, int *nrhs, d *dl, d *d, d *du, d *dlf, d *df, d *duf, d *du2, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+cdef void dgtrfs(char *trans, int *n, int *nrhs, d *dl, d *d, d *du, d *dlf, d *df, d *duf, d *du2, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil:
+    _fortran_dgtrfs(trans, n, nrhs, dl, d, du, dlf, df, duf, du2, ipiv, b, ldb, x, ldx, ferr, berr, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgtsv "F_FUNC(dgtsv,DGTSV)"(int *n, int *nrhs, d *dl, d *d, d *du, d *b, int *ldb, int *info) nogil
+cdef void dgtsv(int *n, int *nrhs, d *dl, d *d, d *du, d *b, int *ldb, int *info) nogil:
+    _fortran_dgtsv(n, nrhs, dl, d, du, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgtsvx "F_FUNC(dgtsvx,DGTSVX)"(char *fact, char *trans, int *n, int *nrhs, d *dl, d *d, d *du, d *dlf, d *df, d *duf, d *du2, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+cdef void dgtsvx(char *fact, char *trans, int *n, int *nrhs, d *dl, d *d, d *du, d *dlf, d *df, d *duf, d *du2, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *iwork, int *info) nogil:
+    _fortran_dgtsvx(fact, trans, n, nrhs, dl, d, du, dlf, df, duf, du2, ipiv, b, ldb, x, ldx, rcond, ferr, berr, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgttrf "F_FUNC(dgttrf,DGTTRF)"(int *n, d *dl, d *d, d *du, d *du2, int *ipiv, int *info) nogil
+cdef void dgttrf(int *n, d *dl, d *d, d *du, d *du2, int *ipiv, int *info) nogil:
+    _fortran_dgttrf(n, dl, d, du, du2, ipiv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgttrs "F_FUNC(dgttrs,DGTTRS)"(char *trans, int *n, int *nrhs, d *dl, d *d, d *du, d *du2, int *ipiv, d *b, int *ldb, int *info) nogil
+cdef void dgttrs(char *trans, int *n, int *nrhs, d *dl, d *d, d *du, d *du2, int *ipiv, d *b, int *ldb, int *info) nogil:
+    _fortran_dgttrs(trans, n, nrhs, dl, d, du, du2, ipiv, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dgtts2 "F_FUNC(dgtts2,DGTTS2)"(int *itrans, int *n, int *nrhs, d *dl, d *d, d *du, d *du2, int *ipiv, d *b, int *ldb) nogil
+cdef void dgtts2(int *itrans, int *n, int *nrhs, d *dl, d *d, d *du, d *du2, int *ipiv, d *b, int *ldb) nogil:
+    _fortran_dgtts2(itrans, n, nrhs, dl, d, du, du2, ipiv, b, ldb)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dhgeqz "F_FUNC(dhgeqz,DHGEQZ)"(char *job, char *compq, char *compz, int *n, int *ilo, int *ihi, d *h, int *ldh, d *t, int *ldt, d *alphar, d *alphai, d *beta, d *q, int *ldq, d *z, int *ldz, d *work, int *lwork, int *info) nogil
+cdef void dhgeqz(char *job, char *compq, char *compz, int *n, int *ilo, int *ihi, d *h, int *ldh, d *t, int *ldt, d *alphar, d *alphai, d *beta, d *q, int *ldq, d *z, int *ldz, d *work, int *lwork, int *info) nogil:
+    _fortran_dhgeqz(job, compq, compz, n, ilo, ihi, h, ldh, t, ldt, alphar, alphai, beta, q, ldq, z, ldz, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dhsein "F_FUNC(dhsein,DHSEIN)"(char *side, char *eigsrc, char *initv, bint *select, int *n, d *h, int *ldh, d *wr, d *wi, d *vl, int *ldvl, d *vr, int *ldvr, int *mm, int *m, d *work, int *ifaill, int *ifailr, int *info) nogil
+cdef void dhsein(char *side, char *eigsrc, char *initv, bint *select, int *n, d *h, int *ldh, d *wr, d *wi, d *vl, int *ldvl, d *vr, int *ldvr, int *mm, int *m, d *work, int *ifaill, int *ifailr, int *info) nogil:
+    _fortran_dhsein(side, eigsrc, initv, select, n, h, ldh, wr, wi, vl, ldvl, vr, ldvr, mm, m, work, ifaill, ifailr, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dhseqr "F_FUNC(dhseqr,DHSEQR)"(char *job, char *compz, int *n, int *ilo, int *ihi, d *h, int *ldh, d *wr, d *wi, d *z, int *ldz, d *work, int *lwork, int *info) nogil
+cdef void dhseqr(char *job, char *compz, int *n, int *ilo, int *ihi, d *h, int *ldh, d *wr, d *wi, d *z, int *ldz, d *work, int *lwork, int *info) nogil:
+    _fortran_dhseqr(job, compz, n, ilo, ihi, h, ldh, wr, wi, z, ldz, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlabad "F_FUNC(dlabad,DLABAD)"(d *small, d *large) nogil
+cdef void dlabad(d *small, d *large) nogil:
+    _fortran_dlabad(small, large)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlabrd "F_FUNC(dlabrd,DLABRD)"(int *m, int *n, int *nb, d *a, int *lda, d *d, d *e, d *tauq, d *taup, d *x, int *ldx, d *y, int *ldy) nogil
+cdef void dlabrd(int *m, int *n, int *nb, d *a, int *lda, d *d, d *e, d *tauq, d *taup, d *x, int *ldx, d *y, int *ldy) nogil:
+    _fortran_dlabrd(m, n, nb, a, lda, d, e, tauq, taup, x, ldx, y, ldy)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlacn2 "F_FUNC(dlacn2,DLACN2)"(int *n, d *v, d *x, int *isgn, d *est, int *kase, int *isave) nogil
+cdef void dlacn2(int *n, d *v, d *x, int *isgn, d *est, int *kase, int *isave) nogil:
+    _fortran_dlacn2(n, v, x, isgn, est, kase, isave)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlacon "F_FUNC(dlacon,DLACON)"(int *n, d *v, d *x, int *isgn, d *est, int *kase) nogil
+cdef void dlacon(int *n, d *v, d *x, int *isgn, d *est, int *kase) nogil:
+    _fortran_dlacon(n, v, x, isgn, est, kase)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlacpy "F_FUNC(dlacpy,DLACPY)"(char *uplo, int *m, int *n, d *a, int *lda, d *b, int *ldb) nogil
+cdef void dlacpy(char *uplo, int *m, int *n, d *a, int *lda, d *b, int *ldb) nogil:
+    _fortran_dlacpy(uplo, m, n, a, lda, b, ldb)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dladiv "F_FUNC(dladiv,DLADIV)"(d *a, d *b, d *c, d *d, d *p, d *q) nogil
+cdef void dladiv(d *a, d *b, d *c, d *d, d *p, d *q) nogil:
+    _fortran_dladiv(a, b, c, d, p, q)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlae2 "F_FUNC(dlae2,DLAE2)"(d *a, d *b, d *c, d *rt1, d *rt2) nogil
+cdef void dlae2(d *a, d *b, d *c, d *rt1, d *rt2) nogil:
+    _fortran_dlae2(a, b, c, rt1, rt2)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlaebz "F_FUNC(dlaebz,DLAEBZ)"(int *ijob, int *nitmax, int *n, int *mmax, int *minp, int *nbmin, d *abstol, d *reltol, d *pivmin, d *d, d *e, d *e2, int *nval, d *ab, d *c, int *mout, int *nab, d *work, int *iwork, int *info) nogil
+cdef void dlaebz(int *ijob, int *nitmax, int *n, int *mmax, int *minp, int *nbmin, d *abstol, d *reltol, d *pivmin, d *d, d *e, d *e2, int *nval, d *ab, d *c, int *mout, int *nab, d *work, int *iwork, int *info) nogil:
+    _fortran_dlaebz(ijob, nitmax, n, mmax, minp, nbmin, abstol, reltol, pivmin, d, e, e2, nval, ab, c, mout, nab, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlaed0 "F_FUNC(dlaed0,DLAED0)"(int *icompq, int *qsiz, int *n, d *d, d *e, d *q, int *ldq, d *qstore, int *ldqs, d *work, int *iwork, int *info) nogil
+cdef void dlaed0(int *icompq, int *qsiz, int *n, d *d, d *e, d *q, int *ldq, d *qstore, int *ldqs, d *work, int *iwork, int *info) nogil:
+    _fortran_dlaed0(icompq, qsiz, n, d, e, q, ldq, qstore, ldqs, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlaed1 "F_FUNC(dlaed1,DLAED1)"(int *n, d *d, d *q, int *ldq, int *indxq, d *rho, int *cutpnt, d *work, int *iwork, int *info) nogil
+cdef void dlaed1(int *n, d *d, d *q, int *ldq, int *indxq, d *rho, int *cutpnt, d *work, int *iwork, int *info) nogil:
+    _fortran_dlaed1(n, d, q, ldq, indxq, rho, cutpnt, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlaed2 "F_FUNC(dlaed2,DLAED2)"(int *k, int *n, int *n1, d *d, d *q, int *ldq, int *indxq, d *rho, d *z, d *dlamda, d *w, d *q2, int *indx, int *indxc, int *indxp, int *coltyp, int *info) nogil
+cdef void dlaed2(int *k, int *n, int *n1, d *d, d *q, int *ldq, int *indxq, d *rho, d *z, d *dlamda, d *w, d *q2, int *indx, int *indxc, int *indxp, int *coltyp, int *info) nogil:
+    _fortran_dlaed2(k, n, n1, d, q, ldq, indxq, rho, z, dlamda, w, q2, indx, indxc, indxp, coltyp, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlaed3 "F_FUNC(dlaed3,DLAED3)"(int *k, int *n, int *n1, d *d, d *q, int *ldq, d *rho, d *dlamda, d *q2, int *indx, int *ctot, d *w, d *s, int *info) nogil
+cdef void dlaed3(int *k, int *n, int *n1, d *d, d *q, int *ldq, d *rho, d *dlamda, d *q2, int *indx, int *ctot, d *w, d *s, int *info) nogil:
+    _fortran_dlaed3(k, n, n1, d, q, ldq, rho, dlamda, q2, indx, ctot, w, s, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlaed4 "F_FUNC(dlaed4,DLAED4)"(int *n, int *i, d *d, d *z, d *delta, d *rho, d *dlam, int *info) nogil
+cdef void dlaed4(int *n, int *i, d *d, d *z, d *delta, d *rho, d *dlam, int *info) nogil:
+    _fortran_dlaed4(n, i, d, z, delta, rho, dlam, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlaed5 "F_FUNC(dlaed5,DLAED5)"(int *i, d *d, d *z, d *delta, d *rho, d *dlam) nogil
+cdef void dlaed5(int *i, d *d, d *z, d *delta, d *rho, d *dlam) nogil:
+    _fortran_dlaed5(i, d, z, delta, rho, dlam)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlaed6 "F_FUNC(dlaed6,DLAED6)"(int *kniter, bint *orgati, d *rho, d *d, d *z, d *finit, d *tau, int *info) nogil
+cdef void dlaed6(int *kniter, bint *orgati, d *rho, d *d, d *z, d *finit, d *tau, int *info) nogil:
+    _fortran_dlaed6(kniter, orgati, rho, d, z, finit, tau, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlaed7 "F_FUNC(dlaed7,DLAED7)"(int *icompq, int *n, int *qsiz, int *tlvls, int *curlvl, int *curpbm, d *d, d *q, int *ldq, int *indxq, d *rho, int *cutpnt, d *qstore, int *qptr, int *prmptr, int *perm, int *givptr, int *givcol, d *givnum, d *work, int *iwork, int *info) nogil
+cdef void dlaed7(int *icompq, int *n, int *qsiz, int *tlvls, int *curlvl, int *curpbm, d *d, d *q, int *ldq, int *indxq, d *rho, int *cutpnt, d *qstore, int *qptr, int *prmptr, int *perm, int *givptr, int *givcol, d *givnum, d *work, int *iwork, int *info) nogil:
+    _fortran_dlaed7(icompq, n, qsiz, tlvls, curlvl, curpbm, d, q, ldq, indxq, rho, cutpnt, qstore, qptr, prmptr, perm, givptr, givcol, givnum, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlaed8 "F_FUNC(dlaed8,DLAED8)"(int *icompq, int *k, int *n, int *qsiz, d *d, d *q, int *ldq, int *indxq, d *rho, int *cutpnt, d *z, d *dlamda, d *q2, int *ldq2, d *w, int *perm, int *givptr, int *givcol, d *givnum, int *indxp, int *indx, int *info) nogil
+cdef void dlaed8(int *icompq, int *k, int *n, int *qsiz, d *d, d *q, int *ldq, int *indxq, d *rho, int *cutpnt, d *z, d *dlamda, d *q2, int *ldq2, d *w, int *perm, int *givptr, int *givcol, d *givnum, int *indxp, int *indx, int *info) nogil:
+    _fortran_dlaed8(icompq, k, n, qsiz, d, q, ldq, indxq, rho, cutpnt, z, dlamda, q2, ldq2, w, perm, givptr, givcol, givnum, indxp, indx, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlaed9 "F_FUNC(dlaed9,DLAED9)"(int *k, int *kstart, int *kstop, int *n, d *d, d *q, int *ldq, d *rho, d *dlamda, d *w, d *s, int *lds, int *info) nogil
+cdef void dlaed9(int *k, int *kstart, int *kstop, int *n, d *d, d *q, int *ldq, d *rho, d *dlamda, d *w, d *s, int *lds, int *info) nogil:
+    _fortran_dlaed9(k, kstart, kstop, n, d, q, ldq, rho, dlamda, w, s, lds, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlaeda "F_FUNC(dlaeda,DLAEDA)"(int *n, int *tlvls, int *curlvl, int *curpbm, int *prmptr, int *perm, int *givptr, int *givcol, d *givnum, d *q, int *qptr, d *z, d *ztemp, int *info) nogil
+cdef void dlaeda(int *n, int *tlvls, int *curlvl, int *curpbm, int *prmptr, int *perm, int *givptr, int *givcol, d *givnum, d *q, int *qptr, d *z, d *ztemp, int *info) nogil:
+    _fortran_dlaeda(n, tlvls, curlvl, curpbm, prmptr, perm, givptr, givcol, givnum, q, qptr, z, ztemp, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlaein "F_FUNC(dlaein,DLAEIN)"(bint *rightv, bint *noinit, int *n, d *h, int *ldh, d *wr, d *wi, d *vr, d *vi, d *b, int *ldb, d *work, d *eps3, d *smlnum, d *bignum, int *info) nogil
+cdef void dlaein(bint *rightv, bint *noinit, int *n, d *h, int *ldh, d *wr, d *wi, d *vr, d *vi, d *b, int *ldb, d *work, d *eps3, d *smlnum, d *bignum, int *info) nogil:
+    _fortran_dlaein(rightv, noinit, n, h, ldh, wr, wi, vr, vi, b, ldb, work, eps3, smlnum, bignum, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlaev2 "F_FUNC(dlaev2,DLAEV2)"(d *a, d *b, d *c, d *rt1, d *rt2, d *cs1, d *sn1) nogil
+cdef void dlaev2(d *a, d *b, d *c, d *rt1, d *rt2, d *cs1, d *sn1) nogil:
+    _fortran_dlaev2(a, b, c, rt1, rt2, cs1, sn1)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlaexc "F_FUNC(dlaexc,DLAEXC)"(bint *wantq, int *n, d *t, int *ldt, d *q, int *ldq, int *j1, int *n1, int *n2, d *work, int *info) nogil
+cdef void dlaexc(bint *wantq, int *n, d *t, int *ldt, d *q, int *ldq, int *j1, int *n1, int *n2, d *work, int *info) nogil:
+    _fortran_dlaexc(wantq, n, t, ldt, q, ldq, j1, n1, n2, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlag2 "F_FUNC(dlag2,DLAG2)"(d *a, int *lda, d *b, int *ldb, d *safmin, d *scale1, d *scale2, d *wr1, d *wr2, d *wi) nogil
+cdef void dlag2(d *a, int *lda, d *b, int *ldb, d *safmin, d *scale1, d *scale2, d *wr1, d *wr2, d *wi) nogil:
+    _fortran_dlag2(a, lda, b, ldb, safmin, scale1, scale2, wr1, wr2, wi)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlag2s "F_FUNC(dlag2s,DLAG2S)"(int *m, int *n, d *a, int *lda, s *sa, int *ldsa, int *info) nogil
+cdef void dlag2s(int *m, int *n, d *a, int *lda, s *sa, int *ldsa, int *info) nogil:
+    _fortran_dlag2s(m, n, a, lda, sa, ldsa, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlags2 "F_FUNC(dlags2,DLAGS2)"(bint *upper, d *a1, d *a2, d *a3, d *b1, d *b2, d *b3, d *csu, d *snu, d *csv, d *snv, d *csq, d *snq) nogil
+cdef void dlags2(bint *upper, d *a1, d *a2, d *a3, d *b1, d *b2, d *b3, d *csu, d *snu, d *csv, d *snv, d *csq, d *snq) nogil:
+    _fortran_dlags2(upper, a1, a2, a3, b1, b2, b3, csu, snu, csv, snv, csq, snq)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlagtf "F_FUNC(dlagtf,DLAGTF)"(int *n, d *a, d *lambda_, d *b, d *c, d *tol, d *d, int *in_, int *info) nogil
+cdef void dlagtf(int *n, d *a, d *lambda_, d *b, d *c, d *tol, d *d, int *in_, int *info) nogil:
+    _fortran_dlagtf(n, a, lambda_, b, c, tol, d, in_, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlagtm "F_FUNC(dlagtm,DLAGTM)"(char *trans, int *n, int *nrhs, d *alpha, d *dl, d *d, d *du, d *x, int *ldx, d *beta, d *b, int *ldb) nogil
+cdef void dlagtm(char *trans, int *n, int *nrhs, d *alpha, d *dl, d *d, d *du, d *x, int *ldx, d *beta, d *b, int *ldb) nogil:
+    _fortran_dlagtm(trans, n, nrhs, alpha, dl, d, du, x, ldx, beta, b, ldb)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlagts "F_FUNC(dlagts,DLAGTS)"(int *job, int *n, d *a, d *b, d *c, d *d, int *in_, d *y, d *tol, int *info) nogil
+cdef void dlagts(int *job, int *n, d *a, d *b, d *c, d *d, int *in_, d *y, d *tol, int *info) nogil:
+    _fortran_dlagts(job, n, a, b, c, d, in_, y, tol, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlagv2 "F_FUNC(dlagv2,DLAGV2)"(d *a, int *lda, d *b, int *ldb, d *alphar, d *alphai, d *beta, d *csl, d *snl, d *csr, d *snr) nogil
+cdef void dlagv2(d *a, int *lda, d *b, int *ldb, d *alphar, d *alphai, d *beta, d *csl, d *snl, d *csr, d *snr) nogil:
+    _fortran_dlagv2(a, lda, b, ldb, alphar, alphai, beta, csl, snl, csr, snr)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlahqr "F_FUNC(dlahqr,DLAHQR)"(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, d *h, int *ldh, d *wr, d *wi, int *iloz, int *ihiz, d *z, int *ldz, int *info) nogil
+cdef void dlahqr(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, d *h, int *ldh, d *wr, d *wi, int *iloz, int *ihiz, d *z, int *ldz, int *info) nogil:
+    _fortran_dlahqr(wantt, wantz, n, ilo, ihi, h, ldh, wr, wi, iloz, ihiz, z, ldz, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlahr2 "F_FUNC(dlahr2,DLAHR2)"(int *n, int *k, int *nb, d *a, int *lda, d *tau, d *t, int *ldt, d *y, int *ldy) nogil
+cdef void dlahr2(int *n, int *k, int *nb, d *a, int *lda, d *tau, d *t, int *ldt, d *y, int *ldy) nogil:
+    _fortran_dlahr2(n, k, nb, a, lda, tau, t, ldt, y, ldy)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlaic1 "F_FUNC(dlaic1,DLAIC1)"(int *job, int *j, d *x, d *sest, d *w, d *gamma, d *sestpr, d *s, d *c) nogil
+cdef void dlaic1(int *job, int *j, d *x, d *sest, d *w, d *gamma, d *sestpr, d *s, d *c) nogil:
+    _fortran_dlaic1(job, j, x, sest, w, gamma, sestpr, s, c)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlaln2 "F_FUNC(dlaln2,DLALN2)"(bint *ltrans, int *na, int *nw, d *smin, d *ca, d *a, int *lda, d *d1, d *d2, d *b, int *ldb, d *wr, d *wi, d *x, int *ldx, d *scale, d *xnorm, int *info) nogil
+cdef void dlaln2(bint *ltrans, int *na, int *nw, d *smin, d *ca, d *a, int *lda, d *d1, d *d2, d *b, int *ldb, d *wr, d *wi, d *x, int *ldx, d *scale, d *xnorm, int *info) nogil:
+    _fortran_dlaln2(ltrans, na, nw, smin, ca, a, lda, d1, d2, b, ldb, wr, wi, x, ldx, scale, xnorm, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlals0 "F_FUNC(dlals0,DLALS0)"(int *icompq, int *nl, int *nr, int *sqre, int *nrhs, d *b, int *ldb, d *bx, int *ldbx, int *perm, int *givptr, int *givcol, int *ldgcol, d *givnum, int *ldgnum, d *poles, d *difl, d *difr, d *z, int *k, d *c, d *s, d *work, int *info) nogil
+cdef void dlals0(int *icompq, int *nl, int *nr, int *sqre, int *nrhs, d *b, int *ldb, d *bx, int *ldbx, int *perm, int *givptr, int *givcol, int *ldgcol, d *givnum, int *ldgnum, d *poles, d *difl, d *difr, d *z, int *k, d *c, d *s, d *work, int *info) nogil:
+    _fortran_dlals0(icompq, nl, nr, sqre, nrhs, b, ldb, bx, ldbx, perm, givptr, givcol, ldgcol, givnum, ldgnum, poles, difl, difr, z, k, c, s, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlalsa "F_FUNC(dlalsa,DLALSA)"(int *icompq, int *smlsiz, int *n, int *nrhs, d *b, int *ldb, d *bx, int *ldbx, d *u, int *ldu, d *vt, int *k, d *difl, d *difr, d *z, d *poles, int *givptr, int *givcol, int *ldgcol, int *perm, d *givnum, d *c, d *s, d *work, int *iwork, int *info) nogil
+cdef void dlalsa(int *icompq, int *smlsiz, int *n, int *nrhs, d *b, int *ldb, d *bx, int *ldbx, d *u, int *ldu, d *vt, int *k, d *difl, d *difr, d *z, d *poles, int *givptr, int *givcol, int *ldgcol, int *perm, d *givnum, d *c, d *s, d *work, int *iwork, int *info) nogil:
+    _fortran_dlalsa(icompq, smlsiz, n, nrhs, b, ldb, bx, ldbx, u, ldu, vt, k, difl, difr, z, poles, givptr, givcol, ldgcol, perm, givnum, c, s, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlalsd "F_FUNC(dlalsd,DLALSD)"(char *uplo, int *smlsiz, int *n, int *nrhs, d *d, d *e, d *b, int *ldb, d *rcond, int *rank, d *work, int *iwork, int *info) nogil
+cdef void dlalsd(char *uplo, int *smlsiz, int *n, int *nrhs, d *d, d *e, d *b, int *ldb, d *rcond, int *rank, d *work, int *iwork, int *info) nogil:
+    _fortran_dlalsd(uplo, smlsiz, n, nrhs, d, e, b, ldb, rcond, rank, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlamrg "F_FUNC(dlamrg,DLAMRG)"(int *n1, int *n2, d *a, int *dtrd1, int *dtrd2, int *index_bn) nogil
+cdef void dlamrg(int *n1, int *n2, d *a, int *dtrd1, int *dtrd2, int *index_bn) nogil:
+    _fortran_dlamrg(n1, n2, a, dtrd1, dtrd2, index_bn)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlanv2 "F_FUNC(dlanv2,DLANV2)"(d *a, d *b, d *c, d *d, d *rt1r, d *rt1i, d *rt2r, d *rt2i, d *cs, d *sn) nogil
+cdef void dlanv2(d *a, d *b, d *c, d *d, d *rt1r, d *rt1i, d *rt2r, d *rt2i, d *cs, d *sn) nogil:
+    _fortran_dlanv2(a, b, c, d, rt1r, rt1i, rt2r, rt2i, cs, sn)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlapll "F_FUNC(dlapll,DLAPLL)"(int *n, d *x, int *incx, d *y, int *incy, d *ssmin) nogil
+cdef void dlapll(int *n, d *x, int *incx, d *y, int *incy, d *ssmin) nogil:
+    _fortran_dlapll(n, x, incx, y, incy, ssmin)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlapmr "F_FUNC(dlapmr,DLAPMR)"(bint *forwrd, int *m, int *n, d *x, int *ldx, int *k) nogil
+cdef void dlapmr(bint *forwrd, int *m, int *n, d *x, int *ldx, int *k) nogil:
+    _fortran_dlapmr(forwrd, m, n, x, ldx, k)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlapmt "F_FUNC(dlapmt,DLAPMT)"(bint *forwrd, int *m, int *n, d *x, int *ldx, int *k) nogil
+cdef void dlapmt(bint *forwrd, int *m, int *n, d *x, int *ldx, int *k) nogil:
+    _fortran_dlapmt(forwrd, m, n, x, ldx, k)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlaqgb "F_FUNC(dlaqgb,DLAQGB)"(int *m, int *n, int *kl, int *ku, d *ab, int *ldab, d *r, d *c, d *rowcnd, d *colcnd, d *amax, char *equed) nogil
+cdef void dlaqgb(int *m, int *n, int *kl, int *ku, d *ab, int *ldab, d *r, d *c, d *rowcnd, d *colcnd, d *amax, char *equed) nogil:
+    _fortran_dlaqgb(m, n, kl, ku, ab, ldab, r, c, rowcnd, colcnd, amax, equed)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlaqge "F_FUNC(dlaqge,DLAQGE)"(int *m, int *n, d *a, int *lda, d *r, d *c, d *rowcnd, d *colcnd, d *amax, char *equed) nogil
+cdef void dlaqge(int *m, int *n, d *a, int *lda, d *r, d *c, d *rowcnd, d *colcnd, d *amax, char *equed) nogil:
+    _fortran_dlaqge(m, n, a, lda, r, c, rowcnd, colcnd, amax, equed)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlaqp2 "F_FUNC(dlaqp2,DLAQP2)"(int *m, int *n, int *offset, d *a, int *lda, int *jpvt, d *tau, d *vn1, d *vn2, d *work) nogil
+cdef void dlaqp2(int *m, int *n, int *offset, d *a, int *lda, int *jpvt, d *tau, d *vn1, d *vn2, d *work) nogil:
+    _fortran_dlaqp2(m, n, offset, a, lda, jpvt, tau, vn1, vn2, work)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlaqps "F_FUNC(dlaqps,DLAQPS)"(int *m, int *n, int *offset, int *nb, int *kb, d *a, int *lda, int *jpvt, d *tau, d *vn1, d *vn2, d *auxv, d *f, int *ldf) nogil
+cdef void dlaqps(int *m, int *n, int *offset, int *nb, int *kb, d *a, int *lda, int *jpvt, d *tau, d *vn1, d *vn2, d *auxv, d *f, int *ldf) nogil:
+    _fortran_dlaqps(m, n, offset, nb, kb, a, lda, jpvt, tau, vn1, vn2, auxv, f, ldf)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlaqr0 "F_FUNC(dlaqr0,DLAQR0)"(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, d *h, int *ldh, d *wr, d *wi, int *iloz, int *ihiz, d *z, int *ldz, d *work, int *lwork, int *info) nogil
+cdef void dlaqr0(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, d *h, int *ldh, d *wr, d *wi, int *iloz, int *ihiz, d *z, int *ldz, d *work, int *lwork, int *info) nogil:
+    _fortran_dlaqr0(wantt, wantz, n, ilo, ihi, h, ldh, wr, wi, iloz, ihiz, z, ldz, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlaqr1 "F_FUNC(dlaqr1,DLAQR1)"(int *n, d *h, int *ldh, d *sr1, d *si1, d *sr2, d *si2, d *v) nogil
+cdef void dlaqr1(int *n, d *h, int *ldh, d *sr1, d *si1, d *sr2, d *si2, d *v) nogil:
+    _fortran_dlaqr1(n, h, ldh, sr1, si1, sr2, si2, v)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlaqr2 "F_FUNC(dlaqr2,DLAQR2)"(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, d *h, int *ldh, int *iloz, int *ihiz, d *z, int *ldz, int *ns, int *nd, d *sr, d *si, d *v, int *ldv, int *nh, d *t, int *ldt, int *nv, d *wv, int *ldwv, d *work, int *lwork) nogil
+cdef void dlaqr2(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, d *h, int *ldh, int *iloz, int *ihiz, d *z, int *ldz, int *ns, int *nd, d *sr, d *si, d *v, int *ldv, int *nh, d *t, int *ldt, int *nv, d *wv, int *ldwv, d *work, int *lwork) nogil:
+    _fortran_dlaqr2(wantt, wantz, n, ktop, kbot, nw, h, ldh, iloz, ihiz, z, ldz, ns, nd, sr, si, v, ldv, nh, t, ldt, nv, wv, ldwv, work, lwork)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlaqr3 "F_FUNC(dlaqr3,DLAQR3)"(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, d *h, int *ldh, int *iloz, int *ihiz, d *z, int *ldz, int *ns, int *nd, d *sr, d *si, d *v, int *ldv, int *nh, d *t, int *ldt, int *nv, d *wv, int *ldwv, d *work, int *lwork) nogil
+cdef void dlaqr3(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, d *h, int *ldh, int *iloz, int *ihiz, d *z, int *ldz, int *ns, int *nd, d *sr, d *si, d *v, int *ldv, int *nh, d *t, int *ldt, int *nv, d *wv, int *ldwv, d *work, int *lwork) nogil:
+    _fortran_dlaqr3(wantt, wantz, n, ktop, kbot, nw, h, ldh, iloz, ihiz, z, ldz, ns, nd, sr, si, v, ldv, nh, t, ldt, nv, wv, ldwv, work, lwork)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlaqr4 "F_FUNC(dlaqr4,DLAQR4)"(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, d *h, int *ldh, d *wr, d *wi, int *iloz, int *ihiz, d *z, int *ldz, d *work, int *lwork, int *info) nogil
+cdef void dlaqr4(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, d *h, int *ldh, d *wr, d *wi, int *iloz, int *ihiz, d *z, int *ldz, d *work, int *lwork, int *info) nogil:
+    _fortran_dlaqr4(wantt, wantz, n, ilo, ihi, h, ldh, wr, wi, iloz, ihiz, z, ldz, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlaqr5 "F_FUNC(dlaqr5,DLAQR5)"(bint *wantt, bint *wantz, int *kacc22, int *n, int *ktop, int *kbot, int *nshfts, d *sr, d *si, d *h, int *ldh, int *iloz, int *ihiz, d *z, int *ldz, d *v, int *ldv, d *u, int *ldu, int *nv, d *wv, int *ldwv, int *nh, d *wh, int *ldwh) nogil
+cdef void dlaqr5(bint *wantt, bint *wantz, int *kacc22, int *n, int *ktop, int *kbot, int *nshfts, d *sr, d *si, d *h, int *ldh, int *iloz, int *ihiz, d *z, int *ldz, d *v, int *ldv, d *u, int *ldu, int *nv, d *wv, int *ldwv, int *nh, d *wh, int *ldwh) nogil:
+    _fortran_dlaqr5(wantt, wantz, kacc22, n, ktop, kbot, nshfts, sr, si, h, ldh, iloz, ihiz, z, ldz, v, ldv, u, ldu, nv, wv, ldwv, nh, wh, ldwh)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlaqsb "F_FUNC(dlaqsb,DLAQSB)"(char *uplo, int *n, int *kd, d *ab, int *ldab, d *s, d *scond, d *amax, char *equed) nogil
+cdef void dlaqsb(char *uplo, int *n, int *kd, d *ab, int *ldab, d *s, d *scond, d *amax, char *equed) nogil:
+    _fortran_dlaqsb(uplo, n, kd, ab, ldab, s, scond, amax, equed)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlaqsp "F_FUNC(dlaqsp,DLAQSP)"(char *uplo, int *n, d *ap, d *s, d *scond, d *amax, char *equed) nogil
+cdef void dlaqsp(char *uplo, int *n, d *ap, d *s, d *scond, d *amax, char *equed) nogil:
+    _fortran_dlaqsp(uplo, n, ap, s, scond, amax, equed)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlaqsy "F_FUNC(dlaqsy,DLAQSY)"(char *uplo, int *n, d *a, int *lda, d *s, d *scond, d *amax, char *equed) nogil
+cdef void dlaqsy(char *uplo, int *n, d *a, int *lda, d *s, d *scond, d *amax, char *equed) nogil:
+    _fortran_dlaqsy(uplo, n, a, lda, s, scond, amax, equed)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlaqtr "F_FUNC(dlaqtr,DLAQTR)"(bint *ltran, bint *lreal, int *n, d *t, int *ldt, d *b, d *w, d *scale, d *x, d *work, int *info) nogil
+cdef void dlaqtr(bint *ltran, bint *lreal, int *n, d *t, int *ldt, d *b, d *w, d *scale, d *x, d *work, int *info) nogil:
+    _fortran_dlaqtr(ltran, lreal, n, t, ldt, b, w, scale, x, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlar1v "F_FUNC(dlar1v,DLAR1V)"(int *n, int *b1, int *bn, d *lambda_, d *d, d *l, d *ld, d *lld, d *pivmin, d *gaptol, d *z, bint *wantnc, int *negcnt, d *ztz, d *mingma, int *r, int *isuppz, d *nrminv, d *resid, d *rqcorr, d *work) nogil
+cdef void dlar1v(int *n, int *b1, int *bn, d *lambda_, d *d, d *l, d *ld, d *lld, d *pivmin, d *gaptol, d *z, bint *wantnc, int *negcnt, d *ztz, d *mingma, int *r, int *isuppz, d *nrminv, d *resid, d *rqcorr, d *work) nogil:
+    _fortran_dlar1v(n, b1, bn, lambda_, d, l, ld, lld, pivmin, gaptol, z, wantnc, negcnt, ztz, mingma, r, isuppz, nrminv, resid, rqcorr, work)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlar2v "F_FUNC(dlar2v,DLAR2V)"(int *n, d *x, d *y, d *z, int *incx, d *c, d *s, int *incc) nogil
+cdef void dlar2v(int *n, d *x, d *y, d *z, int *incx, d *c, d *s, int *incc) nogil:
+    _fortran_dlar2v(n, x, y, z, incx, c, s, incc)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlarf "F_FUNC(dlarf,DLARF)"(char *side, int *m, int *n, d *v, int *incv, d *tau, d *c, int *ldc, d *work) nogil
+cdef void dlarf(char *side, int *m, int *n, d *v, int *incv, d *tau, d *c, int *ldc, d *work) nogil:
+    _fortran_dlarf(side, m, n, v, incv, tau, c, ldc, work)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlarfb "F_FUNC(dlarfb,DLARFB)"(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, d *v, int *ldv, d *t, int *ldt, d *c, int *ldc, d *work, int *ldwork) nogil
+cdef void dlarfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, d *v, int *ldv, d *t, int *ldt, d *c, int *ldc, d *work, int *ldwork) nogil:
+    _fortran_dlarfb(side, trans, direct, storev, m, n, k, v, ldv, t, ldt, c, ldc, work, ldwork)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlarfg "F_FUNC(dlarfg,DLARFG)"(int *n, d *alpha, d *x, int *incx, d *tau) nogil
+cdef void dlarfg(int *n, d *alpha, d *x, int *incx, d *tau) nogil:
+    _fortran_dlarfg(n, alpha, x, incx, tau)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlarfgp "F_FUNC(dlarfgp,DLARFGP)"(int *n, d *alpha, d *x, int *incx, d *tau) nogil
+cdef void dlarfgp(int *n, d *alpha, d *x, int *incx, d *tau) nogil:
+    _fortran_dlarfgp(n, alpha, x, incx, tau)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlarft "F_FUNC(dlarft,DLARFT)"(char *direct, char *storev, int *n, int *k, d *v, int *ldv, d *tau, d *t, int *ldt) nogil
+cdef void dlarft(char *direct, char *storev, int *n, int *k, d *v, int *ldv, d *tau, d *t, int *ldt) nogil:
+    _fortran_dlarft(direct, storev, n, k, v, ldv, tau, t, ldt)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlarfx "F_FUNC(dlarfx,DLARFX)"(char *side, int *m, int *n, d *v, d *tau, d *c, int *ldc, d *work) nogil
+cdef void dlarfx(char *side, int *m, int *n, d *v, d *tau, d *c, int *ldc, d *work) nogil:
+    _fortran_dlarfx(side, m, n, v, tau, c, ldc, work)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlargv "F_FUNC(dlargv,DLARGV)"(int *n, d *x, int *incx, d *y, int *incy, d *c, int *incc) nogil
+cdef void dlargv(int *n, d *x, int *incx, d *y, int *incy, d *c, int *incc) nogil:
+    _fortran_dlargv(n, x, incx, y, incy, c, incc)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlarnv "F_FUNC(dlarnv,DLARNV)"(int *idist, int *iseed, int *n, d *x) nogil
+cdef void dlarnv(int *idist, int *iseed, int *n, d *x) nogil:
+    _fortran_dlarnv(idist, iseed, n, x)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlarra "F_FUNC(dlarra,DLARRA)"(int *n, d *d, d *e, d *e2, d *spltol, d *tnrm, int *nsplit, int *isplit, int *info) nogil
+cdef void dlarra(int *n, d *d, d *e, d *e2, d *spltol, d *tnrm, int *nsplit, int *isplit, int *info) nogil:
+    _fortran_dlarra(n, d, e, e2, spltol, tnrm, nsplit, isplit, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlarrb "F_FUNC(dlarrb,DLARRB)"(int *n, d *d, d *lld, int *ifirst, int *ilast, d *rtol1, d *rtol2, int *offset, d *w, d *wgap, d *werr, d *work, int *iwork, d *pivmin, d *spdiam, int *twist, int *info) nogil
+cdef void dlarrb(int *n, d *d, d *lld, int *ifirst, int *ilast, d *rtol1, d *rtol2, int *offset, d *w, d *wgap, d *werr, d *work, int *iwork, d *pivmin, d *spdiam, int *twist, int *info) nogil:
+    _fortran_dlarrb(n, d, lld, ifirst, ilast, rtol1, rtol2, offset, w, wgap, werr, work, iwork, pivmin, spdiam, twist, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlarrc "F_FUNC(dlarrc,DLARRC)"(char *jobt, int *n, d *vl, d *vu, d *d, d *e, d *pivmin, int *eigcnt, int *lcnt, int *rcnt, int *info) nogil
+cdef void dlarrc(char *jobt, int *n, d *vl, d *vu, d *d, d *e, d *pivmin, int *eigcnt, int *lcnt, int *rcnt, int *info) nogil:
+    _fortran_dlarrc(jobt, n, vl, vu, d, e, pivmin, eigcnt, lcnt, rcnt, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlarrd "F_FUNC(dlarrd,DLARRD)"(char *range, char *order, int *n, d *vl, d *vu, int *il, int *iu, d *gers, d *reltol, d *d, d *e, d *e2, d *pivmin, int *nsplit, int *isplit, int *m, d *w, d *werr, d *wl, d *wu, int *iblock, int *indexw, d *work, int *iwork, int *info) nogil
+cdef void dlarrd(char *range, char *order, int *n, d *vl, d *vu, int *il, int *iu, d *gers, d *reltol, d *d, d *e, d *e2, d *pivmin, int *nsplit, int *isplit, int *m, d *w, d *werr, d *wl, d *wu, int *iblock, int *indexw, d *work, int *iwork, int *info) nogil:
+    _fortran_dlarrd(range, order, n, vl, vu, il, iu, gers, reltol, d, e, e2, pivmin, nsplit, isplit, m, w, werr, wl, wu, iblock, indexw, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlarre "F_FUNC(dlarre,DLARRE)"(char *range, int *n, d *vl, d *vu, int *il, int *iu, d *d, d *e, d *e2, d *rtol1, d *rtol2, d *spltol, int *nsplit, int *isplit, int *m, d *w, d *werr, d *wgap, int *iblock, int *indexw, d *gers, d *pivmin, d *work, int *iwork, int *info) nogil
+cdef void dlarre(char *range, int *n, d *vl, d *vu, int *il, int *iu, d *d, d *e, d *e2, d *rtol1, d *rtol2, d *spltol, int *nsplit, int *isplit, int *m, d *w, d *werr, d *wgap, int *iblock, int *indexw, d *gers, d *pivmin, d *work, int *iwork, int *info) nogil:
+    _fortran_dlarre(range, n, vl, vu, il, iu, d, e, e2, rtol1, rtol2, spltol, nsplit, isplit, m, w, werr, wgap, iblock, indexw, gers, pivmin, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlarrf "F_FUNC(dlarrf,DLARRF)"(int *n, d *d, d *l, d *ld, int *clstrt, int *clend, d *w, d *wgap, d *werr, d *spdiam, d *clgapl, d *clgapr, d *pivmin, d *sigma, d *dplus, d *lplus, d *work, int *info) nogil
+cdef void dlarrf(int *n, d *d, d *l, d *ld, int *clstrt, int *clend, d *w, d *wgap, d *werr, d *spdiam, d *clgapl, d *clgapr, d *pivmin, d *sigma, d *dplus, d *lplus, d *work, int *info) nogil:
+    _fortran_dlarrf(n, d, l, ld, clstrt, clend, w, wgap, werr, spdiam, clgapl, clgapr, pivmin, sigma, dplus, lplus, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlarrj "F_FUNC(dlarrj,DLARRJ)"(int *n, d *d, d *e2, int *ifirst, int *ilast, d *rtol, int *offset, d *w, d *werr, d *work, int *iwork, d *pivmin, d *spdiam, int *info) nogil
+cdef void dlarrj(int *n, d *d, d *e2, int *ifirst, int *ilast, d *rtol, int *offset, d *w, d *werr, d *work, int *iwork, d *pivmin, d *spdiam, int *info) nogil:
+    _fortran_dlarrj(n, d, e2, ifirst, ilast, rtol, offset, w, werr, work, iwork, pivmin, spdiam, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlarrk "F_FUNC(dlarrk,DLARRK)"(int *n, int *iw, d *gl, d *gu, d *d, d *e2, d *pivmin, d *reltol, d *w, d *werr, int *info) nogil
+cdef void dlarrk(int *n, int *iw, d *gl, d *gu, d *d, d *e2, d *pivmin, d *reltol, d *w, d *werr, int *info) nogil:
+    _fortran_dlarrk(n, iw, gl, gu, d, e2, pivmin, reltol, w, werr, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlarrr "F_FUNC(dlarrr,DLARRR)"(int *n, d *d, d *e, int *info) nogil
+cdef void dlarrr(int *n, d *d, d *e, int *info) nogil:
+    _fortran_dlarrr(n, d, e, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlarrv "F_FUNC(dlarrv,DLARRV)"(int *n, d *vl, d *vu, d *d, d *l, d *pivmin, int *isplit, int *m, int *dol, int *dou, d *minrgp, d *rtol1, d *rtol2, d *w, d *werr, d *wgap, int *iblock, int *indexw, d *gers, d *z, int *ldz, int *isuppz, d *work, int *iwork, int *info) nogil
+cdef void dlarrv(int *n, d *vl, d *vu, d *d, d *l, d *pivmin, int *isplit, int *m, int *dol, int *dou, d *minrgp, d *rtol1, d *rtol2, d *w, d *werr, d *wgap, int *iblock, int *indexw, d *gers, d *z, int *ldz, int *isuppz, d *work, int *iwork, int *info) nogil:
+    _fortran_dlarrv(n, vl, vu, d, l, pivmin, isplit, m, dol, dou, minrgp, rtol1, rtol2, w, werr, wgap, iblock, indexw, gers, z, ldz, isuppz, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlartg "F_FUNC(dlartg,DLARTG)"(d *f, d *g, d *cs, d *sn, d *r) nogil
+cdef void dlartg(d *f, d *g, d *cs, d *sn, d *r) nogil:
+    _fortran_dlartg(f, g, cs, sn, r)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlartgp "F_FUNC(dlartgp,DLARTGP)"(d *f, d *g, d *cs, d *sn, d *r) nogil
+cdef void dlartgp(d *f, d *g, d *cs, d *sn, d *r) nogil:
+    _fortran_dlartgp(f, g, cs, sn, r)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlartgs "F_FUNC(dlartgs,DLARTGS)"(d *x, d *y, d *sigma, d *cs, d *sn) nogil
+cdef void dlartgs(d *x, d *y, d *sigma, d *cs, d *sn) nogil:
+    _fortran_dlartgs(x, y, sigma, cs, sn)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlartv "F_FUNC(dlartv,DLARTV)"(int *n, d *x, int *incx, d *y, int *incy, d *c, d *s, int *incc) nogil
+cdef void dlartv(int *n, d *x, int *incx, d *y, int *incy, d *c, d *s, int *incc) nogil:
+    _fortran_dlartv(n, x, incx, y, incy, c, s, incc)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlaruv "F_FUNC(dlaruv,DLARUV)"(int *iseed, int *n, d *x) nogil
+cdef void dlaruv(int *iseed, int *n, d *x) nogil:
+    _fortran_dlaruv(iseed, n, x)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlarz "F_FUNC(dlarz,DLARZ)"(char *side, int *m, int *n, int *l, d *v, int *incv, d *tau, d *c, int *ldc, d *work) nogil
+cdef void dlarz(char *side, int *m, int *n, int *l, d *v, int *incv, d *tau, d *c, int *ldc, d *work) nogil:
+    _fortran_dlarz(side, m, n, l, v, incv, tau, c, ldc, work)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlarzb "F_FUNC(dlarzb,DLARZB)"(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, d *v, int *ldv, d *t, int *ldt, d *c, int *ldc, d *work, int *ldwork) nogil
+cdef void dlarzb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, d *v, int *ldv, d *t, int *ldt, d *c, int *ldc, d *work, int *ldwork) nogil:
+    _fortran_dlarzb(side, trans, direct, storev, m, n, k, l, v, ldv, t, ldt, c, ldc, work, ldwork)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlarzt "F_FUNC(dlarzt,DLARZT)"(char *direct, char *storev, int *n, int *k, d *v, int *ldv, d *tau, d *t, int *ldt) nogil
+cdef void dlarzt(char *direct, char *storev, int *n, int *k, d *v, int *ldv, d *tau, d *t, int *ldt) nogil:
+    _fortran_dlarzt(direct, storev, n, k, v, ldv, tau, t, ldt)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlas2 "F_FUNC(dlas2,DLAS2)"(d *f, d *g, d *h, d *ssmin, d *ssmax) nogil
+cdef void dlas2(d *f, d *g, d *h, d *ssmin, d *ssmax) nogil:
+    _fortran_dlas2(f, g, h, ssmin, ssmax)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlascl "F_FUNC(dlascl,DLASCL)"(char *type_bn, int *kl, int *ku, d *cfrom, d *cto, int *m, int *n, d *a, int *lda, int *info) nogil
+cdef void dlascl(char *type_bn, int *kl, int *ku, d *cfrom, d *cto, int *m, int *n, d *a, int *lda, int *info) nogil:
+    _fortran_dlascl(type_bn, kl, ku, cfrom, cto, m, n, a, lda, info)
+
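+# dlasd0 through dlasdt below are the auxiliary stages of LAPACK's
+# divide-and-conquer bidiagonal SVD (used by the xBDSDC driver).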
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlasd0 "F_FUNC(dlasd0,DLASD0)"(int *n, int *sqre, d *d, d *e, d *u, int *ldu, d *vt, int *ldvt, int *smlsiz, int *iwork, d *work, int *info) nogil
+cdef void dlasd0(int *n, int *sqre, d *d, d *e, d *u, int *ldu, d *vt, int *ldvt, int *smlsiz, int *iwork, d *work, int *info) nogil:
+    _fortran_dlasd0(n, sqre, d, e, u, ldu, vt, ldvt, smlsiz, iwork, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlasd1 "F_FUNC(dlasd1,DLASD1)"(int *nl, int *nr, int *sqre, d *d, d *alpha, d *beta, d *u, int *ldu, d *vt, int *ldvt, int *idxq, int *iwork, d *work, int *info) nogil
+cdef void dlasd1(int *nl, int *nr, int *sqre, d *d, d *alpha, d *beta, d *u, int *ldu, d *vt, int *ldvt, int *idxq, int *iwork, d *work, int *info) nogil:
+    _fortran_dlasd1(nl, nr, sqre, d, alpha, beta, u, ldu, vt, ldvt, idxq, iwork, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlasd2 "F_FUNC(dlasd2,DLASD2)"(int *nl, int *nr, int *sqre, int *k, d *d, d *z, d *alpha, d *beta, d *u, int *ldu, d *vt, int *ldvt, d *dsigma, d *u2, int *ldu2, d *vt2, int *ldvt2, int *idxp, int *idx, int *idxc, int *idxq, int *coltyp, int *info) nogil
+cdef void dlasd2(int *nl, int *nr, int *sqre, int *k, d *d, d *z, d *alpha, d *beta, d *u, int *ldu, d *vt, int *ldvt, d *dsigma, d *u2, int *ldu2, d *vt2, int *ldvt2, int *idxp, int *idx, int *idxc, int *idxq, int *coltyp, int *info) nogil:
+    _fortran_dlasd2(nl, nr, sqre, k, d, z, alpha, beta, u, ldu, vt, ldvt, dsigma, u2, ldu2, vt2, ldvt2, idxp, idx, idxc, idxq, coltyp, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlasd3 "F_FUNC(dlasd3,DLASD3)"(int *nl, int *nr, int *sqre, int *k, d *d, d *q, int *ldq, d *dsigma, d *u, int *ldu, d *u2, int *ldu2, d *vt, int *ldvt, d *vt2, int *ldvt2, int *idxc, int *ctot, d *z, int *info) nogil
+cdef void dlasd3(int *nl, int *nr, int *sqre, int *k, d *d, d *q, int *ldq, d *dsigma, d *u, int *ldu, d *u2, int *ldu2, d *vt, int *ldvt, d *vt2, int *ldvt2, int *idxc, int *ctot, d *z, int *info) nogil:
+    _fortran_dlasd3(nl, nr, sqre, k, d, q, ldq, dsigma, u, ldu, u2, ldu2, vt, ldvt, vt2, ldvt2, idxc, ctot, z, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlasd4 "F_FUNC(dlasd4,DLASD4)"(int *n, int *i, d *d, d *z, d *delta, d *rho, d *sigma, d *work, int *info) nogil
+cdef void dlasd4(int *n, int *i, d *d, d *z, d *delta, d *rho, d *sigma, d *work, int *info) nogil:
+    _fortran_dlasd4(n, i, d, z, delta, rho, sigma, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlasd5 "F_FUNC(dlasd5,DLASD5)"(int *i, d *d, d *z, d *delta, d *rho, d *dsigma, d *work) nogil
+cdef void dlasd5(int *i, d *d, d *z, d *delta, d *rho, d *dsigma, d *work) nogil:
+    _fortran_dlasd5(i, d, z, delta, rho, dsigma, work)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlasd6 "F_FUNC(dlasd6,DLASD6)"(int *icompq, int *nl, int *nr, int *sqre, d *d, d *vf, d *vl, d *alpha, d *beta, int *idxq, int *perm, int *givptr, int *givcol, int *ldgcol, d *givnum, int *ldgnum, d *poles, d *difl, d *difr, d *z, int *k, d *c, d *s, d *work, int *iwork, int *info) nogil
+cdef void dlasd6(int *icompq, int *nl, int *nr, int *sqre, d *d, d *vf, d *vl, d *alpha, d *beta, int *idxq, int *perm, int *givptr, int *givcol, int *ldgcol, d *givnum, int *ldgnum, d *poles, d *difl, d *difr, d *z, int *k, d *c, d *s, d *work, int *iwork, int *info) nogil:
+    _fortran_dlasd6(icompq, nl, nr, sqre, d, vf, vl, alpha, beta, idxq, perm, givptr, givcol, ldgcol, givnum, ldgnum, poles, difl, difr, z, k, c, s, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlasd7 "F_FUNC(dlasd7,DLASD7)"(int *icompq, int *nl, int *nr, int *sqre, int *k, d *d, d *z, d *zw, d *vf, d *vfw, d *vl, d *vlw, d *alpha, d *beta, d *dsigma, int *idx, int *idxp, int *idxq, int *perm, int *givptr, int *givcol, int *ldgcol, d *givnum, int *ldgnum, d *c, d *s, int *info) nogil
+cdef void dlasd7(int *icompq, int *nl, int *nr, int *sqre, int *k, d *d, d *z, d *zw, d *vf, d *vfw, d *vl, d *vlw, d *alpha, d *beta, d *dsigma, int *idx, int *idxp, int *idxq, int *perm, int *givptr, int *givcol, int *ldgcol, d *givnum, int *ldgnum, d *c, d *s, int *info) nogil:
+    _fortran_dlasd7(icompq, nl, nr, sqre, k, d, z, zw, vf, vfw, vl, vlw, alpha, beta, dsigma, idx, idxp, idxq, perm, givptr, givcol, ldgcol, givnum, ldgnum, c, s, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlasd8 "F_FUNC(dlasd8,DLASD8)"(int *icompq, int *k, d *d, d *z, d *vf, d *vl, d *difl, d *difr, int *lddifr, d *dsigma, d *work, int *info) nogil
+cdef void dlasd8(int *icompq, int *k, d *d, d *z, d *vf, d *vl, d *difl, d *difr, int *lddifr, d *dsigma, d *work, int *info) nogil:
+    _fortran_dlasd8(icompq, k, d, z, vf, vl, difl, difr, lddifr, dsigma, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlasda "F_FUNC(dlasda,DLASDA)"(int *icompq, int *smlsiz, int *n, int *sqre, d *d, d *e, d *u, int *ldu, d *vt, int *k, d *difl, d *difr, d *z, d *poles, int *givptr, int *givcol, int *ldgcol, int *perm, d *givnum, d *c, d *s, d *work, int *iwork, int *info) nogil
+cdef void dlasda(int *icompq, int *smlsiz, int *n, int *sqre, d *d, d *e, d *u, int *ldu, d *vt, int *k, d *difl, d *difr, d *z, d *poles, int *givptr, int *givcol, int *ldgcol, int *perm, d *givnum, d *c, d *s, d *work, int *iwork, int *info) nogil:
+    _fortran_dlasda(icompq, smlsiz, n, sqre, d, e, u, ldu, vt, k, difl, difr, z, poles, givptr, givcol, ldgcol, perm, givnum, c, s, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlasdq "F_FUNC(dlasdq,DLASDQ)"(char *uplo, int *sqre, int *n, int *ncvt, int *nru, int *ncc, d *d, d *e, d *vt, int *ldvt, d *u, int *ldu, d *c, int *ldc, d *work, int *info) nogil
+cdef void dlasdq(char *uplo, int *sqre, int *n, int *ncvt, int *nru, int *ncc, d *d, d *e, d *vt, int *ldvt, d *u, int *ldu, d *c, int *ldc, d *work, int *info) nogil:
+    _fortran_dlasdq(uplo, sqre, n, ncvt, nru, ncc, d, e, vt, ldvt, u, ldu, c, ldc, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlasdt "F_FUNC(dlasdt,DLASDT)"(int *n, int *lvl, int *nd, int *inode, int *ndiml, int *ndimr, int *msub) nogil
+cdef void dlasdt(int *n, int *lvl, int *nd, int *inode, int *ndiml, int *ndimr, int *msub) nogil:
+    _fortran_dlasdt(n, lvl, nd, inode, ndiml, ndimr, msub)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlaset "F_FUNC(dlaset,DLASET)"(char *uplo, int *m, int *n, d *alpha, d *beta, d *a, int *lda) nogil
+cdef void dlaset(char *uplo, int *m, int *n, d *alpha, d *beta, d *a, int *lda) nogil:
+    _fortran_dlaset(uplo, m, n, alpha, beta, a, lda)
+
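+# The dlasq* routines implement the dqds algorithm, which computes the
+# singular values of a bidiagonal matrix to high relative accuracy.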
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlasq1 "F_FUNC(dlasq1,DLASQ1)"(int *n, d *d, d *e, d *work, int *info) nogil
+cdef void dlasq1(int *n, d *d, d *e, d *work, int *info) nogil:
+    _fortran_dlasq1(n, d, e, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlasq2 "F_FUNC(dlasq2,DLASQ2)"(int *n, d *z, int *info) nogil
+cdef void dlasq2(int *n, d *z, int *info) nogil:
+    _fortran_dlasq2(n, z, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlasq3 "F_FUNC(dlasq3,DLASQ3)"(int *i0, int *n0, d *z, int *pp, d *dmin, d *sigma, d *desig, d *qmax, int *nfail, int *iter, int *ndiv, bint *ieee, int *ttype, d *dmin1, d *dmin2, d *dn, d *dn1, d *dn2, d *g, d *tau) nogil
+cdef void dlasq3(int *i0, int *n0, d *z, int *pp, d *dmin, d *sigma, d *desig, d *qmax, int *nfail, int *iter, int *ndiv, bint *ieee, int *ttype, d *dmin1, d *dmin2, d *dn, d *dn1, d *dn2, d *g, d *tau) nogil:
+    _fortran_dlasq3(i0, n0, z, pp, dmin, sigma, desig, qmax, nfail, iter, ndiv, ieee, ttype, dmin1, dmin2, dn, dn1, dn2, g, tau)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlasq4 "F_FUNC(dlasq4,DLASQ4)"(int *i0, int *n0, d *z, int *pp, int *n0in, d *dmin, d *dmin1, d *dmin2, d *dn, d *dn1, d *dn2, d *tau, int *ttype, d *g) nogil
+cdef void dlasq4(int *i0, int *n0, d *z, int *pp, int *n0in, d *dmin, d *dmin1, d *dmin2, d *dn, d *dn1, d *dn2, d *tau, int *ttype, d *g) nogil:
+    _fortran_dlasq4(i0, n0, z, pp, n0in, dmin, dmin1, dmin2, dn, dn1, dn2, tau, ttype, g)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlasq6 "F_FUNC(dlasq6,DLASQ6)"(int *i0, int *n0, d *z, int *pp, d *dmin, d *dmin1, d *dmin2, d *dn, d *dnm1, d *dnm2) nogil
+cdef void dlasq6(int *i0, int *n0, d *z, int *pp, d *dmin, d *dmin1, d *dmin2, d *dn, d *dnm1, d *dnm2) nogil:
+    _fortran_dlasq6(i0, n0, z, pp, dmin, dmin1, dmin2, dn, dnm1, dnm2)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlasr "F_FUNC(dlasr,DLASR)"(char *side, char *pivot, char *direct, int *m, int *n, d *c, d *s, d *a, int *lda) nogil
+cdef void dlasr(char *side, char *pivot, char *direct, int *m, int *n, d *c, d *s, d *a, int *lda) nogil:
+    _fortran_dlasr(side, pivot, direct, m, n, c, s, a, lda)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlasrt "F_FUNC(dlasrt,DLASRT)"(char *id, int *n, d *d, int *info) nogil
+cdef void dlasrt(char *id, int *n, d *d, int *info) nogil:
+    _fortran_dlasrt(id, n, d, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlassq "F_FUNC(dlassq,DLASSQ)"(int *n, d *x, int *incx, d *scale, d *sumsq) nogil
+cdef void dlassq(int *n, d *x, int *incx, d *scale, d *sumsq) nogil:
+    _fortran_dlassq(n, x, incx, scale, sumsq)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlasv2 "F_FUNC(dlasv2,DLASV2)"(d *f, d *g, d *h, d *ssmin, d *ssmax, d *snr, d *csr, d *snl, d *csl) nogil
+cdef void dlasv2(d *f, d *g, d *h, d *ssmin, d *ssmax, d *snr, d *csr, d *snl, d *csl) nogil:
+    _fortran_dlasv2(f, g, h, ssmin, ssmax, snr, csr, snl, csl)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlaswp "F_FUNC(dlaswp,DLASWP)"(int *n, d *a, int *lda, int *k1, int *k2, int *ipiv, int *incx) nogil
+cdef void dlaswp(int *n, d *a, int *lda, int *k1, int *k2, int *ipiv, int *incx) nogil:
+    _fortran_dlaswp(n, a, lda, k1, k2, ipiv, incx)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlasy2 "F_FUNC(dlasy2,DLASY2)"(bint *ltranl, bint *ltranr, int *isgn, int *n1, int *n2, d *tl, int *ldtl, d *tr, int *ldtr, d *b, int *ldb, d *scale, d *x, int *ldx, d *xnorm, int *info) nogil
+cdef void dlasy2(bint *ltranl, bint *ltranr, int *isgn, int *n1, int *n2, d *tl, int *ldtl, d *tr, int *ldtr, d *b, int *ldb, d *scale, d *x, int *ldx, d *xnorm, int *info) nogil:
+    _fortran_dlasy2(ltranl, ltranr, isgn, n1, n2, tl, ldtl, tr, ldtr, b, ldb, scale, x, ldx, xnorm, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlasyf "F_FUNC(dlasyf,DLASYF)"(char *uplo, int *n, int *nb, int *kb, d *a, int *lda, int *ipiv, d *w, int *ldw, int *info) nogil
+cdef void dlasyf(char *uplo, int *n, int *nb, int *kb, d *a, int *lda, int *ipiv, d *w, int *ldw, int *info) nogil:
+    _fortran_dlasyf(uplo, n, nb, kb, a, lda, ipiv, w, ldw, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlat2s "F_FUNC(dlat2s,DLAT2S)"(char *uplo, int *n, d *a, int *lda, s *sa, int *ldsa, int *info) nogil
+cdef void dlat2s(char *uplo, int *n, d *a, int *lda, s *sa, int *ldsa, int *info) nogil:
+    _fortran_dlat2s(uplo, n, a, lda, sa, ldsa, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlatbs "F_FUNC(dlatbs,DLATBS)"(char *uplo, char *trans, char *diag, char *normin, int *n, int *kd, d *ab, int *ldab, d *x, d *scale, d *cnorm, int *info) nogil
+cdef void dlatbs(char *uplo, char *trans, char *diag, char *normin, int *n, int *kd, d *ab, int *ldab, d *x, d *scale, d *cnorm, int *info) nogil:
+    _fortran_dlatbs(uplo, trans, diag, normin, n, kd, ab, ldab, x, scale, cnorm, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlatdf "F_FUNC(dlatdf,DLATDF)"(int *ijob, int *n, d *z, int *ldz, d *rhs, d *rdsum, d *rdscal, int *ipiv, int *jpiv) nogil
+cdef void dlatdf(int *ijob, int *n, d *z, int *ldz, d *rhs, d *rdsum, d *rdscal, int *ipiv, int *jpiv) nogil:
+    _fortran_dlatdf(ijob, n, z, ldz, rhs, rdsum, rdscal, ipiv, jpiv)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlatps "F_FUNC(dlatps,DLATPS)"(char *uplo, char *trans, char *diag, char *normin, int *n, d *ap, d *x, d *scale, d *cnorm, int *info) nogil
+cdef void dlatps(char *uplo, char *trans, char *diag, char *normin, int *n, d *ap, d *x, d *scale, d *cnorm, int *info) nogil:
+    _fortran_dlatps(uplo, trans, diag, normin, n, ap, x, scale, cnorm, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlatrd "F_FUNC(dlatrd,DLATRD)"(char *uplo, int *n, int *nb, d *a, int *lda, d *e, d *tau, d *w, int *ldw) nogil
+cdef void dlatrd(char *uplo, int *n, int *nb, d *a, int *lda, d *e, d *tau, d *w, int *ldw) nogil:
+    _fortran_dlatrd(uplo, n, nb, a, lda, e, tau, w, ldw)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlatrs "F_FUNC(dlatrs,DLATRS)"(char *uplo, char *trans, char *diag, char *normin, int *n, d *a, int *lda, d *x, d *scale, d *cnorm, int *info) nogil
+cdef void dlatrs(char *uplo, char *trans, char *diag, char *normin, int *n, d *a, int *lda, d *x, d *scale, d *cnorm, int *info) nogil:
+    _fortran_dlatrs(uplo, trans, diag, normin, n, a, lda, x, scale, cnorm, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlatrz "F_FUNC(dlatrz,DLATRZ)"(int *m, int *n, int *l, d *a, int *lda, d *tau, d *work) nogil
+cdef void dlatrz(int *m, int *n, int *l, d *a, int *lda, d *tau, d *work) nogil:
+    _fortran_dlatrz(m, n, l, a, lda, tau, work)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlauu2 "F_FUNC(dlauu2,DLAUU2)"(char *uplo, int *n, d *a, int *lda, int *info) nogil
+cdef void dlauu2(char *uplo, int *n, d *a, int *lda, int *info) nogil:
+    _fortran_dlauu2(uplo, n, a, lda, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dlauum "F_FUNC(dlauum,DLAUUM)"(char *uplo, int *n, d *a, int *lda, int *info) nogil
+cdef void dlauum(char *uplo, int *n, d *a, int *lda, int *info) nogil:
+    _fortran_dlauum(uplo, n, a, lda, info)
+
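+# The dop*/dor* routines generate and apply the orthogonal matrices arising
+# from Householder-based factorizations (QR, LQ, QL, RQ, Hessenberg and
+# tridiagonal reductions), in packed (dop*) and full (dor*) storage.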
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dopgtr "F_FUNC(dopgtr,DOPGTR)"(char *uplo, int *n, d *ap, d *tau, d *q, int *ldq, d *work, int *info) nogil
+cdef void dopgtr(char *uplo, int *n, d *ap, d *tau, d *q, int *ldq, d *work, int *info) nogil:
+    _fortran_dopgtr(uplo, n, ap, tau, q, ldq, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dopmtr "F_FUNC(dopmtr,DOPMTR)"(char *side, char *uplo, char *trans, int *m, int *n, d *ap, d *tau, d *c, int *ldc, d *work, int *info) nogil
+cdef void dopmtr(char *side, char *uplo, char *trans, int *m, int *n, d *ap, d *tau, d *c, int *ldc, d *work, int *info) nogil:
+    _fortran_dopmtr(side, uplo, trans, m, n, ap, tau, c, ldc, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dorbdb "F_FUNC(dorbdb,DORBDB)"(char *trans, char *signs, int *m, int *p, int *q, d *x11, int *ldx11, d *x12, int *ldx12, d *x21, int *ldx21, d *x22, int *ldx22, d *theta, d *phi, d *taup1, d *taup2, d *tauq1, d *tauq2, d *work, int *lwork, int *info) nogil
+cdef void dorbdb(char *trans, char *signs, int *m, int *p, int *q, d *x11, int *ldx11, d *x12, int *ldx12, d *x21, int *ldx21, d *x22, int *ldx22, d *theta, d *phi, d *taup1, d *taup2, d *tauq1, d *tauq2, d *work, int *lwork, int *info) nogil:
+    _fortran_dorbdb(trans, signs, m, p, q, x11, ldx11, x12, ldx12, x21, ldx21, x22, ldx22, theta, phi, taup1, taup2, tauq1, tauq2, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dorcsd "F_FUNC(dorcsd,DORCSD)"(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, char *signs, int *m, int *p, int *q, d *x11, int *ldx11, d *x12, int *ldx12, d *x21, int *ldx21, d *x22, int *ldx22, d *theta, d *u1, int *ldu1, d *u2, int *ldu2, d *v1t, int *ldv1t, d *v2t, int *ldv2t, d *work, int *lwork, int *iwork, int *info) nogil
+cdef void dorcsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, char *signs, int *m, int *p, int *q, d *x11, int *ldx11, d *x12, int *ldx12, d *x21, int *ldx21, d *x22, int *ldx22, d *theta, d *u1, int *ldu1, d *u2, int *ldu2, d *v1t, int *ldv1t, d *v2t, int *ldv2t, d *work, int *lwork, int *iwork, int *info) nogil:
+    _fortran_dorcsd(jobu1, jobu2, jobv1t, jobv2t, trans, signs, m, p, q, x11, ldx11, x12, ldx12, x21, ldx21, x22, ldx22, theta, u1, ldu1, u2, ldu2, v1t, ldv1t, v2t, ldv2t, work, lwork, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dorg2l "F_FUNC(dorg2l,DORG2L)"(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *info) nogil
+cdef void dorg2l(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *info) nogil:
+    _fortran_dorg2l(m, n, k, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dorg2r "F_FUNC(dorg2r,DORG2R)"(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *info) nogil
+cdef void dorg2r(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *info) nogil:
+    _fortran_dorg2r(m, n, k, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dorgbr "F_FUNC(dorgbr,DORGBR)"(char *vect, int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+cdef void dorgbr(char *vect, int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil:
+    _fortran_dorgbr(vect, m, n, k, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dorghr "F_FUNC(dorghr,DORGHR)"(int *n, int *ilo, int *ihi, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+cdef void dorghr(int *n, int *ilo, int *ihi, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil:
+    _fortran_dorghr(n, ilo, ihi, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dorgl2 "F_FUNC(dorgl2,DORGL2)"(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *info) nogil
+cdef void dorgl2(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *info) nogil:
+    _fortran_dorgl2(m, n, k, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dorglq "F_FUNC(dorglq,DORGLQ)"(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+cdef void dorglq(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil:
+    _fortran_dorglq(m, n, k, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dorgql "F_FUNC(dorgql,DORGQL)"(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+cdef void dorgql(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil:
+    _fortran_dorgql(m, n, k, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dorgqr "F_FUNC(dorgqr,DORGQR)"(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+cdef void dorgqr(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil:
+    _fortran_dorgqr(m, n, k, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dorgr2 "F_FUNC(dorgr2,DORGR2)"(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *info) nogil
+cdef void dorgr2(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *info) nogil:
+    _fortran_dorgr2(m, n, k, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dorgrq "F_FUNC(dorgrq,DORGRQ)"(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+cdef void dorgrq(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil:
+    _fortran_dorgrq(m, n, k, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dorgtr "F_FUNC(dorgtr,DORGTR)"(char *uplo, int *n, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+cdef void dorgtr(char *uplo, int *n, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil:
+    _fortran_dorgtr(uplo, n, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dorm2l "F_FUNC(dorm2l,DORM2L)"(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *info) nogil
+cdef void dorm2l(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *info) nogil:
+    _fortran_dorm2l(side, trans, m, n, k, a, lda, tau, c, ldc, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dorm2r "F_FUNC(dorm2r,DORM2R)"(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *info) nogil
+cdef void dorm2r(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *info) nogil:
+    _fortran_dorm2r(side, trans, m, n, k, a, lda, tau, c, ldc, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dormbr "F_FUNC(dormbr,DORMBR)"(char *vect, char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil
+cdef void dormbr(char *vect, char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil:
+    _fortran_dormbr(vect, side, trans, m, n, k, a, lda, tau, c, ldc, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dormhr "F_FUNC(dormhr,DORMHR)"(char *side, char *trans, int *m, int *n, int *ilo, int *ihi, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil
+cdef void dormhr(char *side, char *trans, int *m, int *n, int *ilo, int *ihi, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil:
+    _fortran_dormhr(side, trans, m, n, ilo, ihi, a, lda, tau, c, ldc, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dorml2 "F_FUNC(dorml2,DORML2)"(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *info) nogil
+cdef void dorml2(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *info) nogil:
+    _fortran_dorml2(side, trans, m, n, k, a, lda, tau, c, ldc, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dormlq "F_FUNC(dormlq,DORMLQ)"(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil
+cdef void dormlq(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil:
+    _fortran_dormlq(side, trans, m, n, k, a, lda, tau, c, ldc, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dormql "F_FUNC(dormql,DORMQL)"(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil
+cdef void dormql(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil:
+    _fortran_dormql(side, trans, m, n, k, a, lda, tau, c, ldc, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dormqr "F_FUNC(dormqr,DORMQR)"(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil
+cdef void dormqr(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil:
+    _fortran_dormqr(side, trans, m, n, k, a, lda, tau, c, ldc, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dormr2 "F_FUNC(dormr2,DORMR2)"(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *info) nogil
+cdef void dormr2(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *info) nogil:
+    _fortran_dormr2(side, trans, m, n, k, a, lda, tau, c, ldc, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dormr3 "F_FUNC(dormr3,DORMR3)"(char *side, char *trans, int *m, int *n, int *k, int *l, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *info) nogil
+cdef void dormr3(char *side, char *trans, int *m, int *n, int *k, int *l, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *info) nogil:
+    _fortran_dormr3(side, trans, m, n, k, l, a, lda, tau, c, ldc, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dormrq "F_FUNC(dormrq,DORMRQ)"(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil
+cdef void dormrq(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil:
+    _fortran_dormrq(side, trans, m, n, k, a, lda, tau, c, ldc, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dormrz "F_FUNC(dormrz,DORMRZ)"(char *side, char *trans, int *m, int *n, int *k, int *l, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil
+cdef void dormrz(char *side, char *trans, int *m, int *n, int *k, int *l, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil:
+    _fortran_dormrz(side, trans, m, n, k, l, a, lda, tau, c, ldc, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dormtr "F_FUNC(dormtr,DORMTR)"(char *side, char *uplo, char *trans, int *m, int *n, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil
+cdef void dormtr(char *side, char *uplo, char *trans, int *m, int *n, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil:
+    _fortran_dormtr(side, uplo, trans, m, n, a, lda, tau, c, ldc, work, lwork, info)
+
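+# Cholesky-based drivers for symmetric positive definite systems follow,
+# grouped by storage format: dpb* (band), dpf* (rectangular full packed),
+# dpo* (full), dpp* (packed), dps* (pivoted), and dpt* (tridiagonal).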
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dpbcon "F_FUNC(dpbcon,DPBCON)"(char *uplo, int *n, int *kd, d *ab, int *ldab, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil
+cdef void dpbcon(char *uplo, int *n, int *kd, d *ab, int *ldab, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil:
+    _fortran_dpbcon(uplo, n, kd, ab, ldab, anorm, rcond, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dpbequ "F_FUNC(dpbequ,DPBEQU)"(char *uplo, int *n, int *kd, d *ab, int *ldab, d *s, d *scond, d *amax, int *info) nogil
+cdef void dpbequ(char *uplo, int *n, int *kd, d *ab, int *ldab, d *s, d *scond, d *amax, int *info) nogil:
+    _fortran_dpbequ(uplo, n, kd, ab, ldab, s, scond, amax, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dpbrfs "F_FUNC(dpbrfs,DPBRFS)"(char *uplo, int *n, int *kd, int *nrhs, d *ab, int *ldab, d *afb, int *ldafb, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+cdef void dpbrfs(char *uplo, int *n, int *kd, int *nrhs, d *ab, int *ldab, d *afb, int *ldafb, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil:
+    _fortran_dpbrfs(uplo, n, kd, nrhs, ab, ldab, afb, ldafb, b, ldb, x, ldx, ferr, berr, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dpbstf "F_FUNC(dpbstf,DPBSTF)"(char *uplo, int *n, int *kd, d *ab, int *ldab, int *info) nogil
+cdef void dpbstf(char *uplo, int *n, int *kd, d *ab, int *ldab, int *info) nogil:
+    _fortran_dpbstf(uplo, n, kd, ab, ldab, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dpbsv "F_FUNC(dpbsv,DPBSV)"(char *uplo, int *n, int *kd, int *nrhs, d *ab, int *ldab, d *b, int *ldb, int *info) nogil
+cdef void dpbsv(char *uplo, int *n, int *kd, int *nrhs, d *ab, int *ldab, d *b, int *ldb, int *info) nogil:
+    _fortran_dpbsv(uplo, n, kd, nrhs, ab, ldab, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dpbsvx "F_FUNC(dpbsvx,DPBSVX)"(char *fact, char *uplo, int *n, int *kd, int *nrhs, d *ab, int *ldab, d *afb, int *ldafb, char *equed, d *s, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+cdef void dpbsvx(char *fact, char *uplo, int *n, int *kd, int *nrhs, d *ab, int *ldab, d *afb, int *ldafb, char *equed, d *s, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *iwork, int *info) nogil:
+    _fortran_dpbsvx(fact, uplo, n, kd, nrhs, ab, ldab, afb, ldafb, equed, s, b, ldb, x, ldx, rcond, ferr, berr, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dpbtf2 "F_FUNC(dpbtf2,DPBTF2)"(char *uplo, int *n, int *kd, d *ab, int *ldab, int *info) nogil
+cdef void dpbtf2(char *uplo, int *n, int *kd, d *ab, int *ldab, int *info) nogil:
+    _fortran_dpbtf2(uplo, n, kd, ab, ldab, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dpbtrf "F_FUNC(dpbtrf,DPBTRF)"(char *uplo, int *n, int *kd, d *ab, int *ldab, int *info) nogil
+cdef void dpbtrf(char *uplo, int *n, int *kd, d *ab, int *ldab, int *info) nogil:
+    _fortran_dpbtrf(uplo, n, kd, ab, ldab, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dpbtrs "F_FUNC(dpbtrs,DPBTRS)"(char *uplo, int *n, int *kd, int *nrhs, d *ab, int *ldab, d *b, int *ldb, int *info) nogil
+cdef void dpbtrs(char *uplo, int *n, int *kd, int *nrhs, d *ab, int *ldab, d *b, int *ldb, int *info) nogil:
+    _fortran_dpbtrs(uplo, n, kd, nrhs, ab, ldab, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dpftrf "F_FUNC(dpftrf,DPFTRF)"(char *transr, char *uplo, int *n, d *a, int *info) nogil
+cdef void dpftrf(char *transr, char *uplo, int *n, d *a, int *info) nogil:
+    _fortran_dpftrf(transr, uplo, n, a, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dpftri "F_FUNC(dpftri,DPFTRI)"(char *transr, char *uplo, int *n, d *a, int *info) nogil
+cdef void dpftri(char *transr, char *uplo, int *n, d *a, int *info) nogil:
+    _fortran_dpftri(transr, uplo, n, a, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dpftrs "F_FUNC(dpftrs,DPFTRS)"(char *transr, char *uplo, int *n, int *nrhs, d *a, d *b, int *ldb, int *info) nogil
+cdef void dpftrs(char *transr, char *uplo, int *n, int *nrhs, d *a, d *b, int *ldb, int *info) nogil:
+    _fortran_dpftrs(transr, uplo, n, nrhs, a, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dpocon "F_FUNC(dpocon,DPOCON)"(char *uplo, int *n, d *a, int *lda, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil
+cdef void dpocon(char *uplo, int *n, d *a, int *lda, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil:
+    _fortran_dpocon(uplo, n, a, lda, anorm, rcond, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dpoequ "F_FUNC(dpoequ,DPOEQU)"(int *n, d *a, int *lda, d *s, d *scond, d *amax, int *info) nogil
+cdef void dpoequ(int *n, d *a, int *lda, d *s, d *scond, d *amax, int *info) nogil:
+    _fortran_dpoequ(n, a, lda, s, scond, amax, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dpoequb "F_FUNC(dpoequb,DPOEQUB)"(int *n, d *a, int *lda, d *s, d *scond, d *amax, int *info) nogil
+cdef void dpoequb(int *n, d *a, int *lda, d *s, d *scond, d *amax, int *info) nogil:
+    _fortran_dpoequb(n, a, lda, s, scond, amax, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dporfs "F_FUNC(dporfs,DPORFS)"(char *uplo, int *n, int *nrhs, d *a, int *lda, d *af, int *ldaf, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+cdef void dporfs(char *uplo, int *n, int *nrhs, d *a, int *lda, d *af, int *ldaf, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil:
+    _fortran_dporfs(uplo, n, nrhs, a, lda, af, ldaf, b, ldb, x, ldx, ferr, berr, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dposv "F_FUNC(dposv,DPOSV)"(char *uplo, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, int *info) nogil
+cdef void dposv(char *uplo, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, int *info) nogil:
+    _fortran_dposv(uplo, n, nrhs, a, lda, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dposvx "F_FUNC(dposvx,DPOSVX)"(char *fact, char *uplo, int *n, int *nrhs, d *a, int *lda, d *af, int *ldaf, char *equed, d *s, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+cdef void dposvx(char *fact, char *uplo, int *n, int *nrhs, d *a, int *lda, d *af, int *ldaf, char *equed, d *s, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *iwork, int *info) nogil:
+    _fortran_dposvx(fact, uplo, n, nrhs, a, lda, af, ldaf, equed, s, b, ldb, x, ldx, rcond, ferr, berr, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dpotf2 "F_FUNC(dpotf2,DPOTF2)"(char *uplo, int *n, d *a, int *lda, int *info) nogil
+cdef void dpotf2(char *uplo, int *n, d *a, int *lda, int *info) nogil:
+    _fortran_dpotf2(uplo, n, a, lda, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dpotrf "F_FUNC(dpotrf,DPOTRF)"(char *uplo, int *n, d *a, int *lda, int *info) nogil
+cdef void dpotrf(char *uplo, int *n, d *a, int *lda, int *info) nogil:
+    _fortran_dpotrf(uplo, n, a, lda, info)
+
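+# A minimal usage sketch (illustrative only, not part of the generated
+# file): calling the dpotrf wrapper above from Cython on a Fortran-ordered
+# array. The variable names here are hypothetical.
+#
+#     cdef double[::1, :] a = np.asfortranarray(some_spd_matrix)
+#     cdef char uplo = b'L'
+#     cdef int n = a.shape[0], lda = a.shape[0], info = 0
+#     dpotrf(&uplo, &n, &a[0, 0], &lda, &info)
+#     # on success info == 0 and the lower triangle of `a` holds L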
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dpotri "F_FUNC(dpotri,DPOTRI)"(char *uplo, int *n, d *a, int *lda, int *info) nogil
+cdef void dpotri(char *uplo, int *n, d *a, int *lda, int *info) nogil:
+    _fortran_dpotri(uplo, n, a, lda, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dpotrs "F_FUNC(dpotrs,DPOTRS)"(char *uplo, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, int *info) nogil
+cdef void dpotrs(char *uplo, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, int *info) nogil:
+    _fortran_dpotrs(uplo, n, nrhs, a, lda, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dppcon "F_FUNC(dppcon,DPPCON)"(char *uplo, int *n, d *ap, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil
+cdef void dppcon(char *uplo, int *n, d *ap, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil:
+    _fortran_dppcon(uplo, n, ap, anorm, rcond, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dppequ "F_FUNC(dppequ,DPPEQU)"(char *uplo, int *n, d *ap, d *s, d *scond, d *amax, int *info) nogil
+cdef void dppequ(char *uplo, int *n, d *ap, d *s, d *scond, d *amax, int *info) nogil:
+    _fortran_dppequ(uplo, n, ap, s, scond, amax, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dpprfs "F_FUNC(dpprfs,DPPRFS)"(char *uplo, int *n, int *nrhs, d *ap, d *afp, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+cdef void dpprfs(char *uplo, int *n, int *nrhs, d *ap, d *afp, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil:
+    _fortran_dpprfs(uplo, n, nrhs, ap, afp, b, ldb, x, ldx, ferr, berr, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dppsv "F_FUNC(dppsv,DPPSV)"(char *uplo, int *n, int *nrhs, d *ap, d *b, int *ldb, int *info) nogil
+cdef void dppsv(char *uplo, int *n, int *nrhs, d *ap, d *b, int *ldb, int *info) nogil:
+    _fortran_dppsv(uplo, n, nrhs, ap, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dppsvx "F_FUNC(dppsvx,DPPSVX)"(char *fact, char *uplo, int *n, int *nrhs, d *ap, d *afp, char *equed, d *s, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+cdef void dppsvx(char *fact, char *uplo, int *n, int *nrhs, d *ap, d *afp, char *equed, d *s, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *iwork, int *info) nogil:
+    _fortran_dppsvx(fact, uplo, n, nrhs, ap, afp, equed, s, b, ldb, x, ldx, rcond, ferr, berr, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dpptrf "F_FUNC(dpptrf,DPPTRF)"(char *uplo, int *n, d *ap, int *info) nogil
+cdef void dpptrf(char *uplo, int *n, d *ap, int *info) nogil:
+    _fortran_dpptrf(uplo, n, ap, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dpptri "F_FUNC(dpptri,DPPTRI)"(char *uplo, int *n, d *ap, int *info) nogil
+cdef void dpptri(char *uplo, int *n, d *ap, int *info) nogil:
+    _fortran_dpptri(uplo, n, ap, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dpptrs "F_FUNC(dpptrs,DPPTRS)"(char *uplo, int *n, int *nrhs, d *ap, d *b, int *ldb, int *info) nogil
+cdef void dpptrs(char *uplo, int *n, int *nrhs, d *ap, d *b, int *ldb, int *info) nogil:
+    _fortran_dpptrs(uplo, n, nrhs, ap, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dpstf2 "F_FUNC(dpstf2,DPSTF2)"(char *uplo, int *n, d *a, int *lda, int *piv, int *rank, d *tol, d *work, int *info) nogil
+cdef void dpstf2(char *uplo, int *n, d *a, int *lda, int *piv, int *rank, d *tol, d *work, int *info) nogil:
+    _fortran_dpstf2(uplo, n, a, lda, piv, rank, tol, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dpstrf "F_FUNC(dpstrf,DPSTRF)"(char *uplo, int *n, d *a, int *lda, int *piv, int *rank, d *tol, d *work, int *info) nogil
+cdef void dpstrf(char *uplo, int *n, d *a, int *lda, int *piv, int *rank, d *tol, d *work, int *info) nogil:
+    _fortran_dpstrf(uplo, n, a, lda, piv, rank, tol, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dptcon "F_FUNC(dptcon,DPTCON)"(int *n, d *d, d *e, d *anorm, d *rcond, d *work, int *info) nogil
+cdef void dptcon(int *n, d *d, d *e, d *anorm, d *rcond, d *work, int *info) nogil:
+    _fortran_dptcon(n, d, e, anorm, rcond, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dpteqr "F_FUNC(dpteqr,DPTEQR)"(char *compz, int *n, d *d, d *e, d *z, int *ldz, d *work, int *info) nogil
+cdef void dpteqr(char *compz, int *n, d *d, d *e, d *z, int *ldz, d *work, int *info) nogil:
+    _fortran_dpteqr(compz, n, d, e, z, ldz, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dptrfs "F_FUNC(dptrfs,DPTRFS)"(int *n, int *nrhs, d *d, d *e, d *df, d *ef, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *info) nogil
+cdef void dptrfs(int *n, int *nrhs, d *d, d *e, d *df, d *ef, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *info) nogil:
+    _fortran_dptrfs(n, nrhs, d, e, df, ef, b, ldb, x, ldx, ferr, berr, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dptsv "F_FUNC(dptsv,DPTSV)"(int *n, int *nrhs, d *d, d *e, d *b, int *ldb, int *info) nogil
+cdef void dptsv(int *n, int *nrhs, d *d, d *e, d *b, int *ldb, int *info) nogil:
+    _fortran_dptsv(n, nrhs, d, e, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dptsvx "F_FUNC(dptsvx,DPTSVX)"(char *fact, int *n, int *nrhs, d *d, d *e, d *df, d *ef, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *info) nogil
+cdef void dptsvx(char *fact, int *n, int *nrhs, d *d, d *e, d *df, d *ef, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *info) nogil:
+    _fortran_dptsvx(fact, n, nrhs, d, e, df, ef, b, ldb, x, ldx, rcond, ferr, berr, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dpttrf "F_FUNC(dpttrf,DPTTRF)"(int *n, d *d, d *e, int *info) nogil
+cdef void dpttrf(int *n, d *d, d *e, int *info) nogil:
+    _fortran_dpttrf(n, d, e, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dpttrs "F_FUNC(dpttrs,DPTTRS)"(int *n, int *nrhs, d *d, d *e, d *b, int *ldb, int *info) nogil
+cdef void dpttrs(int *n, int *nrhs, d *d, d *e, d *b, int *ldb, int *info) nogil:
+    _fortran_dpttrs(n, nrhs, d, e, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dptts2 "F_FUNC(dptts2,DPTTS2)"(int *n, int *nrhs, d *d, d *e, d *b, int *ldb) nogil
+cdef void dptts2(int *n, int *nrhs, d *d, d *e, d *b, int *ldb) nogil:
+    _fortran_dptts2(n, nrhs, d, e, b, ldb)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_drscl "F_FUNC(drscl,DRSCL)"(int *n, d *sa, d *sx, int *incx) nogil
+cdef void drscl(int *n, d *sa, d *sx, int *incx) nogil:
+    _fortran_drscl(n, sa, sx, incx)
+
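+# Symmetric eigensolver drivers follow, again grouped by storage: dsb*
+# (band), dsp* (packed), dst* (tridiagonal), and dsy* (full), with the
+# usual simple (ev), divide-and-conquer (evd), RRR (evr), and
+# bisection/inverse-iteration (evx) variants.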
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsbev "F_FUNC(dsbev,DSBEV)"(char *jobz, char *uplo, int *n, int *kd, d *ab, int *ldab, d *w, d *z, int *ldz, d *work, int *info) nogil
+cdef void dsbev(char *jobz, char *uplo, int *n, int *kd, d *ab, int *ldab, d *w, d *z, int *ldz, d *work, int *info) nogil:
+    _fortran_dsbev(jobz, uplo, n, kd, ab, ldab, w, z, ldz, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsbevd "F_FUNC(dsbevd,DSBEVD)"(char *jobz, char *uplo, int *n, int *kd, d *ab, int *ldab, d *w, d *z, int *ldz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+cdef void dsbevd(char *jobz, char *uplo, int *n, int *kd, d *ab, int *ldab, d *w, d *z, int *ldz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_dsbevd(jobz, uplo, n, kd, ab, ldab, w, z, ldz, work, lwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsbevx "F_FUNC(dsbevx,DSBEVX)"(char *jobz, char *range, char *uplo, int *n, int *kd, d *ab, int *ldab, d *q, int *ldq, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, d *work, int *iwork, int *ifail, int *info) nogil
+cdef void dsbevx(char *jobz, char *range, char *uplo, int *n, int *kd, d *ab, int *ldab, d *q, int *ldq, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, d *work, int *iwork, int *ifail, int *info) nogil:
+    _fortran_dsbevx(jobz, range, uplo, n, kd, ab, ldab, q, ldq, vl, vu, il, iu, abstol, m, w, z, ldz, work, iwork, ifail, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsbgst "F_FUNC(dsbgst,DSBGST)"(char *vect, char *uplo, int *n, int *ka, int *kb, d *ab, int *ldab, d *bb, int *ldbb, d *x, int *ldx, d *work, int *info) nogil
+cdef void dsbgst(char *vect, char *uplo, int *n, int *ka, int *kb, d *ab, int *ldab, d *bb, int *ldbb, d *x, int *ldx, d *work, int *info) nogil:
+    _fortran_dsbgst(vect, uplo, n, ka, kb, ab, ldab, bb, ldbb, x, ldx, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsbgv "F_FUNC(dsbgv,DSBGV)"(char *jobz, char *uplo, int *n, int *ka, int *kb, d *ab, int *ldab, d *bb, int *ldbb, d *w, d *z, int *ldz, d *work, int *info) nogil
+cdef void dsbgv(char *jobz, char *uplo, int *n, int *ka, int *kb, d *ab, int *ldab, d *bb, int *ldbb, d *w, d *z, int *ldz, d *work, int *info) nogil:
+    _fortran_dsbgv(jobz, uplo, n, ka, kb, ab, ldab, bb, ldbb, w, z, ldz, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsbgvd "F_FUNC(dsbgvd,DSBGVD)"(char *jobz, char *uplo, int *n, int *ka, int *kb, d *ab, int *ldab, d *bb, int *ldbb, d *w, d *z, int *ldz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+cdef void dsbgvd(char *jobz, char *uplo, int *n, int *ka, int *kb, d *ab, int *ldab, d *bb, int *ldbb, d *w, d *z, int *ldz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_dsbgvd(jobz, uplo, n, ka, kb, ab, ldab, bb, ldbb, w, z, ldz, work, lwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsbgvx "F_FUNC(dsbgvx,DSBGVX)"(char *jobz, char *range, char *uplo, int *n, int *ka, int *kb, d *ab, int *ldab, d *bb, int *ldbb, d *q, int *ldq, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, d *work, int *iwork, int *ifail, int *info) nogil
+cdef void dsbgvx(char *jobz, char *range, char *uplo, int *n, int *ka, int *kb, d *ab, int *ldab, d *bb, int *ldbb, d *q, int *ldq, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, d *work, int *iwork, int *ifail, int *info) nogil:
+    _fortran_dsbgvx(jobz, range, uplo, n, ka, kb, ab, ldab, bb, ldbb, q, ldq, vl, vu, il, iu, abstol, m, w, z, ldz, work, iwork, ifail, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsbtrd "F_FUNC(dsbtrd,DSBTRD)"(char *vect, char *uplo, int *n, int *kd, d *ab, int *ldab, d *d, d *e, d *q, int *ldq, d *work, int *info) nogil
+cdef void dsbtrd(char *vect, char *uplo, int *n, int *kd, d *ab, int *ldab, d *d, d *e, d *q, int *ldq, d *work, int *info) nogil:
+    _fortran_dsbtrd(vect, uplo, n, kd, ab, ldab, d, e, q, ldq, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsfrk "F_FUNC(dsfrk,DSFRK)"(char *transr, char *uplo, char *trans, int *n, int *k, d *alpha, d *a, int *lda, d *beta, d *c) nogil
+cdef void dsfrk(char *transr, char *uplo, char *trans, int *n, int *k, d *alpha, d *a, int *lda, d *beta, d *c) nogil:
+    _fortran_dsfrk(transr, uplo, trans, n, k, alpha, a, lda, beta, c)
+
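+# dsgesv and dsposv are mixed-precision drivers: they factor in single
+# precision and iteratively refine the solution to double precision,
+# falling back to a full double-precision solve when refinement fails
+# to converge (`iter` reports which path was taken).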
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsgesv "F_FUNC(dsgesv,DSGESV)"(int *n, int *nrhs, d *a, int *lda, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *work, s *swork, int *iter, int *info) nogil
+cdef void dsgesv(int *n, int *nrhs, d *a, int *lda, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *work, s *swork, int *iter, int *info) nogil:
+    _fortran_dsgesv(n, nrhs, a, lda, ipiv, b, ldb, x, ldx, work, swork, iter, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dspcon "F_FUNC(dspcon,DSPCON)"(char *uplo, int *n, d *ap, int *ipiv, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil
+cdef void dspcon(char *uplo, int *n, d *ap, int *ipiv, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil:
+    _fortran_dspcon(uplo, n, ap, ipiv, anorm, rcond, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dspev "F_FUNC(dspev,DSPEV)"(char *jobz, char *uplo, int *n, d *ap, d *w, d *z, int *ldz, d *work, int *info) nogil
+cdef void dspev(char *jobz, char *uplo, int *n, d *ap, d *w, d *z, int *ldz, d *work, int *info) nogil:
+    _fortran_dspev(jobz, uplo, n, ap, w, z, ldz, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dspevd "F_FUNC(dspevd,DSPEVD)"(char *jobz, char *uplo, int *n, d *ap, d *w, d *z, int *ldz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+cdef void dspevd(char *jobz, char *uplo, int *n, d *ap, d *w, d *z, int *ldz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_dspevd(jobz, uplo, n, ap, w, z, ldz, work, lwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dspevx "F_FUNC(dspevx,DSPEVX)"(char *jobz, char *range, char *uplo, int *n, d *ap, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, d *work, int *iwork, int *ifail, int *info) nogil
+cdef void dspevx(char *jobz, char *range, char *uplo, int *n, d *ap, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, d *work, int *iwork, int *ifail, int *info) nogil:
+    _fortran_dspevx(jobz, range, uplo, n, ap, vl, vu, il, iu, abstol, m, w, z, ldz, work, iwork, ifail, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dspgst "F_FUNC(dspgst,DSPGST)"(int *itype, char *uplo, int *n, d *ap, d *bp, int *info) nogil
+cdef void dspgst(int *itype, char *uplo, int *n, d *ap, d *bp, int *info) nogil:
+    _fortran_dspgst(itype, uplo, n, ap, bp, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dspgv "F_FUNC(dspgv,DSPGV)"(int *itype, char *jobz, char *uplo, int *n, d *ap, d *bp, d *w, d *z, int *ldz, d *work, int *info) nogil
+cdef void dspgv(int *itype, char *jobz, char *uplo, int *n, d *ap, d *bp, d *w, d *z, int *ldz, d *work, int *info) nogil:
+    _fortran_dspgv(itype, jobz, uplo, n, ap, bp, w, z, ldz, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dspgvd "F_FUNC(dspgvd,DSPGVD)"(int *itype, char *jobz, char *uplo, int *n, d *ap, d *bp, d *w, d *z, int *ldz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+cdef void dspgvd(int *itype, char *jobz, char *uplo, int *n, d *ap, d *bp, d *w, d *z, int *ldz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_dspgvd(itype, jobz, uplo, n, ap, bp, w, z, ldz, work, lwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dspgvx "F_FUNC(dspgvx,DSPGVX)"(int *itype, char *jobz, char *range, char *uplo, int *n, d *ap, d *bp, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, d *work, int *iwork, int *ifail, int *info) nogil
+cdef void dspgvx(int *itype, char *jobz, char *range, char *uplo, int *n, d *ap, d *bp, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, d *work, int *iwork, int *ifail, int *info) nogil:
+    _fortran_dspgvx(itype, jobz, range, uplo, n, ap, bp, vl, vu, il, iu, abstol, m, w, z, ldz, work, iwork, ifail, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsposv "F_FUNC(dsposv,DSPOSV)"(char *uplo, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, d *x, int *ldx, d *work, s *swork, int *iter, int *info) nogil
+cdef void dsposv(char *uplo, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, d *x, int *ldx, d *work, s *swork, int *iter, int *info) nogil:
+    _fortran_dsposv(uplo, n, nrhs, a, lda, b, ldb, x, ldx, work, swork, iter, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsprfs "F_FUNC(dsprfs,DSPRFS)"(char *uplo, int *n, int *nrhs, d *ap, d *afp, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+cdef void dsprfs(char *uplo, int *n, int *nrhs, d *ap, d *afp, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil:
+    _fortran_dsprfs(uplo, n, nrhs, ap, afp, ipiv, b, ldb, x, ldx, ferr, berr, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dspsv "F_FUNC(dspsv,DSPSV)"(char *uplo, int *n, int *nrhs, d *ap, int *ipiv, d *b, int *ldb, int *info) nogil
+cdef void dspsv(char *uplo, int *n, int *nrhs, d *ap, int *ipiv, d *b, int *ldb, int *info) nogil:
+    _fortran_dspsv(uplo, n, nrhs, ap, ipiv, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dspsvx "F_FUNC(dspsvx,DSPSVX)"(char *fact, char *uplo, int *n, int *nrhs, d *ap, d *afp, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+cdef void dspsvx(char *fact, char *uplo, int *n, int *nrhs, d *ap, d *afp, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *iwork, int *info) nogil:
+    _fortran_dspsvx(fact, uplo, n, nrhs, ap, afp, ipiv, b, ldb, x, ldx, rcond, ferr, berr, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsptrd "F_FUNC(dsptrd,DSPTRD)"(char *uplo, int *n, d *ap, d *d, d *e, d *tau, int *info) nogil
+cdef void dsptrd(char *uplo, int *n, d *ap, d *d, d *e, d *tau, int *info) nogil:
+    _fortran_dsptrd(uplo, n, ap, d, e, tau, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsptrf "F_FUNC(dsptrf,DSPTRF)"(char *uplo, int *n, d *ap, int *ipiv, int *info) nogil
+cdef void dsptrf(char *uplo, int *n, d *ap, int *ipiv, int *info) nogil:
+    _fortran_dsptrf(uplo, n, ap, ipiv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsptri "F_FUNC(dsptri,DSPTRI)"(char *uplo, int *n, d *ap, int *ipiv, d *work, int *info) nogil
+cdef void dsptri(char *uplo, int *n, d *ap, int *ipiv, d *work, int *info) nogil:
+    _fortran_dsptri(uplo, n, ap, ipiv, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsptrs "F_FUNC(dsptrs,DSPTRS)"(char *uplo, int *n, int *nrhs, d *ap, int *ipiv, d *b, int *ldb, int *info) nogil
+cdef void dsptrs(char *uplo, int *n, int *nrhs, d *ap, int *ipiv, d *b, int *ldb, int *info) nogil:
+    _fortran_dsptrs(uplo, n, nrhs, ap, ipiv, b, ldb, info)
+
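+# dstebz/dstein and the dste*/dstev* routines below operate directly on a
+# symmetric tridiagonal matrix given by its diagonal `d` and off-diagonal
+# `e`; the other symmetric drivers reduce to this form first.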
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dstebz "F_FUNC(dstebz,DSTEBZ)"(char *range, char *order, int *n, d *vl, d *vu, int *il, int *iu, d *abstol, d *d, d *e, int *m, int *nsplit, d *w, int *iblock, int *isplit, d *work, int *iwork, int *info) nogil
+cdef void dstebz(char *range, char *order, int *n, d *vl, d *vu, int *il, int *iu, d *abstol, d *d, d *e, int *m, int *nsplit, d *w, int *iblock, int *isplit, d *work, int *iwork, int *info) nogil:
+    _fortran_dstebz(range, order, n, vl, vu, il, iu, abstol, d, e, m, nsplit, w, iblock, isplit, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dstedc "F_FUNC(dstedc,DSTEDC)"(char *compz, int *n, d *d, d *e, d *z, int *ldz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+cdef void dstedc(char *compz, int *n, d *d, d *e, d *z, int *ldz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_dstedc(compz, n, d, e, z, ldz, work, lwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dstegr "F_FUNC(dstegr,DSTEGR)"(char *jobz, char *range, int *n, d *d, d *e, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, int *isuppz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+cdef void dstegr(char *jobz, char *range, int *n, d *d, d *e, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, int *isuppz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_dstegr(jobz, range, n, d, e, vl, vu, il, iu, abstol, m, w, z, ldz, isuppz, work, lwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dstein "F_FUNC(dstein,DSTEIN)"(int *n, d *d, d *e, int *m, d *w, int *iblock, int *isplit, d *z, int *ldz, d *work, int *iwork, int *ifail, int *info) nogil
+cdef void dstein(int *n, d *d, d *e, int *m, d *w, int *iblock, int *isplit, d *z, int *ldz, d *work, int *iwork, int *ifail, int *info) nogil:
+    _fortran_dstein(n, d, e, m, w, iblock, isplit, z, ldz, work, iwork, ifail, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dstemr "F_FUNC(dstemr,DSTEMR)"(char *jobz, char *range, int *n, d *d, d *e, d *vl, d *vu, int *il, int *iu, int *m, d *w, d *z, int *ldz, int *nzc, int *isuppz, bint *tryrac, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+cdef void dstemr(char *jobz, char *range, int *n, d *d, d *e, d *vl, d *vu, int *il, int *iu, int *m, d *w, d *z, int *ldz, int *nzc, int *isuppz, bint *tryrac, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_dstemr(jobz, range, n, d, e, vl, vu, il, iu, m, w, z, ldz, nzc, isuppz, tryrac, work, lwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsteqr "F_FUNC(dsteqr,DSTEQR)"(char *compz, int *n, d *d, d *e, d *z, int *ldz, d *work, int *info) nogil
+cdef void dsteqr(char *compz, int *n, d *d, d *e, d *z, int *ldz, d *work, int *info) nogil:
+    _fortran_dsteqr(compz, n, d, e, z, ldz, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsterf "F_FUNC(dsterf,DSTERF)"(int *n, d *d, d *e, int *info) nogil
+cdef void dsterf(int *n, d *d, d *e, int *info) nogil:
+    _fortran_dsterf(n, d, e, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dstev "F_FUNC(dstev,DSTEV)"(char *jobz, int *n, d *d, d *e, d *z, int *ldz, d *work, int *info) nogil
+cdef void dstev(char *jobz, int *n, d *d, d *e, d *z, int *ldz, d *work, int *info) nogil:
+    _fortran_dstev(jobz, n, d, e, z, ldz, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dstevd "F_FUNC(dstevd,DSTEVD)"(char *jobz, int *n, d *d, d *e, d *z, int *ldz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+cdef void dstevd(char *jobz, int *n, d *d, d *e, d *z, int *ldz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_dstevd(jobz, n, d, e, z, ldz, work, lwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dstevr "F_FUNC(dstevr,DSTEVR)"(char *jobz, char *range, int *n, d *d, d *e, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, int *isuppz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+cdef void dstevr(char *jobz, char *range, int *n, d *d, d *e, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, int *isuppz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_dstevr(jobz, range, n, d, e, vl, vu, il, iu, abstol, m, w, z, ldz, isuppz, work, lwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dstevx "F_FUNC(dstevx,DSTEVX)"(char *jobz, char *range, int *n, d *d, d *e, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, d *work, int *iwork, int *ifail, int *info) nogil
+cdef void dstevx(char *jobz, char *range, int *n, d *d, d *e, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, d *work, int *iwork, int *ifail, int *info) nogil:
+    _fortran_dstevx(jobz, range, n, d, e, vl, vu, il, iu, abstol, m, w, z, ldz, work, iwork, ifail, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsycon "F_FUNC(dsycon,DSYCON)"(char *uplo, int *n, d *a, int *lda, int *ipiv, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil
+cdef void dsycon(char *uplo, int *n, d *a, int *lda, int *ipiv, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil:
+    _fortran_dsycon(uplo, n, a, lda, ipiv, anorm, rcond, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsyconv "F_FUNC(dsyconv,DSYCONV)"(char *uplo, char *way, int *n, d *a, int *lda, int *ipiv, d *work, int *info) nogil
+cdef void dsyconv(char *uplo, char *way, int *n, d *a, int *lda, int *ipiv, d *work, int *info) nogil:
+    _fortran_dsyconv(uplo, way, n, a, lda, ipiv, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsyequb "F_FUNC(dsyequb,DSYEQUB)"(char *uplo, int *n, d *a, int *lda, d *s, d *scond, d *amax, d *work, int *info) nogil
+cdef void dsyequb(char *uplo, int *n, d *a, int *lda, d *s, d *scond, d *amax, d *work, int *info) nogil:
+    _fortran_dsyequb(uplo, n, a, lda, s, scond, amax, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsyev "F_FUNC(dsyev,DSYEV)"(char *jobz, char *uplo, int *n, d *a, int *lda, d *w, d *work, int *lwork, int *info) nogil
+cdef void dsyev(char *jobz, char *uplo, int *n, d *a, int *lda, d *w, d *work, int *lwork, int *info) nogil:
+    _fortran_dsyev(jobz, uplo, n, a, lda, w, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsyevd "F_FUNC(dsyevd,DSYEVD)"(char *jobz, char *uplo, int *n, d *a, int *lda, d *w, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+cdef void dsyevd(char *jobz, char *uplo, int *n, d *a, int *lda, d *w, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_dsyevd(jobz, uplo, n, a, lda, w, work, lwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsyevr "F_FUNC(dsyevr,DSYEVR)"(char *jobz, char *range, char *uplo, int *n, d *a, int *lda, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, int *isuppz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+cdef void dsyevr(char *jobz, char *range, char *uplo, int *n, d *a, int *lda, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, int *isuppz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_dsyevr(jobz, range, uplo, n, a, lda, vl, vu, il, iu, abstol, m, w, z, ldz, isuppz, work, lwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsyevx "F_FUNC(dsyevx,DSYEVX)"(char *jobz, char *range, char *uplo, int *n, d *a, int *lda, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, d *work, int *lwork, int *iwork, int *ifail, int *info) nogil
+cdef void dsyevx(char *jobz, char *range, char *uplo, int *n, d *a, int *lda, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, d *work, int *lwork, int *iwork, int *ifail, int *info) nogil:
+    _fortran_dsyevx(jobz, range, uplo, n, a, lda, vl, vu, il, iu, abstol, m, w, z, ldz, work, lwork, iwork, ifail, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsygs2 "F_FUNC(dsygs2,DSYGS2)"(int *itype, char *uplo, int *n, d *a, int *lda, d *b, int *ldb, int *info) nogil
+cdef void dsygs2(int *itype, char *uplo, int *n, d *a, int *lda, d *b, int *ldb, int *info) nogil:
+    _fortran_dsygs2(itype, uplo, n, a, lda, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsygst "F_FUNC(dsygst,DSYGST)"(int *itype, char *uplo, int *n, d *a, int *lda, d *b, int *ldb, int *info) nogil
+cdef void dsygst(int *itype, char *uplo, int *n, d *a, int *lda, d *b, int *ldb, int *info) nogil:
+    _fortran_dsygst(itype, uplo, n, a, lda, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsygv "F_FUNC(dsygv,DSYGV)"(int *itype, char *jobz, char *uplo, int *n, d *a, int *lda, d *b, int *ldb, d *w, d *work, int *lwork, int *info) nogil
+cdef void dsygv(int *itype, char *jobz, char *uplo, int *n, d *a, int *lda, d *b, int *ldb, d *w, d *work, int *lwork, int *info) nogil:
+    _fortran_dsygv(itype, jobz, uplo, n, a, lda, b, ldb, w, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsygvd "F_FUNC(dsygvd,DSYGVD)"(int *itype, char *jobz, char *uplo, int *n, d *a, int *lda, d *b, int *ldb, d *w, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+cdef void dsygvd(int *itype, char *jobz, char *uplo, int *n, d *a, int *lda, d *b, int *ldb, d *w, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_dsygvd(itype, jobz, uplo, n, a, lda, b, ldb, w, work, lwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsygvx "F_FUNC(dsygvx,DSYGVX)"(int *itype, char *jobz, char *range, char *uplo, int *n, d *a, int *lda, d *b, int *ldb, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, d *work, int *lwork, int *iwork, int *ifail, int *info) nogil
+cdef void dsygvx(int *itype, char *jobz, char *range, char *uplo, int *n, d *a, int *lda, d *b, int *ldb, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, d *work, int *lwork, int *iwork, int *ifail, int *info) nogil:
+    _fortran_dsygvx(itype, jobz, range, uplo, n, a, lda, b, ldb, vl, vu, il, iu, abstol, m, w, z, ldz, work, lwork, iwork, ifail, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsyrfs "F_FUNC(dsyrfs,DSYRFS)"(char *uplo, int *n, int *nrhs, d *a, int *lda, d *af, int *ldaf, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+cdef void dsyrfs(char *uplo, int *n, int *nrhs, d *a, int *lda, d *af, int *ldaf, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil:
+    _fortran_dsyrfs(uplo, n, nrhs, a, lda, af, ldaf, ipiv, b, ldb, x, ldx, ferr, berr, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsysv "F_FUNC(dsysv,DSYSV)"(char *uplo, int *n, int *nrhs, d *a, int *lda, int *ipiv, d *b, int *ldb, d *work, int *lwork, int *info) nogil
+cdef void dsysv(char *uplo, int *n, int *nrhs, d *a, int *lda, int *ipiv, d *b, int *ldb, d *work, int *lwork, int *info) nogil:
+    _fortran_dsysv(uplo, n, nrhs, a, lda, ipiv, b, ldb, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsysvx "F_FUNC(dsysvx,DSYSVX)"(char *fact, char *uplo, int *n, int *nrhs, d *a, int *lda, d *af, int *ldaf, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *lwork, int *iwork, int *info) nogil
+cdef void dsysvx(char *fact, char *uplo, int *n, int *nrhs, d *a, int *lda, d *af, int *ldaf, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *lwork, int *iwork, int *info) nogil:
+    _fortran_dsysvx(fact, uplo, n, nrhs, a, lda, af, ldaf, ipiv, b, ldb, x, ldx, rcond, ferr, berr, work, lwork, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsyswapr "F_FUNC(dsyswapr,DSYSWAPR)"(char *uplo, int *n, d *a, int *lda, int *i1, int *i2) nogil
+cdef void dsyswapr(char *uplo, int *n, d *a, int *lda, int *i1, int *i2) nogil:
+    _fortran_dsyswapr(uplo, n, a, lda, i1, i2)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsytd2 "F_FUNC(dsytd2,DSYTD2)"(char *uplo, int *n, d *a, int *lda, d *d, d *e, d *tau, int *info) nogil
+cdef void dsytd2(char *uplo, int *n, d *a, int *lda, d *d, d *e, d *tau, int *info) nogil:
+    _fortran_dsytd2(uplo, n, a, lda, d, e, tau, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsytf2 "F_FUNC(dsytf2,DSYTF2)"(char *uplo, int *n, d *a, int *lda, int *ipiv, int *info) nogil
+cdef void dsytf2(char *uplo, int *n, d *a, int *lda, int *ipiv, int *info) nogil:
+    _fortran_dsytf2(uplo, n, a, lda, ipiv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsytrd "F_FUNC(dsytrd,DSYTRD)"(char *uplo, int *n, d *a, int *lda, d *d, d *e, d *tau, d *work, int *lwork, int *info) nogil
+cdef void dsytrd(char *uplo, int *n, d *a, int *lda, d *d, d *e, d *tau, d *work, int *lwork, int *info) nogil:
+    _fortran_dsytrd(uplo, n, a, lda, d, e, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsytrf "F_FUNC(dsytrf,DSYTRF)"(char *uplo, int *n, d *a, int *lda, int *ipiv, d *work, int *lwork, int *info) nogil
+cdef void dsytrf(char *uplo, int *n, d *a, int *lda, int *ipiv, d *work, int *lwork, int *info) nogil:
+    _fortran_dsytrf(uplo, n, a, lda, ipiv, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsytri "F_FUNC(dsytri,DSYTRI)"(char *uplo, int *n, d *a, int *lda, int *ipiv, d *work, int *info) nogil
+cdef void dsytri(char *uplo, int *n, d *a, int *lda, int *ipiv, d *work, int *info) nogil:
+    _fortran_dsytri(uplo, n, a, lda, ipiv, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsytri2 "F_FUNC(dsytri2,DSYTRI2)"(char *uplo, int *n, d *a, int *lda, int *ipiv, d *work, int *lwork, int *info) nogil
+cdef void dsytri2(char *uplo, int *n, d *a, int *lda, int *ipiv, d *work, int *lwork, int *info) nogil:
+    _fortran_dsytri2(uplo, n, a, lda, ipiv, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsytri2x "F_FUNC(dsytri2x,DSYTRI2X)"(char *uplo, int *n, d *a, int *lda, int *ipiv, d *work, int *nb, int *info) nogil
+cdef void dsytri2x(char *uplo, int *n, d *a, int *lda, int *ipiv, d *work, int *nb, int *info) nogil:
+    _fortran_dsytri2x(uplo, n, a, lda, ipiv, work, nb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsytrs "F_FUNC(dsytrs,DSYTRS)"(char *uplo, int *n, int *nrhs, d *a, int *lda, int *ipiv, d *b, int *ldb, int *info) nogil
+cdef void dsytrs(char *uplo, int *n, int *nrhs, d *a, int *lda, int *ipiv, d *b, int *ldb, int *info) nogil:
+    _fortran_dsytrs(uplo, n, nrhs, a, lda, ipiv, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dsytrs2 "F_FUNC(dsytrs2,DSYTRS2)"(char *uplo, int *n, int *nrhs, d *a, int *lda, int *ipiv, d *b, int *ldb, d *work, int *info) nogil
+cdef void dsytrs2(char *uplo, int *n, int *nrhs, d *a, int *lda, int *ipiv, d *b, int *ldb, d *work, int *info) nogil:
+    _fortran_dsytrs2(uplo, n, nrhs, a, lda, ipiv, b, ldb, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtbcon "F_FUNC(dtbcon,DTBCON)"(char *norm, char *uplo, char *diag, int *n, int *kd, d *ab, int *ldab, d *rcond, d *work, int *iwork, int *info) nogil
+cdef void dtbcon(char *norm, char *uplo, char *diag, int *n, int *kd, d *ab, int *ldab, d *rcond, d *work, int *iwork, int *info) nogil:
+    _fortran_dtbcon(norm, uplo, diag, n, kd, ab, ldab, rcond, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtbrfs "F_FUNC(dtbrfs,DTBRFS)"(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, d *ab, int *ldab, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+cdef void dtbrfs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, d *ab, int *ldab, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil:
+    _fortran_dtbrfs(uplo, trans, diag, n, kd, nrhs, ab, ldab, b, ldb, x, ldx, ferr, berr, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtbtrs "F_FUNC(dtbtrs,DTBTRS)"(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, d *ab, int *ldab, d *b, int *ldb, int *info) nogil
+cdef void dtbtrs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, d *ab, int *ldab, d *b, int *ldb, int *info) nogil:
+    _fortran_dtbtrs(uplo, trans, diag, n, kd, nrhs, ab, ldab, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtfsm "F_FUNC(dtfsm,DTFSM)"(char *transr, char *side, char *uplo, char *trans, char *diag, int *m, int *n, d *alpha, d *a, d *b, int *ldb) nogil
+cdef void dtfsm(char *transr, char *side, char *uplo, char *trans, char *diag, int *m, int *n, d *alpha, d *a, d *b, int *ldb) nogil:
+    _fortran_dtfsm(transr, side, uplo, trans, diag, m, n, alpha, a, b, ldb)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtftri "F_FUNC(dtftri,DTFTRI)"(char *transr, char *uplo, char *diag, int *n, d *a, int *info) nogil
+cdef void dtftri(char *transr, char *uplo, char *diag, int *n, d *a, int *info) nogil:
+    _fortran_dtftri(transr, uplo, diag, n, a, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtfttp "F_FUNC(dtfttp,DTFTTP)"(char *transr, char *uplo, int *n, d *arf, d *ap, int *info) nogil
+cdef void dtfttp(char *transr, char *uplo, int *n, d *arf, d *ap, int *info) nogil:
+    _fortran_dtfttp(transr, uplo, n, arf, ap, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtfttr "F_FUNC(dtfttr,DTFTTR)"(char *transr, char *uplo, int *n, d *arf, d *a, int *lda, int *info) nogil
+cdef void dtfttr(char *transr, char *uplo, int *n, d *arf, d *a, int *lda, int *info) nogil:
+    _fortran_dtfttr(transr, uplo, n, arf, a, lda, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtgevc "F_FUNC(dtgevc,DTGEVC)"(char *side, char *howmny, bint *select, int *n, d *s, int *lds, d *p, int *ldp, d *vl, int *ldvl, d *vr, int *ldvr, int *mm, int *m, d *work, int *info) nogil
+cdef void dtgevc(char *side, char *howmny, bint *select, int *n, d *s, int *lds, d *p, int *ldp, d *vl, int *ldvl, d *vr, int *ldvr, int *mm, int *m, d *work, int *info) nogil:
+    _fortran_dtgevc(side, howmny, select, n, s, lds, p, ldp, vl, ldvl, vr, ldvr, mm, m, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtgex2 "F_FUNC(dtgex2,DTGEX2)"(bint *wantq, bint *wantz, int *n, d *a, int *lda, d *b, int *ldb, d *q, int *ldq, d *z, int *ldz, int *j1, int *n1, int *n2, d *work, int *lwork, int *info) nogil
+cdef void dtgex2(bint *wantq, bint *wantz, int *n, d *a, int *lda, d *b, int *ldb, d *q, int *ldq, d *z, int *ldz, int *j1, int *n1, int *n2, d *work, int *lwork, int *info) nogil:
+    _fortran_dtgex2(wantq, wantz, n, a, lda, b, ldb, q, ldq, z, ldz, j1, n1, n2, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtgexc "F_FUNC(dtgexc,DTGEXC)"(bint *wantq, bint *wantz, int *n, d *a, int *lda, d *b, int *ldb, d *q, int *ldq, d *z, int *ldz, int *ifst, int *ilst, d *work, int *lwork, int *info) nogil
+cdef void dtgexc(bint *wantq, bint *wantz, int *n, d *a, int *lda, d *b, int *ldb, d *q, int *ldq, d *z, int *ldz, int *ifst, int *ilst, d *work, int *lwork, int *info) nogil:
+    _fortran_dtgexc(wantq, wantz, n, a, lda, b, ldb, q, ldq, z, ldz, ifst, ilst, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtgsen "F_FUNC(dtgsen,DTGSEN)"(int *ijob, bint *wantq, bint *wantz, bint *select, int *n, d *a, int *lda, d *b, int *ldb, d *alphar, d *alphai, d *beta, d *q, int *ldq, d *z, int *ldz, int *m, d *pl, d *pr, d *dif, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+cdef void dtgsen(int *ijob, bint *wantq, bint *wantz, bint *select, int *n, d *a, int *lda, d *b, int *ldb, d *alphar, d *alphai, d *beta, d *q, int *ldq, d *z, int *ldz, int *m, d *pl, d *pr, d *dif, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_dtgsen(ijob, wantq, wantz, select, n, a, lda, b, ldb, alphar, alphai, beta, q, ldq, z, ldz, m, pl, pr, dif, work, lwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtgsja "F_FUNC(dtgsja,DTGSJA)"(char *jobu, char *jobv, char *jobq, int *m, int *p, int *n, int *k, int *l, d *a, int *lda, d *b, int *ldb, d *tola, d *tolb, d *alpha, d *beta, d *u, int *ldu, d *v, int *ldv, d *q, int *ldq, d *work, int *ncycle, int *info) nogil
+cdef void dtgsja(char *jobu, char *jobv, char *jobq, int *m, int *p, int *n, int *k, int *l, d *a, int *lda, d *b, int *ldb, d *tola, d *tolb, d *alpha, d *beta, d *u, int *ldu, d *v, int *ldv, d *q, int *ldq, d *work, int *ncycle, int *info) nogil:
+    _fortran_dtgsja(jobu, jobv, jobq, m, p, n, k, l, a, lda, b, ldb, tola, tolb, alpha, beta, u, ldu, v, ldv, q, ldq, work, ncycle, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtgsna "F_FUNC(dtgsna,DTGSNA)"(char *job, char *howmny, bint *select, int *n, d *a, int *lda, d *b, int *ldb, d *vl, int *ldvl, d *vr, int *ldvr, d *s, d *dif, int *mm, int *m, d *work, int *lwork, int *iwork, int *info) nogil
+cdef void dtgsna(char *job, char *howmny, bint *select, int *n, d *a, int *lda, d *b, int *ldb, d *vl, int *ldvl, d *vr, int *ldvr, d *s, d *dif, int *mm, int *m, d *work, int *lwork, int *iwork, int *info) nogil:
+    _fortran_dtgsna(job, howmny, select, n, a, lda, b, ldb, vl, ldvl, vr, ldvr, s, dif, mm, m, work, lwork, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtgsy2 "F_FUNC(dtgsy2,DTGSY2)"(char *trans, int *ijob, int *m, int *n, d *a, int *lda, d *b, int *ldb, d *c, int *ldc, d *d, int *ldd, d *e, int *lde, d *f, int *ldf, d *scale, d *rdsum, d *rdscal, int *iwork, int *pq, int *info) nogil
+cdef void dtgsy2(char *trans, int *ijob, int *m, int *n, d *a, int *lda, d *b, int *ldb, d *c, int *ldc, d *d, int *ldd, d *e, int *lde, d *f, int *ldf, d *scale, d *rdsum, d *rdscal, int *iwork, int *pq, int *info) nogil:
+    _fortran_dtgsy2(trans, ijob, m, n, a, lda, b, ldb, c, ldc, d, ldd, e, lde, f, ldf, scale, rdsum, rdscal, iwork, pq, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtgsyl "F_FUNC(dtgsyl,DTGSYL)"(char *trans, int *ijob, int *m, int *n, d *a, int *lda, d *b, int *ldb, d *c, int *ldc, d *d, int *ldd, d *e, int *lde, d *f, int *ldf, d *scale, d *dif, d *work, int *lwork, int *iwork, int *info) nogil
+cdef void dtgsyl(char *trans, int *ijob, int *m, int *n, d *a, int *lda, d *b, int *ldb, d *c, int *ldc, d *d, int *ldd, d *e, int *lde, d *f, int *ldf, d *scale, d *dif, d *work, int *lwork, int *iwork, int *info) nogil:
+    _fortran_dtgsyl(trans, ijob, m, n, a, lda, b, ldb, c, ldc, d, ldd, e, lde, f, ldf, scale, dif, work, lwork, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtpcon "F_FUNC(dtpcon,DTPCON)"(char *norm, char *uplo, char *diag, int *n, d *ap, d *rcond, d *work, int *iwork, int *info) nogil
+cdef void dtpcon(char *norm, char *uplo, char *diag, int *n, d *ap, d *rcond, d *work, int *iwork, int *info) nogil:
+    _fortran_dtpcon(norm, uplo, diag, n, ap, rcond, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtpmqrt "F_FUNC(dtpmqrt,DTPMQRT)"(char *side, char *trans, int *m, int *n, int *k, int *l, int *nb, d *v, int *ldv, d *t, int *ldt, d *a, int *lda, d *b, int *ldb, d *work, int *info) nogil
+cdef void dtpmqrt(char *side, char *trans, int *m, int *n, int *k, int *l, int *nb, d *v, int *ldv, d *t, int *ldt, d *a, int *lda, d *b, int *ldb, d *work, int *info) nogil:
+    _fortran_dtpmqrt(side, trans, m, n, k, l, nb, v, ldv, t, ldt, a, lda, b, ldb, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtpqrt "F_FUNC(dtpqrt,DTPQRT)"(int *m, int *n, int *l, int *nb, d *a, int *lda, d *b, int *ldb, d *t, int *ldt, d *work, int *info) nogil
+cdef void dtpqrt(int *m, int *n, int *l, int *nb, d *a, int *lda, d *b, int *ldb, d *t, int *ldt, d *work, int *info) nogil:
+    _fortran_dtpqrt(m, n, l, nb, a, lda, b, ldb, t, ldt, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtpqrt2 "F_FUNC(dtpqrt2,DTPQRT2)"(int *m, int *n, int *l, d *a, int *lda, d *b, int *ldb, d *t, int *ldt, int *info) nogil
+cdef void dtpqrt2(int *m, int *n, int *l, d *a, int *lda, d *b, int *ldb, d *t, int *ldt, int *info) nogil:
+    _fortran_dtpqrt2(m, n, l, a, lda, b, ldb, t, ldt, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtprfb "F_FUNC(dtprfb,DTPRFB)"(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, d *v, int *ldv, d *t, int *ldt, d *a, int *lda, d *b, int *ldb, d *work, int *ldwork) nogil
+cdef void dtprfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, d *v, int *ldv, d *t, int *ldt, d *a, int *lda, d *b, int *ldb, d *work, int *ldwork) nogil:
+    _fortran_dtprfb(side, trans, direct, storev, m, n, k, l, v, ldv, t, ldt, a, lda, b, ldb, work, ldwork)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtprfs "F_FUNC(dtprfs,DTPRFS)"(char *uplo, char *trans, char *diag, int *n, int *nrhs, d *ap, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+cdef void dtprfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, d *ap, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil:
+    _fortran_dtprfs(uplo, trans, diag, n, nrhs, ap, b, ldb, x, ldx, ferr, berr, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtptri "F_FUNC(dtptri,DTPTRI)"(char *uplo, char *diag, int *n, d *ap, int *info) nogil
+cdef void dtptri(char *uplo, char *diag, int *n, d *ap, int *info) nogil:
+    _fortran_dtptri(uplo, diag, n, ap, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtptrs "F_FUNC(dtptrs,DTPTRS)"(char *uplo, char *trans, char *diag, int *n, int *nrhs, d *ap, d *b, int *ldb, int *info) nogil
+cdef void dtptrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, d *ap, d *b, int *ldb, int *info) nogil:
+    _fortran_dtptrs(uplo, trans, diag, n, nrhs, ap, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtpttf "F_FUNC(dtpttf,DTPTTF)"(char *transr, char *uplo, int *n, d *ap, d *arf, int *info) nogil
+cdef void dtpttf(char *transr, char *uplo, int *n, d *ap, d *arf, int *info) nogil:
+    _fortran_dtpttf(transr, uplo, n, ap, arf, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtpttr "F_FUNC(dtpttr,DTPTTR)"(char *uplo, int *n, d *ap, d *a, int *lda, int *info) nogil
+cdef void dtpttr(char *uplo, int *n, d *ap, d *a, int *lda, int *info) nogil:
+    _fortran_dtpttr(uplo, n, ap, a, lda, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtrcon "F_FUNC(dtrcon,DTRCON)"(char *norm, char *uplo, char *diag, int *n, d *a, int *lda, d *rcond, d *work, int *iwork, int *info) nogil
+cdef void dtrcon(char *norm, char *uplo, char *diag, int *n, d *a, int *lda, d *rcond, d *work, int *iwork, int *info) nogil:
+    _fortran_dtrcon(norm, uplo, diag, n, a, lda, rcond, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtrevc "F_FUNC(dtrevc,DTREVC)"(char *side, char *howmny, bint *select, int *n, d *t, int *ldt, d *vl, int *ldvl, d *vr, int *ldvr, int *mm, int *m, d *work, int *info) nogil
+cdef void dtrevc(char *side, char *howmny, bint *select, int *n, d *t, int *ldt, d *vl, int *ldvl, d *vr, int *ldvr, int *mm, int *m, d *work, int *info) nogil:
+    _fortran_dtrevc(side, howmny, select, n, t, ldt, vl, ldvl, vr, ldvr, mm, m, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtrexc "F_FUNC(dtrexc,DTREXC)"(char *compq, int *n, d *t, int *ldt, d *q, int *ldq, int *ifst, int *ilst, d *work, int *info) nogil
+cdef void dtrexc(char *compq, int *n, d *t, int *ldt, d *q, int *ldq, int *ifst, int *ilst, d *work, int *info) nogil:
+    _fortran_dtrexc(compq, n, t, ldt, q, ldq, ifst, ilst, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtrrfs "F_FUNC(dtrrfs,DTRRFS)"(char *uplo, char *trans, char *diag, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil
+cdef void dtrrfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil:
+    _fortran_dtrrfs(uplo, trans, diag, n, nrhs, a, lda, b, ldb, x, ldx, ferr, berr, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtrsen "F_FUNC(dtrsen,DTRSEN)"(char *job, char *compq, bint *select, int *n, d *t, int *ldt, d *q, int *ldq, d *wr, d *wi, int *m, d *s, d *sep, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+cdef void dtrsen(char *job, char *compq, bint *select, int *n, d *t, int *ldt, d *q, int *ldq, d *wr, d *wi, int *m, d *s, d *sep, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_dtrsen(job, compq, select, n, t, ldt, q, ldq, wr, wi, m, s, sep, work, lwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtrsna "F_FUNC(dtrsna,DTRSNA)"(char *job, char *howmny, bint *select, int *n, d *t, int *ldt, d *vl, int *ldvl, d *vr, int *ldvr, d *s, d *sep, int *mm, int *m, d *work, int *ldwork, int *iwork, int *info) nogil
+cdef void dtrsna(char *job, char *howmny, bint *select, int *n, d *t, int *ldt, d *vl, int *ldvl, d *vr, int *ldvr, d *s, d *sep, int *mm, int *m, d *work, int *ldwork, int *iwork, int *info) nogil:
+    _fortran_dtrsna(job, howmny, select, n, t, ldt, vl, ldvl, vr, ldvr, s, sep, mm, m, work, ldwork, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtrsyl "F_FUNC(dtrsyl,DTRSYL)"(char *trana, char *tranb, int *isgn, int *m, int *n, d *a, int *lda, d *b, int *ldb, d *c, int *ldc, d *scale, int *info) nogil
+cdef void dtrsyl(char *trana, char *tranb, int *isgn, int *m, int *n, d *a, int *lda, d *b, int *ldb, d *c, int *ldc, d *scale, int *info) nogil:
+    _fortran_dtrsyl(trana, tranb, isgn, m, n, a, lda, b, ldb, c, ldc, scale, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtrti2 "F_FUNC(dtrti2,DTRTI2)"(char *uplo, char *diag, int *n, d *a, int *lda, int *info) nogil
+cdef void dtrti2(char *uplo, char *diag, int *n, d *a, int *lda, int *info) nogil:
+    _fortran_dtrti2(uplo, diag, n, a, lda, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtrtri "F_FUNC(dtrtri,DTRTRI)"(char *uplo, char *diag, int *n, d *a, int *lda, int *info) nogil
+cdef void dtrtri(char *uplo, char *diag, int *n, d *a, int *lda, int *info) nogil:
+    _fortran_dtrtri(uplo, diag, n, a, lda, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtrtrs "F_FUNC(dtrtrs,DTRTRS)"(char *uplo, char *trans, char *diag, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, int *info) nogil
+cdef void dtrtrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, int *info) nogil:
+    _fortran_dtrtrs(uplo, trans, diag, n, nrhs, a, lda, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtrttf "F_FUNC(dtrttf,DTRTTF)"(char *transr, char *uplo, int *n, d *a, int *lda, d *arf, int *info) nogil
+cdef void dtrttf(char *transr, char *uplo, int *n, d *a, int *lda, d *arf, int *info) nogil:
+    _fortran_dtrttf(transr, uplo, n, a, lda, arf, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtrttp "F_FUNC(dtrttp,DTRTTP)"(char *uplo, int *n, d *a, int *lda, d *ap, int *info) nogil
+cdef void dtrttp(char *uplo, int *n, d *a, int *lda, d *ap, int *info) nogil:
+    _fortran_dtrttp(uplo, n, a, lda, ap, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_dtzrzf "F_FUNC(dtzrzf,DTZRZF)"(int *m, int *n, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil
+cdef void dtzrzf(int *m, int *n, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil:
+    _fortran_dtzrzf(m, n, a, lda, tau, work, lwork, info)
+
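+# ilaver reports the version of the linked LAPACK library
+# (major, minor, patch).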
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ilaver "F_FUNC(ilaver,ILAVER)"(int *vers_major, int *vers_minor, int *vers_patch) nogil
+cdef void ilaver(int *vers_major, int *vers_minor, int *vers_patch) nogil:
+    _fortran_ilaver(vers_major, vers_minor, vers_patch)
+
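+# The double-precision (d-prefixed) routines end here; the single-precision
+# (s-prefixed) wrappers below mirror them with float arguments.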
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sbbcsd "F_FUNC(sbbcsd,SBBCSD)"(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, int *m, int *p, int *q, s *theta, s *phi, s *u1, int *ldu1, s *u2, int *ldu2, s *v1t, int *ldv1t, s *v2t, int *ldv2t, s *b11d, s *b11e, s *b12d, s *b12e, s *b21d, s *b21e, s *b22d, s *b22e, s *work, int *lwork, int *info) nogil
+cdef void sbbcsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, int *m, int *p, int *q, s *theta, s *phi, s *u1, int *ldu1, s *u2, int *ldu2, s *v1t, int *ldv1t, s *v2t, int *ldv2t, s *b11d, s *b11e, s *b12d, s *b12e, s *b21d, s *b21e, s *b22d, s *b22e, s *work, int *lwork, int *info) nogil:
+    _fortran_sbbcsd(jobu1, jobu2, jobv1t, jobv2t, trans, m, p, q, theta, phi, u1, ldu1, u2, ldu2, v1t, ldv1t, v2t, ldv2t, b11d, b11e, b12d, b12e, b21d, b21e, b22d, b22e, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sbdsdc "F_FUNC(sbdsdc,SBDSDC)"(char *uplo, char *compq, int *n, s *d, s *e, s *u, int *ldu, s *vt, int *ldvt, s *q, int *iq, s *work, int *iwork, int *info) nogil
+cdef void sbdsdc(char *uplo, char *compq, int *n, s *d, s *e, s *u, int *ldu, s *vt, int *ldvt, s *q, int *iq, s *work, int *iwork, int *info) nogil:
+    _fortran_sbdsdc(uplo, compq, n, d, e, u, ldu, vt, ldvt, q, iq, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sbdsqr "F_FUNC(sbdsqr,SBDSQR)"(char *uplo, int *n, int *ncvt, int *nru, int *ncc, s *d, s *e, s *vt, int *ldvt, s *u, int *ldu, s *c, int *ldc, s *work, int *info) nogil
+cdef void sbdsqr(char *uplo, int *n, int *ncvt, int *nru, int *ncc, s *d, s *e, s *vt, int *ldvt, s *u, int *ldu, s *c, int *ldc, s *work, int *info) nogil:
+    _fortran_sbdsqr(uplo, n, ncvt, nru, ncc, d, e, vt, ldvt, u, ldu, c, ldc, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sdisna "F_FUNC(sdisna,SDISNA)"(char *job, int *m, int *n, s *d, s *sep, int *info) nogil
+cdef void sdisna(char *job, int *m, int *n, s *d, s *sep, int *info) nogil:
+    _fortran_sdisna(job, m, n, d, sep, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgbbrd "F_FUNC(sgbbrd,SGBBRD)"(char *vect, int *m, int *n, int *ncc, int *kl, int *ku, s *ab, int *ldab, s *d, s *e, s *q, int *ldq, s *pt, int *ldpt, s *c, int *ldc, s *work, int *info) nogil
+cdef void sgbbrd(char *vect, int *m, int *n, int *ncc, int *kl, int *ku, s *ab, int *ldab, s *d, s *e, s *q, int *ldq, s *pt, int *ldpt, s *c, int *ldc, s *work, int *info) nogil:
+    _fortran_sgbbrd(vect, m, n, ncc, kl, ku, ab, ldab, d, e, q, ldq, pt, ldpt, c, ldc, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgbcon "F_FUNC(sgbcon,SGBCON)"(char *norm, int *n, int *kl, int *ku, s *ab, int *ldab, int *ipiv, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil
+cdef void sgbcon(char *norm, int *n, int *kl, int *ku, s *ab, int *ldab, int *ipiv, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil:
+    _fortran_sgbcon(norm, n, kl, ku, ab, ldab, ipiv, anorm, rcond, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgbequ "F_FUNC(sgbequ,SGBEQU)"(int *m, int *n, int *kl, int *ku, s *ab, int *ldab, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil
+cdef void sgbequ(int *m, int *n, int *kl, int *ku, s *ab, int *ldab, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil:
+    _fortran_sgbequ(m, n, kl, ku, ab, ldab, r, c, rowcnd, colcnd, amax, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgbequb "F_FUNC(sgbequb,SGBEQUB)"(int *m, int *n, int *kl, int *ku, s *ab, int *ldab, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil
+cdef void sgbequb(int *m, int *n, int *kl, int *ku, s *ab, int *ldab, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil:
+    _fortran_sgbequb(m, n, kl, ku, ab, ldab, r, c, rowcnd, colcnd, amax, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgbrfs "F_FUNC(sgbrfs,SGBRFS)"(char *trans, int *n, int *kl, int *ku, int *nrhs, s *ab, int *ldab, s *afb, int *ldafb, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+cdef void sgbrfs(char *trans, int *n, int *kl, int *ku, int *nrhs, s *ab, int *ldab, s *afb, int *ldafb, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil:
+    _fortran_sgbrfs(trans, n, kl, ku, nrhs, ab, ldab, afb, ldafb, ipiv, b, ldb, x, ldx, ferr, berr, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgbsv "F_FUNC(sgbsv,SGBSV)"(int *n, int *kl, int *ku, int *nrhs, s *ab, int *ldab, int *ipiv, s *b, int *ldb, int *info) nogil
+cdef void sgbsv(int *n, int *kl, int *ku, int *nrhs, s *ab, int *ldab, int *ipiv, s *b, int *ldb, int *info) nogil:
+    _fortran_sgbsv(n, kl, ku, nrhs, ab, ldab, ipiv, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgbsvx "F_FUNC(sgbsvx,SGBSVX)"(char *fact, char *trans, int *n, int *kl, int *ku, int *nrhs, s *ab, int *ldab, s *afb, int *ldafb, int *ipiv, char *equed, s *r, s *c, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+cdef void sgbsvx(char *fact, char *trans, int *n, int *kl, int *ku, int *nrhs, s *ab, int *ldab, s *afb, int *ldafb, int *ipiv, char *equed, s *r, s *c, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil:
+    _fortran_sgbsvx(fact, trans, n, kl, ku, nrhs, ab, ldab, afb, ldafb, ipiv, equed, r, c, b, ldb, x, ldx, rcond, ferr, berr, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgbtf2 "F_FUNC(sgbtf2,SGBTF2)"(int *m, int *n, int *kl, int *ku, s *ab, int *ldab, int *ipiv, int *info) nogil
+cdef void sgbtf2(int *m, int *n, int *kl, int *ku, s *ab, int *ldab, int *ipiv, int *info) nogil:
+    _fortran_sgbtf2(m, n, kl, ku, ab, ldab, ipiv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgbtrf "F_FUNC(sgbtrf,SGBTRF)"(int *m, int *n, int *kl, int *ku, s *ab, int *ldab, int *ipiv, int *info) nogil
+cdef void sgbtrf(int *m, int *n, int *kl, int *ku, s *ab, int *ldab, int *ipiv, int *info) nogil:
+    _fortran_sgbtrf(m, n, kl, ku, ab, ldab, ipiv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgbtrs "F_FUNC(sgbtrs,SGBTRS)"(char *trans, int *n, int *kl, int *ku, int *nrhs, s *ab, int *ldab, int *ipiv, s *b, int *ldb, int *info) nogil
+cdef void sgbtrs(char *trans, int *n, int *kl, int *ku, int *nrhs, s *ab, int *ldab, int *ipiv, s *b, int *ldb, int *info) nogil:
+    _fortran_sgbtrs(trans, n, kl, ku, nrhs, ab, ldab, ipiv, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgebak "F_FUNC(sgebak,SGEBAK)"(char *job, char *side, int *n, int *ilo, int *ihi, s *scale, int *m, s *v, int *ldv, int *info) nogil
+cdef void sgebak(char *job, char *side, int *n, int *ilo, int *ihi, s *scale, int *m, s *v, int *ldv, int *info) nogil:
+    _fortran_sgebak(job, side, n, ilo, ihi, scale, m, v, ldv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgebal "F_FUNC(sgebal,SGEBAL)"(char *job, int *n, s *a, int *lda, int *ilo, int *ihi, s *scale, int *info) nogil
+cdef void sgebal(char *job, int *n, s *a, int *lda, int *ilo, int *ihi, s *scale, int *info) nogil:
+    _fortran_sgebal(job, n, a, lda, ilo, ihi, scale, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgebd2 "F_FUNC(sgebd2,SGEBD2)"(int *m, int *n, s *a, int *lda, s *d, s *e, s *tauq, s *taup, s *work, int *info) nogil
+cdef void sgebd2(int *m, int *n, s *a, int *lda, s *d, s *e, s *tauq, s *taup, s *work, int *info) nogil:
+    _fortran_sgebd2(m, n, a, lda, d, e, tauq, taup, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgebrd "F_FUNC(sgebrd,SGEBRD)"(int *m, int *n, s *a, int *lda, s *d, s *e, s *tauq, s *taup, s *work, int *lwork, int *info) nogil
+cdef void sgebrd(int *m, int *n, s *a, int *lda, s *d, s *e, s *tauq, s *taup, s *work, int *lwork, int *info) nogil:
+    _fortran_sgebrd(m, n, a, lda, d, e, tauq, taup, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgecon "F_FUNC(sgecon,SGECON)"(char *norm, int *n, s *a, int *lda, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil
+cdef void sgecon(char *norm, int *n, s *a, int *lda, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil:
+    _fortran_sgecon(norm, n, a, lda, anorm, rcond, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgeequ "F_FUNC(sgeequ,SGEEQU)"(int *m, int *n, s *a, int *lda, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil
+cdef void sgeequ(int *m, int *n, s *a, int *lda, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil:
+    _fortran_sgeequ(m, n, a, lda, r, c, rowcnd, colcnd, amax, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgeequb "F_FUNC(sgeequb,SGEEQUB)"(int *m, int *n, s *a, int *lda, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil
+cdef void sgeequb(int *m, int *n, s *a, int *lda, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil:
+    _fortran_sgeequb(m, n, a, lda, r, c, rowcnd, colcnd, amax, info)
+
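+# sgees/sgeesx take a Schur-form selector callback: the wrapper accepts the
+# public sselect2 type and casts it to the header's _sselect2 before
+# forwarding. The same pattern recurs for sgges/sggesx with sselect3.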
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgees "F_FUNC(sgees,SGEES)"(char *jobvs, char *sort, _sselect2 *select, int *n, s *a, int *lda, int *sdim, s *wr, s *wi, s *vs, int *ldvs, s *work, int *lwork, bint *bwork, int *info) nogil
+cdef void sgees(char *jobvs, char *sort, sselect2 *select, int *n, s *a, int *lda, int *sdim, s *wr, s *wi, s *vs, int *ldvs, s *work, int *lwork, bint *bwork, int *info) nogil:
+    _fortran_sgees(jobvs, sort, <_sselect2*>select, n, a, lda, sdim, wr, wi, vs, ldvs, work, lwork, bwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgeesx "F_FUNC(sgeesx,SGEESX)"(char *jobvs, char *sort, _sselect2 *select, char *sense, int *n, s *a, int *lda, int *sdim, s *wr, s *wi, s *vs, int *ldvs, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info) nogil
+cdef void sgeesx(char *jobvs, char *sort, sselect2 *select, char *sense, int *n, s *a, int *lda, int *sdim, s *wr, s *wi, s *vs, int *ldvs, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info) nogil:
+    _fortran_sgeesx(jobvs, sort, <_sselect2*>select, sense, n, a, lda, sdim, wr, wi, vs, ldvs, rconde, rcondv, work, lwork, iwork, liwork, bwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgeev "F_FUNC(sgeev,SGEEV)"(char *jobvl, char *jobvr, int *n, s *a, int *lda, s *wr, s *wi, s *vl, int *ldvl, s *vr, int *ldvr, s *work, int *lwork, int *info) nogil
+cdef void sgeev(char *jobvl, char *jobvr, int *n, s *a, int *lda, s *wr, s *wi, s *vl, int *ldvl, s *vr, int *ldvr, s *work, int *lwork, int *info) nogil:
+    _fortran_sgeev(jobvl, jobvr, n, a, lda, wr, wi, vl, ldvl, vr, ldvr, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgeevx "F_FUNC(sgeevx,SGEEVX)"(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, s *a, int *lda, s *wr, s *wi, s *vl, int *ldvl, s *vr, int *ldvr, int *ilo, int *ihi, s *scale, s *abnrm, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, int *info) nogil
+cdef void sgeevx(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, s *a, int *lda, s *wr, s *wi, s *vl, int *ldvl, s *vr, int *ldvr, int *ilo, int *ihi, s *scale, s *abnrm, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, int *info) nogil:
+    _fortran_sgeevx(balanc, jobvl, jobvr, sense, n, a, lda, wr, wi, vl, ldvl, vr, ldvr, ilo, ihi, scale, abnrm, rconde, rcondv, work, lwork, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgehd2 "F_FUNC(sgehd2,SGEHD2)"(int *n, int *ilo, int *ihi, s *a, int *lda, s *tau, s *work, int *info) nogil
+cdef void sgehd2(int *n, int *ilo, int *ihi, s *a, int *lda, s *tau, s *work, int *info) nogil:
+    _fortran_sgehd2(n, ilo, ihi, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgehrd "F_FUNC(sgehrd,SGEHRD)"(int *n, int *ilo, int *ihi, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+cdef void sgehrd(int *n, int *ilo, int *ihi, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil:
+    _fortran_sgehrd(n, ilo, ihi, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgejsv "F_FUNC(sgejsv,SGEJSV)"(char *joba, char *jobu, char *jobv, char *jobr, char *jobt, char *jobp, int *m, int *n, s *a, int *lda, s *sva, s *u, int *ldu, s *v, int *ldv, s *work, int *lwork, int *iwork, int *info) nogil
+cdef void sgejsv(char *joba, char *jobu, char *jobv, char *jobr, char *jobt, char *jobp, int *m, int *n, s *a, int *lda, s *sva, s *u, int *ldu, s *v, int *ldv, s *work, int *lwork, int *iwork, int *info) nogil:
+    _fortran_sgejsv(joba, jobu, jobv, jobr, jobt, jobp, m, n, a, lda, sva, u, ldu, v, ldv, work, lwork, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgelq2 "F_FUNC(sgelq2,SGELQ2)"(int *m, int *n, s *a, int *lda, s *tau, s *work, int *info) nogil
+cdef void sgelq2(int *m, int *n, s *a, int *lda, s *tau, s *work, int *info) nogil:
+    _fortran_sgelq2(m, n, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgelqf "F_FUNC(sgelqf,SGELQF)"(int *m, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+cdef void sgelqf(int *m, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil:
+    _fortran_sgelqf(m, n, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgels "F_FUNC(sgels,SGELS)"(char *trans, int *m, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, s *work, int *lwork, int *info) nogil
+cdef void sgels(char *trans, int *m, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, s *work, int *lwork, int *info) nogil:
+    _fortran_sgels(trans, m, n, nrhs, a, lda, b, ldb, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgelsd "F_FUNC(sgelsd,SGELSD)"(int *m, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, s *s, s *rcond, int *rank, s *work, int *lwork, int *iwork, int *info) nogil
+cdef void sgelsd(int *m, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, s *s, s *rcond, int *rank, s *work, int *lwork, int *iwork, int *info) nogil:
+    _fortran_sgelsd(m, n, nrhs, a, lda, b, ldb, s, rcond, rank, work, lwork, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgelss "F_FUNC(sgelss,SGELSS)"(int *m, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, s *s, s *rcond, int *rank, s *work, int *lwork, int *info) nogil
+cdef void sgelss(int *m, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, s *s, s *rcond, int *rank, s *work, int *lwork, int *info) nogil:
+    _fortran_sgelss(m, n, nrhs, a, lda, b, ldb, s, rcond, rank, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgelsy "F_FUNC(sgelsy,SGELSY)"(int *m, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, int *jpvt, s *rcond, int *rank, s *work, int *lwork, int *info) nogil
+cdef void sgelsy(int *m, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, int *jpvt, s *rcond, int *rank, s *work, int *lwork, int *info) nogil:
+    _fortran_sgelsy(m, n, nrhs, a, lda, b, ldb, jpvt, rcond, rank, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgemqrt "F_FUNC(sgemqrt,SGEMQRT)"(char *side, char *trans, int *m, int *n, int *k, int *nb, s *v, int *ldv, s *t, int *ldt, s *c, int *ldc, s *work, int *info) nogil
+cdef void sgemqrt(char *side, char *trans, int *m, int *n, int *k, int *nb, s *v, int *ldv, s *t, int *ldt, s *c, int *ldc, s *work, int *info) nogil:
+    _fortran_sgemqrt(side, trans, m, n, k, nb, v, ldv, t, ldt, c, ldc, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgeql2 "F_FUNC(sgeql2,SGEQL2)"(int *m, int *n, s *a, int *lda, s *tau, s *work, int *info) nogil
+cdef void sgeql2(int *m, int *n, s *a, int *lda, s *tau, s *work, int *info) nogil:
+    _fortran_sgeql2(m, n, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgeqlf "F_FUNC(sgeqlf,SGEQLF)"(int *m, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+cdef void sgeqlf(int *m, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil:
+    _fortran_sgeqlf(m, n, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgeqp3 "F_FUNC(sgeqp3,SGEQP3)"(int *m, int *n, s *a, int *lda, int *jpvt, s *tau, s *work, int *lwork, int *info) nogil
+cdef void sgeqp3(int *m, int *n, s *a, int *lda, int *jpvt, s *tau, s *work, int *lwork, int *info) nogil:
+    _fortran_sgeqp3(m, n, a, lda, jpvt, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgeqr2 "F_FUNC(sgeqr2,SGEQR2)"(int *m, int *n, s *a, int *lda, s *tau, s *work, int *info) nogil
+cdef void sgeqr2(int *m, int *n, s *a, int *lda, s *tau, s *work, int *info) nogil:
+    _fortran_sgeqr2(m, n, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgeqr2p "F_FUNC(sgeqr2p,SGEQR2P)"(int *m, int *n, s *a, int *lda, s *tau, s *work, int *info) nogil
+cdef void sgeqr2p(int *m, int *n, s *a, int *lda, s *tau, s *work, int *info) nogil:
+    _fortran_sgeqr2p(m, n, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgeqrf "F_FUNC(sgeqrf,SGEQRF)"(int *m, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+cdef void sgeqrf(int *m, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil:
+    _fortran_sgeqrf(m, n, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgeqrfp "F_FUNC(sgeqrfp,SGEQRFP)"(int *m, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+cdef void sgeqrfp(int *m, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil:
+    _fortran_sgeqrfp(m, n, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgeqrt "F_FUNC(sgeqrt,SGEQRT)"(int *m, int *n, int *nb, s *a, int *lda, s *t, int *ldt, s *work, int *info) nogil
+cdef void sgeqrt(int *m, int *n, int *nb, s *a, int *lda, s *t, int *ldt, s *work, int *info) nogil:
+    _fortran_sgeqrt(m, n, nb, a, lda, t, ldt, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgeqrt2 "F_FUNC(sgeqrt2,SGEQRT2)"(int *m, int *n, s *a, int *lda, s *t, int *ldt, int *info) nogil
+cdef void sgeqrt2(int *m, int *n, s *a, int *lda, s *t, int *ldt, int *info) nogil:
+    _fortran_sgeqrt2(m, n, a, lda, t, ldt, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgeqrt3 "F_FUNC(sgeqrt3,SGEQRT3)"(int *m, int *n, s *a, int *lda, s *t, int *ldt, int *info) nogil
+cdef void sgeqrt3(int *m, int *n, s *a, int *lda, s *t, int *ldt, int *info) nogil:
+    _fortran_sgeqrt3(m, n, a, lda, t, ldt, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgerfs "F_FUNC(sgerfs,SGERFS)"(char *trans, int *n, int *nrhs, s *a, int *lda, s *af, int *ldaf, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+cdef void sgerfs(char *trans, int *n, int *nrhs, s *a, int *lda, s *af, int *ldaf, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil:
+    _fortran_sgerfs(trans, n, nrhs, a, lda, af, ldaf, ipiv, b, ldb, x, ldx, ferr, berr, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgerq2 "F_FUNC(sgerq2,SGERQ2)"(int *m, int *n, s *a, int *lda, s *tau, s *work, int *info) nogil
+cdef void sgerq2(int *m, int *n, s *a, int *lda, s *tau, s *work, int *info) nogil:
+    _fortran_sgerq2(m, n, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgerqf "F_FUNC(sgerqf,SGERQF)"(int *m, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+cdef void sgerqf(int *m, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil:
+    _fortran_sgerqf(m, n, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgesc2 "F_FUNC(sgesc2,SGESC2)"(int *n, s *a, int *lda, s *rhs, int *ipiv, int *jpiv, s *scale) nogil
+cdef void sgesc2(int *n, s *a, int *lda, s *rhs, int *ipiv, int *jpiv, s *scale) nogil:
+    _fortran_sgesc2(n, a, lda, rhs, ipiv, jpiv, scale)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgesdd "F_FUNC(sgesdd,SGESDD)"(char *jobz, int *m, int *n, s *a, int *lda, s *s, s *u, int *ldu, s *vt, int *ldvt, s *work, int *lwork, int *iwork, int *info) nogil
+cdef void sgesdd(char *jobz, int *m, int *n, s *a, int *lda, s *s, s *u, int *ldu, s *vt, int *ldvt, s *work, int *lwork, int *iwork, int *info) nogil:
+    _fortran_sgesdd(jobz, m, n, a, lda, s, u, ldu, vt, ldvt, work, lwork, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgesv "F_FUNC(sgesv,SGESV)"(int *n, int *nrhs, s *a, int *lda, int *ipiv, s *b, int *ldb, int *info) nogil
+cdef void sgesv(int *n, int *nrhs, s *a, int *lda, int *ipiv, s *b, int *ldb, int *info) nogil:
+    _fortran_sgesv(n, nrhs, a, lda, ipiv, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgesvd "F_FUNC(sgesvd,SGESVD)"(char *jobu, char *jobvt, int *m, int *n, s *a, int *lda, s *s, s *u, int *ldu, s *vt, int *ldvt, s *work, int *lwork, int *info) nogil
+cdef void sgesvd(char *jobu, char *jobvt, int *m, int *n, s *a, int *lda, s *s, s *u, int *ldu, s *vt, int *ldvt, s *work, int *lwork, int *info) nogil:
+    _fortran_sgesvd(jobu, jobvt, m, n, a, lda, s, u, ldu, vt, ldvt, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgesvj "F_FUNC(sgesvj,SGESVJ)"(char *joba, char *jobu, char *jobv, int *m, int *n, s *a, int *lda, s *sva, int *mv, s *v, int *ldv, s *work, int *lwork, int *info) nogil
+cdef void sgesvj(char *joba, char *jobu, char *jobv, int *m, int *n, s *a, int *lda, s *sva, int *mv, s *v, int *ldv, s *work, int *lwork, int *info) nogil:
+    _fortran_sgesvj(joba, jobu, jobv, m, n, a, lda, sva, mv, v, ldv, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgesvx "F_FUNC(sgesvx,SGESVX)"(char *fact, char *trans, int *n, int *nrhs, s *a, int *lda, s *af, int *ldaf, int *ipiv, char *equed, s *r, s *c, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+cdef void sgesvx(char *fact, char *trans, int *n, int *nrhs, s *a, int *lda, s *af, int *ldaf, int *ipiv, char *equed, s *r, s *c, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil:
+    _fortran_sgesvx(fact, trans, n, nrhs, a, lda, af, ldaf, ipiv, equed, r, c, b, ldb, x, ldx, rcond, ferr, berr, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgetc2 "F_FUNC(sgetc2,SGETC2)"(int *n, s *a, int *lda, int *ipiv, int *jpiv, int *info) nogil
+cdef void sgetc2(int *n, s *a, int *lda, int *ipiv, int *jpiv, int *info) nogil:
+    _fortran_sgetc2(n, a, lda, ipiv, jpiv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgetf2 "F_FUNC(sgetf2,SGETF2)"(int *m, int *n, s *a, int *lda, int *ipiv, int *info) nogil
+cdef void sgetf2(int *m, int *n, s *a, int *lda, int *ipiv, int *info) nogil:
+    _fortran_sgetf2(m, n, a, lda, ipiv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgetrf "F_FUNC(sgetrf,SGETRF)"(int *m, int *n, s *a, int *lda, int *ipiv, int *info) nogil
+cdef void sgetrf(int *m, int *n, s *a, int *lda, int *ipiv, int *info) nogil:
+    _fortran_sgetrf(m, n, a, lda, ipiv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgetri "F_FUNC(sgetri,SGETRI)"(int *n, s *a, int *lda, int *ipiv, s *work, int *lwork, int *info) nogil
+cdef void sgetri(int *n, s *a, int *lda, int *ipiv, s *work, int *lwork, int *info) nogil:
+    _fortran_sgetri(n, a, lda, ipiv, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgetrs "F_FUNC(sgetrs,SGETRS)"(char *trans, int *n, int *nrhs, s *a, int *lda, int *ipiv, s *b, int *ldb, int *info) nogil
+cdef void sgetrs(char *trans, int *n, int *nrhs, s *a, int *lda, int *ipiv, s *b, int *ldb, int *info) nogil:
+    _fortran_sgetrs(trans, n, nrhs, a, lda, ipiv, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sggbak "F_FUNC(sggbak,SGGBAK)"(char *job, char *side, int *n, int *ilo, int *ihi, s *lscale, s *rscale, int *m, s *v, int *ldv, int *info) nogil
+cdef void sggbak(char *job, char *side, int *n, int *ilo, int *ihi, s *lscale, s *rscale, int *m, s *v, int *ldv, int *info) nogil:
+    _fortran_sggbak(job, side, n, ilo, ihi, lscale, rscale, m, v, ldv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sggbal "F_FUNC(sggbal,SGGBAL)"(char *job, int *n, s *a, int *lda, s *b, int *ldb, int *ilo, int *ihi, s *lscale, s *rscale, s *work, int *info) nogil
+cdef void sggbal(char *job, int *n, s *a, int *lda, s *b, int *ldb, int *ilo, int *ihi, s *lscale, s *rscale, s *work, int *info) nogil:
+    _fortran_sggbal(job, n, a, lda, b, ldb, ilo, ihi, lscale, rscale, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgges "F_FUNC(sgges,SGGES)"(char *jobvsl, char *jobvsr, char *sort, _sselect3 *selctg, int *n, s *a, int *lda, s *b, int *ldb, int *sdim, s *alphar, s *alphai, s *beta, s *vsl, int *ldvsl, s *vsr, int *ldvsr, s *work, int *lwork, bint *bwork, int *info) nogil
+cdef void sgges(char *jobvsl, char *jobvsr, char *sort, sselect3 *selctg, int *n, s *a, int *lda, s *b, int *ldb, int *sdim, s *alphar, s *alphai, s *beta, s *vsl, int *ldvsl, s *vsr, int *ldvsr, s *work, int *lwork, bint *bwork, int *info) nogil:
+    _fortran_sgges(jobvsl, jobvsr, sort, <_sselect3*>selctg, n, a, lda, b, ldb, sdim, alphar, alphai, beta, vsl, ldvsl, vsr, ldvsr, work, lwork, bwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sggesx "F_FUNC(sggesx,SGGESX)"(char *jobvsl, char *jobvsr, char *sort, _sselect3 *selctg, char *sense, int *n, s *a, int *lda, s *b, int *ldb, int *sdim, s *alphar, s *alphai, s *beta, s *vsl, int *ldvsl, s *vsr, int *ldvsr, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info) nogil
+cdef void sggesx(char *jobvsl, char *jobvsr, char *sort, sselect3 *selctg, char *sense, int *n, s *a, int *lda, s *b, int *ldb, int *sdim, s *alphar, s *alphai, s *beta, s *vsl, int *ldvsl, s *vsr, int *ldvsr, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info) nogil:
+    _fortran_sggesx(jobvsl, jobvsr, sort, <_sselect3*>selctg, sense, n, a, lda, b, ldb, sdim, alphar, alphai, beta, vsl, ldvsl, vsr, ldvsr, rconde, rcondv, work, lwork, iwork, liwork, bwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sggev "F_FUNC(sggev,SGGEV)"(char *jobvl, char *jobvr, int *n, s *a, int *lda, s *b, int *ldb, s *alphar, s *alphai, s *beta, s *vl, int *ldvl, s *vr, int *ldvr, s *work, int *lwork, int *info) nogil
+cdef void sggev(char *jobvl, char *jobvr, int *n, s *a, int *lda, s *b, int *ldb, s *alphar, s *alphai, s *beta, s *vl, int *ldvl, s *vr, int *ldvr, s *work, int *lwork, int *info) nogil:
+    _fortran_sggev(jobvl, jobvr, n, a, lda, b, ldb, alphar, alphai, beta, vl, ldvl, vr, ldvr, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sggevx "F_FUNC(sggevx,SGGEVX)"(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, s *a, int *lda, s *b, int *ldb, s *alphar, s *alphai, s *beta, s *vl, int *ldvl, s *vr, int *ldvr, int *ilo, int *ihi, s *lscale, s *rscale, s *abnrm, s *bbnrm, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, bint *bwork, int *info) nogil
+cdef void sggevx(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, s *a, int *lda, s *b, int *ldb, s *alphar, s *alphai, s *beta, s *vl, int *ldvl, s *vr, int *ldvr, int *ilo, int *ihi, s *lscale, s *rscale, s *abnrm, s *bbnrm, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, bint *bwork, int *info) nogil:
+    _fortran_sggevx(balanc, jobvl, jobvr, sense, n, a, lda, b, ldb, alphar, alphai, beta, vl, ldvl, vr, ldvr, ilo, ihi, lscale, rscale, abnrm, bbnrm, rconde, rcondv, work, lwork, iwork, bwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sggglm "F_FUNC(sggglm,SGGGLM)"(int *n, int *m, int *p, s *a, int *lda, s *b, int *ldb, s *d, s *x, s *y, s *work, int *lwork, int *info) nogil
+cdef void sggglm(int *n, int *m, int *p, s *a, int *lda, s *b, int *ldb, s *d, s *x, s *y, s *work, int *lwork, int *info) nogil:
+    _fortran_sggglm(n, m, p, a, lda, b, ldb, d, x, y, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgghrd "F_FUNC(sgghrd,SGGHRD)"(char *compq, char *compz, int *n, int *ilo, int *ihi, s *a, int *lda, s *b, int *ldb, s *q, int *ldq, s *z, int *ldz, int *info) nogil
+cdef void sgghrd(char *compq, char *compz, int *n, int *ilo, int *ihi, s *a, int *lda, s *b, int *ldb, s *q, int *ldq, s *z, int *ldz, int *info) nogil:
+    _fortran_sgghrd(compq, compz, n, ilo, ihi, a, lda, b, ldb, q, ldq, z, ldz, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgglse "F_FUNC(sgglse,SGGLSE)"(int *m, int *n, int *p, s *a, int *lda, s *b, int *ldb, s *c, s *d, s *x, s *work, int *lwork, int *info) nogil
+cdef void sgglse(int *m, int *n, int *p, s *a, int *lda, s *b, int *ldb, s *c, s *d, s *x, s *work, int *lwork, int *info) nogil:
+    _fortran_sgglse(m, n, p, a, lda, b, ldb, c, d, x, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sggqrf "F_FUNC(sggqrf,SGGQRF)"(int *n, int *m, int *p, s *a, int *lda, s *taua, s *b, int *ldb, s *taub, s *work, int *lwork, int *info) nogil
+cdef void sggqrf(int *n, int *m, int *p, s *a, int *lda, s *taua, s *b, int *ldb, s *taub, s *work, int *lwork, int *info) nogil:
+    _fortran_sggqrf(n, m, p, a, lda, taua, b, ldb, taub, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sggrqf "F_FUNC(sggrqf,SGGRQF)"(int *m, int *p, int *n, s *a, int *lda, s *taua, s *b, int *ldb, s *taub, s *work, int *lwork, int *info) nogil
+cdef void sggrqf(int *m, int *p, int *n, s *a, int *lda, s *taua, s *b, int *ldb, s *taub, s *work, int *lwork, int *info) nogil:
+    _fortran_sggrqf(m, p, n, a, lda, taua, b, ldb, taub, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgsvj0 "F_FUNC(sgsvj0,SGSVJ0)"(char *jobv, int *m, int *n, s *a, int *lda, s *d, s *sva, int *mv, s *v, int *ldv, s *eps, s *sfmin, s *tol, int *nsweep, s *work, int *lwork, int *info) nogil
+cdef void sgsvj0(char *jobv, int *m, int *n, s *a, int *lda, s *d, s *sva, int *mv, s *v, int *ldv, s *eps, s *sfmin, s *tol, int *nsweep, s *work, int *lwork, int *info) nogil:
+    _fortran_sgsvj0(jobv, m, n, a, lda, d, sva, mv, v, ldv, eps, sfmin, tol, nsweep, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgsvj1 "F_FUNC(sgsvj1,SGSVJ1)"(char *jobv, int *m, int *n, int *n1, s *a, int *lda, s *d, s *sva, int *mv, s *v, int *ldv, s *eps, s *sfmin, s *tol, int *nsweep, s *work, int *lwork, int *info) nogil
+cdef void sgsvj1(char *jobv, int *m, int *n, int *n1, s *a, int *lda, s *d, s *sva, int *mv, s *v, int *ldv, s *eps, s *sfmin, s *tol, int *nsweep, s *work, int *lwork, int *info) nogil:
+    _fortran_sgsvj1(jobv, m, n, n1, a, lda, d, sva, mv, v, ldv, eps, sfmin, tol, nsweep, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgtcon "F_FUNC(sgtcon,SGTCON)"(char *norm, int *n, s *dl, s *d, s *du, s *du2, int *ipiv, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil
+cdef void sgtcon(char *norm, int *n, s *dl, s *d, s *du, s *du2, int *ipiv, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil:
+    _fortran_sgtcon(norm, n, dl, d, du, du2, ipiv, anorm, rcond, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgtrfs "F_FUNC(sgtrfs,SGTRFS)"(char *trans, int *n, int *nrhs, s *dl, s *d, s *du, s *dlf, s *df, s *duf, s *du2, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+cdef void sgtrfs(char *trans, int *n, int *nrhs, s *dl, s *d, s *du, s *dlf, s *df, s *duf, s *du2, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil:
+    _fortran_sgtrfs(trans, n, nrhs, dl, d, du, dlf, df, duf, du2, ipiv, b, ldb, x, ldx, ferr, berr, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgtsv "F_FUNC(sgtsv,SGTSV)"(int *n, int *nrhs, s *dl, s *d, s *du, s *b, int *ldb, int *info) nogil
+cdef void sgtsv(int *n, int *nrhs, s *dl, s *d, s *du, s *b, int *ldb, int *info) nogil:
+    _fortran_sgtsv(n, nrhs, dl, d, du, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgtsvx "F_FUNC(sgtsvx,SGTSVX)"(char *fact, char *trans, int *n, int *nrhs, s *dl, s *d, s *du, s *dlf, s *df, s *duf, s *du2, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+cdef void sgtsvx(char *fact, char *trans, int *n, int *nrhs, s *dl, s *d, s *du, s *dlf, s *df, s *duf, s *du2, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil:
+    _fortran_sgtsvx(fact, trans, n, nrhs, dl, d, du, dlf, df, duf, du2, ipiv, b, ldb, x, ldx, rcond, ferr, berr, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgttrf "F_FUNC(sgttrf,SGTTRF)"(int *n, s *dl, s *d, s *du, s *du2, int *ipiv, int *info) nogil
+cdef void sgttrf(int *n, s *dl, s *d, s *du, s *du2, int *ipiv, int *info) nogil:
+    _fortran_sgttrf(n, dl, d, du, du2, ipiv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgttrs "F_FUNC(sgttrs,SGTTRS)"(char *trans, int *n, int *nrhs, s *dl, s *d, s *du, s *du2, int *ipiv, s *b, int *ldb, int *info) nogil
+cdef void sgttrs(char *trans, int *n, int *nrhs, s *dl, s *d, s *du, s *du2, int *ipiv, s *b, int *ldb, int *info) nogil:
+    _fortran_sgttrs(trans, n, nrhs, dl, d, du, du2, ipiv, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sgtts2 "F_FUNC(sgtts2,SGTTS2)"(int *itrans, int *n, int *nrhs, s *dl, s *d, s *du, s *du2, int *ipiv, s *b, int *ldb) nogil
+cdef void sgtts2(int *itrans, int *n, int *nrhs, s *dl, s *d, s *du, s *du2, int *ipiv, s *b, int *ldb) nogil:
+    _fortran_sgtts2(itrans, n, nrhs, dl, d, du, du2, ipiv, b, ldb)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_shgeqz "F_FUNC(shgeqz,SHGEQZ)"(char *job, char *compq, char *compz, int *n, int *ilo, int *ihi, s *h, int *ldh, s *t, int *ldt, s *alphar, s *alphai, s *beta, s *q, int *ldq, s *z, int *ldz, s *work, int *lwork, int *info) nogil
+cdef void shgeqz(char *job, char *compq, char *compz, int *n, int *ilo, int *ihi, s *h, int *ldh, s *t, int *ldt, s *alphar, s *alphai, s *beta, s *q, int *ldq, s *z, int *ldz, s *work, int *lwork, int *info) nogil:
+    _fortran_shgeqz(job, compq, compz, n, ilo, ihi, h, ldh, t, ldt, alphar, alphai, beta, q, ldq, z, ldz, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_shsein "F_FUNC(shsein,SHSEIN)"(char *side, char *eigsrc, char *initv, bint *select, int *n, s *h, int *ldh, s *wr, s *wi, s *vl, int *ldvl, s *vr, int *ldvr, int *mm, int *m, s *work, int *ifaill, int *ifailr, int *info) nogil
+cdef void shsein(char *side, char *eigsrc, char *initv, bint *select, int *n, s *h, int *ldh, s *wr, s *wi, s *vl, int *ldvl, s *vr, int *ldvr, int *mm, int *m, s *work, int *ifaill, int *ifailr, int *info) nogil:
+    _fortran_shsein(side, eigsrc, initv, select, n, h, ldh, wr, wi, vl, ldvl, vr, ldvr, mm, m, work, ifaill, ifailr, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_shseqr "F_FUNC(shseqr,SHSEQR)"(char *job, char *compz, int *n, int *ilo, int *ihi, s *h, int *ldh, s *wr, s *wi, s *z, int *ldz, s *work, int *lwork, int *info) nogil
+cdef void shseqr(char *job, char *compz, int *n, int *ilo, int *ihi, s *h, int *ldh, s *wr, s *wi, s *z, int *ldz, s *work, int *lwork, int *info) nogil:
+    _fortran_shseqr(job, compz, n, ilo, ihi, h, ldh, wr, wi, z, ldz, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slabad "F_FUNC(slabad,SLABAD)"(s *small, s *large) nogil
+cdef void slabad(s *small, s *large) nogil:
+    _fortran_slabad(small, large)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slabrd "F_FUNC(slabrd,SLABRD)"(int *m, int *n, int *nb, s *a, int *lda, s *d, s *e, s *tauq, s *taup, s *x, int *ldx, s *y, int *ldy) nogil
+cdef void slabrd(int *m, int *n, int *nb, s *a, int *lda, s *d, s *e, s *tauq, s *taup, s *x, int *ldx, s *y, int *ldy) nogil:
+    _fortran_slabrd(m, n, nb, a, lda, d, e, tauq, taup, x, ldx, y, ldy)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slacn2 "F_FUNC(slacn2,SLACN2)"(int *n, s *v, s *x, int *isgn, s *est, int *kase, int *isave) nogil
+cdef void slacn2(int *n, s *v, s *x, int *isgn, s *est, int *kase, int *isave) nogil:
+    _fortran_slacn2(n, v, x, isgn, est, kase, isave)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slacon "F_FUNC(slacon,SLACON)"(int *n, s *v, s *x, int *isgn, s *est, int *kase) nogil
+cdef void slacon(int *n, s *v, s *x, int *isgn, s *est, int *kase) nogil:
+    _fortran_slacon(n, v, x, isgn, est, kase)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slacpy "F_FUNC(slacpy,SLACPY)"(char *uplo, int *m, int *n, s *a, int *lda, s *b, int *ldb) nogil
+cdef void slacpy(char *uplo, int *m, int *n, s *a, int *lda, s *b, int *ldb) nogil:
+    _fortran_slacpy(uplo, m, n, a, lda, b, ldb)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sladiv "F_FUNC(sladiv,SLADIV)"(s *a, s *b, s *c, s *d, s *p, s *q) nogil
+cdef void sladiv(s *a, s *b, s *c, s *d, s *p, s *q) nogil:
+    _fortran_sladiv(a, b, c, d, p, q)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slae2 "F_FUNC(slae2,SLAE2)"(s *a, s *b, s *c, s *rt1, s *rt2) nogil
+cdef void slae2(s *a, s *b, s *c, s *rt1, s *rt2) nogil:
+    _fortran_slae2(a, b, c, rt1, rt2)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slaebz "F_FUNC(slaebz,SLAEBZ)"(int *ijob, int *nitmax, int *n, int *mmax, int *minp, int *nbmin, s *abstol, s *reltol, s *pivmin, s *d, s *e, s *e2, int *nval, s *ab, s *c, int *mout, int *nab, s *work, int *iwork, int *info) nogil
+cdef void slaebz(int *ijob, int *nitmax, int *n, int *mmax, int *minp, int *nbmin, s *abstol, s *reltol, s *pivmin, s *d, s *e, s *e2, int *nval, s *ab, s *c, int *mout, int *nab, s *work, int *iwork, int *info) nogil:
+    _fortran_slaebz(ijob, nitmax, n, mmax, minp, nbmin, abstol, reltol, pivmin, d, e, e2, nval, ab, c, mout, nab, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slaed0 "F_FUNC(slaed0,SLAED0)"(int *icompq, int *qsiz, int *n, s *d, s *e, s *q, int *ldq, s *qstore, int *ldqs, s *work, int *iwork, int *info) nogil
+cdef void slaed0(int *icompq, int *qsiz, int *n, s *d, s *e, s *q, int *ldq, s *qstore, int *ldqs, s *work, int *iwork, int *info) nogil:
+    _fortran_slaed0(icompq, qsiz, n, d, e, q, ldq, qstore, ldqs, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slaed1 "F_FUNC(slaed1,SLAED1)"(int *n, s *d, s *q, int *ldq, int *indxq, s *rho, int *cutpnt, s *work, int *iwork, int *info) nogil
+cdef void slaed1(int *n, s *d, s *q, int *ldq, int *indxq, s *rho, int *cutpnt, s *work, int *iwork, int *info) nogil:
+    _fortran_slaed1(n, d, q, ldq, indxq, rho, cutpnt, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slaed2 "F_FUNC(slaed2,SLAED2)"(int *k, int *n, int *n1, s *d, s *q, int *ldq, int *indxq, s *rho, s *z, s *dlamda, s *w, s *q2, int *indx, int *indxc, int *indxp, int *coltyp, int *info) nogil
+cdef void slaed2(int *k, int *n, int *n1, s *d, s *q, int *ldq, int *indxq, s *rho, s *z, s *dlamda, s *w, s *q2, int *indx, int *indxc, int *indxp, int *coltyp, int *info) nogil:
+    _fortran_slaed2(k, n, n1, d, q, ldq, indxq, rho, z, dlamda, w, q2, indx, indxc, indxp, coltyp, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slaed3 "F_FUNC(slaed3,SLAED3)"(int *k, int *n, int *n1, s *d, s *q, int *ldq, s *rho, s *dlamda, s *q2, int *indx, int *ctot, s *w, s *s, int *info) nogil
+cdef void slaed3(int *k, int *n, int *n1, s *d, s *q, int *ldq, s *rho, s *dlamda, s *q2, int *indx, int *ctot, s *w, s *s, int *info) nogil:
+    _fortran_slaed3(k, n, n1, d, q, ldq, rho, dlamda, q2, indx, ctot, w, s, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slaed4 "F_FUNC(slaed4,SLAED4)"(int *n, int *i, s *d, s *z, s *delta, s *rho, s *dlam, int *info) nogil
+cdef void slaed4(int *n, int *i, s *d, s *z, s *delta, s *rho, s *dlam, int *info) nogil:
+    _fortran_slaed4(n, i, d, z, delta, rho, dlam, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slaed5 "F_FUNC(slaed5,SLAED5)"(int *i, s *d, s *z, s *delta, s *rho, s *dlam) nogil
+cdef void slaed5(int *i, s *d, s *z, s *delta, s *rho, s *dlam) nogil:
+    _fortran_slaed5(i, d, z, delta, rho, dlam)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slaed6 "F_FUNC(slaed6,SLAED6)"(int *kniter, bint *orgati, s *rho, s *d, s *z, s *finit, s *tau, int *info) nogil
+cdef void slaed6(int *kniter, bint *orgati, s *rho, s *d, s *z, s *finit, s *tau, int *info) nogil:
+    _fortran_slaed6(kniter, orgati, rho, d, z, finit, tau, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slaed7 "F_FUNC(slaed7,SLAED7)"(int *icompq, int *n, int *qsiz, int *tlvls, int *curlvl, int *curpbm, s *d, s *q, int *ldq, int *indxq, s *rho, int *cutpnt, s *qstore, int *qptr, int *prmptr, int *perm, int *givptr, int *givcol, s *givnum, s *work, int *iwork, int *info) nogil
+cdef void slaed7(int *icompq, int *n, int *qsiz, int *tlvls, int *curlvl, int *curpbm, s *d, s *q, int *ldq, int *indxq, s *rho, int *cutpnt, s *qstore, int *qptr, int *prmptr, int *perm, int *givptr, int *givcol, s *givnum, s *work, int *iwork, int *info) nogil:
+    _fortran_slaed7(icompq, n, qsiz, tlvls, curlvl, curpbm, d, q, ldq, indxq, rho, cutpnt, qstore, qptr, prmptr, perm, givptr, givcol, givnum, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slaed8 "F_FUNC(slaed8,SLAED8)"(int *icompq, int *k, int *n, int *qsiz, s *d, s *q, int *ldq, int *indxq, s *rho, int *cutpnt, s *z, s *dlamda, s *q2, int *ldq2, s *w, int *perm, int *givptr, int *givcol, s *givnum, int *indxp, int *indx, int *info) nogil
+cdef void slaed8(int *icompq, int *k, int *n, int *qsiz, s *d, s *q, int *ldq, int *indxq, s *rho, int *cutpnt, s *z, s *dlamda, s *q2, int *ldq2, s *w, int *perm, int *givptr, int *givcol, s *givnum, int *indxp, int *indx, int *info) nogil:
+    _fortran_slaed8(icompq, k, n, qsiz, d, q, ldq, indxq, rho, cutpnt, z, dlamda, q2, ldq2, w, perm, givptr, givcol, givnum, indxp, indx, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slaed9 "F_FUNC(slaed9,SLAED9)"(int *k, int *kstart, int *kstop, int *n, s *d, s *q, int *ldq, s *rho, s *dlamda, s *w, s *s, int *lds, int *info) nogil
+cdef void slaed9(int *k, int *kstart, int *kstop, int *n, s *d, s *q, int *ldq, s *rho, s *dlamda, s *w, s *s, int *lds, int *info) nogil:
+    _fortran_slaed9(k, kstart, kstop, n, d, q, ldq, rho, dlamda, w, s, lds, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slaeda "F_FUNC(slaeda,SLAEDA)"(int *n, int *tlvls, int *curlvl, int *curpbm, int *prmptr, int *perm, int *givptr, int *givcol, s *givnum, s *q, int *qptr, s *z, s *ztemp, int *info) nogil
+cdef void slaeda(int *n, int *tlvls, int *curlvl, int *curpbm, int *prmptr, int *perm, int *givptr, int *givcol, s *givnum, s *q, int *qptr, s *z, s *ztemp, int *info) nogil:
+    _fortran_slaeda(n, tlvls, curlvl, curpbm, prmptr, perm, givptr, givcol, givnum, q, qptr, z, ztemp, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slaein "F_FUNC(slaein,SLAEIN)"(bint *rightv, bint *noinit, int *n, s *h, int *ldh, s *wr, s *wi, s *vr, s *vi, s *b, int *ldb, s *work, s *eps3, s *smlnum, s *bignum, int *info) nogil
+cdef void slaein(bint *rightv, bint *noinit, int *n, s *h, int *ldh, s *wr, s *wi, s *vr, s *vi, s *b, int *ldb, s *work, s *eps3, s *smlnum, s *bignum, int *info) nogil:
+    _fortran_slaein(rightv, noinit, n, h, ldh, wr, wi, vr, vi, b, ldb, work, eps3, smlnum, bignum, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slaev2 "F_FUNC(slaev2,SLAEV2)"(s *a, s *b, s *c, s *rt1, s *rt2, s *cs1, s *sn1) nogil
+cdef void slaev2(s *a, s *b, s *c, s *rt1, s *rt2, s *cs1, s *sn1) nogil:
+    _fortran_slaev2(a, b, c, rt1, rt2, cs1, sn1)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slaexc "F_FUNC(slaexc,SLAEXC)"(bint *wantq, int *n, s *t, int *ldt, s *q, int *ldq, int *j1, int *n1, int *n2, s *work, int *info) nogil
+cdef void slaexc(bint *wantq, int *n, s *t, int *ldt, s *q, int *ldq, int *j1, int *n1, int *n2, s *work, int *info) nogil:
+    _fortran_slaexc(wantq, n, t, ldt, q, ldq, j1, n1, n2, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slag2 "F_FUNC(slag2,SLAG2)"(s *a, int *lda, s *b, int *ldb, s *safmin, s *scale1, s *scale2, s *wr1, s *wr2, s *wi) nogil
+cdef void slag2(s *a, int *lda, s *b, int *ldb, s *safmin, s *scale1, s *scale2, s *wr1, s *wr2, s *wi) nogil:
+    _fortran_slag2(a, lda, b, ldb, safmin, scale1, scale2, wr1, wr2, wi)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slag2d "F_FUNC(slag2d,SLAG2D)"(int *m, int *n, s *sa, int *ldsa, d *a, int *lda, int *info) nogil
+cdef void slag2d(int *m, int *n, s *sa, int *ldsa, d *a, int *lda, int *info) nogil:
+    _fortran_slag2d(m, n, sa, ldsa, a, lda, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slags2 "F_FUNC(slags2,SLAGS2)"(bint *upper, s *a1, s *a2, s *a3, s *b1, s *b2, s *b3, s *csu, s *snu, s *csv, s *snv, s *csq, s *snq) nogil
+cdef void slags2(bint *upper, s *a1, s *a2, s *a3, s *b1, s *b2, s *b3, s *csu, s *snu, s *csv, s *snv, s *csq, s *snq) nogil:
+    _fortran_slags2(upper, a1, a2, a3, b1, b2, b3, csu, snu, csv, snv, csq, snq)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slagtf "F_FUNC(slagtf,SLAGTF)"(int *n, s *a, s *lambda_, s *b, s *c, s *tol, s *d, int *in_, int *info) nogil
+cdef void slagtf(int *n, s *a, s *lambda_, s *b, s *c, s *tol, s *d, int *in_, int *info) nogil:
+    _fortran_slagtf(n, a, lambda_, b, c, tol, d, in_, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slagtm "F_FUNC(slagtm,SLAGTM)"(char *trans, int *n, int *nrhs, s *alpha, s *dl, s *d, s *du, s *x, int *ldx, s *beta, s *b, int *ldb) nogil
+cdef void slagtm(char *trans, int *n, int *nrhs, s *alpha, s *dl, s *d, s *du, s *x, int *ldx, s *beta, s *b, int *ldb) nogil:
+    _fortran_slagtm(trans, n, nrhs, alpha, dl, d, du, x, ldx, beta, b, ldb)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slagts "F_FUNC(slagts,SLAGTS)"(int *job, int *n, s *a, s *b, s *c, s *d, int *in_, s *y, s *tol, int *info) nogil
+cdef void slagts(int *job, int *n, s *a, s *b, s *c, s *d, int *in_, s *y, s *tol, int *info) nogil:
+    _fortran_slagts(job, n, a, b, c, d, in_, y, tol, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slagv2 "F_FUNC(slagv2,SLAGV2)"(s *a, int *lda, s *b, int *ldb, s *alphar, s *alphai, s *beta, s *csl, s *snl, s *csr, s *snr) nogil
+cdef void slagv2(s *a, int *lda, s *b, int *ldb, s *alphar, s *alphai, s *beta, s *csl, s *snl, s *csr, s *snr) nogil:
+    _fortran_slagv2(a, lda, b, ldb, alphar, alphai, beta, csl, snl, csr, snr)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slahqr "F_FUNC(slahqr,SLAHQR)"(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, s *h, int *ldh, s *wr, s *wi, int *iloz, int *ihiz, s *z, int *ldz, int *info) nogil
+cdef void slahqr(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, s *h, int *ldh, s *wr, s *wi, int *iloz, int *ihiz, s *z, int *ldz, int *info) nogil:
+    _fortran_slahqr(wantt, wantz, n, ilo, ihi, h, ldh, wr, wi, iloz, ihiz, z, ldz, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slahr2 "F_FUNC(slahr2,SLAHR2)"(int *n, int *k, int *nb, s *a, int *lda, s *tau, s *t, int *ldt, s *y, int *ldy) nogil
+cdef void slahr2(int *n, int *k, int *nb, s *a, int *lda, s *tau, s *t, int *ldt, s *y, int *ldy) nogil:
+    _fortran_slahr2(n, k, nb, a, lda, tau, t, ldt, y, ldy)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slaic1 "F_FUNC(slaic1,SLAIC1)"(int *job, int *j, s *x, s *sest, s *w, s *gamma, s *sestpr, s *s, s *c) nogil
+cdef void slaic1(int *job, int *j, s *x, s *sest, s *w, s *gamma, s *sestpr, s *s, s *c) nogil:
+    _fortran_slaic1(job, j, x, sest, w, gamma, sestpr, s, c)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slaln2 "F_FUNC(slaln2,SLALN2)"(bint *ltrans, int *na, int *nw, s *smin, s *ca, s *a, int *lda, s *d1, s *d2, s *b, int *ldb, s *wr, s *wi, s *x, int *ldx, s *scale, s *xnorm, int *info) nogil
+cdef void slaln2(bint *ltrans, int *na, int *nw, s *smin, s *ca, s *a, int *lda, s *d1, s *d2, s *b, int *ldb, s *wr, s *wi, s *x, int *ldx, s *scale, s *xnorm, int *info) nogil:
+    _fortran_slaln2(ltrans, na, nw, smin, ca, a, lda, d1, d2, b, ldb, wr, wi, x, ldx, scale, xnorm, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slals0 "F_FUNC(slals0,SLALS0)"(int *icompq, int *nl, int *nr, int *sqre, int *nrhs, s *b, int *ldb, s *bx, int *ldbx, int *perm, int *givptr, int *givcol, int *ldgcol, s *givnum, int *ldgnum, s *poles, s *difl, s *difr, s *z, int *k, s *c, s *s, s *work, int *info) nogil
+cdef void slals0(int *icompq, int *nl, int *nr, int *sqre, int *nrhs, s *b, int *ldb, s *bx, int *ldbx, int *perm, int *givptr, int *givcol, int *ldgcol, s *givnum, int *ldgnum, s *poles, s *difl, s *difr, s *z, int *k, s *c, s *s, s *work, int *info) nogil:
+    _fortran_slals0(icompq, nl, nr, sqre, nrhs, b, ldb, bx, ldbx, perm, givptr, givcol, ldgcol, givnum, ldgnum, poles, difl, difr, z, k, c, s, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slalsa "F_FUNC(slalsa,SLALSA)"(int *icompq, int *smlsiz, int *n, int *nrhs, s *b, int *ldb, s *bx, int *ldbx, s *u, int *ldu, s *vt, int *k, s *difl, s *difr, s *z, s *poles, int *givptr, int *givcol, int *ldgcol, int *perm, s *givnum, s *c, s *s, s *work, int *iwork, int *info) nogil
+cdef void slalsa(int *icompq, int *smlsiz, int *n, int *nrhs, s *b, int *ldb, s *bx, int *ldbx, s *u, int *ldu, s *vt, int *k, s *difl, s *difr, s *z, s *poles, int *givptr, int *givcol, int *ldgcol, int *perm, s *givnum, s *c, s *s, s *work, int *iwork, int *info) nogil:
+    _fortran_slalsa(icompq, smlsiz, n, nrhs, b, ldb, bx, ldbx, u, ldu, vt, k, difl, difr, z, poles, givptr, givcol, ldgcol, perm, givnum, c, s, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slalsd "F_FUNC(slalsd,SLALSD)"(char *uplo, int *smlsiz, int *n, int *nrhs, s *d, s *e, s *b, int *ldb, s *rcond, int *rank, s *work, int *iwork, int *info) nogil
+cdef void slalsd(char *uplo, int *smlsiz, int *n, int *nrhs, s *d, s *e, s *b, int *ldb, s *rcond, int *rank, s *work, int *iwork, int *info) nogil:
+    _fortran_slalsd(uplo, smlsiz, n, nrhs, d, e, b, ldb, rcond, rank, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slamrg "F_FUNC(slamrg,SLAMRG)"(int *n1, int *n2, s *a, int *strd1, int *strd2, int *index_bn) nogil
+cdef void slamrg(int *n1, int *n2, s *a, int *strd1, int *strd2, int *index_bn) nogil:
+    _fortran_slamrg(n1, n2, a, strd1, strd2, index_bn)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slanv2 "F_FUNC(slanv2,SLANV2)"(s *a, s *b, s *c, s *d, s *rt1r, s *rt1i, s *rt2r, s *rt2i, s *cs, s *sn) nogil
+cdef void slanv2(s *a, s *b, s *c, s *d, s *rt1r, s *rt1i, s *rt2r, s *rt2i, s *cs, s *sn) nogil:
+    _fortran_slanv2(a, b, c, d, rt1r, rt1i, rt2r, rt2i, cs, sn)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slapll "F_FUNC(slapll,SLAPLL)"(int *n, s *x, int *incx, s *y, int *incy, s *ssmin) nogil
+cdef void slapll(int *n, s *x, int *incx, s *y, int *incy, s *ssmin) nogil:
+    _fortran_slapll(n, x, incx, y, incy, ssmin)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slapmr "F_FUNC(slapmr,SLAPMR)"(bint *forwrd, int *m, int *n, s *x, int *ldx, int *k) nogil
+cdef void slapmr(bint *forwrd, int *m, int *n, s *x, int *ldx, int *k) nogil:
+    _fortran_slapmr(forwrd, m, n, x, ldx, k)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slapmt "F_FUNC(slapmt,SLAPMT)"(bint *forwrd, int *m, int *n, s *x, int *ldx, int *k) nogil
+cdef void slapmt(bint *forwrd, int *m, int *n, s *x, int *ldx, int *k) nogil:
+    _fortran_slapmt(forwrd, m, n, x, ldx, k)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slaqgb "F_FUNC(slaqgb,SLAQGB)"(int *m, int *n, int *kl, int *ku, s *ab, int *ldab, s *r, s *c, s *rowcnd, s *colcnd, s *amax, char *equed) nogil
+cdef void slaqgb(int *m, int *n, int *kl, int *ku, s *ab, int *ldab, s *r, s *c, s *rowcnd, s *colcnd, s *amax, char *equed) nogil:
+    _fortran_slaqgb(m, n, kl, ku, ab, ldab, r, c, rowcnd, colcnd, amax, equed)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slaqge "F_FUNC(slaqge,SLAQGE)"(int *m, int *n, s *a, int *lda, s *r, s *c, s *rowcnd, s *colcnd, s *amax, char *equed) nogil
+cdef void slaqge(int *m, int *n, s *a, int *lda, s *r, s *c, s *rowcnd, s *colcnd, s *amax, char *equed) nogil:
+    _fortran_slaqge(m, n, a, lda, r, c, rowcnd, colcnd, amax, equed)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slaqp2 "F_FUNC(slaqp2,SLAQP2)"(int *m, int *n, int *offset, s *a, int *lda, int *jpvt, s *tau, s *vn1, s *vn2, s *work) nogil
+cdef void slaqp2(int *m, int *n, int *offset, s *a, int *lda, int *jpvt, s *tau, s *vn1, s *vn2, s *work) nogil:
+    _fortran_slaqp2(m, n, offset, a, lda, jpvt, tau, vn1, vn2, work)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slaqps "F_FUNC(slaqps,SLAQPS)"(int *m, int *n, int *offset, int *nb, int *kb, s *a, int *lda, int *jpvt, s *tau, s *vn1, s *vn2, s *auxv, s *f, int *ldf) nogil
+cdef void slaqps(int *m, int *n, int *offset, int *nb, int *kb, s *a, int *lda, int *jpvt, s *tau, s *vn1, s *vn2, s *auxv, s *f, int *ldf) nogil:
+    _fortran_slaqps(m, n, offset, nb, kb, a, lda, jpvt, tau, vn1, vn2, auxv, f, ldf)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slaqr0 "F_FUNC(slaqr0,SLAQR0)"(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, s *h, int *ldh, s *wr, s *wi, int *iloz, int *ihiz, s *z, int *ldz, s *work, int *lwork, int *info) nogil
+cdef void slaqr0(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, s *h, int *ldh, s *wr, s *wi, int *iloz, int *ihiz, s *z, int *ldz, s *work, int *lwork, int *info) nogil:
+    _fortran_slaqr0(wantt, wantz, n, ilo, ihi, h, ldh, wr, wi, iloz, ihiz, z, ldz, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slaqr1 "F_FUNC(slaqr1,SLAQR1)"(int *n, s *h, int *ldh, s *sr1, s *si1, s *sr2, s *si2, s *v) nogil
+cdef void slaqr1(int *n, s *h, int *ldh, s *sr1, s *si1, s *sr2, s *si2, s *v) nogil:
+    _fortran_slaqr1(n, h, ldh, sr1, si1, sr2, si2, v)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slaqr2 "F_FUNC(slaqr2,SLAQR2)"(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, s *h, int *ldh, int *iloz, int *ihiz, s *z, int *ldz, int *ns, int *nd, s *sr, s *si, s *v, int *ldv, int *nh, s *t, int *ldt, int *nv, s *wv, int *ldwv, s *work, int *lwork) nogil
+cdef void slaqr2(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, s *h, int *ldh, int *iloz, int *ihiz, s *z, int *ldz, int *ns, int *nd, s *sr, s *si, s *v, int *ldv, int *nh, s *t, int *ldt, int *nv, s *wv, int *ldwv, s *work, int *lwork) nogil:
+    _fortran_slaqr2(wantt, wantz, n, ktop, kbot, nw, h, ldh, iloz, ihiz, z, ldz, ns, nd, sr, si, v, ldv, nh, t, ldt, nv, wv, ldwv, work, lwork)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slaqr3 "F_FUNC(slaqr3,SLAQR3)"(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, s *h, int *ldh, int *iloz, int *ihiz, s *z, int *ldz, int *ns, int *nd, s *sr, s *si, s *v, int *ldv, int *nh, s *t, int *ldt, int *nv, s *wv, int *ldwv, s *work, int *lwork) nogil
+cdef void slaqr3(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, s *h, int *ldh, int *iloz, int *ihiz, s *z, int *ldz, int *ns, int *nd, s *sr, s *si, s *v, int *ldv, int *nh, s *t, int *ldt, int *nv, s *wv, int *ldwv, s *work, int *lwork) nogil:
+    _fortran_slaqr3(wantt, wantz, n, ktop, kbot, nw, h, ldh, iloz, ihiz, z, ldz, ns, nd, sr, si, v, ldv, nh, t, ldt, nv, wv, ldwv, work, lwork)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slaqr4 "F_FUNC(slaqr4,SLAQR4)"(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, s *h, int *ldh, s *wr, s *wi, int *iloz, int *ihiz, s *z, int *ldz, s *work, int *lwork, int *info) nogil
+cdef void slaqr4(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, s *h, int *ldh, s *wr, s *wi, int *iloz, int *ihiz, s *z, int *ldz, s *work, int *lwork, int *info) nogil:
+    _fortran_slaqr4(wantt, wantz, n, ilo, ihi, h, ldh, wr, wi, iloz, ihiz, z, ldz, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slaqr5 "F_FUNC(slaqr5,SLAQR5)"(bint *wantt, bint *wantz, int *kacc22, int *n, int *ktop, int *kbot, int *nshfts, s *sr, s *si, s *h, int *ldh, int *iloz, int *ihiz, s *z, int *ldz, s *v, int *ldv, s *u, int *ldu, int *nv, s *wv, int *ldwv, int *nh, s *wh, int *ldwh) nogil
+cdef void slaqr5(bint *wantt, bint *wantz, int *kacc22, int *n, int *ktop, int *kbot, int *nshfts, s *sr, s *si, s *h, int *ldh, int *iloz, int *ihiz, s *z, int *ldz, s *v, int *ldv, s *u, int *ldu, int *nv, s *wv, int *ldwv, int *nh, s *wh, int *ldwh) nogil:
+    _fortran_slaqr5(wantt, wantz, kacc22, n, ktop, kbot, nshfts, sr, si, h, ldh, iloz, ihiz, z, ldz, v, ldv, u, ldu, nv, wv, ldwv, nh, wh, ldwh)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slaqsb "F_FUNC(slaqsb,SLAQSB)"(char *uplo, int *n, int *kd, s *ab, int *ldab, s *s, s *scond, s *amax, char *equed) nogil
+cdef void slaqsb(char *uplo, int *n, int *kd, s *ab, int *ldab, s *s, s *scond, s *amax, char *equed) nogil:
+    _fortran_slaqsb(uplo, n, kd, ab, ldab, s, scond, amax, equed)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slaqsp "F_FUNC(slaqsp,SLAQSP)"(char *uplo, int *n, s *ap, s *s, s *scond, s *amax, char *equed) nogil
+cdef void slaqsp(char *uplo, int *n, s *ap, s *s, s *scond, s *amax, char *equed) nogil:
+    _fortran_slaqsp(uplo, n, ap, s, scond, amax, equed)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slaqsy "F_FUNC(slaqsy,SLAQSY)"(char *uplo, int *n, s *a, int *lda, s *s, s *scond, s *amax, char *equed) nogil
+cdef void slaqsy(char *uplo, int *n, s *a, int *lda, s *s, s *scond, s *amax, char *equed) nogil:
+    _fortran_slaqsy(uplo, n, a, lda, s, scond, amax, equed)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slaqtr "F_FUNC(slaqtr,SLAQTR)"(bint *ltran, bint *lreal, int *n, s *t, int *ldt, s *b, s *w, s *scale, s *x, s *work, int *info) nogil
+cdef void slaqtr(bint *ltran, bint *lreal, int *n, s *t, int *ldt, s *b, s *w, s *scale, s *x, s *work, int *info) nogil:
+    _fortran_slaqtr(ltran, lreal, n, t, ldt, b, w, scale, x, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slar1v "F_FUNC(slar1v,SLAR1V)"(int *n, int *b1, int *bn, s *lambda_, s *d, s *l, s *ld, s *lld, s *pivmin, s *gaptol, s *z, bint *wantnc, int *negcnt, s *ztz, s *mingma, int *r, int *isuppz, s *nrminv, s *resid, s *rqcorr, s *work) nogil
+cdef void slar1v(int *n, int *b1, int *bn, s *lambda_, s *d, s *l, s *ld, s *lld, s *pivmin, s *gaptol, s *z, bint *wantnc, int *negcnt, s *ztz, s *mingma, int *r, int *isuppz, s *nrminv, s *resid, s *rqcorr, s *work) nogil:
+    _fortran_slar1v(n, b1, bn, lambda_, d, l, ld, lld, pivmin, gaptol, z, wantnc, negcnt, ztz, mingma, r, isuppz, nrminv, resid, rqcorr, work)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slar2v "F_FUNC(slar2v,SLAR2V)"(int *n, s *x, s *y, s *z, int *incx, s *c, s *s, int *incc) nogil
+cdef void slar2v(int *n, s *x, s *y, s *z, int *incx, s *c, s *s, int *incc) nogil:
+    _fortran_slar2v(n, x, y, z, incx, c, s, incc)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slarf "F_FUNC(slarf,SLARF)"(char *side, int *m, int *n, s *v, int *incv, s *tau, s *c, int *ldc, s *work) nogil
+cdef void slarf(char *side, int *m, int *n, s *v, int *incv, s *tau, s *c, int *ldc, s *work) nogil:
+    _fortran_slarf(side, m, n, v, incv, tau, c, ldc, work)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slarfb "F_FUNC(slarfb,SLARFB)"(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, s *v, int *ldv, s *t, int *ldt, s *c, int *ldc, s *work, int *ldwork) nogil
+cdef void slarfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, s *v, int *ldv, s *t, int *ldt, s *c, int *ldc, s *work, int *ldwork) nogil:
+    _fortran_slarfb(side, trans, direct, storev, m, n, k, v, ldv, t, ldt, c, ldc, work, ldwork)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slarfg "F_FUNC(slarfg,SLARFG)"(int *n, s *alpha, s *x, int *incx, s *tau) nogil
+cdef void slarfg(int *n, s *alpha, s *x, int *incx, s *tau) nogil:
+    _fortran_slarfg(n, alpha, x, incx, tau)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slarfgp "F_FUNC(slarfgp,SLARFGP)"(int *n, s *alpha, s *x, int *incx, s *tau) nogil
+cdef void slarfgp(int *n, s *alpha, s *x, int *incx, s *tau) nogil:
+    _fortran_slarfgp(n, alpha, x, incx, tau)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slarft "F_FUNC(slarft,SLARFT)"(char *direct, char *storev, int *n, int *k, s *v, int *ldv, s *tau, s *t, int *ldt) nogil
+cdef void slarft(char *direct, char *storev, int *n, int *k, s *v, int *ldv, s *tau, s *t, int *ldt) nogil:
+    _fortran_slarft(direct, storev, n, k, v, ldv, tau, t, ldt)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slarfx "F_FUNC(slarfx,SLARFX)"(char *side, int *m, int *n, s *v, s *tau, s *c, int *ldc, s *work) nogil
+cdef void slarfx(char *side, int *m, int *n, s *v, s *tau, s *c, int *ldc, s *work) nogil:
+    _fortran_slarfx(side, m, n, v, tau, c, ldc, work)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slargv "F_FUNC(slargv,SLARGV)"(int *n, s *x, int *incx, s *y, int *incy, s *c, int *incc) nogil
+cdef void slargv(int *n, s *x, int *incx, s *y, int *incy, s *c, int *incc) nogil:
+    _fortran_slargv(n, x, incx, y, incy, c, incc)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slarnv "F_FUNC(slarnv,SLARNV)"(int *idist, int *iseed, int *n, s *x) nogil
+cdef void slarnv(int *idist, int *iseed, int *n, s *x) nogil:
+    _fortran_slarnv(idist, iseed, n, x)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slarra "F_FUNC(slarra,SLARRA)"(int *n, s *d, s *e, s *e2, s *spltol, s *tnrm, int *nsplit, int *isplit, int *info) nogil
+cdef void slarra(int *n, s *d, s *e, s *e2, s *spltol, s *tnrm, int *nsplit, int *isplit, int *info) nogil:
+    _fortran_slarra(n, d, e, e2, spltol, tnrm, nsplit, isplit, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slarrb "F_FUNC(slarrb,SLARRB)"(int *n, s *d, s *lld, int *ifirst, int *ilast, s *rtol1, s *rtol2, int *offset, s *w, s *wgap, s *werr, s *work, int *iwork, s *pivmin, s *spdiam, int *twist, int *info) nogil
+cdef void slarrb(int *n, s *d, s *lld, int *ifirst, int *ilast, s *rtol1, s *rtol2, int *offset, s *w, s *wgap, s *werr, s *work, int *iwork, s *pivmin, s *spdiam, int *twist, int *info) nogil:
+    _fortran_slarrb(n, d, lld, ifirst, ilast, rtol1, rtol2, offset, w, wgap, werr, work, iwork, pivmin, spdiam, twist, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slarrc "F_FUNC(slarrc,SLARRC)"(char *jobt, int *n, s *vl, s *vu, s *d, s *e, s *pivmin, int *eigcnt, int *lcnt, int *rcnt, int *info) nogil
+cdef void slarrc(char *jobt, int *n, s *vl, s *vu, s *d, s *e, s *pivmin, int *eigcnt, int *lcnt, int *rcnt, int *info) nogil:
+    _fortran_slarrc(jobt, n, vl, vu, d, e, pivmin, eigcnt, lcnt, rcnt, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slarrd "F_FUNC(slarrd,SLARRD)"(char *range, char *order, int *n, s *vl, s *vu, int *il, int *iu, s *gers, s *reltol, s *d, s *e, s *e2, s *pivmin, int *nsplit, int *isplit, int *m, s *w, s *werr, s *wl, s *wu, int *iblock, int *indexw, s *work, int *iwork, int *info) nogil
+cdef void slarrd(char *range, char *order, int *n, s *vl, s *vu, int *il, int *iu, s *gers, s *reltol, s *d, s *e, s *e2, s *pivmin, int *nsplit, int *isplit, int *m, s *w, s *werr, s *wl, s *wu, int *iblock, int *indexw, s *work, int *iwork, int *info) nogil:
+    _fortran_slarrd(range, order, n, vl, vu, il, iu, gers, reltol, d, e, e2, pivmin, nsplit, isplit, m, w, werr, wl, wu, iblock, indexw, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slarre "F_FUNC(slarre,SLARRE)"(char *range, int *n, s *vl, s *vu, int *il, int *iu, s *d, s *e, s *e2, s *rtol1, s *rtol2, s *spltol, int *nsplit, int *isplit, int *m, s *w, s *werr, s *wgap, int *iblock, int *indexw, s *gers, s *pivmin, s *work, int *iwork, int *info) nogil
+cdef void slarre(char *range, int *n, s *vl, s *vu, int *il, int *iu, s *d, s *e, s *e2, s *rtol1, s *rtol2, s *spltol, int *nsplit, int *isplit, int *m, s *w, s *werr, s *wgap, int *iblock, int *indexw, s *gers, s *pivmin, s *work, int *iwork, int *info) nogil:
+    _fortran_slarre(range, n, vl, vu, il, iu, d, e, e2, rtol1, rtol2, spltol, nsplit, isplit, m, w, werr, wgap, iblock, indexw, gers, pivmin, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slarrf "F_FUNC(slarrf,SLARRF)"(int *n, s *d, s *l, s *ld, int *clstrt, int *clend, s *w, s *wgap, s *werr, s *spdiam, s *clgapl, s *clgapr, s *pivmin, s *sigma, s *dplus, s *lplus, s *work, int *info) nogil
+cdef void slarrf(int *n, s *d, s *l, s *ld, int *clstrt, int *clend, s *w, s *wgap, s *werr, s *spdiam, s *clgapl, s *clgapr, s *pivmin, s *sigma, s *dplus, s *lplus, s *work, int *info) nogil:
+    _fortran_slarrf(n, d, l, ld, clstrt, clend, w, wgap, werr, spdiam, clgapl, clgapr, pivmin, sigma, dplus, lplus, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slarrj "F_FUNC(slarrj,SLARRJ)"(int *n, s *d, s *e2, int *ifirst, int *ilast, s *rtol, int *offset, s *w, s *werr, s *work, int *iwork, s *pivmin, s *spdiam, int *info) nogil
+cdef void slarrj(int *n, s *d, s *e2, int *ifirst, int *ilast, s *rtol, int *offset, s *w, s *werr, s *work, int *iwork, s *pivmin, s *spdiam, int *info) nogil:
+    _fortran_slarrj(n, d, e2, ifirst, ilast, rtol, offset, w, werr, work, iwork, pivmin, spdiam, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slarrk "F_FUNC(slarrk,SLARRK)"(int *n, int *iw, s *gl, s *gu, s *d, s *e2, s *pivmin, s *reltol, s *w, s *werr, int *info) nogil
+cdef void slarrk(int *n, int *iw, s *gl, s *gu, s *d, s *e2, s *pivmin, s *reltol, s *w, s *werr, int *info) nogil:
+    _fortran_slarrk(n, iw, gl, gu, d, e2, pivmin, reltol, w, werr, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slarrr "F_FUNC(slarrr,SLARRR)"(int *n, s *d, s *e, int *info) nogil
+cdef void slarrr(int *n, s *d, s *e, int *info) nogil:
+    _fortran_slarrr(n, d, e, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slarrv "F_FUNC(slarrv,SLARRV)"(int *n, s *vl, s *vu, s *d, s *l, s *pivmin, int *isplit, int *m, int *dol, int *dou, s *minrgp, s *rtol1, s *rtol2, s *w, s *werr, s *wgap, int *iblock, int *indexw, s *gers, s *z, int *ldz, int *isuppz, s *work, int *iwork, int *info) nogil
+cdef void slarrv(int *n, s *vl, s *vu, s *d, s *l, s *pivmin, int *isplit, int *m, int *dol, int *dou, s *minrgp, s *rtol1, s *rtol2, s *w, s *werr, s *wgap, int *iblock, int *indexw, s *gers, s *z, int *ldz, int *isuppz, s *work, int *iwork, int *info) nogil:
+    _fortran_slarrv(n, vl, vu, d, l, pivmin, isplit, m, dol, dou, minrgp, rtol1, rtol2, w, werr, wgap, iblock, indexw, gers, z, ldz, isuppz, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slartg "F_FUNC(slartg,SLARTG)"(s *f, s *g, s *cs, s *sn, s *r) nogil
+cdef void slartg(s *f, s *g, s *cs, s *sn, s *r) nogil:
+    _fortran_slartg(f, g, cs, sn, r)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slartgp "F_FUNC(slartgp,SLARTGP)"(s *f, s *g, s *cs, s *sn, s *r) nogil
+cdef void slartgp(s *f, s *g, s *cs, s *sn, s *r) nogil:
+    _fortran_slartgp(f, g, cs, sn, r)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slartgs "F_FUNC(slartgs,SLARTGS)"(s *x, s *y, s *sigma, s *cs, s *sn) nogil
+cdef void slartgs(s *x, s *y, s *sigma, s *cs, s *sn) nogil:
+    _fortran_slartgs(x, y, sigma, cs, sn)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slartv "F_FUNC(slartv,SLARTV)"(int *n, s *x, int *incx, s *y, int *incy, s *c, s *s, int *incc) nogil
+cdef void slartv(int *n, s *x, int *incx, s *y, int *incy, s *c, s *s, int *incc) nogil:
+    _fortran_slartv(n, x, incx, y, incy, c, s, incc)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slaruv "F_FUNC(slaruv,SLARUV)"(int *iseed, int *n, s *x) nogil
+cdef void slaruv(int *iseed, int *n, s *x) nogil:
+    _fortran_slaruv(iseed, n, x)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slarz "F_FUNC(slarz,SLARZ)"(char *side, int *m, int *n, int *l, s *v, int *incv, s *tau, s *c, int *ldc, s *work) nogil
+cdef void slarz(char *side, int *m, int *n, int *l, s *v, int *incv, s *tau, s *c, int *ldc, s *work) nogil:
+    _fortran_slarz(side, m, n, l, v, incv, tau, c, ldc, work)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slarzb "F_FUNC(slarzb,SLARZB)"(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, s *v, int *ldv, s *t, int *ldt, s *c, int *ldc, s *work, int *ldwork) nogil
+cdef void slarzb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, s *v, int *ldv, s *t, int *ldt, s *c, int *ldc, s *work, int *ldwork) nogil:
+    _fortran_slarzb(side, trans, direct, storev, m, n, k, l, v, ldv, t, ldt, c, ldc, work, ldwork)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slarzt "F_FUNC(slarzt,SLARZT)"(char *direct, char *storev, int *n, int *k, s *v, int *ldv, s *tau, s *t, int *ldt) nogil
+cdef void slarzt(char *direct, char *storev, int *n, int *k, s *v, int *ldv, s *tau, s *t, int *ldt) nogil:
+    _fortran_slarzt(direct, storev, n, k, v, ldv, tau, t, ldt)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slas2 "F_FUNC(slas2,SLAS2)"(s *f, s *g, s *h, s *ssmin, s *ssmax) nogil
+cdef void slas2(s *f, s *g, s *h, s *ssmin, s *ssmax) nogil:
+    _fortran_slas2(f, g, h, ssmin, ssmax)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slascl "F_FUNC(slascl,SLASCL)"(char *type_bn, int *kl, int *ku, s *cfrom, s *cto, int *m, int *n, s *a, int *lda, int *info) nogil
+cdef void slascl(char *type_bn, int *kl, int *ku, s *cfrom, s *cto, int *m, int *n, s *a, int *lda, int *info) nogil:
+    _fortran_slascl(type_bn, kl, ku, cfrom, cto, m, n, a, lda, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slasd0 "F_FUNC(slasd0,SLASD0)"(int *n, int *sqre, s *d, s *e, s *u, int *ldu, s *vt, int *ldvt, int *smlsiz, int *iwork, s *work, int *info) nogil
+cdef void slasd0(int *n, int *sqre, s *d, s *e, s *u, int *ldu, s *vt, int *ldvt, int *smlsiz, int *iwork, s *work, int *info) nogil:
+    _fortran_slasd0(n, sqre, d, e, u, ldu, vt, ldvt, smlsiz, iwork, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slasd1 "F_FUNC(slasd1,SLASD1)"(int *nl, int *nr, int *sqre, s *d, s *alpha, s *beta, s *u, int *ldu, s *vt, int *ldvt, int *idxq, int *iwork, s *work, int *info) nogil
+cdef void slasd1(int *nl, int *nr, int *sqre, s *d, s *alpha, s *beta, s *u, int *ldu, s *vt, int *ldvt, int *idxq, int *iwork, s *work, int *info) nogil:
+    _fortran_slasd1(nl, nr, sqre, d, alpha, beta, u, ldu, vt, ldvt, idxq, iwork, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slasd2 "F_FUNC(slasd2,SLASD2)"(int *nl, int *nr, int *sqre, int *k, s *d, s *z, s *alpha, s *beta, s *u, int *ldu, s *vt, int *ldvt, s *dsigma, s *u2, int *ldu2, s *vt2, int *ldvt2, int *idxp, int *idx, int *idxc, int *idxq, int *coltyp, int *info) nogil
+cdef void slasd2(int *nl, int *nr, int *sqre, int *k, s *d, s *z, s *alpha, s *beta, s *u, int *ldu, s *vt, int *ldvt, s *dsigma, s *u2, int *ldu2, s *vt2, int *ldvt2, int *idxp, int *idx, int *idxc, int *idxq, int *coltyp, int *info) nogil:
+    _fortran_slasd2(nl, nr, sqre, k, d, z, alpha, beta, u, ldu, vt, ldvt, dsigma, u2, ldu2, vt2, ldvt2, idxp, idx, idxc, idxq, coltyp, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slasd3 "F_FUNC(slasd3,SLASD3)"(int *nl, int *nr, int *sqre, int *k, s *d, s *q, int *ldq, s *dsigma, s *u, int *ldu, s *u2, int *ldu2, s *vt, int *ldvt, s *vt2, int *ldvt2, int *idxc, int *ctot, s *z, int *info) nogil
+cdef void slasd3(int *nl, int *nr, int *sqre, int *k, s *d, s *q, int *ldq, s *dsigma, s *u, int *ldu, s *u2, int *ldu2, s *vt, int *ldvt, s *vt2, int *ldvt2, int *idxc, int *ctot, s *z, int *info) nogil:
+    _fortran_slasd3(nl, nr, sqre, k, d, q, ldq, dsigma, u, ldu, u2, ldu2, vt, ldvt, vt2, ldvt2, idxc, ctot, z, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slasd4 "F_FUNC(slasd4,SLASD4)"(int *n, int *i, s *d, s *z, s *delta, s *rho, s *sigma, s *work, int *info) nogil
+cdef void slasd4(int *n, int *i, s *d, s *z, s *delta, s *rho, s *sigma, s *work, int *info) nogil:
+    _fortran_slasd4(n, i, d, z, delta, rho, sigma, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slasd5 "F_FUNC(slasd5,SLASD5)"(int *i, s *d, s *z, s *delta, s *rho, s *dsigma, s *work) nogil
+cdef void slasd5(int *i, s *d, s *z, s *delta, s *rho, s *dsigma, s *work) nogil:
+    _fortran_slasd5(i, d, z, delta, rho, dsigma, work)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slasd6 "F_FUNC(slasd6,SLASD6)"(int *icompq, int *nl, int *nr, int *sqre, s *d, s *vf, s *vl, s *alpha, s *beta, int *idxq, int *perm, int *givptr, int *givcol, int *ldgcol, s *givnum, int *ldgnum, s *poles, s *difl, s *difr, s *z, int *k, s *c, s *s, s *work, int *iwork, int *info) nogil
+cdef void slasd6(int *icompq, int *nl, int *nr, int *sqre, s *d, s *vf, s *vl, s *alpha, s *beta, int *idxq, int *perm, int *givptr, int *givcol, int *ldgcol, s *givnum, int *ldgnum, s *poles, s *difl, s *difr, s *z, int *k, s *c, s *s, s *work, int *iwork, int *info) nogil:
+    _fortran_slasd6(icompq, nl, nr, sqre, d, vf, vl, alpha, beta, idxq, perm, givptr, givcol, ldgcol, givnum, ldgnum, poles, difl, difr, z, k, c, s, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slasd7 "F_FUNC(slasd7,SLASD7)"(int *icompq, int *nl, int *nr, int *sqre, int *k, s *d, s *z, s *zw, s *vf, s *vfw, s *vl, s *vlw, s *alpha, s *beta, s *dsigma, int *idx, int *idxp, int *idxq, int *perm, int *givptr, int *givcol, int *ldgcol, s *givnum, int *ldgnum, s *c, s *s, int *info) nogil
+cdef void slasd7(int *icompq, int *nl, int *nr, int *sqre, int *k, s *d, s *z, s *zw, s *vf, s *vfw, s *vl, s *vlw, s *alpha, s *beta, s *dsigma, int *idx, int *idxp, int *idxq, int *perm, int *givptr, int *givcol, int *ldgcol, s *givnum, int *ldgnum, s *c, s *s, int *info) nogil:
+    _fortran_slasd7(icompq, nl, nr, sqre, k, d, z, zw, vf, vfw, vl, vlw, alpha, beta, dsigma, idx, idxp, idxq, perm, givptr, givcol, ldgcol, givnum, ldgnum, c, s, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slasd8 "F_FUNC(slasd8,SLASD8)"(int *icompq, int *k, s *d, s *z, s *vf, s *vl, s *difl, s *difr, int *lddifr, s *dsigma, s *work, int *info) nogil
+cdef void slasd8(int *icompq, int *k, s *d, s *z, s *vf, s *vl, s *difl, s *difr, int *lddifr, s *dsigma, s *work, int *info) nogil:
+    _fortran_slasd8(icompq, k, d, z, vf, vl, difl, difr, lddifr, dsigma, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slasda "F_FUNC(slasda,SLASDA)"(int *icompq, int *smlsiz, int *n, int *sqre, s *d, s *e, s *u, int *ldu, s *vt, int *k, s *difl, s *difr, s *z, s *poles, int *givptr, int *givcol, int *ldgcol, int *perm, s *givnum, s *c, s *s, s *work, int *iwork, int *info) nogil
+cdef void slasda(int *icompq, int *smlsiz, int *n, int *sqre, s *d, s *e, s *u, int *ldu, s *vt, int *k, s *difl, s *difr, s *z, s *poles, int *givptr, int *givcol, int *ldgcol, int *perm, s *givnum, s *c, s *s, s *work, int *iwork, int *info) nogil:
+    _fortran_slasda(icompq, smlsiz, n, sqre, d, e, u, ldu, vt, k, difl, difr, z, poles, givptr, givcol, ldgcol, perm, givnum, c, s, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slasdq "F_FUNC(slasdq,SLASDQ)"(char *uplo, int *sqre, int *n, int *ncvt, int *nru, int *ncc, s *d, s *e, s *vt, int *ldvt, s *u, int *ldu, s *c, int *ldc, s *work, int *info) nogil
+cdef void slasdq(char *uplo, int *sqre, int *n, int *ncvt, int *nru, int *ncc, s *d, s *e, s *vt, int *ldvt, s *u, int *ldu, s *c, int *ldc, s *work, int *info) nogil:
+    _fortran_slasdq(uplo, sqre, n, ncvt, nru, ncc, d, e, vt, ldvt, u, ldu, c, ldc, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slasdt "F_FUNC(slasdt,SLASDT)"(int *n, int *lvl, int *nd, int *inode, int *ndiml, int *ndimr, int *msub) nogil
+cdef void slasdt(int *n, int *lvl, int *nd, int *inode, int *ndiml, int *ndimr, int *msub) nogil:
+    _fortran_slasdt(n, lvl, nd, inode, ndiml, ndimr, msub)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slaset "F_FUNC(slaset,SLASET)"(char *uplo, int *m, int *n, s *alpha, s *beta, s *a, int *lda) nogil
+cdef void slaset(char *uplo, int *m, int *n, s *alpha, s *beta, s *a, int *lda) nogil:
+    _fortran_slaset(uplo, m, n, alpha, beta, a, lda)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slasq1 "F_FUNC(slasq1,SLASQ1)"(int *n, s *d, s *e, s *work, int *info) nogil
+cdef void slasq1(int *n, s *d, s *e, s *work, int *info) nogil:
+    _fortran_slasq1(n, d, e, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slasq2 "F_FUNC(slasq2,SLASQ2)"(int *n, s *z, int *info) nogil
+cdef void slasq2(int *n, s *z, int *info) nogil:
+    _fortran_slasq2(n, z, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slasq3 "F_FUNC(slasq3,SLASQ3)"(int *i0, int *n0, s *z, int *pp, s *dmin, s *sigma, s *desig, s *qmax, int *nfail, int *iter, int *ndiv, bint *ieee, int *ttype, s *dmin1, s *dmin2, s *dn, s *dn1, s *dn2, s *g, s *tau) nogil
+cdef void slasq3(int *i0, int *n0, s *z, int *pp, s *dmin, s *sigma, s *desig, s *qmax, int *nfail, int *iter, int *ndiv, bint *ieee, int *ttype, s *dmin1, s *dmin2, s *dn, s *dn1, s *dn2, s *g, s *tau) nogil:
+    _fortran_slasq3(i0, n0, z, pp, dmin, sigma, desig, qmax, nfail, iter, ndiv, ieee, ttype, dmin1, dmin2, dn, dn1, dn2, g, tau)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slasq4 "F_FUNC(slasq4,SLASQ4)"(int *i0, int *n0, s *z, int *pp, int *n0in, s *dmin, s *dmin1, s *dmin2, s *dn, s *dn1, s *dn2, s *tau, int *ttype, s *g) nogil
+cdef void slasq4(int *i0, int *n0, s *z, int *pp, int *n0in, s *dmin, s *dmin1, s *dmin2, s *dn, s *dn1, s *dn2, s *tau, int *ttype, s *g) nogil:
+    _fortran_slasq4(i0, n0, z, pp, n0in, dmin, dmin1, dmin2, dn, dn1, dn2, tau, ttype, g)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slasq6 "F_FUNC(slasq6,SLASQ6)"(int *i0, int *n0, s *z, int *pp, s *dmin, s *dmin1, s *dmin2, s *dn, s *dnm1, s *dnm2) nogil
+cdef void slasq6(int *i0, int *n0, s *z, int *pp, s *dmin, s *dmin1, s *dmin2, s *dn, s *dnm1, s *dnm2) nogil:
+    _fortran_slasq6(i0, n0, z, pp, dmin, dmin1, dmin2, dn, dnm1, dnm2)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slasr "F_FUNC(slasr,SLASR)"(char *side, char *pivot, char *direct, int *m, int *n, s *c, s *s, s *a, int *lda) nogil
+cdef void slasr(char *side, char *pivot, char *direct, int *m, int *n, s *c, s *s, s *a, int *lda) nogil:
+    _fortran_slasr(side, pivot, direct, m, n, c, s, a, lda)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slasrt "F_FUNC(slasrt,SLASRT)"(char *id, int *n, s *d, int *info) nogil
+cdef void slasrt(char *id, int *n, s *d, int *info) nogil:
+    _fortran_slasrt(id, n, d, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slassq "F_FUNC(slassq,SLASSQ)"(int *n, s *x, int *incx, s *scale, s *sumsq) nogil
+cdef void slassq(int *n, s *x, int *incx, s *scale, s *sumsq) nogil:
+    _fortran_slassq(n, x, incx, scale, sumsq)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slasv2 "F_FUNC(slasv2,SLASV2)"(s *f, s *g, s *h, s *ssmin, s *ssmax, s *snr, s *csr, s *snl, s *csl) nogil
+cdef void slasv2(s *f, s *g, s *h, s *ssmin, s *ssmax, s *snr, s *csr, s *snl, s *csl) nogil:
+    _fortran_slasv2(f, g, h, ssmin, ssmax, snr, csr, snl, csl)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slaswp "F_FUNC(slaswp,SLASWP)"(int *n, s *a, int *lda, int *k1, int *k2, int *ipiv, int *incx) nogil
+cdef void slaswp(int *n, s *a, int *lda, int *k1, int *k2, int *ipiv, int *incx) nogil:
+    _fortran_slaswp(n, a, lda, k1, k2, ipiv, incx)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slasy2 "F_FUNC(slasy2,SLASY2)"(bint *ltranl, bint *ltranr, int *isgn, int *n1, int *n2, s *tl, int *ldtl, s *tr, int *ldtr, s *b, int *ldb, s *scale, s *x, int *ldx, s *xnorm, int *info) nogil
+cdef void slasy2(bint *ltranl, bint *ltranr, int *isgn, int *n1, int *n2, s *tl, int *ldtl, s *tr, int *ldtr, s *b, int *ldb, s *scale, s *x, int *ldx, s *xnorm, int *info) nogil:
+    _fortran_slasy2(ltranl, ltranr, isgn, n1, n2, tl, ldtl, tr, ldtr, b, ldb, scale, x, ldx, xnorm, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slasyf "F_FUNC(slasyf,SLASYF)"(char *uplo, int *n, int *nb, int *kb, s *a, int *lda, int *ipiv, s *w, int *ldw, int *info) nogil
+cdef void slasyf(char *uplo, int *n, int *nb, int *kb, s *a, int *lda, int *ipiv, s *w, int *ldw, int *info) nogil:
+    _fortran_slasyf(uplo, n, nb, kb, a, lda, ipiv, w, ldw, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slatbs "F_FUNC(slatbs,SLATBS)"(char *uplo, char *trans, char *diag, char *normin, int *n, int *kd, s *ab, int *ldab, s *x, s *scale, s *cnorm, int *info) nogil
+cdef void slatbs(char *uplo, char *trans, char *diag, char *normin, int *n, int *kd, s *ab, int *ldab, s *x, s *scale, s *cnorm, int *info) nogil:
+    _fortran_slatbs(uplo, trans, diag, normin, n, kd, ab, ldab, x, scale, cnorm, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slatdf "F_FUNC(slatdf,SLATDF)"(int *ijob, int *n, s *z, int *ldz, s *rhs, s *rdsum, s *rdscal, int *ipiv, int *jpiv) nogil
+cdef void slatdf(int *ijob, int *n, s *z, int *ldz, s *rhs, s *rdsum, s *rdscal, int *ipiv, int *jpiv) nogil:
+    _fortran_slatdf(ijob, n, z, ldz, rhs, rdsum, rdscal, ipiv, jpiv)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slatps "F_FUNC(slatps,SLATPS)"(char *uplo, char *trans, char *diag, char *normin, int *n, s *ap, s *x, s *scale, s *cnorm, int *info) nogil
+cdef void slatps(char *uplo, char *trans, char *diag, char *normin, int *n, s *ap, s *x, s *scale, s *cnorm, int *info) nogil:
+    _fortran_slatps(uplo, trans, diag, normin, n, ap, x, scale, cnorm, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slatrd "F_FUNC(slatrd,SLATRD)"(char *uplo, int *n, int *nb, s *a, int *lda, s *e, s *tau, s *w, int *ldw) nogil
+cdef void slatrd(char *uplo, int *n, int *nb, s *a, int *lda, s *e, s *tau, s *w, int *ldw) nogil:
+    _fortran_slatrd(uplo, n, nb, a, lda, e, tau, w, ldw)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slatrs "F_FUNC(slatrs,SLATRS)"(char *uplo, char *trans, char *diag, char *normin, int *n, s *a, int *lda, s *x, s *scale, s *cnorm, int *info) nogil
+cdef void slatrs(char *uplo, char *trans, char *diag, char *normin, int *n, s *a, int *lda, s *x, s *scale, s *cnorm, int *info) nogil:
+    _fortran_slatrs(uplo, trans, diag, normin, n, a, lda, x, scale, cnorm, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slatrz "F_FUNC(slatrz,SLATRZ)"(int *m, int *n, int *l, s *a, int *lda, s *tau, s *work) nogil
+cdef void slatrz(int *m, int *n, int *l, s *a, int *lda, s *tau, s *work) nogil:
+    _fortran_slatrz(m, n, l, a, lda, tau, work)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slauu2 "F_FUNC(slauu2,SLAUU2)"(char *uplo, int *n, s *a, int *lda, int *info) nogil
+cdef void slauu2(char *uplo, int *n, s *a, int *lda, int *info) nogil:
+    _fortran_slauu2(uplo, n, a, lda, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_slauum "F_FUNC(slauum,SLAUUM)"(char *uplo, int *n, s *a, int *lda, int *info) nogil
+cdef void slauum(char *uplo, int *n, s *a, int *lda, int *info) nogil:
+    _fortran_slauum(uplo, n, a, lda, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sopgtr "F_FUNC(sopgtr,SOPGTR)"(char *uplo, int *n, s *ap, s *tau, s *q, int *ldq, s *work, int *info) nogil
+cdef void sopgtr(char *uplo, int *n, s *ap, s *tau, s *q, int *ldq, s *work, int *info) nogil:
+    _fortran_sopgtr(uplo, n, ap, tau, q, ldq, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sopmtr "F_FUNC(sopmtr,SOPMTR)"(char *side, char *uplo, char *trans, int *m, int *n, s *ap, s *tau, s *c, int *ldc, s *work, int *info) nogil
+cdef void sopmtr(char *side, char *uplo, char *trans, int *m, int *n, s *ap, s *tau, s *c, int *ldc, s *work, int *info) nogil:
+    _fortran_sopmtr(side, uplo, trans, m, n, ap, tau, c, ldc, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sorbdb "F_FUNC(sorbdb,SORBDB)"(char *trans, char *signs, int *m, int *p, int *q, s *x11, int *ldx11, s *x12, int *ldx12, s *x21, int *ldx21, s *x22, int *ldx22, s *theta, s *phi, s *taup1, s *taup2, s *tauq1, s *tauq2, s *work, int *lwork, int *info) nogil
+cdef void sorbdb(char *trans, char *signs, int *m, int *p, int *q, s *x11, int *ldx11, s *x12, int *ldx12, s *x21, int *ldx21, s *x22, int *ldx22, s *theta, s *phi, s *taup1, s *taup2, s *tauq1, s *tauq2, s *work, int *lwork, int *info) nogil:
+    _fortran_sorbdb(trans, signs, m, p, q, x11, ldx11, x12, ldx12, x21, ldx21, x22, ldx22, theta, phi, taup1, taup2, tauq1, tauq2, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sorcsd "F_FUNC(sorcsd,SORCSD)"(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, char *signs, int *m, int *p, int *q, s *x11, int *ldx11, s *x12, int *ldx12, s *x21, int *ldx21, s *x22, int *ldx22, s *theta, s *u1, int *ldu1, s *u2, int *ldu2, s *v1t, int *ldv1t, s *v2t, int *ldv2t, s *work, int *lwork, int *iwork, int *info) nogil
+cdef void sorcsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, char *signs, int *m, int *p, int *q, s *x11, int *ldx11, s *x12, int *ldx12, s *x21, int *ldx21, s *x22, int *ldx22, s *theta, s *u1, int *ldu1, s *u2, int *ldu2, s *v1t, int *ldv1t, s *v2t, int *ldv2t, s *work, int *lwork, int *iwork, int *info) nogil:
+    _fortran_sorcsd(jobu1, jobu2, jobv1t, jobv2t, trans, signs, m, p, q, x11, ldx11, x12, ldx12, x21, ldx21, x22, ldx22, theta, u1, ldu1, u2, ldu2, v1t, ldv1t, v2t, ldv2t, work, lwork, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sorg2l "F_FUNC(sorg2l,SORG2L)"(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *info) nogil
+cdef void sorg2l(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *info) nogil:
+    _fortran_sorg2l(m, n, k, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sorg2r "F_FUNC(sorg2r,SORG2R)"(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *info) nogil
+cdef void sorg2r(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *info) nogil:
+    _fortran_sorg2r(m, n, k, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sorgbr "F_FUNC(sorgbr,SORGBR)"(char *vect, int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+cdef void sorgbr(char *vect, int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil:
+    _fortran_sorgbr(vect, m, n, k, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sorghr "F_FUNC(sorghr,SORGHR)"(int *n, int *ilo, int *ihi, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+cdef void sorghr(int *n, int *ilo, int *ihi, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil:
+    _fortran_sorghr(n, ilo, ihi, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sorgl2 "F_FUNC(sorgl2,SORGL2)"(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *info) nogil
+cdef void sorgl2(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *info) nogil:
+    _fortran_sorgl2(m, n, k, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sorglq "F_FUNC(sorglq,SORGLQ)"(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+cdef void sorglq(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil:
+    _fortran_sorglq(m, n, k, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sorgql "F_FUNC(sorgql,SORGQL)"(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+cdef void sorgql(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil:
+    _fortran_sorgql(m, n, k, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sorgqr "F_FUNC(sorgqr,SORGQR)"(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+cdef void sorgqr(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil:
+    _fortran_sorgqr(m, n, k, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sorgr2 "F_FUNC(sorgr2,SORGR2)"(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *info) nogil
+cdef void sorgr2(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *info) nogil:
+    _fortran_sorgr2(m, n, k, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sorgrq "F_FUNC(sorgrq,SORGRQ)"(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+cdef void sorgrq(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil:
+    _fortran_sorgrq(m, n, k, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sorgtr "F_FUNC(sorgtr,SORGTR)"(char *uplo, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+cdef void sorgtr(char *uplo, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil:
+    _fortran_sorgtr(uplo, n, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sorm2l "F_FUNC(sorm2l,SORM2L)"(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *info) nogil
+cdef void sorm2l(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *info) nogil:
+    _fortran_sorm2l(side, trans, m, n, k, a, lda, tau, c, ldc, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sorm2r "F_FUNC(sorm2r,SORM2R)"(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *info) nogil
+cdef void sorm2r(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *info) nogil:
+    _fortran_sorm2r(side, trans, m, n, k, a, lda, tau, c, ldc, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sormbr "F_FUNC(sormbr,SORMBR)"(char *vect, char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil
+cdef void sormbr(char *vect, char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil:
+    _fortran_sormbr(vect, side, trans, m, n, k, a, lda, tau, c, ldc, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sormhr "F_FUNC(sormhr,SORMHR)"(char *side, char *trans, int *m, int *n, int *ilo, int *ihi, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil
+cdef void sormhr(char *side, char *trans, int *m, int *n, int *ilo, int *ihi, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil:
+    _fortran_sormhr(side, trans, m, n, ilo, ihi, a, lda, tau, c, ldc, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sorml2 "F_FUNC(sorml2,SORML2)"(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *info) nogil
+cdef void sorml2(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *info) nogil:
+    _fortran_sorml2(side, trans, m, n, k, a, lda, tau, c, ldc, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sormlq "F_FUNC(sormlq,SORMLQ)"(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil
+cdef void sormlq(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil:
+    _fortran_sormlq(side, trans, m, n, k, a, lda, tau, c, ldc, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sormql "F_FUNC(sormql,SORMQL)"(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil
+cdef void sormql(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil:
+    _fortran_sormql(side, trans, m, n, k, a, lda, tau, c, ldc, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sormqr "F_FUNC(sormqr,SORMQR)"(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil
+cdef void sormqr(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil:
+    _fortran_sormqr(side, trans, m, n, k, a, lda, tau, c, ldc, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sormr2 "F_FUNC(sormr2,SORMR2)"(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *info) nogil
+cdef void sormr2(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *info) nogil:
+    _fortran_sormr2(side, trans, m, n, k, a, lda, tau, c, ldc, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sormr3 "F_FUNC(sormr3,SORMR3)"(char *side, char *trans, int *m, int *n, int *k, int *l, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *info) nogil
+cdef void sormr3(char *side, char *trans, int *m, int *n, int *k, int *l, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *info) nogil:
+    _fortran_sormr3(side, trans, m, n, k, l, a, lda, tau, c, ldc, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sormrq "F_FUNC(sormrq,SORMRQ)"(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil
+cdef void sormrq(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil:
+    _fortran_sormrq(side, trans, m, n, k, a, lda, tau, c, ldc, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sormrz "F_FUNC(sormrz,SORMRZ)"(char *side, char *trans, int *m, int *n, int *k, int *l, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil
+cdef void sormrz(char *side, char *trans, int *m, int *n, int *k, int *l, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil:
+    _fortran_sormrz(side, trans, m, n, k, l, a, lda, tau, c, ldc, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sormtr "F_FUNC(sormtr,SORMTR)"(char *side, char *uplo, char *trans, int *m, int *n, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil
+cdef void sormtr(char *side, char *uplo, char *trans, int *m, int *n, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil:
+    _fortran_sormtr(side, uplo, trans, m, n, a, lda, tau, c, ldc, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_spbcon "F_FUNC(spbcon,SPBCON)"(char *uplo, int *n, int *kd, s *ab, int *ldab, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil
+cdef void spbcon(char *uplo, int *n, int *kd, s *ab, int *ldab, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil:
+    _fortran_spbcon(uplo, n, kd, ab, ldab, anorm, rcond, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_spbequ "F_FUNC(spbequ,SPBEQU)"(char *uplo, int *n, int *kd, s *ab, int *ldab, s *s, s *scond, s *amax, int *info) nogil
+cdef void spbequ(char *uplo, int *n, int *kd, s *ab, int *ldab, s *s, s *scond, s *amax, int *info) nogil:
+    _fortran_spbequ(uplo, n, kd, ab, ldab, s, scond, amax, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_spbrfs "F_FUNC(spbrfs,SPBRFS)"(char *uplo, int *n, int *kd, int *nrhs, s *ab, int *ldab, s *afb, int *ldafb, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+cdef void spbrfs(char *uplo, int *n, int *kd, int *nrhs, s *ab, int *ldab, s *afb, int *ldafb, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil:
+    _fortran_spbrfs(uplo, n, kd, nrhs, ab, ldab, afb, ldafb, b, ldb, x, ldx, ferr, berr, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_spbstf "F_FUNC(spbstf,SPBSTF)"(char *uplo, int *n, int *kd, s *ab, int *ldab, int *info) nogil
+cdef void spbstf(char *uplo, int *n, int *kd, s *ab, int *ldab, int *info) nogil:
+    _fortran_spbstf(uplo, n, kd, ab, ldab, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_spbsv "F_FUNC(spbsv,SPBSV)"(char *uplo, int *n, int *kd, int *nrhs, s *ab, int *ldab, s *b, int *ldb, int *info) nogil
+cdef void spbsv(char *uplo, int *n, int *kd, int *nrhs, s *ab, int *ldab, s *b, int *ldb, int *info) nogil:
+    _fortran_spbsv(uplo, n, kd, nrhs, ab, ldab, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_spbsvx "F_FUNC(spbsvx,SPBSVX)"(char *fact, char *uplo, int *n, int *kd, int *nrhs, s *ab, int *ldab, s *afb, int *ldafb, char *equed, s *s, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+cdef void spbsvx(char *fact, char *uplo, int *n, int *kd, int *nrhs, s *ab, int *ldab, s *afb, int *ldafb, char *equed, s *s, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil:
+    _fortran_spbsvx(fact, uplo, n, kd, nrhs, ab, ldab, afb, ldafb, equed, s, b, ldb, x, ldx, rcond, ferr, berr, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_spbtf2 "F_FUNC(spbtf2,SPBTF2)"(char *uplo, int *n, int *kd, s *ab, int *ldab, int *info) nogil
+cdef void spbtf2(char *uplo, int *n, int *kd, s *ab, int *ldab, int *info) nogil:
+    _fortran_spbtf2(uplo, n, kd, ab, ldab, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_spbtrf "F_FUNC(spbtrf,SPBTRF)"(char *uplo, int *n, int *kd, s *ab, int *ldab, int *info) nogil
+cdef void spbtrf(char *uplo, int *n, int *kd, s *ab, int *ldab, int *info) nogil:
+    _fortran_spbtrf(uplo, n, kd, ab, ldab, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_spbtrs "F_FUNC(spbtrs,SPBTRS)"(char *uplo, int *n, int *kd, int *nrhs, s *ab, int *ldab, s *b, int *ldb, int *info) nogil
+cdef void spbtrs(char *uplo, int *n, int *kd, int *nrhs, s *ab, int *ldab, s *b, int *ldb, int *info) nogil:
+    _fortran_spbtrs(uplo, n, kd, nrhs, ab, ldab, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_spftrf "F_FUNC(spftrf,SPFTRF)"(char *transr, char *uplo, int *n, s *a, int *info) nogil
+cdef void spftrf(char *transr, char *uplo, int *n, s *a, int *info) nogil:
+    _fortran_spftrf(transr, uplo, n, a, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_spftri "F_FUNC(spftri,SPFTRI)"(char *transr, char *uplo, int *n, s *a, int *info) nogil
+cdef void spftri(char *transr, char *uplo, int *n, s *a, int *info) nogil:
+    _fortran_spftri(transr, uplo, n, a, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_spftrs "F_FUNC(spftrs,SPFTRS)"(char *transr, char *uplo, int *n, int *nrhs, s *a, s *b, int *ldb, int *info) nogil
+cdef void spftrs(char *transr, char *uplo, int *n, int *nrhs, s *a, s *b, int *ldb, int *info) nogil:
+    _fortran_spftrs(transr, uplo, n, nrhs, a, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_spocon "F_FUNC(spocon,SPOCON)"(char *uplo, int *n, s *a, int *lda, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil
+cdef void spocon(char *uplo, int *n, s *a, int *lda, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil:
+    _fortran_spocon(uplo, n, a, lda, anorm, rcond, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_spoequ "F_FUNC(spoequ,SPOEQU)"(int *n, s *a, int *lda, s *s, s *scond, s *amax, int *info) nogil
+cdef void spoequ(int *n, s *a, int *lda, s *s, s *scond, s *amax, int *info) nogil:
+    _fortran_spoequ(n, a, lda, s, scond, amax, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_spoequb "F_FUNC(spoequb,SPOEQUB)"(int *n, s *a, int *lda, s *s, s *scond, s *amax, int *info) nogil
+cdef void spoequb(int *n, s *a, int *lda, s *s, s *scond, s *amax, int *info) nogil:
+    _fortran_spoequb(n, a, lda, s, scond, amax, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sporfs "F_FUNC(sporfs,SPORFS)"(char *uplo, int *n, int *nrhs, s *a, int *lda, s *af, int *ldaf, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+cdef void sporfs(char *uplo, int *n, int *nrhs, s *a, int *lda, s *af, int *ldaf, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil:
+    _fortran_sporfs(uplo, n, nrhs, a, lda, af, ldaf, b, ldb, x, ldx, ferr, berr, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sposv "F_FUNC(sposv,SPOSV)"(char *uplo, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, int *info) nogil
+cdef void sposv(char *uplo, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, int *info) nogil:
+    _fortran_sposv(uplo, n, nrhs, a, lda, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sposvx "F_FUNC(sposvx,SPOSVX)"(char *fact, char *uplo, int *n, int *nrhs, s *a, int *lda, s *af, int *ldaf, char *equed, s *s, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+cdef void sposvx(char *fact, char *uplo, int *n, int *nrhs, s *a, int *lda, s *af, int *ldaf, char *equed, s *s, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil:
+    _fortran_sposvx(fact, uplo, n, nrhs, a, lda, af, ldaf, equed, s, b, ldb, x, ldx, rcond, ferr, berr, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_spotf2 "F_FUNC(spotf2,SPOTF2)"(char *uplo, int *n, s *a, int *lda, int *info) nogil
+cdef void spotf2(char *uplo, int *n, s *a, int *lda, int *info) nogil:
+    _fortran_spotf2(uplo, n, a, lda, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_spotrf "F_FUNC(spotrf,SPOTRF)"(char *uplo, int *n, s *a, int *lda, int *info) nogil
+cdef void spotrf(char *uplo, int *n, s *a, int *lda, int *info) nogil:
+    _fortran_spotrf(uplo, n, a, lda, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_spotri "F_FUNC(spotri,SPOTRI)"(char *uplo, int *n, s *a, int *lda, int *info) nogil
+cdef void spotri(char *uplo, int *n, s *a, int *lda, int *info) nogil:
+    _fortran_spotri(uplo, n, a, lda, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_spotrs "F_FUNC(spotrs,SPOTRS)"(char *uplo, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, int *info) nogil
+cdef void spotrs(char *uplo, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, int *info) nogil:
+    _fortran_spotrs(uplo, n, nrhs, a, lda, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sppcon "F_FUNC(sppcon,SPPCON)"(char *uplo, int *n, s *ap, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil
+cdef void sppcon(char *uplo, int *n, s *ap, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil:
+    _fortran_sppcon(uplo, n, ap, anorm, rcond, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sppequ "F_FUNC(sppequ,SPPEQU)"(char *uplo, int *n, s *ap, s *s, s *scond, s *amax, int *info) nogil
+cdef void sppequ(char *uplo, int *n, s *ap, s *s, s *scond, s *amax, int *info) nogil:
+    _fortran_sppequ(uplo, n, ap, s, scond, amax, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_spprfs "F_FUNC(spprfs,SPPRFS)"(char *uplo, int *n, int *nrhs, s *ap, s *afp, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+cdef void spprfs(char *uplo, int *n, int *nrhs, s *ap, s *afp, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil:
+    _fortran_spprfs(uplo, n, nrhs, ap, afp, b, ldb, x, ldx, ferr, berr, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sppsv "F_FUNC(sppsv,SPPSV)"(char *uplo, int *n, int *nrhs, s *ap, s *b, int *ldb, int *info) nogil
+cdef void sppsv(char *uplo, int *n, int *nrhs, s *ap, s *b, int *ldb, int *info) nogil:
+    _fortran_sppsv(uplo, n, nrhs, ap, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sppsvx "F_FUNC(sppsvx,SPPSVX)"(char *fact, char *uplo, int *n, int *nrhs, s *ap, s *afp, char *equed, s *s, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+cdef void sppsvx(char *fact, char *uplo, int *n, int *nrhs, s *ap, s *afp, char *equed, s *s, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil:
+    _fortran_sppsvx(fact, uplo, n, nrhs, ap, afp, equed, s, b, ldb, x, ldx, rcond, ferr, berr, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_spptrf "F_FUNC(spptrf,SPPTRF)"(char *uplo, int *n, s *ap, int *info) nogil
+cdef void spptrf(char *uplo, int *n, s *ap, int *info) nogil:
+    _fortran_spptrf(uplo, n, ap, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_spptri "F_FUNC(spptri,SPPTRI)"(char *uplo, int *n, s *ap, int *info) nogil
+cdef void spptri(char *uplo, int *n, s *ap, int *info) nogil:
+    _fortran_spptri(uplo, n, ap, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_spptrs "F_FUNC(spptrs,SPPTRS)"(char *uplo, int *n, int *nrhs, s *ap, s *b, int *ldb, int *info) nogil
+cdef void spptrs(char *uplo, int *n, int *nrhs, s *ap, s *b, int *ldb, int *info) nogil:
+    _fortran_spptrs(uplo, n, nrhs, ap, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_spstf2 "F_FUNC(spstf2,SPSTF2)"(char *uplo, int *n, s *a, int *lda, int *piv, int *rank, s *tol, s *work, int *info) nogil
+cdef void spstf2(char *uplo, int *n, s *a, int *lda, int *piv, int *rank, s *tol, s *work, int *info) nogil:
+    _fortran_spstf2(uplo, n, a, lda, piv, rank, tol, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_spstrf "F_FUNC(spstrf,SPSTRF)"(char *uplo, int *n, s *a, int *lda, int *piv, int *rank, s *tol, s *work, int *info) nogil
+cdef void spstrf(char *uplo, int *n, s *a, int *lda, int *piv, int *rank, s *tol, s *work, int *info) nogil:
+    _fortran_spstrf(uplo, n, a, lda, piv, rank, tol, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sptcon "F_FUNC(sptcon,SPTCON)"(int *n, s *d, s *e, s *anorm, s *rcond, s *work, int *info) nogil
+cdef void sptcon(int *n, s *d, s *e, s *anorm, s *rcond, s *work, int *info) nogil:
+    _fortran_sptcon(n, d, e, anorm, rcond, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_spteqr "F_FUNC(spteqr,SPTEQR)"(char *compz, int *n, s *d, s *e, s *z, int *ldz, s *work, int *info) nogil
+cdef void spteqr(char *compz, int *n, s *d, s *e, s *z, int *ldz, s *work, int *info) nogil:
+    _fortran_spteqr(compz, n, d, e, z, ldz, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sptrfs "F_FUNC(sptrfs,SPTRFS)"(int *n, int *nrhs, s *d, s *e, s *df, s *ef, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *info) nogil
+cdef void sptrfs(int *n, int *nrhs, s *d, s *e, s *df, s *ef, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *info) nogil:
+    _fortran_sptrfs(n, nrhs, d, e, df, ef, b, ldb, x, ldx, ferr, berr, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sptsv "F_FUNC(sptsv,SPTSV)"(int *n, int *nrhs, s *d, s *e, s *b, int *ldb, int *info) nogil
+cdef void sptsv(int *n, int *nrhs, s *d, s *e, s *b, int *ldb, int *info) nogil:
+    _fortran_sptsv(n, nrhs, d, e, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sptsvx "F_FUNC(sptsvx,SPTSVX)"(char *fact, int *n, int *nrhs, s *d, s *e, s *df, s *ef, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *info) nogil
+cdef void sptsvx(char *fact, int *n, int *nrhs, s *d, s *e, s *df, s *ef, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *info) nogil:
+    _fortran_sptsvx(fact, n, nrhs, d, e, df, ef, b, ldb, x, ldx, rcond, ferr, berr, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_spttrf "F_FUNC(spttrf,SPTTRF)"(int *n, s *d, s *e, int *info) nogil
+cdef void spttrf(int *n, s *d, s *e, int *info) nogil:
+    _fortran_spttrf(n, d, e, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_spttrs "F_FUNC(spttrs,SPTTRS)"(int *n, int *nrhs, s *d, s *e, s *b, int *ldb, int *info) nogil
+cdef void spttrs(int *n, int *nrhs, s *d, s *e, s *b, int *ldb, int *info) nogil:
+    _fortran_spttrs(n, nrhs, d, e, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sptts2 "F_FUNC(sptts2,SPTTS2)"(int *n, int *nrhs, s *d, s *e, s *b, int *ldb) nogil
+cdef void sptts2(int *n, int *nrhs, s *d, s *e, s *b, int *ldb) nogil:
+    _fortran_sptts2(n, nrhs, d, e, b, ldb)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_srscl "F_FUNC(srscl,SRSCL)"(int *n, s *sa, s *sx, int *incx) nogil
+cdef void srscl(int *n, s *sa, s *sx, int *incx) nogil:
+    _fortran_srscl(n, sa, sx, incx)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssbev "F_FUNC(ssbev,SSBEV)"(char *jobz, char *uplo, int *n, int *kd, s *ab, int *ldab, s *w, s *z, int *ldz, s *work, int *info) nogil
+cdef void ssbev(char *jobz, char *uplo, int *n, int *kd, s *ab, int *ldab, s *w, s *z, int *ldz, s *work, int *info) nogil:
+    _fortran_ssbev(jobz, uplo, n, kd, ab, ldab, w, z, ldz, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssbevd "F_FUNC(ssbevd,SSBEVD)"(char *jobz, char *uplo, int *n, int *kd, s *ab, int *ldab, s *w, s *z, int *ldz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+cdef void ssbevd(char *jobz, char *uplo, int *n, int *kd, s *ab, int *ldab, s *w, s *z, int *ldz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_ssbevd(jobz, uplo, n, kd, ab, ldab, w, z, ldz, work, lwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssbevx "F_FUNC(ssbevx,SSBEVX)"(char *jobz, char *range, char *uplo, int *n, int *kd, s *ab, int *ldab, s *q, int *ldq, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil
+cdef void ssbevx(char *jobz, char *range, char *uplo, int *n, int *kd, s *ab, int *ldab, s *q, int *ldq, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil:
+    _fortran_ssbevx(jobz, range, uplo, n, kd, ab, ldab, q, ldq, vl, vu, il, iu, abstol, m, w, z, ldz, work, iwork, ifail, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssbgst "F_FUNC(ssbgst,SSBGST)"(char *vect, char *uplo, int *n, int *ka, int *kb, s *ab, int *ldab, s *bb, int *ldbb, s *x, int *ldx, s *work, int *info) nogil
+cdef void ssbgst(char *vect, char *uplo, int *n, int *ka, int *kb, s *ab, int *ldab, s *bb, int *ldbb, s *x, int *ldx, s *work, int *info) nogil:
+    _fortran_ssbgst(vect, uplo, n, ka, kb, ab, ldab, bb, ldbb, x, ldx, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssbgv "F_FUNC(ssbgv,SSBGV)"(char *jobz, char *uplo, int *n, int *ka, int *kb, s *ab, int *ldab, s *bb, int *ldbb, s *w, s *z, int *ldz, s *work, int *info) nogil
+cdef void ssbgv(char *jobz, char *uplo, int *n, int *ka, int *kb, s *ab, int *ldab, s *bb, int *ldbb, s *w, s *z, int *ldz, s *work, int *info) nogil:
+    _fortran_ssbgv(jobz, uplo, n, ka, kb, ab, ldab, bb, ldbb, w, z, ldz, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssbgvd "F_FUNC(ssbgvd,SSBGVD)"(char *jobz, char *uplo, int *n, int *ka, int *kb, s *ab, int *ldab, s *bb, int *ldbb, s *w, s *z, int *ldz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+cdef void ssbgvd(char *jobz, char *uplo, int *n, int *ka, int *kb, s *ab, int *ldab, s *bb, int *ldbb, s *w, s *z, int *ldz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_ssbgvd(jobz, uplo, n, ka, kb, ab, ldab, bb, ldbb, w, z, ldz, work, lwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssbgvx "F_FUNC(ssbgvx,SSBGVX)"(char *jobz, char *range, char *uplo, int *n, int *ka, int *kb, s *ab, int *ldab, s *bb, int *ldbb, s *q, int *ldq, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil
+cdef void ssbgvx(char *jobz, char *range, char *uplo, int *n, int *ka, int *kb, s *ab, int *ldab, s *bb, int *ldbb, s *q, int *ldq, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil:
+    _fortran_ssbgvx(jobz, range, uplo, n, ka, kb, ab, ldab, bb, ldbb, q, ldq, vl, vu, il, iu, abstol, m, w, z, ldz, work, iwork, ifail, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssbtrd "F_FUNC(ssbtrd,SSBTRD)"(char *vect, char *uplo, int *n, int *kd, s *ab, int *ldab, s *d, s *e, s *q, int *ldq, s *work, int *info) nogil
+cdef void ssbtrd(char *vect, char *uplo, int *n, int *kd, s *ab, int *ldab, s *d, s *e, s *q, int *ldq, s *work, int *info) nogil:
+    _fortran_ssbtrd(vect, uplo, n, kd, ab, ldab, d, e, q, ldq, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssfrk "F_FUNC(ssfrk,SSFRK)"(char *transr, char *uplo, char *trans, int *n, int *k, s *alpha, s *a, int *lda, s *beta, s *c) nogil
+cdef void ssfrk(char *transr, char *uplo, char *trans, int *n, int *k, s *alpha, s *a, int *lda, s *beta, s *c) nogil:
+    _fortran_ssfrk(transr, uplo, trans, n, k, alpha, a, lda, beta, c)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sspcon "F_FUNC(sspcon,SSPCON)"(char *uplo, int *n, s *ap, int *ipiv, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil
+cdef void sspcon(char *uplo, int *n, s *ap, int *ipiv, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil:
+    _fortran_sspcon(uplo, n, ap, ipiv, anorm, rcond, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sspev "F_FUNC(sspev,SSPEV)"(char *jobz, char *uplo, int *n, s *ap, s *w, s *z, int *ldz, s *work, int *info) nogil
+cdef void sspev(char *jobz, char *uplo, int *n, s *ap, s *w, s *z, int *ldz, s *work, int *info) nogil:
+    _fortran_sspev(jobz, uplo, n, ap, w, z, ldz, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sspevd "F_FUNC(sspevd,SSPEVD)"(char *jobz, char *uplo, int *n, s *ap, s *w, s *z, int *ldz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+cdef void sspevd(char *jobz, char *uplo, int *n, s *ap, s *w, s *z, int *ldz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_sspevd(jobz, uplo, n, ap, w, z, ldz, work, lwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sspevx "F_FUNC(sspevx,SSPEVX)"(char *jobz, char *range, char *uplo, int *n, s *ap, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil
+cdef void sspevx(char *jobz, char *range, char *uplo, int *n, s *ap, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil:
+    _fortran_sspevx(jobz, range, uplo, n, ap, vl, vu, il, iu, abstol, m, w, z, ldz, work, iwork, ifail, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sspgst "F_FUNC(sspgst,SSPGST)"(int *itype, char *uplo, int *n, s *ap, s *bp, int *info) nogil
+cdef void sspgst(int *itype, char *uplo, int *n, s *ap, s *bp, int *info) nogil:
+    _fortran_sspgst(itype, uplo, n, ap, bp, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sspgv "F_FUNC(sspgv,SSPGV)"(int *itype, char *jobz, char *uplo, int *n, s *ap, s *bp, s *w, s *z, int *ldz, s *work, int *info) nogil
+cdef void sspgv(int *itype, char *jobz, char *uplo, int *n, s *ap, s *bp, s *w, s *z, int *ldz, s *work, int *info) nogil:
+    _fortran_sspgv(itype, jobz, uplo, n, ap, bp, w, z, ldz, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sspgvd "F_FUNC(sspgvd,SSPGVD)"(int *itype, char *jobz, char *uplo, int *n, s *ap, s *bp, s *w, s *z, int *ldz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+cdef void sspgvd(int *itype, char *jobz, char *uplo, int *n, s *ap, s *bp, s *w, s *z, int *ldz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_sspgvd(itype, jobz, uplo, n, ap, bp, w, z, ldz, work, lwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sspgvx "F_FUNC(sspgvx,SSPGVX)"(int *itype, char *jobz, char *range, char *uplo, int *n, s *ap, s *bp, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil
+cdef void sspgvx(int *itype, char *jobz, char *range, char *uplo, int *n, s *ap, s *bp, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil:
+    _fortran_sspgvx(itype, jobz, range, uplo, n, ap, bp, vl, vu, il, iu, abstol, m, w, z, ldz, work, iwork, ifail, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssprfs "F_FUNC(ssprfs,SSPRFS)"(char *uplo, int *n, int *nrhs, s *ap, s *afp, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+cdef void ssprfs(char *uplo, int *n, int *nrhs, s *ap, s *afp, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil:
+    _fortran_ssprfs(uplo, n, nrhs, ap, afp, ipiv, b, ldb, x, ldx, ferr, berr, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sspsv "F_FUNC(sspsv,SSPSV)"(char *uplo, int *n, int *nrhs, s *ap, int *ipiv, s *b, int *ldb, int *info) nogil
+cdef void sspsv(char *uplo, int *n, int *nrhs, s *ap, int *ipiv, s *b, int *ldb, int *info) nogil:
+    _fortran_sspsv(uplo, n, nrhs, ap, ipiv, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sspsvx "F_FUNC(sspsvx,SSPSVX)"(char *fact, char *uplo, int *n, int *nrhs, s *ap, s *afp, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+cdef void sspsvx(char *fact, char *uplo, int *n, int *nrhs, s *ap, s *afp, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil:
+    _fortran_sspsvx(fact, uplo, n, nrhs, ap, afp, ipiv, b, ldb, x, ldx, rcond, ferr, berr, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssptrd "F_FUNC(ssptrd,SSPTRD)"(char *uplo, int *n, s *ap, s *d, s *e, s *tau, int *info) nogil
+cdef void ssptrd(char *uplo, int *n, s *ap, s *d, s *e, s *tau, int *info) nogil:
+    _fortran_ssptrd(uplo, n, ap, d, e, tau, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssptrf "F_FUNC(ssptrf,SSPTRF)"(char *uplo, int *n, s *ap, int *ipiv, int *info) nogil
+cdef void ssptrf(char *uplo, int *n, s *ap, int *ipiv, int *info) nogil:
+    _fortran_ssptrf(uplo, n, ap, ipiv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssptri "F_FUNC(ssptri,SSPTRI)"(char *uplo, int *n, s *ap, int *ipiv, s *work, int *info) nogil
+cdef void ssptri(char *uplo, int *n, s *ap, int *ipiv, s *work, int *info) nogil:
+    _fortran_ssptri(uplo, n, ap, ipiv, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssptrs "F_FUNC(ssptrs,SSPTRS)"(char *uplo, int *n, int *nrhs, s *ap, int *ipiv, s *b, int *ldb, int *info) nogil
+cdef void ssptrs(char *uplo, int *n, int *nrhs, s *ap, int *ipiv, s *b, int *ldb, int *info) nogil:
+    _fortran_ssptrs(uplo, n, nrhs, ap, ipiv, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sstebz "F_FUNC(sstebz,SSTEBZ)"(char *range, char *order, int *n, s *vl, s *vu, int *il, int *iu, s *abstol, s *d, s *e, int *m, int *nsplit, s *w, int *iblock, int *isplit, s *work, int *iwork, int *info) nogil
+cdef void sstebz(char *range, char *order, int *n, s *vl, s *vu, int *il, int *iu, s *abstol, s *d, s *e, int *m, int *nsplit, s *w, int *iblock, int *isplit, s *work, int *iwork, int *info) nogil:
+    _fortran_sstebz(range, order, n, vl, vu, il, iu, abstol, d, e, m, nsplit, w, iblock, isplit, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sstedc "F_FUNC(sstedc,SSTEDC)"(char *compz, int *n, s *d, s *e, s *z, int *ldz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+cdef void sstedc(char *compz, int *n, s *d, s *e, s *z, int *ldz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_sstedc(compz, n, d, e, z, ldz, work, lwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sstegr "F_FUNC(sstegr,SSTEGR)"(char *jobz, char *range, int *n, s *d, s *e, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, int *isuppz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+cdef void sstegr(char *jobz, char *range, int *n, s *d, s *e, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, int *isuppz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_sstegr(jobz, range, n, d, e, vl, vu, il, iu, abstol, m, w, z, ldz, isuppz, work, lwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sstein "F_FUNC(sstein,SSTEIN)"(int *n, s *d, s *e, int *m, s *w, int *iblock, int *isplit, s *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil
+cdef void sstein(int *n, s *d, s *e, int *m, s *w, int *iblock, int *isplit, s *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil:
+    _fortran_sstein(n, d, e, m, w, iblock, isplit, z, ldz, work, iwork, ifail, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sstemr "F_FUNC(sstemr,SSTEMR)"(char *jobz, char *range, int *n, s *d, s *e, s *vl, s *vu, int *il, int *iu, int *m, s *w, s *z, int *ldz, int *nzc, int *isuppz, bint *tryrac, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+cdef void sstemr(char *jobz, char *range, int *n, s *d, s *e, s *vl, s *vu, int *il, int *iu, int *m, s *w, s *z, int *ldz, int *nzc, int *isuppz, bint *tryrac, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_sstemr(jobz, range, n, d, e, vl, vu, il, iu, m, w, z, ldz, nzc, isuppz, tryrac, work, lwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssteqr "F_FUNC(ssteqr,SSTEQR)"(char *compz, int *n, s *d, s *e, s *z, int *ldz, s *work, int *info) nogil
+cdef void ssteqr(char *compz, int *n, s *d, s *e, s *z, int *ldz, s *work, int *info) nogil:
+    _fortran_ssteqr(compz, n, d, e, z, ldz, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssterf "F_FUNC(ssterf,SSTERF)"(int *n, s *d, s *e, int *info) nogil
+cdef void ssterf(int *n, s *d, s *e, int *info) nogil:
+    _fortran_ssterf(n, d, e, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sstev "F_FUNC(sstev,SSTEV)"(char *jobz, int *n, s *d, s *e, s *z, int *ldz, s *work, int *info) nogil
+cdef void sstev(char *jobz, int *n, s *d, s *e, s *z, int *ldz, s *work, int *info) nogil:
+    _fortran_sstev(jobz, n, d, e, z, ldz, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sstevd "F_FUNC(sstevd,SSTEVD)"(char *jobz, int *n, s *d, s *e, s *z, int *ldz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+cdef void sstevd(char *jobz, int *n, s *d, s *e, s *z, int *ldz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_sstevd(jobz, n, d, e, z, ldz, work, lwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sstevr "F_FUNC(sstevr,SSTEVR)"(char *jobz, char *range, int *n, s *d, s *e, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, int *isuppz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+cdef void sstevr(char *jobz, char *range, int *n, s *d, s *e, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, int *isuppz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_sstevr(jobz, range, n, d, e, vl, vu, il, iu, abstol, m, w, z, ldz, isuppz, work, lwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_sstevx "F_FUNC(sstevx,SSTEVX)"(char *jobz, char *range, int *n, s *d, s *e, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil
+cdef void sstevx(char *jobz, char *range, int *n, s *d, s *e, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil:
+    _fortran_sstevx(jobz, range, n, d, e, vl, vu, il, iu, abstol, m, w, z, ldz, work, iwork, ifail, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssycon "F_FUNC(ssycon,SSYCON)"(char *uplo, int *n, s *a, int *lda, int *ipiv, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil
+cdef void ssycon(char *uplo, int *n, s *a, int *lda, int *ipiv, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil:
+    _fortran_ssycon(uplo, n, a, lda, ipiv, anorm, rcond, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssyconv "F_FUNC(ssyconv,SSYCONV)"(char *uplo, char *way, int *n, s *a, int *lda, int *ipiv, s *work, int *info) nogil
+cdef void ssyconv(char *uplo, char *way, int *n, s *a, int *lda, int *ipiv, s *work, int *info) nogil:
+    _fortran_ssyconv(uplo, way, n, a, lda, ipiv, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssyequb "F_FUNC(ssyequb,SSYEQUB)"(char *uplo, int *n, s *a, int *lda, s *s, s *scond, s *amax, s *work, int *info) nogil
+cdef void ssyequb(char *uplo, int *n, s *a, int *lda, s *s, s *scond, s *amax, s *work, int *info) nogil:
+    _fortran_ssyequb(uplo, n, a, lda, s, scond, amax, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssyev "F_FUNC(ssyev,SSYEV)"(char *jobz, char *uplo, int *n, s *a, int *lda, s *w, s *work, int *lwork, int *info) nogil
+cdef void ssyev(char *jobz, char *uplo, int *n, s *a, int *lda, s *w, s *work, int *lwork, int *info) nogil:
+    _fortran_ssyev(jobz, uplo, n, a, lda, w, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssyevd "F_FUNC(ssyevd,SSYEVD)"(char *jobz, char *uplo, int *n, s *a, int *lda, s *w, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+cdef void ssyevd(char *jobz, char *uplo, int *n, s *a, int *lda, s *w, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_ssyevd(jobz, uplo, n, a, lda, w, work, lwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssyevr "F_FUNC(ssyevr,SSYEVR)"(char *jobz, char *range, char *uplo, int *n, s *a, int *lda, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, int *isuppz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+cdef void ssyevr(char *jobz, char *range, char *uplo, int *n, s *a, int *lda, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, int *isuppz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_ssyevr(jobz, range, uplo, n, a, lda, vl, vu, il, iu, abstol, m, w, z, ldz, isuppz, work, lwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssyevx "F_FUNC(ssyevx,SSYEVX)"(char *jobz, char *range, char *uplo, int *n, s *a, int *lda, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *lwork, int *iwork, int *ifail, int *info) nogil
+cdef void ssyevx(char *jobz, char *range, char *uplo, int *n, s *a, int *lda, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *lwork, int *iwork, int *ifail, int *info) nogil:
+    _fortran_ssyevx(jobz, range, uplo, n, a, lda, vl, vu, il, iu, abstol, m, w, z, ldz, work, lwork, iwork, ifail, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssygs2 "F_FUNC(ssygs2,SSYGS2)"(int *itype, char *uplo, int *n, s *a, int *lda, s *b, int *ldb, int *info) nogil
+cdef void ssygs2(int *itype, char *uplo, int *n, s *a, int *lda, s *b, int *ldb, int *info) nogil:
+    _fortran_ssygs2(itype, uplo, n, a, lda, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssygst "F_FUNC(ssygst,SSYGST)"(int *itype, char *uplo, int *n, s *a, int *lda, s *b, int *ldb, int *info) nogil
+cdef void ssygst(int *itype, char *uplo, int *n, s *a, int *lda, s *b, int *ldb, int *info) nogil:
+    _fortran_ssygst(itype, uplo, n, a, lda, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssygv "F_FUNC(ssygv,SSYGV)"(int *itype, char *jobz, char *uplo, int *n, s *a, int *lda, s *b, int *ldb, s *w, s *work, int *lwork, int *info) nogil
+cdef void ssygv(int *itype, char *jobz, char *uplo, int *n, s *a, int *lda, s *b, int *ldb, s *w, s *work, int *lwork, int *info) nogil:
+    _fortran_ssygv(itype, jobz, uplo, n, a, lda, b, ldb, w, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssygvd "F_FUNC(ssygvd,SSYGVD)"(int *itype, char *jobz, char *uplo, int *n, s *a, int *lda, s *b, int *ldb, s *w, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+cdef void ssygvd(int *itype, char *jobz, char *uplo, int *n, s *a, int *lda, s *b, int *ldb, s *w, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_ssygvd(itype, jobz, uplo, n, a, lda, b, ldb, w, work, lwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssygvx "F_FUNC(ssygvx,SSYGVX)"(int *itype, char *jobz, char *range, char *uplo, int *n, s *a, int *lda, s *b, int *ldb, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *lwork, int *iwork, int *ifail, int *info) nogil
+cdef void ssygvx(int *itype, char *jobz, char *range, char *uplo, int *n, s *a, int *lda, s *b, int *ldb, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *lwork, int *iwork, int *ifail, int *info) nogil:
+    _fortran_ssygvx(itype, jobz, range, uplo, n, a, lda, b, ldb, vl, vu, il, iu, abstol, m, w, z, ldz, work, lwork, iwork, ifail, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssyrfs "F_FUNC(ssyrfs,SSYRFS)"(char *uplo, int *n, int *nrhs, s *a, int *lda, s *af, int *ldaf, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+cdef void ssyrfs(char *uplo, int *n, int *nrhs, s *a, int *lda, s *af, int *ldaf, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil:
+    _fortran_ssyrfs(uplo, n, nrhs, a, lda, af, ldaf, ipiv, b, ldb, x, ldx, ferr, berr, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssysv "F_FUNC(ssysv,SSYSV)"(char *uplo, int *n, int *nrhs, s *a, int *lda, int *ipiv, s *b, int *ldb, s *work, int *lwork, int *info) nogil
+cdef void ssysv(char *uplo, int *n, int *nrhs, s *a, int *lda, int *ipiv, s *b, int *ldb, s *work, int *lwork, int *info) nogil:
+    _fortran_ssysv(uplo, n, nrhs, a, lda, ipiv, b, ldb, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssysvx "F_FUNC(ssysvx,SSYSVX)"(char *fact, char *uplo, int *n, int *nrhs, s *a, int *lda, s *af, int *ldaf, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *lwork, int *iwork, int *info) nogil
+cdef void ssysvx(char *fact, char *uplo, int *n, int *nrhs, s *a, int *lda, s *af, int *ldaf, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *lwork, int *iwork, int *info) nogil:
+    _fortran_ssysvx(fact, uplo, n, nrhs, a, lda, af, ldaf, ipiv, b, ldb, x, ldx, rcond, ferr, berr, work, lwork, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssyswapr "F_FUNC(ssyswapr,SSYSWAPR)"(char *uplo, int *n, s *a, int *lda, int *i1, int *i2) nogil
+cdef void ssyswapr(char *uplo, int *n, s *a, int *lda, int *i1, int *i2) nogil:
+    _fortran_ssyswapr(uplo, n, a, lda, i1, i2)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssytd2 "F_FUNC(ssytd2,SSYTD2)"(char *uplo, int *n, s *a, int *lda, s *d, s *e, s *tau, int *info) nogil
+cdef void ssytd2(char *uplo, int *n, s *a, int *lda, s *d, s *e, s *tau, int *info) nogil:
+    _fortran_ssytd2(uplo, n, a, lda, d, e, tau, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssytf2 "F_FUNC(ssytf2,SSYTF2)"(char *uplo, int *n, s *a, int *lda, int *ipiv, int *info) nogil
+cdef void ssytf2(char *uplo, int *n, s *a, int *lda, int *ipiv, int *info) nogil:
+    _fortran_ssytf2(uplo, n, a, lda, ipiv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssytrd "F_FUNC(ssytrd,SSYTRD)"(char *uplo, int *n, s *a, int *lda, s *d, s *e, s *tau, s *work, int *lwork, int *info) nogil
+cdef void ssytrd(char *uplo, int *n, s *a, int *lda, s *d, s *e, s *tau, s *work, int *lwork, int *info) nogil:
+    _fortran_ssytrd(uplo, n, a, lda, d, e, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssytrf "F_FUNC(ssytrf,SSYTRF)"(char *uplo, int *n, s *a, int *lda, int *ipiv, s *work, int *lwork, int *info) nogil
+cdef void ssytrf(char *uplo, int *n, s *a, int *lda, int *ipiv, s *work, int *lwork, int *info) nogil:
+    _fortran_ssytrf(uplo, n, a, lda, ipiv, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssytri "F_FUNC(ssytri,SSYTRI)"(char *uplo, int *n, s *a, int *lda, int *ipiv, s *work, int *info) nogil
+cdef void ssytri(char *uplo, int *n, s *a, int *lda, int *ipiv, s *work, int *info) nogil:
+    _fortran_ssytri(uplo, n, a, lda, ipiv, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssytri2 "F_FUNC(ssytri2,SSYTRI2)"(char *uplo, int *n, s *a, int *lda, int *ipiv, s *work, int *lwork, int *info) nogil
+cdef void ssytri2(char *uplo, int *n, s *a, int *lda, int *ipiv, s *work, int *lwork, int *info) nogil:
+    _fortran_ssytri2(uplo, n, a, lda, ipiv, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssytri2x "F_FUNC(ssytri2x,SSYTRI2X)"(char *uplo, int *n, s *a, int *lda, int *ipiv, s *work, int *nb, int *info) nogil
+cdef void ssytri2x(char *uplo, int *n, s *a, int *lda, int *ipiv, s *work, int *nb, int *info) nogil:
+    _fortran_ssytri2x(uplo, n, a, lda, ipiv, work, nb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssytrs "F_FUNC(ssytrs,SSYTRS)"(char *uplo, int *n, int *nrhs, s *a, int *lda, int *ipiv, s *b, int *ldb, int *info) nogil
+cdef void ssytrs(char *uplo, int *n, int *nrhs, s *a, int *lda, int *ipiv, s *b, int *ldb, int *info) nogil:
+    _fortran_ssytrs(uplo, n, nrhs, a, lda, ipiv, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ssytrs2 "F_FUNC(ssytrs2,SSYTRS2)"(char *uplo, int *n, int *nrhs, s *a, int *lda, int *ipiv, s *b, int *ldb, s *work, int *info) nogil
+cdef void ssytrs2(char *uplo, int *n, int *nrhs, s *a, int *lda, int *ipiv, s *b, int *ldb, s *work, int *info) nogil:
+    _fortran_ssytrs2(uplo, n, nrhs, a, lda, ipiv, b, ldb, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_stbcon "F_FUNC(stbcon,STBCON)"(char *norm, char *uplo, char *diag, int *n, int *kd, s *ab, int *ldab, s *rcond, s *work, int *iwork, int *info) nogil
+cdef void stbcon(char *norm, char *uplo, char *diag, int *n, int *kd, s *ab, int *ldab, s *rcond, s *work, int *iwork, int *info) nogil:
+    _fortran_stbcon(norm, uplo, diag, n, kd, ab, ldab, rcond, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_stbrfs "F_FUNC(stbrfs,STBRFS)"(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, s *ab, int *ldab, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+cdef void stbrfs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, s *ab, int *ldab, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil:
+    _fortran_stbrfs(uplo, trans, diag, n, kd, nrhs, ab, ldab, b, ldb, x, ldx, ferr, berr, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_stbtrs "F_FUNC(stbtrs,STBTRS)"(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, s *ab, int *ldab, s *b, int *ldb, int *info) nogil
+cdef void stbtrs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, s *ab, int *ldab, s *b, int *ldb, int *info) nogil:
+    _fortran_stbtrs(uplo, trans, diag, n, kd, nrhs, ab, ldab, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_stfsm "F_FUNC(stfsm,STFSM)"(char *transr, char *side, char *uplo, char *trans, char *diag, int *m, int *n, s *alpha, s *a, s *b, int *ldb) nogil
+cdef void stfsm(char *transr, char *side, char *uplo, char *trans, char *diag, int *m, int *n, s *alpha, s *a, s *b, int *ldb) nogil:
+    _fortran_stfsm(transr, side, uplo, trans, diag, m, n, alpha, a, b, ldb)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_stftri "F_FUNC(stftri,STFTRI)"(char *transr, char *uplo, char *diag, int *n, s *a, int *info) nogil
+cdef void stftri(char *transr, char *uplo, char *diag, int *n, s *a, int *info) nogil:
+    _fortran_stftri(transr, uplo, diag, n, a, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_stfttp "F_FUNC(stfttp,STFTTP)"(char *transr, char *uplo, int *n, s *arf, s *ap, int *info) nogil
+cdef void stfttp(char *transr, char *uplo, int *n, s *arf, s *ap, int *info) nogil:
+    _fortran_stfttp(transr, uplo, n, arf, ap, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_stfttr "F_FUNC(stfttr,STFTTR)"(char *transr, char *uplo, int *n, s *arf, s *a, int *lda, int *info) nogil
+cdef void stfttr(char *transr, char *uplo, int *n, s *arf, s *a, int *lda, int *info) nogil:
+    _fortran_stfttr(transr, uplo, n, arf, a, lda, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_stgevc "F_FUNC(stgevc,STGEVC)"(char *side, char *howmny, bint *select, int *n, s *s, int *lds, s *p, int *ldp, s *vl, int *ldvl, s *vr, int *ldvr, int *mm, int *m, s *work, int *info) nogil
+cdef void stgevc(char *side, char *howmny, bint *select, int *n, s *s, int *lds, s *p, int *ldp, s *vl, int *ldvl, s *vr, int *ldvr, int *mm, int *m, s *work, int *info) nogil:
+    _fortran_stgevc(side, howmny, select, n, s, lds, p, ldp, vl, ldvl, vr, ldvr, mm, m, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_stgex2 "F_FUNC(stgex2,STGEX2)"(bint *wantq, bint *wantz, int *n, s *a, int *lda, s *b, int *ldb, s *q, int *ldq, s *z, int *ldz, int *j1, int *n1, int *n2, s *work, int *lwork, int *info) nogil
+cdef void stgex2(bint *wantq, bint *wantz, int *n, s *a, int *lda, s *b, int *ldb, s *q, int *ldq, s *z, int *ldz, int *j1, int *n1, int *n2, s *work, int *lwork, int *info) nogil:
+    _fortran_stgex2(wantq, wantz, n, a, lda, b, ldb, q, ldq, z, ldz, j1, n1, n2, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_stgexc "F_FUNC(stgexc,STGEXC)"(bint *wantq, bint *wantz, int *n, s *a, int *lda, s *b, int *ldb, s *q, int *ldq, s *z, int *ldz, int *ifst, int *ilst, s *work, int *lwork, int *info) nogil
+cdef void stgexc(bint *wantq, bint *wantz, int *n, s *a, int *lda, s *b, int *ldb, s *q, int *ldq, s *z, int *ldz, int *ifst, int *ilst, s *work, int *lwork, int *info) nogil:
+    _fortran_stgexc(wantq, wantz, n, a, lda, b, ldb, q, ldq, z, ldz, ifst, ilst, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_stgsen "F_FUNC(stgsen,STGSEN)"(int *ijob, bint *wantq, bint *wantz, bint *select, int *n, s *a, int *lda, s *b, int *ldb, s *alphar, s *alphai, s *beta, s *q, int *ldq, s *z, int *ldz, int *m, s *pl, s *pr, s *dif, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+cdef void stgsen(int *ijob, bint *wantq, bint *wantz, bint *select, int *n, s *a, int *lda, s *b, int *ldb, s *alphar, s *alphai, s *beta, s *q, int *ldq, s *z, int *ldz, int *m, s *pl, s *pr, s *dif, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_stgsen(ijob, wantq, wantz, select, n, a, lda, b, ldb, alphar, alphai, beta, q, ldq, z, ldz, m, pl, pr, dif, work, lwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_stgsja "F_FUNC(stgsja,STGSJA)"(char *jobu, char *jobv, char *jobq, int *m, int *p, int *n, int *k, int *l, s *a, int *lda, s *b, int *ldb, s *tola, s *tolb, s *alpha, s *beta, s *u, int *ldu, s *v, int *ldv, s *q, int *ldq, s *work, int *ncycle, int *info) nogil
+cdef void stgsja(char *jobu, char *jobv, char *jobq, int *m, int *p, int *n, int *k, int *l, s *a, int *lda, s *b, int *ldb, s *tola, s *tolb, s *alpha, s *beta, s *u, int *ldu, s *v, int *ldv, s *q, int *ldq, s *work, int *ncycle, int *info) nogil:
+    _fortran_stgsja(jobu, jobv, jobq, m, p, n, k, l, a, lda, b, ldb, tola, tolb, alpha, beta, u, ldu, v, ldv, q, ldq, work, ncycle, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_stgsna "F_FUNC(stgsna,STGSNA)"(char *job, char *howmny, bint *select, int *n, s *a, int *lda, s *b, int *ldb, s *vl, int *ldvl, s *vr, int *ldvr, s *s, s *dif, int *mm, int *m, s *work, int *lwork, int *iwork, int *info) nogil
+cdef void stgsna(char *job, char *howmny, bint *select, int *n, s *a, int *lda, s *b, int *ldb, s *vl, int *ldvl, s *vr, int *ldvr, s *s, s *dif, int *mm, int *m, s *work, int *lwork, int *iwork, int *info) nogil:
+    _fortran_stgsna(job, howmny, select, n, a, lda, b, ldb, vl, ldvl, vr, ldvr, s, dif, mm, m, work, lwork, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_stgsy2 "F_FUNC(stgsy2,STGSY2)"(char *trans, int *ijob, int *m, int *n, s *a, int *lda, s *b, int *ldb, s *c, int *ldc, s *d, int *ldd, s *e, int *lde, s *f, int *ldf, s *scale, s *rdsum, s *rdscal, int *iwork, int *pq, int *info) nogil
+cdef void stgsy2(char *trans, int *ijob, int *m, int *n, s *a, int *lda, s *b, int *ldb, s *c, int *ldc, s *d, int *ldd, s *e, int *lde, s *f, int *ldf, s *scale, s *rdsum, s *rdscal, int *iwork, int *pq, int *info) nogil:
+    _fortran_stgsy2(trans, ijob, m, n, a, lda, b, ldb, c, ldc, d, ldd, e, lde, f, ldf, scale, rdsum, rdscal, iwork, pq, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_stgsyl "F_FUNC(stgsyl,STGSYL)"(char *trans, int *ijob, int *m, int *n, s *a, int *lda, s *b, int *ldb, s *c, int *ldc, s *d, int *ldd, s *e, int *lde, s *f, int *ldf, s *scale, s *dif, s *work, int *lwork, int *iwork, int *info) nogil
+cdef void stgsyl(char *trans, int *ijob, int *m, int *n, s *a, int *lda, s *b, int *ldb, s *c, int *ldc, s *d, int *ldd, s *e, int *lde, s *f, int *ldf, s *scale, s *dif, s *work, int *lwork, int *iwork, int *info) nogil:
+    _fortran_stgsyl(trans, ijob, m, n, a, lda, b, ldb, c, ldc, d, ldd, e, lde, f, ldf, scale, dif, work, lwork, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_stpcon "F_FUNC(stpcon,STPCON)"(char *norm, char *uplo, char *diag, int *n, s *ap, s *rcond, s *work, int *iwork, int *info) nogil
+cdef void stpcon(char *norm, char *uplo, char *diag, int *n, s *ap, s *rcond, s *work, int *iwork, int *info) nogil:
+    _fortran_stpcon(norm, uplo, diag, n, ap, rcond, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_stpmqrt "F_FUNC(stpmqrt,STPMQRT)"(char *side, char *trans, int *m, int *n, int *k, int *l, int *nb, s *v, int *ldv, s *t, int *ldt, s *a, int *lda, s *b, int *ldb, s *work, int *info) nogil
+cdef void stpmqrt(char *side, char *trans, int *m, int *n, int *k, int *l, int *nb, s *v, int *ldv, s *t, int *ldt, s *a, int *lda, s *b, int *ldb, s *work, int *info) nogil:
+    _fortran_stpmqrt(side, trans, m, n, k, l, nb, v, ldv, t, ldt, a, lda, b, ldb, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_stpqrt "F_FUNC(stpqrt,STPQRT)"(int *m, int *n, int *l, int *nb, s *a, int *lda, s *b, int *ldb, s *t, int *ldt, s *work, int *info) nogil
+cdef void stpqrt(int *m, int *n, int *l, int *nb, s *a, int *lda, s *b, int *ldb, s *t, int *ldt, s *work, int *info) nogil:
+    _fortran_stpqrt(m, n, l, nb, a, lda, b, ldb, t, ldt, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_stpqrt2 "F_FUNC(stpqrt2,STPQRT2)"(int *m, int *n, int *l, s *a, int *lda, s *b, int *ldb, s *t, int *ldt, int *info) nogil
+cdef void stpqrt2(int *m, int *n, int *l, s *a, int *lda, s *b, int *ldb, s *t, int *ldt, int *info) nogil:
+    _fortran_stpqrt2(m, n, l, a, lda, b, ldb, t, ldt, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_stprfb "F_FUNC(stprfb,STPRFB)"(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, s *v, int *ldv, s *t, int *ldt, s *a, int *lda, s *b, int *ldb, s *work, int *ldwork) nogil
+cdef void stprfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, s *v, int *ldv, s *t, int *ldt, s *a, int *lda, s *b, int *ldb, s *work, int *ldwork) nogil:
+    _fortran_stprfb(side, trans, direct, storev, m, n, k, l, v, ldv, t, ldt, a, lda, b, ldb, work, ldwork)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_stprfs "F_FUNC(stprfs,STPRFS)"(char *uplo, char *trans, char *diag, int *n, int *nrhs, s *ap, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+cdef void stprfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, s *ap, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil:
+    _fortran_stprfs(uplo, trans, diag, n, nrhs, ap, b, ldb, x, ldx, ferr, berr, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_stptri "F_FUNC(stptri,STPTRI)"(char *uplo, char *diag, int *n, s *ap, int *info) nogil
+cdef void stptri(char *uplo, char *diag, int *n, s *ap, int *info) nogil:
+    _fortran_stptri(uplo, diag, n, ap, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_stptrs "F_FUNC(stptrs,STPTRS)"(char *uplo, char *trans, char *diag, int *n, int *nrhs, s *ap, s *b, int *ldb, int *info) nogil
+cdef void stptrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, s *ap, s *b, int *ldb, int *info) nogil:
+    _fortran_stptrs(uplo, trans, diag, n, nrhs, ap, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_stpttf "F_FUNC(stpttf,STPTTF)"(char *transr, char *uplo, int *n, s *ap, s *arf, int *info) nogil
+cdef void stpttf(char *transr, char *uplo, int *n, s *ap, s *arf, int *info) nogil:
+    _fortran_stpttf(transr, uplo, n, ap, arf, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_stpttr "F_FUNC(stpttr,STPTTR)"(char *uplo, int *n, s *ap, s *a, int *lda, int *info) nogil
+cdef void stpttr(char *uplo, int *n, s *ap, s *a, int *lda, int *info) nogil:
+    _fortran_stpttr(uplo, n, ap, a, lda, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_strcon "F_FUNC(strcon,STRCON)"(char *norm, char *uplo, char *diag, int *n, s *a, int *lda, s *rcond, s *work, int *iwork, int *info) nogil
+cdef void strcon(char *norm, char *uplo, char *diag, int *n, s *a, int *lda, s *rcond, s *work, int *iwork, int *info) nogil:
+    _fortran_strcon(norm, uplo, diag, n, a, lda, rcond, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_strevc "F_FUNC(strevc,STREVC)"(char *side, char *howmny, bint *select, int *n, s *t, int *ldt, s *vl, int *ldvl, s *vr, int *ldvr, int *mm, int *m, s *work, int *info) nogil
+cdef void strevc(char *side, char *howmny, bint *select, int *n, s *t, int *ldt, s *vl, int *ldvl, s *vr, int *ldvr, int *mm, int *m, s *work, int *info) nogil:
+    _fortran_strevc(side, howmny, select, n, t, ldt, vl, ldvl, vr, ldvr, mm, m, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_strexc "F_FUNC(strexc,STREXC)"(char *compq, int *n, s *t, int *ldt, s *q, int *ldq, int *ifst, int *ilst, s *work, int *info) nogil
+cdef void strexc(char *compq, int *n, s *t, int *ldt, s *q, int *ldq, int *ifst, int *ilst, s *work, int *info) nogil:
+    _fortran_strexc(compq, n, t, ldt, q, ldq, ifst, ilst, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_strrfs "F_FUNC(strrfs,STRRFS)"(char *uplo, char *trans, char *diag, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil
+cdef void strrfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil:
+    _fortran_strrfs(uplo, trans, diag, n, nrhs, a, lda, b, ldb, x, ldx, ferr, berr, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_strsen "F_FUNC(strsen,STRSEN)"(char *job, char *compq, bint *select, int *n, s *t, int *ldt, s *q, int *ldq, s *wr, s *wi, int *m, s *s, s *sep, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+cdef void strsen(char *job, char *compq, bint *select, int *n, s *t, int *ldt, s *q, int *ldq, s *wr, s *wi, int *m, s *s, s *sep, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_strsen(job, compq, select, n, t, ldt, q, ldq, wr, wi, m, s, sep, work, lwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_strsna "F_FUNC(strsna,STRSNA)"(char *job, char *howmny, bint *select, int *n, s *t, int *ldt, s *vl, int *ldvl, s *vr, int *ldvr, s *s, s *sep, int *mm, int *m, s *work, int *ldwork, int *iwork, int *info) nogil
+cdef void strsna(char *job, char *howmny, bint *select, int *n, s *t, int *ldt, s *vl, int *ldvl, s *vr, int *ldvr, s *s, s *sep, int *mm, int *m, s *work, int *ldwork, int *iwork, int *info) nogil:
+    _fortran_strsna(job, howmny, select, n, t, ldt, vl, ldvl, vr, ldvr, s, sep, mm, m, work, ldwork, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_strsyl "F_FUNC(strsyl,STRSYL)"(char *trana, char *tranb, int *isgn, int *m, int *n, s *a, int *lda, s *b, int *ldb, s *c, int *ldc, s *scale, int *info) nogil
+cdef void strsyl(char *trana, char *tranb, int *isgn, int *m, int *n, s *a, int *lda, s *b, int *ldb, s *c, int *ldc, s *scale, int *info) nogil:
+    _fortran_strsyl(trana, tranb, isgn, m, n, a, lda, b, ldb, c, ldc, scale, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_strti2 "F_FUNC(strti2,STRTI2)"(char *uplo, char *diag, int *n, s *a, int *lda, int *info) nogil
+cdef void strti2(char *uplo, char *diag, int *n, s *a, int *lda, int *info) nogil:
+    _fortran_strti2(uplo, diag, n, a, lda, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_strtri "F_FUNC(strtri,STRTRI)"(char *uplo, char *diag, int *n, s *a, int *lda, int *info) nogil
+cdef void strtri(char *uplo, char *diag, int *n, s *a, int *lda, int *info) nogil:
+    _fortran_strtri(uplo, diag, n, a, lda, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_strtrs "F_FUNC(strtrs,STRTRS)"(char *uplo, char *trans, char *diag, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, int *info) nogil
+cdef void strtrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, int *info) nogil:
+    _fortran_strtrs(uplo, trans, diag, n, nrhs, a, lda, b, ldb, info)
+
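+# A minimal usage sketch, not part of the generated interface: solve an
+# upper-triangular system A @ X = B in single precision via the strtrs
+# wrapper above. The helper name and the Fortran-ordered memoryview
+# arguments are illustrative assumptions, not a SciPy API.
+cdef int _sketch_solve_upper_triangular(float[::1, :] a, float[::1, :] b) nogil:
+    cdef char uplo = ord('U'), trans = ord('N'), diag = ord('N')
+    cdef int n = a.shape[0], nrhs = b.shape[1]
+    cdef int lda = n, ldb = n, info = 0
+    strtrs(&uplo, &trans, &diag, &n, &nrhs, &a[0, 0], &lda,
+           &b[0, 0], &ldb, &info)
+    return info  # 0 on success; info > 0 flags an exactly-zero diagonal
+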
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_strttf "F_FUNC(strttf,STRTTF)"(char *transr, char *uplo, int *n, s *a, int *lda, s *arf, int *info) nogil
+cdef void strttf(char *transr, char *uplo, int *n, s *a, int *lda, s *arf, int *info) nogil:
+    _fortran_strttf(transr, uplo, n, a, lda, arf, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_strttp "F_FUNC(strttp,STRTTP)"(char *uplo, int *n, s *a, int *lda, s *ap, int *info) nogil
+cdef void strttp(char *uplo, int *n, s *a, int *lda, s *ap, int *info) nogil:
+    _fortran_strttp(uplo, n, a, lda, ap, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_stzrzf "F_FUNC(stzrzf,STZRZF)"(int *m, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil
+cdef void stzrzf(int *m, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil:
+    _fortran_stzrzf(m, n, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_xerbla_array "F_FUNC(xerbla_array,XERBLA_ARRAY)"(char *srname_array, int *srname_len, int *info) nogil
+cdef void xerbla_array(char *srname_array, int *srname_len, int *info) nogil:
+    _fortran_xerbla_array(srname_array, srname_len, info)
+
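+# From here on the wrappers cover the complex*16 ("z") routines. As in the
+# real-valued block above, the single-letter types s, d, c and z are the
+# float, double, float complex and double complex typedefs declared earlier
+# in this file; npy_complex64/npy_complex128 are the matching NumPy C types
+# used in the header declarations.
+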
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zbbcsd "F_FUNC(zbbcsd,ZBBCSD)"(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, int *m, int *p, int *q, d *theta, d *phi, npy_complex128 *u1, int *ldu1, npy_complex128 *u2, int *ldu2, npy_complex128 *v1t, int *ldv1t, npy_complex128 *v2t, int *ldv2t, d *b11d, d *b11e, d *b12d, d *b12e, d *b21d, d *b21e, d *b22d, d *b22e, d *rwork, int *lrwork, int *info) nogil
+cdef void zbbcsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, int *m, int *p, int *q, d *theta, d *phi, z *u1, int *ldu1, z *u2, int *ldu2, z *v1t, int *ldv1t, z *v2t, int *ldv2t, d *b11d, d *b11e, d *b12d, d *b12e, d *b21d, d *b21e, d *b22d, d *b22e, d *rwork, int *lrwork, int *info) nogil:
+    _fortran_zbbcsd(jobu1, jobu2, jobv1t, jobv2t, trans, m, p, q, theta, phi, u1, ldu1, u2, ldu2, v1t, ldv1t, v2t, ldv2t, b11d, b11e, b12d, b12e, b21d, b21e, b22d, b22e, rwork, lrwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zbdsqr "F_FUNC(zbdsqr,ZBDSQR)"(char *uplo, int *n, int *ncvt, int *nru, int *ncc, d *d, d *e, npy_complex128 *vt, int *ldvt, npy_complex128 *u, int *ldu, npy_complex128 *c, int *ldc, d *rwork, int *info) nogil
+cdef void zbdsqr(char *uplo, int *n, int *ncvt, int *nru, int *ncc, d *d, d *e, z *vt, int *ldvt, z *u, int *ldu, z *c, int *ldc, d *rwork, int *info) nogil:
+    _fortran_zbdsqr(uplo, n, ncvt, nru, ncc, d, e, vt, ldvt, u, ldu, c, ldc, rwork, info)
+
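+# zcgesv and zcposv are LAPACK's mixed-precision drivers: they factor in
+# single precision (hence the npy_complex64 swork buffer) and iteratively
+# refine the solution to double precision, reporting the refinement count
+# through iter.
+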
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zcgesv "F_FUNC(zcgesv,ZCGESV)"(int *n, int *nrhs, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, npy_complex128 *work, npy_complex64 *swork, d *rwork, int *iter, int *info) nogil
+cdef void zcgesv(int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, z *x, int *ldx, z *work, c *swork, d *rwork, int *iter, int *info) nogil:
+    _fortran_zcgesv(n, nrhs, a, lda, ipiv, b, ldb, x, ldx, work, swork, rwork, iter, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zcposv "F_FUNC(zcposv,ZCPOSV)"(char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, npy_complex128 *work, npy_complex64 *swork, d *rwork, int *iter, int *info) nogil
+cdef void zcposv(char *uplo, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, z *x, int *ldx, z *work, c *swork, d *rwork, int *iter, int *info) nogil:
+    _fortran_zcposv(uplo, n, nrhs, a, lda, b, ldb, x, ldx, work, swork, rwork, iter, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zdrscl "F_FUNC(zdrscl,ZDRSCL)"(int *n, d *sa, npy_complex128 *sx, int *incx) nogil
+cdef void zdrscl(int *n, d *sa, z *sx, int *incx) nogil:
+    _fortran_zdrscl(n, sa, sx, incx)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgbbrd "F_FUNC(zgbbrd,ZGBBRD)"(char *vect, int *m, int *n, int *ncc, int *kl, int *ku, npy_complex128 *ab, int *ldab, d *d, d *e, npy_complex128 *q, int *ldq, npy_complex128 *pt, int *ldpt, npy_complex128 *c, int *ldc, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void zgbbrd(char *vect, int *m, int *n, int *ncc, int *kl, int *ku, z *ab, int *ldab, d *d, d *e, z *q, int *ldq, z *pt, int *ldpt, z *c, int *ldc, z *work, d *rwork, int *info) nogil:
+    _fortran_zgbbrd(vect, m, n, ncc, kl, ku, ab, ldab, d, e, q, ldq, pt, ldpt, c, ldc, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgbcon "F_FUNC(zgbcon,ZGBCON)"(char *norm, int *n, int *kl, int *ku, npy_complex128 *ab, int *ldab, int *ipiv, d *anorm, d *rcond, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void zgbcon(char *norm, int *n, int *kl, int *ku, z *ab, int *ldab, int *ipiv, d *anorm, d *rcond, z *work, d *rwork, int *info) nogil:
+    _fortran_zgbcon(norm, n, kl, ku, ab, ldab, ipiv, anorm, rcond, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgbequ "F_FUNC(zgbequ,ZGBEQU)"(int *m, int *n, int *kl, int *ku, npy_complex128 *ab, int *ldab, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil
+cdef void zgbequ(int *m, int *n, int *kl, int *ku, z *ab, int *ldab, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil:
+    _fortran_zgbequ(m, n, kl, ku, ab, ldab, r, c, rowcnd, colcnd, amax, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgbequb "F_FUNC(zgbequb,ZGBEQUB)"(int *m, int *n, int *kl, int *ku, npy_complex128 *ab, int *ldab, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil
+cdef void zgbequb(int *m, int *n, int *kl, int *ku, z *ab, int *ldab, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil:
+    _fortran_zgbequb(m, n, kl, ku, ab, ldab, r, c, rowcnd, colcnd, amax, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgbrfs "F_FUNC(zgbrfs,ZGBRFS)"(char *trans, int *n, int *kl, int *ku, int *nrhs, npy_complex128 *ab, int *ldab, npy_complex128 *afb, int *ldafb, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, d *ferr, d *berr, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void zgbrfs(char *trans, int *n, int *kl, int *ku, int *nrhs, z *ab, int *ldab, z *afb, int *ldafb, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil:
+    _fortran_zgbrfs(trans, n, kl, ku, nrhs, ab, ldab, afb, ldafb, ipiv, b, ldb, x, ldx, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgbsv "F_FUNC(zgbsv,ZGBSV)"(int *n, int *kl, int *ku, int *nrhs, npy_complex128 *ab, int *ldab, int *ipiv, npy_complex128 *b, int *ldb, int *info) nogil
+cdef void zgbsv(int *n, int *kl, int *ku, int *nrhs, z *ab, int *ldab, int *ipiv, z *b, int *ldb, int *info) nogil:
+    _fortran_zgbsv(n, kl, ku, nrhs, ab, ldab, ipiv, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgbsvx "F_FUNC(zgbsvx,ZGBSVX)"(char *fact, char *trans, int *n, int *kl, int *ku, int *nrhs, npy_complex128 *ab, int *ldab, npy_complex128 *afb, int *ldafb, int *ipiv, char *equed, d *r, d *c, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, d *rcond, d *ferr, d *berr, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void zgbsvx(char *fact, char *trans, int *n, int *kl, int *ku, int *nrhs, z *ab, int *ldab, z *afb, int *ldafb, int *ipiv, char *equed, d *r, d *c, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil:
+    _fortran_zgbsvx(fact, trans, n, kl, ku, nrhs, ab, ldab, afb, ldafb, ipiv, equed, r, c, b, ldb, x, ldx, rcond, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgbtf2 "F_FUNC(zgbtf2,ZGBTF2)"(int *m, int *n, int *kl, int *ku, npy_complex128 *ab, int *ldab, int *ipiv, int *info) nogil
+cdef void zgbtf2(int *m, int *n, int *kl, int *ku, z *ab, int *ldab, int *ipiv, int *info) nogil:
+    _fortran_zgbtf2(m, n, kl, ku, ab, ldab, ipiv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgbtrf "F_FUNC(zgbtrf,ZGBTRF)"(int *m, int *n, int *kl, int *ku, npy_complex128 *ab, int *ldab, int *ipiv, int *info) nogil
+cdef void zgbtrf(int *m, int *n, int *kl, int *ku, z *ab, int *ldab, int *ipiv, int *info) nogil:
+    _fortran_zgbtrf(m, n, kl, ku, ab, ldab, ipiv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgbtrs "F_FUNC(zgbtrs,ZGBTRS)"(char *trans, int *n, int *kl, int *ku, int *nrhs, npy_complex128 *ab, int *ldab, int *ipiv, npy_complex128 *b, int *ldb, int *info) nogil
+cdef void zgbtrs(char *trans, int *n, int *kl, int *ku, int *nrhs, z *ab, int *ldab, int *ipiv, z *b, int *ldb, int *info) nogil:
+    _fortran_zgbtrs(trans, n, kl, ku, nrhs, ab, ldab, ipiv, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgebak "F_FUNC(zgebak,ZGEBAK)"(char *job, char *side, int *n, int *ilo, int *ihi, d *scale, int *m, npy_complex128 *v, int *ldv, int *info) nogil
+cdef void zgebak(char *job, char *side, int *n, int *ilo, int *ihi, d *scale, int *m, z *v, int *ldv, int *info) nogil:
+    _fortran_zgebak(job, side, n, ilo, ihi, scale, m, v, ldv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgebal "F_FUNC(zgebal,ZGEBAL)"(char *job, int *n, npy_complex128 *a, int *lda, int *ilo, int *ihi, d *scale, int *info) nogil
+cdef void zgebal(char *job, int *n, z *a, int *lda, int *ilo, int *ihi, d *scale, int *info) nogil:
+    _fortran_zgebal(job, n, a, lda, ilo, ihi, scale, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgebd2 "F_FUNC(zgebd2,ZGEBD2)"(int *m, int *n, npy_complex128 *a, int *lda, d *d, d *e, npy_complex128 *tauq, npy_complex128 *taup, npy_complex128 *work, int *info) nogil
+cdef void zgebd2(int *m, int *n, z *a, int *lda, d *d, d *e, z *tauq, z *taup, z *work, int *info) nogil:
+    _fortran_zgebd2(m, n, a, lda, d, e, tauq, taup, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgebrd "F_FUNC(zgebrd,ZGEBRD)"(int *m, int *n, npy_complex128 *a, int *lda, d *d, d *e, npy_complex128 *tauq, npy_complex128 *taup, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zgebrd(int *m, int *n, z *a, int *lda, d *d, d *e, z *tauq, z *taup, z *work, int *lwork, int *info) nogil:
+    _fortran_zgebrd(m, n, a, lda, d, e, tauq, taup, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgecon "F_FUNC(zgecon,ZGECON)"(char *norm, int *n, npy_complex128 *a, int *lda, d *anorm, d *rcond, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void zgecon(char *norm, int *n, z *a, int *lda, d *anorm, d *rcond, z *work, d *rwork, int *info) nogil:
+    _fortran_zgecon(norm, n, a, lda, anorm, rcond, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgeequ "F_FUNC(zgeequ,ZGEEQU)"(int *m, int *n, npy_complex128 *a, int *lda, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil
+cdef void zgeequ(int *m, int *n, z *a, int *lda, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil:
+    _fortran_zgeequ(m, n, a, lda, r, c, rowcnd, colcnd, amax, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgeequb "F_FUNC(zgeequb,ZGEEQUB)"(int *m, int *n, npy_complex128 *a, int *lda, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil
+cdef void zgeequb(int *m, int *n, z *a, int *lda, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil:
+    _fortran_zgeequb(m, n, a, lda, r, c, rowcnd, colcnd, amax, info)
+
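+# zgees/zgeesx (and zgges/zggesx further down) take an eigenvalue-selection
+# callback: the public zselect1/zselect2 function-pointer typedefs are cast
+# to the _zselect1/_zselect2 types from _lapack_subroutines.h before being
+# handed to the Fortran routine.
+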
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgees "F_FUNC(zgees,ZGEES)"(char *jobvs, char *sort, _zselect1 *select, int *n, npy_complex128 *a, int *lda, int *sdim, npy_complex128 *w, npy_complex128 *vs, int *ldvs, npy_complex128 *work, int *lwork, d *rwork, bint *bwork, int *info) nogil
+cdef void zgees(char *jobvs, char *sort, zselect1 *select, int *n, z *a, int *lda, int *sdim, z *w, z *vs, int *ldvs, z *work, int *lwork, d *rwork, bint *bwork, int *info) nogil:
+    _fortran_zgees(jobvs, sort, <_zselect1*>select, n, a, lda, sdim, w, vs, ldvs, work, lwork, rwork, bwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgeesx "F_FUNC(zgeesx,ZGEESX)"(char *jobvs, char *sort, _zselect1 *select, char *sense, int *n, npy_complex128 *a, int *lda, int *sdim, npy_complex128 *w, npy_complex128 *vs, int *ldvs, d *rconde, d *rcondv, npy_complex128 *work, int *lwork, d *rwork, bint *bwork, int *info) nogil
+cdef void zgeesx(char *jobvs, char *sort, zselect1 *select, char *sense, int *n, z *a, int *lda, int *sdim, z *w, z *vs, int *ldvs, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, bint *bwork, int *info) nogil:
+    _fortran_zgeesx(jobvs, sort, <_zselect1*>select, sense, n, a, lda, sdim, w, vs, ldvs, rconde, rcondv, work, lwork, rwork, bwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgeev "F_FUNC(zgeev,ZGEEV)"(char *jobvl, char *jobvr, int *n, npy_complex128 *a, int *lda, npy_complex128 *w, npy_complex128 *vl, int *ldvl, npy_complex128 *vr, int *ldvr, npy_complex128 *work, int *lwork, d *rwork, int *info) nogil
+cdef void zgeev(char *jobvl, char *jobvr, int *n, z *a, int *lda, z *w, z *vl, int *ldvl, z *vr, int *ldvr, z *work, int *lwork, d *rwork, int *info) nogil:
+    _fortran_zgeev(jobvl, jobvr, n, a, lda, w, vl, ldvl, vr, ldvr, work, lwork, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgeevx "F_FUNC(zgeevx,ZGEEVX)"(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, npy_complex128 *a, int *lda, npy_complex128 *w, npy_complex128 *vl, int *ldvl, npy_complex128 *vr, int *ldvr, int *ilo, int *ihi, d *scale, d *abnrm, d *rconde, d *rcondv, npy_complex128 *work, int *lwork, d *rwork, int *info) nogil
+cdef void zgeevx(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, z *a, int *lda, z *w, z *vl, int *ldvl, z *vr, int *ldvr, int *ilo, int *ihi, d *scale, d *abnrm, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, int *info) nogil:
+    _fortran_zgeevx(balanc, jobvl, jobvr, sense, n, a, lda, w, vl, ldvl, vr, ldvr, ilo, ihi, scale, abnrm, rconde, rcondv, work, lwork, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgehd2 "F_FUNC(zgehd2,ZGEHD2)"(int *n, int *ilo, int *ihi, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *info) nogil
+cdef void zgehd2(int *n, int *ilo, int *ihi, z *a, int *lda, z *tau, z *work, int *info) nogil:
+    _fortran_zgehd2(n, ilo, ihi, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgehrd "F_FUNC(zgehrd,ZGEHRD)"(int *n, int *ilo, int *ihi, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zgehrd(int *n, int *ilo, int *ihi, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil:
+    _fortran_zgehrd(n, ilo, ihi, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgelq2 "F_FUNC(zgelq2,ZGELQ2)"(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *info) nogil
+cdef void zgelq2(int *m, int *n, z *a, int *lda, z *tau, z *work, int *info) nogil:
+    _fortran_zgelq2(m, n, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgelqf "F_FUNC(zgelqf,ZGELQF)"(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zgelqf(int *m, int *n, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil:
+    _fortran_zgelqf(m, n, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgels "F_FUNC(zgels,ZGELS)"(char *trans, int *m, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zgels(char *trans, int *m, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, z *work, int *lwork, int *info) nogil:
+    _fortran_zgels(trans, m, n, nrhs, a, lda, b, ldb, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgelsd "F_FUNC(zgelsd,ZGELSD)"(int *m, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, d *s, d *rcond, int *rank, npy_complex128 *work, int *lwork, d *rwork, int *iwork, int *info) nogil
+cdef void zgelsd(int *m, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, d *s, d *rcond, int *rank, z *work, int *lwork, d *rwork, int *iwork, int *info) nogil:
+    _fortran_zgelsd(m, n, nrhs, a, lda, b, ldb, s, rcond, rank, work, lwork, rwork, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgelss "F_FUNC(zgelss,ZGELSS)"(int *m, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, d *s, d *rcond, int *rank, npy_complex128 *work, int *lwork, d *rwork, int *info) nogil
+cdef void zgelss(int *m, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, d *s, d *rcond, int *rank, z *work, int *lwork, d *rwork, int *info) nogil:
+    _fortran_zgelss(m, n, nrhs, a, lda, b, ldb, s, rcond, rank, work, lwork, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgelsy "F_FUNC(zgelsy,ZGELSY)"(int *m, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, int *jpvt, d *rcond, int *rank, npy_complex128 *work, int *lwork, d *rwork, int *info) nogil
+cdef void zgelsy(int *m, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, int *jpvt, d *rcond, int *rank, z *work, int *lwork, d *rwork, int *info) nogil:
+    _fortran_zgelsy(m, n, nrhs, a, lda, b, ldb, jpvt, rcond, rank, work, lwork, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgemqrt "F_FUNC(zgemqrt,ZGEMQRT)"(char *side, char *trans, int *m, int *n, int *k, int *nb, npy_complex128 *v, int *ldv, npy_complex128 *t, int *ldt, npy_complex128 *c, int *ldc, npy_complex128 *work, int *info) nogil
+cdef void zgemqrt(char *side, char *trans, int *m, int *n, int *k, int *nb, z *v, int *ldv, z *t, int *ldt, z *c, int *ldc, z *work, int *info) nogil:
+    _fortran_zgemqrt(side, trans, m, n, k, nb, v, ldv, t, ldt, c, ldc, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgeql2 "F_FUNC(zgeql2,ZGEQL2)"(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *info) nogil
+cdef void zgeql2(int *m, int *n, z *a, int *lda, z *tau, z *work, int *info) nogil:
+    _fortran_zgeql2(m, n, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgeqlf "F_FUNC(zgeqlf,ZGEQLF)"(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zgeqlf(int *m, int *n, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil:
+    _fortran_zgeqlf(m, n, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgeqp3 "F_FUNC(zgeqp3,ZGEQP3)"(int *m, int *n, npy_complex128 *a, int *lda, int *jpvt, npy_complex128 *tau, npy_complex128 *work, int *lwork, d *rwork, int *info) nogil
+cdef void zgeqp3(int *m, int *n, z *a, int *lda, int *jpvt, z *tau, z *work, int *lwork, d *rwork, int *info) nogil:
+    _fortran_zgeqp3(m, n, a, lda, jpvt, tau, work, lwork, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgeqr2 "F_FUNC(zgeqr2,ZGEQR2)"(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *info) nogil
+cdef void zgeqr2(int *m, int *n, z *a, int *lda, z *tau, z *work, int *info) nogil:
+    _fortran_zgeqr2(m, n, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgeqr2p "F_FUNC(zgeqr2p,ZGEQR2P)"(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *info) nogil
+cdef void zgeqr2p(int *m, int *n, z *a, int *lda, z *tau, z *work, int *info) nogil:
+    _fortran_zgeqr2p(m, n, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgeqrf "F_FUNC(zgeqrf,ZGEQRF)"(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zgeqrf(int *m, int *n, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil:
+    _fortran_zgeqrf(m, n, a, lda, tau, work, lwork, info)
+
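+# Routines with a work/lwork pair (zgeqrf above and most drivers below)
+# follow the standard LAPACK workspace-query convention: calling with
+# lwork = -1 performs no computation and returns the optimal workspace
+# size in work[0].
+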
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgeqrfp "F_FUNC(zgeqrfp,ZGEQRFP)"(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zgeqrfp(int *m, int *n, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil:
+    _fortran_zgeqrfp(m, n, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgeqrt "F_FUNC(zgeqrt,ZGEQRT)"(int *m, int *n, int *nb, npy_complex128 *a, int *lda, npy_complex128 *t, int *ldt, npy_complex128 *work, int *info) nogil
+cdef void zgeqrt(int *m, int *n, int *nb, z *a, int *lda, z *t, int *ldt, z *work, int *info) nogil:
+    _fortran_zgeqrt(m, n, nb, a, lda, t, ldt, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgeqrt2 "F_FUNC(zgeqrt2,ZGEQRT2)"(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *t, int *ldt, int *info) nogil
+cdef void zgeqrt2(int *m, int *n, z *a, int *lda, z *t, int *ldt, int *info) nogil:
+    _fortran_zgeqrt2(m, n, a, lda, t, ldt, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgeqrt3 "F_FUNC(zgeqrt3,ZGEQRT3)"(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *t, int *ldt, int *info) nogil
+cdef void zgeqrt3(int *m, int *n, z *a, int *lda, z *t, int *ldt, int *info) nogil:
+    _fortran_zgeqrt3(m, n, a, lda, t, ldt, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgerfs "F_FUNC(zgerfs,ZGERFS)"(char *trans, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *af, int *ldaf, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, d *ferr, d *berr, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void zgerfs(char *trans, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil:
+    _fortran_zgerfs(trans, n, nrhs, a, lda, af, ldaf, ipiv, b, ldb, x, ldx, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgerq2 "F_FUNC(zgerq2,ZGERQ2)"(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *info) nogil
+cdef void zgerq2(int *m, int *n, z *a, int *lda, z *tau, z *work, int *info) nogil:
+    _fortran_zgerq2(m, n, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgerqf "F_FUNC(zgerqf,ZGERQF)"(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zgerqf(int *m, int *n, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil:
+    _fortran_zgerqf(m, n, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgesc2 "F_FUNC(zgesc2,ZGESC2)"(int *n, npy_complex128 *a, int *lda, npy_complex128 *rhs, int *ipiv, int *jpiv, d *scale) nogil
+cdef void zgesc2(int *n, z *a, int *lda, z *rhs, int *ipiv, int *jpiv, d *scale) nogil:
+    _fortran_zgesc2(n, a, lda, rhs, ipiv, jpiv, scale)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgesdd "F_FUNC(zgesdd,ZGESDD)"(char *jobz, int *m, int *n, npy_complex128 *a, int *lda, d *s, npy_complex128 *u, int *ldu, npy_complex128 *vt, int *ldvt, npy_complex128 *work, int *lwork, d *rwork, int *iwork, int *info) nogil
+cdef void zgesdd(char *jobz, int *m, int *n, z *a, int *lda, d *s, z *u, int *ldu, z *vt, int *ldvt, z *work, int *lwork, d *rwork, int *iwork, int *info) nogil:
+    _fortran_zgesdd(jobz, m, n, a, lda, s, u, ldu, vt, ldvt, work, lwork, rwork, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgesv "F_FUNC(zgesv,ZGESV)"(int *n, int *nrhs, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *b, int *ldb, int *info) nogil
+cdef void zgesv(int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, int *info) nogil:
+    _fortran_zgesv(n, nrhs, a, lda, ipiv, b, ldb, info)
+
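+# A minimal usage sketch mirroring the real-valued example earlier: LU-solve
+# a general complex system with the zgesv wrapper above. The helper name and
+# the Fortran-ordered memoryview arguments are illustrative assumptions;
+# ipiv is assumed to have length n.
+cdef int _sketch_solve_general(z[::1, :] a, z[::1, :] b, int[::1] ipiv) nogil:
+    cdef int n = a.shape[0], nrhs = b.shape[1]
+    cdef int lda = n, ldb = n, info = 0
+    zgesv(&n, &nrhs, &a[0, 0], &lda, &ipiv[0], &b[0, 0], &ldb, &info)
+    return info  # 0 on success; info > 0 flags a singular factor U
+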
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgesvd "F_FUNC(zgesvd,ZGESVD)"(char *jobu, char *jobvt, int *m, int *n, npy_complex128 *a, int *lda, d *s, npy_complex128 *u, int *ldu, npy_complex128 *vt, int *ldvt, npy_complex128 *work, int *lwork, d *rwork, int *info) nogil
+cdef void zgesvd(char *jobu, char *jobvt, int *m, int *n, z *a, int *lda, d *s, z *u, int *ldu, z *vt, int *ldvt, z *work, int *lwork, d *rwork, int *info) nogil:
+    _fortran_zgesvd(jobu, jobvt, m, n, a, lda, s, u, ldu, vt, ldvt, work, lwork, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgesvx "F_FUNC(zgesvx,ZGESVX)"(char *fact, char *trans, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *af, int *ldaf, int *ipiv, char *equed, d *r, d *c, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, d *rcond, d *ferr, d *berr, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void zgesvx(char *fact, char *trans, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, int *ipiv, char *equed, d *r, d *c, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil:
+    _fortran_zgesvx(fact, trans, n, nrhs, a, lda, af, ldaf, ipiv, equed, r, c, b, ldb, x, ldx, rcond, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgetc2 "F_FUNC(zgetc2,ZGETC2)"(int *n, npy_complex128 *a, int *lda, int *ipiv, int *jpiv, int *info) nogil
+cdef void zgetc2(int *n, z *a, int *lda, int *ipiv, int *jpiv, int *info) nogil:
+    _fortran_zgetc2(n, a, lda, ipiv, jpiv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgetf2 "F_FUNC(zgetf2,ZGETF2)"(int *m, int *n, npy_complex128 *a, int *lda, int *ipiv, int *info) nogil
+cdef void zgetf2(int *m, int *n, z *a, int *lda, int *ipiv, int *info) nogil:
+    _fortran_zgetf2(m, n, a, lda, ipiv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgetrf "F_FUNC(zgetrf,ZGETRF)"(int *m, int *n, npy_complex128 *a, int *lda, int *ipiv, int *info) nogil
+cdef void zgetrf(int *m, int *n, z *a, int *lda, int *ipiv, int *info) nogil:
+    _fortran_zgetrf(m, n, a, lda, ipiv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgetri "F_FUNC(zgetri,ZGETRI)"(int *n, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zgetri(int *n, z *a, int *lda, int *ipiv, z *work, int *lwork, int *info) nogil:
+    _fortran_zgetri(n, a, lda, ipiv, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgetrs "F_FUNC(zgetrs,ZGETRS)"(char *trans, int *n, int *nrhs, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *b, int *ldb, int *info) nogil
+cdef void zgetrs(char *trans, int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, int *info) nogil:
+    _fortran_zgetrs(trans, n, nrhs, a, lda, ipiv, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zggbak "F_FUNC(zggbak,ZGGBAK)"(char *job, char *side, int *n, int *ilo, int *ihi, d *lscale, d *rscale, int *m, npy_complex128 *v, int *ldv, int *info) nogil
+cdef void zggbak(char *job, char *side, int *n, int *ilo, int *ihi, d *lscale, d *rscale, int *m, z *v, int *ldv, int *info) nogil:
+    _fortran_zggbak(job, side, n, ilo, ihi, lscale, rscale, m, v, ldv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zggbal "F_FUNC(zggbal,ZGGBAL)"(char *job, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, int *ilo, int *ihi, d *lscale, d *rscale, d *work, int *info) nogil
+cdef void zggbal(char *job, int *n, z *a, int *lda, z *b, int *ldb, int *ilo, int *ihi, d *lscale, d *rscale, d *work, int *info) nogil:
+    _fortran_zggbal(job, n, a, lda, b, ldb, ilo, ihi, lscale, rscale, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgges "F_FUNC(zgges,ZGGES)"(char *jobvsl, char *jobvsr, char *sort, _zselect2 *selctg, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, int *sdim, npy_complex128 *alpha, npy_complex128 *beta, npy_complex128 *vsl, int *ldvsl, npy_complex128 *vsr, int *ldvsr, npy_complex128 *work, int *lwork, d *rwork, bint *bwork, int *info) nogil
+cdef void zgges(char *jobvsl, char *jobvsr, char *sort, zselect2 *selctg, int *n, z *a, int *lda, z *b, int *ldb, int *sdim, z *alpha, z *beta, z *vsl, int *ldvsl, z *vsr, int *ldvsr, z *work, int *lwork, d *rwork, bint *bwork, int *info) nogil:
+    _fortran_zgges(jobvsl, jobvsr, sort, <_zselect2*>selctg, n, a, lda, b, ldb, sdim, alpha, beta, vsl, ldvsl, vsr, ldvsr, work, lwork, rwork, bwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zggesx "F_FUNC(zggesx,ZGGESX)"(char *jobvsl, char *jobvsr, char *sort, _zselect2 *selctg, char *sense, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, int *sdim, npy_complex128 *alpha, npy_complex128 *beta, npy_complex128 *vsl, int *ldvsl, npy_complex128 *vsr, int *ldvsr, d *rconde, d *rcondv, npy_complex128 *work, int *lwork, d *rwork, int *iwork, int *liwork, bint *bwork, int *info) nogil
+cdef void zggesx(char *jobvsl, char *jobvsr, char *sort, zselect2 *selctg, char *sense, int *n, z *a, int *lda, z *b, int *ldb, int *sdim, z *alpha, z *beta, z *vsl, int *ldvsl, z *vsr, int *ldvsr, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, int *iwork, int *liwork, bint *bwork, int *info) nogil:
+    _fortran_zggesx(jobvsl, jobvsr, sort, <_zselect2*>selctg, sense, n, a, lda, b, ldb, sdim, alpha, beta, vsl, ldvsl, vsr, ldvsr, rconde, rcondv, work, lwork, rwork, iwork, liwork, bwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zggev "F_FUNC(zggev,ZGGEV)"(char *jobvl, char *jobvr, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *alpha, npy_complex128 *beta, npy_complex128 *vl, int *ldvl, npy_complex128 *vr, int *ldvr, npy_complex128 *work, int *lwork, d *rwork, int *info) nogil
+cdef void zggev(char *jobvl, char *jobvr, int *n, z *a, int *lda, z *b, int *ldb, z *alpha, z *beta, z *vl, int *ldvl, z *vr, int *ldvr, z *work, int *lwork, d *rwork, int *info) nogil:
+    _fortran_zggev(jobvl, jobvr, n, a, lda, b, ldb, alpha, beta, vl, ldvl, vr, ldvr, work, lwork, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zggevx "F_FUNC(zggevx,ZGGEVX)"(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *alpha, npy_complex128 *beta, npy_complex128 *vl, int *ldvl, npy_complex128 *vr, int *ldvr, int *ilo, int *ihi, d *lscale, d *rscale, d *abnrm, d *bbnrm, d *rconde, d *rcondv, npy_complex128 *work, int *lwork, d *rwork, int *iwork, bint *bwork, int *info) nogil
+cdef void zggevx(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, z *a, int *lda, z *b, int *ldb, z *alpha, z *beta, z *vl, int *ldvl, z *vr, int *ldvr, int *ilo, int *ihi, d *lscale, d *rscale, d *abnrm, d *bbnrm, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, int *iwork, bint *bwork, int *info) nogil:
+    _fortran_zggevx(balanc, jobvl, jobvr, sense, n, a, lda, b, ldb, alpha, beta, vl, ldvl, vr, ldvr, ilo, ihi, lscale, rscale, abnrm, bbnrm, rconde, rcondv, work, lwork, rwork, iwork, bwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zggglm "F_FUNC(zggglm,ZGGGLM)"(int *n, int *m, int *p, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *d, npy_complex128 *x, npy_complex128 *y, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zggglm(int *n, int *m, int *p, z *a, int *lda, z *b, int *ldb, z *d, z *x, z *y, z *work, int *lwork, int *info) nogil:
+    _fortran_zggglm(n, m, p, a, lda, b, ldb, d, x, y, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgghrd "F_FUNC(zgghrd,ZGGHRD)"(char *compq, char *compz, int *n, int *ilo, int *ihi, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *q, int *ldq, npy_complex128 *z, int *ldz, int *info) nogil
+cdef void zgghrd(char *compq, char *compz, int *n, int *ilo, int *ihi, z *a, int *lda, z *b, int *ldb, z *q, int *ldq, z *z, int *ldz, int *info) nogil:
+    _fortran_zgghrd(compq, compz, n, ilo, ihi, a, lda, b, ldb, q, ldq, z, ldz, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgglse "F_FUNC(zgglse,ZGGLSE)"(int *m, int *n, int *p, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *c, npy_complex128 *d, npy_complex128 *x, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zgglse(int *m, int *n, int *p, z *a, int *lda, z *b, int *ldb, z *c, z *d, z *x, z *work, int *lwork, int *info) nogil:
+    _fortran_zgglse(m, n, p, a, lda, b, ldb, c, d, x, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zggqrf "F_FUNC(zggqrf,ZGGQRF)"(int *n, int *m, int *p, npy_complex128 *a, int *lda, npy_complex128 *taua, npy_complex128 *b, int *ldb, npy_complex128 *taub, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zggqrf(int *n, int *m, int *p, z *a, int *lda, z *taua, z *b, int *ldb, z *taub, z *work, int *lwork, int *info) nogil:
+    _fortran_zggqrf(n, m, p, a, lda, taua, b, ldb, taub, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zggrqf "F_FUNC(zggrqf,ZGGRQF)"(int *m, int *p, int *n, npy_complex128 *a, int *lda, npy_complex128 *taua, npy_complex128 *b, int *ldb, npy_complex128 *taub, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zggrqf(int *m, int *p, int *n, z *a, int *lda, z *taua, z *b, int *ldb, z *taub, z *work, int *lwork, int *info) nogil:
+    _fortran_zggrqf(m, p, n, a, lda, taua, b, ldb, taub, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgtcon "F_FUNC(zgtcon,ZGTCON)"(char *norm, int *n, npy_complex128 *dl, npy_complex128 *d, npy_complex128 *du, npy_complex128 *du2, int *ipiv, d *anorm, d *rcond, npy_complex128 *work, int *info) nogil
+cdef void zgtcon(char *norm, int *n, z *dl, z *d, z *du, z *du2, int *ipiv, d *anorm, d *rcond, z *work, int *info) nogil:
+    _fortran_zgtcon(norm, n, dl, d, du, du2, ipiv, anorm, rcond, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgtrfs "F_FUNC(zgtrfs,ZGTRFS)"(char *trans, int *n, int *nrhs, npy_complex128 *dl, npy_complex128 *d, npy_complex128 *du, npy_complex128 *dlf, npy_complex128 *df, npy_complex128 *duf, npy_complex128 *du2, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, d *ferr, d *berr, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void zgtrfs(char *trans, int *n, int *nrhs, z *dl, z *d, z *du, z *dlf, z *df, z *duf, z *du2, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil:
+    _fortran_zgtrfs(trans, n, nrhs, dl, d, du, dlf, df, duf, du2, ipiv, b, ldb, x, ldx, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgtsv "F_FUNC(zgtsv,ZGTSV)"(int *n, int *nrhs, npy_complex128 *dl, npy_complex128 *d, npy_complex128 *du, npy_complex128 *b, int *ldb, int *info) nogil
+cdef void zgtsv(int *n, int *nrhs, z *dl, z *d, z *du, z *b, int *ldb, int *info) nogil:
+    _fortran_zgtsv(n, nrhs, dl, d, du, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgtsvx "F_FUNC(zgtsvx,ZGTSVX)"(char *fact, char *trans, int *n, int *nrhs, npy_complex128 *dl, npy_complex128 *d, npy_complex128 *du, npy_complex128 *dlf, npy_complex128 *df, npy_complex128 *duf, npy_complex128 *du2, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, d *rcond, d *ferr, d *berr, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void zgtsvx(char *fact, char *trans, int *n, int *nrhs, z *dl, z *d, z *du, z *dlf, z *df, z *duf, z *du2, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil:
+    _fortran_zgtsvx(fact, trans, n, nrhs, dl, d, du, dlf, df, duf, du2, ipiv, b, ldb, x, ldx, rcond, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgttrf "F_FUNC(zgttrf,ZGTTRF)"(int *n, npy_complex128 *dl, npy_complex128 *d, npy_complex128 *du, npy_complex128 *du2, int *ipiv, int *info) nogil
+cdef void zgttrf(int *n, z *dl, z *d, z *du, z *du2, int *ipiv, int *info) nogil:
+    _fortran_zgttrf(n, dl, d, du, du2, ipiv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgttrs "F_FUNC(zgttrs,ZGTTRS)"(char *trans, int *n, int *nrhs, npy_complex128 *dl, npy_complex128 *d, npy_complex128 *du, npy_complex128 *du2, int *ipiv, npy_complex128 *b, int *ldb, int *info) nogil
+cdef void zgttrs(char *trans, int *n, int *nrhs, z *dl, z *d, z *du, z *du2, int *ipiv, z *b, int *ldb, int *info) nogil:
+    _fortran_zgttrs(trans, n, nrhs, dl, d, du, du2, ipiv, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zgtts2 "F_FUNC(zgtts2,ZGTTS2)"(int *itrans, int *n, int *nrhs, npy_complex128 *dl, npy_complex128 *d, npy_complex128 *du, npy_complex128 *du2, int *ipiv, npy_complex128 *b, int *ldb) nogil
+cdef void zgtts2(int *itrans, int *n, int *nrhs, z *dl, z *d, z *du, z *du2, int *ipiv, z *b, int *ldb) nogil:
+    _fortran_zgtts2(itrans, n, nrhs, dl, d, du, du2, ipiv, b, ldb)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhbev "F_FUNC(zhbev,ZHBEV)"(char *jobz, char *uplo, int *n, int *kd, npy_complex128 *ab, int *ldab, d *w, npy_complex128 *z, int *ldz, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void zhbev(char *jobz, char *uplo, int *n, int *kd, z *ab, int *ldab, d *w, z *z, int *ldz, z *work, d *rwork, int *info) nogil:
+    _fortran_zhbev(jobz, uplo, n, kd, ab, ldab, w, z, ldz, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhbevd "F_FUNC(zhbevd,ZHBEVD)"(char *jobz, char *uplo, int *n, int *kd, npy_complex128 *ab, int *ldab, d *w, npy_complex128 *z, int *ldz, npy_complex128 *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+cdef void zhbevd(char *jobz, char *uplo, int *n, int *kd, z *ab, int *ldab, d *w, z *z, int *ldz, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_zhbevd(jobz, uplo, n, kd, ab, ldab, w, z, ldz, work, lwork, rwork, lrwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhbevx "F_FUNC(zhbevx,ZHBEVX)"(char *jobz, char *range, char *uplo, int *n, int *kd, npy_complex128 *ab, int *ldab, npy_complex128 *q, int *ldq, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, npy_complex128 *z, int *ldz, npy_complex128 *work, d *rwork, int *iwork, int *ifail, int *info) nogil
+cdef void zhbevx(char *jobz, char *range, char *uplo, int *n, int *kd, z *ab, int *ldab, z *q, int *ldq, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, z *work, d *rwork, int *iwork, int *ifail, int *info) nogil:
+    _fortran_zhbevx(jobz, range, uplo, n, kd, ab, ldab, q, ldq, vl, vu, il, iu, abstol, m, w, z, ldz, work, rwork, iwork, ifail, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhbgst "F_FUNC(zhbgst,ZHBGST)"(char *vect, char *uplo, int *n, int *ka, int *kb, npy_complex128 *ab, int *ldab, npy_complex128 *bb, int *ldbb, npy_complex128 *x, int *ldx, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void zhbgst(char *vect, char *uplo, int *n, int *ka, int *kb, z *ab, int *ldab, z *bb, int *ldbb, z *x, int *ldx, z *work, d *rwork, int *info) nogil:
+    _fortran_zhbgst(vect, uplo, n, ka, kb, ab, ldab, bb, ldbb, x, ldx, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhbgv "F_FUNC(zhbgv,ZHBGV)"(char *jobz, char *uplo, int *n, int *ka, int *kb, npy_complex128 *ab, int *ldab, npy_complex128 *bb, int *ldbb, d *w, npy_complex128 *z, int *ldz, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void zhbgv(char *jobz, char *uplo, int *n, int *ka, int *kb, z *ab, int *ldab, z *bb, int *ldbb, d *w, z *z, int *ldz, z *work, d *rwork, int *info) nogil:
+    _fortran_zhbgv(jobz, uplo, n, ka, kb, ab, ldab, bb, ldbb, w, z, ldz, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhbgvd "F_FUNC(zhbgvd,ZHBGVD)"(char *jobz, char *uplo, int *n, int *ka, int *kb, npy_complex128 *ab, int *ldab, npy_complex128 *bb, int *ldbb, d *w, npy_complex128 *z, int *ldz, npy_complex128 *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+cdef void zhbgvd(char *jobz, char *uplo, int *n, int *ka, int *kb, z *ab, int *ldab, z *bb, int *ldbb, d *w, z *z, int *ldz, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_zhbgvd(jobz, uplo, n, ka, kb, ab, ldab, bb, ldbb, w, z, ldz, work, lwork, rwork, lrwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhbgvx "F_FUNC(zhbgvx,ZHBGVX)"(char *jobz, char *range, char *uplo, int *n, int *ka, int *kb, npy_complex128 *ab, int *ldab, npy_complex128 *bb, int *ldbb, npy_complex128 *q, int *ldq, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, npy_complex128 *z, int *ldz, npy_complex128 *work, d *rwork, int *iwork, int *ifail, int *info) nogil
+cdef void zhbgvx(char *jobz, char *range, char *uplo, int *n, int *ka, int *kb, z *ab, int *ldab, z *bb, int *ldbb, z *q, int *ldq, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, z *work, d *rwork, int *iwork, int *ifail, int *info) nogil:
+    _fortran_zhbgvx(jobz, range, uplo, n, ka, kb, ab, ldab, bb, ldbb, q, ldq, vl, vu, il, iu, abstol, m, w, z, ldz, work, rwork, iwork, ifail, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhbtrd "F_FUNC(zhbtrd,ZHBTRD)"(char *vect, char *uplo, int *n, int *kd, npy_complex128 *ab, int *ldab, d *d, d *e, npy_complex128 *q, int *ldq, npy_complex128 *work, int *info) nogil
+cdef void zhbtrd(char *vect, char *uplo, int *n, int *kd, z *ab, int *ldab, d *d, d *e, z *q, int *ldq, z *work, int *info) nogil:
+    _fortran_zhbtrd(vect, uplo, n, kd, ab, ldab, d, e, q, ldq, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhecon "F_FUNC(zhecon,ZHECON)"(char *uplo, int *n, npy_complex128 *a, int *lda, int *ipiv, d *anorm, d *rcond, npy_complex128 *work, int *info) nogil
+cdef void zhecon(char *uplo, int *n, z *a, int *lda, int *ipiv, d *anorm, d *rcond, z *work, int *info) nogil:
+    _fortran_zhecon(uplo, n, a, lda, ipiv, anorm, rcond, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zheequb "F_FUNC(zheequb,ZHEEQUB)"(char *uplo, int *n, npy_complex128 *a, int *lda, d *s, d *scond, d *amax, npy_complex128 *work, int *info) nogil
+cdef void zheequb(char *uplo, int *n, z *a, int *lda, d *s, d *scond, d *amax, z *work, int *info) nogil:
+    _fortran_zheequb(uplo, n, a, lda, s, scond, amax, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zheev "F_FUNC(zheev,ZHEEV)"(char *jobz, char *uplo, int *n, npy_complex128 *a, int *lda, d *w, npy_complex128 *work, int *lwork, d *rwork, int *info) nogil
+cdef void zheev(char *jobz, char *uplo, int *n, z *a, int *lda, d *w, z *work, int *lwork, d *rwork, int *info) nogil:
+    _fortran_zheev(jobz, uplo, n, a, lda, w, work, lwork, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zheevd "F_FUNC(zheevd,ZHEEVD)"(char *jobz, char *uplo, int *n, npy_complex128 *a, int *lda, d *w, npy_complex128 *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+cdef void zheevd(char *jobz, char *uplo, int *n, z *a, int *lda, d *w, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_zheevd(jobz, uplo, n, a, lda, w, work, lwork, rwork, lrwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zheevr "F_FUNC(zheevr,ZHEEVR)"(char *jobz, char *range, char *uplo, int *n, npy_complex128 *a, int *lda, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, npy_complex128 *z, int *ldz, int *isuppz, npy_complex128 *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+cdef void zheevr(char *jobz, char *range, char *uplo, int *n, z *a, int *lda, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, int *isuppz, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_zheevr(jobz, range, uplo, n, a, lda, vl, vu, il, iu, abstol, m, w, z, ldz, isuppz, work, lwork, rwork, lrwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zheevx "F_FUNC(zheevx,ZHEEVX)"(char *jobz, char *range, char *uplo, int *n, npy_complex128 *a, int *lda, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, npy_complex128 *z, int *ldz, npy_complex128 *work, int *lwork, d *rwork, int *iwork, int *ifail, int *info) nogil
+cdef void zheevx(char *jobz, char *range, char *uplo, int *n, z *a, int *lda, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, z *work, int *lwork, d *rwork, int *iwork, int *ifail, int *info) nogil:
+    _fortran_zheevx(jobz, range, uplo, n, a, lda, vl, vu, il, iu, abstol, m, w, z, ldz, work, lwork, rwork, iwork, ifail, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhegs2 "F_FUNC(zhegs2,ZHEGS2)"(int *itype, char *uplo, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, int *info) nogil
+cdef void zhegs2(int *itype, char *uplo, int *n, z *a, int *lda, z *b, int *ldb, int *info) nogil:
+    _fortran_zhegs2(itype, uplo, n, a, lda, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhegst "F_FUNC(zhegst,ZHEGST)"(int *itype, char *uplo, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, int *info) nogil
+cdef void zhegst(int *itype, char *uplo, int *n, z *a, int *lda, z *b, int *ldb, int *info) nogil:
+    _fortran_zhegst(itype, uplo, n, a, lda, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhegv "F_FUNC(zhegv,ZHEGV)"(int *itype, char *jobz, char *uplo, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, d *w, npy_complex128 *work, int *lwork, d *rwork, int *info) nogil
+cdef void zhegv(int *itype, char *jobz, char *uplo, int *n, z *a, int *lda, z *b, int *ldb, d *w, z *work, int *lwork, d *rwork, int *info) nogil:
+    _fortran_zhegv(itype, jobz, uplo, n, a, lda, b, ldb, w, work, lwork, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhegvd "F_FUNC(zhegvd,ZHEGVD)"(int *itype, char *jobz, char *uplo, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, d *w, npy_complex128 *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+cdef void zhegvd(int *itype, char *jobz, char *uplo, int *n, z *a, int *lda, z *b, int *ldb, d *w, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_zhegvd(itype, jobz, uplo, n, a, lda, b, ldb, w, work, lwork, rwork, lrwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhegvx "F_FUNC(zhegvx,ZHEGVX)"(int *itype, char *jobz, char *range, char *uplo, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, npy_complex128 *z, int *ldz, npy_complex128 *work, int *lwork, d *rwork, int *iwork, int *ifail, int *info) nogil
+cdef void zhegvx(int *itype, char *jobz, char *range, char *uplo, int *n, z *a, int *lda, z *b, int *ldb, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, z *work, int *lwork, d *rwork, int *iwork, int *ifail, int *info) nogil:
+    _fortran_zhegvx(itype, jobz, range, uplo, n, a, lda, b, ldb, vl, vu, il, iu, abstol, m, w, z, ldz, work, lwork, rwork, iwork, ifail, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zherfs "F_FUNC(zherfs,ZHERFS)"(char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *af, int *ldaf, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, d *ferr, d *berr, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void zherfs(char *uplo, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil:
+    _fortran_zherfs(uplo, n, nrhs, a, lda, af, ldaf, ipiv, b, ldb, x, ldx, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhesv "F_FUNC(zhesv,ZHESV)"(char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zhesv(char *uplo, int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, z *work, int *lwork, int *info) nogil:
+    _fortran_zhesv(uplo, n, nrhs, a, lda, ipiv, b, ldb, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhesvx "F_FUNC(zhesvx,ZHESVX)"(char *fact, char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *af, int *ldaf, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, d *rcond, d *ferr, d *berr, npy_complex128 *work, int *lwork, d *rwork, int *info) nogil
+cdef void zhesvx(char *fact, char *uplo, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, int *lwork, d *rwork, int *info) nogil:
+    _fortran_zhesvx(fact, uplo, n, nrhs, a, lda, af, ldaf, ipiv, b, ldb, x, ldx, rcond, ferr, berr, work, lwork, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zheswapr "F_FUNC(zheswapr,ZHESWAPR)"(char *uplo, int *n, npy_complex128 *a, int *lda, int *i1, int *i2) nogil
+cdef void zheswapr(char *uplo, int *n, z *a, int *lda, int *i1, int *i2) nogil:
+    _fortran_zheswapr(uplo, n, a, lda, i1, i2)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhetd2 "F_FUNC(zhetd2,ZHETD2)"(char *uplo, int *n, npy_complex128 *a, int *lda, d *d, d *e, npy_complex128 *tau, int *info) nogil
+cdef void zhetd2(char *uplo, int *n, z *a, int *lda, d *d, d *e, z *tau, int *info) nogil:
+    _fortran_zhetd2(uplo, n, a, lda, d, e, tau, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhetf2 "F_FUNC(zhetf2,ZHETF2)"(char *uplo, int *n, npy_complex128 *a, int *lda, int *ipiv, int *info) nogil
+cdef void zhetf2(char *uplo, int *n, z *a, int *lda, int *ipiv, int *info) nogil:
+    _fortran_zhetf2(uplo, n, a, lda, ipiv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhetrd "F_FUNC(zhetrd,ZHETRD)"(char *uplo, int *n, npy_complex128 *a, int *lda, d *d, d *e, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zhetrd(char *uplo, int *n, z *a, int *lda, d *d, d *e, z *tau, z *work, int *lwork, int *info) nogil:
+    _fortran_zhetrd(uplo, n, a, lda, d, e, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhetrf "F_FUNC(zhetrf,ZHETRF)"(char *uplo, int *n, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zhetrf(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *lwork, int *info) nogil:
+    _fortran_zhetrf(uplo, n, a, lda, ipiv, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhetri "F_FUNC(zhetri,ZHETRI)"(char *uplo, int *n, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *work, int *info) nogil
+cdef void zhetri(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *info) nogil:
+    _fortran_zhetri(uplo, n, a, lda, ipiv, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhetri2 "F_FUNC(zhetri2,ZHETRI2)"(char *uplo, int *n, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zhetri2(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *lwork, int *info) nogil:
+    _fortran_zhetri2(uplo, n, a, lda, ipiv, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhetri2x "F_FUNC(zhetri2x,ZHETRI2X)"(char *uplo, int *n, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *work, int *nb, int *info) nogil
+cdef void zhetri2x(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *nb, int *info) nogil:
+    _fortran_zhetri2x(uplo, n, a, lda, ipiv, work, nb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhetrs "F_FUNC(zhetrs,ZHETRS)"(char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *b, int *ldb, int *info) nogil
+cdef void zhetrs(char *uplo, int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, int *info) nogil:
+    _fortran_zhetrs(uplo, n, nrhs, a, lda, ipiv, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhetrs2 "F_FUNC(zhetrs2,ZHETRS2)"(char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *work, int *info) nogil
+cdef void zhetrs2(char *uplo, int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, z *work, int *info) nogil:
+    _fortran_zhetrs2(uplo, n, nrhs, a, lda, ipiv, b, ldb, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhfrk "F_FUNC(zhfrk,ZHFRK)"(char *transr, char *uplo, char *trans, int *n, int *k, d *alpha, npy_complex128 *a, int *lda, d *beta, npy_complex128 *c) nogil
+cdef void zhfrk(char *transr, char *uplo, char *trans, int *n, int *k, d *alpha, z *a, int *lda, d *beta, z *c) nogil:
+    _fortran_zhfrk(transr, uplo, trans, n, k, alpha, a, lda, beta, c)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhgeqz "F_FUNC(zhgeqz,ZHGEQZ)"(char *job, char *compq, char *compz, int *n, int *ilo, int *ihi, npy_complex128 *h, int *ldh, npy_complex128 *t, int *ldt, npy_complex128 *alpha, npy_complex128 *beta, npy_complex128 *q, int *ldq, npy_complex128 *z, int *ldz, npy_complex128 *work, int *lwork, d *rwork, int *info) nogil
+cdef void zhgeqz(char *job, char *compq, char *compz, int *n, int *ilo, int *ihi, z *h, int *ldh, z *t, int *ldt, z *alpha, z *beta, z *q, int *ldq, z *z, int *ldz, z *work, int *lwork, d *rwork, int *info) nogil:
+    _fortran_zhgeqz(job, compq, compz, n, ilo, ihi, h, ldh, t, ldt, alpha, beta, q, ldq, z, ldz, work, lwork, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhpcon "F_FUNC(zhpcon,ZHPCON)"(char *uplo, int *n, npy_complex128 *ap, int *ipiv, d *anorm, d *rcond, npy_complex128 *work, int *info) nogil
+cdef void zhpcon(char *uplo, int *n, z *ap, int *ipiv, d *anorm, d *rcond, z *work, int *info) nogil:
+    _fortran_zhpcon(uplo, n, ap, ipiv, anorm, rcond, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhpev "F_FUNC(zhpev,ZHPEV)"(char *jobz, char *uplo, int *n, npy_complex128 *ap, d *w, npy_complex128 *z, int *ldz, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void zhpev(char *jobz, char *uplo, int *n, z *ap, d *w, z *z, int *ldz, z *work, d *rwork, int *info) nogil:
+    _fortran_zhpev(jobz, uplo, n, ap, w, z, ldz, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhpevd "F_FUNC(zhpevd,ZHPEVD)"(char *jobz, char *uplo, int *n, npy_complex128 *ap, d *w, npy_complex128 *z, int *ldz, npy_complex128 *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+cdef void zhpevd(char *jobz, char *uplo, int *n, z *ap, d *w, z *z, int *ldz, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_zhpevd(jobz, uplo, n, ap, w, z, ldz, work, lwork, rwork, lrwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhpevx "F_FUNC(zhpevx,ZHPEVX)"(char *jobz, char *range, char *uplo, int *n, npy_complex128 *ap, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, npy_complex128 *z, int *ldz, npy_complex128 *work, d *rwork, int *iwork, int *ifail, int *info) nogil
+cdef void zhpevx(char *jobz, char *range, char *uplo, int *n, z *ap, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, z *work, d *rwork, int *iwork, int *ifail, int *info) nogil:
+    _fortran_zhpevx(jobz, range, uplo, n, ap, vl, vu, il, iu, abstol, m, w, z, ldz, work, rwork, iwork, ifail, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhpgst "F_FUNC(zhpgst,ZHPGST)"(int *itype, char *uplo, int *n, npy_complex128 *ap, npy_complex128 *bp, int *info) nogil
+cdef void zhpgst(int *itype, char *uplo, int *n, z *ap, z *bp, int *info) nogil:
+    _fortran_zhpgst(itype, uplo, n, ap, bp, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhpgv "F_FUNC(zhpgv,ZHPGV)"(int *itype, char *jobz, char *uplo, int *n, npy_complex128 *ap, npy_complex128 *bp, d *w, npy_complex128 *z, int *ldz, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void zhpgv(int *itype, char *jobz, char *uplo, int *n, z *ap, z *bp, d *w, z *z, int *ldz, z *work, d *rwork, int *info) nogil:
+    _fortran_zhpgv(itype, jobz, uplo, n, ap, bp, w, z, ldz, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhpgvd "F_FUNC(zhpgvd,ZHPGVD)"(int *itype, char *jobz, char *uplo, int *n, npy_complex128 *ap, npy_complex128 *bp, d *w, npy_complex128 *z, int *ldz, npy_complex128 *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+cdef void zhpgvd(int *itype, char *jobz, char *uplo, int *n, z *ap, z *bp, d *w, z *z, int *ldz, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_zhpgvd(itype, jobz, uplo, n, ap, bp, w, z, ldz, work, lwork, rwork, lrwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhpgvx "F_FUNC(zhpgvx,ZHPGVX)"(int *itype, char *jobz, char *range, char *uplo, int *n, npy_complex128 *ap, npy_complex128 *bp, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, npy_complex128 *z, int *ldz, npy_complex128 *work, d *rwork, int *iwork, int *ifail, int *info) nogil
+cdef void zhpgvx(int *itype, char *jobz, char *range, char *uplo, int *n, z *ap, z *bp, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, z *work, d *rwork, int *iwork, int *ifail, int *info) nogil:
+    _fortran_zhpgvx(itype, jobz, range, uplo, n, ap, bp, vl, vu, il, iu, abstol, m, w, z, ldz, work, rwork, iwork, ifail, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhprfs "F_FUNC(zhprfs,ZHPRFS)"(char *uplo, int *n, int *nrhs, npy_complex128 *ap, npy_complex128 *afp, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, d *ferr, d *berr, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void zhprfs(char *uplo, int *n, int *nrhs, z *ap, z *afp, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil:
+    _fortran_zhprfs(uplo, n, nrhs, ap, afp, ipiv, b, ldb, x, ldx, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhpsv "F_FUNC(zhpsv,ZHPSV)"(char *uplo, int *n, int *nrhs, npy_complex128 *ap, int *ipiv, npy_complex128 *b, int *ldb, int *info) nogil
+cdef void zhpsv(char *uplo, int *n, int *nrhs, z *ap, int *ipiv, z *b, int *ldb, int *info) nogil:
+    _fortran_zhpsv(uplo, n, nrhs, ap, ipiv, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhpsvx "F_FUNC(zhpsvx,ZHPSVX)"(char *fact, char *uplo, int *n, int *nrhs, npy_complex128 *ap, npy_complex128 *afp, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, d *rcond, d *ferr, d *berr, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void zhpsvx(char *fact, char *uplo, int *n, int *nrhs, z *ap, z *afp, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil:
+    _fortran_zhpsvx(fact, uplo, n, nrhs, ap, afp, ipiv, b, ldb, x, ldx, rcond, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhptrd "F_FUNC(zhptrd,ZHPTRD)"(char *uplo, int *n, npy_complex128 *ap, d *d, d *e, npy_complex128 *tau, int *info) nogil
+cdef void zhptrd(char *uplo, int *n, z *ap, d *d, d *e, z *tau, int *info) nogil:
+    _fortran_zhptrd(uplo, n, ap, d, e, tau, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhptrf "F_FUNC(zhptrf,ZHPTRF)"(char *uplo, int *n, npy_complex128 *ap, int *ipiv, int *info) nogil
+cdef void zhptrf(char *uplo, int *n, z *ap, int *ipiv, int *info) nogil:
+    _fortran_zhptrf(uplo, n, ap, ipiv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhptri "F_FUNC(zhptri,ZHPTRI)"(char *uplo, int *n, npy_complex128 *ap, int *ipiv, npy_complex128 *work, int *info) nogil
+cdef void zhptri(char *uplo, int *n, z *ap, int *ipiv, z *work, int *info) nogil:
+    _fortran_zhptri(uplo, n, ap, ipiv, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhptrs "F_FUNC(zhptrs,ZHPTRS)"(char *uplo, int *n, int *nrhs, npy_complex128 *ap, int *ipiv, npy_complex128 *b, int *ldb, int *info) nogil
+cdef void zhptrs(char *uplo, int *n, int *nrhs, z *ap, int *ipiv, z *b, int *ldb, int *info) nogil:
+    _fortran_zhptrs(uplo, n, nrhs, ap, ipiv, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhsein "F_FUNC(zhsein,ZHSEIN)"(char *side, char *eigsrc, char *initv, bint *select, int *n, npy_complex128 *h, int *ldh, npy_complex128 *w, npy_complex128 *vl, int *ldvl, npy_complex128 *vr, int *ldvr, int *mm, int *m, npy_complex128 *work, d *rwork, int *ifaill, int *ifailr, int *info) nogil
+cdef void zhsein(char *side, char *eigsrc, char *initv, bint *select, int *n, z *h, int *ldh, z *w, z *vl, int *ldvl, z *vr, int *ldvr, int *mm, int *m, z *work, d *rwork, int *ifaill, int *ifailr, int *info) nogil:
+    _fortran_zhsein(side, eigsrc, initv, select, n, h, ldh, w, vl, ldvl, vr, ldvr, mm, m, work, rwork, ifaill, ifailr, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zhseqr "F_FUNC(zhseqr,ZHSEQR)"(char *job, char *compz, int *n, int *ilo, int *ihi, npy_complex128 *h, int *ldh, npy_complex128 *w, npy_complex128 *z, int *ldz, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zhseqr(char *job, char *compz, int *n, int *ilo, int *ihi, z *h, int *ldh, z *w, z *z, int *ldz, z *work, int *lwork, int *info) nogil:
+    _fortran_zhseqr(job, compz, n, ilo, ihi, h, ldh, w, z, ldz, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlabrd "F_FUNC(zlabrd,ZLABRD)"(int *m, int *n, int *nb, npy_complex128 *a, int *lda, d *d, d *e, npy_complex128 *tauq, npy_complex128 *taup, npy_complex128 *x, int *ldx, npy_complex128 *y, int *ldy) nogil
+cdef void zlabrd(int *m, int *n, int *nb, z *a, int *lda, d *d, d *e, z *tauq, z *taup, z *x, int *ldx, z *y, int *ldy) nogil:
+    _fortran_zlabrd(m, n, nb, a, lda, d, e, tauq, taup, x, ldx, y, ldy)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlacgv "F_FUNC(zlacgv,ZLACGV)"(int *n, npy_complex128 *x, int *incx) nogil
+cdef void zlacgv(int *n, z *x, int *incx) nogil:
+    _fortran_zlacgv(n, x, incx)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlacn2 "F_FUNC(zlacn2,ZLACN2)"(int *n, npy_complex128 *v, npy_complex128 *x, d *est, int *kase, int *isave) nogil
+cdef void zlacn2(int *n, z *v, z *x, d *est, int *kase, int *isave) nogil:
+    _fortran_zlacn2(n, v, x, est, kase, isave)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlacon "F_FUNC(zlacon,ZLACON)"(int *n, npy_complex128 *v, npy_complex128 *x, d *est, int *kase) nogil
+cdef void zlacon(int *n, z *v, z *x, d *est, int *kase) nogil:
+    _fortran_zlacon(n, v, x, est, kase)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlacp2 "F_FUNC(zlacp2,ZLACP2)"(char *uplo, int *m, int *n, d *a, int *lda, npy_complex128 *b, int *ldb) nogil
+cdef void zlacp2(char *uplo, int *m, int *n, d *a, int *lda, z *b, int *ldb) nogil:
+    _fortran_zlacp2(uplo, m, n, a, lda, b, ldb)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlacpy "F_FUNC(zlacpy,ZLACPY)"(char *uplo, int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb) nogil
+cdef void zlacpy(char *uplo, int *m, int *n, z *a, int *lda, z *b, int *ldb) nogil:
+    _fortran_zlacpy(uplo, m, n, a, lda, b, ldb)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlacrm "F_FUNC(zlacrm,ZLACRM)"(int *m, int *n, npy_complex128 *a, int *lda, d *b, int *ldb, npy_complex128 *c, int *ldc, d *rwork) nogil
+cdef void zlacrm(int *m, int *n, z *a, int *lda, d *b, int *ldb, z *c, int *ldc, d *rwork) nogil:
+    _fortran_zlacrm(m, n, a, lda, b, ldb, c, ldc, rwork)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlacrt "F_FUNC(zlacrt,ZLACRT)"(int *n, npy_complex128 *cx, int *incx, npy_complex128 *cy, int *incy, npy_complex128 *c, npy_complex128 *s) nogil
+cdef void zlacrt(int *n, z *cx, int *incx, z *cy, int *incy, z *c, z *s) nogil:
+    _fortran_zlacrt(n, cx, incx, cy, incy, c, s)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlaed0 "F_FUNC(zlaed0,ZLAED0)"(int *qsiz, int *n, d *d, d *e, npy_complex128 *q, int *ldq, npy_complex128 *qstore, int *ldqs, d *rwork, int *iwork, int *info) nogil
+cdef void zlaed0(int *qsiz, int *n, d *d, d *e, z *q, int *ldq, z *qstore, int *ldqs, d *rwork, int *iwork, int *info) nogil:
+    _fortran_zlaed0(qsiz, n, d, e, q, ldq, qstore, ldqs, rwork, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlaed7 "F_FUNC(zlaed7,ZLAED7)"(int *n, int *cutpnt, int *qsiz, int *tlvls, int *curlvl, int *curpbm, d *d, npy_complex128 *q, int *ldq, d *rho, int *indxq, d *qstore, int *qptr, int *prmptr, int *perm, int *givptr, int *givcol, d *givnum, npy_complex128 *work, d *rwork, int *iwork, int *info) nogil
+cdef void zlaed7(int *n, int *cutpnt, int *qsiz, int *tlvls, int *curlvl, int *curpbm, d *d, z *q, int *ldq, d *rho, int *indxq, d *qstore, int *qptr, int *prmptr, int *perm, int *givptr, int *givcol, d *givnum, z *work, d *rwork, int *iwork, int *info) nogil:
+    _fortran_zlaed7(n, cutpnt, qsiz, tlvls, curlvl, curpbm, d, q, ldq, rho, indxq, qstore, qptr, prmptr, perm, givptr, givcol, givnum, work, rwork, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlaed8 "F_FUNC(zlaed8,ZLAED8)"(int *k, int *n, int *qsiz, npy_complex128 *q, int *ldq, d *d, d *rho, int *cutpnt, d *z, d *dlamda, npy_complex128 *q2, int *ldq2, d *w, int *indxp, int *indx, int *indxq, int *perm, int *givptr, int *givcol, d *givnum, int *info) nogil
+cdef void zlaed8(int *k, int *n, int *qsiz, z *q, int *ldq, d *d, d *rho, int *cutpnt, d *z, d *dlamda, z *q2, int *ldq2, d *w, int *indxp, int *indx, int *indxq, int *perm, int *givptr, int *givcol, d *givnum, int *info) nogil:
+    _fortran_zlaed8(k, n, qsiz, q, ldq, d, rho, cutpnt, z, dlamda, q2, ldq2, w, indxp, indx, indxq, perm, givptr, givcol, givnum, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlaein "F_FUNC(zlaein,ZLAEIN)"(bint *rightv, bint *noinit, int *n, npy_complex128 *h, int *ldh, npy_complex128 *w, npy_complex128 *v, npy_complex128 *b, int *ldb, d *rwork, d *eps3, d *smlnum, int *info) nogil
+cdef void zlaein(bint *rightv, bint *noinit, int *n, z *h, int *ldh, z *w, z *v, z *b, int *ldb, d *rwork, d *eps3, d *smlnum, int *info) nogil:
+    _fortran_zlaein(rightv, noinit, n, h, ldh, w, v, b, ldb, rwork, eps3, smlnum, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlaesy "F_FUNC(zlaesy,ZLAESY)"(npy_complex128 *a, npy_complex128 *b, npy_complex128 *c, npy_complex128 *rt1, npy_complex128 *rt2, npy_complex128 *evscal, npy_complex128 *cs1, npy_complex128 *sn1) nogil
+cdef void zlaesy(z *a, z *b, z *c, z *rt1, z *rt2, z *evscal, z *cs1, z *sn1) nogil:
+    _fortran_zlaesy(a, b, c, rt1, rt2, evscal, cs1, sn1)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlaev2 "F_FUNC(zlaev2,ZLAEV2)"(npy_complex128 *a, npy_complex128 *b, npy_complex128 *c, d *rt1, d *rt2, d *cs1, npy_complex128 *sn1) nogil
+cdef void zlaev2(z *a, z *b, z *c, d *rt1, d *rt2, d *cs1, z *sn1) nogil:
+    _fortran_zlaev2(a, b, c, rt1, rt2, cs1, sn1)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlag2c "F_FUNC(zlag2c,ZLAG2C)"(int *m, int *n, npy_complex128 *a, int *lda, npy_complex64 *sa, int *ldsa, int *info) nogil
+cdef void zlag2c(int *m, int *n, z *a, int *lda, c *sa, int *ldsa, int *info) nogil:
+    _fortran_zlag2c(m, n, a, lda, sa, ldsa, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlags2 "F_FUNC(zlags2,ZLAGS2)"(bint *upper, d *a1, npy_complex128 *a2, d *a3, d *b1, npy_complex128 *b2, d *b3, d *csu, npy_complex128 *snu, d *csv, npy_complex128 *snv, d *csq, npy_complex128 *snq) nogil
+cdef void zlags2(bint *upper, d *a1, z *a2, d *a3, d *b1, z *b2, d *b3, d *csu, z *snu, d *csv, z *snv, d *csq, z *snq) nogil:
+    _fortran_zlags2(upper, a1, a2, a3, b1, b2, b3, csu, snu, csv, snv, csq, snq)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlagtm "F_FUNC(zlagtm,ZLAGTM)"(char *trans, int *n, int *nrhs, d *alpha, npy_complex128 *dl, npy_complex128 *d, npy_complex128 *du, npy_complex128 *x, int *ldx, d *beta, npy_complex128 *b, int *ldb) nogil
+cdef void zlagtm(char *trans, int *n, int *nrhs, d *alpha, z *dl, z *d, z *du, z *x, int *ldx, d *beta, z *b, int *ldb) nogil:
+    _fortran_zlagtm(trans, n, nrhs, alpha, dl, d, du, x, ldx, beta, b, ldb)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlahef "F_FUNC(zlahef,ZLAHEF)"(char *uplo, int *n, int *nb, int *kb, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *w, int *ldw, int *info) nogil
+cdef void zlahef(char *uplo, int *n, int *nb, int *kb, z *a, int *lda, int *ipiv, z *w, int *ldw, int *info) nogil:
+    _fortran_zlahef(uplo, n, nb, kb, a, lda, ipiv, w, ldw, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlahqr "F_FUNC(zlahqr,ZLAHQR)"(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, npy_complex128 *h, int *ldh, npy_complex128 *w, int *iloz, int *ihiz, npy_complex128 *z, int *ldz, int *info) nogil
+cdef void zlahqr(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, z *h, int *ldh, z *w, int *iloz, int *ihiz, z *z, int *ldz, int *info) nogil:
+    _fortran_zlahqr(wantt, wantz, n, ilo, ihi, h, ldh, w, iloz, ihiz, z, ldz, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlahr2 "F_FUNC(zlahr2,ZLAHR2)"(int *n, int *k, int *nb, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *t, int *ldt, npy_complex128 *y, int *ldy) nogil
+cdef void zlahr2(int *n, int *k, int *nb, z *a, int *lda, z *tau, z *t, int *ldt, z *y, int *ldy) nogil:
+    _fortran_zlahr2(n, k, nb, a, lda, tau, t, ldt, y, ldy)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlaic1 "F_FUNC(zlaic1,ZLAIC1)"(int *job, int *j, npy_complex128 *x, d *sest, npy_complex128 *w, npy_complex128 *gamma, d *sestpr, npy_complex128 *s, npy_complex128 *c) nogil
+cdef void zlaic1(int *job, int *j, z *x, d *sest, z *w, z *gamma, d *sestpr, z *s, z *c) nogil:
+    _fortran_zlaic1(job, j, x, sest, w, gamma, sestpr, s, c)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlals0 "F_FUNC(zlals0,ZLALS0)"(int *icompq, int *nl, int *nr, int *sqre, int *nrhs, npy_complex128 *b, int *ldb, npy_complex128 *bx, int *ldbx, int *perm, int *givptr, int *givcol, int *ldgcol, d *givnum, int *ldgnum, d *poles, d *difl, d *difr, d *z, int *k, d *c, d *s, d *rwork, int *info) nogil
+cdef void zlals0(int *icompq, int *nl, int *nr, int *sqre, int *nrhs, z *b, int *ldb, z *bx, int *ldbx, int *perm, int *givptr, int *givcol, int *ldgcol, d *givnum, int *ldgnum, d *poles, d *difl, d *difr, d *z, int *k, d *c, d *s, d *rwork, int *info) nogil:
+    _fortran_zlals0(icompq, nl, nr, sqre, nrhs, b, ldb, bx, ldbx, perm, givptr, givcol, ldgcol, givnum, ldgnum, poles, difl, difr, z, k, c, s, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlalsa "F_FUNC(zlalsa,ZLALSA)"(int *icompq, int *smlsiz, int *n, int *nrhs, npy_complex128 *b, int *ldb, npy_complex128 *bx, int *ldbx, d *u, int *ldu, d *vt, int *k, d *difl, d *difr, d *z, d *poles, int *givptr, int *givcol, int *ldgcol, int *perm, d *givnum, d *c, d *s, d *rwork, int *iwork, int *info) nogil
+cdef void zlalsa(int *icompq, int *smlsiz, int *n, int *nrhs, z *b, int *ldb, z *bx, int *ldbx, d *u, int *ldu, d *vt, int *k, d *difl, d *difr, d *z, d *poles, int *givptr, int *givcol, int *ldgcol, int *perm, d *givnum, d *c, d *s, d *rwork, int *iwork, int *info) nogil:
+    _fortran_zlalsa(icompq, smlsiz, n, nrhs, b, ldb, bx, ldbx, u, ldu, vt, k, difl, difr, z, poles, givptr, givcol, ldgcol, perm, givnum, c, s, rwork, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlalsd "F_FUNC(zlalsd,ZLALSD)"(char *uplo, int *smlsiz, int *n, int *nrhs, d *d, d *e, npy_complex128 *b, int *ldb, d *rcond, int *rank, npy_complex128 *work, d *rwork, int *iwork, int *info) nogil
+cdef void zlalsd(char *uplo, int *smlsiz, int *n, int *nrhs, d *d, d *e, z *b, int *ldb, d *rcond, int *rank, z *work, d *rwork, int *iwork, int *info) nogil:
+    _fortran_zlalsd(uplo, smlsiz, n, nrhs, d, e, b, ldb, rcond, rank, work, rwork, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlapll "F_FUNC(zlapll,ZLAPLL)"(int *n, npy_complex128 *x, int *incx, npy_complex128 *y, int *incy, d *ssmin) nogil
+cdef void zlapll(int *n, z *x, int *incx, z *y, int *incy, d *ssmin) nogil:
+    _fortran_zlapll(n, x, incx, y, incy, ssmin)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlapmr "F_FUNC(zlapmr,ZLAPMR)"(bint *forwrd, int *m, int *n, npy_complex128 *x, int *ldx, int *k) nogil
+cdef void zlapmr(bint *forwrd, int *m, int *n, z *x, int *ldx, int *k) nogil:
+    _fortran_zlapmr(forwrd, m, n, x, ldx, k)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlapmt "F_FUNC(zlapmt,ZLAPMT)"(bint *forwrd, int *m, int *n, npy_complex128 *x, int *ldx, int *k) nogil
+cdef void zlapmt(bint *forwrd, int *m, int *n, z *x, int *ldx, int *k) nogil:
+    _fortran_zlapmt(forwrd, m, n, x, ldx, k)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlaqgb "F_FUNC(zlaqgb,ZLAQGB)"(int *m, int *n, int *kl, int *ku, npy_complex128 *ab, int *ldab, d *r, d *c, d *rowcnd, d *colcnd, d *amax, char *equed) nogil
+cdef void zlaqgb(int *m, int *n, int *kl, int *ku, z *ab, int *ldab, d *r, d *c, d *rowcnd, d *colcnd, d *amax, char *equed) nogil:
+    _fortran_zlaqgb(m, n, kl, ku, ab, ldab, r, c, rowcnd, colcnd, amax, equed)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlaqge "F_FUNC(zlaqge,ZLAQGE)"(int *m, int *n, npy_complex128 *a, int *lda, d *r, d *c, d *rowcnd, d *colcnd, d *amax, char *equed) nogil
+cdef void zlaqge(int *m, int *n, z *a, int *lda, d *r, d *c, d *rowcnd, d *colcnd, d *amax, char *equed) nogil:
+    _fortran_zlaqge(m, n, a, lda, r, c, rowcnd, colcnd, amax, equed)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlaqhb "F_FUNC(zlaqhb,ZLAQHB)"(char *uplo, int *n, int *kd, npy_complex128 *ab, int *ldab, d *s, d *scond, d *amax, char *equed) nogil
+cdef void zlaqhb(char *uplo, int *n, int *kd, z *ab, int *ldab, d *s, d *scond, d *amax, char *equed) nogil:
+    _fortran_zlaqhb(uplo, n, kd, ab, ldab, s, scond, amax, equed)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlaqhe "F_FUNC(zlaqhe,ZLAQHE)"(char *uplo, int *n, npy_complex128 *a, int *lda, d *s, d *scond, d *amax, char *equed) nogil
+cdef void zlaqhe(char *uplo, int *n, z *a, int *lda, d *s, d *scond, d *amax, char *equed) nogil:
+    _fortran_zlaqhe(uplo, n, a, lda, s, scond, amax, equed)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlaqhp "F_FUNC(zlaqhp,ZLAQHP)"(char *uplo, int *n, npy_complex128 *ap, d *s, d *scond, d *amax, char *equed) nogil
+cdef void zlaqhp(char *uplo, int *n, z *ap, d *s, d *scond, d *amax, char *equed) nogil:
+    _fortran_zlaqhp(uplo, n, ap, s, scond, amax, equed)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlaqp2 "F_FUNC(zlaqp2,ZLAQP2)"(int *m, int *n, int *offset, npy_complex128 *a, int *lda, int *jpvt, npy_complex128 *tau, d *vn1, d *vn2, npy_complex128 *work) nogil
+cdef void zlaqp2(int *m, int *n, int *offset, z *a, int *lda, int *jpvt, z *tau, d *vn1, d *vn2, z *work) nogil:
+    _fortran_zlaqp2(m, n, offset, a, lda, jpvt, tau, vn1, vn2, work)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlaqps "F_FUNC(zlaqps,ZLAQPS)"(int *m, int *n, int *offset, int *nb, int *kb, npy_complex128 *a, int *lda, int *jpvt, npy_complex128 *tau, d *vn1, d *vn2, npy_complex128 *auxv, npy_complex128 *f, int *ldf) nogil
+cdef void zlaqps(int *m, int *n, int *offset, int *nb, int *kb, z *a, int *lda, int *jpvt, z *tau, d *vn1, d *vn2, z *auxv, z *f, int *ldf) nogil:
+    _fortran_zlaqps(m, n, offset, nb, kb, a, lda, jpvt, tau, vn1, vn2, auxv, f, ldf)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlaqr0 "F_FUNC(zlaqr0,ZLAQR0)"(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, npy_complex128 *h, int *ldh, npy_complex128 *w, int *iloz, int *ihiz, npy_complex128 *z, int *ldz, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zlaqr0(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, z *h, int *ldh, z *w, int *iloz, int *ihiz, z *z, int *ldz, z *work, int *lwork, int *info) nogil:
+    _fortran_zlaqr0(wantt, wantz, n, ilo, ihi, h, ldh, w, iloz, ihiz, z, ldz, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlaqr1 "F_FUNC(zlaqr1,ZLAQR1)"(int *n, npy_complex128 *h, int *ldh, npy_complex128 *s1, npy_complex128 *s2, npy_complex128 *v) nogil
+cdef void zlaqr1(int *n, z *h, int *ldh, z *s1, z *s2, z *v) nogil:
+    _fortran_zlaqr1(n, h, ldh, s1, s2, v)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlaqr2 "F_FUNC(zlaqr2,ZLAQR2)"(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, npy_complex128 *h, int *ldh, int *iloz, int *ihiz, npy_complex128 *z, int *ldz, int *ns, int *nd, npy_complex128 *sh, npy_complex128 *v, int *ldv, int *nh, npy_complex128 *t, int *ldt, int *nv, npy_complex128 *wv, int *ldwv, npy_complex128 *work, int *lwork) nogil
+cdef void zlaqr2(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, z *h, int *ldh, int *iloz, int *ihiz, z *z, int *ldz, int *ns, int *nd, z *sh, z *v, int *ldv, int *nh, z *t, int *ldt, int *nv, z *wv, int *ldwv, z *work, int *lwork) nogil:
+    _fortran_zlaqr2(wantt, wantz, n, ktop, kbot, nw, h, ldh, iloz, ihiz, z, ldz, ns, nd, sh, v, ldv, nh, t, ldt, nv, wv, ldwv, work, lwork)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlaqr3 "F_FUNC(zlaqr3,ZLAQR3)"(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, npy_complex128 *h, int *ldh, int *iloz, int *ihiz, npy_complex128 *z, int *ldz, int *ns, int *nd, npy_complex128 *sh, npy_complex128 *v, int *ldv, int *nh, npy_complex128 *t, int *ldt, int *nv, npy_complex128 *wv, int *ldwv, npy_complex128 *work, int *lwork) nogil
+cdef void zlaqr3(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, z *h, int *ldh, int *iloz, int *ihiz, z *z, int *ldz, int *ns, int *nd, z *sh, z *v, int *ldv, int *nh, z *t, int *ldt, int *nv, z *wv, int *ldwv, z *work, int *lwork) nogil:
+    _fortran_zlaqr3(wantt, wantz, n, ktop, kbot, nw, h, ldh, iloz, ihiz, z, ldz, ns, nd, sh, v, ldv, nh, t, ldt, nv, wv, ldwv, work, lwork)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlaqr4 "F_FUNC(zlaqr4,ZLAQR4)"(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, npy_complex128 *h, int *ldh, npy_complex128 *w, int *iloz, int *ihiz, npy_complex128 *z, int *ldz, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zlaqr4(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, z *h, int *ldh, z *w, int *iloz, int *ihiz, z *z, int *ldz, z *work, int *lwork, int *info) nogil:
+    _fortran_zlaqr4(wantt, wantz, n, ilo, ihi, h, ldh, w, iloz, ihiz, z, ldz, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlaqr5 "F_FUNC(zlaqr5,ZLAQR5)"(bint *wantt, bint *wantz, int *kacc22, int *n, int *ktop, int *kbot, int *nshfts, npy_complex128 *s, npy_complex128 *h, int *ldh, int *iloz, int *ihiz, npy_complex128 *z, int *ldz, npy_complex128 *v, int *ldv, npy_complex128 *u, int *ldu, int *nv, npy_complex128 *wv, int *ldwv, int *nh, npy_complex128 *wh, int *ldwh) nogil
+cdef void zlaqr5(bint *wantt, bint *wantz, int *kacc22, int *n, int *ktop, int *kbot, int *nshfts, z *s, z *h, int *ldh, int *iloz, int *ihiz, z *z, int *ldz, z *v, int *ldv, z *u, int *ldu, int *nv, z *wv, int *ldwv, int *nh, z *wh, int *ldwh) nogil:
+    _fortran_zlaqr5(wantt, wantz, kacc22, n, ktop, kbot, nshfts, s, h, ldh, iloz, ihiz, z, ldz, v, ldv, u, ldu, nv, wv, ldwv, nh, wh, ldwh)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlaqsb "F_FUNC(zlaqsb,ZLAQSB)"(char *uplo, int *n, int *kd, npy_complex128 *ab, int *ldab, d *s, d *scond, d *amax, char *equed) nogil
+cdef void zlaqsb(char *uplo, int *n, int *kd, z *ab, int *ldab, d *s, d *scond, d *amax, char *equed) nogil:
+    _fortran_zlaqsb(uplo, n, kd, ab, ldab, s, scond, amax, equed)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlaqsp "F_FUNC(zlaqsp,ZLAQSP)"(char *uplo, int *n, npy_complex128 *ap, d *s, d *scond, d *amax, char *equed) nogil
+cdef void zlaqsp(char *uplo, int *n, z *ap, d *s, d *scond, d *amax, char *equed) nogil:
+    _fortran_zlaqsp(uplo, n, ap, s, scond, amax, equed)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlaqsy "F_FUNC(zlaqsy,ZLAQSY)"(char *uplo, int *n, npy_complex128 *a, int *lda, d *s, d *scond, d *amax, char *equed) nogil
+cdef void zlaqsy(char *uplo, int *n, z *a, int *lda, d *s, d *scond, d *amax, char *equed) nogil:
+    _fortran_zlaqsy(uplo, n, a, lda, s, scond, amax, equed)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlar1v "F_FUNC(zlar1v,ZLAR1V)"(int *n, int *b1, int *bn, d *lambda_, d *d, d *l, d *ld, d *lld, d *pivmin, d *gaptol, npy_complex128 *z, bint *wantnc, int *negcnt, d *ztz, d *mingma, int *r, int *isuppz, d *nrminv, d *resid, d *rqcorr, d *work) nogil
+cdef void zlar1v(int *n, int *b1, int *bn, d *lambda_, d *d, d *l, d *ld, d *lld, d *pivmin, d *gaptol, z *z, bint *wantnc, int *negcnt, d *ztz, d *mingma, int *r, int *isuppz, d *nrminv, d *resid, d *rqcorr, d *work) nogil:
+    _fortran_zlar1v(n, b1, bn, lambda_, d, l, ld, lld, pivmin, gaptol, z, wantnc, negcnt, ztz, mingma, r, isuppz, nrminv, resid, rqcorr, work)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlar2v "F_FUNC(zlar2v,ZLAR2V)"(int *n, npy_complex128 *x, npy_complex128 *y, npy_complex128 *z, int *incx, d *c, npy_complex128 *s, int *incc) nogil
+cdef void zlar2v(int *n, z *x, z *y, z *z, int *incx, d *c, z *s, int *incc) nogil:
+    _fortran_zlar2v(n, x, y, z, incx, c, s, incc)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlarcm "F_FUNC(zlarcm,ZLARCM)"(int *m, int *n, d *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *c, int *ldc, d *rwork) nogil
+cdef void zlarcm(int *m, int *n, d *a, int *lda, z *b, int *ldb, z *c, int *ldc, d *rwork) nogil:
+    _fortran_zlarcm(m, n, a, lda, b, ldb, c, ldc, rwork)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlarf "F_FUNC(zlarf,ZLARF)"(char *side, int *m, int *n, npy_complex128 *v, int *incv, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work) nogil
+cdef void zlarf(char *side, int *m, int *n, z *v, int *incv, z *tau, z *c, int *ldc, z *work) nogil:
+    _fortran_zlarf(side, m, n, v, incv, tau, c, ldc, work)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlarfb "F_FUNC(zlarfb,ZLARFB)"(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, npy_complex128 *v, int *ldv, npy_complex128 *t, int *ldt, npy_complex128 *c, int *ldc, npy_complex128 *work, int *ldwork) nogil
+cdef void zlarfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, z *v, int *ldv, z *t, int *ldt, z *c, int *ldc, z *work, int *ldwork) nogil:
+    _fortran_zlarfb(side, trans, direct, storev, m, n, k, v, ldv, t, ldt, c, ldc, work, ldwork)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlarfg "F_FUNC(zlarfg,ZLARFG)"(int *n, npy_complex128 *alpha, npy_complex128 *x, int *incx, npy_complex128 *tau) nogil
+cdef void zlarfg(int *n, z *alpha, z *x, int *incx, z *tau) nogil:
+    _fortran_zlarfg(n, alpha, x, incx, tau)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlarfgp "F_FUNC(zlarfgp,ZLARFGP)"(int *n, npy_complex128 *alpha, npy_complex128 *x, int *incx, npy_complex128 *tau) nogil
+cdef void zlarfgp(int *n, z *alpha, z *x, int *incx, z *tau) nogil:
+    _fortran_zlarfgp(n, alpha, x, incx, tau)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlarft "F_FUNC(zlarft,ZLARFT)"(char *direct, char *storev, int *n, int *k, npy_complex128 *v, int *ldv, npy_complex128 *tau, npy_complex128 *t, int *ldt) nogil
+cdef void zlarft(char *direct, char *storev, int *n, int *k, z *v, int *ldv, z *tau, z *t, int *ldt) nogil:
+    _fortran_zlarft(direct, storev, n, k, v, ldv, tau, t, ldt)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlarfx "F_FUNC(zlarfx,ZLARFX)"(char *side, int *m, int *n, npy_complex128 *v, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work) nogil
+cdef void zlarfx(char *side, int *m, int *n, z *v, z *tau, z *c, int *ldc, z *work) nogil:
+    _fortran_zlarfx(side, m, n, v, tau, c, ldc, work)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlargv "F_FUNC(zlargv,ZLARGV)"(int *n, npy_complex128 *x, int *incx, npy_complex128 *y, int *incy, d *c, int *incc) nogil
+cdef void zlargv(int *n, z *x, int *incx, z *y, int *incy, d *c, int *incc) nogil:
+    _fortran_zlargv(n, x, incx, y, incy, c, incc)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlarnv "F_FUNC(zlarnv,ZLARNV)"(int *idist, int *iseed, int *n, npy_complex128 *x) nogil
+cdef void zlarnv(int *idist, int *iseed, int *n, z *x) nogil:
+    _fortran_zlarnv(idist, iseed, n, x)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlarrv "F_FUNC(zlarrv,ZLARRV)"(int *n, d *vl, d *vu, d *d, d *l, d *pivmin, int *isplit, int *m, int *dol, int *dou, d *minrgp, d *rtol1, d *rtol2, d *w, d *werr, d *wgap, int *iblock, int *indexw, d *gers, npy_complex128 *z, int *ldz, int *isuppz, d *work, int *iwork, int *info) nogil
+cdef void zlarrv(int *n, d *vl, d *vu, d *d, d *l, d *pivmin, int *isplit, int *m, int *dol, int *dou, d *minrgp, d *rtol1, d *rtol2, d *w, d *werr, d *wgap, int *iblock, int *indexw, d *gers, z *z, int *ldz, int *isuppz, d *work, int *iwork, int *info) nogil:
+    _fortran_zlarrv(n, vl, vu, d, l, pivmin, isplit, m, dol, dou, minrgp, rtol1, rtol2, w, werr, wgap, iblock, indexw, gers, z, ldz, isuppz, work, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlartg "F_FUNC(zlartg,ZLARTG)"(npy_complex128 *f, npy_complex128 *g, d *cs, npy_complex128 *sn, npy_complex128 *r) nogil
+cdef void zlartg(z *f, z *g, d *cs, z *sn, z *r) nogil:
+    _fortran_zlartg(f, g, cs, sn, r)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlartv "F_FUNC(zlartv,ZLARTV)"(int *n, npy_complex128 *x, int *incx, npy_complex128 *y, int *incy, d *c, npy_complex128 *s, int *incc) nogil
+cdef void zlartv(int *n, z *x, int *incx, z *y, int *incy, d *c, z *s, int *incc) nogil:
+    _fortran_zlartv(n, x, incx, y, incy, c, s, incc)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlarz "F_FUNC(zlarz,ZLARZ)"(char *side, int *m, int *n, int *l, npy_complex128 *v, int *incv, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work) nogil
+cdef void zlarz(char *side, int *m, int *n, int *l, z *v, int *incv, z *tau, z *c, int *ldc, z *work) nogil:
+    _fortran_zlarz(side, m, n, l, v, incv, tau, c, ldc, work)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlarzb "F_FUNC(zlarzb,ZLARZB)"(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, npy_complex128 *v, int *ldv, npy_complex128 *t, int *ldt, npy_complex128 *c, int *ldc, npy_complex128 *work, int *ldwork) nogil
+cdef void zlarzb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, z *v, int *ldv, z *t, int *ldt, z *c, int *ldc, z *work, int *ldwork) nogil:
+    _fortran_zlarzb(side, trans, direct, storev, m, n, k, l, v, ldv, t, ldt, c, ldc, work, ldwork)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlarzt "F_FUNC(zlarzt,ZLARZT)"(char *direct, char *storev, int *n, int *k, npy_complex128 *v, int *ldv, npy_complex128 *tau, npy_complex128 *t, int *ldt) nogil
+cdef void zlarzt(char *direct, char *storev, int *n, int *k, z *v, int *ldv, z *tau, z *t, int *ldt) nogil:
+    _fortran_zlarzt(direct, storev, n, k, v, ldv, tau, t, ldt)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlascl "F_FUNC(zlascl,ZLASCL)"(char *type_bn, int *kl, int *ku, d *cfrom, d *cto, int *m, int *n, npy_complex128 *a, int *lda, int *info) nogil
+cdef void zlascl(char *type_bn, int *kl, int *ku, d *cfrom, d *cto, int *m, int *n, z *a, int *lda, int *info) nogil:
+    _fortran_zlascl(type_bn, kl, ku, cfrom, cto, m, n, a, lda, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlaset "F_FUNC(zlaset,ZLASET)"(char *uplo, int *m, int *n, npy_complex128 *alpha, npy_complex128 *beta, npy_complex128 *a, int *lda) nogil
+cdef void zlaset(char *uplo, int *m, int *n, z *alpha, z *beta, z *a, int *lda) nogil:
+    _fortran_zlaset(uplo, m, n, alpha, beta, a, lda)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlasr "F_FUNC(zlasr,ZLASR)"(char *side, char *pivot, char *direct, int *m, int *n, d *c, d *s, npy_complex128 *a, int *lda) nogil
+cdef void zlasr(char *side, char *pivot, char *direct, int *m, int *n, d *c, d *s, z *a, int *lda) nogil:
+    _fortran_zlasr(side, pivot, direct, m, n, c, s, a, lda)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlassq "F_FUNC(zlassq,ZLASSQ)"(int *n, npy_complex128 *x, int *incx, d *scale, d *sumsq) nogil
+cdef void zlassq(int *n, z *x, int *incx, d *scale, d *sumsq) nogil:
+    _fortran_zlassq(n, x, incx, scale, sumsq)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlaswp "F_FUNC(zlaswp,ZLASWP)"(int *n, npy_complex128 *a, int *lda, int *k1, int *k2, int *ipiv, int *incx) nogil
+cdef void zlaswp(int *n, z *a, int *lda, int *k1, int *k2, int *ipiv, int *incx) nogil:
+    _fortran_zlaswp(n, a, lda, k1, k2, ipiv, incx)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlasyf "F_FUNC(zlasyf,ZLASYF)"(char *uplo, int *n, int *nb, int *kb, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *w, int *ldw, int *info) nogil
+cdef void zlasyf(char *uplo, int *n, int *nb, int *kb, z *a, int *lda, int *ipiv, z *w, int *ldw, int *info) nogil:
+    _fortran_zlasyf(uplo, n, nb, kb, a, lda, ipiv, w, ldw, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlat2c "F_FUNC(zlat2c,ZLAT2C)"(char *uplo, int *n, npy_complex128 *a, int *lda, npy_complex64 *sa, int *ldsa, int *info) nogil
+cdef void zlat2c(char *uplo, int *n, z *a, int *lda, c *sa, int *ldsa, int *info) nogil:
+    _fortran_zlat2c(uplo, n, a, lda, sa, ldsa, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlatbs "F_FUNC(zlatbs,ZLATBS)"(char *uplo, char *trans, char *diag, char *normin, int *n, int *kd, npy_complex128 *ab, int *ldab, npy_complex128 *x, d *scale, d *cnorm, int *info) nogil
+cdef void zlatbs(char *uplo, char *trans, char *diag, char *normin, int *n, int *kd, z *ab, int *ldab, z *x, d *scale, d *cnorm, int *info) nogil:
+    _fortran_zlatbs(uplo, trans, diag, normin, n, kd, ab, ldab, x, scale, cnorm, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlatdf "F_FUNC(zlatdf,ZLATDF)"(int *ijob, int *n, npy_complex128 *z, int *ldz, npy_complex128 *rhs, d *rdsum, d *rdscal, int *ipiv, int *jpiv) nogil
+cdef void zlatdf(int *ijob, int *n, z *z, int *ldz, z *rhs, d *rdsum, d *rdscal, int *ipiv, int *jpiv) nogil:
+    _fortran_zlatdf(ijob, n, z, ldz, rhs, rdsum, rdscal, ipiv, jpiv)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlatps "F_FUNC(zlatps,ZLATPS)"(char *uplo, char *trans, char *diag, char *normin, int *n, npy_complex128 *ap, npy_complex128 *x, d *scale, d *cnorm, int *info) nogil
+cdef void zlatps(char *uplo, char *trans, char *diag, char *normin, int *n, z *ap, z *x, d *scale, d *cnorm, int *info) nogil:
+    _fortran_zlatps(uplo, trans, diag, normin, n, ap, x, scale, cnorm, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlatrd "F_FUNC(zlatrd,ZLATRD)"(char *uplo, int *n, int *nb, npy_complex128 *a, int *lda, d *e, npy_complex128 *tau, npy_complex128 *w, int *ldw) nogil
+cdef void zlatrd(char *uplo, int *n, int *nb, z *a, int *lda, d *e, z *tau, z *w, int *ldw) nogil:
+    _fortran_zlatrd(uplo, n, nb, a, lda, e, tau, w, ldw)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlatrs "F_FUNC(zlatrs,ZLATRS)"(char *uplo, char *trans, char *diag, char *normin, int *n, npy_complex128 *a, int *lda, npy_complex128 *x, d *scale, d *cnorm, int *info) nogil
+cdef void zlatrs(char *uplo, char *trans, char *diag, char *normin, int *n, z *a, int *lda, z *x, d *scale, d *cnorm, int *info) nogil:
+    _fortran_zlatrs(uplo, trans, diag, normin, n, a, lda, x, scale, cnorm, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlatrz "F_FUNC(zlatrz,ZLATRZ)"(int *m, int *n, int *l, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work) nogil
+cdef void zlatrz(int *m, int *n, int *l, z *a, int *lda, z *tau, z *work) nogil:
+    _fortran_zlatrz(m, n, l, a, lda, tau, work)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlauu2 "F_FUNC(zlauu2,ZLAUU2)"(char *uplo, int *n, npy_complex128 *a, int *lda, int *info) nogil
+cdef void zlauu2(char *uplo, int *n, z *a, int *lda, int *info) nogil:
+    _fortran_zlauu2(uplo, n, a, lda, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zlauum "F_FUNC(zlauum,ZLAUUM)"(char *uplo, int *n, npy_complex128 *a, int *lda, int *info) nogil
+cdef void zlauum(char *uplo, int *n, z *a, int *lda, int *info) nogil:
+    _fortran_zlauum(uplo, n, a, lda, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zpbcon "F_FUNC(zpbcon,ZPBCON)"(char *uplo, int *n, int *kd, npy_complex128 *ab, int *ldab, d *anorm, d *rcond, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void zpbcon(char *uplo, int *n, int *kd, z *ab, int *ldab, d *anorm, d *rcond, z *work, d *rwork, int *info) nogil:
+    _fortran_zpbcon(uplo, n, kd, ab, ldab, anorm, rcond, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zpbequ "F_FUNC(zpbequ,ZPBEQU)"(char *uplo, int *n, int *kd, npy_complex128 *ab, int *ldab, d *s, d *scond, d *amax, int *info) nogil
+cdef void zpbequ(char *uplo, int *n, int *kd, z *ab, int *ldab, d *s, d *scond, d *amax, int *info) nogil:
+    _fortran_zpbequ(uplo, n, kd, ab, ldab, s, scond, amax, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zpbrfs "F_FUNC(zpbrfs,ZPBRFS)"(char *uplo, int *n, int *kd, int *nrhs, npy_complex128 *ab, int *ldab, npy_complex128 *afb, int *ldafb, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, d *ferr, d *berr, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void zpbrfs(char *uplo, int *n, int *kd, int *nrhs, z *ab, int *ldab, z *afb, int *ldafb, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil:
+    _fortran_zpbrfs(uplo, n, kd, nrhs, ab, ldab, afb, ldafb, b, ldb, x, ldx, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zpbstf "F_FUNC(zpbstf,ZPBSTF)"(char *uplo, int *n, int *kd, npy_complex128 *ab, int *ldab, int *info) nogil
+cdef void zpbstf(char *uplo, int *n, int *kd, z *ab, int *ldab, int *info) nogil:
+    _fortran_zpbstf(uplo, n, kd, ab, ldab, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zpbsv "F_FUNC(zpbsv,ZPBSV)"(char *uplo, int *n, int *kd, int *nrhs, npy_complex128 *ab, int *ldab, npy_complex128 *b, int *ldb, int *info) nogil
+cdef void zpbsv(char *uplo, int *n, int *kd, int *nrhs, z *ab, int *ldab, z *b, int *ldb, int *info) nogil:
+    _fortran_zpbsv(uplo, n, kd, nrhs, ab, ldab, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zpbsvx "F_FUNC(zpbsvx,ZPBSVX)"(char *fact, char *uplo, int *n, int *kd, int *nrhs, npy_complex128 *ab, int *ldab, npy_complex128 *afb, int *ldafb, char *equed, d *s, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, d *rcond, d *ferr, d *berr, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void zpbsvx(char *fact, char *uplo, int *n, int *kd, int *nrhs, z *ab, int *ldab, z *afb, int *ldafb, char *equed, d *s, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil:
+    _fortran_zpbsvx(fact, uplo, n, kd, nrhs, ab, ldab, afb, ldafb, equed, s, b, ldb, x, ldx, rcond, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zpbtf2 "F_FUNC(zpbtf2,ZPBTF2)"(char *uplo, int *n, int *kd, npy_complex128 *ab, int *ldab, int *info) nogil
+cdef void zpbtf2(char *uplo, int *n, int *kd, z *ab, int *ldab, int *info) nogil:
+    _fortran_zpbtf2(uplo, n, kd, ab, ldab, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zpbtrf "F_FUNC(zpbtrf,ZPBTRF)"(char *uplo, int *n, int *kd, npy_complex128 *ab, int *ldab, int *info) nogil
+cdef void zpbtrf(char *uplo, int *n, int *kd, z *ab, int *ldab, int *info) nogil:
+    _fortran_zpbtrf(uplo, n, kd, ab, ldab, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zpbtrs "F_FUNC(zpbtrs,ZPBTRS)"(char *uplo, int *n, int *kd, int *nrhs, npy_complex128 *ab, int *ldab, npy_complex128 *b, int *ldb, int *info) nogil
+cdef void zpbtrs(char *uplo, int *n, int *kd, int *nrhs, z *ab, int *ldab, z *b, int *ldb, int *info) nogil:
+    _fortran_zpbtrs(uplo, n, kd, nrhs, ab, ldab, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zpftrf "F_FUNC(zpftrf,ZPFTRF)"(char *transr, char *uplo, int *n, npy_complex128 *a, int *info) nogil
+cdef void zpftrf(char *transr, char *uplo, int *n, z *a, int *info) nogil:
+    _fortran_zpftrf(transr, uplo, n, a, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zpftri "F_FUNC(zpftri,ZPFTRI)"(char *transr, char *uplo, int *n, npy_complex128 *a, int *info) nogil
+cdef void zpftri(char *transr, char *uplo, int *n, z *a, int *info) nogil:
+    _fortran_zpftri(transr, uplo, n, a, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zpftrs "F_FUNC(zpftrs,ZPFTRS)"(char *transr, char *uplo, int *n, int *nrhs, npy_complex128 *a, npy_complex128 *b, int *ldb, int *info) nogil
+cdef void zpftrs(char *transr, char *uplo, int *n, int *nrhs, z *a, z *b, int *ldb, int *info) nogil:
+    _fortran_zpftrs(transr, uplo, n, nrhs, a, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zpocon "F_FUNC(zpocon,ZPOCON)"(char *uplo, int *n, npy_complex128 *a, int *lda, d *anorm, d *rcond, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void zpocon(char *uplo, int *n, z *a, int *lda, d *anorm, d *rcond, z *work, d *rwork, int *info) nogil:
+    _fortran_zpocon(uplo, n, a, lda, anorm, rcond, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zpoequ "F_FUNC(zpoequ,ZPOEQU)"(int *n, npy_complex128 *a, int *lda, d *s, d *scond, d *amax, int *info) nogil
+cdef void zpoequ(int *n, z *a, int *lda, d *s, d *scond, d *amax, int *info) nogil:
+    _fortran_zpoequ(n, a, lda, s, scond, amax, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zpoequb "F_FUNC(zpoequb,ZPOEQUB)"(int *n, npy_complex128 *a, int *lda, d *s, d *scond, d *amax, int *info) nogil
+cdef void zpoequb(int *n, z *a, int *lda, d *s, d *scond, d *amax, int *info) nogil:
+    _fortran_zpoequb(n, a, lda, s, scond, amax, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zporfs "F_FUNC(zporfs,ZPORFS)"(char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *af, int *ldaf, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, d *ferr, d *berr, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void zporfs(char *uplo, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil:
+    _fortran_zporfs(uplo, n, nrhs, a, lda, af, ldaf, b, ldb, x, ldx, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zposv "F_FUNC(zposv,ZPOSV)"(char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, int *info) nogil
+cdef void zposv(char *uplo, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, int *info) nogil:
+    _fortran_zposv(uplo, n, nrhs, a, lda, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zposvx "F_FUNC(zposvx,ZPOSVX)"(char *fact, char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *af, int *ldaf, char *equed, d *s, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, d *rcond, d *ferr, d *berr, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void zposvx(char *fact, char *uplo, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, char *equed, d *s, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil:
+    _fortran_zposvx(fact, uplo, n, nrhs, a, lda, af, ldaf, equed, s, b, ldb, x, ldx, rcond, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zpotf2 "F_FUNC(zpotf2,ZPOTF2)"(char *uplo, int *n, npy_complex128 *a, int *lda, int *info) nogil
+cdef void zpotf2(char *uplo, int *n, z *a, int *lda, int *info) nogil:
+    _fortran_zpotf2(uplo, n, a, lda, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zpotrf "F_FUNC(zpotrf,ZPOTRF)"(char *uplo, int *n, npy_complex128 *a, int *lda, int *info) nogil
+cdef void zpotrf(char *uplo, int *n, z *a, int *lda, int *info) nogil:
+    _fortran_zpotrf(uplo, n, a, lda, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zpotri "F_FUNC(zpotri,ZPOTRI)"(char *uplo, int *n, npy_complex128 *a, int *lda, int *info) nogil
+cdef void zpotri(char *uplo, int *n, z *a, int *lda, int *info) nogil:
+    _fortran_zpotri(uplo, n, a, lda, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zpotrs "F_FUNC(zpotrs,ZPOTRS)"(char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, int *info) nogil
+cdef void zpotrs(char *uplo, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, int *info) nogil:
+    _fortran_zpotrs(uplo, n, nrhs, a, lda, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zppcon "F_FUNC(zppcon,ZPPCON)"(char *uplo, int *n, npy_complex128 *ap, d *anorm, d *rcond, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void zppcon(char *uplo, int *n, z *ap, d *anorm, d *rcond, z *work, d *rwork, int *info) nogil:
+    _fortran_zppcon(uplo, n, ap, anorm, rcond, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zppequ "F_FUNC(zppequ,ZPPEQU)"(char *uplo, int *n, npy_complex128 *ap, d *s, d *scond, d *amax, int *info) nogil
+cdef void zppequ(char *uplo, int *n, z *ap, d *s, d *scond, d *amax, int *info) nogil:
+    _fortran_zppequ(uplo, n, ap, s, scond, amax, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zpprfs "F_FUNC(zpprfs,ZPPRFS)"(char *uplo, int *n, int *nrhs, npy_complex128 *ap, npy_complex128 *afp, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, d *ferr, d *berr, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void zpprfs(char *uplo, int *n, int *nrhs, z *ap, z *afp, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil:
+    _fortran_zpprfs(uplo, n, nrhs, ap, afp, b, ldb, x, ldx, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zppsv "F_FUNC(zppsv,ZPPSV)"(char *uplo, int *n, int *nrhs, npy_complex128 *ap, npy_complex128 *b, int *ldb, int *info) nogil
+cdef void zppsv(char *uplo, int *n, int *nrhs, z *ap, z *b, int *ldb, int *info) nogil:
+    _fortran_zppsv(uplo, n, nrhs, ap, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zppsvx "F_FUNC(zppsvx,ZPPSVX)"(char *fact, char *uplo, int *n, int *nrhs, npy_complex128 *ap, npy_complex128 *afp, char *equed, d *s, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, d *rcond, d *ferr, d *berr, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void zppsvx(char *fact, char *uplo, int *n, int *nrhs, z *ap, z *afp, char *equed, d *s, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil:
+    _fortran_zppsvx(fact, uplo, n, nrhs, ap, afp, equed, s, b, ldb, x, ldx, rcond, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zpptrf "F_FUNC(zpptrf,ZPPTRF)"(char *uplo, int *n, npy_complex128 *ap, int *info) nogil
+cdef void zpptrf(char *uplo, int *n, z *ap, int *info) nogil:
+    _fortran_zpptrf(uplo, n, ap, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zpptri "F_FUNC(zpptri,ZPPTRI)"(char *uplo, int *n, npy_complex128 *ap, int *info) nogil
+cdef void zpptri(char *uplo, int *n, z *ap, int *info) nogil:
+    _fortran_zpptri(uplo, n, ap, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zpptrs "F_FUNC(zpptrs,ZPPTRS)"(char *uplo, int *n, int *nrhs, npy_complex128 *ap, npy_complex128 *b, int *ldb, int *info) nogil
+cdef void zpptrs(char *uplo, int *n, int *nrhs, z *ap, z *b, int *ldb, int *info) nogil:
+    _fortran_zpptrs(uplo, n, nrhs, ap, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zpstf2 "F_FUNC(zpstf2,ZPSTF2)"(char *uplo, int *n, npy_complex128 *a, int *lda, int *piv, int *rank, d *tol, d *work, int *info) nogil
+cdef void zpstf2(char *uplo, int *n, z *a, int *lda, int *piv, int *rank, d *tol, d *work, int *info) nogil:
+    _fortran_zpstf2(uplo, n, a, lda, piv, rank, tol, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zpstrf "F_FUNC(zpstrf,ZPSTRF)"(char *uplo, int *n, npy_complex128 *a, int *lda, int *piv, int *rank, d *tol, d *work, int *info) nogil
+cdef void zpstrf(char *uplo, int *n, z *a, int *lda, int *piv, int *rank, d *tol, d *work, int *info) nogil:
+    _fortran_zpstrf(uplo, n, a, lda, piv, rank, tol, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zptcon "F_FUNC(zptcon,ZPTCON)"(int *n, d *d, npy_complex128 *e, d *anorm, d *rcond, d *rwork, int *info) nogil
+cdef void zptcon(int *n, d *d, z *e, d *anorm, d *rcond, d *rwork, int *info) nogil:
+    _fortran_zptcon(n, d, e, anorm, rcond, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zpteqr "F_FUNC(zpteqr,ZPTEQR)"(char *compz, int *n, d *d, d *e, npy_complex128 *z, int *ldz, d *work, int *info) nogil
+cdef void zpteqr(char *compz, int *n, d *d, d *e, z *z, int *ldz, d *work, int *info) nogil:
+    _fortran_zpteqr(compz, n, d, e, z, ldz, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zptrfs "F_FUNC(zptrfs,ZPTRFS)"(char *uplo, int *n, int *nrhs, d *d, npy_complex128 *e, d *df, npy_complex128 *ef, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, d *ferr, d *berr, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void zptrfs(char *uplo, int *n, int *nrhs, d *d, z *e, d *df, z *ef, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil:
+    _fortran_zptrfs(uplo, n, nrhs, d, e, df, ef, b, ldb, x, ldx, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zptsv "F_FUNC(zptsv,ZPTSV)"(int *n, int *nrhs, d *d, npy_complex128 *e, npy_complex128 *b, int *ldb, int *info) nogil
+cdef void zptsv(int *n, int *nrhs, d *d, z *e, z *b, int *ldb, int *info) nogil:
+    _fortran_zptsv(n, nrhs, d, e, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zptsvx "F_FUNC(zptsvx,ZPTSVX)"(char *fact, int *n, int *nrhs, d *d, npy_complex128 *e, d *df, npy_complex128 *ef, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, d *rcond, d *ferr, d *berr, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void zptsvx(char *fact, int *n, int *nrhs, d *d, z *e, d *df, z *ef, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil:
+    _fortran_zptsvx(fact, n, nrhs, d, e, df, ef, b, ldb, x, ldx, rcond, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zpttrf "F_FUNC(zpttrf,ZPTTRF)"(int *n, d *d, npy_complex128 *e, int *info) nogil
+cdef void zpttrf(int *n, d *d, z *e, int *info) nogil:
+    _fortran_zpttrf(n, d, e, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zpttrs "F_FUNC(zpttrs,ZPTTRS)"(char *uplo, int *n, int *nrhs, d *d, npy_complex128 *e, npy_complex128 *b, int *ldb, int *info) nogil
+cdef void zpttrs(char *uplo, int *n, int *nrhs, d *d, z *e, z *b, int *ldb, int *info) nogil:
+    _fortran_zpttrs(uplo, n, nrhs, d, e, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zptts2 "F_FUNC(zptts2,ZPTTS2)"(int *iuplo, int *n, int *nrhs, d *d, npy_complex128 *e, npy_complex128 *b, int *ldb) nogil
+cdef void zptts2(int *iuplo, int *n, int *nrhs, d *d, z *e, z *b, int *ldb) nogil:
+    _fortran_zptts2(iuplo, n, nrhs, d, e, b, ldb)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zrot "F_FUNC(zrot,ZROT)"(int *n, npy_complex128 *cx, int *incx, npy_complex128 *cy, int *incy, d *c, npy_complex128 *s) nogil
+cdef void zrot(int *n, z *cx, int *incx, z *cy, int *incy, d *c, z *s) nogil:
+    _fortran_zrot(n, cx, incx, cy, incy, c, s)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zspcon "F_FUNC(zspcon,ZSPCON)"(char *uplo, int *n, npy_complex128 *ap, int *ipiv, d *anorm, d *rcond, npy_complex128 *work, int *info) nogil
+cdef void zspcon(char *uplo, int *n, z *ap, int *ipiv, d *anorm, d *rcond, z *work, int *info) nogil:
+    _fortran_zspcon(uplo, n, ap, ipiv, anorm, rcond, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zspmv "F_FUNC(zspmv,ZSPMV)"(char *uplo, int *n, npy_complex128 *alpha, npy_complex128 *ap, npy_complex128 *x, int *incx, npy_complex128 *beta, npy_complex128 *y, int *incy) nogil
+cdef void zspmv(char *uplo, int *n, z *alpha, z *ap, z *x, int *incx, z *beta, z *y, int *incy) nogil:
+    _fortran_zspmv(uplo, n, alpha, ap, x, incx, beta, y, incy)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zspr "F_FUNC(zspr,ZSPR)"(char *uplo, int *n, npy_complex128 *alpha, npy_complex128 *x, int *incx, npy_complex128 *ap) nogil
+cdef void zspr(char *uplo, int *n, z *alpha, z *x, int *incx, z *ap) nogil:
+    _fortran_zspr(uplo, n, alpha, x, incx, ap)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zsprfs "F_FUNC(zsprfs,ZSPRFS)"(char *uplo, int *n, int *nrhs, npy_complex128 *ap, npy_complex128 *afp, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, d *ferr, d *berr, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void zsprfs(char *uplo, int *n, int *nrhs, z *ap, z *afp, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil:
+    _fortran_zsprfs(uplo, n, nrhs, ap, afp, ipiv, b, ldb, x, ldx, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zspsv "F_FUNC(zspsv,ZSPSV)"(char *uplo, int *n, int *nrhs, npy_complex128 *ap, int *ipiv, npy_complex128 *b, int *ldb, int *info) nogil
+cdef void zspsv(char *uplo, int *n, int *nrhs, z *ap, int *ipiv, z *b, int *ldb, int *info) nogil:
+    _fortran_zspsv(uplo, n, nrhs, ap, ipiv, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zspsvx "F_FUNC(zspsvx,ZSPSVX)"(char *fact, char *uplo, int *n, int *nrhs, npy_complex128 *ap, npy_complex128 *afp, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, d *rcond, d *ferr, d *berr, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void zspsvx(char *fact, char *uplo, int *n, int *nrhs, z *ap, z *afp, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil:
+    _fortran_zspsvx(fact, uplo, n, nrhs, ap, afp, ipiv, b, ldb, x, ldx, rcond, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zsptrf "F_FUNC(zsptrf,ZSPTRF)"(char *uplo, int *n, npy_complex128 *ap, int *ipiv, int *info) nogil
+cdef void zsptrf(char *uplo, int *n, z *ap, int *ipiv, int *info) nogil:
+    _fortran_zsptrf(uplo, n, ap, ipiv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zsptri "F_FUNC(zsptri,ZSPTRI)"(char *uplo, int *n, npy_complex128 *ap, int *ipiv, npy_complex128 *work, int *info) nogil
+cdef void zsptri(char *uplo, int *n, z *ap, int *ipiv, z *work, int *info) nogil:
+    _fortran_zsptri(uplo, n, ap, ipiv, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zsptrs "F_FUNC(zsptrs,ZSPTRS)"(char *uplo, int *n, int *nrhs, npy_complex128 *ap, int *ipiv, npy_complex128 *b, int *ldb, int *info) nogil
+cdef void zsptrs(char *uplo, int *n, int *nrhs, z *ap, int *ipiv, z *b, int *ldb, int *info) nogil:
+    _fortran_zsptrs(uplo, n, nrhs, ap, ipiv, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zstedc "F_FUNC(zstedc,ZSTEDC)"(char *compz, int *n, d *d, d *e, npy_complex128 *z, int *ldz, npy_complex128 *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil
+cdef void zstedc(char *compz, int *n, d *d, d *e, z *z, int *ldz, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_zstedc(compz, n, d, e, z, ldz, work, lwork, rwork, lrwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zstegr "F_FUNC(zstegr,ZSTEGR)"(char *jobz, char *range, int *n, d *d, d *e, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, npy_complex128 *z, int *ldz, int *isuppz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+cdef void zstegr(char *jobz, char *range, int *n, d *d, d *e, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, int *isuppz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_zstegr(jobz, range, n, d, e, vl, vu, il, iu, abstol, m, w, z, ldz, isuppz, work, lwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zstein "F_FUNC(zstein,ZSTEIN)"(int *n, d *d, d *e, int *m, d *w, int *iblock, int *isplit, npy_complex128 *z, int *ldz, d *work, int *iwork, int *ifail, int *info) nogil
+cdef void zstein(int *n, d *d, d *e, int *m, d *w, int *iblock, int *isplit, z *z, int *ldz, d *work, int *iwork, int *ifail, int *info) nogil:
+    _fortran_zstein(n, d, e, m, w, iblock, isplit, z, ldz, work, iwork, ifail, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zstemr "F_FUNC(zstemr,ZSTEMR)"(char *jobz, char *range, int *n, d *d, d *e, d *vl, d *vu, int *il, int *iu, int *m, d *w, npy_complex128 *z, int *ldz, int *nzc, int *isuppz, bint *tryrac, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+cdef void zstemr(char *jobz, char *range, int *n, d *d, d *e, d *vl, d *vu, int *il, int *iu, int *m, d *w, z *z, int *ldz, int *nzc, int *isuppz, bint *tryrac, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_zstemr(jobz, range, n, d, e, vl, vu, il, iu, m, w, z, ldz, nzc, isuppz, tryrac, work, lwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zsteqr "F_FUNC(zsteqr,ZSTEQR)"(char *compz, int *n, d *d, d *e, npy_complex128 *z, int *ldz, d *work, int *info) nogil
+cdef void zsteqr(char *compz, int *n, d *d, d *e, z *z, int *ldz, d *work, int *info) nogil:
+    _fortran_zsteqr(compz, n, d, e, z, ldz, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zsycon "F_FUNC(zsycon,ZSYCON)"(char *uplo, int *n, npy_complex128 *a, int *lda, int *ipiv, d *anorm, d *rcond, npy_complex128 *work, int *info) nogil
+cdef void zsycon(char *uplo, int *n, z *a, int *lda, int *ipiv, d *anorm, d *rcond, z *work, int *info) nogil:
+    _fortran_zsycon(uplo, n, a, lda, ipiv, anorm, rcond, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zsyconv "F_FUNC(zsyconv,ZSYCONV)"(char *uplo, char *way, int *n, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *work, int *info) nogil
+cdef void zsyconv(char *uplo, char *way, int *n, z *a, int *lda, int *ipiv, z *work, int *info) nogil:
+    _fortran_zsyconv(uplo, way, n, a, lda, ipiv, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zsyequb "F_FUNC(zsyequb,ZSYEQUB)"(char *uplo, int *n, npy_complex128 *a, int *lda, d *s, d *scond, d *amax, npy_complex128 *work, int *info) nogil
+cdef void zsyequb(char *uplo, int *n, z *a, int *lda, d *s, d *scond, d *amax, z *work, int *info) nogil:
+    _fortran_zsyequb(uplo, n, a, lda, s, scond, amax, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zsymv "F_FUNC(zsymv,ZSYMV)"(char *uplo, int *n, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *x, int *incx, npy_complex128 *beta, npy_complex128 *y, int *incy) nogil
+cdef void zsymv(char *uplo, int *n, z *alpha, z *a, int *lda, z *x, int *incx, z *beta, z *y, int *incy) nogil:
+    _fortran_zsymv(uplo, n, alpha, a, lda, x, incx, beta, y, incy)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zsyr "F_FUNC(zsyr,ZSYR)"(char *uplo, int *n, npy_complex128 *alpha, npy_complex128 *x, int *incx, npy_complex128 *a, int *lda) nogil
+cdef void zsyr(char *uplo, int *n, z *alpha, z *x, int *incx, z *a, int *lda) nogil:
+    _fortran_zsyr(uplo, n, alpha, x, incx, a, lda)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zsyrfs "F_FUNC(zsyrfs,ZSYRFS)"(char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *af, int *ldaf, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, d *ferr, d *berr, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void zsyrfs(char *uplo, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil:
+    _fortran_zsyrfs(uplo, n, nrhs, a, lda, af, ldaf, ipiv, b, ldb, x, ldx, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zsysv "F_FUNC(zsysv,ZSYSV)"(char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zsysv(char *uplo, int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, z *work, int *lwork, int *info) nogil:
+    _fortran_zsysv(uplo, n, nrhs, a, lda, ipiv, b, ldb, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zsysvx "F_FUNC(zsysvx,ZSYSVX)"(char *fact, char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *af, int *ldaf, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, d *rcond, d *ferr, d *berr, npy_complex128 *work, int *lwork, d *rwork, int *info) nogil
+cdef void zsysvx(char *fact, char *uplo, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, int *lwork, d *rwork, int *info) nogil:
+    _fortran_zsysvx(fact, uplo, n, nrhs, a, lda, af, ldaf, ipiv, b, ldb, x, ldx, rcond, ferr, berr, work, lwork, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zsyswapr "F_FUNC(zsyswapr,ZSYSWAPR)"(char *uplo, int *n, npy_complex128 *a, int *lda, int *i1, int *i2) nogil
+cdef void zsyswapr(char *uplo, int *n, z *a, int *lda, int *i1, int *i2) nogil:
+    _fortran_zsyswapr(uplo, n, a, lda, i1, i2)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zsytf2 "F_FUNC(zsytf2,ZSYTF2)"(char *uplo, int *n, npy_complex128 *a, int *lda, int *ipiv, int *info) nogil
+cdef void zsytf2(char *uplo, int *n, z *a, int *lda, int *ipiv, int *info) nogil:
+    _fortran_zsytf2(uplo, n, a, lda, ipiv, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zsytrf "F_FUNC(zsytrf,ZSYTRF)"(char *uplo, int *n, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zsytrf(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *lwork, int *info) nogil:
+    _fortran_zsytrf(uplo, n, a, lda, ipiv, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zsytri "F_FUNC(zsytri,ZSYTRI)"(char *uplo, int *n, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *work, int *info) nogil
+cdef void zsytri(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *info) nogil:
+    _fortran_zsytri(uplo, n, a, lda, ipiv, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zsytri2 "F_FUNC(zsytri2,ZSYTRI2)"(char *uplo, int *n, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zsytri2(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *lwork, int *info) nogil:
+    _fortran_zsytri2(uplo, n, a, lda, ipiv, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zsytri2x "F_FUNC(zsytri2x,ZSYTRI2X)"(char *uplo, int *n, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *work, int *nb, int *info) nogil
+cdef void zsytri2x(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *nb, int *info) nogil:
+    _fortran_zsytri2x(uplo, n, a, lda, ipiv, work, nb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zsytrs "F_FUNC(zsytrs,ZSYTRS)"(char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *b, int *ldb, int *info) nogil
+cdef void zsytrs(char *uplo, int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, int *info) nogil:
+    _fortran_zsytrs(uplo, n, nrhs, a, lda, ipiv, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zsytrs2 "F_FUNC(zsytrs2,ZSYTRS2)"(char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *work, int *info) nogil
+cdef void zsytrs2(char *uplo, int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, z *work, int *info) nogil:
+    _fortran_zsytrs2(uplo, n, nrhs, a, lda, ipiv, b, ldb, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztbcon "F_FUNC(ztbcon,ZTBCON)"(char *norm, char *uplo, char *diag, int *n, int *kd, npy_complex128 *ab, int *ldab, d *rcond, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void ztbcon(char *norm, char *uplo, char *diag, int *n, int *kd, z *ab, int *ldab, d *rcond, z *work, d *rwork, int *info) nogil:
+    _fortran_ztbcon(norm, uplo, diag, n, kd, ab, ldab, rcond, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztbrfs "F_FUNC(ztbrfs,ZTBRFS)"(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, npy_complex128 *ab, int *ldab, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, d *ferr, d *berr, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void ztbrfs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, z *ab, int *ldab, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil:
+    _fortran_ztbrfs(uplo, trans, diag, n, kd, nrhs, ab, ldab, b, ldb, x, ldx, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztbtrs "F_FUNC(ztbtrs,ZTBTRS)"(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, npy_complex128 *ab, int *ldab, npy_complex128 *b, int *ldb, int *info) nogil
+cdef void ztbtrs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, z *ab, int *ldab, z *b, int *ldb, int *info) nogil:
+    _fortran_ztbtrs(uplo, trans, diag, n, kd, nrhs, ab, ldab, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztfsm "F_FUNC(ztfsm,ZTFSM)"(char *transr, char *side, char *uplo, char *trans, char *diag, int *m, int *n, npy_complex128 *alpha, npy_complex128 *a, npy_complex128 *b, int *ldb) nogil
+cdef void ztfsm(char *transr, char *side, char *uplo, char *trans, char *diag, int *m, int *n, z *alpha, z *a, z *b, int *ldb) nogil:
+    _fortran_ztfsm(transr, side, uplo, trans, diag, m, n, alpha, a, b, ldb)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztftri "F_FUNC(ztftri,ZTFTRI)"(char *transr, char *uplo, char *diag, int *n, npy_complex128 *a, int *info) nogil
+cdef void ztftri(char *transr, char *uplo, char *diag, int *n, z *a, int *info) nogil:
+    _fortran_ztftri(transr, uplo, diag, n, a, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztfttp "F_FUNC(ztfttp,ZTFTTP)"(char *transr, char *uplo, int *n, npy_complex128 *arf, npy_complex128 *ap, int *info) nogil
+cdef void ztfttp(char *transr, char *uplo, int *n, z *arf, z *ap, int *info) nogil:
+    _fortran_ztfttp(transr, uplo, n, arf, ap, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztfttr "F_FUNC(ztfttr,ZTFTTR)"(char *transr, char *uplo, int *n, npy_complex128 *arf, npy_complex128 *a, int *lda, int *info) nogil
+cdef void ztfttr(char *transr, char *uplo, int *n, z *arf, z *a, int *lda, int *info) nogil:
+    _fortran_ztfttr(transr, uplo, n, arf, a, lda, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztgevc "F_FUNC(ztgevc,ZTGEVC)"(char *side, char *howmny, bint *select, int *n, npy_complex128 *s, int *lds, npy_complex128 *p, int *ldp, npy_complex128 *vl, int *ldvl, npy_complex128 *vr, int *ldvr, int *mm, int *m, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void ztgevc(char *side, char *howmny, bint *select, int *n, z *s, int *lds, z *p, int *ldp, z *vl, int *ldvl, z *vr, int *ldvr, int *mm, int *m, z *work, d *rwork, int *info) nogil:
+    _fortran_ztgevc(side, howmny, select, n, s, lds, p, ldp, vl, ldvl, vr, ldvr, mm, m, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztgex2 "F_FUNC(ztgex2,ZTGEX2)"(bint *wantq, bint *wantz, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *q, int *ldq, npy_complex128 *z, int *ldz, int *j1, int *info) nogil
+cdef void ztgex2(bint *wantq, bint *wantz, int *n, z *a, int *lda, z *b, int *ldb, z *q, int *ldq, z *z, int *ldz, int *j1, int *info) nogil:
+    _fortran_ztgex2(wantq, wantz, n, a, lda, b, ldb, q, ldq, z, ldz, j1, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztgexc "F_FUNC(ztgexc,ZTGEXC)"(bint *wantq, bint *wantz, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *q, int *ldq, npy_complex128 *z, int *ldz, int *ifst, int *ilst, int *info) nogil
+cdef void ztgexc(bint *wantq, bint *wantz, int *n, z *a, int *lda, z *b, int *ldb, z *q, int *ldq, z *z, int *ldz, int *ifst, int *ilst, int *info) nogil:
+    _fortran_ztgexc(wantq, wantz, n, a, lda, b, ldb, q, ldq, z, ldz, ifst, ilst, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztgsen "F_FUNC(ztgsen,ZTGSEN)"(int *ijob, bint *wantq, bint *wantz, bint *select, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *alpha, npy_complex128 *beta, npy_complex128 *q, int *ldq, npy_complex128 *z, int *ldz, int *m, d *pl, d *pr, d *dif, npy_complex128 *work, int *lwork, int *iwork, int *liwork, int *info) nogil
+cdef void ztgsen(int *ijob, bint *wantq, bint *wantz, bint *select, int *n, z *a, int *lda, z *b, int *ldb, z *alpha, z *beta, z *q, int *ldq, z *z, int *ldz, int *m, d *pl, d *pr, d *dif, z *work, int *lwork, int *iwork, int *liwork, int *info) nogil:
+    _fortran_ztgsen(ijob, wantq, wantz, select, n, a, lda, b, ldb, alpha, beta, q, ldq, z, ldz, m, pl, pr, dif, work, lwork, iwork, liwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztgsja "F_FUNC(ztgsja,ZTGSJA)"(char *jobu, char *jobv, char *jobq, int *m, int *p, int *n, int *k, int *l, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, d *tola, d *tolb, d *alpha, d *beta, npy_complex128 *u, int *ldu, npy_complex128 *v, int *ldv, npy_complex128 *q, int *ldq, npy_complex128 *work, int *ncycle, int *info) nogil
+cdef void ztgsja(char *jobu, char *jobv, char *jobq, int *m, int *p, int *n, int *k, int *l, z *a, int *lda, z *b, int *ldb, d *tola, d *tolb, d *alpha, d *beta, z *u, int *ldu, z *v, int *ldv, z *q, int *ldq, z *work, int *ncycle, int *info) nogil:
+    _fortran_ztgsja(jobu, jobv, jobq, m, p, n, k, l, a, lda, b, ldb, tola, tolb, alpha, beta, u, ldu, v, ldv, q, ldq, work, ncycle, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztgsna "F_FUNC(ztgsna,ZTGSNA)"(char *job, char *howmny, bint *select, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *vl, int *ldvl, npy_complex128 *vr, int *ldvr, d *s, d *dif, int *mm, int *m, npy_complex128 *work, int *lwork, int *iwork, int *info) nogil
+cdef void ztgsna(char *job, char *howmny, bint *select, int *n, z *a, int *lda, z *b, int *ldb, z *vl, int *ldvl, z *vr, int *ldvr, d *s, d *dif, int *mm, int *m, z *work, int *lwork, int *iwork, int *info) nogil:
+    _fortran_ztgsna(job, howmny, select, n, a, lda, b, ldb, vl, ldvl, vr, ldvr, s, dif, mm, m, work, lwork, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztgsy2 "F_FUNC(ztgsy2,ZTGSY2)"(char *trans, int *ijob, int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *c, int *ldc, npy_complex128 *d, int *ldd, npy_complex128 *e, int *lde, npy_complex128 *f, int *ldf, d *scale, d *rdsum, d *rdscal, int *info) nogil
+cdef void ztgsy2(char *trans, int *ijob, int *m, int *n, z *a, int *lda, z *b, int *ldb, z *c, int *ldc, z *d, int *ldd, z *e, int *lde, z *f, int *ldf, d *scale, d *rdsum, d *rdscal, int *info) nogil:
+    _fortran_ztgsy2(trans, ijob, m, n, a, lda, b, ldb, c, ldc, d, ldd, e, lde, f, ldf, scale, rdsum, rdscal, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztgsyl "F_FUNC(ztgsyl,ZTGSYL)"(char *trans, int *ijob, int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *c, int *ldc, npy_complex128 *d, int *ldd, npy_complex128 *e, int *lde, npy_complex128 *f, int *ldf, d *scale, d *dif, npy_complex128 *work, int *lwork, int *iwork, int *info) nogil
+cdef void ztgsyl(char *trans, int *ijob, int *m, int *n, z *a, int *lda, z *b, int *ldb, z *c, int *ldc, z *d, int *ldd, z *e, int *lde, z *f, int *ldf, d *scale, d *dif, z *work, int *lwork, int *iwork, int *info) nogil:
+    _fortran_ztgsyl(trans, ijob, m, n, a, lda, b, ldb, c, ldc, d, ldd, e, lde, f, ldf, scale, dif, work, lwork, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztpcon "F_FUNC(ztpcon,ZTPCON)"(char *norm, char *uplo, char *diag, int *n, npy_complex128 *ap, d *rcond, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void ztpcon(char *norm, char *uplo, char *diag, int *n, z *ap, d *rcond, z *work, d *rwork, int *info) nogil:
+    _fortran_ztpcon(norm, uplo, diag, n, ap, rcond, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztpmqrt "F_FUNC(ztpmqrt,ZTPMQRT)"(char *side, char *trans, int *m, int *n, int *k, int *l, int *nb, npy_complex128 *v, int *ldv, npy_complex128 *t, int *ldt, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *work, int *info) nogil
+cdef void ztpmqrt(char *side, char *trans, int *m, int *n, int *k, int *l, int *nb, z *v, int *ldv, z *t, int *ldt, z *a, int *lda, z *b, int *ldb, z *work, int *info) nogil:
+    _fortran_ztpmqrt(side, trans, m, n, k, l, nb, v, ldv, t, ldt, a, lda, b, ldb, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztpqrt "F_FUNC(ztpqrt,ZTPQRT)"(int *m, int *n, int *l, int *nb, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *t, int *ldt, npy_complex128 *work, int *info) nogil
+cdef void ztpqrt(int *m, int *n, int *l, int *nb, z *a, int *lda, z *b, int *ldb, z *t, int *ldt, z *work, int *info) nogil:
+    _fortran_ztpqrt(m, n, l, nb, a, lda, b, ldb, t, ldt, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztpqrt2 "F_FUNC(ztpqrt2,ZTPQRT2)"(int *m, int *n, int *l, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *t, int *ldt, int *info) nogil
+cdef void ztpqrt2(int *m, int *n, int *l, z *a, int *lda, z *b, int *ldb, z *t, int *ldt, int *info) nogil:
+    _fortran_ztpqrt2(m, n, l, a, lda, b, ldb, t, ldt, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztprfb "F_FUNC(ztprfb,ZTPRFB)"(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, npy_complex128 *v, int *ldv, npy_complex128 *t, int *ldt, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *work, int *ldwork) nogil
+cdef void ztprfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, z *v, int *ldv, z *t, int *ldt, z *a, int *lda, z *b, int *ldb, z *work, int *ldwork) nogil:
+    _fortran_ztprfb(side, trans, direct, storev, m, n, k, l, v, ldv, t, ldt, a, lda, b, ldb, work, ldwork)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztprfs "F_FUNC(ztprfs,ZTPRFS)"(char *uplo, char *trans, char *diag, int *n, int *nrhs, npy_complex128 *ap, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, d *ferr, d *berr, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void ztprfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, z *ap, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil:
+    _fortran_ztprfs(uplo, trans, diag, n, nrhs, ap, b, ldb, x, ldx, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztptri "F_FUNC(ztptri,ZTPTRI)"(char *uplo, char *diag, int *n, npy_complex128 *ap, int *info) nogil
+cdef void ztptri(char *uplo, char *diag, int *n, z *ap, int *info) nogil:
+    _fortran_ztptri(uplo, diag, n, ap, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztptrs "F_FUNC(ztptrs,ZTPTRS)"(char *uplo, char *trans, char *diag, int *n, int *nrhs, npy_complex128 *ap, npy_complex128 *b, int *ldb, int *info) nogil
+cdef void ztptrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, z *ap, z *b, int *ldb, int *info) nogil:
+    _fortran_ztptrs(uplo, trans, diag, n, nrhs, ap, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztpttf "F_FUNC(ztpttf,ZTPTTF)"(char *transr, char *uplo, int *n, npy_complex128 *ap, npy_complex128 *arf, int *info) nogil
+cdef void ztpttf(char *transr, char *uplo, int *n, z *ap, z *arf, int *info) nogil:
+    _fortran_ztpttf(transr, uplo, n, ap, arf, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztpttr "F_FUNC(ztpttr,ZTPTTR)"(char *uplo, int *n, npy_complex128 *ap, npy_complex128 *a, int *lda, int *info) nogil
+cdef void ztpttr(char *uplo, int *n, z *ap, z *a, int *lda, int *info) nogil:
+    _fortran_ztpttr(uplo, n, ap, a, lda, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztrcon "F_FUNC(ztrcon,ZTRCON)"(char *norm, char *uplo, char *diag, int *n, npy_complex128 *a, int *lda, d *rcond, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void ztrcon(char *norm, char *uplo, char *diag, int *n, z *a, int *lda, d *rcond, z *work, d *rwork, int *info) nogil:
+    _fortran_ztrcon(norm, uplo, diag, n, a, lda, rcond, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztrevc "F_FUNC(ztrevc,ZTREVC)"(char *side, char *howmny, bint *select, int *n, npy_complex128 *t, int *ldt, npy_complex128 *vl, int *ldvl, npy_complex128 *vr, int *ldvr, int *mm, int *m, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void ztrevc(char *side, char *howmny, bint *select, int *n, z *t, int *ldt, z *vl, int *ldvl, z *vr, int *ldvr, int *mm, int *m, z *work, d *rwork, int *info) nogil:
+    _fortran_ztrevc(side, howmny, select, n, t, ldt, vl, ldvl, vr, ldvr, mm, m, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztrexc "F_FUNC(ztrexc,ZTREXC)"(char *compq, int *n, npy_complex128 *t, int *ldt, npy_complex128 *q, int *ldq, int *ifst, int *ilst, int *info) nogil
+cdef void ztrexc(char *compq, int *n, z *t, int *ldt, z *q, int *ldq, int *ifst, int *ilst, int *info) nogil:
+    _fortran_ztrexc(compq, n, t, ldt, q, ldq, ifst, ilst, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztrrfs "F_FUNC(ztrrfs,ZTRRFS)"(char *uplo, char *trans, char *diag, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, d *ferr, d *berr, npy_complex128 *work, d *rwork, int *info) nogil
+cdef void ztrrfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil:
+    _fortran_ztrrfs(uplo, trans, diag, n, nrhs, a, lda, b, ldb, x, ldx, ferr, berr, work, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztrsen "F_FUNC(ztrsen,ZTRSEN)"(char *job, char *compq, bint *select, int *n, npy_complex128 *t, int *ldt, npy_complex128 *q, int *ldq, npy_complex128 *w, int *m, d *s, d *sep, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void ztrsen(char *job, char *compq, bint *select, int *n, z *t, int *ldt, z *q, int *ldq, z *w, int *m, d *s, d *sep, z *work, int *lwork, int *info) nogil:
+    _fortran_ztrsen(job, compq, select, n, t, ldt, q, ldq, w, m, s, sep, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztrsna "F_FUNC(ztrsna,ZTRSNA)"(char *job, char *howmny, bint *select, int *n, npy_complex128 *t, int *ldt, npy_complex128 *vl, int *ldvl, npy_complex128 *vr, int *ldvr, d *s, d *sep, int *mm, int *m, npy_complex128 *work, int *ldwork, d *rwork, int *info) nogil
+cdef void ztrsna(char *job, char *howmny, bint *select, int *n, z *t, int *ldt, z *vl, int *ldvl, z *vr, int *ldvr, d *s, d *sep, int *mm, int *m, z *work, int *ldwork, d *rwork, int *info) nogil:
+    _fortran_ztrsna(job, howmny, select, n, t, ldt, vl, ldvl, vr, ldvr, s, sep, mm, m, work, ldwork, rwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztrsyl "F_FUNC(ztrsyl,ZTRSYL)"(char *trana, char *tranb, int *isgn, int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *c, int *ldc, d *scale, int *info) nogil
+cdef void ztrsyl(char *trana, char *tranb, int *isgn, int *m, int *n, z *a, int *lda, z *b, int *ldb, z *c, int *ldc, d *scale, int *info) nogil:
+    _fortran_ztrsyl(trana, tranb, isgn, m, n, a, lda, b, ldb, c, ldc, scale, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztrti2 "F_FUNC(ztrti2,ZTRTI2)"(char *uplo, char *diag, int *n, npy_complex128 *a, int *lda, int *info) nogil
+cdef void ztrti2(char *uplo, char *diag, int *n, z *a, int *lda, int *info) nogil:
+    _fortran_ztrti2(uplo, diag, n, a, lda, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztrtri "F_FUNC(ztrtri,ZTRTRI)"(char *uplo, char *diag, int *n, npy_complex128 *a, int *lda, int *info) nogil
+cdef void ztrtri(char *uplo, char *diag, int *n, z *a, int *lda, int *info) nogil:
+    _fortran_ztrtri(uplo, diag, n, a, lda, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztrtrs "F_FUNC(ztrtrs,ZTRTRS)"(char *uplo, char *trans, char *diag, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, int *info) nogil
+cdef void ztrtrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, int *info) nogil:
+    _fortran_ztrtrs(uplo, trans, diag, n, nrhs, a, lda, b, ldb, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztrttf "F_FUNC(ztrttf,ZTRTTF)"(char *transr, char *uplo, int *n, npy_complex128 *a, int *lda, npy_complex128 *arf, int *info) nogil
+cdef void ztrttf(char *transr, char *uplo, int *n, z *a, int *lda, z *arf, int *info) nogil:
+    _fortran_ztrttf(transr, uplo, n, a, lda, arf, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztrttp "F_FUNC(ztrttp,ZTRTTP)"(char *uplo, int *n, npy_complex128 *a, int *lda, npy_complex128 *ap, int *info) nogil
+cdef void ztrttp(char *uplo, int *n, z *a, int *lda, z *ap, int *info) nogil:
+    _fortran_ztrttp(uplo, n, a, lda, ap, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_ztzrzf "F_FUNC(ztzrzf,ZTZRZF)"(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void ztzrzf(int *m, int *n, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil:
+    _fortran_ztzrzf(m, n, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zunbdb "F_FUNC(zunbdb,ZUNBDB)"(char *trans, char *signs, int *m, int *p, int *q, npy_complex128 *x11, int *ldx11, npy_complex128 *x12, int *ldx12, npy_complex128 *x21, int *ldx21, npy_complex128 *x22, int *ldx22, d *theta, d *phi, npy_complex128 *taup1, npy_complex128 *taup2, npy_complex128 *tauq1, npy_complex128 *tauq2, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zunbdb(char *trans, char *signs, int *m, int *p, int *q, z *x11, int *ldx11, z *x12, int *ldx12, z *x21, int *ldx21, z *x22, int *ldx22, d *theta, d *phi, z *taup1, z *taup2, z *tauq1, z *tauq2, z *work, int *lwork, int *info) nogil:
+    _fortran_zunbdb(trans, signs, m, p, q, x11, ldx11, x12, ldx12, x21, ldx21, x22, ldx22, theta, phi, taup1, taup2, tauq1, tauq2, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zuncsd "F_FUNC(zuncsd,ZUNCSD)"(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, char *signs, int *m, int *p, int *q, npy_complex128 *x11, int *ldx11, npy_complex128 *x12, int *ldx12, npy_complex128 *x21, int *ldx21, npy_complex128 *x22, int *ldx22, d *theta, npy_complex128 *u1, int *ldu1, npy_complex128 *u2, int *ldu2, npy_complex128 *v1t, int *ldv1t, npy_complex128 *v2t, int *ldv2t, npy_complex128 *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *info) nogil
+cdef void zuncsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, char *signs, int *m, int *p, int *q, z *x11, int *ldx11, z *x12, int *ldx12, z *x21, int *ldx21, z *x22, int *ldx22, d *theta, z *u1, int *ldu1, z *u2, int *ldu2, z *v1t, int *ldv1t, z *v2t, int *ldv2t, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *info) nogil:
+    _fortran_zuncsd(jobu1, jobu2, jobv1t, jobv2t, trans, signs, m, p, q, x11, ldx11, x12, ldx12, x21, ldx21, x22, ldx22, theta, u1, ldu1, u2, ldu2, v1t, ldv1t, v2t, ldv2t, work, lwork, rwork, lrwork, iwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zung2l "F_FUNC(zung2l,ZUNG2L)"(int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *info) nogil
+cdef void zung2l(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *info) nogil:
+    _fortran_zung2l(m, n, k, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zung2r "F_FUNC(zung2r,ZUNG2R)"(int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *info) nogil
+cdef void zung2r(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *info) nogil:
+    _fortran_zung2r(m, n, k, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zungbr "F_FUNC(zungbr,ZUNGBR)"(char *vect, int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zungbr(char *vect, int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil:
+    _fortran_zungbr(vect, m, n, k, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zunghr "F_FUNC(zunghr,ZUNGHR)"(int *n, int *ilo, int *ihi, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zunghr(int *n, int *ilo, int *ihi, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil:
+    _fortran_zunghr(n, ilo, ihi, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zungl2 "F_FUNC(zungl2,ZUNGL2)"(int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *info) nogil
+cdef void zungl2(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *info) nogil:
+    _fortran_zungl2(m, n, k, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zunglq "F_FUNC(zunglq,ZUNGLQ)"(int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zunglq(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil:
+    _fortran_zunglq(m, n, k, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zungql "F_FUNC(zungql,ZUNGQL)"(int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zungql(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil:
+    _fortran_zungql(m, n, k, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zungqr "F_FUNC(zungqr,ZUNGQR)"(int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zungqr(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil:
+    _fortran_zungqr(m, n, k, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zungr2 "F_FUNC(zungr2,ZUNGR2)"(int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *info) nogil
+cdef void zungr2(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *info) nogil:
+    _fortran_zungr2(m, n, k, a, lda, tau, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zungrq "F_FUNC(zungrq,ZUNGRQ)"(int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zungrq(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil:
+    _fortran_zungrq(m, n, k, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zungtr "F_FUNC(zungtr,ZUNGTR)"(char *uplo, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zungtr(char *uplo, int *n, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil:
+    _fortran_zungtr(uplo, n, a, lda, tau, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zunm2l "F_FUNC(zunm2l,ZUNM2L)"(char *side, char *trans, int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *info) nogil
+cdef void zunm2l(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *info) nogil:
+    _fortran_zunm2l(side, trans, m, n, k, a, lda, tau, c, ldc, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zunm2r "F_FUNC(zunm2r,ZUNM2R)"(char *side, char *trans, int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *info) nogil
+cdef void zunm2r(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *info) nogil:
+    _fortran_zunm2r(side, trans, m, n, k, a, lda, tau, c, ldc, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zunmbr "F_FUNC(zunmbr,ZUNMBR)"(char *vect, char *side, char *trans, int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zunmbr(char *vect, char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil:
+    _fortran_zunmbr(vect, side, trans, m, n, k, a, lda, tau, c, ldc, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zunmhr "F_FUNC(zunmhr,ZUNMHR)"(char *side, char *trans, int *m, int *n, int *ilo, int *ihi, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zunmhr(char *side, char *trans, int *m, int *n, int *ilo, int *ihi, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil:
+    _fortran_zunmhr(side, trans, m, n, ilo, ihi, a, lda, tau, c, ldc, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zunml2 "F_FUNC(zunml2,ZUNML2)"(char *side, char *trans, int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *info) nogil
+cdef void zunml2(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *info) nogil:
+    _fortran_zunml2(side, trans, m, n, k, a, lda, tau, c, ldc, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zunmlq "F_FUNC(zunmlq,ZUNMLQ)"(char *side, char *trans, int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zunmlq(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil:
+    _fortran_zunmlq(side, trans, m, n, k, a, lda, tau, c, ldc, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zunmql "F_FUNC(zunmql,ZUNMQL)"(char *side, char *trans, int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zunmql(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil:
+    _fortran_zunmql(side, trans, m, n, k, a, lda, tau, c, ldc, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zunmqr "F_FUNC(zunmqr,ZUNMQR)"(char *side, char *trans, int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zunmqr(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil:
+    _fortran_zunmqr(side, trans, m, n, k, a, lda, tau, c, ldc, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zunmr2 "F_FUNC(zunmr2,ZUNMR2)"(char *side, char *trans, int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *info) nogil
+cdef void zunmr2(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *info) nogil:
+    _fortran_zunmr2(side, trans, m, n, k, a, lda, tau, c, ldc, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zunmr3 "F_FUNC(zunmr3,ZUNMR3)"(char *side, char *trans, int *m, int *n, int *k, int *l, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *info) nogil
+cdef void zunmr3(char *side, char *trans, int *m, int *n, int *k, int *l, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *info) nogil:
+    _fortran_zunmr3(side, trans, m, n, k, l, a, lda, tau, c, ldc, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zunmrq "F_FUNC(zunmrq,ZUNMRQ)"(char *side, char *trans, int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zunmrq(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil:
+    _fortran_zunmrq(side, trans, m, n, k, a, lda, tau, c, ldc, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zunmrz "F_FUNC(zunmrz,ZUNMRZ)"(char *side, char *trans, int *m, int *n, int *k, int *l, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zunmrz(char *side, char *trans, int *m, int *n, int *k, int *l, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil:
+    _fortran_zunmrz(side, trans, m, n, k, l, a, lda, tau, c, ldc, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zunmtr "F_FUNC(zunmtr,ZUNMTR)"(char *side, char *uplo, char *trans, int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *lwork, int *info) nogil
+cdef void zunmtr(char *side, char *uplo, char *trans, int *m, int *n, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil:
+    _fortran_zunmtr(side, uplo, trans, m, n, a, lda, tau, c, ldc, work, lwork, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zupgtr "F_FUNC(zupgtr,ZUPGTR)"(char *uplo, int *n, npy_complex128 *ap, npy_complex128 *tau, npy_complex128 *q, int *ldq, npy_complex128 *work, int *info) nogil
+cdef void zupgtr(char *uplo, int *n, z *ap, z *tau, z *q, int *ldq, z *work, int *info) nogil:
+    _fortran_zupgtr(uplo, n, ap, tau, q, ldq, work, info)
+
+cdef extern from "_lapack_subroutines.h":
+    void _fortran_zupmtr "F_FUNC(zupmtr,ZUPMTR)"(char *side, char *uplo, char *trans, int *m, int *n, npy_complex128 *ap, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *info) nogil
+cdef void zupmtr(char *side, char *uplo, char *trans, int *m, int *n, z *ap, z *tau, z *c, int *ldc, z *work, int *info) nogil:
+    _fortran_zupmtr(side, uplo, trans, m, n, ap, tau, c, ldc, work, info)
+
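+# The pairs above all follow one pattern: the `extern` block binds the
+# Fortran symbol through the F_FUNC name-mangling macro, and a thin `cdef`
+# wrapper re-exposes it using the local ctypedefs (z = double complex,
+# d = double) so it can be called without the GIL.  As an illustrative
+# sketch only (not part of this generated file), user Cython code can reach
+# these wrappers through the public `scipy.linalg.cython_lapack` module;
+# the helper name `chol_factor` below is hypothetical:
+#
+#     from scipy.linalg.cython_lapack cimport zpotrf
+#
+#     def chol_factor(double complex[::1, :] a):
+#         # Overwrite the lower triangle of the Fortran-ordered, Hermitian
+#         # positive-definite matrix `a` with its Cholesky factor; assumes
+#         # a is at least 1x1.  Returns the LAPACK info flag (0 = success).
+#         cdef int n = a.shape[0]
+#         cdef int lda = n
+#         cdef int info = 0
+#         cdef bytes uplo_bytes = b'L'   # bind to a variable, as in
+#         cdef char* uplo = uplo_bytes   # _test_dlamch below
+#         zpotrf(uplo, &n, &a[0, 0], &lda, &info)
+#         return info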
+
+# Python accessible wrappers for testing:
+
+def _test_dlamch(cmach):
+    # Coerce the argument to a bytes object; on Python 3 callers must
+    # pass a bytes-like value.
+    cmach_bytes = bytes(cmach)
+    # Cython refuses to take a char* from a temporary Python value, so
+    # the bytes object must be bound to a variable first.
+    cdef char* cmach_char = cmach_bytes
+    return dlamch(cmach_char)
+
+def _test_slamch(cmach):
+    # Coerce the argument to a bytes object; on Python 3 callers must
+    # pass a bytes-like value.
+    cmach_bytes = bytes(cmach)
+    # Cython refuses to take a char* from a temporary Python value, so
+    # the bytes object must be bound to a variable first.
+    cdef char* cmach_char = cmach_bytes
+    return slamch(cmach_char)
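+
+# For reference, dlamch/slamch query LAPACK machine parameters; called with
+# cmach = b'E', the wrappers above should return the relative machine
+# epsilon (roughly 2.22e-16 for _test_dlamch, 1.19e-07 for _test_slamch).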
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/decomp.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/decomp.py
new file mode 100644
index 00000000..9c9b604a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/decomp.py
@@ -0,0 +1,32 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.linalg` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _decomp
+
+__all__ = [  # noqa: F822
+    'eig', 'eigvals', 'eigh', 'eigvalsh',
+    'eig_banded', 'eigvals_banded',
+    'eigh_tridiagonal', 'eigvalsh_tridiagonal', 'hessenberg', 'cdf2rdf',
+    'array', 'isfinite', 'inexact', 'nonzero', 'iscomplexobj', 'cast',
+    'flatnonzero', 'argsort', 'iscomplex', 'einsum', 'eye', 'inf',
+    'LinAlgError', 'norm', 'get_lapack_funcs'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.linalg.decomp is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.linalg instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.linalg` namespace, "
+                  "the `scipy.linalg.decomp` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_decomp, name)
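+
+# Illustrative sketch (not part of this shim): fetching a name through the
+# deprecated module should emit a DeprecationWarning and still resolve to
+# the private `_decomp` implementation, e.g.
+#
+#     import warnings
+#     from scipy.linalg import decomp
+#     with warnings.catch_warnings(record=True) as caught:
+#         warnings.simplefilter("always")
+#         eig = decomp.eig          # triggers the module __getattr__ above
+#     assert issubclass(caught[-1].category, DeprecationWarning)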
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/decomp_cholesky.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/decomp_cholesky.py
new file mode 100644
index 00000000..86f05807
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/decomp_cholesky.py
@@ -0,0 +1,29 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.linalg` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _decomp_cholesky
+
+__all__ = [  # noqa: F822
+    'cholesky', 'cho_factor', 'cho_solve', 'cholesky_banded',
+    'cho_solve_banded', 'asarray_chkfinite', 'atleast_2d',
+    'LinAlgError', 'get_lapack_funcs'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.linalg.decomp_cholesky is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.linalg instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.linalg` namespace, the"
+                  " `scipy.linalg.decomp_cholesky` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_decomp_cholesky, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/decomp_lu.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/decomp_lu.py
new file mode 100644
index 00000000..fe97814e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/decomp_lu.py
@@ -0,0 +1,30 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.linalg` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _decomp_lu
+
+__all__ = [  # noqa: F822
+    'lu', 'lu_solve', 'lu_factor',
+    'asarray_chkfinite', 'LinAlgWarning', 'get_lapack_funcs',
+    'get_flinalg_funcs'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.linalg.decomp_lu is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.linalg instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.linalg` namespace, "
+                  "the `scipy.linalg.decomp_lu` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_decomp_lu, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/decomp_qr.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/decomp_qr.py
new file mode 100644
index 00000000..df3bac31
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/decomp_qr.py
@@ -0,0 +1,27 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.linalg` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _decomp_qr
+
+__all__ = [  # noqa: F822
+    'qr', 'qr_multiply', 'rq', 'get_lapack_funcs', 'safecall'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.linalg.decomp_qr is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.linalg instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.linalg` namespace, "
+                  "the `scipy.linalg.decomp_qr` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_decomp_qr, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/decomp_schur.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/decomp_schur.py
new file mode 100644
index 00000000..5f8df21a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/decomp_schur.py
@@ -0,0 +1,28 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.linalg` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _decomp_schur
+
+__all__ = [  # noqa: F822
+    'schur', 'rsf2csf', 'asarray_chkfinite', 'single', 'array', 'norm',
+    'LinAlgError', 'get_lapack_funcs', 'eigvals', 'eps', 'feps'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.linalg.decomp_schur is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.linalg instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.linalg` namespace, "
+                  "the `scipy.linalg.decomp_schur` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_decomp_schur, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/decomp_svd.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/decomp_svd.py
new file mode 100644
index 00000000..12d9cae1
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/decomp_svd.py
@@ -0,0 +1,28 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.linalg` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _decomp_svd
+
+__all__ = [  # noqa: F822
+    'svd', 'svdvals', 'diagsvd', 'orth', 'subspace_angles', 'null_space',
+    'LinAlgError', 'get_lapack_funcs'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.linalg.decomp_svd is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.linalg instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.linalg` namespace, "
+                  "the `scipy.linalg.decomp_svd` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_decomp_svd, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/flinalg.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/flinalg.py
new file mode 100644
index 00000000..2970f2fd
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/flinalg.py
@@ -0,0 +1,23 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+
+import warnings
+from . import _flinalg_py
+
+__all__ = ['get_flinalg_funcs', 'has_column_major_storage']  # noqa: F822
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.linalg.flinalg is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.linalg instead.")
+
+    warnings.warn("The `scipy.linalg.flinalg` namespace is deprecated and "
+                  "will be removed in SciPy v2.0.0.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_flinalg_py, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/interpolative.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/interpolative.py
new file mode 100644
index 00000000..a8f7c957
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/interpolative.py
@@ -0,0 +1,1004 @@
+#******************************************************************************
+#   Copyright (C) 2013 Kenneth L. Ho
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions are met:
+#
+#   Redistributions of source code must retain the above copyright notice, this
+#   list of conditions and the following disclaimer. Redistributions in binary
+#   form must reproduce the above copyright notice, this list of conditions and
+#   the following disclaimer in the documentation and/or other materials
+#   provided with the distribution.
+#
+#   None of the names of the copyright holders may be used to endorse or
+#   promote products derived from this software without specific prior written
+#   permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+#   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+#   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+#   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+#   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+#   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+#   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+#   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+#   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+#   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+#   POSSIBILITY OF SUCH DAMAGE.
+#******************************************************************************
+
+# Python module for interfacing with `id_dist`.
+
+r"""
+======================================================================
+Interpolative matrix decomposition (:mod:`scipy.linalg.interpolative`)
+======================================================================
+
+.. moduleauthor:: Kenneth L. Ho 
+
+.. versionadded:: 0.13
+
+.. currentmodule:: scipy.linalg.interpolative
+
+An interpolative decomposition (ID) of a matrix :math:`A \in
+\mathbb{C}^{m \times n}` of rank :math:`k \leq \min \{ m, n \}` is a
+factorization
+
+.. math::
+  A \Pi =
+  \begin{bmatrix}
+   A \Pi_{1} & A \Pi_{2}
+  \end{bmatrix} =
+  A \Pi_{1}
+  \begin{bmatrix}
+   I & T
+  \end{bmatrix},
+
+where :math:`\Pi = [\Pi_{1}, \Pi_{2}]` is a permutation matrix with
+:math:`\Pi_{1} \in \{ 0, 1 \}^{n \times k}`, i.e., :math:`A \Pi_{2} =
+A \Pi_{1} T`. This can equivalently be written as :math:`A = BP`,
+where :math:`B = A \Pi_{1}` and :math:`P = [I, T] \Pi^{\mathsf{T}}`
+are the *skeleton* and *interpolation matrices*, respectively.
+
+If :math:`A` does not have exact rank :math:`k`, then there exists an
+approximation in the form of an ID such that :math:`A = BP + E`, where
+:math:`\| E \| \sim \sigma_{k + 1}` is on the order of the :math:`(k +
+1)`-th largest singular value of :math:`A`. Note that :math:`\sigma_{k
++ 1}` is the best possible error for a rank-:math:`k` approximation
+and, in fact, is achieved by the singular value decomposition (SVD)
+:math:`A \approx U S V^{*}`, where :math:`U \in \mathbb{C}^{m \times
+k}` and :math:`V \in \mathbb{C}^{n \times k}` have orthonormal columns
+and :math:`S = \mathop{\mathrm{diag}} (\sigma_{i}) \in \mathbb{C}^{k
+\times k}` is diagonal with nonnegative entries. The principal
+advantages of using an ID over an SVD are that:
+
+- it is cheaper to construct;
+- it preserves the structure of :math:`A`; and
+- it is more efficient to compute with in light of the identity submatrix of :math:`P`.
+
+Routines
+========
+
+Main functionality:
+
+.. autosummary::
+   :toctree: generated/
+
+   interp_decomp
+   reconstruct_matrix_from_id
+   reconstruct_interp_matrix
+   reconstruct_skel_matrix
+   id_to_svd
+   svd
+   estimate_spectral_norm
+   estimate_spectral_norm_diff
+   estimate_rank
+
+Support functions:
+
+.. autosummary::
+   :toctree: generated/
+
+   seed
+   rand
+
+
+References
+==========
+
+This module uses the ID software package [1]_ by Martinsson, Rokhlin,
+Shkolnisky, and Tygert, which is a Fortran library for computing IDs
+using various algorithms, including the rank-revealing QR approach of
+[2]_ and the more recent randomized methods described in [3]_, [4]_,
+and [5]_. This module exposes its functionality in a way convenient
+for Python users. Note that this module does not add any functionality
+beyond organizing a simpler and more consistent interface.
+
+We advise the user to consult also the documentation for the ID package
+[1]_.
+
+.. [1] P.G. Martinsson, V. Rokhlin, Y. Shkolnisky, M. Tygert. "ID: a
+    software package for low-rank approximation of matrices via interpolative
+    decompositions, version 0.2." http://tygert.com/id_doc.4.pdf.
+
+.. [2] H. Cheng, Z. Gimbutas, P.G. Martinsson, V. Rokhlin. "On the
+    compression of low rank matrices." *SIAM J. Sci. Comput.* 26 (4): 1389--1404,
+    2005. :doi:`10.1137/030602678`.
+
+.. [3] E. Liberty, F. Woolfe, P.G. Martinsson, V. Rokhlin, M.
+    Tygert. "Randomized algorithms for the low-rank approximation of matrices."
+    *Proc. Natl. Acad. Sci. U.S.A.* 104 (51): 20167--20172, 2007.
+    :doi:`10.1073/pnas.0709640104`.
+
+.. [4] P.G. Martinsson, V. Rokhlin, M. Tygert. "A randomized
+    algorithm for the decomposition of matrices." *Appl. Comput. Harmon. Anal.* 30
+    (1): 47--68,  2011. :doi:`10.1016/j.acha.2010.02.003`.
+
+.. [5] F. Woolfe, E. Liberty, V. Rokhlin, M. Tygert. "A fast
+    randomized algorithm for the approximation of matrices." *Appl. Comput.
+    Harmon. Anal.* 25 (3): 335--366, 2008. :doi:`10.1016/j.acha.2007.12.002`.
+
+
+Tutorial
+========
+
+Initializing
+------------
+
+The first step is to import :mod:`scipy.linalg.interpolative` by issuing the
+command:
+
+>>> import scipy.linalg.interpolative as sli
+
+Now let's build a matrix. For this, we consider a Hilbert matrix, which is
+well known to have low numerical rank:
+
+>>> from scipy.linalg import hilbert
+>>> n = 1000
+>>> A = hilbert(n)
+
+We can also do this explicitly via:
+
+>>> import numpy as np
+>>> n = 1000
+>>> A = np.empty((n, n), order='F')
+>>> for j in range(n):
+...     for i in range(n):
+...         A[i,j] = 1. / (i + j + 1)
+
+Note the use of the flag ``order='F'`` in :func:`numpy.empty`. This
+instantiates the matrix in Fortran-contiguous order and is important for
+avoiding data copying when passing to the backend.
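+
+The layout can be checked directly on the array built above:
+
+>>> A.flags['F_CONTIGUOUS']
+True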
+
+We then define multiplication routines for the matrix by regarding it as a
+:class:`scipy.sparse.linalg.LinearOperator`:
+
+>>> from scipy.sparse.linalg import aslinearoperator
+>>> L = aslinearoperator(A)
+
+This automatically sets up methods describing the action of the matrix and its
+adjoint on a vector.
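+
+As a quick sanity check, applying ``L`` to a vector of ones should reproduce
+the row sums of ``A``:
+
+>>> np.allclose(L.matvec(np.ones(n)), A.sum(axis=1))
+True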
+
+Computing an ID
+---------------
+
+We have several choices of algorithm to compute an ID. These fall largely
+according to two dichotomies:
+
+1. how the matrix is represented, i.e., via its entries or via its action on a
+   vector; and
+2. whether to approximate it to a fixed relative precision or to a fixed rank.
+
+We step through each choice in turn below.
+
+In all cases, the ID is represented by three parameters:
+
+1. a rank ``k``;
+2. an index array ``idx``; and
+3. interpolation coefficients ``proj``.
+
+The ID is specified by the relation
+``np.dot(A[:,idx[:k]], proj) == A[:,idx[k:]]``.
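+
+For example, using :func:`interp_decomp` (described in detail below) on the
+Hilbert matrix built above, this relation can be verified numerically (a
+sketch; the rank ``k`` found depends on the precision requested):
+
+>>> k, idx, proj = sli.interp_decomp(A, 1e-9)
+>>> np.allclose(np.dot(A[:, idx[:k]], proj), A[:, idx[k:]])
+True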
+
+From matrix entries
+...................
+
+We first consider a matrix given in terms of its entries.
+
+To compute an ID to a fixed precision, type:
+
+>>> k, idx, proj = sli.interp_decomp(A, eps)
+
+where ``eps < 1`` is the desired precision.
+
+To compute an ID to a fixed rank, use:
+
+>>> idx, proj = sli.interp_decomp(A, k)
+
+where ``k >= 1`` is the desired rank.
+
+Both algorithms use random sampling and are usually faster than the
+corresponding older, deterministic algorithms, which can be accessed via the
+commands:
+
+>>> k, idx, proj = sli.interp_decomp(A, eps, rand=False)
+
+and:
+
+>>> idx, proj = sli.interp_decomp(A, k, rand=False)
+
+respectively.
+
+From matrix action
+..................
+
+Now consider a matrix given in terms of its action on a vector as a
+:class:`scipy.sparse.linalg.LinearOperator`.
+
+To compute an ID to a fixed precision, type:
+
+>>> k, idx, proj = sli.interp_decomp(L, eps)
+
+To compute an ID to a fixed rank, use:
+
+>>> idx, proj = sli.interp_decomp(L, k)
+
+These algorithms are randomized.
+
+Reconstructing an ID
+--------------------
+
+The ID routines above do not output the skeleton and interpolation matrices
+explicitly but instead return the relevant information in a more compact (and
+sometimes more useful) form. To build these matrices, write:
+
+>>> B = sli.reconstruct_skel_matrix(A, k, idx)
+
+for the skeleton matrix and:
+
+>>> P = sli.reconstruct_interp_matrix(idx, proj)
+
+for the interpolation matrix. The ID approximation can then be computed as:
+
+>>> C = np.dot(B, P)
+
+This can also be constructed directly using:
+
+>>> C = sli.reconstruct_matrix_from_id(B, idx, proj)
+
+without having to first compute ``P``.
+
+Alternatively, this can be done explicitly as well using:
+
+>>> B = A[:,idx[:k]]
+>>> P = np.hstack([np.eye(k), proj])[:,np.argsort(idx)]
+>>> C = np.dot(B, P)
+
+Computing an SVD
+----------------
+
+An ID can be converted to an SVD via the command:
+
+>>> U, S, V = sli.id_to_svd(B, idx, proj)
+
+The SVD approximation is then:
+
+>>> C = np.dot(U, np.dot(np.diag(S), V.conj().T))
+
+The SVD can also be computed "fresh" by combining both the ID and conversion
+steps into one command. Following the various ID algorithms above, there are
+correspondingly various SVD algorithms that one can employ.
+
+From matrix entries
+...................
+
+We consider first SVD algorithms for a matrix given in terms of its entries.
+
+To compute an SVD to a fixed precision, type:
+
+>>> U, S, V = sli.svd(A, eps)
+
+To compute an SVD to a fixed rank, use:
+
+>>> U, S, V = sli.svd(A, k)
+
+Both algorithms use random sampling; for the deterministic versions, issue
+the keyword ``rand=False`` as above.
+
+From matrix action
+..................
+
+Now consider a matrix given in terms of its action on a vector.
+
+To compute an SVD to a fixed precision, type:
+
+>>> U, S, V = sli.svd(L, eps)
+
+To compute an SVD to a fixed rank, use:
+
+>>> U, S, V = sli.svd(L, k)
+
+Utility routines
+----------------
+
+Several utility routines are also available.
+
+To estimate the spectral norm of a matrix, use:
+
+>>> snorm = sli.estimate_spectral_norm(A)
+
+This algorithm is based on the randomized power method and thus requires only
+matrix-vector products. The number of iterations to take can be set using the
+keyword ``its`` (default: ``its=20``). The matrix is interpreted as a
+:class:`scipy.sparse.linalg.LinearOperator`, but it is also valid to supply it
+as a :class:`numpy.ndarray`, in which case it is trivially converted using
+:func:`scipy.sparse.linalg.aslinearoperator`.
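+
+For a dense matrix, the estimate can be compared against the exact spectral
+norm (a sketch; agreement improves with the number of iterations):
+
+>>> s_est = sli.estimate_spectral_norm(A, its=20)
+>>> s_exact = np.linalg.norm(A, 2)
+>>> abs(s_est - s_exact) / s_exact < 0.1
+True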
+
+The same algorithm can also estimate the spectral norm of the difference of two
+matrices ``A1`` and ``A2`` as follows:
+
+>>> diff = sli.estimate_spectral_norm_diff(A1, A2)
+
+This is often useful for checking the accuracy of a matrix approximation.
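+
+For instance, the error of the ID approximation ``C`` constructed earlier can
+be estimated as (a sketch; dense arrays are converted to linear operators
+internally):
+
+>>> err = sli.estimate_spectral_norm_diff(A, C)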
+
+Some routines in :mod:`scipy.linalg.interpolative` require estimating the rank
+of a matrix as well. This can be done with either:
+
+>>> k = sli.estimate_rank(A, eps)
+
+or:
+
+>>> k = sli.estimate_rank(L, eps)
+
+depending on the representation. The parameter ``eps`` controls the definition
+of the numerical rank.
+
+Finally, the random number generation required for all randomized routines can
+be controlled via :func:`scipy.linalg.interpolative.seed`. To reset the seed
+values to their original values, use:
+
+>>> sli.seed('default')
+
+To specify the seed values, use:
+
+>>> sli.seed(s)
+
+where ``s`` must be an integer or an array of 55 floats. If an integer, the
+array of floats is obtained from ``numpy.random.RandomState`` initialized
+with the given integer seed.
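+
+Seeding makes the randomized routines reproducible. For example (a sketch):
+
+>>> sli.seed(42)
+>>> k1 = sli.estimate_rank(A, 1e-9)
+>>> sli.seed(42)
+>>> k2 = sli.estimate_rank(A, 1e-9)
+>>> k1 == k2
+True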
+
+To simply generate some random numbers, type:
+
+>>> sli.rand(n)
+
+where ``n`` is the number of random numbers to generate.
+
+Remarks
+-------
+
+The above functions all automatically detect the appropriate interface and work
+with both real and complex data types, passing input arguments to the proper
+backend routine.
+
+"""
+
+import scipy.linalg._interpolative_backend as _backend
+import numpy as np
+import sys
+
+__all__ = [
+    'estimate_rank',
+    'estimate_spectral_norm',
+    'estimate_spectral_norm_diff',
+    'id_to_svd',
+    'interp_decomp',
+    'rand',
+    'reconstruct_interp_matrix',
+    'reconstruct_matrix_from_id',
+    'reconstruct_skel_matrix',
+    'seed',
+    'svd',
+]
+
+_DTYPE_ERROR = ValueError("invalid input dtype (input must be float64 or complex128)")
+_TYPE_ERROR = TypeError("invalid input type (must be array or LinearOperator)")
+_32BIT_ERROR = ValueError("interpolative decomposition on 32-bit systems "
+                          "with complex128 is buggy")
+_IS_32BIT = (sys.maxsize < 2**32)
+
+
+def _is_real(A):
+    try:
+        if A.dtype == np.complex128:
+            return False
+        elif A.dtype == np.float64:
+            return True
+        else:
+            raise _DTYPE_ERROR
+    except AttributeError as e:
+        raise _TYPE_ERROR from e
+
+
+def seed(seed=None):
+    """
+    Seed the internal random number generator used in this ID package.
+
+    The generator is a lagged Fibonacci method with 55-element internal state.
+
+    Parameters
+    ----------
+    seed : int, sequence, 'default', optional
+        If 'default', the random seed is reset to a default value.
+
+        If `seed` is a sequence containing 55 floating-point numbers
+        in range [0,1], these are used to set the internal state of
+        the generator.
+
+        If the value is an integer, the internal state is obtained
+        from `numpy.random.RandomState` (MT19937) with the integer
+        used as the initial seed.
+
+        If `seed` is omitted (None), ``numpy.random.rand`` is used to
+        initialize the generator.
+
+    """
+    # For details, see :func:`_backend.id_srand`, :func:`_backend.id_srandi`,
+    # and :func:`_backend.id_srando`.
+
+    if isinstance(seed, str) and seed == 'default':
+        _backend.id_srando()
+    elif hasattr(seed, '__len__'):
+        state = np.asfortranarray(seed, dtype=float)
+        if state.shape != (55,):
+            raise ValueError("invalid input size")
+        elif state.min() < 0 or state.max() > 1:
+            raise ValueError("values not in range [0,1]")
+        _backend.id_srandi(state)
+    elif seed is None:
+        _backend.id_srandi(np.random.rand(55))
+    else:
+        rnd = np.random.RandomState(seed)
+        _backend.id_srandi(rnd.rand(55))
+
+
+def rand(*shape):
+    """
+    Generate standard uniform pseudorandom numbers via a very efficient lagged
+    Fibonacci method.
+
+    This routine is used for all random number generation in this package and
+    can affect ID and SVD results.
+
+    Parameters
+    ----------
+    *shape
+        Shape of output array
+
+    """
+    # For details, see :func:`_backend.id_srand` and :func:`_backend.id_srando`.
+    return _backend.id_srand(np.prod(shape)).reshape(shape)
+
+
+def interp_decomp(A, eps_or_k, rand=True):
+    """
+    Compute ID of a matrix.
+
+    An ID of a matrix `A` is a factorization defined by a rank `k`, a column
+    index array `idx`, and interpolation coefficients `proj` such that::
+
+        numpy.dot(A[:,idx[:k]], proj) = A[:,idx[k:]]
+
+    The original matrix can then be reconstructed as::
+
+        numpy.hstack([A[:,idx[:k]],
+                      numpy.dot(A[:,idx[:k]], proj)])[:,numpy.argsort(idx)]
+
+    or via the routine :func:`reconstruct_matrix_from_id`. This can
+    equivalently be written as::
+
+        numpy.dot(A[:,idx[:k]],
+                  numpy.hstack([numpy.eye(k), proj]))[:,np.argsort(idx)]
+
+    in terms of the skeleton and interpolation matrices::
+
+        B = A[:,idx[:k]]
+
+    and::
+
+        P = numpy.hstack([numpy.eye(k), proj])[:,np.argsort(idx)]
+
+    respectively. See also :func:`reconstruct_interp_matrix` and
+    :func:`reconstruct_skel_matrix`.
+
+    The ID can be computed to any relative precision or rank (depending on the
+    value of `eps_or_k`). If a precision is specified (`eps_or_k < 1`), then
+    this function has the output signature::
+
+        k, idx, proj = interp_decomp(A, eps_or_k)
+
+    Otherwise, if a rank is specified (`eps_or_k >= 1`), then the output
+    signature is::
+
+        idx, proj = interp_decomp(A, eps_or_k)
+
+    ..  This function automatically detects the form of the input parameters
+        and passes them to the appropriate backend. For details, see
+        :func:`_backend.iddp_id`, :func:`_backend.iddp_aid`,
+        :func:`_backend.iddp_rid`, :func:`_backend.iddr_id`,
+        :func:`_backend.iddr_aid`, :func:`_backend.iddr_rid`,
+        :func:`_backend.idzp_id`, :func:`_backend.idzp_aid`,
+        :func:`_backend.idzp_rid`, :func:`_backend.idzr_id`,
+        :func:`_backend.idzr_aid`, and :func:`_backend.idzr_rid`.
+
+    Parameters
+    ----------
+    A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator` with `rmatvec`
+        Matrix to be factored
+    eps_or_k : float or int
+        Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of
+        approximation.
+    rand : bool, optional
+        Whether to use random sampling if `A` is of type :class:`numpy.ndarray`
+        (randomized algorithms are always used if `A` is of type
+        :class:`scipy.sparse.linalg.LinearOperator`).
+
+    Returns
+    -------
+    k : int
+        Rank required to achieve specified relative precision if
+        `eps_or_k < 1`.
+    idx : :class:`numpy.ndarray`
+        Column index array.
+    proj : :class:`numpy.ndarray`
+        Interpolation coefficients.
+    """
+    from scipy.sparse.linalg import LinearOperator
+
+    real = _is_real(A)
+
+    if isinstance(A, np.ndarray):
+        if eps_or_k < 1:
+            eps = eps_or_k
+            if rand:
+                if real:
+                    k, idx, proj = _backend.iddp_aid(eps, A)
+                else:
+                    if _IS_32BIT:
+                        raise _32BIT_ERROR
+                    k, idx, proj = _backend.idzp_aid(eps, A)
+            else:
+                if real:
+                    k, idx, proj = _backend.iddp_id(eps, A)
+                else:
+                    k, idx, proj = _backend.idzp_id(eps, A)
+            return k, idx - 1, proj
+        else:
+            k = int(eps_or_k)
+            if rand:
+                if real:
+                    idx, proj = _backend.iddr_aid(A, k)
+                else:
+                    if _IS_32BIT:
+                        raise _32BIT_ERROR
+                    idx, proj = _backend.idzr_aid(A, k)
+            else:
+                if real:
+                    idx, proj = _backend.iddr_id(A, k)
+                else:
+                    idx, proj = _backend.idzr_id(A, k)
+            return idx - 1, proj
+    elif isinstance(A, LinearOperator):
+        m, n = A.shape
+        matveca = A.rmatvec
+        if eps_or_k < 1:
+            eps = eps_or_k
+            if real:
+                k, idx, proj = _backend.iddp_rid(eps, m, n, matveca)
+            else:
+                if _IS_32BIT:
+                    raise _32BIT_ERROR
+                k, idx, proj = _backend.idzp_rid(eps, m, n, matveca)
+            return k, idx - 1, proj
+        else:
+            k = int(eps_or_k)
+            if real:
+                idx, proj = _backend.iddr_rid(m, n, matveca, k)
+            else:
+                if _IS_32BIT:
+                    raise _32BIT_ERROR
+                idx, proj = _backend.idzr_rid(m, n, matveca, k)
+            return idx - 1, proj
+    else:
+        raise _TYPE_ERROR
+
+
+def reconstruct_matrix_from_id(B, idx, proj):
+    """
+    Reconstruct matrix from its ID.
+
+    A matrix `A` with skeleton matrix `B` and ID indices and coefficients `idx`
+    and `proj`, respectively, can be reconstructed as::
+
+        numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)]
+
+    See also :func:`reconstruct_interp_matrix` and
+    :func:`reconstruct_skel_matrix`.
+
+    ..  This function automatically detects the matrix data type and calls the
+        appropriate backend. For details, see :func:`_backend.idd_reconid` and
+        :func:`_backend.idz_reconid`.
+
+    Parameters
+    ----------
+    B : :class:`numpy.ndarray`
+        Skeleton matrix.
+    idx : :class:`numpy.ndarray`
+        Column index array.
+    proj : :class:`numpy.ndarray`
+        Interpolation coefficients.
+
+    Returns
+    -------
+    :class:`numpy.ndarray`
+        Reconstructed matrix.
+    """
+    if _is_real(B):
+        return _backend.idd_reconid(B, idx + 1, proj)
+    else:
+        return _backend.idz_reconid(B, idx + 1, proj)
+
+
+def reconstruct_interp_matrix(idx, proj):
+    """
+    Reconstruct interpolation matrix from ID.
+
+    The interpolation matrix can be reconstructed from the ID indices and
+    coefficients `idx` and `proj`, respectively, as::
+
+        P = numpy.hstack([numpy.eye(proj.shape[0]), proj])[:,numpy.argsort(idx)]
+
+    The original matrix can then be reconstructed from its skeleton matrix `B`
+    via::
+
+        numpy.dot(B, P)
+
+    See also :func:`reconstruct_matrix_from_id` and
+    :func:`reconstruct_skel_matrix`.
+
+    ..  This function automatically detects the matrix data type and calls the
+        appropriate backend. For details, see :func:`_backend.idd_reconint` and
+        :func:`_backend.idz_reconint`.
+
+    Parameters
+    ----------
+    idx : :class:`numpy.ndarray`
+        Column index array.
+    proj : :class:`numpy.ndarray`
+        Interpolation coefficients.
+
+    Returns
+    -------
+    :class:`numpy.ndarray`
+        Interpolation matrix.
+    """
+    if _is_real(proj):
+        return _backend.idd_reconint(idx + 1, proj)
+    else:
+        return _backend.idz_reconint(idx + 1, proj)
+
+
+def reconstruct_skel_matrix(A, k, idx):
+    """
+    Reconstruct skeleton matrix from ID.
+
+    The skeleton matrix can be reconstructed from the original matrix `A` and its
+    ID rank and indices `k` and `idx`, respectively, as::
+
+        B = A[:,idx[:k]]
+
+    The original matrix can then be reconstructed via::
+
+        numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)]
+
+    See also :func:`reconstruct_matrix_from_id` and
+    :func:`reconstruct_interp_matrix`.
+
+    ..  This function automatically detects the matrix data type and calls the
+        appropriate backend. For details, see :func:`_backend.idd_copycols` and
+        :func:`_backend.idz_copycols`.
+
+    Parameters
+    ----------
+    A : :class:`numpy.ndarray`
+        Original matrix.
+    k : int
+        Rank of ID.
+    idx : :class:`numpy.ndarray`
+        Column index array.
+
+    Returns
+    -------
+    :class:`numpy.ndarray`
+        Skeleton matrix.
+    """
+    if _is_real(A):
+        return _backend.idd_copycols(A, k, idx + 1)
+    else:
+        return _backend.idz_copycols(A, k, idx + 1)
+
+
+def id_to_svd(B, idx, proj):
+    """
+    Convert ID to SVD.
+
+    The SVD reconstruction of a matrix with skeleton matrix `B` and ID indices and
+    coefficients `idx` and `proj`, respectively, is::
+
+        U, S, V = id_to_svd(B, idx, proj)
+        A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T))
+
+    See also :func:`svd`.
+
+    ..  This function automatically detects the matrix data type and calls the
+        appropriate backend. For details, see :func:`_backend.idd_id2svd` and
+        :func:`_backend.idz_id2svd`.
+
+    Parameters
+    ----------
+    B : :class:`numpy.ndarray`
+        Skeleton matrix.
+    idx : :class:`numpy.ndarray`
+        Column index array.
+    proj : :class:`numpy.ndarray`
+        Interpolation coefficients.
+
+    Returns
+    -------
+    U : :class:`numpy.ndarray`
+        Left singular vectors.
+    S : :class:`numpy.ndarray`
+        Singular values.
+    V : :class:`numpy.ndarray`
+        Right singular vectors.
+    """
+    if _is_real(B):
+        U, V, S = _backend.idd_id2svd(B, idx + 1, proj)
+    else:
+        U, V, S = _backend.idz_id2svd(B, idx + 1, proj)
+    return U, S, V
+
+
+def estimate_spectral_norm(A, its=20):
+    """
+    Estimate spectral norm of a matrix by the randomized power method.
+
+    ..  This function automatically detects the matrix data type and calls the
+        appropriate backend. For details, see :func:`_backend.idd_snorm` and
+        :func:`_backend.idz_snorm`.
+
+    Parameters
+    ----------
+    A : :class:`scipy.sparse.linalg.LinearOperator`
+        Matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the
+        `matvec` and `rmatvec` methods (to apply the matrix and its adjoint).
+    its : int, optional
+        Number of power method iterations.
+
+    Returns
+    -------
+    float
+        Spectral norm estimate.
+    """
+    from scipy.sparse.linalg import aslinearoperator
+    A = aslinearoperator(A)
+    m, n = A.shape
+    matvec = lambda x: A.matvec(x)
+    matveca = lambda x: A.rmatvec(x)
+    if _is_real(A):
+        return _backend.idd_snorm(m, n, matveca, matvec, its=its)
+    else:
+        return _backend.idz_snorm(m, n, matveca, matvec, its=its)
+
+
+def estimate_spectral_norm_diff(A, B, its=20):
+    """
+    Estimate spectral norm of the difference of two matrices by the randomized
+    power method.
+
+    ..  This function automatically detects the matrix data type and calls the
+        appropriate backend. For details, see :func:`_backend.idd_diffsnorm` and
+        :func:`_backend.idz_diffsnorm`.
+
+    Parameters
+    ----------
+    A : :class:`scipy.sparse.linalg.LinearOperator`
+        First matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the
+        `matvec` and `rmatvec` methods (to apply the matrix and its adjoint).
+    B : :class:`scipy.sparse.linalg.LinearOperator`
+        Second matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with
+        the `matvec` and `rmatvec` methods (to apply the matrix and its adjoint).
+    its : int, optional
+        Number of power method iterations.
+
+    Returns
+    -------
+    float
+        Spectral norm estimate of matrix difference.
+    """
+    from scipy.sparse.linalg import aslinearoperator
+    A = aslinearoperator(A)
+    B = aslinearoperator(B)
+    m, n = A.shape
+    matvec1 = lambda x: A.matvec(x)
+    matveca1 = lambda x: A.rmatvec(x)
+    matvec2 = lambda x: B.matvec(x)
+    matveca2 = lambda x: B.rmatvec(x)
+    if _is_real(A):
+        return _backend.idd_diffsnorm(
+            m, n, matveca1, matveca2, matvec1, matvec2, its=its)
+    else:
+        return _backend.idz_diffsnorm(
+            m, n, matveca1, matveca2, matvec1, matvec2, its=its)
+
+
+def svd(A, eps_or_k, rand=True):
+    """
+    Compute SVD of a matrix via an ID.
+
+    An SVD of a matrix `A` is a factorization::
+
+        A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T))
+
+    where `U` and `V` have orthonormal columns and `S` is nonnegative.
+
+    The SVD can be computed to any relative precision or rank (depending on the
+    value of `eps_or_k`).
+
+    See also :func:`interp_decomp` and :func:`id_to_svd`.
+
+    ..  This function automatically detects the form of the input parameters and
+        passes them to the appropriate backend. For details, see
+        :func:`_backend.iddp_svd`, :func:`_backend.iddp_asvd`,
+        :func:`_backend.iddp_rsvd`, :func:`_backend.iddr_svd`,
+        :func:`_backend.iddr_asvd`, :func:`_backend.iddr_rsvd`,
+        :func:`_backend.idzp_svd`, :func:`_backend.idzp_asvd`,
+        :func:`_backend.idzp_rsvd`, :func:`_backend.idzr_svd`,
+        :func:`_backend.idzr_asvd`, and :func:`_backend.idzr_rsvd`.
+
+    Parameters
+    ----------
+    A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator`
+        Matrix to be factored, given as either a :class:`numpy.ndarray` or a
+        :class:`scipy.sparse.linalg.LinearOperator` with the `matvec` and
+        `rmatvec` methods (to apply the matrix and its adjoint).
+    eps_or_k : float or int
+        Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of
+        approximation.
+    rand : bool, optional
+        Whether to use random sampling if `A` is of type :class:`numpy.ndarray`
+        (randomized algorithms are always used if `A` is of type
+        :class:`scipy.sparse.linalg.LinearOperator`).
+
+    Returns
+    -------
+    U : :class:`numpy.ndarray`
+        Left singular vectors.
+    S : :class:`numpy.ndarray`
+        Singular values.
+    V : :class:`numpy.ndarray`
+        Right singular vectors.
+    """
+    from scipy.sparse.linalg import LinearOperator
+
+    real = _is_real(A)
+
+    if isinstance(A, np.ndarray):
+        if eps_or_k < 1:
+            eps = eps_or_k
+            if rand:
+                if real:
+                    U, V, S = _backend.iddp_asvd(eps, A)
+                else:
+                    if _IS_32BIT:
+                        raise _32BIT_ERROR
+                    U, V, S = _backend.idzp_asvd(eps, A)
+            else:
+                if real:
+                    U, V, S = _backend.iddp_svd(eps, A)
+                else:
+                    U, V, S = _backend.idzp_svd(eps, A)
+        else:
+            k = int(eps_or_k)
+            if k > min(A.shape):
+                raise ValueError("Approximation rank %s exceeds min(A.shape) = "
+                                 " %s " % (k, min(A.shape)))
+            if rand:
+                if real:
+                    U, V, S = _backend.iddr_asvd(A, k)
+                else:
+                    if _IS_32BIT:
+                        raise _32BIT_ERROR
+                    U, V, S = _backend.idzr_asvd(A, k)
+            else:
+                if real:
+                    U, V, S = _backend.iddr_svd(A, k)
+                else:
+                    U, V, S = _backend.idzr_svd(A, k)
+    elif isinstance(A, LinearOperator):
+        m, n = A.shape
+        matvec = lambda x: A.matvec(x)
+        matveca = lambda x: A.rmatvec(x)
+        if eps_or_k < 1:
+            eps = eps_or_k
+            if real:
+                U, V, S = _backend.iddp_rsvd(eps, m, n, matveca, matvec)
+            else:
+                if _IS_32BIT:
+                    raise _32BIT_ERROR
+                U, V, S = _backend.idzp_rsvd(eps, m, n, matveca, matvec)
+        else:
+            k = int(eps_or_k)
+            if real:
+                U, V, S = _backend.iddr_rsvd(m, n, matveca, matvec, k)
+            else:
+                if _IS_32BIT:
+                    raise _32BIT_ERROR
+                U, V, S = _backend.idzr_rsvd(m, n, matveca, matvec, k)
+    else:
+        raise _TYPE_ERROR
+    return U, S, V
+
+
+def estimate_rank(A, eps):
+    """
+    Estimate matrix rank to a specified relative precision using randomized
+    methods.
+
+    The matrix `A` can be given as either a :class:`numpy.ndarray` or a
+    :class:`scipy.sparse.linalg.LinearOperator`, with different algorithms used
+    for each case. If `A` is of type :class:`numpy.ndarray`, then the output
+    rank is typically about 8 higher than the actual numerical rank.
+
+    ..  This function automatically detects the form of the input parameters and
+        passes them to the appropriate backend. For details,
+        see :func:`_backend.idd_estrank`, :func:`_backend.idd_findrank`,
+        :func:`_backend.idz_estrank`, and :func:`_backend.idz_findrank`.
+
+    Parameters
+    ----------
+    A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator`
+        Matrix whose rank is to be estimated, given as either a
+        :class:`numpy.ndarray` or a :class:`scipy.sparse.linalg.LinearOperator`
+        with the `rmatvec` method (to apply the matrix adjoint).
+    eps : float
+        Relative error for numerical rank definition.
+
+    Returns
+    -------
+    int
+        Estimated matrix rank.
+    """
+    from scipy.sparse.linalg import LinearOperator
+
+    real = _is_real(A)
+
+    if isinstance(A, np.ndarray):
+        if real:
+            rank = _backend.idd_estrank(eps, A)
+        else:
+            rank = _backend.idz_estrank(eps, A)
+        if rank == 0:
+            # special return value for nearly full rank
+            rank = min(A.shape)
+        return rank
+    elif isinstance(A, LinearOperator):
+        m, n = A.shape
+        matveca = A.rmatvec
+        if real:
+            return _backend.idd_findrank(eps, m, n, matveca)
+        else:
+            return _backend.idz_findrank(eps, m, n, matveca)
+    else:
+        raise _TYPE_ERROR
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/lapack.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/lapack.py
new file mode 100644
index 00000000..3da8a03c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/lapack.py
@@ -0,0 +1,1036 @@
+"""
+Low-level LAPACK functions (:mod:`scipy.linalg.lapack`)
+=======================================================
+
+This module contains low-level functions from the LAPACK library.
+
+.. versionadded:: 0.12.0
+
+.. note::
+
+    The common ``overwrite_<>`` option in many routines allows the
+    input arrays to be overwritten to avoid extra memory allocation.
+    However, this requires the array to satisfy two conditions:
+    its memory order and its data type must exactly match the order
+    and the type expected by the routine.
+
+    As an example, if you pass a double precision float array to any
+    ``S....`` routine which expects single precision arguments, f2py
+    will create an intermediate array to match the argument types and
+    overwriting will be performed on that intermediate array.
+
+    Similarly, if a C-contiguous array is passed, f2py will pass a
+    FORTRAN-contiguous array internally. Please make sure that these
+    details are satisfied. More information can be found in the f2py
+    documentation.
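+
+    One way to check whether overwriting actually took place is to compare
+    object identity of the returned array (a sketch; assumes the usual f2py
+    behaviour of returning the input array itself when no copy was needed):
+
+    >>> import numpy as np
+    >>> from scipy.linalg import lapack
+    >>> a = np.asfortranarray(np.random.rand(4, 4))  # float64, Fortran order
+    >>> lu, piv, info = lapack.dgetrf(a, overwrite_a=True)
+    >>> lu is a  # doctest: +SKIP
+    True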
+
+.. warning::
+
+   These functions do little to no error checking.
+   It is possible to cause crashes by mis-using them,
+   so prefer using the higher-level routines in `scipy.linalg`.
+
+Finding functions
+-----------------
+
+.. autosummary::
+   :toctree: generated/
+
+   get_lapack_funcs
+
+All functions
+-------------
+
+.. autosummary::
+   :toctree: generated/
+
+   sgbsv
+   dgbsv
+   cgbsv
+   zgbsv
+
+   sgbtrf
+   dgbtrf
+   cgbtrf
+   zgbtrf
+
+   sgbtrs
+   dgbtrs
+   cgbtrs
+   zgbtrs
+
+   sgebal
+   dgebal
+   cgebal
+   zgebal
+
+   sgecon
+   dgecon
+   cgecon
+   zgecon
+
+   sgeequ
+   dgeequ
+   cgeequ
+   zgeequ
+
+   sgeequb
+   dgeequb
+   cgeequb
+   zgeequb
+
+   sgees
+   dgees
+   cgees
+   zgees
+
+   sgeev
+   dgeev
+   cgeev
+   zgeev
+
+   sgeev_lwork
+   dgeev_lwork
+   cgeev_lwork
+   zgeev_lwork
+
+   sgehrd
+   dgehrd
+   cgehrd
+   zgehrd
+
+   sgehrd_lwork
+   dgehrd_lwork
+   cgehrd_lwork
+   zgehrd_lwork
+
+   sgejsv
+   dgejsv
+
+   sgels
+   dgels
+   cgels
+   zgels
+
+   sgels_lwork
+   dgels_lwork
+   cgels_lwork
+   zgels_lwork
+
+   sgelsd
+   dgelsd
+   cgelsd
+   zgelsd
+
+   sgelsd_lwork
+   dgelsd_lwork
+   cgelsd_lwork
+   zgelsd_lwork
+
+   sgelss
+   dgelss
+   cgelss
+   zgelss
+
+   sgelss_lwork
+   dgelss_lwork
+   cgelss_lwork
+   zgelss_lwork
+
+   sgelsy
+   dgelsy
+   cgelsy
+   zgelsy
+
+   sgelsy_lwork
+   dgelsy_lwork
+   cgelsy_lwork
+   zgelsy_lwork
+
+   sgeqp3
+   dgeqp3
+   cgeqp3
+   zgeqp3
+
+   sgeqrf
+   dgeqrf
+   cgeqrf
+   zgeqrf
+
+   sgeqrf_lwork
+   dgeqrf_lwork
+   cgeqrf_lwork
+   zgeqrf_lwork
+
+   sgeqrfp
+   dgeqrfp
+   cgeqrfp
+   zgeqrfp
+
+   sgeqrfp_lwork
+   dgeqrfp_lwork
+   cgeqrfp_lwork
+   zgeqrfp_lwork
+
+   sgerqf
+   dgerqf
+   cgerqf
+   zgerqf
+
+   sgesdd
+   dgesdd
+   cgesdd
+   zgesdd
+
+   sgesdd_lwork
+   dgesdd_lwork
+   cgesdd_lwork
+   zgesdd_lwork
+
+   sgesv
+   dgesv
+   cgesv
+   zgesv
+
+   sgesvd
+   dgesvd
+   cgesvd
+   zgesvd
+
+   sgesvd_lwork
+   dgesvd_lwork
+   cgesvd_lwork
+   zgesvd_lwork
+
+   sgesvx
+   dgesvx
+   cgesvx
+   zgesvx
+
+   sgetrf
+   dgetrf
+   cgetrf
+   zgetrf
+
+   sgetc2
+   dgetc2
+   cgetc2
+   zgetc2
+
+   sgetri
+   dgetri
+   cgetri
+   zgetri
+
+   sgetri_lwork
+   dgetri_lwork
+   cgetri_lwork
+   zgetri_lwork
+
+   sgetrs
+   dgetrs
+   cgetrs
+   zgetrs
+
+   sgesc2
+   dgesc2
+   cgesc2
+   zgesc2
+
+   sgges
+   dgges
+   cgges
+   zgges
+
+   sggev
+   dggev
+   cggev
+   zggev
+
+   sgglse
+   dgglse
+   cgglse
+   zgglse
+
+   sgglse_lwork
+   dgglse_lwork
+   cgglse_lwork
+   zgglse_lwork
+
+   sgtsv
+   dgtsv
+   cgtsv
+   zgtsv
+
+   sgtsvx
+   dgtsvx
+   cgtsvx
+   zgtsvx
+
+   chbevd
+   zhbevd
+
+   chbevx
+   zhbevx
+
+   checon
+   zhecon
+
+   cheequb
+   zheequb
+
+   cheev
+   zheev
+
+   cheev_lwork
+   zheev_lwork
+
+   cheevd
+   zheevd
+
+   cheevd_lwork
+   zheevd_lwork
+
+   cheevr
+   zheevr
+
+   cheevr_lwork
+   zheevr_lwork
+
+   cheevx
+   zheevx
+
+   cheevx_lwork
+   zheevx_lwork
+
+   chegst
+   zhegst
+
+   chegv
+   zhegv
+
+   chegv_lwork
+   zhegv_lwork
+
+   chegvd
+   zhegvd
+
+   chegvx
+   zhegvx
+
+   chegvx_lwork
+   zhegvx_lwork
+
+   chesv
+   zhesv
+
+   chesv_lwork
+   zhesv_lwork
+
+   chesvx
+   zhesvx
+
+   chesvx_lwork
+   zhesvx_lwork
+
+   chetrd
+   zhetrd
+
+   chetrd_lwork
+   zhetrd_lwork
+
+   chetrf
+   zhetrf
+
+   chetrf_lwork
+   zhetrf_lwork
+
+   chfrk
+   zhfrk
+
+   slamch
+   dlamch
+
+   slange
+   dlange
+   clange
+   zlange
+
+   slarf
+   dlarf
+   clarf
+   zlarf
+
+   slarfg
+   dlarfg
+   clarfg
+   zlarfg
+
+   slartg
+   dlartg
+   clartg
+   zlartg
+
+   slasd4
+   dlasd4
+
+   slaswp
+   dlaswp
+   claswp
+   zlaswp
+
+   slauum
+   dlauum
+   clauum
+   zlauum
+
+   sorcsd
+   dorcsd
+   sorcsd_lwork
+   dorcsd_lwork
+
+   sorghr
+   dorghr
+   sorghr_lwork
+   dorghr_lwork
+
+   sorgqr
+   dorgqr
+
+   sorgrq
+   dorgrq
+
+   sormqr
+   dormqr
+
+   sormrz
+   dormrz
+
+   sormrz_lwork
+   dormrz_lwork
+
+   spbsv
+   dpbsv
+   cpbsv
+   zpbsv
+
+   spbtrf
+   dpbtrf
+   cpbtrf
+   zpbtrf
+
+   spbtrs
+   dpbtrs
+   cpbtrs
+   zpbtrs
+
+   spftrf
+   dpftrf
+   cpftrf
+   zpftrf
+
+   spftri
+   dpftri
+   cpftri
+   zpftri
+
+   spftrs
+   dpftrs
+   cpftrs
+   zpftrs
+
+   spocon
+   dpocon
+   cpocon
+   zpocon
+
+   spstrf
+   dpstrf
+   cpstrf
+   zpstrf
+
+   spstf2
+   dpstf2
+   cpstf2
+   zpstf2
+
+   sposv
+   dposv
+   cposv
+   zposv
+
+   sposvx
+   dposvx
+   cposvx
+   zposvx
+
+   spotrf
+   dpotrf
+   cpotrf
+   zpotrf
+
+   spotri
+   dpotri
+   cpotri
+   zpotri
+
+   spotrs
+   dpotrs
+   cpotrs
+   zpotrs
+
+   sppcon
+   dppcon
+   cppcon
+   zppcon
+
+   sppsv
+   dppsv
+   cppsv
+   zppsv
+
+   spptrf
+   dpptrf
+   cpptrf
+   zpptrf
+
+   spptri
+   dpptri
+   cpptri
+   zpptri
+
+   spptrs
+   dpptrs
+   cpptrs
+   zpptrs
+
+   sptsv
+   dptsv
+   cptsv
+   zptsv
+
+   sptsvx
+   dptsvx
+   cptsvx
+   zptsvx
+
+   spttrf
+   dpttrf
+   cpttrf
+   zpttrf
+
+   spttrs
+   dpttrs
+   cpttrs
+   zpttrs
+
+   spteqr
+   dpteqr
+   cpteqr
+   zpteqr
+
+   crot
+   zrot
+
+   ssbev
+   dsbev
+
+   ssbevd
+   dsbevd
+
+   ssbevx
+   dsbevx
+
+   ssfrk
+   dsfrk
+
+   sstebz
+   dstebz
+
+   sstein
+   dstein
+
+   sstemr
+   dstemr
+
+   sstemr_lwork
+   dstemr_lwork
+
+   ssterf
+   dsterf
+
+   sstev
+   dstev
+
+   ssycon
+   dsycon
+   csycon
+   zsycon
+
+   ssyconv
+   dsyconv
+   csyconv
+   zsyconv
+
+   ssyequb
+   dsyequb
+   csyequb
+   zsyequb
+
+   ssyev
+   dsyev
+
+   ssyev_lwork
+   dsyev_lwork
+
+   ssyevd
+   dsyevd
+
+   ssyevd_lwork
+   dsyevd_lwork
+
+   ssyevr
+   dsyevr
+
+   ssyevr_lwork
+   dsyevr_lwork
+
+   ssyevx
+   dsyevx
+
+   ssyevx_lwork
+   dsyevx_lwork
+
+   ssygst
+   dsygst
+
+   ssygv
+   dsygv
+
+   ssygv_lwork
+   dsygv_lwork
+
+   ssygvd
+   dsygvd
+
+   ssygvx
+   dsygvx
+
+   ssygvx_lwork
+   dsygvx_lwork
+
+   ssysv
+   dsysv
+   csysv
+   zsysv
+
+   ssysv_lwork
+   dsysv_lwork
+   csysv_lwork
+   zsysv_lwork
+
+   ssysvx
+   dsysvx
+   csysvx
+   zsysvx
+
+   ssysvx_lwork
+   dsysvx_lwork
+   csysvx_lwork
+   zsysvx_lwork
+
+   ssytf2
+   dsytf2
+   csytf2
+   zsytf2
+
+   ssytrd
+   dsytrd
+
+   ssytrd_lwork
+   dsytrd_lwork
+
+   ssytrf
+   dsytrf
+   csytrf
+   zsytrf
+
+   ssytrf_lwork
+   dsytrf_lwork
+   csytrf_lwork
+   zsytrf_lwork
+
+   stbtrs
+   dtbtrs
+   ctbtrs
+   ztbtrs
+
+   stfsm
+   dtfsm
+   ctfsm
+   ztfsm
+
+   stfttp
+   dtfttp
+   ctfttp
+   ztfttp
+
+   stfttr
+   dtfttr
+   ctfttr
+   ztfttr
+
+   stgexc
+   dtgexc
+   ctgexc
+   ztgexc
+
+   stgsen
+   dtgsen
+   ctgsen
+   ztgsen
+
+   stgsen_lwork
+   dtgsen_lwork
+   ctgsen_lwork
+   ztgsen_lwork
+
+   stpttf
+   dtpttf
+   ctpttf
+   ztpttf
+
+   stpttr
+   dtpttr
+   ctpttr
+   ztpttr
+
+   strexc
+   dtrexc
+   ctrexc
+   ztrexc
+
+   strsen
+   dtrsen
+   ctrsen
+   ztrsen
+
+   strsen_lwork
+   dtrsen_lwork
+   ctrsen_lwork
+   ztrsen_lwork
+
+   strsyl
+   dtrsyl
+   ctrsyl
+   ztrsyl
+
+   strtri
+   dtrtri
+   ctrtri
+   ztrtri
+
+   strtrs
+   dtrtrs
+   ctrtrs
+   ztrtrs
+
+   strttf
+   dtrttf
+   ctrttf
+   ztrttf
+
+   strttp
+   dtrttp
+   ctrttp
+   ztrttp
+
+   stzrzf
+   dtzrzf
+   ctzrzf
+   ztzrzf
+
+   stzrzf_lwork
+   dtzrzf_lwork
+   ctzrzf_lwork
+   ztzrzf_lwork
+
+   cunghr
+   zunghr
+
+   cunghr_lwork
+   zunghr_lwork
+
+   cungqr
+   zungqr
+
+   cungrq
+   zungrq
+
+   cunmqr
+   zunmqr
+
+   sgeqrt
+   dgeqrt
+   cgeqrt
+   zgeqrt
+
+   sgemqrt
+   dgemqrt
+   cgemqrt
+   zgemqrt
+
+   sgttrf
+   dgttrf
+   cgttrf
+   zgttrf
+
+   sgttrs
+   dgttrs
+   cgttrs
+   zgttrs
+
+   stpqrt
+   dtpqrt
+   ctpqrt
+   ztpqrt
+
+   stpmqrt
+   dtpmqrt
+   ctpmqrt
+   ztpmqrt
+
+   cuncsd
+   zuncsd
+
+   cuncsd_lwork
+   zuncsd_lwork
+
+   cunmrz
+   zunmrz
+
+   cunmrz_lwork
+   zunmrz_lwork
+
+   ilaver
+
+"""
+#
+# Author: Pearu Peterson, March 2002
+#
+
+import numpy as _np
+from .blas import _get_funcs, _memoize_get_funcs
+from scipy.linalg import _flapack
+from re import compile as regex_compile
+try:
+    from scipy.linalg import _clapack
+except ImportError:
+    _clapack = None
+
+try:
+    from scipy.linalg import _flapack_64
+    HAS_ILP64 = True
+except ImportError:
+    HAS_ILP64 = False
+    _flapack_64 = None
+
+
+# Expose all functions (only flapack --- clapack is an implementation detail)
+empty_module = None
+from scipy.linalg._flapack import *
+del empty_module
+
+__all__ = ['get_lapack_funcs']
+
+# some convenience alias for complex functions
+_lapack_alias = {
+    'corghr': 'cunghr', 'zorghr': 'zunghr',
+    'corghr_lwork': 'cunghr_lwork', 'zorghr_lwork': 'zunghr_lwork',
+    'corgqr': 'cungqr', 'zorgqr': 'zungqr',
+    'cormqr': 'cunmqr', 'zormqr': 'zunmqr',
+    'corgrq': 'cungrq', 'zorgrq': 'zungrq',
+}
+
+
+# Place guards against docstring rendering issues with special characters
+p1 = regex_compile(r'with bounds (?P<b>.*?)( and (?P<s>.*?) storage){0,1}\n')
+p2 = regex_compile(r'Default: (?P<d>.*?)\n')
+
+
+def backtickrepl(m):
+    if m.group('s'):
+        return ('with bounds ``{}`` with ``{}`` storage\n'
+                ''.format(m.group('b'), m.group('s')))
+    else:
+        return 'with bounds ``{}``\n'.format(m.group('b'))
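+
+# For example, the substitution above rewrites f2py-generated docstring
+# fragments such as (a sketch):
+#     'with bounds (lwork) and (MAX(1,lwork)) storage\n'
+#  -> 'with bounds ``(lwork)`` with ``(MAX(1,lwork))`` storage\n'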
+
+
+for routine in [ssyevr, dsyevr, cheevr, zheevr,
+                ssyevx, dsyevx, cheevx, zheevx,
+                ssygvd, dsygvd, chegvd, zhegvd]:
+    if routine.__doc__:
+        routine.__doc__ = p1.sub(backtickrepl, routine.__doc__)
+        routine.__doc__ = p2.sub('Default ``\\1``\n', routine.__doc__)
+    else:
+        continue
+
+del regex_compile, p1, p2, backtickrepl
+
+
+@_memoize_get_funcs
+def get_lapack_funcs(names, arrays=(), dtype=None, ilp64=False):
+    """Return available LAPACK function objects from names.
+
+    Arrays are used to determine the optimal prefix of LAPACK routines.
+
+    Parameters
+    ----------
+    names : str or sequence of str
+        Name(s) of LAPACK functions without type prefix.
+
+    arrays : sequence of ndarrays, optional
+        Arrays can be given to determine optimal prefix of LAPACK
+        routines. If not given, double-precision routines will be
+        used, otherwise the most generic type in arrays will be used.
+
+    dtype : str or dtype, optional
+        Data-type specifier. Not used if `arrays` is non-empty.
+
+    ilp64 : {True, False, 'preferred'}, optional
+        Whether to return ILP64 routine variant.
+        Choosing 'preferred' returns ILP64 routine if available, and
+        otherwise the 32-bit routine. Default: False
+
+    Returns
+    -------
+    funcs : list
+        List containing the found function(s).
+
+    Notes
+    -----
+    This routine automatically chooses between Fortran/C
+    interfaces. Fortran code is used whenever possible for arrays with
+    column major order. In all other cases, C code is preferred.
+
+    In LAPACK, the naming convention is that all functions start with a
+    type prefix, which depends on the type of the principal
+    matrix. These can be one of {'s', 'd', 'c', 'z'} for the NumPy
+    types {float32, float64, complex64, complex128} respectively, and
+    are stored in attribute ``typecode`` of the returned functions.
+
+    Examples
+    --------
+    Suppose we would like to use the '?lange' routine, which computes the
+    selected norm of an array. We pass our array in order to get the correct
+    'lange' flavor.
+
+    >>> import numpy as np
+    >>> import scipy.linalg as LA
+    >>> rng = np.random.default_rng()
+
+    >>> a = rng.random((3,2))
+    >>> x_lange = LA.get_lapack_funcs('lange', (a,))
+    >>> x_lange.typecode
+    'd'
+    >>> x_lange = LA.get_lapack_funcs('lange',(a*1j,))
+    >>> x_lange.typecode
+    'z'
+
+    Several LAPACK routines work best when their internal WORK array has
+    the optimal size (big enough for fast computation and small enough to
+    avoid waste of memory). This size is determined by a dedicated query
+    to the function, which is often wrapped as a standalone function and
+    commonly denoted as ``###_lwork``. Below is an example for ``?sysv``:
+
+    >>> a = rng.random((1000, 1000))
+    >>> b = rng.random((1000, 1)) * 1j
+    >>> # We pick up zsysv and zsysv_lwork due to b array
+    ... xsysv, xlwork = LA.get_lapack_funcs(('sysv', 'sysv_lwork'), (a, b))
+    >>> opt_lwork, _ = xlwork(a.shape[0])  # returns a complex for 'z' prefix
+    >>> udut, ipiv, x, info = xsysv(a, b, lwork=int(opt_lwork.real))
+
+    """
+    if isinstance(ilp64, str):
+        if ilp64 == 'preferred':
+            ilp64 = HAS_ILP64
+        else:
+            raise ValueError("Invalid value for 'ilp64'")
+
+    if not ilp64:
+        return _get_funcs(names, arrays, dtype,
+                          "LAPACK", _flapack, _clapack,
+                          "flapack", "clapack", _lapack_alias,
+                          ilp64=False)
+    else:
+        if not HAS_ILP64:
+            raise RuntimeError("LAPACK ILP64 routine requested, but Scipy "
+                               "compiled only with 32-bit BLAS")
+        return _get_funcs(names, arrays, dtype,
+                          "LAPACK", _flapack_64, None,
+                          "flapack_64", None, _lapack_alias,
+                          ilp64=True)
+
+
+_int32_max = _np.iinfo(_np.int32).max
+_int64_max = _np.iinfo(_np.int64).max
+
+
+def _compute_lwork(routine, *args, **kwargs):
+    """
+    Round floating-point lwork returned by lapack to integer.
+
+    Several LAPACK routines compute optimal values for LWORK, which
+    they return in a floating-point variable. However, for large
+    values of LWORK, single-precision floating point is not sufficient
+    to hold the exact value --- some LAPACK versions (<= 3.5.0 at
+    least) truncate the returned integer to single precision and in
+    some cases this can be smaller than the required value.
+
+    Examples
+    --------
+    >>> from scipy.linalg import lapack
+    >>> n = 5000
+    >>> s_r, s_lw = lapack.get_lapack_funcs(('sysvx', 'sysvx_lwork'))
+    >>> lwork = lapack._compute_lwork(s_lw, n)
+    >>> lwork
+    32000
+
+    """
+    dtype = getattr(routine, 'dtype', None)
+    int_dtype = getattr(routine, 'int_dtype', None)
+    ret = routine(*args, **kwargs)
+    if ret[-1] != 0:
+        raise ValueError("Internal work array size computation failed: "
+                         "%d" % (ret[-1],))
+
+    if len(ret) == 2:
+        return _check_work_float(ret[0].real, dtype, int_dtype)
+    else:
+        return tuple(_check_work_float(x.real, dtype, int_dtype)
+                     for x in ret[:-1])
+
+
+def _check_work_float(value, dtype, int_dtype):
+    """
+    Convert LAPACK-returned work array size float to integer,
+    carefully for single-precision types.
+    """
+
+    if dtype == _np.float32 or dtype == _np.complex64:
+        # Single-precision routine -- take next fp value to work
+        # around possible truncation in LAPACK code
+        value = _np.nextafter(value, _np.inf, dtype=_np.float32)
+
+    value = int(value)
+    if int_dtype.itemsize == 4:
+        if value < 0 or value > _int32_max:
+            raise ValueError("Too large work array required -- computation "
+                             "cannot be performed with standard 32-bit"
+                             " LAPACK.")
+    elif int_dtype.itemsize == 8:
+        if value < 0 or value > _int64_max:
+            raise ValueError("Too large work array required -- computation"
+                             " cannot be performed with standard 64-bit"
+                             " LAPACK.")
+    return value
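+
+
+# A minimal sketch of why the ``nextafter`` bump above matters (assumes the
+# private helper keeps its current signature):
+#
+#     >>> import numpy as np
+#     >>> from scipy.linalg.lapack import _check_work_float
+#     >>> # 2**25 + 1 rounds down to 2**25 in float32; the bump keeps the
+#     >>> # returned size from under-allocating the work array
+#     >>> _check_work_float(np.float32(2**25 + 1), np.float32,
+#     ...                   np.dtype(np.int32))
+#     33554434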
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/matfuncs.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/matfuncs.py
new file mode 100644
index 00000000..51b80bae
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/matfuncs.py
@@ -0,0 +1,32 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.linalg` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _matfuncs
+
+__all__ = [  # noqa: F822
+    'expm', 'cosm', 'sinm', 'tanm', 'coshm', 'sinhm',
+    'tanhm', 'logm', 'funm', 'signm', 'sqrtm',
+    'expm_frechet', 'expm_cond', 'fractional_matrix_power',
+    'khatri_rao', 'prod', 'logical_not', 'ravel', 'transpose',
+    'conjugate', 'absolute', 'amax', 'sign', 'isfinite', 'single',
+    'norm', 'solve', 'inv', 'triu', 'svd', 'schur', 'rsf2csf', 'eps', 'feps'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.linalg.matfuncs is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.linalg instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.linalg` namespace, "
+                  "the `scipy.linalg.matfuncs` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_matfuncs, name)
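+
+
+# Illustrative behaviour of this shim (a sketch assuming a standard SciPy
+# install): any name in ``__all__`` is resolved through ``__getattr__``
+# above and emits a DeprecationWarning:
+#
+#     >>> import warnings
+#     >>> from scipy.linalg import matfuncs
+#     >>> with warnings.catch_warnings(record=True) as w:
+#     ...     warnings.simplefilter("always")
+#     ...     func = matfuncs.expm
+#     >>> issubclass(w[0].category, DeprecationWarning)
+#     True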
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/misc.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/misc.py
new file mode 100644
index 00000000..fa3e74d9
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/misc.py
@@ -0,0 +1,28 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.linalg` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _misc
+
+__all__ = [  # noqa: F822
+    'LinAlgError', 'LinAlgWarning', 'norm', 'get_blas_funcs',
+    'get_lapack_funcs'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.linalg.misc is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.linalg instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.linalg` namespace, "
+                  "the `scipy.linalg.misc` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_misc, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/special_matrices.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/special_matrices.py
new file mode 100644
index 00000000..81a00542
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/special_matrices.py
@@ -0,0 +1,30 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.linalg` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _special_matrices
+
+__all__ = [  # noqa: F822
+    'tri', 'tril', 'triu', 'toeplitz', 'circulant', 'hankel',
+    'hadamard', 'leslie', 'kron', 'block_diag', 'companion',
+    'helmert', 'hilbert', 'invhilbert', 'pascal', 'invpascal', 'dft',
+    'fiedler', 'fiedler_companion', 'convolution_matrix', 'as_strided'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.linalg.special_matrices is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.linalg instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.linalg` namespace, the"
+                  " `scipy.linalg.special_matrices` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_special_matrices, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/data/carex_15_data.npz b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/data/carex_15_data.npz
new file mode 100644
index 00000000..31a7dc6a
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/data/carex_15_data.npz differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/data/carex_18_data.npz b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/data/carex_18_data.npz
new file mode 100644
index 00000000..6bd78dc5
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/data/carex_18_data.npz differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/data/carex_19_data.npz b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/data/carex_19_data.npz
new file mode 100644
index 00000000..3564000b
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/data/carex_19_data.npz differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/data/carex_20_data.npz b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/data/carex_20_data.npz
new file mode 100644
index 00000000..e68e5a2e
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/data/carex_20_data.npz differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/data/carex_6_data.npz b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/data/carex_6_data.npz
new file mode 100644
index 00000000..e70ff735
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/data/carex_6_data.npz differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/data/gendare_20170120_data.npz b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/data/gendare_20170120_data.npz
new file mode 100644
index 00000000..22cb1294
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/data/gendare_20170120_data.npz differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_basic.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_basic.py
new file mode 100644
index 00000000..94b653c5
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_basic.py
@@ -0,0 +1,1714 @@
+import platform
+import itertools
+import warnings
+
+import numpy as np
+from numpy import (arange, array, dot, zeros, identity, conjugate, transpose,
+                   float32)
+import numpy.linalg as linalg
+from numpy.random import random
+
+from numpy.testing import (assert_equal, assert_almost_equal, assert_,
+                           assert_array_almost_equal, assert_allclose,
+                           assert_array_equal, suppress_warnings)
+import pytest
+from pytest import raises as assert_raises
+
+from scipy._lib import _pep440
+from scipy.linalg import (solve, inv, det, lstsq, pinv, pinvh, norm,
+                          solve_banded, solveh_banded, solve_triangular,
+                          solve_circulant, circulant, LinAlgError, block_diag,
+                          matrix_balance, qr, LinAlgWarning)
+
+from scipy.linalg._testutils import assert_no_overwrite
+from scipy._lib._testutils import check_free_memory
+from scipy.linalg.blas import HAS_ILP64
+
+REAL_DTYPES = (np.float32, np.float64, np.longdouble)
+COMPLEX_DTYPES = (np.complex64, np.complex128, np.clongdouble)
+DTYPES = REAL_DTYPES + COMPLEX_DTYPES
+
+
+def _eps_cast(dtyp):
+    """Get the epsilon for dtype, possibly downcast to BLAS types."""
+    dt = dtyp
+    if dt == np.longdouble:
+        dt = np.float64
+    elif dt == np.clongdouble:
+        dt = np.complex128
+    return np.finfo(dt).eps
+
+
+class TestSolveBanded:
+
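+    # solve_banded takes ``a`` in diagonal-ordered form, where
+    # ``ab[u + i - j, j] == a[i, j]``; the corner entries of ``ab`` that
+    # fall outside the band are never read, so the tests fill them with 0.
+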
+    def test_real(self):
+        a = array([[1.0, 20, 0, 0],
+                   [-30, 4, 6, 0],
+                   [2, 1, 20, 2],
+                   [0, -1, 7, 14]])
+        ab = array([[0.0, 20, 6, 2],
+                    [1, 4, 20, 14],
+                    [-30, 1, 7, 0],
+                    [2, -1, 0, 0]])
+        l, u = 2, 1
+        b4 = array([10.0, 0.0, 2.0, 14.0])
+        b4by1 = b4.reshape(-1, 1)
+        b4by2 = array([[2, 1],
+                       [-30, 4],
+                       [2, 3],
+                       [1, 3]])
+        b4by4 = array([[1, 0, 0, 0],
+                       [0, 0, 0, 1],
+                       [0, 1, 0, 0],
+                       [0, 1, 0, 0]])
+        for b in [b4, b4by1, b4by2, b4by4]:
+            x = solve_banded((l, u), ab, b)
+            assert_array_almost_equal(dot(a, x), b)
+
+    def test_complex(self):
+        a = array([[1.0, 20, 0, 0],
+                   [-30, 4, 6, 0],
+                   [2j, 1, 20, 2j],
+                   [0, -1, 7, 14]])
+        ab = array([[0.0, 20, 6, 2j],
+                    [1, 4, 20, 14],
+                    [-30, 1, 7, 0],
+                    [2j, -1, 0, 0]])
+        l, u = 2, 1
+        b4 = array([10.0, 0.0, 2.0, 14.0j])
+        b4by1 = b4.reshape(-1, 1)
+        b4by2 = array([[2, 1],
+                       [-30, 4],
+                       [2, 3],
+                       [1, 3]])
+        b4by4 = array([[1, 0, 0, 0],
+                       [0, 0, 0, 1j],
+                       [0, 1, 0, 0],
+                       [0, 1, 0, 0]])
+        for b in [b4, b4by1, b4by2, b4by4]:
+            x = solve_banded((l, u), ab, b)
+            assert_array_almost_equal(dot(a, x), b)
+
+    def test_tridiag_real(self):
+        ab = array([[0.0, 20, 6, 2],
+                   [1, 4, 20, 14],
+                   [-30, 1, 7, 0]])
+        a = (np.diag(ab[0, 1:], 1) + np.diag(ab[1, :], 0)
+             + np.diag(ab[2, :-1], -1))
+        b4 = array([10.0, 0.0, 2.0, 14.0])
+        b4by1 = b4.reshape(-1, 1)
+        b4by2 = array([[2, 1],
+                       [-30, 4],
+                       [2, 3],
+                       [1, 3]])
+        b4by4 = array([[1, 0, 0, 0],
+                       [0, 0, 0, 1],
+                       [0, 1, 0, 0],
+                       [0, 1, 0, 0]])
+        for b in [b4, b4by1, b4by2, b4by4]:
+            x = solve_banded((1, 1), ab, b)
+            assert_array_almost_equal(dot(a, x), b)
+
+    def test_tridiag_complex(self):
+        ab = array([[0.0, 20, 6, 2j],
+                   [1, 4, 20, 14],
+                   [-30, 1, 7, 0]])
+        a = (np.diag(ab[0, 1:], 1) + np.diag(ab[1, :], 0)
+             + np.diag(ab[2, :-1], -1))
+        b4 = array([10.0, 0.0, 2.0, 14.0j])
+        b4by1 = b4.reshape(-1, 1)
+        b4by2 = array([[2, 1],
+                       [-30, 4],
+                       [2, 3],
+                       [1, 3]])
+        b4by4 = array([[1, 0, 0, 0],
+                       [0, 0, 0, 1],
+                       [0, 1, 0, 0],
+                       [0, 1, 0, 0]])
+        for b in [b4, b4by1, b4by2, b4by4]:
+            x = solve_banded((1, 1), ab, b)
+            assert_array_almost_equal(dot(a, x), b)
+
+    def test_check_finite(self):
+        a = array([[1.0, 20, 0, 0],
+                   [-30, 4, 6, 0],
+                   [2, 1, 20, 2],
+                   [0, -1, 7, 14]])
+        ab = array([[0.0, 20, 6, 2],
+                    [1, 4, 20, 14],
+                    [-30, 1, 7, 0],
+                    [2, -1, 0, 0]])
+        l, u = 2, 1
+        b4 = array([10.0, 0.0, 2.0, 14.0])
+        x = solve_banded((l, u), ab, b4, check_finite=False)
+        assert_array_almost_equal(dot(a, x), b4)
+
+    def test_bad_shape(self):
+        ab = array([[0.0, 20, 6, 2],
+                    [1, 4, 20, 14],
+                    [-30, 1, 7, 0],
+                    [2, -1, 0, 0]])
+        l, u = 2, 1
+        bad = array([1.0, 2.0, 3.0, 4.0]).reshape(-1, 4)
+        assert_raises(ValueError, solve_banded, (l, u), ab, bad)
+        assert_raises(ValueError, solve_banded, (l, u), ab, [1.0, 2.0])
+
+        # Values of (l,u) are not compatible with ab.
+        assert_raises(ValueError, solve_banded, (1, 1), ab, [1.0, 2.0])
+
+    def test_1x1(self):
+        b = array([[1., 2., 3.]])
+        x = solve_banded((1, 1), [[0], [2], [0]], b)
+        assert_array_equal(x, [[0.5, 1.0, 1.5]])
+        assert_equal(x.dtype, np.dtype('f8'))
+        assert_array_equal(b, [[1.0, 2.0, 3.0]])
+
+    def test_native_list_arguments(self):
+        a = [[1.0, 20, 0, 0],
+             [-30, 4, 6, 0],
+             [2, 1, 20, 2],
+             [0, -1, 7, 14]]
+        ab = [[0.0, 20, 6, 2],
+              [1, 4, 20, 14],
+              [-30, 1, 7, 0],
+              [2, -1, 0, 0]]
+        l, u = 2, 1
+        b = [10.0, 0.0, 2.0, 14.0]
+        x = solve_banded((l, u), ab, b)
+        assert_array_almost_equal(dot(a, x), b)
+
+
+class TestSolveHBanded:
+
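+    # solveh_banded receives only the upper (default) or lower bands of the
+    # Hermitian matrix in diagonal-ordered form; the ``-99`` entries below
+    # mark storage slots that the routine never reads.
+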
+    def test_01_upper(self):
+        # Solve
+        # [ 4 1 2 0]     [1]
+        # [ 1 4 1 2] X = [4]
+        # [ 2 1 4 1]     [1]
+        # [ 0 2 1 4]     [2]
+        # with the RHS as a 1D array.
+        ab = array([[0.0, 0.0, 2.0, 2.0],
+                    [-99, 1.0, 1.0, 1.0],
+                    [4.0, 4.0, 4.0, 4.0]])
+        b = array([1.0, 4.0, 1.0, 2.0])
+        x = solveh_banded(ab, b)
+        assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0])
+
+    def test_02_upper(self):
+        # Solve
+        # [ 4 1 2 0]     [1 6]
+        # [ 1 4 1 2] X = [4 2]
+        # [ 2 1 4 1]     [1 6]
+        # [ 0 2 1 4]     [2 1]
+        #
+        ab = array([[0.0, 0.0, 2.0, 2.0],
+                    [-99, 1.0, 1.0, 1.0],
+                    [4.0, 4.0, 4.0, 4.0]])
+        b = array([[1.0, 6.0],
+                   [4.0, 2.0],
+                   [1.0, 6.0],
+                   [2.0, 1.0]])
+        x = solveh_banded(ab, b)
+        expected = array([[0.0, 1.0],
+                          [1.0, 0.0],
+                          [0.0, 1.0],
+                          [0.0, 0.0]])
+        assert_array_almost_equal(x, expected)
+
+    def test_03_upper(self):
+        # Solve
+        # [ 4 1 2 0]     [1]
+        # [ 1 4 1 2] X = [4]
+        # [ 2 1 4 1]     [1]
+        # [ 0 2 1 4]     [2]
+        # with the RHS as a 2D array with shape (4, 1).
+        ab = array([[0.0, 0.0, 2.0, 2.0],
+                    [-99, 1.0, 1.0, 1.0],
+                    [4.0, 4.0, 4.0, 4.0]])
+        b = array([1.0, 4.0, 1.0, 2.0]).reshape(-1, 1)
+        x = solveh_banded(ab, b)
+        assert_array_almost_equal(x, array([0., 1., 0., 0.]).reshape(-1, 1))
+
+    def test_01_lower(self):
+        # Solve
+        # [ 4 1 2 0]     [1]
+        # [ 1 4 1 2] X = [4]
+        # [ 2 1 4 1]     [1]
+        # [ 0 2 1 4]     [2]
+        #
+        ab = array([[4.0, 4.0, 4.0, 4.0],
+                    [1.0, 1.0, 1.0, -99],
+                    [2.0, 2.0, 0.0, 0.0]])
+        b = array([1.0, 4.0, 1.0, 2.0])
+        x = solveh_banded(ab, b, lower=True)
+        assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0])
+
+    def test_02_lower(self):
+        # Solve
+        # [ 4 1 2 0]     [1 6]
+        # [ 1 4 1 2] X = [4 2]
+        # [ 2 1 4 1]     [1 6]
+        # [ 0 2 1 4]     [2 1]
+        #
+        ab = array([[4.0, 4.0, 4.0, 4.0],
+                    [1.0, 1.0, 1.0, -99],
+                    [2.0, 2.0, 0.0, 0.0]])
+        b = array([[1.0, 6.0],
+                   [4.0, 2.0],
+                   [1.0, 6.0],
+                   [2.0, 1.0]])
+        x = solveh_banded(ab, b, lower=True)
+        expected = array([[0.0, 1.0],
+                          [1.0, 0.0],
+                          [0.0, 1.0],
+                          [0.0, 0.0]])
+        assert_array_almost_equal(x, expected)
+
+    def test_01_float32(self):
+        # Solve
+        # [ 4 1 2 0]     [1]
+        # [ 1 4 1 2] X = [4]
+        # [ 2 1 4 1]     [1]
+        # [ 0 2 1 4]     [2]
+        #
+        ab = array([[0.0, 0.0, 2.0, 2.0],
+                    [-99, 1.0, 1.0, 1.0],
+                    [4.0, 4.0, 4.0, 4.0]], dtype=float32)
+        b = array([1.0, 4.0, 1.0, 2.0], dtype=float32)
+        x = solveh_banded(ab, b)
+        assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0])
+
+    def test_02_float32(self):
+        # Solve
+        # [ 4 1 2 0]     [1 6]
+        # [ 1 4 1 2] X = [4 2]
+        # [ 2 1 4 1]     [1 6]
+        # [ 0 2 1 4]     [2 1]
+        #
+        ab = array([[0.0, 0.0, 2.0, 2.0],
+                    [-99, 1.0, 1.0, 1.0],
+                    [4.0, 4.0, 4.0, 4.0]], dtype=float32)
+        b = array([[1.0, 6.0],
+                   [4.0, 2.0],
+                   [1.0, 6.0],
+                   [2.0, 1.0]], dtype=float32)
+        x = solveh_banded(ab, b)
+        expected = array([[0.0, 1.0],
+                          [1.0, 0.0],
+                          [0.0, 1.0],
+                          [0.0, 0.0]])
+        assert_array_almost_equal(x, expected)
+
+    def test_01_complex(self):
+        # Solve
+        # [ 4 -j  2  0]     [2-j]
+        # [ j  4 -j  2] X = [4-j]
+        # [ 2  j  4 -j]     [4+j]
+        # [ 0  2  j  4]     [2+j]
+        #
+        ab = array([[0.0, 0.0, 2.0, 2.0],
+                    [-99, -1.0j, -1.0j, -1.0j],
+                    [4.0, 4.0, 4.0, 4.0]])
+        b = array([2-1.0j, 4.0-1j, 4+1j, 2+1j])
+        x = solveh_banded(ab, b)
+        assert_array_almost_equal(x, [0.0, 1.0, 1.0, 0.0])
+
+    def test_02_complex(self):
+        # Solve
+        # [ 4 -j  2  0]     [2-j 2+4j]
+        # [ j  4 -j  2] X = [4-j -1-j]
+        # [ 2  j  4 -j]     [4+j 4+2j]
+        # [ 0  2  j  4]     [2+j j]
+        #
+        ab = array([[0.0, 0.0, 2.0, 2.0],
+                    [-99, -1.0j, -1.0j, -1.0j],
+                    [4.0, 4.0, 4.0, 4.0]])
+        b = array([[2-1j, 2+4j],
+                   [4.0-1j, -1-1j],
+                   [4.0+1j, 4+2j],
+                   [2+1j, 1j]])
+        x = solveh_banded(ab, b)
+        expected = array([[0.0, 1.0j],
+                          [1.0, 0.0],
+                          [1.0, 1.0],
+                          [0.0, 0.0]])
+        assert_array_almost_equal(x, expected)
+
+    def test_tridiag_01_upper(self):
+        # Solve
+        # [ 4 1 0]     [1]
+        # [ 1 4 1] X = [4]
+        # [ 0 1 4]     [1]
+        # with the RHS as a 1D array.
+        ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]])
+        b = array([1.0, 4.0, 1.0])
+        x = solveh_banded(ab, b)
+        assert_array_almost_equal(x, [0.0, 1.0, 0.0])
+
+    def test_tridiag_02_upper(self):
+        # Solve
+        # [ 4 1 0]     [1 4]
+        # [ 1 4 1] X = [4 2]
+        # [ 0 1 4]     [1 4]
+        #
+        ab = array([[-99, 1.0, 1.0],
+                    [4.0, 4.0, 4.0]])
+        b = array([[1.0, 4.0],
+                   [4.0, 2.0],
+                   [1.0, 4.0]])
+        x = solveh_banded(ab, b)
+        expected = array([[0.0, 1.0],
+                          [1.0, 0.0],
+                          [0.0, 1.0]])
+        assert_array_almost_equal(x, expected)
+
+    def test_tridiag_03_upper(self):
+        # Solve
+        # [ 4 1 0]     [1]
+        # [ 1 4 1] X = [4]
+        # [ 0 1 4]     [1]
+        # with the RHS as a 2D array with shape (3,1).
+        ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]])
+        b = array([1.0, 4.0, 1.0]).reshape(-1, 1)
+        x = solveh_banded(ab, b)
+        assert_array_almost_equal(x, array([0.0, 1.0, 0.0]).reshape(-1, 1))
+
+    def test_tridiag_01_lower(self):
+        # Solve
+        # [ 4 1 0]     [1]
+        # [ 1 4 1] X = [4]
+        # [ 0 1 4]     [1]
+        #
+        ab = array([[4.0, 4.0, 4.0],
+                    [1.0, 1.0, -99]])
+        b = array([1.0, 4.0, 1.0])
+        x = solveh_banded(ab, b, lower=True)
+        assert_array_almost_equal(x, [0.0, 1.0, 0.0])
+
+    def test_tridiag_02_lower(self):
+        # Solve
+        # [ 4 1 0]     [1 4]
+        # [ 1 4 1] X = [4 2]
+        # [ 0 1 4]     [1 4]
+        #
+        ab = array([[4.0, 4.0, 4.0],
+                    [1.0, 1.0, -99]])
+        b = array([[1.0, 4.0],
+                   [4.0, 2.0],
+                   [1.0, 4.0]])
+        x = solveh_banded(ab, b, lower=True)
+        expected = array([[0.0, 1.0],
+                          [1.0, 0.0],
+                          [0.0, 1.0]])
+        assert_array_almost_equal(x, expected)
+
+    def test_tridiag_01_float32(self):
+        # Solve
+        # [ 4 1 0]     [1]
+        # [ 1 4 1] X = [4]
+        # [ 0 1 4]     [1]
+        #
+        ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]], dtype=float32)
+        b = array([1.0, 4.0, 1.0], dtype=float32)
+        x = solveh_banded(ab, b)
+        assert_array_almost_equal(x, [0.0, 1.0, 0.0])
+
+    def test_tridiag_02_float32(self):
+        # Solve
+        # [ 4 1 0]     [1 4]
+        # [ 1 4 1] X = [4 2]
+        # [ 0 1 4]     [1 4]
+        #
+        ab = array([[-99, 1.0, 1.0],
+                    [4.0, 4.0, 4.0]], dtype=float32)
+        b = array([[1.0, 4.0],
+                   [4.0, 2.0],
+                   [1.0, 4.0]], dtype=float32)
+        x = solveh_banded(ab, b)
+        expected = array([[0.0, 1.0],
+                          [1.0, 0.0],
+                          [0.0, 1.0]])
+        assert_array_almost_equal(x, expected)
+
+    def test_tridiag_01_complex(self):
+        # Solve
+        # [ 4 -j 0]     [ -j]
+        # [ j 4 -j] X = [4-j]
+        # [ 0 j  4]     [4+j]
+        #
+        ab = array([[-99, -1.0j, -1.0j], [4.0, 4.0, 4.0]])
+        b = array([-1.0j, 4.0-1j, 4+1j])
+        x = solveh_banded(ab, b)
+        assert_array_almost_equal(x, [0.0, 1.0, 1.0])
+
+    def test_tridiag_02_complex(self):
+        # Solve
+        # [ 4 -j 0]     [ -j    4j]
+        # [ j 4 -j] X = [4-j  -1-j]
+        # [ 0 j  4]     [4+j   4  ]
+        #
+        ab = array([[-99, -1.0j, -1.0j],
+                    [4.0, 4.0, 4.0]])
+        b = array([[-1j, 4.0j],
+                   [4.0-1j, -1.0-1j],
+                   [4.0+1j, 4.0]])
+        x = solveh_banded(ab, b)
+        expected = array([[0.0, 1.0j],
+                          [1.0, 0.0],
+                          [1.0, 1.0]])
+        assert_array_almost_equal(x, expected)
+
+    def test_check_finite(self):
+        # Solve
+        # [ 4 1 0]     [1]
+        # [ 1 4 1] X = [4]
+        # [ 0 1 4]     [1]
+        # with the RHS as a 1D array.
+        ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]])
+        b = array([1.0, 4.0, 1.0])
+        x = solveh_banded(ab, b, check_finite=False)
+        assert_array_almost_equal(x, [0.0, 1.0, 0.0])
+
+    def test_bad_shapes(self):
+        ab = array([[-99, 1.0, 1.0],
+                    [4.0, 4.0, 4.0]])
+        b = array([[1.0, 4.0],
+                   [4.0, 2.0]])
+        assert_raises(ValueError, solveh_banded, ab, b)
+        assert_raises(ValueError, solveh_banded, ab, [1.0, 2.0])
+        assert_raises(ValueError, solveh_banded, ab, [1.0])
+
+    def test_1x1(self):
+        x = solveh_banded([[1]], [[1, 2, 3]])
+        assert_array_equal(x, [[1.0, 2.0, 3.0]])
+        assert_equal(x.dtype, np.dtype('f8'))
+
+    def test_native_list_arguments(self):
+        # Same as test_01_upper, using Python's native lists.
+        ab = [[0.0, 0.0, 2.0, 2.0],
+              [-99, 1.0, 1.0, 1.0],
+              [4.0, 4.0, 4.0, 4.0]]
+        b = [1.0, 4.0, 1.0, 2.0]
+        x = solveh_banded(ab, b)
+        assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0])
+
+
+class TestSolve:
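+    # ``assume_a`` selects the LAPACK driver used by ``solve``: 'gen'
+    # (general, LU via ?gesv), 'sym' (symmetric indefinite, ?sysv),
+    # 'her' (Hermitian indefinite, ?hesv) and 'pos' (Cholesky, ?posv).
+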
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_20Feb04_bug(self):
+        a = [[1, 1], [1.0, 0]]  # ok
+        x0 = solve(a, [1, 0j])
+        assert_array_almost_equal(dot(a, x0), [1, 0])
+
+        # gives failure with clapack.zgesv(..,rowmajor=0)
+        a = [[1, 1], [1.2, 0]]
+        b = [1, 0j]
+        x0 = solve(a, b)
+        assert_array_almost_equal(dot(a, x0), [1, 0])
+
+    def test_simple(self):
+        a = [[1, 20], [-30, 4]]
+        for b in ([[1, 0], [0, 1]],
+                  [1, 0],
+                  [[2, 1], [-30, 4]]
+                  ):
+            x = solve(a, b)
+            assert_array_almost_equal(dot(a, x), b)
+
+    def test_simple_complex(self):
+        a = array([[5, 2], [2j, 4]], 'D')
+        for b in ([1j, 0],
+                  [[1j, 1j], [0, 2]],
+                  [1, 0j],
+                  array([1, 0], 'D'),
+                  ):
+            x = solve(a, b)
+            assert_array_almost_equal(dot(a, x), b)
+
+    def test_simple_pos(self):
+        a = [[2, 3], [3, 5]]
+        for lower in [0, 1]:
+            for b in ([[1, 0], [0, 1]],
+                      [1, 0]
+                      ):
+                x = solve(a, b, assume_a='pos', lower=lower)
+                assert_array_almost_equal(dot(a, x), b)
+
+    def test_simple_pos_complexb(self):
+        a = [[5, 2], [2, 4]]
+        for b in ([1j, 0],
+                  [[1j, 1j], [0, 2]],
+                  ):
+            x = solve(a, b, assume_a='pos')
+            assert_array_almost_equal(dot(a, x), b)
+
+    def test_simple_sym(self):
+        a = [[2, 3], [3, -5]]
+        for lower in [0, 1]:
+            for b in ([[1, 0], [0, 1]],
+                      [1, 0]
+                      ):
+                x = solve(a, b, assume_a='sym', lower=lower)
+                assert_array_almost_equal(dot(a, x), b)
+
+    def test_simple_sym_complexb(self):
+        a = [[5, 2], [2, -4]]
+        for b in ([1j, 0],
+                  [[1j, 1j], [0, 2]]
+                  ):
+            x = solve(a, b, assume_a='sym')
+            assert_array_almost_equal(dot(a, x), b)
+
+    def test_simple_sym_complex(self):
+        a = [[5, 2+1j], [2+1j, -4]]
+        for b in ([1j, 0],
+                  [1, 0],
+                  [[1j, 1j], [0, 2]]
+                  ):
+            x = solve(a, b, assume_a='sym')
+            assert_array_almost_equal(dot(a, x), b)
+
+    def test_simple_her_actuallysym(self):
+        a = [[2, 3], [3, -5]]
+        for lower in [0, 1]:
+            for b in ([[1, 0], [0, 1]],
+                      [1, 0],
+                      [1j, 0],
+                      ):
+                x = solve(a, b, assume_a='her', lower=lower)
+                assert_array_almost_equal(dot(a, x), b)
+
+    def test_simple_her(self):
+        a = [[5, 2+1j], [2-1j, -4]]
+        for b in ([1j, 0],
+                  [1, 0],
+                  [[1j, 1j], [0, 2]]
+                  ):
+            x = solve(a, b, assume_a='her')
+            assert_array_almost_equal(dot(a, x), b)
+
+    def test_nils_20Feb04(self):
+        n = 2
+        A = random([n, n])+random([n, n])*1j
+        X = zeros((n, n), 'D')
+        Ainv = inv(A)
+        R = identity(n)+identity(n)*0j
+        for i in arange(0, n):
+            r = R[:, i]
+            X[:, i] = solve(A, r)
+        assert_array_almost_equal(X, Ainv)
+
+    def test_random(self):
+
+        n = 20
+        a = random([n, n])
+        for i in range(n):
+            a[i, i] = 20*(.1+a[i, i])
+        for i in range(4):
+            b = random([n, 3])
+            x = solve(a, b)
+            assert_array_almost_equal(dot(a, x), b)
+
+    def test_random_complex(self):
+        n = 20
+        a = random([n, n]) + 1j * random([n, n])
+        for i in range(n):
+            a[i, i] = 20*(.1+a[i, i])
+        for i in range(2):
+            b = random([n, 3])
+            x = solve(a, b)
+            assert_array_almost_equal(dot(a, x), b)
+
+    def test_sym_pos_dep(self):
+        with pytest.warns(
+                DeprecationWarning,
+                match="The 'sym_pos' keyword is deprecated",
+        ):
+            solve([[1.]], [1], sym_pos=True)
+
+    def test_random_sym(self):
+        n = 20
+        a = random([n, n])
+        for i in range(n):
+            a[i, i] = abs(20*(.1+a[i, i]))
+            for j in range(i):
+                a[i, j] = a[j, i]
+        for i in range(4):
+            b = random([n])
+            x = solve(a, b, assume_a="pos")
+            assert_array_almost_equal(dot(a, x), b)
+
+    def test_random_sym_complex(self):
+        n = 20
+        a = random([n, n])
+        a = a + 1j*random([n, n])
+        for i in range(n):
+            a[i, i] = abs(20*(.1+a[i, i]))
+            for j in range(i):
+                a[i, j] = conjugate(a[j, i])
+        b = random([n])+2j*random([n])
+        for i in range(2):
+            x = solve(a, b, assume_a="pos")
+            assert_array_almost_equal(dot(a, x), b)
+
+    def test_check_finite(self):
+        a = [[1, 20], [-30, 4]]
+        for b in ([[1, 0], [0, 1]], [1, 0],
+                  [[2, 1], [-30, 4]]):
+            x = solve(a, b, check_finite=False)
+            assert_array_almost_equal(dot(a, x), b)
+
+    def test_scalar_a_and_1D_b(self):
+        a = 1
+        b = [1, 2, 3]
+        x = solve(a, b)
+        assert_array_almost_equal(x.ravel(), b)
+        assert_(x.shape == (3,), 'Scalar_a_1D_b test returned wrong shape')
+
+    def test_simple2(self):
+        a = np.array([[1.80, 2.88, 2.05, -0.89],
+                      [525.00, -295.00, -95.00, -380.00],
+                      [1.58, -2.69, -2.90, -1.04],
+                      [-1.11, -0.66, -0.59, 0.80]])
+
+        b = np.array([[9.52, 18.47],
+                      [2435.00, 225.00],
+                      [0.77, -13.28],
+                      [-6.22, -6.21]])
+
+        x = solve(a, b)
+        assert_array_almost_equal(x, np.array([[1., -1, 3, -5],
+                                               [3, 2, 4, 1]]).T)
+
+    def test_simple_complex2(self):
+        a = np.array([[-1.34+2.55j, 0.28+3.17j, -6.39-2.20j, 0.72-0.92j],
+                      [-1.70-14.10j, 33.10-1.50j, -1.50+13.40j, 12.90+13.80j],
+                      [-3.29-2.39j, -1.91+4.42j, -0.14-1.35j, 1.72+1.35j],
+                      [2.41+0.39j, -0.56+1.47j, -0.83-0.69j, -1.96+0.67j]])
+
+        b = np.array([[26.26+51.78j, 31.32-6.70j],
+                      [64.30-86.80j, 158.60-14.20j],
+                      [-5.75+25.31j, -2.15+30.19j],
+                      [1.16+2.57j, -2.56+7.55j]])
+
+        x = solve(a, b)
+        assert_array_almost_equal(x, np.array([[1+1.j, -1-2.j],
+                                               [2-3.j, 5+1.j],
+                                               [-4-5.j, -3+4.j],
+                                               [6.j, 2-3.j]]))
+
+    def test_hermitian(self):
+        # Only the upper triangle of the Hermitian matrix a is stored here
+        a = np.array([[-1.84, 0.11-0.11j, -1.78-1.18j, 3.91-1.50j],
+                      [0, -4.63, -1.84+0.03j, 2.21+0.21j],
+                      [0, 0, -8.87, 1.58-0.90j],
+                      [0, 0, 0, -1.36]])
+        b = np.array([[2.98-10.18j, 28.68-39.89j],
+                      [-9.58+3.88j, -24.79-8.40j],
+                      [-0.77-16.05j, 4.23-70.02j],
+                      [7.79+5.48j, -35.39+18.01j]])
+        res = np.array([[2.+1j, -8+6j],
+                        [3.-2j, 7-2j],
+                        [-1+2j, -1+5j],
+                        [1.-1j, 3-4j]])
+        x = solve(a, b, assume_a='her')
+        assert_array_almost_equal(x, res)
+        # Also conjugate a and test for lower triangular data
+        x = solve(a.conj().T, b, assume_a='her', lower=True)
+        assert_array_almost_equal(x, res)
+
+    def test_pos_and_sym(self):
+        A = np.arange(1, 10).reshape(3, 3)
+        x = solve(np.tril(A)/9, np.ones(3), assume_a='pos')
+        assert_array_almost_equal(x, [9., 1.8, 1.])
+        x = solve(np.tril(A)/9, np.ones(3), assume_a='sym')
+        assert_array_almost_equal(x, [9., 1.8, 1.])
+
+    def test_singularity(self):
+        a = np.array([[1, 0, 0, 0, 0, 0, 1, 0, 1],
+                      [1, 1, 1, 0, 0, 0, 1, 0, 1],
+                      [0, 1, 1, 0, 0, 0, 1, 0, 1],
+                      [1, 0, 1, 1, 1, 1, 0, 0, 0],
+                      [1, 0, 1, 1, 1, 1, 0, 0, 0],
+                      [1, 0, 1, 1, 1, 1, 0, 0, 0],
+                      [1, 0, 1, 1, 1, 1, 0, 0, 0],
+                      [1, 1, 1, 1, 1, 1, 1, 1, 1],
+                      [1, 1, 1, 1, 1, 1, 1, 1, 1]])
+        b = np.arange(9)[:, None]
+        assert_raises(LinAlgError, solve, a, b)
+
+    def test_ill_condition_warning(self):
+        a = np.array([[1, 1], [1+1e-16, 1-1e-16]])
+        b = np.ones(2)
+        with warnings.catch_warnings():
+            warnings.simplefilter('error')
+            assert_raises(LinAlgWarning, solve, a, b)
+
+    def test_empty_rhs(self):
+        a = np.eye(2)
+        b = [[], []]
+        x = solve(a, b)
+        assert_(x.size == 0, 'Returned array is not empty')
+        assert_(x.shape == (2, 0), 'Returned empty array shape is wrong')
+
+    def test_multiple_rhs(self):
+        a = np.eye(2)
+        b = np.random.rand(2, 3, 4)
+        x = solve(a, b)
+        assert_array_almost_equal(x, b)
+
+    def test_transposed_keyword(self):
+        A = np.arange(9).reshape(3, 3) + 1
+        x = solve(np.tril(A)/9, np.ones(3), transposed=True)
+        assert_array_almost_equal(x, [1.2, 0.2, 1])
+        x = solve(np.tril(A)/9, np.ones(3), transposed=False)
+        assert_array_almost_equal(x, [9, -5.4, -1.2])
+
+    def test_transposed_notimplemented(self):
+        a = np.eye(3).astype(complex)
+        with assert_raises(NotImplementedError):
+            solve(a, a, transposed=True)
+
+    def test_nonsquare_a(self):
+        assert_raises(ValueError, solve, [1, 2], 1)
+
+    def test_size_mismatch_with_1D_b(self):
+        assert_array_almost_equal(solve(np.eye(3), np.ones(3)), np.ones(3))
+        assert_raises(ValueError, solve, np.eye(3), np.ones(4))
+
+    def test_assume_a_keyword(self):
+        assert_raises(ValueError, solve, 1, 1, assume_a='zxcv')
+
+    @pytest.mark.skip(reason="Failure on OS X (gh-7500), "
+                             "crash on Windows (gh-8064)")
+    def test_all_type_size_routine_combinations(self):
+        sizes = [10, 100]
+        assume_as = ['gen', 'sym', 'pos', 'her']
+        dtypes = [np.float32, np.float64, np.complex64, np.complex128]
+        for size, assume_a, dtype in itertools.product(sizes, assume_as,
+                                                       dtypes):
+            is_complex = dtype in (np.complex64, np.complex128)
+            if assume_a == 'her' and not is_complex:
+                continue
+
+            err_msg = ("Failed for size: {}, assume_a: {},"
+                       "dtype: {}".format(size, assume_a, dtype))
+
+            a = np.random.randn(size, size).astype(dtype)
+            b = np.random.randn(size).astype(dtype)
+            if is_complex:
+                a = a + (1j*np.random.randn(size, size)).astype(dtype)
+
+            if assume_a == 'sym':  # Can still be complex but only symmetric
+                a = a + a.T
+            elif assume_a == 'her':  # Handle hermitian matrices here instead
+                a = a + a.T.conj()
+            elif assume_a == 'pos':
+                a = a.conj().T.dot(a) + 0.1*np.eye(size)
+
+            tol = 1e-12 if dtype in (np.float64, np.complex128) else 1e-6
+
+            if assume_a in ['gen', 'sym', 'her']:
+                # We revert the tolerance from before
+                #   4b4a6e7c34fa4060533db38f9a819b98fa81476c
+                if dtype in (np.float32, np.complex64):
+                    tol *= 10
+
+            x = solve(a, b, assume_a=assume_a)
+            assert_allclose(a.dot(x), b,
+                            atol=tol * size,
+                            rtol=tol * size,
+                            err_msg=err_msg)
+
+            if assume_a == 'sym' and dtype not in (np.complex64,
+                                                   np.complex128):
+                x = solve(a, b, assume_a=assume_a, transposed=True)
+                assert_allclose(a.dot(x), b,
+                                atol=tol * size,
+                                rtol=tol * size,
+                                err_msg=err_msg)
+
+
+class TestSolveTriangular:
+
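+    # ``trans`` selects the system being solved: 0 or 'N' -> a x = b,
+    # 1 or 'T' -> a^T x = b, 2 or 'C' -> a^H x = b.
+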
+    def test_simple(self):
+        """
+        solve_triangular on a simple 2x2 matrix.
+        """
+        A = array([[1, 0], [1, 2]])
+        b = [1, 1]
+        sol = solve_triangular(A, b, lower=True)
+        assert_array_almost_equal(sol, [1, 0])
+
+        # check that it also works for non-contiguous matrices
+        sol = solve_triangular(A.T, b, lower=False)
+        assert_array_almost_equal(sol, [.5, .5])
+
+        # and that it gives the same result as trans=1
+        sol = solve_triangular(A, b, lower=True, trans=1)
+        assert_array_almost_equal(sol, [.5, .5])
+
+        b = identity(2)
+        sol = solve_triangular(A, b, lower=True, trans=1)
+        assert_array_almost_equal(sol, [[1., -.5], [0, 0.5]])
+
+    def test_simple_complex(self):
+        """
+        solve_triangular on a simple 2x2 complex matrix
+        """
+        A = array([[1+1j, 0], [1j, 2]])
+        b = identity(2)
+        sol = solve_triangular(A, b, lower=True, trans=1)
+        assert_array_almost_equal(sol, [[.5-.5j, -.25-.25j], [0, 0.5]])
+
+        # check other option combinations with complex rhs
+        b = np.diag([1+1j, 1+2j])
+        sol = solve_triangular(A, b, lower=True, trans=0)
+        assert_array_almost_equal(sol, [[1, 0], [-0.5j, 0.5+1j]])
+
+        sol = solve_triangular(A, b, lower=True, trans=1)
+        assert_array_almost_equal(sol, [[1, 0.25-0.75j], [0, 0.5+1j]])
+
+        sol = solve_triangular(A, b, lower=True, trans=2)
+        assert_array_almost_equal(sol, [[1j, -0.75-0.25j], [0, 0.5+1j]])
+
+        sol = solve_triangular(A.T, b, lower=False, trans=0)
+        assert_array_almost_equal(sol, [[1, 0.25-0.75j], [0, 0.5+1j]])
+
+        sol = solve_triangular(A.T, b, lower=False, trans=1)
+        assert_array_almost_equal(sol, [[1, 0], [-0.5j, 0.5+1j]])
+
+        sol = solve_triangular(A.T, b, lower=False, trans=2)
+        assert_array_almost_equal(sol, [[1j, 0], [-0.5, 0.5+1j]])
+
+    def test_check_finite(self):
+        """
+        solve_triangular on a simple 2x2 matrix.
+        """
+        A = array([[1, 0], [1, 2]])
+        b = [1, 1]
+        sol = solve_triangular(A, b, lower=True, check_finite=False)
+        assert_array_almost_equal(sol, [1, 0])
+
+
+class TestInv:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_simple(self):
+        a = [[1, 2], [3, 4]]
+        a_inv = inv(a)
+        assert_array_almost_equal(dot(a, a_inv), np.eye(2))
+        a = [[1, 2, 3], [4, 5, 6], [7, 8, 10]]
+        a_inv = inv(a)
+        assert_array_almost_equal(dot(a, a_inv), np.eye(3))
+
+    def test_random(self):
+        n = 20
+        for i in range(4):
+            a = random([n, n])
+            for i in range(n):
+                a[i, i] = 20*(.1+a[i, i])
+            a_inv = inv(a)
+            assert_array_almost_equal(dot(a, a_inv),
+                                      identity(n))
+
+    def test_simple_complex(self):
+        a = [[1, 2], [3, 4j]]
+        a_inv = inv(a)
+        assert_array_almost_equal(dot(a, a_inv), [[1, 0], [0, 1]])
+
+    def test_random_complex(self):
+        n = 20
+        for i in range(4):
+            a = random([n, n])+2j*random([n, n])
+            for i in range(n):
+                a[i, i] = 20*(.1+a[i, i])
+            a_inv = inv(a)
+            assert_array_almost_equal(dot(a, a_inv),
+                                      identity(n))
+
+    def test_check_finite(self):
+        a = [[1, 2], [3, 4]]
+        a_inv = inv(a, check_finite=False)
+        assert_array_almost_equal(dot(a, a_inv), [[1, 0], [0, 1]])
+
+
+class TestDet:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_simple(self):
+        a = [[1, 2], [3, 4]]
+        a_det = det(a)
+        assert_almost_equal(a_det, -2.0)
+
+    def test_simple_complex(self):
+        a = [[1, 2], [3, 4j]]
+        a_det = det(a)
+        assert_almost_equal(a_det, -6+4j)
+
+    def test_random(self):
+        basic_det = linalg.det
+        n = 20
+        for i in range(4):
+            a = random([n, n])
+            d1 = det(a)
+            d2 = basic_det(a)
+            assert_almost_equal(d1, d2)
+
+    def test_random_complex(self):
+        basic_det = linalg.det
+        n = 20
+        for i in range(4):
+            a = random([n, n]) + 2j*random([n, n])
+            d1 = det(a)
+            d2 = basic_det(a)
+            assert_allclose(d1, d2, rtol=1e-13)
+
+    def test_check_finite(self):
+        a = [[1, 2], [3, 4]]
+        a_det = det(a, check_finite=False)
+        assert_almost_equal(a_det, -2.0)
+
+
+def direct_lstsq(a, b, cmplx=0):
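+    # Reference solution via the normal equations a^H a x = a^H b
+    # (a^T for real input).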
+    at = transpose(a)
+    if cmplx:
+        at = conjugate(at)
+    a1 = dot(at, a)
+    b1 = dot(at, b)
+    return solve(a1, b1)
+
+
+class TestLstsq:
+
+    lapack_drivers = ('gelsd', 'gelss', 'gelsy', None)
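+    # ``None`` exercises lstsq's default driver selection ('gelsd').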
+
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_simple_exact(self):
+        for dtype in REAL_DTYPES:
+            a = np.array([[1, 20], [-30, 4]], dtype=dtype)
+            for lapack_driver in TestLstsq.lapack_drivers:
+                for overwrite in (True, False):
+                    for bt in (((1, 0), (0, 1)), (1, 0),
+                               ((2, 1), (-30, 4))):
+                        # Store values in case they are overwritten
+                        # later
+                        a1 = a.copy()
+                        b = np.array(bt, dtype=dtype)
+                        b1 = b.copy()
+                        out = lstsq(a1, b1,
+                                    lapack_driver=lapack_driver,
+                                    overwrite_a=overwrite,
+                                    overwrite_b=overwrite)
+                        x = out[0]
+                        r = out[2]
+                        assert_(r == 2,
+                                'expected effective rank 2, got %s' % r)
+                        assert_allclose(dot(a, x), b,
+                                        atol=25 * _eps_cast(a1.dtype),
+                                        rtol=25 * _eps_cast(a1.dtype),
+                                        err_msg="driver: %s" % lapack_driver)
+
+    def test_simple_overdet(self):
+        for dtype in REAL_DTYPES:
+            a = np.array([[1, 2], [4, 5], [3, 4]], dtype=dtype)
+            b = np.array([1, 2, 3], dtype=dtype)
+            for lapack_driver in TestLstsq.lapack_drivers:
+                for overwrite in (True, False):
+                    # Store values in case they are overwritten later
+                    a1 = a.copy()
+                    b1 = b.copy()
+                    out = lstsq(a1, b1, lapack_driver=lapack_driver,
+                                overwrite_a=overwrite,
+                                overwrite_b=overwrite)
+                    x = out[0]
+                    if lapack_driver == 'gelsy':
+                        residuals = np.sum((b - a.dot(x))**2)
+                    else:
+                        residuals = out[1]
+                    r = out[2]
+                    assert_(r == 2, 'expected effective rank 2, got %s' % r)
+                    assert_allclose(abs((dot(a, x) - b)**2).sum(axis=0),
+                                    residuals,
+                                    rtol=25 * _eps_cast(a1.dtype),
+                                    atol=25 * _eps_cast(a1.dtype),
+                                    err_msg="driver: %s" % lapack_driver)
+                    assert_allclose(x, (-0.428571428571429, 0.85714285714285),
+                                    rtol=25 * _eps_cast(a1.dtype),
+                                    atol=25 * _eps_cast(a1.dtype),
+                                    err_msg="driver: %s" % lapack_driver)
+
+    def test_simple_overdet_complex(self):
+        for dtype in COMPLEX_DTYPES:
+            a = np.array([[1+2j, 2], [4, 5], [3, 4]], dtype=dtype)
+            b = np.array([1, 2+4j, 3], dtype=dtype)
+            for lapack_driver in TestLstsq.lapack_drivers:
+                for overwrite in (True, False):
+                    # Store values in case they are overwritten later
+                    a1 = a.copy()
+                    b1 = b.copy()
+                    out = lstsq(a1, b1, lapack_driver=lapack_driver,
+                                overwrite_a=overwrite,
+                                overwrite_b=overwrite)
+
+                    x = out[0]
+                    if lapack_driver == 'gelsy':
+                        res = b - a.dot(x)
+                        residuals = np.sum(res * res.conj())
+                    else:
+                        residuals = out[1]
+                    r = out[2]
+                    assert_(r == 2, 'expected effective rank 2, got %s' % r)
+                    assert_allclose(abs((dot(a, x) - b)**2).sum(axis=0),
+                                    residuals,
+                                    rtol=25 * _eps_cast(a1.dtype),
+                                    atol=25 * _eps_cast(a1.dtype),
+                                    err_msg="driver: %s" % lapack_driver)
+                    assert_allclose(
+                                x, (-0.4831460674157303 + 0.258426966292135j,
+                                    0.921348314606741 + 0.292134831460674j),
+                                rtol=25 * _eps_cast(a1.dtype),
+                                atol=25 * _eps_cast(a1.dtype),
+                                err_msg="driver: %s" % lapack_driver)
+
+    def test_simple_underdet(self):
+        for dtype in REAL_DTYPES:
+            a = np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
+            b = np.array([1, 2], dtype=dtype)
+            for lapack_driver in TestLstsq.lapack_drivers:
+                for overwrite in (True, False):
+                    # Store values in case they are overwritten later
+                    a1 = a.copy()
+                    b1 = b.copy()
+                    out = lstsq(a1, b1, lapack_driver=lapack_driver,
+                                overwrite_a=overwrite,
+                                overwrite_b=overwrite)
+
+                    x = out[0]
+                    r = out[2]
+                    assert_(r == 2, 'expected effective rank 2, got %s' % r)
+                    assert_allclose(x, (-0.055555555555555, 0.111111111111111,
+                                        0.277777777777777),
+                                    rtol=25 * _eps_cast(a1.dtype),
+                                    atol=25 * _eps_cast(a1.dtype),
+                                    err_msg="driver: %s" % lapack_driver)
+
+    def test_random_exact(self):
+        for dtype in REAL_DTYPES:
+            for n in (20, 200):
+                for lapack_driver in TestLstsq.lapack_drivers:
+                    for overwrite in (True, False):
+                        a = np.asarray(random([n, n]), dtype=dtype)
+                        for i in range(n):
+                            a[i, i] = 20 * (0.1 + a[i, i])
+                        for i in range(4):
+                            b = np.asarray(random([n, 3]), dtype=dtype)
+                            # Store values in case they are overwritten later
+                            a1 = a.copy()
+                            b1 = b.copy()
+                            out = lstsq(a1, b1,
+                                        lapack_driver=lapack_driver,
+                                        overwrite_a=overwrite,
+                                        overwrite_b=overwrite)
+                            x = out[0]
+                            r = out[2]
+                            assert_(r == n, 'expected effective rank %s, '
+                                    'got %s' % (n, r))
+                            if dtype is np.float32:
+                                assert_allclose(
+                                          dot(a, x), b,
+                                          rtol=500 * _eps_cast(a1.dtype),
+                                          atol=500 * _eps_cast(a1.dtype),
+                                          err_msg="driver: %s" % lapack_driver)
+                            else:
+                                assert_allclose(
+                                          dot(a, x), b,
+                                          rtol=1000 * _eps_cast(a1.dtype),
+                                          atol=1000 * _eps_cast(a1.dtype),
+                                          err_msg="driver: %s" % lapack_driver)
+
+    def test_random_complex_exact(self):
+        if platform.system() != "Windows":
+            if _pep440.parse(np.__version__) >= _pep440.Version("1.24.0"):
+                libc_flavor = platform.libc_ver()[0]
+                if libc_flavor != "glibc":
+                    pytest.skip("segfault observed on alpine per gh-17630")
+        for dtype in COMPLEX_DTYPES:
+            for n in (20, 200):
+                for lapack_driver in TestLstsq.lapack_drivers:
+                    for overwrite in (True, False):
+                        a = np.asarray(random([n, n]) + 1j*random([n, n]),
+                                       dtype=dtype)
+                        for i in range(n):
+                            a[i, i] = 20 * (0.1 + a[i, i])
+                        for i in range(2):
+                            b = np.asarray(random([n, 3]), dtype=dtype)
+                            # Store values in case they are overwritten later
+                            a1 = a.copy()
+                            b1 = b.copy()
+                            out = lstsq(a1, b1, lapack_driver=lapack_driver,
+                                        overwrite_a=overwrite,
+                                        overwrite_b=overwrite)
+                            x = out[0]
+                            r = out[2]
+                            assert_(r == n, 'expected effective rank %s, '
+                                    'got %s' % (n, r))
+                            if dtype is np.complex64:
+                                assert_allclose(
+                                          dot(a, x), b,
+                                          rtol=400 * _eps_cast(a1.dtype),
+                                          atol=400 * _eps_cast(a1.dtype),
+                                          err_msg="driver: %s" % lapack_driver)
+                            else:
+                                assert_allclose(
+                                          dot(a, x), b,
+                                          rtol=1000 * _eps_cast(a1.dtype),
+                                          atol=1000 * _eps_cast(a1.dtype),
+                                          err_msg="driver: %s" % lapack_driver)
+
+    def test_random_overdet(self):
+        for dtype in REAL_DTYPES:
+            for (n, m) in ((20, 15), (200, 2)):
+                for lapack_driver in TestLstsq.lapack_drivers:
+                    for overwrite in (True, False):
+                        a = np.asarray(random([n, m]), dtype=dtype)
+                        for i in range(m):
+                            a[i, i] = 20 * (0.1 + a[i, i])
+                        for i in range(4):
+                            b = np.asarray(random([n, 3]), dtype=dtype)
+                            # Store values in case they are overwritten later
+                            a1 = a.copy()
+                            b1 = b.copy()
+                            out = lstsq(a1, b1,
+                                        lapack_driver=lapack_driver,
+                                        overwrite_a=overwrite,
+                                        overwrite_b=overwrite)
+                            x = out[0]
+                            r = out[2]
+                            assert_(r == m, 'expected effective rank %s, '
+                                    'got %s' % (m, r))
+                            assert_allclose(
+                                          x, direct_lstsq(a, b, cmplx=0),
+                                          rtol=25 * _eps_cast(a1.dtype),
+                                          atol=25 * _eps_cast(a1.dtype),
+                                          err_msg="driver: %s" % lapack_driver)
+
+    def test_random_complex_overdet(self):
+        for dtype in COMPLEX_DTYPES:
+            for (n, m) in ((20, 15), (200, 2)):
+                for lapack_driver in TestLstsq.lapack_drivers:
+                    for overwrite in (True, False):
+                        a = np.asarray(random([n, m]) + 1j*random([n, m]),
+                                       dtype=dtype)
+                        for i in range(m):
+                            a[i, i] = 20 * (0.1 + a[i, i])
+                        for i in range(2):
+                            b = np.asarray(random([n, 3]), dtype=dtype)
+                            # Store values in case they are overwritten
+                            # later
+                            a1 = a.copy()
+                            b1 = b.copy()
+                            out = lstsq(a1, b1,
+                                        lapack_driver=lapack_driver,
+                                        overwrite_a=overwrite,
+                                        overwrite_b=overwrite)
+                            x = out[0]
+                            r = out[2]
+                            assert_(r == m, 'expected effective rank %s, '
+                                    'got %s' % (m, r))
+                            assert_allclose(
+                                      x, direct_lstsq(a, b, cmplx=1),
+                                      rtol=25 * _eps_cast(a1.dtype),
+                                      atol=25 * _eps_cast(a1.dtype),
+                                      err_msg="driver: %s" % lapack_driver)
+
+    def test_check_finite(self):
+        with suppress_warnings() as sup:
+            # On (some) OSX systems this test triggers a warning (gh-7538)
+            sup.filter(RuntimeWarning,
+                       "internal gelsd driver lwork query error,.*"
+                       "Falling back to 'gelss' driver.")
+
+            at = np.array(((1, 20), (-30, 4)))
+            for dtype, bt, lapack_driver, overwrite, check_finite in \
+                itertools.product(REAL_DTYPES,
+                                  (((1, 0), (0, 1)), (1, 0),
+                                   ((2, 1), (-30, 4))),
+                                  TestLstsq.lapack_drivers,
+                                  (True, False),
+                                  (True, False)):
+
+                a = at.astype(dtype)
+                b = np.array(bt, dtype=dtype)
+                # Store values in case they are overwritten later
+                a1 = a.copy()
+                b1 = b.copy()
+                out = lstsq(a1, b1, lapack_driver=lapack_driver,
+                            check_finite=check_finite, overwrite_a=overwrite,
+                            overwrite_b=overwrite)
+                x = out[0]
+                r = out[2]
+                assert_(r == 2, 'expected effective rank 2, got %s' % r)
+                assert_allclose(dot(a, x), b,
+                                rtol=25 * _eps_cast(a.dtype),
+                                atol=25 * _eps_cast(a.dtype),
+                                err_msg="driver: %s" % lapack_driver)
+
+    def test_zero_size(self):
+        for a_shape, b_shape in (((0, 2), (0,)),
+                                 ((0, 4), (0, 2)),
+                                 ((4, 0), (4,)),
+                                 ((4, 0), (4, 2))):
+            b = np.ones(b_shape)
+            x, residues, rank, s = lstsq(np.zeros(a_shape), b)
+            assert_equal(x, np.zeros((a_shape[1],) + b_shape[1:]))
+            residues_should_be = (np.empty((0,)) if a_shape[1]
+                                  else np.linalg.norm(b, axis=0)**2)
+            assert_equal(residues, residues_should_be)
+            assert_(rank == 0, 'expected rank 0')
+            assert_equal(s, np.empty((0,)))
+
+
+class TestPinv:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_simple_real(self):
+        a = array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float)
+        a_pinv = pinv(a)
+        assert_array_almost_equal(dot(a, a_pinv), np.eye(3))
+
+    def test_simple_complex(self):
+        a = (array([[1, 2, 3], [4, 5, 6], [7, 8, 10]],
+             dtype=float) + 1j * array([[10, 8, 7], [6, 5, 4], [3, 2, 1]],
+                                       dtype=float))
+        a_pinv = pinv(a)
+        assert_array_almost_equal(dot(a, a_pinv), np.eye(3))
+
+    def test_simple_singular(self):
+        a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=float)
+        a_pinv = pinv(a)
+        expected = array([[-6.38888889e-01, -1.66666667e-01, 3.05555556e-01],
+                          [-5.55555556e-02, 1.30136518e-16, 5.55555556e-02],
+                          [5.27777778e-01, 1.66666667e-01, -1.94444444e-01]])
+        assert_array_almost_equal(a_pinv, expected)
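+
+    # A minimal illustrative sketch (not part of the upstream suite): for a
+    # singular matrix a @ a_pinv is no longer the identity, but any valid
+    # pseudoinverse must still satisfy the four Moore-Penrose conditions,
+    # which can be checked directly.
+    @staticmethod
+    def _check_moore_penrose(a, a_pinv):
+        assert_allclose(a @ a_pinv @ a, a, atol=1e-10)
+        assert_allclose(a_pinv @ a @ a_pinv, a_pinv, atol=1e-10)
+        assert_allclose((a @ a_pinv).conj().T, a @ a_pinv, atol=1e-10)
+        assert_allclose((a_pinv @ a).conj().T, a_pinv @ a, atol=1e-10)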
+
+    def test_simple_cols(self):
+        a = array([[1, 2, 3], [4, 5, 6]], dtype=float)
+        a_pinv = pinv(a)
+        expected = array([[-0.94444444, 0.44444444],
+                          [-0.11111111, 0.11111111],
+                          [0.72222222, -0.22222222]])
+        assert_array_almost_equal(a_pinv, expected)
+
+    def test_simple_rows(self):
+        a = array([[1, 2], [3, 4], [5, 6]], dtype=float)
+        a_pinv = pinv(a)
+        expected = array([[-1.33333333, -0.33333333, 0.66666667],
+                          [1.08333333, 0.33333333, -0.41666667]])
+        assert_array_almost_equal(a_pinv, expected)
+
+    def test_check_finite(self):
+        a = array([[1, 2, 3], [4, 5, 6.], [7, 8, 10]])
+        a_pinv = pinv(a, check_finite=False)
+        assert_array_almost_equal(dot(a, a_pinv), np.eye(3))
+
+    def test_native_list_argument(self):
+        a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
+        a_pinv = pinv(a)
+        expected = array([[-6.38888889e-01, -1.66666667e-01, 3.05555556e-01],
+                          [-5.55555556e-02, 1.30136518e-16, 5.55555556e-02],
+                          [5.27777778e-01, 1.66666667e-01, -1.94444444e-01]])
+        assert_array_almost_equal(a_pinv, expected)
+
+    def test_atol_rtol(self):
+        a_m = np.arange(35.0).reshape(7, 5)
+        a = a_m.copy()
+        a[0,0] = 0.001
+        atol = 1e-5
+        rtol = 0.05
+        # svds of a_m is ~ [116.906, 4.234, tiny, tiny, tiny]
+        # svds of a is ~ [116.906, 4.234, 4.62959e-04, tiny, tiny]
+        # Absolute-only cutoff (rtol=0.) so that a_p acts as the
+        # pseudoinverse of a_m
+        a_p = pinv(a_m, atol=atol, rtol=0.)
+        adiff1 = a @ a_p @ a - a
+        adiff2 = a_m @ a_p @ a_m - a_m
+        # Now adiff1 should be around atol value while adiff2 should be
+        # relatively tiny
+        assert_allclose(np.linalg.norm(adiff1), 5e-4, atol=5.e-4)
+        assert_allclose(np.linalg.norm(adiff2), 5e-14, atol=5.e-14)
+
+        # Now do the same but remove another sv ~4.234 via rtol
+        a_p = pinv(a_m, atol=atol, rtol=rtol)
+        adiff1 = a @ a_p @ a - a
+        adiff2 = a_m @ a_p @ a_m - a_m
+        assert_allclose(np.linalg.norm(adiff1), 4.233, rtol=0.01)
+        assert_allclose(np.linalg.norm(adiff2), 4.233, rtol=0.01)
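+
+    # An illustrative sketch (not part of the upstream suite): the cutoff
+    # exercised above keeps a singular value s_i only when
+    # s_i > atol + rtol * s_max, so a hand-rolled SVD-based pseudoinverse
+    # using the same rule agrees with pinv().
+    def _manual_cutoff_sketch(self):
+        a = np.arange(35.0).reshape(7, 5)
+        atol, rtol = 1e-5, 0.05
+        u, s, vh = np.linalg.svd(a, full_matrices=False)
+        keep = s > atol + rtol * s.max()
+        inv_s = np.where(keep, 1.0 / np.where(keep, s, 1.0), 0.0)
+        manual = (vh.T * inv_s) @ u.T
+        assert_allclose(manual, pinv(a, atol=atol, rtol=rtol), atol=1e-12)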
+
+
+class TestPinvSymmetric:
+
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_simple_real(self):
+        a = array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float)
+        a = np.dot(a, a.T)
+        a_pinv = pinvh(a)
+        assert_array_almost_equal(np.dot(a, a_pinv), np.eye(3))
+
+    def test_nonpositive(self):
+        a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=float)
+        a = np.dot(a, a.T)
+        u, s, vt = np.linalg.svd(a)
+        s[0] *= -1
+        a = np.dot(u * s, vt)  # a is now symmetric non-positive and singular
+        a_pinv = pinv(a)
+        a_pinvh = pinvh(a)
+        assert_array_almost_equal(a_pinv, a_pinvh)
+
+    def test_simple_complex(self):
+        a = (array([[1, 2, 3], [4, 5, 6], [7, 8, 10]],
+             dtype=float) + 1j * array([[10, 8, 7], [6, 5, 4], [3, 2, 1]],
+                                       dtype=float))
+        a = np.dot(a, a.conj().T)
+        a_pinv = pinvh(a)
+        assert_array_almost_equal(np.dot(a, a_pinv), np.eye(3))
+
+    def test_native_list_argument(self):
+        a = array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float)
+        a = np.dot(a, a.T)
+        a_pinv = pinvh(a.tolist())
+        assert_array_almost_equal(np.dot(a, a_pinv), np.eye(3))
+
+    def test_atol_rtol(self):
+        n = 12
+        # get a random ortho matrix for shuffling
+        q, _ = qr(np.random.rand(n, n))
+        a = np.diag([4, 3, 2, 1, 0.99e-4, 0.99e-5] + [0.99e-6]*(n-6))
+        a = q.T @ a @ q
+        a_m = np.diag([4, 3, 2, 1, 0.99e-4, 0.] + [0.]*(n-6))
+        a_m = q.T @ a_m @ q
+        atol = 1e-5
+        rtol = (4.01e-4 - 4e-5)/4
+        # Absolute-only cutoff (rtol=0.) so that a_p acts as the
+        # pseudoinverse of a_m
+        a_p = pinvh(a, atol=atol, rtol=0.)
+        adiff1 = a @ a_p @ a - a
+        adiff2 = a_m @ a_p @ a_m - a_m
+        # Now adiff1 should dance around the atol value due to the
+        # truncation while adiff2 should be relatively tiny
+        assert_allclose(norm(adiff1), atol, rtol=0.1)
+        assert_allclose(norm(adiff2), 1e-12, atol=1e-11)
+
+        # Now do the same but with rtol dominating the atol value
+        a_p = pinvh(a, atol=atol, rtol=rtol)
+        adiff1 = a @ a_p @ a - a
+        adiff2 = a_m @ a_p @ a_m - a_m
+        # adiff1 and adiff2 should be elevated to ~1e-4 due to mismatch
+        assert_allclose(norm(adiff1), 1e-4, rtol=0.1)
+        assert_allclose(norm(adiff2), 1e-4, rtol=0.1)
+
+
+@pytest.mark.parametrize('scale', (1e-20, 1., 1e20))
+@pytest.mark.parametrize('pinv_', (pinv, pinvh))
+def test_auto_rcond(scale, pinv_):
+    x = np.array([[1, 0], [0, 1e-10]]) * scale
+    expected = np.diag(1. / np.diag(x))
+    x_inv = pinv_(x)
+    assert_allclose(x_inv, expected)
+
+
+class TestVectorNorms:
+
+    def test_types(self):
+        for dtype in np.typecodes['AllFloat']:
+            x = np.array([1, 2, 3], dtype=dtype)
+            tol = max(1e-15, np.finfo(dtype).eps.real * 20)
+            assert_allclose(norm(x), np.sqrt(14), rtol=tol)
+            assert_allclose(norm(x, 2), np.sqrt(14), rtol=tol)
+
+        for dtype in np.typecodes['Complex']:
+            x = np.array([1j, 2j, 3j], dtype=dtype)
+            tol = max(1e-15, np.finfo(dtype).eps.real * 20)
+            assert_allclose(norm(x), np.sqrt(14), rtol=tol)
+            assert_allclose(norm(x, 2), np.sqrt(14), rtol=tol)
+
+    def test_overflow(self):
+        # unlike numpy's norm, this one is
+        # safer on overflow
+        a = array([1e20], dtype=float32)
+        assert_almost_equal(norm(a), a)
+
+    def test_stable(self):
+        # more stable than numpy's norm
+        a = array([1e4] + [1]*10000, dtype=float32)
+        try:
+            # snrm2 in double precision; we obtain the same as for float64
+            # -- large atol needed due to varying blas implementations
+            assert_allclose(norm(a) - 1e4, 0.5, atol=1e-2)
+        except AssertionError:
+            # snrm2 implemented in single precision, == np.linalg.norm result
+            msg = ": Result should equal either 0.0 or 0.5 (depending on " \
+                  "implementation of snrm2)."
+            assert_almost_equal(norm(a) - 1e4, 0.0, err_msg=msg)
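+
+    # An illustrative sketch (not part of the upstream suite): the extra
+    # stability comes from a scaled sum of squares in the spirit of the
+    # reference nrm2 -- factor out the largest magnitude before squaring so
+    # that intermediates cannot overflow.
+    @staticmethod
+    def _scaled_norm_sketch(x):
+        x = np.asarray(x)
+        scale = np.abs(x).max()
+        if scale == 0:
+            return 0.0
+        return scale * np.sqrt(np.sum(np.abs(x / scale) ** 2))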
+
+    def test_zero_norm(self):
+        assert_equal(norm([1, 0, 3], 0), 2)
+        assert_equal(norm([1, 2, 3], 0), 3)
+
+    def test_axis_kwd(self):
+        a = np.array([[[2, 1], [3, 4]]] * 2, 'd')
+        assert_allclose(norm(a, axis=1), [[3.60555128, 4.12310563]] * 2)
+        assert_allclose(norm(a, 1, axis=1), [[5.] * 2] * 2)
+
+    def test_keepdims_kwd(self):
+        a = np.array([[[2, 1], [3, 4]]] * 2, 'd')
+        b = norm(a, axis=1, keepdims=True)
+        assert_allclose(b, [[[3.60555128, 4.12310563]]] * 2)
+        assert_(b.shape == (2, 1, 2))
+        assert_allclose(norm(a, 1, axis=2, keepdims=True), [[[3.], [7.]]] * 2)
+
+    @pytest.mark.skipif(not HAS_ILP64, reason="64-bit BLAS required")
+    def test_large_vector(self):
+        check_free_memory(free_mb=17000)
+        x = np.zeros([2**31], dtype=np.float64)
+        x[-1] = 1
+        res = norm(x)
+        del x
+        assert_allclose(res, 1.0)
+
+
+class TestMatrixNorms:
+
+    def test_matrix_norms(self):
+        # Not all of these are matrix norms in the most technical sense.
+        np.random.seed(1234)
+        for n, m in (1, 1), (1, 3), (3, 1), (4, 4), (4, 5), (5, 4):
+            for t in np.single, np.double, np.csingle, np.cdouble, np.int64:
+                A = 10 * np.random.randn(n, m).astype(t)
+                if np.issubdtype(A.dtype, np.complexfloating):
+                    A = (A + 10j * np.random.randn(n, m)).astype(t)
+                    t_high = np.cdouble
+                else:
+                    t_high = np.double
+                for order in (None, 'fro', 1, -1, 2, -2, np.inf, -np.inf):
+                    actual = norm(A, ord=order)
+                    desired = np.linalg.norm(A, ord=order)
+                    # SciPy may return higher precision matrix norms.
+                    # This is a consequence of using LAPACK.
+                    if not np.allclose(actual, desired):
+                        desired = np.linalg.norm(A.astype(t_high), ord=order)
+                        assert_allclose(actual, desired)
+
+    def test_axis_kwd(self):
+        a = np.array([[[2, 1], [3, 4]]] * 2, 'd')
+        b = norm(a, ord=np.inf, axis=(1, 0))
+        c = norm(np.swapaxes(a, 0, 1), ord=np.inf, axis=(0, 1))
+        d = norm(a, ord=1, axis=(0, 1))
+        assert_allclose(b, c)
+        assert_allclose(c, d)
+        assert_allclose(b, d)
+        assert_(b.shape == c.shape == d.shape)
+        b = norm(a, ord=1, axis=(1, 0))
+        c = norm(np.swapaxes(a, 0, 1), ord=1, axis=(0, 1))
+        d = norm(a, ord=np.inf, axis=(0, 1))
+        assert_allclose(b, c)
+        assert_allclose(c, d)
+        assert_allclose(b, d)
+        assert_(b.shape == c.shape == d.shape)
+
+    def test_keepdims_kwd(self):
+        a = np.arange(120, dtype='d').reshape(2, 3, 4, 5)
+        b = norm(a, ord=np.inf, axis=(1, 0), keepdims=True)
+        c = norm(a, ord=1, axis=(0, 1), keepdims=True)
+        assert_allclose(b, c)
+        assert_(b.shape == c.shape)
+
+
+class TestOverwrite:
+    def test_solve(self):
+        assert_no_overwrite(solve, [(3, 3), (3,)])
+
+    def test_solve_triangular(self):
+        assert_no_overwrite(solve_triangular, [(3, 3), (3,)])
+
+    def test_solve_banded(self):
+        assert_no_overwrite(lambda ab, b: solve_banded((2, 1), ab, b),
+                            [(4, 6), (6,)])
+
+    def test_solveh_banded(self):
+        assert_no_overwrite(solveh_banded, [(2, 6), (6,)])
+
+    def test_inv(self):
+        assert_no_overwrite(inv, [(3, 3)])
+
+    def test_det(self):
+        assert_no_overwrite(det, [(3, 3)])
+
+    def test_lstsq(self):
+        assert_no_overwrite(lstsq, [(3, 2), (3,)])
+
+    def test_pinv(self):
+        assert_no_overwrite(pinv, [(3, 3)])
+
+    def test_pinvh(self):
+        assert_no_overwrite(pinvh, [(3, 3)])
+
+
+class TestSolveCirculant:
+
+    def test_basic1(self):
+        c = np.array([1, 2, 3, 5])
+        b = np.array([1, -1, 1, 0])
+        x = solve_circulant(c, b)
+        y = solve(circulant(c), b)
+        assert_allclose(x, y)
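+
+    # An illustrative sketch (not part of the upstream suite): a circulant
+    # system is diagonalized by the DFT, so it can be solved in O(n log n)
+    # via x = ifft(fft(b) / fft(c)) -- the identity solve_circulant exploits.
+    def _fft_identity_sketch(self):
+        c = np.array([1, 2, 3, 5])
+        b = np.array([1, -1, 1, 0])
+        x = np.fft.ifft(np.fft.fft(b) / np.fft.fft(c)).real
+        assert_allclose(x, solve_circulant(c, b), atol=1e-12)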
+
+    def test_basic2(self):
+        # b is a 2-d matrix.
+        c = np.array([1, 2, -3, -5])
+        b = np.arange(12).reshape(4, 3)
+        x = solve_circulant(c, b)
+        y = solve(circulant(c), b)
+        assert_allclose(x, y)
+
+    def test_basic3(self):
+        # b is a 3-d matrix.
+        c = np.array([1, 2, -3, -5])
+        b = np.arange(24).reshape(4, 3, 2)
+        x = solve_circulant(c, b)
+        y = solve(circulant(c), b)
+        assert_allclose(x, y)
+
+    def test_complex(self):
+        # Complex b and c
+        c = np.array([1+2j, -3, 4j, 5])
+        b = np.arange(8).reshape(4, 2) + 0.5j
+        x = solve_circulant(c, b)
+        y = solve(circulant(c), b)
+        assert_allclose(x, y)
+
+    def test_random_b_and_c(self):
+        # Random b and c
+        np.random.seed(54321)
+        c = np.random.randn(50)
+        b = np.random.randn(50)
+        x = solve_circulant(c, b)
+        y = solve(circulant(c), b)
+        assert_allclose(x, y)
+
+    def test_singular(self):
+        # c gives a singular circulant matrix.
+        c = np.array([1, 1, 0, 0])
+        b = np.array([1, 2, 3, 4])
+        x = solve_circulant(c, b, singular='lstsq')
+        y, res, rnk, s = lstsq(circulant(c), b)
+        assert_allclose(x, y)
+        # The minimum-norm lstsq solution x inherits the spectral zero of c,
+        # so circulant(x) is singular as well and the default
+        # singular='raise' rejects it.
+        assert_raises(LinAlgError, solve_circulant, x, y)
+
+    def test_axis_args(self):
+        # Test use of caxis, baxis and outaxis.
+
+        # c has shape (2, 1, 4)
+        c = np.array([[[-1, 2.5, 3, 3.5]], [[1, 6, 6, 6.5]]])
+
+        # b has shape (3, 4)
+        b = np.array([[0, 0, 1, 1], [1, 1, 0, 0], [1, -1, 0, 0]])
+
+        x = solve_circulant(c, b, baxis=1)
+        assert_equal(x.shape, (4, 2, 3))
+        expected = np.empty_like(x)
+        expected[:, 0, :] = solve(circulant(c[0]), b.T)
+        expected[:, 1, :] = solve(circulant(c[1]), b.T)
+        assert_allclose(x, expected)
+
+        x = solve_circulant(c, b, baxis=1, outaxis=-1)
+        assert_equal(x.shape, (2, 3, 4))
+        assert_allclose(np.moveaxis(x, -1, 0), expected)
+
+        # np.swapaxes(c, 1, 2) has shape (2, 4, 1); b.T has shape (4, 3).
+        x = solve_circulant(np.swapaxes(c, 1, 2), b.T, caxis=1)
+        assert_equal(x.shape, (4, 2, 3))
+        assert_allclose(x, expected)
+
+    def test_native_list_arguments(self):
+        # Same as test_basic1 using python's native list.
+        c = [1, 2, 3, 5]
+        b = [1, -1, 1, 0]
+        x = solve_circulant(c, b)
+        y = solve(circulant(c), b)
+        assert_allclose(x, y)
+
+
+class TestMatrix_Balance:
+
+    def test_string_arg(self):
+        assert_raises(ValueError, matrix_balance, 'Some string for fail')
+
+    def test_infnan_arg(self):
+        assert_raises(ValueError, matrix_balance,
+                      np.array([[1, 2], [3, np.inf]]))
+        assert_raises(ValueError, matrix_balance,
+                      np.array([[1, 2], [3, np.nan]]))
+
+    def test_scaling(self):
+        _, y = matrix_balance(np.array([[1000, 1], [1000, 0]]))
+        # Pre/post LAPACK 3.5.0 gives the same result up to an offset:
+        # one column norm is 1000x the other and 1000 / 32 ~= 32, hence the
+        # matrix is balanced with a factor of 2 ** 5.
+        assert_allclose(np.diff(np.log2(np.diag(y))), [5])
+
+    def test_scaling_order(self):
+        A = np.array([[1, 0, 1e-4], [1, 1, 1e-2], [1e4, 1e2, 1]])
+        x, y = matrix_balance(A)
+        assert_allclose(solve(y, A).dot(y), x)
+
+    def test_separate(self):
+        _, (y, z) = matrix_balance(np.array([[1000, 1], [1000, 0]]),
+                                   separate=1)
+        assert_equal(np.diff(np.log2(y)), [5])
+        assert_allclose(z, np.arange(2))
+
+    def test_permutation(self):
+        A = block_diag(np.ones((2, 2)), np.tril(np.ones((2, 2))),
+                       np.ones((3, 3)))
+        x, (y, z) = matrix_balance(A, separate=1)
+        assert_allclose(y, np.ones_like(y))
+        assert_allclose(z, np.array([0, 1, 6, 5, 4, 3, 2]))
+
+    def test_perm_and_scaling(self):
+        # Matrix with its diagonal removed
+        cases = (  # Case 0
+                 np.array([[0., 0., 0., 0., 0.000002],
+                           [0., 0., 0., 0., 0.],
+                           [2., 2., 0., 0., 0.],
+                           [2., 2., 0., 0., 0.],
+                           [0., 0., 0.000002, 0., 0.]]),
+                 #  Case 1 user reported GH-7258
+                 np.array([[-0.5, 0., 0., 0.],
+                           [0., -1., 0., 0.],
+                           [1., 0., -0.5, 0.],
+                           [0., 1., 0., -1.]]),
+                 #  Case 2 user reported GH-7258
+                 np.array([[-3., 0., 1., 0.],
+                           [-1., -1., -0., 1.],
+                           [-3., -0., -0., 0.],
+                           [-1., -0., 1., -1.]])
+                 )
+
+        for A in cases:
+            x, y = matrix_balance(A)
+            x, (s, p) = matrix_balance(A, separate=1)
+            ip = np.empty_like(p)
+            ip[p] = np.arange(A.shape[0])
+            assert_allclose(y, np.diag(s)[ip, :])
+            assert_allclose(solve(y, A).dot(y), x)
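+
+
+# An illustrative sketch (not part of the upstream suite): with separate=1,
+# matrix_balance returns the scaling vector s and permutation p instead of
+# the combined similarity transform; the transform can be rebuilt as diag(s)
+# with its rows un-permuted, after which solve(T, A) @ T recovers the
+# balanced matrix.
+def _matrix_balance_reconstruct_sketch():
+    A = np.array([[1, 0, 1e-4], [1, 1, 1e-2], [1e4, 1e2, 1]])
+    x, (s, p) = matrix_balance(A, separate=1)
+    T = np.diag(s)[np.argsort(p), :]
+    assert_allclose(solve(T, A).dot(T), x)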
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_blas.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_blas.py
new file mode 100644
index 00000000..b8d57cd6
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_blas.py
@@ -0,0 +1,1096 @@
+#
+# Created by: Pearu Peterson, April 2002
+#
+
+__usage__ = """
+Build linalg:
+  python setup.py build
+Run tests if scipy is installed:
+  python -c 'import scipy;scipy.linalg.test()'
+"""
+
+import math
+import pytest
+import numpy as np
+from numpy.testing import (assert_equal, assert_almost_equal, assert_,
+                           assert_array_almost_equal, assert_allclose)
+from pytest import raises as assert_raises
+
+from numpy import float32, float64, complex64, complex128, arange, triu, \
+                  tril, zeros, tril_indices, ones, mod, diag, append, eye, \
+                  nonzero
+
+from numpy.random import rand, seed
+from scipy.linalg import _fblas as fblas, get_blas_funcs, toeplitz, solve
+
+try:
+    from scipy.linalg import _cblas as cblas
+except ImportError:
+    cblas = None
+
+REAL_DTYPES = [float32, float64]
+COMPLEX_DTYPES = [complex64, complex128]
+DTYPES = REAL_DTYPES + COMPLEX_DTYPES
+
+
+def test_get_blas_funcs():
+    # check that it returns Fortran code for arrays that are
+    # fortran-ordered
+    f1, f2, f3 = get_blas_funcs(
+        ('axpy', 'axpy', 'axpy'),
+        (np.empty((2, 2), dtype=np.complex64, order='F'),
+         np.empty((2, 2), dtype=np.complex128, order='C'))
+        )
+
+    # get_blas_funcs will choose libraries depending on the most generic
+    # array
+    assert_equal(f1.typecode, 'z')
+    assert_equal(f2.typecode, 'z')
+    if cblas is not None:
+        assert_equal(f1.module_name, 'cblas')
+        assert_equal(f2.module_name, 'cblas')
+
+    # check defaults.
+    f1 = get_blas_funcs('rotg')
+    assert_equal(f1.typecode, 'd')
+
+    # check also dtype interface
+    f1 = get_blas_funcs('gemm', dtype=np.complex64)
+    assert_equal(f1.typecode, 'c')
+    f1 = get_blas_funcs('gemm', dtype='F')
+    assert_equal(f1.typecode, 'c')
+
+    # extended precision complex
+    f1 = get_blas_funcs('gemm', dtype=np.longcomplex)
+    assert_equal(f1.typecode, 'z')
+
+    # check safe complex upcasting
+    f1 = get_blas_funcs('axpy',
+                        (np.empty((2, 2), dtype=np.float64),
+                         np.empty((2, 2), dtype=np.complex64))
+                        )
+    assert_equal(f1.typecode, 'z')
+
+
+def test_get_blas_funcs_alias():
+    # check alias for get_blas_funcs
+    f, g = get_blas_funcs(('nrm2', 'dot'), dtype=np.complex64)
+    assert f.typecode == 'c'
+    assert g.typecode == 'c'
+
+    f, g, h = get_blas_funcs(('dot', 'dotc', 'dotu'), dtype=np.float64)
+    assert f is g
+    assert f is h
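+
+
+# An illustrative sketch (not part of the upstream suite): BLAS routine names
+# are resolved by prepending a typecode prefix ('s', 'd', 'c' or 'z'), which
+# is what the getattr(fblas, p + name) pattern used throughout relies on.
+def _prefix_convention_sketch():
+    for prefix, dtype in zip('sdcz', DTYPES):
+        f = get_blas_funcs('axpy', dtype=dtype)
+        assert f.typecode == prefix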
+
+
+class TestCBLAS1Simple:
+
+    def test_axpy(self):
+        for p in 'sd':
+            f = getattr(cblas, p+'axpy', None)
+            if f is None:
+                continue
+            assert_array_almost_equal(f([1, 2, 3], [2, -1, 3], a=5),
+                                      [7, 9, 18])
+        for p in 'cz':
+            f = getattr(cblas, p+'axpy', None)
+            if f is None:
+                continue
+            assert_array_almost_equal(f([1, 2j, 3], [2, -1, 3], a=5),
+                                      [7, 10j-1, 18])
+
+
+class TestFBLAS1Simple:
+
+    def test_axpy(self):
+        for p in 'sd':
+            f = getattr(fblas, p+'axpy', None)
+            if f is None:
+                continue
+            assert_array_almost_equal(f([1, 2, 3], [2, -1, 3], a=5),
+                                      [7, 9, 18])
+        for p in 'cz':
+            f = getattr(fblas, p+'axpy', None)
+            if f is None:
+                continue
+            assert_array_almost_equal(f([1, 2j, 3], [2, -1, 3], a=5),
+                                      [7, 10j-1, 18])
+
+    def test_copy(self):
+        for p in 'sd':
+            f = getattr(fblas, p+'copy', None)
+            if f is None:
+                continue
+            assert_array_almost_equal(f([3, 4, 5], [8]*3), [3, 4, 5])
+        for p in 'cz':
+            f = getattr(fblas, p+'copy', None)
+            if f is None:
+                continue
+            assert_array_almost_equal(f([3, 4j, 5+3j], [8]*3), [3, 4j, 5+3j])
+
+    def test_asum(self):
+        for p in 'sd':
+            f = getattr(fblas, p+'asum', None)
+            if f is None:
+                continue
+            assert_almost_equal(f([3, -4, 5]), 12)
+        for p in ['sc', 'dz']:
+            f = getattr(fblas, p+'asum', None)
+            if f is None:
+                continue
+            assert_almost_equal(f([3j, -4, 3-4j]), 14)
+
+    def test_dot(self):
+        for p in 'sd':
+            f = getattr(fblas, p+'dot', None)
+            if f is None:
+                continue
+            assert_almost_equal(f([3, -4, 5], [2, 5, 1]), -9)
+
+    def test_complex_dotu(self):
+        for p in 'cz':
+            f = getattr(fblas, p+'dotu', None)
+            if f is None:
+                continue
+            assert_almost_equal(f([3j, -4, 3-4j], [2, 3, 1]), -9+2j)
+
+    def test_complex_dotc(self):
+        for p in 'cz':
+            f = getattr(fblas, p+'dotc', None)
+            if f is None:
+                continue
+            assert_almost_equal(f([3j, -4, 3-4j], [2, 3j, 1]), 3-14j)
+
+    def test_nrm2(self):
+        for p in 'sd':
+            f = getattr(fblas, p+'nrm2', None)
+            if f is None:
+                continue
+            assert_almost_equal(f([3, -4, 5]), math.sqrt(50))
+        for p in ['c', 'z', 'sc', 'dz']:
+            f = getattr(fblas, p+'nrm2', None)
+            if f is None:
+                continue
+            assert_almost_equal(f([3j, -4, 3-4j]), math.sqrt(50))
+
+    def test_scal(self):
+        for p in 'sd':
+            f = getattr(fblas, p+'scal', None)
+            if f is None:
+                continue
+            assert_array_almost_equal(f(2, [3, -4, 5]), [6, -8, 10])
+        for p in 'cz':
+            f = getattr(fblas, p+'scal', None)
+            if f is None:
+                continue
+            assert_array_almost_equal(f(3j, [3j, -4, 3-4j]), [-9, -12j, 12+9j])
+        for p in ['cs', 'zd']:
+            f = getattr(fblas, p+'scal', None)
+            if f is None:
+                continue
+            assert_array_almost_equal(f(3, [3j, -4, 3-4j]), [9j, -12, 9-12j])
+
+    def test_swap(self):
+        for p in 'sd':
+            f = getattr(fblas, p+'swap', None)
+            if f is None:
+                continue
+            x, y = [2, 3, 1], [-2, 3, 7]
+            x1, y1 = f(x, y)
+            assert_array_almost_equal(x1, y)
+            assert_array_almost_equal(y1, x)
+        for p in 'cz':
+            f = getattr(fblas, p+'swap', None)
+            if f is None:
+                continue
+            x, y = [2, 3j, 1], [-2, 3, 7-3j]
+            x1, y1 = f(x, y)
+            assert_array_almost_equal(x1, y)
+            assert_array_almost_equal(y1, x)
+
+    def test_amax(self):
+        for p in 'sd':
+            f = getattr(fblas, 'i'+p+'amax')
+            assert_equal(f([-2, 4, 3]), 1)
+        for p in 'cz':
+            f = getattr(fblas, 'i'+p+'amax')
+            assert_equal(f([-5, 4+3j, 6]), 1)
+    # XXX: need tests for rot,rotm,rotg,rotmg
+
+
+class TestFBLAS2Simple:
+
+    def test_gemv(self):
+        for p in 'sd':
+            f = getattr(fblas, p+'gemv', None)
+            if f is None:
+                continue
+            assert_array_almost_equal(f(3, [[3]], [-4]), [-36])
+            assert_array_almost_equal(f(3, [[3]], [-4], 3, [5]), [-21])
+        for p in 'cz':
+            f = getattr(fblas, p+'gemv', None)
+            if f is None:
+                continue
+            assert_array_almost_equal(f(3j, [[3-4j]], [-4]), [-48-36j])
+            assert_array_almost_equal(f(3j, [[3-4j]], [-4], 3, [5j]),
+                                      [-48-21j])
+
+    def test_ger(self):
+
+        for p in 'sd':
+            f = getattr(fblas, p+'ger', None)
+            if f is None:
+                continue
+            assert_array_almost_equal(f(1, [1, 2], [3, 4]), [[3, 4], [6, 8]])
+            assert_array_almost_equal(f(2, [1, 2, 3], [3, 4]),
+                                      [[6, 8], [12, 16], [18, 24]])
+
+            assert_array_almost_equal(f(1, [1, 2], [3, 4],
+                                        a=[[1, 2], [3, 4]]), [[4, 6], [9, 12]])
+
+        for p in 'cz':
+            f = getattr(fblas, p+'geru', None)
+            if f is None:
+                continue
+            assert_array_almost_equal(f(1, [1j, 2], [3, 4]),
+                                      [[3j, 4j], [6, 8]])
+            assert_array_almost_equal(f(-2, [1j, 2j, 3j], [3j, 4j]),
+                                      [[6, 8], [12, 16], [18, 24]])
+
+        for p in 'cz':
+            for name in ('ger', 'gerc'):
+                f = getattr(fblas, p+name, None)
+                if f is None:
+                    continue
+                assert_array_almost_equal(f(1, [1j, 2], [3, 4]),
+                                          [[3j, 4j], [6, 8]])
+                assert_array_almost_equal(f(2, [1j, 2j, 3j], [3j, 4j]),
+                                          [[6, 8], [12, 16], [18, 24]])
+
+    def test_syr_her(self):
+        x = np.arange(1, 5, dtype='d')
+        resx = np.triu(x[:, np.newaxis] * x)
+        resx_reverse = np.triu(x[::-1, np.newaxis] * x[::-1])
+
+        y = np.linspace(0, 8.5, 17, endpoint=False)
+
+        z = np.arange(1, 9, dtype='d').view('D')
+        resz = np.triu(z[:, np.newaxis] * z)
+        resz_reverse = np.triu(z[::-1, np.newaxis] * z[::-1])
+        rehz = np.triu(z[:, np.newaxis] * z.conj())
+        rehz_reverse = np.triu(z[::-1, np.newaxis] * z[::-1].conj())
+
+        w = np.c_[np.zeros(4), z, np.zeros(4)].ravel()
+
+        for p, rtol in zip('sd', [1e-7, 1e-14]):
+            f = getattr(fblas, p+'syr', None)
+            if f is None:
+                continue
+            assert_allclose(f(1.0, x), resx, rtol=rtol)
+            assert_allclose(f(1.0, x, lower=True), resx.T, rtol=rtol)
+            assert_allclose(f(1.0, y, incx=2, offx=2, n=4), resx, rtol=rtol)
+            # negative increments imply reversed vectors in blas
+            assert_allclose(f(1.0, y, incx=-2, offx=2, n=4),
+                            resx_reverse, rtol=rtol)
+
+            a = np.zeros((4, 4), 'f' if p == 's' else 'd', 'F')
+            b = f(1.0, x, a=a, overwrite_a=True)
+            assert_allclose(a, resx, rtol=rtol)
+
+            b = f(2.0, x, a=a)
+            assert_(a is not b)
+            assert_allclose(b, 3*resx, rtol=rtol)
+
+            assert_raises(Exception, f, 1.0, x, incx=0)
+            assert_raises(Exception, f, 1.0, x, offx=5)
+            assert_raises(Exception, f, 1.0, x, offx=-2)
+            assert_raises(Exception, f, 1.0, x, n=-2)
+            assert_raises(Exception, f, 1.0, x, n=5)
+            assert_raises(Exception, f, 1.0, x, lower=2)
+            assert_raises(Exception, f, 1.0, x, a=np.zeros((2, 2), 'd', 'F'))
+
+        for p, rtol in zip('cz', [1e-7, 1e-14]):
+            f = getattr(fblas, p+'syr', None)
+            if f is None:
+                continue
+            assert_allclose(f(1.0, z), resz, rtol=rtol)
+            assert_allclose(f(1.0, z, lower=True), resz.T, rtol=rtol)
+            assert_allclose(f(1.0, w, incx=3, offx=1, n=4), resz, rtol=rtol)
+            # negative increments imply reversed vectors in blas
+            assert_allclose(f(1.0, w, incx=-3, offx=1, n=4),
+                            resz_reverse, rtol=rtol)
+
+            a = np.zeros((4, 4), 'F' if p == 'c' else 'D', 'F')
+            b = f(1.0, z, a=a, overwrite_a=True)
+            assert_allclose(a, resz, rtol=rtol)
+
+            b = f(2.0, z, a=a)
+            assert_(a is not b)
+            assert_allclose(b, 3*resz, rtol=rtol)
+
+            assert_raises(Exception, f, 1.0, x, incx=0)
+            assert_raises(Exception, f, 1.0, x, offx=5)
+            assert_raises(Exception, f, 1.0, x, offx=-2)
+            assert_raises(Exception, f, 1.0, x, n=-2)
+            assert_raises(Exception, f, 1.0, x, n=5)
+            assert_raises(Exception, f, 1.0, x, lower=2)
+            assert_raises(Exception, f, 1.0, x, a=np.zeros((2, 2), 'd', 'F'))
+
+        for p, rtol in zip('cz', [1e-7, 1e-14]):
+            f = getattr(fblas, p+'her', None)
+            if f is None:
+                continue
+            assert_allclose(f(1.0, z), rehz, rtol=rtol)
+            assert_allclose(f(1.0, z, lower=True), rehz.T.conj(), rtol=rtol)
+            assert_allclose(f(1.0, w, incx=3, offx=1, n=4), rehz, rtol=rtol)
+            # negative increments imply reversed vectors in blas
+            assert_allclose(f(1.0, w, incx=-3, offx=1, n=4),
+                            rehz_reverse, rtol=rtol)
+
+            a = np.zeros((4, 4), 'F' if p == 'c' else 'D', 'F')
+            b = f(1.0, z, a=a, overwrite_a=True)
+            assert_allclose(a, rehz, rtol=rtol)
+
+            b = f(2.0, z, a=a)
+            assert_(a is not b)
+            assert_allclose(b, 3*rehz, rtol=rtol)
+
+            assert_raises(Exception, f, 1.0, x, incx=0)
+            assert_raises(Exception, f, 1.0, x, offx=5)
+            assert_raises(Exception, f, 1.0, x, offx=-2)
+            assert_raises(Exception, f, 1.0, x, n=-2)
+            assert_raises(Exception, f, 1.0, x, n=5)
+            assert_raises(Exception, f, 1.0, x, lower=2)
+            assert_raises(Exception, f, 1.0, x, a=np.zeros((2, 2), 'd', 'F'))
+
+    def test_syr2(self):
+        x = np.arange(1, 5, dtype='d')
+        y = np.arange(5, 9, dtype='d')
+        resxy = np.triu(x[:, np.newaxis] * y + y[:, np.newaxis] * x)
+        resxy_reverse = np.triu(x[::-1, np.newaxis] * y[::-1]
+                                + y[::-1, np.newaxis] * x[::-1])
+
+        q = np.linspace(0, 8.5, 17, endpoint=False)
+
+        for p, rtol in zip('sd', [1e-7, 1e-14]):
+            f = getattr(fblas, p+'syr2', None)
+            if f is None:
+                continue
+            assert_allclose(f(1.0, x, y), resxy, rtol=rtol)
+            assert_allclose(f(1.0, x, y, n=3), resxy[:3, :3], rtol=rtol)
+            assert_allclose(f(1.0, x, y, lower=True), resxy.T, rtol=rtol)
+
+            assert_allclose(f(1.0, q, q, incx=2, offx=2, incy=2, offy=10),
+                            resxy, rtol=rtol)
+            assert_allclose(f(1.0, q, q, incx=2, offx=2, incy=2, offy=10, n=3),
+                            resxy[:3, :3], rtol=rtol)
+            # negative increments imply reversed vectors in blas
+            assert_allclose(f(1.0, q, q, incx=-2, offx=2, incy=-2, offy=10),
+                            resxy_reverse, rtol=rtol)
+
+            a = np.zeros((4, 4), 'f' if p == 's' else 'd', 'F')
+            b = f(1.0, x, y, a=a, overwrite_a=True)
+            assert_allclose(a, resxy, rtol=rtol)
+
+            b = f(2.0, x, y, a=a)
+            assert_(a is not b)
+            assert_allclose(b, 3*resxy, rtol=rtol)
+
+            assert_raises(Exception, f, 1.0, x, y, incx=0)
+            assert_raises(Exception, f, 1.0, x, y, offx=5)
+            assert_raises(Exception, f, 1.0, x, y, offx=-2)
+            assert_raises(Exception, f, 1.0, x, y, incy=0)
+            assert_raises(Exception, f, 1.0, x, y, offy=5)
+            assert_raises(Exception, f, 1.0, x, y, offy=-2)
+            assert_raises(Exception, f, 1.0, x, y, n=-2)
+            assert_raises(Exception, f, 1.0, x, y, n=5)
+            assert_raises(Exception, f, 1.0, x, y, lower=2)
+            assert_raises(Exception, f, 1.0, x, y,
+                          a=np.zeros((2, 2), 'd', 'F'))
+
+    def test_her2(self):
+        x = np.arange(1, 9, dtype='d').view('D')
+        y = np.arange(9, 17, dtype='d').view('D')
+        resxy = x[:, np.newaxis] * y.conj() + y[:, np.newaxis] * x.conj()
+        resxy = np.triu(resxy)
+
+        resxy_reverse = x[::-1, np.newaxis] * y[::-1].conj()
+        resxy_reverse += y[::-1, np.newaxis] * x[::-1].conj()
+        resxy_reverse = np.triu(resxy_reverse)
+
+        u = np.c_[np.zeros(4), x, np.zeros(4)].ravel()
+        v = np.c_[np.zeros(4), y, np.zeros(4)].ravel()
+
+        for p, rtol in zip('cz', [1e-7, 1e-14]):
+            f = getattr(fblas, p+'her2', None)
+            if f is None:
+                continue
+            assert_allclose(f(1.0, x, y), resxy, rtol=rtol)
+            assert_allclose(f(1.0, x, y, n=3), resxy[:3, :3], rtol=rtol)
+            assert_allclose(f(1.0, x, y, lower=True), resxy.T.conj(),
+                            rtol=rtol)
+
+            assert_allclose(f(1.0, u, v, incx=3, offx=1, incy=3, offy=1),
+                            resxy, rtol=rtol)
+            assert_allclose(f(1.0, u, v, incx=3, offx=1, incy=3, offy=1, n=3),
+                            resxy[:3, :3], rtol=rtol)
+            # negative increments imply reversed vectors in blas
+            assert_allclose(f(1.0, u, v, incx=-3, offx=1, incy=-3, offy=1),
+                            resxy_reverse, rtol=rtol)
+
+            a = np.zeros((4, 4), 'F' if p == 'c' else 'D', 'F')
+            b = f(1.0, x, y, a=a, overwrite_a=True)
+            assert_allclose(a, resxy, rtol=rtol)
+
+            b = f(2.0, x, y, a=a)
+            assert_(a is not b)
+            assert_allclose(b, 3*resxy, rtol=rtol)
+
+            assert_raises(Exception, f, 1.0, x, y, incx=0)
+            assert_raises(Exception, f, 1.0, x, y, offx=5)
+            assert_raises(Exception, f, 1.0, x, y, offx=-2)
+            assert_raises(Exception, f, 1.0, x, y, incy=0)
+            assert_raises(Exception, f, 1.0, x, y, offy=5)
+            assert_raises(Exception, f, 1.0, x, y, offy=-2)
+            assert_raises(Exception, f, 1.0, x, y, n=-2)
+            assert_raises(Exception, f, 1.0, x, y, n=5)
+            assert_raises(Exception, f, 1.0, x, y, lower=2)
+            assert_raises(Exception, f, 1.0, x, y,
+                          a=np.zeros((2, 2), 'd', 'F'))
+
+    def test_gbmv(self):
+        seed(1234)
+        for ind, dtype in enumerate(DTYPES):
+            n = 7
+            m = 5
+            kl = 1
+            ku = 2
+            # fake a banded matrix via toeplitz
+            A = toeplitz(append(rand(kl+1), zeros(m-kl-1)),
+                         append(rand(ku+1), zeros(n-ku-1)))
+            A = A.astype(dtype)
+            Ab = zeros((kl+ku+1, n), dtype=dtype)
+
+            # Form the banded storage
+            Ab[2, :5] = A[0, 0]  # diag
+            Ab[1, 1:6] = A[0, 1]  # sup1
+            Ab[0, 2:7] = A[0, 2]  # sup2
+            Ab[3, :4] = A[1, 0]  # sub1
+
+            x = rand(n).astype(dtype)
+            y = rand(m).astype(dtype)
+            alpha, beta = dtype(3), dtype(-5)
+
+            func, = get_blas_funcs(('gbmv',), dtype=dtype)
+            y1 = func(m=m, n=n, ku=ku, kl=kl, alpha=alpha, a=Ab,
+                      x=x, y=y, beta=beta)
+            y2 = alpha * A.dot(x) + beta * y
+            assert_array_almost_equal(y1, y2)
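+
+    # An illustrative sketch (not part of the upstream suite): the
+    # hand-written rows above follow the general LAPACK band-storage rule
+    # Ab[ku + i - j, j] = A[i, j] for entries within the bandwidth.
+    @staticmethod
+    def _band_storage_sketch(A, kl, ku):
+        m, n = A.shape
+        Ab = zeros((kl + ku + 1, n), dtype=A.dtype)
+        for j in range(n):
+            for i in range(max(0, j - ku), min(m, j + kl + 1)):
+                Ab[ku + i - j, j] = A[i, j]
+        return Ab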
+
+    def test_sbmv_hbmv(self):
+        seed(1234)
+        for ind, dtype in enumerate(DTYPES):
+            n = 6
+            k = 2
+            A = zeros((n, n), dtype=dtype)
+            Ab = zeros((k+1, n), dtype=dtype)
+
+            # Form the array and its packed banded storage
+            A[arange(n), arange(n)] = rand(n)
+            for ind2 in range(1, k+1):
+                temp = rand(n-ind2)
+                A[arange(n-ind2), arange(ind2, n)] = temp
+                Ab[-1-ind2, ind2:] = temp
+            A = A.astype(dtype)
+            A = A + A.T if ind < 2 else A + A.conj().T
+            Ab[-1, :] = diag(A)
+            x = rand(n).astype(dtype)
+            y = rand(n).astype(dtype)
+            alpha, beta = dtype(1.25), dtype(3)
+
+            if ind > 1:
+                func, = get_blas_funcs(('hbmv',), dtype=dtype)
+            else:
+                func, = get_blas_funcs(('sbmv',), dtype=dtype)
+            y1 = func(k=k, alpha=alpha, a=Ab, x=x, y=y, beta=beta)
+            y2 = alpha * A.dot(x) + beta * y
+            assert_array_almost_equal(y1, y2)
+
+    def test_spmv_hpmv(self):
+        seed(1234)
+        for ind, dtype in enumerate(DTYPES+COMPLEX_DTYPES):
+            n = 3
+            A = rand(n, n).astype(dtype)
+            if ind > 1:
+                A += rand(n, n)*1j
+            A = A.astype(dtype)
+            A = A + A.T if ind < 4 else A + A.conj().T
+            c, r = tril_indices(n)
+            Ap = A[r, c]
+            x = rand(n).astype(dtype)
+            y = rand(n).astype(dtype)
+            xlong = arange(2*n).astype(dtype)
+            ylong = ones(2*n).astype(dtype)
+            alpha, beta = dtype(1.25), dtype(2)
+
+            if ind > 3:
+                func, = get_blas_funcs(('hpmv',), dtype=dtype)
+            else:
+                func, = get_blas_funcs(('spmv',), dtype=dtype)
+            y1 = func(n=n, alpha=alpha, ap=Ap, x=x, y=y, beta=beta)
+            y2 = alpha * A.dot(x) + beta * y
+            assert_array_almost_equal(y1, y2)
+
+            # Test inc and offsets
+            y1 = func(n=n-1, alpha=alpha, beta=beta, x=xlong, y=ylong, ap=Ap,
+                      incx=2, incy=2, offx=n, offy=n)
+            y2 = (alpha * A[:-1, :-1]).dot(xlong[3::2]) + beta * ylong[3::2]
+            assert_array_almost_equal(y1[3::2], y2)
+            assert_almost_equal(y1[4], ylong[4])
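+
+    # An illustrative sketch (not part of the upstream suite): the
+    # tril_indices trick above builds column-major upper-packed storage,
+    # equivalent to the explicit rule Ap[j*(j+1)//2 + i] = A[i, j], i <= j.
+    @staticmethod
+    def _packed_storage_sketch(A):
+        n = A.shape[0]
+        Ap = zeros(n * (n + 1) // 2, dtype=A.dtype)
+        for j in range(n):
+            for i in range(j + 1):
+                Ap[j * (j + 1) // 2 + i] = A[i, j]
+        return Ap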
+
+    def test_spr_hpr(self):
+        seed(1234)
+        for ind, dtype in enumerate(DTYPES+COMPLEX_DTYPES):
+            n = 3
+            A = rand(n, n).astype(dtype)
+            if ind > 1:
+                A += rand(n, n)*1j
+            A = A.astype(dtype)
+            A = A + A.T if ind < 4 else A + A.conj().T
+            c, r = tril_indices(n)
+            Ap = A[r, c]
+            x = rand(n).astype(dtype)
+            alpha = (DTYPES+COMPLEX_DTYPES)[mod(ind, 4)](2.5)
+
+            if ind > 3:
+                func, = get_blas_funcs(('hpr',), dtype=dtype)
+                y2 = alpha * x[:, None].dot(x[None, :].conj()) + A
+            else:
+                func, = get_blas_funcs(('spr',), dtype=dtype)
+                y2 = alpha * x[:, None].dot(x[None, :]) + A
+
+            y1 = func(n=n, alpha=alpha, ap=Ap, x=x)
+            y1f = zeros((3, 3), dtype=dtype)
+            y1f[r, c] = y1
+            y1f[c, r] = y1.conj() if ind > 3 else y1
+            assert_array_almost_equal(y1f, y2)
+
+    def test_spr2_hpr2(self):
+        seed(1234)
+        for ind, dtype in enumerate(DTYPES):
+            n = 3
+            A = rand(n, n).astype(dtype)
+            if ind > 1:
+                A += rand(n, n)*1j
+            A = A.astype(dtype)
+            A = A + A.T if ind < 2 else A + A.conj().T
+            c, r = tril_indices(n)
+            Ap = A[r, c]
+            x = rand(n).astype(dtype)
+            y = rand(n).astype(dtype)
+            alpha = dtype(2)
+
+            if ind > 1:
+                func, = get_blas_funcs(('hpr2',), dtype=dtype)
+            else:
+                func, = get_blas_funcs(('spr2',), dtype=dtype)
+
+            u = alpha.conj() * x[:, None].dot(y[None, :].conj())
+            y2 = A + u + u.conj().T
+            y1 = func(n=n, alpha=alpha, x=x, y=y, ap=Ap)
+            y1f = zeros((3, 3), dtype=dtype)
+            y1f[r, c] = y1
+            y1f[[1, 2, 2], [0, 0, 1]] = y1[[1, 3, 4]].conj()
+            assert_array_almost_equal(y1f, y2)
+
+    def test_tbmv(self):
+        seed(1234)
+        for ind, dtype in enumerate(DTYPES):
+            n = 10
+            k = 3
+            x = rand(n).astype(dtype)
+            A = zeros((n, n), dtype=dtype)
+            # Banded upper triangular array
+            for sup in range(k+1):
+                A[arange(n-sup), arange(sup, n)] = rand(n-sup)
+
+            # Add complex parts for c,z
+            if ind > 1:
+                A[nonzero(A)] += 1j * rand((k+1)*n-(k*(k+1)//2)).astype(dtype)
+
+            # Form the banded storage
+            Ab = zeros((k+1, n), dtype=dtype)
+            for row in range(k+1):
+                Ab[-row-1, row:] = diag(A, k=row)
+            func, = get_blas_funcs(('tbmv',), dtype=dtype)
+
+            y1 = func(k=k, a=Ab, x=x)
+            y2 = A.dot(x)
+            assert_array_almost_equal(y1, y2)
+
+            y1 = func(k=k, a=Ab, x=x, diag=1)
+            A[arange(n), arange(n)] = dtype(1)
+            y2 = A.dot(x)
+            assert_array_almost_equal(y1, y2)
+
+            y1 = func(k=k, a=Ab, x=x, diag=1, trans=1)
+            y2 = A.T.dot(x)
+            assert_array_almost_equal(y1, y2)
+
+            y1 = func(k=k, a=Ab, x=x, diag=1, trans=2)
+            y2 = A.conj().T.dot(x)
+            assert_array_almost_equal(y1, y2)
+
+    def test_tbsv(self):
+        seed(1234)
+        for ind, dtype in enumerate(DTYPES):
+            n = 6
+            k = 3
+            x = rand(n).astype(dtype)
+            A = zeros((n, n), dtype=dtype)
+            # Banded upper triangular array
+            for sup in range(k+1):
+                A[arange(n-sup), arange(sup, n)] = rand(n-sup)
+
+            # Add complex parts for c,z
+            if ind > 1:
+                A[nonzero(A)] += 1j * rand((k+1)*n-(k*(k+1)//2)).astype(dtype)
+
+            # Form the banded storage
+            Ab = zeros((k+1, n), dtype=dtype)
+            for row in range(k+1):
+                Ab[-row-1, row:] = diag(A, k=row)
+            func, = get_blas_funcs(('tbsv',), dtype=dtype)
+
+            y1 = func(k=k, a=Ab, x=x)
+            y2 = solve(A, x)
+            assert_array_almost_equal(y1, y2)
+
+            y1 = func(k=k, a=Ab, x=x, diag=1)
+            A[arange(n), arange(n)] = dtype(1)
+            y2 = solve(A, x)
+            assert_array_almost_equal(y1, y2)
+
+            y1 = func(k=k, a=Ab, x=x, diag=1, trans=1)
+            y2 = solve(A.T, x)
+            assert_array_almost_equal(y1, y2)
+
+            y1 = func(k=k, a=Ab, x=x, diag=1, trans=2)
+            y2 = solve(A.conj().T, x)
+            assert_array_almost_equal(y1, y2)
+
+    def test_tpmv(self):
+        seed(1234)
+        for ind, dtype in enumerate(DTYPES):
+            n = 10
+            x = rand(n).astype(dtype)
+            # Upper triangular array
+            A = triu(rand(n, n)) if ind < 2 else triu(rand(n, n)+rand(n, n)*1j)
+            # Form the packed storage
+            c, r = tril_indices(n)
+            Ap = A[r, c]
+            func, = get_blas_funcs(('tpmv',), dtype=dtype)
+
+            y1 = func(n=n, ap=Ap, x=x)
+            y2 = A.dot(x)
+            assert_array_almost_equal(y1, y2)
+
+            y1 = func(n=n, ap=Ap, x=x, diag=1)
+            A[arange(n), arange(n)] = dtype(1)
+            y2 = A.dot(x)
+            assert_array_almost_equal(y1, y2)
+
+            y1 = func(n=n, ap=Ap, x=x, diag=1, trans=1)
+            y2 = A.T.dot(x)
+            assert_array_almost_equal(y1, y2)
+
+            y1 = func(n=n, ap=Ap, x=x, diag=1, trans=2)
+            y2 = A.conj().T.dot(x)
+            assert_array_almost_equal(y1, y2)
+
+    def test_tpsv(self):
+        seed(1234)
+        for ind, dtype in enumerate(DTYPES):
+            n = 10
+            x = rand(n).astype(dtype)
+            # Upper triangular array
+            A = triu(rand(n, n)) if ind < 2 else triu(rand(n, n)+rand(n, n)*1j)
+            A += eye(n)
+            # Form the packed storage
+            c, r = tril_indices(n)
+            Ap = A[r, c]
+            func, = get_blas_funcs(('tpsv',), dtype=dtype)
+
+            y1 = func(n=n, ap=Ap, x=x)
+            y2 = solve(A, x)
+            assert_array_almost_equal(y1, y2)
+
+            y1 = func(n=n, ap=Ap, x=x, diag=1)
+            A[arange(n), arange(n)] = dtype(1)
+            y2 = solve(A, x)
+            assert_array_almost_equal(y1, y2)
+
+            y1 = func(n=n, ap=Ap, x=x, diag=1, trans=1)
+            y2 = solve(A.T, x)
+            assert_array_almost_equal(y1, y2)
+
+            y1 = func(n=n, ap=Ap, x=x, diag=1, trans=2)
+            y2 = solve(A.conj().T, x)
+            assert_array_almost_equal(y1, y2)
+
+    def test_trmv(self):
+        seed(1234)
+        for ind, dtype in enumerate(DTYPES):
+            n = 3
+            A = (rand(n, n)+eye(n)).astype(dtype)
+            x = rand(3).astype(dtype)
+            func, = get_blas_funcs(('trmv',), dtype=dtype)
+
+            y1 = func(a=A, x=x)
+            y2 = triu(A).dot(x)
+            assert_array_almost_equal(y1, y2)
+
+            y1 = func(a=A, x=x, diag=1)
+            A[arange(n), arange(n)] = dtype(1)
+            y2 = triu(A).dot(x)
+            assert_array_almost_equal(y1, y2)
+
+            y1 = func(a=A, x=x, diag=1, trans=1)
+            y2 = triu(A).T.dot(x)
+            assert_array_almost_equal(y1, y2)
+
+            y1 = func(a=A, x=x, diag=1, trans=2)
+            y2 = triu(A).conj().T.dot(x)
+            assert_array_almost_equal(y1, y2)
+
+    def test_trsv(self):
+        seed(1234)
+        for ind, dtype in enumerate(DTYPES):
+            n = 15
+            A = (rand(n, n)+eye(n)).astype(dtype)
+            x = rand(n).astype(dtype)
+            func, = get_blas_funcs(('trsv',), dtype=dtype)
+
+            y1 = func(a=A, x=x)
+            y2 = solve(triu(A), x)
+            assert_array_almost_equal(y1, y2)
+
+            y1 = func(a=A, x=x, lower=1)
+            y2 = solve(tril(A), x)
+            assert_array_almost_equal(y1, y2)
+
+            y1 = func(a=A, x=x, diag=1)
+            A[arange(n), arange(n)] = dtype(1)
+            y2 = solve(triu(A), x)
+            assert_array_almost_equal(y1, y2)
+
+            y1 = func(a=A, x=x, diag=1, trans=1)
+            y2 = solve(triu(A).T, x)
+            assert_array_almost_equal(y1, y2)
+
+            y1 = func(a=A, x=x, diag=1, trans=2)
+            y2 = solve(triu(A).conj().T, x)
+            assert_array_almost_equal(y1, y2)
+
+
+class TestFBLAS3Simple:
+
+    def test_gemm(self):
+        for p in 'sd':
+            f = getattr(fblas, p+'gemm', None)
+            if f is None:
+                continue
+            assert_array_almost_equal(f(3, [3], [-4]), [[-36]])
+            assert_array_almost_equal(f(3, [3], [-4], 3, [5]), [-21])
+        for p in 'cz':
+            f = getattr(fblas, p+'gemm', None)
+            if f is None:
+                continue
+            assert_array_almost_equal(f(3j, [3-4j], [-4]), [[-48-36j]])
+            assert_array_almost_equal(f(3j, [3-4j], [-4], 3, [5j]), [-48-21j])
+
+
+def _get_func(func, ps='sdzc'):
+    """Just a helper: return a specified BLAS function w/typecode."""
+    for p in ps:
+        f = getattr(fblas, p+func, None)
+        if f is None:
+            continue
+        yield f
+
+
+class TestBLAS3Symm:
+
+    def setup_method(self):
+        self.a = np.array([[1., 2.],
+                           [0., 1.]])
+        self.b = np.array([[1., 0., 3.],
+                           [0., -1., 2.]])
+        self.c = np.ones((2, 3))
+        self.t = np.array([[2., -1., 8.],
+                           [3., 0., 9.]])
+
+    def test_symm(self):
+        for f in _get_func('symm'):
+            res = f(a=self.a, b=self.b, c=self.c, alpha=1., beta=1.)
+            assert_array_almost_equal(res, self.t)
+
+            res = f(a=self.a.T, b=self.b, lower=1, c=self.c, alpha=1., beta=1.)
+            assert_array_almost_equal(res, self.t)
+
+            res = f(a=self.a, b=self.b.T, side=1, c=self.c.T,
+                    alpha=1., beta=1.)
+            assert_array_almost_equal(res, self.t.T)
+
+    def test_symm_wrong_side(self):
+        f = getattr(fblas, 'dsymm', None)
+        if f is not None:
+            assert_raises(Exception, f, **{'a': self.a, 'b': self.b,
+                                           'alpha': 1, 'side': 1})
+            # `side=1` means C <- B*A, hence the shapes of A and B must be
+            # compatible; otherwise an f2py exception is raised.
+
+    def test_symm_wrong_uplo(self):
+        """SYMM only considers the upper/lower part of A. Hence setting
+        wrong value for `lower` (default is lower=0, meaning upper triangle)
+        gives a wrong result.
+        """
+        f = getattr(fblas, 'dsymm', None)
+        if f is not None:
+            res = f(a=self.a, b=self.b, c=self.c, alpha=1., beta=1.)
+            assert np.allclose(res, self.t)
+
+            res = f(a=self.a, b=self.b, lower=1, c=self.c, alpha=1., beta=1.)
+            assert not np.allclose(res, self.t)
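+
+    # An illustrative sketch (not part of the upstream suite): the expected
+    # result self.t can be reproduced by symmetrizing the upper triangle of
+    # a by hand and forming A @ B + C explicitly.
+    def _symm_reference_sketch(self):
+        full_a = np.triu(self.a) + np.triu(self.a, 1).T
+        assert_allclose(full_a.dot(self.b) + self.c, self.t)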
+
+
+class TestBLAS3Syrk:
+    def setup_method(self):
+        self.a = np.array([[1., 0.],
+                           [0., -2.],
+                           [2., 3.]])
+        self.t = np.array([[1., 0., 2.],
+                           [0., 4., -6.],
+                           [2., -6., 13.]])
+        self.tt = np.array([[5., 6.],
+                            [6., 13.]])
+
+    def test_syrk(self):
+        for f in _get_func('syrk'):
+            c = f(a=self.a, alpha=1.)
+            assert_array_almost_equal(np.triu(c), np.triu(self.t))
+
+            c = f(a=self.a, alpha=1., lower=1)
+            assert_array_almost_equal(np.tril(c), np.tril(self.t))
+
+            c0 = np.ones(self.t.shape)
+            c = f(a=self.a, alpha=1., beta=1., c=c0)
+            assert_array_almost_equal(np.triu(c), np.triu(self.t+c0))
+
+            c = f(a=self.a, alpha=1., trans=1)
+            assert_array_almost_equal(np.triu(c), np.triu(self.tt))
+
+    # prints '0-th dimension must be fixed to 3 but got 5',
+    # FIXME: suppress?
+    # FIXME: how to catch the _fblas.error?
+    def test_syrk_wrong_c(self):
+        f = getattr(fblas, 'dsyrk', None)
+        if f is not None:
+            assert_raises(Exception, f, **{'a': self.a, 'alpha': 1.,
+                                           'c': np.ones((5, 8))})
+        # if C is supplied, it must have compatible dimensions
+
+
+class TestBLAS3Syr2k:
+    def setup_method(self):
+        self.a = np.array([[1., 0.],
+                           [0., -2.],
+                           [2., 3.]])
+        self.b = np.array([[0., 1.],
+                           [1., 0.],
+                           [0, 1.]])
+        self.t = np.array([[0., -1., 3.],
+                           [-1., 0., 0.],
+                           [3., 0., 6.]])
+        self.tt = np.array([[0., 1.],
+                            [1., 6]])
+
+    def test_syr2k(self):
+        for f in _get_func('syr2k'):
+            c = f(a=self.a, b=self.b, alpha=1.)
+            assert_array_almost_equal(np.triu(c), np.triu(self.t))
+
+            c = f(a=self.a, b=self.b, alpha=1., lower=1)
+            assert_array_almost_equal(np.tril(c), np.tril(self.t))
+
+            c0 = np.ones(self.t.shape)
+            c = f(a=self.a, b=self.b, alpha=1., beta=1., c=c0)
+            assert_array_almost_equal(np.triu(c), np.triu(self.t+c0))
+
+            c = f(a=self.a, b=self.b, alpha=1., trans=1)
+            assert_array_almost_equal(np.triu(c), np.triu(self.tt))
+
+    # prints '0-th dimension must be fixed to 3 but got 5', FIXME: suppress?
+    def test_syr2k_wrong_c(self):
+        f = getattr(fblas, 'dsyr2k', None)
+        if f is not None:
+            assert_raises(Exception, f, **{'a': self.a,
+                                           'b': self.b,
+                                           'alpha': 1.,
+                                           'c': np.zeros((15, 8))})
+        # if C is supplied, it must have compatible dimensions
+
+
+class TestSyHe:
+    """Quick and simple tests for (zc)-symm, syrk, syr2k."""
+
+    def setup_method(self):
+        self.sigma_y = np.array([[0., -1.j],
+                                 [1.j, 0.]])
+
+    def test_symm_zc(self):
+        for f in _get_func('symm', 'zc'):
+            # NB: symm reads only the upper triangle of a
+            res = f(a=self.sigma_y, b=self.sigma_y, alpha=1.)
+            assert_array_almost_equal(np.triu(res), np.diag([1, -1]))
+
+    def test_hemm_zc(self):
+        for f in _get_func('hemm', 'zc'):
+            # NB: hemm reads only the upper triangle of a
+            res = f(a=self.sigma_y, b=self.sigma_y, alpha=1.)
+            assert_array_almost_equal(np.triu(res), np.diag([1, 1]))
+
+    def test_syrk_zr(self):
+        for f in _get_func('syrk', 'zc'):
+            res = f(a=self.sigma_y, alpha=1.)
+            assert_array_almost_equal(np.triu(res), np.diag([-1, -1]))
+
+    def test_herk_zr(self):
+        for f in _get_func('herk', 'zc'):
+            res = f(a=self.sigma_y, alpha=1.)
+            assert_array_almost_equal(np.triu(res), np.diag([1, 1]))
+
+    def test_syr2k_zr(self):
+        for f in _get_func('syr2k', 'zc'):
+            res = f(a=self.sigma_y, b=self.sigma_y, alpha=1.)
+            assert_array_almost_equal(np.triu(res), 2.*np.diag([-1, -1]))
+
+    def test_her2k_zr(self):
+        for f in _get_func('her2k', 'zc'):
+            res = f(a=self.sigma_y, b=self.sigma_y, alpha=1.)
+            assert_array_almost_equal(np.triu(res), 2.*np.diag([1, 1]))
+
+
+class TestTRMM:
+    """Quick and simple tests for dtrmm."""
+
+    def setup_method(self):
+        self.a = np.array([[1., 2., ],
+                           [-2., 1.]])
+        self.b = np.array([[3., 4., -1.],
+                           [5., 6., -2.]])
+
+        self.a2 = np.array([[1, 1, 2, 3],
+                            [0, 1, 4, 5],
+                            [0, 0, 1, 6],
+                            [0, 0, 0, 1]], order="f")
+        self.b2 = np.array([[1, 4], [2, 5], [3, 6], [7, 8], [9, 10]],
+                           order="f")
+
+    @pytest.mark.parametrize("dtype_", DTYPES)
+    def test_side(self, dtype_):
+        trmm = get_blas_funcs("trmm", dtype=dtype_)
+        # Provide large A array that works for side=1 but not 0 (see gh-10841)
+        assert_raises(Exception, trmm, 1.0, self.a2, self.b2)
+        res = trmm(1.0, self.a2.astype(dtype_), self.b2.astype(dtype_),
+                   side=1)
+        k = self.b2.shape[1]
+        assert_allclose(res, self.b2 @ self.a2[:k, :k], rtol=0.,
+                        atol=100*np.finfo(dtype_).eps)
+
+    def test_ab(self):
+        f = getattr(fblas, 'dtrmm', None)
+        if f is not None:
+            result = f(1., self.a, self.b)
+            # default a is upper triangular
+            expected = np.array([[13., 16., -5.],
+                                 [5., 6., -2.]])
+            assert_array_almost_equal(result, expected)
+
+    def test_ab_lower(self):
+        f = getattr(fblas, 'dtrmm', None)
+        if f is not None:
+            result = f(1., self.a, self.b, lower=True)
+            expected = np.array([[3., 4., -1.],
+                                 [-1., -2., 0.]])  # now a is lower triangular
+            assert_array_almost_equal(result, expected)
+
+    def test_b_overwrites(self):
+        # BLAS dtrmm modifies B argument in-place.
+        # Here the default is to copy, but this can be overridden
+        f = getattr(fblas, 'dtrmm', None)
+        if f is not None:
+            for overwr in [True, False]:
+                bcopy = self.b.copy()
+                result = f(1., self.a, bcopy, overwrite_b=overwr)
+                # C-contiguous arrays are copied
+                assert_(bcopy.flags.f_contiguous is False and
+                        np.may_share_memory(bcopy, result) is False)
+                assert_equal(bcopy, self.b)
+
+            bcopy = np.asfortranarray(self.b.copy())  # or just transpose it
+            result = f(1., self.a, bcopy, overwrite_b=True)
+            assert_(bcopy.flags.f_contiguous is True and
+                    np.may_share_memory(bcopy, result) is True)
+            assert_array_almost_equal(bcopy, result)
+
+
+def test_trsm():
+    seed(1234)
+    for ind, dtype in enumerate(DTYPES):
+        tol = np.finfo(dtype).eps*1000
+        func, = get_blas_funcs(('trsm',), dtype=dtype)
+
+        # Test protection against size mismatches
+        A = rand(4, 5).astype(dtype)
+        B = rand(4, 4).astype(dtype)
+        alpha = dtype(1)
+        assert_raises(Exception, func, alpha, A, B)
+        assert_raises(Exception, func, alpha, A.T, B)
+
+        n = 8
+        m = 7
+        alpha = dtype(-2.5)
+        A = (rand(m, m) if ind < 2 else rand(m, m) + rand(m, m)*1j) + eye(m)
+        A = A.astype(dtype)
+        Au = triu(A)
+        Al = tril(A)
+        B1 = rand(m, n).astype(dtype)
+        B2 = rand(n, m).astype(dtype)
+
+        x1 = func(alpha=alpha, a=A, b=B1)
+        assert_equal(B1.shape, x1.shape)
+        x2 = solve(Au, alpha*B1)
+        assert_allclose(x1, x2, atol=tol)
+
+        x1 = func(alpha=alpha, a=A, b=B1, trans_a=1)
+        x2 = solve(Au.T, alpha*B1)
+        assert_allclose(x1, x2, atol=tol)
+
+        x1 = func(alpha=alpha, a=A, b=B1, trans_a=2)
+        x2 = solve(Au.conj().T, alpha*B1)
+        assert_allclose(x1, x2, atol=tol)
+
+        x1 = func(alpha=alpha, a=A, b=B1, diag=1)
+        Au[arange(m), arange(m)] = dtype(1)
+        x2 = solve(Au, alpha*B1)
+        assert_allclose(x1, x2, atol=tol)
+
+        x1 = func(alpha=alpha, a=A, b=B2, diag=1, side=1)
+        x2 = solve(Au.conj().T, alpha*B2.conj().T)
+        assert_allclose(x1, x2.conj().T, atol=tol)
+
+        x1 = func(alpha=alpha, a=A, b=B2, diag=1, side=1, lower=1)
+        Al[arange(m), arange(m)] = dtype(1)
+        x2 = solve(Al.conj().T, alpha*B2.conj().T)
+        assert_allclose(x1, x2.conj().T, atol=tol)
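+
+
+# trsm solves op(A) @ X = alpha*B (side=0) or X @ op(A) = alpha*B (side=1),
+# with op() chosen by trans_a and unit diagonals by diag.  A tiny standalone
+# sketch of the default (upper-triangular, left-side) path:
+def _demo_trsm_upper():
+    A = np.array([[2., 1.], [0., 3.]])
+    B = np.array([[4.], [6.]])
+    trsm, = get_blas_funcs(('trsm',), (A, B))
+    x = trsm(1.0, A, B)
+    assert_allclose(A @ x, B)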
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_cython_blas.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_cython_blas.py
new file mode 100644
index 00000000..2876c396
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_cython_blas.py
@@ -0,0 +1,120 @@
+import numpy as np
+from numpy.testing import (assert_allclose,
+                           assert_equal)
+import scipy.linalg.cython_blas as blas
+
+class TestDGEMM:
+
+    def test_transposes(self):
+
+        a = np.arange(12, dtype='d').reshape((3, 4))[:2,:2]
+        b = np.arange(1, 13, dtype='d').reshape((4, 3))[:2,:2]
+        c = np.empty((2, 4))[:2,:2]
+
+        blas._test_dgemm(1., a, b, 0., c)
+        assert_allclose(c, a.dot(b))
+
+        blas._test_dgemm(1., a.T, b, 0., c)
+        assert_allclose(c, a.T.dot(b))
+
+        blas._test_dgemm(1., a, b.T, 0., c)
+        assert_allclose(c, a.dot(b.T))
+
+        blas._test_dgemm(1., a.T, b.T, 0., c)
+        assert_allclose(c, a.T.dot(b.T))
+
+        blas._test_dgemm(1., a, b, 0., c.T)
+        assert_allclose(c, a.dot(b).T)
+
+        blas._test_dgemm(1., a.T, b, 0., c.T)
+        assert_allclose(c, a.T.dot(b).T)
+
+        blas._test_dgemm(1., a, b.T, 0., c.T)
+        assert_allclose(c, a.dot(b.T).T)
+
+        blas._test_dgemm(1., a.T, b.T, 0., c.T)
+        assert_allclose(c, a.T.dot(b.T).T)
+
+    def test_shapes(self):
+        a = np.arange(6, dtype='d').reshape((3, 2))
+        b = np.arange(-6, 2, dtype='d').reshape((2, 4))
+        c = np.empty((3, 4))
+
+        blas._test_dgemm(1., a, b, 0., c)
+        assert_allclose(c, a.dot(b))
+
+        blas._test_dgemm(1., b.T, a.T, 0., c.T)
+        assert_allclose(c, b.T.dot(a.T).T)
+
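+
+# The checks above instantiate the BLAS identity C <- alpha*A@B + beta*C with
+# beta = 0.  A small sketch of a nonzero-beta call, assuming _test_dgemm
+# forwards beta to dgemm as its argument order suggests (illustrative only):
+def _demo_dgemm_beta():
+    a = np.arange(4., dtype='d').reshape(2, 2)
+    b = np.arange(4., 8., dtype='d').reshape(2, 2)
+    c = np.ones((2, 2))
+    expected = 2.*a.dot(b) + 3.*c
+    blas._test_dgemm(2., a, b, 3., c)  # updates c in place
+    assert_allclose(c, expected)
+
+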
+class TestWfuncPointers:
+    """ Test the function pointers that are expected to fail on
+    Mac OS X without the additional entry statement in their definitions
+    in fblas_l1.pyf.src. """
+
+    def test_complex_args(self):
+
+        cx = np.array([.5 + 1.j, .25 - .375j, 12.5 - 4.j], np.complex64)
+        cy = np.array([.8 + 2.j, .875 - .625j, -1. + 2.j], np.complex64)
+
+        assert_allclose(blas._test_cdotc(cx, cy),
+                        -17.6468753815+21.3718757629j, rtol=1e-5)
+        assert_allclose(blas._test_cdotu(cx, cy),
+                        -6.11562538147+30.3156242371j, rtol=1e-5)
+
+        assert_equal(blas._test_icamax(cx), 3)
+
+        assert_allclose(blas._test_scasum(cx), 18.625, rtol=1e-5)
+        assert_allclose(blas._test_scnrm2(cx), 13.1796483994, rtol=1e-5)
+
+        assert_allclose(blas._test_cdotc(cx[::2], cy[::2]),
+                        -18.1000003815+21.2000007629j, rtol=1e-5)
+        assert_allclose(blas._test_cdotu(cx[::2], cy[::2]),
+                        -6.10000038147+30.7999992371j, rtol=1e-5)
+        assert_allclose(blas._test_scasum(cx[::2]), 18., rtol=1e-5)
+        assert_allclose(blas._test_scnrm2(cx[::2]), 13.1719398499, rtol=1e-5)
+
+    def test_double_args(self):
+
+        x = np.array([5., -3, -.5], np.float64)
+        y = np.array([2, 1, .5], np.float64)
+
+        assert_allclose(blas._test_dasum(x), 8.5, rtol=1e-10)
+        assert_allclose(blas._test_ddot(x, y), 6.75, rtol=1e-10)
+        assert_allclose(blas._test_dnrm2(x), 5.85234975815, rtol=1e-7)
+
+        assert_allclose(blas._test_dasum(x[::2]), 5.5, rtol=1e-10)
+        assert_allclose(blas._test_ddot(x[::2], y[::2]), 9.75, rtol=1e-10)
+        assert_allclose(blas._test_dnrm2(x[::2]), 5.0249376297, rtol=1e-7)
+
+        assert_equal(blas._test_idamax(x), 1)
+
+    def test_float_args(self):
+
+        x = np.array([5., -3, -.5], np.float32)
+        y = np.array([2, 1, .5], np.float32)
+
+        assert_equal(blas._test_isamax(x), 1)
+
+        assert_allclose(blas._test_sasum(x), 8.5, rtol=1e-5)
+        assert_allclose(blas._test_sdot(x, y), 6.75, rtol=1e-5)
+        assert_allclose(blas._test_snrm2(x), 5.85234975815, rtol=1e-5)
+
+        assert_allclose(blas._test_sasum(x[::2]), 5.5, rtol=1e-5)
+        assert_allclose(blas._test_sdot(x[::2], y[::2]), 9.75, rtol=1e-5)
+        assert_allclose(blas._test_snrm2(x[::2]), 5.0249376297, rtol=1e-5)
+
+    def test_double_complex_args(self):
+
+        cx = np.array([.5 + 1.j, .25 - .375j, 13. - 4.j], np.complex128)
+        cy = np.array([.875 + 2.j, .875 - .625j, -1. + 2.j], np.complex128)
+
+        assert_equal(blas._test_izamax(cx), 3)
+
+        assert_allclose(blas._test_zdotc(cx, cy), -18.109375+22.296875j,
+                        rtol=1e-10)
+        assert_allclose(blas._test_zdotu(cx, cy), -6.578125+31.390625j,
+                        rtol=1e-10)
+
+        assert_allclose(blas._test_zdotc(cx[::2], cy[::2]),
+                        -18.5625+22.125j, rtol=1e-10)
+        assert_allclose(blas._test_zdotu(cx[::2], cy[::2]),
+                        -6.5625+31.875j, rtol=1e-10)
+
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_cython_lapack.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_cython_lapack.py
new file mode 100644
index 00000000..247ab0d6
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_cython_lapack.py
@@ -0,0 +1,17 @@
+from numpy.testing import assert_allclose
+from scipy.linalg import cython_lapack as cython_lapack
+from scipy.linalg import lapack
+
+
+class TestLamch:
+
+    def test_slamch(self):
+        for c in [b'e', b's', b'b', b'p', b'n', b'r', b'm', b'u', b'l', b'o']:
+            assert_allclose(cython_lapack._test_slamch(c),
+                            lapack.slamch(c))
+
+    def test_dlamch(self):
+        for c in [b'e', b's', b'b', b'p', b'n', b'r', b'm', b'u', b'l', b'o']:
+            assert_allclose(cython_lapack._test_dlamch(c),
+                            lapack.dlamch(c))
+
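+
+# The cmach codes query machine parameters (b'e' relative epsilon, b'o'
+# overflow threshold, b'u' underflow threshold, ...).  A rough magnitude
+# sketch, hedged because LAPACK may define epsilon as either eps or eps/2
+# relative to numpy's convention:
+def _demo_lamch_magnitudes():
+    import numpy as np
+    eps = lapack.dlamch(b'e')
+    assert np.finfo(np.float64).eps/2 <= eps <= np.finfo(np.float64).eps
+    assert lapack.dlamch(b'o') > 1. > lapack.dlamch(b'u') > 0.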
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_cythonized_array_utils.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_cythonized_array_utils.py
new file mode 100644
index 00000000..19a0b39e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_cythonized_array_utils.py
@@ -0,0 +1,121 @@
+import numpy as np
+from scipy.linalg import bandwidth, issymmetric, ishermitian
+import pytest
+from pytest import raises
+
+
+def test_bandwidth_dtypes():
+    n = 5
+    for t in np.typecodes['All']:
+        A = np.zeros([n, n], dtype=t)
+        if t in 'eUVOMm':
+            raises(TypeError, bandwidth, A)
+        elif t == 'G':  # No-op: passes on Windows, fails on other platforms.
+            pass
+        else:
+            _ = bandwidth(A)
+
+
+def test_bandwidth_non2d_input():
+    A = np.array([1, 2, 3])
+    raises(ValueError, bandwidth, A)
+    A = np.array([[[1, 2, 3], [4, 5, 6]]])
+    raises(ValueError, bandwidth, A)
+
+
+@pytest.mark.parametrize('T', [x for x in np.typecodes['All']
+                               if x not in 'eGUVOMm'])
+def test_bandwidth_square_inputs(T):
+    n = 20
+    k = 4
+    R = np.zeros([n, n], dtype=T, order='F')
+    # form a banded matrix in place
+    R[[x for x in range(n)], [x for x in range(n)]] = 1
+    R[[x for x in range(n-k)], [x for x in range(k, n)]] = 1
+    R[[x for x in range(1, n)], [x for x in range(n-1)]] = 1
+    R[[x for x in range(k, n)], [x for x in range(n-k)]] = 1
+    assert bandwidth(R) == (k, k)
+
+
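+# bandwidth() reports the widths (lower, upper) of the outermost nonzero
+# sub- and superdiagonals, as the (k, k) checks above rely on.  A tiny
+# concrete illustration:
+def _demo_bandwidth_bidiagonal():
+    A = np.diag(np.ones(4)) + np.diag(np.ones(3), 1)  # upper bidiagonal
+    assert bandwidth(A) == (0, 1)
+
+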
+@pytest.mark.parametrize('T', [x for x in np.typecodes['All']
+                               if x not in 'eGUVOMm'])
+def test_bandwidth_rect_inputs(T):
+    n, m = 10, 20
+    k = 5
+    R = np.zeros([n, m], dtype=T, order='F')
+    # form a banded matrix in place
+    R[[x for x in range(n)], [x for x in range(n)]] = 1
+    R[[x for x in range(n-k)], [x for x in range(k, n)]] = 1
+    R[[x for x in range(1, n)], [x for x in range(n-1)]] = 1
+    R[[x for x in range(k, n)], [x for x in range(n-k)]] = 1
+    assert bandwidth(R) == (k, k)
+
+
+def test_issymmetric_ishermitian_dtypes():
+    n = 5
+    for t in np.typecodes['All']:
+        A = np.zeros([n, n], dtype=t)
+        if t in 'eUVOMm':
+            raises(TypeError, issymmetric, A)
+            raises(TypeError, ishermitian, A)
+        elif t == 'G':  # No-op: passes on Windows, fails on other platforms.
+            pass
+        else:
+            assert issymmetric(A)
+            assert ishermitian(A)
+
+
+def test_issymmetric_ishermitian_invalid_input():
+    A = np.array([1, 2, 3])
+    raises(ValueError, issymmetric, A)
+    raises(ValueError, ishermitian, A)
+    A = np.array([[[1, 2, 3], [4, 5, 6]]])
+    raises(ValueError, issymmetric, A)
+    raises(ValueError, ishermitian, A)
+    A = np.array([[1, 2, 3], [4, 5, 6]])
+    raises(ValueError, issymmetric, A)
+    raises(ValueError, ishermitian, A)
+
+
+def test_issymmetric_complex_decimals():
+    A = np.arange(1, 10).astype(complex).reshape(3, 3)
+    A += np.arange(-4, 5).astype(complex).reshape(3, 3)*1j
+    # make entries decimal
+    A /= np.pi
+    A = A + A.T
+    assert issymmetric(A)
+
+
+def test_ishermitian_complex_decimals():
+    A = np.arange(1, 10).astype(complex).reshape(3, 3)
+    A += np.arange(-4, 5).astype(complex).reshape(3, 3)*1j
+    # make entries decimal
+    A /= np.pi
+    A = A + A.T.conj()
+    assert ishermitian(A)
+
+
+def test_issymmetric_approximate_results():
+    n = 20
+    rng = np.random.RandomState(123456789)
+    x = rng.uniform(high=5., size=[n, n])
+    y = x @ x.T  # symmetric
+    p = rng.standard_normal([n, n])
+    z = p @ y @ p.T
+    assert issymmetric(z, atol=1e-10)
+    assert issymmetric(z, atol=1e-10, rtol=0.)
+    assert issymmetric(z, atol=0., rtol=1e-12)
+    assert issymmetric(z, atol=1e-13, rtol=1e-12)
+
+
+def test_ishermitian_approximate_results():
+    n = 20
+    rng = np.random.RandomState(987654321)
+    x = rng.uniform(high=5., size=[n, n])
+    y = x @ x.T  # symmetric
+    p = rng.standard_normal([n, n]) + rng.standard_normal([n, n])*1j
+    z = p @ y @ p.conj().T
+    assert ishermitian(z, atol=1e-10)
+    assert ishermitian(z, atol=1e-10, rtol=0.)
+    assert ishermitian(z, atol=0., rtol=1e-12)
+    assert ishermitian(z, atol=1e-13, rtol=1e-12)
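+
+
+# By default issymmetric()/ishermitian() compare entries exactly; atol/rtol
+# opt in to the approximate comparisons used above.  A minimal sketch:
+def _demo_issymmetric_tolerance():
+    A = np.array([[1., 2.], [2. + 1e-12, 1.]])
+    assert not issymmetric(A)          # exact comparison fails
+    assert issymmetric(A, atol=1e-10)  # symmetric to within tolerance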
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_decomp.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_decomp.py
new file mode 100644
index 00000000..ae2fe1a3
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_decomp.py
@@ -0,0 +1,2904 @@
+""" Test functions for linalg.decomp module
+
+"""
+__usage__ = """
+Build linalg:
+  python setup_linalg.py build
+Run tests if scipy is installed:
+  python -c 'import scipy;scipy.linalg.test()'
+"""
+
+import itertools
+import platform
+import sys
+
+import numpy as np
+from numpy.testing import (assert_equal, assert_almost_equal,
+                           assert_array_almost_equal, assert_array_equal,
+                           assert_, assert_allclose)
+
+import pytest
+from pytest import raises as assert_raises
+
+from scipy.linalg import (eig, eigvals, lu, svd, svdvals, cholesky, qr,
+                          schur, rsf2csf, lu_solve, lu_factor, solve, diagsvd,
+                          hessenberg, rq, eig_banded, eigvals_banded, eigh,
+                          eigvalsh, qr_multiply, qz, orth, ordqz,
+                          subspace_angles, hadamard, eigvalsh_tridiagonal,
+                          eigh_tridiagonal, null_space, cdf2rdf, LinAlgError)
+
+from scipy.linalg.lapack import (dgbtrf, dgbtrs, zgbtrf, zgbtrs, dsbev,
+                                 dsbevd, dsbevx, zhbevd, zhbevx,
+                                 get_lapack_funcs)
+
+from scipy.linalg._misc import norm
+from scipy.linalg._decomp_qz import _select_function
+from scipy.stats import ortho_group
+
+from numpy import (array, diag, ones, full, linalg, argsort, zeros, arange,
+                   float32, complex64, ravel, sqrt, iscomplex, shape, sort,
+                   sign, asarray, isfinite, ndarray, eye, dtype, triu, tril)
+
+from numpy.random import seed, random
+
+from scipy.linalg._testutils import assert_no_overwrite
+from scipy.sparse._sputils import matrix
+
+from scipy._lib._testutils import check_free_memory
+from scipy.linalg.blas import HAS_ILP64
+
+
+def _random_hermitian_matrix(n, posdef=False, dtype=float):
+    "Generate random sym/hermitian array of the given size n"
+    if dtype in COMPLEX_DTYPES:
+        A = np.random.rand(n, n) + np.random.rand(n, n)*1.0j
+        A = (A + A.conj().T)/2
+    else:
+        A = np.random.rand(n, n)
+        A = (A + A.T)/2
+
+    if posdef:
+        A += sqrt(2*n)*np.eye(n)
+
+    return A.astype(dtype)
+
+
+REAL_DTYPES = [np.float32, np.float64]
+COMPLEX_DTYPES = [np.complex64, np.complex128]
+DTYPES = REAL_DTYPES + COMPLEX_DTYPES
+
+
+def clear_fuss(ar, fuss_binary_bits=7):
+    """Clears trailing `fuss_binary_bits` of mantissa of a floating number"""
+    x = np.asanyarray(ar)
+    if np.iscomplexobj(x):
+        return clear_fuss(x.real) + 1j * clear_fuss(x.imag)
+
+    significant_binary_bits = np.finfo(x.dtype).nmant
+    x_mant, x_exp = np.frexp(x)
+    f = 2.0**(significant_binary_bits - fuss_binary_bits)
+    x_mant *= f
+    np.rint(x_mant, out=x_mant)
+    x_mant /= f
+
+    return np.ldexp(x_mant, x_exp)
+
+
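+# clear_fuss() rounds away trailing mantissa bits so that eigenvalues
+# computed along different code paths compare equal after sorting.  A
+# one-line sketch of the effect:
+def _demo_clear_fuss():
+    x = 1.0 + 2.0**-50  # differs from 1.0 only in trailing mantissa bits
+    assert x != 1.0 and clear_fuss(x) == 1.0
+
+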
+# XXX: This function should be available through numpy.testing
+def assert_dtype_equal(act, des):
+    if isinstance(act, ndarray):
+        act = act.dtype
+    else:
+        act = dtype(act)
+
+    if isinstance(des, ndarray):
+        des = des.dtype
+    else:
+        des = dtype(des)
+
+    assert_(act == des,
+            'dtype mismatch: "{}" (should be "{}")'.format(act, des))
+
+
+# XXX: This function should not be defined here, but somewhere in
+#      scipy.linalg namespace
+def symrand(dim_or_eigv):
+    """Return a random symmetric (Hermitian) matrix.
+
+    If 'dim_or_eigv' is an integer N, return a NxN matrix, with eigenvalues
+        uniformly distributed on (-1,1).
+
+    If 'dim_or_eigv' is  1-D real array 'a', return a matrix whose
+                      eigenvalues are 'a'.
+    """
+    if isinstance(dim_or_eigv, int):
+        dim = dim_or_eigv
+        d = random(dim)*2 - 1
+    elif (isinstance(dim_or_eigv, ndarray) and
+          len(dim_or_eigv.shape) == 1):
+        dim = dim_or_eigv.shape[0]
+        d = dim_or_eigv
+    else:
+        raise TypeError("input type not supported.")
+
+    v = ortho_group.rvs(dim)
+    h = v.T.conj() @ diag(d) @ v
+    # to avoid roundoff errors, symmetrize the matrix (again)
+    h = 0.5*(h.T+h)
+    return h
+
+
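+# A quick property sketch for symrand: given an eigenvalue array it should
+# return a matrix reproducing those eigenvalues (illustrative only):
+def _demo_symrand_eigs():
+    seed(0)
+    target = np.array([-0.5, 0.25, 1.0])
+    h = symrand(target)
+    assert_allclose(np.sort(np.linalg.eigvalsh(h)), np.sort(target),
+                    atol=1e-12)
+
+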
+def _complex_symrand(dim, dtype):
+    a1, a2 = symrand(dim), symrand(dim)
+    # add antisymmetric matrix as imag part
+    a = a1 + 1j*(triu(a2)-tril(a2))
+    return a.astype(dtype)
+
+
+class TestEigVals:
+
+    def test_simple(self):
+        a = [[1, 2, 3], [1, 2, 3], [2, 5, 6]]
+        w = eigvals(a)
+        exact_w = [(9+sqrt(93))/2, 0, (9-sqrt(93))/2]
+        assert_array_almost_equal(w, exact_w)
+
+    def test_simple_tr(self):
+        a = array([[1, 2, 3], [1, 2, 3], [2, 5, 6]], 'd').T
+        a = a.copy()
+        a = a.T
+        w = eigvals(a)
+        exact_w = [(9+sqrt(93))/2, 0, (9-sqrt(93))/2]
+        assert_array_almost_equal(w, exact_w)
+
+    def test_simple_complex(self):
+        a = [[1, 2, 3], [1, 2, 3], [2, 5, 6+1j]]
+        w = eigvals(a)
+        exact_w = [(9+1j+sqrt(92+6j))/2,
+                   0,
+                   (9+1j-sqrt(92+6j))/2]
+        assert_array_almost_equal(w, exact_w)
+
+    def test_finite(self):
+        a = [[1, 2, 3], [1, 2, 3], [2, 5, 6]]
+        w = eigvals(a, check_finite=False)
+        exact_w = [(9+sqrt(93))/2, 0, (9-sqrt(93))/2]
+        assert_array_almost_equal(w, exact_w)
+
+
+class TestEig:
+
+    def test_simple(self):
+        a = array([[1, 2, 3], [1, 2, 3], [2, 5, 6]])
+        w, v = eig(a)
+        exact_w = [(9+sqrt(93))/2, 0, (9-sqrt(93))/2]
+        v0 = array([1, 1, (1+sqrt(93)/3)/2])
+        v1 = array([3., 0, -1])
+        v2 = array([1, 1, (1-sqrt(93)/3)/2])
+        v0 = v0 / norm(v0)
+        v1 = v1 / norm(v1)
+        v2 = v2 / norm(v2)
+        assert_array_almost_equal(w, exact_w)
+        assert_array_almost_equal(v0, v[:, 0]*sign(v[0, 0]))
+        assert_array_almost_equal(v1, v[:, 1]*sign(v[0, 1]))
+        assert_array_almost_equal(v2, v[:, 2]*sign(v[0, 2]))
+        for i in range(3):
+            assert_array_almost_equal(a @ v[:, i], w[i]*v[:, i])
+        w, v = eig(a, left=1, right=0)
+        for i in range(3):
+            assert_array_almost_equal(a.T @ v[:, i], w[i]*v[:, i])
+
+    def test_simple_complex_eig(self):
+        a = array([[1, 2], [-2, 1]])
+        w, vl, vr = eig(a, left=1, right=1)
+        assert_array_almost_equal(w, array([1+2j, 1-2j]))
+        for i in range(2):
+            assert_array_almost_equal(a @ vr[:, i], w[i]*vr[:, i])
+        for i in range(2):
+            assert_array_almost_equal(a.conj().T @ vl[:, i],
+                                      w[i].conj()*vl[:, i])
+
+    def test_simple_complex(self):
+        a = array([[1, 2, 3], [1, 2, 3], [2, 5, 6+1j]])
+        w, vl, vr = eig(a, left=1, right=1)
+        for i in range(3):
+            assert_array_almost_equal(a @ vr[:, i], w[i]*vr[:, i])
+        for i in range(3):
+            assert_array_almost_equal(a.conj().T @ vl[:, i],
+                                      w[i].conj()*vl[:, i])
+
+    def test_gh_3054(self):
+        a = [[1]]
+        b = [[0]]
+        w, vr = eig(a, b, homogeneous_eigvals=True)
+        assert_allclose(w[1, 0], 0)
+        assert_(w[0, 0] != 0)
+        assert_allclose(vr, 1)
+
+        w, vr = eig(a, b)
+        assert_equal(w, np.inf)
+        assert_allclose(vr, 1)
+
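+    # In homogeneous form a generalized eigenvalue is the pair
+    # (alpha, beta) = (w[0, i], w[1, i]) with lambda = alpha/beta, so
+    # beta == 0 encodes the infinite eigenvalue checked above.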
+    def _check_gen_eig(self, A, B):
+        if B is not None:
+            A, B = asarray(A), asarray(B)
+            B0 = B
+        else:
+            A = asarray(A)
+            B0 = B
+            B = np.eye(*A.shape)
+        msg = "\n%r\n%r" % (A, B)
+
+        # Eigenvalues in homogeneous coordinates
+        w, vr = eig(A, B0, homogeneous_eigvals=True)
+        wt = eigvals(A, B0, homogeneous_eigvals=True)
+        val1 = A @ vr * w[1, :]
+        val2 = B @ vr * w[0, :]
+        for i in range(val1.shape[1]):
+            assert_allclose(val1[:, i], val2[:, i],
+                            rtol=1e-13, atol=1e-13, err_msg=msg)
+
+        if B0 is None:
+            assert_allclose(w[1, :], 1)
+            assert_allclose(wt[1, :], 1)
+
+        perm = np.lexsort(w)
+        permt = np.lexsort(wt)
+        assert_allclose(w[:, perm], wt[:, permt], atol=1e-7, rtol=1e-7,
+                        err_msg=msg)
+
+        length = np.empty(len(vr))
+
+        for i in range(len(vr)):
+            length[i] = norm(vr[:, i])
+
+        assert_allclose(length, np.ones(length.size), err_msg=msg,
+                        atol=1e-7, rtol=1e-7)
+
+        # Convert homogeneous coordinates
+        beta_nonzero = (w[1, :] != 0)
+        wh = w[0, beta_nonzero] / w[1, beta_nonzero]
+
+        # Eigenvalues in standard coordinates
+        w, vr = eig(A, B0)
+        wt = eigvals(A, B0)
+        val1 = A @ vr
+        val2 = B @ vr * w
+        res = val1 - val2
+        for i in range(res.shape[1]):
+            if np.all(isfinite(res[:, i])):
+                assert_allclose(res[:, i], 0,
+                                rtol=1e-13, atol=1e-13, err_msg=msg)
+
+        w_fin = w[isfinite(w)]
+        wt_fin = wt[isfinite(wt)]
+        perm = argsort(clear_fuss(w_fin))
+        permt = argsort(clear_fuss(wt_fin))
+        assert_allclose(w[perm], wt[permt],
+                        atol=1e-7, rtol=1e-7, err_msg=msg)
+
+        length = np.empty(len(vr))
+        for i in range(len(vr)):
+            length[i] = norm(vr[:, i])
+        assert_allclose(length, np.ones(length.size), err_msg=msg)
+
+        # Compare homogeneous and nonhomogeneous versions
+        assert_allclose(sort(wh), sort(w[np.isfinite(w)]))
+
+    @pytest.mark.xfail(reason="See gh-2254")
+    def test_singular(self):
+        # Example taken from
+        # https://web.archive.org/web/20040903121217/http://www.cs.umu.se/research/nla/singular_pairs/guptri/matlab.html
+        A = array([[22, 34, 31, 31, 17],
+                   [45, 45, 42, 19, 29],
+                   [39, 47, 49, 26, 34],
+                   [27, 31, 26, 21, 15],
+                   [38, 44, 44, 24, 30]])
+        B = array([[13, 26, 25, 17, 24],
+                   [31, 46, 40, 26, 37],
+                   [26, 40, 19, 25, 25],
+                   [16, 25, 27, 14, 23],
+                   [24, 35, 18, 21, 22]])
+
+        with np.errstate(all='ignore'):
+            self._check_gen_eig(A, B)
+
+    def test_falker(self):
+        # Test matrices giving some Nan generalized eigenvalues.
+        M = diag(array(([1, 0, 3])))
+        K = array(([2, -1, -1], [-1, 2, -1], [-1, -1, 2]))
+        D = array(([1, -1, 0], [-1, 1, 0], [0, 0, 0]))
+        Z = zeros((3, 3))
+        I3 = eye(3)
+        A = np.block([[I3, Z], [Z, -K]])
+        B = np.block([[Z, I3], [M, D]])
+
+        with np.errstate(all='ignore'):
+            self._check_gen_eig(A, B)
+
+    def test_bad_geneig(self):
+        # Ticket #709 (strange return values from DGGEV)
+
+        def matrices(omega):
+            c1 = -9 + omega**2
+            c2 = 2*omega
+            A = [[1, 0, 0, 0],
+                 [0, 1, 0, 0],
+                 [0, 0, c1, 0],
+                 [0, 0, 0, c1]]
+            B = [[0, 0, 1, 0],
+                 [0, 0, 0, 1],
+                 [1, 0, 0, -c2],
+                 [0, 1, c2, 0]]
+            return A, B
+
+        # With a buggy LAPACK, this can fail for different omega on different
+        # machines -- so we need to test several values
+        with np.errstate(all='ignore'):
+            for k in range(100):
+                A, B = matrices(omega=k*5./100)
+                self._check_gen_eig(A, B)
+
+    def test_make_eigvals(self):
+        # Step through all paths in _make_eigvals
+        seed(1234)
+        # Real eigenvalues
+        A = symrand(3)
+        self._check_gen_eig(A, None)
+        B = symrand(3)
+        self._check_gen_eig(A, B)
+        # Complex eigenvalues
+        A = random((3, 3)) + 1j*random((3, 3))
+        self._check_gen_eig(A, None)
+        B = random((3, 3)) + 1j*random((3, 3))
+        self._check_gen_eig(A, B)
+
+    def test_check_finite(self):
+        a = [[1, 2, 3], [1, 2, 3], [2, 5, 6]]
+        w, v = eig(a, check_finite=False)
+        exact_w = [(9+sqrt(93))/2, 0, (9-sqrt(93))/2]
+        v0 = array([1, 1, (1+sqrt(93)/3)/2])
+        v1 = array([3., 0, -1])
+        v2 = array([1, 1, (1-sqrt(93)/3)/2])
+        v0 = v0 / norm(v0)
+        v1 = v1 / norm(v1)
+        v2 = v2 / norm(v2)
+        assert_array_almost_equal(w, exact_w)
+        assert_array_almost_equal(v0, v[:, 0]*sign(v[0, 0]))
+        assert_array_almost_equal(v1, v[:, 1]*sign(v[0, 1]))
+        assert_array_almost_equal(v2, v[:, 2]*sign(v[0, 2]))
+        for i in range(3):
+            assert_array_almost_equal(a @ v[:, i], w[i]*v[:, i])
+
+    def test_not_square_error(self):
+        """Check that passing a non-square array raises a ValueError."""
+        A = np.arange(6).reshape(3, 2)
+        assert_raises(ValueError, eig, A)
+
+    def test_shape_mismatch(self):
+        """Check that passing arrays of with different shapes
+        raises a ValueError."""
+        A = eye(2)
+        B = np.arange(9.0).reshape(3, 3)
+        assert_raises(ValueError, eig, A, B)
+        assert_raises(ValueError, eig, B, A)
+
+
+class TestEigBanded:
+    def setup_method(self):
+        self.create_bandmat()
+
+    def create_bandmat(self):
+        """Create the full matrix `self.fullmat` and
+           the corresponding band matrix `self.bandmat`."""
+        N = 10
+        self.KL = 2   # number of subdiagonals (below the diagonal)
+        self.KU = 2   # number of superdiagonals (above the diagonal)
+
+        # symmetric band matrix
+        self.sym_mat = (diag(full(N, 1.0))
+                        + diag(full(N-1, -1.0), -1) + diag(full(N-1, -1.0), 1)
+                        + diag(full(N-2, -2.0), -2) + diag(full(N-2, -2.0), 2))
+
+        # hermitian band matrix
+        self.herm_mat = (diag(full(N, -1.0))
+                         + 1j*diag(full(N-1, 1.0), -1)
+                         - 1j*diag(full(N-1, 1.0), 1)
+                         + diag(full(N-2, -2.0), -2)
+                         + diag(full(N-2, -2.0), 2))
+
+        # general real band matrix
+        self.real_mat = (diag(full(N, 1.0))
+                         + diag(full(N-1, -1.0), -1) + diag(full(N-1, -3.0), 1)
+                         + diag(full(N-2, 2.0), -2) + diag(full(N-2, -2.0), 2))
+
+        # general complex band matrix
+        self.comp_mat = (1j*diag(full(N, 1.0))
+                         + diag(full(N-1, -1.0), -1)
+                         + 1j*diag(full(N-1, -3.0), 1)
+                         + diag(full(N-2, 2.0), -2)
+                         + diag(full(N-2, -2.0), 2))
+
+        # Eigenvalues and -vectors from linalg.eig
+        ew, ev = linalg.eig(self.sym_mat)
+        ew = ew.real
+        args = argsort(ew)
+        self.w_sym_lin = ew[args]
+        self.evec_sym_lin = ev[:, args]
+
+        ew, ev = linalg.eig(self.herm_mat)
+        ew = ew.real
+        args = argsort(ew)
+        self.w_herm_lin = ew[args]
+        self.evec_herm_lin = ev[:, args]
+
+        # Extract upper bands from symmetric and hermitian band matrices
+        # (for use in dsbevd, dsbevx, zhbevd, zhbevx
+        #  and their single precision versions)
+        LDAB = self.KU + 1
+        self.bandmat_sym = zeros((LDAB, N), dtype=float)
+        self.bandmat_herm = zeros((LDAB, N), dtype=complex)
+        for i in range(LDAB):
+            self.bandmat_sym[LDAB-i-1, i:N] = diag(self.sym_mat, i)
+            self.bandmat_herm[LDAB-i-1, i:N] = diag(self.herm_mat, i)
+
+        # Extract bands from general real and complex band matrix
+        # (for use in dgbtrf, dgbtrs and their single precision versions)
+        LDAB = 2*self.KL + self.KU + 1
+        self.bandmat_real = zeros((LDAB, N), dtype=float)
+        self.bandmat_real[2*self.KL, :] = diag(self.real_mat)  # diagonal
+        for i in range(self.KL):
+            # superdiagonals
+            self.bandmat_real[2*self.KL-1-i, i+1:N] = diag(self.real_mat, i+1)
+            # subdiagonals
+            self.bandmat_real[2*self.KL+1+i, 0:N-1-i] = diag(self.real_mat,
+                                                             -i-1)
+
+        self.bandmat_comp = zeros((LDAB, N), dtype=complex)
+        self.bandmat_comp[2*self.KL, :] = diag(self.comp_mat)  # diagonal
+        for i in range(self.KL):
+            # superdiagonals
+            self.bandmat_comp[2*self.KL-1-i, i+1:N] = diag(self.comp_mat, i+1)
+            # subdiagonals
+            self.bandmat_comp[2*self.KL+1+i, 0:N-1-i] = diag(self.comp_mat,
+                                                             -i-1)
+
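+        # In LAPACK band storage, diagonal d of the full matrix sits in row
+        # (KU - d) of the band array for the upper symmetric/Hermitian
+        # layout, and in row (KL + KU - d) for the general (gbtrf) layout,
+        # which keeps KL extra rows for pivoting fill-in.
+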
+        # right-hand sides for the linear systems A*x = b
+        self.b = 1.0*arange(N)
+        self.bc = self.b * (1 + 1j)
+
+    #####################################################################
+
+    def test_dsbev(self):
+        """Compare dsbev eigenvalues and eigenvectors with
+           the result of linalg.eig."""
+        w, evec, info = dsbev(self.bandmat_sym, compute_v=1)
+        evec_ = evec[:, argsort(w)]
+        assert_array_almost_equal(sort(w), self.w_sym_lin)
+        assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin))
+
+    def test_dsbevd(self):
+        """Compare dsbevd eigenvalues and eigenvectors with
+           the result of linalg.eig."""
+        w, evec, info = dsbevd(self.bandmat_sym, compute_v=1)
+        evec_ = evec[:, argsort(w)]
+        assert_array_almost_equal(sort(w), self.w_sym_lin)
+        assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin))
+
+    def test_dsbevx(self):
+        """Compare dsbevx eigenvalues and eigenvectors
+           with the result of linalg.eig."""
+        N, N = shape(self.sym_mat)
+        # Note: arguments 0.0, 0.0, range?
+        w, evec, num, ifail, info = dsbevx(self.bandmat_sym, 0.0, 0.0, 1, N,
+                                           compute_v=1, range=2)
+        evec_ = evec[:, argsort(w)]
+        assert_array_almost_equal(sort(w), self.w_sym_lin)
+        assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin))
+
+    def test_zhbevd(self):
+        """Compare zhbevd eigenvalues and eigenvectors
+           with the result of linalg.eig."""
+        w, evec, info = zhbevd(self.bandmat_herm, compute_v=1)
+        evec_ = evec[:, argsort(w)]
+        assert_array_almost_equal(sort(w), self.w_herm_lin)
+        assert_array_almost_equal(abs(evec_), abs(self.evec_herm_lin))
+
+    def test_zhbevx(self):
+        """Compare zhbevx eigenvalues and eigenvectors
+           with the result of linalg.eig."""
+        N, N = shape(self.herm_mat)
+        # Note: arguments 0.0, 0.0, range?
+        w, evec, num, ifail, info = zhbevx(self.bandmat_herm, 0.0, 0.0, 1, N,
+                                           compute_v=1, range=2)
+        evec_ = evec[:, argsort(w)]
+        assert_array_almost_equal(sort(w), self.w_herm_lin)
+        assert_array_almost_equal(abs(evec_), abs(self.evec_herm_lin))
+
+    def test_eigvals_banded(self):
+        """Compare eigenvalues of eigvals_banded with those of linalg.eig."""
+        w_sym = eigvals_banded(self.bandmat_sym)
+        w_sym = w_sym.real
+        assert_array_almost_equal(sort(w_sym), self.w_sym_lin)
+
+        w_herm = eigvals_banded(self.bandmat_herm)
+        w_herm = w_herm.real
+        assert_array_almost_equal(sort(w_herm), self.w_herm_lin)
+
+        # extracting eigenvalues with respect to an index range
+        ind1 = 2
+        ind2 = np.longlong(6)
+        w_sym_ind = eigvals_banded(self.bandmat_sym,
+                                   select='i', select_range=(ind1, ind2))
+        assert_array_almost_equal(sort(w_sym_ind),
+                                  self.w_sym_lin[ind1:ind2+1])
+        w_herm_ind = eigvals_banded(self.bandmat_herm,
+                                    select='i', select_range=(ind1, ind2))
+        assert_array_almost_equal(sort(w_herm_ind),
+                                  self.w_herm_lin[ind1:ind2+1])
+
+        # extracting eigenvalues with respect to a value range
+        v_lower = self.w_sym_lin[ind1] - 1.0e-5
+        v_upper = self.w_sym_lin[ind2] + 1.0e-5
+        w_sym_val = eigvals_banded(self.bandmat_sym,
+                                   select='v', select_range=(v_lower, v_upper))
+        assert_array_almost_equal(sort(w_sym_val),
+                                  self.w_sym_lin[ind1:ind2+1])
+
+        v_lower = self.w_herm_lin[ind1] - 1.0e-5
+        v_upper = self.w_herm_lin[ind2] + 1.0e-5
+        w_herm_val = eigvals_banded(self.bandmat_herm,
+                                    select='v',
+                                    select_range=(v_lower, v_upper))
+        assert_array_almost_equal(sort(w_herm_val),
+                                  self.w_herm_lin[ind1:ind2+1])
+
+        w_sym = eigvals_banded(self.bandmat_sym, check_finite=False)
+        w_sym = w_sym.real
+        assert_array_almost_equal(sort(w_sym), self.w_sym_lin)
+
+    def test_eig_banded(self):
+        """Compare eigenvalues and eigenvectors of eig_banded
+           with those of linalg.eig. """
+        w_sym, evec_sym = eig_banded(self.bandmat_sym)
+        evec_sym_ = evec_sym[:, argsort(w_sym.real)]
+        assert_array_almost_equal(sort(w_sym), self.w_sym_lin)
+        assert_array_almost_equal(abs(evec_sym_), abs(self.evec_sym_lin))
+
+        w_herm, evec_herm = eig_banded(self.bandmat_herm)
+        evec_herm_ = evec_herm[:, argsort(w_herm.real)]
+        assert_array_almost_equal(sort(w_herm), self.w_herm_lin)
+        assert_array_almost_equal(abs(evec_herm_), abs(self.evec_herm_lin))
+
+        # extracting eigenvalues with respect to an index range
+        ind1 = 2
+        ind2 = 6
+        w_sym_ind, evec_sym_ind = eig_banded(self.bandmat_sym,
+                                             select='i',
+                                             select_range=(ind1, ind2))
+        assert_array_almost_equal(sort(w_sym_ind),
+                                  self.w_sym_lin[ind1:ind2+1])
+        assert_array_almost_equal(abs(evec_sym_ind),
+                                  abs(self.evec_sym_lin[:, ind1:ind2+1]))
+
+        w_herm_ind, evec_herm_ind = eig_banded(self.bandmat_herm,
+                                               select='i',
+                                               select_range=(ind1, ind2))
+        assert_array_almost_equal(sort(w_herm_ind),
+                                  self.w_herm_lin[ind1:ind2+1])
+        assert_array_almost_equal(abs(evec_herm_ind),
+                                  abs(self.evec_herm_lin[:, ind1:ind2+1]))
+
+        # extracting eigenvalues with respect to a value range
+        v_lower = self.w_sym_lin[ind1] - 1.0e-5
+        v_upper = self.w_sym_lin[ind2] + 1.0e-5
+        w_sym_val, evec_sym_val = eig_banded(self.bandmat_sym,
+                                             select='v',
+                                             select_range=(v_lower, v_upper))
+        assert_array_almost_equal(sort(w_sym_val),
+                                  self.w_sym_lin[ind1:ind2+1])
+        assert_array_almost_equal(abs(evec_sym_val),
+                                  abs(self.evec_sym_lin[:, ind1:ind2+1]))
+
+        v_lower = self.w_herm_lin[ind1] - 1.0e-5
+        v_upper = self.w_herm_lin[ind2] + 1.0e-5
+        w_herm_val, evec_herm_val = eig_banded(self.bandmat_herm,
+                                               select='v',
+                                               select_range=(v_lower, v_upper))
+        assert_array_almost_equal(sort(w_herm_val),
+                                  self.w_herm_lin[ind1:ind2+1])
+        assert_array_almost_equal(abs(evec_herm_val),
+                                  abs(self.evec_herm_lin[:, ind1:ind2+1]))
+
+        w_sym, evec_sym = eig_banded(self.bandmat_sym, check_finite=False)
+        evec_sym_ = evec_sym[:, argsort(w_sym.real)]
+        assert_array_almost_equal(sort(w_sym), self.w_sym_lin)
+        assert_array_almost_equal(abs(evec_sym_), abs(self.evec_sym_lin))
+
+    def test_dgbtrf(self):
+        """Compare dgbtrf  LU factorisation with the LU factorisation result
+           of linalg.lu."""
+        M, N = shape(self.real_mat)
+        lu_symm_band, ipiv, info = dgbtrf(self.bandmat_real, self.KL, self.KU)
+
+        # extract matrix u from lu_symm_band
+        u = diag(lu_symm_band[2*self.KL, :])
+        for i in range(self.KL + self.KU):
+            u += diag(lu_symm_band[2*self.KL-1-i, i+1:N], i+1)
+
+        p_lin, l_lin, u_lin = lu(self.real_mat, permute_l=0)
+        assert_array_almost_equal(u, u_lin)
+
+    def test_zgbtrf(self):
+        """Compare zgbtrf  LU factorisation with the LU factorisation result
+           of linalg.lu."""
+        M, N = shape(self.comp_mat)
+        lu_symm_band, ipiv, info = zgbtrf(self.bandmat_comp, self.KL, self.KU)
+
+        # extract matrix u from lu_symm_band
+        u = diag(lu_symm_band[2*self.KL, :])
+        for i in range(self.KL + self.KU):
+            u += diag(lu_symm_band[2*self.KL-1-i, i+1:N], i+1)
+
+        p_lin, l_lin, u_lin = lu(self.comp_mat, permute_l=0)
+        assert_array_almost_equal(u, u_lin)
+
+    def test_dgbtrs(self):
+        """Compare dgbtrs  solutions for linear equation system  A*x = b
+           with solutions of linalg.solve."""
+
+        lu_symm_band, ipiv, info = dgbtrf(self.bandmat_real, self.KL, self.KU)
+        y, info = dgbtrs(lu_symm_band, self.KL, self.KU, self.b, ipiv)
+
+        y_lin = linalg.solve(self.real_mat, self.b)
+        assert_array_almost_equal(y, y_lin)
+
+    def test_zgbtrs(self):
+        """Compare zgbtrs  solutions for linear equation system  A*x = b
+           with solutions of linalg.solve."""
+
+        lu_symm_band, ipiv, info = zgbtrf(self.bandmat_comp, self.KL, self.KU)
+        y, info = zgbtrs(lu_symm_band, self.KL, self.KU, self.bc, ipiv)
+
+        y_lin = linalg.solve(self.comp_mat, self.bc)
+        assert_array_almost_equal(y, y_lin)
+
+
+class TestEigTridiagonal:
+    def setup_method(self):
+        self.create_trimat()
+
+    def create_trimat(self):
+        """Create the full matrix `self.fullmat`, `self.d`, and `self.e`."""
+        N = 10
+
+        # symmetric band matrix
+        self.d = full(N, 1.0)
+        self.e = full(N-1, -1.0)
+        self.full_mat = (diag(self.d) + diag(self.e, -1) + diag(self.e, 1))
+
+        ew, ev = linalg.eig(self.full_mat)
+        ew = ew.real
+        args = argsort(ew)
+        self.w = ew[args]
+        self.evec = ev[:, args]
+
+    def test_degenerate(self):
+        """Test error conditions."""
+        # Wrong sizes
+        assert_raises(ValueError, eigvalsh_tridiagonal, self.d, self.e[:-1])
+        # Must be real
+        assert_raises(TypeError, eigvalsh_tridiagonal, self.d, self.e * 1j)
+        # Bad driver
+        assert_raises(TypeError, eigvalsh_tridiagonal, self.d, self.e,
+                      lapack_driver=1.)
+        assert_raises(ValueError, eigvalsh_tridiagonal, self.d, self.e,
+                      lapack_driver='foo')
+        # Bad bounds
+        assert_raises(ValueError, eigvalsh_tridiagonal, self.d, self.e,
+                      select='i', select_range=(0, -1))
+
+    def test_eigvalsh_tridiagonal(self):
+        """Compare eigenvalues of eigvalsh_tridiagonal with those of eig."""
+        # can't use ?STERF with subselection
+        for driver in ('sterf', 'stev', 'stebz', 'stemr', 'auto'):
+            w = eigvalsh_tridiagonal(self.d, self.e, lapack_driver=driver)
+            assert_array_almost_equal(sort(w), self.w)
+
+        for driver in ('sterf', 'stev'):
+            assert_raises(ValueError, eigvalsh_tridiagonal, self.d, self.e,
+                          lapack_driver=driver, select='i',
+                          select_range=(0, 1))
+        for driver in ('stebz', 'stemr', 'auto'):
+            # extracting eigenvalues with respect to the full index range
+            w_ind = eigvalsh_tridiagonal(
+                self.d, self.e, select='i', select_range=(0, len(self.d)-1),
+                lapack_driver=driver)
+            assert_array_almost_equal(sort(w_ind), self.w)
+
+            # extracting eigenvalues with respect to an index range
+            ind1 = 2
+            ind2 = 6
+            w_ind = eigvalsh_tridiagonal(
+                self.d, self.e, select='i', select_range=(ind1, ind2),
+                lapack_driver=driver)
+            assert_array_almost_equal(sort(w_ind), self.w[ind1:ind2+1])
+
+            # extracting eigenvalues with respect to a value range
+            v_lower = self.w[ind1] - 1.0e-5
+            v_upper = self.w[ind2] + 1.0e-5
+            w_val = eigvalsh_tridiagonal(
+                self.d, self.e, select='v', select_range=(v_lower, v_upper),
+                lapack_driver=driver)
+            assert_array_almost_equal(sort(w_val), self.w[ind1:ind2+1])
+
+    def test_eigh_tridiagonal(self):
+        """Compare eigenvalues and eigenvectors of eigh_tridiagonal
+           with those of eig. """
+        # can't use ?STERF when eigenvectors are requested
+        assert_raises(ValueError, eigh_tridiagonal, self.d, self.e,
+                      lapack_driver='sterf')
+        for driver in ('stebz', 'stev', 'stemr', 'auto'):
+            w, evec = eigh_tridiagonal(self.d, self.e, lapack_driver=driver)
+            evec_ = evec[:, argsort(w)]
+            assert_array_almost_equal(sort(w), self.w)
+            assert_array_almost_equal(abs(evec_), abs(self.evec))
+
+        assert_raises(ValueError, eigh_tridiagonal, self.d, self.e,
+                      lapack_driver='stev', select='i', select_range=(0, 1))
+        for driver in ('stebz', 'stemr', 'auto'):
+            # extracting eigenvalues with respect to an index range
+            ind1 = 0
+            ind2 = len(self.d)-1
+            w, evec = eigh_tridiagonal(
+                self.d, self.e, select='i', select_range=(ind1, ind2),
+                lapack_driver=driver)
+            assert_array_almost_equal(sort(w), self.w)
+            assert_array_almost_equal(abs(evec), abs(self.evec))
+            ind1 = 2
+            ind2 = 6
+            w, evec = eigh_tridiagonal(
+                self.d, self.e, select='i', select_range=(ind1, ind2),
+                lapack_driver=driver)
+            assert_array_almost_equal(sort(w), self.w[ind1:ind2+1])
+            assert_array_almost_equal(abs(evec),
+                                      abs(self.evec[:, ind1:ind2+1]))
+
+            # extracting eigenvalues with respect to a value range
+            v_lower = self.w[ind1] - 1.0e-5
+            v_upper = self.w[ind2] + 1.0e-5
+            w, evec = eigh_tridiagonal(
+                self.d, self.e, select='v', select_range=(v_lower, v_upper),
+                lapack_driver=driver)
+            assert_array_almost_equal(sort(w), self.w[ind1:ind2+1])
+            assert_array_almost_equal(abs(evec),
+                                      abs(self.evec[:, ind1:ind2+1]))
+
+
+class TestEigh:
+    def setup_class(self):
+        seed(1234)
+
+    def test_wrong_inputs(self):
+        # Nonsquare a
+        assert_raises(ValueError, eigh, np.ones([1, 2]))
+        # Nonsquare b
+        assert_raises(ValueError, eigh, np.ones([2, 2]), np.ones([2, 1]))
+        # Incompatible a, b sizes
+        assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([2, 2]))
+        # Wrong type parameter for generalized problem
+        assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
+                      type=4)
+        # Both value and index subsets requested
+        assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
+                      subset_by_value=[1, 2], subset_by_index=[2, 4])
+        with np.testing.suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, "Keyword argument 'eigvals")
+            assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
+                          subset_by_value=[1, 2], eigvals=[2, 4])
+        # Invalid upper index spec
+        assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
+                      subset_by_index=[0, 4])
+        with np.testing.suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, "Keyword argument 'eigvals")
+            assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
+                          eigvals=[0, 4])
+        # Invalid lower index
+        assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
+                      subset_by_index=[-2, 2])
+        with np.testing.suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, "Keyword argument 'eigvals")
+            assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
+                          eigvals=[-2, 2])
+        # Invalid index spec #2
+        assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
+                      subset_by_index=[2, 0])
+        with np.testing.suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, "Keyword argument 'eigvals")
+            assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
+                          subset_by_index=[2, 0])
+        # Invalid value spec
+        assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
+                      subset_by_value=[2, 0])
+        # Invalid driver name
+        assert_raises(ValueError, eigh, np.ones([2, 2]), driver='wrong')
+        # Generalized driver selection without b
+        assert_raises(ValueError, eigh, np.ones([3, 3]), None, driver='gvx')
+        # Standard driver with b
+        assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
+                      driver='evr', turbo=False)
+        # Subset request from invalid driver
+        assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
+                      driver='gvd', subset_by_index=[1, 2], turbo=False)
+        with np.testing.suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, "'eigh' keyword argument 'eigvals")
+            assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]),
+                          driver='gvd', subset_by_index=[1, 2], turbo=False)
+
+    def test_nonpositive_b(self):
+        assert_raises(LinAlgError, eigh, np.ones([3, 3]), np.ones([3, 3]))
+
+    # index based subsets are done in the legacy test_eigh()
+    def test_value_subsets(self):
+        for ind, dt in enumerate(DTYPES):
+
+            a = _random_hermitian_matrix(20, dtype=dt)
+            w, v = eigh(a, subset_by_value=[-2, 2])
+            assert_equal(v.shape[1], len(w))
+            assert all((w > -2) & (w < 2))
+
+            b = _random_hermitian_matrix(20, posdef=True, dtype=dt)
+            w, v = eigh(a, b, subset_by_value=[-2, 2])
+            assert_equal(v.shape[1], len(w))
+            assert all((w > -2) & (w < 2))
+
+    def test_eigh_integer(self):
+        a = array([[1, 2], [2, 7]])
+        b = array([[3, 1], [1, 5]])
+        w, z = eigh(a)
+        w, z = eigh(a, b)
+
+    def test_eigh_of_sparse(self):
+        # This tests the rejection of inputs that eigh cannot currently handle.
+        import scipy.sparse
+        a = scipy.sparse.identity(2).tocsc()
+        b = np.atleast_2d(a)
+        assert_raises(ValueError, eigh, a)
+        assert_raises(ValueError, eigh, b)
+
+    @pytest.mark.parametrize('dtype_', DTYPES)
+    @pytest.mark.parametrize('driver', ("ev", "evd", "evr", "evx"))
+    def test_various_drivers_standard(self, driver, dtype_):
+        a = _random_hermitian_matrix(n=20, dtype=dtype_)
+        w, v = eigh(a, driver=driver)
+        assert_allclose(a @ v - (v * w), 0.,
+                        atol=1000*np.finfo(dtype_).eps,
+                        rtol=0.)
+
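+    # LAPACK's generalized symmetric drivers solve three problem types:
+    #   type 1: A v = w B v,   type 2: A B v = w v,   type 3: B A v = w v;
+    # the residual checks below verify each case.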
+    @pytest.mark.parametrize('type', (1, 2, 3))
+    @pytest.mark.parametrize('driver', ("gv", "gvd", "gvx"))
+    def test_various_drivers_generalized(self, driver, type):
+        atol = np.spacing(5000.)
+        a = _random_hermitian_matrix(20)
+        b = _random_hermitian_matrix(20, posdef=True)
+        w, v = eigh(a=a, b=b, driver=driver, type=type)
+        if type == 1:
+            assert_allclose(a @ v - w*(b @ v), 0., atol=atol, rtol=0.)
+        elif type == 2:
+            assert_allclose(a @ b @ v - v * w, 0., atol=atol, rtol=0.)
+        else:
+            assert_allclose(b @ a @ v - v * w, 0., atol=atol, rtol=0.)
+
+    def test_eigvalsh_new_args(self):
+        a = _random_hermitian_matrix(5)
+        w = eigvalsh(a, subset_by_index=[1, 2])
+        assert_equal(len(w), 2)
+
+        w2 = eigvalsh(a, subset_by_index=[1, 2])
+        assert_equal(len(w2), 2)
+        assert_allclose(w, w2)
+
+        b = np.diag([1, 1.2, 1.3, 1.5, 2])
+        w3 = eigvalsh(b, subset_by_value=[1, 1.4])
+        assert_equal(len(w3), 2)
+        assert_allclose(w3, np.array([1.2, 1.3]))
+
+    @pytest.mark.parametrize("method", [eigh, eigvalsh])
+    def test_deprecation_warnings(self, method):
+        with pytest.warns(DeprecationWarning,
+                          match="Keyword argument 'turbo'"):
+            method(np.zeros((2, 2)), turbo=True)
+        with pytest.warns(DeprecationWarning,
+                          match="Keyword argument 'eigvals'"):
+            method(np.zeros((2, 2)), eigvals=[0, 1])
+
+    def test_deprecation_results(self):
+        a = _random_hermitian_matrix(3)
+        b = _random_hermitian_matrix(3, posdef=True)
+
+        # check turbo gives same result as driver='gvd'
+        with np.testing.suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, "Keyword argument 'turbo'")
+            w_dep, v_dep = eigh(a, b, turbo=True)
+        w, v = eigh(a, b, driver='gvd')
+        assert_allclose(w_dep, w)
+        assert_allclose(v_dep, v)
+
+        # check eigvals gives the same result as subset_by_index
+        with np.testing.suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, "Keyword argument 'eigvals'")
+            w_dep, v_dep = eigh(a, eigvals=[0, 1])
+        w, v = eigh(a, subset_by_index=[0, 1])
+        assert_allclose(w_dep, w)
+        assert_allclose(v_dep, v)
+
+
+class TestLU:
+    def setup_method(self):
+        self.a = array([[1, 2, 3], [1, 2, 3], [2, 5, 6]])
+        self.ca = array([[1, 2, 3], [1, 2, 3], [2, 5j, 6]])
+        # Those matrices are more robust to detect problems in permutation
+        # matrices than the ones above
+        self.b = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+        self.cb = array([[1j, 2j, 3j], [4j, 5j, 6j], [7j, 8j, 9j]])
+
+        # Rectangular matrices
+        self.hrect = array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 12, 12]])
+        self.chrect = 1.j * array([[1, 2, 3, 4],
+                                   [5, 6, 7, 8],
+                                   [9, 10, 12, 12]])
+
+        self.vrect = array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 12, 12]])
+        self.cvrect = 1.j * array([[1, 2, 3],
+                                   [4, 5, 6],
+                                   [7, 8, 9],
+                                   [10, 12, 12]])
+
+        # Medium sizes matrices
+        self.med = random((30, 40))
+        self.cmed = random((30, 40)) + 1.j * random((30, 40))
+
+    def _test_common(self, data):
+        p, l, u = lu(data)
+        assert_array_almost_equal(p @ l @ u, data)
+        pl, u = lu(data, permute_l=1)
+        assert_array_almost_equal(pl @ u, data)
+
+    def _test_common_lu_factor(self, data):
+        l_and_u1, piv1 = lu_factor(data)
+        (getrf,) = get_lapack_funcs(("getrf",), (data,))
+        l_and_u2, piv2, _ = getrf(data, overwrite_a=False)
+        assert_array_equal(l_and_u1, l_and_u2)
+        assert_array_equal(piv1, piv2)
+
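+    # lu_factor returns `piv` as sequential row swaps (row i was swapped
+    # with row piv[i]).  A minimal sketch of expanding that into a row
+    # permutation for square inputs, so that data[perm] agrees with l @ u
+    # up to rounding (hypothetical helper, not used by the tests):
+    @staticmethod
+    def _piv_to_perm(piv):
+        perm = np.arange(len(piv))
+        for i, p in enumerate(piv):
+            perm[i], perm[p] = perm[p], perm[i]
+        return perm
+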
+    # Simple tests.
+    # For lu_factor gives a LinAlgWarning because these matrices are singular
+    def test_simple(self):
+        self._test_common(self.a)
+
+    def test_simple_complex(self):
+        self._test_common(self.ca)
+
+    def test_simple2(self):
+        self._test_common(self.b)
+
+    def test_simple2_complex(self):
+        self._test_common(self.cb)
+
+    # rectangular matrices tests
+    def test_hrectangular(self):
+        self._test_common(self.hrect)
+        self._test_common_lu_factor(self.hrect)
+
+    def test_vrectangular(self):
+        self._test_common(self.vrect)
+        self._test_common_lu_factor(self.vrect)
+
+    def test_hrectangular_complex(self):
+        self._test_common(self.chrect)
+        self._test_common_lu_factor(self.chrect)
+
+    def test_vrectangular_complex(self):
+        self._test_common(self.cvrect)
+        self._test_common_lu_factor(self.cvrect)
+
+    # Bigger matrices
+    def test_medium1(self):
+        """Check lu decomposition on medium size, rectangular matrix."""
+        self._test_common(self.med)
+        self._test_common_lu_factor(self.med)
+
+    def test_medium1_complex(self):
+        """Check lu decomposition on medium size, rectangular matrix."""
+        self._test_common(self.cmed)
+        self._test_common_lu_factor(self.cmed)
+
+    def test_check_finite(self):
+        p, l, u = lu(self.a, check_finite=False)
+        assert_array_almost_equal(p @ l @ u, self.a)
+
+    def test_simple_known(self):
+        # Ticket #1458
+        for order in ['C', 'F']:
+            A = np.array([[2, 1], [0, 1.]], order=order)
+            LU, P = lu_factor(A)
+            assert_array_almost_equal(LU, np.array([[2, 1], [0, 1]]))
+            assert_array_equal(P, np.array([0, 1]))
+
+
+class TestLUSingle(TestLU):
+    """LU testers for single precision, real and double"""
+
+    def setup_method(self):
+        TestLU.setup_method(self)
+
+        self.a = self.a.astype(float32)
+        self.ca = self.ca.astype(complex64)
+        self.b = self.b.astype(float32)
+        self.cb = self.cb.astype(complex64)
+
+        self.hrect = self.hrect.astype(float32)
+        self.chrect = self.chrect.astype(complex64)
+
+        self.vrect = self.vrect.astype(float32)
+        self.cvrect = self.cvrect.astype(complex64)
+
+        self.med = self.med.astype(float32)
+        self.cmed = self.cmed.astype(complex64)
+
+
+class TestLUSolve:
+    def setup_method(self):
+        seed(1234)
+
+    def test_lu(self):
+        a0 = random((10, 10))
+        b = random((10,))
+
+        for order in ['C', 'F']:
+            a = np.array(a0, order=order)
+            x1 = solve(a, b)
+            lu_a = lu_factor(a)
+            x2 = lu_solve(lu_a, b)
+            assert_array_almost_equal(x1, x2)
+
+    def test_check_finite(self):
+        a = random((10, 10))
+        b = random((10,))
+        x1 = solve(a, b)
+        lu_a = lu_factor(a, check_finite=False)
+        x2 = lu_solve(lu_a, b, check_finite=False)
+        assert_array_almost_equal(x1, x2)
+
+
+class TestSVD_GESDD:
+    def setup_method(self):
+        self.lapack_driver = 'gesdd'
+        seed(1234)
+
+    def test_degenerate(self):
+        assert_raises(TypeError, svd, [[1.]], lapack_driver=1.)
+        assert_raises(ValueError, svd, [[1.]], lapack_driver='foo')
+
+    def test_simple(self):
+        a = [[1, 2, 3], [1, 20, 3], [2, 5, 6]]
+        for full_matrices in (True, False):
+            u, s, vh = svd(a, full_matrices=full_matrices,
+                           lapack_driver=self.lapack_driver)
+            assert_array_almost_equal(u.T @ u, eye(3))
+            assert_array_almost_equal(vh.T @ vh, eye(3))
+            sigma = zeros((u.shape[0], vh.shape[0]), s.dtype.char)
+            for i in range(len(s)):
+                sigma[i, i] = s[i]
+            assert_array_almost_equal(u @ sigma @ vh, a)
+
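+    # Note: the sigma-building loops in these tests are equivalent to
+    # diagsvd(s, M, N) from scipy.linalg (imported above) for the matching
+    # sigma shape.
+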
+    def test_simple_singular(self):
+        a = [[1, 2, 3], [1, 2, 3], [2, 5, 6]]
+        for full_matrices in (True, False):
+            u, s, vh = svd(a, full_matrices=full_matrices,
+                           lapack_driver=self.lapack_driver)
+            assert_array_almost_equal(u.T @ u, eye(3))
+            assert_array_almost_equal(vh.T @ vh, eye(3))
+            sigma = zeros((u.shape[0], vh.shape[0]), s.dtype.char)
+            for i in range(len(s)):
+                sigma[i, i] = s[i]
+            assert_array_almost_equal(u @ sigma @ vh, a)
+
+    def test_simple_underdet(self):
+        a = [[1, 2, 3], [4, 5, 6]]
+        for full_matrices in (True, False):
+            u, s, vh = svd(a, full_matrices=full_matrices,
+                           lapack_driver=self.lapack_driver)
+            assert_array_almost_equal(u.T @ u, eye(u.shape[0]))
+            sigma = zeros((u.shape[0], vh.shape[0]), s.dtype.char)
+            for i in range(len(s)):
+                sigma[i, i] = s[i]
+            assert_array_almost_equal(u @ sigma @ vh, a)
+
+    def test_simple_overdet(self):
+        a = [[1, 2], [4, 5], [3, 4]]
+        for full_matrices in (True, False):
+            u, s, vh = svd(a, full_matrices=full_matrices,
+                           lapack_driver=self.lapack_driver)
+            assert_array_almost_equal(u.T @ u, eye(u.shape[1]))
+            assert_array_almost_equal(vh.T @ vh, eye(2))
+            sigma = zeros((u.shape[1], vh.shape[0]), s.dtype.char)
+            for i in range(len(s)):
+                sigma[i, i] = s[i]
+            assert_array_almost_equal(u @ sigma @ vh, a)
+
+    def test_random(self):
+        n = 20
+        m = 15
+        for _ in range(3):
+            for a in [random([n, m]), random([m, n])]:
+                for full_matrices in (True, False):
+                    u, s, vh = svd(a, full_matrices=full_matrices,
+                                   lapack_driver=self.lapack_driver)
+                    assert_array_almost_equal(u.T @ u, eye(u.shape[1]))
+                    assert_array_almost_equal(vh @ vh.T, eye(vh.shape[0]))
+                    sigma = zeros((u.shape[1], vh.shape[0]), s.dtype.char)
+                    for i in range(len(s)):
+                        sigma[i, i] = s[i]
+                    assert_array_almost_equal(u @ sigma @ vh, a)
+
+    def test_simple_complex(self):
+        a = [[1, 2, 3], [1, 2j, 3], [2, 5, 6]]
+        for full_matrices in (True, False):
+            u, s, vh = svd(a, full_matrices=full_matrices,
+                           lapack_driver=self.lapack_driver)
+            assert_array_almost_equal(u.conj().T @ u, eye(u.shape[1]))
+            assert_array_almost_equal(vh.conj().T @ vh, eye(vh.shape[0]))
+            sigma = zeros((u.shape[0], vh.shape[0]), s.dtype.char)
+            for i in range(len(s)):
+                sigma[i, i] = s[i]
+            assert_array_almost_equal(u @ sigma @ vh, a)
+
+    def test_random_complex(self):
+        n = 20
+        m = 15
+        for i in range(3):
+            for full_matrices in (True, False):
+                for a in [random([n, m]), random([m, n])]:
+                    a = a + 1j*random(list(a.shape))
+                    u, s, vh = svd(a, full_matrices=full_matrices,
+                                   lapack_driver=self.lapack_driver)
+                    assert_array_almost_equal(u.conj().T @ u,
+                                              eye(u.shape[1]))
+                    # This fails for the wide ([m, n]) input when
+                    # full_matrices=False, since vh is then rectangular
+                    # and vh.conj().T @ vh is rank-deficient:
+                    # assert_array_almost_equal(vh.conj().T @ vh,
+                    #                        eye(len(vh),dtype=vh.dtype.char))
+                    sigma = zeros((u.shape[1], vh.shape[0]), s.dtype.char)
+                    for i in range(len(s)):
+                        sigma[i, i] = s[i]
+                    assert_array_almost_equal(u @ sigma @ vh, a)
+
+    def test_crash_1580(self):
+        sizes = [(13, 23), (30, 50), (60, 100)]
+        np.random.seed(1234)
+        for sz in sizes:
+            for dt in [np.float32, np.float64, np.complex64, np.complex128]:
+                a = np.random.rand(*sz).astype(dt)
+                # should not crash
+                svd(a, lapack_driver=self.lapack_driver)
+
+    def test_check_finite(self):
+        a = [[1, 2, 3], [1, 20, 3], [2, 5, 6]]
+        u, s, vh = svd(a, check_finite=False, lapack_driver=self.lapack_driver)
+        assert_array_almost_equal(u.T @ u, eye(3))
+        assert_array_almost_equal(vh.T @ vh, eye(3))
+        sigma = zeros((u.shape[0], vh.shape[0]), s.dtype.char)
+        for i in range(len(s)):
+            sigma[i, i] = s[i]
+        assert_array_almost_equal(u @ sigma @ vh, a)
+
+    def test_gh_5039(self):
+        # This is a smoke test for https://github.com/scipy/scipy/issues/5039
+        #
+        # The following is reported to raise "ValueError: On entry to DGESDD
+        # parameter number 12 had an illegal value".
+        # `interp1d([1,2,3,4], [1,2,3,4], kind='cubic')`
+        # This is reported to only show up on LAPACK 3.0.3.
+        #
+        # The matrix below is taken from the call to
+        # `B = _fitpack._bsplmat(order, xk)` in interpolate._find_smoothest
+        b = np.array(
+            [[0.16666667, 0.66666667, 0.16666667, 0., 0., 0.],
+             [0., 0.16666667, 0.66666667, 0.16666667, 0., 0.],
+             [0., 0., 0.16666667, 0.66666667, 0.16666667, 0.],
+             [0., 0., 0., 0.16666667, 0.66666667, 0.16666667]])
+        svd(b, lapack_driver=self.lapack_driver)
+
+    @pytest.mark.skipif(not HAS_ILP64, reason="64-bit LAPACK required")
+    @pytest.mark.slow
+    def test_large_matrix(self):
+        check_free_memory(free_mb=17000)
+        A = np.zeros([1, 2**31], dtype=np.float32)
+        A[0, -1] = 1
+        u, s, vh = svd(A, full_matrices=False)
+        assert_allclose(s[0], 1.0)
+        assert_allclose(u[0, 0] * vh[0, -1], 1.0)
+
+
+class TestSVD_GESVD(TestSVD_GESDD):
+    def setup_method(self):
+        self.lapack_driver = 'gesvd'
+        seed(1234)
+
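+
+# Editorial sketch, not part of the original suite: the reconstruction check
+# repeated throughout the SVD tests above -- embed the singular values s into
+# a rectangular Sigma and verify u @ Sigma @ vh recovers a.  Assumes the
+# module-level imports earlier in this file (svd, zeros, random, np).
+def _sketch_svd_reconstruction():  # hypothetical helper, never called
+    a = random((5, 3))
+    u, s, vh = svd(a, full_matrices=True)
+    sigma = zeros((u.shape[1], vh.shape[0]), s.dtype.char)
+    sigma[:len(s), :len(s)] = np.diag(s)
+    assert np.allclose(u @ sigma @ vh, a)
+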
+
+class TestSVDVals:
+
+    def test_empty(self):
+        for a in [[]], np.empty((2, 0)), np.ones((0, 3)):
+            s = svdvals(a)
+            assert_equal(s, np.empty(0))
+
+    def test_simple(self):
+        a = [[1, 2, 3], [1, 2, 3], [2, 5, 6]]
+        s = svdvals(a)
+        assert_(len(s) == 3)
+        assert_(s[0] >= s[1] >= s[2])
+
+    def test_simple_underdet(self):
+        a = [[1, 2, 3], [4, 5, 6]]
+        s = svdvals(a)
+        assert_(len(s) == 2)
+        assert_(s[0] >= s[1])
+
+    def test_simple_overdet(self):
+        a = [[1, 2], [4, 5], [3, 4]]
+        s = svdvals(a)
+        assert_(len(s) == 2)
+        assert_(s[0] >= s[1])
+
+    def test_simple_complex(self):
+        a = [[1, 2, 3], [1, 20, 3j], [2, 5, 6]]
+        s = svdvals(a)
+        assert_(len(s) == 3)
+        assert_(s[0] >= s[1] >= s[2])
+
+    def test_simple_underdet_complex(self):
+        a = [[1, 2, 3], [4, 5j, 6]]
+        s = svdvals(a)
+        assert_(len(s) == 2)
+        assert_(s[0] >= s[1])
+
+    def test_simple_overdet_complex(self):
+        a = [[1, 2], [4, 5], [3j, 4]]
+        s = svdvals(a)
+        assert_(len(s) == 2)
+        assert_(s[0] >= s[1])
+
+    def test_check_finite(self):
+        a = [[1, 2, 3], [1, 2, 3], [2, 5, 6]]
+        s = svdvals(a, check_finite=False)
+        assert_(len(s) == 3)
+        assert_(s[0] >= s[1] >= s[2])
+
+    @pytest.mark.slow
+    def test_crash_2609(self):
+        np.random.seed(1234)
+        a = np.random.rand(1500, 2800)
+        # Shouldn't crash:
+        svdvals(a)
+
+
+class TestDiagSVD:
+
+    def test_simple(self):
+        assert_array_almost_equal(diagsvd([1, 0, 0], 3, 3),
+                                  [[1, 0, 0], [0, 0, 0], [0, 0, 0]])
+
+
+class TestQR:
+
+    def setup_method(self):
+        seed(1234)
+
+    def test_simple(self):
+        a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
+        q, r = qr(a)
+        assert_array_almost_equal(q.T @ q, eye(3))
+        assert_array_almost_equal(q @ r, a)
+
+    def test_simple_left(self):
+        a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
+        q, r = qr(a)
+        c = [1, 2, 3]
+        qc, r2 = qr_multiply(a, c, "left")
+        assert_array_almost_equal(q @ c, qc)
+        assert_array_almost_equal(r, r2)
+        qc, r2 = qr_multiply(a, eye(3), "left")
+        assert_array_almost_equal(q, qc)
+
+    def test_simple_right(self):
+        a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
+        q, r = qr(a)
+        c = [1, 2, 3]
+        qc, r2 = qr_multiply(a, c)
+        assert_array_almost_equal(c @ q, qc)
+        assert_array_almost_equal(r, r2)
+        qc, r = qr_multiply(a, eye(3))
+        assert_array_almost_equal(q, qc)
+
+    def test_simple_pivoting(self):
+        a = np.asarray([[8, 2, 3], [2, 9, 3], [5, 3, 6]])
+        q, r, p = qr(a, pivoting=True)
+        d = abs(diag(r))
+        assert_(np.all(d[1:] <= d[:-1]))
+        assert_array_almost_equal(q.T @ q, eye(3))
+        assert_array_almost_equal(q @ r, a[:, p])
+        q2, r2 = qr(a[:, p])
+        assert_array_almost_equal(q, q2)
+        assert_array_almost_equal(r, r2)
+
+    def test_simple_left_pivoting(self):
+        a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
+        q, r, jpvt = qr(a, pivoting=True)
+        c = [1, 2, 3]
+        qc, r, jpvt = qr_multiply(a, c, "left", True)
+        assert_array_almost_equal(q @ c, qc)
+
+    def test_simple_right_pivoting(self):
+        a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
+        q, r, jpvt = qr(a, pivoting=True)
+        c = [1, 2, 3]
+        qc, r, jpvt = qr_multiply(a, c, pivoting=True)
+        assert_array_almost_equal(c @ q, qc)
+
+    def test_simple_trap(self):
+        a = [[8, 2, 3], [2, 9, 3]]
+        q, r = qr(a)
+        assert_array_almost_equal(q.T @ q, eye(2))
+        assert_array_almost_equal(q @ r, a)
+
+    def test_simple_trap_pivoting(self):
+        a = np.asarray([[8, 2, 3], [2, 9, 3]])
+        q, r, p = qr(a, pivoting=True)
+        d = abs(diag(r))
+        assert_(np.all(d[1:] <= d[:-1]))
+        assert_array_almost_equal(q.T @ q, eye(2))
+        assert_array_almost_equal(q @ r, a[:, p])
+        q2, r2 = qr(a[:, p])
+        assert_array_almost_equal(q, q2)
+        assert_array_almost_equal(r, r2)
+
+    def test_simple_tall(self):
+        # full version
+        a = [[8, 2], [2, 9], [5, 3]]
+        q, r = qr(a)
+        assert_array_almost_equal(q.T @ q, eye(3))
+        assert_array_almost_equal(q @ r, a)
+
+    def test_simple_tall_pivoting(self):
+        # full version pivoting
+        a = np.asarray([[8, 2], [2, 9], [5, 3]])
+        q, r, p = qr(a, pivoting=True)
+        d = abs(diag(r))
+        assert_(np.all(d[1:] <= d[:-1]))
+        assert_array_almost_equal(q.T @ q, eye(3))
+        assert_array_almost_equal(q @ r, a[:, p])
+        q2, r2 = qr(a[:, p])
+        assert_array_almost_equal(q, q2)
+        assert_array_almost_equal(r, r2)
+
+    def test_simple_tall_e(self):
+        # economy version
+        a = [[8, 2], [2, 9], [5, 3]]
+        q, r = qr(a, mode='economic')
+        assert_array_almost_equal(q.T @ q, eye(2))
+        assert_array_almost_equal(q @ r, a)
+        assert_equal(q.shape, (3, 2))
+        assert_equal(r.shape, (2, 2))
+
+    def test_simple_tall_e_pivoting(self):
+        # economy version pivoting
+        a = np.asarray([[8, 2], [2, 9], [5, 3]])
+        q, r, p = qr(a, pivoting=True, mode='economic')
+        d = abs(diag(r))
+        assert_(np.all(d[1:] <= d[:-1]))
+        assert_array_almost_equal(q.T @ q, eye(2))
+        assert_array_almost_equal(q @ r, a[:, p])
+        q2, r2 = qr(a[:, p], mode='economic')
+        assert_array_almost_equal(q, q2)
+        assert_array_almost_equal(r, r2)
+
+    def test_simple_tall_left(self):
+        a = [[8, 2], [2, 9], [5, 3]]
+        q, r = qr(a, mode="economic")
+        c = [1, 2]
+        qc, r2 = qr_multiply(a, c, "left")
+        assert_array_almost_equal(q @ c, qc)
+        assert_array_almost_equal(r, r2)
+        c = array([1, 2, 0])
+        qc, r2 = qr_multiply(a, c, "left", overwrite_c=True)
+        assert_array_almost_equal(q @ c[:2], qc)
+        qc, r = qr_multiply(a, eye(2), "left")
+        assert_array_almost_equal(qc, q)
+
+    def test_simple_tall_left_pivoting(self):
+        a = [[8, 2], [2, 9], [5, 3]]
+        q, r, jpvt = qr(a, mode="economic", pivoting=True)
+        c = [1, 2]
+        qc, r, kpvt = qr_multiply(a, c, "left", True)
+        assert_array_equal(jpvt, kpvt)
+        assert_array_almost_equal(q @ c, qc)
+        qc, r, jpvt = qr_multiply(a, eye(2), "left", True)
+        assert_array_almost_equal(qc, q)
+
+    def test_simple_tall_right(self):
+        a = [[8, 2], [2, 9], [5, 3]]
+        q, r = qr(a, mode="economic")
+        c = [1, 2, 3]
+        cq, r2 = qr_multiply(a, c)
+        assert_array_almost_equal(c @ q, cq)
+        assert_array_almost_equal(r, r2)
+        cq, r = qr_multiply(a, eye(3))
+        assert_array_almost_equal(cq, q)
+
+    def test_simple_tall_right_pivoting(self):
+        a = [[8, 2], [2, 9], [5, 3]]
+        q, r, jpvt = qr(a, pivoting=True, mode="economic")
+        c = [1, 2, 3]
+        cq, r, jpvt = qr_multiply(a, c, pivoting=True)
+        assert_array_almost_equal(c @ q, cq)
+        cq, r, jpvt = qr_multiply(a, eye(3), pivoting=True)
+        assert_array_almost_equal(cq, q)
+
+    def test_simple_fat(self):
+        # full version
+        a = [[8, 2, 5], [2, 9, 3]]
+        q, r = qr(a)
+        assert_array_almost_equal(q.T @ q, eye(2))
+        assert_array_almost_equal(q @ r, a)
+        assert_equal(q.shape, (2, 2))
+        assert_equal(r.shape, (2, 3))
+
+    def test_simple_fat_pivoting(self):
+        # full version pivoting
+        a = np.asarray([[8, 2, 5], [2, 9, 3]])
+        q, r, p = qr(a, pivoting=True)
+        d = abs(diag(r))
+        assert_(np.all(d[1:] <= d[:-1]))
+        assert_array_almost_equal(q.T @ q, eye(2))
+        assert_array_almost_equal(q @ r, a[:, p])
+        assert_equal(q.shape, (2, 2))
+        assert_equal(r.shape, (2, 3))
+        q2, r2 = qr(a[:, p])
+        assert_array_almost_equal(q, q2)
+        assert_array_almost_equal(r, r2)
+
+    def test_simple_fat_e(self):
+        # economy version
+        a = [[8, 2, 3], [2, 9, 5]]
+        q, r = qr(a, mode='economic')
+        assert_array_almost_equal(q.T @ q, eye(2))
+        assert_array_almost_equal(q @ r, a)
+        assert_equal(q.shape, (2, 2))
+        assert_equal(r.shape, (2, 3))
+
+    def test_simple_fat_e_pivoting(self):
+        # economy version pivoting
+        a = np.asarray([[8, 2, 3], [2, 9, 5]])
+        q, r, p = qr(a, pivoting=True, mode='economic')
+        d = abs(diag(r))
+        assert_(np.all(d[1:] <= d[:-1]))
+        assert_array_almost_equal(q.T @ q, eye(2))
+        assert_array_almost_equal(q @ r, a[:, p])
+        assert_equal(q.shape, (2, 2))
+        assert_equal(r.shape, (2, 3))
+        q2, r2 = qr(a[:, p], mode='economic')
+        assert_array_almost_equal(q, q2)
+        assert_array_almost_equal(r, r2)
+
+    def test_simple_fat_left(self):
+        a = [[8, 2, 3], [2, 9, 5]]
+        q, r = qr(a, mode="economic")
+        c = [1, 2]
+        qc, r2 = qr_multiply(a, c, "left")
+        assert_array_almost_equal(q @ c, qc)
+        assert_array_almost_equal(r, r2)
+        qc, r = qr_multiply(a, eye(2), "left")
+        assert_array_almost_equal(qc, q)
+
+    def test_simple_fat_left_pivoting(self):
+        a = [[8, 2, 3], [2, 9, 5]]
+        q, r, jpvt = qr(a, mode="economic", pivoting=True)
+        c = [1, 2]
+        qc, r, jpvt = qr_multiply(a, c, "left", True)
+        assert_array_almost_equal(q @ c, qc)
+        qc, r, jpvt = qr_multiply(a, eye(2), "left", True)
+        assert_array_almost_equal(qc, q)
+
+    def test_simple_fat_right(self):
+        a = [[8, 2, 3], [2, 9, 5]]
+        q, r = qr(a, mode="economic")
+        c = [1, 2]
+        cq, r2 = qr_multiply(a, c)
+        assert_array_almost_equal(c @ q, cq)
+        assert_array_almost_equal(r, r2)
+        cq, r = qr_multiply(a, eye(2))
+        assert_array_almost_equal(cq, q)
+
+    def test_simple_fat_right_pivoting(self):
+        a = [[8, 2, 3], [2, 9, 5]]
+        q, r, jpvt = qr(a, pivoting=True, mode="economic")
+        c = [1, 2]
+        cq, r, jpvt = qr_multiply(a, c, pivoting=True)
+        assert_array_almost_equal(c @ q, cq)
+        cq, r, jpvt = qr_multiply(a, eye(2), pivoting=True)
+        assert_array_almost_equal(cq, q)
+
+    def test_simple_complex(self):
+        a = [[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]]
+        q, r = qr(a)
+        assert_array_almost_equal(q.conj().T @ q, eye(3))
+        assert_array_almost_equal(q @ r, a)
+
+    def test_simple_complex_left(self):
+        a = [[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]]
+        q, r = qr(a)
+        c = [1, 2, 3+4j]
+        qc, r = qr_multiply(a, c, "left")
+        assert_array_almost_equal(q @ c, qc)
+        qc, r = qr_multiply(a, eye(3), "left")
+        assert_array_almost_equal(q, qc)
+
+    def test_simple_complex_right(self):
+        a = [[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]]
+        q, r = qr(a)
+        c = [1, 2, 3+4j]
+        qc, r = qr_multiply(a, c)
+        assert_array_almost_equal(c @ q, qc)
+        qc, r = qr_multiply(a, eye(3))
+        assert_array_almost_equal(q, qc)
+
+    def test_simple_tall_complex_left(self):
+        a = [[8, 2+3j], [2, 9], [5+7j, 3]]
+        q, r = qr(a, mode="economic")
+        c = [1, 2+2j]
+        qc, r2 = qr_multiply(a, c, "left")
+        assert_array_almost_equal(q @ c, qc)
+        assert_array_almost_equal(r, r2)
+        c = array([1, 2, 0])
+        qc, r2 = qr_multiply(a, c, "left", overwrite_c=True)
+        assert_array_almost_equal(q @ c[:2], qc)
+        qc, r = qr_multiply(a, eye(2), "left")
+        assert_array_almost_equal(qc, q)
+
+    def test_simple_complex_left_conjugate(self):
+        a = [[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]]
+        q, r = qr(a)
+        c = [1, 2, 3+4j]
+        qc, r = qr_multiply(a, c, "left", conjugate=True)
+        assert_array_almost_equal(q.conj() @ c, qc)
+
+    def test_simple_complex_tall_left_conjugate(self):
+        a = [[3, 3+4j], [5, 2+2j], [3, 2]]
+        q, r = qr(a, mode='economic')
+        c = [1, 3+4j]
+        qc, r = qr_multiply(a, c, "left", conjugate=True)
+        assert_array_almost_equal(q.conj() @ c, qc)
+
+    def test_simple_complex_right_conjugate(self):
+        a = [[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]]
+        q, r = qr(a)
+        c = np.array([1, 2, 3+4j])
+        qc, r = qr_multiply(a, c, conjugate=True)
+        assert_array_almost_equal(c @ q.conj(), qc)
+
+    def test_simple_complex_pivoting(self):
+        a = array([[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]])
+        q, r, p = qr(a, pivoting=True)
+        d = abs(diag(r))
+        assert_(np.all(d[1:] <= d[:-1]))
+        assert_array_almost_equal(q.conj().T @ q, eye(3))
+        assert_array_almost_equal(q @ r, a[:, p])
+        q2, r2 = qr(a[:, p])
+        assert_array_almost_equal(q, q2)
+        assert_array_almost_equal(r, r2)
+
+    def test_simple_complex_left_pivoting(self):
+        a = array([[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]])
+        q, r, jpvt = qr(a, pivoting=True)
+        c = [1, 2, 3+4j]
+        qc, r, jpvt = qr_multiply(a, c, "left", True)
+        assert_array_almost_equal(q @ c, qc)
+
+    def test_simple_complex_right_pivoting(self):
+        a = array([[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]])
+        q, r, jpvt = qr(a, pivoting=True)
+        c = [1, 2, 3+4j]
+        qc, r, jpvt = qr_multiply(a, c, pivoting=True)
+        assert_array_almost_equal(c @ q, qc)
+
+    def test_random(self):
+        n = 20
+        for k in range(2):
+            a = random([n, n])
+            q, r = qr(a)
+            assert_array_almost_equal(q.T @ q, eye(n))
+            assert_array_almost_equal(q @ r, a)
+
+    def test_random_left(self):
+        n = 20
+        for k in range(2):
+            a = random([n, n])
+            q, r = qr(a)
+            c = random([n])
+            qc, r = qr_multiply(a, c, "left")
+            assert_array_almost_equal(q @ c, qc)
+            qc, r = qr_multiply(a, eye(n), "left")
+            assert_array_almost_equal(q, qc)
+
+    def test_random_right(self):
+        n = 20
+        for k in range(2):
+            a = random([n, n])
+            q, r = qr(a)
+            c = random([n])
+            cq, r = qr_multiply(a, c)
+            assert_array_almost_equal(c @ q, cq)
+            cq, r = qr_multiply(a, eye(n))
+            assert_array_almost_equal(q, cq)
+
+    def test_random_pivoting(self):
+        n = 20
+        for k in range(2):
+            a = random([n, n])
+            q, r, p = qr(a, pivoting=True)
+            d = abs(diag(r))
+            assert_(np.all(d[1:] <= d[:-1]))
+            assert_array_almost_equal(q.T @ q, eye(n))
+            assert_array_almost_equal(q @ r, a[:, p])
+            q2, r2 = qr(a[:, p])
+            assert_array_almost_equal(q, q2)
+            assert_array_almost_equal(r, r2)
+
+    def test_random_tall(self):
+        # full version
+        m = 200
+        n = 100
+        for k in range(2):
+            a = random([m, n])
+            q, r = qr(a)
+            assert_array_almost_equal(q.T @ q, eye(m))
+            assert_array_almost_equal(q @ r, a)
+
+    def test_random_tall_left(self):
+        # full version
+        m = 200
+        n = 100
+        for k in range(2):
+            a = random([m, n])
+            q, r = qr(a, mode="economic")
+            c = random([n])
+            qc, r = qr_multiply(a, c, "left")
+            assert_array_almost_equal(q @ c, qc)
+            qc, r = qr_multiply(a, eye(n), "left")
+            assert_array_almost_equal(qc, q)
+
+    def test_random_tall_right(self):
+        # full version
+        m = 200
+        n = 100
+        for k in range(2):
+            a = random([m, n])
+            q, r = qr(a, mode="economic")
+            c = random([m])
+            cq, r = qr_multiply(a, c)
+            assert_array_almost_equal(c @ q, cq)
+            cq, r = qr_multiply(a, eye(m))
+            assert_array_almost_equal(cq, q)
+
+    def test_random_tall_pivoting(self):
+        # full version pivoting
+        m = 200
+        n = 100
+        for k in range(2):
+            a = random([m, n])
+            q, r, p = qr(a, pivoting=True)
+            d = abs(diag(r))
+            assert_(np.all(d[1:] <= d[:-1]))
+            assert_array_almost_equal(q.T @ q, eye(m))
+            assert_array_almost_equal(q @ r, a[:, p])
+            q2, r2 = qr(a[:, p])
+            assert_array_almost_equal(q, q2)
+            assert_array_almost_equal(r, r2)
+
+    def test_random_tall_e(self):
+        # economy version
+        m = 200
+        n = 100
+        for k in range(2):
+            a = random([m, n])
+            q, r = qr(a, mode='economic')
+            assert_array_almost_equal(q.T @ q, eye(n))
+            assert_array_almost_equal(q @ r, a)
+            assert_equal(q.shape, (m, n))
+            assert_equal(r.shape, (n, n))
+
+    def test_random_tall_e_pivoting(self):
+        # economy version pivoting
+        m = 200
+        n = 100
+        for k in range(2):
+            a = random([m, n])
+            q, r, p = qr(a, pivoting=True, mode='economic')
+            d = abs(diag(r))
+            assert_(np.all(d[1:] <= d[:-1]))
+            assert_array_almost_equal(q.T @ q, eye(n))
+            assert_array_almost_equal(q @ r, a[:, p])
+            assert_equal(q.shape, (m, n))
+            assert_equal(r.shape, (n, n))
+            q2, r2 = qr(a[:, p], mode='economic')
+            assert_array_almost_equal(q, q2)
+            assert_array_almost_equal(r, r2)
+
+    def test_random_trap(self):
+        m = 100
+        n = 200
+        for k in range(2):
+            a = random([m, n])
+            q, r = qr(a)
+            assert_array_almost_equal(q.T @ q, eye(m))
+            assert_array_almost_equal(q @ r, a)
+
+    def test_random_trap_pivoting(self):
+        m = 100
+        n = 200
+        for k in range(2):
+            a = random([m, n])
+            q, r, p = qr(a, pivoting=True)
+            d = abs(diag(r))
+            assert_(np.all(d[1:] <= d[:-1]))
+            assert_array_almost_equal(q.T @ q, eye(m))
+            assert_array_almost_equal(q @ r, a[:, p])
+            q2, r2 = qr(a[:, p])
+            assert_array_almost_equal(q, q2)
+            assert_array_almost_equal(r, r2)
+
+    def test_random_complex(self):
+        n = 20
+        for k in range(2):
+            a = random([n, n])+1j*random([n, n])
+            q, r = qr(a)
+            assert_array_almost_equal(q.conj().T @ q, eye(n))
+            assert_array_almost_equal(q @ r, a)
+
+    def test_random_complex_left(self):
+        n = 20
+        for k in range(2):
+            a = random([n, n])+1j*random([n, n])
+            q, r = qr(a)
+            c = random([n])+1j*random([n])
+            qc, r = qr_multiply(a, c, "left")
+            assert_array_almost_equal(q @ c, qc)
+            qc, r = qr_multiply(a, eye(n), "left")
+            assert_array_almost_equal(q, qc)
+
+    def test_random_complex_right(self):
+        n = 20
+        for k in range(2):
+            a = random([n, n])+1j*random([n, n])
+            q, r = qr(a)
+            c = random([n])+1j*random([n])
+            cq, r = qr_multiply(a, c)
+            assert_array_almost_equal(c @ q, cq)
+            cq, r = qr_multiply(a, eye(n))
+            assert_array_almost_equal(q, cq)
+
+    def test_random_complex_pivoting(self):
+        n = 20
+        for k in range(2):
+            a = random([n, n])+1j*random([n, n])
+            q, r, p = qr(a, pivoting=True)
+            d = abs(diag(r))
+            assert_(np.all(d[1:] <= d[:-1]))
+            assert_array_almost_equal(q.conj().T @ q, eye(n))
+            assert_array_almost_equal(q @ r, a[:, p])
+            q2, r2 = qr(a[:, p])
+            assert_array_almost_equal(q, q2)
+            assert_array_almost_equal(r, r2)
+
+    def test_check_finite(self):
+        a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
+        q, r = qr(a, check_finite=False)
+        assert_array_almost_equal(q.T @ q, eye(3))
+        assert_array_almost_equal(q @ r, a)
+
+    def test_lwork(self):
+        a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
+        # Get comparison values
+        q, r = qr(a, lwork=None)
+
+        # Test against minimum valid lwork
+        q2, r2 = qr(a, lwork=3)
+        assert_array_almost_equal(q2, q)
+        assert_array_almost_equal(r2, r)
+
+        # Test against larger lwork
+        q3, r3 = qr(a, lwork=10)
+        assert_array_almost_equal(q3, q)
+        assert_array_almost_equal(r3, r)
+
+        # Test against explicit lwork=-1
+        q4, r4 = qr(a, lwork=-1)
+        assert_array_almost_equal(q4, q)
+        assert_array_almost_equal(r4, r)
+
+        # Test against invalid lwork
+        assert_raises(Exception, qr, a, lwork=0)
+        assert_raises(Exception, qr, a, lwork=2)
+
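+
+# Editorial sketch, not part of the original suite: what the qr_multiply
+# tests above verify -- qr_multiply applies Q to c directly (without ever
+# forming Q explicitly) and returns the same R as qr.  Assumes the
+# module-level imports earlier in this file (qr, qr_multiply, random, np).
+def _sketch_qr_multiply():  # hypothetical helper, never called
+    a = random((4, 4))
+    q, r = qr(a)
+    c = random((4,))
+    qc, r2 = qr_multiply(a, c, "left")  # Q @ c without forming Q
+    assert np.allclose(q @ c, qc)
+    assert np.allclose(r, r2)
+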
+
+class TestRQ:
+
+    def setup_method(self):
+        seed(1234)
+
+    def test_simple(self):
+        a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
+        r, q = rq(a)
+        assert_array_almost_equal(q @ q.T, eye(3))
+        assert_array_almost_equal(r @ q, a)
+
+    def test_r(self):
+        a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
+        r, q = rq(a)
+        r2 = rq(a, mode='r')
+        assert_array_almost_equal(r, r2)
+
+    def test_random(self):
+        n = 20
+        for k in range(2):
+            a = random([n, n])
+            r, q = rq(a)
+            assert_array_almost_equal(q @ q.T, eye(n))
+            assert_array_almost_equal(r @ q, a)
+
+    def test_simple_trap(self):
+        a = [[8, 2, 3], [2, 9, 3]]
+        r, q = rq(a)
+        assert_array_almost_equal(q.T @ q, eye(3))
+        assert_array_almost_equal(r @ q, a)
+
+    def test_simple_tall(self):
+        a = [[8, 2], [2, 9], [5, 3]]
+        r, q = rq(a)
+        assert_array_almost_equal(q.T @ q, eye(2))
+        assert_array_almost_equal(r @ q, a)
+
+    def test_simple_fat(self):
+        a = [[8, 2, 5], [2, 9, 3]]
+        r, q = rq(a)
+        assert_array_almost_equal(q @ q.T, eye(3))
+        assert_array_almost_equal(r @ q, a)
+
+    def test_simple_complex(self):
+        a = [[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]]
+        r, q = rq(a)
+        assert_array_almost_equal(q @ q.conj().T, eye(3))
+        assert_array_almost_equal(r @ q, a)
+
+    def test_random_tall(self):
+        m = 200
+        n = 100
+        for k in range(2):
+            a = random([m, n])
+            r, q = rq(a)
+            assert_array_almost_equal(q @ q.T, eye(n))
+            assert_array_almost_equal(r @ q, a)
+
+    def test_random_trap(self):
+        m = 100
+        n = 200
+        for k in range(2):
+            a = random([m, n])
+            r, q = rq(a)
+            assert_array_almost_equal(q @ q.T, eye(n))
+            assert_array_almost_equal(r @ q, a)
+
+    def test_random_trap_economic(self):
+        m = 100
+        n = 200
+        for k in range(2):
+            a = random([m, n])
+            r, q = rq(a, mode='economic')
+            assert_array_almost_equal(q @ q.T, eye(m))
+            assert_array_almost_equal(r @ q, a)
+            assert_equal(q.shape, (m, n))
+            assert_equal(r.shape, (m, m))
+
+    def test_random_complex(self):
+        n = 20
+        for k in range(2):
+            a = random([n, n])+1j*random([n, n])
+            r, q = rq(a)
+            assert_array_almost_equal(q @ q.conj().T, eye(n))
+            assert_array_almost_equal(r @ q, a)
+
+    def test_random_complex_economic(self):
+        m = 100
+        n = 200
+        for k in range(2):
+            a = random([m, n])+1j*random([m, n])
+            r, q = rq(a, mode='economic')
+            assert_array_almost_equal(q @ q.conj().T, eye(m))
+            assert_array_almost_equal(r @ q, a)
+            assert_equal(q.shape, (m, n))
+            assert_equal(r.shape, (m, m))
+
+    def test_check_finite(self):
+        a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
+        r, q = rq(a, check_finite=False)
+        assert_array_almost_equal(q @ q.T, eye(3))
+        assert_array_almost_equal(r @ q, a)
+
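+
+# Editorial sketch, not part of the original suite: the RQ identities the
+# tests above rely on -- rq factors a as R @ Q (upper triangular times
+# orthonormal), mirroring QR with the factors swapped.  Assumes the
+# module-level imports earlier in this file (rq, random, eye, np).
+def _sketch_rq_identity():  # hypothetical helper, never called
+    a = random((4, 4))
+    r, q = rq(a)
+    assert np.allclose(q @ q.T, eye(4))  # Q has orthonormal rows
+    assert np.allclose(r @ q, a)         # a = R @ Q, not Q @ R
+    assert np.allclose(np.triu(r), r)    # R is upper triangular
+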
+
+class TestSchur:
+
+    def check_schur(self, a, t, u, rtol, atol):
+        # Check that the Schur decomposition is correct.
+        assert_allclose(u @ t @ u.conj().T, a, rtol=rtol, atol=atol,
+                        err_msg="Schur decomposition does not match 'a'")
+        # The expected value of u @ u.H - I is all zeros, so test
+        # with absolute tolerance only.
+        assert_allclose(u @ u.conj().T - np.eye(len(u)), 0, rtol=0, atol=atol,
+                        err_msg="u is not unitary")
+
+    def test_simple(self):
+        a = [[8, 12, 3], [2, 9, 3], [10, 3, 6]]
+        t, z = schur(a)
+        self.check_schur(a, t, z, rtol=1e-14, atol=5e-15)
+        tc, zc = schur(a, 'complex')
+        assert_(np.any(ravel(iscomplex(zc))) and np.any(ravel(iscomplex(tc))))
+        self.check_schur(a, tc, zc, rtol=1e-14, atol=5e-15)
+        tc2, zc2 = rsf2csf(tc, zc)
+        self.check_schur(a, tc2, zc2, rtol=1e-14, atol=5e-15)
+
+    @pytest.mark.parametrize(
+        'sort, expected_diag',
+        [('lhp', [-np.sqrt(2), -0.5, np.sqrt(2), 0.5]),
+         ('rhp', [np.sqrt(2), 0.5, -np.sqrt(2), -0.5]),
+         ('iuc', [-0.5, 0.5, np.sqrt(2), -np.sqrt(2)]),
+         ('ouc', [np.sqrt(2), -np.sqrt(2), -0.5, 0.5]),
+         (lambda x: x >= 0.0, [np.sqrt(2), 0.5, -np.sqrt(2), -0.5])]
+    )
+    def test_sort(self, sort, expected_diag):
+        # The exact eigenvalues of this matrix are
+        #   -sqrt(2), sqrt(2), -1/2, 1/2.
+        a = [[4., 3., 1., -1.],
+             [-4.5, -3.5, -1., 1.],
+             [9., 6., -4., 4.5],
+             [6., 4., -3., 3.5]]
+        t, u, sdim = schur(a, sort=sort)
+        self.check_schur(a, t, u, rtol=1e-14, atol=5e-15)
+        assert_allclose(np.diag(t), expected_diag, rtol=1e-12)
+        assert_equal(2, sdim)
+
+    def test_sort_errors(self):
+        a = [[4., 3., 1., -1.],
+             [-4.5, -3.5, -1., 1.],
+             [9., 6., -4., 4.5],
+             [6., 4., -3., 3.5]]
+        assert_raises(ValueError, schur, a, sort='unsupported')
+        assert_raises(ValueError, schur, a, sort=1)
+
+    def test_check_finite(self):
+        a = [[8, 12, 3], [2, 9, 3], [10, 3, 6]]
+        t, z = schur(a, check_finite=False)
+        assert_array_almost_equal(z @ t @ z.conj().T, a)
+
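+
+# Editorial sketch, not part of the original suite: the sort machinery
+# exercised by test_sort above -- with sort given, schur reorders T so the
+# selected eigenvalues come first and returns their count as sdim.  Assumes
+# the module-level imports earlier in this file (schur, np).
+def _sketch_schur_sort():  # hypothetical helper, never called
+    a = np.diag([3.0, -1.0, 2.0])
+    t, u, sdim = schur(a, sort='rhp')  # right-half-plane eigenvalues first
+    assert sdim == 2                   # two eigenvalues with Re > 0
+    assert np.allclose(u @ t @ u.conj().T, a)
+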
+
+class TestHessenberg:
+
+    def test_simple(self):
+        a = [[-149, -50, -154],
+             [537, 180, 546],
+             [-27, -9, -25]]
+        h1 = [[-149.0000, 42.2037, -156.3165],
+              [-537.6783, 152.5511, -554.9272],
+              [0, 0.0728, 2.4489]]
+        h, q = hessenberg(a, calc_q=1)
+        assert_array_almost_equal(q.T @ a @ q, h)
+        assert_array_almost_equal(h, h1, decimal=4)
+
+    def test_simple_complex(self):
+        a = [[-149, -50, -154],
+             [537, 180j, 546],
+             [-27j, -9, -25]]
+        h, q = hessenberg(a, calc_q=1)
+        assert_array_almost_equal(q.conj().T @ a @ q, h)
+
+    def test_simple2(self):
+        a = [[1, 2, 3, 4, 5, 6, 7],
+             [0, 2, 3, 4, 6, 7, 2],
+             [0, 2, 2, 3, 0, 3, 2],
+             [0, 0, 2, 8, 0, 0, 2],
+             [0, 3, 1, 2, 0, 1, 2],
+             [0, 1, 2, 3, 0, 1, 0],
+             [0, 0, 0, 0, 0, 1, 2]]
+        h, q = hessenberg(a, calc_q=1)
+        assert_array_almost_equal(q.T @ a @ q, h)
+
+    def test_simple3(self):
+        a = np.eye(3)
+        a[-1, 0] = 2
+        h, q = hessenberg(a, calc_q=1)
+        assert_array_almost_equal(q.T @ a @ q, h)
+
+    def test_random(self):
+        n = 20
+        for k in range(2):
+            a = random([n, n])
+            h, q = hessenberg(a, calc_q=1)
+            assert_array_almost_equal(q.T @ a @ q, h)
+
+    def test_random_complex(self):
+        n = 20
+        for k in range(2):
+            a = random([n, n])+1j*random([n, n])
+            h, q = hessenberg(a, calc_q=1)
+            assert_array_almost_equal(q.conj().T @ a @ q, h)
+
+    def test_check_finite(self):
+        a = [[-149, -50, -154],
+             [537, 180, 546],
+             [-27, -9, -25]]
+        h1 = [[-149.0000, 42.2037, -156.3165],
+              [-537.6783, 152.5511, -554.9272],
+              [0, 0.0728, 2.4489]]
+        h, q = hessenberg(a, calc_q=1, check_finite=False)
+        assert_array_almost_equal(q.T @ a @ q, h)
+        assert_array_almost_equal(h, h1, decimal=4)
+
+    def test_2x2(self):
+        a = [[2, 1], [7, 12]]
+
+        h, q = hessenberg(a, calc_q=1)
+        assert_array_almost_equal(q, np.eye(2))
+        assert_array_almost_equal(h, a)
+
+        b = [[2-7j, 1+2j], [7+3j, 12-2j]]
+        h2, q2 = hessenberg(b, calc_q=1)
+        assert_array_almost_equal(q2, np.eye(2))
+        assert_array_almost_equal(h2, b)
+
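+
+# Editorial sketch, not part of the original suite: the two properties the
+# Hessenberg tests above check -- q.T @ a @ q reproduces h, and h is zero
+# below its first subdiagonal.  Assumes the module-level imports earlier in
+# this file (hessenberg, random, np).
+def _sketch_hessenberg_structure():  # hypothetical helper, never called
+    a = random((5, 5))
+    h, q = hessenberg(a, calc_q=True)
+    assert np.allclose(q.T @ a @ q, h)
+    assert np.allclose(np.tril(h, -2), 0)  # zero below the first subdiagonal
+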
+
+class TestQZ:
+    def setup_method(self):
+        seed(12345)
+
+    @pytest.mark.xfail(
+        sys.platform == 'darwin',
+        reason="gges[float32] broken for OpenBLAS on macOS, see gh-16949")
+    def test_qz_single(self):
+        n = 5
+        A = random([n, n]).astype(float32)
+        B = random([n, n]).astype(float32)
+        AA, BB, Q, Z = qz(A, B)
+        assert_array_almost_equal(Q @ AA @ Z.T, A, decimal=5)
+        assert_array_almost_equal(Q @ BB @ Z.T, B, decimal=5)
+        assert_array_almost_equal(Q @ Q.T, eye(n), decimal=5)
+        assert_array_almost_equal(Z @ Z.T, eye(n), decimal=5)
+        assert_(np.all(diag(BB) >= 0))
+
+    def test_qz_double(self):
+        n = 5
+        A = random([n, n])
+        B = random([n, n])
+        AA, BB, Q, Z = qz(A, B)
+        assert_array_almost_equal(Q @ AA @ Z.T, A)
+        assert_array_almost_equal(Q @ BB @ Z.T, B)
+        assert_array_almost_equal(Q @ Q.T, eye(n))
+        assert_array_almost_equal(Z @ Z.T, eye(n))
+        assert_(np.all(diag(BB) >= 0))
+
+    def test_qz_complex(self):
+        n = 5
+        A = random([n, n]) + 1j*random([n, n])
+        B = random([n, n]) + 1j*random([n, n])
+        AA, BB, Q, Z = qz(A, B)
+        assert_array_almost_equal(Q @ AA @ Z.conj().T, A)
+        assert_array_almost_equal(Q @ BB @ Z.conj().T, B)
+        assert_array_almost_equal(Q @ Q.conj().T, eye(n))
+        assert_array_almost_equal(Z @ Z.conj().T, eye(n))
+        assert_(np.all(diag(BB) >= 0))
+        assert_(np.all(diag(BB).imag == 0))
+
+    def test_qz_complex64(self):
+        n = 5
+        A = (random([n, n]) + 1j*random([n, n])).astype(complex64)
+        B = (random([n, n]) + 1j*random([n, n])).astype(complex64)
+        AA, BB, Q, Z = qz(A, B)
+        assert_array_almost_equal(Q @ AA @ Z.conj().T, A, decimal=5)
+        assert_array_almost_equal(Q @ BB @ Z.conj().T, B, decimal=5)
+        assert_array_almost_equal(Q @ Q.conj().T, eye(n), decimal=5)
+        assert_array_almost_equal(Z @ Z.conj().T, eye(n), decimal=5)
+        assert_(np.all(diag(BB) >= 0))
+        assert_(np.all(diag(BB).imag == 0))
+
+    def test_qz_double_complex(self):
+        n = 5
+        A = random([n, n])
+        B = random([n, n])
+        AA, BB, Q, Z = qz(A, B, output='complex')
+        aa = Q @ AA @ Z.conj().T
+        assert_array_almost_equal(aa.real, A)
+        assert_array_almost_equal(aa.imag, 0)
+        bb = Q @ BB @ Z.conj().T
+        assert_array_almost_equal(bb.real, B)
+        assert_array_almost_equal(bb.imag, 0)
+        assert_array_almost_equal(Q @ Q.conj().T, eye(n))
+        assert_array_almost_equal(Z @ Z.conj().T, eye(n))
+        assert_(np.all(diag(BB) >= 0))
+
+    def test_qz_double_sort(self):
+        # from https://www.nag.com/lapack-ex/node119.html
+        # NOTE: These matrices may be ill-conditioned and lead to a
+        # segfault on certain Python versions when compiled against
+        # older sse2/sse3 ATLAS/LAPACK binaries on Windows
+        # A =   np.array([[3.9,  12.5, -34.5,  -0.5],
+        #                [ 4.3,  21.5, -47.5,   7.5],
+        #                [ 4.3,  21.5, -43.5,   3.5],
+        #                [ 4.4,  26.0, -46.0,   6.0 ]])
+
+        # B = np.array([[ 1.0,   2.0,  -3.0,   1.0],
+        #              [1.0,   3.0,  -5.0,   4.0],
+        #              [1.0,   3.0,  -4.0,   3.0],
+        #              [1.0,   3.0,  -4.0,   4.0]])
+        A = np.array([[3.9, 12.5, -34.5, 2.5],
+                      [4.3, 21.5, -47.5, 7.5],
+                      [4.3, 1.5, -43.5, 3.5],
+                      [4.4, 6.0, -46.0, 6.0]])
+
+        B = np.array([[1.0, 1.0, -3.0, 1.0],
+                      [1.0, 3.0, -5.0, 4.4],
+                      [1.0, 2.0, -4.0, 1.0],
+                      [1.2, 3.0, -4.0, 4.0]])
+
+        assert_raises(ValueError, qz, A, B, sort=lambda ar, ai, beta: ai == 0)
+        if False:  # disabled: real-output qz does not support sort (above)
+            AA, BB, Q, Z, sdim = qz(A, B, sort=lambda ar, ai, beta: ai == 0)
+            # assert_(sdim == 2)
+            assert_(sdim == 4)
+            assert_array_almost_equal(Q @ AA @ Z.T, A)
+            assert_array_almost_equal(Q @ BB @ Z.T, B)
+
+            # test absolute values because the sign is ambiguous and
+            # might be platform dependent
+            assert_array_almost_equal(np.abs(AA), np.abs(np.array(
+                            [[35.7864, -80.9061, -12.0629, -9.498],
+                             [0., 2.7638, -2.3505, 7.3256],
+                             [0., 0., 0.6258, -0.0398],
+                             [0., 0., 0., -12.8217]])), 4)
+            assert_array_almost_equal(np.abs(BB), np.abs(np.array(
+                            [[4.5324, -8.7878, 3.2357, -3.5526],
+                             [0., 1.4314, -2.1894, 0.9709],
+                             [0., 0., 1.3126, -0.3468],
+                             [0., 0., 0., 0.559]])), 4)
+            assert_array_almost_equal(np.abs(Q), np.abs(np.array(
+                            [[-0.4193, -0.605, -0.1894, -0.6498],
+                             [-0.5495, 0.6987, 0.2654, -0.3734],
+                             [-0.4973, -0.3682, 0.6194, 0.4832],
+                             [-0.5243, 0.1008, -0.7142, 0.4526]])), 4)
+            assert_array_almost_equal(np.abs(Z), np.abs(np.array(
+                            [[-0.9471, -0.2971, -0.1217, 0.0055],
+                             [-0.0367, 0.1209, 0.0358, 0.9913],
+                             [0.3171, -0.9041, -0.2547, 0.1312],
+                             [0.0346, 0.2824, -0.9587, 0.0014]])), 4)
+
+        # test absolute values because the sign is ambiguous and might be
+        # platform dependent
+        # assert_array_almost_equal(abs(AA), abs(np.array([
+        #                [3.8009, -69.4505, 50.3135, -43.2884],
+        #                [0.0000, 9.2033, -0.2001, 5.9881],
+        #                [0.0000, 0.0000, 1.4279, 4.4453],
+        #                [0.0000, 0.0000, 0.9019, -1.1962]])), 4)
+        # assert_array_almost_equal(abs(BB), abs(np.array([
+        #                [1.9005, -10.2285, 0.8658, -5.2134],
+        #                [0.0000,   2.3008, 0.7915,  0.4262],
+        #                [0.0000,   0.0000, 0.8101,  0.0000],
+        #                [0.0000,   0.0000, 0.0000, -0.2823]])), 4)
+        # assert_array_almost_equal(abs(Q), abs(np.array([
+        #                [0.4642,  0.7886,  0.2915, -0.2786],
+        #                [0.5002, -0.5986,  0.5638, -0.2713],
+        #                [0.5002,  0.0154, -0.0107,  0.8657],
+        #                [0.5331, -0.1395, -0.7727, -0.3151]])), 4)
+        # assert_array_almost_equal(dot(Q,Q.T), eye(4))
+        # assert_array_almost_equal(abs(Z), abs(np.array([
+        #                [0.9961, -0.0014,  0.0887, -0.0026],
+        #                [0.0057, -0.0404, -0.0938, -0.9948],
+        #                [0.0626,  0.7194, -0.6908,  0.0363],
+        #                [0.0626, -0.6934, -0.7114,  0.0956]])), 4)
+        # assert_array_almost_equal(dot(Z,Z.T), eye(4))
+
+    # def test_qz_complex_sort(self):
+    #    cA = np.array([
+    #   [-21.10+22.50*1j, 53.50+-50.50*1j, -34.50+127.50*1j, 7.50+  0.50*1j],
+    #   [-0.46+ -7.78*1j, -3.50+-37.50*1j, -15.50+ 58.50*1j,-10.50+ -1.50*1j],
+    #   [ 4.30+ -5.50*1j, 39.70+-17.10*1j, -68.50+ 12.50*1j, -7.50+ -3.50*1j],
+    #   [ 5.50+  4.40*1j, 14.40+ 43.30*1j, -32.50+-46.00*1j,-19.00+-32.50*1j]])
+
+    #    cB =  np.array([
+    #   [1.00+ -5.00*1j, 1.60+  1.20*1j,-3.00+  0.00*1j, 0.00+ -1.00*1j],
+    #   [0.80+ -0.60*1j, 3.00+ -5.00*1j,-4.00+  3.00*1j,-2.40+ -3.20*1j],
+    #   [1.00+  0.00*1j, 2.40+  1.80*1j,-4.00+ -5.00*1j, 0.00+ -3.00*1j],
+    #   [0.00+  1.00*1j,-1.80+  2.40*1j, 0.00+ -4.00*1j, 4.00+ -5.00*1j]])
+
+    #    AAS,BBS,QS,ZS,sdim = qz(cA,cB,sort='lhp')
+
+    #    eigenvalues = diag(AAS)/diag(BBS)
+    #    assert_(np.all(np.real(eigenvalues[:sdim] < 0)))
+    #    assert_(np.all(np.real(eigenvalues[sdim:] > 0)))
+
+    def test_check_finite(self):
+        n = 5
+        A = random([n, n])
+        B = random([n, n])
+        AA, BB, Q, Z = qz(A, B, check_finite=False)
+        assert_array_almost_equal(Q @ AA @ Z.T, A)
+        assert_array_almost_equal(Q @ BB @ Z.T, B)
+        assert_array_almost_equal(Q @ Q.T, eye(n))
+        assert_array_almost_equal(Z @ Z.T, eye(n))
+        assert_(np.all(diag(BB) >= 0))
+
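+
+# Editorial sketch, not part of the original suite: the generalized Schur
+# identities the QZ tests above verify -- qz returns quasi-triangular AA, BB
+# and orthogonal Q, Z with A = Q @ AA @ Z.T and B = Q @ BB @ Z.T.  Assumes
+# the module-level imports earlier in this file (qz, random, eye, np).
+def _sketch_qz_identities():  # hypothetical helper, never called
+    A, B = random((4, 4)), random((4, 4))
+    AA, BB, Q, Z = qz(A, B)
+    assert np.allclose(Q @ AA @ Z.T, A)
+    assert np.allclose(Q @ BB @ Z.T, B)
+    assert np.allclose(Q @ Q.T, eye(4))
+    assert np.allclose(Z @ Z.T, eye(4))
+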
+
+def _make_pos(X):
+    # the decompositions can have different signs from the verified results
+    return np.sign(X)*X
+
+
+class TestOrdQZ:
+    @classmethod
+    def setup_class(cls):
+        # https://www.nag.com/lapack-ex/node119.html
+        A1 = np.array([[-21.10 - 22.50j, 53.5 - 50.5j, -34.5 + 127.5j,
+                        7.5 + 0.5j],
+                       [-0.46 - 7.78j, -3.5 - 37.5j, -15.5 + 58.5j,
+                        -10.5 - 1.5j],
+                       [4.30 - 5.50j, 39.7 - 17.1j, -68.5 + 12.5j,
+                        -7.5 - 3.5j],
+                       [5.50 + 4.40j, 14.4 + 43.3j, -32.5 - 46.0j,
+                        -19.0 - 32.5j]])
+
+        B1 = np.array([[1.0 - 5.0j, 1.6 + 1.2j, -3 + 0j, 0.0 - 1.0j],
+                       [0.8 - 0.6j, 3.0 - 5.0j, -4 + 3j, -2.4 - 3.2j],
+                       [1.0 + 0.0j, 2.4 + 1.8j, -4 - 5j, 0.0 - 3.0j],
+                       [0.0 + 1.0j, -1.8 + 2.4j, 0 - 4j, 4.0 - 5.0j]])
+
+        # https://www.nag.com/numeric/fl/nagdoc_fl23/xhtml/F08/f08yuf.xml
+        A2 = np.array([[3.9, 12.5, -34.5, -0.5],
+                       [4.3, 21.5, -47.5, 7.5],
+                       [4.3, 21.5, -43.5, 3.5],
+                       [4.4, 26.0, -46.0, 6.0]])
+
+        B2 = np.array([[1, 2, -3, 1],
+                       [1, 3, -5, 4],
+                       [1, 3, -4, 3],
+                       [1, 3, -4, 4]])
+
+        # example with the eigenvalues
+        # -0.33891648, 1.61217396+0.74013521j, 1.61217396-0.74013521j,
+        # 0.61244091
+        # thus featuring:
+        #  * one complex conjugate eigenvalue pair,
+        #  * one eigenvalue in the lhp
+        #  * 2 eigenvalues in the unit circle
+        #  * 2 non-real eigenvalues
+        A3 = np.array([[5., 1., 3., 3.],
+                       [4., 4., 2., 7.],
+                       [7., 4., 1., 3.],
+                       [0., 4., 8., 7.]])
+        B3 = np.array([[8., 10., 6., 10.],
+                       [7., 7., 2., 9.],
+                       [9., 1., 6., 6.],
+                       [5., 1., 4., 7.]])
+
+        # example with infinite eigenvalues
+        A4 = np.eye(2)
+        B4 = np.diag([0, 1])
+
+        # example with (alpha, beta) = (0, 0)
+        A5 = np.diag([1, 0])
+
+        cls.A = [A1, A2, A3, A4, A5]
+        cls.B = [B1, B2, B3, B4, A5]
+
+    def qz_decomp(self, sort):
+        with np.errstate(all='raise'):
+            ret = [ordqz(Ai, Bi, sort=sort) for Ai, Bi in zip(self.A, self.B)]
+        return tuple(ret)
+
+    def check(self, A, B, sort, AA, BB, alpha, beta, Q, Z):
+        Id = np.eye(*A.shape)
+        # make sure Q and Z are orthogonal
+        assert_array_almost_equal(Q @ Q.T.conj(), Id)
+        assert_array_almost_equal(Z @ Z.T.conj(), Id)
+        # check factorization
+        assert_array_almost_equal(Q @ AA, A @ Z)
+        assert_array_almost_equal(Q @ BB, B @ Z)
+        # check shape of AA and BB
+        assert_array_equal(np.tril(AA, -2), np.zeros(AA.shape))
+        assert_array_equal(np.tril(BB, -1), np.zeros(BB.shape))
+        # check eigenvalues
+        for i in range(A.shape[0]):
+            # does the current diagonal element belong to a 2-by-2 block
+            # that was already checked?
+            if i > 0 and AA[i, i - 1] != 0:
+                continue
+            # take care of 2-by-2 blocks
+            if i < AA.shape[0] - 1 and AA[i + 1, i] != 0:
+                evals, _ = eig(AA[i:i + 2, i:i + 2], BB[i:i + 2, i:i + 2])
+                # make sure the pair of complex conjugate eigenvalues
+                # is ordered consistently (positive imaginary part first)
+                if evals[0].imag < 0:
+                    evals = evals[[1, 0]]
+                tmp = alpha[i:i + 2]/beta[i:i + 2]
+                if tmp[0].imag < 0:
+                    tmp = tmp[[1, 0]]
+                assert_array_almost_equal(evals, tmp)
+            else:
+                if alpha[i] == 0 and beta[i] == 0:
+                    assert_equal(AA[i, i], 0)
+                    assert_equal(BB[i, i], 0)
+                elif beta[i] == 0:
+                    assert_equal(BB[i, i], 0)
+                else:
+                    assert_almost_equal(AA[i, i]/BB[i, i], alpha[i]/beta[i])
+        sortfun = _select_function(sort)
+        lastsort = True
+        for i in range(A.shape[0]):
+            cursort = sortfun(np.array([alpha[i]]), np.array([beta[i]]))
+            # once the sorting criterion fails to match, no subsequent
+            # eigenvalue should match it either
+            if not lastsort:
+                assert not cursort
+            lastsort = cursort
+
+    def check_all(self, sort):
+        ret = self.qz_decomp(sort)
+
+        for reti, Ai, Bi in zip(ret, self.A, self.B):
+            self.check(Ai, Bi, sort, *reti)
+
+    def test_lhp(self):
+        self.check_all('lhp')
+
+    def test_rhp(self):
+        self.check_all('rhp')
+
+    def test_iuc(self):
+        self.check_all('iuc')
+
+    def test_ouc(self):
+        self.check_all('ouc')
+
+    def test_ref(self):
+        # real eigenvalues first (top-left corner)
+        def sort(x, y):
+            out = np.empty_like(x, dtype=bool)
+            nonzero = (y != 0)
+            out[~nonzero] = False
+            out[nonzero] = (x[nonzero]/y[nonzero]).imag == 0
+            return out
+
+        self.check_all(sort)
+
+    def test_cef(self):
+        # complex eigenvalues first (top-left corner)
+        def sort(x, y):
+            out = np.empty_like(x, dtype=bool)
+            nonzero = (y != 0)
+            out[~nonzero] = False
+            out[nonzero] = (x[nonzero]/y[nonzero]).imag != 0
+            return out
+
+        self.check_all(sort)
+
+    def test_diff_input_types(self):
+        ret = ordqz(self.A[1], self.B[2], sort='lhp')
+        self.check(self.A[1], self.B[2], 'lhp', *ret)
+
+        ret = ordqz(self.B[2], self.A[1], sort='lhp')
+        self.check(self.B[2], self.A[1], 'lhp', *ret)
+
+    def test_sort_explicit(self):
+        # Test order of the eigenvalues in the 2 x 2 case where we can
+        # explicitly compute the solution
+        A1 = np.eye(2)
+        B1 = np.diag([-2, 0.5])
+        expected1 = [('lhp', [-0.5, 2]),
+                     ('rhp', [2, -0.5]),
+                     ('iuc', [-0.5, 2]),
+                     ('ouc', [2, -0.5])]
+        A2 = np.eye(2)
+        B2 = np.diag([-2 + 1j, 0.5 + 0.5j])
+        expected2 = [('lhp', [1/(-2 + 1j), 1/(0.5 + 0.5j)]),
+                     ('rhp', [1/(0.5 + 0.5j), 1/(-2 + 1j)]),
+                     ('iuc', [1/(-2 + 1j), 1/(0.5 + 0.5j)]),
+                     ('ouc', [1/(0.5 + 0.5j), 1/(-2 + 1j)])]
+        # 'lhp' is ambiguous so don't test it
+        A3 = np.eye(2)
+        B3 = np.diag([2, 0])
+        expected3 = [('rhp', [0.5, np.inf]),
+                     ('iuc', [0.5, np.inf]),
+                     ('ouc', [np.inf, 0.5])]
+        # 'rhp' is ambiguous so don't test it
+        A4 = np.eye(2)
+        B4 = np.diag([-2, 0])
+        expected4 = [('lhp', [-0.5, np.inf]),
+                     ('iuc', [-0.5, np.inf]),
+                     ('ouc', [np.inf, -0.5])]
+        A5 = np.diag([0, 1])
+        B5 = np.diag([0, 0.5])
+        # 'lhp' and 'iuc' are ambiguous so don't test them
+        expected5 = [('rhp', [2, np.nan]),
+                     ('ouc', [2, np.nan])]
+
+        A = [A1, A2, A3, A4, A5]
+        B = [B1, B2, B3, B4, B5]
+        expected = [expected1, expected2, expected3, expected4, expected5]
+        for Ai, Bi, expectedi in zip(A, B, expected):
+            for sortstr, expected_eigvals in expectedi:
+                _, _, alpha, beta, _, _ = ordqz(Ai, Bi, sort=sortstr)
+                azero = (alpha == 0)
+                bzero = (beta == 0)
+                x = np.empty_like(alpha)
+                x[azero & bzero] = np.nan
+                x[~azero & bzero] = np.inf
+                x[~bzero] = alpha[~bzero]/beta[~bzero]
+                assert_allclose(expected_eigvals, x)
+
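+
+# Editorial sketch, not part of the original suite: how test_sort_explicit
+# above converts ordqz's (alpha, beta) pairs into eigenvalues -- beta == 0
+# encodes an infinite eigenvalue and alpha == beta == 0 an indeterminate one,
+# so the plain ratio alpha/beta cannot be used directly.
+def _sketch_generalized_eigenvalues(alpha, beta):  # hypothetical, never called
+    x = np.empty_like(alpha)
+    azero, bzero = (alpha == 0), (beta == 0)
+    x[azero & bzero] = np.nan                 # (0, 0): indeterminate
+    x[~azero & bzero] = np.inf                # (a, 0): infinite eigenvalue
+    x[~bzero] = alpha[~bzero] / beta[~bzero]  # ordinary finite ratio
+    return x
+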
+
+class TestOrdQZWorkspaceSize:
+
+    def setup_method(self):
+        seed(12345)
+
+    def test_decompose(self):
+
+        N = 202
+
+        # raises error if lwork parameter to dtrsen is too small
+        for ddtype in [np.float32, np.float64]:
+            A = random((N, N)).astype(ddtype)
+            B = random((N, N)).astype(ddtype)
+            # sort = lambda ar, ai, b: ar**2 + ai**2 < b**2
+            _ = ordqz(A, B, sort=lambda alpha, beta: alpha < beta,
+                      output='real')
+
+        for ddtype in [np.complex128, np.complex64]:
+            A = random((N, N)).astype(ddtype)
+            B = random((N, N)).astype(ddtype)
+            _ = ordqz(A, B, sort=lambda alpha, beta: alpha < beta,
+                      output='complex')
+
+    @pytest.mark.slow
+    def test_decompose_ouc(self):
+
+        N = 202
+
+        # segfaults if lwork parameter to dtrsen is too small
+        for ddtype in [np.float32, np.float64, np.complex128, np.complex64]:
+            A = random((N, N)).astype(ddtype)
+            B = random((N, N)).astype(ddtype)
+            S, T, alpha, beta, U, V = ordqz(A, B, sort='ouc')
+
+
+class TestDatacopied:
+
+    def test_datacopied(self):
+        from scipy.linalg._decomp import _datacopied
+
+        M = matrix([[0, 1], [2, 3]])
+        A = asarray(M)
+        L = M.tolist()
+        M2 = M.copy()
+
+        class Fake1:
+            def __array__(self):
+                return A
+
+        class Fake2:
+            __array_interface__ = A.__array_interface__
+
+        F1 = Fake1()
+        F2 = Fake2()
+
+        for item, status in [(M, False), (A, False), (L, True),
+                             (M2, False), (F1, False), (F2, False)]:
+            arr = asarray(item)
+            assert_equal(_datacopied(arr, item), status,
+                         err_msg=repr(item))
+
+
+def test_aligned_mem_float():
+    """Check linalg works with non-aligned memory (float32)"""
+    # Allocate 402 bytes of memory (allocated on boundary)
+    a = arange(402, dtype=np.uint8)
+
+    # Create an array with boundary offset 2 (misaligned for 4-byte float32)
+    z = np.frombuffer(a.data, offset=2, count=100, dtype=float32)
+    z.shape = 10, 10
+
+    eig(z, overwrite_a=True)
+    eig(z.T, overwrite_a=True)
+
+
+@pytest.mark.skipif(platform.machine() == 'ppc64le',
+                    reason="crashes on ppc64le")
+def test_aligned_mem():
+    """Check linalg works with non-aligned memory (float64)"""
+    # Allocate 804 bytes of memory (allocated on boundary)
+    a = arange(804, dtype=np.uint8)
+
+    # Create an array with boundary offset 4
+    z = np.frombuffer(a.data, offset=4, count=100, dtype=float)
+    z.shape = 10, 10
+
+    eig(z, overwrite_a=True)
+    eig(z.T, overwrite_a=True)
+
+
+def test_aligned_mem_complex():
+    """Check that complex objects don't need to be completely aligned"""
+    # Allocate 1608 bytes of memory (allocated on boundary)
+    a = zeros(1608, dtype=np.uint8)
+
+    # Create an array with boundary offset 8
+    z = np.frombuffer(a.data, offset=8, count=100, dtype=complex)
+    z.shape = 10, 10
+
+    eig(z, overwrite_a=True)
+    # This does not need special handling
+    eig(z.T, overwrite_a=True)
+
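+
+# Editorial sketch, not part of the original suite: the misalignment trick
+# the aligned-mem tests above use -- viewing a uint8 buffer at an offset that
+# is not a multiple of the element size yields an array whose data pointer is
+# unaligned for its dtype (assuming, as typical allocators guarantee, that
+# the buffer itself starts on an 8-byte boundary).
+def _sketch_misaligned_view():  # hypothetical helper, never called
+    buf = np.zeros(804, dtype=np.uint8)
+    z = np.frombuffer(buf.data, offset=4, count=100, dtype=np.float64)
+    z.shape = 10, 10
+    assert not z.flags.aligned  # offset 4 is not a multiple of itemsize 8
+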
+
+def check_lapack_misaligned(func, args, kwargs):
+    args = list(args)
+    for i in range(len(args)):
+        a = args[:]
+        if isinstance(a[i], np.ndarray):
+            # Try misaligning a[i]
+            aa = np.zeros(a[i].size*a[i].dtype.itemsize+8, dtype=np.uint8)
+            aa = np.frombuffer(aa.data, offset=4, count=a[i].size,
+                               dtype=a[i].dtype)
+            aa.shape = a[i].shape
+            aa[...] = a[i]
+            a[i] = aa
+            func(*a, **kwargs)
+            if len(a[i].shape) > 1:
+                a[i] = a[i].T
+                func(*a, **kwargs)
+
+
+@pytest.mark.xfail(run=False,
+                   reason="Ticket #1152, triggers a segfault in rare cases.")
+def test_lapack_misaligned():
+    M = np.eye(10, dtype=float)
+    R = np.arange(100)
+    R.shape = 10, 10
+    S = np.arange(20000, dtype=np.uint8)
+    S = np.frombuffer(S.data, offset=4, count=100, dtype=float)
+    S.shape = 10, 10
+    b = np.ones(10)
+    LU, piv = lu_factor(S)
+    for (func, args, kwargs) in [
+            (eig, (S,), dict(overwrite_a=True)),  # crash
+            (eigvals, (S,), dict(overwrite_a=True)),  # no crash
+            (lu, (S,), dict(overwrite_a=True)),  # no crash
+            (lu_factor, (S,), dict(overwrite_a=True)),  # no crash
+            (lu_solve, ((LU, piv), b), dict(overwrite_b=True)),
+            (solve, (S, b), dict(overwrite_a=True, overwrite_b=True)),
+            (svd, (M,), dict(overwrite_a=True)),  # no crash
+            (svd, (R,), dict(overwrite_a=True)),  # no crash
+            (svd, (S,), dict(overwrite_a=True)),  # crash
+            (svdvals, (S,), dict()),  # no crash
+            (svdvals, (S,), dict(overwrite_a=True)),  # crash
+            (cholesky, (M,), dict(overwrite_a=True)),  # no crash
+            (qr, (S,), dict(overwrite_a=True)),  # crash
+            (rq, (S,), dict(overwrite_a=True)),  # crash
+            (hessenberg, (S,), dict(overwrite_a=True)),  # crash
+            (schur, (S,), dict(overwrite_a=True)),  # crash
+            ]:
+        check_lapack_misaligned(func, args, kwargs)
+# not properly tested
+# cholesky, rsf2csf, lu_solve, solve, eig_banded, eigvals_banded, eigh, diagsvd
+
+
+class TestOverwrite:
+    def test_eig(self):
+        assert_no_overwrite(eig, [(3, 3)])
+        assert_no_overwrite(eig, [(3, 3), (3, 3)])
+
+    def test_eigh(self):
+        assert_no_overwrite(eigh, [(3, 3)])
+        assert_no_overwrite(eigh, [(3, 3), (3, 3)])
+
+    def test_eig_banded(self):
+        assert_no_overwrite(eig_banded, [(3, 2)])
+
+    def test_eigvals(self):
+        assert_no_overwrite(eigvals, [(3, 3)])
+
+    def test_eigvalsh(self):
+        assert_no_overwrite(eigvalsh, [(3, 3)])
+
+    def test_eigvals_banded(self):
+        assert_no_overwrite(eigvals_banded, [(3, 2)])
+
+    def test_hessenberg(self):
+        assert_no_overwrite(hessenberg, [(3, 3)])
+
+    def test_lu_factor(self):
+        assert_no_overwrite(lu_factor, [(3, 3)])
+
+    def test_lu_solve(self):
+        x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 8]])
+        xlu = lu_factor(x)
+        assert_no_overwrite(lambda b: lu_solve(xlu, b), [(3,)])
+
+    def test_lu(self):
+        assert_no_overwrite(lu, [(3, 3)])
+
+    def test_qr(self):
+        assert_no_overwrite(qr, [(3, 3)])
+
+    def test_rq(self):
+        assert_no_overwrite(rq, [(3, 3)])
+
+    def test_schur(self):
+        assert_no_overwrite(schur, [(3, 3)])
+
+    def test_schur_complex(self):
+        assert_no_overwrite(lambda a: schur(a, 'complex'), [(3, 3)],
+                            dtypes=[np.float32, np.float64])
+
+    def test_svd(self):
+        assert_no_overwrite(svd, [(3, 3)])
+        assert_no_overwrite(lambda a: svd(a, lapack_driver='gesvd'), [(3, 3)])
+
+    def test_svdvals(self):
+        assert_no_overwrite(svdvals, [(3, 3)])
+
+
+def _check_orth(n, dtype, skip_big=False):
+    X = np.ones((n, 2), dtype=float).astype(dtype)
+
+    eps = np.finfo(dtype).eps
+    tol = 1000 * eps
+
+    Y = orth(X)
+    assert_equal(Y.shape, (n, 1))
+    assert_allclose(Y, Y.mean(), atol=tol)
+
+    Y = orth(X.T)
+    assert_equal(Y.shape, (2, 1))
+    assert_allclose(Y, Y.mean(), atol=tol)
+
+    if n > 5 and not skip_big:
+        np.random.seed(1)
+        X = np.random.rand(n, 5) @ np.random.rand(5, n)
+        X = X + 1e-4 * np.random.rand(n, 1) @ np.random.rand(1, n)
+        X = X.astype(dtype)
+
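+        # X is a rank-5 product plus a tiny rank-1 perturbation; rcond=1e-3
+        # discards the perturbation (rank 5) while rcond=1e-6 keeps it.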
+        Y = orth(X, rcond=1e-3)
+        assert_equal(Y.shape, (n, 5))
+
+        Y = orth(X, rcond=1e-6)
+        assert_equal(Y.shape, (n, 5 + 1))
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(np.dtype(np.intp).itemsize < 8,
+                    reason="test only on 64-bit, else too slow")
+def test_orth_memory_efficiency():
+    # Pick n so that 16*n bytes is reasonable but 8*n*n bytes is unreasonable.
+    # Keep in mind that @pytest.mark.slow tests are likely to be running
+    # under configurations that allow 4 GB+ of memory, for tests related to
+    # 32-bit overflow.
+    n = 10*1000*1000
+    try:
+        _check_orth(n, np.float64, skip_big=True)
+    except MemoryError as e:
+        raise AssertionError(
+            'memory error perhaps caused by orth regression'
+        ) from e
+
+
+def test_orth():
+    dtypes = [np.float32, np.float64, np.complex64, np.complex128]
+    sizes = [1, 2, 3, 10, 100]
+    for dt, n in itertools.product(dtypes, sizes):
+        _check_orth(n, dt)
+
+
+def test_null_space():
+    np.random.seed(1)
+
+    dtypes = [np.float32, np.float64, np.complex64, np.complex128]
+    sizes = [1, 2, 3, 10, 100]
+
+    for dt, n in itertools.product(dtypes, sizes):
+        X = np.ones((2, n), dtype=dt)
+
+        eps = np.finfo(dt).eps
+        tol = 1000 * eps
+
+        Y = null_space(X)
+        assert_equal(Y.shape, (n, n-1))
+        assert_allclose(X @ Y, 0, atol=tol)
+
+        Y = null_space(X.T)
+        assert_equal(Y.shape, (2, 1))
+        assert_allclose(X.T @ Y, 0, atol=tol)
+
+        X = np.random.randn(1 + n//2, n)
+        Y = null_space(X)
+        assert_equal(Y.shape, (n, n - 1 - n//2))
+        assert_allclose(X @ Y, 0, atol=tol)
+
+        if n > 5:
+            np.random.seed(1)
+            X = np.random.rand(n, 5) @ np.random.rand(5, n)
+            X = X + 1e-4 * np.random.rand(n, 1) @ np.random.rand(1, n)
+            X = X.astype(dt)
+
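+            # Same construction as in _check_orth: the rank-1 term falls
+            # below the 1e-3 cutoff (nullity n-5) but survives at 1e-6 (n-6).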
+            Y = null_space(X, rcond=1e-3)
+            assert_equal(Y.shape, (n, n - 5))
+
+            Y = null_space(X, rcond=1e-6)
+            assert_equal(Y.shape, (n, n - 6))
+
+
+def test_subspace_angles():
+    H = hadamard(8, float)
+    A = H[:, :3]
+    B = H[:, 3:]
+    assert_allclose(subspace_angles(A, B), [np.pi / 2.] * 3, atol=1e-14)
+    assert_allclose(subspace_angles(B, A), [np.pi / 2.] * 3, atol=1e-14)
+    for x in (A, B):
+        assert_allclose(subspace_angles(x, x), np.zeros(x.shape[1]),
+                        atol=1e-14)
+    # From MATLAB function "subspace", which effectively only returns the
+    # last value that we calculate
+    x = np.array(
+        [[0.537667139546100, 0.318765239858981, 3.578396939725760, 0.725404224946106],  # noqa: E501
+         [1.833885014595086, -1.307688296305273, 2.769437029884877, -0.063054873189656],  # noqa: E501
+         [-2.258846861003648, -0.433592022305684, -1.349886940156521, 0.714742903826096],  # noqa: E501
+         [0.862173320368121, 0.342624466538650, 3.034923466331855, -0.204966058299775]])  # noqa: E501
+    expected = 1.481454682101605
+    assert_allclose(subspace_angles(x[:, :2], x[:, 2:])[0], expected,
+                    rtol=1e-12)
+    assert_allclose(subspace_angles(x[:, 2:], x[:, :2])[0], expected,
+                    rtol=1e-12)
+    expected = 0.746361174247302
+    assert_allclose(subspace_angles(x[:, :2], x[:, [2]]), expected, rtol=1e-12)
+    assert_allclose(subspace_angles(x[:, [2]], x[:, :2]), expected, rtol=1e-12)
+    expected = 0.487163718534313
+    assert_allclose(subspace_angles(x[:, :3], x[:, [3]]), expected, rtol=1e-12)
+    assert_allclose(subspace_angles(x[:, [3]], x[:, :3]), expected, rtol=1e-12)
+    expected = 0.328950515907756
+    assert_allclose(subspace_angles(x[:, :2], x[:, 1:]), [expected, 0],
+                    atol=1e-12)
+    # Degenerate conditions
+    assert_raises(ValueError, subspace_angles, x[0], x)
+    assert_raises(ValueError, subspace_angles, x, x[0])
+    assert_raises(ValueError, subspace_angles, x[:-1], x)
+
+    # Test branch if mask.any is True:
+    A = np.array([[1, 0, 0],
+                  [0, 1, 0],
+                  [0, 0, 1],
+                  [0, 0, 0],
+                  [0, 0, 0]])
+    B = np.array([[1, 0, 0],
+                  [0, 1, 0],
+                  [0, 0, 0],
+                  [0, 0, 0],
+                  [0, 0, 1]])
+    expected = np.array([np.pi/2, 0, 0])
+    assert_allclose(subspace_angles(A, B), expected, rtol=1e-12)
+
+    # Complex
+    # The second column in "b" does not affect the result; it is only there
+    # so that b can have more cols than a, and vice versa, exercising both
+    # conditional code paths.
+    a = [[1 + 1j], [0]]
+    b = [[1 - 1j, 0], [0, 1]]
+    assert_allclose(subspace_angles(a, b), 0., atol=1e-14)
+    assert_allclose(subspace_angles(b, a), 0., atol=1e-14)
+
+
+class TestCDF2RDF:
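+    # cdf2rdf converts a complex eigendecomposition (w, v) of a real matrix
+    # into a real block-diagonal form, pairing complex-conjugate eigenvalues.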
+
+    def matmul(self, a, b):
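+        # batched matrix multiply over any number of stacked leading axes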
+        return np.einsum('...ij,...jk->...ik', a, b)
+
+    def assert_eig_valid(self, w, v, x):
+        assert_array_almost_equal(
+            self.matmul(v, w),
+            self.matmul(x, v)
+        )
+
+    def test_single_array0x0real(self):
+        # eig doesn't support 0x0 in old versions of numpy
+        X = np.empty((0, 0))
+        w, v = np.empty(0), np.empty((0, 0))
+        wr, vr = cdf2rdf(w, v)
+        self.assert_eig_valid(wr, vr, X)
+
+    def test_single_array2x2_real(self):
+        X = np.array([[1, 2], [3, -1]])
+        w, v = np.linalg.eig(X)
+        wr, vr = cdf2rdf(w, v)
+        self.assert_eig_valid(wr, vr, X)
+
+    def test_single_array2x2_complex(self):
+        X = np.array([[1, 2], [-2, 1]])
+        w, v = np.linalg.eig(X)
+        wr, vr = cdf2rdf(w, v)
+        self.assert_eig_valid(wr, vr, X)
+
+    def test_single_array3x3_real(self):
+        X = np.array([[1, 2, 3], [1, 2, 3], [2, 5, 6]])
+        w, v = np.linalg.eig(X)
+        wr, vr = cdf2rdf(w, v)
+        self.assert_eig_valid(wr, vr, X)
+
+    def test_single_array3x3_complex(self):
+        X = np.array([[1, 2, 3], [0, 4, 5], [0, -5, 4]])
+        w, v = np.linalg.eig(X)
+        wr, vr = cdf2rdf(w, v)
+        self.assert_eig_valid(wr, vr, X)
+
+    def test_random_1d_stacked_arrays(self):
+        # cannot test M == 0 due to bug in old numpy
+        for M in range(1, 7):
+            np.random.seed(999999999)
+            X = np.random.rand(100, M, M)
+            w, v = np.linalg.eig(X)
+            wr, vr = cdf2rdf(w, v)
+            self.assert_eig_valid(wr, vr, X)
+
+    def test_random_2d_stacked_arrays(self):
+        # cannot test M == 0 due to bug in old numpy
+        for M in range(1, 7):
+            X = np.random.rand(10, 10, M, M)
+            w, v = np.linalg.eig(X)
+            wr, vr = cdf2rdf(w, v)
+            self.assert_eig_valid(wr, vr, X)
+
+    def test_low_dimensionality_error(self):
+        w, v = np.empty(()), np.array((2,))
+        assert_raises(ValueError, cdf2rdf, w, v)
+
+    def test_not_square_error(self):
+        # Check that passing a non-square array raises a ValueError.
+        w, v = np.arange(3), np.arange(6).reshape(3, 2)
+        assert_raises(ValueError, cdf2rdf, w, v)
+
+    def test_swapped_v_w_error(self):
+        # Check that exchanging places of w and v raises ValueError.
+        X = np.array([[1, 2, 3], [0, 4, 5], [0, -5, 4]])
+        w, v = np.linalg.eig(X)
+        assert_raises(ValueError, cdf2rdf, v, w)
+
+    def test_non_associated_error(self):
+        # Check that passing non-associated eigenvectors raises a ValueError.
+        w, v = np.arange(3), np.arange(16).reshape(4, 4)
+        assert_raises(ValueError, cdf2rdf, w, v)
+
+    def test_not_conjugate_pairs(self):
+        # Check that passing non-conjugate pairs raises a ValueError.
+        X = np.array([[1, 2, 3], [1, 2, 3], [2, 5, 6+1j]])
+        w, v = np.linalg.eig(X)
+        assert_raises(ValueError, cdf2rdf, w, v)
+
+        # different arrays in the stack, so not conjugate
+        X = np.array([
+            [[1, 2, 3], [1, 2, 3], [2, 5, 6+1j]],
+            [[1, 2, 3], [1, 2, 3], [2, 5, 6-1j]],
+        ])
+        w, v = np.linalg.eig(X)
+        assert_raises(ValueError, cdf2rdf, w, v)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_decomp_cholesky.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_decomp_cholesky.py
new file mode 100644
index 00000000..db630bde
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_decomp_cholesky.py
@@ -0,0 +1,202 @@
+from numpy.testing import assert_array_almost_equal, assert_array_equal
+from pytest import raises as assert_raises
+
+from numpy import array, transpose, dot, conjugate, zeros_like, empty
+from numpy.random import random
+from scipy.linalg import cholesky, cholesky_banded, cho_solve_banded, \
+     cho_factor, cho_solve
+
+from scipy.linalg._testutils import assert_no_overwrite
+
+
+class TestCholesky:
+
+    def test_simple(self):
+        a = [[8, 2, 3], [2, 9, 3], [3, 3, 6]]
+        c = cholesky(a)
+        assert_array_almost_equal(dot(transpose(c), c), a)
+        c = transpose(c)
+        a = dot(c, transpose(c))
+        assert_array_almost_equal(cholesky(a, lower=1), c)
+
+    def test_check_finite(self):
+        a = [[8, 2, 3], [2, 9, 3], [3, 3, 6]]
+        c = cholesky(a, check_finite=False)
+        assert_array_almost_equal(dot(transpose(c), c), a)
+        c = transpose(c)
+        a = dot(c, transpose(c))
+        assert_array_almost_equal(cholesky(a, lower=1, check_finite=False), c)
+
+    def test_simple_complex(self):
+        m = array([[3+1j, 3+4j, 5], [0, 2+2j, 2+7j], [0, 0, 7+4j]])
+        a = dot(transpose(conjugate(m)), m)
+        c = cholesky(a)
+        a1 = dot(transpose(conjugate(c)), c)
+        assert_array_almost_equal(a, a1)
+        c = transpose(c)
+        a = dot(c, transpose(conjugate(c)))
+        assert_array_almost_equal(cholesky(a, lower=1), c)
+
+    def test_random(self):
+        n = 20
+        for k in range(2):
+            m = random([n, n])
+            for i in range(n):
+                m[i, i] = 20*(.1+m[i, i])
+            a = dot(transpose(m), m)
+            c = cholesky(a)
+            a1 = dot(transpose(c), c)
+            assert_array_almost_equal(a, a1)
+            c = transpose(c)
+            a = dot(c, transpose(c))
+            assert_array_almost_equal(cholesky(a, lower=1), c)
+
+    def test_random_complex(self):
+        n = 20
+        for k in range(2):
+            m = random([n, n])+1j*random([n, n])
+            for i in range(n):
+                m[i, i] = 20*(.1+abs(m[i, i]))
+            a = dot(transpose(conjugate(m)), m)
+            c = cholesky(a)
+            a1 = dot(transpose(conjugate(c)), c)
+            assert_array_almost_equal(a, a1)
+            c = transpose(c)
+            a = dot(c, transpose(conjugate(c)))
+            assert_array_almost_equal(cholesky(a, lower=1), c)
+
+
+class TestCholeskyBanded:
+    """Tests for cholesky_banded() and cho_solve_banded."""
+
+    def test_check_finite(self):
+        # Symmetric positive definite banded matrix `a`
+        a = array([[4.0, 1.0, 0.0, 0.0],
+                   [1.0, 4.0, 0.5, 0.0],
+                   [0.0, 0.5, 4.0, 0.2],
+                   [0.0, 0.0, 0.2, 4.0]])
+        # Banded storage form of `a`.
+        ab = array([[-1.0, 1.0, 0.5, 0.2],
+                    [4.0, 4.0, 4.0, 4.0]])
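+        # Upper form: the last row holds the main diagonal; the row above it
+        # holds the superdiagonal, left-padded with a placeholder (-1.0).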
+        c = cholesky_banded(ab, lower=False, check_finite=False)
+        ufac = zeros_like(a)
+        ufac[list(range(4)), list(range(4))] = c[-1]
+        ufac[(0, 1, 2), (1, 2, 3)] = c[0, 1:]
+        assert_array_almost_equal(a, dot(ufac.T, ufac))
+
+        b = array([0.0, 0.5, 4.2, 4.2])
+        x = cho_solve_banded((c, False), b, check_finite=False)
+        assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0])
+
+    def test_upper_real(self):
+        # Symmetric positive definite banded matrix `a`
+        a = array([[4.0, 1.0, 0.0, 0.0],
+                   [1.0, 4.0, 0.5, 0.0],
+                   [0.0, 0.5, 4.0, 0.2],
+                   [0.0, 0.0, 0.2, 4.0]])
+        # Banded storage form of `a`.
+        ab = array([[-1.0, 1.0, 0.5, 0.2],
+                    [4.0, 4.0, 4.0, 4.0]])
+        c = cholesky_banded(ab, lower=False)
+        ufac = zeros_like(a)
+        ufac[list(range(4)), list(range(4))] = c[-1]
+        ufac[(0, 1, 2), (1, 2, 3)] = c[0, 1:]
+        assert_array_almost_equal(a, dot(ufac.T, ufac))
+
+        b = array([0.0, 0.5, 4.2, 4.2])
+        x = cho_solve_banded((c, False), b)
+        assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0])
+
+    def test_upper_complex(self):
+        # Hermitian positive definite banded matrix `a`
+        a = array([[4.0, 1.0, 0.0, 0.0],
+                   [1.0, 4.0, 0.5, 0.0],
+                   [0.0, 0.5, 4.0, -0.2j],
+                   [0.0, 0.0, 0.2j, 4.0]])
+        # Banded storage form of `a`.
+        ab = array([[-1.0, 1.0, 0.5, -0.2j],
+                    [4.0, 4.0, 4.0, 4.0]])
+        c = cholesky_banded(ab, lower=False)
+        ufac = zeros_like(a)
+        ufac[list(range(4)), list(range(4))] = c[-1]
+        ufac[(0, 1, 2), (1, 2, 3)] = c[0, 1:]
+        assert_array_almost_equal(a, dot(ufac.conj().T, ufac))
+
+        b = array([0.0, 0.5, 4.0-0.2j, 0.2j + 4.0])
+        x = cho_solve_banded((c, False), b)
+        assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0])
+
+    def test_lower_real(self):
+        # Symmetric positive definite banded matrix `a`
+        a = array([[4.0, 1.0, 0.0, 0.0],
+                   [1.0, 4.0, 0.5, 0.0],
+                   [0.0, 0.5, 4.0, 0.2],
+                   [0.0, 0.0, 0.2, 4.0]])
+        # Banded storage form of `a`.
+        ab = array([[4.0, 4.0, 4.0, 4.0],
+                    [1.0, 0.5, 0.2, -1.0]])
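+        # Lower form: the first row holds the main diagonal; the row below it
+        # holds the subdiagonal, right-padded with a placeholder (-1.0).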
+        c = cholesky_banded(ab, lower=True)
+        lfac = zeros_like(a)
+        lfac[list(range(4)), list(range(4))] = c[0]
+        lfac[(1, 2, 3), (0, 1, 2)] = c[1, :3]
+        assert_array_almost_equal(a, dot(lfac, lfac.T))
+
+        b = array([0.0, 0.5, 4.2, 4.2])
+        x = cho_solve_banded((c, True), b)
+        assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0])
+
+    def test_lower_complex(self):
+        # Hermitian positive definite banded matrix `a`
+        a = array([[4.0, 1.0, 0.0, 0.0],
+                   [1.0, 4.0, 0.5, 0.0],
+                   [0.0, 0.5, 4.0, -0.2j],
+                   [0.0, 0.0, 0.2j, 4.0]])
+        # Banded storage form of `a`.
+        ab = array([[4.0, 4.0, 4.0, 4.0],
+                    [1.0, 0.5, 0.2j, -1.0]])
+        c = cholesky_banded(ab, lower=True)
+        lfac = zeros_like(a)
+        lfac[list(range(4)), list(range(4))] = c[0]
+        lfac[(1, 2, 3), (0, 1, 2)] = c[1, :3]
+        assert_array_almost_equal(a, dot(lfac, lfac.conj().T))
+
+        b = array([0.0, 0.5j, 3.8j, 3.8])
+        x = cho_solve_banded((c, True), b)
+        assert_array_almost_equal(x, [0.0, 0.0, 1.0j, 1.0])
+
+
+class TestOverwrite:
+    def test_cholesky(self):
+        assert_no_overwrite(cholesky, [(3, 3)])
+
+    def test_cho_factor(self):
+        assert_no_overwrite(cho_factor, [(3, 3)])
+
+    def test_cho_solve(self):
+        x = array([[2, -1, 0], [-1, 2, -1], [0, -1, 2]])
+        xcho = cho_factor(x)
+        assert_no_overwrite(lambda b: cho_solve(xcho, b), [(3,)])
+
+    def test_cholesky_banded(self):
+        assert_no_overwrite(cholesky_banded, [(2, 3)])
+
+    def test_cho_solve_banded(self):
+        x = array([[0, -1, -1], [2, 2, 2]])
+        xcho = cholesky_banded(x)
+        assert_no_overwrite(lambda b: cho_solve_banded((xcho, False), b),
+                            [(3,)])
+
+
+class TestEmptyArray:
+    def test_cho_factor_empty_square(self):
+        a = empty((0, 0))
+        b = array([])
+        c = array([[]])
+        d = []
+        e = [[]]
+
+        x, _ = cho_factor(a)
+        assert_array_equal(x, a)
+
+        for x in ([b, c, d, e]):
+            assert_raises(ValueError, cho_factor, x)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_decomp_cossin.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_decomp_cossin.py
new file mode 100644
index 00000000..56a908a1
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_decomp_cossin.py
@@ -0,0 +1,155 @@
+import pytest
+import numpy as np
+from numpy.random import seed
+from numpy.testing import assert_allclose
+
+from scipy.linalg.lapack import _compute_lwork
+from scipy.stats import ortho_group, unitary_group
+from scipy.linalg import cossin, get_lapack_funcs
+
+REAL_DTYPES = (np.float32, np.float64)
+COMPLEX_DTYPES = (np.complex64, np.complex128)
+DTYPES = REAL_DTYPES + COMPLEX_DTYPES
+
+
+@pytest.mark.parametrize('dtype_', DTYPES)
+@pytest.mark.parametrize('m, p, q',
+                         [
+                             (2, 1, 1),
+                             (3, 2, 1),
+                             (3, 1, 2),
+                             (4, 2, 2),
+                             (4, 1, 2),
+                             (40, 12, 20),
+                             (40, 30, 1),
+                             (40, 1, 30),
+                             (100, 50, 1),
+                             (100, 50, 50),
+                         ])
+@pytest.mark.parametrize('swap_sign', [True, False])
+def test_cossin(dtype_, m, p, q, swap_sign):
+    seed(1234)
+    if dtype_ in COMPLEX_DTYPES:
+        x = np.array(unitary_group.rvs(m), dtype=dtype_)
+    else:
+        x = np.array(ortho_group.rvs(m), dtype=dtype_)
+
+    u, cs, vh = cossin(x, p, q,
+                       swap_sign=swap_sign)
+    assert_allclose(x, u @ cs @ vh, rtol=0., atol=m*1e3*np.finfo(dtype_).eps)
+    assert u.dtype == dtype_
+    # Test for float32 or float64
+    assert cs.dtype == np.real(u).dtype
+    assert vh.dtype == dtype_
+
+    u, cs, vh = cossin([x[:p, :q], x[:p, q:], x[p:, :q], x[p:, q:]],
+                       swap_sign=swap_sign)
+    assert_allclose(x, u @ cs @ vh, rtol=0., atol=m*1e3*np.finfo(dtype_).eps)
+    assert u.dtype == dtype_
+    assert cs.dtype == np.real(u).dtype
+    assert vh.dtype == dtype_
+
+    _, cs2, vh2 = cossin(x, p, q,
+                         compute_u=False,
+                         swap_sign=swap_sign)
+    assert_allclose(cs, cs2, rtol=0., atol=10*np.finfo(dtype_).eps)
+    assert_allclose(vh, vh2, rtol=0., atol=10*np.finfo(dtype_).eps)
+
+    u2, cs2, _ = cossin(x, p, q,
+                        compute_vh=False,
+                        swap_sign=swap_sign)
+    assert_allclose(u, u2, rtol=0., atol=10*np.finfo(dtype_).eps)
+    assert_allclose(cs, cs2, rtol=0., atol=10*np.finfo(dtype_).eps)
+
+    _, cs2, _ = cossin(x, p, q,
+                       compute_u=False,
+                       compute_vh=False,
+                       swap_sign=swap_sign)
+    assert_allclose(cs, cs2, rtol=0., atol=10*np.finfo(dtype_).eps)
+
+
+def test_cossin_mixed_types():
+    seed(1234)
+    x = np.array(ortho_group.rvs(4), dtype=np.float64)
+    u, cs, vh = cossin([x[:2, :2],
+                        np.array(x[:2, 2:], dtype=np.complex128),
+                        x[2:, :2],
+                        x[2:, 2:]])
+
+    assert u.dtype == np.complex128
+    assert cs.dtype == np.float64
+    assert vh.dtype == np.complex128
+    assert_allclose(x, u @ cs @ vh, rtol=0.,
+                    atol=1e4 * np.finfo(np.complex128).eps)
+
+
+def test_cossin_error_incorrect_subblocks():
+    with pytest.raises(ValueError, match="be due to missing p, q arguments."):
+        cossin(([1, 2], [3, 4, 5], [6, 7], [8, 9, 10]))
+
+
+def test_cossin_error_empty_subblocks():
+    with pytest.raises(ValueError, match="x11.*empty"):
+        cossin(([], [], [], []))
+    with pytest.raises(ValueError, match="x12.*empty"):
+        cossin(([1, 2], [], [6, 7], [8, 9, 10]))
+    with pytest.raises(ValueError, match="x21.*empty"):
+        cossin(([1, 2], [3, 4, 5], [], [8, 9, 10]))
+    with pytest.raises(ValueError, match="x22.*empty"):
+        cossin(([1, 2], [3, 4, 5], [2], []))
+
+
+def test_cossin_error_missing_partitioning():
+    with pytest.raises(ValueError, match=".*exactly four arrays.* got 2"):
+        cossin(unitary_group.rvs(2))
+
+    with pytest.raises(ValueError, match=".*might be due to missing p, q"):
+        cossin(unitary_group.rvs(4))
+
+
+def test_cossin_error_non_iterable():
+    with pytest.raises(ValueError, match="containing the subblocks of X"):
+        cossin(12j)
+
+
+def test_cossin_error_non_square():
+    with pytest.raises(ValueError, match="only supports square"):
+        cossin(np.array([[1, 2]]), 1, 1)
+
+def test_cossin_error_partitioning():
+    x = np.array(ortho_group.rvs(4), dtype=np.float64)
+    with pytest.raises(ValueError, match="invalid p=0.*0= n:
+        assert_allclose(u.conj().T.dot(u), np.eye(n), atol=1e-15)
+    else:
+        assert_allclose(u.dot(u.conj().T), np.eye(m), atol=1e-15)
+    # p is Hermitian positive semidefinite.
+    assert_allclose(p.conj().T, p)
+    evals = eigh(p, eigvals_only=True)
+    nonzero_evals = evals[abs(evals) > 1e-14]
+    assert_((nonzero_evals >= 0).all())
+
+    u, p = polar(a, side='left')
+    assert_equal(u.shape, (m, n))
+    assert_equal(p.shape, (m, m))
+    # a = pu
+    assert_allclose(p.dot(u), a, atol=product_atol)
+    if m >= n:
+        assert_allclose(u.conj().T.dot(u), np.eye(n), atol=1e-15)
+    else:
+        assert_allclose(u.dot(u.conj().T), np.eye(m), atol=1e-15)
+    # p is Hermitian positive semidefinite.
+    assert_allclose(p.conj().T, p)
+    evals = eigh(p, eigvals_only=True)
+    nonzero_evals = evals[abs(evals) > 1e-14]
+    assert_((nonzero_evals >= 0).all())
+
+
+def test_precomputed_cases():
+    for a, side, expected_u, expected_p in precomputed_cases:
+        check_precomputed_polar(a, side, expected_u, expected_p)
+
+
+def test_verify_cases():
+    for a in verify_cases:
+        verify_polar(a)
+
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_decomp_update.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_decomp_update.py
new file mode 100644
index 00000000..33068338
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_decomp_update.py
@@ -0,0 +1,1700 @@
+import itertools
+
+import numpy as np
+from numpy.testing import assert_, assert_allclose, assert_equal
+from pytest import raises as assert_raises
+from scipy import linalg
+import scipy.linalg._decomp_update as _decomp_update
+from scipy.linalg._decomp_update import qr_delete, qr_update, qr_insert
+
+def assert_unitary(a, rtol=None, atol=None, assert_sqr=True):
+    if rtol is None:
+        rtol = 10.0 ** -(np.finfo(a.dtype).precision-2)
+    if atol is None:
+        atol = 10*np.finfo(a.dtype).eps
+
+    if assert_sqr:
+        assert_(a.shape[0] == a.shape[1], 'unitary matrices must be square')
+    aTa = np.dot(a.T.conj(), a)
+    assert_allclose(aTa, np.eye(a.shape[1]), rtol=rtol, atol=atol)
+
+def assert_upper_tri(a, rtol=None, atol=None):
+    if rtol is None:
+        rtol = 10.0 ** -(np.finfo(a.dtype).precision-2)
+    if atol is None:
+        atol = 2*np.finfo(a.dtype).eps
+    mask = np.tri(a.shape[0], a.shape[1], -1, np.bool_)
+    assert_allclose(a[mask], 0.0, rtol=rtol, atol=atol)
+
+def check_qr(q, r, a, rtol, atol, assert_sqr=True):
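+    # q must be unitary, r upper triangular, and q.dot(r) must reproduce a.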
+    assert_unitary(q, rtol, atol, assert_sqr)
+    assert_upper_tri(r, rtol, atol)
+    assert_allclose(q.dot(r), a, rtol=rtol, atol=atol)
+
+def make_strided(arrs):
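+    # Embed each array in a larger zero buffer and return a strided view of
+    # it: same values, but no longer contiguous in memory.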
+    strides = [(3, 7), (2, 2), (3, 4), (4, 2), (5, 4), (2, 3), (2, 1), (4, 5)]
+    kmax = len(strides)
+    k = 0
+    ret = []
+    for a in arrs:
+        if a.ndim == 1:
+            s = strides[k % kmax]
+            k += 1
+            base = np.zeros(s[0]*a.shape[0]+s[1], a.dtype)
+            view = base[s[1]::s[0]]
+            view[...] = a
+        elif a.ndim == 2:
+            s = strides[k % kmax]
+            t = strides[(k+1) % kmax]
+            k += 2
+            base = np.zeros((s[0]*a.shape[0]+s[1], t[0]*a.shape[1]+t[1]),
+                            a.dtype)
+            view = base[s[1]::s[0], t[1]::t[0]]
+            view[...] = a
+        else:
+            raise ValueError('make_strided only works for ndim = 1 or'
+                             ' 2 arrays')
+        ret.append(view)
+    return ret
+
+def negate_strides(arrs):
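+    # Copy each array into a reversed view so the result carries negative
+    # strides while holding the same values.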
+    ret = []
+    for a in arrs:
+        b = np.zeros_like(a)
+        if b.ndim == 2:
+            b = b[::-1, ::-1]
+        elif b.ndim == 1:
+            b = b[::-1]
+        else:
+            raise ValueError('negate_strides only works for ndim = 1 or'
+                             ' 2 arrays')
+        b[...] = a
+        ret.append(b)
+    return ret
+
+def nonitemsize_strides(arrs):
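+    # View one field of a structured dtype so the resulting strides are not
+    # a multiple of the itemsize.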
+    out = []
+    for a in arrs:
+        a_dtype = a.dtype
+        b = np.zeros(a.shape, [('a', a_dtype), ('junk', 'S1')])
+        c = b.getfield(a_dtype)
+        c[...] = a
+        out.append(c)
+    return out
+
+
+def make_nonnative(arrs):
+    return [a.astype(a.dtype.newbyteorder()) for a in arrs]
+
+
+class BaseQRdeltas:
+    def setup_method(self):
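+        # Tolerances are derived from the decimal precision of the dtype
+        # under test.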
+        self.rtol = 10.0 ** -(np.finfo(self.dtype).precision-2)
+        self.atol = 10 * np.finfo(self.dtype).eps
+
+    def generate(self, type, mode='full'):
+        np.random.seed(29382)
+        shape = {'sqr': (8, 8), 'tall': (12, 7), 'fat': (7, 12),
+                 'Mx1': (8, 1), '1xN': (1, 8), '1x1': (1, 1)}[type]
+        a = np.random.random(shape)
+        if np.iscomplexobj(self.dtype.type(1)):
+            b = np.random.random(shape)
+            a = a + 1j * b
+        a = a.astype(self.dtype)
+        q, r = linalg.qr(a, mode=mode)
+        return a, q, r
+
+class BaseQRdelete(BaseQRdeltas):
+    def test_sqr_1_row(self):
+        a, q, r = self.generate('sqr')
+        for row in range(r.shape[0]):
+            q1, r1 = qr_delete(q, r, row, overwrite_qr=False)
+            a1 = np.delete(a, row, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_sqr_p_row(self):
+        a, q, r = self.generate('sqr')
+        for ndel in range(2, 6):
+            for row in range(a.shape[0]-ndel):
+                q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False)
+                a1 = np.delete(a, slice(row, row+ndel), 0)
+                check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_sqr_1_col(self):
+        a, q, r = self.generate('sqr')
+        for col in range(r.shape[1]):
+            q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False)
+            a1 = np.delete(a, col, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_sqr_p_col(self):
+        a, q, r = self.generate('sqr')
+        for ndel in range(2, 6):
+            for col in range(r.shape[1]-ndel):
+                q1, r1 = qr_delete(q, r, col, ndel, which='col',
+                                   overwrite_qr=False)
+                a1 = np.delete(a, slice(col, col+ndel), 1)
+                check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_tall_1_row(self):
+        a, q, r = self.generate('tall')
+        for row in range(r.shape[0]):
+            q1, r1 = qr_delete(q, r, row, overwrite_qr=False)
+            a1 = np.delete(a, row, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_tall_p_row(self):
+        a, q, r = self.generate('tall')
+        for ndel in range(2, 6):
+            for row in range(a.shape[0]-ndel):
+                q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False)
+                a1 = np.delete(a, slice(row, row+ndel), 0)
+                check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_tall_1_col(self):
+        a, q, r = self.generate('tall')
+        for col in range(r.shape[1]):
+            q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False)
+            a1 = np.delete(a, col, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_tall_p_col(self):
+        a, q, r = self.generate('tall')
+        for ndel in range(2, 6):
+            for col in range(r.shape[1]-ndel):
+                q1, r1 = qr_delete(q, r, col, ndel, which='col',
+                                   overwrite_qr=False)
+                a1 = np.delete(a, slice(col, col+ndel), 1)
+                check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_fat_1_row(self):
+        a, q, r = self.generate('fat')
+        for row in range(r.shape[0]):
+            q1, r1 = qr_delete(q, r, row, overwrite_qr=False)
+            a1 = np.delete(a, row, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_fat_p_row(self):
+        a, q, r = self.generate('fat')
+        for ndel in range(2, 6):
+            for row in range(a.shape[0]-ndel):
+                q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False)
+                a1 = np.delete(a, slice(row, row+ndel), 0)
+                check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_fat_1_col(self):
+        a, q, r = self.generate('fat')
+        for col in range(r.shape[1]):
+            q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False)
+            a1 = np.delete(a, col, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_fat_p_col(self):
+        a, q, r = self.generate('fat')
+        for ndel in range(2, 6):
+            for col in range(r.shape[1]-ndel):
+                q1, r1 = qr_delete(q, r, col, ndel, which='col',
+                                   overwrite_qr=False)
+                a1 = np.delete(a, slice(col, col+ndel), 1)
+                check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_economic_1_row(self):
+        # this test always starts and ends with an economic decomp.
+        a, q, r = self.generate('tall', 'economic')
+        for row in range(r.shape[0]):
+            q1, r1 = qr_delete(q, r, row, overwrite_qr=False)
+            a1 = np.delete(a, row, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+    # for economic row deletes
+    # eco - prow = eco
+    # eco - prow = sqr
+    # eco - prow = fat
+    def base_economic_p_row_xxx(self, ndel):
+        a, q, r = self.generate('tall', 'economic')
+        for row in range(a.shape[0]-ndel):
+            q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False)
+            a1 = np.delete(a, slice(row, row+ndel), 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+    def test_economic_p_row_economic(self):
+        # (12, 7) - (3, 7) = (9,7) --> stays economic
+        self.base_economic_p_row_xxx(3)
+
+    def test_economic_p_row_sqr(self):
+        # (12, 7) - (5, 7) = (7, 7) --> becomes square
+        self.base_economic_p_row_xxx(5)
+
+    def test_economic_p_row_fat(self):
+        # (12, 7) - (7,7) = (5, 7) --> becomes fat
+        self.base_economic_p_row_xxx(7)
+
+    def test_economic_1_col(self):
+        a, q, r = self.generate('tall', 'economic')
+        for col in range(r.shape[1]):
+            q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False)
+            a1 = np.delete(a, col, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+    def test_economic_p_col(self):
+        a, q, r = self.generate('tall', 'economic')
+        for ndel in range(2, 6):
+            for col in range(r.shape[1]-ndel):
+                q1, r1 = qr_delete(q, r, col, ndel, which='col',
+                                   overwrite_qr=False)
+                a1 = np.delete(a, slice(col, col+ndel), 1)
+                check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+    def test_Mx1_1_row(self):
+        a, q, r = self.generate('Mx1')
+        for row in range(r.shape[0]):
+            q1, r1 = qr_delete(q, r, row, overwrite_qr=False)
+            a1 = np.delete(a, row, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_Mx1_p_row(self):
+        a, q, r = self.generate('Mx1')
+        for ndel in range(2, 6):
+            for row in range(a.shape[0]-ndel):
+                q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False)
+                a1 = np.delete(a, slice(row, row+ndel), 0)
+                check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_1xN_1_col(self):
+        a, q, r = self.generate('1xN')
+        for col in range(r.shape[1]):
+            q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False)
+            a1 = np.delete(a, col, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_1xN_p_col(self):
+        a, q, r = self.generate('1xN')
+        for ndel in range(2, 6):
+            for col in range(r.shape[1]-ndel):
+                q1, r1 = qr_delete(q, r, col, ndel, which='col',
+                                   overwrite_qr=False)
+                a1 = np.delete(a, slice(col, col+ndel), 1)
+                check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_Mx1_economic_1_row(self):
+        a, q, r = self.generate('Mx1', 'economic')
+        for row in range(r.shape[0]):
+            q1, r1 = qr_delete(q, r, row, overwrite_qr=False)
+            a1 = np.delete(a, row, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+    def test_Mx1_economic_p_row(self):
+        a, q, r = self.generate('Mx1', 'economic')
+        for ndel in range(2, 6):
+            for row in range(a.shape[0]-ndel):
+                q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False)
+                a1 = np.delete(a, slice(row, row+ndel), 0)
+                check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+    def test_delete_last_1_row(self):
+        # full and eco are the same for 1xN
+        a, q, r = self.generate('1xN')
+        q1, r1 = qr_delete(q, r, 0, 1, 'row')
+        assert_equal(q1, np.ndarray(shape=(0, 0), dtype=q.dtype))
+        assert_equal(r1, np.ndarray(shape=(0, r.shape[1]), dtype=r.dtype))
+
+    def test_delete_last_p_row(self):
+        a, q, r = self.generate('tall', 'full')
+        q1, r1 = qr_delete(q, r, 0, a.shape[0], 'row')
+        assert_equal(q1, np.ndarray(shape=(0, 0), dtype=q.dtype))
+        assert_equal(r1, np.ndarray(shape=(0, r.shape[1]), dtype=r.dtype))
+
+        a, q, r = self.generate('tall', 'economic')
+        q1, r1 = qr_delete(q, r, 0, a.shape[0], 'row')
+        assert_equal(q1, np.ndarray(shape=(0, 0), dtype=q.dtype))
+        assert_equal(r1, np.ndarray(shape=(0, r.shape[1]), dtype=r.dtype))
+
+    def test_delete_last_1_col(self):
+        a, q, r = self.generate('Mx1', 'economic')
+        q1, r1 = qr_delete(q, r, 0, 1, 'col')
+        assert_equal(q1, np.ndarray(shape=(q.shape[0], 0), dtype=q.dtype))
+        assert_equal(r1, np.ndarray(shape=(0, 0), dtype=r.dtype))
+
+        a, q, r = self.generate('Mx1', 'full')
+        q1, r1 = qr_delete(q, r, 0, 1, 'col')
+        assert_unitary(q1)
+        assert_(q1.dtype == q.dtype)
+        assert_(q1.shape == q.shape)
+        assert_equal(r1, np.ndarray(shape=(r.shape[0], 0), dtype=r.dtype))
+
+    def test_delete_last_p_col(self):
+        a, q, r = self.generate('tall', 'full')
+        q1, r1 = qr_delete(q, r, 0, a.shape[1], 'col')
+        assert_unitary(q1)
+        assert_(q1.dtype == q.dtype)
+        assert_(q1.shape == q.shape)
+        assert_equal(r1, np.ndarray(shape=(r.shape[0], 0), dtype=r.dtype))
+
+        a, q, r = self.generate('tall', 'economic')
+        q1, r1 = qr_delete(q, r, 0, a.shape[1], 'col')
+        assert_equal(q1, np.ndarray(shape=(q.shape[0], 0), dtype=q.dtype))
+        assert_equal(r1, np.ndarray(shape=(0, 0), dtype=r.dtype))
+
+    def test_delete_1x1_row_col(self):
+        a, q, r = self.generate('1x1')
+        q1, r1 = qr_delete(q, r, 0, 1, 'row')
+        assert_equal(q1, np.ndarray(shape=(0, 0), dtype=q.dtype))
+        assert_equal(r1, np.ndarray(shape=(0, r.shape[1]), dtype=r.dtype))
+
+        a, q, r = self.generate('1x1')
+        q1, r1 = qr_delete(q, r, 0, 1, 'col')
+        assert_unitary(q1)
+        assert_(q1.dtype == q.dtype)
+        assert_(q1.shape == q.shape)
+        assert_equal(r1, np.ndarray(shape=(r.shape[0], 0), dtype=r.dtype))
+
+    # For full qr, row deletes and single column deletes should be able to
+    # handle any non-negative strides (only row and column vector
+    # operations are used). p-column deletes require Fortran-ordered
+    # Q and R, and will make a copy as necessary. Economic qr row deletes
+    # require a contiguous q.
+
+    def base_non_simple_strides(self, adjust_strides, ks, p, which,
+                                overwriteable):
+        if which == 'row':
+            qind = (slice(p,None), slice(p,None))
+            rind = (slice(p,None), slice(None))
+        else:
+            qind = (slice(None), slice(None))
+            rind = (slice(None), slice(None,-p))
+
+        for type, k in itertools.product(['sqr', 'tall', 'fat'], ks):
+            a, q0, r0 = self.generate(type)
+            qs, rs = adjust_strides((q0, r0))
+            if p == 1:
+                a1 = np.delete(a, k, 0 if which == 'row' else 1)
+            else:
+                s = slice(k,k+p)
+                if k < 0:
+                    s = slice(k, k + p +
+                              (a.shape[0] if which == 'row' else a.shape[1]))
+                a1 = np.delete(a, s, 0 if which == 'row' else 1)
+
+            # for each variable, q, r we try with it strided and
+            # overwrite=False. Then we try with overwrite=True, and make
+            # sure that q and r are still overwritten.
+
+            q = q0.copy('F')
+            r = r0.copy('F')
+            q1, r1 = qr_delete(qs, r, k, p, which, False)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+            q1o, r1o = qr_delete(qs, r, k, p, which, True)
+            check_qr(q1o, r1o, a1, self.rtol, self.atol)
+            if overwriteable:
+                assert_allclose(q1o, qs[qind], rtol=self.rtol, atol=self.atol)
+                assert_allclose(r1o, r[rind], rtol=self.rtol, atol=self.atol)
+
+            q = q0.copy('F')
+            r = r0.copy('F')
+            q2, r2 = qr_delete(q, rs, k, p, which, False)
+            check_qr(q2, r2, a1, self.rtol, self.atol)
+            q2o, r2o = qr_delete(q, rs, k, p, which, True)
+            check_qr(q2o, r2o, a1, self.rtol, self.atol)
+            if overwriteable:
+                assert_allclose(q2o, q[qind], rtol=self.rtol, atol=self.atol)
+                assert_allclose(r2o, rs[rind], rtol=self.rtol, atol=self.atol)
+
+            q = q0.copy('F')
+            r = r0.copy('F')
+            # since some of these were consumed above
+            qs, rs = adjust_strides((q, r))
+            q3, r3 = qr_delete(qs, rs, k, p, which, False)
+            check_qr(q3, r3, a1, self.rtol, self.atol)
+            q3o, r3o = qr_delete(qs, rs, k, p, which, True)
+            check_qr(q3o, r3o, a1, self.rtol, self.atol)
+            if overwriteable:
+                assert_allclose(q3o, qs[qind], rtol=self.rtol, atol=self.atol)
+                assert_allclose(r3o, rs[rind], rtol=self.rtol, atol=self.atol)
+
+    def test_non_unit_strides_1_row(self):
+        self.base_non_simple_strides(make_strided, [0], 1, 'row', True)
+
+    def test_non_unit_strides_p_row(self):
+        self.base_non_simple_strides(make_strided, [0], 3, 'row', True)
+
+    def test_non_unit_strides_1_col(self):
+        self.base_non_simple_strides(make_strided, [0], 1, 'col', True)
+
+    def test_non_unit_strides_p_col(self):
+        self.base_non_simple_strides(make_strided, [0], 3, 'col', False)
+
+    def test_neg_strides_1_row(self):
+        self.base_non_simple_strides(negate_strides, [0], 1, 'row', False)
+
+    def test_neg_strides_p_row(self):
+        self.base_non_simple_strides(negate_strides, [0], 3, 'row', False)
+
+    def test_neg_strides_1_col(self):
+        self.base_non_simple_strides(negate_strides, [0], 1, 'col', False)
+
+    def test_neg_strides_p_col(self):
+        self.base_non_simple_strides(negate_strides, [0], 3, 'col', False)
+
+    def test_non_itemize_strides_1_row(self):
+        self.base_non_simple_strides(nonitemsize_strides, [0], 1, 'row', False)
+
+    def test_non_itemize_strides_p_row(self):
+        self.base_non_simple_strides(nonitemsize_strides, [0], 3, 'row', False)
+
+    def test_non_itemize_strides_1_col(self):
+        self.base_non_simple_strides(nonitemsize_strides, [0], 1, 'col', False)
+
+    def test_non_itemize_strides_p_col(self):
+        self.base_non_simple_strides(nonitemsize_strides, [0], 3, 'col', False)
+
+    def test_non_native_byte_order_1_row(self):
+        self.base_non_simple_strides(make_nonnative, [0], 1, 'row', False)
+
+    def test_non_native_byte_order_p_row(self):
+        self.base_non_simple_strides(make_nonnative, [0], 3, 'row', False)
+
+    def test_non_native_byte_order_1_col(self):
+        self.base_non_simple_strides(make_nonnative, [0], 1, 'col', False)
+
+    def test_non_native_byte_order_p_col(self):
+        self.base_non_simple_strides(make_nonnative, [0], 3, 'col', False)
+
+    def test_neg_k(self):
+        a, q, r = self.generate('sqr')
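+        # negative k indexes from the end, like ordinary Python indexing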
+        for k, p, w in itertools.product([-3, -7], [1, 3], ['row', 'col']):
+            q1, r1 = qr_delete(q, r, k, p, w, overwrite_qr=False)
+            if w == 'row':
+                a1 = np.delete(a, slice(k+a.shape[0], k+p+a.shape[0]), 0)
+            else:
+                a1 = np.delete(a, slice(k+a.shape[0], k+p+a.shape[1]), 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def base_overwrite_qr(self, which, p, test_C, test_F, mode='full'):
+        assert_sqr = True if mode == 'full' else False
+        if which == 'row':
+            qind = (slice(p,None), slice(p,None))
+            rind = (slice(p,None), slice(None))
+        else:
+            qind = (slice(None), slice(None))
+            rind = (slice(None), slice(None,-p))
+        a, q0, r0 = self.generate('sqr', mode)
+        if p == 1:
+            a1 = np.delete(a, 3, 0 if which == 'row' else 1)
+        else:
+            a1 = np.delete(a, slice(3, 3+p), 0 if which == 'row' else 1)
+
+        # don't overwrite
+        q = q0.copy('F')
+        r = r0.copy('F')
+        q1, r1 = qr_delete(q, r, 3, p, which, False)
+        check_qr(q1, r1, a1, self.rtol, self.atol, assert_sqr)
+        check_qr(q, r, a, self.rtol, self.atol, assert_sqr)
+
+        if test_F:
+            q = q0.copy('F')
+            r = r0.copy('F')
+            q2, r2 = qr_delete(q, r, 3, p, which, True)
+            check_qr(q2, r2, a1, self.rtol, self.atol, assert_sqr)
+            # verify the overwriting
+            assert_allclose(q2, q[qind], rtol=self.rtol, atol=self.atol)
+            assert_allclose(r2, r[rind], rtol=self.rtol, atol=self.atol)
+
+        if test_C:
+            q = q0.copy('C')
+            r = r0.copy('C')
+            q3, r3 = qr_delete(q, r, 3, p, which, True)
+            check_qr(q3, r3, a1, self.rtol, self.atol, assert_sqr)
+            assert_allclose(q3, q[qind], rtol=self.rtol, atol=self.atol)
+            assert_allclose(r3, r[rind], rtol=self.rtol, atol=self.atol)
+
+    def test_overwrite_qr_1_row(self):
+        # any positively strided q and r.
+        self.base_overwrite_qr('row', 1, True, True)
+
+    def test_overwrite_economic_qr_1_row(self):
+        # Any contiguous q and positively strided r.
+        self.base_overwrite_qr('row', 1, True, True, 'economic')
+
+    def test_overwrite_qr_1_col(self):
+        # any positively strided q and r.
+        # full and eco share code paths
+        self.base_overwrite_qr('col', 1, True, True)
+
+    def test_overwrite_qr_p_row(self):
+        # any positively strided q and r.
+        self.base_overwrite_qr('row', 3, True, True)
+
+    def test_overwrite_economic_qr_p_row(self):
+        # any contiguous q and positively strided r
+        self.base_overwrite_qr('row', 3, True, True, 'economic')
+
+    def test_overwrite_qr_p_col(self):
+        # only F-ordered q and r can be overwritten for cols
+        # full and eco share code paths
+        self.base_overwrite_qr('col', 3, False, True)
+
+    def test_bad_which(self):
+        a, q, r = self.generate('sqr')
+        assert_raises(ValueError, qr_delete, q, r, 0, which='foo')
+
+    def test_bad_k(self):
+        a, q, r = self.generate('tall')
+        assert_raises(ValueError, qr_delete, q, r, q.shape[0], 1)
+        assert_raises(ValueError, qr_delete, q, r, -q.shape[0]-1, 1)
+        assert_raises(ValueError, qr_delete, q, r, r.shape[0], 1, 'col')
+        assert_raises(ValueError, qr_delete, q, r, -r.shape[0]-1, 1, 'col')
+
+    def test_bad_p(self):
+        a, q, r = self.generate('tall')
+        # p must be positive
+        assert_raises(ValueError, qr_delete, q, r, 0, -1)
+        assert_raises(ValueError, qr_delete, q, r, 0, -1, 'col')
+
+        # and nonzero
+        assert_raises(ValueError, qr_delete, q, r, 0, 0)
+        assert_raises(ValueError, qr_delete, q, r, 0, 0, 'col')
+
+        # must have at least k+p rows or cols, depending.
+        assert_raises(ValueError, qr_delete, q, r, 3, q.shape[0]-2)
+        assert_raises(ValueError, qr_delete, q, r, 3, r.shape[1]-2, 'col')
+
+    def test_empty_q(self):
+        a, q, r = self.generate('tall')
+        # same code path for 'row' and 'col'
+        assert_raises(ValueError, qr_delete, np.array([]), r, 0, 1)
+
+    def test_empty_r(self):
+        a, q, r = self.generate('tall')
+        # same code path for 'row' and 'col'
+        assert_raises(ValueError, qr_delete, q, np.array([]), 0, 1)
+
+    def test_mismatched_q_and_r(self):
+        a, q, r = self.generate('tall')
+        r = r[1:]
+        assert_raises(ValueError, qr_delete, q, r, 0, 1)
+
+    def test_unsupported_dtypes(self):
+        dts = ['int8', 'int16', 'int32', 'int64',
+               'uint8', 'uint16', 'uint32', 'uint64',
+               'float16', 'longdouble', 'longcomplex',
+               'bool']
+        a, q0, r0 = self.generate('tall')
+        for dtype in dts:
+            q = q0.real.astype(dtype)
+            with np.errstate(invalid="ignore"):
+                r = r0.real.astype(dtype)
+            assert_raises(ValueError, qr_delete, q, r0, 0, 1, 'row')
+            assert_raises(ValueError, qr_delete, q, r0, 0, 2, 'row')
+            assert_raises(ValueError, qr_delete, q, r0, 0, 1, 'col')
+            assert_raises(ValueError, qr_delete, q, r0, 0, 2, 'col')
+
+            assert_raises(ValueError, qr_delete, q0, r, 0, 1, 'row')
+            assert_raises(ValueError, qr_delete, q0, r, 0, 2, 'row')
+            assert_raises(ValueError, qr_delete, q0, r, 0, 1, 'col')
+            assert_raises(ValueError, qr_delete, q0, r, 0, 2, 'col')
+
+    def test_check_finite(self):
+        a0, q0, r0 = self.generate('tall')
+
+        q = q0.copy('F')
+        q[1,1] = np.nan
+        assert_raises(ValueError, qr_delete, q, r0, 0, 1, 'row')
+        assert_raises(ValueError, qr_delete, q, r0, 0, 3, 'row')
+        assert_raises(ValueError, qr_delete, q, r0, 0, 1, 'col')
+        assert_raises(ValueError, qr_delete, q, r0, 0, 3, 'col')
+
+        r = r0.copy('F')
+        r[1,1] = np.nan
+        assert_raises(ValueError, qr_delete, q0, r, 0, 1, 'row')
+        assert_raises(ValueError, qr_delete, q0, r, 0, 3, 'row')
+        assert_raises(ValueError, qr_delete, q0, r, 0, 1, 'col')
+        assert_raises(ValueError, qr_delete, q0, r, 0, 3, 'col')
+
+    def test_qr_scalar(self):
+        a, q, r = self.generate('1x1')
+        assert_raises(ValueError, qr_delete, q[0, 0], r, 0, 1, 'row')
+        assert_raises(ValueError, qr_delete, q, r[0, 0], 0, 1, 'row')
+        assert_raises(ValueError, qr_delete, q[0, 0], r, 0, 1, 'col')
+        assert_raises(ValueError, qr_delete, q, r[0, 0], 0, 1, 'col')
+
+class TestQRdelete_f(BaseQRdelete):
+    dtype = np.dtype('f')
+
+class TestQRdelete_F(BaseQRdelete):
+    dtype = np.dtype('F')
+
+class TestQRdelete_d(BaseQRdelete):
+    dtype = np.dtype('d')
+
+class TestQRdelete_D(BaseQRdelete):
+    dtype = np.dtype('D')
+
+class BaseQRinsert(BaseQRdeltas):
+    def generate(self, type, mode='full', which='row', p=1):
+        a, q, r = super().generate(type, mode)
+
+        assert_(p > 0)
+
+        # super call set the seed...
+        if which == 'row':
+            if p == 1:
+                u = np.random.random(a.shape[1])
+            else:
+                u = np.random.random((p, a.shape[1]))
+        elif which == 'col':
+            if p == 1:
+                u = np.random.random(a.shape[0])
+            else:
+                u = np.random.random((a.shape[0], p))
+        else:
+            raise ValueError('which should be either "row" or "col"')
+
+        if np.iscomplexobj(self.dtype.type(1)):
+            b = np.random.random(u.shape)
+            u = u + 1j * b
+
+        u = u.astype(self.dtype)
+        return a, q, r, u
+
+    def test_sqr_1_row(self):
+        a, q, r, u = self.generate('sqr', which='row')
+        for row in range(r.shape[0] + 1):
+            q1, r1 = qr_insert(q, r, u, row)
+            a1 = np.insert(a, row, u, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_sqr_p_row(self):
+        # sqr + rows --> fat always
+        a, q, r, u = self.generate('sqr', which='row', p=3)
+        for row in range(r.shape[0] + 1):
+            q1, r1 = qr_insert(q, r, u, row)
+            a1 = np.insert(a, np.full(3, row, np.intp), u, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_sqr_1_col(self):
+        a, q, r, u = self.generate('sqr', which='col')
+        for col in range(r.shape[1] + 1):
+            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+            a1 = np.insert(a, col, u, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_sqr_p_col(self):
+        # sqr + cols --> fat always
+        a, q, r, u = self.generate('sqr', which='col', p=3)
+        for col in range(r.shape[1] + 1):
+            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+            a1 = np.insert(a, np.full(3, col, np.intp), u, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_tall_1_row(self):
+        a, q, r, u = self.generate('tall', which='row')
+        for row in range(r.shape[0] + 1):
+            q1, r1 = qr_insert(q, r, u, row)
+            a1 = np.insert(a, row, u, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_tall_p_row(self):
+        # tall + rows --> tall always
+        a, q, r, u = self.generate('tall', which='row', p=3)
+        for row in range(r.shape[0] + 1):
+            q1, r1 = qr_insert(q, r, u, row)
+            a1 = np.insert(a, np.full(3, row, np.intp), u, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_tall_1_col(self):
+        a, q, r, u = self.generate('tall', which='col')
+        for col in range(r.shape[1] + 1):
+            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+            a1 = np.insert(a, col, u, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    # for column adds to tall matrices there are three cases to test
+    # tall + pcol --> tall
+    # tall + pcol --> sqr
+    # tall + pcol --> fat
+    def base_tall_p_col_xxx(self, p):
+        a, q, r, u = self.generate('tall', which='col', p=p)
+        for col in range(r.shape[1] + 1):
+            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+            a1 = np.insert(a, np.full(p, col, np.intp), u, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_tall_p_col_tall(self):
+        # 12x7 + 12x3 = 12x10 --> stays tall
+        self.base_tall_p_col_xxx(3)
+
+    def test_tall_p_col_sqr(self):
+        # 12x7 + 12x5 = 12x12 --> becomes sqr
+        self.base_tall_p_col_xxx(5)
+
+    def test_tall_p_col_fat(self):
+        # 12x7 + 12x7 = 12x14 --> becomes fat
+        self.base_tall_p_col_xxx(7)
+
+    def test_fat_1_row(self):
+        a, q, r, u = self.generate('fat', which='row')
+        for row in range(r.shape[0] + 1):
+            q1, r1 = qr_insert(q, r, u, row)
+            a1 = np.insert(a, row, u, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    # for row adds to fat matrices there are three cases to test
+    # fat + prow --> fat
+    # fat + prow --> sqr
+    # fat + prow --> tall
+    def base_fat_p_row_xxx(self, p):
+        a, q, r, u = self.generate('fat', which='row', p=p)
+        for row in range(r.shape[0] + 1):
+            q1, r1 = qr_insert(q, r, u, row)
+            a1 = np.insert(a, np.full(p, row, np.intp), u, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_fat_p_row_fat(self):
+        # 7x12 + 3x12 = 10x12 --> stays fat
+        self.base_fat_p_row_xxx(3)
+
+    def test_fat_p_row_sqr(self):
+        # 7x12 + 5x12 = 12x12 --> becomes sqr
+        self.base_fat_p_row_xxx(5)
+
+    def test_fat_p_row_tall(self):
+        # 7x12 + 7x12 = 14x12 --> becomes tall
+        self.base_fat_p_row_xxx(7)
+
+    def test_fat_1_col(self):
+        a, q, r, u = self.generate('fat', which='col')
+        for col in range(r.shape[1] + 1):
+            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+            a1 = np.insert(a, col, u, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_fat_p_col(self):
+        # fat + cols --> fat always
+        a, q, r, u = self.generate('fat', which='col', p=3)
+        for col in range(r.shape[1] + 1):
+            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+            a1 = np.insert(a, np.full(3, col, np.intp), u, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_economic_1_row(self):
+        a, q, r, u = self.generate('tall', 'economic', 'row')
+        for row in range(r.shape[0] + 1):
+            q1, r1 = qr_insert(q, r, u, row, overwrite_qru=False)
+            a1 = np.insert(a, row, u, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+    def test_economic_p_row(self):
+        # tall + rows --> tall always
+        a, q, r, u = self.generate('tall', 'economic', 'row', 3)
+        for row in range(r.shape[0] + 1):
+            q1, r1 = qr_insert(q, r, u, row, overwrite_qru=False)
+            a1 = np.insert(a, np.full(3, row, np.intp), u, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+    def test_economic_1_col(self):
+        a, q, r, u = self.generate('tall', 'economic', which='col')
+        for col in range(r.shape[1] + 1):
+            q1, r1 = qr_insert(q, r, u.copy(), col, 'col', overwrite_qru=False)
+            a1 = np.insert(a, col, u, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+    def test_economic_1_col_bad_update(self):
+        # When the column to be added lies in the span of Q, the update is
+        # not meaningful.  This is detected, and a LinAlgError is issued.
+        q = np.eye(5, 3, dtype=self.dtype)
+        r = np.eye(3, dtype=self.dtype)
+        u = np.array([1, 0, 0, 0, 0], self.dtype)
+        assert_raises(linalg.LinAlgError, qr_insert, q, r, u, 0, 'col')
+
+    # for column adds to economic matrices there are three cases to test
+    # eco + pcol --> eco
+    # eco + pcol --> sqr
+    # eco + pcol --> fat
+    def base_economic_p_col_xxx(self, p):
+        a, q, r, u = self.generate('tall', 'economic', which='col', p=p)
+        for col in range(r.shape[1] + 1):
+            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+            a1 = np.insert(a, np.full(p, col, np.intp), u, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+    def test_economic_p_col_eco(self):
+        # 12x7 + 12x3 = 12x10 --> stays eco
+        self.base_economic_p_col_xxx(3)
+
+    def test_economic_p_col_sqr(self):
+        # 12x7 + 12x5 = 12x12 --> becomes sqr
+        self.base_economic_p_col_xxx(5)
+
+    def test_economic_p_col_fat(self):
+        # 12x7 + 12x7 = 12x14 --> becomes fat
+        self.base_economic_p_col_xxx(7)
+
+    def test_Mx1_1_row(self):
+        a, q, r, u = self.generate('Mx1', which='row')
+        for row in range(r.shape[0] + 1):
+            q1, r1 = qr_insert(q, r, u, row)
+            a1 = np.insert(a, row, u, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_Mx1_p_row(self):
+        a, q, r, u = self.generate('Mx1', which='row', p=3)
+        for row in range(r.shape[0] + 1):
+            q1, r1 = qr_insert(q, r, u, row)
+            a1 = np.insert(a, np.full(3, row, np.intp), u, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_Mx1_1_col(self):
+        a, q, r, u = self.generate('Mx1', which='col')
+        for col in range(r.shape[1] + 1):
+            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+            a1 = np.insert(a, col, u, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_Mx1_p_col(self):
+        a, q, r, u = self.generate('Mx1', which='col', p=3)
+        for col in range(r.shape[1] + 1):
+            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+            a1 = np.insert(a, np.full(3, col, np.intp), u, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_Mx1_economic_1_row(self):
+        a, q, r, u = self.generate('Mx1', 'economic', 'row')
+        for row in range(r.shape[0] + 1):
+            q1, r1 = qr_insert(q, r, u, row)
+            a1 = np.insert(a, row, u, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+    def test_Mx1_economic_p_row(self):
+        a, q, r, u = self.generate('Mx1', 'economic', 'row', 3)
+        for row in range(r.shape[0] + 1):
+            q1, r1 = qr_insert(q, r, u, row)
+            a1 = np.insert(a, np.full(3, row, np.intp), u, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+    def test_Mx1_economic_1_col(self):
+        a, q, r, u = self.generate('Mx1', 'economic', 'col')
+        for col in range(r.shape[1] + 1):
+            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+            a1 = np.insert(a, col, u, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+    def test_Mx1_economic_p_col(self):
+        a, q, r, u = self.generate('Mx1', 'economic', 'col', 3)
+        for col in range(r.shape[1] + 1):
+            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+            a1 = np.insert(a, np.full(3, col, np.intp), u, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+    def test_1xN_1_row(self):
+        a, q, r, u = self.generate('1xN', which='row')
+        for row in range(r.shape[0] + 1):
+            q1, r1 = qr_insert(q, r, u, row)
+            a1 = np.insert(a, row, u, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_1xN_p_row(self):
+        a, q, r, u = self.generate('1xN', which='row', p=3)
+        for row in range(r.shape[0] + 1):
+            q1, r1 = qr_insert(q, r, u, row)
+            a1 = np.insert(a, np.full(3, row, np.intp), u, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_1xN_1_col(self):
+        a, q, r, u = self.generate('1xN', which='col')
+        for col in range(r.shape[1] + 1):
+            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+            a1 = np.insert(a, col, u, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_1xN_p_col(self):
+        a, q, r, u = self.generate('1xN', which='col', p=3)
+        for col in range(r.shape[1] + 1):
+            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+            a1 = np.insert(a, np.full(3, col, np.intp), u, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_1x1_1_row(self):
+        a, q, r, u = self.generate('1x1', which='row')
+        for row in range(r.shape[0] + 1):
+            q1, r1 = qr_insert(q, r, u, row)
+            a1 = np.insert(a, row, u, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_1x1_p_row(self):
+        a, q, r, u = self.generate('1x1', which='row', p=3)
+        for row in range(r.shape[0] + 1):
+            q1, r1 = qr_insert(q, r, u, row)
+            a1 = np.insert(a, np.full(3, row, np.intp), u, 0)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_1x1_1_col(self):
+        a, q, r, u = self.generate('1x1', which='col')
+        for col in range(r.shape[1] + 1):
+            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+            a1 = np.insert(a, col, u, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_1x1_p_col(self):
+        a, q, r, u = self.generate('1x1', which='col', p=3)
+        for col in range(r.shape[1] + 1):
+            q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+            a1 = np.insert(a, np.full(3, col, np.intp), u, 1)
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_1x1_1_scalar(self):
+        a, q, r, u = self.generate('1x1', which='row')
+        assert_raises(ValueError, qr_insert, q[0, 0], r, u, 0, 'row')
+        assert_raises(ValueError, qr_insert, q, r[0, 0], u, 0, 'row')
+        assert_raises(ValueError, qr_insert, q, r, u[0], 0, 'row')
+
+        assert_raises(ValueError, qr_insert, q[0, 0], r, u, 0, 'col')
+        assert_raises(ValueError, qr_insert, q, r[0, 0], u, 0, 'col')
+        assert_raises(ValueError, qr_insert, q, r, u[0], 0, 'col')
+
+    def base_non_simple_strides(self, adjust_strides, k, p, which):
+        for type in ['sqr', 'tall', 'fat']:
+            a, q0, r0, u0 = self.generate(type, which=which, p=p)
+            qs, rs, us = adjust_strides((q0, r0, u0))
+            if p == 1:
+                ai = np.insert(a, k, u0, 0 if which == 'row' else 1)
+            else:
+                # u0 already has the right layout for either axis, so only
+                # the insertion axis depends on `which`
+                ai = np.insert(a, np.full(p, k, np.intp), u0,
+                               0 if which == 'row' else 1)
+
+            # For each of q, r and u in turn, call qr_insert with that
+            # argument strided, first with overwrite_qru=False and then with
+            # overwrite_qru=True. We do not verify that anything was actually
+            # overwritten, since only an F-ordered Q can be overwritten when
+            # adding columns.
+
+            q = q0.copy('F')
+            r = r0.copy('F')
+            u = u0.copy('F')
+            q1, r1 = qr_insert(qs, r, u, k, which, overwrite_qru=False)
+            check_qr(q1, r1, ai, self.rtol, self.atol)
+            q1o, r1o = qr_insert(qs, r, u, k, which, overwrite_qru=True)
+            check_qr(q1o, r1o, ai, self.rtol, self.atol)
+
+            q = q0.copy('F')
+            r = r0.copy('F')
+            u = u0.copy('F')
+            q2, r2 = qr_insert(q, rs, u, k, which, overwrite_qru=False)
+            check_qr(q2, r2, ai, self.rtol, self.atol)
+            q2o, r2o = qr_insert(q, rs, u, k, which, overwrite_qru=True)
+            check_qr(q2o, r2o, ai, self.rtol, self.atol)
+
+            q = q0.copy('F')
+            r = r0.copy('F')
+            u = u0.copy('F')
+            q3, r3 = qr_insert(q, r, us, k, which, overwrite_qru=False)
+            check_qr(q3, r3, ai, self.rtol, self.atol)
+            q3o, r3o = qr_insert(q, r, us, k, which, overwrite_qru=True)
+            check_qr(q3o, r3o, ai, self.rtol, self.atol)
+
+            q = q0.copy('F')
+            r = r0.copy('F')
+            u = u0.copy('F')
+            # regenerate the strided views, since some of the originals may
+            # have been overwritten above
+            qs, rs, us = adjust_strides((q, r, u))
+            q5, r5 = qr_insert(qs, rs, us, k, which, overwrite_qru=False)
+            check_qr(q5, r5, ai, self.rtol, self.atol)
+            q5o, r5o = qr_insert(qs, rs, us, k, which, overwrite_qru=True)
+            check_qr(q5o, r5o, ai, self.rtol, self.atol)
+
+    def test_non_unit_strides_1_row(self):
+        self.base_non_simple_strides(make_strided, 0, 1, 'row')
+
+    def test_non_unit_strides_p_row(self):
+        self.base_non_simple_strides(make_strided, 0, 3, 'row')
+
+    def test_non_unit_strides_1_col(self):
+        self.base_non_simple_strides(make_strided, 0, 1, 'col')
+
+    def test_non_unit_strides_p_col(self):
+        self.base_non_simple_strides(make_strided, 0, 3, 'col')
+
+    def test_neg_strides_1_row(self):
+        self.base_non_simple_strides(negate_strides, 0, 1, 'row')
+
+    def test_neg_strides_p_row(self):
+        self.base_non_simple_strides(negate_strides, 0, 3, 'row')
+
+    def test_neg_strides_1_col(self):
+        self.base_non_simple_strides(negate_strides, 0, 1, 'col')
+
+    def test_neg_strides_p_col(self):
+        self.base_non_simple_strides(negate_strides, 0, 3, 'col')
+
+    def test_non_itemsize_strides_1_row(self):
+        self.base_non_simple_strides(nonitemsize_strides, 0, 1, 'row')
+
+    def test_non_itemsize_strides_p_row(self):
+        self.base_non_simple_strides(nonitemsize_strides, 0, 3, 'row')
+
+    def test_non_itemsize_strides_1_col(self):
+        self.base_non_simple_strides(nonitemsize_strides, 0, 1, 'col')
+
+    def test_non_itemsize_strides_p_col(self):
+        self.base_non_simple_strides(nonitemsize_strides, 0, 3, 'col')
+
+    def test_non_native_byte_order_1_row(self):
+        self.base_non_simple_strides(make_nonnative, 0, 1, 'row')
+
+    def test_non_native_byte_order_p_row(self):
+        self.base_non_simple_strides(make_nonnative, 0, 3, 'row')
+
+    def test_non_native_byte_order_1_col(self):
+        self.base_non_simple_strides(make_nonnative, 0, 1, 'col')
+
+    def test_non_native_byte_order_p_col(self):
+        self.base_non_simple_strides(make_nonnative, 0, 3, 'col')
+
+    def test_overwrite_qu_rank_1(self):
+        # When inserting rows, the sizes of both Q and R change, so only
+        # column inserts can overwrite q. Only complex column inserts
+        # with a C-ordered Q overwrite u; any contiguous Q is overwritten
+        # when inserting one column.
+        a, q0, r, u = self.generate('sqr', which='col', p=1)
+        q = q0.copy('C')
+        u0 = u.copy()
+        # don't overwrite
+        q1, r1 = qr_insert(q, r, u, 0, 'col', overwrite_qru=False)
+        a1 = np.insert(a, 0, u0, 1)
+        check_qr(q1, r1, a1, self.rtol, self.atol)
+        check_qr(q, r, a, self.rtol, self.atol)
+
+        # try overwriting
+        q2, r2 = qr_insert(q, r, u, 0, 'col', overwrite_qru=True)
+        check_qr(q2, r2, a1, self.rtol, self.atol)
+        # verify the overwriting
+        assert_allclose(q2, q, rtol=self.rtol, atol=self.atol)
+        assert_allclose(u, u0.conj(), rtol=self.rtol, atol=self.atol)
+
+        # now try with a fortran ordered Q
+        qF = q0.copy('F')
+        u1 = u0.copy()
+        q3, r3 = qr_insert(qF, r, u1, 0, 'col', overwrite_qru=False)
+        check_qr(q3, r3, a1, self.rtol, self.atol)
+        check_qr(qF, r, a, self.rtol, self.atol)
+
+        # try overwriting
+        q4, r4 = qr_insert(qF, r, u1, 0, 'col', overwrite_qru=True)
+        check_qr(q4, r4, a1, self.rtol, self.atol)
+        assert_allclose(q4, qF, rtol=self.rtol, atol=self.atol)
+
+    def test_overwrite_qu_rank_p(self):
+        # When inserting rows, the sizes of both Q and R change, so only
+        # column inserts can potentially overwrite Q. In practice, only an
+        # F-ordered Q is overwritten by a rank-p update.
+        a, q0, r, u = self.generate('sqr', which='col', p=3)
+        q = q0.copy('F')
+        a1 = np.insert(a, np.zeros(3, np.intp), u, 1)
+
+        # don't overwrite
+        q1, r1 = qr_insert(q, r, u, 0, 'col', overwrite_qru=False)
+        check_qr(q1, r1, a1, self.rtol, self.atol)
+        check_qr(q, r, a, self.rtol, self.atol)
+
+        # try overwriting
+        q2, r2 = qr_insert(q, r, u, 0, 'col', overwrite_qru=True)
+        check_qr(q2, r2, a1, self.rtol, self.atol)
+        assert_allclose(q2, q, rtol=self.rtol, atol=self.atol)
+
+    def test_empty_inputs(self):
+        a, q, r, u = self.generate('sqr', which='row')
+        assert_raises(ValueError, qr_insert, np.array([]), r, u, 0, 'row')
+        assert_raises(ValueError, qr_insert, q, np.array([]), u, 0, 'row')
+        assert_raises(ValueError, qr_insert, q, r, np.array([]), 0, 'row')
+        assert_raises(ValueError, qr_insert, np.array([]), r, u, 0, 'col')
+        assert_raises(ValueError, qr_insert, q, np.array([]), u, 0, 'col')
+        assert_raises(ValueError, qr_insert, q, r, np.array([]), 0, 'col')
+
+    def test_mismatched_shapes(self):
+        a, q, r, u = self.generate('tall', which='row')
+        assert_raises(ValueError, qr_insert, q, r[1:], u, 0, 'row')
+        assert_raises(ValueError, qr_insert, q[:-2], r, u, 0, 'row')
+        assert_raises(ValueError, qr_insert, q, r, u[1:], 0, 'row')
+        assert_raises(ValueError, qr_insert, q, r[1:], u, 0, 'col')
+        assert_raises(ValueError, qr_insert, q[:-2], r, u, 0, 'col')
+        assert_raises(ValueError, qr_insert, q, r, u[1:], 0, 'col')
+
+    def test_unsupported_dtypes(self):
+        dts = ['int8', 'int16', 'int32', 'int64',
+               'uint8', 'uint16', 'uint32', 'uint64',
+               'float16', 'longdouble', 'longcomplex',
+               'bool']
+        a, q0, r0, u0 = self.generate('sqr', which='row')
+        for dtype in dts:
+            q = q0.real.astype(dtype)
+            with np.errstate(invalid="ignore"):
+                r = r0.real.astype(dtype)
+            u = u0.real.astype(dtype)
+            assert_raises(ValueError, qr_insert, q, r0, u0, 0, 'row')
+            assert_raises(ValueError, qr_insert, q, r0, u0, 0, 'col')
+            assert_raises(ValueError, qr_insert, q0, r, u0, 0, 'row')
+            assert_raises(ValueError, qr_insert, q0, r, u0, 0, 'col')
+            assert_raises(ValueError, qr_insert, q0, r0, u, 0, 'row')
+            assert_raises(ValueError, qr_insert, q0, r0, u, 0, 'col')
+
+    def test_check_finite(self):
+        a0, q0, r0, u0 = self.generate('sqr', which='row', p=3)
+
+        q = q0.copy('F')
+        q[1,1] = np.nan
+        assert_raises(ValueError, qr_insert, q, r0, u0[:,0], 0, 'row')
+        assert_raises(ValueError, qr_insert, q, r0, u0, 0, 'row')
+        assert_raises(ValueError, qr_insert, q, r0, u0[:,0], 0, 'col')
+        assert_raises(ValueError, qr_insert, q, r0, u0, 0, 'col')
+
+        r = r0.copy('F')
+        r[1,1] = np.nan
+        assert_raises(ValueError, qr_insert, q0, r, u0[:,0], 0, 'row')
+        assert_raises(ValueError, qr_insert, q0, r, u0, 0, 'row')
+        assert_raises(ValueError, qr_insert, q0, r, u0[:,0], 0, 'col')
+        assert_raises(ValueError, qr_insert, q0, r, u0, 0, 'col')
+
+        u = u0.copy('F')
+        u[0,0] = np.nan
+        assert_raises(ValueError, qr_insert, q0, r0, u[:,0], 0, 'row')
+        assert_raises(ValueError, qr_insert, q0, r0, u, 0, 'row')
+        assert_raises(ValueError, qr_insert, q0, r0, u[:,0], 0, 'col')
+        assert_raises(ValueError, qr_insert, q0, r0, u, 0, 'col')
+
+class TestQRinsert_f(BaseQRinsert):
+    dtype = np.dtype('f')
+
+class TestQRinsert_F(BaseQRinsert):
+    dtype = np.dtype('F')
+
+class TestQRinsert_d(BaseQRinsert):
+    dtype = np.dtype('d')
+
+class TestQRinsert_D(BaseQRinsert):
+    dtype = np.dtype('D')
+
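+# Illustrative sketch (added for exposition, not part of the original scipy
+# suite): the basic contract the qr_insert tests above verify.
+def _qr_insert_example():
+    import numpy as np
+    from scipy.linalg import qr, qr_insert
+    rng = np.random.RandomState(0)
+    a = rng.random_sample((6, 4))
+    q, r = qr(a)
+    u = rng.random_sample(6)
+    # inserting column u at index 2 updates the existing factorization
+    # instead of recomputing it from scratch
+    q1, r1 = qr_insert(q, r, u, 2, 'col')
+    assert np.allclose(q1 @ r1, np.insert(a, 2, u, axis=1))
+
+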
+class BaseQRupdate(BaseQRdeltas):
+    def generate(self, type, mode='full', p=1):
+        a, q, r = super().generate(type, mode)
+
+        # the super().generate() call above already seeded the RNG
+        if p == 1:
+            u = np.random.random(q.shape[0])
+            v = np.random.random(r.shape[1])
+        else:
+            u = np.random.random((q.shape[0], p))
+            v = np.random.random((r.shape[1], p))
+
+        if np.iscomplexobj(self.dtype.type(1)):
+            b = np.random.random(u.shape)
+            u = u + 1j * b
+
+            c = np.random.random(v.shape)
+            v = v + 1j * c
+
+        u = u.astype(self.dtype)
+        v = v.astype(self.dtype)
+        return a, q, r, u, v
+
+    def test_sqr_rank_1(self):
+        a, q, r, u, v = self.generate('sqr')
+        q1, r1 = qr_update(q, r, u, v, False)
+        a1 = a + np.outer(u, v.conj())
+        check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_sqr_rank_p(self):
+        # also exercises rank 1 updates passed as ndim == 2 arrays (p == 1)
+        for p in [1, 2, 3, 5]:
+            a, q, r, u, v = self.generate('sqr', p=p)
+            if p == 1:
+                u = u.reshape(u.size, 1)
+                v = v.reshape(v.size, 1)
+            q1, r1 = qr_update(q, r, u, v, False)
+            a1 = a + np.dot(u, v.T.conj())
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_tall_rank_1(self):
+        a, q, r, u, v = self.generate('tall')
+        q1, r1 = qr_update(q, r, u, v, False)
+        a1 = a + np.outer(u, v.conj())
+        check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_tall_rank_p(self):
+        for p in [1, 2, 3, 5]:
+            a, q, r, u, v = self.generate('tall', p=p)
+            if p == 1:
+                u = u.reshape(u.size, 1)
+                v = v.reshape(v.size, 1)
+            q1, r1 = qr_update(q, r, u, v, False)
+            a1 = a + np.dot(u, v.T.conj())
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_fat_rank_1(self):
+        a, q, r, u, v = self.generate('fat')
+        q1, r1 = qr_update(q, r, u, v, False)
+        a1 = a + np.outer(u, v.conj())
+        check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_fat_rank_p(self):
+        for p in [1, 2, 3, 5]:
+            a, q, r, u, v = self.generate('fat', p=p)
+            if p == 1:
+                u = u.reshape(u.size, 1)
+                v = v.reshape(v.size, 1)
+            q1, r1 = qr_update(q, r, u, v, False)
+            a1 = a + np.dot(u, v.T.conj())
+            check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_economic_rank_1(self):
+        a, q, r, u, v = self.generate('tall', 'economic')
+        q1, r1 = qr_update(q, r, u, v, False)
+        a1 = a + np.outer(u, v.conj())
+        check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+    def test_economic_rank_p(self):
+        for p in [1, 2, 3, 5]:
+            a, q, r, u, v = self.generate('tall', 'economic', p)
+            if p == 1:
+                u = u.reshape(u.size, 1)
+                v = v.reshape(v.size, 1)
+            q1, r1 = qr_update(q, r, u, v, False)
+            a1 = a + np.dot(u, v.T.conj())
+            check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+    def test_Mx1_rank_1(self):
+        a, q, r, u, v = self.generate('Mx1')
+        q1, r1 = qr_update(q, r, u, v, False)
+        a1 = a + np.outer(u, v.conj())
+        check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_Mx1_rank_p(self):
+        # when M or N == 1, only a rank 1 update is allowed. This isn't a
+        # fundamental limitation, but the code does not support it.
+        a, q, r, u, v = self.generate('Mx1', p=1)
+        u = u.reshape(u.size, 1)
+        v = v.reshape(v.size, 1)
+        q1, r1 = qr_update(q, r, u, v, False)
+        a1 = a + np.dot(u, v.T.conj())
+        check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_Mx1_economic_rank_1(self):
+        a, q, r, u, v = self.generate('Mx1', 'economic')
+        q1, r1 = qr_update(q, r, u, v, False)
+        a1 = a + np.outer(u, v.conj())
+        check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+    def test_Mx1_economic_rank_p(self):
+        # when M or N == 1, only a rank 1 update is allowed. This isn't a
+        # fundamental limitation, but the code does not support it.
+        a, q, r, u, v = self.generate('Mx1', 'economic', p=1)
+        u = u.reshape(u.size, 1)
+        v = v.reshape(v.size, 1)
+        q1, r1 = qr_update(q, r, u, v, False)
+        a1 = a + np.dot(u, v.T.conj())
+        check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+    def test_1xN_rank_1(self):
+        a, q, r, u, v = self.generate('1xN')
+        q1, r1 = qr_update(q, r, u, v, False)
+        a1 = a + np.outer(u, v.conj())
+        check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_1xN_rank_p(self):
+        # when M or N == 1, only a rank 1 update is allowed. This isn't a
+        # fundamental limitation, but the code does not support it.
+        a, q, r, u, v = self.generate('1xN', p=1)
+        u = u.reshape(u.size, 1)
+        v = v.reshape(v.size, 1)
+        q1, r1 = qr_update(q, r, u, v, False)
+        a1 = a + np.dot(u, v.T.conj())
+        check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_1x1_rank_1(self):
+        a, q, r, u, v = self.generate('1x1')
+        q1, r1 = qr_update(q, r, u, v, False)
+        a1 = a + np.outer(u, v.conj())
+        check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_1x1_rank_p(self):
+        # when M or N == 1, only a rank 1 update is allowed. This isn't a
+        # fundamental limitation, but the code does not support it.
+        a, q, r, u, v = self.generate('1x1', p=1)
+        u = u.reshape(u.size, 1)
+        v = v.reshape(v.size, 1)
+        q1, r1 = qr_update(q, r, u, v, False)
+        a1 = a + np.dot(u, v.T.conj())
+        check_qr(q1, r1, a1, self.rtol, self.atol)
+
+    def test_1x1_rank_1_scalar(self):
+        a, q, r, u, v = self.generate('1x1')
+        assert_raises(ValueError, qr_update, q[0, 0], r, u, v)
+        assert_raises(ValueError, qr_update, q, r[0, 0], u, v)
+        assert_raises(ValueError, qr_update, q, r, u[0], v)
+        assert_raises(ValueError, qr_update, q, r, u, v[0])
+
+    def base_non_simple_strides(self, adjust_strides, mode, p, overwriteable):
+        assert_sqr = mode != 'economic'
+        for type in ['sqr', 'tall', 'fat']:
+            a, q0, r0, u0, v0 = self.generate(type, mode, p)
+            qs, rs, us, vs = adjust_strides((q0, r0, u0, v0))
+            if p == 1:
+                aup = a + np.outer(u0, v0.conj())
+            else:
+                aup = a + np.dot(u0, v0.T.conj())
+
+            # For each of q, r, u and v in turn, call qr_update with that
+            # argument strided, first with overwrite=False and then with
+            # overwrite=True, checking that for p == 1 r and v are still
+            # overwritten. A strided q or u must always be copied.
+
+            q = q0.copy('F')
+            r = r0.copy('F')
+            u = u0.copy('F')
+            v = v0.copy('C')
+            q1, r1 = qr_update(qs, r, u, v, False)
+            check_qr(q1, r1, aup, self.rtol, self.atol, assert_sqr)
+            q1o, r1o = qr_update(qs, r, u, v, True)
+            check_qr(q1o, r1o, aup, self.rtol, self.atol, assert_sqr)
+            if overwriteable:
+                assert_allclose(r1o, r, rtol=self.rtol, atol=self.atol)
+                assert_allclose(v, v0.conj(), rtol=self.rtol, atol=self.atol)
+
+            q = q0.copy('F')
+            r = r0.copy('F')
+            u = u0.copy('F')
+            v = v0.copy('C')
+            q2, r2 = qr_update(q, rs, u, v, False)
+            check_qr(q2, r2, aup, self.rtol, self.atol, assert_sqr)
+            q2o, r2o = qr_update(q, rs, u, v, True)
+            check_qr(q2o, r2o, aup, self.rtol, self.atol, assert_sqr)
+            if overwriteable:
+                assert_allclose(r2o, rs, rtol=self.rtol, atol=self.atol)
+                assert_allclose(v, v0.conj(), rtol=self.rtol, atol=self.atol)
+
+            q = q0.copy('F')
+            r = r0.copy('F')
+            u = u0.copy('F')
+            v = v0.copy('C')
+            q3, r3 = qr_update(q, r, us, v, False)
+            check_qr(q3, r3, aup, self.rtol, self.atol, assert_sqr)
+            q3o, r3o = qr_update(q, r, us, v, True)
+            check_qr(q3o, r3o, aup, self.rtol, self.atol, assert_sqr)
+            if overwriteable:
+                assert_allclose(r3o, r, rtol=self.rtol, atol=self.atol)
+                assert_allclose(v, v0.conj(), rtol=self.rtol, atol=self.atol)
+
+            q = q0.copy('F')
+            r = r0.copy('F')
+            u = u0.copy('F')
+            v = v0.copy('C')
+            q4, r4 = qr_update(q, r, u, vs, False)
+            check_qr(q4, r4, aup, self.rtol, self.atol, assert_sqr)
+            q4o, r4o = qr_update(q, r, u, vs, True)
+            check_qr(q4o, r4o, aup, self.rtol, self.atol, assert_sqr)
+            if overwriteable:
+                assert_allclose(r4o, r, rtol=self.rtol, atol=self.atol)
+                assert_allclose(vs, v0.conj(), rtol=self.rtol, atol=self.atol)
+
+            q = q0.copy('F')
+            r = r0.copy('F')
+            u = u0.copy('F')
+            v = v0.copy('C')
+            # regenerate the strided views, since some of the originals may
+            # have been overwritten above
+            qs, rs, us, vs = adjust_strides((q, r, u, v))
+            q5, r5 = qr_update(qs, rs, us, vs, False)
+            check_qr(q5, r5, aup, self.rtol, self.atol, assert_sqr)
+            q5o, r5o = qr_update(qs, rs, us, vs, True)
+            check_qr(q5o, r5o, aup, self.rtol, self.atol, assert_sqr)
+            if overwriteable:
+                assert_allclose(r5o, rs, rtol=self.rtol, atol=self.atol)
+                assert_allclose(vs, v0.conj(), rtol=self.rtol, atol=self.atol)
+
+    def test_non_unit_strides_rank_1(self):
+        self.base_non_simple_strides(make_strided, 'full', 1, True)
+
+    def test_non_unit_strides_economic_rank_1(self):
+        self.base_non_simple_strides(make_strided, 'economic', 1, True)
+
+    def test_non_unit_strides_rank_p(self):
+        self.base_non_simple_strides(make_strided, 'full', 3, False)
+
+    def test_non_unit_strides_economic_rank_p(self):
+        self.base_non_simple_strides(make_strided, 'economic', 3, False)
+
+    def test_neg_strides_rank_1(self):
+        self.base_non_simple_strides(negate_strides, 'full', 1, False)
+
+    def test_neg_strides_economic_rank_1(self):
+        self.base_non_simple_strides(negate_strides, 'economic', 1, False)
+
+    def test_neg_strides_rank_p(self):
+        self.base_non_simple_strides(negate_strides, 'full', 3, False)
+
+    def test_neg_strides_economic_rank_p(self):
+        self.base_non_simple_strides(negate_strides, 'economic', 3, False)
+
+    def test_non_itemsize_strides_rank_1(self):
+        self.base_non_simple_strides(nonitemsize_strides, 'full', 1, False)
+
+    def test_non_itemsize_strides_economic_rank_1(self):
+        self.base_non_simple_strides(nonitemsize_strides, 'economic', 1, False)
+
+    def test_non_itemsize_strides_rank_p(self):
+        self.base_non_simple_strides(nonitemsize_strides, 'full', 3, False)
+
+    def test_non_itemsize_strides_economic_rank_p(self):
+        self.base_non_simple_strides(nonitemsize_strides, 'economic', 3, False)
+
+    def test_non_native_byte_order_rank_1(self):
+        self.base_non_simple_strides(make_nonnative, 'full', 1, False)
+
+    def test_non_native_byte_order_economic_rank_1(self):
+        self.base_non_simple_strides(make_nonnative, 'economic', 1, False)
+
+    def test_non_native_byte_order_rank_p(self):
+        self.base_non_simple_strides(make_nonnative, 'full', 3, False)
+
+    def test_non_native_byte_order_economic_rank_p(self):
+        self.base_non_simple_strides(make_nonnative, 'economic', 3, False)
+
+    def test_overwrite_qruv_rank_1(self):
+        # Any positively strided q, r, u, and v can be overwritten by a
+        # rank-1 update; only C and F contiguous arrays are checked here.
+        a, q0, r0, u0, v0 = self.generate('sqr')
+        a1 = a + np.outer(u0, v0.conj())
+        q = q0.copy('F')
+        r = r0.copy('F')
+        u = u0.copy('F')
+        v = v0.copy('F')
+
+        # don't overwrite
+        q1, r1 = qr_update(q, r, u, v, False)
+        check_qr(q1, r1, a1, self.rtol, self.atol)
+        check_qr(q, r, a, self.rtol, self.atol)
+
+        q2, r2 = qr_update(q, r, u, v, True)
+        check_qr(q2, r2, a1, self.rtol, self.atol)
+        # verify the overwriting, no good way to check u and v.
+        assert_allclose(q2, q, rtol=self.rtol, atol=self.atol)
+        assert_allclose(r2, r, rtol=self.rtol, atol=self.atol)
+
+        q = q0.copy('C')
+        r = r0.copy('C')
+        u = u0.copy('C')
+        v = v0.copy('C')
+        q3, r3 = qr_update(q, r, u, v, True)
+        check_qr(q3, r3, a1, self.rtol, self.atol)
+        assert_allclose(q3, q, rtol=self.rtol, atol=self.atol)
+        assert_allclose(r3, r, rtol=self.rtol, atol=self.atol)
+
+    def test_overwrite_qruv_rank_1_economic(self):
+        # Updating economic decompositions can overwrite any contiguous q,
+        # and positively strided r and u; v is only ever read. Only C and F
+        # contiguous arrays are checked here.
+        a, q0, r0, u0, v0 = self.generate('tall', 'economic')
+        a1 = a + np.outer(u0, v0.conj())
+        q = q0.copy('F')
+        r = r0.copy('F')
+        u = u0.copy('F')
+        v = v0.copy('F')
+
+        # don't overwrite
+        q1, r1 = qr_update(q, r, u, v, False)
+        check_qr(q1, r1, a1, self.rtol, self.atol, False)
+        check_qr(q, r, a, self.rtol, self.atol, False)
+
+        q2, r2 = qr_update(q, r, u, v, True)
+        check_qr(q2, r2, a1, self.rtol, self.atol, False)
+        # verify the overwriting, no good way to check u and v.
+        assert_allclose(q2, q, rtol=self.rtol, atol=self.atol)
+        assert_allclose(r2, r, rtol=self.rtol, atol=self.atol)
+
+        q = q0.copy('C')
+        r = r0.copy('C')
+        u = u0.copy('C')
+        v = v0.copy('C')
+        q3, r3 = qr_update(q, r, u, v, True)
+        check_qr(q3, r3, a1, self.rtol, self.atol, False)
+        assert_allclose(q3, q, rtol=self.rtol, atol=self.atol)
+        assert_allclose(r3, r, rtol=self.rtol, atol=self.atol)
+
+    def test_overwrite_qruv_rank_p(self):
+        # For rank-p updates, q and r must be F contiguous and v must be C
+        # contiguous (so v.T is F contiguous); u can be C or F, but is only
+        # overwritten if Q is C ordered and complex.
+        a, q0, r0, u0, v0 = self.generate('sqr', p=3)
+        a1 = a + np.dot(u0, v0.T.conj())
+        q = q0.copy('F')
+        r = r0.copy('F')
+        u = u0.copy('F')
+        v = v0.copy('C')
+
+        # don't overwrite
+        q1, r1 = qr_update(q, r, u, v, False)
+        check_qr(q1, r1, a1, self.rtol, self.atol)
+        check_qr(q, r, a, self.rtol, self.atol)
+
+        q2, r2 = qr_update(q, r, u, v, True)
+        check_qr(q2, r2, a1, self.rtol, self.atol)
+        # verify the overwriting, no good way to check u and v.
+        assert_allclose(q2, q, rtol=self.rtol, atol=self.atol)
+        assert_allclose(r2, r, rtol=self.rtol, atol=self.atol)
+
+    def test_empty_inputs(self):
+        a, q, r, u, v = self.generate('tall')
+        assert_raises(ValueError, qr_update, np.array([]), r, u, v)
+        assert_raises(ValueError, qr_update, q, np.array([]), u, v)
+        assert_raises(ValueError, qr_update, q, r, np.array([]), v)
+        assert_raises(ValueError, qr_update, q, r, u, np.array([]))
+
+    def test_mismatched_shapes(self):
+        a, q, r, u, v = self.generate('tall')
+        assert_raises(ValueError, qr_update, q, r[1:], u, v)
+        assert_raises(ValueError, qr_update, q[:-2], r, u, v)
+        assert_raises(ValueError, qr_update, q, r, u[1:], v)
+        assert_raises(ValueError, qr_update, q, r, u, v[1:])
+
+    def test_unsupported_dtypes(self):
+        dts = ['int8', 'int16', 'int32', 'int64',
+               'uint8', 'uint16', 'uint32', 'uint64',
+               'float16', 'longdouble', 'longcomplex',
+               'bool']
+        a, q0, r0, u0, v0 = self.generate('tall')
+        for dtype in dts:
+            q = q0.real.astype(dtype)
+            with np.errstate(invalid="ignore"):
+                r = r0.real.astype(dtype)
+            u = u0.real.astype(dtype)
+            v = v0.real.astype(dtype)
+            assert_raises(ValueError, qr_update, q, r0, u0, v0)
+            assert_raises(ValueError, qr_update, q0, r, u0, v0)
+            assert_raises(ValueError, qr_update, q0, r0, u, v0)
+            assert_raises(ValueError, qr_update, q0, r0, u0, v)
+
+    def test_integer_input(self):
+        q = np.arange(16).reshape(4, 4)
+        r = q.copy()  # doesn't matter
+        u = q[:, 0].copy()
+        v = r[0, :].copy()
+        assert_raises(ValueError, qr_update, q, r, u, v)
+
+    def test_check_finite(self):
+        a0, q0, r0, u0, v0 = self.generate('tall', p=3)
+
+        q = q0.copy('F')
+        q[1,1] = np.nan
+        assert_raises(ValueError, qr_update, q, r0, u0[:,0], v0[:,0])
+        assert_raises(ValueError, qr_update, q, r0, u0, v0)
+
+        r = r0.copy('F')
+        r[1,1] = np.nan
+        assert_raises(ValueError, qr_update, q0, r, u0[:,0], v0[:,0])
+        assert_raises(ValueError, qr_update, q0, r, u0, v0)
+
+        u = u0.copy('F')
+        u[0,0] = np.nan
+        assert_raises(ValueError, qr_update, q0, r0, u[:,0], v0[:,0])
+        assert_raises(ValueError, qr_update, q0, r0, u, v0)
+
+        v = v0.copy('F')
+        v[0,0] = np.nan
+        assert_raises(ValueError, qr_update, q0, r0, u[:,0], v[:,0])
+        assert_raises(ValueError, qr_update, q0, r0, u, v)
+
+    def test_economic_check_finite(self):
+        a0, q0, r0, u0, v0 = self.generate('tall', mode='economic', p=3)
+
+        q = q0.copy('F')
+        q[1,1] = np.nan
+        assert_raises(ValueError, qr_update, q, r0, u0[:,0], v0[:,0])
+        assert_raises(ValueError, qr_update, q, r0, u0, v0)
+
+        r = r0.copy('F')
+        r[1,1] = np.nan
+        assert_raises(ValueError, qr_update, q0, r, u0[:,0], v0[:,0])
+        assert_raises(ValueError, qr_update, q0, r, u0, v0)
+
+        u = u0.copy('F')
+        u[0,0] = np.nan
+        assert_raises(ValueError, qr_update, q0, r0, u[:,0], v0[:,0])
+        assert_raises(ValueError, qr_update, q0, r0, u, v0)
+
+        v = v0.copy('F')
+        v[0,0] = np.nan
+        assert_raises(ValueError, qr_update, q0, r0, u[:,0], v[:,0])
+        assert_raises(ValueError, qr_update, q0, r0, u, v)
+
+    def test_u_exactly_in_span_q(self):
+        q = np.array([[0, 0], [0, 0], [1, 0], [0, 1]], self.dtype)
+        r = np.array([[1, 0], [0, 1]], self.dtype)
+        u = np.array([0, 0, 0, -1], self.dtype)
+        v = np.array([1, 2], self.dtype)
+        q1, r1 = qr_update(q, r, u, v)
+        a1 = np.dot(q, r) + np.outer(u, v.conj())
+        check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+class TestQRupdate_f(BaseQRupdate):
+    dtype = np.dtype('f')
+
+class TestQRupdate_F(BaseQRupdate):
+    dtype = np.dtype('F')
+
+class TestQRupdate_d(BaseQRupdate):
+    dtype = np.dtype('d')
+
+class TestQRupdate_D(BaseQRupdate):
+    dtype = np.dtype('D')
+
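+# Illustrative sketch (added for exposition, not part of the original scipy
+# suite): the rank-1 contract the qr_update tests above verify.
+def _qr_update_example():
+    import numpy as np
+    from scipy.linalg import qr, qr_update
+    rng = np.random.RandomState(0)
+    a = rng.random_sample((5, 5))
+    q, r = qr(a)
+    u = rng.random_sample(5)
+    v = rng.random_sample(5)
+    # q1 @ r1 factors the rank-1 updated matrix a + u v^H
+    q1, r1 = qr_update(q, r, u, v)
+    assert np.allclose(q1 @ r1, a + np.outer(u, v))
+
+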
+def test_form_qTu():
+    # We want to ensure that all of the code paths through this function are
+    # tested. Most of them should be hit by the rest of the test suite, but
+    # explicit tests make clear precisely what is being tested.
+    #
+    # This function expects that Q is either C or F contiguous and square.
+    # Economic mode decompositions (Q is (M, N), M != N) do not go through
+    # this function. U may have any positive strides.
+    #
+    # Some of these tests are duplicates, since a contiguous 1d array is
+    # both C and F contiguous.
+
+    q_order = ['F', 'C']
+    q_shape = [(8, 8), ]
+    u_order = ['F', 'C', 'A']  # here A means neither F nor C contiguous
+    u_shape = [1, 3]
+    dtype = ['f', 'd', 'F', 'D']
+
+    for qo, qs, uo, us, d in \
+            itertools.product(q_order, q_shape, u_order, u_shape, dtype):
+        if us == 1:
+            # a length-1 u_shape is exercised both as a 1d vector and as an
+            # (n, 1) column
+            check_form_qTu(qo, qs, uo, us, 1, d)
+        check_form_qTu(qo, qs, uo, us, 2, d)
+
+def check_form_qTu(q_order, q_shape, u_order, u_shape, u_ndim, dtype):
+    np.random.seed(47)
+    if u_shape == 1 and u_ndim == 1:
+        u_shape = (q_shape[0],)
+    else:
+        u_shape = (q_shape[0], u_shape)
+    dtype = np.dtype(dtype)
+
+    if dtype.char in 'fd':
+        q = np.random.random(q_shape)
+        u = np.random.random(u_shape)
+    elif dtype.char in 'FD':
+        q = np.random.random(q_shape) + 1j*np.random.random(q_shape)
+        u = np.random.random(u_shape) + 1j*np.random.random(u_shape)
+    else:
+        raise ValueError("form_qTu doesn't support this dtype")
+
+    q = np.require(q, dtype, q_order)
+    if u_order != 'A':
+        u = np.require(u, dtype, u_order)
+    else:
+        u, = make_strided((u.astype(dtype),))
+
+    rtol = 10.0 ** -(np.finfo(dtype).precision-2)
+    atol = 2*np.finfo(dtype).eps
+
+    expected = np.dot(q.T.conj(), u)
+    res = _decomp_update._form_qTu(q, u)
+    assert_allclose(res, expected, rtol=rtol, atol=atol)
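+
+
+# Illustrative sketch (added for exposition, not part of the original scipy
+# suite): _form_qTu computes Q^H @ u, i.e. the coordinates of the update
+# vector(s) in the orthonormal basis carried by Q, which the update routines
+# above rely on.
+def _form_qTu_example():
+    np.random.seed(0)
+    q, _ = np.linalg.qr(np.random.random((8, 8)))
+    u = np.random.random(8)
+    res = _decomp_update._form_qTu(q, u)
+    assert_allclose(res, q.T.conj() @ u, atol=1e-12)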
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_fblas.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_fblas.py
new file mode 100644
index 00000000..c2669392
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_fblas.py
@@ -0,0 +1,607 @@
+# Test interfaces to the Fortran BLAS wrappers.
+#
+# These are tests of the interface more than of the underlying BLAS itself.
+# Only very small matrices are checked -- N=3 or so.
+#
+# !! Complex calculations really aren't checked that carefully.
+# !! Only real-valued complex numbers are used in the tests.
+
+from numpy import float32, float64, complex64, complex128, arange, array, \
+                  zeros, shape, transpose, newaxis, common_type, conjugate
+
+from scipy.linalg import _fblas as fblas
+
+from numpy.testing import assert_array_equal, \
+    assert_allclose, assert_array_almost_equal, assert_
+
+import pytest
+
+# decimal accuracy to require between Python and LAPACK/BLAS calculations
+accuracy = 5
+
+# Since numpy.dot is likely backed by the same BLAS being tested, use this
+# naive triple-loop implementation as an independent reference.
+
+
+def matrixmultiply(a, b):
+    if len(b.shape) == 1:
+        b_is_vector = True
+        b = b[:, newaxis]
+    else:
+        b_is_vector = False
+    assert_(a.shape[1] == b.shape[0])
+    c = zeros((a.shape[0], b.shape[1]), common_type(a, b))
+    for i in range(a.shape[0]):
+        for j in range(b.shape[1]):
+            s = 0
+            for k in range(a.shape[1]):
+                s += a[i, k] * b[k, j]
+            c[i, j] = s
+    if b_is_vector:
+        c = c.reshape((a.shape[0],))
+    return c
+
+##################################################
+# Test blas ?axpy
+
+
+class BaseAxpy:
+    ''' Mixin class for axpy tests '''
+
+    def test_default_a(self):
+        x = arange(3., dtype=self.dtype)
+        y = arange(3., dtype=x.dtype)
+        real_y = x*1.+y
+        y = self.blas_func(x, y)
+        assert_array_equal(real_y, y)
+
+    def test_simple(self):
+        x = arange(3., dtype=self.dtype)
+        y = arange(3., dtype=x.dtype)
+        real_y = x*3.+y
+        y = self.blas_func(x, y, a=3.)
+        assert_array_equal(real_y, y)
+
+    def test_x_stride(self):
+        x = arange(6., dtype=self.dtype)
+        y = arange(3., dtype=x.dtype)
+        real_y = x[::2]*3.+y
+        y = self.blas_func(x, y, a=3., n=3, incx=2)
+        assert_array_equal(real_y, y)
+
+    def test_y_stride(self):
+        x = arange(3., dtype=self.dtype)
+        y = zeros(6, x.dtype)
+        real_y = x*3.+y[::2]
+        y = self.blas_func(x, y, a=3., n=3, incy=2)
+        assert_array_equal(real_y, y[::2])
+
+    def test_x_and_y_stride(self):
+        x = arange(12., dtype=self.dtype)
+        y = zeros(6, x.dtype)
+        real_y = x[::4]*3.+y[::2]
+        y = self.blas_func(x, y, a=3., n=3, incx=4, incy=2)
+        assert_array_equal(real_y, y[::2])
+
+    def test_x_bad_size(self):
+        x = arange(12., dtype=self.dtype)
+        y = zeros(6, x.dtype)
+        with pytest.raises(Exception, match='failed for 1st keyword'):
+            self.blas_func(x, y, n=4, incx=5)
+
+    def test_y_bad_size(self):
+        x = arange(12., dtype=self.dtype)
+        y = zeros(6, x.dtype)
+        with pytest.raises(Exception, match='failed for 1st keyword'):
+            self.blas_func(x, y, n=3, incy=5)
+
+
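+# Hedged illustration (added for exposition, not part of the original suite):
+# ?axpy computes y := a*x + y over the elements selected by the n, incx and
+# incy keywords, mirroring the raw BLAS interface.
+def _axpy_example():
+    x = arange(4., dtype=float64)
+    y = zeros(4, float64)
+    y = fblas.daxpy(x, y, a=2.)  # y becomes 2*x + 0
+    assert_array_equal(y, 2. * x)
+
+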
+try:
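+    # Single-precision routines may be missing from the wrapped BLAS; if the
+    # attribute lookup fails, fall back to an empty placeholder class so the
+    # rest of the suite still runs.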
+    class TestSaxpy(BaseAxpy):
+        blas_func = fblas.saxpy
+        dtype = float32
+except AttributeError:
+    class TestSaxpy:
+        pass
+
+
+class TestDaxpy(BaseAxpy):
+    blas_func = fblas.daxpy
+    dtype = float64
+
+
+try:
+    class TestCaxpy(BaseAxpy):
+        blas_func = fblas.caxpy
+        dtype = complex64
+except AttributeError:
+    class TestCaxpy:
+        pass
+
+
+class TestZaxpy(BaseAxpy):
+    blas_func = fblas.zaxpy
+    dtype = complex128
+
+
+##################################################
+# Test blas ?scal
+
+class BaseScal:
+    ''' Mixin class for scal testing '''
+
+    def test_simple(self):
+        x = arange(3., dtype=self.dtype)
+        real_x = x*3.
+        x = self.blas_func(3., x)
+        assert_array_equal(real_x, x)
+
+    def test_x_stride(self):
+        x = arange(6., dtype=self.dtype)
+        real_x = x.copy()
+        real_x[::2] = x[::2]*array(3., self.dtype)
+        x = self.blas_func(3., x, n=3, incx=2)
+        assert_array_equal(real_x, x)
+
+    def test_x_bad_size(self):
+        x = arange(12., dtype=self.dtype)
+        with pytest.raises(Exception, match='failed for 1st keyword'):
+            self.blas_func(2., x, n=4, incx=5)
+
+
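+# Hedged illustration (added for exposition, not part of the original suite):
+# ?scal computes x := a*x over the selected stride and returns the scaled
+# array.
+def _scal_example():
+    x = arange(3., dtype=float64)
+    x = fblas.dscal(2., x)
+    assert_array_equal(x, array([0., 2., 4.], float64))
+
+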
+try:
+    class TestSscal(BaseScal):
+        blas_func = fblas.sscal
+        dtype = float32
+except AttributeError:
+    class TestSscal:
+        pass
+
+
+class TestDscal(BaseScal):
+    blas_func = fblas.dscal
+    dtype = float64
+
+
+try:
+    class TestCscal(BaseScal):
+        blas_func = fblas.cscal
+        dtype = complex64
+except AttributeError:
+    class TestCscal:
+        pass
+
+
+class TestZscal(BaseScal):
+    blas_func = fblas.zscal
+    dtype = complex128
+
+
+##################################################
+# Test blas ?copy
+
+class BaseCopy:
+    ''' Mixin class for copy testing '''
+
+    def test_simple(self):
+        x = arange(3., dtype=self.dtype)
+        y = zeros(shape(x), x.dtype)
+        y = self.blas_func(x, y)
+        assert_array_equal(x, y)
+
+    def test_x_stride(self):
+        x = arange(6., dtype=self.dtype)
+        y = zeros(3, x.dtype)
+        y = self.blas_func(x, y, n=3, incx=2)
+        assert_array_equal(x[::2], y)
+
+    def test_y_stride(self):
+        x = arange(3., dtype=self.dtype)
+        y = zeros(6, x.dtype)
+        y = self.blas_func(x, y, n=3, incy=2)
+        assert_array_equal(x, y[::2])
+
+    def test_x_and_y_stride(self):
+        x = arange(12., dtype=self.dtype)
+        y = zeros(6, x.dtype)
+        y = self.blas_func(x, y, n=3, incx=4, incy=2)
+        assert_array_equal(x[::4], y[::2])
+
+    def test_x_bad_size(self):
+        x = arange(12., dtype=self.dtype)
+        y = zeros(6, x.dtype)
+        with pytest.raises(Exception, match='failed for 1st keyword'):
+            self.blas_func(x, y, n=4, incx=5)
+
+    def test_y_bad_size(self):
+        x = arange(12., dtype=self.dtype)
+        y = zeros(6, x.dtype)
+        with pytest.raises(Exception, match='failed for 1st keyword'):
+            self.blas_func(x, y, n=3, incy=5)
+
+    # def test_y_bad_type(self):
+    ##   Hmmm. Should this work? What should the output be?
+    #    x = arange(3.,dtype=self.dtype)
+    #    y = zeros(shape(x))
+    #    self.blas_func(x,y)
+    #    assert_array_equal(x,y)
+
+
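+# Hedged illustration (added for exposition, not part of the original suite):
+# ?copy copies x into y, optionally through strides, and returns the
+# destination array.
+def _copy_example():
+    x = arange(3., dtype=float64)
+    y = zeros(3, float64)
+    y = fblas.dcopy(x, y)
+    assert_array_equal(x, y)
+
+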
+try:
+    class TestScopy(BaseCopy):
+        blas_func = fblas.scopy
+        dtype = float32
+except AttributeError:
+    class TestScopy:
+        pass
+
+
+class TestDcopy(BaseCopy):
+    blas_func = fblas.dcopy
+    dtype = float64
+
+
+try:
+    class TestCcopy(BaseCopy):
+        blas_func = fblas.ccopy
+        dtype = complex64
+except AttributeError:
+    class TestCcopy:
+        pass
+
+
+class TestZcopy(BaseCopy):
+    blas_func = fblas.zcopy
+    dtype = complex128
+
+
+##################################################
+# Test blas ?swap
+
+class BaseSwap:
+    ''' Mixin class for swap tests '''
+
+    def test_simple(self):
+        x = arange(3., dtype=self.dtype)
+        y = zeros(shape(x), x.dtype)
+        desired_x = y.copy()
+        desired_y = x.copy()
+        x, y = self.blas_func(x, y)
+        assert_array_equal(desired_x, x)
+        assert_array_equal(desired_y, y)
+
+    def test_x_stride(self):
+        x = arange(6., dtype=self.dtype)
+        y = zeros(3, x.dtype)
+        desired_x = y.copy()
+        desired_y = x.copy()[::2]
+        x, y = self.blas_func(x, y, n=3, incx=2)
+        assert_array_equal(desired_x, x[::2])
+        assert_array_equal(desired_y, y)
+
+    def test_y_stride(self):
+        x = arange(3., dtype=self.dtype)
+        y = zeros(6, x.dtype)
+        desired_x = y.copy()[::2]
+        desired_y = x.copy()
+        x, y = self.blas_func(x, y, n=3, incy=2)
+        assert_array_equal(desired_x, x)
+        assert_array_equal(desired_y, y[::2])
+
+    def test_x_and_y_stride(self):
+        x = arange(12., dtype=self.dtype)
+        y = zeros(6, x.dtype)
+        desired_x = y.copy()[::2]
+        desired_y = x.copy()[::4]
+        x, y = self.blas_func(x, y, n=3, incx=4, incy=2)
+        assert_array_equal(desired_x, x[::4])
+        assert_array_equal(desired_y, y[::2])
+
+    def test_x_bad_size(self):
+        x = arange(12., dtype=self.dtype)
+        y = zeros(6, x.dtype)
+        with pytest.raises(Exception, match='failed for 1st keyword'):
+            self.blas_func(x, y, n=4, incx=5)
+
+    def test_y_bad_size(self):
+        x = arange(12., dtype=self.dtype)
+        y = zeros(6, x.dtype)
+        with pytest.raises(Exception, match='failed for 1st keyword'):
+            self.blas_func(x, y, n=3, incy=5)
+
+
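+# Hedged illustration (added for exposition, not part of the original suite):
+# ?swap exchanges the contents of x and y and returns both arrays.
+def _swap_example():
+    x = arange(3., dtype=float64)
+    y = zeros(3, float64)
+    x, y = fblas.dswap(x, y)
+    assert_array_equal(x, zeros(3, float64))
+    assert_array_equal(y, arange(3., dtype=float64))
+
+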
+try:
+    class TestSswap(BaseSwap):
+        blas_func = fblas.sswap
+        dtype = float32
+except AttributeError:
+    class TestSswap:
+        pass
+
+
+class TestDswap(BaseSwap):
+    blas_func = fblas.dswap
+    dtype = float64
+
+
+try:
+    class TestCswap(BaseSwap):
+        blas_func = fblas.cswap
+        dtype = complex64
+except AttributeError:
+    class TestCswap:
+        pass
+
+
+class TestZswap(BaseSwap):
+    blas_func = fblas.zswap
+    dtype = complex128
+
+##################################################
+# Test blas ?gemv
+# This will be a mess to test all cases.
+
+
+class BaseGemv:
+    ''' Mixin class for gemv tests '''
+
+    def get_data(self, x_stride=1, y_stride=1):
+        mult = array(1, dtype=self.dtype)
+        if self.dtype in [complex64, complex128]:
+            mult = array(1+1j, dtype=self.dtype)
+        from numpy.random import normal, seed
+        seed(1234)
+        alpha = array(1., dtype=self.dtype) * mult
+        beta = array(1., dtype=self.dtype) * mult
+        a = normal(0., 1., (3, 3)).astype(self.dtype) * mult
+        x = arange(shape(a)[0]*x_stride, dtype=self.dtype) * mult
+        y = arange(shape(a)[1]*y_stride, dtype=self.dtype) * mult
+        return alpha, beta, a, x, y
+
+    def test_simple(self):
+        alpha, beta, a, x, y = self.get_data()
+        desired_y = alpha*matrixmultiply(a, x)+beta*y
+        y = self.blas_func(alpha, a, x, beta, y)
+        assert_array_almost_equal(desired_y, y)
+
+    def test_default_beta_y(self):
+        alpha, beta, a, x, y = self.get_data()
+        desired_y = matrixmultiply(a, x)
+        y = self.blas_func(1, a, x)
+        assert_array_almost_equal(desired_y, y)
+
+    def test_simple_transpose(self):
+        alpha, beta, a, x, y = self.get_data()
+        desired_y = alpha*matrixmultiply(transpose(a), x)+beta*y
+        y = self.blas_func(alpha, a, x, beta, y, trans=1)
+        assert_array_almost_equal(desired_y, y)
+
+    def test_simple_transpose_conj(self):
+        alpha, beta, a, x, y = self.get_data()
+        desired_y = alpha*matrixmultiply(transpose(conjugate(a)), x)+beta*y
+        y = self.blas_func(alpha, a, x, beta, y, trans=2)
+        assert_array_almost_equal(desired_y, y)
+
+    def test_x_stride(self):
+        alpha, beta, a, x, y = self.get_data(x_stride=2)
+        desired_y = alpha*matrixmultiply(a, x[::2])+beta*y
+        y = self.blas_func(alpha, a, x, beta, y, incx=2)
+        assert_array_almost_equal(desired_y, y)
+
+    def test_x_stride_transpose(self):
+        alpha, beta, a, x, y = self.get_data(x_stride=2)
+        desired_y = alpha*matrixmultiply(transpose(a), x[::2])+beta*y
+        y = self.blas_func(alpha, a, x, beta, y, trans=1, incx=2)
+        assert_array_almost_equal(desired_y, y)
+
+    def test_x_stride_assert(self):
+        # Check that an incx inconsistent with the size of x raises.
+        alpha, beta, a, x, y = self.get_data(x_stride=2)
+        with pytest.raises(Exception, match='failed for 3rd argument'):
+            y = self.blas_func(1, a, x, 1, y, trans=0, incx=3)
+        with pytest.raises(Exception, match='failed for 3rd argument'):
+            y = self.blas_func(1, a, x, 1, y, trans=1, incx=3)
+
+    def test_y_stride(self):
+        alpha, beta, a, x, y = self.get_data(y_stride=2)
+        desired_y = y.copy()
+        desired_y[::2] = alpha*matrixmultiply(a, x)+beta*y[::2]
+        y = self.blas_func(alpha, a, x, beta, y, incy=2)
+        assert_array_almost_equal(desired_y, y)
+
+    def test_y_stride_transpose(self):
+        alpha, beta, a, x, y = self.get_data(y_stride=2)
+        desired_y = y.copy()
+        desired_y[::2] = alpha*matrixmultiply(transpose(a), x)+beta*y[::2]
+        y = self.blas_func(alpha, a, x, beta, y, trans=1, incy=2)
+        assert_array_almost_equal(desired_y, y)
+
+    def test_y_stride_assert(self):
+        # Check that an incy inconsistent with the size of y raises.
+        alpha, beta, a, x, y = self.get_data(y_stride=2)
+        with pytest.raises(Exception, match='failed for 2nd keyword'):
+            y = self.blas_func(1, a, x, 1, y, trans=0, incy=3)
+        with pytest.raises(Exception, match='failed for 2nd keyword'):
+            y = self.blas_func(1, a, x, 1, y, trans=1, incy=3)
+
+
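+# Hedged illustration (added for exposition, not part of the original suite):
+# ?gemv computes y := alpha*op(a) @ x + beta*y, where trans=0 uses
+# op(a) = a, trans=1 uses a.T and trans=2 uses a.conj().T.
+def _gemv_example():
+    a = array([[1., 2.], [3., 4.]], float64)
+    x = array([1., 1.], float64)
+    y = zeros(2, float64)
+    y = fblas.dgemv(1., a, x, 0., y)
+    assert_allclose(y, matrixmultiply(a, x))
+
+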
+try:
+    class TestSgemv(BaseGemv):
+        blas_func = fblas.sgemv
+        dtype = float32
+
+        def test_sgemv_on_osx(self):
+            from itertools import product
+            import sys
+            import numpy as np
+
+            if sys.platform != 'darwin':
+                return
+
+            def aligned_array(shape, align, dtype, order='C'):
+                # Make an array of the given shape whose data is aligned
+                # at `align` bytes
+                d = dtype()
+                # Make array of correct size with `align` extra bytes
+                N = np.prod(shape)
+                tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
+                address = tmp.__array_interface__["data"][0]
+                # Find offset into array giving desired alignment
+                for offset in range(align):
+                    if (address + offset) % align == 0:
+                        break
+                tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)
+                return tmp.reshape(shape, order=order)
+
+            def as_aligned(arr, align, dtype, order='C'):
+                # Copy `arr` into an aligned array with same shape
+                aligned = aligned_array(arr.shape, align, dtype, order)
+                aligned[:] = arr[:]
+                return aligned
+
+            def assert_dot_close(A, X, desired):
+                assert_allclose(self.blas_func(1.0, A, X), desired,
+                                rtol=1e-5, atol=1e-7)
+
+            testdata = product((15, 32), (10000,), (200, 89), ('C', 'F'))
+            for align, m, n, a_order in testdata:
+                A_d = np.random.rand(m, n)
+                X_d = np.random.rand(n)
+                desired = np.dot(A_d, X_d)
+                # Calculation with aligned single precision
+                A_f = as_aligned(A_d, align, np.float32, order=a_order)
+                X_f = as_aligned(X_d, align, np.float32, order=a_order)
+                assert_dot_close(A_f, X_f, desired)
+
+except AttributeError:
+    class TestSgemv:
+        pass
+
+
+class TestDgemv(BaseGemv):
+    blas_func = fblas.dgemv
+    dtype = float64
+
+
+try:
+    class TestCgemv(BaseGemv):
+        blas_func = fblas.cgemv
+        dtype = complex64
+except AttributeError:
+    class TestCgemv:
+        pass
+
+
+class TestZgemv(BaseGemv):
+    blas_func = fblas.zgemv
+    dtype = complex128
+
+
+"""
+##################################################
+### Test blas ?ger
+### This will be a mess to test all cases.
+
+class BaseGer:
+    def get_data(self,x_stride=1,y_stride=1):
+        from numpy.random import normal, seed
+        seed(1234)
+        alpha = array(1., dtype = self.dtype)
+        a = normal(0.,1.,(3,3)).astype(self.dtype)
+        x = arange(shape(a)[0]*x_stride,dtype=self.dtype)
+        y = arange(shape(a)[1]*y_stride,dtype=self.dtype)
+        return alpha,a,x,y
+    def test_simple(self):
+        alpha,a,x,y = self.get_data()
+        # transpose takes care of Fortran vs. C (and Python) memory layout
+        desired_a = alpha*transpose(x[:,newaxis]*y) + a
+        self.blas_func(x,y,a)
+        assert_array_almost_equal(desired_a,a)
+    def test_x_stride(self):
+        alpha,a,x,y = self.get_data(x_stride=2)
+        desired_a = alpha*transpose(x[::2,newaxis]*y) + a
+        self.blas_func(x,y,a,incx=2)
+        assert_array_almost_equal(desired_a,a)
+    def test_x_stride_assert(self):
+        alpha,a,x,y = self.get_data(x_stride=2)
+        with pytest.raises(ValueError, match='foo'):
+            self.blas_func(x,y,a,incx=3)
+    def test_y_stride(self):
+        alpha,a,x,y = self.get_data(y_stride=2)
+        desired_a = alpha*transpose(x[:,newaxis]*y[::2]) + a
+        self.blas_func(x,y,a,incy=2)
+        assert_array_almost_equal(desired_a,a)
+
+    def test_y_stride_assert(self):
+        alpha,a,x,y = self.get_data(y_stride=2)
+        with pytest.raises(ValueError, match='foo'):
+            self.blas_func(a,x,y,incy=3)
+
+class TestSger(BaseGer):
+    blas_func = fblas.sger
+    dtype = float32
+class TestDger(BaseGer):
+    blas_func = fblas.dger
+    dtype = float64
+"""
+##################################################
+# Test blas ?gerc
+# This will be a mess to test all cases.
+
+"""
+class BaseGerComplex(BaseGer):
+    def get_data(self,x_stride=1,y_stride=1):
+        from numpy.random import normal, seed
+        seed(1234)
+        alpha = array(1+1j, dtype = self.dtype)
+        a = normal(0.,1.,(3,3)).astype(self.dtype)
+        a = a + normal(0.,1.,(3,3)) * array(1j, dtype = self.dtype)
+        x = normal(0.,1.,shape(a)[0]*x_stride).astype(self.dtype)
+        x = x + x * array(1j, dtype = self.dtype)
+        y = normal(0.,1.,shape(a)[1]*y_stride).astype(self.dtype)
+        y = y + y * array(1j, dtype = self.dtype)
+        return alpha,a,x,y
+    def test_simple(self):
+        alpha,a,x,y = self.get_data()
+        # transpose takes care of Fortran vs. C (and Python) memory layout
+        a = a * array(0.,dtype = self.dtype)
+        #desired_a = alpha*transpose(x[:,newaxis]*self.transform(y)) + a
+        desired_a = alpha*transpose(x[:,newaxis]*y) + a
+        #self.blas_func(x,y,a,alpha = alpha)
+        fblas.cgeru(x,y,a,alpha = alpha)
+        assert_array_almost_equal(desired_a,a)
+
+    #def test_x_stride(self):
+    #    alpha,a,x,y = self.get_data(x_stride=2)
+    #    desired_a = alpha*transpose(x[::2,newaxis]*self.transform(y)) + a
+    #    self.blas_func(x,y,a,incx=2)
+    #    assert_array_almost_equal(desired_a,a)
+    #def test_y_stride(self):
+    #    alpha,a,x,y = self.get_data(y_stride=2)
+    #    desired_a = alpha*transpose(x[:,newaxis]*self.transform(y[::2])) + a
+    #    self.blas_func(x,y,a,incy=2)
+    #    assert_array_almost_equal(desired_a,a)
+
+class TestCgeru(BaseGerComplex):
+    blas_func = fblas.cgeru
+    dtype = complex64
+    def transform(self,x):
+        return x
+class TestZgeru(BaseGerComplex):
+    blas_func = fblas.zgeru
+    dtype = complex128
+    def transform(self,x):
+        return x
+
+class TestCgerc(BaseGerComplex):
+    blas_func = fblas.cgerc
+    dtype = complex64
+    def transform(self,x):
+        return conjugate(x)
+
+class TestZgerc(BaseGerComplex):
+    blas_func = fblas.zgerc
+    dtype = complex128
+    def transform(self,x):
+        return conjugate(x)
+"""
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_interpolative.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_interpolative.py
new file mode 100644
index 00000000..0b77dbc1
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_interpolative.py
@@ -0,0 +1,241 @@
+#******************************************************************************
+#   Copyright (C) 2013 Kenneth L. Ho
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions are met:
+#
+#   Redistributions of source code must retain the above copyright notice, this
+#   list of conditions and the following disclaimer. Redistributions in binary
+#   form must reproduce the above copyright notice, this list of conditions and
+#   the following disclaimer in the documentation and/or other materials
+#   provided with the distribution.
+#
+#   None of the names of the copyright holders may be used to endorse or
+#   promote products derived from this software without specific prior written
+#   permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+#   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+#   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+#   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+#   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+#   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+#   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+#   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+#   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+#   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+#   POSSIBILITY OF SUCH DAMAGE.
+#******************************************************************************
+
+import scipy.linalg.interpolative as pymatrixid
+import numpy as np
+from scipy.linalg import hilbert, svdvals, norm
+from scipy.sparse.linalg import aslinearoperator
+from scipy.linalg.interpolative import interp_decomp
+
+from numpy.testing import (assert_, assert_allclose, assert_equal,
+                           assert_array_equal)
+import pytest
+from pytest import raises as assert_raises
+import sys
+_IS_32BIT = (sys.maxsize < 2**32)
+
+
+@pytest.fixture()
+def eps():
+    yield 1e-12
+
+
+@pytest.fixture(params=[np.float64, np.complex128])
+def A(request):
+    # construct a Hilbert matrix of the requested dtype; n sets its size
+    n = 300
+    yield hilbert(n).astype(request.param)
+
+
+@pytest.fixture()
+def L(A):
+    yield aslinearoperator(A)
+
+
+@pytest.fixture()
+def rank(A, eps):
+    S = np.linalg.svd(A, compute_uv=False)
+    try:
+        rank = np.nonzero(S < eps)[0][0]
+    except IndexError:
+        rank = A.shape[0]
+    return rank
+
+
+class TestInterpolativeDecomposition:
+
+    @pytest.mark.parametrize(
+        "rand,lin_op",
+        [(False, False), (True, False), (True, True)])
+    def test_real_id_fixed_precision(self, A, L, eps, rand, lin_op):
+        if _IS_32BIT and A.dtype == np.complex_ and rand:
+            pytest.xfail("bug in external fortran code")
+        # Test ID routines on a Hilbert matrix.
+        A_or_L = A if not lin_op else L
+
+        k, idx, proj = pymatrixid.interp_decomp(A_or_L, eps, rand=rand)
+        B = pymatrixid.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj)
+        assert_allclose(A, B, rtol=eps, atol=1e-08)
+
+    @pytest.mark.parametrize(
+        "rand,lin_op",
+        [(False, False), (True, False), (True, True)])
+    def test_real_id_fixed_rank(self, A, L, eps, rank, rand, lin_op):
+        if _IS_32BIT and A.dtype == np.complex_ and rand:
+            pytest.xfail("bug in external fortran code")
+        k = rank
+        A_or_L = A if not lin_op else L
+
+        idx, proj = pymatrixid.interp_decomp(A_or_L, k, rand=rand)
+        B = pymatrixid.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj)
+        assert_allclose(A, B, rtol=eps, atol=1e-08)
+
+    @pytest.mark.parametrize("rand,lin_op", [(False, False)])
+    def test_real_id_skel_and_interp_matrices(
+            self, A, L, eps, rank, rand, lin_op):
+        k = rank
+        A_or_L = A if not lin_op else L
+
+        idx, proj = pymatrixid.interp_decomp(A_or_L, k, rand=rand)
+        P = pymatrixid.reconstruct_interp_matrix(idx, proj)
+        B = pymatrixid.reconstruct_skel_matrix(A, k, idx)
+        assert_allclose(B, A[:, idx[:k]], rtol=eps, atol=1e-08)
+        assert_allclose(B @ P, A, rtol=eps, atol=1e-08)
+
+    @pytest.mark.parametrize(
+        "rand,lin_op",
+        [(False, False), (True, False), (True, True)])
+    def test_svd_fixed_precision(self, A, L, eps, rand, lin_op):
+        if _IS_32BIT and A.dtype == np.complex_ and rand:
+            pytest.xfail("bug in external fortran code")
+        A_or_L = A if not lin_op else L
+
+        U, S, V = pymatrixid.svd(A_or_L, eps, rand=rand)
+        B = U * S @ V.T.conj()
+        assert_allclose(A, B, rtol=eps, atol=1e-08)
+
+    @pytest.mark.parametrize(
+        "rand,lin_op",
+        [(False, False), (True, False), (True, True)])
+    def test_svd_fixed_rank(self, A, L, eps, rank, rand, lin_op):
+        if _IS_32BIT and A.dtype == np.complex_ and rand:
+            pytest.xfail("bug in external fortran code")
+        k = rank
+        A_or_L = A if not lin_op else L
+
+        U, S, V = pymatrixid.svd(A_or_L, k, rand=rand)
+        B = U * S @ V.T.conj()
+        assert_allclose(A, B, rtol=eps, atol=1e-08)
+
+    def test_id_to_svd(self, A, eps, rank):
+        k = rank
+
+        idx, proj = pymatrixid.interp_decomp(A, k, rand=False)
+        U, S, V = pymatrixid.id_to_svd(A[:, idx[:k]], idx, proj)
+        B = U * S @ V.T.conj()
+        assert_allclose(A, B, rtol=eps, atol=1e-08)
+
+    def test_estimate_spectral_norm(self, A):
+        s = svdvals(A)
+        norm_2_est = pymatrixid.estimate_spectral_norm(A)
+        assert_allclose(norm_2_est, s[0], rtol=1e-6, atol=1e-8)
+
+    def test_estimate_spectral_norm_diff(self, A):
+        B = A.copy()
+        B[:, 0] *= 1.2
+        s = svdvals(A - B)
+        norm_2_est = pymatrixid.estimate_spectral_norm_diff(A, B)
+        assert_allclose(norm_2_est, s[0], rtol=1e-6, atol=1e-8)
+
+    def test_rank_estimates_array(self, A):
+        B = np.array([[1, 1, 0], [0, 0, 1], [0, 0, 1]], dtype=A.dtype)
+
+        for M in [A, B]:
+            rank_tol = 1e-9
+            rank_np = np.linalg.matrix_rank(M, norm(M, 2) * rank_tol)
+            rank_est = pymatrixid.estimate_rank(M, rank_tol)
+            assert_(rank_est >= rank_np)
+            assert_(rank_est <= rank_np + 10)
+
+    def test_rank_estimates_lin_op(self, A):
+        B = np.array([[1, 1, 0], [0, 0, 1], [0, 0, 1]], dtype=A.dtype)
+
+        for M in [A, B]:
+            ML = aslinearoperator(M)
+            rank_tol = 1e-9
+            rank_np = np.linalg.matrix_rank(M, norm(M, 2) * rank_tol)
+            rank_est = pymatrixid.estimate_rank(ML, rank_tol)
+            assert_(rank_est >= rank_np - 4)
+            assert_(rank_est <= rank_np + 4)
+
+    def test_rand(self):
+        pymatrixid.seed('default')
+        assert_allclose(pymatrixid.rand(2), [0.8932059, 0.64500803],
+                        rtol=1e-4, atol=1e-8)
+
+        pymatrixid.seed(1234)
+        x1 = pymatrixid.rand(2)
+        assert_allclose(x1, [0.7513823, 0.06861718], rtol=1e-4, atol=1e-8)
+
+        np.random.seed(1234)
+        pymatrixid.seed()
+        x2 = pymatrixid.rand(2)
+
+        np.random.seed(1234)
+        pymatrixid.seed(np.random.rand(55))
+        x3 = pymatrixid.rand(2)
+
+        assert_allclose(x1, x2)
+        assert_allclose(x1, x3)
+
+    def test_badcall(self):
+        A = hilbert(5).astype(np.float32)
+        with assert_raises(ValueError):
+            pymatrixid.interp_decomp(A, 1e-6, rand=False)
+
+    def test_rank_too_large(self):
+        # svd(array, k) should not segfault
+        a = np.ones((4, 3))
+        with assert_raises(ValueError):
+            pymatrixid.svd(a, 4)
+
+    def test_full_rank(self):
+        eps = 1.0e-12
+
+        # fixed precision
+        A = np.random.rand(16, 8)
+        k, idx, proj = pymatrixid.interp_decomp(A, eps)
+        assert_equal(k, A.shape[1])
+
+        P = pymatrixid.reconstruct_interp_matrix(idx, proj)
+        B = pymatrixid.reconstruct_skel_matrix(A, k, idx)
+        assert_allclose(A, B @ P)
+
+        # fixed rank
+        idx, proj = pymatrixid.interp_decomp(A, k)
+
+        P = pymatrixid.reconstruct_interp_matrix(idx, proj)
+        B = pymatrixid.reconstruct_skel_matrix(A, k, idx)
+        assert_allclose(A, B @ P)
+
+    @pytest.mark.parametrize("dtype", [np.float_, np.complex_])
+    @pytest.mark.parametrize("rand", [True, False])
+    @pytest.mark.parametrize("eps", [1, 0.1])
+    def test_bug_9793(self, dtype, rand, eps):
+        if _IS_32BIT and dtype == np.complex_ and rand:
+            pytest.xfail("bug in external fortran code")
+        A = np.array([[-1, -1, -1, 0, 0, 0],
+                      [0, 0, 0, 1, 1, 1],
+                      [1, 0, 0, 1, 0, 0],
+                      [0, 1, 0, 0, 1, 0],
+                      [0, 0, 1, 0, 0, 1]],
+                     dtype=dtype, order="C")
+        B = A.copy()
+        interp_decomp(A.T, eps, rand=rand)
+        assert_array_equal(A, B)
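+
+
+# Editorial sketch (not part of the original suite): the minimal ID workflow
+# that the fixed-precision tests above exercise, on a small random matrix.
+# The helper name is illustrative only.
+def _interp_decomp_sketch(n=20, tol=1e-8):
+    a = np.random.rand(n, n)
+    # with a tolerance, interp_decomp returns the rank k, the column index
+    # array idx, and the interpolation coefficients proj
+    k, idx, proj = pymatrixid.interp_decomp(a, tol)
+    # rebuild the matrix from its k skeleton columns
+    b = pymatrixid.reconstruct_matrix_from_id(a[:, idx[:k]], idx, proj)
+    return np.allclose(a, b, atol=1e-6)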
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_lapack.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_lapack.py
new file mode 100644
index 00000000..179ae344
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_lapack.py
@@ -0,0 +1,3282 @@
+#
+# Created by: Pearu Peterson, September 2002
+#
+
+import sys
+from functools import reduce
+
+from numpy.testing import (assert_equal, assert_array_almost_equal, assert_,
+                           assert_allclose, assert_almost_equal,
+                           assert_array_equal)
+import pytest
+from pytest import raises as assert_raises
+
+import numpy as np
+from numpy import (eye, ones, zeros, zeros_like, triu, tril, tril_indices,
+                   triu_indices)
+
+from numpy.random import rand, randint, seed
+
+from scipy.linalg import (_flapack as flapack, lapack, inv, svd, cholesky,
+                          solve, ldl, norm, block_diag, qr, eigh)
+
+from scipy.linalg.lapack import _compute_lwork
+from scipy.stats import ortho_group, unitary_group
+
+
+import scipy.sparse as sps
+
+try:
+    from scipy.linalg import _clapack as clapack
+except ImportError:
+    clapack = None
+from scipy.linalg.lapack import get_lapack_funcs
+from scipy.linalg.blas import get_blas_funcs
+
+REAL_DTYPES = [np.float32, np.float64]
+COMPLEX_DTYPES = [np.complex64, np.complex128]
+DTYPES = REAL_DTYPES + COMPLEX_DTYPES
+
+
+def generate_random_dtype_array(shape, dtype):
+    # generate a random matrix of the given shape and dtype
+    if dtype in COMPLEX_DTYPES:
+        return (np.random.rand(*shape)
+                + np.random.rand(*shape)*1.0j).astype(dtype)
+    return np.random.rand(*shape).astype(dtype)
+
+
+def test_lapack_documented():
+    """Test that all entries are in the doc."""
+    if lapack.__doc__ is None:  # just in case there is a python -OO
+        pytest.skip('lapack.__doc__ is None')
+    names = set(lapack.__doc__.split())
+    ignore_list = set([
+        'absolute_import', 'clapack', 'division', 'find_best_lapack_type',
+        'flapack', 'print_function', 'HAS_ILP64',
+    ])
+    missing = list()
+    for name in dir(lapack):
+        if (not name.startswith('_') and name not in ignore_list and
+                name not in names):
+            missing.append(name)
+    assert missing == [], 'Name(s) missing from lapack.__doc__ or ignore_list'
+
+
+class TestFlapackSimple:
+
+    def test_gebal(self):
+        a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
+        a1 = [[1, 0, 0, 3e-4],
+              [4, 0, 0, 2e-3],
+              [7, 1, 0, 0],
+              [0, 1, 0, 0]]
+        for p in 'sdzc':
+            f = getattr(flapack, p+'gebal', None)
+            if f is None:
+                continue
+            ba, lo, hi, pivscale, info = f(a)
+            assert_(not info, repr(info))
+            assert_array_almost_equal(ba, a)
+            assert_equal((lo, hi), (0, len(a[0])-1))
+            assert_array_almost_equal(pivscale, np.ones(len(a)))
+
+            ba, lo, hi, pivscale, info = f(a1, permute=1, scale=1)
+            assert_(not info, repr(info))
+            # print(a1)
+            # print(ba, lo, hi, pivscale)
+
+    def test_gehrd(self):
+        a = [[-149, -50, -154],
+             [537, 180, 546],
+             [-27, -9, -25]]
+        for p in 'd':
+            f = getattr(flapack, p+'gehrd', None)
+            if f is None:
+                continue
+            ht, tau, info = f(a)
+            assert_(not info, repr(info))
+
+    def test_trsyl(self):
+        a = np.array([[1, 2], [0, 4]])
+        b = np.array([[5, 6], [0, 8]])
+        c = np.array([[9, 10], [11, 12]])
+        trans = 'T'
+
+        # Test single and double implementations, including most
+        # of the options
+        for dtype in 'fdFD':
+            a1, b1, c1 = a.astype(dtype), b.astype(dtype), c.astype(dtype)
+            trsyl, = get_lapack_funcs(('trsyl',), (a1,))
+            if dtype.isupper():  # is complex dtype
+                a1[0] += 1j
+                trans = 'C'
+
+            x, scale, info = trsyl(a1, b1, c1)
+            assert_array_almost_equal(np.dot(a1, x) + np.dot(x, b1),
+                                      scale * c1)
+
+            x, scale, info = trsyl(a1, b1, c1, trana=trans, tranb=trans)
+            assert_array_almost_equal(
+                    np.dot(a1.conjugate().T, x) + np.dot(x, b1.conjugate().T),
+                    scale * c1, decimal=4)
+
+            x, scale, info = trsyl(a1, b1, c1, isgn=-1)
+            assert_array_almost_equal(np.dot(a1, x) - np.dot(x, b1),
+                                      scale * c1, decimal=4)
+
+    def test_lange(self):
+        a = np.array([
+            [-149, -50, -154],
+            [537, 180, 546],
+            [-27, -9, -25]])
+
+        for dtype in 'fdFD':
+            for norm_str in 'Mm1OoIiFfEe':
+                a1 = a.astype(dtype)
+                if dtype.isupper():
+                    # is complex dtype
+                    a1[0, 0] += 1j
+
+                lange, = get_lapack_funcs(('lange',), (a1,))
+                value = lange(norm_str, a1)
+
+                if norm_str in 'FfEe':
+                    if dtype in 'Ff':
+                        decimal = 3
+                    else:
+                        decimal = 7
+                    ref = np.sqrt(np.sum(np.square(np.abs(a1))))
+                    assert_almost_equal(value, ref, decimal)
+                else:
+                    if norm_str in 'Mm':
+                        ref = np.max(np.abs(a1))
+                    elif norm_str in '1Oo':
+                        ref = np.max(np.sum(np.abs(a1), axis=0))
+                    elif norm_str in 'Ii':
+                        ref = np.max(np.sum(np.abs(a1), axis=1))
+
+                    assert_equal(value, ref)
+
+
+class TestLapack:
+
+    def test_flapack(self):
+        if hasattr(flapack, 'empty_module'):
+            # flapack module is empty
+            pass
+
+    def test_clapack(self):
+        if hasattr(clapack, 'empty_module'):
+            # clapack module is empty
+            pass
+
+
+class TestLeastSquaresSolvers:
+
+    def test_gels(self):
+        seed(1234)
+        # Test fat/tall matrix argument handling - gh-issue #8329
+        for ind, dtype in enumerate(DTYPES):
+            m = 10
+            n = 20
+            nrhs = 1
+            a1 = rand(m, n).astype(dtype)
+            b1 = rand(n).astype(dtype)
+            gls, glslw = get_lapack_funcs(('gels', 'gels_lwork'), dtype=dtype)
+
+            # Query workspace sizes
+            lwork = _compute_lwork(glslw, m, n, nrhs)
+            _, _, info = gls(a1, b1, lwork=lwork)
+            assert_(info >= 0)
+            _, _, info = gls(a1, b1, trans='TTCC'[ind], lwork=lwork)
+            assert_(info >= 0)
+
+        for dtype in REAL_DTYPES:
+            a1 = np.array([[1.0, 2.0],
+                           [4.0, 5.0],
+                           [7.0, 8.0]], dtype=dtype)
+            b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
+            gels, gels_lwork, geqrf = get_lapack_funcs(
+                    ('gels', 'gels_lwork', 'geqrf'), (a1, b1))
+
+            m, n = a1.shape
+            if len(b1.shape) == 2:
+                nrhs = b1.shape[1]
+            else:
+                nrhs = 1
+
+            # Query workspace sizes
+            lwork = _compute_lwork(gels_lwork, m, n, nrhs)
+
+            lqr, x, info = gels(a1, b1, lwork=lwork)
+            assert_allclose(x[:-1], np.array([-14.333333333333323,
+                                              14.999999999999991],
+                                             dtype=dtype),
+                            rtol=25*np.finfo(dtype).eps)
+            lqr_truth, _, _, _ = geqrf(a1)
+            assert_array_equal(lqr, lqr_truth)
+
+        for dtype in COMPLEX_DTYPES:
+            a1 = np.array([[1.0+4.0j, 2.0],
+                           [4.0+0.5j, 5.0-3.0j],
+                           [7.0-2.0j, 8.0+0.7j]], dtype=dtype)
+            b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
+            gels, gels_lwork, geqrf = get_lapack_funcs(
+                    ('gels', 'gels_lwork', 'geqrf'), (a1, b1))
+
+            m, n = a1.shape
+            if len(b1.shape) == 2:
+                nrhs = b1.shape[1]
+            else:
+                nrhs = 1
+
+            # Query workspace sizes
+            lwork = _compute_lwork(gels_lwork, m, n, nrhs)
+
+            lqr, x, info = gels(a1, b1, lwork=lwork)
+            assert_allclose(x[:-1],
+                            np.array([1.161753632288328-1.901075709391912j,
+                                      1.735882340522193+1.521240901196909j],
+                                     dtype=dtype), rtol=25*np.finfo(dtype).eps)
+            lqr_truth, _, _, _ = geqrf(a1)
+            assert_array_equal(lqr, lqr_truth)
+
+    def test_gelsd(self):
+        for dtype in REAL_DTYPES:
+            a1 = np.array([[1.0, 2.0],
+                           [4.0, 5.0],
+                           [7.0, 8.0]], dtype=dtype)
+            b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
+            gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'),
+                                                  (a1, b1))
+
+            m, n = a1.shape
+            if len(b1.shape) == 2:
+                nrhs = b1.shape[1]
+            else:
+                nrhs = 1
+
+            # Query workspace sizes
+            work, iwork, info = gelsd_lwork(m, n, nrhs, -1)
+            lwork = int(np.real(work))
+            iwork_size = iwork
+
+            x, s, rank, info = gelsd(a1, b1, lwork, iwork_size,
+                                     -1, False, False)
+            assert_allclose(x[:-1], np.array([-14.333333333333323,
+                                              14.999999999999991],
+                                             dtype=dtype),
+                            rtol=25*np.finfo(dtype).eps)
+            assert_allclose(s, np.array([12.596017180511966,
+                                         0.583396253199685], dtype=dtype),
+                            rtol=25*np.finfo(dtype).eps)
+
+        for dtype in COMPLEX_DTYPES:
+            a1 = np.array([[1.0+4.0j, 2.0],
+                           [4.0+0.5j, 5.0-3.0j],
+                           [7.0-2.0j, 8.0+0.7j]], dtype=dtype)
+            b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
+            gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'),
+                                                  (a1, b1))
+
+            m, n = a1.shape
+            if len(b1.shape) == 2:
+                nrhs = b1.shape[1]
+            else:
+                nrhs = 1
+
+            # Query workspace sizes
+            work, rwork, iwork, info = gelsd_lwork(m, n, nrhs, -1)
+            lwork = int(np.real(work))
+            rwork_size = int(rwork)
+            iwork_size = iwork
+
+            x, s, rank, info = gelsd(a1, b1, lwork, rwork_size, iwork_size,
+                                     -1, False, False)
+            assert_allclose(x[:-1],
+                            np.array([1.161753632288328-1.901075709391912j,
+                                      1.735882340522193+1.521240901196909j],
+                                     dtype=dtype), rtol=25*np.finfo(dtype).eps)
+            assert_allclose(s,
+                            np.array([13.035514762572043, 4.337666985231382],
+                                     dtype=dtype), rtol=25*np.finfo(dtype).eps)
+
+    def test_gelss(self):
+
+        for dtype in REAL_DTYPES:
+            a1 = np.array([[1.0, 2.0],
+                           [4.0, 5.0],
+                           [7.0, 8.0]], dtype=dtype)
+            b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
+            gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
+                                                  (a1, b1))
+
+            m, n = a1.shape
+            if len(b1.shape) == 2:
+                nrhs = b1.shape[1]
+            else:
+                nrhs = 1
+
+            # Query workspace sizes
+            work, info = gelss_lwork(m, n, nrhs, -1)
+            lwork = int(np.real(work))
+
+            v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False)
+            assert_allclose(x[:-1], np.array([-14.333333333333323,
+                                              14.999999999999991],
+                                             dtype=dtype),
+                            rtol=25*np.finfo(dtype).eps)
+            assert_allclose(s, np.array([12.596017180511966,
+                                         0.583396253199685], dtype=dtype),
+                            rtol=25*np.finfo(dtype).eps)
+
+        for dtype in COMPLEX_DTYPES:
+            a1 = np.array([[1.0+4.0j, 2.0],
+                           [4.0+0.5j, 5.0-3.0j],
+                           [7.0-2.0j, 8.0+0.7j]], dtype=dtype)
+            b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
+            gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
+                                                  (a1, b1))
+
+            m, n = a1.shape
+            if len(b1.shape) == 2:
+                nrhs = b1.shape[1]
+            else:
+                nrhs = 1
+
+            # Query workspace sizes
+            work, info = gelss_lwork(m, n, nrhs, -1)
+            lwork = int(np.real(work))
+
+            v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False)
+            assert_allclose(x[:-1],
+                            np.array([1.161753632288328-1.901075709391912j,
+                                      1.735882340522193+1.521240901196909j],
+                                     dtype=dtype),
+                            rtol=25*np.finfo(dtype).eps)
+            assert_allclose(s, np.array([13.035514762572043,
+                                         4.337666985231382], dtype=dtype),
+                            rtol=25*np.finfo(dtype).eps)
+
+    def test_gelsy(self):
+
+        for dtype in REAL_DTYPES:
+            a1 = np.array([[1.0, 2.0],
+                           [4.0, 5.0],
+                           [7.0, 8.0]], dtype=dtype)
+            b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
+            gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelss_lwork'),
+                                                  (a1, b1))
+
+            m, n = a1.shape
+            if len(b1.shape) == 2:
+                nrhs = b1.shape[1]
+            else:
+                nrhs = 1
+
+            # Query workspace sizes
+            work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps)
+            lwork = int(np.real(work))
+
+            jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
+            v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps,
+                                        lwork, False, False)
+            assert_allclose(x[:-1], np.array([-14.333333333333323,
+                                              14.999999999999991],
+                                             dtype=dtype),
+                            rtol=25*np.finfo(dtype).eps)
+
+        for dtype in COMPLEX_DTYPES:
+            a1 = np.array([[1.0+4.0j, 2.0],
+                           [4.0+0.5j, 5.0-3.0j],
+                           [7.0-2.0j, 8.0+0.7j]], dtype=dtype)
+            b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
+            gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelss_lwork'),
+                                                  (a1, b1))
+
+            m, n = a1.shape
+            if len(b1.shape) == 2:
+                nrhs = b1.shape[1]
+            else:
+                nrhs = 1
+
+            # Query workspace sizes
+            work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps)
+            lwork = int(np.real(work))
+
+            jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
+            v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps,
+                                        lwork, False, False)
+            assert_allclose(x[:-1],
+                            np.array([1.161753632288328-1.901075709391912j,
+                                      1.735882340522193+1.521240901196909j],
+                                     dtype=dtype),
+                            rtol=25*np.finfo(dtype).eps)
+
+
+@pytest.mark.parametrize('dtype', DTYPES)
+@pytest.mark.parametrize('shape', [(3, 4), (5, 2), (2**18, 2**18)])
+def test_geqrf_lwork(dtype, shape):
+    geqrf_lwork = get_lapack_funcs('geqrf_lwork', dtype=dtype)
+    m, n = shape
+    lwork, info = geqrf_lwork(m=m, n=n)
+    assert_equal(info, 0)
+
+
+class TestRegression:
+
+    def test_ticket_1645(self):
+        # Check that RQ routines have correct lwork
+        for dtype in DTYPES:
+            a = np.zeros((300, 2), dtype=dtype)
+
+            gerqf, = get_lapack_funcs(['gerqf'], [a])
+            assert_raises(Exception, gerqf, a, lwork=2)
+            rq, tau, work, info = gerqf(a)
+
+            if dtype in REAL_DTYPES:
+                orgrq, = get_lapack_funcs(['orgrq'], [a])
+                assert_raises(Exception, orgrq, rq[-2:], tau, lwork=1)
+                orgrq(rq[-2:], tau, lwork=2)
+            elif dtype in COMPLEX_DTYPES:
+                ungrq, = get_lapack_funcs(['ungrq'], [a])
+                assert_raises(Exception, ungrq, rq[-2:], tau, lwork=1)
+                ungrq(rq[-2:], tau, lwork=2)
+
+
+class TestDpotr:
+    def test_gh_2691(self):
+        # 'lower' argument of dpotrf/dpotri
+        for lower in [True, False]:
+            for clean in [True, False]:
+                np.random.seed(42)
+                x = np.random.normal(size=(3, 3))
+                a = x.dot(x.T)
+
+                dpotrf, dpotri = get_lapack_funcs(("potrf", "potri"), (a, ))
+
+                c, info = dpotrf(a, lower, clean=clean)
+                dpt = dpotri(c, lower)[0]
+
+                if lower:
+                    assert_allclose(np.tril(dpt), np.tril(inv(a)))
+                else:
+                    assert_allclose(np.triu(dpt), np.triu(inv(a)))
+
+
+class TestDlasd4:
+    def test_sing_val_update(self):
+
+        sigmas = np.array([4., 3., 2., 0])
+        m_vec = np.array([3.12, 5.7, -4.8, -2.2])
+
+        M = np.hstack((np.vstack((np.diag(sigmas[0:-1]),
+                                  np.zeros((1, len(m_vec) - 1)))),
+                       m_vec[:, np.newaxis]))
+        SM = svd(M, full_matrices=False, compute_uv=False, overwrite_a=False,
+                 check_finite=False)
+
+        it_len = len(sigmas)
+        sgm = np.concatenate((sigmas[::-1], [sigmas[0] + it_len*norm(m_vec)]))
+        mvc = np.concatenate((m_vec[::-1], (0,)))
+
+        lasd4 = get_lapack_funcs('lasd4', (sigmas,))
+
+        roots = []
+        for i in range(0, it_len):
+            res = lasd4(i, sgm, mvc)
+            roots.append(res[1])
+
+            assert_(res[3] <= 0, "LAPACK root finding dlasd4 failed to "
+                    "find the singular value %i" % i)
+        roots = np.array(roots)[::-1]
+
+        assert_(not np.any(np.isnan(roots)), "There are NaN roots")
+        assert_allclose(SM, roots, atol=100*np.finfo(np.float64).eps,
+                        rtol=100*np.finfo(np.float64).eps)
+
+
+class TestTbtrs:
+
+    @pytest.mark.parametrize('dtype', DTYPES)
+    def test_nag_example_f07vef_f07vsf(self, dtype):
+        """Test real (f07vef) and complex (f07vsf) examples from NAG
+
+        Examples available from:
+        * https://www.nag.com/numeric/fl/nagdoc_latest/html/f07/f07vef.html
+        * https://www.nag.com/numeric/fl/nagdoc_latest/html/f07/f07vsf.html
+
+        """
+        if dtype in REAL_DTYPES:
+            ab = np.array([[-4.16, 4.78, 6.32, 0.16],
+                           [-2.25, 5.86, -4.82, 0]],
+                          dtype=dtype)
+            b = np.array([[-16.64, -4.16],
+                          [-13.78, -16.59],
+                          [13.10, -4.94],
+                          [-14.14, -9.96]],
+                         dtype=dtype)
+            x_out = np.array([[4, 1],
+                              [-1, -3],
+                              [3, 2],
+                              [2, -2]],
+                             dtype=dtype)
+        elif dtype in COMPLEX_DTYPES:
+            ab = np.array([[-1.94+4.43j, 4.12-4.27j, 0.43-2.66j, 0.44+0.1j],
+                           [-3.39+3.44j, -1.84+5.52j, 1.74 - 0.04j, 0],
+                           [1.62+3.68j, -2.77-1.93j, 0, 0]],
+                          dtype=dtype)
+            b = np.array([[-8.86 - 3.88j, -24.09 - 5.27j],
+                          [-15.57 - 23.41j, -57.97 + 8.14j],
+                          [-7.63 + 22.78j, 19.09 - 29.51j],
+                          [-14.74 - 2.40j, 19.17 + 21.33j]],
+                         dtype=dtype)
+            x_out = np.array([[2j, 1 + 5j],
+                              [1 - 3j, -7 - 2j],
+                              [-4.001887 - 4.988417j, 3.026830 + 4.003182j],
+                              [1.996158 - 1.045105j, -6.103357 - 8.986653j]],
+                             dtype=dtype)
+        else:
+            raise ValueError(f"Datatype {dtype} not understood.")
+
+        tbtrs = get_lapack_funcs('tbtrs', dtype=dtype)
+        x, info = tbtrs(ab=ab, b=b, uplo='L')
+        assert_equal(info, 0)
+        assert_allclose(x, x_out, rtol=0, atol=1e-5)
+
+    @pytest.mark.parametrize('dtype,trans',
+                             [(dtype, trans)
+                              for dtype in DTYPES for trans in ['N', 'T', 'C']
+                              if not (trans == 'C' and dtype in REAL_DTYPES)])
+    @pytest.mark.parametrize('uplo', ['U', 'L'])
+    @pytest.mark.parametrize('diag', ['N', 'U'])
+    def test_random_matrices(self, dtype, trans, uplo, diag):
+        seed(1724)
+        # n, nrhs, kd are used to specify A and b.
+        # A is of shape n x n with kd super/sub-diagonals
+        # b is an n x nrhs matrix
+        n, nrhs, kd = 4, 3, 2
+        tbtrs = get_lapack_funcs('tbtrs', dtype=dtype)
+
+        is_upper = (uplo == 'U')
+        ku = kd * is_upper
+        kl = kd - ku
+
+        # Construct the diagonal and kd super/sub diagonals of A with
+        # the corresponding offsets.
+        band_offsets = range(ku, -kl - 1, -1)
+        band_widths = [n - abs(x) for x in band_offsets]
+        bands = [generate_random_dtype_array((width,), dtype)
+                 for width in band_widths]
+
+        if diag == 'U':  # A must be unit triangular
+            bands[ku] = np.ones(n, dtype=dtype)
+
+        # Construct the diagonal banded matrix A from the bands and offsets.
+        a = sps.diags(bands, band_offsets, format='dia')
+
+        # Convert A into banded storage form
+        ab = np.zeros((kd + 1, n), dtype)
+        for row, k in enumerate(band_offsets):
+            ab[row, max(k, 0):min(n+k, n)] = a.diagonal(k)
+
+        # The RHS values.
+        b = generate_random_dtype_array((n, nrhs), dtype)
+
+        x, info = tbtrs(ab=ab, b=b, uplo=uplo, trans=trans, diag=diag)
+        assert_equal(info, 0)
+
+        if trans == 'N':
+            assert_allclose(a @ x, b, rtol=5e-5)
+        elif trans == 'T':
+            assert_allclose(a.T @ x, b, rtol=5e-5)
+        elif trans == 'C':
+            assert_allclose(a.H @ x, b, rtol=5e-5)
+        else:
+            raise ValueError('Invalid trans argument')
+
+    @pytest.mark.parametrize('uplo,trans,diag',
+                             [['U', 'N', 'Invalid'],
+                              ['U', 'Invalid', 'N'],
+                              ['Invalid', 'N', 'N']])
+    def test_invalid_argument_raises_exception(self, uplo, trans, diag):
+        """Test if invalid values of uplo, trans and diag raise exceptions"""
+        # Argument checks occur independently of the datatype used.
+        # This means we need not parameterize over all available datatypes.
+        tbtrs = get_lapack_funcs('tbtrs', dtype=np.float64)
+        ab = rand(4, 2)
+        b = rand(2, 4)
+        assert_raises(Exception, tbtrs, ab, b, uplo, trans, diag)
+
+    def test_zero_element_in_diagonal(self):
+        """Test if a matrix with a zero diagonal element is singular
+
+        If the i-th diagonal entry of A is zero, ?tbtrs should return `i` in `info`
+        indicating the provided matrix is singular.
+
+        Note that ?tbtrs requires the matrix A to be stored in banded form.
+        In this form the diagonal corresponds to the last row."""
+        ab = np.ones((3, 4), dtype=float)
+        b = np.ones(4, dtype=float)
+        tbtrs = get_lapack_funcs('tbtrs', dtype=float)
+
+        ab[-1, 3] = 0
+        _, info = tbtrs(ab=ab, b=b, uplo='U')
+        assert_equal(info, 4)
+
+    @pytest.mark.parametrize('ldab,n,ldb,nrhs', [
+                              (5, 5, 0, 5),
+                              (5, 5, 3, 5)
+    ])
+    def test_invalid_matrix_shapes(self, ldab, n, ldb, nrhs):
+        """Test ?tbtrs fails correctly if shapes are invalid."""
+        ab = np.ones((ldab, n), dtype=float)
+        b = np.ones((ldb, nrhs), dtype=float)
+        tbtrs = get_lapack_funcs('tbtrs', dtype=float)
+        assert_raises(Exception, tbtrs, ab, b)
+
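+# Editorial sketch (not part of the original suite), illustrating the banded
+# storage that the tbtrs docstrings above describe: for an upper-triangular
+# band with kd super-diagonals, the main diagonal ends up in the last row
+# of ab. The helper name is illustrative only.
+def _tbtrs_banded_storage_sketch():
+    a = np.array([[1., 2., 0.],
+                  [0., 4., 5.],
+                  [0., 0., 6.]])
+    kd, n = 1, 3
+    ab = np.zeros((kd + 1, n))
+    for row, k in enumerate(range(kd, -1, -1)):
+        # diagonal k of the dense matrix lands in row `row`, shifted right
+        ab[row, k:] = np.diag(a, k)
+    return ab  # array([[0., 2., 5.], [1., 4., 6.]])
+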
+
+def test_lartg():
+    for dtype in 'fdFD':
+        lartg = get_lapack_funcs('lartg', dtype=dtype)
+
+        f = np.array(3, dtype)
+        g = np.array(4, dtype)
+
+        if np.iscomplexobj(g):
+            g *= 1j
+
+        cs, sn, r = lartg(f, g)
+
+        assert_allclose(cs, 3.0/5.0)
+        assert_allclose(r, 5.0)
+
+        if np.iscomplexobj(g):
+            assert_allclose(sn, -4.0j/5.0)
+            assert_(type(r) == complex)
+            assert_(type(cs) == float)
+        else:
+            assert_allclose(sn, 4.0/5.0)
+
+
+def test_rot():
+    # srot, drot from blas and crot and zrot from lapack.
+
+    for dtype in 'fdFD':
+        c = 0.6
+        s = 0.8
+
+        u = np.full(4, 3, dtype)
+        v = np.full(4, 4, dtype)
+        atol = 10**-(np.finfo(dtype).precision-1)
+
+        if dtype in 'fd':
+            rot = get_blas_funcs('rot', dtype=dtype)
+            f = 4
+        else:
+            rot = get_lapack_funcs('rot', dtype=dtype)
+            s *= -1j
+            v *= 1j
+            f = 4j
+
+        assert_allclose(rot(u, v, c, s), [[5, 5, 5, 5],
+                                          [0, 0, 0, 0]], atol=atol)
+        assert_allclose(rot(u, v, c, s, n=2), [[5, 5, 3, 3],
+                                               [0, 0, f, f]], atol=atol)
+        assert_allclose(rot(u, v, c, s, offx=2, offy=2),
+                        [[3, 3, 5, 5], [f, f, 0, 0]], atol=atol)
+        assert_allclose(rot(u, v, c, s, incx=2, offy=2, n=2),
+                        [[5, 3, 5, 3], [f, f, 0, 0]], atol=atol)
+        assert_allclose(rot(u, v, c, s, offx=2, incy=2, n=2),
+                        [[3, 3, 5, 5], [0, f, 0, f]], atol=atol)
+        assert_allclose(rot(u, v, c, s, offx=2, incx=2, offy=2, incy=2, n=1),
+                        [[3, 3, 5, 3], [f, f, 0, f]], atol=atol)
+        assert_allclose(rot(u, v, c, s, incx=-2, incy=-2, n=2),
+                        [[5, 3, 5, 3], [0, f, 0, f]], atol=atol)
+
+        a, b = rot(u, v, c, s, overwrite_x=1, overwrite_y=1)
+        assert_(a is u)
+        assert_(b is v)
+        assert_allclose(a, [5, 5, 5, 5], atol=atol)
+        assert_allclose(b, [0, 0, 0, 0], atol=atol)
+
+
+def test_larfg_larf():
+    np.random.seed(1234)
+    a0 = np.random.random((4, 4))
+    a0 = a0.T.dot(a0)
+
+    a0j = np.random.random((4, 4)) + 1j*np.random.random((4, 4))
+    a0j = a0j.T.conj().dot(a0j)
+
+    # our test here will be to do one step of reducing a hermitian matrix to
+    # tridiagonal form using householder transforms.
+
+    for dtype in 'fdFD':
+        larfg, larf = get_lapack_funcs(['larfg', 'larf'], dtype=dtype)
+
+        if dtype in 'FD':
+            a = a0j.copy()
+        else:
+            a = a0.copy()
+
+        # generate a householder transform to clear a[2:,0]
+        alpha, x, tau = larfg(a.shape[0]-1, a[1, 0], a[2:, 0])
+
+        # create expected output
+        expected = np.zeros_like(a[:, 0])
+        expected[0] = a[0, 0]
+        expected[1] = alpha
+
+        # assemble householder vector
+        v = np.zeros_like(a[1:, 0])
+        v[0] = 1.0
+        v[1:] = x
+
+        # apply transform from the left
+        a[1:, :] = larf(v, tau.conjugate(), a[1:, :], np.zeros(a.shape[1]))
+
+        # apply transform from the right
+        a[:, 1:] = larf(v, tau, a[:, 1:], np.zeros(a.shape[0]), side='R')
+
+        assert_allclose(a[:, 0], expected, atol=1e-5)
+        assert_allclose(a[0, :], expected, atol=1e-5)
+
+
+def test_sgesdd_lwork_bug_workaround():
+    # Test that SGESDD lwork is sufficiently large for LAPACK.
+    #
+    # This checks that _compute_lwork() correctly works around a bug in
+    # LAPACK versions older than 3.10.1.
+
+    sgesdd_lwork = get_lapack_funcs('gesdd_lwork', dtype=np.float32,
+                                    ilp64='preferred')
+    n = 9537
+    lwork = _compute_lwork(sgesdd_lwork, n, n,
+                           compute_uv=True, full_matrices=True)
+    # If we called the Fortran function SGESDD directly with IWORK=-1, the
+    # LAPACK bug would result in lwork being 272929856, which was too small.
+    # (The result was returned in a single precision float, which does not
+    # have sufficient precision to represent the exact integer value that it
+    # computed internally.)  The work-around implemented in _compute_lwork()
+    # will convert that to 272929888.  If we are using LAPACK 3.10.1 or later
+    # (such as in OpenBLAS 0.3.21 or later), the work-around will return
+    # 272929920, because it does not know which version of LAPACK is being
+    # used, so it always applies the correction to whatever it is given.  We
+    # will accept either 272929888 or 272929920.
+    # Note that the acceptable values are a LAPACK implementation detail.
+    # If a future version of LAPACK changes how SGESDD works, and therefore
+    # changes the required LWORK size, the acceptable values might have to
+    # be updated.
+    assert lwork == 272929888 or lwork == 272929920
+
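+# Editorial sketch (simplified; an assumption, not scipy's actual helper):
+# an lwork value that was squeezed through a float32, as described above, can
+# be an underestimate, so bump it to the next representable float32 before
+# truncating to an integer.
+def _round_up_lwork_sketch(value):
+    value = np.float32(value)
+    # np.spacing gives the distance to the next representable float32
+    return int(value + np.spacing(value))
+
+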
+
+class TestSytrd:
+    @pytest.mark.parametrize('dtype', REAL_DTYPES)
+    def test_sytrd_with_zero_dim_array(self, dtype):
+        # Assert that a 0x0 matrix raises an error
+        A = np.zeros((0, 0), dtype=dtype)
+        sytrd = get_lapack_funcs('sytrd', (A,))
+        assert_raises(ValueError, sytrd, A)
+
+    @pytest.mark.parametrize('dtype', REAL_DTYPES)
+    @pytest.mark.parametrize('n', (1, 3))
+    def test_sytrd(self, dtype, n):
+        A = np.zeros((n, n), dtype=dtype)
+
+        sytrd, sytrd_lwork = \
+            get_lapack_funcs(('sytrd', 'sytrd_lwork'), (A,))
+
+        # some upper triangular array
+        A[np.triu_indices_from(A)] = \
+            np.arange(1, n*(n+1)//2+1, dtype=dtype)
+
+        # query lwork
+        lwork, info = sytrd_lwork(n)
+        assert_equal(info, 0)
+
+        # check lower=1 behavior (shouldn't do much since the matrix is
+        # upper triangular)
+        data, d, e, tau, info = sytrd(A, lower=1, lwork=lwork)
+        assert_equal(info, 0)
+
+        assert_allclose(data, A, atol=5*np.finfo(dtype).eps, rtol=1.0)
+        assert_allclose(d, np.diag(A))
+        assert_allclose(e, 0.0)
+        assert_allclose(tau, 0.0)
+
+        # and now for the proper test (lower=0 is the default)
+        data, d, e, tau, info = sytrd(A, lwork=lwork)
+        assert_equal(info, 0)
+
+        # assert Q^T*A*Q = tridiag(e, d, e)
+
+        # build tridiagonal matrix
+        T = np.zeros_like(A, dtype=dtype)
+        k = np.arange(A.shape[0])
+        T[k, k] = d
+        k2 = np.arange(A.shape[0]-1)
+        T[k2+1, k2] = e
+        T[k2, k2+1] = e
+
+        # build Q
+        Q = np.eye(n, n, dtype=dtype)
+        for i in range(n-1):
+            v = np.zeros(n, dtype=dtype)
+            v[:i] = data[:i, i+1]
+            v[i] = 1.0
+            H = np.eye(n, n, dtype=dtype) - tau[i] * np.outer(v, v)
+            Q = np.dot(H, Q)
+
+        # Make matrix fully symmetric
+        i_lower = np.tril_indices(n, -1)
+        A[i_lower] = A.T[i_lower]
+
+        QTAQ = np.dot(Q.T, np.dot(A, Q))
+
+        # disable rtol here since some values in QTAQ and T are very close
+        # to 0.
+        assert_allclose(QTAQ, T, atol=5*np.finfo(dtype).eps, rtol=1.0)
+
+
+class TestHetrd:
+    @pytest.mark.parametrize('complex_dtype', COMPLEX_DTYPES)
+    def test_hetrd_with_zero_dim_array(self, complex_dtype):
+        # Assert that a 0x0 matrix raises an error
+        A = np.zeros((0, 0), dtype=complex_dtype)
+        hetrd = get_lapack_funcs('hetrd', (A,))
+        assert_raises(ValueError, hetrd, A)
+
+    @pytest.mark.parametrize('real_dtype,complex_dtype',
+                             zip(REAL_DTYPES, COMPLEX_DTYPES))
+    @pytest.mark.parametrize('n', (1, 3))
+    def test_hetrd(self, n, real_dtype, complex_dtype):
+        A = np.zeros((n, n), dtype=complex_dtype)
+        hetrd, hetrd_lwork = \
+            get_lapack_funcs(('hetrd', 'hetrd_lwork'), (A,))
+
+        # some upper triangular array
+        A[np.triu_indices_from(A)] = (
+            np.arange(1, n*(n+1)//2+1, dtype=real_dtype)
+            + 1j * np.arange(1, n*(n+1)//2+1, dtype=real_dtype)
+            )
+        np.fill_diagonal(A, np.real(np.diag(A)))
+
+        # test query lwork
+        for x in [0, 1]:
+            _, info = hetrd_lwork(n, lower=x)
+            assert_equal(info, 0)
+        # lwork returns complex which segfaults hetrd call (gh-10388)
+        # use the safe and recommended option
+        lwork = _compute_lwork(hetrd_lwork, n)
+
+        # check lower=1 behavior (shouldn't do much since the matrix is
+        # upper triangular)
+        data, d, e, tau, info = hetrd(A, lower=1, lwork=lwork)
+        assert_equal(info, 0)
+
+        assert_allclose(data, A, atol=5*np.finfo(real_dtype).eps, rtol=1.0)
+
+        assert_allclose(d, np.real(np.diag(A)))
+        assert_allclose(e, 0.0)
+        assert_allclose(tau, 0.0)
+
+        # and now for the proper test (lower=0 is the default)
+        data, d, e, tau, info = hetrd(A, lwork=lwork)
+        assert_equal(info, 0)
+
+        # assert Q^H*A*Q = tridiag(e, d, e)
+
+        # build tridiagonal matrix
+        T = np.zeros_like(A, dtype=real_dtype)
+        k = np.arange(A.shape[0], dtype=int)
+        T[k, k] = d
+        k2 = np.arange(A.shape[0]-1, dtype=int)
+        T[k2+1, k2] = e
+        T[k2, k2+1] = e
+
+        # build Q
+        Q = np.eye(n, n, dtype=complex_dtype)
+        for i in range(n-1):
+            v = np.zeros(n, dtype=complex_dtype)
+            v[:i] = data[:i, i+1]
+            v[i] = 1.0
+            H = np.eye(n, n, dtype=complex_dtype) \
+                - tau[i] * np.outer(v, np.conj(v))
+            Q = np.dot(H, Q)
+
+        # Make matrix fully Hermitian
+        i_lower = np.tril_indices(n, -1)
+        A[i_lower] = np.conj(A.T[i_lower])
+
+        QHAQ = np.dot(np.conj(Q.T), np.dot(A, Q))
+
+        # disable rtol here since some values in QHAQ and T are very close
+        # to 0.
+        assert_allclose(
+            QHAQ, T, atol=10*np.finfo(real_dtype).eps, rtol=1.0
+            )
+
+
+def test_gglse():
+    # Example data taken from NAG manual
+    for ind, dtype in enumerate(DTYPES):
+        # DTYPES =  gglse
+        func, func_lwork = get_lapack_funcs(('gglse', 'gglse_lwork'),
+                                            dtype=dtype)
+        lwork = _compute_lwork(func_lwork, m=6, n=4, p=2)
+        # For gglse (real dtypes)
+        if ind < 2:
+            a = np.array([[-0.57, -1.28, -0.39, 0.25],
+                          [-1.93, 1.08, -0.31, -2.14],
+                          [2.30, 0.24, 0.40, -0.35],
+                          [-1.93, 0.64, -0.66, 0.08],
+                          [0.15, 0.30, 0.15, -2.13],
+                          [-0.02, 1.03, -1.43, 0.50]], dtype=dtype)
+            c = np.array([-1.50, -2.14, 1.23, -0.54, -1.68, 0.82], dtype=dtype)
+            d = np.array([0., 0.], dtype=dtype)
+        # For gglse (complex dtypes)
+        else:
+            a = np.array([[0.96-0.81j, -0.03+0.96j, -0.91+2.06j, -0.05+0.41j],
+                          [-0.98+1.98j, -1.20+0.19j, -0.66+0.42j, -0.81+0.56j],
+                          [0.62-0.46j, 1.01+0.02j, 0.63-0.17j, -1.11+0.60j],
+                          [0.37+0.38j, 0.19-0.54j, -0.98-0.36j, 0.22-0.20j],
+                          [0.83+0.51j, 0.20+0.01j, -0.17-0.46j, 1.47+1.59j],
+                          [1.08-0.28j, 0.20-0.12j, -0.07+1.23j, 0.26+0.26j]])
+            c = np.array([[-2.54+0.09j],
+                          [1.65-2.26j],
+                          [-2.11-3.96j],
+                          [1.82+3.30j],
+                          [-6.41+3.77j],
+                          [2.07+0.66j]])
+            d = np.zeros(2, dtype=dtype)
+
+        b = np.array([[1., 0., -1., 0.], [0., 1., 0., -1.]], dtype=dtype)
+
+        _, _, _, result, _ = func(a, b, c, d, lwork=lwork)
+        if ind < 2:
+            expected = np.array([0.48904455,
+                                 0.99754786,
+                                 0.48904455,
+                                 0.99754786])
+        else:
+            expected = np.array([1.08742917-1.96205783j,
+                                 -0.74093902+3.72973919j,
+                                 1.08742917-1.96205759j,
+                                 -0.74093896+3.72973895j])
+        assert_array_almost_equal(result, expected, decimal=4)
+
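+# Editorial note: ?gglse solves the equality-constrained least squares
+# problem min ||c - A x|| subject to B x = d; here d = 0 and B enforces
+# x[0] == x[2] and x[1] == x[3], which the expected vectors above satisfy.
+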
+
+def test_sycon_hecon():
+    seed(1234)
+    for ind, dtype in enumerate(DTYPES+COMPLEX_DTYPES):
+        # DTYPES + COMPLEX DTYPES =  sycon + hecon
+        n = 10
+        # For sycon
+        if ind < 4:
+            func_lwork = get_lapack_funcs('sytrf_lwork', dtype=dtype)
+            funcon, functrf = get_lapack_funcs(('sycon', 'sytrf'), dtype=dtype)
+            A = (rand(n, n)).astype(dtype)
+        # For hecon
+        else:
+            func_lwork = get_lapack_funcs('hetrf_lwork', dtype=dtype)
+            funcon, functrf = get_lapack_funcs(('hecon', 'hetrf'), dtype=dtype)
+            A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
+
+        # Since sycon only refers to upper/lower part, conj() is safe here.
+        A = (A + A.conj().T)/2 + 2*np.eye(n, dtype=dtype)
+
+        anorm = norm(A, 1)
+        lwork = _compute_lwork(func_lwork, n)
+        ldu, ipiv, _ = functrf(A, lwork=lwork, lower=1)
+        rcond, _ = funcon(a=ldu, ipiv=ipiv, anorm=anorm, lower=1)
+        # The relative error of the condition estimate should stay below 100%
+        assert_(abs(1/rcond - np.linalg.cond(A, p=1))*rcond < 1)
+
+
+def test_sygst():
+    seed(1234)
+    for ind, dtype in enumerate(REAL_DTYPES):
+        # DTYPES =  sygst
+        n = 10
+
+        potrf, sygst, syevd, sygvd = get_lapack_funcs(('potrf', 'sygst',
+                                                       'syevd', 'sygvd'),
+                                                      dtype=dtype)
+
+        A = rand(n, n).astype(dtype)
+        A = (A + A.T)/2
+        # B must be positive definite
+        B = rand(n, n).astype(dtype)
+        B = (B + B.T)/2 + 2 * np.eye(n, dtype=dtype)
+
+        # Perform eig (sygvd)
+        eig_gvd, _, info = sygvd(A, B)
+        assert_(info == 0)
+
+        # Convert to std problem potrf
+        b, info = potrf(B)
+        assert_(info == 0)
+        a, info = sygst(A, b)
+        assert_(info == 0)
+
+        eig, _, info = syevd(a)
+        assert_(info == 0)
+        assert_allclose(eig, eig_gvd, rtol=1e-4)
+
+
+def test_hegst():
+    seed(1234)
+    for ind, dtype in enumerate(COMPLEX_DTYPES):
+        # DTYPES =  hegst
+        n = 10
+
+        potrf, hegst, heevd, hegvd = get_lapack_funcs(('potrf', 'hegst',
+                                                       'heevd', 'hegvd'),
+                                                      dtype=dtype)
+
+        A = rand(n, n).astype(dtype) + 1j * rand(n, n).astype(dtype)
+        A = (A + A.conj().T)/2
+        # B must be positive definite
+        B = rand(n, n).astype(dtype) + 1j * rand(n, n).astype(dtype)
+        B = (B + B.conj().T)/2 + 2 * np.eye(n, dtype=dtype)
+
+        # Perform eig (hegvd)
+        eig_gvd, _, info = hegvd(A, B)
+        assert_(info == 0)
+
+        # Convert to std problem potrf
+        b, info = potrf(B)
+        assert_(info == 0)
+        a, info = hegst(A, b)
+        assert_(info == 0)
+
+        eig, _, info = heevd(a)
+        assert_(info == 0)
+        assert_allclose(eig, eig_gvd, rtol=1e-4)
+
+
+def test_tzrzf():
+    """
+    This test performs an RZ decomposition in which an m x n upper trapezoidal
+    array M (m <= n) is factorized as M = [R 0] * Z where R is upper triangular
+    and Z is unitary.
+    """
+    seed(1234)
+    m, n = 10, 15
+    for ind, dtype in enumerate(DTYPES):
+        tzrzf, tzrzf_lw = get_lapack_funcs(('tzrzf', 'tzrzf_lwork'),
+                                           dtype=dtype)
+        lwork = _compute_lwork(tzrzf_lw, m, n)
+
+        if ind < 2:
+            A = triu(rand(m, n).astype(dtype))
+        else:
+            A = triu((rand(m, n) + rand(m, n)*1j).astype(dtype))
+
+        # assert wrong shape arg, f2py returns generic error
+        assert_raises(Exception, tzrzf, A.T)
+        rz, tau, info = tzrzf(A, lwork=lwork)
+        # Check success
+        assert_(info == 0)
+
+        # Get Z manually for comparison
+        R = np.hstack((rz[:, :m], np.zeros((m, n-m), dtype=dtype)))
+        V = np.hstack((np.eye(m, dtype=dtype), rz[:, m:]))
+        Id = np.eye(n, dtype=dtype)
+        ref = [Id-tau[x]*V[[x], :].T.dot(V[[x], :].conj()) for x in range(m)]
+        Z = reduce(np.dot, ref)
+        assert_allclose(R.dot(Z) - A, zeros_like(A, dtype=dtype),
+                        atol=10*np.spacing(dtype(1.0).real), rtol=0.)
+
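+# Editorial note: the loop above rebuilds Z as the product of the elementary
+# reflectors H_x = I - tau[x] * v_x v_x^H encoded by ?tzrzf; the ormrz/unmrz
+# test further below applies the same Q without forming it explicitly.
+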
+
+def test_tfsm():
+    """
+    Test solving a linear system whose coefficient matrix is a triangular
+    array stored in Rectangular Full Packed (RFP) format.
+    """
+    seed(1234)
+    for ind, dtype in enumerate(DTYPES):
+        n = 20
+        if ind > 1:
+            A = triu(rand(n, n) + rand(n, n)*1j + eye(n)).astype(dtype)
+            trans = 'C'
+        else:
+            A = triu(rand(n, n) + eye(n)).astype(dtype)
+            trans = 'T'
+
+        trttf, tfttr, tfsm = get_lapack_funcs(('trttf', 'tfttr', 'tfsm'),
+                                              dtype=dtype)
+
+        Afp, _ = trttf(A)
+        B = rand(n, 2).astype(dtype)
+        soln = tfsm(-1, Afp, B)
+        assert_array_almost_equal(soln, solve(-A, B),
+                                  decimal=4 if ind % 2 == 0 else 6)
+
+        soln = tfsm(-1, Afp, B, trans=trans)
+        assert_array_almost_equal(soln, solve(-A.conj().T, B),
+                                  decimal=4 if ind % 2 == 0 else 6)
+
+        # Make A unit diagonal
+        A[np.arange(n), np.arange(n)] = dtype(1.)
+        soln = tfsm(-1, Afp, B, trans=trans, diag='U')
+        assert_array_almost_equal(soln, solve(-A.conj().T, B),
+                                  decimal=4 if ind % 2 == 0 else 6)
+
+        # Change side
+        B2 = rand(3, n).astype(dtype)
+        soln = tfsm(-1, Afp, B2, trans=trans, diag='U', side='R')
+        assert_array_almost_equal(soln, solve(-A, B2.T).conj().T,
+                                  decimal=4 if ind % 2 == 0 else 6)
+
+
+def test_ormrz_unmrz():
+    """
+    This test performs a matrix multiplication with an arbitrary m x n matrix C
+    and a unitary matrix Q without explicitly forming the array. The array data
+    is encoded in the rectangular part of A, which is obtained from ?TZRZF. The
+    size of Q is inferred from the m, n, and side keywords.
+    """
+    seed(1234)
+    qm, qn, cn = 10, 15, 15
+    for ind, dtype in enumerate(DTYPES):
+        tzrzf, tzrzf_lw = get_lapack_funcs(('tzrzf', 'tzrzf_lwork'),
+                                           dtype=dtype)
+        lwork_rz = _compute_lwork(tzrzf_lw, qm, qn)
+
+        if ind < 2:
+            A = triu(rand(qm, qn).astype(dtype))
+            C = rand(cn, cn).astype(dtype)
+            orun_mrz, orun_mrz_lw = get_lapack_funcs(('ormrz', 'ormrz_lwork'),
+                                                     dtype=dtype)
+        else:
+            A = triu((rand(qm, qn) + rand(qm, qn)*1j).astype(dtype))
+            C = (rand(cn, cn) + rand(cn, cn)*1j).astype(dtype)
+            orun_mrz, orun_mrz_lw = get_lapack_funcs(('unmrz', 'unmrz_lwork'),
+                                                     dtype=dtype)
+
+        lwork_mrz = _compute_lwork(orun_mrz_lw, cn, cn)
+        rz, tau, info = tzrzf(A, lwork=lwork_rz)
+
+        # Get Q manually for comparison
+        V = np.hstack((np.eye(qm, dtype=dtype), rz[:, qm:]))
+        Id = np.eye(qn, dtype=dtype)
+        ref = [Id-tau[x]*V[[x], :].T.dot(V[[x], :].conj()) for x in range(qm)]
+        Q = reduce(np.dot, ref)
+
+        # Now that we have Q, we can test whether lapack results agree with
+        # each case of CQ, CQ^H, QC, and QC^H
+        trans = 'T' if ind < 2 else 'C'
+        tol = 10*np.spacing(dtype(1.0).real)
+
+        cq, info = orun_mrz(rz, tau, C, lwork=lwork_mrz)
+        assert_(info == 0)
+        assert_allclose(cq - Q.dot(C), zeros_like(C), atol=tol, rtol=0.)
+
+        cq, info = orun_mrz(rz, tau, C, trans=trans, lwork=lwork_mrz)
+        assert_(info == 0)
+        assert_allclose(cq - Q.conj().T.dot(C), zeros_like(C), atol=tol,
+                        rtol=0.)
+
+        cq, info = orun_mrz(rz, tau, C, side='R', lwork=lwork_mrz)
+        assert_(info == 0)
+        assert_allclose(cq - C.dot(Q), zeros_like(C), atol=tol, rtol=0.)
+
+        cq, info = orun_mrz(rz, tau, C, side='R', trans=trans, lwork=lwork_mrz)
+        assert_(info == 0)
+        assert_allclose(cq - C.dot(Q.conj().T), zeros_like(C), atol=tol,
+                        rtol=0.)
+
+
+def test_tfttr_trttf():
+    """
+    Test conversion routines between the Rectangular Full Packed (RFP) format
+    and the Standard Triangular Array (TR) format
+    """
+    seed(1234)
+    for ind, dtype in enumerate(DTYPES):
+        n = 20
+        if ind > 1:
+            A_full = (rand(n, n) + rand(n, n)*1j).astype(dtype)
+            transr = 'C'
+        else:
+            A_full = (rand(n, n)).astype(dtype)
+            transr = 'T'
+
+        trttf, tfttr = get_lapack_funcs(('trttf', 'tfttr'), dtype=dtype)
+        A_tf_U, info = trttf(A_full)
+        assert_(info == 0)
+        A_tf_L, info = trttf(A_full, uplo='L')
+        assert_(info == 0)
+        A_tf_U_T, info = trttf(A_full, transr=transr, uplo='U')
+        assert_(info == 0)
+        A_tf_L_T, info = trttf(A_full, transr=transr, uplo='L')
+        assert_(info == 0)
+
+        # Create the RFP array manually (n is even!)
+        A_tf_U_m = zeros((n+1, n//2), dtype=dtype)
+        A_tf_U_m[:-1, :] = triu(A_full)[:, n//2:]
+        A_tf_U_m[n//2+1:, :] += triu(A_full)[:n//2, :n//2].conj().T
+
+        A_tf_L_m = zeros((n+1, n//2), dtype=dtype)
+        A_tf_L_m[1:, :] = tril(A_full)[:, :n//2]
+        A_tf_L_m[:n//2, :] += tril(A_full)[n//2:, n//2:].conj().T
+
+        assert_array_almost_equal(A_tf_U, A_tf_U_m.reshape(-1, order='F'))
+        assert_array_almost_equal(A_tf_U_T,
+                                  A_tf_U_m.conj().T.reshape(-1, order='F'))
+
+        assert_array_almost_equal(A_tf_L, A_tf_L_m.reshape(-1, order='F'))
+        assert_array_almost_equal(A_tf_L_T,
+                                  A_tf_L_m.conj().T.reshape(-1, order='F'))
+
+        # Get the original array from RFP
+        A_tr_U, info = tfttr(n, A_tf_U)
+        assert_(info == 0)
+        A_tr_L, info = tfttr(n, A_tf_L, uplo='L')
+        assert_(info == 0)
+        A_tr_U_T, info = tfttr(n, A_tf_U_T, transr=transr, uplo='U')
+        assert_(info == 0)
+        A_tr_L_T, info = tfttr(n, A_tf_L_T, transr=transr, uplo='L')
+        assert_(info == 0)
+
+        assert_array_almost_equal(A_tr_U, triu(A_full))
+        assert_array_almost_equal(A_tr_U_T, triu(A_full))
+        assert_array_almost_equal(A_tr_L, tril(A_full))
+        assert_array_almost_equal(A_tr_L_T, tril(A_full))
+
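+# Editorial note: for even n the RFP layout packs the trailing columns of the
+# n x n triangle into an (n+1) x (n/2) rectangle and fills the unused corner
+# with the conjugate transpose of the leading block, as built manually above.
+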
+
+def test_tpttr_trttp():
+    """
+    Test conversion routines between the Standard Packed (TP) format and the
+    Standard Triangular Array (TR) format
+    """
+    seed(1234)
+    for ind, dtype in enumerate(DTYPES):
+        n = 20
+        if ind > 1:
+            A_full = (rand(n, n) + rand(n, n)*1j).astype(dtype)
+        else:
+            A_full = (rand(n, n)).astype(dtype)
+
+        trttp, tpttr = get_lapack_funcs(('trttp', 'tpttr'), dtype=dtype)
+        A_tp_U, info = trttp(A_full)
+        assert_(info == 0)
+        A_tp_L, info = trttp(A_full, uplo='L')
+        assert_(info == 0)
+
+        # Create the TP array manually
+        inds = tril_indices(n)
+        A_tp_U_m = zeros(n*(n+1)//2, dtype=dtype)
+        A_tp_U_m[:] = (triu(A_full).T)[inds]
+
+        inds = triu_indices(n)
+        A_tp_L_m = zeros(n*(n+1)//2, dtype=dtype)
+        A_tp_L_m[:] = (tril(A_full).T)[inds]
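+        # (TP storage packs the triangle column by column into a vector of
+        # length n*(n+1)/2; indexing the transposed triangle with
+        # tril/triu_indices reproduces exactly that traversal)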
+
+        assert_array_almost_equal(A_tp_U, A_tp_U_m)
+        assert_array_almost_equal(A_tp_L, A_tp_L_m)
+
+        # Get the original array from TP
+        A_tr_U, info = tpttr(n, A_tp_U)
+        assert_(info == 0)
+        A_tr_L, info = tpttr(n, A_tp_L, uplo='L')
+        assert_(info == 0)
+
+        assert_array_almost_equal(A_tr_U, triu(A_full))
+        assert_array_almost_equal(A_tr_L, tril(A_full))
+
+
+def test_pftrf():
+    """
+    Test Cholesky factorization of a positive definite Rectangular Full
+    Packed (RFP) format array
+    """
+    seed(1234)
+    for ind, dtype in enumerate(DTYPES):
+        n = 20
+        if ind > 1:
+            A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
+            A = A + A.conj().T + n*eye(n)
+        else:
+            A = (rand(n, n)).astype(dtype)
+            A = A + A.T + n*eye(n)
+
+        pftrf, trttf, tfttr = get_lapack_funcs(('pftrf', 'trttf', 'tfttr'),
+                                               dtype=dtype)
+
+        # Convert A to the RFP form
+        Afp, info = trttf(A)
+        Achol_rfp, info = pftrf(n, Afp)
+        assert_(info == 0)
+        A_chol_r, _ = tfttr(n, Achol_rfp)
+        Achol = cholesky(A)
+        assert_array_almost_equal(A_chol_r, Achol)
+
+
+def test_pftri():
+    """
+    Test computing the inverse of a positive definite matrix in Rectangular
+    Full Packed (RFP) format from its Cholesky factorization
+    """
+    seed(1234)
+    for ind, dtype in enumerate(DTYPES):
+        n = 20
+        if ind > 1:
+            A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
+            A = A + A.conj().T + n*eye(n)
+        else:
+            A = (rand(n, n)).astype(dtype)
+            A = A + A.T + n*eye(n)
+
+        pftri, pftrf, trttf, tfttr = get_lapack_funcs(('pftri',
+                                                       'pftrf',
+                                                       'trttf',
+                                                       'tfttr'),
+                                                      dtype=dtype)
+
+        # Convert A to the RFP form
+        Afp, info = trttf(A)
+        A_chol_rfp, info = pftrf(n, Afp)
+        A_inv_rfp, info = pftri(n, A_chol_rfp)
+        assert_(info == 0)
+        A_inv_r, _ = tfttr(n, A_inv_rfp)
+        Ainv = inv(A)
+        assert_array_almost_equal(A_inv_r, triu(Ainv),
+                                  decimal=4 if ind % 2 == 0 else 6)
+
+
+def test_pftrs():
+    """
+    Test solving a linear system via the Cholesky factorization of a
+    positive definite Rectangular Full Packed (RFP) format array
+    """
+    seed(1234)
+    for ind, dtype in enumerate(DTYPES):
+        n = 20
+        if ind > 1:
+            A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
+            A = A + A.conj().T + n*eye(n)
+        else:
+            A = (rand(n, n)).astype(dtype)
+            A = A + A.T + n*eye(n)
+
+        B = ones((n, 3), dtype=dtype)
+        Bf1 = ones((n+2, 3), dtype=dtype)
+        Bf2 = ones((n-2, 3), dtype=dtype)
+        pftrs, pftrf, trttf, tfttr = get_lapack_funcs(('pftrs',
+                                                       'pftrf',
+                                                       'trttf',
+                                                       'tfttr'),
+                                                      dtype=dtype)
+
+        # Convert A to the RFP form
+        Afp, info = trttf(A)
+        A_chol_rfp, info = pftrf(n, Afp)
+        # larger B arrays shouldn't segfault; undersized ones should raise
+        soln, info = pftrs(n, A_chol_rfp, Bf1)
+        assert_(info == 0)
+        assert_raises(Exception, pftrs, n, A_chol_rfp, Bf2)
+        soln, info = pftrs(n, A_chol_rfp, B)
+        assert_(info == 0)
+        assert_array_almost_equal(solve(A, B), soln,
+                                  decimal=4 if ind % 2 == 0 else 6)
+
+
+def test_sfrk_hfrk():
+    """
+    Test performing a symmetric (or Hermitian) rank-k update on a matrix in
+    RFP format.
+    """
+    seed(1234)
+    for ind, dtype in enumerate(DTYPES):
+        n = 20
+        if ind > 1:
+            A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
+            A = A + A.conj().T + n*eye(n)
+        else:
+            A = (rand(n, n)).astype(dtype)
+            A = A + A.T + n*eye(n)
+
+        prefix = 's' if ind < 2 else 'h'
+        trttf, tfttr, shfrk = get_lapack_funcs(
+            ('trttf', 'tfttr', '{}frk'.format(prefix)), dtype=dtype)
+
+        Afp, _ = trttf(A)
+        C = np.random.rand(n, 2).astype(dtype)
+        Afp_out = shfrk(n, 2, -1, C, 2, Afp)
+        A_out, _ = tfttr(n, Afp_out)
+        assert_array_almost_equal(A_out, triu(-C.dot(C.conj().T) + 2*A),
+                                  decimal=4 if ind % 2 == 0 else 6)
+
+
+def test_syconv():
+    """
+    Test converting between the factored form returned by ?he/sytrf and the
+    explicit L and D factors / permutations.
+    """
+    seed(1234)
+    for ind, dtype in enumerate(DTYPES):
+        n = 10
+
+        if ind > 1:
+            A = (randint(-30, 30, (n, n)) +
+                 randint(-30, 30, (n, n))*1j).astype(dtype)
+
+            A = A + A.conj().T
+        else:
+            A = randint(-30, 30, (n, n)).astype(dtype)
+            A = A + A.T + n*eye(n)
+
+        tol = 100*np.spacing(dtype(1.0).real)
+        syconv, trf, trf_lwork = get_lapack_funcs(('syconv', 'sytrf',
+                                                   'sytrf_lwork'), dtype=dtype)
+        lw = _compute_lwork(trf_lwork, n, lower=1)
+        L, D, perm = ldl(A, lower=1, hermitian=False)
+        ldu, ipiv, info = trf(A, lower=1, lwork=lw)
+        a, e, info = syconv(ldu, ipiv, lower=1)
+        assert_allclose(tril(a, -1), tril(L[perm, :], -1), atol=tol, rtol=0.)
+
+        # Test also upper
+        U, D, perm = ldl(A, lower=0, hermitian=False)
+        ldu, ipiv, info = trf(A, lower=0)
+        a, e, info = syconv(ldu, ipiv, lower=0)
+        assert_allclose(triu(a, 1), triu(U[perm, :], 1), atol=tol, rtol=0.)
+
+
+class TestBlockedQR:
+    """
+    Tests for the blocked QR factorization, namely through the geqrt, gemqrt,
+    tpqrt and tpmqrt routines.
+    """
+
+    def test_geqrt_gemqrt(self):
+        seed(1234)
+        for ind, dtype in enumerate(DTYPES):
+            n = 20
+
+            if ind > 1:
+                A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
+            else:
+                A = (rand(n, n)).astype(dtype)
+
+            tol = 100*np.spacing(dtype(1.0).real)
+            geqrt, gemqrt = get_lapack_funcs(('geqrt', 'gemqrt'), dtype=dtype)
+
+            a, t, info = geqrt(n, A)
+            assert info == 0
+
+            # Extract elementary reflectors from lower triangle, adding the
+            # main diagonal of ones.
+            v = np.tril(a, -1) + np.eye(n, dtype=dtype)
+            # Generate the block Householder transform I - VTV^H
+            Q = np.eye(n, dtype=dtype) - v @ t @ v.T.conj()
+            R = np.triu(a)
+
+            # Test columns of Q are orthogonal
+            assert_allclose(Q.T.conj() @ Q, np.eye(n, dtype=dtype), atol=tol,
+                            rtol=0.)
+            assert_allclose(Q @ R, A, atol=tol, rtol=0.)
+
+            if ind > 1:
+                C = (rand(n, n) + rand(n, n)*1j).astype(dtype)
+                transpose = 'C'
+            else:
+                C = (rand(n, n)).astype(dtype)
+                transpose = 'T'
+
+            for side in ('L', 'R'):
+                for trans in ('N', transpose):
+                    c, info = gemqrt(a, t, C, side=side, trans=trans)
+                    assert info == 0
+
+                    if trans == transpose:
+                        q = Q.T.conj()
+                    else:
+                        q = Q
+
+                    if side == 'L':
+                        qC = q @ C
+                    else:
+                        qC = C @ q
+
+                    assert_allclose(c, qC, atol=tol, rtol=0.)
+
+                    # Test default arguments
+                    if (side, trans) == ('L', 'N'):
+                        c_default, info = gemqrt(a, t, C)
+                        assert info == 0
+                        assert_equal(c_default, c)
+
+            # Test invalid side/trans
+            assert_raises(Exception, gemqrt, a, t, C, side='A')
+            assert_raises(Exception, gemqrt, a, t, C, trans='A')
+
+    def test_tpqrt_tpmqrt(self):
+        seed(1234)
+        for ind, dtype in enumerate(DTYPES):
+            n = 20
+
+            if ind > 1:
+                A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
+                B = (rand(n, n) + rand(n, n)*1j).astype(dtype)
+            else:
+                A = (rand(n, n)).astype(dtype)
+                B = (rand(n, n)).astype(dtype)
+
+            tol = 100*np.spacing(dtype(1.0).real)
+            tpqrt, tpmqrt = get_lapack_funcs(('tpqrt', 'tpmqrt'), dtype=dtype)
+
+            # Test for the range of pentagonal B, from square to upper
+            # triangular
+            for l in (0, n // 2, n):
+                a, b, t, info = tpqrt(l, n, A, B)
+                assert info == 0
+
+                # Check that lower triangular part of A has not been modified
+                assert_equal(np.tril(a, -1), np.tril(A, -1))
+                # Check that elements not part of the pentagonal portion of B
+                # have not been modified.
+                assert_equal(np.tril(b, l - n - 1), np.tril(B, l - n - 1))
+
+                # Extract pentagonal portion of B
+                B_pent, b_pent = np.triu(B, l - n), np.triu(b, l - n)
+
+                # Generate elementary reflectors
+                v = np.concatenate((np.eye(n, dtype=dtype), b_pent))
+                # Generate the block Householder transform I - VTV^H
+                Q = np.eye(2 * n, dtype=dtype) - v @ t @ v.T.conj()
+                R = np.concatenate((np.triu(a), np.zeros_like(a)))
+
+                # Test columns of Q are orthogonal
+                assert_allclose(Q.T.conj() @ Q, np.eye(2 * n, dtype=dtype),
+                                atol=tol, rtol=0.)
+                assert_allclose(Q @ R, np.concatenate((np.triu(A), B_pent)),
+                                atol=tol, rtol=0.)
+
+                if ind > 1:
+                    C = (rand(n, n) + rand(n, n)*1j).astype(dtype)
+                    D = (rand(n, n) + rand(n, n)*1j).astype(dtype)
+                    transpose = 'C'
+                else:
+                    C = (rand(n, n)).astype(dtype)
+                    D = (rand(n, n)).astype(dtype)
+                    transpose = 'T'
+
+                for side in ('L', 'R'):
+                    for trans in ('N', transpose):
+                        c, d, info = tpmqrt(l, b, t, C, D, side=side,
+                                            trans=trans)
+                        assert info == 0
+
+                        if trans == transpose:
+                            q = Q.T.conj()
+                        else:
+                            q = Q
+
+                        if side == 'L':
+                            cd = np.concatenate((c, d), axis=0)
+                            CD = np.concatenate((C, D), axis=0)
+                            qCD = q @ CD
+                        else:
+                            cd = np.concatenate((c, d), axis=1)
+                            CD = np.concatenate((C, D), axis=1)
+                            qCD = CD @ q
+
+                        assert_allclose(cd, qCD, atol=tol, rtol=0.)
+
+                        if (side, trans) == ('L', 'N'):
+                            c_default, d_default, info = tpmqrt(l, b, t, C, D)
+                            assert info == 0
+                            assert_equal(c_default, c)
+                            assert_equal(d_default, d)
+
+                # Test invalid side/trans
+                assert_raises(Exception, tpmqrt, l, b, t, C, D, side='A')
+                assert_raises(Exception, tpmqrt, l, b, t, C, D, trans='A')
+
+
+def test_pstrf():
+    seed(1234)
+    for ind, dtype in enumerate(DTYPES):
+        # DTYPES = <s, d, c, z> pstrf
+        n = 10
+        r = 2
+        pstrf = get_lapack_funcs('pstrf', dtype=dtype)
+
+        # Create positive semidefinite A
+        if ind > 1:
+            A = rand(n, n-r).astype(dtype) + 1j * rand(n, n-r).astype(dtype)
+            A = A @ A.conj().T
+        else:
+            A = rand(n, n-r).astype(dtype)
+            A = A @ A.T
+
+        c, piv, r_c, info = pstrf(A)
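+        # pstrf returns a pivoted Cholesky factor of numerical rank r_c;
+        # entries beyond that rank are not referenced, so zero them out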
+        U = triu(c)
+        U[r_c - n:, r_c - n:] = 0.
+
+        assert_equal(info, 1)
+        # python-dbg 3.5.2 runs cause trouble with the following assertion.
+        # assert_equal(r_c, n - r)
+        single_atol = 1000 * np.finfo(np.float32).eps
+        double_atol = 1000 * np.finfo(np.float64).eps
+        atol = single_atol if ind in [0, 2] else double_atol
+        assert_allclose(A[piv-1][:, piv-1], U.conj().T @ U, rtol=0., atol=atol)
+
+        c, piv, r_c, info = pstrf(A, lower=1)
+        L = tril(c)
+        L[r_c - n:, r_c - n:] = 0.
+
+        assert_equal(info, 1)
+        # assert_equal(r_c, n - r)
+        single_atol = 1000 * np.finfo(np.float32).eps
+        double_atol = 1000 * np.finfo(np.float64).eps
+        atol = single_atol if ind in [0, 2] else double_atol
+        assert_allclose(A[piv-1][:, piv-1], L @ L.conj().T, rtol=0., atol=atol)
+
+
+def test_pstf2():
+    seed(1234)
+    for ind, dtype in enumerate(DTYPES):
+        # DTYPES = <s, d, c, z> pstf2
+        n = 10
+        r = 2
+        pstf2 = get_lapack_funcs('pstf2', dtype=dtype)
+
+        # Create positive semidefinite A
+        if ind > 1:
+            A = rand(n, n-r).astype(dtype) + 1j * rand(n, n-r).astype(dtype)
+            A = A @ A.conj().T
+        else:
+            A = rand(n, n-r).astype(dtype)
+            A = A @ A.T
+
+        c, piv, r_c, info = pstf2(A)
+        U = triu(c)
+        U[r_c - n:, r_c - n:] = 0.
+
+        assert_equal(info, 1)
+        # python-dbg 3.5.2 runs cause trouble with the commented assertions.
+        # assert_equal(r_c, n - r)
+        single_atol = 1000 * np.finfo(np.float32).eps
+        double_atol = 1000 * np.finfo(np.float64).eps
+        atol = single_atol if ind in [0, 2] else double_atol
+        assert_allclose(A[piv-1][:, piv-1], U.conj().T @ U, rtol=0., atol=atol)
+
+        c, piv, r_c, info = pstf2(A, lower=1)
+        L = tril(c)
+        L[r_c - n:, r_c - n:] = 0.
+
+        assert_equal(info, 1)
+        # assert_equal(r_c, n - r)
+        single_atol = 1000 * np.finfo(np.float32).eps
+        double_atol = 1000 * np.finfo(np.float64).eps
+        atol = single_atol if ind in [0, 2] else double_atol
+        assert_allclose(A[piv-1][:, piv-1], L @ L.conj().T, rtol=0., atol=atol)
+
+
+def test_geequ():
+    desired_real = np.array([[0.6250, 1.0000, 0.0393, -0.4269],
+                             [1.0000, -0.5619, -1.0000, -1.0000],
+                             [0.5874, -1.0000, -0.0596, -0.5341],
+                             [-1.0000, -0.5946, -0.0294, 0.9957]])
+
+    desired_cplx = np.array([[-0.2816+0.5359*1j,
+                              0.0812+0.9188*1j,
+                              -0.7439-0.2561*1j],
+                             [-0.3562-0.2954*1j,
+                              0.9566-0.0434*1j,
+                              -0.0174+0.1555*1j],
+                             [0.8607+0.1393*1j,
+                              -0.2759+0.7241*1j,
+                              -0.1642-0.1365*1j]])
+
+    for ind, dtype in enumerate(DTYPES):
+        if ind < 2:
+            # Use examples from the NAG documentation
+            A = np.array([[1.80e+10, 2.88e+10, 2.05e+00, -8.90e+09],
+                          [5.25e+00, -2.95e+00, -9.50e-09, -3.80e+00],
+                          [1.58e+00, -2.69e+00, -2.90e-10, -1.04e+00],
+                          [-1.11e+00, -6.60e-01, -5.90e-11, 8.00e-01]])
+            A = A.astype(dtype)
+        else:
+            A = np.array([[-1.34e+00, 0.28e+10, -6.39e+00],
+                          [-1.70e+00, 3.31e+10, -0.15e+00],
+                          [2.41e-10, -0.56e+00, -0.83e-10]], dtype=dtype)
+            A += np.array([[2.55e+00, 3.17e+10, -2.20e+00],
+                           [-1.41e+00, -0.15e+10, 1.34e+00],
+                           [0.39e-10, 1.47e+00, -0.69e-10]])*1j
+
+            A = A.astype(dtype)
+
+        geequ = get_lapack_funcs('geequ', dtype=dtype)
+        r, c, rowcnd, colcnd, amax, info = geequ(A)
+
+        if ind < 2:
+            assert_allclose(desired_real.astype(dtype), r[:, None]*A*c,
+                            rtol=0, atol=1e-4)
+        else:
+            assert_allclose(desired_cplx.astype(dtype), r[:, None]*A*c,
+                            rtol=0, atol=1e-4)
+
+
+def test_syequb():
+    desired_log2s = np.array([0, 0, 0, 0, 0, 0, -1, -1, -2, -3])
+
+    for ind, dtype in enumerate(DTYPES):
+        A = np.eye(10, dtype=dtype)
+        alpha = dtype(1. if ind < 2 else 1.j)
+        d = np.array([alpha * 2.**x for x in range(-5, 5)], dtype=dtype)
+        A += np.rot90(np.diag(d))
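+        # np.rot90 puts the scaled powers of two on the antidiagonal, which
+        # produces the equilibration scalings listed in desired_log2s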
+
+        syequb = get_lapack_funcs('syequb', dtype=dtype)
+        s, scond, amax, info = syequb(A)
+
+        assert_equal(np.log2(s).astype(int), desired_log2s)
+
+
+@pytest.mark.skipif(True,
+                    reason="Failing on some OpenBLAS version, see gh-12276")
+def test_heequb():
+    # zheequb has a bug for versions <= LAPACK 3.9.0
+    # See Reference-LAPACK gh-61 and gh-408
+    # Hence the zheequb test is customized accordingly to avoid
+    # work scaling.
+    A = np.diag([2]*5 + [1002]*5) + np.diag(np.ones(9), k=1)*1j
+    s, scond, amax, info = lapack.zheequb(A)
+    assert_equal(info, 0)
+    assert_allclose(np.log2(s), [0., -1.]*2 + [0.] + [-4]*5)
+
+    A = np.diag(2**np.abs(np.arange(-5, 6)) + 0j)
+    A[5, 5] = 1024
+    A[5, 0] = 16j
+    s, scond, amax, info = lapack.cheequb(A.astype(np.complex64), lower=1)
+    assert_equal(info, 0)
+    assert_allclose(np.log2(s), [-2, -1, -1, 0, 0, -5, 0, -1, -1, -2, -2])
+
+
+def test_getc2_gesc2():
+    np.random.seed(42)
+    n = 10
+    desired_real = np.random.rand(n)
+    desired_cplx = np.random.rand(n) + np.random.rand(n)*1j
+
+    for ind, dtype in enumerate(DTYPES):
+        if ind < 2:
+            A = np.random.rand(n, n)
+            A = A.astype(dtype)
+            b = A @ desired_real
+            b = b.astype(dtype)
+        else:
+            A = np.random.rand(n, n) + np.random.rand(n, n)*1j
+            A = A.astype(dtype)
+            b = A @ desired_cplx
+            b = b.astype(dtype)
+
+        getc2 = get_lapack_funcs('getc2', dtype=dtype)
+        gesc2 = get_lapack_funcs('gesc2', dtype=dtype)
+        lu, ipiv, jpiv, info = getc2(A, overwrite_a=0)
+        x, scale = gesc2(lu, b, ipiv, jpiv, overwrite_rhs=0)
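+        # gesc2 solves A @ x = scale * b, with scale chosen to prevent
+        # overflow, so divide the result by scale before comparing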
+
+        if ind < 2:
+            assert_array_almost_equal(desired_real.astype(dtype),
+                                      x/scale, decimal=4)
+        else:
+            assert_array_almost_equal(desired_cplx.astype(dtype),
+                                      x/scale, decimal=4)
+
+
+@pytest.mark.parametrize('size', [(6, 5), (5, 5)])
+@pytest.mark.parametrize('dtype', REAL_DTYPES)
+@pytest.mark.parametrize('joba', range(6))  # 'C', 'E', 'F', 'G', 'A', 'R'
+@pytest.mark.parametrize('jobu', range(4))  # 'U', 'F', 'W', 'N'
+@pytest.mark.parametrize('jobv', range(4))  # 'V', 'J', 'W', 'N'
+@pytest.mark.parametrize('jobr', [0, 1])
+@pytest.mark.parametrize('jobp', [0, 1])
+def test_gejsv_general(size, dtype, joba, jobu, jobv, jobr, jobp, jobt=0):
+    """Test the lapack routine ?gejsv.
+
+    This function tests that a singular value decomposition can be performed
+    on the random M-by-N matrix A. The test performs the SVD using ?gejsv
+    then performs the following checks:
+
+    * ?gejsv exits successfully (info == 0)
+    * The returned singular values are correct
+    * `A` can be reconstructed from `u`, `SIGMA`, `v`
+    * Ensure that u.T @ u is the identity matrix
+    * Ensure that v.T @ v is the identity matrix
+    * The reported matrix rank is correct
+    * The reported number of non-zero singular values is correct
+    * Whether denormalized floats are required is reported correctly
+
+    Notes
+    -----
+    joba specifies several choices affecting the calculation's accuracy.
+    Although all arguments are tested, the tests only check that the correct
+    solution is returned - NOT that the prescribed actions are performed
+    internally.
+
+    jobt is, as of LAPACK v3.9.0, still experimental, so it is fixed to 0 here
+    to cut down the number of test cases; the keyword itself is tested
+    separately.
+    """
+    seed(42)
+
+    # Define some constants for later use:
+    m, n = size
+    atol = 100 * np.finfo(dtype).eps
+    A = generate_random_dtype_array(size, dtype)
+    gejsv = get_lapack_funcs('gejsv', dtype=dtype)
+
+    # Set up checks for invalid job? combinations
+    # if an invalid combination occurs we set the appropriate
+    # exit status.
+    lsvec = jobu < 2  # Calculate left singular vectors
+    rsvec = jobv < 2  # Calculate right singular vectors
+    l2tran = (jobt == 1) and (m == n)
+    is_complex = np.iscomplexobj(A)
+
+    invalid_real_jobv = (jobv == 1) and (not lsvec) and (not is_complex)
+    invalid_cplx_jobu = (jobu == 2) and not (rsvec and l2tran) and is_complex
+    invalid_cplx_jobv = (jobv == 2) and not (lsvec and l2tran) and is_complex
+
+    # Set the exit status to the expected value.
+    # Here we only check for invalid combinations, not individual
+    # parameters.
+    if invalid_cplx_jobu:
+        exit_status = -2
+    elif invalid_real_jobv or invalid_cplx_jobv:
+        exit_status = -3
+    else:
+        exit_status = 0
+
+    if (jobu > 1) and (jobv == 1):
+        assert_raises(Exception, gejsv, A, joba, jobu, jobv, jobr, jobt, jobp)
+    else:
+        sva, u, v, work, iwork, info = gejsv(A,
+                                             joba=joba,
+                                             jobu=jobu,
+                                             jobv=jobv,
+                                             jobr=jobr,
+                                             jobt=jobt,
+                                             jobp=jobp)
+
+        # Check that ?gejsv exited successfully/as expected
+        assert_equal(info, exit_status)
+
+        # If exit_status is non-zero the combination of jobs is invalid.
+        # We test this above but no calculations are performed.
+        if not exit_status:
+
+            # Check the returned singular values
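+            # (per the ?gejsv docs, sva is returned scaled; multiplying by
+            # work[0]/work[1] recovers the actual singular values)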
+            sigma = (work[0] / work[1]) * sva[:n]
+            assert_allclose(sigma, svd(A, compute_uv=False), atol=atol)
+
+            if jobu == 1:
+                # If JOBU = 'F', then u contains the M-by-M matrix of
+                # the left singular vectors, including an orthonormal basis
+                # of the orthogonal complement of Range(A).
+                # However, to recalculate A we are concerned about the
+                # first n singular values and so can ignore the latter.
+                # TODO: Add a test for ONB?
+                u = u[:, :n]
+
+            if lsvec and rsvec:
+                assert_allclose(u @ np.diag(sigma) @ v.conj().T, A, atol=atol)
+            if lsvec:
+                assert_allclose(u.conj().T @ u, np.identity(n), atol=atol)
+            if rsvec:
+                assert_allclose(v.conj().T @ v, np.identity(n), atol=atol)
+
+            assert_equal(iwork[0], np.linalg.matrix_rank(A))
+            assert_equal(iwork[1], np.count_nonzero(sigma))
+            # iwork[2] is non-zero if requested accuracy is not warranted for
+            # the data. This should never occur for these tests.
+            assert_equal(iwork[2], 0)
+
+
+@pytest.mark.parametrize('dtype', REAL_DTYPES)
+def test_gejsv_edge_arguments(dtype):
+    """Test edge arguments return expected status"""
+    gejsv = get_lapack_funcs('gejsv', dtype=dtype)
+
+    # scalar A
+    sva, u, v, work, iwork, info = gejsv(1.)
+    assert_equal(info, 0)
+    assert_equal(u.shape, (1, 1))
+    assert_equal(v.shape, (1, 1))
+    assert_equal(sva, np.array([1.], dtype=dtype))
+
+    # 1d A
+    A = np.ones((1,), dtype=dtype)
+    sva, u, v, work, iwork, info = gejsv(A)
+    assert_equal(info, 0)
+    assert_equal(u.shape, (1, 1))
+    assert_equal(v.shape, (1, 1))
+    assert_equal(sva, np.array([1.], dtype=dtype))
+
+    # 2d empty A
+    A = np.ones((1, 0), dtype=dtype)
+    sva, u, v, work, iwork, info = gejsv(A)
+    assert_equal(info, 0)
+    assert_equal(u.shape, (1, 0))
+    assert_equal(v.shape, (1, 0))
+    assert_equal(sva, np.array([], dtype=dtype))
+
+    # make sure "overwrite_a" is respected - user reported in gh-13191
+    A = np.sin(np.arange(100).reshape(10, 10)).astype(dtype)
+    A = np.asfortranarray(A + A.T)  # make it symmetric and column major
+    Ac = A.copy('A')
+    _ = gejsv(A)
+    assert_allclose(A, Ac)
+
+
+@pytest.mark.parametrize(('kwargs'),
+                         ({'joba': 9},
+                          {'jobu': 9},
+                          {'jobv': 9},
+                          {'jobr': 9},
+                          {'jobt': 9},
+                          {'jobp': 9})
+                         )
+def test_gejsv_invalid_job_arguments(kwargs):
+    """Test invalid job arguments raise an Exception"""
+    A = np.ones((2, 2), dtype=float)
+    gejsv = get_lapack_funcs('gejsv', dtype=float)
+    assert_raises(Exception, gejsv, A, **kwargs)
+
+
+@pytest.mark.parametrize("A,sva_expect,u_expect,v_expect",
+                         [(np.array([[2.27, -1.54, 1.15, -1.94],
+                                     [0.28, -1.67, 0.94, -0.78],
+                                     [-0.48, -3.09, 0.99, -0.21],
+                                     [1.07, 1.22, 0.79, 0.63],
+                                     [-2.35, 2.93, -1.45, 2.30],
+                                     [0.62, -7.39, 1.03, -2.57]]),
+                           np.array([9.9966, 3.6831, 1.3569, 0.5000]),
+                           np.array([[0.2774, -0.6003, -0.1277, 0.1323],
+                                     [0.2020, -0.0301, 0.2805, 0.7034],
+                                     [0.2918, 0.3348, 0.6453, 0.1906],
+                                     [-0.0938, -0.3699, 0.6781, -0.5399],
+                                     [-0.4213, 0.5266, 0.0413, -0.0575],
+                                     [0.7816, 0.3353, -0.1645, -0.3957]]),
+                           np.array([[0.1921, -0.8030, 0.0041, -0.5642],
+                                     [-0.8794, -0.3926, -0.0752, 0.2587],
+                                     [0.2140, -0.2980, 0.7827, 0.5027],
+                                     [-0.3795, 0.3351, 0.6178, -0.6017]]))])
+def test_gejsv_NAG(A, sva_expect, u_expect, v_expect):
+    """
+    This test implements the example found in the NAG manual, f08khf.
+    An example was not found for the complex case.
+    """
+    # NAG manual provides accuracy up to 4 decimals
+    atol = 1e-4
+    gejsv = get_lapack_funcs('gejsv', dtype=A.dtype)
+
+    sva, u, v, work, iwork, info = gejsv(A)
+
+    assert_allclose(sva_expect, sva, atol=atol)
+    assert_allclose(u_expect, u, atol=atol)
+    assert_allclose(v_expect, v, atol=atol)
+
+
+@pytest.mark.parametrize("dtype", DTYPES)
+def test_gttrf_gttrs(dtype):
+    # The test uses ?gttrf and ?gttrs to solve a random system for each dtype.
+    # It tests that the outputs of ?gttrf define an LU factorization, that the
+    # input parameters are unmodified, that the transpose options work
+    # correctly, that incompatible matrix shapes raise an error, and that
+    # singular matrices return a non-zero info.
+
+    seed(42)
+    n = 10
+    atol = 100 * np.finfo(dtype).eps
+
+    # create the matrix in accordance with the data type
+    du = generate_random_dtype_array((n-1,), dtype=dtype)
+    d = generate_random_dtype_array((n,), dtype=dtype)
+    dl = generate_random_dtype_array((n-1,), dtype=dtype)
+
+    diag_cpy = [dl.copy(), d.copy(), du.copy()]
+
+    A = np.diag(d) + np.diag(dl, -1) + np.diag(du, 1)
+    x = np.random.rand(n)
+    b = A @ x
+
+    gttrf, gttrs = get_lapack_funcs(('gttrf', 'gttrs'), dtype=dtype)
+
+    _dl, _d, _du, du2, ipiv, info = gttrf(dl, d, du)
+    # test to assure that the inputs of ?gttrf are unmodified
+    assert_array_equal(dl, diag_cpy[0])
+    assert_array_equal(d, diag_cpy[1])
+    assert_array_equal(du, diag_cpy[2])
+
+    # generate L and U factors from ?gttrf return values
+    # L/U are lower/upper triangular by construction (initially and at end)
+    U = np.diag(_d, 0) + np.diag(_du, 1) + np.diag(du2, 2)
+    L = np.eye(n, dtype=dtype)
+
+    for i, m in enumerate(_dl):
+        # L is given in a factored form.
+        # See
+        # www.hpcavf.uclan.ac.uk/softwaredoc/sgi_scsl_html/sgi_html/ch03.html
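+        # (ipiv uses LAPACK's 1-based Fortran indexing, hence the -1 below)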
+        piv = ipiv[i] - 1
+        # right multiply by permutation matrix
+        L[:, [i, piv]] = L[:, [piv, i]]
+        # right multiply by Li, rank-one modification of identity
+        L[:, i] += L[:, i+1]*m
+
+    # one last permutation
+    i, piv = -1, ipiv[-1] - 1
+    # right multiply by final permutation matrix
+    L[:, [i, piv]] = L[:, [piv, i]]
+
+    # check that the outputs of ?gttrf define an LU decomposition of A
+    assert_allclose(A, L @ U, atol=atol)
+
+    b_cpy = b.copy()
+    x_gttrs, info = gttrs(_dl, _d, _du, du2, ipiv, b)
+    # test that the inputs of ?gttrs are unmodified
+    assert_array_equal(b, b_cpy)
+    # test that the result of ?gttrs matches the expected input
+    assert_allclose(x, x_gttrs, atol=atol)
+
+    # test that ?gttrf and ?gttrs work with transposal options
+    if dtype in REAL_DTYPES:
+        trans = "T"
+        b_trans = A.T @ x
+    else:
+        trans = "C"
+        b_trans = A.conj().T @ x
+
+    x_gttrs, info = gttrs(_dl, _d, _du, du2, ipiv, b_trans, trans=trans)
+    assert_allclose(x, x_gttrs, atol=atol)
+
+    # test that ValueError is raised with incompatible matrix shapes
+    with assert_raises(ValueError):
+        gttrf(dl[:-1], d, du)
+    with assert_raises(ValueError):
+        gttrf(dl, d[:-1], du)
+    with assert_raises(ValueError):
+        gttrf(dl, d, du[:-1])
+
+    # test that a matrix smaller than the minimal size (n=2) raises an
+    # exception
+    with assert_raises(Exception):
+        gttrf(dl[0], d[:1], du[0])
+
+    # test that singular (row of all zeroes) matrix fails via info
+    du[0] = 0
+    d[0] = 0
+    __dl, __d, __du, _du2, _ipiv, _info = gttrf(dl, d, du)
+    np.testing.assert_(__d[info - 1] == 0,
+                       "?gttrf: _d[info-1] is {}, not the illegal value: 0."
+                       .format(__d[info - 1]))
+
+
+@pytest.mark.parametrize("du, d, dl, du_exp, d_exp, du2_exp, ipiv_exp, b, x",
+                         [(np.array([2.1, -1.0, 1.9, 8.0]),
+                             np.array([3.0, 2.3, -5.0, -.9, 7.1]),
+                             np.array([3.4, 3.6, 7.0, -6.0]),
+                             np.array([2.3, -5, -.9, 7.1]),
+                             np.array([3.4, 3.6, 7, -6, -1.015373]),
+                             np.array([-1, 1.9, 8]),
+                             np.array([2, 3, 4, 5, 5]),
+                             np.array([[2.7, 6.6],
+                                       [-0.5, 10.8],
+                                       [2.6, -3.2],
+                                       [0.6, -11.2],
+                                       [2.7, 19.1]
+                                       ]),
+                             np.array([[-4, 5],
+                                       [7, -4],
+                                       [3, -3],
+                                       [-4, -2],
+                                       [-3, 1]])),
+                          (
+                             np.array([2 - 1j, 2 + 1j, -1 + 1j, 1 - 1j]),
+                             np.array([-1.3 + 1.3j, -1.3 + 1.3j,
+                                       -1.3 + 3.3j, - .3 + 4.3j,
+                                       -3.3 + 1.3j]),
+                             np.array([1 - 2j, 1 + 1j, 2 - 3j, 1 + 1j]),
+                             # du exp
+                             np.array([-1.3 + 1.3j, -1.3 + 3.3j,
+                                       -0.3 + 4.3j, -3.3 + 1.3j]),
+                             np.array([1 - 2j, 1 + 1j, 2 - 3j, 1 + 1j,
+                                       -1.3399 + 0.2875j]),
+                             np.array([2 + 1j, -1 + 1j, 1 - 1j]),
+                             np.array([2, 3, 4, 5, 5]),
+                             np.array([[2.4 - 5j, 2.7 + 6.9j],
+                                       [3.4 + 18.2j, - 6.9 - 5.3j],
+                                       [-14.7 + 9.7j, - 6 - .6j],
+                                       [31.9 - 7.7j, -3.9 + 9.3j],
+                                       [-1 + 1.6j, -3 + 12.2j]]),
+                             np.array([[1 + 1j, 2 - 1j],
+                                       [3 - 1j, 1 + 2j],
+                                       [4 + 5j, -1 + 1j],
+                                       [-1 - 2j, 2 + 1j],
+                                       [1 - 1j, 2 - 2j]])
+                            )])
+def test_gttrf_gttrs_NAG_f07cdf_f07cef_f07crf_f07csf(du, d, dl, du_exp, d_exp,
+                                                     du2_exp, ipiv_exp, b, x):
+    # test to assure that wrapper is consistent with NAG Library Manual Mark 26
+    # example problems: f07cdf and f07cef (real)
+    # examples: f07crf and f07csf (complex)
+    # (Links may expire, so search for "NAG Library Manual Mark 26" online)
+
+    gttrf, gttrs = get_lapack_funcs(('gttrf', "gttrs"), (du[0], du[0]))
+
+    _dl, _d, _du, du2, ipiv, info = gttrf(dl, d, du)
+    assert_allclose(du2, du2_exp)
+    assert_allclose(_du, du_exp)
+    assert_allclose(_d, d_exp, atol=1e-4)  # NAG examples provide 4 decimals.
+    assert_allclose(ipiv, ipiv_exp)
+
+    x_gttrs, info = gttrs(_dl, _d, _du, du2, ipiv, b)
+
+    assert_allclose(x_gttrs, x)
+
+
+@pytest.mark.parametrize('dtype', DTYPES)
+@pytest.mark.parametrize('shape', [(3, 7), (7, 3), (2**18, 2**18)])
+def test_geqrfp_lwork(dtype, shape):
+    geqrfp_lwork = get_lapack_funcs(('geqrfp_lwork'), dtype=dtype)
+    m, n = shape
+    lwork, info = geqrfp_lwork(m=m, n=n)
+    assert_equal(info, 0)
+
+
+@pytest.mark.parametrize("ddtype,dtype",
+                         zip(REAL_DTYPES + REAL_DTYPES, DTYPES))
+def test_pttrf_pttrs(ddtype, dtype):
+    seed(42)
+    # set test tolerance appropriate for dtype
+    atol = 100*np.finfo(dtype).eps
+    # n is the length of the main diagonal of A
+    n = 10
+    # create diagonals according to size and dtype
+
+    # diagonal d should always be real.
+    # add 4 to d so the matrix is diagonally dominant for all dtypes
+    d = generate_random_dtype_array((n,), ddtype) + 4
+    # diagonal e may be real or complex.
+    e = generate_random_dtype_array((n-1,), dtype)
+
+    # assemble diagonals together into matrix
+    A = np.diag(d) + np.diag(e, -1) + np.diag(np.conj(e), 1)
+    # store a copy of diagonals to later verify
+    diag_cpy = [d.copy(), e.copy()]
+
+    pttrf = get_lapack_funcs('pttrf', dtype=dtype)
+
+    _d, _e, info = pttrf(d, e)
+    # test to assure that the inputs of ?pttrf are unmodified
+    assert_array_equal(d, diag_cpy[0])
+    assert_array_equal(e, diag_cpy[1])
+    assert_equal(info, 0, err_msg="pttrf: info = {}, should be 0".format(info))
+
+    # test that the factors from pttrf can be recombined to make A
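+    # ?pttrf returns A = L*D*L**H, with unit lower-bidiagonal L (subdiagonal
+    # _e) and diagonal D (_d)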
+    L = np.diag(_e, -1) + np.diag(np.ones(n))
+    D = np.diag(_d)
+
+    assert_allclose(A, L@D@L.conjugate().T, atol=atol)
+
+    # generate random solution x
+    x = generate_random_dtype_array((n,), dtype)
+    # determine accompanying b to get soln x
+    b = A@x
+
+    # determine _x from pttrs
+    pttrs = get_lapack_funcs('pttrs', dtype=dtype)
+    _x, info = pttrs(_d, _e.conj(), b)
+    assert_equal(info, 0, err_msg="pttrs: info = {}, should be 0".format(info))
+
+    # test that _x from pttrs matches the expected x
+    assert_allclose(x, _x, atol=atol)
+
+
+@pytest.mark.parametrize("ddtype,dtype",
+                         zip(REAL_DTYPES + REAL_DTYPES, DTYPES))
+def test_pttrf_pttrs_errors_incompatible_shape(ddtype, dtype):
+    n = 10
+    pttrf = get_lapack_funcs('pttrf', dtype=dtype)
+    d = generate_random_dtype_array((n,), ddtype) + 2
+    e = generate_random_dtype_array((n-1,), dtype)
+    # test that ValueError is raised with incompatible matrix shapes
+    assert_raises(ValueError, pttrf, d[:-1], e)
+    assert_raises(ValueError, pttrf, d, e[:-1])
+
+
+@pytest.mark.parametrize("ddtype,dtype",
+                         zip(REAL_DTYPES + REAL_DTYPES, DTYPES))
+def test_pttrf_pttrs_errors_singular_nonSPD(ddtype, dtype):
+    n = 10
+    pttrf = get_lapack_funcs('pttrf', dtype=dtype)
+    d = generate_random_dtype_array((n,), ddtype) + 2
+    e = generate_random_dtype_array((n-1,), dtype)
+    # test that singular (row of all zeroes) matrix fails via info
+    d[0] = 0
+    e[0] = 0
+    _d, _e, info = pttrf(d, e)
+    assert_equal(_d[info - 1], 0,
+                 "?pttrf: _d[info-1] is {}, not the illegal value: 0."
+                 .format(_d[info - 1]))
+
+    # test with non-spd matrix
+    d = generate_random_dtype_array((n,), ddtype)
+    _d, _e, info = pttrf(d, e)
+    assert_(info != 0, "?pttrf should fail with non-spd matrix, but didn't")
+
+
+@pytest.mark.parametrize(("d, e, d_expect, e_expect, b, x_expect"), [
+                         (np.array([4, 10, 29, 25, 5]),
+                          np.array([-2, -6, 15, 8]),
+                          np.array([4, 9, 25, 16, 1]),
+                          np.array([-.5, -.6667, .6, .5]),
+                          np.array([[6, 10], [9, 4], [2, 9], [14, 65],
+                                    [7, 23]]),
+                          np.array([[2.5, 2], [2, -1], [1, -3], [-1, 6],
+                                    [3, -5]])
+                          ), (
+                          np.array([16, 41, 46, 21]),
+                          np.array([16 + 16j, 18 - 9j, 1 - 4j]),
+                          np.array([16, 9, 1, 4]),
+                          np.array([1+1j, 2-1j, 1-4j]),
+                          np.array([[64+16j, -16-32j], [93+62j, 61-66j],
+                                    [78-80j, 71-74j], [14-27j, 35+15j]]),
+                          np.array([[2+1j, -3-2j], [1+1j, 1+1j], [1-2j, 1-2j],
+                                    [1-1j, 2+1j]])
+                         )])
+def test_pttrf_pttrs_NAG(d, e, d_expect, e_expect, b, x_expect):
+    # test to assure that wrapper is consistent with NAG Manual Mark 26
+    # example problems: f07jdf and f07jef (real)
+    # examples: f07jrf and f07jsf (complex)
+    # NAG examples provide 4 decimals.
+    # (Links expire, so please search for "NAG Library Manual Mark 26" online)
+
+    atol = 1e-4
+    pttrf = get_lapack_funcs('pttrf', dtype=e[0])
+    _d, _e, info = pttrf(d, e)
+    assert_allclose(_d, d_expect, atol=atol)
+    assert_allclose(_e, e_expect, atol=atol)
+
+    pttrs = get_lapack_funcs('pttrs', dtype=e[0])
+    _x, info = pttrs(_d, _e.conj(), b)
+    assert_allclose(_x, x_expect, atol=atol)
+
+    # also test option `lower`
+    if e.dtype in COMPLEX_DTYPES:
+        _x, info = pttrs(_d, _e, b, lower=1)
+        assert_allclose(_x, x_expect, atol=atol)
+
+
+def pteqr_get_d_e_A_z(dtype, realtype, n, compute_z):
+    # used by ?pteqr tests to build parameters
+    # returns tuple of (d, e, A, z)
+    if compute_z == 1:
+        # build Hermitian A as Q * tri * Q**H by creating Q and tri
+        A_eig = generate_random_dtype_array((n, n), dtype)
+        A_eig = A_eig + np.diag(np.zeros(n) + 4*n)
+        A_eig = (A_eig + A_eig.conj().T) / 2
+        # obtain right eigenvectors (orthogonal)
+        vr = eigh(A_eig)[1]
+        # create tridiagonal matrix
+        d = generate_random_dtype_array((n,), realtype) + 4
+        e = generate_random_dtype_array((n-1,), realtype)
+        tri = np.diag(d) + np.diag(e, 1) + np.diag(e, -1)
+        # Build A from these factors, as sytrd would: (Q * tri * Q**H = A)
+        A = vr @ tri @ vr.conj().T
+        # vr is orthogonal
+        z = vr
+
+    else:
+        # d and e are always real per lapack docs.
+        d = generate_random_dtype_array((n,), realtype)
+        e = generate_random_dtype_array((n-1,), realtype)
+
+        # make SPD
+        d = d + 4
+        A = np.diag(d) + np.diag(e, 1) + np.diag(e, -1)
+        z = np.diag(d) + np.diag(e, -1) + np.diag(e, 1)
+    return (d, e, A, z)
+
+
+@pytest.mark.parametrize("dtype,realtype",
+                         zip(DTYPES, REAL_DTYPES + REAL_DTYPES))
+@pytest.mark.parametrize("compute_z", range(3))
+def test_pteqr(dtype, realtype, compute_z):
+    '''
+    Tests the ?pteqr lapack routine for all dtypes and compute_z parameters.
+    It generates random SPD matrix diagonals d and e, and then confirms the
+    eigenvalues against scipy.linalg.eigh. When compute_z is nonzero it also
+    tests that z is orthogonal and can reassemble A.
+    '''
+    seed(42)
+    atol = 1000*np.finfo(dtype).eps
+    pteqr = get_lapack_funcs(('pteqr'), dtype=dtype)
+
+    n = 10
+
+    d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z)
+
+    d_pteqr, e_pteqr, z_pteqr, info = pteqr(d=d, e=e, z=z, compute_z=compute_z)
+    assert_equal(info, 0, "info = {}, should be 0.".format(info))
+
+    # compare the routine's eigenvalues with scipy.linalg.eigh's.
+    assert_allclose(np.sort(eigh(A)[0]), np.sort(d_pteqr), atol=atol)
+
+    if compute_z:
+        # verify z_pteqr as orthogonal
+        assert_allclose(z_pteqr @ np.conj(z_pteqr).T, np.identity(n),
+                        atol=atol)
+        # verify that z_pteqr recombines to A
+        assert_allclose(z_pteqr @ np.diag(d_pteqr) @ np.conj(z_pteqr).T,
+                        A, atol=atol)
+
+
+@pytest.mark.parametrize("dtype,realtype",
+                         zip(DTYPES, REAL_DTYPES + REAL_DTYPES))
+@pytest.mark.parametrize("compute_z", range(3))
+def test_pteqr_error_non_spd(dtype, realtype, compute_z):
+    seed(42)
+    pteqr = get_lapack_funcs(('pteqr'), dtype=dtype)
+
+    n = 10
+    d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z)
+
+    # test with non-spd matrix
+    d_pteqr, e_pteqr, z_pteqr, info = pteqr(d - 4, e, z=z, compute_z=compute_z)
+    assert info > 0
+
+
+@pytest.mark.parametrize("dtype,realtype",
+                         zip(DTYPES, REAL_DTYPES + REAL_DTYPES))
+@pytest.mark.parametrize("compute_z", range(3))
+def test_pteqr_raise_error_wrong_shape(dtype, realtype, compute_z):
+    seed(42)
+    pteqr = get_lapack_funcs(('pteqr'), dtype=dtype)
+    n = 10
+    d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z)
+    # test with incorrect/incompatible array sizes
+    assert_raises(ValueError, pteqr, d[:-1], e, z=z, compute_z=compute_z)
+    assert_raises(ValueError, pteqr, d, e[:-1], z=z, compute_z=compute_z)
+    if compute_z:
+        assert_raises(ValueError, pteqr, d, e, z=z[:-1], compute_z=compute_z)
+
+
+@pytest.mark.parametrize("dtype,realtype",
+                         zip(DTYPES, REAL_DTYPES + REAL_DTYPES))
+@pytest.mark.parametrize("compute_z", range(3))
+def test_pteqr_error_singular(dtype, realtype, compute_z):
+    seed(42)
+    pteqr = get_lapack_funcs(('pteqr'), dtype=dtype)
+    n = 10
+    d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z)
+    # test with singular matrix
+    d[0] = 0
+    e[0] = 0
+    d_pteqr, e_pteqr, z_pteqr, info = pteqr(d, e, z=z, compute_z=compute_z)
+    assert info > 0
+
+
+@pytest.mark.parametrize("compute_z,d,e,d_expect,z_expect",
+                         [(2,  # "I"
+                           np.array([4.16, 5.25, 1.09, .62]),
+                           np.array([3.17, -.97, .55]),
+                           np.array([8.0023, 1.9926, 1.0014, 0.1237]),
+                           np.array([[0.6326, 0.6245, -0.4191, 0.1847],
+                                     [0.7668, -0.4270, 0.4176, -0.2352],
+                                     [-0.1082, 0.6071, 0.4594, -0.6393],
+                                     [-0.0081, 0.2432, 0.6625, 0.7084]])),
+                          ])
+def test_pteqr_NAG_f08jgf(compute_z, d, e, d_expect, z_expect):
+    '''
+    Implements real (f08jgf) example from NAG Manual Mark 26.
+    Tests for correct outputs.
+    '''
+    # the NAG manual has 4 decimals accuracy
+    atol = 1e-4
+    pteqr = get_lapack_funcs(('pteqr'), dtype=d.dtype)
+
+    z = np.diag(d) + np.diag(e, 1) + np.diag(e, -1)
+    _d, _e, _z, info = pteqr(d=d, e=e, z=z, compute_z=compute_z)
+    assert_allclose(_d, d_expect, atol=atol)
+    assert_allclose(np.abs(_z), np.abs(z_expect), atol=atol)
+
+
+@pytest.mark.parametrize('dtype', DTYPES)
+@pytest.mark.parametrize('matrix_size', [(3, 4), (7, 6), (6, 6)])
+def test_geqrfp(dtype, matrix_size):
+    # Tests for all dtypes, and for tall, wide, and square matrices.
+    # Using the routine with a random matrix A, Q and R are obtained and then
+    # tested such that R is upper triangular and non-negative on the diagonal,
+    # and Q is an orthogonal matrix. Verifies that A = Q@R. It also
+    # tests against a matrix for which the linalg.qr method returns
+    # negative diagonals, and for error messaging.
+
+    # set test tolerance appropriate for dtype
+    np.random.seed(42)
+    rtol = 250*np.finfo(dtype).eps
+    atol = 100*np.finfo(dtype).eps
+    # get appropriate ?geqrfp for dtype
+    geqrfp = get_lapack_funcs(('geqrfp'), dtype=dtype)
+    gqr = get_lapack_funcs(("orgqr"), dtype=dtype)
+
+    m, n = matrix_size
+
+    # create random matrix of dimensions m x n
+    A = generate_random_dtype_array((m, n), dtype=dtype)
+    # create qr matrix using geqrfp
+    qr_A, tau, info = geqrfp(A)
+
+    # obtain r from the upper triangular area
+    r = np.triu(qr_A)
+
+    # obtain q from the orgqr lapack routine
+    # based on linalg.qr's extraction strategy of q with orgqr
+
+    if m > n:
+        # this adds an extra column to the end of qr_A
+        # let qqr be an empty m x m matrix
+        qqr = np.zeros((m, m), dtype=dtype)
+        # set first n columns of qqr to qr_A
+        qqr[:, :n] = qr_A
+        # determine q from this qqr
+        # note that m is a sufficient lwork based on the LAPACK documentation
+        q = gqr(qqr, tau=tau, lwork=m)[0]
+    else:
+        q = gqr(qr_A[:, :m], tau=tau, lwork=m)[0]
+
+    # test that q and r still make A
+    assert_allclose(q@r, A, rtol=rtol)
+    # ensure that q is orthogonal (that q @ transposed q is the identity)
+    assert_allclose(np.eye(q.shape[0]), q@(q.conj().T), rtol=rtol,
+                    atol=atol)
+    # ensure r is upper tri by comparing original r to r as upper triangular
+    assert_allclose(r, np.triu(r), rtol=rtol)
+    # make sure diagonals of r are positive for this random solution
+    assert_(np.all(np.diag(r) > np.zeros(len(np.diag(r)))))
+    # ensure that info is zero for this success
+    assert_(info == 0)
+
+    # test that this routine gives r diagonals that are positive for a
+    # matrix that returns negatives in the diagonal with scipy.linalg.qr
+    A_negative = generate_random_dtype_array((n, m), dtype=dtype) * -1
+    q_qr_neg, r_qr_neg = qr(A_negative)
+    qr_A_neg, tau_neg, info_neg = geqrfp(A_negative)
+    # assert that some of the entries on the diagonal from linalg.qr are
+    # negative and that all of those from geqrfp are positive.
+    assert_(np.any(np.diag(r_qr_neg) < 0) and
+            np.all(np.diag(qr_A_neg) > 0))
+
+
+def test_geqrfp_errors_with_empty_array():
+    # check that empty array raises good error message
+    A_empty = np.array([])
+    geqrfp = get_lapack_funcs('geqrfp', dtype=A_empty.dtype)
+    assert_raises(Exception, geqrfp, A_empty)
+
+
+@pytest.mark.parametrize("driver", ['ev', 'evd', 'evr', 'evx'])
+@pytest.mark.parametrize("pfx", ['sy', 'he'])
+def test_standard_eigh_lworks(pfx, driver):
+    n = 1200  # Some sufficiently big arbitrary number
+    dtype = REAL_DTYPES if pfx == 'sy' else COMPLEX_DTYPES
+    sc_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[0])
+    dz_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[1])
+    try:
+        _compute_lwork(sc_dlw, n, lower=1)
+        _compute_lwork(dz_dlw, n, lower=1)
+    except Exception as e:
+        pytest.fail("{}_lwork raised unexpected exception: {}"
+                    "".format(pfx+driver, e))
+
+
+@pytest.mark.parametrize("driver", ['gv', 'gvx'])
+@pytest.mark.parametrize("pfx", ['sy', 'he'])
+def test_generalized_eigh_lworks(pfx, driver):
+    n = 1200  # Some sufficiently big arbitrary number
+    dtype = REAL_DTYPES if pfx == 'sy' else COMPLEX_DTYPES
+    sc_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[0])
+    dz_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[1])
+    # Shouldn't raise any exceptions
+    try:
+        _compute_lwork(sc_dlw, n, uplo="L")
+        _compute_lwork(dz_dlw, n, uplo="L")
+    except Exception as e:
+        pytest.fail("{}_lwork raised unexpected exception: {}"
+                    "".format(pfx+driver, e))
+
+
+@pytest.mark.parametrize("dtype_", DTYPES)
+@pytest.mark.parametrize("m", [1, 10, 100, 1000])
+def test_orcsd_uncsd_lwork(dtype_, m):
+    seed(1234)
+    p = randint(0, m)
+    q = m - p
+    pfx = 'or' if dtype_ in REAL_DTYPES else 'un'
+    dlw = pfx + 'csd_lwork'
+    lw = get_lapack_funcs(dlw, dtype=dtype_)
+    lwval = _compute_lwork(lw, m, p, q)
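+    # the complex workspace query returns a (lwork, lrwork) pair while the
+    # real one returns a single value; wrap the scalar so the check below
+    # can iterate over it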
+    lwval = lwval if pfx == 'un' else (lwval,)
+    assert all([x > 0 for x in lwval])
+
+
+@pytest.mark.parametrize("dtype_", DTYPES)
+def test_orcsd_uncsd(dtype_):
+    m, p, q = 250, 80, 170
+
+    pfx = 'or' if dtype_ in REAL_DTYPES else 'un'
+    X = ortho_group.rvs(m) if pfx == 'or' else unitary_group.rvs(m)
+
+    drv, dlw = get_lapack_funcs((pfx + 'csd', pfx + 'csd_lwork'), dtype=dtype_)
+    lwval = _compute_lwork(dlw, m, p, q)
+    lwvals = {'lwork': lwval} if pfx == 'or' else dict(zip(['lwork',
+                                                            'lrwork'], lwval))
+
+    cs11, cs12, cs21, cs22, theta, u1, u2, v1t, v2t, info =\
+        drv(X[:p, :q], X[:p, q:], X[p:, :q], X[p:, q:], **lwvals)
+
+    assert info == 0
+
+    U = block_diag(u1, u2)
+    VH = block_diag(v1t, v2t)
+    r = min(min(p, q), min(m-p, m-q))
+    n11 = min(p, q) - r
+    n12 = min(p, m-q) - r
+    n21 = min(m-p, q) - r
+    n22 = min(m-p, m-q) - r
+
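+    # Assemble the middle factor S of the CS decomposition X = U @ S @ VH:
+    # identity blocks of sizes n11, n22 and n21, a negated identity block of
+    # size n12, and cosine/sine blocks of size r built from theta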
+    S = np.zeros((m, m), dtype=dtype_)
+    one = dtype_(1.)
+    for i in range(n11):
+        S[i, i] = one
+    for i in range(n22):
+        S[p+i, q+i] = one
+    for i in range(n12):
+        S[i+n11+r, i+n11+r+n21+n22+r] = -one
+    for i in range(n21):
+        S[p+n22+r+i, n11+r+i] = one
+
+    for i in range(r):
+        S[i+n11, i+n11] = np.cos(theta[i])
+        S[p+n22+i, i+r+n21+n22] = np.cos(theta[i])
+
+        S[i+n11, i+n11+n21+n22+r] = -np.sin(theta[i])
+        S[p+n22+i, i+n11] = np.sin(theta[i])
+
+    Xc = U @ S @ VH
+    assert_allclose(X, Xc, rtol=0., atol=1e4*np.finfo(dtype_).eps)
+
+
+@pytest.mark.parametrize("dtype", DTYPES)
+@pytest.mark.parametrize("trans_bool", [False, True])
+@pytest.mark.parametrize("fact", ["F", "N"])
+def test_gtsvx(dtype, trans_bool, fact):
+    """
+    This test uses ?gtsvx to solve a random Ax=b system for each dtype.
+    It tests that the outputs define an LU factorization, that inputs are
+    unmodified, that transpose options work, that incompatible shapes raise
+    errors, and that singular matrices and singular factorizations are
+    flagged. It parametrizes DTYPES and the 'fact' value along with the
+    fact-related inputs.
+    """
+    seed(42)
+    # set test tolerance appropriate for dtype
+    atol = 100 * np.finfo(dtype).eps
+    # obtain routine
+    gtsvx, gttrf = get_lapack_funcs(('gtsvx', 'gttrf'), dtype=dtype)
+    # Generate random tridiagonal matrix A
+    n = 10
+    dl = generate_random_dtype_array((n-1,), dtype=dtype)
+    d = generate_random_dtype_array((n,), dtype=dtype)
+    du = generate_random_dtype_array((n-1,), dtype=dtype)
+    A = np.diag(dl, -1) + np.diag(d) + np.diag(du, 1)
+    # generate random solution x
+    x = generate_random_dtype_array((n, 2), dtype=dtype)
+    # create b from x for equation Ax=b
+    trans = ("T" if dtype in REAL_DTYPES else "C") if trans_bool else "N"
+    b = (A.conj().T if trans_bool else A) @ x
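+    # (when a transpose option is tested, b is built from A^T / A^H instead,
+    # so that the transposed solve recovers the same x)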
+
+    # store a copy of the inputs to check they haven't been modified later
+    inputs_cpy = [dl.copy(), d.copy(), du.copy(), b.copy()]
+
+    # set these to None if fact == 'N', or to the output of gttrf if
+    # fact == 'F'
+    dlf_, df_, duf_, du2f_, ipiv_, info_ = \
+        gttrf(dl, d, du) if fact == 'F' else [None]*6
+
+    gtsvx_out = gtsvx(dl, d, du, b, fact=fact, trans=trans, dlf=dlf_, df=df_,
+                      duf=duf_, du2=du2f_, ipiv=ipiv_)
+    dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
+    assert_(info == 0, "?gtsvx info = {}, should be zero".format(info))
+
+    # assure that inputs are unmodified
+    assert_array_equal(dl, inputs_cpy[0])
+    assert_array_equal(d, inputs_cpy[1])
+    assert_array_equal(du, inputs_cpy[2])
+    assert_array_equal(b, inputs_cpy[3])
+
+    # test that x_soln matches the expected x
+    assert_allclose(x, x_soln, atol=atol)
+
+    # assert that the outputs are of correct type or shape
+    # rcond should be a scalar
+    assert_(not hasattr(rcond, "__len__"),
+            "rcond should be scalar but is {}".format(rcond))
+    # ferr should have one entry per column of x
+    assert_(ferr.shape[0] == b.shape[1], "ferr.shape is {} but should be {}"
+            .format(ferr.shape[0], b.shape[1]))
+    # berr should have one entry per column of x
+    assert_(berr.shape[0] == b.shape[1], "berr.shape is {} but should be {}"
+            .format(berr.shape[0], b.shape[1]))
+
+
+@pytest.mark.parametrize("dtype", DTYPES)
+@pytest.mark.parametrize("trans_bool", [0, 1])
+@pytest.mark.parametrize("fact", ["F", "N"])
+def test_gtsvx_error_singular(dtype, trans_bool, fact):
+    seed(42)
+    # obtain routine
+    gtsvx, gttrf = get_lapack_funcs(('gtsvx', 'gttrf'), dtype=dtype)
+    # Generate random tridiagonal matrix A
+    n = 10
+    dl = generate_random_dtype_array((n-1,), dtype=dtype)
+    d = generate_random_dtype_array((n,), dtype=dtype)
+    du = generate_random_dtype_array((n-1,), dtype=dtype)
+    A = np.diag(dl, -1) + np.diag(d) + np.diag(du, 1)
+    # generate random solution x
+    x = generate_random_dtype_array((n, 2), dtype=dtype)
+    # create b from x for equation Ax=b
+    trans = "T" if dtype in REAL_DTYPES else "C"
+    b = (A.conj().T if trans_bool else A) @ x
+
+    # set these to None if fact == 'N', or to the output of gttrf if
+    # fact == 'F'
+    dlf_, df_, duf_, du2f_, ipiv_, info_ = \
+        gttrf(dl, d, du) if fact == 'F' else [None]*6
+
+    gtsvx_out = gtsvx(dl, d, du, b, fact=fact, trans=trans, dlf=dlf_, df=df_,
+                      duf=duf_, du2=du2f_, ipiv=ipiv_)
+    dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
+    # test with singular matrix
+    # no need to test inputs with fact "F" since ?gttrf already does.
+    if fact == "N":
+        # Construct a singular example manually
+        d[-1] = 0
+        dl[-1] = 0
+        # solve using routine
+        gtsvx_out = gtsvx(dl, d, du, b)
+        dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
+        # test for the singular matrix.
+        assert info > 0, "info should be > 0 for singular matrix"
+
+    elif fact == 'F':
+        # assuming that a singular factorization is input
+        df_[-1] = 0
+        duf_[-1] = 0
+        du2f_[-1] = 0
+
+        gtsvx_out = gtsvx(dl, d, du, b, fact=fact, dlf=dlf_, df=df_, duf=duf_,
+                          du2=du2f_, ipiv=ipiv_)
+        dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
+        # info should not be zero and should provide index of illegal value
+        assert info > 0, "info should be > 0 for singular matrix"
+
+
+@pytest.mark.parametrize("dtype", DTYPES*2)
+@pytest.mark.parametrize("trans_bool", [False, True])
+@pytest.mark.parametrize("fact", ["F", "N"])
+def test_gtsvx_error_incompatible_size(dtype, trans_bool, fact):
+    seed(42)
+    # obtain routine
+    gtsvx, gttrf = get_lapack_funcs(('gtsvx', 'gttrf'), dtype=dtype)
+    # Generate random tridiagonal matrix A
+    n = 10
+    dl = generate_random_dtype_array((n-1,), dtype=dtype)
+    d = generate_random_dtype_array((n,), dtype=dtype)
+    du = generate_random_dtype_array((n-1,), dtype=dtype)
+    A = np.diag(dl, -1) + np.diag(d) + np.diag(du, 1)
+    # generate random solution x
+    x = generate_random_dtype_array((n, 2), dtype=dtype)
+    # create b from x for equation Ax=b
+    trans = "T" if dtype in REAL_DTYPES else "C"
+    b = (A.conj().T if trans_bool else A) @ x
+
+    # set these to None if fact == 'N', or to the output of gttrf if
+    # fact == 'F'
+    dlf_, df_, duf_, du2f_, ipiv_, info_ = \
+        gttrf(dl, d, du) if fact == 'F' else [None]*6
+
+    if fact == "N":
+        assert_raises(ValueError, gtsvx, dl[:-1], d, du, b,
+                      fact=fact, trans=trans, dlf=dlf_, df=df_,
+                      duf=duf_, du2=du2f_, ipiv=ipiv_)
+        assert_raises(ValueError, gtsvx, dl, d[:-1], du, b,
+                      fact=fact, trans=trans, dlf=dlf_, df=df_,
+                      duf=duf_, du2=du2f_, ipiv=ipiv_)
+        assert_raises(ValueError, gtsvx, dl, d, du[:-1], b,
+                      fact=fact, trans=trans, dlf=dlf_, df=df_,
+                      duf=duf_, du2=du2f_, ipiv=ipiv_)
+        assert_raises(Exception, gtsvx, dl, d, du, b[:-1],
+                      fact=fact, trans=trans, dlf=dlf_, df=df_,
+                      duf=duf_, du2=du2f_, ipiv=ipiv_)
+    else:
+        assert_raises(ValueError, gtsvx, dl, d, du, b,
+                      fact=fact, trans=trans, dlf=dlf_[:-1], df=df_,
+                      duf=duf_, du2=du2f_, ipiv=ipiv_)
+        assert_raises(ValueError, gtsvx, dl, d, du, b,
+                      fact=fact, trans=trans, dlf=dlf_, df=df_[:-1],
+                      duf=duf_, du2=du2f_, ipiv=ipiv_)
+        assert_raises(ValueError, gtsvx, dl, d, du, b,
+                      fact=fact, trans=trans, dlf=dlf_, df=df_,
+                      duf=duf_[:-1], du2=du2f_, ipiv=ipiv_)
+        assert_raises(ValueError, gtsvx, dl, d, du, b,
+                      fact=fact, trans=trans, dlf=dlf_, df=df_,
+                      duf=duf_, du2=du2f_[:-1], ipiv=ipiv_)
+
+
+@pytest.mark.parametrize("du,d,dl,b,x",
+                         [(np.array([2.1, -1.0, 1.9, 8.0]),
+                           np.array([3.0, 2.3, -5.0, -0.9, 7.1]),
+                           np.array([3.4, 3.6, 7.0, -6.0]),
+                           np.array([[2.7, 6.6], [-.5, 10.8], [2.6, -3.2],
+                                     [.6, -11.2], [2.7, 19.1]]),
+                           np.array([[-4, 5], [7, -4], [3, -3], [-4, -2],
+                                     [-3, 1]])),
+                          (np.array([2 - 1j, 2 + 1j, -1 + 1j, 1 - 1j]),
+                           np.array([-1.3 + 1.3j, -1.3 + 1.3j, -1.3 + 3.3j,
+                                     -.3 + 4.3j, -3.3 + 1.3j]),
+                           np.array([1 - 2j, 1 + 1j, 2 - 3j, 1 + 1j]),
+                           np.array([[2.4 - 5j, 2.7 + 6.9j],
+                                     [3.4 + 18.2j, -6.9 - 5.3j],
+                                     [-14.7 + 9.7j, -6 - .6j],
+                                     [31.9 - 7.7j, -3.9 + 9.3j],
+                                     [-1 + 1.6j, -3 + 12.2j]]),
+                           np.array([[1 + 1j, 2 - 1j], [3 - 1j, 1 + 2j],
+                                     [4 + 5j, -1 + 1j], [-1 - 2j, 2 + 1j],
+                                     [1 - 1j, 2 - 2j]]))])
+def test_gtsvx_NAG(du, d, dl, b, x):
+    # Test to ensure wrapper is consistent with NAG Manual Mark 26
+    # example problems: real (f07cbf) and complex (f07cpf)
+    gtsvx = get_lapack_funcs('gtsvx', dtype=d.dtype)
+
+    gtsvx_out = gtsvx(dl, d, du, b)
+    dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
+
+    assert_array_almost_equal(x, x_soln)
+
+
+@pytest.mark.parametrize("dtype,realtype", zip(DTYPES, REAL_DTYPES
+                                               + REAL_DTYPES))
+@pytest.mark.parametrize("fact,df_de_lambda",
+                         [("F",
+                           lambda d, e: get_lapack_funcs('pttrf',
+                                                         dtype=e.dtype)(d, e)),
+                          ("N", lambda d, e: (None, None, None))])
+def test_ptsvx(dtype, realtype, fact, df_de_lambda):
+    '''
+    Test the ?ptsvx LAPACK routine wrapper by solving a random system
+    Ax = b for all dtypes and input variations. Checks that the input
+    parameters are unmodified, that both fact options work, that
+    incompatible matrix shapes raise an error, and that singular
+    matrices are reported through a positive info value.
+    '''
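+    # ?ptsvx solves A x = b for a symmetric/Hermitian positive definite
+    # tridiagonal A via the L*D*L**H factorization computed by ?pttrf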
+    seed(42)
+    # set test tolerance appropriate for dtype
+    atol = 100 * np.finfo(dtype).eps
+    ptsvx = get_lapack_funcs('ptsvx', dtype=dtype)
+    n = 5
+    # create diagonals according to size and dtype
+    d = generate_random_dtype_array((n,), realtype) + 4
+    e = generate_random_dtype_array((n-1,), dtype)
+    A = np.diag(d) + np.diag(e, -1) + np.diag(np.conj(e), 1)
+    x_soln = generate_random_dtype_array((n, 2), dtype=dtype)
+    b = A @ x_soln
+
+    # use the lambda to obtain df and ef (None when fact == 'N')
+    df, ef, info = df_de_lambda(d, e)
+
+    # create copy to later test that they are unmodified
+    diag_cpy = [d.copy(), e.copy(), b.copy()]
+
+    # solve using routine
+    df, ef, x, rcond, ferr, berr, info = ptsvx(d, e, b, fact=fact,
+                                               df=df, ef=ef)
+    # d, e, and b should be unmodified
+    assert_array_equal(d, diag_cpy[0])
+    assert_array_equal(e, diag_cpy[1])
+    assert_array_equal(b, diag_cpy[2])
+    assert_(info == 0, "info should be 0 but is {}.".format(info))
+    assert_array_almost_equal(x_soln, x)
+
+    # test that the factors from ptsvx can be recombined to make A
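+    # (pttrf factors A as L*D*L**H with L unit lower bidiagonal, its
+    # subdiagonal stored in ef, and D = diag(df))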
+    L = np.diag(ef, -1) + np.diag(np.ones(n))
+    D = np.diag(df)
+    assert_allclose(A, L@D@(np.conj(L).T), atol=atol)
+
+    # assert that the outputs are of correct type or shape
+    # rcond should be a scalar
+    assert not hasattr(rcond, "__len__"), \
+        "rcond should be scalar but is {}".format(rcond)
+    # ferr should have length equal to the number of columns of x
+    assert_(ferr.shape == (2,), "ferr.shape is {} but should be ({},)"
+            .format(ferr.shape, x_soln.shape[1]))
+    # berr should have length equal to the number of columns of x
+    assert_(berr.shape == (2,), "berr.shape is {} but should be ({},)"
+            .format(berr.shape, x_soln.shape[1]))
+
+
+@pytest.mark.parametrize("dtype,realtype", zip(DTYPES, REAL_DTYPES
+                                               + REAL_DTYPES))
+@pytest.mark.parametrize("fact,df_de_lambda",
+                         [("F",
+                           lambda d, e: get_lapack_funcs('pttrf',
+                                                         dtype=e.dtype)(d, e)),
+                          ("N", lambda d, e: (None, None, None))])
+def test_ptsvx_error_raise_errors(dtype, realtype, fact, df_de_lambda):
+    seed(42)
+    ptsvx = get_lapack_funcs('ptsvx', dtype=dtype)
+    n = 5
+    # create diagonals according to size and dtype
+    d = generate_random_dtype_array((n,), realtype) + 4
+    e = generate_random_dtype_array((n-1,), dtype)
+    A = np.diag(d) + np.diag(e, -1) + np.diag(np.conj(e), 1)
+    x_soln = generate_random_dtype_array((n, 2), dtype=dtype)
+    b = A @ x_soln
+
+    # use the lambda to obtain df and ef (None when fact == 'N')
+    df, ef, info = df_de_lambda(d, e)
+
+    # test with malformatted array sizes
+    assert_raises(ValueError, ptsvx, d[:-1], e, b, fact=fact, df=df, ef=ef)
+    assert_raises(ValueError, ptsvx, d, e[:-1], b, fact=fact, df=df, ef=ef)
+    assert_raises(Exception, ptsvx, d, e, b[:-1], fact=fact, df=df, ef=ef)
+
+
+@pytest.mark.parametrize("dtype,realtype", zip(DTYPES, REAL_DTYPES
+                                               + REAL_DTYPES))
+@pytest.mark.parametrize("fact,df_de_lambda",
+                         [("F",
+                           lambda d, e: get_lapack_funcs('pttrf',
+                                                         dtype=e.dtype)(d, e)),
+                          ("N", lambda d, e: (None, None, None))])
+def test_ptsvx_non_SPD_singular(dtype, realtype, fact, df_de_lambda):
+    seed(42)
+    ptsvx = get_lapack_funcs('ptsvx', dtype=dtype)
+    n = 5
+    # create diagonals according to size and dtype
+    d = generate_random_dtype_array((n,), realtype) + 4
+    e = generate_random_dtype_array((n-1,), dtype)
+    A = np.diag(d) + np.diag(e, -1) + np.diag(np.conj(e), 1)
+    x_soln = generate_random_dtype_array((n, 2), dtype=dtype)
+    b = A @ x_soln
+
+    # use the lambda to obtain df and ef (None when fact == 'N')
+    df, ef, info = df_de_lambda(d, e)
+
+    if fact == "N":
+        d[3] = 0
+        # obtain new df, ef
+        df, ef, info = df_de_lambda(d, e)
+        # solve using routine
+        df, ef, x, rcond, ferr, berr, info = ptsvx(d, e, b)
+        # test for the singular matrix.
+        assert 0 < info <= n
+
+        # non-SPD matrix
+        d = generate_random_dtype_array((n,), realtype)
+        df, ef, x, rcond, ferr, berr, info = ptsvx(d, e, b)
+        assert 0 < info <= n
+    else:
+        # assuming that someone is using a singular factorization
+        df, ef, info = df_de_lambda(d, e)
+        df[0] = 0
+        ef[0] = 0
+        df, ef, x, rcond, ferr, berr, info = ptsvx(d, e, b, fact=fact,
+                                                   df=df, ef=ef)
+        assert info > 0
+
+
+@pytest.mark.parametrize('d,e,b,x',
+                         [(np.array([4, 10, 29, 25, 5]),
+                           np.array([-2, -6, 15, 8]),
+                           np.array([[6, 10], [9, 4], [2, 9], [14, 65],
+                                     [7, 23]]),
+                           np.array([[2.5, 2], [2, -1], [1, -3],
+                                     [-1, 6], [3, -5]])),
+                          (np.array([16, 41, 46, 21]),
+                           np.array([16 + 16j, 18 - 9j, 1 - 4j]),
+                           np.array([[64 + 16j, -16 - 32j],
+                                     [93 + 62j, 61 - 66j],
+                                     [78 - 80j, 71 - 74j],
+                                     [14 - 27j, 35 + 15j]]),
+                           np.array([[2 + 1j, -3 - 2j],
+                                     [1 + 1j, 1 + 1j],
+                                     [1 - 2j, 1 - 2j],
+                                     [1 - 1j, 2 + 1j]]))])
+def test_ptsvx_NAG(d, e, b, x):
+    # test to ensure that the wrapper is consistent with NAG Manual Mark 26
+    # example problems: f07jbf, f07jpf
+    # (Links expire, so please search for "NAG Library Manual Mark 26" online)
+
+    # obtain routine with correct type based on e.dtype
+    ptsvx = get_lapack_funcs('ptsvx', dtype=e.dtype)
+    # solve using routine
+    df, ef, x_ptsvx, rcond, ferr, berr, info = ptsvx(d, e, b)
+    # check that ptsvx's solution matches the expected x
+    assert_array_almost_equal(x, x_ptsvx)
+
+
+@pytest.mark.parametrize('lower', [False, True])
+@pytest.mark.parametrize('dtype', DTYPES)
+def test_pptrs_pptri_pptrf_ppsv_ppcon(dtype, lower):
+    seed(1234)
+    atol = np.finfo(dtype).eps*100
+    # Manual conversion to/from packed format is feasible here.
+    n, nrhs = 10, 4
+    a = generate_random_dtype_array([n, n], dtype=dtype)
+    b = generate_random_dtype_array([n, nrhs], dtype=dtype)
+
+    a = a.conj().T + a + np.eye(n, dtype=dtype) * dtype(5.)
+    if lower:
+        inds = ([x for y in range(n) for x in range(y, n)],
+                [y for y in range(n) for x in range(y, n)])
+    else:
+        inds = ([x for y in range(1, n+1) for x in range(y)],
+                [y-1 for y in range(1, n+1) for x in range(y)])
+    ap = a[inds]
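+    # ap now holds the selected triangle of a in LAPACK packed storage:
+    # the columns of the triangle concatenated into a 1-D array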
+    ppsv, pptrf, pptrs, pptri, ppcon = get_lapack_funcs(
+        ('ppsv', 'pptrf', 'pptrs', 'pptri', 'ppcon'),
+        dtype=dtype,
+        ilp64="preferred")
+
+    ul, info = pptrf(n, ap, lower=lower)
+    assert_equal(info, 0)
+    aul = cholesky(a, lower=lower)[inds]
+    assert_allclose(ul, aul, rtol=0, atol=atol)
+
+    uli, info = pptri(n, ul, lower=lower)
+    assert_equal(info, 0)
+    auli = inv(a)[inds]
+    assert_allclose(uli, auli, rtol=0, atol=atol)
+
+    x, info = pptrs(n, ul, b, lower=lower)
+    assert_equal(info, 0)
+    bx = solve(a, b)
+    assert_allclose(x, bx, rtol=0, atol=atol)
+
+    xv, info = ppsv(n, ap, b, lower=lower)
+    assert_equal(info, 0)
+    assert_allclose(xv, bx, rtol=0, atol=atol)
+
+    anorm = np.linalg.norm(a, 1)
+    rcond, info = ppcon(n, ap, anorm=anorm, lower=lower)
+    assert_equal(info, 0)
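+    # ppcon estimates 1/cond_1(a); the check below is equivalent to
+    # |1 - rcond * cond_1(a)| < 1, i.e. the estimate agrees with the true
+    # condition number to within roughly a factor of two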
+    assert_(abs(1/rcond - np.linalg.cond(a, p=1))*rcond < 1)
+
+
+@pytest.mark.parametrize('dtype', DTYPES)
+def test_gees_trexc(dtype):
+    seed(1234)
+    atol = np.finfo(dtype).eps*100
+
+    n = 10
+    a = generate_random_dtype_array([n, n], dtype=dtype)
+
+    gees, trexc = get_lapack_funcs(('gees', 'trexc'), dtype=dtype)
+
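+    # gees takes a select callable as its first argument; no eigenvalue
+    # ordering is requested here, so a do-nothing lambda is passed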
+    result = gees(lambda x: None, a, overwrite_a=False)
+    assert_equal(result[-1], 0)
+
+    t = result[0]
+    z = result[-3]
+
+    d2 = t[6, 6]
+
+    if dtype in COMPLEX_DTYPES:
+        assert_allclose(t, np.triu(t), rtol=0, atol=atol)
+
+    assert_allclose(z @ t @ z.conj().T, a, rtol=0, atol=atol)
+
+    result = trexc(t, z, 7, 1)
+    assert_equal(result[-1], 0)
+
+    t = result[0]
+    z = result[-2]
+
+    if dtype in COMPLEX_DTYPES:
+        assert_allclose(t, np.triu(t), rtol=0, atol=atol)
+
+    assert_allclose(z @ t @ z.conj().T, a, rtol=0, atol=atol)
+
+    assert_allclose(t[0, 0], d2, rtol=0, atol=atol)
+
+
+@pytest.mark.parametrize(
+    "t, expect, ifst, ilst",
+    [(np.array([[0.80, -0.11, 0.01, 0.03],
+                [0.00, -0.10, 0.25, 0.35],
+                [0.00, -0.65, -0.10, 0.20],
+                [0.00, 0.00, 0.00, -0.10]]),
+      np.array([[-0.1000, -0.6463, 0.0874, 0.2010],
+                [0.2514, -0.1000, 0.0927, 0.3505],
+                [0.0000, 0.0000, 0.8000, -0.0117],
+                [0.0000, 0.0000, 0.0000, -0.1000]]),
+      2, 1),
+     (np.array([[-6.00 - 7.00j, 0.36 - 0.36j, -0.19 + 0.48j, 0.88 - 0.25j],
+                [0.00 + 0.00j, -5.00 + 2.00j, -0.03 - 0.72j, -0.23 + 0.13j],
+                [0.00 + 0.00j, 0.00 + 0.00j, 8.00 - 1.00j, 0.94 + 0.53j],
+                [0.00 + 0.00j, 0.00 + 0.00j, 0.00 + 0.00j, 3.00 - 4.00j]]),
+      np.array([[-5.0000 + 2.0000j, -0.1574 + 0.7143j,
+                 0.1781 - 0.1913j, 0.3950 + 0.3861j],
+                [0.0000 + 0.0000j, 8.0000 - 1.0000j,
+                 1.0742 + 0.1447j, 0.2515 - 0.3397j],
+                [0.0000 + 0.0000j, 0.0000 + 0.0000j,
+                 3.0000 - 4.0000j, 0.2264 + 0.8962j],
+                [0.0000 + 0.0000j, 0.0000 + 0.0000j,
+                 0.0000 + 0.0000j, -6.0000 - 7.0000j]]),
+      1, 4)])
+def test_trexc_NAG(t, ifst, ilst, expect):
+    """
+    This test implements the example found in the NAG manual,
+    f08qfc, f08qtc, f08qgc, f08quc.
+    """
+    # NAG manual provides accuracy up to 4 decimals
+    atol = 1e-4
+    trexc = get_lapack_funcs('trexc', dtype=t.dtype)
+
+    result = trexc(t, t, ifst, ilst, wantq=0)
+    assert_equal(result[-1], 0)
+
+    t = result[0]
+    assert_allclose(expect, t, atol=atol)
+
+
+@pytest.mark.parametrize('dtype', DTYPES)
+def test_gges_tgexc(dtype):
+    if dtype == np.float32 and sys.platform == 'darwin':
+        pytest.xfail("gges[float32] broken for OpenBLAS on macOS, see gh-16949")
+
+    seed(1234)
+    atol = np.finfo(dtype).eps*100
+
+    n = 10
+    a = generate_random_dtype_array([n, n], dtype=dtype)
+    b = generate_random_dtype_array([n, n], dtype=dtype)
+
+    gges, tgexc = get_lapack_funcs(('gges', 'tgexc'), dtype=dtype)
+
+    result = gges(lambda x: None, a, b, overwrite_a=False, overwrite_b=False)
+    assert_equal(result[-1], 0)
+
+    s = result[0]
+    t = result[1]
+    q = result[-4]
+    z = result[-3]
+
+    d1 = s[0, 0] / t[0, 0]
+    d2 = s[6, 6] / t[6, 6]
+
+    if dtype in COMPLEX_DTYPES:
+        assert_allclose(s, np.triu(s), rtol=0, atol=atol)
+        assert_allclose(t, np.triu(t), rtol=0, atol=atol)
+
+    assert_allclose(q @ s @ z.conj().T, a, rtol=0, atol=atol)
+    assert_allclose(q @ t @ z.conj().T, b, rtol=0, atol=atol)
+
+    result = tgexc(s, t, q, z, 7, 1)
+    assert_equal(result[-1], 0)
+
+    s = result[0]
+    t = result[1]
+    q = result[2]
+    z = result[3]
+
+    if dtype in COMPLEX_DTYPES:
+        assert_allclose(s, np.triu(s), rtol=0, atol=atol)
+        assert_allclose(t, np.triu(t), rtol=0, atol=atol)
+
+    assert_allclose(q @ s @ z.conj().T, a, rtol=0, atol=atol)
+    assert_allclose(q @ t @ z.conj().T, b, rtol=0, atol=atol)
+
+    assert_allclose(s[0, 0] / t[0, 0], d2, rtol=0, atol=atol)
+    assert_allclose(s[1, 1] / t[1, 1], d1, rtol=0, atol=atol)
+
+
+@pytest.mark.parametrize('dtype', DTYPES)
+def test_gees_trsen(dtype):
+    seed(1234)
+    atol = np.finfo(dtype).eps*100
+
+    n = 10
+    a = generate_random_dtype_array([n, n], dtype=dtype)
+
+    gees, trsen, trsen_lwork = get_lapack_funcs(
+        ('gees', 'trsen', 'trsen_lwork'), dtype=dtype)
+
+    result = gees(lambda x: None, a, overwrite_a=False)
+    assert_equal(result[-1], 0)
+
+    t = result[0]
+    z = result[-3]
+
+    d2 = t[6, 6]
+
+    if dtype in COMPLEX_DTYPES:
+        assert_allclose(t, np.triu(t), rtol=0, atol=atol)
+
+    assert_allclose(z @ t @ z.conj().T, a, rtol=0, atol=atol)
+
+    select = np.zeros(n)
+    select[6] = 1
+
+    lwork = _compute_lwork(trsen_lwork, select, t)
+
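+    # for the real flavors, trsen_lwork returns both lwork and liwork,
+    # and trsen requires the integer workspace size as well; the complex
+    # flavors take no liwork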
+    if dtype in COMPLEX_DTYPES:
+        result = trsen(select, t, z, lwork=lwork)
+    else:
+        result = trsen(select, t, z, lwork=lwork, liwork=lwork[1])
+    assert_equal(result[-1], 0)
+
+    t = result[0]
+    z = result[1]
+
+    if dtype in COMPLEX_DTYPES:
+        assert_allclose(t, np.triu(t), rtol=0, atol=atol)
+
+    assert_allclose(z @ t @ z.conj().T, a, rtol=0, atol=atol)
+
+    assert_allclose(t[0, 0], d2, rtol=0, atol=atol)
+
+
+@pytest.mark.parametrize(
+    "t, q, expect, select, expect_s, expect_sep",
+    [(np.array([[0.7995, -0.1144, 0.0060, 0.0336],
+                [0.0000, -0.0994, 0.2478, 0.3474],
+                [0.0000, -0.6483, -0.0994, 0.2026],
+                [0.0000, 0.0000, 0.0000, -0.1007]]),
+      np.array([[0.6551, 0.1037, 0.3450, 0.6641],
+                [0.5236, -0.5807, -0.6141, -0.1068],
+                [-0.5362, -0.3073, -0.2935, 0.7293],
+                [0.0956, 0.7467, -0.6463, 0.1249]]),
+      np.array([[0.3500, 0.4500, -0.1400, -0.1700],
+                [0.0900, 0.0700, -0.5399, 0.3500],
+                [-0.4400, -0.3300, -0.0300, 0.1700],
+                [0.2500, -0.3200, -0.1300, 0.1100]]),
+      np.array([1, 0, 0, 1]),
+      1.75e+00, 3.22e+00),
+     (np.array([[-6.0004 - 6.9999j, 0.3637 - 0.3656j,
+                 -0.1880 + 0.4787j, 0.8785 - 0.2539j],
+                [0.0000 + 0.0000j, -5.0000 + 2.0060j,
+                 -0.0307 - 0.7217j, -0.2290 + 0.1313j],
+                [0.0000 + 0.0000j, 0.0000 + 0.0000j,
+                 7.9982 - 0.9964j, 0.9357 + 0.5359j],
+                [0.0000 + 0.0000j, 0.0000 + 0.0000j,
+                 0.0000 + 0.0000j, 3.0023 - 3.9998j]]),
+      np.array([[-0.8347 - 0.1364j, -0.0628 + 0.3806j,
+                 0.2765 - 0.0846j, 0.0633 - 0.2199j],
+                [0.0664 - 0.2968j, 0.2365 + 0.5240j,
+                 -0.5877 - 0.4208j, 0.0835 + 0.2183j],
+                [-0.0362 - 0.3215j, 0.3143 - 0.5473j,
+                 0.0576 - 0.5736j, 0.0057 - 0.4058j],
+                [0.0086 + 0.2958j, -0.3416 - 0.0757j,
+                 -0.1900 - 0.1600j, 0.8327 - 0.1868j]]),
+      np.array([[-3.9702 - 5.0406j, -4.1108 + 3.7002j,
+                 -0.3403 + 1.0098j, 1.2899 - 0.8590j],
+                [0.3397 - 1.5006j, 1.5201 - 0.4301j,
+                 1.8797 - 5.3804j, 3.3606 + 0.6498j],
+                [3.3101 - 3.8506j, 2.4996 + 3.4504j,
+                 0.8802 - 1.0802j, 0.6401 - 1.4800j],
+                [-1.0999 + 0.8199j, 1.8103 - 1.5905j,
+                 3.2502 + 1.3297j, 1.5701 - 3.4397j]]),
+      np.array([1, 0, 0, 1]),
+      1.02e+00, 1.82e-01)])
+def test_trsen_NAG(t, q, select, expect, expect_s, expect_sep):
+    """
+    This test implements the example found in the NAG manual,
+    f08qgc, f08quc.
+    """
+    # NAG manual provides accuracy up to 4 and 2 decimals
+    atol = 1e-4
+    atol2 = 1e-2
+    trsen, trsen_lwork = get_lapack_funcs(
+        ('trsen', 'trsen_lwork'), dtype=t.dtype)
+
+    lwork = _compute_lwork(trsen_lwork, select, t)
+
+    if t.dtype in COMPLEX_DTYPES:
+        result = trsen(select, t, q, lwork=lwork)
+    else:
+        result = trsen(select, t, q, lwork=lwork, liwork=lwork[1])
+    assert_equal(result[-1], 0)
+
+    t = result[0]
+    q = result[1]
+    if t.dtype in COMPLEX_DTYPES:
+        s = result[4]
+        sep = result[5]
+    else:
+        s = result[5]
+        sep = result[6]
+
+    assert_allclose(expect, q @ t @ q.conj().T, atol=atol)
+    assert_allclose(expect_s, 1 / s, atol=atol2)
+    assert_allclose(expect_sep, 1 / sep, atol=atol2)
+
+
+@pytest.mark.parametrize('dtype', DTYPES)
+def test_gges_tgsen(dtype):
+    if dtype == np.float32 and sys.platform == 'darwin':
+        pytest.xfail("gges[float32] broken for OpenBLAS on macOS, see gh-16949")
+
+    seed(1234)
+    atol = np.finfo(dtype).eps*100
+
+    n = 10
+    a = generate_random_dtype_array([n, n], dtype=dtype)
+    b = generate_random_dtype_array([n, n], dtype=dtype)
+
+    gges, tgsen, tgsen_lwork = get_lapack_funcs(
+        ('gges', 'tgsen', 'tgsen_lwork'), dtype=dtype)
+
+    result = gges(lambda x: None, a, b, overwrite_a=False, overwrite_b=False)
+    assert_equal(result[-1], 0)
+
+    s = result[0]
+    t = result[1]
+    q = result[-4]
+    z = result[-3]
+
+    d1 = s[0, 0] / t[0, 0]
+    d2 = s[6, 6] / t[6, 6]
+
+    if dtype in COMPLEX_DTYPES:
+        assert_allclose(s, np.triu(s), rtol=0, atol=atol)
+        assert_allclose(t, np.triu(t), rtol=0, atol=atol)
+
+    assert_allclose(q @ s @ z.conj().T, a, rtol=0, atol=atol)
+    assert_allclose(q @ t @ z.conj().T, b, rtol=0, atol=atol)
+
+    select = np.zeros(n)
+    select[6] = 1
+
+    lwork = _compute_lwork(tgsen_lwork, select, s, t)
+
+    # off-by-one error in LAPACK, see gh-issue #13397
+    lwork = (lwork[0]+1, lwork[1])
+
+    result = tgsen(select, s, t, q, z, lwork=lwork)
+    assert_equal(result[-1], 0)
+
+    s = result[0]
+    t = result[1]
+    q = result[-7]
+    z = result[-6]
+
+    if dtype in COMPLEX_DTYPES:
+        assert_allclose(s, np.triu(s), rtol=0, atol=atol)
+        assert_allclose(t, np.triu(t), rtol=0, atol=atol)
+
+    assert_allclose(q @ s @ z.conj().T, a, rtol=0, atol=atol)
+    assert_allclose(q @ t @ z.conj().T, b, rtol=0, atol=atol)
+
+    assert_allclose(s[0, 0] / t[0, 0], d2, rtol=0, atol=atol)
+    assert_allclose(s[1, 1] / t[1, 1], d1, rtol=0, atol=atol)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_matfuncs.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_matfuncs.py
new file mode 100644
index 00000000..af442e77
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_matfuncs.py
@@ -0,0 +1,974 @@
+#
+# Created by: Pearu Peterson, March 2002
+#
+""" Test functions for linalg.matfuncs module
+
+"""
+import random
+import functools
+
+import numpy as np
+from numpy import array, identity, dot, sqrt
+from numpy.testing import (assert_array_almost_equal, assert_allclose, assert_,
+                           assert_array_less, assert_array_equal, assert_warns)
+import pytest
+
+import scipy.linalg
+from scipy.linalg import (funm, signm, logm, sqrtm, fractional_matrix_power,
+                          expm, expm_frechet, expm_cond, norm, khatri_rao)
+from scipy.linalg import _matfuncs_inv_ssq
+import scipy.linalg._expm_frechet
+
+from scipy.optimize import minimize
+
+
+def _get_al_mohy_higham_2012_experiment_1():
+    """
+    Return the test matrix from Experiment (1) of [1]_.
+
+    References
+    ----------
+    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012)
+           "Improved Inverse Scaling and Squaring Algorithms
+           for the Matrix Logarithm."
+           SIAM Journal on Scientific Computing, 34 (4). C152-C169.
+           ISSN 1095-7197
+
+    """
+    A = np.array([
+        [3.2346e-1, 3e4, 3e4, 3e4],
+        [0, 3.0089e-1, 3e4, 3e4],
+        [0, 0, 3.2210e-1, 3e4],
+        [0, 0, 0, 3.0744e-1]], dtype=float)
+    return A
+
+
+class TestSignM:
+
+    def test_nils(self):
+        a = array([[29.2, -24.2, 69.5, 49.8, 7.],
+                   [-9.2, 5.2, -18., -16.8, -2.],
+                   [-10., 6., -20., -18., -2.],
+                   [-9.6, 9.6, -25.5, -15.4, -2.],
+                   [9.8, -4.8, 18., 18.2, 2.]])
+        cr = array([[11.94933333,-2.24533333,15.31733333,21.65333333,-2.24533333],
+                    [-3.84266667,0.49866667,-4.59066667,-7.18666667,0.49866667],
+                    [-4.08,0.56,-4.92,-7.6,0.56],
+                    [-4.03466667,1.04266667,-5.59866667,-7.02666667,1.04266667],
+                    [4.15733333,-0.50133333,4.90933333,7.81333333,-0.50133333]])
+        r = signm(a)
+        assert_array_almost_equal(r,cr)
+
+    def test_defective1(self):
+        a = array([[0.0,1,0,0],[1,0,1,0],[0,0,0,1],[0,0,1,0]])
+        signm(a, disp=False)
+        #XXX: what would be the correct result?
+
+    def test_defective2(self):
+        a = array((
+            [29.2,-24.2,69.5,49.8,7.0],
+            [-9.2,5.2,-18.0,-16.8,-2.0],
+            [-10.0,6.0,-20.0,-18.0,-2.0],
+            [-9.6,9.6,-25.5,-15.4,-2.0],
+            [9.8,-4.8,18.0,18.2,2.0]))
+        signm(a, disp=False)
+        #XXX: what would be the correct result?
+
+    def test_defective3(self):
+        a = array([[-2., 25., 0., 0., 0., 0., 0.],
+                   [0., -3., 10., 3., 3., 3., 0.],
+                   [0., 0., 2., 15., 3., 3., 0.],
+                   [0., 0., 0., 0., 15., 3., 0.],
+                   [0., 0., 0., 0., 3., 10., 0.],
+                   [0., 0., 0., 0., 0., -2., 25.],
+                   [0., 0., 0., 0., 0., 0., -3.]])
+        signm(a, disp=False)
+        #XXX: what would be the correct result?
+
+
+class TestLogM:
+
+    def test_nils(self):
+        a = array([[-2., 25., 0., 0., 0., 0., 0.],
+                   [0., -3., 10., 3., 3., 3., 0.],
+                   [0., 0., 2., 15., 3., 3., 0.],
+                   [0., 0., 0., 0., 15., 3., 0.],
+                   [0., 0., 0., 0., 3., 10., 0.],
+                   [0., 0., 0., 0., 0., -2., 25.],
+                   [0., 0., 0., 0., 0., 0., -3.]])
+        m = (identity(7)*3.1+0j)-a
+        logm(m, disp=False)
+        #XXX: what would be the correct result?
+
+    def test_al_mohy_higham_2012_experiment_1_logm(self):
+        # The logm completes the round trip successfully.
+        # Note that the expm leg of the round trip is badly conditioned.
+        A = _get_al_mohy_higham_2012_experiment_1()
+        A_logm, info = logm(A, disp=False)
+        A_round_trip = expm(A_logm)
+        assert_allclose(A_round_trip, A, rtol=5e-5, atol=1e-14)
+
+    def test_al_mohy_higham_2012_experiment_1_funm_log(self):
+        # The raw funm with np.log does not complete the round trip.
+        # Note that the expm leg of the round trip is badly conditioned.
+        A = _get_al_mohy_higham_2012_experiment_1()
+        A_funm_log, info = funm(A, np.log, disp=False)
+        A_round_trip = expm(A_funm_log)
+        assert_(not np.allclose(A_round_trip, A, rtol=1e-5, atol=1e-14))
+
+    def test_round_trip_random_float(self):
+        np.random.seed(1234)
+        for n in range(1, 6):
+            M_unscaled = np.random.randn(n, n)
+            for scale in np.logspace(-4, 4, 9):
+                M = M_unscaled * scale
+
+                # Eigenvalues are related to the branch cut.
+                W = np.linalg.eigvals(M)
+                err_msg = 'M:{0} eigvals:{1}'.format(M, W)
+
+                # Check sqrtm round trip because it is used within logm.
+                M_sqrtm, info = sqrtm(M, disp=False)
+                M_sqrtm_round_trip = M_sqrtm.dot(M_sqrtm)
+                assert_allclose(M_sqrtm_round_trip, M)
+
+                # Check logm round trip.
+                M_logm, info = logm(M, disp=False)
+                M_logm_round_trip = expm(M_logm)
+                assert_allclose(M_logm_round_trip, M, err_msg=err_msg)
+
+    def test_round_trip_random_complex(self):
+        np.random.seed(1234)
+        for n in range(1, 6):
+            M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n)
+            for scale in np.logspace(-4, 4, 9):
+                M = M_unscaled * scale
+                M_logm, info = logm(M, disp=False)
+                M_round_trip = expm(M_logm)
+                assert_allclose(M_round_trip, M)
+
+    def test_logm_type_preservation_and_conversion(self):
+        # The logm matrix function should preserve the type of a matrix
+        # whose eigenvalues are positive with zero imaginary part.
+        # Test this preservation for variously structured matrices.
+        complex_dtype_chars = ('F', 'D', 'G')
+        for matrix_as_list in (
+                [[1, 0], [0, 1]],
+                [[1, 0], [1, 1]],
+                [[2, 1], [1, 1]],
+                [[2, 3], [1, 2]]):
+
+            # check that the spectrum has the expected properties
+            W = scipy.linalg.eigvals(matrix_as_list)
+            assert_(not any(w.imag or w.real < 0 for w in W))
+
+            # check float type preservation
+            A = np.array(matrix_as_list, dtype=float)
+            A_logm, info = logm(A, disp=False)
+            assert_(A_logm.dtype.char not in complex_dtype_chars)
+
+            # check complex type preservation
+            A = np.array(matrix_as_list, dtype=complex)
+            A_logm, info = logm(A, disp=False)
+            assert_(A_logm.dtype.char in complex_dtype_chars)
+
+            # check float->complex type conversion for the matrix negation
+            A = -np.array(matrix_as_list, dtype=float)
+            A_logm, info = logm(A, disp=False)
+            assert_(A_logm.dtype.char in complex_dtype_chars)
+
+    def test_complex_spectrum_real_logm(self):
+        # This matrix has complex eigenvalues and real logm.
+        # Its output dtype depends on its input dtype.
+        M = [[1, 1, 2], [2, 1, 1], [1, 2, 1]]
+        for dt in float, complex:
+            X = np.array(M, dtype=dt)
+            w = scipy.linalg.eigvals(X)
+            assert_(1e-2 < np.absolute(w.imag).sum())
+            Y, info = logm(X, disp=False)
+            assert_(np.issubdtype(Y.dtype, np.inexact))
+            assert_allclose(expm(Y), X)
+
+    def test_real_mixed_sign_spectrum(self):
+        # These matrices have real eigenvalues with mixed signs.
+        # The output logm dtype is complex, regardless of input dtype.
+        for M in (
+                [[1, 0], [0, -1]],
+                [[0, 1], [1, 0]]):
+            for dt in float, complex:
+                A = np.array(M, dtype=dt)
+                A_logm, info = logm(A, disp=False)
+                assert_(np.issubdtype(A_logm.dtype, np.complexfloating))
+
+    def test_exactly_singular(self):
+        A = np.array([[0, 0], [1j, 1j]])
+        B = np.asarray([[1, 1], [0, 0]])
+        for M in A, A.T, B, B.T:
+            expected_warning = _matfuncs_inv_ssq.LogmExactlySingularWarning
+            L, info = assert_warns(expected_warning, logm, M, disp=False)
+            E = expm(L)
+            assert_allclose(E, M, atol=1e-14)
+
+    def test_nearly_singular(self):
+        M = np.array([[1e-100]])
+        expected_warning = _matfuncs_inv_ssq.LogmNearlySingularWarning
+        L, info = assert_warns(expected_warning, logm, M, disp=False)
+        E = expm(L)
+        assert_allclose(E, M, atol=1e-14)
+
+    def test_opposite_sign_complex_eigenvalues(self):
+        # See gh-6113
+        E = [[0, 1], [-1, 0]]
+        L = [[0, np.pi*0.5], [-np.pi*0.5, 0]]
+        assert_allclose(expm(L), E, atol=1e-14)
+        assert_allclose(logm(E), L, atol=1e-14)
+        E = [[1j, 4], [0, -1j]]
+        L = [[1j*np.pi*0.5, 2*np.pi], [0, -1j*np.pi*0.5]]
+        assert_allclose(expm(L), E, atol=1e-14)
+        assert_allclose(logm(E), L, atol=1e-14)
+        E = [[1j, 0], [0, -1j]]
+        L = [[1j*np.pi*0.5, 0], [0, -1j*np.pi*0.5]]
+        assert_allclose(expm(L), E, atol=1e-14)
+        assert_allclose(logm(E), L, atol=1e-14)
+
+
+class TestSqrtM:
+    def test_round_trip_random_float(self):
+        np.random.seed(1234)
+        for n in range(1, 6):
+            M_unscaled = np.random.randn(n, n)
+            for scale in np.logspace(-4, 4, 9):
+                M = M_unscaled * scale
+                M_sqrtm, info = sqrtm(M, disp=False)
+                M_sqrtm_round_trip = M_sqrtm.dot(M_sqrtm)
+                assert_allclose(M_sqrtm_round_trip, M)
+
+    def test_round_trip_random_complex(self):
+        np.random.seed(1234)
+        for n in range(1, 6):
+            M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n)
+            for scale in np.logspace(-4, 4, 9):
+                M = M_unscaled * scale
+                M_sqrtm, info = sqrtm(M, disp=False)
+                M_sqrtm_round_trip = M_sqrtm.dot(M_sqrtm)
+                assert_allclose(M_sqrtm_round_trip, M)
+
+    def test_bad(self):
+        # See https://web.archive.org/web/20051220232650/http://www.maths.man.ac.uk/~nareports/narep336.ps.gz
+        e = 2**-5
+        se = sqrt(e)
+        a = array([[1.0,0,0,1],
+                   [0,e,0,0],
+                   [0,0,e,0],
+                   [0,0,0,1]])
+        sa = array([[1,0,0,0.5],
+                    [0,se,0,0],
+                    [0,0,se,0],
+                    [0,0,0,1]])
+        n = a.shape[0]
+        assert_array_almost_equal(dot(sa,sa),a)
+        # Check default sqrtm.
+        esa = sqrtm(a, disp=False, blocksize=n)[0]
+        assert_array_almost_equal(dot(esa,esa),a)
+        # Check sqrtm with 2x2 blocks.
+        esa = sqrtm(a, disp=False, blocksize=2)[0]
+        assert_array_almost_equal(dot(esa,esa),a)
+
+    def test_sqrtm_type_preservation_and_conversion(self):
+        # The sqrtm matrix function should preserve the type of a matrix
+        # whose eigenvalues are nonnegative with zero imaginary part.
+        # Test this preservation for variously structured matrices.
+        complex_dtype_chars = ('F', 'D', 'G')
+        for matrix_as_list in (
+                [[1, 0], [0, 1]],
+                [[1, 0], [1, 1]],
+                [[2, 1], [1, 1]],
+                [[2, 3], [1, 2]],
+                [[1, 1], [1, 1]]):
+
+            # check that the spectrum has the expected properties
+            W = scipy.linalg.eigvals(matrix_as_list)
+            assert_(not any(w.imag or w.real < 0 for w in W))
+
+            # check float type preservation
+            A = np.array(matrix_as_list, dtype=float)
+            A_sqrtm, info = sqrtm(A, disp=False)
+            assert_(A_sqrtm.dtype.char not in complex_dtype_chars)
+
+            # check complex type preservation
+            A = np.array(matrix_as_list, dtype=complex)
+            A_sqrtm, info = sqrtm(A, disp=False)
+            assert_(A_sqrtm.dtype.char in complex_dtype_chars)
+
+            # check float->complex type conversion for the matrix negation
+            A = -np.array(matrix_as_list, dtype=float)
+            A_sqrtm, info = sqrtm(A, disp=False)
+            assert_(A_sqrtm.dtype.char in complex_dtype_chars)
+
+    def test_sqrtm_type_conversion_mixed_sign_or_complex_spectrum(self):
+        complex_dtype_chars = ('F', 'D', 'G')
+        for matrix_as_list in (
+                [[1, 0], [0, -1]],
+                [[0, 1], [1, 0]],
+                [[0, 1, 0], [0, 0, 1], [1, 0, 0]]):
+
+            # check that the spectrum has the expected properties
+            W = scipy.linalg.eigvals(matrix_as_list)
+            assert_(any(w.imag or w.real < 0 for w in W))
+
+            # check complex->complex
+            A = np.array(matrix_as_list, dtype=complex)
+            A_sqrtm, info = sqrtm(A, disp=False)
+            assert_(A_sqrtm.dtype.char in complex_dtype_chars)
+
+            # check float->complex
+            A = np.array(matrix_as_list, dtype=float)
+            A_sqrtm, info = sqrtm(A, disp=False)
+            assert_(A_sqrtm.dtype.char in complex_dtype_chars)
+
+    def test_blocksizes(self):
+        # Make sure the blocksizes are handled correctly when they do not
+        # divide n.
+        np.random.seed(1234)
+        for n in range(1, 8):
+            A = np.random.rand(n, n) + 1j*np.random.randn(n, n)
+            A_sqrtm_default, info = sqrtm(A, disp=False, blocksize=n)
+            assert_allclose(A, np.linalg.matrix_power(A_sqrtm_default, 2))
+            for blocksize in range(1, 10):
+                A_sqrtm_new, info = sqrtm(A, disp=False, blocksize=blocksize)
+                assert_allclose(A_sqrtm_default, A_sqrtm_new)
+
+    def test_al_mohy_higham_2012_experiment_1(self):
+        # Matrix square root of a tricky upper triangular matrix.
+        A = _get_al_mohy_higham_2012_experiment_1()
+        A_sqrtm, info = sqrtm(A, disp=False)
+        A_round_trip = A_sqrtm.dot(A_sqrtm)
+        assert_allclose(A_round_trip, A, rtol=1e-5)
+        assert_allclose(np.tril(A_round_trip), np.tril(A))
+
+    def test_strict_upper_triangular(self):
+        # This matrix has no square root.
+        for dt in int, float:
+            A = np.array([
+                [0, 3, 0, 0],
+                [0, 0, 3, 0],
+                [0, 0, 0, 3],
+                [0, 0, 0, 0]], dtype=dt)
+            A_sqrtm, info = sqrtm(A, disp=False)
+            assert_(np.isnan(A_sqrtm).all())
+
+    def test_weird_matrix(self):
+        # The square root of matrix B exists.
+        for dt in int, float:
+            A = np.array([
+                [0, 0, 1],
+                [0, 0, 0],
+                [0, 1, 0]], dtype=dt)
+            B = np.array([
+                [0, 1, 0],
+                [0, 0, 0],
+                [0, 0, 0]], dtype=dt)
+            assert_array_equal(B, A.dot(A))
+
+            # But scipy sqrtm is not clever enough to find it.
+            B_sqrtm, info = sqrtm(B, disp=False)
+            assert_(np.isnan(B_sqrtm).all())
+
+    def test_disp(self):
+        np.random.seed(1234)
+
+        A = np.random.rand(3, 3)
+        B = sqrtm(A, disp=True)
+        assert_allclose(B.dot(B), A)
+
+    def test_opposite_sign_complex_eigenvalues(self):
+        M = [[2j, 4], [0, -2j]]
+        R = [[1+1j, 2], [0, 1-1j]]
+        assert_allclose(np.dot(R, R), M, atol=1e-14)
+        assert_allclose(sqrtm(M), R, atol=1e-14)
+
+    def test_gh4866(self):
+        M = np.array([[1, 0, 0, 1],
+                      [0, 0, 0, 0],
+                      [0, 0, 0, 0],
+                      [1, 0, 0, 1]])
+        R = np.array([[sqrt(0.5), 0, 0, sqrt(0.5)],
+                      [0, 0, 0, 0],
+                      [0, 0, 0, 0],
+                      [sqrt(0.5), 0, 0, sqrt(0.5)]])
+        assert_allclose(np.dot(R, R), M, atol=1e-14)
+        assert_allclose(sqrtm(M), R, atol=1e-14)
+
+    def test_gh5336(self):
+        M = np.diag([2, 1, 0])
+        R = np.diag([sqrt(2), 1, 0])
+        assert_allclose(np.dot(R, R), M, atol=1e-14)
+        assert_allclose(sqrtm(M), R, atol=1e-14)
+
+    def test_gh7839(self):
+        M = np.zeros((2, 2))
+        R = np.zeros((2, 2))
+        assert_allclose(np.dot(R, R), M, atol=1e-14)
+        assert_allclose(sqrtm(M), R, atol=1e-14)
+
+    def test_data_size_preservation_uint_in_float_out(self):
+        M = np.zeros((10, 10), dtype=np.uint8)
+        # input bit size is 8, but minimum float bit size is 16
+        assert sqrtm(M).dtype == np.float16
+        M = np.zeros((10, 10), dtype=np.uint16)
+        assert sqrtm(M).dtype == np.float16
+        M = np.zeros((10, 10), dtype=np.uint32)
+        assert sqrtm(M).dtype == np.float32
+        M = np.zeros((10, 10), dtype=np.uint64)
+        assert sqrtm(M).dtype == np.float64
+
+    def test_data_size_preservation_int_in_float_out(self):
+        M = np.zeros((10, 10), dtype=np.int8)
+        # input bit size is 8, but minimum float bit size is 16
+        assert sqrtm(M).dtype == np.float16
+        M = np.zeros((10, 10), dtype=np.int16)
+        assert sqrtm(M).dtype == np.float16
+        M = np.zeros((10, 10), dtype=np.int32)
+        assert sqrtm(M).dtype == np.float32
+        M = np.zeros((10, 10), dtype=np.int64)
+        assert sqrtm(M).dtype == np.float64
+
+    def test_data_size_preservation_int_in_comp_out(self):
+        M = np.array([[2, 4], [0, -2]], dtype=np.int8)
+        # input bit size is 8, but minimum complex bit size is 64
+        assert sqrtm(M).dtype == np.complex64
+        M = np.array([[2, 4], [0, -2]], dtype=np.int16)
+        # input bit size is 16, but minimum complex bit size is 64
+        assert sqrtm(M).dtype == np.complex64
+        M = np.array([[2, 4], [0, -2]], dtype=np.int32)
+        assert sqrtm(M).dtype == np.complex64
+        M = np.array([[2, 4], [0, -2]], dtype=np.int64)
+        assert sqrtm(M).dtype == np.complex128
+
+    def test_data_size_preservation_float_in_float_out(self):
+        M = np.zeros((10, 10), dtype=np.float16)
+        assert sqrtm(M).dtype == np.float16
+        M = np.zeros((10, 10), dtype=np.float32)
+        assert sqrtm(M).dtype == np.float32
+        M = np.zeros((10, 10), dtype=np.float64)
+        assert sqrtm(M).dtype == np.float64
+        if hasattr(np, 'float128'):
+            M = np.zeros((10, 10), dtype=np.float128)
+            assert sqrtm(M).dtype == np.float128
+
+    def test_data_size_preservation_float_in_comp_out(self):
+        M = np.array([[2, 4], [0, -2]], dtype=np.float16)
+        # input bit size is 16, but minimum complex bit size is 64
+        assert sqrtm(M).dtype == np.complex64
+        M = np.array([[2, 4], [0, -2]], dtype=np.float32)
+        assert sqrtm(M).dtype == np.complex64
+        M = np.array([[2, 4], [0, -2]], dtype=np.float64)
+        assert sqrtm(M).dtype == np.complex128
+        if hasattr(np, 'float128') and hasattr(np, 'complex256'):
+            M = np.array([[2, 4], [0, -2]], dtype=np.float128)
+            assert sqrtm(M).dtype == np.complex256
+
+    def test_data_size_preservation_comp_in_comp_out(self):
+        M = np.array([[2j, 4], [0, -2j]], dtype=np.complex64)
+        assert sqrtm(M).dtype == np.complex128
+        if hasattr(np, 'complex256'):
+            M = np.array([[2j, 4], [0, -2j]], dtype=np.complex128)
+            assert sqrtm(M).dtype == np.complex256
+            M = np.array([[2j, 4], [0, -2j]], dtype=np.complex256)
+            assert sqrtm(M).dtype == np.complex256
+
+
+class TestFractionalMatrixPower:
+    def test_round_trip_random_complex(self):
+        np.random.seed(1234)
+        for p in range(1, 5):
+            for n in range(1, 5):
+                M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n)
+                for scale in np.logspace(-4, 4, 9):
+                    M = M_unscaled * scale
+                    M_root = fractional_matrix_power(M, 1/p)
+                    M_round_trip = np.linalg.matrix_power(M_root, p)
+                    assert_allclose(M_round_trip, M)
+
+    def test_round_trip_random_float(self):
+        # This test is more delicate because it can hit the branch cut;
+        # this happens when the matrix has an eigenvalue with zero
+        # imaginary part and negative real part, which means that the
+        # principal branch does not exist.
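+        # For example, a 1x1 matrix [[-1.0]] has eigenvalue -1 on the
+        # branch cut: it has no real square root, so the returned root
+        # must be complex.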
+        np.random.seed(1234)
+        for p in range(1, 5):
+            for n in range(1, 5):
+                M_unscaled = np.random.randn(n, n)
+                for scale in np.logspace(-4, 4, 9):
+                    M = M_unscaled * scale
+                    M_root = fractional_matrix_power(M, 1/p)
+                    M_round_trip = np.linalg.matrix_power(M_root, p)
+                    assert_allclose(M_round_trip, M)
+
+    def test_larger_abs_fractional_matrix_powers(self):
+        np.random.seed(1234)
+        for n in (2, 3, 5):
+            for i in range(10):
+                M = np.random.randn(n, n) + 1j * np.random.randn(n, n)
+                M_one_fifth = fractional_matrix_power(M, 0.2)
+                # Test the round trip.
+                M_round_trip = np.linalg.matrix_power(M_one_fifth, 5)
+                assert_allclose(M, M_round_trip)
+                # Test a large abs fractional power.
+                X = fractional_matrix_power(M, -5.4)
+                Y = np.linalg.matrix_power(M_one_fifth, -27)
+                assert_allclose(X, Y)
+                # Test another large abs fractional power.
+                X = fractional_matrix_power(M, 3.8)
+                Y = np.linalg.matrix_power(M_one_fifth, 19)
+                assert_allclose(X, Y)
+
+    def test_random_matrices_and_powers(self):
+        # Each independent iteration of this fuzz test picks random parameters.
+        # It tries to hit some edge cases.
+        np.random.seed(1234)
+        nsamples = 20
+        for i in range(nsamples):
+            # Sample a matrix size and a random real power.
+            n = random.randrange(1, 5)
+            p = np.random.randn()
+
+            # Sample a random real or complex matrix.
+            matrix_scale = np.exp(random.randrange(-4, 5))
+            A = np.random.randn(n, n)
+            if random.choice((True, False)):
+                A = A + 1j * np.random.randn(n, n)
+            A = A * matrix_scale
+
+            # Check a couple of analytically equivalent ways
+            # to compute the fractional matrix power.
+            # These can be compared because they both use the principal branch.
+            A_power = fractional_matrix_power(A, p)
+            A_logm, info = logm(A, disp=False)
+            A_power_expm_logm = expm(A_logm * p)
+            assert_allclose(A_power, A_power_expm_logm)
+
+    def test_al_mohy_higham_2012_experiment_1(self):
+        # Fractional powers of a tricky upper triangular matrix.
+        A = _get_al_mohy_higham_2012_experiment_1()
+
+        # Test remainder matrix power.
+        A_funm_sqrt, info = funm(A, np.sqrt, disp=False)
+        A_sqrtm, info = sqrtm(A, disp=False)
+        A_rem_power = _matfuncs_inv_ssq._remainder_matrix_power(A, 0.5)
+        A_power = fractional_matrix_power(A, 0.5)
+        assert_array_equal(A_rem_power, A_power)
+        assert_allclose(A_sqrtm, A_power)
+        assert_allclose(A_sqrtm, A_funm_sqrt)
+
+        # Test more fractional powers.
+        for p in (1/2, 5/3):
+            A_power = fractional_matrix_power(A, p)
+            A_round_trip = fractional_matrix_power(A_power, 1/p)
+            assert_allclose(A_round_trip, A, rtol=1e-2)
+            assert_allclose(np.tril(A_round_trip, 1), np.tril(A, 1))
+
+    def test_briggs_helper_function(self):
+        np.random.seed(1234)
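+        # _briggs_helper_function(a, k) computes a**(2**-k) - 1; a direct
+        # evaluation would suffer cancellation for a near 1, which is
+        # presumably why the helper exists (cf. Al-Mohy & Higham 2012)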
+        for a in np.random.randn(10) + 1j * np.random.randn(10):
+            for k in range(5):
+                x_observed = _matfuncs_inv_ssq._briggs_helper_function(a, k)
+                x_expected = a ** np.exp2(-k) - 1
+                assert_allclose(x_observed, x_expected)
+
+    def test_type_preservation_and_conversion(self):
+        # The fractional_matrix_power matrix function should preserve
+        # the type of a matrix whose eigenvalues
+        # are positive with zero imaginary part.
+        # Test this preservation for variously structured matrices.
+        complex_dtype_chars = ('F', 'D', 'G')
+        for matrix_as_list in (
+                [[1, 0], [0, 1]],
+                [[1, 0], [1, 1]],
+                [[2, 1], [1, 1]],
+                [[2, 3], [1, 2]]):
+
+            # check that the spectrum has the expected properties
+            W = scipy.linalg.eigvals(matrix_as_list)
+            assert_(not any(w.imag or w.real < 0 for w in W))
+
+            # Check various positive and negative powers
+            # with absolute values bigger and smaller than 1.
+            for p in (-2.4, -0.9, 0.2, 3.3):
+
+                # check float type preservation
+                A = np.array(matrix_as_list, dtype=float)
+                A_power = fractional_matrix_power(A, p)
+                assert_(A_power.dtype.char not in complex_dtype_chars)
+
+                # check complex type preservation
+                A = np.array(matrix_as_list, dtype=complex)
+                A_power = fractional_matrix_power(A, p)
+                assert_(A_power.dtype.char in complex_dtype_chars)
+
+                # check float->complex for the matrix negation
+                A = -np.array(matrix_as_list, dtype=float)
+                A_power = fractional_matrix_power(A, p)
+                assert_(A_power.dtype.char in complex_dtype_chars)
+
+    def test_type_conversion_mixed_sign_or_complex_spectrum(self):
+        complex_dtype_chars = ('F', 'D', 'G')
+        for matrix_as_list in (
+                [[1, 0], [0, -1]],
+                [[0, 1], [1, 0]],
+                [[0, 1, 0], [0, 0, 1], [1, 0, 0]]):
+
+            # check that the spectrum has the expected properties
+            W = scipy.linalg.eigvals(matrix_as_list)
+            assert_(any(w.imag or w.real < 0 for w in W))
+
+            # Check various positive and negative powers
+            # with absolute values bigger and smaller than 1.
+            for p in (-2.4, -0.9, 0.2, 3.3):
+
+                # check complex->complex
+                A = np.array(matrix_as_list, dtype=complex)
+                A_power = fractional_matrix_power(A, p)
+                assert_(A_power.dtype.char in complex_dtype_chars)
+
+                # check float->complex
+                A = np.array(matrix_as_list, dtype=float)
+                A_power = fractional_matrix_power(A, p)
+                assert_(A_power.dtype.char in complex_dtype_chars)
+
+    @pytest.mark.xfail(reason='Too unstable across LAPACKs.')
+    def test_singular(self):
+        # Negative fractional powers do not work with singular matrices.
+        for matrix_as_list in (
+                [[0, 0], [0, 0]],
+                [[1, 1], [1, 1]],
+                [[1, 2], [3, 6]],
+                [[0, 0, 0], [0, 1, 1], [0, -1, 1]]):
+
+            # Check fractional powers both for float and for complex types.
+            for newtype in (float, complex):
+                A = np.array(matrix_as_list, dtype=newtype)
+                for p in (-0.7, -0.9, -2.4, -1.3):
+                    A_power = fractional_matrix_power(A, p)
+                    assert_(np.isnan(A_power).all())
+                for p in (0.2, 1.43):
+                    A_power = fractional_matrix_power(A, p)
+                    A_round_trip = fractional_matrix_power(A_power, 1/p)
+                    assert_allclose(A_round_trip, A)
+
+    def test_opposite_sign_complex_eigenvalues(self):
+        M = [[2j, 4], [0, -2j]]
+        R = [[1+1j, 2], [0, 1-1j]]
+        assert_allclose(np.dot(R, R), M, atol=1e-14)
+        assert_allclose(fractional_matrix_power(M, 0.5), R, atol=1e-14)
+
+
+class TestExpM:
+    def test_zero(self):
+        a = array([[0.,0],[0,0]])
+        assert_array_almost_equal(expm(a),[[1,0],[0,1]])
+
+    def test_single_elt(self):
+        elt = expm(1)
+        assert_allclose(elt, np.array([[np.e]]))
+
+    def test_empty_matrix_input(self):
+        # handle gh-11082
+        A = np.zeros((0, 0))
+        result = expm(A)
+        assert result.size == 0
+
+    def test_2x2_input(self):
+        E = np.e
+        a = array([[1, 4], [1, 1]])
+        aa = (E**4 + 1)/(2*E)
+        bb = (E**4 - 1)/E
+        assert_allclose(expm(a), array([[aa, bb], [bb/4, aa]]))
+        assert expm(a.astype(np.complex64)).dtype.char == 'F'
+        assert expm(a.astype(np.float32)).dtype.char == 'f'
+
+    def test_nx2x2_input(self):
+        E = np.e
+        # These are integer matrices with integer eigenvalues
+        a = np.array([[[1, 4], [1, 1]],
+                      [[1, 3], [1, -1]],
+                      [[1, 3], [4, 5]],
+                      [[1, 3], [5, 3]],
+                      [[4, 5], [-3, -4]]], order='F')
+        # Exact results are computed symbolically
+        a_res = np.array([
+                          [[(E**4+1)/(2*E), (E**4-1)/E],
+                           [(E**4-1)/4/E, (E**4+1)/(2*E)]],
+                          [[1/(4*E**2)+(3*E**2)/4, (3*E**2)/4-3/(4*E**2)],
+                           [E**2/4-1/(4*E**2), 3/(4*E**2)+E**2/4]],
+                          [[3/(4*E)+E**7/4, -3/(8*E)+(3*E**7)/8],
+                           [-1/(2*E)+E**7/2, 1/(4*E)+(3*E**7)/4]],
+                          [[5/(8*E**2)+(3*E**6)/8, -3/(8*E**2)+(3*E**6)/8],
+                           [-5/(8*E**2)+(5*E**6)/8, 3/(8*E**2)+(5*E**6)/8]],
+                          [[-3/(2*E)+(5*E)/2, -5/(2*E)+(5*E)/2],
+                           [3/(2*E)-(3*E)/2, 5/(2*E)-(3*E)/2]]
+                         ])
+        assert_allclose(expm(a), a_res)
+
+
+class TestExpmFrechet:
+
+    def test_expm_frechet(self):
+        # a test of the basic functionality
+        M = np.array([
+            [1, 2, 3, 4],
+            [5, 6, 7, 8],
+            [0, 0, 1, 2],
+            [0, 0, 5, 6],
+            ], dtype=float)
+        A = np.array([
+            [1, 2],
+            [5, 6],
+            ], dtype=float)
+        E = np.array([
+            [3, 4],
+            [7, 8],
+            ], dtype=float)
+        expected_expm = scipy.linalg.expm(A)
+        expected_frechet = scipy.linalg.expm(M)[:2, 2:]
+        for kwargs in ({}, {'method':'SPS'}, {'method':'blockEnlarge'}):
+            observed_expm, observed_frechet = expm_frechet(A, E, **kwargs)
+            assert_allclose(expected_expm, observed_expm)
+            assert_allclose(expected_frechet, observed_frechet)
+
+    def test_small_norm_expm_frechet(self):
+        # methodically test matrices with a range of norms, for better coverage
+        M_original = np.array([
+            [1, 2, 3, 4],
+            [5, 6, 7, 8],
+            [0, 0, 1, 2],
+            [0, 0, 5, 6],
+            ], dtype=float)
+        A_original = np.array([
+            [1, 2],
+            [5, 6],
+            ], dtype=float)
+        E_original = np.array([
+            [3, 4],
+            [7, 8],
+            ], dtype=float)
+        A_original_norm_1 = scipy.linalg.norm(A_original, 1)
+        selected_m_list = [1, 3, 5, 7, 9, 11, 13, 15]
+        m_neighbor_pairs = zip(selected_m_list[:-1], selected_m_list[1:])
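+        # ell_table_61 (presumably Table 6.1 of the expm Frechet
+        # reference) lists the 1-norm thresholds at which the Pade order m
+        # changes; targeting norms midway between consecutive thresholds
+        # exercises each order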
+        for ma, mb in m_neighbor_pairs:
+            ell_a = scipy.linalg._expm_frechet.ell_table_61[ma]
+            ell_b = scipy.linalg._expm_frechet.ell_table_61[mb]
+            target_norm_1 = 0.5 * (ell_a + ell_b)
+            scale = target_norm_1 / A_original_norm_1
+            M = scale * M_original
+            A = scale * A_original
+            E = scale * E_original
+            expected_expm = scipy.linalg.expm(A)
+            expected_frechet = scipy.linalg.expm(M)[:2, 2:]
+            observed_expm, observed_frechet = expm_frechet(A, E)
+            assert_allclose(expected_expm, observed_expm)
+            assert_allclose(expected_frechet, observed_frechet)
+
+    def test_fuzz(self):
+        # try a bunch of crazy inputs
+        rfuncs = (
+                np.random.uniform,
+                np.random.normal,
+                np.random.standard_cauchy,
+                np.random.exponential)
+        ntests = 100
+        for i in range(ntests):
+            rfunc = random.choice(rfuncs)
+            target_norm_1 = random.expovariate(1.0)
+            n = random.randrange(2, 16)
+            A_original = rfunc(size=(n,n))
+            E_original = rfunc(size=(n,n))
+            A_original_norm_1 = scipy.linalg.norm(A_original, 1)
+            scale = target_norm_1 / A_original_norm_1
+            A = scale * A_original
+            E = scale * E_original
+            M = np.vstack([
+                np.hstack([A, E]),
+                np.hstack([np.zeros_like(A), A])])
+            expected_expm = scipy.linalg.expm(A)
+            expected_frechet = scipy.linalg.expm(M)[:n, n:]
+            observed_expm, observed_frechet = expm_frechet(A, E)
+            assert_allclose(expected_expm, observed_expm, atol=5e-8)
+            assert_allclose(expected_frechet, observed_frechet, atol=1e-7)
+
+    def test_problematic_matrix(self):
+        # this test case uncovered a bug which has since been fixed
+        A = np.array([
+                [1.50591997, 1.93537998],
+                [0.41203263, 0.23443516],
+                ], dtype=float)
+        E = np.array([
+                [1.87864034, 2.07055038],
+                [1.34102727, 0.67341123],
+                ], dtype=float)
+        sps_expm, sps_frechet = expm_frechet(
+                A, E, method='SPS')
+        blockEnlarge_expm, blockEnlarge_frechet = expm_frechet(
+                A, E, method='blockEnlarge')
+        assert_allclose(sps_expm, blockEnlarge_expm)
+        assert_allclose(sps_frechet, blockEnlarge_frechet)
+
+    @pytest.mark.slow
+    @pytest.mark.skip(reason='this test is deliberately slow')
+    def test_medium_matrix(self):
+        # profile this to see the speed difference
+        n = 1000
+        A = np.random.exponential(size=(n, n))
+        E = np.random.exponential(size=(n, n))
+        sps_expm, sps_frechet = expm_frechet(
+                A, E, method='SPS')
+        blockEnlarge_expm, blockEnlarge_frechet = expm_frechet(
+                A, E, method='blockEnlarge')
+        assert_allclose(sps_expm, blockEnlarge_expm)
+        assert_allclose(sps_frechet, blockEnlarge_frechet)
+
+
+def _help_expm_cond_search(A, A_norm, X, X_norm, eps, p):
+    p = np.reshape(p, A.shape)
+    p_norm = norm(p)
+    perturbation = eps * p * (A_norm / p_norm)
+    X_prime = expm(A + perturbation)
+    scaled_relative_error = norm(X_prime - X) / (X_norm * eps)
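+    # Negated so that minimize() in the fuzz test below maximizes the error.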
+    return -scaled_relative_error
+
+
+def _normalized_like(A, B):
+    return A * (scipy.linalg.norm(B) / scipy.linalg.norm(A))
+
+
+def _relative_error(f, A, perturbation):
+    X = f(A)
+    X_prime = f(A + perturbation)
+    return norm(X_prime - X) / norm(X)
+
+
+class TestExpmConditionNumber:
+    def test_expm_cond_smoke(self):
+        np.random.seed(1234)
+        for n in range(1, 4):
+            A = np.random.randn(n, n)
+            kappa = expm_cond(A)
+            assert_array_less(0, kappa)
+
+    def test_expm_bad_condition_number(self):
+        A = np.array([
+            [-1.128679820, 9.614183771e4, -4.524855739e9, 2.924969411e14],
+            [0, -1.201010529, 9.634696872e4, -4.681048289e9],
+            [0, 0, -1.132893222, 9.532491830e4],
+            [0, 0, 0, -1.179475332],
+            ])
+        kappa = expm_cond(A)
+        assert_array_less(1e36, kappa)
+
+    def test_univariate(self):
+        np.random.seed(12345)
+        for x in np.linspace(-5, 5, num=11):
+            A = np.array([[x]])
+            assert_allclose(expm_cond(A), abs(x))
+        for x in np.logspace(-2, 2, num=11):
+            A = np.array([[x]])
+            assert_allclose(expm_cond(A), abs(x))
+        for i in range(10):
+            A = np.random.randn(1, 1)
+            assert_allclose(expm_cond(A), np.absolute(A)[0, 0])
+
+    @pytest.mark.slow
+    def test_expm_cond_fuzz(self):
+        np.random.seed(12345)
+        eps = 1e-5
+        nsamples = 10
+        for i in range(nsamples):
+            n = np.random.randint(2, 5)
+            A = np.random.randn(n, n)
+            A_norm = scipy.linalg.norm(A)
+            X = expm(A)
+            X_norm = scipy.linalg.norm(X)
+            kappa = expm_cond(A)
+
+            # Look for the small perturbation that gives the greatest
+            # relative error.
+            f = functools.partial(_help_expm_cond_search,
+                    A, A_norm, X, X_norm, eps)
+            guess = np.ones(n*n)
+            out = minimize(f, guess, method='L-BFGS-B')
+            xopt = out.x
+            yopt = f(xopt)
+            p_best = eps * _normalized_like(np.reshape(xopt, A.shape), A)
+            p_best_relerr = _relative_error(expm, A, p_best)
+            assert_allclose(p_best_relerr, -yopt * eps)
+
+            # Check that the identified perturbation indeed gives greater
+            # relative error than random perturbations with similar norms.
+            for j in range(5):
+                p_rand = eps * _normalized_like(np.random.randn(*A.shape), A)
+                assert_allclose(norm(p_best), norm(p_rand))
+                p_rand_relerr = _relative_error(expm, A, p_rand)
+                assert_array_less(p_rand_relerr, p_best_relerr)
+
+            # The greatest relative error should not be much greater than
+            # eps times the condition number kappa.
+            # In the limit as eps approaches zero it should never be greater.
+            assert_array_less(p_best_relerr, (1 + 2*eps) * eps * kappa)
+
+
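+def _expm_cond_first_order_sketch():
+    # Illustrative sketch (not an upstream test): to first order in eps, a
+    # perturbation of relative Frobenius norm eps changes expm(A) by a
+    # relative error of at most eps * expm_cond(A); the fuzz test above
+    # searches for the perturbation attaining that bound.
+    rng = np.random.RandomState(0)
+    A = rng.randn(3, 3)
+    eps = 1e-7
+    p = eps * _normalized_like(rng.randn(3, 3), A)
+    relerr = _relative_error(expm, A, p)
+    # A factor of 2 absorbs the O(eps**2) remainder.
+    assert relerr <= 2 * eps * expm_cond(A)
+
+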
+class TestKhatriRao:
+
+    def test_basic(self):
+        a = khatri_rao(array([[1, 2], [3, 4]]),
+                       array([[5, 6], [7, 8]]))
+
+        assert_array_equal(a, array([[5, 12],
+                                     [7, 16],
+                                     [15, 24],
+                                     [21, 32]]))
+
+        b = khatri_rao(np.empty([2, 2]), np.empty([2, 2]))
+        assert_array_equal(b.shape, (4, 2))
+
+    def test_number_of_columns_equality(self):
+        with pytest.raises(ValueError):
+            a = array([[1, 2, 3],
+                       [4, 5, 6]])
+            b = array([[1, 2],
+                       [3, 4]])
+            khatri_rao(a, b)
+
+    def test_to_assure_2d_array(self):
+        with pytest.raises(ValueError):
+            # both arrays are 1-D
+            a = array([1, 2, 3])
+            b = array([4, 5, 6])
+            khatri_rao(a, b)
+
+        with pytest.raises(ValueError):
+            # first array is 1-D
+            a = array([1, 2, 3])
+            b = array([
+                [1, 2, 3],
+                [4, 5, 6]
+            ])
+            khatri_rao(a, b)
+
+        with pytest.raises(ValueError):
+            # second array is 1-D
+            a = array([
+                [1, 2, 3],
+                [7, 8, 9]
+            ])
+            b = array([4, 5, 6])
+            khatri_rao(a, b)
+
+    def test_equality_of_two_equations(self):
+        a = array([[1, 2], [3, 4]])
+        b = array([[5, 6], [7, 8]])
+
+        res1 = khatri_rao(a, b)
+        res2 = np.vstack([np.kron(a[:, k], b[:, k])
+                          for k in range(b.shape[1])]).T
+
+        assert_array_equal(res1, res2)
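+
+
+def _khatri_rao_shape_sketch():
+    # Illustrative sketch (not an upstream test): for a of shape (m, n) and
+    # b of shape (p, n), khatri_rao(a, b) stacks the column-wise Kronecker
+    # products into a matrix of shape (m * p, n).
+    a = np.ones((3, 4))
+    b = np.ones((5, 4))
+    assert khatri_rao(a, b).shape == (3 * 5, 4)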
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_matmul_toeplitz.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_matmul_toeplitz.py
new file mode 100644
index 00000000..b480e9d3
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_matmul_toeplitz.py
@@ -0,0 +1,125 @@
+"""Test functions for linalg.matmul_toeplitz function
+"""
+
+import numpy as np
+from scipy.linalg import toeplitz, matmul_toeplitz
+
+from pytest import raises as assert_raises
+from numpy.testing import assert_allclose
+
+
+class TestMatmulToeplitz:
+
+    def setup_method(self):
+        self.rng = np.random.RandomState(42)
+        self.tolerance = 1.5e-13
+
+    def test_real(self):
+        cases = []
+
+        n = 1
+        c = self.rng.normal(size=n)
+        r = self.rng.normal(size=n)
+        x = self.rng.normal(size=(n, 1))
+        cases.append((x, c, r, False))
+
+        n = 2
+        c = self.rng.normal(size=n)
+        r = self.rng.normal(size=n)
+        x = self.rng.normal(size=(n, 1))
+        cases.append((x, c, r, False))
+
+        n = 101
+        c = self.rng.normal(size=n)
+        r = self.rng.normal(size=n)
+        x = self.rng.normal(size=(n, 1))
+        cases.append((x, c, r, True))
+
+        n = 1000
+        c = self.rng.normal(size=n)
+        r = self.rng.normal(size=n)
+        x = self.rng.normal(size=(n, 1))
+        cases.append((x, c, r, False))
+
+        n = 100
+        c = self.rng.normal(size=n)
+        r = self.rng.normal(size=n)
+        x = self.rng.normal(size=(n, self.rng.randint(1, 10)))
+        cases.append((x, c, r, False))
+
+        n = 100
+        c = self.rng.normal(size=(n, 1))
+        r = self.rng.normal(size=(n, 1))
+        x = self.rng.normal(size=(n, self.rng.randint(1, 10)))
+        cases.append((x, c, r, True))
+
+        n = 100
+        c = self.rng.normal(size=(n, 1))
+        r = None
+        x = self.rng.normal(size=(n, self.rng.randint(1, 10)))
+        cases.append((x, c, r, True, -1))
+
+        n = 100
+        c = self.rng.normal(size=(n, 1))
+        r = None
+        x = self.rng.normal(size=n)
+        cases.append((x, c, r, False))
+
+        n = 101
+        c = self.rng.normal(size=n)
+        r = self.rng.normal(size=n-27)
+        x = self.rng.normal(size=(n-27, 1))
+        cases.append((x, c, r, True))
+
+        n = 100
+        c = self.rng.normal(size=n)
+        r = self.rng.normal(size=n//4)
+        x = self.rng.normal(size=(n//4, self.rng.randint(1, 10)))
+        cases.append((x, c, r, True))
+
+        for case in cases:
+            self.do(*case)
+
+    def test_complex(self):
+        n = 127
+        c = self.rng.normal(size=(n, 1)) + self.rng.normal(size=(n, 1))*1j
+        r = self.rng.normal(size=(n, 1)) + self.rng.normal(size=(n, 1))*1j
+        x = self.rng.normal(size=(n, 3)) + self.rng.normal(size=(n, 3))*1j
+        self.do(x, c, r, False)
+
+        n = 100
+        c = self.rng.normal(size=(n, 1)) + self.rng.normal(size=(n, 1))*1j
+        r = self.rng.normal(size=(n//2, 1)) +\
+            self.rng.normal(size=(n//2, 1))*1j
+        x = self.rng.normal(size=(n//2, 3)) +\
+            self.rng.normal(size=(n//2, 3))*1j
+        self.do(x, c, r, False)
+
+    def test_exceptions(self):
+
+        n = 100
+        c = self.rng.normal(size=n)
+        r = self.rng.normal(size=2*n)
+        x = self.rng.normal(size=n)
+        assert_raises(ValueError, matmul_toeplitz, (c, r), x, True)
+
+        n = 100
+        c = self.rng.normal(size=n)
+        r = self.rng.normal(size=n)
+        x = self.rng.normal(size=n-1)
+        assert_raises(ValueError, matmul_toeplitz, (c, r), x, True)
+
+        n = 100
+        c = self.rng.normal(size=n)
+        r = self.rng.normal(size=n//2)
+        x = self.rng.normal(size=n//2-1)
+        assert_raises(ValueError, matmul_toeplitz, (c, r), x, True)
+
+    # For Toeplitz matrices, matmul_toeplitz() should be equivalent to @.
+    def do(self, x, c, r=None, check_finite=False, workers=None):
+        if r is None:
+            actual = matmul_toeplitz(c, x, check_finite, workers)
+        else:
+            actual = matmul_toeplitz((c, r), x, check_finite)
+        desired = toeplitz(c, r) @ x
+        assert_allclose(actual, desired,
+            rtol=self.tolerance, atol=self.tolerance)
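+
+
+# Illustrative sketch (an assumption about the underlying approach, not a
+# verbatim copy of scipy's implementation): matmul_toeplitz can evaluate the
+# product in O(n log n) by embedding the Toeplitz matrix in a circulant
+# matrix and multiplying with the FFT. For real c, r and a 1-D real x:
+def _toeplitz_matvec_fft_sketch(c, r, x):
+    c, r, x = (np.asarray(v, dtype=float) for v in (c, r, x))
+    n, m = len(c), len(r)
+    # First column of the circulant embedding: the first column of T,
+    # then the reversed tail of its first row.
+    v = np.concatenate((c, r[:0:-1]))
+    pad = np.concatenate((x, np.zeros(len(v) - m)))
+    # Circulant multiplication is a circular convolution, i.e. a pointwise
+    # product in the Fourier domain; the first n entries recover T @ x.
+    return np.fft.ifft(np.fft.fft(v) * np.fft.fft(pad))[:n].real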
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_misc.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_misc.py
new file mode 100644
index 00000000..1c10923e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_misc.py
@@ -0,0 +1,5 @@
+from scipy.linalg import norm
+
+
+def test_norm():
+    assert norm([]) == 0.0
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_procrustes.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_procrustes.py
new file mode 100644
index 00000000..f41fd0e2
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_procrustes.py
@@ -0,0 +1,191 @@
+from itertools import product, permutations
+
+import numpy as np
+from numpy.testing import assert_array_less, assert_allclose
+from pytest import raises as assert_raises
+
+from scipy.linalg import inv, eigh, norm
+from scipy.linalg import orthogonal_procrustes
+from scipy.sparse._sputils import matrix
+
+
+def test_orthogonal_procrustes_ndim_too_large():
+    np.random.seed(1234)
+    A = np.random.randn(3, 4, 5)
+    B = np.random.randn(3, 4, 5)
+    assert_raises(ValueError, orthogonal_procrustes, A, B)
+
+
+def test_orthogonal_procrustes_ndim_too_small():
+    np.random.seed(1234)
+    A = np.random.randn(3)
+    B = np.random.randn(3)
+    assert_raises(ValueError, orthogonal_procrustes, A, B)
+
+
+def test_orthogonal_procrustes_shape_mismatch():
+    np.random.seed(1234)
+    shapes = ((3, 3), (3, 4), (4, 3), (4, 4))
+    for a, b in permutations(shapes, 2):
+        A = np.random.randn(*a)
+        B = np.random.randn(*b)
+        assert_raises(ValueError, orthogonal_procrustes, A, B)
+
+
+def test_orthogonal_procrustes_checkfinite_exception():
+    np.random.seed(1234)
+    m, n = 2, 3
+    A_good = np.random.randn(m, n)
+    B_good = np.random.randn(m, n)
+    for bad_value in np.inf, -np.inf, np.nan:
+        A_bad = A_good.copy()
+        A_bad[1, 2] = bad_value
+        B_bad = B_good.copy()
+        B_bad[1, 2] = bad_value
+        for A, B in ((A_good, B_bad), (A_bad, B_good), (A_bad, B_bad)):
+            assert_raises(ValueError, orthogonal_procrustes, A, B)
+
+
+def test_orthogonal_procrustes_scale_invariance():
+    np.random.seed(1234)
+    m, n = 4, 3
+    for i in range(3):
+        A_orig = np.random.randn(m, n)
+        B_orig = np.random.randn(m, n)
+        R_orig, s = orthogonal_procrustes(A_orig, B_orig)
+        for A_scale in np.square(np.random.randn(3)):
+            for B_scale in np.square(np.random.randn(3)):
+                R, s = orthogonal_procrustes(A_orig * A_scale, B_orig * B_scale)
+                assert_allclose(R, R_orig)
+
+
+def test_orthogonal_procrustes_array_conversion():
+    np.random.seed(1234)
+    for m, n in ((6, 4), (4, 4), (4, 6)):
+        A_arr = np.random.randn(m, n)
+        B_arr = np.random.randn(m, n)
+        As = (A_arr, A_arr.tolist(), matrix(A_arr))
+        Bs = (B_arr, B_arr.tolist(), matrix(B_arr))
+        R_arr, s = orthogonal_procrustes(A_arr, B_arr)
+        AR_arr = A_arr.dot(R_arr)
+        for A, B in product(As, Bs):
+            R, s = orthogonal_procrustes(A, B)
+            AR = A_arr.dot(R)
+            assert_allclose(AR, AR_arr)
+
+
+def test_orthogonal_procrustes():
+    np.random.seed(1234)
+    for m, n in ((6, 4), (4, 4), (4, 6)):
+        # Sample a random target matrix.
+        B = np.random.randn(m, n)
+        # Sample a random orthogonal matrix
+        # by computing eigh of a sampled symmetric matrix.
+        X = np.random.randn(n, n)
+        w, V = eigh(X.T + X)
+        assert_allclose(inv(V), V.T)
+        # Compute a matrix with a known orthogonal transformation that gives B.
+        A = np.dot(B, V.T)
+        # Check that an orthogonal transformation from A to B can be recovered.
+        R, s = orthogonal_procrustes(A, B)
+        assert_allclose(inv(R), R.T)
+        assert_allclose(A.dot(R), B)
+        # Create a perturbed input matrix.
+        A_perturbed = A + 1e-2 * np.random.randn(m, n)
+        # Check that the orthogonal procrustes function can find an orthogonal
+        # transformation that is better than the orthogonal transformation
+        # computed from the original input matrix.
+        R_prime, s = orthogonal_procrustes(A_perturbed, B)
+        assert_allclose(inv(R_prime), R_prime.T)
+        # Compute the naive and optimal transformations of the perturbed input.
+        naive_approx = A_perturbed.dot(R)
+        optim_approx = A_perturbed.dot(R_prime)
+        # Compute the Frobenius norm errors of the matrix approximations.
+        naive_approx_error = norm(naive_approx - B, ord='fro')
+        optim_approx_error = norm(optim_approx - B, ord='fro')
+        # Check that the orthogonal Procrustes approximation is better.
+        assert_array_less(optim_approx_error, naive_approx_error)
+
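+
+# Illustrative sketch: the closed-form construction scipy documents
+# (Schonemann, 1966) takes the SVD of A.T @ B; the minimizer is R = U @ Vt
+# and the returned scale is the sum of the singular values, matching
+# orthogonal_procrustes(A, B).
+def _procrustes_svd_sketch(A, B):
+    u, w, vt = np.linalg.svd(A.T.dot(B))
+    return u.dot(vt), w.sum()
+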
+
+def _centered(A):
+    mu = A.mean(axis=0)
+    return A - mu, mu
+
+
+def test_orthogonal_procrustes_exact_example():
+    # Check a small application.
+    # It uses translation, scaling, reflection, and rotation.
+    #
+    #         |
+    #   a  b  |
+    #         |
+    #   d  c  |        w
+    #         |
+    # --------+--- x ----- z ---
+    #         |
+    #         |        y
+    #         |
+    #
+    A_orig = np.array([[-3, 3], [-2, 3], [-2, 2], [-3, 2]], dtype=float)
+    B_orig = np.array([[3, 2], [1, 0], [3, -2], [5, 0]], dtype=float)
+    A, A_mu = _centered(A_orig)
+    B, B_mu = _centered(B_orig)
+    R, s = orthogonal_procrustes(A, B)
+    scale = s / np.square(norm(A))
+    B_approx = scale * np.dot(A, R) + B_mu
+    assert_allclose(B_approx, B_orig, atol=1e-8)
+
+
+def test_orthogonal_procrustes_stretched_example():
+    # Try again with a target with a stretched y axis.
+    A_orig = np.array([[-3, 3], [-2, 3], [-2, 2], [-3, 2]], dtype=float)
+    B_orig = np.array([[3, 40], [1, 0], [3, -40], [5, 0]], dtype=float)
+    A, A_mu = _centered(A_orig)
+    B, B_mu = _centered(B_orig)
+    R, s = orthogonal_procrustes(A, B)
+    scale = s / np.square(norm(A))
+    B_approx = scale * np.dot(A, R) + B_mu
+    expected = np.array([[3, 21], [-18, 0], [3, -21], [24, 0]], dtype=float)
+    assert_allclose(B_approx, expected, atol=1e-8)
+    # Check disparity symmetry.
+    expected_disparity = 0.4501246882793018
+    AB_disparity = np.square(norm(B_approx - B_orig) / norm(B))
+    assert_allclose(AB_disparity, expected_disparity)
+    R, s = orthogonal_procrustes(B, A)
+    scale = s / np.square(norm(B))
+    A_approx = scale * np.dot(B, R) + A_mu
+    BA_disparity = np.square(norm(A_approx - A_orig) / norm(A))
+    assert_allclose(BA_disparity, expected_disparity)
+
+
+def test_orthogonal_procrustes_skbio_example():
+    # This transformation is also exact.
+    # It uses translation, scaling, and reflection.
+    #
+    #   |
+    #   | a
+    #   | b
+    #   | c d
+    # --+---------
+    #   |
+    #   |       w
+    #   |
+    #   |       x
+    #   |
+    #   |   z   y
+    #   |
+    #
+    A_orig = np.array([[4, -2], [4, -4], [4, -6], [2, -6]], dtype=float)
+    B_orig = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], dtype=float)
+    B_standardized = np.array([
+        [-0.13363062, 0.6681531],
+        [-0.13363062, 0.13363062],
+        [-0.13363062, -0.40089186],
+        [0.40089186, -0.40089186]])
+    A, A_mu = _centered(A_orig)
+    B, B_mu = _centered(B_orig)
+    R, s = orthogonal_procrustes(A, B)
+    scale = s / np.square(norm(A))
+    B_approx = scale * np.dot(A, R) + B_mu
+    assert_allclose(B_approx, B_orig)
+    assert_allclose(B / norm(B), B_standardized)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_sketches.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_sketches.py
new file mode 100644
index 00000000..f4515e2d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_sketches.py
@@ -0,0 +1,118 @@
+"""Tests for _sketches.py."""
+
+import numpy as np
+from numpy.testing import assert_, assert_equal
+from scipy.linalg import clarkson_woodruff_transform
+from scipy.linalg._sketches import cwt_matrix
+from scipy.sparse import issparse, rand
+from scipy.sparse.linalg import norm
+
+
+class TestClarksonWoodruffTransform:
+    """
+    Tests for the Clarkson-Woodruff transform.
+    """
+    # set seed for generating test matrices
+    rng = np.random.RandomState(seed=1179103485)
+
+    # Test matrix parameters
+    n_rows = 2000
+    n_cols = 100
+    density = 0.1
+
+    # Sketch matrix dimensions
+    n_sketch_rows = 200
+
+    # Seeds to test with
+    seeds = [1755490010, 934377150, 1391612830, 1752708722, 2008891431,
+             1302443994, 1521083269, 1501189312, 1126232505, 1533465685]
+
+    A_dense = rng.randn(n_rows, n_cols)
+    A_csc = rand(
+        n_rows, n_cols, density=density, format='csc', random_state=rng,
+    )
+    A_csr = rand(
+        n_rows, n_cols, density=density, format='csr', random_state=rng,
+    )
+    A_coo = rand(
+        n_rows, n_cols, density=density, format='coo', random_state=rng,
+    )
+
+    # Collect the test matrices
+    test_matrices = [
+        A_dense, A_csc, A_csr, A_coo,
+    ]
+
+    # Test vector with norm ~1
+    x = rng.randn(n_rows, 1) / np.sqrt(n_rows)
+
+    def test_sketch_dimensions(self):
+        for A in self.test_matrices:
+            for seed in self.seeds:
+                sketch = clarkson_woodruff_transform(
+                    A, self.n_sketch_rows, seed=seed
+                )
+                assert_(sketch.shape == (self.n_sketch_rows, self.n_cols))
+
+    def test_seed_returns_identical_transform_matrix(self):
+        for A in self.test_matrices:
+            for seed in self.seeds:
+                S1 = cwt_matrix(
+                    self.n_sketch_rows, self.n_rows, seed=seed
+                ).toarray()
+                S2 = cwt_matrix(
+                    self.n_sketch_rows, self.n_rows, seed=seed
+                ).toarray()
+                assert_equal(S1, S2)
+
+    def test_seed_returns_identically(self):
+        for A in self.test_matrices:
+            for seed in self.seeds:
+                sketch1 = clarkson_woodruff_transform(
+                    A, self.n_sketch_rows, seed=seed
+                )
+                sketch2 = clarkson_woodruff_transform(
+                    A, self.n_sketch_rows, seed=seed
+                )
+                if issparse(sketch1):
+                    sketch1 = sketch1.toarray()
+                if issparse(sketch2):
+                    sketch2 = sketch2.toarray()
+                assert_equal(sketch1, sketch2)
+
+    def test_sketch_preserves_frobenius_norm(self):
+        # Because the sketches are randomized, run the check across several
+        # matrices and seeds and require that every try preserves the norm
+        # within the tolerance below.
+        n_errors = 0
+        for A in self.test_matrices:
+            if issparse(A):
+                true_norm = norm(A)
+            else:
+                true_norm = np.linalg.norm(A)
+            for seed in self.seeds:
+                sketch = clarkson_woodruff_transform(
+                    A, self.n_sketch_rows, seed=seed,
+                )
+                if issparse(sketch):
+                    sketch_norm = norm(sketch)
+                else:
+                    sketch_norm = np.linalg.norm(sketch)
+
+                if np.abs(true_norm - sketch_norm) > 0.1 * true_norm:
+                    n_errors += 1
+        assert_(n_errors == 0)
+
+    def test_sketch_preserves_vector_norm(self):
+        n_errors = 0
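+        # Sketch size chosen per the Chebyshev-style CountSketch bound (an
+        # assumption spelled out here, as in the scipy notes): roughly
+        # 2 / (delta * eps**2) rows keep the norm within (1 +/- eps) with
+        # failure probability delta; here delta = 0.01 and eps = 0.5.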
+        n_sketch_rows = int(np.ceil(2. / (0.01 * 0.5**2)))
+        true_norm = np.linalg.norm(self.x)
+        for seed in self.seeds:
+            sketch = clarkson_woodruff_transform(
+                self.x, n_sketch_rows, seed=seed,
+            )
+            sketch_norm = np.linalg.norm(sketch)
+
+            if np.abs(true_norm - sketch_norm) > 0.5 * true_norm:
+                n_errors += 1
+        assert_(n_errors == 0)
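+
+
+def _cwt_matrix_structure_sketch():
+    # Illustrative sketch (not an upstream test): the CountSketch matrix
+    # behind clarkson_woodruff_transform has exactly one nonzero entry per
+    # column, equal to +1 or -1, which is why applying it costs O(nnz(A)).
+    S = cwt_matrix(20, 100, seed=123).toarray()
+    assert S.shape == (20, 100)
+    assert np.all(np.count_nonzero(S, axis=0) == 1)
+    assert np.all(np.abs(S[S != 0]) == 1)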
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_solve_toeplitz.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_solve_toeplitz.py
new file mode 100644
index 00000000..ecced19e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_solve_toeplitz.py
@@ -0,0 +1,121 @@
+"""Test functions for linalg._solve_toeplitz module
+"""
+import numpy as np
+from scipy.linalg._solve_toeplitz import levinson
+from scipy.linalg import solve, toeplitz, solve_toeplitz
+from numpy.testing import assert_equal, assert_allclose
+
+import pytest
+from pytest import raises as assert_raises
+
+
+def test_solve_equivalence():
+    # For toeplitz matrices, solve_toeplitz() should be equivalent to solve().
+    random = np.random.RandomState(1234)
+    for n in (1, 2, 3, 10):
+        c = random.randn(n)
+        if random.rand() < 0.5:
+            c = c + 1j * random.randn(n)
+        r = random.randn(n)
+        if random.rand() < 0.5:
+            r = r + 1j * random.randn(n)
+        y = random.randn(n)
+        if random.rand() < 0.5:
+            y = y + 1j * random.randn(n)
+
+        # Check equivalence when both the column and row are provided.
+        actual = solve_toeplitz((c,r), y)
+        desired = solve(toeplitz(c, r=r), y)
+        assert_allclose(actual, desired)
+
+        # Check equivalence when the column is provided but not the row.
+        actual = solve_toeplitz(c, b=y)
+        desired = solve(toeplitz(c), y)
+        assert_allclose(actual, desired)
+
+
+def test_multiple_rhs():
+    random = np.random.RandomState(1234)
+    c = random.randn(4)
+    r = random.randn(4)
+    for offset in [0, 1j]:
+        for yshape in ((4,), (4, 3), (4, 3, 2)):
+            y = random.randn(*yshape) + offset
+            actual = solve_toeplitz((c,r), b=y)
+            desired = solve(toeplitz(c, r=r), y)
+            assert_equal(actual.shape, yshape)
+            assert_equal(desired.shape, yshape)
+            assert_allclose(actual, desired)
+
+
+def test_native_list_arguments():
+    c = [1,2,4,7]
+    r = [1,3,9,12]
+    y = [5,1,4,2]
+    actual = solve_toeplitz((c,r), y)
+    desired = solve(toeplitz(c, r=r), y)
+    assert_allclose(actual, desired)
+
+
+def test_zero_diag_error():
+    # The Levinson-Durbin implementation fails when the diagonal is zero.
+    random = np.random.RandomState(1234)
+    n = 4
+    c = random.randn(n)
+    r = random.randn(n)
+    y = random.randn(n)
+    c[0] = 0
+    assert_raises(np.linalg.LinAlgError,
+        solve_toeplitz, (c, r), b=y)
+
+
+def test_wikipedia_counterexample():
+    # The Levinson-Durbin implementation also fails in other cases.
+    # This example is from the talk page of the Wikipedia article.
+    random = np.random.RandomState(1234)
+    c = [2, 2, 1]
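+    # toeplitz(c) here is [[2, 2, 1], [2, 2, 2], [1, 2, 2]]: nonsingular,
+    # but its leading 2x2 principal minor [[2, 2], [2, 2]] is singular,
+    # which Levinson recursion cannot handle.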
+    y = random.randn(3)
+    assert_raises(np.linalg.LinAlgError, solve_toeplitz, c, b=y)
+
+
+def test_reflection_coeffs():
+    # check that the partial solutions are given by the reflection
+    # coefficients
+
+    random = np.random.RandomState(1234)
+    y_d = random.randn(10)
+    y_z = random.randn(10) + 1j
+    reflection_coeffs_d = [1]
+    reflection_coeffs_z = [1]
+    for i in range(2, 10):
+        reflection_coeffs_d.append(solve_toeplitz(y_d[:(i-1)], b=y_d[1:i])[-1])
+        reflection_coeffs_z.append(solve_toeplitz(y_z[:(i-1)], b=y_z[1:i])[-1])
+
+    y_d_concat = np.concatenate((y_d[-2:0:-1], y_d[:-1]))
+    y_z_concat = np.concatenate((y_z[-2:0:-1].conj(), y_z[:-1]))
+    _, ref_d = levinson(y_d_concat, b=y_d[1:])
+    _, ref_z = levinson(y_z_concat, b=y_z[1:])
+
+    assert_allclose(reflection_coeffs_d, ref_d[:-1])
+    assert_allclose(reflection_coeffs_z, ref_z[:-1])
+
+
+@pytest.mark.xfail(reason='Instability of Levinson iteration')
+def test_unstable():
+    # this is a "Gaussian Toeplitz matrix", as mentioned in Example 2 of
+    # I. Gohbert, T. Kailath and V. Olshevsky "Fast Gaussian Elimination with
+    # Partial Pivoting for Matrices with Displacement Structure"
+    # Mathematics of Computation, 64, 212 (1995), pp 1557-1576
+    # which can be unstable for levinson recursion.
+
+    # other fast toeplitz solvers such as GKO or Burg should be better.
+    random = np.random.RandomState(1234)
+    n = 100
+    c = 0.9 ** (np.arange(n)**2)
+    y = random.randn(n)
+
+    solution1 = solve_toeplitz(c, b=y)
+    solution2 = solve(toeplitz(c), y)
+
+    assert_allclose(solution1, solution2)
+
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_solvers.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_solvers.py
new file mode 100644
index 00000000..3488b974
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_solvers.py
@@ -0,0 +1,766 @@
+import os
+import numpy as np
+
+from numpy.testing import assert_array_almost_equal
+import pytest
+from pytest import raises as assert_raises
+
+from scipy.linalg import solve_sylvester
+from scipy.linalg import solve_continuous_lyapunov, solve_discrete_lyapunov
+from scipy.linalg import solve_continuous_are, solve_discrete_are
+from scipy.linalg import block_diag, solve, LinAlgError
+from scipy.sparse._sputils import matrix
+
+
+def _load_data(name):
+    """
+    Load an npz data file from the data/ directory.
+
+    Returns a copy of the data rather than keeping the npz file open.
+    """
+    filename = os.path.join(os.path.abspath(os.path.dirname(__file__)),
+                            'data', name)
+    with np.load(filename) as f:
+        return dict(f.items())
+
+
+class TestSolveLyapunov:
+
+    cases = [
+        (np.array([[1, 2], [3, 4]]),
+         np.array([[9, 10], [11, 12]])),
+        # a, q all complex.
+        (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
+         np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
+        # a real; q complex.
+        (np.array([[1.0, 2.0], [3.0, 5.0]]),
+         np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
+        # a complex; q real.
+        (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
+         np.array([[2.0, 2.0], [-1.0, 2.0]])),
+        # An example from Kitagawa, 1977
+        (np.array([[3, 9, 5, 1, 4], [1, 2, 3, 8, 4], [4, 6, 6, 6, 3],
+                   [1, 5, 2, 0, 7], [5, 3, 3, 1, 5]]),
+         np.array([[2, 4, 1, 0, 1], [4, 1, 0, 2, 0], [1, 0, 3, 0, 3],
+                   [0, 2, 0, 1, 0], [1, 0, 3, 0, 4]])),
+        # Companion matrix example. a complex; q real; a.shape[0] = 11
+        (np.array([[0.100+0.j, 0.091+0.j, 0.082+0.j, 0.073+0.j, 0.064+0.j,
+                    0.055+0.j, 0.046+0.j, 0.037+0.j, 0.028+0.j, 0.019+0.j,
+                    0.010+0.j],
+                   [1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+                    0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+                    0.000+0.j],
+                   [0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+                    0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+                    0.000+0.j],
+                   [0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j,
+                    0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+                    0.000+0.j],
+                   [0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j,
+                    0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+                    0.000+0.j],
+                   [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j,
+                    0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+                    0.000+0.j],
+                   [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+                    1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+                    0.000+0.j],
+                   [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+                    0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+                    0.000+0.j],
+                   [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+                    0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j,
+                    0.000+0.j],
+                   [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+                    0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j,
+                    0.000+0.j],
+                   [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+                    0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j,
+                    0.000+0.j]]),
+         np.eye(11)),
+        # https://github.com/scipy/scipy/issues/4176
+        (matrix([[0, 1], [-1/2, -1]]),
+         (matrix([0, 3]).T @ matrix([0, 3]).T.T)),
+        # https://github.com/scipy/scipy/issues/4176
+        (matrix([[0, 1], [-1/2, -1]]),
+         (np.array(matrix([0, 3]).T @ matrix([0, 3]).T.T))),
+        ]
+
+    def test_continuous_squareness_and_shape(self):
+        nsq = np.ones((3, 2))
+        sq = np.eye(3)
+        assert_raises(ValueError, solve_continuous_lyapunov, nsq, sq)
+        assert_raises(ValueError, solve_continuous_lyapunov, sq, nsq)
+        assert_raises(ValueError, solve_continuous_lyapunov, sq, np.eye(2))
+
+    def check_continuous_case(self, a, q):
+        x = solve_continuous_lyapunov(a, q)
+        assert_array_almost_equal(
+                          np.dot(a, x) + np.dot(x, a.conj().transpose()), q)
+
+    def check_discrete_case(self, a, q, method=None):
+        x = solve_discrete_lyapunov(a, q, method=method)
+        assert_array_almost_equal(
+                      np.dot(np.dot(a, x), a.conj().transpose()) - x, -1.0*q)
+
+    def test_cases(self):
+        for case in self.cases:
+            self.check_continuous_case(case[0], case[1])
+            self.check_discrete_case(case[0], case[1])
+            self.check_discrete_case(case[0], case[1], method='direct')
+            self.check_discrete_case(case[0], case[1], method='bilinear')
+
+
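+def _lyapunov_vec_sketch(a, q):
+    # Illustrative sketch (a naive dense alternative, not the solver scipy
+    # wraps): for real a, the continuous Lyapunov equation a x + x a' = q
+    # vectorizes to (I kron a + a kron I) vec(x) = vec(q), where vec()
+    # stacks columns (Fortran order in NumPy).
+    n = a.shape[0]
+    lhs = np.kron(np.eye(n), a) + np.kron(a, np.eye(n))
+    x_vec = solve(lhs, q.reshape(-1, order='F'))
+    return x_vec.reshape((n, n), order='F')
+
+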
+def test_solve_continuous_are():
+    mat6 = _load_data('carex_6_data.npz')
+    mat15 = _load_data('carex_15_data.npz')
+    mat18 = _load_data('carex_18_data.npz')
+    mat19 = _load_data('carex_19_data.npz')
+    mat20 = _load_data('carex_20_data.npz')
+    cases = [
+        # Carex examples taken from (with default parameters):
+        # [1] P.BENNER, A.J. LAUB, V. MEHRMANN: 'A Collection of Benchmark
+        #     Examples for the Numerical Solution of Algebraic Riccati
+        #     Equations II: Continuous-Time Case', Tech. Report SPC 95_23,
+        #     Fak. f. Mathematik, TU Chemnitz-Zwickau (Germany), 1995.
+        #
+        # The format of the data is (a, b, q, r, knownfailure), where
+        # knownfailure is None if the test passes or a string
+        # indicating the reason for failure.
+        #
+        # Test Case 0: carex #1
+        (np.diag([1.], 1),
+         np.array([[0], [1]]),
+         block_diag(1., 2.),
+         1,
+         None),
+        # Test Case 1: carex #2
+        (np.array([[4, 3], [-4.5, -3.5]]),
+         np.array([[1], [-1]]),
+         np.array([[9, 6], [6, 4.]]),
+         1,
+         None),
+        # Test Case 2: carex #3
+        (np.array([[0, 1, 0, 0],
+                   [0, -1.89, 0.39, -5.53],
+                   [0, -0.034, -2.98, 2.43],
+                   [0.034, -0.0011, -0.99, -0.21]]),
+         np.array([[0, 0], [0.36, -1.6], [-0.95, -0.032], [0.03, 0]]),
+         np.array([[2.313, 2.727, 0.688, 0.023],
+                   [2.727, 4.271, 1.148, 0.323],
+                   [0.688, 1.148, 0.313, 0.102],
+                   [0.023, 0.323, 0.102, 0.083]]),
+         np.eye(2),
+         None),
+        # Test Case 3: carex #4
+        (np.array([[-0.991, 0.529, 0, 0, 0, 0, 0, 0],
+                   [0.522, -1.051, 0.596, 0, 0, 0, 0, 0],
+                   [0, 0.522, -1.118, 0.596, 0, 0, 0, 0],
+                   [0, 0, 0.522, -1.548, 0.718, 0, 0, 0],
+                   [0, 0, 0, 0.922, -1.64, 0.799, 0, 0],
+                   [0, 0, 0, 0, 0.922, -1.721, 0.901, 0],
+                   [0, 0, 0, 0, 0, 0.922, -1.823, 1.021],
+                   [0, 0, 0, 0, 0, 0, 0.922, -1.943]]),
+         np.array([[3.84, 4.00, 37.60, 3.08, 2.36, 2.88, 3.08, 3.00],
+                   [-2.88, -3.04, -2.80, -2.32, -3.32, -3.82, -4.12, -3.96]]
+                  ).T * 0.001,
+         np.array([[1.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.1],
+                   [0.0, 1.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0],
+                   [0.0, 0.0, 1.0, 0.0, 0.0, 0.5, 0.0, 0.0],
+                   [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
+                   [0.5, 0.1, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0],
+                   [0.0, 0.0, 0.5, 0.0, 0.0, 0.1, 0.0, 0.0],
+                   [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.0],
+                   [0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1]]),
+         np.eye(2),
+         None),
+        # Test Case 4: carex #5
+        (np.array(
+          [[-4.019, 5.120, 0., 0., -2.082, 0., 0., 0., 0.870],
+           [-0.346, 0.986, 0., 0., -2.340, 0., 0., 0., 0.970],
+           [-7.909, 15.407, -4.069, 0., -6.450, 0., 0., 0., 2.680],
+           [-21.816, 35.606, -0.339, -3.870, -17.800, 0., 0., 0., 7.390],
+           [-60.196, 98.188, -7.907, 0.340, -53.008, 0., 0., 0., 20.400],
+           [0, 0, 0, 0, 94.000, -147.200, 0., 53.200, 0.],
+           [0, 0, 0, 0, 0, 94.000, -147.200, 0, 0],
+           [0, 0, 0, 0, 0, 12.800, 0.000, -31.600, 0],
+           [0, 0, 0, 0, 12.800, 0.000, 0.000, 18.800, -31.600]]),
+         np.array([[0.010, -0.011, -0.151],
+                   [0.003, -0.021, 0.000],
+                   [0.009, -0.059, 0.000],
+                   [0.024, -0.162, 0.000],
+                   [0.068, -0.445, 0.000],
+                   [0.000, 0.000, 0.000],
+                   [0.000, 0.000, 0.000],
+                   [0.000, 0.000, 0.000],
+                   [0.000, 0.000, 0.000]]),
+         np.eye(9),
+         np.eye(3),
+         None),
+        # Test Case 5: carex #6
+        (mat6['A'], mat6['B'], mat6['Q'], mat6['R'], None),
+        # Test Case 6: carex #7
+        (np.array([[1, 0], [0, -2.]]),
+         np.array([[1e-6], [0]]),
+         np.ones((2, 2)),
+         1.,
+         'Bad residual accuracy'),
+        # Test Case 7: carex #8
+        (block_diag(-0.1, -0.02),
+         np.array([[0.100, 0.000], [0.001, 0.010]]),
+         np.array([[100, 1000], [1000, 10000]]),
+         np.ones((2, 2)) + block_diag(1e-6, 0),
+         None),
+        # Test Case 8: carex #9
+        (np.array([[0, 1e6], [0, 0]]),
+         np.array([[0], [1.]]),
+         np.eye(2),
+         1.,
+         None),
+        # Test Case 9: carex #10
+        (np.array([[1.0000001, 1], [1., 1.0000001]]),
+         np.eye(2),
+         np.eye(2),
+         np.eye(2),
+         None),
+        # Test Case 10: carex #11
+        (np.array([[3, 1.], [4, 2]]),
+         np.array([[1], [1]]),
+         np.array([[-11, -5], [-5, -2.]]),
+         1.,
+         None),
+        # Test Case 11: carex #12
+        (np.array([[7000000., 2000000., -0.],
+                   [2000000., 6000000., -2000000.],
+                   [0., -2000000., 5000000.]]) / 3,
+         np.eye(3),
+         np.array([[1., -2., -2.], [-2., 1., -2.], [-2., -2., 1.]]).dot(
+                np.diag([1e-6, 1, 1e6])).dot(
+            np.array([[1., -2., -2.], [-2., 1., -2.], [-2., -2., 1.]])) / 9,
+         np.eye(3) * 1e6,
+         'Bad Residual Accuracy'),
+        # Test Case 12: carex #13
+        (np.array([[0, 0.4, 0, 0],
+                   [0, 0, 0.345, 0],
+                   [0, -0.524e6, -0.465e6, 0.262e6],
+                   [0, 0, 0, -1e6]]),
+         np.array([[0, 0, 0, 1e6]]).T,
+         np.diag([1, 0, 1, 0]),
+         1.,
+         None),
+        # Test Case 13: carex #14
+        (np.array([[-1e-6, 1, 0, 0],
+                   [-1, -1e-6, 0, 0],
+                   [0, 0, 1e-6, 1],
+                   [0, 0, -1, 1e-6]]),
+         np.ones((4, 1)),
+         np.ones((4, 4)),
+         1.,
+         None),
+        # Test Case 14: carex #15
+        (mat15['A'], mat15['B'], mat15['Q'], mat15['R'], None),
+        # Test Case 15: carex #16
+        (np.eye(64, 64, k=-1) + np.eye(64, 64)*(-2.) + np.rot90(
+                 block_diag(1, np.zeros((62, 62)), 1)) + np.eye(64, 64, k=1),
+         np.eye(64),
+         np.eye(64),
+         np.eye(64),
+         None),
+        # Test Case 16: carex #17
+        (np.diag(np.ones((20, )), 1),
+         np.flipud(np.eye(21, 1)),
+         np.eye(21, 1) * np.eye(21, 1).T,
+         1,
+         'Bad Residual Accuracy'),
+        # Test Case 17: carex #18
+        (mat18['A'], mat18['B'], mat18['Q'], mat18['R'], None),
+        # Test Case 18: carex #19
+        (mat19['A'], mat19['B'], mat19['Q'], mat19['R'],
+         'Bad Residual Accuracy'),
+        # Test Case 19: carex #20
+        (mat20['A'], mat20['B'], mat20['Q'], mat20['R'],
+         'Bad Residual Accuracy')
+        ]
+    # Minimum precision requirements, customized per test case. Each number
+    # is how many decimal places of the residual must agree with the zero
+    # matrix when the solution x is plugged into the equation, e.g.
+    #
+    # res = array([[8e-3,1e-16],[1e-16,1e-20]]) --> min_decimal[k] = 2
+    #
+    # Use "None" for entries that are expected to fail.
+    #
+    min_decimal = (14, 12, 13, 14, 11, 6, None, 5, 7, 14, 14,
+                   None, 9, 14, 13, 14, None, 12, None, None)
+
+    def _test_factory(case, dec):
+        """Checks if 0 = XA + A'X - XB(R)^{-1} B'X + Q is true"""
+        a, b, q, r, knownfailure = case
+        if knownfailure:
+            pytest.xfail(reason=knownfailure)
+
+        x = solve_continuous_are(a, b, q, r)
+        res = x.dot(a) + a.conj().T.dot(x) + q
+        out_fact = x.dot(b)
+        res -= out_fact.dot(solve(np.atleast_2d(r), out_fact.conj().T))
+        assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)
+
+    for ind, case in enumerate(cases):
+        _test_factory(case, min_decimal[ind])
+
+
+def test_solve_discrete_are():
+
+    cases = [
+        # Darex examples taken from (with default parameters):
+        # [1] P.BENNER, A.J. LAUB, V. MEHRMANN: 'A Collection of Benchmark
+        #     Examples for the Numerical Solution of Algebraic Riccati
+        #     Equations II: Discrete-Time Case', Tech. Report SPC 95_23,
+        #     Fak. f. Mathematik, TU Chemnitz-Zwickau (Germany), 1995.
+        # [2] T. GUDMUNDSSON, C. KENNEY, A.J. LAUB: 'Scaling of the
+        #     Discrete-Time Algebraic Riccati Equation to Enhance Stability
+        #     of the Schur Solution Method', IEEE Trans.Aut.Cont., vol.37(4)
+        #
+        # The format of the data is (a, b, q, r, knownfailure), where
+        # knownfailure is None if the test passes or a string
+        # indicating the reason for failure.
+        #
+        # TEST CASE 0 : Complex a; real b, q, r
+        (np.array([[2, 1-2j], [0, -3j]]),
+         np.array([[0], [1]]),
+         np.array([[1, 0], [0, 2]]),
+         np.array([[1]]),
+         None),
+        # TEST CASE 1 :Real a, q, r; complex b
+        (np.array([[2, 1], [0, -1]]),
+         np.array([[-2j], [1j]]),
+         np.array([[1, 0], [0, 2]]),
+         np.array([[1]]),
+         None),
+        # TEST CASE 2 : Real a, b; complex q, r
+        (np.array([[3, 1], [0, -1]]),
+         np.array([[1, 2], [1, 3]]),
+         np.array([[1, 1+1j], [1-1j, 2]]),
+         np.array([[2, -2j], [2j, 3]]),
+         None),
+        # TEST CASE 3 : User-reported gh-2251 (Trac #1732)
+        (np.array([[0.63399379, 0.54906824, 0.76253406],
+                   [0.5404729, 0.53745766, 0.08731853],
+                   [0.27524045, 0.84922129, 0.4681622]]),
+         np.array([[0.96861695], [0.05532739], [0.78934047]]),
+         np.eye(3),
+         np.eye(1),
+         None),
+        # TEST CASE 4 : darex #1
+        (np.array([[4, 3], [-4.5, -3.5]]),
+         np.array([[1], [-1]]),
+         np.array([[9, 6], [6, 4]]),
+         np.array([[1]]),
+         None),
+        # TEST CASE 5 : darex #2
+        (np.array([[0.9512, 0], [0, 0.9048]]),
+         np.array([[4.877, 4.877], [-1.1895, 3.569]]),
+         np.array([[0.005, 0], [0, 0.02]]),
+         np.array([[1/3, 0], [0, 3]]),
+         None),
+        # TEST CASE 6 : darex #3
+        (np.array([[2, -1], [1, 0]]),
+         np.array([[1], [0]]),
+         np.array([[0, 0], [0, 1]]),
+         np.array([[0]]),
+         None),
+        # TEST CASE 7 : darex #4 (skipped the gen. Ric. term S)
+        (np.array([[0, 1], [0, -1]]),
+         np.array([[1, 0], [2, 1]]),
+         np.array([[-4, -4], [-4, 7]]) * (1/11),
+         np.array([[9, 3], [3, 1]]),
+         None),
+        # TEST CASE 8 : darex #5
+        (np.array([[0, 1], [0, 0]]),
+         np.array([[0], [1]]),
+         np.array([[1, 2], [2, 4]]),
+         np.array([[1]]),
+         None),
+        # TEST CASE 9 : darex #6
+        (np.array([[0.998, 0.067, 0, 0],
+                   [-.067, 0.998, 0, 0],
+                   [0, 0, 0.998, 0.153],
+                   [0, 0, -.153, 0.998]]),
+         np.array([[0.0033, 0.0200],
+                   [0.1000, -.0007],
+                   [0.0400, 0.0073],
+                   [-.0028, 0.1000]]),
+         np.array([[1.87, 0, 0, -0.244],
+                   [0, 0.744, 0.205, 0],
+                   [0, 0.205, 0.589, 0],
+                   [-0.244, 0, 0, 1.048]]),
+         np.eye(2),
+         None),
+        # TEST CASE 10 : darex #7
+        (np.array([[0.984750, -.079903, 0.0009054, -.0010765],
+                   [0.041588, 0.998990, -.0358550, 0.0126840],
+                   [-.546620, 0.044916, -.3299100, 0.1931800],
+                   [2.662400, -.100450, -.9245500, -.2632500]]),
+         np.array([[0.0037112, 0.0007361],
+                   [-.0870510, 9.3411e-6],
+                   [-1.198440, -4.1378e-4],
+                   [-3.192700, 9.2535e-4]]),
+         np.eye(4)*1e-2,
+         np.eye(2),
+         None),
+        # TEST CASE 11 : darex #8
+        (np.array([[-0.6000000, -2.2000000, -3.6000000, -5.4000180],
+                   [1.0000000, 0.6000000, 0.8000000, 3.3999820],
+                   [0.0000000, 1.0000000, 1.8000000, 3.7999820],
+                   [0.0000000, 0.0000000, 0.0000000, -0.9999820]]),
+         np.array([[1.0, -1.0, -1.0, -1.0],
+                   [0.0, 1.0, -1.0, -1.0],
+                   [0.0, 0.0, 1.0, -1.0],
+                   [0.0, 0.0, 0.0, 1.0]]),
+         np.array([[2, 1, 3, 6],
+                   [1, 2, 2, 5],
+                   [3, 2, 6, 11],
+                   [6, 5, 11, 22]]),
+         np.eye(4),
+         None),
+        # TEST CASE 12 : darex #9
+        (np.array([[95.4070, 1.9643, 0.3597, 0.0673, 0.0190],
+                   [40.8490, 41.3170, 16.0840, 4.4679, 1.1971],
+                   [12.2170, 26.3260, 36.1490, 15.9300, 12.3830],
+                   [4.1118, 12.8580, 27.2090, 21.4420, 40.9760],
+                   [0.1305, 0.5808, 1.8750, 3.6162, 94.2800]]) * 0.01,
+         np.array([[0.0434, -0.0122],
+                   [2.6606, -1.0453],
+                   [3.7530, -5.5100],
+                   [3.6076, -6.6000],
+                   [0.4617, -0.9148]]) * 0.01,
+         np.eye(5),
+         np.eye(2),
+         None),
+        # TEST CASE 13 : darex #10
+        (np.kron(np.eye(2), np.diag([1, 1], k=1)),
+         np.kron(np.eye(2), np.array([[0], [0], [1]])),
+         np.array([[1, 1, 0, 0, 0, 0],
+                   [1, 1, 0, 0, 0, 0],
+                   [0, 0, 0, 0, 0, 0],
+                   [0, 0, 0, 1, -1, 0],
+                   [0, 0, 0, -1, 1, 0],
+                   [0, 0, 0, 0, 0, 0]]),
+         np.array([[3, 0], [0, 1]]),
+         None),
+        # TEST CASE 14 : darex #11
+        (0.001 * np.array(
+         [[870.1, 135.0, 11.59, .5014, -37.22, .3484, 0, 4.242, 7.249],
+          [76.55, 897.4, 12.72, 0.5504, -40.16, .3743, 0, 4.53, 7.499],
+          [-127.2, 357.5, 817, 1.455, -102.8, .987, 0, 11.85, 18.72],
+          [-363.5, 633.9, 74.91, 796.6, -273.5, 2.653, 0, 31.72, 48.82],
+          [-960, 1645.9, -128.9, -5.597, 71.42, 7.108, 0, 84.52, 125.9],
+          [-664.4, 112.96, -88.89, -3.854, 84.47, 13.6, 0, 144.3, 101.6],
+          [-410.2, 693, -54.71, -2.371, 66.49, 12.49, .1063, 99.97, 69.67],
+          [-179.9, 301.7, -23.93, -1.035, 60.59, 22.16, 0, 213.9, 35.54],
+          [-345.1, 580.4, -45.96, -1.989, 105.6, 19.86, 0, 219.1, 215.2]]),
+         np.array([[4.7600, -0.5701, -83.6800],
+                   [0.8790, -4.7730, -2.7300],
+                   [1.4820, -13.1200, 8.8760],
+                   [3.8920, -35.1300, 24.8000],
+                   [10.3400, -92.7500, 66.8000],
+                   [7.2030, -61.5900, 38.3400],
+                   [4.4540, -36.8300, 20.2900],
+                   [1.9710, -15.5400, 6.9370],
+                   [3.7730, -30.2800, 14.6900]]) * 0.001,
+         np.diag([50, 0, 0, 0, 50, 0, 0, 0, 0]),
+         np.eye(3),
+         None),
+        # TEST CASE 15 : darex #12 - numerically least accurate example
+        (np.array([[0, 1e6], [0, 0]]),
+         np.array([[0], [1]]),
+         np.eye(2),
+         np.array([[1]]),
+         "Presumed issue with OpenBLAS, see gh-16926"),
+        # TEST CASE 16 : darex #13
+        (np.array([[16, 10, -2],
+                  [10, 13, -8],
+                  [-2, -8, 7]]) * (1/9),
+         np.eye(3),
+         1e6 * np.eye(3),
+         1e6 * np.eye(3),
+         "Issue with OpenBLAS, see gh-16926"),
+        # TEST CASE 17 : darex #14
+        (np.array([[1 - 1/1e8, 0, 0, 0],
+                  [1, 0, 0, 0],
+                  [0, 1, 0, 0],
+                  [0, 0, 1, 0]]),
+         np.array([[1e-08], [0], [0], [0]]),
+         np.diag([0, 0, 0, 1]),
+         np.array([[0.25]]),
+         None),
+        # TEST CASE 18 : darex #15
+        (np.eye(100, k=1),
+         np.flipud(np.eye(100, 1)),
+         np.eye(100),
+         np.array([[1]]),
+         None)
+        ]
+
+    # Minimum precision requirements, customized per test case. Each number
+    # is how many decimal places of the residual must agree with the zero
+    # matrix when the solution x is plugged into the equation, e.g.
+    #
+    # res = array([[8e-3,1e-16],[1e-16,1e-20]]) --> min_decimal[k] = 2
+    #
+    # Use "None" for entries that are expected to fail.
+    #
+    min_decimal = (12, 14, 13, 14, 13, 16, 18, 14, 14, 13,
+                   14, 13, 13, 14, 12, 2, 5, 6, 10)
+
+    def _test_factory(case, dec):
+        """Checks if X = A'XA-(A'XB)(R+B'XB)^-1(B'XA)+Q) is true"""
+        a, b, q, r, knownfailure = case
+        if knownfailure:
+            pytest.xfail(reason=knownfailure)
+
+        x = solve_discrete_are(a, b, q, r)
+        res = a.conj().T.dot(x.dot(a)) - x + q
+        res -= a.conj().T.dot(x.dot(b)).dot(
+                    solve(r+b.conj().T.dot(x.dot(b)), b.conj().T).dot(x.dot(a))
+                    )
+        assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)
+
+    for ind, case in enumerate(cases):
+        _test_factory(case, min_decimal[ind])
+
+    # An infeasible example taken from https://arxiv.org/abs/1505.04861v1
+    A = np.triu(np.ones((3, 3)))
+    A[0, 1] = -1
+    B = np.array([[1, 1, 0], [0, 0, 1]]).T
+    Q = np.full_like(A, -2) + np.diag([8, -1, -1.9])
+    R = np.diag([-10, 0.1])
+    assert_raises(LinAlgError, solve_continuous_are, A, B, Q, R)
+
+
+def test_solve_generalized_continuous_are():
+    cases = [
+        # Two random examples differ by s term
+        # in the absence of any literature for demanding examples.
+        (np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01],
+                   [4.617139e-02, 6.948286e-01, 3.444608e-02],
+                   [9.713178e-02, 3.170995e-01, 4.387444e-01]]),
+         np.array([[3.815585e-01, 1.868726e-01],
+                   [7.655168e-01, 4.897644e-01],
+                   [7.951999e-01, 4.455862e-01]]),
+         np.eye(3),
+         np.eye(2),
+         np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01],
+                   [7.093648e-01, 6.797027e-01, 1.189977e-01],
+                   [7.546867e-01, 6.550980e-01, 4.983641e-01]]),
+         np.zeros((3, 2)),
+         None),
+        (np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01],
+                   [4.617139e-02, 6.948286e-01, 3.444608e-02],
+                   [9.713178e-02, 3.170995e-01, 4.387444e-01]]),
+         np.array([[3.815585e-01, 1.868726e-01],
+                   [7.655168e-01, 4.897644e-01],
+                   [7.951999e-01, 4.455862e-01]]),
+         np.eye(3),
+         np.eye(2),
+         np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01],
+                   [7.093648e-01, 6.797027e-01, 1.189977e-01],
+                   [7.546867e-01, 6.550980e-01, 4.983641e-01]]),
+         np.ones((3, 2)),
+         None)
+        ]
+
+    min_decimal = (10, 10)
+
+    def _test_factory(case, dec):
+        """Checks if X = A'XA-(A'XB)(R+B'XB)^-1(B'XA)+Q) is true"""
+        a, b, q, r, e, s, knownfailure = case
+        if knownfailure:
+            pytest.xfail(reason=knownfailure)
+
+        x = solve_continuous_are(a, b, q, r, e, s)
+        res = a.conj().T.dot(x.dot(e)) + e.conj().T.dot(x.dot(a)) + q
+        out_fact = e.conj().T.dot(x).dot(b) + s
+        res -= out_fact.dot(solve(np.atleast_2d(r), out_fact.conj().T))
+        assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)
+
+    for ind, case in enumerate(cases):
+        _test_factory(case, min_decimal[ind])
+
+
+def test_solve_generalized_discrete_are():
+    mat20170120 = _load_data('gendare_20170120_data.npz')
+
+    cases = [
+        # Two random examples differ by s term
+        # in the absence of any literature for demanding examples.
+        (np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01],
+                   [4.617139e-02, 6.948286e-01, 3.444608e-02],
+                   [9.713178e-02, 3.170995e-01, 4.387444e-01]]),
+         np.array([[3.815585e-01, 1.868726e-01],
+                   [7.655168e-01, 4.897644e-01],
+                   [7.951999e-01, 4.455862e-01]]),
+         np.eye(3),
+         np.eye(2),
+         np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01],
+                   [7.093648e-01, 6.797027e-01, 1.189977e-01],
+                   [7.546867e-01, 6.550980e-01, 4.983641e-01]]),
+         np.zeros((3, 2)),
+         None),
+        (np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01],
+                   [4.617139e-02, 6.948286e-01, 3.444608e-02],
+                   [9.713178e-02, 3.170995e-01, 4.387444e-01]]),
+         np.array([[3.815585e-01, 1.868726e-01],
+                   [7.655168e-01, 4.897644e-01],
+                   [7.951999e-01, 4.455862e-01]]),
+         np.eye(3),
+         np.eye(2),
+         np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01],
+                   [7.093648e-01, 6.797027e-01, 1.189977e-01],
+                   [7.546867e-01, 6.550980e-01, 4.983641e-01]]),
+         np.ones((3, 2)),
+         None),
+        # User-reported (under PR-6616), 20-Jan-2017:
+        # tests the case where E is None but S is provided.
+        (mat20170120['A'],
+         mat20170120['B'],
+         mat20170120['Q'],
+         mat20170120['R'],
+         None,
+         mat20170120['S'],
+         None),
+        ]
+
+    min_decimal = (11, 11, 16)
+
+    def _test_factory(case, dec):
+        """Checks if X = A'XA-(A'XB)(R+B'XB)^-1(B'XA)+Q) is true"""
+        a, b, q, r, e, s, knownfailure = case
+        if knownfailure:
+            pytest.xfail(reason=knownfailure)
+
+        x = solve_discrete_are(a, b, q, r, e, s)
+        if e is None:
+            e = np.eye(a.shape[0])
+        if s is None:
+            s = np.zeros_like(b)
+        res = a.conj().T.dot(x.dot(a)) - e.conj().T.dot(x.dot(e)) + q
+        res -= (a.conj().T.dot(x.dot(b)) + s).dot(
+                    solve(r+b.conj().T.dot(x.dot(b)),
+                          (b.conj().T.dot(x.dot(a)) + s.conj().T)
+                          )
+                )
+        assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)
+
+    for ind, case in enumerate(cases):
+        _test_factory(case, min_decimal[ind])
+
+
+def test_are_validate_args():
+
+    def test_square_shape():
+        nsq = np.ones((3, 2))
+        sq = np.eye(3)
+        for x in (solve_continuous_are, solve_discrete_are):
+            assert_raises(ValueError, x, nsq, 1, 1, 1)
+            assert_raises(ValueError, x, sq, sq, nsq, 1)
+            assert_raises(ValueError, x, sq, sq, sq, nsq)
+            assert_raises(ValueError, x, sq, sq, sq, sq, nsq)
+
+    def test_compatible_sizes():
+        nsq = np.ones((3, 2))
+        sq = np.eye(4)
+        for x in (solve_continuous_are, solve_discrete_are):
+            assert_raises(ValueError, x, sq, nsq, 1, 1)
+            assert_raises(ValueError, x, sq, sq, sq, sq, sq, nsq)
+            assert_raises(ValueError, x, sq, sq, np.eye(3), sq)
+            assert_raises(ValueError, x, sq, sq, sq, np.eye(3))
+            assert_raises(ValueError, x, sq, sq, sq, sq, np.eye(3))
+
+    def test_symmetry():
+        nsym = np.arange(9).reshape(3, 3)
+        sym = np.eye(3)
+        for x in (solve_continuous_are, solve_discrete_are):
+            assert_raises(ValueError, x, sym, sym, nsym, sym)
+            assert_raises(ValueError, x, sym, sym, sym, nsym)
+
+    def test_singularity():
+        sing = np.full((3, 3), 1e12)
+        sing[2, 2] -= 1
+        sq = np.eye(3)
+        for x in (solve_continuous_are, solve_discrete_are):
+            assert_raises(ValueError, x, sq, sq, sq, sq, sing)
+
+        assert_raises(ValueError, solve_continuous_are, sq, sq, sq, sing)
+
+    def test_finiteness():
+        nm = np.full((2, 2), np.nan)
+        sq = np.eye(2)
+        for x in (solve_continuous_are, solve_discrete_are):
+            assert_raises(ValueError, x, nm, sq, sq, sq)
+            assert_raises(ValueError, x, sq, nm, sq, sq)
+            assert_raises(ValueError, x, sq, sq, nm, sq)
+            assert_raises(ValueError, x, sq, sq, sq, nm)
+            assert_raises(ValueError, x, sq, sq, sq, sq, nm)
+            assert_raises(ValueError, x, sq, sq, sq, sq, sq, nm)
+
+    # Invoke the nested checks; without these calls none of the assertions
+    # above would ever run.
+    test_square_shape()
+    test_compatible_sizes()
+    test_symmetry()
+    test_singularity()
+    test_finiteness()
+
+
+class TestSolveSylvester:
+
+    cases = [
+        # a, b, c all real.
+        (np.array([[1, 2], [0, 4]]),
+         np.array([[5, 6], [0, 8]]),
+         np.array([[9, 10], [11, 12]])),
+        # a, b, c all real, 4x4. a and b have non-trivial 2x2 blocks in their
+        # quasi-triangular form.
+        (np.array([[1.0, 0, 0, 0],
+                   [0, 1.0, 2.0, 0.0],
+                   [0, 0, 3.0, -4],
+                   [0, 0, 2, 5]]),
+         np.array([[2.0, 0, 0, 1.0],
+                   [0, 1.0, 0.0, 0.0],
+                   [0, 0, 1.0, -1],
+                   [0, 0, 1, 1]]),
+         np.array([[1.0, 0, 0, 0],
+                   [0, 1.0, 0, 0],
+                   [0, 0, 1.0, 0],
+                   [0, 0, 0, 1.0]])),
+        # a, b, c all complex.
+        (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
+         np.array([[-1.0, 2j], [3.0, 4.0]]),
+         np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
+        # a and b real; c complex.
+        (np.array([[1.0, 2.0], [3.0, 5.0]]),
+         np.array([[-1.0, 0], [3.0, 4.0]]),
+         np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
+        # a and c complex; b real.
+        (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
+         np.array([[-1.0, 0], [3.0, 4.0]]),
+         np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
+        # a complex; b and c real.
+        (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
+         np.array([[-1.0, 0], [3.0, 4.0]]),
+         np.array([[2.0, 2.0], [-1.0, 2.0]])),
+        # not square matrices, real
+        (np.array([[8, 1, 6], [3, 5, 7], [4, 9, 2]]),
+         np.array([[2, 3], [4, 5]]),
+         np.array([[1, 2], [3, 4], [5, 6]])),
+        # not square matrices, complex
+        (np.array([[8, 1j, 6+2j], [3, 5, 7], [4, 9, 2]]),
+         np.array([[2, 3], [4, 5-1j]]),
+         np.array([[1, 2j], [3, 4j], [5j, 6+7j]])),
+    ]
+
+    def check_case(self, a, b, c):
+        x = solve_sylvester(a, b, c)
+        assert_array_almost_equal(np.dot(a, x) + np.dot(x, b), c)
+
+    def test_cases(self):
+        for case in self.cases:
+            self.check_case(case[0], case[1], case[2])
+
+    def test_trivial(self):
+        a = np.array([[1.0, 0.0], [0.0, 1.0]])
+        b = np.array([[1.0]])
+        c = np.array([2.0, 2.0]).reshape(-1, 1)
+        x = solve_sylvester(a, b, c)
+        assert_array_almost_equal(x, np.array([1.0, 1.0]).reshape(-1, 1))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_special_matrices.py b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_special_matrices.py
new file mode 100644
index 00000000..8f60b76e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/linalg/tests/test_special_matrices.py
@@ -0,0 +1,690 @@
+
+import pytest
+import numpy as np
+from numpy import arange, add, array, eye, copy, sqrt
+from numpy.testing import (assert_equal, assert_array_equal,
+                           assert_array_almost_equal, assert_allclose)
+from pytest import raises as assert_raises
+
+from scipy.fft import fft
+from scipy.special import comb
+from scipy.linalg import (toeplitz, hankel, circulant, hadamard, leslie, dft,
+                          companion, tri, triu, tril, kron, block_diag,
+                          helmert, hilbert, invhilbert, pascal, invpascal,
+                          fiedler, fiedler_companion, eigvals,
+                          convolution_matrix)
+from numpy.linalg import cond
+
+
+def get_mat(n):
+    data = arange(n)
+    data = add.outer(data, data)
+    return data
+
+
+class TestTri:
+    def test_basic(self):
+        assert_equal(tri(4), array([[1, 0, 0, 0],
+                                    [1, 1, 0, 0],
+                                    [1, 1, 1, 0],
+                                    [1, 1, 1, 1]]))
+        assert_equal(tri(4, dtype='f'), array([[1, 0, 0, 0],
+                                               [1, 1, 0, 0],
+                                               [1, 1, 1, 0],
+                                               [1, 1, 1, 1]], 'f'))
+
+    def test_diag(self):
+        assert_equal(tri(4, k=1), array([[1, 1, 0, 0],
+                                         [1, 1, 1, 0],
+                                         [1, 1, 1, 1],
+                                         [1, 1, 1, 1]]))
+        assert_equal(tri(4, k=-1), array([[0, 0, 0, 0],
+                                          [1, 0, 0, 0],
+                                          [1, 1, 0, 0],
+                                          [1, 1, 1, 0]]))
+
+    def test_2d(self):
+        assert_equal(tri(4, 3), array([[1, 0, 0],
+                                       [1, 1, 0],
+                                       [1, 1, 1],
+                                       [1, 1, 1]]))
+        assert_equal(tri(3, 4), array([[1, 0, 0, 0],
+                                       [1, 1, 0, 0],
+                                       [1, 1, 1, 0]]))
+
+    def test_diag2d(self):
+        assert_equal(tri(3, 4, k=2), array([[1, 1, 1, 0],
+                                            [1, 1, 1, 1],
+                                            [1, 1, 1, 1]]))
+        assert_equal(tri(4, 3, k=-2), array([[0, 0, 0],
+                                             [0, 0, 0],
+                                             [1, 0, 0],
+                                             [1, 1, 0]]))
+
+
+class TestTril:
+    def test_basic(self):
+        a = (100*get_mat(5)).astype('l')
+        b = a.copy()
+        for k in range(5):
+            for l in range(k+1, 5):
+                b[k, l] = 0
+        assert_equal(tril(a), b)
+
+    def test_diag(self):
+        a = (100*get_mat(5)).astype('f')
+        b = a.copy()
+        for k in range(5):
+            for l in range(k+3, 5):
+                b[k, l] = 0
+        assert_equal(tril(a, k=2), b)
+        b = a.copy()
+        for k in range(5):
+            for l in range(max((k-1, 0)), 5):
+                b[k, l] = 0
+        assert_equal(tril(a, k=-2), b)
+
+
+class TestTriu:
+    def test_basic(self):
+        a = (100*get_mat(5)).astype('l')
+        b = a.copy()
+        for k in range(5):
+            for l in range(k+1, 5):
+                b[l, k] = 0
+        assert_equal(triu(a), b)
+
+    def test_diag(self):
+        a = (100*get_mat(5)).astype('f')
+        b = a.copy()
+        for k in range(5):
+            for l in range(max((k-1, 0)), 5):
+                b[l, k] = 0
+        assert_equal(triu(a, k=2), b)
+        b = a.copy()
+        for k in range(5):
+            for l in range(k+3, 5):
+                b[l, k] = 0
+        assert_equal(triu(a, k=-2), b)
+
+
+class TestToeplitz:
+
+    def test_basic(self):
+        y = toeplitz([1, 2, 3])
+        assert_array_equal(y, [[1, 2, 3], [2, 1, 2], [3, 2, 1]])
+        y = toeplitz([1, 2, 3], [1, 4, 5])
+        assert_array_equal(y, [[1, 4, 5], [2, 1, 4], [3, 2, 1]])
+
+    def test_complex_01(self):
+        data = (1.0 + arange(3.0)) * (1.0 + 1.0j)
+        x = copy(data)
+        t = toeplitz(x)
+        # Calling toeplitz should not change x.
+        assert_array_equal(x, data)
+        # According to the docstring, x should be the first column of t.
+        col0 = t[:, 0]
+        assert_array_equal(col0, data)
+        assert_array_equal(t[0, 1:], data[1:].conj())
+
+    def test_scalar_00(self):
+        """Scalar arguments still produce a 2D array."""
+        t = toeplitz(10)
+        assert_array_equal(t, [[10]])
+        t = toeplitz(10, 20)
+        assert_array_equal(t, [[10]])
+
+    def test_scalar_01(self):
+        c = array([1, 2, 3])
+        t = toeplitz(c, 1)
+        assert_array_equal(t, [[1], [2], [3]])
+
+    def test_scalar_02(self):
+        c = array([1, 2, 3])
+        t = toeplitz(c, array(1))
+        assert_array_equal(t, [[1], [2], [3]])
+
+    def test_scalar_03(self):
+        c = array([1, 2, 3])
+        t = toeplitz(c, array([1]))
+        assert_array_equal(t, [[1], [2], [3]])
+
+    def test_scalar_04(self):
+        r = array([10, 2, 3])
+        t = toeplitz(1, r)
+        assert_array_equal(t, [[1, 2, 3]])
+
+
+class TestHankel:
+    def test_basic(self):
+        y = hankel([1, 2, 3])
+        assert_array_equal(y, [[1, 2, 3], [2, 3, 0], [3, 0, 0]])
+        y = hankel([1, 2, 3], [3, 4, 5])
+        assert_array_equal(y, [[1, 2, 3], [2, 3, 4], [3, 4, 5]])
+
+
+class TestCirculant:
+    def test_basic(self):
+        y = circulant([1, 2, 3])
+        assert_array_equal(y, [[1, 3, 2], [2, 1, 3], [3, 2, 1]])
+
+
+class TestHadamard:
+
+    def test_basic(self):
+
+        y = hadamard(1)
+        assert_array_equal(y, [[1]])
+
+        y = hadamard(2, dtype=float)
+        assert_array_equal(y, [[1.0, 1.0], [1.0, -1.0]])
+
+        y = hadamard(4)
+        assert_array_equal(y, [[1, 1, 1, 1],
+                               [1, -1, 1, -1],
+                               [1, 1, -1, -1],
+                               [1, -1, -1, 1]])
+
+        assert_raises(ValueError, hadamard, 0)
+        assert_raises(ValueError, hadamard, 5)
+
+
+class TestLeslie:
+
+    def test_bad_shapes(self):
+        assert_raises(ValueError, leslie, [[1, 1], [2, 2]], [3, 4, 5])
+        assert_raises(ValueError, leslie, [3, 4, 5], [[1, 1], [2, 2]])
+        assert_raises(ValueError, leslie, [1, 2], [1, 2])
+        assert_raises(ValueError, leslie, [1], [])
+
+    def test_basic(self):
+        a = leslie([1, 2, 3], [0.25, 0.5])
+        expected = array([[1.0, 2.0, 3.0],
+                          [0.25, 0.0, 0.0],
+                          [0.0, 0.5, 0.0]])
+        assert_array_equal(a, expected)
+
+
+class TestCompanion:
+
+    def test_bad_shapes(self):
+        assert_raises(ValueError, companion, [[1, 1], [2, 2]])
+        assert_raises(ValueError, companion, [0, 4, 5])
+        assert_raises(ValueError, companion, [1])
+        assert_raises(ValueError, companion, [])
+
+    def test_basic(self):
+        c = companion([1, 2, 3])
+        expected = array([
+            [-2.0, -3.0],
+            [1.0, 0.0]])
+        assert_array_equal(c, expected)
+
+        c = companion([2.0, 5.0, -10.0])
+        expected = array([
+            [-2.5, 5.0],
+            [1.0, 0.0]])
+        assert_array_equal(c, expected)
+
+
+class TestBlockDiag:
+    def test_basic(self):
+        x = block_diag(eye(2), [[1, 2], [3, 4], [5, 6]], [[1, 2, 3]])
+        assert_array_equal(x, [[1, 0, 0, 0, 0, 0, 0],
+                               [0, 1, 0, 0, 0, 0, 0],
+                               [0, 0, 1, 2, 0, 0, 0],
+                               [0, 0, 3, 4, 0, 0, 0],
+                               [0, 0, 5, 6, 0, 0, 0],
+                               [0, 0, 0, 0, 1, 2, 3]])
+
+    def test_dtype(self):
+        x = block_diag([[1.5]])
+        assert_equal(x.dtype, float)
+
+        x = block_diag([[True]])
+        assert_equal(x.dtype, bool)
+
+    def test_mixed_dtypes(self):
+        actual = block_diag([[1]], [[1j]])
+        desired = np.array([[1, 0], [0, 1j]])
+        assert_array_equal(actual, desired)
+
+    def test_scalar_and_1d_args(self):
+        a = block_diag(1)
+        assert_equal(a.shape, (1, 1))
+        assert_array_equal(a, [[1]])
+
+        a = block_diag([2, 3], 4)
+        assert_array_equal(a, [[2, 3, 0], [0, 0, 4]])
+
+    def test_bad_arg(self):
+        assert_raises(ValueError, block_diag, [[[1]]])
+
+    def test_no_args(self):
+        a = block_diag()
+        assert_equal(a.ndim, 2)
+        assert_equal(a.nbytes, 0)
+
+    def test_empty_matrix_arg(self):
+        # regression test for gh-4596: check the shape of the result
+        # for empty matrix inputs. Empty matrices are no longer ignored
+        # (gh-4908); an empty list is viewed as a matrix of shape (1, 0).
+        a = block_diag([[1, 0], [0, 1]],
+                       [],
+                       [[2, 3], [4, 5], [6, 7]])
+        assert_array_equal(a, [[1, 0, 0, 0],
+                               [0, 1, 0, 0],
+                               [0, 0, 0, 0],
+                               [0, 0, 2, 3],
+                               [0, 0, 4, 5],
+                               [0, 0, 6, 7]])
+
+    def test_zerosized_matrix_arg(self):
+        # test for gh-4908: check the shape of the result for
+        # zero-sized matrix inputs, i.e. matrices with shape (0,n) or (n,0).
+        # note that [[]] takes shape (1,0)
+        a = block_diag([[1, 0], [0, 1]],
+                       [[]],
+                       [[2, 3], [4, 5], [6, 7]],
+                       np.zeros([0, 2], dtype='int32'))
+        assert_array_equal(a, [[1, 0, 0, 0, 0, 0],
+                               [0, 1, 0, 0, 0, 0],
+                               [0, 0, 0, 0, 0, 0],
+                               [0, 0, 2, 3, 0, 0],
+                               [0, 0, 4, 5, 0, 0],
+                               [0, 0, 6, 7, 0, 0]])
+
+
+class TestKron:
+
+    def test_basic(self):
+
+        a = kron(array([[1, 2], [3, 4]]), array([[1, 1, 1]]))
+        assert_array_equal(a, array([[1, 1, 1, 2, 2, 2],
+                                     [3, 3, 3, 4, 4, 4]]))
+
+        m1 = array([[1, 2], [3, 4]])
+        m2 = array([[10], [11]])
+        a = kron(m1, m2)
+        expected = array([[10, 20],
+                          [11, 22],
+                          [30, 40],
+                          [33, 44]])
+        assert_array_equal(a, expected)
+
+
+class TestHelmert:
+
+    def test_orthogonality(self):
+        for n in range(1, 7):
+            H = helmert(n, full=True)
+            Id = np.eye(n)
+            assert_allclose(H.dot(H.T), Id, atol=1e-12)
+            assert_allclose(H.T.dot(H), Id, atol=1e-12)
+
+    def test_subspace(self):
+        for n in range(2, 7):
+            H_full = helmert(n, full=True)
+            H_partial = helmert(n)
+            for U in H_full[1:, :].T, H_partial.T:
+                C = np.eye(n) - np.full((n, n), 1 / n)
+                assert_allclose(U.dot(U.T), C)
+                assert_allclose(U.T.dot(U), np.eye(n-1), atol=1e-12)
+
+
+class TestHilbert:
+
+    def test_basic(self):
+        h3 = array([[1.0, 1/2., 1/3.],
+                    [1/2., 1/3., 1/4.],
+                    [1/3., 1/4., 1/5.]])
+        assert_array_almost_equal(hilbert(3), h3)
+
+        assert_array_equal(hilbert(1), [[1.0]])
+
+        h0 = hilbert(0)
+        assert_equal(h0.shape, (0, 0))
+
+
+class TestInvHilbert:
+
+    def test_basic(self):
+        invh1 = array([[1]])
+        assert_array_equal(invhilbert(1, exact=True), invh1)
+        assert_array_equal(invhilbert(1), invh1)
+
+        invh2 = array([[4, -6],
+                       [-6, 12]])
+        assert_array_equal(invhilbert(2, exact=True), invh2)
+        assert_array_almost_equal(invhilbert(2), invh2)
+
+        invh3 = array([[9, -36, 30],
+                       [-36, 192, -180],
+                       [30, -180, 180]])
+        assert_array_equal(invhilbert(3, exact=True), invh3)
+        assert_array_almost_equal(invhilbert(3), invh3)
+
+        invh4 = array([[16, -120, 240, -140],
+                       [-120, 1200, -2700, 1680],
+                       [240, -2700, 6480, -4200],
+                       [-140, 1680, -4200, 2800]])
+        assert_array_equal(invhilbert(4, exact=True), invh4)
+        assert_array_almost_equal(invhilbert(4), invh4)
+
+        invh5 = array([[25, -300, 1050, -1400, 630],
+                       [-300, 4800, -18900, 26880, -12600],
+                       [1050, -18900, 79380, -117600, 56700],
+                       [-1400, 26880, -117600, 179200, -88200],
+                       [630, -12600, 56700, -88200, 44100]])
+        assert_array_equal(invhilbert(5, exact=True), invh5)
+        assert_array_almost_equal(invhilbert(5), invh5)
+
+        invh17 = array([
+            [289, -41616, 1976760, -46124400, 629598060, -5540462928,
+             33374693352, -143034400080, 446982500250, -1033026222800,
+             1774926873720, -2258997839280, 2099709530100, -1384423866000,
+             613101997800, -163493866080, 19835652870],
+            [-41616, 7990272, -426980160, 10627061760, -151103534400,
+             1367702848512, -8410422724704, 36616806420480, -115857864064800,
+             270465047424000, -468580694662080, 600545887119360,
+             -561522320049600, 372133135180800, -165537539406000,
+             44316454993920, -5395297580640],
+            [1976760, -426980160, 24337869120, -630981792000, 9228108708000,
+             -85267724461920, 532660105897920, -2348052711713280,
+             7504429831470000, -17664748409880000, 30818191841236800,
+             -39732544853164800, 37341234283298400, -24857330514030000,
+             11100752642520000, -2982128117299200, 364182586693200],
+            [-46124400, 10627061760, -630981792000, 16826181120000,
+             -251209625940000, 2358021022156800, -14914482965141760,
+             66409571644416000, -214015221119700000, 507295338950400000,
+             -890303319857952000, 1153715376477081600, -1089119333262870000,
+             727848632044800000, -326170262829600000, 87894302404608000,
+             -10763618673376800],
+            [629598060, -151103534400, 9228108708000,
+             -251209625940000, 3810012660090000, -36210360321495360,
+             231343968720664800, -1038687206500944000, 3370739732635275000,
+             -8037460526495400000, 14178080368737885600, -18454939322943942000,
+             17489975175339030000, -11728977435138600000, 5272370630081100000,
+             -1424711708039692800, 174908803442373000],
+            [-5540462928, 1367702848512, -85267724461920, 2358021022156800,
+             -36210360321495360, 347619459086355456, -2239409617216035264,
+             10124803292907663360, -33052510749726468000,
+             79217210949138662400, -140362995650505067440,
+             183420385176741672960, -174433352415381259200,
+             117339159519533952000, -52892422160973595200,
+             14328529177999196160, -1763080738699119840],
+            [33374693352, -8410422724704, 532660105897920,
+             -14914482965141760, 231343968720664800, -2239409617216035264,
+             14527452132196331328, -66072377044391477760,
+             216799987176909536400, -521925895055522958000,
+             928414062734059661760, -1217424500995626443520,
+             1161358898976091015200, -783401860847777371200,
+             354015418167362952000, -96120549902411274240,
+             11851820521255194480],
+            [-143034400080, 36616806420480, -2348052711713280,
+             66409571644416000, -1038687206500944000, 10124803292907663360,
+             -66072377044391477760, 302045152202932469760,
+             -995510145200094810000, 2405996923185123840000,
+             -4294704507885446054400, 5649058909023744614400,
+             -5403874060541811254400, 3654352703663101440000,
+             -1655137020003255360000, 450325202737117593600,
+             -55630994283442749600],
+            [446982500250, -115857864064800, 7504429831470000,
+             -214015221119700000, 3370739732635275000, -33052510749726468000,
+             216799987176909536400, -995510145200094810000,
+             3293967392206196062500, -7988661659013106500000,
+             14303908928401362270000, -18866974090684772052000,
+             18093328327706957325000, -12263364009096700500000,
+             5565847995255512250000, -1517208935002984080000,
+             187754605706619279900],
+            [-1033026222800, 270465047424000, -17664748409880000,
+             507295338950400000, -8037460526495400000, 79217210949138662400,
+             -521925895055522958000, 2405996923185123840000,
+             -7988661659013106500000, 19434404971634224000000,
+             -34894474126569249192000, 46141453390504792320000,
+             -44349976506971935800000, 30121928988527376000000,
+             -13697025107665828500000, 3740200989399948902400,
+             -463591619028689580000],
+            [1774926873720, -468580694662080,
+             30818191841236800, -890303319857952000, 14178080368737885600,
+             -140362995650505067440, 928414062734059661760,
+             -4294704507885446054400, 14303908928401362270000,
+             -34894474126569249192000, 62810053427824648545600,
+             -83243376594051600326400, 80177044485212743068000,
+             -54558343880470209780000, 24851882355348879230400,
+             -6797096028813368678400, 843736746632215035600],
+            [-2258997839280, 600545887119360, -39732544853164800,
+             1153715376477081600, -18454939322943942000, 183420385176741672960,
+             -1217424500995626443520, 5649058909023744614400,
+             -18866974090684772052000, 46141453390504792320000,
+             -83243376594051600326400, 110552468520163390156800,
+             -106681852579497947388000, 72720410752415168870400,
+             -33177973900974346080000, 9087761081682520473600,
+             -1129631016152221783200],
+            [2099709530100, -561522320049600, 37341234283298400,
+             -1089119333262870000, 17489975175339030000,
+             -174433352415381259200, 1161358898976091015200,
+             -5403874060541811254400, 18093328327706957325000,
+             -44349976506971935800000, 80177044485212743068000,
+             -106681852579497947388000, 103125790826848015808400,
+             -70409051543137015800000, 32171029219823375700000,
+             -8824053728865840192000, 1098252376814660067000],
+            [-1384423866000, 372133135180800,
+             -24857330514030000, 727848632044800000, -11728977435138600000,
+             117339159519533952000, -783401860847777371200,
+             3654352703663101440000, -12263364009096700500000,
+             30121928988527376000000, -54558343880470209780000,
+             72720410752415168870400, -70409051543137015800000,
+             48142941226076592000000, -22027500987368499000000,
+             6049545098753157120000, -753830033789944188000],
+            [613101997800, -165537539406000,
+             11100752642520000, -326170262829600000, 5272370630081100000,
+             -52892422160973595200, 354015418167362952000,
+             -1655137020003255360000, 5565847995255512250000,
+             -13697025107665828500000, 24851882355348879230400,
+             -33177973900974346080000, 32171029219823375700000,
+             -22027500987368499000000, 10091416708498869000000,
+             -2774765838662800128000, 346146444087219270000],
+            [-163493866080, 44316454993920, -2982128117299200,
+             87894302404608000, -1424711708039692800,
+             14328529177999196160, -96120549902411274240,
+             450325202737117593600, -1517208935002984080000,
+             3740200989399948902400, -6797096028813368678400,
+             9087761081682520473600, -8824053728865840192000,
+             6049545098753157120000, -2774765838662800128000,
+             763806510427609497600, -95382575704033754400],
+            [19835652870, -5395297580640, 364182586693200, -10763618673376800,
+             174908803442373000, -1763080738699119840, 11851820521255194480,
+             -55630994283442749600, 187754605706619279900,
+             -463591619028689580000, 843736746632215035600,
+             -1129631016152221783200, 1098252376814660067000,
+             -753830033789944188000, 346146444087219270000,
+             -95382575704033754400, 11922821963004219300]
+        ])
+        assert_array_equal(invhilbert(17, exact=True), invh17)
+        assert_allclose(invhilbert(17), invh17.astype(float), rtol=1e-12)
+
+    def test_inverse(self):
+        for n in range(1, 10):
+            a = hilbert(n)
+            b = invhilbert(n)
+            # The Hilbert matrix is increasingly badly conditioned,
+            # so take that into account in the test
+            c = cond(a)
+            assert_allclose(a.dot(b), eye(n), atol=1e-15*c, rtol=1e-15*c)
+
+
+class TestPascal:
+
+    cases = [
+        (1, array([[1]]), array([[1]])),
+        (2, array([[1, 1],
+                   [1, 2]]),
+            array([[1, 0],
+                   [1, 1]])),
+        (3, array([[1, 1, 1],
+                   [1, 2, 3],
+                   [1, 3, 6]]),
+            array([[1, 0, 0],
+                   [1, 1, 0],
+                   [1, 2, 1]])),
+        (4, array([[1, 1, 1, 1],
+                   [1, 2, 3, 4],
+                   [1, 3, 6, 10],
+                   [1, 4, 10, 20]]),
+            array([[1, 0, 0, 0],
+                   [1, 1, 0, 0],
+                   [1, 2, 1, 0],
+                   [1, 3, 3, 1]])),
+    ]
+
+    def check_case(self, n, sym, low):
+        assert_array_equal(pascal(n), sym)
+        assert_array_equal(pascal(n, kind='lower'), low)
+        assert_array_equal(pascal(n, kind='upper'), low.T)
+        assert_array_almost_equal(pascal(n, exact=False), sym)
+        assert_array_almost_equal(pascal(n, exact=False, kind='lower'), low)
+        assert_array_almost_equal(pascal(n, exact=False, kind='upper'), low.T)
+
+    def test_cases(self):
+        for n, sym, low in self.cases:
+            self.check_case(n, sym, low)
+
+    def test_big(self):
+        p = pascal(50)
+        assert p[-1, -1] == comb(98, 49, exact=True)
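+        # The (i, j) entry of the symmetric Pascal matrix is C(i + j, i),
+        # so the bottom-right entry of pascal(50) is C(98, 49).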
+
+    def test_threshold(self):
+        # Regression test.  An early version of `pascal` returned an
+        # array of type np.uint64 for n=35, but that data type is too small
+        # to hold p[-1, -1].  The second assert_equal below would fail
+        # because p[-1, -1] overflowed.
+        p = pascal(34)
+        assert_equal(2*p.item(-1, -2), p.item(-1, -1), err_msg="n = 34")
+        p = pascal(35)
+        assert_equal(2.*p.item(-1, -2), 1.*p.item(-1, -1), err_msg="n = 35")
+
+
+def test_invpascal():
+
+    def check_invpascal(n, kind, exact):
+        ip = invpascal(n, kind=kind, exact=exact)
+        p = pascal(n, kind=kind, exact=exact)
+        # Matrix-multiply ip and p, and check that we get the identity matrix.
+        # We can't use the simple expression e = ip.dot(p), because when
+        # n < 35 and exact is True, p.dtype is np.uint64 and ip.dtype is
+        # np.int64. The product of those dtypes is np.float64, which loses
+        # precision when n is greater than 18.  Instead we'll cast both to
+        # object arrays, and then multiply.
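+        # (For instance, np.uint64(2**63 + 1) * np.int64(1) is promoted to
+        # float64, which cannot represent that value exactly.)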
+        e = ip.astype(object).dot(p.astype(object))
+        assert_array_equal(e, eye(n), err_msg="n=%d  kind=%r exact=%r" %
+                                              (n, kind, exact))
+
+    kinds = ['symmetric', 'lower', 'upper']
+
+    ns = [1, 2, 5, 18]
+    for n in ns:
+        for kind in kinds:
+            for exact in [True, False]:
+                check_invpascal(n, kind, exact)
+
+    ns = [19, 34, 35, 50]
+    for n in ns:
+        for kind in kinds:
+            check_invpascal(n, kind, True)
+
+
+def test_dft():
+    m = dft(2)
+    expected = array([[1.0, 1.0], [1.0, -1.0]])
+    assert_array_almost_equal(m, expected)
+    m = dft(2, scale='n')
+    assert_array_almost_equal(m, expected/2.0)
+    m = dft(2, scale='sqrtn')
+    assert_array_almost_equal(m, expected/sqrt(2.0))
+
+    x = array([0, 1, 2, 3, 4, 5, 0, 1])
+    m = dft(8)
+    mx = m.dot(x)
+    fx = fft(x)
+    assert_array_almost_equal(mx, fx)
+
+
+def test_fiedler():
+    f = fiedler([])
+    assert_equal(f.size, 0)
+    f = fiedler([123.])
+    assert_array_equal(f, np.array([[0.]]))
+    f = fiedler(np.arange(1, 7))
+    des = np.array([[0, 1, 2, 3, 4, 5],
+                    [1, 0, 1, 2, 3, 4],
+                    [2, 1, 0, 1, 2, 3],
+                    [3, 2, 1, 0, 1, 2],
+                    [4, 3, 2, 1, 0, 1],
+                    [5, 4, 3, 2, 1, 0]])
+    assert_array_equal(f, des)
+
+
+def test_fiedler_companion():
+    fc = fiedler_companion([])
+    assert_equal(fc.size, 0)
+    fc = fiedler_companion([1.])
+    assert_equal(fc.size, 0)
+    fc = fiedler_companion([1., 2.])
+    assert_array_equal(fc, np.array([[-2.]]))
+    fc = fiedler_companion([1e-12, 2., 3.])
+    assert_array_almost_equal(fc, companion([1e-12, 2., 3.]))
+    with assert_raises(ValueError):
+        fiedler_companion([0, 1, 2])
+    fc = fiedler_companion([1., -16., 86., -176., 105.])
+    assert_array_almost_equal(eigvals(fc),
+                              np.array([7., 5., 3., 1.]))
+
+
+class TestConvolutionMatrix:
+    """
+    Test convolution_matrix vs. numpy.convolve for various parameters.
+    """
+
+    def create_vector(self, n, cpx):
+        """Make a complex or real test vector of length n."""
+        x = np.linspace(-2.5, 2.2, n)
+        if cpx:
+            x = x + 1j*np.linspace(-1.5, 3.1, n)
+        return x
+
+    def test_bad_n(self):
+        # n must be a positive integer
+        with pytest.raises(ValueError, match='n must be a positive integer'):
+            convolution_matrix([1, 2, 3], 0)
+
+    def test_bad_first_arg(self):
+        # first arg must be a 1d array, otherwise ValueError
+        with pytest.raises(ValueError, match='one-dimensional'):
+            convolution_matrix(1, 4)
+
+    def test_empty_first_arg(self):
+        # first arg must have at least one value
+        with pytest.raises(ValueError, match=r'len\(a\)'):
+            convolution_matrix([], 4)
+
+    def test_bad_mode(self):
+        # mode must be in ('full', 'valid', 'same')
+        with pytest.raises(ValueError, match='mode.*must be one of'):
+            convolution_matrix((1, 1), 4, mode='invalid argument')
+
+    @pytest.mark.parametrize('cpx', [False, True])
+    @pytest.mark.parametrize('na', [1, 2, 9])
+    @pytest.mark.parametrize('nv', [1, 2, 9])
+    @pytest.mark.parametrize('mode', [None, 'full', 'valid', 'same'])
+    def test_against_numpy_convolve(self, cpx, na, nv, mode):
+        a = self.create_vector(na, cpx)
+        v = self.create_vector(nv, cpx)
+        if mode is None:
+            y1 = np.convolve(v, a)
+            A = convolution_matrix(a, nv)
+        else:
+            y1 = np.convolve(v, a, mode)
+            A = convolution_matrix(a, nv, mode)
+        y2 = A @ v
+        assert_array_almost_equal(y1, y2)
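+
+# Illustrative sketch (not part of the original suite) of the property the
+# parametrized test above exercises: for mode='full', A @ v reproduces
+# np.convolve(v, a).
+#
+#     a = np.array([1.0, 2.0, 3.0])
+#     A = convolution_matrix(a, 5)      # shape (len(a) + 5 - 1, 5)
+#     v = np.arange(5.0)
+#     assert np.allclose(A @ v, np.convolve(v, a))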
diff --git a/__packaged__/coreml/.python_dependencies/scipy/misc/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/misc/__init__.py
new file mode 100644
index 00000000..90b393b4
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/misc/__init__.py
@@ -0,0 +1,67 @@
+"""
+==========================================
+Miscellaneous routines (:mod:`scipy.misc`)
+==========================================
+
+.. currentmodule:: scipy.misc
+
+.. deprecated:: 1.10.0
+
+   This module is deprecated and will be completely
+   removed in SciPy v2.0.0.
+
+Various utilities that don't have another home.
+
+.. autosummary::
+   :toctree: generated/
+
+   ascent - Get example image for processing
+   central_diff_weights - Weights for an n-point central mth derivative
+   derivative - Find the nth derivative of a function at a point
+   face - Get example image for processing
+   electrocardiogram - Load an example of a 1-D signal
+
+"""
+
+
+from ._common import *
+from . import _common
+import warnings
+
+# Deprecated namespaces, to be removed in v2.0.0
+from . import common, doccer
+
+__all__ = _common.__all__
+
+dataset_methods = ['ascent', 'face', 'electrocardiogram']
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.misc is deprecated and has no attribute "
+            f"{name}.")
+
+    if name in dataset_methods:
+        msg = ("The module `scipy.misc` is deprecated and will be "
+               "completely removed in SciPy v2.0.0. "
+               f"All dataset methods including {name}, must be imported "
+               "directly from the new `scipy.datasets` module.")
+    else:
+        msg = (f"The method `{name}` from the `scipy.misc` namespace is"
+               " deprecated, and will be removed in SciPy v1.12.0.")
+
+    warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
+
+    # getattr() needs the module as its first argument; a bare
+    # getattr(name) would raise TypeError.
+    return getattr(_common, name)
+
+
+# NB: `_common` is intentionally kept around; __getattr__ above resolves
+# deprecated names through it.
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
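+
+# Sketch of how the deprecation shim above behaves (hypothetical session,
+# not part of the original module):
+#
+#     >>> import warnings, scipy.misc
+#     >>> with warnings.catch_warnings(record=True) as w:
+#     ...     warnings.simplefilter("always")
+...     _ = scipy.misc.__getattr__("face")
+#     >>> w[0].category
+#     <class 'DeprecationWarning'>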
diff --git a/__packaged__/coreml/.python_dependencies/scipy/misc/_common.py b/__packaged__/coreml/.python_dependencies/scipy/misc/_common.py
new file mode 100644
index 00000000..ce4817d9
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/misc/_common.py
@@ -0,0 +1,342 @@
+"""
+Functions which are common and require SciPy Base and Level 1 SciPy
+(special, linalg)
+"""
+
+from scipy._lib.deprecation import _deprecated
+from scipy._lib._finite_differences import _central_diff_weights, _derivative
+from numpy import array, frombuffer, load
+
+
+__all__ = ['central_diff_weights', 'derivative', 'ascent', 'face',
+           'electrocardiogram']
+
+
+@_deprecated(msg="scipy.misc.central_diff_weights is deprecated in "
+                 "SciPy v1.10.0; and will be completely removed in "
+                 "SciPy v1.12.0. You may consider using "
+                 "findiff: https://github.com/maroba/findiff or "
+                 "numdifftools: https://github.com/pbrod/numdifftools")
+def central_diff_weights(Np, ndiv=1):
+    """
+    Return weights for an Np-point central derivative.
+
+    Assumes equally-spaced function points.
+
+    If the weights are in the vector w, then the
+    derivative is w[0] * f(x-h0*dx) + ... + w[-1] * f(x+h0*dx).
+
+    .. deprecated:: 1.10.0
+        `central_diff_weights` has been deprecated from
+        `scipy.misc.central_diff_weights` in SciPy 1.10.0 and
+        it will be completely removed in SciPy 1.12.0.
+        You may consider using
+        findiff: https://github.com/maroba/findiff or
+        numdifftools: https://github.com/pbrod/numdifftools
+
+    Parameters
+    ----------
+    Np : int
+        Number of points for the central derivative.
+    ndiv : int, optional
+        Number of divisions. Default is 1.
+
+    Returns
+    -------
+    w : ndarray
+        Weights for an Np-point central derivative. Its size is `Np`.
+
+    Notes
+    -----
+    Can be inaccurate for a large number of points.
+
+    Examples
+    --------
+    We can calculate a derivative value of a function.
+
+    >>> from scipy.misc import central_diff_weights
+    >>> def f(x):
+    ...     return 2 * x**2 + 3
+    >>> x = 3.0 # derivative point
+    >>> h = 0.1 # differential step
+    >>> Np = 3 # point number for central derivative
+    >>> weights = central_diff_weights(Np) # weights for first derivative
+    >>> vals = [f(x + (i - Np/2) * h) for i in range(Np)]
+    >>> sum(w * v for (w, v) in zip(weights, vals))/h
+    11.79999999999998
+
+    This value is close to the analytical solution:
+    f'(x) = 4x, so f'(3) = 12
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Finite_difference
+
+    """
+    return _central_diff_weights(Np, ndiv)
+
+
+@_deprecated(msg="scipy.misc.derivative is deprecated in "
+                 "SciPy v1.10.0; and will be completely removed in "
+                 "SciPy v1.12.0. You may consider using "
+                 "findiff: https://github.com/maroba/findiff or "
+                 "numdifftools: https://github.com/pbrod/numdifftools")
+def derivative(func, x0, dx=1.0, n=1, args=(), order=3):
+    """
+    Find the nth derivative of a function at a point.
+
+    Given a function, use a central difference formula with spacing `dx` to
+    compute the nth derivative at `x0`.
+
+    .. deprecated:: 1.10.0
+        `derivative` has been deprecated from `scipy.misc.derivative`
+        in SciPy 1.10.0 and it will be completely removed in SciPy 1.12.0.
+        You may consider using
+        findiff: https://github.com/maroba/findiff or
+        numdifftools: https://github.com/pbrod/numdifftools
+
+    Parameters
+    ----------
+    func : function
+        Input function.
+    x0 : float
+        The point at which the nth derivative is found.
+    dx : float, optional
+        Spacing.
+    n : int, optional
+        Order of the derivative. Default is 1.
+    args : tuple, optional
+        Extra arguments to pass to `func`.
+    order : int, optional
+        Number of points to use, must be odd.
+
+    Notes
+    -----
+    Making the step size too small can result in round-off error.
+
+    Examples
+    --------
+    >>> from scipy.misc import derivative
+    >>> def f(x):
+    ...     return x**3 + x**2
+    >>> derivative(f, 1.0, dx=1e-6)
+    4.9999999999217337
+
+    """
+    return _derivative(func, x0, dx, n, args, order)
+
+
+@_deprecated(msg="scipy.misc.ascent has been deprecated in SciPy v1.10.0;"
+                 " and will be completely removed in SciPy v1.12.0. "
+                 "Dataset methods have moved into the scipy.datasets "
+                 "module. Use scipy.datasets.ascent instead.")
+def ascent():
+    """
+    Get an 8-bit grayscale, 512 x 512 derived image for easy use in demos.
+
+    The image is derived from accent-to-the-top.jpg at
+    http://www.public-domain-image.com/people-public-domain-images-pictures/
+
+    .. deprecated:: 1.10.0
+        `ascent` has been deprecated from `scipy.misc.ascent`
+        in SciPy 1.10.0 and it will be completely removed in SciPy 1.12.0.
+        Dataset methods have moved into the `scipy.datasets` module.
+        Use `scipy.datasets.ascent` instead.
+
+    Parameters
+    ----------
+    None
+
+    Returns
+    -------
+    ascent : ndarray
+       convenient image to use for testing and demonstration
+
+    Examples
+    --------
+    >>> import scipy.misc
+    >>> ascent = scipy.misc.ascent()
+    >>> ascent.shape
+    (512, 512)
+    >>> ascent.max()
+    255
+
+    >>> import matplotlib.pyplot as plt
+    >>> plt.gray()
+    >>> plt.imshow(ascent)
+    >>> plt.show()
+
+    """
+    import pickle
+    import os
+    fname = os.path.join(os.path.dirname(__file__), 'ascent.dat')
+    with open(fname, 'rb') as f:
+        ascent = array(pickle.load(f))
+    return ascent
+
+
+@_deprecated(msg="scipy.misc.face has been deprecated in SciPy v1.10.0; "
+                 "and will be completely removed in SciPy v1.12.0. "
+                 "Dataset methods have moved into the scipy.datasets "
+                 "module. Use scipy.datasets.face instead.")
+def face(gray=False):
+    """
+    Get a 1024 x 768 color image of a raccoon face.
+
+    The image is derived from raccoon-procyon-lotor.jpg at
+    http://www.public-domain-image.com
+
+    .. deprecated:: 1.10.0
+        `face` has been deprecated from `scipy.misc.face`
+        in SciPy 1.10.0 and it will be completely removed in SciPy 1.12.0.
+        Dataset methods have moved into the `scipy.datasets` module.
+        Use `scipy.datasets.face` instead.
+
+    Parameters
+    ----------
+    gray : bool, optional
+        If True, return an 8-bit grayscale image; otherwise return a color image.
+
+    Returns
+    -------
+    face : ndarray
+        image of a raccoon face
+
+    Examples
+    --------
+    >>> import scipy.misc
+    >>> face = scipy.misc.face()
+    >>> face.shape
+    (768, 1024, 3)
+    >>> face.max()
+    255
+    >>> face.dtype
+    dtype('uint8')
+
+    >>> import matplotlib.pyplot as plt
+    >>> plt.gray()
+    >>> plt.imshow(face)
+    >>> plt.show()
+
+    """
+    import bz2
+    import os
+    with open(os.path.join(os.path.dirname(__file__), 'face.dat'), 'rb') as f:
+        rawdata = f.read()
+    data = bz2.decompress(rawdata)
+    face = frombuffer(data, dtype='uint8')
+    face.shape = (768, 1024, 3)
+    if gray is True:
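+        # The 0.21/0.71/0.07 mix approximates the Rec. 709 luma weights.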
+        face = (0.21 * face[:, :, 0] + 0.71 * face[:, :, 1] +
+                0.07 * face[:, :, 2]).astype('uint8')
+    return face
+
+
+@_deprecated(msg="scipy.misc.electrocardiogram has been "
+                 "deprecated in SciPy v1.10.0; and will "
+                 "be completely removed in SciPy v1.12.0. "
+                 "Dataset methods have moved into the scipy.datasets "
+                 "module. Use scipy.datasets.electrocardiogram instead.")
+def electrocardiogram():
+    """
+    Load an electrocardiogram as an example for a 1-D signal.
+
+    The returned signal is a 5 minute long electrocardiogram (ECG), a medical
+    recording of the heart's electrical activity, sampled at 360 Hz.
+
+    .. deprecated:: 1.10.0
+        `electrocardiogram` has been deprecated from
+        `scipy.misc.electrocardiogram` in SciPy 1.10.0 and it will be
+        completely removed in SciPy 1.12.0.
+        Dataset methods have moved into the `scipy.datasets` module.
+        Use `scipy.datasets.electrocardiogram` instead.
+
+    Returns
+    -------
+    ecg : ndarray
+        The electrocardiogram in millivolt (mV) sampled at 360 Hz.
+
+    Notes
+    -----
+    The provided signal is an excerpt (19:35 to 24:35) from the `record 208`_
+    (lead MLII) provided by the MIT-BIH Arrhythmia Database [1]_ on
+    PhysioNet [2]_. The excerpt includes noise induced artifacts, typical
+    heartbeats as well as pathological changes.
+
+    .. _record 208: https://physionet.org/physiobank/database/html/mitdbdir/records.htm#208
+
+    .. versionadded:: 1.1.0
+
+    References
+    ----------
+    .. [1] Moody GB, Mark RG. The impact of the MIT-BIH Arrhythmia Database.
+           IEEE Eng in Med and Biol 20(3):45-50 (May-June 2001).
+           (PMID: 11446209); :doi:`10.13026/C2F305`
+    .. [2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh,
+           Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. PhysioBank,
+           PhysioToolkit, and PhysioNet: Components of a New Research Resource
+           for Complex Physiologic Signals. Circulation 101(23):e215-e220;
+           :doi:`10.1161/01.CIR.101.23.e215`
+
+    Examples
+    --------
+    >>> from scipy.misc import electrocardiogram
+    >>> ecg = electrocardiogram()
+    >>> ecg
+    array([-0.245, -0.215, -0.185, ..., -0.405, -0.395, -0.385])
+    >>> ecg.shape, ecg.mean(), ecg.std()
+    ((108000,), -0.16510875, 0.5992473991177294)
+
+    As stated, the signal features several areas with different morphology.
+    E.g., the first few seconds show the electrical activity of a heart in
+    normal sinus rhythm, as seen below.
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> fs = 360
+    >>> time = np.arange(ecg.size) / fs
+    >>> plt.plot(time, ecg)
+    >>> plt.xlabel("time in s")
+    >>> plt.ylabel("ECG in mV")
+    >>> plt.xlim(9, 10.2)
+    >>> plt.ylim(-1, 1.5)
+    >>> plt.show()
+
+    After second 16, however, the first premature ventricular contractions, also
+    called extrasystoles, appear. These have a different morphology compared to
+    typical heartbeats. The difference can easily be observed in the following
+    plot.
+
+    >>> plt.plot(time, ecg)
+    >>> plt.xlabel("time in s")
+    >>> plt.ylabel("ECG in mV")
+    >>> plt.xlim(46.5, 50)
+    >>> plt.ylim(-2, 1.5)
+    >>> plt.show()
+
+    At several points large artifacts disturb the recording, e.g.:
+
+    >>> plt.plot(time, ecg)
+    >>> plt.xlabel("time in s")
+    >>> plt.ylabel("ECG in mV")
+    >>> plt.xlim(207, 215)
+    >>> plt.ylim(-2, 3.5)
+    >>> plt.show()
+
+    Finally, examining the power spectrum reveals that most of the biosignal is
+    made up of lower frequencies. At 60 Hz the noise induced by the mains
+    electricity can be clearly observed.
+
+    >>> from scipy.signal import welch
+    >>> f, Pxx = welch(ecg, fs=fs, nperseg=2048, scaling="spectrum")
+    >>> plt.semilogy(f, Pxx)
+    >>> plt.xlabel("Frequency in Hz")
+    >>> plt.ylabel("Power spectrum of the ECG in mV**2")
+    >>> plt.xlim(f[[0, -1]])
+    >>> plt.show()
+    """
+    import os
+    file_path = os.path.join(os.path.dirname(__file__), "ecg.dat")
+    with load(file_path) as file:
+        ecg = file["ecg"].astype(int)  # np.uint16 -> int
+    # Convert raw output of ADC to mV: (ecg - adc_zero) / adc_gain
+    ecg = (ecg - 1024) / 200.0
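+    # (adc_zero = 1024 and adc_gain = 200 ADC units per mV are the
+    # documented front-end parameters for the MIT-BIH recordings.)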
+    return ecg
diff --git a/__packaged__/coreml/.python_dependencies/scipy/misc/ascent.dat b/__packaged__/coreml/.python_dependencies/scipy/misc/ascent.dat
new file mode 100644
index 00000000..f3602460
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/misc/ascent.dat differ
K3K6K7K5K3K-K,K&K-K>K?KCKBKEKAK9K=K=KBK:K6K/K2K7K/K'K)K%K8K@K;K6KAKLKWK^K_K_KPK;K4K2K4K3K+K'K$K+K3K3K2K8KKKTKWKYKRKNK>K0K2K4K7K*K#K!K&K,K.K2K+KEKTKQKSKSKRKHK/K-K+K*K'K#KK%K*K.K-K'KBKRKRKPKOKNKNK(K#K#K$KKK%K)K)K)K'K:KTKVKYK]K\KVK2K+K(K$K$K$K#K'K?KJKHKHKIKIKHKIKHKIKIKIKHKJKEK&KK.KGKHKGKJKLKKKEK>K=K=K=KK>K>K>K>K>K>K>K>K>K>K>K>K?K?K:K/K-K4K4K5K6K8K7K3K2K2K,KKKK#K4K4K5K5K5K5K5K5K5K5K4K3K4K5K3K3K3K3K5K3KK9K>K>KBK8K5K2K5K5K+K'K(K'KK>K>K>K>K>K>K>K>K>K>K>K>K>K=K?K;K-K/K2K5K6K8K7K4K3K2K1K.KKKK%K3K4K4K5K5K5K5K5K5K4K3K4K5K3K3K3K2K/K2K4K9K\KuKwKuKwKxKxKxKvKvKvKxKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKvKwKyKxKxKxKxKxKxKxKxKxKxKxe]q'(KUKUKUKUKUKUKUKUKUKUKUKUKWKWKVKVKVKUKUKUKVKWKWKUKVKXKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKYK[KXKWKWKWKWKYK[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK\K\K]K\KZKZKZK\K\KZK[K]K\K\K\K\K\K\K]K[KZK\K\K\K\K\K\K\K\K\K\K\K\K\K\K_K_K\K\K\K^K`K`K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_KaKbKbKbKbK`K_KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbK`KaKeKeKeKeKeKdKdKdKdKeKeKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKfKgKfKdKVKLK?KOK]K]KbK]K\KYK[KYK\K]K^K^K^K[K_KsKhKVKWKXKAK!K(K)K%K*K6K;K>KKKKKK'K6K8K8K5K1K.K*K&K9KCKBK@K>KKK=K:K>K@KDK8K6K8K9K8K*K&K%K*KKKK7KHKGKHKKKLKJKDK@KK?K>K>K>K>K>K>K>K>K>K>K>K>K>K>K>K?K?K>K9K2K/K1K5K8K8K8K5K5K1K/K/KKKK+K4K4K5K5K5K5K4K4K5K2K1K3K5K4K3K2K1K3K5K3K6KWKqKuKvKuKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q((KUKUKUKUKUKUKUKUKVKVKVKUKVKVKVKVKVKUKUKUKVKWKWKVKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXKYKWKWKWKYK[KXKWKYKYKYKYKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K[K[KZK\K\K\K\KZKZKZK\K\KZK[K\K\K\K\K\K\K\K\K\K[K\K\K\K\K\K\K\K]K]K\K\K\K\K\K^K^K^K^K^K^K^K^K^K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`K`K`KaKaKaKaK`K`KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKcKbKaKaKbKcKcKcKcKcKdKdKdKdKcKcKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKeKeKfKfKeKeKRKIK8KOK]K^K`K[K\KYK[KYK\K\K_K]K\KZKdKvKdKVKXKXKK?KDKGK;K;K=KAKAK8K9K9K;K/K%K'K%K-K:K9K:KBKPKVK\K\K[KSK;K2K/K6K4K,K(K"K(K5K4K4K7KGKTKWKZK\KQKBK1K0K3K7K.K!KK(K/K/K0K,K?KTKTKVKVKVKKK-K,K,K-K'K$K K!K)K.K+K'KBKSKSKRKQKOK?K'K$K#K%KKK%K)K)K)K'K?KVKVKZK[KYKVK.K,K)K%K#K"K"K$K!K!K"K3KHKKKIKIKHKHKJKLKKKJKIKIKGKKK;KKK;KGKGKJKLKIKIKFK?K>K?K>K>K>K>K>K>K>K>K>K>K>K>K>K>K>K>K>K>K?KK9KKKKKK0K6K8K8K8K-K+K)K*K>KCKBK?KEKHK9K;K=KAKBK=K:K:K8K2K&K&K%K4K;K:K:KBKRKWK\K^K\KVK=K3K5K7K5K,K'K"K+K2K5K8K;KOKUKXKYKZKKK@K6K5K8K4K(K#K!K)K/K1K/K0KHKSKWKVKUKQK>K,K.K-K,K%K$KK$K*K+K'K+KHKQKRKRKSKQK9K$K#K#K%KKK%K)K)K(K,KLKVKUK[KXKZKGK.K/K'K$K$K$K#K#K 
K!K"K$KK>K?K?K>K?K>K>K>K>K>K>K>K>K>K>K>K>K>K?K?K>K:K.K.K3K4K5K5K5K5K4K2K0K-K,K(K+K/K3K5K5K5K5K5K5K5K3K2K3K3K2K3K5K3K3K3K3K2K3KOKqKwKxKyKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKxKxKxKxe]q*(KUKUKUKUKUKUKUKUKVKVKVKWKWKWKWKVKUKVKWKWKVKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXKXKYKYKYKZKZKZKYKYKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K[K[KZK[K[K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K]K\K\K\K^K_K]K]K_K_K]K]K\K^K_K`K]K_K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`KaK`K`KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKbKaKbKdKcKcKcKcKcKcKeKdKdKcKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKeKfKfKfKcKIKAK"KNK^K]K^KYK[KYK\KYK\K\K_K_KZKYKiKxK_KVKXKTK-K$K(K%K"K.K8K>K1KKKKKK3K5K6K5K5K/K+K&K+K@KCKCK?KGKCK7K;K=KBKDK;K:K8K5K.K%K%K%K7KK>K?K>K?K>K>K>K>K>K>K>K>K>K>K>K?K=K=K>K?K:K/K/K4K3K6K7K8K8K5K2K1K.K*K0K4K2K3K4K5K5K5K5K5K4K4K3K3K1K3K5K3K3K3K3K3K3K4KNKrKxKvKvKwKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKvKxKxKxKxe]q+(KVKUKUKUKUKUKUKUKUKUKTKTKWKXKXKWKUKWKXKYKVKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKZK[K[KZKZK[K[K[K[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K\K\K\K\K\K\K\K]K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K^K`K]K]K`K^K\K\K\K_K`K`K\K_K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`KbK`K_KaKbKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKcKeKdKeKdKdKeKdKeKdKeKeKeKdKdKdKdKdKdKdKdKdKdKdKdKdKdKeKcKdKgKgKfK`KFK;KKPK]K\K]KYK[KYK\KYK\K\K_K^KZK\KmKtK\KWKYKOK(K%K'K$K K0K7K=K*KKKKK"K6K4K4K4K5K-K+K%K0KAKCKBK>KJK?K9K:K>KFK@K:K?K8K2K-K%K%K(K;K9K5K;KIKXK]K`KXKUKJK9K5K2K7K4K'K$K(K2K5K8K6KCKVKWK[K]K\KIK2K3K3K7K-K%K"K%K.K1K4K,KK=K?K>K?K>K>K>K>K>K>K>K>K>K>K>K?K=KK?K8K,K-K4K8K8K7K9K7K4K1K2K/K,K*K/K3K3K5K5K5K5K5K5K5K3K3K3K4K5K3K3K3K3K3K2K4K3KHKoKxKtKwKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKwKuKwKyKxKxe]q,(KUKUKUKUKUKUKUKUKUKUKUKUKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXKZKYKWKWKWKWKWKZKZKWKYK[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK\K\KZK[K]K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K\K\K\K\K\K\K\K]K`K]K\K_K_K\K]K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_KaKaK_K_K_K`KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKbKdKdKbKbKeKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKfKgKdK]K=KYK'KLK^K]K]KYK\KYK[KYK^K[KaK\K\K]KnKpKZKWKYKJK$K&K'K#K#K5K9K?K$KKKKK(K5K2K8K8K4K*K)K$K4KDK@KBKAKHKK7K6K'K&K$K/K;K8K8KK?K>K>K>K>K>K>K>K>K>K>K?K>K>K>K>K>K>K>K>K@K6K*K0K6K8K8K8K8K6K2K3K2K0K)K)K1K2K4K4K5K5K5K5K5K5K4K5K4K3K3K3K4K4K3K2K2K5K3KFKkKwKwKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q-(KVKVKUKVKVKUKUKVKVKUKUKUKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXKZKYKXKXKWKXKXKZKYKXKYK[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK\K\K[K[K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K^K_K^K]K_K_K]K^K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_KaKaK_K_K_K`KbKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKbKaKaKaKdKdKaKbKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKfKfKdK`KAKrKPKKK_K]K]KYK\KYK[KYK^K[KaK\K\K_KrKmKXKWKYKCK$K&K'K#K%K5K:K>KKKKKK*K1K3K8K8K1K*K(K%K;KCK?KAKBKGK:K:K=KCKEKK>K?K>K>K>K?K>K>K>K?K>K>K?K>K>K>K?K>K?K=KK?K;K>KBKDK8K=K?KGKFK9K8K:K6K.K$K'K%K8KK?K>K?K?K>K?K>K>K?K>K>K@K>K>K>K?K=K?K=KK:K-K-K3K7K8K8K8K6K2K3K1K/K-K-K0K2K3K4K6K5K5K5K5K5K4K2K3K3K3K3K3K3K3K3K2K5K2K=KeKwKuKuKxKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q/(KWKVKUKVKWKVKVKWKWKUKUKUKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXK[KYKXKZKZKWKXKZKZKZKYKYKZKZKZKZKZKZKZKZKZK
ZKZKZK[K[KZKZKZKZKZK[K[KZKZKZKZKZKZKZK[K[KZK[K]K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K^K]K\K\K\K\K]K^K^K]K_K_K_K_K_K_K_K_K^K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`K`K`K`K_K`KbKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKdKdKaKbKcKcKcKcKcKbKcKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKeKeKeKfKUK]K[KPK�K`K\K]K]KYK\KYK\KYK]K[KaK]KZKbKtKfKWKXKXK6K$K(K&K#K*K5K:K4KKKKKK.K4K4K2K4K.K(K%K*KKCKJKBK:K=K;K9K.K#K&K'K;KK?K>K>K?K>K?K>K?K>K>K?K>K@K@K?K>K?K>K=K?K@K9K.K/K4K7K7K7K8K6K3K3K2K/K,K+K1K3K3K4K4K4K6K6K5K4K4K3K3K3K3K4K3K3K2K3K4K4K2K;KcKvKvKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q0(KUKUKUKUKUKUKUKWKWKUKUKUKWKWKWKWKWKWKWKUKUKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXKZK[K[K[KZKWKXK[KZK[KXKWKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK\K]K[KZKZKZKZKZKZKZKZKZK[K]K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K`K_K_K_K_K_K_K`K]K\K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_KaKbKbKaK_K`KbK_K_KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKeKdKaKcKeKbKaKeKeKeKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKeKgKfKhKXKXK`KOKfK^K^K\K\KYK\KYK\KXK^K[KbK\KZKhKuKaKYKYKTK/K'K)K%K"K,K5K9K.KKKKK K3K4K2K6K3K(K'K%K-K>K?K;KK?K?K>K>K>K>K>K?K?K>K>K>K>K>K?K>K?K?K>K>K?K2K*K0K3K5K5K8K8K5K4K2K/K1K-K.K3K3K2K2K3K5K5K5K5K5K3K2K3K3K5K4K2K3K3K3K3K5K4K>K`KtKtKwKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q1(KUKUKUKVKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXKWKXKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXKZKXKWKWKXKYKZK[KYKXKZKZKYKYKXKYK[KXKWKZKZKZKZKZKZKZKZKZKZKZKZKYKYKZKZKZKZKZK[K[KZK[K\K[KZK[KYK[K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K^K^K^K]K]K^K^K]K_K`K_K_K_K_K_K_K^K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_KaK`K_K`KaK`K`K`K`KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKcKcKdKdKdKcKbKcKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKfKfKfKeKeKhKcKXKTKcKQKTK`KYK[K^K[K]K[K]K[K^K[KaK]K[KkKsK^KZKYKQK)K(K)K&K$K1K5K:K&KKKKKK0K5K1K4K1K&K#K#K0K?K>K;K=KFK9K8K=KGKLK:KKFKGKIKLKLKIKBK>K@K?K>K?K>K>K?K>K=K>K?K>K?K>K>K>K>K>K?K>K>K8K)K,K,K3K5K6K6K7K6K3K1K1K0K-K1K0K1K3K4K2K3K3K4K5K4K4K3K3K5K4K4K3K3K3K3K3K4K3K8K[KtKvKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q2(KUKUKUKWKXKWKWKWKWKWKXKXKWKWKWKWKWKWKWKWKXKWKXKWKXKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXK[KYKWKWKXKZKZK[KYKWKZK[K[KZKWKYK[KXKWKZKZKZKZKZKZKZKZKZKZKZKZK[K[KZKZKZKZKZKZKZKZK[K]K\KZK\KYK\K]K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K^K`K_K`K]K]K`K^K\K^K`K_K_K_K_K_K_K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`KbKaK_KaKbK`K_K_K`KbKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKeKdKdKdKeKeKeKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKfKgKgKfKdKfKcK\KVKaKVKTK{KTK[K_K]K]K\K^K]K^KZKaK^K\KnKrK\KZK[KLK&K'K*K'K&K2K4K7K KKKKK%K6K3K2K1K,K'K$K#K7K=K=KKDK8K;KAKLKKK:K5K5K1K.K"K$K%K7K>K;KK;K9K8K(K"K%K4K8K8K5K=KTK^KaK`K_KMK3K0K0K0K,K#K"K&K0K3K4K.K?KTKYK[KYKWKAK,K*K*K)K&K#K 
K&K+K,K*K.KLKOKRKSKQKSK7K$K'K&K$KKK$K&K(K'K2KLKQKPKRKUKVK9K6K.K%K&K$K$K$K#KK#K%K#K\KwKsKsKsKsKsKsKsKtKfKPKJKKKLKLKLKKKLKLKHKJKLKKKIKIK/KKK@KCKDKGKLKLKIKDK?K>K?K>K>K>K>K?K?K?K>K>K>K>K>K>K>K>K?K>K=K;K;K6K&K)K5K5K5K8K7K4K3K3K1K/K-K(K2K4K2K3K3K2K4K5K5K5K3K3K5K5K5K3K3K3K3K2K2K3K2K6KWKuKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q3(KUKVKWKWKXKXKXKWKWKWKWKXKXKXKWKWKWKWKVKWKXKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXKWKWKWKXKZKZK[KYKWKZK[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K\K[KZKZKZKZK[K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K]K]K_K_K_K_K_K_K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`K`K_K_K`K_K`KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKdKdKaKcKeKdKdKeKeKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKeKfKeKdKdKeKfKfKfKgKdK]K[K\K^KHK~K\KWK]K\K]K\K^K\K^K\KaK]K_KpKoK[KZKZKFK$K)K)K#K%K1K7K8KKKKKK+K6K4K5K5K,K&K%K'K;K=K:K:K=K@K6K=KCKNKHK;K9K0K,K*K#K#K&KK?K>K?K>K>K?K?K?K>K>K>K>K>K>K>K>K>K>K>K>K?K?K4K)K-K2K5K5K6K8K5K5K3K3K0K)K&K$K!K-K3K2K3K5K5K5K3K3K5K4K3K4K5K5K4K3K3K3K5K4K5KYKuKvKuKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q4(KVKVKXKXKXKXKXKWKWKWKXKXKXKXKWKWKWKXKYKXKXKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXKXKWKXKZKZKZKYKXKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K]K[KZKZKZK[K[K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K]K\K\K\K\K\K_K_K_K_K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKdKcKaKcKeKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKeKgKeKdKeKeKfKfKfKdK_KZK[K[KbKHKsKuKTK]K\K]K\K^K]K^K\KbK\K_KrKlKZKZKZKKK6K9K:KK>K?K>K>K>K>K>K>K>K>K>K>K>K>K>K>K>K>K>K@K=K4K-K0K4K5K6K8K6K5K3K2K/K0K$KKKK1K3K2K5K5K5K3K3K5K4K3K4K5K5K4K3K3K3K4K5K2K3KPKrKvKwKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q5(KWKWKWKXKXKXKXKWKWKWKXKXKXKXKWKWKWKXKXKXKXKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXK[KWKWKWKWKWKYK[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K]K[KZKZK[K]K\KZK\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K_K_K\K\K\K\K\K\K]K_K_K`K]K\K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`KaKaKbKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKeKdKeKbKaKdKeKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKeKgKeKdKfKfKfKfKeK\KTK]K_KWKaKOK`K}KUK]K\K]K]K\K^K\K]KdK]KbKqKfKZKXKYK5K#K&K&K"K(K2K6K/KKKKKK,K3K4K5K4K(K&K K+K;K9K8K7K?K8K6K@KJKRK=K;K8K1K1K'K"K#K/K=K9K;K;KLKVKQK_KhKVK@K9K:KK>K>K>K>K>K>K>K>K>K>K>K>K>K?K>KK?K>K5K-K2K3K4K5K7K8K4K4K1K0K-KK	K
K!K2K5K5K5K5K4K1K2K4K5K3K2K5K4K3K3K3K2K2K3K2K3KPKqKvKvKxKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q6(KWKWKWKWKXKXKXKWKWKWKWKXKXKWKWKWKWKWKWKXKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXKXKWKXKXKYK[KYKXKYKYKXKYKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K]K[KZK[K[K\K[K[K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K^K^K]K^K^K^K^K\K]K_K_K_K^K]K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`KaKaK`KaKaKaKaKaKaKaKaKaKaKaKbKaKaKaKaKaKaKaKbKcKcKcKcKcKbKcKdKdKdKcKcKcKcKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKeKfKeKdKfKfKfKgKeK]KVK]KdKWK^KZKOKZKVKZK\K]K]K\K]K^K^KbK]KdKuKeKYKYKWK0K%K(K(K#K*K2K7K)KKKKK"K0K4K4K5K1K&K'K#K/K:K9K8K7K@K:K;KDKOKPK;K9K7K5K0K"K#K#K5K=K9K:K=KLKFKCKSKbKMK;K:K:K>K9K&K K$K5K:K8K6K=KYKcKfKbK`KHK6K3K2K2K*K"K"K)K2K3K1K/KFKSKXKWKTKQK9K)K*K&K%K%K#KK%K+K/K)K4KNKPKQKQKQKPK+K!K)K*KKK#K(K&K'K*K?KJKIKKKMKPKIK0K2K(K$K"K$K&K%K#K#K&K K?KuKsKtKsKsKsKsKsKsKsKsKtKsKuKpKQKIKJKJKJKJKIKIKJKIKHKIKHKIKKK>KKK7KFKFKIKJKKKKKJKCK>K?K?K>K?K?K?K?K>K>K>K>K>K>K>K?K>K=K?K>K>K?K>K5K-K/K2K4K7K8K7K7K5K2K0K/K!KKKK1K5K4K5K4K2K3K4K5K4K3K5K5K3K4K4K3K3K3K2K2K2KLKqKuKvKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q7(KXKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKZK[KWKXK[KZKZK[K[K[K[K[KZKZK[K[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K]K[KZK\K\KZK[K]K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K`K`K`K`K`K]K]K_K_K_K_K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_KaKaK_K`KbKaKaKaKaKaKaKaKaKbKbK`KaKaKaKaKaKaKbKeKeKeKeKeKeKeKdKdKdKeKdKaKbKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKeKfKfKcKcK]K`K[KYKaKZK[K`KLKlKcKUK^K]K]K\K\K_K_KaK[KgKtK`KYK\KSK+K(K)K(K$K-K2K5K"KKKKK%K1K1K4K3K.K#K#K$K4K9K7K8K8K@KK?K?K?K?K?K>K>K>K>K>K>K>K>K?K?K>K>K>K>K?K=K4K-K4K2K6K8K8K9K8K5K2K/K.K KKK!K1K5K5K5K5K5K5K5K5K5K5K5K5K5K5K5K5K3K3K3K2K2KHKpKvKwKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q8(KVKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXKXKWKXKZKZKZKYKXKZKZKZKZKZKYKXKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K\K\K]K[KZK\K\KZK[K\K\K\K\K\K\K\K[K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K]K^K_K_K_K]K]K_K_K_K_K_K_K_K_K_K_K_K_K_K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`KaKaKaKaKaKaKaKaK`K`KaKaKaKaKaKaKaKaKaKbKaKaKaKaKaKaKaKbKeKcKbKdKdKdKdKdKdKdKdKdK`KaKeKdKdKdKdKdKdKdKdKdKdKdKdKeKfKfKfKeKdKeKhKjK[KVK`K]KUK\K_KXKaKKKmKlKLK]K[K[K\K_K_K_K^K\KkKsK^KZK[KMK'K)K'K%K#K+K1K7KKKKKK$K.K/K2K3K*K$K#K&K7K9K8K7K9K@KK>K?K?K?K@K?K>K>K>K>K>K>K?K>K>K?K>K>K>K>K>K?K>K2K-K2K4K6K6K7K9K7K3K0K/K.K KKKK2K5K4K5K5K5K5K5K5K5K5K5K5K5K5K5K3K3K3K3K3K0KFKkKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q9(KUKVKXKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXK[KZK[KXKWKZKZKZKZK[KXKWKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K]K\K\K[KZK\K\KZK[K]K\K\K\K\K\K]K]K]K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K`K_K_K]K]K_K_K_K_K_K_K_K_K_K_K_K_K_K`K^K_K`K_K_K_K_K_K_K_K_K_K_K_K_K`KbKaKaKaKaKaKaKbK`K_KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKeKcKaKdKeKdKdKdKdKdKdKdKbKcKdKdKdKdKdKdKdKdKdKdKdKdKdKeKgKgKgKfKfKaK`K^KVKVKaKaKVKYK`KWK`KSKXK|KRK\KZKZK]K`K_K_K\K]KoKsK\KZK[KCK#K)K(K%K%K-K1K4KKKKKK&K0K.K1K4K(K$K"K&K7K8K8K6K9K@K=KIKTKTK;K9K9K5K1K%K K!K,K;K:K=KK>K>K?K>K>K>K>K>K>K?K?K>K>K>K>K>K>K>K>K;K3K.K0K1K4K5K8K7K5K2K.K/K0K KKK 
K1K5K4K5K5K5K5K5K5K5K5K5K5K5K5K3K3K3K3K3K4K2K@KjKvKxKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q:(KWKWKVKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKXKZKYKWKWKWKWKXKZKYKWKYKZKXKWKWKWKWKWKWKWKXKZKZKZKZKZKZKZKZKZK[KYKWKYKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K\K[KZKZKZKZK[K]K\KYK\K\K\K\K\K\K\K\K\K\K\K\K]K]K]K\K\K\K\K\K\K[K]K]K\K\K\K\K\K\K\K\K\K]K`K^K]K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKbKaKdKeKdKdKdKdKdKdKdKeKeKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKfKeKaKbK[KSKNKSKTKZK`KUKSK`KTK]K^KIK�KgKZK\K\K^K`K_K_K^K`KqKpKYKZK^K>K$K*K(K%K(K-K0K0KKKKKK-K0K.K1K2K&K"KK)K8K8K8K5K;K?K@KJKXKQK:K9K6K2K+K"K!K"K2K;K:K;KKLKKKLKJKOKDK)K)K)K'KKK$K'K K K*KFKEKHKKKIKLK7K3K.K&K$K$K%K&K#K#K&K$K%KfKvKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKsK[KIKIKJKIKKKKKKKKKKKKKKKHKIKGKEKJKGKDKGKFKFKHKIKJKLKFK?K>K>K>K>K?K>K>K>K>K>K>K>K>K>K?K>KK?K>K?K?KK?K@KJKXKKK8K:K5K2K,K!K!K"K2K;K9K7K:K9K4KKtKsKtKsKsKsKsKsKsKsKsKsKsKtKsKsKtKtKsKuKpKVKIKMKLKKKKKLKKKKKKKKKIKJKKKIKHKBKCKFKGKFKGKHKJKKKIKFK?K?K?K>K>K>K>K>K>K>K>K>K>K>K?K>K=K>K?K>K?K?K=KKAKMKRKAK7K8K6K-K)K K K$K7K;K9K7KK4K5K2K6K,K"K!K2KK3K0K'K$K$K$K%K"K!K%K'K#K[KwKsKsKsKsKsKsKsKsKsKsKsKtKvKtKsKuKvKvKuKtKkKPKLKJKHKIKLKJKHKKKLKKKKKLKJKIKDKCKFKDKGKFKFKGKJKKKLKFK?K>K?K>K>K>K>K>K>K>K>K>K?K>K>K?K>K>K>K>K>K>K?KK?K>K>K>K>K>K>K?K?K>K>K>K>K>K>K>K>K>K>K>K>K:K2K.K3K4K4K6K7K7K7K4K1K0K0K KKK K1K4K3K4K5K5K3K4K4K4K5K5K5K5K4K3K3K3K3K3K3K2K9K^KuKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxe]q>(KWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKZK[KWKWKWKWKWKWKWK[KYKYK\KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K]K]K]K]K\K\K\K]K\K\K[KZK\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K_K_K_K_K_K_K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_KaKbK`K_K_K`KbKaKbKaKbKbKbKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKeKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKdKeKeKdKeKeKeKaK_K^K\K]K^K]K]K[K[KZK[KZKPKTKAKIK`KTKQK_KXK]KaKGK~K^KRK^K]KaKWKXK\KiKrK`KXKZKRK(K&K)K'K!K&K*K-KKKKKK(K/K,K*K-K*K K K&K5K5K4K4K6K=K;KCKNKSK:K6K8K2K,K#KK K/K9K8K7K6K:K5K7KEKPKCK1K6K8K6K2K'K"K'K9K:KKAK@KAKAK.K,K*K*K%K KK#K*K+K'K+KEKHKGKGKEKLK5K&K)K*K"KK"K%K%K&K&K7KEKEKHKFKHKCK3K6K'K$K$K$K$K$K"K$K$KKLKxKrKsKsKsKsKsKsKsKsKuKuKsKtKvKtKsKsKsKsKtKvKuKuKdKJKHKJKLKLKLKLKKKKKKKLKJKHKJKJKIKHKCKDKGKDKHKKKJKHKIKBK>K?K?K?K?K?K>K>K?K@K=K>K>K>K>K>K>K>K>K>K?K=KKaKWKQK]K]KWKdKJKiKxKRK_K]K`KVKYK[KkKoK\KYK[KGK#K'K)K'KK$K,K/KKKKKK)K.K-K-K-K%K!KK)K5K5K4K4K9K=K;KDKKKGK6K6K4K.K-KKKK1K4K4K2K6K9K4K9KEKMKK>KAK;K,K+K*K)K%K K!K(K*K*K&K4KJKFKEKEKHKHK*K(K)K*KKK%K'K(K(K,K@KCKCKFKEKJK:K9K2K'K%K%K%K$K"K$K%K%K,KfKuKsKsKsKsKsKsKsKtKuKuKuKuKtKtKtKuKuKuKuKuKuKuKuKvK^KJKJKLKKKKKKKKKKKKKLKKKJKIKIKGKFKHKBKEKFKGKGKGKIKLKHK@K>K=K=K=K>K?K>K?K?K>K>K>K>K>K>K>K>K>K>K?K>K>K=K;K1K2K4K4K5K6K6K5K4K3K3K0K/K KKKK0K4K5K7K5K4K3K3K4K3K3K4K3K3K4K4K3K3K3K3K3K2K3KUKtKwKwKyKxKxKxKxKxKxKxKxKxKxKxe]q@(KWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKWKZKZKWKWKWKWKWKWKWKWKWKWKWKWKZK[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K]K[KZKZKZKZKZKZKZKZKZKZK\K\K\K\K\K\K]K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K^K`K]K\K\K^K`K`K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`KbKaK_KaKaKaKaK_K_K_K`KbKaKaK_K`KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKdKeKdKdKdKdKdKdKdKdKdKdKdKdKdKdKcKdKeKdKeKbK^K^K\K]K^KXKMKK"K(K)K&KKK%K-KKKKK K.K-K.K.K-K"K K K,K3K5K3K0K=K;K;KEKOKEK5K7K2K*K'KKK K7K&KK)K:K6K4KKAK5K,K+K+K'K#K 
K#K*K(K)K'K=KGKDKCKCKJK@K*K)K'K&KKK%K&K)K(K3KCKAKBKFKFKEK5K9K-K'K#K%K%K$K!K$K'K!K@KuKsKsKsKsKsKsKsKsKuKvKuKuKvKuKsKuKvKuKvKvKuKuKuKuKvKrKZKJKLKLKKKLKLKKKKKKKLKLKIKIKKKGKIKIKFKFKFKFKFKHKLKLKHK>KK>K>K?K>K>K>K>K>K>K>K>K>K>K?K?K?K?KKIKAK1K/K5K7K1K'K#K#K7K:K:KK4K1K2K4K*K"K K0K6K4K2K7K>K9K:K:KKK>K>K>K>K>K>K>K>K>K>K>K>K?K=K=K>K>K?KKBKPKKKUKUKYK[KZKWKXKXKWKXKPKQKCKFK`KSKQK_KZKYK_KIKRKVKUK[K`K[K\KdKtKhKZKZKXK0K'K)K)K$K&K(K.K%KKKKK$K,K-K-K.K)K!KK#K4K4K2K2K3K5K4K8K>KBK8K4K4K1K0K$KK K,K5K3K0K4K=K5K4K@KFK>K0K3K5K2K/K&K!K)K;K8K:K=K@KEKHKJKKKIK:K6K3K2K0K&K"K%K2K,K)K2K9K=K9K:K8K9K:K0K0K-K*K"K!KK&K)K)K'K3KFKCKCKDKCKBK-K(K(K'KKK%K)K)K(K,K@KAKBKCKCKGK5K5K/K&K'K$K"K&K#K#K$K"K0KnKtKsKsKsKsKsKtKtKsKtKuKuKuKsKtKvKuKuKtKtKuKuKuKuKuKuKvKwKmKQKMKJKIKJKKKLKKKKKKKLKLKLKKKHKHKLKHK;KBKFKFKGKJKMKMKFK?KK?K>K>K>K>K>K>K>K>K>K>K>K>K>K>K=K=K>K?K>K?K8K.K.K4K5K4K6K8K7K4K1K.K.K.K!KKKK-K3K3K5K3K3K3K3K3K3K3K3K3K3K3K3K2K2K3K3K1K1K2KKKoKxKyKxKxKxKxKxKxKxKxKxe]qC(KWKXK[KXKWKWKWKWKWKWKWKWKZKZKWKWKWKWKWKWKWKWKXK[KYKWKZKZKWKWKWKWKWKWKWKVKYKWKWKWKWKXKZKZK[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK\K\K\K\K]K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K^K`K_K_K]K]K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_KaKaKaKaKbKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKdKdKcKfKeKeKeKdKdKdKdKdKdKdKeKeKcKcKaK\K\K_KaKZKIK=KEKUKLK+KKRKYKYK[K[K[KXKXKWKYKSKMKLKK8KEKDKGKGKLKLKIKEK?K?K?K?K>K>K>K>K?K?K?K>K>K?K?K?K>K?K>K>K>K?K=K=K9K/K/K3K5K5K5K7K8K5K3K.K0K/KKKKK/K3K2K4K5K3K3K3K4K5K4K3K3K3K3K2K1K2K3K3K3K2K1KJKpKxKwKyKxKxKxKxKxKxKxe]qD(KXKYK[KYKWKWKXKWKXKYKXKWKZKZKXKXKXKXKXKXKXKXKYK[KYKWKZKZKWKWKWKWKWKWKWKWKXKYKXKWKWKXKZKZKYKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K[K[K[KZK\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K^K_K_K]K]K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`K`K_K_K_K`K`KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKdKdKbKdKcKcKcKdKdKdKeKeKdKeKdKbK`K_K]K]K_KZKLKAKFKSKKK0KK#KAK[KWKXK[K[K[KXKXKXKWKQKIKOK5KYK\KUK[K]KVK^KSKKK�KKKXK^KYK[KiKtK`KXKZKOK'K(K)K&K#K(K(K.KKKKKK'K*K*K+K-K&K!K K+K3K2K/K/K2K.K,K4K;KKDKCK4K-K2K4K3K)K#K$K5K:K8K8K=K?K>K@K>K@KAK;K5K1K3K,K"K K.K7K3K2K4K=K6K7K7K;KBK2K0K.K/K(K K!K%K*K)K)K-KAK?K@KBK=KBK0K"K(K+K KK#K(K(K%K&K;K>K>KAKAKCK:K9K5K%K%K$K$K&K#K#K&K%K%KeKvKsKsKsKsKsKtKuKtKsKsKsKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKtKtKuKaKLKKKJKKKLKKKKKKKLKKKKKJKJKJKGKFKAK$K,KCKDKCKFKKKLKJKEK?K=K>K?K>K>K>K>K>K>K?K?K>K>K>K>K?K>K>K>K?K>K=K=K9K-K/K4K5K5K6K7K6K6K2K1K/K-K KKKK.K3K3K5K4K3K3K3K4K2K3K3K3K3K4K5K3K3K3K3K3K4K2KFKnKxKxKxKxKxKxKxKxKxe]qE(K[K[K[KYKWKVKYKWKXK[KYKWKZK[K[K[K[K[K[KXKWK[K[K[KYKWKZKZKWKWKWKWKWKWKWKWKXK[KYKWKWKWKZKZKWKYK[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK\K]K]K\KZK\K\K\K\KZKZKZK\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K^K`K]K]K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_KbKaK_K_K_KaKaK_K`KbK`K_KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKdKdKaKaKaKaKaKdKeKeKeKeKbK^K^KZK_K_K]KXKJKBKHKUKLK.KK$KDK^KhK`KWKWK[K\K[KXKVKTKWKRKJKOK8KOK^KUKWK^KYK\K\KCKzKfKUK_KXK]KmKsK^KYK^KIK$K)K(K%K%K*K)K+KKKKKK*K*K*K+K)K"K K!K*K4K1K/K/K1K+K*K3K:KKFK=K/K0K3K1K0K%K"K'K7K8K6K7K=K;KK2K2K,K,K&K K!K'K,K+K)K3KAK=K>K?K>K@K)K$K'K)KKK'K)K)K(K.K=KK?K?KBK6K=K/K$K#K&K%K'K$K"K'K!K@KwKsKtKsKsKsKsKsKsKsKsKsKsKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKtK\KKKIKLKKKKKKKKKLKKKHKIKLKJKHKGKAKK
+K/KGKCKDKGKLKMKJKBK=K>K?K>K>K?K=KK?K?K>KK?K?K>K>K?K?K=K=K9K.K.K4K5K5K5K5K5K3K0K/K,K+K KKKK/K5K5K5K4K3K3K3K1K3K3K3K3K2K2K3K3K3K3K3K3K3K3KEKjKwKxKyKxKxKxKxKxe]qF(KXKXKYKXKWKYKZKYKZKZKZKYKZK[KZKZKZKYKXKYKYKXKYK[KYKWKZKZKYKYKWKXKYKXKWKWKXK[KZKYKYKYKZKZKYKZKZKZKZKZKZKZKZKZKZKZKZK[K\K[KZK[K\K[K[K[K[KZK\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K^K^K^K^K^K^K^K^K^K\K]K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`KaK`K_K_K_K_K_K_K`KaKaKaK_K`KaK`K`KaK`K`K`KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKcKcKcKdKdKaKbKcKbKaKbKcKeKbK_K[K[K]K_KbK[KHK?KIKTKIK/KK(KDK`KiKgKgKcKYKTKZKYKYKXKVKUKXKWKLKJKFKKK_KUKSKYK[KVKaKHKiK�KUK_KYK\KoKtK[KXK\KAK$K*K)K'K$K%K*K(KKKKKK,K*K*K+K(KKK!K-K3K1K/K/K.K*K+K1K:K9K3K2K/K+K$K!KK*K5K4K2K3K>K6K6K@KCK:K0K3K4K4K0K$K"K+K8K8K7K9K>K:K9K9K7K9K:K6K4K5K1K#KK%K4K5K5K4K9K6K5K8K8KKK>K>K?K8K;K*K%K$K$K$K#K!K%K'K"KVKvKsKsKsKsKsKsKtKuKtKsKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKwKsKVKKKLKKKLKKKKKLKKKJKKKLKKKJKKKIK4KKK4KFKDKFKIKKKLKJKCK>KK=K>K>K>K>K?K>KK=K@K>K>K>K>K=K=K9K+K/K4K4K3K4K6K6K4K1K.K/K/K!KKKK-K5K5K4K3K3K3K2K3K1K1K3K2K1K2K3K1K1K3K3K3K3K1KBKjKxKwKxKyKxKxKxe]qG(KWKWKWKWKXKZK[K[K[KZK[K[K[K[KZKZK[KYKWKZKZKWKXK[KYKWKZKZK[KZKWKYK[KXKWKWKXK[KZK[K[K[KZKZK[KZKZKZKZKZKZKZKZKZKZKZKZK[K]K[KZK\K]KZKZKZKZKZK\K]K\K\K]K\K]K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K_K`K`K`K`K`K`K]K\K\K]K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_KaKbK`K_K_K_K_K_K_KaKbKaKaK_KaKbK`K_KbKaK_KaKbKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKeKeKeKdKdKaKbKeKcKaKbKbKbK]K[K\K]K`K[KJK?KHKQKJK+KK'KIK]KhKhKcKcKaK]KTKRKYKXKWKXKXKYKVKVKRKHKPKQK\KVKNKYK`KVKaKQKQKlKWK_K\K_KqKnKXKXKZK;K#K)K(K%K!K#K)K%KKKKK K,K*K+K+K'KKK"K.K/K/K0K.K(K&K(K.K8K4K.K/K/K,K#K"K"K.K7K6K3K5K;K6K8KAKBK6K.K0K2K1K,K#K#K/K:K7K8K;KK=KDK4K*K)K*K"KK!K(K)K&K+KK?K?K7K=K2K&K#K$K$K#K K"K'K%K0KlKsKsKsKsKsKsKsKtKvKuKsKuKvKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKxKpKTKJKLKKKLKKKKKLKLKLKKKLKLKLKLKKK3KKK;KIKEKCKHKLKMKJKBK=K?K>KK>K?K>KK?K?K>K>K>K>K?K?K=K=K6K-K.K3K3K3K7K8K7K2K0K0K0K,K KKKK/K5K4K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K2K@KfKvKxKyKxKxKxe]qH(KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[KYKWKZKZKZKYKWKXKXKWKWKWKWKZK[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K]K[KZK[K[KZK[K\K\K\K[K[K[K[K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K]K_K_K_K_K_K^K]K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`K`K_K_K_K`KaKaKaK`K`K`K`KaKaKaK`K_KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKdKdKdKdKdKdKcKbKcKcKbK`K]K\K_KaK[KKKAKKKOK@K%KK%KHK`KdK`K]K]K\K]K^K]KYKUKVKXKXKXKXKXKVKUKVKHKNKRK[KYKOKXK`KVK^KYKHKSKRKVK\K`KsKiKXK[KZK4K'K*K(K K K)K)K!KKKKK$K,K*K(K*K'KKK$K/K-K-K.K-K(K&K&K-K5K2K,K/K0K)K!K!K"K0K6K3K2K6K:K6K9KAK?K4K.K/K/K1K(K"K$K4K;K7K6K;K:K8K:K7K6K:K8KK?K5K=K+K'K$K#K$K#K!K%K'KKGKxKsKsKsKsKsKsKuKuKsKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKwKjKNKKKLKLKLKLKKKLKJKKKLKKKLKKKIKHK.KKK>KHKFKFKGKLKLKJKCK>K?K>K>K>K?K?K>K>K>K>K?K>K?K?K>K>K>K>K>K>K>K>K=K7K.K0K3K5K5K5K6K7K6K3K0K0K-K KKKK/K2K2K2K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K1K>KdKxKwKxKxKxe]qI(KZKZK[K[K[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[KYKXKZKZKZKZKXKXKXKWKWKXKXKZK[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K]K[KZK[K[K[K[K]K\K\K[KZK[K[K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K]K\K\K\K\K\K\K\K]K]K]K]K]K_K_K_K_K_K`K`K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`K`K_K`KbKaKaK`K_K_K`KbKaKaK`K`KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKbKdKdKdKdKdKdKdKaKaK`K_K\K]K_KZKOKCKGKNKCK,K%K2KDKZK`K_K]K_KdKhKiKgKiKiKcKVKUKXKXKXKXKXKWKVKVKIKKKJKSK]KPKSK_KZKYK_KEKgKmKHK]KfKuKfKYK\KTK,K'K)K'KKK'K(KKKKKK$K+K*K(K*K%KKK%K0K-K-K-K+K%K%K&K-K3K/K*K,K.K&K K K$K4K5K3K2K7K8K5K;KAK9K3K0K0K0K.K#K K'K7K7K8K6K;K8K5K6K3K5K7K5K9K8K1K#K 
K&K4K5K4K2K8K3K1K0K1K9K9K/K3K2K,K"K K#K-K+K+K)K7K9K9K:KK8K;K7K'K$K#K$K$K#K#K&K%K%KdKvKsKsKsKsKsKsKuKuKsKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKwKvKfKMKKKLKLKLKKKLKJKKKLKKKLKLKLKLKHK+KK KCKHKEKDKHKKKKKJK@K>K?K>K>K>K>K>K>K?K?K>K>K>K>K?K>K>K>K>K>K>K?K=KK?K3K@K.K%K#K"K$K%K!K&K'K!K9KuKsKsKsKsKsKsKsKsKtKvKuKsKsKsKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuK_KIKKKLKKKKKKKKKKKKKKKKKKKLKJKJKDK#KK%KBKGKFKFKIKKKLKGK@K>K?K?K=KK?K>K?K?K?K>K>K>K>K>K>K>K>K>K>K?K=K=K6K-K/K3K5K5K4K7K8K6K3K0K.K.K KKKK-K4K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K2K3K0K7K]KvKxKxe]qK(KZK[K[K[K[K[KZKZKZK[KZKZKZKZKZKZKZKZKZKZK[KZKZKZKZKZKZKZKYKZKZKZKZKYKYKZKZKZKZKZKZKZKZKZKZKZKZKZK[KZKZKZKZKZKZKZKZK[K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K^K^K\K\K\K\K\K]K^K_K_K_K_K_K]K]K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`K`K_K_K_K_K_K_K_K`KaKaKaKaK`K`KaKaK_K`KbKaK`KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKaKaKbKcKcKcKcKbKcKbK^K^K`K`KbKYKKKBKHKWK_K`KdKiKlKlKlKjKiKkKiKhKgKfKgKeKeKeKfKgKaKXKTKWKXKWKXKWKWKXKWKSKJKJK7K^KWKNKZK_KUKaKUKHK~KSK\KhKuK`KYK[KJK%K'K&K%K$K%K(K#KKKKKK*K*K*K)K&K KKK*K,K*K+K+K%KK"K&K+K1K0K.K-K+K$K K!K,K4K4K3K3K4K3K3K3K3K4K/K0K/K1K+K#K"K/K9K7K7K8K;K0K1K2K2K;K8K6K8K8K.K!KK.K4K4K3K5K4K/K/K/K2K6K2K2K2K0K&K!K!K*K.K,K*K2K:K3K7K8K9K9K-K)K)K'KKK%K'K(K(K4K;K8K;K=K>K=K8K=K'K$K#K$K$K$K#K&K&K#KXKxKsKsKsKsKsKsKsKsKtKuKtKsKtKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKwKtK[KJKKKLKKKKKKKKKKKKKKKKKLKKKIKKK@K KK(KEKEKGKHKJKLKMKFK?K>K?K=K=K>K>K>K>K>K>K?K>K>K>K>K?K>K>K?K>K>K=KK?K?K?K?K?K=KK?K>K?K?K>K>K>K>K?K>KK,K$K#K$K$K$K"K%K'KKFKxKrKsKsKsKsKsKsKsKsKsKsKsKsKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKwKmKRKLKKKKKLKKKKKKKKKKKKKKKLKKKKKKK9KKK0KGKFKFKGKJKLKLKGK>K>K>K=K>KK>K>K?K?K>K=K>K@K?K>K?K>K>K=KKK?K>K>K?K>KK>K?K>K>K?K>KK6K,K/K3K3K3K6K5K5K1K0K-K,K,K KKKK/K4K3K3K3K3K3K3K3K3K3K3K3K2K2K3K3K3K3K3K3K3K2K3e]qO(K�K�K�KvK]K\K`K_K^K\K\K\K\K\KZKZKZK[K[K[KZKZKZKZKZKZKZKZKZKZKZKZKZKZKZK[K[K[KZKZKZKZKZKZKZKZKZKZKZKZKZK[K[K[KZKZK\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K[K\K\K\K\K]K]K_K_K_K_K_K]K\K]K^K`K_K_K_K_K_K_K_K_K_K_K_K_K`K_K^K`K_K_K_K_K_K_K_K_K_K_K_K_K`K`KaKaKaKaKaKaKaK`K`K`K`K`K`K`KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKaKaKbKaK`K`K`K_K_K^KcK^KKKDKPKXKHK+KK1KPKbKfKeKdKdKeKbK`K_K_K]K]K_K`KaKfKjKmKnKnKnKmKlKkKeKWKTKVKXKWKVKTKNKUKWKQKIKNKOK\K]K]KXK]KUK]KWKGK]KcKrKhKZK[KWK/K'K)K'K"K K K"KKKKKK#K$K$K$K%K KKK%K)K'K'K+K$K K!K!K%K+K+K*K,K+K$KK K)K1K1K1K2K/K'K'K'K)K/K.K.K/K1K'K!K K,K6K5K4K5K2K-K.K-K/K5K5K6K8K:K)KK"K1K6K5K5K5K.K.K0K/K0K7K4K3K1K,K"K 
K"K)K+K+K,K3K2K2K5K6K:K8K)K(K+K#KK!K&K$K&K)K5K8K6K6K9K=K6K9K4K'K$K"K$K%K"K$K%K!K:KqKsKsKsKsKsKsKsKsKsKsKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKxKYKFKMKLKKKLKKKKKKKKKKKKKKKLKLKKKIK1KKKK>KK>K?K>K>K=KK?K>K>K?K>K>K=KKKEKFKFKFKLKKKIKBK>K>K=K=K=K>K?K>K>K?K>KK?K>K>K>K>K?K=K=K=K=K>K>K5K,K/K4K5K5K5K8K6K4K2K0K/K/K!KKKK*K3K3K2K2K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3K3e]qQ(K�K�K�K�K�K�K�K�KdK\K`K_K]K\K\K\K]K[KZKZKZK\K\KZKZKZKZKZK\K\K]K[KZKZKZK[K[K[KZKZKZKZKZKZKZKZKZK\K]K\K\K\K\K\K\K\KZKZKZK[K]K[KZK\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K\K\K\K\K\K\K\K^K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`KaKaK_K_K_KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKbKbKaK_K_K_K`KaK\KNKGKMKLKK>K?K@K=K=K?K=KK?K?K>KK>K?K@K=K3K+K/K3K5K5K5K5K5K4K3K.K0K-KKKKK,K1K3K3K3K3K3K3K3K3K3K3K4K1K3K3K3K3K3K3K3e]qR(K�K�K�K�K�K�K�K�K�K�KlKZK[K_K^K\K\K\K[K[K[K\K\KZK[K[K[K[K\K\K\K[KZKZKZK[K[KZK[K[K[KZKZKZKZKZKZK[K\K\K\K\K\K\K\K\K[K[K[K\K]K[K[K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K\K]K]K\K\K\K\K\K]K]K]K]K\K^K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`KaKaK_K`K`KaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKaKbKcKaKaKaKaK`KaKaKYKLKJKIKSKVKJKLKVK_KgKjKjKmKsKvKwKuKtKsKoKmKkKjKfKfKfKdKcKfKdKeKgKgKgKhKhKgKgKfKfK[KLKQKVKVKVKUKUKUKTKSKMKJKPKMK[K\KSK_KgKYKcKaK[KjKqK^KXK]KBK#K*K)K%KKKKKKKKKK#K#K#K$K KKKK'K(K(K)K(KKK!K K&K,K*K,K+K(K!KK#K.K0K.K/K2K'K#K"K%K,K0K.K/K,K(KKK(K3K2K3K4K2K-K.K-K.K3K8K5K4K;K.KKK1K7K4K8K7K/K-K.K-K/K:K9K5K5K0K#KK#K,K,K-K,K2K0K/K2K2K9K5K)K+K*K KKK$K&K&K*K4K4K7K8K6KK?KK=K>K?K>K>K=K=K>K>K>K>K>K>K=KK=K?KK?K?K?K?K?K?K?K?K?K>KK>KK?K?K?K>K=K=K=K=K=K=K=K=K=K>K>K>K>K>KK?K?K>KKKK>K=KK?K=K=KK?K>K=K=KK>K=KK?K?K>K?K=KK>KK?K?K=KK>KK;K=K?K=KKK>K>K=KKK?K=KKK?K>K>K>K>K>K=KK>K?K>KK=K:KK?K?K?K?K>K?K=K=K>K?K>K=KK>K=KK?K>K?K>K?K?K>KK?K=K=K?K?K?K=KK>K=K>K8K,K-K1K4K5K5K5K3K2K3K2K-K/K-K*K/K/K1K0K1K3K3K3K3K3e]q`(KMKSK[KeKqK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KqK]K_K`KaK_K^K^K_K_K]K]K]K]K]K\K]K]K]K]K\K\K]K]K]K^K`K_K_K]K^K_K_K_K^K]K]K^K_K_K_K_K_K_K`K^K_K_K_K_K_K_K_K_K_K_K_K_K_K`K^K_K`K_K_K_K_K_K_K_K_K_K_K_K`K`K`KaKaKaK`KaKaKaKaKaKaKaKaKaKaKbKaKaKcKbKbKdKdKbKVKPKVKUKSKRKXK`KcKcKbKaKdKTKPKRKXK^K^KaKdKfKjKmKpKtKvKvKwKwKuKrKoKlKkKiKgKeKeKdKcKdKdKcKfKgKfKfKfKfKfKfKhKgKgKfKfKeKcKbK_K\K\K\KYKWKYKVKWK[KZKWKOKOKRKSKRKSK\K\K\K]K]KNKUKUKaKfK]KVKiK\KQK\K[K8K!K%K%KKK
+KKKKKKKKKKKKKKKK
+K	KK
+KK
KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK K%KKKKKKKKKKKKKK K"K)K(K,K#KKKK K!K#K%K&K,KkKtKsKsKsKsKtKtKuKuKuKvKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKwKwKuKvKyKxKxKxKxKwKxKxKxKxKxKxKxKxKxKxKuKZKLKMKLKLKLKKKLKKKKKKKKKKKKKLKKKLK@KKK#KCKFKFKFKFKHKJKJKBK>K?K>K=K>K>K>K>K>KK=K=K?K>K?K=KK>KK7K*K/K4K4K4K5K4K3K2K1K/K.K-K,K,K/K1K/K0K3K3K3K3K3e]qa(K\KZKXKTKOKMKKKHKGKLKRKRK�K�K�K�K�K�K�K�K�K�K�K�K�K[K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KzK^K_KaK_K_K_K_K`K`K`K`K_K\K]K`K`K`K`K`K`K`K`K`K_K_K_K`K`K_K_K_K_K`K`K`K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K_K`KbK`K_K_K_KaKaKaKaKaKbKaKbKbKbKaKaKcKdKeKeKcKaKXKPKVKZKHKHKTKGKVK`KaKaKbKaKdKVKQKQK?KRKbKiKkKlKoKoKnKlKlKjKfKdKdKcKcKcKcKdKdKdKeKeKeKeKeKeKfKhKgKfKdKbK`K_K^K]K]K\KZKYKZKZK]K`KbKdKhKhKhKgKeKhKjKbKQKOKRKSKQKTKVKZK^KZKSKNKQKUKZKdK^KWKcKcKRKZKZK2K$K$K%KKKKKKKKKKKKKKKKK
+K
+K
+K
+K
+KKK
K
KKKKKKKKK
KK
KKK
KKKKKKKKKKKK
KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK K%K&K+K KKKK K!K%K'K"KBKwKsKsKsKsKsKtKvKuKuKuKuKuKuKvKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKyKxKxKxKxKuKvKyKxKxKxKxKxKxKxKxKzKrKTKMKMKNKMKKKLKKKKKKKKKKKKKKKKKKKMK=KKK(KEKFKFKFKHKIKJKHKBK>K=K;KKK?K=KK=KK?K=KK=KK?K=KK>KKK>K>K>K?K=KK>K=K=K=K>K>K>K>KKZKdK]KWK]KaKaKaK_K]KaKbKbK`KaKYKNKTKFKcKcKdK_K^K^K_K_K`KbKcKeKlKnKqKsKvKyKxKxKvKvKrKqKnKlKhKfKeKdKdKeKeKdKeKfKfKfKfKfKgKfKfKgKgKgKgKhKhKhKhKhKfKeKeKYKEKFKMKNKNKPKQKNKRKSKJKIKQKEKHKfKVKNKVKZKPK>KK$K'K KK	K
KK
K
KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK%KKKKKKK$K&K$K*KiKvKsKsKsKtKtKsKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKvKuKuKuKuKuKuKuKvKvKvKvKuKuKvKvKvKuKvKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKtK[KMKNKLKKKLKLKKKMKMKKKLKKKKKLKJKKKHK&KKK>KJKGKGKEKGKJKKKGK?KK?K?K?K?K=K=K>K>KK?K?K>KK>K>K>K>K?K>K=K=KK>K?K>KKKK`KfKjKjKmKoKmKjKjKhKfKeKdKcKcKcKdKfKdKfKgKgKgKgKfKfKfKfKgKgKgKhKgKfKfKdKdKaK_K]KZKYKXKUKTKUKTKUKWKWKWKPKJKMKOKOKQKMKLKNKNKOKKKKKRKUKTKEKSK;K`K`KQKQKYKFKK#K K"KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK!K%K'K&KZKwKsKsKsKsKuKuKsKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKwKxKvKuKuKuKuKuKuKwKxKxKwKvKwKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKnKQKKKLKMKMKNKLKKKMKMKMKMKLKKKLKKKNK@KKK!KCKJKGKFKFKGKKKMKGK>K=K?K>K>K>K>K?K=KK>K?K>K>K=K=KK>K?K=K?K=KK?K>K?K?K=KK?K=KK?K?K>K?K>K=KK>K?K>K?K?K=K>K?K>K>K=KK?K>K?K>K?K>KK>K>K?K>K?K?K>K?K>KKSK�K�K�K�K�K�K�K�K�K�K�K�K�K�KzKaK_KdKeKbKaKaKaKaKaKaKaKaK_K_K_K_K_K_K_K_K_K_K`KaK_K`KaKaKaK`K_K_K_K_K_K_KaKaKaKaKaKaKbK_K`KaKcKdKeKgKiKgKbKZK\KcK\KCK1K?K]KaK_KaKaKbKcK`K\K_KbKbKcKaK[K[K^KaKcKcK^K\K_KaK`K`K`KbKPKWKFKYKeKaKcK`K_K]K]K_KaKaKcKcKdKhKkKoKqKsKvKsKrKsKsKqKoKnKlKjKjKjKhKfKfKfKfKfKfKgKfKfKfKiKhKbK[KQKPKQKQKQKPKQKOKMKDKK>K>K>K>K>K?K>K?K>K>K?K>K?K>K=K>K>K>K=KKNKVKLKBKLKRKSKEKPK;KSK]KLKEKZKNK'K$K"KKK	KKK	K	KKKKKKKKKKKKK"KKKKKK!K$K$K$K$K$K#K#K$K$K$K$K%K&K&K&K%K&K&K%K%K%K&K%K%K%K%K&K&K&K$K%K'K(K%K%K%K&K&K'K'K&K&K&K%K%K&K%K$K(K(K&K&K&K&K)K)K'K(K(K(K(K(K)K*K*K*K(K'K(K(K)K(K%K(K(K'K'K$K$K$K"K!K#K#K!K"K KKKKK!K(K*K$KXKvKsKsKsKsKsKsKuKvKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKvKuKuKuKwKxKwKyKxKxKvKvKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyK{KnKRKMKNKMKMKMKNKMKKKLKNKMKJKKKLKLKKKHKBKAK=KDKIKJKFKDKDKGKJKDK>K>K?K>K>K>K?K>K?K>K?K>K>K>K>K>K=KK>K?K>K=K=KK=KGKLKJKDKEKFKHKHKDK?K>K?K?K?K?K?K>K>K>K>K?K?K>KK?K=K=K>K>K?K=KK>K>K>K>K?K>K?K?K>K>K=KK?KK?K>KK;KFKKKYK[KNKHKVKFK(K(K)K"K
+KKKKK
+KKKKKKKKKKKKKKKKKK%K&K%K&KKKK$K&K'K&K'K KKKK'K+K*K*K(K!KK K(K)K)K,K/K*K+K.K0K0K*K+K+K(K"KK#K)K)K(K+K2K0K0K0K0K.K,K.K.K.K)K'K-K1K0K1K4K2K-K)K-K0K3K=K=K9K,K"K'K0K1K.K-K-K*K)K)K)K)K(K(K&K(K'K(K#K#K&K%K)KfKwKrKtKuKuKvKtKsKsKsKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKwKyKvKvKyKxKtKwKyKvKuKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKwK]KKKKKKKKKLKKKKKLKKKLKNKNKNKMKKKMKFKCKDK@KBKJKMKIKGKFKHKKKIKAKK?K@K=KK?KK>K?K;KK;K8K;K3K/K3K1K3K/K&K"KKK K"K K&K'K"KK>K?K>K?K?K>K>K>K=K>K?K=K=K>K>K?K>KK=K;K=K8K.K/K3K3K3K3K4K3K2e]qq(KlKgKaK^K_KZKtK�K�K�K�K�K�K�K�K�K�K�K�K�K�KjKaKgKdKeKbK~K�K�KuK\KcKbKeKcK�K�K�KpK\KcKcKeKhK�K�K�K_K`KbKcKcK�K�K�KhK_KeKdKhK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KeK_KeKfKeKbKbKaKaKaKaKaKaKaKaKaKaKbKbKbK`K`KeKhKlKlKlKmKdK\K\KeKaKGKMKdKgKcKaKaKdKdKdKcKaKaKaKbKaKaKcKaKbKbKbKaKaKbKaKbKbKcK`K_KaKcKaKcK]KZK_KaK_K`K_KbKYKSKQKJKgKdKdKcKbKaKaK^K\K\K\K\K]K\K[K]K^K^K^KbK`KbKeKdKdKfKiKjKhKfKcKZKUKSKSKQKPKPKRKSKNKFKK>K?K?K?K?K?K>K?K>KK6K/K1K0K1K6K'KKKKKK$K)K)K&K0KkKtKsKsKsKtKtKsKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKvKvKvKxKxKvKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKyKxKyKyKxKxKzKiKNKMKMKKKLKLKKKLKMKMKKKLKLKLKKKMKKKDKBKIKDKGKMKLKGKFKGKHKIKFK@K>K>K=K>K?K>K>K>K>K>K>K>K>K>K>K>K?K=KK>KK3K,K4KAKCKCK,KKKVKKKMKXKIK,K*K*K(KKKKKK
K
K
K
K
K
K
K	KK
K
KKK
KKK K$K&K%K KKK K"K!K"K$KKKKK%K+K+K)K)K!KKK(K)K&K*K)K'K+K*K.K,K)K*K-K'K!KK#K'K$K&K*K/K-K/K0K/K*K*K+K+K(K!K#K,K*K,K0K5K4K*K)K0K1K7K>KHKEK-KK2KLKIKFK@K2K/K1K/K2K0KKKKKKK%K)K)K#KIKtKrKtKsKsKsKsKsKuKvKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKuKuKvKvKuKuKuKuKuKxKxKuKvKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKyKxKxKxKxKxKxKzKdKNKKKKKLKLKKKLKNKMKKKKKKKKKLKKKLKIKBKGKEKAKFKMKKKGKFKHKHKIKFK@K;KK?K>K>K>K>K>K?K>K>K>K?K?K?K=KK>KKLKJKHK?K0K2K3K/K2K)KKK!K!K!KK%K)K)K+KdKwKsKsKsKsKsKsKsKuKuKuKuKsKtKvKtKsKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKyKvKuKxKxKuKuKuKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKzKwK^KKKLKKKKKKKLKMKMKKKKKKKKKKKLKKKJKIKDKCKCKDKHKMKLKFKDKGKIKHKFK@K=K>K?K>K>K?K>K?K?K>K?K>K>K>K?K=KK5K+K-K3K3K3e]qu(KdKcKaKdK]K�K�K�K�K�K�K�K�K�K�K�K�K�K�K_KfK_KaK�K�K�K^K`K`KaKeK`K�K�K�KdK]KeKdKdKaK�K�K�KdK^KdKcKeKpK�K�K�K\KcKbKcKdK�K�K�K_KcKeKdKfK�K�K�K^KNK7K?K~K�K�K�K�K�K�K�K�K�K�K�K�K�K�KhKfKmKgKeKhKvKuK�K�KlKqK[KPKbKaKGK.K8KUKhKjKhKfKcKaKbKeKfKcKcKdKdKcKeKeKcKbKbKcKeKdKdKeKeKbKbKdKbKaKbKbKdKeKbK_K\K]KbKaK]K\KaK`K^K\K_KQKKKSK7KcKfKdKdKfKeKbKdKfKeKeKfKfKeKfKgKfKfKgKgKdKdK_KVKTKRKTKVKTKSKSKQKJK>K4K1K1K6K4K6K@KMKXKUKOKMKLKMKMKLKKKKKKKIKHKIKHKHKGKGKHKDK;K0K%K'K'K$K3KCKAKFK8K4KWKLKIKNKSK:K,K,K,K#KKKKKKK
K
K
KKKKKKKK
+KKKKK!K"KKKKK!K K K"KKKKKK)K+K*K'K$KKK K'K)K'K)K'K)K(K*K.K,K)K'K'K!KKK&K&K&K&K*K,K-K-K.K-K+K*K*K+K#KK$K*K+K+K1K4K0K(K0K/K1K6K?KGK;K!K'KEKEKHKHK9K.K1K2K0K1K#KKK!K!K!KK%K*K&K;KtKsKsKtKsKsKtKtKsKuKuKuKuKtKtKvKtKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKxKwKvKwKwKvKvKvKwKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKyKxKyKzKzK{KtKTKJKLKLKLKLKMKMKLKLKLKLKKKKKLKMKLKIKEKCKDKEKHKMKLKFKFKHKHKHKHKAKK?K>K?K>K?K?K>K>K>K>K>K?K>K=K=K=K=K>K?K=KK=K6K,K.K3K2e]qv(KbKbKaKeKfK�K�K�K�K�K�K�K�K�K�K�K�K�K�K`KfKbKbKaK�K�K�KcK_KbKbKdKcK�K�K�KoK]KeKaKeK\K�K�K�KkK]KgKfKeKfK�K�K�K[KbKdKgKcK�K�K�KhK`KfKgKeK�K�K�KiKcKUK4K2KmK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KuK{KzK�K|KmKuKPK]KBKK'KOKdKgKjKjKhKcKaKbKgKgKgKdKaKdKeKeKdKeKeKeKeKeKdKeKeKeKeKbKaKaKaKaKcKeKdK`K`K]K^KbKaKdK[KYKaK`K^K\K]KRKGKVK?KbKeKdKeKeKeKeKeKeKeKeKeKeKeKeKeKdKdKcK`K\KWKTKTKVKSKSKSKTKQKGK;K3K2K4K4K5K8KBKMKWKUKOKLKKKKKKKLKLKLKLKIKHKIKIKIKGKGKHKCK:K/K)K)K$K&K'K%K-KCKBKBK@K/KQKNKIKGKUKCK-K.K.K)KKKK"KKKKKKKKKKKK
KK
+K
KKKKKKKKKK!K!KKKKKKK)K(K)K'K KKK#K'K)K(K(K"K%K(K+K+K)K(K)K(KKK"K&K&K%K&K)K*K+K+K/K+K*K'K)K&K K K'K+K*K+K1K1K+K(K-K0K1K2K7KAK1KK/KAKAKFKCK2K.K0K0K3K0KKKK!K!K K"K'K)K%KUKwKrKsKsKsKtKvKuKsKuKvKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKwKyKuKuKxKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxK{KzKxKxKxKzKzKzKpKPKKKNKLKLKMKNKNKNKNKLKKKLKKKKKKKLKIKDKGKDKBKIKPKLKFKFKFKHKKKFK>K:K>K?K>K?K>K>K>K>K>K>K>K>K?K?K?K?K?K?K?K=KKK?K>K>K>K>K>K>K>K>K>K>K>K>K>K>K=K=K>K=KK?K>K>K>K?K?K?K?K?K?K>K>K>K?K>KK`KeKjKkKiKaKaKgKiKiKiKeKbKbKeKgKeKdKbKcKdKeKfKeKdKbKbKdKdKdKdKdKdKeKdKaK`KcKcKdKdKdKbKUKKKWK]K^K]K]K\K]KVKDKKKLKTKWKXK[K_K`K]KcKdKeKgKjKgK_KXKUKVKVKUKSKSKUKSKNKBK8K3K3K5K6K7K;KJKVKWKQKKKMKMKNKMKKKLKLKLKKKIKHKIKHKHKIKHK=K0K)K%K(K)K&K(K%KKKKKKKK5KDKAKFK:K3KUKOKFKEKUK=K.K/K.K)KKK!K K!K K!K$K!KKKKKKKKKKKKKKKKKKKKKKKKKK&K&K%K&K"KKK"K$K&K(K%KK K"K"K'K)K)K)K&KKK K&K$K$K(K$K#K&K)K+K+K)K(K(K#KKK$K)K)K)K/K0K)K$K0K/K*K*K*K/K)KK!K%K'K*K.K.K.K-K,K2K+KKK K K K"K#K)K+K)K]KwKsKsKsKsKsKtKuKuKuKtKsKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKvKwKxKxKxKvKwKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKyKxKzKzKzKzKzKzKzKzKzKzKyK{KyK^KKKKKLKKKKKLKMKLKKKJKMKJKLKKKLKLKIKDKGKAK7KFKOKPKIKFKFKHKIKEK@K>K>K?K?K=K=K=K=K=K>K?K>K>K=K=K>K>K>K=KK>K>K=KK>K>K=K=K?K?K?K=KKK?K>K>K?K>KKK=KK?K>K>K?K>K=K=KK?K?K?K?K?K?K=KKJKVKZKSKOKPKQKPKOKOKOKNKMKMKLKMKLKKKKKLKHK=K1K)K'K'K(K(K)K'KKKKKKKK!K!K KKKKKKKKKK(K@KBKDKCK/KPKTKLKEKTKKK4K3K0K1K&K K KKKK%K+K,K%K"K$K$K$K#K#K#K KKKKKKKKKKKKKKKKKK KKKKK!K!K"KKKKKK%K%K&K%K KKK$K%K#K%K$KKKK!K'K)K(K*K&KKK K&K&K'K*K*K(K K'K,K+K#K$K&KKKK"K%K$K&K+K,K,K-K/K+KKK!K!K"K"K%K+K.K,K\KwKrKsKtKtKtKtKuKuKuKuKuKuKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{KzKyK^KLKNKNKMKNKNKMKKKLKKKLKMKLKKKLKJKKKIK-KKK3KKKOKKKFKFKFKFKHKDK=KK=K=K=K>K?K=KKK>KK>KK>e]q�(K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K_KfKfKcK[K�K�K�KlK_KcKbKeKeKcK�K�K�KcKaKcKaKeKbKwK�K�K�K]KfKdKcKcKiK�K�K�K]KeKdKgKcKhK�K�K�K]KeKeKfK`K�K�K�KrK`KgKjK`K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KuK�K�K�K�KtKuKKcKdKYK_KeKcKfKkKjKiKdKcKdKeKfKjKjKdKdKeKeKhKhKfKeKeKeKbKbKeKdKdKeKdKdKcK_K`KdKeKdKdKcK^KXK\KcKdKcK`KYKZKVKUK[KTKRKXKRKLKVKTKIK:K4K5K5K:K:K?KKKWKZKTKOKPKSKRKPKQKPKPKPKPKOKNKMKMKNKOKJKK>KK=KK?e]q�(K�K�K�K�K�K�K�K�K�K�K�K�K�K�KqKfKiKdKfKfKZK�K�K�KxK`KfKdKeKdK_K�K�K�KjK_KfKeKfKcKlK�K�K�K_KcKbKcKeKeK�K�K�KaKcKeKcKhKeK�K�K�KaKcKfKfK`KqK�K�KK_KkKfK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KwK�K�K�K�KtKuK�KgKaK?KbKcKgKjKjKhKdKeKeKfKhKjKiKdKdKdKdKiKjKhKcKaKcKeKdKdKeKeKdKfKcKaKaKaKbKeKfKdKbK^K[K]KaKdKdKeK]KVKVKLKMKWKTKRKZKKKCKDK9K3K3K6K8K9KBKNKYKXKTKOKNKQKPKRKSKPKQKPKOKQKPKLKMKOKOKIK:K0K)K'K)K)K*K)K$KKKKKKKK$K$K!KKKKKKKKKKKKKKKKKKK!K#K$K"KKK;KGKCKEK?KDKVKPKGKLKWKAK3K5K4K0K(K$KKKKK!K$K$K#K"K'K.K3K4K.K)K&K%K$K$K$K#K$K$K!KKKKKKKKKKKKKKKKKKKK"K#K$K!KKKK K K!K"KKKKK#K'K%K'K%KKKK$K&K$K'K%K!KK$K)K'K!K!K"K KKK K!KK$K*K,K.K.K.K"KKKKK!K$K+K3K2KK?K?K>KK0KSKRKKKFKUKJK4K5K4K5K+K$KKK K!K!K!K"K&K0K5K3K.K+K+K,K+K(K%K#K"K#K$K#K#K&K"KKKKKKKKKK
KKKKKKKK!K K"KKKKK!K K!KKKKKK%K&K%K'K#KKKK$K%K$K'K"KK!K&K)K$KK!K!KKKK K!KK%K.K-K/K.K.KKK KKK!K%K4K5K1KOKxKuKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKxKxKwKwKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKyKxKyKyKyKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|KpKVKLKKKKKLKKKLKKKLKLKMKKKKKKKLKKKHKJKGK*KKK5KLKOKLKFKDKFKGKGKCK>KK=K=K>K>KK>KKPKXKWKRKSKSKSKRKRKQKQKQKQKQKPKMKNKOKQKMKCK5K'K%K*K*K(K*K%KKKKKKK K&K(K"KKKKKKKKKKKKKKKKKKKK%K(K"KKKKKKKKK K+KCKEKBKGK0KCKXKOKEKMKTK@K5K5K5K1K+K$KK#K)K/K2K-K*K)K)K,K-K.K,K.K2K4K4K,K%K$K$K$K$K"K$K$K%K$K KKKKKKKKKKK
KKKKKKKKKKKKKKKKKK$K$K$K$KKKK#K$K#K(K#KKK#K&K'K KK KKKKKKK"K+K-K.K.K1K"KKKKKKK3KK=K=KK>KK/K)K*K*K*K*K)K$KKKKKKK"K(K'K KKKKKKKKKKKKKKKKKKKK'K'K!KKK KKKK!KKK%K%K$K=KEKCKFK8K5KYKQKHKNKVKGK5K5K5K4K.K)K&K,K+K'K(K(K(K*K+K-K-K-K0K4K2K(K"K K$K$K$K$K$K"K$K$K#K$K%K#KKKKKKKKKKKKKKKKKKKKKKKKKKK"K$K#K$K!KKKK$K#K#K%K KKK#K'K KKK KKKKKKK$K(K,K/K2K0KKKKK KK$K5K=K;KJKuKsKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKyKxKyKyKxKxKxKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K{K_KNKNKLKKKLKKKKKKKLKJKIKKKKKLKJKHKHKIK>KK
K KDKOKNKIKDKFKGKGKEKAK=KK>KKJKPKWKSKQKFKEKTKUKUKRKRKSKRKRKSKQKPKPKQKSKOKBK4K(K'K(K*K)K'K%K!KKKKKK#K)K,K'KKKKKKKKKKKKKKKKKKK#K)K)K#KKK K!K!K KK K!K(K'K"KKKKKKKK!KCKEKBKHK/K@KXKOKIKOKUKDK7K8K6K1K-K)K!K!K#K&K,K/K*K&K#K"K$K$K$K$K$K#K"K!K!K$K$K$K#K!K$K+K0K&K#K$K$K#K#K$KKKKKKKKKKKKKKKKKKKKKK!K$K#KKKKK#K$K#K$K KKKK$K!KKKKKKKKKKK$K#K'K.K/KKKKKKKK$K@K�K�K�K�KpKwKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyK{KyKxKzKzKxKxKxKyK{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{KiKPKJKLKLKKKKKLKKKKKLKLKKKKKKKKKLKKKMKIK,KKK3KJKPKKKGKFKGKGKEKBK=K=K=K=KK?KAKCKIKSKSKRKGKDKNKVKWKWKUKRKRKSKRKRKRKQKPKRKOKBK0K)K'K)K+K*K*K$K!KKKKK!K'K*K)K$KKKKK KKKKKKKKKKKKKK$K+K(K#K KK!K"K"K K!K#K%K)K$K KKKKKKKKKKK;KGKDKFK6K4KWKRKKKLKWKKK8K9K8K6K1K-K#K"K*K-K+K%K!K#K#K$K"K"K#K K K"K"K"K"K"K$K$K&K,K4K7K6K1K,K$K$K$K$K#K$K#KKKKKKKKKK	KKKKKKKKKK K"K!KKKKK!K"K#K!KKKKK%KKKKKKKKKKK!K"K"K%K.K$KKKKKK K!K,K�K�K�K�K�KwKvKvKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKwKwKwKwKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKyKxKzKzKyKyKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K|K|K|K{KzKzKzK{KzKzKdKKKKKLKMKLKKKLKKKKKKKKKKKKKKKKKLKKKIKEK(KKK8KNKPKNKFKEKGKDKDKDK=KK.K(K&K+K,K,K(K"KKKKKK K'K)K&K"KK K!K!KKKKKKKKKKKKKKK#K*K'K KK K!K!K"K$K!K$K(K(K!KKKKKKKKKKKKKK1KFKFKGK>K-KRKSKNKJKTKQK;K9K8K8K4K-K*K&K%K!K"K#K$K$K"K K K K K K!K"K K#K"K"K*K1K7KKAKFKPKXK[KYKXKSKPKVKVKUKUKVKUKSKTKTKRKRKTKRKIKK=K?K@KBKCKMKZK]K[KWKWKWKXKWKVKUKVKVKVKTKRKRKQKPKPKHK9K,K'K*K,K+K)K(K#KKKKK K%K+K*K"KKKKKK K!KKKKKKKKKKKK K&K*K'K!K K!K$K$K$K#K$K'K+K)K!KKKKKKKKKKKKKKKKKK!KCKDKCKFK7KAKYKQKIKIKWKFK9K:K9K8K4K-K!KK KKKKKKK K"K"K!K!K$K.K6K9K8K8K8K7K/K1K4K9K=K;K2K+K&K%K$K$K$K$K$K$K%K!KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK'K!KKKKKK!K!K#K)K'KxK�K�K�K�K�KtKvKvKuKuKuKuKuKuKuKuKuKuKuKuKuKvKyKwKuKwKyKxKxKuKvKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKzKzKxKxKxKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{KzK{K~K|KzK{K}KqKQKKKMKNKMKLKLKKKKKKKKKKKKKLKKKHKHKHKKKKFKDKFK9K2KZKRKHKFKUKKK:K;K9K8K4K3K&KKKKKKK K!KKK#K(K/K8K;K8K9K8K4K3K0K2K:K=K;K1K)K$K#K$K!K"K#K$K%K%K$K$K$K$KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK"KKKKKKK!K"K$K&K)K:K�K�K�K�K�K}KwKxKuKuKuKuKuKuKvKxKwKuKuKuKuKvKyKwKuKwKyKxKxKuKvKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKzKzKzKzKzKyKxKxKxKxKxKxKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|K}K}K}K~K|KzK|K}K~KjKPKKKLKLKLKLKKKKKKKKKKKKKLKKKHKHKHKJKJK4KKK(KHKOKMKHKEKGKFKGKFK@K:K=K=KKKK9K>K8K;KK:e]q�(KdKdKeKfKcK^KUKPKNKMKKKKKMKbK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KoKhKnKvKyKzKuKsK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K~KdK�K�K�KzK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KwK�K�K�K�K�K|K|K�KlKdK=KhK^KYKhKmKjKiKiKiKiKgKeKaK]K^K`KcKgKiKiKgKbK`K_K^K\K\K\KZK[K\K\K\K[KZKZKYKXKWKJK9K,K(K*K+K,K*K#KK"K"K#K&K*K*K,K)K%K&K+K*K(K'KKKKKKKKKKKKK#K&K/K4K2K1K3K1K3K2K0K0K2K1K+K#K KKKKKKKKKKKKKKKKKK-K.K+K%K#K$K%K#K#K!K%K,K-K'KKKKKKKKKKKKKK'KCKCKDKEK,KEKXKPKFKJKWKHK:K:K:K9K8K7K#KKKKKKKKKKKK K"K KKK$K-K6KKe]q�(KdKeKeKdKeKeKfKaK]KUKNKMKMKIKFKRKtK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KzKgKkKrKwKxKuKtKuK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KtK�K�K�K�K�K~K�K�KiKaKVKfK_KkKlKkKiKiKjKjKgKdK_K`KbKfKjKmKnKiKeKbK_K`K_K\K]K]K\KZKZKZKZKZKZK[K\KXKHK2K)K,K-K,K,K)K%K K K!K!K%K)K.K+K'K&K%K%K%K*K+K)K)K$KKKKKK
KKKKK(K0K4K2K2K3K3K3K2K2K3K4K,K&K#K K K!K!K!K!KKKKKKKKKKKK"K+K.K)K%K$K$K$K&K%K$K(K+K,K"KKKKKKKKKKKKKKKKKAKDKCKFK8K:K\KSKJKGKWKNKK>K;K7K2K5K:K9K;K?K9K2K'K#K"K&K'K&K$K!K"K#K#K&K'K&K&K$K%K,K7KAKKDKCKFK>K2KXKRKMKDKQKTK>K;KK1K&K%K%K#K$K#KKKKKKKKKKKKKKKKKKKKKKKKKKKKK K"K$K&K'K(K)K*K+K.K1K;K�K�K�K�K�K�KtKyKxKxKxKwKxKxKvKuKvKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKyKxKyKyKxKxKxKyKzKzKzKzKzKzKzKzKyKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KwKXKIKJKJKIKKKJKIKIKIKLKKKJKIKHKIKHKHKHKIK3KKK"KDKKKOKKKFKEKFKEKDe]q�(KdKdKdKdKdKdKdKdKdKdKeKfKcKZKRKJKLKKKIKFKRKoK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K}KlKkKsKyKyKvKrKxK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K{K�K�K�K�K�K|K�K�KaKnKoKoKmKkKlKlKkKeKbKbKfKjKmKmKnKmKlKhKcKaK_K`K`K_K`K^K\K^K[K\K]K[K[KRKAK0K&K)K+K,K+K)K#KKKKK!K%K*K,K'K"K&K%K(K)K&K&K KK'K+K'K)K)KKKKKKKK
KKK,K3K5K5K6K8K2K+K!KKKKK"K!KKKKKKKKKKKK!K+K-K+K(K$K%K'K&K#K"K&K+K*K*K%KKKKKKKKKKKKKKKKKKK%K&K7KDKCKCKDK.KOKTKOKFKMKYKDK:K=K=K=K:K7KKKKKKKKKK"K(K-K2K4K4K6K:KK@K;K2K(K!K!K!K$K$K#K"K"K!K!K$K%K)K*K'K%K+K5KBKMKKKHKIKIKBK@K?K9K0K$K$K$K#K"K$KKKKKKKKKKKKKKKKKK	KKKKKKKKK K"K$K%K&K(K)K+K*K-K0K-KgK�K�K�K�K�KvKyKxKxKxKyKxKxKvKuKuKvKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyK{KyKxKxKxKxKxKxKzK{KzKzKzKzKzKzKzKyKxKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KrKSKIKIKIKLKJKHKHKHKJKJKHKHKIKIKIKGKFKHKHK/KKK(KEKLKPKIKFKGKDKCe]q�(KbKcKdKdKdKdKdKeKcKaKdKeKeKeKaK_KWKMKIKIKKKGKHK[K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KpKhKnKvKyKwKtKuK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K}K�K�K�K�KKzKyKxKsKpKnKlKmKkKiKeKbKdKgKkKmKoKoKkKlKkKkKiKbK_K`K_K`K_K]K]K\K]K]K\K[KRK@K.K(K*K-K-K+K(K#KKKKK!K%K'K'K&K%K%K$K%K)K(K%K KK"K%K(K)K)K)K*K'KKKKKKKKKK(K0K7KK7K.K%KK"K$K!K!K!K"K"K!K$K)K)K(K(K%K(K/K=KHKIKKKKKHKFKAKAKCKFKDK5K*K$K"K"K%K$K$K%K"KKKKKKKKKK
KKKK
+KKKKKKKKKKK!K!K#K&K)K)K)K+K*K,K/K3K;K�K�K�K�K�K�KuKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyK{KyKxKzKzKzKzKzKzKzKzKzKzKzKzKyKxKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K}K|KzK|K}K}K}K}K}K}K}K}K}K}K}K}K}K{K{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKKmKOKLKIKIKIKHKJKKKIKHKIKIKIKHKHKHKHKIKIKHK'KKK.KGKNKOKIKFKDKBe]q�(KfKeKdKdKdKdKdKeKdKbKcKeKdKdKdKdKdKdK\KSKLKIKJKJKHKLKgK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K~KjKiKrKuKvKtKrK{K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KuKuKwKtKpKoKnKlKiKfKfKgKhKmKpKrKoKmKmKkKmKmKiKdK`K_K`K_K`K^K\K[K\K]K[KPK=K-K+K+K,K,K*K%KKKKKK#K&K$K'K#K$K%K%K%K$K%K"KK K"K&K*K*K)K)K(K)K)K*K KKKKK
KKKK'K3K1K)KKKKKKKKKKKKKK!KKKK K+K/K0K+K(K&K%K%K%K%K$K)K/K*K"KKKKKKKKKKKKKKKKKKK#K*K)K(K%K&K$K.KDKDKCKGK7K:K[KQKJKDKUKRK=KKBKEKAK6K,K"KK KK!K$K#K$K$K$K%K"KKKKKKKKK
K
+KKKKKKKKKKKKK!K!K#K&K)K(K)K+K+K/K/K5K1KdK�K�K�K�K�KvKzKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyK{KyKyKzKzKzKzKzKzKzKzKzKzKzKzKyKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K|K~K|K{K|K}K}K}K}K}K}K}K}K}K}K}K}K}K|K{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KfKLKJKHKIKIKKKLKIKHKIKIKHKIKIKIKIKIKHKJKDK#KKK3KHKNKNKJKFKCe]q�(KgKfKeKeKdKdKdKdKdKeKbKeKeKdKdKdKdKeKfKeKaKZKRKKKJKKKGKGKTK}K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KqKgKkKuKzKyKuKtK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KwKxKsKqKqKoKjKeKdKgKjKnKqKrKrKpKpKpKqKpKmKmKjKdK`K_K_K_K`K^K]K^KZKMK;K.K*K*K-K,K+K%KKKKK!K"K%K&K"K#K%K$K$K$K%K&K"KKK K&K)K)K)K'K&K*K.K)K'K)K'KKKKKKKKK$K"KKKK K KKKKKKKKK KKK K"K-K4K4K1K.K+K(K%K$K$K)K,K.K+K KKKKKKKKKKKKKKKKKK K%K*K)K'K)K&K%K%K%K)K?KEKCKEK>K.KXKSKKKFKRKWKBK:K=KK2K&K"K#K#K!K!K KK"K%K$K#K#K#K$K#KKKKKKKKKK	K	KKKKKKKKKK K K"K#K&K&K&K*K+K*K.K.K1K5K8K�K�K�K�K�K�KuKyKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K}K~K}K}K~K{KzK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K{K_KJKHKJKLKLKLKIKHKIKIKHKJKLKIKIKIKHKHKJK?KKKK8KLKNKMKHKDe]q�(KdKhKfKdKdKdKdKdKdKdKcKeKeKdKdKdKdKdKdKdKdKeKfK_KUKNKJKIKHKCKIKbK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KlKlKrKvKuKsKrKyK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KxK�K{K}K�K�KzKvKpKpKnKiKgKgKkKoKrKtKrKnKmKqKrKpKpKnKlKjKdKbK`K_KaK_K_KaKYKJK6K+K,K-K.K+K*K$KKKKKK!K$K$K!KK K$K#K#K$K!KKKK K$K%K&K*K)K(K(K/K9K8K-K'K)K(K$KKKKKKKKKKKKKKKKKKKKKKKKK#K/K4K5K2K0K0K-K*K&K&K,K.K,K$KKKKKKKKKKKKKKKKKKK$K)K)K(K'K(K(K)K%K&K'K&K%K:KEKCKCKDK*KOKWKMKFKKKXKGK;K=KK=K=K%KK"K#K$K'K)K)K*K,K0K1K/K(K!KKKK!K!KK K K#K%K%K%K%K"K#K&K2K@KFKIKGKAKAKDKGKGKGKAK@K8K-K#K"K#K"K KKK K$K%K'K)K(K%K$K%K$K#K%K"KKKKKKK
K
+KKKKKKKKKKKK K!K#K&K(K(K+K,K,K-K0K0K4K0KdK�K�K�K�K�KxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKzKzKzKzKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K|K|K}K}K}K{KzK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K}KzKUKGKJKJKJKJKIKHKIKIKHKJKLKIKHKHKJKHKHKIK9KKKK=KJKOKJKEe]q�(KKhKbKdKhKfKeKdKdKdKdKdKdKdKdKdKdKeKeKeKeKdKdKfKfKbK[KQKIKGKFKGKFKOKsK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KqKiKlKrKuKtKqKnKK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KsK�K�K~K�K�KvKsKkKgKgKgKkKnKrKrKqKpKnKnKoKnKmKnKoKmKjKeKaK_K_K_KbK`KYKIK1K*K*K-K/K.K(K"KKKKKK!K"K$KKKK K"K$K$K!KKKK!K!K#K$K$K&K)K,K3K:K=K;K8K6K-K(K'K&K!KKKKKKKKKKKKKKKKKKKKKK(K1K8K:K6K1K/K/K,K,K-K0K1K-K"KKKKKKKKKKKKKKKKKKK)K+K(K(K(K)K)K)K(K(K*K)K)K)K"K1KCKCKCKGK.KCKYKNKJKHKUKLK=KK:K:KK5KK K!K"K&K*K(K&K"K KKK KKKKK"K#K%K&K$K"K K%K0K:KEKFKDKAK=K>K=KBKFKGKDK:K.K%K#KK$K KK!K K&K)K+K-K,K*K'K*K,K-K0K.K)K&K$K#K#K!KKKKKKKK
+K	K	K
KKKKKKKK K!K#K&K)K*K,K+K,K/K0K/K0K8K5KdK�K�K�K�K�K|KzKxKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|K}K|K}K}K}K}K}K|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KlKMKHKIKIKHKHKIKIKHKIKJKIKHKHKIKHKHKHKIKGK/KKK'KEKNKLe]q�(K�K�K�K�K�K�KmKdKeKhKiKgKeKeKdKdKdKdKdKdKdKdKdKdKeKdKdKdKfKgKcK]KRKJKFKGKHKGKLKmK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KtKgKiKnKpKlKfKdKzK�K�K�K�K�K�K�K�K�K�K�K�KrK�K�K~K�KlKMK]KdKiKoKuKsKsKtKtKrKqKpKmKoKmKlKiKhKhKaK_KbK^KUK@K1K*K,K.K/K-K(K"KKKKKK$K$K KK KK K!K!K!K!KKKK K"K K!K K K K)K3K8K=KK>KK>K@K0K$K&K&K#KKKKKKK
KKKKKKKKK#K-K7K;K9K5K5K5K3K3K6K7K7K4K)KKKKKKKKKKKKKKKKKKK'K-K.K+K(K(K(K)K*K*K)K*K,K)K&K$K%K$K!K#K#K>KEKBKDK?K,KXKTKOKJKUKWKCK9K>K=K=K=K;K$KK"K%K$K!KKKKKKKKKK"K"K$K$K"K KK(K6KCKGKFKEKDKCKK�K�K�K�K�K�KvKyKxKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK}K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KeKKKHKIKIKHKHKHKIKHKHKHKIKHKHKHKHKIKIKGKCK(KKK-KEKJe]q�(K�K�K�K�K�K�K�K�K�KiKbKeKgKgKgKfKfKfKfKeKdKdKdKdKdKdKdKdKdKdKdKeKgKbKYKPKHKGKGKFKDKXK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KlKfKfKfKaK]KUK�K�K�K�K�K�K�K�K�K�K�KvK�K�K�K�KVKHKaKsK�K�KtKyKwKuKsKtKrKpKmKlKkKlKgKbKcKdKaKTK?K/K+K-K/K-K+K&KKKKKKK"K%K KKKK!K!K!KKKKKKK!K!K K!K K!K*K4K8K:K7K:K=K=KKHKKKIKHKEKCK?K;K=KBKCK:K-K%K$K%K%K#KK!K%K&K,K,K+K*K)K)K,K1K2K3K5K4K3K0K.K+K/K1K-K+KKK$K$KKKKKKKK
+KKKKKKKK K!K!K#K&K)K+K+K+K,K0K2K2K3K2K8K5KaK�K�K�K�K�K{KyKxKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKzKzKzKzKxKzK{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK}K}KzKzKzKzKzK}K}K~K}KzK|K~K{K{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K{K{K^KIKJKIKHKFKGKIKHKIKIKHKHKHKHKHKIKJKGKIKEK%KKK/KHe]q�(K�K�K�K�K�K�K�K�K�K�K�KwKbK`KfKiKhKfKfKdKdKeKeKdKeKeKdKdKeKeKeKdKdKeKhKgK_KTKKKGKFKHKFKJKhK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KtKbK[KZK_K�K�K�K�K�K�K�K�K�K�K�K}K�K�K�K�KFK9KfK�K�K�KxKwKuKsKsKtKrKoKkKkKnKjKdKaK^KRK>K.K-K-K.K/K,K$KKKKKKK!K!KKKKKKK!K KKKKK K KK K KK"K+K1K5K8K7K:KKKKK7e]q�(K�K�K�K�K�K�K�K�K�K�K�K�K�K�KpKdKdKgKgKgKfKgKfKdKfKgKeKdKdKdKdKdKdKdKdKgKgKeKeKZKPKHKFKFKDKFKUKzK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K`KXK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K7KBKvKtKrKxKtKsKtKsKpKqKqKnKiKiKlKgK^KMK9K/K,K.K/K/K*K"KKKKKK"K#K!KKKKKK K"K!KKKKKKKKKKK"K(K2K6K5K5K4K8K:K9K;KAK@K8K-K,K-K+K,K-K*K'K&K%K&K%KKKKK
+KKKKK(K0K4K8K8K7K6K9K=K;K3K(K#K"K$K KKKKKKKKKKKKKKK&K*K*K)K)K*K+K+K)K'K)K'K&K&K&K%K&K(K%K$KKKKKKKKK&KDKCKCKFK9K7K\KTKLKJKUKRK@K>K?K>KK:KKKKKKKKKK$K KK K K)K8KCKNKPKPKLKHK>K@KEKCK>K.K'K!KK K!K!K"KK#K(K,K/K.K*K)K*K-K2K4K6K9K8K9K9K9K8K8K:K7K3K*K KKKKKK#K$K%KKKKKKK
+K
+KKKKKKKK!K K#K%K&K'K)K(K.K0K0K3K3K2K4K8K8K`K�K�K�K�K�K|KxKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyK{KzKxKxKxKxKyKzKzK{KyKxKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK}K~KzK{K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K~KqKOKIKGKIKHKHKHKHKHKHKHKHKIKIKHKIKHKFKGKHK9KKKe]q�(K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KkKcKfKiKhKgKdKdKeKeKeKdKeKeKeKdKdKeKeKeKeKdKfKgKaKWKLKEKGKGKDKFKbK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K|K�K�K�K�K�KrK|KvKwKxKuKtKsKsKrKqKpKpKlKkKkK_KOK8K*K+K0K.K,K(K%KKKKKK"K%K$KKKKKK K KKKKKKKKKKKK$K*K1K2K2K4K6K5K4K9K;KK=K=KAK$KKKKKKKK KKK"K.K9K@KDKDKFKIKHKHKFKHKCK1K(K"KK K"K!K"K K"K&K,K/K/K-K-K+K-K2K6K8K:K6K3K7K;KK8K-K)K&K)K-K.K)K&K$KKK#K(K'K&K'K$K&KKKKK
KKKK K-K1K8K7K3K-K#KKKKKKKKKKKKKKKKKK'K/K/K+K'K(K)K)K)K(K(K(K)K(K&K#K#K$K&K'K KKKKKKKKKKK K$K(KK>K?K?K=KBK0KKKKKKKKK#K1K:K?K@K@K@K>K>K=KFKRKLKK?K;K6K5K6K8K;K:K7K7K2K(K KKKKKKKKKKKK$K%K&KKKKKKKK
+KKKKKKK!K"K$K$K'K*K-K.K.K0K2K3K2K3K5K6K;K8K`K�K�K�K�K�KKxKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyK{KzKxKxKxKxKyKzKzKzKzKzKzKzKxKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K~K~K~K{KzKzK{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K�KK}K~K~KaKIKIKIKIKIKHKHKHKHKHKIKHKIKHKHKIKHKFKGKGK0Ke]q�(K�KtKEKRKzK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KwKgKdKgKhKgKeKdKeKeKfKfKfKfKfKfKfKfKeKeKgKgKhKhKcKWKKKEKEKEK@KDK[K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K|K�K�KK�K�KvKyKwKwKvKsKpKnKkKiKlKmKcKLK4K-K/K1K/K,K(K KKKKK"K&K%K!KKKKKK KKKKKKKKKKKK!K'K-K0K.K3K4K4K3K2K4K8K;K3K,K&K$K&K)K,K,K&K KK!K'K,K.K(KK%K%K$K&K$KKKKK
KKKK1K2K3K'KKKKKKKKKKKK KKKKKK!K-K2K0K-K+K*K,K*K*K*K'K&K&K'K&K#K$K%K%K#K KKKKKKKKKKK%K*K'K'K&K5KEKEKFKGKBKMK[KUKJKEKTKRK?K?K=KK@K:KKKKKKK%K,KK?K7K(K#KKKKK!K#K$K'K)K#K$K*K)K+K2K;K=K>K9K;K9K;K=K>K;K8K3K-K%KKK
KKKKKKKKKKKK#K'K%K#KKKKKKKKKKKKKKK!K"K#K'K(K+K-K/K0K0K3K3K3K4K6K6K;KK>K?KKKKK#K0KBK@K;K9K8K8K9K9K6K8K3K+K"KK"K#K!KKK"K$K)K+K)K)K&K)K2K9K;K=K9KKK;K6K3K4K2K.K+K+K)K'K$K$K"KK K#K#K!K KKKKKKKKKK#K'K)K)K(K'K(K(K'K%K+KCKGKFKGKDKGKXKWKRKHKPKYKKKK?K>K>K@K-KKK"K(K2K?K;K3K1K2K6K8K8K.K%KKK!K!K!K!K#K%K(K)K(K)K)K)K,K3K;K?K>K:K3K+K:K?K;K=K9K4K(KKKKKKKKKKKKKKKKKKKKKK$K$K%K%KKKKKKKK
KKKKKK K!K$K'K(K)K-K0K0K/K1K3K5K3K5K8K8K;KK;K9K;K>KAKAKCKAK8K/K!KKKKKKKKKKKKKKKKKKKKKKKKK%K&K*KKKKKKKKKKKKKKK!K$K&K)K)K,K/K0K/K1K4K6K6K7K8K8K;K;K]K�K�K�K�K�K�KxK{KzKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K~K~K~KK~K}K}K~K�KK~K~K~K~K~K�KjKKKGKIKGKGKIKHKHKHKHKHKHKIKHKGKGKFKGKFe]q�(KgKiK_K|K�K�KxKaKkKiKiKcK}K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KqKeKfKiKjKiKiKgKfKfKfKfKfKfKfKfKfKfKfKfKfKgKhKeK]KQKEKDKDKCKBKSKzK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KlK�K�KtK�K�KoKtKuKlKXK>K-K.K0K1K.K+K#KKKKK K#K$K!KKKKKKK KKKKK K"KKKKKK#K+K.K.K/K/K3K2K3K4K6K0K+K&K!K K#K$K$K%K"KKKK!K$K$K$K#K!K"K-K:K@KFKDKDKBKBKEK@K1K&K&K%K!KKKKKKKKKKKKKKKKKK-K;K@K@K=KKKKAKCKCK=K2K$KKKKKKKKKKKKKKKKKKKKKKKKKKKK*K+K,K%KKKKKKK
K
KKKKKK!K$K$K(K)K*K-K0K3K3K4K4K4K7K8K8K:K=KK=KK8K-K&K"K#K$K%K$K#K%K!K!KKKKKKKKKKK'K,K.K+K)K)K)K(K(K)K,K-K'K"KKKKK%KEKFKGKGKAK?KYKVKOKGKPKTKEK@KBKAK@KBKDK/KKK!K%K%K"KKKKKKKK!K%K(K(K&K(K)K,K1K8K?K=K>K=K@KBKBKCKCK>K:K*KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK$K,K*K*KKKKKKKK
KKKKKK!K#K$K'K(K*K,K/K1K3K5K4K5K8K8K8K9K=K;K\K�K�K�K�K�K�KxKyKyKyKxKxKxKxKxKxKxKxKxKxKxKxKxKyKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K|K|K|K|K{K{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K~K}K}K~K~K}K}K}K~KKK~K}K}K}K~K~KKKKKKKK~K�KzKZKJKHKIKHKGKFKGKHKHKIKIKHKHKGKGKGKGe]q�(KbKiKfKiKcKeK�K�K�KcKgKgKhKdKdK�K�K�KMKHKdK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K~KhKfKgKjKiKfKfKfKfKfKfKfKfKfKfKfKfKfKfKfKhKiKeK^KRKIKCKCKDKBKMKtK�K�K�K�K�K�K�K�K�K�K�K�K�K�KoKxKK~K|K�KuKQK7K1K0K/K/K.K%KKKKKKKKKKKKKKKKKKKKKKKKKKKK%K,K.K*K-K0K.K1K3K4K5K.K&K KK K"K$K$K$K"KKKK!K$K$K$K#K"K&K/K=KEK@K:K:K>K@KDKHKKKHKKK>K?KK?KCK7KKKKKKKKKKK!K#K$K&K&K(K'K(K/K7K;KBK@K>K@KCKEKHKIKEKAK/KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK+K,K+K%KKKKKKKKKKKKK!K!K$K%K&K+K*K-K0K3K5K4K7K9K7K8K7K:K@K?K�K�K�K�K�K�KwK{K{KyKxKxKxKxKxKxKxKxKxKxKxKxKxKzK{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K~K~K~K~K~K{KzK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KK�K}K~K�KK}K}K}K}K}K}K}K}K}K}KK�KKKKKKKK�KK~KuKQKHKIKFKFKFKFKFKHKHKFKFKFKHKIKIKHe]q�(K�KbKfKhKhKeK_K�K�K�KgKfKhKhKgK^K�K�K�KiKXKKKGKkK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KwKgKfKhKjKiKiKhKgKgKgKgKfKfKfKfKfKfKgKgKgKhKhKdKYKMKGKDKDKCKBK[K�K�K�K�K�K�K�K�K�K�K�K�KiKyK~K|K_KMKKDKEK?K:K9K=KCKGKCK8K1K.K.K0K/K*K%K&K(K"KKKK
K	K
KKKK0K8KK?K>K>K=KAK;K)K"K"K#K#K#K$K"K!KKKKKKKKKKK$K+K,K-K,K.K-K,K+K+K-K.K)K%KKKKKKKKKKK;KHKFKFKIK,KJK\KSKIKJKYKQK@KBKAKAK?KCK?K%KKKKKKKKK K#K%K$K$K&K,K3K9KK;K@KCK?K4K+K-K0K0K1K.K&K!K%K&K&K$KKKKKKKKKK-K5K6K:KK�K�K�K�K�K�KxK{K{K{K{KzKxKxKxKxKxKxKxKxKxKxKzKzKzKzKxKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|K~K{KzKzK{K}K}K}K}K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K�K�K�K�K�KKKKKKKKKKKK�KfKIKFKHKIKGKFKFKFKGKGKIKHKHKHKIe]q�(K�K�K�KpKbKhKgKiK^KuK�K�K~K`KjKjKlK`KtK�K�K}KbKlKjKbKYK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KlKdKgKkKhKhKgKiKiKgKfKfKfKfKfKfKfKgKhKhKhKiKeK[KNKGKDKJK?KcK�K�K�K�K�K�K�K�K_KzKtKWKKKK"KKKKKK
KKKKKKKKKKKKKKKKKKKKKK K'K,K1K3K3K1K0K-K0K3K2K)K!KKKK K"K KKKKKK"K!K!K KK"K*K0K5K9K9K9K:K=K=K=K@K@K;K3K.K.K0K1K1K.K'K$K#K#K'K&K&K&K#KKKKKKKKK$K7K8K9KKAK?KDKFK?K/KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK+K.K/K)KKKKKKKK
KKKKKK"K&K&K)K*K.K/K1K3K3K4K7K8K9K:K8KK[K�K�K�K�K�K�KvK{K{KyKxKxKyKzKyKxKzKzKxKyKzKzKzKzKzKzKzKzKzKzKzKzKxKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K}K}K}K}K}K}K|KzK|K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KK~K}K}K}KKK}K~KKKKKKKKKK�K�K�KKKKKKKKKKKKK}K^KFKGKGKGKFKGKGKFKGKGKGKGKGKGe]q�(K[KoK�K�K�K`KhKgKiKaKiK�K�K�KbKjKjKjKdKiK�K�K�KaKhKkKkKaK�K�K�KK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K|KgKeKjKlKkKiKfKfKgKgKfKgKgKfKgKiKhKfKfKgKiKiKbKUKHK7K�K�K�K�K�K�K�K�K�K]K{KyK]K%KKKKKKKKKKKKKKKKKKKKKKKKKKKKKK"K.K/K+K0K1K2K3K4K3K.K'K#KK K!KKK!K KKKKK KKK K K#K(K4K6K6K5K7K8K9K;K>K@K@K6K/K,K/K0K0K0K-K$KK!K'K.K.K-K(K%K%K&K!KKKKKKKKK7K?K8K-K!KKKKK KKKKKKKKKKKK K+K1K1K.K)K,K-K,K)K.K2K-K%KKKKKKKKKKKKKKKKKKK-KFKGKFKGKDK.KTKXKPKHKKK[KOKEKDKBKAKAKHK?K)KKKKKKK$K+K.K5K6K8K;K?K?KAKCK=K5K&KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK%K/K.K0K KKKKKKK
KKKKKK"K%K&K)K)K-K.K0K3K3K4K7K8K9K:K9K;K=K@K?K�K�K�K�K�K�KxK{K{KyKyKyKyKzKyKyKzKzKyKyK{KzKzKzKzKzKzKzKzKzKzKzKyKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K{KzKzK{K|K~K}K}K}K}K~K}KzK|K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KK~K}K~K~KKK~K~K�KKKKKKKKK�K�K�KKKKKKKKKKK�KK�KzKVKHKFKFKGKFKFKFKFKFKFKFKFKGe]q�(KiK`KeK�K�K�KbKfKgKhKeKbK�K�K�KeKeKhKhKgK`K�K�K�KdKeKjKlKcKqK�K�KrKOKyK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KtKeKhKiKiKiKjKiKhKjKjKgKfKfKfKfKiKiKiKiKkKlKgK\K�K�K�K�K�K�K�K�K�K\KzKwKYK'KKKKKKKKKKKKKKKK
KKKKKKKKKKKKK#K)K-K-K,K0K2K3K2K,K%K!K K"K$K"K!KKKKKKK K K KKK%K0K4K6K5K4K6K8K5K8KKCK@K4K&KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK+K/K1K)KKKKKKKK
KKKK K!K$K(K)K(K+K.K1K3K3K5K4K7K:K:K9K9K;K?KK;K8K5K5K2K0K,K-K/K)KKKKKKKKKKKKKKKKKKK)K1K2K0K/K.KK_KTKNKFKRKVKEKBKDKDKEKFKLK1K#KKKK!K%K(K.K5K5K;K?K6K)KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK KKKKKKKKKKKKKK(K1K/K/K KKKKKKK
KKKKK!K#K'K)K)K+K,K0K3K3K4K5K7K9K:KKBK@K�K�K�K�K�K�KyK{KzKzKzKzKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K{K|K~K{KzK|K}K}K}K|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K~K~K~KKKKKKKKKKKKKKKKKKKKKKKKKK�K�K�KKKKK�KnKMKGKFKGKGKFKGKFKGKGKGKEe]q�(KhKgKgKgK\K�K�K�KlKcKhKgKiK]K{K�K�KtKcKhKgKiK^K|K�K�KvKbKkKkKhKbK�K�K�KkKaKSKDK[K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K~KkKgKiKjKkKjKhKjKjKjKiKiKiKiKiKiKlKnK�K�K�K�K�K�K�K�K�KqKwKyKSK+K&KKKKKKKKKKKK	KKKKKKKKKKKKK K)K,K*K,K0K-K(K$K"K#K#K$K$K#K"K KKKKKKKKKKK#K-K5K7K5K5K5K4K4K8K:K6K/K)K*K*K)K+K,K)K'K%K%K+K1K1K2K/K-K/K3K=KIKOKOKNK;K$K$K&K#KKKKKKKKKKKKKKKK KK KKK)K6K@KAK@K?K;K;K9K6K4K5K-K"KKKKKKKKKKKKKKKKKK"K,K3K4K1K/K/K0K/K8KEKEKFKIK=K4K]KXKQKHKNKWKKKCKDKDKBKJKMKKCK@K>K?K=K=K>K;K:K5K)KKKKKKKKKKKKKKKKKKK(K0K2K4K5K3K2K1K/K0K3K8KEKHKFKGKFK/KXKZKQKIKJKVKNKEKFKDKZK�K\KBK+K&KKKK&K+K)K KKKKKKKKKKKKKKKKKKKKKK"K&K,K)KKKKKKK K K K K K K KK K"K K K!K K K K KKKKKKKKKKK,K1K1K1KKKKKKKKKKKKK"K%K(K*K+K+K.K3K3K4K6K9K:K:K:KK=K?K?K>K=KK>K?K@KK@KCKAK�K�K�K�K�K�KzK{K{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KK~K}K}K~K�KK}KKKKKKKKKKKKKKKKKKKKKK�K�K�K�KKKKKKKKK�KK�KrKOKFKGKGKFKGKGKFKFe]q�(KtK�K�K�K`KhKgKjKgK_K�K�K�KhKgKjKgKhK`K�K�K�KpKfKiKgKjK_K�K�K�KpKdKkKlKeKbK�K�K�KdKiKkKlKcKzK�K�KiKLKdK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K}KkKhKlKmKnKnK�K�K�K�K�K�K�K�K�K�KpKuKpKKDKDKDKFKIKJKMKOKOKIK?K4K,K)K)K'K&K&K%K&K KKKKK	KKKK"KK9K4K-KKKKKKKKKKKKKKKKKK#K-K4K5K5K2K4K4K4K5K7K7K3K(KKKKKKK*KIKFKFKIK?K5K^KYKQKHKPKXKxK�K�K�K�K�KYKFK,K*K'KKKK
KKKKKKKKKKKKKKK!K#K)K,K)K*K5KIKaKpKwKxKvKAKK#K KKK!K!K!K!K!K!K!K!K!K!K!K!K!K!K!K!K!K!K!K!K!K!K KKKKK*K4K2K5K%KKKKKKKKKKK K"K%K(K+K+K.K/K1K4K7K9K8K8K9K:K=K=K>K@KBKAK[K�K�K�K�K�K�KxK{K{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K{KzK{K{K{K{KzK|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K�K~K}K~K~K�KK}KK�KKKKKKKKKKKKKKKKKKKK�K�K�K�KKKKKKKKKKKK�KmKJKGKGKFKGKFKFKFe]q�(K`KfK�K�K�KbKhKgKgKgK]K�K�K�KpKdKlKjKkK]KuK�K�K|KcKiKiKlK_KwK�K�K}KcKlKkKjK_K�K�K�KlKfKkKlKcKlK�K�K�KZKRKKKfK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KwKjKkKpK�K�K�K�K�K�K�K�K�K�KoKtKjK9K1K KKKKK!KKKKKKKKKK	K	KKKKKKKKKKKK!KKKKKK#K$K$K$K"KKK$K+K0K,K'K*K*K)K)K)K*K+K&K!K$K$K'K'K$K$K"K"K"K&K(K*K)K(K'K*K1KK?K>K;K/K$KKKKKKKKKKKKKKKKKK'K0K4K4K4K5K2K1K5K7K9K7K/K KKKKKKKKK)KEKGKFKGKEK/KUKZKSKKKLKWKyK�K�K�K�K�KoKIK6K*K,KKKKKKKKKKKKKKKKK$K(K,K)K&K.K?KUKkKsKuKtKrKrKtK[K#K!K KKK K!K K K K K K K!K!K!K K#K$K K!K$K"K K!K!K K K!K KKK#K2K3K3K.KKKKKKKKKKKK!K%K&K(K+K*K.K1K1K5K8K:K9K8K;K=K=K>K=K?KFKCK�K�K�K�K�K�KxK{K{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K~K|KzK}K}K~K|KzKzKzK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K�K~K}KKKKKKKKKKKKKKKKKKKKKKKK�K�K�K�K�K�K�KKKKKKKKKKKKK~KaKFKFKGKFKGKFKFe]q�(KkKeK`K�K�K�KeKeKiKhKkK_KvK�K�KKbKjKjKkKcKjK�K�K�KcKkKiKlKdKiK�K�K�KaKkKjKlK`K�K�K�KvKeKlKkKhKbK�K�K�KiKmK_KHKUK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KwK�K�K�K�K�K�K�K�K�K{KoKtK_K4K2KKKKKK K KK KKKKKKKK	KKKKKKKKKKKKKKKK!K"K$K"K"K KK'K,K0K0K-K)K'K*K*K(K&K%K#K$K#K#K!K!K#K"K K"K$K'K'K)K)K&K'K+K-K7KAKBKBKBKBKDKFKFKCKK=K?KKHKGKGKIK1KIK_KWKPKJKVKeK�K�K�K�K�K�K�K�K(K.K'KKKKKKKKKKKKK"K'K+K)K%K)K5KNKeKsKvKtKsKqKsKsKsKrKnK0K!K$K#KK K"K"K"K"K"K"K"K!K K"K"K#K$K"K"K$K#K"K!K K"K"K!K KK!KK,K6K3K5K$KKKKKKKKKKKK%K'K(K+K,K/K0K1K4K6K9K:K9K;KK@K>K?KDKBKYK�K�K�K�K�K�KxK{K{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K~K|K{K}K}K}K}K{K{KzK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K~K~KK�K~K~KKKKKKKKKKKKKKKKKKKKKKKK�K�K�K�K�K�K�KKKKKKKKKKK�KK�K{KYKEKFKGKFKFKFe]q�(KjKiKgK^K�K�K�KnKdKlKiKlKaKiK�K�K�KbKiKiKkKdKbK�K�K�KcKhKjKlKgKaK�K�K�KeKjKkKkKaKpK�K�K�KbKjKkKiK_K�K�K�KjKlKqKhK]KsK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KpKpKrKRK3K3KKKKKKKKKK KKKKKKKKKKKKKKKKKKKKKKKKKKKKK(K/K.K-K-K,K)K(K)K&K#K#K!K!K$K$K!KKKK!K"K&K&K&K&K#K$K(K/K4K6KGKOKCK>K@KCKCK@K;K1K.K/K.K1K/K)KK K K%K.K-K%K%K%K&K%K!KKKKK
KKK,K=KKHKIKHKJK7K>K`KXKRKIKVKZK�K�K�K�K�K�K�K�K:K)K,KKKKKKKKKK K$K(K*K&K&K.KEK`KpKvKsKpKsKsKtKtKsKsKsKsKwKGKK%K#KKK#K$K$K$K$K$K$K"K K#K$K$K$K$K$K$K$K$K#K#K$K#K K!K!KKK'K4K4K8K-KKKKKKKKKKKK"K'K)K(K,K-K-K1K4K4K9K:K:K:K=K=K?K?K?KAKCKCK�K�K�K�K�K�KzK|KzKyK{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K}K~K~K~K~K}K}K~K|KzK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KK�KKKKKKKKKKKKKKKKKKKKKKKKKKKKK�K�K�K�K�K�K�KKKKKKKKKKKK�KK�KwKSKFKGKGKGKFe]q�(KjKiKjKjK\K{K�K�K{KcKlKiKlKfK`K�K�K�KcKgKiKjKiK_K�K�K�KlKfKkKkKgK^K�K�K�KkKfKjKkKfKeK�K�K�KdKjKlKmKaK�K�K�KtKfKmKpKgKmK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KjKqKqKJK2K0KKKKKKKK K!K KKKKKKKKKKKKKKKKKKKKKKKKK!K!KK$K(K*K-K-K-K-K&K#K$K#K K"K KK K KKK K!K#K&K%K%K#K#K'K1K5K4K3K:KHKFK@K>K?K=K3K.K.K0K0K.K+K%KKK"K+K0K-K.K)K$K-K(K#K$K#KKKKKKKK$K*K#KKKKKKKKKKKKKKKKK(K1KKDKGKHKIKHK=K5K^KYKRKIKSKWK�K�K�K�K�K�K�K�KdK%K.K%KKKKKKK%K(K)K*K'K+K=KUKhKtKwKtKrKrKrKsKsKsKsKsKsKsKsKwKaK$K#K#K"KK#K$K$K$K$K$K$K#K"K#K$K$K$K"K"K$K$K$K$K%K$K#K#K"K!KKK#K1K5K6K4K"KKKKKKKKKKK K$K'K(K*K+K-K1K4K4K8K:K:K:KK?K?K?K>KAKEKXK�K�K�K�K�K�KyK}K{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K|K}K~K|K{K{K|K}K}K}K}K|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KK~K}K~KK~K}KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK�K�K�K�KKKKK�K�KKKKK�K�KKK�KrKLKGKFKFKGe]q�(KgKhKjKjKmK_KnK�K�K�KbKjKjKkKgK[K�K�K�KkKfKkKjKlK`K�K�K�KvKdKkKkKlK^K�K�K�KsKdKkKlKgK`K�K�K�KkKjKnKqKeKtK�K�K�KfKhKmKoK{K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KiKpKnKFK6K0KKK
+K
+KKKKK K KKKKKKKKKKKKK
+K
+KKKKKKKKKK#K*K&KK$K#K'K*K,K%K!K K$K#KKK!KKKKKK K"K!K K!K!K%K*K/K3K4K5K5K7K=KAK@K=K7K-K)K-K1K4K5K0K'K#K!K%K-K0K3K.K'K+K5K@KNKEK$K#K$K KKKKKKKKKKKKKKKKKKK KKKKK-K>KKKDK;K8K;KKAKHKHKIKGKEK/KUK\KTKMKMKXKvK�K�K�K�K�K�K�K�K(K.K+KKKK#K&K*K+K(K(K8KLKaKrKxKrKqKsKsKsKsKtKsKsKsKsKsKsKsKsKsKtK4KK'K'K!K!K$K$K$K$K$K$K$K$K$K$K$K$K$K$K$K$K$K$K#K$K$K$K"K K!K!K K+K6K5K8K,KKKKKKKKKKK K#K&K)K,K,K/K1K5K4K7K;K7K6K;K?K?K?K?K@KDKFKBK�K�K�K�K�K�K}K~K|KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K}K}K~K|KzKzKzK}K}K}K}K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKK}K~K�K~K}KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK�K�K�K�KKKK�K�K�K�KKKKK�K�K�K�K�KhKKKIKFKGe]q�(K�KnKgKkKjKkKeKcK�K�K�KcKjKiKiKlK_K�K�K�KvKbKkKjKlKcKpK�K�K�KdKlKkKlKcKnK�K�K�KcKkKlKmKaK�K�K�KrKeKeKkKhKrK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KdKqKjKBK7K/KKKKKKKKKK#K K"K KKKKKKKK
K	K	KK
KKKKKKKKKK#K$KK K K"K&K&K!K"K$K$K#K K KKKKK"K"K"KKK K#K(K0K3K5K5K4K4K6K;K>K:K3K,K)K+K1K4K2K.K&K"K$K)K/K/K.K*K-K,K8KMKUKKKGKNK6K K$K%KKKKKKKKKKKKKKKK K"KKKK&K6KBKNKNKIKFKK>KFKIKHKHKJK5KLK^KWKOKJKZKbK�K�K�K�K�K�K�K�K5K0K/K'K%K*K+K)K'K.KCKZKnKvKsKrKrKpKqKtKsKsKsKsKsKsKsKsKsKsKsKtKrKvKMK!K'K&K"KK#K&K%K%K%K%K%K%K%K%K%K%K%K$K#K$K%K%K%K%K%K#K#K#K!K K K#K2K6K9K3K KKKKKKKKKKK#K&K)K+K,K/K.K4K5K7K8K8K9KK?K?K?KAKCKEKEKYK�K�K�K�K�K�KxK|K}K{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|K}K}K}K{K{K{KzKzKzKzK{K{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK�K�K�K�KKKK�K�K�K�KKKK�K�K�K�KK�KK`KKKGKIe]q�(K�K�KzKcKkKjKkKhK^K�K�K�KiKeKkKhKlKaKmK�K�K�KaKkKiKkKdKeK�K�K�KcKjKkKmKeKcK�K�K�KgKhKgKiK^KxK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KmK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KVKeKmKfK>K7K*KKKKKKKKK"K K K!K"KKKKKKKKK	K
+K
+KKKKKKK K"K#K K!K%K KKK#K&K&K"K#K$K$K"K KKK K K!K"K KKKK&K+K/K4K6K4K4K5K6K7K4K*K#K!K#K)K0K0K(K"K K%K,K0K0K/K-K,K3K?KEKNKYKVKHKK=K/K KKKKKKKK!K$K"K!K"K&K.K7K=K>K>K;KK>K@KBKEKHKHKIKIKJKOK_K[KSKMKXKZK�K�K�K�K�K�K�K�KTK(K3K.K.K)K-K;KTKjKsKuKrKpKpKsKsKqKrKsKsKsKsKsKsKsKsKsKsKsKsKtKsKtKgK,K%K%K#KK#K&K&K&K&K&K&K&K&K&K&K&K'K&K$K%K&K&K&K&K%K$K$K$K"K!K!K K-K8K7K8K*KKKKKKKKKKK K&K(K+K,K/K.K3K4K6K8K9K:K=K=K>K?K?KAKAKAKGKDK�K�K�K�K�K�K}K|K~K{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|K~K}K}K{K{K{K{K{K{K{K{K{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K~K~KK�KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK�K�K�K�KKKKK�K�K�KKKK�K�K�K�K�KKK}KZKHKGe]q�(KrK�K�K�KcKjKjKkKjKZK�K�K�KvKfKlKhKkK`KbK�K�K�KdKhKiKkKiK`K�K�K�KlKgKhKiKdK[K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KvKfKhKhKaKxK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K}KXKNKhKlKaK:K5K)KKKKKKKKKKKK K!K"KKKKKKKKK
+K
+K	KKKKKK#K%K%K&K%K$K#K!K"K(K'K$K$K#K"K KKK!K"K KKKKKK"K$K'K,K/K2K4K3K5K3K.K'K KK KKK K$K KK$K*K0K0K0K.K.K6KEKPKUKSKPKSKTKVKOKMKIK/K"K$K$K#KKKKKKK
KKKKKKK-K:KIKPKOKKKJKFKAKJKMKEK5K%KKKKKKKKK K K"K!K"K*K/K7KAK>K?KBK@K?K?K@K@K@K8K'K/KJKHKIKHKMKNK_K\KTKNKVKXK�K�K�K�K�K�K�K�K�K&K1K0K8KNKdKrKwKuKsKrKpKrKtKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsK:K"K'K)K$K#K%K%K%K%K%K'K%K&K(K&K%K&K&K%K&K&K%K%K%K%K&K%K#K$K#K$K#K)K8K8K8K2KKKKKKKKKKKK$K&K)K,K/K0K/K2K5K9K9K:K=K=KK?KAKAK@KEKFKWK�K�K�K�K�K�KzKK{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|K~K{K{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K�KKKKK}K~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK�K�K�KKKKKKKKKKKKKKKKKK�K�K�K~K�KxKPKFe]q�(KeKgK�K�K�KdKiKkKjKlK^KsK�K�K�KeKlKjKkKiK\K�K�K�KkKhKjKjKkK[K�K�K�KwKwK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K{KpKlKfK^K~K�K�K{KhKoKpKgKnK�K�K�K�K�K�K�K�K�K�K�K�K�K�KKpK[KOKPKeKiK\K8K5K(KKKKKKKKKKKKK K!K!KKKKKKKKK
+K	K
KKKKK%K)K(K%K"K#K%K(K)K(K#K!K!KKK!K%K#K!K KKKK!K'K(K(K'K'K+K0K3K2K/K*K%K!KK K K"KKKK K&K,K+K-K-K-K3KK3K/K+K(K$K"K%KKKKKK
+K
KKKKK-K@KMKQKQKLKIKIKLKIKHK>K1K KKKKKKKKK!K!K"K#K'K-K6K;K>K?K@KAKAKAK=K?K@K;K.KKKK%KGKIKIKHKIKLKYK]KUKOKSKWKmK�K�K�K�K�K�K�K�K4KIK_KnKwKuKsKsKsKsKsKqKrKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKrKwKTK"K'K+K(K!K'K'K'K'K%K'K'K%K"K&K'K&K&K'K&K&K&K'K'K&K&K%K$K$K%K#K"K!K1K9K7K8K(KKKKKKKKKKK$K&K(K+K-K/K/K2K5K8K9K:K=K=KK?KAKBKAKEKJKFK�K�K�K�K�K�K�K}K|KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|K~K{K{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K~K}K}K}K}K}K}K~KKKKKK~K~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK�K�K�KKKKKKKKKKKKKKK�K�K�K�K�K�KK�K�KpKMe]q�(KkKgK`K�K�K�KkKfKkKiKmKeKiK�K�K�KcKiKkKkKkKZK�K�K�KtKfKoKxK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KKuKlK^K�K�K�KlKjKnKoKdKnK�K�K�KfKoKqKgK�K�K�K�K�K�K�K�K�K�K�K�K�K�KjK^KWKOKOKcKhKXK6K6K&KKKKKKKKKKKKKK"K!K KKKKKKKKK	K
KKKKK$K'K#K#K#K'K&K&K%KKKK!K!K!K$K$K!KKKK#K)K+K)K)K(K)K)K'K*K+K(K#K K K!K!KKKKK#K&K*K,K-K+K-K1K=KGKMKNKOKKKNKSKUKSKIK:K0K/K,K-K*K%K#K$K#K"KKKKKK
+K
KK"KCKGKMKLKKKJKLKMKIKBK4K(KKKKKKKKKKK K"K$K*K1K8KK?KAKBKAKBKGKIKWK�K�K�K�K�K�K{K|KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|K~K{KzK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K�KK}K}K}K}K}K}K}K}K~K�KKKKKKKK~K}KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK�K�K�KKKKKKKKKKKKKKK�K�K�K�KKKKKK�Kge]q�(KkKkKiK_K�K�K�KtKeKlKjKnKiK`K�K�K�KiKdKeKiKrKkK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K|KnKlK�K�K�KaKlKmKmK`K�K�K�KtKfKlKnKhKeK�K�K�KgKqKkK�K�K�K�K�K�K�K�K�K�K�K�K~KaK^KZKSKLKQKfKgKTK6K5K!KKKKKKKKKKKKKKK!KK"KKKKKKKK	K
+KKKKKKK!K%K&K'K%KKKK!K%K$K"K!K!KKKK#K&K+K)K(K*K*K*K(K&K$K#K"K#K!KKKKKK!K#K%K)K(K&K)K/K4K;KCKEKGKLKLKMKLKQKQKCK5K.K-K-K1K.K'K!KKK$K"K#K"KKKKKKKK KCKGKLKJKIKLKHK=K/K!KKKKKKKK K!K"K K K$K+K5K9K;K>KK=KDKHKIKIKFK>K3K.K.K1K2K.K'K!KKK K'K&K"K#K$KKKKKKKKK6KIKKKHKDK7K(KKKKKKKKK!K!K K#K$K)K/K5K;K=KKAK>KK?KAKBKBKDKEKGKGKYK�K�K�K�K�K�KyKK~K|KzKzKzKzKzKzKzKzKzKzKzKzKzK{K~K}K}K~K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K�KKKKKKKKKKK�KKKKKKKKKK�K�K�KKKKKKKK�K�K�K�K�K�K�KKKKKKKK�K�K�KKK�K�K�K�K�KKK�K�K�KK�K�K�K�K�K�K�K�K�e]q�(KfKlKlKjKjK_KhK�K�K�KK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KdKfKhKiKhKaK�K�K�KoKjKmKmKkK\K�K�K�KkKiKmKmKeKfK�K�K�KeKlKmKmKbK�K�K�KnK�K�K�K�K�K�K�K�K�KZKUKVKPKBKWKTKQKGKTKdKdKIK6K7KKKKKKKKKKKKKKKK+K'KK"K KKKKKKKKKKKKKKKKKKK$K&K'K%K!KKKK"KKK K!K'K(K(K$K$K!K K!K KKKKKKK$K%K'K'KK K'K(K-K8K;K;K9KK?K=K>K>KAKBK?K8K(KKKKKKKKKKKKKKK)KHKIKHKFKJKBKTK\KWKPKTK[KhK�K�K�K�K�K�K�K�KuKvKuKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKuKtKsKtKtKsKsKsKsKuKlK2K(K*K+K&K'K+K+K)K)K)K*K+K+K*K*K*K*K*K*K)K)K)K)K)K)K)K)K'K%K&K&K#K*K9K9K9K8K#KKKKKKKKKKK$K(K+K-K1K3K3K3K7K8K;K:K;K?K?K@KCKBKBKDKDKHKGK�K�K�K�K�K�K�K{K~K|KzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K|K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KK~K}KKKKKK~K~KKKKKKKKKKKKKKKKK�K�K�KKKKKKKK�K�K�K�K�K�K�KKKKKKKK�K�KK�K�K�K�K�K�K�KKK�K�K�K�K�K�K�K�K�K�K�K�Ke]q�(K�KlKoKxK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KsKoKkKgKaK]K�K�K�KlKiKlKlKiK_K�K�K�KvKgKmKjKkK`K�K�K�KrKfKlKkKiKaK�K�K�KgKiKmKnKdKwK�K�K�K�K�K�K�K�K�K�KKYKQKQKHK@KXKSKPKFKUKcKbKGK9K2KKKKKKKKKKKKKK#K/K;K=K&KK K KKKKKKKKKKKKK
KKKKKK%K#K!K!K$K'K'K&K"KKK"K#K$K!KKKKK K#KKKKK K$K%K&K&K&K"K#K)K1K6K8K9K:K:KK6K1K-K/K0K0K-K)K%K KK$K)K.K.K.K*K)K/K:K(K!K#K!KKKKKKKK#KKKKKKKKKKKK K"K&K+K7K=KAK@K@K?KK@KCKAKBKDKDKHKLKYK�K�K�K�K�K�KyKK|KzKzKzK{K{K{K{KzKzKzKzKzK{K{KzK|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KK~K~KKKKKK~K~KKKKKKKKKKKKKKKKK�K�K�KKKKKKKK�K�K�K�K�K�K�KKKKKKKK�K�K�K�K�K�K�K�K�K�KK�K�K�K�K�K�K�K�K�K�K�K�K�K�e]q�(K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KwKlKdKfK�K�K�KeKjKnKmKlK\K�K�K�KyKfKkKjKlK_KsK�K�K�KdKmKlKnKbKwK�K�K�KcKmKlKjKbK�K�K�KpKhKmKmKgKlK�K�K�K�K�K�K�K�K�KxKVKPKPKEKBKYKRKNKGKXKaK^KCK7K0KKKKK
KKKKKKKKK)K5K?KIKAK&K!K!K KKKKKKKKK
K
+KKKKKKK K#K,K0K.K*K)K)K&K&K&K$K!KKKKKK K KKKKK K#K$K$K#K"K"K$K%K&K'K.K5K9K:KK!K$K$K KKKKKKKKKKKKKKKKK K$K+K1K6K:KDKEKDKBK>K>K?K9K2K#KKKKKKKKKKKKKKKKKKKK#K=KJKHKHKKK:K:K`KYKRKHKQKWK�K�K�K�K�K�K�K�K�KnKuKvKtKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKuKuKvKtKrKwK[K,K-K*K+K&K*K+K+K+K+K+K*K+K&K(K)K+K+K+K*K*K+K+K+K*K(K)K)K)K(K%K%K#K*K8K:K:K6K KKKKKKKKKK K$K'K*K.K0K2K3K3K6K:K;K=K>K?KAKAK@KDKDKDKGKLKFK�K�K�K�K�K�K�K}K}KzKzKzK}K}K~K|KzKzKzKzK{K}K|KzKzKzK}K}K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK�K�K�KKKKKKKK�K�K�K�K�K�K�KKK�K�KK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KK�K�K�K�e]q�(K�K�K�K�K�K�K�K�K�KlK|K�K�KxKbKjKjKlKhK^K�K�K�KiKiKmKlKkK]KpK�K�K�KcKlKkKkKeKiK�K�K�KdKkKlKnKfKhK�K�K�KcKkKkKmKaKzK�K�KzKfKmKoKhKvK�K�K�K�K�K�K�K�KqKSKQKPKKKKKVKRKMKKK]K`K\K=K7K0KKKKKKKKKKKKK'K1K;KGKKKJK.K K!K!KKKKKKKKKKKKKKKKKK2KAK:K0K)K(K*K%K$K%K"KKKKKKKKKKK!K"K K"K#K"K#K%K%K$K%K&K'K+K0K6K8K3K,K)K,K-K.K/K1K)K KK$K*K.K0K-K-K-K3K=KGKPKPKMKMKJK,K"K#K#KKKKK
KKKKKKKKKK"K'K,K4K:K?KCKEKDKDKDKCK?K4K'KKKKKKKKKKKKKKKKKKK K!K$K'K'K3KJKHKHKJKDK0KZKZKSKMKKKZKxK�K�K�K�K�K�KK�KsKvKvKtKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKtKtKtKsKsKuKuKuKtKsKtKoK9K)K,K+K(K(K+K,K+K*K+K,K+K(K)K*K+K*K)K+K,K+K+K+K*K)K)K)K)K(K'K$K!K$K5K:K:KK4K*K&K!K#K#KKKKKKKKKK!K,K4K7K=KEKCKDKFKGKGKBK7K'KKKKKKKKKKKKKKKKKKK K"K#K&K'K&K*K:KRKhKqKQKGKIKHKKK3KDKbKZKSKJKVK\K�K�K�K�K�K�K�K�K�KqKwKuKuKuKtKsKsKsKsKsKsKsKsKtKuKtKsKsKsKtKuKuKuKuKuKtKuKuKuKuKuKtKwKbK,K-K.K*K%K*K.K.K.K0K,K+K-K,K,K,K-K.K+K*K+K+K+K+K+K*K*K*K*K)K(K)K'K'K8K:K;K=K'KKKKKKKKKK K#K%K)K.K/K1K2K5K9K;K;KK2K*K'K%KKK%K!K!KKKKKKKKK(K1K=KCKEKCKAKBKCKK:KaK[KSKJKRKWK�K�K�K�K�K�K�K�K�KpKwKvKuKvKuKsKsKsKsKsKsKsKsKtKvKtKsKsKtKuKvKuKuKuKuKvKuKuKvKuKuKuKuKrK:K+K-K+K'K(K.K-K-K/K-K-K.K.K.K.K.K.K+K*K+K+K+K+K+K+K+K+K*K)K)K)K'K$K2KK>KBKBKDKCKDKDKBKKKHK�K�K�K�K�K�K�K|K}K~K}K}KzKzKzKzKzK}K}KzK{K~K~K~K}K}K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K�K~K}K}K}K}K~K�KKKKKKK�KKKKKKKKKKKK�K�K�K�K�K�K�KKKKKKKKKKKKKKKKKKKKK�K�K�KK�K�K�K�K�K�K�K�K�K�K�KK�K�K�K�K�K�K�K�K�K�KK�K�e]q�(K�K�K�K�KpK�K�K�KwKeKmKkKmKmK_K�K�K�KuKeKkKkKnKgKaK�K�K�KhKjKmKkKkK_K{K�K�KzKeKmKjKlKcKlK�K�K�KbKkKjKlKbKkK�K�K�KbKkKjKmK]K�K�K�K�K�KnKkKmKkKoKuKxK^KKKNKJKIKPKRKNKKKQK[K^KQK9K8K1KKKKKK
+K
KKKKKKKKKKKKKKKK!K!K"K"KKKKKKKKK
KKKKKKKK K$K!K#K#K!KK!K%K%K$K#K!KKKKK!K!K K#K&K%K'K'K$K$K$K$K%K$K%K%K$K KK K%K,K.K.K.K,K.K4KAKKKOKIKIKHKEKMKHKCK:K3K,K'K,K'K KKK K#K$K#KKKKKKKK'K1K=KDKEKBK;K1K#KKKKKKKKKKKKKKKKKKKK!K%K)K'K'K-K=KWKhKsKtKqKpKpKrK`KFKIKHKJKCK1KYK]KWKMKMKYKnK�K�K�K�K�K�K�K�KvKvKvKuKtKtKsKsKsKsKsKsKsKsKsKtKsKsKsKsKtKtKtKtKtKtKtKuKuKsKtKuKuKuKwKQK*K/K0K.K'K,K/K/K/K-K-K.K.K.K.K.K.K-K-K-K-K-K,K*K+K+K+K*K(K)K)K)K'K+K;K=K=K:K#KKKKKKKKKKK#K&K(K-K1K1K4K4K7K8K9K>K>K?KBKBKBKCKDKDKGKJKKKYK�K�K�K�K�K�K{KK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KKKKKK}K}K}K~KKKKKKKKKKKKKKKKKKKK�K�K�K�K�K�K�KKKKKKKKKKKKKKKKKKKKKK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]q�(K�K�K�K�K�KsK|K�K�K�KfKmKlKlKnK`KxK�K�K�KdKlKkKjKjK_K�K�K�KnKhKlKiKnKaKkK�K�K�KeKkKkKmKdK_K�K�K�KeKiKjKmKeK^K�K�K�KiKiKjKmK`KhK�KeK\KgKrKqKkKoKrKwKYKKKNKIK=KPKQKNKDKJK]K]KKK8K8K0KKKKKK
KKKKKKKKKKKKKKKKKKK K!K KKKKKKKKKKKKKKKK#K$KKKK#K$K KK#K"KKK"K$K#K"K!K!K!K#K%K%K%K$K#K#K#K%K'K$K!KKK!K)K,K+K,K-K-K-K.K7KCKFKEKCKEKGKJKDK;K7K/K,K,K.K)K$KKK!KKK#K#K KKKKKKKK K6K@KAK7K&KKKKKKKKKKKKKKKKKKKKK%K&K(K&K(K3KMKdKrKsKqKnKpKpKpKpKqKkKKKHKIKIKJK/KOK`KXKPKKKZK_K�K�K�K�K�K�K�K�K�KrKvKuKtKsKsKsKsKsKsKsKsKsKsKsKsKsKtKtKsKsKtKtKtKsKtKuKuKsKtKuKuKuKuKgK2K.K0K/K)K*K/K0K/K.K.K.K.K-K-K.K.K.K.K.K.K.K,K+K+K+K+K+K)K)K)K)K*K(K5K>K=K=K.KKKKKKKKKKK"K&K)K-K0K1K3K4K7K7K9K=K?K?KAKAKAKCKCKDKGKGKLKIK�K�K�K�K�K�K�K}K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KKKKKK~K~K}K~KKKKKKKKKKKKKKKKKKKK�K�K�K�K�K�K�KKKKKKKKKKKKKKKKKKKKK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]q�(K�K�K�K�K�K�KtKlK�K�K�KfKlKlKiKlKcKjK�K�K�KdKjKlKkKkK\K�K�K�KyKeKmKiKmKdKcK�K�K�KfKjKlKmKhK\K�K�K�KjKgKjKlKgKZK�K�KvKgKjKiKnKcKIKaKbKgKpKoKjKnKsKvKSKJKNKGK;KSKQKOKDKKK\K]KJK8K:K/KKKKKKKKKKKKKKKKKKKKKKKK*K*K K!K!KKKKKKKKKKKKKKKKKKK$K&K%KKK#K&K$K(K(K%K&K%K"K!K!K"K!K K#K'K&K'K&K#KKK!K"K(K*K*K)K)K)K(K-K2K7K9KKAKDKDKDKDKHKKKLK\K�K�K�K�K�K�KyK~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKKKKKKKKKKKKKK�K�K�K�K�K�K�K�K�K�K�KKKKKKKKKKKKKKKKKKKKKKKKKKKK�K�K�K�K�K�KK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]q�(K�K�K�K�K�K�K�KxKaK�K�K�KhKjKkKjKnKfK`K�K�K�KhKkKlKjKlK`KqK�K�K�KeKlKiKmKjK[K�K�K�KlKgKlKkKlK[K�K�K�KmKhKkKlKlKYKOKbKjKiKkKkKmK\KcK`KiKqKpKlKoKtKrKPKJKNKFK@KUKPKOKDKMK\K^KGK8K9K.K"KKKKKKKKKKKKKKKKKKKK K2KCKNKDK(KK#K!KKKKKKKKKKKKK
K
KKKK"K"K!K$K-K8K6K-K+K+K'K&K%K!KK KK K!K"K&K&K"KKK$K*K,K*K)K(K&K%K&K'K-K0K3K5K6K9KK=K=K-KKKKKKKKKKK K%K)K-K0K3K5K5K6K9K:K>K>K?K?KAKCKCKCKDKGKHKLKKK�K�K�K�K�K�K�K}K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K~K~K~KKKKKKKKKKKKKKK�K�K�K�K�K�K�K�K�K�K�KKKKKKKKKKKKKKKKKKKK�K�KKKKKK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]q�(K�K�K�K�K�K�K�K�KyK]K�K�K�KqKgKlKkKmKhK^K�K�K�KqKgKmKlKnKeKeK�K�K�KeKlKlKkKjKZK�K�K�KyKhKiKkKlK[KfK�KoKiKlKiKkKkK_KLK[KiKjKjKkK[KeK_KdKhKiKgKkKtKoKNKKKNKDKCKVKPKNKBKRK]K]KEK8K6K,K&KKKKKKKKKKKKKKKKKK"K9KMKSKRKNKLKCK&KKKKKKKKKKKKK
+KKKKKKKK#K-K:KAK@K8K0K-K,K'K"K K K K K!K!K!KKKKK&K*K,K,K,K+K&K!K"K#K%K*K-K/K2K5K5K5K3K1K.K/K2K3K*K$K$K!K K"K#K"K%K,K3K;KAKCKEK:K&K"K$K"KKKK
KKK;KKKKKKKKKKKKKKKK#K(K'K'K'K0KEK[KmKtKrKpKpKpKpKpKpKpKpKpKpKpKpKpKpKqKaKFKLKLKJKEK0KXK\KUKMKMK[KjK�K�K�K�K�K�K�K�KKtKvKuKuKvKuKsKsKsKuKuKsKsKsKtKvKuKuKsKtKuKuKuKuKuKuKuKuKuKuKuKuKuKwKkK6K1K4K1K*K,K0K/K0K0K0K0K0K0K0K0K0K0K0K/K-K.K-K.K.K-K-K.K,K+K)K(K)K'K/K>K?K?K8K!KKKKKKKKKKK#K(K,K.K1K6K3K4K9K:KK@KBKAKBKCKDKFKGKLKMK]K�K�K�K�K�K�KzKK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K�KKKKKKKKKKKKKKKKK�K�K�K�K�K�K�K�K�K�K�KKKKKKKKKKKKKKKKKKKK�K�KKKKKK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]q�(K�K�K�K�K�K�K�K�K�KoK^K�K�K�KKhKmKkKmKmK_K�K�K�K~KfKlKjKkKhK]K�K�K�KhKgKkKiKnK]KnK�K�KhKiKjKiKlKaKEKSKdKhKiKiKiKkKdKPKYKhKjKiKZKbKjK�K�K�K�K�K�KjKKKKKLK?KGKVKPKLKCKVK\K\KCK9K6K)K)KKKKKKKKKKKKKKKK&K:KOKVKTKUKSKSKPKGK/K!K K"KKKKKKKKKK
+KKKKKKK$K/K5K;K=KAK=K5K.K&K"K K K"K"K$K!KKKK K(K*K)K,K,K(K#K$K"K"K%K%K&K(K*K+K/K1K.K0K2K4K5K/K)K%K KK K!KK"K*K/K5KKAKAKBKBKEKEKCKFKJKNKJK�K�K�K�K�K�K�K|K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KK~K}K}K~KKKK~K~KKKKKKKKKKKKKKKKKKKKKKKKKKKK�K�K�KKK�KKKKK�K�K�KKK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]q�(K�K�K�K�K�K�K�K�K�KiKjKbKuK�K�K�KcKmKlKjKlKbKoK�K�K�KeKlKiKkKmK[K�K�K�KvKgKjKjKlKaKSK_KeKgKjKjKjKjKiKPKNKbKhKiKjKiKlKgKQKSKgKdKfKK�K�K�K�K�K�K�K_KLKLKMK>KJKVKQKLKDKXK\KWK>K9K6K'K)KKKKKKKKKKKKKKK-KEKMKSKVKYKYKXKHK3K"K K KK!K KKKKKKKK
K
+K
+KKKKKK'K+K0K8K?K=K5K*K!KK K!K!K!K!KKKK K&K%K(K(K(K$K#K KK#K$K$K%K&K%K%K)K-K,K.K1K3K4K.K'K"K K K K K"K%K*K1K7K;KK@K5K"KKKKKKKKKK!K%K)K+K/K1K4K5K7K6K9K9K>K@KK:K5K)K'KKKKKKKKKKKKKKK7KLKQKVK^K]KHK3KKKKKKK K K!KKKKKKKKK
+KKKKKK$K+K2K6K3K'KK"K!K K"K KKKKK#K'K'K&K$K%K%K KKKK"K$K$K#K$K'K(K(K,K/K.K/K0K,K&KKKK!K K$K(K.K0K3K7K8K=K@K@K?K>KK8KaK\KUKKKQKYKxK�K�K�K�K�K�K�K�KxKwKwKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKwKmK9K1K2K3K0K,K2K1K2K2K0K/K0K/K2K2K1K0K0K0K0K0K/K/K/K.K-K/K.K.K,K*K+K)K/K>K?K?K=K'K KKKKKKKKKK!K&K+K,K/K2K3K4K4K5K9K=KK;K4K(K&KKKKKKKKKKKKKK$KCKSK\KTKEK.KKKKKKKKKK!K KKKKKKKKKKKKKKK$K)K)K#KK K K$K$K!KKKK K!K$K(K'K%K"K"K"KKKKKK"K#K$K$K&K*K-K-K/K.K-K'K KKKK K"K&K,K1K5K5K3K6K8K;K?K@K;K3K$KKKKKKKK#K$K#KKKKKKKKKKK$K'K(K(K*K6KMKdKqKsKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKqKpKpKqKqKpKsKaKGKIKHKJKGK1KXK_KXKPKLKYKdK�K�K�K�K�K�K�K�K�KuKwKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKyKLK.K3K3K0K+K3K3K3K3K0K0K0K0K-K.K/K0K0K0K0K0K0K0K0K/K-K/K.K/K,K+K+K+K*K:K@K>KAK0KKKKKKKKKKK K%K*K+K/K1K2K1K7K:K=K=K=K@KBKBKAKAKFKHKFKGKLKWKcK�K�K�K�K�K�KzKK~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K~K}K}K}K}K~K}K}K}K~K�K~K}KK�KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK�K�KK�K�K�K�K�K�K�KKK�K�K�K�K�K�KK�K�K�K�KK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]q�(KeKMK3K;KPKRKQKQKPKPKPKSK�K�K�K�K�K�K�K|KjKlKkKmKlKZKvK�KgKhKjKiKiKjKeKOKOK_KhKhKiKiKjKlKYKEKVKeKiKjKiKiKlKaKLK�K�K}KgKlKlKoKbK�K�K�KK�K�K�K�K�KWKLKNKKK:KQKSKOKEKFK[K\KSK;K;K4K&K'K KKKKK	KKKKKKKK3KRKQK>K(KKKKKKKKKKKKKKKKKKKKKKK
+KKKKKK!K KK K K"K!KKKK%K)K)K&K%K&K$K%K%K$K#KKKKKK"K$K%K)K*K,K*K'K'K#KKK KK!K$K)K+K1K4K3K5K5K5K6K7K9K7K*KKKKKKKKKKK K$K#K!KKKKKKK"K'K(K'K)K0KDK[KlKtKrKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKqKtKqKpKsKsKsKtKjKJKHKIKHKIK1KNK`KYKSKHKWKZK�K�K�K�K�K�K�K�K�KrKwKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKtKzK`K/K2K3K2K,K/K3K3K3K3K3K3K1K-K2K2K0K/K/K0K0K0K/K/K/K/K/K0K.K-K+K)K&K(K1K?K?K@K9K#KKKKKKKKKKK$K(K+K,K+K.K3K:KAK=K8K=K?K?KBKBKEKGKFKFKGKLKUKRK�K�K�K�K�K�K�KKK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKK}K}K}K~K�K~K}K}K~KKKKKKKKK~K}KKKKKKKKKKKKKKKKKKKKKKKKK�K�K�KKKKKKKKKKK�K�K�K�KKKKK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]q�(K/KMKFK0K9KOKQKPKQKQKQKRKQK�K�K�K�K�K�K�KsKkKlKlKjKlKZKDKUKeKhKjKiKhKkKhKPKKK\KeKiKjKiKiKkK^KEKPKcKiKjKjKiKlKhKXKhKqKcKeKmKmKmK�K�K�K�K�K�K�K�KzKSKMKNKIK;KSKSKOKDKGK\K]KRKKBKAKBKEKGKGKFKGKNKRKTKfK�K�K�K�K�K�K|K�K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KK}K}K}K~KK~K~K~K~KKKKKKKKKK~KKKKKKKKKKKKKKKKKKKKKKKKK�K�K�KKKK�K�KK�K�K�KK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]q�(KK4KEK;K1K5KMKQKQKTKRKTKUKVKwK{K�K�KxKlK^KcKiKjKjKiKlKbKLKQKbKiKiKkKkKiKjKVKIKWKbKhKjKiKiKlKcKHKJKbKiKdKkKlKlKkKTKTK�K�KkKmKcKhKuK�K�KwKtKsKxKvKPKMKOKGK;KTKRKPKCKKK]K_KOKKVKhKrKtKpKoKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKqKrKqKpKpKqKtKrKqKrKrKsKsKtKaKFKIKHKIKEK/KYK^KYKQKKKWK`K�K�K�K�K�K�K�K�K�KtKyKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKxKdK5K5K4K0K-K1K3K2K3K3K3K3K4K3K3K3K3K3K3K4K2K1K3K2K1K0K0K/K/K.K-K*K,K*K.K=K@K?K@K+KKKKKKKKKKKK(K8K@K@K-KKK,K9KK?KAKBKEKGKFKFKFKNKQKRKWKcK�K�K�K�K�K�KzKKKK~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKKKKKK~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK�K�K�K�K�K�KK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]q�(KjKVKEK:KHKFK/K:KXKbKoK�K�K�K�K�K�KtKdKfKeKdK`K\K]K`KbKcKhKiKPKKK_KfKgKkKkKiKlKdKOKNK]KfKjKkKkKmKkKTKEK�K�KqKgKmKlKoK^KwK�K�K�K�K�K�K�K�K�K�K�KdKLKMKPKBKDKVKPKMKBKQK\K^KGK:K@KBK'K&K&KKKKKKKKKKKKKKKKKKKKKKKK#K1K@KMKSKMKGKGK>K'KKKKKKKKKK
KK	K
+KKKKKK!K$K(K/K0K.K.K+K*K(K)K)K&K%K&K&K*K'K%K KKK K!K#K%K%K)K+K+K+K)K+K-K.K/K/K+K#KKKKKKKKKKKKKKKKKKKKK"K'K(K*K*K4KIKbKqKsKoKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKrKtKqKpKpKqKtKqKpKpKqKsKsKuKiKJKIKIKHKIK:KRK`KYKQKIKTKWK�K�K�K�K�K�K�K�K�KrKyKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKsK@K3K5K7K3K.K3K2K3K3K2K4K5K3K3K3K3K3K2K3K0K/K3K2K/K/K0K0K0K.K-K+K+K+K+K8K?K>KBK8K KKKKKKKKKK&K;KAK;K+KKK&K4K9KK2K"KKKK$K-K6K6K9KKNK0K'K*K KKKKKKKKKKKKKKKKKK*K6KK%KKKKKK1K?K=K-KKKKKK&K2K7K8K8K:K=KAK@KCKCKFKGKGKGKGKKKPKTKTKbK�K�K�K�K�K�KzK�KKKKK�K~K}KKK}K}K}K~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK�K�K�KK�K�K�K�K�KKKK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]q�(K{K�K�KSK_K@KKuK�K�KLK�K�K�K�K�K�K�K�K�K�KzK|K�K�K�K�K�K�K�K�K_K�K�K�K�K�K�K�K�KsKiKkKlK`KEKOKbKiKlKlKlKnKkKTKPKdKeKbKkKqKgK�K�KsK�K�K�K�K�K|KYKKKMKJKBKQKUKPKJKOKZK[KXKAKK:K'KKKKKK"K*K3K6K4K5K:K;K=K?KCKCKEKGKGKGKGKIKLKRKTKQK�K�K�K�K�K�K�K�K�KKKKK~K}KKK~K~K~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK�K�K�KK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]q�(K�K�K�KK7KOK@KKzK�K�KSK�K�K�K�K�K�K�K�K�K�K~KcK�K�K�K�K�KfKsK�K�K�K�K�K�K�K�K�KdKaKeKhKlKdKGKIK_KhKfKjKlKlKnK`KSK�K�KzKnKjKkKwK�K�K�K|K�KvKxKUKJKLKGKEKSKSKLKLKTK\K\KUK?K=KDKPK;K#K&K'KKKKKKKKKKKKKKKKK!K+K5KKAKBKDKEKFKGKHKIKHKKKSKVKUK�K�K�K�K�K�K�K~K�K�KKK~K}K~KK~KKKKK~K}KKKKKKKKKKKKKKKKKKKKKKKKKKKKKK�KK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]q�(K�K�K;KKKKK�K�K&KvK�K�KSK�K�K�K�K�K�K�K�K�K�K�KbKdKbKcKfKyK�K�K�K�K�KjKeK�K�K�KSK\KZKYKWK[K_KLKNK�K�KyKjKqKpKqKdKbK�K�K�K�K�K�K�K�K�K�K�KlKQKNKLKKKLKWKRKMKKKUKZK^KPK;K=KHKOKDK&K%K'KKKKKK	KKKKKKKKKKKKKKKKKKKKKKKKKK K!K"K#K)K-K%KKKKKKKKKKKKKKKKKK K&K$K!K#K&K'K+K,K.K/K.K,K)K$KKKKKKKKKKKKKKKKKKKKKK"K$K(K)K*K4KJKaKpKrKrKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKqKtKrKpKsKtKsKtKtKsKtKqKpKsKtKtKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKvKaKHKIKHKHKJKEKXK_KWKQKIKYK[K�K�K�K�K�K�K�K�K�KrKxKyKwKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKyKwKuKxKyKyKxKyKwKuKxKxKzK^K6K9K8K7K/K2K8K8K8K8K5K4K5K5K5K5K5K5K4K4K5K6K4K2K3K3K3K2K0K/K-K.K.K+K*K:KCKCKDK8K"K#KKKKKKKKKKK"K*K1K2K4K6K;KKDKGKFKGKIKHKIKJKMKQKcK�K�K�K�K�K�K~K�KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK�K�K�K�KK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]q�(K�KWK>KAK9K)KKK1KaKiKpKqKgK�KkK2KJK�K�K|KK�K�K�K�K�K�K�KuKpKmKiKjKdK[KQKGKJKUKIKHKPKRKUKVKWKWKYKYK�K�K�K�K�K�K�K�KpKrKoK�K�K�K�K�K�K�K�K�KYKJKLKNK:KDKWKOKMK>KRK\K]KIK=K?KLKNKJK3K"K&K#KKKKKK
+KKKKKKKKKKKKKKKKKKKKKK&K(K*K)K)K)K+K(K'K$K K!KKKKKKKKKKKKKKKKKK"K$K&K%K!KKKKKKKKKKKKKKKKKKKKKK"K$K'K(K'K/K@KZKmKtKtKpKoKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKrKtKqKpKsKsKpKpKpKrKtKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKRKFKIKHKJK=K3K^K^KYKOKPK[KdK�K�K�K�K�K�K�K�K�KtKzKxKxKvKuKuKuKuKuKuKuKvKxKxKuKwKyKvKuKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxK{K`K8K9K:K:K2K3K8K8K7K8K8K8K7K5K6K6K7K8K5K5K4K5K5K5K5K3K2K0K0K0K-K+K%K$K2K>KCKCKEK?K$K K KKKKK
KKKK#K'K*K0K4K8K:K:K:K:K>K@KDKCKEKGKEKHKIKHKHKMKQKSKQK�K�K�K�K�K�K�K}KK�K�K�KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]q�(K�KNKK�K|K=KeK�K�K�K�K�K�K�K}K�K�KjKfK�K{KlKkKjKdK\KNK>K;K8K8K>KCKMKSKWKYK\K�K�K�K�K�K�K�K�K�K}KgK�K�KiKxK�K�K�K�K�KXKIKLKKK8KKKTKNKKKAKWK[K\KGK=K?KLKMKJK9K$K&K%KKKKK
KKKKKKKKKKKKKKKKKKKKK!K$K(K)K&K'K'K(K&K$K$K#K"K$K"KKKKKKKKKKKKKKKKK%K%KKKKKKKKKKKKKKKKKKKKKKK!K)K)K%K+K:KPKfKqKsKrKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKqKqKpKqKsKqKpKsKsKpKpKpKrKtKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKvK]KGKJKHKIKHK.KVK_KYKPKMK[KYK�K�K�K�K�K�K�K�K�KuKyKyKxKvKuKvKvKuKvKvKuKvKxKwKuKvKxKvKuKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKpK@K8K9K;K5K/K7K8K9K8K8K8K8K9K9K9K8K7K5K5K6K5K5K4K4K3K2K0K0K/K.K*K(K5K?K>K?KCKCKDK/KK KKKKKK
+KKK"K(K+K/K3K7K9K:K9K:K>K@KBKDKFKGKEKGKIKIKHKIKQKSKVKcK�K�K�K�K�K�K~K�K�K�K�KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]q�(K�KNKDKGKGK=KKHKOKcK�K�K�K�K�K�K�K�K�K�K�K�KK�K�K�K�K�KzKuKYKKKLKJK7KOKSKOKJKBKXK[K[KFK=K@KLKLKJK>K$K#K#KKKKKKKKKKKKKKKKKKKKKKKKK K!K!K%K&K'K$K!K"K!K&K%K#K"KKKKKKKKKKKKKKKKK K$K$KKKKKKKKKKKKKKKKKKKKK!K&K(K(K&K/KCK\KnKrKqKpKpKpKqKpKnKoKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKtKsKpKpKpKpKpKsKsKpKpKpKrKtKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKvKeKJKIKHKHKKK0KHKaK[KRKNKYKXK�K�K�K�K�K�K�K�K�K�KwKyKxKvKuKyKxKuKwKyKvKuKuKuKuKuKuKuKvKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKxKzKOK8K:K9K4K/K5K7K7K7K8K8K8K8K8K8K8K8K7K7K8K6K5K3K2K3K2K/K0K-K)K/K;K?K=K'K4KDKCKCK>K#KKKKKKKKKKK%K+K/K1K4K7K:K9K:K>K>K?KDKGKGKGKGKHKIKHKHKLKSKYKRK�K�K�K�K�K�K�KK�K�K�KKKKKK~K}KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK�K�K�K�KK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]q�(K�K,KSKCKBKBKK?KVK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KqKQKKKLKHK9KVKTKNKDKEK]K]KZKAK>KAKLKJKFK@K'K K"KKKKKKKKKKKKKKKKKKKKKKKKK K KKKKKK"K#KKKKKKKKK K#KKK!KKKKKKKKKKKIKKKKKKKKKKKKKKKK!K$K%K$K&K3KGK^KmKrKoKnKmKnKnKoKpKpKpKpKpKnKoKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKqKtKrKpKsKtKsKsKtKtKtKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKrKSKHKMKIKJK@K4K\K\KYKQKSK[K`K�K�K�K�K�K�K�K�K�KsKzKxKyKxKuKuKuKuKuKxKyKyKxKuKwKyKvKuKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKtKBK8K:K;K7K/K9K:K:K:K8K7K8K8K8K8K8K8K7K4K4K4K6K4K3K2K/K/K7K?KCK6K KKKK;KGKFKHK:K"K!K KKKKKKKK#K(K,K1K4K7K:K:KKBKCKDKGKFKFKHKIKIKIKLKRKTKUKiK�K�K�K�K�K�K|K�KKKKKKKKKKKKKKKKKKKKKKKKKKKKKK�K�K�KKKKKK�K�K�KK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]q�(K;K=K4KdK�KcKEKIKIK=K�K�K�K�KDKeK�KnKKuK�K�K_K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K\KQKRKOKOKTK[KbKkKqKnKmKjKbK\KVKQKVKhK�K�K�K�K�K�K�K�K�K�K�KgKMKMKMKCK@KWKPKOKCKJK]K^KVK>KKAKXKQKMKAKMK]K^KSK?K?KDKJKGKEKAK0K!K KKKKKKK	K
KKKKKK K"K#K$K$K!KKKKKKKKKKKK K-K)K K!K(K+K/K1K0K2K3K3K2K.K"K KKKKKKKKKKqK!KKKKKKK!K#K(K'K'K.K>KVKjKsKsKoKmKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKsKsKpKrKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKuKuKuKuKsKuKuKuKvKmKJKHKIKHKLK7KK=K5K1KKrK�K�K^K�K�K�K�K�K�K�K�K�K�K�K�K�K�KOKSKRKQKQKPKPKQKQK]K�K�K�K�K�K|KrKnKnKmKhKcK]K\KaK{K�K|K�K�K_KIKJKIK:KEKXKQKMK@KNK^K^KQK@K@KEKJKFKBK@K7K!KKKKKKKKKK
KKKKK!K&K'K KKKKKKKKKKKKKKK,K,K)K)K-K1K1K0K/K1K0K,K!KKKK!KKKKKKKKKrK#KKKKK!K&K'K'K)K5KMKcKoKrKpKpKpKoKoKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKqKqKpKpKpKpKpKpKpKpKpKpKpKpKsKsKqKrKtKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKtKuKuKuKuKtKuKuKuKuKtKTKGKIKHKLKBK1K\K^KZKQKJKZK^K�K�K�K�K�K�K�K�K�KuK|KyKxKxKxKxKxKxKxKxKxKxKvKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKwK{KYK9K:K:K;K4K6K:K:K:K:K:K8K9K9K7K8K8K8K7K5K3K7K>K@K/KKKKK K$K'K!KK"K.KCKEKDKEK1K#K%K!KKKKKKK!K%K*K0K2K5K8K:K:K>K>K>KAKDKEKBKEKHKHKIKJKKKJKLKSKXKQK�K�K�K�K�K�K�K�K�K�KKKKKKKKKKKKKKKKKKKKKKKKKKKK�K�K�KK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]q�(K�K�K�K�K�KvK�K�KfKiK�K�KyKNK�K�KnKgK\KPK>K%KzK�K�KcK�K�K�K�K�K�K�K�K�K�K�KyKTKRKQKQKQKPKMKOKRKPKiK�K�K�K�K�K�K�K�K~KpKnKnKlKhK\KMKQKtKmKwKYKFKHKKKK@KHKJKFKDKBK:K!K KKKKKK
KK	KKKKKK KKKKKKKKKKKKKKKKK%K*K+K.K-K,K/K.K/K-K'K KKKKKKKKKKKKKKKrK$KK K%K'K'K'K/KCKZKmKrKqKnKmKnKoKqKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKsKsKpKpKpKpKpKpKpKpKpKpKpKpKsKsKtKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKuKvKuKuKuKuKuKuKuKuKuKxK]KHKKKHKHKHK.KSK_KZKTKIKWKWK�K�K�K�K�K�K�K�K�K{KzKyKxKxKxKxKxKxKxKxKxKxKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxK{KkKK7K3K4K;K9K:K:K:K9K7K9K:K8K8K8K7K4K4K>K=K'KKK"K&K(K*K(K*K,K(K)K*K=KGKFKIK=K%K&K"K KKKKKKK#K*K.K2K6K8K:K9K;K>K>KCKDKCKCKEKHKIKIKIKHKHKJKTKWKTKiK�K�K�K�K�K�K~K�K�KKKKKKKKKKKKKKKKKKKKKKKKKKKK�K�K�KK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]q�(K�K�K�K�K[K�K�K�K�K�K�K�K`KCKYK�K�K~KnKqKnKQK�K�K�K�K]KsK�K�K�K�K�K�K�K�K�KKNKQKOKOKMKMKMKNKPKNKzK�K�K�K�K�K�K�K�K�K�K�K�KuKmKpKXKxK�K�KtKQKHKHKKK;KMKVKOKHK?KXK_K^KKK?KAKGKHKFKBK@K;K"KK KKKKKKKKKKKKKKKKKKKKKKKKKKKK'K+K,K+K*K-K-K-K,K'K#KKKKKKKKKKK"K#KKKKKKDK(K%K*K(K*K8KPKeKrKrKpKoKoKoKoKoKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKsKsKpKpKpKpKpKpKpKpKpKpKqKrKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKtKsKsKtKtKtKtKtKuKuKuKuKuKuKtKuKuKwKfKIKIKHKIKLK1KGKaK[KTKLKSKXKzK�K�K�K�K�K�K�K�K�KvKzKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKwKGK9K=K7K6K2K:K:K:K:K:K9K7K9K:K8K8K8K8K5K7K:K#KK%K*K,K/K2K2K1K.K/K,K,K*K3KFKGKFKEK/K#K#K KKKKKKK K&K-K2K5K8K9K:K:K=K>KAKCKCKCKEKHKGKGKGKHKIKKKOKSKWKVK�K�K�K�K�K�K�KK�KKKKKKKKKKKKKKKK�K�K�KK�K�K�K�KK�K�K�K�K�K�KK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]q�(K�KfK�KXKTKK�K�K�K�KsKpKJKSKFK[K�K�KqKnKqKmK�K�K�K�K�KSKcK�K�K�K�K�K�K�K�K�KyKJKOKMKNKNKNKMKOKMK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K[K�K�K�KqKRKGKIKIK:KRKVKMKHKAKWK]K_KIK>KAKGKGKEK@K;K9K'KK!KKKKKK	KKKKKKKKKKKKKKKKKKK%K,K+K+K*K*K+K*K+K+K&K KKKKKKKKKKKKK#K%K$K$KKK'K&K'K*K0KCK]KoKtKsKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKsKsKpKpKpKpKpKpKpKpKpKpKsKtKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKuKvKtKtKuKuKvKtKsKuKuKuKuKuKtKsKuKuKtKnKLKHKIKKKOK9K;K^K\KWKNKNK[KfK�K�K�K�K�K�K�K�K�KuKyKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKwK{KZK9K>K=K>K7K7K:K:K:K:K:K:K:K9K8K7K8K8K7K;K5K)K2K2K4K3K5K5K3K2K0K0K-K+K)K,KBKGKEKJK;K%K%K!K!K KKKKK K#K*K/K2K5K8K;K9K=K?K@KBKCKCKEKHKFKHKIKHKIKKKMKSKVKUKiK�K�K�K�K�K�K{K�K�KKKKKKKKKKKK�K�KK�K�K�KK�K�K�K�KK�K�K�KKKKKKK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]q�(KvKjK`K.KUK{K�K�K�K�K�K|KBKJKTKAKYK�K�KiKpKmK�K�KQK?K�K�KBKLKyKK�K�K�K�K�K�K�KbKKKOKLKLKMKMKQKMK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KmKSKJKKKGK:KTKRKMKEKAKYK\K]KIK?K@KHKHKHKDK=K;K,KKKKKKKKKKK
K
KKKKKKKKKKKK K%K/K1K1K-K*K+K-K+K,K#KKKKKKKKKKKKKKKKK&K&K'K&K)K)K,KK;K=K=K8K3K:K:K:K:K:K;K9K8K9K9K9K8K7K8K8K7K5K2K5K5K6K7K4K3K0K.K.K-K/K-K9KIKGKHKCK+K%K$K K KKKKKK#K(K-K0K3K8K:K:K:K=K@KDKDKDKFKGKFKHKIKHKIKKKMKQKRKWKVK�K�K�K�K�K�K�K}K�KKKKKKKKKKKK�K�KK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]q�(KoKMKEK0KLK�K�K�K�K�K�K�KjKDKNKPKAKOK�KqKnKoKvKuKRKK.KmKjK9KHK�K�K�K}KyKtKsKyKnKGKFKNKLKJKJKMKNKeK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KhKOKKKLKDK:KWKPKMKEKAK\K\K\KFK>KAKUKYKYKXKWKYK>KK KKKKKKKKKK
KKKKKKKKKK#K,K/K.K/K/K1K/K,K)K%KCK*KKKKKKKKKKKKKKKKK!K&K*K%K'K3KFK]KpKsKpKnKpKpKnKoKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKqKpKpKsKsKqKpKqKqKpKsKtKtKsKpKrKtKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKsKsKsKsKsKtKvKtKsKsKtKuKuKuKuKuKtKtKuKuKuKuKuKuKuKuKwKZKHKLKIKHKHK.KSK^KYKTKHKTKXK�K�K�K�K�K�K�K�K�K�KyK{KyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyK{KzKxKxKxKLK9K=KK@KCKDKFKFKFKHKHKIKIKLKKKQKSKWKWKkK�K�K�K�K�K�K|K�K�KKKKKKKKK�KKK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]q�(KVK=KK7K7K:K9K9K:K:K:K:K5K6K:K9K:K8K8K8K8K6K5K5K2K4K4K5K5K4K3K1K/K0K/K@KHKFKJKCK(K%K#K!KKKKKK!K$K(K-K1K5K9K:K9KK?KCKDKEKGKGKEKHKIKLKJKLKSKTKWKVK�K�K�K�K�K�K�K�K�KKKKKK�K�KK�KKK�K�KK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]q�(K:KAK6KKAKYK�K�K�K�K�K�K�KqKdK\K[KdKuK�K�K�K�K�K�K�K`KIKKKLK@K?KZKPKMKAKHK_K^KZKAK?K9KKKKKKKKKKKK
K
+KKK
K
KKKKKKKK"K(K*K+K-K/K/K*K+K#KKKKK.KxKKKKKKKKKKK KK"K&K)K'K'K3KKKbKnKrKqKoKnKoKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKqKqKpKqKsKrKqKqKqKrKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKtKtKtKsKsKtKtKvKtKsKtKtKtKtKuKuKuKtKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKwKnKLKKKMKLKLK:KKCKYKPKMK>KHK^K_KWK>K?K7K*K%KKKKKKKKKKKKKK
K
KKKKKKKK#K$K(K)K+K+K$KKKKKKKK0KuKKKKKKKKK"K#K%K&K&K&K+KK?KAKBKDKDKCKCKHKHKIKLKLKLKLKNKQKZKUK�K�K�K�K�K�K�K�K�K�KK�K�K�K�KK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]q�(K/K9K:KJK{K�K�K�K�K�K�K�K�K�K�K�KmKFKMKGK_K�K�K�KhKpKmKoKWK!KhK�K�KiKwK�K�K�K�K�K�K�K�K�K�KJK@K2K,K3KQKK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KyK[KHKIKIKFKNKWKOKKKDKSK]K`KTK>KAK=K2K+K%KKKKKKKKK	KRK#KK
K
KK
KKKKK K'K)K)K#KKKKKKKKKKK/KjKK K KK KKK"K'K&K(K(K2KHKbKpKrKqKnKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKqKrKrKrKrKrKrKsKsKsKsKpKqKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKtKsKtKtKtKtKsKtKtKuKuKuKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvK]KHKJKJKJKJK-KRK`KZKTKJKTKXK�K�K�K�K�K�K�K�K�K�KwK{KzKxKyKyKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKyKyKxKxKxKyKyKyKzKzKzKyK{KdK;K>KK?K@K@KCKCKEKGKFKHKIKKKLKLKJKMKTKWKUKnK�K�K�K�K�K�KK�K�K�K�K�K�KK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KpK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]q�(K)K4K=KJK{K�K�K�K�K�K�K�K�K�K�K�K�KmKHKEKxK�K�K�KjKoKoKnKrKXK"KgK�K�KmKtK�K�K�K�K�K�K�K�K�K�KiKcKQK>K1K-K2KLKuK�K�K�K�K�K�K�K�K�K�K�K�KxKTKHKIKIKGKPKWKNKIKKKZK\K_KRK?KCK>K4K.K)K$K!KKKKKKKK9KKK
KK
KKKKKK!K"KKKKKKKKKKKKKK(K[KK!K K K#K%K'K)K&K.K@KWKjKrKsKpKpKpKqKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKrKtKqKpKsKtKtKsKsKsKsKpKqKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKuKuKsKsKsKsKsKsKsKsKtKvKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKxKfKIKIKHKLKMK3KGK_K[KUKMKPKYKrK�K�K�K�K�K�K�K�K�KuK|KzKxKzK{KyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyK{KyKxKxKxKzK{K{KzKzKzKzK{KtKFK;KKK>K:K9K6K5K7K8K7K9K:K:K9K8K8K8K8K8K8K8K8K6K4K3K2K3K2K1K6KFKIKIKHK;K(K&K#K K!K KKKK K'K,K0K4K6K;K=K>K?K>K?KAKBKGKHKFKHKHKJKMKLKJKKKRKWKWKTK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]q�(K%K3K@KGKgK�K�K�K�K�K�K�K�K�K�K�K�K�KiKCK�K�K�K\KcKrKmKoKoKrKYK"KfK�K�KoKsK�K�K�K�K�K�K�K�K�K�KqKxKoKdKUKDK5K,K0KCKnK�K�K�K�K�K�K�K�K�KpKOKGKIKHKGKTKVKOKIKMKZK\K^KNK?KBK=K5K2K/K*K&K"KKKK KKKKKKK
KKK
KKKKKKKKKKKKKKKKKKK*KSKKK"K%K'K%K&K5KMKdKpKsKqKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKrKsKpKpKpKpKpKpKpKpKpKpKrKtKsKsKsKsKsKsKsKsKsKsKsKsKtKsKrKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKtKuKuKuKuKuKuKuKuKtKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKwKkKIKHKIKIKKK9K9K_K]KVKOKLK[K`K�K�K�K�K�K�K�K�K�KxK{KzKzKzKzKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKzKzKyKyKzKzKzKzKzKyKzKzKzKzKzK}KVK;K>K>K?K2K8K;K7K5K4K9K;K=KK?K?K?KAKCKDKFKGKGKGKIKIKKKLKKKNKTKUKUKpK�K�K�K�K�K�K~K�K�K�K�K�K�K�K�K�KnKqK�K�KK�K�K�K�K�KuK�K�K�K�K�K|K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]q�(KK$K:KEK;KwK�K�K�K�KKnKfK[KgKlK�K�K�KaK�K�K�KIKBKfKqKnKnKnKrKXK4K�K�K�KrKuK�K�K�K�K�K�K�K�K�K�KmKtKsKuKrKiKYKIK8K.K0K@KcK�K�K�K�K�K�KgKLKHKIKGKHKVKUKOKHKMK[K]K^KJK?KBK=K6K3K0K*K(K&K!KKKKKKK
KKKKKKKK
KKKKKKKKKKKKKKKKK"K5K#K$K%K&K,K>KWKkKrKqKnKnKpKpKqKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKsKsKpKpKpKpKpKpKqKpKpKpKrKtKsKsKsKsKsKsKsKsKsKsKsKsKtKrKrKtKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKuKuKuKuKuKuKuKvKuKsKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKsKPKGKIKHKKKBK7KZK]KWKOKIKYKXK�K�K�K�K�K�K�K�K�K�KyK{KzKzKzKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKzKxKyKzKzKzKzKzKxKyKzKzKzKzK}KgK@K>K>K?K8K0K5K6K:K;KK?K>K>K@KCKDKFKGKFKGKHKHKKKMKKKLKRKTKYKWK�K�K�K�K�K�K�KK�K�K�K�KqKnKdK`K`K`KoKbKzK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]q�(KKK0K>K!KUK}K�K�K�K7K:KK?K:K0K:K=K=K=K=KK:KTK�K�K�K�K�K=KRKFK@KfKrKoKrKlK�K�K�K�K�K�KqKqK�K�K�K�K�K�K�K�K�K�KnKqKqKpKpKpKrKsKsKlK`KOK;K)KK#KhKiKFKGKGKGKHKPKPKKKBKOK]K`K_KIK?KBKK>K5K:KK=K=KAK@KAKEKFKFKGKJKJKKKKKLKLKNKSKWKXKWK�K�K�K�K�K�K�K�K�KnKXKIK/K?KAK2K.K2KUKJK?KNK{K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]q�(KKKK)K>K5KdK�K�KIK5K:K8K9K9KK>KKKAKBKDKGKFKFKHKHKJKLKLKKKNKTKVKUKtK�K�K�K�K�K�K�KnKJKHK:K%K2K*K"K"K1K0K"K%K8K>KBKGKK"K"K K)K4K=K;K7K�K�KwKtKmKTK?KJKLK=KK>K?K>K>K=KKIKIKKKHK-K&K%K%K!K KKKKK$K+K/K5K7K9K;K=K=K?K>KAKCKDKGKEKAKHKHKJKLKLKKKLKPKQKZKYK�K�K�K�K�K�KeKRK7K3K+K+K/K)K"K"K#KKK$K*KKK&KK5KQKwKlK�KuKgK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]q�(K/K&KKK=KFK�K�K�KKKK K K!K6K?K7KvK�K�K�KgKmKSKBKFKGK?K>KfKnKhKqKsKtK\K%K7KK|KK;K6K9K@K?K?K?K?K?K>KKAKDKDKFKGKGKGKHKJKLKKKKKMKNKOKWKVKyK�K�K�K�KyK5K.K(K(K-K4K4K)K%KKKKK!KKKKKK#K7K\KgK�K�KyK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]r(K-K-K&KK6KXK�K�K�K*K4K2K9KHKYKmKKMKXK�K�K�KRKsKqKVKCKEKCK?K@KaK�K�K�KnKsK]K&K*KnK|KHKMK�K�K�K�K�K�K}KsKoKpKoKqKoKoKpKpKpKoKoKpKZKXKrKUKDKGKHK8KCKWKOKKK?KKK`K`KZKKKKKFK5K0K0K/K-K+K'K$K"K.K-K)KK
+KKK
K
KKKKKKKKKKKKKKK!K!K"K$K&K%K6KJK]KqKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKqKsKsKsKrKqKpKqKtKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKuKtKsKuKuKsKtKuKtKsKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKxKvKuKvKvKuKuKuKvKxKaKHKIKIKHKIKLKRK\K[KVKLKMKXKfK�K�K�K�K�K�K�K�K�KwK{K{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK}KkK>K?K?K@KK>K>K>K=KK?K?K?KBKDKDKEKGKFKHKJKLKKKKKLKLKRKVKXK]K�K�K�K�KBK&K&K#K(K.K1K2K*K%KKKKKKKKKKK KHKXKvK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]r(K(K)K)K)K0KuK�K�K�K9KQKgK�K�K�K�K�K�KwK�K�K�KAKpKpKnKVKAKCKJKKOK_K`KZKKKIKBK4K0K0K.K-K+K'K$K"K.K.K.KK
+KKK
KKKKKKKKKKKKK!K"K#K%K%K%K%K&K(K7KKK^KqKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKqKtKsKsKqKpKpKqKtKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKvKtKsKuKuKsKtKvKtKsKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKwKyKwKuKuKuKvKvKvKuKxKjKKKIKIKIKHKLKOKYK\KVKNKHKYKYK�K�K�K�K�K�K�K�K�K�KzK{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{KyKMK>K?K>K?K6KK?K?K?K?K?K?K?K=KK?K?K>KBKDKCKDKFKGKHKJKKKKKKKLKJKOKUKYKWKyK�K�KRK4K&K)K)K,K(K)K-K+K!K KKK KKK!KKKK#K:KPKoK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]r(K+K+K*K*K&K�K�K�KjKdK�K�K�K�K�K�K�K�K�K�K�KsK9KiKqKnKnKRKFKIKAK{K�K�K�KoKsKrKtK`K%KQK�K�KwKjK�K�K�K�K�K�K�K�K�K�KqKtKqKpKpKoKnKnKVK]KmKJKDKCKHK5KLKTKMKJK=KQK`KaKZKLKJK@K3K1K.K+K-K+K'K$K$K1K0K/KK
+KKK
KKKKKKKKKKKK#K$K$K%K'K(K)K(K+K(K8KKK^KpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKsKsKtKrKpKpKpKpKqKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKuKuKuKtKsKuKuKsKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKxKxKyKxKxKxKxKxKxKxKvKvKqKNKHKIKHKHKKKEKUK]KWKPKIKVKWK�K�K�K�K�K�K�K�K�K�KxK|KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K~K\KK9K9K?K>K>K>K>K>K?K>K9K;K=K?K>K=K=K=K=K=K=K=K=K;K9K9K:K8K4K4K5K4K4KEKLKKKKKBK*K)K&K#K#K KKKK!K*K2K3K4K8K9K;K?K>K>KBKAKBKEKEKHKIKIKIKKKLKLKKKLKQKVKWKVK�K�K4K3K%K+K'K'K&K)K)K&K$KKKKKKKKKKK#KK>K?K@KCKBKDKEKHKIKHKHKKKLKKKLKIKMKVKWKRKnKaK4K3K,K*K$K'KK K"KK#KKKKKKKKKK$K3KJKYKmK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]r(K$K#K!K5KVK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KCK4KQKrKpKpKrKkKSKJK�K�K�K�K�K�K�KpKrKwKdK+KdK�K�K�KmK�K�K�K�K�K�K�K�K�K�KnKpKqKpKqKiKdK�K^KFKFKGKDK6KPKSKMKGK=KYK_K_KTKMKLK;K-K-K.K+K(K&K&K#K$K0K2K3KKKKKKKKKKKKK$K&K#K&K(K'K+K*K*K*K*K,K-K.K=KMK`KsKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKqKtKqKpKpKpKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKuKvKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKwKyKxKxKyKwKuKwKyKxKxKxKxKxKxKxK{KaKFKIKHKIKKK/KBK]K[KUKMKMK[KdK�K�K�K�K�K�K�K�K�K}K~K~K{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{KzKPK@KBKAKBK:K>K?K>K?KBK@K>K@K>K>K?K?K=K;KKZK^K^KRKMKNK:K-K-K,K*K(K'K&K#K%K2K2K1KKKKKKKKKKKK"K'K)K(K*K,K,K,K-K/K.K.K/K0K0KAKNKaKrKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKqKtKrKrKrKrKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKtKtKsKtKtKtKtKtKtKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKwKwKwKwKwKxKxKxKxKxKxKwKxKxKxKxKxKxKxKxKxKzKhKFKIKJKIKKK7K6K^K[KXKPKIKZKZK�K�K�K�K�K�K�K�K�K�KzK~K{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK}KbK>KBKBK@K9K9K=K?K@KBK@K>K?K>K>K?K?K?K?K=K=K?K>K=K=K=K=K=K:K:K:K8K8K4K/K5K4KDKNKKKLKHK.K&K&K%K#K"K KK K!K(K0K3K5K7K:KK?K?K>KAKBKEKEKHKGKIKLKKKKKKKLKKKNKTKXK\KPKAK7K7K4K*K-K1K"KK"KKKKKKKKKKKK*KcK�K�K|K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]r(K,K+K4KKK�K�K�K�KtK~K�K�K�K�K�K�K�K�K�K�K�K K/K6KbKqKpKqKpKqKwK�K�K�K�K�K�K�K�K�KqKvKqK�K�K�K�K�K�KjK�K�K�K�K�K�K�K�KVKgKrKoKrKeKVKhKZKEKGKHK>K7KVKQKJKBK@K[K^K]KRKMKMK6K,K-K*K*K)K'K&K"K&K4K2K/KKKK
+KKKKKKK#K%K)K)K)K+K.K.K.K.K0K0K0K.K1K3KCKPKbKrKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKqKtKsKtKtKtKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKvKtKsKuKvKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKxKyKyKyKyKxKxKxKxKxKxKyKxKxKxKxKxKxKxKxKxKyKqKKKIKLKIKJKAK/KWK\KXKQKHKVKUK�K�K�K�K�K�K�K�K�K�KxKK{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{KpKEKBKCK>K7K7K?KBKBKBK@K>K>K?K?K>K>K?K?K=KKKAKBK?K@KAK?K>K>K>K>K?K?K>K>K?K?K>K>K>K=K=KKAKBKDKEKGKGKEKGKHKIKJKLKKKJKOKQKYKPKDK>K:K/K-K9K7KMK4K%K&K(K#KKKKKKKKK KMKLKtK�K�K�K�K�K�K�K�K�K�K~K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]r(KKK@KBK@K>K?K?K>K?K?K?K?K?K?K?K?K?K=K=K=K=K=K;K9K9K6K4K5K3K>KLKKKLKLK6K'K(K'K%K$K#KKK!K&K-K0K7K8K9K;K?K@K>KBKBKCKEKGKFKGKHKIKHKIKLKLKLKLKJKUKTKIKKAKAKAKBKAKAKAKAK?K?K?K?K>K?K?K?K>K=K=K=K=K=K=K;K8K9K6K4K5K4K7KIKKKKKMK?K(K(K&K$K#K$K#KKK$K,K/K2K6K9K;KKAKBKCKEKGKGKGKHKIKKKLKKKKKLKKKOK\K^KGK7K8K-KK/KAKDKMK$KK$K(K(K%KKKKKK8K�K�K�KK�K�K�K�K�K�K�K�K�K�K�K�K�K}KiK\KWKVK^KcKfKjKtKwKtK|K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]r
+(K2K;KPK�K�K�K�K�K�K�K�K�K�K�K]KpKjK�K�KwK+K1K0K2K1K^KtKpKtKrKpKxK�K�K�K�K�K�K�K�K�K�K[K�K�K�KpKfK'KFK�K�KzKIK�K�K�K[KLKNKHKDKHKLKRKYKIKCKCKFK5KEKSKMKIKK>K?K?K?K?K?K?K>K=K=K=K=K=K=KKK?K?K?K?K?K=K=K=K>KK)K)K(K%K"K"K!KK#K(K+K.K1K6K:K;KCK@KAKEKDKDKCKEKGKHKIKJKKKKKMKNKLKJKQK^KLK8K=K.KK/KOKIKQK>KK,K4K/K"KK"K1K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KtK�K�K�K�K�K�K�K�K_KK
+KKYK�K�K�KxKhK^KMKCKUK^KnKxK~K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]r(K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K3KlK�K�KmK$K/K5K7K3KMKsKqKsKtK�K�K�K�K�K�K�K�K�K�K�KgK`K{KoKrKrKpKsKcK%K KVK`K:K.KGKPKMKLKHKFKKKEKCKIKIKEKCKEK4KLKRKMKJK:KMK^K[K]K^K[KOK2K(K$K$K'K%K#K$K!K-K5K5K,KKKKKKKKK K%K)K.K1K1K3K5K7K7K4K6K9K8K7K9K9K;KLKTKeKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKtKtKsKsKsKtKuKuKuKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKwKwKxKxKxKxKxKwKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyK|K[KGKKKJKGKJK/KCK^K[KUKMKLKYK]K�K�K�K�K�K�K�K�K�K�K|K~K}K}K|K|K|K{KzKzKzKzKzKzKzKzK|K{KzK{K|K|K|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KvKFKAKDKCKCK;K@KBKBKBKBKBKAKBK@K@K@KAKBK@K@K?K>K?K?K?K>K=K;KKKMKLKKKLK:K)K'K%K&K%K$K#K K K'K+K.K0K5K:KK?K?K?K?K?K>KKCK>K@K;K@KCKAKBKBKBKBKAKCKAK@KBKBK@KBKAK?K?K?K?K?K?K>K=K=K=K:K9K8K8K8K5K@KMKLKNKMK5K(K)K&K%K$K#K K!K$K'K.K1K5K:KK?K?K>K?K?K=K=K;K8K8K6K5K5K9KLKMKKKMK?K)K)K)K&K%K&K#KK!K'K+K/K3K8K:K=K>KDKGKCKBKBKDKGKGKGKDKFKMKKKMKMKMKQKKKIKSKiK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KKKAK:K)K K K#K)K7KGKfK�K�KkKIK%KKKKKKKKK6KKKVKeK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]r(KwK�K�K�K�K�K�K�K�K�K�KdKK KKmK�K�KpK!K/K.K/K2K3K6KgKuKvK�K�K�K�K�K�K�K�K�K�K�K�K[K�K�KWK9KBK/KUKsKoKpKqKlKIKBK'K-KCK0K$K3KIKDKGKAKDKCKCK?KDKNKLKGKBKOKXKZKZK[K\KbKXKRKaK^K]K^K`K_K_K_K_K^K^K[KKK/KKKKKK!K%K*K,K0K3K5K5K9K8K7K:K:K:K:K:K=K;KIKrKSKfKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKuKtKtKsKsKsKtKtKsKsKtKtKtKuKuKtKtKtKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKvKuKuKuKvKxKvKuKvKvKvKvKvKxKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKyKxKxKxKxK{KbKGKIKGKGKJK7K5KZK[KYKRKGKUKTK�K�K�K�K�K�K�K�K�K�KyKK~K}K}K}K}K}K{K{KzKzKzK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K|K�KjKBKDKCKEK@K8KBKCKBKBKBKBKAKBKBKAKAKAKBKBKBKBK@K?K?K?K?K?K?K=K=KKDKHKDKBKBKDKFKFKGKGKJKLKKKLKMKOKIKGKaK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KK�KK0K;K3K,K&K&K"KKK'KgK�K�K�K�K'KK
KKK
K
K
+KK"K,KKKcKzK�K�K�K�K�K�KxKzK}KmKmK�K�KK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]r(K�K�K�K�K�K�K�K�K�K�KUKKK KK�K�K�KJKK*K-K0K2K6K9KbKsKK�K�K�K�K�K�K�K�K�K�K�KtK[K�K^K>KK?K=KK0K#K1KGK>KBKDKCK>KEKNKJKFKDKPKXKZKWKEKPK`KQKVK_K]K^K^K^K_K`K`K`K`K_K^K^K\K]KaK`K\KOK@K1K+K,K0K4K6K9K:K;K;K=K=K=K=K=K>K=KJKqKTKhKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKtKtKtKtKuKuKtKtKtKuKuKtKtKuKuKuKuKuKuKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKwKwKuKuKuKvKwKwKvKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyK{KzKyKyKyKyK{KvKOKHKHKIKJKJK+KMK\K[KWKKKMKWKgK�K�K�K�K�K�K�K�K�K�KKK}K}K}K}K}K}K}K|K|K|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K�K[K@KDKDKDK=K=KCKCKBKDKCKBKCK@K9K>KBKAKBKBKBKBKBK@KAKBK?K?K>K=KKBKGKHKEKCKCKCKDKGKFKHKKKLKJKJKKK`K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K(KKKKK%K'K(K'K&K$KKKKBK�K�K�KpKLK4KKKKKKKKK
+K	KK4K�K�K�K�K�K�KtKaK}K�K�K�KfKQKcK�KyK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]r(K
KUK}KrK_KKKQKRKK	KKKKK"K}K�KxKKK"K#K*K-K0K2KYKpK�K�K�K�KnK�K�K�K�K�K�K�KIKRKHK\KpKHK2K2K0K7K*KJKtK^KAKCKkKcK!KK2KJK8K!K#K>KGKDKBK3KCKNKIKGK>KJKZKXKSKK*K8K=KXKdKaK]K\K\K_K`K`K`K`K`K`K^K\K\K[K\K]K_KaK[KQKDK;K6K5K8K:K=K=K=K=K=K=KKIKlKSKhKrKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKvKvKvKvKuKuKuKvKvKuKuKuKuKuKuKuKuKuKuKtKsKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKwKyKxKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKzK{K{KzK{KzKxKXKHKHKJKLKNK0KBK]K[KYKOKNKXKYK�K�K�K�K�K�K�K�K�K�K{K�K}K~K}K}K}K}K}K~K~K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKkKCKCKDKCKAKK?K?K=K;K9K:K:K9K9K4K?KNKOKNKMK6K)K+K)K&K&K&K"K!K%K)K,K2K6K8K=K@KEKHKFKCKDKDKDKGKFKHKKKLKKKQK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K1KKKKKKKK"K$K"KKKKK2KbK�K�KVKHKRKEK$K
KKKKKK0K6K&KGK�K�K�K�K�K�KoKcK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]r(KK�K�K�KOKEKLK'KKKKKKK3KmKlKBKK!KK K$K'K0K1KWKrK�K�K�KvKtKqK�K�K�K�K�K\KBKOKFK_KuKlKFK2K'K(K5K%KMKZKAKHKkKvKaK$KK[KgKKKCKEKBKAK-KDKPKIKFK9KHKVKYKQKKFKHK@KCKIKSK[K^K_K^K_K_K`K`K_K]K]K]K_K_K_K^K\K\K]K^K_K\KVKIK>K;KK@KCKDKBKBKDKCKCKCK@KAKBKBK@KBKBKBKBKBKBK@K>K?K?K>K>KK>KHKhKSKiKvKtKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKuKuKuKsKsKuKvKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKwKyKxKxKyKxKuKwKyKxKxKuKvKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyK{KyKxKzKzKzKyK{KlKFKHKIKHKJKAK0KXK\KYKRKKKUKUKuK�K�K�K�K�K�K�K�K�K~KK~K|K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K|K�K^KBKDKCKK?K?K?K>K=K;K:K6K5K9K7KFKNKNKMKKK5K)K)K(K%K$K$K!K K$K*K-K3K9K;K=KAKHKIKIKGKDKDKGKFKIKJKIKgK�KxK|K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KwK@K K
K
+K	KKKKKKKKKKKK
+KK^K�KEKK	K	KKK&K9K�K�K�K�K'KKCK�K�K�K�K�K�K�K�K�K�K�K�KsK�K�K{K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]r(KrK�K�K�KKKKKKKKKKK}K�K�KLKK%K$K!K#K%K'K,KHKqKOKBKaKvKsKsKsKsKeKHKGKAK@KJKFKgKvKpKrKoKHK0K)K/K.KJKvK�KuKpKoKtKdK1K4K&K;KEKBKBK>K.KMKNKIKEK8KLKWK[KMKK+K@KRK\K\KXKRKHKAKCKKKVK\KaK]K\K\K]K]K]K`K`K_K_K_K_K_K_K^K\K]K]K^K\KUKOKFK>K:K;K=KHKfKSKjKuKtKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKuKuKsKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKwKxKuKvKxKwKuKxKxKxKxKuKwKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKzKzKzKzKzKzK{KuKMKHKHKIKIKHK.KPK\KZKWKNKOKWKcK�K�K�K�K�K�K�K�K�K�K~K~K|K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K�KqKDKCKEKAK@KK>K?K?K>K=K;K9K;K:K8K6K=KNKNKMKOK?K)K+K*K(K%K%K$K"K$K'K,K0K8K;K=K@KDKHKIKIKEKEKGKFKIKHK[KwK}K{K{K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K~K|K�K�K�K�K�K�K�K�K�K�K�K�K�K�K~K�K�K�K�K�K�KKDK+KKK
+KKKKKKKKKKKK	K
+KKlK}KK	KKKKK
K+KwK�K�KfK
+KK`K�K�K�K�K�K�K�K�K�K�K�K�KqK�K�KrKlK�K�K�K�K�K�K�K�K�K�K�K�e]r(K�K�K�KfKKKKKKKKKK!K�K�K�K6KK$K#K%K&K&K)K*KDKkKIKGKhKtKrKsKsKrKuKfKJK:K@KHKIKnKuKrKpKtKmKEK-K+K0K�K�K�KoKsKpKpKuKcKGK5KGKCKBKBKKEKcKSKjKuKsKsKsKsKsKsKsKsKsKsKsKsKtKtKsKsKtKtKsKsKsKuKuKtKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKvKxKxKvKvKyKwKuKwKxKxKxKvKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKyKyKyKzKzKzKzKzKzKzKzK|KVKGKJKMKOKTK4KBK\K[KXKOKNKYKVK�K�K�K�K�K�K�K�K�K�KzKK}K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K|KQKAKEKEKEK=K@KDKCKDKDKDKCKCKDKDKCKBKBKBKBKBKBKAKAKBK?K?K?K?K>K=KK@KBKDKHKIKIKHKFKGKEKSKzK�K�K{K{K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K|K|K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KGK0KKK
+KKKKKK	K
KKKKKKKKK^K7KKKKKKKKK^K�K�KMKKK}K�KK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]r(K�K�K�KCKKKKKKKKKK/K�K�K�K$K$K$K$K%K(K)K'K)K?KaKDKIKkKrKoKsKsKpKpKsKiK>K@KFKNKqKtKsKtKpKrKmKBK-K7K�KK�K@KuKqKpKrK_KEKEKIKAKAK?K8K4KPKKKGKCK9KPKVKXKDK*K+K(K&K"K K&K5KJKXKXKZKXKQKFKCKDKNKYK^K_K]K]K_K`K`K`K_K`K`K`K^K\K_K`K_K_K`K^K^KZKUKUK`KUKkKwKtKsKsKsKsKsKsKsKsKsKsKsKuKvKtKsKvKuKsKsKsKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKwKyKxKxKyKyKyKwKuKwKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKzKzKzKzKzKzKzKzKzKzKzKzK|KbKJKRKSKQKXK=K5K[K[KVKRKNKXKTK�K�K�K�K�K�K�K�K�K�KzK�K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K�KaKAKCKBKFKAKKAKBKJKLKKKGKEKFKPKvK}KK�K~K{K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KHK7K"KKKKKKKKKKKKK
KKKKKKK
+K	K
KK-KWK7K!K5KvK�K�KLKK2K�K�KvK�K�K�K�K�K�K�K�K�K�K�KUKdK`KYK�K�K�K�K�K�K�K�K�K�e]r(K�K�K�K)KKKKKKKKKK@K�K�K�KK%K"K"K$K%K'K)K)K;KUKEKLKoKrKrKsKsKrKrKrKuKhKNKCKSKtKsKtKsKrKpKsKnKAK>K�K�K�KKHKsKtKqK[K@KNKIK@KAK@K4K9KRKJKGKAK:KSKVKYKFK0K1K,K(K'K&K!KK!K.KIKRKYK^K]KZKRKHKAKEKQK\K`KbK`K^K`KaK`K`K`K_K^K_K`K`K`K`K`K^K]K]K]K^K[KeKoKrKtKtKsKtKtKsKsKsKsKsKsKuKuKtKsKtKtKtKtKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKwKwKwKwKwKwKxKxKwKwKxKxKvKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKzKzKzKzKzKzKzKzKzKzKzKzK}KmKOKTKTKSKUKHK-KVK[KXKSKMKTKWKtK�K�K�K�K�K�K�K�K�K�KKK~K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K~K~K~K~K~K~K~K~K�KrKEKCKDKCKCK;KDKDKCKDKCKCKDKCKCKDKDKCKDKCKCKCKBKBKBKBKBKBK?K>K?K=K=K;K9K7K8K7K:KJKNKMKOKHK/K)K)K'K'K&K%K"K!K%K,K1K6K:K>KAKCKHKKKJKGKFKFKoK�KwK}K�K�K{K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KvKEK>K&KK
KKKKKKKKK
+KKK
K
KK	KKKK
+KKKKJK�K�KYKvK�K�K�K�KyK�K�K�K�K�K�K�K�K�K�K�K�KwKvKSKKK0K;K�K�K�K�K�K�K�K�K�K�e]r(K�K�K�K!KKKKKKKK KKNK�K�KEKK%K!K K"K%K%K)K*K6KNKEKSKqKrKtKsKsKtKsKsKsKqKQKJKfKvKsKsKsKsKtKsKuKnK^K�K�KpK3K,KEKrKqKWK@KHKFKAK@KAK4K=KTKIKGK?K9KVKVKYKDK4K2K)K&K&K&K%K#K#K'KSKK@KDKDKBKAKDKCKBKBKBKBKBK@K=K?K>K=K;K9K:K:K8K8KCKNKMKOKMK8K+K+K*K)K&K&K$K"K$K*K-K5K9K;K@KDKEKJKIKIKIKSK�K�K}KK�K|KwK}K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KXKKTKTKUK@K5K4K!KKK$K%K%K#K)K[KAKK$K%K2KDKUK]K_KNKJKMKCK@KHKQK]KbKaK_K\K]K]K^K`K_K]K^K]K\K]K_K^K\K\K\K_K^K_KbKgKmKqKuKuKtKtKtKuKuKuKuKuKuKuKtKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKwKxKwKwKwKwKwKxKxKxKxKvKwKxKxKxKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKzKzKxKyKzKzKzKzKzKxKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K|K\KQKVKUKSKPKHKPKZK[KUKOKKKWKUK�K�K�K�K�K�K�K�K�K�KyK�KK~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KK~K}K}K}K}K~KKKKKK~KK�K~K�KcKAKDKDKDKAKK@KCKDKBKBKCKCKBKBKBK@K?KAK@K?K>K>K=KKLKNKNKOKBK-K*K)K(K&K%K$K"K#K'K+K1K5K8K=KBKDKHKFKIKLKsK�K�K{KyK�K{KsKyK|K~K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K~KiKYK;K-KKKKK
+KKKKKKKK
+K
KKKK
+KKKK	KKKK	KKAK5KEKbKsK{K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]r(K�K�KwKKKKKKKKKK$KGKCKK3K3KKKKK K"K!K,KXKAK+K;K8K)KKK1KGKUKZK]KZKSKIKDKBKHKSK\K`K^K\K]K`K_K\K\K]K]K]K`K_K\K]K]K_K`K`K_K_K^KaKgKmKsKuKwKuKuKuKuKuKuKuKtKsKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKxKyKxKyKyKyKyKxKxKxKxKuKwKyKxKxKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKzK{KzKzKxKyK{KzKzK{KzKxKyK{KzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K|KbKQKVKTKSKPKNKLKWK[KXKQKHKRKSK�K�K�K�K�K�K�K�K�K�K�K�KK~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K�K~K}K}K}K}K~KKKKKK}K~K�K}KKtKGKCKDKCKEK=KBKDKDKDKCKDKCKCKGKEKDKDKBKBKCKCKBKBKBK@K>KAKAK?K?K?K>K=K:K9K7K7K9KHKOKNKMKKK3K)K)K)K&K%K$K"K#K#K(K0K5K8K=KAKDKGKFKEKaK�K�K�KuKsKK�KtKsKyKyK~K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K{K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KsK\KBK(KK
KK
KKKKKKKKKKK
KKK
KKKKKK
+KKKKKK"K8KAKGKPKZKqK�K�K�K�K�K�K�K�K�K�K�K�K�K�KrKwKwKoKgKhK�K�K�K�K�K�K�e]r(K�K�KSKKKKKKKKKK%KIKFK7KKKKKK!K#K%K'K&KK>K-KGKPKHKFK9K?KWKVKUK=K5K2KKKKKK$K+K8KWKBK+K2K8K-KKKKKMKIKFKVK^K_K[KPKFKBKAKIKSK_KbKaK`K\K]K]K\K]K]K]K\K^K`K`K`K`K`K`K`K_K\K\K^KdKkKsKvKvKuKuKuKvKvKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKxKwKuKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKzKzKxKxKxKzKzKzKzKxKxKxKzKzKyKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK}KjKPKTKSKSKRKNKNKVK[KZKTKIKOKVKlK�K�K�K�K�K�K�K�K�K�K|K�K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KKKKKKKKKKK~K~KKKKKKKTKDKDKCKCK=K?KFKDKDKCKCKDKDKDKDKDKDKDKCKBKBKBKBKBKAKAKBKAK>K?K?K=KK>K=K,KIKOKHKFK8KCKVKUKPK9K4K1K0K(KKKK!K/KEKXKAK,K(K*K&KK
KKKZK@KK!K7KLKXK`K]KYKNK1K7KBKLKXK`K`K^K]K]K]K]K]K]K^K_K_K_K_K_K`K`K`K]K\K[K[KZK^KfKmKqKuKwKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKvKwKxKxKxKxKwKvKuKuKuKuKuKuKuKuKuKuKuKvKyKwKvKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKzKzKxKxKxKzK{KzKzKyKyKyKzKzKyKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|KtKTKRKSKSKSKOKOKTKZK[KXKNKKKXKYK�K�K�K�K�K�K�K�K�K�K{K�K~K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KKKKKKKKKKKK~K~KKKKKK�KfKDKEKEK?K;K=KCKCKCKCKCKCKCKCKCKCKCKCKDKBKBKBKBKBKBKBKBKAK?K?K?K>K=KK?KK?K?K>K:K:K:K7K5KCKNKMKMKNK:K*K+K)K(K&K$K"K K$K)K-K4K7K=KBKEKEKFKWK�K�K{KsKqKtK}KKwKsKvK~KKK�K~K|K�K�K�K�K�K�K�K�K�K�K�K�K�KKK~K�K�K�K�K�K�K�K�K�K�K�K�K�KKkKTK6K"KKKKKKKKKKKKKKKK
+K
K
KKKKKKK
+K
+KKKKK�K�KKKKKKRKrKpKtKtKrKjKaKpK�KXK#K(KPKxK�K�K�K�K�K�K�K�K�K�e]r!(K?K#KKKKKKKKKKKK>K@KKFKEKCKDKDKCKDKEKEKEKDKCKDKDKDKCKBKCKDKBKBKBKAK@K@K>K>KK"KKKKK KKK"K"K$KAKDK=K'K'K!K!KKKK K$K!K3K�K�K�KpKuKsKsKsKsKsKsKtKnKZKNK\KuKsKsKsKsKsKsKsKuKhKFK@KaKtKsKtKuKNK$K)KDKCK=K?K?K8K4KSKKKDK@K6KKKSKTKMK2K0K-K'K&K&K&KK K!K=KWK2K#K?KTKXKWKOK?KCKZKEK/K'K)K+K1K8K8K0KAKSKKSK\K^K^KWKLKEK@KAKOK[K`K_K`K`K`K_K\K\K\K]K]K]K]K]K]K]K]K]K]K\KZKYKZK^KdKmKvKyKxKvKuKuKtKiKWKPKIKIKBK=K;K5K9K7KBKWKjKvKwKuKuKuKvKyKxKtKwKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKzK{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K~K~K}K~KjKPKRKQKRKOKOKLKTK[KXKVKKKOKUKiK�K�K�K�K�K�K�K�K�K�K{K�K�K�KK}K}K}K}K}KKK}K}K}K}K}KKK}K~K�KKKKKKKKKKKKKKKKKK�KhKCKGKGKGKDK9KDKEKCKCKCKCKDKGKEKCKDKCKCKDKCKDKDKDKDKBKAKAKBKBKAK=K>K>K:K9K8K8K9KHKNKQKQKLK4K,K+K*K)K(K%K$K#K&K*K.K2K:K?KBKGKGKEKIKkK�KuKtKsKwKzKKyKsK~K�K�K�K}KxKzK}K�K�K�K�K�K�K�K�K�K�KvKwK�K�KxK�K�K�KxK�K�KkK|K~KnKxKwKvKrK^KAK(KK
+KK
KKKKK
+KKKKKKKKKK
+KK
+KKKKK
K
KK
+KKAKdKyK�K{KgKKKKK7KtK�K�K�K�K�K�K�KxK�KuK.KKKKKK K#K2K|K�e]r#(K8KKKKKKKKKKK&KDKDK3KKK%K'K&K&K$K"K"K KJK�K�K�KmKuKsKsKsKsKsKsKuKlKGKCKaKvKsKsKsKsKsKsKsKuKdKEKAKfKvKsKsKsKsKCK.KIKDK=K?K?K4K4KSKKKEK@K5KMKTKTKGK/K/K-K'K&K%K%K$K"KKAKYK0K%KBKUKWKXKYKZKXKZKDK5K:K.K,K.K1K7K9KIK_K+KK&K2KGKVK^K_K]KQK7K>K?KDKPK[K`K_K]K\K\K]K]K]K]K]K]K]K]K]K]K]K\K\K\K]K[KXKYK]KdKlKrKvKvKpK]KMKFK?K?K8K6K=K9K4K0K,K:KIKZKoKxKvKvKuKxKxKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K|K|K{K{K{K~KrKSKRKOKRKOKNKPKWKYKXKVKLKKKXK[K�K�K�K�K�K�K�K�K�K�KyK�K�KKK}K~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK�KxKJKEKEKEKFK:KCKDKCKEKFKDKDKGKEKCKCKCKDKCKDKCKDKCKBKCKCKCKBKBKAK@K@K=KK6K6K9K2K6K9K4K-K0K/K7K9KGKPKhKwKtKuKxKvKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyK{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K~K|KzKzKzK|KyKXKQKOKRKOKNKPKQKYK\KXKOKHKWKSK�K�K�K�K�K�K�K�K�K�K}K�K�KKK}K~K�KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK�KYKBKDKCKGK@K=KGKCKFKGKDKDKGKFK@K=KGKCKCKDKCKDKBKAKCKDKDKCKAKBKBK@K=K=KKBKEKGKHKJKwKKoKqKtKwKyK|KwK{K�K�K�K�K{K{KzKyK}K�K�K�K�K�K�K�K�K�K�KwK�KqKiK�K�K�KpK�K�K^KmK[K_KhKjKkK`KKK-KKK
KKKKK
KKKKKKKKK
K
KKKK	KK	KK
KKKKKKKKKK'KRK�K�K�KDK8KbKzK�K�K�K�KqK`KiK`K�K�K�K�K�K�KwKtK�K�e]r%(K#KKKKKKKKKKK0KEKIK-KKKKKKKK#K(K"KyK�K�K�KrKtKsKsKsKsKtKsKvK`KBKEKkKuKsKsKsKsKsKsKsKuK[K?KGKoKuKsKsKtKnKPKCKEK@K>K?K>K0K;KQKHKCK>K9KPKRKTKCK3K4K.K&K&K&K$K"K$K!KGKUK*K(KEKQKUKXKYK[KXKXKBK-K1KFKVKRK5K+K)KGK\K=K5K?KAK?K:K2K/K1KFKSKYK^K\KVKOKFK=K?KEKSK^K]K\K[K[K]K]K]K]K\K[K[K[K[K[K[K[K[K[K[K[KZKYKZKZKSKHK@K;KK=K;K9K9K8K7KEKNKOKPKOK;K,K-K+K)K&K&K%K#K#K'K,K2K8K:K?KDKGKGKJKvK�KpKpKrKuKvK{K{KwK}K�K�K|K{K{KyKyKzK}K�K�K�K�K�K�K�K�K�KsK|KrKdKoK�K�KuKuKqKOKgKeK^KaKfKaK\KNK1KKKKK
+K
+KK
K
KK
+KKKKKKKKKKKKK	KKKKKK
+KKKKK
+KKKKOK�K�K�KmKoKtKiKlK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]r&(K%KKKKKKKKKKK=KnK�K=KKKK
KKK
+KKK"K�K�K�KuKuKsKsKsKsKsKsKsKtKZKCKJKpKtKsKsKsKsKsKsKsKtKTK?KLKrKtKtKsKuKkKNK@KFK?K>K>K>K.K@KOKGKDK=K>KTKQKTKBK4K8K0K&K&K&K%K"K$K"KIKTK*K+KIKTKWKWKYKZKXKWK>K(KKK'K4K0K+K7KPKYK>K0K6K=K>K=K?K@K9KDKTKK@KHKUK[K^K^K\K\K\K\KZK[K[K[K[K[K[K[K[K[K[K[K[K[K\K\KYKVKPKDK?K2K.K1K7K6K5K5K:K3K0K0K0K7K)K.KHKdKwK|KyKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKyKzKzKzKzKzKzKzKyKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K{KzKzKzKzKzKzK{K~K}K{K|K}K{K{K}K}K}K}K}K{K}KhKQKSKSKRKQKNK@KVK\KZKSKJKPKXKfK�K�K�K�K�K�K�K�K�K�K|K�KKKKKKKKKK}KKKKKKKKKKKKKKKKKKKKKKKKKKKK�K|KMKEKGKFKFK=K@KEKEKGKGKGKGKFKDKDKDKDKDKDKDKDKDKDKDKCKBKBKBKBKBK?K>K=KKMKPKQKQKEK.K,K+K*K*K'K%K$K$K&K+K2K7K:K?KDKFKGKHK`K�KrKpKrKsKsKwK}K{KzK�K�K|K{KzK{K|K{KzK�K�K�K�K�K�K�K�K�K�KoKoKhKcK{K�K|KlKWKJKXKmKiKaK^KWKUKPK3KK
+K
KKK
+KK
KK
KKKKKKKKKKKKKK
+KK
+K
+K
+K
+K
+K
+K
+K
+K
+KK
KKKKKOK�K�K�KyKkK]KiK�K�K�K�K�K�K�K�K�K�K�K�K�K�e]r'(K$KKKKKKKKK KKkK�K�KKKSKRKSK@K7K9K0K(K(K%K"K K$K$KOKRK)K+KJKUKWKXKWKWKXKXK=K+K0K%KKKKK0KSKXK=K9K7K,K2K:K=K=K8KLK^K/KK2K1KAKRKYK\K\KXKOK4K3KAKJKTK]K`K\KZKZK[K]K\KZK[K[K[K[K[K[K[K[K[K[KZK[KYKYK[KVKPKK>K5K1K;K9KGKQKPKPKNK6K+K*K*K(K&K&K$K#K%K)K.K2K:KKZKvKsKtKtKtKtKsKsKtKpKMK?KVKuKsKsKsKtKtKtKsKuKpKMK?KTKvKsKuKuKuKcKEK@KEK>K>KK;KKAKKKTK]K]K]K\KZKZK[K[K[K[K[K[K[K[KZKZKZK[KYKXKYKZKXKYKWKIK1KK	KKKK
+K
+KK	K	K	KKKKKKKxKzKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKxKyKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K|K}K}KzKzKzKzKzK{K|K}K}K|K|K|K}K}K}K}K}K}K}K}K~KwKUKRKRKQKPKOK.KDK\KZKUKOKIKVKSK�K�K�K�K�K�K�K�K�K�K|K�K�KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK�KmKFKHKFKBK@K;KDKGKFKFKFKFKFKGKFKEKEKEKGKEKCKDKDKDKDKDKDKDKBKAKCKBK?K>K=K9K8K:K7K?KQKQKPKRKBK.K,K,K)K&K%K$K$K#K'K*K/K6K:K@KBKEKGKMK|KwKpKpKqKsKuKxK|K{K}K�K�KxK{KwK|K�K|K{KK�K�K�K�K�K�K�K�K�KaKjKgKeKzK}KvKcKSKHKZKjKkKeK\KSKUK>KK	K
K
KK	KK
+K	KKK
+K
K	KK
KKKKKKKK
+K	K
+KK
+K
+KK
+K	K	K	K	K	K	K	K	K	K	K	KK4KqK�KwKqKkKmKsK{K�K�K�K�K�K�K�K�KOKwe]r)(KK K"K"K%K$KKKKK:K�K�K�KK#K%KKKKKKKK$KGK@KaKvKsKvKuKvKtKsKsKuKmKJK?K\KwKsKsKsKtKvKtKsKvKmKIK@K[KvKsKvKuKtK_KEKAKEK>K>KK-K$K&K&K#KKK%KSKLK'K/KKKYKDK)KHKZKWKWK:K'K!K!K(K.K2K5K1KHKYK7K;KWKhKsKXK.K%K'KCK\KAKBKwK{KzKqK_KGK:K8KHKWKYKYKYKSKJKAKKAKHKFKGKFKFKFKGKDKCKFKGKGKEKCKDKCKDKDKDKDKDKBKAKBKAK?K>K=K;K:K8K6K8KLKQKPKQKLK3K-K-K+K)K$K%K%K!K&K*K0K6K:K=K@KEKFKHKuKyKpKpKpKsKuKuK{KK~K�K�KvKyKwKxK�K�KK~KK�K�K�K�K�K�K�K�KsKOKjKYKoK{KoKfKXKJKRKbKgKiKbKZKUKBKK
+KK
KKKK	KK
+KK
+K
+K
+KK
KK
+K
+K
+K
+K
+K
+K
+K
+K
+K
+KKKKKKKKKKKKKKKK
+KKK:KPK^KeKmKqKrKuKzK�K�K�K�K�KjK?Kje]r*(K)K)K(K(K#KKKKKKQK�K�K�KK&K$K"KKKKKKKGKBKCKjKuKsKvKuKtKuKuKsKuKiKEKAKbKxKrKtKuKuKuKuKtKuKhKEK@K`KvKsKtKsKsK^KBKBKDKK:KKKMKFKAKCKMKPKSKOK?K>KK>KK-K,K+K*K&K&K$K#K%K)K.K4K8K=K@KDKGKGKqK}KqKqKoKsKtKvKxK~K�K�K�KwKsKuKwKK�K�KK~KK�K�K�K�K�K�K�K�KCK`KXKiKtKnKcKYKLKJKZKaKfKgK`KYKHKKKK
KK
+KK	KKK	KK
+K
+K
+KKK	K
+K
+K
+KKKK
KKK	K	K	KKKKKKKKKKKK	KK	KK
KK)K8KHKZKfKkKtK{K�K�K�K�K\K:KSe]r+(K)K*K*K#KKK KK KKaK�K�KPKK'K$K#K KK"KK"KjKVK>KHKpKtKtKvKtKsKuKuKsKuKbKAKBKhKuKsKtKvKuKuKuKuKvKeKDK?KeKvKsKsKtKtKZK@KCKCKK=K:K:K7K=KOKQKQKNKJK1K-K/K,K'K'K$K#K$K(K+K/K5KKCKCKKOKIKEKAKDKOKQKQKLK>K?K9K(K%K%K"K!K!KK-KZKCK%K4KOKUKXKYKXKXKYKUK7K)K%K*K.K0K4K3K5KRKXK6K8K?K9K4K4K0K,K,KIK[K;KOKqKUKKKFKAKK?K8K'K%K%K"K!K"KK1KZKAK&K6KPKUKWKWKXKWKXKTK7K)K%K*K.K0K4K6K7KRKXK5K8K>KKDKCKDKGKGKDKCKDKDKDKDKDKDKCKBKAK?K=K=K;K:K:K8K@KPKQKPKRKGK0K-K.K+K'K&K&K%K%K'K)K0K4K8K?KDKEK^K�KtKoKmKoKtKvKyK|K{K�K�KzKtKsKpKpKsKxK�K�K�K�K�K�K�K}K�K�K�K�KtK>KBK K(K8KAKHKIKGKSK[KcKfKgKSKAKKKKKKKKK	KKKKKK
+K
+KK
+KKK
+KK
KKK
KKK
+KKKKKKKK	K
+KKKKKKKKKKKKKK
KKKKK%K1K?KMKEe]r.(K0K+K(K$K K K!K"K#K,K@KCK5KK!K$K$K"K K"K"KVKxKeKCKAK]KyKuKuKuKuKuKuKuKtKrKOK?KPKvKvKuKuKuKuKuKuKuKuKRKK>K6K'K%K$K$K!K!KK4K\KAK'K:KSKUKVKXKXKWKVKRK8K(K%K)K/K1K4K6K9KQKWK2K7K@KK;K;KLKXKAK2K:KJKbKiKNK5KDKCK@K[KNKBKpK{KyKyK{KtKaKJKHKKKMKVKZKYKXKSKKKBK=K>KGKQKWKZKYKWKWKWKWKXKXKXKXKXKXKXKXKXKWKXKXKXKWKTKUKXKZKTKGK7K"KKPK�KwKxKxKzKzKzKzKzKzKzKxKxKxKxKxKxKxKxKxKxKzK{KzKzK{KyKxKzK{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K�KbKPKSKPKPKTKBK/KWKXKYKRKHKKKTK^K�K�K�K�K�K�K�K�K�K�K}K�K�K�K�K�KKKKKKKKKKKKKKKKKKKKK�K�K�K�K�K�K�K�KK�K�K�K�K�K�KoKGKGKGKGKEK?KCKGKGKGKGKFKFKGKEKBKGKGKGKGKFKDKCKCKDKDKDKDKDKCKBKBKBK@K>K=KKUKxKuKuKuKuKuKuKuKvKsKNKKAKQKPKQKHK=K=K4K&K%K%K#KK KK5K[K>K'K;KTKVKWKXKWKVKWKQK8K)K&K*K/K1K3K6K9KSKVK4K7K>KK7K@K1K6K@KDK?KGKbKcKYKPK;K8KJKdKuK|K{KzKzK^KQK)K#K6KIKWKYKYKUKPKIK?K8K:KFKRKWKZKYKXKVKWKWKWKWKWKWKWKWKXKWKWKXKWKUKUKUKTKUKVKXKVKFKTKpKwKzKzK{KyKyKyKzKzKzKyKyKxKyKyKxKyKyKyKyKzKzKzKzKzKzKyKzKzKzKzKzKzKzKzKzK{K{KzKzKzKzKzK{K|K|K|K|K|K|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKiKPKSKQKPKRKLK,KSKYKYKTKKKGKWKRK�K�K�K�K�K�K�K�K�K�K~K�K�K�K�K�KKKKKKKKKKKKKKKK�K�KK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KSKEKHKIKIK@KBKGKFKGKGKFKFKFKGKGKFKGKFKFKFKEKEKDKDKDKDKDKDKCKBKAKBK@K>K=K;K8K9K8KEKQKPKQKSKCK.K-K,K*K(K'K&K&K%K(K*K0K6K9K=KAKTK�KyKrKoKnKqKwKzK}K}K~K�K�KxKsKnKoKqKnKsKyK~K�K�K�K�K�KyK�K�K�K�KKK
KKK(K4K6KCKOKYK[KaKdKQKLKKK
+KKK	K
+KK
+K
+KK
+K	K	K	K	K	KKK
+K	KK
+KKKK
KKKKKK	K	K	K	KKKKKKKKKKKKKKKKK
+KKKKK#K-K/KAe]r0(K,K,K(K KKKKK!K3K?K@K)KK"KK K#K KK(KkKzKXK>KBKlKvKuKuKuKuKuKuKuKwKjKCKK?K>KKRKHKCK;K6KOKPKRKHKK?K>KLKZK9KEKuKfKTK=K2K1K1KKGKTKYKXKVKUKWKWKUKUKUKWKXKXKXKXKWKUKVKVKVKUKUKTKQKWKVKSK[KbKnKvK{KzKyKxKzKzK{KzKxKzK{KxKyK{KzKzKzKzKzKzKzKzK{KzKzKzKzKzKzKzKzKzK}K}KzKzKzKzKzK}K~K~K~K~K~K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKtKRKSKRKOKPKRK0KHK[KYKWKNKEKUKRK�K�K�K�K�K�K�K�K�K�K�K�K�K�KKKKKKKKKKKKKKKKKK�K�KK�K�K�K�KK�K�K�K�K�K�K�K�K�K�K�K�K�KcKDKIKHKDK;K?KGKFKFKFKGKFKGKIKHKFKGKFKFKFKGKFKCKCKCKCKDKDKCKAKCKAK>K?K>K;K2K4K:KK&K!K$K%KKKKK*KnKvKOK>KGKrKuKuKuKuKuKuKuKuKxKdK@KAKbKxKuKuKuKuKuKuKuKvKmKGK=KYKxKuKuKuKwKcKDK=KCKK=K;K:K8K8KIKQKQKQKQK>K.K-K+K*K)K&K&K%K&K&K-K2K6K=KAKLK}K{KtKpKpKnKmKxK}K~KK�K�KKxKtKpKpKnKnKtKwKxK|K�K�K�K|K�K�K�KhKSKKKKKKKKKKKKK/K5K/K
+KKK	K
+K
+K
+K
+K
+K
+K
+K
+K
+K
+K
+K
+K	K	K	KK	KKKKKKKKK
+K	K
+K
+KKKKKKKKKK	K	K	KKKKKKKKKKKKKKKKe]r2(KK#K%KKKKKKKKMKuKvKuKuKuKuKuKuKuKyK]K@KCKjKxKuKuKuKuKuKuKuKwKgKCK>K]KxKuKuKuKwK`K@K>KDK;KKRKPKRKCKKHKSKVKVKVKUKUKUKUKUKUKVKVKUKUKUKUKUKVKTKRKRKSKSKRKQKQKRKXKbKlKvKzKzKyKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|K~K{KzK}K}KzKzKzK|K~K}K}KzKzKzK|K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K`KPKQKQKPKSKAK0KVKWKXKTKKKKKTK\K�K�K�K�K�K�K�K�K�K�K}K�K�K�K�K�K�KKKKKKKKKKKK�K�KK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KKUKGKHKGKHK?K=KGKFKFKGKGKFKFKFKGKGKFKFKFKGKGKGKGKGKEKCKDKDKDKCKBK@K>K?K?K=K=K;K9KAKQKQKPKQKIK0K-K+K+K)K&K%K&K%K%K+K0K4KKAK2KKKKK K#K!KKBKxKkKBK@KUKxKuKuKuKuKuKuKuKuKwKVK=KEKpKwKuKuKuKuKuKuKuKxKbK>K?KeKxKuKuKuKwK\K>K?KCKKHKUKWKWKWKUKUKVKUKUKVKUKVKVKVKVKTKRKSKSKSKRKSKSKRKQKPKQKVKbKmKuK|K}KyKxKzKzKzKzKzKzKzKzKzKzKzKzKzK{K}K|KzK|K}K}K}K}K|KzK|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KKiKTKQKOKQKQKKK,KPKWKXKVKLKHKVKQK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KKKK�K�KK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KdKFKJKHKHKDK;KEKGKGKGKGKFKHKHKFKGKGKFKGKGKGKFKFKGKEKCKDKDKDKDKCKBKAK?K?K=K:K:K9K:KLKRKRKRKOK8K.K-K-K+K(K(K'K%K(K+K+K3K9K=KAKpKKuKrKpKoKmKrKxK{K~KK�K�KK{KvKoKnKnKnKpKqKtKvKyK�KK�K�KkKaK[K9KKKKKKKKKKKKKKKKKK
+K
+KKKK	K	KKK	K	K	KKKKKKK	K	K	K	K	KK
+KKKKKKKKK	K
+KKKKKKKK
+K
+K
+K
+KK	KKKKKKK	KKKe]r4(K%KKKKKKKK)K@K@K,KK K"K!KK!K(K"KVK|KcKBK@K^KxKuKuKuKuKuKuKuKuKuKPKKAK?KJKSKTKDK@K9K/K:KRKjKtK|KzK4KHKWK$KK!K7KJKSKTKTKSKQKIK?K4K5K>KHKTKWKWKUKUKUKUKUKUKUKUKUKTKRKSKRKRKSKSKRKSKSKSKSKQKPKQKWK_KlKvK{K}K{KzKzK{KzKzKzKzKzKzKzKzK{K}K|KzK|K~K}K}K~K|K{K}K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K~K}K}K}K~K~K}K~KKrKSKQKPKQKPKQK.KEKXKWKUKNKHKUKPK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KK�K�KK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KuKJKIKIKHKFK=KAKGKGKGKGKFKHKHKFKGKFKGKFKFKGKGKFKGKEKCKDKDKDKDKDKBKBK?K?K>K:K:K9K9KEKQKRKRKQKDK/K.K.K+K)K)K'K%K%K'K+K2K5K:K>KoK�KvKsKqKpKmKrKyK{K~KK�K}KK|KzKrKnKnKnKpKpKpKqKuKyKzKsKfKaK^KUKGK KKKKK	KKKKKKKKKKKK
+K
+KKKKKKKKKKKKKKKKKKKKKK
+K	KK	K	KKKKKK	K
+KKK
+K	KKKK	K
+K	K
+KK
KK
KKKKK	K
+KK
+e]r5(K!KKKKKK!KK+KBK=K)KKKK#KKKK&K_K{K[K=K@KdKxKuKuKuKuKuKuKuKvKqKKK>KPKwKuKuKuKuKuKuKuKuKxKYKKJKwKyKxKxKxKxKxKzKnKYKOKDKyKzKpK]KGK2K.K3KIKVKBKRKsK]KEK-K/KAK]KIKKJKWK7KK
+KKK-K=KMKSKUKSKPK:K>K@K8K7K?KIKTKWKWKVKUKUKSKRKRKRKRKRKRKRKRKSKRKRKSKRKSKRKQKQKPKOKQKTK_KmKwK|K~K|KzKzKzKzKzKzKzKzKzKzKzKzK|K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KKK}K}K}KKK}K}K~KzKUKQKRKQKOKQK4K;KVKVKWKPKGKNKSKjK�K�K�K�K�K�K�K�K�K�K~K�K�K�K�K�K�K�K�KKKKK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KUKGKIKGKGK@K=KGKGKIKGKGKFKFKFKHKHKGKFKFKGKGKFKGKEKCKDKDKDKDKDKBKAK?K?K@KKTKyKvKwKuKvKwKvKuKuKvKSKKQKYK@KLKwKyKxKxKxKxKxKzKpK[KOKCKtKwKzK}K{KlKRK=KHKVKBKEKdKnKqKVK3K+K+K%K$KMKWKBKKK
+K
+KKK#K4KAKMKQKMKQKQKPKIK=K6K:KCKMKTKWKVKSKRKRKRKRKRKRKRKSKRKSKRKRKRKRKQKQKQKRKSKQKOKNKPKTK^KkKuK{K}K{K{K{KzKzK{K|K{K|K{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KKK~K~K~KKK~K~K~K�K]KPKSKQKPKTKBK0KUKXKYKRKHKJKVKYK�K�K�K�K�K�K�K�K�K�KK�K�K�K�K�K�K�K�KK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KfKGKIKHKHKEK;KGKGKIKGKFKGKGKGKFK@KAKHKGKFKGKFKGKEKCKDKDKDKDKDKBKBK@K@K?K=K;K:K:K8KGKSKPKQKRK?K-K/K,K+K(K&K%K%K&K'K+K1K6K8KfK�KxKvKsKrKnKrKwKxK}K�K�K}K~K|K{KtKrKoKoKkKkKjKpKuKzKxKoKeK]KSKMKFK0K@K1K2KIKLKIKKKDK9K0K(KKKKKK	K
+K
+K
+K
+K
+KKKKKK
KKKKKKK	K	K	K	KKKKK	K	KKKKKKKKKKKKKKKKKKKKKKKKKKK	K
+K
Ke]r7(KKKKK%KKKK8KAK:K KKKK#K$K%K K&KkKvKNK>KIKrKvKuKuKuKuKwKyKvKwKhKCK?K\KyKxKyKuKvKyKwKuKuKuKNK>KMKuKuKuKuKyKnKIK;KDK?KKXKQK=K:K:K=K=K=K;K=K>KOKZKCKMKwKyKxKxKxKxKxKzKrK^KPKCKuKyKxKwKxKzK{KxK_KQKCK1K8KFKVK]KMK=KGKMKAKQKVKDK#KKKKKKK
KK%K>KIKLKRKTKQKNKMKEK?K8KKBKHKIKGKFKHKHKFKFKCKCKGKGKFKFKGKGKEKCKCKCKDKDKDKBKAKBKAK?K>K=K;K9K8K@KQKQKPKQKKK2K.K/K*K)K'K%K&K%K'K*K/K6K7KaK�KxKvKsKqKpKsKwKwK{KK�K|K}K~KzKtKpKoKqKoKnKkKkKoKvKvKlKbK[KQKIKBK1K?K&K:KJKPKRKKKDK@K@KBK9K,K6KKKKKKK
+K
+K
+K
+K
+K
+KK
K	KKKKK	KKKKKK
+K	KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKe]r8(K#KK%K$KKKK#K>K=K4KKKKK#K%K%K!K.KrKtKFK=KOKwKxKwKwKwKwKxKxKuKxKaKAKAKcKzKvKwKwKxKxKxKwKwKsKIKKK.K6KQKFKBK>K4KJKPKRKJK:K;K6K)K%K#K"K KKK&KRKOK8K6K7K3KKKKK2KYKGK4K#K&K(K+K0K4K3K>KXKOK>K:K:K:K;K=K=K;K=KOKZKCKMKwKyKxKxKxKxKxKzKrK]KPKDKtKyKxKxKxKxKwKyKfKTKCK?KFK6K5K?KGKCKGK]KfK[KTKFK5K)KKK	KKKKKK0KQK?K,K?KPKUKTKQKQKNKEK;K5K6KBKMKSKUKTKRKRKRKRKSKRKRKRKQKQKRKQKQKQKPKPKPKQKPKOKPKOKLKMKPKZKhKsKzKK~K|K|K}K|K{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KKKKKKK~K}K~KKKKKKKKKKKKKKKK�KrKQKSKQKPKPKRK-KFKWKWKUKNKFKTKPK~K�K�K�K�K�K�K�K�K�K�KK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KXKFKIKIKKKDK>KHKGKGKFKHKHKFKGKIKHKHKGKFKGKGKGKFKEKFKEKDKDKDKCKCKBKAK?K>K>K=KK/KKKKK#K&K'K$K5KtKnKAK=KXKyKxKyKyKyKyKxKxKuKxK[K@KBKhKyKuKvKxKyKxKxKxKyKmKEK=KVKxKuKuKtKzKeKBK:KDK=KK,K9KOKFKCKK:K:K9K;K=KKOKZKCKLKwKyKxKxKxKxKxKzKrK\KQKDKtKyKxKxKxKxKxKzKhKUKBKOK}KnKXKBK4K1K8KAKOKWKUKDKFKJK6K-K%KKK	KKK'KXKEKKK#K6KGKQKSKRKPKOKIKAK7K.K:KEKLKSKUKSKRKRKRKRKRKQKQKSKRKPKPKQKPKPKQKOKMKMKMKNKNKMKLKNKQKYKfKrKzK�KK{KzK|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KKKKKK�KK}K~K�KKKKKKKKKKKKKKK�K{KVKRKQKPKPKTK6K8KWKWKVKNKFKOKSKdK�K�K�K�K�K�K�K�K�K�K~K�K�K�K�K�K�K�K�K�KK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KhKGKIKIKIK@K>KGKJKGKFKHKHKFKGKIKIKIKGKFKGKGKGKGKGKGKFKDKDKDKDKDKBKAK?K?K?K>K6K9K9KCKRKSKTKTKDK.K.K.K+K)K)K&K%K%K%K,K1K1KXK�KyK{KwKrKpKsKwKwK{K�K�K|KzKzKzKtKpKoKqKrKpKpKqKrKrKqKhK`KVKJK@K:K?K=K4KKK[KUKNKJK;K3K4K;KBKAK9K:K'K
KKK	KK
+K
+K
+K
+K
+K
+K
+KKKK
+K
+K
+KKKKKKKKK
+KKKKKKK
+KK	K	K	KKKKKKKKKKKKKKKK	KK	KKe]r:(KCK=KKKKKK-KCK>K*KK!KKK"K$K%K#KK^KyKuKvKuKvKyKwKuKvKvKRK=KCKmKwKwKxKxKxKxKxKxKyKjKAK=K[K{KwKxKwKyKcK?K:KCK=KK-K>KNKEKCKKWKNKKQKVKCKMKlK`KMK5K*K'K KK	K"KSKJK*KK
+KKK+KK/KUKVKUKRKFKHKUKUK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K~K~K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KyKKKIKIKGKEKCKEKKKGKFKGKGKHKHKIKGKGKGKFKFKGKGKFKFKGKFKCKCKDKDKDKCKCKAK@K?K=K1K7K;K=KPKQKRKTKNK6K-K-K*K*K'K(K(K&K%K(K/K/KUK�K{KzKyKtKsKsKtKvK{K�K�K�K~KxKvKrKqKpKoKrKrKmKlKpKtKqKfK`KUKIK9K9KDK;KGKYKTKNKTKAK:K=K>K@K?KKcKzKxKxKxKxK]K>K=KCK=K=KK*KAKNKEKCK;K5KKKMKOKEK;K:K3K'K%K!K!K KKK/KUKHK:K8K7K0KKKKK8KYKDK/K#K&K(K/K1K4K2K?KXKMK;K9K:K=KKOKxKxKxKxKxKxKxKxKxKyK\K>K@KjK{KxKxKxKwKYK>K>KCK>K?K=KK7K4K-K"KK
KKKKKK	K	K	KKKKKKKKKK	KKKKKKKKKKKKKKKKK
+KKK
KKKKKKKKKKKKKKe]r=(KHKEKBKEK?K&K!K:KBK8K!KKKKKK#K%K!KSK{KJK>KHKtKyKxKxKxKxKxKxKxKzKkKAK>KVKyKxKxKxKxKxKxKxKxKyKWK=KCKnKyKxKxKyKvKSK=K?KAK=K>K=K:K)KHKKKDKBK6K:KLKMKNKBK(KIKIKCK@KCK9K,K:KAK4K KKKKKK K"K"KZKvKDK=KRKyKxKxKxKxKxKxKxKxKzKeK@K>K\K{KxKxKxKxKxKxKxKyKzKQK>KGKqKxKxKxKyKrKOK;KAK>K;KKSKDK8K8K8K1K(K)K.K/KJKSK?K7K6K7K6K6K5K5K.K;K[KKK:K9K:K9K9K:K9K:K=KPKWKCKSKyKyKxKyKzKzKzK|KqK[KQKKKvK{KzKzKzKzKzK}KkKUKMKOK{KzKzKzKzKyK~KTKK/KYKJK*KKKK,K9K=K>KBKHKWKUKNK;KEK?K2K-K$KKK
+KK.KGKMKQK2KK)K+K;KJKNKPKPKPKMKEK=K3K/K:KFKNKPKOKQKPKQKNKMKMKMKMKMKNKMKMKMKNKSKSKSKPKPKPKQKQKRKQKPKRKVK`KoK{K~KK~KK�KK�KK�K�KKKKKKKKKKKKKKKKKKKKKKKKKKKK�KKZKOKQKPKPKTK?K0KTKUKVKQKIKJKUKUK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KcKOKQKPKSKHK=KIKHKIKHKFKGKIKIKIKGKGKFKFKFKFKFKGKGKGKGKGKEKCKDKDKDKCKBK@K>K=KK=K>K,KKKKKKK K$K#K`KnK?KKKMKLKLK>K:K8K1K*K)K)K'K&K&K"KK4K4K?KLKQKOKNKLK9K3K9K5K5K;KGKNKQKQKNKMKMKNKPKOKMKOKOKNKQKRKSKSKNKMKPKQKPKQKSKSKNKOKNKOKWKaKqKzKK�KK~KK~K~KKKKKKKKKKKKKKKKKKKKKKKKKKKKK�KeKPKQKQKPKOKGK,KQKVKUKRKLKGKUKOK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KqKPKQKPKQKOKK=KK?KAK=K%KKKKKKKK!K&KgKcK;K=K_K|KxKxKxKxKxKxKxKxK{KZK>KAKjKzKxKxKxKxKxKxKxKzKsKGK;KOKzKxKxKxKzKoKLK8K@K=K=KKSKYKEKSKyKyKxKyKzKzKzK|KsKYKTKKKuK{KzKzKzKzKzK|KlKTKMKMKwK{KzKzKzKyK�KLK�K*KYKLK/KKKKKKKKK K1KLKRK=K?KWKcKaKFK1K4K3K/K)K(KDKVKEKCKeKgKjKnK\KCK7K6KAKJKOKIKIKLKIKAK;K5K6KKHKIKIKIKIKGKGKGKGKGKHKEKCKDKDKCKBKAK>KK[K{KyKxKxKzKhKCK8KCK=K=KK/K2KMKGKEK?K2KDKMKKKIK;K9K6K#KKKKKKK	K?KUKK>KMK[K\KQKQKLK:K8KJK[KgKtKrKuK{KzKpKXKIKEK=K@KMKPKNKKKKKHKAK9K4K5K;KIKNKSKWKUKTKVKUKSKTKTKSKSKRKRKSKSKSKRKSKRKRKSKSKSKRKQKQKPKOKTKaKlKzK�K�K�KK~KKKKKKKKKKKKKKKKKKKKKK�KKYKOKNKMKMKOK>K;KWKUKUKPKGKIKUKTK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KrKOKQKQKPKMKAKDKJKIKHKIKIKIKIKIKIKHKIKHKHKHKIKHKGKGKGKGKGKGKFKDKDKDKCKBK@K=KKAK3K K K!K!K!K!KK7KtKFKKK=KEKVKYKQKMK=K5K-K1K?KVKkKvKzKvK|K|KRKNK,KK*K0KCKLKOKLKJKHKEK?K8K-K2KAKQKXKWKVKVKUKRKSKRKRKOKQKSKRKRKRKRKRKRKRKRKRKQKPKQKQKQKPKNKMKUK`KmKxKK�K�KKKKKKKKKKKKKKKKKKKKK�K�K_KMKNKMKNKMKNKSKTKVKVKRKHKEKTKPK�K�K�K�K�K�K�K�K�K�K�K}K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KXKPKQKPKDKAKCKLKKKHKIKIKIKIKIKHKIKHKHKHKHKIKHKGKGKGKGKGKGKGKGKFKCKDK@K?K>K;K5K9K;KEKTKRKRKTKIK2K/K+K+K)K&K&K$K#K.KK�K�K�K�K�KKzKzK{K~KK�K~KuKrKwKvKuKqKqKnKjKjKkKlKfK\KSKCKDKXKLKNK\K`KdKeKeKcKaK`K_K`K_K\K\K[KZKVKTKTKVKYKYKSKOKKKTKOK@K?K@KDKBK>K;K:K8K5K6K8K3K-K&K"KKKKKKK1K2K)K"KKKKKKKKKKKK!K%K'K*K,K1K>KGKAK`K�e]rD(KHKIKHKGKGKHKHKHKDKAK@K7K!KK K K KK7KkKBKKhK|KxKxKxKyK_K?KK4K5K:K;KOKSKRKTKSK9K-K,K*K)K&K&K#K K,KK�K�K�K�K�K�K~K|K{K|K}KK|KrKmKqKuKvKuKuKsKoKnKlKiKeKZKOKAKIKSKIKXKaKdKdKeKeKcKaKaK`K`K_K^K\KZKYKWKWKXK[K[KXKQKOKRKVKLKDKFKEKHKGKDKAK>K>K>K?KAK7K3K.K.K-K,K+K,K'K#K,K?KK@KDKKKSK{K�K�e]rE(KIKHKFKFKFKFKGKIKIKDK@K@K6K KKKK!K)KK;K]K|KyK{KzKzK{KzKxKwK|KXKK;K@K;K;K=KKIKNKMK?KMKUKRKCK5K*K&K,K9KKKiKYKOK?KTK�K�KzKgKNK:K3K;KFKMKIKEKJKHKCK=K6K3K;KDKNKWKXKVKRKRKRKRKSKSKRKRKSKRKRKRKSKRKPKPKPKQKPKNKRKQKQKPKMKMKSK]KjKwKK�K�K�KKKK�KKKK�K�K�K�K�K�K�K�KvKRKQKOKMKMKNKRKUKUKTKRKOKGKKKRKPKQK[KbKuK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KsKRKQKOKRKOKAKFKKKHKIKHKIKHKHKIKHKIKIKIKIKIKHKFKFKGKGKGKGKGKGKFKCKBKAK@K=K=K:K:K:K8KGKSKRKSKUKFK.K,K)K)K&K&K%K"K(K|K�K�K�K�K�K�K�K�K~K}K|K}K{KrKkKiKnKqKtKvKvKuKpKlKiKcKZKNK@KPKLKQK_KdKeKdKeKeKcKaKbKaK_K`K`K\KYKWKXKZK[K]K[KWKRKTKZKUKMKLKKKKKKKIKHKCKBKCKFKFKCK?KKAK:K#KK!K)K:KEK=KhK}KwKxKyKzKxKxKxKyKzKWK=K?KiK{KyKzKzKzKzKzKxKxKxKJK7KHKxK{KyKxKzKvKUK;K>K@KK:K:KKEKJKHKJKAK;K=K5K#K!KKKKKK!KOKJK9K8K8K1KKKKK"KTKLK9K%K#K&K*K.K/K0K0KHKVKGK8K7K8K7K7K7K8K:K>KSKWKDKSKzK{KzKzKzKzKzK}KuKWKQKHKsK{KzKzK|K~K|KuKbKUKRKIK>K8K8K8K?KNKjKSKKKRKQKEKKKKK
KK-K5K$KK:KTKMK@K KK)K-K-K1K5K4K3KKIKMKPKJKBKDKIKKKJKGKGKGKHKNKOKbK�K�K�K�K�K�K�K�K�e]rH(KGKHKIKIKIKHKEKFKEKDKDKGKDK@KAK7K"K K-K8K>K;KJKrK|K~K~K|KzKyKyKyKzKPK=KEKqK|KzKzKzKzKzKzKzK{KuKFK9KJKxK{KzKzK}KtKOK8K?K?KKFKHKHKJK?K9K:K3K%K#K KKKKK$KQKJK8K7K8K1KKKKK$KVKLK8K&K"K%K*K.K/K1K2KJKWKDK:K6K8K6K9K7K8K:K;KRKWKDKVKzK{KzK{K}K|KzK}KtKWKRKIKsK~K}K|KrKbKTKDKAKSKSKIK@K9K;K=K=K:KKBKNKPK=KBKOKJKIKGKBK@K;K@KKKQKQKNKJK8K/K7KIKcKxK�K~KK}K�KfKIKEKK#K.K@KGKOKLKJKJKJKAK7K1K3K=KKKPKSKPKQKQKRKSKRKRKRKRKRKQKPKPKPKPKQKQKRKRKQKPKQKPKQKQKPKMKOKPKYKiKsKK�K�K�K�K�K�K�K�K�K�KiKPKQKQKOKLKOKMKSKSKSKPKLKEKNKQKRKNKXKIKVKOKOKPKOKOKMKMKOK^KxK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KuKQKQKPKPKRKCKEKIKIKIKHKIKIKIKHKIKIKHKIKIKHKGKGKIKGKFKGKGKFKEKCKCKDKCK@K?K>K=K;K:K8KDKSKSKUKTKNK4K,K)K&K&K"K!K$K{K�K�K�K�K�K�K�K�K�K�K�K�K}KuKkKiKjKhKfKiKqKwKwKqKjKdKZK?KBKOK[KdKhKiKfKgKeKdKdKdKfKgKeKaKaK`K\K^KcKaK]KZKYKXKXK\K]K\KXKTKRKOKNKSKPKPKSKTKNKFKGKKKLKKKIKIKIKHKFKEKBKEKNKNKRKRKJKHKKKOKOKKKKKJKKKSKPK�K�K�K�K�K�K�K�K�K�e]rI(KGKGKIKHKIKHKEKGKFKCKAKCKGKDK?K?K9K%K-K8K;KKKEKHKHKJK@K9K:K2K&K$K!KKKKK%KRKEK5K6K8K2KKKKK&KUKKK8K&K"K%K*K.K.K2K3KKKWKCK:K5K8K6K9K7K7K:K;KRKWKDKWK{K{KzK{K}K|KzK|KuKXKRKIKpKoK^KMK?K;K:K=KCKSKSKJK>K7K=KKK-K)K'K&K"KK(KK�K�K�K�K�K�K�K�K�K�K�K�K�KzKqKhKjKiKfKfKlKrKwKrKjKdKWK;KDKSKaKhKlKmKjKgKdKdKeKeKfKgKfKbKbKcK`K_KbKaK\KZK[K[K\K`K]K[KXKSKRKRKSKTKRKSKVKSKIKHKKKNKLKLKMKMKMKKKIKHKFKIKQKRKSKSKRKLKKKOKTKPKOKQKPKTK]K�K�K�K�K�K�K�K�K�K�e]rJ(KGKGKGKGKGKGKGKGKEKCKDKCKFKGKDK>KK:KXK}KzK{KzK{KmKHK7K@K=KKRKnKmKKKKK@KsK�K~KmKQK;K2K7KBKMKLKHKGK8K;KAK8K3K5K=KJKRKRKQKSKRKSKQKPKPKPKPKPKPKPKPKPKPKPKPKPKPKQKRKNKMKPKRKQKOKNKOKWKdKrK}K�K�K�K�K{KOKNKMKNKMKLKPKSK[KWKQKNKIKIKMKQKUKQKJKQKTKMKNKNKNKNKPKPKNKOKOKOKMKKKQK^KwK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KK�K�KcKLKQKPKQKNKAKIKIKHKHKHKHKHKIKHK?KEKIKHKIKIKIKIKIKIKGKGKGKGKGKEKCKDKCK?K=K;K9K:K7K6KFKUKRKSKTKIK1K-K(KK K0KIK�K�K�K�K�K�K�K�K�K�K�K�KKK|KvKkKiKjKfKbKgKmKsKuKlKaKRK9KIK\KeKjKoKqKqKeKaKcKfKfKfKfKdKdKdKeKcKaKbKaK\K[K[K[K^K`K\KXKVKPKQKUKUKTKWKWKWKPKJKNKPKOKMKNKOKQKOKJKJKJKJKNKUKTKUKVKSKOKOKQKWKXKUKVKWKRK�K�K�K�K�K�K�K�K�K�K�e]rK(KGKGKGKGKGKGKGKGKFKCKDKEKGKFKHKCK?K?KK=K;KJKsKyK}K~K|KzKzKyKyK}KeK;KKKKEKCK?KBKJKJKHKIK>K:K:K1K%K$K!KKKKK-KPKCK7K6K6K/KKKKK0KYKGK7K$K#K%K*K.K/K1K2KLKWKBK7K4K5K7K7K8K6K0K7KUKWKCKVK{K{K{K}K}K}KvKhKWKWKRKDK=K=K?K@K=KK>K:K:K:K6K;KSKSKRKSKSK8K0K/K)K3KKKSK�K�K�K�K�K�K�K�K�K�K�K�KKK|KyKnKiKgKgKcKaKhKqKvKkKaKNK:KRKcKjKlKmKpKqKfKbKeKgKhKhKgKfKeKeKfKfKbKaK`K^K_K_K_K`K`K\KXKUKTKVKWKVKTKXKZKUKNKMKPKRKSKOKQKSKRKOKNKNKMKNKTKYKUKVKWKVKRKSKSKVKXKTKVKeKyK�K�K�K�K�K�K�K�K�K�K�e]rL(KGKGKGKGKGKGKGKGKFKBKFKGKGKGKFKGKDK?K?K=KKdK~KzK{KzK|KbK>K8KAKKTKWKCKWK�K~KzKqKcKRKEK;K@KXKSKBK=K=KK?K@KRKTKJK-K+K8K9K8K:K9K0KK	KHKQKMK$KKKKKK#K*K
+KKKPKNKEK-K#K%K)K(K)K/K2K6K8KKK?KAK8K5K6K5K6K7K7K7K7K8K8K7K:KKKKK>KKVKTKUKVKQK5K0K7KDKHKCK}K�K�K�K�K�K�K�K�K�K�K�K�KKzKvKtKlKkKfK`KcKcKfKqKpKaK=KBKaKeKjKkKkKpKpKhKdKfKjKkKjKhKgKfKfKfKfKdK`K_K`KaK`K_KbK`K[KYKYKYKWKXKYK[K^KWKRKQKTKUKTKRKRKSKSKQKPKPKQKTKWK[KWKXKYKVKUK[KYKeK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]rN(KGKGKGKGKGKGKGKGKDKCKCKGKGKGKIKIKIKIKDK6K8K?K3K0K9K;K:K9K8K7K7K7K6K8K:K6K7K9K8K7K7K6K4K2K4K9K;K:KK=K4K*K#KK"K/KPKOKJK:K5K3K%K(K)K-K6KKKEKVKOK*KKKKKKKKKKKLKQKKK9K/K2K3K9K=K=K?K>KKHKOKNKHKIKPKPKRKVKZK\K_KbKeKiKUKNKCKYK�KtKhK]KUKMKAK;K?KIKLKLKHK;K3K(K*K;KQKiK|K�K�K~K�K}KMKNK2KK&K)K8KEKJKNKIKEKFKBK;K2K1K7K@KKKQKTKQKPKPKPKQKQKPKQKNKNKKKJKPKRKQKQKPKMKNKLKPKQKQKDKK+K:KKKDKAK:K2KFKHKIKEK9K;K9K-K%K"KKKKKK9KRK>K6K4K6K)KKKKK9KWKDK2K#K$K&K)K,K.K.K2KQKTK?K7K4K5K5K5K5K4K5K:KRKSKCK;K5K5K;K:KKEK;K8K/K*K.K>KVKnKK�K�KSKKK;KQKyKWK=K-K/K;KFKJKJKGKGKFK?K8K/K+K2KAKLKRKRKOKMKMKOKMKMKNKPKOKPKQKQKOKNKMKNKKKKKOKPKQKNKRKFK>KPKOKMKMKPKEK*KPKRKRKMKHKFKNKQKRKNKPKFKRKOKQKPKPKPKPKNKNKMKNKMKNKNKNKNKNKNKNKNKNKNKNKMKPKQKOKPKNKJKKKTKmK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K[KTKTKSKRKKKCKLKKKLKKKKKLKJKHKJKKKKKKKKKJKHKIKIKHKHKHKHKGKFKFKDKCKBKAK?KK:K6K8K4K.K1K0K1K.K:KK(K>KGKCKAK:K0KFKGKIKEK:KKRK>K6K4K5K(KKKKK;KTKCK2K#K$K&K)K+K-K.K2KQKSK?K7K4K4K4K4K5K3K4K:KRKSKCKKFKJKHKDKGK>K/K4K/K1K9KCKLKNKPKQKMKKKPKRKOKKKNKQKNKMKNKNKKKJKNKPKQKPKQKLKKK?KCKEKFK=K8K6K:K7K/K+K5KGKKKFKFKK�K~K�K�K�KqKXK=K-K2K=KFKIKBK=KDKFKAK:K3K2K6KBKJKPKPKPKNKIKFKNKQKNKNKPKPKNKKKNKNKMKAKIKQK>KLKOKNKMKMKSK-K@KWKQKOKLKBKFKOKSKUKWKKKQKSKQKQKQKQKQKQKQKQKNKMKNKNKNKNKMKMKMKMKMKNKNKMKNKNKMKNKNKNKNKNKNKOKNKKKJKHKRKkK�K�K�K�K�K�K�K�K�K�K�K�K�KhKZKOKPKHKIKPKMKNKMKKKLKKKKKLKLKKKHKJKLKJKHKIKIKIKGKGKGKGKDKDKBK?K=K7K7K9KBKMKOKOKSKUKUKUKTKCK.K.K+K"KnK�K�K�K�K�K�K�K�K�K�K�K�K�K~KzKvKpKhKeKeKcK^K`KgKmK[KCKTK^KaKbKfKiKoKmKkKlKkKkKkKkKkKlKkKjKhKcKcKgKgKfKgKeKaK]K\KbKaK`K`KbKbK]K[KXKWKYK[KYKVKWKWKVKUKTKTKXKZK`K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]rR(KDKDKAKDKEKFKFKGKFKCKCKDKCKEKGKGKGKGKGKGKGKGKCK?K=K:K8KAKEKFKFK=K7K;K/KKKKK#K*K/K4K8K8K8K:KKGKGKIKAK;K>K6K'K$K!K KKKKK?KOK=K6K4K4K$KKKKK?KSKAK0K"K$K%K*K+K-K-K1KPKRKAK5K2K3K4K4K4K3K3K;KTKRKCK;K4K0K1K&KKK%K.KAKUKPKEKLKEK9K,K KKKK,KQKQKIKQKHK/K*K;KKKJKEK,KKKK=KBKNKSKRKUKNKK�K;K�K
K
+KK	KKK5KVKNK;KKKKKKK)K$K K$K0KLKNKMKAK;K?KAKFKPKLKOKVKYK[KXKPKNKGKmK�K�K�KKKKKK�K�KVKLKEKZK�K�K~KsKjKcKVKIKDKAKFKIKKKIK?K-K+K*K+K;KSKpK�K�K�KK~K�K�KQKIK6KK"K2KAKIKKKFKDKEKBK=K8K2K1K8KDKJKLKOKPKMKLKLKMKOKPKFKMKLK>KNKMKMKMKMKIK%KHKSKQKMKIKBKHKMKLK(K+KJKLKJKJKHKGKIKNKOKOKOKOKNKMKNKNKMKNKNKNKNKNKNKNKMKLKNKMKMKLKKKLKLKLKLKLKLKLKMKNKMKLKNKOKLKNKLKIKMK^K~K�K�K�K�K�K�K�K�K�K�K�K�K�KhKSKKKKKJKFKMKNKLKLKJKHKJKJKJKHKEKCKAK?KAKIKRKSKUKTKFK)KKKK5KTKSKRKUKJK/K/K&K\K�K�K�K�K�K�K�K�K�K�K�K�K�K�KxKsKtKoKfKeKiKaK`K^KcKTK>KMKTKVKYKaKnKpKmKlKkKkKmKoKlKkKlKkKiKiKiKiKiKiKiKeKdKaK\KaKbKeKcKdKdKaK^K\K[K\K[KZKYK[KZKYKWKYKYK[K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]rU(K4K@KBKAKBKBKBKDKDKCKDKDKGKGKGKGKGKGKFKFKGKGKGKGKGKDK>K=K=KIKAK4K=KK7K5K:KK4K,KIKDKAK?K5KK8KK4K2K3K3K3K1K.K2K:KSKSKBKBKEK>K6K-K$KKKK)KUKQKGKfK}K�K�KK�KHK*KdKaKPKHK`K�KzK-K,KpK�KjKK@KKKNKLKBK=KAKCKIKNKMKMKSKUKTKSKNKNKFKcK�K�K�KKKKKKK�K]KLKGKSK�K�K�K�K�KzKsKeKTKKKGKDKIKKKFK'K*K2K3K+K(K-K=K[KtK�K�K�K�KUKGKK\K9K*K)K6KDKGKEKDKEKFKBK>K5K-K2K9KCKOKPKNKJKLKOKLK:KHKOK=KKKNKMKNKKKHK*K>KSKRKMKIKCKFKOKNK3K'K?KOKLKKKPKOKJKCKDKJKLKMKNKNKMKNKNKMKNKNKNKNKNKNKLKKKMKMKKKKKKKKKKKKKKKKKKKKKMKNKNKNKNKMKKKMKNKNKNKKKHKKKTKlK�K�K�K�K�K�K�K�K�K�K�K�K�K{K^KMKIKLKQKRKOKKKNKOKOKMKGKBKAKGKPKTKSKVKMK3KKKKKK$KQKUKRKSKTK;K/K&KWK�K�K�K�K�K�K�K�K�K�K�K�K�K�KsKoKuKpKcK_KeKgKbK^K`KOK?KJKQKTKYKcKnKnKnKlKlKkKmKoKlKkKlKkKiKiKiKiKiKiKiKcKcKeKbKdKdKdKeKfKdK`K_K]K]K]K\KZK[KZK[KZKWKXKZKtK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]rV(K(K6KAK@K?KAKBKBKDKDKDKDKDKEKGKGKGKGKGKGKGKGKFKDKEKDKAK?K@KSKvKHK9K:K(K%KKKKK!K'K-K3K6K7K:K:K6K5K-K'KKK
+KK7K=KKHKFKFKKK0K-K4K9K9K7K2K,K,KRKRKEK KKKKAKfKwK�K�KyKWKRKIKrK�K}K~K|K�KGK"K^KcKMKIK_K�K}K:K/KjK�K�K^KK7KTKMKGK#K7K�K KKKK
+KKK&KSKMKDKKKKK K KKKK
KKBKPKKKHKFKEKNKSKSKQKTKRKSKUKSKNKNKGKUK}K�K�K�K�K�K�K�KK�KkKKKIKIK{K�K�K�K�K�K�K�K�K�KKwK[KHKHK-K%K1K;K@KDKDKBK;K0K)K*K5KLKTKHKCKAKxK�K�K�K�KtKYK>K1K0K:KGKKKGK;KCKEKBKK@KDKDKDKDKDKDKDKEKGKGKGKGKGKGKGKGKEKDKDKDKAK?K>K=K=K2KNK�K�KK�K|KtKiKHKK!K5KK;K9K;K9K;KK*K3KJKDK@KK0K#K"KKKKKK$KMKGK9K1K2K0K'K"K"K"K+KMKNKAK2K*K+K,K.K.K0K/K6KQKMK>K5K1K2K/K&KKKK KQKRKDK>K`KAK0KhK�K~K|K}KtKWKRKHKsK�KKK}K�KIK"K^KeKOKKK_K�K~K?K.KfK�K�K`KK2KTKPKIK+KKUK;K�KK	KK
+KK!KSKMKFKKKKKKKKKKKKAKPKNKAK,K.K2KBKKKMKSK[K_K^K]KSKNKHKOKuK�K�K�K�K�KK�K�K�KrKKKKKIKvK�K�K�K�K�K�K�K�K�K�K�KnKIKKK2K$K0K;K@KCKDKHKGKCKAK;K3K)K3KGKGK@KnK�K�K�K�K�K�K�KrKUK>K1K5K@KFKJKGKCKCKDKCK:K3K.K,K0K:KGKGK>KNKNKMKKKKKLK*KIKPKRKMKHKBKGKNKMK;K/K]K�K�K�K�K�K�KhKPKSKRKOKMKHKHKFKEKJKMKOKNKNKNKMKNKNKMKMKMKNKLKKKLKLKLKKKKKMKNKNKMKKKLKLKLKLKKKKKLKKKLKKKKKMKNKNKOKOKNKJKHKNKaK�K�K�K�K�K�K�K�K�K�K�K�K�K�KyKdKXKRKTKLK3K K#K*K,K-K,K&K'K&K&K&K,KNKTKRKSKSKBK+KEK�K�K�K�K�K�K�K�K�K�K�K�K�K~KoKiKjKlKlKjK`KWKVK]K^KLK4K?KCKLK[KnKoKnKoKoKoKnKnKnKoKnKlKkKkKkKlKjKiKkKkKjKhKfKgKdKdKdKhKeKdKbKaKcK`K`K^K\K]K\K[K[K[KXK[K]KgK�K|K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]rY(KKK(K+K4K?K?KAKCKDKDKDKDKDKDKDKFKFKGKGKGKEKFKGKEKCKDKEKEKAK=K?K@K-KWK�KzKzK{K|K~K�K]KK@K?K9K@KAK0K"KKKKK*K9K;K:KK(KKKKKKKK%KRKRKCKUK�KGK-KbK�K}K~KKvKWKRKHKsK�KKK}K�KMK%K^KeKPKKK^K�K�KDK/KfK�K�KhKK3KTKPKJK5K%KKK-KK	KKKKK&KRKNKGK"KKKKKKKKK+K/KDKOKLKHK;K5K0K0K0K6K?KNK[KcKdKVKLKIKMKfKK�K�K�K�K�K�K�K�KwKNKLKIKoK�K�K�K�K�K�K�K�K�K�K�KwKKKKK7K%K1K:K?KCKDKFKEKFKFKFKDK@K;KFKIKAK;KUKrK�K�K�K�K�K�K�K�KpKRKBKKBKFKIKFKDKAKAK@K:K4K-K*K$K5KPKMKMKLKKKPK-K=KQKRKQKJKCKEKMKOK@K:K�K�K�K�K�K�K�K�KCKFKNKJKLKPKPKDKKDKEKFKHK?K7K2K9KLK\KTKLKIKJKcKwK�K�K�K�K�K�K�K�K|KSKLKIKjK�K�K�K�K�K�K�K�K�K�K�K{KNKKK>K$K.K7K>KBKEKGKGKGKFKFKDKDKBKGKJKDK/K$K-K>K[KuK�K�K�K�K�K�K�KqKEKDK0KK+K=KIKGKDKDKEKEK>K7KK&KRKMKNKNKLKPK4K0KRKRKQKLKDKBKNKSK@KeK�K�K�K�K�K�K�K�KUK*K,KiKeKIKNKGKBKLKIKEKDKDKDKJKNKNKNKNKMKMKNKNKLKKKKKMKNKNKNKKKKKLKLKLKKKKKKKKKKKLKKKKKKKKKKKKKKKKKKKKKMKNKNKOKOKNKKKHKNK`K�K�K�K�K�K�K�K�K�K�K�K�K�K�KtKTK=K6K7K7K8K8K5K5K1K1K8KPKTKRKSKUK;K3K~K�K�K�K�K�K�K�K�K�K�K�KK}KzKmKfKdKdKfKgK\KVKRKRKMK2K3K@KLKaKkKoKoKnKkKlKoKnKoKnKoKoKoKoKmKkKlKlKlKkKiKiKjKgKfKgKfKdKdKeKbKaKbKaK_K`K_K`K^K\K\K]K[KZK[K[KoK~KeK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�e]r[(K$KKKK(K'K5KBKBKDKBKCKDKDKDKDKDKDKEKEKEKHKGKGKEKCKFKGKEKFKFK@K6K>KMKsK�KK}K}K}K�KBKKGK;K:KYK�K~K�K�K�KlK5K)K9KK9K1KEKFKGKCK;K=K;KKKKKKKKK.KQKCK2KKKKKKKK"KNKJKK+K&K%K"K"KKKK&KPKRKDKTK�KAK$KcK�KKK�KxKXKRKHKsK�KKK~K�KXK-KXKeKOKKKWK~K�KPK+K[K�KK�KWK1KPKQKLK(KjKYKK!K!KKK&K'K-KLKNKKK4K'K)K&K(K&K&K'K'K#KK/KNKNKHK$KKKK#K,K9KEK?KPKNKHKKKKKFK[KeKtK�K�K�K�K�K�K�K�K]KJKIK\K�K�K�K�K�K�K�K�K�K�K�K�K\KHKFK(K*K3KK6K1KEKFKGK@K:K=K8K'K!KKKKKKK1KPKCK3K KK KKKKK'KQKIK;K$K K"K%K&K&K)K&K6KTKIKK:KHKpK�K~KuK,K2KHK8K;KnK�KKKK�KXK-K+K;K;K=K;K:K5K'KEKGKAK?K6K6KFKFKFK@K9K=K8K'K!KKKKKKK2KOKCK1KKKKKKKK(KRKHK:K#K K K"K%K&K$K"K6KTKHK=K%KKKKKKKK'KRKRKDKWK�KBK$KcK�KKK�KxKTKPKJKrK�KKK~K�KYK*KXKiKQKMKUKK�KXK*KWK�KKKKyKSKOKKK.KKK K"K%K'K&K&K&K+KJKNKJK7K'K)K+K+K*K'K K$K+K+K3KLKMKKK2K%K%K%K'K$K#KKKKK2KLKKKFKLKPK`KNK(KBK_KzK�K�K�KiKJKKKOK}K�K�K�K�K�K�K�K�K�K�K�KkKEKJK1K$K2K;K>KDKGKEKFKGKFKFKFKDKBKFKJK?KKK K"K%K1KK4K6KFKGKBKXKyK�K�K�K�K�K�K�K�KuK|KwKGKFKGKEKCKGK8K'KPKQKOKJKFKBKNKLKxK�K�K�K�K�K�K�K�K�K|K�K�K�K�K�K�K�KnKHK-K�K�K�KqKaKPKJKKKLKLKLKLKHKDKDKEKGKJKNKLKLKKKKKLKLKKKKKKKKKKKKKLKLKKKKKLKKKLKLKKKKKKKLKJKHKHKHKKKLKLKJKHKIKIKKKLKKKLKLKLKMKMKLKKKHKNKZKuK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KjKEKKBK8KDKxK�KKK~K}KNK,K1K:K:KK2K8KEKCKDK>K:K>K5K!KKKKKKKKK8KIK~K�KKK�K|KKK-K2K:K9K9K;K?K2K-KEKDKBK>K2K8KDKCKDKK/K K"KKKKKK/KTKIK9KKKKK!K K!KK7KUKHK;K!K K!K K K K!K#K0KSKNKAKXK�KIK-KeK�KKK�KxKSKRKJKrK�KKK~K�KbK*KTKkKOKLKQKzK�KcK+KNK�K~K�KTK'KMKRKMK5K 
K&K&K$K#K"K"K$K(K*KEKOKKK>K!KKK+K0K2K.K/K/K.K2KIKNKJKKEKDKDKDKBKDKDKCKDKCKCKHKCKFKrK�K�KuKgKZKPKGK>K;K;K=KAKGKGKFK7K(K#K&K3KJKgK~K�K�K�K�K�K\KCKGKFKGKDKFK&K@KPKOKMKIKCKIKQKQK�K�K�K�K�K�KLK�K�K�KbK�K�K�K[K�K�K�KUKK�K�K�K�K�K�K�KTK>K3KDKGKJKLKKKMKKK9K>KEKCKEKJKJKKKLKKKKKLKLKKKKKLKKKKKLKKKKKKKLKKKKKKKLKLKLKLKLKKKHKHKIKHKIKHKHKHKJKLKLKLKLKLKKKKKLKLKLKOKMKGKGKNK_K|K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KsKsKyKmK`KeK_K]KVKPKOK2KK?KZKbKfKjKlKnKoKqKqKqKpKnKoKnKlKkKlKkKkKkKkKnKnKiKhKjKgKfKgKeKgKhKcKbKfKdKhKcKdKfKaK`K_K_K`K^KXK_K^K\KbK[KWK^K\KYKbKcKnK{KKxK|K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K~e]rb(K)K*K+K*K(K%KKK K)K1K5K4K:K>KAKCKBKBKBKBKCKCKEKEKEKFKGKDKDKFKEKEKDKDKDKCKDKCK>K;K7K,KKKKBKDKCKFK1K3KBKCKCKCKCKIKIKDKzK�K�K�K�K�KrKcKYKNKGKAK=KDKFKFK7K;K3K.K'K"K+K8KQKmK�K�K�KiKDKGKFKGKCKGK-K4KQKOKMKJKDKCKPKLK�K�K�K�K�KWKAK�K�K�KhK�K�KWK5KUK\KsKXKvK�K�K�K�K�K�K�KcKDK%K"KxK�KVKLKJKJK7KBKKKGKBKBKDKGKJKLKLKKKKKKKKKLKKKKKLKKKKKLKKKLKKKKKKKKKKKKKLKKKJKJKHKIKHKJKJKHKIKJKJKJKJKJKLKKKJKIKHKIKJKKKMKLKKKHKJKSKkK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KKyK�KzKqKxKtKaKdK_KZKVKPKQK/KKKK[KcKfKiKlKnKoKoKoKoKoKnKoKnKkKkKlKlKlKlKnKoKmKkKiKhKfKeKgKhKgKgKdK`KcKhKgKdKbKcK_K]KaK_K[K[KXK`KaK\KbK]KVK^K]KWKWKVK_KqKtKrKjKiKqK|KgK�K�K�K�K�K�K�K�K�K�K�K�e]rc(K+K+K+K)K$KKK K&KKK3K7K7K8KBKDKBKAKAKBKCKCKCKCKCKFKGKDKEKHKEKCKDKDKCKDKCKDKAK=K>K>KIK9K7KVK�KKKK�KpKAK.K6K>K=K=K;K;K*K3KGKAK?K=K1K=KFKDKCK;K;K?K/KKKKKKKKKAKKK;K.K(K)KKKKKK1KQKGK8KKKKK K!K!KK9KRKIK;K"K#K$K$K$K&K&K%K1KQKNKAKXK�KHK-KdK�KKK�KxKTKQKIKrK�KKK�K�KfK+KOKmKRKMKPK{K�KhK*KHK|K�KQK$K%KHKRKKK;K$K&K&K&K'K)K)K(K)K&K=KQKLKBK1K2K0K0K0K0K0K3K2K1K3KEKNKKK@K1K2K-K!K!K)K$K$K!KKK2KNKJK=KK'KK'K,K&K*K*K*K-K/KFKMKLKBK3K:KOKkK�K�K�K�K�K�K�K�KaKEKHK+K)K5K=K@KDKCK3KKK9KBKCKCKBKHKIKBKnK�K�K�K�K�K�K�K}KpK`KVKNKFKDKDK9K;K=K6K3K3K1K*K&K*K:KSKsKsKHKFKGKGKDKFK5K*KQKPKMKJKGKBKNKKKuK�K�K�K�KAKBK�K�K�K�K�K�K4K2KLKTKPKNK]K�K�K�K�K�K�K�KtKNK1KBK�K�K{KnKcKQKDKIKLKLKKKGKGKEKDKGKFKFKLKLKKKKKKKKKKKLKLKKKLKKKKKKKKKKKKKKKKKKKLKJKHKHKHKKKKKHKHKHKHKHKHKIKLKJKHKHKIKHKIKLKLKLKKKKKNKLKHKEKMK_KK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KmKmKzKdKcKbKXKVKOKPK,K(KPK]KcKfKiKlKoKoKnKnKnKnKnKoKnKlKlKlKlKkKlKpKpKkKhKhKfKgKgKhKjKfKfKeK`KbKfKgKeKaKbK^K\KcK_KZK[KZK^KeK^KaKbKYK]K^KXKYKWK[KhKyKpKkKjKhKhKYKsK�K�K�K�K�K�K�K�K�K�K�e]rd(K*K,K*K&K!KKK#K!KKKK/K8K8K;KAKCKAKBKAKCKEKDKEKGKEKDKFKFKDKDKDKDKDKCKCKCKDKDKBK=K>K:K6K7K]K�K~KKK�KkK>K.K6K;K:K=KK*K8KGKAK?KK(K;KGKAK?K;K0K@KDKDKBK;K=K>K+KKKKKKKKKFKHK;K.K(K'KKK
KKK6KRKFK2KKKKKKK K K=KRKHK;K&K&K%K%K(K)K)K'K4KTKPKDK[K�KHK-KeK�K�K�K�KwKTKQKIKsK�K�K�K�K�KkK*KIKmKQKOKNKzK�KqK-KCKvKAK$K(K$KEKRKMKKAKBKEKBK>K@KAKDKDKBKGKJKBKYK�K�K�K�K�K�K�K�K�K�K�K�K�KOKFKCK?KIK>K8K5K6K=KCKBKAK=K/K(K?KEKCKDKCKCKDK&K@KPKQKNKJKAKGKQKPK�K�K�KK*KKKKKKKKKGKGK9K-K(K'KKK
KKK9KRKCK0KKKKK!K K"K"K@KSKIK:K'K&K%K'K)K(K*K)K3KOKMKDK[K�KHK.KdK�K�K�K�KwKTKQKIKsK�K�K�K�K�KmK+KIKpKSKOKMKzK�KrK/KAKMK(K(K$K$KCKPKKK=K(K&K$KKKKK$K+K,K;KOKLKEK1K1K3K2K5K4K3K3K4K5K5KCKMKLKBK"K&K'K"K"K KKKKKK*KOKIKIKKKKKKKK&K&K'K'K9KJKGKEK6K1K4K5K8K8K9K8K:KEKZKvK{KJKIKK:K;K;KBKJKLKCKCKGKGKGKDKCKGK-K6KQKMKMKIKDKEKQKJK�K�KOK?KCKEK=K�K�K�KrK9K2K.K1KFKLKMKLKgK�K�K�K�K�K�K�KoKVK�K�K�K�K;KCKbKmK\KKK5K&K8KUKQKKKKKMKLKJK7K=KBKAKBKFKLKLKLKLKKKLKLKKKKKKKKKKKKKKKLKLKLKKKLKKKIKIKHKIKHKHKHKHKHKIKHKHKHKHKIKIKIKIKHKIKIKIKIKIKHKHKIKIKJKKKLKIKHKJKYKxK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KjK6K4KKKYKaKhKjKjKjKfKgKkKmKqKmKmKoKmKjKiKjKkKpKqKmKiKfKgKeKiKjKiKjKfK`KaKfKkKgKfKgK]KZKaK^K]KaK[KVK[K]K`KdKeKeKaK\K\KXK\KYK]K[KUKUKVKWKPKPKWK\KZK}K�K�K�K�K�K�K�K�K�e]rg(K-K&K!KKK&K KKKKKKKK&K6K5K6K:KCKDKCK@KBKDKDKDKDKCKCKCKDKEKDKDKCKCKDKCKCKBKAKAKAK1K,KDKnK�K~K�K�K\K5K+K6K:K;K=K;K:K*K?KDKBK?K;K:KCKDKDKAK8K>K=K*KKKKKKKKKHKGK9K-K(K'KKKKKK:KQKBK/KKKKK K!K#K#K@KRKHK:K'K&K'K(K)K)K)K)K3KOKNKCK[K�KHK-KdK�K�K�K�KwKTKQKIKsK�K�K�K�K�KoK,KHKpKRKNKMKyK�KrK3K6K-K(K(K%K%KBKPKKK>K#KKKK K(K+K,K,K*K9KOKLKFK6K1K3K4K6K7K7K7K7K5K5KBKLKLKEKKKKKKKKKKKK!KJKIKIK%KKK
+KKKKKK$K&K4KIKHKEK8K2K4K5K6K7K9K:K;K9K6KK6K8KDKmK�K�K~KVK4K+K6K;K>K=K;K9K7K@KFKBK?K=K?KCKDKDKAK;K>K;K&KKKKKKKKKIKGK:K-K(K%KKKKKK=KPK@K/KKKKKK!K%K%KAKQKEK9K'K&K*K)K(K)K'K'K4KPKOKAK[K�KHK*KcK�K�K�K�KwKTKQKIKsK�K�K�K�K�KqK,KEKpKQKPKJKvK�KwK6K/K)K(K&K&K!K>KQKLK?KKK"K)K,K+K*K*K+K'K5KNKLKFK8K4K7K7K8K8K8K8K7K9K6KKMKIK1KKKKKK"K%K'K&K K!KEKJKHK;K&K.K2K2K1K4K5K6K6K6K9K8K>KGKGK-K&K0K:K@KCKBKEKGKGKGKGKGKCKAKDKIKAK]K�K�K�K�K�K�K�K�K�K�K�K�K�KSKDKAKZK�K�K�K�K�K�K�K�K�KtKhK^KJKEKGKDKCKCKFK*K6KPKMKMKHKBKAKKKLKEK?K>K?KBKCK@K�K�K�K{K;K1K/K0KBKHKKKLKaK�K�K�K�K�K�K�KKOK�K�K�K�K{KiK[KkKwKMKeK�K�K�K�KoKdKZKYKaKQKDKFKHKKKJKJKJKJKGKGKDKBKAKBKFKLKMKLKLKLKLKKKLKKKHKHKHKHKHKIKHKHKHKIKHKHKHKHKHKHKHKHKHKHKHKHKHKHKHKHKHKHKHKIKIKIKHKIKIKIKIKIKHKHKIKIKLKLKIKFKFKLK`K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KwKnKiKiKjKjKmKrKrKkKcKfKjKjKlKlKgKbK^K^KcKhKlKkKjKbK[K`K]KZKbK`KcKaK[KWKVK_KbKeKfK`KXK]KdK_K[KUK[K`K_KTKQKMKLKYK\K_K]K`K_K}KzKpK�K�K�Ke]rk(KKK#K%KKKKKKK
+K%K8K5K4K0K/KKK!K)K(K1KAKCKBKBKBKDKCKAKBKBKBKBKBKBKBKBKBKBKAKBKAKBK@K?K=K8K7K?K^KGK-K-K9K=K=K>K=K:K7KBKBKAK=K;K?KCKDKFKKBKCKDKDKDKDKDKDKDKAKEKIKCKUK�K�K�K�K�K�K�K�K�K�K�K�K�K]KCKCKLK�K�K�K�K�K�K�K�K�K�K�K~KaKEKGKFKFKCKFK4K.KOKLKMKIKFK@KHKMKHK?K?K?KAKAKSK�K�K�K�KBK5K1K-K=KFKHKMKQK�K�K�K�KbKzKrK^KKK�K�K�K�K�K�K�K�KrKWKbK�K�K�K�K�KxKtKlKfK]KLK=K'K8KCKEKIKLKIKHKGKGKCK9KAK@KCKHKIKKKJKLKLKKKKKKKKKKKKKIKIKKKJKHKIKHKHKHKHKHKHKHKHKHKHKHKHKHKHKHKIKIKIKHKGKHKIKIKHKGKGKIKIKIKGKGKHKHKHKJKLKKKHKDKFKVKrK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KlKiKkKqKrKjKfKgKiKkKlKlKgK[K`KcKfKlKnKjKgKaKZK\KaKZK_KdKdK`K[K[KVK^KbKcKhKbK[KZKaKbK]KVK[K^KdKXKSKRKOKZKaKcKbKbK_KtK�KhK�K�K�Kne]rl(KKK%KKKKKKKKK7K4K4K3K1K'KKKKK%K*K3KAKBKAKBKCKCKAKBKBKBKAKAKAKAKBKBKAKAKBKAKBK@K?K?KKAKDKCKCKCKCKCKCKDKCKDKFKEKMK�K�K�K�K�K�K�K�K�K�K�K�K�KjKBKDKAK|K�K�K�K�K�K�K�K�K�K�K�K~KIKFKGKFKDKDK=K&KJKMKMKJKGK>KDKNKJK?K>K>KBK?K�K�K�K�K�KOK8K2K-K6KFKHKNKLK�K�K�KRK=KHK;K>KLKhK�K�K�K�K�K�K�K�KqKKK�K�K�K�K�KbKdK`KaKfKQKGK&KK?K�KdKGKHKHKJKJKAK1K@KDK@K@K@KCKHKLKMKKKLKKKLKLKLKIKIKLKKKIKIKIKIKHKHKHKHKHKHKHKHKHKHKHKHKHKHKHKHKHKFKHKHKHKHKFKGKHKHKHKGKFKHKHKIKHKGKGKHKHKIKHKDKDKMKbK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K~KnKaKbKgKkKlKlKjKcKZK_KeKgKlKmKkKfKaK[KXKbK[K]KeKdK`K[K[KUKZKaKcKiKbK\KZKbKeK`KZK]K^KhK]KZKZKQKWKdKfKeKaK_KbK�KiKvK�K�Kle]rm(KK$K$KKKKKKKK5K:K7K1K0K&K,KKKKKK'K/K7KAKDKCKDKCKAKBKBK@K>KBKAKAKBKBKCKDKBK@KBKAKBK?K?K=K:K7K8K0K,K7K:K:KK=K:K9K5KKKK%K"K'KFKMKJKKVKKKJKHKzK�K�KGK0K(KKKKKK3KLKHKGK-K%K&K"KKKK(K1K5K7K5K9KGKJK>K+K/K7K>KBKDKCKDKDKCKCKCKDKCKCKGKFKFKyK�K�K�K�K�K�K�K�K�K�K�K�KwKEKHKAKpK�K�K�K�K�K�K�K�K�K�K�K�KRKCKDKDKCKDKCK#KDKOKNKLKHK@KCKOKLKBK>K?K?KSK�K�K�K�K�KfK9K3K.K1KFKKKLKLKkK�K�K5K;KDKSKxKqKRK�K�K�K�K�K�K�K�K�KKKK�K�K�K�K�K�KrKhKFKCKNK:K#KuK�K�KVKVKQKJKGKGK6KBKJKIKGKEKAK?KAKEKGKJKKKLKKKKKKKKKLKKKLKJKHKIKIKHKHKHKHKHKHKHKHKHKHKHKIKGKFKFKGKIKGKFKHKHKGKGKGKGKGKGKFKGKGKGKGKGKFKFKFKGKGKIKJKIKFKEKGKWKuK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KmKhKhKkKhKaK\K]KcKgKnKmKjKfKbK^KWKaK`KZKeKbK_K^K]KXKYK`KdKlKgK_K^KfKhKfK_KbK`KjKcK[K\KTKUKaKhKfKaK_KbKpKdK^KvK�Kte]rn(K"K"KKKKKKK
K,K>K;K:K2K(K'K>KKK%K&K$K K(K.K9KBKDKCKCKBKBK@K?K>KAKAKAKBKBKCKCKBK@KBKBKBK@K?K;K?K9K3K3K,K6K9K;K=KK;K:K6KKK%K&K$K$K,KFKMKKKAK/K0K/K/K0K4K8K2K1K8KKwKHKKKFKvK�K�KEK$KSK\K4KKKK(KLKIKFK2K,K-K*K&K$KKKK#K.K3K7KEKJKCK8K8K8K=KAKDKDKBKCKDKDKDKDKBKDKGKGKCKoK�K�K�K�K�K�K�K�K�K�K�K�K�KJKFK>KdK�K�K�K�K�K�K�K�K�K�K�K�K[KAKDKCKCKCKDK9KCKMKMKLKIKCKAKJKKKDK=KAK?K�K�K�K�K�K�K�K=K6K0K-K@KIKHKJKUK�K]K2K:KvK�K�K�KPK�K�K�K�K�K�K�K�K�KZK`K�K�K�K�K�K�K�K�KwKIKLKDK7K�K�K�KcKVK[K^K[KOKFKHKHKGKGKHKFKDKBK@K@KCKHKJKLKMKLKKKKKKKLKJKIKIKHKIKIKHKHKHKHKHKHKHKHKHKIKGKGKFKGKHKGKFKHKHKGKFKGKFKFKFKGKGKFKGKFKDKFKGKGKGKFKGKHKGKGKIKHKFKCKBKNKdK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KzKdKWKZK^KfKkKqKnKkKfKbK_KWK`KeKZKaKfK`K^K^K\K]KaKdKnKnKcK`KfKlKkKdKbK`KkKeK^K^KUKUK\KeKfK`K\K`K]KaKzK�K�K�e]ro(K%KKKKKKKK K7K9K=K;K1K!KK>K>K@KBKBKBKBKBKBKBKBKBKAKBKBK:K>K=K9K1K%K4K9KK:K7K>KCKDKDK2K.K?KGK@K1KKKKKKK2KNK?K1K)K)KKKKKKKGKJK>K'KKKK K#K$K!K%KIKOKCK4K'K)K)K(K)K)K)K'K3KOKMKCK^K�KJK,KbK�K�K�K�KyKQKPKGKqK�K�K�K�K�K|K2K9KoKVKKKJKBK.K+K(K$K$K$K'K)K'K8KQKMKHKeKeKQKDK?KBK@K:K5K8K:KJKNKJKGK>K:K9K#KK K$K#K%K)K,KEKMKJKCK1K0K0K0K3K6K8K5K3K?KuK{KJKJKEKqK�K�KQKKUK�K�KhKGK%K'KJKIKIK2K)K.K+K,K+K&K%K"KKK K-KCKJKFK:K8K7K9K=K?KBKAKCKDKGKFKCKCKDKFKHKCKdK�K�K�K�K�K�K�K�K�K�K�K�K�KSKAKAKTK�K�K�K�K�K�K�K�K�K�K�K�KjKBKDKDKCKDKCKGKEKKKLKLKJKGK?KHKKKHK@K@KQK�K�K�K�K�K�K�KFK5K2K-K:KFK@K9KMKbK?K8K5KoK�K�K�K[KhK�K�K�K�K�K�K�K�KoKLK�K�K�K�K�K�K�K�K�K{KQKDKhK�K�K�K�K`K[KYKiKtKbKJKJKJKIKGKFKHKJKGKEKCKAK?KBKIKJKIKHKIKLKKKLKIKHKHKHKIKHKHKHKHKHKHKHKHKHKIKHKGKFKFKFKGKFKFKFKFKFKFKGKFKGKGKDKGKGKGKGKFKFKFKGKFKFKHKEKFKGKGKHKHKHKDKCKHKXKzK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KiKfKjKnKnKlKhKbK`K[K`KgK]K_KiKdKaK_KaKaKdKiKnKpKeKbKhKlKlKhKbK^KfKfK`K^KYKYK[K]KdK_K_K_K\K�K�K�K�K�e]rp(K"KKKKKKKK9K9K;KK=K=K3K"K4K:K;K=K;K%K7KEKBK?K:K,KK(KKKK!K!K$K!K&KIKNKCK4K'K'K'K)K(K'K(K'K3KOKMKCK^K�KJK,KbK�K�K�K�KzKRKOKHKrK�K�K�K�K�K}K3K6KnKXKNKKK9K,K+K)K'K(K*K+K,K.K=KOKMKIKNK:K KKKKKKK'K/KFKPKKKFKK5K4K5K7K9KK@KBKBKBKBKBKBKBKAKAK@K;K=K=K;K8K0K1K:K:K=K;K!K:KCKBKAK9K1KAKCKAKCKDK?K=KDKHKGKIKKK=K%KKK7KOK>K1K&K'KKKKKKKKKHK>K&KKKK!K K$K"K'KHKNKCK4K'K&K&K)K'K%K(K'K3KPKMKCK^K�KKK,KbK�K�K�K�KzKSKNKJKsK�K�K�K�K�K~K6K5KnKYKPKLK:K)K+K+K,K)K"KKKK5KMKLKHKZK^KjKZK#K"KKKKKK?KQKLKEKK?KCKDKDKCKGKFKDKAKDKNKbKwK�K�K�K�K�K�K�K�K�KnKBKDKAKyK�K�K�K�K�K�K�K�K�K�K�K�KMKBKDKDKCKCKDKDKGKMKNKLKGK@KCKLKMKCKNK�K�K�K�K�K�K�K�KrK;K2K.K/K5K$K"K7KNKK:K0KCKBKAKBKFKFKCKDK>KBKHKIKKKHK7KK7KMKK@KBKFKIKIKIKHKIKHKHKIKHKHKHKHKHKIKHKHKHKGKGKFKHKHKGKGKGKGKFKGKGKGKFKFKFKFKFKFKFKFKFKGKGKGKFKGKGKFKGKGKEKDKDKEKGKEKDKFKHKHKFKBKCKDKTKqK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KkKjKiKgKnKkKdKaKfKkKhKdKmKlKeKhKgKeKdKgKgK_KhK_KbKcKfKYKXK_KfKcK_KYK\KgKoK�K�e]rs(KKKKK
+KKK;K2K+K%KKKK&K(K/K?KBKAKAKBKAK?K?K?K>K?KAKBKBKBKBK@KK@K8K5KK9K1KBKAKBKBKDKCKFKDK;K?K?KGKHKJKMKBKCKIK;K/K%K'KKKKKK!KMKIK:K"KKKK!K#K$K K(KLKNKCK2K$K&K&K(K'K&K%K K4KOKLKFK^K�KJK,KbK�K�K�K�KyKRKOKIKpK�K�K�K�K�KK9K5KlK[KNKLKKAK4K>KpK�KtKK@KCKBKAKAK?K>K>KAKBKBKBK@K>K>K=K;K;K=K4K5K:K:K?K5K&KBKAK?K?K6K1KAKCKCKCKEK9K>KCK?KHKFKCKDKHKKKKKKKIK?K/K%K'KKKKKK$KNKHK>K#KKKK!K"K$K#K*KLKMKCK2K$K&K%K%K'K%K(K'K5KNKLKDK^K�KKK)K`K�K�K�K�KyKQKPKIKqK�K�K�K�K�K�K;K3KkK^KLKKK9K%K(K+K,K,K.K'K%K!K%KMKPKKK[K~K�K�K�K�K�K�K�K�KVK;KPKNKHKKKKK$K*K'K'K*K,K4K^KOKKKOK�KfKDK�K�K�K�K�K�K*KTK�K`KJKJKSK�K�K�K6K.KqK�K�K�K�K�KjKFKJKAKEK�K�KK`K?K'KK"K*K2K9K;KEKJKEK8K1K0K+K&K K!K)K1K6K8K;K=KK8KFKFKFKGKDK@K?K?K@KDKGKIKGKHKHKHKHKIKHKHKIKHKFKFKFKIKHKFKFKFKFKGKFKFKFKFKFKFKFKFKFKFKFKGKEKCKFKFKCKDKGKEKCKDKDKDKDKDKDKDKDKDKDKDKDKDKCKFKHKIKHKCK?KDKYKzK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KoKeKeKgKiKnKpKlKcKeKhKjKgKcKgKbKhK^KaKaKgK`K_KgKeKcK_K[KXKMK?K0e]ru(KKKKK,K=KK=K>K5K6K;K;K>K1K(KCKAK?K>K6K2KBKCKCKCKEKDKFKAKAKJKNKQKJKBKEKIKCKFKMKDK3K&KKKKKK%KMKGK=K"KKKK!K"K$K"K+KLKMKCK2K%K&K%K%K&K%K'K'K6KNKLKEK_K�KKK)K`K�K�K�K�KyKQKPKFKoK�K�K�K�K�K�K>K1KkK_KLKLK=K+K/K0K0K.K*K&K"KK%KJKOKGK�K�K�K�K�K�K�K�K�K�K�K�KLKNKIKKKKK K-K0K=K5K-K4KOKQKKKLK�K�KKKHK�K�K�K�K�K!KMK�KfKIKKKNK�K�K�K@K*KhK�K�K�K�K�KqKJKMKEKAKyK�K�K�K�KqKKK.K!K&K*K3KDKHKFK>K7K3K2K1K-K$K#K!K%K0K6K;K:K>KEKEKCKAKAKCKDKDKDKDKFKGKDKDKKK[KsK]KCKCKEK�K�K�K�K�K�K�K�K�K�K�K�K�KJKBKDKDKCKCKGK?KHKOKMKJKGKCKFKNKLK�K�K�K�K�K�K�K�K�K�K|KK4K#KAKCK+K"K1K7K7K8K9K8K2K.K/K0K3K3K(K#K#KKK+K-K0K=KBKAKAKBKBKBKBKBKBKBKBKBK@K9K@KCKBKAK=K5K6KK5K4KDKCKCKCKDKJKJK@KAKKKOKQKRKPKIKBK7KDKNKLKKK?K KKKKK(KOKFK;K"KKKK!K"K$K!K,KMKLKEK2K%K&K&K&K%K%K'K&K8KPKLKFKaK�KJK)K`K�K�K�K�KyKRKOKHKoK�K�K�K�K�K�K?K.KjK`KLKOK?K.K/K/K.K(K$K"KK K,KIKMKIKsK�K�K�K�K�K�K�K�K�K�K�KOKNKMKKKKKK&K3K�K�K\K,KFKNKNKIK�K�K�K6KkK�K�K�KnK#KEK�KkKIKIKJK~K�K�KIK&K_K�K�K�K�K�KyKKKJKFK=KrK�K�K�K�K�K�KxKXK3K'K$KKBKBKBKDKDKCKEKGKGKFKEKGKHKDKDK@KxK�K�K�K�K�K�K�K�K�K�K�K�KUKCKFKFKDKDKCKDKGKNKNKKKIKEKCKOKJK�K�K�K�K�K�K�K�K�K�K�KCK4K/K*K8KFKFKHKNK?K5K7K5KlK�K�K�KcKFKK@KBKBKBK?K;KBKAK8K8K:K7K9KKLK]KlK}K�K�K�K�K�K�KUKNKHK�K�KPK"K&K*K%K&KnK�KlKEKMKOKHK�K�K�KZK@K�K�K�KwK.K:K}KwKHKJKEKrK�K�K_K'KQK�K�K�K�K�K�KRKIKJK?KaK�K�K�K�K�K�K�K�KTK'KRKiKGKIKEK;KIKPKGK>K:K7K7K5K4K3K5K2K)K6KEKBK>K;K=K?K?KAKBKCKCKCKCKCKGKGKGKEKDKAK?KOKgK~K�K�K�K�K�K�K�K�K�KqKBKDKCKCKDKCKHKDKLKOKNKKKFKCKIKMKVK�K�K�K�K�K�K�K�K�K�KeK8K1K-K,KAKGKIKMKLK:K7K=K�K�K�K�K�KNKBK8K6KAK�K�K�K�K�KaKVK�K�KXKFK�K~K�K�K�K�KWKQK�K�K�K�K�K�KWKLKNKNKNKMKNK�K�K�K�K�KFKOK\KjKiKbKYKVKKKIK6K&K8KFKCKDKIKIKIKGKGKFKCKAK@K?K?K@KDKIKIKIKIKHKHKIKHKFKFKFKHKIKFKFKGKFKFKFKFKFKFKFKFKFKFKFKFKGKFKGKGKDKCKDKCKCKCKDKCKDKEKBKCKDKCKCKCKCKDKDKDKDKCKDKDKBKCKEKGKGKHKFKDKCKBKNKdK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KaK.KKKKK	K
+KKKKKKKKKKKe]ry(KK)K=K;K9KK>K(K)K6K8K:K;KKBKBK@K?K>K@KBKBKCK?K:KBK@KK1K:KEKBKBKCKDK;KFK@KDKPKQKPKPKQKRKRKSKRKQKIKFKGKHKKKNKMK?K:KLKEK9KKKKK!K"K$K!K-KMKKK@K,K$K&K%K%K%K%K&K#K6KPKMKDKbK�KKK)K`K�K�K�K�KzKQKOKGKmK�K�K�K�K�K�KDK.KfKbKLKMKFK1K/K1K1K0K0K0K1K2K3KIKMKMKDK4K4K3K2K6KIKhK~K�K�K�KVKNKMKmK�K�K�K@K,K2K)K,K�K�KNKLKPKHK|K�K�KYKK@KBKAKBKCKDKDKDKDKDKDK>KCKDKIKZKnK�K�K�K�K�K�K�KKEKCKDKCKDKCKFKDKKKOKLKKKHKBKGKNKKK�K�K�K�K�K�K�K�K�K�KzK;K2K/K*K?KGKGKJKMK@K5KaK�K�K�K�K�KYKCK7K9K6KlK�K�K�K�KuKHK�K�K:K:KxK�K{K�K�K�KhKGK�K�K�K�K�K�KgKLKNKLKLKMKFK�K�K�K�K�KYKDKDKJKWKfKlKeKOKHKAK KK"KgKnKIKEKFKIKHKGKFKFKEKEK>K8K=K=K@KDKHKIKHKIKHKHKGKFKGKGKHKHKFKGKFKFKFKFKGKFKFKFKFKFKFKFKGKEKDKDKCKCKCKCKCKCKCKDKDKCKCKCKCKDKDKDKCKBKBKBKBKBKDKDKDKCKDKDKDKCKEKGKGKGKCKAKHKZKwK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KsK?KKKKK	K
+KKKKKKKKe]rz(K"K9K:K9K;K2K K;KAK*K$K3K5K9K9K:K9K1K%KKKKK!K8K8K5K/K)K)K$K K K%K1K3K3K5K>KBK?K?K>K?KBK@K@K>K;KDK=KK�K�KPKNKGKyK�K�KUK@K�K�K�K�K7K-KrK�KNKLKDKdK�K�KsK(K@K�K�K�K�K�K�K_KEKKKBKRK�K�K�K�K�K�K�K�KqK*K?K}KTKEKDKXKkKDK.K7KLKSKMKFKBK;K8K7K8K>KGKFKCK4K'K'K.K9KKAKBK@K@KCKCKCKCKDKDK?KEKHKHKGKGKPKcKxK�K�K�K�K�KNKAKDKDKCKCKEK@KFKPKKKKKHKDKCKNKJK�K�K�K�K�K�K�K�K�K�K}KFK4K/K)K6KDKGKIKMKFK9K�K�K�K�K�K�KnKHK;K:K5KUK�K�K�K�K�KMKvKpK3K6KeK�K�K�K�K�K|KIKoK�K�K�K�K�KyKMKMKLKLKLKJK_K�K�K�K�K�KAKHKHKGKEKNKbK]KEKFK1K K,K�K�KnK]KRKFKFKGKGKGKEKDKK;K=KBKGKIKIKHKGKFKFKGKHKHKFKGKFKFKFKGKFKGKFKFKFKFKFKFKGKEKCKDKDKDKDKDKDKDKDKDKCKDKCKCKDKCKCKDKCKAKAKAKAKBKCKDKDKBKCKCKCKDKCKBKCKEKFKFKDKEKBKBKNKjK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K_K0KKKKK
+K
+K	KKKe]r{(K6KK>K>K?KBKAK>KKBKAKAKDKGKFKJK=KGKPKPKQKPKQKSKRKRKSKRKPKRKSKMKIKEKHKJKKKKKIK@K$KKKK K"K$K#K0KMKIK@K-K#K&K%K%K%K&K$KK7KPKKKBKbK�KJK)K`K�K�K�K�KzKRKOKHKjK�K�K�K�K�K�KIK)KeKgKOKLKWK�KUK-K2K2K3K3K2K0K0KCKMKKKDK*K7KaK�K�K�K�K�K�K�K�KaKLKHK�K�K�K�K�K�KRK8K3K(KYK�KtKIKGKyK�K�KXKEK�K�K�K�KCK)KiK�KSKKKEKaK�K�K{K0KK:K7KKK�K�K�K�K�K�KVKUK�K�K�K�KhKsK`KIKMKLKLKLKJK�K�K�K�K�KJKHKGKGKGKEKCKHKIKIK=K!KZK�K�K�KdKyKkKSKHKEKFKFKGK=K,KK=KKBKDKGKGKHKHKGKGKFKFKFKFKGKGKEKGKFKFKFKFKFKFKGKFKFKFKFKDKCKCKCKCKCKCKCKCKCKCKDKBKAKCKCKAKAKAKCKDKCKDKDKCKDKDKDKDKDK@KBKDKCKCKEKFKIKHKFKBK@KIK]K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KzKIKKKKKK
+Ke]r|(K9K=K:K;K%K)K?K6K$K.K4K6K:K9K9K7K*K!KKKKK2K>K7K4K-K(K'K"K"K#K4K9K)K&K.K2K3K8K=K?K?K@KAK@K>KKCKAKAKEKFKDKJK=KHKQKQKQKPKQKSKRKRKRKRKQKRKRKSKQKNKJKEKBK;KJKMKGK3K!KK!K#K$K#K1KMKHK?K,K#K&K%K&K%K%K$K!K7KPKJKBKbK�KKK)K`K�K�K�K�KzKQKPKIKjK�K�K�K�K�K�KIK)KeKhKPKLKVK�K�KKK,K2K2K2K4K5K3KCKMKKKEK1KK2KsKfKCKFKLK�K�K�K�KdK2K1K3K@KOKQKIKCKAKDKEKDK@K8K8K8K5K.K*K)K1K:K?K@K?KCKDKCKGKCKAKGKFKGKHKIKIKHKGKEKLK]KyKeK@KDKCKEKCKDK2K+KMKLKNKKKEK@KIKLKVK�K�K�K�K�K�K�K�KqKeKUKK8K7K;KK�K�K�K�K�KhKGK�K�K�K�K}K>KdKHKIKIKKKLKFKxK�K�K�K�KbKCKHKFKGKGKGKGKIKIKGK,K�K�K�K�K^K_KoKxKpK^KMKFKGKEK:KCKFKEKDKCKBKAK>KK;K'K'K1K3K7K9K:K7K/K"KKKKK-K@K:K6K.K)K'K#K!K K,K9K+K&K(K$K)K2K6K:K>KBKAK>K@K?K9K=KDK6K.K=K9K7K;K=K=K8K#KK8K.KAKDKBKBKFKGKHKIK=KIKPKSKRKPKQKSKSKSKPKQKSKRKSKSKSKSKSKOKDKK1KnK�K�K�K�K�KxKHKIKFKCKxK�K�K�K�K�K�K�K�KGK/KkKpKGKIKEK|K�K�K�KrK1K>K[KFK1K=KLKRKNKGKEKDKBK8K6K7K8K:K9K2K+K(K*K5K=K>KBKEKEKAK?KFKGKGKFKFKGKHKIKJKHKEKFKIKCKDKDKCKDKCK>K KFKLKLKKKFKBKFKNKMK�K�K�K�K�K�K�K�KfKaKVKBK0K0K,K=KIKIKLKLK�K�K�K�K�K�K�K�KgKFK:K;K:K^K�K�K�K�K�KNKDK5K7K4KiK�K�K�K�K�K}KJKsK�K�K�KwKK@KMKHKIKHKJKLKZK�K�K�K�K�KBKHKFKGKFKGKFKFKIKGKJK�K�K�K�K~KZKYKbKoKwKmK\KMKEKGKGKFKGKEKCKDKDKDKCK=KK>K>K8K?KBK;K:KK8K0KBKDKCKBKEKIKKKHK=KFKMKRKQKPKQKRKQKQKQKRKRKRKRKRKRKRKRKUKPKIKFKFKJKLKNKKKAK0K"K K1KOKIK>K+K$K%K&K%K&K$K%K$K6KQKNKDKaK�KMK)K`K�K�K�K�KyKPKQKEKfK�K�K�K�K�K�KPK(KaKkKNKNKVK�K�K|K5K/K/K0K3K5K4KBKLKLKJK9K7K7K7K7K5K5K5K0K+K-KEKQKIKcK�K�K�K�K�K�K�K�KgK?K,KFKPKJK\K�K�KkKnK�K�K�K�K[K#KPK�KcKHKKKOK�K�K�KJK,KgK�K�K�K�K�KKLKHKHKAKpK�K�K�K�K�K�K�K�KTK-K]KyKFKHKBKsK�K�K�K|K5K8K|K�KuKVK?K8KDKNKGKEKCK@K;K8K6K8K9K9K:K5K,K(K)K0K9KBKDKBK=KDKEKEKFKFKEKIKHKGKIKKKHKFKDKCKDKCKDKBKBK!K=KNKLKKKGKCKEKLKHK�K�K�K�K�K�K�K�KnK`KWKJK5K1K.K6KEKJKMKJK~K�K�K�K�K�K�K�KxKMK?KK;K;K?KCKFKHKGKFKFKFKFKGKFKFKFKFKFKFKFKFKGKFKEKEKGKFKEKCKBKCKDKCKCKCKCKCKDKCKCKBKAKCKDKCKCKBKCKDKBKAKBKCKBKBKBKCKDKCKCKDKDKDKCKCKDKCKCKDKDKEKFKDKAK>KDKPK[KiKqKzK�K�K�K�K�K�K�K�K�K�K�K�K�K�e]r(K8K5KK0KBK0K!K.K0K5K;K=K9K6K)KKKKK$K9K:K7K2K*K*K%K!K 
K'K:K2K*K)K$KKKKK#K3K:K:K=K?K=K=K9K?KAK;K=KK4K%K>KAKAK>K8K2KCKDKDKCKFK>K?KHK=KFKMKQKPKQKPKPKPKPKRKSKSKSKRKRKRKRKRKRKSKQKOKJKGKHKJKMKNKLK>K)K2KMKIKAK+K$K&K&K%K'K&K%K#K7KPKMKCKaK�KMK*K`K�K�K�K�KyKPKQKIKfK�K�K�K�K�K�KOK'K_KkKOKNKTK�K�K~K2K3K7K.K4K6K4K@KLKLKKK8K3K4K4K4K0K,K(K6KeKyK`KMKLK\K�K�K�K�K�K�K�K�K�K�K:K>KPKKKQK�K�K|K}K�K�K�K�KeK%KJK�KkKHKKKMK�K�K�KTK,K_K�K�K�K�K�K�KRKGKGKCKgK�K�K�K�K�K�K�K�KaK/KTK}KIKFKBKhK�K�K�K�K@K2KmK�K�K�K�KcKAK=KHKGKAKFKGK@K=K9K8K9K9K8K9K9K-K'K&K8KEKDK=K?KAK@KCKGKGKFKFKFKGKHKGKGKDKBKDKDKDKAKEK)K2KOKLKLKIKDKCKKKKKkK�K�K�K�K�K�K�K�K[K\KPK:K2K.KPKaKHKJKMKbK�K�K�K�K�K�K�K�KUKDK>K:K�K�K�K�K�K�KkKIK?K5K6K=KuK�K�K�K�K�KgKDK�K�K�K4KEKGKAKJKIKHKJKMKEKrK�K�K�K�KfKBKHKGKGK>K;KDKFKIKOK�K�K�K�K�KZKVKgKcKaK_K]KcKeK^KGKAKCKFKJKFKCKCKDKDKDKDKBKBK@K=K=KK1K&K>KBK@K>K8K3KBKBKCKDKCKBKFKFK=KGKMKOKQKQKPKQKQKQKRKRKQKRKSKRKRKRKRKRKSKRKSKRKMKHKGKIKKKKKOKHK?KDKEK>K,K$K%K&K&K%K$K%K#K7KPKKKDKdK�KMK'K_K�K�K�K�K{KPKQKIKfK�K�K�K�K�K�KRK&K]KmKOKMKQK�K�K�K2K4KfKTK0K3K5KCKNKJKKK8K4K1K-K+K*K*K1KCKcK�KwKIKMKUK�K�K�K�K�K�K�K�K�K�K�K:KRKOKTK�KbK[K~K�K�K�K�KoK(KBK�KsKEKIKJKyK�K�K\K)KUK�K�K�K�K�K�KVKDKEKBK_K�K�K�K�K�K�K�K�KlK2KLK�KOKFKBK`K�K�K�K�KMK3KbK�K�K�K�K�K�KnKIKFKDK@KGKKKLKGKBK;K;K7K6K8K7K7K3K7KDKDK=K=KAK@KAKDKDKCKDKFKDKDKFKFKEKBKBKCKDKAKEK2K&KLKKKLKIKDKDKIKKKUK�K�K�K�K�K�K�K�KYK]KTK>K1K/K�K�KwKZKLKQK�K�K�K�K�K�K�K�KbKGK;KSK�K�K�K�K�K�KyKMKDK6K8K8KFKKKAKLKlKjKWKGKoK�KbK1KKEKHKIKIKJKJKVK�K�K�K�K�KBKHKEKDKCKCKCKEKJKFK�K�K�K�K�KwK6KBKVKjKlKeK_K[K]KOKCKCK5K-K=KGKGKCKEKDKCKDKDKCKCKBK@KKKK2K7KCK?KK,K'K3K2K9K>KK-K,KCKCK@K?K5K2KDKCKCKDKFKFKGKDK:KIKHKIKNKOKPKQKPKQKQKQKSKRKRKRKRKRKRKRKSKSKSKRKRKSKTKSKMKGKEKIKJKHKKKHK5K%K$K&K&K&K&K&K$K7KPKKKAKdK�KLK&K^K�K�K�K�K|KPKQKGKcK�K�K�K�K�K�KVK&KZKlKKKMKPK�K�K�K9K4KpK�K�KvKCK;KMKKKJK9K6K:K9K9K9K8K6K3K3K3K=KKKIKHK8KEKvK�K�K�K�K�K�K�K�K�KRKNKGK�KgKRK�K�K�K�K�K}K2K8KzKKHKIKDKmK�K�KpK.KFK�K�K�K�K�K�KeKEKJKBKNK�K�K�K�K�K�K�K�KK>K=K{KdKCKEKMK�K�K�K�KhK4KNK�K�K�K�K�K�K�K`KBKFKBK@KEK:K?KLKOKLKLKDK>K;K:K7K6KBKDKBK8K/K*K,K3KK9K9K;K:K@KGKIKGKGKFKFKGKGKFKCKEKGKGKGKGKFKCKDKCKCKCKCKCKCKDKDKEKCKCKCKCKBKAKBKBKBKAKBKBKAKBKBKAKAKBKBKBKBKBKBKBKAKAKBKAKBKBKBKBKBKBKBKBKBKAKAKAKAKAKAKBKAK>K>K?K@KDKBKCe]r�(K1KCK2K&K0K/K6K>K?KK)K/KBKBK?K>K5K7KEKCKCKCKFKFKGKBKK1KmK�K�K�K�K]KJKLKLK5K/K3K5K6K6K7K8K7K5K4K>KKKIKHK9K;KEKZK|K�KjKBK�K�K�K�K]KKKKKoKoKfK�K�K�K�K�K�K8K4KtK�KMKHKDKgK�K�K{K2K>K�K�K�K�K�K�KlKHKKKCKGK�K�K�K�K�K�K�K�K�KEK6KtKnKBKGKEK�K�K�K�KuK8KFK�K�K�K�K�K�K�KkKAKFK@KKK�KsKSK?K=KIKSKQKMKJK@K7K9K@KDKAK:K;K;K4K-K)K-K9K>KBKBKBKAKCKDKDKCKAKAKAKFK(K1KOKKKKKHKDKEKLKJKhK�K�K�K�K�K�K�KxKUKZKNKSK�KbK\K�K�K�KyKXK�K�K�K�K�K�K�KuKQKhK�K�K�K�K�K�KqKKKFKKK?K6K9K9KAKJKJKIKGKHKIKLKBK3K2K4K8KGK?KEKGKGKHKHKIKSK�K�K�K�K�KDKEKDKCKBKBKBKDKIKDKtK�K�K�K�K�KeKeKWKDK6K0K.K6KIKJKEKHK=K$K!K{K�K�K|KsK[KKKEKDKCKDKDKCKDKBK?K-K7K@K;K:K=K@KDKFKGKGKEKFKEKCKEKFKFKFKFKEKCKDKCKCKCKDKDKCKCKCKDKCKCKDKDKCKBKBKAKBKAKAKAKAKAKBKBKBKAKAKAKAKAKAKBKBKBKAKBKAKAKAKAKAKAKAKAKAKBKBKBKBKAKAK@KAK?K?K?K>K@K?K?e]r�(K?K5K$K0K2K3KKK;K7K3K8K;K9KK?K>K>K>K?K>K?K>e]r�(K:K&K*K4K8K=KBK>K?K6K#KKKK!K=K9K4K8K/K*K&K!K"K*K;K2K+K+K&K KKKKKK(K'K#KKK 
K"K%K'K,K0K3K3K,K1K9K4K4K9K:K:K:K$K5KBKAK?K=K/K:KDKCKBKDKEK>KEK@K:KGKKKPKQKPKPKPKPKPKPKQKPKQKSKRKRKRKRKSKRKQKSKRKRKSKRKSKRKRKSKRKSKNKEKFKIKHKMKOKJK>K+K#K"K5KNKKK@KeK�KLK&K^K�K�K�K�K|KPKNKFKbK�K�K�K�K�K�K^K%KWKqKPKOKNK}K�K�KFK-KhK�K�K�K�KyKLKJKMK=K3K6K0K-K+K+K-K/K1K1K9KLKIKIK:K2KCK5K(K,K]K~K�K�K�K�KUKHKJKPKdKaKlK�K�K�K�K�KJK,KeK�KTKHKFKZK�K�K�K?K5KsK�K�K�K�K�K}KIKHKGKBKqK�K�K�K�K�K�K�K�KZK0K\K~KFKIK?KlK�K�K�K�KJK:KnK�K�K�K�K�K�K�KIKEKCK?KuK�K�K�K�K�KlKLK>KEKVK^KVKKKCKCKBK9K8K8K7K;K8K1K2K,K*K-K7K>KCKDKBKAKAKAKCKK?K>K>K?K>K>K?K>K>e]r�(K)K#K1K5KK=K7K)KKKKK6KAK8K9K3K+K%K#K"K'K=K7K.K,K(K!KKKKKK'K&K#KKKK#K%K(K)K*K-K2K4K8K+K.K3K5K:K9K;K7K#K8KCK@K>KK:K9K9KK>K?K>K?K>K>K>K>K>K>K>e]r�(K%K/K4K8K=K;K:K7K*K KKKK1KAK:K;K3K,K&K"K"K'K;K7K.K-K)K"KKKKKK(K)K&KKKK!K#K%K(K*K+K-K/K0K3K8KK K5K:K9K:K8K"K;KCK@K>KK_K�K�K�K�K�K�K�K�K�KaK6K8K=KBKDKCK>KNKJKDK@K:K8K5K6K=KK;K=K>KK?K>K>K?K>K>K>K>K>K>K>e]r�(K.K5K9K:K=K=K7K.K"KKKK,KAKK/K+K)K%KKKKKK(K'K$KKKK K"K&K)K+K*K-K2K0K(K*K&KK&K6K9K9K:K6K#K=KBK@K>K=K0K@KDKCKFK&KKK+K:K@KJKLKMKNKQKPKPKPKPKPKPKQKPKPKRKSKRKRKRKRKSKRKSKSKSKSKSKRKRKRKSKSKSKRKOKNKPKNKFKBKFKLKLKMKMKJKHK>KfK�KLK%K[K�K�K�K�K|KMKNKFK^K�K�K�K�K�K�KeK%KPKtKPKQKJKwK�K�KPK)K^K�K�K�K�K�KMKKKKKNK�K�KiKDK7K1K,K,K)K'K.KHKHKKK?K(K KKK#K6KIK^KZKuK�K^KGKMKGK9KHKMKaKfKvK�K�KfK+KNK�KiKGKIKJK�K�K�K\K.KZK�K�K�K�K�K�KYKEKFKAKWK�K�K�K�K�K�K�K�K}K=KBKK_KDKFKMK�K�K�K�KlK:KPK�K�K�K�K�K�K�KgKAKEKAKTK�K�K�K�K�K�K�K�K�KqK3KCKgKOKBKDK@KTKVKOKJKFKAK9K3K4K6K8K;K9K?KCK@K@KBKAKEK1K(KNKLKKKJKFKDKKKMKUK�K�K�K�K�K�K�KjKSKVKSKGK;K;KFKoKhK[KTKKK�K�K�K�K[KKKOKKKMKQK�K�K�K�K`KDKIKIKKKKKJK9KK�K�K^KCKIKHKFKCKGKKK=K3K6K9K'KK5KGKCKDKCKHKGK^K�K�K0K3K?KNKRKYKaK_KOKCK;KHKFKuK�K�K�K�K|K8K>K>K;KK>K>K?K=K>K?K>K>K>K?K?K>K>e]r�(K6K9KK:K0KAKCKCKDK$KKKKK8KNKMKMKNKQKPKPKPKPKPKPKPKQKPKRKSKQKQKQKQKSKRKRKSKSKSKSKRKRKRKSKSKSKQKNKOKOKSKSKLKEKCKGKLKOKMKMKEKdK�KNK%K\K�K�K�K�K|KJKNKFK^K�K�K�K�K�K�KhK%KNKvKQKOKGKvK�K�KTK(KZK�K�K�K�K�KOKIKJKNK~K�K�K�KyKeKOK>K5K1K0KFKHKIKAK$KKKKKLKiKfKuK�K�K_KGKMKIK;K;KFKQK\K]K�K�KnK.KHK�KpKGKIKHK{K�K�KfK,KSK�K�K�K�K�K�KaKCKHKCKQK�K�K�K�K�K�K�K�K�KAK=KyKjKCKGKGK�K�K�K�KyK>KGK�K�K�K�K�K�K�KqKCKHKDKKK�K�K�K�K�K�K�K�K�K}K;KKDK?K>KLKYKZKSKMKJKDK;K6K4K5K9K?KCK@KAKBKAKCK9K"KHKLKKKJKFKBKIKNKKK�K�K�K�K�K�K�K|KVKTKSKIK:K=KK>K:K;KIKLK�K]K-K2K@K\KYKVKMK>K2K-K3KCKGKXK�K�K�K�K�KDK;K=K=K:K:K:K9K7K:KGKIKNK�K�K�K�K�KnK@KNKPKUKVK]K]K\KZKWKXKJK>K?K@KBKEKFKCKCKDKCKDKCKBKBKBK?KK>K>K?K>K=K>K?K>K>K?K>K>K>K>e]r�(K/K9KK5K0K,K%KKKK
KK%K*K&KK
KK%K"K#K'K*K+K+K/K2K.K"K K"K'K7K8K8K:K9KK9K2KAKCKDKCK$KKKK
+KK1KOKPKNKPKPKPKPKPKPKPKPKQKPKRKRKQKPKPKQKSKRKRKSKSKSKSKRKRKRKSKSKSKRKRKRKRKRKRKRKRKJKDKGKGK;KIKQKOKhKHK&KZK�K�K�K�K|KHKKKEK]K�K�K�K�K�K�KjK%KLKuKQKOKGKuK�K�KWK'KXK�K�K�K�K�KRKIKJKLK}K�K�K�K�K�K�KfK/K=KAKGKIKKK@KKKKK.KK5K=KIK/KlK�KuK/KDK�KwKGKJKEKuK�K�KrK0KIK�K�K�K�K�K�KkKDKHKFKKK�K�K�K�K�K�K�K�K�KKK7KpKuKCKHKDKwK�K�K�K�KDKCKzK�K�K�K�K�K�K|KGKGKDKEK|K�K�K�K�K�K�K�K�K�KHK7KoK�KBKCK=KJKKK@KIKYK\KWKQKKKDK?K8K6K=KDK@KAKBKAKAK?KK@KMKKKJKHKCKCKKKHK�K�K�K�K�K�K�K�K\KSKQKMK=K:KKDKIKHKXK>K1K2K7KIKBK8K1K*K-K3K>KBKIKGK�K�K�K�K�KdK5K=K=K:K9K9K9K7K8KBKJKCK�K�K�K�K�K�KHKHKHKJKIKGKSK]K\KXKWKQKDKDKCK0K'K8KBKGKDKDKEKCKBKBKBKAKAK?K?K=K9K9KK?K@K?K>K?K?K>K?K?K?K>e]r�(K5KK5K3K.K'K!KKKKK#K(K#KKKK&K&K$K%K)K,K/K/K3K.K$K!K K"K,K>K;K9K:K9KKZK�K�K�K�K{KLKJKDKZK�K�K�K�K�K�KmK%KHKsKQKQKJKrK�K�KZK'KWK�K�K�K�K�KTKHKKKKKzK�K�K�K�K�K�KqK(K@KxKXKGKHKMKSKK
+KKKKKDKoK�KrKQKHKIKIKKnK�K�K�K�K�K�K�KLKDKDKAKsK�K�K�K�K�K�K�K�K�KUK7KbK�KJKCK=K\K�KuKSKCKGKXKXKXKTKOKJKCK?KBKAKBKAKAKAKDK$K4KOKKKMKHKDK?KIKHKeK�K�K�K�K�K�K�K^KQKQKOKEK7K:K@KJKLKLKOKMK�K�K�K{KJKLKJKHKMKVK�K�K�K�KWKHKEK?K=KGK[K�K�K�K�KQK:KAK@KBKAKFKKK=K5K8K7KK@K=KKAKBKBKBKBK@K>K>K>K>K>K>K?K>K?K?K?K>e]r�(K9K=K=K9K+K KKKK4KAK7K7K0K*K&K#K!K*KAK9K1K/K*K"KKK
KK!K+K'KKKK"K%K%K'K*K*K-K1K1K+K"K"K#K"K#K0K@K;K9K:K9K;K9K:K@KBK@K=K9K@KCKCKEK@K#KKKKKKKK)KK�KaKEKIKJKbK=K4KK%KOKfKxK�KjKJKJKIKHKIK;KGK9K/K5K2KhK�K�K;K7KxK�KIKIKDKfK�K�K�K7KKAK?K?K=KBKLKCK6K8K9K?KVKWKRKOKNKLKJKIKHK8K1K3K3K0K/K7KK@K@K@K@K@K@K@K@K@K?K>K?K>K?K>K=K>K>e]r�(K:K8K8K-K!KKKK.K@K6K7K3K,K'KK!K(KK:K?KDKCKEK>K#KKK
KKKK-K:K;K>KHKOKOKMKPKQKPKPKPKPKPKPKPKQKPKPKPKPKRKSKSKSKRKRKRKRKSKSKSKRKRKRKSKSKSKSKSKQKSKTKTKKKDKBKGKLKLKNKKKYKtK�K�KKKKKAKYK�K�K�K�K�K�KrK$KEKuKOKMKDKpK�K�KcK$KNK�K�K�K�K�KYKIKKKKKuK�K�K�K�K�K�KzK,K:K}KdKEKHKCK#KQK_K!K3K:K]K�KoKQKZKSKIKHKKK7K0K$K0K,KK=KK>K>K>K>K>K@KBKBKBK?K>K?K>K>K?K?K?K?e]r�(K8K7K0K$KKKK'K?K6K8K3K,K'K!K K$K9K:K3K/K)K$KKK
K
KK'K%KKKK$K#K$K(K)K*K+K,K1K-K%K#K$K"K&KKK1KDK9K9K9K:K;K6K9KAKBK?K;K:K@KDKCKEKKFK>KvK�K�K�K�KFK6K7K3K3K=KKK^KUKAKBKAKBKAKBK?K!KCKMKKKKKGKAKCKOKGK~K�K�K�K�K�K�K�K`KQKQKMK?K7K7K?KIKHKIKJKQK�K�KKDK=K8K2K.KDK]K�K�K�K�KLKBKBKAKBKGKaK�K�K�K�KYKUKfKjKjKfKPKIK>K:K5KzK�K6K0K0K-K/K,K;KIKDK6K1K2K3K7K:K:K9K9K:K:K9K;KHKGKbK�K�K�K�KYK9K?K?K?KKBKCKBKAKAKBK=K+K5K>K:K7K6K;KAKCKDKCKCKCKCKCKCKCKDKCKCKBKBKAKBKCKBKAKAKAKAKAKAKAKAKBKAKAKAKAKAK@K>K?K?K?K?K>K?K?K>K>K>K>K=K=e]r�(K8K0K#KKKK%K>K;K6K6K/K*K"KK K5K=K3K1K+K#KKKK
KK)K$KKKKK#K$K(K+K,K-K/K2K/K#K#K$K#K#KKKK;KBK9K9K9K:K;K6K=KCKAK?K:K=KDKDKCKEK;K$KKKKK K-K=KIKIKBK6K$KK4KKKRKOKPKPKPKPKPKPKPKPKPKPKQKRKRKRKRKRKRKRKSKRKQKRKSKRKRKRKRKRKRKRKTKRKRKRKSKRKSKQKJKBKDKIKKKMKJKKKHKGK@KXK�K�K�K�K�K�KvK(KAKuKQKMKHKlK�K�KkK%KHK�K�K�K�K�K^KJKLKGKmK�K�K�K�K�K�K�K5K0KxKoKEKHKGK"KKIKJKeK~KyK^KfKgKcKKKGKIKJK>K1KK;K|K�K�K�K�KOK,KaK�KYKJKFKSK�K�K�KSK2KeK�K�K�K�K�K�KSKDKEK?KYK�K�K�K�K�K�K�K�K�KCK@KKgKBKFKGK�K�K�K�K{KAKHK�K�K�K�K�K�K�KtKBKEKBKHK�K�K�K�K�K�K�K�K�K�KPK?KrK�KDKEKKK>K=K=K=K;K9K:KDKJKDK�K�K�K�K�K�KLK0KKKKKKKKKKK?KDKEK;K!K}K�K�K�K}KbKzKxKkK[KLKAKK>K>K=K@K?K>K?K>K>K=K=e]r�(K1K%KKKKK8K>K8K7K/K(K"K K K2KK=K8K:K9K9K9K6K>KBKAKAK=KKHKFKYK�K�K�K�K�KYKKKKKKKKKKKK1KEKDKAK4K�K�K�K�K�KWKXKeKnKqKhKUKHKEKFKBKDKBKAK9K0K%K?KBKAK@K=K=K9K8K7K:KKAKAKAK@K>K?K?K>K>K?K>K?K=KK6K6K-K)K$KK K2K>K4K0K-K%KKKKKK,K*K KKK"K"K!K(K,K,K.K.K2K/K$K%K$K%K"K"K)K-K(K'K>K>K8K:K9K9K9K5K>KBKAK@KKWK�KYK@KAKHK�K�K�K�KyKK;K9K7K:K=K?KAKDKBKAKAKAKBKBKAKAKAKAKAKAKAKAKAKAKAKAKBK@K?KAKAKBKAK?K?K>K?K?K>K>K?K>K=K=K=e]r�(K"KKKK/K=K1K4K.K(K$K KK,K=K3K0K.K)KKKKKK)K)K%KKKK$K%K(K)K,K/K0K4K.K#K$K'K#K$K*K-K-K,K'K)KEK@K9K:K9K:K9K5K?KAKBK?K:K=KAKCKCKEK7K#K"KKKKKK
KKKKKKKKKKKK9KOKQKLKNKQKPKQKPKPKPKPKPKQKSKQKPKRKSKSKSKRKSKSKQKQKSKSKSKOKQKSKRKRKRKRKRKRKRKRKTKSKNKDKAK5K?KKKMKKKXKsK�K�K�KK+K6KtKSKJKGKcK�K�KxK(K>K�K�K�K�K�KeKGKIKFKeK�K�K�K�K�K�K�KBK+KiK{KIKIKFKbK5KPK\KEK;KK^K�K�K�K�K�K�K�K�K�KuKDKPK�KhK>KCK?K�K�K�K�K�KIK?K=K:K9K8K9KdKTK?KBKAKAKAKBKK>K>KKAKAK>K>K>K?K?K?K>e]r�(KKKK)K=K1K,K0K+K$KKK'K;K2K*K*K'K!KK
KKK(K)K"KKKK"K$K&K,K+K+K/K5K2K%K#K%K%K%K!KK"K!K KK.KHK=K9K:K9K:K8K6K?KAKAK?K:KK0KDKJKBK-K,KJKLKHKiK�K�K�K�K�K�K�K�KvK4KDK�KvKDKIKEKwK�K�KxK4KFK�K�K�K�K�K�KuKEKHKFKDKvK�K�K�K�K�K�K�K�KiK8KVK�KNKFKCKZK�K�K�K�KeK;KYK�K�K�K�K�K�K�K`K@KEK>KRK�K�K�K�K�K�K�K�K�K�KKKHK�KvK=KDK=KrK�K�K�K�KUK@KCK@K=K:K@KgK`KAKAKAKBKAK@KAK$K8KKKHKIKHKDKAKIKIKaK�K�K�K�K�K�K�K�KIKMKNKCK9K9KKBKEKzK�KCKKGKAKrK�K.K2K0K6K=K=K=KK-K@KQK^KcK]KVKWK[KYKVKPKKKGKAKCKAKBKBKAKAKAK@K@KAKAK?K?K=K8K7K5K9K=K@KCKDKBKAKAKAKBKAKAKAKAKAKAKAKBKAK@K@K@K@K@K@K?K>KAKAK>K>K>K>K>K?K>e]r�(KKK'KK;K2KAKDKDKEK5K%K"KKKKKKKKKKK
+KKKKKKKKKK1KIKOKNKLKPKQKPKQKSKSKSKSKSKQKPKPKQKSKQKPKQKPKPKPKPKPKPKRKSKRKRKRKRKRKSKRKOKRKSKQKQKOKEK@KBKHKLKKKIKOKeKrK,K3KrKWKHKFK]K�K�K}K)K3KzK�K�K�K�KmKIKIKGK]K�K�K�K�K�K�K�KNK&K`K�KKKHKLK6K%K*K(K&K0K9K*K/K)K!KaK_KIKJK[K�K�K�K�K�K�K�K�K{K3K@K�K|KFKJKDKqK�K�K�K:K@KyK�K�K�K�K�KKGKFKGKCKnK�K�K�K�K�K�K�K�KvK:KLK�KWKDKEKQK�K�K�K�KoK?KQK�K�K�K�K�K�K�KnK@KEK@KKK�K�K�K�K�K�K�K�K�K�KQKBKsK�KDKDKKEKCK@K=K@KNKXKDKAKBKAKAK?KAK+K.KLKHKHKIKFK?KGKLKPK�K�K�K�K�K�K�K�KPKIKOKJK:K9K;K@KCKKKRKLK_K�KLK:KNK^K\KQKSKKKmK�K�K�K�KJK\KoK�K�KOKiK�K�K�K�K�K�K�K�K�K�KbKEK�K�K�K�K�K1KJK9K:K9K:K@KGKGK8K3K5K4K7KK@K?KK>K>K>K>K>K>K>KAKAK>K>K>K>K>K>K?e]r�(KK!K;K3K(K,K*K%KKK!K3K+KKKKKKKK
K"K)K"KK
KKK#K&K+K*K+K.K4K5K(K$K%K&K&KKK	KKKKKK0KIK=K9K:K9K;K.K)KAKAKBK=K8K+K@KDKCKCK2K'K$K KKKKK
KK
+K	KK
KKKKKKKKKKKK;KNKPKMKNKNKNKPKRKQKQKQKQKQKRKRKSKQKPKPKQKRKRKRKRKRKRKSKRKRKRKRKRKRKSKRKQKQKQKQKQKSKNKEKAKEKGKKKLKIKNK>K:KtKYKGKFKZK�K�K�K.K1KvK�K�K�K�KpKHKJKGK\K�K�K�K�K�K�K�KRK$K\K�KMKGKJK6KK&K(K&K!K%K/K,K"K!KPK[KJKLKTK�K�K�K�K�K�K�K�K�K6K:K|K�KHKJKDKiK�K�K�KBK9KsK�K�K�K�K�K�KJKFKFKAKcK�K�K�K�K�K�K�K�K�K?KEK�KbKBKFKJK�K�K�K�K|K@KHK�K�K�K�K�K�K�KyKCKGKCKEK|K�K�K�K�K�K�K�K�K�K^KAKhK�KMKBK?KRK�K�K�K�KrK@KFKEKCK?K>K@KGKCKAKBKAKAKAKCK5K$KKKHKHKIKFK?KDKKKGK�K�K�K�K�K�K�K�KVKEKLKLKKK>K>K=K=K>K;K?KHKGK9K.K0K0K/K9K>KKOK^K\KUKOKQKTKQKAK?K:K8K;KAKAKAKAKAKAKAK@K?K>K?K?K?KK@KAKAKAK>K>K>K>K>K>K>e]r�(KK6K6K%K&K'K&K KKK/K.KKKKKK
KK
+KK)K"KKKKK!K&K*K*K+K/K1K3K)K&K&K%K&K#KKKKKK KKK8KEK:K8K:K9KKXK�K�K�K�K�K�K�K�K�KFKCK|KoKBKHKEK}K�K�K�K�KGKDKwK�K�K�K�K�K�K�KHKGKDK@KqK�K�K�K�K�K�K�K�K�KlKBK[K�KZK?KCKEK�K�K�K�K�KFKFKGKBK@K?K?KCKAKBKBKBKBKAKCKK>K=KKEKGKAK0K/K0K/K3K=K=K=K=K=K=K:K9K8K7KCKHKCK�K�K�K�K�K�KKKKKKKKKKKKKBKHKDKjK�K�K�K�K�KaKK#KKKK!K)K9KLK[K_KYKOKGKBKCKAKK>K?K=K;K;K;K:K5K2K7K>K?KBKCKBK?KAKAKAKBKBKBKAK>K?K>K@KBKAKAK>K>K>K>K>K>K>e]r�(K0K9K K!K#K"KKKK.K0KKKKKKKK	KK,K%KKKK!K#K$K'K*K+K.K0K4K)K$K'K%K&K"KKKKKKKKKK;KEK:K:K:K9KK6K/KBKDKCK?K/K)K&K%KKKKKK
KK
+KKK
KK
K
KKKKKKKKKK%KK>KK>K>K?K;K;K=K9K@KIKCKmK�K�K�K�K�K0KKKKKKKKKKKK7KIKFKJK�K�K�K�K�K�K$K"K KKKKKK!K3KFKZKaKZKNK@KAKCK9KKKK(K;KAK>KBKBKBKAKAKAK=K?K>KK>K>K@KBK?K?K>K>K>K>K>K>K>e]r�(K:K KKKKKKK(K2KKKKKK
+K	KKK,K(KKKK!K$K%K%K'K+K.K2K0K(K%K&K%K&K#KKKKKKKKKKKAKEK:K:K:K9KK>K6K0KCKDKCK?K/K)K'K$K"KKKKK
KK
+K
+K
+KKKKKKK
KKKKK K%KKK K@KOKMKOKQKPKQKRKRKPKPKPKPKPKPKPKPKQKQKRKRKQKQKRKRKSKRKRKQKQKRKRKPKQKQKPKPKQKPKOKQKRKLKEK@KBKGKGKIKKKJKEKSK�K�K�K4K*KlK�K�K�K�KzKJKLKHKRK�K�K�K�K�K�K�KcK"KJKPK@KIKJK:KK!KK K#K(K)K"K%K�K�KzKHKMKJK�K�K�K�K�K�K�K�K�KKK2KcK�KWKFKCKTK�K�K�KZK4K_K�K�K�K�K�K�K_KCKGKCKOK�K�K�K�K�K�K�K�K�KYK9KfK�KGKHK@KgK�K�K�K�KXK@KfK�K�K�K�K�K�K�KZKAKCK=KXK�K�K�K�K�K�K�K�K�K�KHKIK�K|K@KCK@KoK�K�K�K�KaKCKGKBKAKVK]KLKIKAKAKBKAKAKAKDK(K/KKKHKIKFKCK?KFKJKNK�K�K�K�KvKAK;K=KFKEKIKIKK>K=KK@K?K=K=K>K>K=K:K9K5K4K7K:K?KAKBKAK@KAK@K>K?K?KAKBK?K>K?K>K>K>K>K>K>e]r�(K'KKKKKKK!K-KKKKKK	KKKK(K&KKKK"K#K&K)K)K)K-K/K/K+K%K&K&K&K#KKKKKKKKKKK"KBKCK8K:K9K9K;K%K2K@K?K>KK9K:K7KDK�K�KyKIK}KcK8KQK�K�K�KwKIKMK�K�K�K�K�K�K�K�K�KvKHK�K�K�K�K�K�K�K�K�K�K�KMKYK�K�K�K�KeK,KBK@KAKBK@KAKHKBK7KKBKDKCKBKBKBKBKEKGKEK2K-K-K0K5K>K@KBK@K>K>K?K=K:K:KBKEKDK�K�K�K�K�KPKKKKKKKKKKKKK;KHKCKTK�K�K�K�K�K�K"K!KKKKKKKKKKK%K4K@KAKAKCKK?K>K?K=K;KK>K@KBKBKAK?K>K?K>K>K>K>K>K>e]r�(KKKKKKKK/KKKKKK	KKKK&K'K!KKK"K!K%K'K)K)K+K1K2K(K%K'K%K'K"KKKKKKKKKKKKKEKCK8K:K;K:K;K#K4KAK@K>K;K1K4KEKCKDK@K-K(K%K#K$K KKKKK
KK	KK	K	K
+KK
K
K
KKKKK"KKKKKKKK;KPKRKQKQKPKRKQKPKPKPKPKPKPKQKRKQKPKQKRKRKRKSKQKPKQKQKPKQKQKQKQKSKTKQKQKRKQKPKQKRKRKQKLKDKAK7K7KGKJKHKNK`K;K'KgK�K�K�K�KKHKJKFKMK�K�K�K�K�K�K�KqK&K=K)K;KMKFKCKKKK-K,K!K(K'K!K9K�K�KRKHKIKHK�K�K�K�K�K�K�K�KaK-KVK�KdKEKGKJK�K�K�KnK4KPK�K�K�K�K�K�KuKDKFKCKDKxK�K�K�K�K�K�K�K�KoK:KSK�KWKCKCKRK�K�K�K�KrK?KUK�K�K�K�K�K�K�KqKAKEK>KDK�K�K�K�K�K�K�K�K�K�K]K@KeK�KPKAK@KOK�K�K�K�K�KHKDKDKTK^KZKHKLKMK@KBKBKAK@K@K;K KDKIKHKGKDK@KAKIKEKjK�KpKGKKK>K>K?K>K=K=K;K8K5K4K5K9KK?K@K@KAKAK?K>K>K>K>K>K>K>K>e]r�(KKKKKKK)KKKKKKKKKK#K%K KKKK!K#K'K)K*K*K0K1K'K#K'K&K&K KKKKKK
KKKKKKK KHKCK8K:K9K:K:K"K6KBKBK>K=K1K6KEKCKCK=K*K&K#K$K$K#KKKKK
K
KK
KKKKKKKKKKKK#KKKKKKKKKKKK�KbKAKEKJK�K�K�K�K}KCKNK�K�K�K�K�K�K�K}KCKFK@K@KwK�K�K�K�K�K�K�K�K�KmKCKXK�K^K>KBKBK�K�K�K�K�KSKHKGK\K^K]K\K^KPKAKBKBK@K>K>K@K"KKFKHKKKPKAK:K=KKzK�K�K�KeKAKHKFKDKDKDKDKDKGKGK?K/K0K0K/K9KBKBKBKBKBKBK@K?K:K&K=KHKEK�K�K�K�K.K"KKKKKKKKKKKKK>KFKBK_K�K�K�K�K�K|KKKKKKKKKKKKKKK.KAKAKCK:K"K)K�K�K�K�KvKqKlKbKWKLKDKBKAKAK?K>K?K?K>K?K?K=KK?K?K>K>K>K>K>e]r�(K
KK
KKK$KKK
KKK	KKK
+KK!KKKKK K$K(K)K*K,K-K0K'K"K%K&K%K&KKKK!K"K!K"K%K%K&K(K(K%K.KIKBK9K9K9K8K7K!K9KBKAK>KKEKBK�K�K�K�K�KHKFK{K�K�K�K�K�K�K�KIKDKAK?KlK�K�K�K�K�K�K�K�K�KzKEKNK�KnK;KBK>K|K�K�K�K�K^KGKCKLKYK^K_K_KNKAK@KBKAKAK>KAK&K0KLKHKHKFKBK>KBKHKGK=K;KK>K=K=K?K?K>K>K>K6K1K"K1K=K8K6K4K5K6K9K?KAK>K=K=K>K>K?K?K>e]r�(K	K	KKK KKKK
+KKKKKKKKKKK!K!K#K'K)K)K+K.K.K'K#K(K&K&K#K'K+K-K+K+K-K)K(K*K*K)K*K'K"K5KKKAK9K:K:K9K5KKK;K1K:KCKCKBK9K*K'K%K!K K KKKK
K
K
KK
+K
+K
+KKKK
KKKKKK"K9KBKLKMKUKWKTKUKTKRKIKMKOKMKOKPKPKPKPKPKPKPKPKPKPKRKSKRKSKPKOKQKRKRKSKRKRKRKRKRKRKSKRKPKQKPKPKPKPKQKQKPKPKRKRKOKHKAKAKEKGKHKKKGKSKqK�K�KMKHKHKHKzK�K�K�K�K�K�K�K+K1KyKiKCKGKHK6K;K/K*K*K,K�K�K�K�K�K�K^KJKKKRKSKNK�K�K�K�K�K�KzK4KBK�K{KEKJKAKqK�K�K�KAK>KuK�K�K�K�K�K�KOKFKEK?K\K�K�K�K�K�K�K�K�K�KKK>KvKxK@KEK@KuK�K�K�K�KQKBKnK�K�K�K�K�K�K�KRKAKDKKIKVK\K8K=KCKAKBKBK?KAK0K&KLKIKIKFKBK=KAKGKHK@K;K=K=KK>K?K>K@K1K+K)K&KK?K>K>K>K>e]r�(K	KKKKKKKKKKKKKKKKKKKK"K)K'K&K*K.K/K%K$K'K'K'K'K#K"K K K$K#K$K#K#K$K$K%K#K!KK5KLK>K9K:K:K;K3K!K>K?K@K?K;K.K=KEKCKDK9K+K*K&KKKK KKKKK
K
K
+KKK	K
+K
+KKKKKKKAKSKUKXKYKZK[K[KXK[KXKVKWKTKLKIKNKPKPKQKQKPKPKPKPKPKRKRKQKQKPKPKPKQKQKQKQKQKQKQKQKQKQKQKPKPKPKQKPKOKRKSKQKPKQKQKQKQKNKFKAKAKDKHKGKGKHKZKnKLKGKHKDKuK�K�K�K�K�K�K�K0K.KxKtKFKIKCKtK�K6K)K/K/K�K�K�K�K�K�KXKJKGKdK�KZK�K�K�K�K�K�K�K8K?K�K�KFKKKAKjK�K�K�KGK:KnK�K�K�K�K�K�KUKFKEK@KVK�K�K�K�K�K�K�K�K�KUK?KmK�KDKEK?KlK�K�K�K�K]K@KdK�K�K�K�K�K�K�K\KCKGKK=K:K/K>K@K>K@KBKAKCK;K!KEKIKJKHKDKAKEKHKHKCK:K:K:K:K:K8K7K=KBKAKIKAK8K:K9K;KqK�K�KUKeK�KHKK�K�K1K-K-K&K(K2K-K KKKKKKKKK3KFKDKMK�K�K�K�K�K�K&KKKKKKKKKKKKKKK@KCKEKK@K=K5K*K*K=K=KK?K?K>e]r�(KKKKK	KKKKKKKKKKKK
KKK K#K)K(K)K,K.K%K K&K%K%K!KKKKKKKKKKKKKKKKK5KIK:K9K9K9K;K2K#K>K>K?K>K;K-K=KEKCKDK8K+K*K&K KKKKKKKK
K
K
+KKK
+KKKKKKKKKIKXK[K\K[K]K_K_K_KcK_KVKMKKKKFKHKEK@KK=K=K=K;K9K;K:K9K8K3K2K1K5K:K>K?e]r�(K	KKKKKKKKKKKKKKK
KKK!K%K&K(K)K,K0K'K!K'K&K'K KK
KKKKKKKKKKKKKKKKK>KKDKCKEK8K,K*K&K!K!K KKKKK
K
K
K
KK
+K
+KK
K
KKKKKKKKYKZKZK\K`KbK`KVKGK6K&KKKKKK(KDKOKOKPKPKPKPKPKPKPKPKQKQKQKQKPKPKPKQKPKPKPKPKPKPKPKPKPKPKPKPKQKPKPKPKPKPKPKQKPKMKKKNKPKKKDK@KDKJKJKIKIKIKGKCKsK�K�K�K�K�K�K�K7K)KnK~KEKJKDKpK�KQK+K1K2KDK�K�K�K�K�K{KHKIKXK�K�K�K�K�K�K�K�K�KAK5KpK�KLKEKFKZK�K�K�KYK2K_K�K�K�K�K�K�KdKCKEK@KIK�K�K�K�K�K�K�K�K�KjK;KXK�KSKBKAKRK�K�K�K�KrK?KRK�K�K�K�K�K�K�KvKAKHK?KAK{K�K�K�K�K�K�K�K�K�KlKCKWK�K_K>KBKBK�K�K�K�K�KVKDKIKHKHKGKFKEKBK?KBKAKBK?K>K@K"K2KKKHKHKEKAK9KCKIKFK=K7K:K:K:K9K7K:KAKCKEKIKKBK>K>K>K?K?K=K;K;K;K;K;K9K8K6K4K3K5K7e]r�(KKKKKKKKKKK
KKKKKKKK$K'K'K'K*K0K*K$K'K'K%K"KKKKKKKKKKKKKKKKKKKAKHK:K9K:K;KK>KKCKBKDK6K+K*K&KK KKK KKK
K
K
K
KK
+K
+KKK
KKKKKKQK[K]K]KUKEK3K&KKKKKKKKKKKK2KLKSKQKQKQKPKPKPKPKPKOKNKPKPKQKQKPKQKPKPKPKPKPKPKPKPKPKPKPKPKPKPKPKPKPKPKPKQKQKMKOKQKRKRKJKAK?KEKHKEK?KGKGK\K�K�K�K�K�K�K�KKNK�K\KAKCKIK�K�K�K�KKDKKK�K�K�K�K�K�K�K�KEKGKCK@KpK�K�K�K�K�K�K�K�K�KzKGKLK�KqK=KCK:KyK�K�K�K�KcKK@K7K8KIKHKHKEKBK>KBKHKFK?K8K9K9K:K9K7K:KAKEKCKHK=K6K7K;KEK�K�K�KNKsK�KGK>K�K�K�K�KPKGKxK�K�K�K�K�K�K�K�K�KTKaK�K�K�K�KwK�K�K�K�K�KrKGKyK�K�K�K�K>KKGKFKGKFKGKJKBKsK�K�K�K�KxKAKHKEKCKBKDKEKEKFKHKEK4K2K5K2KGKMKAKCK@KAK8K(K+K3K8K?KGKFK=K.K-K.K-K1KEKGKDKDKFK*KKKKKKK*KDKEKBK�K�K�K�K�K�K@KKKKKKKKKKK
KKKK3KDKBK?KkK�K�K�K�K�K�KK
+KK(K9KKKZK]KVKRKSKWK^K`K^K[KTKMKBK@KAK@K>K>K>K?K?K=K>K>KK>KKRKQKPKPKQKQKQKPKMKKKNKPKQKQKNKQKPKPKPKPKPKPKPKPKPKPKPKPKPKPKPKPKPKPKPKPKQKQKPKPKPKQKQKNKHK>K@K:K4KEKIKGKPKjK�K�K�K�K�KCK"KdK�KHKJKBKbK�KwK3K5K2K6K�K�K�K�K�K�KGKPKRK�K�K�K�K�K�K�K�K�KSK0K_K�KZKCKCKNK�K�K�KlK2KOK�K�K�K�K�K�KxKBKHKFKBKrK�K�K�K�K�K�K�K�K�KBKEK�KgK?KDKCK�K�K�K�K�KLKEKwK�K�K�K�K�K�K�KMKEKEK>KcK�K�K�K�K�K�K�K�K�K�KKKIK�K�K@KDK6KdK�K�K�K�KvKBKJKuKeKNKHKFKHKDKAKAKBK?K>K>KAK@KGKIKHKEKBK>K;KEKGKCK7K7K8K:K9K7K8K>KDKCKGKBK8K8K;KAKoK�K�K\K_K�KSK8KpK�K�K�KdKDK[K�K�K�K�K�K�K�K�K�KfKLK�K�K�K�K�K�K�K�K�K�K�KMKYK�K�K�K�KdKK=KHKFKFKGKIKFKSK�K�K�K�K�KEKGKEKCKEKDKCKFKGKEKHK:K0K5K.K}K|K>KEKDKAKBKCK@K>K=KK=KK>K=K;K9K:K:K9K:e]r�(K
KKKKKKKKKKKK
KKKK!K#K%K'K)K.K'K#K'K%K&K"KKKKKKKKKKKKKKKKKKKKKGKEK8K9K:K:K;K$K.KAK>K>KK}KuKAKEK@KyK�K�K�K�KSK@KkK�K�K�K�K�K�K�KUKEKEKK>K@K?K>K>K@K=KCKIKGKFKDK?K;KEKGKDK7K7K8K8K8K8K7K=KCKDKHKGK:K9K:K>K^K�K�KnKOK�KfK9KZK�K�K�KyKHKMK�K�K�K�K�K�K�K�K�KMKGK�K�K�K�K�K�K�K�K�K�K�KXKJK�K�K�K�K�KK/KJKFKFKGKIKJKGK�K�K�K�K�KZKCKGKEKFK=K8KDKGKEKHKAK3K4K7K�K�KBKEKGKCKAK@K=K;KK&KKKKKKK1KBKBKEK�K�K�K�K�K�K2KKKKKKKKKKKKKKK8KDKCK?KrK�K�K�K�K�K�KKK	KKK
+KK.K@KRKZK]KVKRKTKWK\KZKBK:K8K8K;K@K?K=K=K>K>K=KK>KKrKKBKEK?KlK�K�K�K�K]KAKaK�K�K�K�K�K�K�KcK@KFK=KNK�K�K�K�K�K�K�K�K�K�K`KAKcK�KSK>K?KHK�K�K�K�K�KSKBKnK�K�K�K�KsKRK@K@K>K?K?K>K?K?KBKJKGKGKEKBK=KCKGKFKKOK�K�K�KIKK�KIKFK�K�K�K�KQKEK�K�K�K�K�K�K�K�KsK'KGKhK�K�K�K�K�K�K�K�K�K�KiKFKyK�K�K�K�K4KKGKGKFKGKIKJKFKlK�K�K�K�K�KAKIKFKFKDK>KEKGKFKGKEK9K0KPK�K�KbKBKGKBK@K>K@KKDKDK@K+K,K.K.K.K?KHKFKFKDK2KKKKKKKK?KEKAKnK�K�K�K�K�KkKKK
KKKKKKKKKKKK!KCKBKAKKK�K�K�K�K�K�KGKKKKKKKKK(K4KDKVK^KYKOKNKWKJK;K=K8K8K5K-K7K?KAK>KK>KK=KyK�K�K�K�K�K�KNKBKDKAKVK�K�K�K�K�K�K�K�K�KXK=KhK�KDKBK?K`K�K�K�K�KhKAKVK�K�K�K�K�K�K�KnK?KEK?KGK�K�K�K�K�K�K�K�K�K�KnKAKXK�KaK=K@K>K�K�K�K�K�K^K@K_K�K�K�K�K�K�KCK?KBKAKAK?K?K>KBKIKGKGKGKCK>KAKIKFK?K7K8K8K7K8K8K7K?KCKBKGK?K7K8KKyK�K�K�K�K�K�K
KKKKKKKK
+KgKzKMKBKPK]KZKPKIK=K>K?K=K:K"KK$K0KK>K=K3K5KCKAKDK@K-K+K+K"KKKKKKKKKKKK
K
KK
+KKKKKK
K
KKK
KKKKKKKKKKKKKKKKKKKKKKKKK.KIKRKNKOKQKPKPKPKPKPKPKPKPKPKPKPKPKPKPKPKPKPKQKPKPKPKQKPKPKPKPKPKPKQKPKOKOKQKPKPKPKPKPKQKNKGK>K=KBKIKIKHKGKFK@KNK�KYKFKEKRK�K�K�K;K3K-K}K�K�K�K�K�KZKHKHK~K�K�K�K�K�K�K�K�KvK2KCK�KyKAKHKBKqK�K�K�KEK8KpK�K�K�K�K�K�KVKAKCKBKOK�K�K�K�K�K�K�K�K�KcK:K]K�KNKDK@KTK�K�K�K�KtKBKRK�K�K�K�K�K�K�KyKBKEKAKAKtK�K�K�K�K�K�K�K�K�K}KFKMK�KrK;KBK9KuK�K�K�K�KoKAKTK�K�K�K�K�K�KPK>KBKAKAK?K?K=K@KFKFKGKGKDK?K>KEKEK@K7K7K8K7K8K7K7K:K?KAKGKEK8K7K;KEKkK�K�KiKYK�K�KJKbK�K�K�KpKFKPK�K�K�K�K�K�K�KYKK6KKK�K�K�K�K�K�K�K�K�K�K�K\KJK�K�K�K�K�KwKsKBKGKGKGKDKGKGK�K�K�K�K�KcKBKHKDKCKDKDKCKFKFKEKBK7K�K�K�K�KQKAKDKDKBK?K?K;K:K8K?KCKCK>K*K*K-K-K/KEKGKEKHK6KKKKKKKKKAKEK>K{K�K�K�K�K�KKKKKKKKKKKKKKKK&KCKCKBKRK�K�K�K�K�K�K>KK	KKKKK	KgK�K�K�K�K]KDKIKZK\KJK=K?K=K=K6KKKKK.KAK=KK>K=e]r�(KKKKKKKKKK
KKKKK$K(K,K0K+K%K&K&K&K KKK
KKKKKKKKKKKK K K K KK"K K"KK5KMK>K>K;K9K:K8K"K8KAK?K=K>K1K6KCKAKDK?K/K,K*K"KKKKKKKKKK
KKK
KK	KKK
+KK
K
K
KKKKKKKKKKKKKKKKKKKKKKKKKK KKKK:KPKSKPKPKQKQKQKPKPKPKPKPKPKPKPKPKPKPKPKPKQKOKQKOKQKPKPKPKPKPKPKPKPKPKPKPKPKPKPKQKQKOKMKOKNKGK@KCKFKHKHKFKIKIKbKPKEKDKMK�K�K�K?K4K0KMK^KPK�K�K�KeKFKHKrK�K�K�K�K�K�K�K�K�K2K>K�K�KCKHKBKgK�K�K�KLK3KiK�K�K�K�K�K�KbK@KAK@KKK�K�K�K�K�K�K�K�K�KpK:KRK�KXKBKBKJK�K�K�K�KKDKLK�K�K�K�K�K�K�K�KEKDKDK?KjK�K�K�K�K�K�K�K�K�K�KLKHK�K�K?KCK;KdK�K�K�K�K}KGKKK�K�K�K�K�K�K`KK>K?K>K>KFKGKFKGKFKCKK=KKGKQKEK?K>K>K?K(KKKKKwK�KSK>KAK?e]r�(KKKKKKKKKKKKKK#K'K*K1K.K#K%K&K&K&K$K#K$K"KKKKKKKKKKKKKKKKKKKKK6KKKOKXKGK;K:K4K#KK4K5KK
+KKKKKK�KcK@KFKCK�K�K�K�K�KKKEK{K�K�K�K�K�K�K�KKKBKDK?K^K�K�K�K�K�K�K�K�K�K�KXKFKpK�KIK@K?KVK�K�K�K�K�KMKEKwK�K�K�K�K�KrK=KBK@K@K?K?K>K?KCKGKGKGKFKBKK@KGKKK=K@K@K?K7KKKK$K�K�K�KYKKKBe]r�(KKKKKKKKKKKKK K&K*K-K+K$K&K&K&K#KKK"K$K!K#K'K'K'K$K#KKK KKKKKKKK
KKK
+K9KNKmKsKSK;K:K3K$K>K@K>K=K:K/K8KAKBKBK=K-K*K,KKKKK
KKKKKKKK
KKKKK	KKK
+K
+K
+KKKKKKKKKKKKKKKKKK
+K	K	K
+K
+KKKKKKK%KKKK0KKKQKMKNKQKQKQKPKPKQKQKQKQKPKPKPKQKOKMKOKQKPKPKPKQKQKQKPKPKPKPKPKPKPKPKPKPKQKQKQKQKQKNKNKQKMKCK=K@KGKFKGKHKGKEKGK�K�K�K>K2K2KKK
+KK0K/KCKJKGKWK�K�K�K�KeK�K�K�K�K>K2KqK�KKKDK@KZK�K�K�K`K4KZK�K�K�K�K�K�KtKBKEKDKEKuK�K�K�K�K�K�K�K�K�KDKDK�KpK?KFK?K{K�K�K�K�KRKAKnK�K�K�K�K�K�K�KVK?KDK>KRK�K�K�K�K�K�K�K�K�K�KbKCKbK�KUK=KAKHK�K�K�K�K�KYKBKkK�K�K�K�K�K�KBKBKBKAK?K>K?K?K@KHKIKGKFKBKKFKGK=K5K:K9K7K8K8K5K:K?K>KFKAK7K8K=KHKxK�K�K\KjK�K�KcKsK�K�K�KfKDKVK�K�K�K�K�K�K+KKK7KLK�K�K�K�K�K�K�K�K�K�KRKAKLK�K�K�K�K�KTKeKFKEKGKEKFKKKDK�K�K�K�K�KiKCKHKGKHKDKDKDKCKDKFKVK�K�K�K�K�KZK@KDKBKAK>K=KKCKDK?K-K-K-K,K1K?KDKCKCKEK5K"KKKK
KKK>KCK@K\K�K�K�KIK)KKKKKKKKKKKKKKKKK>K@KBK>K�K�K�K�K�K�K}K
+KK
+K-KGKBKCK�K�K�K�K�K�K�K�KLKXK�KQK?KAK>K?K+KKK5K�K�K�K�KbKZe]r�(KKKKKKKKKKKKK%K)K-K,K$K&K%K&K$KKKKKKKKKK K!K"K!K#K'K(K&K&K KKKK
+K
+KKK=KTK�K�KaK=K;K1K"K>K@K?KKEKIKHKGKwK�K�K�KOKZK�K�K�KDK/KjK�KOKCKAKSK�K�K�KkK3KRK�K�K�K�K�K�K}KCKDKBKAKkK�K�K�K�K�K�K�K�K�KJK>KzK}K@KFK>KnK�K�K�K�K\K?KdK�K�K�K�K�K�K�KdK@KFKAKJK�K�K�K�K�K�K�K�K�K�KpKCKVK�KdKKBKAK?K>K>K@K>KDKHKGKEKDK=KK=KKKK>K?K9KAKHKGKDKEK?K:KEKGKAK6K7K7K8K8K5K7K9KK=KKCK>KhK�K�K)K*K,KKKKKKKKKKKKKKKKK>K?K?KAK�K�K�K�K�K�KsKKKK!KFKDKDK�K�K�K�K�K�K�K�KFK_K�KXK?KCK@K>K.KKVK�K�K�K�K�Kie]r�(KKKKKKKKKKKK&K)K)K&K%K%K'K#KK
KKKKKKKKKKKKKKKKKKKKKK
KKKKKCKfK�K�KpK:K9K,K)K@K?K?K;K7K-K>KDKCKCK8K+K,K+KKKKK
KKKKKKKKK
KKKKKKKKKKKKKKKKKKKK
+KKK
+K	K	KK
+KK
+KK
KKKKKKK#KKKKKKKKK3KLKNKPKQKPKOKMKNKQKQKPKQKPKNKQKPKQKQKQKQKPKPKPKPKPKPKQKQKQKPKPKPKPKPKQKOKMKPKQKQKPKPKPKPKOKNKGKKDKFKHKDKEK?KDKKKKK&K;KDKHKFKFKxKnKxK�K�K�K�K�K�KWK,KYK�K]KCKFKFK�K�K�K�K6KDK�K�K�K�K�K�K�KLKDKCK?KUK�K�K�K�K�K�K�K�K�K]K:KdK�KKKFKBKVK�K�K�K�KvKBKSK�K�K�K�K�K�K�K|K@KEKDKBKmK�K�K�K�K�K�K�K�K�K�KJKFK~K�K?KCK=K`K�K�K�K�K�KJKIK�K�K�K�K�K�KlKK?K>K=KBKEKFKFKDK@K:K@KFKCK9K7K8K7K7K7K5K5K:KK7K7KK>K>K=KKKKKKK:KHKEKDKFKbKbKQKvK�K�K�K�K`K+KPK�KdKBKFKDK�K�K�K�K;KK?K?K>K?K>K>K>K?KFKFKFKDK@K;K?KEKEK?K7K8K7K7K7K5K5K8K;KK?K?K=K;K9K9K@KCKEK:K-K.K.K/K4KAKEKDKFK@K-K&KKKKK
K K@KCK?KgKMK&K,K)K'KKKKKKKKKKKKKKKKK?KBKDKBK�K�K�K�K�K�KmKKKKKBKAKBK,KvK�K�K�K�K�Kye]r�(KKKKKKKKKKK"K+K&K%K%K&K"KKKKKKKKKKKKKKKKKKKKKKKKK
KKKK
K!KHK�K�K�KbK7K9K&K.KDKAKK?KK=KDKDKBK6K5K8K7K6K8K7K6K;KK;KK-K*K+K)K*KK	KKKKKKKKKKKKKKK5KDKBK=KhK�K�K�K�K�K�K!KKK%KEKDK@KdK�K�K�K�K�K�K�KjKDK�K�K=KBKBKAKKEKRKcK�K�KtK*KBK�KwK>KDK=KqK�K�K�KIK3KiK�K�K�K�K�K�KfK?KFKBKDK}K�K�K�K�K�K�K�K�K~K@KHK�KnK>KEK@K}K�K�K�K�KSKDKpK�K�K�K�K�K�K�K\K@KDK?KMK�K�K�K�K�K�K�K�K�K�KrKAKUK�KiK=KDKK=K=KK�K�K�K�KWKEKdK�K�K�K�KPK
KKKK9KMK�K�K�K�K�KKK;K@KDKDKCKEKDK@K6K?KBKBK2KK3KFKPK�K�K}K�K}K/K;K�KK?KEK=KiK�K�K�KQK0KaK�K�K�K�K�K�KoK>KGKGKBKuK�K�K�K�K�K�K�K�K�KEKCKKzK?KEKK>K*K;KHKFKGKEKAK;KAKCKDK9K6K8K7K7K8K7K7K9K9K=KDK?K8K7K;KJK�K�K�KZKZKlK@K8KqK�K�K�KjKHKSK�K�K�K�KfKKKKK*KFK}K�K�K�K�K5K
KKKKK8KJKxK�K�K�K�K�KuKDKEKGKGKGKHKHKZK�K�K�K�K�KHKDKCKDKCK9K9KBKBKHKDKxK�K�K�K�K�K@K@K>KKAK4KKK+KK`K�K�K�K[K.KYK�K�K�K�K�K�KyKAKEKDK?KlK�K�K�K�K�K�K�K�K�KMK>KvK�KAKEK=KeK�K�K�K�KkK?KYK�K�K�K�K�K�K�KwK@KEK@KBKsK�K�K�K�K�K�K�K�K�K�KLKGK{K�KBKBKK?K=K?K&K.KHKGKGKDKAK;K>KEKHK=K3K6K7K8K6K5K5K7K:K:KAKCK9K7K9KBKpK�K�KpKGKJK=K8KYK�K�K�K�KIKFK�K�K�K�K�KKKKKKDKaK�K�K�K�KCKK)KKKKDKMKWK�K�K�K�K�KZKCKGKEKEKGKGKIKHK�K�K�K�K�K^KBKFKDKCK;K;KDKCKGKGKVK�K�K�K�K�K[K=KAK=K=K=KKAKMK�K�K�K�K�KYK?KhK�K�K�K�K�K�KCKAK?K>K?K=K@K+KKGKHKGKCKAK;KKBK=K=K=KK[K�K�K�K�K�K�K�K�K�K�KdKDK`K�K\KK@K>K?K=K>K5KKCKIKFKDKCK>K;KEKCKBK7K4K5K5K7K7K4K5K8K:K=KFK;K7K7KK;K}K�K�K�KcKEKWK�K�K�K�K;K
KKKK.KHK�K�K�K�K�KMKLK0K0K=KK9KFKyK�K�K�K�K�K�KGKGKEKEK>KAKKKVK�K�K�K�K�KGKDKDKBKDKCK?KAKBKHKDKkK�K�K�K�K�KHK>K>K>K=KKEKCKCK3K)K,K&KKKKK4KDK>K?K.K(K)K(K)K KKKKKKKKKKKKKKKK(KDKBK?KLK�K�K�K�K�K�KSK�K	KK+KFKAKJK�K�K�K�K�K�K�K�KDKiK�K_K=KAKAKKJK�K�K�K~K1K@K�K�K�K�K�K�K�KPKCKDK?KNK�K�K�K�K�K�K�K�K�KnK;KTK�K^KAKBKBK�K�K�K�K�KKKEKwK�K�K�K�K�K�K�KRKAKDK>KRK�K�K�K�K�K�K�K�K�K�KqKCKQK�KlK:KCK;KzK�K�K�K�KxKDKOK�K�K�K�K�K�K_K:K@K>K?K=KKK:KKKFKEKDK?K:KBKFKEK9K4K5K4K7K7K4K5K7K7K9KFKAK6K7K:KBK|K�K�KfKCK:KKEKDKDKCK@KAKBKGKGKMK�K�K�K�K�KjK9K@K>K=KKAK8K'K)K)K(K'KKKKKKKKKKKKKKKKK?KAKDK=KzK�K�K�K�K�K�KKKKK=KEK=KsK�K�K�K�K�K�K�KeKEK�K�K?K>K=K?KGK�K�K�K�e]r�(K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KyKqKcKVKKKAK8K0K+K'K"KKKKKKKKKKKKKKKKGKlK�K�K}K?K9K7K7KAKBKAK?K6K5K@KFKCKDK>K,K*K+KKKK
KK
+KK
+KKKK
KKKKKKKKKKKKKK	KKK
KKKKKKKKKKKKKKKKKKKKKKKK&K,K�K�K�K�K�K�K�K�K�K�K�K�KcK5K#K%K(K$KKKK$K;KLKNKJKIKMKKKKKNKNKMKMKMKMKNKMKMKNKNKLKNKMKNKMKMKMKMKNKMKMKNKMKMKMKNKLKNKNKMKNKMKMKMKNKMKIKKKMKIKBK4K4KBKDKEKBKCKBKLK%K=KVK�KQK'KYK�KXKBKCKDK�K�K�K�K6K9K|K�K�K�K�K�K�KWK?KEK>KIK�K�K�K�K�K�K�K�K�KxK?KKK�KiK=KCK?K�K�K�K�K�KTKAKmK�K�K�K�K�K�K�K`K?KDK?KHK�K�K�K�K�K�K�K�K�K�K�KDKHK�K�K=KDK;KiK�K�K�K�K�KHKHK�K�K�K�K�K�KqK;K@K>K?K=KKkK�K�K|KEK=K;K:KNK�K�K�K�KLKEKxK�K�K�K�KKKK
KK=KOK�K�K�K�KoKK,K`K~KWKQKGKIK�K�K�K�K�KyKaKDKEKCKDKDKFKDKmK�K�K�K�K�K@KDKDKCKBKBKBKBKCKGKBK�K�K�K�K�K�K=K=K=K=KKkK�K�K�e]r�(K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K~KsKeK[KPKFK>K6K.K'K#KKKKKKKKHKtK�K�KwK=KK)K*K)K&KKKK*K?KLKKKLKKKLKMKNKMKMKNKNKMKNKMKNKNKLKMKNKMKMKMKMKMKMKNKMKMKNKMKMKNKMKNKNKMKMKMKMKMKNKMKKKLKLKLKMKHK>K;K?KAKAKCKEKDKAK>K>KqK_K#KOK�KbK?KGKBK�K�K�K�KKFK?KEK�K�K�K�K�K�K�K�K�K�KBKCK�KvK=KEK=KsK�K�K�K�K`K@KaK�K�K�K�K�K�K�KmK=KEK@KDKzK�K�K�K�K�K�K�K�K�K�KKKBK|K�KCKBKK?K>K?K>K=KAK*KKGKFKFKDKDK>K=KBKDK@K3K3K5K4K4K4K4K6K8K9KAKDK8K5K7KK�K�K�K�K[KEK[K�K�K�K�K2K
KKKK0KEK�K�K�K�K�K!KAKYKGKbKZK?KDKyK�K�K�K�KuKdKHKDKCKDKCKEKHKRK�K�K�K�K�KLKBKDKCKBKBKBKBKCKHKDK]K�K�K�K�K�KPKK=KKCKCKCKEK:K)K+K,KKKKK
K
KK
+KK
+KKKKKKKKKKKKKKKKK
+KKKKKKKKKKKKKKKKKKKKKKKKK$K5K@K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KqKIK0K-K,K(KKK K2KIKOKKKKKKKMKNKMKNKNKLKNKMKMKNKMKMKOKMKNKNKNKNKMKMKMKNKMKMKMKMKNKMKMKMKMKNKNKNKNKLKKKNKMKKKKKLKLKEK=K;K=KCKDKDKDKFK=KbK`K#KGK�KiK=KGK>K{K�K�K�KDK0KkK�K�K�K�K�K�KhK=KDK@K@KxK�K�K�K�K�K�K�K�K�KGKAK|K�K?KFK>KeK�K�K�K�KnKAKVK�K�K�K�K�K�K�K{K@KDK@KBKlK�K�K�K�K�K�K�K�K�K�KWKBKjK�KNK?K@KIK�K�K�K�K�K]K?KcK�K�K�K�K�K�KGK=K?K>K?K>K@K3KKCKGKFKGKFK>K;KCKDKBK4K4K5K4K4K4K5K4K7K7K9KEK>K7K8K:KJK�K�K�K[K@K=K>K9KsK�K�K�KoKBKJK�K�K�K�KcKKKKK$KCKhK�K�K�K�KCKjK�KTK:K4KEKIKZK�K�K�K�K�KhKCKEKCKDKCKEKIKDK�K�K�K�K�KlK@KEKDKBKBKBKAK@KEKGKGK�K�K�K�K�KvK:K?K=KK?KKsK�K�K�KNK.KbK�K�K�K�K�K�KuK>KBK?K=KmK�K�K�K�K�K�K�K�K�KNK?KpK�KDKDK>KWK�K�K�K�KyKAKLK�K�K�K�K�K�K�K�KCKBKAK?K^K�K�K�K�K�K�K�K�K�K�KeKAKYK�K]K;KAK>K�K�K�K�K�KoKAKVK�K�K�K�K�K�KVK;K?K?K>K?K?K:KK:KFKEKGKDK?K9KAKDKCK6K2K5K5K4K4K4K4K5K5K7KCK@K5K8K8K@KxK�K�KpKAK;K:K8KXK�K�K�K�KGKDK�K�K�K�K�K-KKKKKAKRK�K�K�K�K{KvK�K�KOK$KPKKKIK�K�K�K�K�K�KiKAKDKDKCKDKHKFKgK�K�K�K�K�KCKDKBKBKBKBKAK@KCKFKCKvK�K�K�K�K�KDK>K=KKFK>KhK�K�K�KYK,KVK�K�K�K�K�K�KK@KAK>K:KcK�K�K�K�K�K�K�K�K�KYKK>K>K=KK/KGKDKGKDK@K:K>KDKEKKDK6K8K7K9KeK�K�K�KGK=K:K9KCK�K�K�K�KSKBKfK�K�K�K�KYKKKKK6KHK�K�K�K�K�K�K�K�K�K5KEKHKEKxK�K�K�K�K�KoKDKCKDKCKCKEKGKNK�K�K�K�K�KQK?KBKBKBKBKBKBKBKEKEKUK�K�K�K�K�K_K;K>KK9K8K=KCKDKCKDK7K*K+K)KKKKKKKKKKK
+K
KKKKK
KKKKKKKK
+KKKKKKKKKKKKKKKKKKKKKKKKKKK(K:KBK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KdK@K,K*K'KKK3KJKOKLKMKMKMKNKMKMKMKMKMKLKKKMKMKLKLKNKMKMKMKMKNKNKMKMKMKNKNKLKLKLKMKMKMKMKLKLKLKLKKKKKKKKKKKLKLKEKAK=K=KAKAKBKCKDKGKPK;KAK;K^K�K�K�KbK,KOK�K�K�K�K�K�K�KCKDKCK;KYK�K�K�K�K�K�K�K�K�KgK:KXK�KWK?KAKDK�K�K�K�K�KKKCKtK�K�K�K�K�K�K�KVK?KBK?KKK�K�K�K�K�K�K�K�K�K�K�KDKFK�K�KK=KK:K=K=KeK�K�K�KgKEKQK�K�K�K�KeKKKKK(KFKoK�K�K�K�K�K�K�K�KzK\KVKEK\K�K�K�K�K�KHKLKCKDKDKCKEKHKCK�K�K�K�K�KtK>KCKBKBKBKAK?K?KBKHKEK�K�K�K�K�K�K:K@K>KK{K�K�K�K�K�K�K�KcKCK�K�KK@K>KEK�e]r�(K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KNK*K4KSK�K�K�KUK9KK@KBKBKAKBK?K9KSK�K�K�KoK+KFK�K�K�K�K�K�K�KJKCKDKK:KOK�K�K�K�K�KVK@KhK�K�K�K�K�K�KCK=K?K=KK2KKCKGKFKEKCK;K:KCKCKDK8K4K5K4K4K4K5K3K5K6K8KCK>K6K6K6KDK�K�K�K_K>KK?KBKGKDKmK�K�K�K�K�KHKK=K;K9K8K8K8K6KK6K'K'K*K*K+KK	KKK
+KKKK	K	KKKKKK
+K6K@K@K>K]K�K�K]K%K*K#KKKKKK>K?KPK�K�K�K�K�K�K�K�KDKbK�KXK9K@K?K=Kce]r�(K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KKK*K:KWK�K�K�KOK9KKEK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KxKBKKKKK%K@KOKOKMKNKNKNKMKKKMKNKMKMKKKLKNKMKMKMKNKLKJKNKMKNKMKJKKKLKNKMKKKMKNKLKKKLKKKKKKKKKKKKKKKLKLKKKLKIKEK?KK�K�K�K�K�K�K�KRK>KCK=KJK�K�K�K�K�K�K�K�K�KK=KEK�KtK>K?K-KvK�K�K�K�K]KKhK�KPK=K?KBK�K�K�K�K�KeK?KYK�K�K�K�K�K�KPK;K?K>KK=K;K:K8K7K8K7K8KBKFK@KK�K�K�K�K�KWKAKDKDKBK0K(K*K+K)K)K$K K:K@K?K=K-K(K)K*K*K"KKKK
+KKKKK
+KKKKKKK!K@K@KBKAK�K�KIK&K*K'KK
+KK
+KK"KCK>K�K�K�K�K�K�K�K�KdKBK�K�KK@K@KAe]r�(K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KHK(K=K[K�K�K�KIK9K;K9K;K@K@K=K6K9K?KBKBKDKCK1K*K+K)KKKKKKK
KKKK	KKKKKKK
+K	KKKK
+K
+K
+KKKKKKKKKKKKKKKKKKKKKKKKKKK,K>KEK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KyKKKKKKK.KJKPKMKMKNKMKLKMKNKMKMKLKMKNKMKMKMKNKMKKKNKMKMKMKJKKKLKMKLKKKMKLKKKKKKKKKKKKKKKKKKKKKKKKKKKKKLKJKIKEK=K;K>KAK@K>KBKBKMKpK�K�K/K8K}K�K�K�K�K�K�KYK?KCK@KDK�K�K�K�K�K�K�K�K�K�KAKAK�K�KAKCK.KgK�K�K�K�KjK>KTK�K�K�K�K�K�K�K�K@KCK@K=KbK�K�K�K�K�K�K�K�K�K�KbK?KZK�K`KKKDKDKKBK5K5K7K8K\K�K�K�KIKAKK?K3K'K*K+K)K*KK
+KKKK	K	K	K	K	KKKKKKK6KAKAK;KhK�K9K'K*K)K"KKKK
+K
K5KAKSK�K�K�K�K�K�K�K�KCKgK�K]K8K?K?K;e]r�(K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KKDK%KAKbK�K�K�KCK8K:K4K:KAK?KKFK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KyKKKKKKKKK9KOKOKMKMKNKNKMKMKMKNKNKMKNKNKNKNKNKNKNKLKKKKKLKLKKKKKKKKKMKIKKKLKKKKKKKKKKKKKKKKKLKLKLKLKLKKKHKKKHKAK;K:K5K.K>KBK@K@KTKkK5K1KtK�K�K�K�K�K�KbK>KEKAK@KzK�K�K�K�K�K�K�K�K�KHK=KtK�KDKCK+KVK�K�K�K�KxKAKNK�K�K�K�K�K�K�K�KGKBKCK=KUK�K�K�K�K�K�K�K�K�K�KsK?KLK�KtK9KBK7KpK�K�K�K�K�KGKEK�K�K�K�K�K�KyKKK;K9K:K:K8K6K7K:KDKDKDK�K�K�K�K�K�KLKBKCKBK>K,K)K+K*K&K%K$K)K>K>KAK:K)K*K+K+K*K;KKKK
+KKKKKK	KKKKK	K%K@K>K>KHKnK1K'K)K)K*KKKK	KKK?K?K�K�K�K�K�K�K�K�KcKEK�K�K>K>K>K?e]r�(K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KxK?K'KDKjK�K�K�K>K:K5K$K:KAK@KKCK?KLK�K�K�K�K�K�K�K�K�K�K�KCKCK�K�K?KCK:K]K�K�K�K�K�KNKAKsK�K�K�K�K�K�K@K>K>K>K=KK+KKEKGKCKBKBK=KK4K5K7KAK�K�K�KhKAK>K;K5KUK�K�K�KpKFKCKxK�K�K�K�KUKKKKK:KIK�K�K�K�K�K�K�K�K�K�K�KlK@K{K�K�K�K�K�K�KGKBKDKCK?K@KHKFK�K�K�K�K�K]K>KBKBKBKAK?K?K@KDKFKGK�K�K�K�K�K{K8K>K;K9K:K:K8K6K8K6K>KDK?KjK�K�K�K�K�KiK?KFKCKBK2K)K+K)K(K(K&K%K7K@K?K@K/K(K+K+K&K�KKKK
K
+K
+K
+K	K
+KK	KKKKK	KK:K>K?K?K;K*K&K'K&K'K!KKKKKK*K?KTK�K�K�K�K�K�K�K�KEKjK�K`K:K@KAe]r�(K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KsK>K(KEKrK�K�K}K>K;K1K"KKEKKKNKMKMKMKMKMKMKNKLKKKKKKKMKMKKKKKKKKKKKKKKKKKKKLKKKLKLKIKLKKKKKKKKKJKJKLKJKHKHKHKHKIKLKKKHKFKGKIKEKKCK@K9K`K�K�K�K�K�K�K�K�K�K\K7K]K�KSK>KBKGK�K�K�K�K�KGKAKvK�K�K�K�K�K�K�K[K=KCKAKFK�K�K�K�K�K�K�K�K�K�K�KKKAKxK�KHKAK?KMK�K�K�K�K�K\K?KbK�K�K�K�K�K�KKKK=KDKDKCK:K4K4K4K4K4K5K5K5K5K3K>K@K4K5K6K9KjK�K�KKCK?K9K7KCKaKrK�K�KPKDK]K�K�K�K�KrK3KKKK.KFKyK�K�K�K�K�K�K�K�K�K�K�KEK[K�K�K�K�K�K�K[K@KDKDK?K7KEKAKsK�K�K�K�K�KK?KCKHKAKwK�K�K�K�K�KCKK>K@K:K*K+K*K1K�K�K
K
+KK
+K
+KKKK	KKKK
+KKK*K?K>K?K=K+K$K&K%K&K(KKKKK
+KK5KAK�K�K�K�K�K�K�K�KbKDK�K�K>K=K?e]r�(K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KlK>K*KJK}K�K�KtKK:K9K.K9KFKCKCK=K-K*K+K&KKKKKKK
KK
KK
+KKKKKKKKKK
+KK
+KKKKKKKKKKKKKKKKKKKKKKKKKKKKK1K@KFK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KK K,K5K1K=KHKUKUKWKNK?K?KGKHKLKMKMKLKLKNKMKMKLKKKLKLKKKKKKKKKKKKKKKLKJKIKKKLKLKKKKKKKKKLKKKHKIKLKKKKKIKHKGKHKIKIKHKHKHKIKHKHKEK=K7K:KAKAKCKCKBKCK_K�K�K�K�K�KAKBK?K8KWK�K�K�K�K�K�K�K�K�KhK5KRK�KaK=KBK@K�K�K�K�K�KRK>KgK�K�K�K�K�K�K�KjK;KCK@KAKtK�K�K�K�K�K�K�K�K�K�KUKKBKBK�K�K�K�K�KlK@KSK�K�K�K�K�K�K]K:K>K=K=KK7K:K9KXK�K�K�KdKCKKK�K�K�K�K�KKKKKK"KCK\K�K�K�K�K�K�K�K�K�K�K�KVKHK�K�K�K�K�K�KkK?KDKCKAK@KDKEKVK�K�K�K�K�KGK@KBK@KBK@K>KAKBKEKAKVK�K�K�K�K�K`K:K>KKEKCK@K0K*K*K*K)K)K(K'K;KBKAK>K0K-K*KDK�K�K4KKKKKK
+KKKKKKK	K	KK;K?K>K@K3K%K&K%K%K'K K	KKK
+K	KK;KYK�K�K�K�K�K�K�K�KEKhK�K^K:K@e]r�(K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KcK9K,KKK�K�K�KlK;K=K*K&K>K@K=K:K8K,K;KEKCKCKKKAKAKCKEKDK�K�K�K�K�K�K;K>KK>K?K;K*K&K%K%K%K&KKKKK	KK0KCK�K�K�K�K�K�K�K�K`KEK�K�K@K@e]r�(K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K]K7K/KLK�K�K�KfK;K=K*K*K>K?K>K:K6K+KKAK;KDK�K�K�K�K�K�K�K�K�K�KKCK:KiK�K�K�K�KlK;KRK�K�K�K�K�K�K�K�K>KBKAK=KZK�K�K�K�K�K�K�K�K�K�KrK>KMK�KxK=KDK8KoK�K�K�K�K�KIKBK|K�K�K�K�K�K�KK?KDKDK@K5K2K5K4K5K4K1K2K3K2K5KBK>K6K5K5K=K{K�KRKK?K?KBKHKBKlK�K�K�K�K�KKK9KK2K#K#K%K%K%KKKKKKK#KAK[K�K�K�K�K�K�K�K�KEKiK�KaK9e]r�(K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KXK4K4KOK�K�K�K^K9KKBKEKEK@KJKBK;K?K;K?K�K�K�K�K�K�K�K�K�K�K@K7KzK�KAKCK=K\K�K�K�K�KyK>KHK�K�K�K�K�K�K�K�KFKAK@K;KNK�K�K�K�K�K�K�K�K�K�K�KCKDK�K�K>KBKKCKCKAK7K3K4K4K4K4K1K2K3K2K2K>KCK6K5K5K8K^K�K�K]KFK=K:K9K8KdKpKJK{KXKAKPK�K�K�K�K^KIK3KKK#KDKcK�K�K�K�K�K�K�K�K�K�K�KSKIK�K�K�K�K�K�K^KAKCKBKCK@KEKEKRK�K�K�K�K�KMK@KBKBK@K>K?K?KBKDKEKNK�K�K�K�K�KoK9K?K;K9K8K8K8K6K4K5K>KCK@KiK�K�K�K�K�KlK>KDKAK@K4K+K)K)K)K)K&K#K3KAK?K@K4K+K�K�K�K�KeKK
K
KK
+KK
+K	KKK	KKKK
+K0K?KK:K9K7K+K>KDKCKBK7K)K,K,K!KKKKKKKKKKKKKKKKKKKKK
+KKK
KKKKKKKKKKKKKKKKKKKKKKKKK!K!K K5K@KGK�K�K�K�K�K�K�K�K�KtKCKuK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KgKmKiKfKgKfKcKaK_K_K]K[KTKLK=K2K%KKKK*KBKNKKKMKNKLKKKKKKKLKIKHKKKKKKKLKKKKKKKKKKKLKJKHKHKHKKKKKHKIKHKHKHKHKHKHKHKHKHKHKHKHKHKIKIKGKFKFKGKHKGKAK;KKIK�K�K�K�K�KcK>KZK�K�K�K�K�K�KTK:K=KK:K;K9KJKyKcK{K`KDKEK�K�K�K�K�K\KDKKKK@KOK�K�K�K�K�K�K�K�K�K�K�KfKAKzK�K�K�K�K�KxKIKBKBKBKBKBKFKBK�K�K�K�K�KjKK?K?KBKBKGKCK�K�K�K�K�K�K?K;K:K:K8K8K8K6K5K3K:KDKDKLK�K�K�K�K�K�KBKBKAKBK=K+K)K)K)K)K'K$K)K>KBKCK;K8K�K�K�K�K�KKK
KK
+KK	KKKKK
+KKKKK;K:K=K=K0K$K#K&K&K'KKKKKKK,K@K\K�K�K�K�K�K�K�K�KAKjK�Kbe]r�(K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KOK-KK;K9K5K-KAKCKCKCK7K)K*K*KKKKKKKKKKKKKKKKKKKK	K	K
+KKK
KKKKKKKKKKKKKKKKKKKK K KKKK K!K K5KBKGK�K�K�K�K�K�K�K�K�K|K:KJK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KdKbKdKeKcK_KaKbKgKsK{K[K/K#K!K!KKKKKKK2KHKNKKKKKKKKKKKLKIKHKJKLKKKKKKKKKKKKKKKLKKKJKJKJKLKKKHKHKHKHKHKHKHKHKHKHKHKHKHKHKIKHKGKHKHKHKHKGKGKHKFK>K8K:K>KDKHKGKDKDKCK:KpK�K�K�K�K�K�K�K�K�KQK5KbK�KQK>K@KGK�K�K�K�K�KFKK@KxK�K�K�K�K�K�K�K�K�K�KUK>KfK�KQK:K@K?K�K�K�K�K�KvK?KOK�K�K�K�K�K�KfK7K?K=KKBKBKBKBKGKEKiK�K�K�K�K�K>KAK@K?K?K?K?K@KAKHKCK`K�K�K�K�K�KWK7K:K:K8K7K6K5K5K2K4KAKEK?K}K�K�K�K�K�K[K@KCKBKAK1K*K)K)K)K'K%K%K:KBKAK>KQK�K�K�K�K�KPKK
KK
+KK
+K	KKKK	K	K
+K
+K
+K/K9K;K?K8K$K%K&K%K$K%KKK	KKKKAK@K�K�K�K�K�K�K�K�K_KDK�K�e]r�(K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KIK-K?KYK�K�K�KKK8K7K"K5K?K>K;K8K3K-K@KCKCKCK7K)K+K(KKKKKKKKKKKKKKKKKKKK	KK
+KKKKKKKKKKKKKKKKK KKKKKK K KKKKK!K K6KDKHK�K�K�K�K�K�K�K�K�K�K[K"KPK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K`K]K_KkKtK|K�K�K�K�K�K�K�KXK3K K!K$K"KKKKK"KKiK�K�K�K�K�K�K�K�K�K�KeK=KUK�KbK8KAK8K{K�K�K�K�K�KDKGK�K�K�K�K�K�K}K;K@K=KK3K1K4K2K4K5K3K2K/K/K0K>K>K4K5K5K;KSK�K|KdKCKKCKBKBKBKEKEKNK�K�K�K�K�KNKK>KCKFKHK�K�K�K�K�K}K6K;K:K8K8K8K6K4K3K2K:KCKAKYK�K�K�K�K�K�K?KEKBKBK:K*K)K)K)K&K&K&K/K>K@K=K[K�K�K�K�K�K�KK
+KK
+K
+KKKKKKKKKKKK7K8K?K=K.K$K%K$K#K&KKKKKKK1KAKaK�K�K�K�K�K�K�K�KCKhK�e]r�(K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K|KGK/KBK^K�K�K�KGK9K7K!K7K@K>K;K9K1K.KCKAKAKAK6K)K*K)KKKKKKKKKKKKKKKKKK
KK	KKKKKKKKKKKKKKKKKKKKKKK K K K K KKK!K!K"K7KCKJK�K�K�K�K�K�K�KvK�KgK9K%KXKoKvK�K�K�K�K�K�K�K�K�K�K�K�K�KzK�K�K�K�K�K�K�K�K�K�K�K�K�K�KgK;K%K K%K$K KKKK,KBKMKKKJKLKIKHKIKIKIKIKIKKKLKIKIKLKJKHKKKLKJKHKHKHKHKHKHKHKHKHKHKHKHKHKHKHKHKHKHKGKGKGKGKFKGKFKFKGKHKGKBK8K8K;K2K4KEKIKFKHKbK�K�K�K�K�K�K�KlK2KIK�KiK=KEK=K|K�K�K�K�K[K9KZK�K�K�K�K�K�K�K{KKsK�K�K�K�K�K�KBK>K>K>K=KK4KK@KGKCKDKAK>K>KAKBKAK6K2K3K2K4K1K2K2K0K/K.K9KAK7K5K4K7K_K�KkK{KMK>K:K:K9KHKQKeK�KdKCKGK�K�K�K�K�K]KJKKKK=KOK�K�K�K�K�K�K�K�K�K�K�KeK=K}K�K�K�K�K�K�KIK?KAKBKBKBKDKAK�K�K�K�K�KqK9K@K?K?K=K=K=K=KAKEKAKvK�K�K�K�K�KCK:K9K7K8K:K7K4K4K4K9K?KBKAK�K�K�K�K�K�KMK@KAKBKK6K%K$K%K%K&K%KKKK	KK KCKDK�K�K�K�K�K�K�K�K]KEK�e]r�(K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KvKEK+KDKhK�K�K�KBK9K4K!K7K?K>K:K9K0K/KAKAKAKBK2K)K,K(KKKKKKKKKKKKKKKKKK
KK	KKK
KKKKKKKKKKKKKKKKKKKK K!K K K!K K"K%K%K#K8KCKJK�K�K�K�K�K�KvKKKEKIK.K7KtK|KK�K�KvK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KuKKK.K#K&K#KKKKK2KGKMKKKIKHKHKHKIKHKHKJKKKHKIKKKJKHKJKKKIKGKHKIKIKHKHKHKHKHKIKIKIKHKHKHKHKHKIKGKGKFKFKFKFKFKFKFKFKEKGKHK@K5K-K5KBKDKEKHKCKGKeK�K�K�K�K�KzK2KBK�KyKK?K=KK?K?K=K=KKoK�K�K�K�K�K~K	KKKK	KK
+K
+KKKKKKKK!K>KKIKkK�K�K�K�K7K:K�K�K;KAK8K]K�K�K�K�KzK9KHK�K�K�K�K�K�K�K�KHK?K?KKuK�KGK=K=KFK�K�K�K�K�KkK=KUK�K�K�K�K�K�KaK9K?K?KK4K3K3K2K2K2K2K0K.K-K/KBK=K5K8K7K?K�K�K�KbKBK;K9K:K3K5KIKeK~KLK@K_K�K�K�K�KsKNKKKGK.K$KEKiK�K�K�K�K�K�K�K�K�K�K�KSKGK�K�K�K�K�K�K|K=KCKBK?K?KCKDKJK�K�K�K�K�KUKKK?KAKEKCK�K�K�K�K�K�K9K;K:K9K8K6K4K5K5K0K9KCKCKJK�K�K�K�K�K�KBKAKBKAK9K(K(K)K&K%K&K$K)K@KBK?KJK�K�K�K�K�K�K+KKKKKKKKKKK	KKKKK8K=K=K=K4K$K&K&K%K&K$KKKKKK#KAKEK�K�K�K�K�K�K�K�K^KEe]r�(K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KiKAK,KIKzK�K�KwK=KK:K9K/K3KAKBKBK@K0K*K+K)KKKKKKKKKKKKKKKKKKKK
+K
+KK
KKKKKKKKKKKKKKKKK K K!K K K K!K K!K!K#K$K#K7KDKEKuK�K�K�K�K�K�K�K�K�K�K�KfKmKxKzKtKkK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KjK>K$K#K'K(KKK)KBKJKHKIKJKIKHKIKJKIKHKHKIKKKJKIKIKHKHKIKJKIKHKHKHKIKHKFKHKIKHKHKIKHKHKFKEKFKGKFKFKFKFKFKFKFKGKFKFKGKGKCK:K8K9KKQK�K�K�K�K�KK@KK�K�K�K�K�K�K�K�K�K�KVK;KaK�KVK:K@K;K�K�K�K�K�K}K?KHK�K�K�K�K�K�KwK9K?K>KK9K9K:K?KlK�K�KOKCKMK�K�K�K�K�KZKJK_K{K:KKK?KFKCKjK�K�K�K�K�KMK7K9K8K8K6K4K5K4K1K3K?KDK?K{K�K�K�K�K�K]K>KBK@K?K/K'K)K'K'K&K%K#K6KDKAKK;K9K-K5KCKCKDKAK/K*K,K)KKKKKKKKKKKKKKKKKKKK	KKK
KKKKKKKKKKKKKKKKK!K!K K K K!K!K K"K$K$K#K#K8KDKFKyK�K�K�K�K�K�K�KxK^K�K�K�K�K{K�KvKrK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KyKMK-K#K+K-KKK0KCKJKFKGKHKKKLKIKHKIKHKHKKKLKIKHKHKIKIKIKIKIKIKIKHKFKHKIKIKIKIKGKFKGKGKFKFKFKFKFKFKFKFKFKFKFKFKFKGKCKDKDK=K:K=K?KBKFKHKFKCKTKpKEK0KfK�KKK?KAKEK�K�K�K�K�KFK:KoK�K�K�K�K�K�K�KbKK=KqK�K�K�K�K�K�K�K�K�K�KeKK=K=KK@KBKBK?K7K3K2K4K5K3K2K3K0K.K-K8KEK8K4K5K5KVK�K{KSKHK@K7K9K9K@KXKpKmKPKBKEK�K�K�K�K�KyKEKAKdKaK>KHK�K�K�K�K�KoKyK�K�K�K�K�KBK\K�K�K�K�K�KkKLK?KBK?K?K@KEKBK]K�K�K�K�K�KAK>K?K>K:K3K6K=KK@K>K9K8K-K5KCKBKCK?K.K(K*K'KKKKKKKKKKKKKK KKKKKK
+KKK
KKKKKKKKKKKKKKKKK K K K K!K K!K"K"K#K$K#K#K7KBKMK�K�K�K�K�K�K�K�K�K�K�K�K�K~KfK�K�K}K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K\K7K*K'K'K"K"K5KEKHKHKHKJKIKHKHKIKHKIKJKIKHKIKHKGKGKGKGKHKIKHKFKHKIKGKHKIKHKHKGKFKFKFKFKFKFKFKFKFKGKGKGKGKGKGKEKEKFKDKAK>K;KK?KBKBKAK:K1K3K3K3K3K2K3K0K,K-K4KCK;K4K5K7KEK�K�KPKDKBK:K:K9K;KGKVKVKUKDKCKgK�K�K�K�K�K]K[KgK�KcK>KqK�K�K�K�KwKcKsK�K�K�K�KQKHK�K�K�K�K�K�K`K>KBK?K?KK@K=K8K5K,K7KCKBKBK>K,K(K)K&KKKKKKKKKKKKKK!K KKKK
+KK
+KKKKKKKKKKKKKKKKKKK!K K!K K K K!K$K"K#K%K#K#K7KBKNK�K�K�K�K�K�K�K�K�K�K�KdKYK�K�K�K�KK�KK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KmKXKK9K;KBKAK=K2K2K2K2K2K2K3K0K,K.K.K>K>K4K5K7K9K~K�KFKNKDK;K8K:K7K;KFK9K0KBKEKLK�K�K�K�K�K^KjK{KjK�KHKTK�K�K�K�K�KpKbKwK�K�K�KfK=K}K�K�K�K�K�K�KDKAK?K?KKGKCK^K�K�K�K�K�KXK2K8K8K6K5K3K2K3K2K1K?KCK?KgK�K�K�K�K�KqK:KCKAK?K5K(K)K)K&K%K$K"K/K?K>K>KaK�K�K�K�K�K�KKKKKKKKKKKK
+K
+KKKKK;K8K5K+K8KCKCKDK>K-K*K)K$KKKKKKKKKKKKK!K!K KKK
K
+K	KKK
KKKKKKKKKKKKKKK K K K!K K"K#K#K#K$K"K#K$K#K&K8KBKMK�K�K�K�K�K�K�K�KnKKKrKuKkKvKKtKKxKrK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KnKrK�KuKHK&K$K#K#K3KIKLKHKHKIKHKHKIKIKHKHKIKHKGKGKFKFKGKGKGKHKGKGKFKDKIKHKGKGKFKFKFKFKGKFKFKFKFKFKGKFKFKGKFKFKFKFKFKGKEKBKBK>K:K8KK?KAKAK?KBKCK7KKK?K@KDK�K�K�K�K�K�KDK�KKKKKKKKKK
+KKKK
+K*KK:K8K4K+K:KCKCKDK>K-K+K)K#KKKKKKKKKKKKK!K!K KKK
K
+K	KKK
KKKKKKKKKKKKKKK K K K"K!K#K$K$K$K$K"K#K%K#K&K8KBKLK�K�K�K�K�K�K�K�K{KdK_KhK]KIKnK�K|KoK~K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KK�KKIK\K4K/K,K-K;KJKJKHKHKIKHKHKHKHKIKHKFKGKGKGKFKFKGKIKGKGKHKFKIKHKFKFKGKGKGKGKFKFKFKFKFKFKGKEKFKGKFKGKFKFKFKGKDKFKGKEKEK?K8K6K:K=K=KBKAK=K@K>K4K\K�K�K�K�K}K5K?K�K�K�K�K�K�K�K�KJK@K?K;KAK�K�K�K�K�K�K�K�K�K�K�KWK:KcK�K\K:K?K8K�K�K�K�K�K�K@KCK�K�K�K�K�K�K�KK9K?KCKCK6K.K2K3K3K2K3K2K/K.K,K7KDK7K4K6K6KPK�K�KJKGK@K9K9KKyK�K�K�K�K�K_K>KBK>K?K.K(K)K'K$K&K%K"K3KAK@K=KoK�K�K�K�K�K�KKKKKKKKKKK	K
+K
+K
+K	KK:KK>K;K9K5K*K:KBKBKBK;K+K*K*K#KKKKKKKKKKKKK!K K!KKK
K
+KK
+KK
KKKKKKKKKKKKKK!K K!K K"K$K#K#K#K#K$K"K!K!K$K'K8KBKLK�KtK�K�K�K�K�K�K�KtKeKkKbKnK�K�K�KfK�K�K�K�KzKqK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KsK|K�KqKKKKK(K@KJKIKHKIKIKHKIKIKIKIKIKHKGKFKGKFKFKFKGKGKGKIKHKGKGKFKGKGKGKFKGKFKFKFKGKGKGKGKGKFKFKFKFKFKFKFKGKGKFKGKGKCK;K8K8KK?KK=K9K:K:KK7KCKCKBKBKAK9K=KCKCK:K/K2K3K2K2K3K2K/K.K,K0KBK:K3K5K3K@KtK�K_KDKBK;K9KK?K?K>K>KDKAKwK�K�K�K�K�K:K=K=K=K=K=K;K9KK@K>K@K5K(K)K&K%K#K$K#K(KKK9K&K#K&K%K$K&KK	KKKKK	K	K K�K�K�K�K�K�e]r�(K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K{KKK-KDK_K�K�K�KGK9K7K!K4K?K=K:K9K4K*K>KBKBKBK:K+K*K*K#KKKKKKKKKKKKK!K K"KKKK
+KK
+KK
KKKKKKKKKKKK K K!K!K!K"K"K$K#K#K#K#K$K#KK K%K&K9KCKKK�K�K�K�K�K�K�KUKXKBKSKbKQKSK|KhK�K�K�K�KK�K|KLKyK�KyKpK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KvKqKlKgKKKKKKK,KDKLKHKGKIKHKHKHKHKHKHKGKFKFKFKFKFKFKFKGKIKHKGKGKFKEKDKDKFKFKGKFKFKGKGKGKGKGKFKFKFKFKFKFKFKFKFKFKFKFKFKGKCK=K7K:K=K>KAKAK@K?KJKpK�K�K�KCK6KmK�K�K�K�K�K�K�KeK:K@K=K8KdK�K�K�K�K�K�K�K�K�K�KvK;KHK�K�K;KAK8K\K�K�K�K�K�KYK=KbK�K�K�K�K�K�KTK5K=K=K=K:K9K>KK+KFKCKCKAK@K9K9K@KCK;K1K2K2K2K2K3K2K/K.K-K.K>K=K4K5K5K;KLKyKHK4KEK;K:KK>K=KBKCKUK�K�K�K�K�KIK;K=KKEK@K�K�K�K�K�K�K9K8K6K4K5K4K3K1K.K/K4KBKCKAK�K�K�K�K�K�KRK;K@K>K;K,K'K&K%K$K$K$K K3KAKBK?K~K�K�K�K�K�KsKKKKKKKKKKKKKKKKK=KKMKvK�KMK1K`K�K�K�K�K�K�K�KrK:K@K?K9KSK�K�K�K�K�K�K�K�K�K�K�KK=K=K;K9K;K&K!KCKCKDK@K>K:K9K@KBK?K1K1K3K2K2K3K1K/K.K.K-K:KCK5K4K4K4K^KKBK7KAK@K:K9KKhK�K�K�K�K�KOK3K5K5K5K4K3K2K/K0K1K>KDK=KcK�K�K�K�K�KuK:K@K=K>K3K%K&K&K&K%K$K K(K@K>K>KVK�K�K�K�K�K�K$KKKKKKKKKKKKKK
+K
K2KK=K;K9K2K+KBKAKAKBK6K)K+K*K!KKKKKKKKKKKKK#K"K#KKKKK
+K
+KK
KKKKKKKKKKKK K K!K"K"KK K$K#K$K#K$K%K%K&K&K%K&K:KCKJK�K�KqKnK�K�KuKMKmK�K�K}K�KsK�K�K�K�K�K�K�K|K8KFK{K�K~KiK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KyK{K�K�K�KqK}K�KkK�K�K}KK^KfK#KKKKKKKKK(K?KJKIKGKHKHKHKHKHKGKFKFKFKFKFKFKGKGKGKGKGKFKFKFKGKFKFKGKFKFKFKGKGKGKGKFKFKFKGKFKDKFKGKFKFKFKFKFKGKFKDKDKAKBKDK6K(K/K>KCKDKAK>KRKFK0KSK�K�K�K�K�K�K�K�K>K@K?K9KHK�K�K�K�K�K�K�K�K�K�K�KGK:KsK�KLKKBKBK7K0K1K2K2K1K0K0K-K,K+K6KCK8K4K5K5KHKuKCKIKQK?K9K9KK=KKEK@KoK�K�K�K�K�K:K>KK>K=K8K)K&K&K&K%K#K#K#K8KCKBK?K�K�K�K�K�K�KbKKKKKKKKKKKKKKKK K>KK1K$K&K(K+K�K�K%KKK	KKK
+K	KK�K�K�K�e]r�(K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KdK@K.KIK}K�K�KzK=K8K0K"K7K=K=K=K:K1K-K@KBKAKBK5K(K,K*KKKKKKKKKKKKKK$K$K$KK
KKK	K
+KK
KKKKKKKKKKKK!K K!K#K#K!K"K$K#K#K#K$K&K&K%K&K%K%K;KCKJKdK�KhKRK�K�KKaKOKmK�K�KeKxKtKKyK�K�KvK�KvKPKvK�KwKRKlK|K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KuKvK�KlKXKrK�KMKxK�KqKbK�K}K�K'KKKKKKKKKKK-KEKIKHKGKHKIKIKHKFKFKFKFKFKFKFKFKFKFKFKFKFKFKFKGKFKFKFKFKFKFKFKFKFKFKFKFKGKEKCKEKGKFKFKFKFKFKGKGKDKBK?KDKEK@K9K:K;K=K?KAKBK>K=K:KFK�K�K�K�K�K�K�K�KDKAK?K;K>K�K�K�K�K�K�K�K�K�K�K�KSK9K`K�K_K9K?K8K�K�K�K�K�K�KCKAKK�K�K�K�K�K�KCKK8KK=K=KCKDKRK�K�K�K�K�KKK8K>K:K:K:K:K:K9K?KDK@K�K�K�K�K�K�K=K5K5K4K5K3K1K3K3K.K2K=KDK>KwK�K�K�K�K�KdKKK7K&K$K'K9K�K�KiKK	KKKKK	K	KK2KKK�K�K�KpK9K7K-K!K;K=K=K;K7K/K.KBKAKAKBK4K)K)K*KKKKKKKKKKKKKK#K#K$KKKKK	K
+KKKKKKKKKKKKKK!K!K K#K$K#K$K%K%K%K%K%K%K%K&K%K%K%K:KCKIKdKvKcKtK{KuKcKSKZK�K�KxKOKQK>KXKtK�KxKjKxKrKwK�K�K|KrK�K_KwK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K[KvKuKHK[K�K�K{K)KKKKKKK!K&K-K1K5KBKEKHKIKGKGKIKHKFKGKFKFKFKFKFKGKGKFKFKFKFKFKGKGKGKGKFKFKFKFKFKFKFKFKFKFKGKFKFKFKGKGKGKFKFKFKGKGKFKFKEKFKFKEKEKBKAK=KKAK?KYK�K�K�K�K�K�K�KLK?K?K;K6KxK�K�K�K�K�K�K�K�K�K�KbK8KOK�KqK:KAK6KoK�K�K�K�K�KOK=KkK�K�K�K�K�K�KOK:KKAK3K3K5K6KeK�K�KqKDKK=K=K?KEKCK�K�K�K�K�KpK5K>K:K:K:K:K:K9K:KAKAK^K�K�K�K�K�K]K1K6K5K5K4K0K0K0K/K.K9KBK@KSK�K�K�K�K�K�K;K=KK8K7KAKBK@K3K/K0K/K0K/K.K/K,K*K)K8KDK7K5K5K4KOK�K�K�KPK?K:K9KK>K=K=K>KEKBKkK�K�K�K�K�K=K>K:K:K:K9K:K9K9K@KBKDK�K�K�K�K�K�K3K5K5K5K4K0K0K0K0K.K3K@KCKBK�K�K�K�K�K�KQK9K=K=K;K*K&K&K$K$K"K!K!K2K@K?K;KiK�K�K�K�K�K�KKKKKKKKKKKKKKKKK6K>KK=KFKiK�K�K�KeK9K@KK�K�K?KKBKAK4K.K0K/K0K/K/K/K,K)K(K1KCK;K4K4K5KBK�K�K�KgK?KK\K�K�K�K�K�K�KTKKEKAKtK�K�K�K�K�KFK2K5K5K3K1K3K1K/K0K1K;KBK=KeK�K�K�K�K�KyK9K=K=K=K1K%K&K$K$K$K#K"K)K?K@K@KIK�K�K�K�K�K�K:K�K
+KKKKKKKKKKKK
+KK$K=KK=K;K9K8K,K3KBKAKBK?K/K(K)K*KKKKKKKKKKKKK!K#K$K"KKKK
+KKKKKKKKKKKKKKK K K"K$K#K$K#K$K%K%K&K%K&K&K$K&K&K%K&K9KAKOKiKiK\KpK�K�K�K^KkK�K�K{KrKTKmKZK`KJKhK�KfK|KKfK{KzK�KdKAK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KoKK�K�K�KLKbK�KjKkKQKQKRK[KXKVKZK]K[KYKUKMKLKJKFKEK?KAK@KAKGKGKGKFKFKGKFKFKFKFKFKGKGKGKFKCKDKCKEKGKFKGKGKFKFKFKGKFKFKGKFKFKEKDKEKEKEKEKEKFKEKEKGKFKEKCKDKCKFKFKCKDKEKFKHKEKK?K�K�K�K�K�K�K�K=K:K=K;K9K9K:K5KK9KBKAK@K=K:K5K=KDKBK8K,K/K0K/K0K/K/K,K)K(K-K@K=K4K4K4K7K~K�K\KbKBK=K8K9KK;K;KKGKAK�K�K�K�K�KyK7KKyK�K�K�K�K�K}KKK	KKKKKKKKKKK
+K	KK9KK;K(KvK�K�K�K�K^KK	KKKKK
+K	KKK=K:K9K6K+K6KCKAK@K>K-K'K+K(KKKKKKKKKKKKK!K#K$K#KKK
K
+K
+K
+KKKKKKKKKKKKK!K K"K$K#K#K$K#K#K%K&K%K%K%K&K%K%K%K%K:KBKKK�K|KFKwK�K�K�K�K}K�K�KgKJKUKpKmKvK�KnK�KwKhKoK�K�KeK�K{KdKpKzK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KPKSKWK]K\K\K]K`K^K\K[KVKUKMKKKJKFKHKGKDKK;K9K:K9K9KK0KDKAK?K>K;K6K:KBKBK>K/K/K0K/K0K/K0K+K)K(K)K9K?K4K5K6K5KhK�KSKgKLK=K9K:K8KGK'K&KK,KBK?KqK�K�K�K�K�K�K�KwK�KnKAKfK�K�K�K�KKK	KK2KMK;KEKGK?K|K�K�K�K�K�K�KBK;K:K:K9KK?K>KRK�K�K�K�K�K�K*KKKKKKKKKKKKKK	K	K,K>K=K=KK?KAK>K=K=K8K7K5K5K�K�K�K�K�K�K�K�K�K�K�KaK6KRK�KwK7K?K5KgK�K�K�K�K�KSK8K`K�K�K�K�K�K�K[K5KKKK�K�K�K�K�K�KiKKKKKKKKKKKKKKKKK:KK=KKAKOK�K�KhKfKK�K�K�K�K�KkK�K_KaK[K_KmKRK|KK�KVKMKeKK?K?KAK@K=K:K4KpK�K�K�K�K�K�K�K�K�K�KtK6KCK�K�K;K>K7KSK�K�K�K�K�KhK8KNK�K�K�K�K�K�KsK4K:K9K:K8K7K:K)KKAKBK?K>K>K:K7K?KBK@K3K.K0K0K.K-K-K.K*K'K(K0K?K7K5K5K4K;KhK�KOK:KDK;K9K:K>K+KKK K;KCKEK�K�K�K�K�K�K�K�K�K�KhK>K�K�K�K�K�K"K&K,K'KKK.KDKJK�K�K�K�K�K�KmK7K:K:K:K2K5KDK?K�K�K�K�K�K�K5K;K9K7K8K8K8K7K6K@KCKJK�K�K�K�K�K|K0K6K3K2K0K1K/K-K.K,K5K?KCKCK�K�K�K�K�K�KTK;K;K:K9K*K"K$K$K$K!K"K!K,K?K?K>K\K�K�K�K�K�K�KKKKKKKKKKKKKKKK
+K*KK2K2K/K'K$KKKKK#K6KFKEKCKDKDKCKFKGKFKFKFKFKFKGKFKDKDKCKDKDKFKFKDKEKFKFKFKGKFKCKCKCKDKDKDKDKDKDKDKEKCKCKDKCKCKCKCKCKDKCKAKAK@KBKKKMK>K7K9K>K?K?K=K?K=K[K�K�K�K�K�K�K�K�K�K�K�KK:K6K;K@KCK7K-K.K0K/K/K.K-K+K)K(K+KK7K9K;K-KKKK1K>K?KK�K�K�K�K�K�K�K�K�KxKAKmK�K�K�K�KGKK@K(KKKKAKCK�K�K�K�K�K�KfK:K=K;K:K8K8KCK?K_K�K�K�K�K�KDK8K9K8K8K8K8K6K5K;KDK?K�K�K�K�K�K�K;K1K3K2K/K0K0K.K.K.K.KKgK�K�K�K�K�KyK9KK_KvK�KcK�K�K�KtKWK9K'K/K5K+K KKKK+K=KCKDKDKCKFKFKFKGKGKGKGKGKFKDKDKCKDKDKFKFKCKEKFKFKFKFKFKCKCKCKCKCKCKDKDKDKDKEKCKCKDKCKCKCKCKCKCKCKBKAKBKAK?KIKLKCK9K;K=K:K6KK|K�K�K�K�K�K�KEK8K:K:K:K9K7K9K;K8KBKBKAK>K:K6K9K?KAK:K-K.K0K/K0K.K-K,K)K(K'K7KBK7K4K5K5KKKuKtK�KRK=KKaK�K�K�K�K�K�K�K�K�K�KIKRK�K�K�K�KwKK;K=K0K+K*KAKDK`K�K�K�K�K�KKJK;K;K9K8K8K>KCKFK�K�K�K�K�KaK4K9K8K8K8K7K7K2K7KEKAK\K�K�K�K�K�KaK,K4K2K0K0K0K.K.K/K*K5KCKBKGK�K�K�K�K�K�KDK9K:K:K5K&K$K#K K!K K KK-KBKCK=KiK�K�K�K�K�K�K!K	KKKKKKKKKKKKKKK/K?KK=KKDKdK�K�K�K�K�K�K�K�KPK3KZK�KcK4KK@K>K/K.K0K/K0K/K,K+K(K%K&K0K>K8K4K5K5K?KfKZKhKTK?K=K:K:K8K KK KK8KCKMK�K�K�K�K�K�K�K�K�K�KaKBK�K�K�K�K�K,K4K9K?K>K-K/KDKJK�K�K�K�K�KxKZK7K:K:K:K9K=KFK@K}K�K�K�K�K�K4K7K8K8K8K5K5K4K6K?KEKEK�K�K�K�K�K�K1K4K0K0K0K0K/K,K.K-K-K?KCK=KvK�K�K�K�K�KfK2K;K9K8K.K$K!K!K!K!K!KK#K=KAK@KGK�K�K�K�K�K�K`K#KKKKKKKKKKKKKKKK=K=K=K:KiK�K�K�K�K�K�KVKK KKKKKe]r�(K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KYK:K2KKK�K�K�KrK;K8K/K!K8K?K=K;K8K0K/K@K?KBKAK7K)K(K)K#KKKKKKKKKKKKK$K$K&K"KK
KK
+KK
KKKKKKKKKKKK K!K#K#K$K$K%K%K%K&K&K%K&K&K&K%K&K(K(K*K:K?KDKBK9K;K�K�KXKyK�K�K�K�K�K�KzK`KoKrKRKYKVKIK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K|K�K�K�K�K�K�K�K�KqK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KzK�KxKtK�K�K�K�K�K�KUK;K+K1K7K)KKK K7KDKDKDKCKEKFKEKFKFKEKFKEKCKEKFKFKEKCKDKCKEKFKDKCKCKCKCKCKCKCKCKCKCKCKCKCKCKCKDKCKCKCKCKBKCKBKBKBKAK@KAKBK?KAK=K2K+K0K9K>K=K@KAK=KGKlK�K�K�K�K�K�K`K2KIK�KyK4K>K3K`K�K�K�K�K�K[K5KVK�K�K�K�K�K�KgK6K>K;K9K9K9K8K8K8K@KBKAK@KK>KAK4K-K0K/K/K/K,K,K*K&K%K-K=K9K4K5K5K:KOKOKWKTK?K=K9K:K8K!KKK$K6K?KBK�K�K�K�K�K�K�K�K�K�KxK?KuK�K�K�K�KCKK KGK>K6K.KAK@K�K�K�K�K�K�K�K=K8K:K9K9K;KAKAK^K�K�K�K�K�KBK4K8K7K6K5K5K5K4K;KEKAKrK�K�K�K�K�KGK/K1K0K/K/K/K,K-K,K)K9KDK?KTK�K�K�K�K�K�K;K9K:K:K3K#K!K!K!K K KKK3K?K@KK=K:K6K/K6K?KAKAKAK5K*K+K)K"KKKKKKKKKKKKK$K$K&K"KKKK
+K
+K
KKKKKKKKKKKK!KK K#K$K%K&K&K&K&K&K%K)K)K&K$K)K)K(K*K;K@KFKHKgKHK�K�K�KpK�K�KlK�K�K�K�K�K�K�K>K=KLKpKK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K}K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KK�K�KTKbKRK�K�K�K�K�K�K�K�K�KkKGK3K+K.K/K#KK(K@KGKCKDKCKCKCKCKCKCKCKCKCKCKCKCKCKCKCKCKCKCKCKCKCKCKCKCKCKDKCKDKDKDKCKCKCKDKCKAKBKBKBKEKDKCKAKAK@KAKAKBKAKCKAK9K0K1K9K=K@K>K@K>K;KFKqK�K�K�K�KuK2K>K�K�K;K>K8KKK�K�K�K�K�KpK6KFK�K�K�K�K�K�KsK9K=K9K9K:K7K8K:K9KAKBKAK@K=K:K4K:K@K?K:K-K/K0K-K-K.K.K-K&K%K*K9K=K5K4K4K6KDKBKQKjKDK=K8K9K;K&KKKK)KK`K�K�K�K�K�K�KWK5K;K8K8K8K=KDKFK�K�K�K�K�KgK3K9K5K4K5K5K5K4K8KBKCKQK�K�K�K�K�KnK,K1K0K/K-K/K,K*K+K)K1K?KBK>K�K�K�K�K�K�KUK6K:K8K7K(K!K!K!K KKKK&K>KAK@KPK�K�K�K�K�K�KQK&K+K)K'K#KKKKKKKKKKKK;K9K>K:KoK�K�K�K�K�K�KRKKKK%K'Ke]r�(K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KSK3K6KNK�K�K�KbK7K6K4K5K9KKMKwK�K�K�K3K4K�K�KBK8K:K;K�K�K�K�K�K�K9K:K�K�K�K�K�K�K�K=K:K:K:K9K8K9K9K:K;KAKAK?K>KK@KKvK�K�K�K�K�K4K6K5K5K5K5K5K3K3K?KEK?K�K�K�K�K�K�K9K1K0K.K.K-K+K+K+K+K+K;KBK>KeK�K�K�K�K�K~K5K:K8K8K.K!K K K K KKKK7KAKAK>K�K�K�K�K�K�K�K;KDKLKPKWKiKjKhKGKK
K$K(KKKK0KKKBK?K=KQK�K�K>K2KlK�KTK6KK:K9K;K9K9K:K8K;K7KAKAK?K>K:K7K5KK0K-K-K-K.K-K+K*K%K%K$K.K?K7K2K5K4K>K�KmKVKRK>K:K9K:K8KKKKK1KBKAK�K�K�K�K�K�K�K�K�K�K{KK�K�K�K�K�K�K�K=K5K9K8K8K8KBKCKXK�K�K�K�K�KJK3K5K5K5K5K5K2K2K9KCK?KcK�K�K�K�K�KUK+K1K/K.K+K*K+K+K+K)K4KAKAKFK�K�K�K�K�K�KEK7K9K7K1K$KKKKKK KK-K?K?K>K[K�K�K�K�K�K�K{KqKpKcKPKWKUKQKHKFKNKFK+K1K)KK&K>K=K>K:KvK�K�K�K�K�K�KPKK!K1K&Ke]r�(K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KKKK/KAKQK�K�K�KWK6K7K5K4K9KK:K5K4K;K?K?K?K>K/K'K(K(KKKKKKKKKKKKKK&K%K&K!KKKK
+KKKKKKKKKKKKK K K!K#K$K%K%K&K%K&K%K&K(K)K)K)K(K)K)K(K*K;K>KNKcKoK�K�K�K�K=K'K0K9KMKXK}K�KKyKoK/KdK�K}K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K|KjK�KmK3KK�K�K�KKVKeKzKpK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KaKQK|K�K+KYK�K�KmK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K}KVK7K'K$K*K8KCKDKDKCKDKDKDKDKCKEKGKDKCKCKDKCKCKDKCKCKCKCKDKEKBKDKCKAKCKDKBKBKBKBKAKAKAKAKBKDKCKBKBKBKAKAKAKAKBK@K?KAKAKCKAKK?K>K=KK;K7K5KK>KuK�K�K�K�K�K�K�K�K�K�K@K\K�K�K�K�KwKYK�K�KSKhK�KfK?K`K�K�K�K�K�K�KZK1K9K8K5K4KKBKIK�K�K�K�K�KK,K1K0K.K,K*K+K)K*K'K,KK/K'K(K'KKKKKKKKKKKKK K&K%K&K!KK
KK
+KKKKKKKKKKKKKK K"K$K%K&K%K%K%K&K&K&K)K)K(K)K(K(K)K(K*K;K?KKKgKtKaK�K�K�KSKVKjKSKIKSK�K�K}K�KrK7KVKTKcK�K�KaKsK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KnK�K�K,K\K�K�K�K�KYKyK�K|KvK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KXKSKrKZKoK�K{KuK�K�K�K�K�K�K�KpK�K�K�K�K�K�K�K�K�K�K�KiK@K)K*K7K>KDKDKDKCKDKDKCKDKGKDKCKCKEKCKCKDKCKCKCKCKDKDKCKCKCKAKCKCKAKAKAKAKAKAKAKAKBKCKCKAKAKAKBKBKBKAKAK@K?KAKBKAKAK@KBKKKBK?K?K>K;K6K;K@KBK7K,K-K-K-K-K+K+K+K&K&K%K7K>K1K3K2K6K7KKZK�KGKKoK�K�K�K�K�K6K5K5K2K3K3K/K0K/K9KEK?K}K�K�K�K�K�K=K+K-K,K+K-K-K)K(K(K(K8KBK?KRK�K�K�K�K�K�KKKKKKK9KsKoKiKUKnKZK?K?K@KKKKCKDKCKCKDKDKDKCKCKCKDKCKCKCKCKCKCKCKCKDKCKCKCKDKBKAK@K@KAKAKAKAKAKAKAKBKBKAKBKBKBKBK@K?K?K@KBK@K>K?K?K>K@KCK@K=K8K6K8K9K8KKK9KKKaK�K�K�K9K9K:K9K:K9K7K8K7K;KBK>K?K?K=K6K7K@KBK:K+K,K.K.K-K+K+K,K'K%K%K2K?K5K4K4K3K=KKK`KZKK>K?K>K?KAK?K>K?K;K4K2K5K6KK?K=K@K9K1K4K7K�K�K�K�KUK+K:K:K5KKK�K�K�K�K�KGK7K;K9K9K8K7K9K8K7K?K?K@K?KK?K=K.K-K-K-K-K+K+K+K(K%K%K,KKcK�K�K�K�K�KvKKKKKCKyK|KmKoKmKrKsKVK@K>KAK>KsK�K�K�K�K�K�KvKwKKKKyKqKhKdKdKDK.K!K.K8K'K2KK,KIKnK�K�K�KBK4K5K1K7K=KK?KK>K>K?K>K>K?K>K>K?K>K?K=K7K5K2K5K9KK?K>KK>K?K4K*K*K-K-K+K*K+K(K%K$K&KK?K^K�K�K�K�K|K7K'K K9KKKBKHK�K�K�K�K�KGKdKlKYKYKfKeKCKGK�K�K�K�K�KqKDK3K6K4K5K5K9KFK?KiK�K�K�K�K�K/KKKKKK
KKK%KEKKAK?KNK�K�K�K�K�K�K�KyK�KzKzK|KzKqKgKiK]K>K-K&K4K*K(K=K=K=K:KUK�K�K�K�K�K�K�K'K,K7e]r�(K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K\K;K.KHKvK�K�K}K>K5K6K/K5KK?K:K)K(K)K(KKKKKKKKKKKKK#K&K&K&KKKKK
+KKKKKKKKKKKK K!K"K#K%K%K%K&K$K#K'K)K'K)K)K(K)K(K&K)K*K.KK?KAK>K=K>K?K>K?K?K>K?K?K>K>K>K?K@K>K9K4K3K7K:KK=KK;K9K8K6K9K3K;KAK@K?KKJK�K�K�K�K�KFK>K*K7KTKLK>K�K�K�K�K�KLKGKQKOKVKvKpKHK>K�K�K�K�K�KlK9K1K,K)K'K#K K6KBKOK�K�K�K�K�K;KK	K	KKKKKKK?KAKQK�K�K�K�K�K�KqKRKKKKKKKKKK=KEK>KvK�K�K�K�K�K_KKKKJKjKqKyK}KqKfKsKoKHK9KBKBK>K�K�K�K�K�K�K�KvK�KwKnKnKfKVKWKQKTKOK>K=KK=K>KK?K9K*K(K)K)KKKKKKKKKKKKK#K&K&K&KKK
KK
+KKKKKKKKKKKK!K K K"K&K&K%K(K"K K'K)K)K(K(K(K(K)K,K+K*K.KK[K�K�K�K�K�K�K�K�K�KPKRKOK'KdK7KKcKSK�K�K�K�K�KrKtKhK�K�K�K�K�K�K�K�K�K�K}K�K�KZK`KbKrKaKjK�K~KcK�KkKcKxKoK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KeK9KvK�K�K�K�KhK�K�K1KqK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K#KKKKKK%K?KFKDKCKDKCKCKCKCKCKCKDKCKDKCKAKBKDKBKAKCKCKBKBKBKBKBKBKBKBKBKBKBKBKAKBKBKBKBK@K?K?K>K>K>K?K=K?K?K>K?K>K>K>K?K?KKK?K=K9K5K8K?K?K5K(K+K*K*K+K*K(K)K$K&K$K.K@K5K1K3K1KKJKNK?KgK�K�K�K�KvKKK8K3KNKMK>KCK@K^K�K�K�K�K�KKKKKKK	K#KFKAK�K�K�K�K�KrKKKKKKKKKK1KCK>K�K�K�K�K�K�KlKAKKKKKKKK
K
K-KAK?KSK�K�K�K�K�K�KKK,KUKnKlKgKpK{KkKmKvKUK4KBKCK?KZK�K�K�K�K�K�KzKlKeKUKTKSKCK:K8KCK5K9KBK;K&KKK:K;K=K:K\K�K�K�K�K�K�K�KK?K7K'K(K)K&KKKKKKKKKKKKK%K&K%K&KKKKK
+KKKKKKKKKKKK!K!K#K$K&K%K%K&K%K(K)K(K)K*K*K*K(K)K*K+K*K-K=K>K[K�K�K�K�K�K�K�K�K�K�K]KFKK+KK+K�K_K�KvK�K�K�K�K�K�K�KhK~K�K�K�K�K�K�K�KxK�KgKUKYKXKnKCK`K�KyK^KoKfKbKKKvK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KZKTK}K�K�K�K�KZK�KwKAK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K2KKKKKKKK/KDKDKCKCKDKCKCKCKCKCKCKDKCKCKCKBKCKCKBKBKCKCKBKBKBKBKBKBKBKBK@K?KBKBKBKBKAKBKBKAKAKAK@K>K>K?K>K>K>K>K?K>K=K>K?K@K=K7K3K1K6K6K/K8K>K:K=K>K KkK�K1K5K�K�K�K�K�K�K�KDK>K?KK@K:K&K)K+K+K)K)K*K(K"K%K%K)K>K6K1K3K2K:K6KKQKrK?K;K9K9KKcK�K�K�K�K�KvK(KKKKKKKKKKKAKBK?K�K�K�K�K�K�KCKK4KYK]KTKQKVK[KbKDKQKCK$K8KAK?K?K�K�K�K�K�K�K�KbK\K]KKKIKSKKKIK>K.KK@K6K&K(K)K%KKKKKKKKKKKKK&K&K%K&KKKKK
+K
KKKKKKKKKKK!K!K#K$K%K%K&K&K&K)K(K(K*K+K+K*K(K)K+K+K*K-K=K>K\K�K�K�K�K�K�K�K�K�KYK]KK"KKK,KwKdK�KgK�K�K�K�K�K�K�K�K|K�K�K�K�K�KtKoKsK9K6KUKCK(KEKDKCKtKtK�KTKYK�KEKgK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KjK>K|K�K�K�K�K\K�KOKvK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K@KKKKKKKKKK8KDKEKCKDKCKCKCKCKCKCKCKDKCKAKCKDKBKBKDKCKBKBKBKBKBKBKBKBK@K>KAKAKAKAKAKAKAKBKBKBKAK?K?K>K?K?K>K>K?K>K=K>K?K>K>KK=K:K6K4K>K?K=K*K(K+K*K(K(K'K&K"K$K&K$K9KKdK�K�K�K�K�KxKK%K@KFKBKGKNKKKfKUKK>K@K4K%K(K)K%KKKKKKKKKKKKK%K%K%K'KKKKKKKKKKKKKKKKK K K"K%K#K%K&K(K(K(K)K(K*K+K+K*K(K)K+K+K*K.KKAKBKAKBKAKAKAKAKAKBKBKBK@K>K>K>K>K>K>K>K>K>K>K?K>K>K>K=K=K4K.K3K8K9KK?KKDK�K�K�K�K�K�KrKKKKKKK3K8K*K KKK"K1KK%K8K:KK>K@K3K&K)K(K$KKKKKKKKKKKKK&K&K&K&KKKKK
KKKKKKKKKKKK K!K#K$K#K%K&K(K)K(K)K(K*K+K*K*K(K)K+K+K+K.K=K=K_K�K�K�K�KXKK>K>K>K>K?K>K>K>K?K?K>K=K>K>K=K9K6K5K8K:KK?KK4K'K(K#K'K%K K*KBK7K1K2K2KKKKK)KBK;K=KKrK�K�K�K�K KK	KKKK>KOK�K�K�K�K}K	K
KK
+KKKKCKIK�K�K�K�K�K'K
+K
KK
KKKK>KAK]K�K�K�K�K�K KKK	KKKK	KKKDK>K_K�K�K�K�K�K[KK
+KKKKKKK	KKK:K@K=KxK�K�K�K�K�KkKAKOK'K!KEKOK3K K-K1K$KKK0K@K?KK8KdK�K�K�K�K�K�K�e]r�(K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KvKIK+KBKUK�K�K�KTK4K6K#K'K9K9K9K7K4K*K.K@K>K>K@K2K'K*K&K"KKKKKKKKKKKKK(K)K(K&KKK
KKKKKKKKKKKKKK!K!K$K#K#K%K%K(K)K(K)K(K*K+K)K(K(K)K*K-K.K1K?K=K_K�K�K�K�KWKKKYK�K�K�K^K�K�K�K(K_KaK{KiKnK�K~K�K�K~K�K�K�K�K�KjK�K�K�KwK�K�K�KKK4K?KcK�K�K�K�K�K�K�KVK_K�K|KqK_K}KuKzK�K�K�K`KiKtK]K|KlKNKbK�K�KbKbKqKUK�K�KuKxK|K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KnKKKK(K,KK%K+K2K9K>KBK?K:K>KBKDKCKDKCKCKCKCKCKDKDKBKAKAKAKAKAKAKAKAKAKAKBK@K?K>K>K@KBKAKAKAKBKBKBKAK>K@KBK?K?K?K>K>K>K>K>K>K>K>K>K?K?K>K>K?K?KK>K;K9K5K2K=K?K;K%K&K(K;K>K.K$K#K'K*KKK;K9K0K1K1KKKKKK?K?K=KK=K>K?K;K=K=K=KCK�K�K�K�K�K�K�e]r�(K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KlKEK)KDK[K�K�K�KNK4K6K!K)K:K;K:K7K5K+K/K@K>K?K@K3K&K)K(K"KKKKKKKKKKKKK'K(K(K&KKK
KKKKKKKKKKKKKK K!K#K$K$K&K'K(K)K(K)K(K)K)K*K)K(K)K+K+K+K/K=KKFKLKLKJKHKGKHKCK:K7K@KEKCKCKCKCKCKCKCKCKBKAKBKAKAKAKAKBKAK@K@KBK?K=K@K@KAKAKAKAKBK@K@KBKAK@K@K@K?K>K?K>K>K>K>K>K>K>K>K>K>K>K>K>K>K?K?K?KK?K=K9K4K2K=K?K>K)K"K'K7KK0K0K2K#KKKKK7K@KK2KKKKKK2KHK�K�K�K�KwKK
+KKKK&K@KnK�K�K�K�KFKK
K	KKKK1KAK`K�K�K�K�K�KKKKK	K	KKKFK@K|K�K�K�K�K�KKKKKKKKKK'KFK=KsK�K�K�K�K�K6KKKKKKKKK	KKKAK?K;K�K�K�K�K�K�KRKK*K,K0K2K4K5K5K4K5K6K7K6KK:KiK�K�K�K�K�K�e]r�(K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KgKAK)KFKdK�K�K�KIK3K5KK+K;K=K;K6K5K*K1K@K>K?K@K1K'K)K)K"KKKKKKKKKKKKK'K(K(K'KKK
KK
KKKKKKKKKKKK K!K#K$K&K(K)K(K(K)K(K(K(K(K*K*K(K)K+K*K*K-KK_KVK�K�KXKMKtK}K�K�K�KcKuK�K�K�K�KoKeK�K�K�KjK(K[K[KtK|KzKlK�KkKwKuK�KlKwKsK\K)KKKYKYKzKcK[KHK�KKNKxK�K�K�KoKhK}KQK\K]KOKvK�KxKK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KXKPKLKJKOKPKPKMKOKNKOKNKJKIKKKHKBKKBKEKCKCKCKDKDKDKBKAKAKAKAKAKAKAKAKBKBKBK@K>KAKBKBKBKBKBKBK?K>KBKBKBK@K>K>K>K>K>K>K>K>K>K>K>K?K?K?K>K>K>K>K>K>K>K?K=K:K6K1K4K7K7K7K;K;K:KQK�K�K[K5K;K9K8K5K4K7K'KK?K>K?K?K:K6K4K;K?K>K3K%K$K/K?K@K7KKKK(KK$K?K3K0K0K*KKKKK-K@K;K=K?KKK�K�K�K�KKKKKKK?KTK�K�K�K�KvKKK
+KKKKKDKIK�K�K�K�K�K#KKKKKKK
+K;KBKYK�K�K�K�K�K'KKKKKKKKKK@K?KRK�K�K�K�K�KoKK
+K
+KK
KKKKKK%K:K@KK?K@K.K&K)K)K!KKKKKKKKKKKKK'K&K&K%KK
KKKKKKKKKKKKKK!K!K!K#K$K%K'K)K)K(K'K*K*K*K*K*K*K*K*K+K+K*K0K?K>KcK�K�K�K�K�K�K�K�K\K�KUKLKxK{K�K�KKKoK�K�K�K�K�K�K�K�K�K�K�KWK�K�K�K�K�K�K�K�K�KgKFKVKvK�K]KIK\KqK[KjK_KLKyKmKPK=KjKVK>KCK�KZK.K�K�K�K�K�K�K�K�K(KEKPK:KUK#KXK}KsK}K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KJKMKPKTKUKWKTKQKQKOKQKQKKKIKKKHKEKHKFK@K9K;KCKEKCKBKBKBKBKAKBKBKAKAKAKBKBKAKAKBKAKAK?K?K?K?K?KAKBKAKAK?K@KBKAKAK?K?KAK@K>K?K>K>K>K?K>K=K>K?K>K>K?K?K?K>K?KK;K7K4K7K?K>K8K)K"K(K=K@K>K1KKK K&K-K;K6K0K2K0KKKKKK@K:K>KEKSK1KKKKKK;KbK�K�K�K�K6KK	KKK
+K9KCK�K�K�K�K�KKKK
+K	KKK?K?K�K�K�K�K�KOKK
+KKKKKK)KEKCK�K�K�K�K�KUK�KKKKKKKK	K3KEKAK�K�K�K�K�K�K-KK!K!K#K&K&K'K(K)K+K3K?K@KGK�K�K�K�K�K�KMK6K:K9K;K=KK?K?K?K?K?K>K�K�K�K�K�K�K�K}KtK_K'K-K'KKKKKKKKKKK+K;K9KK?K@K.K%K)K)K!KKKKKKKKKKKK K'K%K&K$KK
KK
+KKKKKKKKKKKK"K!K!K#K$K%K'K)K(K)K(K(K+K*K+K*K*K*K*K)K+K*K0K@K>KcK�K�K�K�K�K�K�KTKcKsK`K.KUKK�K�K2KK�K�K�K�K�K�K�KzKqK�K�K�K�K�K�K�K�K�K�K�KzKDK>KIKrK�K�K�K�KGKmKgKnKVKNKRKVKQK:K�K^K9K(KgK?KIK�KnK�K�K�K�K�K�KLK0K5K?KYK+KDKIKjK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KTKTKTKSKUKXKTKSKTKOKQKPKMKLKOKLKJKJKDKEK=K0K3K>KCKBKAKBKBKAKAKAKAKAKAKAKAKAKAKAKBKBK?K>K?K?K>K@KAKBKAK>K?KBKAKBK?K?KAKAK>K?K>K>K>K?K>KK?K>K>K>K>K>K>K>K=K=K>K=K5K*K(K0K2K2K4K6K-K,KgK9K9K9K7K6K4K7K7KK0KDK?K>K=K:K4K6K?K@K;K1K.K/K9K?K>K?K/KKKK*K=K9K1K2K4KKKKKK;KCKYKhK|KkK	KKKK	K5KLK�K�K�K�KfKK
+KKKK+KAKuK�K�K�K�K=KKKK	KKK2KAK_K�K�K�K�K�K	K
+K	KKKKKKBK>KuK�K�K�K�K�KKKKKKKKKK*KCK>KdK�K�K�K�K�K^K"K*K)K)K*K)K)K,K,K.K3K>K@K>KtK�K�K�K�K�KyK4K9K9K:KK?K=K-K&K(K)K!KKKKKKKKKKKK K'K%K'K%KK
KK
KKKKKKKKKKKK"K!K!K#K#K&K(K)K&K'K)K'K)K*K+K+K*K)K(K"K*K*K0K?K;KeK�K�K�K�K�K�K�KxK]K1KiKQKcKhK�K�K0K(K�K�K�K�K�K�K�KTKeKwK�K�K�K�K�K�K�K_K}K�KsK.K$KCKmK�K�K�KWK5K-KSK�KjKgK?K6K5K^K�K�KaK;KyKOKrK�K�K�K�K�K�K�K�K_K>K-K6KLKMK=KLKgK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KWKUKVKUKUKXKWKVKVKQKPKSKPKRKPKKKIKEKAKK@KBKBKBK?K?KAKAKAKAKAKAKAKAKAKBK@K>KAKBKAKAKAKBKBK?K>K?K?K>K>K>K?K?K?K?K>K?K>K?K>K=K=K=K=K=K>K?K=K>K>K9K/K&K)K/K1K0K2KK,KK>K>K;K5K4K;K=K;K4K5K/K.K9K?K>K?K)KKKK6K?K1K/K3K%KKKKK0KHKzK�K�K�K*KKKKK+KCK�K�K�K�K�K
KKKKKKAKWK�K�K�K�KoKK
+KKK	KKKCKHK�K�K�K�K�K$KKK
+K	K
+K
KK9KAKTK�K�K�K�K�KKIK�K�K�K�K�K�K-K+K.K-K.K,K,K/K0K3K1K8K?KKBK�K�K�K�K�K�K�K�K@K(K3K2K0K4K4K2K0K+KK K#KKK/K=KK?K=K,K'K(K(K KKKKKKKKKKKK K'K&K'K$KK
K
KK
KKKKKKKKKKK!K!K!K$K$K%K(K)K'K$K&K*K(K*K+K*K)K)K)K(K+K+K0K?K;KeK�K�K�K�K�K�K�KmK;K%K;KlKzKhK�K�K,K2K�K�K�K�KJKlK{KeKdKiK�K�K�K�K�K�K�K:KCK�KoKKNKhKuK�K�KgKMKKKBKWKjK_K�KiKbKUKVK�K�KuKNK|K�K�K�K�K�K�K�K�K�KJKOKEK4K2KBKK�KaK7KeK]K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KWKRKVKWKVKXKWKSKPKLKPKQKQKVK]K`KGK+K*K,K)K"KKKKK5KAKBKAK?K@KBKAKBK@K?KAKAKAKAKAKBKBKBKAKBK@K?KAKBKBKAKAKAKAK?K>K>K>K>K>K?K>K>K?K?K>K?K>K>K>K=K=K=K=K=K>K>K=K=K=K:K;K8K,K%K*K-K0KK K:K:K:KK>K>K;K7K3K7KK>K>K(KKK+K?K3K.K/K,K
+KKKK%KAK�K�K�K�K^K�KKKKKAKkK�K�K�K�K,KK	KKK
K:KDK�K�K�K�K�KKKKKKKK=K?K�K�K�K�K�KZKKKKKKKK0KFKAK�K�K�K�K�KnK K%K$K$K"K#K&K'K)K9KDK?K}K�K�K�K�K�KHK(K.K-K-K/K0K0K1K3K1K8K?K?K>K�K�K�K�K�K�K�K�K�K�K�K�K�KUKCKDKDKEKDKDK@KK?K>K+K(K)K(KKKKKKKKKKKKK"K)K)K)K%KKKKK
KKKKKKKKKKKK K!K&K&K%K'K(K(K#K$K(K(K*K+K*K*K+K+K+K-K-K1K?KK?K>K>K>K>K>K>K>K>K?K?K?K?K?K=KKK@K=KK>K>K;K9K4K5K>K>K?K:K2K1K)K0K>K>K?K?K#KK K>K9K.K/K1KKKKKK;KyK�K�K�K�KKK	KK
K9KRK�K�K�K�KYK�KKKKK-K@KyK�K�K�K�KKoK�K�K�K�K�K0K%K(K(K%K&K)K'K$K/KDK>KWK�K�K�K�K�KtK*K0K/K/K2K3K2K2K2K2K7K>K@KK>K;K+K'K'K'KKKKKKKKKKKKK$K)K(K(K#KKK
KKK
KKKKKKKKKK K K"K%K$K%K(K(K'K(K'K(K*K*K+K,K,K*K+K*K-K-K1K?K=KfK�K�K�K�K�K�K�K�K\KVKhK~KiK�K�K�K!K]K�K�K�K�KtKYK@KeK|KMK�K�K�K�K�K�KZK"K#KiKFKCKZKcK�K�K�KyKnKfKKKRKdKYKdKsKrKdK/KiK�K�K�K�K�K�K�K�K�K�KlKXKmKbKMKK5KRKnKzK�K�KlKOKSK�K�KvK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K_KQK^KgKvK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KnKDK+K%K+K(K"KKK2KAKBK@K@KBKBKAKAKAKAKAKAKAKAKAKAKAKBKBK@K@KBKAK@K?K>K?K>K>K>K>K>K>K@K@K?K?K?K?K?K=KK=KKK>K;K9K3K1K>K@K>K@K9K0K0K,K1KKKBKAK�K�K�K�K�K�K9K0K3K2K3K4K3K4K3K4K8KKBKAK>K>K>K>K>K>K>K>K?K?K>K>K>K>K>K>K?K=KK8K&K K4K8K9K;K9K5K4K5KK'KAK>K>K;K8K5K1KK>K@KK8KK*K?K0K-K.K&KKKKK.KNK�K�K�K�KNKKK
K
K)K;KtK�K�K�K�K3KK KKK$K>KHK�K�K�K�K�K(K#K$K$K$K#K)KAK>K�K�K�K�K�KgKK'K&K%K%K%K$K6KEK;K�K�K�K�K�K{K&K*K)K+K+K(K8KK^K7KCK=KlK�K�K�K�K�K[K+K3K3K4K5K5K5K4K6K8K8KK?K;KOK�K�K�K�K�K�K_K'K/K0K8KHKDK/K1K"K3KKKPKSKSKRKQK@K8K:KK?K>K?K>K=K=K=K>K?K>K>K?K=KK?K=K:K7K3K8KK?K=K>K3K6K2K+K6K?K>K@K4K$K>K4K-K.K-KKK
+K	K%KAK�K�K�K�K�KKKKK&K;KXK�K�K�K�K]KK%K!K!K#K:K?KK�K�K�K�KIK"K)K(K%K%K(KKLK�K�K�K�K�K�K.K/K3K4K4K4K5K7K7K6K?KGKAK?KOK�K�K�K�K�K�K�K�K�K�K�K�KyKRKHKHKJKKKLKKKCK>K?K=K�K�K�K�K�K�K�K6K1K5KK=K?K?K7K(K(K&K%KKKKKKKKKKKKK$K'K(K)K KKKK
+KKKKKKKKKKKKK K!K$K$K%K'K)K)K(K(K)K+K*K*K*K*K+K*K+K-K-K1K?K:KiK�K�K�KuKwK�KNKK$K)KRK�K�K�K�KXK&K�K�K�K�K�K�K�KlKbKxKfKfK~K�K�K�K�K�K�K�K�K�K�K�K�K�K�K|K�K�K�K�K�K�K�KlKDK'K.KKLK�KQK9K�K�KzK�K�K�K�K}KAK�K�KWKQKEKpK^KoK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KpKDK,KKK%K5KCKCKBKAK?KAKAK?K>KAK@K>K>K>KAKAK>K?KBK@KBK@K>K?K>K>K=KK>K>K>K>K=K=K=K=K=K=KK=KK;K9K4K6K=K>K?K=K9K7K0K>K2K+K7K>K>K@K4K4K8K-K-K,KKKKK"K;KpK�K�K�K�K)KK#KK!K:KFK�K�K�K�K�K$K(K%K$K#K2K@K^K�K�K�K�KrK!K+K)K'K(K'K6KDKIK�K�K�K�K�KEK&K)K)K)K'K&K&K;K?KKK�K�K�K�K�K_K%K,K+K*K)K0KXK@K+K=KBK>K�K�K�K�K�K�KDK.K3K5K4K5K5K8K8K5KFKcKAKCK>K�K�K�K�K�K�K�K�K�K�K�K�K�KcKJKHKKKLKKKMKHKK>K?K4K'K(K&K$KKKKKKKKKKKKK(K)K(K*K!KKKKKKKKKKKKKKK!K KK#K&K%K%K'K)K'K)K+K+K+K+K*K*K+K*K+K.K,K*K0K@KK>K?K?K>K=K=K?K?K?K?K?K?K?K?K?K?K?K>K?K>KK?K>K>KK=K>K?K>K>K?K=KK?K>K9K4K5K;K>K?K8K1K:K.K=K;K0K,K6K>K>K;K6K2K-K)K+K'KKKKK9KWK�K�K�K�KKKK%K"K!K3K>KK�K�K�K�K2K#K'K#K'K,K?KIK�K�K�K�K�K.K)K+K*K+K)K-KCKBK�K�K�K�K�KhK&K+K(K)K(K'K%K6KCK>K�K�K�K�K�K�K*K,K+K*K+K*K>K5K+K5KAK@K_K�K�K�K�K�KnK-K3K5K5K7K7K7K:K8KNK�KRK?KK=K>K@K�K�K�K�K�K�K�K2K;KQKBK*K>KMKLKNKPKNKTKRKQKMK8K,K7K:KK>K?K3K'K(K'K$KKKKKKKKKKKKK(K)K(K*K KKKKKKKKKKKKKKK K!K K"K%K&K&K(K(K&K+K*K*K*K*K+K*K+K*K,K.K,K*K0K@KK=K=K>K>K?K>K>K?K>K>K>K>K>K>K>K>KK?K?K>K=K=K?K>K=K>K?K?K?K>K=KK>K:K6K2K9K?K>K9K3K=K1K:K?KK@KaK�K�K�K�K�K.K*K)K)K)K)K'K.K@K=KaK�K�K�K�K�K?K(K,K+K+K+K.K.K(K/K?K@KEK�K�K�K�K�K�K6K2K4K5K7K8K8K9KKK>K?K2K(K(K(K#KKKKKKKKKKKKK(K)K(K)KKKK
+KKKKKKKKKKKKK!K!K!K#K&K)K)K(K)K(K(K(K(K*K+K*K+K*K,K.K.K-K1K?KK?K>KK?K>K>K?K>KK>K>KK?K>K>K?K?K?K>K=KK=K=K9K1K8K>KK>K@K2K(K(K%K KKKKKKKKKKKKK)K(K(K)KKKKKKKKKKKKKKKK K!K"K#K$K&K'K(K)K(K)K(K)K*K*K+K*K+K*K,K.K-K-K1K?K;KlK�KcK%KCKK'K5KRK�K�K�K�K�K�K�K KK�K�K�K�K�K�K�K�K�K\KnK�K�K�KuKCK�K�K�K�K�K�K�K�KfKjK`K�K�K�K�K�KtKWKyK�K�K\K5K(K'KdK�KbK�K�KiKZKyK)KK@K(KUKoKvK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KaK=K:K>K?K>K>KK?K>K>K?K=KK=K=K=K>K?K>K>K?K>K=K=K=K=K=K;K;K=KK5K$K6K7K8K6K6K4K7K+KK8K>K>K=KK>K?K7K0K?K>K?K;K.K1K;K7K2K=K0K#K!KK'K+KK%K9KbK�K�K�K�K>K!K'K&K%K8K>K�K�K�K�K�K3K)K*K*K(K.K@KPK�K�K�K�K�K,K*K,K*K+K*K0KDKBK�K�K�K�K�KjK'K,K+K*K)K)K(K3KBK>KK�K�K�K�K�K.K,K,K+K)K'K+K-K,K3K@K>KVK�K�K�K�K�K}K1K5K5K7K8K9K8KBKwK�K�KgK>K@KOK�K�K�K�K�K�K�K�K�K�K�K�K�KXKLKNKJKLKTKPKAK:K=K;KoK�K�K�K�K�K�KLKEKIKDKHKGKAKCKIKGK=K8K-KKKKK0K9K9KK>K>K/K'K(K%K KKKKKKKKKKKKK)K(K(K*KKKK
K
KKKKKKKKKKK!K K#K$K&K%K%K'K)K(K(K(K)K+K*K*K*K*K*K+K(K-K-K1K>K:KoK�K�K�KJKEK[KfKmK�K�K�K�KaK�K�KK+KoK�K�K�K�K}K�K�KqK0KDK�KNKxK�KUK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K]K5KkKzKtK~KWKAKK>K>K?K=K;K?K>K>K>KK>K?K>KK;K"K0K8K7K5K4K4K5K3KK2K@K>K=KK>K@K:K-K=K?K>K@KKK?K�K�K�K�K�KCK(K-K)K*K+K,K=K@KcK�K�K�K�K�K-K+K+K,K)K)K(K,K?K?K\K�K�K�K�K�KGK&K+K*K+K+K-K.K,K.KK:KCK@K8K2K*KKKKKK*K:K9K;K9e]r(K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KNK.K:KJK�K�K�KjK6K5K/KK3K:K9K7K4K1K1K9K>K?K?K=K,K'K(K%K KKKKKKKKKKKKK)K(K(K)KKKKKKKKKKKKKKKK K!K#K$K%K&K(K(K(K(K+K(K(K+K*K*K*K*K+K(K'K/K-K3K@K=KcK�K�K�KqK�K�K�K�K�K�K�K�KUKzKjK
KIKLKyK�K�K�KfK�K�K{KzK{KbKVK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KeKKKLKCK9KrKcKNK1K!K2KvK�K�K�K�K�K!KZKvKBK1KGKSKKK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K@K:K@KAKAK?K>K?K>KK>K>K>K>K?K>K=K>K?K?K>KK>K>K=KK>K=K:K4K1K;K=K>K>K>K?K0K9K@K>K?K@K8K,K0K3K?K>K>K(KKKK+K*K3KAK�K�K�K�K�K"K'K&K$K,K?KSK�K�K�K�KpK$K+K+K*K(K8K>KmK�K�K�K�KjK%K*K)K*K+K)K5KCKHK�K�K�K�K�KGK'K+K+K*K(K)K+K:KBKDK�K�K�K�K�KqK$K,K*K+K,K-K.K.K,K6KBK=KfK�K�K�K�K�KdK-K4K5K4K9K;KWK�K�K�K�KUK?K>K\K�K�K�K�K�K�K�K�K�K�K�K�KyKUKOKMKQKWKLKDK=KK:KyK�K�K�K�K�K�KMKFKFK=K0K4K9K,K KKKKKKK.KK1K8K9K9e]r(K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K}KHK*K=KMK�K�K�KdK3K5K*KK5K:K:K8K4K1K2K:K>K@K?K=K,K'K(K%K KKKKKKKKKKKKK)K(K(K(KKKK
+KKKKKKKKKKKK K!K#K$K%K'K)K(K)K(K%K(K*K*K*K*K*K+K*K+K+K.K-K4K@K:K]K�K�K�K�K~K�K�K�K�K�K�K�KSK6KKK]K~K�K�K�K�KxKrK�K�K�K�KBKvK�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�KnKMKZK�K�K�KWK/K'K@KXKWK�K�K]KKTKqKNKCKPKnKFK~K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K�K9KK4KBKBK?K>K?K>KK?K>K>K?K?K?K=KK?K?K>KK?K?K>K=KK>K?K,K)K7K8K8K8K5K5K7K KK@K>K?K>K:K7K0K9K=K>K?K>K?K3K6KAK?K?K?K>K3K)K6K?K>K?K;KKKKK-K5K:KoK�K�K�K�K5K$K&K%K&K;KCK�K�K�K�K�K-K'K+K*K*K2K?KQK�K�K�K�K�K.K,K,K+K+K)K0KCK?K�K�K�K�K�KlK%K,K+K*K)K(K)K2KBKKK 1
+''' Some tests for the documenting decorator and support functions '''
+
+import sys
+
+import pytest
+from numpy.testing import assert_equal, suppress_warnings
+
+from scipy.misc import doccer
+
+# python -OO strips docstrings
+DOCSTRINGS_STRIPPED = sys.flags.optimize > 1
+
+docstring = \
+"""Docstring
+    %(strtest1)s
+        %(strtest2)s
+     %(strtest3)s
+"""
+param_doc1 = \
+"""Another test
+   with some indent"""
+
+param_doc2 = \
+"""Another test, one line"""
+
+param_doc3 = \
+"""    Another test
+       with some indent"""
+
+doc_dict = {'strtest1':param_doc1,
+            'strtest2':param_doc2,
+            'strtest3':param_doc3}
+
+filled_docstring = \
+"""Docstring
+    Another test
+       with some indent
+        Another test, one line
+     Another test
+       with some indent
+"""
+
+
+def test_unindent():
+    with suppress_warnings() as sup:
+        sup.filter(category=DeprecationWarning)
+        assert_equal(doccer.unindent_string(param_doc1), param_doc1)
+        assert_equal(doccer.unindent_string(param_doc2), param_doc2)
+        assert_equal(doccer.unindent_string(param_doc3), param_doc1)
+
+
+def test_unindent_dict():
+    with suppress_warnings() as sup:
+        sup.filter(category=DeprecationWarning)
+        d2 = doccer.unindent_dict(doc_dict)
+    assert_equal(d2['strtest1'], doc_dict['strtest1'])
+    assert_equal(d2['strtest2'], doc_dict['strtest2'])
+    assert_equal(d2['strtest3'], doc_dict['strtest1'])
+
+
+def test_docformat():
+    with suppress_warnings() as sup:
+        sup.filter(category=DeprecationWarning)
+        udd = doccer.unindent_dict(doc_dict)
+        formatted = doccer.docformat(docstring, udd)
+        assert_equal(formatted, filled_docstring)
+        single_doc = 'Single line doc %(strtest1)s'
+        formatted = doccer.docformat(single_doc, doc_dict)
+        # Note - initial indent of format string does not
+        # affect subsequent indent of inserted parameter
+        assert_equal(formatted, """Single line doc Another test
+   with some indent""")
+
+
+@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstrings stripped")
+def test_decorator():
+    with suppress_warnings() as sup:
+        sup.filter(category=DeprecationWarning)
+        # with unindentation of parameters
+        decorator = doccer.filldoc(doc_dict, True)
+
+        @decorator
+        def func():
+            """ Docstring
+            %(strtest3)s
+            """
+        assert_equal(func.__doc__, """ Docstring
+            Another test
+               with some indent
+            """)
+
+        # without unindentation of parameters
+        decorator = doccer.filldoc(doc_dict, False)
+
+        @decorator
+        def func():
+            """ Docstring
+            %(strtest3)s
+            """
+        assert_equal(func.__doc__, """ Docstring
+                Another test
+                   with some indent
+            """)
+
+
+@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstrings stripped")
+def test_inherit_docstring_from():
+
+    with suppress_warnings() as sup:
+        sup.filter(category=DeprecationWarning)
+
+        class Foo:
+            def func(self):
+                '''Do something useful.'''
+                return
+
+            def func2(self):
+                '''Something else.'''
+
+        class Bar(Foo):
+            @doccer.inherit_docstring_from(Foo)
+            def func(self):
+                '''%(super)sABC'''
+                return
+
+            @doccer.inherit_docstring_from(Foo)
+            def func2(self):
+                # No docstring.
+                return
+
+    assert_equal(Bar.func.__doc__, Foo.func.__doc__ + 'ABC')
+    assert_equal(Bar.func2.__doc__, Foo.func2.__doc__)
+    bar = Bar()
+    assert_equal(bar.func.__doc__, Foo.func.__doc__ + 'ABC')
+    assert_equal(bar.func2.__doc__, Foo.func2.__doc__)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/ndimage/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/ndimage/__init__.py
new file mode 100644
index 00000000..660355fd
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/ndimage/__init__.py
@@ -0,0 +1,169 @@
+"""
+=========================================================
+Multidimensional image processing (:mod:`scipy.ndimage`)
+=========================================================
+
+.. currentmodule:: scipy.ndimage
+
+This package contains various functions for multidimensional image
+processing.
+
+
+Filters
+=======
+
+.. autosummary::
+   :toctree: generated/
+
+   convolve - Multidimensional convolution
+   convolve1d - 1-D convolution along the given axis
+   correlate - Multidimensional correlation
+   correlate1d - 1-D correlation along the given axis
+   gaussian_filter
+   gaussian_filter1d
+   gaussian_gradient_magnitude
+   gaussian_laplace
+   generic_filter - Multidimensional filter using a given function
+   generic_filter1d - 1-D generic filter along the given axis
+   generic_gradient_magnitude
+   generic_laplace
+   laplace - N-D Laplace filter based on approximate second derivatives
+   maximum_filter
+   maximum_filter1d
+   median_filter - Calculates a multidimensional median filter
+   minimum_filter
+   minimum_filter1d
+   percentile_filter - Calculates a multidimensional percentile filter
+   prewitt
+   rank_filter - Calculates a multidimensional rank filter
+   sobel
+   uniform_filter - Multidimensional uniform filter
+   uniform_filter1d - 1-D uniform filter along the given axis
+
+Fourier filters
+===============
+
+.. autosummary::
+   :toctree: generated/
+
+   fourier_ellipsoid
+   fourier_gaussian
+   fourier_shift
+   fourier_uniform
+
+Interpolation
+=============
+
+.. autosummary::
+   :toctree: generated/
+
+   affine_transform - Apply an affine transformation
+   geometric_transform - Apply an arbitrary geometric transform
+   map_coordinates - Map input array to new coordinates by interpolation
+   rotate - Rotate an array
+   shift - Shift an array
+   spline_filter
+   spline_filter1d
+   zoom - Zoom an array
+
+Measurements
+============
+
+.. autosummary::
+   :toctree: generated/
+
+   center_of_mass - The center of mass of the values of an array at labels
+   extrema - Minima and maxima of an array at labels, with their positions
+   find_objects - Find objects in a labeled array
+   histogram - Histogram of the values of an array, optionally at labels
+   label - Label features in an array
+   labeled_comprehension
+   maximum
+   maximum_position
+   mean - Mean of the values of an array at labels
+   median
+   minimum
+   minimum_position
+   standard_deviation - Standard deviation of an N-D image array
+   sum_labels - Sum of the values of the array
+   value_indices - Find indices of each distinct value in a given array
+   variance - Variance of the values of an N-D image array
+   watershed_ift
+
+Morphology
+==========
+
+.. autosummary::
+   :toctree: generated/
+
+   binary_closing
+   binary_dilation
+   binary_erosion
+   binary_fill_holes
+   binary_hit_or_miss
+   binary_opening
+   binary_propagation
+   black_tophat
+   distance_transform_bf
+   distance_transform_cdt
+   distance_transform_edt
+   generate_binary_structure
+   grey_closing
+   grey_dilation
+   grey_erosion
+   grey_opening
+   iterate_structure
+   morphological_gradient
+   morphological_laplace
+   white_tophat
+
+"""
+
+# Copyright (C) 2003-2005 Peter J. Verveer
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above
+#    copyright notice, this list of conditions and the following
+#    disclaimer in the documentation and/or other materials provided
+#    with the distribution.
+#
+# 3. The name of the author may not be used to endorse or promote
+#    products derived from this software without specific prior
+#    written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from ._filters import *  # noqa: F401 F403
+from ._fourier import *  # noqa: F401 F403
+from ._interpolation import *  # noqa: F401 F403
+from ._measurements import *  # noqa: F401 F403
+from ._morphology import *  # noqa: F401 F403
+
+# Deprecated namespaces, to be removed in v2.0.0
+from . import filters  # noqa: F401
+from . import fourier  # noqa: F401
+from . import interpolation  # noqa: F401
+from . import measurements  # noqa: F401
+from . import morphology  # noqa: F401
+
+__all__ = [s for s in dir() if not s.startswith('_')]
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/__packaged__/coreml/.python_dependencies/scipy/ndimage/_filters.py b/__packaged__/coreml/.python_dependencies/scipy/ndimage/_filters.py
new file mode 100644
index 00000000..6804ef51
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/ndimage/_filters.py
@@ -0,0 +1,1635 @@
+# Copyright (C) 2003-2005 Peter J. Verveer
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above
+#    copyright notice, this list of conditions and the following
+#    disclaimer in the documentation and/or other materials provided
+#    with the distribution.
+#
+# 3. The name of the author may not be used to endorse or promote
+#    products derived from this software without specific prior
+#    written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from collections.abc import Iterable
+import numbers
+import warnings
+import numpy
+import operator
+from numpy.core.multiarray import normalize_axis_index
+from . import _ni_support
+from . import _nd_image
+from . import _ni_docstrings
+
+__all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter',
+           'prewitt', 'sobel', 'generic_laplace', 'laplace',
+           'gaussian_laplace', 'generic_gradient_magnitude',
+           'gaussian_gradient_magnitude', 'correlate', 'convolve',
+           'uniform_filter1d', 'uniform_filter', 'minimum_filter1d',
+           'maximum_filter1d', 'minimum_filter', 'maximum_filter',
+           'rank_filter', 'median_filter', 'percentile_filter',
+           'generic_filter1d', 'generic_filter']
+
+
+def _invalid_origin(origin, lenw):
+    return (origin < -(lenw // 2)) or (origin > (lenw - 1) // 2)
+
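+# Illustrative examples of the bound above: a length-5 window admits origins
+# -2..2, while a length-4 window admits the asymmetric range -2..1.
+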
+
+def _complex_via_real_components(func, input, weights, output, cval, **kwargs):
+    """Complex convolution via a linear combination of real convolutions."""
+    complex_input = input.dtype.kind == 'c'
+    complex_weights = weights.dtype.kind == 'c'
+    if complex_input and complex_weights:
+        # real component of the output
+        func(input.real, weights.real, output=output.real,
+             cval=numpy.real(cval), **kwargs)
+        output.real -= func(input.imag, weights.imag, output=None,
+                            cval=numpy.imag(cval), **kwargs)
+        # imaginary component of the output
+        func(input.real, weights.imag, output=output.imag,
+             cval=numpy.real(cval), **kwargs)
+        output.imag += func(input.imag, weights.real, output=None,
+                            cval=numpy.imag(cval), **kwargs)
+    elif complex_input:
+        func(input.real, weights, output=output.real, cval=numpy.real(cval),
+             **kwargs)
+        func(input.imag, weights, output=output.imag, cval=numpy.imag(cval),
+             **kwargs)
+    else:
+        if numpy.iscomplexobj(cval):
+            raise ValueError("Cannot provide a complex-valued cval when the "
+                             "input is real.")
+        func(input, weights.real, output=output.real, cval=cval, **kwargs)
+        func(input, weights.imag, output=output.imag, cval=cval, **kwargs)
+    return output
+
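+
+# A minimal self-check of the identity used above (the ``_demo_*`` helper is
+# illustrative only, not part of scipy's API): convolution is bilinear, so
+# (a + ib) filtered by (c + id) decomposes into four real convolutions,
+# recombined as (a*c - b*d) + i(a*d + b*c).
+def _demo_complex_via_real_parts():
+    rng = numpy.random.default_rng(0)
+    x = rng.standard_normal(8) + 1j * rng.standard_normal(8)
+    w = rng.standard_normal(3) + 1j * rng.standard_normal(3)
+    direct = numpy.convolve(x, w, mode='same')
+    # Recombine the four real convolutions exactly as the helper above does.
+    recombined = (numpy.convolve(x.real, w.real, 'same')
+                  - numpy.convolve(x.imag, w.imag, 'same')
+                  + 1j * (numpy.convolve(x.real, w.imag, 'same')
+                          + numpy.convolve(x.imag, w.real, 'same')))
+    assert numpy.allclose(direct, recombined)
+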
+
+@_ni_docstrings.docfiller
+def correlate1d(input, weights, axis=-1, output=None, mode="reflect",
+                cval=0.0, origin=0):
+    """Calculate a 1-D correlation along the given axis.
+
+    The lines of the array along the given axis are correlated with the
+    given weights.
+
+    Parameters
+    ----------
+    %(input)s
+    weights : array
+        1-D sequence of numbers.
+    %(axis)s
+    %(output)s
+    %(mode_reflect)s
+    %(cval)s
+    %(origin)s
+
+    Examples
+    --------
+    >>> from scipy.ndimage import correlate1d
+    >>> correlate1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3])
+    array([ 8, 26,  8, 12,  7, 28, 36,  9])
+    """
+    input = numpy.asarray(input)
+    weights = numpy.asarray(weights)
+    complex_input = input.dtype.kind == 'c'
+    complex_weights = weights.dtype.kind == 'c'
+    if complex_input or complex_weights:
+        if complex_weights:
+            weights = weights.conj()
+            weights = weights.astype(numpy.complex128, copy=False)
+        kwargs = dict(axis=axis, mode=mode, origin=origin)
+        output = _ni_support._get_output(output, input, complex_output=True)
+        return _complex_via_real_components(correlate1d, input, weights,
+                                            output, cval, **kwargs)
+
+    output = _ni_support._get_output(output, input)
+    weights = numpy.asarray(weights, dtype=numpy.float64)
+    if weights.ndim != 1 or weights.shape[0] < 1:
+        raise RuntimeError('no filter weights given')
+    if not weights.flags.contiguous:
+        weights = weights.copy()
+    axis = normalize_axis_index(axis, input.ndim)
+    if _invalid_origin(origin, len(weights)):
+        raise ValueError('Invalid origin; origin must satisfy '
+                         '-(len(weights) // 2) <= origin <= '
+                         '(len(weights)-1) // 2')
+    mode = _ni_support._extend_mode_to_code(mode)
+    _nd_image.correlate1d(input, weights, axis, output, mode, cval,
+                          origin)
+    return output
+
+
+@_ni_docstrings.docfiller
+def convolve1d(input, weights, axis=-1, output=None, mode="reflect",
+               cval=0.0, origin=0):
+    """Calculate a 1-D convolution along the given axis.
+
+    The lines of the array along the given axis are convolved with the
+    given weights.
+
+    Parameters
+    ----------
+    %(input)s
+    weights : ndarray
+        1-D sequence of numbers.
+    %(axis)s
+    %(output)s
+    %(mode_reflect)s
+    %(cval)s
+    %(origin)s
+
+    Returns
+    -------
+    convolve1d : ndarray
+        Convolved array with same shape as input
+
+    Examples
+    --------
+    >>> from scipy.ndimage import convolve1d
+    >>> convolve1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3])
+    array([14, 24,  4, 13, 12, 36, 27,  0])
+    """
+    weights = weights[::-1]
+    origin = -origin
+    if not len(weights) & 1:
+        origin -= 1
+    weights = numpy.asarray(weights)
+    if weights.dtype.kind == 'c':
+        # pre-conjugate here to counteract the conjugation in correlate1d
+        weights = weights.conj()
+    return correlate1d(input, weights, axis, output, mode, cval, origin)
+
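+
+# A minimal self-check of the flip-and-shift trick above (the ``_demo_*``
+# helper is illustrative only, not scipy API): for an odd-length kernel no
+# origin shift is needed, so convolution is simply correlation with the
+# kernel reversed.
+def _demo_convolve_is_flipped_correlate():
+    x = numpy.array([2, 8, 0, 4, 1, 9, 9, 0])
+    assert numpy.array_equal(convolve1d(x, [1, 2, 3]),
+                             correlate1d(x, [3, 2, 1]))
+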
+
+def _gaussian_kernel1d(sigma, order, radius):
+    """
+    Computes a 1-D Gaussian convolution kernel.
+    """
+    if order < 0:
+        raise ValueError('order must be non-negative')
+    exponent_range = numpy.arange(order + 1)
+    sigma2 = sigma * sigma
+    x = numpy.arange(-radius, radius+1)
+    phi_x = numpy.exp(-0.5 / sigma2 * x ** 2)
+    phi_x = phi_x / phi_x.sum()
+
+    if order == 0:
+        return phi_x
+    else:
+        # f(x) = q(x) * phi(x) = q(x) * exp(p(x))
+        # f'(x) = (q'(x) + q(x) * p'(x)) * phi(x)
+        # p'(x) = -1 / sigma ** 2
+        # Implement q'(x) + q(x) * p'(x) as a matrix operator and apply to the
+        # coefficients of q(x)
+        q = numpy.zeros(order + 1)
+        q[0] = 1
+        D = numpy.diag(exponent_range[1:], 1)  # D @ q(x) = q'(x)
+        P = numpy.diag(numpy.ones(order)/-sigma2, -1)  # P @ q(x) = q(x) * p'(x)
+        Q_deriv = D + P
+        for _ in range(order):
+            q = Q_deriv.dot(q)
+        q = (x[:, None] ** exponent_range).dot(q)
+        return q * phi_x
+
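+
+# A minimal self-check of the polynomial machinery above (illustrative only,
+# not scipy API): for order=1 it reduces to q(x) = -x / sigma**2, i.e. the
+# analytic first derivative of the normalized Gaussian.
+def _demo_gaussian_kernel_order1():
+    sigma, radius = 2.0, 8
+    x = numpy.arange(-radius, radius + 1)
+    phi = numpy.exp(-0.5 * x ** 2 / sigma ** 2)
+    phi /= phi.sum()
+    expected = -x / sigma ** 2 * phi
+    assert numpy.allclose(_gaussian_kernel1d(sigma, 1, radius), expected)
+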
+
+@_ni_docstrings.docfiller
+def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None,
+                      mode="reflect", cval=0.0, truncate=4.0, *, radius=None):
+    """1-D Gaussian filter.
+
+    Parameters
+    ----------
+    %(input)s
+    sigma : scalar
+        standard deviation for Gaussian kernel
+    %(axis)s
+    order : int, optional
+        An order of 0 corresponds to convolution with a Gaussian
+        kernel. A positive order corresponds to convolution with
+        that derivative of a Gaussian.
+    %(output)s
+    %(mode_reflect)s
+    %(cval)s
+    truncate : float, optional
+        Truncate the filter at this many standard deviations.
+        Default is 4.0.
+    radius : None or int, optional
+        Radius of the Gaussian kernel. If specified, the size of
+        the kernel will be ``2*radius + 1``, and `truncate` is ignored.
+        Default is None.
+
+    Returns
+    -------
+    gaussian_filter1d : ndarray
+
+    Notes
+    -----
+    The Gaussian kernel will have size ``2*radius + 1`` where
+    ``radius = round(truncate * sigma)``.
+
+    Examples
+    --------
+    >>> from scipy.ndimage import gaussian_filter1d
+    >>> import numpy as np
+    >>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 1)
+    array([ 1.42704095,  2.06782203,  3.        ,  3.93217797,  4.57295905])
+    >>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 4)
+    array([ 2.91948343,  2.95023502,  3.        ,  3.04976498,  3.08051657])
+    >>> import matplotlib.pyplot as plt
+    >>> rng = np.random.default_rng()
+    >>> x = rng.standard_normal(101).cumsum()
+    >>> y3 = gaussian_filter1d(x, 3)
+    >>> y6 = gaussian_filter1d(x, 6)
+    >>> plt.plot(x, 'k', label='original data')
+    >>> plt.plot(y3, '--', label='filtered, sigma=3')
+    >>> plt.plot(y6, ':', label='filtered, sigma=6')
+    >>> plt.legend()
+    >>> plt.grid()
+    >>> plt.show()
+
+    """
+    sd = float(sigma)
+    # make the radius of the filter equal to truncate standard deviations
+    lw = int(truncate * sd + 0.5)
+    if radius is not None:
+        lw = radius
+    if not isinstance(lw, numbers.Integral) or lw < 0:
+        raise ValueError('Radius must be a nonnegative integer.')
+    # Since we are calling correlate, not convolve, revert the kernel
+    weights = _gaussian_kernel1d(sigma, order, lw)[::-1]
+    return correlate1d(input, weights, axis, output, mode, cval, 0)
+
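+
+# A worked instance of the size relation documented above (illustrative only,
+# not scipy API): sigma=2 with the default truncate=4.0 gives radius
+# int(4.0 * 2 + 0.5) == 8, hence a kernel of 2*8 + 1 == 17 taps.
+def _demo_truncate_to_radius():
+    assert int(4.0 * 2.0 + 0.5) == 8
+    assert len(_gaussian_kernel1d(2.0, 0, 8)) == 17
+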
+
+@_ni_docstrings.docfiller
+def gaussian_filter(input, sigma, order=0, output=None,
+                    mode="reflect", cval=0.0, truncate=4.0, *, radius=None):
+    """Multidimensional Gaussian filter.
+
+    Parameters
+    ----------
+    %(input)s
+    sigma : scalar or sequence of scalars
+        Standard deviation for Gaussian kernel. The standard
+        deviations of the Gaussian filter are given for each axis as a
+        sequence, or as a single number, in which case it is equal for
+        all axes.
+    order : int or sequence of ints, optional
+        The order of the filter along each axis is given as a sequence
+        of integers, or as a single number. An order of 0 corresponds
+        to convolution with a Gaussian kernel. A positive order
+        corresponds to convolution with that derivative of a Gaussian.
+    %(output)s
+    %(mode_multiple)s
+    %(cval)s
+    truncate : float, optional
+        Truncate the filter at this many standard deviations.
+        Default is 4.0.
+    radius : None or int or sequence of ints, optional
+        Radius of the Gaussian kernel. The radii are given for each axis
+        as a sequence, or as a single number, in which case the radius is equal
+        for all axes. If specified, the size of the kernel along each axis
+        will be ``2*radius + 1``, and `truncate` is ignored.
+        Default is None.
+
+    Returns
+    -------
+    gaussian_filter : ndarray
+        Returned array of same shape as `input`.
+
+    Notes
+    -----
+    The multidimensional filter is implemented as a sequence of
+    1-D convolution filters. The intermediate arrays are
+    stored in the same data type as the output. Therefore, for output
+    types with a limited precision, the results may be imprecise
+    because intermediate results may be stored with insufficient
+    precision.
+
+    The Gaussian kernel will have size ``2*radius + 1`` along each axis
+    where ``radius = round(truncate * sigma)``.
+
+    Examples
+    --------
+    >>> from scipy.ndimage import gaussian_filter
+    >>> import numpy as np
+    >>> a = np.arange(50, step=2).reshape((5,5))
+    >>> a
+    array([[ 0,  2,  4,  6,  8],
+           [10, 12, 14, 16, 18],
+           [20, 22, 24, 26, 28],
+           [30, 32, 34, 36, 38],
+           [40, 42, 44, 46, 48]])
+    >>> gaussian_filter(a, sigma=1)
+    array([[ 4,  6,  8,  9, 11],
+           [10, 12, 14, 15, 17],
+           [20, 22, 24, 25, 27],
+           [29, 31, 33, 34, 36],
+           [35, 37, 39, 40, 42]])
+
+    >>> from scipy import datasets
+    >>> import matplotlib.pyplot as plt
+    >>> fig = plt.figure()
+    >>> plt.gray()  # show the filtered result in grayscale
+    >>> ax1 = fig.add_subplot(121)  # left side
+    >>> ax2 = fig.add_subplot(122)  # right side
+    >>> ascent = datasets.ascent()
+    >>> result = gaussian_filter(ascent, sigma=5)
+    >>> ax1.imshow(ascent)
+    >>> ax2.imshow(result)
+    >>> plt.show()
+    """
+    input = numpy.asarray(input)
+    output = _ni_support._get_output(output, input)
+    orders = _ni_support._normalize_sequence(order, input.ndim)
+    sigmas = _ni_support._normalize_sequence(sigma, input.ndim)
+    modes = _ni_support._normalize_sequence(mode, input.ndim)
+    radiuses = _ni_support._normalize_sequence(radius, input.ndim)
+    axes = list(range(input.ndim))
+    axes = [(axes[ii], sigmas[ii], orders[ii], modes[ii], radiuses[ii])
+            for ii in range(len(axes)) if sigmas[ii] > 1e-15]
+    if len(axes) > 0:
+        for axis, sigma, order, mode, radius in axes:
+            gaussian_filter1d(input, sigma, axis, order, output,
+                              mode, cval, truncate, radius=radius)
+            input = output
+    else:
+        output[...] = input[...]
+    return output
+
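+
+# A minimal self-check of the separability note above (illustrative only,
+# not scipy API): for a float array, the 2-D Gaussian filter equals two
+# successive 1-D passes, one per axis.
+def _demo_gaussian_separability():
+    a = numpy.arange(25, dtype=numpy.float64).reshape(5, 5)
+    two_pass = gaussian_filter1d(gaussian_filter1d(a, 1, axis=0), 1, axis=1)
+    assert numpy.allclose(gaussian_filter(a, 1), two_pass)
+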
+
+@_ni_docstrings.docfiller
+def prewitt(input, axis=-1, output=None, mode="reflect", cval=0.0):
+    """Calculate a Prewitt filter.
+
+    Parameters
+    ----------
+    %(input)s
+    %(axis)s
+    %(output)s
+    %(mode_multiple)s
+    %(cval)s
+
+    Examples
+    --------
+    >>> from scipy import ndimage, datasets
+    >>> import matplotlib.pyplot as plt
+    >>> fig = plt.figure()
+    >>> plt.gray()  # show the filtered result in grayscale
+    >>> ax1 = fig.add_subplot(121)  # left side
+    >>> ax2 = fig.add_subplot(122)  # right side
+    >>> ascent = datasets.ascent()
+    >>> result = ndimage.prewitt(ascent)
+    >>> ax1.imshow(ascent)
+    >>> ax2.imshow(result)
+    >>> plt.show()
+    """
+    input = numpy.asarray(input)
+    axis = normalize_axis_index(axis, input.ndim)
+    output = _ni_support._get_output(output, input)
+    modes = _ni_support._normalize_sequence(mode, input.ndim)
+    correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0)
+    axes = [ii for ii in range(input.ndim) if ii != axis]
+    for ii in axes:
+        correlate1d(output, [1, 1, 1], ii, output, modes[ii], cval, 0,)
+    return output
+
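+
+# A minimal self-check of the decomposition implemented above (illustrative
+# only, not scipy API): the Prewitt filter is the derivative stencil
+# [-1, 0, 1] along `axis` followed by [1, 1, 1] smoothing along the others.
+def _demo_prewitt_decomposition():
+    a = numpy.arange(25, dtype=numpy.float64).reshape(5, 5)
+    manual = correlate1d(correlate1d(a, [-1, 0, 1], axis=0),
+                         [1, 1, 1], axis=1)
+    assert numpy.allclose(prewitt(a, axis=0), manual)
+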
+
+@_ni_docstrings.docfiller
+def sobel(input, axis=-1, output=None, mode="reflect", cval=0.0):
+    """Calculate a Sobel filter.
+
+    Parameters
+    ----------
+    %(input)s
+    %(axis)s
+    %(output)s
+    %(mode_multiple)s
+    %(cval)s
+
+    Examples
+    --------
+    >>> from scipy import ndimage, datasets
+    >>> import matplotlib.pyplot as plt
+    >>> fig = plt.figure()
+    >>> plt.gray()  # show the filtered result in grayscale
+    >>> ax1 = fig.add_subplot(121)  # left side
+    >>> ax2 = fig.add_subplot(122)  # right side
+    >>> ascent = datasets.ascent()
+    >>> result = ndimage.sobel(ascent)
+    >>> ax1.imshow(ascent)
+    >>> ax2.imshow(result)
+    >>> plt.show()
+    """
+    input = numpy.asarray(input)
+    axis = normalize_axis_index(axis, input.ndim)
+    output = _ni_support._get_output(output, input)
+    modes = _ni_support._normalize_sequence(mode, input.ndim)
+    correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0)
+    axes = [ii for ii in range(input.ndim) if ii != axis]
+    for ii in axes:
+        correlate1d(output, [1, 2, 1], ii, output, modes[ii], cval, 0)
+    return output
+
+
+@_ni_docstrings.docfiller
+def generic_laplace(input, derivative2, output=None, mode="reflect",
+                    cval=0.0,
+                    extra_arguments=(),
+                    extra_keywords=None):
+    """
+    N-D Laplace filter using a provided second derivative function.
+
+    Parameters
+    ----------
+    %(input)s
+    derivative2 : callable
+        Callable with the following signature::
+
+            derivative2(input, axis, output, mode, cval,
+                        *extra_arguments, **extra_keywords)
+
+        See `extra_arguments`, `extra_keywords` below.
+    %(output)s
+    %(mode_multiple)s
+    %(cval)s
+    %(extra_keywords)s
+    %(extra_arguments)s
+    """
+    if extra_keywords is None:
+        extra_keywords = {}
+    input = numpy.asarray(input)
+    output = _ni_support._get_output(output, input)
+    axes = list(range(input.ndim))
+    if len(axes) > 0:
+        modes = _ni_support._normalize_sequence(mode, len(axes))
+        derivative2(input, axes[0], output, modes[0], cval,
+                    *extra_arguments, **extra_keywords)
+        for ii in range(1, len(axes)):
+            tmp = derivative2(input, axes[ii], output.dtype, modes[ii], cval,
+                              *extra_arguments, **extra_keywords)
+            output += tmp
+    else:
+        output[...] = input[...]
+    return output
+
+
+@_ni_docstrings.docfiller
+def laplace(input, output=None, mode="reflect", cval=0.0):
+    """N-D Laplace filter based on approximate second derivatives.
+
+    Parameters
+    ----------
+    %(input)s
+    %(output)s
+    %(mode_multiple)s
+    %(cval)s
+
+    Examples
+    --------
+    >>> from scipy import ndimage, datasets
+    >>> import matplotlib.pyplot as plt
+    >>> fig = plt.figure()
+    >>> plt.gray()  # show the filtered result in grayscale
+    >>> ax1 = fig.add_subplot(121)  # left side
+    >>> ax2 = fig.add_subplot(122)  # right side
+    >>> ascent = datasets.ascent()
+    >>> result = ndimage.laplace(ascent)
+    >>> ax1.imshow(ascent)
+    >>> ax2.imshow(result)
+    >>> plt.show()
+    """
+    def derivative2(input, axis, output, mode, cval):
+        return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0)
+    return generic_laplace(input, derivative2, output, mode, cval)
+
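+
+# A minimal self-check of the stencil above (illustrative only, not scipy
+# API): the N-D Laplace is the sum of the 1-D second differences [1, -2, 1]
+# taken along each axis.
+def _demo_laplace_stencil():
+    a = numpy.arange(25, dtype=numpy.float64).reshape(5, 5)
+    manual = (correlate1d(a, [1, -2, 1], axis=0)
+              + correlate1d(a, [1, -2, 1], axis=1))
+    assert numpy.allclose(laplace(a), manual)
+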
+
+@_ni_docstrings.docfiller
+def gaussian_laplace(input, sigma, output=None, mode="reflect",
+                     cval=0.0, **kwargs):
+    """Multidimensional Laplace filter using Gaussian second derivatives.
+
+    Parameters
+    ----------
+    %(input)s
+    sigma : scalar or sequence of scalars
+        The standard deviations of the Gaussian filter are given for
+        each axis as a sequence, or as a single number, in which case
+        it is equal for all axes.
+    %(output)s
+    %(mode_multiple)s
+    %(cval)s
+    Extra keyword arguments will be passed to gaussian_filter().
+
+    Examples
+    --------
+    >>> from scipy import ndimage, datasets
+    >>> import matplotlib.pyplot as plt
+    >>> ascent = datasets.ascent()
+
+    >>> fig = plt.figure()
+    >>> plt.gray()  # show the filtered result in grayscale
+    >>> ax1 = fig.add_subplot(121)  # left side
+    >>> ax2 = fig.add_subplot(122)  # right side
+
+    >>> result = ndimage.gaussian_laplace(ascent, sigma=1)
+    >>> ax1.imshow(result)
+
+    >>> result = ndimage.gaussian_laplace(ascent, sigma=3)
+    >>> ax2.imshow(result)
+    >>> plt.show()
+    """
+    input = numpy.asarray(input)
+
+    def derivative2(input, axis, output, mode, cval, sigma, **kwargs):
+        order = [0] * input.ndim
+        order[axis] = 2
+        return gaussian_filter(input, sigma, order, output, mode, cval,
+                               **kwargs)
+
+    return generic_laplace(input, derivative2, output, mode, cval,
+                           extra_arguments=(sigma,),
+                           extra_keywords=kwargs)
+
+
+@_ni_docstrings.docfiller
+def generic_gradient_magnitude(input, derivative, output=None,
+                               mode="reflect", cval=0.0,
+                               extra_arguments=(), extra_keywords=None):
+    """Gradient magnitude using a provided gradient function.
+
+    Parameters
+    ----------
+    %(input)s
+    derivative : callable
+        Callable with the following signature::
+
+            derivative(input, axis, output, mode, cval,
+                       *extra_arguments, **extra_keywords)
+
+        See `extra_arguments`, `extra_keywords` below.
+        `derivative` can assume that `input` and `output` are ndarrays.
+        Note that the output from `derivative` is modified inplace;
+        be careful to copy important inputs before returning them.
+    %(output)s
+    %(mode_multiple)s
+    %(cval)s
+    %(extra_keywords)s
+    %(extra_arguments)s
+    """
+    if extra_keywords is None:
+        extra_keywords = {}
+    input = numpy.asarray(input)
+    output = _ni_support._get_output(output, input)
+    axes = list(range(input.ndim))
+    if len(axes) > 0:
+        modes = _ni_support._normalize_sequence(mode, len(axes))
+        derivative(input, axes[0], output, modes[0], cval,
+                   *extra_arguments, **extra_keywords)
+        numpy.multiply(output, output, output)
+        for ii in range(1, len(axes)):
+            tmp = derivative(input, axes[ii], output.dtype, modes[ii], cval,
+                             *extra_arguments, **extra_keywords)
+            numpy.multiply(tmp, tmp, tmp)
+            output += tmp
+        # This allows the sqrt to work with a different default casting
+        numpy.sqrt(output, output, casting='unsafe')
+    else:
+        output[...] = input[...]
+    return output
+
+
+@_ni_docstrings.docfiller
+def gaussian_gradient_magnitude(input, sigma, output=None,
+                                mode="reflect", cval=0.0, **kwargs):
+    """Multidimensional gradient magnitude using Gaussian derivatives.
+
+    Parameters
+    ----------
+    %(input)s
+    sigma : scalar or sequence of scalars
+        The standard deviations of the Gaussian filter are given for
+        each axis as a sequence, or as a single number, in which case
+        it is equal for all axes.
+    %(output)s
+    %(mode_multiple)s
+    %(cval)s
+    Extra keyword arguments will be passed to gaussian_filter().
+
+    Returns
+    -------
+    gaussian_gradient_magnitude : ndarray
+        Filtered array. Has the same shape as `input`.
+
+    Examples
+    --------
+    >>> from scipy import ndimage, datasets
+    >>> import matplotlib.pyplot as plt
+    >>> fig = plt.figure()
+    >>> plt.gray()  # show the filtered result in grayscale
+    >>> ax1 = fig.add_subplot(121)  # left side
+    >>> ax2 = fig.add_subplot(122)  # right side
+    >>> ascent = datasets.ascent()
+    >>> result = ndimage.gaussian_gradient_magnitude(ascent, sigma=5)
+    >>> ax1.imshow(ascent)
+    >>> ax2.imshow(result)
+    >>> plt.show()
+    """
+    input = numpy.asarray(input)
+
+    def derivative(input, axis, output, mode, cval, sigma, **kwargs):
+        order = [0] * input.ndim
+        order[axis] = 1
+        return gaussian_filter(input, sigma, order, output, mode,
+                               cval, **kwargs)
+
+    return generic_gradient_magnitude(input, derivative, output, mode,
+                                      cval, extra_arguments=(sigma,),
+                                      extra_keywords=kwargs)
+
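+
+# A minimal self-check of the composition above (illustrative only, not
+# scipy API): the result is sqrt(gx**2 + gy**2), with gx and gy first-order
+# Gaussian derivatives along each axis.
+def _demo_gradient_magnitude():
+    a = numpy.arange(25, dtype=numpy.float64).reshape(5, 5)
+    gx = gaussian_filter(a, 1, order=[1, 0])
+    gy = gaussian_filter(a, 1, order=[0, 1])
+    expected = numpy.sqrt(gx * gx + gy * gy)
+    assert numpy.allclose(gaussian_gradient_magnitude(a, 1), expected)
+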
+
+def _correlate_or_convolve(input, weights, output, mode, cval, origin,
+                           convolution):
+    input = numpy.asarray(input)
+    weights = numpy.asarray(weights)
+    complex_input = input.dtype.kind == 'c'
+    complex_weights = weights.dtype.kind == 'c'
+    if complex_input or complex_weights:
+        if complex_weights and not convolution:
+            # As for numpy.correlate, conjugate weights rather than input.
+            weights = weights.conj()
+        kwargs = dict(
+            mode=mode, origin=origin, convolution=convolution
+        )
+        output = _ni_support._get_output(output, input, complex_output=True)
+
+        return _complex_via_real_components(_correlate_or_convolve, input,
+                                            weights, output, cval, **kwargs)
+
+    origins = _ni_support._normalize_sequence(origin, input.ndim)
+    weights = numpy.asarray(weights, dtype=numpy.float64)
+    wshape = [ii for ii in weights.shape if ii > 0]
+    if len(wshape) != input.ndim:
+        raise RuntimeError('filter weights array has incorrect shape.')
+    if convolution:
+        weights = weights[tuple([slice(None, None, -1)] * weights.ndim)]
+        for ii in range(len(origins)):
+            origins[ii] = -origins[ii]
+            if not weights.shape[ii] & 1:
+                origins[ii] -= 1
+    for origin, lenw in zip(origins, wshape):
+        if _invalid_origin(origin, lenw):
+            raise ValueError('Invalid origin; origin must satisfy '
+                             '-(weights.shape[k] // 2) <= origin[k] <= '
+                             '(weights.shape[k]-1) // 2')
+
+    if not weights.flags.contiguous:
+        weights = weights.copy()
+    output = _ni_support._get_output(output, input)
+    temp_needed = numpy.may_share_memory(input, output)
+    if temp_needed:
+        # input and output arrays cannot share memory
+        temp = output
+        output = _ni_support._get_output(output.dtype, input)
+    if not isinstance(mode, str) and isinstance(mode, Iterable):
+        raise RuntimeError("A sequence of modes is not supported")
+    mode = _ni_support._extend_mode_to_code(mode)
+    _nd_image.correlate(input, weights, output, mode, cval, origins)
+    if temp_needed:
+        temp[...] = output
+        output = temp
+    return output
+
+
+@_ni_docstrings.docfiller
+def correlate(input, weights, output=None, mode='reflect', cval=0.0,
+              origin=0):
+    """
+    Multidimensional correlation.
+
+    The array is correlated with the given kernel.
+
+    Parameters
+    ----------
+    %(input)s
+    weights : ndarray
+        array of weights, same number of dimensions as input
+    %(output)s
+    %(mode_reflect)s
+    %(cval)s
+    %(origin_multiple)s
+
+    Returns
+    -------
+    result : ndarray
+        The result of correlation of `input` with `weights`.
+
+    See Also
+    --------
+    convolve : Convolve an image with a kernel.
+
+    Examples
+    --------
+    Correlation is the process of moving a filter mask, often referred to
+    as a kernel, over the image and computing the sum of products at each
+    location.
+
+    >>> from scipy.ndimage import correlate
+    >>> import numpy as np
+    >>> input_img = np.arange(25).reshape(5,5)
+    >>> print(input_img)
+    [[ 0  1  2  3  4]
+     [ 5  6  7  8  9]
+     [10 11 12 13 14]
+     [15 16 17 18 19]
+     [20 21 22 23 24]]
+
+    Define a kernel (weights) for correlation. In this example, it sums the
+    center element and its four immediate neighbors (up, down, left, and
+    right).
+
+    >>> weights = [[0, 1, 0],
+    ...            [1, 1, 1],
+    ...            [0, 1, 0]]
+
+    We can calculate a correlation result. For example, element ``[2,2]``
+    is ``7 + 11 + 12 + 13 + 17 = 60``.
+
+    >>> correlate(input_img, weights)
+    array([[  6,  10,  15,  20,  24],
+           [ 26,  30,  35,  40,  44],
+           [ 51,  55,  60,  65,  69],
+           [ 76,  80,  85,  90,  94],
+           [ 96, 100, 105, 110, 114]])
+
+    """
+    return _correlate_or_convolve(input, weights, output, mode, cval,
+                                  origin, False)
+
+
+@_ni_docstrings.docfiller
+def convolve(input, weights, output=None, mode='reflect', cval=0.0,
+             origin=0):
+    """
+    Multidimensional convolution.
+
+    The array is convolved with the given kernel.
+
+    Parameters
+    ----------
+    %(input)s
+    weights : array_like
+        Array of weights, same number of dimensions as input
+    %(output)s
+    %(mode_reflect)s
+    cval : scalar, optional
+        Value to fill past edges of input if `mode` is 'constant'. Default
+        is 0.0
+    origin : int, optional
+        Controls the origin of the input signal, which is where the
+        filter is centered to produce the first element of the output.
+        Positive values shift the filter to the right, and negative values
+        shift the filter to the left. Default is 0.
+
+    Returns
+    -------
+    result : ndarray
+        The result of convolution of `input` with `weights`.
+
+    See Also
+    --------
+    correlate : Correlate an image with a kernel.
+
+    Notes
+    -----
+    Each value in result is :math:`C_i = \\sum_j{I_{i+k-j} W_j}`, where
+    W is the `weights` kernel,
+    j is the N-D spatial index over :math:`W`,
+    I is the `input` and k is the coordinate of the center of
+    W, specified by `origin` in the input parameters.
+
+    Examples
+    --------
+    Perhaps the simplest case to understand is ``mode='constant', cval=0.0``,
+    because in this case borders (i.e., where the `weights` kernel, centered
+    on any one value, extends beyond an edge of `input`) are treated as zeros.
+
+    >>> import numpy as np
+    >>> a = np.array([[1, 2, 0, 0],
+    ...               [5, 3, 0, 4],
+    ...               [0, 0, 0, 7],
+    ...               [9, 3, 0, 0]])
+    >>> k = np.array([[1,1,1],[1,1,0],[1,0,0]])
+    >>> from scipy import ndimage
+    >>> ndimage.convolve(a, k, mode='constant', cval=0.0)
+    array([[11, 10,  7,  4],
+           [10,  3, 11, 11],
+           [15, 12, 14,  7],
+           [12,  3,  7,  0]])
+
+    Setting ``cval=1.0`` is equivalent to padding the outer edge of `input`
+    with 1.0's (and then extracting only the original region of the result).
+
+    >>> ndimage.convolve(a, k, mode='constant', cval=1.0)
+    array([[13, 11,  8,  7],
+           [11,  3, 11, 14],
+           [16, 12, 14, 10],
+           [15,  6, 10,  5]])
+
+    With ``mode='reflect'`` (the default), outer values are reflected at the
+    edge of `input` to fill in missing values.
+
+    >>> b = np.array([[2, 0, 0],
+    ...               [1, 0, 0],
+    ...               [0, 0, 0]])
+    >>> k = np.array([[0,1,0], [0,1,0], [0,1,0]])
+    >>> ndimage.convolve(b, k, mode='reflect')
+    array([[5, 0, 0],
+           [3, 0, 0],
+           [1, 0, 0]])
+
+    This includes diagonally at the corners.
+
+    >>> k = np.array([[1,0,0],[0,1,0],[0,0,1]])
+    >>> ndimage.convolve(b, k)
+    array([[4, 2, 0],
+           [3, 2, 0],
+           [1, 1, 0]])
+
+    With ``mode='nearest'``, the value nearest to the edge of `input` is
+    repeated as many times as needed to match the overlapping `weights`.
+
+    >>> c = np.array([[2, 0, 1],
+    ...               [1, 0, 0],
+    ...               [0, 0, 0]])
+    >>> k = np.array([[0, 1, 0],
+    ...               [0, 1, 0],
+    ...               [0, 1, 0],
+    ...               [0, 1, 0],
+    ...               [0, 1, 0]])
+    >>> ndimage.convolve(c, k, mode='nearest')
+    array([[7, 0, 3],
+           [5, 0, 2],
+           [3, 0, 1]])
+
+    """
+    return _correlate_or_convolve(input, weights, output, mode, cval,
+                                  origin, True)
+
+
+@_ni_docstrings.docfiller
+def uniform_filter1d(input, size, axis=-1, output=None,
+                     mode="reflect", cval=0.0, origin=0):
+    """Calculate a 1-D uniform filter along the given axis.
+
+    The lines of the array along the given axis are filtered with a
+    uniform filter of given size.
+
+    Parameters
+    ----------
+    %(input)s
+    size : int
+        length of uniform filter
+    %(axis)s
+    %(output)s
+    %(mode_reflect)s
+    %(cval)s
+    %(origin)s
+
+    Examples
+    --------
+    >>> from scipy.ndimage import uniform_filter1d
+    >>> uniform_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
+    array([4, 3, 4, 1, 4, 6, 6, 3])
+    """
+    input = numpy.asarray(input)
+    axis = normalize_axis_index(axis, input.ndim)
+    if size < 1:
+        raise RuntimeError('incorrect filter size')
+    complex_output = input.dtype.kind == 'c'
+    output = _ni_support._get_output(output, input,
+                                     complex_output=complex_output)
+    if (size // 2 + origin < 0) or (size // 2 + origin >= size):
+        raise ValueError('invalid origin')
+    mode = _ni_support._extend_mode_to_code(mode)
+    if not complex_output:
+        _nd_image.uniform_filter1d(input, size, axis, output, mode, cval,
+                                   origin)
+    else:
+        _nd_image.uniform_filter1d(input.real, size, axis, output.real, mode,
+                                   numpy.real(cval), origin)
+        _nd_image.uniform_filter1d(input.imag, size, axis, output.imag, mode,
+                                   numpy.imag(cval), origin)
+    return output
+
+
+@_ni_docstrings.docfiller
+def uniform_filter(input, size=3, output=None, mode="reflect",
+                   cval=0.0, origin=0):
+    """Multidimensional uniform filter.
+
+    Parameters
+    ----------
+    %(input)s
+    size : int or sequence of ints, optional
+        The sizes of the uniform filter are given for each axis as a
+        sequence, or as a single number, in which case the size is
+        equal for all axes.
+    %(output)s
+    %(mode_multiple)s
+    %(cval)s
+    %(origin_multiple)s
+
+    Returns
+    -------
+    uniform_filter : ndarray
+        Filtered array. Has the same shape as `input`.
+
+    Notes
+    -----
+    The multidimensional filter is implemented as a sequence of
+    1-D uniform filters. The intermediate arrays are stored
+    in the same data type as the output. Therefore, for output types
+    with a limited precision, the results may be imprecise because
+    intermediate results may be stored with insufficient precision.
+
+    Examples
+    --------
+    >>> from scipy import ndimage, datasets
+    >>> import matplotlib.pyplot as plt
+    >>> fig = plt.figure()
+    >>> plt.gray()  # show the filtered result in grayscale
+    >>> ax1 = fig.add_subplot(121)  # left side
+    >>> ax2 = fig.add_subplot(122)  # right side
+    >>> ascent = datasets.ascent()
+    >>> result = ndimage.uniform_filter(ascent, size=20)
+    >>> ax1.imshow(ascent)
+    >>> ax2.imshow(result)
+    >>> plt.show()
+    """
+    input = numpy.asarray(input)
+    output = _ni_support._get_output(output, input,
+                                     complex_output=input.dtype.kind == 'c')
+    sizes = _ni_support._normalize_sequence(size, input.ndim)
+    origins = _ni_support._normalize_sequence(origin, input.ndim)
+    modes = _ni_support._normalize_sequence(mode, input.ndim)
+    axes = list(range(input.ndim))
+    axes = [(axes[ii], sizes[ii], origins[ii], modes[ii])
+            for ii in range(len(axes)) if sizes[ii] > 1]
+    if len(axes) > 0:
+        for axis, size, origin, mode in axes:
+            uniform_filter1d(input, int(size), axis, output, mode,
+                             cval, origin)
+            input = output
+    else:
+        output[...] = input[...]
+    return output
+
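+
+# A minimal self-check of the separability note above (illustrative only,
+# not scipy API): the 2-D uniform filter equals two 1-D passes.
+def _demo_uniform_separability():
+    a = numpy.arange(25, dtype=numpy.float64).reshape(5, 5)
+    two_pass = uniform_filter1d(uniform_filter1d(a, 3, axis=0), 3, axis=1)
+    assert numpy.allclose(uniform_filter(a, size=3), two_pass)
+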
+
+@_ni_docstrings.docfiller
+def minimum_filter1d(input, size, axis=-1, output=None,
+                     mode="reflect", cval=0.0, origin=0):
+    """Calculate a 1-D minimum filter along the given axis.
+
+    The lines of the array along the given axis are filtered with a
+    minimum filter of given size.
+
+    Parameters
+    ----------
+    %(input)s
+    size : int
+        length along which to calculate 1D minimum
+    %(axis)s
+    %(output)s
+    %(mode_reflect)s
+    %(cval)s
+    %(origin)s
+
+    Notes
+    -----
+    This function implements the MINLIST algorithm [1]_, as described by
+    Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
+    the `input` length, regardless of filter size.
+
+    References
+    ----------
+    .. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
+    .. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
+
+
+    Examples
+    --------
+    >>> from scipy.ndimage import minimum_filter1d
+    >>> minimum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
+    array([2, 0, 0, 0, 1, 1, 0, 0])
+    """
+    input = numpy.asarray(input)
+    if numpy.iscomplexobj(input):
+        raise TypeError('Complex type not supported')
+    axis = normalize_axis_index(axis, input.ndim)
+    if size < 1:
+        raise RuntimeError('incorrect filter size')
+    output = _ni_support._get_output(output, input)
+    if (size // 2 + origin < 0) or (size // 2 + origin >= size):
+        raise ValueError('invalid origin')
+    mode = _ni_support._extend_mode_to_code(mode)
+    _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
+                                  origin, 1)
+    return output
+
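+
+# A minimal self-check against a naive O(n * size) sliding minimum
+# (illustrative only, not scipy API); numpy.pad's 'symmetric' mode matches
+# ndimage's default 'reflect' boundary handling.
+def _demo_sliding_minimum():
+    x = numpy.array([2, 8, 0, 4, 1, 9, 9, 0])
+    padded = numpy.pad(x, 1, mode='symmetric')
+    naive = numpy.array([padded[i:i + 3].min() for i in range(len(x))])
+    assert numpy.array_equal(minimum_filter1d(x, 3), naive)
+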
+
+@_ni_docstrings.docfiller
+def maximum_filter1d(input, size, axis=-1, output=None,
+                     mode="reflect", cval=0.0, origin=0):
+    """Calculate a 1-D maximum filter along the given axis.
+
+    The lines of the array along the given axis are filtered with a
+    maximum filter of given size.
+
+    Parameters
+    ----------
+    %(input)s
+    size : int
+        Length along which to calculate the 1-D maximum.
+    %(axis)s
+    %(output)s
+    %(mode_reflect)s
+    %(cval)s
+    %(origin)s
+
+    Returns
+    -------
+    maximum1d : ndarray
+        Maximum-filtered array with the same shape as `input`. The result is
+        also written into `output` when one is supplied.
+
+    Notes
+    -----
+    This function implements the MAXLIST algorithm [1]_, as described by
+    Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
+    the `input` length, regardless of filter size.
+
+    References
+    ----------
+    .. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
+    .. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
+
+    Examples
+    --------
+    >>> from scipy.ndimage import maximum_filter1d
+    >>> maximum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
+    array([8, 8, 8, 4, 9, 9, 9, 9])
+    """
+    input = numpy.asarray(input)
+    if numpy.iscomplexobj(input):
+        raise TypeError('Complex type not supported')
+    axis = normalize_axis_index(axis, input.ndim)
+    if size < 1:
+        raise RuntimeError('incorrect filter size')
+    output = _ni_support._get_output(output, input)
+    if (size // 2 + origin < 0) or (size // 2 + origin >= size):
+        raise ValueError('invalid origin')
+    mode = _ni_support._extend_mode_to_code(mode)
+    _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
+                                  origin, 0)
+    return output
+
+
+def _min_or_max_filter(input, size, footprint, structure, output, mode,
+                       cval, origin, minimum):
+    if (size is not None) and (footprint is not None):
+        warnings.warn("ignoring size because footprint is set",
+                      UserWarning, stacklevel=3)
+    if structure is None:
+        if footprint is None:
+            if size is None:
+                raise RuntimeError("no footprint provided")
+            separable = True
+        else:
+            footprint = numpy.asarray(footprint, dtype=bool)
+            if not footprint.any():
+                raise ValueError("All-zero footprint is not supported.")
+            if footprint.all():
+                size = footprint.shape
+                footprint = None
+                separable = True
+            else:
+                separable = False
+    else:
+        structure = numpy.asarray(structure, dtype=numpy.float64)
+        separable = False
+        if footprint is None:
+            footprint = numpy.ones(structure.shape, bool)
+        else:
+            footprint = numpy.asarray(footprint, dtype=bool)
+    input = numpy.asarray(input)
+    if numpy.iscomplexobj(input):
+        raise TypeError('Complex type not supported')
+    output = _ni_support._get_output(output, input)
+    temp_needed = numpy.may_share_memory(input, output)
+    if temp_needed:
+        # input and output arrays cannot share memory
+        temp = output
+        output = _ni_support._get_output(output.dtype, input)
+    origins = _ni_support._normalize_sequence(origin, input.ndim)
+    if separable:
+        sizes = _ni_support._normalize_sequence(size, input.ndim)
+        modes = _ni_support._normalize_sequence(mode, input.ndim)
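+        # decompose the separable filter into a chain of 1-D min/max
+        # filters, skipping axes whose size is 1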
+        axes = list(range(input.ndim))
+        axes = [(axes[ii], sizes[ii], origins[ii], modes[ii])
+                for ii in range(len(axes)) if sizes[ii] > 1]
+        if minimum:
+            filter_ = minimum_filter1d
+        else:
+            filter_ = maximum_filter1d
+        if len(axes) > 0:
+            for axis, size, origin, mode in axes:
+                filter_(input, int(size), axis, output, mode, cval, origin)
+                input = output
+        else:
+            output[...] = input[...]
+    else:
+        fshape = [ii for ii in footprint.shape if ii > 0]
+        if len(fshape) != input.ndim:
+            raise RuntimeError('footprint array has incorrect shape.')
+        for origin, lenf in zip(origins, fshape):
+            if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
+                raise ValueError('invalid origin')
+        if not footprint.flags.contiguous:
+            footprint = footprint.copy()
+        if structure is not None:
+            if len(structure.shape) != input.ndim:
+                raise RuntimeError('structure array has incorrect shape')
+            if not structure.flags.contiguous:
+                structure = structure.copy()
+        if not isinstance(mode, str) and isinstance(mode, Iterable):
+            raise RuntimeError(
+                "A sequence of modes is not supported for non-separable "
+                "footprints")
+        mode = _ni_support._extend_mode_to_code(mode)
+        _nd_image.min_or_max_filter(input, footprint, structure, output,
+                                    mode, cval, origins, minimum)
+    if temp_needed:
+        temp[...] = output
+        output = temp
+    return output
+
+
+@_ni_docstrings.docfiller
+def minimum_filter(input, size=None, footprint=None, output=None,
+                   mode="reflect", cval=0.0, origin=0):
+    """Calculate a multidimensional minimum filter.
+
+    Parameters
+    ----------
+    %(input)s
+    %(size_foot)s
+    %(output)s
+    %(mode_multiple)s
+    %(cval)s
+    %(origin_multiple)s
+
+    Returns
+    -------
+    minimum_filter : ndarray
+        Filtered array. Has the same shape as `input`.
+
+    Notes
+    -----
+    A sequence of modes (one per axis) is only supported when the footprint is
+    separable. Otherwise, a single mode string must be provided.
+
+    Examples
+    --------
+    >>> from scipy import ndimage, datasets
+    >>> import matplotlib.pyplot as plt
+    >>> fig = plt.figure()
+    >>> plt.gray()  # show the filtered result in grayscale
+    >>> ax1 = fig.add_subplot(121)  # left side
+    >>> ax2 = fig.add_subplot(122)  # right side
+    >>> ascent = datasets.ascent()
+    >>> result = ndimage.minimum_filter(ascent, size=20)
+    >>> ax1.imshow(ascent)
+    >>> ax2.imshow(result)
+    >>> plt.show()
+    """
+    return _min_or_max_filter(input, size, footprint, None, output, mode,
+                              cval, origin, 1)
+
+
+@_ni_docstrings.docfiller
+def maximum_filter(input, size=None, footprint=None, output=None,
+                   mode="reflect", cval=0.0, origin=0):
+    """Calculate a multidimensional maximum filter.
+
+    Parameters
+    ----------
+    %(input)s
+    %(size_foot)s
+    %(output)s
+    %(mode_multiple)s
+    %(cval)s
+    %(origin_multiple)s
+
+    Returns
+    -------
+    maximum_filter : ndarray
+        Filtered array. Has the same shape as `input`.
+
+    Notes
+    -----
+    A sequence of modes (one per axis) is only supported when the footprint is
+    separable. Otherwise, a single mode string must be provided.
+
+    Examples
+    --------
+    >>> from scipy import ndimage, datasets
+    >>> import matplotlib.pyplot as plt
+    >>> fig = plt.figure()
+    >>> plt.gray()  # show the filtered result in grayscale
+    >>> ax1 = fig.add_subplot(121)  # left side
+    >>> ax2 = fig.add_subplot(122)  # right side
+    >>> ascent = datasets.ascent()
+    >>> result = ndimage.maximum_filter(ascent, size=20)
+    >>> ax1.imshow(ascent)
+    >>> ax2.imshow(result)
+    >>> plt.show()
+    """
+    return _min_or_max_filter(input, size, footprint, None, output, mode,
+                              cval, origin, 0)
+
+
+@_ni_docstrings.docfiller
+def _rank_filter(input, rank, size=None, footprint=None, output=None,
+                 mode="reflect", cval=0.0, origin=0, operation='rank'):
+    if (size is not None) and (footprint is not None):
+        warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=3)
+    input = numpy.asarray(input)
+    if numpy.iscomplexobj(input):
+        raise TypeError('Complex type not supported')
+    origins = _ni_support._normalize_sequence(origin, input.ndim)
+    if footprint is None:
+        if size is None:
+            raise RuntimeError("no footprint or filter size provided")
+        sizes = _ni_support._normalize_sequence(size, input.ndim)
+        footprint = numpy.ones(sizes, dtype=bool)
+    else:
+        footprint = numpy.asarray(footprint, dtype=bool)
+    fshape = [ii for ii in footprint.shape if ii > 0]
+    if len(fshape) != input.ndim:
+        raise RuntimeError('filter footprint array has incorrect shape.')
+    for origin, lenf in zip(origins, fshape):
+        if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
+            raise ValueError('invalid origin')
+    if not footprint.flags.contiguous:
+        footprint = footprint.copy()
+    filter_size = numpy.where(footprint, 1, 0).sum()
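+    # map the requested operation to a rank within the footprint:
+    # 'median' takes the middle rank, while 'percentile' converts a
+    # percentage (negative values count down from 100) to a rank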
+    if operation == 'median':
+        rank = filter_size // 2
+    elif operation == 'percentile':
+        percentile = rank
+        if percentile < 0.0:
+            percentile += 100.0
+        if percentile < 0 or percentile > 100:
+            raise RuntimeError('invalid percentile')
+        if percentile == 100.0:
+            rank = filter_size - 1
+        else:
+            rank = int(float(filter_size) * percentile / 100.0)
+    if rank < 0:
+        rank += filter_size
+    if rank < 0 or rank >= filter_size:
+        raise RuntimeError('rank not within filter footprint size')
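+    # the extreme ranks reduce to minimum/maximum filters, which have
+    # faster dedicated implementations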
+    if rank == 0:
+        return minimum_filter(input, None, footprint, output, mode, cval,
+                              origins)
+    elif rank == filter_size - 1:
+        return maximum_filter(input, None, footprint, output, mode, cval,
+                              origins)
+    else:
+        output = _ni_support._get_output(output, input)
+        temp_needed = numpy.may_share_memory(input, output)
+        if temp_needed:
+            # input and output arrays cannot share memory
+            temp = output
+            output = _ni_support._get_output(output.dtype, input)
+        if not isinstance(mode, str) and isinstance(mode, Iterable):
+            raise RuntimeError(
+                "A sequence of modes is not supported by non-separable rank "
+                "filters")
+        mode = _ni_support._extend_mode_to_code(mode)
+        _nd_image.rank_filter(input, rank, footprint, output, mode, cval,
+                              origins)
+        if temp_needed:
+            temp[...] = output
+            output = temp
+        return output
+
+
+@_ni_docstrings.docfiller
+def rank_filter(input, rank, size=None, footprint=None, output=None,
+                mode="reflect", cval=0.0, origin=0):
+    """Calculate a multidimensional rank filter.
+
+    Parameters
+    ----------
+    %(input)s
+    rank : int
+        The rank parameter may be less than zero, i.e., rank = -1
+        indicates the largest element.
+    %(size_foot)s
+    %(output)s
+    %(mode_reflect)s
+    %(cval)s
+    %(origin_multiple)s
+
+    Returns
+    -------
+    rank_filter : ndarray
+        Filtered array. Has the same shape as `input`.
+
+    Examples
+    --------
+    >>> from scipy import ndimage, datasets
+    >>> import matplotlib.pyplot as plt
+    >>> fig = plt.figure()
+    >>> plt.gray()  # show the filtered result in grayscale
+    >>> ax1 = fig.add_subplot(121)  # left side
+    >>> ax2 = fig.add_subplot(122)  # right side
+    >>> ascent = datasets.ascent()
+    >>> result = ndimage.rank_filter(ascent, rank=42, size=20)
+    >>> ax1.imshow(ascent)
+    >>> ax2.imshow(result)
+    >>> plt.show()
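+
+    A negative rank counts back from the largest value, so ``rank=-1``
+    selects the footprint maximum (a quick check):
+
+    >>> import numpy as np
+    >>> x = np.array([2, 8, 0, 4, 1, 9, 9, 0])
+    >>> np.array_equal(ndimage.rank_filter(x, -1, size=3),
+    ...                ndimage.maximum_filter(x, size=3))
+    True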
+    """
+    rank = operator.index(rank)
+    return _rank_filter(input, rank, size, footprint, output, mode, cval,
+                        origin, 'rank')
+
+
+@_ni_docstrings.docfiller
+def median_filter(input, size=None, footprint=None, output=None,
+                  mode="reflect", cval=0.0, origin=0):
+    """
+    Calculate a multidimensional median filter.
+
+    Parameters
+    ----------
+    %(input)s
+    %(size_foot)s
+    %(output)s
+    %(mode_reflect)s
+    %(cval)s
+    %(origin_multiple)s
+
+    Returns
+    -------
+    median_filter : ndarray
+        Filtered array. Has the same shape as `input`.
+
+    See Also
+    --------
+    scipy.signal.medfilt2d
+
+    Notes
+    -----
+    For 2-dimensional images with ``uint8``, ``float32`` or ``float64`` dtypes
+    the specialised function `scipy.signal.medfilt2d` may be faster. It is
+    however limited to constant mode with ``cval=0``.
+
+    Examples
+    --------
+    >>> from scipy import ndimage, datasets
+    >>> import matplotlib.pyplot as plt
+    >>> fig = plt.figure()
+    >>> plt.gray()  # show the filtered result in grayscale
+    >>> ax1 = fig.add_subplot(121)  # left side
+    >>> ax2 = fig.add_subplot(122)  # right side
+    >>> ascent = datasets.ascent()
+    >>> result = ndimage.median_filter(ascent, size=20)
+    >>> ax1.imshow(ascent)
+    >>> ax2.imshow(result)
+    >>> plt.show()
+    """
+    return _rank_filter(input, 0, size, footprint, output, mode, cval,
+                        origin, 'median')
+
+
+@_ni_docstrings.docfiller
+def percentile_filter(input, percentile, size=None, footprint=None,
+                      output=None, mode="reflect", cval=0.0, origin=0):
+    """Calculate a multidimensional percentile filter.
+
+    Parameters
+    ----------
+    %(input)s
+    percentile : scalar
+        The percentile parameter may be less than zero, i.e.,
+        percentile = -20 equals percentile = 80
+    %(size_foot)s
+    %(output)s
+    %(mode_reflect)s
+    %(cval)s
+    %(origin_multiple)s
+
+    Returns
+    -------
+    percentile_filter : ndarray
+        Filtered array. Has the same shape as `input`.
+
+    Examples
+    --------
+    >>> from scipy import ndimage, datasets
+    >>> import matplotlib.pyplot as plt
+    >>> fig = plt.figure()
+    >>> plt.gray()  # show the filtered result in grayscale
+    >>> ax1 = fig.add_subplot(121)  # left side
+    >>> ax2 = fig.add_subplot(122)  # right side
+    >>> ascent = datasets.ascent()
+    >>> result = ndimage.percentile_filter(ascent, percentile=20, size=20)
+    >>> ax1.imshow(ascent)
+    >>> ax2.imshow(result)
+    >>> plt.show()
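+
+    A negative percentile counts down from 100, so ``percentile=-20``
+    is equivalent to ``percentile=80`` (a quick check):
+
+    >>> import numpy as np
+    >>> x = np.arange(16.).reshape(4, 4)
+    >>> np.array_equal(ndimage.percentile_filter(x, -20, size=3),
+    ...                ndimage.percentile_filter(x, 80, size=3))
+    True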
+    """
+    return _rank_filter(input, percentile, size, footprint, output, mode,
+                        cval, origin, 'percentile')
+
+
+@_ni_docstrings.docfiller
+def generic_filter1d(input, function, filter_size, axis=-1,
+                     output=None, mode="reflect", cval=0.0, origin=0,
+                     extra_arguments=(), extra_keywords=None):
+    """Calculate a 1-D filter along the given axis.
+
+    `generic_filter1d` iterates over the lines of the array, calling the
+    given function at each line. The function receives two arguments: the
+    input line and the output line, both 1-D double arrays. The input
+    line is extended appropriately according to the filter size and
+    origin. The output line must be modified in-place with the result.
+
+    Parameters
+    ----------
+    %(input)s
+    function : {callable, scipy.LowLevelCallable}
+        Function to apply along given axis.
+    filter_size : scalar
+        Length of the filter.
+    %(axis)s
+    %(output)s
+    %(mode_reflect)s
+    %(cval)s
+    %(origin)s
+    %(extra_arguments)s
+    %(extra_keywords)s
+
+    Notes
+    -----
+    This function also accepts low-level callback functions with one of
+    the following signatures and wrapped in `scipy.LowLevelCallable`:
+
+    .. code:: c
+
+       int function(double *input_line, npy_intp input_length,
+                    double *output_line, npy_intp output_length,
+                    void *user_data)
+       int function(double *input_line, intptr_t input_length,
+                    double *output_line, intptr_t output_length,
+                    void *user_data)
+
+    The calling function iterates over the lines of the input and output
+    arrays, calling the callback function at each line. The current line
+    is extended according to the border conditions set by the calling
+    function, and the result is copied into the array that is passed
+    through ``input_line``. The length of the input line (after extension)
+    is passed through ``input_length``. The callback function should apply
+    the filter and store the result in the array passed through
+    ``output_line``. The length of the output line is passed through
+    ``output_length``. ``user_data`` is the data pointer provided
+    to `scipy.LowLevelCallable` as-is.
+
+    The callback function must return an integer error status that is zero
+    if something went wrong and one otherwise. If an error occurs, you should
+    normally set the Python error status with an informative message
+    before returning, otherwise a default error message is set by the
+    calling function.
+
+    In addition, some other low-level function pointer specifications
+    are accepted, but these are for backward compatibility only and should
+    not be used in new code.
+
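+    Examples
+    --------
+    A pure-Python callback implementing a length-3 moving average (a
+    minimal sketch; the callback must fill ``output_line`` in place, and
+    the input line arrives already extended by ``filter_size - 1``
+    samples):
+
+    >>> import numpy as np
+    >>> from scipy import ndimage
+    >>> def mean3(input_line, output_line):
+    ...     output_line[...] = (input_line[:-2] + input_line[1:-1]
+    ...                         + input_line[2:]) / 3.0
+    ...
+    >>> x = np.array([2., 8., 0., 4., 1., 9., 9., 0.])
+    >>> y = ndimage.generic_filter1d(x, mean3, filter_size=3)
+    >>> np.allclose(y, ndimage.uniform_filter1d(x, size=3))
+    True
+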
+    """
+    if extra_keywords is None:
+        extra_keywords = {}
+    input = numpy.asarray(input)
+    if numpy.iscomplexobj(input):
+        raise TypeError('Complex type not supported')
+    output = _ni_support._get_output(output, input)
+    if filter_size < 1:
+        raise RuntimeError('invalid filter size')
+    axis = normalize_axis_index(axis, input.ndim)
+    if (filter_size // 2 + origin < 0) or (filter_size // 2 + origin >=
+                                           filter_size):
+        raise ValueError('invalid origin')
+    mode = _ni_support._extend_mode_to_code(mode)
+    _nd_image.generic_filter1d(input, function, filter_size, axis, output,
+                               mode, cval, origin, extra_arguments,
+                               extra_keywords)
+    return output
+
+
+@_ni_docstrings.docfiller
+def generic_filter(input, function, size=None, footprint=None,
+                   output=None, mode="reflect", cval=0.0, origin=0,
+                   extra_arguments=(), extra_keywords=None):
+    """Calculate a multidimensional filter using the given function.
+
+    At each element the provided function is called. The input values
+    within the filter footprint at that element are passed to the function
+    as a 1-D array of double values.
+
+    Parameters
+    ----------
+    %(input)s
+    function : {callable, scipy.LowLevelCallable}
+        Function to apply at each element.
+    %(size_foot)s
+    %(output)s
+    %(mode_reflect)s
+    %(cval)s
+    %(origin_multiple)s
+    %(extra_arguments)s
+    %(extra_keywords)s
+
+    Notes
+    -----
+    This function also accepts low-level callback functions with one of
+    the following signatures and wrapped in `scipy.LowLevelCallable`:
+
+    .. code:: c
+
+       int callback(double *buffer, npy_intp filter_size,
+                    double *return_value, void *user_data)
+       int callback(double *buffer, intptr_t filter_size,
+                    double *return_value, void *user_data)
+
+    The calling function iterates over the elements of the input and
+    output arrays, calling the callback function at each element. The
+    elements within the footprint of the filter at the current element are
+    passed through the ``buffer`` parameter, and the number of elements
+    within the footprint through ``filter_size``. The calculated value is
+    returned in ``return_value``. ``user_data`` is the data pointer provided
+    to `scipy.LowLevelCallable` as-is.
+
+    The callback function must return an integer error status that is zero
+    if something went wrong and one otherwise. If an error occurs, you should
+    normally set the python error status with an informative message
+    before returning, otherwise a default error message is set by the
+    calling function.
+
+    In addition, some other low-level function pointer specifications
+    are accepted, but these are for backward compatibility only and should
+    not be used in new code.
+
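+    Examples
+    --------
+    As a quick sketch, a callback computing the local peak-to-peak range
+    matches the difference of the maximum and minimum filters over the
+    same footprint:
+
+    >>> import numpy as np
+    >>> from scipy import ndimage
+    >>> a = np.arange(25).reshape(5, 5)
+    >>> r = ndimage.generic_filter(a, np.ptp, size=3)
+    >>> np.array_equal(r, ndimage.maximum_filter(a, size=3)
+    ...                   - ndimage.minimum_filter(a, size=3))
+    True
+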
+    """
+    if (size is not None) and (footprint is not None):
+        warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=2)
+    if extra_keywords is None:
+        extra_keywords = {}
+    input = numpy.asarray(input)
+    if numpy.iscomplexobj(input):
+        raise TypeError('Complex type not supported')
+    origins = _ni_support._normalize_sequence(origin, input.ndim)
+    if footprint is None:
+        if size is None:
+            raise RuntimeError("no footprint or filter size provided")
+        sizes = _ni_support._normalize_sequence(size, input.ndim)
+        footprint = numpy.ones(sizes, dtype=bool)
+    else:
+        footprint = numpy.asarray(footprint, dtype=bool)
+    fshape = [ii for ii in footprint.shape if ii > 0]
+    if len(fshape) != input.ndim:
+        raise RuntimeError('filter footprint array has incorrect shape.')
+    for origin, lenf in zip(origins, fshape):
+        if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
+            raise ValueError('invalid origin')
+    if not footprint.flags.contiguous:
+        footprint = footprint.copy()
+    output = _ni_support._get_output(output, input)
+    mode = _ni_support._extend_mode_to_code(mode)
+    _nd_image.generic_filter(input, function, footprint, output, mode,
+                             cval, origins, extra_arguments, extra_keywords)
+    return output
diff --git a/__packaged__/coreml/.python_dependencies/scipy/ndimage/_fourier.py b/__packaged__/coreml/.python_dependencies/scipy/ndimage/_fourier.py
new file mode 100644
index 00000000..79578611
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/ndimage/_fourier.py
@@ -0,0 +1,307 @@
+# Copyright (C) 2003-2005 Peter J. Verveer
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above
+#    copyright notice, this list of conditions and the following
+#    disclaimer in the documentation and/or other materials provided
+#    with the distribution.
+#
+# 3. The name of the author may not be used to endorse or promote
+#    products derived from this software without specific prior
+#    written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import numpy
+from numpy.core.multiarray import normalize_axis_index
+from . import _ni_support
+from . import _nd_image
+
+__all__ = ['fourier_gaussian', 'fourier_uniform', 'fourier_ellipsoid',
+           'fourier_shift']
+
+
+def _get_output_fourier(output, input):
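+    # choose an output dtype that preserves the input's precision:
+    # complex and float32 inputs keep their dtype; everything else is
+    # promoted to float64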
+    if output is None:
+        if input.dtype.type in [numpy.complex64, numpy.complex128,
+                                numpy.float32]:
+            output = numpy.zeros(input.shape, dtype=input.dtype)
+        else:
+            output = numpy.zeros(input.shape, dtype=numpy.float64)
+    elif type(output) is type:
+        if output not in [numpy.complex64, numpy.complex128,
+                          numpy.float32, numpy.float64]:
+            raise RuntimeError("output type not supported")
+        output = numpy.zeros(input.shape, dtype=output)
+    elif output.shape != input.shape:
+        raise RuntimeError("output shape not correct")
+    return output
+
+
+def _get_output_fourier_complex(output, input):
+    if output is None:
+        if input.dtype.type in [numpy.complex64, numpy.complex128]:
+            output = numpy.zeros(input.shape, dtype=input.dtype)
+        else:
+            output = numpy.zeros(input.shape, dtype=numpy.complex128)
+    elif type(output) is type:
+        if output not in [numpy.complex64, numpy.complex128]:
+            raise RuntimeError("output type not supported")
+        output = numpy.zeros(input.shape, dtype=output)
+    elif output.shape != input.shape:
+        raise RuntimeError("output shape not correct")
+    return output
+
+
+def fourier_gaussian(input, sigma, n=-1, axis=-1, output=None):
+    """
+    Multidimensional Gaussian Fourier filter.
+
+    The array is multiplied with the Fourier transform of a Gaussian
+    kernel.
+
+    Parameters
+    ----------
+    input : array_like
+        The input array.
+    sigma : float or sequence
+        The sigma of the Gaussian kernel. If a float, `sigma` is the same for
+        all axes. If a sequence, `sigma` has to contain one value for each
+        axis.
+    n : int, optional
+        If `n` is negative (default), then the input is assumed to be the
+        result of a complex fft.
+        If `n` is larger than or equal to zero, the input is assumed to be the
+        result of a real fft, and `n` gives the length of the array before
+        transformation along the real transform direction.
+    axis : int, optional
+        The axis of the real transform.
+    output : ndarray, optional
+        If given, the result of filtering the input is placed in this array.
+
+    Returns
+    -------
+    fourier_gaussian : ndarray
+        The filtered input.
+
+    Examples
+    --------
+    >>> from scipy import ndimage, datasets
+    >>> import numpy.fft
+    >>> import matplotlib.pyplot as plt
+    >>> fig, (ax1, ax2) = plt.subplots(1, 2)
+    >>> plt.gray()  # show the filtered result in grayscale
+    >>> ascent = datasets.ascent()
+    >>> input_ = numpy.fft.fft2(ascent)
+    >>> result = ndimage.fourier_gaussian(input_, sigma=4)
+    >>> result = numpy.fft.ifft2(result)
+    >>> ax1.imshow(ascent)
+    >>> ax2.imshow(result.real)  # the imaginary part is an artifact
+    >>> plt.show()
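+
+    When the input comes from a real FFT, pass the pre-transform length
+    along the real axis through `n` (a minimal sketch reusing `ascent`
+    from above):
+
+    >>> rinput = numpy.fft.rfft2(ascent)
+    >>> rresult = ndimage.fourier_gaussian(rinput, sigma=4, n=ascent.shape[-1])
+    >>> rresult = numpy.fft.irfft2(rresult, s=ascent.shape)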
+    """
+    input = numpy.asarray(input)
+    output = _get_output_fourier(output, input)
+    axis = normalize_axis_index(axis, input.ndim)
+    sigmas = _ni_support._normalize_sequence(sigma, input.ndim)
+    sigmas = numpy.asarray(sigmas, dtype=numpy.float64)
+    if not sigmas.flags.contiguous:
+        sigmas = sigmas.copy()
+
+    _nd_image.fourier_filter(input, sigmas, n, axis, output, 0)
+    return output
+
+
+def fourier_uniform(input, size, n=-1, axis=-1, output=None):
+    """
+    Multidimensional uniform Fourier filter.
+
+    The array is multiplied with the Fourier transform of a box of given
+    size.
+
+    Parameters
+    ----------
+    input : array_like
+        The input array.
+    size : float or sequence
+        The size of the box used for filtering.
+        If a float, `size` is the same for all axes. If a sequence, `size` has
+        to contain one value for each axis.
+    n : int, optional
+        If `n` is negative (default), then the input is assumed to be the
+        result of a complex fft.
+        If `n` is larger than or equal to zero, the input is assumed to be the
+        result of a real fft, and `n` gives the length of the array before
+        transformation along the real transform direction.
+    axis : int, optional
+        The axis of the real transform.
+    output : ndarray, optional
+        If given, the result of filtering the input is placed in this array.
+
+    Returns
+    -------
+    fourier_uniform : ndarray
+        The filtered input.
+
+    Examples
+    --------
+    >>> from scipy import ndimage, datasets
+    >>> import numpy.fft
+    >>> import matplotlib.pyplot as plt
+    >>> fig, (ax1, ax2) = plt.subplots(1, 2)
+    >>> plt.gray()  # show the filtered result in grayscale
+    >>> ascent = datasets.ascent()
+    >>> input_ = numpy.fft.fft2(ascent)
+    >>> result = ndimage.fourier_uniform(input_, size=20)
+    >>> result = numpy.fft.ifft2(result)
+    >>> ax1.imshow(ascent)
+    >>> ax2.imshow(result.real)  # the imaginary part is an artifact
+    >>> plt.show()
+    """
+    input = numpy.asarray(input)
+    output = _get_output_fourier(output, input)
+    axis = normalize_axis_index(axis, input.ndim)
+    sizes = _ni_support._normalize_sequence(size, input.ndim)
+    sizes = numpy.asarray(sizes, dtype=numpy.float64)
+    if not sizes.flags.contiguous:
+        sizes = sizes.copy()
+    _nd_image.fourier_filter(input, sizes, n, axis, output, 1)
+    return output
+
+
+def fourier_ellipsoid(input, size, n=-1, axis=-1, output=None):
+    """
+    Multidimensional ellipsoid Fourier filter.
+
+    The array is multiplied with the Fourier transform of an ellipsoid
+    of given sizes.
+
+    Parameters
+    ----------
+    input : array_like
+        The input array.
+    size : float or sequence
+        The size of the ellipsoid used for filtering.
+        If a float, `size` is the same for all axes. If a sequence, `size` has
+        to contain one value for each axis.
+    n : int, optional
+        If `n` is negative (default), then the input is assumed to be the
+        result of a complex fft.
+        If `n` is larger than or equal to zero, the input is assumed to be the
+        result of a real fft, and `n` gives the length of the array before
+        transformation along the real transform direction.
+    axis : int, optional
+        The axis of the real transform.
+    output : ndarray, optional
+        If given, the result of filtering the input is placed in this array.
+
+    Returns
+    -------
+    fourier_ellipsoid : ndarray
+        The filtered input.
+
+    Notes
+    -----
+    This function is implemented for arrays of rank 1, 2, or 3.
+
+    Examples
+    --------
+    >>> from scipy import ndimage, datasets
+    >>> import numpy.fft
+    >>> import matplotlib.pyplot as plt
+    >>> fig, (ax1, ax2) = plt.subplots(1, 2)
+    >>> plt.gray()  # show the filtered result in grayscale
+    >>> ascent = datasets.ascent()
+    >>> input_ = numpy.fft.fft2(ascent)
+    >>> result = ndimage.fourier_ellipsoid(input_, size=20)
+    >>> result = numpy.fft.ifft2(result)
+    >>> ax1.imshow(ascent)
+    >>> ax2.imshow(result.real)  # the imaginary part is an artifact
+    >>> plt.show()
+    """
+    input = numpy.asarray(input)
+    if input.ndim > 3:
+        raise NotImplementedError("Only 1d, 2d and 3d inputs are supported")
+    output = _get_output_fourier(output, input)
+    if output.size == 0:
+        # The C code has a bug that can result in a segfault with arrays
+        # that have size 0 (gh-17270), so check here.
+        return output
+    axis = normalize_axis_index(axis, input.ndim)
+    sizes = _ni_support._normalize_sequence(size, input.ndim)
+    sizes = numpy.asarray(sizes, dtype=numpy.float64)
+    if not sizes.flags.contiguous:
+        sizes = sizes.copy()
+    _nd_image.fourier_filter(input, sizes, n, axis, output, 2)
+    return output
+
+
+def fourier_shift(input, shift, n=-1, axis=-1, output=None):
+    """
+    Multidimensional Fourier shift filter.
+
+    The array is multiplied with the Fourier transform of a shift operation.
+
+    Parameters
+    ----------
+    input : array_like
+        The input array.
+    shift : float or sequence
+        The shift along the axes.
+        If a float, `shift` is the same for all axes. If a sequence, `shift`
+        has to contain one value for each axis.
+    n : int, optional
+        If `n` is negative (default), then the input is assumed to be the
+        result of a complex fft.
+        If `n` is larger than or equal to zero, the input is assumed to be the
+        result of a real fft, and `n` gives the length of the array before
+        transformation along the real transform direction.
+    axis : int, optional
+        The axis of the real transform.
+    output : ndarray, optional
+        If given, the result of shifting the input is placed in this array.
+
+    Returns
+    -------
+    fourier_shift : ndarray
+        The shifted input.
+
+    Examples
+    --------
+    >>> from scipy import ndimage, datasets
+    >>> import matplotlib.pyplot as plt
+    >>> import numpy.fft
+    >>> fig, (ax1, ax2) = plt.subplots(1, 2)
+    >>> plt.gray()  # show the filtered result in grayscale
+    >>> ascent = datasets.ascent()
+    >>> input_ = numpy.fft.fft2(ascent)
+    >>> result = ndimage.fourier_shift(input_, shift=200)
+    >>> result = numpy.fft.ifft2(result)
+    >>> ax1.imshow(ascent)
+    >>> ax2.imshow(result.real)  # the imaginary part is an artifact
+    >>> plt.show()
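+
+    For an integer shift, the round trip above matches a circular shift
+    of the input (a quick sanity check):
+
+    >>> numpy.allclose(result.real, numpy.roll(ascent, 200, axis=(0, 1)))
+    True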
+    """
+    input = numpy.asarray(input)
+    output = _get_output_fourier_complex(output, input)
+    axis = normalize_axis_index(axis, input.ndim)
+    shifts = _ni_support._normalize_sequence(shift, input.ndim)
+    shifts = numpy.asarray(shifts, dtype=numpy.float64)
+    if not shifts.flags.contiguous:
+        shifts = shifts.copy()
+    _nd_image.fourier_shift(input, shifts, n, axis, output)
+    return output
diff --git a/__packaged__/coreml/.python_dependencies/scipy/ndimage/_interpolation.py b/__packaged__/coreml/.python_dependencies/scipy/ndimage/_interpolation.py
new file mode 100644
index 00000000..7ac7cf23
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/ndimage/_interpolation.py
@@ -0,0 +1,960 @@
+# Copyright (C) 2003-2005 Peter J. Verveer
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above
+#    copyright notice, this list of conditions and the following
+#    disclaimer in the documentation and/or other materials provided
+#    with the distribution.
+#
+# 3. The name of the author may not be used to endorse or promote
+#    products derived from this software without specific prior
+#    written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import itertools
+import warnings
+
+import numpy
+from numpy.core.multiarray import normalize_axis_index
+
+from scipy import special
+from . import _ni_support
+from . import _nd_image
+from ._ni_docstrings import docfiller
+
+
+__all__ = ['spline_filter1d', 'spline_filter', 'geometric_transform',
+           'map_coordinates', 'affine_transform', 'shift', 'zoom', 'rotate']
+
+
+@docfiller
+def spline_filter1d(input, order=3, axis=-1, output=numpy.float64,
+                    mode='mirror'):
+    """
+    Calculate a 1-D spline filter along the given axis.
+
+    The lines of the array along the given axis are filtered by a
+    spline filter. The order of the spline must be >= 2 and <= 5.
+
+    Parameters
+    ----------
+    %(input)s
+    order : int, optional
+        The order of the spline, default is 3.
+    axis : int, optional
+        The axis along which the spline filter is applied. Default is the last
+        axis.
+    output : ndarray or dtype, optional
+        The array in which to place the output, or the dtype of the returned
+        array. Default is ``numpy.float64``.
+    %(mode_interp_mirror)s
+
+    Returns
+    -------
+    spline_filter1d : ndarray
+        The filtered input.
+
+    Notes
+    -----
+    All of the interpolation functions in `ndimage` do spline interpolation of
+    the input image. If using B-splines of `order > 1`, the input image
+    values have to be converted to B-spline coefficients first, which is
+    done by applying this 1-D filter sequentially along all
+    axes of the input. All functions that require B-spline coefficients
+    will automatically filter their inputs, a behavior controllable with
+    the `prefilter` keyword argument. For functions that accept a `mode`
+    parameter, the result will only be correct if it matches the `mode`
+    used when filtering.
+
+    For complex-valued `input`, this function processes the real and imaginary
+    components independently.
+
+    .. versionadded:: 1.6.0
+        Complex-valued support added.
+
+    See Also
+    --------
+    spline_filter : Multidimensional spline filter.
+
+    Examples
+    --------
+    We can filter an image using a 1-D spline along the given axis:
+
+    >>> from scipy.ndimage import spline_filter1d
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> orig_img = np.eye(20)  # create an image
+    >>> orig_img[10, :] = 1.0
+    >>> sp_filter_axis_0 = spline_filter1d(orig_img, axis=0)
+    >>> sp_filter_axis_1 = spline_filter1d(orig_img, axis=1)
+    >>> f, ax = plt.subplots(1, 3, sharex=True)
+    >>> for ind, data in enumerate([[orig_img, "original image"],
+    ...             [sp_filter_axis_0, "spline filter (axis=0)"],
+    ...             [sp_filter_axis_1, "spline filter (axis=1)"]]):
+    ...     ax[ind].imshow(data[0], cmap='gray_r')
+    ...     ax[ind].set_title(data[1])
+    >>> plt.tight_layout()
+    >>> plt.show()
+
+    """
+    if order < 0 or order > 5:
+        raise RuntimeError('spline order not supported')
+    input = numpy.asarray(input)
+    complex_output = numpy.iscomplexobj(input)
+    output = _ni_support._get_output(output, input,
+                                     complex_output=complex_output)
+    if complex_output:
+        spline_filter1d(input.real, order, axis, output.real, mode)
+        spline_filter1d(input.imag, order, axis, output.imag, mode)
+        return output
+    if order in [0, 1]:
+        output[...] = numpy.array(input)
+    else:
+        mode = _ni_support._extend_mode_to_code(mode)
+        axis = normalize_axis_index(axis, input.ndim)
+        _nd_image.spline_filter1d(input, order, axis, output, mode)
+    return output
+
+
+def spline_filter(input, order=3, output=numpy.float64, mode='mirror'):
+    """
+    Multidimensional spline filter.
+
+    For more details, see `spline_filter1d`.
+
+    See Also
+    --------
+    spline_filter1d : Calculate a 1-D spline filter along the given axis.
+
+    Notes
+    -----
+    The multidimensional filter is implemented as a sequence of
+    1-D spline filters. The intermediate arrays are stored
+    in the same data type as the output. Therefore, for output types
+    with a limited precision, the results may be imprecise because
+    intermediate results may be stored with insufficient precision.
+
+    For complex-valued `input`, this function processes the real and imaginary
+    components independently.
+
+    .. versionadded:: 1.6.0
+        Complex-valued support added.
+
+    Examples
+    --------
+    We can filter an image using multidimensional splines:
+
+    >>> from scipy.ndimage import spline_filter
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> orig_img = np.eye(20)  # create an image
+    >>> orig_img[10, :] = 1.0
+    >>> sp_filter = spline_filter(orig_img, order=3)
+    >>> f, ax = plt.subplots(1, 2, sharex=True)
+    >>> for ind, data in enumerate([[orig_img, "original image"],
+    ...                             [sp_filter, "spline filter"]]):
+    ...     ax[ind].imshow(data[0], cmap='gray_r')
+    ...     ax[ind].set_title(data[1])
+    >>> plt.tight_layout()
+    >>> plt.show()
+
+    """
+    if order < 2 or order > 5:
+        raise RuntimeError('spline order not supported')
+    input = numpy.asarray(input)
+    complex_output = numpy.iscomplexobj(input)
+    output = _ni_support._get_output(output, input,
+                                     complex_output=complex_output)
+    if complex_output:
+        spline_filter(input.real, order, output.real, mode)
+        spline_filter(input.imag, order, output.imag, mode)
+        return output
+    if order not in [0, 1] and input.ndim > 0:
+        for axis in range(input.ndim):
+            spline_filter1d(input, order, axis, output=output, mode=mode)
+            input = output
+    else:
+        output[...] = input[...]
+    return output
+
+
+def _prepad_for_spline_filter(input, mode, cval):
+    if mode in ['nearest', 'grid-constant']:
+        npad = 12
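+        # pad by 12 samples so the IIR spline prefilter sees enough
+        # boundary context; these two modes have no exact boundary
+        # handling in the prefilter itself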
+        if mode == 'grid-constant':
+            padded = numpy.pad(input, npad, mode='constant',
+                               constant_values=cval)
+        elif mode == 'nearest':
+            padded = numpy.pad(input, npad, mode='edge')
+    else:
+        # other modes have exact boundary conditions implemented so
+        # no prepadding is needed
+        npad = 0
+        padded = input
+    return padded, npad
+
+
+@docfiller
+def geometric_transform(input, mapping, output_shape=None,
+                        output=None, order=3,
+                        mode='constant', cval=0.0, prefilter=True,
+                        extra_arguments=(), extra_keywords={}):
+    """
+    Apply an arbitrary geometric transform.
+
+    The given mapping function is used to find, for each point in the
+    output, the corresponding coordinates in the input. The value of the
+    input at those coordinates is determined by spline interpolation of
+    the requested order.
+
+    Parameters
+    ----------
+    %(input)s
+    mapping : {callable, scipy.LowLevelCallable}
+        A callable object that accepts a tuple of length equal to the output
+        array rank, and returns the corresponding input coordinates as a tuple
+        of length equal to the input array rank.
+    output_shape : tuple of ints, optional
+        Shape tuple.
+    %(output)s
+    order : int, optional
+        The order of the spline interpolation, default is 3.
+        The order has to be in the range 0-5.
+    %(mode_interp_constant)s
+    %(cval)s
+    %(prefilter)s
+    extra_arguments : tuple, optional
+        Extra arguments passed to `mapping`.
+    extra_keywords : dict, optional
+        Extra keywords passed to `mapping`.
+
+    Returns
+    -------
+    output : ndarray
+        The filtered input.
+
+    See Also
+    --------
+    map_coordinates, affine_transform, spline_filter1d
+
+
+    Notes
+    -----
+    This function also accepts low-level callback functions with one of
+    the following signatures and wrapped in `scipy.LowLevelCallable`:
+
+    .. code:: c
+
+       int mapping(npy_intp *output_coordinates, double *input_coordinates,
+                   int output_rank, int input_rank, void *user_data)
+       int mapping(intptr_t *output_coordinates, double *input_coordinates,
+                   int output_rank, int input_rank, void *user_data)
+
+    The calling function iterates over the elements of the output array,
+    calling the callback function at each element. The coordinates of the
+    current output element are passed through ``output_coordinates``. The
+    callback function must return the coordinates at which the input must
+    be interpolated in ``input_coordinates``. The rank of the input and
+    output arrays are given by ``input_rank`` and ``output_rank``
+    respectively. ``user_data`` is the data pointer provided
+    to `scipy.LowLevelCallable` as-is.
+
+    The callback function must return an integer error status that is zero
+    if something went wrong and one otherwise. If an error occurs, you should
+    normally set the Python error status with an informative message
+    before returning, otherwise a default error message is set by the
+    calling function.
+
+    In addition, some other low-level function pointer specifications
+    are accepted, but these are for backward compatibility only and should
+    not be used in new code.
+
+    For complex-valued `input`, this function transforms the real and imaginary
+    components independently.
+
+    .. versionadded:: 1.6.0
+        Complex-valued support added.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.ndimage import geometric_transform
+    >>> a = np.arange(12.).reshape((4, 3))
+    >>> def shift_func(output_coords):
+    ...     return (output_coords[0] - 0.5, output_coords[1] - 0.5)
+    ...
+    >>> geometric_transform(a, shift_func)
+    array([[ 0.   ,  0.   ,  0.   ],
+           [ 0.   ,  1.362,  2.738],
+           [ 0.   ,  4.812,  6.187],
+           [ 0.   ,  8.263,  9.637]])
+
+    >>> b = [1, 2, 3, 4, 5]
+    >>> def shift_func(output_coords):
+    ...     return (output_coords[0] - 3,)
+    ...
+    >>> geometric_transform(b, shift_func, mode='constant')
+    array([0, 0, 0, 1, 2])
+    >>> geometric_transform(b, shift_func, mode='nearest')
+    array([1, 1, 1, 1, 2])
+    >>> geometric_transform(b, shift_func, mode='reflect')
+    array([3, 2, 1, 1, 2])
+    >>> geometric_transform(b, shift_func, mode='wrap')
+    array([2, 3, 4, 1, 2])
+
+    """
+    if order < 0 or order > 5:
+        raise RuntimeError('spline order not supported')
+    input = numpy.asarray(input)
+    if output_shape is None:
+        output_shape = input.shape
+    if input.ndim < 1 or len(output_shape) < 1:
+        raise RuntimeError('input and output rank must be > 0')
+    complex_output = numpy.iscomplexobj(input)
+    output = _ni_support._get_output(output, input, shape=output_shape,
+                                     complex_output=complex_output)
+    if complex_output:
+        kwargs = dict(order=order, mode=mode, prefilter=prefilter,
+                      output_shape=output_shape,
+                      extra_arguments=extra_arguments,
+                      extra_keywords=extra_keywords)
+        geometric_transform(input.real, mapping, output=output.real,
+                            cval=numpy.real(cval), **kwargs)
+        geometric_transform(input.imag, mapping, output=output.imag,
+                            cval=numpy.imag(cval), **kwargs)
+        return output
+
+    if prefilter and order > 1:
+        padded, npad = _prepad_for_spline_filter(input, mode, cval)
+        filtered = spline_filter(padded, order, output=numpy.float64,
+                                 mode=mode)
+    else:
+        npad = 0
+        filtered = input
+    mode = _ni_support._extend_mode_to_code(mode)
+    _nd_image.geometric_transform(filtered, mapping, None, None, None, output,
+                                  order, mode, cval, npad, extra_arguments,
+                                  extra_keywords)
+    return output
+
+
+@docfiller
+def map_coordinates(input, coordinates, output=None, order=3,
+                    mode='constant', cval=0.0, prefilter=True):
+    """
+    Map the input array to new coordinates by interpolation.
+
+    The array of coordinates is used to find, for each point in the output,
+    the corresponding coordinates in the input. The value of the input at
+    those coordinates is determined by spline interpolation of the
+    requested order.
+
+    The shape of the output is derived from that of the coordinate
+    array by dropping the first axis. The values of the array along
+    the first axis are the coordinates in the input array at which the
+    output value is found.
+
+    Parameters
+    ----------
+    %(input)s
+    coordinates : array_like
+        The coordinates at which `input` is evaluated.
+    %(output)s
+    order : int, optional
+        The order of the spline interpolation, default is 3.
+        The order has to be in the range 0-5.
+    %(mode_interp_constant)s
+    %(cval)s
+    %(prefilter)s
+
+    Returns
+    -------
+    map_coordinates : ndarray
+        The result of transforming the input. The shape of the output is
+        derived from that of `coordinates` by dropping the first axis.
+
+    See Also
+    --------
+    spline_filter, geometric_transform, scipy.interpolate
+
+    Notes
+    -----
+    For complex-valued `input`, this function maps the real and imaginary
+    components independently.
+
+    .. versionadded:: 1.6.0
+        Complex-valued support added.
+
+    Examples
+    --------
+    >>> from scipy import ndimage
+    >>> import numpy as np
+    >>> a = np.arange(12.).reshape((4, 3))
+    >>> a
+    array([[  0.,   1.,   2.],
+           [  3.,   4.,   5.],
+           [  6.,   7.,   8.],
+           [  9.,  10.,  11.]])
+    >>> ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
+    array([ 2.,  7.])
+
+    Above, the interpolated value of a[0.5, 0.5] gives output[0], while
+    a[2, 1] is output[1].
+
+    >>> inds = np.array([[0.5, 2], [0.5, 4]])
+    >>> ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
+    array([  2. , -33.3])
+    >>> ndimage.map_coordinates(a, inds, order=1, mode='nearest')
+    array([ 2.,  8.])
+    >>> ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
+    array([ True, False])
+
+    """
+    if order < 0 or order > 5:
+        raise RuntimeError('spline order not supported')
+    input = numpy.asarray(input)
+    coordinates = numpy.asarray(coordinates)
+    if numpy.iscomplexobj(coordinates):
+        raise TypeError('Complex type not supported')
+    output_shape = coordinates.shape[1:]
+    if input.ndim < 1 or len(output_shape) < 1:
+        raise RuntimeError('input and output rank must be > 0')
+    if coordinates.shape[0] != input.ndim:
+        raise RuntimeError('invalid shape for coordinate array')
+    complex_output = numpy.iscomplexobj(input)
+    output = _ni_support._get_output(output, input, shape=output_shape,
+                                     complex_output=complex_output)
+    if complex_output:
+        kwargs = dict(order=order, mode=mode, prefilter=prefilter)
+        map_coordinates(input.real, coordinates, output=output.real,
+                        cval=numpy.real(cval), **kwargs)
+        map_coordinates(input.imag, coordinates, output=output.imag,
+                        cval=numpy.imag(cval), **kwargs)
+        return output
+    if prefilter and order > 1:
+        padded, npad = _prepad_for_spline_filter(input, mode, cval)
+        filtered = spline_filter(padded, order, output=numpy.float64,
+                                 mode=mode)
+    else:
+        npad = 0
+        filtered = input
+    mode = _ni_support._extend_mode_to_code(mode)
+    _nd_image.geometric_transform(filtered, None, coordinates, None, None,
+                                  output, order, mode, cval, npad, None, None)
+    return output
+
+
+@docfiller
+def affine_transform(input, matrix, offset=0.0, output_shape=None,
+                     output=None, order=3,
+                     mode='constant', cval=0.0, prefilter=True):
+    """
+    Apply an affine transformation.
+
+    Given an output image pixel index vector ``o``, the pixel value
+    is determined from the input image at position
+    ``np.dot(matrix, o) + offset``.
+
+    This does 'pull' (or 'backward') resampling, transforming the output space
+    to the input to locate data. Affine transformations are often described in
+    the 'push' (or 'forward') direction, transforming input to output. If you
+    have a matrix for the 'push' transformation, use its inverse
+    (:func:`numpy.linalg.inv`) in this function.
+
+    Parameters
+    ----------
+    %(input)s
+    matrix : ndarray
+        The inverse coordinate transformation matrix, mapping output
+        coordinates to input coordinates. If ``ndim`` is the number of
+        dimensions of ``input``, the given matrix must have one of the
+        following shapes:
+
+            - ``(ndim, ndim)``: the linear transformation matrix for each
+              output coordinate.
+            - ``(ndim,)``: assume that the 2-D transformation matrix is
+              diagonal, with the diagonal specified by the given value. A more
+              efficient algorithm is then used that exploits the separability
+              of the problem.
+            - ``(ndim + 1, ndim + 1)``: assume that the transformation is
+              specified using homogeneous coordinates [1]_. In this case, any
+              value passed to ``offset`` is ignored.
+            - ``(ndim, ndim + 1)``: as above, but the bottom row of a
+              homogeneous transformation matrix is always ``[0, 0, ..., 1]``,
+              and may be omitted.
+
+    offset : float or sequence, optional
+        The offset into the array where the transform is applied. If a float,
+        `offset` is the same for each axis. If a sequence, `offset` should
+        contain one value for each axis.
+    output_shape : tuple of ints, optional
+        Shape tuple.
+    %(output)s
+    order : int, optional
+        The order of the spline interpolation, default is 3.
+        The order has to be in the range 0-5.
+    %(mode_interp_constant)s
+    %(cval)s
+    %(prefilter)s
+
+    Returns
+    -------
+    affine_transform : ndarray
+        The transformed input.
+
+    Notes
+    -----
+    The given matrix and offset are used to find for each point in the
+    output the corresponding coordinates in the input by an affine
+    transformation. The value of the input at those coordinates is
+    determined by spline interpolation of the requested order. Points
+    outside the boundaries of the input are filled according to the given
+    mode.
+
+    .. versionchanged:: 0.18.0
+        Previously, the exact interpretation of the affine transformation
+        depended on whether the matrix was supplied as a 1-D or a
+        2-D array. If a 1-D array was supplied
+        to the matrix parameter, the output pixel value at index ``o``
+        was determined from the input image at position
+        ``matrix * (o + offset)``.
+
+    For complex-valued `input`, this function transforms the real and imaginary
+    components independently.
+
+    .. versionadded:: 1.6.0
+        Complex-valued support added.
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Homogeneous_coordinates
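+
+    Examples
+    --------
+    As a minimal sketch, a homogeneous matrix whose translation column
+    is ``[1, 0]`` pulls each output row from the next input row:
+
+    >>> import numpy as np
+    >>> from scipy import ndimage
+    >>> a = np.arange(12.).reshape(4, 3)
+    >>> matrix = np.array([[1., 0., 1.],
+    ...                    [0., 1., 0.],
+    ...                    [0., 0., 1.]])
+    >>> shifted = ndimage.affine_transform(a, matrix, order=0)
+    >>> np.array_equal(shifted[:3], a[1:])
+    True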
+    """
+    if order < 0 or order > 5:
+        raise RuntimeError('spline order not supported')
+    input = numpy.asarray(input)
+    if output_shape is None:
+        if isinstance(output, numpy.ndarray):
+            output_shape = output.shape
+        else:
+            output_shape = input.shape
+    if input.ndim < 1 or len(output_shape) < 1:
+        raise RuntimeError('input and output rank must be > 0')
+    complex_output = numpy.iscomplexobj(input)
+    output = _ni_support._get_output(output, input, shape=output_shape,
+                                     complex_output=complex_output)
+    if complex_output:
+        kwargs = dict(offset=offset, output_shape=output_shape, order=order,
+                      mode=mode, prefilter=prefilter)
+        affine_transform(input.real, matrix, output=output.real,
+                         cval=numpy.real(cval), **kwargs)
+        affine_transform(input.imag, matrix, output=output.imag,
+                         cval=numpy.imag(cval), **kwargs)
+        return output
+    if prefilter and order > 1:
+        padded, npad = _prepad_for_spline_filter(input, mode, cval)
+        filtered = spline_filter(padded, order, output=numpy.float64,
+                                 mode=mode)
+    else:
+        npad = 0
+        filtered = input
+    mode = _ni_support._extend_mode_to_code(mode)
+    matrix = numpy.asarray(matrix, dtype=numpy.float64)
+    if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
+        raise RuntimeError('no proper affine matrix provided')
+    if (matrix.ndim == 2 and matrix.shape[1] == input.ndim + 1 and
+            (matrix.shape[0] in [input.ndim, input.ndim + 1])):
+        if matrix.shape[0] == input.ndim + 1:
+            exptd = [0] * input.ndim + [1]
+            if not numpy.all(matrix[input.ndim] == exptd):
+                msg = ('Expected homogeneous transformation matrix with '
+                       'shape %s for image shape %s, but bottom row was '
+                       'not equal to %s' % (matrix.shape, input.shape, exptd))
+                raise ValueError(msg)
+        # assume input is homogeneous coordinate transformation matrix
+        offset = matrix[:input.ndim, input.ndim]
+        matrix = matrix[:input.ndim, :input.ndim]
+    if matrix.shape[0] != input.ndim:
+        raise RuntimeError('affine matrix has wrong number of rows')
+    if matrix.ndim == 2 and matrix.shape[1] != output.ndim:
+        raise RuntimeError('affine matrix has wrong number of columns')
+    if not matrix.flags.contiguous:
+        matrix = matrix.copy()
+    offset = _ni_support._normalize_sequence(offset, input.ndim)
+    offset = numpy.asarray(offset, dtype=numpy.float64)
+    if offset.ndim != 1 or offset.shape[0] < 1:
+        raise RuntimeError('no proper offset provided')
+    if not offset.flags.contiguous:
+        offset = offset.copy()
+    if matrix.ndim == 1:
+        warnings.warn(
+            "The behavior of affine_transform with a 1-D "
+            "array supplied for the matrix parameter has changed in "
+            "SciPy 0.18.0."
+        )
+        _nd_image.zoom_shift(filtered, matrix, offset/matrix, output, order,
+                             mode, cval, npad, False)
+    else:
+        _nd_image.geometric_transform(filtered, None, None, matrix, offset,
+                                      output, order, mode, cval, npad, None,
+                                      None)
+    return output
+
+
+@docfiller
+def shift(input, shift, output=None, order=3, mode='constant', cval=0.0,
+          prefilter=True):
+    """
+    Shift an array.
+
+    The array is shifted using spline interpolation of the requested order.
+    Points outside the boundaries of the input are filled according to the
+    given mode.
+
+    Parameters
+    ----------
+    %(input)s
+    shift : float or sequence
+        The shift along the axes. If a float, `shift` is the same for each
+        axis. If a sequence, `shift` should contain one value for each axis.
+    %(output)s
+    order : int, optional
+        The order of the spline interpolation, default is 3.
+        The order has to be in the range 0-5.
+    %(mode_interp_constant)s
+    %(cval)s
+    %(prefilter)s
+
+    Returns
+    -------
+    shift : ndarray
+        The shifted input.
+
+    Notes
+    -----
+    For complex-valued `input`, this function shifts the real and imaginary
+    components independently.
+
+    .. versionadded:: 1.6.0
+        Complex-valued support added.
+
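+    Examples
+    --------
+    Shifting a 1-D array by one sample with nearest-neighbor
+    interpolation (`order=0`); the sample shifted in from the boundary
+    takes `cval`:
+
+    >>> import numpy as np
+    >>> from scipy import ndimage
+    >>> ndimage.shift(np.array([0., 1., 2., 3., 4.]), 1, order=0)
+    array([0., 0., 1., 2., 3.])
+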
+    """
+    if order < 0 or order > 5:
+        raise RuntimeError('spline order not supported')
+    input = numpy.asarray(input)
+    if input.ndim < 1:
+        raise RuntimeError('input and output rank must be > 0')
+    complex_output = numpy.iscomplexobj(input)
+    output = _ni_support._get_output(output, input,
+                                     complex_output=complex_output)
+    if complex_output:
+        # import under different name to avoid confusion with shift parameter
+        from scipy.ndimage._interpolation import shift as _shift
+
+        kwargs = dict(order=order, mode=mode, prefilter=prefilter)
+        _shift(input.real, shift, output=output.real, cval=numpy.real(cval),
+               **kwargs)
+        _shift(input.imag, shift, output=output.imag, cval=numpy.imag(cval),
+               **kwargs)
+        return output
+    if prefilter and order > 1:
+        padded, npad = _prepad_for_spline_filter(input, mode, cval)
+        filtered = spline_filter(padded, order, output=numpy.float64,
+                                 mode=mode)
+    else:
+        npad = 0
+        filtered = input
+    mode = _ni_support._extend_mode_to_code(mode)
+    shift = _ni_support._normalize_sequence(shift, input.ndim)
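+    # The C routine maps output -> input coordinates (in_coord = out_coord
+    # + shift), so negate the user-facing shift to realize
+    # output[i] = input[i - shift].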
+    shift = [-ii for ii in shift]
+    shift = numpy.asarray(shift, dtype=numpy.float64)
+    if not shift.flags.contiguous:
+        shift = shift.copy()
+    _nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval,
+                         npad, False)
+    return output
+
+
+@docfiller
+def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0,
+         prefilter=True, *, grid_mode=False):
+    """
+    Zoom an array.
+
+    The array is zoomed using spline interpolation of the requested order.
+
+    Parameters
+    ----------
+    %(input)s
+    zoom : float or sequence
+        The zoom factor along the axes. If a float, `zoom` is the same for each
+        axis. If a sequence, `zoom` should contain one value for each axis.
+    %(output)s
+    order : int, optional
+        The order of the spline interpolation, default is 3.
+        The order has to be in the range 0-5.
+    %(mode_interp_constant)s
+    %(cval)s
+    %(prefilter)s
+    grid_mode : bool, optional
+        If False, the distance from the pixel centers is zoomed. Otherwise, the
+        distance including the full pixel extent is used. For example, a 1-D
+        signal of length 5 is considered to have length 4 when `grid_mode` is
+        False, but length 5 when `grid_mode` is True. See the following
+        visual illustration:
+
+        .. code-block:: text
+
+                | pixel 1 | pixel 2 | pixel 3 | pixel 4 | pixel 5 |
+                     |<-------------------------------------->|
+                                        vs.
+                |<----------------------------------------------->|
+
+        The starting point of the arrow in the diagram above corresponds to
+        coordinate location 0 in each mode.
+
+    Returns
+    -------
+    zoom : ndarray
+        The zoomed input.
+
+    Notes
+    -----
+    For complex-valued `input`, this function zooms the real and imaginary
+    components independently.
+
+    .. versionadded:: 1.6.0
+        Complex-valued support added.
+
+    Examples
+    --------
+    >>> from scipy import ndimage, datasets
+    >>> import matplotlib.pyplot as plt
+
+    >>> fig = plt.figure()
+    >>> ax1 = fig.add_subplot(121)  # left side
+    >>> ax2 = fig.add_subplot(122)  # right side
+    >>> ascent = datasets.ascent()
+    >>> result = ndimage.zoom(ascent, 3.0)
+    >>> ax1.imshow(ascent, vmin=0, vmax=255)
+    >>> ax2.imshow(result, vmin=0, vmax=255)
+    >>> plt.show()
+
+    >>> print(ascent.shape)
+    (512, 512)
+
+    >>> print(result.shape)
+    (1536, 1536)
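+
+    A minimal sketch of the output-shape arithmetic: each axis becomes
+    ``round(size * zoom)``, so a length-5 signal zoomed by 2 yields 10
+    samples (``grid-constant`` is the mode recommended alongside
+    ``grid_mode=True``):
+
+    >>> import numpy as np
+    >>> out = ndimage.zoom(np.arange(5.0), 2, grid_mode=True,
+    ...                    mode='grid-constant')
+    >>> out.shape
+    (10,)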
+    """
+    if order < 0 or order > 5:
+        raise RuntimeError('spline order not supported')
+    input = numpy.asarray(input)
+    if input.ndim < 1:
+        raise RuntimeError('input and output rank must be > 0')
+    zoom = _ni_support._normalize_sequence(zoom, input.ndim)
+    output_shape = tuple(
+            [int(round(ii * jj)) for ii, jj in zip(input.shape, zoom)])
+    complex_output = numpy.iscomplexobj(input)
+    output = _ni_support._get_output(output, input, shape=output_shape,
+                                     complex_output=complex_output)
+    if complex_output:
+        # import under different name to avoid confusion with zoom parameter
+        from scipy.ndimage._interpolation import zoom as _zoom
+
+        kwargs = dict(order=order, mode=mode, prefilter=prefilter)
+        _zoom(input.real, zoom, output=output.real, cval=numpy.real(cval),
+              **kwargs)
+        _zoom(input.imag, zoom, output=output.imag, cval=numpy.imag(cval),
+              **kwargs)
+        return output
+    if prefilter and order > 1:
+        padded, npad = _prepad_for_spline_filter(input, mode, cval)
+        filtered = spline_filter(padded, order, output=numpy.float64,
+                                 mode=mode)
+    else:
+        npad = 0
+        filtered = input
+    if grid_mode:
+        # warn about modes that may have surprising behavior
+        suggest_mode = None
+        if mode == 'constant':
+            suggest_mode = 'grid-constant'
+        elif mode == 'wrap':
+            suggest_mode = 'grid-wrap'
+        if suggest_mode is not None:
+            warnings.warn(
+                ("It is recommended to use mode = {} instead of {} when "
+                 "grid_mode is True.").format(suggest_mode, mode)
+            )
+    mode = _ni_support._extend_mode_to_code(mode)
+
+    zoom_div = numpy.array(output_shape)
+    zoom_nominator = numpy.array(input.shape)
+    if not grid_mode:
+        zoom_div -= 1
+        zoom_nominator -= 1
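+    # The per-axis step passed to the C code is input extent over output
+    # extent: pixel-center spans cover (n - 1) intervals when grid_mode is
+    # False, full pixel spans cover n when grid_mode is True.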
+
+    # Zooming to infinite values is unpredictable, so just choose
+    # zoom factor 1 instead
+    zoom = numpy.divide(zoom_nominator, zoom_div,
+                        out=numpy.ones_like(input.shape, dtype=numpy.float64),
+                        where=zoom_div != 0)
+    zoom = numpy.ascontiguousarray(zoom)
+    _nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval, npad,
+                         grid_mode)
+    return output
+
+
+@docfiller
+def rotate(input, angle, axes=(1, 0), reshape=True, output=None, order=3,
+           mode='constant', cval=0.0, prefilter=True):
+    """
+    Rotate an array.
+
+    The array is rotated in the plane defined by the two axes given by the
+    `axes` parameter using spline interpolation of the requested order.
+
+    Parameters
+    ----------
+    %(input)s
+    angle : float
+        The rotation angle in degrees.
+    axes : tuple of 2 ints, optional
+        The two axes that define the plane of rotation. Default is the first
+        two axes.
+    reshape : bool, optional
+        If `reshape` is true, the output shape is adapted so that the input
+        array is contained completely in the output. Default is True.
+    %(output)s
+    order : int, optional
+        The order of the spline interpolation, default is 3.
+        The order has to be in the range 0-5.
+    %(mode_interp_constant)s
+    %(cval)s
+    %(prefilter)s
+
+    Returns
+    -------
+    rotate : ndarray
+        The rotated input.
+
+    Notes
+    -----
+    For complex-valued `input`, this function rotates the real and imaginary
+    components independently.
+
+    .. versionadded:: 1.6.0
+        Complex-valued support added.
+
+    Examples
+    --------
+    >>> from scipy import ndimage, datasets
+    >>> import matplotlib.pyplot as plt
+    >>> fig = plt.figure(figsize=(10, 3))
+    >>> ax1, ax2, ax3 = fig.subplots(1, 3)
+    >>> img = datasets.ascent()
+    >>> img_45 = ndimage.rotate(img, 45, reshape=False)
+    >>> full_img_45 = ndimage.rotate(img, 45, reshape=True)
+    >>> ax1.imshow(img, cmap='gray')
+    >>> ax1.set_axis_off()
+    >>> ax2.imshow(img_45, cmap='gray')
+    >>> ax2.set_axis_off()
+    >>> ax3.imshow(full_img_45, cmap='gray')
+    >>> ax3.set_axis_off()
+    >>> fig.set_layout_engine('tight')
+    >>> plt.show()
+    >>> print(img.shape)
+    (512, 512)
+    >>> print(img_45.shape)
+    (512, 512)
+    >>> print(full_img_45.shape)
+    (724, 724)
+
+    """
+    input_arr = numpy.asarray(input)
+    ndim = input_arr.ndim
+
+    if ndim < 2:
+        raise ValueError('input array should be at least 2D')
+
+    axes = list(axes)
+
+    if len(axes) != 2:
+        raise ValueError('axes should contain exactly two values')
+
+    if not all([float(ax).is_integer() for ax in axes]):
+        raise ValueError('axes should contain only integer values')
+
+    if axes[0] < 0:
+        axes[0] += ndim
+    if axes[1] < 0:
+        axes[1] += ndim
+    if axes[0] < 0 or axes[1] < 0 or axes[0] >= ndim or axes[1] >= ndim:
+        raise ValueError('invalid rotation plane specified')
+
+    axes.sort()
+
+    c, s = special.cosdg(angle), special.sindg(angle)
+
+    rot_matrix = numpy.array([[c, s],
+                              [-s, c]])
+
+    img_shape = numpy.asarray(input_arr.shape)
+    in_plane_shape = img_shape[axes]
+    if reshape:
+        # Compute transformed input bounds
+        iy, ix = in_plane_shape
+        out_bounds = rot_matrix @ [[0, 0, iy, iy],
+                                   [0, ix, 0, ix]]
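+        # The four columns are the rotated corners of the input plane;
+        # their per-axis peak-to-peak range is the bounding-box extent.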
+        # Compute the shape of the transformed input plane
+        out_plane_shape = (out_bounds.ptp(axis=1) + 0.5).astype(int)
+    else:
+        out_plane_shape = img_shape[axes]
+
+    out_center = rot_matrix @ ((out_plane_shape - 1) / 2)
+    in_center = (in_plane_shape - 1) / 2
+    offset = in_center - out_center
+
+    output_shape = img_shape
+    output_shape[axes] = out_plane_shape
+    output_shape = tuple(output_shape)
+
+    complex_output = numpy.iscomplexobj(input_arr)
+    output = _ni_support._get_output(output, input_arr, shape=output_shape,
+                                     complex_output=complex_output)
+
+    if ndim <= 2:
+        affine_transform(input_arr, rot_matrix, offset, output_shape, output,
+                         order, mode, cval, prefilter)
+    else:
+        # If ndim > 2, the rotation is applied over all the planes
+        # parallel to axes
+        planes_coord = itertools.product(
+            *[[slice(None)] if ax in axes else range(img_shape[ax])
+              for ax in range(ndim)])
+
+        out_plane_shape = tuple(out_plane_shape)
+
+        for coordinates in planes_coord:
+            ia = input_arr[coordinates]
+            oa = output[coordinates]
+            affine_transform(ia, rot_matrix, offset, out_plane_shape,
+                             oa, order, mode, cval, prefilter)
+
+    return output
diff --git a/__packaged__/coreml/.python_dependencies/scipy/ndimage/_measurements.py b/__packaged__/coreml/.python_dependencies/scipy/ndimage/_measurements.py
new file mode 100644
index 00000000..1686482f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/ndimage/_measurements.py
@@ -0,0 +1,1674 @@
+# Copyright (C) 2003-2005 Peter J. Verveer
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above
+#    copyright notice, this list of conditions and the following
+#    disclaimer in the documentation and/or other materials provided
+#    with the distribution.
+#
+# 3. The name of the author may not be used to endorse or promote
+#    products derived from this software without specific prior
+#    written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import numpy
+import numpy as np
+from . import _ni_support
+from . import _ni_label
+from . import _nd_image
+from . import _morphology
+
+__all__ = ['label', 'find_objects', 'labeled_comprehension', 'sum', 'mean',
+           'variance', 'standard_deviation', 'minimum', 'maximum', 'median',
+           'minimum_position', 'maximum_position', 'extrema', 'center_of_mass',
+           'histogram', 'watershed_ift', 'sum_labels', 'value_indices']
+
+
+def label(input, structure=None, output=None):
+    """
+    Label features in an array.
+
+    Parameters
+    ----------
+    input : array_like
+        An array-like object to be labeled. Any non-zero values in `input` are
+        counted as features and zero values are considered the background.
+    structure : array_like, optional
+        A structuring element that defines feature connections.
+        `structure` must be centrosymmetric
+        (see Notes).
+        If no structuring element is provided,
+        one is automatically generated with a squared connectivity equal to
+        one.  That is, for a 2-D `input` array, the default structuring element
+        is::
+
+            [[0,1,0],
+             [1,1,1],
+             [0,1,0]]
+
+    output : (None, data-type, array_like), optional
+        If `output` is a data type, it specifies the type of the resulting
+        labeled feature array.
+        If `output` is an array-like object, then `output` will be updated
+        with the labeled features from this function.  This function can
+        operate in-place by passing ``output=input``.
+        Note that the output must be able to store the largest label, or this
+        function will raise an Exception.
+
+    Returns
+    -------
+    label : ndarray or int
+        An integer ndarray where each unique feature in `input` has a unique
+        label in the returned array.
+    num_features : int
+        How many objects were found.
+
+        If `output` is None, this function returns a tuple of
+        (`labeled_array`, `num_features`).
+
+        If `output` is an ndarray, then it will be updated with values in
+        `labeled_array` and only `num_features` will be returned by this
+        function.
+
+    See Also
+    --------
+    find_objects : generate a list of slices for the labeled features (or
+                   objects); useful for finding features' position or
+                   dimensions
+
+    Notes
+    -----
+    A centrosymmetric matrix is a matrix that is symmetric about the center.
+    See [1]_ for more information.
+
+    The `structure` matrix must be centrosymmetric to ensure
+    two-way connections.
+    For instance, if the `structure` matrix is not centrosymmetric
+    and is defined as::
+
+        [[0,1,0],
+         [1,1,0],
+         [0,0,0]]
+
+    and the `input` is::
+
+        [[1,2],
+         [0,3]]
+
+    then the structure matrix would indicate that
+    entry 2 in the input is connected to 1,
+    but 1 is not connected to 2.
+
+    Examples
+    --------
+    Create an image with some features, then label it using the default
+    (cross-shaped) structuring element:
+
+    >>> from scipy.ndimage import label, generate_binary_structure
+    >>> import numpy as np
+    >>> a = np.array([[0,0,1,1,0,0],
+    ...               [0,0,0,1,0,0],
+    ...               [1,1,0,0,1,0],
+    ...               [0,0,0,1,0,0]])
+    >>> labeled_array, num_features = label(a)
+
+    Each of the 4 features is labeled with a different integer:
+
+    >>> num_features
+    4
+    >>> labeled_array
+    array([[0, 0, 1, 1, 0, 0],
+           [0, 0, 0, 1, 0, 0],
+           [2, 2, 0, 0, 3, 0],
+           [0, 0, 0, 4, 0, 0]])
+
+    Generate a structuring element that will consider features connected even
+    if they touch diagonally:
+
+    >>> s = generate_binary_structure(2,2)
+
+    or,
+
+    >>> s = [[1,1,1],
+    ...      [1,1,1],
+    ...      [1,1,1]]
+
+    Label the image using the new structuring element:
+
+    >>> labeled_array, num_features = label(a, structure=s)
+
+    Show the 2 labeled features (note that features 1, 3, and 4 from above are
+    now considered a single feature):
+
+    >>> num_features
+    2
+    >>> labeled_array
+    array([[0, 0, 1, 1, 0, 0],
+           [0, 0, 0, 1, 0, 0],
+           [2, 2, 0, 0, 1, 0],
+           [0, 0, 0, 1, 0, 0]])
+
+    References
+    ----------
+
+    .. [1] James R. Weaver, "Centrosymmetric (cross-symmetric)
+       matrices, their basic properties, eigenvalues, and
+       eigenvectors." The American Mathematical Monthly 92.10
+       (1985): 711-717.
+
+    """
+    input = numpy.asarray(input)
+    if numpy.iscomplexobj(input):
+        raise TypeError('Complex type not supported')
+    if structure is None:
+        structure = _morphology.generate_binary_structure(input.ndim, 1)
+    structure = numpy.asarray(structure, dtype=bool)
+    if structure.ndim != input.ndim:
+        raise RuntimeError('structure and input must have equal rank')
+    for ii in structure.shape:
+        if ii != 3:
+            raise ValueError('structure dimensions must be equal to 3')
+
+    # Use 32 bits if it's large enough for this image.
+    # _ni_label.label() needs two entries for background and
+    # foreground tracking
+    need_64bits = input.size >= (2**31 - 2)
+
+    if isinstance(output, numpy.ndarray):
+        if output.shape != input.shape:
+            raise ValueError("output shape not correct")
+        caller_provided_output = True
+    else:
+        caller_provided_output = False
+        if output is None:
+            output = np.empty(input.shape, np.intp if need_64bits else np.int32)
+        else:
+            output = np.empty(input.shape, output)
+
+    # handle scalars (0-D) and empty arrays
+    if input.ndim == 0 or input.size == 0:
+        if input.ndim == 0:
+            # scalar
+            maxlabel = 1 if (input != 0) else 0
+            output[...] = maxlabel
+        else:
+            # empty array
+            maxlabel = 0
+        if caller_provided_output:
+            return maxlabel
+        else:
+            return output, maxlabel
+
+    try:
+        max_label = _ni_label._label(input, structure, output)
+    except _ni_label.NeedMoreBits as e:
+        # Make another attempt with enough bits, then try to cast to the
+        # new type.
+        tmp_output = np.empty(input.shape, np.intp if need_64bits else np.int32)
+        max_label = _ni_label._label(input, structure, tmp_output)
+        output[...] = tmp_output[...]
+        if not np.all(output == tmp_output):
+            # refuse to return bad results
+            raise RuntimeError(
+                "insufficient bit-depth in requested output type"
+            ) from e
+
+    if caller_provided_output:
+        # result was written in-place
+        return max_label
+    else:
+        return output, max_label
+
+
+def find_objects(input, max_label=0):
+    """
+    Find objects in a labeled array.
+
+    Parameters
+    ----------
+    input : ndarray of ints
+        Array containing objects defined by different labels. Labels with
+        value 0 are ignored.
+    max_label : int, optional
+        Maximum label to be searched for in `input`. If max_label is not
+        given, the positions of all objects are returned.
+
+    Returns
+    -------
+    object_slices : list of tuples
+        A list of tuples, with each tuple containing N slices (with N the
+        dimension of the input array). Slices correspond to the minimal
+        parallelepiped that contains the object. If a label number is missing,
+        None is returned instead of a slice.
+
+    See Also
+    --------
+    label, center_of_mass
+
+    Notes
+    -----
+    This function is very useful for isolating a volume of interest inside
+    a 3-D array, that cannot be "seen through".
+
+    Examples
+    --------
+    >>> from scipy import ndimage
+    >>> import numpy as np
+    >>> a = np.zeros((6,6), dtype=int)
+    >>> a[2:4, 2:4] = 1
+    >>> a[4, 4] = 1
+    >>> a[:2, :3] = 2
+    >>> a[0, 5] = 3
+    >>> a
+    array([[2, 2, 2, 0, 0, 3],
+           [2, 2, 2, 0, 0, 0],
+           [0, 0, 1, 1, 0, 0],
+           [0, 0, 1, 1, 0, 0],
+           [0, 0, 0, 0, 1, 0],
+           [0, 0, 0, 0, 0, 0]])
+    >>> ndimage.find_objects(a)
+    [(slice(2, 5, None), slice(2, 5, None)), (slice(0, 2, None), slice(0, 3, None)), (slice(0, 1, None), slice(5, 6, None))]
+    >>> ndimage.find_objects(a, max_label=2)
+    [(slice(2, 5, None), slice(2, 5, None)), (slice(0, 2, None), slice(0, 3, None))]
+    >>> ndimage.find_objects(a == 1, max_label=2)
+    [(slice(2, 5, None), slice(2, 5, None)), None]
+
+    >>> loc = ndimage.find_objects(a)[0]
+    >>> a[loc]
+    array([[1, 1, 0],
+           [1, 1, 0],
+           [0, 0, 1]])
+
+    """
+    input = numpy.asarray(input)
+    if numpy.iscomplexobj(input):
+        raise TypeError('Complex type not supported')
+
+    if max_label < 1:
+        max_label = input.max()
+
+    return _nd_image.find_objects(input, max_label)
+
+
+def value_indices(arr, *, ignore_value=None):
+    """
+    Find indices of each distinct value in given array.
+
+    Parameters
+    ----------
+    arr : ndarray of ints
+        Array containing integer values.
+    ignore_value : int, optional
+        This value will be ignored in searching the `arr` array. If not
+        given, all values found will be included in output. Default
+        is None.
+
+    Returns
+    -------
+    indices : dictionary
+        A Python dictionary of array indices for each distinct value. The
+        dictionary is keyed by the distinct values, the entries are array
+        index tuples covering all occurrences of the value within the
+        array.
+
+        This dictionary can occupy significant memory, usually several times
+        the size of the input array.
+
+    Notes
+    -----
+    For a small array with few distinct values, one might use
+    `numpy.unique()` to find all possible values, and ``(arr == val)`` to
+    locate each value within that array. However, for large arrays,
+    with many distinct values, this can become extremely inefficient,
+    as locating each value would require a new search through the entire
+    array. Using this function, there is essentially one search, with
+    the indices saved for all distinct values.
+
+    This is useful when matching a categorical image (e.g. a segmentation
+    or classification) to an associated image of other data, allowing
+    any per-class statistic(s) to then be calculated. It provides a
+    more flexible alternative to functions like ``scipy.ndimage.mean()``
+    and ``scipy.ndimage.variance()``.
+
+    Some other closely related functionality, with different strengths and
+    weaknesses, can also be found in ``scipy.stats.binned_statistic()`` and
+    the `scikit-image <https://scikit-image.org>`_ function
+    ``skimage.measure.regionprops()``.
+
+    Note for IDL users: this provides functionality equivalent to IDL's
+    REVERSE_INDICES option (as per the IDL documentation for the
+    `HISTOGRAM `_
+    function).
+
+    .. versionadded:: 1.10.0
+
+    See Also
+    --------
+    label, maximum, median, minimum_position, extrema, sum, mean, variance,
+    standard_deviation, numpy.where, numpy.unique
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import ndimage
+    >>> a = np.zeros((6, 6), dtype=int)
+    >>> a[2:4, 2:4] = 1
+    >>> a[4, 4] = 1
+    >>> a[:2, :3] = 2
+    >>> a[0, 5] = 3
+    >>> a
+    array([[2, 2, 2, 0, 0, 3],
+           [2, 2, 2, 0, 0, 0],
+           [0, 0, 1, 1, 0, 0],
+           [0, 0, 1, 1, 0, 0],
+           [0, 0, 0, 0, 1, 0],
+           [0, 0, 0, 0, 0, 0]])
+    >>> val_indices = ndimage.value_indices(a)
+
+    The dictionary `val_indices` will have an entry for each distinct
+    value in the input array.
+
+    >>> val_indices.keys()
+    dict_keys([0, 1, 2, 3])
+
+    The entry for each value is an index tuple, locating the elements
+    with that value.
+
+    >>> ndx1 = val_indices[1]
+    >>> ndx1
+    (array([2, 2, 3, 3, 4]), array([2, 3, 2, 3, 4]))
+
+    This can be used to index into the original array, or any other
+    array with the same shape.
+
+    >>> a[ndx1]
+    array([1, 1, 1, 1, 1])
+
+    If the zeros were to be ignored, then the resulting dictionary
+    would no longer have an entry for zero.
+
+    >>> val_indices = ndimage.value_indices(a, ignore_value=0)
+    >>> val_indices.keys()
+    dict_keys([1, 2, 3])
+
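+    As a sketch of the per-class statistics use mentioned in the Notes,
+    the index tuples can gather values from any same-shaped array (here an
+    illustrative `data` array; float display may vary by NumPy version):
+
+    >>> data = np.arange(36).reshape(6, 6)
+    >>> {val: data[ndx].mean() for val, ndx in val_indices.items()}
+    {1: 19.6, 2: 4.0, 3: 5.0}
+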
+    """
+    # Cope with ignore_value being None, without too much extra complexity
+    # in the C code. If not None, the value is passed in as a numpy array
+    # with the same dtype as arr.
+    ignore_value_arr = numpy.zeros((1,), dtype=arr.dtype)
+    ignoreIsNone = (ignore_value is None)
+    if not ignoreIsNone:
+        ignore_value_arr[0] = ignore_value_arr.dtype.type(ignore_value)
+
+    val_indices = _nd_image.value_indices(arr, ignoreIsNone, ignore_value_arr)
+    return val_indices
+
+
+def labeled_comprehension(input, labels, index, func, out_dtype, default,
+                          pass_positions=False):
+    """
+    Roughly equivalent to ``[func(input[labels == i]) for i in index]``.
+
+    Sequentially applies an arbitrary function (that works on array_like input)
+    to subsets of an N-D image array specified by `labels` and `index`.
+    The option exists to provide the function with positional parameters as the
+    second argument.
+
+    Parameters
+    ----------
+    input : array_like
+        Data from which to select `labels` to process.
+    labels : array_like or None
+        Labels to objects in `input`.
+        If not None, array must be same shape as `input`.
+        If None, `func` is applied to raveled `input`.
+    index : int, sequence of ints or None
+        Subset of `labels` to which to apply `func`.
+        If a scalar, a single value is returned.
+        If None, `func` is applied to all non-zero values of `labels`.
+    func : callable
+        Python function to apply to `labels` from `input`.
+    out_dtype : dtype
+        Dtype to use for `result`.
+    default : int, float or None
+        Default return value when an element of `index` does not exist
+        in `labels`.
+    pass_positions : bool, optional
+        If True, pass linear indices to `func` as a second argument.
+        Default is False.
+
+    Returns
+    -------
+    result : ndarray
+        Result of applying `func` to each of `labels` to `input` in `index`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array([[1, 2, 0, 0],
+    ...               [5, 3, 0, 4],
+    ...               [0, 0, 0, 7],
+    ...               [9, 3, 0, 0]])
+    >>> from scipy import ndimage
+    >>> lbl, nlbl = ndimage.label(a)
+    >>> lbls = np.arange(1, nlbl+1)
+    >>> ndimage.labeled_comprehension(a, lbl, lbls, np.mean, float, 0)
+    array([ 2.75,  5.5 ,  6.  ])
+
+    Falling back to `default`:
+
+    >>> lbls = np.arange(1, nlbl+2)
+    >>> ndimage.labeled_comprehension(a, lbl, lbls, np.mean, float, -1)
+    array([ 2.75,  5.5 ,  6.  , -1.  ])
+
+    Passing positions:
+
+    >>> def fn(val, pos):
+    ...     print("fn says: %s : %s" % (val, pos))
+    ...     return (val.sum()) if (pos.sum() % 2 == 0) else (-val.sum())
+    ...
+    >>> ndimage.labeled_comprehension(a, lbl, lbls, fn, float, 0, True)
+    fn says: [1 2 5 3] : [0 1 4 5]
+    fn says: [4 7] : [ 7 11]
+    fn says: [9 3] : [12 13]
+    array([ 11.,  11., -12.,   0.])
+
+    """
+
+    as_scalar = numpy.isscalar(index)
+    input = numpy.asarray(input)
+
+    if pass_positions:
+        positions = numpy.arange(input.size).reshape(input.shape)
+
+    if labels is None:
+        if index is not None:
+            raise ValueError("index without defined labels")
+        if not pass_positions:
+            return func(input.ravel())
+        else:
+            return func(input.ravel(), positions.ravel())
+
+    try:
+        input, labels = numpy.broadcast_arrays(input, labels)
+    except ValueError as e:
+        raise ValueError("input and labels must have the same shape "
+                            "(excepting dimensions with width 1)") from e
+
+    if index is None:
+        if not pass_positions:
+            return func(input[labels > 0])
+        else:
+            return func(input[labels > 0], positions[labels > 0])
+
+    index = numpy.atleast_1d(index)
+    if np.any(index.astype(labels.dtype).astype(index.dtype) != index):
+        raise ValueError("Cannot convert index values from <%s> to <%s> "
+                            "(labels' type) without loss of precision" %
+                            (index.dtype, labels.dtype))
+
+    index = index.astype(labels.dtype)
+
+    # optimization: find min/max in index, and select those parts of labels, input, and positions
+    lo = index.min()
+    hi = index.max()
+    mask = (labels >= lo) & (labels <= hi)
+
+    # this also ravels the arrays
+    labels = labels[mask]
+    input = input[mask]
+    if pass_positions:
+        positions = positions[mask]
+
+    # sort everything by labels
+    label_order = labels.argsort()
+    labels = labels[label_order]
+    input = input[label_order]
+    if pass_positions:
+        positions = positions[label_order]
+
+    index_order = index.argsort()
+    sorted_index = index[index_order]
+
+    def do_map(inputs, output):
+        """labels must be sorted"""
+        nidx = sorted_index.size
+
+        # Find boundaries for each stretch of constant labels
+        # This could be faster, but we already paid N log N to sort labels.
+        lo = numpy.searchsorted(labels, sorted_index, side='left')
+        hi = numpy.searchsorted(labels, sorted_index, side='right')
+
+        for i, l, h in zip(range(nidx), lo, hi):
+            if l == h:
+                continue
+            output[i] = func(*[inp[l:h] for inp in inputs])
+
+    temp = numpy.empty(index.shape, out_dtype)
+    temp[:] = default
+    if not pass_positions:
+        do_map([input], temp)
+    else:
+        do_map([input, positions], temp)
+
+    output = numpy.zeros(index.shape, out_dtype)
+    output[index_order] = temp
+    if as_scalar:
+        output = output[0]
+
+    return output
+
+
+def _safely_castable_to_int(dt):
+    """Test whether the NumPy data type `dt` can be safely cast to an int."""
+    int_size = np.dtype(int).itemsize
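+    # Signed types fit if no wider than the platform int; unsigned types
+    # must be strictly narrower (e.g. uint64 values can exceed int64).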
+    safe = ((np.issubdtype(dt, np.signedinteger) and dt.itemsize <= int_size) or
+            (np.issubdtype(dt, np.unsignedinteger) and dt.itemsize < int_size))
+    return safe
+
+
+def _stats(input, labels=None, index=None, centered=False):
+    """Count, sum, and optionally compute (sum - centre)^2 of input by label
+
+    Parameters
+    ----------
+    input : array_like, N-D
+        The input data to be analyzed.
+    labels : array_like (N-D), optional
+        The labels of the data in `input`. This array must be broadcast
+        compatible with `input`; typically, it is the same shape as `input`.
+        If `labels` is None, all nonzero values in `input` are treated as
+        the single labeled group.
+    index : label or sequence of labels, optional
+        These are the labels of the groups for which the stats are computed.
+        If `index` is None, the stats are computed for the single group where
+        `labels` is greater than 0.
+    centered : bool, optional
+        If True, the centered sum of squares for each labeled group is
+        also returned. Default is False.
+
+    Returns
+    -------
+    counts : int or ndarray of ints
+        The number of elements in each labeled group.
+    sums : scalar or ndarray of scalars
+        The sums of the values in each labeled group.
+    sums_c : scalar or ndarray of scalars, optional
+        The sums of mean-centered squares of the values in each labeled group.
+        This is only returned if `centered` is True.
+
+    """
+    def single_group(vals):
+        if centered:
+            vals_c = vals - vals.mean()
+            return vals.size, vals.sum(), (vals_c * vals_c.conjugate()).sum()
+        else:
+            return vals.size, vals.sum()
+
+    if labels is None:
+        return single_group(input)
+
+    # ensure input and labels match sizes
+    input, labels = numpy.broadcast_arrays(input, labels)
+
+    if index is None:
+        return single_group(input[labels > 0])
+
+    if numpy.isscalar(index):
+        return single_group(input[labels == index])
+
+    def _sum_centered(labels):
+        # `labels` is expected to be an ndarray with the same shape as `input`.
+        # It must contain the label indices (which are not necessarily the labels
+        # themselves).
+        means = sums / counts
+        centered_input = input - means[labels]
+        # bincount expects 1-D inputs, so we ravel the arguments.
+        bc = numpy.bincount(labels.ravel(),
+                            weights=(centered_input *
+                                     centered_input.conjugate()).ravel())
+        return bc
+
+    # Remap labels to unique integers if necessary, or if the largest
+    # label is larger than the number of values.
+
+    if (not _safely_castable_to_int(labels.dtype) or
+            labels.min() < 0 or labels.max() > labels.size):
+        # Use numpy.unique to generate the label indices.  `new_labels` will
+        # be 1-D, but it should be interpreted as the flattened N-D array of
+        # label indices.
+        unique_labels, new_labels = numpy.unique(labels, return_inverse=True)
+        counts = numpy.bincount(new_labels)
+        sums = numpy.bincount(new_labels, weights=input.ravel())
+        if centered:
+            # Compute the sum of the mean-centered squares.
+            # We must reshape new_labels to the N-D shape of `input` before
+            # passing it _sum_centered.
+            sums_c = _sum_centered(new_labels.reshape(labels.shape))
+        idxs = numpy.searchsorted(unique_labels, index)
+        # make all of idxs valid
+        idxs[idxs >= unique_labels.size] = 0
+        found = (unique_labels[idxs] == index)
+    else:
+        # labels are an integer type allowed by bincount, and there aren't too
+        # many, so call bincount directly.
+        counts = numpy.bincount(labels.ravel())
+        sums = numpy.bincount(labels.ravel(), weights=input.ravel())
+        if centered:
+            sums_c = _sum_centered(labels)
+        # make sure all index values are valid
+        idxs = numpy.asanyarray(index, numpy.int_).copy()
+        found = (idxs >= 0) & (idxs < counts.size)
+        idxs[~found] = 0
+
+    counts = counts[idxs]
+    counts[~found] = 0
+    sums = sums[idxs]
+    sums[~found] = 0
+
+    if not centered:
+        return (counts, sums)
+    else:
+        sums_c = sums_c[idxs]
+        sums_c[~found] = 0
+        return (counts, sums, sums_c)
+
+
+def sum(input, labels=None, index=None):
+    """
+    Calculate the sum of the values of the array.
+
+    Notes
+    -----
+    This is an alias for `ndimage.sum_labels` kept for backwards-compatibility
+    reasons; for new code, please prefer `sum_labels`.  See the `sum_labels`
+    docstring for more details.
+
+    """
+    return sum_labels(input, labels, index)
+
+
+def sum_labels(input, labels=None, index=None):
+    """
+    Calculate the sum of the values of the array.
+
+    Parameters
+    ----------
+    input : array_like
+        Values of `input` inside the regions defined by `labels`
+        are summed together.
+    labels : array_like of ints, optional
+        Assign labels to the values of the array. Has to have the same shape as
+        `input`.
+    index : array_like, optional
+        A single label number or a sequence of label numbers of
+        the objects to be measured.
+
+    Returns
+    -------
+    sum : ndarray or scalar
+        An array of the sums of values of `input` inside the regions defined
+        by `labels` with the same shape as `index`. If 'index' is None or scalar,
+        a scalar is returned.
+
+    See Also
+    --------
+    mean, median
+
+    Examples
+    --------
+    >>> from scipy import ndimage
+    >>> input =  [0,1,2,3]
+    >>> labels = [1,1,2,2]
+    >>> ndimage.sum_labels(input, labels, index=[1,2])
+    [1.0, 5.0]
+    >>> ndimage.sum_labels(input, labels, index=1)
+    1
+    >>> ndimage.sum_labels(input, labels)
+    6
+
+    """
+    count, sum = _stats(input, labels, index)
+    return sum
+
+
+def mean(input, labels=None, index=None):
+    """
+    Calculate the mean of the values of an array at labels.
+
+    Parameters
+    ----------
+    input : array_like
+        Array on which to compute the mean of elements over distinct
+        regions.
+    labels : array_like, optional
+        Array of labels of same shape, or broadcastable to the same shape as
+        `input`. All elements sharing the same label form one region over
+        which the mean of the elements is computed.
+    index : int or sequence of ints, optional
+        Labels of the objects over which the mean is to be computed.
+        Default is None, in which case the mean for all values where label is
+        greater than 0 is calculated.
+
+    Returns
+    -------
+    out : list
+        Sequence of same length as `index`, with the mean of the different
+        regions labeled by the labels in `index`.
+
+    See Also
+    --------
+    variance, standard_deviation, minimum, maximum, sum, label
+
+    Examples
+    --------
+    >>> from scipy import ndimage
+    >>> import numpy as np
+    >>> a = np.arange(25).reshape((5,5))
+    >>> labels = np.zeros_like(a)
+    >>> labels[3:5,3:5] = 1
+    >>> index = np.unique(labels)
+    >>> labels
+    array([[0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0],
+           [0, 0, 0, 1, 1],
+           [0, 0, 0, 1, 1]])
+    >>> index
+    array([0, 1])
+    >>> ndimage.mean(a, labels=labels, index=index)
+    [10.285714285714286, 21.0]
+
+    """
+
+    count, sum = _stats(input, labels, index)
+    return sum / numpy.asanyarray(count).astype(numpy.float64)
+
+
+def variance(input, labels=None, index=None):
+    """
+    Calculate the variance of the values of an N-D image array, optionally at
+    specified sub-regions.
+
+    Parameters
+    ----------
+    input : array_like
+        N-D image data to process.
+    labels : array_like, optional
+        Labels defining sub-regions in `input`.
+        If not None, must be same shape as `input`.
+    index : int or sequence of ints, optional
+        `labels` to include in output.  If None (default), all values where
+        `labels` is non-zero are used.
+
+    Returns
+    -------
+    variance : float or ndarray
+        Values of variance, for each sub-region if `labels` and `index` are
+        specified.
+
+    See Also
+    --------
+    label, standard_deviation, maximum, minimum, extrema
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array([[1, 2, 0, 0],
+    ...               [5, 3, 0, 4],
+    ...               [0, 0, 0, 7],
+    ...               [9, 3, 0, 0]])
+    >>> from scipy import ndimage
+    >>> ndimage.variance(a)
+    7.609375
+
+    Features to process can be specified using `labels` and `index`:
+
+    >>> lbl, nlbl = ndimage.label(a)
+    >>> ndimage.variance(a, lbl, index=np.arange(1, nlbl+1))
+    array([ 2.1875,  2.25  ,  9.    ])
+
+    If no index is given, all non-zero `labels` are processed:
+
+    >>> ndimage.variance(a, lbl)
+    6.1875
+
+    """
+    count, sum, sum_c_sq = _stats(input, labels, index, centered=True)
+    return sum_c_sq / np.asanyarray(count).astype(float)
+
+
+def standard_deviation(input, labels=None, index=None):
+    """
+    Calculate the standard deviation of the values of an N-D image array,
+    optionally at specified sub-regions.
+
+    Parameters
+    ----------
+    input : array_like
+        N-D image data to process.
+    labels : array_like, optional
+        Labels to identify sub-regions in `input`.
+        If not None, must be same shape as `input`.
+    index : int or sequence of ints, optional
+        `labels` to include in output. If None (default), all values where
+        `labels` is non-zero are used.
+
+    Returns
+    -------
+    standard_deviation : float or ndarray
+        Values of standard deviation, for each sub-region if `labels` and
+        `index` are specified.
+
+    See Also
+    --------
+    label, variance, maximum, minimum, extrema
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array([[1, 2, 0, 0],
+    ...               [5, 3, 0, 4],
+    ...               [0, 0, 0, 7],
+    ...               [9, 3, 0, 0]])
+    >>> from scipy import ndimage
+    >>> ndimage.standard_deviation(a)
+    2.7585095613392387
+
+    Features to process can be specified using `labels` and `index`:
+
+    >>> lbl, nlbl = ndimage.label(a)
+    >>> ndimage.standard_deviation(a, lbl, index=np.arange(1, nlbl+1))
+    array([ 1.479,  1.5  ,  3.   ])
+
+    If no index is given, non-zero `labels` are processed:
+
+    >>> ndimage.standard_deviation(a, lbl)
+    2.4874685927665499
+
+    """
+    return numpy.sqrt(variance(input, labels, index))
+
+
+def _select(input, labels=None, index=None, find_min=False, find_max=False,
+            find_min_positions=False, find_max_positions=False,
+            find_median=False):
+    """Returns min, max, or both, plus their positions (if requested), and
+    median."""
+
+    input = numpy.asanyarray(input)
+
+    find_positions = find_min_positions or find_max_positions
+    positions = None
+    if find_positions:
+        positions = numpy.arange(input.size).reshape(input.shape)
+
+    def single_group(vals, positions):
+        result = []
+        if find_min:
+            result += [vals.min()]
+        if find_min_positions:
+            result += [positions[vals == vals.min()][0]]
+        if find_max:
+            result += [vals.max()]
+        if find_max_positions:
+            result += [positions[vals == vals.max()][0]]
+        if find_median:
+            result += [numpy.median(vals)]
+        return result
+
+    if labels is None:
+        return single_group(input, positions)
+
+    # ensure input and labels match sizes
+    input, labels = numpy.broadcast_arrays(input, labels)
+
+    if index is None:
+        mask = (labels > 0)
+        masked_positions = None
+        if find_positions:
+            masked_positions = positions[mask]
+        return single_group(input[mask], masked_positions)
+
+    if numpy.isscalar(index):
+        mask = (labels == index)
+        masked_positions = None
+        if find_positions:
+            masked_positions = positions[mask]
+        return single_group(input[mask], masked_positions)
+
+    # remap labels to unique integers if necessary, or if the largest
+    # label is larger than the number of values.
+    if (not _safely_castable_to_int(labels.dtype) or
+            labels.min() < 0 or labels.max() > labels.size):
+        # remap labels, and indexes
+        unique_labels, labels = numpy.unique(labels, return_inverse=True)
+        idxs = numpy.searchsorted(unique_labels, index)
+
+        # make all of idxs valid
+        idxs[idxs >= unique_labels.size] = 0
+        found = (unique_labels[idxs] == index)
+    else:
+        # labels are an integer type, and there aren't too many
+        idxs = numpy.asanyarray(index, numpy.int_).copy()
+        found = (idxs >= 0) & (idxs <= labels.max())
+
+    idxs[~ found] = labels.max() + 1
+
+    if find_median:
+        order = numpy.lexsort((input.ravel(), labels.ravel()))
+    else:
+        order = input.ravel().argsort()
+    input = input.ravel()[order]
+    labels = labels.ravel()[order]
+    if find_positions:
+        positions = positions.ravel()[order]
+
+    result = []
+    if find_min:
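+        # input is sorted ascending, so writing in reverse leaves the
+        # smallest value per label in place (later writes win on repeated
+        # indices); the forward pass for find_max keeps the largest.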
+        mins = numpy.zeros(labels.max() + 2, input.dtype)
+        mins[labels[::-1]] = input[::-1]
+        result += [mins[idxs]]
+    if find_min_positions:
+        minpos = numpy.zeros(labels.max() + 2, int)
+        minpos[labels[::-1]] = positions[::-1]
+        result += [minpos[idxs]]
+    if find_max:
+        maxs = numpy.zeros(labels.max() + 2, input.dtype)
+        maxs[labels] = input
+        result += [maxs[idxs]]
+    if find_max_positions:
+        maxpos = numpy.zeros(labels.max() + 2, int)
+        maxpos[labels] = positions
+        result += [maxpos[idxs]]
+    if find_median:
+        locs = numpy.arange(len(labels))
+        lo = numpy.zeros(labels.max() + 2, numpy.int_)
+        lo[labels[::-1]] = locs[::-1]
+        hi = numpy.zeros(labels.max() + 2, numpy.int_)
+        hi[labels] = locs
+        lo = lo[idxs]
+        hi = hi[idxs]
+        # lo is an index to the lowest value in input for each label,
+        # hi is an index to the largest value.
+        # move them to be either the same ((hi - lo) % 2 == 0) or next
+        # to each other ((hi - lo) % 2 == 1), then average.
+        step = (hi - lo) // 2
+        lo += step
+        hi -= step
+        if (np.issubdtype(input.dtype, np.integer)
+                or np.issubdtype(input.dtype, np.bool_)):
+            # avoid integer overflow or boolean addition (gh-12836)
+            result += [(input[lo].astype('d') + input[hi].astype('d')) / 2.0]
+        else:
+            result += [(input[lo] + input[hi]) / 2.0]
+
+    return result
+
+
+def minimum(input, labels=None, index=None):
+    """
+    Calculate the minimum of the values of an array over labeled regions.
+
+    Parameters
+    ----------
+    input : array_like
+        Array_like of values. For each region specified by `labels`, the
+        minimal values of `input` over the region is computed.
+    labels : array_like, optional
+        An array_like of integers marking different regions over which the
+        minimum value of `input` is to be computed. `labels` must have the
+        same shape as `input`. If `labels` is not specified, the minimum
+        over the whole array is returned.
+    index : array_like, optional
+        A list of region labels that are taken into account for computing the
+        minima. If index is None, the minimum over all elements where `labels`
+        is non-zero is returned.
+
+    Returns
+    -------
+    minimum : float or list of floats
+        List of minima of `input` over the regions determined by `labels` and
+        whose index is in `index`. If `index` or `labels` are not specified, a
+        float is returned: the minimal value of `input` if `labels` is None,
+        and the minimal value of elements where `labels` is greater than zero
+        if `index` is None.
+
+    See Also
+    --------
+    label, maximum, median, minimum_position, extrema, sum, mean, variance,
+    standard_deviation
+
+    Notes
+    -----
+    The function returns a Python list and not a NumPy array, use
+    `np.array` to convert the list to an array.
+
+    Examples
+    --------
+    >>> from scipy import ndimage
+    >>> import numpy as np
+    >>> a = np.array([[1, 2, 0, 0],
+    ...               [5, 3, 0, 4],
+    ...               [0, 0, 0, 7],
+    ...               [9, 3, 0, 0]])
+    >>> labels, labels_nb = ndimage.label(a)
+    >>> labels
+    array([[1, 1, 0, 0],
+           [1, 1, 0, 2],
+           [0, 0, 0, 2],
+           [3, 3, 0, 0]])
+    >>> ndimage.minimum(a, labels=labels, index=np.arange(1, labels_nb + 1))
+    [1.0, 4.0, 3.0]
+    >>> ndimage.minimum(a)
+    0.0
+    >>> ndimage.minimum(a, labels=labels)
+    1.0
+
+    """
+    return _select(input, labels, index, find_min=True)[0]
+
+
+def maximum(input, labels=None, index=None):
+    """
+    Calculate the maximum of the values of an array over labeled regions.
+
+    Parameters
+    ----------
+    input : array_like
+        Array_like of values. For each region specified by `labels`, the
+        maximal values of `input` over the region is computed.
+    labels : array_like, optional
+        An array of integers marking different regions over which the
+        maximum value of `input` is to be computed. `labels` must have the
+        same shape as `input`. If `labels` is not specified, the maximum
+        over the whole array is returned.
+    index : array_like, optional
+        A list of region labels that are taken into account for computing the
+        maxima. If index is None, the maximum over all elements where `labels`
+        is non-zero is returned.
+
+    Returns
+    -------
+    output : float or list of floats
+        List of maxima of `input` over the regions determined by `labels` and
+        whose index is in `index`. If `index` or `labels` are not specified, a
+        float is returned: the maximal value of `input` if `labels` is None,
+        and the maximal value of elements where `labels` is greater than zero
+        if `index` is None.
+
+    See Also
+    --------
+    label, minimum, median, maximum_position, extrema, sum, mean, variance,
+    standard_deviation
+
+    Notes
+    -----
+    The function returns a Python list and not a NumPy array, use
+    `np.array` to convert the list to an array.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.arange(16).reshape((4,4))
+    >>> a
+    array([[ 0,  1,  2,  3],
+           [ 4,  5,  6,  7],
+           [ 8,  9, 10, 11],
+           [12, 13, 14, 15]])
+    >>> labels = np.zeros_like(a)
+    >>> labels[:2,:2] = 1
+    >>> labels[2:, 1:3] = 2
+    >>> labels
+    array([[1, 1, 0, 0],
+           [1, 1, 0, 0],
+           [0, 2, 2, 0],
+           [0, 2, 2, 0]])
+    >>> from scipy import ndimage
+    >>> ndimage.maximum(a)
+    15.0
+    >>> ndimage.maximum(a, labels=labels, index=[1,2])
+    [5.0, 14.0]
+    >>> ndimage.maximum(a, labels=labels)
+    14.0
+
+    >>> b = np.array([[1, 2, 0, 0],
+    ...               [5, 3, 0, 4],
+    ...               [0, 0, 0, 7],
+    ...               [9, 3, 0, 0]])
+    >>> labels, labels_nb = ndimage.label(b)
+    >>> labels
+    array([[1, 1, 0, 0],
+           [1, 1, 0, 2],
+           [0, 0, 0, 2],
+           [3, 3, 0, 0]])
+    >>> ndimage.maximum(b, labels=labels, index=np.arange(1, labels_nb + 1))
+    [5.0, 7.0, 9.0]
+
+    """
+    return _select(input, labels, index, find_max=True)[0]
+
+
+def median(input, labels=None, index=None):
+    """
+    Calculate the median of the values of an array over labeled regions.
+
+    Parameters
+    ----------
+    input : array_like
+        Array_like of values. For each region specified by `labels`, the
+        median value of `input` over the region is computed.
+    labels : array_like, optional
+        An array_like of integers marking different regions over which the
+        median value of `input` is to be computed. `labels` must have the
+        same shape as `input`. If `labels` is not specified, the median
+        over the whole array is returned.
+    index : array_like, optional
+        A list of region labels that are taken into account for computing the
+        medians. If index is None, the median over all elements where `labels`
+        is non-zero is returned.
+
+    Returns
+    -------
+    median : float or list of floats
+        List of medians of `input` over the regions determined by `labels` and
+        whose index is in `index`. If `index` or `labels` are not specified, a
+        float is returned: the median value of `input` if `labels` is None,
+        and the median value of elements where `labels` is greater than zero
+        if `index` is None.
+
+    See Also
+    --------
+    label, minimum, maximum, extrema, sum, mean, variance, standard_deviation
+
+    Notes
+    -----
+    The function returns a Python list and not a NumPy array, use
+    `np.array` to convert the list to an array.
+
+    Examples
+    --------
+    >>> from scipy import ndimage
+    >>> import numpy as np
+    >>> a = np.array([[1, 2, 0, 1],
+    ...               [5, 3, 0, 4],
+    ...               [0, 0, 0, 7],
+    ...               [9, 3, 0, 0]])
+    >>> labels, labels_nb = ndimage.label(a)
+    >>> labels
+    array([[1, 1, 0, 2],
+           [1, 1, 0, 2],
+           [0, 0, 0, 2],
+           [3, 3, 0, 0]])
+    >>> ndimage.median(a, labels=labels, index=np.arange(1, labels_nb + 1))
+    [2.5, 4.0, 6.0]
+    >>> ndimage.median(a)
+    1.0
+    >>> ndimage.median(a, labels=labels)
+    3.0
+
+    """
+    return _select(input, labels, index, find_median=True)[0]
+
+
+def minimum_position(input, labels=None, index=None):
+    """
+    Find the positions of the minimums of the values of an array at labels.
+
+    Parameters
+    ----------
+    input : array_like
+        Array_like of values.
+    labels : array_like, optional
+        An array of integers marking different regions over which the
+        position of the minimum value of `input` is to be computed.
+        `labels` must have the same shape as `input`. If `labels` is not
+        specified, the location of the first minimum over the whole
+        array is returned.
+
+        The `labels` argument only works when `index` is specified.
+    index : array_like, optional
+        A list of region labels that are taken into account for finding the
+        location of the minima. If `index` is None, the ``first`` minimum
+        over all elements where `labels` is non-zero is returned.
+
+        The `index` argument only works when `labels` is specified.
+
+    Returns
+    -------
+    output : list of tuples of ints
+        Tuple of ints or list of tuples of ints that specify the location
+        of minima of `input` over the regions determined by `labels` and
+        whose index is in `index`.
+
+        If `index` or `labels` are not specified, a tuple of ints is
+        returned specifying the location of the first minimal value of `input`.
+
+    See Also
+    --------
+    label, minimum, median, maximum_position, extrema, sum, mean, variance,
+    standard_deviation
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array([[10, 20, 30],
+    ...               [40, 80, 100],
+    ...               [1, 100, 200]])
+    >>> b = np.array([[1, 2, 0, 1],
+    ...               [5, 3, 0, 4],
+    ...               [0, 0, 0, 7],
+    ...               [9, 3, 0, 0]])
+
+    >>> from scipy import ndimage
+
+    >>> ndimage.minimum_position(a)
+    (2, 0)
+    >>> ndimage.minimum_position(b)
+    (0, 2)
+
+    Features to process can be specified using `labels` and `index`:
+
+    >>> label, pos = ndimage.label(a)
+    >>> ndimage.minimum_position(a, label, index=np.arange(1, pos+1))
+    [(2, 0)]
+
+    >>> label, pos = ndimage.label(b)
+    >>> ndimage.minimum_position(b, label, index=np.arange(1, pos+1))
+    [(0, 0), (0, 3), (3, 1)]
+
+    """
+    dims = numpy.array(numpy.asarray(input).shape)
+    # see numpy.unravel_index to understand this line.
+    dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1]
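+    # e.g. dims = (4, 4) gives dim_prod = [4, 1], so flat index 13 maps
+    # to ((13 // 4) % 4, (13 // 1) % 4) == (3, 1).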
+
+    result = _select(input, labels, index, find_min_positions=True)[0]
+
+    if numpy.isscalar(result):
+        return tuple((result // dim_prod) % dims)
+
+    return [tuple(v) for v in (result.reshape(-1, 1) // dim_prod) % dims]
+
+
+def maximum_position(input, labels=None, index=None):
+    """
+    Find the positions of the maximums of the values of an array at labels.
+
+    For each region specified by `labels`, the position of the maximum
+    value of `input` within the region is returned.
+
+    Parameters
+    ----------
+    input : array_like
+        Array_like of values.
+    labels : array_like, optional
+        An array of integers marking different regions over which the
+        position of the maximum value of `input` is to be computed.
+        `labels` must have the same shape as `input`. If `labels` is not
+        specified, the location of the first maximum over the whole
+        array is returned.
+
+        The `labels` argument only works when `index` is specified.
+    index : array_like, optional
+        A list of region labels that are taken into account for finding the
+        location of the maxima. If `index` is None, the first maximum
+        over all elements where `labels` is non-zero is returned.
+
+        The `index` argument only works when `labels` is specified.
+
+    Returns
+    -------
+    output : list of tuples of ints
+        List of tuples of ints that specify the location of maxima of
+        `input` over the regions determined by `labels` and whose index
+        is in `index`.
+
+        If `index` or `labels` are not specified, a tuple of ints is
+        returned specifying the location of the ``first`` maximal value
+        of `input`.
+
+    See also
+    --------
+    label, minimum, median, maximum_position, extrema, sum, mean, variance,
+    standard_deviation
+
+    Examples
+    --------
+    >>> from scipy import ndimage
+    >>> import numpy as np
+    >>> a = np.array([[1, 2, 0, 0],
+    ...               [5, 3, 0, 4],
+    ...               [0, 0, 0, 7],
+    ...               [9, 3, 0, 0]])
+    >>> ndimage.maximum_position(a)
+    (3, 0)
+
+    Features to process can be specified using `labels` and `index`:
+
+    >>> lbl = np.array([[0, 1, 2, 3],
+    ...                 [0, 1, 2, 3],
+    ...                 [0, 1, 2, 3],
+    ...                 [0, 1, 2, 3]])
+    >>> ndimage.maximum_position(a, lbl, 1)
+    (1, 1)
+
+    If no index is given, non-zero `labels` are processed:
+
+    >>> ndimage.maximum_position(a, lbl)
+    (2, 3)
+
+    If there are no maxima, the position of the first element is returned:
+
+    >>> ndimage.maximum_position(a, lbl, 2)
+    (0, 2)
+
+    """
+    dims = numpy.array(numpy.asarray(input).shape)
+    # see numpy.unravel_index to understand this line.
+    dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1]
+
+    result = _select(input, labels, index, find_max_positions=True)[0]
+
+    if numpy.isscalar(result):
+        return tuple((result // dim_prod) % dims)
+
+    return [tuple(v) for v in (result.reshape(-1, 1) // dim_prod) % dims]
+
+
+def extrema(input, labels=None, index=None):
+    """
+    Calculate the minimums and maximums of the values of an array
+    at labels, along with their positions.
+
+    Parameters
+    ----------
+    input : ndarray
+        N-D image data to process.
+    labels : ndarray, optional
+        Labels of features in input.
+        If not None, must be same shape as `input`.
+    index : int or sequence of ints, optional
+        Labels to include in output. If None (default), all values where
+        `labels` is non-zero are used.
+
+    Returns
+    -------
+    minimums, maximums : int or ndarray
+        Values of minimums and maximums in each feature.
+    min_positions, max_positions : tuple or list of tuples
+        Each tuple gives the N-D coordinates of the corresponding minimum
+        or maximum.
+
+    See Also
+    --------
+    maximum, minimum, maximum_position, minimum_position, center_of_mass
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array([[1, 2, 0, 0],
+    ...               [5, 3, 0, 4],
+    ...               [0, 0, 0, 7],
+    ...               [9, 3, 0, 0]])
+    >>> from scipy import ndimage
+    >>> ndimage.extrema(a)
+    (0, 9, (0, 2), (3, 0))
+
+    Features to process can be specified using `labels` and `index`:
+
+    >>> lbl, nlbl = ndimage.label(a)
+    >>> ndimage.extrema(a, lbl, index=np.arange(1, nlbl+1))
+    (array([1, 4, 3]),
+     array([5, 7, 9]),
+     [(0, 0), (1, 3), (3, 1)],
+     [(1, 0), (2, 3), (3, 0)])
+
+    If no index is given, non-zero `labels` are processed:
+
+    >>> ndimage.extrema(a, lbl)
+    (1, 9, (0, 0), (3, 0))
+
+    """
+    dims = numpy.array(numpy.asarray(input).shape)
+    # see numpy.unravel_index to understand this line.
+    dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1]
+
+    minimums, min_positions, maximums, max_positions = _select(input, labels,
+                                                               index,
+                                                               find_min=True,
+                                                               find_max=True,
+                                                               find_min_positions=True,
+                                                               find_max_positions=True)
+
+    if numpy.isscalar(minimums):
+        return (minimums, maximums, tuple((min_positions // dim_prod) % dims),
+                tuple((max_positions // dim_prod) % dims))
+
+    min_positions = [tuple(v) for v in (min_positions.reshape(-1, 1) // dim_prod) % dims]
+    max_positions = [tuple(v) for v in (max_positions.reshape(-1, 1) // dim_prod) % dims]
+
+    return minimums, maximums, min_positions, max_positions
+
+
+def center_of_mass(input, labels=None, index=None):
+    """
+    Calculate the center of mass of the values of an array at labels.
+
+    Parameters
+    ----------
+    input : ndarray
+        Data from which to calculate center-of-mass. The masses can either
+        be positive or negative.
+    labels : ndarray, optional
+        Labels for objects in `input`, as generated by `ndimage.label`.
+        Only used with `index`. Dimensions must be the same as `input`.
+    index : int or sequence of ints, optional
+        Labels for which to calculate centers-of-mass. If not specified,
+        the combined center of mass of all labels greater than zero
+        will be calculated. Only used with `labels`.
+
+    Returns
+    -------
+    center_of_mass : tuple, or list of tuples
+        Coordinates of centers-of-mass.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array(([0,0,0,0],
+    ...               [0,1,1,0],
+    ...               [0,1,1,0],
+    ...               [0,1,1,0]))
+    >>> from scipy import ndimage
+    >>> ndimage.center_of_mass(a)
+    (2.0, 1.5)
+
+    Calculation of multiple objects in an image:
+
+    >>> b = np.array(([0,1,1,0],
+    ...               [0,1,0,0],
+    ...               [0,0,0,0],
+    ...               [0,0,1,1],
+    ...               [0,0,1,1]))
+    >>> lbl = ndimage.label(b)[0]
+    >>> ndimage.center_of_mass(b, lbl, [1,2])
+    [(0.3333333333333333, 1.3333333333333333), (3.5, 2.5)]
+
+    Negative masses are also accepted, which can occur for example when
+    bias is removed from measured data due to random noise.
+
+    >>> c = np.array(([-1,0,0,0],
+    ...               [0,-1,-1,0],
+    ...               [0,1,-1,0],
+    ...               [0,1,1,0]))
+    >>> ndimage.center_of_mass(c)
+    (-4.0, 1.0)
+
+    If there are division by zero issues, the function does not raise an
+    error but rather issues a RuntimeWarning before returning inf and/or NaN.
+
+    >>> d = np.array([-1, 1])
+    >>> ndimage.center_of_mass(d)
+    (inf,)
+    """
+    normalizer = sum(input, labels, index)
+    grids = numpy.ogrid[[slice(0, i) for i in input.shape]]
+
+    results = [sum(input * grids[dir].astype(float), labels, index) / normalizer
+               for dir in range(input.ndim)]
+
+    if numpy.isscalar(results[0]):
+        return tuple(results)
+
+    return [tuple(v) for v in numpy.array(results).T]
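+
+
+# --- Editor's note: illustrative sketch, not part of scipy. ---
+# Per axis, the ogrid-based loop above computes
+# sum(input * axis_coordinates) / sum(input); a dense-grid check:
+#
+#     >>> import numpy as np
+#     >>> a = np.array([[0., 1.], [1., 2.]])
+#     >>> [float((a * g).sum() / a.sum()) for g in np.indices(a.shape)]
+#     [0.75, 0.75]
+#
+# which agrees with center_of_mass(a) == (0.75, 0.75).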
+
+
+def histogram(input, min, max, bins, labels=None, index=None):
+    """
+    Calculate the histogram of the values of an array, optionally at labels.
+
+    Histogram calculates the frequency of values in an array within bins
+    determined by `min`, `max`, and `bins`. The `labels` and `index`
+    keywords can limit the scope of the histogram to specified sub-regions
+    within the array.
+
+    Parameters
+    ----------
+    input : array_like
+        Data for which to calculate histogram.
+    min, max : int
+        Minimum and maximum values of range of histogram bins.
+    bins : int
+        Number of bins.
+    labels : array_like, optional
+        Labels for objects in `input`.
+        If not None, must be same shape as `input`.
+    index : int or sequence of ints, optional
+        Label or labels for which to calculate histogram. If None, all values
+        where `labels` is greater than zero are used.
+
+    Returns
+    -------
+    hist : ndarray
+        Histogram counts.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array([[ 0.    ,  0.2146,  0.5962,  0.    ],
+    ...               [ 0.    ,  0.7778,  0.    ,  0.    ],
+    ...               [ 0.    ,  0.    ,  0.    ,  0.    ],
+    ...               [ 0.    ,  0.    ,  0.7181,  0.2787],
+    ...               [ 0.    ,  0.    ,  0.6573,  0.3094]])
+    >>> from scipy import ndimage
+    >>> ndimage.histogram(a, 0, 1, 10)
+    array([13,  0,  2,  1,  0,  1,  1,  2,  0,  0])
+
+    With labels and no indices, non-zero elements are counted:
+
+    >>> lbl, nlbl = ndimage.label(a)
+    >>> ndimage.histogram(a, 0, 1, 10, lbl)
+    array([0, 0, 2, 1, 0, 1, 1, 2, 0, 0])
+
+    Indices can be used to count only certain objects:
+
+    >>> ndimage.histogram(a, 0, 1, 10, lbl, 2)
+    array([0, 0, 1, 1, 0, 0, 1, 1, 0, 0])
+
+    """
+    _bins = numpy.linspace(min, max, bins + 1)
+
+    def _hist(vals):
+        return numpy.histogram(vals, _bins)[0]
+
+    return labeled_comprehension(input, labels, index, _hist, object, None,
+                                 pass_positions=False)
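+
+
+# --- Editor's note: illustrative sketch, not part of scipy. ---
+# For a single label, the labeled_comprehension above reduces to masking
+# the input and calling numpy.histogram on the surviving values:
+#
+#     >>> import numpy as np
+#     >>> vals = np.array([0.1, 0.4, 0.6, 0.9])
+#     >>> lbl = np.array([1, 1, 2, 2])
+#     >>> np.histogram(vals[lbl == 2], bins=np.linspace(0, 1, 5))[0]
+#     array([0, 0, 1, 1])
+#
+# which matches histogram(vals, 0, 1, 4, labels=lbl, index=2).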
+
+
+def watershed_ift(input, markers, structure=None, output=None):
+    """
+    Apply watershed from markers using the image foresting transform algorithm.
+
+    Parameters
+    ----------
+    input : array_like
+        Input image; only unsigned 8-bit and 16-bit integer types are
+        supported.
+    markers : array_like
+        Markers are points within each watershed that form the beginning
+        of the process. Negative markers are considered background markers
+        which are processed after the other markers.
+    structure : structure element, optional
+        A structuring element defining the connectivity of the object can be
+        provided. If None, an element is generated with a squared
+        connectivity equal to one.
+    output : ndarray, optional
+        An output array can optionally be provided; it must have the same
+        shape as `input`.
+
+    Returns
+    -------
+    watershed_ift : ndarray
+        Output.  Same shape as `input`.
+
+    References
+    ----------
+    .. [1] A.X. Falcao, J. Stolfi and R. de Alencar Lotufo, "The image
+           foresting transform: theory, algorithms, and applications",
+           Pattern Analysis and Machine Intelligence, vol. 26, pp. 19-29, 2004.
+
+    """
+    input = numpy.asarray(input)
+    if input.dtype.type not in [numpy.uint8, numpy.uint16]:
+        raise TypeError('only 8- and 16-bit unsigned inputs are supported')
+
+    if structure is None:
+        structure = _morphology.generate_binary_structure(input.ndim, 1)
+    structure = numpy.asarray(structure, dtype=bool)
+    if structure.ndim != input.ndim:
+        raise RuntimeError('structure and input must have equal rank')
+    for ii in structure.shape:
+        if ii != 3:
+            raise RuntimeError('structure dimensions must be equal to 3')
+
+    if not structure.flags.contiguous:
+        structure = structure.copy()
+    markers = numpy.asarray(markers)
+    if input.shape != markers.shape:
+        raise RuntimeError('input and markers must have equal shape')
+
+    integral_types = [numpy.int8,
+                      numpy.int16,
+                      numpy.int32,
+                      numpy.int_,
+                      numpy.int64,
+                      numpy.intc,
+                      numpy.intp]
+
+    if markers.dtype.type not in integral_types:
+        raise RuntimeError('marker should be of integer type')
+
+    if isinstance(output, numpy.ndarray):
+        if output.dtype.type not in integral_types:
+            raise RuntimeError('output should be of integer type')
+    else:
+        output = markers.dtype
+
+    output = _ni_support._get_output(output, input)
+    _nd_image.watershed_ift(input, markers, structure, output)
+    return output
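+
+
+# --- Editor's note: illustrative sketch, not part of scipy. ---
+# Minimal watershed_ift usage: two seeds flood a uint8 image and meet at
+# the bright (high-cost) ridge in the middle column:
+#
+#     >>> import numpy as np
+#     >>> from scipy import ndimage
+#     >>> img = np.zeros((5, 5), dtype=np.uint8)
+#     >>> img[:, 2] = 255                  # ridge separating two basins
+#     >>> markers = np.zeros((5, 5), dtype=np.int8)
+#     >>> markers[2, 0] = 1                # left seed
+#     >>> markers[2, 4] = 2                # right seed
+#     >>> out = ndimage.watershed_ift(img, markers)
+#     >>> int(out[2, 1]), int(out[2, 3])   # one basin on each side
+#     (1, 2)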
diff --git a/__packaged__/coreml/.python_dependencies/scipy/ndimage/_morphology.py b/__packaged__/coreml/.python_dependencies/scipy/ndimage/_morphology.py
new file mode 100644
index 00000000..a3d8f3e3
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/ndimage/_morphology.py
@@ -0,0 +1,2342 @@
+# Copyright (C) 2003-2005 Peter J. Verveer
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above
+#    copyright notice, this list of conditions and the following
+#    disclaimer in the documentation and/or other materials provided
+#    with the distribution.
+#
+# 3. The name of the author may not be used to endorse or promote
+#    products derived from this software without specific prior
+#    written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import warnings
+import operator
+
+import numpy
+from . import _ni_support
+from . import _nd_image
+from . import _filters
+
+__all__ = ['iterate_structure', 'generate_binary_structure', 'binary_erosion',
+           'binary_dilation', 'binary_opening', 'binary_closing',
+           'binary_hit_or_miss', 'binary_propagation', 'binary_fill_holes',
+           'grey_erosion', 'grey_dilation', 'grey_opening', 'grey_closing',
+           'morphological_gradient', 'morphological_laplace', 'white_tophat',
+           'black_tophat', 'distance_transform_bf', 'distance_transform_cdt',
+           'distance_transform_edt']
+
+
+def _center_is_true(structure, origin):
+    structure = numpy.array(structure)
+    coor = tuple([oo + ss // 2 for ss, oo in zip(structure.shape,
+                                                 origin)])
+    return bool(structure[coor])
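+
+
+# --- Editor's note: illustrative sketch, not part of scipy. ---
+# With the default origin (0, 0), the element tested above is the
+# geometric center of the structuring element, e.g. (1, 1) for 3x3:
+#
+#     >>> import numpy as np
+#     >>> s = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
+#     >>> tuple(o + n // 2 for n, o in zip(s.shape, (0, 0)))
+#     (1, 1)
+#     >>> bool(s[1, 1])
+#     True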
+
+
+def iterate_structure(structure, iterations, origin=None):
+    """
+    Iterate a structure by dilating it with itself.
+
+    Parameters
+    ----------
+    structure : array_like
+       Structuring element (an array of bools, for example), to be dilated with
+       itself.
+    iterations : int
+       Number of dilations performed on the structure with itself.
+    origin : int or tuple of ints, optional
+        If origin is None, only the iterated structure is returned. If
+        not, a tuple of the iterated structure and the modified origin is
+        returned.
+
+    Returns
+    -------
+    iterate_structure : ndarray of bools
+        A new structuring element obtained by dilating `structure`
+        (`iterations` - 1) times with itself.
+
+    See also
+    --------
+    generate_binary_structure
+
+    Examples
+    --------
+    >>> from scipy import ndimage
+    >>> struct = ndimage.generate_binary_structure(2, 1)
+    >>> struct.astype(int)
+    array([[0, 1, 0],
+           [1, 1, 1],
+           [0, 1, 0]])
+    >>> ndimage.iterate_structure(struct, 2).astype(int)
+    array([[0, 0, 1, 0, 0],
+           [0, 1, 1, 1, 0],
+           [1, 1, 1, 1, 1],
+           [0, 1, 1, 1, 0],
+           [0, 0, 1, 0, 0]])
+    >>> ndimage.iterate_structure(struct, 3).astype(int)
+    array([[0, 0, 0, 1, 0, 0, 0],
+           [0, 0, 1, 1, 1, 0, 0],
+           [0, 1, 1, 1, 1, 1, 0],
+           [1, 1, 1, 1, 1, 1, 1],
+           [0, 1, 1, 1, 1, 1, 0],
+           [0, 0, 1, 1, 1, 0, 0],
+           [0, 0, 0, 1, 0, 0, 0]])
+
+    """
+    structure = numpy.asarray(structure)
+    if iterations < 2:
+        return structure.copy()
+    ni = iterations - 1
+    shape = [ii + ni * (ii - 1) for ii in structure.shape]
+    pos = [ni * (structure.shape[ii] // 2) for ii in range(len(shape))]
+    slc = tuple(slice(pos[ii], pos[ii] + structure.shape[ii], None)
+                for ii in range(len(shape)))
+    out = numpy.zeros(shape, bool)
+    out[slc] = structure != 0
+    out = binary_dilation(out, structure, iterations=ni)
+    if origin is None:
+        return out
+    else:
+        origin = _ni_support._normalize_sequence(origin, structure.ndim)
+        origin = [iterations * o for o in origin]
+        return out, origin
+
+
+def generate_binary_structure(rank, connectivity):
+    """
+    Generate a binary structure for binary morphological operations.
+
+    Parameters
+    ----------
+    rank : int
+         Number of dimensions of the array to which the structuring element
+         will be applied, as returned by `np.ndim`.
+    connectivity : int
+         `connectivity` determines which elements of the output array belong
+         to the structure, i.e., are considered as neighbors of the central
+         element. Elements up to a squared distance of `connectivity` from
+         the center are considered neighbors. `connectivity` may range from 1
+         (no diagonal elements are neighbors) to `rank` (all elements are
+         neighbors).
+
+    Returns
+    -------
+    output : ndarray of bools
+         Structuring element which may be used for binary morphological
+         operations, with `rank` dimensions and all dimensions equal to 3.
+
+    See also
+    --------
+    iterate_structure, binary_dilation, binary_erosion
+
+    Notes
+    -----
+    `generate_binary_structure` can only create structuring elements with
+    dimensions equal to 3, i.e., minimal dimensions. For larger structuring
+    elements, which are useful e.g. for eroding large objects, one may either
+    use `iterate_structure`, or create custom arrays directly with
+    numpy functions such as `numpy.ones`.
+
+    Examples
+    --------
+    >>> from scipy import ndimage
+    >>> import numpy as np
+    >>> struct = ndimage.generate_binary_structure(2, 1)
+    >>> struct
+    array([[False,  True, False],
+           [ True,  True,  True],
+           [False,  True, False]], dtype=bool)
+    >>> a = np.zeros((5,5))
+    >>> a[2, 2] = 1
+    >>> a
+    array([[ 0.,  0.,  0.,  0.,  0.],
+           [ 0.,  0.,  0.,  0.,  0.],
+           [ 0.,  0.,  1.,  0.,  0.],
+           [ 0.,  0.,  0.,  0.,  0.],
+           [ 0.,  0.,  0.,  0.,  0.]])
+    >>> b = ndimage.binary_dilation(a, structure=struct).astype(a.dtype)
+    >>> b
+    array([[ 0.,  0.,  0.,  0.,  0.],
+           [ 0.,  0.,  1.,  0.,  0.],
+           [ 0.,  1.,  1.,  1.,  0.],
+           [ 0.,  0.,  1.,  0.,  0.],
+           [ 0.,  0.,  0.,  0.,  0.]])
+    >>> ndimage.binary_dilation(b, structure=struct).astype(a.dtype)
+    array([[ 0.,  0.,  1.,  0.,  0.],
+           [ 0.,  1.,  1.,  1.,  0.],
+           [ 1.,  1.,  1.,  1.,  1.],
+           [ 0.,  1.,  1.,  1.,  0.],
+           [ 0.,  0.,  1.,  0.,  0.]])
+    >>> struct = ndimage.generate_binary_structure(2, 2)
+    >>> struct
+    array([[ True,  True,  True],
+           [ True,  True,  True],
+           [ True,  True,  True]], dtype=bool)
+    >>> struct = ndimage.generate_binary_structure(3, 1)
+    >>> struct # no diagonal elements
+    array([[[False, False, False],
+            [False,  True, False],
+            [False, False, False]],
+           [[False,  True, False],
+            [ True,  True,  True],
+            [False,  True, False]],
+           [[False, False, False],
+            [False,  True, False],
+            [False, False, False]]], dtype=bool)
+
+    """
+    if connectivity < 1:
+        connectivity = 1
+    if rank < 1:
+        return numpy.array(True, dtype=bool)
+    output = numpy.fabs(numpy.indices([3] * rank) - 1)
+    output = numpy.add.reduce(output, 0)
+    return output <= connectivity
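+
+
+# --- Editor's note: illustrative sketch, not part of scipy. ---
+# For offsets restricted to {-1, 0, 1}, |offset| equals offset**2, so the
+# summed absolute offsets above are also the squared Euclidean distance
+# the docstring refers to:
+#
+#     >>> import numpy as np
+#     >>> offsets = np.indices((3, 3)) - 1       # per-axis offsets
+#     >>> dist = np.abs(offsets).sum(axis=0)     # distance from center
+#     >>> (dist <= 1).astype(int)                # connectivity = 1
+#     array([[0, 1, 0],
+#            [1, 1, 1],
+#            [0, 1, 0]])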
+
+
+def _binary_erosion(input, structure, iterations, mask, output,
+                    border_value, origin, invert, brute_force):
+    try:
+        iterations = operator.index(iterations)
+    except TypeError as e:
+        raise TypeError('iterations parameter should be an integer') from e
+
+    input = numpy.asarray(input)
+    if numpy.iscomplexobj(input):
+        raise TypeError('Complex type not supported')
+    if structure is None:
+        structure = generate_binary_structure(input.ndim, 1)
+    else:
+        structure = numpy.asarray(structure, dtype=bool)
+    if structure.ndim != input.ndim:
+        raise RuntimeError('structure and input must have same dimensionality')
+    if not structure.flags.contiguous:
+        structure = structure.copy()
+    if numpy.prod(structure.shape, axis=0) < 1:
+        raise RuntimeError('structure must not be empty')
+    if mask is not None:
+        mask = numpy.asarray(mask)
+        if mask.shape != input.shape:
+            raise RuntimeError('mask and input must have equal sizes')
+    origin = _ni_support._normalize_sequence(origin, input.ndim)
+    cit = _center_is_true(structure, origin)
+    if isinstance(output, numpy.ndarray):
+        if numpy.iscomplexobj(output):
+            raise TypeError('Complex output type not supported')
+    else:
+        output = bool
+    output = _ni_support._get_output(output, input)
+    temp_needed = numpy.may_share_memory(input, output)
+    if temp_needed:
+        # input and output arrays cannot share memory
+        temp = output
+        output = _ni_support._get_output(output.dtype, input)
+    if iterations == 1:
+        _nd_image.binary_erosion(input, structure, mask, output,
+                                 border_value, origin, invert, cit, 0)
+    elif cit and not brute_force:
+        changed, coordinate_list = _nd_image.binary_erosion(
+            input, structure, mask, output,
+            border_value, origin, invert, cit, 1)
+        # binary_erosion2 expects the reflected structuring element and a
+        # correspondingly adjusted origin.
+        structure = structure[tuple([slice(None, None, -1)] *
+                                    structure.ndim)]
+        for ii in range(len(origin)):
+            origin[ii] = -origin[ii]
+            if not structure.shape[ii] & 1:
+                origin[ii] -= 1
+        if mask is not None:
+            mask = numpy.asarray(mask, dtype=numpy.int8)
+        if not structure.flags.contiguous:
+            structure = structure.copy()
+        _nd_image.binary_erosion2(output, structure, mask, iterations - 1,
+                                  origin, invert, coordinate_list)
+    else:
+        # Brute-force path: re-run the erosion over the whole array each
+        # iteration, ping-ponging between two buffers so that the final
+        # result lands in `output`.
+        tmp_in = numpy.empty_like(input, dtype=bool)
+        tmp_out = output
+        if iterations >= 1 and not iterations & 1:
+            # An even iteration count would leave the final result in the
+            # scratch buffer; swap so it lands in `output` instead.
+            tmp_in, tmp_out = tmp_out, tmp_in
+        changed = _nd_image.binary_erosion(
+            input, structure, mask, tmp_out,
+            border_value, origin, invert, cit, 0)
+        ii = 1
+        while ii < iterations or (iterations < 1 and changed):
+            tmp_in, tmp_out = tmp_out, tmp_in
+            changed = _nd_image.binary_erosion(
+                tmp_in, structure, mask, tmp_out,
+                border_value, origin, invert, cit, 0)
+            ii += 1
+    if temp_needed:
+        temp[...] = output
+        output = temp
+    return output
+
+
+def binary_erosion(input, structure=None, iterations=1, mask=None, output=None,
+                   border_value=0, origin=0, brute_force=False):
+    """
+    Multidimensional binary erosion with a given structuring element.
+
+    Binary erosion is a mathematical morphology operation used for image
+    processing.
+
+    Parameters
+    ----------
+    input : array_like
+        Binary image to be eroded. Non-zero (True) elements form
+        the subset to be eroded.
+    structure : array_like, optional
+        Structuring element used for the erosion. Non-zero elements are
+        considered True. If no structuring element is provided, an element
+        is generated with a square connectivity equal to one.
+    iterations : int, optional
+        The erosion is repeated `iterations` times (one, by default).
+        If iterations is less than 1, the erosion is repeated until the
+        result does not change anymore.
+    mask : array_like, optional
+        If a mask is given, only those elements with a True value at
+        the corresponding mask element are modified at each iteration.
+    output : ndarray, optional
+        Array of the same shape as input, into which the output is placed.
+        By default, a new array is created.
+    border_value : int (cast to 0 or 1), optional
+        Value at the border in the output array.
+    origin : int or tuple of ints, optional
+        Placement of the filter, by default 0.
+    brute_force : boolean, optional
+        Memory condition: if False, only the pixels whose value was changed in
+        the last iteration are tracked as candidates to be updated (eroded) in
+        the current iteration; if True, all pixels are considered as candidates
+        for erosion, regardless of what happened in the previous iteration.
+        False by default.
+
+    Returns
+    -------
+    binary_erosion : ndarray of bools
+        Erosion of the input by the structuring element.
+
+    See also
+    --------
+    grey_erosion, binary_dilation, binary_closing, binary_opening,
+    generate_binary_structure
+
+    Notes
+    -----
+    Erosion [1]_ is a mathematical morphology operation [2]_ that uses a
+    structuring element for shrinking the shapes in an image. The binary
+    erosion of an image by a structuring element is the locus of the points
+    where a superimposition of the structuring element centered on the point
+    is entirely contained in the set of non-zero elements of the image.
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Erosion_%28morphology%29
+    .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology
+
+    Examples
+    --------
+    >>> from scipy import ndimage
+    >>> import numpy as np
+    >>> a = np.zeros((7,7), dtype=int)
+    >>> a[1:6, 2:5] = 1
+    >>> a
+    array([[0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 1, 1, 1, 0, 0],
+           [0, 0, 1, 1, 1, 0, 0],
+           [0, 0, 1, 1, 1, 0, 0],
+           [0, 0, 1, 1, 1, 0, 0],
+           [0, 0, 1, 1, 1, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0]])
+    >>> ndimage.binary_erosion(a).astype(a.dtype)
+    array([[0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 1, 0, 0, 0],
+           [0, 0, 0, 1, 0, 0, 0],
+           [0, 0, 0, 1, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0]])
+    >>> #Erosion removes objects smaller than the structure
+    >>> ndimage.binary_erosion(a, structure=np.ones((5,5))).astype(a.dtype)
+    array([[0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0]])
+
+    """
+    return _binary_erosion(input, structure, iterations, mask,
+                           output, border_value, origin, 0, brute_force)
+
+
+def binary_dilation(input, structure=None, iterations=1, mask=None,
+                    output=None, border_value=0, origin=0,
+                    brute_force=False):
+    """
+    Multidimensional binary dilation with the given structuring element.
+
+    Parameters
+    ----------
+    input : array_like
+        Binary array_like to be dilated. Non-zero (True) elements form
+        the subset to be dilated.
+    structure : array_like, optional
+        Structuring element used for the dilation. Non-zero elements are
+        considered True. If no structuring element is provided an element
+        is generated with a square connectivity equal to one.
+    iterations : int, optional
+        The dilation is repeated `iterations` times (one, by default).
+        If iterations is less than 1, the dilation is repeated until the
+        result does not change anymore. Only an integer number of
+        iterations is accepted.
+    mask : array_like, optional
+        If a mask is given, only those elements with a True value at
+        the corresponding mask element are modified at each iteration.
+    output : ndarray, optional
+        Array of the same shape as input, into which the output is placed.
+        By default, a new array is created.
+    border_value : int (cast to 0 or 1), optional
+        Value at the border in the output array.
+    origin : int or tuple of ints, optional
+        Placement of the filter, by default 0.
+    brute_force : boolean, optional
+        Memory condition: if False, only the pixels whose value was changed in
+        the last iteration are tracked as candidates to be updated (dilated)
+        in the current iteration; if True, all pixels are considered as
+        candidates for dilation, regardless of what happened in the previous
+        iteration. False by default.
+
+    Returns
+    -------
+    binary_dilation : ndarray of bools
+        Dilation of the input by the structuring element.
+
+    See also
+    --------
+    grey_dilation, binary_erosion, binary_closing, binary_opening,
+    generate_binary_structure
+
+    Notes
+    -----
+    Dilation [1]_ is a mathematical morphology operation [2]_ that uses a
+    structuring element for expanding the shapes in an image. The binary
+    dilation of an image by a structuring element is the locus of the points
+    covered by the structuring element, when its center lies within the
+    non-zero points of the image.
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Dilation_%28morphology%29
+    .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology
+
+    Examples
+    --------
+    >>> from scipy import ndimage
+    >>> import numpy as np
+    >>> a = np.zeros((5, 5))
+    >>> a[2, 2] = 1
+    >>> a
+    array([[ 0.,  0.,  0.,  0.,  0.],
+           [ 0.,  0.,  0.,  0.,  0.],
+           [ 0.,  0.,  1.,  0.,  0.],
+           [ 0.,  0.,  0.,  0.,  0.],
+           [ 0.,  0.,  0.,  0.,  0.]])
+    >>> ndimage.binary_dilation(a)
+    array([[False, False, False, False, False],
+           [False, False,  True, False, False],
+           [False,  True,  True,  True, False],
+           [False, False,  True, False, False],
+           [False, False, False, False, False]], dtype=bool)
+    >>> ndimage.binary_dilation(a).astype(a.dtype)
+    array([[ 0.,  0.,  0.,  0.,  0.],
+           [ 0.,  0.,  1.,  0.,  0.],
+           [ 0.,  1.,  1.,  1.,  0.],
+           [ 0.,  0.,  1.,  0.,  0.],
+           [ 0.,  0.,  0.,  0.,  0.]])
+    >>> # 3x3 structuring element with connectivity 1, used by default
+    >>> struct1 = ndimage.generate_binary_structure(2, 1)
+    >>> struct1
+    array([[False,  True, False],
+           [ True,  True,  True],
+           [False,  True, False]], dtype=bool)
+    >>> # 3x3 structuring element with connectivity 2
+    >>> struct2 = ndimage.generate_binary_structure(2, 2)
+    >>> struct2
+    array([[ True,  True,  True],
+           [ True,  True,  True],
+           [ True,  True,  True]], dtype=bool)
+    >>> ndimage.binary_dilation(a, structure=struct1).astype(a.dtype)
+    array([[ 0.,  0.,  0.,  0.,  0.],
+           [ 0.,  0.,  1.,  0.,  0.],
+           [ 0.,  1.,  1.,  1.,  0.],
+           [ 0.,  0.,  1.,  0.,  0.],
+           [ 0.,  0.,  0.,  0.,  0.]])
+    >>> ndimage.binary_dilation(a, structure=struct2).astype(a.dtype)
+    array([[ 0.,  0.,  0.,  0.,  0.],
+           [ 0.,  1.,  1.,  1.,  0.],
+           [ 0.,  1.,  1.,  1.,  0.],
+           [ 0.,  1.,  1.,  1.,  0.],
+           [ 0.,  0.,  0.,  0.,  0.]])
+    >>> ndimage.binary_dilation(a, structure=struct1,\\
+    ... iterations=2).astype(a.dtype)
+    array([[ 0.,  0.,  1.,  0.,  0.],
+           [ 0.,  1.,  1.,  1.,  0.],
+           [ 1.,  1.,  1.,  1.,  1.],
+           [ 0.,  1.,  1.,  1.,  0.],
+           [ 0.,  0.,  1.,  0.,  0.]])
+
+    """
+    input = numpy.asarray(input)
+    if structure is None:
+        structure = generate_binary_structure(input.ndim, 1)
+    origin = _ni_support._normalize_sequence(origin, input.ndim)
+    structure = numpy.asarray(structure)
+    structure = structure[tuple([slice(None, None, -1)] *
+                                structure.ndim)]
+    for ii in range(len(origin)):
+        origin[ii] = -origin[ii]
+        if not structure.shape[ii] & 1:
+            origin[ii] -= 1
+
+    return _binary_erosion(input, structure, iterations, mask,
+                           output, border_value, origin, 1, brute_force)
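+
+
+# --- Editor's note: illustrative sketch, not part of scipy. ---
+# The structure reflection and origin negation above implement the duality
+# dilation(x) == ~erosion(~x); for the default symmetric structure:
+#
+#     >>> import numpy as np
+#     >>> from scipy import ndimage
+#     >>> a = np.zeros((5, 5), dtype=bool)
+#     >>> a[2, 2] = True
+#     >>> d1 = ndimage.binary_dilation(a)
+#     >>> d2 = ~ndimage.binary_erosion(~a, border_value=1)
+#     >>> bool((d1 == d2).all())
+#     True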
+
+
+def binary_opening(input, structure=None, iterations=1, output=None,
+                   origin=0, mask=None, border_value=0, brute_force=False):
+    """
+    Multidimensional binary opening with the given structuring element.
+
+    The *opening* of an input image by a structuring element is the
+    *dilation* of the *erosion* of the image by the structuring element.
+
+    Parameters
+    ----------
+    input : array_like
+        Binary array_like to be opened. Non-zero (True) elements form
+        the subset to be opened.
+    structure : array_like, optional
+        Structuring element used for the opening. Non-zero elements are
+        considered True. If no structuring element is provided an element
+        is generated with a square connectivity equal to one (i.e., only
+        nearest neighbors are connected to the center, diagonally-connected
+        elements are not considered neighbors).
+    iterations : int, optional
+        The erosion step of the opening, then the dilation step are each
+        repeated `iterations` times (one, by default). If `iterations` is
+        less than 1, each operation is repeated until the result does
+        not change anymore. Only an integer number of iterations is accepted.
+    output : ndarray, optional
+        Array of the same shape as input, into which the output is placed.
+        By default, a new array is created.
+    origin : int or tuple of ints, optional
+        Placement of the filter, by default 0.
+    mask : array_like, optional
+        If a mask is given, only those elements with a True value at
+        the corresponding mask element are modified at each iteration.
+
+        .. versionadded:: 1.1.0
+    border_value : int (cast to 0 or 1), optional
+        Value at the border in the output array.
+
+        .. versionadded:: 1.1.0
+    brute_force : boolean, optional
+        Memory condition: if False, only the pixels whose value was changed in
+        the last iteration are tracked as candidates to be updated in the
+        current iteration; if True, all pixels are considered as candidates for
+        update, regardless of what happened in the previous iteration.
+        False by default.
+
+        .. versionadded:: 1.1.0
+
+    Returns
+    -------
+    binary_opening : ndarray of bools
+        Opening of the input by the structuring element.
+
+    See also
+    --------
+    grey_opening, binary_closing, binary_erosion, binary_dilation,
+    generate_binary_structure
+
+    Notes
+    -----
+    *Opening* [1]_ is a mathematical morphology operation [2]_ that
+    consists in the succession of an erosion and a dilation of the
+    input with the same structuring element. Opening, therefore, removes
+    objects smaller than the structuring element.
+
+    Together with *closing* (`binary_closing`), opening can be used for
+    noise removal.
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Opening_%28morphology%29
+    .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology
+
+    Examples
+    --------
+    >>> from scipy import ndimage
+    >>> import numpy as np
+    >>> a = np.zeros((5,5), dtype=int)
+    >>> a[1:4, 1:4] = 1; a[4, 4] = 1
+    >>> a
+    array([[0, 0, 0, 0, 0],
+           [0, 1, 1, 1, 0],
+           [0, 1, 1, 1, 0],
+           [0, 1, 1, 1, 0],
+           [0, 0, 0, 0, 1]])
+    >>> # Opening removes small objects
+    >>> ndimage.binary_opening(a, structure=np.ones((3,3))).astype(int)
+    array([[0, 0, 0, 0, 0],
+           [0, 1, 1, 1, 0],
+           [0, 1, 1, 1, 0],
+           [0, 1, 1, 1, 0],
+           [0, 0, 0, 0, 0]])
+    >>> # Opening can also smooth corners
+    >>> ndimage.binary_opening(a).astype(int)
+    array([[0, 0, 0, 0, 0],
+           [0, 0, 1, 0, 0],
+           [0, 1, 1, 1, 0],
+           [0, 0, 1, 0, 0],
+           [0, 0, 0, 0, 0]])
+    >>> # Opening is the dilation of the erosion of the input
+    >>> ndimage.binary_erosion(a).astype(int)
+    array([[0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0],
+           [0, 0, 1, 0, 0],
+           [0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0]])
+    >>> ndimage.binary_dilation(ndimage.binary_erosion(a)).astype(int)
+    array([[0, 0, 0, 0, 0],
+           [0, 0, 1, 0, 0],
+           [0, 1, 1, 1, 0],
+           [0, 0, 1, 0, 0],
+           [0, 0, 0, 0, 0]])
+
+    """
+    input = numpy.asarray(input)
+    if structure is None:
+        rank = input.ndim
+        structure = generate_binary_structure(rank, 1)
+
+    tmp = binary_erosion(input, structure, iterations, mask, None,
+                         border_value, origin, brute_force)
+    return binary_dilation(tmp, structure, iterations, mask, output,
+                           border_value, origin, brute_force)
+
+
+def binary_closing(input, structure=None, iterations=1, output=None,
+                   origin=0, mask=None, border_value=0, brute_force=False):
+    """
+    Multidimensional binary closing with the given structuring element.
+
+    The *closing* of an input image by a structuring element is the
+    *erosion* of the *dilation* of the image by the structuring element.
+
+    Parameters
+    ----------
+    input : array_like
+        Binary array_like to be closed. Non-zero (True) elements form
+        the subset to be closed.
+    structure : array_like, optional
+        Structuring element used for the closing. Non-zero elements are
+        considered True. If no structuring element is provided an element
+        is generated with a square connectivity equal to one (i.e., only
+        nearest neighbors are connected to the center, diagonally-connected
+        elements are not considered neighbors).
+    iterations : int, optional
+        The dilation step of the closing, then the erosion step are each
+        repeated `iterations` times (one, by default). If iterations is
+        less than 1, each operation is repeated until the result does
+        not change anymore. Only an integer number of iterations is accepted.
+    output : ndarray, optional
+        Array of the same shape as input, into which the output is placed.
+        By default, a new array is created.
+    origin : int or tuple of ints, optional
+        Placement of the filter, by default 0.
+    mask : array_like, optional
+        If a mask is given, only those elements with a True value at
+        the corresponding mask element are modified at each iteration.
+
+        .. versionadded:: 1.1.0
+    border_value : int (cast to 0 or 1), optional
+        Value at the border in the output array.
+
+        .. versionadded:: 1.1.0
+    brute_force : boolean, optional
+        Memory condition: if False, only the pixels whose value was changed in
+        the last iteration are tracked as candidates to be updated in the
+        current iteration; if True, all pixels are considered as candidates for
+        update, regardless of what happened in the previous iteration.
+        False by default.
+
+        .. versionadded:: 1.1.0
+
+    Returns
+    -------
+    binary_closing : ndarray of bools
+        Closing of the input by the structuring element.
+
+    See also
+    --------
+    grey_closing, binary_opening, binary_dilation, binary_erosion,
+    generate_binary_structure
+
+    Notes
+    -----
+    *Closing* [1]_ is a mathematical morphology operation [2]_ that
+    consists in the succession of a dilation and an erosion of the
+    input with the same structuring element. Closing therefore fills
+    holes smaller than the structuring element.
+
+    Together with *opening* (`binary_opening`), closing can be used for
+    noise removal.
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Closing_%28morphology%29
+    .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology
+
+    Examples
+    --------
+    >>> from scipy import ndimage
+    >>> import numpy as np
+    >>> a = np.zeros((5,5), dtype=int)
+    >>> a[1:-1, 1:-1] = 1; a[2,2] = 0
+    >>> a
+    array([[0, 0, 0, 0, 0],
+           [0, 1, 1, 1, 0],
+           [0, 1, 0, 1, 0],
+           [0, 1, 1, 1, 0],
+           [0, 0, 0, 0, 0]])
+    >>> # Closing removes small holes
+    >>> ndimage.binary_closing(a).astype(int)
+    array([[0, 0, 0, 0, 0],
+           [0, 1, 1, 1, 0],
+           [0, 1, 1, 1, 0],
+           [0, 1, 1, 1, 0],
+           [0, 0, 0, 0, 0]])
+    >>> # Closing is the erosion of the dilation of the input
+    >>> ndimage.binary_dilation(a).astype(int)
+    array([[0, 1, 1, 1, 0],
+           [1, 1, 1, 1, 1],
+           [1, 1, 1, 1, 1],
+           [1, 1, 1, 1, 1],
+           [0, 1, 1, 1, 0]])
+    >>> ndimage.binary_erosion(ndimage.binary_dilation(a)).astype(int)
+    array([[0, 0, 0, 0, 0],
+           [0, 1, 1, 1, 0],
+           [0, 1, 1, 1, 0],
+           [0, 1, 1, 1, 0],
+           [0, 0, 0, 0, 0]])
+
+
+    >>> a = np.zeros((7,7), dtype=int)
+    >>> a[1:6, 2:5] = 1; a[1:3,3] = 0
+    >>> a
+    array([[0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 1, 0, 1, 0, 0],
+           [0, 0, 1, 0, 1, 0, 0],
+           [0, 0, 1, 1, 1, 0, 0],
+           [0, 0, 1, 1, 1, 0, 0],
+           [0, 0, 1, 1, 1, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0]])
+    >>> # In addition to removing holes, closing can also
+    >>> # coarsen boundaries with fine hollows.
+    >>> ndimage.binary_closing(a).astype(int)
+    array([[0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 1, 0, 1, 0, 0],
+           [0, 0, 1, 1, 1, 0, 0],
+           [0, 0, 1, 1, 1, 0, 0],
+           [0, 0, 1, 1, 1, 0, 0],
+           [0, 0, 1, 1, 1, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0]])
+    >>> ndimage.binary_closing(a, structure=np.ones((2,2))).astype(int)
+    array([[0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 1, 1, 1, 0, 0],
+           [0, 0, 1, 1, 1, 0, 0],
+           [0, 0, 1, 1, 1, 0, 0],
+           [0, 0, 1, 1, 1, 0, 0],
+           [0, 0, 1, 1, 1, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0]])
+
+    """
+    input = numpy.asarray(input)
+    if structure is None:
+        rank = input.ndim
+        structure = generate_binary_structure(rank, 1)
+
+    tmp = binary_dilation(input, structure, iterations, mask, None,
+                          border_value, origin, brute_force)
+    return binary_erosion(tmp, structure, iterations, mask, output,
+                          border_value, origin, brute_force)
+
+
+def binary_hit_or_miss(input, structure1=None, structure2=None,
+                       output=None, origin1=0, origin2=None):
+    """
+    Multidimensional binary hit-or-miss transform.
+
+    The hit-or-miss transform finds the locations of a given pattern
+    inside the input image.
+
+    Parameters
+    ----------
+    input : array_like (cast to booleans)
+        Binary image where a pattern is to be detected.
+    structure1 : array_like (cast to booleans), optional
+        Part of the structuring element to be fitted to the foreground
+        (non-zero elements) of `input`. If no value is provided, a
+        structure of square connectivity 1 is chosen.
+    structure2 : array_like (cast to booleans), optional
+        Second part of the structuring element that has to completely miss
+        the foreground. If no value is provided, the complement of
+        `structure1` is taken.
+    output : ndarray, optional
+        Array of the same shape as input, into which the output is placed.
+        By default, a new array is created.
+    origin1 : int or tuple of ints, optional
+        Placement of the first part of the structuring element `structure1`,
+        by default 0 for a centered structure.
+    origin2 : int or tuple of ints, optional
+        Placement of the second part of the structuring element `structure2`,
+        by default 0 for a centered structure. If a value is provided for
+        `origin1` and not for `origin2`, then `origin2` is set to `origin1`.
+
+    Returns
+    -------
+    binary_hit_or_miss : ndarray
+        Hit-or-miss transform of `input` with the given structuring
+        element (`structure1`, `structure2`).
+
+    See also
+    --------
+    binary_erosion
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Hit-or-miss_transform
+
+    Examples
+    --------
+    >>> from scipy import ndimage
+    >>> import numpy as np
+    >>> a = np.zeros((7,7), dtype=int)
+    >>> a[1, 1] = 1; a[2:4, 2:4] = 1; a[4:6, 4:6] = 1
+    >>> a
+    array([[0, 0, 0, 0, 0, 0, 0],
+           [0, 1, 0, 0, 0, 0, 0],
+           [0, 0, 1, 1, 0, 0, 0],
+           [0, 0, 1, 1, 0, 0, 0],
+           [0, 0, 0, 0, 1, 1, 0],
+           [0, 0, 0, 0, 1, 1, 0],
+           [0, 0, 0, 0, 0, 0, 0]])
+    >>> structure1 = np.array([[1, 0, 0], [0, 1, 1], [0, 1, 1]])
+    >>> structure1
+    array([[1, 0, 0],
+           [0, 1, 1],
+           [0, 1, 1]])
+    >>> # Find the matches of structure1 in the array a
+    >>> ndimage.binary_hit_or_miss(a, structure1=structure1).astype(int)
+    array([[0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 1, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 1, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0]])
+    >>> # Change the origin of the filter
+    >>> # origin1=1 is equivalent to origin1=(1,1) here
+    >>> ndimage.binary_hit_or_miss(a, structure1=structure1,\\
+    ... origin1=1).astype(int)
+    array([[0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 1, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 1, 0],
+           [0, 0, 0, 0, 0, 0, 0]])
+
+    """
+    input = numpy.asarray(input)
+    if structure1 is None:
+        structure1 = generate_binary_structure(input.ndim, 1)
+    if structure2 is None:
+        structure2 = numpy.logical_not(structure1)
+    origin1 = _ni_support._normalize_sequence(origin1, input.ndim)
+    if origin2 is None:
+        origin2 = origin1
+    else:
+        origin2 = _ni_support._normalize_sequence(origin2, input.ndim)
+
+    tmp1 = _binary_erosion(input, structure1, 1, None, None, 0, origin1,
+                           0, False)
+    inplace = isinstance(output, numpy.ndarray)
+    result = _binary_erosion(input, structure2, 1, None, output, 0,
+                             origin2, 1, False)
+    if inplace:
+        numpy.logical_not(output, output)
+        numpy.logical_and(tmp1, output, output)
+    else:
+        numpy.logical_not(result, result)
+        return numpy.logical_and(tmp1, result)
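+
+
+# --- Editor's note: illustrative sketch, not part of scipy. ---
+# The two erosions above amount to the textbook identity
+# hit_or_miss(x) == erosion(x, s1) & erosion(~x, s2):
+#
+#     >>> import numpy as np
+#     >>> from scipy import ndimage
+#     >>> a = np.zeros((7, 7), dtype=int)
+#     >>> a[1, 1] = 1; a[2:4, 2:4] = 1; a[4:6, 4:6] = 1
+#     >>> s1 = np.array([[1, 0, 0], [0, 1, 1], [0, 1, 1]])
+#     >>> s2 = np.logical_not(s1)
+#     >>> hm = ndimage.binary_hit_or_miss(a, structure1=s1)
+#     >>> alt = np.logical_and(ndimage.binary_erosion(a, s1),
+#     ...                      ndimage.binary_erosion(np.logical_not(a), s2))
+#     >>> bool((hm == alt).all())
+#     True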
+
+
+def binary_propagation(input, structure=None, mask=None,
+                       output=None, border_value=0, origin=0):
+    """
+    Multidimensional binary propagation with the given structuring element.
+
+    Parameters
+    ----------
+    input : array_like
+        Binary image to be propagated inside `mask`.
+    structure : array_like, optional
+        Structuring element used in the successive dilations. The output
+        may depend on the structuring element, especially if `mask` has
+        several connected components. If no structuring element is
+        provided, an element is generated with a squared connectivity equal
+        to one.
+    mask : array_like, optional
+        Binary mask defining the region into which `input` is allowed to
+        propagate.
+    output : ndarray, optional
+        Array of the same shape as input, into which the output is placed.
+        By default, a new array is created.
+    border_value : int (cast to 0 or 1), optional
+        Value at the border in the output array.
+    origin : int or tuple of ints, optional
+        Placement of the filter, by default 0.
+
+    Returns
+    -------
+    binary_propagation : ndarray
+        Binary propagation of `input` inside `mask`.
+
+    Notes
+    -----
+    This function is functionally equivalent to calling binary_dilation
+    with the number of iterations less than one: iterative dilation until
+    the result does not change anymore.
+
+    The succession of an erosion and propagation inside the original image
+    can be used instead of an *opening* for deleting small objects while
+    keeping the contours of larger objects untouched.
+
+    References
+    ----------
+    .. [1] http://cmm.ensmp.fr/~serra/cours/pdf/en/ch6en.pdf, slide 15.
+    .. [2] I.T. Young, J.J. Gerbrands, and L.J. van Vliet, "Fundamentals of
+        image processing", 1998
+        ftp://qiftp.tudelft.nl/DIPimage/docs/FIP2.3.pdf
+
+    Examples
+    --------
+    >>> from scipy import ndimage
+    >>> import numpy as np
+    >>> input = np.zeros((8, 8), dtype=int)
+    >>> input[2, 2] = 1
+    >>> mask = np.zeros((8, 8), dtype=int)
+    >>> mask[1:4, 1:4] = mask[4, 4]  = mask[6:8, 6:8] = 1
+    >>> input
+    array([[0, 0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 1, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0, 0]])
+    >>> mask
+    array([[0, 0, 0, 0, 0, 0, 0, 0],
+           [0, 1, 1, 1, 0, 0, 0, 0],
+           [0, 1, 1, 1, 0, 0, 0, 0],
+           [0, 1, 1, 1, 0, 0, 0, 0],
+           [0, 0, 0, 0, 1, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 1, 1],
+           [0, 0, 0, 0, 0, 0, 1, 1]])
+    >>> ndimage.binary_propagation(input, mask=mask).astype(int)
+    array([[0, 0, 0, 0, 0, 0, 0, 0],
+           [0, 1, 1, 1, 0, 0, 0, 0],
+           [0, 1, 1, 1, 0, 0, 0, 0],
+           [0, 1, 1, 1, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0, 0]])
+    >>> ndimage.binary_propagation(input, mask=mask,\\
+    ... structure=np.ones((3,3))).astype(int)
+    array([[0, 0, 0, 0, 0, 0, 0, 0],
+           [0, 1, 1, 1, 0, 0, 0, 0],
+           [0, 1, 1, 1, 0, 0, 0, 0],
+           [0, 1, 1, 1, 0, 0, 0, 0],
+           [0, 0, 0, 0, 1, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0, 0]])
+
+    >>> # Comparison between opening and erosion+propagation
+    >>> a = np.zeros((6,6), dtype=int)
+    >>> a[2:5, 2:5] = 1; a[0, 0] = 1; a[5, 5] = 1
+    >>> a
+    array([[1, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0],
+           [0, 0, 1, 1, 1, 0],
+           [0, 0, 1, 1, 1, 0],
+           [0, 0, 1, 1, 1, 0],
+           [0, 0, 0, 0, 0, 1]])
+    >>> ndimage.binary_opening(a).astype(int)
+    array([[0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 1, 0, 0],
+           [0, 0, 1, 1, 1, 0],
+           [0, 0, 0, 1, 0, 0],
+           [0, 0, 0, 0, 0, 0]])
+    >>> b = ndimage.binary_erosion(a)
+    >>> b.astype(int)
+    array([[0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 1, 0, 0],
+           [0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0]])
+    >>> ndimage.binary_propagation(b, mask=a).astype(int)
+    array([[0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0],
+           [0, 0, 1, 1, 1, 0],
+           [0, 0, 1, 1, 1, 0],
+           [0, 0, 1, 1, 1, 0],
+           [0, 0, 0, 0, 0, 0]])
+
+    """
+    return binary_dilation(input, structure, -1, mask, output,
+                           border_value, origin)
+
+
+def binary_fill_holes(input, structure=None, output=None, origin=0):
+    """
+    Fill the holes in binary objects.
+
+    Parameters
+    ----------
+    input : array_like
+        N-D binary array with holes to be filled
+    structure : array_like, optional
+        Structuring element used in the computation; large-size elements
+        make computations faster but may miss holes separated from the
+        background by thin regions. The default element (with a square
+        connectivity equal to one) yields the intuitive result where all
+        holes in the input have been filled.
+    output : ndarray, optional
+        Array of the same shape as input, into which the output is placed.
+        By default, a new array is created.
+    origin : int, tuple of ints, optional
+        Position of the structuring element.
+
+    Returns
+    -------
+    out : ndarray
+        Transformation of the initial image `input` where holes have been
+        filled.
+
+    See also
+    --------
+    binary_dilation, binary_propagation, label
+
+    Notes
+    -----
+    The algorithm used in this function consists of invading the complement
+    of the shapes in `input` from the outer boundary of the image,
+    using binary dilations. Holes are not connected to the boundary and are
+    therefore not invaded. The result is the complementary subset of the
+    invaded region.
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Mathematical_morphology
+
+    Examples
+    --------
+    >>> from scipy import ndimage
+    >>> import numpy as np
+    >>> a = np.zeros((5, 5), dtype=int)
+    >>> a[1:4, 1:4] = 1
+    >>> a[2,2] = 0
+    >>> a
+    array([[0, 0, 0, 0, 0],
+           [0, 1, 1, 1, 0],
+           [0, 1, 0, 1, 0],
+           [0, 1, 1, 1, 0],
+           [0, 0, 0, 0, 0]])
+    >>> ndimage.binary_fill_holes(a).astype(int)
+    array([[0, 0, 0, 0, 0],
+           [0, 1, 1, 1, 0],
+           [0, 1, 1, 1, 0],
+           [0, 1, 1, 1, 0],
+           [0, 0, 0, 0, 0]])
+    >>> # Too big structuring element
+    >>> ndimage.binary_fill_holes(a, structure=np.ones((5,5))).astype(int)
+    array([[0, 0, 0, 0, 0],
+           [0, 1, 1, 1, 0],
+           [0, 1, 0, 1, 0],
+           [0, 1, 1, 1, 0],
+           [0, 0, 0, 0, 0]])
+
+    """
+    mask = numpy.logical_not(input)
+    tmp = numpy.zeros(mask.shape, bool)
+    inplace = isinstance(output, numpy.ndarray)
+    if inplace:
+        binary_dilation(tmp, structure, -1, mask, output, 1, origin)
+        numpy.logical_not(output, output)
+    else:
+        output = binary_dilation(tmp, structure, -1, mask, None, 1,
+                                 origin)
+        numpy.logical_not(output, output)
+        return output
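+
+
+# --- Editor's note: illustrative sketch, not part of scipy. ---
+# The dilation above floods the complement of the input from the image
+# border (border_value=1); whatever the flood cannot reach is a hole, so
+# complementing the flood fills it:
+#
+#     >>> import numpy as np
+#     >>> from scipy import ndimage
+#     >>> a = np.zeros((5, 5), dtype=bool)
+#     >>> a[1:4, 1:4] = True
+#     >>> a[2, 2] = False                        # one-pixel hole
+#     >>> flood = ndimage.binary_dilation(np.zeros_like(a), mask=~a,
+#     ...                                 iterations=-1, border_value=1)
+#     >>> bool((~flood == ndimage.binary_fill_holes(a)).all())
+#     True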
+
+
+def grey_erosion(input, size=None, footprint=None, structure=None,
+                 output=None, mode="reflect", cval=0.0, origin=0):
+    """
+    Calculate a greyscale erosion, using either a structuring element,
+    or a footprint corresponding to a flat structuring element.
+
+    Grayscale erosion is a mathematical morphology operation. For the
+    simple case of a full and flat structuring element, it can be viewed
+    as a minimum filter over a sliding window.
+
+    Parameters
+    ----------
+    input : array_like
+        Array over which the grayscale erosion is to be computed.
+    size : tuple of ints
+        Shape of a flat and full structuring element used for the grayscale
+        erosion. Optional if `footprint` or `structure` is provided.
+    footprint : array of ints, optional
+        Positions of non-infinite elements of a flat structuring element
+        used for the grayscale erosion. Non-zero values give the set of
+        neighbors of the center over which the minimum is chosen.
+    structure : array of ints, optional
+        Structuring element used for the grayscale erosion. `structure`
+        may be a non-flat structuring element.
+    output : array, optional
+        An array used for storing the output of the erosion may be provided.
+    mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
+        The `mode` parameter determines how the array borders are
+        handled, where `cval` is the value when mode is equal to
+        'constant'. Default is 'reflect'
+    cval : scalar, optional
+        Value to fill past edges of input if `mode` is 'constant'. Default
+        is 0.0.
+    origin : scalar, optional
+        The `origin` parameter controls the placement of the filter.
+        Default 0
+
+    Returns
+    -------
+    output : ndarray
+        Grayscale erosion of `input`.
+
+    See also
+    --------
+    binary_erosion, grey_dilation, grey_opening, grey_closing
+    generate_binary_structure, minimum_filter
+
+    Notes
+    -----
+    The grayscale erosion of an image input by a structuring element s defined
+    over a domain E is given by:
+
+    (input - s)(x) = min {input(y) - s(x-y), for y in E}
+
+    In particular, for structuring elements defined as
+    s(y) = 0 for y in E, the grayscale erosion computes the minimum of the
+    input image inside a sliding window defined by E.
+
+    Grayscale erosion [1]_ is a *mathematical morphology* operation [2]_.
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Erosion_%28morphology%29
+    .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology
+
+    Examples
+    --------
+    >>> from scipy import ndimage
+    >>> import numpy as np
+    >>> a = np.zeros((7,7), dtype=int)
+    >>> a[1:6, 1:6] = 3
+    >>> a[4,4] = 2; a[2,3] = 1
+    >>> a
+    array([[0, 0, 0, 0, 0, 0, 0],
+           [0, 3, 3, 3, 3, 3, 0],
+           [0, 3, 3, 1, 3, 3, 0],
+           [0, 3, 3, 3, 3, 3, 0],
+           [0, 3, 3, 3, 2, 3, 0],
+           [0, 3, 3, 3, 3, 3, 0],
+           [0, 0, 0, 0, 0, 0, 0]])
+    >>> ndimage.grey_erosion(a, size=(3,3))
+    array([[0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 1, 1, 1, 0, 0],
+           [0, 0, 1, 1, 1, 0, 0],
+           [0, 0, 3, 2, 2, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0]])
+    >>> footprint = ndimage.generate_binary_structure(2, 1)
+    >>> footprint
+    array([[False,  True, False],
+           [ True,  True,  True],
+           [False,  True, False]], dtype=bool)
+    >>> # Diagonally-connected elements are not considered neighbors
+    >>> ndimage.grey_erosion(a, size=(3,3), footprint=footprint)
+    array([[0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 1, 1, 1, 0, 0],
+           [0, 0, 3, 1, 2, 0, 0],
+           [0, 0, 3, 2, 2, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0]])
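+
+    For a flat, full structuring element, grey_erosion is equivalent to a
+    minimum filter of the same size (a small sketch of the note above):
+
+    >>> np.array_equal(ndimage.grey_erosion(a, size=(3,3)),
+    ...                ndimage.minimum_filter(a, size=(3,3)))
+    True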
+
+    """
+    if size is None and footprint is None and structure is None:
+        raise ValueError("size, footprint, or structure must be specified")
+
+    return _filters._min_or_max_filter(input, size, footprint, structure,
+                                       output, mode, cval, origin, 1)
+
+
+def grey_dilation(input, size=None, footprint=None, structure=None,
+                  output=None, mode="reflect", cval=0.0, origin=0):
+    """
+    Calculate a greyscale dilation, using either a structuring element,
+    or a footprint corresponding to a flat structuring element.
+
+    Grayscale dilation is a mathematical morphology operation. For the
+    simple case of a full and flat structuring element, it can be viewed
+    as a maximum filter over a sliding window.
+
+    Parameters
+    ----------
+    input : array_like
+        Array over which the grayscale dilation is to be computed.
+    size : tuple of ints
+        Shape of a flat and full structuring element used for the grayscale
+        dilation. Optional if `footprint` or `structure` is provided.
+    footprint : array of ints, optional
+        Positions of non-infinite elements of a flat structuring element
+        used for the grayscale dilation. Non-zero values give the set of
+        neighbors of the center over which the maximum is chosen.
+    structure : array of ints, optional
+        Structuring element used for the grayscale dilation. `structure`
+        may be a non-flat structuring element.
+    output : array, optional
+        An array used for storing the output of the dilation may be provided.
+    mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
+        The `mode` parameter determines how the array borders are
+        handled, where `cval` is the value when mode is equal to
+        'constant'. Default is 'reflect'.
+    cval : scalar, optional
+        Value to fill past edges of input if `mode` is 'constant'. Default
+        is 0.0.
+    origin : scalar, optional
+        The `origin` parameter controls the placement of the filter.
+        Default is 0.
+
+    Returns
+    -------
+    grey_dilation : ndarray
+        Grayscale dilation of `input`.
+
+    See also
+    --------
+    binary_dilation, grey_erosion, grey_closing, grey_opening
+    generate_binary_structure, maximum_filter
+
+    Notes
+    -----
+    The grayscale dilation of an image input by a structuring element s defined
+    over a domain E is given by:
+
+    (input + s)(x) = max {input(y) + s(x-y), for y in E}
+
+    In particular, for structuring elements defined as
+    s(y) = 0 for y in E, the grayscale dilation computes the maximum of the
+    input image inside a sliding window defined by E.
+
+    Grayscale dilation [1]_ is a *mathematical morphology* operation [2]_.
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Dilation_%28morphology%29
+    .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology
+
+    Examples
+    --------
+    >>> from scipy import ndimage
+    >>> import numpy as np
+    >>> a = np.zeros((7,7), dtype=int)
+    >>> a[2:5, 2:5] = 1
+    >>> a[4,4] = 2; a[2,3] = 3
+    >>> a
+    array([[0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 1, 3, 1, 0, 0],
+           [0, 0, 1, 1, 1, 0, 0],
+           [0, 0, 1, 1, 2, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0]])
+    >>> ndimage.grey_dilation(a, size=(3,3))
+    array([[0, 0, 0, 0, 0, 0, 0],
+           [0, 1, 3, 3, 3, 1, 0],
+           [0, 1, 3, 3, 3, 1, 0],
+           [0, 1, 3, 3, 3, 2, 0],
+           [0, 1, 1, 2, 2, 2, 0],
+           [0, 1, 1, 2, 2, 2, 0],
+           [0, 0, 0, 0, 0, 0, 0]])
+    >>> ndimage.grey_dilation(a, footprint=np.ones((3,3)))
+    array([[0, 0, 0, 0, 0, 0, 0],
+           [0, 1, 3, 3, 3, 1, 0],
+           [0, 1, 3, 3, 3, 1, 0],
+           [0, 1, 3, 3, 3, 2, 0],
+           [0, 1, 1, 2, 2, 2, 0],
+           [0, 1, 1, 2, 2, 2, 0],
+           [0, 0, 0, 0, 0, 0, 0]])
+    >>> s = ndimage.generate_binary_structure(2,1)
+    >>> s
+    array([[False,  True, False],
+           [ True,  True,  True],
+           [False,  True, False]], dtype=bool)
+    >>> ndimage.grey_dilation(a, footprint=s)
+    array([[0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 1, 3, 1, 0, 0],
+           [0, 1, 3, 3, 3, 1, 0],
+           [0, 1, 1, 3, 2, 1, 0],
+           [0, 1, 1, 2, 2, 2, 0],
+           [0, 0, 1, 1, 2, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0]])
+    >>> ndimage.grey_dilation(a, size=(3,3), structure=np.ones((3,3)))
+    array([[1, 1, 1, 1, 1, 1, 1],
+           [1, 2, 4, 4, 4, 2, 1],
+           [1, 2, 4, 4, 4, 2, 1],
+           [1, 2, 4, 4, 4, 3, 1],
+           [1, 2, 2, 3, 3, 3, 1],
+           [1, 2, 2, 3, 3, 3, 1],
+           [1, 1, 1, 1, 1, 1, 1]])
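+
+    For a flat, full structuring element, grey_dilation is equivalent to a
+    maximum filter of the same size (a small sketch of the note above):
+
+    >>> np.array_equal(ndimage.grey_dilation(a, size=(3,3)),
+    ...                ndimage.maximum_filter(a, size=(3,3)))
+    True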
+
+    """
+    if size is None and footprint is None and structure is None:
+        raise ValueError("size, footprint, or structure must be specified")
+    if structure is not None:
+        structure = numpy.asarray(structure)
+        structure = structure[tuple([slice(None, None, -1)] *
+                                    structure.ndim)]
+    if footprint is not None:
+        footprint = numpy.asarray(footprint)
+        footprint = footprint[tuple([slice(None, None, -1)] *
+                                    footprint.ndim)]
+
+    input = numpy.asarray(input)
+    origin = _ni_support._normalize_sequence(origin, input.ndim)
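+    # Dilation mirrors the structuring element about its center, so the
+    # origin is negated; for an even-sized footprint the mirrored center
+    # shifts by one element along that axis.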
+    for ii in range(len(origin)):
+        origin[ii] = -origin[ii]
+        if footprint is not None:
+            sz = footprint.shape[ii]
+        elif structure is not None:
+            sz = structure.shape[ii]
+        elif numpy.isscalar(size):
+            sz = size
+        else:
+            sz = size[ii]
+        if not sz & 1:
+            origin[ii] -= 1
+
+    return _filters._min_or_max_filter(input, size, footprint, structure,
+                                       output, mode, cval, origin, 0)
+
+
+def grey_opening(input, size=None, footprint=None, structure=None,
+                 output=None, mode="reflect", cval=0.0, origin=0):
+    """
+    Multidimensional grayscale opening.
+
+    A grayscale opening consists of a grayscale erosion followed by a
+    grayscale dilation.
+
+    Parameters
+    ----------
+    input : array_like
+        Array over which the grayscale opening is to be computed.
+    size : tuple of ints
+        Shape of a flat and full structuring element used for the grayscale
+        opening. Optional if `footprint` or `structure` is provided.
+    footprint : array of ints, optional
+        Positions of non-infinite elements of a flat structuring element
+        used for the grayscale opening.
+    structure : array of ints, optional
+        Structuring element used for the grayscale opening. `structure`
+        may be a non-flat structuring element.
+    output : array, optional
+        An array used for storing the output of the opening may be provided.
+    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
+        The `mode` parameter determines how the array borders are
+        handled, where `cval` is the value when mode is equal to
+        'constant'. Default is 'reflect'.
+    cval : scalar, optional
+        Value to fill past edges of input if `mode` is 'constant'. Default
+        is 0.0.
+    origin : scalar, optional
+        The `origin` parameter controls the placement of the filter.
+        Default is 0.
+
+    Returns
+    -------
+    grey_opening : ndarray
+        Result of the grayscale opening of `input` with `structure`.
+
+    See also
+    --------
+    binary_opening, grey_dilation, grey_erosion, grey_closing
+    generate_binary_structure
+
+    Notes
+    -----
+    The action of a grayscale opening with a flat structuring element amounts
+    to smoothing high local maxima, whereas binary opening erases small
+    objects.
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Mathematical_morphology
+
+    Examples
+    --------
+    >>> from scipy import ndimage
+    >>> import numpy as np
+    >>> a = np.arange(36).reshape((6,6))
+    >>> a[3, 3] = 50
+    >>> a
+    array([[ 0,  1,  2,  3,  4,  5],
+           [ 6,  7,  8,  9, 10, 11],
+           [12, 13, 14, 15, 16, 17],
+           [18, 19, 20, 50, 22, 23],
+           [24, 25, 26, 27, 28, 29],
+           [30, 31, 32, 33, 34, 35]])
+    >>> ndimage.grey_opening(a, size=(3,3))
+    array([[ 0,  1,  2,  3,  4,  4],
+           [ 6,  7,  8,  9, 10, 10],
+           [12, 13, 14, 15, 16, 16],
+           [18, 19, 20, 22, 22, 22],
+           [24, 25, 26, 27, 28, 28],
+           [24, 25, 26, 27, 28, 28]])
+    >>> # Note that the local maximum a[3,3] has disappeared
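+
+    A grayscale opening is an erosion followed by a dilation with the same
+    structuring element (a small sketch of the definition above):
+
+    >>> eroded = ndimage.grey_erosion(a, size=(3,3))
+    >>> np.array_equal(ndimage.grey_opening(a, size=(3,3)),
+    ...                ndimage.grey_dilation(eroded, size=(3,3)))
+    True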
+
+    """
+    if (size is not None) and (footprint is not None):
+        warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=2)
+    tmp = grey_erosion(input, size, footprint, structure, None, mode,
+                       cval, origin)
+    return grey_dilation(tmp, size, footprint, structure, output, mode,
+                         cval, origin)
+
+
+def grey_closing(input, size=None, footprint=None, structure=None,
+                 output=None, mode="reflect", cval=0.0, origin=0):
+    """
+    Multidimensional grayscale closing.
+
+    A grayscale closing consists of a grayscale dilation followed by a
+    grayscale erosion.
+
+    Parameters
+    ----------
+    input : array_like
+        Array over which the grayscale closing is to be computed.
+    size : tuple of ints
+        Shape of a flat and full structuring element used for the grayscale
+        closing. Optional if `footprint` or `structure` is provided.
+    footprint : array of ints, optional
+        Positions of non-infinite elements of a flat structuring element
+        used for the grayscale closing.
+    structure : array of ints, optional
+        Structuring element used for the grayscale closing. `structure`
+        may be a non-flat structuring element.
+    output : array, optional
+        An array used for storing the output of the closing may be provided.
+    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
+        The `mode` parameter determines how the array borders are
+        handled, where `cval` is the value when mode is equal to
+        'constant'. Default is 'reflect'.
+    cval : scalar, optional
+        Value to fill past edges of input if `mode` is 'constant'. Default
+        is 0.0.
+    origin : scalar, optional
+        The `origin` parameter controls the placement of the filter.
+        Default is 0.
+
+    Returns
+    -------
+    grey_closing : ndarray
+        Result of the grayscale closing of `input` with `structure`.
+
+    See also
+    --------
+    binary_closing, grey_dilation, grey_erosion, grey_opening,
+    generate_binary_structure
+
+    Notes
+    -----
+    The action of a grayscale closing with a flat structuring element amounts
+    to smoothing deep local minima, whereas binary closing fills small holes.
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Mathematical_morphology
+
+    Examples
+    --------
+    >>> from scipy import ndimage
+    >>> import numpy as np
+    >>> a = np.arange(36).reshape((6,6))
+    >>> a[3,3] = 0
+    >>> a
+    array([[ 0,  1,  2,  3,  4,  5],
+           [ 6,  7,  8,  9, 10, 11],
+           [12, 13, 14, 15, 16, 17],
+           [18, 19, 20,  0, 22, 23],
+           [24, 25, 26, 27, 28, 29],
+           [30, 31, 32, 33, 34, 35]])
+    >>> ndimage.grey_closing(a, size=(3,3))
+    array([[ 7,  7,  8,  9, 10, 11],
+           [ 7,  7,  8,  9, 10, 11],
+           [13, 13, 14, 15, 16, 17],
+           [19, 19, 20, 20, 22, 23],
+           [25, 25, 26, 27, 28, 29],
+           [31, 31, 32, 33, 34, 35]])
+    >>> # Note that the local minimum a[3,3] has disappeared
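+
+    A grayscale closing is a dilation followed by an erosion with the same
+    structuring element (a small sketch of the definition above):
+
+    >>> dilated = ndimage.grey_dilation(a, size=(3,3))
+    >>> np.array_equal(ndimage.grey_closing(a, size=(3,3)),
+    ...                ndimage.grey_erosion(dilated, size=(3,3)))
+    True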
+
+    """
+    if (size is not None) and (footprint is not None):
+        warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=2)
+    tmp = grey_dilation(input, size, footprint, structure, None, mode,
+                        cval, origin)
+    return grey_erosion(tmp, size, footprint, structure, output, mode,
+                        cval, origin)
+
+
+def morphological_gradient(input, size=None, footprint=None, structure=None,
+                           output=None, mode="reflect", cval=0.0, origin=0):
+    """
+    Multidimensional morphological gradient.
+
+    The morphological gradient is calculated as the difference between a
+    dilation and an erosion of the input with a given structuring element.
+
+    Parameters
+    ----------
+    input : array_like
+        Array over which to compute the morphological gradient.
+    size : tuple of ints
+        Shape of a flat and full structuring element used for the mathematical
+        morphology operations. Optional if `footprint` or `structure` is
+        provided. A larger `size` yields a more blurred gradient.
+    footprint : array of ints, optional
+        Positions of non-infinite elements of a flat structuring element
+        used for the morphology operations. Larger footprints
+        give a more blurred morphological gradient.
+    structure : array of ints, optional
+        Structuring element used for the morphology operations.
+        `structure` may be a non-flat structuring element.
+    output : array, optional
+        An array used for storing the output of the morphological gradient
+        may be provided.
+    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
+        The `mode` parameter determines how the array borders are
+        handled, where `cval` is the value when mode is equal to
+        'constant'. Default is 'reflect'.
+    cval : scalar, optional
+        Value to fill past edges of input if `mode` is 'constant'. Default
+        is 0.0.
+    origin : scalar, optional
+        The `origin` parameter controls the placement of the filter.
+        Default is 0.
+
+    Returns
+    -------
+    morphological_gradient : ndarray
+        Morphological gradient of `input`.
+
+    See also
+    --------
+    grey_dilation, grey_erosion, gaussian_gradient_magnitude
+
+    Notes
+    -----
+    For a flat structuring element, the morphological gradient
+    computed at a given point corresponds to the maximal difference
+    between elements of the input among the elements covered by the
+    structuring element centered on the point.
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Mathematical_morphology
+
+    Examples
+    --------
+    >>> from scipy import ndimage
+    >>> import numpy as np
+    >>> a = np.zeros((7,7), dtype=int)
+    >>> a[2:5, 2:5] = 1
+    >>> ndimage.morphological_gradient(a, size=(3,3))
+    array([[0, 0, 0, 0, 0, 0, 0],
+           [0, 1, 1, 1, 1, 1, 0],
+           [0, 1, 1, 1, 1, 1, 0],
+           [0, 1, 1, 0, 1, 1, 0],
+           [0, 1, 1, 1, 1, 1, 0],
+           [0, 1, 1, 1, 1, 1, 0],
+           [0, 0, 0, 0, 0, 0, 0]])
+    >>> # The morphological gradient is computed as the difference
+    >>> # between a dilation and an erosion
+    >>> ndimage.grey_dilation(a, size=(3,3)) -\\
+    ...  ndimage.grey_erosion(a, size=(3,3))
+    array([[0, 0, 0, 0, 0, 0, 0],
+           [0, 1, 1, 1, 1, 1, 0],
+           [0, 1, 1, 1, 1, 1, 0],
+           [0, 1, 1, 0, 1, 1, 0],
+           [0, 1, 1, 1, 1, 1, 0],
+           [0, 1, 1, 1, 1, 1, 0],
+           [0, 0, 0, 0, 0, 0, 0]])
+    >>> a = np.zeros((7,7), dtype=int)
+    >>> a[2:5, 2:5] = 1
+    >>> a[4,4] = 2; a[2,3] = 3
+    >>> a
+    array([[0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 1, 3, 1, 0, 0],
+           [0, 0, 1, 1, 1, 0, 0],
+           [0, 0, 1, 1, 2, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0]])
+    >>> ndimage.morphological_gradient(a, size=(3,3))
+    array([[0, 0, 0, 0, 0, 0, 0],
+           [0, 1, 3, 3, 3, 1, 0],
+           [0, 1, 3, 3, 3, 1, 0],
+           [0, 1, 3, 2, 3, 2, 0],
+           [0, 1, 1, 2, 2, 2, 0],
+           [0, 1, 1, 2, 2, 2, 0],
+           [0, 0, 0, 0, 0, 0, 0]])
+
+    """
+    tmp = grey_dilation(input, size, footprint, structure, None, mode,
+                        cval, origin)
+    if isinstance(output, numpy.ndarray):
+        grey_erosion(input, size, footprint, structure, output, mode,
+                     cval, origin)
+        return numpy.subtract(tmp, output, output)
+    else:
+        return (tmp - grey_erosion(input, size, footprint, structure,
+                                   None, mode, cval, origin))
+
+
+def morphological_laplace(input, size=None, footprint=None,
+                          structure=None, output=None,
+                          mode="reflect", cval=0.0, origin=0):
+    """
+    Multidimensional morphological laplace.
+
+    Parameters
+    ----------
+    input : array_like
+        Input array.
+    size : tuple of ints, optional
+        Shape of a flat and full structuring element used for the operation.
+        Optional if `footprint` or `structure` is provided.
+    footprint : array of ints, optional
+        Positions of non-infinite elements of a flat structuring element
+        used for the operation.
+    structure : array of ints, optional
+        Structuring element used for the operation. `structure` may be a
+        non-flat structuring element. Either `size`, `footprint`, or
+        `structure` must be provided.
+    output : ndarray, optional
+        An output array can optionally be provided.
+    mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
+        The mode parameter determines how the array borders are handled.
+        For 'constant' mode, values beyond borders are set to be `cval`.
+        Default is 'reflect'.
+    cval : scalar, optional
+        Value to fill past edges of input if mode is 'constant'.
+        Default is 0.0
+    origin : scalar, optional
+        The `origin` parameter controls the placement of the filter.
+        Default is 0.
+
+    Returns
+    -------
+    morphological_laplace : ndarray
+        The morphological Laplace of `input`.
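+
+    Examples
+    --------
+    The morphological Laplace is the sum of a grayscale dilation and a
+    grayscale erosion minus twice the input; a small sketch of that
+    identity on an arbitrary array:
+
+    >>> from scipy import ndimage
+    >>> import numpy as np
+    >>> a = np.zeros((5, 5), dtype=int)
+    >>> a[2, 2] = 10
+    >>> np.array_equal(ndimage.morphological_laplace(a, size=(3, 3)),
+    ...                ndimage.grey_dilation(a, size=(3, 3))
+    ...                + ndimage.grey_erosion(a, size=(3, 3)) - 2 * a)
+    True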
+
+    """
+    tmp1 = grey_dilation(input, size, footprint, structure, None, mode,
+                         cval, origin)
+    if isinstance(output, numpy.ndarray):
+        grey_erosion(input, size, footprint, structure, output, mode,
+                     cval, origin)
+        numpy.add(tmp1, output, output)
+        numpy.subtract(output, input, output)
+        return numpy.subtract(output, input, output)
+    else:
+        tmp2 = grey_erosion(input, size, footprint, structure, None, mode,
+                            cval, origin)
+        numpy.add(tmp1, tmp2, tmp2)
+        numpy.subtract(tmp2, input, tmp2)
+        numpy.subtract(tmp2, input, tmp2)
+        return tmp2
+
+
+def white_tophat(input, size=None, footprint=None, structure=None,
+                 output=None, mode="reflect", cval=0.0, origin=0):
+    """
+    Multidimensional white tophat filter.
+
+    Parameters
+    ----------
+    input : array_like
+        Input.
+    size : tuple of ints
+        Shape of a flat and full structuring element used for the filter.
+        Optional if `footprint` or `structure` is provided.
+    footprint : array of ints, optional
+        Positions of elements of a flat structuring element
+        used for the white tophat filter.
+    structure : array of ints, optional
+        Structuring element used for the filter. `structure`
+        may be a non-flat structuring element.
+    output : array, optional
+        An array used for storing the output of the filter may be provided.
+    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
+        The `mode` parameter determines how the array borders are
+        handled, where `cval` is the value when mode is equal to
+        'constant'. Default is 'reflect'.
+    cval : scalar, optional
+        Value to fill past edges of input if `mode` is 'constant'.
+        Default is 0.0.
+    origin : scalar, optional
+        The `origin` parameter controls the placement of the filter.
+        Default is 0.
+
+    Returns
+    -------
+    output : ndarray
+        Result of the filter of `input` with `structure`.
+
+    Examples
+    --------
+    Subtract gray background from a bright peak.
+
+    >>> from scipy.ndimage import generate_binary_structure, white_tophat
+    >>> import numpy as np
+    >>> square = generate_binary_structure(rank=2, connectivity=3)
+    >>> bright_on_gray = np.array([[2, 3, 3, 3, 2],
+    ...                            [3, 4, 5, 4, 3],
+    ...                            [3, 5, 9, 5, 3],
+    ...                            [3, 4, 5, 4, 3],
+    ...                            [2, 3, 3, 3, 2]])
+    >>> white_tophat(input=bright_on_gray, structure=square)
+    array([[0, 0, 0, 0, 0],
+           [0, 0, 1, 0, 0],
+           [0, 1, 5, 1, 0],
+           [0, 0, 1, 0, 0],
+           [0, 0, 0, 0, 0]])
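+
+    Equivalently, the white tophat is the input minus its grayscale opening
+    (a small sketch of that identity; grey_opening is imported here only
+    for the check):
+
+    >>> from scipy.ndimage import grey_opening
+    >>> opened = grey_opening(bright_on_gray, structure=square)
+    >>> np.array_equal(white_tophat(input=bright_on_gray, structure=square),
+    ...                bright_on_gray - opened)
+    True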
+
+    See also
+    --------
+    black_tophat
+
+    """
+    if (size is not None) and (footprint is not None):
+        warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=2)
+    tmp = grey_erosion(input, size, footprint, structure, None, mode,
+                       cval, origin)
+    tmp = grey_dilation(tmp, size, footprint, structure, output, mode,
+                        cval, origin)
+    if tmp is None:
+        tmp = output
+
+    if input.dtype == numpy.bool_ and tmp.dtype == numpy.bool_:
+        numpy.bitwise_xor(input, tmp, out=tmp)
+    else:
+        numpy.subtract(input, tmp, out=tmp)
+    return tmp
+
+
+def black_tophat(input, size=None, footprint=None,
+                 structure=None, output=None, mode="reflect",
+                 cval=0.0, origin=0):
+    """
+    Multidimensional black tophat filter.
+
+    Parameters
+    ----------
+    input : array_like
+        Input.
+    size : tuple of ints, optional
+        Shape of a flat and full structuring element used for the filter.
+        Optional if `footprint` or `structure` is provided.
+    footprint : array of ints, optional
+        Positions of non-infinite elements of a flat structuring element
+        used for the black tophat filter.
+    structure : array of ints, optional
+        Structuring element used for the filter. `structure`
+        may be a non-flat structuring element.
+    output : array, optional
+        An array used for storing the output of the filter may be provided.
+    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
+        The `mode` parameter determines how the array borders are
+        handled, where `cval` is the value when mode is equal to
+        'constant'. Default is 'reflect'.
+    cval : scalar, optional
+        Value to fill past edges of input if `mode` is 'constant'. Default
+        is 0.0.
+    origin : scalar, optional
+        The `origin` parameter controls the placement of the filter.
+        Default is 0.
+
+    Returns
+    -------
+    black_tophat : ndarray
+        Result of the filter of `input` with `structure`.
+
+    Examples
+    --------
+    Change dark peak to bright peak and subtract background.
+
+    >>> from scipy.ndimage import generate_binary_structure, black_tophat
+    >>> import numpy as np
+    >>> square = generate_binary_structure(rank=2, connectivity=3)
+    >>> dark_on_gray = np.array([[7, 6, 6, 6, 7],
+    ...                          [6, 5, 4, 5, 6],
+    ...                          [6, 4, 0, 4, 6],
+    ...                          [6, 5, 4, 5, 6],
+    ...                          [7, 6, 6, 6, 7]])
+    >>> black_tophat(input=dark_on_gray, structure=square)
+    array([[0, 0, 0, 0, 0],
+           [0, 0, 1, 0, 0],
+           [0, 1, 5, 1, 0],
+           [0, 0, 1, 0, 0],
+           [0, 0, 0, 0, 0]])
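+
+    Equivalently, the black tophat is the grayscale closing minus the input
+    (a small sketch of that identity; grey_closing is imported here only
+    for the check):
+
+    >>> from scipy.ndimage import grey_closing
+    >>> closed = grey_closing(dark_on_gray, structure=square)
+    >>> np.array_equal(black_tophat(input=dark_on_gray, structure=square),
+    ...                closed - dark_on_gray)
+    True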
+
+    See also
+    --------
+    white_tophat, grey_opening, grey_closing
+
+    """
+    if (size is not None) and (footprint is not None):
+        warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=2)
+    tmp = grey_dilation(input, size, footprint, structure, None, mode,
+                        cval, origin)
+    tmp = grey_erosion(tmp, size, footprint, structure, output, mode,
+                       cval, origin)
+    if tmp is None:
+        tmp = output
+
+    if input.dtype == numpy.bool_ and tmp.dtype == numpy.bool_:
+        numpy.bitwise_xor(tmp, input, out=tmp)
+    else:
+        numpy.subtract(tmp, input, out=tmp)
+    return tmp
+
+
+def distance_transform_bf(input, metric="euclidean", sampling=None,
+                          return_distances=True, return_indices=False,
+                          distances=None, indices=None):
+    """
+    Distance transform by a brute force algorithm.
+
+    This function calculates the distance transform of the `input` by
+    replacing each foreground (non-zero) element with its
+    shortest distance to the background (any zero-valued element).
+
+    In addition to the distance transform, the feature transform can
+    be calculated. In this case the index of the closest background
+    element to each foreground element is returned in a separate array.
+
+    Parameters
+    ----------
+    input : array_like
+        Input
+    metric : {'euclidean', 'taxicab', 'chessboard'}, optional
+        'cityblock' and 'manhattan' are also valid, and map to 'taxicab'.
+        The default is 'euclidean'.
+    sampling : float, or sequence of float, optional
+        This parameter is only used when `metric` is 'euclidean'.
+        Spacing of elements along each dimension. If a sequence, must be of
+        length equal to the input rank; if a single number, this is used for
+        all axes. If not specified, a grid spacing of unity is implied.
+    return_distances : bool, optional
+        Whether to calculate the distance transform.
+        Default is True.
+    return_indices : bool, optional
+        Whether to calculate the feature transform.
+        Default is False.
+    distances : ndarray, optional
+        An output array to store the calculated distance transform, instead of
+        returning it.
+        `return_distances` must be True.
+        It must be the same shape as `input`, and of type float64 if `metric`
+        is 'euclidean', uint32 otherwise.
+    indices : int32 ndarray, optional
+        An output array to store the calculated feature transform, instead of
+        returning it.
+        `return_indices` must be True.
+        Its shape must be `(input.ndim,) + input.shape`.
+
+    Returns
+    -------
+    distances : ndarray, optional
+        The calculated distance transform. Returned only when
+        `return_distances` is True and `distances` is not supplied.
+        It will have the same shape as the input array.
+    indices : int32 ndarray, optional
+        The calculated feature transform. It has an input-shaped array for each
+        dimension of the input. See distance_transform_edt documentation for an
+        example.
+        Returned only when `return_indices` is True and `indices` is not
+        supplied.
+
+    Notes
+    -----
+    This function employs a slow brute force algorithm. See also the
+    function `distance_transform_cdt` for more efficient taxicab and
+    chessboard algorithms.
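+
+    Examples
+    --------
+    A small sketch with the 'taxicab' metric: every foreground pixel of the
+    block below touches the background, so its distance is 1 (the uint32
+    dtype is used for the non-euclidean metrics):
+
+    >>> import numpy as np
+    >>> from scipy import ndimage
+    >>> a = np.array([[0, 0, 0, 0],
+    ...               [0, 1, 1, 0],
+    ...               [0, 1, 1, 0],
+    ...               [0, 0, 0, 0]])
+    >>> ndimage.distance_transform_bf(a, metric='taxicab')
+    array([[0, 0, 0, 0],
+           [0, 1, 1, 0],
+           [0, 1, 1, 0],
+           [0, 0, 0, 0]], dtype=uint32)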
+
+    """
+    ft_inplace = isinstance(indices, numpy.ndarray)
+    dt_inplace = isinstance(distances, numpy.ndarray)
+    _distance_transform_arg_check(
+        dt_inplace, ft_inplace, return_distances, return_indices
+    )
+
+    tmp1 = numpy.asarray(input) != 0
+    struct = generate_binary_structure(tmp1.ndim, tmp1.ndim)
+    tmp2 = binary_dilation(tmp1, struct)
+    tmp2 = numpy.logical_xor(tmp1, tmp2)
+    tmp1 = tmp1.astype(numpy.int8) - tmp2.astype(numpy.int8)
+    metric = metric.lower()
+    if metric == 'euclidean':
+        metric = 1
+    elif metric in ['taxicab', 'cityblock', 'manhattan']:
+        metric = 2
+    elif metric == 'chessboard':
+        metric = 3
+    else:
+        raise RuntimeError('distance metric not supported')
+    if sampling is not None:
+        sampling = _ni_support._normalize_sequence(sampling, tmp1.ndim)
+        sampling = numpy.asarray(sampling, dtype=numpy.float64)
+        if not sampling.flags.contiguous:
+            sampling = sampling.copy()
+    if return_indices:
+        ft = numpy.zeros(tmp1.shape, dtype=numpy.int32)
+    else:
+        ft = None
+    if return_distances:
+        if distances is None:
+            if metric == 1:
+                dt = numpy.zeros(tmp1.shape, dtype=numpy.float64)
+            else:
+                dt = numpy.zeros(tmp1.shape, dtype=numpy.uint32)
+        else:
+            if distances.shape != tmp1.shape:
+                raise RuntimeError('distances array has wrong shape')
+            if metric == 1:
+                if distances.dtype.type != numpy.float64:
+                    raise RuntimeError('distances array must be float64')
+            else:
+                if distances.dtype.type != numpy.uint32:
+                    raise RuntimeError('distances array must be uint32')
+            dt = distances
+    else:
+        dt = None
+
+    _nd_image.distance_transform_bf(tmp1, metric, sampling, dt, ft)
+    if return_indices:
+        if isinstance(indices, numpy.ndarray):
+            if indices.dtype.type != numpy.int32:
+                raise RuntimeError('indices array must be int32')
+            if indices.shape != (tmp1.ndim,) + tmp1.shape:
+                raise RuntimeError('indices array has wrong shape')
+            tmp2 = indices
+        else:
+            tmp2 = numpy.indices(tmp1.shape, dtype=numpy.int32)
+        ft = numpy.ravel(ft)
+        for ii in range(tmp2.shape[0]):
+            rtmp = numpy.ravel(tmp2[ii, ...])[ft]
+            rtmp.shape = tmp1.shape
+            tmp2[ii, ...] = rtmp
+        ft = tmp2
+
+    # construct and return the result
+    result = []
+    if return_distances and not dt_inplace:
+        result.append(dt)
+    if return_indices and not ft_inplace:
+        result.append(ft)
+
+    if len(result) == 2:
+        return tuple(result)
+    elif len(result) == 1:
+        return result[0]
+    else:
+        return None
+
+
+def distance_transform_cdt(input, metric='chessboard', return_distances=True,
+                           return_indices=False, distances=None, indices=None):
+    """
+    Distance transform for chamfer-type transforms.
+
+    In addition to the distance transform, the feature transform can
+    be calculated. In this case the index of the closest background
+    element to each foreground element is returned in a separate array.
+
+    Parameters
+    ----------
+    input : array_like
+        Input
+    metric : {'chessboard', 'taxicab'} or array_like, optional
+        The `metric` determines the type of chamfering that is done. If the
+        `metric` is equal to 'taxicab', a structure is generated using
+        generate_binary_structure with a squared distance equal to 1. If
+        the `metric` is equal to 'chessboard', a structure is generated
+        using generate_binary_structure with a squared distance equal to
+        the dimensionality of the array. These choices correspond to the
+        common interpretations of the 'taxicab' and the 'chessboard'
+        distance metrics in two dimensions.
+        A custom metric may be provided, in the form of a matrix where
+        each dimension has a length of three.
+        'cityblock' and 'manhattan' are also valid, and map to 'taxicab'.
+        The default is 'chessboard'.
+    return_distances : bool, optional
+        Whether to calculate the distance transform.
+        Default is True.
+    return_indices : bool, optional
+        Whether to calculate the feature transform.
+        Default is False.
+    distances : int32 ndarray, optional
+        An output array to store the calculated distance transform, instead of
+        returning it.
+        `return_distances` must be True.
+        It must be the same shape as `input`.
+    indices : int32 ndarray, optional
+        An output array to store the calculated feature transform, instead of
+        returning it.
+        `return_indices` must be True.
+        Its shape must be `(input.ndim,) + input.shape`.
+
+    Returns
+    -------
+    distances : int32 ndarray, optional
+        The calculated distance transform. Returned only when
+        `return_distances` is True, and `distances` is not supplied.
+        It will have the same shape as the input array.
+    indices : int32 ndarray, optional
+        The calculated feature transform. It has an input-shaped array for each
+        dimension of the input. See distance_transform_edt documentation for an
+        example.
+        Returned only when `return_indices` is True, and `indices` is not
+        supplied.
+
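+    Examples
+    --------
+    A small sketch with the 'chessboard' metric: the center of the block
+    below is two king moves from the background, the rest of the block one:
+
+    >>> import numpy as np
+    >>> from scipy import ndimage
+    >>> a = np.array([[0, 0, 0, 0, 0],
+    ...               [0, 1, 1, 1, 0],
+    ...               [0, 1, 1, 1, 0],
+    ...               [0, 1, 1, 1, 0],
+    ...               [0, 0, 0, 0, 0]])
+    >>> ndimage.distance_transform_cdt(a, metric='chessboard')
+    array([[0, 0, 0, 0, 0],
+           [0, 1, 1, 1, 0],
+           [0, 1, 2, 1, 0],
+           [0, 1, 1, 1, 0],
+           [0, 0, 0, 0, 0]], dtype=int32)
+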
+    """
+    ft_inplace = isinstance(indices, numpy.ndarray)
+    dt_inplace = isinstance(distances, numpy.ndarray)
+    _distance_transform_arg_check(
+        dt_inplace, ft_inplace, return_distances, return_indices
+    )
+    input = numpy.asarray(input)
+    if metric in ['taxicab', 'cityblock', 'manhattan']:
+        rank = input.ndim
+        metric = generate_binary_structure(rank, 1)
+    elif metric == 'chessboard':
+        rank = input.ndim
+        metric = generate_binary_structure(rank, rank)
+    else:
+        try:
+            metric = numpy.asarray(metric)
+        except Exception as e:
+            raise RuntimeError('invalid metric provided') from e
+        for s in metric.shape:
+            if s != 3:
+                raise RuntimeError('metric sizes must be equal to 3')
+
+    if not metric.flags.contiguous:
+        metric = metric.copy()
+    if dt_inplace:
+        if distances.dtype.type != numpy.int32:
+            raise RuntimeError('distances must be of int32 type')
+        if distances.shape != input.shape:
+            raise RuntimeError('distances has wrong shape')
+        dt = distances
+        dt[...] = numpy.where(input, -1, 0).astype(numpy.int32)
+    else:
+        dt = numpy.where(input, -1, 0).astype(numpy.int32)
+
+    rank = dt.ndim
+    if return_indices:
+        sz = numpy.prod(dt.shape, axis=0)
+        ft = numpy.arange(sz, dtype=numpy.int32)
+        ft.shape = dt.shape
+    else:
+        ft = None
+
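+    # Two chamfer passes: one forward over the array and one backward over
+    # the reversed array, so distances propagate in from every direction.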
+    _nd_image.distance_transform_op(metric, dt, ft)
+    dt = dt[tuple([slice(None, None, -1)] * rank)]
+    if return_indices:
+        ft = ft[tuple([slice(None, None, -1)] * rank)]
+    _nd_image.distance_transform_op(metric, dt, ft)
+    dt = dt[tuple([slice(None, None, -1)] * rank)]
+    if return_indices:
+        ft = ft[tuple([slice(None, None, -1)] * rank)]
+        ft = numpy.ravel(ft)
+        if ft_inplace:
+            if indices.dtype.type != numpy.int32:
+                raise RuntimeError('indices array must be int32')
+            if indices.shape != (dt.ndim,) + dt.shape:
+                raise RuntimeError('indices array has wrong shape')
+            tmp = indices
+        else:
+            tmp = numpy.indices(dt.shape, dtype=numpy.int32)
+        for ii in range(tmp.shape[0]):
+            rtmp = numpy.ravel(tmp[ii, ...])[ft]
+            rtmp.shape = dt.shape
+            tmp[ii, ...] = rtmp
+        ft = tmp
+
+    # construct and return the result
+    result = []
+    if return_distances and not dt_inplace:
+        result.append(dt)
+    if return_indices and not ft_inplace:
+        result.append(ft)
+
+    if len(result) == 2:
+        return tuple(result)
+    elif len(result) == 1:
+        return result[0]
+    else:
+        return None
+
+
+def distance_transform_edt(input, sampling=None, return_distances=True,
+                           return_indices=False, distances=None, indices=None):
+    """
+    Exact Euclidean distance transform.
+
+    In addition to the distance transform, the feature transform can
+    be calculated. In this case the index of the closest background
+    element to each foreground element is returned in a separate array.
+
+    Parameters
+    ----------
+    input : array_like
+        Input data to transform. Can be any type but will be converted
+        into binary: 1 wherever input equates to True, 0 elsewhere.
+    sampling : float, or sequence of float, optional
+        Spacing of elements along each dimension. If a sequence, must be of
+        length equal to the input rank; if a single number, this is used for
+        all axes. If not specified, a grid spacing of unity is implied.
+    return_distances : bool, optional
+        Whether to calculate the distance transform.
+        Default is True.
+    return_indices : bool, optional
+        Whether to calculate the feature transform.
+        Default is False.
+    distances : float64 ndarray, optional
+        An output array to store the calculated distance transform, instead of
+        returning it.
+        `return_distances` must be True.
+        It must be the same shape as `input`.
+    indices : int32 ndarray, optional
+        An output array to store the calculated feature transform, instead of
+        returning it.
+        `return_indices` must be True.
+        Its shape must be `(input.ndim,) + input.shape`.
+
+    Returns
+    -------
+    distances : float64 ndarray, optional
+        The calculated distance transform. Returned only when
+        `return_distances` is True and `distances` is not supplied.
+        It will have the same shape as the input array.
+    indices : int32 ndarray, optional
+        The calculated feature transform. It has an input-shaped array for each
+        dimension of the input. See example below.
+        Returned only when `return_indices` is True and `indices` is not
+        supplied.
+
+    Notes
+    -----
+    The Euclidean distance transform gives values of the Euclidean
+    distance::
+
+      y_i = sqrt(sum_{j=1..n} (x[i]_j - b[i]_j)**2)
+
+    where b[i] is the background point (value 0) with the smallest
+    Euclidean distance to input points x[i], and n is the
+    number of dimensions.
+
+    Examples
+    --------
+    >>> from scipy import ndimage
+    >>> import numpy as np
+    >>> a = np.array(([0,1,1,1,1],
+    ...               [0,0,1,1,1],
+    ...               [0,1,1,1,1],
+    ...               [0,1,1,1,0],
+    ...               [0,1,1,0,0]))
+    >>> ndimage.distance_transform_edt(a)
+    array([[ 0.    ,  1.    ,  1.4142,  2.2361,  3.    ],
+           [ 0.    ,  0.    ,  1.    ,  2.    ,  2.    ],
+           [ 0.    ,  1.    ,  1.4142,  1.4142,  1.    ],
+           [ 0.    ,  1.    ,  1.4142,  1.    ,  0.    ],
+           [ 0.    ,  1.    ,  1.    ,  0.    ,  0.    ]])
+
+    With a sampling of 2 units along x, 1 along y:
+
+    >>> ndimage.distance_transform_edt(a, sampling=[2,1])
+    array([[ 0.    ,  1.    ,  2.    ,  2.8284,  3.6056],
+           [ 0.    ,  0.    ,  1.    ,  2.    ,  3.    ],
+           [ 0.    ,  1.    ,  2.    ,  2.2361,  2.    ],
+           [ 0.    ,  1.    ,  2.    ,  1.    ,  0.    ],
+           [ 0.    ,  1.    ,  1.    ,  0.    ,  0.    ]])
+
+    Asking for indices as well:
+
+    >>> edt, inds = ndimage.distance_transform_edt(a, return_indices=True)
+    >>> inds
+    array([[[0, 0, 1, 1, 3],
+            [1, 1, 1, 1, 3],
+            [2, 2, 1, 3, 3],
+            [3, 3, 4, 4, 3],
+            [4, 4, 4, 4, 4]],
+           [[0, 0, 1, 1, 4],
+            [0, 1, 1, 1, 4],
+            [0, 0, 1, 4, 4],
+            [0, 0, 3, 3, 4],
+            [0, 0, 3, 3, 4]]])
+
+    With arrays provided for inplace outputs:
+
+    >>> indices = np.zeros(((np.ndim(a),) + a.shape), dtype=np.int32)
+    >>> ndimage.distance_transform_edt(a, return_indices=True, indices=indices)
+    array([[ 0.    ,  1.    ,  1.4142,  2.2361,  3.    ],
+           [ 0.    ,  0.    ,  1.    ,  2.    ,  2.    ],
+           [ 0.    ,  1.    ,  1.4142,  1.4142,  1.    ],
+           [ 0.    ,  1.    ,  1.4142,  1.    ,  0.    ],
+           [ 0.    ,  1.    ,  1.    ,  0.    ,  0.    ]])
+    >>> indices
+    array([[[0, 0, 1, 1, 3],
+            [1, 1, 1, 1, 3],
+            [2, 2, 1, 3, 3],
+            [3, 3, 4, 4, 3],
+            [4, 4, 4, 4, 4]],
+           [[0, 0, 1, 1, 4],
+            [0, 1, 1, 1, 4],
+            [0, 0, 1, 4, 4],
+            [0, 0, 3, 3, 4],
+            [0, 0, 3, 3, 4]]])
+
+    """
+    ft_inplace = isinstance(indices, numpy.ndarray)
+    dt_inplace = isinstance(distances, numpy.ndarray)
+    _distance_transform_arg_check(
+        dt_inplace, ft_inplace, return_distances, return_indices
+    )
+
+    # calculate the feature transform
+    input = numpy.atleast_1d(numpy.where(input, 1, 0).astype(numpy.int8))
+    if sampling is not None:
+        sampling = _ni_support._normalize_sequence(sampling, input.ndim)
+        sampling = numpy.asarray(sampling, dtype=numpy.float64)
+        if not sampling.flags.contiguous:
+            sampling = sampling.copy()
+
+    if ft_inplace:
+        ft = indices
+        if ft.shape != (input.ndim,) + input.shape:
+            raise RuntimeError('indices array has wrong shape')
+        if ft.dtype.type != numpy.int32:
+            raise RuntimeError('indices array must be int32')
+    else:
+        ft = numpy.zeros((input.ndim,) + input.shape, dtype=numpy.int32)
+
+    _nd_image.euclidean_feature_transform(input, sampling, ft)
+    # if requested, calculate the distance transform
+    if return_distances:
+        dt = ft - numpy.indices(input.shape, dtype=ft.dtype)
+        dt = dt.astype(numpy.float64)
+        if sampling is not None:
+            for ii in range(len(sampling)):
+                dt[ii, ...] *= sampling[ii]
+        numpy.multiply(dt, dt, dt)
+        if dt_inplace:
+            dt = numpy.add.reduce(dt, axis=0)
+            if distances.shape != dt.shape:
+                raise RuntimeError('distances array has wrong shape')
+            if distances.dtype.type != numpy.float64:
+                raise RuntimeError('distances array must be float64')
+            numpy.sqrt(dt, distances)
+        else:
+            dt = numpy.add.reduce(dt, axis=0)
+            dt = numpy.sqrt(dt)
+
+    # construct and return the result
+    result = []
+    if return_distances and not dt_inplace:
+        result.append(dt)
+    if return_indices and not ft_inplace:
+        result.append(ft)
+
+    if len(result) == 2:
+        return tuple(result)
+    elif len(result) == 1:
+        return result[0]
+    else:
+        return None
+
+
+def _distance_transform_arg_check(distances_out, indices_out,
+                                  return_distances, return_indices):
+    """Raise a RuntimeError if the arguments are invalid"""
+    error_msgs = []
+    if (not return_distances) and (not return_indices):
+        error_msgs.append(
+            'at least one of return_distances/return_indices must be True')
+    if distances_out and not return_distances:
+        error_msgs.append(
+            'return_distances must be True if distances is supplied'
+        )
+    if indices_out and not return_indices:
+        error_msgs.append('return_indices must be True if indices is supplied')
+    if error_msgs:
+        raise RuntimeError(', '.join(error_msgs))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/ndimage/_ni_docstrings.py b/__packaged__/coreml/.python_dependencies/scipy/ndimage/_ni_docstrings.py
new file mode 100644
index 00000000..c3ccba7c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/ndimage/_ni_docstrings.py
@@ -0,0 +1,208 @@
+"""Docstring components common to several ndimage functions."""
+from scipy._lib import doccer
+
+__all__ = ['docfiller']
+
+
+_input_doc = (
+"""input : array_like
+    The input array.""")
+_axis_doc = (
+"""axis : int, optional
+    The axis of `input` along which to calculate. Default is -1.""")
+_output_doc = (
+"""output : array or dtype, optional
+    The array in which to place the output, or the dtype of the
+    returned array. By default an array of the same dtype as input
+    will be created.""")
+_size_foot_doc = (
+"""size : scalar or tuple, optional
+    See footprint, below. Ignored if footprint is given.
+footprint : array, optional
+    Either `size` or `footprint` must be defined. `size` gives
+    the shape that is taken from the input array, at every element
+    position, to define the input to the filter function.
+    `footprint` is a boolean array that specifies (implicitly) a
+    shape, but also which of the elements within this shape will get
+    passed to the filter function. Thus ``size=(n,m)`` is equivalent
+    to ``footprint=np.ones((n,m))``.  We adjust `size` to the number
+    of dimensions of the input array, so that, if the input array is
+    shape (10,10,10), and `size` is 2, then the actual size used is
+    (2,2,2). When `footprint` is given, `size` is ignored.""")
+_mode_reflect_doc = (
+"""mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
+    The `mode` parameter determines how the input array is extended
+    beyond its boundaries. Default is 'reflect'. Behavior for each valid
+    value is as follows:
+
+    'reflect' (`d c b a | a b c d | d c b a`)
+        The input is extended by reflecting about the edge of the last
+        pixel. This mode is also sometimes referred to as half-sample
+        symmetric.
+
+    'constant' (`k k k k | a b c d | k k k k`)
+        The input is extended by filling all values beyond the edge with
+        the same constant value, defined by the `cval` parameter.
+
+    'nearest' (`a a a a | a b c d | d d d d`)
+        The input is extended by replicating the last pixel.
+
+    'mirror' (`d c b | a b c d | c b a`)
+        The input is extended by reflecting about the center of the last
+        pixel. This mode is also sometimes referred to as whole-sample
+        symmetric.
+
+    'wrap' (`a b c d | a b c d | a b c d`)
+        The input is extended by wrapping around to the opposite edge.
+
+    For consistency with the interpolation functions, the following mode
+    names can also be used:
+
+    'grid-mirror'
+        This is a synonym for 'reflect'.
+
+    'grid-constant'
+        This is a synonym for 'constant'.
+
+    'grid-wrap'
+        This is a synonym for 'wrap'.""")
+
+_mode_interp_constant_doc = (
+"""mode : {'reflect', 'grid-mirror', 'constant', 'grid-constant', 'nearest', \
+           'mirror', 'grid-wrap', 'wrap'}, optional
+    The `mode` parameter determines how the input array is extended
+    beyond its boundaries. Default is 'constant'. Behavior for each valid
+    value is as follows (see additional plots and details on
+    :ref:`boundary modes <ndimage-interpolation-modes>`):
+
+    'reflect' (`d c b a | a b c d | d c b a`)
+        The input is extended by reflecting about the edge of the last
+        pixel. This mode is also sometimes referred to as half-sample
+        symmetric.
+
+    'grid-mirror'
+        This is a synonym for 'reflect'.
+
+    'constant' (`k k k k | a b c d | k k k k`)
+        The input is extended by filling all values beyond the edge with
+        the same constant value, defined by the `cval` parameter. No
+        interpolation is performed beyond the edges of the input.
+
+    'grid-constant' (`k k k k | a b c d | k k k k`)
+        The input is extended by filling all values beyond the edge with
+        the same constant value, defined by the `cval` parameter. Interpolation
+        occurs for samples outside the input's extent as well.
+
+    'nearest' (`a a a a | a b c d | d d d d`)
+        The input is extended by replicating the last pixel.
+
+    'mirror' (`d c b | a b c d | c b a`)
+        The input is extended by reflecting about the center of the last
+        pixel. This mode is also sometimes referred to as whole-sample
+        symmetric.
+
+    'grid-wrap' (`a b c d | a b c d | a b c d`)
+        The input is extended by wrapping around to the opposite edge.
+
+    'wrap' (`d b c d | a b c d | b c a b`)
+        The input is extended by wrapping around to the opposite edge, but in a
+        way such that the last point and initial point exactly overlap. In this
+        case it is not well defined which sample will be chosen at the point of
+        overlap.""")
+_mode_interp_mirror_doc = (
+    _mode_interp_constant_doc.replace("Default is 'constant'",
+                                      "Default is 'mirror'")
+)
+assert _mode_interp_mirror_doc != _mode_interp_constant_doc, \
+    'Default not replaced'
+
+_mode_multiple_doc = (
+"""mode : str or sequence, optional
+    The `mode` parameter determines how the input array is extended
+    when the filter overlaps a border. By passing a sequence of modes
+    with length equal to the number of dimensions of the input array,
+    different modes can be specified along each axis. Default value is
+    'reflect'. The valid values and their behavior are as follows:
+
+    'reflect' (`d c b a | a b c d | d c b a`)
+        The input is extended by reflecting about the edge of the last
+        pixel. This mode is also sometimes referred to as half-sample
+        symmetric.
+
+    'constant' (`k k k k | a b c d | k k k k`)
+        The input is extended by filling all values beyond the edge with
+        the same constant value, defined by the `cval` parameter.
+
+    'nearest' (`a a a a | a b c d | d d d d`)
+        The input is extended by replicating the last pixel.
+
+    'mirror' (`d c b | a b c d | c b a`)
+        The input is extended by reflecting about the center of the last
+        pixel. This mode is also sometimes referred to as whole-sample
+        symmetric.
+
+    'wrap' (`a b c d | a b c d | a b c d`)
+        The input is extended by wrapping around to the opposite edge.
+
+    For consistency with the interpolation functions, the following mode
+    names can also be used:
+
+    'grid-constant'
+        This is a synonym for 'constant'.
+
+    'grid-mirror'
+        This is a synonym for 'reflect'.
+
+    'grid-wrap'
+        This is a synonym for 'wrap'.""")
+_cval_doc = (
+"""cval : scalar, optional
+    Value to fill past edges of input if `mode` is 'constant'. Default
+    is 0.0.""")
+_origin_doc = (
+"""origin : int, optional
+    Controls the placement of the filter on the input array's pixels.
+    A value of 0 (the default) centers the filter over the pixel, with
+    positive values shifting the filter to the left, and negative ones
+    to the right.""")
+_origin_multiple_doc = (
+"""origin : int or sequence, optional
+    Controls the placement of the filter on the input array's pixels.
+    A value of 0 (the default) centers the filter over the pixel, with
+    positive values shifting the filter to the left, and negative ones
+    to the right. By passing a sequence of origins with length equal to
+    the number of dimensions of the input array, different shifts can
+    be specified along each axis.""")
+_extra_arguments_doc = (
+"""extra_arguments : sequence, optional
+    Sequence of extra positional arguments to pass to passed function.""")
+_extra_keywords_doc = (
+"""extra_keywords : dict, optional
+    dict of extra keyword arguments to pass to passed function.""")
+_prefilter_doc = (
+"""prefilter : bool, optional
+    Determines if the input array is prefiltered with `spline_filter`
+    before interpolation. The default is True, which will create a
+    temporary `float64` array of filtered values if `order > 1`. If
+    setting this to False, the output will be slightly blurred if
+    `order > 1`, unless the input is prefiltered, i.e. it is the result
+    of calling `spline_filter` on the original input.""")
+
+docdict = {
+    'input': _input_doc,
+    'axis': _axis_doc,
+    'output': _output_doc,
+    'size_foot': _size_foot_doc,
+    'mode_interp_constant': _mode_interp_constant_doc,
+    'mode_interp_mirror': _mode_interp_mirror_doc,
+    'mode_reflect': _mode_reflect_doc,
+    'mode_multiple': _mode_multiple_doc,
+    'cval': _cval_doc,
+    'origin': _origin_doc,
+    'origin_multiple': _origin_multiple_doc,
+    'extra_arguments': _extra_arguments_doc,
+    'extra_keywords': _extra_keywords_doc,
+    'prefilter': _prefilter_doc
+    }
+
+docfiller = doccer.filldoc(docdict)
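+
+# A minimal usage sketch (``some_filter`` is a hypothetical name, not part
+# of this module): decorating a function with ``docfiller`` replaces
+# ``%(name)s`` placeholders in its docstring with the matching ``docdict``
+# entries::
+#
+#     @docfiller
+#     def some_filter(input, axis=-1, output=None):
+#         """Apply a filter along an axis.
+#
+#         Parameters
+#         ----------
+#         %(input)s
+#         %(axis)s
+#         %(output)s
+#         """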
diff --git a/__packaged__/coreml/.python_dependencies/scipy/ndimage/_ni_support.py b/__packaged__/coreml/.python_dependencies/scipy/ndimage/_ni_support.py
new file mode 100644
index 00000000..e8f39ed5
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/ndimage/_ni_support.py
@@ -0,0 +1,97 @@
+# Copyright (C) 2003-2005 Peter J. Verveer
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above
+#    copyright notice, this list of conditions and the following
+#    disclaimer in the documentation and/or other materials provided
+#    with the distribution.
+#
+# 3. The name of the author may not be used to endorse or promote
+#    products derived from this software without specific prior
+#    written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from collections.abc import Iterable
+import warnings
+import numpy
+
+
+def _extend_mode_to_code(mode):
+    """Convert an extension mode to the corresponding integer code.
+    """
+    if mode == 'nearest':
+        return 0
+    elif mode == 'wrap':
+        return 1
+    elif mode in ['reflect', 'grid-mirror']:
+        return 2
+    elif mode == 'mirror':
+        return 3
+    elif mode == 'constant':
+        return 4
+    elif mode == 'grid-wrap':
+        return 5
+    elif mode == 'grid-constant':
+        return 6
+    else:
+        raise RuntimeError('boundary mode not supported')
+
+
+def _normalize_sequence(input, rank):
+    """If input is a scalar, create a sequence of length equal to the
+    rank by duplicating the input. If input is a sequence,
+    check that its length is equal to the rank.
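+
+    A small sketch of both branches:
+
+    >>> _normalize_sequence(3, 2)
+    [3, 3]
+    >>> _normalize_sequence([3, 4], 2)
+    [3, 4]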
+    """
+    is_str = isinstance(input, str)
+    if not is_str and isinstance(input, Iterable):
+        normalized = list(input)
+        if len(normalized) != rank:
+            err = "sequence argument must have length equal to input rank"
+            raise RuntimeError(err)
+    else:
+        normalized = [input] * rank
+    return normalized
+
+
+def _get_output(output, input, shape=None, complex_output=False):
+    if shape is None:
+        shape = input.shape
+    if output is None:
+        if not complex_output:
+            output = numpy.zeros(shape, dtype=input.dtype.name)
+        else:
+            complex_type = numpy.promote_types(input.dtype, numpy.complex64)
+            output = numpy.zeros(shape, dtype=complex_type)
+    elif isinstance(output, (type, numpy.dtype)):
+        # Classes (like `np.float32`) and dtypes are interpreted as dtype
+        if complex_output and numpy.dtype(output).kind != 'c':
+            warnings.warn("promoting specified output dtype to complex")
+            output = numpy.promote_types(output, numpy.complex64)
+        output = numpy.zeros(shape, dtype=output)
+    elif isinstance(output, str):
+        output = numpy.sctypeDict[output]
+        if complex_output and numpy.dtype(output).kind != 'c':
+            raise RuntimeError("output must have complex dtype")
+        output = numpy.zeros(shape, dtype=output)
+    elif output.shape != shape:
+        raise RuntimeError("output shape not correct")
+    elif complex_output and output.dtype.kind != 'c':
+        raise RuntimeError("output must have complex dtype")
+    return output
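+
+# The branches above mean `output` can be requested four ways (sketch):
+#   * None                -> zeros allocated with the input's dtype
+#   * a type/numpy.dtype  -> zeros of that dtype (promoted to complex, with a
+#                            warning, when complex_output=True)
+#   * a dtype-name string -> resolved through numpy.sctypeDict, then as above
+#   * an existing ndarray -> used in place after shape/complexness checks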
diff --git a/__packaged__/coreml/.python_dependencies/scipy/ndimage/filters.py b/__packaged__/coreml/.python_dependencies/scipy/ndimage/filters.py
new file mode 100644
index 00000000..13296544
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/ndimage/filters.py
@@ -0,0 +1,35 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.ndimage` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _filters
+
+
+__all__ = [  # noqa: F822
+    'correlate1d', 'convolve1d', 'gaussian_filter1d',
+    'gaussian_filter', 'prewitt', 'sobel', 'generic_laplace',
+    'laplace', 'gaussian_laplace', 'generic_gradient_magnitude',
+    'gaussian_gradient_magnitude', 'correlate', 'convolve',
+    'uniform_filter1d', 'uniform_filter', 'minimum_filter1d',
+    'maximum_filter1d', 'minimum_filter', 'maximum_filter',
+    'rank_filter', 'median_filter', 'percentile_filter',
+    'generic_filter1d', 'generic_filter', 'normalize_axis_index'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.ndimage.filters is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.ndimage instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.ndimage` namespace, "
+                  "the `scipy.ndimage.filters` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_filters, name)
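+
+# This __getattr__/__dir__ pair is the PEP 562 lazy-attribute pattern used by
+# all of the deprecated scipy.ndimage.* shim modules below. Illustrative
+# session (the import still works, but warns):
+#
+#     >>> from scipy.ndimage.filters import median_filter  # DeprecationWarning
+#     >>> import scipy.ndimage
+#     >>> median_filter is scipy.ndimage.median_filter
+#     True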
diff --git a/__packaged__/coreml/.python_dependencies/scipy/ndimage/fourier.py b/__packaged__/coreml/.python_dependencies/scipy/ndimage/fourier.py
new file mode 100644
index 00000000..edf11ceb
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/ndimage/fourier.py
@@ -0,0 +1,29 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.ndimage` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _fourier
+
+
+__all__ = [  # noqa: F822
+    'fourier_gaussian', 'fourier_uniform',
+    'fourier_ellipsoid', 'fourier_shift', 'normalize_axis_index'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.ndimage.fourier is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.ndimage instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.ndimage` namespace, "
+                  "the `scipy.ndimage.fourier` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_fourier, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/ndimage/interpolation.py b/__packaged__/coreml/.python_dependencies/scipy/ndimage/interpolation.py
new file mode 100644
index 00000000..90fbbf57
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/ndimage/interpolation.py
@@ -0,0 +1,31 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.ndimage` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _interpolation
+
+
+__all__ = [  # noqa: F822
+    'spline_filter1d', 'spline_filter',
+    'geometric_transform', 'map_coordinates',
+    'affine_transform', 'shift', 'zoom', 'rotate',
+    'normalize_axis_index', 'docfiller'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.ndimage.interpolation is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.ndimage instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.ndimage` namespace, "
+                  "the `scipy.ndimage.interpolation` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_interpolation, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/ndimage/measurements.py b/__packaged__/coreml/.python_dependencies/scipy/ndimage/measurements.py
new file mode 100644
index 00000000..a13026f8
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/ndimage/measurements.py
@@ -0,0 +1,32 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.ndimage` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _measurements
+
+
+__all__ = [  # noqa: F822
+    'label', 'find_objects', 'labeled_comprehension',
+    'sum', 'mean', 'variance', 'standard_deviation',
+    'minimum', 'maximum', 'median', 'minimum_position',
+    'maximum_position', 'extrema', 'center_of_mass',
+    'histogram', 'watershed_ift', 'sum_labels'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.ndimage.measurements is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.ndimage instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.ndimage` namespace, "
+                  "the `scipy.ndimage.measurements` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_measurements, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/ndimage/morphology.py b/__packaged__/coreml/.python_dependencies/scipy/ndimage/morphology.py
new file mode 100644
index 00000000..5102c673
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/ndimage/morphology.py
@@ -0,0 +1,35 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.ndimage` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _morphology
+
+
+__all__ = [  # noqa: F822
+    'iterate_structure', 'generate_binary_structure',
+    'binary_erosion', 'binary_dilation', 'binary_opening',
+    'binary_closing', 'binary_hit_or_miss', 'binary_propagation',
+    'binary_fill_holes', 'grey_erosion', 'grey_dilation',
+    'grey_opening', 'grey_closing', 'morphological_gradient',
+    'morphological_laplace', 'white_tophat', 'black_tophat',
+    'distance_transform_bf', 'distance_transform_cdt',
+    'distance_transform_edt'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.ndimage.morphology is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.ndimage instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.ndimage` namespace, "
+                  "the `scipy.ndimage.morphology` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_morphology, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/ndimage/tests/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/ndimage/tests/__init__.py
new file mode 100644
index 00000000..2853e0d7
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/ndimage/tests/__init__.py
@@ -0,0 +1,15 @@
+
+from __future__ import annotations
+from typing import List, Type
+import numpy
+
+# list of numarray data types
+integer_types: List[Type] = [
+    numpy.int8, numpy.uint8, numpy.int16, numpy.uint16,
+    numpy.int32, numpy.uint32, numpy.int64, numpy.uint64]
+
+float_types: List[Type] = [numpy.float32, numpy.float64]
+
+complex_types: List[Type] = [numpy.complex64, numpy.complex128]
+
+types: List[Type] = integer_types + float_types
diff --git a/__packaged__/coreml/.python_dependencies/scipy/ndimage/tests/data/label_inputs.txt b/__packaged__/coreml/.python_dependencies/scipy/ndimage/tests/data/label_inputs.txt
new file mode 100644
index 00000000..6c3cff3b
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/ndimage/tests/data/label_inputs.txt
@@ -0,0 +1,21 @@
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 0 1 1 1
+1 1 0 0 0 1 1
+1 0 1 0 1 0 1
+0 0 0 1 0 0 0
+1 0 1 0 1 0 1
+1 1 0 0 0 1 1
+1 1 1 0 1 1 1
+1 0 1 1 1 0 1
+0 0 0 1 0 0 0
+1 0 0 1 0 0 1
+1 1 1 1 1 1 1
+1 0 0 1 0 0 1
+0 0 0 1 0 0 0
+1 0 1 1 1 0 1
diff --git a/__packaged__/coreml/.python_dependencies/scipy/ndimage/tests/data/label_results.txt b/__packaged__/coreml/.python_dependencies/scipy/ndimage/tests/data/label_results.txt
new file mode 100644
index 00000000..c239b036
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/ndimage/tests/data/label_results.txt
@@ -0,0 +1,294 @@
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+2 2 2 2 2 2 2
+3 3 3 3 3 3 3
+4 4 4 4 4 4 4
+5 5 5 5 5 5 5
+6 6 6 6 6 6 6
+7 7 7 7 7 7 7
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 2 3 4 5 6 7
+8 9 10 11 12 13 14
+15 16 17 18 19 20 21
+22 23 24 25 26 27 28
+29 30 31 32 33 34 35
+36 37 38 39 40 41 42
+43 44 45 46 47 48 49
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 2 3 4 5 6 7
+8 1 2 3 4 5 6
+9 8 1 2 3 4 5
+10 9 8 1 2 3 4
+11 10 9 8 1 2 3
+12 11 10 9 8 1 2
+13 12 11 10 9 8 1
+1 2 3 4 5 6 7
+1 2 3 4 5 6 7
+1 2 3 4 5 6 7
+1 2 3 4 5 6 7
+1 2 3 4 5 6 7
+1 2 3 4 5 6 7
+1 2 3 4 5 6 7
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 2 1 2 1 2 1
+2 1 2 1 2 1 2
+1 2 1 2 1 2 1
+2 1 2 1 2 1 2
+1 2 1 2 1 2 1
+2 1 2 1 2 1 2
+1 2 1 2 1 2 1
+1 2 3 4 5 6 7
+2 3 4 5 6 7 8
+3 4 5 6 7 8 9
+4 5 6 7 8 9 10
+5 6 7 8 9 10 11
+6 7 8 9 10 11 12
+7 8 9 10 11 12 13
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 1 1 1 1
+1 1 1 0 2 2 2
+1 1 0 0 0 2 2
+1 0 3 0 2 0 4
+0 0 0 2 0 0 0
+5 0 2 0 6 0 7
+2 2 0 0 0 7 7
+2 2 2 0 7 7 7
+1 1 1 0 2 2 2
+1 1 0 0 0 2 2
+3 0 1 0 4 0 2
+0 0 0 1 0 0 0
+5 0 6 0 1 0 7
+5 5 0 0 0 1 1
+5 5 5 0 1 1 1
+1 1 1 0 2 2 2
+3 3 0 0 0 4 4
+5 0 6 0 7 0 8
+0 0 0 9 0 0 0
+10 0 11 0 12 0 13
+14 14 0 0 0 15 15
+16 16 16 0 17 17 17
+1 1 1 0 2 3 3
+1 1 0 0 0 3 3
+1 0 4 0 3 0 3
+0 0 0 3 0 0 0
+3 0 3 0 5 0 6
+3 3 0 0 0 6 6
+3 3 7 0 6 6 6
+1 2 3 0 4 5 6
+7 8 0 0 0 9 10
+11 0 12 0 13 0 14
+0 0 0 15 0 0 0
+16 0 17 0 18 0 19
+20 21 0 0 0 22 23
+24 25 26 0 27 28 29
+1 1 1 0 2 2 2
+1 1 0 0 0 2 2
+1 0 3 0 2 0 2
+0 0 0 2 0 0 0
+2 0 2 0 4 0 5
+2 2 0 0 0 5 5
+2 2 2 0 5 5 5
+1 1 1 0 2 2 2
+1 1 0 0 0 2 2
+1 0 3 0 4 0 2
+0 0 0 5 0 0 0
+6 0 7 0 8 0 9
+6 6 0 0 0 9 9
+6 6 6 0 9 9 9
+1 2 3 0 4 5 6
+7 1 0 0 0 4 5
+8 0 1 0 9 0 4
+0 0 0 1 0 0 0
+10 0 11 0 1 0 12
+13 10 0 0 0 1 14
+15 13 10 0 16 17 1
+1 2 3 0 4 5 6
+1 2 0 0 0 5 6
+1 0 7 0 8 0 6
+0 0 0 9 0 0 0
+10 0 11 0 12 0 13
+10 14 0 0 0 15 13
+10 14 16 0 17 15 13
+1 1 1 0 1 1 1
+1 1 0 0 0 1 1
+1 0 1 0 1 0 1
+0 0 0 1 0 0 0
+1 0 1 0 1 0 1
+1 1 0 0 0 1 1
+1 1 1 0 1 1 1
+1 1 2 0 3 3 3
+1 1 0 0 0 3 3
+1 0 1 0 4 0 3
+0 0 0 1 0 0 0
+5 0 6 0 1 0 1
+5 5 0 0 0 1 1
+5 5 5 0 7 1 1
+1 2 1 0 1 3 1
+2 1 0 0 0 1 3
+1 0 1 0 1 0 1
+0 0 0 1 0 0 0
+1 0 1 0 1 0 1
+4 1 0 0 0 1 5
+1 4 1 0 1 5 1
+1 2 3 0 4 5 6
+2 3 0 0 0 6 7
+3 0 8 0 6 0 9
+0 0 0 6 0 0 0
+10 0 6 0 11 0 12
+13 6 0 0 0 12 14
+6 15 16 0 12 14 17
+1 1 1 0 2 2 2
+1 1 0 0 0 2 2
+1 0 1 0 3 0 2
+0 0 0 1 0 0 0
+4 0 5 0 1 0 1
+4 4 0 0 0 1 1
+4 4 4 0 1 1 1
+1 0 2 2 2 0 3
+0 0 0 2 0 0 0
+4 0 0 5 0 0 5
+5 5 5 5 5 5 5
+5 0 0 5 0 0 6
+0 0 0 7 0 0 0
+8 0 7 7 7 0 9
+1 0 2 2 2 0 3
+0 0 0 2 0 0 0
+4 0 0 4 0 0 5
+4 4 4 4 4 4 4
+6 0 0 4 0 0 4
+0 0 0 7 0 0 0
+8 0 7 7 7 0 9
+1 0 2 2 2 0 3
+0 0 0 4 0 0 0
+5 0 0 6 0 0 7
+8 8 8 8 8 8 8
+9 0 0 10 0 0 11
+0 0 0 12 0 0 0
+13 0 14 14 14 0 15
+1 0 2 3 3 0 4
+0 0 0 3 0 0 0
+5 0 0 3 0 0 6
+5 5 3 3 3 6 6
+5 0 0 3 0 0 6
+0 0 0 3 0 0 0
+7 0 3 3 8 0 9
+1 0 2 3 4 0 5
+0 0 0 6 0 0 0
+7 0 0 8 0 0 9
+10 11 12 13 14 15 16
+17 0 0 18 0 0 19
+0 0 0 20 0 0 0
+21 0 22 23 24 0 25
+1 0 2 2 2 0 3
+0 0 0 2 0 0 0
+2 0 0 2 0 0 2
+2 2 2 2 2 2 2
+2 0 0 2 0 0 2
+0 0 0 2 0 0 0
+4 0 2 2 2 0 5
+1 0 2 2 2 0 3
+0 0 0 2 0 0 0
+2 0 0 2 0 0 2
+2 2 2 2 2 2 2
+2 0 0 2 0 0 2
+0 0 0 2 0 0 0
+4 0 2 2 2 0 5
+1 0 2 3 4 0 5
+0 0 0 2 0 0 0
+6 0 0 7 0 0 8
+9 6 10 11 7 12 13
+14 0 0 10 0 0 12
+0 0 0 15 0 0 0
+16 0 17 18 15 0 19
+1 0 2 3 4 0 5
+0 0 0 3 0 0 0
+6 0 0 3 0 0 7
+6 8 9 3 10 11 7
+6 0 0 3 0 0 7
+0 0 0 3 0 0 0
+12 0 13 3 14 0 15
+1 0 2 2 2 0 3
+0 0 0 2 0 0 0
+2 0 0 2 0 0 2
+2 2 2 2 2 2 2
+2 0 0 2 0 0 2
+0 0 0 2 0 0 0
+4 0 2 2 2 0 5
+1 0 2 2 3 0 4
+0 0 0 2 0 0 0
+5 0 0 2 0 0 6
+5 5 2 2 2 6 6
+5 0 0 2 0 0 6
+0 0 0 2 0 0 0
+7 0 8 2 2 0 9
+1 0 2 3 2 0 4
+0 0 0 2 0 0 0
+5 0 0 6 0 0 7
+8 5 6 9 6 7 10
+5 0 0 6 0 0 7
+0 0 0 11 0 0 0
+12 0 11 13 11 0 14
+1 0 2 3 4 0 5
+0 0 0 4 0 0 0
+6 0 0 7 0 0 8
+9 10 7 11 12 8 13
+10 0 0 12 0 0 14
+0 0 0 15 0 0 0
+16 0 15 17 18 0 19
+1 0 2 2 2 0 3
+0 0 0 2 0 0 0
+2 0 0 2 0 0 2
+2 2 2 2 2 2 2
+2 0 0 2 0 0 2
+0 0 0 2 0 0 0
+4 0 2 2 2 0 5
diff --git a/__packaged__/coreml/.python_dependencies/scipy/ndimage/tests/data/label_strels.txt b/__packaged__/coreml/.python_dependencies/scipy/ndimage/tests/data/label_strels.txt
new file mode 100644
index 00000000..35ae8121
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/ndimage/tests/data/label_strels.txt
@@ -0,0 +1,42 @@
+0 0 1
+1 1 1
+1 0 0
+1 0 0
+1 1 1
+0 0 1
+0 0 0
+1 1 1
+0 0 0
+0 1 1
+0 1 0
+1 1 0
+0 0 0
+0 0 0
+0 0 0
+0 1 1
+1 1 1
+1 1 0
+0 1 0
+1 1 1
+0 1 0
+1 0 0
+0 1 0
+0 0 1
+0 1 0
+0 1 0
+0 1 0
+1 1 1
+1 1 1
+1 1 1
+1 1 0
+0 1 0
+0 1 1
+1 0 1
+0 1 0
+1 0 1
+0 0 1
+0 1 0
+1 0 0
+1 1 0
+1 1 1
+0 1 1
diff --git a/__packaged__/coreml/.python_dependencies/scipy/ndimage/tests/dots.png b/__packaged__/coreml/.python_dependencies/scipy/ndimage/tests/dots.png
new file mode 100644
index 00000000..640030ca
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/ndimage/tests/dots.png differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/ndimage/tests/test_c_api.py b/__packaged__/coreml/.python_dependencies/scipy/ndimage/tests/test_c_api.py
new file mode 100644
index 00000000..4e9a5f81
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/ndimage/tests/test_c_api.py
@@ -0,0 +1,94 @@
+import numpy as np
+from numpy.testing import assert_allclose
+
+from scipy import ndimage
+from scipy.ndimage import _ctest
+from scipy.ndimage import _cytest
+from scipy._lib._ccallback import LowLevelCallable
+
+FILTER1D_FUNCTIONS = [
+    lambda filter_size: _ctest.filter1d(filter_size),
+    lambda filter_size: _cytest.filter1d(filter_size, with_signature=False),
+    lambda filter_size: LowLevelCallable(_cytest.filter1d(filter_size, with_signature=True)),
+    lambda filter_size: LowLevelCallable.from_cython(_cytest, "_filter1d",
+                                                     _cytest.filter1d_capsule(filter_size)),
+]
+
+FILTER2D_FUNCTIONS = [
+    lambda weights: _ctest.filter2d(weights),
+    lambda weights: _cytest.filter2d(weights, with_signature=False),
+    lambda weights: LowLevelCallable(_cytest.filter2d(weights, with_signature=True)),
+    lambda weights: LowLevelCallable.from_cython(_cytest, "_filter2d", _cytest.filter2d_capsule(weights)),
+]
+
+TRANSFORM_FUNCTIONS = [
+    lambda shift: _ctest.transform(shift),
+    lambda shift: _cytest.transform(shift, with_signature=False),
+    lambda shift: LowLevelCallable(_cytest.transform(shift, with_signature=True)),
+    lambda shift: LowLevelCallable.from_cython(_cytest, "_transform", _cytest.transform_capsule(shift)),
+]
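+
+# Each list above builds the same low-level callback four ways (sketch): a
+# capsule from a plain C extension (_ctest), a Cython-generated capsule
+# without and with an embedded signature (the latter wrapped explicitly in a
+# LowLevelCallable), and LowLevelCallable.from_cython with a user-data
+# capsule. The tests below check that all four behave identically to a pure
+# Python reference implementation.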
+
+
+def test_generic_filter():
+    def filter2d(footprint_elements, weights):
+        return (weights*footprint_elements).sum()
+
+    def check(j):
+        func = FILTER2D_FUNCTIONS[j]
+
+        im = np.ones((20, 20))
+        im[:10, :10] = 0
+        footprint = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
+        footprint_size = np.count_nonzero(footprint)
+        weights = np.ones(footprint_size)/footprint_size
+
+        res = ndimage.generic_filter(im, func(weights),
+                                     footprint=footprint)
+        std = ndimage.generic_filter(im, filter2d, footprint=footprint,
+                                     extra_arguments=(weights,))
+        assert_allclose(res, std, err_msg="#{} failed".format(j))
+
+    for j in range(len(FILTER2D_FUNCTIONS)):
+        check(j)
+
+
+def test_generic_filter1d():
+    def filter1d(input_line, output_line, filter_size):
+        for i in range(output_line.size):
+            output_line[i] = 0
+            for j in range(filter_size):
+                output_line[i] += input_line[i+j]
+        output_line /= filter_size
+
+    def check(j):
+        func = FILTER1D_FUNCTIONS[j]
+
+        im = np.tile(np.hstack((np.zeros(10), np.ones(10))), (10, 1))
+        filter_size = 3
+
+        res = ndimage.generic_filter1d(im, func(filter_size),
+                                       filter_size)
+        std = ndimage.generic_filter1d(im, filter1d, filter_size,
+                                       extra_arguments=(filter_size,))
+        assert_allclose(res, std, err_msg="#{} failed".format(j))
+
+    for j in range(len(FILTER1D_FUNCTIONS)):
+        check(j)
+
+
+def test_geometric_transform():
+    def transform(output_coordinates, shift):
+        return output_coordinates[0] - shift, output_coordinates[1] - shift
+
+    def check(j):
+        func = TRANSFORM_FUNCTIONS[j]
+
+        im = np.arange(12).reshape(4, 3).astype(np.float64)
+        shift = 0.5
+
+        res = ndimage.geometric_transform(im, func(shift))
+        std = ndimage.geometric_transform(im, transform, extra_arguments=(shift,))
+        assert_allclose(res, std, err_msg="#{} failed".format(j))
+
+    for j in range(len(TRANSFORM_FUNCTIONS)):
+        check(j)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/ndimage/tests/test_datatypes.py b/__packaged__/coreml/.python_dependencies/scipy/ndimage/tests/test_datatypes.py
new file mode 100644
index 00000000..327cc5ac
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/ndimage/tests/test_datatypes.py
@@ -0,0 +1,66 @@
+""" Testing data types for ndimage calls
+"""
+import sys
+
+import numpy as np
+from numpy.testing import assert_array_almost_equal, assert_
+import pytest
+
+from scipy import ndimage
+
+
+def test_map_coordinates_dts():
+    # check that ndimage accepts different data types for interpolation
+    data = np.array([[4, 1, 3, 2],
+                     [7, 6, 8, 5],
+                     [3, 5, 3, 6]])
+    shifted_data = np.array([[0, 0, 0, 0],
+                             [0, 4, 1, 3],
+                             [0, 7, 6, 8]])
+    idx = np.indices(data.shape)
+    dts = (np.uint8, np.uint16, np.uint32, np.uint64,
+           np.int8, np.int16, np.int32, np.int64,
+           np.intp, np.uintp, np.float32, np.float64)
+    for order in range(0, 6):
+        for data_dt in dts:
+            these_data = data.astype(data_dt)
+            for coord_dt in dts:
+                # affine mapping
+                mat = np.eye(2, dtype=coord_dt)
+                off = np.zeros((2,), dtype=coord_dt)
+                out = ndimage.affine_transform(these_data, mat, off)
+                assert_array_almost_equal(these_data, out)
+                # map coordinates
+                coords_m1 = idx.astype(coord_dt) - 1
+                coords_p10 = idx.astype(coord_dt) + 10
+                out = ndimage.map_coordinates(these_data, coords_m1, order=order)
+                assert_array_almost_equal(out, shifted_data)
+                # check constant fill works
+                out = ndimage.map_coordinates(these_data, coords_p10, order=order)
+                assert_array_almost_equal(out, np.zeros((3, 4)))
+            # check shift and zoom
+            out = ndimage.shift(these_data, 1)
+            assert_array_almost_equal(out, shifted_data)
+            out = ndimage.zoom(these_data, 1)
+            assert_array_almost_equal(these_data, out)
+
+
+@pytest.mark.xfail(sys.platform != 'darwin', reason="runs only on darwin")
+def test_uint64_max():
+    # Test interpolation respects uint64 max.  Reported to fail at least on
+    # win32 (due to the 32 bit visual C compiler using signed int64 when
+    # converting between uint64 to double) and Debian on s390x.
+    # Interpolation is always done in double precision floating point, so
+    # we use the largest uint64 value for which int(float(big)) still fits
+    # in a uint64.
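+    # (Sketch of the arithmetic: doubles near 2**64 are spaced 2**11 = 2048
+    # apart, so 2**64 - 1025 rounds down to the representable 2**64 - 2048,
+    # while 2**64 - 1024 would tie and round-to-even up to 2**64, which
+    # overflows uint64.)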
+    big = 2**64 - 1025
+    arr = np.array([big, big, big], dtype=np.uint64)
+    # Tests geometric transform (map_coordinates, affine_transform)
+    inds = np.indices(arr.shape) - 0.1
+    x = ndimage.map_coordinates(arr, inds)
+    assert_(x[1] == int(float(big)))
+    assert_(x[2] == int(float(big)))
+    # Tests zoom / shift
+    x = ndimage.shift(arr, 0.1)
+    assert_(x[1] == int(float(big)))
+    assert_(x[2] == int(float(big)))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/ndimage/tests/test_filters.py b/__packaged__/coreml/.python_dependencies/scipy/ndimage/tests/test_filters.py
new file mode 100644
index 00000000..af7d8f42
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/ndimage/tests/test_filters.py
@@ -0,0 +1,1995 @@
+''' Some tests for filters '''
+import functools
+import math
+import numpy
+
+from numpy.testing import (assert_equal, assert_allclose,
+                           assert_array_almost_equal,
+                           assert_array_equal, assert_almost_equal,
+                           suppress_warnings, assert_)
+import pytest
+from pytest import raises as assert_raises
+
+from scipy import ndimage
+from scipy.ndimage._filters import _gaussian_kernel1d
+
+from . import types, float_types, complex_types
+
+
+def sumsq(a, b):
+    return math.sqrt(((a - b)**2).sum())
+
+
+def _complex_correlate(array, kernel, real_dtype, convolve=False,
+                       mode="reflect", cval=0, ):
+    """Utility to perform a reference complex-valued convolutions.
+
+    When convolve==False, correlation is performed instead
+    """
+    array = numpy.asarray(array)
+    kernel = numpy.asarray(kernel)
+    complex_array = array.dtype.kind == 'c'
+    complex_kernel = kernel.dtype.kind == 'c'
+    if array.ndim == 1:
+        func = ndimage.convolve1d if convolve else ndimage.correlate1d
+    else:
+        func = ndimage.convolve if convolve else ndimage.correlate
+    if not convolve:
+        kernel = kernel.conj()
+    if complex_array and complex_kernel:
+        # use: real(cval) for array.real component
+        #      imag(cval) for array.imag component
+        output = (
+            func(array.real, kernel.real, output=real_dtype,
+                 mode=mode, cval=numpy.real(cval)) -
+            func(array.imag, kernel.imag, output=real_dtype,
+                 mode=mode, cval=numpy.imag(cval)) +
+            1j * func(array.imag, kernel.real, output=real_dtype,
+                      mode=mode, cval=numpy.imag(cval)) +
+            1j * func(array.real, kernel.imag, output=real_dtype,
+                      mode=mode, cval=numpy.real(cval))
+        )
+    elif complex_array:
+        output = (
+            func(array.real, kernel, output=real_dtype, mode=mode,
+                 cval=numpy.real(cval)) +
+            1j * func(array.imag, kernel, output=real_dtype, mode=mode,
+                      cval=numpy.imag(cval))
+        )
+    elif complex_kernel:
+        # real array so cval is real too
+        output = (
+            func(array, kernel.real, output=real_dtype, mode=mode, cval=cval) +
+            1j * func(array, kernel.imag, output=real_dtype, mode=mode,
+                      cval=cval)
+        )
+    return output
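+
+# The four real correlations above follow from complex multiplication
+# (sketch): with a = ar + 1j*ai and k = kr + 1j*ki,
+#     a * k = (ar*kr - ai*ki) + 1j*(ai*kr + ar*ki)
+# applied elementwise over the sliding window (the kernel having been
+# pre-conjugated above for the correlation case), so a complex correlation
+# or convolution decomposes into real ones on the real/imaginary parts.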
+
+
+class TestNdimageFilters:
+
+    def _validate_complex(self, array, kernel, type2, mode='reflect', cval=0):
+        # utility for validating complex-valued correlations
+        real_dtype = numpy.asarray([], dtype=type2).real.dtype
+        expected = _complex_correlate(
+            array, kernel, real_dtype, convolve=False, mode=mode, cval=cval
+        )
+
+        if array.ndim == 1:
+            correlate = functools.partial(ndimage.correlate1d, axis=-1,
+                                          mode=mode, cval=cval)
+            convolve = functools.partial(ndimage.convolve1d, axis=-1,
+                                         mode=mode, cval=cval)
+        else:
+            correlate = functools.partial(ndimage.correlate, mode=mode,
+                                          cval=cval)
+            convolve = functools.partial(ndimage.convolve, mode=mode,
+                                          cval=cval)
+
+        # test correlate output dtype
+        output = correlate(array, kernel, output=type2)
+        assert_array_almost_equal(expected, output)
+        assert_equal(output.dtype.type, type2)
+
+        # test correlate with pre-allocated output
+        output = numpy.zeros_like(array, dtype=type2)
+        correlate(array, kernel, output=output)
+        assert_array_almost_equal(expected, output)
+
+        # test convolve output dtype
+        output = convolve(array, kernel, output=type2)
+        expected = _complex_correlate(
+            array, kernel, real_dtype, convolve=True, mode=mode, cval=cval,
+        )
+        assert_array_almost_equal(expected, output)
+        assert_equal(output.dtype.type, type2)
+
+        # convolve with pre-allocated output
+        convolve(array, kernel, output=output)
+        assert_array_almost_equal(expected, output)
+        assert_equal(output.dtype.type, type2)
+
+        # warns if the output is not a complex dtype
+        with pytest.warns(UserWarning,
+                          match="promoting specified output dtype to complex"):
+            correlate(array, kernel, output=real_dtype)
+
+        with pytest.warns(UserWarning,
+                          match="promoting specified output dtype to complex"):
+            convolve(array, kernel, output=real_dtype)
+
+        # raises if output array is provided, but is not complex-valued
+        output_real = numpy.zeros_like(array, dtype=real_dtype)
+        with assert_raises(RuntimeError):
+            correlate(array, kernel, output=output_real)
+
+        with assert_raises(RuntimeError):
+            convolve(array, kernel, output=output_real)
+
+    def test_correlate01(self):
+        array = numpy.array([1, 2])
+        weights = numpy.array([2])
+        expected = [2, 4]
+
+        output = ndimage.correlate(array, weights)
+        assert_array_almost_equal(output, expected)
+
+        output = ndimage.convolve(array, weights)
+        assert_array_almost_equal(output, expected)
+
+        output = ndimage.correlate1d(array, weights)
+        assert_array_almost_equal(output, expected)
+
+        output = ndimage.convolve1d(array, weights)
+        assert_array_almost_equal(output, expected)
+
+    def test_correlate01_overlap(self):
+        array = numpy.arange(256).reshape(16, 16)
+        weights = numpy.array([2])
+        expected = 2 * array
+
+        ndimage.correlate1d(array, weights, output=array)
+        assert_array_almost_equal(array, expected)
+
+    def test_correlate02(self):
+        array = numpy.array([1, 2, 3])
+        kernel = numpy.array([1])
+
+        output = ndimage.correlate(array, kernel)
+        assert_array_almost_equal(array, output)
+
+        output = ndimage.convolve(array, kernel)
+        assert_array_almost_equal(array, output)
+
+        output = ndimage.correlate1d(array, kernel)
+        assert_array_almost_equal(array, output)
+
+        output = ndimage.convolve1d(array, kernel)
+        assert_array_almost_equal(array, output)
+
+    def test_correlate03(self):
+        array = numpy.array([1])
+        weights = numpy.array([1, 1])
+        expected = [2]
+
+        output = ndimage.correlate(array, weights)
+        assert_array_almost_equal(output, expected)
+
+        output = ndimage.convolve(array, weights)
+        assert_array_almost_equal(output, expected)
+
+        output = ndimage.correlate1d(array, weights)
+        assert_array_almost_equal(output, expected)
+
+        output = ndimage.convolve1d(array, weights)
+        assert_array_almost_equal(output, expected)
+
+    def test_correlate04(self):
+        array = numpy.array([1, 2])
+        tcor = [2, 3]
+        tcov = [3, 4]
+        weights = numpy.array([1, 1])
+        output = ndimage.correlate(array, weights)
+        assert_array_almost_equal(output, tcor)
+        output = ndimage.convolve(array, weights)
+        assert_array_almost_equal(output, tcov)
+        output = ndimage.correlate1d(array, weights)
+        assert_array_almost_equal(output, tcor)
+        output = ndimage.convolve1d(array, weights)
+        assert_array_almost_equal(output, tcov)
+
+    def test_correlate05(self):
+        array = numpy.array([1, 2, 3])
+        tcor = [2, 3, 5]
+        tcov = [3, 5, 6]
+        kernel = numpy.array([1, 1])
+        output = ndimage.correlate(array, kernel)
+        assert_array_almost_equal(tcor, output)
+        output = ndimage.convolve(array, kernel)
+        assert_array_almost_equal(tcov, output)
+        output = ndimage.correlate1d(array, kernel)
+        assert_array_almost_equal(tcor, output)
+        output = ndimage.convolve1d(array, kernel)
+        assert_array_almost_equal(tcov, output)
+
+    def test_correlate06(self):
+        array = numpy.array([1, 2, 3])
+        tcor = [9, 14, 17]
+        tcov = [7, 10, 15]
+        weights = numpy.array([1, 2, 3])
+        output = ndimage.correlate(array, weights)
+        assert_array_almost_equal(output, tcor)
+        output = ndimage.convolve(array, weights)
+        assert_array_almost_equal(output, tcov)
+        output = ndimage.correlate1d(array, weights)
+        assert_array_almost_equal(output, tcor)
+        output = ndimage.convolve1d(array, weights)
+        assert_array_almost_equal(output, tcov)
+
+    def test_correlate07(self):
+        array = numpy.array([1, 2, 3])
+        expected = [5, 8, 11]
+        weights = numpy.array([1, 2, 1])
+        output = ndimage.correlate(array, weights)
+        assert_array_almost_equal(output, expected)
+        output = ndimage.convolve(array, weights)
+        assert_array_almost_equal(output, expected)
+        output = ndimage.correlate1d(array, weights)
+        assert_array_almost_equal(output, expected)
+        output = ndimage.convolve1d(array, weights)
+        assert_array_almost_equal(output, expected)
+
+    def test_correlate08(self):
+        array = numpy.array([1, 2, 3])
+        tcor = [1, 2, 5]
+        tcov = [3, 6, 7]
+        weights = numpy.array([1, 2, -1])
+        output = ndimage.correlate(array, weights)
+        assert_array_almost_equal(output, tcor)
+        output = ndimage.convolve(array, weights)
+        assert_array_almost_equal(output, tcov)
+        output = ndimage.correlate1d(array, weights)
+        assert_array_almost_equal(output, tcor)
+        output = ndimage.convolve1d(array, weights)
+        assert_array_almost_equal(output, tcov)
+
+    def test_correlate09(self):
+        array = []
+        kernel = numpy.array([1, 1])
+        output = ndimage.correlate(array, kernel)
+        assert_array_almost_equal(array, output)
+        output = ndimage.convolve(array, kernel)
+        assert_array_almost_equal(array, output)
+        output = ndimage.correlate1d(array, kernel)
+        assert_array_almost_equal(array, output)
+        output = ndimage.convolve1d(array, kernel)
+        assert_array_almost_equal(array, output)
+
+    def test_correlate10(self):
+        array = [[]]
+        kernel = numpy.array([[1, 1]])
+        output = ndimage.correlate(array, kernel)
+        assert_array_almost_equal(array, output)
+        output = ndimage.convolve(array, kernel)
+        assert_array_almost_equal(array, output)
+
+    def test_correlate11(self):
+        array = numpy.array([[1, 2, 3],
+                             [4, 5, 6]])
+        kernel = numpy.array([[1, 1],
+                              [1, 1]])
+        output = ndimage.correlate(array, kernel)
+        assert_array_almost_equal([[4, 6, 10], [10, 12, 16]], output)
+        output = ndimage.convolve(array, kernel)
+        assert_array_almost_equal([[12, 16, 18], [18, 22, 24]], output)
+
+    def test_correlate12(self):
+        array = numpy.array([[1, 2, 3],
+                             [4, 5, 6]])
+        kernel = numpy.array([[1, 0],
+                              [0, 1]])
+        output = ndimage.correlate(array, kernel)
+        assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
+        output = ndimage.convolve(array, kernel)
+        assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
+
+    @pytest.mark.parametrize('dtype_array', types)
+    @pytest.mark.parametrize('dtype_kernel', types)
+    def test_correlate13(self, dtype_array, dtype_kernel):
+        kernel = numpy.array([[1, 0],
+                              [0, 1]])
+        array = numpy.array([[1, 2, 3],
+                             [4, 5, 6]], dtype_array)
+        output = ndimage.correlate(array, kernel, output=dtype_kernel)
+        assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
+        assert_equal(output.dtype.type, dtype_kernel)
+
+        output = ndimage.convolve(array, kernel,
+                                  output=dtype_kernel)
+        assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
+        assert_equal(output.dtype.type, dtype_kernel)
+
+    @pytest.mark.parametrize('dtype_array', types)
+    @pytest.mark.parametrize('dtype_output', types)
+    def test_correlate14(self, dtype_array, dtype_output):
+        kernel = numpy.array([[1, 0],
+                              [0, 1]])
+        array = numpy.array([[1, 2, 3],
+                             [4, 5, 6]], dtype_array)
+        output = numpy.zeros(array.shape, dtype_output)
+        ndimage.correlate(array, kernel, output=output)
+        assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
+        assert_equal(output.dtype.type, dtype_output)
+
+        ndimage.convolve(array, kernel, output=output)
+        assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
+        assert_equal(output.dtype.type, dtype_output)
+
+    @pytest.mark.parametrize('dtype_array', types)
+    def test_correlate15(self, dtype_array):
+        kernel = numpy.array([[1, 0],
+                              [0, 1]])
+        array = numpy.array([[1, 2, 3],
+                             [4, 5, 6]], dtype_array)
+        output = ndimage.correlate(array, kernel, output=numpy.float32)
+        assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
+        assert_equal(output.dtype.type, numpy.float32)
+
+        output = ndimage.convolve(array, kernel, output=numpy.float32)
+        assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
+        assert_equal(output.dtype.type, numpy.float32)
+
+    @pytest.mark.parametrize('dtype_array', types)
+    def test_correlate16(self, dtype_array):
+        kernel = numpy.array([[0.5, 0],
+                              [0, 0.5]])
+        array = numpy.array([[1, 2, 3], [4, 5, 6]], dtype_array)
+        output = ndimage.correlate(array, kernel, output=numpy.float32)
+        assert_array_almost_equal([[1, 1.5, 2.5], [2.5, 3, 4]], output)
+        assert_equal(output.dtype.type, numpy.float32)
+
+        output = ndimage.convolve(array, kernel, output=numpy.float32)
+        assert_array_almost_equal([[3, 4, 4.5], [4.5, 5.5, 6]], output)
+        assert_equal(output.dtype.type, numpy.float32)
+
+    def test_correlate17(self):
+        array = numpy.array([1, 2, 3])
+        tcor = [3, 5, 6]
+        tcov = [2, 3, 5]
+        kernel = numpy.array([1, 1])
+        output = ndimage.correlate(array, kernel, origin=-1)
+        assert_array_almost_equal(tcor, output)
+        output = ndimage.convolve(array, kernel, origin=-1)
+        assert_array_almost_equal(tcov, output)
+        output = ndimage.correlate1d(array, kernel, origin=-1)
+        assert_array_almost_equal(tcor, output)
+        output = ndimage.convolve1d(array, kernel, origin=-1)
+        assert_array_almost_equal(tcov, output)
+
+    @pytest.mark.parametrize('dtype_array', types)
+    def test_correlate18(self, dtype_array):
+        kernel = numpy.array([[1, 0],
+                              [0, 1]])
+        array = numpy.array([[1, 2, 3],
+                             [4, 5, 6]], dtype_array)
+        output = ndimage.correlate(array, kernel,
+                                   output=numpy.float32,
+                                   mode='nearest', origin=-1)
+        assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
+        assert_equal(output.dtype.type, numpy.float32)
+
+        output = ndimage.convolve(array, kernel,
+                                  output=numpy.float32,
+                                  mode='nearest', origin=-1)
+        assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
+        assert_equal(output.dtype.type, numpy.float32)
+
+    def test_correlate_mode_sequence(self):
+        kernel = numpy.ones((2, 2))
+        array = numpy.ones((3, 3), float)
+        with assert_raises(RuntimeError):
+            ndimage.correlate(array, kernel, mode=['nearest', 'reflect'])
+        with assert_raises(RuntimeError):
+            ndimage.convolve(array, kernel, mode=['nearest', 'reflect'])
+
+    @pytest.mark.parametrize('dtype_array', types)
+    def test_correlate19(self, dtype_array):
+        kernel = numpy.array([[1, 0],
+                              [0, 1]])
+        array = numpy.array([[1, 2, 3],
+                             [4, 5, 6]], dtype_array)
+        output = ndimage.correlate(array, kernel,
+                                   output=numpy.float32,
+                                   mode='nearest', origin=[-1, 0])
+        assert_array_almost_equal([[5, 6, 8], [8, 9, 11]], output)
+        assert_equal(output.dtype.type, numpy.float32)
+
+        output = ndimage.convolve(array, kernel,
+                                  output=numpy.float32,
+                                  mode='nearest', origin=[-1, 0])
+        assert_array_almost_equal([[3, 5, 6], [6, 8, 9]], output)
+        assert_equal(output.dtype.type, numpy.float32)
+
+    @pytest.mark.parametrize('dtype_array', types)
+    @pytest.mark.parametrize('dtype_output', types)
+    def test_correlate20(self, dtype_array, dtype_output):
+        weights = numpy.array([1, 2, 1])
+        expected = [[5, 10, 15], [7, 14, 21]]
+        array = numpy.array([[1, 2, 3],
+                             [2, 4, 6]], dtype_array)
+        output = numpy.zeros((2, 3), dtype_output)
+        ndimage.correlate1d(array, weights, axis=0, output=output)
+        assert_array_almost_equal(output, expected)
+        ndimage.convolve1d(array, weights, axis=0, output=output)
+        assert_array_almost_equal(output, expected)
+
+    def test_correlate21(self):
+        array = numpy.array([[1, 2, 3],
+                             [2, 4, 6]])
+        expected = [[5, 10, 15], [7, 14, 21]]
+        weights = numpy.array([1, 2, 1])
+        output = ndimage.correlate1d(array, weights, axis=0)
+        assert_array_almost_equal(output, expected)
+        output = ndimage.convolve1d(array, weights, axis=0)
+        assert_array_almost_equal(output, expected)
+
+    @pytest.mark.parametrize('dtype_array', types)
+    @pytest.mark.parametrize('dtype_output', types)
+    def test_correlate22(self, dtype_array, dtype_output):
+        weights = numpy.array([1, 2, 1])
+        expected = [[6, 12, 18], [6, 12, 18]]
+        array = numpy.array([[1, 2, 3],
+                             [2, 4, 6]], dtype_array)
+        output = numpy.zeros((2, 3), dtype_output)
+        ndimage.correlate1d(array, weights, axis=0,
+                            mode='wrap', output=output)
+        assert_array_almost_equal(output, expected)
+        ndimage.convolve1d(array, weights, axis=0,
+                           mode='wrap', output=output)
+        assert_array_almost_equal(output, expected)
+
+    @pytest.mark.parametrize('dtype_array', types)
+    @pytest.mark.parametrize('dtype_output', types)
+    def test_correlate23(self, dtype_array, dtype_output):
+        weights = numpy.array([1, 2, 1])
+        expected = [[5, 10, 15], [7, 14, 21]]
+        array = numpy.array([[1, 2, 3],
+                             [2, 4, 6]], dtype_array)
+        output = numpy.zeros((2, 3), dtype_output)
+        ndimage.correlate1d(array, weights, axis=0,
+                            mode='nearest', output=output)
+        assert_array_almost_equal(output, expected)
+        ndimage.convolve1d(array, weights, axis=0,
+                           mode='nearest', output=output)
+        assert_array_almost_equal(output, expected)
+
+    @pytest.mark.parametrize('dtype_array', types)
+    @pytest.mark.parametrize('dtype_output', types)
+    def test_correlate24(self, dtype_array, dtype_output):
+        weights = numpy.array([1, 2, 1])
+        tcor = [[7, 14, 21], [8, 16, 24]]
+        tcov = [[4, 8, 12], [5, 10, 15]]
+        array = numpy.array([[1, 2, 3],
+                             [2, 4, 6]], dtype_array)
+        output = numpy.zeros((2, 3), dtype_output)
+        ndimage.correlate1d(array, weights, axis=0,
+                            mode='nearest', output=output, origin=-1)
+        assert_array_almost_equal(output, tcor)
+        ndimage.convolve1d(array, weights, axis=0,
+                           mode='nearest', output=output, origin=-1)
+        assert_array_almost_equal(output, tcov)
+
+    @pytest.mark.parametrize('dtype_array', types)
+    @pytest.mark.parametrize('dtype_output', types)
+    def test_correlate25(self, dtype_array, dtype_output):
+        weights = numpy.array([1, 2, 1])
+        tcor = [[4, 8, 12], [5, 10, 15]]
+        tcov = [[7, 14, 21], [8, 16, 24]]
+        array = numpy.array([[1, 2, 3],
+                             [2, 4, 6]], dtype_array)
+        output = numpy.zeros((2, 3), dtype_output)
+        ndimage.correlate1d(array, weights, axis=0,
+                            mode='nearest', output=output, origin=1)
+        assert_array_almost_equal(output, tcor)
+        ndimage.convolve1d(array, weights, axis=0,
+                           mode='nearest', output=output, origin=1)
+        assert_array_almost_equal(output, tcov)
+
+    def test_correlate26(self):
+        # test fix for gh-11661 (mirror extension of a length 1 signal)
+        y = ndimage.convolve1d(numpy.ones(1), numpy.ones(5), mode='mirror')
+        assert_array_equal(y, numpy.array(5.))
+
+        y = ndimage.correlate1d(numpy.ones(1), numpy.ones(5), mode='mirror')
+        assert_array_equal(y, numpy.array(5.))
+
+    @pytest.mark.parametrize('dtype_kernel', complex_types)
+    @pytest.mark.parametrize('dtype_input', types)
+    @pytest.mark.parametrize('dtype_output', complex_types)
+    def test_correlate_complex_kernel(self, dtype_input, dtype_kernel,
+                                      dtype_output):
+        kernel = numpy.array([[1, 0],
+                              [0, 1 + 1j]], dtype_kernel)
+        array = numpy.array([[1, 2, 3],
+                             [4, 5, 6]], dtype_input)
+        self._validate_complex(array, kernel, dtype_output)
+
+    @pytest.mark.parametrize('dtype_kernel', complex_types)
+    @pytest.mark.parametrize('dtype_input', types)
+    @pytest.mark.parametrize('dtype_output', complex_types)
+    @pytest.mark.parametrize('mode', ['grid-constant', 'constant'])
+    def test_correlate_complex_kernel_cval(self, dtype_input, dtype_kernel,
+                                           dtype_output, mode):
+        # test use of non-zero cval with complex inputs
+        # also verifies that mode 'grid-constant' does not segfault
+        kernel = numpy.array([[1, 0],
+                              [0, 1 + 1j]], dtype_kernel)
+        array = numpy.array([[1, 2, 3],
+                             [4, 5, 6]], dtype_input)
+        self._validate_complex(array, kernel, dtype_output, mode=mode,
+                               cval=5.0)
+
+    @pytest.mark.parametrize('dtype_kernel', complex_types)
+    @pytest.mark.parametrize('dtype_input', types)
+    def test_correlate_complex_kernel_invalid_cval(self, dtype_input,
+                                                   dtype_kernel):
+        # cannot give complex cval with a real image
+        kernel = numpy.array([[1, 0],
+                              [0, 1 + 1j]], dtype_kernel)
+        array = numpy.array([[1, 2, 3],
+                             [4, 5, 6]], dtype_input)
+        for func in [ndimage.convolve, ndimage.correlate, ndimage.convolve1d,
+                     ndimage.correlate1d]:
+            with pytest.raises(ValueError):
+                func(array, kernel, mode='constant', cval=5.0 + 1.0j,
+                     output=numpy.complex64)
+
+    @pytest.mark.parametrize('dtype_kernel', complex_types)
+    @pytest.mark.parametrize('dtype_input', types)
+    @pytest.mark.parametrize('dtype_output', complex_types)
+    def test_correlate1d_complex_kernel(self, dtype_input, dtype_kernel,
+                                        dtype_output):
+        kernel = numpy.array([1, 1 + 1j], dtype_kernel)
+        array = numpy.array([1, 2, 3, 4, 5, 6], dtype_input)
+        self._validate_complex(array, kernel, dtype_output)
+
+    @pytest.mark.parametrize('dtype_kernel', complex_types)
+    @pytest.mark.parametrize('dtype_input', types)
+    @pytest.mark.parametrize('dtype_output', complex_types)
+    def test_correlate1d_complex_kernel_cval(self, dtype_input, dtype_kernel,
+                                             dtype_output):
+        kernel = numpy.array([1, 1 + 1j], dtype_kernel)
+        array = numpy.array([1, 2, 3, 4, 5, 6], dtype_input)
+        self._validate_complex(array, kernel, dtype_output, mode='constant',
+                               cval=5.0)
+
+    @pytest.mark.parametrize('dtype_kernel', types)
+    @pytest.mark.parametrize('dtype_input', complex_types)
+    @pytest.mark.parametrize('dtype_output', complex_types)
+    def test_correlate_complex_input(self, dtype_input, dtype_kernel,
+                                     dtype_output):
+        kernel = numpy.array([[1, 0],
+                              [0, 1]], dtype_kernel)
+        array = numpy.array([[1, 2j, 3],
+                             [1 + 4j, 5, 6j]], dtype_input)
+        self._validate_complex(array, kernel, dtype_output)
+
+    @pytest.mark.parametrize('dtype_kernel', types)
+    @pytest.mark.parametrize('dtype_input', complex_types)
+    @pytest.mark.parametrize('dtype_output', complex_types)
+    def test_correlate1d_complex_input(self, dtype_input, dtype_kernel,
+                                       dtype_output):
+        kernel = numpy.array([1, 0, 1], dtype_kernel)
+        array = numpy.array([1, 2j, 3, 1 + 4j, 5, 6j], dtype_input)
+        self._validate_complex(array, kernel, dtype_output)
+
+    @pytest.mark.parametrize('dtype_kernel', types)
+    @pytest.mark.parametrize('dtype_input', complex_types)
+    @pytest.mark.parametrize('dtype_output', complex_types)
+    def test_correlate1d_complex_input_cval(self, dtype_input, dtype_kernel,
+                                            dtype_output):
+        kernel = numpy.array([1, 0, 1], dtype_kernel)
+        array = numpy.array([1, 2j, 3, 1 + 4j, 5, 6j], dtype_input)
+        self._validate_complex(array, kernel, dtype_output, mode='constant',
+                               cval=5 - 3j)
+
+    @pytest.mark.parametrize('dtype', complex_types)
+    @pytest.mark.parametrize('dtype_output', complex_types)
+    def test_correlate_complex_input_and_kernel(self, dtype, dtype_output):
+        kernel = numpy.array([[1, 0],
+                              [0, 1 + 1j]], dtype)
+        array = numpy.array([[1, 2j, 3],
+                             [1 + 4j, 5, 6j]], dtype)
+        self._validate_complex(array, kernel, dtype_output)
+
+    @pytest.mark.parametrize('dtype', complex_types)
+    @pytest.mark.parametrize('dtype_output', complex_types)
+    def test_correlate_complex_input_and_kernel_cval(self, dtype,
+                                                     dtype_output):
+        kernel = numpy.array([[1, 0],
+                              [0, 1 + 1j]], dtype)
+        array = numpy.array([[1, 2, 3],
+                             [4, 5, 6]], dtype)
+        self._validate_complex(array, kernel, dtype_output, mode='constant',
+                               cval=5.0 + 2.0j)
+
+    @pytest.mark.parametrize('dtype', complex_types)
+    @pytest.mark.parametrize('dtype_output', complex_types)
+    def test_correlate1d_complex_input_and_kernel(self, dtype, dtype_output):
+        kernel = numpy.array([1, 1 + 1j], dtype)
+        array = numpy.array([1, 2j, 3, 1 + 4j, 5, 6j], dtype)
+        self._validate_complex(array, kernel, dtype_output)
+
+    @pytest.mark.parametrize('dtype', complex_types)
+    @pytest.mark.parametrize('dtype_output', complex_types)
+    def test_correlate1d_complex_input_and_kernel_cval(self, dtype,
+                                                       dtype_output):
+        kernel = numpy.array([1, 1 + 1j], dtype)
+        array = numpy.array([1, 2j, 3, 1 + 4j, 5, 6j], dtype)
+        self._validate_complex(array, kernel, dtype_output, mode='constant',
+                               cval=5.0 + 2.0j)
+
+    def test_gauss01(self):
+        input = numpy.array([[1, 2, 3],
+                             [2, 4, 6]], numpy.float32)
+        output = ndimage.gaussian_filter(input, 0)
+        assert_array_almost_equal(output, input)
+
+    def test_gauss02(self):
+        input = numpy.array([[1, 2, 3],
+                             [2, 4, 6]], numpy.float32)
+        output = ndimage.gaussian_filter(input, 1.0)
+        assert_equal(input.dtype, output.dtype)
+        assert_equal(input.shape, output.shape)
+
+    def test_gauss03(self):
+        # single precision data
+        input = numpy.arange(100 * 100).astype(numpy.float32)
+        input.shape = (100, 100)
+        output = ndimage.gaussian_filter(input, [1.0, 1.0])
+
+        assert_equal(input.dtype, output.dtype)
+        assert_equal(input.shape, output.shape)
+
+        # input.sum() is 49995000.0.  With single precision floats, we can't
+        # expect more than 8 digits of accuracy, so use decimal=0 in this test.
+        assert_almost_equal(output.sum(dtype='d'), input.sum(dtype='d'),
+                            decimal=0)
+        assert_(sumsq(input, output) > 1.0)
+
+    def test_gauss04(self):
+        input = numpy.arange(100 * 100).astype(numpy.float32)
+        input.shape = (100, 100)
+        otype = numpy.float64
+        output = ndimage.gaussian_filter(input, [1.0, 1.0], output=otype)
+        assert_equal(output.dtype.type, numpy.float64)
+        assert_equal(input.shape, output.shape)
+        assert_(sumsq(input, output) > 1.0)
+
+    def test_gauss05(self):
+        input = numpy.arange(100 * 100).astype(numpy.float32)
+        input.shape = (100, 100)
+        otype = numpy.float64
+        output = ndimage.gaussian_filter(input, [1.0, 1.0],
+                                         order=1, output=otype)
+        assert_equal(output.dtype.type, numpy.float64)
+        assert_equal(input.shape, output.shape)
+        assert_(sumsq(input, output) > 1.0)
+
+    def test_gauss06(self):
+        input = numpy.arange(100 * 100).astype(numpy.float32)
+        input.shape = (100, 100)
+        otype = numpy.float64
+        output1 = ndimage.gaussian_filter(input, [1.0, 1.0], output=otype)
+        output2 = ndimage.gaussian_filter(input, 1.0, output=otype)
+        assert_array_almost_equal(output1, output2)
+
+    def test_gauss_memory_overlap(self):
+        input = numpy.arange(100 * 100).astype(numpy.float32)
+        input.shape = (100, 100)
+        output1 = ndimage.gaussian_filter(input, 1.0)
+        ndimage.gaussian_filter(input, 1.0, output=input)
+        assert_array_almost_equal(output1, input)
+
+    @pytest.mark.parametrize('dtype', types + complex_types)
+    def test_prewitt01(self, dtype):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [5, 8, 3, 7, 1],
+                             [5, 6, 9, 3, 5]], dtype)
+        t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0)
+        t = ndimage.correlate1d(t, [1.0, 1.0, 1.0], 1)
+        output = ndimage.prewitt(array, 0)
+        assert_array_almost_equal(t, output)
+
+    @pytest.mark.parametrize('dtype', types + complex_types)
+    def test_prewitt02(self, dtype):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [5, 8, 3, 7, 1],
+                             [5, 6, 9, 3, 5]], dtype)
+        t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0)
+        t = ndimage.correlate1d(t, [1.0, 1.0, 1.0], 1)
+        output = numpy.zeros(array.shape, dtype)
+        ndimage.prewitt(array, 0, output)
+        assert_array_almost_equal(t, output)
+
+    @pytest.mark.parametrize('dtype', types + complex_types)
+    def test_prewitt03(self, dtype):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [5, 8, 3, 7, 1],
+                             [5, 6, 9, 3, 5]], dtype)
+        t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 1)
+        t = ndimage.correlate1d(t, [1.0, 1.0, 1.0], 0)
+        output = ndimage.prewitt(array, 1)
+        assert_array_almost_equal(t, output)
+
+    @pytest.mark.parametrize('dtype', types + complex_types)
+    def test_prewitt04(self, dtype):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [5, 8, 3, 7, 1],
+                             [5, 6, 9, 3, 5]], dtype)
+        t = ndimage.prewitt(array, -1)
+        output = ndimage.prewitt(array, 1)
+        assert_array_almost_equal(t, output)
+
+    @pytest.mark.parametrize('dtype', types + complex_types)
+    def test_sobel01(sel, dtype):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [5, 8, 3, 7, 1],
+                             [5, 6, 9, 3, 5]], dtype)
+        t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0)
+        t = ndimage.correlate1d(t, [1.0, 2.0, 1.0], 1)
+        output = ndimage.sobel(array, 0)
+        assert_array_almost_equal(t, output)
+
+    @pytest.mark.parametrize('dtype', types + complex_types)
+    def test_sobel02(self, dtype):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [5, 8, 3, 7, 1],
+                             [5, 6, 9, 3, 5]], dtype)
+        t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0)
+        t = ndimage.correlate1d(t, [1.0, 2.0, 1.0], 1)
+        output = numpy.zeros(array.shape, dtype)
+        ndimage.sobel(array, 0, output)
+        assert_array_almost_equal(t, output)
+
+    @pytest.mark.parametrize('dtype', types + complex_types)
+    def test_sobel03(self, dtype):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [5, 8, 3, 7, 1],
+                             [5, 6, 9, 3, 5]], dtype)
+        t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 1)
+        t = ndimage.correlate1d(t, [1.0, 2.0, 1.0], 0)
+        output = ndimage.sobel(array, 1)
+        assert_array_almost_equal(t, output)
+
+    @pytest.mark.parametrize('dtype', types + complex_types)
+    def test_sobel04(self, dtype):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [5, 8, 3, 7, 1],
+                             [5, 6, 9, 3, 5]], dtype)
+        t = ndimage.sobel(array, -1)
+        output = ndimage.sobel(array, 1)
+        assert_array_almost_equal(t, output)
+
+    @pytest.mark.parametrize('dtype',
+                             [numpy.int32, numpy.float32, numpy.float64,
+                              numpy.complex64, numpy.complex128])
+    def test_laplace01(self, dtype):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [5, 8, 3, 7, 1],
+                             [5, 6, 9, 3, 5]], dtype) * 100
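+        # the Laplace filter is the sum of the second-difference kernel
+        # [1, -2, 1] correlated along each axis in turn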
+        tmp1 = ndimage.correlate1d(array, [1, -2, 1], 0)
+        tmp2 = ndimage.correlate1d(array, [1, -2, 1], 1)
+        output = ndimage.laplace(array)
+        assert_array_almost_equal(tmp1 + tmp2, output)
+
+    @pytest.mark.parametrize('dtype',
+                             [numpy.int32, numpy.float32, numpy.float64,
+                              numpy.complex64, numpy.complex128])
+    def test_laplace02(self, dtype):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [5, 8, 3, 7, 1],
+                             [5, 6, 9, 3, 5]], dtype) * 100
+        tmp1 = ndimage.correlate1d(array, [1, -2, 1], 0)
+        tmp2 = ndimage.correlate1d(array, [1, -2, 1], 1)
+        output = numpy.zeros(array.shape, dtype)
+        ndimage.laplace(array, output=output)
+        assert_array_almost_equal(tmp1 + tmp2, output)
+
+    @pytest.mark.parametrize('dtype',
+                             [numpy.int32, numpy.float32, numpy.float64,
+                              numpy.complex64, numpy.complex128])
+    def test_gaussian_laplace01(self, dtype):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [5, 8, 3, 7, 1],
+                             [5, 6, 9, 3, 5]], dtype) * 100
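+        # gaussian_laplace is the sum of the second-order Gaussian
+        # derivative computed along each axis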
+        tmp1 = ndimage.gaussian_filter(array, 1.0, [2, 0])
+        tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 2])
+        output = ndimage.gaussian_laplace(array, 1.0)
+        assert_array_almost_equal(tmp1 + tmp2, output)
+
+    @pytest.mark.parametrize('dtype',
+                             [numpy.int32, numpy.float32, numpy.float64,
+                              numpy.complex64, numpy.complex128])
+    def test_gaussian_laplace02(self, dtype):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [5, 8, 3, 7, 1],
+                             [5, 6, 9, 3, 5]], dtype) * 100
+        tmp1 = ndimage.gaussian_filter(array, 1.0, [2, 0])
+        tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 2])
+        output = numpy.zeros(array.shape, dtype)
+        ndimage.gaussian_laplace(array, 1.0, output)
+        assert_array_almost_equal(tmp1 + tmp2, output)
+
+    @pytest.mark.parametrize('dtype', types + complex_types)
+    def test_generic_laplace01(self, dtype):
+        def derivative2(input, axis, output, mode, cval, a, b):
+            sigma = [a, b / 2.0]
+            input = numpy.asarray(input)
+            order = [0] * input.ndim
+            order[axis] = 2
+            return ndimage.gaussian_filter(input, sigma, order,
+                                           output, mode, cval)
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [5, 8, 3, 7, 1],
+                             [5, 6, 9, 3, 5]], dtype)
+        output = numpy.zeros(array.shape, dtype)
+        tmp = ndimage.generic_laplace(array, derivative2,
+                                      extra_arguments=(1.0,),
+                                      extra_keywords={'b': 2.0})
+        ndimage.gaussian_laplace(array, 1.0, output)
+        assert_array_almost_equal(tmp, output)
+
+    @pytest.mark.parametrize('dtype',
+                             [numpy.int32, numpy.float32, numpy.float64,
+                              numpy.complex64, numpy.complex128])
+    def test_gaussian_gradient_magnitude01(self, dtype):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [5, 8, 3, 7, 1],
+                             [5, 6, 9, 3, 5]], dtype) * 100
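+        # the gradient magnitude is the root of the summed squares of the
+        # first-order Gaussian derivative along each axis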
+        tmp1 = ndimage.gaussian_filter(array, 1.0, [1, 0])
+        tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 1])
+        output = ndimage.gaussian_gradient_magnitude(array, 1.0)
+        expected = tmp1 * tmp1 + tmp2 * tmp2
+        expected = numpy.sqrt(expected).astype(dtype)
+        assert_array_almost_equal(expected, output)
+
+    @pytest.mark.parametrize('dtype',
+                             [numpy.int32, numpy.float32, numpy.float64,
+                              numpy.complex64, numpy.complex128])
+    def test_gaussian_gradient_magnitude02(self, dtype):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [5, 8, 3, 7, 1],
+                             [5, 6, 9, 3, 5]], dtype) * 100
+        tmp1 = ndimage.gaussian_filter(array, 1.0, [1, 0])
+        tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 1])
+        output = numpy.zeros(array.shape, dtype)
+        ndimage.gaussian_gradient_magnitude(array, 1.0, output)
+        expected = tmp1 * tmp1 + tmp2 * tmp2
+        expected = numpy.sqrt(expected).astype(dtype)
+        assert_array_almost_equal(expected, output)
+
+    def test_generic_gradient_magnitude01(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [5, 8, 3, 7, 1],
+                             [5, 6, 9, 3, 5]], numpy.float64)
+
+        def derivative(input, axis, output, mode, cval, a, b):
+            sigma = [a, b / 2.0]
+            input = numpy.asarray(input)
+            order = [0] * input.ndim
+            order[axis] = 1
+            return ndimage.gaussian_filter(input, sigma, order,
+                                           output, mode, cval)
+        tmp1 = ndimage.gaussian_gradient_magnitude(array, 1.0)
+        tmp2 = ndimage.generic_gradient_magnitude(
+            array, derivative, extra_arguments=(1.0,),
+            extra_keywords={'b': 2.0})
+        assert_array_almost_equal(tmp1, tmp2)
+
+    def test_uniform01(self):
+        array = numpy.array([2, 4, 6])
+        size = 2
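+        # origin=-1 shifts the size-2 window right, so every element is
+        # averaged with its right-hand neighbour (the edge value repeats)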
+        output = ndimage.uniform_filter1d(array, size, origin=-1)
+        assert_array_almost_equal([3, 5, 6], output)
+
+    def test_uniform01_complex(self):
+        array = numpy.array([2 + 1j, 4 + 2j, 6 + 3j], dtype=numpy.complex128)
+        size = 2
+        output = ndimage.uniform_filter1d(array, size, origin=-1)
+        assert_array_almost_equal([3, 5, 6], output.real)
+        assert_array_almost_equal([1.5, 2.5, 3], output.imag)
+
+    def test_uniform02(self):
+        array = numpy.array([1, 2, 3])
+        filter_shape = [0]
+        output = ndimage.uniform_filter(array, filter_shape)
+        assert_array_almost_equal(array, output)
+
+    def test_uniform03(self):
+        array = numpy.array([1, 2, 3])
+        filter_shape = [1]
+        output = ndimage.uniform_filter(array, filter_shape)
+        assert_array_almost_equal(array, output)
+
+    def test_uniform04(self):
+        array = numpy.array([2, 4, 6])
+        filter_shape = [2]
+        output = ndimage.uniform_filter(array, filter_shape)
+        assert_array_almost_equal([2, 3, 5], output)
+
+    def test_uniform05(self):
+        array = []
+        filter_shape = [1]
+        output = ndimage.uniform_filter(array, filter_shape)
+        assert_array_almost_equal([], output)
+
+    @pytest.mark.parametrize('dtype_array', types)
+    @pytest.mark.parametrize('dtype_output', types)
+    def test_uniform06(self, dtype_array, dtype_output):
+        filter_shape = [2, 2]
+        array = numpy.array([[4, 8, 12],
+                             [16, 20, 24]], dtype_array)
+        output = ndimage.uniform_filter(
+            array, filter_shape, output=dtype_output)
+        assert_array_almost_equal([[4, 6, 10], [10, 12, 16]], output)
+        assert_equal(output.dtype.type, dtype_output)
+
+    @pytest.mark.parametrize('dtype_array', complex_types)
+    @pytest.mark.parametrize('dtype_output', complex_types)
+    def test_uniform06_complex(self, dtype_array, dtype_output):
+        filter_shape = [2, 2]
+        array = numpy.array([[4, 8 + 5j, 12],
+                             [16, 20, 24]], dtype_array)
+        output = ndimage.uniform_filter(
+            array, filter_shape, output=dtype_output)
+        assert_array_almost_equal([[4, 6, 10], [10, 12, 16]], output.real)
+        assert_equal(output.dtype.type, dtype_output)
+
+    def test_minimum_filter01(self):
+        array = numpy.array([1, 2, 3, 4, 5])
+        filter_shape = numpy.array([2])
+        output = ndimage.minimum_filter(array, filter_shape)
+        assert_array_almost_equal([1, 1, 2, 3, 4], output)
+
+    def test_minimum_filter02(self):
+        array = numpy.array([1, 2, 3, 4, 5])
+        filter_shape = numpy.array([3])
+        output = ndimage.minimum_filter(array, filter_shape)
+        assert_array_almost_equal([1, 1, 2, 3, 4], output)
+
+    def test_minimum_filter03(self):
+        array = numpy.array([3, 2, 5, 1, 4])
+        filter_shape = numpy.array([2])
+        output = ndimage.minimum_filter(array, filter_shape)
+        assert_array_almost_equal([3, 2, 2, 1, 1], output)
+
+    def test_minimum_filter04(self):
+        array = numpy.array([3, 2, 5, 1, 4])
+        filter_shape = numpy.array([3])
+        output = ndimage.minimum_filter(array, filter_shape)
+        assert_array_almost_equal([2, 2, 1, 1, 1], output)
+
+    def test_minimum_filter05(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [7, 6, 9, 3, 5],
+                             [5, 8, 3, 7, 1]])
+        filter_shape = numpy.array([2, 3])
+        output = ndimage.minimum_filter(array, filter_shape)
+        assert_array_almost_equal([[2, 2, 1, 1, 1],
+                                   [2, 2, 1, 1, 1],
+                                   [5, 3, 3, 1, 1]], output)
+
+    def test_minimum_filter05_overlap(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [7, 6, 9, 3, 5],
+                             [5, 8, 3, 7, 1]])
+        filter_shape = numpy.array([2, 3])
+        ndimage.minimum_filter(array, filter_shape, output=array)
+        assert_array_almost_equal([[2, 2, 1, 1, 1],
+                                   [2, 2, 1, 1, 1],
+                                   [5, 3, 3, 1, 1]], array)
+
+    def test_minimum_filter06(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [7, 6, 9, 3, 5],
+                             [5, 8, 3, 7, 1]])
+        footprint = [[1, 1, 1], [1, 1, 1]]
+        output = ndimage.minimum_filter(array, footprint=footprint)
+        assert_array_almost_equal([[2, 2, 1, 1, 1],
+                                   [2, 2, 1, 1, 1],
+                                   [5, 3, 3, 1, 1]], output)
+        # separable footprint should allow mode sequence
+        output2 = ndimage.minimum_filter(array, footprint=footprint,
+                                         mode=['reflect', 'reflect'])
+        assert_array_almost_equal(output2, output)
+
+    def test_minimum_filter07(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [7, 6, 9, 3, 5],
+                             [5, 8, 3, 7, 1]])
+        footprint = [[1, 0, 1], [1, 1, 0]]
+        output = ndimage.minimum_filter(array, footprint=footprint)
+        assert_array_almost_equal([[2, 2, 1, 1, 1],
+                                   [2, 3, 1, 3, 1],
+                                   [5, 5, 3, 3, 1]], output)
+        with assert_raises(RuntimeError):
+            ndimage.minimum_filter(array, footprint=footprint,
+                                   mode=['reflect', 'constant'])
+
+    def test_minimum_filter08(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [7, 6, 9, 3, 5],
+                             [5, 8, 3, 7, 1]])
+        footprint = [[1, 0, 1], [1, 1, 0]]
+        output = ndimage.minimum_filter(array, footprint=footprint, origin=-1)
+        assert_array_almost_equal([[3, 1, 3, 1, 1],
+                                   [5, 3, 3, 1, 1],
+                                   [3, 3, 1, 1, 1]], output)
+
+    def test_minimum_filter09(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [7, 6, 9, 3, 5],
+                             [5, 8, 3, 7, 1]])
+        footprint = [[1, 0, 1], [1, 1, 0]]
+        output = ndimage.minimum_filter(array, footprint=footprint,
+                                        origin=[-1, 0])
+        assert_array_almost_equal([[2, 3, 1, 3, 1],
+                                   [5, 5, 3, 3, 1],
+                                   [5, 3, 3, 1, 1]], output)
+
+    def test_maximum_filter01(self):
+        array = numpy.array([1, 2, 3, 4, 5])
+        filter_shape = numpy.array([2])
+        output = ndimage.maximum_filter(array, filter_shape)
+        assert_array_almost_equal([1, 2, 3, 4, 5], output)
+
+    def test_maximum_filter02(self):
+        array = numpy.array([1, 2, 3, 4, 5])
+        filter_shape = numpy.array([3])
+        output = ndimage.maximum_filter(array, filter_shape)
+        assert_array_almost_equal([2, 3, 4, 5, 5], output)
+
+    def test_maximum_filter03(self):
+        array = numpy.array([3, 2, 5, 1, 4])
+        filter_shape = numpy.array([2])
+        output = ndimage.maximum_filter(array, filter_shape)
+        assert_array_almost_equal([3, 3, 5, 5, 4], output)
+
+    def test_maximum_filter04(self):
+        array = numpy.array([3, 2, 5, 1, 4])
+        filter_shape = numpy.array([3])
+        output = ndimage.maximum_filter(array, filter_shape)
+        assert_array_almost_equal([3, 5, 5, 5, 4], output)
+
+    def test_maximum_filter05(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [7, 6, 9, 3, 5],
+                             [5, 8, 3, 7, 1]])
+        filter_shape = numpy.array([2, 3])
+        output = ndimage.maximum_filter(array, filter_shape)
+        assert_array_almost_equal([[3, 5, 5, 5, 4],
+                                   [7, 9, 9, 9, 5],
+                                   [8, 9, 9, 9, 7]], output)
+
+    def test_maximum_filter06(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [7, 6, 9, 3, 5],
+                             [5, 8, 3, 7, 1]])
+        footprint = [[1, 1, 1], [1, 1, 1]]
+        output = ndimage.maximum_filter(array, footprint=footprint)
+        assert_array_almost_equal([[3, 5, 5, 5, 4],
+                                   [7, 9, 9, 9, 5],
+                                   [8, 9, 9, 9, 7]], output)
+        # separable footprint should allow mode sequence
+        output2 = ndimage.maximum_filter(array, footprint=footprint,
+                                         mode=['reflect', 'reflect'])
+        assert_array_almost_equal(output2, output)
+
+    def test_maximum_filter07(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [7, 6, 9, 3, 5],
+                             [5, 8, 3, 7, 1]])
+        footprint = [[1, 0, 1], [1, 1, 0]]
+        output = ndimage.maximum_filter(array, footprint=footprint)
+        assert_array_almost_equal([[3, 5, 5, 5, 4],
+                                   [7, 7, 9, 9, 5],
+                                   [7, 9, 8, 9, 7]], output)
+        # non-separable footprint should not allow mode sequence
+        with assert_raises(RuntimeError):
+            ndimage.maximum_filter(array, footprint=footprint,
+                                   mode=['reflect', 'reflect'])
+
+    def test_maximum_filter08(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [7, 6, 9, 3, 5],
+                             [5, 8, 3, 7, 1]])
+        footprint = [[1, 0, 1], [1, 1, 0]]
+        output = ndimage.maximum_filter(array, footprint=footprint, origin=-1)
+        assert_array_almost_equal([[7, 9, 9, 5, 5],
+                                   [9, 8, 9, 7, 5],
+                                   [8, 8, 7, 7, 7]], output)
+
+    def test_maximum_filter09(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [7, 6, 9, 3, 5],
+                             [5, 8, 3, 7, 1]])
+        footprint = [[1, 0, 1], [1, 1, 0]]
+        output = ndimage.maximum_filter(array, footprint=footprint,
+                                        origin=[-1, 0])
+        assert_array_almost_equal([[7, 7, 9, 9, 5],
+                                   [7, 9, 8, 9, 7],
+                                   [8, 8, 8, 7, 7]], output)
+
+    def test_rank01(self):
+        array = numpy.array([1, 2, 3, 4, 5])
+        output = ndimage.rank_filter(array, 1, size=2)
+        assert_array_almost_equal(array, output)
+        output = ndimage.percentile_filter(array, 100, size=2)
+        assert_array_almost_equal(array, output)
+        output = ndimage.median_filter(array, 2)
+        assert_array_almost_equal(array, output)
+
+    def test_rank02(self):
+        array = numpy.array([1, 2, 3, 4, 5])
+        output = ndimage.rank_filter(array, 1, size=[3])
+        assert_array_almost_equal(array, output)
+        output = ndimage.percentile_filter(array, 50, size=3)
+        assert_array_almost_equal(array, output)
+        output = ndimage.median_filter(array, (3,))
+        assert_array_almost_equal(array, output)
+
+    def test_rank03(self):
+        array = numpy.array([3, 2, 5, 1, 4])
+        output = ndimage.rank_filter(array, 1, size=[2])
+        assert_array_almost_equal([3, 3, 5, 5, 4], output)
+        output = ndimage.percentile_filter(array, 100, size=2)
+        assert_array_almost_equal([3, 3, 5, 5, 4], output)
+
+    def test_rank04(self):
+        array = numpy.array([3, 2, 5, 1, 4])
+        expected = [3, 3, 2, 4, 4]
+        output = ndimage.rank_filter(array, 1, size=3)
+        assert_array_almost_equal(expected, output)
+        output = ndimage.percentile_filter(array, 50, size=3)
+        assert_array_almost_equal(expected, output)
+        output = ndimage.median_filter(array, size=3)
+        assert_array_almost_equal(expected, output)
+
+    def test_rank05(self):
+        array = numpy.array([3, 2, 5, 1, 4])
+        expected = [3, 3, 2, 4, 4]
+        output = ndimage.rank_filter(array, -2, size=3)
+        assert_array_almost_equal(expected, output)
+
+    def test_rank06(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [5, 8, 3, 7, 1],
+                             [5, 6, 9, 3, 5]])
+        expected = [[2, 2, 1, 1, 1],
+                    [3, 3, 2, 1, 1],
+                    [5, 5, 3, 3, 1]]
+        output = ndimage.rank_filter(array, 1, size=[2, 3])
+        assert_array_almost_equal(expected, output)
+        output = ndimage.percentile_filter(array, 17, size=(2, 3))
+        assert_array_almost_equal(expected, output)
+
+    def test_rank06_overlap(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [5, 8, 3, 7, 1],
+                             [5, 6, 9, 3, 5]])
+        array_copy = array.copy()
+        expected = [[2, 2, 1, 1, 1],
+                    [3, 3, 2, 1, 1],
+                    [5, 5, 3, 3, 1]]
+        ndimage.rank_filter(array, 1, size=[2, 3], output=array)
+        assert_array_almost_equal(expected, array)
+
+        ndimage.percentile_filter(array_copy, 17, size=(2, 3),
+                                  output=array_copy)
+        assert_array_almost_equal(expected, array_copy)
+
+    def test_rank07(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [5, 8, 3, 7, 1],
+                             [5, 6, 9, 3, 5]])
+        expected = [[3, 5, 5, 5, 4],
+                    [5, 5, 7, 5, 4],
+                    [6, 8, 8, 7, 5]]
+        output = ndimage.rank_filter(array, -2, size=[2, 3])
+        assert_array_almost_equal(expected, output)
+
+    def test_rank08(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [5, 8, 3, 7, 1],
+                             [5, 6, 9, 3, 5]])
+        expected = [[3, 3, 2, 4, 4],
+                    [5, 5, 5, 4, 4],
+                    [5, 6, 7, 5, 5]]
+        output = ndimage.percentile_filter(array, 50.0, size=(2, 3))
+        assert_array_almost_equal(expected, output)
+        output = ndimage.rank_filter(array, 3, size=(2, 3))
+        assert_array_almost_equal(expected, output)
+        output = ndimage.median_filter(array, size=(2, 3))
+        assert_array_almost_equal(expected, output)
+
+        # non-separable: does not allow mode sequence
+        with assert_raises(RuntimeError):
+            ndimage.percentile_filter(array, 50.0, size=(2, 3),
+                                      mode=['reflect', 'constant'])
+        with assert_raises(RuntimeError):
+            ndimage.rank_filter(array, 3, size=(2, 3), mode=['reflect']*2)
+        with assert_raises(RuntimeError):
+            ndimage.median_filter(array, size=(2, 3), mode=['reflect']*2)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_rank09(self, dtype):
+        expected = [[3, 3, 2, 4, 4],
+                    [3, 5, 2, 5, 1],
+                    [5, 5, 8, 3, 5]]
+        footprint = [[1, 0, 1], [0, 1, 0]]
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [5, 8, 3, 7, 1],
+                             [5, 6, 9, 3, 5]], dtype)
+        output = ndimage.rank_filter(array, 1, footprint=footprint)
+        assert_array_almost_equal(expected, output)
+        output = ndimage.percentile_filter(array, 35, footprint=footprint)
+        assert_array_almost_equal(expected, output)
+
+    def test_rank10(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [7, 6, 9, 3, 5],
+                             [5, 8, 3, 7, 1]])
+        expected = [[2, 2, 1, 1, 1],
+                    [2, 3, 1, 3, 1],
+                    [5, 5, 3, 3, 1]]
+        footprint = [[1, 0, 1], [1, 1, 0]]
+        output = ndimage.rank_filter(array, 0, footprint=footprint)
+        assert_array_almost_equal(expected, output)
+        output = ndimage.percentile_filter(array, 0.0, footprint=footprint)
+        assert_array_almost_equal(expected, output)
+
+    def test_rank11(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [7, 6, 9, 3, 5],
+                             [5, 8, 3, 7, 1]])
+        expected = [[3, 5, 5, 5, 4],
+                    [7, 7, 9, 9, 5],
+                    [7, 9, 8, 9, 7]]
+        footprint = [[1, 0, 1], [1, 1, 0]]
+        output = ndimage.rank_filter(array, -1, footprint=footprint)
+        assert_array_almost_equal(expected, output)
+        output = ndimage.percentile_filter(array, 100.0, footprint=footprint)
+        assert_array_almost_equal(expected, output)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_rank12(self, dtype):
+        expected = [[3, 3, 2, 4, 4],
+                    [3, 5, 2, 5, 1],
+                    [5, 5, 8, 3, 5]]
+        footprint = [[1, 0, 1], [0, 1, 0]]
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [5, 8, 3, 7, 1],
+                             [5, 6, 9, 3, 5]], dtype)
+        output = ndimage.rank_filter(array, 1, footprint=footprint)
+        assert_array_almost_equal(expected, output)
+        output = ndimage.percentile_filter(array, 50.0,
+                                           footprint=footprint)
+        assert_array_almost_equal(expected, output)
+        output = ndimage.median_filter(array, footprint=footprint)
+        assert_array_almost_equal(expected, output)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_rank13(self, dtype):
+        expected = [[5, 2, 5, 1, 1],
+                    [5, 8, 3, 5, 5],
+                    [6, 6, 5, 5, 5]]
+        footprint = [[1, 0, 1], [0, 1, 0]]
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [5, 8, 3, 7, 1],
+                             [5, 6, 9, 3, 5]], dtype)
+        output = ndimage.rank_filter(array, 1, footprint=footprint,
+                                     origin=-1)
+        assert_array_almost_equal(expected, output)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_rank14(self, dtype):
+        expected = [[3, 5, 2, 5, 1],
+                    [5, 5, 8, 3, 5],
+                    [5, 6, 6, 5, 5]]
+        footprint = [[1, 0, 1], [0, 1, 0]]
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [5, 8, 3, 7, 1],
+                             [5, 6, 9, 3, 5]], dtype)
+        output = ndimage.rank_filter(array, 1, footprint=footprint,
+                                     origin=[-1, 0])
+        assert_array_almost_equal(expected, output)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_rank15(self, dtype):
+        expected = [[2, 3, 1, 4, 1],
+                    [5, 3, 7, 1, 1],
+                    [5, 5, 3, 3, 3]]
+        footprint = [[1, 0, 1], [0, 1, 0]]
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [5, 8, 3, 7, 1],
+                             [5, 6, 9, 3, 5]], dtype)
+        output = ndimage.rank_filter(array, 0, footprint=footprint,
+                                     origin=[-1, 0])
+        assert_array_almost_equal(expected, output)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_generic_filter1d01(self, dtype):
+        weights = numpy.array([1.1, 2.2, 3.3])
+
+        def _filter_func(input, output, fltr, total):
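+            # generic_filter1d passes an input line padded with
+            # filter_size - 1 extra elements, so this loop writes every
+            # element of the output line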
+            fltr = fltr / total
+            for ii in range(input.shape[0] - 2):
+                output[ii] = input[ii] * fltr[0]
+                output[ii] += input[ii + 1] * fltr[1]
+                output[ii] += input[ii + 2] * fltr[2]
+        a = numpy.arange(12, dtype=dtype)
+        a.shape = (3, 4)
+        r1 = ndimage.correlate1d(a, weights / weights.sum(), 0, origin=-1)
+        r2 = ndimage.generic_filter1d(
+            a, _filter_func, 3, axis=0, origin=-1,
+            extra_arguments=(weights,),
+            extra_keywords={'total': weights.sum()})
+        assert_array_almost_equal(r1, r2)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_generic_filter01(self, dtype):
+        filter_ = numpy.array([[1.0, 2.0], [3.0, 4.0]])
+        footprint = numpy.array([[1, 0], [0, 1]])
+        cf = numpy.array([1., 4.])
+
+        def _filter_func(buffer, weights, total=1.0):
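+            # weights arrives via extra_arguments and equals cf; rebinding
+            # normalizes it by the total keyword before the weighted sum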
+            weights = cf / total
+            return (buffer * weights).sum()
+
+        a = numpy.arange(12, dtype=dtype)
+        a.shape = (3, 4)
+        r1 = ndimage.correlate(a, filter_ * footprint)
+        if dtype in float_types:
+            r1 /= 5
+        else:
+            r1 //= 5
+        r2 = ndimage.generic_filter(
+            a, _filter_func, footprint=footprint, extra_arguments=(cf,),
+            extra_keywords={'total': cf.sum()})
+        assert_array_almost_equal(r1, r2)
+
+        # generic_filter doesn't allow mode sequence
+        with assert_raises(RuntimeError):
+            r2 = ndimage.generic_filter(
+                a, _filter_func, mode=['reflect', 'reflect'],
+                footprint=footprint, extra_arguments=(cf,),
+                extra_keywords={'total': cf.sum()})
+
+    @pytest.mark.parametrize(
+        'mode, expected_value',
+        [('nearest', [1, 1, 2]),
+         ('wrap', [3, 1, 2]),
+         ('reflect', [1, 1, 2]),
+         ('mirror', [2, 1, 2]),
+         ('constant', [0, 1, 2])]
+    )
+    def test_extend01(self, mode, expected_value):
+        array = numpy.array([1, 2, 3])
+        weights = numpy.array([1, 0])
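+        # with weights [1, 0] the first output element is taken from the
+        # extended region, exposing how each mode pads the boundary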
+        output = ndimage.correlate1d(array, weights, 0, mode=mode, cval=0)
+        assert_array_equal(output, expected_value)
+
+    @pytest.mark.parametrize(
+        'mode, expected_value',
+        [('nearest', [1, 1, 1]),
+         ('wrap', [3, 1, 2]),
+         ('reflect', [3, 3, 2]),
+         ('mirror', [1, 2, 3]),
+         ('constant', [0, 0, 0])]
+    )
+    def test_extend02(self, mode, expected_value):
+        array = numpy.array([1, 2, 3])
+        weights = numpy.array([1, 0, 0, 0, 0, 0, 0, 0])
+        output = ndimage.correlate1d(array, weights, 0, mode=mode, cval=0)
+        assert_array_equal(output, expected_value)
+
+    @pytest.mark.parametrize(
+        'mode, expected_value',
+        [('nearest', [2, 3, 3]),
+         ('wrap', [2, 3, 1]),
+         ('reflect', [2, 3, 3]),
+         ('mirror', [2, 3, 2]),
+         ('constant', [2, 3, 0])]
+    )
+    def test_extend03(self, mode, expected_value):
+        array = numpy.array([1, 2, 3])
+        weights = numpy.array([0, 0, 1])
+        output = ndimage.correlate1d(array, weights, 0, mode=mode, cval=0)
+        assert_array_equal(output, expected_value)
+
+    @pytest.mark.parametrize(
+        'mode, expected_value',
+        [('nearest', [3, 3, 3]),
+         ('wrap', [2, 3, 1]),
+         ('reflect', [2, 1, 1]),
+         ('mirror', [1, 2, 3]),
+         ('constant', [0, 0, 0])]
+    )
+    def test_extend04(self, mode, expected_value):
+        array = numpy.array([1, 2, 3])
+        weights = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 1])
+        output = ndimage.correlate1d(array, weights, 0, mode=mode, cval=0)
+        assert_array_equal(output, expected_value)
+
+    @pytest.mark.parametrize(
+        'mode, expected_value',
+        [('nearest', [[1, 1, 2], [1, 1, 2], [4, 4, 5]]),
+         ('wrap', [[9, 7, 8], [3, 1, 2], [6, 4, 5]]),
+         ('reflect', [[1, 1, 2], [1, 1, 2], [4, 4, 5]]),
+         ('mirror', [[5, 4, 5], [2, 1, 2], [5, 4, 5]]),
+         ('constant', [[0, 0, 0], [0, 1, 2], [0, 4, 5]])]
+    )
+    def test_extend05(self, mode, expected_value):
+        array = numpy.array([[1, 2, 3],
+                             [4, 5, 6],
+                             [7, 8, 9]])
+        weights = numpy.array([[1, 0], [0, 0]])
+        output = ndimage.correlate(array, weights, mode=mode, cval=0)
+        assert_array_equal(output, expected_value)
+
+    @pytest.mark.parametrize(
+        'mode, expected_value',
+        [('nearest', [[5, 6, 6], [8, 9, 9], [8, 9, 9]]),
+         ('wrap', [[5, 6, 4], [8, 9, 7], [2, 3, 1]]),
+         ('reflect', [[5, 6, 6], [8, 9, 9], [8, 9, 9]]),
+         ('mirror', [[5, 6, 5], [8, 9, 8], [5, 6, 5]]),
+         ('constant', [[5, 6, 0], [8, 9, 0], [0, 0, 0]])]
+    )
+    def test_extend06(self, mode, expected_value):
+        array = numpy.array([[1, 2, 3],
+                             [4, 5, 6],
+                             [7, 8, 9]])
+        weights = numpy.array([[0, 0, 0], [0, 0, 0], [0, 0, 1]])
+        output = ndimage.correlate(array, weights, mode=mode, cval=0)
+        assert_array_equal(output, expected_value)
+
+    @pytest.mark.parametrize(
+        'mode, expected_value',
+        [('nearest', [3, 3, 3]),
+         ('wrap', [2, 3, 1]),
+         ('reflect', [2, 1, 1]),
+         ('mirror', [1, 2, 3]),
+         ('constant', [0, 0, 0])]
+    )
+    def test_extend07(self, mode, expected_value):
+        array = numpy.array([1, 2, 3])
+        weights = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 1])
+        output = ndimage.correlate(array, weights, mode=mode, cval=0)
+        assert_array_equal(output, expected_value)
+
+    @pytest.mark.parametrize(
+        'mode, expected_value',
+        [('nearest', [[3], [3], [3]]),
+         ('wrap', [[2], [3], [1]]),
+         ('reflect', [[2], [1], [1]]),
+         ('mirror', [[1], [2], [3]]),
+         ('constant', [[0], [0], [0]])]
+    )
+    def test_extend08(self, mode, expected_value):
+        array = numpy.array([[1], [2], [3]])
+        weights = numpy.array([[0], [0], [0], [0], [0], [0], [0], [0], [1]])
+        output = ndimage.correlate(array, weights, mode=mode, cval=0)
+        assert_array_equal(output, expected_value)
+
+    @pytest.mark.parametrize(
+        'mode, expected_value',
+        [('nearest', [3, 3, 3]),
+         ('wrap', [2, 3, 1]),
+         ('reflect', [2, 1, 1]),
+         ('mirror', [1, 2, 3]),
+         ('constant', [0, 0, 0])]
+    )
+    def test_extend09(self, mode, expected_value):
+        array = numpy.array([1, 2, 3])
+        weights = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 1])
+        output = ndimage.correlate(array, weights, mode=mode, cval=0)
+        assert_array_equal(output, expected_value)
+
+    @pytest.mark.parametrize(
+        'mode, expected_value',
+        [('nearest', [[3], [3], [3]]),
+         ('wrap', [[2], [3], [1]]),
+         ('reflect', [[2], [1], [1]]),
+         ('mirror', [[1], [2], [3]]),
+         ('constant', [[0], [0], [0]])]
+    )
+    def test_extend10(self, mode, expected_value):
+        array = numpy.array([[1], [2], [3]])
+        weights = numpy.array([[0], [0], [0], [0], [0], [0], [0], [0], [1]])
+        output = ndimage.correlate(array, weights, mode=mode, cval=0)
+        assert_array_equal(output, expected_value)
+
+
+def test_ticket_701():
+    # Test generic filter sizes
+    arr = numpy.arange(4).reshape((2, 2))
+    func = lambda x: numpy.min(x)
+    res = ndimage.generic_filter(arr, func, size=(1, 1))
+    # The following raises an error unless ticket 701 is fixed
+    res2 = ndimage.generic_filter(arr, func, size=1)
+    assert_equal(res, res2)
+
+
+def test_gh_5430():
+    # At least one of these raises an error unless gh-5430 is
+    # fixed. In py2k an int is implemented using a C long, so
+    # which one fails depends on your system. In py3k there is only
+    # one arbitrary precision integer type, so both should fail.
+    sigma = numpy.int32(1)
+    out = ndimage._ni_support._normalize_sequence(sigma, 1)
+    assert_equal(out, [sigma])
+    sigma = numpy.int64(1)
+    out = ndimage._ni_support._normalize_sequence(sigma, 1)
+    assert_equal(out, [sigma])
+    # This worked before; make sure it still works
+    sigma = 1
+    out = ndimage._ni_support._normalize_sequence(sigma, 1)
+    assert_equal(out, [sigma])
+    # This worked before; make sure it still works
+    sigma = [1, 1]
+    out = ndimage._ni_support._normalize_sequence(sigma, 2)
+    assert_equal(out, sigma)
+    # Also include the OP's original example to make sure we fixed the issue
+    x = numpy.random.normal(size=(256, 256))
+    perlin = numpy.zeros_like(x)
+    for i in 2**numpy.arange(6):
+        perlin += ndimage.gaussian_filter(x, i, mode="wrap") * i**2
+    # This also fixes gh-4106; show that the OP's example now runs.
+    x = numpy.int64(21)
+    ndimage._ni_support._normalize_sequence(x, 0)
+
+
+def test_gaussian_kernel1d():
+    radius = 10
+    sigma = 2
+    sigma2 = sigma * sigma
+    x = numpy.arange(-radius, radius + 1, dtype=numpy.double)
+    phi_x = numpy.exp(-0.5 * x * x / sigma2)
+    phi_x /= phi_x.sum()
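+    # orders 1-3 must match the analytic derivatives of the order-0
+    # Gaussian kernel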
+    assert_allclose(phi_x, _gaussian_kernel1d(sigma, 0, radius))
+    assert_allclose(-phi_x * x / sigma2, _gaussian_kernel1d(sigma, 1, radius))
+    assert_allclose(phi_x * (x * x / sigma2 - 1) / sigma2,
+                    _gaussian_kernel1d(sigma, 2, radius))
+    assert_allclose(phi_x * (3 - x * x / sigma2) * x / (sigma2 * sigma2),
+                    _gaussian_kernel1d(sigma, 3, radius))
+
+
+def test_orders_gauss():
+    # Check order inputs to Gaussians
+    arr = numpy.zeros((1,))
+    assert_equal(0, ndimage.gaussian_filter(arr, 1, order=0))
+    assert_equal(0, ndimage.gaussian_filter(arr, 1, order=3))
+    assert_raises(ValueError, ndimage.gaussian_filter, arr, 1, -1)
+    assert_equal(0, ndimage.gaussian_filter1d(arr, 1, axis=-1, order=0))
+    assert_equal(0, ndimage.gaussian_filter1d(arr, 1, axis=-1, order=3))
+    assert_raises(ValueError, ndimage.gaussian_filter1d, arr, 1, -1, -1)
+
+
+def test_valid_origins():
+    """Regression test for #1311."""
+    func = lambda x: numpy.mean(x)
+    data = numpy.array([1, 2, 3, 4, 5], dtype=numpy.float64)
+    assert_raises(ValueError, ndimage.generic_filter, data, func, size=3,
+                  origin=2)
+    assert_raises(ValueError, ndimage.generic_filter1d, data, func,
+                  filter_size=3, origin=2)
+    assert_raises(ValueError, ndimage.percentile_filter, data, 0.2, size=3,
+                  origin=2)
+
+    for filter in [ndimage.uniform_filter, ndimage.minimum_filter,
+                   ndimage.maximum_filter, ndimage.maximum_filter1d,
+                   ndimage.median_filter, ndimage.minimum_filter1d]:
+        # This should work, since for size == 3, the valid range for origin is
+        # -1 to 1.
+        list(filter(data, 3, origin=-1))
+        list(filter(data, 3, origin=1))
+        # Just check this raises an error instead of silently accepting or
+        # segfaulting.
+        assert_raises(ValueError, filter, data, 3, origin=2)
+
+
+def test_bad_convolve_and_correlate_origins():
+    """Regression test for gh-822."""
+    # Before gh-822 was fixed, these would generate segmentation faults
+    # or other crashes on many systems.
+    assert_raises(ValueError, ndimage.correlate1d,
+                  [0, 1, 2, 3, 4, 5], [1, 1, 2, 0], origin=2)
+    assert_raises(ValueError, ndimage.correlate,
+                  [0, 1, 2, 3, 4, 5], [0, 1, 2], origin=[2])
+    assert_raises(ValueError, ndimage.correlate,
+                  numpy.ones((3, 5)), numpy.ones((2, 2)), origin=[0, 1])
+
+    assert_raises(ValueError, ndimage.convolve1d,
+                  numpy.arange(10), numpy.ones(3), origin=-2)
+    assert_raises(ValueError, ndimage.convolve,
+                  numpy.arange(10), numpy.ones(3), origin=[-2])
+    assert_raises(ValueError, ndimage.convolve,
+                  numpy.ones((3, 5)), numpy.ones((2, 2)), origin=[0, -2])
+
+
+def test_multiple_modes():
+    # Test that the filters with multiple mode capabilities for different
+    # dimensions give the same result as applying a single mode.
+    arr = numpy.array([[1., 0., 0.],
+                       [1., 1., 0.],
+                       [0., 0., 0.]])
+
+    mode1 = 'reflect'
+    mode2 = ['reflect', 'reflect']
+
+    assert_equal(ndimage.gaussian_filter(arr, 1, mode=mode1),
+                 ndimage.gaussian_filter(arr, 1, mode=mode2))
+    assert_equal(ndimage.prewitt(arr, mode=mode1),
+                 ndimage.prewitt(arr, mode=mode2))
+    assert_equal(ndimage.sobel(arr, mode=mode1),
+                 ndimage.sobel(arr, mode=mode2))
+    assert_equal(ndimage.laplace(arr, mode=mode1),
+                 ndimage.laplace(arr, mode=mode2))
+    assert_equal(ndimage.gaussian_laplace(arr, 1, mode=mode1),
+                 ndimage.gaussian_laplace(arr, 1, mode=mode2))
+    assert_equal(ndimage.maximum_filter(arr, size=5, mode=mode1),
+                 ndimage.maximum_filter(arr, size=5, mode=mode2))
+    assert_equal(ndimage.minimum_filter(arr, size=5, mode=mode1),
+                 ndimage.minimum_filter(arr, size=5, mode=mode2))
+    assert_equal(ndimage.gaussian_gradient_magnitude(arr, 1, mode=mode1),
+                 ndimage.gaussian_gradient_magnitude(arr, 1, mode=mode2))
+    assert_equal(ndimage.uniform_filter(arr, 5, mode=mode1),
+                 ndimage.uniform_filter(arr, 5, mode=mode2))
+
+
+def test_multiple_modes_sequentially():
+    # Test that the filters with multiple mode capabilities for different
+    # dimensions give the same result as applying the filters with
+    # different modes sequentially
+    arr = numpy.array([[1., 0., 0.],
+                       [1., 1., 0.],
+                       [0., 0., 0.]])
+
+    modes = ['reflect', 'wrap']
+
+    expected = ndimage.gaussian_filter1d(arr, 1, axis=0, mode=modes[0])
+    expected = ndimage.gaussian_filter1d(expected, 1, axis=1, mode=modes[1])
+    assert_equal(expected,
+                 ndimage.gaussian_filter(arr, 1, mode=modes))
+
+    expected = ndimage.uniform_filter1d(arr, 5, axis=0, mode=modes[0])
+    expected = ndimage.uniform_filter1d(expected, 5, axis=1, mode=modes[1])
+    assert_equal(expected,
+                 ndimage.uniform_filter(arr, 5, mode=modes))
+
+    expected = ndimage.maximum_filter1d(arr, size=5, axis=0, mode=modes[0])
+    expected = ndimage.maximum_filter1d(expected, size=5, axis=1,
+                                        mode=modes[1])
+    assert_equal(expected,
+                 ndimage.maximum_filter(arr, size=5, mode=modes))
+
+    expected = ndimage.minimum_filter1d(arr, size=5, axis=0, mode=modes[0])
+    expected = ndimage.minimum_filter1d(expected, size=5, axis=1,
+                                        mode=modes[1])
+    assert_equal(expected,
+                 ndimage.minimum_filter(arr, size=5, mode=modes))
+
+
+def test_multiple_modes_prewitt():
+    # Test prewitt filter for multiple extrapolation modes
+    arr = numpy.array([[1., 0., 0.],
+                       [1., 1., 0.],
+                       [0., 0., 0.]])
+
+    expected = numpy.array([[1., -3., 2.],
+                            [1., -2., 1.],
+                            [1., -1., 0.]])
+
+    modes = ['reflect', 'wrap']
+
+    assert_equal(expected,
+                 ndimage.prewitt(arr, mode=modes))
+
+
+def test_multiple_modes_sobel():
+    # Test sobel filter for multiple extrapolation modes
+    arr = numpy.array([[1., 0., 0.],
+                       [1., 1., 0.],
+                       [0., 0., 0.]])
+
+    expected = numpy.array([[1., -4., 3.],
+                            [2., -3., 1.],
+                            [1., -1., 0.]])
+
+    modes = ['reflect', 'wrap']
+
+    assert_equal(expected,
+                 ndimage.sobel(arr, mode=modes))
+
+
+def test_multiple_modes_laplace():
+    # Test laplace filter for multiple extrapolation modes
+    arr = numpy.array([[1., 0., 0.],
+                       [1., 1., 0.],
+                       [0., 0., 0.]])
+
+    expected = numpy.array([[-2., 2., 1.],
+                            [-2., -3., 2.],
+                            [1., 1., 0.]])
+
+    modes = ['reflect', 'wrap']
+
+    assert_equal(expected,
+                 ndimage.laplace(arr, mode=modes))
+
+
+def test_multiple_modes_gaussian_laplace():
+    # Test gaussian_laplace filter for multiple extrapolation modes
+    arr = numpy.array([[1., 0., 0.],
+                       [1., 1., 0.],
+                       [0., 0., 0.]])
+
+    expected = numpy.array([[-0.28438687, 0.01559809, 0.19773499],
+                            [-0.36630503, -0.20069774, 0.07483620],
+                            [0.15849176, 0.18495566, 0.21934094]])
+
+    modes = ['reflect', 'wrap']
+
+    assert_almost_equal(expected,
+                        ndimage.gaussian_laplace(arr, 1, mode=modes))
+
+
+def test_multiple_modes_gaussian_gradient_magnitude():
+    # Test gaussian_gradient_magnitude filter for multiple
+    # extrapolation modes
+    arr = numpy.array([[1., 0., 0.],
+                       [1., 1., 0.],
+                       [0., 0., 0.]])
+
+    expected = numpy.array([[0.04928965, 0.09745625, 0.06405368],
+                            [0.23056905, 0.14025305, 0.04550846],
+                            [0.19894369, 0.14950060, 0.06796850]])
+
+    modes = ['reflect', 'wrap']
+
+    calculated = ndimage.gaussian_gradient_magnitude(arr, 1, mode=modes)
+
+    assert_almost_equal(expected, calculated)
+
+
+def test_multiple_modes_uniform():
+    # Test uniform filter for multiple extrapolation modes
+    arr = numpy.array([[1., 0., 0.],
+                       [1., 1., 0.],
+                       [0., 0., 0.]])
+
+    expected = numpy.array([[0.32, 0.40, 0.48],
+                            [0.20, 0.28, 0.32],
+                            [0.28, 0.32, 0.40]])
+
+    modes = ['reflect', 'wrap']
+
+    assert_almost_equal(expected,
+                        ndimage.uniform_filter(arr, 5, mode=modes))
+
+
+def test_gaussian_truncate():
+    # Test that Gaussian filters can be truncated at different widths.
+    # These tests only check that the result has the expected number
+    # of nonzero elements.
+    arr = numpy.zeros((100, 100), float)
+    arr[50, 50] = 1
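+    # with sigma=5 and truncate=2 the kernel radius is int(5 * 2 + 0.5) = 10,
+    # so the response is nonzero over 21 pixels along each axis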
+    num_nonzeros_2 = (ndimage.gaussian_filter(arr, 5, truncate=2) > 0).sum()
+    assert_equal(num_nonzeros_2, 21**2)
+    num_nonzeros_5 = (ndimage.gaussian_filter(arr, 5, truncate=5) > 0).sum()
+    assert_equal(num_nonzeros_5, 51**2)
+
+    # Test truncate when sigma is a sequence.
+    f = ndimage.gaussian_filter(arr, [0.5, 2.5], truncate=3.5)
+    fpos = f > 0
+    n0 = fpos.any(axis=0).sum()
+    # n0 should be 2*int(2.5*3.5 + 0.5) + 1
+    assert_equal(n0, 19)
+    n1 = fpos.any(axis=1).sum()
+    # n1 should be 2*int(0.5*3.5 + 0.5) + 1
+    assert_equal(n1, 5)
+
+    # Test gaussian_filter1d.
+    x = numpy.zeros(51)
+    x[25] = 1
+    f = ndimage.gaussian_filter1d(x, sigma=2, truncate=3.5)
+    n = (f > 0).sum()
+    assert_equal(n, 15)
+
+    # Test gaussian_laplace
+    y = ndimage.gaussian_laplace(x, sigma=2, truncate=3.5)
+    nonzero_indices = numpy.nonzero(y != 0)[0]
+    n = nonzero_indices.ptp() + 1
+    assert_equal(n, 15)
+
+    # Test gaussian_gradient_magnitude
+    y = ndimage.gaussian_gradient_magnitude(x, sigma=2, truncate=3.5)
+    nonzero_indices = numpy.nonzero(y != 0)[0]
+    n = nonzero_indices.ptp() + 1
+    assert_equal(n, 15)
+
+
+def test_gaussian_radius():
+    # Test that Gaussian filters with radius argument produce the same
+    # results as the filters with corresponding truncate argument.
+    # radius = int(truncate * sigma + 0.5)
+    # Test gaussian_filter1d
+    x = numpy.zeros(7)
+    x[3] = 1
+    f1 = ndimage.gaussian_filter1d(x, sigma=2, truncate=1.5)
+    f2 = ndimage.gaussian_filter1d(x, sigma=2, radius=3)
+    assert_equal(f1, f2)
+
+    # Test gaussian_filter when sigma is a number.
+    a = numpy.zeros((9, 9))
+    a[4, 4] = 1
+    f1 = ndimage.gaussian_filter(a, sigma=0.5, truncate=3.5)
+    f2 = ndimage.gaussian_filter(a, sigma=0.5, radius=2)
+    assert_equal(f1, f2)
+
+    # Test gaussian_filter when sigma is a sequence.
+    a = numpy.zeros((50, 50))
+    a[25, 25] = 1
+    f1 = ndimage.gaussian_filter(a, sigma=[0.5, 2.5], truncate=3.5)
+    f2 = ndimage.gaussian_filter(a, sigma=[0.5, 2.5], radius=[2, 9])
+    assert_equal(f1, f2)
+
+
+def test_gaussian_radius_invalid():
+    # radius must be a nonnegative integer
+    with assert_raises(ValueError):
+        ndimage.gaussian_filter1d(numpy.zeros(8), sigma=1, radius=-1)
+    with assert_raises(ValueError):
+        ndimage.gaussian_filter1d(numpy.zeros(8), sigma=1, radius=1.1)
+
+
+class TestThreading:
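+    # run each filter serially and from four concurrent threads into
+    # separate output buffers; both runs must produce identical results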
+    def check_func_thread(self, n, fun, args, out):
+        from threading import Thread
+        thrds = [Thread(target=fun, args=args, kwargs={'output': out[x]})
+                 for x in range(n)]
+        [t.start() for t in thrds]
+        [t.join() for t in thrds]
+
+    def check_func_serial(self, n, fun, args, out):
+        for i in range(n):
+            fun(*args, output=out[i])
+
+    def test_correlate1d(self):
+        d = numpy.random.randn(5000)
+        os = numpy.empty((4, d.size))
+        ot = numpy.empty_like(os)
+        k = numpy.arange(5)
+        self.check_func_serial(4, ndimage.correlate1d, (d, k), os)
+        self.check_func_thread(4, ndimage.correlate1d, (d, k), ot)
+        assert_array_equal(os, ot)
+
+    def test_correlate(self):
+        d = numpy.random.randn(500, 500)
+        k = numpy.random.randn(10, 10)
+        os = numpy.empty([4] + list(d.shape))
+        ot = numpy.empty_like(os)
+        self.check_func_serial(4, ndimage.correlate, (d, k), os)
+        self.check_func_thread(4, ndimage.correlate, (d, k), ot)
+        assert_array_equal(os, ot)
+
+    def test_median_filter(self):
+        d = numpy.random.randn(500, 500)
+        os = numpy.empty([4] + list(d.shape))
+        ot = numpy.empty_like(os)
+        self.check_func_serial(4, ndimage.median_filter, (d, 3), os)
+        self.check_func_thread(4, ndimage.median_filter, (d, 3), ot)
+        assert_array_equal(os, ot)
+
+    def test_uniform_filter1d(self):
+        d = numpy.random.randn(5000)
+        os = numpy.empty((4, d.size))
+        ot = numpy.empty_like(os)
+        self.check_func_serial(4, ndimage.uniform_filter1d, (d, 5), os)
+        self.check_func_thread(4, ndimage.uniform_filter1d, (d, 5), ot)
+        assert_array_equal(os, ot)
+
+    def test_minmax_filter(self):
+        d = numpy.random.randn(500, 500)
+        os = numpy.empty([4] + list(d.shape))
+        ot = numpy.empty_like(os)
+        self.check_func_serial(4, ndimage.maximum_filter, (d, 3), os)
+        self.check_func_thread(4, ndimage.maximum_filter, (d, 3), ot)
+        assert_array_equal(os, ot)
+        self.check_func_serial(4, ndimage.minimum_filter, (d, 3), os)
+        self.check_func_thread(4, ndimage.minimum_filter, (d, 3), ot)
+        assert_array_equal(os, ot)
+
+
+def test_minmaximum_filter1d():
+    # Regression test for gh-3898
+    in_ = numpy.arange(10)
+    out = ndimage.minimum_filter1d(in_, 1)
+    assert_equal(in_, out)
+    out = ndimage.maximum_filter1d(in_, 1)
+    assert_equal(in_, out)
+    # Test reflect
+    out = ndimage.minimum_filter1d(in_, 5, mode='reflect')
+    assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 6, 7], out)
+    out = ndimage.maximum_filter1d(in_, 5, mode='reflect')
+    assert_equal([2, 3, 4, 5, 6, 7, 8, 9, 9, 9], out)
+    # Test constant
+    out = ndimage.minimum_filter1d(in_, 5, mode='constant', cval=-1)
+    assert_equal([-1, -1, 0, 1, 2, 3, 4, 5, -1, -1], out)
+    out = ndimage.maximum_filter1d(in_, 5, mode='constant', cval=10)
+    assert_equal([10, 10, 4, 5, 6, 7, 8, 9, 10, 10], out)
+    # Test nearest
+    out = ndimage.minimum_filter1d(in_, 5, mode='nearest')
+    assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 6, 7], out)
+    out = ndimage.maximum_filter1d(in_, 5, mode='nearest')
+    assert_equal([2, 3, 4, 5, 6, 7, 8, 9, 9, 9], out)
+    # Test wrap
+    out = ndimage.minimum_filter1d(in_, 5, mode='wrap')
+    assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 0, 0], out)
+    out = ndimage.maximum_filter1d(in_, 5, mode='wrap')
+    assert_equal([9, 9, 4, 5, 6, 7, 8, 9, 9, 9], out)
+
+
+def test_uniform_filter1d_roundoff_errors():
+    # gh-6930
+    in_ = numpy.repeat([0, 1, 0], [9, 9, 9])
+    for filter_size in range(3, 10):
+        out = ndimage.uniform_filter1d(in_, filter_size)
+        assert_equal(out.sum(), 10 - filter_size)
+
+
+def test_footprint_all_zeros():
+    # regression test for gh-6876: footprint of all zeros segfaults
+    arr = numpy.random.randint(0, 100, (100, 100))
+    kernel = numpy.zeros((3, 3), bool)
+    with assert_raises(ValueError):
+        ndimage.maximum_filter(arr, footprint=kernel)
+
+
+def test_gaussian_filter():
+    # Test gaussian filter with numpy.float16
+    # gh-8207
+    data = numpy.array([1], dtype=numpy.float16)
+    sigma = 1.0
+    with assert_raises(RuntimeError):
+        ndimage.gaussian_filter(data, sigma)
+
+
+def test_rank_filter_noninteger_rank():
+    # regression test for issue 9388: ValueError for
+    # non-integer rank when performing rank_filter
+    arr = numpy.random.random((10, 20, 30))
+    assert_raises(TypeError, ndimage.rank_filter, arr, 0.5,
+                  footprint=numpy.ones((1, 1, 10), dtype=bool))
+
+
+def test_size_footprint_both_set():
+    # test for input validation: expect a user warning when
+    # both size and footprint are set
+    with suppress_warnings() as sup:
+        sup.filter(UserWarning,
+                   "ignoring size because footprint is set")
+        arr = numpy.random.random((10, 20, 30))
+        ndimage.rank_filter(arr, 5, size=2,
+                            footprint=numpy.ones((1, 1, 10), dtype=bool))
+
+
+def test_byte_order_median():
+    """Regression test for #413: median_filter does not handle bytes orders."""
+    a = numpy.arange(9, dtype='<f4').reshape(3, 3)
+    ref = ndimage.median_filter(a, (3, 3))
+    b = numpy.arange(9, dtype='>f4').reshape(3, 3)
+    t = ndimage.median_filter(b, (3, 3))
+    assert_array_almost_equal(ref, t)
+
+    def test_fourier_ellipsoid_unimplemented_ndim(self):
+        # arrays with ndim > 3 raise NotImplementedError
+        x = numpy.ones((4, 6, 8, 10), dtype=numpy.complex128)
+        with pytest.raises(NotImplementedError):
+            a = ndimage.fourier_ellipsoid(x, 3)
+
+    def test_fourier_ellipsoid_1d_complex(self):
+        # expected result of 1d ellipsoid is the same as for fourier_uniform
+        for shape in [(32, ), (31, )]:
+            for type_, dec in zip([numpy.complex64, numpy.complex128],
+                                  [5, 14]):
+                x = numpy.ones(shape, dtype=type_)
+                a = ndimage.fourier_ellipsoid(x, 5, -1, 0)
+                b = ndimage.fourier_uniform(x, 5, -1, 0)
+                assert_array_almost_equal(a, b, decimal=dec)
+
+    @pytest.mark.parametrize('shape', [(0, ), (0, 10), (10, 0)])
+    @pytest.mark.parametrize('dtype',
+                             [numpy.float32, numpy.float64,
+                              numpy.complex64, numpy.complex128])
+    @pytest.mark.parametrize('test_func',
+                             [ndimage.fourier_ellipsoid,
+                              ndimage.fourier_gaussian,
+                              ndimage.fourier_uniform])
+    def test_fourier_zero_length_dims(self, shape, dtype, test_func):
+        a = numpy.ones(shape, dtype)
+        b = test_func(a, 3)
+        assert_equal(a, b)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/ndimage/tests/test_interpolation.py b/__packaged__/coreml/.python_dependencies/scipy/ndimage/tests/test_interpolation.py
new file mode 100644
index 00000000..eb162046
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/ndimage/tests/test_interpolation.py
@@ -0,0 +1,1328 @@
+import sys
+
+import numpy
+from numpy.testing import (assert_, assert_equal, assert_array_equal,
+                           assert_array_almost_equal, assert_allclose,
+                           suppress_warnings)
+import pytest
+from pytest import raises as assert_raises
+import scipy.ndimage as ndimage
+
+from . import types
+
+eps = 1e-12
+
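+# ndimage and numpy.pad name the same boundary extension modes
+# differently; map each ndimage mode to its numpy.pad equivalent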
+ndimage_to_numpy_mode = {
+    'mirror': 'reflect',
+    'reflect': 'symmetric',
+    'grid-mirror': 'symmetric',
+    'grid-wrap': 'wrap',
+    'nearest': 'edge',
+    'grid-constant': 'constant',
+}
+
+
+class TestNdimageInterpolation:
+
+    @pytest.mark.parametrize(
+        'mode, expected_value',
+        [('nearest', [1.5, 2.5, 3.5, 4, 4, 4, 4]),
+         ('wrap', [1.5, 2.5, 3.5, 1.5, 2.5, 3.5, 1.5]),
+         ('grid-wrap', [1.5, 2.5, 3.5, 2.5, 1.5, 2.5, 3.5]),
+         ('mirror', [1.5, 2.5, 3.5, 3.5, 2.5, 1.5, 1.5]),
+         ('reflect', [1.5, 2.5, 3.5, 4, 3.5, 2.5, 1.5]),
+         ('constant', [1.5, 2.5, 3.5, -1, -1, -1, -1]),
+         ('grid-constant', [1.5, 2.5, 3.5, 1.5, -1, -1, -1])]
+    )
+    def test_boundaries(self, mode, expected_value):
+        def shift(x):
+            return (x[0] + 0.5,)
+
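+        # sampling at x + 0.5 with output_shape=(7,) reads past the end of
+        # the 4-element input, so each mode's extension becomes visible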
+        data = numpy.array([1, 2, 3, 4.])
+        assert_array_equal(
+            expected_value,
+            ndimage.geometric_transform(data, shift, cval=-1, mode=mode,
+                                        output_shape=(7,), order=1))
+
+    @pytest.mark.parametrize(
+        'mode, expected_value',
+        [('nearest', [1, 1, 2, 3]),
+         ('wrap', [3, 1, 2, 3]),
+         ('grid-wrap', [4, 1, 2, 3]),
+         ('mirror', [2, 1, 2, 3]),
+         ('reflect', [1, 1, 2, 3]),
+         ('constant', [-1, 1, 2, 3]),
+         ('grid-constant', [-1, 1, 2, 3])]
+    )
+    def test_boundaries2(self, mode, expected_value):
+        def shift(x):
+            return (x[0] - 0.9,)
+
+        data = numpy.array([1, 2, 3, 4])
+        assert_array_equal(
+            expected_value,
+            ndimage.geometric_transform(data, shift, cval=-1, mode=mode,
+                                        output_shape=(4,)))
+
+    @pytest.mark.parametrize('mode', ['mirror', 'reflect', 'grid-mirror',
+                                      'grid-wrap', 'grid-constant',
+                                      'nearest'])
+    @pytest.mark.parametrize('order', range(6))
+    def test_boundary_spline_accuracy(self, mode, order):
+        """Tests based on examples from gh-2640"""
+        data = numpy.arange(-6, 7, dtype=float)
+        x = numpy.linspace(-8, 15, num=1000)
+        y = ndimage.map_coordinates(data, [x], order=order, mode=mode)
+
+        # compute expected value using explicit padding via numpy.pad
+        npad = 32
+        pad_mode = ndimage_to_numpy_mode.get(mode)
+        padded = numpy.pad(data, npad, mode=pad_mode)
+        expected = ndimage.map_coordinates(padded, [npad + x], order=order,
+                                           mode=mode)
+
+        atol = 1e-5 if mode == 'grid-constant' else 1e-12
+        assert_allclose(y, expected, rtol=1e-7, atol=atol)
+
+    @pytest.mark.parametrize('order', range(2, 6))
+    @pytest.mark.parametrize('dtype', types)
+    def test_spline01(self, dtype, order):
+        data = numpy.ones([], dtype)
+        out = ndimage.spline_filter(data, order=order)
+        assert_array_almost_equal(out, 1)
+
+    @pytest.mark.parametrize('order', range(2, 6))
+    @pytest.mark.parametrize('dtype', types)
+    def test_spline02(self, dtype, order):
+        data = numpy.array([1], dtype)
+        out = ndimage.spline_filter(data, order=order)
+        assert_array_almost_equal(out, [1])
+
+    @pytest.mark.parametrize('order', range(2, 6))
+    @pytest.mark.parametrize('dtype', types)
+    def test_spline03(self, dtype, order):
+        data = numpy.ones([], dtype)
+        out = ndimage.spline_filter(data, order, output=dtype)
+        assert_array_almost_equal(out, 1)
+
+    @pytest.mark.parametrize('order', range(2, 6))
+    @pytest.mark.parametrize('dtype', types)
+    def test_spline04(self, dtype, order):
+        data = numpy.ones([4], dtype)
+        out = ndimage.spline_filter(data, order)
+        assert_array_almost_equal(out, [1, 1, 1, 1])
+
+    @pytest.mark.parametrize('order', range(2, 6))
+    @pytest.mark.parametrize('dtype', types)
+    def test_spline05(self, dtype, order):
+        data = numpy.ones([4, 4], dtype)
+        out = ndimage.spline_filter(data, order=order)
+        assert_array_almost_equal(out, [[1, 1, 1, 1],
+                                        [1, 1, 1, 1],
+                                        [1, 1, 1, 1],
+                                        [1, 1, 1, 1]])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_geometric_transform01(self, order):
+        data = numpy.array([1])
+
+        def mapping(x):
+            return x
+
+        out = ndimage.geometric_transform(data, mapping, data.shape,
+                                          order=order)
+        assert_array_almost_equal(out, [1])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_geometric_transform02(self, order):
+        data = numpy.ones([4])
+
+        def mapping(x):
+            return x
+
+        out = ndimage.geometric_transform(data, mapping, data.shape,
+                                          order=order)
+        assert_array_almost_equal(out, [1, 1, 1, 1])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_geometric_transform03(self, order):
+        data = numpy.ones([4])
+
+        def mapping(x):
+            return (x[0] - 1,)
+
+        out = ndimage.geometric_transform(data, mapping, data.shape,
+                                          order=order)
+        assert_array_almost_equal(out, [0, 1, 1, 1])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_geometric_transform04(self, order):
+        data = numpy.array([4, 1, 3, 2])
+
+        def mapping(x):
+            return (x[0] - 1,)
+
+        out = ndimage.geometric_transform(data, mapping, data.shape,
+                                          order=order)
+        assert_array_almost_equal(out, [0, 4, 1, 3])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    @pytest.mark.parametrize('dtype', [numpy.float64, numpy.complex128])
+    def test_geometric_transform05(self, order, dtype):
+        data = numpy.array([[1, 1, 1, 1],
+                            [1, 1, 1, 1],
+                            [1, 1, 1, 1]], dtype=dtype)
+        expected = numpy.array([[0, 1, 1, 1],
+                                [0, 1, 1, 1],
+                                [0, 1, 1, 1]], dtype=dtype)
+        if data.dtype.kind == 'c':
+            data -= 1j * data
+            expected -= 1j * expected
+
+        def mapping(x):
+            return (x[0], x[1] - 1)
+
+        out = ndimage.geometric_transform(data, mapping, data.shape,
+                                          order=order)
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_geometric_transform06(self, order):
+        data = numpy.array([[4, 1, 3, 2],
+                            [7, 6, 8, 5],
+                            [3, 5, 3, 6]])
+
+        def mapping(x):
+            return (x[0], x[1] - 1)
+
+        out = ndimage.geometric_transform(data, mapping, data.shape,
+                                          order=order)
+        assert_array_almost_equal(out, [[0, 4, 1, 3],
+                                        [0, 7, 6, 8],
+                                        [0, 3, 5, 3]])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_geometric_transform07(self, order):
+        data = numpy.array([[4, 1, 3, 2],
+                            [7, 6, 8, 5],
+                            [3, 5, 3, 6]])
+
+        def mapping(x):
+            return (x[0] - 1, x[1])
+
+        out = ndimage.geometric_transform(data, mapping, data.shape,
+                                          order=order)
+        assert_array_almost_equal(out, [[0, 0, 0, 0],
+                                        [4, 1, 3, 2],
+                                        [7, 6, 8, 5]])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_geometric_transform08(self, order):
+        data = numpy.array([[4, 1, 3, 2],
+                            [7, 6, 8, 5],
+                            [3, 5, 3, 6]])
+
+        def mapping(x):
+            return (x[0] - 1, x[1] - 1)
+
+        out = ndimage.geometric_transform(data, mapping, data.shape,
+                                          order=order)
+        assert_array_almost_equal(out, [[0, 0, 0, 0],
+                                        [0, 4, 1, 3],
+                                        [0, 7, 6, 8]])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_geometric_transform10(self, order):
+        data = numpy.array([[4, 1, 3, 2],
+                            [7, 6, 8, 5],
+                            [3, 5, 3, 6]])
+
+        def mapping(x):
+            return (x[0] - 1, x[1] - 1)
+
+        if order > 1:
+            filtered = ndimage.spline_filter(data, order=order)
+        else:
+            filtered = data
+        out = ndimage.geometric_transform(filtered, mapping, data.shape,
+                                          order=order, prefilter=False)
+        assert_array_almost_equal(out, [[0, 0, 0, 0],
+                                        [0, 4, 1, 3],
+                                        [0, 7, 6, 8]])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_geometric_transform13(self, order):
+        data = numpy.ones([2], numpy.float64)
+
+        def mapping(x):
+            return (x[0] // 2,)
+
+        out = ndimage.geometric_transform(data, mapping, [4], order=order)
+        assert_array_almost_equal(out, [1, 1, 1, 1])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_geometric_transform14(self, order):
+        data = [1, 5, 2, 6, 3, 7, 4, 4]
+
+        def mapping(x):
+            return (2 * x[0],)
+
+        out = ndimage.geometric_transform(data, mapping, [4], order=order)
+        assert_array_almost_equal(out, [1, 2, 3, 4])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_geometric_transform15(self, order):
+        data = [1, 2, 3, 4]
+
+        def mapping(x):
+            return (x[0] / 2,)
+
+        out = ndimage.geometric_transform(data, mapping, [8], order=order)
+        assert_array_almost_equal(out[::2], [1, 2, 3, 4])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_geometric_transform16(self, order):
+        data = [[1, 2, 3, 4],
+                [5, 6, 7, 8],
+                [9.0, 10, 11, 12]]
+
+        def mapping(x):
+            return (x[0], x[1] * 2)
+
+        out = ndimage.geometric_transform(data, mapping, (3, 2),
+                                          order=order)
+        assert_array_almost_equal(out, [[1, 3], [5, 7], [9, 11]])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_geometric_transform17(self, order):
+        data = [[1, 2, 3, 4],
+                [5, 6, 7, 8],
+                [9, 10, 11, 12]]
+
+        def mapping(x):
+            return (x[0] * 2, x[1])
+
+        out = ndimage.geometric_transform(data, mapping, (1, 4),
+                                          order=order)
+        assert_array_almost_equal(out, [[1, 2, 3, 4]])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_geometric_transform18(self, order):
+        data = [[1, 2, 3, 4],
+                [5, 6, 7, 8],
+                [9, 10, 11, 12]]
+
+        def mapping(x):
+            return (x[0] * 2, x[1] * 2)
+
+        out = ndimage.geometric_transform(data, mapping, (1, 2),
+                                          order=order)
+        assert_array_almost_equal(out, [[1, 3]])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_geometric_transform19(self, order):
+        data = [[1, 2, 3, 4],
+                [5, 6, 7, 8],
+                [9, 10, 11, 12]]
+
+        def mapping(x):
+            return (x[0], x[1] / 2)
+
+        out = ndimage.geometric_transform(data, mapping, (3, 8),
+                                          order=order)
+        assert_array_almost_equal(out[..., ::2], data)
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_geometric_transform20(self, order):
+        data = [[1, 2, 3, 4],
+                [5, 6, 7, 8],
+                [9, 10, 11, 12]]
+
+        def mapping(x):
+            return (x[0] / 2, x[1])
+
+        out = ndimage.geometric_transform(data, mapping, (6, 4),
+                                          order=order)
+        assert_array_almost_equal(out[::2, ...], data)
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_geometric_transform21(self, order):
+        data = [[1, 2, 3, 4],
+                [5, 6, 7, 8],
+                [9, 10, 11, 12]]
+
+        def mapping(x):
+            return (x[0] / 2, x[1] / 2)
+
+        out = ndimage.geometric_transform(data, mapping, (6, 8),
+                                          order=order)
+        assert_array_almost_equal(out[::2, ::2], data)
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_geometric_transform22(self, order):
+        data = numpy.array([[1, 2, 3, 4],
+                            [5, 6, 7, 8],
+                            [9, 10, 11, 12]], numpy.float64)
+
+        def mapping1(x):
+            return (x[0] / 2, x[1] / 2)
+
+        def mapping2(x):
+            return (x[0] * 2, x[1] * 2)
+
+        out = ndimage.geometric_transform(data, mapping1,
+                                          (6, 8), order=order)
+        out = ndimage.geometric_transform(out, mapping2,
+                                          (3, 4), order=order)
+        assert_array_almost_equal(out, data)
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_geometric_transform23(self, order):
+        data = [[1, 2, 3, 4],
+                [5, 6, 7, 8],
+                [9, 10, 11, 12]]
+
+        def mapping(x):
+            return (1, x[0] * 2)
+
+        out = ndimage.geometric_transform(data, mapping, (2,), order=order)
+        out = out.astype(numpy.int32)
+        assert_array_almost_equal(out, [5, 7])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_geometric_transform24(self, order):
+        data = [[1, 2, 3, 4],
+                [5, 6, 7, 8],
+                [9, 10, 11, 12]]
+
+        def mapping(x, a, b):
+            return (a, x[0] * b)
+
+        out = ndimage.geometric_transform(
+            data, mapping, (2,), order=order, extra_arguments=(1,),
+            extra_keywords={'b': 2})
+        assert_array_almost_equal(out, [5, 7])
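+    # extra_arguments and extra_keywords are forwarded to the mapping
+    # callable after the coordinate tuple, so the call above invokes
+    # mapping(output_coords, 1, b=2), reproducing the fixed mapping of
+    # test_geometric_transform23.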
+
+    def test_geometric_transform_grid_constant_order1(self):
+        # verify interpolation outside the original bounds
+        x = numpy.array([[1, 2, 3],
+                         [4, 5, 6]], dtype=float)
+
+        def mapping(x):
+            return (x[0] - 0.5), (x[1] - 0.5)
+
+        expected_result = numpy.array([[0.25, 0.75, 1.25],
+                                       [1.25, 3.00, 4.00]])
+        assert_array_almost_equal(
+            ndimage.geometric_transform(x, mapping, mode='grid-constant',
+                                        order=1),
+            expected_result,
+        )
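+    # Worked example for the result above: out[0, 0] asks for input
+    # position (-0.5, -0.5); with order=1 this is the bilinear average of
+    # four samples of the zero-padded input, (0 + 0 + 0 + 1) / 4 = 0.25.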
+
+    @pytest.mark.parametrize('mode', ['grid-constant', 'grid-wrap', 'nearest',
+                                      'mirror', 'reflect'])
+    @pytest.mark.parametrize('order', range(6))
+    def test_geometric_transform_vs_padded(self, order, mode):
+        x = numpy.arange(144, dtype=float).reshape(12, 12)
+
+        def mapping(x):
+            return (x[0] - 0.4), (x[1] + 2.3)
+
+        # Manually pad and then extract center after the transform to get the
+        # expected result.
+        npad = 24
+        pad_mode = ndimage_to_numpy_mode.get(mode)
+        xp = numpy.pad(x, npad, mode=pad_mode)
+        center_slice = tuple([slice(npad, -npad)] * x.ndim)
+        expected_result = ndimage.geometric_transform(
+            xp, mapping, mode=mode, order=order)[center_slice]
+
+        assert_allclose(
+            ndimage.geometric_transform(x, mapping, mode=mode,
+                                        order=order),
+            expected_result,
+            rtol=1e-7,
+        )
+
+    def test_geometric_transform_endianness_with_output_parameter(self):
+        # geometric transform given output ndarray or dtype with
+        # either endianness. see issue #4127
+        data = numpy.array([1])
+
+        def mapping(x):
+            return x
+
+        for out in [data.dtype, data.dtype.newbyteorder(),
+                    numpy.empty_like(data),
+                    numpy.empty_like(data).astype(data.dtype.newbyteorder())]:
+            returned = ndimage.geometric_transform(data, mapping, data.shape,
+                                                   output=out)
+            result = out if returned is None else returned
+            assert_array_almost_equal(result, [1])
+
+    def test_geometric_transform_with_string_output(self):
+        data = numpy.array([1])
+
+        def mapping(x):
+            return x
+
+        out = ndimage.geometric_transform(data, mapping, output='f')
+        assert_(out.dtype is numpy.dtype('f'))
+        assert_array_almost_equal(out, [1])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    @pytest.mark.parametrize('dtype', [numpy.float64, numpy.complex128])
+    def test_map_coordinates01(self, order, dtype):
+        data = numpy.array([[4, 1, 3, 2],
+                            [7, 6, 8, 5],
+                            [3, 5, 3, 6]])
+        expected = numpy.array([[0, 0, 0, 0],
+                                [0, 4, 1, 3],
+                                [0, 7, 6, 8]])
+        if data.dtype.kind == 'c':
+            data = data - 1j * data
+            expected = expected - 1j * expected
+
+        idx = numpy.indices(data.shape)
+        idx -= 1
+
+        out = ndimage.map_coordinates(data, idx, order=order)
+        assert_array_almost_equal(out, expected)
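+    # map_coordinates evaluates out[i, j] = data[idx[0][i, j], idx[1][i, j]],
+    # so idx = indices - 1 is a (1, 1) shift; coordinate -1 falls outside
+    # the input and is filled by the default 'constant' mode with cval=0,
+    # producing the zero first row and column.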
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_map_coordinates02(self, order):
+        data = numpy.array([[4, 1, 3, 2],
+                            [7, 6, 8, 5],
+                            [3, 5, 3, 6]])
+        idx = numpy.indices(data.shape, numpy.float64)
+        idx -= 0.5
+
+        out1 = ndimage.shift(data, 0.5, order=order)
+        out2 = ndimage.map_coordinates(data, idx, order=order)
+        assert_array_almost_equal(out1, out2)
+
+    def test_map_coordinates03(self):
+        data = numpy.array([[4, 1, 3, 2],
+                            [7, 6, 8, 5],
+                            [3, 5, 3, 6]], order='F')
+        idx = numpy.indices(data.shape) - 1
+        out = ndimage.map_coordinates(data, idx)
+        assert_array_almost_equal(out, [[0, 0, 0, 0],
+                                        [0, 4, 1, 3],
+                                        [0, 7, 6, 8]])
+        assert_array_almost_equal(out, ndimage.shift(data, (1, 1)))
+        idx = numpy.indices(data[::2].shape) - 1
+        out = ndimage.map_coordinates(data[::2], idx)
+        assert_array_almost_equal(out, [[0, 0, 0, 0],
+                                        [0, 4, 1, 3]])
+        assert_array_almost_equal(out, ndimage.shift(data[::2], (1, 1)))
+        idx = numpy.indices(data[:, ::2].shape) - 1
+        out = ndimage.map_coordinates(data[:, ::2], idx)
+        assert_array_almost_equal(out, [[0, 0], [0, 4], [0, 7]])
+        assert_array_almost_equal(out, ndimage.shift(data[:, ::2], (1, 1)))
+
+    def test_map_coordinates_endianness_with_output_parameter(self):
+        # output parameter given as array or dtype with either endianness
+        # see issue #4127
+        data = numpy.array([[1, 2], [7, 6]])
+        expected = numpy.array([[0, 0], [0, 1]])
+        idx = numpy.indices(data.shape)
+        idx -= 1
+        for out in [
+            data.dtype,
+            data.dtype.newbyteorder(),
+            numpy.empty_like(expected),
+            numpy.empty_like(expected).astype(expected.dtype.newbyteorder())
+        ]:
+            returned = ndimage.map_coordinates(data, idx, output=out)
+            result = out if returned is None else returned
+            assert_array_almost_equal(result, expected)
+
+    def test_map_coordinates_with_string_output(self):
+        data = numpy.array([[1]])
+        idx = numpy.indices(data.shape)
+        out = ndimage.map_coordinates(data, idx, output='f')
+        assert_(out.dtype is numpy.dtype('f'))
+        assert_array_almost_equal(out, [[1]])
+
+    @pytest.mark.skipif('win32' in sys.platform or numpy.intp(0).itemsize < 8,
+                        reason='do not run on 32 bit or windows '
+                               '(no sparse memory)')
+    def test_map_coordinates_large_data(self):
+        # check crash on large data
+        try:
+            n = 30000
+            a = numpy.empty(n**2, dtype=numpy.float32).reshape(n, n)
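+            # ~3.4 GiB if fully written; numpy.empty leaves pages
+            # untouched, so only the corner filled below needs to be
+            # materialized (hence the skip above on platforms without
+            # sparse memory)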
+            # fill the part we might read
+            a[n - 3:, n - 3:] = 0
+            ndimage.map_coordinates(a, [[n - 1.5], [n - 1.5]], order=1)
+        except MemoryError as e:
+            raise pytest.skip('Not enough memory available') from e
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_affine_transform01(self, order):
+        data = numpy.array([1])
+        out = ndimage.affine_transform(data, [[1]], order=order)
+        assert_array_almost_equal(out, [1])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_affine_transform02(self, order):
+        data = numpy.ones([4])
+        out = ndimage.affine_transform(data, [[1]], order=order)
+        assert_array_almost_equal(out, [1, 1, 1, 1])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_affine_transform03(self, order):
+        data = numpy.ones([4])
+        out = ndimage.affine_transform(data, [[1]], -1, order=order)
+        assert_array_almost_equal(out, [0, 1, 1, 1])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_affine_transform04(self, order):
+        data = numpy.array([4, 1, 3, 2])
+        out = ndimage.affine_transform(data, [[1]], -1, order=order)
+        assert_array_almost_equal(out, [0, 4, 1, 3])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    @pytest.mark.parametrize('dtype', [numpy.float64, numpy.complex128])
+    def test_affine_transform05(self, order, dtype):
+        data = numpy.array([[1, 1, 1, 1],
+                            [1, 1, 1, 1],
+                            [1, 1, 1, 1]], dtype=dtype)
+        expected = numpy.array([[0, 1, 1, 1],
+                                [0, 1, 1, 1],
+                                [0, 1, 1, 1]], dtype=dtype)
+        if data.dtype.kind == 'c':
+            data -= 1j * data
+            expected -= 1j * expected
+        out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
+                                       [0, -1], order=order)
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_affine_transform06(self, order):
+        data = numpy.array([[4, 1, 3, 2],
+                            [7, 6, 8, 5],
+                            [3, 5, 3, 6]])
+        out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
+                                       [0, -1], order=order)
+        assert_array_almost_equal(out, [[0, 4, 1, 3],
+                                        [0, 7, 6, 8],
+                                        [0, 3, 5, 3]])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_affine_transform07(self, order):
+        data = numpy.array([[4, 1, 3, 2],
+                            [7, 6, 8, 5],
+                            [3, 5, 3, 6]])
+        out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
+                                       [-1, 0], order=order)
+        assert_array_almost_equal(out, [[0, 0, 0, 0],
+                                        [4, 1, 3, 2],
+                                        [7, 6, 8, 5]])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_affine_transform08(self, order):
+        data = numpy.array([[4, 1, 3, 2],
+                            [7, 6, 8, 5],
+                            [3, 5, 3, 6]])
+        out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
+                                       [-1, -1], order=order)
+        assert_array_almost_equal(out, [[0, 0, 0, 0],
+                                        [0, 4, 1, 3],
+                                        [0, 7, 6, 8]])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_affine_transform09(self, order):
+        data = numpy.array([[4, 1, 3, 2],
+                            [7, 6, 8, 5],
+                            [3, 5, 3, 6]])
+        if order > 1:
+            filtered = ndimage.spline_filter(data, order=order)
+        else:
+            filtered = data
+        out = ndimage.affine_transform(filtered, [[1, 0], [0, 1]],
+                                       [-1, -1], order=order,
+                                       prefilter=False)
+        assert_array_almost_equal(out, [[0, 0, 0, 0],
+                                        [0, 4, 1, 3],
+                                        [0, 7, 6, 8]])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_affine_transform10(self, order):
+        data = numpy.ones([2], numpy.float64)
+        out = ndimage.affine_transform(data, [[0.5]], output_shape=(4,),
+                                       order=order)
+        assert_array_almost_equal(out, [1, 1, 1, 0])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_affine_transform11(self, order):
+        data = [1, 5, 2, 6, 3, 7, 4, 4]
+        out = ndimage.affine_transform(data, [[2]], 0, (4,), order=order)
+        assert_array_almost_equal(out, [1, 2, 3, 4])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_affine_transform12(self, order):
+        data = [1, 2, 3, 4]
+        out = ndimage.affine_transform(data, [[0.5]], 0, (8,), order=order)
+        assert_array_almost_equal(out[::2], [1, 2, 3, 4])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_affine_transform13(self, order):
+        data = [[1, 2, 3, 4],
+                [5, 6, 7, 8],
+                [9.0, 10, 11, 12]]
+        out = ndimage.affine_transform(data, [[1, 0], [0, 2]], 0, (3, 2),
+                                       order=order)
+        assert_array_almost_equal(out, [[1, 3], [5, 7], [9, 11]])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_affine_transform14(self, order):
+        data = [[1, 2, 3, 4],
+                [5, 6, 7, 8],
+                [9, 10, 11, 12]]
+        out = ndimage.affine_transform(data, [[2, 0], [0, 1]], 0, (1, 4),
+                                       order=order)
+        assert_array_almost_equal(out, [[1, 2, 3, 4]])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_affine_transform15(self, order):
+        data = [[1, 2, 3, 4],
+                [5, 6, 7, 8],
+                [9, 10, 11, 12]]
+        out = ndimage.affine_transform(data, [[2, 0], [0, 2]], 0, (1, 2),
+                                       order=order)
+        assert_array_almost_equal(out, [[1, 3]])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_affine_transform16(self, order):
+        data = [[1, 2, 3, 4],
+                [5, 6, 7, 8],
+                [9, 10, 11, 12]]
+        out = ndimage.affine_transform(data, [[1, 0.0], [0, 0.5]], 0,
+                                       (3, 8), order=order)
+        assert_array_almost_equal(out[..., ::2], data)
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_affine_transform17(self, order):
+        data = [[1, 2, 3, 4],
+                [5, 6, 7, 8],
+                [9, 10, 11, 12]]
+        out = ndimage.affine_transform(data, [[0.5, 0], [0, 1]], 0,
+                                       (6, 4), order=order)
+        assert_array_almost_equal(out[::2, ...], data)
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_affine_transform18(self, order):
+        data = [[1, 2, 3, 4],
+                [5, 6, 7, 8],
+                [9, 10, 11, 12]]
+        out = ndimage.affine_transform(data, [[0.5, 0], [0, 0.5]], 0,
+                                       (6, 8), order=order)
+        assert_array_almost_equal(out[::2, ::2], data)
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_affine_transform19(self, order):
+        data = numpy.array([[1, 2, 3, 4],
+                            [5, 6, 7, 8],
+                            [9, 10, 11, 12]], numpy.float64)
+        out = ndimage.affine_transform(data, [[0.5, 0], [0, 0.5]], 0,
+                                       (6, 8), order=order)
+        out = ndimage.affine_transform(out, [[2.0, 0], [0, 2.0]], 0,
+                                       (3, 4), order=order)
+        assert_array_almost_equal(out, data)
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_affine_transform20(self, order):
+        data = [[1, 2, 3, 4],
+                [5, 6, 7, 8],
+                [9, 10, 11, 12]]
+        out = ndimage.affine_transform(data, [[0], [2]], 0, (2,),
+                                       order=order)
+        assert_array_almost_equal(out, [1, 3])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_affine_transform21(self, order):
+        data = [[1, 2, 3, 4],
+                [5, 6, 7, 8],
+                [9, 10, 11, 12]]
+        out = ndimage.affine_transform(data, [[2], [0]], 0, (2,),
+                                       order=order)
+        assert_array_almost_equal(out, [1, 9])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_affine_transform22(self, order):
+        # shift and offset interaction; see issue #1547
+        data = numpy.array([4, 1, 3, 2])
+        out = ndimage.affine_transform(data, [[2]], [-1], (3,),
+                                       order=order)
+        assert_array_almost_equal(out, [0, 1, 2])
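+    # Worked example: out[o] = data[2 * o - 1] samples input positions
+    # -1, 1 and 3; position -1 lies outside the input and takes the
+    # default cval=0, giving [0, data[1], data[3]] = [0, 1, 2].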
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_affine_transform23(self, order):
+        # shift and offset interaction; see issue #1547
+        data = numpy.array([4, 1, 3, 2])
+        out = ndimage.affine_transform(data, [[0.5]], [-1], (8,),
+                                       order=order)
+        assert_array_almost_equal(out[::2], [0, 4, 1, 3])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_affine_transform24(self, order):
+        # consistency between diagonal and non-diagonal case; see issue #1547
+        data = numpy.array([4, 1, 3, 2])
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning,
+                       'The behavior of affine_transform with a 1-D array .* '
+                       'has changed')
+            out1 = ndimage.affine_transform(data, [2], -1, order=order)
+        out2 = ndimage.affine_transform(data, [[2]], -1, order=order)
+        assert_array_almost_equal(out1, out2)
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_affine_transform25(self, order):
+        # consistency between diagonal and non-diagonal case; see issue #1547
+        data = numpy.array([4, 1, 3, 2])
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning,
+                       'The behavior of affine_transform with a 1-D array .* '
+                       'has changed')
+            out1 = ndimage.affine_transform(data, [0.5], -1, order=order)
+        out2 = ndimage.affine_transform(data, [[0.5]], -1, order=order)
+        assert_array_almost_equal(out1, out2)
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_affine_transform26(self, order):
+        # test homogeneous coordinates
+        data = numpy.array([[4, 1, 3, 2],
+                            [7, 6, 8, 5],
+                            [3, 5, 3, 6]])
+        if order > 1:
+            filtered = ndimage.spline_filter(data, order=order)
+        else:
+            filtered = data
+        tform_original = numpy.eye(2)
+        offset_original = -numpy.ones((2, 1))
+        tform_h1 = numpy.hstack((tform_original, offset_original))
+        tform_h2 = numpy.vstack((tform_h1, [[0, 0, 1]]))
+        out1 = ndimage.affine_transform(filtered, tform_original,
+                                        offset_original.ravel(),
+                                        order=order, prefilter=False)
+        out2 = ndimage.affine_transform(filtered, tform_h1, order=order,
+                                        prefilter=False)
+        out3 = ndimage.affine_transform(filtered, tform_h2, order=order,
+                                        prefilter=False)
+        for out in [out1, out2, out3]:
+            assert_array_almost_equal(out, [[0, 0, 0, 0],
+                                            [0, 4, 1, 3],
+                                            [0, 7, 6, 8]])
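+    # The matrix may be given in three equivalent forms: an (n, n) matrix
+    # with a separate offset, an (n, n+1) matrix whose last column is the
+    # offset, or an (n+1, n+1) homogeneous matrix whose last row must be
+    # [0, ..., 0, 1]; test_affine_transform27 below checks that an invalid
+    # last row is rejected.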
+
+    def test_affine_transform27(self):
+        # test valid homogeneous transformation matrix
+        data = numpy.array([[4, 1, 3, 2],
+                            [7, 6, 8, 5],
+                            [3, 5, 3, 6]])
+        tform_h1 = numpy.hstack((numpy.eye(2), -numpy.ones((2, 1))))
+        tform_h2 = numpy.vstack((tform_h1, [[5, 2, 1]]))
+        assert_raises(ValueError, ndimage.affine_transform, data, tform_h2)
+
+    def test_affine_transform_1d_endianness_with_output_parameter(self):
+        # 1d affine transform given output ndarray or dtype with
+        # either endianness. see issue #7388
+        data = numpy.ones((2, 2))
+        for out in [numpy.empty_like(data),
+                    numpy.empty_like(data).astype(data.dtype.newbyteorder()),
+                    data.dtype, data.dtype.newbyteorder()]:
+            with suppress_warnings() as sup:
+                sup.filter(UserWarning,
+                           'The behavior of affine_transform with a 1-D array '
+                           '.* has changed')
+                returned = ndimage.affine_transform(data, [1, 1], output=out)
+            result = out if returned is None else returned
+            assert_array_almost_equal(result, [[1, 1], [1, 1]])
+
+    def test_affine_transform_multi_d_endianness_with_output_parameter(self):
+        # affine transform given output ndarray or dtype with either endianness
+        # see issue #4127
+        data = numpy.array([1])
+        for out in [data.dtype, data.dtype.newbyteorder(),
+                    numpy.empty_like(data),
+                    numpy.empty_like(data).astype(data.dtype.newbyteorder())]:
+            returned = ndimage.affine_transform(data, [[1]], output=out)
+            result = out if returned is None else returned
+            assert_array_almost_equal(result, [1])
+
+    def test_affine_transform_output_shape(self):
+        # don't require output_shape when out of a different size is given
+        data = numpy.arange(8, dtype=numpy.float64)
+        out = numpy.ones((16,))
+        oshape = out.shape
+
+        ndimage.affine_transform(data, [[1]], output=out)
+        assert_array_almost_equal(out[:8], data)
+
+        # mismatched output shape raises an error
+        with pytest.raises(RuntimeError):
+            ndimage.affine_transform(
+                data, [[1]], output=out, output_shape=(12,))
+
+    def test_affine_transform_with_string_output(self):
+        data = numpy.array([1])
+        out = ndimage.affine_transform(data, [[1]], output='f')
+        assert_(out.dtype is numpy.dtype('f'))
+        assert_array_almost_equal(out, [1])
+
+    @pytest.mark.parametrize('shift',
+                             [(1, 0), (0, 1), (-1, 1), (3, -5), (2, 7)])
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_affine_transform_shift_via_grid_wrap(self, shift, order):
+        # For mode 'grid-wrap', integer shifts should match numpy.roll
+        x = numpy.array([[0, 1],
+                         [2, 3]])
+        affine = numpy.zeros((2, 3))
+        affine[:2, :2] = numpy.eye(2)
+        affine[:, 2] = shift
+        assert_array_almost_equal(
+            ndimage.affine_transform(x, affine, mode='grid-wrap', order=order),
+            numpy.roll(x, shift, axis=(0, 1)),
+        )
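+    # affine_transform maps output coordinate o to input coordinate
+    # matrix @ o + offset; with an identity matrix and an integer offset,
+    # every lookup lands exactly on a (periodically wrapped) grid sample,
+    # so the result is the same for every spline order.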
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_affine_transform_shift_reflect(self, order):
+        # shift by x.shape results in reflection
+        x = numpy.array([[0, 1, 2],
+                         [3, 4, 5]])
+        affine = numpy.zeros((2, 3))
+        affine[:2, :2] = numpy.eye(2)
+        affine[:, 2] = x.shape
+        assert_array_almost_equal(
+            ndimage.affine_transform(x, affine, mode='reflect', order=order),
+            x[::-1, ::-1],
+        )
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_shift01(self, order):
+        data = numpy.array([1])
+        out = ndimage.shift(data, [1], order=order)
+        assert_array_almost_equal(out, [0])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_shift02(self, order):
+        data = numpy.ones([4])
+        out = ndimage.shift(data, [1], order=order)
+        assert_array_almost_equal(out, [0, 1, 1, 1])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_shift03(self, order):
+        data = numpy.ones([4])
+        out = ndimage.shift(data, -1, order=order)
+        assert_array_almost_equal(out, [1, 1, 1, 0])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_shift04(self, order):
+        data = numpy.array([4, 1, 3, 2])
+        out = ndimage.shift(data, 1, order=order)
+        assert_array_almost_equal(out, [0, 4, 1, 3])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    @pytest.mark.parametrize('dtype', [numpy.float64, numpy.complex128])
+    def test_shift05(self, order, dtype):
+        data = numpy.array([[1, 1, 1, 1],
+                            [1, 1, 1, 1],
+                            [1, 1, 1, 1]], dtype=dtype)
+        expected = numpy.array([[0, 1, 1, 1],
+                                [0, 1, 1, 1],
+                                [0, 1, 1, 1]], dtype=dtype)
+        if data.dtype.kind == 'c':
+            data -= 1j * data
+            expected -= 1j * expected
+        out = ndimage.shift(data, [0, 1], order=order)
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    @pytest.mark.parametrize('mode', ['constant', 'grid-constant'])
+    @pytest.mark.parametrize('dtype', [numpy.float64, numpy.complex128])
+    def test_shift_with_nonzero_cval(self, order, mode, dtype):
+        data = numpy.array([[1, 1, 1, 1],
+                            [1, 1, 1, 1],
+                            [1, 1, 1, 1]], dtype=dtype)
+
+        expected = numpy.array([[0, 1, 1, 1],
+                                [0, 1, 1, 1],
+                                [0, 1, 1, 1]], dtype=dtype)
+
+        if data.dtype.kind == 'c':
+            data -= 1j * data
+            expected -= 1j * expected
+        cval = 5.0
+        expected[:, 0] = cval  # specific to shift of [0, 1] used below
+        out = ndimage.shift(data, [0, 1], order=order, mode=mode, cval=cval)
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_shift06(self, order):
+        data = numpy.array([[4, 1, 3, 2],
+                            [7, 6, 8, 5],
+                            [3, 5, 3, 6]])
+        out = ndimage.shift(data, [0, 1], order=order)
+        assert_array_almost_equal(out, [[0, 4, 1, 3],
+                                        [0, 7, 6, 8],
+                                        [0, 3, 5, 3]])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_shift07(self, order):
+        data = numpy.array([[4, 1, 3, 2],
+                            [7, 6, 8, 5],
+                            [3, 5, 3, 6]])
+        out = ndimage.shift(data, [1, 0], order=order)
+        assert_array_almost_equal(out, [[0, 0, 0, 0],
+                                        [4, 1, 3, 2],
+                                        [7, 6, 8, 5]])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_shift08(self, order):
+        data = numpy.array([[4, 1, 3, 2],
+                            [7, 6, 8, 5],
+                            [3, 5, 3, 6]])
+        out = ndimage.shift(data, [1, 1], order=order)
+        assert_array_almost_equal(out, [[0, 0, 0, 0],
+                                        [0, 4, 1, 3],
+                                        [0, 7, 6, 8]])
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_shift09(self, order):
+        data = numpy.array([[4, 1, 3, 2],
+                            [7, 6, 8, 5],
+                            [3, 5, 3, 6]])
+        if order > 1:
+            filtered = ndimage.spline_filter(data, order=order)
+        else:
+            filtered = data
+        out = ndimage.shift(filtered, [1, 1], order=order, prefilter=False)
+        assert_array_almost_equal(out, [[0, 0, 0, 0],
+                                        [0, 4, 1, 3],
+                                        [0, 7, 6, 8]])
+
+    @pytest.mark.parametrize('shift',
+                             [(1, 0), (0, 1), (-1, 1), (3, -5), (2, 7)])
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_shift_grid_wrap(self, shift, order):
+        # For mode 'grid-wrap', integer shifts should match numpy.roll
+        x = numpy.array([[0, 1],
+                         [2, 3]])
+        assert_array_almost_equal(
+            ndimage.shift(x, shift, mode='grid-wrap', order=order),
+            numpy.roll(x, shift, axis=(0, 1)),
+        )
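+    # ndimage.shift moves content forward: out[o] = x[o - shift], the same
+    # convention as numpy.roll once 'grid-wrap' makes the lookup periodic.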
+
+    @pytest.mark.parametrize('shift',
+                             [(1, 0), (0, 1), (-1, 1), (3, -5), (2, 7)])
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_shift_grid_constant1(self, shift, order):
+        # For integer shifts, 'constant' and 'grid-constant' should be equal
+        x = numpy.arange(20).reshape((5, 4))
+        assert_array_almost_equal(
+            ndimage.shift(x, shift, mode='grid-constant', order=order),
+            ndimage.shift(x, shift, mode='constant', order=order),
+        )
+
+    def test_shift_grid_constant_order1(self):
+        x = numpy.array([[1, 2, 3],
+                         [4, 5, 6]], dtype=float)
+        expected_result = numpy.array([[0.25, 0.75, 1.25],
+                                       [1.25, 3.00, 4.00]])
+        assert_array_almost_equal(
+            ndimage.shift(x, (0.5, 0.5), mode='grid-constant', order=1),
+            expected_result,
+        )
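+    # Same bilinear arithmetic as
+    # test_geometric_transform_grid_constant_order1 above: each output
+    # sample averages four samples of the zero-padded input, e.g.
+    # out[0, 0] = (0 + 0 + 0 + 1) / 4 = 0.25.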
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_shift_reflect(self, order):
+        # shift by x.shape results in reflection
+        x = numpy.array([[0, 1, 2],
+                         [3, 4, 5]])
+        assert_array_almost_equal(
+            ndimage.shift(x, x.shape, mode='reflect', order=order),
+            x[::-1, ::-1],
+        )
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    @pytest.mark.parametrize('prefilter', [False, True])
+    def test_shift_nearest_boundary(self, order, prefilter):
+        # verify that shifting at least order // 2 beyond the end of the array
+        # gives a value equal to the edge value.
+        x = numpy.arange(16)
+        kwargs = dict(mode='nearest', order=order, prefilter=prefilter)
+        assert_array_almost_equal(
+            ndimage.shift(x, order // 2 + 1, **kwargs)[0], x[0],
+        )
+        assert_array_almost_equal(
+            ndimage.shift(x, -order // 2 - 1, **kwargs)[-1], x[-1],
+        )
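+    # A spline of order n draws on roughly n // 2 neighbors on each side,
+    # so a shift of order // 2 + 1 guarantees the first (or last) output
+    # sample is determined entirely by the 'nearest' boundary extension.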
+
+    @pytest.mark.parametrize('mode', ['grid-constant', 'grid-wrap', 'nearest',
+                                      'mirror', 'reflect'])
+    @pytest.mark.parametrize('order', range(6))
+    def test_shift_vs_padded(self, order, mode):
+        x = numpy.arange(144, dtype=float).reshape(12, 12)
+        shift = (0.4, -2.3)
+
+        # manually pad and then extract center to get expected result
+        npad = 32
+        pad_mode = ndimage_to_numpy_mode.get(mode)
+        xp = numpy.pad(x, npad, mode=pad_mode)
+        center_slice = tuple([slice(npad, -npad)] * x.ndim)
+        expected_result = ndimage.shift(
+            xp, shift, mode=mode, order=order)[center_slice]
+
+        assert_allclose(
+            ndimage.shift(x, shift, mode=mode, order=order),
+            expected_result,
+            rtol=1e-7,
+        )
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_zoom1(self, order):
+        for z in [2, [2, 2]]:
+            arr = numpy.array(list(range(25))).reshape((5, 5)).astype(float)
+            arr = ndimage.zoom(arr, z, order=order)
+            assert_equal(arr.shape, (10, 10))
+            assert_(numpy.all(arr[-1, :] != 0))
+            assert_(numpy.all(arr[-1, :] >= (20 - eps)))
+            assert_(numpy.all(arr[0, :] <= (5 + eps)))
+            assert_(numpy.all(arr >= (0 - eps)))
+            assert_(numpy.all(arr <= (24 + eps)))
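+    # A scalar zoom factor applies to every axis, so z = 2 and z = [2, 2]
+    # request the same transform; the assertions bound the interpolated
+    # ramp by its input extrema (0 and 24) up to eps.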
+
+    def test_zoom2(self):
+        arr = numpy.arange(12).reshape((3, 4))
+        out = ndimage.zoom(ndimage.zoom(arr, 2), 0.5)
+        assert_array_equal(out, arr)
+
+    def test_zoom3(self):
+        arr = numpy.array([[1, 2]])
+        out1 = ndimage.zoom(arr, (2, 1))
+        out2 = ndimage.zoom(arr, (1, 2))
+
+        assert_array_almost_equal(out1, numpy.array([[1, 2], [1, 2]]))
+        assert_array_almost_equal(out2, numpy.array([[1, 1, 2, 2]]))
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    @pytest.mark.parametrize('dtype', [numpy.float64, numpy.complex128])
+    def test_zoom_affine01(self, order, dtype):
+        data = numpy.asarray([[1, 2, 3, 4],
+                              [5, 6, 7, 8],
+                              [9, 10, 11, 12]], dtype=dtype)
+        if data.dtype.kind == 'c':
+            data -= 1j * data
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning,
+                       'The behavior of affine_transform with a 1-D array .* '
+                       'has changed')
+            out = ndimage.affine_transform(data, [0.5, 0.5], 0,
+                                           (6, 8), order=order)
+        assert_array_almost_equal(out[::2, ::2], data)
+
+    def test_zoom_infinity(self):
+        # Ticket #1419 regression test
+        dim = 8
+        ndimage.zoom(numpy.zeros((dim, dim)), 1. / dim, mode='nearest')
+
+    def test_zoom_zoomfactor_one(self):
+        # Ticket #1122 regression test
+        arr = numpy.zeros((1, 5, 5))
+        zoom = (1.0, 2.0, 2.0)
+
+        out = ndimage.zoom(arr, zoom, cval=7)
+        ref = numpy.zeros((1, 10, 10))
+        assert_array_almost_equal(out, ref)
+
+    def test_zoom_output_shape_roundoff(self):
+        arr = numpy.zeros((3, 11, 25))
+        zoom = (4.0 / 3, 15.0 / 11, 29.0 / 25)
+        out = ndimage.zoom(arr, zoom)
+        assert_array_equal(out.shape, (4, 15, 29))
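+    # The output length per axis is round(input_size * zoom); the factors
+    # above make the exact products integers (4, 15, 29), guarding against
+    # floating-point round-off shrinking a dimension by one sample.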
+
+    @pytest.mark.parametrize('zoom', [(1, 1), (3, 5), (8, 2), (8, 8)])
+    @pytest.mark.parametrize('mode', ['nearest', 'constant', 'wrap', 'reflect',
+                                      'mirror', 'grid-wrap', 'grid-mirror',
+                                      'grid-constant'])
+    def test_zoom_by_int_order0(self, zoom, mode):
+        # order 0 zoom should be the same as replication via numpy.kron
+        # Note: This is not True for general x shapes when grid_mode is False,
+        #       but works here for all modes because the size ratio happens to
+        #       always be an integer when x.shape = (2, 2).
+        x = numpy.array([[0, 1],
+                         [2, 3]], dtype=float)
+        assert_array_almost_equal(
+            ndimage.zoom(x, zoom, order=0, mode=mode),
+            numpy.kron(x, numpy.ones(zoom))
+        )
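+    # numpy.kron(x, numpy.ones(zoom)) repeats each sample into a
+    # zoom-sized block, e.g. kron([[0, 1], [2, 3]], ones((2, 2))) gives
+    # [[0, 0, 1, 1], [0, 0, 1, 1], [2, 2, 3, 3], [2, 2, 3, 3]].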
+
+    @pytest.mark.parametrize('shape', [(2, 3), (4, 4)])
+    @pytest.mark.parametrize('zoom', [(1, 1), (3, 5), (8, 2), (8, 8)])
+    @pytest.mark.parametrize('mode', ['nearest', 'reflect', 'mirror',
+                                      'grid-wrap', 'grid-constant'])
+    def test_zoom_grid_by_int_order0(self, shape, zoom, mode):
+        # When grid_mode is True, order 0 zoom should be the same as
+        # replication via numpy.kron. The only exceptions to this are the
+        # non-grid modes 'constant' and 'wrap'.
+        x = numpy.arange(numpy.prod(shape), dtype=float).reshape(shape)
+        assert_array_almost_equal(
+            ndimage.zoom(x, zoom, order=0, mode=mode, grid_mode=True),
+            numpy.kron(x, numpy.ones(zoom))
+        )
+
+    @pytest.mark.parametrize('mode', ['constant', 'wrap'])
+    def test_zoom_grid_mode_warnings(self, mode):
+        # Warn on use of non-grid modes when grid_mode is True
+        x = numpy.arange(9, dtype=float).reshape((3, 3))
+        with pytest.warns(UserWarning,
+                          match="It is recommended to use mode"):
+            ndimage.zoom(x, 2, mode=mode, grid_mode=True)
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_rotate01(self, order):
+        data = numpy.array([[0, 0, 0, 0],
+                            [0, 1, 1, 0],
+                            [0, 0, 0, 0]], dtype=numpy.float64)
+        out = ndimage.rotate(data, 0, order=order)
+        assert_array_almost_equal(out, data)
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_rotate02(self, order):
+        data = numpy.array([[0, 0, 0, 0],
+                            [0, 1, 0, 0],
+                            [0, 0, 0, 0]], dtype=numpy.float64)
+        expected = numpy.array([[0, 0, 0],
+                                [0, 0, 0],
+                                [0, 1, 0],
+                                [0, 0, 0]], dtype=numpy.float64)
+        out = ndimage.rotate(data, 90, order=order)
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    @pytest.mark.parametrize('dtype', [numpy.float64, numpy.complex128])
+    def test_rotate03(self, order, dtype):
+        data = numpy.array([[0, 0, 0, 0, 0],
+                            [0, 1, 1, 0, 0],
+                            [0, 0, 0, 0, 0]], dtype=dtype)
+        expected = numpy.array([[0, 0, 0],
+                                [0, 0, 0],
+                                [0, 1, 0],
+                                [0, 1, 0],
+                                [0, 0, 0]], dtype=dtype)
+        if data.dtype.kind == 'c':
+            data -= 1j * data
+            expected -= 1j * expected
+        out = ndimage.rotate(data, 90, order=order)
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_rotate04(self, order):
+        data = numpy.array([[0, 0, 0, 0, 0],
+                            [0, 1, 1, 0, 0],
+                            [0, 0, 0, 0, 0]], dtype=numpy.float64)
+        expected = numpy.array([[0, 0, 0, 0, 0],
+                                [0, 0, 1, 0, 0],
+                                [0, 0, 1, 0, 0]], dtype=numpy.float64)
+        out = ndimage.rotate(data, 90, reshape=False, order=order)
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_rotate05(self, order):
+        data = numpy.empty((4, 3, 3))
+        for i in range(3):
+            data[:, :, i] = numpy.array([[0, 0, 0],
+                                         [0, 1, 0],
+                                         [0, 1, 0],
+                                         [0, 0, 0]], dtype=numpy.float64)
+        expected = numpy.array([[0, 0, 0, 0],
+                                [0, 1, 1, 0],
+                                [0, 0, 0, 0]], dtype=numpy.float64)
+        out = ndimage.rotate(data, 90, order=order)
+        for i in range(3):
+            assert_array_almost_equal(out[:, :, i], expected)
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_rotate06(self, order):
+        data = numpy.empty((3, 4, 3))
+        for i in range(3):
+            data[:, :, i] = numpy.array([[0, 0, 0, 0],
+                                         [0, 1, 1, 0],
+                                         [0, 0, 0, 0]], dtype=numpy.float64)
+        expected = numpy.array([[0, 0, 0],
+                                [0, 1, 0],
+                                [0, 1, 0],
+                                [0, 0, 0]], dtype=numpy.float64)
+        out = ndimage.rotate(data, 90, order=order)
+        for i in range(3):
+            assert_array_almost_equal(out[:, :, i], expected)
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_rotate07(self, order):
+        data = numpy.array([[[0, 0, 0, 0, 0],
+                             [0, 1, 1, 0, 0],
+                             [0, 0, 0, 0, 0]]] * 2, dtype=numpy.float64)
+        data = data.transpose()
+        expected = numpy.array([[[0, 0, 0],
+                                 [0, 1, 0],
+                                 [0, 1, 0],
+                                 [0, 0, 0],
+                                 [0, 0, 0]]] * 2, dtype=numpy.float64)
+        expected = expected.transpose([2, 1, 0])
+        out = ndimage.rotate(data, 90, axes=(0, 1), order=order)
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('order', range(0, 6))
+    def test_rotate08(self, order):
+        data = numpy.array([[[0, 0, 0, 0, 0],
+                             [0, 1, 1, 0, 0],
+                             [0, 0, 0, 0, 0]]] * 2, dtype=numpy.float64)
+        data = data.transpose()
+        expected = numpy.array([[[0, 0, 1, 0, 0],
+                                 [0, 0, 1, 0, 0],
+                                 [0, 0, 0, 0, 0]]] * 2, dtype=numpy.float64)
+        expected = expected.transpose()
+        out = ndimage.rotate(data, 90, axes=(0, 1), reshape=False, order=order)
+        assert_array_almost_equal(out, expected)
+
+    def test_rotate09(self):
+        data = numpy.array([[0, 0, 0, 0, 0],
+                            [0, 1, 1, 0, 0],
+                            [0, 0, 0, 0, 0]] * 2, dtype=numpy.float64)
+        with assert_raises(ValueError):
+            ndimage.rotate(data, 90, axes=(0, data.ndim))
+
+    def test_rotate10(self):
+        data = numpy.arange(45, dtype=numpy.float64).reshape((3, 5, 3))
+
+        # Expected values are the output of ndimage.rotate prior to its
+        # refactoring, kept as a regression reference
+        expected = numpy.array([[[0.0, 0.0, 0.0],
+                                 [0.0, 0.0, 0.0],
+                                 [6.54914793, 7.54914793, 8.54914793],
+                                 [10.84520162, 11.84520162, 12.84520162],
+                                 [0.0, 0.0, 0.0]],
+                                [[6.19286575, 7.19286575, 8.19286575],
+                                 [13.4730712, 14.4730712, 15.4730712],
+                                 [21.0, 22.0, 23.0],
+                                 [28.5269288, 29.5269288, 30.5269288],
+                                 [35.80713425, 36.80713425, 37.80713425]],
+                                [[0.0, 0.0, 0.0],
+                                 [31.15479838, 32.15479838, 33.15479838],
+                                 [35.45085207, 36.45085207, 37.45085207],
+                                 [0.0, 0.0, 0.0],
+                                 [0.0, 0.0, 0.0]]])
+
+        out = ndimage.rotate(data, angle=12, reshape=False)
+        assert_array_almost_equal(out, expected)
+
+    def test_rotate_exact_180(self):
+        a = numpy.tile(numpy.arange(5), (5, 1))
+        b = ndimage.rotate(ndimage.rotate(a, 180), -180)
+        assert_equal(a, b)
+
+
+def test_zoom_output_shape():
+    """Ticket #643"""
+    x = numpy.arange(12).reshape((3, 4))
+    ndimage.zoom(x, 2, output=numpy.zeros((6, 8)))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/ndimage/tests/test_measurements.py b/__packaged__/coreml/.python_dependencies/scipy/ndimage/tests/test_measurements.py
new file mode 100644
index 00000000..8ddf5681
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/ndimage/tests/test_measurements.py
@@ -0,0 +1,1393 @@
+import os.path
+
+import numpy as np
+from numpy.testing import (assert_, assert_array_almost_equal, assert_equal,
+                           assert_almost_equal, assert_array_equal,
+                           suppress_warnings)
+from pytest import raises as assert_raises
+
+import scipy.ndimage as ndimage
+
+
+from . import types
+
+
+class Test_measurements_stats:
+    """ndimage._measurements._stats() is a utility used by other functions."""
+
+    def test_a(self):
+        x = [0, 1, 2, 6]
+        labels = [0, 0, 1, 1]
+        index = [0, 1]
+        for shp in [(4,), (2, 2)]:
+            x = np.array(x).reshape(shp)
+            labels = np.array(labels).reshape(shp)
+            counts, sums = ndimage._measurements._stats(
+                x, labels=labels, index=index)
+            assert_array_equal(counts, [2, 2])
+            assert_array_equal(sums, [1.0, 8.0])
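+        # label 0 covers the values {0, 1} and label 1 covers {2, 6},
+        # hence counts [2, 2] and sums [1.0, 8.0]; the loop over shapes
+        # checks the statistics are independent of the array layout.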
+
+    def test_b(self):
+        # Same data as test_a, but different labels.  The label value 9
+        # exceeds the size of the labels array, so this test follows a
+        # different code path in _stats.
+        x = [0, 1, 2, 6]
+        labels = [0, 0, 9, 9]
+        index = [0, 9]
+        for shp in [(4,), (2, 2)]:
+            x = np.array(x).reshape(shp)
+            labels = np.array(labels).reshape(shp)
+            counts, sums = ndimage._measurements._stats(
+                x, labels=labels, index=index)
+            assert_array_equal(counts, [2, 2])
+            assert_array_equal(sums, [1.0, 8.0])
+
+    def test_a_centered(self):
+        x = [0, 1, 2, 6]
+        labels = [0, 0, 1, 1]
+        index = [0, 1]
+        for shp in [(4,), (2, 2)]:
+            x = np.array(x).reshape(shp)
+            labels = np.array(labels).reshape(shp)
+            counts, sums, centers = ndimage._measurements._stats(
+                x, labels=labels, index=index, centered=True)
+            assert_array_equal(counts, [2, 2])
+            assert_array_equal(sums, [1.0, 8.0])
+            assert_array_equal(centers, [0.5, 8.0])
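+        # with centered=True the extra return value is each label's sum of
+        # squared deviations from its mean: {0, 1} -> 0.5 and {2, 6} -> 8.0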
+
+    def test_b_centered(self):
+        x = [0, 1, 2, 6]
+        labels = [0, 0, 9, 9]
+        index = [0, 9]
+        for shp in [(4,), (2, 2)]:
+            x = np.array(x).reshape(shp)
+            labels = np.array(labels).reshape(shp)
+            counts, sums, centers = ndimage._measurements._stats(
+                x, labels=labels, index=index, centered=True)
+            assert_array_equal(counts, [2, 2])
+            assert_array_equal(sums, [1.0, 8.0])
+            assert_array_equal(centers, [0.5, 8.0])
+
+    def test_nonint_labels(self):
+        x = [0, 1, 2, 6]
+        labels = [0.0, 0.0, 9.0, 9.0]
+        index = [0.0, 9.0]
+        for shp in [(4,), (2, 2)]:
+            x = np.array(x).reshape(shp)
+            labels = np.array(labels).reshape(shp)
+            counts, sums, centers = ndimage._measurements._stats(
+                x, labels=labels, index=index, centered=True)
+            assert_array_equal(counts, [2, 2])
+            assert_array_equal(sums, [1.0, 8.0])
+            assert_array_equal(centers, [0.5, 8.0])
+
+
+class Test_measurements_select:
+    """ndimage._measurements._select() is a utility used by other functions."""
+
+    def test_basic(self):
+        x = [0, 1, 6, 2]
+        cases = [
+            ([0, 0, 1, 1], [0, 1]),           # "Small" integer labels
+            ([0, 0, 9, 9], [0, 9]),           # A label larger than len(labels)
+            ([0.0, 0.0, 7.0, 7.0], [0.0, 7.0]),   # Non-integer labels
+        ]
+        for labels, index in cases:
+            result = ndimage._measurements._select(
+                x, labels=labels, index=index)
+            assert_(len(result) == 0)
+            result = ndimage._measurements._select(
+                x, labels=labels, index=index, find_max=True)
+            assert_(len(result) == 1)
+            assert_array_equal(result[0], [1, 6])
+            result = ndimage._measurements._select(
+                x, labels=labels, index=index, find_min=True)
+            assert_(len(result) == 1)
+            assert_array_equal(result[0], [0, 2])
+            result = ndimage._measurements._select(
+                x, labels=labels, index=index, find_min=True,
+                find_min_positions=True)
+            assert_(len(result) == 2)
+            assert_array_equal(result[0], [0, 2])
+            assert_array_equal(result[1], [0, 3])
+            assert_equal(result[1].dtype.kind, 'i')
+            result = ndimage._measurements._select(
+                x, labels=labels, index=index, find_max=True,
+                find_max_positions=True)
+            assert_(len(result) == 2)
+            assert_array_equal(result[0], [1, 6])
+            assert_array_equal(result[1], [1, 2])
+            assert_equal(result[1].dtype.kind, 'i')
+
+
+def test_label01():
+    data = np.ones([])
+    out, n = ndimage.label(data)
+    assert_array_almost_equal(out, 1)
+    assert_equal(n, 1)
+
+
+def test_label02():
+    data = np.zeros([])
+    out, n = ndimage.label(data)
+    assert_array_almost_equal(out, 0)
+    assert_equal(n, 0)
+
+
+def test_label03():
+    data = np.ones([1])
+    out, n = ndimage.label(data)
+    assert_array_almost_equal(out, [1])
+    assert_equal(n, 1)
+
+
+def test_label04():
+    data = np.zeros([1])
+    out, n = ndimage.label(data)
+    assert_array_almost_equal(out, [0])
+    assert_equal(n, 0)
+
+
+def test_label05():
+    data = np.ones([5])
+    out, n = ndimage.label(data)
+    assert_array_almost_equal(out, [1, 1, 1, 1, 1])
+    assert_equal(n, 1)
+
+
+def test_label06():
+    data = np.array([1, 0, 1, 1, 0, 1])
+    out, n = ndimage.label(data)
+    assert_array_almost_equal(out, [1, 0, 2, 2, 0, 3])
+    assert_equal(n, 3)
+
+
+def test_label07():
+    data = np.array([[0, 0, 0, 0, 0, 0],
+                     [0, 0, 0, 0, 0, 0],
+                     [0, 0, 0, 0, 0, 0],
+                     [0, 0, 0, 0, 0, 0],
+                     [0, 0, 0, 0, 0, 0],
+                     [0, 0, 0, 0, 0, 0]])
+    out, n = ndimage.label(data)
+    assert_array_almost_equal(out, [[0, 0, 0, 0, 0, 0],
+                                    [0, 0, 0, 0, 0, 0],
+                                    [0, 0, 0, 0, 0, 0],
+                                    [0, 0, 0, 0, 0, 0],
+                                    [0, 0, 0, 0, 0, 0],
+                                    [0, 0, 0, 0, 0, 0]])
+    assert_equal(n, 0)
+
+
+def test_label08():
+    data = np.array([[1, 0, 0, 0, 0, 0],
+                     [0, 0, 1, 1, 0, 0],
+                     [0, 0, 1, 1, 1, 0],
+                     [1, 1, 0, 0, 0, 0],
+                     [1, 1, 0, 0, 0, 0],
+                     [0, 0, 0, 1, 1, 0]])
+    out, n = ndimage.label(data)
+    assert_array_almost_equal(out, [[1, 0, 0, 0, 0, 0],
+                                    [0, 0, 2, 2, 0, 0],
+                                    [0, 0, 2, 2, 2, 0],
+                                    [3, 3, 0, 0, 0, 0],
+                                    [3, 3, 0, 0, 0, 0],
+                                    [0, 0, 0, 4, 4, 0]])
+    assert_equal(n, 4)
+
+
+def test_label09():
+    data = np.array([[1, 0, 0, 0, 0, 0],
+                     [0, 0, 1, 1, 0, 0],
+                     [0, 0, 1, 1, 1, 0],
+                     [1, 1, 0, 0, 0, 0],
+                     [1, 1, 0, 0, 0, 0],
+                     [0, 0, 0, 1, 1, 0]])
+    struct = ndimage.generate_binary_structure(2, 2)
+    out, n = ndimage.label(data, struct)
+    assert_array_almost_equal(out, [[1, 0, 0, 0, 0, 0],
+                                    [0, 0, 2, 2, 0, 0],
+                                    [0, 0, 2, 2, 2, 0],
+                                    [2, 2, 0, 0, 0, 0],
+                                    [2, 2, 0, 0, 0, 0],
+                                    [0, 0, 0, 3, 3, 0]])
+    assert_equal(n, 3)
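+
+
+def test_label_connectivity_demo():
+    # Illustrative sketch added for exposition (not part of upstream SciPy):
+    # the default structuring element is 4-connected, so diagonally touching
+    # pixels form separate components, while the full 3x3 structure used in
+    # test_label09 (8-connectivity) merges them.
+    d = np.array([[1, 0],
+                  [0, 1]])
+    assert_equal(ndimage.label(d)[1], 2)
+    assert_equal(ndimage.label(d, np.ones((3, 3)))[1], 1)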
+
+
+def test_label10():
+    data = np.array([[0, 0, 0, 0, 0, 0],
+                     [0, 1, 1, 0, 1, 0],
+                     [0, 1, 1, 1, 1, 0],
+                     [0, 0, 0, 0, 0, 0]])
+    struct = ndimage.generate_binary_structure(2, 2)
+    out, n = ndimage.label(data, struct)
+    assert_array_almost_equal(out, [[0, 0, 0, 0, 0, 0],
+                                    [0, 1, 1, 0, 1, 0],
+                                    [0, 1, 1, 1, 1, 0],
+                                    [0, 0, 0, 0, 0, 0]])
+    assert_equal(n, 1)
+
+
+def test_label11():
+    for type in types:
+        data = np.array([[1, 0, 0, 0, 0, 0],
+                         [0, 0, 1, 1, 0, 0],
+                         [0, 0, 1, 1, 1, 0],
+                         [1, 1, 0, 0, 0, 0],
+                         [1, 1, 0, 0, 0, 0],
+                         [0, 0, 0, 1, 1, 0]], type)
+        out, n = ndimage.label(data)
+        expected = [[1, 0, 0, 0, 0, 0],
+                    [0, 0, 2, 2, 0, 0],
+                    [0, 0, 2, 2, 2, 0],
+                    [3, 3, 0, 0, 0, 0],
+                    [3, 3, 0, 0, 0, 0],
+                    [0, 0, 0, 4, 4, 0]]
+        assert_array_almost_equal(out, expected)
+        assert_equal(n, 4)
+
+
+def test_label11_inplace():
+    for type in types:
+        data = np.array([[1, 0, 0, 0, 0, 0],
+                         [0, 0, 1, 1, 0, 0],
+                         [0, 0, 1, 1, 1, 0],
+                         [1, 1, 0, 0, 0, 0],
+                         [1, 1, 0, 0, 0, 0],
+                         [0, 0, 0, 1, 1, 0]], type)
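+        # When an output array is supplied, label() writes the labels into it
+        # in place and returns only the number of features.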
+        n = ndimage.label(data, output=data)
+        expected = [[1, 0, 0, 0, 0, 0],
+                    [0, 0, 2, 2, 0, 0],
+                    [0, 0, 2, 2, 2, 0],
+                    [3, 3, 0, 0, 0, 0],
+                    [3, 3, 0, 0, 0, 0],
+                    [0, 0, 0, 4, 4, 0]]
+        assert_array_almost_equal(data, expected)
+        assert_equal(n, 4)
+
+
+def test_label12():
+    for type in types:
+        data = np.array([[0, 0, 0, 0, 1, 1],
+                         [0, 0, 0, 0, 0, 1],
+                         [0, 0, 1, 0, 1, 1],
+                         [0, 0, 1, 1, 1, 1],
+                         [0, 0, 0, 1, 1, 0]], type)
+        out, n = ndimage.label(data)
+        expected = [[0, 0, 0, 0, 1, 1],
+                    [0, 0, 0, 0, 0, 1],
+                    [0, 0, 1, 0, 1, 1],
+                    [0, 0, 1, 1, 1, 1],
+                    [0, 0, 0, 1, 1, 0]]
+        assert_array_almost_equal(out, expected)
+        assert_equal(n, 1)
+
+
+def test_label13():
+    for type in types:
+        data = np.array([[1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1],
+                         [1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1],
+                         [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
+                         [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]],
+                        type)
+        out, n = ndimage.label(data)
+        expected = [[1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1],
+                    [1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1],
+                    [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
+                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
+        assert_array_almost_equal(out, expected)
+        assert_equal(n, 1)
+
+
+def test_label_output_typed():
+    data = np.ones([5])
+    for t in types:
+        output = np.zeros([5], dtype=t)
+        n = ndimage.label(data, output=output)
+        assert_array_almost_equal(output, 1)
+        assert_equal(n, 1)
+
+
+def test_label_output_dtype():
+    data = np.ones([5])
+    for t in types:
+        output, n = ndimage.label(data, output=t)
+        assert_array_almost_equal(output, 1)
+        assert output.dtype == t
+
+
+def test_label_output_wrong_size():
+    data = np.ones([5])
+    for t in types:
+        output = np.zeros([10], t)
+        assert_raises((RuntimeError, ValueError),
+                      ndimage.label, data, output=output)
+
+
+def test_label_structuring_elements():
+    data = np.loadtxt(os.path.join(os.path.dirname(
+        __file__), "data", "label_inputs.txt"))
+    strels = np.loadtxt(os.path.join(
+        os.path.dirname(__file__), "data", "label_strels.txt"))
+    results = np.loadtxt(os.path.join(
+        os.path.dirname(__file__), "data", "label_results.txt"))
+    data = data.reshape((-1, 7, 7))
+    strels = strels.reshape((-1, 3, 3))
+    results = results.reshape((-1, 7, 7))
+    r = 0
+    for i in range(data.shape[0]):
+        d = data[i, :, :]
+        for j in range(strels.shape[0]):
+            s = strels[j, :, :]
+            assert_equal(ndimage.label(d, s)[0], results[r, :, :])
+            r += 1
+
+
+def test_ticket_742():
+    def SE(img, thresh=.7, size=4):
+        mask = img > thresh
+        rank = len(mask.shape)
+        la, co = ndimage.label(mask,
+                               ndimage.generate_binary_structure(rank, rank))
+        _ = ndimage.find_objects(la)
+
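+    # Only run where intp is wider than a 32-bit C int (i.e. 64-bit builds);
+    # the original failure presumably depended on the platform integer width.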
+    if np.dtype(np.intp) != np.dtype('i'):
+        shape = (3, 1240, 1240)
+        a = np.random.rand(np.prod(shape)).reshape(shape)
+        # shouldn't crash
+        SE(a)
+
+
+def test_gh_issue_3025():
+    """Github issue #3025 - improper merging of labels"""
+    d = np.zeros((60, 320))
+    d[:, :257] = 1
+    d[:, 260:] = 1
+    d[36, 257] = 1
+    d[35, 258] = 1
+    d[35, 259] = 1
+    assert ndimage.label(d, np.ones((3, 3)))[1] == 1
+
+
+def test_label_default_dtype():
+    test_array = np.random.rand(10, 10)
+    label, no_features = ndimage.label(test_array > 0.5)
+    assert_(label.dtype in (np.int32, np.int64))
+    # Shouldn't raise an exception
+    ndimage.find_objects(label)
+
+
+def test_find_objects01():
+    data = np.ones([], dtype=int)
+    out = ndimage.find_objects(data)
+    assert_(out == [()])
+
+
+def test_find_objects02():
+    data = np.zeros([], dtype=int)
+    out = ndimage.find_objects(data)
+    assert_(out == [])
+
+
+def test_find_objects03():
+    data = np.ones([1], dtype=int)
+    out = ndimage.find_objects(data)
+    assert_equal(out, [(slice(0, 1, None),)])
+
+
+def test_find_objects04():
+    data = np.zeros([1], dtype=int)
+    out = ndimage.find_objects(data)
+    assert_equal(out, [])
+
+
+def test_find_objects05():
+    data = np.ones([5], dtype=int)
+    out = ndimage.find_objects(data)
+    assert_equal(out, [(slice(0, 5, None),)])
+
+
+def test_find_objects06():
+    data = np.array([1, 0, 2, 2, 0, 3])
+    out = ndimage.find_objects(data)
+    assert_equal(out, [(slice(0, 1, None),),
+                       (slice(2, 4, None),),
+                       (slice(5, 6, None),)])
+
+
+def test_find_objects07():
+    data = np.array([[0, 0, 0, 0, 0, 0],
+                     [0, 0, 0, 0, 0, 0],
+                     [0, 0, 0, 0, 0, 0],
+                     [0, 0, 0, 0, 0, 0],
+                     [0, 0, 0, 0, 0, 0],
+                     [0, 0, 0, 0, 0, 0]])
+    out = ndimage.find_objects(data)
+    assert_equal(out, [])
+
+
+def test_find_objects08():
+    data = np.array([[1, 0, 0, 0, 0, 0],
+                     [0, 0, 2, 2, 0, 0],
+                     [0, 0, 2, 2, 2, 0],
+                     [3, 3, 0, 0, 0, 0],
+                     [3, 3, 0, 0, 0, 0],
+                     [0, 0, 0, 4, 4, 0]])
+    out = ndimage.find_objects(data)
+    assert_equal(out, [(slice(0, 1, None), slice(0, 1, None)),
+                       (slice(1, 3, None), slice(2, 5, None)),
+                       (slice(3, 5, None), slice(0, 2, None)),
+                       (slice(5, 6, None), slice(3, 5, None))])
+
+
+def test_find_objects09():
+    data = np.array([[1, 0, 0, 0, 0, 0],
+                     [0, 0, 2, 2, 0, 0],
+                     [0, 0, 2, 2, 2, 0],
+                     [0, 0, 0, 0, 0, 0],
+                     [0, 0, 0, 0, 0, 0],
+                     [0, 0, 0, 4, 4, 0]])
+    out = ndimage.find_objects(data)
+    assert_equal(out, [(slice(0, 1, None), slice(0, 1, None)),
+                       (slice(1, 3, None), slice(2, 5, None)),
+                       None,
+                       (slice(5, 6, None), slice(3, 5, None))])
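+
+
+def test_find_objects_missing_label_demo():
+    # Illustrative sketch added for exposition (not part of upstream SciPy):
+    # a label value that never occurs yields None in the result, preserving
+    # the positional correspondence between label value k and entry k - 1.
+    out = ndimage.find_objects(np.array([1, 0, 3]))
+    assert_equal(out, [(slice(0, 1, None),), None, (slice(2, 3, None),)])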
+
+
+def test_value_indices01():
+    "Test dictionary keys and entries"
+    data = np.array([[1, 0, 0, 0, 0, 0],
+                     [0, 0, 2, 2, 0, 0],
+                     [0, 0, 2, 2, 2, 0],
+                     [0, 0, 0, 0, 0, 0],
+                     [0, 0, 0, 0, 0, 0],
+                     [0, 0, 0, 4, 4, 0]])
+    vi = ndimage.value_indices(data, ignore_value=0)
+    true_keys = [1, 2, 4]
+    assert_equal(list(vi.keys()), true_keys)
+
+    truevi = {}
+    for k in true_keys:
+        truevi[k] = np.where(data == k)
+
+    vi = ndimage.value_indices(data, ignore_value=0)
+    assert_equal(vi, truevi)
+
+
+def test_value_indices02():
+    "Test input checking"
+    data = np.zeros((5, 4), dtype=np.float32)
+    msg = "Parameter 'arr' must be an integer array"
+    with assert_raises(ValueError, match=msg):
+        ndimage.value_indices(data)
+
+
+def test_value_indices03():
+    "Test different input array shapes, from 1-D to 4-D"
+    for shape in [(36,), (18, 2), (3, 3, 4), (3, 3, 2, 2)]:
+        a = np.array((12*[1]+12*[2]+12*[3]), dtype=np.int32).reshape(shape)
+        true_keys = np.unique(a)
+        vi = ndimage.value_indices(a)
+        assert_equal(list(vi.keys()), list(true_keys))
+        for k in true_keys:
+            true_ndx = np.where(a == k)
+            assert_equal(vi[k], true_ndx)
+
+
+def test_sum01():
+    for type in types:
+        input = np.array([], type)
+        output = ndimage.sum(input)
+        assert_equal(output, 0.0)
+
+
+def test_sum02():
+    for type in types:
+        input = np.zeros([0, 4], type)
+        output = ndimage.sum(input)
+        assert_equal(output, 0.0)
+
+
+def test_sum03():
+    for type in types:
+        input = np.ones([], type)
+        output = ndimage.sum(input)
+        assert_almost_equal(output, 1.0)
+
+
+def test_sum04():
+    for type in types:
+        input = np.array([1, 2], type)
+        output = ndimage.sum(input)
+        assert_almost_equal(output, 3.0)
+
+
+def test_sum05():
+    for type in types:
+        input = np.array([[1, 2], [3, 4]], type)
+        output = ndimage.sum(input)
+        assert_almost_equal(output, 10.0)
+
+
+def test_sum06():
+    labels = np.array([], bool)
+    for type in types:
+        input = np.array([], type)
+        output = ndimage.sum(input, labels=labels)
+        assert_equal(output, 0.0)
+
+
+def test_sum07():
+    labels = np.ones([0, 4], bool)
+    for type in types:
+        input = np.zeros([0, 4], type)
+        output = ndimage.sum(input, labels=labels)
+        assert_equal(output, 0.0)
+
+
+def test_sum08():
+    labels = np.array([1, 0], bool)
+    for type in types:
+        input = np.array([1, 2], type)
+        output = ndimage.sum(input, labels=labels)
+        assert_equal(output, 1.0)
+
+
+def test_sum09():
+    labels = np.array([1, 0], bool)
+    for type in types:
+        input = np.array([[1, 2], [3, 4]], type)
+        output = ndimage.sum(input, labels=labels)
+        assert_almost_equal(output, 4.0)
+
+
+def test_sum10():
+    labels = np.array([1, 0], bool)
+    input = np.array([[1, 2], [3, 4]], bool)
+    output = ndimage.sum(input, labels=labels)
+    assert_almost_equal(output, 2.0)
+
+
+def test_sum11():
+    labels = np.array([1, 2], np.int8)
+    for type in types:
+        input = np.array([[1, 2], [3, 4]], type)
+        output = ndimage.sum(input, labels=labels,
+                             index=2)
+        assert_almost_equal(output, 6.0)
+
+
+def test_sum12():
+    labels = np.array([[1, 2], [2, 4]], np.int8)
+    for type in types:
+        input = np.array([[1, 2], [3, 4]], type)
+        output = ndimage.sum(input, labels=labels, index=[4, 8, 2])
+        assert_array_almost_equal(output, [4.0, 0.0, 5.0])
+
+
+def test_sum_labels():
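+    # sum_labels is intended as an alias of ndimage.sum (named so it does not
+    # shadow Python's builtin sum); the two are checked for exact agreement.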
+    labels = np.array([[1, 2], [2, 4]], np.int8)
+    for type in types:
+        input = np.array([[1, 2], [3, 4]], type)
+        output_sum = ndimage.sum(input, labels=labels, index=[4, 8, 2])
+        output_labels = ndimage.sum_labels(
+            input, labels=labels, index=[4, 8, 2])
+
+        assert (output_sum == output_labels).all()
+        assert_array_almost_equal(output_labels, [4.0, 0.0, 5.0])
+
+
+def test_mean01():
+    labels = np.array([1, 0], bool)
+    for type in types:
+        input = np.array([[1, 2], [3, 4]], type)
+        output = ndimage.mean(input, labels=labels)
+        assert_almost_equal(output, 2.0)
+
+
+def test_mean02():
+    labels = np.array([1, 0], bool)
+    input = np.array([[1, 2], [3, 4]], bool)
+    output = ndimage.mean(input, labels=labels)
+    assert_almost_equal(output, 1.0)
+
+
+def test_mean03():
+    labels = np.array([1, 2])
+    for type in types:
+        input = np.array([[1, 2], [3, 4]], type)
+        output = ndimage.mean(input, labels=labels,
+                              index=2)
+        assert_almost_equal(output, 3.0)
+
+
+def test_mean04():
+    labels = np.array([[1, 2], [2, 4]], np.int8)
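+    # Index 8 matches no label, so its mean is 0/0; errstate silences the
+    # invalid-value warning and the NaN result is asserted below.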
+    with np.errstate(all='ignore'):
+        for type in types:
+            input = np.array([[1, 2], [3, 4]], type)
+            output = ndimage.mean(input, labels=labels,
+                                  index=[4, 8, 2])
+            assert_array_almost_equal(output[[0, 2]], [4.0, 2.5])
+            assert_(np.isnan(output[1]))
+
+
+def test_minimum01():
+    labels = np.array([1, 0], bool)
+    for type in types:
+        input = np.array([[1, 2], [3, 4]], type)
+        output = ndimage.minimum(input, labels=labels)
+        assert_almost_equal(output, 1.0)
+
+
+def test_minimum02():
+    labels = np.array([1, 0], bool)
+    input = np.array([[2, 2], [2, 4]], bool)
+    output = ndimage.minimum(input, labels=labels)
+    assert_almost_equal(output, 1.0)
+
+
+def test_minimum03():
+    labels = np.array([1, 2])
+    for type in types:
+        input = np.array([[1, 2], [3, 4]], type)
+        output = ndimage.minimum(input, labels=labels,
+                                 index=2)
+        assert_almost_equal(output, 2.0)
+
+
+def test_minimum04():
+    labels = np.array([[1, 2], [2, 3]])
+    for type in types:
+        input = np.array([[1, 2], [3, 4]], type)
+        output = ndimage.minimum(input, labels=labels,
+                                 index=[2, 3, 8])
+        assert_array_almost_equal(output, [2.0, 4.0, 0.0])
+
+
+def test_maximum01():
+    labels = np.array([1, 0], bool)
+    for type in types:
+        input = np.array([[1, 2], [3, 4]], type)
+        output = ndimage.maximum(input, labels=labels)
+        assert_almost_equal(output, 3.0)
+
+
+def test_maximum02():
+    labels = np.array([1, 0], bool)
+    input = np.array([[2, 2], [2, 4]], bool)
+    output = ndimage.maximum(input, labels=labels)
+    assert_almost_equal(output, 1.0)
+
+
+def test_maximum03():
+    labels = np.array([1, 2])
+    for type in types:
+        input = np.array([[1, 2], [3, 4]], type)
+        output = ndimage.maximum(input, labels=labels,
+                                 index=2)
+        assert_almost_equal(output, 4.0)
+
+
+def test_maximum04():
+    labels = np.array([[1, 2], [2, 3]])
+    for type in types:
+        input = np.array([[1, 2], [3, 4]], type)
+        output = ndimage.maximum(input, labels=labels,
+                                 index=[2, 3, 8])
+        assert_array_almost_equal(output, [3.0, 4.0, 0.0])
+
+
+def test_maximum05():
+    # Regression test for ticket #501 (Trac)
+    x = np.array([-3, -2, -1])
+    assert_equal(ndimage.maximum(x), -1)
+
+
+def test_median01():
+    a = np.array([[1, 2, 0, 1],
+                  [5, 3, 0, 4],
+                  [0, 0, 0, 7],
+                  [9, 3, 0, 0]])
+    labels = np.array([[1, 1, 0, 2],
+                       [1, 1, 0, 2],
+                       [0, 0, 0, 2],
+                       [3, 3, 0, 0]])
+    output = ndimage.median(a, labels=labels, index=[1, 2, 3])
+    assert_array_almost_equal(output, [2.5, 4.0, 6.0])
+
+
+def test_median02():
+    a = np.array([[1, 2, 0, 1],
+                  [5, 3, 0, 4],
+                  [0, 0, 0, 7],
+                  [9, 3, 0, 0]])
+    output = ndimage.median(a)
+    assert_almost_equal(output, 1.0)
+
+
+def test_median03():
+    a = np.array([[1, 2, 0, 1],
+                  [5, 3, 0, 4],
+                  [0, 0, 0, 7],
+                  [9, 3, 0, 0]])
+    labels = np.array([[1, 1, 0, 2],
+                       [1, 1, 0, 2],
+                       [0, 0, 0, 2],
+                       [3, 3, 0, 0]])
+    output = ndimage.median(a, labels=labels)
+    assert_almost_equal(output, 3.0)
+
+
+def test_median_gh12836_bool():
+    # test boolean addition fix on example from gh-12836
+    a = np.asarray([1, 1], dtype=bool)
+    output = ndimage.median(a, labels=np.ones((2,)), index=[1])
+    assert_array_almost_equal(output, [1.0])
+
+
+def test_median_no_int_overflow():
+    # test integer overflow fix on example from gh-12836
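+    # 65 + 70 = 135 overflows int8 (max 127); the fix avoids the narrow
+    # addition, so the even-length median is (65 + 70) / 2 = 67.5.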
+    a = np.asarray([65, 70], dtype=np.int8)
+    output = ndimage.median(a, labels=np.ones((2,)), index=[1])
+    assert_array_almost_equal(output, [67.5])
+
+
+def test_variance01():
+    with np.errstate(all='ignore'):
+        for type in types:
+            input = np.array([], type)
+            with suppress_warnings() as sup:
+                sup.filter(RuntimeWarning, "Mean of empty slice")
+                output = ndimage.variance(input)
+            assert_(np.isnan(output))
+
+
+def test_variance02():
+    for type in types:
+        input = np.array([1], type)
+        output = ndimage.variance(input)
+        assert_almost_equal(output, 0.0)
+
+
+def test_variance03():
+    for type in types:
+        input = np.array([1, 3], type)
+        output = ndimage.variance(input)
+        assert_almost_equal(output, 1.0)
+
+
+def test_variance04():
+    input = np.array([1, 0], bool)
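+    # Booleans are treated as 0/1: the mean is 0.5, so the variance is
+    # ((1 - 0.5)**2 + (0 - 0.5)**2) / 2 = 0.25.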
+    output = ndimage.variance(input)
+    assert_almost_equal(output, 0.25)
+
+
+def test_variance05():
+    labels = [2, 2, 3]
+    for type in types:
+        input = np.array([1, 3, 8], type)
+        output = ndimage.variance(input, labels, 2)
+        assert_almost_equal(output, 1.0)
+
+
+def test_variance06():
+    labels = [2, 2, 3, 3, 4]
+    with np.errstate(all='ignore'):
+        for type in types:
+            input = np.array([1, 3, 8, 10, 8], type)
+            output = ndimage.variance(input, labels, [2, 3, 4])
+            assert_array_almost_equal(output, [1.0, 1.0, 0.0])
+
+
+def test_standard_deviation01():
+    with np.errstate(all='ignore'):
+        for type in types:
+            input = np.array([], type)
+            with suppress_warnings() as sup:
+                sup.filter(RuntimeWarning, "Mean of empty slice")
+                output = ndimage.standard_deviation(input)
+            assert_(np.isnan(output))
+
+
+def test_standard_deviation02():
+    for type in types:
+        input = np.array([1], type)
+        output = ndimage.standard_deviation(input)
+        assert_almost_equal(output, 0.0)
+
+
+def test_standard_deviation03():
+    for type in types:
+        input = np.array([1, 3], type)
+        output = ndimage.standard_deviation(input)
+        assert_almost_equal(output, np.sqrt(1.0))
+
+
+def test_standard_deviation04():
+    input = np.array([1, 0], bool)
+    output = ndimage.standard_deviation(input)
+    assert_almost_equal(output, 0.5)
+
+
+def test_standard_deviation05():
+    labels = [2, 2, 3]
+    for type in types:
+        input = np.array([1, 3, 8], type)
+        output = ndimage.standard_deviation(input, labels, 2)
+        assert_almost_equal(output, 1.0)
+
+
+def test_standard_deviation06():
+    labels = [2, 2, 3, 3, 4]
+    with np.errstate(all='ignore'):
+        for type in types:
+            input = np.array([1, 3, 8, 10, 8], type)
+            output = ndimage.standard_deviation(input, labels, [2, 3, 4])
+            assert_array_almost_equal(output, [1.0, 1.0, 0.0])
+
+
+def test_standard_deviation07():
+    labels = [1]
+    with np.errstate(all='ignore'):
+        for type in types:
+            input = np.array([-0.00619519], type)
+            output = ndimage.standard_deviation(input, labels, [1])
+            assert_array_almost_equal(output, [0])
+
+
+def test_minimum_position01():
+    labels = np.array([1, 0], bool)
+    for type in types:
+        input = np.array([[1, 2], [3, 4]], type)
+        output = ndimage.minimum_position(input, labels=labels)
+        assert_equal(output, (0, 0))
+
+
+def test_minimum_position02():
+    for type in types:
+        input = np.array([[5, 4, 2, 5],
+                          [3, 7, 0, 2],
+                          [1, 5, 1, 1]], type)
+        output = ndimage.minimum_position(input)
+        assert_equal(output, (1, 2))
+
+
+def test_minimum_position03():
+    input = np.array([[5, 4, 2, 5],
+                      [3, 7, 0, 2],
+                      [1, 5, 1, 1]], bool)
+    output = ndimage.minimum_position(input)
+    assert_equal(output, (1, 2))
+
+
+def test_minimum_position04():
+    input = np.array([[5, 4, 2, 5],
+                      [3, 7, 1, 2],
+                      [1, 5, 1, 1]], bool)
+    output = ndimage.minimum_position(input)
+    assert_equal(output, (0, 0))
+
+
+def test_minimum_position05():
+    labels = [1, 2, 0, 4]
+    for type in types:
+        input = np.array([[5, 4, 2, 5],
+                          [3, 7, 0, 2],
+                          [1, 5, 2, 3]], type)
+        output = ndimage.minimum_position(input, labels)
+        assert_equal(output, (2, 0))
+
+
+def test_minimum_position06():
+    labels = [1, 2, 3, 4]
+    for type in types:
+        input = np.array([[5, 4, 2, 5],
+                          [3, 7, 0, 2],
+                          [1, 5, 1, 1]], type)
+        output = ndimage.minimum_position(input, labels, 2)
+        assert_equal(output, (0, 1))
+
+
+def test_minimum_position07():
+    labels = [1, 2, 3, 4]
+    for type in types:
+        input = np.array([[5, 4, 2, 5],
+                          [3, 7, 0, 2],
+                          [1, 5, 1, 1]], type)
+        output = ndimage.minimum_position(input, labels,
+                                          [2, 3])
+        assert_equal(output[0], (0, 1))
+        assert_equal(output[1], (1, 2))
+
+
+def test_maximum_position01():
+    labels = np.array([1, 0], bool)
+    for type in types:
+        input = np.array([[1, 2], [3, 4]], type)
+        output = ndimage.maximum_position(input,
+                                          labels=labels)
+        assert_equal(output, (1, 0))
+
+
+def test_maximum_position02():
+    for type in types:
+        input = np.array([[5, 4, 2, 5],
+                          [3, 7, 8, 2],
+                          [1, 5, 1, 1]], type)
+        output = ndimage.maximum_position(input)
+        assert_equal(output, (1, 2))
+
+
+def test_maximum_position03():
+    input = np.array([[5, 4, 2, 5],
+                      [3, 7, 8, 2],
+                      [1, 5, 1, 1]], bool)
+    output = ndimage.maximum_position(input)
+    assert_equal(output, (0, 0))
+
+
+def test_maximum_position04():
+    labels = [1, 2, 0, 4]
+    for type in types:
+        input = np.array([[5, 4, 2, 5],
+                          [3, 7, 8, 2],
+                          [1, 5, 1, 1]], type)
+        output = ndimage.maximum_position(input, labels)
+        assert_equal(output, (1, 1))
+
+
+def test_maximum_position05():
+    labels = [1, 2, 0, 4]
+    for type in types:
+        input = np.array([[5, 4, 2, 5],
+                          [3, 7, 8, 2],
+                          [1, 5, 1, 1]], type)
+        output = ndimage.maximum_position(input, labels, 1)
+        assert_equal(output, (0, 0))
+
+
+def test_maximum_position06():
+    labels = [1, 2, 0, 4]
+    for type in types:
+        input = np.array([[5, 4, 2, 5],
+                          [3, 7, 8, 2],
+                          [1, 5, 1, 1]], type)
+        output = ndimage.maximum_position(input, labels,
+                                          [1, 2])
+        assert_equal(output[0], (0, 0))
+        assert_equal(output[1], (1, 1))
+
+
+def test_maximum_position07():
+    # Test float labels
+    labels = np.array([1.0, 2.5, 0.0, 4.5])
+    for type in types:
+        input = np.array([[5, 4, 2, 5],
+                          [3, 7, 8, 2],
+                          [1, 5, 1, 1]], type)
+        output = ndimage.maximum_position(input, labels,
+                                          [1.0, 4.5])
+        assert_equal(output[0], (0, 0))
+        assert_equal(output[1], (0, 3))
+
+
+def test_extrema01():
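+    # extrema() bundles (minimum, maximum, minimum_position, maximum_position)
+    # into a single call; each component is compared against the standalone
+    # function below.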
+    labels = np.array([1, 0], bool)
+    for type in types:
+        input = np.array([[1, 2], [3, 4]], type)
+        output1 = ndimage.extrema(input, labels=labels)
+        output2 = ndimage.minimum(input, labels=labels)
+        output3 = ndimage.maximum(input, labels=labels)
+        output4 = ndimage.minimum_position(input,
+                                           labels=labels)
+        output5 = ndimage.maximum_position(input,
+                                           labels=labels)
+        assert_equal(output1, (output2, output3, output4, output5))
+
+
+def test_extrema02():
+    labels = np.array([1, 2])
+    for type in types:
+        input = np.array([[1, 2], [3, 4]], type)
+        output1 = ndimage.extrema(input, labels=labels,
+                                  index=2)
+        output2 = ndimage.minimum(input, labels=labels,
+                                  index=2)
+        output3 = ndimage.maximum(input, labels=labels,
+                                  index=2)
+        output4 = ndimage.minimum_position(input,
+                                           labels=labels, index=2)
+        output5 = ndimage.maximum_position(input,
+                                           labels=labels, index=2)
+        assert_equal(output1, (output2, output3, output4, output5))
+
+
+def test_extrema03():
+    labels = np.array([[1, 2], [2, 3]])
+    for type in types:
+        input = np.array([[1, 2], [3, 4]], type)
+        output1 = ndimage.extrema(input, labels=labels,
+                                  index=[2, 3, 8])
+        output2 = ndimage.minimum(input, labels=labels,
+                                  index=[2, 3, 8])
+        output3 = ndimage.maximum(input, labels=labels,
+                                  index=[2, 3, 8])
+        output4 = ndimage.minimum_position(input,
+                                           labels=labels, index=[2, 3, 8])
+        output5 = ndimage.maximum_position(input,
+                                           labels=labels, index=[2, 3, 8])
+        assert_array_almost_equal(output1[0], output2)
+        assert_array_almost_equal(output1[1], output3)
+        assert_array_almost_equal(output1[2], output4)
+        assert_array_almost_equal(output1[3], output5)
+
+
+def test_extrema04():
+    labels = [1, 2, 0, 4]
+    for type in types:
+        input = np.array([[5, 4, 2, 5],
+                          [3, 7, 8, 2],
+                          [1, 5, 1, 1]], type)
+        output1 = ndimage.extrema(input, labels, [1, 2])
+        output2 = ndimage.minimum(input, labels, [1, 2])
+        output3 = ndimage.maximum(input, labels, [1, 2])
+        output4 = ndimage.minimum_position(input, labels,
+                                           [1, 2])
+        output5 = ndimage.maximum_position(input, labels,
+                                           [1, 2])
+        assert_array_almost_equal(output1[0], output2)
+        assert_array_almost_equal(output1[1], output3)
+        assert_array_almost_equal(output1[2], output4)
+        assert_array_almost_equal(output1[3], output5)
+
+
+def test_center_of_mass01():
+    expected = [0.0, 0.0]
+    for type in types:
+        input = np.array([[1, 0], [0, 0]], type)
+        output = ndimage.center_of_mass(input)
+        assert_array_almost_equal(output, expected)
+
+
+def test_center_of_mass02():
+    expected = [1, 0]
+    for type in types:
+        input = np.array([[0, 0], [1, 0]], type)
+        output = ndimage.center_of_mass(input)
+        assert_array_almost_equal(output, expected)
+
+
+def test_center_of_mass03():
+    expected = [0, 1]
+    for type in types:
+        input = np.array([[0, 1], [0, 0]], type)
+        output = ndimage.center_of_mass(input)
+        assert_array_almost_equal(output, expected)
+
+
+def test_center_of_mass04():
+    expected = [1, 1]
+    for type in types:
+        input = np.array([[0, 0], [0, 1]], type)
+        output = ndimage.center_of_mass(input)
+        assert_array_almost_equal(output, expected)
+
+
+def test_center_of_mass05():
+    expected = [0.5, 0.5]
+    for type in types:
+        input = np.array([[1, 1], [1, 1]], type)
+        output = ndimage.center_of_mass(input)
+        assert_array_almost_equal(output, expected)
+
+
+def test_center_of_mass06():
+    expected = [0.5, 0.5]
+    input = np.array([[1, 2], [3, 1]], bool)
+    output = ndimage.center_of_mass(input)
+    assert_array_almost_equal(output, expected)
+
+
+def test_center_of_mass07():
+    labels = [1, 0]
+    expected = [0.5, 0.0]
+    input = np.array([[1, 2], [3, 1]], bool)
+    output = ndimage.center_of_mass(input, labels)
+    assert_array_almost_equal(output, expected)
+
+
+def test_center_of_mass08():
+    labels = [1, 2]
+    expected = [0.5, 1.0]
+    input = np.array([[5, 2], [3, 1]], bool)
+    output = ndimage.center_of_mass(input, labels, 2)
+    assert_array_almost_equal(output, expected)
+
+
+def test_center_of_mass09():
+    labels = [1, 2]
+    expected = [(0.5, 0.0), (0.5, 1.0)]
+    input = np.array([[1, 2], [1, 1]], bool)
+    output = ndimage.center_of_mass(input, labels, [1, 2])
+    assert_array_almost_equal(output, expected)
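+
+
+def test_center_of_mass_formula_demo():
+    # Illustrative sketch added for exposition (not part of upstream SciPy):
+    # the center of mass is the intensity-weighted mean index per axis,
+    # sum(i * w[i]) / sum(w[i]).
+    input = np.array([1.0, 3.0])
+    # (0*1 + 1*3) / (1 + 3) = 0.75
+    assert_array_almost_equal(ndimage.center_of_mass(input), [0.75])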
+
+
+def test_histogram01():
+    expected = np.ones(10)
+    input = np.arange(10)
+    output = ndimage.histogram(input, 0, 10, 10)
+    assert_array_almost_equal(output, expected)
+
+
+def test_histogram02():
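+    # histogram(input, min, max, bins, labels, index): five equal-width bins
+    # spanning [0, 4], counting only the elements whose label equals 1.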
+    labels = [1, 1, 1, 1, 2, 2, 2, 2]
+    expected = [0, 2, 0, 1, 1]
+    input = np.array([1, 1, 3, 4, 3, 3, 3, 3])
+    output = ndimage.histogram(input, 0, 4, 5, labels, 1)
+    assert_array_almost_equal(output, expected)
+
+
+def test_histogram03():
+    labels = [1, 0, 1, 1, 2, 2, 2, 2]
+    expected1 = [0, 1, 0, 1, 1]
+    expected2 = [0, 0, 0, 3, 0]
+    input = np.array([1, 1, 3, 4, 3, 5, 3, 3])
+    output = ndimage.histogram(input, 0, 4, 5, labels, (1, 2))
+
+    assert_array_almost_equal(output[0], expected1)
+    assert_array_almost_equal(output[1], expected2)
+
+
+def test_stat_funcs_2d():
+    a = np.array([[5, 6, 0, 0, 0], [8, 9, 0, 0, 0], [0, 0, 0, 3, 5]])
+    lbl = np.array([[1, 1, 0, 0, 0], [1, 1, 0, 0, 0], [0, 0, 0, 2, 2]])
+
+    mean = ndimage.mean(a, labels=lbl, index=[1, 2])
+    assert_array_equal(mean, [7.0, 4.0])
+
+    var = ndimage.variance(a, labels=lbl, index=[1, 2])
+    assert_array_equal(var, [2.5, 1.0])
+
+    std = ndimage.standard_deviation(a, labels=lbl, index=[1, 2])
+    assert_array_almost_equal(std, np.sqrt([2.5, 1.0]))
+
+    med = ndimage.median(a, labels=lbl, index=[1, 2])
+    assert_array_equal(med, [7.0, 4.0])
+
+    mn = ndimage.minimum(a, labels=lbl, index=[1, 2])
+    assert_array_equal(mn, [5, 3])
+
+    mx = ndimage.maximum(a, labels=lbl, index=[1, 2])
+    assert_array_equal(mx, [9, 5])
+
+
+class TestWatershedIft:
+
+    def test_watershed_ift01(self):
+        data = np.array([[0, 0, 0, 0, 0, 0, 0],
+                         [0, 1, 1, 1, 1, 1, 0],
+                         [0, 1, 0, 0, 0, 1, 0],
+                         [0, 1, 0, 0, 0, 1, 0],
+                         [0, 1, 0, 0, 0, 1, 0],
+                         [0, 1, 1, 1, 1, 1, 0],
+                         [0, 0, 0, 0, 0, 0, 0],
+                         [0, 0, 0, 0, 0, 0, 0]], np.uint8)
+        markers = np.array([[-1, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 1, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0]], np.int8)
+        out = ndimage.watershed_ift(data, markers, structure=[[1, 1, 1],
+                                                              [1, 1, 1],
+                                                              [1, 1, 1]])
+        expected = [[-1, -1, -1, -1, -1, -1, -1],
+                    [-1, 1, 1, 1, 1, 1, -1],
+                    [-1, 1, 1, 1, 1, 1, -1],
+                    [-1, 1, 1, 1, 1, 1, -1],
+                    [-1, 1, 1, 1, 1, 1, -1],
+                    [-1, 1, 1, 1, 1, 1, -1],
+                    [-1, -1, -1, -1, -1, -1, -1],
+                    [-1, -1, -1, -1, -1, -1, -1]]
+        assert_array_almost_equal(out, expected)
+
+    def test_watershed_ift02(self):
+        data = np.array([[0, 0, 0, 0, 0, 0, 0],
+                         [0, 1, 1, 1, 1, 1, 0],
+                         [0, 1, 0, 0, 0, 1, 0],
+                         [0, 1, 0, 0, 0, 1, 0],
+                         [0, 1, 0, 0, 0, 1, 0],
+                         [0, 1, 1, 1, 1, 1, 0],
+                         [0, 0, 0, 0, 0, 0, 0],
+                         [0, 0, 0, 0, 0, 0, 0]], np.uint8)
+        markers = np.array([[-1, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 1, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0]], np.int8)
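+        # No structure is passed, so the default 4-connected cross is used;
+        # compare the "rounded" corners here with test_watershed_ift01.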
+        out = ndimage.watershed_ift(data, markers)
+        expected = [[-1, -1, -1, -1, -1, -1, -1],
+                    [-1, -1, 1, 1, 1, -1, -1],
+                    [-1, 1, 1, 1, 1, 1, -1],
+                    [-1, 1, 1, 1, 1, 1, -1],
+                    [-1, 1, 1, 1, 1, 1, -1],
+                    [-1, -1, 1, 1, 1, -1, -1],
+                    [-1, -1, -1, -1, -1, -1, -1],
+                    [-1, -1, -1, -1, -1, -1, -1]]
+        assert_array_almost_equal(out, expected)
+
+    def test_watershed_ift03(self):
+        data = np.array([[0, 0, 0, 0, 0, 0, 0],
+                         [0, 1, 1, 1, 1, 1, 0],
+                         [0, 1, 0, 1, 0, 1, 0],
+                         [0, 1, 0, 1, 0, 1, 0],
+                         [0, 1, 0, 1, 0, 1, 0],
+                         [0, 1, 1, 1, 1, 1, 0],
+                         [0, 0, 0, 0, 0, 0, 0]], np.uint8)
+        markers = np.array([[0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 2, 0, 3, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, -1]], np.int8)
+        out = ndimage.watershed_ift(data, markers)
+        expected = [[-1, -1, -1, -1, -1, -1, -1],
+                    [-1, -1, 2, -1, 3, -1, -1],
+                    [-1, 2, 2, 3, 3, 3, -1],
+                    [-1, 2, 2, 3, 3, 3, -1],
+                    [-1, 2, 2, 3, 3, 3, -1],
+                    [-1, -1, 2, -1, 3, -1, -1],
+                    [-1, -1, -1, -1, -1, -1, -1]]
+        assert_array_almost_equal(out, expected)
+
+    def test_watershed_ift04(self):
+        data = np.array([[0, 0, 0, 0, 0, 0, 0],
+                         [0, 1, 1, 1, 1, 1, 0],
+                         [0, 1, 0, 1, 0, 1, 0],
+                         [0, 1, 0, 1, 0, 1, 0],
+                         [0, 1, 0, 1, 0, 1, 0],
+                         [0, 1, 1, 1, 1, 1, 0],
+                         [0, 0, 0, 0, 0, 0, 0]], np.uint8)
+        markers = np.array([[0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 2, 0, 3, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, -1]],
+                           np.int8)
+        out = ndimage.watershed_ift(data, markers,
+                                    structure=[[1, 1, 1],
+                                               [1, 1, 1],
+                                               [1, 1, 1]])
+        expected = [[-1, -1, -1, -1, -1, -1, -1],
+                    [-1, 2, 2, 3, 3, 3, -1],
+                    [-1, 2, 2, 3, 3, 3, -1],
+                    [-1, 2, 2, 3, 3, 3, -1],
+                    [-1, 2, 2, 3, 3, 3, -1],
+                    [-1, 2, 2, 3, 3, 3, -1],
+                    [-1, -1, -1, -1, -1, -1, -1]]
+        assert_array_almost_equal(out, expected)
+
+    def test_watershed_ift05(self):
+        data = np.array([[0, 0, 0, 0, 0, 0, 0],
+                         [0, 1, 1, 1, 1, 1, 0],
+                         [0, 1, 0, 1, 0, 1, 0],
+                         [0, 1, 0, 1, 0, 1, 0],
+                         [0, 1, 0, 1, 0, 1, 0],
+                         [0, 1, 1, 1, 1, 1, 0],
+                         [0, 0, 0, 0, 0, 0, 0]], np.uint8)
+        markers = np.array([[0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 3, 0, 2, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, -1]],
+                           np.int8)
+        out = ndimage.watershed_ift(data, markers,
+                                    structure=[[1, 1, 1],
+                                               [1, 1, 1],
+                                               [1, 1, 1]])
+        expected = [[-1, -1, -1, -1, -1, -1, -1],
+                    [-1, 3, 3, 2, 2, 2, -1],
+                    [-1, 3, 3, 2, 2, 2, -1],
+                    [-1, 3, 3, 2, 2, 2, -1],
+                    [-1, 3, 3, 2, 2, 2, -1],
+                    [-1, 3, 3, 2, 2, 2, -1],
+                    [-1, -1, -1, -1, -1, -1, -1]]
+        assert_array_almost_equal(out, expected)
+
+    def test_watershed_ift06(self):
+        data = np.array([[0, 1, 0, 0, 0, 1, 0],
+                         [0, 1, 0, 0, 0, 1, 0],
+                         [0, 1, 0, 0, 0, 1, 0],
+                         [0, 1, 1, 1, 1, 1, 0],
+                         [0, 0, 0, 0, 0, 0, 0],
+                         [0, 0, 0, 0, 0, 0, 0]], np.uint8)
+        markers = np.array([[-1, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 1, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0]], np.int8)
+        out = ndimage.watershed_ift(data, markers,
+                                    structure=[[1, 1, 1],
+                                               [1, 1, 1],
+                                               [1, 1, 1]])
+        expected = [[-1, 1, 1, 1, 1, 1, -1],
+                    [-1, 1, 1, 1, 1, 1, -1],
+                    [-1, 1, 1, 1, 1, 1, -1],
+                    [-1, 1, 1, 1, 1, 1, -1],
+                    [-1, -1, -1, -1, -1, -1, -1],
+                    [-1, -1, -1, -1, -1, -1, -1]]
+        assert_array_almost_equal(out, expected)
+
+    def test_watershed_ift07(self):
+        shape = (7, 6)
+        data = np.zeros(shape, dtype=np.uint8)
+        data = data.transpose()
+        data[...] = np.array([[0, 1, 0, 0, 0, 1, 0],
+                              [0, 1, 0, 0, 0, 1, 0],
+                              [0, 1, 0, 0, 0, 1, 0],
+                              [0, 1, 1, 1, 1, 1, 0],
+                              [0, 0, 0, 0, 0, 0, 0],
+                              [0, 0, 0, 0, 0, 0, 0]], np.uint8)
+        markers = np.array([[-1, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 1, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0]], np.int8)
+        out = np.zeros(shape, dtype=np.int16)
+        out = out.transpose()
+        ndimage.watershed_ift(data, markers,
+                              structure=[[1, 1, 1],
+                                         [1, 1, 1],
+                                         [1, 1, 1]],
+                              output=out)
+        expected = [[-1, 1, 1, 1, 1, 1, -1],
+                    [-1, 1, 1, 1, 1, 1, -1],
+                    [-1, 1, 1, 1, 1, 1, -1],
+                    [-1, 1, 1, 1, 1, 1, -1],
+                    [-1, -1, -1, -1, -1, -1, -1],
+                    [-1, -1, -1, -1, -1, -1, -1]]
+        assert_array_almost_equal(out, expected)
+
+    def test_watershed_ift08(self):
+        # Test cost larger than uint8. See gh-10069.
+        shape = (2, 2)
+        data = np.array([[256, 0],
+                         [0, 0]], np.uint16)
+        markers = np.array([[1, 0],
+                            [0, 0]], np.int8)
+        out = ndimage.watershed_ift(data, markers)
+        expected = [[1, 1],
+                    [1, 1]]
+        assert_array_almost_equal(out, expected)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/ndimage/tests/test_morphology.py b/__packaged__/coreml/.python_dependencies/scipy/ndimage/tests/test_morphology.py
new file mode 100644
index 00000000..6090cfa3
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/ndimage/tests/test_morphology.py
@@ -0,0 +1,2371 @@
+import numpy
+from numpy.testing import (assert_, assert_equal, assert_array_equal,
+                           assert_array_almost_equal)
+import pytest
+from pytest import raises as assert_raises
+
+from scipy import ndimage
+
+from . import types
+
+
+class TestNdimageMorphology:
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_distance_transform_bf01(self, dtype):
+        # brute force (bf) distance transform
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+        out, ft = ndimage.distance_transform_bf(data, 'euclidean',
+                                                return_indices=True)
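+        # 'expected' stores squared Euclidean distances (exact integers), so
+        # the computed transform is squared before the comparison.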
+        expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                    [0, 0, 1, 2, 4, 2, 1, 0, 0],
+                    [0, 0, 1, 4, 8, 4, 1, 0, 0],
+                    [0, 0, 1, 2, 4, 2, 1, 0, 0],
+                    [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0, 0]]
+        assert_array_almost_equal(out * out, expected)
+
+        expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
+                     [1, 1, 1, 1, 1, 1, 1, 1, 1],
+                     [2, 2, 2, 2, 1, 2, 2, 2, 2],
+                     [3, 3, 3, 2, 1, 2, 3, 3, 3],
+                     [4, 4, 4, 4, 6, 4, 4, 4, 4],
+                     [5, 5, 6, 6, 7, 6, 6, 5, 5],
+                     [6, 6, 6, 7, 7, 7, 6, 6, 6],
+                     [7, 7, 7, 7, 7, 7, 7, 7, 7],
+                     [8, 8, 8, 8, 8, 8, 8, 8, 8]],
+                    [[0, 1, 2, 3, 4, 5, 6, 7, 8],
+                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
+                     [0, 1, 2, 2, 4, 6, 6, 7, 8],
+                     [0, 1, 1, 2, 4, 6, 7, 7, 8],
+                     [0, 1, 1, 1, 6, 7, 7, 7, 8],
+                     [0, 1, 2, 2, 4, 6, 6, 7, 8],
+                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
+                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
+                     [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
+        assert_array_almost_equal(ft, expected)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_distance_transform_bf02(self, dtype):
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+        out, ft = ndimage.distance_transform_bf(data, 'cityblock',
+                                                return_indices=True)
+
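+        # 'cityblock' is the L1 metric (axis-aligned steps only); the blob
+        # center is three steps from the nearest background pixel.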
+        expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                    [0, 0, 1, 2, 2, 2, 1, 0, 0],
+                    [0, 0, 1, 2, 3, 2, 1, 0, 0],
+                    [0, 0, 1, 2, 2, 2, 1, 0, 0],
+                    [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0, 0]]
+        assert_array_almost_equal(out, expected)
+
+        expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
+                     [1, 1, 1, 1, 1, 1, 1, 1, 1],
+                     [2, 2, 2, 2, 1, 2, 2, 2, 2],
+                     [3, 3, 3, 3, 1, 3, 3, 3, 3],
+                     [4, 4, 4, 4, 7, 4, 4, 4, 4],
+                     [5, 5, 6, 7, 7, 7, 6, 5, 5],
+                     [6, 6, 6, 7, 7, 7, 6, 6, 6],
+                     [7, 7, 7, 7, 7, 7, 7, 7, 7],
+                     [8, 8, 8, 8, 8, 8, 8, 8, 8]],
+                    [[0, 1, 2, 3, 4, 5, 6, 7, 8],
+                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
+                     [0, 1, 2, 2, 4, 6, 6, 7, 8],
+                     [0, 1, 1, 1, 4, 7, 7, 7, 8],
+                     [0, 1, 1, 1, 4, 7, 7, 7, 8],
+                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
+                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
+                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
+                     [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
+        assert_array_almost_equal(expected, ft)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_distance_transform_bf03(self, dtype):
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+        out, ft = ndimage.distance_transform_bf(data, 'chessboard',
+                                                return_indices=True)
+
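+        # 'chessboard' is the L-infinity metric (diagonal steps count as 1),
+        # so the blob center is only two steps from the background.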
+        expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                    [0, 0, 1, 1, 2, 1, 1, 0, 0],
+                    [0, 0, 1, 2, 2, 2, 1, 0, 0],
+                    [0, 0, 1, 1, 2, 1, 1, 0, 0],
+                    [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0, 0]]
+        assert_array_almost_equal(out, expected)
+
+        expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
+                     [1, 1, 1, 1, 1, 1, 1, 1, 1],
+                     [2, 2, 2, 2, 1, 2, 2, 2, 2],
+                     [3, 3, 4, 2, 2, 2, 4, 3, 3],
+                     [4, 4, 5, 6, 6, 6, 5, 4, 4],
+                     [5, 5, 6, 6, 7, 6, 6, 5, 5],
+                     [6, 6, 6, 7, 7, 7, 6, 6, 6],
+                     [7, 7, 7, 7, 7, 7, 7, 7, 7],
+                     [8, 8, 8, 8, 8, 8, 8, 8, 8]],
+                    [[0, 1, 2, 3, 4, 5, 6, 7, 8],
+                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
+                     [0, 1, 2, 2, 5, 6, 6, 7, 8],
+                     [0, 1, 1, 2, 6, 6, 7, 7, 8],
+                     [0, 1, 1, 2, 6, 7, 7, 7, 8],
+                     [0, 1, 2, 2, 6, 6, 7, 7, 8],
+                     [0, 1, 2, 4, 5, 6, 6, 7, 8],
+                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
+                     [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
+        assert_array_almost_equal(ft, expected)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_distance_transform_bf04(self, dtype):
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+        tdt, tft = ndimage.distance_transform_bf(data, return_indices=True)
+        dts = []
+        fts = []
+        dt = numpy.zeros(data.shape, dtype=numpy.float64)
+        ndimage.distance_transform_bf(data, distances=dt)
+        dts.append(dt)
+        ft = ndimage.distance_transform_bf(
+            data, return_distances=False, return_indices=True)
+        fts.append(ft)
+        ft = numpy.indices(data.shape, dtype=numpy.int32)
+        ndimage.distance_transform_bf(
+            data, return_distances=False, return_indices=True, indices=ft)
+        fts.append(ft)
+        dt, ft = ndimage.distance_transform_bf(data, return_indices=True)
+        dts.append(dt)
+        fts.append(ft)
+        dt = numpy.zeros(data.shape, dtype=numpy.float64)
+        ft = ndimage.distance_transform_bf(
+            data, distances=dt, return_indices=True)
+        dts.append(dt)
+        fts.append(ft)
+        ft = numpy.indices(data.shape, dtype=numpy.int32)
+        dt = ndimage.distance_transform_bf(
+            data, return_indices=True, indices=ft)
+        dts.append(dt)
+        fts.append(ft)
+        dt = numpy.zeros(data.shape, dtype=numpy.float64)
+        ft = numpy.indices(data.shape, dtype=numpy.int32)
+        ndimage.distance_transform_bf(
+            data, distances=dt, return_indices=True, indices=ft)
+        dts.append(dt)
+        fts.append(ft)
+        for dt in dts:
+            assert_array_almost_equal(tdt, dt)
+        for ft in fts:
+            assert_array_almost_equal(tft, ft)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_distance_transform_bf05(self, dtype):
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+        out, ft = ndimage.distance_transform_bf(
+            data, 'euclidean', return_indices=True, sampling=[2, 2])
+        expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 4, 4, 4, 0, 0, 0],
+                    [0, 0, 4, 8, 16, 8, 4, 0, 0],
+                    [0, 0, 4, 16, 32, 16, 4, 0, 0],
+                    [0, 0, 4, 8, 16, 8, 4, 0, 0],
+                    [0, 0, 0, 4, 4, 4, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0, 0]]
+        assert_array_almost_equal(out * out, expected)
+
+        expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
+                     [1, 1, 1, 1, 1, 1, 1, 1, 1],
+                     [2, 2, 2, 2, 1, 2, 2, 2, 2],
+                     [3, 3, 3, 2, 1, 2, 3, 3, 3],
+                     [4, 4, 4, 4, 6, 4, 4, 4, 4],
+                     [5, 5, 6, 6, 7, 6, 6, 5, 5],
+                     [6, 6, 6, 7, 7, 7, 6, 6, 6],
+                     [7, 7, 7, 7, 7, 7, 7, 7, 7],
+                     [8, 8, 8, 8, 8, 8, 8, 8, 8]],
+                    [[0, 1, 2, 3, 4, 5, 6, 7, 8],
+                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
+                     [0, 1, 2, 2, 4, 6, 6, 7, 8],
+                     [0, 1, 1, 2, 4, 6, 7, 7, 8],
+                     [0, 1, 1, 1, 6, 7, 7, 7, 8],
+                     [0, 1, 2, 2, 4, 6, 6, 7, 8],
+                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
+                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
+                     [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
+        assert_array_almost_equal(ft, expected)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_distance_transform_bf06(self, dtype):
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+        out, ft = ndimage.distance_transform_bf(
+            data, 'euclidean', return_indices=True, sampling=[2, 1])
+        expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 1, 4, 1, 0, 0, 0],
+                    [0, 0, 1, 4, 8, 4, 1, 0, 0],
+                    [0, 0, 1, 4, 9, 4, 1, 0, 0],
+                    [0, 0, 1, 4, 8, 4, 1, 0, 0],
+                    [0, 0, 0, 1, 4, 1, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0, 0]]
+        assert_array_almost_equal(out * out, expected)
+
+        expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
+                     [1, 1, 1, 1, 1, 1, 1, 1, 1],
+                     [2, 2, 2, 2, 2, 2, 2, 2, 2],
+                     [3, 3, 3, 3, 2, 3, 3, 3, 3],
+                     [4, 4, 4, 4, 4, 4, 4, 4, 4],
+                     [5, 5, 5, 5, 6, 5, 5, 5, 5],
+                     [6, 6, 6, 6, 7, 6, 6, 6, 6],
+                     [7, 7, 7, 7, 7, 7, 7, 7, 7],
+                     [8, 8, 8, 8, 8, 8, 8, 8, 8]],
+                    [[0, 1, 2, 3, 4, 5, 6, 7, 8],
+                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
+                     [0, 1, 2, 2, 6, 6, 6, 7, 8],
+                     [0, 1, 1, 1, 6, 7, 7, 7, 8],
+                     [0, 1, 1, 1, 7, 7, 7, 7, 8],
+                     [0, 1, 1, 1, 6, 7, 7, 7, 8],
+                     [0, 1, 2, 2, 4, 6, 6, 7, 8],
+                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
+                     [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
+        assert_array_almost_equal(ft, expected)
+
+    def test_distance_transform_bf07(self):
+        # test input validation per discussion on PR #13302
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0]])
+        with assert_raises(RuntimeError):
+            ndimage.distance_transform_bf(
+                data, return_distances=False, return_indices=False
+            )
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_distance_transform_cdt01(self, dtype):
+        # chamfer type distance (cdt) transform
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+        out, ft = ndimage.distance_transform_cdt(
+            data, 'cityblock', return_indices=True)
+        bf = ndimage.distance_transform_bf(data, 'cityblock')
+        assert_array_almost_equal(bf, out)
+
+        expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
+                     [1, 1, 1, 1, 1, 1, 1, 1, 1],
+                     [2, 2, 2, 1, 1, 1, 2, 2, 2],
+                     [3, 3, 2, 1, 1, 1, 2, 3, 3],
+                     [4, 4, 4, 4, 1, 4, 4, 4, 4],
+                     [5, 5, 5, 5, 7, 7, 6, 5, 5],
+                     [6, 6, 6, 6, 7, 7, 6, 6, 6],
+                     [7, 7, 7, 7, 7, 7, 7, 7, 7],
+                     [8, 8, 8, 8, 8, 8, 8, 8, 8]],
+                    [[0, 1, 2, 3, 4, 5, 6, 7, 8],
+                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
+                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
+                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
+                     [0, 1, 1, 1, 4, 7, 7, 7, 8],
+                     [0, 1, 1, 1, 4, 5, 6, 7, 8],
+                     [0, 1, 2, 2, 4, 5, 6, 7, 8],
+                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
+                     [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
+        assert_array_almost_equal(ft, expected)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_distance_transform_cdt02(self, dtype):
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+        out, ft = ndimage.distance_transform_cdt(data, 'chessboard',
+                                                 return_indices=True)
+        bf = ndimage.distance_transform_bf(data, 'chessboard')
+        assert_array_almost_equal(bf, out)
+
+        expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
+                     [1, 1, 1, 1, 1, 1, 1, 1, 1],
+                     [2, 2, 2, 1, 1, 1, 2, 2, 2],
+                     [3, 3, 2, 2, 1, 2, 2, 3, 3],
+                     [4, 4, 3, 2, 2, 2, 3, 4, 4],
+                     [5, 5, 4, 6, 7, 6, 4, 5, 5],
+                     [6, 6, 6, 6, 7, 7, 6, 6, 6],
+                     [7, 7, 7, 7, 7, 7, 7, 7, 7],
+                     [8, 8, 8, 8, 8, 8, 8, 8, 8]],
+                    [[0, 1, 2, 3, 4, 5, 6, 7, 8],
+                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
+                     [0, 1, 2, 2, 3, 4, 6, 7, 8],
+                     [0, 1, 1, 2, 2, 6, 6, 7, 8],
+                     [0, 1, 1, 1, 2, 6, 7, 7, 8],
+                     [0, 1, 1, 2, 6, 6, 7, 7, 8],
+                     [0, 1, 2, 2, 5, 6, 6, 7, 8],
+                     [0, 1, 2, 3, 4, 5, 6, 7, 8],
+                     [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
+        assert_array_almost_equal(ft, expected)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_distance_transform_cdt03(self, dtype):
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+        tdt, tft = ndimage.distance_transform_cdt(data, return_indices=True)
+        dts = []
+        fts = []
+        dt = numpy.zeros(data.shape, dtype=numpy.int32)
+        ndimage.distance_transform_cdt(data, distances=dt)
+        dts.append(dt)
+        ft = ndimage.distance_transform_cdt(
+            data, return_distances=False, return_indices=True)
+        fts.append(ft)
+        ft = numpy.indices(data.shape, dtype=numpy.int32)
+        ndimage.distance_transform_cdt(
+            data, return_distances=False, return_indices=True, indices=ft)
+        fts.append(ft)
+        dt, ft = ndimage.distance_transform_cdt(
+            data, return_indices=True)
+        dts.append(dt)
+        fts.append(ft)
+        dt = numpy.zeros(data.shape, dtype=numpy.int32)
+        ft = ndimage.distance_transform_cdt(
+            data, distances=dt, return_indices=True)
+        dts.append(dt)
+        fts.append(ft)
+        ft = numpy.indices(data.shape, dtype=numpy.int32)
+        dt = ndimage.distance_transform_cdt(
+            data, return_indices=True, indices=ft)
+        dts.append(dt)
+        fts.append(ft)
+        dt = numpy.zeros(data.shape, dtype=numpy.int32)
+        ft = numpy.indices(data.shape, dtype=numpy.int32)
+        ndimage.distance_transform_cdt(data, distances=dt,
+                                       return_indices=True, indices=ft)
+        dts.append(dt)
+        fts.append(ft)
+        for dt in dts:
+            assert_array_almost_equal(tdt, dt)
+        for ft in fts:
+            assert_array_almost_equal(tft, ft)
+
+    def test_distance_transform_cdt04(self):
+        # test input validation per discussion on PR #13302
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0]])
+        indices_out = numpy.zeros((data.ndim,) + data.shape, dtype=numpy.int32)
+        with assert_raises(RuntimeError):
+            ndimage.distance_transform_cdt(
+                data,
+                return_distances=True,
+                return_indices=False,
+                indices=indices_out
+            )
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_distance_transform_edt01(self, dtype):
+        # euclidean distance transform (edt)
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+        out, ft = ndimage.distance_transform_edt(data, return_indices=True)
+        bf = ndimage.distance_transform_bf(data, 'euclidean')
+        assert_array_almost_equal(bf, out)
+
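+        # rebuild the distances from the feature transform: the euclidean
+        # norm of (pixel index - ft) should match the brute-force result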
+        dt = ft - numpy.indices(ft.shape[1:], dtype=ft.dtype)
+        dt = dt.astype(numpy.float64)
+        numpy.multiply(dt, dt, dt)
+        dt = numpy.add.reduce(dt, axis=0)
+        numpy.sqrt(dt, dt)
+
+        assert_array_almost_equal(bf, dt)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_distance_transform_edt02(self, dtype):
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+        tdt, tft = ndimage.distance_transform_edt(data, return_indices=True)
+        dts = []
+        fts = []
+        dt = numpy.zeros(data.shape, dtype=numpy.float64)
+        ndimage.distance_transform_edt(data, distances=dt)
+        dts.append(dt)
+        ft = ndimage.distance_transform_edt(
+            data, return_distances=False, return_indices=True)
+        fts.append(ft)
+        ft = numpy.indices(data.shape, dtype=numpy.int32)
+        ndimage.distance_transform_edt(
+            data, return_distances=False, return_indices=True, indices=ft)
+        fts.append(ft)
+        dt, ft = ndimage.distance_transform_edt(
+            data, return_indices=True)
+        dts.append(dt)
+        fts.append(ft)
+        dt = numpy.zeros(data.shape, dtype=numpy.float64)
+        ft = ndimage.distance_transform_edt(
+            data, distances=dt, return_indices=True)
+        dts.append(dt)
+        fts.append(ft)
+        ft = numpy.indices(data.shape, dtype=numpy.int32)
+        dt = ndimage.distance_transform_edt(
+            data, return_indices=True, indices=ft)
+        dts.append(dt)
+        fts.append(ft)
+        dt = numpy.zeros(data.shape, dtype=numpy.float64)
+        ft = numpy.indices(data.shape, dtype=numpy.int32)
+        ndimage.distance_transform_edt(
+            data, distances=dt, return_indices=True, indices=ft)
+        dts.append(dt)
+        fts.append(ft)
+        for dt in dts:
+            assert_array_almost_equal(tdt, dt)
+        for ft in fts:
+            assert_array_almost_equal(tft, ft)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_distance_transform_edt03(self, dtype):
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+        ref = ndimage.distance_transform_bf(data, 'euclidean', sampling=[2, 2])
+        out = ndimage.distance_transform_edt(data, sampling=[2, 2])
+        assert_array_almost_equal(ref, out)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_distance_transform_edt04(self, dtype):
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+        ref = ndimage.distance_transform_bf(data, 'euclidean', sampling=[2, 1])
+        out = ndimage.distance_transform_edt(data, sampling=[2, 1])
+        assert_array_almost_equal(ref, out)
+
+    def test_distance_transform_edt05(self):
+        # Ticket #954 regression test
+        out = ndimage.distance_transform_edt(False)
+        assert_array_almost_equal(out, [0.])
+
+    def test_distance_transform_edt06(self):
+        # test input validation per discussion on PR #13302
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 0, 1, 1, 1, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0, 0]])
+        distances_out = numpy.zeros(data.shape, dtype=numpy.float64)
+        with assert_raises(RuntimeError):
+            ndimage.distance_transform_edt(
+                data,
+                return_indices=True,
+                return_distances=False,
+                distances=distances_out
+            )
+
+    def test_generate_structure01(self):
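+        # structuring elements for ranks 0-2 and connectivities 1-2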
+        struct = ndimage.generate_binary_structure(0, 1)
+        assert_array_almost_equal(struct, 1)
+
+    def test_generate_structure02(self):
+        struct = ndimage.generate_binary_structure(1, 1)
+        assert_array_almost_equal(struct, [1, 1, 1])
+
+    def test_generate_structure03(self):
+        struct = ndimage.generate_binary_structure(2, 1)
+        assert_array_almost_equal(struct, [[0, 1, 0],
+                                           [1, 1, 1],
+                                           [0, 1, 0]])
+
+    def test_generate_structure04(self):
+        struct = ndimage.generate_binary_structure(2, 2)
+        assert_array_almost_equal(struct, [[1, 1, 1],
+                                           [1, 1, 1],
+                                           [1, 1, 1]])
+
+    def test_iterate_structure01(self):
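+        # iterate_structure grows a structure by dilating it with itself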
+        struct = [[0, 1, 0],
+                  [1, 1, 1],
+                  [0, 1, 0]]
+        out = ndimage.iterate_structure(struct, 2)
+        assert_array_almost_equal(out, [[0, 0, 1, 0, 0],
+                                        [0, 1, 1, 1, 0],
+                                        [1, 1, 1, 1, 1],
+                                        [0, 1, 1, 1, 0],
+                                        [0, 0, 1, 0, 0]])
+
+    def test_iterate_structure02(self):
+        struct = [[0, 1],
+                  [1, 1],
+                  [0, 1]]
+        out = ndimage.iterate_structure(struct, 2)
+        assert_array_almost_equal(out, [[0, 0, 1],
+                                        [0, 1, 1],
+                                        [1, 1, 1],
+                                        [0, 1, 1],
+                                        [0, 0, 1]])
+
+    def test_iterate_structure03(self):
+        struct = [[0, 1, 0],
+                  [1, 1, 1],
+                  [0, 1, 0]]
+        out = ndimage.iterate_structure(struct, 2, 1)
+        expected = [[0, 0, 1, 0, 0],
+                    [0, 1, 1, 1, 0],
+                    [1, 1, 1, 1, 1],
+                    [0, 1, 1, 1, 0],
+                    [0, 0, 1, 0, 0]]
+        assert_array_almost_equal(out[0], expected)
+        assert_equal(out[1], [2, 2])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_erosion01(self, dtype):
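+        # binary erosion on a 0-d input; later cases vary border_value,
+        # origin, structuring element, mask, and iterations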
+        data = numpy.ones([], dtype)
+        out = ndimage.binary_erosion(data)
+        assert_array_almost_equal(out, 1)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_erosion02(self, dtype):
+        data = numpy.ones([], dtype)
+        out = ndimage.binary_erosion(data, border_value=1)
+        assert_array_almost_equal(out, 1)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_erosion03(self, dtype):
+        data = numpy.ones([1], dtype)
+        out = ndimage.binary_erosion(data)
+        assert_array_almost_equal(out, [0])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_erosion04(self, dtype):
+        data = numpy.ones([1], dtype)
+        out = ndimage.binary_erosion(data, border_value=1)
+        assert_array_almost_equal(out, [1])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_erosion05(self, dtype):
+        data = numpy.ones([3], dtype)
+        out = ndimage.binary_erosion(data)
+        assert_array_almost_equal(out, [0, 1, 0])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_erosion06(self, dtype):
+        data = numpy.ones([3], dtype)
+        out = ndimage.binary_erosion(data, border_value=1)
+        assert_array_almost_equal(out, [1, 1, 1])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_erosion07(self, dtype):
+        data = numpy.ones([5], dtype)
+        out = ndimage.binary_erosion(data)
+        assert_array_almost_equal(out, [0, 1, 1, 1, 0])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_erosion08(self, dtype):
+        data = numpy.ones([5], dtype)
+        out = ndimage.binary_erosion(data, border_value=1)
+        assert_array_almost_equal(out, [1, 1, 1, 1, 1])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_erosion09(self, dtype):
+        data = numpy.ones([5], dtype)
+        data[2] = 0
+        out = ndimage.binary_erosion(data)
+        assert_array_almost_equal(out, [0, 0, 0, 0, 0])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_erosion10(self, dtype):
+        data = numpy.ones([5], dtype)
+        data[2] = 0
+        out = ndimage.binary_erosion(data, border_value=1)
+        assert_array_almost_equal(out, [1, 0, 0, 0, 1])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_erosion11(self, dtype):
+        data = numpy.ones([5], dtype)
+        data[2] = 0
+        struct = [1, 0, 1]
+        out = ndimage.binary_erosion(data, struct, border_value=1)
+        assert_array_almost_equal(out, [1, 0, 1, 0, 1])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_erosion12(self, dtype):
+        data = numpy.ones([5], dtype)
+        data[2] = 0
+        struct = [1, 0, 1]
+        out = ndimage.binary_erosion(data, struct, border_value=1, origin=-1)
+        assert_array_almost_equal(out, [0, 1, 0, 1, 1])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_erosion13(self, dtype):
+        data = numpy.ones([5], dtype)
+        data[2] = 0
+        struct = [1, 0, 1]
+        out = ndimage.binary_erosion(data, struct, border_value=1, origin=1)
+        assert_array_almost_equal(out, [1, 1, 0, 1, 0])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_erosion14(self, dtype):
+        data = numpy.ones([5], dtype)
+        data[2] = 0
+        struct = [1, 1]
+        out = ndimage.binary_erosion(data, struct, border_value=1)
+        assert_array_almost_equal(out, [1, 1, 0, 0, 1])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_erosion15(self, dtype):
+        data = numpy.ones([5], dtype)
+        data[2] = 0
+        struct = [1, 1]
+        out = ndimage.binary_erosion(data, struct, border_value=1, origin=-1)
+        assert_array_almost_equal(out, [1, 0, 0, 1, 1])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_erosion16(self, dtype):
+        data = numpy.ones([1, 1], dtype)
+        out = ndimage.binary_erosion(data, border_value=1)
+        assert_array_almost_equal(out, [[1]])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_erosion17(self, dtype):
+        data = numpy.ones([1, 1], dtype)
+        out = ndimage.binary_erosion(data)
+        assert_array_almost_equal(out, [[0]])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_erosion18(self, dtype):
+        data = numpy.ones([1, 3], dtype)
+        out = ndimage.binary_erosion(data)
+        assert_array_almost_equal(out, [[0, 0, 0]])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_erosion19(self, dtype):
+        data = numpy.ones([1, 3], dtype)
+        out = ndimage.binary_erosion(data, border_value=1)
+        assert_array_almost_equal(out, [[1, 1, 1]])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_erosion20(self, dtype):
+        data = numpy.ones([3, 3], dtype)
+        out = ndimage.binary_erosion(data)
+        assert_array_almost_equal(out, [[0, 0, 0],
+                                        [0, 1, 0],
+                                        [0, 0, 0]])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_erosion21(self, dtype):
+        data = numpy.ones([3, 3], dtype)
+        out = ndimage.binary_erosion(data, border_value=1)
+        assert_array_almost_equal(out, [[1, 1, 1],
+                                        [1, 1, 1],
+                                        [1, 1, 1]])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_erosion22(self, dtype):
+        expected = [[0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 1, 0, 0],
+                    [0, 0, 0, 1, 1, 0, 0, 0],
+                    [0, 0, 1, 0, 0, 1, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0]]
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 1, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 1, 1, 1],
+                            [0, 0, 1, 1, 1, 1, 1, 1],
+                            [0, 0, 1, 1, 1, 1, 0, 0],
+                            [0, 1, 1, 1, 1, 1, 1, 0],
+                            [0, 1, 1, 0, 0, 1, 1, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+        out = ndimage.binary_erosion(data, border_value=1)
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_erosion23(self, dtype):
+        struct = ndimage.generate_binary_structure(2, 2)
+        expected = [[0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 1, 1, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0]]
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 1, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 1, 1, 1],
+                            [0, 0, 1, 1, 1, 1, 1, 1],
+                            [0, 0, 1, 1, 1, 1, 0, 0],
+                            [0, 1, 1, 1, 1, 1, 1, 0],
+                            [0, 1, 1, 0, 0, 1, 1, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+        out = ndimage.binary_erosion(data, struct, border_value=1)
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_erosion24(self, dtype):
+        struct = [[0, 1],
+                  [1, 1]]
+        expected = [[0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 1, 1, 1],
+                    [0, 0, 0, 1, 1, 1, 0, 0],
+                    [0, 0, 1, 1, 1, 1, 0, 0],
+                    [0, 0, 1, 0, 0, 0, 1, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0]]
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 1, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 1, 1, 1],
+                            [0, 0, 1, 1, 1, 1, 1, 1],
+                            [0, 0, 1, 1, 1, 1, 0, 0],
+                            [0, 1, 1, 1, 1, 1, 1, 0],
+                            [0, 1, 1, 0, 0, 1, 1, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+        out = ndimage.binary_erosion(data, struct, border_value=1)
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_erosion25(self, dtype):
+        struct = [[0, 1, 0],
+                  [1, 0, 1],
+                  [0, 1, 0]]
+        expected = [[0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 1, 0, 0],
+                    [0, 0, 0, 1, 0, 0, 0, 0],
+                    [0, 0, 1, 0, 0, 1, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0]]
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 1, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 1, 1, 1],
+                            [0, 0, 1, 1, 1, 0, 1, 1],
+                            [0, 0, 1, 0, 1, 1, 0, 0],
+                            [0, 1, 0, 1, 1, 1, 1, 0],
+                            [0, 1, 1, 0, 0, 1, 1, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+        out = ndimage.binary_erosion(data, struct, border_value=1)
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_erosion26(self, dtype):
+        struct = [[0, 1, 0],
+                  [1, 0, 1],
+                  [0, 1, 0]]
+        expected = [[0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 1],
+                    [0, 0, 0, 0, 1, 0, 0, 1],
+                    [0, 0, 1, 0, 0, 0, 0, 0],
+                    [0, 1, 0, 0, 1, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 1]]
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 1, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 1, 1, 1],
+                            [0, 0, 1, 1, 1, 0, 1, 1],
+                            [0, 0, 1, 0, 1, 1, 0, 0],
+                            [0, 1, 0, 1, 1, 1, 1, 0],
+                            [0, 1, 1, 0, 0, 1, 1, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+        out = ndimage.binary_erosion(data, struct, border_value=1,
+                                     origin=(-1, -1))
+        assert_array_almost_equal(out, expected)
+
+    def test_binary_erosion27(self):
+        struct = [[0, 1, 0],
+                  [1, 1, 1],
+                  [0, 1, 0]]
+        expected = [[0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 1, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0]]
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 1, 0, 0, 0],
+                            [0, 0, 1, 1, 1, 0, 0],
+                            [0, 1, 1, 1, 1, 1, 0],
+                            [0, 0, 1, 1, 1, 0, 0],
+                            [0, 0, 0, 1, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0]], bool)
+        out = ndimage.binary_erosion(data, struct, border_value=1,
+                                     iterations=2)
+        assert_array_almost_equal(out, expected)
+
+    def test_binary_erosion28(self):
+        struct = [[0, 1, 0],
+                  [1, 1, 1],
+                  [0, 1, 0]]
+        expected = [[0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 1, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0]]
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 1, 0, 0, 0],
+                            [0, 0, 1, 1, 1, 0, 0],
+                            [0, 1, 1, 1, 1, 1, 0],
+                            [0, 0, 1, 1, 1, 0, 0],
+                            [0, 0, 0, 1, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0]], bool)
+        out = numpy.zeros(data.shape, bool)
+        ndimage.binary_erosion(data, struct, border_value=1,
+                               iterations=2, output=out)
+        assert_array_almost_equal(out, expected)
+
+    def test_binary_erosion29(self):
+        struct = [[0, 1, 0],
+                  [1, 1, 1],
+                  [0, 1, 0]]
+        expected = [[0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 1, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0]]
+        data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
+                            [0, 0, 1, 1, 1, 0, 0],
+                            [0, 1, 1, 1, 1, 1, 0],
+                            [1, 1, 1, 1, 1, 1, 1],
+                            [0, 1, 1, 1, 1, 1, 0],
+                            [0, 0, 1, 1, 1, 0, 0],
+                            [0, 0, 0, 1, 0, 0, 0]], bool)
+        out = ndimage.binary_erosion(data, struct,
+                                     border_value=1, iterations=3)
+        assert_array_almost_equal(out, expected)
+
+    def test_binary_erosion30(self):
+        struct = [[0, 1, 0],
+                  [1, 1, 1],
+                  [0, 1, 0]]
+        expected = [[0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 1, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0]]
+        data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
+                            [0, 0, 1, 1, 1, 0, 0],
+                            [0, 1, 1, 1, 1, 1, 0],
+                            [1, 1, 1, 1, 1, 1, 1],
+                            [0, 1, 1, 1, 1, 1, 0],
+                            [0, 0, 1, 1, 1, 0, 0],
+                            [0, 0, 0, 1, 0, 0, 0]], bool)
+        out = numpy.zeros(data.shape, bool)
+        ndimage.binary_erosion(data, struct, border_value=1,
+                               iterations=3, output=out)
+        assert_array_almost_equal(out, expected)
+
+        # test with output memory overlap
+        ndimage.binary_erosion(data, struct, border_value=1,
+                               iterations=3, output=data)
+        assert_array_almost_equal(data, expected)
+
+    def test_binary_erosion31(self):
+        struct = [[0, 1, 0],
+                  [1, 1, 1],
+                  [0, 1, 0]]
+        expected = [[0, 0, 1, 0, 0, 0, 0],
+                    [0, 1, 1, 1, 0, 0, 0],
+                    [1, 1, 1, 1, 1, 0, 1],
+                    [0, 1, 1, 1, 0, 0, 0],
+                    [0, 0, 1, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 1, 0, 0, 0, 1]]
+        data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
+                            [0, 0, 1, 1, 1, 0, 0],
+                            [0, 1, 1, 1, 1, 1, 0],
+                            [1, 1, 1, 1, 1, 1, 1],
+                            [0, 1, 1, 1, 1, 1, 0],
+                            [0, 0, 1, 1, 1, 0, 0],
+                            [0, 0, 0, 1, 0, 0, 0]], bool)
+        out = numpy.zeros(data.shape, bool)
+        ndimage.binary_erosion(data, struct, border_value=1,
+                               iterations=1, output=out, origin=(-1, -1))
+        assert_array_almost_equal(out, expected)
+
+    def test_binary_erosion32(self):
+        struct = [[0, 1, 0],
+                  [1, 1, 1],
+                  [0, 1, 0]]
+        expected = [[0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 1, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0]]
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 1, 0, 0, 0],
+                            [0, 0, 1, 1, 1, 0, 0],
+                            [0, 1, 1, 1, 1, 1, 0],
+                            [0, 0, 1, 1, 1, 0, 0],
+                            [0, 0, 0, 1, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0]], bool)
+        out = ndimage.binary_erosion(data, struct,
+                                     border_value=1, iterations=2)
+        assert_array_almost_equal(out, expected)
+
+    def test_binary_erosion33(self):
+        struct = [[0, 1, 0],
+                  [1, 1, 1],
+                  [0, 1, 0]]
+        expected = [[0, 0, 0, 0, 0, 1, 1],
+                    [0, 0, 0, 0, 0, 0, 1],
+                    [0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0]]
+        mask = [[1, 1, 1, 1, 1, 0, 0],
+                [1, 1, 1, 1, 1, 1, 0],
+                [1, 1, 1, 1, 1, 1, 1],
+                [1, 1, 1, 1, 1, 1, 1],
+                [1, 1, 1, 1, 1, 1, 1],
+                [1, 1, 1, 1, 1, 1, 1],
+                [1, 1, 1, 1, 1, 1, 1]]
+        data = numpy.array([[0, 0, 0, 0, 0, 1, 1],
+                            [0, 0, 0, 1, 0, 0, 1],
+                            [0, 0, 1, 1, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 0, 0],
+                            [0, 0, 0, 1, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0]], bool)
+        out = ndimage.binary_erosion(data, struct,
+                                     border_value=1, mask=mask, iterations=-1)
+        assert_array_almost_equal(out, expected)
+
+    def test_binary_erosion34(self):
+        struct = [[0, 1, 0],
+                  [1, 1, 1],
+                  [0, 1, 0]]
+        expected = [[0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 1, 0, 0, 0],
+                    [0, 0, 0, 1, 0, 0, 0],
+                    [0, 1, 1, 1, 1, 1, 0],
+                    [0, 0, 0, 1, 0, 0, 0],
+                    [0, 0, 0, 1, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0]]
+        mask = [[0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 1, 1, 1, 0, 0],
+                [0, 0, 1, 0, 1, 0, 0],
+                [0, 0, 1, 1, 1, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0]]
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 1, 0, 0, 0],
+                            [0, 0, 1, 1, 1, 0, 0],
+                            [0, 1, 1, 1, 1, 1, 0],
+                            [0, 0, 1, 1, 1, 0, 0],
+                            [0, 0, 0, 1, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0]], bool)
+        out = ndimage.binary_erosion(data, struct,
+                                     border_value=1, mask=mask)
+        assert_array_almost_equal(out, expected)
+
+    def test_binary_erosion35(self):
+        struct = [[0, 1, 0],
+                  [1, 1, 1],
+                  [0, 1, 0]]
+        mask = [[0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 1, 1, 1, 0, 0],
+                [0, 0, 1, 0, 1, 0, 0],
+                [0, 0, 1, 1, 1, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0]]
+        data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
+                            [0, 0, 1, 1, 1, 0, 0],
+                            [0, 1, 1, 1, 1, 1, 0],
+                            [1, 1, 1, 1, 1, 1, 1],
+                            [0, 1, 1, 1, 1, 1, 0],
+                            [0, 0, 1, 1, 1, 0, 0],
+                            [0, 0, 0, 1, 0, 0, 0]], bool)
+        tmp = [[0, 0, 1, 0, 0, 0, 0],
+               [0, 1, 1, 1, 0, 0, 0],
+               [1, 1, 1, 1, 1, 0, 1],
+               [0, 1, 1, 1, 0, 0, 0],
+               [0, 0, 1, 0, 0, 0, 0],
+               [0, 0, 0, 0, 0, 0, 0],
+               [0, 0, 1, 0, 0, 0, 1]]
+        expected = numpy.logical_and(tmp, mask)
+        tmp = numpy.logical_and(data, numpy.logical_not(mask))
+        expected = numpy.logical_or(expected, tmp)
+        out = numpy.zeros(data.shape, bool)
+        ndimage.binary_erosion(data, struct, border_value=1,
+                               iterations=1, output=out,
+                               origin=(-1, -1), mask=mask)
+        assert_array_almost_equal(out, expected)
+
+    def test_binary_erosion36(self):
+        struct = [[0, 1, 0],
+                  [1, 0, 1],
+                  [0, 1, 0]]
+        mask = [[0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 1, 1, 1, 0, 0, 0],
+                [0, 0, 1, 0, 1, 0, 0, 0],
+                [0, 0, 1, 1, 1, 0, 0, 0],
+                [0, 0, 1, 1, 1, 0, 0, 0],
+                [0, 0, 1, 1, 1, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0]]
+        tmp = [[0, 0, 0, 0, 0, 0, 0, 0],
+               [0, 0, 0, 0, 0, 0, 0, 1],
+               [0, 0, 0, 0, 1, 0, 0, 1],
+               [0, 0, 1, 0, 0, 0, 0, 0],
+               [0, 1, 0, 0, 1, 0, 0, 0],
+               [0, 0, 0, 0, 0, 0, 0, 0],
+               [0, 0, 0, 0, 0, 0, 0, 0],
+               [0, 0, 0, 0, 0, 0, 0, 1]]
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 1, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 1, 1, 1],
+                            [0, 0, 1, 1, 1, 0, 1, 1],
+                            [0, 0, 1, 0, 1, 1, 0, 0],
+                            [0, 1, 0, 1, 1, 1, 1, 0],
+                            [0, 1, 1, 0, 0, 1, 1, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0]])
+        expected = numpy.logical_and(tmp, mask)
+        tmp = numpy.logical_and(data, numpy.logical_not(mask))
+        expected = numpy.logical_or(expected, tmp)
+        out = ndimage.binary_erosion(data, struct, mask=mask,
+                                     border_value=1, origin=(-1, -1))
+        assert_array_almost_equal(out, expected)
+
+    def test_binary_erosion37(self):
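+        # iterations=0 repeats the erosion until the result stops changing;
+        # the supplied output array object must be the one returned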
+        a = numpy.array([[1, 0, 1],
+                         [0, 1, 0],
+                         [1, 0, 1]], dtype=bool)
+        b = numpy.zeros_like(a)
+        out = ndimage.binary_erosion(a, structure=a, output=b, iterations=0,
+                                     border_value=True, brute_force=True)
+        assert_(out is b)
+        assert_array_equal(
+            ndimage.binary_erosion(a, structure=a, iterations=0,
+                                   border_value=True),
+            b)
+
+    def test_binary_erosion38(self):
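+        # a non-integer iterations value should raise TypeError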
+        data = numpy.array([[1, 0, 1],
+                            [0, 1, 0],
+                            [1, 0, 1]], dtype=bool)
+        iterations = 2.0
+        with assert_raises(TypeError):
+            _ = ndimage.binary_erosion(data, iterations=iterations)
+
+    def test_binary_erosion39(self):
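+        # numpy integer scalars (int32 here, int64 below) are accepted
+        # for iterations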
+        iterations = numpy.int32(3)
+        struct = [[0, 1, 0],
+                  [1, 1, 1],
+                  [0, 1, 0]]
+        expected = [[0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 1, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0]]
+        data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
+                            [0, 0, 1, 1, 1, 0, 0],
+                            [0, 1, 1, 1, 1, 1, 0],
+                            [1, 1, 1, 1, 1, 1, 1],
+                            [0, 1, 1, 1, 1, 1, 0],
+                            [0, 0, 1, 1, 1, 0, 0],
+                            [0, 0, 0, 1, 0, 0, 0]], bool)
+        out = numpy.zeros(data.shape, bool)
+        ndimage.binary_erosion(data, struct, border_value=1,
+                               iterations=iterations, output=out)
+        assert_array_almost_equal(out, expected)
+
+    def test_binary_erosion40(self):
+        iterations = numpy.int64(3)
+        struct = [[0, 1, 0],
+                  [1, 1, 1],
+                  [0, 1, 0]]
+        expected = [[0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 1, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0]]
+        data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
+                            [0, 0, 1, 1, 1, 0, 0],
+                            [0, 1, 1, 1, 1, 1, 0],
+                            [1, 1, 1, 1, 1, 1, 1],
+                            [0, 1, 1, 1, 1, 1, 0],
+                            [0, 0, 1, 1, 1, 0, 0],
+                            [0, 0, 0, 1, 0, 0, 0]], bool)
+        out = numpy.zeros(data.shape, bool)
+        ndimage.binary_erosion(data, struct, border_value=1,
+                               iterations=iterations, output=out)
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_dilation01(self, dtype):
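+        # binary dilation mirrors the erosion cases: 0-d, 1-d, and 2-d inputs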
+        data = numpy.ones([], dtype)
+        out = ndimage.binary_dilation(data)
+        assert_array_almost_equal(out, 1)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_dilation02(self, dtype):
+        data = numpy.zeros([], dtype)
+        out = ndimage.binary_dilation(data)
+        assert_array_almost_equal(out, 0)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_dilation03(self, dtype):
+        data = numpy.ones([1], dtype)
+        out = ndimage.binary_dilation(data)
+        assert_array_almost_equal(out, [1])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_dilation04(self, dtype):
+        data = numpy.zeros([1], dtype)
+        out = ndimage.binary_dilation(data)
+        assert_array_almost_equal(out, [0])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_dilation05(self, dtype):
+        data = numpy.ones([3], dtype)
+        out = ndimage.binary_dilation(data)
+        assert_array_almost_equal(out, [1, 1, 1])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_dilation06(self, dtype):
+        data = numpy.zeros([3], dtype)
+        out = ndimage.binary_dilation(data)
+        assert_array_almost_equal(out, [0, 0, 0])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_dilation07(self, dtype):
+        data = numpy.zeros([3], dtype)
+        data[1] = 1
+        out = ndimage.binary_dilation(data)
+        assert_array_almost_equal(out, [1, 1, 1])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_dilation08(self, dtype):
+        data = numpy.zeros([5], dtype)
+        data[1] = 1
+        data[3] = 1
+        out = ndimage.binary_dilation(data)
+        assert_array_almost_equal(out, [1, 1, 1, 1, 1])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_dilation09(self, dtype):
+        data = numpy.zeros([5], dtype)
+        data[1] = 1
+        out = ndimage.binary_dilation(data)
+        assert_array_almost_equal(out, [1, 1, 1, 0, 0])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_dilation10(self, dtype):
+        data = numpy.zeros([5], dtype)
+        data[1] = 1
+        out = ndimage.binary_dilation(data, origin=-1)
+        assert_array_almost_equal(out, [0, 1, 1, 1, 0])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_dilation11(self, dtype):
+        data = numpy.zeros([5], dtype)
+        data[1] = 1
+        out = ndimage.binary_dilation(data, origin=1)
+        assert_array_almost_equal(out, [1, 1, 0, 0, 0])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_dilation12(self, dtype):
+        data = numpy.zeros([5], dtype)
+        data[1] = 1
+        struct = [1, 0, 1]
+        out = ndimage.binary_dilation(data, struct)
+        assert_array_almost_equal(out, [1, 0, 1, 0, 0])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_dilation13(self, dtype):
+        data = numpy.zeros([5], dtype)
+        data[1] = 1
+        struct = [1, 0, 1]
+        out = ndimage.binary_dilation(data, struct, border_value=1)
+        assert_array_almost_equal(out, [1, 0, 1, 0, 1])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_dilation14(self, dtype):
+        data = numpy.zeros([5], dtype)
+        data[1] = 1
+        struct = [1, 0, 1]
+        out = ndimage.binary_dilation(data, struct, origin=-1)
+        assert_array_almost_equal(out, [0, 1, 0, 1, 0])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_dilation15(self, dtype):
+        data = numpy.zeros([5], dtype)
+        data[1] = 1
+        struct = [1, 0, 1]
+        out = ndimage.binary_dilation(data, struct,
+                                      origin=-1, border_value=1)
+        assert_array_almost_equal(out, [1, 1, 0, 1, 0])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_dilation16(self, dtype):
+        data = numpy.ones([1, 1], dtype)
+        out = ndimage.binary_dilation(data)
+        assert_array_almost_equal(out, [[1]])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_dilation17(self, dtype):
+        data = numpy.zeros([1, 1], dtype)
+        out = ndimage.binary_dilation(data)
+        assert_array_almost_equal(out, [[0]])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_dilation18(self, dtype):
+        data = numpy.ones([1, 3], dtype)
+        out = ndimage.binary_dilation(data)
+        assert_array_almost_equal(out, [[1, 1, 1]])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_dilation19(self, dtype):
+        data = numpy.ones([3, 3], dtype)
+        out = ndimage.binary_dilation(data)
+        assert_array_almost_equal(out, [[1, 1, 1],
+                                        [1, 1, 1],
+                                        [1, 1, 1]])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_dilation20(self, dtype):
+        data = numpy.zeros([3, 3], dtype)
+        data[1, 1] = 1
+        out = ndimage.binary_dilation(data)
+        assert_array_almost_equal(out, [[0, 1, 0],
+                                        [1, 1, 1],
+                                        [0, 1, 0]])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_dilation21(self, dtype):
+        struct = ndimage.generate_binary_structure(2, 2)
+        data = numpy.zeros([3, 3], dtype)
+        data[1, 1] = 1
+        out = ndimage.binary_dilation(data, struct)
+        assert_array_almost_equal(out, [[1, 1, 1],
+                                        [1, 1, 1],
+                                        [1, 1, 1]])
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_dilation22(self, dtype):
+        expected = [[0, 1, 0, 0, 0, 0, 0, 0],
+                    [1, 1, 1, 0, 0, 0, 0, 0],
+                    [0, 1, 0, 0, 0, 1, 0, 0],
+                    [0, 0, 0, 1, 1, 1, 1, 0],
+                    [0, 0, 1, 1, 1, 1, 0, 0],
+                    [0, 1, 1, 1, 1, 1, 1, 0],
+                    [0, 0, 1, 0, 0, 1, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0]]
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 1, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 1, 0, 0],
+                            [0, 0, 0, 1, 1, 0, 0, 0],
+                            [0, 0, 1, 0, 0, 1, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+        out = ndimage.binary_dilation(data)
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_dilation23(self, dtype):
+        expected = [[1, 1, 1, 1, 1, 1, 1, 1],
+                    [1, 1, 1, 0, 0, 0, 0, 1],
+                    [1, 1, 0, 0, 0, 1, 0, 1],
+                    [1, 0, 0, 1, 1, 1, 1, 1],
+                    [1, 0, 1, 1, 1, 1, 0, 1],
+                    [1, 1, 1, 1, 1, 1, 1, 1],
+                    [1, 0, 1, 0, 0, 1, 0, 1],
+                    [1, 1, 1, 1, 1, 1, 1, 1]]
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 1, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 1, 0, 0],
+                            [0, 0, 0, 1, 1, 0, 0, 0],
+                            [0, 0, 1, 0, 0, 1, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+        out = ndimage.binary_dilation(data, border_value=1)
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_dilation24(self, dtype):
+        expected = [[1, 1, 0, 0, 0, 0, 0, 0],
+                    [1, 0, 0, 0, 1, 0, 0, 0],
+                    [0, 0, 1, 1, 1, 1, 0, 0],
+                    [0, 1, 1, 1, 1, 0, 0, 0],
+                    [1, 1, 1, 1, 1, 1, 0, 0],
+                    [0, 1, 0, 0, 1, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0]]
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 1, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 1, 0, 0],
+                            [0, 0, 0, 1, 1, 0, 0, 0],
+                            [0, 0, 1, 0, 0, 1, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+        out = ndimage.binary_dilation(data, origin=(1, 1))
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_dilation25(self, dtype):
+        expected = [[1, 1, 0, 0, 0, 0, 1, 1],
+                    [1, 0, 0, 0, 1, 0, 1, 1],
+                    [0, 0, 1, 1, 1, 1, 1, 1],
+                    [0, 1, 1, 1, 1, 0, 1, 1],
+                    [1, 1, 1, 1, 1, 1, 1, 1],
+                    [0, 1, 0, 0, 1, 0, 1, 1],
+                    [1, 1, 1, 1, 1, 1, 1, 1],
+                    [1, 1, 1, 1, 1, 1, 1, 1]]
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 1, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 1, 0, 0],
+                            [0, 0, 0, 1, 1, 0, 0, 0],
+                            [0, 0, 1, 0, 0, 1, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+        out = ndimage.binary_dilation(data, origin=(1, 1), border_value=1)
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_dilation26(self, dtype):
+        struct = ndimage.generate_binary_structure(2, 2)
+        expected = [[1, 1, 1, 0, 0, 0, 0, 0],
+                    [1, 1, 1, 0, 0, 0, 0, 0],
+                    [1, 1, 1, 0, 1, 1, 1, 0],
+                    [0, 0, 1, 1, 1, 1, 1, 0],
+                    [0, 1, 1, 1, 1, 1, 1, 0],
+                    [0, 1, 1, 1, 1, 1, 1, 0],
+                    [0, 1, 1, 1, 1, 1, 1, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0]]
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 1, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 1, 0, 0],
+                            [0, 0, 0, 1, 1, 0, 0, 0],
+                            [0, 0, 1, 0, 0, 1, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+        out = ndimage.binary_dilation(data, struct)
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_dilation27(self, dtype):
+        struct = [[0, 1],
+                  [1, 1]]
+        expected = [[0, 1, 0, 0, 0, 0, 0, 0],
+                    [1, 1, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 1, 0, 0],
+                    [0, 0, 0, 1, 1, 1, 0, 0],
+                    [0, 0, 1, 1, 1, 1, 0, 0],
+                    [0, 1, 1, 0, 1, 1, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0]]
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 1, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 1, 0, 0],
+                            [0, 0, 0, 1, 1, 0, 0, 0],
+                            [0, 0, 1, 0, 0, 1, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+        out = ndimage.binary_dilation(data, struct)
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_dilation28(self, dtype):
+        expected = [[1, 1, 1, 1],
+                    [1, 0, 0, 1],
+                    [1, 0, 0, 1],
+                    [1, 1, 1, 1]]
+        data = numpy.array([[0, 0, 0, 0],
+                            [0, 0, 0, 0],
+                            [0, 0, 0, 0],
+                            [0, 0, 0, 0]], dtype)
+        out = ndimage.binary_dilation(data, border_value=1)
+        assert_array_almost_equal(out, expected)
+
+    def test_binary_dilation29(self):
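+        # repeated dilation grows the seed once per iteration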
+        struct = [[0, 1],
+                  [1, 1]]
+        expected = [[0, 0, 0, 0, 0],
+                    [0, 0, 0, 1, 0],
+                    [0, 0, 1, 1, 0],
+                    [0, 1, 1, 1, 0],
+                    [0, 0, 0, 0, 0]]
+
+        data = numpy.array([[0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0],
+                            [0, 0, 0, 1, 0],
+                            [0, 0, 0, 0, 0]], bool)
+        out = ndimage.binary_dilation(data, struct, iterations=2)
+        assert_array_almost_equal(out, expected)
+
+    def test_binary_dilation30(self):
+        struct = [[0, 1],
+                  [1, 1]]
+        expected = [[0, 0, 0, 0, 0],
+                    [0, 0, 0, 1, 0],
+                    [0, 0, 1, 1, 0],
+                    [0, 1, 1, 1, 0],
+                    [0, 0, 0, 0, 0]]
+
+        data = numpy.array([[0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0],
+                            [0, 0, 0, 1, 0],
+                            [0, 0, 0, 0, 0]], bool)
+        out = numpy.zeros(data.shape, bool)
+        ndimage.binary_dilation(data, struct, iterations=2, output=out)
+        assert_array_almost_equal(out, expected)
+
+    def test_binary_dilation31(self):
+        struct = [[0, 1],
+                  [1, 1]]
+        expected = [[0, 0, 0, 1, 0],
+                    [0, 0, 1, 1, 0],
+                    [0, 1, 1, 1, 0],
+                    [1, 1, 1, 1, 0],
+                    [0, 0, 0, 0, 0]]
+
+        data = numpy.array([[0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0],
+                            [0, 0, 0, 1, 0],
+                            [0, 0, 0, 0, 0]], bool)
+        out = ndimage.binary_dilation(data, struct, iterations=3)
+        assert_array_almost_equal(out, expected)
+
+    def test_binary_dilation32(self):
+        struct = [[0, 1],
+                  [1, 1]]
+        expected = [[0, 0, 0, 1, 0],
+                    [0, 0, 1, 1, 0],
+                    [0, 1, 1, 1, 0],
+                    [1, 1, 1, 1, 0],
+                    [0, 0, 0, 0, 0]]
+
+        data = numpy.array([[0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0],
+                            [0, 0, 0, 1, 0],
+                            [0, 0, 0, 0, 0]], bool)
+        out = numpy.zeros(data.shape, bool)
+        ndimage.binary_dilation(data, struct, iterations=3, output=out)
+        assert_array_almost_equal(out, expected)
+
+    def test_binary_dilation33(self):
+        struct = [[0, 1, 0],
+                  [1, 1, 1],
+                  [0, 1, 0]]
+        expected = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
+                                [0, 0, 0, 0, 0, 0, 0, 0],
+                                [0, 0, 0, 0, 0, 0, 0, 0],
+                                [0, 0, 0, 0, 1, 1, 0, 0],
+                                [0, 0, 1, 1, 1, 0, 0, 0],
+                                [0, 1, 1, 0, 1, 1, 0, 0],
+                                [0, 0, 0, 0, 0, 0, 0, 0],
+                                [0, 0, 0, 0, 0, 0, 0, 0]], bool)
+        mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 1, 0],
+                            [0, 0, 0, 0, 1, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 0, 0, 0],
+                            [0, 1, 1, 0, 1, 1, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0]], bool)
+        data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 1, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0]], bool)
+
+        out = ndimage.binary_dilation(data, struct, iterations=-1,
+                                      mask=mask, border_value=0)
+        assert_array_almost_equal(out, expected)
+
+    def test_binary_dilation34(self):
+        struct = [[0, 1, 0],
+                  [1, 1, 1],
+                  [0, 1, 0]]
+        expected = [[0, 1, 0, 0, 0, 0, 0, 0],
+                    [0, 1, 1, 0, 0, 0, 0, 0],
+                    [0, 0, 1, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0]]
+        mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
+                            [0, 1, 1, 0, 0, 0, 0, 0],
+                            [0, 0, 1, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 1, 0, 0],
+                            [0, 0, 0, 1, 1, 0, 0, 0],
+                            [0, 0, 1, 0, 0, 1, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0]], bool)
+        data = numpy.zeros(mask.shape, bool)
+        out = ndimage.binary_dilation(data, struct, iterations=-1,
+                                      mask=mask, border_value=1)
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_dilation35(self, dtype):
+        tmp = [[1, 1, 0, 0, 0, 0, 1, 1],
+               [1, 0, 0, 0, 1, 0, 1, 1],
+               [0, 0, 1, 1, 1, 1, 1, 1],
+               [0, 1, 1, 1, 1, 0, 1, 1],
+               [1, 1, 1, 1, 1, 1, 1, 1],
+               [0, 1, 0, 0, 1, 0, 1, 1],
+               [1, 1, 1, 1, 1, 1, 1, 1],
+               [1, 1, 1, 1, 1, 1, 1, 1]]
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 1, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 1, 0, 0],
+                            [0, 0, 0, 1, 1, 0, 0, 0],
+                            [0, 0, 1, 0, 0, 1, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0]])
+        mask = [[0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 1, 1, 1, 1, 0, 0],
+                [0, 0, 1, 1, 1, 1, 0, 0],
+                [0, 0, 1, 1, 1, 1, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0]]
+        expected = numpy.logical_and(tmp, mask)
+        tmp = numpy.logical_and(data, numpy.logical_not(mask))
+        expected = numpy.logical_or(expected, tmp)
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 1, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 1, 0, 0],
+                            [0, 0, 0, 1, 1, 0, 0, 0],
+                            [0, 0, 1, 0, 0, 1, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+        out = ndimage.binary_dilation(data, mask=mask,
+                                      origin=(1, 1), border_value=1)
+        assert_array_almost_equal(out, expected)
+
+    def test_binary_propagation01(self):
+        struct = [[0, 1, 0],
+                  [1, 1, 1],
+                  [0, 1, 0]]
+        expected = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
+                                [0, 0, 0, 0, 0, 0, 0, 0],
+                                [0, 0, 0, 0, 0, 0, 0, 0],
+                                [0, 0, 0, 0, 1, 1, 0, 0],
+                                [0, 0, 1, 1, 1, 0, 0, 0],
+                                [0, 1, 1, 0, 1, 1, 0, 0],
+                                [0, 0, 0, 0, 0, 0, 0, 0],
+                                [0, 0, 0, 0, 0, 0, 0, 0]], bool)
+        mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 1, 0],
+                            [0, 0, 0, 0, 1, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 0, 0, 0],
+                            [0, 1, 1, 0, 1, 1, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0]], bool)
+        data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 1, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0]], bool)
+
+        out = ndimage.binary_propagation(data, struct,
+                                         mask=mask, border_value=0)
+        assert_array_almost_equal(out, expected)
+
+    def test_binary_propagation02(self):
+        struct = [[0, 1, 0],
+                  [1, 1, 1],
+                  [0, 1, 0]]
+        expected = [[0, 1, 0, 0, 0, 0, 0, 0],
+                    [0, 1, 1, 0, 0, 0, 0, 0],
+                    [0, 0, 1, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0]]
+        mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
+                            [0, 1, 1, 0, 0, 0, 0, 0],
+                            [0, 0, 1, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 1, 0, 0],
+                            [0, 0, 0, 1, 1, 0, 0, 0],
+                            [0, 0, 1, 0, 0, 1, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0]], bool)
+        data = numpy.zeros(mask.shape, bool)
+        out = ndimage.binary_propagation(data, struct,
+                                         mask=mask, border_value=1)
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_opening01(self, dtype):
+        expected = [[0, 1, 0, 0, 0, 0, 0, 0],
+                    [1, 1, 1, 0, 0, 0, 0, 0],
+                    [0, 1, 0, 0, 0, 1, 0, 0],
+                    [0, 0, 0, 0, 1, 1, 1, 0],
+                    [0, 0, 1, 0, 0, 1, 0, 0],
+                    [0, 1, 1, 1, 1, 1, 1, 0],
+                    [0, 0, 1, 0, 0, 1, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0]]
+        data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
+                            [1, 1, 1, 0, 0, 0, 0, 0],
+                            [0, 1, 0, 0, 0, 1, 0, 0],
+                            [0, 0, 0, 1, 1, 1, 1, 0],
+                            [0, 0, 1, 1, 0, 1, 0, 0],
+                            [0, 1, 1, 1, 1, 1, 1, 0],
+                            [0, 0, 1, 0, 0, 1, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+        out = ndimage.binary_opening(data)
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_opening02(self, dtype):
+        struct = ndimage.generate_binary_structure(2, 2)
+        expected = [[1, 1, 1, 0, 0, 0, 0, 0],
+                    [1, 1, 1, 0, 0, 0, 0, 0],
+                    [1, 1, 1, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 1, 1, 1, 0, 0, 0, 0],
+                    [0, 1, 1, 1, 0, 0, 0, 0],
+                    [0, 1, 1, 1, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0]]
+        data = numpy.array([[1, 1, 1, 0, 0, 0, 0, 0],
+                            [1, 1, 1, 0, 0, 0, 0, 0],
+                            [1, 1, 1, 1, 1, 1, 1, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0],
+                            [0, 1, 1, 1, 0, 1, 1, 0],
+                            [0, 1, 1, 1, 1, 1, 1, 0],
+                            [0, 1, 1, 1, 1, 1, 1, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+        out = ndimage.binary_opening(data, struct)
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_closing01(self, dtype):
+        expected = [[0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 1, 1, 0, 0, 0, 0, 0],
+                    [0, 1, 1, 1, 0, 1, 0, 0],
+                    [0, 0, 1, 1, 1, 1, 1, 0],
+                    [0, 0, 1, 1, 1, 1, 0, 0],
+                    [0, 1, 1, 1, 1, 1, 1, 0],
+                    [0, 0, 1, 0, 0, 1, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0]]
+        data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
+                            [1, 1, 1, 0, 0, 0, 0, 0],
+                            [0, 1, 0, 0, 0, 1, 0, 0],
+                            [0, 0, 0, 1, 1, 1, 1, 0],
+                            [0, 0, 1, 1, 0, 1, 0, 0],
+                            [0, 1, 1, 1, 1, 1, 1, 0],
+                            [0, 0, 1, 0, 0, 1, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+        out = ndimage.binary_closing(data)
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_binary_closing02(self, dtype):
+        struct = ndimage.generate_binary_structure(2, 2)
+        expected = [[0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 1, 1, 0, 0, 0, 0, 0],
+                    [0, 1, 1, 1, 1, 1, 1, 0],
+                    [0, 1, 1, 1, 1, 1, 1, 0],
+                    [0, 1, 1, 1, 1, 1, 1, 0],
+                    [0, 1, 1, 1, 1, 1, 1, 0],
+                    [0, 1, 1, 1, 1, 1, 1, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0]]
+        data = numpy.array([[1, 1, 1, 0, 0, 0, 0, 0],
+                            [1, 1, 1, 0, 0, 0, 0, 0],
+                            [1, 1, 1, 1, 1, 1, 1, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0],
+                            [0, 1, 1, 1, 0, 1, 1, 0],
+                            [0, 1, 1, 1, 1, 1, 1, 0],
+                            [0, 1, 1, 1, 1, 1, 1, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+        out = ndimage.binary_closing(data, struct)
+        assert_array_almost_equal(out, expected)
+
+    def test_binary_fill_holes01(self):
+        expected = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+                                [0, 0, 1, 1, 1, 1, 0, 0],
+                                [0, 0, 1, 1, 1, 1, 0, 0],
+                                [0, 0, 1, 1, 1, 1, 0, 0],
+                                [0, 0, 1, 1, 1, 1, 0, 0],
+                                [0, 0, 1, 1, 1, 1, 0, 0],
+                                [0, 0, 0, 0, 0, 0, 0, 0]], bool)
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 1, 0, 0, 1, 0, 0],
+                            [0, 0, 1, 0, 0, 1, 0, 0],
+                            [0, 0, 1, 0, 0, 1, 0, 0],
+                            [0, 0, 1, 1, 1, 1, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0]], bool)
+        out = ndimage.binary_fill_holes(data)
+        assert_array_almost_equal(out, expected)
+
+    def test_binary_fill_holes02(self):
+        expected = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+                                [0, 0, 0, 1, 1, 0, 0, 0],
+                                [0, 0, 1, 1, 1, 1, 0, 0],
+                                [0, 0, 1, 1, 1, 1, 0, 0],
+                                [0, 0, 1, 1, 1, 1, 0, 0],
+                                [0, 0, 0, 1, 1, 0, 0, 0],
+                                [0, 0, 0, 0, 0, 0, 0, 0]], bool)
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 0, 1, 1, 0, 0, 0],
+                            [0, 0, 1, 0, 0, 1, 0, 0],
+                            [0, 0, 1, 0, 0, 1, 0, 0],
+                            [0, 0, 1, 0, 0, 1, 0, 0],
+                            [0, 0, 0, 1, 1, 0, 0, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0]], bool)
+        out = ndimage.binary_fill_holes(data)
+        assert_array_almost_equal(out, expected)
+
+    def test_binary_fill_holes03(self):
+        expected = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+                                [0, 0, 1, 0, 0, 0, 0, 0],
+                                [0, 1, 1, 1, 0, 1, 1, 1],
+                                [0, 1, 1, 1, 0, 1, 1, 1],
+                                [0, 1, 1, 1, 0, 1, 1, 1],
+                                [0, 0, 1, 0, 0, 1, 1, 1],
+                                [0, 0, 0, 0, 0, 0, 0, 0]], bool)
+        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
+                            [0, 0, 1, 0, 0, 0, 0, 0],
+                            [0, 1, 0, 1, 0, 1, 1, 1],
+                            [0, 1, 0, 1, 0, 1, 0, 1],
+                            [0, 1, 0, 1, 0, 1, 0, 1],
+                            [0, 0, 1, 0, 0, 1, 1, 1],
+                            [0, 0, 0, 0, 0, 0, 0, 0]], bool)
+        out = ndimage.binary_fill_holes(data)
+        assert_array_almost_equal(out, expected)
+
+    def test_grey_erosion01(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [7, 6, 9, 3, 5],
+                             [5, 8, 3, 7, 1]])
+        footprint = [[1, 0, 1], [1, 1, 0]]
+        output = ndimage.grey_erosion(array, footprint=footprint)
+        assert_array_almost_equal([[2, 2, 1, 1, 1],
+                                   [2, 3, 1, 3, 1],
+                                   [5, 5, 3, 3, 1]], output)
+
+    def test_grey_erosion01_overlap(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [7, 6, 9, 3, 5],
+                             [5, 8, 3, 7, 1]])
+        footprint = [[1, 0, 1], [1, 1, 0]]
+        ndimage.grey_erosion(array, footprint=footprint, output=array)
+        assert_array_almost_equal([[2, 2, 1, 1, 1],
+                                   [2, 3, 1, 3, 1],
+                                   [5, 5, 3, 3, 1]], array)
+
+    def test_grey_erosion02(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [7, 6, 9, 3, 5],
+                             [5, 8, 3, 7, 1]])
+        footprint = [[1, 0, 1], [1, 1, 0]]
+        structure = [[0, 0, 0], [0, 0, 0]]
+        output = ndimage.grey_erosion(array, footprint=footprint,
+                                      structure=structure)
+        assert_array_almost_equal([[2, 2, 1, 1, 1],
+                                   [2, 3, 1, 3, 1],
+                                   [5, 5, 3, 3, 1]], output)
+
+    def test_grey_erosion03(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [7, 6, 9, 3, 5],
+                             [5, 8, 3, 7, 1]])
+        footprint = [[1, 0, 1], [1, 1, 0]]
+        structure = [[1, 1, 1], [1, 1, 1]]
+        output = ndimage.grey_erosion(array, footprint=footprint,
+                                      structure=structure)
+        assert_array_almost_equal([[1, 1, 0, 0, 0],
+                                   [1, 2, 0, 2, 0],
+                                   [4, 4, 2, 2, 0]], output)
+
+    def test_grey_dilation01(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [7, 6, 9, 3, 5],
+                             [5, 8, 3, 7, 1]])
+        footprint = [[0, 1, 1], [1, 0, 1]]
+        output = ndimage.grey_dilation(array, footprint=footprint)
+        assert_array_almost_equal([[7, 7, 9, 9, 5],
+                                   [7, 9, 8, 9, 7],
+                                   [8, 8, 8, 7, 7]], output)
+
+    def test_grey_dilation02(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [7, 6, 9, 3, 5],
+                             [5, 8, 3, 7, 1]])
+        footprint = [[0, 1, 1], [1, 0, 1]]
+        structure = [[0, 0, 0], [0, 0, 0]]
+        output = ndimage.grey_dilation(array, footprint=footprint,
+                                       structure=structure)
+        assert_array_almost_equal([[7, 7, 9, 9, 5],
+                                   [7, 9, 8, 9, 7],
+                                   [8, 8, 8, 7, 7]], output)
+
+    def test_grey_dilation03(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [7, 6, 9, 3, 5],
+                             [5, 8, 3, 7, 1]])
+        footprint = [[0, 1, 1], [1, 0, 1]]
+        structure = [[1, 1, 1], [1, 1, 1]]
+        output = ndimage.grey_dilation(array, footprint=footprint,
+                                       structure=structure)
+        assert_array_almost_equal([[8, 8, 10, 10, 6],
+                                   [8, 10, 9, 10, 8],
+                                   [9, 9, 9, 8, 8]], output)
+
+    def test_grey_opening01(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [7, 6, 9, 3, 5],
+                             [5, 8, 3, 7, 1]])
+        footprint = [[1, 0, 1], [1, 1, 0]]
+        tmp = ndimage.grey_erosion(array, footprint=footprint)
+        expected = ndimage.grey_dilation(tmp, footprint=footprint)
+        output = ndimage.grey_opening(array, footprint=footprint)
+        assert_array_almost_equal(expected, output)
+
+    def test_grey_opening02(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [7, 6, 9, 3, 5],
+                             [5, 8, 3, 7, 1]])
+        footprint = [[1, 0, 1], [1, 1, 0]]
+        structure = [[0, 0, 0], [0, 0, 0]]
+        tmp = ndimage.grey_erosion(array, footprint=footprint,
+                                   structure=structure)
+        expected = ndimage.grey_dilation(tmp, footprint=footprint,
+                                         structure=structure)
+        output = ndimage.grey_opening(array, footprint=footprint,
+                                      structure=structure)
+        assert_array_almost_equal(expected, output)
+
+    def test_grey_closing01(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [7, 6, 9, 3, 5],
+                             [5, 8, 3, 7, 1]])
+        footprint = [[1, 0, 1], [1, 1, 0]]
+        tmp = ndimage.grey_dilation(array, footprint=footprint)
+        expected = ndimage.grey_erosion(tmp, footprint=footprint)
+        output = ndimage.grey_closing(array, footprint=footprint)
+        assert_array_almost_equal(expected, output)
+
+    def test_grey_closing02(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [7, 6, 9, 3, 5],
+                             [5, 8, 3, 7, 1]])
+        footprint = [[1, 0, 1], [1, 1, 0]]
+        structure = [[0, 0, 0], [0, 0, 0]]
+        tmp = ndimage.grey_dilation(array, footprint=footprint,
+                                    structure=structure)
+        expected = ndimage.grey_erosion(tmp, footprint=footprint,
+                                        structure=structure)
+        output = ndimage.grey_closing(array, footprint=footprint,
+                                      structure=structure)
+        assert_array_almost_equal(expected, output)
+
+    def test_morphological_gradient01(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [7, 6, 9, 3, 5],
+                             [5, 8, 3, 7, 1]])
+        footprint = [[1, 0, 1], [1, 1, 0]]
+        structure = [[0, 0, 0], [0, 0, 0]]
+        tmp1 = ndimage.grey_dilation(array, footprint=footprint,
+                                     structure=structure)
+        tmp2 = ndimage.grey_erosion(array, footprint=footprint,
+                                    structure=structure)
+        expected = tmp1 - tmp2
+        output = numpy.zeros(array.shape, array.dtype)
+        ndimage.morphological_gradient(array, footprint=footprint,
+                                       structure=structure, output=output)
+        assert_array_almost_equal(expected, output)
+
+    def test_morphological_gradient02(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [7, 6, 9, 3, 5],
+                             [5, 8, 3, 7, 1]])
+        footprint = [[1, 0, 1], [1, 1, 0]]
+        structure = [[0, 0, 0], [0, 0, 0]]
+        tmp1 = ndimage.grey_dilation(array, footprint=footprint,
+                                     structure=structure)
+        tmp2 = ndimage.grey_erosion(array, footprint=footprint,
+                                    structure=structure)
+        expected = tmp1 - tmp2
+        output = ndimage.morphological_gradient(array, footprint=footprint,
+                                                structure=structure)
+        assert_array_almost_equal(expected, output)
+
+    def test_morphological_laplace01(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [7, 6, 9, 3, 5],
+                             [5, 8, 3, 7, 1]])
+        footprint = [[1, 0, 1], [1, 1, 0]]
+        structure = [[0, 0, 0], [0, 0, 0]]
+        tmp1 = ndimage.grey_dilation(array, footprint=footprint,
+                                     structure=structure)
+        tmp2 = ndimage.grey_erosion(array, footprint=footprint,
+                                    structure=structure)
+        expected = tmp1 + tmp2 - 2 * array
+        output = numpy.zeros(array.shape, array.dtype)
+        ndimage.morphological_laplace(array, footprint=footprint,
+                                      structure=structure, output=output)
+        assert_array_almost_equal(expected, output)
+
+    def test_morphological_laplace02(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [7, 6, 9, 3, 5],
+                             [5, 8, 3, 7, 1]])
+        footprint = [[1, 0, 1], [1, 1, 0]]
+        structure = [[0, 0, 0], [0, 0, 0]]
+        tmp1 = ndimage.grey_dilation(array, footprint=footprint,
+                                     structure=structure)
+        tmp2 = ndimage.grey_erosion(array, footprint=footprint,
+                                    structure=structure)
+        expected = tmp1 + tmp2 - 2 * array
+        output = ndimage.morphological_laplace(array, footprint=footprint,
+                                               structure=structure)
+        assert_array_almost_equal(expected, output)
+
+    def test_white_tophat01(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [7, 6, 9, 3, 5],
+                             [5, 8, 3, 7, 1]])
+        footprint = [[1, 0, 1], [1, 1, 0]]
+        structure = [[0, 0, 0], [0, 0, 0]]
+        tmp = ndimage.grey_opening(array, footprint=footprint,
+                                   structure=structure)
+        expected = array - tmp
+        output = numpy.zeros(array.shape, array.dtype)
+        ndimage.white_tophat(array, footprint=footprint,
+                             structure=structure, output=output)
+        assert_array_almost_equal(expected, output)
+
+    def test_white_tophat02(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [7, 6, 9, 3, 5],
+                             [5, 8, 3, 7, 1]])
+        footprint = [[1, 0, 1], [1, 1, 0]]
+        structure = [[0, 0, 0], [0, 0, 0]]
+        tmp = ndimage.grey_opening(array, footprint=footprint,
+                                   structure=structure)
+        expected = array - tmp
+        output = ndimage.white_tophat(array, footprint=footprint,
+                                      structure=structure)
+        assert_array_almost_equal(expected, output)
+
+    def test_white_tophat03(self):
+        array = numpy.array([[1, 0, 0, 0, 0, 0, 0],
+                             [0, 1, 1, 1, 1, 1, 0],
+                             [0, 1, 1, 1, 1, 1, 0],
+                             [0, 1, 1, 1, 1, 1, 0],
+                             [0, 1, 1, 1, 0, 1, 0],
+                             [0, 1, 1, 1, 1, 1, 0],
+                             [0, 0, 0, 0, 0, 0, 1]], dtype=numpy.bool_)
+        structure = numpy.ones((3, 3), dtype=numpy.bool_)
+        expected = numpy.array([[0, 1, 1, 0, 0, 0, 0],
+                                [1, 0, 0, 1, 1, 1, 0],
+                                [1, 0, 0, 1, 1, 1, 0],
+                                [0, 1, 1, 0, 0, 0, 1],
+                                [0, 1, 1, 0, 1, 0, 1],
+                                [0, 1, 1, 0, 0, 0, 1],
+                                [0, 0, 0, 1, 1, 1, 1]], dtype=numpy.bool_)
+
+        output = ndimage.white_tophat(array, structure=structure)
+        assert_array_equal(expected, output)
+
+    def test_white_tophat04(self):
+        array = numpy.eye(5, dtype=numpy.bool_)
+        structure = numpy.ones((3, 3), dtype=numpy.bool_)
+
+        # Check that type mismatch is properly handled
+        output = numpy.empty_like(array, dtype=numpy.float64)
+        ndimage.white_tophat(array, structure=structure, output=output)
+
+    def test_black_tophat01(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [7, 6, 9, 3, 5],
+                             [5, 8, 3, 7, 1]])
+        footprint = [[1, 0, 1], [1, 1, 0]]
+        structure = [[0, 0, 0], [0, 0, 0]]
+        tmp = ndimage.grey_closing(array, footprint=footprint,
+                                   structure=structure)
+        expected = tmp - array
+        output = numpy.zeros(array.shape, array.dtype)
+        ndimage.black_tophat(array, footprint=footprint,
+                             structure=structure, output=output)
+        assert_array_almost_equal(expected, output)
+
+    def test_black_tophat02(self):
+        array = numpy.array([[3, 2, 5, 1, 4],
+                             [7, 6, 9, 3, 5],
+                             [5, 8, 3, 7, 1]])
+        footprint = [[1, 0, 1], [1, 1, 0]]
+        structure = [[0, 0, 0], [0, 0, 0]]
+        tmp = ndimage.grey_closing(array, footprint=footprint,
+                                   structure=structure)
+        expected = tmp - array
+        output = ndimage.black_tophat(array, footprint=footprint,
+                                      structure=structure)
+        assert_array_almost_equal(expected, output)
+
+    def test_black_tophat03(self):
+        array = numpy.array([[1, 0, 0, 0, 0, 0, 0],
+                             [0, 1, 1, 1, 1, 1, 0],
+                             [0, 1, 1, 1, 1, 1, 0],
+                             [0, 1, 1, 1, 1, 1, 0],
+                             [0, 1, 1, 1, 0, 1, 0],
+                             [0, 1, 1, 1, 1, 1, 0],
+                             [0, 0, 0, 0, 0, 0, 1]], dtype=numpy.bool_)
+        structure = numpy.ones((3, 3), dtype=numpy.bool_)
+        expected = numpy.array([[0, 1, 1, 1, 1, 1, 1],
+                                [1, 0, 0, 0, 0, 0, 1],
+                                [1, 0, 0, 0, 0, 0, 1],
+                                [1, 0, 0, 0, 0, 0, 1],
+                                [1, 0, 0, 0, 1, 0, 1],
+                                [1, 0, 0, 0, 0, 0, 1],
+                                [1, 1, 1, 1, 1, 1, 0]], dtype=numpy.bool_)
+
+        output = ndimage.black_tophat(array, structure=structure)
+        assert_array_equal(expected, output)
+
+    def test_black_tophat04(self):
+        array = numpy.eye(5, dtype=numpy.bool_)
+        structure = numpy.ones((3, 3), dtype=numpy.bool_)
+
+        # Check that type mismatch is properly handled
+        output = numpy.empty_like(array, dtype=numpy.float64)
+        ndimage.black_tophat(array, structure=structure, output=output)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_hit_or_miss01(self, dtype):
+        struct = [[0, 1, 0],
+                  [1, 1, 1],
+                  [0, 1, 0]]
+        expected = [[0, 0, 0, 0, 0],
+                    [0, 1, 0, 0, 0],
+                    [0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0]]
+        data = numpy.array([[0, 1, 0, 0, 0],
+                            [1, 1, 1, 0, 0],
+                            [0, 1, 0, 1, 1],
+                            [0, 0, 1, 1, 1],
+                            [0, 1, 1, 1, 0],
+                            [0, 1, 1, 1, 1],
+                            [0, 1, 1, 1, 1],
+                            [0, 0, 0, 0, 0]], dtype)
+        out = numpy.zeros(data.shape, bool)
+        ndimage.binary_hit_or_miss(data, struct, output=out)
+        assert_array_almost_equal(expected, out)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_hit_or_miss02(self, dtype):
+        struct = [[0, 1, 0],
+                  [1, 1, 1],
+                  [0, 1, 0]]
+        expected = [[0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 1, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0]]
+        data = numpy.array([[0, 1, 0, 0, 1, 1, 1, 0],
+                            [1, 1, 1, 0, 0, 1, 0, 0],
+                            [0, 1, 0, 1, 1, 1, 1, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+        out = ndimage.binary_hit_or_miss(data, struct)
+        assert_array_almost_equal(expected, out)
+
+    @pytest.mark.parametrize('dtype', types)
+    def test_hit_or_miss03(self, dtype):
+        struct1 = [[0, 0, 0],
+                   [1, 1, 1],
+                   [0, 0, 0]]
+        struct2 = [[1, 1, 1],
+                   [0, 0, 0],
+                   [1, 1, 1]]
+        expected = [[0, 0, 0, 0, 0, 1, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0],
+                    [0, 0, 1, 0, 0, 0, 0, 0],
+                    [0, 0, 0, 0, 0, 0, 0, 0]]
+        data = numpy.array([[0, 1, 0, 0, 1, 1, 1, 0],
+                            [1, 1, 1, 0, 0, 0, 0, 0],
+                            [0, 1, 0, 1, 1, 1, 1, 0],
+                            [0, 0, 1, 1, 1, 1, 1, 0],
+                            [0, 1, 1, 1, 0, 1, 1, 0],
+                            [0, 0, 0, 0, 1, 1, 1, 0],
+                            [0, 1, 1, 1, 1, 1, 1, 0],
+                            [0, 0, 0, 0, 0, 0, 0, 0]], dtype)
+        out = ndimage.binary_hit_or_miss(data, struct1, struct2)
+        assert_array_almost_equal(expected, out)
+
+
+class TestDilateFix:
+
+    def setup_method(self):
+        # dilation-related setup
+        self.array = numpy.array([[0, 0, 0, 0, 0],
+                                  [0, 0, 0, 0, 0],
+                                  [0, 0, 0, 1, 0],
+                                  [0, 0, 1, 1, 0],
+                                  [0, 0, 0, 0, 0]], dtype=numpy.uint8)
+
+        self.sq3x3 = numpy.ones((3, 3))
+        dilated3x3 = ndimage.binary_dilation(self.array, structure=self.sq3x3)
+        self.dilated3x3 = dilated3x3.view(numpy.uint8)
+
+    def test_dilation_square_structure(self):
+        result = ndimage.grey_dilation(self.array, structure=self.sq3x3)
+        # The structuring element is all ones (non-flat), so grey dilation
+        # returns the binary dilation plus the structure value of 1.
+        assert_array_almost_equal(result, self.dilated3x3 + 1)
+
+    def test_dilation_scalar_size(self):
+        result = ndimage.grey_dilation(self.array, size=3)
+        assert_array_almost_equal(result, self.dilated3x3)
+
+
+class TestBinaryOpeningClosing:
+
+    def setup_method(self):
+        a = numpy.zeros((5, 5), dtype=bool)
+        a[1:4, 1:4] = True
+        a[4, 4] = True
+        self.array = a
+        self.sq3x3 = numpy.ones((3, 3))
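+        # Positional arguments after the input are (structure, iterations,
+        # output, origin); the "old"-signature calls below are compared
+        # against calls that also pass mask, border_value and brute_force.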
+        self.opened_old = ndimage.binary_opening(self.array, self.sq3x3,
+                                                 1, None, 0)
+        self.closed_old = ndimage.binary_closing(self.array, self.sq3x3,
+                                                 1, None, 0)
+
+    def test_opening_new_arguments(self):
+        opened_new = ndimage.binary_opening(self.array, self.sq3x3, 1, None,
+                                            0, None, 0, False)
+        assert_array_equal(opened_new, self.opened_old)
+
+    def test_closing_new_arguments(self):
+        closed_new = ndimage.binary_closing(self.array, self.sq3x3, 1, None,
+                                            0, None, 0, False)
+        assert_array_equal(closed_new, self.closed_old)
+
+
+def test_binary_erosion_noninteger_iterations():
+    # regression test for gh-9905, gh-9909: TypeError is raised for
+    # non-integer iterations
+    data = numpy.ones([1])
+    assert_raises(TypeError, ndimage.binary_erosion, data, iterations=0.5)
+    assert_raises(TypeError, ndimage.binary_erosion, data, iterations=1.5)
+
+
+def test_binary_dilation_noninteger_iterations():
+    # regression test for gh-9905, gh-9909: TypeError is raised for
+    # non-integer iterations
+    data = numpy.ones([1])
+    assert_raises(TypeError, ndimage.binary_dilation, data, iterations=0.5)
+    assert_raises(TypeError, ndimage.binary_dilation, data, iterations=1.5)
+
+
+def test_binary_opening_noninteger_iterations():
+    # regression test for gh-9905, gh-9909: TypeError is raised for
+    # non-integer iterations
+    data = numpy.ones([1])
+    assert_raises(TypeError, ndimage.binary_opening, data, iterations=0.5)
+    assert_raises(TypeError, ndimage.binary_opening, data, iterations=1.5)
+
+
+def test_binary_closing_noninteger_iterations():
+    # regression test for gh-9905, gh-9909: TypeError is raised for
+    # non-integer iterations
+    data = numpy.ones([1])
+    assert_raises(TypeError, ndimage.binary_closing, data, iterations=0.5)
+    assert_raises(TypeError, ndimage.binary_closing, data, iterations=1.5)
+
+
+def test_binary_closing_noninteger_brute_force_passes_when_true():
+    # regression test for gh-9905, gh-9909: a non-integer brute_force
+    # value is coerced to bool instead of raising
+    data = numpy.ones([1])
+
+    assert ndimage.binary_erosion(
+        data, iterations=2, brute_force=1.5
+    ) == ndimage.binary_erosion(data, iterations=2, brute_force=bool(1.5))
+    assert ndimage.binary_erosion(
+        data, iterations=2, brute_force=0.0
+    ) == ndimage.binary_erosion(data, iterations=2, brute_force=bool(0.0))
+
+
+@pytest.mark.parametrize(
+    'function',
+    ['binary_erosion', 'binary_dilation', 'binary_opening', 'binary_closing'],
+)
+@pytest.mark.parametrize('iterations', [1, 5])
+@pytest.mark.parametrize('brute_force', [False, True])
+def test_binary_input_as_output(function, iterations, brute_force):
+    rstate = numpy.random.RandomState(123)
+    data = rstate.randint(low=0, high=2, size=100).astype(bool)
+    ndi_func = getattr(ndimage, function)
+
+    # input data is not modified
+    data_orig = data.copy()
+    expected = ndi_func(data, brute_force=brute_force, iterations=iterations)
+    assert_array_equal(data, data_orig)
+
+    # data should now contain the expected result
+    ndi_func(data, brute_force=brute_force, iterations=iterations, output=data)
+    assert_array_equal(expected, data)
+
+
+def test_binary_hit_or_miss_input_as_output():
+    rstate = numpy.random.RandomState(123)
+    data = rstate.randint(low=0, high=2, size=100).astype(bool)
+
+    # input data is not modified
+    data_orig = data.copy()
+    expected = ndimage.binary_hit_or_miss(data)
+    assert_array_equal(data, data_orig)
+
+    # data should now contain the expected result
+    ndimage.binary_hit_or_miss(data, output=data)
+    assert_array_equal(expected, data)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/ndimage/tests/test_splines.py b/__packaged__/coreml/.python_dependencies/scipy/ndimage/tests/test_splines.py
new file mode 100644
index 00000000..514b2160
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/ndimage/tests/test_splines.py
@@ -0,0 +1,65 @@
+"""Tests for spline filtering."""
+import numpy as np
+import pytest
+
+from numpy.testing import assert_almost_equal
+
+from scipy import ndimage
+
+
+def get_spline_knot_values(order):
+    """Knot values to the right of a B-spline's center."""
+    knot_values = {0: [1],
+                   1: [1],
+                   2: [6, 1],
+                   3: [4, 1],
+                   4: [230, 76, 1],
+                   5: [66, 26, 1]}
+
+    return knot_values[order]
+
+
+def make_spline_knot_matrix(n, order, mode='mirror'):
+    """Matrix to invert to find the spline coefficients."""
+    knot_values = get_spline_knot_values(order)
+
+    matrix = np.zeros((n, n))
+    for diag, knot_value in enumerate(knot_values):
+        indices = np.arange(diag, n)
+        if diag == 0:
+            matrix[indices, indices] = knot_value
+        else:
+            matrix[indices, indices - diag] = knot_value
+            matrix[indices - diag, indices] = knot_value
+
+    knot_values_sum = knot_values[0] + 2 * sum(knot_values[1:])
+
+    if mode == 'mirror':
+        start, step = 1, 1
+    elif mode == 'reflect':
+        start, step = 0, 1
+    elif mode == 'grid-wrap':
+        start, step = -1, -1
+    else:
+        raise ValueError('unsupported mode {}'.format(mode))
+
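+    # Fold the knot values that extend past the matrix edge back into the
+    # first and last rows, emulating the boundary extension chosen by `mode`.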
+    for row in range(len(knot_values) - 1):
+        for idx, knot_value in enumerate(knot_values[row + 1:]):
+            matrix[row, start + step*idx] += knot_value
+            matrix[-row - 1, -start - 1 - step*idx] += knot_value
+
+    return matrix / knot_values_sum
+
+
+@pytest.mark.parametrize('order', [0, 1, 2, 3, 4, 5])
+@pytest.mark.parametrize('mode', ['mirror', 'grid-wrap', 'reflect'])
+def test_spline_filter_vs_matrix_solution(order, mode):
+    n = 100
+    eye = np.eye(n, dtype=float)
+    spline_filter_axis_0 = ndimage.spline_filter1d(eye, axis=0, order=order,
+                                                   mode=mode)
+    spline_filter_axis_1 = ndimage.spline_filter1d(eye, axis=1, order=order,
+                                                   mode=mode)
+    matrix = make_spline_knot_matrix(n, order, mode=mode)
+    assert_almost_equal(eye, np.dot(spline_filter_axis_0, matrix))
+    assert_almost_equal(eye, np.dot(spline_filter_axis_1, matrix.T))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/odr/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/odr/__init__.py
new file mode 100644
index 00000000..a44a8c13
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/odr/__init__.py
@@ -0,0 +1,131 @@
+"""
+=================================================
+Orthogonal distance regression (:mod:`scipy.odr`)
+=================================================
+
+.. currentmodule:: scipy.odr
+
+Package Content
+===============
+
+.. autosummary::
+   :toctree: generated/
+
+   Data          -- The data to fit.
+   RealData      -- Data with weights as actual std. dev.s and/or covariances.
+   Model         -- Stores information about the function to be fit.
+   ODR           -- Gathers all info & manages the main fitting routine.
+   Output        -- Result from the fit.
+   odr           -- Low-level function for ODR.
+
+   OdrWarning    -- Warning about potential problems when running ODR.
+   OdrError      -- Error exception.
+   OdrStop       -- Stop exception.
+
+   polynomial    -- Factory function for a general polynomial model.
+   exponential   -- Exponential model
+   multilinear   -- Arbitrary-dimensional linear model
+   unilinear     -- Univariate linear model
+   quadratic     -- Quadratic model
+
+Usage information
+=================
+
+Introduction
+------------
+
+Why Orthogonal Distance Regression (ODR)? Sometimes one has
+measurement errors in the explanatory (a.k.a., "independent")
+variable(s), not just the response (a.k.a., "dependent") variable(s).
+Ordinary Least Squares (OLS) fitting procedures treat the data for
+explanatory variables as fixed, i.e., not subject to error of any kind.
+Furthermore, OLS procedures require that the response variables be an
+explicit function of the explanatory variables; sometimes making the
+equation explicit is impractical and/or introduces errors.  ODR can
+handle both of these cases with ease, and can even reduce to the OLS
+case if that is sufficient for the problem.
+
+ODRPACK is a FORTRAN-77 library for performing ODR with possibly
+non-linear fitting functions. It uses a modified trust-region
+Levenberg-Marquardt-type algorithm [1]_ to estimate the function
+parameters.  The fitting functions are provided by Python functions
+operating on NumPy arrays. The required derivatives may be provided
+by Python functions as well, or may be estimated numerically. ODRPACK
+can do explicit or implicit ODR fits, or it can do OLS. Input and
+output variables may be multidimensional. Weights can be provided to
+account for different variances of the observations, and even
+covariances between dimensions of the variables.
+
+The `scipy.odr` package offers an object-oriented interface to
+ODRPACK, in addition to the low-level `odr` function.
+
+Additional background information about ODRPACK can be found in the
+`ODRPACK User's Guide
+<https://docs.scipy.org/doc/external/odrpack_guide.pdf>`_, reading
+which is recommended.
+
+Basic usage
+-----------
+
+1. Define the function you want to fit against.::
+
+       def f(B, x):
+           '''Linear function y = m*x + b'''
+           # B is a vector of the parameters.
+           # x is an array of the current x values.
+           # x is in the same format as the x passed to Data or RealData.
+           #
+           # Return an array in the same format as y passed to Data or RealData.
+           return B[0]*x + B[1]
+
+2. Create a Model.::
+
+       linear = Model(f)
+
+3. Create a Data or RealData instance.::
+
+       mydata = Data(x, y, wd=1./power(sx,2), we=1./power(sy,2))
+
+   or, when the actual covariances are known::
+
+       mydata = RealData(x, y, sx=sx, sy=sy)
+
+4. Instantiate ODR with your data, model and initial parameter estimate.::
+
+       myodr = ODR(mydata, linear, beta0=[1., 2.])
+
+5. Run the fit.::
+
+       myoutput = myodr.run()
+
+6. Examine output.::
+
+       myoutput.pprint()
+
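+For reference, here are the steps above combined into one runnable
+sketch (the synthetic data and the seed are illustrative only, not from
+the ODRPACK guide)::
+
+    import numpy as np
+    from scipy.odr import Data, Model, ODR
+
+    def f(B, x):
+        # Linear function y = m*x + b
+        return B[0]*x + B[1]
+
+    rng = np.random.default_rng(0)
+    x = np.linspace(0.0, 10.0, 50)
+    y = 3.0*x + 1.0 + rng.normal(scale=0.5, size=x.shape)
+
+    linear = Model(f)
+    mydata = Data(x, y)
+    myodr = ODR(mydata, linear, beta0=[1.0, 2.0])
+    myoutput = myodr.run()
+    myoutput.pprint()
+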
+
+References
+----------
+.. [1] P. T. Boggs and J. E. Rogers, "Orthogonal Distance Regression,"
+   in "Statistical analysis of measurement error models and
+   applications: proceedings of the AMS-IMS-SIAM joint summer research
+   conference held June 10-16, 1989," Contemporary Mathematics,
+   vol. 112, pg. 186, 1990.
+
+"""
+# version: 0.7
+# author: Robert Kern <robert.kern@gmail.com>
+# date: 2006-09-21
+
+from ._odrpack import *
+from ._models import *
+from . import _add_newdocs
+
+# Deprecated namespaces, to be removed in v2.0.0
+from . import models, odrpack
+
+__all__ = [s for s in dir()
+           if not (s.startswith('_') or s in ('odr_stop', 'odr_error'))]
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/__packaged__/coreml/.python_dependencies/scipy/odr/_add_newdocs.py b/__packaged__/coreml/.python_dependencies/scipy/odr/_add_newdocs.py
new file mode 100644
index 00000000..2cbe1f0a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/odr/_add_newdocs.py
@@ -0,0 +1,30 @@
+from numpy import add_newdoc
+
+add_newdoc('scipy.odr', 'odr',
+    """
+    odr(fcn, beta0, y, x, we=None, wd=None, fjacb=None, fjacd=None, extra_args=None, ifixx=None, ifixb=None, job=0, iprint=0, errfile=None, rptfile=None, ndigit=0, taufac=0.0, sstol=-1.0, partol=-1.0, maxit=-1, stpb=None, stpd=None, sclb=None, scld=None, work=None, iwork=None, full_output=0)
+
+    Low-level function for ODR.
+
+    See Also
+    --------
+    ODR : The ODR class gathers all information and coordinates the running of the main fitting routine.
+    Model : The Model class stores information about the function you wish to fit.
+    Data : The data to fit.
+    RealData : Data with weights as actual std. dev.s and/or covariances.
+
+    Notes
+    -----
+    This is a function performing the same operation as the `ODR`,
+    `Model`, and `Data` classes together. The parameters of this
+    function are explained in the class documentation.
+
+    """)
+
+add_newdoc('scipy.odr.__odrpack', '_set_exceptions',
+    """
+    _set_exceptions(odr_error, odr_stop)
+
+    Internal function: set exception classes.
+
+    """)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/odr/_models.py b/__packaged__/coreml/.python_dependencies/scipy/odr/_models.py
new file mode 100644
index 00000000..e0a8d227
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/odr/_models.py
@@ -0,0 +1,315 @@
+""" Collection of Model instances for use with the odrpack fitting package.
+"""
+import numpy as np
+from scipy.odr._odrpack import Model
+
+__all__ = ['Model', 'exponential', 'multilinear', 'unilinear', 'quadratic',
+           'polynomial']
+
+
+def _lin_fcn(B, x):
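+    # Multilinear model: intercept B[0] plus the slopes B[1:] dotted with
+    # the rows of x.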
+    a, b = B[0], B[1:]
+    b.shape = (b.shape[0], 1)
+
+    return a + (x*b).sum(axis=0)
+
+
+def _lin_fjb(B, x):
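+    # Jacobian w.r.t. the parameters: a row of ones for the intercept,
+    # then the rows of x for each slope coefficient.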
+    a = np.ones(x.shape[-1], float)
+    res = np.concatenate((a, x.ravel()))
+    res.shape = (B.shape[-1], x.shape[-1])
+    return res
+
+
+def _lin_fjd(B, x):
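+    # Jacobian w.r.t. x: the derivative along each input dimension is the
+    # corresponding slope, broadcast to the shape of x.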
+    b = B[1:]
+    b = np.repeat(b, (x.shape[-1],)*b.shape[-1], axis=0)
+    b.shape = x.shape
+    return b
+
+
+def _lin_est(data):
+    # Eh. The answer is analytical, so just return all ones.
+    # Don't return zeros since that will interfere with
+    # ODRPACK's auto-scaling procedures.
+
+    if len(data.x.shape) == 2:
+        m = data.x.shape[0]
+    else:
+        m = 1
+
+    return np.ones((m + 1,), float)
+
+
+def _poly_fcn(B, x, powers):
+    a, b = B[0], B[1:]
+    b.shape = (b.shape[0], 1)
+
+    return a + np.sum(b * np.power(x, powers), axis=0)
+
+
+def _poly_fjacb(B, x, powers):
+    res = np.concatenate((np.ones(x.shape[-1], float),
+                          np.power(x, powers).flat))
+    res.shape = (B.shape[-1], x.shape[-1])
+    return res
+
+
+def _poly_fjacd(B, x, powers):
+    b = B[1:]
+    b.shape = (b.shape[0], 1)
+
+    b = b * powers
+
+    return np.sum(b * np.power(x, powers-1), axis=0)
+
+
+def _exp_fcn(B, x):
+    return B[0] + np.exp(B[1] * x)
+
+
+def _exp_fjd(B, x):
+    return B[1] * np.exp(B[1] * x)
+
+
+def _exp_fjb(B, x):
+    res = np.concatenate((np.ones(x.shape[-1], float), x * np.exp(B[1] * x)))
+    res.shape = (2, x.shape[-1])
+    return res
+
+
+def _exp_est(data):
+    # Eh.
+    return np.array([1., 1.])
+
+
+class _MultilinearModel(Model):
+    r"""
+    Arbitrary-dimensional linear model
+
+    This model is defined by :math:`y=\beta_0 + \sum_{i=1}^m \beta_i x_i`
+
+    Examples
+    --------
+    We can calculate orthogonal distance regression with an
+    arbitrary-dimensional linear model:
+
+    >>> from scipy import odr
+    >>> import numpy as np
+    >>> x = np.linspace(0.0, 5.0)
+    >>> y = 10.0 + 5.0 * x
+    >>> data = odr.Data(x, y)
+    >>> odr_obj = odr.ODR(data, odr.multilinear)
+    >>> output = odr_obj.run()
+    >>> print(output.beta)
+    [10.  5.]
+
+    """
+
+    def __init__(self):
+        super().__init__(
+            _lin_fcn, fjacb=_lin_fjb, fjacd=_lin_fjd, estimate=_lin_est,
+            meta={'name': 'Arbitrary-dimensional Linear',
+                  'equ': 'y = B_0 + Sum[i=1..m, B_i * x_i]',
+                  'TeXequ': r'$y=\beta_0 + \sum_{i=1}^m \beta_i x_i$'})
+
+
+multilinear = _MultilinearModel()
+
+
+def polynomial(order):
+    """
+    Factory function for a general polynomial model.
+
+    Parameters
+    ----------
+    order : int or sequence
+        If an integer, it becomes the order of the polynomial to fit. If
+        a sequence of numbers, then these are the explicit powers in the
+        polynomial.
+        A constant term (power 0) is always included, so don't include 0.
+        Thus, polynomial(n) is equivalent to polynomial(range(1, n+1)).
+
+    Returns
+    -------
+    polynomial : Model instance
+        Model instance.
+
+    Examples
+    --------
+    We can fit an input data using orthogonal distance regression (ODR) with
+    a polynomial model:
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy import odr
+    >>> x = np.linspace(0.0, 5.0)
+    >>> y = np.sin(x)
+    >>> poly_model = odr.polynomial(3)  # using third order polynomial model
+    >>> data = odr.Data(x, y)
+    >>> odr_obj = odr.ODR(data, poly_model)
+    >>> output = odr_obj.run()  # running ODR fitting
+    >>> poly = np.poly1d(output.beta[::-1])
+    >>> poly_y = poly(x)
+    >>> plt.plot(x, y, label="input data")
+    >>> plt.plot(x, poly_y, label="polynomial ODR")
+    >>> plt.legend()
+    >>> plt.show()
+
+    """
+
+    powers = np.asarray(order)
+    if powers.shape == ():
+        # Scalar.
+        powers = np.arange(1, powers + 1)
+
+    powers.shape = (len(powers), 1)
+    len_beta = len(powers) + 1
+
+    def _poly_est(data, len_beta=len_beta):
+        # Eh. Ignore data and return all ones.
+        return np.ones((len_beta,), float)
+
+    return Model(_poly_fcn, fjacd=_poly_fjacd, fjacb=_poly_fjacb,
+                 estimate=_poly_est, extra_args=(powers,),
+                 meta={'name': 'Sorta-general Polynomial',
+                 'equ': 'y = B_0 + Sum[i=1..%s, B_i * (x**i)]' % (len_beta-1),
+                 'TeXequ': r'$y=\beta_0 + \sum_{i=1}^{%s} \beta_i x^i$' %
+                        (len_beta-1)})
+
+
+class _ExponentialModel(Model):
+    r"""
+    Exponential model
+
+    This model is defined by :math:`y=\beta_0 + e^{\beta_1 x}`
+
+    Examples
+    --------
+    We can calculate orthogonal distance regression with an exponential model:
+
+    >>> from scipy import odr
+    >>> import numpy as np
+    >>> x = np.linspace(0.0, 5.0)
+    >>> y = -10.0 + np.exp(0.5*x)
+    >>> data = odr.Data(x, y)
+    >>> odr_obj = odr.ODR(data, odr.exponential)
+    >>> output = odr_obj.run()
+    >>> print(output.beta)
+    [-10.    0.5]
+
+    """
+
+    def __init__(self):
+        super().__init__(_exp_fcn, fjacd=_exp_fjd, fjacb=_exp_fjb,
+                         estimate=_exp_est,
+                         meta={'name': 'Exponential',
+                               'equ': 'y= B_0 + exp(B_1 * x)',
+                               'TeXequ': r'$y=\beta_0 + e^{\beta_1 x}$'})
+
+
+exponential = _ExponentialModel()
+
+
+def _unilin(B, x):
+    return x*B[0] + B[1]
+
+
+def _unilin_fjd(B, x):
+    return np.ones(x.shape, float) * B[0]
+
+
+def _unilin_fjb(B, x):
+    _ret = np.concatenate((x, np.ones(x.shape, float)))
+    _ret.shape = (2,) + x.shape
+
+    return _ret
+
+
+def _unilin_est(data):
+    return (1., 1.)
+
+
+def _quadratic(B, x):
+    return x*(x*B[0] + B[1]) + B[2]
+
+
+def _quad_fjd(B, x):
+    return 2*x*B[0] + B[1]
+
+
+def _quad_fjb(B, x):
+    _ret = np.concatenate((x*x, x, np.ones(x.shape, float)))
+    _ret.shape = (3,) + x.shape
+
+    return _ret
+
+
+def _quad_est(data):
+    return (1., 1., 1.)
+
+
+class _UnilinearModel(Model):
+    r"""
+    Univariate linear model
+
+    This model is defined by :math:`y = \beta_0 x + \beta_1`
+
+    Examples
+    --------
+    We can calculate orthogonal distance regression with a unilinear model:
+
+    >>> from scipy import odr
+    >>> import numpy as np
+    >>> x = np.linspace(0.0, 5.0)
+    >>> y = 1.0 * x + 2.0
+    >>> data = odr.Data(x, y)
+    >>> odr_obj = odr.ODR(data, odr.unilinear)
+    >>> output = odr_obj.run()
+    >>> print(output.beta)
+    [1. 2.]
+
+    """
+
+    def __init__(self):
+        super().__init__(_unilin, fjacd=_unilin_fjd, fjacb=_unilin_fjb,
+                         estimate=_unilin_est,
+                         meta={'name': 'Univariate Linear',
+                               'equ': 'y = B_0 * x + B_1',
+                               'TeXequ': '$y = \\beta_0 x + \\beta_1$'})
+
+
+unilinear = _UnilinearModel()
+
+
+class _QuadraticModel(Model):
+    r"""
+    Quadratic model
+
+    This model is defined by :math:`y = \beta_0 x^2 + \beta_1 x + \beta_2`
+
+    Examples
+    --------
+    We can calculate orthogonal distance regression with a quadratic model:
+
+    >>> from scipy import odr
+    >>> import numpy as np
+    >>> x = np.linspace(0.0, 5.0)
+    >>> y = 1.0 * x ** 2 + 2.0 * x + 3.0
+    >>> data = odr.Data(x, y)
+    >>> odr_obj = odr.ODR(data, odr.quadratic)
+    >>> output = odr_obj.run()
+    >>> print(output.beta)
+    [1. 2. 3.]
+
+    """
+
+    def __init__(self):
+        super().__init__(
+            _quadratic, fjacd=_quad_fjd, fjacb=_quad_fjb, estimate=_quad_est,
+            meta={'name': 'Quadratic',
+                  'equ': 'y = B_0*x**2 + B_1*x + B_2',
+                  'TeXequ': '$y = \\beta_0 x^2 + \\beta_1 x + \\beta_2$'})
+
+
+quadratic = _QuadraticModel()
diff --git a/__packaged__/coreml/.python_dependencies/scipy/odr/_odrpack.py b/__packaged__/coreml/.python_dependencies/scipy/odr/_odrpack.py
new file mode 100644
index 00000000..1164f77d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/odr/_odrpack.py
@@ -0,0 +1,1142 @@
+"""
+Python wrappers for Orthogonal Distance Regression (ODRPACK).
+
+Notes
+=====
+
+* Array formats -- FORTRAN stores its arrays in memory column first, i.e., an
+  array element A(i, j, k) will be next to A(i+1, j, k). In C and, consequently,
+  NumPy, arrays are stored row first: A[i, j, k] is next to A[i, j, k+1]. For
+  efficiency and convenience, the input and output arrays of the fitting
+  function (and its Jacobians) are passed to FORTRAN without transposition.
+  Therefore, where the ODRPACK documentation says that the X array is of shape
+  (N, M), it will be passed to the Python function as an array of shape (M, N).
+  If M==1 (the 1-D case), nothing changes; if M>1, then your
+  Python functions will be dealing with arrays that are indexed in reverse of
+  the ODRPACK documentation. This is no real issue, but watch out for your
+  indexing of the Jacobians: the i,j'th element (@f_i/@x_j) evaluated at the
+  n'th observation will be returned as jacd[j, i, n]. Except for the Jacobians,
+  it really is easier to deal with x[0] and x[1] than with x[:,0] and x[:,1].
+  Of course, you can always use the transpose() function from NumPy explicitly
+  (a small shape sketch is given in the last note below).
+
+* Examples -- See the accompanying file tests/test_odr.py for examples of how
+  to set up fits of your own. Some are taken from the User's Guide; some are
+  from other sources.
+
+* Models -- Some common models are instantiated in the accompanying module
+  models.py. Contributions are welcome.
+
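+* Shape sketch -- a minimal, illustrative example (not from the ODRPACK
+  documentation) of the (M, N) layout described in the first note::
+
+      import numpy as np
+
+      # M == 2 input dimensions, N == 3 observations
+      x = np.array([[1.0, 2.0, 3.0],   # first input dimension, x[0]
+                    [4.0, 5.0, 6.0]])  # second input dimension, x[1]
+      # A model callback receives this array as-is and indexes the input
+      # dimensions as x[0] and x[1] rather than x[:, 0] and x[:, 1].
+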
+Credits
+=======
+
+* Thanks to Arnold Moene and Gerard Vermeulen for fixing some killer bugs.
+
+Robert Kern
+robert.kern@gmail.com
+
+"""
+import os
+
+import numpy
+from warnings import warn
+from scipy.odr import __odrpack
+
+__all__ = ['odr', 'OdrWarning', 'OdrError', 'OdrStop',
+           'Data', 'RealData', 'Model', 'Output', 'ODR',
+           'odr_error', 'odr_stop']
+
+odr = __odrpack.odr
+
+
+class OdrWarning(UserWarning):
+    """
+    Warning indicating that the data passed into
+    ODR will cause problems when passed to 'odr',
+    and that the user should be aware of them.
+    """
+    pass
+
+
+class OdrError(Exception):
+    """
+    Exception indicating an error in fitting.
+
+    This is raised by `~scipy.odr.odr` if an error occurs during fitting.
+    """
+    pass
+
+
+class OdrStop(Exception):
+    """
+    Exception stopping fitting.
+
+    You can raise this exception in your objective function to tell
+    `~scipy.odr.odr` to stop fitting.
+    """
+    pass
+
+
+# Backwards compatibility
+odr_error = OdrError
+odr_stop = OdrStop
+
+__odrpack._set_exceptions(OdrError, OdrStop)
+
+
+def _conv(obj, dtype=None):
+    """ Convert an object to the preferred form for input to the odr routine.
+    """
+
+    if obj is None:
+        return obj
+    else:
+        if dtype is None:
+            obj = numpy.asarray(obj)
+        else:
+            obj = numpy.asarray(obj, dtype)
+        if obj.shape == ():
+            # Scalar.
+            return obj.dtype.type(obj)
+        else:
+            return obj
+
+
+def _report_error(info):
+    """ Interprets the return code of the odr routine.
+
+    Parameters
+    ----------
+    info : int
+        The return code of the odr routine.
+
+    Returns
+    -------
+    problems : list(str)
+        A list of messages about why the odr() routine stopped.
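+
+    Examples
+    --------
+    An illustrative decoding (this is a private helper; the calls below are
+    shown only to document the mapping):
+
+    >>> _report_error(1)
+    ['Sum of squares convergence']
+    >>> _report_error(4)
+    ['Iteration limit reached']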
+    """
+
+    stopreason = ('Blank',
+                  'Sum of squares convergence',
+                  'Parameter convergence',
+                  'Both sum of squares and parameter convergence',
+                  'Iteration limit reached')[info % 5]
+
+    if info >= 5:
+        # questionable results or fatal error
+
+        I = (info//10000 % 10,
+             info//1000 % 10,
+             info//100 % 10,
+             info//10 % 10,
+             info % 10)
+        problems = []
+
+        if I[0] == 0:
+            if I[1] != 0:
+                problems.append('Derivatives possibly not correct')
+            if I[2] != 0:
+                problems.append('Error occurred in callback')
+            if I[3] != 0:
+                problems.append('Problem is not full rank at solution')
+            problems.append(stopreason)
+        elif I[0] == 1:
+            if I[1] != 0:
+                problems.append('N < 1')
+            if I[2] != 0:
+                problems.append('M < 1')
+            if I[3] != 0:
+                problems.append('NP < 1 or NP > N')
+            if I[4] != 0:
+                problems.append('NQ < 1')
+        elif I[0] == 2:
+            if I[1] != 0:
+                problems.append('LDY and/or LDX incorrect')
+            if I[2] != 0:
+                problems.append('LDWE, LD2WE, LDWD, and/or LD2WD incorrect')
+            if I[3] != 0:
+                problems.append('LDIFX, LDSTPD, and/or LDSCLD incorrect')
+            if I[4] != 0:
+                problems.append('LWORK and/or LIWORK too small')
+        elif I[0] == 3:
+            if I[1] != 0:
+                problems.append('STPB and/or STPD incorrect')
+            if I[2] != 0:
+                problems.append('SCLB and/or SCLD incorrect')
+            if I[3] != 0:
+                problems.append('WE incorrect')
+            if I[4] != 0:
+                problems.append('WD incorrect')
+        elif I[0] == 4:
+            problems.append('Error in derivatives')
+        elif I[0] == 5:
+            problems.append('Error occurred in callback')
+        elif I[0] == 6:
+            problems.append('Numerical error detected')
+
+        return problems
+
+    else:
+        return [stopreason]
+
+
+class Data:
+    """
+    The data to fit.
+
+    Parameters
+    ----------
+    x : array_like
+        Observed data for the independent variable of the regression
+    y : array_like, optional
+        If array-like, observed data for the dependent variable of the
+        regression. A scalar input implies that the model to be used on
+        the data is implicit.
+    we : array_like, optional
+        If `we` is a scalar, then that value is used for all data points (and
+        all dimensions of the response variable).
+        If `we` is a rank-1 array of length q (the dimensionality of the
+        response variable), then this vector is the diagonal of the covariant
+        weighting matrix for all data points.
+        If `we` is a rank-1 array of length n (the number of data points), then
+        the i'th element is the weight for the i'th response variable
+        observation (single-dimensional only).
+        If `we` is a rank-2 array of shape (q, q), then this is the full
+        covariant weighting matrix broadcast to each observation.
+        If `we` is a rank-2 array of shape (q, n), then `we[:,i]` is the
+        diagonal of the covariant weighting matrix for the i'th observation.
+        If `we` is a rank-3 array of shape (q, q, n), then `we[:,:,i]` is the
+        full specification of the covariant weighting matrix for each
+        observation.
+        If the fit is implicit, then only a positive scalar value is used.
+    wd : array_like, optional
+        If `wd` is a scalar, then that value is used for all data points
+        (and all dimensions of the input variable). If `wd` = 0, then the
+        covariant weighting matrix for each observation is set to the identity
+        matrix (so each dimension of each observation has the same weight).
+        If `wd` is a rank-1 array of length m (the dimensionality of the input
+        variable), then this vector is the diagonal of the covariant weighting
+        matrix for all data points.
+        If `wd` is a rank-1 array of length n (the number of data points), then
+        the i'th element is the weight for the ith input variable observation
+        (single-dimensional only).
+        If `wd` is a rank-2 array of shape (m, m), then this is the full
+        covariant weighting matrix broadcast to each observation.
+        If `wd` is a rank-2 array of shape (m, n), then `wd[:,i]` is the
+        diagonal of the covariant weighting matrix for the ith observation.
+        If `wd` is a rank-3 array of shape (m, m, n), then `wd[:,:,i]` is the
+        full specification of the covariant weighting matrix for each
+        observation.
+    fix : array_like of ints, optional
+        The `fix` argument is the same as ifixx in the class ODR. It is an
+        array of integers with the same shape as data.x that determines which
+        input observations are treated as fixed. One can use a sequence of
+        length m (the dimensionality of the input observations) to fix some
+        dimensions for all observations. A value of 0 fixes the observation,
+        a value > 0 makes it free.
+    meta : dict, optional
+        Free-form dictionary for metadata.
+
+    Notes
+    -----
+    Each argument is attached to the member of the instance of the same name.
+    The structures of `x` and `y` are described in the Model class docstring.
+    If `y` is an integer, then the Data instance can only be used to fit with
+    implicit models where the dimensionality of the response is equal to the
+    specified value of `y`.
+
+    The `we` argument weights the effect a deviation in the response variable
+    has on the fit. The `wd` argument weights the effect a deviation in the
+    input variable has on the fit. To handle multidimensional inputs and
+    responses easily, the structure of these arguments has the n'th
+    dimensional axis last. These arguments heavily use the structured
+    arguments feature of ODRPACK to conveniently and flexibly support all
+    options. See the ODRPACK User's Guide for a full explanation of how these
+    weights are used in the algorithm. Basically, a higher value of the weight
+    for a particular data point makes a deviation at that point more
+    detrimental to the fit.
+
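+    Examples
+    --------
+    A minimal sketch with scalar weights (the values are illustrative only):
+
+    >>> import numpy as np
+    >>> from scipy import odr
+    >>> x = np.linspace(0.0, 5.0, 20)
+    >>> y = 2.0 * x + 1.0
+    >>> # equal weights for all observations in both variables
+    >>> data = odr.Data(x, y, we=1.0, wd=1.0)
+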
+    """
+
+    def __init__(self, x, y=None, we=None, wd=None, fix=None, meta=None):
+        self.x = _conv(x)
+
+        if not isinstance(self.x, numpy.ndarray):
+            raise ValueError(("Expected an 'ndarray' of data for 'x', "
+                              "but instead got data of type '{name}'").format(
+                    name=type(self.x).__name__))
+
+        self.y = _conv(y)
+        self.we = _conv(we)
+        self.wd = _conv(wd)
+        self.fix = _conv(fix)
+        self.meta = {} if meta is None else meta
+
+    def set_meta(self, **kwds):
+        """ Update the metadata dictionary with the keywords and data provided
+        by keywords.
+
+        Examples
+        --------
+        ::
+
+            data.set_meta(lab="Ph 7; Lab 26", title="Ag110 + Ag108 Decay")
+        """
+
+        self.meta.update(kwds)
+
+    def __getattr__(self, attr):
+        """ Dispatch attribute access to the metadata dictionary.
+        """
+        if attr in self.meta:
+            return self.meta[attr]
+        else:
+            raise AttributeError("'%s' not in metadata" % attr)
+
+
+class RealData(Data):
+    """
+    The data, with weightings as actual standard deviations and/or
+    covariances.
+
+    Parameters
+    ----------
+    x : array_like
+        Observed data for the independent variable of the regression
+    y : array_like, optional
+        If array-like, observed data for the dependent variable of the
+        regression. A scalar input implies that the model to be used on
+        the data is implicit.
+    sx : array_like, optional
+        Standard deviations of `x`.
+        These are converted to weights by dividing 1.0 by their squares.
+    sy : array_like, optional
+        Standard deviations of `y`.
+        These are converted to weights by dividing 1.0 by their squares.
+    covx : array_like, optional
+        Covariance of `x`.
+        `covx` is an array of covariance matrices of `x` and is converted
+        to weights by performing a matrix inversion on each observation's
+        covariance matrix.
+    covy : array_like, optional
+        Covariance of `y`.
+        `covy` is an array of covariance matrices of `y` and is converted
+        to weights by performing a matrix inversion on each observation's
+        covariance matrix.
+    fix : array_like, optional
+        The argument and member fix is the same as Data.fix and ODR.ifixx:
+        It is an array of integers with the same shape as `x` that
+        determines which input observations are treated as fixed. One can
+        use a sequence of length m (the dimensionality of the input
+        observations) to fix some dimensions for all observations. A value
+        of 0 fixes the observation, a value > 0 makes it free.
+    meta : dict, optional
+        Free-form dictionary for metadata.
+
+    Notes
+    -----
+    The weights `wd` and `we` are computed from provided values as follows:
+
+    `sx` and `sy` are converted to weights by dividing 1.0 by their squares.
+    For example, ``wd = 1./numpy.power(sx, 2)``.
+
+    `covx` and `covy` are arrays of covariance matrices and are converted to
+    weights by performing a matrix inversion on each observation's covariance
+    matrix. For example, ``we[i] = numpy.linalg.inv(covy[i])``.
+
+    These arguments follow the same structured argument conventions as `wd`
+    and `we`, only restricted by their natures: `sx` and `sy` can't be
+    rank-3, but `covx` and `covy` can be.
+
+    Only set *either* `sx` or `covx` (not both). Setting both will raise an
+    exception. Same with `sy` and `covy`.
+
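+    Examples
+    --------
+    A minimal sketch assuming uniform standard deviations (the values are
+    illustrative only):
+
+    >>> import numpy as np
+    >>> from scipy import odr
+    >>> x = np.array([0.0, 1.0, 2.0, 3.0])
+    >>> y = np.array([1.1, 2.9, 5.2, 6.8])
+    >>> data = odr.RealData(x, y, sx=0.5, sy=0.5)
+    >>> print(data.wd)  # 1 / sx**2
+    4.0
+    >>> print(data.we)  # 1 / sy**2
+    4.0
+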
+    """
+
+    def __init__(self, x, y=None, sx=None, sy=None, covx=None, covy=None,
+                 fix=None, meta=None):
+        if (sx is not None) and (covx is not None):
+            raise ValueError("cannot set both sx and covx")
+        if (sy is not None) and (covy is not None):
+            raise ValueError("cannot set both sy and covy")
+
+        # Set flags for __getattr__
+        self._ga_flags = {}
+        if sx is not None:
+            self._ga_flags['wd'] = 'sx'
+        else:
+            self._ga_flags['wd'] = 'covx'
+        if sy is not None:
+            self._ga_flags['we'] = 'sy'
+        else:
+            self._ga_flags['we'] = 'covy'
+
+        self.x = _conv(x)
+
+        if not isinstance(self.x, numpy.ndarray):
+            raise ValueError(("Expected an 'ndarray' of data for 'x', "
+                              "but instead got data of type '{name}'").format(
+                    name=type(self.x).__name__))
+
+        self.y = _conv(y)
+        self.sx = _conv(sx)
+        self.sy = _conv(sy)
+        self.covx = _conv(covx)
+        self.covy = _conv(covy)
+        self.fix = _conv(fix)
+        self.meta = {} if meta is None else meta
+
+    def _sd2wt(self, sd):
+        """ Convert standard deviation to weights.
+        """
+
+        return 1./numpy.power(sd, 2)
+
+    def _cov2wt(self, cov):
+        """ Convert covariance matrix(-ices) to weights.
+        """
+
+        from scipy.linalg import inv
+
+        if len(cov.shape) == 2:
+            return inv(cov)
+        else:
+            weights = numpy.zeros(cov.shape, float)
+
+            for i in range(cov.shape[-1]):  # n
+                weights[:,:,i] = inv(cov[:,:,i])
+
+            return weights
+
+    def __getattr__(self, attr):
+        lookup_tbl = {('wd', 'sx'): (self._sd2wt, self.sx),
+                      ('wd', 'covx'): (self._cov2wt, self.covx),
+                      ('we', 'sy'): (self._sd2wt, self.sy),
+                      ('we', 'covy'): (self._cov2wt, self.covy)}
+
+        if attr not in ('wd', 'we'):
+            if attr in self.meta:
+                return self.meta[attr]
+            else:
+                raise AttributeError("'%s' not in metadata" % attr)
+        else:
+            func, arg = lookup_tbl[(attr, self._ga_flags[attr])]
+
+            if arg is not None:
+                return func(*(arg,))
+            else:
+                return None
+
+
+class Model:
+    """
+    The Model class stores information about the function you wish to fit.
+
+    It stores the function itself, at the least, and optionally stores
+    functions which compute the Jacobians used during fitting. Also, one
+    can provide a function that will provide reasonable starting values
+    for the fit parameters possibly given the set of data.
+
+    Parameters
+    ----------
+    fcn : function
+          fcn(beta, x) --> y
+    fjacb : function
+          Jacobian of fcn wrt the fit parameters beta.
+
+          fjacb(beta, x) --> @f_i(x,B)/@B_j
+    fjacd : function
+          Jacobian of fcn wrt the (possibly multidimensional) input
+          variable.
+
+          fjacd(beta, x) --> @f_i(x,B)/@x_j
+    extra_args : tuple, optional
+          If specified, `extra_args` should be a tuple of extra
+          arguments to pass to `fcn`, `fjacb`, and `fjacd`. Each will be called
+          by ``fcn(*((beta, x) + extra_args))``.
+    estimate : array_like of rank-1
+          Provides estimates of the fit parameters from the data
+
+          estimate(data) --> estbeta
+    implicit : boolean
+          If TRUE, specifies that the model
+          is implicit; i.e., `fcn(beta, x)` ~= 0 and there is no y data to fit
+          against
+    meta : dict, optional
+          free-form dictionary of metadata for the model
+
+    Notes
+    -----
+    Note that the `fcn`, `fjacb`, and `fjacd` operate on NumPy arrays and
+    return a NumPy array. The `estimate` object takes an instance of the
+    Data class.
+
+    Here are the rules for the shapes of the argument and return
+    arrays of the callback functions:
+
+    `x`
+        if the input data is single-dimensional, then `x` is a rank-1
+        array; i.e., ``x = array([1, 2, 3, ...]); x.shape = (n,)``.
+        If the input data is multi-dimensional, then `x` is a rank-2 array;
+        i.e., ``x = array([[1, 2, ...], [2, 4, ...]]); x.shape = (m, n)``.
+        In all cases, it has the same shape as the input data array passed to
+        `~scipy.odr.odr`. `m` is the dimensionality of the input data,
+        `n` is the number of observations.
+    `y`
+        if the response variable is single-dimensional, then `y` is a
+        rank-1 array, i.e., ``y = array([2, 4, ...]); y.shape = (n,)``.
+        If the response variable is multi-dimensional, then `y` is a rank-2
+        array, i.e., ``y = array([[2, 4, ...], [3, 6, ...]]); y.shape =
+        (q, n)`` where `q` is the dimensionality of the response variable.
+    `beta`
+        rank-1 array of length `p` where `p` is the number of parameters;
+        i.e. ``beta = array([B_1, B_2, ..., B_p])``
+    `fjacb`
+        if the response variable is multi-dimensional, then the
+        return array's shape is `(q, p, n)` such that ``fjacb(beta, x)[l,k,i] =
+        d f_l(X,B)/d B_k`` evaluated at the ith data point.  If `q == 1`, then
+        the return array is only rank-2 and with shape `(p, n)`.
+    `fjacd`
+        as with fjacb, only the return array's shape is `(q, m, n)`
+        such that ``fjacd(beta, x)[l,j,i] = d f_l(X,B)/d X_j`` at the ith data
+        point.  If `q == 1`, then the return array's shape is `(m, n)`. If
+        `m == 1`, the shape is (q, n). If `m == q == 1`, the shape is `(n,)`.
+
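+    Examples
+    --------
+    A minimal sketch of an explicit straight-line model with an analytic
+    parameter Jacobian (the function names are illustrative only):
+
+    >>> import numpy as np
+    >>> from scipy import odr
+    >>> def line(beta, x):
+    ...     return beta[0] * x + beta[1]
+    >>> def line_fjacb(beta, x):
+    ...     # d f / d beta_k stacked as a (p, n) array, since q == 1
+    ...     return np.vstack([x, np.ones_like(x)])
+    >>> model = odr.Model(line, fjacb=line_fjacb)
+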
+    """
+
+    def __init__(self, fcn, fjacb=None, fjacd=None,
+                 extra_args=None, estimate=None, implicit=0, meta=None):
+
+        self.fcn = fcn
+        self.fjacb = fjacb
+        self.fjacd = fjacd
+
+        if extra_args is not None:
+            extra_args = tuple(extra_args)
+
+        self.extra_args = extra_args
+        self.estimate = estimate
+        self.implicit = implicit
+        self.meta = meta if meta is not None else {}
+
+    def set_meta(self, **kwds):
+        """ Update the metadata dictionary with the keywords and data provided
+        here.
+
+        Examples
+        --------
+        set_meta(name="Exponential", equation="y = a exp(b x) + c")
+        """
+
+        self.meta.update(kwds)
+
+    def __getattr__(self, attr):
+        """ Dispatch attribute access to the metadata.
+        """
+
+        if attr in self.meta:
+            return self.meta[attr]
+        else:
+            raise AttributeError("'%s' not in metadata" % attr)
+
+
+class Output:
+    """
+    The Output class stores the output of an ODR run.
+
+    Attributes
+    ----------
+    beta : ndarray
+        Estimated parameter values, of shape (p,).
+    sd_beta : ndarray
+        Standard deviations of the estimated parameters, of shape (p,).
+    cov_beta : ndarray
+        Covariance matrix of the estimated parameters, of shape (p,p).
+    delta : ndarray, optional
+        Array of estimated errors in input variables, of same shape as `x`.
+    eps : ndarray, optional
+        Array of estimated errors in response variables, of same shape as `y`.
+    xplus : ndarray, optional
+        Array of ``x + delta``.
+    y : ndarray, optional
+        Array ``y = fcn(x + delta)``.
+    res_var : float, optional
+        Residual variance.
+    sum_square : float, optional
+        Sum of squares error.
+    sum_square_delta : float, optional
+        Sum of squares of delta error.
+    sum_square_eps : float, optional
+        Sum of squares of eps error.
+    inv_condnum : float, optional
+        Inverse condition number (cf. ODRPACK UG p. 77).
+    rel_error : float, optional
+        Relative error in function values computed within fcn.
+    work : ndarray, optional
+        Final work array.
+    work_ind : dict, optional
+        Indices into work for drawing out values (cf. ODRPACK UG p. 83).
+    info : int, optional
+        Reason for returning, as output by ODRPACK (cf. ODRPACK UG p. 38).
+    stopreason : list of str, optional
+        `info` interpreted into English.
+
+    Notes
+    -----
+    Takes one argument for initialization, the return value from the
+    function `~scipy.odr.odr`. The attributes listed as "optional" above are
+    only present if `~scipy.odr.odr` was run with ``full_output=1``.
+
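+    A short, illustrative sketch of reading the results (``odr_instance``
+    is a hypothetical, already-configured ODR object)::
+
+        output = odr_instance.run()
+        print(output.beta)     # estimated parameters
+        print(output.sd_beta)  # their standard deviations
+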
+    """
+
+    def __init__(self, output):
+        self.beta = output[0]
+        self.sd_beta = output[1]
+        self.cov_beta = output[2]
+
+        if len(output) == 4:
+            # full output
+            self.__dict__.update(output[3])
+            self.stopreason = _report_error(self.info)
+
+    def pprint(self):
+        """ Pretty-print important results.
+        """
+
+        print('Beta:', self.beta)
+        print('Beta Std Error:', self.sd_beta)
+        print('Beta Covariance:', self.cov_beta)
+        if hasattr(self, 'info'):
+            print('Residual Variance:', self.res_var)
+            print('Inverse Condition #:', self.inv_condnum)
+            print('Reason(s) for Halting:')
+            for r in self.stopreason:
+                print('  %s' % r)
+
+
+class ODR:
+    """
+    The ODR class gathers all information and coordinates the running of the
+    main fitting routine.
+
+    Members of instances of the ODR class have the same names as the arguments
+    to the initialization routine.
+
+    Parameters
+    ----------
+    data : Data class instance
+        instance of the Data class
+    model : Model class instance
+        instance of the Model class
+
+    Other Parameters
+    ----------------
+    beta0 : array_like of rank-1
+        a rank-1 sequence of initial parameter values. Optional if
+        model provides an "estimate" function to estimate these values.
+    delta0 : array_like of floats of rank-1, optional
+        a (double-precision) float array to hold the initial values of
+        the errors in the input variables. Must be same shape as data.x
+    ifixb : array_like of ints of rank-1, optional
+        sequence of integers with the same length as beta0 that determines
+        which parameters are held fixed. A value of 0 fixes the parameter,
+        a value > 0 makes the parameter free.
+    ifixx : array_like of ints with same shape as data.x, optional
+        an array of integers with the same shape as data.x that determines
+        which input observations are treated as fixed. One can use a sequence
+        of length m (the dimensionality of the input observations) to fix some
+        dimensions for all observations. A value of 0 fixes the observation,
+        a value > 0 makes it free.
+    job : int, optional
+        an integer telling ODRPACK what tasks to perform. See p. 31 of the
+        ODRPACK User's Guide if you absolutely must set the value here. Use the
+        method set_job post-initialization for a more readable interface.
+    iprint : int, optional
+        an integer telling ODRPACK what to print. See pp. 33-34 of the
+        ODRPACK User's Guide if you absolutely must set the value here. Use the
+        method set_iprint post-initialization for a more readable interface.
+    errfile : str, optional
+        string with the filename to print ODRPACK errors to. If the file already
+        exists, an error will be thrown. The `overwrite` argument can be used to
+        prevent this. *Do Not Open This File Yourself!*
+    rptfile : str, optional
+        string with the filename to print ODRPACK summaries to. If the file
+        already exists, an error will be thrown. The `overwrite` argument can be
+        used to prevent this. *Do Not Open This File Yourself!*
+    ndigit : int, optional
+        integer specifying the number of reliable digits in the computation
+        of the function.
+    taufac : float, optional
+        float specifying the initial trust region. The default value is 1.
+        The initial trust region is equal to taufac times the length of the
+        first computed Gauss-Newton step. taufac must be no larger than 1.
+    sstol : float, optional
+        float specifying the tolerance for convergence based on the relative
+        change in the sum-of-squares. The default is ``eps**(1/2)`` where eps
+        is the smallest value such that 1 + eps > 1 for double precision
+        computation on the machine. sstol must be less than 1.
+    partol : float, optional
+        float specifying the tolerance for convergence based on the relative
+        change in the estimated parameters. The default value is eps**(2/3) for
+        explicit models and ``eps**(1/3)`` for implicit models. partol must be less
+        than 1.
+    maxit : int, optional
+        integer specifying the maximum number of iterations to perform. For
+        first runs, maxit is the total number of iterations performed and
+        defaults to 50. For restarts, maxit is the number of additional
+        iterations to perform and defaults to 10.
+    stpb : array_like, optional
+        sequence (``len(stpb) == len(beta0)``) of relative step sizes to compute
+        finite difference derivatives wrt the parameters.
+    stpd : optional
+        array (``stpd.shape == data.x.shape`` or ``stpd.shape == (m,)``) of relative
+        step sizes to compute finite difference derivatives wrt the input
+        variable errors. If stpd is a rank-1 array with length m (the
+        dimensionality of the input variable), then the values are broadcast to
+        all observations.
+    sclb : array_like, optional
+        sequence (``len(sclb) == len(beta0)``) of scaling factors for the
+        parameters. The purpose of these scaling factors is to scale all of
+        the parameters to around unity. Normally appropriate scaling factors
+        are computed if this argument is not specified. Specify them yourself
+        if the automatic procedure goes awry.
+    scld : array_like, optional
+        array (``scld.shape == data.x.shape`` or ``scld.shape == (m,)``) of
+        scaling factors for the *errors* in the input variables. Again, these
+        factors are automatically computed if you do not provide them. If
+        ``scld.shape == (m,)``, then the scaling factors are broadcast to all
+        observations.
+    work : ndarray, optional
+        array to hold the double-valued working data for ODRPACK. When
+        restarting, takes the value of self.output.work.
+    iwork : ndarray, optional
+        array to hold the integer-valued working data for ODRPACK. When
+        restarting, takes the value of self.output.iwork.
+    overwrite : bool, optional
+        If it is True, output files defined by `errfile` and `rptfile` are
+        overwritten. The default is False.
+
+    Attributes
+    ----------
+    data : Data
+        The data for this fit
+    model : Model
+        The model used in fit
+    output : Output
+        An instance of the Output class containing all of the returned
+        data from an invocation of ODR.run() or ODR.restart()
+
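+    Examples
+    --------
+    A minimal sketch of a complete fit using one of the prepackaged models
+    (the data values are illustrative only):
+
+    >>> import numpy as np
+    >>> from scipy import odr
+    >>> x = np.linspace(0.0, 5.0, 30)
+    >>> y = 3.0 * x - 1.0
+    >>> data = odr.Data(x, y)
+    >>> fit = odr.ODR(data, odr.unilinear)
+    >>> output = fit.run()
+    >>> print(output.beta)
+    [ 3. -1.]
+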
+    """
+
+    def __init__(self, data, model, beta0=None, delta0=None, ifixb=None,
+        ifixx=None, job=None, iprint=None, errfile=None, rptfile=None,
+        ndigit=None, taufac=None, sstol=None, partol=None, maxit=None,
+        stpb=None, stpd=None, sclb=None, scld=None, work=None, iwork=None,
+        overwrite=False):
+
+        self.data = data
+        self.model = model
+
+        if beta0 is None:
+            if self.model.estimate is not None:
+                self.beta0 = _conv(self.model.estimate(self.data))
+            else:
+                raise ValueError(
+                  "must specify beta0 or provide an estimator with the model"
+                )
+        else:
+            self.beta0 = _conv(beta0)
+
+        if ifixx is None and data.fix is not None:
+            ifixx = data.fix
+
+        if overwrite:
+            # remove output files for overwriting.
+            if rptfile is not None and os.path.exists(rptfile):
+                os.remove(rptfile)
+            if errfile is not None and os.path.exists(errfile):
+                os.remove(errfile)
+
+        self.delta0 = _conv(delta0)
+        # These really are 32-bit integers in FORTRAN (gfortran), even on 64-bit
+        # platforms.
+        # XXX: some other FORTRAN compilers may not agree.
+        self.ifixx = _conv(ifixx, dtype=numpy.int32)
+        self.ifixb = _conv(ifixb, dtype=numpy.int32)
+        self.job = job
+        self.iprint = iprint
+        self.errfile = errfile
+        self.rptfile = rptfile
+        self.ndigit = ndigit
+        self.taufac = taufac
+        self.sstol = sstol
+        self.partol = partol
+        self.maxit = maxit
+        self.stpb = _conv(stpb)
+        self.stpd = _conv(stpd)
+        self.sclb = _conv(sclb)
+        self.scld = _conv(scld)
+        self.work = _conv(work)
+        self.iwork = _conv(iwork)
+
+        self.output = None
+
+        self._check()
+
+    def _check(self):
+        """ Check the inputs for consistency, but don't bother checking things
+        that the builtin function odr will check.
+        """
+
+        x_s = list(self.data.x.shape)
+
+        if isinstance(self.data.y, numpy.ndarray):
+            y_s = list(self.data.y.shape)
+            if self.model.implicit:
+                raise OdrError("an implicit model cannot use response data")
+        else:
+            # implicit model with q == self.data.y
+            y_s = [self.data.y, x_s[-1]]
+            if not self.model.implicit:
+                raise OdrError("an explicit model needs response data")
+            self.set_job(fit_type=1)
+
+        if x_s[-1] != y_s[-1]:
+            raise OdrError("numbers of observations do not match")
+
+        n = x_s[-1]
+
+        if len(x_s) == 2:
+            m = x_s[0]
+        else:
+            m = 1
+        if len(y_s) == 2:
+            q = y_s[0]
+        else:
+            q = 1
+
+        p = len(self.beta0)
+
+        # permissible output array shapes
+
+        fcn_perms = [(q, n)]
+        fjacd_perms = [(q, m, n)]
+        fjacb_perms = [(q, p, n)]
+
+        if q == 1:
+            fcn_perms.append((n,))
+            fjacd_perms.append((m, n))
+            fjacb_perms.append((p, n))
+        if m == 1:
+            fjacd_perms.append((q, n))
+        if p == 1:
+            fjacb_perms.append((q, n))
+        if m == q == 1:
+            fjacd_perms.append((n,))
+        if p == q == 1:
+            fjacb_perms.append((n,))
+
+        # try evaluating the supplied functions to make sure they provide
+        # sensible outputs
+
+        arglist = (self.beta0, self.data.x)
+        if self.model.extra_args is not None:
+            arglist = arglist + self.model.extra_args
+        res = self.model.fcn(*arglist)
+
+        if res.shape not in fcn_perms:
+            raise OdrError(
+                "fcn does not output %s-shaped array (got %s, expected one of "
+                "%s)" % (y_s, res.shape, fcn_perms))
+
+        if self.model.fjacd is not None:
+            res = self.model.fjacd(*arglist)
+            if res.shape not in fjacd_perms:
+                raise OdrError(
+                    "fjacd does not output %s-shaped array" % repr((q, m, n)))
+        if self.model.fjacb is not None:
+            res = self.model.fjacb(*arglist)
+            if res.shape not in fjacb_perms:
+                raise OdrError(
+                    "fjacb does not output %s-shaped array" % repr((q, p, n)))
+
+        # check shape of delta0
+
+        if self.delta0 is not None and self.delta0.shape != self.data.x.shape:
+            raise OdrError(
+                "delta0 is not a %s-shaped array" % repr(self.data.x.shape))
+
+        if self.data.x.size == 0:
+            warn(("Empty data detected for ODR instance. "
+                  "Do not expect any fitting to occur"),
+                 OdrWarning)
+
+    def _gen_work(self):
+        """ Generate a suitable work array if one does not already exist.
+        """
+
+        n = self.data.x.shape[-1]
+        p = self.beta0.shape[0]
+
+        if len(self.data.x.shape) == 2:
+            m = self.data.x.shape[0]
+        else:
+            m = 1
+
+        if self.model.implicit:
+            q = self.data.y
+        elif len(self.data.y.shape) == 2:
+            q = self.data.y.shape[0]
+        else:
+            q = 1
+
+        if self.data.we is None:
+            ldwe = ld2we = 1
+        elif len(self.data.we.shape) == 3:
+            ld2we, ldwe = self.data.we.shape[1:]
+        else:
+            # Okay, this isn't precisely right, but for this calculation,
+            # it's fine
+            ldwe = 1
+            ld2we = self.data.we.shape[1]
+
+        if self.job % 10 < 2:
+            # ODR not OLS
+            lwork = (18 + 11*p + p*p + m + m*m + 4*n*q + 6*n*m + 2*n*q*p +
+                     2*n*q*m + q*q + 5*q + q*(p+m) + ldwe*ld2we*q)
+        else:
+            # OLS not ODR
+            lwork = (18 + 11*p + p*p + m + m*m + 4*n*q + 2*n*m + 2*n*q*p +
+                     5*q + q*(p+m) + ldwe*ld2we*q)
+
+        if isinstance(self.work, numpy.ndarray) and self.work.shape == (lwork,)\
+                and self.work.dtype.str.endswith('f8'):
+            # the existing array is fine
+            return
+        else:
+            self.work = numpy.zeros((lwork,), float)
+
+    def set_job(self, fit_type=None, deriv=None, var_calc=None,
+        del_init=None, restart=None):
+        """
+        Sets the "job" parameter in a hopefully comprehensible way.
+
+        If an argument is not specified, then the value is left as is. The
+        default value from class initialization is for all of these options
+        to be set to 0.
+
+        Parameters
+        ----------
+        fit_type : {0, 1, 2} int
+            0 -> explicit ODR
+
+            1 -> implicit ODR
+
+            2 -> ordinary least-squares
+        deriv : {0, 1, 2, 3} int
+            0 -> forward finite differences
+
+            1 -> central finite differences
+
+            2 -> user-supplied derivatives (Jacobians) with results
+              checked by ODRPACK
+
+            3 -> user-supplied derivatives, no checking
+        var_calc : {0, 1, 2} int
+            0 -> calculate asymptotic covariance matrix and fit
+                 parameter uncertainties (V_B, s_B) using derivatives
+                 recomputed at the final solution
+
+            1 -> calculate V_B and s_B using derivatives from last iteration
+
+            2 -> do not calculate V_B and s_B
+        del_init : {0, 1} int
+            0 -> initial input variable offsets set to 0
+
+            1 -> initial offsets provided by user in variable "work"
+        restart : {0, 1} int
+            0 -> fit is not a restart
+
+            1 -> fit is a restart
+
+        Notes
+        -----
+        The permissible values are different from those given on pg. 31 of the
+        ODRPACK User's Guide only in that one cannot specify numbers greater than
+        the last value for each variable.
+
+        If one does not supply functions to compute the Jacobians, the fitting
+        procedure will change deriv to 0, finite differences, as a default. To
+        initialize the input variable offsets by yourself, set del_init to 1 and
+        put the offsets into the "work" variable correctly.
+
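+        Examples
+        --------
+        An illustrative call (``odr_instance`` names a hypothetical,
+        already-constructed ODR object) selecting an ordinary least-squares
+        fit with user-supplied, unchecked derivatives::
+
+            odr_instance.set_job(fit_type=2, deriv=3)
+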
+        """
+
+        if self.job is None:
+            job_l = [0, 0, 0, 0, 0]
+        else:
+            job_l = [self.job // 10000 % 10,
+                     self.job // 1000 % 10,
+                     self.job // 100 % 10,
+                     self.job // 10 % 10,
+                     self.job % 10]
+
+        if fit_type in (0, 1, 2):
+            job_l[4] = fit_type
+        if deriv in (0, 1, 2, 3):
+            job_l[3] = deriv
+        if var_calc in (0, 1, 2):
+            job_l[2] = var_calc
+        if del_init in (0, 1):
+            job_l[1] = del_init
+        if restart in (0, 1):
+            job_l[0] = restart
+
+        self.job = (job_l[0]*10000 + job_l[1]*1000 +
+                    job_l[2]*100 + job_l[3]*10 + job_l[4])
+
+    def set_iprint(self, init=None, so_init=None,
+        iter=None, so_iter=None, iter_step=None, final=None, so_final=None):
+        """ Set the iprint parameter for the printing of computation reports.
+
+        If any of the arguments are specified here, then they are set in the
+        iprint member. If iprint is not set manually or with this method, then
+        ODRPACK defaults to no printing. If no filename is specified with the
+        member rptfile, then ODRPACK prints to stdout. One can tell ODRPACK to
+        print to stdout in addition to the specified filename by setting the
+        so_* arguments to this function. There is no option to print to stdout
+        but not to a file, since that is achieved simply by not specifying a
+        rptfile filename.
+
+        There are three reports: initialization, iteration, and final reports.
+        They are represented by the arguments init, iter, and final
+        respectively.  The permissible values are 0, 1, and 2 representing "no
+        report", "short report", and "long report" respectively.
+
+        The argument iter_step (0 <= iter_step <= 9) specifies how often to make
+        the iteration report; the report will be made for every iter_step'th
+        iteration starting with iteration one. If iter_step == 0, then no
+        iteration report is made, regardless of the other arguments.
+
+        If the rptfile is None, then any so_* arguments supplied will raise an
+        exception.
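+
+        For example, to request a long final report (``odr_instance`` is a
+        hypothetical ODR object; the report goes to stdout when no rptfile
+        is set)::
+
+            odr_instance.set_iprint(final=2)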
+        """
+        if self.iprint is None:
+            self.iprint = 0
+
+        ip = [self.iprint // 1000 % 10,
+              self.iprint // 100 % 10,
+              self.iprint // 10 % 10,
+              self.iprint % 10]
+
+        # make a list to convert iprint digits to/from argument inputs
+        #                   rptfile, stdout
+        ip2arg = [[0, 0],  # none,  none
+                  [1, 0],  # short, none
+                  [2, 0],  # long,  none
+                  [1, 1],  # short, short
+                  [2, 1],  # long,  short
+                  [1, 2],  # short, long
+                  [2, 2]]  # long,  long
+
+        if (self.rptfile is None and
+            (so_init is not None or
+             so_iter is not None or
+             so_final is not None)):
+            raise OdrError(
+                "no rptfile specified, cannot output to stdout twice")
+
+        iprint_l = ip2arg[ip[0]] + ip2arg[ip[1]] + ip2arg[ip[3]]
+
+        if init is not None:
+            iprint_l[0] = init
+        if so_init is not None:
+            iprint_l[1] = so_init
+        if iter is not None:
+            iprint_l[2] = iter
+        if so_iter is not None:
+            iprint_l[3] = so_iter
+        if final is not None:
+            iprint_l[4] = final
+        if so_final is not None:
+            iprint_l[5] = so_final
+
+        if iter_step in range(10):
+            # 0..9
+            ip[2] = iter_step
+
+        ip[0] = ip2arg.index(iprint_l[0:2])
+        ip[1] = ip2arg.index(iprint_l[2:4])
+        ip[3] = ip2arg.index(iprint_l[4:6])
+
+        self.iprint = ip[0]*1000 + ip[1]*100 + ip[2]*10 + ip[3]
+
+    def run(self):
+        """ Run the fitting routine with all of the information given and with ``full_output=1``.
+
+        Returns
+        -------
+        output : Output instance
+            This object is also assigned to the attribute ``.output``.
+        """
+
+        args = (self.model.fcn, self.beta0, self.data.y, self.data.x)
+        kwds = {'full_output': 1}
+        kwd_l = ['ifixx', 'ifixb', 'job', 'iprint', 'errfile', 'rptfile',
+                 'ndigit', 'taufac', 'sstol', 'partol', 'maxit', 'stpb',
+                 'stpd', 'sclb', 'scld', 'work', 'iwork']
+
+        if self.delta0 is not None and (self.job // 10000) % 10 == 0:
+            # delta0 provided and fit is not a restart
+            self._gen_work()
+
+            d0 = numpy.ravel(self.delta0)
+
+            self.work[:len(d0)] = d0
+
+        # set the kwds from other objects explicitly
+        if self.model.fjacb is not None:
+            kwds['fjacb'] = self.model.fjacb
+        if self.model.fjacd is not None:
+            kwds['fjacd'] = self.model.fjacd
+        if self.data.we is not None:
+            kwds['we'] = self.data.we
+        if self.data.wd is not None:
+            kwds['wd'] = self.data.wd
+        if self.model.extra_args is not None:
+            kwds['extra_args'] = self.model.extra_args
+
+        # implicitly set kwds from self's members
+        for attr in kwd_l:
+            obj = getattr(self, attr)
+            if obj is not None:
+                kwds[attr] = obj
+
+        self.output = Output(odr(*args, **kwds))
+
+        return self.output
+
+    def restart(self, iter=None):
+        """ Restarts the run with iter more iterations.
+
+        Parameters
+        ----------
+        iter : int, optional
+            ODRPACK's default for the number of new iterations is 10.
+
+        Returns
+        -------
+        output : Output instance
+            This object is also assigned to the attribute ``.output``.
+        """
+
+        if self.output is None:
+            raise OdrError("cannot restart: run() has not been called before")
+
+        self.set_job(restart=1)
+        self.work = self.output.work
+        self.iwork = self.output.iwork
+
+        self.maxit = iter
+
+        return self.run()
diff --git a/__packaged__/coreml/.python_dependencies/scipy/odr/models.py b/__packaged__/coreml/.python_dependencies/scipy/odr/models.py
new file mode 100644
index 00000000..bbbdc06b
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/odr/models.py
@@ -0,0 +1,28 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.odr` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _models
+
+__all__ = [  # noqa: F822
+    'Model', 'exponential', 'multilinear', 'unilinear',
+    'quadratic', 'polynomial'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.odr.models is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.odr instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.odr` namespace, "
+                  "the `scipy.odr.models` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_models, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/odr/odrpack.py b/__packaged__/coreml/.python_dependencies/scipy/odr/odrpack.py
new file mode 100644
index 00000000..fee430d5
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/odr/odrpack.py
@@ -0,0 +1,29 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.odr` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _odrpack
+
+__all__ = [  # noqa: F822
+    'odr', 'OdrWarning', 'OdrError', 'OdrStop',
+    'Data', 'RealData', 'Model', 'Output', 'ODR',
+    'odr_error', 'odr_stop'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.odr.odrpack is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.odr instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.odr` namespace, "
+                  "the `scipy.odr.odrpack` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_odrpack, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/odr/tests/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/odr/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/scipy/odr/tests/test_odr.py b/__packaged__/coreml/.python_dependencies/scipy/odr/tests/test_odr.py
new file mode 100644
index 00000000..017a3f23
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/odr/tests/test_odr.py
@@ -0,0 +1,533 @@
+import tempfile
+import shutil
+import os
+
+import numpy as np
+from numpy import pi
+from numpy.testing import (assert_array_almost_equal,
+                           assert_equal, assert_warns)
+import pytest
+from pytest import raises as assert_raises
+
+from scipy.odr import (Data, Model, ODR, RealData, OdrStop, OdrWarning,
+                       multilinear, exponential, unilinear, quadratic,
+                       polynomial)
+
+
+class TestODR:
+
+    # Bad Data for 'x'
+
+    def test_bad_data(self):
+        assert_raises(ValueError, Data, 2, 1)
+        assert_raises(ValueError, RealData, 2, 1)
+
+    # Empty Data for 'x'
+    def empty_data_func(self, B, x):
+        return B[0]*x + B[1]
+
+    def test_empty_data(self):
+        beta0 = [0.02, 0.0]
+        linear = Model(self.empty_data_func)
+
+        empty_dat = Data([], [])
+        assert_warns(OdrWarning, ODR,
+                     empty_dat, linear, beta0=beta0)
+
+        empty_dat = RealData([], [])
+        assert_warns(OdrWarning, ODR,
+                     empty_dat, linear, beta0=beta0)
+
+    # Explicit Example
+
+    def explicit_fcn(self, B, x):
+        ret = B[0] + B[1] * np.power(np.exp(B[2]*x) - 1.0, 2)
+        return ret
+
+    def explicit_fjd(self, B, x):
+        eBx = np.exp(B[2]*x)
+        ret = B[1] * 2.0 * (eBx-1.0) * B[2] * eBx
+        return ret
+
+    def explicit_fjb(self, B, x):
+        eBx = np.exp(B[2]*x)
+        res = np.vstack([np.ones(x.shape[-1]),
+                         np.power(eBx-1.0, 2),
+                         B[1]*2.0*(eBx-1.0)*eBx*x])
+        return res
+
+    def test_explicit(self):
+        explicit_mod = Model(
+            self.explicit_fcn,
+            fjacb=self.explicit_fjb,
+            fjacd=self.explicit_fjd,
+            meta=dict(name='Sample Explicit Model',
+                      ref='ODRPACK UG, pg. 39'),
+        )
+        explicit_dat = Data([0.,0.,5.,7.,7.5,10.,16.,26.,30.,34.,34.5,100.],
+                        [1265.,1263.6,1258.,1254.,1253.,1249.8,1237.,1218.,1220.6,
+                         1213.8,1215.5,1212.])
+        explicit_odr = ODR(explicit_dat, explicit_mod, beta0=[1500.0, -50.0, -0.1],
+                       ifixx=[0,0,1,1,1,1,1,1,1,1,1,0])
+        explicit_odr.set_job(deriv=2)
+        explicit_odr.set_iprint(init=0, iter=0, final=0)
+
+        out = explicit_odr.run()
+        assert_array_almost_equal(
+            out.beta,
+            np.array([1.2646548050648876e+03, -5.4018409956678255e+01,
+                -8.7849712165253724e-02]),
+        )
+        assert_array_almost_equal(
+            out.sd_beta,
+            np.array([1.0349270280543437, 1.583997785262061, 0.0063321988657267]),
+        )
+        assert_array_almost_equal(
+            out.cov_beta,
+            np.array([[4.4949592379003039e-01, -3.7421976890364739e-01,
+                 -8.0978217468468912e-04],
+               [-3.7421976890364739e-01, 1.0529686462751804e+00,
+                 -1.9453521827942002e-03],
+               [-8.0978217468468912e-04, -1.9453521827942002e-03,
+                  1.6827336938454476e-05]]),
+        )
+
+    # Implicit Example
+
+    def implicit_fcn(self, B, x):
+        return (B[2]*np.power(x[0]-B[0], 2) +
+                2.0*B[3]*(x[0]-B[0])*(x[1]-B[1]) +
+                B[4]*np.power(x[1]-B[1], 2) - 1.0)
+
+    def test_implicit(self):
+        implicit_mod = Model(
+            self.implicit_fcn,
+            implicit=1,
+            meta=dict(name='Sample Implicit Model',
+                      ref='ODRPACK UG, pg. 49'),
+        )
+        implicit_dat = Data([
+            [0.5,1.2,1.6,1.86,2.12,2.36,2.44,2.36,2.06,1.74,1.34,0.9,-0.28,
+             -0.78,-1.36,-1.9,-2.5,-2.88,-3.18,-3.44],
+            [-0.12,-0.6,-1.,-1.4,-2.54,-3.36,-4.,-4.75,-5.25,-5.64,-5.97,-6.32,
+             -6.44,-6.44,-6.41,-6.25,-5.88,-5.5,-5.24,-4.86]],
+            1,
+        )
+        implicit_odr = ODR(implicit_dat, implicit_mod,
+            beta0=[-1.0, -3.0, 0.09, 0.02, 0.08])
+
+        out = implicit_odr.run()
+        assert_array_almost_equal(
+            out.beta,
+            np.array([-0.9993809167281279, -2.9310484652026476, 0.0875730502693354,
+                0.0162299708984738, 0.0797537982976416]),
+        )
+        assert_array_almost_equal(
+            out.sd_beta,
+            np.array([0.1113840353364371, 0.1097673310686467, 0.0041060738314314,
+                0.0027500347539902, 0.0034962501532468]),
+        )
+        assert_array_almost_equal(
+            out.cov_beta,
+            np.array([[2.1089274602333052e+00, -1.9437686411979040e+00,
+                  7.0263550868344446e-02, -4.7175267373474862e-02,
+                  5.2515575927380355e-02],
+               [-1.9437686411979040e+00, 2.0481509222414456e+00,
+                 -6.1600515853057307e-02, 4.6268827806232933e-02,
+                 -5.8822307501391467e-02],
+               [7.0263550868344446e-02, -6.1600515853057307e-02,
+                  2.8659542561579308e-03, -1.4628662260014491e-03,
+                  1.4528860663055824e-03],
+               [-4.7175267373474862e-02, 4.6268827806232933e-02,
+                 -1.4628662260014491e-03, 1.2855592885514335e-03,
+                 -1.2692942951415293e-03],
+               [5.2515575927380355e-02, -5.8822307501391467e-02,
+                  1.4528860663055824e-03, -1.2692942951415293e-03,
+                  2.0778813389755596e-03]]),
+        )
+
+    # Multi-variable Example
+
+    def multi_fcn(self, B, x):
+        if (x < 0.0).any():
+            raise OdrStop
+        theta = pi*B[3]/2.
+        ctheta = np.cos(theta)
+        stheta = np.sin(theta)
+        omega = np.power(2.*pi*x*np.exp(-B[2]), B[3])
+        phi = np.arctan2((omega*stheta), (1.0 + omega*ctheta))
+        r = (B[0] - B[1]) * np.power(np.sqrt(np.power(1.0 + omega*ctheta, 2) +
+             np.power(omega*stheta, 2)), -B[4])
+        ret = np.vstack([B[1] + r*np.cos(B[4]*phi),
+                         r*np.sin(B[4]*phi)])
+        return ret
+
+    def test_multi(self):
+        multi_mod = Model(
+            self.multi_fcn,
+            meta=dict(name='Sample Multi-Response Model',
+                      ref='ODRPACK UG, pg. 56'),
+        )
+
+        multi_x = np.array([30.0, 50.0, 70.0, 100.0, 150.0, 200.0, 300.0, 500.0,
+            700.0, 1000.0, 1500.0, 2000.0, 3000.0, 5000.0, 7000.0, 10000.0,
+            15000.0, 20000.0, 30000.0, 50000.0, 70000.0, 100000.0, 150000.0])
+        multi_y = np.array([
+            [4.22, 4.167, 4.132, 4.038, 4.019, 3.956, 3.884, 3.784, 3.713,
+             3.633, 3.54, 3.433, 3.358, 3.258, 3.193, 3.128, 3.059, 2.984,
+             2.934, 2.876, 2.838, 2.798, 2.759],
+            [0.136, 0.167, 0.188, 0.212, 0.236, 0.257, 0.276, 0.297, 0.309,
+             0.311, 0.314, 0.311, 0.305, 0.289, 0.277, 0.255, 0.24, 0.218,
+             0.202, 0.182, 0.168, 0.153, 0.139],
+        ])
+        n = len(multi_x)
+        multi_we = np.zeros((2, 2, n), dtype=float)
+        multi_ifixx = np.ones(n, dtype=int)
+        multi_delta = np.zeros(n, dtype=float)
+
+        multi_we[0,0,:] = 559.6
+        multi_we[1,0,:] = multi_we[0,1,:] = -1634.0
+        multi_we[1,1,:] = 8397.0
+
+        for i in range(n):
+            if multi_x[i] < 100.0:
+                multi_ifixx[i] = 0
+            elif multi_x[i] <= 150.0:
+                pass  # defaults are fine
+            elif multi_x[i] <= 1000.0:
+                multi_delta[i] = 25.0
+            elif multi_x[i] <= 10000.0:
+                multi_delta[i] = 560.0
+            elif multi_x[i] <= 100000.0:
+                multi_delta[i] = 9500.0
+            else:
+                multi_delta[i] = 144000.0
+            if multi_x[i] == 100.0 or multi_x[i] == 150.0:
+                multi_we[:,:,i] = 0.0
+
+        multi_dat = Data(multi_x, multi_y, wd=1e-4/np.power(multi_x, 2),
+            we=multi_we)
+        multi_odr = ODR(multi_dat, multi_mod, beta0=[4.,2.,7.,.4,.5],
+            delta0=multi_delta, ifixx=multi_ifixx)
+        multi_odr.set_job(deriv=1, del_init=1)
+
+        out = multi_odr.run()
+        assert_array_almost_equal(
+            out.beta,
+            np.array([4.3799880305938963, 2.4333057577497703, 8.0028845899503978,
+                0.5101147161764654, 0.5173902330489161]),
+        )
+        assert_array_almost_equal(
+            out.sd_beta,
+            np.array([0.0130625231081944, 0.0130499785273277, 0.1167085962217757,
+                0.0132642749596149, 0.0288529201353984]),
+        )
+        assert_array_almost_equal(
+            out.cov_beta,
+            np.array([[0.0064918418231375, 0.0036159705923791, 0.0438637051470406,
+                -0.0058700836512467, 0.011281212888768],
+               [0.0036159705923791, 0.0064793789429006, 0.0517610978353126,
+                -0.0051181304940204, 0.0130726943624117],
+               [0.0438637051470406, 0.0517610978353126, 0.5182263323095322,
+                -0.0563083340093696, 0.1269490939468611],
+               [-0.0058700836512467, -0.0051181304940204, -0.0563083340093696,
+                 0.0066939246261263, -0.0140184391377962],
+               [0.011281212888768, 0.0130726943624117, 0.1269490939468611,
+                -0.0140184391377962, 0.0316733013820852]]),
+        )
+
+    # Pearson's Data
+    # K. Pearson, Philosophical Magazine, 2, 559 (1901)
+
+    def pearson_fcn(self, B, x):
+        return B[0] + B[1]*x
+
+    def test_pearson(self):
+        p_x = np.array([0.,.9,1.8,2.6,3.3,4.4,5.2,6.1,6.5,7.4])
+        p_y = np.array([5.9,5.4,4.4,4.6,3.5,3.7,2.8,2.8,2.4,1.5])
+        p_sx = np.array([.03,.03,.04,.035,.07,.11,.13,.22,.74,1.])
+        p_sy = np.array([1.,.74,.5,.35,.22,.22,.12,.12,.1,.04])
+
+        p_dat = RealData(p_x, p_y, sx=p_sx, sy=p_sy)
+
+        # Reverse the data to test invariance of results
+        pr_dat = RealData(p_y, p_x, sx=p_sy, sy=p_sx)
+
+        p_mod = Model(self.pearson_fcn, meta=dict(name='Uni-linear Fit'))
+
+        p_odr = ODR(p_dat, p_mod, beta0=[1.,1.])
+        pr_odr = ODR(pr_dat, p_mod, beta0=[1.,1.])
+
+        out = p_odr.run()
+        assert_array_almost_equal(
+            out.beta,
+            np.array([5.4767400299231674, -0.4796082367610305]),
+        )
+        assert_array_almost_equal(
+            out.sd_beta,
+            np.array([0.3590121690702467, 0.0706291186037444]),
+        )
+        assert_array_almost_equal(
+            out.cov_beta,
+            np.array([[0.0854275622946333, -0.0161807025443155],
+               [-0.0161807025443155, 0.003306337993922]]),
+        )
+
+        rout = pr_odr.run()
+        assert_array_almost_equal(
+            rout.beta,
+            np.array([11.4192022410781231, -2.0850374506165474]),
+        )
+        assert_array_almost_equal(
+            rout.sd_beta,
+            np.array([0.9820231665657161, 0.3070515616198911]),
+        )
+        assert_array_almost_equal(
+            rout.cov_beta,
+            np.array([[0.6391799462548782, -0.1955657291119177],
+               [-0.1955657291119177, 0.0624888159223392]]),
+        )
+
+    # Lorentz Peak
+    # The data is taken from one of the undergraduate physics labs I performed.
+
+    def lorentz(self, beta, x):
+        return (beta[0]*beta[1]*beta[2] / np.sqrt(np.power(x*x -
+            beta[2]*beta[2], 2.0) + np.power(beta[1]*x, 2.0)))
+
+    def test_lorentz(self):
+        l_sy = np.array([.29]*18)
+        l_sx = np.array([.000972971,.000948268,.000707632,.000706679,
+            .000706074, .000703918,.000698955,.000456856,
+            .000455207,.000662717,.000654619,.000652694,
+            .000000859202,.00106589,.00106378,.00125483, .00140818,.00241839])
+
+        l_dat = RealData(
+            [3.9094, 3.85945, 3.84976, 3.84716, 3.84551, 3.83964, 3.82608,
+             3.78847, 3.78163, 3.72558, 3.70274, 3.6973, 3.67373, 3.65982,
+             3.6562, 3.62498, 3.55525, 3.41886],
+            [652, 910.5, 984, 1000, 1007.5, 1053, 1160.5, 1409.5, 1430, 1122,
+             957.5, 920, 777.5, 709.5, 698, 578.5, 418.5, 275.5],
+            sx=l_sx,
+            sy=l_sy,
+        )
+        l_mod = Model(self.lorentz, meta=dict(name='Lorentz Peak'))
+        l_odr = ODR(l_dat, l_mod, beta0=(1000., .1, 3.8))
+
+        out = l_odr.run()
+        assert_array_almost_equal(
+            out.beta,
+            np.array([1.4306780846149925e+03, 1.3390509034538309e-01,
+                 3.7798193600109009e+00]),
+        )
+        assert_array_almost_equal(
+            out.sd_beta,
+            np.array([7.3621186811330963e-01, 3.5068899941471650e-04,
+                 2.4451209281408992e-04]),
+        )
+        assert_array_almost_equal(
+            out.cov_beta,
+            np.array([[2.4714409064597873e-01, -6.9067261911110836e-05,
+                 -3.1236953270424990e-05],
+               [-6.9067261911110836e-05, 5.6077531517333009e-08,
+                  3.6133261832722601e-08],
+               [-3.1236953270424990e-05, 3.6133261832722601e-08,
+                  2.7261220025171730e-08]]),
+        )
+
+    def test_ticket_1253(self):
+        def linear(c, x):
+            return c[0]*x+c[1]
+
+        c = [2.0, 3.0]
+        x = np.linspace(0, 10)
+        y = linear(c, x)
+
+        model = Model(linear)
+        data = Data(x, y, wd=1.0, we=1.0)
+        job = ODR(data, model, beta0=[1.0, 1.0])
+        result = job.run()
+        assert_equal(result.info, 2)
+
+    # Verify fix for gh-9140
+
+    def test_ifixx(self):
+        x1 = [-2.01, -0.99, -0.001, 1.02, 1.98]
+        x2 = [3.98, 1.01, 0.001, 0.998, 4.01]
+        fix = np.vstack((np.zeros_like(x1, dtype=int), np.ones_like(x2, dtype=int)))
+        data = Data(np.vstack((x1, x2)), y=1, fix=fix)
+        model = Model(lambda beta, x: x[1, :] - beta[0] * x[0, :]**2., implicit=True)
+
+        odr1 = ODR(data, model, beta0=np.array([1.]))
+        sol1 = odr1.run()
+        odr2 = ODR(data, model, beta0=np.array([1.]), ifixx=fix)
+        sol2 = odr2.run()
+        assert_equal(sol1.beta, sol2.beta)
+
+    # verify bugfix for #11800 in #11802
+    def test_ticket_11800(self):
+        # parameters
+        beta_true = np.array([1.0, 2.3, 1.1, -1.0, 1.3, 0.5])
+        nr_measurements = 10
+
+        std_dev_x = 0.01
+        x_error = np.array([[0.00063445, 0.00515731, 0.00162719, 0.01022866,
+            -0.01624845, 0.00482652, 0.00275988, -0.00714734, -0.00929201, -0.00687301],
+            [-0.00831623, -0.00821211, -0.00203459, 0.00938266, -0.00701829,
+            0.0032169, 0.00259194, -0.00581017, -0.0030283, 0.01014164]])
+
+        std_dev_y = 0.05
+        y_error = np.array([[0.05275304, 0.04519563, -0.07524086, 0.03575642,
+            0.04745194, 0.03806645, 0.07061601, -0.00753604, -0.02592543, -0.02394929],
+            [0.03632366, 0.06642266, 0.08373122, 0.03988822, -0.0092536,
+            -0.03750469, -0.03198903, 0.01642066, 0.01293648, -0.05627085]])
+
+        beta_solution = np.array([
+            2.62920235756665876536e+00, -1.26608484996299608838e+02, 1.29703572775403074502e+02,
+            -1.88560985401185465804e+00, 7.83834160771274923718e+01, -7.64124076838087091801e+01])
+
+        # model's function and Jacobians
+        def func(beta, x):
+            y0 = beta[0] + beta[1] * x[0, :] + beta[2] * x[1, :]
+            y1 = beta[3] + beta[4] * x[0, :] + beta[5] * x[1, :]
+
+            return np.vstack((y0, y1))
+
+        def df_dbeta_odr(beta, x):
+            nr_meas = np.shape(x)[1]
+            zeros = np.zeros(nr_meas)
+            ones = np.ones(nr_meas)
+
+            dy0 = np.array([ones, x[0, :], x[1, :], zeros, zeros, zeros])
+            dy1 = np.array([zeros, zeros, zeros, ones, x[0, :], x[1, :]])
+
+            return np.stack((dy0, dy1))
+
+        def df_dx_odr(beta, x):
+            nr_meas = np.shape(x)[1]
+            ones = np.ones(nr_meas)
+
+            dy0 = np.array([beta[1] * ones, beta[2] * ones])
+            dy1 = np.array([beta[4] * ones, beta[5] * ones])
+            return np.stack((dy0, dy1))
+
+        # do measurements with errors in independent and dependent variables
+        x0_true = np.linspace(1, 10, nr_measurements)
+        x1_true = np.linspace(1, 10, nr_measurements)
+        x_true = np.array([x0_true, x1_true])
+
+        y_true = func(beta_true, x_true)
+
+        x_meas = x_true + x_error
+        y_meas = y_true + y_error
+
+        # estimate model's parameters
+        model_f = Model(func, fjacb=df_dbeta_odr, fjacd=df_dx_odr)
+
+        data = RealData(x_meas, y_meas, sx=std_dev_x, sy=std_dev_y)
+
+        odr_obj = ODR(data, model_f, beta0=0.9 * beta_true, maxit=100)
+        #odr_obj.set_iprint(init=2, iter=0, iter_step=1, final=1)
+        odr_obj.set_job(deriv=3)
+
+        odr_out = odr_obj.run()
+
+        # check results
+        assert_equal(odr_out.info, 1)
+        assert_array_almost_equal(odr_out.beta, beta_solution)
+
+    def test_multilinear_model(self):
+        x = np.linspace(0.0, 5.0)
+        y = 10.0 + 5.0 * x
+        data = Data(x, y)
+        odr_obj = ODR(data, multilinear)
+        output = odr_obj.run()
+        assert_array_almost_equal(output.beta, [10.0, 5.0])
+
+    def test_exponential_model(self):
+        x = np.linspace(0.0, 5.0)
+        y = -10.0 + np.exp(0.5*x)
+        data = Data(x, y)
+        odr_obj = ODR(data, exponential)
+        output = odr_obj.run()
+        assert_array_almost_equal(output.beta, [-10.0, 0.5])
+
+    def test_polynomial_model(self):
+        x = np.linspace(0.0, 5.0)
+        y = 1.0 + 2.0 * x + 3.0 * x ** 2 + 4.0 * x ** 3
+        poly_model = polynomial(3)
+        data = Data(x, y)
+        odr_obj = ODR(data, poly_model)
+        output = odr_obj.run()
+        assert_array_almost_equal(output.beta, [1.0, 2.0, 3.0, 4.0])
+
+    def test_unilinear_model(self):
+        x = np.linspace(0.0, 5.0)
+        y = 1.0 * x + 2.0
+        data = Data(x, y)
+        odr_obj = ODR(data, unilinear)
+        output = odr_obj.run()
+        assert_array_almost_equal(output.beta, [1.0, 2.0])
+
+    def test_quadratic_model(self):
+        x = np.linspace(0.0, 5.0)
+        y = 1.0 * x ** 2 + 2.0 * x + 3.0
+        data = Data(x, y)
+        odr_obj = ODR(data, quadratic)
+        output = odr_obj.run()
+        assert_array_almost_equal(output.beta, [1.0, 2.0, 3.0])
+
+    def test_work_ind(self):
+
+        def func(par, x):
+            b0, b1 = par
+            return b0 + b1 * x
+
+        # generate some data
+        n_data = 4
+        x = np.arange(n_data)
+        y = np.where(x % 2, x + 0.1, x - 0.1)
+        x_err = np.full(n_data, 0.1)
+        y_err = np.full(n_data, 0.1)
+
+        # do the fitting
+        linear_model = Model(func)
+        real_data = RealData(x, y, sx=x_err, sy=y_err)
+        odr_obj = ODR(real_data, linear_model, beta0=[0.4, 0.4])
+        odr_obj.set_job(fit_type=0)
+        out = odr_obj.run()
+
+        sd_ind = out.work_ind['sd']
+        assert_array_almost_equal(out.sd_beta,
+                                  out.work[sd_ind:sd_ind + len(out.sd_beta)])
+
+    @pytest.mark.skipif(True, reason="Fortran I/O prone to crashing so better "
+                                     "not to run this test, see gh-13127")
+    def test_output_file_overwrite(self):
+        """
+        Verify fix for gh-1892
+        """
+        def func(b, x):
+            return b[0] + b[1] * x
+
+        p = Model(func)
+        data = Data(np.arange(10), 12 * np.arange(10))
+        tmp_dir = tempfile.mkdtemp()
+        error_file_path = os.path.join(tmp_dir, "error.dat")
+        report_file_path = os.path.join(tmp_dir, "report.dat")
+        try:
+            ODR(data, p, beta0=[0.1, 13], errfile=error_file_path,
+                rptfile=report_file_path).run()
+            ODR(data, p, beta0=[0.1, 13], errfile=error_file_path,
+                rptfile=report_file_path, overwrite=True).run()
+        finally:
+            # remove output files for clean up
+            shutil.rmtree(tmp_dir)
+
+    def test_odr_model_default_meta(self):
+        def func(b, x):
+            return b[0] + b[1] * x
+
+        p = Model(func)
+        p.set_meta(name='Sample Model Meta', ref='ODRPACK')
+        assert_equal(p.meta, {'name': 'Sample Model Meta', 'ref': 'ODRPACK'})
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize.pxd b/__packaged__/coreml/.python_dependencies/scipy/optimize.pxd
new file mode 100644
index 00000000..2402eeb0
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize.pxd
@@ -0,0 +1 @@
+from .optimize cimport cython_optimize
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/README b/__packaged__/coreml/.python_dependencies/scipy/optimize/README
new file mode 100644
index 00000000..ff3b10c8
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/README
@@ -0,0 +1,87 @@
+From the website for the L-BFGS-B code (at
+http://www.ece.northwestern.edu/~nocedal/lbfgsb.html):
+
+"""
+L-BFGS-B is a limited-memory quasi-Newton code for bound-constrained
+optimization, i.e. for problems where the only constraints are of the
+form l<= x <= u.
+"""
+
+This is a Python wrapper (using F2PY) written by David M. Cooke
+and released as version 0.9 on April 9, 2004.
+The wrapper was slightly modified by Joonas Paalasmaa for the 3.0 version
+in March 2012.
+
+License of L-BFGS-B (Fortran code)
+==================================
+
+The version included here (in lbfgsb.f) is 3.0 (released April 25, 2011). It was
+written by Ciyou Zhu, Richard Byrd, and Jorge Nocedal. It
+carries the following condition for use:
+
+  """
+  This software is freely available, but we expect that all publications
+  describing work using this software, or all commercial products using it,
+  quote at least one of the references given below. This software is released
+  under the BSD License.
+  
+  References
+    * R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for Bound
+      Constrained Optimization, (1995), SIAM Journal on Scientific and
+      Statistical Computing, 16, 5, pp. 1190-1208.
+    * C. Zhu, R. H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B,
+      FORTRAN routines for large scale bound constrained optimization (1997),
+      ACM Transactions on Mathematical Software, 23, 4, pp. 550 - 560.
+    * J.L. Morales and J. Nocedal. L-BFGS-B: Remark on Algorithm 778: L-BFGS-B,
+      FORTRAN routines for large scale bound constrained optimization (2011),
+      ACM Transactions on Mathematical Software, 38, 1.
+  """
+
+The Python wrapper
+==================
+
+This code uses F2PY (http://cens.ioc.ee/projects/f2py2e/) to generate
+the wrapper around the Fortran code.
+
+The Python code and wrapper are copyrighted 2004 by David M. Cooke.
+
+Installation
+============
+
+Make sure you have F2PY, scipy_distutils, and a BLAS library that
+scipy_distutils can find. Then,
+
+$ python setup.py build
+$ python setup.py install
+
+and you're done.
+
+Example usage
+=============
+
+An example of the usage is given at the bottom of the lbfgsb.py file.
+Run it with 'python lbfgsb.py'.
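+
+A minimal sketch of calling the wrapper through the modern SciPy interface
+(the quadratic objective and bounds below are illustrative, not part of the
+original example):
+
+    import numpy as np
+    from scipy.optimize import fmin_l_bfgs_b
+
+    def f_and_grad(x):
+        # f(x) = (x0 - 1)^2 + (x1 + 2)^2, with its analytic gradient
+        f = (x[0] - 1.0) ** 2 + (x[1] + 2.0) ** 2
+        g = np.array([2.0 * (x[0] - 1.0), 2.0 * (x[1] + 2.0)])
+        return f, g
+
+    # bound-constrained: 0 <= x0 <= 5, -1 <= x1 <= 5
+    x, fval, info = fmin_l_bfgs_b(f_and_grad, x0=np.zeros(2),
+                                  bounds=[(0, 5), (-1, 5)])
+    # x ~ [1, -1]: the unconstrained optimum of x1 (-2) is clipped to -1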
+
+License for the Python wrapper
+==============================
+
+Copyright (c) 2004 David M. Cooke 
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/__init__.py
new file mode 100644
index 00000000..32690285
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/__init__.py
@@ -0,0 +1,441 @@
+"""
+=====================================================
+Optimization and root finding (:mod:`scipy.optimize`)
+=====================================================
+
+.. currentmodule:: scipy.optimize
+
+SciPy ``optimize`` provides functions for minimizing (or maximizing)
+objective functions, possibly subject to constraints. It includes
+solvers for nonlinear problems (with support for both local and global
+optimization algorithms), linear programming, constrained
+and nonlinear least-squares, root finding, and curve fitting.
+
+Common functions and objects, shared across different solvers, are:
+
+.. autosummary::
+   :toctree: generated/
+
+   show_options - Show specific options for optimization solvers.
+   OptimizeResult - The optimization result returned by some optimizers.
+   OptimizeWarning - Warning issued when the optimization encounters problems.
+
+
+Optimization
+============
+
+Scalar functions optimization
+-----------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   minimize_scalar - Interface for minimizers of univariate functions
+
+The `minimize_scalar` function supports the following methods:
+
+.. toctree::
+
+   optimize.minimize_scalar-brent
+   optimize.minimize_scalar-bounded
+   optimize.minimize_scalar-golden
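+
+For example, a minimal call (the objective below is illustrative):
+
+>>> from scipy.optimize import minimize_scalar
+>>> res = minimize_scalar(lambda x: (x - 2.0) ** 2, method='brent')
+>>> round(res.x, 6)
+2.0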
+
+Local (multivariate) optimization
+---------------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   minimize - Interface for minimizers of multivariate functions.
+
+The `minimize` function supports the following methods:
+
+.. toctree::
+
+   optimize.minimize-neldermead
+   optimize.minimize-powell
+   optimize.minimize-cg
+   optimize.minimize-bfgs
+   optimize.minimize-newtoncg
+   optimize.minimize-lbfgsb
+   optimize.minimize-tnc
+   optimize.minimize-cobyla
+   optimize.minimize-slsqp
+   optimize.minimize-trustconstr
+   optimize.minimize-dogleg
+   optimize.minimize-trustncg
+   optimize.minimize-trustkrylov
+   optimize.minimize-trustexact
+
+Constraints are passed to the `minimize` function as a single object or
+as a list of objects from the following classes:
+
+.. autosummary::
+   :toctree: generated/
+
+   NonlinearConstraint - Class defining general nonlinear constraints.
+   LinearConstraint - Class defining general linear constraints.
+
+Simple bound constraints are handled separately and there is a special class
+for them:
+
+.. autosummary::
+   :toctree: generated/
+
+   Bounds - Bound constraints.
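+
+For example, a brief sketch of a linearly constrained quadratic (the
+objective, constraint, and bounds are illustrative):
+
+>>> import numpy as np
+>>> from scipy.optimize import minimize, LinearConstraint, Bounds
+>>> con = LinearConstraint([[1, 1]], lb=1)    # x + y >= 1
+>>> bnd = Bounds([0, 0], [1, 1])              # 0 <= x, y <= 1
+>>> res = minimize(lambda z: z @ z, x0=[1.0, 1.0], method='trust-constr',
+...                constraints=[con], bounds=bnd)
+>>> bool(np.allclose(res.x, [0.5, 0.5], atol=1e-4))
+True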
+
+Quasi-Newton strategies implementing the `HessianUpdateStrategy`
+interface can be used to approximate the Hessian in the `minimize`
+function (available only for the 'trust-constr' method). Available
+quasi-Newton methods implementing this interface are:
+
+.. autosummary::
+   :toctree: generated/
+
+   BFGS - Broyden-Fletcher-Goldfarb-Shanno (BFGS) Hessian update strategy.
+   SR1 - Symmetric-rank-1 Hessian update strategy.
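+
+A short sketch of supplying one of these update strategies (the objective
+is illustrative; the gradient is estimated by finite differences here):
+
+>>> import numpy as np
+>>> from scipy.optimize import minimize, BFGS
+>>> fun = lambda z: (z[0] - 1.0) ** 2 + (z[1] + 0.5) ** 2
+>>> res = minimize(fun, x0=[0.0, 0.0], method='trust-constr', hess=BFGS())
+>>> bool(np.allclose(res.x, [1.0, -0.5], atol=1e-4))
+True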
+
+Global optimization
+-------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   basinhopping - Basinhopping stochastic optimizer.
+   brute - Brute force searching optimizer.
+   differential_evolution - Stochastic optimizer using differential evolution.
+
+   shgo - Simplicial homology global optimizer.
+   dual_annealing - Dual annealing stochastic optimizer.
+   direct - DIRECT (Dividing Rectangles) optimizer.
+
+Least-squares and curve fitting
+===============================
+
+Nonlinear least-squares
+-----------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   least_squares - Solve a nonlinear least-squares problem with bounds on the variables.
+
+Linear least-squares
+--------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   nnls - Linear least-squares problem with non-negativity constraint.
+   lsq_linear - Linear least-squares problem with bound constraints.
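+
+For instance, a small non-negative least-squares sketch (the matrix and
+right-hand side are illustrative):
+
+>>> import numpy as np
+>>> from scipy.optimize import nnls
+>>> A = np.array([[1.0, 0.0], [1.0, 1.0]])
+>>> b = np.array([2.0, 1.0])
+>>> x, rnorm = nnls(A, b)   # the unconstrained solution would be [2, -1]
+>>> bool(np.allclose(x, [1.5, 0.0]))
+True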
+
+Curve fitting
+-------------
+
+.. autosummary::
+   :toctree: generated/
+
+   curve_fit -- Fit curve to a set of points.
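+
+A minimal sketch with synthetic, noise-free data, so the fit recovers the
+generating parameters:
+
+>>> import numpy as np
+>>> from scipy.optimize import curve_fit
+>>> def model(x, a, b):
+...     return a * np.exp(-b * x)
+>>> xdata = np.linspace(0, 4, 50)
+>>> popt, pcov = curve_fit(model, xdata, model(xdata, 2.5, 1.3))
+>>> np.round(popt, 3)
+array([2.5, 1.3])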
+
+Root finding
+============
+
+Scalar functions
+----------------
+.. autosummary::
+   :toctree: generated/
+
+   root_scalar - Unified interface for nonlinear solvers of scalar functions.
+   brentq - Brent's method with inverse quadratic interpolation.
+   brenth - Brent method, modified by Harris with hyperbolic extrapolation.
+   ridder - Ridder's method.
+   bisect - Bisection method.
+   newton - Newton's method (also Secant and Halley's methods).
+   toms748 - Alefeld, Potra & Shi Algorithm 748.
+   RootResults - The root finding result returned by some root finders.
+
+The `root_scalar` function supports the following methods:
+
+.. toctree::
+
+   optimize.root_scalar-brentq
+   optimize.root_scalar-brenth
+   optimize.root_scalar-bisect
+   optimize.root_scalar-ridder
+   optimize.root_scalar-newton
+   optimize.root_scalar-toms748
+   optimize.root_scalar-secant
+   optimize.root_scalar-halley
+
+
+
+The table below lists situations and appropriate methods, along with
+*asymptotic* convergence rates per iteration (and per function evaluation)
+for successful convergence to a simple root(*).
+Bisection is the slowest of them all, adding one bit of accuracy for each
+function evaluation, but is guaranteed to converge.
+The other bracketing methods all (eventually) increase the number of accurate
+bits by about 50% for every function evaluation.
+The derivative-based methods, all built on `newton`, can converge quite quickly
+if the initial value is close to the root.  They can also be applied to
+functions defined on (a subset of) the complex plane.
+
++-------------+----------+----------+-----------+-------------+-------------+----------------+
+| Domain of f | Bracket? |    Derivatives?      | Solvers     |        Convergence           |
++             +          +----------+-----------+             +-------------+----------------+
+|             |          | `fprime` | `fprime2` |             | Guaranteed? |  Rate(s)(*)    |
++=============+==========+==========+===========+=============+=============+================+
+| `R`         | Yes      | N/A      | N/A       | - bisection | - Yes       | - 1 "Linear"   |
+|             |          |          |           | - brentq    | - Yes       | - >=1, <= 1.62 |
+|             |          |          |           | - brenth    | - Yes       | - >=1, <= 1.62 |
+|             |          |          |           | - ridder    | - Yes       | - 2.0 (1.41)   |
+|             |          |          |           | - toms748   | - Yes       | - 2.7 (1.65)   |
++-------------+----------+----------+-----------+-------------+-------------+----------------+
+| `R` or `C`  | No       | No       | No        | secant      | No          | 1.62 (1.62)    |
++-------------+----------+----------+-----------+-------------+-------------+----------------+
+| `R` or `C`  | No       | Yes      | No        | newton      | No          | 2.00 (1.41)    |
++-------------+----------+----------+-----------+-------------+-------------+----------------+
+| `R` or `C`  | No       | Yes      | Yes       | halley      | No          | 3.00 (1.44)    |
++-------------+----------+----------+-----------+-------------+-------------+----------------+
+
+.. seealso::
+
+   `scipy.optimize.cython_optimize` -- Typed Cython versions of zeros functions
+
+Fixed point finding:
+
+.. autosummary::
+   :toctree: generated/
+
+   fixed_point - Single-variable fixed-point solver.
+
+Multidimensional
+----------------
+
+.. autosummary::
+   :toctree: generated/
+
+   root - Unified interface for nonlinear solvers of multivariate functions.
+
+The `root` function supports the following methods:
+
+.. toctree::
+
+   optimize.root-hybr
+   optimize.root-lm
+   optimize.root-broyden1
+   optimize.root-broyden2
+   optimize.root-anderson
+   optimize.root-linearmixing
+   optimize.root-diagbroyden
+   optimize.root-excitingmixing
+   optimize.root-krylov
+   optimize.root-dfsane
+
+Linear programming / MILP
+=========================
+
+.. autosummary::
+   :toctree: generated/
+
+   milp -- Mixed integer linear programming.
+   linprog -- Unified interface for minimizers of linear programming problems.
+
+The `linprog` function supports the following methods:
+
+.. toctree::
+
+   optimize.linprog-simplex
+   optimize.linprog-interior-point
+   optimize.linprog-revised_simplex
+   optimize.linprog-highs-ipm
+   optimize.linprog-highs-ds
+   optimize.linprog-highs
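+
+A tiny illustrative problem (all coefficients are made up; the default
+bounds keep both variables non-negative):
+
+>>> from scipy.optimize import linprog
+>>> # maximize x + 2*y subject to x + y <= 4, x >= 0, y >= 0
+>>> res = linprog(c=[-1, -2], A_ub=[[1, 1]], b_ub=[4])
+>>> res.fun
+-8.0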
+
+The simplex, interior-point, and revised simplex methods support callback
+functions, such as:
+
+.. autosummary::
+   :toctree: generated/
+
+   linprog_verbose_callback -- Sample callback function for linprog (simplex).
+
+Assignment problems
+===================
+
+.. autosummary::
+   :toctree: generated/
+
+   linear_sum_assignment -- Solves the linear-sum assignment problem.
+   quadratic_assignment -- Solves the quadratic assignment problem.
+
+The `quadratic_assignment` function supports the following methods:
+
+.. toctree::
+
+   optimize.qap-faq
+   optimize.qap-2opt
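+
+For example, a minimal linear-sum assignment (the cost matrix is
+illustrative):
+
+>>> import numpy as np
+>>> from scipy.optimize import linear_sum_assignment
+>>> cost = np.array([[4, 1, 3], [2, 0, 5], [3, 2, 2]])
+>>> row_ind, col_ind = linear_sum_assignment(cost)
+>>> int(cost[row_ind, col_ind].sum())
+5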
+
+Utilities
+=========
+
+Finite-difference approximation
+-------------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   approx_fprime - Approximate the gradient of a scalar function.
+   check_grad - Check the supplied derivative using finite differences.
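+
+For instance, checking a hand-coded gradient against a finite-difference
+approximation (the function is illustrative):
+
+>>> import numpy as np
+>>> from scipy.optimize import check_grad
+>>> func = lambda x: x[0]**2 - 0.5 * x[1]**3
+>>> grad = lambda x: [2 * x[0], -1.5 * x[1]**2]
+>>> float(check_grad(func, grad, [1.5, -1.5])) < 1e-6
+True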
+
+
+Line search
+-----------
+
+.. autosummary::
+   :toctree: generated/
+
+   bracket - Bracket a minimum, given two starting points.
+   line_search - Return a step that satisfies the strong Wolfe conditions.
+
+Hessian approximation
+---------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   LbfgsInvHessProduct - Linear operator for L-BFGS approximate inverse Hessian.
+   HessianUpdateStrategy - Interface for implementing Hessian update strategies
+
+Benchmark problems
+------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   rosen - The Rosenbrock function.
+   rosen_der - The derivative of the Rosenbrock function.
+   rosen_hess - The Hessian matrix of the Rosenbrock function.
+   rosen_hess_prod - Product of the Rosenbrock Hessian with a vector.
+
+Legacy functions
+================
+
+The functions below are not recommended for use in new scripts;
+all of these methods are accessible via the newer, more consistent
+interfaces provided above.
+
+Optimization
+------------
+
+General-purpose multivariate methods:
+
+.. autosummary::
+   :toctree: generated/
+
+   fmin - Nelder-Mead Simplex algorithm.
+   fmin_powell - Powell's (modified) level set method.
+   fmin_cg - Non-linear (Polak-Ribiere) conjugate gradient algorithm.
+   fmin_bfgs - Quasi-Newton method (Broyden-Fletcher-Goldfarb-Shanno).
+   fmin_ncg - Line-search Newton Conjugate Gradient.
+
+Constrained multivariate methods:
+
+.. autosummary::
+   :toctree: generated/
+
+   fmin_l_bfgs_b - Zhu, Byrd, and Nocedal's constrained optimizer.
+   fmin_tnc - Truncated Newton code.
+   fmin_cobyla - Constrained optimization by linear approximation.
+   fmin_slsqp - Minimization using sequential least-squares programming.
+
+Univariate (scalar) minimization methods:
+
+.. autosummary::
+   :toctree: generated/
+
+   fminbound - Bounded minimization of a scalar function.
+   brent - 1-D function minimization using Brent method.
+   golden - 1-D function minimization using Golden Section method.
+
+Least-squares
+-------------
+
+.. autosummary::
+   :toctree: generated/
+
+   leastsq - Minimize the sum of squares of M equations in N unknowns.
+
+Root finding
+------------
+
+General nonlinear solvers:
+
+.. autosummary::
+   :toctree: generated/
+
+   fsolve - Non-linear multivariable equation solver.
+   broyden1 - Broyden's first method.
+   broyden2 - Broyden's second method.
+
+Large-scale nonlinear solvers:
+
+.. autosummary::
+   :toctree: generated/
+
+   newton_krylov
+   anderson
+
+   BroydenFirst
+   InverseJacobian
+   KrylovJacobian
+
+Simple iteration solvers:
+
+.. autosummary::
+   :toctree: generated/
+
+   excitingmixing
+   linearmixing
+   diagbroyden
+
+"""
+
+from ._optimize import *
+from ._minimize import *
+from ._root import *
+from ._root_scalar import *
+from ._minpack_py import *
+from ._zeros_py import *
+from ._lbfgsb_py import fmin_l_bfgs_b, LbfgsInvHessProduct
+from ._tnc import fmin_tnc
+from ._cobyla_py import fmin_cobyla
+from ._nonlin import *
+from ._slsqp_py import fmin_slsqp
+from ._nnls import nnls
+from ._basinhopping import basinhopping
+from ._linprog import linprog, linprog_verbose_callback
+from ._lsap import linear_sum_assignment
+from ._differentialevolution import differential_evolution
+from ._lsq import least_squares, lsq_linear
+from ._constraints import (NonlinearConstraint,
+                           LinearConstraint,
+                           Bounds)
+from ._hessian_update_strategy import HessianUpdateStrategy, BFGS, SR1
+from ._shgo import shgo
+from ._dual_annealing import dual_annealing
+from ._qap import quadratic_assignment
+from ._direct_py import direct
+from ._milp import milp
+
+# Deprecated namespaces, to be removed in v2.0.0
+from . import (
+    cobyla, lbfgsb, linesearch, minpack, minpack2, moduleTNC, nonlin, optimize,
+    slsqp, tnc, zeros
+)
+
+__all__ = [s for s in dir() if not s.startswith('_')]
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/__nnls.pyi b/__packaged__/coreml/.python_dependencies/scipy/optimize/__nnls.pyi
new file mode 100644
index 00000000..b26243b0
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/__nnls.pyi
@@ -0,0 +1,21 @@
+from __future__ import annotations
+from typing import TYPE_CHECKING, Tuple
+import numpy as np
+
+if TYPE_CHECKING:
+    import numpy.typing as npt
+
+def nnls(
+        a: npt.ArrayLike,
+        mda: int,
+        m: int,
+        n: int,
+        b: npt.ArrayLike,
+        x: npt.ArrayLike,
+        rnorm: float,
+        w: float,
+        zz: float,
+        index_bn: int,
+        mode: int,
+        maxiter: int
+) -> Tuple[npt.ArrayLike, float, int]: ...
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_basinhopping.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_basinhopping.py
new file mode 100644
index 00000000..4e14cd10
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_basinhopping.py
@@ -0,0 +1,741 @@
+"""
+basinhopping: The basinhopping global optimization algorithm
+"""
+import numpy as np
+import math
+import scipy.optimize
+from scipy._lib._util import check_random_state
+
+__all__ = ['basinhopping']
+
+
+class Storage:
+    """
+    Class used to store the lowest energy structure
+    """
+    def __init__(self, minres):
+        self._add(minres)
+
+    def _add(self, minres):
+        self.minres = minres
+        self.minres.x = np.copy(minres.x)
+
+    def update(self, minres):
+        if minres.fun < self.minres.fun:
+            self._add(minres)
+            return True
+        else:
+            return False
+
+    def get_lowest(self):
+        return self.minres
+
+
+class BasinHoppingRunner:
+    """This class implements the core of the basinhopping algorithm.
+
+    x0 : ndarray
+        The starting coordinates.
+    minimizer : callable
+        The local minimizer, with signature ``result = minimizer(x)``.
+        The return value is an `optimize.OptimizeResult` object.
+    step_taking : callable
+        This function displaces the coordinates randomly. Signature should
+        be ``x_new = step_taking(x)``. Note that `x` may be modified in-place.
+    accept_tests : list of callables
+        Each test is passed the kwargs `f_new`, `x_new`, `f_old` and
+        `x_old`. These tests will be used to judge whether or not to accept
+        the step. The acceptable return values are True, False, or ``"force
+        accept"``. If any of the tests return False then the step is rejected.
+        If ``"force accept"``, then this will override any other tests in
+        order to accept the step. This can be used, for example, to forcefully
+        escape from a local minimum that ``basinhopping`` is trapped in.
+    disp : bool, optional
+        Display status messages.
+
+    """
+    def __init__(self, x0, minimizer, step_taking, accept_tests, disp=False):
+        self.x = np.copy(x0)
+        self.minimizer = minimizer
+        self.step_taking = step_taking
+        self.accept_tests = accept_tests
+        self.disp = disp
+
+        self.nstep = 0
+
+        # initialize return object
+        self.res = scipy.optimize.OptimizeResult()
+        self.res.minimization_failures = 0
+
+        # do initial minimization
+        minres = minimizer(self.x)
+        if not minres.success:
+            self.res.minimization_failures += 1
+            if self.disp:
+                print("warning: basinhopping: local minimization failure")
+        self.x = np.copy(minres.x)
+        self.energy = minres.fun
+        if self.disp:
+            print("basinhopping step %d: f %g" % (self.nstep, self.energy))
+
+        # initialize storage class
+        self.storage = Storage(minres)
+
+        if hasattr(minres, "nfev"):
+            self.res.nfev = minres.nfev
+        if hasattr(minres, "njev"):
+            self.res.njev = minres.njev
+        if hasattr(minres, "nhev"):
+            self.res.nhev = minres.nhev
+
+    def _monte_carlo_step(self):
+        """Do one Monte Carlo iteration
+
+        Randomly displace the coordinates, minimize, and decide whether
+        or not to accept the new coordinates.
+        """
+        # Take a random step.  Make a copy of x because the step_taking
+        # algorithm might change x in place
+        x_after_step = np.copy(self.x)
+        x_after_step = self.step_taking(x_after_step)
+
+        # do a local minimization
+        minres = self.minimizer(x_after_step)
+        x_after_quench = minres.x
+        energy_after_quench = minres.fun
+        if not minres.success:
+            self.res.minimization_failures += 1
+            if self.disp:
+                print("warning: basinhopping: local minimization failure")
+
+        if hasattr(minres, "nfev"):
+            self.res.nfev += minres.nfev
+        if hasattr(minres, "njev"):
+            self.res.njev += minres.njev
+        if hasattr(minres, "nhev"):
+            self.res.nhev += minres.nhev
+
+        # accept the move based on self.accept_tests. If any test is False,
+        # then reject the step.  If any test returns the special string
+        # 'force accept', then accept the step regardless. This can be used
+        # to forcefully escape from a local minimum if normal basin hopping
+        # steps are not sufficient.
+        accept = True
+        for test in self.accept_tests:
+            testres = test(f_new=energy_after_quench, x_new=x_after_quench,
+                           f_old=self.energy, x_old=self.x)
+            if testres == 'force accept':
+                accept = True
+                break
+            elif testres is None:
+                raise ValueError("accept_tests must return True, False, or "
+                                 "'force accept'")
+            elif not testres:
+                accept = False
+
+        # Report the result of the acceptance test to the take step class.
+        # This is for adaptive step taking
+        if hasattr(self.step_taking, "report"):
+            self.step_taking.report(accept, f_new=energy_after_quench,
+                                    x_new=x_after_quench, f_old=self.energy,
+                                    x_old=self.x)
+
+        return accept, minres
+
+    def one_cycle(self):
+        """Do one cycle of the basinhopping algorithm
+        """
+        self.nstep += 1
+        new_global_min = False
+
+        accept, minres = self._monte_carlo_step()
+
+        if accept:
+            self.energy = minres.fun
+            self.x = np.copy(minres.x)
+            new_global_min = self.storage.update(minres)
+
+        # print some information
+        if self.disp:
+            self.print_report(minres.fun, accept)
+            if new_global_min:
+                print("found new global minimum on step %d with function"
+                      " value %g" % (self.nstep, self.energy))
+
+        # save some variables as BasinHoppingRunner attributes
+        self.xtrial = minres.x
+        self.energy_trial = minres.fun
+        self.accept = accept
+
+        return new_global_min
+
+    def print_report(self, energy_trial, accept):
+        """print a status update"""
+        minres = self.storage.get_lowest()
+        print("basinhopping step %d: f %g trial_f %g accepted %d "
+              " lowest_f %g" % (self.nstep, self.energy, energy_trial,
+                                accept, minres.fun))
+
+
+class AdaptiveStepsize:
+    """
+    Class to implement adaptive stepsize.
+
+    This class wraps the step taking class and modifies the stepsize to
+    ensure the true acceptance rate is as close as possible to the target.
+
+    Parameters
+    ----------
+    takestep : callable
+        The step taking routine.  Must contain modifiable attribute
+        takestep.stepsize
+    accept_rate : float, optional
+        The target step acceptance rate
+    interval : int, optional
+        Interval for how often to update the stepsize
+    factor : float, optional
+        The step size is multiplied or divided by this factor upon each
+        update.
+    verbose : bool, optional
+        Print information about each update
+
+    """
+    def __init__(self, takestep, accept_rate=0.5, interval=50, factor=0.9,
+                 verbose=True):
+        self.takestep = takestep
+        self.target_accept_rate = accept_rate
+        self.interval = interval
+        self.factor = factor
+        self.verbose = verbose
+
+        self.nstep = 0
+        self.nstep_tot = 0
+        self.naccept = 0
+
+    def __call__(self, x):
+        return self.take_step(x)
+
+    def _adjust_step_size(self):
+        old_stepsize = self.takestep.stepsize
+        accept_rate = float(self.naccept) / self.nstep
+        if accept_rate > self.target_accept_rate:
+            # We're accepting too many steps. This generally means we're
+            # trapped in a basin. Take bigger steps.
+            self.takestep.stepsize /= self.factor
+        else:
+            # We're not accepting enough steps. Take smaller steps.
+            self.takestep.stepsize *= self.factor
+        if self.verbose:
+            print("adaptive stepsize: acceptance rate %f target %f new "
+                  "stepsize %g old stepsize %g" % (accept_rate,
+                  self.target_accept_rate, self.takestep.stepsize,
+                  old_stepsize))
+
+    def take_step(self, x):
+        self.nstep += 1
+        self.nstep_tot += 1
+        if self.nstep % self.interval == 0:
+            self._adjust_step_size()
+        return self.takestep(x)
+
+    def report(self, accept, **kwargs):
+        "called by basinhopping to report the result of the step"
+        if accept:
+            self.naccept += 1
+
+
+class RandomDisplacement:
+    """Add a random displacement of maximum size `stepsize` to each coordinate.
+
+    Calling this updates `x` in-place.
+
+    Parameters
+    ----------
+    stepsize : float, optional
+        Maximum stepsize in any dimension
+    random_gen : {None, int, `numpy.random.Generator`,
+                  `numpy.random.RandomState`}, optional
+
+        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
+        singleton is used.
+        If `seed` is an int, a new ``RandomState`` instance is used,
+        seeded with `seed`.
+        If `seed` is already a ``Generator`` or ``RandomState`` instance then
+        that instance is used.
+
+    """
+
+    def __init__(self, stepsize=0.5, random_gen=None):
+        self.stepsize = stepsize
+        self.random_gen = check_random_state(random_gen)
+
+    def __call__(self, x):
+        x += self.random_gen.uniform(-self.stepsize, self.stepsize,
+                                     np.shape(x))
+        return x
+
+
+class MinimizerWrapper:
+    """
+    wrap a minimizer function as a minimizer class
+    """
+    def __init__(self, minimizer, func=None, **kwargs):
+        self.minimizer = minimizer
+        self.func = func
+        self.kwargs = kwargs
+
+    def __call__(self, x0):
+        if self.func is None:
+            return self.minimizer(x0, **self.kwargs)
+        else:
+            return self.minimizer(self.func, x0, **self.kwargs)
+
+
+class Metropolis:
+    """Metropolis acceptance criterion.
+
+    Parameters
+    ----------
+    T : float
+        The "temperature" parameter for the accept or reject criterion.
+    random_gen : {None, int, `numpy.random.Generator`,
+                  `numpy.random.RandomState`}, optional
+
+        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
+        singleton is used.
+        If `seed` is an int, a new ``RandomState`` instance is used,
+        seeded with `seed`.
+        If `seed` is already a ``Generator`` or ``RandomState`` instance then
+        that instance is used.
+        Random number generator used for acceptance test.
+
+    """
+
+    def __init__(self, T, random_gen=None):
+        # Avoid ZeroDivisionError since "MBH can be regarded as a special case
+        # of the BH framework with the Metropolis criterion, where temperature
+        # T = 0." (Reject all steps that increase energy.)
+        self.beta = 1.0 / T if T != 0 else float('inf')
+        self.random_gen = check_random_state(random_gen)
+
+    def accept_reject(self, energy_new, energy_old):
+        """
+        If new energy is lower than old, it will always be accepted.
+        If new is higher than old, there is a chance it will be accepted,
+        less likely for larger differences.
+        """
+        with np.errstate(invalid='ignore'):
+            # The energy values being fed to Metropolis are 1-length arrays, and if
+            # they are equal, their difference is 0, which gets multiplied by beta,
+            # which is inf, and array([0]) * float('inf') causes
+            #
+            # RuntimeWarning: invalid value encountered in multiply
+            #
+            # Ignore this warning so when the algorithm is on a flat plane, it always
+            # accepts the step, to try to move off the plane.
+            prod = -(energy_new - energy_old) * self.beta
+            w = math.exp(min(0, prod))
+
+        rand = self.random_gen.uniform()
+        return w >= rand
+
+    def __call__(self, **kwargs):
+        """
+        f_new and f_old are mandatory in kwargs
+        """
+        return bool(self.accept_reject(kwargs["f_new"],
+                    kwargs["f_old"]))
+
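+# Illustration of the criterion above (values are hypothetical): with
+# T = 1.0, a trial step that raises the energy by 1.0 is accepted with
+# probability exp(-1.0) ~ 0.37, while any downhill step is always accepted:
+#
+#     met = Metropolis(T=1.0, random_gen=0)
+#     met(f_new=1.0, f_old=2.0)   # downhill -> always True
+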
+
+def basinhopping(func, x0, niter=100, T=1.0, stepsize=0.5,
+                 minimizer_kwargs=None, take_step=None, accept_test=None,
+                 callback=None, interval=50, disp=False, niter_success=None,
+                 seed=None, *, target_accept_rate=0.5, stepwise_factor=0.9):
+    """Find the global minimum of a function using the basin-hopping algorithm.
+
+    Basin-hopping is a two-phase method that combines a global stepping
+    algorithm with local minimization at each step. Designed to mimic
+    the natural process of energy minimization of clusters of atoms, it works
+    well for similar problems with "funnel-like, but rugged" energy landscapes
+    [5]_.
+
+    As the step-taking, step acceptance, and minimization methods are all
+    customizable, this function can also be used to implement other two-phase
+    methods.
+
+    Parameters
+    ----------
+    func : callable ``f(x, *args)``
+        Function to be optimized.  ``args`` can be passed as an optional item
+        in the dict `minimizer_kwargs`
+    x0 : array_like
+        Initial guess.
+    niter : integer, optional
+        The number of basin-hopping iterations. There will be a total of
+        ``niter + 1`` runs of the local minimizer.
+    T : float, optional
+        The "temperature" parameter for the acceptance or rejection criterion.
+        Higher "temperatures" mean that larger jumps in function value will be
+        accepted.  For best results `T` should be comparable to the
+        separation (in function value) between local minima.
+    stepsize : float, optional
+        Maximum step size for use in the random displacement.
+    minimizer_kwargs : dict, optional
+        Extra keyword arguments to be passed to the local minimizer
+        `scipy.optimize.minimize`. Some important options could be:
+
+            method : str
+                The minimization method (e.g. ``"L-BFGS-B"``)
+            args : tuple
+                Extra arguments passed to the objective function (`func`) and
+                its derivatives (Jacobian, Hessian).
+
+    take_step : callable ``take_step(x)``, optional
+        Replace the default step-taking routine with this routine. The default
+        step-taking routine is a random displacement of the coordinates, but
+        other step-taking algorithms may be better for some systems.
+        `take_step` can optionally have the attribute ``take_step.stepsize``.
+        If this attribute exists, then `basinhopping` will adjust
+        ``take_step.stepsize`` in order to try to optimize the global minimum
+        search.
+    accept_test : callable, ``accept_test(f_new=f_new, x_new=x_new, f_old=fold, x_old=x_old)``, optional
+        Define a test which will be used to judge whether to accept the
+        step. This will be used in addition to the Metropolis test based on
+        "temperature" `T`. The acceptable return values are True,
+        False, or ``"force accept"``. If any of the tests return False
+        then the step is rejected. If ``"force accept"`` is returned, it
+        overrides any other tests in order to accept the step. This can be
+        used, for example, to forcefully escape from a local minimum that
+        `basinhopping` is trapped in.
+    callback : callable, ``callback(x, f, accept)``, optional
+        A callback function which will be called for all minima found. ``x``
+        and ``f`` are the coordinates and function value of the trial minimum,
+        and ``accept`` is whether that minimum was accepted. This can
+        be used, for example, to save the lowest N minima found. Also,
+        `callback` can be used to specify a user defined stop criterion by
+        optionally returning True to stop the `basinhopping` routine.
+    interval : integer, optional
+        interval for how often to update the `stepsize`
+    disp : bool, optional
+        Set to True to print status messages
+    niter_success : integer, optional
+        Stop the run if the global minimum candidate remains the same for this
+        number of iterations.
+    seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
+
+        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
+        singleton is used.
+        If `seed` is an int, a new ``RandomState`` instance is used,
+        seeded with `seed`.
+        If `seed` is already a ``Generator`` or ``RandomState`` instance then
+        that instance is used.
+        Specify `seed` for repeatable minimizations. The random numbers
+        generated with this seed only affect the default Metropolis
+        `accept_test` and the default `take_step`. If you supply your own
+        `take_step` and `accept_test`, and these functions use random
+        number generation, then those functions are responsible for the state
+        of their random number generator.
+    target_accept_rate : float, optional
+        The target acceptance rate that is used to adjust the `stepsize`.
+        If the current acceptance rate is greater than the target,
+        then the `stepsize` is increased. Otherwise, it is decreased.
+        Range is (0, 1). Default is 0.5.
+
+        .. versionadded:: 1.8.0
+
+    stepwise_factor : float, optional
+        The `stepsize` is multiplied or divided by this stepwise factor upon
+        each update. Range is (0, 1). Default is 0.9.
+
+        .. versionadded:: 1.8.0
+
+    Returns
+    -------
+    res : OptimizeResult
+        The optimization result represented as a `OptimizeResult` object.
+        Important attributes are: ``x`` the solution array, ``fun`` the value
+        of the function at the solution, and ``message`` which describes the
+        cause of the termination. The ``OptimizeResult`` object returned by the
+        selected minimizer at the lowest minimum is also contained within this
+        object and can be accessed through the ``lowest_optimization_result``
+        attribute.  See `OptimizeResult` for a description of other attributes.
+
+    See Also
+    --------
+    minimize :
+        The local minimization function called once for each basinhopping step.
+        `minimizer_kwargs` is passed to this routine.
+
+    Notes
+    -----
+    Basin-hopping is a stochastic algorithm which attempts to find the global
+    minimum of a smooth scalar function of one or more variables [1]_ [2]_ [3]_
+    [4]_. The algorithm in its current form was described by David Wales and
+    Jonathan Doye [2]_ http://www-wales.ch.cam.ac.uk/.
+
+    The algorithm is iterative with each cycle composed of the following
+    features
+
+    1) random perturbation of the coordinates
+
+    2) local minimization
+
+    3) accept or reject the new coordinates based on the minimized function
+       value
+
+    The acceptance test used here is the Metropolis criterion of standard Monte
+    Carlo algorithms, although there are many other possibilities [3]_.
+
+    This global minimization method has been shown to be extremely efficient
+    for a wide variety of problems in physics and chemistry. It is
+    particularly useful when the function has many minima separated by large
+    barriers. See the `Cambridge Cluster Database
+    <http://www-wales.ch.cam.ac.uk/CCD.html>`_ for databases of molecular
+    systems that have been optimized primarily using basin-hopping. This
+    database includes minimization problems exceeding 300 degrees of freedom.
+
+    See the free software program `GMIN <http://www-wales.ch.cam.ac.uk/GMIN>`_
+    for a Fortran implementation of basin-hopping. This implementation has many
+    variations of the procedure described above, including more
+    advanced step taking algorithms and alternate acceptance criteria.
+
+    For stochastic global optimization there is no way to determine if the true
+    global minimum has actually been found. Instead, as a consistency check,
+    the algorithm can be run from a number of different random starting points
+    to ensure the lowest minimum found in each example has converged to the
+    global minimum. For this reason, `basinhopping` will by default simply
+    run for the number of iterations `niter` and return the lowest minimum
+    found. It is left to the user to ensure that this is in fact the global
+    minimum.
+
+    Choosing `stepsize`:  This is a crucial parameter in `basinhopping` and
+    depends on the problem being solved. The step is chosen uniformly in the
+    region from x0-stepsize to x0+stepsize, in each dimension. Ideally, it
+    should be comparable to the typical separation (in argument values) between
+    local minima of the function being optimized. `basinhopping` will, by
+    default, adjust `stepsize` to find an optimal value, but this may take
+    many iterations. You will get quicker results if you set a sensible
+    initial value for ``stepsize``.
+
+    Choosing `T`: The parameter `T` is the "temperature" used in the
+    Metropolis criterion. Basinhopping steps are always accepted if
+    ``func(xnew) < func(xold)``. Otherwise, they are accepted with
+    probability::
+
+        exp( -(func(xnew) - func(xold)) / T )
+
+    So, for best results, `T` should be comparable to the typical
+    difference (in function values) between local minima. (The height of
+    "walls" between local minima is irrelevant.)
+
+    If `T` is 0, the algorithm becomes Monotonic Basin-Hopping, in which all
+    steps that increase energy are rejected.
+
+    .. versionadded:: 0.12.0
+
+    References
+    ----------
+    .. [1] Wales, David J. 2003, Energy Landscapes, Cambridge University Press,
+        Cambridge, UK.
+    .. [2] Wales, D J, and Doye J P K, Global Optimization by Basin-Hopping and
+        the Lowest Energy Structures of Lennard-Jones Clusters Containing up to
+        110 Atoms.  Journal of Physical Chemistry A, 1997, 101, 5111.
+    .. [3] Li, Z. and Scheraga, H. A., Monte Carlo-minimization approach to the
+        multiple-minima problem in protein folding, Proc. Natl. Acad. Sci. USA,
+        1987, 84, 6611.
+    .. [4] Wales, D. J. and Scheraga, H. A., Global optimization of clusters,
+        crystals, and biomolecules, Science, 1999, 285, 1368.
+    .. [5] Olson, B., Hashmi, I., Molloy, K., and Shehu, A., Basin Hopping as
+        a General and Versatile Optimization Framework for the Characterization
+        of Biological Macromolecules, Advances in Artificial Intelligence,
+        Volume 2012 (2012), Article ID 674832, :doi:`10.1155/2012/674832`
+
+    Examples
+    --------
+    The following example is a 1-D minimization problem, with many
+    local minima superimposed on a parabola.
+
+    >>> import numpy as np
+    >>> from scipy.optimize import basinhopping
+    >>> func = lambda x: np.cos(14.5 * x - 0.3) + (x + 0.2) * x
+    >>> x0 = [1.]
+
+    Basinhopping, internally, uses a local minimization algorithm. We will use
+    the parameter `minimizer_kwargs` to tell basinhopping which algorithm to
+    use and how to set up that minimizer. This parameter will be passed to
+    `scipy.optimize.minimize`.
+
+    >>> minimizer_kwargs = {"method": "BFGS"}
+    >>> ret = basinhopping(func, x0, minimizer_kwargs=minimizer_kwargs,
+    ...                    niter=200)
+    >>> print("global minimum: x = %.4f, f(x) = %.4f" % (ret.x, ret.fun))
+    global minimum: x = -0.1951, f(x) = -1.0009
+
+    Next consider a 2-D minimization problem. Also, this time, we
+    will use gradient information to significantly speed up the search.
+
+    >>> def func2d(x):
+    ...     f = np.cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] +
+    ...                                                            0.2) * x[0]
+    ...     df = np.zeros(2)
+    ...     df[0] = -14.5 * np.sin(14.5 * x[0] - 0.3) + 2. * x[0] + 0.2
+    ...     df[1] = 2. * x[1] + 0.2
+    ...     return f, df
+
+    We'll also use a different local minimization algorithm. Also, we must tell
+    the minimizer that our function returns both energy and gradient (Jacobian).
+
+    >>> minimizer_kwargs = {"method":"L-BFGS-B", "jac":True}
+    >>> x0 = [1.0, 1.0]
+    >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs,
+    ...                    niter=200)
+    >>> print("global minimum: x = [%.4f, %.4f], f(x) = %.4f" % (ret.x[0],
+    ...                                                           ret.x[1],
+    ...                                                           ret.fun))
+    global minimum: x = [-0.1951, -0.1000], f(x) = -1.0109
+
+    Here is an example using a custom step-taking routine. Imagine you want
+    the first coordinate to take larger steps than the rest of the coordinates.
+    This can be implemented like so:
+
+    >>> class MyTakeStep:
+    ...    def __init__(self, stepsize=0.5):
+    ...        self.stepsize = stepsize
+    ...        self.rng = np.random.default_rng()
+    ...    def __call__(self, x):
+    ...        s = self.stepsize
+    ...        x[0] += self.rng.uniform(-2.*s, 2.*s)
+    ...        x[1:] += self.rng.uniform(-s, s, x[1:].shape)
+    ...        return x
+
+    Since ``MyTakeStep.stepsize`` exists, basinhopping will adjust the magnitude
+    of `stepsize` to optimize the search. We'll use the same 2-D function as
+    before
+
+    >>> mytakestep = MyTakeStep()
+    >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs,
+    ...                    niter=200, take_step=mytakestep)
+    >>> print("global minimum: x = [%.4f, %.4f], f(x) = %.4f" % (ret.x[0],
+    ...                                                           ret.x[1],
+    ...                                                           ret.fun))
+    global minimum: x = [-0.1951, -0.1000], f(x) = -1.0109
+
+    Now, let's do an example using a custom callback function which prints the
+    value of every minimum found
+
+    >>> def print_fun(x, f, accepted):
+    ...         print("at minimum %.4f accepted %d" % (f, int(accepted)))
+
+    We'll run it for only 10 basinhopping steps this time.
+
+    >>> rng = np.random.default_rng()
+    >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs,
+    ...                    niter=10, callback=print_fun, seed=rng)
+    at minimum 0.4159 accepted 1
+    at minimum -0.4317 accepted 1
+    at minimum -1.0109 accepted 1
+    at minimum -0.9073 accepted 1
+    at minimum -0.4317 accepted 0
+    at minimum -0.1021 accepted 1
+    at minimum -0.7425 accepted 1
+    at minimum -0.9073 accepted 1
+    at minimum -0.4317 accepted 0
+    at minimum -0.7425 accepted 1
+    at minimum -0.9073 accepted 1
+
+    The minimum at -1.0109 is actually the global minimum, found already on the
+    8th iteration.
+
+    """
+    if target_accept_rate <= 0. or target_accept_rate >= 1.:
+        raise ValueError('target_accept_rate has to be in range (0, 1)')
+    if stepwise_factor <= 0. or stepwise_factor >= 1.:
+        raise ValueError('stepwise_factor has to be in range (0, 1)')
+
+    x0 = np.array(x0)
+
+    # set up the np.random generator
+    rng = check_random_state(seed)
+
+    # set up minimizer
+    if minimizer_kwargs is None:
+        minimizer_kwargs = dict()
+    wrapped_minimizer = MinimizerWrapper(scipy.optimize.minimize, func,
+                                         **minimizer_kwargs)
+
+    # set up step-taking algorithm
+    if take_step is not None:
+        if not callable(take_step):
+            raise TypeError("take_step must be callable")
+        # if take_step.stepsize exists then use AdaptiveStepsize to control
+        # take_step.stepsize
+        if hasattr(take_step, "stepsize"):
+            take_step_wrapped = AdaptiveStepsize(
+                take_step, interval=interval,
+                accept_rate=target_accept_rate,
+                factor=stepwise_factor,
+                verbose=disp)
+        else:
+            take_step_wrapped = take_step
+    else:
+        # use default
+        displace = RandomDisplacement(stepsize=stepsize, random_gen=rng)
+        take_step_wrapped = AdaptiveStepsize(displace, interval=interval,
+                                             accept_rate=target_accept_rate,
+                                             factor=stepwise_factor,
+                                             verbose=disp)
+
+    # set up accept tests
+    accept_tests = []
+    if accept_test is not None:
+        if not callable(accept_test):
+            raise TypeError("accept_test must be callable")
+        accept_tests = [accept_test]
+
+    # use default
+    metropolis = Metropolis(T, random_gen=rng)
+    accept_tests.append(metropolis)
+
+    if niter_success is None:
+        niter_success = niter + 2
+
+    bh = BasinHoppingRunner(x0, wrapped_minimizer, take_step_wrapped,
+                            accept_tests, disp=disp)
+
+    # The wrapped minimizer is called once during construction of
+    # BasinHoppingRunner, so run the callback
+    if callable(callback):
+        callback(bh.storage.minres.x, bh.storage.minres.fun, True)
+
+    # start main iteration loop
+    count, i = 0, 0
+    message = ["requested number of basinhopping iterations completed"
+               " successfully"]
+    for i in range(niter):
+        new_global_min = bh.one_cycle()
+
+        if callable(callback):
+            # should we pass a copy of x?
+            val = callback(bh.xtrial, bh.energy_trial, bh.accept)
+            if val is not None:
+                if val:
+                    message = ["callback function requested stop early by"
+                               "returning True"]
+                    break
+
+        count += 1
+        if new_global_min:
+            count = 0
+        elif count > niter_success:
+            message = ["success condition satisfied"]
+            break
+
+    # prepare return object
+    res = bh.res
+    res.lowest_optimization_result = bh.storage.get_lowest()
+    res.x = np.copy(res.lowest_optimization_result.x)
+    res.fun = res.lowest_optimization_result.fun
+    res.message = message
+    res.nit = i + 1
+    res.success = res.lowest_optimization_result.success
+    return res
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_cobyla_py.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_cobyla_py.py
new file mode 100644
index 00000000..fefe85d5
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_cobyla_py.py
@@ -0,0 +1,293 @@
+"""
+Interface to Constrained Optimization By Linear Approximation
+
+Functions
+---------
+.. autosummary::
+   :toctree: generated/
+
+    fmin_cobyla
+
+"""
+
+import functools
+from threading import RLock
+
+import numpy as np
+from scipy.optimize import _cobyla as cobyla
+from ._optimize import OptimizeResult, _check_unknown_options
+# `itertools.izip` existed only on Python 2; the builtin `zip` is its lazy
+# Python 3 equivalent.
+izip = zip
+
+__all__ = ['fmin_cobyla']
+
+# Workaround as _cobyla.minimize is not threadsafe
+# due to an unknown f2py bug and can segfault,
+# see gh-9658.
+_module_lock = RLock()
+def synchronized(func):
+    @functools.wraps(func)
+    def wrapper(*args, **kwargs):
+        with _module_lock:
+            return func(*args, **kwargs)
+    return wrapper
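+
+# Illustration (not part of the module): every function decorated with
+# @synchronized shares _module_lock, so concurrent callers are serialized.
+# Using an RLock rather than a plain Lock lets the synchronized fmin_cobyla
+# call the equally synchronized _minimize_cobyla on the same thread without
+# deadlocking.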
+
+@synchronized
+def fmin_cobyla(func, x0, cons, args=(), consargs=None, rhobeg=1.0,
+                rhoend=1e-4, maxfun=1000, disp=None, catol=2e-4,
+                *, callback=None):
+    """
+    Minimize a function using the Constrained Optimization By Linear
+    Approximation (COBYLA) method. This method wraps a FORTRAN
+    implementation of the algorithm.
+
+    Parameters
+    ----------
+    func : callable
+        Function to minimize. In the form func(x, \\*args).
+    x0 : ndarray
+        Initial guess.
+    cons : sequence
+        Constraint functions; must all be ``>=0`` (a single function
+        if only 1 constraint). Each function takes the parameters `x`
+        as its first argument, and it can return either a single number or
+        an array or list of numbers.
+    args : tuple, optional
+        Extra arguments to pass to function.
+    consargs : tuple, optional
+        Extra arguments to pass to constraint functions (default of None means
+        use same extra arguments as those passed to func).
+        Use ``()`` for no extra arguments.
+    rhobeg : float, optional
+        Reasonable initial changes to the variables.
+    rhoend : float, optional
+        Final accuracy in the optimization (not precisely guaranteed). This
+        is a lower bound on the size of the trust region.
+    disp : {0, 1, 2, 3}, optional
+        Controls the frequency of output; 0 implies no output.
+    maxfun : int, optional
+        Maximum number of function evaluations.
+    catol : float, optional
+        Absolute tolerance for constraint violations.
+    callback : callable, optional
+        Called after each iteration, as ``callback(x)``, where ``x`` is the
+        current parameter vector.
+
+    Returns
+    -------
+    x : ndarray
+        The argument that minimises `f`.
+
+    See also
+    --------
+    minimize: Interface to minimization algorithms for multivariate
+        functions. See the 'COBYLA' `method` in particular.
+
+    Notes
+    -----
+    This algorithm is based on linear approximations to the objective
+    function and each constraint. We briefly describe the algorithm.
+
+    Suppose the function is being minimized over k variables. At the
+    jth iteration the algorithm has k+1 points v_1, ..., v_(k+1),
+    an approximate solution x_j, and a radius RHO_j. The algorithm
+    constructs affine (i.e., linear plus a constant) approximations to
+    the objective function and constraint functions that agree with the
+    true function values at the k+1 points v_1, ..., v_(k+1).
+    This gives a linear program to solve (where the linear approximations
+    of the constraint functions are constrained to be non-negative).
+
+    However, the linear approximations are likely only good
+    approximations near the current simplex, so the linear program is
+    given the further requirement that the solution, which
+    will become x_(j+1), must be within RHO_j from x_j. RHO_j only
+    decreases, never increases. The initial RHO_j is rhobeg and the
+    final RHO_j is rhoend. In this way COBYLA's iterations behave
+    like a trust region algorithm.
+
+    Additionally, the linear program may be inconsistent, or the
+    approximation may give poor improvement. For details about
+    how these issues are resolved, as well as how the points v_i are
+    updated, refer to the source code or the references below.
+
+
+    References
+    ----------
+    Powell M.J.D. (1994), "A direct search optimization method that models
+    the objective and constraint functions by linear interpolation.", in
+    Advances in Optimization and Numerical Analysis, eds. S. Gomez and
+    J-P Hennart, Kluwer Academic (Dordrecht), pp. 51-67
+
+    Powell M.J.D. (1998), "Direct search algorithms for optimization
+    calculations", Acta Numerica 7, 287-336
+
+    Powell M.J.D. (2007), "A view of algorithms for optimization without
+    derivatives", Cambridge University Technical Report DAMTP 2007/NA03
+
+
+    Examples
+    --------
+    Minimize the objective function f(x,y) = x*y subject
+    to the constraints x**2 + y**2 < 1 and y > 0::
+
+        >>> def objective(x):
+        ...     return x[0]*x[1]
+        ...
+        >>> def constr1(x):
+        ...     return 1 - (x[0]**2 + x[1]**2)
+        ...
+        >>> def constr2(x):
+        ...     return x[1]
+        ...
+        >>> from scipy.optimize import fmin_cobyla
+        >>> fmin_cobyla(objective, [0.0, 0.1], [constr1, constr2], rhoend=1e-7)
+        array([-0.70710685,  0.70710671])
+
+    The exact solution is (-sqrt(2)/2, sqrt(2)/2).
+
+    """
+    err = "cons must be a sequence of callable functions or a single"\
+          " callable function."
+    try:
+        len(cons)
+    except TypeError as e:
+        if callable(cons):
+            cons = [cons]
+        else:
+            raise TypeError(err) from e
+    else:
+        for thisfunc in cons:
+            if not callable(thisfunc):
+                raise TypeError(err)
+
+    if consargs is None:
+        consargs = args
+
+    # build constraints
+    con = tuple({'type': 'ineq', 'fun': c, 'args': consargs} for c in cons)
+
+    # options
+    opts = {'rhobeg': rhobeg,
+            'tol': rhoend,
+            'disp': disp,
+            'maxiter': maxfun,
+            'catol': catol,
+            'callback': callback}
+
+    sol = _minimize_cobyla(func, x0, args, constraints=con,
+                           **opts)
+    if disp and not sol['success']:
+        print("COBYLA failed to find a solution: %s" % (sol.message,))
+    return sol['x']
+
+@synchronized
+def _minimize_cobyla(fun, x0, args=(), constraints=(),
+                     rhobeg=1.0, tol=1e-4, maxiter=1000,
+                     disp=False, catol=2e-4, callback=None,
+                     **unknown_options):
+    """
+    Minimize a scalar function of one or more variables using the
+    Constrained Optimization BY Linear Approximation (COBYLA) algorithm.
+
+    Options
+    -------
+    rhobeg : float
+        Reasonable initial changes to the variables.
+    tol : float
+        Final accuracy in the optimization (not precisely guaranteed).
+        This is a lower bound on the size of the trust region.
+    disp : bool
+        Set to True to print convergence messages. If False,
+        `verbosity` is ignored and set to 0.
+    maxiter : int
+        Maximum number of function evaluations.
+    catol : float
+        Tolerance (absolute) for constraint violations.
+
+    """
+    _check_unknown_options(unknown_options)
+    maxfun = maxiter
+    rhoend = tol
+    iprint = int(bool(disp))
+
+    # check constraints
+    if isinstance(constraints, dict):
+        constraints = (constraints, )
+
+    for ic, con in enumerate(constraints):
+        # check type
+        try:
+            ctype = con['type'].lower()
+        except KeyError as e:
+            raise KeyError('Constraint %d has no type defined.' % ic) from e
+        except TypeError as e:
+            raise TypeError('Constraints must be defined using a '
+                            'dictionary.') from e
+        except AttributeError as e:
+            raise TypeError("Constraint's type must be a string.") from e
+        else:
+            if ctype != 'ineq':
+                raise ValueError("Constraints of type '%s' not handled by "
+                                 "COBYLA." % con['type'])
+
+        # check function
+        if 'fun' not in con:
+            raise KeyError('Constraint %d has no function defined.' % ic)
+
+        # check extra arguments
+        if 'args' not in con:
+            con['args'] = ()
+
+    # m is the total number of constraint values
+    # it takes into account that some constraints may be vector-valued
+    cons_lengths = []
+    for c in constraints:
+        f = c['fun'](x0, *c['args'])
+        try:
+            cons_length = len(f)
+        except TypeError:
+            cons_length = 1
+        cons_lengths.append(cons_length)
+    m = sum(cons_lengths)
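+    # Illustration: with one scalar constraint and one returning a length-3
+    # array, cons_lengths == [1, 3] and m == 4 constraint values in total.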
+
+    def calcfc(x, con):
+        f = fun(np.copy(x), *args)
+        i = 0
+        for size, c in izip(cons_lengths, constraints):
+            con[i: i + size] = c['fun'](x, *c['args'])
+            i += size
+        return f
+
+    def wrapped_callback(x):
+        if callback is not None:
+            callback(np.copy(x))
+
+    info = np.zeros(4, np.float64)
+    xopt, info = cobyla.minimize(calcfc, m=m, x=np.copy(x0), rhobeg=rhobeg,
+                                 rhoend=rhoend, iprint=iprint, maxfun=maxfun,
+                                 dinfo=info, callback=wrapped_callback)
+
+    if info[3] > catol:
+        # Check constraint violation
+        info[0] = 4
+
+    return OptimizeResult(x=xopt,
+                          status=int(info[0]),
+                          success=info[0] == 1,
+                          message={1: 'Optimization terminated successfully.',
+                                   2: 'Maximum number of function evaluations '
+                                      'has been exceeded.',
+                                   3: 'Rounding errors are becoming damaging '
+                                      'in COBYLA subroutine.',
+                                   4: 'Did not converge to a solution '
+                                      'satisfying the constraints. See '
+                                      '`maxcv` for magnitude of violation.',
+                                   5: 'NaN result encountered.'
+                                   }.get(info[0], 'Unknown exit status.'),
+                          nfev=int(info[1]),
+                          fun=info[2],
+                          maxcv=info[3])
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_constraints.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_constraints.py
new file mode 100644
index 00000000..edd69b72
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_constraints.py
@@ -0,0 +1,570 @@
+"""Constraints definition for minimize."""
+import numpy as np
+from ._hessian_update_strategy import BFGS
+from ._differentiable_functions import (
+    VectorFunction, LinearVectorFunction, IdentityVectorFunction)
+from ._optimize import OptimizeWarning
+from warnings import warn, catch_warnings, simplefilter
+from numpy.testing import suppress_warnings
+from scipy.sparse import issparse
+
+
+def _arr_to_scalar(x):
+    # If x is a numpy array, return x.item().  This will
+    # fail if the array has more than one element.
+    return x.item() if isinstance(x, np.ndarray) else x
+
+
+class NonlinearConstraint:
+    """Nonlinear constraint on the variables.
+
+    The constraint has the general inequality form::
+
+        lb <= fun(x) <= ub
+
+    Here the vector of independent variables x is passed as ndarray of shape
+    (n,) and ``fun`` returns a vector with m components.
+
+    It is possible to use equal bounds to represent an equality constraint or
+    infinite bounds to represent a one-sided constraint.
+
+    Parameters
+    ----------
+    fun : callable
+        The function defining the constraint.
+        The signature is ``fun(x) -> array_like, shape (m,)``.
+    lb, ub : array_like
+        Lower and upper bounds on the constraint. Each array must have the
+        shape (m,) or be a scalar; in the latter case a bound will be the same
+        for all components of the constraint. Use ``np.inf`` with an
+        appropriate sign to specify a one-sided constraint.
+        Set components of `lb` and `ub` equal to represent an equality
+        constraint. Note that you can mix constraints of different types:
+        interval, one-sided or equality, by setting different components of
+        `lb` and `ub` as necessary.
+    jac : {callable,  '2-point', '3-point', 'cs'}, optional
+        Method of computing the Jacobian matrix (an m-by-n matrix,
+        where element (i, j) is the partial derivative of f[i] with
+        respect to x[j]).  The keywords {'2-point', '3-point',
+        'cs'} select a finite difference scheme for the numerical estimation.
+        A callable must have the following signature:
+        ``jac(x) -> {ndarray, sparse matrix}, shape (m, n)``.
+        Default is '2-point'.
+    hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy, None}, optional
+        Method for computing the Hessian matrix. The keywords
+        {'2-point', '3-point', 'cs'} select a finite difference scheme for
+        numerical  estimation.  Alternatively, objects implementing
+        `HessianUpdateStrategy` interface can be used to approximate the
+        Hessian. Currently available implementations are:
+
+            - `BFGS` (default option)
+            - `SR1`
+
+        A callable must return the Hessian matrix of ``dot(fun, v)`` and
+        must have the following signature:
+        ``hess(x, v) -> {LinearOperator, sparse matrix, array_like}, shape (n, n)``.
+        Here ``v`` is ndarray with shape (m,) containing Lagrange multipliers.
+    keep_feasible : array_like of bool, optional
+        Whether to keep the constraint components feasible throughout
+        iterations. A single value sets this property for all components.
+        Default is False. Has no effect for equality constraints.
+    finite_diff_rel_step : None or array_like, optional
+        Relative step size for the finite difference approximation. Default is
+        None, which will select a reasonable value automatically depending
+        on a finite difference scheme.
+    finite_diff_jac_sparsity : {None, array_like, sparse matrix}, optional
+        Defines the sparsity structure of the Jacobian matrix for finite
+        difference estimation, its shape must be (m, n). If the Jacobian has
+        only few non-zero elements in *each* row, providing the sparsity
+        structure will greatly speed up the computations. A zero entry means
+        that a corresponding element in the Jacobian is identically zero.
+        If provided, forces the use of 'lsmr' trust-region solver.
+        If None (default) then dense differencing will be used.
+
+    Notes
+    -----
+    Finite difference schemes {'2-point', '3-point', 'cs'} may be used for
+    approximating either the Jacobian or the Hessian. We, however, do not allow
+    its use for approximating both simultaneously. Hence whenever the Jacobian
+    is estimated via finite-differences, we require the Hessian to be estimated
+    using one of the quasi-Newton strategies.
+
+    The scheme 'cs' is potentially the most accurate, but requires the function
+    to correctly handle complex inputs and be analytically continuable to the
+    complex plane. The scheme '3-point' is more accurate than '2-point' but
+    requires twice as many operations.
+
+    Examples
+    --------
+    Constrain ``x[0] < sin(x[1]) + 1.9``
+
+    >>> from scipy.optimize import NonlinearConstraint
+    >>> import numpy as np
+    >>> con = lambda x: x[0] - np.sin(x[1])
+    >>> nlc = NonlinearConstraint(con, -np.inf, 1.9)
+
+    """
+    def __init__(self, fun, lb, ub, jac='2-point', hess=BFGS(),
+                 keep_feasible=False, finite_diff_rel_step=None,
+                 finite_diff_jac_sparsity=None):
+        self.fun = fun
+        self.lb = lb
+        self.ub = ub
+        self.finite_diff_rel_step = finite_diff_rel_step
+        self.finite_diff_jac_sparsity = finite_diff_jac_sparsity
+        self.jac = jac
+        self.hess = hess
+        self.keep_feasible = keep_feasible
+
+
+class LinearConstraint:
+    """Linear constraint on the variables.
+
+    The constraint has the general inequality form::
+
+        lb <= A.dot(x) <= ub
+
+    Here the vector of independent variables x is passed as ndarray of shape
+    (n,) and the matrix A has shape (m, n).
+
+    It is possible to use equal bounds to represent an equality constraint or
+    infinite bounds to represent a one-sided constraint.
+
+    Parameters
+    ----------
+    A : {array_like, sparse matrix}, shape (m, n)
+        Matrix defining the constraint.
+    lb, ub : array_like, optional
+        Lower and upper limits on the constraint. Each array must have the
+        shape (m,) or be a scalar, in the latter case a bound will be the same
+        for all components of the constraint. Use ``np.inf`` with an
+        appropriate sign to specify a one-sided constraint.
+        Set components of `lb` and `ub` equal to represent an equality
+        constraint. Note that you can mix constraints of different types:
+        interval, one-sided or equality, by setting different components of
+        `lb` and `ub` as necessary. Defaults to ``lb = -np.inf``
+        and ``ub = np.inf`` (no limits).
+    keep_feasible : array_like of bool, optional
+        Whether to keep the constraint components feasible throughout
+        iterations. A single value sets this property for all components.
+        Default is False. Has no effect for equality constraints.
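+
+    Examples
+    --------
+    A minimal sketch: constrain ``0 <= x[0] + 2*x[1] <= 4``.
+
+    >>> import numpy as np
+    >>> from scipy.optimize import LinearConstraint
+    >>> lc = LinearConstraint([[1, 2]], lb=0, ub=4)
+    >>> lc.residual(np.array([1.0, 1.0]))  # (A@x - lb, ub - A@x)
+    (array([3.]), array([1.]))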
+    """
+    def _input_validation(self):
+        if self.A.ndim != 2:
+            message = "`A` must have exactly two dimensions."
+            raise ValueError(message)
+
+        try:
+            shape = self.A.shape[0:1]
+            self.lb = np.broadcast_to(self.lb, shape)
+            self.ub = np.broadcast_to(self.ub, shape)
+            self.keep_feasible = np.broadcast_to(self.keep_feasible, shape)
+        except ValueError:
+            message = ("`lb`, `ub`, and `keep_feasible` must be broadcastable "
+                       "to shape `A.shape[0:1]`")
+            raise ValueError(message)
+
+    def __init__(self, A, lb=-np.inf, ub=np.inf, keep_feasible=False):
+        if not issparse(A):
+            # In some cases, if the constraint is not valid, this emits a
+            # VisibleDeprecationWarning about ragged nested sequences
+            # before eventually causing an error. `scipy.optimize.milp` would
+            # prefer that this just error out immediately so it can handle it
+            # rather than concerning the user.
+            with catch_warnings():
+                simplefilter("error")
+                self.A = np.atleast_2d(A).astype(np.float64)
+        else:
+            self.A = A
+        self.lb = np.atleast_1d(lb).astype(np.float64)
+        self.ub = np.atleast_1d(ub).astype(np.float64)
+        self.keep_feasible = np.atleast_1d(keep_feasible).astype(bool)
+        self._input_validation()
+
+    def residual(self, x):
+        """
+        Calculate the residual between the constraint function and the limits
+
+        For a linear constraint of the form::
+
+            lb <= A@x <= ub
+
+        the lower and upper residuals between ``A@x`` and the limits are values
+        ``sl`` and ``sb`` such that::
+
+            lb + sl == A@x == ub - sb
+
+        When all elements of ``sl`` and ``sb`` are positive, all elements of
+        the constraint are satisfied; a negative element in ``sl`` or ``sb``
+        indicates that the corresponding element of the constraint is not
+        satisfied.
+
+        Parameters
+        ----------
+        x : array_like
+            Vector of independent variables
+
+        Returns
+        -------
+        sl, sb : array-like
+            The lower and upper residuals
+        """
+        return self.A@x - self.lb, self.ub - self.A@x
+
+
+class Bounds:
+    """Bounds constraint on the variables.
+
+    The constraint has the general inequality form::
+
+        lb <= x <= ub
+
+    It is possible to use equal bounds to represent an equality constraint or
+    infinite bounds to represent a one-sided constraint.
+
+    Parameters
+    ----------
+    lb, ub : array_like, optional
+        Lower and upper bounds on independent variables. `lb`, `ub`, and
+        `keep_feasible` must be the same shape or broadcastable.
+        Set components of `lb` and `ub` equal
+        to fix a variable. Use ``np.inf`` with an appropriate sign to disable
+        bounds on all or some variables. Note that you can mix constraints of
+        different types: interval, one-sided or equality, by setting different
+        components of `lb` and `ub` as necessary. Defaults to ``lb = -np.inf``
+        and ``ub = np.inf`` (no bounds).
+    keep_feasible : array_like of bool, optional
+        Whether to keep the constraint components feasible throughout
+        iterations. Must be broadcastable with `lb` and `ub`.
+        Default is False. Has no effect for equality constraints.
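+
+    Examples
+    --------
+    A minimal sketch: bound ``0 <= x[0] <= 1`` and fix ``x[1] == 2``.
+
+    >>> from scipy.optimize import Bounds
+    >>> Bounds(lb=[0, 2], ub=[1, 2])
+    Bounds(array([0, 2]), array([1, 2]))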
+    """
+    def _input_validation(self):
+        try:
+            res = np.broadcast_arrays(self.lb, self.ub, self.keep_feasible)
+            self.lb, self.ub, self.keep_feasible = res
+        except ValueError:
+            message = "`lb`, `ub`, and `keep_feasible` must be broadcastable."
+            raise ValueError(message)
+
+    def __init__(self, lb=-np.inf, ub=np.inf, keep_feasible=False):
+        self.lb = np.atleast_1d(lb)
+        self.ub = np.atleast_1d(ub)
+        self.keep_feasible = np.atleast_1d(keep_feasible).astype(bool)
+        self._input_validation()
+
+    def __repr__(self):
+        start = f"{type(self).__name__}({self.lb!r}, {self.ub!r}"
+        if np.any(self.keep_feasible):
+            end = f", keep_feasible={self.keep_feasible!r})"
+        else:
+            end = ")"
+        return start + end
+
+    def residual(self, x):
+        """Calculate the residual (slack) between the input and the bounds
+
+        For a bound constraint of the form::
+
+            lb <= x <= ub
+
+        the lower and upper residuals between `x` and the bounds are values
+        ``sl`` and ``sb`` such that::
+
+            lb + sl == x == ub - sb
+
+        When all elements of ``sl`` and ``sb`` are positive, all elements of
+        ``x`` lie within the bounds; a negative element in ``sl`` or ``sb``
+        indicates that the corresponding element of ``x`` is out of bounds.
+
+        Parameters
+        ----------
+        x : array_like
+            Vector of independent variables
+
+        Returns
+        -------
+        sl, sb : array-like
+            The lower and upper residuals
+        """
+        return x - self.lb, self.ub - x
+
+
+class PreparedConstraint:
+    """Constraint prepared from a user defined constraint.
+
+    On creation it will check whether a constraint definition is valid and
+    the initial point is feasible. If created successfully, it will contain
+    the attributes listed below.
+
+    Parameters
+    ----------
+    constraint : {NonlinearConstraint, LinearConstraint, Bounds}
+        Constraint to check and prepare.
+    x0 : array_like
+        Initial vector of independent variables.
+    sparse_jacobian : bool or None, optional
+        If bool, then the Jacobian of the constraint will be converted
+        to the corresponded format if necessary. If None (default), such
+        conversion is not made.
+    finite_diff_bounds : 2-tuple, optional
+        Lower and upper bounds on the independent variables for the finite
+        difference approximation, if applicable. Defaults to no bounds.
+
+    Attributes
+    ----------
+    fun : {VectorFunction, LinearVectorFunction, IdentityVectorFunction}
+        Function defining the constraint wrapped by one of the convenience
+        classes.
+    bounds : 2-tuple
+        Contains lower and upper bounds for the constraints --- lb and ub.
+        These are converted to ndarray and have a size equal to the number of
+        the constraints.
+    keep_feasible : ndarray
+         Array indicating which components must be kept feasible with a size
+         equal to the number of the constraints.
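+
+    A minimal usage sketch (hypothetical values; this class is internal API)::
+
+        >>> pc = PreparedConstraint(Bounds(0, 1), np.array([0.5, 0.5]))
+        >>> pc.violation([2.0, -1.0])  # 1 above ub, 1 below lb
+        array([1., 1.])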
+    """
+    def __init__(self, constraint, x0, sparse_jacobian=None,
+                 finite_diff_bounds=(-np.inf, np.inf)):
+        if isinstance(constraint, NonlinearConstraint):
+            fun = VectorFunction(constraint.fun, x0,
+                                 constraint.jac, constraint.hess,
+                                 constraint.finite_diff_rel_step,
+                                 constraint.finite_diff_jac_sparsity,
+                                 finite_diff_bounds, sparse_jacobian)
+        elif isinstance(constraint, LinearConstraint):
+            fun = LinearVectorFunction(constraint.A, x0, sparse_jacobian)
+        elif isinstance(constraint, Bounds):
+            fun = IdentityVectorFunction(x0, sparse_jacobian)
+        else:
+            raise ValueError("`constraint` of an unknown type is passed.")
+
+        m = fun.m
+
+        lb = np.asarray(constraint.lb, dtype=float)
+        ub = np.asarray(constraint.ub, dtype=float)
+        keep_feasible = np.asarray(constraint.keep_feasible, dtype=bool)
+
+        lb = np.broadcast_to(lb, m)
+        ub = np.broadcast_to(ub, m)
+        keep_feasible = np.broadcast_to(keep_feasible, m)
+
+        if keep_feasible.shape != (m,):
+            raise ValueError("`keep_feasible` has a wrong shape.")
+
+        mask = keep_feasible & (lb != ub)
+        f0 = fun.f
+        if np.any(f0[mask] < lb[mask]) or np.any(f0[mask] > ub[mask]):
+            raise ValueError("`x0` is infeasible with respect to some "
+                             "inequality constraint with `keep_feasible` "
+                             "set to True.")
+
+        self.fun = fun
+        self.bounds = (lb, ub)
+        self.keep_feasible = keep_feasible
+
+    def violation(self, x):
+        """How much the constraint is exceeded by.
+
+        Parameters
+        ----------
+        x : array-like
+            Vector of independent variables
+
+        Returns
+        -------
+        excess : array-like
+            How much the constraint is exceeded by, for each of the
+            constraints specified by `PreparedConstraint.fun`.
+        """
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning)
+            ev = self.fun.fun(np.asarray(x))
+
+        excess_lb = np.maximum(self.bounds[0] - ev, 0)
+        excess_ub = np.maximum(ev - self.bounds[1], 0)
+
+        return excess_lb + excess_ub
+
+
+def new_bounds_to_old(lb, ub, n):
+    """Convert the new bounds representation to the old one.
+
+    The new representation is a tuple (lb, ub) and the old one is a list
+    containing n tuples, the ith of which holds the lower and upper bound
+    on the ith variable.
+    If any of the entries in lb/ub are -np.inf/np.inf they are replaced by
+    None.
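+
+    For example (illustrative only)::
+
+        >>> new_bounds_to_old([0, -np.inf], np.inf, 2)
+        [(0.0, None), (None, None)]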
+    """
+    lb = np.broadcast_to(lb, n)
+    ub = np.broadcast_to(ub, n)
+
+    lb = [float(x) if x > -np.inf else None for x in lb]
+    ub = [float(x) if x < np.inf else None for x in ub]
+
+    return list(zip(lb, ub))
+
+
+def old_bound_to_new(bounds):
+    """Convert the old bounds representation to the new one.
+
+    The new representation is a tuple (lb, ub) and the old one is a list
+    containing n tuples, the ith of which holds the lower and upper bound
+    on the ith variable.
+    If any of the entries in lb/ub are None they are replaced by
+    -np.inf/np.inf.
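+
+    For example (illustrative only)::
+
+        >>> old_bound_to_new([(0, None), (None, None)])
+        (array([  0., -inf]), array([inf, inf]))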
+    """
+    lb, ub = zip(*bounds)
+
+    # Convert occurrences of None to -inf or inf, and replace occurrences of
+    # any numpy array x with x.item(). Then wrap the results in numpy arrays.
+    lb = np.array([float(_arr_to_scalar(x)) if x is not None else -np.inf
+                   for x in lb])
+    ub = np.array([float(_arr_to_scalar(x)) if x is not None else np.inf
+                   for x in ub])
+
+    return lb, ub
+
+
+def strict_bounds(lb, ub, keep_feasible, n_vars):
+    """Remove bounds which are not asked to be kept feasible."""
+    strict_lb = np.resize(lb, n_vars).astype(float)
+    strict_ub = np.resize(ub, n_vars).astype(float)
+    keep_feasible = np.resize(keep_feasible, n_vars)
+    strict_lb[~keep_feasible] = -np.inf
+    strict_ub[~keep_feasible] = np.inf
+    return strict_lb, strict_ub
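+
+# Illustration (hypothetical call): strict_bounds([0, 0], [1, 1],
+# keep_feasible=[True, False], n_vars=2) returns ([0., -inf], [1., inf]) --
+# only the first variable's bounds remain enforced.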
+
+
+def new_constraint_to_old(con, x0):
+    """
+    Converts new-style constraint objects to old-style constraint dictionaries.
+    """
+    if isinstance(con, NonlinearConstraint):
+        if (con.finite_diff_jac_sparsity is not None or
+                con.finite_diff_rel_step is not None or
+                not isinstance(con.hess, BFGS) or  # misses user specified BFGS
+                con.keep_feasible):
+            warn("Constraint options `finite_diff_jac_sparsity`, "
+                 "`finite_diff_rel_step`, `keep_feasible`, and `hess`"
+                 "are ignored by this method.", OptimizeWarning)
+
+        fun = con.fun
+        if callable(con.jac):
+            jac = con.jac
+        else:
+            jac = None
+
+    else:  # LinearConstraint
+        if np.any(con.keep_feasible):
+            warn("Constraint option `keep_feasible` is ignored by this "
+                 "method.", OptimizeWarning)
+
+        A = con.A
+        if issparse(A):
+            A = A.toarray()
+        fun = lambda x: np.dot(A, x)
+        jac = lambda x: A
+
+    # FIXME: when bugs in VectorFunction/LinearVectorFunction are worked out,
+    # use pcon.fun.fun and pcon.fun.jac. Until then, get fun/jac above.
+    pcon = PreparedConstraint(con, x0)
+    lb, ub = pcon.bounds
+
+    i_eq = lb == ub
+    i_bound_below = np.logical_xor(lb != -np.inf, i_eq)
+    i_bound_above = np.logical_xor(ub != np.inf, i_eq)
+    i_unbounded = np.logical_and(lb == -np.inf, ub == np.inf)
+
+    if np.any(i_unbounded):
+        warn("At least one constraint is unbounded above and below. Such "
+             "constraints are ignored.", OptimizeWarning)
+
+    ceq = []
+    if np.any(i_eq):
+        def f_eq(x):
+            y = np.array(fun(x)).flatten()
+            return y[i_eq] - lb[i_eq]
+        ceq = [{"type": "eq", "fun": f_eq}]
+
+        if jac is not None:
+            def j_eq(x):
+                dy = jac(x)
+                if issparse(dy):
+                    dy = dy.toarray()
+                dy = np.atleast_2d(dy)
+                return dy[i_eq, :]
+            ceq[0]["jac"] = j_eq
+
+    cineq = []
+    n_bound_below = np.sum(i_bound_below)
+    n_bound_above = np.sum(i_bound_above)
+    if n_bound_below + n_bound_above:
+        def f_ineq(x):
+            y = np.zeros(n_bound_below + n_bound_above)
+            y_all = np.array(fun(x)).flatten()
+            y[:n_bound_below] = y_all[i_bound_below] - lb[i_bound_below]
+            y[n_bound_below:] = -(y_all[i_bound_above] - ub[i_bound_above])
+            return y
+        cineq = [{"type": "ineq", "fun": f_ineq}]
+
+        if jac is not None:
+            def j_ineq(x):
+                dy = np.zeros((n_bound_below + n_bound_above, len(x0)))
+                dy_all = jac(x)
+                if issparse(dy_all):
+                    dy_all = dy_all.toarray()
+                dy_all = np.atleast_2d(dy_all)
+                dy[:n_bound_below, :] = dy_all[i_bound_below]
+                dy[n_bound_below:, :] = -dy_all[i_bound_above]
+                return dy
+            cineq[0]["jac"] = j_ineq
+
+    old_constraints = ceq + cineq
+
+    if len(old_constraints) > 1:
+        warn("Equality and inequality constraints are specified in the same "
+             "element of the constraint list. For efficient use with this "
+             "method, equality and inequality constraints should be specified "
+             "in separate elements of the constraint list. ", OptimizeWarning)
+    return old_constraints
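+
+# Illustration (hypothetical): the two-sided constraint
+# LinearConstraint([[1, 0]], -1, 1) becomes a single old-style 'ineq' dict
+# whose function returns [x[0] + 1, 1 - x[0]], i.e. the lower residual
+# followed by the upper residual, both required to be >= 0.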
+
+
+def old_constraint_to_new(ic, con):
+    """
+    Converts old-style constraint dictionaries to new-style constraint objects.
+    """
+    # check type
+    try:
+        ctype = con['type'].lower()
+    except KeyError as e:
+        raise KeyError('Constraint %d has no type defined.' % ic) from e
+    except TypeError as e:
+        raise TypeError(
+            'Constraints must be a sequence of dictionaries.'
+        ) from e
+    except AttributeError as e:
+        raise TypeError("Constraint's type must be a string.") from e
+    else:
+        if ctype not in ['eq', 'ineq']:
+            raise ValueError("Unknown constraint type '%s'." % con['type'])
+    if 'fun' not in con:
+        raise ValueError('Constraint %d has no function defined.' % ic)
+
+    lb = 0
+    if ctype == 'eq':
+        ub = 0
+    else:
+        ub = np.inf
+
+    jac = '2-point'
+    if 'args' in con:
+        args = con['args']
+        fun = lambda x: con['fun'](x, *args)
+        if 'jac' in con:
+            jac = lambda x: con['jac'](x, *args)
+    else:
+        fun = con['fun']
+        if 'jac' in con:
+            jac = con['jac']
+
+    return NonlinearConstraint(fun, lb, ub, jac)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_differentiable_functions.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_differentiable_functions.py
new file mode 100644
index 00000000..5a631961
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_differentiable_functions.py
@@ -0,0 +1,616 @@
+import numpy as np
+import scipy.sparse as sps
+from ._numdiff import approx_derivative, group_columns
+from ._hessian_update_strategy import HessianUpdateStrategy
+from scipy.sparse.linalg import LinearOperator
+
+
+FD_METHODS = ('2-point', '3-point', 'cs')
+
+
+class ScalarFunction:
+    """Scalar function and its derivatives.
+
+    This class defines a scalar function F: R^n->R and methods for
+    computing or approximating its first and second derivatives.
+
+    Parameters
+    ----------
+    fun : callable
+        evaluates the scalar function. Must be of the form ``fun(x, *args)``,
+        where ``x`` is the argument in the form of a 1-D array and ``args`` is
+        a tuple of any additional fixed parameters needed to completely specify
+        the function. Should return a scalar.
+    x0 : array-like
+        Provides an initial set of variables for evaluating fun. Array of real
+        elements of size (n,), where 'n' is the number of independent
+        variables.
+    args : tuple, optional
+        Any additional fixed parameters needed to completely specify the scalar
+        function.
+    grad : {callable, '2-point', '3-point', 'cs'}
+        Method for computing the gradient vector.
+        If it is a callable, it should be a function that returns the gradient
+        vector:
+
+            ``grad(x, *args) -> array_like, shape (n,)``
+
+        where ``x`` is an array with shape (n,) and ``args`` is a tuple with
+        the fixed parameters.
+        Alternatively, the keywords  {'2-point', '3-point', 'cs'} can be used
+        to select a finite difference scheme for numerical estimation of the
+        gradient with a relative step size. These finite difference schemes
+        obey any specified `bounds`.
+    hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy}
+        Method for computing the Hessian matrix. If it is callable, it should
+        return the  Hessian matrix:
+
+            ``hess(x, *args) -> {LinearOperator, spmatrix, array}, (n, n)``
+
+        where x is a (n,) ndarray and `args` is a tuple with the fixed
+        parameters. Alternatively, the keywords {'2-point', '3-point', 'cs'}
+        select a finite difference scheme for numerical estimation. Or, objects
+        implementing `HessianUpdateStrategy` interface can be used to
+        approximate the Hessian.
+        Whenever the gradient is estimated via finite-differences, the Hessian
+        cannot be estimated with options {'2-point', '3-point', 'cs'} and needs
+        to be estimated using one of the quasi-Newton strategies.
+    finite_diff_rel_step : None or array_like
+        Relative step size to use. The absolute step size is computed as
+        ``h = finite_diff_rel_step * sign(x0) * max(1, abs(x0))``, possibly
+        adjusted to fit into the bounds. For ``method='3-point'`` the sign
+        of `h` is ignored. If None then finite_diff_rel_step is selected
+        automatically.
+    finite_diff_bounds : tuple of array_like
+        Lower and upper bounds on independent variables. Defaults to no bounds,
+        (-np.inf, np.inf). Each bound must match the size of `x0` or be a
+        scalar, in the latter case the bound will be the same for all
+        variables. Use it to limit the range of function evaluation.
+    epsilon : None or array_like, optional
+        Absolute step size to use, possibly adjusted to fit into the bounds.
+        For ``method='3-point'`` the sign of `epsilon` is ignored. By default
+        relative steps are used, only if ``epsilon is not None`` are absolute
+        steps used.
+
+    Notes
+    -----
+    This class implements a memoization logic. There are methods `fun`,
+    `grad`, `hess` and corresponding attributes `f`, `g` and `H`. The following
+    things should be considered:
+
+        1. Use only public methods `fun`, `grad` and `hess`.
+        2. After one of the methods is called, the corresponding attribute
+           will be set. However, a subsequent call with a different argument
+           of *any* of the methods may overwrite the attribute.
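+
+    A minimal sketch of the memoization (hypothetical usage; this class is
+    internal API)::
+
+        >>> sf = ScalarFunction(lambda x: np.sum(x**2), [1.0, 2.0], (),
+        ...                     lambda x: 2*x, lambda x: 2*np.eye(x.size),
+        ...                     None, (-np.inf, np.inf))
+        >>> float(sf.fun(np.array([1.0, 2.0])))
+        5.0
+        >>> sf.nfev  # the call above was served from the cache
+        1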
+    """
+    def __init__(self, fun, x0, args, grad, hess, finite_diff_rel_step,
+                 finite_diff_bounds, epsilon=None):
+        if not callable(grad) and grad not in FD_METHODS:
+            raise ValueError(
+                f"`grad` must be either callable or one of {FD_METHODS}."
+            )
+
+        if not (callable(hess) or hess in FD_METHODS
+                or isinstance(hess, HessianUpdateStrategy)):
+            raise ValueError(
+                f"`hess` must be either callable, HessianUpdateStrategy"
+                f" or one of {FD_METHODS}."
+            )
+
+        if grad in FD_METHODS and hess in FD_METHODS:
+            raise ValueError("Whenever the gradient is estimated via "
+                             "finite-differences, we require the Hessian "
+                             "to be estimated using one of the "
+                             "quasi-Newton strategies.")
+
+        # the astype call ensures that self.x is a copy of x0
+        self.x = np.atleast_1d(x0).astype(float)
+        self.n = self.x.size
+        self.nfev = 0
+        self.ngev = 0
+        self.nhev = 0
+        self.f_updated = False
+        self.g_updated = False
+        self.H_updated = False
+
+        self._lowest_x = None
+        self._lowest_f = np.inf
+
+        finite_diff_options = {}
+        if grad in FD_METHODS:
+            finite_diff_options["method"] = grad
+            finite_diff_options["rel_step"] = finite_diff_rel_step
+            finite_diff_options["abs_step"] = epsilon
+            finite_diff_options["bounds"] = finite_diff_bounds
+        if hess in FD_METHODS:
+            finite_diff_options["method"] = hess
+            finite_diff_options["rel_step"] = finite_diff_rel_step
+            finite_diff_options["abs_step"] = epsilon
+            finite_diff_options["as_linear_operator"] = True
+
+        # Function evaluation
+        def fun_wrapped(x):
+            self.nfev += 1
+            # Send a copy because the user may overwrite it.
+            # Overwriting results in undefined behaviour because
+            # fun(self.x) will change self.x, with the two no longer linked.
+            fx = fun(np.copy(x), *args)
+            # Make sure the function returns a true scalar
+            if not np.isscalar(fx):
+                try:
+                    fx = np.asarray(fx).item()
+                except (TypeError, ValueError) as e:
+                    raise ValueError(
+                        "The user-provided objective function "
+                        "must return a scalar value."
+                    ) from e
+
+            if fx < self._lowest_f:
+                self._lowest_x = x
+                self._lowest_f = fx
+
+            return fx
+
+        def update_fun():
+            self.f = fun_wrapped(self.x)
+
+        self._update_fun_impl = update_fun
+        self._update_fun()
+
+        # Gradient evaluation
+        if callable(grad):
+            def grad_wrapped(x):
+                self.ngev += 1
+                return np.atleast_1d(grad(np.copy(x), *args))
+
+            def update_grad():
+                self.g = grad_wrapped(self.x)
+
+        elif grad in FD_METHODS:
+            def update_grad():
+                self._update_fun()
+                self.ngev += 1
+                self.g = approx_derivative(fun_wrapped, self.x, f0=self.f,
+                                           **finite_diff_options)
+
+        self._update_grad_impl = update_grad
+        self._update_grad()
+
+        # Hessian Evaluation
+        if callable(hess):
+            self.H = hess(np.copy(x0), *args)
+            self.H_updated = True
+            self.nhev += 1
+
+            if sps.issparse(self.H):
+                def hess_wrapped(x):
+                    self.nhev += 1
+                    return sps.csr_matrix(hess(np.copy(x), *args))
+                self.H = sps.csr_matrix(self.H)
+
+            elif isinstance(self.H, LinearOperator):
+                def hess_wrapped(x):
+                    self.nhev += 1
+                    return hess(np.copy(x), *args)
+
+            else:
+                def hess_wrapped(x):
+                    self.nhev += 1
+                    return np.atleast_2d(np.asarray(hess(np.copy(x), *args)))
+                self.H = np.atleast_2d(np.asarray(self.H))
+
+            def update_hess():
+                self.H = hess_wrapped(self.x)
+
+        elif hess in FD_METHODS:
+            def update_hess():
+                self._update_grad()
+                self.H = approx_derivative(grad_wrapped, self.x, f0=self.g,
+                                           **finite_diff_options)
+                return self.H
+
+            update_hess()
+            self.H_updated = True
+        elif isinstance(hess, HessianUpdateStrategy):
+            self.H = hess
+            self.H.initialize(self.n, 'hess')
+            self.H_updated = True
+            self.x_prev = None
+            self.g_prev = None
+
+            def update_hess():
+                self._update_grad()
+                self.H.update(self.x - self.x_prev, self.g - self.g_prev)
+
+        self._update_hess_impl = update_hess
+
+        if isinstance(hess, HessianUpdateStrategy):
+            def update_x(x):
+                self._update_grad()
+                self.x_prev = self.x
+                self.g_prev = self.g
+                # ensure that self.x is a copy of x. Don't store a reference
+                # otherwise the memoization doesn't work properly.
+                self.x = np.atleast_1d(x).astype(float)
+                self.f_updated = False
+                self.g_updated = False
+                self.H_updated = False
+                self._update_hess()
+        else:
+            def update_x(x):
+                # ensure that self.x is a copy of x. Don't store a reference
+                # otherwise the memoization doesn't work properly.
+                self.x = np.atleast_1d(x).astype(float)
+                self.f_updated = False
+                self.g_updated = False
+                self.H_updated = False
+        self._update_x_impl = update_x
+
+    def _update_fun(self):
+        if not self.f_updated:
+            self._update_fun_impl()
+            self.f_updated = True
+
+    def _update_grad(self):
+        if not self.g_updated:
+            self._update_grad_impl()
+            self.g_updated = True
+
+    def _update_hess(self):
+        if not self.H_updated:
+            self._update_hess_impl()
+            self.H_updated = True
+
+    def fun(self, x):
+        if not np.array_equal(x, self.x):
+            self._update_x_impl(x)
+        self._update_fun()
+        return self.f
+
+    def grad(self, x):
+        if not np.array_equal(x, self.x):
+            self._update_x_impl(x)
+        self._update_grad()
+        return self.g
+
+    def hess(self, x):
+        if not np.array_equal(x, self.x):
+            self._update_x_impl(x)
+        self._update_hess()
+        return self.H
+
+    def fun_and_grad(self, x):
+        if not np.array_equal(x, self.x):
+            self._update_x_impl(x)
+        self._update_fun()
+        self._update_grad()
+        return self.f, self.g
+
+
+class VectorFunction:
+    """Vector function and its derivatives.
+
+    This class defines a vector function F: R^n->R^m and methods for
+    computing or approximating its first and second derivatives.
+
+    Notes
+    -----
+    This class implements a memoization logic. There are methods `fun`,
+    `jac`, `hess` and corresponding attributes `f`, `J` and `H`. The following
+    things should be considered:
+
+        1. Use only public methods `fun`, `jac` and `hess`.
+        2. After one of the methods is called, the corresponding attribute
+           will be set. However, a subsequent call with a different argument
+           of *any* of the methods may overwrite the attribute.
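+
+    A minimal sketch (hypothetical usage; this class is internal API)::
+
+        >>> vf = VectorFunction(lambda x: x**2, [1.0, 2.0],
+        ...                     lambda x: np.diag(2*x), lambda x, v: np.diag(2*v),
+        ...                     None, None, (-np.inf, np.inf), False)
+        >>> vf.fun(np.array([1.0, 2.0]))  # served from the cache; nfev stays 1
+        array([1., 4.])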
+    """
+    def __init__(self, fun, x0, jac, hess,
+                 finite_diff_rel_step, finite_diff_jac_sparsity,
+                 finite_diff_bounds, sparse_jacobian):
+        if not callable(jac) and jac not in FD_METHODS:
+            raise ValueError("`jac` must be either callable or one of {}."
+                             .format(FD_METHODS))
+
+        if not (callable(hess) or hess in FD_METHODS
+                or isinstance(hess, HessianUpdateStrategy)):
+            raise ValueError("`hess` must be either callable,"
+                             "HessianUpdateStrategy or one of {}."
+                             .format(FD_METHODS))
+
+        if jac in FD_METHODS and hess in FD_METHODS:
+            raise ValueError("Whenever the Jacobian is estimated via "
+                             "finite-differences, we require the Hessian to "
+                             "be estimated using one of the quasi-Newton "
+                             "strategies.")
+
+        self.x = np.atleast_1d(x0).astype(float)
+        self.n = self.x.size
+        self.nfev = 0
+        self.njev = 0
+        self.nhev = 0
+        self.f_updated = False
+        self.J_updated = False
+        self.H_updated = False
+
+        finite_diff_options = {}
+        if jac in FD_METHODS:
+            finite_diff_options["method"] = jac
+            finite_diff_options["rel_step"] = finite_diff_rel_step
+            if finite_diff_jac_sparsity is not None:
+                sparsity_groups = group_columns(finite_diff_jac_sparsity)
+                finite_diff_options["sparsity"] = (finite_diff_jac_sparsity,
+                                                   sparsity_groups)
+            finite_diff_options["bounds"] = finite_diff_bounds
+            self.x_diff = np.copy(self.x)
+        if hess in FD_METHODS:
+            finite_diff_options["method"] = hess
+            finite_diff_options["rel_step"] = finite_diff_rel_step
+            finite_diff_options["as_linear_operator"] = True
+            self.x_diff = np.copy(self.x)
+
+        # Function evaluation
+        def fun_wrapped(x):
+            self.nfev += 1
+            return np.atleast_1d(fun(x))
+
+        def update_fun():
+            self.f = fun_wrapped(self.x)
+
+        self._update_fun_impl = update_fun
+        update_fun()
+
+        self.v = np.zeros_like(self.f)
+        self.m = self.v.size
+
+        # Jacobian Evaluation
+        if callable(jac):
+            self.J = jac(self.x)
+            self.J_updated = True
+            self.njev += 1
+
+            if (sparse_jacobian or
+                    sparse_jacobian is None and sps.issparse(self.J)):
+                def jac_wrapped(x):
+                    self.njev += 1
+                    return sps.csr_matrix(jac(x))
+                self.J = sps.csr_matrix(self.J)
+                self.sparse_jacobian = True
+
+            elif sps.issparse(self.J):
+                def jac_wrapped(x):
+                    self.njev += 1
+                    return jac(x).toarray()
+                self.J = self.J.toarray()
+                self.sparse_jacobian = False
+
+            else:
+                def jac_wrapped(x):
+                    self.njev += 1
+                    return np.atleast_2d(jac(x))
+                self.J = np.atleast_2d(self.J)
+                self.sparse_jacobian = False
+
+            def update_jac():
+                self.J = jac_wrapped(self.x)
+
+        elif jac in FD_METHODS:
+            self.J = approx_derivative(fun_wrapped, self.x, f0=self.f,
+                                       **finite_diff_options)
+            self.J_updated = True
+
+            if (sparse_jacobian or
+                    sparse_jacobian is None and sps.issparse(self.J)):
+                def update_jac():
+                    self._update_fun()
+                    self.J = sps.csr_matrix(
+                        approx_derivative(fun_wrapped, self.x, f0=self.f,
+                                          **finite_diff_options))
+                self.J = sps.csr_matrix(self.J)
+                self.sparse_jacobian = True
+
+            elif sps.issparse(self.J):
+                def update_jac():
+                    self._update_fun()
+                    self.J = approx_derivative(fun_wrapped, self.x, f0=self.f,
+                                               **finite_diff_options).toarray()
+                self.J = self.J.toarray()
+                self.sparse_jacobian = False
+
+            else:
+                def update_jac():
+                    self._update_fun()
+                    self.J = np.atleast_2d(
+                        approx_derivative(fun_wrapped, self.x, f0=self.f,
+                                          **finite_diff_options))
+                self.J = np.atleast_2d(self.J)
+                self.sparse_jacobian = False
+
+        self._update_jac_impl = update_jac
+
+        # Define Hessian
+        if callable(hess):
+            self.H = hess(self.x, self.v)
+            self.H_updated = True
+            self.nhev += 1
+
+            if sps.issparse(self.H):
+                def hess_wrapped(x, v):
+                    self.nhev += 1
+                    return sps.csr_matrix(hess(x, v))
+                self.H = sps.csr_matrix(self.H)
+
+            elif isinstance(self.H, LinearOperator):
+                def hess_wrapped(x, v):
+                    self.nhev += 1
+                    return hess(x, v)
+
+            else:
+                def hess_wrapped(x, v):
+                    self.nhev += 1
+                    return np.atleast_2d(np.asarray(hess(x, v)))
+                self.H = np.atleast_2d(np.asarray(self.H))
+
+            def update_hess():
+                self.H = hess_wrapped(self.x, self.v)
+        elif hess in FD_METHODS:
+            def jac_dot_v(x, v):
+                return jac_wrapped(x).T.dot(v)
+
+            def update_hess():
+                self._update_jac()
+                self.H = approx_derivative(jac_dot_v, self.x,
+                                           f0=self.J.T.dot(self.v),
+                                           args=(self.v,),
+                                           **finite_diff_options)
+            update_hess()
+            self.H_updated = True
+        elif isinstance(hess, HessianUpdateStrategy):
+            self.H = hess
+            self.H.initialize(self.n, 'hess')
+            self.H_updated = True
+            self.x_prev = None
+            self.J_prev = None
+
+            def update_hess():
+                self._update_jac()
+                # When v is updated before x was updated, then x_prev and
+                # J_prev are None and we need this check.
+                if self.x_prev is not None and self.J_prev is not None:
+                    delta_x = self.x - self.x_prev
+                    delta_g = self.J.T.dot(self.v) - self.J_prev.T.dot(self.v)
+                    self.H.update(delta_x, delta_g)
+
+        self._update_hess_impl = update_hess
+
+        if isinstance(hess, HessianUpdateStrategy):
+            def update_x(x):
+                self._update_jac()
+                self.x_prev = self.x
+                self.J_prev = self.J
+                self.x = np.atleast_1d(x).astype(float)
+                self.f_updated = False
+                self.J_updated = False
+                self.H_updated = False
+                self._update_hess()
+        else:
+            def update_x(x):
+                self.x = np.atleast_1d(x).astype(float)
+                self.f_updated = False
+                self.J_updated = False
+                self.H_updated = False
+
+        self._update_x_impl = update_x
+
+    def _update_v(self, v):
+        if not np.array_equal(v, self.v):
+            self.v = v
+            self.H_updated = False
+
+    def _update_x(self, x):
+        if not np.array_equal(x, self.x):
+            self._update_x_impl(x)
+
+    def _update_fun(self):
+        if not self.f_updated:
+            self._update_fun_impl()
+            self.f_updated = True
+
+    def _update_jac(self):
+        if not self.J_updated:
+            self._update_jac_impl()
+            self.J_updated = True
+
+    def _update_hess(self):
+        if not self.H_updated:
+            self._update_hess_impl()
+            self.H_updated = True
+
+    def fun(self, x):
+        self._update_x(x)
+        self._update_fun()
+        return self.f
+
+    def jac(self, x):
+        self._update_x(x)
+        self._update_jac()
+        return self.J
+
+    def hess(self, x, v):
+        # v should be updated before x.
+        self._update_v(v)
+        self._update_x(x)
+        self._update_hess()
+        return self.H
+
+
+class LinearVectorFunction:
+    """Linear vector function and its derivatives.
+
+    Defines a linear function F = A x, where x is an n-dimensional vector and
+    A is an m-by-n matrix. The Jacobian is constant and equals A. The Hessian
+    is identically zero and is returned as a csr matrix.
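+
+    For example (illustrative only)::
+
+        >>> lvf = LinearVectorFunction(np.array([[1.0, 2.0]]), [0.0, 0.0], None)
+        >>> lvf.fun(np.array([3.0, 4.0]))
+        array([11.])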
+    """
+    def __init__(self, A, x0, sparse_jacobian):
+        if sparse_jacobian or sparse_jacobian is None and sps.issparse(A):
+            self.J = sps.csr_matrix(A)
+            self.sparse_jacobian = True
+        elif sps.issparse(A):
+            self.J = A.toarray()
+            self.sparse_jacobian = False
+        else:
+            # np.asarray makes sure A is an ndarray, not a np.matrix
+            self.J = np.atleast_2d(np.asarray(A))
+            self.sparse_jacobian = False
+
+        self.m, self.n = self.J.shape
+
+        self.x = np.atleast_1d(x0).astype(float)
+        self.f = self.J.dot(self.x)
+        self.f_updated = True
+
+        self.v = np.zeros(self.m, dtype=float)
+        self.H = sps.csr_matrix((self.n, self.n))
+
+    def _update_x(self, x):
+        if not np.array_equal(x, self.x):
+            self.x = np.atleast_1d(x).astype(float)
+            self.f_updated = False
+
+    def fun(self, x):
+        self._update_x(x)
+        if not self.f_updated:
+            self.f = self.J.dot(x)
+            self.f_updated = True
+        return self.f
+
+    def jac(self, x):
+        self._update_x(x)
+        return self.J
+
+    def hess(self, x, v):
+        self._update_x(x)
+        self.v = v
+        return self.H
+
+
+class IdentityVectorFunction(LinearVectorFunction):
+    """Identity vector function and its derivatives.
+
+    The Jacobian is the identity matrix, returned as a dense array when
+    `sparse_jacobian=False` and as a csr matrix otherwise. The Hessian is
+    identically zero and is returned as a csr matrix.
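+
+    Examples
+    --------
+    A short sketch under the same caveat (internal helper, illustrative
+    values only):
+
+    >>> from scipy.optimize._differentiable_functions import (
+    ...     IdentityVectorFunction)
+    >>> ivf = IdentityVectorFunction([1., 2.], sparse_jacobian=False)
+    >>> ivf.fun([3., 4.])
+    array([3., 4.])
+    >>> ivf.jac([3., 4.])
+    array([[1., 0.],
+           [0., 1.]])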
+    """
+    def __init__(self, x0, sparse_jacobian):
+        n = len(x0)
+        if sparse_jacobian or sparse_jacobian is None:
+            A = sps.eye(n, format='csr')
+            sparse_jacobian = True
+        else:
+            A = np.eye(n)
+            sparse_jacobian = False
+        super().__init__(A, x0, sparse_jacobian)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_differentialevolution.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_differentialevolution.py
new file mode 100644
index 00000000..151b43fd
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_differentialevolution.py
@@ -0,0 +1,1668 @@
+"""
+differential_evolution: The differential evolution global optimization algorithm
+Added by Andrew Nelson 2014
+"""
+import warnings
+
+import numpy as np
+from scipy.optimize import OptimizeResult, minimize
+from scipy.optimize._optimize import _status_message
+from scipy._lib._util import check_random_state, MapWrapper, _FunctionWrapper
+
+from scipy.optimize._constraints import (Bounds, new_bounds_to_old,
+                                         NonlinearConstraint, LinearConstraint)
+from scipy.sparse import issparse
+
+__all__ = ['differential_evolution']
+
+
+_MACHEPS = np.finfo(np.float64).eps
+
+
+def differential_evolution(func, bounds, args=(), strategy='best1bin',
+                           maxiter=1000, popsize=15, tol=0.01,
+                           mutation=(0.5, 1), recombination=0.7, seed=None,
+                           callback=None, disp=False, polish=True,
+                           init='latinhypercube', atol=0, updating='immediate',
+                           workers=1, constraints=(), x0=None, *,
+                           integrality=None, vectorized=False):
+    """Finds the global minimum of a multivariate function.
+
+    The differential evolution method [1]_ is stochastic in nature. It does
+    not use gradient methods to find the minimum, and can search large areas
+    of candidate space, but often requires larger numbers of function
+    evaluations than conventional gradient-based techniques.
+
+    The algorithm is due to Storn and Price [2]_.
+
+    Parameters
+    ----------
+    func : callable
+        The objective function to be minimized. Must be in the form
+        ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
+        and ``args`` is a tuple of any additional fixed parameters needed to
+        completely specify the function. The number of parameters, N, is equal
+        to ``len(x)``.
+    bounds : sequence or `Bounds`
+        Bounds for variables. There are two ways to specify the bounds:
+        1. Instance of `Bounds` class.
+        2. ``(min, max)`` pairs for each element in ``x``, defining the finite
+        lower and upper bounds for the optimizing argument of `func`.
+        The total number of bounds is used to determine the number of
+        parameters, N.
+    args : tuple, optional
+        Any additional fixed parameters needed to
+        completely specify the objective function.
+    strategy : str, optional
+        The differential evolution strategy to use. Should be one of:
+
+            - 'best1bin'
+            - 'best1exp'
+            - 'rand1exp'
+            - 'randtobest1exp'
+            - 'currenttobest1exp'
+            - 'best2exp'
+            - 'rand2exp'
+            - 'randtobest1bin'
+            - 'currenttobest1bin'
+            - 'best2bin'
+            - 'rand2bin'
+            - 'rand1bin'
+
+        The default is 'best1bin'.
+    maxiter : int, optional
+        The maximum number of generations over which the entire population is
+        evolved. The maximum number of function evaluations (with no polishing)
+        is: ``(maxiter + 1) * popsize * N``
+    popsize : int, optional
+        A multiplier for setting the total population size. The population has
+        ``popsize * N`` individuals. This keyword is overridden if an
+        initial population is supplied via the `init` keyword. When using
+        ``init='sobol'`` the population size is calculated as the next power
+        of 2 after ``popsize * N``.
+    tol : float, optional
+        Relative tolerance for convergence, the solving stops when
+        ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,
+        where `atol` and `tol` are the absolute and relative tolerances
+        respectively.
+    mutation : float or tuple(float, float), optional
+        The mutation constant. In the literature this is also known as
+        differential weight, being denoted by F.
+        If specified as a float it should be in the range [0, 2).
+        If specified as a tuple ``(min, max)`` dithering is employed. Dithering
+        randomly changes the mutation constant on a generation by generation
+        basis. The mutation constant for that generation is taken from
+        ``U[min, max)``. Dithering can help speed convergence significantly.
+        Increasing the mutation constant increases the search radius, but will
+        slow down convergence.
+    recombination : float, optional
+        The recombination constant, should be in the range [0, 1]. In the
+        literature this is also known as the crossover probability, being
+        denoted by CR. Increasing this value allows a larger number of mutants
+        to progress into the next generation, but at the risk of population
+        stability.
+    seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
+        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
+        singleton is used.
+        If `seed` is an int, a new ``RandomState`` instance is used,
+        seeded with `seed`.
+        If `seed` is already a ``Generator`` or ``RandomState`` instance then
+        that instance is used.
+        Specify `seed` for repeatable minimizations.
+    disp : bool, optional
+        Prints the evaluated `func` at every iteration.
+    callback : callable, `callback(xk, convergence=val)`, optional
+        A function to follow the progress of the minimization. ``xk`` is
+        the best solution found so far. ``val`` represents the fractional
+        value of the population convergence.  When ``val`` is greater than one
+        the function halts. If callback returns `True`, then the minimization
+        is halted (any polishing is still carried out).
+    polish : bool, optional
+        If True (default), then `scipy.optimize.minimize` with the `L-BFGS-B`
+        method is used to polish the best population member at the end, which
+        can improve the minimization slightly. If a constrained problem is
+        being studied then the `trust-constr` method is used instead. For large
+        problems with many constraints, polishing can take a long time due to
+        the Jacobian computations.
+    init : str or array-like, optional
+        Specify which type of population initialization is performed. Should be
+        one of:
+
+            - 'latinhypercube'
+            - 'sobol'
+            - 'halton'
+            - 'random'
+            - array specifying the initial population. The array should have
+              shape ``(S, N)``, where S is the total population size and N is
+              the number of parameters.
+              `init` is clipped to `bounds` before use.
+
+        The default is 'latinhypercube'. Latin Hypercube sampling tries to
+        maximize coverage of the available parameter space.
+
+        'sobol' and 'halton' are superior alternatives that maximize coverage
+        of the parameter space even further. 'sobol' will enforce an initial
+        population size calculated as the next power of 2 after
+        ``popsize * N``. 'halton' has no requirements but is a bit less
+        efficient. See `scipy.stats.qmc` for more details.
+
+        'random' initializes the population randomly - this has the drawback
+        that clustering can occur, preventing the whole of parameter space
+        being covered. An array can be used to specify the population, for
+        example, to create a tight bunch of initial guesses in a location
+        where the solution is known to exist, thereby reducing the time to
+        convergence.
+    atol : float, optional
+        Absolute tolerance for convergence, the solving stops when
+        ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,
+        where `atol` and `tol` are the absolute and relative tolerances
+        respectively.
+    updating : {'immediate', 'deferred'}, optional
+        If ``'immediate'``, the best solution vector is continuously updated
+        within a single generation [4]_. This can lead to faster convergence as
+        trial vectors can take advantage of continuous improvements in the best
+        solution.
+        With ``'deferred'``, the best solution vector is updated once per
+        generation. Only ``'deferred'`` is compatible with parallelization or
+        vectorization, and the `workers` and `vectorized` keywords can
+        over-ride this option.
+
+        .. versionadded:: 1.2.0
+
+    workers : int or map-like callable, optional
+        If `workers` is an int the population is subdivided into `workers`
+        sections and evaluated in parallel
+        (uses `multiprocessing.Pool`).
+        Supply -1 to use all available CPU cores.
+        Alternatively supply a map-like callable, such as
+        `multiprocessing.Pool.map` for evaluating the population in parallel.
+        This evaluation is carried out as ``workers(func, iterable)``.
+        This option will override the `updating` keyword to
+        ``updating='deferred'`` if ``workers != 1``.
+        This option overrides the `vectorized` keyword if ``workers != 1``.
+        Requires that `func` be pickleable.
+
+        .. versionadded:: 1.2.0
+
+    constraints : {NonlinearConstraint, LinearConstraint, Bounds}
+        Constraints on the solver, over and above those applied by the `bounds`
+        kwd. Uses the approach by Lampinen [5]_.
+
+        .. versionadded:: 1.4.0
+
+    x0 : None or array-like, optional
+        Provides an initial guess to the minimization. Once the population has
+        been initialized this vector replaces the first (best) member. This
+        replacement is done even if `init` is given an initial population.
+        ``x0.shape == (N,)``.
+
+        .. versionadded:: 1.7.0
+
+    integrality : 1-D array, optional
+        For each decision variable, a boolean value indicating whether the
+        decision variable is constrained to integer values. The array is
+        broadcast to ``(N,)``.
+        If any decision variables are constrained to be integral, they will not
+        be changed during polishing.
+        Only integer values lying between the lower and upper bounds are used.
+        If there are no integer values lying between the bounds then a
+        `ValueError` is raised.
+
+        .. versionadded:: 1.9.0
+
+    vectorized : bool, optional
+        If ``vectorized is True``, `func` is sent an `x` array with
+        ``x.shape == (N, S)``, and is expected to return an array of shape
+        ``(S,)``, where `S` is the number of solution vectors to be calculated.
+        If constraints are applied, each of the functions used to construct
+        a `Constraint` object should accept an `x` array with
+        ``x.shape == (N, S)``, and return an array of shape ``(M, S)``, where
+        `M` is the number of constraint components.
+        This option is an alternative to the parallelization offered by
+        `workers`, and may help in optimization speed by reducing interpreter
+        overhead from multiple function calls. This keyword is ignored if
+        ``workers != 1``.
+        This option will override the `updating` keyword to
+        ``updating='deferred'``.
+        See the notes section for further discussion on when to use
+        ``'vectorized'``, and when to use ``'workers'``.
+
+        .. versionadded:: 1.9.0
+
+    Returns
+    -------
+    res : OptimizeResult
+        The optimization result represented as a `OptimizeResult` object.
+        Important attributes are: ``x`` the solution array, ``success`` a
+        Boolean flag indicating if the optimizer exited successfully and
+        ``message`` which describes the cause of the termination. See
+        `OptimizeResult` for a description of other attributes. If `polish`
+        was employed, and a lower minimum was obtained by the polishing, then
+        OptimizeResult also contains the ``jac`` attribute.
+        If the eventual solution does not satisfy the applied constraints
+        ``success`` will be `False`.
+
+    Notes
+    -----
+    Differential evolution is a stochastic population based method that is
+    useful for global optimization problems. At each pass through the
+    population the algorithm mutates each candidate solution by mixing with
+    other candidate solutions to create a trial candidate. There are several
+    strategies [3]_ for creating trial candidates, which suit some problems
+    more than others. The 'best1bin' strategy is a good starting point for
+    many systems. In this strategy two members of the population are randomly
+    chosen. Their difference is used to mutate the best member (the 'best' in
+    'best1bin'), :math:`b_0`, so far:
+
+    .. math::
+
+        b' = b_0 + mutation * (population[rand0] - population[rand1])
+
+    A trial vector is then constructed. Starting with a randomly chosen ith
+    parameter the trial is sequentially filled (in modulo) with parameters
+    from ``b'`` or the original candidate. The choice of whether to use ``b'``
+    or the original candidate is made with a binomial distribution (the 'bin'
+    in 'best1bin') - a random number in [0, 1) is generated. If this number is
+    less than the `recombination` constant then the parameter is loaded from
+    ``b'``, otherwise it is loaded from the original candidate. The final
+    parameter is always loaded from ``b'``. Once the trial candidate is built
+    its fitness is assessed. If the trial is better than the original candidate
+    then it takes its place. If it is also better than the best overall
+    candidate it also replaces that.
+    To improve your chances of finding a global minimum use higher `popsize`
+    values, with higher `mutation` (and dithering), but lower `recombination`
+    values. This has the effect of widening the search radius, but slowing
+    convergence.
+    By default the best solution vector is updated continuously within a single
+    iteration (``updating='immediate'``). This is a modification [4]_ of the
+    original differential evolution algorithm which can lead to faster
+    convergence as trial vectors can immediately benefit from improved
+    solutions. To use the original Storn and Price behaviour, updating the best
+    solution once per iteration, set ``updating='deferred'``.
+    The ``'deferred'`` approach is compatible with both parallelization and
+    vectorization (``'workers'`` and ``'vectorized'`` keywords). These may
+    improve minimization speed by using computer resources more efficiently.
+    The ``'workers'`` keyword distributes calculations over multiple processors. By
+    default the Python `multiprocessing` module is used, but other approaches
+    are also possible, such as the Message Passing Interface (MPI) used on
+    clusters [6]_ [7]_. The overhead from these approaches (creating new
+    Processes, etc) may be significant, meaning that computational speed
+    doesn't necessarily scale with the number of processors used.
+    Parallelization is best suited to computationally expensive objective
+    functions. If the objective function is less expensive, then
+    ``'vectorized'`` may aid by only calling the objective function once per
+    iteration, rather than multiple times for all the population members; the
+    interpreter overhead is reduced.
+
+    .. versionadded:: 0.15.0
+
+    References
+    ----------
+    .. [1] Differential evolution, Wikipedia,
+           http://en.wikipedia.org/wiki/Differential_evolution
+    .. [2] Storn, R and Price, K, Differential Evolution - a Simple and
+           Efficient Heuristic for Global Optimization over Continuous Spaces,
+           Journal of Global Optimization, 1997, 11, 341 - 359.
+    .. [3] http://www1.icsi.berkeley.edu/~storn/code.html
+    .. [4] Wormington, M., Panaccione, C., Matney, K. M., Bowen, D. K.,
+           Characterization of structures from X-ray scattering data using
+           genetic algorithms, Phil. Trans. R. Soc. Lond. A, 1999, 357,
+           2827-2848
+    .. [5] Lampinen, J., A constraint handling approach for the differential
+           evolution algorithm. Proceedings of the 2002 Congress on
+           Evolutionary Computation. CEC'02 (Cat. No. 02TH8600). Vol. 2. IEEE,
+           2002.
+    .. [6] https://mpi4py.readthedocs.io/en/stable/
+    .. [7] https://schwimmbad.readthedocs.io/en/latest/
+
+    Examples
+    --------
+    Let us consider the problem of minimizing the Rosenbrock function. This
+    function is implemented in `rosen` in `scipy.optimize`.
+
+    >>> import numpy as np
+    >>> from scipy.optimize import rosen, differential_evolution
+    >>> bounds = [(0,2), (0, 2), (0, 2), (0, 2), (0, 2)]
+    >>> result = differential_evolution(rosen, bounds)
+    >>> result.x, result.fun
+    (array([1., 1., 1., 1., 1.]), 1.9216496320061384e-19)
+
+    Now repeat, but with parallelization.
+
+    >>> result = differential_evolution(rosen, bounds, updating='deferred',
+    ...                                 workers=2)
+    >>> result.x, result.fun
+    (array([1., 1., 1., 1., 1.]), 1.9216496320061384e-19)
+
+    Let's do a constrained minimization.
+
+    >>> from scipy.optimize import LinearConstraint, Bounds
+
+    We add the constraint that the sum of ``x[0]`` and ``x[1]`` must be less
+    than or equal to 1.9.  This is a linear constraint, which may be written
+    ``A @ x <= 1.9``, where ``A = array([[1, 1]])``.  This can be encoded as
+    a `LinearConstraint` instance:
+
+    >>> lc = LinearConstraint([[1, 1]], -np.inf, 1.9)
+
+    Specify limits using a `Bounds` object.
+
+    >>> bounds = Bounds([0., 0.], [2., 2.])
+    >>> result = differential_evolution(rosen, bounds, constraints=lc,
+    ...                                 seed=1)
+    >>> result.x, result.fun
+    (array([0.96632622, 0.93367155]), 0.0011352416852625719)
+
+    Next find the minimum of the Ackley function
+    (https://en.wikipedia.org/wiki/Test_functions_for_optimization).
+
+    >>> def ackley(x):
+    ...     arg1 = -0.2 * np.sqrt(0.5 * (x[0] ** 2 + x[1] ** 2))
+    ...     arg2 = 0.5 * (np.cos(2. * np.pi * x[0]) + np.cos(2. * np.pi * x[1]))
+    ...     return -20. * np.exp(arg1) - np.exp(arg2) + 20. + np.e
+    >>> bounds = [(-5, 5), (-5, 5)]
+    >>> result = differential_evolution(ackley, bounds, seed=1)
+    >>> result.x, result.fun
+    (array([0., 0.]), 4.440892098500626e-16)
+
+    The Ackley function is written in a vectorized manner, so the
+    ``'vectorized'`` keyword can be employed. Note the reduced number of
+    function evaluations.
+
+    >>> result = differential_evolution(
+    ...     ackley, bounds, vectorized=True, updating='deferred', seed=1
+    ... )
+    >>> result.x, result.fun
+    (array([0., 0.]), 4.440892098500626e-16)
+
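+    Finally, a standalone sketch of the 'best1bin' mutation and binomial
+    crossover step described in the Notes, written with plain NumPy rather
+    than the solver internals (names and constants here are illustrative,
+    and the rule that the final parameter is always taken from the mutant
+    is omitted for brevity):
+
+    >>> rng = np.random.default_rng(1)
+    >>> population = rng.uniform(size=(15, 2))  # (S, N), scaled to [0, 1)
+    >>> best = population[0]
+    >>> r0, r1 = rng.choice(np.arange(1, 15), 2, replace=False)
+    >>> mutant = best + 0.8 * (population[r0] - population[r1])
+    >>> crossover = rng.uniform(size=2) < 0.7   # recombination constant CR
+    >>> trial = np.where(crossover, mutant, population[3])
+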
+    """
+
+    # using a context manager means that any created Pool objects are
+    # cleared up.
+    with DifferentialEvolutionSolver(func, bounds, args=args,
+                                     strategy=strategy,
+                                     maxiter=maxiter,
+                                     popsize=popsize, tol=tol,
+                                     mutation=mutation,
+                                     recombination=recombination,
+                                     seed=seed, polish=polish,
+                                     callback=callback,
+                                     disp=disp, init=init, atol=atol,
+                                     updating=updating,
+                                     workers=workers,
+                                     constraints=constraints,
+                                     x0=x0,
+                                     integrality=integrality,
+                                     vectorized=vectorized) as solver:
+        ret = solver.solve()
+
+    return ret
+
+
+class DifferentialEvolutionSolver:
+
+    """This class implements the differential evolution solver
+
+    Parameters
+    ----------
+    func : callable
+        The objective function to be minimized. Must be in the form
+        ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
+        and ``args`` is a tuple of any additional fixed parameters needed to
+        completely specify the function. The number of parameters, N, is equal
+        to ``len(x)``.
+    bounds : sequence or `Bounds`
+        Bounds for variables. There are two ways to specify the bounds:
+        1. Instance of `Bounds` class.
+        2. ``(min, max)`` pairs for each element in ``x``, defining the finite
+        lower and upper bounds for the optimizing argument of `func`.
+        The total number of bounds is used to determine the number of
+        parameters, N.
+    args : tuple, optional
+        Any additional fixed parameters needed to
+        completely specify the objective function.
+    strategy : str, optional
+        The differential evolution strategy to use. Should be one of:
+
+            - 'best1bin'
+            - 'best1exp'
+            - 'rand1exp'
+            - 'randtobest1exp'
+            - 'currenttobest1exp'
+            - 'best2exp'
+            - 'rand2exp'
+            - 'randtobest1bin'
+            - 'currenttobest1bin'
+            - 'best2bin'
+            - 'rand2bin'
+            - 'rand1bin'
+
+        The default is 'best1bin'.
+
+    maxiter : int, optional
+        The maximum number of generations over which the entire population is
+        evolved. The maximum number of function evaluations (with no polishing)
+        is: ``(maxiter + 1) * popsize * N``
+    popsize : int, optional
+        A multiplier for setting the total population size. The population has
+        ``popsize * N`` individuals. This keyword is overridden if an
+        initial population is supplied via the `init` keyword. When using
+        ``init='sobol'`` the population size is calculated as the next power
+        of 2 after ``popsize * N``.
+    tol : float, optional
+        Relative tolerance for convergence, the solving stops when
+        ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,
+        where `atol` and `tol` are the absolute and relative tolerances
+        respectively.
+    mutation : float or tuple(float, float), optional
+        The mutation constant. In the literature this is also known as
+        differential weight, being denoted by F.
+        If specified as a float it should be in the range [0, 2).
+        If specified as a tuple ``(min, max)`` dithering is employed. Dithering
+        randomly changes the mutation constant on a generation by generation
+        basis. The mutation constant for that generation is taken from
+        ``U[min, max)``. Dithering can help speed convergence significantly.
+        Increasing the mutation constant increases the search radius, but will
+        slow down convergence.
+    recombination : float, optional
+        The recombination constant, should be in the range [0, 1]. In the
+        literature this is also known as the crossover probability, being
+        denoted by CR. Increasing this value allows a larger number of mutants
+        to progress into the next generation, but at the risk of population
+        stability.
+    seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
+        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
+        singleton is used.
+        If `seed` is an int, a new ``RandomState`` instance is used,
+        seeded with `seed`.
+        If `seed` is already a ``Generator`` or ``RandomState`` instance then
+        that instance is used.
+        Specify `seed` for repeatable minimizations.
+    disp : bool, optional
+        Prints the evaluated `func` at every iteration.
+    callback : callable, `callback(xk, convergence=val)`, optional
+        A function to follow the progress of the minimization. ``xk`` is
+        the best solution found so far. ``val`` represents the fractional
+        value of the population convergence. When ``val`` is greater than one
+        the function halts. If callback returns `True`, then the minimization
+        is halted (any polishing is still carried out).
+    polish : bool, optional
+        If True (default), then `scipy.optimize.minimize` with the `L-BFGS-B`
+        method is used to polish the best population member at the end, which
+        can improve the minimization slightly. If a constrained problem is
+        being studied then the `trust-constr` method is used instead. For large
+        problems with many constraints, polishing can take a long time due to
+        the Jacobian computations.
+    maxfun : int, optional
+        Set the maximum number of function evaluations. However, it probably
+        makes more sense to set `maxiter` instead.
+    init : str or array-like, optional
+        Specify which type of population initialization is performed. Should be
+        one of:
+
+            - 'latinhypercube'
+            - 'sobol'
+            - 'halton'
+            - 'random'
+            - array specifying the initial population. The array should have
+              shape ``(S, N)``, where S is the total population size and
+              N is the number of parameters.
+              `init` is clipped to `bounds` before use.
+
+        The default is 'latinhypercube'. Latin Hypercube sampling tries to
+        maximize coverage of the available parameter space.
+
+        'sobol' and 'halton' are superior alternatives that maximize coverage
+        of the parameter space even further. 'sobol' will enforce an initial
+        population size calculated as the next power of 2 after
+        ``popsize * N``. 'halton' has no requirements but is a bit less
+        efficient. See `scipy.stats.qmc` for more details.
+
+        'random' initializes the population randomly - this has the drawback
+        that clustering can occur, preventing the whole of parameter space
+        being covered. An array can be used to specify the population, for
+        example, to create a tight bunch of initial guesses in a location
+        where the solution is known to exist, thereby reducing the time to
+        convergence.
+    atol : float, optional
+        Absolute tolerance for convergence, the solving stops when
+        ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,
+        where `atol` and `tol` are the absolute and relative tolerances
+        respectively.
+    updating : {'immediate', 'deferred'}, optional
+        If ``'immediate'``, the best solution vector is continuously updated
+        within a single generation [4]_. This can lead to faster convergence as
+        trial vectors can take advantage of continuous improvements in the best
+        solution.
+        With ``'deferred'``, the best solution vector is updated once per
+        generation. Only ``'deferred'`` is compatible with parallelization or
+        vectorization, and the `workers` and `vectorized` keywords can
+        over-ride this option.
+    workers : int or map-like callable, optional
+        If `workers` is an int the population is subdivided into `workers`
+        sections and evaluated in parallel
+        (uses `multiprocessing.Pool`).
+        Supply `-1` to use all cores available to the Process.
+        Alternatively supply a map-like callable, such as
+        `multiprocessing.Pool.map` for evaluating the population in parallel.
+        This evaluation is carried out as ``workers(func, iterable)``.
+        This option will override the `updating` keyword to
+        `updating='deferred'` if `workers != 1`.
+        Requires that `func` be pickleable.
+    constraints : {NonlinearConstraint, LinearConstraint, Bounds}
+        Constraints on the solver, over and above those applied by the `bounds`
+        kwd. Uses the approach by Lampinen.
+    x0 : None or array-like, optional
+        Provides an initial guess to the minimization. Once the population has
+        been initialized this vector replaces the first (best) member. This
+        replacement is done even if `init` is given an initial population.
+        ``x0.shape == (N,)``.
+    integrality : 1-D array, optional
+        For each decision variable, a boolean value indicating whether the
+        decision variable is constrained to integer values. The array is
+        broadcast to ``(N,)``.
+        If any decision variables are constrained to be integral, they will not
+        be changed during polishing.
+        Only integer values lying between the lower and upper bounds are used.
+        If there are no integer values lying between the bounds then a
+        `ValueError` is raised.
+    vectorized : bool, optional
+        If ``vectorized is True``, `func` is sent an `x` array with
+        ``x.shape == (N, S)``, and is expected to return an array of shape
+        ``(S,)``, where `S` is the number of solution vectors to be calculated.
+        If constraints are applied, each of the functions used to construct
+        a `Constraint` object should accept an `x` array with
+        ``x.shape == (N, S)``, and return an array of shape ``(M, S)``, where
+        `M` is the number of constraint components.
+        This option is an alternative to the parallelization offered by
+        `workers`, and may help in optimization speed. This keyword is
+        ignored if ``workers != 1``.
+        This option will override the `updating` keyword to
+        ``updating='deferred'``.
+    """
+
+    # Dispatch of mutation strategy method (binomial or exponential).
+    _binomial = {'best1bin': '_best1',
+                 'randtobest1bin': '_randtobest1',
+                 'currenttobest1bin': '_currenttobest1',
+                 'best2bin': '_best2',
+                 'rand2bin': '_rand2',
+                 'rand1bin': '_rand1'}
+    _exponential = {'best1exp': '_best1',
+                    'rand1exp': '_rand1',
+                    'randtobest1exp': '_randtobest1',
+                    'currenttobest1exp': '_currenttobest1',
+                    'best2exp': '_best2',
+                    'rand2exp': '_rand2'}
+
+    __init_error_msg = ("The population initialization method must be one of "
+                        "'latinhypercube', 'sobol', 'halton' or 'random', or "
+                        "an array of shape (S, N) where N is the number of "
+                        "parameters and S > 4")
+
+    def __init__(self, func, bounds, args=(),
+                 strategy='best1bin', maxiter=1000, popsize=15,
+                 tol=0.01, mutation=(0.5, 1), recombination=0.7, seed=None,
+                 maxfun=np.inf, callback=None, disp=False, polish=True,
+                 init='latinhypercube', atol=0, updating='immediate',
+                 workers=1, constraints=(), x0=None, *, integrality=None,
+                 vectorized=False):
+
+        if strategy in self._binomial:
+            self.mutation_func = getattr(self, self._binomial[strategy])
+        elif strategy in self._exponential:
+            self.mutation_func = getattr(self, self._exponential[strategy])
+        else:
+            raise ValueError("Please select a valid mutation strategy")
+        self.strategy = strategy
+
+        self.callback = callback
+        self.polish = polish
+
+        # set the updating / parallelisation options
+        if updating in ['immediate', 'deferred']:
+            self._updating = updating
+
+        self.vectorized = vectorized
+
+        # want to use parallelisation, but updating is immediate
+        if workers != 1 and updating == 'immediate':
+            warnings.warn("differential_evolution: the 'workers' keyword has"
+                          " overridden updating='immediate' to"
+                          " updating='deferred'", UserWarning, stacklevel=2)
+            self._updating = 'deferred'
+
+        if vectorized and workers != 1:
+            warnings.warn("differential_evolution: the 'workers' keyword"
+                          " overrides the 'vectorized' keyword", stacklevel=2)
+            self.vectorized = vectorized = False
+
+        if vectorized and updating == 'immediate':
+            warnings.warn("differential_evolution: the 'vectorized' keyword"
+                          " has overridden updating='immediate' to updating"
+                          "='deferred'", UserWarning, stacklevel=2)
+            self._updating = 'deferred'
+
+        # an object with a map method.
+        if vectorized:
+            def maplike_for_vectorized_func(func, x):
+                # send an array (N, S) to the user func,
+                # expect to receive (S,). Transposition is required because
+                # internally the population is held as (S, N)
+                return np.atleast_1d(func(x.T))
+            workers = maplike_for_vectorized_func
+
+        self._mapwrapper = MapWrapper(workers)
+
+        # relative and absolute tolerances for convergence
+        self.tol, self.atol = tol, atol
+
+        # Mutation constant should be in [0, 2). If specified as a sequence
+        # then dithering is performed.
+        self.scale = mutation
+        if (not np.all(np.isfinite(mutation)) or
+                np.any(np.array(mutation) >= 2) or
+                np.any(np.array(mutation) < 0)):
+            raise ValueError('The mutation constant must be a float in '
+                             '[0, 2), or specified as a tuple(min, max)'
+                             ' where min < max and min, max are in [0, 2).')
+
+        self.dither = None
+        if hasattr(mutation, '__iter__') and len(mutation) > 1:
+            self.dither = [mutation[0], mutation[1]]
+            self.dither.sort()
+
+        self.cross_over_probability = recombination
+
+        # we create a wrapped function to allow the use of map (and Pool.map
+        # in the future)
+        self.func = _FunctionWrapper(func, args)
+        self.args = args
+
+        # convert tuple of lower and upper bounds to limits
+        # [(low_0, high_0), ..., (low_n, high_n)]
+        #     -> [[low_0, ..., low_n], [high_0, ..., high_n]]
+        if isinstance(bounds, Bounds):
+            self.limits = np.array(new_bounds_to_old(bounds.lb,
+                                                     bounds.ub,
+                                                     len(bounds.lb)),
+                                   dtype=float).T
+        else:
+            self.limits = np.array(bounds, dtype='float').T
+
+        if (np.size(self.limits, 0) != 2 or not
+                np.all(np.isfinite(self.limits))):
+            raise ValueError('bounds should be a sequence containing '
+                             'real valued (min, max) pairs for each value'
+                             ' in x')
+
+        if maxiter is None:  # the default used to be None
+            maxiter = 1000
+        self.maxiter = maxiter
+        if maxfun is None:  # the default used to be None
+            maxfun = np.inf
+        self.maxfun = maxfun
+
+        # population is scaled to between [0, 1].
+        # We have to scale between parameter <-> population
+        # save these arguments for _scale_parameter and
+        # _unscale_parameter. This is an optimization
+        self.__scale_arg1 = 0.5 * (self.limits[0] + self.limits[1])
+        self.__scale_arg2 = np.fabs(self.limits[0] - self.limits[1])
+
+        self.parameter_count = np.size(self.limits, 1)
+
+        self.random_number_generator = check_random_state(seed)
+
+        # Which parameters are going to be integers?
+        if np.any(integrality):
+            # user has provided a truth value for integer constraints
+            integrality = np.broadcast_to(
+                integrality,
+                self.parameter_count
+            )
+            integrality = np.asarray(integrality, bool)
+            # For integrality parameters change the limits to only allow
+            # integer values lying between the limits.
+            lb, ub = np.copy(self.limits)
+
+            lb = np.ceil(lb)
+            ub = np.floor(ub)
+            if not (lb[integrality] <= ub[integrality]).all():
+                # there's a parameter that doesn't have an integer value
+                # lying between the limits
+                raise ValueError("One of the integrality constraints does not"
+                                 " have any possible integer values between"
+                                 " the lower/upper bounds.")
+            nlb = np.nextafter(lb[integrality] - 0.5, np.inf)
+            nub = np.nextafter(ub[integrality] + 0.5, -np.inf)
+
+            self.integrality = integrality
+            self.limits[0, self.integrality] = nlb
+            self.limits[1, self.integrality] = nub
+        else:
+            self.integrality = False
+
+        # default population initialization is a latin hypercube design, but
+        # there are other population initializations possible.
+        # the minimum is 5 because 'best2bin' requires a population that's at
+        # least 5 long
+        self.num_population_members = max(5, popsize * self.parameter_count)
+        self.population_shape = (self.num_population_members,
+                                 self.parameter_count)
+
+        self._nfev = 0
+        # check for str first, otherwise comparing a str with an array fails
+        if isinstance(init, str):
+            if init == 'latinhypercube':
+                self.init_population_lhs()
+            elif init == 'sobol':
+                # must be Ns = 2**m for Sobol'
+                n_s = int(2 ** np.ceil(np.log2(self.num_population_members)))
+                self.num_population_members = n_s
+                self.population_shape = (self.num_population_members,
+                                         self.parameter_count)
+                self.init_population_qmc(qmc_engine='sobol')
+            elif init == 'halton':
+                self.init_population_qmc(qmc_engine='halton')
+            elif init == 'random':
+                self.init_population_random()
+            else:
+                raise ValueError(self.__init_error_msg)
+        else:
+            self.init_population_array(init)
+
+        if x0 is not None:
+            # scale to within unit interval and
+            # ensure parameters are within bounds.
+            x0_scaled = self._unscale_parameters(np.asarray(x0))
+            if ((x0_scaled > 1.0) | (x0_scaled < 0.0)).any():
+                raise ValueError(
+                    "Some entries in x0 lay outside the specified bounds"
+                )
+            self.population[0] = x0_scaled
+
+        # infrastructure for constraints
+        self.constraints = constraints
+        self._wrapped_constraints = []
+
+        if hasattr(constraints, '__len__'):
+            # a sequence of constraints; this also handles the default
+            # keyword parameter (an empty tuple)
+            for c in constraints:
+                self._wrapped_constraints.append(
+                    _ConstraintWrapper(c, self.x)
+                )
+        else:
+            self._wrapped_constraints = [
+                _ConstraintWrapper(constraints, self.x)
+            ]
+        self.total_constraints = np.sum(
+            [c.num_constr for c in self._wrapped_constraints]
+        )
+        self.constraint_violation = np.zeros((self.num_population_members, 1))
+        self.feasible = np.ones(self.num_population_members, bool)
+
+        self.disp = disp
+
+    def init_population_lhs(self):
+        """
+        Initializes the population with Latin Hypercube Sampling.
+        Latin Hypercube Sampling ensures that each parameter is uniformly
+        sampled over its range.
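+
+        As a standalone sketch of the stratification idea (illustrative, not
+        the solver internals): with 4 members, each parameter column draws
+        one sample from each of the segments [0, .25), [.25, .5), [.5, .75)
+        and [.75, 1), and each column is then independently permuted::
+
+            segsize = 1.0 / 4
+            samples = (segsize * rng.uniform(size=(4, 2))
+                       + np.linspace(0., 1., 4, endpoint=False)[:, np.newaxis])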
+        """
+        rng = self.random_number_generator
+
+        # Each parameter range needs to be sampled uniformly. The scaled
+        # parameter range ([0, 1)) needs to be split into
+        # `self.num_population_members` segments, each of which has the following
+        # size:
+        segsize = 1.0 / self.num_population_members
+
+        # Within each segment we sample from a uniform random distribution.
+        # We need to do this sampling for each parameter.
+        # Offset each segment so that the samples cover the entire parameter
+        # range [0, 1)
+        samples = (segsize * rng.uniform(size=self.population_shape)
+                   + np.linspace(0., 1., self.num_population_members,
+                                 endpoint=False)[:, np.newaxis])
+
+        # Create an array for population of candidate solutions.
+        self.population = np.zeros_like(samples)
+
+        # Initialize population of candidate solutions by permutation of the
+        # random samples.
+        for j in range(self.parameter_count):
+            order = rng.permutation(range(self.num_population_members))
+            self.population[:, j] = samples[order, j]
+
+        # reset population energies
+        self.population_energies = np.full(self.num_population_members,
+                                           np.inf)
+
+        # reset number of function evaluations counter
+        self._nfev = 0
+
+    def init_population_qmc(self, qmc_engine):
+        """Initializes the population with a QMC method.
+
+        QMC methods ensure that each parameter is uniformly
+        sampled over its range.
+
+        Parameters
+        ----------
+        qmc_engine : str
+            The QMC method to use for initialization. Can be one of
+            ``latinhypercube``, ``sobol`` or ``halton``.
+
+        """
+        from scipy.stats import qmc
+
+        rng = self.random_number_generator
+
+        # Create an array for population of candidate solutions.
+        if qmc_engine == 'latinhypercube':
+            sampler = qmc.LatinHypercube(d=self.parameter_count, seed=rng)
+        elif qmc_engine == 'sobol':
+            sampler = qmc.Sobol(d=self.parameter_count, seed=rng)
+        elif qmc_engine == 'halton':
+            sampler = qmc.Halton(d=self.parameter_count, seed=rng)
+        else:
+            raise ValueError(self.__init_error_msg)
+
+        self.population = sampler.random(n=self.num_population_members)
+
+        # reset population energies
+        self.population_energies = np.full(self.num_population_members,
+                                           np.inf)
+
+        # reset number of function evaluations counter
+        self._nfev = 0
+
+    def init_population_random(self):
+        """
+        Initializes the population at random. This type of initialization
+        can possess clustering; Latin Hypercube sampling is generally better.
+        """
+        rng = self.random_number_generator
+        self.population = rng.uniform(size=self.population_shape)
+
+        # reset population energies
+        self.population_energies = np.full(self.num_population_members,
+                                           np.inf)
+
+        # reset number of function evaluations counter
+        self._nfev = 0
+
+    def init_population_array(self, init):
+        """
+        Initializes the population with a user specified population.
+
+        Parameters
+        ----------
+        init : np.ndarray
+            Array specifying the initial population. The array should
+            have shape (S, N), where N is the number of parameters.
+            The population is clipped to the lower and upper bounds.
+        """
+        # make sure you're using a float array
+        popn = np.asfarray(init)
+
+        if (popn.ndim != 2 or
+                np.size(popn, 0) < 5 or
+                popn.shape[1] != self.parameter_count):
+            raise ValueError("The population supplied needs to have shape"
+                             " (S, len(x)), where S > 4.")
+
+        # scale values and clip to bounds, assigning to population
+        self.population = np.clip(self._unscale_parameters(popn), 0, 1)
+
+        self.num_population_members = np.size(self.population, 0)
+
+        self.population_shape = (self.num_population_members,
+                                 self.parameter_count)
+
+        # reset population energies
+        self.population_energies = np.full(self.num_population_members,
+                                           np.inf)
+
+        # reset number of function evaluations counter
+        self._nfev = 0
+
+    @property
+    def x(self):
+        """
+        The best solution from the solver
+        """
+        return self._scale_parameters(self.population[0])
+
+    @property
+    def convergence(self):
+        """
+        The standard deviation of the population energies divided by the
+        absolute value of their mean.
+        """
+        if np.any(np.isinf(self.population_energies)):
+            return np.inf
+        return (np.std(self.population_energies) /
+                np.abs(np.mean(self.population_energies) + _MACHEPS))
+
+    def converged(self):
+        """
+        Return True if the solver has converged.
+        """
+        if np.any(np.isinf(self.population_energies)):
+            return False
+
+        return (np.std(self.population_energies) <=
+                self.atol +
+                self.tol * np.abs(np.mean(self.population_energies)))
+
+    def solve(self):
+        """
+        Runs the DifferentialEvolutionSolver.
+
+        Returns
+        -------
+        res : OptimizeResult
+            The optimization result represented as a ``OptimizeResult`` object.
+            Important attributes are: ``x`` the solution array, ``success`` a
+            Boolean flag indicating if the optimizer exited successfully and
+            ``message`` which describes the cause of the termination. See
+            `OptimizeResult` for a description of other attributes.  If `polish`
+            was employed, and a lower minimum was obtained by the polishing,
+            then OptimizeResult also contains the ``jac`` attribute.
+        """
+        nit, warning_flag = 0, False
+        status_message = _status_message['success']
+
+        # The population energies may have just been initialized (all entries
+        # are np.inf). If so, the initial energies have to be calculated.
+        # Although this is also done in the evolve generator it's possible
+        # that someone can set maxiter=0, at which point we still want the
+        # initial energies to be calculated (the following loop isn't run).
+        if np.all(np.isinf(self.population_energies)):
+            self.feasible, self.constraint_violation = (
+                self._calculate_population_feasibilities(self.population))
+
+            # only work out population energies for feasible solutions
+            self.population_energies[self.feasible] = (
+                self._calculate_population_energies(
+                    self.population[self.feasible]))
+
+            self._promote_lowest_energy()
+
+        # do the optimization.
+        for nit in range(1, self.maxiter + 1):
+            # evolve the population by a generation
+            try:
+                next(self)
+            except StopIteration:
+                warning_flag = True
+                if self._nfev > self.maxfun:
+                    status_message = _status_message['maxfev']
+                elif self._nfev == self.maxfun:
+                    status_message = ('Maximum number of function evaluations'
+                                      ' has been reached.')
+                break
+
+            if self.disp:
+                print("differential_evolution step %d: f(x)= %g"
+                      % (nit,
+                         self.population_energies[0]))
+
+            if self.callback:
+                c = self.tol / (self.convergence + _MACHEPS)
+                warning_flag = bool(self.callback(self.x, convergence=c))
+                if warning_flag:
+                    status_message = ('callback function requested stop early'
+                                      ' by returning True')
+
+            # should the solver terminate?
+            if warning_flag or self.converged():
+                break
+
+        else:
+            status_message = _status_message['maxiter']
+            warning_flag = True
+
+        DE_result = OptimizeResult(
+            x=self.x,
+            fun=self.population_energies[0],
+            nfev=self._nfev,
+            nit=nit,
+            message=status_message,
+            success=(warning_flag is not True))
+
+        if self.polish and not np.all(self.integrality):
+            # can't polish if all the parameters are integers
+            if np.any(self.integrality):
+                # set the lower/upper bounds equal so that any integrality
+                # constraints work.
+                limits, integrality = self.limits, self.integrality
+                limits[0, integrality] = DE_result.x[integrality]
+                limits[1, integrality] = DE_result.x[integrality]
+
+            polish_method = 'L-BFGS-B'
+
+            if self._wrapped_constraints:
+                polish_method = 'trust-constr'
+
+                constr_violation = self._constraint_violation_fn(DE_result.x)
+                if np.any(constr_violation > 0.):
+                    warnings.warn("differential evolution didn't find a"
+                                  " solution satisfying the constraints,"
+                                  " attempting to polish from the least"
+                                  " infeasible solution", UserWarning)
+            if self.disp:
+                print(f"Polishing solution with '{polish_method}'")
+            result = minimize(self.func,
+                              np.copy(DE_result.x),
+                              method=polish_method,
+                              bounds=self.limits.T,
+                              constraints=self.constraints)
+
+            self._nfev += result.nfev
+            DE_result.nfev = self._nfev
+
+            # The polished solution is only accepted if it improves the cost
+            # function, the polish was successful, and the solution lies
+            # within the bounds.
+            if (result.fun < DE_result.fun and
+                    result.success and
+                    np.all(result.x <= self.limits[1]) and
+                    np.all(self.limits[0] <= result.x)):
+                DE_result.fun = result.fun
+                DE_result.x = result.x
+                DE_result.jac = result.jac
+                # to keep internal state consistent
+                self.population_energies[0] = result.fun
+                self.population[0] = self._unscale_parameters(result.x)
+
+        if self._wrapped_constraints:
+            DE_result.constr = [c.violation(DE_result.x) for
+                                c in self._wrapped_constraints]
+            DE_result.constr_violation = np.max(
+                np.concatenate(DE_result.constr))
+            DE_result.maxcv = DE_result.constr_violation
+            if DE_result.maxcv > 0:
+                # if the result is infeasible then success must be False
+                DE_result.success = False
+                DE_result.message = ("The solution does not satisfy the "
+                                     f"constraints, MAXCV = {DE_result.maxcv}")
+
+        return DE_result
+
+    def _calculate_population_energies(self, population):
+        """
+        Calculate the energies of a population.
+
+        Parameters
+        ----------
+        population : ndarray
+            An array of parameter vectors normalised to [0, 1] using lower
+            and upper limits. Has shape ``(np.size(population, 0), N)``.
+
+        Returns
+        -------
+        energies : ndarray
+            An array of energies corresponding to each population member. If
+            maxfun will be exceeded during this call, then the number of
+            function evaluations will be reduced and energies will be
+            right-padded with np.inf. Has shape ``(np.size(population, 0),)``
+        """
+        num_members = np.size(population, 0)
+        # S is the number of function evals left to stay under the
+        # maxfun budget
+        S = min(num_members, self.maxfun - self._nfev)
+
+        energies = np.full(num_members, np.inf)
+
+        parameters_pop = self._scale_parameters(population)
+        try:
+            calc_energies = list(
+                self._mapwrapper(self.func, parameters_pop[0:S])
+            )
+            calc_energies = np.squeeze(calc_energies)
+        except (TypeError, ValueError) as e:
+            # wrong number of arguments for _mapwrapper
+            # or wrong length returned from the mapper
+            raise RuntimeError(
+                "The map-like callable must be of the form f(func, iterable), "
+                "returning a sequence of numbers the same length as 'iterable'"
+            ) from e
+
+        if calc_energies.size != S:
+            if self.vectorized:
+                raise RuntimeError("The vectorized function must return an"
+                                   " array of shape (S,) when given an array"
+                                   " of shape (len(x), S)")
+            raise RuntimeError("func(x, *args) must return a scalar value")
+
+        energies[0:S] = calc_energies
+
+        if self.vectorized:
+            self._nfev += 1
+        else:
+            self._nfev += S
+
+        return energies
+
+    def _promote_lowest_energy(self):
+        # swaps 'best solution' into first population entry
+
+        idx = np.arange(self.num_population_members)
+        feasible_solutions = idx[self.feasible]
+        if feasible_solutions.size:
+            # find the best feasible solution
+            idx_t = np.argmin(self.population_energies[feasible_solutions])
+            l = feasible_solutions[idx_t]
+        else:
+            # no solution was feasible, use 'best' infeasible solution, which
+            # will violate constraints the least
+            l = np.argmin(np.sum(self.constraint_violation, axis=1))
+
+        self.population_energies[[0, l]] = self.population_energies[[l, 0]]
+        self.population[[0, l], :] = self.population[[l, 0], :]
+        self.feasible[[0, l]] = self.feasible[[l, 0]]
+        self.constraint_violation[[0, l], :] = (
+            self.constraint_violation[[l, 0], :])
+
+    def _constraint_violation_fn(self, x):
+        """
+        Calculates total constraint violation for all the constraints, for a
+        set of solutions.
+
+        Parameters
+        ----------
+        x : ndarray
+            Solution vector(s). Has shape (S, N), or (N,), where S is the
+            number of solutions to investigate and N is the number of
+            parameters.
+
+        Returns
+        -------
+        cv : ndarray
+            Total violation of constraints. Has shape ``(S, M)``, where M is
+            the total number of constraint components (which is not necessarily
+            equal to len(self._wrapped_constraints)).
+        """
+        # how many solution vectors you're calculating constraint violations
+        # for
+        S = np.size(x) // self.parameter_count
+        _out = np.zeros((S, self.total_constraints))
+        offset = 0
+        for con in self._wrapped_constraints:
+            # the input/output of the (vectorized) constraint function is
+            # {(N, S), (N,)} --> (M, S)
+            # The input to _constraint_violation_fn is (S, N) or (N,), so
+            # transpose to pass it to the constraint. The output is transposed
+            # from (M, S) to (S, M) for further use.
+            c = con.violation(x.T).T
+
+            # The shape of c should be (M,), (1, M), or (S, M). Check for
+            # those shapes, as an incorrect shape indicates that the
+            # user constraint function didn't return the right thing, and
+            # the reshape operation will fail. Intercept the wrong shape
+            # to give a reasonable error message. I'm not sure what failure
+            # modes an inventive user will come up with.
+            if c.shape[-1] != con.num_constr or (S > 1 and c.shape[0] != S):
+                raise RuntimeError("An array returned from a Constraint has"
+                                   " the wrong shape. If `vectorized is False`"
+                                   " the Constraint should return an array of"
+                                   " shape (M,). If `vectorized is True` then"
+                                   " the Constraint must return an array of"
+                                   " shape (M, S), where S is the number of"
+                                   " solution vectors and M is the number of"
+                                   " constraint components in a given"
+                                   " Constraint object.")
+
+            # the violation function may return a 1D array, but is it a
+            # sequence of constraints for one solution (S=1, M>=1), or the
+            # value of a single constraint for a sequence of solutions
+            # (S>=1, M=1)
+            c = np.reshape(c, (S, con.num_constr))
+            _out[:, offset:offset + con.num_constr] = c
+            offset += con.num_constr
+
+        return _out
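+
+    # Shape bookkeeping example (illustrative): with two wrapped
+    # constraints of 3 and 2 components, total_constraints is 5, so a
+    # call with x of shape (4, N) returns an array of shape (4, 5);
+    # columns 0-2 come from the first constraint and columns 3-4 from
+    # the second.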
+
+    def _calculate_population_feasibilities(self, population):
+        """
+        Calculate the feasibilities of a population.
+
+        Parameters
+        ----------
+        population : ndarray
+            An array of parameter vectors normalised to [0, 1] using lower
+            and upper limits. Has shape ``(np.size(population, 0), N)``.
+
+        Returns
+        -------
+        feasible, constraint_violation : ndarray, ndarray
+            Boolean array of feasibility for each population member, and an
+            array of the constraint violation for each population member.
+            constraint_violation has shape ``(np.size(population, 0), M)``,
+            where M is the number of constraints.
+        """
+        num_members = np.size(population, 0)
+        if not self._wrapped_constraints:
+            # shortcut for no constraints
+            return np.ones(num_members, bool), np.zeros((num_members, 1))
+
+        # (S, N)
+        parameters_pop = self._scale_parameters(population)
+
+        if self.vectorized:
+            # (S, M)
+            constraint_violation = np.array(
+                self._constraint_violation_fn(parameters_pop)
+            )
+        else:
+            # (S, 1, M)
+            constraint_violation = np.array([self._constraint_violation_fn(x)
+                                             for x in parameters_pop])
+            # the list comprehension above creates an array of shape
+            # (S, 1, M), because each iteration generates an array of shape
+            # (1, M), whereas the vectorized version returns (S, M). It's
+            # therefore necessary to remove axis 1 here.
+            constraint_violation = constraint_violation[:, 0]
+
+        feasible = ~(np.sum(constraint_violation, axis=1) > 0)
+
+        return feasible, constraint_violation
+
+    def __iter__(self):
+        return self
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *args):
+        return self._mapwrapper.__exit__(*args)
+
+    def _accept_trial(self, energy_trial, feasible_trial, cv_trial,
+                      energy_orig, feasible_orig, cv_orig):
+        """
+        Trial is accepted if:
+        * it satisfies all constraints and provides a lower or equal objective
+          function value, while both the compared solutions are feasible
+        - or -
+        * it is feasible while the original solution is infeasible,
+        - or -
+        * it is infeasible, but provides a lower or equal constraint violation
+          for all constraint functions.
+
+        This test corresponds to section III of Lampinen [1]_.
+
+        Parameters
+        ----------
+        energy_trial : float
+            Energy of the trial solution
+        feasible_trial : bool
+            Feasibility of trial solution
+        cv_trial : array-like
+            Excess constraint violation for the trial solution
+        energy_orig : float
+            Energy of the original solution
+        feasible_orig : bool
+            Feasibility of original solution
+        cv_orig : array-like
+            Excess constraint violation for the original solution
+
+        Returns
+        -------
+        accepted : bool
+
+        """
+        if feasible_orig and feasible_trial:
+            return energy_trial <= energy_orig
+        elif feasible_trial and not feasible_orig:
+            return True
+        elif not feasible_trial and (cv_trial <= cv_orig).all():
+            # cv_trial < cv_orig would imply that both trial and orig are not
+            # feasible
+            return True
+
+        return False
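+
+    # Worked example of the rules above (illustrative): with both
+    # solutions feasible, _accept_trial(1.0, True, cv, 2.0, True, cv) is
+    # True because 1.0 <= 2.0; an infeasible trial with
+    # cv_trial = np.array([1.]) is accepted over an infeasible original
+    # with cv_orig = np.array([2.]) because no violation component is
+    # worse.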
+
+    def __next__(self):
+        """
+        Evolve the population by a single generation
+
+        Returns
+        -------
+        x : ndarray
+            The best solution from the solver.
+        fun : float
+            Value of objective function obtained from the best solution.
+        """
+        # the population may have just been initialized (all entries are
+        # np.inf). If it has you have to calculate the initial energies
+        if np.all(np.isinf(self.population_energies)):
+            self.feasible, self.constraint_violation = (
+                self._calculate_population_feasibilities(self.population))
+
+            # only need to work out population energies for those that are
+            # feasible
+            self.population_energies[self.feasible] = (
+                self._calculate_population_energies(
+                    self.population[self.feasible]))
+
+            self._promote_lowest_energy()
+
+        if self.dither is not None:
+            self.scale = self.random_number_generator.uniform(self.dither[0],
+                                                              self.dither[1])
+
+        if self._updating == 'immediate':
+            # update best solution immediately
+            for candidate in range(self.num_population_members):
+                if self._nfev > self.maxfun:
+                    raise StopIteration
+
+                # create a trial solution
+                trial = self._mutate(candidate)
+
+                # ensuring that it's in the range [0, 1)
+                self._ensure_constraint(trial)
+
+                # scale from [0, 1) to the actual parameter value
+                parameters = self._scale_parameters(trial)
+
+                # determine the energy of the objective function
+                if self._wrapped_constraints:
+                    cv = self._constraint_violation_fn(parameters)
+                    feasible = False
+                    energy = np.inf
+                    if not np.sum(cv) > 0:
+                        # solution is feasible
+                        feasible = True
+                        energy = self.func(parameters)
+                        self._nfev += 1
+                else:
+                    feasible = True
+                    cv = np.atleast_2d([0.])
+                    energy = self.func(parameters)
+                    self._nfev += 1
+
+                # compare trial and population member
+                if self._accept_trial(energy, feasible, cv,
+                                      self.population_energies[candidate],
+                                      self.feasible[candidate],
+                                      self.constraint_violation[candidate]):
+                    self.population[candidate] = trial
+                    self.population_energies[candidate] = np.squeeze(energy)
+                    self.feasible[candidate] = feasible
+                    self.constraint_violation[candidate] = cv
+
+                    # if the trial candidate is also better than the best
+                    # solution then promote it.
+                    if self._accept_trial(energy, feasible, cv,
+                                          self.population_energies[0],
+                                          self.feasible[0],
+                                          self.constraint_violation[0]):
+                        self._promote_lowest_energy()
+
+        elif self._updating == 'deferred':
+            # update best solution once per generation
+            if self._nfev >= self.maxfun:
+                raise StopIteration
+
+            # 'deferred' approach, vectorised form.
+            # create trial solutions
+            trial_pop = np.array(
+                [self._mutate(i) for i in range(self.num_population_members)])
+
+            # enforce bounds
+            self._ensure_constraint(trial_pop)
+
+            # determine the energies of the objective function, but only for
+            # feasible trials
+            feasible, cv = self._calculate_population_feasibilities(trial_pop)
+            trial_energies = np.full(self.num_population_members, np.inf)
+
+            # only calculate for feasible entries
+            trial_energies[feasible] = self._calculate_population_energies(
+                trial_pop[feasible])
+
+            # which solutions are 'improved'?
+            loc = [self._accept_trial(*val) for val in
+                   zip(trial_energies, feasible, cv, self.population_energies,
+                       self.feasible, self.constraint_violation)]
+            loc = np.array(loc)
+            self.population = np.where(loc[:, np.newaxis],
+                                       trial_pop,
+                                       self.population)
+            self.population_energies = np.where(loc,
+                                                trial_energies,
+                                                self.population_energies)
+            self.feasible = np.where(loc,
+                                     feasible,
+                                     self.feasible)
+            self.constraint_violation = np.where(loc[:, np.newaxis],
+                                                 cv,
+                                                 self.constraint_violation)
+
+            # make sure the best solution is updated if updating='deferred'.
+            # put the lowest energy into the best solution position.
+            self._promote_lowest_energy()
+
+        return self.x, self.population_energies[0]
+
+    def _scale_parameters(self, trial):
+        """Scale from a number between 0 and 1 to parameters."""
+        # trial either has shape (N, ) or (L, N), where L is the number of
+        # solutions being scaled
+        scaled = self.__scale_arg1 + (trial - 0.5) * self.__scale_arg2
+        if np.any(self.integrality):
+            i = np.broadcast_to(self.integrality, scaled.shape)
+            scaled[i] = np.round(scaled[i])
+        return scaled
+
+    def _unscale_parameters(self, parameters):
+        """Scale from parameters to a number between 0 and 1."""
+        return (parameters - self.__scale_arg1) / self.__scale_arg2 + 0.5
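+
+    # Worked round trip (assuming, as set up in __init__, that
+    # __scale_arg1 is the midpoint of the bounds and __scale_arg2 their
+    # extent): for bounds (-4, 4) the midpoint is 0 and the extent is 8,
+    # so the unit value 0.75 scales to 0 + (0.75 - 0.5) * 8 = 2.0, and
+    # _unscale_parameters(2.0) gives 2.0 / 8 + 0.5 = 0.75 back.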
+
+    def _ensure_constraint(self, trial):
+        """Make sure the parameters lie between the limits."""
+        mask = np.where((trial > 1) | (trial < 0))
+        trial[mask] = self.random_number_generator.uniform(size=mask[0].shape)
+
+    def _mutate(self, candidate):
+        """Create a trial vector based on a mutation strategy."""
+        trial = np.copy(self.population[candidate])
+
+        rng = self.random_number_generator
+
+        fill_point = rng.choice(self.parameter_count)
+
+        if self.strategy in ['currenttobest1exp', 'currenttobest1bin']:
+            bprime = self.mutation_func(candidate,
+                                        self._select_samples(candidate, 5))
+        else:
+            bprime = self.mutation_func(self._select_samples(candidate, 5))
+
+        if self.strategy in self._binomial:
+            crossovers = rng.uniform(size=self.parameter_count)
+            crossovers = crossovers < self.cross_over_probability
+            # the last one is always from the bprime vector for binomial
+            # If you fill in modulo with a loop you have to set the last one to
+            # true. If you don't use a loop then you can have any random entry
+            # be True.
+            crossovers[fill_point] = True
+            trial = np.where(crossovers, bprime, trial)
+            return trial
+
+        elif self.strategy in self._exponential:
+            i = 0
+            crossovers = rng.uniform(size=self.parameter_count)
+            crossovers = crossovers < self.cross_over_probability
+            crossovers[0] = True
+            while (i < self.parameter_count and crossovers[i]):
+                trial[fill_point] = bprime[fill_point]
+                fill_point = (fill_point + 1) % self.parameter_count
+                i += 1
+
+            return trial
+
+    def _best1(self, samples):
+        """best1bin, best1exp"""
+        r0, r1 = samples[:2]
+        return (self.population[0] + self.scale *
+                (self.population[r0] - self.population[r1]))
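+
+    # For instance (illustrative): with best member population[0], two
+    # distinct random members r0 and r1, and mutation factor
+    # self.scale = 0.5, _best1 returns
+    # population[0] + 0.5 * (population[r0] - population[r1]); the
+    # strategies below differ only in which members anchor the
+    # difference terms.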
+
+    def _rand1(self, samples):
+        """rand1bin, rand1exp"""
+        r0, r1, r2 = samples[:3]
+        return (self.population[r0] + self.scale *
+                (self.population[r1] - self.population[r2]))
+
+    def _randtobest1(self, samples):
+        """randtobest1bin, randtobest1exp"""
+        r0, r1, r2 = samples[:3]
+        bprime = np.copy(self.population[r0])
+        bprime += self.scale * (self.population[0] - bprime)
+        bprime += self.scale * (self.population[r1] -
+                                self.population[r2])
+        return bprime
+
+    def _currenttobest1(self, candidate, samples):
+        """currenttobest1bin, currenttobest1exp"""
+        r0, r1 = samples[:2]
+        bprime = (self.population[candidate] + self.scale *
+                  (self.population[0] - self.population[candidate] +
+                   self.population[r0] - self.population[r1]))
+        return bprime
+
+    def _best2(self, samples):
+        """best2bin, best2exp"""
+        r0, r1, r2, r3 = samples[:4]
+        bprime = (self.population[0] + self.scale *
+                  (self.population[r0] + self.population[r1] -
+                   self.population[r2] - self.population[r3]))
+
+        return bprime
+
+    def _rand2(self, samples):
+        """rand2bin, rand2exp"""
+        r0, r1, r2, r3, r4 = samples
+        bprime = (self.population[r0] + self.scale *
+                  (self.population[r1] + self.population[r2] -
+                   self.population[r3] - self.population[r4]))
+
+        return bprime
+
+    def _select_samples(self, candidate, number_samples):
+        """
+        obtain random integers from range(self.num_population_members),
+        without replacement. You can't have the original candidate either.
+        """
+        idxs = list(range(self.num_population_members))
+        idxs.remove(candidate)
+        self.random_number_generator.shuffle(idxs)
+        idxs = idxs[:number_samples]
+        return idxs
+
+
+class _ConstraintWrapper:
+    """Object to wrap/evaluate user defined constraints.
+
+    Very similar in practice to `PreparedConstraint`, except that no evaluation
+    of jac/hess is performed (explicit or implicit).
+
+    If created successfully, it will contain the attributes listed below.
+
+    Parameters
+    ----------
+    constraint : {`NonlinearConstraint`, `LinearConstraint`, `Bounds`}
+        Constraint to check and prepare.
+    x0 : array_like
+        Initial vector of independent variables, shape (N,)
+
+    Attributes
+    ----------
+    fun : callable
+        Function defining the constraint wrapped by one of the convenience
+        classes.
+    bounds : 2-tuple
+        Contains lower and upper bounds for the constraints --- lb and ub.
+        These are converted to ndarray and have a size equal to the number of
+        the constraints.
+    """
+    def __init__(self, constraint, x0):
+        self.constraint = constraint
+
+        if isinstance(constraint, NonlinearConstraint):
+            def fun(x):
+                x = np.asarray(x)
+                return np.atleast_1d(constraint.fun(x))
+        elif isinstance(constraint, LinearConstraint):
+            def fun(x):
+                if issparse(constraint.A):
+                    A = constraint.A
+                else:
+                    A = np.atleast_2d(constraint.A)
+                return A.dot(x)
+        elif isinstance(constraint, Bounds):
+            def fun(x):
+                return np.asarray(x)
+        else:
+            raise ValueError("`constraint` of an unknown type is passed.")
+
+        self.fun = fun
+
+        lb = np.asarray(constraint.lb, dtype=float)
+        ub = np.asarray(constraint.ub, dtype=float)
+
+        x0 = np.asarray(x0)
+
+        # find out the number of constraints
+        f0 = fun(x0)
+        self.num_constr = m = f0.size
+        self.parameter_count = x0.size
+
+        if lb.ndim == 0:
+            lb = np.resize(lb, m)
+        if ub.ndim == 0:
+            ub = np.resize(ub, m)
+
+        self.bounds = (lb, ub)
+
+    def __call__(self, x):
+        return np.atleast_1d(self.fun(x))
+
+    def violation(self, x):
+        """How much the constraint is exceeded by.
+
+        Parameters
+        ----------
+        x : array-like
+            Vector of independent variables, (N, S), where N is number of
+            parameters and S is the number of solutions to be investigated.
+
+        Returns
+        -------
+        excess : array-like
+            How much the constraint is exceeded by, for each of the
+            constraints specified by `_ConstraintWrapper.fun`.
+            Has shape (M, S) where M is the number of constraint components.
+        """
+        # expect ev to have shape (num_constr, S) or (num_constr,)
+        ev = self.fun(np.asarray(x))
+
+        try:
+            excess_lb = np.maximum(self.bounds[0] - ev.T, 0)
+            excess_ub = np.maximum(ev.T - self.bounds[1], 0)
+        except ValueError as e:
+            raise RuntimeError("An array returned from a Constraint has"
+                               " the wrong shape. If `vectorized is False`"
+                               " the Constraint should return an array of"
+                               " shape (M,). If `vectorized is True` then"
+                               " the Constraint must return an array of"
+                               " shape (M, S), where S is the number of"
+                               " solution vectors and M is the number of"
+                               " constraint components in a given"
+                               " Constraint object.") from e
+
+        v = (excess_lb + excess_ub).T
+        return v
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_direct_py.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_direct_py.py
new file mode 100644
index 00000000..b0e830dc
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_direct_py.py
@@ -0,0 +1,279 @@
+from __future__ import annotations
+from typing import (
+    Any, Callable, Iterable, Optional, Tuple, TYPE_CHECKING, Union
+)
+
+import numpy as np
+from scipy.optimize import OptimizeResult
+from ._constraints import old_bound_to_new, Bounds
+from ._direct import direct as _direct  # type: ignore
+
+if TYPE_CHECKING:
+    import numpy.typing as npt
+    from _typeshed import NoneType
+
+__all__ = ['direct']
+
+ERROR_MESSAGES = (
+    "Number of function evaluations done is larger than maxfun={}",
+    "Number of iterations is larger than maxiter={}",
+    "u[i] < l[i] for some i",
+    "maxfun is too large",
+    "Initialization failed",
+    "There was an error in the creation of the sample points",
+    "An error occured while the function was sampled",
+    "Maximum number of levels has been reached.",
+    "Forced stop",
+    "Invalid arguments",
+    "Out of memory",
+)
+
+SUCCESS_MESSAGES = (
+    ("The best function value found is within a relative error={} "
+     "of the (known) global optimum f_min"),
+    ("The volume of the hyperrectangle containing the lowest function value "
+     "found is below vol_tol={}"),
+    ("The side length measure of the hyperrectangle containing the lowest "
+     "function value found is below len_tol={}"),
+)
+
+
+def direct(
+    func: Callable[[npt.ArrayLike, Tuple[Any]], float],
+    bounds: Union[Iterable, Bounds],
+    *,
+    args: tuple = (),
+    eps: float = 1e-4,
+    maxfun: Union[int, None] = None,
+    maxiter: int = 1000,
+    locally_biased: bool = True,
+    f_min: float = -np.inf,
+    f_min_rtol: float = 1e-4,
+    vol_tol: float = 1e-16,
+    len_tol: float = 1e-6,
+    callback: Optional[Callable[[npt.ArrayLike], NoneType]] = None
+) -> OptimizeResult:
+    """
+    Finds the global minimum of a function using the
+    DIRECT algorithm.
+
+    Parameters
+    ----------
+    func : callable
+        The objective function to be minimized.
+        ``func(x, *args) -> float``
+        where ``x`` is an 1-D array with shape (n,) and ``args`` is a tuple of
+        the fixed parameters needed to completely specify the function.
+    bounds : sequence or `Bounds`
+        Bounds for variables. There are two ways to specify the bounds:
+
+        1. Instance of `Bounds` class.
+        2. ``(min, max)`` pairs for each element in ``x``.
+
+    args : tuple, optional
+        Any additional fixed parameters needed to
+        completely specify the objective function.
+    eps : float, optional
+        Minimal required difference of the objective function values
+        between the current best hyperrectangle and the next potentially
+        optimal hyperrectangle to be divided. Consequently, `eps` serves as a
+        trade-off between local and global search: the smaller `eps`, the
+        more local the search becomes. Default is 1e-4.
+    maxfun : int or None, optional
+        Approximate upper bound on objective function evaluations.
+        If `None`, will be automatically set to ``1000 * N`` where ``N``
+        represents the number of dimensions. Will be capped if necessary to
+        limit DIRECT's RAM usage to approximately 1 GiB. This will only
+        occur for very high-dimensional problems and excessive `maxfun`.
+        Default is `None`.
+    maxiter : int, optional
+        Maximum number of iterations. Default is 1000.
+    locally_biased : bool, optional
+        If `True` (default), use the locally biased variant of the
+        algorithm known as DIRECT_L. If `False`, use the original unbiased
+        DIRECT algorithm. For hard problems with many local minima,
+        `False` is recommended.
+    f_min : float, optional
+        Function value of the global optimum. Set this value only if the
+        global optimum is known. Default is ``-np.inf``, so that this
+        termination criterion is deactivated.
+    f_min_rtol : float, optional
+        Terminate the optimization once the relative error between the
+        current best minimum `f` and the supplied global minimum `f_min`
+        is smaller than `f_min_rtol`. This parameter is only used if
+        `f_min` is also set. Must lie between 0 and 1. Default is 1e-4.
+    vol_tol : float, optional
+        Terminate the optimization once the volume of the hyperrectangle
+        containing the lowest function value is smaller than `vol_tol`
+        of the complete search space. Must lie between 0 and 1.
+        Default is 1e-16.
+    len_tol : float, optional
+        If `locally_biased=True`, terminate the optimization once half of
+        the normalized maximal side length of the hyperrectangle containing
+        the lowest function value is smaller than `len_tol`.
+        If `locally_biased=False`, terminate the optimization once half of
+        the normalized diagonal of the hyperrectangle containing the lowest
+        function value is smaller than `len_tol`. Must lie between 0 and 1.
+        Default is 1e-6.
+    callback : callable, optional
+        A callback function with signature ``callback(xk)`` where ``xk``
+        represents the best solution found so far.
+
+    Returns
+    -------
+    res : OptimizeResult
+        The optimization result represented as a ``OptimizeResult`` object.
+        Important attributes are: ``x`` the solution array, ``success`` a
+        Boolean flag indicating if the optimizer exited successfully and
+        ``message`` which describes the cause of the termination. See
+        `OptimizeResult` for a description of other attributes.
+
+    Notes
+    -----
+    DIviding RECTangles (DIRECT) is a deterministic global
+    optimization algorithm capable of minimizing a black box function with
+    its variables subject to lower and upper bound constraints by sampling
+    potential solutions in the search space [1]_. The algorithm starts by
+    normalising the search space to an n-dimensional unit hypercube.
+    It samples the function at the center of this hypercube and at 2n
+    (n is the number of variables) more points, 2 in each coordinate
+    direction. Using these function values, DIRECT then divides the
+    domain into hyperrectangles, each having exactly one of the sampling
+    points as its center. In each iteration, DIRECT chooses, using the `eps`
+    parameter which defaults to 1e-4, some of the existing hyperrectangles
+    to be further divided. This division process continues until either the
+    maximum number of iterations or maximum function evaluations allowed
+    are exceeded, or the hyperrectangle containing the minimal value found
+    so far becomes small enough. If `f_min` is specified, the optimization
+    will stop once this function value is reached within a relative tolerance.
+    The locally biased variant of DIRECT (originally called DIRECT_L) [2]_ is
+    used by default. It makes the search more locally biased and more
+    efficient for cases with only a few local minima.
+
+    A note about termination criteria: `vol_tol` refers to the volume of the
+    hyperrectangle containing the lowest function value found so far. This
+    volume decreases exponentially with increasing dimensionality of the
+    problem. Therefore `vol_tol` should be decreased to avoid premature
+    termination of the algorithm for higher dimensions. This does not hold
+    for `len_tol`: it refers either to half of the maximal side length
+    (for ``locally_biased=True``) or half of the diagonal of the
+    hyperrectangle (for ``locally_biased=False``).
+
+    This code is based on the DIRECT 2.0.4 Fortran code by Gablonsky et al. at
+    https://ctk.math.ncsu.edu/SOFTWARE/DIRECTv204.tar.gz .
+    This original version was initially converted via f2c and then cleaned up
+    and reorganized by Steven G. Johnson, August 2007, for the NLopt project.
+    The `direct` function wraps the C implementation.
+
+    .. versionadded:: 1.9.0
+
+    References
+    ----------
+    .. [1] Jones, D.R., Perttunen, C.D. & Stuckman, B.E. Lipschitzian
+        optimization without the Lipschitz constant. J Optim Theory Appl
+        79, 157-181 (1993).
+    .. [2] Gablonsky, J., Kelley, C. A Locally-Biased form of the DIRECT
+        Algorithm. Journal of Global Optimization 21, 27-37 (2001).
+
+    Examples
+    --------
+    The following example is a 2-D problem with four local minima: minimizing
+    the Styblinski-Tang function
+    (https://en.wikipedia.org/wiki/Test_functions_for_optimization).
+
+    >>> from scipy.optimize import direct, Bounds
+    >>> def styblinski_tang(pos):
+    ...     x, y = pos
+    ...     return 0.5 * (x**4 - 16*x**2 + 5*x + y**4 - 16*y**2 + 5*y)
+    >>> bounds = Bounds([-4., -4.], [4., 4.])
+    >>> result = direct(styblinski_tang, bounds)
+    >>> result.x, result.fun, result.nfev
+    array([-2.90321597, -2.90321597]), -78.3323279095383, 2011
+
+    The correct global minimum was found but with a huge number of function
+    evaluations (2011). Loosening the termination tolerances `vol_tol` and
+    `len_tol` can be used to stop DIRECT earlier.
+
+    >>> result = direct(styblinski_tang, bounds, len_tol=1e-3)
+    >>> result.x, result.fun, result.nfev
+    array([-2.9044353, -2.9044353]), -78.33230330754142, 207
+
+    """
+    # convert bounds to new Bounds class if necessary
+    if not isinstance(bounds, Bounds):
+        if isinstance(bounds, list) or isinstance(bounds, tuple):
+            lb, ub = old_bound_to_new(bounds)
+            bounds = Bounds(lb, ub)
+        else:
+            message = ("bounds must be a sequence or "
+                       "instance of Bounds class")
+            raise ValueError(message)
+
+    lb = np.ascontiguousarray(bounds.lb, dtype=np.float64)
+    ub = np.ascontiguousarray(bounds.ub, dtype=np.float64)
+
+    # validate bounds
+    # check that lower bounds are smaller than upper bounds
+    if not np.all(lb < ub):
+        raise ValueError('Bounds are not consistent: min < max is required')
+    # check for infs
+    if (np.any(np.isinf(lb)) or np.any(np.isinf(ub))):
+        raise ValueError("Bounds must not be inf.")
+
+    # validate tolerances
+    if (vol_tol < 0 or vol_tol > 1):
+        raise ValueError("vol_tol must be between 0 and 1.")
+    if (len_tol < 0 or len_tol > 1):
+        raise ValueError("len_tol must be between 0 and 1.")
+    if (f_min_rtol < 0 or f_min_rtol > 1):
+        raise ValueError("f_min_rtol must be between 0 and 1.")
+
+    # validate maxfun and maxiter
+    if maxfun is None:
+        maxfun = 1000 * lb.shape[0]
+    if not isinstance(maxfun, int):
+        raise ValueError("maxfun must be of type int.")
+    if maxfun < 0:
+        raise ValueError("maxfun must be > 0.")
+    if not isinstance(maxiter, int):
+        raise ValueError("maxiter must be of type int.")
+    if maxiter < 0:
+        raise ValueError("maxiter must be > 0.")
+
+    # validate boolean parameters
+    if not isinstance(locally_biased, bool):
+        raise ValueError("locally_biased must be True or False.")
+
+    def _func_wrap(x, args=None):
+        x = np.asarray(x)
+        if args is None:
+            f = func(x)
+        else:
+            f = func(x, *args)
+        # always return a float
+        return np.asarray(f).item()
+
+    # TODO: fix disp argument
+    x, fun, ret_code, nfev, nit = _direct(
+        _func_wrap,
+        np.asarray(lb), np.asarray(ub),
+        args,
+        False, eps, maxfun, maxiter,
+        locally_biased,
+        f_min, f_min_rtol,
+        vol_tol, len_tol, callback
+    )
+
+    format_val = (maxfun, maxiter, f_min_rtol, vol_tol, len_tol)
+    if ret_code > 2:
+        message = SUCCESS_MESSAGES[ret_code - 3].format(
+                    format_val[ret_code - 1])
+    elif 0 < ret_code <= 2:
+        message = ERROR_MESSAGES[ret_code - 1].format(format_val[ret_code - 1])
+    elif 0 > ret_code > -100:
+        message = ERROR_MESSAGES[abs(ret_code) + 1]
+    else:
+        message = ERROR_MESSAGES[ret_code + 99]
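+
+    # Illustrative mapping (sketch): ret_code == 4 selects
+    # SUCCESS_MESSAGES[1] (the vol_tol criterion) formatted with
+    # format_val[3], i.e. vol_tol; ret_code == -1 selects
+    # ERROR_MESSAGES[2] ("u[i] < l[i] for some i").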
+
+    return OptimizeResult(x=np.asarray(x), fun=fun, status=ret_code,
+                          success=ret_code > 2, message=message,
+                          nfev=nfev, nit=nit)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_dual_annealing.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_dual_annealing.py
new file mode 100644
index 00000000..30c03496
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_dual_annealing.py
@@ -0,0 +1,711 @@
+# Dual Annealing implementation.
+# Copyright (c) 2018 Sylvain Gubian,
+# Yang Xiang
+# Author: Sylvain Gubian, Yang Xiang, PMP S.A.
+
+"""
+A Dual Annealing global optimization algorithm
+"""
+
+import numpy as np
+from scipy.optimize import OptimizeResult
+from scipy.optimize import minimize, Bounds
+from scipy.special import gammaln
+from scipy._lib._util import check_random_state
+from scipy.optimize._constraints import new_bounds_to_old
+
+__all__ = ['dual_annealing']
+
+
+class VisitingDistribution:
+    """
+    Class used to generate new coordinates based on the distorted
+    Cauchy-Lorentz distribution. Depending on the steps within the strategy
+    chain, the class implements the strategy for generating new location
+    changes.
+
+    Parameters
+    ----------
+    lb : array_like
+        A 1-D NumPy ndarray containing lower bounds of the generated
+        components. Neither NaN nor inf is allowed.
+    ub : array_like
+        A 1-D NumPy ndarray containing upper bounds for the generated
+        components. Neither NaN nor inf is allowed.
+    visiting_param : float
+        Parameter for visiting distribution. Default value is 2.62.
+        Higher values give the visiting distribution a heavier tail, which
+        makes the algorithm jump to a more distant region.
+        The value range is (1, 3]. Its value is fixed for the life of the
+        object.
+    rand_gen : {`~numpy.random.RandomState`, `~numpy.random.Generator`}
+        A `~numpy.random.RandomState`, `~numpy.random.Generator` object
+        for using the current state of the created random generator container.
+
+    """
+    TAIL_LIMIT = 1.e8
+    MIN_VISIT_BOUND = 1.e-10
+
+    def __init__(self, lb, ub, visiting_param, rand_gen):
+        # if you wish to make _visiting_param adjustable during the life of
+        # the object then _factor2, _factor3, _factor5, _d1, _factor6 will
+        # have to be dynamically calculated in `visit_fn`. They're factored
+        # out here so they don't need to be recalculated all the time.
+        self._visiting_param = visiting_param
+        self.rand_gen = rand_gen
+        self.lower = lb
+        self.upper = ub
+        self.bound_range = ub - lb
+
+        # these are invariant numbers unless visiting_param changes
+        self._factor2 = np.exp((4.0 - self._visiting_param) * np.log(
+            self._visiting_param - 1.0))
+        self._factor3 = np.exp((2.0 - self._visiting_param) * np.log(2.0)
+                               / (self._visiting_param - 1.0))
+        self._factor4_p = np.sqrt(np.pi) * self._factor2 / (self._factor3 * (
+            3.0 - self._visiting_param))
+
+        self._factor5 = 1.0 / (self._visiting_param - 1.0) - 0.5
+        self._d1 = 2.0 - self._factor5
+        self._factor6 = np.pi * (1.0 - self._factor5) / np.sin(
+            np.pi * (1.0 - self._factor5)) / np.exp(gammaln(self._d1))
+
+    def visiting(self, x, step, temperature):
+        """ Based on the step in the strategy chain, new coordinates are
+        generated by changing all components is the same time or only
+        one of them, the new values are computed with visit_fn method
+        """
+        dim = x.size
+        if step < dim:
+            # Changing all coordinates with a new visiting value
+            visits = self.visit_fn(temperature, dim)
+            upper_sample, lower_sample = self.rand_gen.uniform(size=2)
+            visits[visits > self.TAIL_LIMIT] = self.TAIL_LIMIT * upper_sample
+            visits[visits < -self.TAIL_LIMIT] = -self.TAIL_LIMIT * lower_sample
+            x_visit = visits + x
+            a = x_visit - self.lower
+            b = np.fmod(a, self.bound_range) + self.bound_range
+            x_visit = np.fmod(b, self.bound_range) + self.lower
+            x_visit[np.fabs(
+                x_visit - self.lower) < self.MIN_VISIT_BOUND] += 1.e-10
+        else:
+            # Changing only one coordinate at a time based on strategy
+            # chain step
+            x_visit = np.copy(x)
+            visit = self.visit_fn(temperature, 1)[0]
+            if visit > self.TAIL_LIMIT:
+                visit = self.TAIL_LIMIT * self.rand_gen.uniform()
+            elif visit < -self.TAIL_LIMIT:
+                visit = -self.TAIL_LIMIT * self.rand_gen.uniform()
+            index = step - dim
+            x_visit[index] = visit + x[index]
+            a = x_visit[index] - self.lower[index]
+            b = np.fmod(a, self.bound_range[index]) + self.bound_range[index]
+            x_visit[index] = np.fmod(b, self.bound_range[
+                index]) + self.lower[index]
+            if np.fabs(x_visit[index] - self.lower[
+                    index]) < self.MIN_VISIT_BOUND:
+                x_visit[index] += self.MIN_VISIT_BOUND
+        return x_visit
+
+    def visit_fn(self, temperature, dim):
+        """ Formula Visita from p. 405 of reference [2] """
+        x, y = self.rand_gen.normal(size=(dim, 2)).T
+
+        factor1 = np.exp(np.log(temperature) / (self._visiting_param - 1.0))
+        factor4 = self._factor4_p * factor1
+
+        # sigmax
+        x *= np.exp(-(self._visiting_param - 1.0) * np.log(
+            self._factor6 / factor4) / (3.0 - self._visiting_param))
+
+        den = np.exp((self._visiting_param - 1.0) * np.log(np.fabs(y)) /
+                     (3.0 - self._visiting_param))
+
+        return x / den
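+
+    # Illustrative usage (a hedged sketch assuming a seeded NumPy
+    # Generator, not part of the optimizer itself):
+    #
+    #     vd = VisitingDistribution(np.array([-5.]), np.array([5.]),
+    #                               2.62, np.random.default_rng(0))
+    #     x_new = vd.visiting(np.array([0.]), step=0, temperature=5230.)
+    #
+    # With the default visiting_param of 2.62 most steps stay local,
+    # while the heavy tail occasionally produces a distant jump.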
+
+
+class EnergyState:
+    """
+    Class used to record the energy state. At any time, it knows the
+    currently used coordinates and the most recent best location.
+
+    Parameters
+    ----------
+    lower : array_like
+        A 1-D NumPy ndarray containing lower bounds for generating the
+        initial random components in the `reset` method.
+    upper : array_like
+        A 1-D NumPy ndarray containing upper bounds for generating the
+        initial random components in the `reset` method. Neither NaN nor
+        inf is allowed.
+    callback : callable, ``callback(x, f, context)``, optional
+        A callback function which will be called for all minima found.
+        ``x`` and ``f`` are the coordinates and function value of the
+        latest minimum found, and `context` has value in [0, 1, 2]
+    """
+    # Maximum number of trials for generating a valid starting point
+    MAX_REINIT_COUNT = 1000
+
+    def __init__(self, lower, upper, callback=None):
+        self.ebest = None
+        self.current_energy = None
+        self.current_location = None
+        self.xbest = None
+        self.lower = lower
+        self.upper = upper
+        self.callback = callback
+
+    def reset(self, func_wrapper, rand_gen, x0=None):
+        """
+        Initialize the current location in the search domain. If `x0` is not
+        provided, a random location within the bounds is generated.
+        """
+        if x0 is None:
+            self.current_location = rand_gen.uniform(self.lower, self.upper,
+                                                     size=len(self.lower))
+        else:
+            self.current_location = np.copy(x0)
+        init_error = True
+        reinit_counter = 0
+        while init_error:
+            self.current_energy = func_wrapper.fun(self.current_location)
+            if self.current_energy is None:
+                raise ValueError('Objective function is returning None')
+            if (not np.isfinite(self.current_energy) or np.isnan(
+                    self.current_energy)):
+                if reinit_counter >= EnergyState.MAX_REINIT_COUNT:
+                    init_error = False
+                    message = (
+                        'Stopping algorithm because the function '
+                        'creates NaN or (+/-) infinity values, even when '
+                        'trying new random parameters'
+                    )
+                    raise ValueError(message)
+                self.current_location = rand_gen.uniform(self.lower,
+                                                         self.upper,
+                                                         size=self.lower.size)
+                reinit_counter += 1
+            else:
+                init_error = False
+            # If first time reset, initialize ebest and xbest
+            if self.ebest is None and self.xbest is None:
+                self.ebest = self.current_energy
+                self.xbest = np.copy(self.current_location)
+            # Otherwise, we keep them in case of reannealing reset
+
+    def update_best(self, e, x, context):
+        self.ebest = e
+        self.xbest = np.copy(x)
+        if self.callback is not None:
+            val = self.callback(x, e, context)
+            if val is not None:
+                if val:
+                    return ('Callback function requested to stop early by '
+                            'returning True')
+
+    def update_current(self, e, x):
+        self.current_energy = e
+        self.current_location = np.copy(x)
+
+
+class StrategyChain:
+    """
+    Class that implements within a Markov chain the strategy for location
+    acceptance and local search decision making.
+
+    Parameters
+    ----------
+    acceptance_param : float
+        Parameter for acceptance distribution. It is used to control the
+        probability of acceptance. The lower the acceptance parameter, the
+        smaller the probability of acceptance. Default value is -5.0 with
+        a range (-1e4, -5].
+    visit_dist : VisitingDistribution
+        Instance of `VisitingDistribution` class.
+    func_wrapper : ObjectiveFunWrapper
+        Instance of `ObjectiveFunWrapper` class.
+    minimizer_wrapper: LocalSearchWrapper
+        Instance of `LocalSearchWrapper` class.
+    rand_gen : {None, int, `numpy.random.Generator`,
+                `numpy.random.RandomState`}, optional
+
+        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
+        singleton is used.
+        If `seed` is an int, a new ``RandomState`` instance is used,
+        seeded with `seed`.
+        If `seed` is already a ``Generator`` or ``RandomState`` instance then
+        that instance is used.
+    energy_state: EnergyState
+        Instance of `EnergyState` class.
+
+    """
+
+    def __init__(self, acceptance_param, visit_dist, func_wrapper,
+                 minimizer_wrapper, rand_gen, energy_state):
+        # Local strategy chain minimum energy and location
+        self.emin = energy_state.current_energy
+        self.xmin = np.array(energy_state.current_location)
+        # Global optimizer state
+        self.energy_state = energy_state
+        # Acceptance parameter
+        self.acceptance_param = acceptance_param
+        # Visiting distribution instance
+        self.visit_dist = visit_dist
+        # Wrapper to objective function
+        self.func_wrapper = func_wrapper
+        # Wrapper to the local minimizer
+        self.minimizer_wrapper = minimizer_wrapper
+        self.not_improved_idx = 0
+        self.not_improved_max_idx = 1000
+        self._rand_gen = rand_gen
+        self.temperature_step = 0
+        self.K = 100 * len(energy_state.current_location)
+
+    def accept_reject(self, j, e, x_visit):
+        r = self._rand_gen.uniform()
+        pqv_temp = 1.0 - ((1.0 - self.acceptance_param) *
+                          (e - self.energy_state.current_energy) /
+                          self.temperature_step)
+        if pqv_temp <= 0.:
+            pqv = 0.
+        else:
+            pqv = np.exp(np.log(pqv_temp) / (
+                1. - self.acceptance_param))
+
+        if r <= pqv:
+            # We accept the new location and update state
+            self.energy_state.update_current(e, x_visit)
+            self.xmin = np.copy(self.energy_state.current_location)
+
+        # No improvement for a long time
+        if self.not_improved_idx >= self.not_improved_max_idx:
+            if j == 0 or self.energy_state.current_energy < self.emin:
+                self.emin = self.energy_state.current_energy
+                self.xmin = np.copy(self.energy_state.current_location)
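+
+    # Worked example (illustrative): with the default acceptance_param of
+    # -5.0, an uphill move of e - current_energy = 1.0 at
+    # temperature_step = 2.0 gives pqv_temp = 1 - 6 * 1.0 / 2.0 = -2, so
+    # pqv = 0 and the move is always rejected; a smaller uphill move of
+    # 0.1 gives pqv_temp = 0.7 and pqv = 0.7 ** (1 / 6) ~= 0.94.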
+
+    def run(self, step, temperature):
+        self.temperature_step = temperature / float(step + 1)
+        self.not_improved_idx += 1
+        for j in range(self.energy_state.current_location.size * 2):
+            if j == 0:
+                if step == 0:
+                    self.energy_state_improved = True
+                else:
+                    self.energy_state_improved = False
+            x_visit = self.visit_dist.visiting(
+                self.energy_state.current_location, j, temperature)
+            # Calling the objective function
+            e = self.func_wrapper.fun(x_visit)
+            if e < self.energy_state.current_energy:
+                # We have got a better energy value
+                self.energy_state.update_current(e, x_visit)
+                if e < self.energy_state.ebest:
+                    val = self.energy_state.update_best(e, x_visit, 0)
+                    if val is not None:
+                        if val:
+                            return val
+                    self.energy_state_improved = True
+                    self.not_improved_idx = 0
+            else:
+                # We have not improved but do we accept the new location?
+                self.accept_reject(j, e, x_visit)
+            if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
+                return ('Maximum number of function calls reached '
+                        'during annealing')
+        # End of StrategyChain loop
+
+    def local_search(self):
+        # Decision making for performing a local search
+        # based on strategy chain results
+        # If the energy has improved, or there has been no improvement for
+        # too long, perform a local search from the best strategy chain
+        # location
+        if self.energy_state_improved:
+            # Global energy has improved, let's see if LS improves further
+            e, x = self.minimizer_wrapper.local_search(self.energy_state.xbest,
+                                                       self.energy_state.ebest)
+            if e < self.energy_state.ebest:
+                self.not_improved_idx = 0
+                val = self.energy_state.update_best(e, x, 1)
+                if val is not None:
+                    if val:
+                        return val
+                self.energy_state.update_current(e, x)
+            if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
+                return ('Maximum number of function calls reached '
+                        'during local search')
+        # Check probability of a need to perform a LS even if no improvement
+        do_ls = False
+        if self.K < 90 * len(self.energy_state.current_location):
+            pls = np.exp(self.K * (
+                self.energy_state.ebest - self.energy_state.current_energy) /
+                self.temperature_step)
+            if pls >= self._rand_gen.uniform():
+                do_ls = True
+        # Global energy not improved, let's see what LS gives
+        # on the best strategy chain location
+        if self.not_improved_idx >= self.not_improved_max_idx:
+            do_ls = True
+        if do_ls:
+            e, x = self.minimizer_wrapper.local_search(self.xmin, self.emin)
+            self.xmin = np.copy(x)
+            self.emin = e
+            self.not_improved_idx = 0
+            self.not_improved_max_idx = self.energy_state.current_location.size
+            if e < self.energy_state.ebest:
+                val = self.energy_state.update_best(
+                    self.emin, self.xmin, 2)
+                if val is not None:
+                    if val:
+                        return val
+                self.energy_state.update_current(e, x)
+            if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
+                return ('Maximum number of function calls reached '
+                        'during dual annealing')
+
+
+class ObjectiveFunWrapper:
+
+    def __init__(self, func, maxfun=1e7, *args):
+        self.func = func
+        self.args = args
+        # Number of objective function evaluations
+        self.nfev = 0
+        # Number of gradient function evaluation if used
+        self.ngev = 0
+        # Number of hessian of the objective function if used
+        self.nhev = 0
+        self.maxfun = maxfun
+
+    def fun(self, x):
+        self.nfev += 1
+        return self.func(x, *self.args)
+
+
+class LocalSearchWrapper:
+    """
+    Class used to wrap around the minimizer used for local search
+    Default local minimizer is SciPy minimizer L-BFGS-B
+    """
+
+    LS_MAXITER_RATIO = 6
+    LS_MAXITER_MIN = 100
+    LS_MAXITER_MAX = 1000
+
+    def __init__(self, search_bounds, func_wrapper, **kwargs):
+        self.func_wrapper = func_wrapper
+        self.kwargs = kwargs
+        self.minimizer = minimize
+        bounds_list = list(zip(*search_bounds))
+        self.lower = np.array(bounds_list[0])
+        self.upper = np.array(bounds_list[1])
+
+        # If no minimizer specified, use SciPy minimize with 'L-BFGS-B' method
+        if not self.kwargs:
+            n = len(self.lower)
+            ls_max_iter = min(max(n * self.LS_MAXITER_RATIO,
+                                  self.LS_MAXITER_MIN),
+                              self.LS_MAXITER_MAX)
+            self.kwargs['method'] = 'L-BFGS-B'
+            self.kwargs['options'] = {
+                'maxiter': ls_max_iter,
+            }
+            self.kwargs['bounds'] = list(zip(self.lower, self.upper))
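+
+    # For example (illustrative): with n = 10 parameters the default
+    # budget is min(max(10 * 6, 100), 1000) = 100 L-BFGS-B iterations,
+    # and with n = 50 it is min(max(300, 100), 1000) = 300.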
+
+    def local_search(self, x, e):
+        # Run local search from the given x location where energy value is e
+        x_tmp = np.copy(x)
+        mres = self.minimizer(self.func_wrapper.fun, x, **self.kwargs)
+        if 'njev' in mres:
+            self.func_wrapper.ngev += mres.njev
+        if 'nhev' in mres:
+            self.func_wrapper.nhev += mres.nhev
+        # Check if the result is a valid value
+        is_finite = np.all(np.isfinite(mres.x)) and np.isfinite(mres.fun)
+        in_bounds = np.all(mres.x >= self.lower) and np.all(
+            mres.x <= self.upper)
+        is_valid = is_finite and in_bounds
+
+        # Use the new point only if it is valid and returns a better result
+        if is_valid and mres.fun < e:
+            return mres.fun, mres.x
+        else:
+            return e, x_tmp
+
+
+def dual_annealing(func, bounds, args=(), maxiter=1000,
+                   minimizer_kwargs=None, initial_temp=5230.,
+                   restart_temp_ratio=2.e-5, visit=2.62, accept=-5.0,
+                   maxfun=1e7, seed=None, no_local_search=False,
+                   callback=None, x0=None):
+    """
+    Find the global minimum of a function using Dual Annealing.
+
+    Parameters
+    ----------
+    func : callable
+        The objective function to be minimized. Must be in the form
+        ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
+        and ``args`` is a  tuple of any additional fixed parameters needed to
+        completely specify the function.
+    bounds : sequence or `Bounds`
+        Bounds for variables. There are two ways to specify the bounds:
+
+        1. Instance of `Bounds` class.
+        2. Sequence of ``(min, max)`` pairs for each element in `x`.
+
+    args : tuple, optional
+        Any additional fixed parameters needed to completely specify the
+        objective function.
+    maxiter : int, optional
+        The maximum number of global search iterations. Default value is 1000.
+    minimizer_kwargs : dict, optional
+        Extra keyword arguments to be passed to the local minimizer
+        (`minimize`). Some important options could be:
+        ``method`` for the minimizer method to use and ``args`` for
+        objective function additional arguments.
+    initial_temp : float, optional
+        The initial temperature; use higher values to facilitate a wider
+        search of the energy landscape, allowing dual_annealing to escape
+        local minima that it is trapped in. Default value is 5230. Range is
+        (0.01, 5.e4].
+    restart_temp_ratio : float, optional
+        During the annealing process, the temperature decreases; when it
+        reaches ``initial_temp * restart_temp_ratio``, the reannealing process
+        is triggered. Default value of the ratio is 2e-5. Range is (0, 1).
+    visit : float, optional
+        Parameter for visiting distribution. Default value is 2.62. Higher
+        values give the visiting distribution a heavier tail, which makes
+        the algorithm jump to a more distant region. The value range is (1, 3].
+    accept : float, optional
+        Parameter for acceptance distribution. It is used to control the
+        probability of acceptance. The lower the acceptance parameter, the
+        smaller the probability of acceptance. Default value is -5.0 with
+        a range (-1e4, -5].
+    maxfun : int, optional
+        Soft limit for the number of objective function calls. If the
+        algorithm is in the middle of a local search, this number may be
+        exceeded; the algorithm will stop just after the local search is
+        done. Default value is 1e7.
+    seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
+        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
+        singleton is used.
+        If `seed` is an int, a new ``RandomState`` instance is used,
+        seeded with `seed`.
+        If `seed` is already a ``Generator`` or ``RandomState`` instance then
+        that instance is used.
+        Specify `seed` for repeatable minimizations. The random numbers
+        generated with this seed only affect the visiting distribution
+        function and the generation of new coordinates.
+    no_local_search : bool, optional
+        If `no_local_search` is set to True, a traditional Generalized
+        Simulated Annealing will be performed with no local search
+        strategy applied.
+    callback : callable, optional
+        A callback function with signature ``callback(x, f, context)``,
+        which will be called for all minima found.
+        ``x`` and ``f`` are the coordinates and function value of the
+        latest minimum found, and ``context`` has value in [0, 1, 2], with the
+        following meaning:
+
+            - 0: minimum detected in the annealing process.
+            - 1: detection occurred in the local search process.
+            - 2: detection done in the dual annealing process.
+
+        If the callback implementation returns True, the algorithm will stop.
+    x0 : ndarray, shape(n,), optional
+        Coordinates of a single N-D starting point.
+
+    Returns
+    -------
+    res : OptimizeResult
+        The optimization result represented as a `OptimizeResult` object.
+        Important attributes are: ``x`` the solution array, ``fun`` the value
+        of the function at the solution, and ``message`` which describes the
+        cause of the termination.
+        See `OptimizeResult` for a description of other attributes.
+
+    Notes
+    -----
+    This function implements the Dual Annealing optimization. This stochastic
+    approach derived from [3]_ combines the generalization of CSA (Classical
+    Simulated Annealing) and FSA (Fast Simulated Annealing) [1]_ [2]_ coupled
+    to a strategy for applying a local search on accepted locations [4]_.
+    An alternative implementation of this same algorithm is described in [5]_
+    and benchmarks are presented in [6]_. This approach introduces an advanced
+    method to refine the solution found by the generalized annealing
+    process. This algorithm uses a distorted Cauchy-Lorentz visiting
+    distribution, with its shape controlled by the parameter :math:`q_{v}`
+
+    .. math::
+
+        g_{q_{v}}(\\Delta x(t)) \\propto \\frac{ \\
+        \\left[T_{q_{v}}(t) \\right]^{-\\frac{D}{3-q_{v}}}}{ \\
+        \\left[{1+(q_{v}-1)\\frac{(\\Delta x(t))^{2}} { \\
+        \\left[T_{q_{v}}(t)\\right]^{\\frac{2}{3-q_{v}}}}}\\right]^{ \\
+        \\frac{1}{q_{v}-1}+\\frac{D-1}{2}}}
+
+    Where :math:`t` is the artificial time. This visiting distribution is used
+    to generate a trial jump distance :math:`\\Delta x(t)` of variable
+    :math:`x(t)` under artificial temperature :math:`T_{q_{v}}(t)`.
+
+    From the starting point, after calling the visiting distribution
+    function, the acceptance probability is computed as follows:
+
+    .. math::
+
+        p_{q_{a}} = \\min{\\{1,\\left[1-(1-q_{a}) \\beta \\Delta E \\right]^{ \\
+        \\frac{1}{1-q_{a}}}\\}}
+
+    Where :math:`q_{a}` is the acceptance parameter. For :math:`q_{a}<1`, zero
+    acceptance probability is assigned to the cases where
+
+    .. math::
+
+        [1-(1-q_{a}) \\beta \\Delta E] < 0
+
+    The artificial temperature :math:`T_{q_{v}}(t)` is decreased according to
+
+    .. math::
+
+        T_{q_{v}}(t) = T_{q_{v}}(1) \\frac{2^{q_{v}-1}-1}{\\left( \\
+        1 + t\\right)^{q_{v}-1}-1}
+
+    Where :math:`q_{v}` is the visiting parameter.
+
+    .. versionadded:: 1.2.0
+
+    References
+    ----------
+    .. [1] Tsallis C. Possible generalization of Boltzmann-Gibbs
+        statistics. Journal of Statistical Physics, 52, 479-487 (1988).
+    .. [2] Tsallis C, Stariolo DA. Generalized Simulated Annealing.
+        Physica A, 233, 395-406 (1996).
+    .. [3] Xiang Y, Sun DY, Fan W, Gong XG. Generalized Simulated
+        Annealing Algorithm and Its Application to the Thomson Model.
+        Physics Letters A, 233, 216-220 (1997).
+    .. [4] Xiang Y, Gong XG. Efficiency of Generalized Simulated
+        Annealing. Physical Review E, 62, 4473 (2000).
+    .. [5] Xiang Y, Gubian S, Suomela B, Hoeng J. Generalized
+        Simulated Annealing for Efficient Global Optimization: the GenSA
+        Package for R. The R Journal, Volume 5/1 (2013).
+    .. [6] Mullen, K. Continuous Global Optimization in R. Journal of
+        Statistical Software, 60(6), 1 - 45, (2014).
+        :doi:`10.18637/jss.v060.i06`
+
+    Examples
+    --------
+    The following example is a 10-D problem, with many local minima.
+    The function involved is called Rastrigin
+    (https://en.wikipedia.org/wiki/Rastrigin_function)
+
+    >>> import numpy as np
+    >>> from scipy.optimize import dual_annealing
+    >>> func = lambda x: np.sum(x*x - 10*np.cos(2*np.pi*x)) + 10*np.size(x)
+    >>> lw = [-5.12] * 10
+    >>> up = [5.12] * 10
+    >>> ret = dual_annealing(func, bounds=list(zip(lw, up)))
+    >>> ret.x
+    array([-4.26437714e-09, -3.91699361e-09, -1.86149218e-09, -3.97165720e-09,
+           -6.29151648e-09, -6.53145322e-09, -3.93616815e-09, -6.55623025e-09,
+           -6.05775280e-09, -5.00668935e-09]) # random
+    >>> ret.fun
+    0.000000
+
+    """
+
+    if isinstance(bounds, Bounds):
+        bounds = new_bounds_to_old(bounds.lb, bounds.ub, len(bounds.lb))
+
+    if x0 is not None and not len(x0) == len(bounds):
+        raise ValueError('Bounds size does not match x0')
+
+    lu = list(zip(*bounds))
+    lower = np.array(lu[0])
+    upper = np.array(lu[1])
+    # Check that restart temperature ratio is correct
+    if restart_temp_ratio <= 0. or restart_temp_ratio >= 1.:
+        raise ValueError('Restart temperature ratio has to be in range (0, 1)')
+    # Checking bounds are valid
+    if (np.any(np.isinf(lower)) or np.any(np.isinf(upper)) or np.any(
+            np.isnan(lower)) or np.any(np.isnan(upper))):
+        raise ValueError('Some bounds values are inf values or nan values')
+    # Checking that bounds are consistent
+    if not np.all(lower < upper):
+        raise ValueError('Bounds are not consistent min < max')
+    # Checking that bounds are the same length
+    if not len(lower) == len(upper):
+        raise ValueError('Bounds do not have the same dimensions')
+
+    # Wrapper for the objective function
+    func_wrapper = ObjectiveFunWrapper(func, maxfun, *args)
+
+    # minimizer_kwargs has to be a dict, not None
+    minimizer_kwargs = minimizer_kwargs or {}
+
+    minimizer_wrapper = LocalSearchWrapper(
+        bounds, func_wrapper, **minimizer_kwargs)
+
+    # Initialization of random Generator for reproducible runs if seed provided
+    rand_state = check_random_state(seed)
+    # Initialization of the energy state
+    energy_state = EnergyState(lower, upper, callback)
+    energy_state.reset(func_wrapper, rand_state, x0)
+    # Minimum value of annealing temperature reached to perform
+    # re-annealing
+    temperature_restart = initial_temp * restart_temp_ratio
+    # VisitingDistribution instance
+    visit_dist = VisitingDistribution(lower, upper, visit, rand_state)
+    # Strategy chain instance
+    strategy_chain = StrategyChain(accept, visit_dist, func_wrapper,
+                                   minimizer_wrapper, rand_state, energy_state)
+    need_to_stop = False
+    iteration = 0
+    message = []
+    # OptimizeResult object to be returned
+    optimize_res = OptimizeResult()
+    optimize_res.success = True
+    optimize_res.status = 0
+
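+    # Constant numerator of the visiting temperature schedule:
+    # t1 = 2**(visit - 1) - 1, so the temperature at the first inner step
+    # equals ``initial_temp``.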
+    t1 = np.exp((visit - 1) * np.log(2.0)) - 1.0
+    # Run the search loop
+    while not need_to_stop:
+        for i in range(maxiter):
+            # Compute temperature for this step
+            s = float(i) + 2.0
+            t2 = np.exp((visit - 1) * np.log(s)) - 1.0
+            temperature = initial_temp * t1 / t2
+            if iteration >= maxiter:
+                message.append("Maximum number of iteration reached")
+                need_to_stop = True
+                break
+            # Need a re-annealing process?
+            if temperature < temperature_restart:
+                energy_state.reset(func_wrapper, rand_state)
+                break
+            # starting strategy chain
+            val = strategy_chain.run(i, temperature)
+            if val is not None:
+                message.append(val)
+                need_to_stop = True
+                optimize_res.success = False
+                break
+            # Possible local search at the end of the strategy chain
+            if not no_local_search:
+                val = strategy_chain.local_search()
+                if val is not None:
+                    message.append(val)
+                    need_to_stop = True
+                    optimize_res.success = False
+                    break
+            iteration += 1
+
+    # Setting the OptimizeResult values
+    optimize_res.x = energy_state.xbest
+    optimize_res.fun = energy_state.ebest
+    optimize_res.nit = iteration
+    optimize_res.nfev = func_wrapper.nfev
+    optimize_res.njev = func_wrapper.ngev
+    optimize_res.nhev = func_wrapper.nhev
+    optimize_res.message = message
+    return optimize_res
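+
+
+# Illustrative sketch (not part of the public API): the visiting temperature
+# evaluated via ``t1``/``t2`` in the search loop above has the closed form
+# T(i) = initial_temp * (2**(visit - 1) - 1) / ((i + 2)**(visit - 1) - 1),
+# assuming ``visit > 1``; it equals ``initial_temp`` at ``i = 0``.
+def _visiting_temperature_sketch(initial_temp, visit, i):
+    # Numerator constant: 2**(visit - 1) - 1 (the ``t1`` above).
+    t1 = 2.0 ** (visit - 1.0) - 1.0
+    # Denominator at inner-loop step ``i`` (0-based): (i + 2)**(visit - 1) - 1.
+    t2 = (i + 2.0) ** (visit - 1.0) - 1.0
+    return initial_temp * t1 / t2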
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_hessian_update_strategy.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_hessian_update_strategy.py
new file mode 100644
index 00000000..2c516003
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_hessian_update_strategy.py
@@ -0,0 +1,429 @@
+"""Hessian update strategies for quasi-Newton optimization methods."""
+import numpy as np
+from numpy.linalg import norm
+from scipy.linalg import get_blas_funcs
+from warnings import warn
+
+
+__all__ = ['HessianUpdateStrategy', 'BFGS', 'SR1']
+
+
+class HessianUpdateStrategy:
+    """Interface for implementing Hessian update strategies.
+
+    Many optimization methods make use of Hessian (or inverse Hessian)
+    approximations, such as the quasi-Newton methods BFGS, SR1, L-BFGS.
+    Some of these approximations, however, do not actually need to store
+    the entire matrix, or can compute the internal matrix product with a
+    given vector in a very efficient manner. This class serves as an
+    abstract interface between the optimization algorithm and the
+    quasi-Newton update strategies, giving freedom of implementation
+    to store and update the internal matrix as efficiently as possible.
+    Different choices of initialization and update procedure will result
+    in different quasi-Newton strategies.
+
+    Four methods should be implemented in derived classes: ``initialize``,
+    ``update``, ``dot`` and ``get_matrix``.
+
+    Notes
+    -----
+    Any instance of a class that implements this interface
+    can be accepted by the method ``minimize`` and used by
+    the compatible solvers to approximate the Hessian (or
+    inverse Hessian) used by the optimization algorithms.
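+
+    For instance (an illustrative sketch, not the only supported usage),
+    a `BFGS` instance can be passed as the ``hess`` argument of
+    `scipy.optimize.minimize` with the 'trust-constr' method::
+
+        import numpy as np
+        from scipy.optimize import minimize, rosen, rosen_der, BFGS
+        res = minimize(rosen, np.array([1.3, 0.7]), method='trust-constr',
+                       jac=rosen_der, hess=BFGS())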
+    """
+
+    def initialize(self, n, approx_type):
+        """Initialize internal matrix.
+
+        Allocate internal memory for storing and updating
+        the Hessian or its inverse.
+
+        Parameters
+        ----------
+        n : int
+            Problem dimension.
+        approx_type : {'hess', 'inv_hess'}
+            Selects either the Hessian or the inverse Hessian.
+            When set to 'hess' the Hessian will be stored and updated.
+            When set to 'inv_hess' its inverse will be used instead.
+        """
+        raise NotImplementedError("The method ``initialize(n, approx_type)``"
+                                  " is not implemented.")
+
+    def update(self, delta_x, delta_grad):
+        """Update internal matrix.
+
+        Update Hessian matrix or its inverse (depending on how 'approx_type'
+        is defined) using information about the last evaluated points.
+
+        Parameters
+        ----------
+        delta_x : ndarray
+            The difference between the two points at which the
+            gradient has been evaluated: ``delta_x = x2 - x1``.
+        delta_grad : ndarray
+            The difference between the gradients:
+            ``delta_grad = grad(x2) - grad(x1)``.
+        """
+        raise NotImplementedError("The method ``update(delta_x, delta_grad)``"
+                                  " is not implemented.")
+
+    def dot(self, p):
+        """Compute the product of the internal matrix with the given vector.
+
+        Parameters
+        ----------
+        p : array_like
+            1-D array representing a vector.
+
+        Returns
+        -------
+        Hp : array
+            1-D array representing the result of multiplying the
+            approximation matrix by vector p.
+        """
+        raise NotImplementedError("The method ``dot(p)``"
+                                  " is not implemented.")
+
+    def get_matrix(self):
+        """Return current internal matrix.
+
+        Returns
+        -------
+        H : ndarray, shape (n, n)
+            Dense matrix containing either the Hessian
+            or its inverse (depending on how 'approx_type'
+            is defined).
+        """
+        raise NotImplementedError("The method ``get_matrix(p)``"
+                                  " is not implemented.")
+
+
+class FullHessianUpdateStrategy(HessianUpdateStrategy):
+    """Hessian update strategy with full dimensional internal representation.
+    """
+    _syr = get_blas_funcs('syr', dtype='d')  # Symmetric rank 1 update
+    _syr2 = get_blas_funcs('syr2', dtype='d')  # Symmetric rank 2 update
+    # Symmetric matrix-vector product
+    _symv = get_blas_funcs('symv', dtype='d')
+
+    def __init__(self, init_scale='auto'):
+        self.init_scale = init_scale
+        # Until initialize is called we can't really use the class,
+        # so it makes sense to set everything to None.
+        self.first_iteration = None
+        self.approx_type = None
+        self.B = None
+        self.H = None
+
+    def initialize(self, n, approx_type):
+        """Initialize internal matrix.
+
+        Allocate internal memory for storing and updating
+        the Hessian or its inverse.
+
+        Parameters
+        ----------
+        n : int
+            Problem dimension.
+        approx_type : {'hess', 'inv_hess'}
+            Selects either the Hessian or the inverse Hessian.
+            When set to 'hess' the Hessian will be stored and updated.
+            When set to 'inv_hess' its inverse will be used instead.
+        """
+        self.first_iteration = True
+        self.n = n
+        self.approx_type = approx_type
+        if approx_type not in ('hess', 'inv_hess'):
+            raise ValueError("`approx_type` must be 'hess' or 'inv_hess'.")
+        # Create matrix
+        if self.approx_type == 'hess':
+            self.B = np.eye(n, dtype=float)
+        else:
+            self.H = np.eye(n, dtype=float)
+
+    def _auto_scale(self, delta_x, delta_grad):
+        # Heuristic to scale matrix at first iteration.
+        # Described in Nocedal and Wright "Numerical Optimization"
+        # p.143 formula (6.20).
+        s_norm2 = np.dot(delta_x, delta_x)
+        y_norm2 = np.dot(delta_grad, delta_grad)
+        ys = np.abs(np.dot(delta_grad, delta_x))
+        if ys == 0.0 or y_norm2 == 0 or s_norm2 == 0:
+            return 1
+        if self.approx_type == 'hess':
+            return y_norm2 / ys
+        else:
+            return ys / y_norm2
+
+    def _update_implementation(self, delta_x, delta_grad):
+        raise NotImplementedError("The method ``_update_implementation``"
+                                  " is not implemented.")
+
+    def update(self, delta_x, delta_grad):
+        """Update internal matrix.
+
+        Update Hessian matrix or its inverse (depending on how 'approx_type'
+        is defined) using information about the last evaluated points.
+
+        Parameters
+        ----------
+        delta_x : ndarray
+            The difference between the two points at which the
+            gradient has been evaluated: ``delta_x = x2 - x1``.
+        delta_grad : ndarray
+            The difference between the gradients:
+            ``delta_grad = grad(x2) - grad(x1)``.
+        """
+        if np.all(delta_x == 0.0):
+            return
+        if np.all(delta_grad == 0.0):
+            warn('delta_grad == 0.0. Check if the approximated '
+                 'function is linear. If the function is linear '
+                 'better results can be obtained by defining the '
+                 'Hessian as zero instead of using quasi-Newton '
+                 'approximations.', UserWarning)
+            return
+        if self.first_iteration:
+            # Get user specific scale
+            if self.init_scale == "auto":
+                scale = self._auto_scale(delta_x, delta_grad)
+            else:
+                scale = float(self.init_scale)
+            # Scale initial matrix with ``scale * np.eye(n)``
+            if self.approx_type == 'hess':
+                self.B *= scale
+            else:
+                self.H *= scale
+            self.first_iteration = False
+        self._update_implementation(delta_x, delta_grad)
+
+    def dot(self, p):
+        """Compute the product of the internal matrix with the given vector.
+
+        Parameters
+        ----------
+        p : array_like
+            1-D array representing a vector.
+
+        Returns
+        -------
+        Hp : array
+            1-D array representing the result of multiplying the
+            approximation matrix by vector p.
+        """
+        if self.approx_type == 'hess':
+            return self._symv(1, self.B, p)
+        else:
+            return self._symv(1, self.H, p)
+
+    def get_matrix(self):
+        """Return the current internal matrix.
+
+        Returns
+        -------
+        M : ndarray, shape (n, n)
+            Dense matrix containing either the Hessian or its inverse
+            (depending on how `approx_type` was defined).
+        """
+        if self.approx_type == 'hess':
+            M = np.copy(self.B)
+        else:
+            M = np.copy(self.H)
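+        # The BLAS rank-1/rank-2 updates above modify only the upper
+        # triangle; mirror it into the lower triangle so a full symmetric
+        # matrix is returned.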
+        li = np.tril_indices_from(M, k=-1)
+        M[li] = M.T[li]
+        return M
+
+
+class BFGS(FullHessianUpdateStrategy):
+    """Broyden-Fletcher-Goldfarb-Shanno (BFGS) Hessian update strategy.
+
+    Parameters
+    ----------
+    exception_strategy : {'skip_update', 'damp_update'}, optional
+        Define how to proceed when the curvature condition is violated.
+        Set it to 'skip_update' to just skip the update. Or, alternatively,
+        set it to 'damp_update' to interpolate between the actual BFGS
+        result and the unmodified matrix. Both exception strategies
+        are explained in [1]_, pp. 536-537.
+    min_curvature : float
+        This number, scaled by a normalization factor, defines the
+        minimum curvature ``dot(delta_grad, delta_x)`` allowed to go
+        unaffected by the exception strategy. By default it is equal to
+        1e-8 when ``exception_strategy = 'skip_update'`` and equal
+        to 0.2 when ``exception_strategy = 'damp_update'``.
+    init_scale : {float, 'auto'}
+        Matrix scale at first iteration. At the first
+        iteration the Hessian matrix or its inverse will be initialized
+        with ``init_scale*np.eye(n)``, where ``n`` is the problem dimension.
+        Set it to 'auto' in order to use an automatic heuristic for choosing
+        the initial scale. The heuristic is described in [1]_, p.143.
+        By default uses 'auto'.
+
+    Notes
+    -----
+    The update is based on the description in [1]_, p.140.
+
+    References
+    ----------
+    .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization"
+           Second Edition (2006).
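+
+    Examples
+    --------
+    A minimal sketch of the update interface (the quadratic model below is
+    illustrative): after one update with a secant pair ``(s, y)``, the
+    approximation satisfies the secant equation ``B @ s == y``.
+
+    >>> import numpy as np
+    >>> from scipy.optimize import BFGS
+    >>> A = np.array([[2.0, 0.3], [0.3, 1.0]])  # model Hessian of a quadratic
+    >>> hess = BFGS()
+    >>> hess.initialize(n=2, approx_type='hess')
+    >>> s = np.array([1.0, -0.5])
+    >>> hess.update(s, A @ s)  # secant pair: delta_x = s, delta_grad = A @ s
+    >>> bool(np.allclose(hess.dot(s), A @ s))
+    True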
+    """
+
+    def __init__(self, exception_strategy='skip_update', min_curvature=None,
+                 init_scale='auto'):
+        if exception_strategy == 'skip_update':
+            if min_curvature is not None:
+                self.min_curvature = min_curvature
+            else:
+                self.min_curvature = 1e-8
+        elif exception_strategy == 'damp_update':
+            if min_curvature is not None:
+                self.min_curvature = min_curvature
+            else:
+                self.min_curvature = 0.2
+        else:
+            raise ValueError("`exception_strategy` must be 'skip_update' "
+                             "or 'damp_update'.")
+
+        super().__init__(init_scale)
+        self.exception_strategy = exception_strategy
+
+    def _update_inverse_hessian(self, ys, Hy, yHy, s):
+        """Update the inverse Hessian matrix.
+
+        BFGS update using the formula:
+
+            ``H <- H + ((H*y).T*y + s.T*y)/(s.T*y)^2 * (s*s.T)
+                     - 1/(s.T*y) * ((H*y)*s.T + s*(H*y).T)``
+
+        where ``s = delta_x`` and ``y = delta_grad``. This formula is
+        equivalent to (6.17) in [1]_ written in a more efficient way
+        for implementation.
+
+        References
+        ----------
+        .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization"
+               Second Edition (2006).
+        """
+        self.H = self._syr2(-1.0 / ys, s, Hy, a=self.H)
+        self.H = self._syr((ys+yHy)/ys**2, s, a=self.H)
+
+    def _update_hessian(self, ys, Bs, sBs, y):
+        """Update the Hessian matrix.
+
+        BFGS update using the formula:
+
+            ``B <- B - (B*s)*(B*s).T/s.T*(B*s) + y*y^T/s.T*y``
+
+        where ``s`` is short for ``delta_x`` and ``y`` is short
+        for ``delta_grad``. Formula (6.19) in [1]_.
+
+        References
+        ----------
+        .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization"
+               Second Edition (2006).
+        """
+        self.B = self._syr(1.0 / ys, y, a=self.B)
+        self.B = self._syr(-1.0 / sBs, Bs, a=self.B)
+
+    def _update_implementation(self, delta_x, delta_grad):
+        # Auxiliary variables w and z
+        if self.approx_type == 'hess':
+            w = delta_x
+            z = delta_grad
+        else:
+            w = delta_grad
+            z = delta_x
+        # Do some common operations
+        wz = np.dot(w, z)
+        Mw = self.dot(w)
+        wMw = Mw.dot(w)
+        # Guarantee that wMw > 0 by reinitializing matrix.
+        # While this is always true in exact arithmetic, an
+        # indefinite matrix may appear due to roundoff errors.
+        if wMw <= 0.0:
+            scale = self._auto_scale(delta_x, delta_grad)
+            # Reinitialize matrix
+            if self.approx_type == 'hess':
+                self.B = scale * np.eye(self.n, dtype=float)
+            else:
+                self.H = scale * np.eye(self.n, dtype=float)
+            # Do common operations for new matrix
+            Mw = self.dot(w)
+            wMw = Mw.dot(w)
+        # Check if curvature condition is violated
+        if wz <= self.min_curvature * wMw:
+            # If the option 'skip_update' is set,
+            # we just skip the update when the condition
+            # is violated.
+            if self.exception_strategy == 'skip_update':
+                return
+            # If the option 'damp_update' is set we
+            # interpolate between the actual BFGS
+            # result and the unmodified matrix.
+            elif self.exception_strategy == 'damp_update':
+                update_factor = (1-self.min_curvature) / (1 - wz/wMw)
+                z = update_factor*z + (1-update_factor)*Mw
+                wz = np.dot(w, z)
+        # Update matrix
+        if self.approx_type == 'hess':
+            self._update_hessian(wz, Mw, wMw, z)
+        else:
+            self._update_inverse_hessian(wz, Mw, wMw, z)
+
+
+class SR1(FullHessianUpdateStrategy):
+    """Symmetric-rank-1 Hessian update strategy.
+
+    Parameters
+    ----------
+    min_denominator : float
+        This number, scaled by a normalization factor,
+        defines the minimum denominator magnitude allowed
+        in the update. When the condition is violated we skip
+        the update. By default uses ``1e-8``.
+    init_scale : {float, 'auto'}, optional
+        Matrix scale at first iteration. At the first
+        iteration the Hessian matrix or its inverse will be initialized
+        with ``init_scale*np.eye(n)``, where ``n`` is the problem dimension.
+        Set it to 'auto' in order to use an automatic heuristic for choosing
+        the initial scale. The heuristic is described in [1]_, p.143.
+        By default uses 'auto'.
+
+    Notes
+    -----
+    The update is based on the description in [1]_, pp. 144-146.
+
+    References
+    ----------
+    .. [1] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization"
+           Second Edition (2006).
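+
+    Examples
+    --------
+    A minimal sketch (illustrative secant pair): like BFGS, a single SR1
+    update reproduces the secant pair exactly, ``B @ s == y``.
+
+    >>> import numpy as np
+    >>> from scipy.optimize import SR1
+    >>> hess = SR1()
+    >>> hess.initialize(n=2, approx_type='hess')
+    >>> s = np.array([1.0, -0.5])
+    >>> y = np.array([1.85, -0.2])  # gradient difference along s
+    >>> hess.update(s, y)
+    >>> bool(np.allclose(hess.dot(s), y))
+    True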
+    """
+
+    def __init__(self, min_denominator=1e-8, init_scale='auto'):
+        self.min_denominator = min_denominator
+        super().__init__(init_scale)
+
+    def _update_implementation(self, delta_x, delta_grad):
+        # Auxiliary variables w and z
+        if self.approx_type == 'hess':
+            w = delta_x
+            z = delta_grad
+        else:
+            w = delta_grad
+            z = delta_x
+        # Do some common operations
+        Mw = self.dot(w)
+        z_minus_Mw = z - Mw
+        denominator = np.dot(w, z_minus_Mw)
+        # If the denominator is too small
+        # we just skip the update.
+        if np.abs(denominator) <= self.min_denominator*norm(w)*norm(z_minus_Mw):
+            return
+        # Update matrix
+        if self.approx_type == 'hess':
+            self.B = self._syr(1/denominator, z_minus_Mw, a=self.B)
+        else:
+            self.H = self._syr(1/denominator, z_minus_Mw, a=self.H)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/HConst.pxd b/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/HConst.pxd
new file mode 100644
index 00000000..e4ceed64
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/HConst.pxd
@@ -0,0 +1,107 @@
+# distutils: language=c++
+# cython: language_level=3
+
+from libcpp cimport bool
+from libcpp.string cimport string
+
+cdef extern from "HConst.h" nogil:
+
+    const int HIGHS_CONST_I_INF "kHighsIInf"
+    const double HIGHS_CONST_INF "kHighsInf"
+    const double kHighsTiny
+    const double kHighsZero
+    const int kHighsThreadLimit
+
+    cdef enum HighsDebugLevel:
+      HighsDebugLevel_kHighsDebugLevelNone "kHighsDebugLevelNone" = 0
+      HighsDebugLevel_kHighsDebugLevelCheap "kHighsDebugLevelCheap"
+      HighsDebugLevel_kHighsDebugLevelCostly "kHighsDebugLevelCostly"
+      HighsDebugLevel_kHighsDebugLevelExpensive "kHighsDebugLevelExpensive"
+      HighsDebugLevel_kHighsDebugLevelMin "kHighsDebugLevelMin" = HighsDebugLevel_kHighsDebugLevelNone
+      HighsDebugLevel_kHighsDebugLevelMax "kHighsDebugLevelMax" = HighsDebugLevel_kHighsDebugLevelExpensive
+
+    ctypedef enum HighsModelStatus:
+        HighsModelStatusNOTSET "HighsModelStatus::kNotset" = 0
+        HighsModelStatusLOAD_ERROR "HighsModelStatus::kLoadError"
+        HighsModelStatusMODEL_ERROR "HighsModelStatus::kModelError"
+        HighsModelStatusPRESOLVE_ERROR "HighsModelStatus::kPresolveError"
+        HighsModelStatusSOLVE_ERROR "HighsModelStatus::kSolveError"
+        HighsModelStatusPOSTSOLVE_ERROR "HighsModelStatus::kPostsolveError"
+        HighsModelStatusMODEL_EMPTY "HighsModelStatus::kModelEmpty"
+        HighsModelStatusOPTIMAL "HighsModelStatus::kOptimal"
+        HighsModelStatusINFEASIBLE "HighsModelStatus::kInfeasible"
+        HighsModelStatus_UNBOUNDED_OR_INFEASIBLE "HighsModelStatus::kUnboundedOrInfeasible"
+        HighsModelStatusUNBOUNDED "HighsModelStatus::kUnbounded"
+        HighsModelStatusREACHED_DUAL_OBJECTIVE_VALUE_UPPER_BOUND "HighsModelStatus::kObjectiveBound"
+        HighsModelStatusREACHED_OBJECTIVE_TARGET "HighsModelStatus::kObjectiveTarget"
+        HighsModelStatusREACHED_TIME_LIMIT "HighsModelStatus::kTimeLimit"
+        HighsModelStatusREACHED_ITERATION_LIMIT "HighsModelStatus::kIterationLimit"
+        HighsModelStatusUNKNOWN "HighsModelStatus::kUnknown"
+        HighsModelStatusHIGHS_MODEL_STATUS_MIN "HighsModelStatus::kMin" = HighsModelStatusNOTSET
+        HighsModelStatusHIGHS_MODEL_STATUS_MAX "HighsModelStatus::kMax" = HighsModelStatusUNKNOWN
+
+    cdef enum HighsBasisStatus:
+        HighsBasisStatusLOWER "HighsBasisStatus::kLower" = 0, # (slack) variable is at its lower bound [including fixed variables]
+        HighsBasisStatusBASIC "HighsBasisStatus::kBasic" # (slack) variable is basic
+        HighsBasisStatusUPPER "HighsBasisStatus::kUpper" # (slack) variable is at its upper bound
+        HighsBasisStatusZERO "HighsBasisStatus::kZero" # free variable is non-basic and set to zero
+        HighsBasisStatusNONBASIC "HighsBasisStatus::kNonbasic" # nonbasic with no specific bound information - useful for users and postsolve
+
+    cdef enum SolverOption:
+        SOLVER_OPTION_SIMPLEX "SolverOption::SOLVER_OPTION_SIMPLEX" = -1
+        SOLVER_OPTION_CHOOSE "SolverOption::SOLVER_OPTION_CHOOSE"
+        SOLVER_OPTION_IPM "SolverOption::SOLVER_OPTION_IPM"
+
+    cdef enum PrimalDualStatus:
+        PrimalDualStatusSTATUS_NOT_SET "PrimalDualStatus::STATUS_NOT_SET" = -1
+        PrimalDualStatusSTATUS_MIN "PrimalDualStatus::STATUS_MIN" = PrimalDualStatusSTATUS_NOT_SET
+        PrimalDualStatusSTATUS_NO_SOLUTION "PrimalDualStatus::STATUS_NO_SOLUTION"
+        PrimalDualStatusSTATUS_UNKNOWN "PrimalDualStatus::STATUS_UNKNOWN"
+        PrimalDualStatusSTATUS_INFEASIBLE_POINT "PrimalDualStatus::STATUS_INFEASIBLE_POINT"
+        PrimalDualStatusSTATUS_FEASIBLE_POINT "PrimalDualStatus::STATUS_FEASIBLE_POINT"
+        PrimalDualStatusSTATUS_MAX "PrimalDualStatus::STATUS_MAX" = PrimalDualStatusSTATUS_FEASIBLE_POINT
+
+    cdef enum HighsOptionType:
+        HighsOptionTypeBOOL "HighsOptionType::kBool" = 0
+        HighsOptionTypeINT "HighsOptionType::kInt"
+        HighsOptionTypeDOUBLE "HighsOptionType::kDouble"
+        HighsOptionTypeSTRING "HighsOptionType::kString"
+
+    # workaround for lack of enum class support in Cython < 3.x
+    # cdef enum class ObjSense(int):
+    #     ObjSenseMINIMIZE "ObjSense::kMinimize" = 1
+    #     ObjSenseMAXIMIZE "ObjSense::kMaximize" = -1
+
+    cdef cppclass ObjSense:
+        pass
+
+    cdef ObjSense ObjSenseMINIMIZE "ObjSense::kMinimize"
+    cdef ObjSense ObjSenseMAXIMIZE "ObjSense::kMaximize"
+
+    # cdef enum class MatrixFormat(int):
+    #     MatrixFormatkColwise "MatrixFormat::kColwise" = 1
+    #     MatrixFormatkRowwise "MatrixFormat::kRowwise"
+    #     MatrixFormatkRowwisePartitioned "MatrixFormat::kRowwisePartitioned"
+
+    cdef cppclass MatrixFormat:
+        pass
+
+    cdef MatrixFormat MatrixFormatkColwise "MatrixFormat::kColwise"
+    cdef MatrixFormat MatrixFormatkRowwise "MatrixFormat::kRowwise"
+    cdef MatrixFormat MatrixFormatkRowwisePartitioned "MatrixFormat::kRowwisePartitioned"
+
+    # cdef enum class HighsVarType(int):
+    #     kContinuous "HighsVarType::kContinuous"
+    #     kInteger "HighsVarType::kInteger"
+    #     kSemiContinuous "HighsVarType::kSemiContinuous"
+    #     kSemiInteger "HighsVarType::kSemiInteger"
+    #     kImplicitInteger "HighsVarType::kImplicitInteger"
+
+    cdef cppclass HighsVarType:
+        pass
+
+    cdef HighsVarType kContinuous "HighsVarType::kContinuous"
+    cdef HighsVarType kInteger "HighsVarType::kInteger"
+    cdef HighsVarType kSemiContinuous "HighsVarType::kSemiContinuous"
+    cdef HighsVarType kSemiInteger "HighsVarType::kSemiInteger"
+    cdef HighsVarType kImplicitInteger "HighsVarType::kImplicitInteger"
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/Highs.pxd b/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/Highs.pxd
new file mode 100644
index 00000000..b5615907
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/Highs.pxd
@@ -0,0 +1,55 @@
+# distutils: language=c++
+# cython: language_level=3
+
+from libc.stdio cimport FILE
+
+from libcpp cimport bool
+from libcpp.string cimport string
+
+from .HighsStatus cimport HighsStatus
+from .HighsOptions cimport HighsOptions
+from .HighsInfo cimport HighsInfo
+from .HighsLp cimport (
+    HighsLp,
+    HighsSolution,
+    HighsBasis,
+    ObjSense,
+)
+from .HConst cimport HighsModelStatus
+
+cdef extern from "Highs.h":
+    # From HiGHS/src/Highs.h
+    cdef cppclass Highs:
+        HighsStatus passHighsOptions(const HighsOptions& options)
+        HighsStatus passModel(const HighsLp& lp)
+        HighsStatus run()
+        HighsStatus setHighsLogfile(FILE* logfile)
+        HighsStatus setHighsOutput(FILE* output)
+        HighsStatus writeHighsOptions(const string filename, const bool report_only_non_default_values = true)
+
+        # split up for cython below
+        #const HighsModelStatus& getModelStatus(const bool scaled_model = False) const
+        const HighsModelStatus & getModelStatus() const
+
+        const HighsInfo& getHighsInfo "getInfo" () const
+        string modelStatusToString(const HighsModelStatus model_status) const
+        #HighsStatus getHighsInfoValue(const string& info, int& value)
+        HighsStatus getHighsInfoValue(const string& info, double& value) const
+        const HighsOptions& getHighsOptions() const
+
+        const HighsLp& getLp() const
+
+        HighsStatus writeSolution(const string filename, const bool pretty) const
+
+        HighsStatus setBasis()
+        const HighsSolution& getSolution() const
+        const HighsBasis& getBasis() const
+
+        bool changeObjectiveSense(const ObjSense sense)
+
+        HighsStatus setHighsOptionValueBool "setOptionValue" (const string & option, const bool value)
+        HighsStatus setHighsOptionValueInt "setOptionValue" (const string & option, const int value)
+        HighsStatus setHighsOptionValueStr "setOptionValue" (const string & option, const string & value)
+        HighsStatus setHighsOptionValueDbl "setOptionValue" (const string & option, const double value)
+
+        string primalDualStatusToString(const int primal_dual_status)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/HighsIO.pxd b/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/HighsIO.pxd
new file mode 100644
index 00000000..4f02ba20
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/HighsIO.pxd
@@ -0,0 +1,21 @@
+# distutils: language=c++
+# cython: language_level=3
+
+
+cdef extern from "HighsIO.h" nogil:
+    # workaround for lack of enum class support in Cython < 3.x
+    # cdef enum class HighsLogType(int):
+    #     kInfo "HighsLogType::kInfo" = 1
+    #     kDetailed "HighsLogType::kDetailed"
+    #     kVerbose "HighsLogType::kVerbose"
+    #     kWarning "HighsLogType::kWarning"
+    #     kError "HighsLogType::kError"
+
+    cdef cppclass HighsLogType:
+        pass
+
+    cdef HighsLogType kInfo "HighsLogType::kInfo"
+    cdef HighsLogType kDetailed "HighsLogType::kDetailed"
+    cdef HighsLogType kVerbose "HighsLogType::kVerbose"
+    cdef HighsLogType kWarning "HighsLogType::kWarning"
+    cdef HighsLogType kError "HighsLogType::kError"
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/HighsInfo.pxd b/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/HighsInfo.pxd
new file mode 100644
index 00000000..a499feda
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/HighsInfo.pxd
@@ -0,0 +1,23 @@
+# distutils: language=c++
+# cython: language_level=3
+
+cdef extern from "HighsInfo.h" nogil:
+    # From HiGHS/src/lp_data/HighsInfo.h
+    cdef cppclass HighsInfo:
+        # Inherited from HighsInfoStruct:
+        int mip_node_count
+        int simplex_iteration_count
+        int ipm_iteration_count
+        int crossover_iteration_count
+        int primal_solution_status
+        int dual_solution_status
+        int basis_validity
+        double objective_function_value
+        double mip_dual_bound
+        double mip_gap
+        int num_primal_infeasibilities
+        double max_primal_infeasibility
+        double sum_primal_infeasibilities
+        int num_dual_infeasibilities
+        double max_dual_infeasibility
+        double sum_dual_infeasibilities
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/HighsLp.pxd b/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/HighsLp.pxd
new file mode 100644
index 00000000..74370d3d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/HighsLp.pxd
@@ -0,0 +1,47 @@
+# distutils: language=c++
+# cython: language_level=3
+
+from libcpp cimport bool
+from libcpp.string cimport string
+from libcpp.vector cimport vector
+
+from .HConst cimport HighsBasisStatus, ObjSense, HighsVarType
+from .HighsSparseMatrix cimport HighsSparseMatrix
+
+
+cdef extern from "HighsLp.h" nogil:
+    # From HiGHS/src/lp_data/HighsLp.h
+    cdef cppclass HighsLp:
+        int num_col_
+        int num_row_
+
+        vector[double] col_cost_
+        vector[double] col_lower_
+        vector[double] col_upper_
+        vector[double] row_lower_
+        vector[double] row_upper_
+
+        HighsSparseMatrix a_matrix_
+
+        ObjSense sense_
+        double offset_
+
+        string model_name_
+
+        vector[string] row_names_
+        vector[string] col_names_
+
+        vector[HighsVarType] integrality_
+
+        bool isMip() const
+
+    cdef cppclass HighsSolution:
+        vector[double] col_value
+        vector[double] col_dual
+        vector[double] row_value
+        vector[double] row_dual
+
+    cdef cppclass HighsBasis:
+        bool valid_
+        vector[HighsBasisStatus] col_status
+        vector[HighsBasisStatus] row_status
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/HighsLpUtils.pxd b/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/HighsLpUtils.pxd
new file mode 100644
index 00000000..25d62db2
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/HighsLpUtils.pxd
@@ -0,0 +1,10 @@
+# distutils: language=c++
+# cython: language_level=3
+
+from .HighsStatus cimport HighsStatus
+from .HighsLp cimport HighsLp
+from .HighsOptions cimport HighsOptions
+
+cdef extern from "HighsLpUtils.h" nogil:
+    # From HiGHS/src/lp_data/HighsLpUtils.h
+    HighsStatus assessLp(HighsLp& lp, const HighsOptions& options)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/HighsModelUtils.pxd b/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/HighsModelUtils.pxd
new file mode 100644
index 00000000..87b76229
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/HighsModelUtils.pxd
@@ -0,0 +1,11 @@
+# distutils: language=c++
+# cython: language_level=3
+
+from libcpp.string cimport string
+
+from .HConst cimport HighsModelStatus
+
+cdef extern from "HighsModelUtils.h" nogil:
+    # From HiGHS/src/lp_data/HighsModelUtils.h
+    string utilHighsModelStatusToString(const HighsModelStatus model_status)
+    string utilBasisStatusToString(const int primal_dual_status)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/HighsOptions.pxd b/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/HighsOptions.pxd
new file mode 100644
index 00000000..8b35f3f9
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/HighsOptions.pxd
@@ -0,0 +1,111 @@
+# distutils: language=c++
+# cython: language_level=3
+
+from libc.stdio cimport FILE
+
+from libcpp cimport bool
+from libcpp.string cimport string
+from libcpp.vector cimport vector
+
+from .HConst cimport HighsOptionType
+
+cdef extern from "HighsOptions.h" nogil:
+
+    cdef cppclass OptionRecord:
+        HighsOptionType type
+        string name
+        string description
+        bool advanced
+
+    cdef cppclass OptionRecordBool(OptionRecord):
+        bool* value
+        bool default_value
+
+    cdef cppclass OptionRecordInt(OptionRecord):
+        int* value
+        int lower_bound
+        int default_value
+        int upper_bound
+
+    cdef cppclass OptionRecordDouble(OptionRecord):
+        double* value
+        double lower_bound
+        double default_value
+        double upper_bound
+
+    cdef cppclass OptionRecordString(OptionRecord):
+        string* value
+        string default_value
+
+    cdef cppclass HighsOptions:
+        # From HighsOptionsStruct:
+
+        # Options read from the command line
+        string model_file
+        string presolve
+        string solver
+        string parallel
+        double time_limit
+        string options_file
+
+        # Options read from the file
+        double infinite_cost
+        double infinite_bound
+        double small_matrix_value
+        double large_matrix_value
+        double primal_feasibility_tolerance
+        double dual_feasibility_tolerance
+        double ipm_optimality_tolerance
+        double dual_objective_value_upper_bound
+        int highs_debug_level
+        int simplex_strategy
+        int simplex_scale_strategy
+        int simplex_crash_strategy
+        int simplex_dual_edge_weight_strategy
+        int simplex_primal_edge_weight_strategy
+        int simplex_iteration_limit
+        int simplex_update_limit
+        int ipm_iteration_limit
+        int highs_min_threads
+        int highs_max_threads
+        int message_level
+        string solution_file
+        bool write_solution_to_file
+        bool write_solution_pretty
+
+        # Advanced options
+        bool run_crossover
+        bool mps_parser_type_free
+        int keep_n_rows
+        int allowed_simplex_matrix_scale_factor
+        int allowed_simplex_cost_scale_factor
+        int simplex_dualise_strategy
+        int simplex_permute_strategy
+        int dual_simplex_cleanup_strategy
+        int simplex_price_strategy
+        int dual_chuzc_sort_strategy
+        bool simplex_initial_condition_check
+        double simplex_initial_condition_tolerance
+        double dual_steepest_edge_weight_log_error_threshhold
+        double dual_simplex_cost_perturbation_multiplier
+        double start_crossover_tolerance
+        bool less_infeasible_DSE_check
+        bool less_infeasible_DSE_choose_row
+        bool use_original_HFactor_logic
+
+        # Options for MIP solver
+        int mip_max_nodes
+        int mip_report_level
+
+        # Switch for MIP solver
+        bool mip
+
+        # Options for HighsPrintMessage and HighsLogMessage
+        FILE* logfile
+        FILE* output
+
+        vector[OptionRecord*] records
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/HighsRuntimeOptions.pxd b/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/HighsRuntimeOptions.pxd
new file mode 100644
index 00000000..0f0e093b
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/HighsRuntimeOptions.pxd
@@ -0,0 +1,10 @@
+# distutils: language=c++
+# cython: language_level=3
+
+from libcpp cimport bool
+
+from .HighsOptions cimport HighsOptions
+
+cdef extern from "HighsRuntimeOptions.h" nogil:
+    # From HiGHS/src/lp_data/HighsRuntimeOptions.h
+    bool loadOptions(int argc, char** argv, HighsOptions& options)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/HighsStatus.pxd b/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/HighsStatus.pxd
new file mode 100644
index 00000000..a04da09b
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/HighsStatus.pxd
@@ -0,0 +1,13 @@
+# distutils: language=c++
+# cython: language_level=3
+
+from libcpp.string cimport string
+
+cdef extern from "HighsStatus.h" nogil:
+    ctypedef enum HighsStatus:
+        HighsStatusError "HighsStatus::kError" = -1
+        HighsStatusOK "HighsStatus::kOk" = 0
+        HighsStatusWarning "HighsStatus::kWarning" = 1
+
+
+    string highsStatusToString(HighsStatus status)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/SimplexConst.pxd b/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/SimplexConst.pxd
new file mode 100644
index 00000000..bcd03e3e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/SimplexConst.pxd
@@ -0,0 +1,96 @@
+# distutils: language=c++
+# cython: language_level=3
+
+from libcpp cimport bool
+
+cdef extern from "SimplexConst.h" nogil:
+
+    cdef enum SimplexAlgorithm:
+        PRIMAL "SimplexAlgorithm::kPrimal" = 0
+        DUAL "SimplexAlgorithm::kDual"
+
+    cdef enum SimplexStrategy:
+        SIMPLEX_STRATEGY_MIN "SimplexStrategy::kSimplexStrategyMin" = 0
+        SIMPLEX_STRATEGY_CHOOSE "SimplexStrategy::kSimplexStrategyChoose" = SIMPLEX_STRATEGY_MIN
+        SIMPLEX_STRATEGY_DUAL "SimplexStrategy::kSimplexStrategyDual"
+        SIMPLEX_STRATEGY_DUAL_PLAIN "SimplexStrategy::kSimplexStrategyDualPlain" = SIMPLEX_STRATEGY_DUAL
+        SIMPLEX_STRATEGY_DUAL_TASKS "SimplexStrategy::kSimplexStrategyDualTasks"
+        SIMPLEX_STRATEGY_DUAL_MULTI "SimplexStrategy::kSimplexStrategyDualMulti"
+        SIMPLEX_STRATEGY_PRIMAL "SimplexStrategy::kSimplexStrategyPrimal"
+        SIMPLEX_STRATEGY_MAX "SimplexStrategy::kSimplexStrategyMax" = SIMPLEX_STRATEGY_PRIMAL
+        SIMPLEX_STRATEGY_NUM "SimplexStrategy::kSimplexStrategyNum"
+
+    cdef enum SimplexCrashStrategy:
+        SIMPLEX_CRASH_STRATEGY_MIN "SimplexCrashStrategy::kSimplexCrashStrategyMin" = 0
+        SIMPLEX_CRASH_STRATEGY_OFF "SimplexCrashStrategy::kSimplexCrashStrategyOff" = SIMPLEX_CRASH_STRATEGY_MIN
+        SIMPLEX_CRASH_STRATEGY_LTSSF_K "SimplexCrashStrategy::kSimplexCrashStrategyLtssfK"
+        SIMPLEX_CRASH_STRATEGY_LTSSF "SimplexCrashStrategy::kSimplexCrashStrategyLtssf" = SIMPLEX_CRASH_STRATEGY_LTSSF_K
+        SIMPLEX_CRASH_STRATEGY_BIXBY "SimplexCrashStrategy::kSimplexCrashStrategyBixby"
+        SIMPLEX_CRASH_STRATEGY_LTSSF_PRI "SimplexCrashStrategy::kSimplexCrashStrategyLtssfPri"
+        SIMPLEX_CRASH_STRATEGY_LTSF_K "SimplexCrashStrategy::kSimplexCrashStrategyLtsfK"
+        SIMPLEX_CRASH_STRATEGY_LTSF_PRI "SimplexCrashStrategy::kSimplexCrashStrategyLtsfPri"
+        SIMPLEX_CRASH_STRATEGY_LTSF "SimplexCrashStrategy::kSimplexCrashStrategyLtsf"
+        SIMPLEX_CRASH_STRATEGY_BIXBY_NO_NONZERO_COL_COSTS "SimplexCrashStrategy::kSimplexCrashStrategyBixbyNoNonzeroColCosts"
+        SIMPLEX_CRASH_STRATEGY_BASIC "SimplexCrashStrategy::kSimplexCrashStrategyBasic"
+        SIMPLEX_CRASH_STRATEGY_TEST_SING "SimplexCrashStrategy::kSimplexCrashStrategyTestSing"
+        SIMPLEX_CRASH_STRATEGY_MAX "SimplexCrashStrategy::kSimplexCrashStrategyMax" = SIMPLEX_CRASH_STRATEGY_TEST_SING
+
+    cdef enum SimplexEdgeWeightStrategy:
+        SIMPLEX_EDGE_WEIGHT_STRATEGY_MIN "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategyMin" = -1
+        SIMPLEX_EDGE_WEIGHT_STRATEGY_CHOOSE "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategyChoose" = SIMPLEX_EDGE_WEIGHT_STRATEGY_MIN
+        SIMPLEX_EDGE_WEIGHT_STRATEGY_DANTZIG "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategyDantzig"
+        SIMPLEX_EDGE_WEIGHT_STRATEGY_DEVEX "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategyDevex"
+        SIMPLEX_EDGE_WEIGHT_STRATEGY_STEEPEST_EDGE "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategySteepestEdge"
+        SIMPLEX_EDGE_WEIGHT_STRATEGY_STEEPEST_EDGE_UNIT_INITIAL "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategySteepestEdgeUnitInitial"
+        SIMPLEX_EDGE_WEIGHT_STRATEGY_MAX "SimplexEdgeWeightStrategy::kSimplexEdgeWeightStrategyMax" = SIMPLEX_EDGE_WEIGHT_STRATEGY_STEEPEST_EDGE_UNIT_INITIAL
+
+    cdef enum SimplexPriceStrategy:
+        SIMPLEX_PRICE_STRATEGY_MIN = 0
+        SIMPLEX_PRICE_STRATEGY_COL = SIMPLEX_PRICE_STRATEGY_MIN
+        SIMPLEX_PRICE_STRATEGY_ROW
+        SIMPLEX_PRICE_STRATEGY_ROW_SWITCH
+        SIMPLEX_PRICE_STRATEGY_ROW_SWITCH_COL_SWITCH
+        SIMPLEX_PRICE_STRATEGY_MAX = SIMPLEX_PRICE_STRATEGY_ROW_SWITCH_COL_SWITCH
+
+    cdef enum SimplexDualChuzcStrategy:
+        SIMPLEX_DUAL_CHUZC_STRATEGY_MIN = 0
+        SIMPLEX_DUAL_CHUZC_STRATEGY_CHOOSE = SIMPLEX_DUAL_CHUZC_STRATEGY_MIN
+        SIMPLEX_DUAL_CHUZC_STRATEGY_QUAD
+        SIMPLEX_DUAL_CHUZC_STRATEGY_HEAP
+        SIMPLEX_DUAL_CHUZC_STRATEGY_BOTH
+        SIMPLEX_DUAL_CHUZC_STRATEGY_MAX = SIMPLEX_DUAL_CHUZC_STRATEGY_BOTH
+
+    cdef enum InvertHint:
+        INVERT_HINT_NO = 0
+        INVERT_HINT_UPDATE_LIMIT_REACHED
+        INVERT_HINT_SYNTHETIC_CLOCK_SAYS_INVERT
+        INVERT_HINT_POSSIBLY_OPTIMAL
+        INVERT_HINT_POSSIBLY_PRIMAL_UNBOUNDED
+        INVERT_HINT_POSSIBLY_DUAL_UNBOUNDED
+        INVERT_HINT_POSSIBLY_SINGULAR_BASIS
+        INVERT_HINT_PRIMAL_INFEASIBLE_IN_PRIMAL_SIMPLEX
+        INVERT_HINT_CHOOSE_COLUMN_FAIL
+        INVERT_HINT_Count
+
+    cdef enum DualEdgeWeightMode:
+        DANTZIG "DualEdgeWeightMode::DANTZIG" = 0
+        DEVEX "DualEdgeWeightMode::DEVEX"
+        STEEPEST_EDGE "DualEdgeWeightMode::STEEPEST_EDGE"
+        Count "DualEdgeWeightMode::Count"
+
+    cdef enum PriceMode:
+        ROW "PriceMode::ROW" = 0
+        COL "PriceMode::COL"
+
+    const int PARALLEL_THREADS_DEFAULT
+    const int DUAL_TASKS_MIN_THREADS
+    const int DUAL_MULTI_MIN_THREADS
+
+    const bool invert_if_row_out_negative
+
+    const int NONBASIC_FLAG_TRUE
+    const int NONBASIC_FLAG_FALSE
+
+    const int NONBASIC_MOVE_UP
+    const int NONBASIC_MOVE_DN
+    const int NONBASIC_MOVE_ZE
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/highs_c_api.pxd b/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/highs_c_api.pxd
new file mode 100644
index 00000000..58b7a2a5
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_highs/src/cython/highs_c_api.pxd
@@ -0,0 +1,8 @@
+# distutils: language=c++
+# cython: language_level=3
+
+cdef extern from "highs_c_api.h" nogil:
+    int Highs_passLp(void* highs, int numcol, int numrow, int numnz,
+                     double* colcost, double* collower, double* colupper,
+                     double* rowlower, double* rowupper,
+                     int* astart, int* aindex,  double* avalue)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_lbfgsb_py.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_lbfgsb_py.py
new file mode 100644
index 00000000..c030f80d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_lbfgsb_py.py
@@ -0,0 +1,494 @@
+"""
+Functions
+---------
+.. autosummary::
+   :toctree: generated/
+
+    fmin_l_bfgs_b
+
+"""
+
+## License for the Python wrapper
+## ==============================
+
+## Copyright (c) 2004 David M. Cooke 
+
+## Permission is hereby granted, free of charge, to any person obtaining a
+## copy of this software and associated documentation files (the "Software"),
+## to deal in the Software without restriction, including without limitation
+## the rights to use, copy, modify, merge, publish, distribute, sublicense,
+## and/or sell copies of the Software, and to permit persons to whom the
+## Software is furnished to do so, subject to the following conditions:
+
+## The above copyright notice and this permission notice shall be included in
+## all copies or substantial portions of the Software.
+
+## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+## DEALINGS IN THE SOFTWARE.
+
+## Modifications by Travis Oliphant and Enthought, Inc. for inclusion in SciPy
+
+import numpy as np
+from numpy import array, asarray, float64, zeros
+from . import _lbfgsb
+from ._optimize import (MemoizeJac, OptimizeResult,
+                        _check_unknown_options, _prepare_scalar_function)
+from ._constraints import old_bound_to_new
+
+from scipy.sparse.linalg import LinearOperator
+
+__all__ = ['fmin_l_bfgs_b', 'LbfgsInvHessProduct']
+
+
+def fmin_l_bfgs_b(func, x0, fprime=None, args=(),
+                  approx_grad=0,
+                  bounds=None, m=10, factr=1e7, pgtol=1e-5,
+                  epsilon=1e-8,
+                  iprint=-1, maxfun=15000, maxiter=15000, disp=None,
+                  callback=None, maxls=20):
+    """
+    Minimize a function func using the L-BFGS-B algorithm.
+
+    Parameters
+    ----------
+    func : callable f(x,*args)
+        Function to minimize.
+    x0 : ndarray
+        Initial guess.
+    fprime : callable fprime(x,*args), optional
+        The gradient of `func`. If None, then `func` returns the function
+        value and the gradient (``f, g = func(x, *args)``), unless
+        `approx_grad` is True in which case `func` returns only ``f``.
+    args : sequence, optional
+        Arguments to pass to `func` and `fprime`.
+    approx_grad : bool, optional
+        Whether to approximate the gradient numerically (in which case
+        `func` returns only the function value).
+    bounds : list, optional
+        ``(min, max)`` pairs for each element in ``x``, defining
+        the bounds on that parameter. Use None or +-inf for one of ``min`` or
+        ``max`` when there is no bound in that direction.
+    m : int, optional
+        The maximum number of variable metric corrections
+        used to define the limited memory matrix. (The limited memory BFGS
+        method does not store the full hessian but uses this many terms in an
+        approximation to it.)
+    factr : float, optional
+        The iteration stops when
+        ``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``,
+        where ``eps`` is the machine precision, which is automatically
+        generated by the code. Typical values for `factr` are: 1e12 for
+        low accuracy; 1e7 for moderate accuracy; 10.0 for extremely
+        high accuracy. See Notes for relationship to `ftol`, which is exposed
+        (instead of `factr`) by the `scipy.optimize.minimize` interface to
+        L-BFGS-B.
+    pgtol : float, optional
+        The iteration will stop when
+        ``max{|proj g_i | i = 1, ..., n} <= pgtol``
+        where ``pg_i`` is the i-th component of the projected gradient.
+    epsilon : float, optional
+        Step size used when `approx_grad` is True, for numerically
+        calculating the gradient
+    iprint : int, optional
+        Controls the frequency of output. ``iprint < 0`` means no output;
+        ``iprint = 0``    print only one line at the last iteration;
+        ``0 < iprint < 99`` print also f and ``|proj g|`` every iprint iterations;
+        ``iprint = 99``   print details of every iteration except n-vectors;
+        ``iprint = 100``  print also the changes of active set and final x;
+        ``iprint > 100``  print details of every iteration including x and g.
+    disp : int, optional
+        If zero, then no output. If a positive number, then this overrides
+        `iprint` (i.e., `iprint` gets the value of `disp`).
+    maxfun : int, optional
+        Maximum number of function evaluations. Note that this function
+        may violate the limit because of evaluating gradients by numerical
+        differentiation.
+    maxiter : int, optional
+        Maximum number of iterations.
+    callback : callable, optional
+        Called after each iteration, as ``callback(xk)``, where ``xk`` is the
+        current parameter vector.
+    maxls : int, optional
+        Maximum number of line search steps (per iteration). Default is 20.
+
+    Returns
+    -------
+    x : array_like
+        Estimated position of the minimum.
+    f : float
+        Value of `func` at the minimum.
+    d : dict
+        Information dictionary.
+
+        * d['warnflag'] is
+
+          - 0 if converged,
+          - 1 if too many function evaluations or too many iterations,
+          - 2 if stopped for another reason, given in d['task']
+
+        * d['grad'] is the gradient at the minimum (should be close to 0)
+        * d['funcalls'] is the number of function calls made.
+        * d['nit'] is the number of iterations.
+
+    See also
+    --------
+    minimize: Interface to minimization algorithms for multivariate
+        functions. See the 'L-BFGS-B' `method` in particular. Note that the
+        `ftol` option is made available via that interface, while `factr` is
+        provided via this interface, where `factr` is the factor multiplying
+        the default machine floating-point precision to arrive at `ftol`:
+        ``ftol = factr * numpy.finfo(float).eps``.
+
+    Notes
+    -----
+    License of L-BFGS-B (FORTRAN code):
+
+    The version included here (in Fortran code) is 3.0
+    (released April 25, 2011). It was written by Ciyou Zhu, Richard Byrd,
+    and Jorge Nocedal. It carries the following
+    condition for use:
+
+    This software is freely available, but we expect that all publications
+    describing work using this software, or all commercial products using it,
+    quote at least one of the references given below. This software is released
+    under the BSD License.
+
+    References
+    ----------
+    * R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for Bound
+      Constrained Optimization, (1995), SIAM Journal on Scientific and
+      Statistical Computing, 16, 5, pp. 1190-1208.
+    * C. Zhu, R. H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B,
+      FORTRAN routines for large scale bound constrained optimization (1997),
+      ACM Transactions on Mathematical Software, 23, 4, pp. 550 - 560.
+    * J.L. Morales and J. Nocedal. L-BFGS-B: Remark on Algorithm 778: L-BFGS-B,
+      FORTRAN routines for large scale bound constrained optimization (2011),
+      ACM Transactions on Mathematical Software, 38, 1.
+
+    """
+    # handle fprime/approx_grad
+    if approx_grad:
+        fun = func
+        jac = None
+    elif fprime is None:
+        fun = MemoizeJac(func)
+        jac = fun.derivative
+    else:
+        fun = func
+        jac = fprime
+
+    # build options
+    opts = {'disp': disp,
+            'iprint': iprint,
+            'maxcor': m,
+            'ftol': factr * np.finfo(float).eps,
+            'gtol': pgtol,
+            'eps': epsilon,
+            'maxfun': maxfun,
+            'maxiter': maxiter,
+            'callback': callback,
+            'maxls': maxls}
+
+    res = _minimize_lbfgsb(fun, x0, args=args, jac=jac, bounds=bounds,
+                           **opts)
+    d = {'grad': res['jac'],
+         'task': res['message'],
+         'funcalls': res['nfev'],
+         'nit': res['nit'],
+         'warnflag': res['status']}
+    f = res['fun']
+    x = res['x']
+
+    return x, f, d
+
+
+def _minimize_lbfgsb(fun, x0, args=(), jac=None, bounds=None,
+                     disp=None, maxcor=10, ftol=2.2204460492503131e-09,
+                     gtol=1e-5, eps=1e-8, maxfun=15000, maxiter=15000,
+                     iprint=-1, callback=None, maxls=20,
+                     finite_diff_rel_step=None, **unknown_options):
+    """
+    Minimize a scalar function of one or more variables using the L-BFGS-B
+    algorithm.
+
+    Options
+    -------
+    disp : None or int
+        If `disp is None` (the default), then the supplied value of `iprint`
+        is used. Otherwise, `disp` overrides `iprint`: ``disp == 0``
+        suppresses all output, and any other value is used as `iprint`.
+    maxcor : int
+        The maximum number of variable metric corrections used to
+        define the limited memory matrix. (The limited memory BFGS
+        method does not store the full hessian but uses this many terms
+        in an approximation to it.)
+    ftol : float
+        The iteration stops when ``(f^k -
+        f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= ftol``.
+    gtol : float
+        The iteration will stop when ``max{|proj g_i | i = 1, ..., n}
+        <= gtol`` where ``proj g_i`` is the i-th component of the
+        projected gradient.
+    eps : float or ndarray
+        If `jac is None`, the absolute step size used for numerical
+        approximation of the Jacobian via forward differences.
+    maxfun : int
+        Maximum number of function evaluations. Note that this function
+        may violate the limit because of evaluating gradients by numerical
+        differentiation.
+    maxiter : int
+        Maximum number of iterations.
+    iprint : int, optional
+        Controls the frequency of output. ``iprint < 0`` means no output;
+        ``iprint = 0``    print only one line at the last iteration;
+        ``0 < iprint < 99`` print also f and ``|proj g|`` every iprint iterations;
+        ``iprint = 99``   print details of every iteration except n-vectors;
+        ``iprint = 100``  print also the changes of active set and final x;
+        ``iprint > 100``  print details of every iteration including x and g.
+    maxls : int, optional
+        Maximum number of line search steps (per iteration). Default is 20.
+    finite_diff_rel_step : None or array_like, optional
+        If `jac in ['2-point', '3-point', 'cs']`, the relative step size to
+        use for numerical approximation of the Jacobian. The absolute step
+        size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``,
+        possibly adjusted to fit into the bounds. For ``method='3-point'``
+        the sign of `h` is ignored. If None (default), the step is selected
+        automatically.
+
+    Notes
+    -----
+    The option `ftol` is exposed via the `scipy.optimize.minimize` interface,
+    but calling `scipy.optimize.fmin_l_bfgs_b` directly exposes `factr`. The
+    relationship between the two is ``ftol = factr * numpy.finfo(float).eps``.
+    I.e., `factr` multiplies the default machine floating-point precision to
+    arrive at `ftol`.
+
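+    A sketch of reaching these options through the public
+    `scipy.optimize.minimize` interface:
+
+    >>> from scipy.optimize import minimize
+    >>> res = minimize(lambda x: (x[0] - 1.0)**2, [0.0], method='L-BFGS-B',
+    ...                options={'maxcor': 5, 'gtol': 1e-8})
+    >>> res.success
+    True
+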
+    """
+    _check_unknown_options(unknown_options)
+    m = maxcor
+    pgtol = gtol
+    factr = ftol / np.finfo(float).eps
+
+    x0 = asarray(x0).ravel()
+    n, = x0.shape
+
+    if bounds is None:
+        bounds = [(None, None)] * n
+    if len(bounds) != n:
+        raise ValueError('length of x0 != length of bounds')
+
+    # unbounded variables must use None, not +-inf, for optimizer to work properly
+    bounds = [(None if l == -np.inf else l, None if u == np.inf else u) for l, u in bounds]
+    # LBFGSB is sent 'old-style' bounds, 'new-style' bounds are required by
+    # approx_derivative and ScalarFunction
+    new_bounds = old_bound_to_new(bounds)
+
+    # check bounds
+    if (new_bounds[0] > new_bounds[1]).any():
+        raise ValueError("LBFGSB - one of the lower bounds is greater than an upper bound.")
+
+    # initial vector must lie within the bounds. Otherwise ScalarFunction and
+    # approx_derivative will cause problems
+    x0 = np.clip(x0, new_bounds[0], new_bounds[1])
+
+    if disp is not None:
+        if disp == 0:
+            iprint = -1
+        else:
+            iprint = disp
+
+    sf = _prepare_scalar_function(fun, x0, jac=jac, args=args, epsilon=eps,
+                                  bounds=new_bounds,
+                                  finite_diff_rel_step=finite_diff_rel_step)
+
+    func_and_grad = sf.fun_and_grad
+
+    fortran_int = _lbfgsb.types.intvar.dtype
+
+    nbd = zeros(n, fortran_int)
+    low_bnd = zeros(n, float64)
+    upper_bnd = zeros(n, float64)
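+    # nbd encodes the bound type of each variable for the Fortran routine:
+    # 0 = unbounded, 1 = lower bound only, 2 = lower and upper, 3 = upper only.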
+    bounds_map = {(None, None): 0,
+                  (1, None): 1,
+                  (1, 1): 2,
+                  (None, 1): 3}
+    for i in range(0, n):
+        l, u = bounds[i]
+        if l is not None:
+            low_bnd[i] = l
+            l = 1
+        if u is not None:
+            upper_bnd[i] = u
+            u = 1
+        nbd[i] = bounds_map[l, u]
+
+    if maxls <= 0:
+        raise ValueError('maxls must be positive.')
+
+    x = array(x0, float64)
+    f = array(0.0, float64)
+    g = zeros((n,), float64)
+    wa = zeros(2*m*n + 5*n + 11*m*m + 8*m, float64)
+    iwa = zeros(3*n, fortran_int)
+    task = zeros(1, 'S60')
+    csave = zeros(1, 'S60')
+    lsave = zeros(4, fortran_int)
+    isave = zeros(44, fortran_int)
+    dsave = zeros(29, float64)
+
+    task[:] = 'START'
+
+    n_iterations = 0
+
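+    # Reverse-communication loop: setulb() updates its array arguments in
+    # place and uses the `task` string to request new function/gradient
+    # values until convergence or a stop condition is reached.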
+    while True:
+        _lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr,
+                       pgtol, wa, iwa, task, iprint, csave, lsave,
+                       isave, dsave, maxls)
+        task_str = task.tobytes()
+        if task_str.startswith(b'FG'):
+            # The minimization routine wants f and g at the current x.
+            # Note that interruptions due to maxfun are postponed
+            # until the completion of the current minimization iteration.
+            # Overwrite f and g:
+            f, g = func_and_grad(x)
+        elif task_str.startswith(b'NEW_X'):
+            # new iteration
+            n_iterations += 1
+            if callback is not None:
+                callback(np.copy(x))
+
+            if n_iterations >= maxiter:
+                task[:] = 'STOP: TOTAL NO. of ITERATIONS REACHED LIMIT'
+            elif sf.nfev > maxfun:
+                task[:] = ('STOP: TOTAL NO. of f AND g EVALUATIONS '
+                           'EXCEEDS LIMIT')
+        else:
+            break
+
+    task_str = task.tobytes().strip(b'\x00').strip()
+    if task_str.startswith(b'CONV'):
+        warnflag = 0
+    elif sf.nfev > maxfun or n_iterations >= maxiter:
+        warnflag = 1
+    else:
+        warnflag = 2
+
+    # These two portions of the workspace are described in the mainlb
+    # subroutine in lbfgsb.f. See line 363.
+    s = wa[0: m*n].reshape(m, n)
+    y = wa[m*n: 2*m*n].reshape(m, n)
+
+    # See lbfgsb.f line 160 for this portion of the workspace.
+    # isave(31) = the total number of BFGS updates prior to the current iteration;
+    n_bfgs_updates = isave[30]
+
+    n_corrs = min(n_bfgs_updates, maxcor)
+    hess_inv = LbfgsInvHessProduct(s[:n_corrs], y[:n_corrs])
+
+    task_str = task_str.decode()
+    return OptimizeResult(fun=f, jac=g, nfev=sf.nfev,
+                          njev=sf.ngev,
+                          nit=n_iterations, status=warnflag, message=task_str,
+                          x=x, success=(warnflag == 0), hess_inv=hess_inv)
+
+
+class LbfgsInvHessProduct(LinearOperator):
+    """Linear operator for the L-BFGS approximate inverse Hessian.
+
+    This operator computes the product of a vector with the approximate inverse
+    of the Hessian of the objective function, using the L-BFGS limited
+    memory approximation to the inverse Hessian, accumulated during the
+    optimization.
+
+    Objects of this class implement the ``scipy.sparse.linalg.LinearOperator``
+    interface.
+
+    Parameters
+    ----------
+    sk : array_like, shape=(n_corr, n)
+        Array of `n_corr` most recent updates to the solution vector.
+        (See [1]).
+    yk : array_like, shape=(n_corr, n)
+        Array of `n_corr` most recent updates to the gradient. (See [1]).
+
+    References
+    ----------
+    .. [1] Nocedal, Jorge. "Updating quasi-Newton matrices with limited
+       storage." Mathematics of computation 35.151 (1980): 773-782.
+
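+    Examples
+    --------
+    A small illustrative sketch (the update vectors below are arbitrary
+    but satisfy the positive-curvature requirement ``s_i . y_i > 0``):
+
+    >>> import numpy as np
+    >>> sk = np.array([[1.0, 0.0], [0.0, 1.0]])
+    >>> yk = np.array([[2.0, 0.0], [0.0, 3.0]])
+    >>> hop = LbfgsInvHessProduct(sk, yk)
+    >>> hop.matvec(np.array([1.0, 0.0])).shape
+    (2,)
+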
+    """
+
+    def __init__(self, sk, yk):
+        """Construct the operator."""
+        if sk.shape != yk.shape or sk.ndim != 2:
+            raise ValueError('sk and yk must have matching shape, (n_corrs, n)')
+        n_corrs, n = sk.shape
+
+        super().__init__(dtype=np.float64, shape=(n, n))
+
+        self.sk = sk
+        self.yk = yk
+        self.n_corrs = n_corrs
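+        # rho_i = 1 / (y_i^T s_i), the standard L-BFGS curvature scalars
+        # used by the two-loop recursion in _matvec below.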
+        self.rho = 1 / np.einsum('ij,ij->i', sk, yk)
+
+    def _matvec(self, x):
+        """Efficient matrix-vector multiply with the BFGS matrices.
+
+        This calculation is described in Section (4) of [1].
+
+        Parameters
+        ----------
+        x : ndarray
+            An array with shape (n,) or (n,1).
+
+        Returns
+        -------
+        y : ndarray
+            The matrix-vector product.
+
+        """
+        s, y, n_corrs, rho = self.sk, self.yk, self.n_corrs, self.rho
+        q = np.array(x, dtype=self.dtype, copy=True)
+        if q.ndim == 2 and q.shape[1] == 1:
+            q = q.reshape(-1)
+
+        alpha = np.empty(n_corrs)
+
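+        # Two-loop recursion (Nocedal, 1980): a backward pass accumulates
+        # the alpha coefficients, then a forward pass reconstructs the
+        # product with the approximate inverse Hessian.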
+        for i in range(n_corrs-1, -1, -1):
+            alpha[i] = rho[i] * np.dot(s[i], q)
+            q = q - alpha[i]*y[i]
+
+        r = q
+        for i in range(n_corrs):
+            beta = rho[i] * np.dot(y[i], r)
+            r = r + s[i] * (alpha[i] - beta)
+
+        return r
+
+    def todense(self):
+        """Return a dense array representation of this operator.
+
+        Returns
+        -------
+        arr : ndarray, shape=(n, n)
+            An array with the same shape and containing
+            the same data represented by this `LinearOperator`.
+
+        """
+        s, y, n_corrs, rho = self.sk, self.yk, self.n_corrs, self.rho
+        I = np.eye(*self.shape, dtype=self.dtype)
+        Hk = I
+
+        for i in range(n_corrs):
+            A1 = I - s[i][:, np.newaxis] * y[i][np.newaxis, :] * rho[i]
+            A2 = I - y[i][:, np.newaxis] * s[i][np.newaxis, :] * rho[i]
+
+            Hk = np.dot(A1, np.dot(Hk, A2)) + (rho[i] * s[i][:, np.newaxis] *
+                                                        s[i][np.newaxis, :])
+        return Hk
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_linesearch.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_linesearch.py
new file mode 100644
index 00000000..e936ee98
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_linesearch.py
@@ -0,0 +1,881 @@
+"""
+Functions
+---------
+.. autosummary::
+   :toctree: generated/
+
+    line_search_armijo
+    line_search_wolfe1
+    line_search_wolfe2
+    scalar_search_wolfe1
+    scalar_search_wolfe2
+
+"""
+from warnings import warn
+
+from scipy.optimize import _minpack2 as minpack2
+import numpy as np
+
+__all__ = ['LineSearchWarning', 'line_search_wolfe1', 'line_search_wolfe2',
+           'scalar_search_wolfe1', 'scalar_search_wolfe2',
+           'line_search_armijo']
+
+class LineSearchWarning(RuntimeWarning):
+    pass
+
+
+#------------------------------------------------------------------------------
+# Minpack's Wolfe line and scalar searches
+#------------------------------------------------------------------------------
+
+def line_search_wolfe1(f, fprime, xk, pk, gfk=None,
+                       old_fval=None, old_old_fval=None,
+                       args=(), c1=1e-4, c2=0.9, amax=50, amin=1e-8,
+                       xtol=1e-14):
+    """
+    As `scalar_search_wolfe1`, but performs the line search along direction `pk`.
+
+    Parameters
+    ----------
+    f : callable
+        Function `f(x)`
+    fprime : callable
+        Gradient of `f`
+    xk : array_like
+        Current point
+    pk : array_like
+        Search direction
+
+    gfk : array_like, optional
+        Gradient of `f` at point `xk`
+    old_fval : float, optional
+        Value of `f` at point `xk`
+    old_old_fval : float, optional
+        Value of `f` at point preceding `xk`
+
+    The rest of the parameters are the same as for `scalar_search_wolfe1`.
+
+    Returns
+    -------
+    stp : float or None
+        Step size, or None if no suitable step was found.
+    f_count : int
+        Number of function evaluations made.
+    g_count : int
+        Number of gradient evaluations made.
+    fval : float
+        Value of `f` at the final point.
+    old_fval : float
+        Value of `f` at the starting point.
+    gval : array
+        Gradient of `f` at the final point.
+
+    """
+    if gfk is None:
+        gfk = fprime(xk, *args)
+
+    gval = [gfk]
+    gc = [0]
+    fc = [0]
+
+    def phi(s):
+        fc[0] += 1
+        return f(xk + s*pk, *args)
+
+    def derphi(s):
+        gval[0] = fprime(xk + s*pk, *args)
+        gc[0] += 1
+        return np.dot(gval[0], pk)
+
+    derphi0 = np.dot(gfk, pk)
+
+    stp, fval, old_fval = scalar_search_wolfe1(
+            phi, derphi, old_fval, old_old_fval, derphi0,
+            c1=c1, c2=c2, amax=amax, amin=amin, xtol=xtol)
+
+    return stp, fc[0], gc[0], fval, old_fval, gval[0]
+
+
+def scalar_search_wolfe1(phi, derphi, phi0=None, old_phi0=None, derphi0=None,
+                         c1=1e-4, c2=0.9,
+                         amax=50, amin=1e-8, xtol=1e-14):
+    """
+    Scalar function search for alpha that satisfies strong Wolfe conditions.
+
+    The search direction is assumed to be a descent direction, so a step
+    ``alpha > 0`` is sought.
+
+    Parameters
+    ----------
+    phi : callable phi(alpha)
+        Function at point `alpha`
+    derphi : callable phi'(alpha)
+        Objective function derivative. Returns a scalar.
+    phi0 : float, optional
+        Value of phi at 0
+    old_phi0 : float, optional
+        Value of phi at previous point
+    derphi0 : float, optional
+        Value of derphi at 0
+    c1 : float, optional
+        Parameter for Armijo condition rule.
+    c2 : float, optional
+        Parameter for curvature condition rule.
+    amax, amin : float, optional
+        Maximum and minimum step size
+    xtol : float, optional
+        Relative tolerance for an acceptable step.
+
+    Returns
+    -------
+    alpha : float
+        Step size, or None if no suitable step was found
+    phi : float
+        Value of `phi` at the new point `alpha`
+    phi0 : float
+        Value of `phi` at `alpha=0`
+
+    Notes
+    -----
+    Uses routine DCSRCH from MINPACK.
+
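+    Examples
+    --------
+    A minimal sketch on a one-dimensional quadratic (the functions below
+    are illustrative):
+
+    >>> phi = lambda a: (a - 2.0)**2
+    >>> derphi = lambda a: 2.0*(a - 2.0)
+    >>> stp, phi1, phi0 = scalar_search_wolfe1(phi, derphi)
+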
+    """
+
+    if phi0 is None:
+        phi0 = phi(0.)
+    if derphi0 is None:
+        derphi0 = derphi(0.)
+
+    if old_phi0 is not None and derphi0 != 0:
+        alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0)
+        if alpha1 < 0:
+            alpha1 = 1.0
+    else:
+        alpha1 = 1.0
+
+    phi1 = phi0
+    derphi1 = derphi0
+    isave = np.zeros((2,), np.intc)
+    dsave = np.zeros((13,), float)
+    task = b'START'
+
+    maxiter = 100
+    for i in range(maxiter):
+        stp, phi1, derphi1, task = minpack2.dcsrch(alpha1, phi1, derphi1,
+                                                   c1, c2, xtol, task,
+                                                   amin, amax, isave, dsave)
+        if task[:2] == b'FG':
+            alpha1 = stp
+            phi1 = phi(stp)
+            derphi1 = derphi(stp)
+        else:
+            break
+    else:
+        # maxiter reached, the line search did not converge
+        stp = None
+
+    if task[:5] == b'ERROR' or task[:4] == b'WARN':
+        stp = None  # failed
+
+    return stp, phi1, phi0
+
+
+line_search = line_search_wolfe1
+
+
+#------------------------------------------------------------------------------
+# Pure-Python Wolfe line and scalar searches
+#------------------------------------------------------------------------------
+
+def line_search_wolfe2(f, myfprime, xk, pk, gfk=None, old_fval=None,
+                       old_old_fval=None, args=(), c1=1e-4, c2=0.9, amax=None,
+                       extra_condition=None, maxiter=10):
+    """Find alpha that satisfies strong Wolfe conditions.
+
+    Parameters
+    ----------
+    f : callable f(x,*args)
+        Objective function.
+    myfprime : callable f'(x,*args)
+        Objective function gradient.
+    xk : ndarray
+        Starting point.
+    pk : ndarray
+        Search direction.
+    gfk : ndarray, optional
+        Gradient value for x=xk (xk being the current parameter
+        estimate). Will be recomputed if omitted.
+    old_fval : float, optional
+        Function value for x=xk. Will be recomputed if omitted.
+    old_old_fval : float, optional
+        Function value for the point preceding x=xk.
+    args : tuple, optional
+        Additional arguments passed to objective function.
+    c1 : float, optional
+        Parameter for Armijo condition rule.
+    c2 : float, optional
+        Parameter for curvature condition rule.
+    amax : float, optional
+        Maximum step size
+    extra_condition : callable, optional
+        A callable of the form ``extra_condition(alpha, x, f, g)``
+        returning a boolean. Arguments are the proposed step ``alpha``
+        and the corresponding ``x``, ``f`` and ``g`` values. The line search
+        accepts the value of ``alpha`` only if this
+        callable returns ``True``. If the callable returns ``False``
+        for the step length, the algorithm will continue with
+        new iterates. The callable is only called for iterates
+        satisfying the strong Wolfe conditions.
+    maxiter : int, optional
+        Maximum number of iterations to perform.
+
+    Returns
+    -------
+    alpha : float or None
+        Alpha for which ``x_new = x0 + alpha * pk``,
+        or None if the line search algorithm did not converge.
+    fc : int
+        Number of function evaluations made.
+    gc : int
+        Number of gradient evaluations made.
+    new_fval : float or None
+        New function value ``f(x_new)=f(x0+alpha*pk)``,
+        or None if the line search algorithm did not converge.
+    old_fval : float
+        Old function value ``f(x0)``.
+    new_slope : float or None
+        The local slope along the search direction at the
+        new value ``<myfprime(x_new), pk>``,
+        or None if the line search algorithm did not converge.
+
+    Notes
+    -----
+    Uses the line search algorithm to enforce strong Wolfe
+    conditions. See Wright and Nocedal, 'Numerical Optimization',
+    1999, pp. 59-61.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.optimize import line_search
+
+    An objective function and its gradient are defined.
+
+    >>> def obj_func(x):
+    ...     return (x[0])**2+(x[1])**2
+    >>> def obj_grad(x):
+    ...     return [2*x[0], 2*x[1]]
+
+    We can find alpha that satisfies strong Wolfe conditions.
+
+    >>> start_point = np.array([1.8, 1.7])
+    >>> search_gradient = np.array([-1.0, -1.0])
+    >>> line_search(obj_func, obj_grad, start_point, search_gradient)
+    (1.0, 2, 1, 1.1300000000000001, 6.13, [1.6, 1.4])
+
+    """
+    fc = [0]
+    gc = [0]
+    gval = [None]
+    gval_alpha = [None]
+
+    def phi(alpha):
+        fc[0] += 1
+        return f(xk + alpha * pk, *args)
+
+    fprime = myfprime
+
+    def derphi(alpha):
+        gc[0] += 1
+        gval[0] = fprime(xk + alpha * pk, *args)  # store for later use
+        gval_alpha[0] = alpha
+        return np.dot(gval[0], pk)
+
+    if gfk is None:
+        gfk = fprime(xk, *args)
+    derphi0 = np.dot(gfk, pk)
+
+    if extra_condition is not None:
+        # Add the current gradient as argument, to avoid needless
+        # re-evaluation
+        def extra_condition2(alpha, phi):
+            if gval_alpha[0] != alpha:
+                derphi(alpha)
+            x = xk + alpha * pk
+            return extra_condition(alpha, x, phi, gval[0])
+    else:
+        extra_condition2 = None
+
+    alpha_star, phi_star, old_fval, derphi_star = scalar_search_wolfe2(
+            phi, derphi, old_fval, old_old_fval, derphi0, c1, c2, amax,
+            extra_condition2, maxiter=maxiter)
+
+    if derphi_star is None:
+        warn('The line search algorithm did not converge', LineSearchWarning)
+    else:
+        # derphi_star is a scalar (the slope along pk). The gradient used to
+        # compute it was stored in gval[0]; return that gradient so the
+        # caller does not need to recompute it in the outer loop.
+        derphi_star = gval[0]
+
+    return alpha_star, fc[0], gc[0], phi_star, old_fval, derphi_star
+
+
+def scalar_search_wolfe2(phi, derphi, phi0=None,
+                         old_phi0=None, derphi0=None,
+                         c1=1e-4, c2=0.9, amax=None,
+                         extra_condition=None, maxiter=10):
+    """Find alpha that satisfies strong Wolfe conditions.
+
+    The search direction is assumed to be a descent direction, so a step
+    ``alpha > 0`` is sought.
+
+    Parameters
+    ----------
+    phi : callable phi(alpha)
+        Objective scalar function.
+    derphi : callable phi'(alpha)
+        Objective function derivative. Returns a scalar.
+    phi0 : float, optional
+        Value of phi at 0.
+    old_phi0 : float, optional
+        Value of phi at previous point.
+    derphi0 : float, optional
+        Value of derphi at 0
+    c1 : float, optional
+        Parameter for Armijo condition rule.
+    c2 : float, optional
+        Parameter for curvature condition rule.
+    amax : float, optional
+        Maximum step size.
+    extra_condition : callable, optional
+        A callable of the form ``extra_condition(alpha, phi_value)``
+        returning a boolean. The line search accepts the value
+        of ``alpha`` only if this callable returns ``True``.
+        If the callable returns ``False`` for the step length,
+        the algorithm will continue with new iterates.
+        The callable is only called for iterates satisfying
+        the strong Wolfe conditions.
+    maxiter : int, optional
+        Maximum number of iterations to perform.
+
+    Returns
+    -------
+    alpha_star : float or None
+        Best alpha, or None if the line search algorithm did not converge.
+    phi_star : float
+        phi at alpha_star.
+    phi0 : float
+        phi at 0.
+    derphi_star : float or None
+        derphi at alpha_star, or None if the line search algorithm
+        did not converge.
+
+    Notes
+    -----
+    Uses the line search algorithm to enforce strong Wolfe
+    conditions. See Wright and Nocedal, 'Numerical Optimization',
+    1999, pp. 59-61.
+
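+    Examples
+    --------
+    A minimal sketch on a one-dimensional quadratic (the functions below
+    are illustrative); here the unit step already satisfies both conditions:
+
+    >>> phi = lambda a: (a - 2.0)**2
+    >>> derphi = lambda a: 2.0*(a - 2.0)
+    >>> alpha, phi_a, phi0, derphi_a = scalar_search_wolfe2(phi, derphi)
+    >>> alpha, phi_a
+    (1.0, 1.0)
+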
+    """
+
+    if phi0 is None:
+        phi0 = phi(0.)
+
+    if derphi0 is None:
+        derphi0 = derphi(0.)
+
+    alpha0 = 0
+    if old_phi0 is not None and derphi0 != 0:
+        alpha1 = min(1.0, 1.01*2*(phi0 - old_phi0)/derphi0)
+    else:
+        alpha1 = 1.0
+
+    if alpha1 < 0:
+        alpha1 = 1.0
+
+    if amax is not None:
+        alpha1 = min(alpha1, amax)
+
+    phi_a1 = phi(alpha1)
+    # derphi_a1 = derphi(alpha1) is evaluated below
+
+    phi_a0 = phi0
+    derphi_a0 = derphi0
+
+    if extra_condition is None:
+        extra_condition = lambda alpha, phi: True
+
+    for i in range(maxiter):
+        if alpha1 == 0 or (amax is not None and alpha0 == amax):
+            # alpha1 == 0: This shouldn't happen. Perhaps the increment has
+            # slipped below machine precision?
+            alpha_star = None
+            phi_star = phi0
+            phi0 = old_phi0
+            derphi_star = None
+
+            if alpha1 == 0:
+                msg = 'Rounding errors prevent the line search from converging'
+            else:
+                msg = "The line search algorithm could not find a solution " + \
+                      "less than or equal to amax: %s" % amax
+
+            warn(msg, LineSearchWarning)
+            break
+
+        not_first_iteration = i > 0
+        if (phi_a1 > phi0 + c1 * alpha1 * derphi0) or \
+           ((phi_a1 >= phi_a0) and not_first_iteration):
+            alpha_star, phi_star, derphi_star = \
+                        _zoom(alpha0, alpha1, phi_a0,
+                              phi_a1, derphi_a0, phi, derphi,
+                              phi0, derphi0, c1, c2, extra_condition)
+            break
+
+        derphi_a1 = derphi(alpha1)
+        if (abs(derphi_a1) <= -c2*derphi0):
+            if extra_condition(alpha1, phi_a1):
+                alpha_star = alpha1
+                phi_star = phi_a1
+                derphi_star = derphi_a1
+                break
+
+        if (derphi_a1 >= 0):
+            alpha_star, phi_star, derphi_star = \
+                        _zoom(alpha1, alpha0, phi_a1,
+                              phi_a0, derphi_a1, phi, derphi,
+                              phi0, derphi0, c1, c2, extra_condition)
+            break
+
+        alpha2 = 2 * alpha1  # increase by factor of two on each iteration
+        if amax is not None:
+            alpha2 = min(alpha2, amax)
+        alpha0 = alpha1
+        alpha1 = alpha2
+        phi_a0 = phi_a1
+        phi_a1 = phi(alpha1)
+        derphi_a0 = derphi_a1
+
+    else:
+        # stopping test maxiter reached
+        alpha_star = alpha1
+        phi_star = phi_a1
+        derphi_star = None
+        warn('The line search algorithm did not converge', LineSearchWarning)
+
+    return alpha_star, phi_star, phi0, derphi_star
+
+
+def _cubicmin(a, fa, fpa, b, fb, c, fc):
+    """
+    Finds the minimizer for a cubic polynomial that goes through the
+    points (a,fa), (b,fb), and (c,fc) with derivative at a of fpa.
+
+    If no minimizer can be found, return None.
+
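+    Examples
+    --------
+    For example, recovering the minimizer of ``x**3 - 3*x`` (which is 1)
+    from three samples and one slope:
+
+    >>> float(_cubicmin(0.0, 0.0, -3.0, 2.0, 2.0, 3.0, 18.0))
+    1.0
+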
+    """
+    # f(x) = A *(x-a)^3 + B*(x-a)^2 + C*(x-a) + D
+
+    with np.errstate(divide='raise', over='raise', invalid='raise'):
+        try:
+            C = fpa
+            db = b - a
+            dc = c - a
+            denom = (db * dc) ** 2 * (db - dc)
+            d1 = np.empty((2, 2))
+            d1[0, 0] = dc ** 2
+            d1[0, 1] = -db ** 2
+            d1[1, 0] = -dc ** 3
+            d1[1, 1] = db ** 3
+            [A, B] = np.dot(d1, np.asarray([fb - fa - C * db,
+                                            fc - fa - C * dc]).flatten())
+            A /= denom
+            B /= denom
+            radical = B * B - 3 * A * C
+            xmin = a + (-B + np.sqrt(radical)) / (3 * A)
+        except ArithmeticError:
+            return None
+    if not np.isfinite(xmin):
+        return None
+    return xmin
+
+
+def _quadmin(a, fa, fpa, b, fb):
+    """
+    Finds the minimizer for a quadratic polynomial that goes through
+    the points (a,fa), (b,fb) with derivative at a of fpa.
+
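+    Examples
+    --------
+    For example, fitting ``(x - 1)**2`` through ``(0, 1)`` with slope
+    ``-2`` at 0 and through ``(1, 0)``:
+
+    >>> _quadmin(0.0, 1.0, -2.0, 1.0, 0.0)
+    1.0
+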
+    """
+    # f(x) = B*(x-a)^2 + C*(x-a) + D
+    with np.errstate(divide='raise', over='raise', invalid='raise'):
+        try:
+            D = fa
+            C = fpa
+            db = b - a * 1.0
+            B = (fb - D - C * db) / (db * db)
+            xmin = a - C / (2.0 * B)
+        except ArithmeticError:
+            return None
+    if not np.isfinite(xmin):
+        return None
+    return xmin
+
+
+def _zoom(a_lo, a_hi, phi_lo, phi_hi, derphi_lo,
+          phi, derphi, phi0, derphi0, c1, c2, extra_condition):
+    """Zoom stage of approximate linesearch satisfying strong Wolfe conditions.
+    
+    Part of the optimization algorithm in `scalar_search_wolfe2`.
+    
+    Notes
+    -----
+    Implements Algorithm 3.6 (zoom) in Wright and Nocedal,
+    'Numerical Optimization', 1999, pp. 61.
+
+    """
+
+    maxiter = 10
+    i = 0
+    delta1 = 0.2  # cubic interpolant check
+    delta2 = 0.1  # quadratic interpolant check
+    phi_rec = phi0
+    a_rec = 0
+    while True:
+        # Interpolate to find a trial step length between a_lo and a_hi.
+        # Try cubic interpolation first; if the result is within
+        # delta1 * dalpha of the interval endpoints (or outside the
+        # interval), fall back to quadratic interpolation; if that is
+        # still too close, bisect.
+
+        dalpha = a_hi - a_lo
+        if dalpha < 0:
+            a, b = a_hi, a_lo
+        else:
+            a, b = a_lo, a_hi
+
+        # The cubic interpolant uses phi_lo, derphi_lo, phi_hi, and the
+        # most recent value of phi (phi_rec at a_rec); the quadratic and
+        # bisection fallbacks are described above.
+
+        if (i > 0):
+            cchk = delta1 * dalpha
+            a_j = _cubicmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi,
+                            a_rec, phi_rec)
+        if (i == 0) or (a_j is None) or (a_j > b - cchk) or (a_j < a + cchk):
+            qchk = delta2 * dalpha
+            a_j = _quadmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi)
+            if (a_j is None) or (a_j > b-qchk) or (a_j < a+qchk):
+                a_j = a_lo + 0.5*dalpha
+
+        # Check new value of a_j
+
+        phi_aj = phi(a_j)
+        if (phi_aj > phi0 + c1*a_j*derphi0) or (phi_aj >= phi_lo):
+            phi_rec = phi_hi
+            a_rec = a_hi
+            a_hi = a_j
+            phi_hi = phi_aj
+        else:
+            derphi_aj = derphi(a_j)
+            if abs(derphi_aj) <= -c2*derphi0 and extra_condition(a_j, phi_aj):
+                a_star = a_j
+                val_star = phi_aj
+                valprime_star = derphi_aj
+                break
+            if derphi_aj*(a_hi - a_lo) >= 0:
+                phi_rec = phi_hi
+                a_rec = a_hi
+                a_hi = a_lo
+                phi_hi = phi_lo
+            else:
+                phi_rec = phi_lo
+                a_rec = a_lo
+            a_lo = a_j
+            phi_lo = phi_aj
+            derphi_lo = derphi_aj
+        i += 1
+        if (i > maxiter):
+            # Failed to find a conforming step size
+            a_star = None
+            val_star = None
+            valprime_star = None
+            break
+    return a_star, val_star, valprime_star
+
+
+#------------------------------------------------------------------------------
+# Armijo line and scalar searches
+#------------------------------------------------------------------------------
+
+def line_search_armijo(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1):
+    """Minimize over alpha, the function ``f(xk+alpha pk)``.
+
+    Parameters
+    ----------
+    f : callable
+        Function to be minimized.
+    xk : array_like
+        Current point.
+    pk : array_like
+        Search direction.
+    gfk : array_like
+        Gradient of `f` at point `xk`.
+    old_fval : float
+        Value of `f` at point `xk`.
+    args : tuple, optional
+        Optional arguments.
+    c1 : float, optional
+        Parameter for the Armijo (sufficient decrease) stopping criterion.
+    alpha0 : scalar, optional
+        Value of `alpha` at start of the optimization.
+
+    Returns
+    -------
+    alpha : float or None
+        Step length, or None if no suitable step was found.
+    f_count : int
+        Number of function evaluations made.
+    f_val_at_alpha : float
+        Value of `f` at the returned step length.
+
+    Notes
+    -----
+    Uses the interpolation algorithm (Armijo backtracking) as suggested by
+    Wright and Nocedal in 'Numerical Optimization', 1999, pp. 56-57
+
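+    Examples
+    --------
+    A minimal sketch on a simple quadratic (the arrays below are
+    illustrative):
+
+    >>> import numpy as np
+    >>> f = lambda x: np.dot(x, x)
+    >>> xk = np.array([1.0, 1.0])
+    >>> pk = np.array([-1.0, -1.0])
+    >>> alpha, fc, fval = line_search_armijo(f, xk, pk, 2.0*xk, f(xk))
+    >>> float(alpha)
+    1.0
+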
+    """
+    xk = np.atleast_1d(xk)
+    fc = [0]
+
+    def phi(alpha1):
+        fc[0] += 1
+        return f(xk + alpha1*pk, *args)
+
+    if old_fval is None:
+        phi0 = phi(0.)
+    else:
+        phi0 = old_fval  # f(xk) was already computed by the caller
+
+    derphi0 = np.dot(gfk, pk)
+    alpha, phi1 = scalar_search_armijo(phi, phi0, derphi0, c1=c1,
+                                       alpha0=alpha0)
+    return alpha, fc[0], phi1
+
+
+def line_search_BFGS(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1):
+    """
+    Compatibility wrapper for `line_search_armijo`
+    """
+    r = line_search_armijo(f, xk, pk, gfk, old_fval, args=args, c1=c1,
+                           alpha0=alpha0)
+    return r[0], r[1], 0, r[2]
+
+
+def scalar_search_armijo(phi, phi0, derphi0, c1=1e-4, alpha0=1, amin=0):
+    """Minimize over alpha, the function ``phi(alpha)``.
+
+    Uses the interpolation algorithm (Armijo backtracking) as suggested by
+    Wright and Nocedal in 'Numerical Optimization', 1999, pp. 56-57
+
+    The search direction is assumed to be a descent direction, so a step
+    ``alpha > 0`` is sought.
+
+    Returns
+    -------
+    alpha : float or None
+        Step length, or None if no suitable step was found.
+    phi1 : float
+        Value of ``phi`` at the returned step length.
+
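+    Examples
+    --------
+    For example, backtracking on ``phi(a) = (a - 0.5)**2`` from the
+    default ``alpha0=1``:
+
+    >>> phi = lambda a: (a - 0.5)**2
+    >>> scalar_search_armijo(phi, phi0=0.25, derphi0=-1.0)
+    (0.5, 0.0)
+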
+    """
+    phi_a0 = phi(alpha0)
+    if phi_a0 <= phi0 + c1*alpha0*derphi0:
+        return alpha0, phi_a0
+
+    # Otherwise, compute the minimizer of a quadratic interpolant:
+
+    alpha1 = -(derphi0) * alpha0**2 / 2.0 / (phi_a0 - phi0 - derphi0 * alpha0)
+    phi_a1 = phi(alpha1)
+
+    if (phi_a1 <= phi0 + c1*alpha1*derphi0):
+        return alpha1, phi_a1
+
+    # Otherwise, loop with cubic interpolation until we find an alpha which
+    # satisfies the first Wolfe condition (since we are backtracking, we will
+    # assume that the value of alpha is not too small and satisfies the second
+    # condition).
+
+    while alpha1 > amin:       # the search direction is assumed to be a descent direction
+        factor = alpha0**2 * alpha1**2 * (alpha1-alpha0)
+        a = alpha0**2 * (phi_a1 - phi0 - derphi0*alpha1) - \
+            alpha1**2 * (phi_a0 - phi0 - derphi0*alpha0)
+        a = a / factor
+        b = -alpha0**3 * (phi_a1 - phi0 - derphi0*alpha1) + \
+            alpha1**3 * (phi_a0 - phi0 - derphi0*alpha0)
+        b = b / factor
+
+        alpha2 = (-b + np.sqrt(abs(b**2 - 3 * a * derphi0))) / (3.0*a)
+        phi_a2 = phi(alpha2)
+
+        if (phi_a2 <= phi0 + c1*alpha2*derphi0):
+            return alpha2, phi_a2
+
+        if (alpha1 - alpha2) > alpha1 / 2.0 or (1 - alpha2/alpha1) < 0.96:
+            alpha2 = alpha1 / 2.0
+
+        alpha0 = alpha1
+        alpha1 = alpha2
+        phi_a0 = phi_a1
+        phi_a1 = phi_a2
+
+    # Failed to find a suitable step length
+    return None, phi_a1
+
+
+#------------------------------------------------------------------------------
+# Non-monotone line search for DF-SANE
+#------------------------------------------------------------------------------
+
+def _nonmonotone_line_search_cruz(f, x_k, d, prev_fs, eta,
+                                  gamma=1e-4, tau_min=0.1, tau_max=0.5):
+    """
+    Nonmonotone backtracking line search as described in [1]_
+
+    Parameters
+    ----------
+    f : callable
+        Function returning a tuple ``(f, F)`` where ``f`` is the value
+        of a merit function and ``F`` the residual.
+    x_k : ndarray
+        Initial position.
+    d : ndarray
+        Search direction.
+    prev_fs : list of float
+        List of previous merit function values. Should have ``len(prev_fs) <= M``
+        where ``M`` is the nonmonotonicity window parameter.
+    eta : float
+        Allowed merit function increase, see [1]_
+    gamma, tau_min, tau_max : float, optional
+        Search parameters, see [1]_
+
+    Returns
+    -------
+    alpha : float
+        Step length
+    xp : ndarray
+        Next position
+    fp : float
+        Merit function value at next position
+    Fp : ndarray
+        Residual at next position
+
+    References
+    ----------
+    [1] "Spectral residual method without gradient information for solving
+        large-scale nonlinear systems of equations." W. La Cruz,
+        J.M. Martinez, M. Raydan. Math. Comp. **75**, 1429 (2006).
+
+    """
+    f_k = prev_fs[-1]
+    f_bar = max(prev_fs)
+
+    alpha_p = 1
+    alpha_m = 1
+    alpha = 1
+
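+    # Trial steps are tried along both +d and -d; a rejected trial step is
+    # shrunk via the safeguarded quadratic-interpolation updates alpha_tp
+    # and alpha_tm below (see [1]_).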
+    while True:
+        xp = x_k + alpha_p * d
+        fp, Fp = f(xp)
+
+        if fp <= f_bar + eta - gamma * alpha_p**2 * f_k:
+            alpha = alpha_p
+            break
+
+        alpha_tp = alpha_p**2 * f_k / (fp + (2*alpha_p - 1)*f_k)
+
+        xp = x_k - alpha_m * d
+        fp, Fp = f(xp)
+
+        if fp <= f_bar + eta - gamma * alpha_m**2 * f_k:
+            alpha = -alpha_m
+            break
+
+        alpha_tm = alpha_m**2 * f_k / (fp + (2*alpha_m - 1)*f_k)
+
+        alpha_p = np.clip(alpha_tp, tau_min * alpha_p, tau_max * alpha_p)
+        alpha_m = np.clip(alpha_tm, tau_min * alpha_m, tau_max * alpha_m)
+
+    return alpha, xp, fp, Fp
+
+
+def _nonmonotone_line_search_cheng(f, x_k, d, f_k, C, Q, eta,
+                                   gamma=1e-4, tau_min=0.1, tau_max=0.5,
+                                   nu=0.85):
+    """
+    Nonmonotone line search from [1]_
+
+    Parameters
+    ----------
+    f : callable
+        Function returning a tuple ``(f, F)`` where ``f`` is the value
+        of a merit function and ``F`` the residual.
+    x_k : ndarray
+        Initial position.
+    d : ndarray
+        Search direction.
+    f_k : float
+        Initial merit function value.
+    C, Q : float
+        Control parameters. On the first iteration, give values
+        Q=1.0, C=f_k
+    eta : float
+        Allowed merit function increase, see [1]_
+    nu, gamma, tau_min, tau_max : float, optional
+        Search parameters, see [1]_
+
+    Returns
+    -------
+    alpha : float
+        Step length
+    xp : ndarray
+        Next position
+    fp : float
+        Merit function value at next position
+    Fp : ndarray
+        Residual at next position
+    C : float
+        New value for the control parameter C
+    Q : float
+        New value for the control parameter Q
+
+    References
+    ----------
+    .. [1] W. Cheng & D.-H. Li, ''A derivative-free nonmonotone line
+           search and its application to the spectral residual
+           method'', IMA J. Numer. Anal. 29, 814 (2009).
+
+    """
+    alpha_p = 1
+    alpha_m = 1
+    alpha = 1
+
+    while True:
+        xp = x_k + alpha_p * d
+        fp, Fp = f(xp)
+
+        if fp <= C + eta - gamma * alpha_p**2 * f_k:
+            alpha = alpha_p
+            break
+
+        alpha_tp = alpha_p**2 * f_k / (fp + (2*alpha_p - 1)*f_k)
+
+        xp = x_k - alpha_m * d
+        fp, Fp = f(xp)
+
+        if fp <= C + eta - gamma * alpha_m**2 * f_k:
+            alpha = -alpha_m
+            break
+
+        alpha_tm = alpha_m**2 * f_k / (fp + (2*alpha_m - 1)*f_k)
+
+        alpha_p = np.clip(alpha_tp, tau_min * alpha_p, tau_max * alpha_p)
+        alpha_m = np.clip(alpha_tm, tau_min * alpha_m, tau_max * alpha_m)
+
+    # Update C and Q
+    Q_next = nu * Q + 1
+    C = (nu * Q * (C + eta) + fp) / Q_next
+    Q = Q_next
+
+    return alpha, xp, fp, Fp, C, Q
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_linprog.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_linprog.py
new file mode 100644
index 00000000..ad68329a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_linprog.py
@@ -0,0 +1,708 @@
+"""
+A top-level linear programming interface.
+
+.. versionadded:: 0.15.0
+
+Functions
+---------
+.. autosummary::
+   :toctree: generated/
+
+    linprog
+    linprog_verbose_callback
+    linprog_terse_callback
+
+"""
+
+import numpy as np
+
+from ._optimize import OptimizeResult, OptimizeWarning
+from warnings import warn
+from ._linprog_highs import _linprog_highs
+from ._linprog_ip import _linprog_ip
+from ._linprog_simplex import _linprog_simplex
+from ._linprog_rs import _linprog_rs
+from ._linprog_doc import (_linprog_highs_doc, _linprog_ip_doc,
+                           _linprog_rs_doc, _linprog_simplex_doc,
+                           _linprog_highs_ipm_doc, _linprog_highs_ds_doc)
+from ._linprog_util import (
+    _parse_linprog, _presolve, _get_Abc, _LPProblem, _autoscale,
+    _postsolve, _check_result, _display_summary)
+from copy import deepcopy
+
+__all__ = ['linprog', 'linprog_verbose_callback', 'linprog_terse_callback']
+
+__docformat__ = "restructuredtext en"
+
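+# Method names accepted by ``linprog``; each dispatches to the corresponding
+# ``_linprog_*`` backend imported above.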
+LINPROG_METHODS = ['simplex', 'revised simplex', 'interior-point', 'highs', 'highs-ds', 'highs-ipm']
+
+
+def linprog_verbose_callback(res):
+    """
+    A sample callback function demonstrating the linprog callback interface.
+    This callback produces detailed output to sys.stdout before each iteration
+    and after the final iteration of the simplex algorithm.
+
+    Parameters
+    ----------
+    res : A `scipy.optimize.OptimizeResult` consisting of the following fields:
+
+        x : 1-D array
+            The independent variable vector which optimizes the linear
+            programming problem.
+        fun : float
+            Value of the objective function.
+        success : bool
+            True if the algorithm succeeded in finding an optimal solution.
+        slack : 1-D array
+            The values of the slack variables. Each slack variable corresponds
+            to an inequality constraint. If the slack is zero, then the
+            corresponding constraint is active.
+        con : 1-D array
+            The (nominally zero) residuals of the equality constraints, that is,
+            ``b - A_eq @ x``
+        phase : int
+            The phase of the optimization being executed. In phase 1 a basic
+            feasible solution is sought and the tableau ``T`` has an
+            additional row representing an alternate objective function.
+        status : int
+            An integer representing the exit status of the optimization::
+
+                 0 : Optimization terminated successfully
+                 1 : Iteration limit reached
+                 2 : Problem appears to be infeasible
+                 3 : Problem appears to be unbounded
+                 4 : Serious numerical difficulties encountered
+
+        nit : int
+            The number of iterations performed.
+        message : str
+            A string descriptor of the exit status of the optimization.
+    """
+    x = res['x']
+    fun = res['fun']
+    phase = res['phase']
+    status = res['status']
+    nit = res['nit']
+    message = res['message']
+    complete = res['complete']
+
+    saved_printoptions = np.get_printoptions()
+    np.set_printoptions(linewidth=500,
+                        formatter={'float': lambda x: "{0: 12.4f}".format(x)})
+    if status:
+        print('--------- Simplex Early Exit -------\n')
+        print('The simplex method exited early with status {0:d}'.format(status))
+        print(message)
+    elif complete:
+        print('--------- Simplex Complete --------\n')
+        print('Iterations required: {}'.format(nit))
+    else:
+        print('--------- Iteration {0:d}  ---------\n'.format(nit))
+
+    if nit > 0:
+        if phase == 1:
+            print('Current Pseudo-Objective Value:')
+        else:
+            print('Current Objective Value:')
+        print('f = ', fun)
+        print()
+        print('Current Solution Vector:')
+        print('x = ', x)
+        print()
+
+    np.set_printoptions(**saved_printoptions)
+
+
+def linprog_terse_callback(res):
+    """
+    A sample callback function demonstrating the linprog callback interface.
+    This callback produces brief output to sys.stdout before each iteration
+    and after the final iteration of the simplex algorithm.
+
+    Parameters
+    ----------
+    res : A `scipy.optimize.OptimizeResult` consisting of the following fields:
+
+        x : 1-D array
+            The independent variable vector which optimizes the linear
+            programming problem.
+        fun : float
+            Value of the objective function.
+        success : bool
+            True if the algorithm succeeded in finding an optimal solution.
+        slack : 1-D array
+            The values of the slack variables. Each slack variable corresponds
+            to an inequality constraint. If the slack is zero, then the
+            corresponding constraint is active.
+        con : 1-D array
+            The (nominally zero) residuals of the equality constraints, that is,
+            ``b - A_eq @ x``.
+        phase : int
+            The phase of the optimization being executed. In phase 1 a basic
+            feasible solution is sought and the tableau ``T`` has an
+            additional row representing an alternate objective function.
+        status : int
+            An integer representing the exit status of the optimization::
+
+                 0 : Optimization terminated successfully
+                 1 : Iteration limit reached
+                 2 : Problem appears to be infeasible
+                 3 : Problem appears to be unbounded
+                 4 : Serious numerical difficulties encountered
+
+        nit : int
+            The number of iterations performed.
+        message : str
+            A string descriptor of the exit status of the optimization.
+    """
+    nit = res['nit']
+    x = res['x']
+
+    if nit == 0:
+        print("Iter:   X:")
+    print("{0: <5d}   ".format(nit), end="")
+    print(x)
+
+
+def linprog(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
+            bounds=None, method='highs', callback=None,
+            options=None, x0=None, integrality=None):
+    r"""
+    Linear programming: minimize a linear objective function subject to linear
+    equality and inequality constraints.
+
+    Linear programming solves problems of the following form:
+
+    .. math::
+
+        \min_x \ & c^T x \\
+        \mbox{such that} \ & A_{ub} x \leq b_{ub},\\
+        & A_{eq} x = b_{eq},\\
+        & l \leq x \leq u ,
+
+    where :math:`x` is a vector of decision variables; :math:`c`,
+    :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
+    :math:`A_{ub}` and :math:`A_{eq}` are matrices.
+
+    Alternatively, that's:
+
+    minimize::
+
+        c @ x
+
+    such that::
+
+        A_ub @ x <= b_ub
+        A_eq @ x == b_eq
+        lb <= x <= ub
+
+    Note that by default ``lb = 0`` and ``ub = None`` unless specified with
+    ``bounds``.
+
+    Parameters
+    ----------
+    c : 1-D array
+        The coefficients of the linear objective function to be minimized.
+    A_ub : 2-D array, optional
+        The inequality constraint matrix. Each row of ``A_ub`` specifies the
+        coefficients of a linear inequality constraint on ``x``.
+    b_ub : 1-D array, optional
+        The inequality constraint vector. Each element represents an
+        upper bound on the corresponding value of ``A_ub @ x``.
+    A_eq : 2-D array, optional
+        The equality constraint matrix. Each row of ``A_eq`` specifies the
+        coefficients of a linear equality constraint on ``x``.
+    b_eq : 1-D array, optional
+        The equality constraint vector. Each element of ``A_eq @ x`` must equal
+        the corresponding element of ``b_eq``.
+    bounds : sequence, optional
+        A sequence of ``(min, max)`` pairs for each element in ``x``, defining
+        the minimum and maximum values of that decision variable. Use ``None``
+        to indicate that there is no bound. By default, bounds are
+        ``(0, None)`` (all decision variables are non-negative).
+        If a single tuple ``(min, max)`` is provided, then ``min`` and
+        ``max`` will serve as bounds for all decision variables.
+    method : str, optional
+        The algorithm used to solve the standard form problem.
+        :ref:`'highs' <optimize.linprog-highs>` (default),
+        :ref:`'highs-ds' <optimize.linprog-highs-ds>`,
+        :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`,
+        :ref:`'interior-point' <optimize.linprog-interior-point>` (legacy),
+        :ref:`'revised simplex' <optimize.linprog-revised_simplex>` (legacy),
+        and
+        :ref:`'simplex' <optimize.linprog-simplex>` (legacy) are supported.
+        The legacy methods are deprecated and will be removed in SciPy 1.11.0.
+    callback : callable, optional
+        If a callback function is provided, it will be called at least once per
+        iteration of the algorithm. The callback function must accept a single
+        `scipy.optimize.OptimizeResult` consisting of the following fields:
+
+        x : 1-D array
+            The current solution vector.
+        fun : float
+            The current value of the objective function ``c @ x``.
+        success : bool
+            ``True`` when the algorithm has completed successfully.
+        slack : 1-D array
+            The (nominally positive) values of the slack,
+            ``b_ub - A_ub @ x``.
+        con : 1-D array
+            The (nominally zero) residuals of the equality constraints,
+            ``b_eq - A_eq @ x``.
+        phase : int
+            The phase of the algorithm being executed.
+        status : int
+            An integer representing the status of the algorithm.
+
+            ``0`` : Optimization proceeding nominally.
+
+            ``1`` : Iteration limit reached.
+
+            ``2`` : Problem appears to be infeasible.
+
+            ``3`` : Problem appears to be unbounded.
+
+            ``4`` : Numerical difficulties encountered.
+
+        nit : int
+            The current iteration number.
+        message : str
+            A string descriptor of the algorithm status.
+
+        Callback functions are not currently supported by the HiGHS methods.
+
+    options : dict, optional
+        A dictionary of solver options. All methods accept the following
+        options:
+
+        maxiter : int
+            Maximum number of iterations to perform.
+            Default: see method-specific documentation.
+        disp : bool
+            Set to ``True`` to print convergence messages.
+            Default: ``False``.
+        presolve : bool
+            Set to ``False`` to disable automatic presolve.
+            Default: ``True``.
+
+        All methods except the HiGHS solvers also accept:
+
+        tol : float
+            A tolerance which determines when a residual is "close enough" to
+            zero to be considered exactly zero.
+        autoscale : bool
+            Set to ``True`` to automatically perform equilibration.
+            Consider using this option if the numerical values in the
+            constraints are separated by several orders of magnitude.
+            Default: ``False``.
+        rr : bool
+            Set to ``False`` to disable automatic redundancy removal.
+            Default: ``True``.
+        rr_method : string
+            Method used to identify and remove redundant rows from the
+            equality constraint matrix after presolve. For problems with
+            dense input, the available methods for redundancy removal are:
+
+            "SVD":
+                Repeatedly performs singular value decomposition on
+                the matrix, detecting redundant rows based on nonzeros
+                in the left singular vectors that correspond with
+                zero singular values. May be fast when the matrix is
+                nearly full rank.
+            "pivot":
+                Uses the algorithm presented in [5]_ to identify
+                redundant rows.
+            "ID":
+                Uses a randomized interpolative decomposition.
+                Identifies columns of the matrix transpose not used in
+                a full-rank interpolative decomposition of the matrix.
+            None:
+                Uses "svd" if the matrix is nearly full rank, that is,
+                the difference between the matrix rank and the number
+                of rows is less than five. If not, uses "pivot". The
+                behavior of this default is subject to change without
+                prior notice.
+
+            Default: None.
+            For problems with sparse input, this option is ignored, and the
+            pivot-based algorithm presented in [5]_ is used.
+
+        For method-specific options, see
+        :func:`show_options('linprog') <show_options>`.
+
+    x0 : 1-D array, optional
+        Guess values of the decision variables, which will be refined by
+        the optimization algorithm. This argument is currently used only by the
+        'revised simplex' method, and can only be used if `x0` represents a
+        basic feasible solution.
+
+    integrality : 1-D array or int, optional
+        Indicates the type of integrality constraint on each decision variable.
+
+        ``0`` : Continuous variable; no integrality constraint.
+
+        ``1`` : Integer variable; decision variable must be an integer
+        within `bounds`.
+
+        ``2`` : Semi-continuous variable; decision variable must be within
+        `bounds` or take value ``0``.
+
+        ``3`` : Semi-integer variable; decision variable must be an integer
+        within `bounds` or take value ``0``.
+
+        By default, all variables are continuous.
+
+        For mixed integrality constraints, supply an array of shape `c.shape`.
+        To infer a constraint on each decision variable from shorter inputs,
+        the argument will be broadcasted to `c.shape` using `np.broadcast_to`.
+
+        This argument is currently used only by the ``'highs'`` method and
+        ignored otherwise.
+
+    Returns
+    -------
+    res : OptimizeResult
+        A :class:`scipy.optimize.OptimizeResult` consisting of the fields
+        below. Note that the return types of the fields may depend on whether
+        the optimization was successful, therefore it is recommended to check
+        `OptimizeResult.status` before relying on the other fields:
+
+        x : 1-D array
+            The values of the decision variables that minimize the
+            objective function while satisfying the constraints.
+        fun : float
+            The optimal value of the objective function ``c @ x``.
+        slack : 1-D array
+            The (nominally positive) values of the slack variables,
+            ``b_ub - A_ub @ x``.
+        con : 1-D array
+            The (nominally zero) residuals of the equality constraints,
+            ``b_eq - A_eq @ x``.
+        success : bool
+            ``True`` when the algorithm succeeds in finding an optimal
+            solution.
+        status : int
+            An integer representing the exit status of the algorithm.
+
+            ``0`` : Optimization terminated successfully.
+
+            ``1`` : Iteration limit reached.
+
+            ``2`` : Problem appears to be infeasible.
+
+            ``3`` : Problem appears to be unbounded.
+
+            ``4`` : Numerical difficulties encountered.
+
+        nit : int
+            The total number of iterations performed in all phases.
+        message : str
+            A string descriptor of the exit status of the algorithm.
+
+    See Also
+    --------
+    show_options : Additional options accepted by the solvers.
+
+    Notes
+    -----
+    This section describes the available solvers that can be selected by the
+    'method' parameter.
+
+    `'highs-ds'` and
+    `'highs-ipm'` are interfaces to the
+    HiGHS simplex and interior-point method solvers [13]_, respectively.
+    `'highs'` (default) chooses between
+    the two automatically. These are the fastest linear
+    programming solvers in SciPy, especially for large, sparse problems;
+    which of these two is faster is problem-dependent.
+    The other solvers (`'interior-point'`, `'revised simplex'`, and
+    `'simplex'`) are legacy methods and will be removed in SciPy 1.11.0.
+
+    Method *highs-ds* is a wrapper of the C++ high performance dual
+    revised simplex implementation (HSOL) [13]_, [14]_. Method *highs-ipm*
+    is a wrapper of a C++ implementation of an **i**\ nterior-\ **p**\ oint
+    **m**\ ethod [13]_; it features a crossover routine, so it is as accurate
+    as a simplex solver. Method *highs* chooses between the two automatically.
+    For new code involving `linprog`, we recommend explicitly choosing one of
+    these three method values.
+
+    .. versionadded:: 1.6.0
+
+    Method *interior-point* uses the primal-dual path following algorithm
+    as outlined in [4]_. This algorithm supports sparse constraint matrices and
+    is typically faster than the simplex methods, especially for large, sparse
+    problems. Note, however, that the solution returned may be slightly less
+    accurate than those of the simplex methods and will not, in general,
+    correspond with a vertex of the polytope defined by the constraints.
+
+    .. versionadded:: 1.0.0
+
+    Method *revised simplex* uses the revised simplex method as described in
+    [9]_, except that a factorization [11]_ of the basis matrix, rather than
+    its inverse, is efficiently maintained and used to solve the linear systems
+    at each iteration of the algorithm.
+
+    .. versionadded:: 1.3.0
+
+    Method *simplex* uses a traditional, full-tableau implementation of
+    Dantzig's simplex algorithm [1]_, [2]_ (*not* the
+    Nelder-Mead simplex). This algorithm is included for backwards
+    compatibility and educational purposes.
+
+    .. versionadded:: 0.15.0
+
+    Before applying *interior-point*, *revised simplex*, or *simplex*,
+    a presolve procedure based on [8]_ attempts
+    to identify trivial infeasibilities, trivial unboundedness, and potential
+    problem simplifications. Specifically, it checks for:
+
+    - rows of zeros in ``A_eq`` or ``A_ub``, representing trivial constraints;
+    - columns of zeros in ``A_eq`` `and` ``A_ub``, representing unconstrained
+      variables;
+    - column singletons in ``A_eq``, representing fixed variables; and
+    - column singletons in ``A_ub``, representing simple bounds.
+
+    If presolve reveals that the problem is unbounded (e.g. an unconstrained
+    and unbounded variable has negative cost) or infeasible (e.g., a row of
+    zeros in ``A_eq`` corresponds with a nonzero in ``b_eq``), the solver
+    terminates with the appropriate status code. Note that presolve terminates
+    as soon as any sign of unboundedness is detected; consequently, a problem
+    may be reported as unbounded when in reality the problem is infeasible
+    (but infeasibility has not been detected yet). Therefore, if it is
+    important to know whether the problem is actually infeasible, solve the
+    problem again with option ``presolve=False``.
+
+    If neither infeasibility nor unboundedness are detected in a single pass
+    of the presolve, bounds are tightened where possible and fixed
+    variables are removed from the problem. Then, linearly dependent rows
+    of the ``A_eq`` matrix are removed (unless they represent an
+    infeasibility) to avoid numerical difficulties in the primary solve
+    routine. Note that rows that are nearly linearly dependent (within a
+    prescribed tolerance) may also be removed, which can change the optimal
+    solution in rare cases. If this is a concern, eliminate redundancy from
+    your problem formulation and run with option ``rr=False`` or
+    ``presolve=False``.
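+
+    As an illustration of re-solving without presolve (the tiny problem
+    below, with a zero row in ``A_eq`` and a nonzero ``b_eq``, is
+    deliberately infeasible):
+
+    >>> from scipy.optimize import linprog
+    >>> res = linprog(c=[1], A_eq=[[0]], b_eq=[1])
+    >>> res.status  # 2 : problem appears to be infeasible
+    2
+    >>> res = linprog(c=[1], A_eq=[[0]], b_eq=[1],
+    ...               options={'presolve': False})  # let the main solver decide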
+
+    Several potential improvements can be made here: additional presolve
+    checks outlined in [8]_ should be implemented, the presolve routine should
+    be run multiple times (until no further simplifications can be made), and
+    more of the efficiency improvements from [5]_ should be implemented in the
+    redundancy removal routines.
+
+    After presolve, the problem is transformed to standard form by converting
+    the (tightened) simple bounds to upper bound constraints, introducing
+    non-negative slack variables for inequality constraints, and expressing
+    unbounded variables as the difference between two non-negative variables.
+    Optionally, the problem is automatically scaled via equilibration [12]_.
+    The selected algorithm solves the standard form problem, and a
+    postprocessing routine converts the result to a solution to the original
+    problem.
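+
+    As a rough sketch of the slack-variable step only (illustrative, using
+    the inequality data from the Examples section below):
+
+    >>> import numpy as np
+    >>> A_ub = np.array([[-3., 1.], [1., 2.]])
+    >>> m = A_ub.shape[0]
+    >>> A_std = np.hstack([A_ub, np.eye(m)])  # A_ub @ x + s == b_ub, s >= 0
+    >>> A_std.shape
+    (2, 4)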
+
+    References
+    ----------
+    .. [1] Dantzig, George B., Linear programming and extensions. Rand
+           Corporation Research Study Princeton Univ. Press, Princeton, NJ,
+           1963
+    .. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to
+           Mathematical Programming", McGraw-Hill, Chapter 4.
+    .. [3] Bland, Robert G. New finite pivoting rules for the simplex method.
+           Mathematics of Operations Research (2), 1977: pp. 103-107.
+    .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
+           optimizer for linear programming: an implementation of the
+           homogeneous algorithm." High performance optimization. Springer US,
+           2000. 197-232.
+    .. [5] Andersen, Erling D. "Finding all linearly dependent rows in
+           large-scale linear programming." Optimization Methods and Software
+           6.3 (1995): 219-227.
+    .. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear
+           Programming based on Newton's Method." Unpublished Course Notes,
+           March 2004. Available 2/25/2017 at
+           https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf
+    .. [7] Fourer, Robert. "Solving Linear Programs by Interior-Point Methods."
+           Unpublished Course Notes, August 26, 2005. Available 2/25/2017 at
+           http://www.4er.org/CourseNotes/Book%20B/B-III.pdf
+    .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear
+           programming." Mathematical Programming 71.2 (1995): 221-245.
+    .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
+           programming." Athena Scientific 1 (1997): 997.
+    .. [10] Andersen, Erling D., et al. Implementation of interior point
+            methods for large scale linear programming. HEC/Universite de
+            Geneve, 1996.
+    .. [11] Bartels, Richard H. "A stabilization of the simplex method."
+            Numerische Mathematik 16.5 (1971): 414-434.
+    .. [12] Tomlin, J. A. "On scaling linear programming problems."
+            Mathematical Programming Study 4 (1975): 146-166.
+    .. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J.
+            "HiGHS - high performance software for linear optimization."
+            https://highs.dev/
+    .. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised
+            simplex method." Mathematical Programming Computation, 10 (1),
+            119-142, 2018. DOI: 10.1007/s12532-017-0130-5
+
+    Examples
+    --------
+    Consider the following problem:
+
+    .. math::
+
+        \min_{x_0, x_1} \ -x_0 + 4x_1 & \\
+        \mbox{such that} \ -3x_0 + x_1 & \leq 6,\\
+        -x_0 - 2x_1 & \geq -4,\\
+        x_1 & \geq -3.
+
+    The problem is not presented in the form accepted by `linprog`. This is
+    easily remedied by converting the "greater than" inequality
+    constraint to a "less than" inequality constraint by
+    multiplying both sides by a factor of :math:`-1`. Note also that the last
+    constraint is really the simple bound :math:`-3 \leq x_1 \leq \infty`.
+    Finally, since there are no bounds on :math:`x_0`, we must explicitly
+    specify the bounds :math:`-\infty \leq x_0 \leq \infty`, as the
+    default is for variables to be non-negative. After collecting coefficients
+    into arrays and tuples, the input for this problem is:
+
+    >>> from scipy.optimize import linprog
+    >>> c = [-1, 4]
+    >>> A = [[-3, 1], [1, 2]]
+    >>> b = [6, 4]
+    >>> x0_bounds = (None, None)
+    >>> x1_bounds = (-3, None)
+    >>> res = linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds])
+    >>> res.fun
+    -22.0
+    >>> res.x
+    array([10., -3.])
+    >>> res.message
+    'Optimization terminated successfully. (HiGHS Status 7: Optimal)'
+
+    The marginals (AKA dual values / shadow prices / Lagrange multipliers)
+    and residuals (slacks) are also available.
+
+    >>> res.ineqlin
+      residual: [ 3.900e+01  0.000e+00]
+     marginals: [-0.000e+00 -1.000e+00]
+
+    For example, because the marginal associated with the second inequality
+    constraint is -1, we expect the optimal value of the objective function
+    to decrease by ``eps`` if we add a small amount ``eps`` to the right hand
+    side of the second inequality constraint:
+
+    >>> eps = 0.05
+    >>> b[1] += eps
+    >>> linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds]).fun
+    -22.05
+
+    Also, because the residual on the first inequality constraint is 39, we
+    can decrease the right hand side of the first constraint by 39 without
+    affecting the optimal solution.
+
+    >>> b = [6, 4]  # reset to original values
+    >>> b[0] -= 39
+    >>> linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds]).fun
+    -22.0
+
+    """
+
+    meth = method.lower()
+    methods = {"highs", "highs-ds", "highs-ipm",
+               "simplex", "revised simplex", "interior-point"}
+
+    if meth not in methods:
+        raise ValueError(f"Unknown solver '{method}'")
+
+    if x0 is not None and meth != "revised simplex":
+        warning_message = "x0 is used only when method is 'revised simplex'. "
+        warn(warning_message, OptimizeWarning)
+
+    if np.any(integrality) and meth != "highs":
+        integrality = None
+        warning_message = ("Only `method='highs'` supports integer "
+                           "constraints. Ignoring `integrality`.")
+        warn(warning_message, OptimizeWarning)
+    elif np.any(integrality):
+        integrality = np.broadcast_to(integrality, np.shape(c))
+
+    lp = _LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality)
+    lp, solver_options = _parse_linprog(lp, options, meth)
+    tol = solver_options.get('tol', 1e-9)
+
+    # Give unmodified problem to HiGHS
+    if meth.startswith('highs'):
+        if callback is not None:
+            raise NotImplementedError("HiGHS solvers do not support the "
+                                      "callback interface.")
+        highs_solvers = {'highs-ipm': 'ipm', 'highs-ds': 'simplex',
+                         'highs': None}
+
+        sol = _linprog_highs(lp, solver=highs_solvers[meth],
+                             **solver_options)
+        sol['status'], sol['message'] = (
+            _check_result(sol['x'], sol['fun'], sol['status'], sol['slack'],
+                          sol['con'], lp.bounds, tol, sol['message']))
+        sol['success'] = sol['status'] == 0
+        return OptimizeResult(sol)
+
+    warn(f"`method='{meth}'` is deprecated and will be removed in SciPy "
+         "1.11.0. Please use one of the HiGHS solvers (e.g. "
+         "`method='highs'`) in new code.", DeprecationWarning, stacklevel=2)
+
+    iteration = 0
+    complete = False  # will become True if solved in presolve
+    undo = []
+
+    # Keep the original arrays to calculate slack/residuals for original
+    # problem.
+    lp_o = deepcopy(lp)
+
+    # Solve trivial problem, eliminate variables, tighten bounds, etc.
+    rr_method = solver_options.pop('rr_method', None)  # need to pop these;
+    rr = solver_options.pop('rr', True)  # they're not passed to methods
+    c0 = 0  # we might get a constant term in the objective
+    if solver_options.pop('presolve', True):
+        (lp, c0, x, undo, complete, status, message) = _presolve(lp, rr,
+                                                                 rr_method,
+                                                                 tol)
+
+    C, b_scale = 1, 1  # for trivial unscaling if autoscale is not used
+    postsolve_args = (lp_o._replace(bounds=lp.bounds), undo, C, b_scale)
+
+    if not complete:
+        A, b, c, c0, x0 = _get_Abc(lp, c0)
+        if solver_options.pop('autoscale', False):
+            A, b, c, x0, C, b_scale = _autoscale(A, b, c, x0)
+            postsolve_args = postsolve_args[:-2] + (C, b_scale)
+
+        if meth == 'simplex':
+            x, status, message, iteration = _linprog_simplex(
+                c, c0=c0, A=A, b=b, callback=callback,
+                postsolve_args=postsolve_args, **solver_options)
+        elif meth == 'interior-point':
+            x, status, message, iteration = _linprog_ip(
+                c, c0=c0, A=A, b=b, callback=callback,
+                postsolve_args=postsolve_args, **solver_options)
+        elif meth == 'revised simplex':
+            x, status, message, iteration = _linprog_rs(
+                c, c0=c0, A=A, b=b, x0=x0, callback=callback,
+                postsolve_args=postsolve_args, **solver_options)
+
+    # Eliminate artificial variables, re-introduce presolved variables, etc.
+    disp = solver_options.get('disp', False)
+
+    x, fun, slack, con = _postsolve(x, postsolve_args, complete)
+
+    status, message = _check_result(x, fun, status, slack, con, lp_o.bounds,
+                                    tol, message)
+
+    if disp:
+        _display_summary(message, status, fun, iteration)
+
+    sol = {
+        'x': x,
+        'fun': fun,
+        'slack': slack,
+        'con': con,
+        'status': status,
+        'message': message,
+        'nit': iteration,
+        'success': status == 0}
+
+    return OptimizeResult(sol)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_linprog_doc.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_linprog_doc.py
new file mode 100644
index 00000000..8ca3686c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_linprog_doc.py
@@ -0,0 +1,1435 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Sat Aug 22 19:49:17 2020
+
+@author: matth
+"""
+
+
+def _linprog_highs_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
+                       bounds=None, method='highs', callback=None,
+                       maxiter=None, disp=False, presolve=True,
+                       time_limit=None,
+                       dual_feasibility_tolerance=None,
+                       primal_feasibility_tolerance=None,
+                       ipm_optimality_tolerance=None,
+                       simplex_dual_edge_weight_strategy=None,
+                       mip_rel_gap=None,
+                       **unknown_options):
+    r"""
+    Linear programming: minimize a linear objective function subject to linear
+    equality and inequality constraints using one of the HiGHS solvers.
+
+    Linear programming solves problems of the following form:
+
+    .. math::
+
+        \min_x \ & c^T x \\
+        \mbox{such that} \ & A_{ub} x \leq b_{ub},\\
+        & A_{eq} x = b_{eq},\\
+        & l \leq x \leq u ,
+
+    where :math:`x` is a vector of decision variables; :math:`c`,
+    :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
+    :math:`A_{ub}` and :math:`A_{eq}` are matrices.
+
+    Alternatively, that's:
+
+    minimize::
+
+        c @ x
+
+    such that::
+
+        A_ub @ x <= b_ub
+        A_eq @ x == b_eq
+        lb <= x <= ub
+
+    Note that by default ``lb = 0`` and ``ub = None`` unless specified with
+    ``bounds``.
+
+    Parameters
+    ----------
+    c : 1-D array
+        The coefficients of the linear objective function to be minimized.
+    A_ub : 2-D array, optional
+        The inequality constraint matrix. Each row of ``A_ub`` specifies the
+        coefficients of a linear inequality constraint on ``x``.
+    b_ub : 1-D array, optional
+        The inequality constraint vector. Each element represents an
+        upper bound on the corresponding value of ``A_ub @ x``.
+    A_eq : 2-D array, optional
+        The equality constraint matrix. Each row of ``A_eq`` specifies the
+        coefficients of a linear equality constraint on ``x``.
+    b_eq : 1-D array, optional
+        The equality constraint vector. Each element of ``A_eq @ x`` must equal
+        the corresponding element of ``b_eq``.
+    bounds : sequence, optional
+        A sequence of ``(min, max)`` pairs for each element in ``x``, defining
+        the minimum and maximum values of that decision variable. Use ``None``
+        to indicate that there is no bound. By default, bounds are
+        ``(0, None)`` (all decision variables are non-negative).
+        If a single tuple ``(min, max)`` is provided, then ``min`` and
+        ``max`` will serve as bounds for all decision variables.
+    method : str
+
+        This is the method-specific documentation for 'highs', which chooses
+        automatically between
+        :ref:`'highs-ds' <optimize.linprog-highs-ds>` and
+        :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`.
+        :ref:`'interior-point' <optimize.linprog-interior-point>`,
+        :ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
+        :ref:`'simplex' <optimize.linprog-simplex>` (legacy)
+        are also available.
+    integrality : 1-D array or int, optional
+        Indicates the type of integrality constraint on each decision variable.
+
+        ``0`` : Continuous variable; no integrality constraint.
+
+        ``1`` : Integer variable; decision variable must be an integer
+        within `bounds`.
+
+        ``2`` : Semi-continuous variable; decision variable must be within
+        `bounds` or take value ``0``.
+
+        ``3`` : Semi-integer variable; decision variable must be an integer
+        within `bounds` or take value ``0``.
+
+        By default, all variables are continuous.
+
+        For mixed integrality constraints, supply an array of shape `c.shape`.
+        To infer a constraint on each decision variable from shorter inputs,
+        the argument will be broadcast to `c.shape` using `np.broadcast_to`.
+
+        This argument is currently used only by the ``'highs'`` method and
+        ignored otherwise.
+
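+    As a minimal sketch of a mixed-integer call (problem data are
+    placeholders), requiring the second variable to be an integer:
+
+    >>> from scipy.optimize import linprog
+    >>> res = linprog(c=[-1, -2], A_ub=[[1, 1]], b_ub=[3.5],
+    ...               integrality=[0, 1], method='highs')
+    >>> # res.x should be [0.5, 3.]: the second component is integral
+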
+    Options
+    -------
+    maxiter : int
+        The maximum number of iterations to perform in either phase.
+        For :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`, this does not
+        include the number of crossover iterations. Default is the largest
+        possible value for an ``int`` on the platform.
+    disp : bool (default: ``False``)
+        Set to ``True`` if indicators of optimization status are to be
+        printed to the console during optimization.
+    presolve : bool (default: ``True``)
+        Presolve attempts to identify trivial infeasibilities,
+        identify trivial unboundedness, and simplify the problem before
+        sending it to the main solver. It is generally recommended
+        to keep the default setting ``True``; set to ``False`` if
+        presolve is to be disabled.
+    time_limit : float
+        The maximum time in seconds allotted to solve the problem;
+        default is the largest possible value for a ``double`` on the
+        platform.
+    dual_feasibility_tolerance : double (default: 1e-07)
+        Dual feasibility tolerance for
+        :ref:`'highs-ds' <optimize.linprog-highs-ds>`.
+        The minimum of this and ``primal_feasibility_tolerance``
+        is used for the feasibility tolerance of
+        :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`.
+    primal_feasibility_tolerance : double (default: 1e-07)
+        Primal feasibility tolerance for
+        :ref:`'highs-ds' <optimize.linprog-highs-ds>`.
+        The minimum of this and ``dual_feasibility_tolerance``
+        is used for the feasibility tolerance of
+        :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`.
+    ipm_optimality_tolerance : double (default: ``1e-08``)
+        Optimality tolerance for
+        :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`.
+        Minimum allowable value is 1e-12.
+    simplex_dual_edge_weight_strategy : str (default: None)
+        Strategy for simplex dual edge weights. The default, ``None``,
+        automatically selects one of the following.
+
+        ``'dantzig'`` uses Dantzig's original strategy of choosing the most
+        negative reduced cost.
+
+        ``'devex'`` uses the strategy described in [15]_.
+
+        ``'steepest'`` uses the exact steepest edge strategy as described in
+        [16]_.
+
+        ``'steepest-devex'`` begins with the exact steepest edge strategy
+        until the computation is too costly or inexact and then switches to
+        the devex method.
+
+        Currently, ``None`` always selects ``'steepest-devex'``, but this
+        may change as new options become available.
+    mip_rel_gap : double (default: None)
+        Termination criterion for MIP solver: solver will terminate when the
+        gap between the primal objective value and the dual objective bound,
+        scaled by the primal objective value, is <= mip_rel_gap.
+    unknown_options : dict
+        Optional arguments not used by this particular solver. If
+        ``unknown_options`` is non-empty, a warning is issued listing
+        all unused options.
+
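+    These options are passed via the ``options`` dictionary of ``linprog``;
+    for example (a sketch with placeholder data and values):
+
+    >>> from scipy.optimize import linprog
+    >>> res = linprog(c=[1, 1], A_ub=[[1, 2]], b_ub=[4], method='highs',
+    ...               options={'time_limit': 10.0, 'presolve': True})
+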
+    Returns
+    -------
+    res : OptimizeResult
+        A :class:`scipy.optimize.OptimizeResult` consisting of the fields:
+
+        x : 1D array
+            The values of the decision variables that minimize the
+            objective function while satisfying the constraints.
+        fun : float
+            The optimal value of the objective function ``c @ x``.
+        slack : 1D array
+            The (nominally positive) values of the slack,
+            ``b_ub - A_ub @ x``.
+        con : 1D array
+            The (nominally zero) residuals of the equality constraints,
+            ``b_eq - A_eq @ x``.
+        success : bool
+            ``True`` when the algorithm succeeds in finding an optimal
+            solution.
+        status : int
+            An integer representing the exit status of the algorithm.
+
+            ``0`` : Optimization terminated successfully.
+
+            ``1`` : Iteration or time limit reached.
+
+            ``2`` : Problem appears to be infeasible.
+
+            ``3`` : Problem appears to be unbounded.
+
+            ``4`` : The HiGHS solver ran into a problem.
+
+        message : str
+            A string descriptor of the exit status of the algorithm.
+        nit : int
+            The total number of iterations performed.
+            For the HiGHS simplex method, this includes iterations in all
+            phases. For the HiGHS interior-point method, this does not include
+            crossover iterations.
+        crossover_nit : int
+            The number of primal/dual pushes performed during the
+            crossover routine for the HiGHS interior-point method.
+            This is ``0`` for the HiGHS simplex method.
+        ineqlin : OptimizeResult
+            Solution and sensitivity information corresponding to the
+            inequality constraints, `b_ub`. A dictionary consisting of the
+            fields:
+
+            residual : np.ndarray
+                The (nominally positive) values of the slack variables,
+                ``b_ub - A_ub @ x``.  This quantity is also commonly
+                referred to as "slack".
+
+            marginals : np.ndarray
+                The sensitivity (partial derivative) of the objective
+                function with respect to the right-hand side of the
+                inequality constraints, `b_ub`.
+
+        eqlin : OptimizeResult
+            Solution and sensitivity information corresponding to the
+            equality constraints, `b_eq`.  A dictionary consisting of the
+            fields:
+
+            residual : np.ndarray
+                The (nominally zero) residuals of the equality constraints,
+                ``b_eq - A_eq @ x``.
+
+            marginals : np.ndarray
+                The sensitivity (partial derivative) of the objective
+                function with respect to the right-hand side of the
+                equality constraints, `b_eq`.
+
+        lower, upper : OptimizeResult
+            Solution and sensitivity information corresponding to the
+            lower and upper bounds on decision variables, `bounds`.
+
+            residual : np.ndarray
+                The (nominally positive) values of the quantity
+                ``x - lb`` (lower) or ``ub - x`` (upper).
+
+            marginals : np.ndarray
+                The sensitivity (partial derivative) of the objective
+                function with respect to the lower and upper
+                `bounds`.
+
+    Notes
+    -----
+
+    Method :ref:`'highs-ds' <optimize.linprog-highs-ds>` is a wrapper
+    of the C++ high performance dual revised simplex implementation (HSOL)
+    [13]_, [14]_. Method :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`
+    is a wrapper of a C++ implementation of an **i**\ nterior-\ **p**\ oint
+    **m**\ ethod [13]_; it features a crossover routine, so it is as accurate
+    as a simplex solver. Method :ref:`'highs' <optimize.linprog-highs>` chooses
+    between the two automatically. For new code involving `linprog`, we
+    recommend explicitly choosing one of these three method values instead of
+    :ref:`'interior-point' <optimize.linprog-interior-point>`,
+    :ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
+    :ref:`'simplex' <optimize.linprog-simplex>` (legacy).
+
+    The result fields `ineqlin`, `eqlin`, `lower`, and `upper` all contain
+    `marginals`, or partial derivatives of the objective function with respect
+    to the right-hand side of each constraint. These partial derivatives are
+    also referred to as "Lagrange multipliers", "dual values", and
+    "shadow prices". The sign convention of `marginals` is opposite that
+    of Lagrange multipliers produced by many nonlinear solvers.
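+
+    As a brief sketch of reading these fields (reusing the data from the
+    `linprog` examples; attribute access is equivalent to dictionary access
+    on `OptimizeResult`):
+
+    >>> from scipy.optimize import linprog
+    >>> res = linprog(c=[-1, 4], A_ub=[[-3, 1], [1, 2]], b_ub=[6, 4],
+    ...               method='highs')
+    >>> duals = res.ineqlin.marginals  # sensitivity of c @ x to b_ub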
+
+    References
+    ----------
+    .. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J.
+           "HiGHS - high performance software for linear optimization."
+           https://highs.dev/
+    .. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised
+           simplex method." Mathematical Programming Computation, 10 (1),
+           119-142, 2018. DOI: 10.1007/s12532-017-0130-5
+    .. [15] Harris, Paula MJ. "Pivot selection methods of the Devex LP code."
+            Mathematical Programming 5.1 (1973): 1-28.
+    .. [16] Goldfarb, Donald, and John Ker Reid. "A practicable steepest-edge
+            simplex algorithm." Mathematical Programming 12.1 (1977): 361-371.
+    """
+    pass
+
+
+def _linprog_highs_ds_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
+                          bounds=None, method='highs-ds', callback=None,
+                          maxiter=None, disp=False, presolve=True,
+                          time_limit=None,
+                          dual_feasibility_tolerance=None,
+                          primal_feasibility_tolerance=None,
+                          simplex_dual_edge_weight_strategy=None,
+                          **unknown_options):
+    r"""
+    Linear programming: minimize a linear objective function subject to linear
+    equality and inequality constraints using the HiGHS dual simplex solver.
+
+    Linear programming solves problems of the following form:
+
+    .. math::
+
+        \min_x \ & c^T x \\
+        \mbox{such that} \ & A_{ub} x \leq b_{ub},\\
+        & A_{eq} x = b_{eq},\\
+        & l \leq x \leq u ,
+
+    where :math:`x` is a vector of decision variables; :math:`c`,
+    :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
+    :math:`A_{ub}` and :math:`A_{eq}` are matrices.
+
+    Alternatively, that's:
+
+    minimize::
+
+        c @ x
+
+    such that::
+
+        A_ub @ x <= b_ub
+        A_eq @ x == b_eq
+        lb <= x <= ub
+
+    Note that by default ``lb = 0`` and ``ub = None`` unless specified with
+    ``bounds``.
+
+    Parameters
+    ----------
+    c : 1-D array
+        The coefficients of the linear objective function to be minimized.
+    A_ub : 2-D array, optional
+        The inequality constraint matrix. Each row of ``A_ub`` specifies the
+        coefficients of a linear inequality constraint on ``x``.
+    b_ub : 1-D array, optional
+        The inequality constraint vector. Each element represents an
+        upper bound on the corresponding value of ``A_ub @ x``.
+    A_eq : 2-D array, optional
+        The equality constraint matrix. Each row of ``A_eq`` specifies the
+        coefficients of a linear equality constraint on ``x``.
+    b_eq : 1-D array, optional
+        The equality constraint vector. Each element of ``A_eq @ x`` must equal
+        the corresponding element of ``b_eq``.
+    bounds : sequence, optional
+        A sequence of ``(min, max)`` pairs for each element in ``x``, defining
+        the minimum and maximum values of that decision variable. Use ``None``
+        to indicate that there is no bound. By default, bounds are
+        ``(0, None)`` (all decision variables are non-negative).
+        If a single tuple ``(min, max)`` is provided, then ``min`` and
+        ``max`` will serve as bounds for all decision variables.
+    method : str
+
+        This is the method-specific documentation for 'highs-ds'.
+        :ref:`'highs' <optimize.linprog-highs>`,
+        :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`,
+        :ref:`'interior-point' <optimize.linprog-interior-point>`,
+        :ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
+        :ref:`'simplex' <optimize.linprog-simplex>` (legacy)
+        are also available.
+
+    Options
+    -------
+    maxiter : int
+        The maximum number of iterations to perform in either phase.
+        Default is the largest possible value for an ``int`` on the platform.
+    disp : bool (default: ``False``)
+        Set to ``True`` if indicators of optimization status are to be
+        printed to the console during optimization.
+    presolve : bool (default: ``True``)
+        Presolve attempts to identify trivial infeasibilities,
+        identify trivial unboundedness, and simplify the problem before
+        sending it to the main solver. It is generally recommended
+        to keep the default setting ``True``; set to ``False`` if
+        presolve is to be disabled.
+    time_limit : float
+        The maximum time in seconds allotted to solve the problem;
+        default is the largest possible value for a ``double`` on the
+        platform.
+    dual_feasibility_tolerance : double (default: 1e-07)
+        Dual feasibility tolerance for
+        :ref:`'highs-ds' <optimize.linprog-highs-ds>`.
+    primal_feasibility_tolerance : double (default: 1e-07)
+        Primal feasibility tolerance for
+        :ref:`'highs-ds' <optimize.linprog-highs-ds>`.
+    simplex_dual_edge_weight_strategy : str (default: None)
+        Strategy for simplex dual edge weights. The default, ``None``,
+        automatically selects one of the following.
+
+        ``'dantzig'`` uses Dantzig's original strategy of choosing the most
+        negative reduced cost.
+
+        ``'devex'`` uses the strategy described in [15]_.
+
+        ``'steepest'`` uses the exact steepest edge strategy as described in
+        [16]_.
+
+        ``'steepest-devex'`` begins with the exact steepest edge strategy
+        until the computation is too costly or inexact and then switches to
+        the devex method.
+
+        Currently, ``None`` always selects ``'steepest-devex'``, but this
+        may change as new options become available.
+    unknown_options : dict
+        Optional arguments not used by this particular solver. If
+        ``unknown_options`` is non-empty, a warning is issued listing
+        all unused options.
+
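+    For example (a sketch with placeholder data), the edge-weight strategy
+    can be fixed instead of letting HiGHS choose:
+
+    >>> from scipy.optimize import linprog
+    >>> res = linprog(c=[1, 1], A_ub=[[1, 2]], b_ub=[4], method='highs-ds',
+    ...               options={'simplex_dual_edge_weight_strategy': 'devex'})
+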
+    Returns
+    -------
+    res : OptimizeResult
+        A :class:`scipy.optimize.OptimizeResult` consisting of the fields:
+
+        x : 1D array
+            The values of the decision variables that minimize the
+            objective function while satisfying the constraints.
+        fun : float
+            The optimal value of the objective function ``c @ x``.
+        slack : 1D array
+            The (nominally positive) values of the slack,
+            ``b_ub - A_ub @ x``.
+        con : 1D array
+            The (nominally zero) residuals of the equality constraints,
+            ``b_eq - A_eq @ x``.
+        success : bool
+            ``True`` when the algorithm succeeds in finding an optimal
+            solution.
+        status : int
+            An integer representing the exit status of the algorithm.
+
+            ``0`` : Optimization terminated successfully.
+
+            ``1`` : Iteration or time limit reached.
+
+            ``2`` : Problem appears to be infeasible.
+
+            ``3`` : Problem appears to be unbounded.
+
+            ``4`` : The HiGHS solver ran into a problem.
+
+        message : str
+            A string descriptor of the exit status of the algorithm.
+        nit : int
+            The total number of iterations performed. This includes iterations
+            in all phases.
+        crossover_nit : int
+            This is always ``0`` for the HiGHS simplex method.
+            For the HiGHS interior-point method, this is the number of
+            primal/dual pushes performed during the crossover routine.
+        ineqlin : OptimizeResult
+            Solution and sensitivity information corresponding to the
+            inequality constraints, `b_ub`. A dictionary consisting of the
+            fields:
+
+            residual : np.ndarray
+                The (nominally positive) values of the slack variables,
+                ``b_ub - A_ub @ x``.  This quantity is also commonly
+                referred to as "slack".
+
+            marginals : np.ndarray
+                The sensitivity (partial derivative) of the objective
+                function with respect to the right-hand side of the
+                inequality constraints, `b_ub`.
+
+        eqlin : OptimizeResult
+            Solution and sensitivity information corresponding to the
+            equality constraints, `b_eq`.  A dictionary consisting of the
+            fields:
+
+            residual : np.ndarray
+                The (nominally zero) residuals of the equality constraints,
+                ``b_eq - A_eq @ x``.
+
+            marginals : np.ndarray
+                The sensitivity (partial derivative) of the objective
+                function with respect to the right-hand side of the
+                equality constraints, `b_eq`.
+
+        lower, upper : OptimizeResult
+            Solution and sensitivity information corresponding to the
+            lower and upper bounds on decision variables, `bounds`.
+
+            residual : np.ndarray
+                The (nominally positive) values of the quantity
+                ``x - lb`` (lower) or ``ub - x`` (upper).
+
+            marginals : np.ndarray
+                The sensitivity (partial derivative) of the objective
+                function with respect to the lower and upper
+                `bounds`.
+
+    Notes
+    -----
+
+    Method :ref:`'highs-ds' <optimize.linprog-highs-ds>` is a wrapper
+    of the C++ high performance dual revised simplex implementation (HSOL)
+    [13]_, [14]_. Method :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`
+    is a wrapper of a C++ implementation of an **i**\ nterior-\ **p**\ oint
+    **m**\ ethod [13]_; it features a crossover routine, so it is as accurate
+    as a simplex solver. Method :ref:`'highs' <optimize.linprog-highs>` chooses
+    between the two automatically. For new code involving `linprog`, we
+    recommend explicitly choosing one of these three method values instead of
+    :ref:`'interior-point' <optimize.linprog-interior-point>`,
+    :ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
+    :ref:`'simplex' <optimize.linprog-simplex>` (legacy).
+
+    The result fields `ineqlin`, `eqlin`, `lower`, and `upper` all contain
+    `marginals`, or partial derivatives of the objective function with respect
+    to the right-hand side of each constraint. These partial derivatives are
+    also referred to as "Lagrange multipliers", "dual values", and
+    "shadow prices". The sign convention of `marginals` is opposite that
+    of Lagrange multipliers produced by many nonlinear solvers.
+
+    References
+    ----------
+    .. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J.
+           "HiGHS - high performance software for linear optimization."
+           https://highs.dev/
+    .. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised
+           simplex method." Mathematical Programming Computation, 10 (1),
+           119-142, 2018. DOI: 10.1007/s12532-017-0130-5
+    .. [15] Harris, Paula MJ. "Pivot selection methods of the Devex LP code."
+            Mathematical Programming 5.1 (1973): 1-28.
+    .. [16] Goldfarb, Donald, and John Ker Reid. "A practicable steepest-edge
+            simplex algorithm." Mathematical Programming 12.1 (1977): 361-371.
+    """
+    pass
+
+
+def _linprog_highs_ipm_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
+                           bounds=None, method='highs-ipm', callback=None,
+                           maxiter=None, disp=False, presolve=True,
+                           time_limit=None,
+                           dual_feasibility_tolerance=None,
+                           primal_feasibility_tolerance=None,
+                           ipm_optimality_tolerance=None,
+                           **unknown_options):
+    r"""
+    Linear programming: minimize a linear objective function subject to linear
+    equality and inequality constraints using the HiGHS interior point solver.
+
+    Linear programming solves problems of the following form:
+
+    .. math::
+
+        \min_x \ & c^T x \\
+        \mbox{such that} \ & A_{ub} x \leq b_{ub},\\
+        & A_{eq} x = b_{eq},\\
+        & l \leq x \leq u ,
+
+    where :math:`x` is a vector of decision variables; :math:`c`,
+    :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
+    :math:`A_{ub}` and :math:`A_{eq}` are matrices.
+
+    Alternatively, that's:
+
+    minimize::
+
+        c @ x
+
+    such that::
+
+        A_ub @ x <= b_ub
+        A_eq @ x == b_eq
+        lb <= x <= ub
+
+    Note that by default ``lb = 0`` and ``ub = None`` unless specified with
+    ``bounds``.
+
+    Parameters
+    ----------
+    c : 1-D array
+        The coefficients of the linear objective function to be minimized.
+    A_ub : 2-D array, optional
+        The inequality constraint matrix. Each row of ``A_ub`` specifies the
+        coefficients of a linear inequality constraint on ``x``.
+    b_ub : 1-D array, optional
+        The inequality constraint vector. Each element represents an
+        upper bound on the corresponding value of ``A_ub @ x``.
+    A_eq : 2-D array, optional
+        The equality constraint matrix. Each row of ``A_eq`` specifies the
+        coefficients of a linear equality constraint on ``x``.
+    b_eq : 1-D array, optional
+        The equality constraint vector. Each element of ``A_eq @ x`` must equal
+        the corresponding element of ``b_eq``.
+    bounds : sequence, optional
+        A sequence of ``(min, max)`` pairs for each element in ``x``, defining
+        the minimum and maximum values of that decision variable. Use ``None``
+        to indicate that there is no bound. By default, bounds are
+        ``(0, None)`` (all decision variables are non-negative).
+        If a single tuple ``(min, max)`` is provided, then ``min`` and
+        ``max`` will serve as bounds for all decision variables.
+    method : str
+
+        This is the method-specific documentation for 'highs-ipm'.
+        :ref:`'highs' <optimize.linprog-highs>`,
+        :ref:`'highs-ds' <optimize.linprog-highs-ds>`,
+        :ref:`'interior-point' <optimize.linprog-interior-point>`,
+        :ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
+        :ref:`'simplex' <optimize.linprog-simplex>` (legacy)
+        are also available.
+
+    Options
+    -------
+    maxiter : int
+        The maximum number of iterations to perform in either phase.
+        For :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`, this does not
+        include the number of crossover iterations. Default is the largest
+        possible value for an ``int`` on the platform.
+    disp : bool (default: ``False``)
+        Set to ``True`` if indicators of optimization status are to be
+        printed to the console during optimization.
+    presolve : bool (default: ``True``)
+        Presolve attempts to identify trivial infeasibilities,
+        identify trivial unboundedness, and simplify the problem before
+        sending it to the main solver. It is generally recommended
+        to keep the default setting ``True``; set to ``False`` if
+        presolve is to be disabled.
+    time_limit : float
+        The maximum time in seconds allotted to solve the problem;
+        default is the largest possible value for a ``double`` on the
+        platform.
+    dual_feasibility_tolerance : double (default: 1e-07)
+        The minimum of this and ``primal_feasibility_tolerance``
+        is used for the feasibility tolerance of
+        :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`.
+    primal_feasibility_tolerance : double (default: 1e-07)
+        The minimum of this and ``dual_feasibility_tolerance``
+        is used for the feasibility tolerance of
+        :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`.
+    ipm_optimality_tolerance : double (default: ``1e-08``)
+        Optimality tolerance for
+        :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`.
+        Minimum allowable value is 1e-12.
+    unknown_options : dict
+        Optional arguments not used by this particular solver. If
+        ``unknown_options`` is non-empty, a warning is issued listing
+        all unused options.
+
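+    For example (a sketch with placeholder data; the tolerance value is
+    illustrative only):
+
+    >>> from scipy.optimize import linprog
+    >>> res = linprog(c=[1, 1], A_ub=[[1, 2]], b_ub=[4], method='highs-ipm',
+    ...               options={'ipm_optimality_tolerance': 1e-10})
+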
+    Returns
+    -------
+    res : OptimizeResult
+        A :class:`scipy.optimize.OptimizeResult` consisting of the fields:
+
+        x : 1D array
+            The values of the decision variables that minimize the
+            objective function while satisfying the constraints.
+        fun : float
+            The optimal value of the objective function ``c @ x``.
+        slack : 1D array
+            The (nominally positive) values of the slack,
+            ``b_ub - A_ub @ x``.
+        con : 1D array
+            The (nominally zero) residuals of the equality constraints,
+            ``b_eq - A_eq @ x``.
+        success : bool
+            ``True`` when the algorithm succeeds in finding an optimal
+            solution.
+        status : int
+            An integer representing the exit status of the algorithm.
+
+            ``0`` : Optimization terminated successfully.
+
+            ``1`` : Iteration or time limit reached.
+
+            ``2`` : Problem appears to be infeasible.
+
+            ``3`` : Problem appears to be unbounded.
+
+            ``4`` : The HiGHS solver ran into a problem.
+
+        message : str
+            A string descriptor of the exit status of the algorithm.
+        nit : int
+            The total number of iterations performed.
+            For the HiGHS interior-point method, this does not include
+            crossover iterations.
+        crossover_nit : int
+            The number of primal/dual pushes performed during the
+            crossover routine for the HiGHS interior-point method.
+        ineqlin : OptimizeResult
+            Solution and sensitivity information corresponding to the
+            inequality constraints, `b_ub`. A dictionary consisting of the
+            fields:
+
+            residual : np.ndarray
+                The (nominally positive) values of the slack variables,
+                ``b_ub - A_ub @ x``.  This quantity is also commonly
+                referred to as "slack".
+
+            marginals : np.ndarray
+                The sensitivity (partial derivative) of the objective
+                function with respect to the right-hand side of the
+                inequality constraints, `b_ub`.
+
+        eqlin : OptimizeResult
+            Solution and sensitivity information corresponding to the
+            equality constraints, `b_eq`.  A dictionary consisting of the
+            fields:
+
+            residual : np.ndarray
+                The (nominally zero) residuals of the equality constraints,
+                ``b_eq - A_eq @ x``.
+
+            marginals : np.ndarray
+                The sensitivity (partial derivative) of the objective
+                function with respect to the right-hand side of the
+                equality constraints, `b_eq`.
+
+        lower, upper : OptimizeResult
+            Solution and sensitivity information corresponding to the
+            lower and upper bounds on decision variables, `bounds`.
+
+            residual : np.ndarray
+                The (nominally positive) values of the quantity
+                ``x - lb`` (lower) or ``ub - x`` (upper).
+
+            marginals : np.ndarray
+                The sensitivity (partial derivative) of the objective
+                function with respect to the lower and upper
+                `bounds`.
+
+    Notes
+    -----
+
+    Method :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`
+    is a wrapper of a C++ implementation of an **i**\ nterior-\ **p**\ oint
+    **m**\ ethod [13]_; it features a crossover routine, so it is as accurate
+    as a simplex solver.
+    Method :ref:`'highs-ds' <optimize.linprog-highs-ds>` is a wrapper
+    of the C++ high performance dual revised simplex implementation (HSOL)
+    [13]_, [14]_. Method :ref:`'highs' <optimize.linprog-highs>` chooses
+    between the two automatically. For new code involving `linprog`, we
+    recommend explicitly choosing one of these three method values instead of
+    :ref:`'interior-point' <optimize.linprog-interior-point>`,
+    :ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
+    :ref:`'simplex' <optimize.linprog-simplex>` (legacy).
+
+    The result fields `ineqlin`, `eqlin`, `lower`, and `upper` all contain
+    `marginals`, or partial derivatives of the objective function with respect
+    to the right-hand side of each constraint. These partial derivatives are
+    also referred to as "Lagrange multipliers", "dual values", and
+    "shadow prices". The sign convention of `marginals` is opposite that
+    of Lagrange multipliers produced by many nonlinear solvers.
+
+    References
+    ----------
+    .. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J.
+           "HiGHS - high performance software for linear optimization."
+           https://highs.dev/
+    .. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised
+           simplex method." Mathematical Programming Computation, 10 (1),
+           119-142, 2018. DOI: 10.1007/s12532-017-0130-5
+    """
+    pass
+
+
+def _linprog_ip_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
+                    bounds=None, method='interior-point', callback=None,
+                    maxiter=1000, disp=False, presolve=True,
+                    tol=1e-8, autoscale=False, rr=True,
+                    alpha0=.99995, beta=0.1, sparse=False,
+                    lstsq=False, sym_pos=True, cholesky=True, pc=True,
+                    ip=False, permc_spec='MMD_AT_PLUS_A', **unknown_options):
+    r"""
+    Linear programming: minimize a linear objective function subject to linear
+    equality and inequality constraints using the interior-point method of
+    [4]_.
+
+    .. deprecated:: 1.9.0
+        `method='interior-point'` will be removed in SciPy 1.11.0.
+        It is replaced by `method='highs'` because the latter is
+        faster and more robust.
+
+    Linear programming solves problems of the following form:
+
+    .. math::
+
+        \min_x \ & c^T x \\
+        \mbox{such that} \ & A_{ub} x \leq b_{ub},\\
+        & A_{eq} x = b_{eq},\\
+        & l \leq x \leq u ,
+
+    where :math:`x` is a vector of decision variables; :math:`c`,
+    :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
+    :math:`A_{ub}` and :math:`A_{eq}` are matrices.
+
+    Alternatively, that's:
+
+    minimize::
+
+        c @ x
+
+    such that::
+
+        A_ub @ x <= b_ub
+        A_eq @ x == b_eq
+        lb <= x <= ub
+
+    Note that by default ``lb = 0`` and ``ub = None`` unless specified with
+    ``bounds``.
+
+    Parameters
+    ----------
+    c : 1-D array
+        The coefficients of the linear objective function to be minimized.
+    A_ub : 2-D array, optional
+        The inequality constraint matrix. Each row of ``A_ub`` specifies the
+        coefficients of a linear inequality constraint on ``x``.
+    b_ub : 1-D array, optional
+        The inequality constraint vector. Each element represents an
+        upper bound on the corresponding value of ``A_ub @ x``.
+    A_eq : 2-D array, optional
+        The equality constraint matrix. Each row of ``A_eq`` specifies the
+        coefficients of a linear equality constraint on ``x``.
+    b_eq : 1-D array, optional
+        The equality constraint vector. Each element of ``A_eq @ x`` must equal
+        the corresponding element of ``b_eq``.
+    bounds : sequence, optional
+        A sequence of ``(min, max)`` pairs for each element in ``x``, defining
+        the minimum and maximum values of that decision variable. Use ``None``
+        to indicate that there is no bound. By default, bounds are
+        ``(0, None)`` (all decision variables are non-negative).
+        If a single tuple ``(min, max)`` is provided, then ``min`` and
+        ``max`` will serve as bounds for all decision variables.
+    method : str
+        This is the method-specific documentation for 'interior-point'.
+        :ref:`'highs' <optimize.linprog-highs>`,
+        :ref:`'highs-ds' <optimize.linprog-highs-ds>`,
+        :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`,
+        :ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
+        :ref:`'simplex' <optimize.linprog-simplex>` (legacy)
+        are also available.
+    callback : callable, optional
+        Callback function to be executed once per iteration.
+
+    Options
+    -------
+    maxiter : int (default: 1000)
+        The maximum number of iterations of the algorithm.
+    disp : bool (default: False)
+        Set to ``True`` if indicators of optimization status are to be printed
+        to the console each iteration.
+    presolve : bool (default: True)
+        Presolve attempts to identify trivial infeasibilities,
+        identify trivial unboundedness, and simplify the problem before
+        sending it to the main solver. It is generally recommended
+        to keep the default setting ``True``; set to ``False`` if
+        presolve is to be disabled.
+    tol : float (default: 1e-8)
+        Termination tolerance to be used for all termination criteria;
+        see [4]_ Section 4.5.
+    autoscale : bool (default: False)
+        Set to ``True`` to automatically perform equilibration.
+        Consider using this option if the numerical values in the
+        constraints are separated by several orders of magnitude.
+    rr : bool (default: True)
+        Set to ``False`` to disable automatic redundancy removal.
+    alpha0 : float (default: 0.99995)
+        The maximal step size for Mehrotra's predictor-corrector search
+        direction; see :math:`\beta_{3}` of [4]_ Table 8.1.
+    beta : float (default: 0.1)
+        The desired reduction of the path parameter :math:`\mu` (see [6]_)
+        when Mehrotra's predictor-corrector is not in use (uncommon).
+    sparse : bool (default: False)
+        Set to ``True`` if the problem is to be treated as sparse after
+        presolve. If either ``A_eq`` or ``A_ub`` is a sparse matrix,
+        this option will automatically be set ``True``, and the problem
+        will be treated as sparse even during presolve. If your constraint
+        matrices contain mostly zeros and the problem is not very small (less
+        than about 100 constraints or variables), consider setting ``True``
+        or providing ``A_eq`` and ``A_ub`` as sparse matrices.
+    lstsq : bool (default: ``False``)
+        Set to ``True`` if the problem is expected to be very poorly
+        conditioned. This should always be left ``False`` unless severe
+        numerical difficulties are encountered. Leave this at the default
+        unless you receive a warning message suggesting otherwise.
+    sym_pos : bool (default: True)
+        Leave ``True`` if the problem is expected to yield a well conditioned
+        symmetric positive definite normal equation matrix
+        (almost always). Leave this at the default unless you receive
+        a warning message suggesting otherwise.
+    cholesky : bool (default: True)
+        Set to ``True`` if the normal equations are to be solved by explicit
+        Cholesky decomposition followed by explicit forward/backward
+        substitution. This is typically faster for problems
+        that are numerically well-behaved.
+    pc : bool (default: True)
+        Leave ``True`` if the predictor-corrector method of Mehrotra is to be
+        used. This is almost always (if not always) beneficial.
+    ip : bool (default: False)
+        Set to ``True`` if the improved initial point suggestion due to [4]_
+        Section 4.3 is desired. Whether this is beneficial or not
+        depends on the problem.
+    permc_spec : str (default: 'MMD_AT_PLUS_A')
+        (Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos =
+        True``, and no SuiteSparse.)
+        A matrix is factorized in each iteration of the algorithm.
+        This option specifies how to permute the columns of the matrix for
+        sparsity preservation. Acceptable values are:
+
+        - ``NATURAL``: natural ordering.
+        - ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
+        - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
+        - ``COLAMD``: approximate minimum degree column ordering.
+
+        This option can impact the convergence of the
+        interior point algorithm; test different values to determine which
+        performs best for your problem. For more information, refer to
+        ``scipy.sparse.linalg.splu``.
+    unknown_options : dict
+        Optional arguments not used by this particular solver. If
+        `unknown_options` is non-empty a warning is issued listing all
+        unused options.
+
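+    For example (a sketch with placeholder data), a sparse problem with an
+    explicit column ordering:
+
+    >>> from scipy.optimize import linprog
+    >>> res = linprog(c=[1., 1.], A_ub=[[1., 2.]], b_ub=[4.],
+    ...               method='interior-point',
+    ...               options={'sparse': True, 'permc_spec': 'COLAMD'})
+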
+    Returns
+    -------
+    res : OptimizeResult
+        A :class:`scipy.optimize.OptimizeResult` consisting of the fields:
+
+        x : 1-D array
+            The values of the decision variables that minimize the
+            objective function while satisfying the constraints.
+        fun : float
+            The optimal value of the objective function ``c @ x``.
+        slack : 1-D array
+            The (nominally positive) values of the slack variables,
+            ``b_ub - A_ub @ x``.
+        con : 1-D array
+            The (nominally zero) residuals of the equality constraints,
+            ``b_eq - A_eq @ x``.
+        success : bool
+            ``True`` when the algorithm succeeds in finding an optimal
+            solution.
+        status : int
+            An integer representing the exit status of the algorithm.
+
+            ``0`` : Optimization terminated successfully.
+
+            ``1`` : Iteration limit reached.
+
+            ``2`` : Problem appears to be infeasible.
+
+            ``3`` : Problem appears to be unbounded.
+
+            ``4`` : Numerical difficulties encountered.
+
+        message : str
+            A string descriptor of the exit status of the algorithm.
+        nit : int
+            The total number of iterations performed in all phases.
+
+
+    Notes
+    -----
+    This method implements the algorithm outlined in [4]_ with ideas from [8]_
+    and a structure inspired by the simpler methods of [6]_.
+
+    The primal-dual path following method begins with initial 'guesses' of
+    the primal and dual variables of the standard form problem and iteratively
+    attempts to solve the (nonlinear) Karush-Kuhn-Tucker conditions for the
+    problem with a gradually reduced logarithmic barrier term added to the
+    objective. This particular implementation uses a homogeneous self-dual
+    formulation, which provides certificates of infeasibility or unboundedness
+    where applicable.
+
+    The default initial point for the primal and dual variables is that
+    defined in [4]_ Section 4.4 Equation 8.22. Optionally (by setting initial
+    point option ``ip=True``), an alternate (potentially improved) starting
+    point can be calculated according to the additional recommendations of
+    [4]_ Section 4.4.
+
+    A search direction is calculated using the predictor-corrector method
+    (single correction) proposed by Mehrotra and detailed in [4]_ Section 4.1.
+    (A potential improvement would be to implement the method of multiple
+    corrections described in [4]_ Section 4.2.) In practice, this is
+    accomplished by solving the normal equations, [4]_ Section 5.1 Equations
+    8.31 and 8.32, derived from the Newton equations [4]_ Section 5 Equations
+    8.25 (compare to [4]_ Section 4 Equations 8.6-8.8). The advantage of
+    solving the normal equations rather than 8.25 directly is that the
+    matrices involved are symmetric positive definite, so Cholesky
+    decomposition can be used rather than the more expensive LU factorization.
+
+    With default options, the solver used to perform the factorization depends
+    on third-party software availability and the conditioning of the problem.
+
+    For dense problems, solvers are tried in the following order:
+
+    1. ``scipy.linalg.cho_factor``
+
+    2. ``scipy.linalg.solve`` with option ``sym_pos=True``
+
+    3. ``scipy.linalg.solve`` with option ``sym_pos=False``
+
+    4. ``scipy.linalg.lstsq``
+
+    For sparse problems:
+
+    1. ``sksparse.cholmod.cholesky`` (if scikit-sparse and SuiteSparse are
+       installed)
+
+    2. ``scipy.sparse.linalg.factorized`` (if scikit-umfpack and SuiteSparse
+       are installed)
+
+    3. ``scipy.sparse.linalg.splu`` (which uses SuperLU distributed with SciPy)
+
+    4. ``scipy.sparse.linalg.lsqr``
+
+    If the solver fails for any reason, successively more robust (but slower)
+    solvers are attempted in the order indicated. Attempting, failing, and
+    re-starting factorization can be time consuming, so if the problem is
+    numerically challenging, options can be set to bypass solvers that are
+    failing. Setting ``cholesky=False`` skips to solver 2,
+    ``sym_pos=False`` skips to solver 3, and ``lstsq=True`` skips
+    to solver 4 for both sparse and dense problems.
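+
+    For example (a sketch; the data are placeholders), a numerically
+    troublesome sparse problem can be steered past the Cholesky solver:
+
+    >>> from scipy.sparse import csr_matrix
+    >>> from scipy.optimize import linprog
+    >>> A = csr_matrix([[1., 2.], [2., 4.0000000001]])  # nearly dependent rows
+    >>> res = linprog(c=[1., 1.], A_ub=A, b_ub=[4., 8.],
+    ...               method='interior-point',
+    ...               options={'sparse': True, 'cholesky': False})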
+
+    Potential improvements for combatting issues associated with dense
+    columns in otherwise sparse problems are outlined in [4]_ Section 5.3 and
+    [10]_ Section 4.1-4.2; the latter also discusses the alleviation of
+    accuracy issues associated with the substitution approach to free
+    variables.
+
+    After calculating the search direction, the maximum possible step size
+    that does not activate the non-negativity constraints is calculated, and
+    the smaller of this step size and unity is applied (as in [4]_ Section
+    4.1). [4]_ Section 4.3 suggests improvements for choosing the step size.
+
+    The new point is tested according to the termination conditions of [4]_
+    Section 4.5. The same tolerance, which can be set using the ``tol`` option,
+    is used for all checks. (A potential improvement would be to expose
+    the different tolerances to be set independently.) If optimality,
+    unboundedness, or infeasibility is detected, the solve procedure
+    terminates; otherwise it repeats.
+
+    Whereas the top level ``linprog`` module expects a problem of the form:
+
+    Minimize::
+
+        c @ x
+
+    Subject to::
+
+        A_ub @ x <= b_ub
+        A_eq @ x == b_eq
+        lb <= x <= ub
+
+    where ``lb = 0`` and ``ub = None`` unless set in ``bounds``. The problem
+    is automatically converted to the form:
+
+    Minimize::
+
+        c @ x
+
+    Subject to::
+
+        A @ x == b
+            x >= 0
+
+    for solution. That is, the original problem contains equality, upper-bound
+    and variable constraints whereas the method specific solver requires
+    equality constraints and variable non-negativity. ``linprog`` converts the
+    original problem to standard form by converting the simple bounds to upper
+    bound constraints, introducing non-negative slack variables for inequality
+    constraints, and expressing unbounded variables as the difference between
+    two non-negative variables. The problem is converted back to the original
+    form before results are reported.
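+
+    For example, a single inequality ``x0 + x1 <= 4`` with ``x1`` free
+    becomes, after introducing a slack ``s`` and splitting
+    ``x1 = x1p - x1m`` (an illustrative sketch of the conversion)::
+
+        minimize     [c0, c1, -c1, 0] @ [x0, x1p, x1m, s]
+        subject to   [1,  1,  -1,  1] @ [x0, x1p, x1m, s] == 4
+                     x0, x1p, x1m, s >= 0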
+
+    References
+    ----------
+    .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
+           optimizer for linear programming: an implementation of the
+           homogeneous algorithm." High performance optimization. Springer US,
+           2000. 197-232.
+    .. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear
+           Programming based on Newton's Method." Unpublished Course Notes,
+           March 2004. Available 2/25/2017 at
+           https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf
+    .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear
+           programming." Mathematical Programming 71.2 (1995): 221-245.
+    .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
+           programming." Athena Scientific 1 (1997): 997.
+    .. [10] Andersen, Erling D., et al. Implementation of interior point
+            methods for large scale linear programming. HEC/Universite de
+            Geneve, 1996.
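+
+    Examples
+    --------
+    An illustrative doctest (only the status and a rounded objective are
+    checked, since the exact floating-point output is platform dependent):
+
+    >>> import numpy as np
+    >>> from scipy.optimize import linprog
+    >>> c = np.array([-1, 4])
+    >>> A_ub = np.array([[-3, 1], [1, 2]])
+    >>> b_ub = np.array([6, 4])
+    >>> res = linprog(c, A_ub=A_ub, b_ub=b_ub, method='interior-point')
+    >>> res.status, bool(np.isclose(res.fun, -4.0))
+    (0, True)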
+    """
+    pass
+
+
+def _linprog_rs_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
+                    bounds=None, method='interior-point', callback=None,
+                    x0=None, maxiter=5000, disp=False, presolve=True,
+                    tol=1e-12, autoscale=False, rr=True, maxupdate=10,
+                    mast=False, pivot="mrc", **unknown_options):
+    r"""
+    Linear programming: minimize a linear objective function subject to linear
+    equality and inequality constraints using the revised simplex method.
+
+    .. deprecated:: 1.9.0
+        `method='revised simplex'` will be removed in SciPy 1.11.0.
+        It is replaced by `method='highs'` because the latter is
+        faster and more robust.
+
+    Linear programming solves problems of the following form:
+
+    .. math::
+
+        \min_x \ & c^T x \\
+        \mbox{such that} \ & A_{ub} x \leq b_{ub},\\
+        & A_{eq} x = b_{eq},\\
+        & l \leq x \leq u ,
+
+    where :math:`x` is a vector of decision variables; :math:`c`,
+    :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
+    :math:`A_{ub}` and :math:`A_{eq}` are matrices.
+
+    Alternatively, that's:
+
+    minimize::
+
+        c @ x
+
+    such that::
+
+        A_ub @ x <= b_ub
+        A_eq @ x == b_eq
+        lb <= x <= ub
+
+    Note that by default ``lb = 0`` and ``ub = None`` unless specified with
+    ``bounds``.
+
+    Parameters
+    ----------
+    c : 1-D array
+        The coefficients of the linear objective function to be minimized.
+    A_ub : 2-D array, optional
+        The inequality constraint matrix. Each row of ``A_ub`` specifies the
+        coefficients of a linear inequality constraint on ``x``.
+    b_ub : 1-D array, optional
+        The inequality constraint vector. Each element represents an
+        upper bound on the corresponding value of ``A_ub @ x``.
+    A_eq : 2-D array, optional
+        The equality constraint matrix. Each row of ``A_eq`` specifies the
+        coefficients of a linear equality constraint on ``x``.
+    b_eq : 1-D array, optional
+        The equality constraint vector. Each element of ``A_eq @ x`` must equal
+        the corresponding element of ``b_eq``.
+    bounds : sequence, optional
+        A sequence of ``(min, max)`` pairs for each element in ``x``, defining
+        the minimum and maximum values of that decision variable. Use ``None``
+        to indicate that there is no bound. By default, bounds are
+        ``(0, None)`` (all decision variables are non-negative).
+        If a single tuple ``(min, max)`` is provided, then ``min`` and
+        ``max`` will serve as bounds for all decision variables.
+    method : str
+        This is the method-specific documentation for 'revised simplex'.
+        :ref:`'highs' <optimize.linprog-highs>`,
+        :ref:`'highs-ds' <optimize.linprog-highs-ds>`,
+        :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`,
+        :ref:`'interior-point' <optimize.linprog-interior-point>` (default),
+        and :ref:`'simplex' <optimize.linprog-simplex>` (legacy)
+        are also available.
+    callback : callable, optional
+        Callback function to be executed once per iteration.
+    x0 : 1-D array, optional
+        Guess values of the decision variables, which will be refined by
+        the optimization algorithm. This argument is currently used only by the
+        'revised simplex' method, and can only be used if `x0` represents a
+        basic feasible solution.
+
+    Options
+    -------
+    maxiter : int (default: 5000)
+       The maximum number of iterations to perform in either phase.
+    disp : bool (default: False)
+        Set to ``True`` if indicators of optimization status are to be printed
+        to the console each iteration.
+    presolve : bool (default: True)
+        Presolve attempts to identify trivial infeasibilities,
+        identify trivial unboundedness, and simplify the problem before
+        sending it to the main solver. It is generally recommended
+        to keep the default setting ``True``; set to ``False`` if
+        presolve is to be disabled.
+    tol : float (default: 1e-12)
+        The tolerance which determines when a solution is "close enough" to
+        zero in Phase 1 to be considered a basic feasible solution or close
+        enough to positive to serve as an optimal solution.
+    autoscale : bool (default: False)
+        Set to ``True`` to automatically perform equilibration.
+        Consider using this option if the numerical values in the
+        constraints are separated by several orders of magnitude.
+    rr : bool (default: True)
+        Set to ``False`` to disable automatic redundancy removal.
+    maxupdate : int (default: 10)
+        The maximum number of updates performed on the LU factorization.
+        After this many updates is reached, the basis matrix is factorized
+        from scratch.
+    mast : bool (default: False)
+        Minimize Amortized Solve Time. If enabled, the average time to solve
+        a linear system using the basis factorization is measured. Typically,
+        the average solve time will decrease with each successive solve after
+        initial factorization, as factorization takes much more time than the
+        solve operation (and updates). Eventually, however, the updated
+        factorization becomes sufficiently complex that the average solve time
+        begins to increase. When this is detected, the basis is refactorized
+        from scratch. Enable this option to maximize speed at the risk of
+        nondeterministic behavior. Ignored if ``maxupdate`` is 0.
+    pivot : "mrc" or "bland" (default: "mrc")
+        Pivot rule: Minimum Reduced Cost ("mrc") or Bland's rule ("bland").
+        Choose Bland's rule if iteration limit is reached and cycling is
+        suspected.
+    unknown_options : dict
+        Optional arguments not used by this particular solver. If
+        `unknown_options` is non-empty a warning is issued listing all
+        unused options.
+
+    Returns
+    -------
+    res : OptimizeResult
+        A :class:`scipy.optimize.OptimizeResult` consisting of the fields:
+
+        x : 1-D array
+            The values of the decision variables that minimizes the
+            objective function while satisfying the constraints.
+        fun : float
+            The optimal value of the objective function ``c @ x``.
+        slack : 1-D array
+            The (nominally positive) values of the slack variables,
+            ``b_ub - A_ub @ x``.
+        con : 1-D array
+            The (nominally zero) residuals of the equality constraints,
+            ``b_eq - A_eq @ x``.
+        success : bool
+            ``True`` when the algorithm succeeds in finding an optimal
+            solution.
+        status : int
+            An integer representing the exit status of the algorithm.
+
+            ``0`` : Optimization terminated successfully.
+
+            ``1`` : Iteration limit reached.
+
+            ``2`` : Problem appears to be infeasible.
+
+            ``3`` : Problem appears to be unbounded.
+
+            ``4`` : Numerical difficulties encountered.
+
+            ``5`` : Problem has no constraints; turn presolve on.
+
+            ``6`` : Invalid guess provided.
+
+        message : str
+            A string descriptor of the exit status of the algorithm.
+        nit : int
+            The total number of iterations performed in all phases.
+
+
+    Notes
+    -----
+    Method *revised simplex* uses the revised simplex method as described in
+    [9]_, except that a factorization [11]_ of the basis matrix, rather than
+    its inverse, is efficiently maintained and used to solve the linear systems
+    at each iteration of the algorithm.
+
+    References
+    ----------
+    .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
+           programming." Athena Scientific 1 (1997): 997.
+    .. [11] Bartels, Richard H. "A stabilization of the simplex method."
+            Numerische Mathematik 16.5 (1971): 414-434.
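+
+    Examples
+    --------
+    An illustrative doctest (the method emits a deprecation warning on
+    SciPy >= 1.9, which does not affect the printed output):
+
+    >>> import numpy as np
+    >>> from scipy.optimize import linprog
+    >>> c = np.array([-1, 4])
+    >>> A_ub = np.array([[-3, 1], [1, 2]])
+    >>> b_ub = np.array([6, 4])
+    >>> res = linprog(c, A_ub=A_ub, b_ub=b_ub, method='revised simplex')
+    >>> res.status, res.fun
+    (0, -4.0)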
+    """
+    pass
+
+
+def _linprog_simplex_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
+                         bounds=None, method='interior-point', callback=None,
+                         maxiter=5000, disp=False, presolve=True,
+                         tol=1e-12, autoscale=False, rr=True, bland=False,
+                         **unknown_options):
+    r"""
+    Linear programming: minimize a linear objective function subject to linear
+    equality and inequality constraints using the tableau-based simplex method.
+
+    .. deprecated:: 1.9.0
+        `method='simplex'` will be removed in SciPy 1.11.0.
+        It is replaced by `method='highs'` because the latter is
+        faster and more robust.
+
+    Linear programming solves problems of the following form:
+
+    .. math::
+
+        \min_x \ & c^T x \\
+        \mbox{such that} \ & A_{ub} x \leq b_{ub},\\
+        & A_{eq} x = b_{eq},\\
+        & l \leq x \leq u ,
+
+    where :math:`x` is a vector of decision variables; :math:`c`,
+    :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
+    :math:`A_{ub}` and :math:`A_{eq}` are matrices.
+
+    Alternatively, that's:
+
+    minimize::
+
+        c @ x
+
+    such that::
+
+        A_ub @ x <= b_ub
+        A_eq @ x == b_eq
+        lb <= x <= ub
+
+    Note that by default ``lb = 0`` and ``ub = None`` unless specified with
+    ``bounds``.
+
+    Parameters
+    ----------
+    c : 1-D array
+        The coefficients of the linear objective function to be minimized.
+    A_ub : 2-D array, optional
+        The inequality constraint matrix. Each row of ``A_ub`` specifies the
+        coefficients of a linear inequality constraint on ``x``.
+    b_ub : 1-D array, optional
+        The inequality constraint vector. Each element represents an
+        upper bound on the corresponding value of ``A_ub @ x``.
+    A_eq : 2-D array, optional
+        The equality constraint matrix. Each row of ``A_eq`` specifies the
+        coefficients of a linear equality constraint on ``x``.
+    b_eq : 1-D array, optional
+        The equality constraint vector. Each element of ``A_eq @ x`` must equal
+        the corresponding element of ``b_eq``.
+    bounds : sequence, optional
+        A sequence of ``(min, max)`` pairs for each element in ``x``, defining
+        the minimum and maximum values of that decision variable. Use ``None``
+        to indicate that there is no bound. By default, bounds are
+        ``(0, None)`` (all decision variables are non-negative).
+        If a single tuple ``(min, max)`` is provided, then ``min`` and
+        ``max`` will serve as bounds for all decision variables.
+    method : str
+        This is the method-specific documentation for 'simplex'.
+        :ref:`'highs' <optimize.linprog-highs>`,
+        :ref:`'highs-ds' <optimize.linprog-highs-ds>`,
+        :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`,
+        :ref:`'interior-point' <optimize.linprog-interior-point>` (default),
+        and :ref:`'revised simplex' <optimize.linprog-revised_simplex>`
+        are also available.
+    callback : callable, optional
+        Callback function to be executed once per iteration.
+
+    Options
+    -------
+    maxiter : int (default: 5000)
+       The maximum number of iterations to perform in either phase.
+    disp : bool (default: False)
+        Set to ``True`` if indicators of optimization status are to be printed
+        to the console each iteration.
+    presolve : bool (default: True)
+        Presolve attempts to identify trivial infeasibilities,
+        identify trivial unboundedness, and simplify the problem before
+        sending it to the main solver. It is generally recommended
+        to keep the default setting ``True``; set to ``False`` if
+        presolve is to be disabled.
+    tol : float (default: 1e-12)
+        The tolerance which determines when a solution is "close enough" to
+        zero in Phase 1 to be considered a basic feasible solution or close
+        enough to positive to serve as an optimal solution.
+    autoscale : bool (default: False)
+        Set to ``True`` to automatically perform equilibration.
+        Consider using this option if the numerical values in the
+        constraints are separated by several orders of magnitude.
+    rr : bool (default: True)
+        Set to ``False`` to disable automatic redundancy removal.
+    bland : bool
+        If True, use Bland's anti-cycling rule [3]_ to choose pivots to
+        prevent cycling. If False, choose pivots which should lead to a
+        converged solution more quickly. The latter method is subject to
+        cycling (non-convergence) in rare instances.
+    unknown_options : dict
+        Optional arguments not used by this particular solver. If
+        `unknown_options` is non-empty a warning is issued listing all
+        unused options.
+
+    Returns
+    -------
+    res : OptimizeResult
+        A :class:`scipy.optimize.OptimizeResult` consisting of the fields:
+
+        x : 1-D array
+            The values of the decision variables that minimizes the
+            objective function while satisfying the constraints.
+        fun : float
+            The optimal value of the objective function ``c @ x``.
+        slack : 1-D array
+            The (nominally positive) values of the slack variables,
+            ``b_ub - A_ub @ x``.
+        con : 1-D array
+            The (nominally zero) residuals of the equality constraints,
+            ``b_eq - A_eq @ x``.
+        success : bool
+            ``True`` when the algorithm succeeds in finding an optimal
+            solution.
+        status : int
+            An integer representing the exit status of the algorithm.
+
+            ``0`` : Optimization terminated successfully.
+
+            ``1`` : Iteration limit reached.
+
+            ``2`` : Problem appears to be infeasible.
+
+            ``3`` : Problem appears to be unbounded.
+
+            ``4`` : Numerical difficulties encountered.
+
+        message : str
+            A string descriptor of the exit status of the algorithm.
+        nit : int
+            The total number of iterations performed in all phases.
+
+    References
+    ----------
+    .. [1] Dantzig, George B., Linear programming and extensions. Rand
+           Corporation Research Study Princeton Univ. Press, Princeton, NJ,
+           1963
+    .. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to
+           Mathematical Programming", McGraw-Hill, Chapter 4.
+    .. [3] Bland, Robert G. New finite pivoting rules for the simplex method.
+           Mathematics of Operations Research (2), 1977: pp. 103-107.
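+
+    Examples
+    --------
+    An illustrative doctest; ``options={'bland': True}`` selects the
+    anti-cycling pivot rule described under the ``bland`` option above:
+
+    >>> import numpy as np
+    >>> from scipy.optimize import linprog
+    >>> c = np.array([-1, 4])
+    >>> A_ub = np.array([[-3, 1], [1, 2]])
+    >>> b_ub = np.array([6, 4])
+    >>> res = linprog(c, A_ub=A_ub, b_ub=b_ub, method='simplex',
+    ...               options={'bland': True})
+    >>> res.status, res.fun
+    (0, -4.0)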
+    """
+    pass
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_linprog_highs.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_linprog_highs.py
new file mode 100644
index 00000000..18ef5925
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_linprog_highs.py
@@ -0,0 +1,440 @@
+"""HiGHS Linear Optimization Methods
+
+Interface to HiGHS linear optimization software.
+https://highs.dev/
+
+.. versionadded:: 1.5.0
+
+References
+----------
+.. [1] Q. Huangfu and J.A.J. Hall. "Parallelizing the dual revised simplex
+           method." Mathematical Programming Computation, 10 (1), 119-142,
+           2018. DOI: 10.1007/s12532-017-0130-5
+
+"""
+
+import inspect
+import numpy as np
+from ._optimize import _check_unknown_options, OptimizeWarning, OptimizeResult
+from warnings import warn
+from ._highs._highs_wrapper import _highs_wrapper
+from ._highs._highs_constants import (
+    CONST_I_INF,
+    CONST_INF,
+    MESSAGE_LEVEL_NONE,
+    HIGHS_OBJECTIVE_SENSE_MINIMIZE,
+
+    MODEL_STATUS_NOTSET,
+    MODEL_STATUS_LOAD_ERROR,
+    MODEL_STATUS_MODEL_ERROR,
+    MODEL_STATUS_PRESOLVE_ERROR,
+    MODEL_STATUS_SOLVE_ERROR,
+    MODEL_STATUS_POSTSOLVE_ERROR,
+    MODEL_STATUS_MODEL_EMPTY,
+    MODEL_STATUS_OPTIMAL,
+    MODEL_STATUS_INFEASIBLE,
+    MODEL_STATUS_UNBOUNDED_OR_INFEASIBLE,
+    MODEL_STATUS_UNBOUNDED,
+    MODEL_STATUS_REACHED_DUAL_OBJECTIVE_VALUE_UPPER_BOUND
+    as MODEL_STATUS_RDOVUB,
+    MODEL_STATUS_REACHED_OBJECTIVE_TARGET,
+    MODEL_STATUS_REACHED_TIME_LIMIT,
+    MODEL_STATUS_REACHED_ITERATION_LIMIT,
+
+    HIGHS_SIMPLEX_STRATEGY_CHOOSE,
+    HIGHS_SIMPLEX_STRATEGY_DUAL,
+
+    HIGHS_SIMPLEX_CRASH_STRATEGY_OFF,
+
+    HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_CHOOSE,
+    HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_DANTZIG,
+    HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_DEVEX,
+    HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_STEEPEST_EDGE,
+
+    HIGHS_VAR_TYPE_CONTINUOUS,
+)
+from scipy.sparse import csc_matrix, vstack, issparse
+
+
+def _highs_to_scipy_status_message(highs_status, highs_message):
+    """Converts HiGHS status number/message to SciPy status number/message"""
+
+    scipy_statuses_messages = {
+        None: (4, "HiGHS did not provide a status code. "),
+        MODEL_STATUS_NOTSET: (4, ""),
+        MODEL_STATUS_LOAD_ERROR: (4, ""),
+        MODEL_STATUS_MODEL_ERROR: (2, ""),
+        MODEL_STATUS_PRESOLVE_ERROR: (4, ""),
+        MODEL_STATUS_SOLVE_ERROR: (4, ""),
+        MODEL_STATUS_POSTSOLVE_ERROR: (4, ""),
+        MODEL_STATUS_MODEL_EMPTY: (4, ""),
+        MODEL_STATUS_RDOVUB: (4, ""),
+        MODEL_STATUS_REACHED_OBJECTIVE_TARGET: (4, ""),
+        MODEL_STATUS_OPTIMAL: (0, "Optimization terminated successfully. "),
+        MODEL_STATUS_REACHED_TIME_LIMIT: (1, "Time limit reached. "),
+        MODEL_STATUS_REACHED_ITERATION_LIMIT: (1, "Iteration limit reached. "),
+        MODEL_STATUS_INFEASIBLE: (2, "The problem is infeasible. "),
+        MODEL_STATUS_UNBOUNDED: (3, "The problem is unbounded. "),
+        MODEL_STATUS_UNBOUNDED_OR_INFEASIBLE: (4, "The problem is unbounded "
+                                               "or infeasible. ")}
+    unrecognized = (4, "The HiGHS status code was not recognized. ")
+    scipy_status, scipy_message = (
+        scipy_statuses_messages.get(highs_status, unrecognized))
+    scipy_message = (f"{scipy_message}"
+                     f"(HiGHS Status {highs_status}: {highs_message})")
+    return scipy_status, scipy_message
+
+
+def _replace_inf(x):
+    # Replace `np.inf` with CONST_INF
+    infs = np.isinf(x)
+    x[infs] = np.sign(x[infs])*CONST_INF
+    return x
+
+
+def _convert_to_highs_enum(option, option_str, choices):
+    # If option is in the choices we can look it up, if not use
+    # the default value taken from function signature and warn:
+    try:
+        return choices[option.lower()]
+    except AttributeError:
+        return choices[option]
+    except KeyError:
+        sig = inspect.signature(_linprog_highs)
+        default_str = sig.parameters[option_str].default
+        warn(f"Option {option_str} is {option}, but only values in "
+             f"{set(choices.keys())} are allowed. Using default: "
+             f"{default_str}.",
+             OptimizeWarning, stacklevel=3)
+        return choices[default_str]
+
+
+def _linprog_highs(lp, solver, time_limit=None, presolve=True,
+                   disp=False, maxiter=None,
+                   dual_feasibility_tolerance=None,
+                   primal_feasibility_tolerance=None,
+                   ipm_optimality_tolerance=None,
+                   simplex_dual_edge_weight_strategy=None,
+                   mip_rel_gap=None,
+                   mip_max_nodes=None,
+                   **unknown_options):
+    r"""
+    Solve the following linear programming problem using one of the HiGHS
+    solvers:
+
+    User-facing documentation is in _linprog_doc.py.
+
+    Parameters
+    ----------
+    lp :  _LPProblem
+        A ``scipy.optimize._linprog_util._LPProblem`` ``namedtuple``.
+    solver : "ipm" or "simplex" or None
+        Which HiGHS solver to use.  If ``None``, "simplex" will be used.
+
+    Options
+    -------
+    maxiter : int
+        The maximum number of iterations to perform in either phase. For
+        ``solver='ipm'``, this does not include the number of crossover
+        iterations.  Default is the largest possible value for an ``int``
+        on the platform.
+    disp : bool
+        Set to ``True`` if indicators of optimization status are to be printed
+        to the console each iteration; default ``False``.
+    time_limit : float
+        The maximum time in seconds allotted to solve the problem; default is
+        the largest possible value for a ``double`` on the platform.
+    presolve : bool
+        Presolve attempts to identify trivial infeasibilities,
+        identify trivial unboundedness, and simplify the problem before
+        sending it to the main solver. It is generally recommended
+        to keep the default setting ``True``; set to ``False`` if presolve is
+        to be disabled.
+    dual_feasibility_tolerance : double
+        Dual feasibility tolerance.  Default is 1e-07.
+        The minimum of this and ``primal_feasibility_tolerance``
+        is used for the feasibility tolerance when ``solver='ipm'``.
+    primal_feasibility_tolerance : double
+        Primal feasibility tolerance.  Default is 1e-07.
+        The minimum of this and ``dual_feasibility_tolerance``
+        is used for the feasibility tolerance when ``solver='ipm'``.
+    ipm_optimality_tolerance : double
+        Optimality tolerance for ``solver='ipm'``.  Default is 1e-08.
+        Minimum possible value is 1e-12 and must be smaller than the largest
+        possible value for a ``double`` on the platform.
+    simplex_dual_edge_weight_strategy : str (default: None)
+        Strategy for simplex dual edge weights. The default, ``None``,
+        automatically selects one of the following.
+
+        ``'dantzig'`` uses Dantzig's original strategy of choosing the most
+        negative reduced cost.
+
+        ``'devex'`` uses the strategy described in [15]_.
+
+        ``'steepest'`` uses the exact steepest edge strategy as described in
+        [16]_.
+
+        ``'steepest-devex'`` begins with the exact steepest edge strategy
+        until the computation is too costly or inexact and then switches to
+        the devex method.
+
+        Currently, using ``None`` always selects ``'steepest-devex'``, but this
+        may change as new options become available.
+
+    mip_max_nodes : int
+        The maximum number of nodes allotted to solve the problem; default is
+        the largest possible value for a ``HighsInt`` on the platform.
+        Ignored if not using the MIP solver.
+    unknown_options : dict
+        Optional arguments not used by this particular solver. If
+        ``unknown_options`` is non-empty, a warning is issued listing all
+        unused options.
+
+    Returns
+    -------
+    sol : dict
+        A dictionary consisting of the fields:
+
+            x : 1D array
+                The values of the decision variables that minimizes the
+                objective function while satisfying the constraints.
+            fun : float
+                The optimal value of the objective function ``c @ x``.
+            slack : 1D array
+                The (nominally positive) values of the slack,
+                ``b_ub - A_ub @ x``.
+            con : 1D array
+                The (nominally zero) residuals of the equality constraints,
+                ``b_eq - A_eq @ x``.
+            success : bool
+                ``True`` when the algorithm succeeds in finding an optimal
+                solution.
+            status : int
+                An integer representing the exit status of the algorithm.
+
+                ``0`` : Optimization terminated successfully.
+
+                ``1`` : Iteration or time limit reached.
+
+                ``2`` : Problem appears to be infeasible.
+
+                ``3`` : Problem appears to be unbounded.
+
+                ``4`` : The HiGHS solver ran into a problem.
+
+            message : str
+                A string descriptor of the exit status of the algorithm.
+            nit : int
+                The total number of iterations performed.
+                For ``solver='simplex'``, this includes iterations in all
+                phases. For ``solver='ipm'``, this does not include
+                crossover iterations.
+            crossover_nit : int
+                The number of primal/dual pushes performed during the
+                crossover routine for ``solver='ipm'``.  This is ``0``
+                for ``solver='simplex'``.
+            ineqlin : OptimizeResult
+                Solution and sensitivity information corresponding to the
+                inequality constraints, `b_ub`. A dictionary consisting of the
+                fields:
+
+                residual : np.ndarray
+                    The (nominally positive) values of the slack variables,
+                    ``b_ub - A_ub @ x``.  This quantity is also commonly
+                    referred to as "slack".
+
+                marginals : np.ndarray
+                    The sensitivity (partial derivative) of the objective
+                    function with respect to the right-hand side of the
+                    inequality constraints, `b_ub`.
+
+            eqlin : OptimizeResult
+                Solution and sensitivity information corresponding to the
+                equality constraints, `b_eq`.  A dictionary consisting of the
+                fields:
+
+                residual : np.ndarray
+                    The (nominally zero) residuals of the equality constraints,
+                    ``b_eq - A_eq @ x``.
+
+                marginals : np.ndarray
+                    The sensitivity (partial derivative) of the objective
+                    function with respect to the right-hand side of the
+                    equality constraints, `b_eq`.
+
+            lower, upper : OptimizeResult
+                Solution and sensitivity information corresponding to the
+                lower and upper bounds on decision variables, `bounds`.
+
+                residual : np.ndarray
+                    The (nominally positive) values of the quantity
+                    ``x - lb`` (lower) or ``ub - x`` (upper).
+
+                marginals : np.ndarray
+                    The sensitivity (partial derivative) of the objective
+                    function with respect to the lower and upper
+                    `bounds`.
+
+            mip_node_count : int
+                The number of subproblems or "nodes" solved by the MILP
+                solver. Only present when `integrality` is not `None`.
+
+            mip_dual_bound : float
+                The MILP solver's final estimate of the lower bound on the
+                optimal solution. Only present when `integrality` is not
+                `None`.
+
+            mip_gap : float
+                The difference between the final objective function value
+                and the final dual bound, scaled by the final objective
+                function value. Only present when `integrality` is not
+                `None`.
+
+    Notes
+    -----
+    The result fields `ineqlin`, `eqlin`, `lower`, and `upper` all contain
+    `marginals`, or partial derivatives of the objective function with respect
+    to the right-hand side of each constraint. These partial derivatives are
+    also referred to as "Lagrange multipliers", "dual values", and
+    "shadow prices". The sign convention of `marginals` is opposite that
+    of Lagrange multipliers produced by many nonlinear solvers.
+
+    References
+    ----------
+    .. [15] Harris, Paula MJ. "Pivot selection methods of the Devex LP code."
+            Mathematical programming 5.1 (1973): 1-28.
+    .. [16] Goldfarb, Donald, and John Ker Reid. "A practicable steepest-edge
+            simplex algorithm." Mathematical Programming 12.1 (1977): 361-371.
+    """
+
+    _check_unknown_options(unknown_options)
+
+    # Map options to HiGHS enum values
+    simplex_dual_edge_weight_strategy_enum = _convert_to_highs_enum(
+        simplex_dual_edge_weight_strategy,
+        'simplex_dual_edge_weight_strategy',
+        choices={'dantzig': HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_DANTZIG,
+                 'devex': HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_DEVEX,
+                 'steepest-devex': HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_CHOOSE,
+                 'steepest':
+                 HIGHS_SIMPLEX_EDGE_WEIGHT_STRATEGY_STEEPEST_EDGE,
+                 None: None})
+
+    c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = lp
+
+    lb, ub = bounds.T.copy()  # separate bounds; copy -> C-contiguous
+    # highs_wrapper solves LHS <= A*x <= RHS, not equality constraints
+    lhs_ub = -np.ones_like(b_ub)*np.inf  # LHS of UB constraints is -inf
+    rhs_ub = b_ub  # RHS of UB constraints is b_ub
+    lhs_eq = b_eq  # Equality constraint is an inequality
+    rhs_eq = b_eq  # constraint with LHS=RHS
+    lhs = np.concatenate((lhs_ub, lhs_eq))
+    rhs = np.concatenate((rhs_ub, rhs_eq))
+
+    if issparse(A_ub) or issparse(A_eq):
+        A = vstack((A_ub, A_eq))
+    else:
+        A = np.vstack((A_ub, A_eq))
+    A = csc_matrix(A)
+
+    options = {
+        'presolve': presolve,
+        'sense': HIGHS_OBJECTIVE_SENSE_MINIMIZE,
+        'solver': solver,
+        'time_limit': time_limit,
+        'highs_debug_level': MESSAGE_LEVEL_NONE,
+        'dual_feasibility_tolerance': dual_feasibility_tolerance,
+        'ipm_optimality_tolerance': ipm_optimality_tolerance,
+        'log_to_console': disp,
+        'mip_max_nodes': mip_max_nodes,
+        'output_flag': disp,
+        'primal_feasibility_tolerance': primal_feasibility_tolerance,
+        'simplex_dual_edge_weight_strategy':
+            simplex_dual_edge_weight_strategy_enum,
+        'simplex_strategy': HIGHS_SIMPLEX_STRATEGY_DUAL,
+        'simplex_crash_strategy': HIGHS_SIMPLEX_CRASH_STRATEGY_OFF,
+        'ipm_iteration_limit': maxiter,
+        'simplex_iteration_limit': maxiter,
+        'mip_rel_gap': mip_rel_gap,
+    }
+
+    # np.inf doesn't work; use very large constant
+    rhs = _replace_inf(rhs)
+    lhs = _replace_inf(lhs)
+    lb = _replace_inf(lb)
+    ub = _replace_inf(ub)
+
+    if integrality is None or np.sum(integrality) == 0:
+        integrality = np.empty(0)
+    else:
+        integrality = np.array(integrality)
+
+    res = _highs_wrapper(c, A.indptr, A.indices, A.data, lhs, rhs,
+                         lb, ub, integrality.astype(np.uint8), options)
+
+    # HiGHS represents constraints as lhs/rhs, so
+    # Ax + s = b => Ax = b - s
+    # and we need to split up s by A_ub and A_eq
+    if 'slack' in res:
+        slack = res['slack']
+        con = np.array(slack[len(b_ub):])
+        slack = np.array(slack[:len(b_ub)])
+    else:
+        slack, con = None, None
+
+    # lagrange multipliers for equalities/inequalities and upper/lower bounds
+    if 'lambda' in res:
+        lamda = res['lambda']
+        marg_ineqlin = np.array(lamda[:len(b_ub)])
+        marg_eqlin = np.array(lamda[len(b_ub):])
+        marg_upper = np.array(res['marg_bnds'][1, :])
+        marg_lower = np.array(res['marg_bnds'][0, :])
+    else:
+        marg_ineqlin, marg_eqlin = None, None
+        marg_upper, marg_lower = None, None
+
+    # this needs to be updated if we start choosing the solver intelligently
+    solvers = {"ipm": "highs-ipm", "simplex": "highs-ds", None: "highs-ds"}
+
+    # Convert to scipy-style status and message
+    highs_status = res.get('status', None)
+    highs_message = res.get('message', None)
+    status, message = _highs_to_scipy_status_message(highs_status,
+                                                     highs_message)
+
+    x = np.array(res['x']) if 'x' in res else None
+    sol = {'x': x,
+           'slack': slack,
+           'con': con,
+           'ineqlin': OptimizeResult({
+               'residual': slack,
+               'marginals': marg_ineqlin,
+           }),
+           'eqlin': OptimizeResult({
+               'residual': con,
+               'marginals': marg_eqlin,
+           }),
+           'lower': OptimizeResult({
+               'residual': None if x is None else x - lb,
+               'marginals': marg_lower,
+           }),
+           'upper': OptimizeResult({
+               'residual': None if x is None else ub - x,
+               'marginals': marg_upper
+            }),
+           'fun': res.get('fun'),
+           'status': status,
+           'success': res['status'] == MODEL_STATUS_OPTIMAL,
+           'message': message,
+           'nit': res.get('simplex_nit', 0) or res.get('ipm_nit', 0),
+           'crossover_nit': res.get('crossover_nit'),
+           }
+
+    if np.any(x) and integrality is not None:
+        sol.update({
+            'mip_node_count': res.get('mip_node_count', 0),
+            'mip_dual_bound': res.get('mip_dual_bound', 0.0),
+            'mip_gap': res.get('mip_gap', 0.0),
+        })
+
+    return sol
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_linprog_ip.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_linprog_ip.py
new file mode 100644
index 00000000..37779a20
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_linprog_ip.py
@@ -0,0 +1,1128 @@
+"""Interior-point method for linear programming
+
+The *interior-point* method uses the primal-dual path following algorithm
+outlined in [1]_. This algorithm supports sparse constraint matrices and
+is typically faster than the simplex methods, especially for large, sparse
+problems. Note, however, that the solution returned may be slightly less
+accurate than those of the simplex methods and will not, in general,
+correspond with a vertex of the polytope defined by the constraints.
+
+    .. versionadded:: 1.0.0
+
+References
+----------
+.. [1] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
+       optimizer for linear programming: an implementation of the
+       homogeneous algorithm." High performance optimization. Springer US,
+       2000. 197-232.
+"""
+# Author: Matt Haberland
+
+import numpy as np
+import scipy as sp
+import scipy.sparse as sps
+from warnings import warn
+from scipy.linalg import LinAlgError
+from ._optimize import OptimizeWarning, OptimizeResult, _check_unknown_options
+from ._linprog_util import _postsolve
+has_umfpack = True
+has_cholmod = True
+try:
+    import sksparse
+    from sksparse.cholmod import cholesky as cholmod
+    from sksparse.cholmod import analyze as cholmod_analyze
+except ImportError:
+    has_cholmod = False
+try:
+    import scikits.umfpack  # test whether to use factorized
+except ImportError:
+    has_umfpack = False
+
+
+def _get_solver(M, sparse=False, lstsq=False, sym_pos=True,
+                cholesky=True, permc_spec='MMD_AT_PLUS_A'):
+    """
+    Given solver options, return a handle to the appropriate linear system
+    solver.
+
+    Parameters
+    ----------
+    M : 2-D array
+        As defined in [4] Equation 8.31
+    sparse : bool (default = False)
+        True if the system to be solved is sparse. This is typically set
+        True when the original ``A_ub`` and ``A_eq`` arrays are sparse.
+    lstsq : bool (default = False)
+        True if the system is ill-conditioned and/or (nearly) singular and
+        thus a more robust least-squares solver is desired. This is sometimes
+        needed as the solution is approached.
+    sym_pos : bool (default = True)
+        True if the system matrix is symmetric positive definite.
+        Sometimes this needs to be set false as the solution is approached,
+        even when the system should be symmetric positive definite, due to
+        numerical difficulties.
+    cholesky : bool (default = True)
+        True if the system is to be solved by Cholesky, rather than LU,
+        decomposition. This is typically faster unless the problem is very
+        small or prone to numerical difficulties.
+    permc_spec : str (default = 'MMD_AT_PLUS_A')
+        Sparsity preservation strategy used by SuperLU. Acceptable values are:
+
+        - ``NATURAL``: natural ordering.
+        - ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
+        - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
+        - ``COLAMD``: approximate minimum degree column ordering.
+
+        See SuperLU documentation.
+
+    Returns
+    -------
+    solve : function
+        Handle to the appropriate solver function
+
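+    Examples
+    --------
+    An illustrative sketch (``A``, ``x``, ``z``, and ``r`` are hypothetical
+    arrays; the dense Cholesky path is shown)::
+
+        M = A.dot((x / z).reshape(-1, 1) * A.T)  # normal-equations matrix
+        solve = _get_solver(M, sparse=False, cholesky=True)
+        if solve is not None:  # factorization succeeded
+            v = solve(r)       # solves M @ v = r
+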
+    """
+    try:
+        if sparse:
+            if lstsq:
+                def solve(r, sym_pos=False):
+                    return sps.linalg.lsqr(M, r)[0]
+            elif cholesky:
+                try:
+                    # Will raise an exception in the first call,
+                    # or when the matrix changes due to a new problem
+                    _get_solver.cholmod_factor.cholesky_inplace(M)
+                except Exception:
+                    _get_solver.cholmod_factor = cholmod_analyze(M)
+                    _get_solver.cholmod_factor.cholesky_inplace(M)
+                solve = _get_solver.cholmod_factor
+            else:
+                if has_umfpack and sym_pos:
+                    solve = sps.linalg.factorized(M)
+                else:  # factorized doesn't pass permc_spec
+                    solve = sps.linalg.splu(M, permc_spec=permc_spec).solve
+
+        else:
+            if lstsq:  # sometimes necessary as solution is approached
+                def solve(r):
+                    return sp.linalg.lstsq(M, r)[0]
+            elif cholesky:
+                L = sp.linalg.cho_factor(M)
+
+                def solve(r):
+                    return sp.linalg.cho_solve(L, r)
+            else:
+                # this seems to cache the matrix factorization, so solving
+                # with multiple right hand sides is much faster
+                def solve(r, sym_pos=sym_pos):
+                    if sym_pos:
+                        return sp.linalg.solve(M, r, assume_a="pos")
+                    else:
+                        return sp.linalg.solve(M, r)
+    # There are many things that can go wrong here, and it's hard to say
+    # what all of them are. It doesn't really matter: if the matrix can't be
+    # factorized, return None. get_solver will be called again with different
+    # inputs, and a new routine will try to factorize the matrix.
+    except KeyboardInterrupt:
+        raise
+    except Exception:
+        return None
+    return solve
+
+
+def _get_delta(A, b, c, x, y, z, tau, kappa, gamma, eta, sparse=False,
+               lstsq=False, sym_pos=True, cholesky=True, pc=True, ip=False,
+               permc_spec='MMD_AT_PLUS_A'):
+    """
+    Given standard form problem defined by ``A``, ``b``, and ``c``;
+    current variable estimates ``x``, ``y``, ``z``, ``tau``, and ``kappa``;
+    algorithmic parameters ``gamma`` and ``eta``;
+    and options ``sparse``, ``lstsq``, ``sym_pos``, ``cholesky``, ``pc``
+    (predictor-corrector), and ``ip`` (initial point improvement),
+    get the search direction for increments to the variable estimates.
+
+    Parameters
+    ----------
+    As defined in [4], except:
+    sparse : bool
+        True if the system to be solved is sparse. This is typically set
+        True when the original ``A_ub`` and ``A_eq`` arrays are sparse.
+    lstsq : bool
+        True if the system is ill-conditioned and/or (nearly) singular and
+        thus a more robust least-squares solver is desired. This is sometimes
+        needed as the solution is approached.
+    sym_pos : bool
+        True if the system matrix is symmetric positive definite.
+        Sometimes this needs to be set false as the solution is approached,
+        even when the system should be symmetric positive definite, due to
+        numerical difficulties.
+    cholesky : bool
+        True if the system is to be solved by Cholesky, rather than LU,
+        decomposition. This is typically faster unless the problem is very
+        small or prone to numerical difficulties.
+    pc : bool
+        True if the predictor-corrector method of Mehrotra is to be used. This
+        is almost always (if not always) beneficial. Even though it requires
+        the solution of an additional linear system, the factorization
+        is typically (implicitly) reused so solution is efficient, and the
+        number of algorithm iterations is typically reduced.
+    ip : bool
+        True if the improved initial point suggestion due to [4] section 4.3
+        is desired. It's unclear whether this is beneficial.
+    permc_spec : str (default = 'MMD_AT_PLUS_A')
+        (Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos =
+        True``.) A matrix is factorized in each iteration of the algorithm.
+        This option specifies how to permute the columns of the matrix for
+        sparsity preservation. Acceptable values are:
+
+        - ``NATURAL``: natural ordering.
+        - ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
+        - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
+        - ``COLAMD``: approximate minimum degree column ordering.
+
+        This option can impact the convergence of the
+        interior point algorithm; test different values to determine which
+        performs best for your problem. For more information, refer to
+        ``scipy.sparse.linalg.splu``.
+
+    Returns
+    -------
+    Search directions as defined in [4]
+
+    References
+    ----------
+    .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
+           optimizer for linear programming: an implementation of the
+           homogeneous algorithm." High performance optimization. Springer US,
+           2000. 197-232.
+
+    """
+    if A.shape[0] == 0:
+        # If there are no constraints, some solvers fail (understandably)
+        # rather than returning empty solution. This gets the job done.
+        sparse, lstsq, sym_pos, cholesky = False, False, True, False
+    n_x = len(x)
+
+    # [4] Equation 8.8
+    r_P = b * tau - A.dot(x)
+    r_D = c * tau - A.T.dot(y) - z
+    r_G = c.dot(x) - b.transpose().dot(y) + kappa
+    mu = (x.dot(z) + tau * kappa) / (n_x + 1)
+
+    #  Assemble M from [4] Equation 8.31
+    Dinv = x / z
+
+    if sparse:
+        M = A.dot(sps.diags(Dinv, 0, format="csc").dot(A.T))
+    else:
+        M = A.dot(Dinv.reshape(-1, 1) * A.T)
+    solve = _get_solver(M, sparse, lstsq, sym_pos, cholesky, permc_spec)
+
+    # pc: "predictor-corrector" [4] Section 4.1
+    # In development this option could be turned off
+    # but it always seems to improve performance substantially
+    n_corrections = 1 if pc else 0
+
+    i = 0
+    alpha, d_x, d_z, d_tau, d_kappa = 0, 0, 0, 0, 0
+    while i <= n_corrections:
+        # Reference [4] Eq. 8.6
+        rhatp = eta(gamma) * r_P
+        rhatd = eta(gamma) * r_D
+        rhatg = eta(gamma) * r_G
+
+        # Reference [4] Eq. 8.7
+        rhatxs = gamma * mu - x * z
+        rhattk = gamma * mu - tau * kappa
+
+        if i == 1:
+            if ip:  # if the correction is to get "initial point"
+                # Reference [4] Eq. 8.23
+                rhatxs = ((1 - alpha) * gamma * mu -
+                          x * z - alpha**2 * d_x * d_z)
+                rhattk = ((1 - alpha) * gamma * mu -
+                          tau * kappa - alpha**2 * d_tau * d_kappa)
+            else:  # if the correction is for "predictor-corrector"
+                # Reference [4] Eq. 8.13
+                rhatxs -= d_x * d_z
+                rhattk -= d_tau * d_kappa
+
+        # sometimes numerical difficulties arise as the solution is approached
+        # this loop tries to solve the equations using a sequence of functions
+        # for solve. For dense systems, the order is:
+        # 1. scipy.linalg.cho_factor/scipy.linalg.cho_solve,
+        # 2. scipy.linalg.solve w/ sym_pos = True,
+        # 3. scipy.linalg.solve w/ sym_pos = False, and if all else fails
+        # 4. scipy.linalg.lstsq
+        # For sparse systems, the order is:
+        # 1. sksparse.cholmod.cholesky (if available)
+        # 2. scipy.sparse.linalg.factorized (if umfpack available)
+        # 3. scipy.sparse.linalg.splu
+        # 4. scipy.sparse.linalg.lsqr
+        solved = False
+        while not solved:
+            try:
+                # [4] Equation 8.28
+                p, q = _sym_solve(Dinv, A, c, b, solve)
+                # [4] Equation 8.29
+                u, v = _sym_solve(Dinv, A, rhatd -
+                                  (1 / x) * rhatxs, rhatp, solve)
+                if np.any(np.isnan(p)) or np.any(np.isnan(q)):
+                    raise LinAlgError
+                solved = True
+            except (LinAlgError, ValueError, TypeError) as e:
+                # Usually this doesn't happen. If it does, it happens when
+                # there are redundant constraints or when approaching the
+                # solution. If so, change solver.
+                if cholesky:
+                    cholesky = False
+                    warn(
+                        "Solving system with option 'cholesky':True "
+                        "failed. It is normal for this to happen "
+                        "occasionally, especially as the solution is "
+                        "approached. However, if you see this frequently, "
+                        "consider setting option 'cholesky' to False.",
+                        OptimizeWarning, stacklevel=5)
+                elif sym_pos:
+                    sym_pos = False
+                    warn(
+                        "Solving system with option 'sym_pos':True "
+                        "failed. It is normal for this to happen "
+                        "occasionally, especially as the solution is "
+                        "approached. However, if you see this frequently, "
+                        "consider setting option 'sym_pos' to False.",
+                        OptimizeWarning, stacklevel=5)
+                elif not lstsq:
+                    lstsq = True
+                    warn(
+                        "Solving system with option 'sym_pos':False "
+                        "failed. This may happen occasionally, "
+                        "especially as the solution is "
+                        "approached. However, if you see this frequently, "
+                        "your problem may be numerically challenging. "
+                        "If you cannot improve the formulation, consider "
+                        "setting 'lstsq' to True. Consider also setting "
+                        "`presolve` to True, if it is not already.",
+                        OptimizeWarning, stacklevel=5)
+                else:
+                    raise e
+                solve = _get_solver(M, sparse, lstsq, sym_pos,
+                                    cholesky, permc_spec)
+        # [4] Results after 8.29
+        d_tau = ((rhatg + 1 / tau * rhattk - (-c.dot(u) + b.dot(v))) /
+                 (1 / tau * kappa + (-c.dot(p) + b.dot(q))))
+        d_x = u + p * d_tau
+        d_y = v + q * d_tau
+
+        # [4] Relations between  after 8.25 and 8.26
+        d_z = (1 / x) * (rhatxs - z * d_x)
+        d_kappa = 1 / tau * (rhattk - kappa * d_tau)
+
+        # [4] 8.12 and "Let alpha be the maximal possible step..." before 8.23
+        alpha = _get_step(x, d_x, z, d_z, tau, d_tau, kappa, d_kappa, 1)
+        if ip:  # initial point - see [4] 4.4
+            gamma = 10
+        else:  # predictor-corrector, [4] definition after 8.12
+            beta1 = 0.1  # [4] pg. 220 (Table 8.1)
+            gamma = (1 - alpha)**2 * min(beta1, (1 - alpha))
+        i += 1
+
+    return d_x, d_y, d_z, d_tau, d_kappa
+
+
+def _sym_solve(Dinv, A, r1, r2, solve):
+    """
+    An implementation of [4] equation 8.31 and 8.32
+
+    References
+    ----------
+    .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
+           optimizer for linear programming: an implementation of the
+           homogeneous algorithm." High performance optimization. Springer US,
+           2000. 197-232.
+
+    """
+    # [4] 8.31
+    r = r2 + A.dot(Dinv * r1)
+    v = solve(r)
+    # [4] 8.32
+    u = Dinv * (A.T.dot(v) - r1)
+    return u, v
+
+
+def _get_step(x, d_x, z, d_z, tau, d_tau, kappa, d_kappa, alpha0):
+    """
+    An implementation of [4] equation 8.21
+
+    References
+    ----------
+    .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
+           optimizer for linear programming: an implementation of the
+           homogeneous algorithm." High performance optimization. Springer US,
+           2000. 197-232.
+
+    """
+    # [4] 4.3 Equation 8.21, ignoring 8.20 requirement
+    # same step is taken in primal and dual spaces
+    # alpha0 is basically beta3 from [4] Table 8.1, but instead of beta3
+    # the value 1 is used in the Mehrotra corrector and initial point correction
+    i_x = d_x < 0
+    i_z = d_z < 0
+    alpha_x = alpha0 * np.min(x[i_x] / -d_x[i_x]) if np.any(i_x) else 1
+    alpha_tau = alpha0 * tau / -d_tau if d_tau < 0 else 1
+    alpha_z = alpha0 * np.min(z[i_z] / -d_z[i_z]) if np.any(i_z) else 1
+    alpha_kappa = alpha0 * kappa / -d_kappa if d_kappa < 0 else 1
+    alpha = np.min([1, alpha_x, alpha_tau, alpha_z, alpha_kappa])
+    return alpha
+
+
+def _get_message(status):
+    """
+    Given problem status code, return a more detailed message.
+
+    Parameters
+    ----------
+    status : int
+        An integer representing the exit status of the optimization::
+
+         0 : Optimization terminated successfully
+         1 : Iteration limit reached
+         2 : Problem appears to be infeasible
+         3 : Problem appears to be unbounded
+         4 : Serious numerical difficulties encountered
+
+    Returns
+    -------
+    message : str
+        A string descriptor of the exit status of the optimization.
+
+    """
+    messages = (
+        ["Optimization terminated successfully.",
+         "The iteration limit was reached before the algorithm converged.",
+         "The algorithm terminated successfully and determined that the "
+         "problem is infeasible.",
+         "The algorithm terminated successfully and determined that the "
+         "problem is unbounded.",
+         "Numerical difficulties were encountered before the problem "
+         "converged. Please check your problem formulation for errors, "
+         "independence of linear equality constraints, and reasonable "
+         "scaling and matrix condition numbers. If you continue to "
+         "encounter this error, please submit a bug report."
+         ])
+    return messages[status]
+
+
+def _do_step(x, y, z, tau, kappa, d_x, d_y, d_z, d_tau, d_kappa, alpha):
+    """
+    An implementation of [4] Equation 8.9
+
+    References
+    ----------
+    .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
+           optimizer for linear programming: an implementation of the
+           homogeneous algorithm." High performance optimization. Springer US,
+           2000. 197-232.
+
+    """
+    x = x + alpha * d_x
+    tau = tau + alpha * d_tau
+    z = z + alpha * d_z
+    kappa = kappa + alpha * d_kappa
+    y = y + alpha * d_y
+    return x, y, z, tau, kappa
+
+
+def _get_blind_start(shape):
+    """
+    Return the starting point from [4] 4.4
+
+    References
+    ----------
+    .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
+           optimizer for linear programming: an implementation of the
+           homogeneous algorithm." High performance optimization. Springer US,
+           2000. 197-232.
+
+    """
+    m, n = shape
+    x0 = np.ones(n)
+    y0 = np.zeros(m)
+    z0 = np.ones(n)
+    tau0 = 1
+    kappa0 = 1
+    return x0, y0, z0, tau0, kappa0
+
+
+def _indicators(A, b, c, c0, x, y, z, tau, kappa):
+    """
+    Implementation of several equations from [4] used as indicators of
+    the status of optimization.
+
+    References
+    ----------
+    .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
+           optimizer for linear programming: an implementation of the
+           homogeneous algorithm." High performance optimization. Springer US,
+           2000. 197-232.
+
+    """
+
+    # residuals for termination are relative to initial values
+    x0, y0, z0, tau0, kappa0 = _get_blind_start(A.shape)
+
+    # See [4], Section 4 - The Homogeneous Algorithm, Equation 8.8
+    def r_p(x, tau):
+        return b * tau - A.dot(x)
+
+    def r_d(y, z, tau):
+        return c * tau - A.T.dot(y) - z
+
+    def r_g(x, y, kappa):
+        return kappa + c.dot(x) - b.dot(y)
+
+    # np.dot returns a scalar when tau and kappa are size-one arrays
+    def mu(x, tau, z, kappa):
+        return (x.dot(z) + np.dot(tau, kappa)) / (len(x) + 1)
+
+    obj = c.dot(x / tau) + c0
+
+    def norm(a):
+        return np.linalg.norm(a)
+
+    # See [4], Section 4.5 - The Stopping Criteria
+    r_p0 = r_p(x0, tau0)
+    r_d0 = r_d(y0, z0, tau0)
+    r_g0 = r_g(x0, y0, kappa0)
+    mu_0 = mu(x0, tau0, z0, kappa0)
+    rho_A = norm(c.T.dot(x) - b.T.dot(y)) / (tau + norm(b.T.dot(y)))
+    rho_p = norm(r_p(x, tau)) / max(1, norm(r_p0))
+    rho_d = norm(r_d(y, z, tau)) / max(1, norm(r_d0))
+    rho_g = norm(r_g(x, y, kappa)) / max(1, norm(r_g0))
+    rho_mu = mu(x, tau, z, kappa) / mu_0
+    return rho_p, rho_d, rho_A, rho_g, rho_mu, obj
+
+
+def _display_iter(rho_p, rho_d, rho_g, alpha, rho_mu, obj, header=False):
+    """
+    Print indicators of optimization status to the console.
+
+    Parameters
+    ----------
+    rho_p : float
+        The (normalized) primal feasibility, see [4] 4.5
+    rho_d : float
+        The (normalized) dual feasibility, see [4] 4.5
+    rho_g : float
+        The (normalized) duality gap, see [4] 4.5
+    alpha : float
+        The step size, see [4] 4.3
+    rho_mu : float
+        The (normalized) path parameter, see [4] 4.5
+    obj : float
+        The objective function value of the current iterate
+    header : bool
+        True if a header is to be printed
+
+    References
+    ----------
+    .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
+           optimizer for linear programming: an implementation of the
+           homogeneous algorithm." High performance optimization. Springer US,
+           2000. 197-232.
+
+    """
+    if header:
+        print("Primal Feasibility ",
+              "Dual Feasibility   ",
+              "Duality Gap        ",
+              "Step            ",
+              "Path Parameter     ",
+              "Objective          ")
+
+    # each float is left-justified in a 20-character field (17 for the step
+    # size) and printed with 13 significant figures
+    fmt = '{0:<20.13}{1:<20.13}{2:<20.13}{3:<17.13}{4:<20.13}{5:<20.13}'
+    print(fmt.format(
+        float(rho_p),
+        float(rho_d),
+        float(rho_g),
+        alpha if isinstance(alpha, str) else float(alpha),
+        float(rho_mu),
+        float(obj)))
+
+
+def _ip_hsd(A, b, c, c0, alpha0, beta, maxiter, disp, tol, sparse, lstsq,
+            sym_pos, cholesky, pc, ip, permc_spec, callback, postsolve_args):
+    r"""
+    Solve a linear programming problem in standard form:
+
+    Minimize::
+
+        c @ x
+
+    Subject to::
+
+        A @ x == b
+            x >= 0
+
+    using the interior point method of [4].
+
+    Parameters
+    ----------
+    A : 2-D array
+        2-D array such that ``A @ x`` gives the values of the equality
+        constraints at ``x``.
+    b : 1-D array
+        1-D array of values representing the RHS of each equality constraint
+        (row) in ``A`` (for standard form problem).
+    c : 1-D array
+        Coefficients of the linear objective function to be minimized (for
+        standard form problem).
+    c0 : float
+        Constant term in objective function due to fixed (and eliminated)
+        variables. (Purely for display.)
+    alpha0 : float
+        The maximal step size for Mehrotra's predictor-corrector search
+        direction; see :math:`\beta_3` of [4]_ Table 8.1
+    beta : float
+        The desired reduction of the path parameter :math:`\mu` (see [6]_)
+    maxiter : int
+        The maximum number of iterations of the algorithm.
+    disp : bool
+        Set to ``True`` if indicators of optimization status are to be printed
+        to the console each iteration.
+    tol : float
+        Termination tolerance; see [4]_ Section 4.5.
+    sparse : bool
+        Set to ``True`` if the problem is to be treated as sparse. However,
+        the inputs ``A_eq`` and ``A_ub`` should nonetheless be provided as
+        (dense) arrays rather than sparse matrices.
+    lstsq : bool
+        Set to ``True`` if the problem is expected to be very poorly
+        conditioned. This should always be left as ``False`` unless severe
+        numerical difficulties are frequently encountered, and a better option
+        would be to improve the formulation of the problem.
+    sym_pos : bool
+        Leave ``True`` if the problem is expected to yield a well conditioned
+        symmetric positive definite normal equation matrix (almost always).
+    cholesky : bool
+        Set to ``True`` if the normal equations are to be solved by explicit
+        Cholesky decomposition followed by explicit forward/backward
+        substitution. This is typically faster for moderate, dense problems
+        that are numerically well-behaved.
+    pc : bool
+        Leave ``True`` if the predictor-corrector method of Mehrotra is to be
+        used. This is almost always (if not always) beneficial.
+    ip : bool
+        Set to ``True`` if the improved initial point suggestion due to [4]_
+        Section 4.3 is desired. It's unclear whether this is beneficial.
+    permc_spec : str (default = 'MMD_AT_PLUS_A')
+        (Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos =
+        True``.) A matrix is factorized in each iteration of the algorithm.
+        This option specifies how to permute the columns of the matrix for
+        sparsity preservation. Acceptable values are:
+
+        - ``NATURAL``: natural ordering.
+        - ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
+        - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
+        - ``COLAMD``: approximate minimum degree column ordering.
+
+        This option can impact the convergence of the
+        interior point algorithm; test different values to determine which
+        performs best for your problem. For more information, refer to
+        ``scipy.sparse.linalg.splu``.
+    callback : callable, optional
+        If a callback function is provided, it will be called within each
+        iteration of the algorithm. The callback function must accept a single
+        `scipy.optimize.OptimizeResult` consisting of the following fields:
+
+            x : 1-D array
+                Current solution vector
+            fun : float
+                Current value of the objective function
+            success : bool
+                True only when an algorithm has completed successfully,
+                so this is always False as the callback function is called
+                only while the algorithm is still iterating.
+            slack : 1-D array
+                The values of the slack variables. Each slack variable
+                corresponds to an inequality constraint. If the slack is zero,
+                the corresponding constraint is active.
+            con : 1-D array
+                The (nominally zero) residuals of the equality constraints,
+                that is, ``b - A_eq @ x``
+            phase : int
+                The phase of the algorithm being executed. This is always
+                1 for the interior-point method because it has only one phase.
+            status : int
+                For the interior-point method, this is always 0 because if a
+                different status is detected, the algorithm terminates.
+            nit : int
+                The number of iterations performed.
+            message : str
+                A string descriptor of the exit status of the optimization.
+    postsolve_args : tuple
+        Data needed by _postsolve to convert the solution to the standard-form
+        problem into the solution to the original problem.
+
+    Returns
+    -------
+    x_hat : 1-D array
+        Solution vector (for standard form problem).
+    status : int
+        An integer representing the exit status of the optimization::
+
+         0 : Optimization terminated successfully
+         1 : Iteration limit reached
+         2 : Problem appears to be infeasible
+         3 : Problem appears to be unbounded
+         4 : Serious numerical difficulties encountered
+
+    message : str
+        A string descriptor of the exit status of the optimization.
+    iteration : int
+        The number of iterations taken to solve the problem
+
+    References
+    ----------
+    .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
+           optimizer for linear programming: an implementation of the
+           homogeneous algorithm." High performance optimization. Springer US,
+           2000. 197-232.
+    .. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear
+           Programming based on Newton's Method." Unpublished Course Notes,
+           March 2004. Available 2/25/2017 at:
+           https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf
+
+    """
+
+    iteration = 0
+
+    # default initial point
+    x, y, z, tau, kappa = _get_blind_start(A.shape)
+
+    # first iteration is special improvement of initial point
+    ip = ip if pc else False
+
+    # [4] 4.5
+    rho_p, rho_d, rho_A, rho_g, rho_mu, obj = _indicators(
+        A, b, c, c0, x, y, z, tau, kappa)
+    go = rho_p > tol or rho_d > tol or rho_A > tol  # we might get lucky : )
+
+    if disp:
+        _display_iter(rho_p, rho_d, rho_g, "-", rho_mu, obj, header=True)
+    if callback is not None:
+        x_o, fun, slack, con = _postsolve(x/tau, postsolve_args)
+        res = OptimizeResult({'x': x_o, 'fun': fun, 'slack': slack,
+                              'con': con, 'nit': iteration, 'phase': 1,
+                              'complete': False, 'status': 0,
+                              'message': "", 'success': False})
+        callback(res)
+
+    status = 0
+    message = "Optimization terminated successfully."
+
+    if sparse:
+        A = sps.csc_matrix(A)
+        A.T = A.transpose()  # A.T is defined for sparse matrices but is slow
+        # Redefine it to avoid calculating again
+        # This is fine as long as A doesn't change
+
+    while go:
+
+        iteration += 1
+
+        if ip:  # initial point
+            # [4] Section 4.4
+            gamma = 1
+
+            def eta(g):
+                return 1
+        else:
+            # gamma = 0 in predictor step according to [4] 4.1
+            # if predictor/corrector is off, use mean of complementarity [6]
+            # 5.1 / [4] Below Figure 10-4
+            gamma = 0 if pc else beta * np.mean(z * x)
+            # [4] Section 4.1
+
+            def eta(g=gamma):
+                return 1 - g
+
+        try:
+            # Solve [4] 8.6 and 8.7/8.13/8.23
+            d_x, d_y, d_z, d_tau, d_kappa = _get_delta(
+                A, b, c, x, y, z, tau, kappa, gamma, eta,
+                sparse, lstsq, sym_pos, cholesky, pc, ip, permc_spec)
+
+            if ip:  # initial point
+                # [4] 4.4
+                # Formula after 8.23 takes a full step regardless of
+                # whether this makes some variables negative
+                alpha = 1.0
+                x, y, z, tau, kappa = _do_step(
+                    x, y, z, tau, kappa, d_x, d_y,
+                    d_z, d_tau, d_kappa, alpha)
+                x[x < 1] = 1
+                z[z < 1] = 1
+                tau = max(1, tau)
+                kappa = max(1, kappa)
+                ip = False  # done with initial point
+            else:
+                # [4] Section 4.3
+                alpha = _get_step(x, d_x, z, d_z, tau,
+                                  d_tau, kappa, d_kappa, alpha0)
+                # [4] Equation 8.9
+                x, y, z, tau, kappa = _do_step(
+                    x, y, z, tau, kappa, d_x, d_y, d_z, d_tau, d_kappa, alpha)
+
+        except (LinAlgError, FloatingPointError,
+                ValueError, ZeroDivisionError):
+            # this can happen when sparse solver is used and presolve
+            # is turned off. Also observed ValueError in AppVeyor Python 3.6
+            # Win32 build (PR #8676). I've never seen it otherwise.
+            status = 4
+            message = _get_message(status)
+            break
+
+        # [4] 4.5
+        rho_p, rho_d, rho_A, rho_g, rho_mu, obj = _indicators(
+            A, b, c, c0, x, y, z, tau, kappa)
+        go = rho_p > tol or rho_d > tol or rho_A > tol
+
+        if disp:
+            _display_iter(rho_p, rho_d, rho_g, alpha, rho_mu, obj)
+        if callback is not None:
+            x_o, fun, slack, con = _postsolve(x/tau, postsolve_args)
+            res = OptimizeResult({'x': x_o, 'fun': fun, 'slack': slack,
+                                  'con': con, 'nit': iteration, 'phase': 1,
+                                  'complete': False, 'status': 0,
+                                  'message': "", 'success': False})
+            callback(res)
+
+        # [4] 4.5
+        inf1 = (rho_p < tol and rho_d < tol and rho_g < tol and tau < tol *
+                max(1, kappa))
+        inf2 = rho_mu < tol and tau < tol * min(1, kappa)
+        if inf1 or inf2:
+            # [4] Lemma 8.4 / Theorem 8.3
+            if b.transpose().dot(y) > tol:
+                status = 2
+            else:  # elif c.T.dot(x) < tol: ? Probably not necessary.
+                status = 3
+            message = _get_message(status)
+            break
+        elif iteration >= maxiter:
+            status = 1
+            message = _get_message(status)
+            break
+
+    x_hat = x / tau
+    # [4] Statement after Theorem 8.2
+    return x_hat, status, message, iteration
+
+
+def _linprog_ip(c, c0, A, b, callback, postsolve_args, maxiter=1000, tol=1e-8,
+                disp=False, alpha0=.99995, beta=0.1, sparse=False, lstsq=False,
+                sym_pos=True, cholesky=None, pc=True, ip=False,
+                permc_spec='MMD_AT_PLUS_A', **unknown_options):
+    r"""
+    Minimize a linear objective function subject to linear
+    equality and non-negativity constraints using the interior point method
+    of [4]_. Linear programming is intended to solve problems
+    of the following form:
+
+    Minimize::
+
+        c @ x
+
+    Subject to::
+
+        A @ x == b
+            x >= 0
+
+    User-facing documentation is in _linprog_doc.py.
+
+    Parameters
+    ----------
+    c : 1-D array
+        Coefficients of the linear objective function to be minimized.
+    c0 : float
+        Constant term in objective function due to fixed (and eliminated)
+        variables. (Purely for display.)
+    A : 2-D array
+        2-D array such that ``A @ x`` gives the values of the equality
+        constraints at ``x``.
+    b : 1-D array
+        1-D array of values representing the right hand side of each equality
+        constraint (row) in ``A``.
+    callback : callable, optional
+        Callback function to be executed once per iteration.
+    postsolve_args : tuple
+        Data needed by _postsolve to convert the solution to the standard-form
+        problem into the solution to the original problem.
+
+    Options
+    -------
+    maxiter : int (default = 1000)
+        The maximum number of iterations of the algorithm.
+    tol : float (default = 1e-8)
+        Termination tolerance to be used for all termination criteria;
+        see [4]_ Section 4.5.
+    disp : bool (default = False)
+        Set to ``True`` if indicators of optimization status are to be printed
+        to the console each iteration.
+    alpha0 : float (default = 0.99995)
+        The maximal step size for Mehrotra's predictor-corrector search
+        direction; see :math:`\beta_{3}` of [4]_ Table 8.1.
+    beta : float (default = 0.1)
+        The desired reduction of the path parameter :math:`\mu` (see [6]_)
+        when Mehrota's predictor-corrector is not in use (uncommon).
+    sparse : bool (default = False)
+        Set to ``True`` if the problem is to be treated as sparse after
+        presolve. If either ``A_eq`` or ``A_ub`` is a sparse matrix,
+        this option will automatically be set ``True``, and the problem
+        will be treated as sparse even during presolve. If your constraint
+        matrices contain mostly zeros and the problem is not very small (less
+        than about 100 constraints or variables), consider setting ``True``
+        or providing ``A_eq`` and ``A_ub`` as sparse matrices.
+    lstsq : bool (default = False)
+        Set to ``True`` if the problem is expected to be very poorly
+        conditioned. This should always be left ``False`` unless severe
+        numerical difficulties are encountered. Leave this at the default
+        unless you receive a warning message suggesting otherwise.
+    sym_pos : bool (default = True)
+        Leave ``True`` if the problem is expected to yield a well conditioned
+        symmetric positive definite normal equation matrix
+        (almost always). Leave this at the default unless you receive
+        a warning message suggesting otherwise.
+    cholesky : bool (default = True)
+        Set to ``True`` if the normal equations are to be solved by explicit
+        Cholesky decomposition followed by explicit forward/backward
+        substitution. This is typically faster for problems
+        that are numerically well-behaved.
+    pc : bool (default = True)
+        Leave ``True`` if the predictor-corrector method of Mehrotra is to be
+        used. This is almost always (if not always) beneficial.
+    ip : bool (default = False)
+        Set to ``True`` if the improved initial point suggestion due to [4]_
+        Section 4.3 is desired. Whether this is beneficial or not
+        depends on the problem.
+    permc_spec : str (default = 'MMD_AT_PLUS_A')
+        (Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos =
+        True``, and no SuiteSparse.)
+        A matrix is factorized in each iteration of the algorithm.
+        This option specifies how to permute the columns of the matrix for
+        sparsity preservation. Acceptable values are:
+
+        - ``NATURAL``: natural ordering.
+        - ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
+        - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
+        - ``COLAMD``: approximate minimum degree column ordering.
+
+        This option can impact the convergence of the
+        interior point algorithm; test different values to determine which
+        performs best for your problem. For more information, refer to
+        ``scipy.sparse.linalg.splu``.
+    unknown_options : dict
+        Optional arguments not used by this particular solver. If
+        `unknown_options` is non-empty a warning is issued listing all
+        unused options.
+
+    Returns
+    -------
+    x : 1-D array
+        Solution vector.
+    status : int
+        An integer representing the exit status of the optimization::
+
+         0 : Optimization terminated successfully
+         1 : Iteration limit reached
+         2 : Problem appears to be infeasible
+         3 : Problem appears to be unbounded
+         4 : Serious numerical difficulties encountered
+
+    message : str
+        A string descriptor of the exit status of the optimization.
+    iteration : int
+        The number of iterations taken to solve the problem.
+
+    Notes
+    -----
+    This method implements the algorithm outlined in [4]_ with ideas from [8]_
+    and a structure inspired by the simpler methods of [6]_.
+
+    The primal-dual path following method begins with initial 'guesses' of
+    the primal and dual variables of the standard form problem and iteratively
+    attempts to solve the (nonlinear) Karush-Kuhn-Tucker conditions for the
+    problem with a gradually reduced logarithmic barrier term added to the
+    objective. This particular implementation uses a homogeneous self-dual
+    formulation, which provides certificates of infeasibility or unboundedness
+    where applicable.
+
+    The default initial point for the primal and dual variables is that
+    defined in [4]_ Section 4.4 Equation 8.22. Optionally (by setting initial
+    point option ``ip=True``), an alternate (potentially improved) starting
+    point can be calculated according to the additional recommendations of
+    [4]_ Section 4.4.
+
+    A search direction is calculated using the predictor-corrector method
+    (single correction) proposed by Mehrotra and detailed in [4]_ Section 4.1.
+    (A potential improvement would be to implement the method of multiple
+    corrections described in [4]_ Section 4.2.) In practice, this is
+    accomplished by solving the normal equations, [4]_ Section 5.1 Equations
+    8.31 and 8.32, derived from the Newton equations [4]_ Section 5 Equations
+    8.25 (compare to [4]_ Section 4 Equations 8.6-8.8). The advantage of
+    solving the normal equations rather than 8.25 directly is that the
+    matrices involved are symmetric positive definite, so Cholesky
+    decomposition can be used rather than the more expensive LU factorization.
+
+    With default options, the solver used to perform the factorization depends
+    on third-party software availability and the conditioning of the problem.
+
+    For dense problems, solvers are tried in the following order:
+
+    1. ``scipy.linalg.cho_factor``
+
+    2. ``scipy.linalg.solve`` with option ``sym_pos=True``
+
+    3. ``scipy.linalg.solve`` with option ``sym_pos=False``
+
+    4. ``scipy.linalg.lstsq``
+
+    For sparse problems:
+
+    1. ``sksparse.cholmod.cholesky`` (if scikit-sparse and SuiteSparse are installed)
+
+    2. ``scipy.sparse.linalg.factorized`` (if scikit-umfpack and SuiteSparse are installed)
+
+    3. ``scipy.sparse.linalg.splu`` (which uses SuperLU distributed with SciPy)
+
+    4. ``scipy.sparse.linalg.lsqr``
+
+    If the solver fails for any reason, successively more robust (but slower)
+    solvers are attempted in the order indicated. Attempting, failing, and
+    re-starting factorization can be time consuming, so if the problem is
+    numerically challenging, options can be set to bypass solvers that are
+    failing. Setting ``cholesky=False`` skips to solver 2,
+    ``sym_pos=False`` skips to solver 3, and ``lstsq=True`` skips
+    to solver 4 for both sparse and dense problems.
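+
+    As a rough sketch of the dense cascade (illustrative only; ``M`` and
+    ``r`` are stand-ins for the normal-equations matrix and right-hand side
+    assembled internally by the ``_get_delta`` helper)::
+
+        from scipy.linalg import (LinAlgError, cho_factor, cho_solve,
+                                  lstsq, solve)
+        try:
+            d = cho_solve(cho_factor(M), r)      # solver 1
+        except LinAlgError:
+            try:
+                d = solve(M, r, sym_pos=True)    # solver 2
+            except LinAlgError:
+                try:
+                    d = solve(M, r)              # solver 3
+                except LinAlgError:
+                    d = lstsq(M, r)[0]           # solver 4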
+
+    Potential improvements for combatting issues associated with dense
+    columns in otherwise sparse problems are outlined in [4]_ Section 5.3 and
+    [10]_ Section 4.1-4.2; the latter also discusses the alleviation of
+    accuracy issues associated with the substitution approach to free
+    variables.
+
+    After calculating the search direction, the maximum possible step size
+    that does not activate the non-negativity constraints is calculated, and
+    the smaller of this step size and unity is applied (as in [4]_ Section
+    4.1.) [4]_ Section 4.3 suggests improvements for choosing the step size.
+
+    The new point is tested according to the termination conditions of [4]_
+    Section 4.5. The same tolerance, which can be set using the ``tol`` option,
+    is used for all checks. (A potential improvement would be to expose
+    the different tolerances to be set independently.) If optimality,
+    unboundedness, or infeasibility is detected, the solve procedure
+    terminates; otherwise it repeats.
+
+    The expected problem formulation differs between the top level ``linprog``
+    module and the method specific solvers. The method specific solvers expect a
+    problem in standard form:
+
+    Minimize::
+
+        c @ x
+
+    Subject to::
+
+        A @ x == b
+            x >= 0
+
+    Whereas the top level ``linprog`` module expects a problem of form:
+
+    Minimize::
+
+        c @ x
+
+    Subject to::
+
+        A_ub @ x <= b_ub
+        A_eq @ x == b_eq
+         lb <= x <= ub
+
+    where ``lb = 0`` and ``ub = None`` unless set in ``bounds``.
+
+    The original problem contains equality, upper-bound and variable constraints
+    whereas the method specific solver requires equality constraints and
+    variable non-negativity.
+
+    ``linprog`` module converts the original problem to standard form by
+    converting the simple bounds to upper bound constraints, introducing
+    non-negative slack variables for inequality constraints, and expressing
+    unbounded variables as the difference between two non-negative variables.
+
+
+    References
+    ----------
+    .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
+           optimizer for linear programming: an implementation of the
+           homogeneous algorithm." High performance optimization. Springer US,
+           2000. 197-232.
+    .. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear
+           Programming based on Newton's Method." Unpublished Course Notes,
+           March 2004. Available 2/25/2017 at
+           https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf
+    .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear
+           programming." Mathematical Programming 71.2 (1995): 221-245.
+    .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
+           programming." Athena Scientific 1 (1997): 997.
+    .. [10] Andersen, Erling D., et al. Implementation of interior point methods
+            for large scale linear programming. HEC/Universite de Geneve, 1996.
+
+    """
+
+    _check_unknown_options(unknown_options)
+
+    # These should be warnings, not errors
+    if (cholesky or cholesky is None) and sparse and not has_cholmod:
+        if cholesky:
+            warn("Sparse cholesky is only available with scikit-sparse. "
+                 "Setting `cholesky = False`",
+                 OptimizeWarning, stacklevel=3)
+        cholesky = False
+
+    if sparse and lstsq:
+        warn("Option combination 'sparse':True and 'lstsq':True "
+             "is not recommended.",
+             OptimizeWarning, stacklevel=3)
+
+    if lstsq and cholesky:
+        warn("Invalid option combination 'lstsq':True "
+             "and 'cholesky':True; option 'cholesky' has no effect when "
+             "'lstsq' is set True.",
+             OptimizeWarning, stacklevel=3)
+
+    valid_permc_spec = ('NATURAL', 'MMD_ATA', 'MMD_AT_PLUS_A', 'COLAMD')
+    if permc_spec.upper() not in valid_permc_spec:
+        warn("Invalid permc_spec option: '" + str(permc_spec) + "'. "
+             "Acceptable values are 'NATURAL', 'MMD_ATA', 'MMD_AT_PLUS_A', "
+             "and 'COLAMD'. Reverting to default.",
+             OptimizeWarning, stacklevel=3)
+        permc_spec = 'MMD_AT_PLUS_A'
+
+    # This can be an error
+    if not sym_pos and cholesky:
+        raise ValueError(
+            "Invalid option combination 'sym_pos':False "
+            "and 'cholesky':True: Cholesky decomposition is only possible "
+            "for symmetric positive definite matrices.")
+
+    cholesky = cholesky or (cholesky is None and sym_pos and not lstsq)
+
+    x, status, message, iteration = _ip_hsd(A, b, c, c0, alpha0, beta,
+                                            maxiter, disp, tol, sparse,
+                                            lstsq, sym_pos, cholesky,
+                                            pc, ip, permc_spec, callback,
+                                            postsolve_args)
+
+    return x, status, message, iteration
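+
+
+# A usage sketch (illustrative; not part of the vendored SciPy source): this
+# solver is reached through the public ``scipy.optimize.linprog`` interface,
+# which builds ``postsolve_args`` during presolve. Assuming a SciPy version
+# in which ``method='interior-point'`` is available:
+#
+#     from scipy.optimize import linprog
+#     # minimize -x0 - 2*x1  s.t.  x0 + x1 <= 4,  x0 <= 2,  x0, x1 >= 0
+#     res = linprog(c=[-1, -2], A_ub=[[1, 1], [1, 0]], b_ub=[4, 2],
+#                   method='interior-point')
+#     # res.x is approximately [0, 4]; res.status uses the codes above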
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_linprog_rs.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_linprog_rs.py
new file mode 100644
index 00000000..338707d8
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_linprog_rs.py
@@ -0,0 +1,572 @@
+"""Revised simplex method for linear programming
+
+The *revised simplex* method uses the method described in [1]_, except
+that a factorization [2]_ of the basis matrix, rather than its inverse,
+is efficiently maintained and used to solve the linear systems at each
+iteration of the algorithm.
+
+.. versionadded:: 1.3.0
+
+References
+----------
+.. [1] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
+       programming." Athena Scientific 1 (1997): 997.
+.. [2] Bartels, Richard H. "A stabilization of the simplex method."
+       Numerische Mathematik 16.5 (1971): 414-434.
+
+"""
+# Author: Matt Haberland
+
+import numpy as np
+from numpy.linalg import LinAlgError
+
+from scipy.linalg import solve
+from ._optimize import _check_unknown_options
+from ._bglu_dense import LU
+from ._bglu_dense import BGLU as BGLU
+from ._linprog_util import _postsolve
+from ._optimize import OptimizeResult
+
+
+def _phase_one(A, b, x0, callback, postsolve_args, maxiter, tol, disp,
+               maxupdate, mast, pivot):
+    """
+    The purpose of phase one is to find an initial basic feasible solution
+    (BFS) to the original problem.
+
+    Generates an auxiliary problem with a trivial BFS and an objective that
+    minimizes infeasibility of the original problem. Solves the auxiliary
+    problem using the main simplex routine (phase two). This either yields
+    a BFS to the original problem or determines that the original problem is
+    infeasible. If feasible, phase one detects redundant rows in the original
+    constraint matrix and removes them, then chooses additional indices as
+    necessary to complete a basis/BFS for the original problem.
+    """
+
+    m, n = A.shape
+    status = 0
+
+    # generate auxiliary problem to get initial BFS
+    A, b, c, basis, x, status = _generate_auxiliary_problem(A, b, x0, tol)
+
+    if status == 6:
+        residual = c.dot(x)
+        iter_k = 0
+        return x, basis, A, b, residual, status, iter_k
+
+    # solve auxiliary problem
+    phase_one_n = n
+    iter_k = 0
+    x, basis, status, iter_k = _phase_two(c, A, x, basis, callback,
+                                          postsolve_args,
+                                          maxiter, tol, disp,
+                                          maxupdate, mast, pivot,
+                                          iter_k, phase_one_n)
+
+    # check for infeasibility
+    residual = c.dot(x)
+    if status == 0 and residual > tol:
+        status = 2
+
+    # drive artificial variables out of basis
+    # TODO: test redundant row removal better
+    # TODO: make solve more efficient with BGLU? This could take a while.
+    keep_rows = np.ones(m, dtype=bool)
+    for basis_column in basis[basis >= n]:
+        B = A[:, basis]
+        try:
+            basis_finder = np.abs(solve(B, A))  # inefficient
+            pertinent_row = np.argmax(basis_finder[:, basis_column])
+            eligible_columns = np.ones(n, dtype=bool)
+            eligible_columns[basis[basis < n]] = 0
+            eligible_column_indices = np.where(eligible_columns)[0]
+            index = np.argmax(basis_finder[:, :n]
+                              [pertinent_row, eligible_columns])
+            new_basis_column = eligible_column_indices[index]
+            if basis_finder[pertinent_row, new_basis_column] < tol:
+                keep_rows[pertinent_row] = False
+            else:
+                basis[basis == basis_column] = new_basis_column
+        except LinAlgError:
+            status = 4
+
+    # form solution to original problem
+    A = A[keep_rows, :n]
+    basis = basis[keep_rows]
+    x = x[:n]
+    m = A.shape[0]
+    return x, basis, A, b, residual, status, iter_k
+
+
+def _get_more_basis_columns(A, basis):
+    """
+    Called when the auxiliary problem terminates with artificial columns in
+    the basis, which must be removed and replaced with non-artificial
+    columns. Finds additional columns that do not make the matrix singular.
+    """
+    m, n = A.shape
+
+    # options for inclusion are those that aren't already in the basis
+    a = np.arange(m+n)
+    bl = np.zeros(len(a), dtype=bool)
+    bl[basis] = 1
+    options = a[~bl]
+    options = options[options < n]  # and they have to be non-artificial
+
+    # form basis matrix
+    B = np.zeros((m, m))
+    B[:, 0:len(basis)] = A[:, basis]
+
+    if (basis.size > 0 and
+            np.linalg.matrix_rank(B[:, :len(basis)]) < len(basis)):
+        raise Exception("Basis has dependent columns")
+
+    rank = 0  # just enter the loop
+    for i in range(n):  # somewhat arbitrary, but we need another way out
+        # permute the options, and take as many as needed
+        new_basis = np.random.permutation(options)[:m-len(basis)]
+        B[:, len(basis):] = A[:, new_basis]  # update the basis matrix
+        rank = np.linalg.matrix_rank(B)      # check the rank
+        if rank == m:
+            break
+
+    return np.concatenate((basis, new_basis))
+
+
+def _generate_auxiliary_problem(A, b, x0, tol):
+    """
+    Modifies original problem to create an auxiliary problem with a trivial
+    initial basic feasible solution and an objective that minimizes
+    infeasibility in the original problem.
+
+    Conceptually, this is done by stacking an identity matrix on the right of
+    the original constraint matrix, adding artificial variables to correspond
+    with each of these new columns, and generating a cost vector that is all
+    zeros except for ones corresponding with each of the new variables.
+
+    An initial basic feasible solution is trivial: all variables are zero
+    except for the artificial variables, which are set equal to the
+    corresponding element of the right hand side `b`.
+
+    Running the simplex method on this auxiliary problem drives all of the
+    artificial variables - and thus the cost - to zero if the original problem
+    is feasible. The original problem is declared infeasible otherwise.
+
+    Much of the complexity below is to improve efficiency by using singleton
+    columns in the original problem where possible, thus generating artificial
+    variables only as necessary, and using an initial 'guess' basic feasible
+    solution.
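+
+    Examples
+    --------
+    A small sketch (illustrative data): with no guess and no usable
+    singleton columns, one artificial column is added per row.
+
+    >>> import numpy as np
+    >>> A = np.array([[1., 1.], [1., -1.]])
+    >>> b = np.array([1., 1.])
+    >>> A_aux, b_aux, c, basis, x, status = _generate_auxiliary_problem(
+    ...     A, b, None, tol=1e-12)
+    >>> # A_aux has shape (2, 4): one artificial column per row. The trivial
+    >>> # BFS is x == [0, 0, 1, 1] with basis == [2, 3], c == [0, 0, 1, 1].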
+    """
+    status = 0
+    m, n = A.shape
+
+    if x0 is not None:
+        x = x0
+    else:
+        x = np.zeros(n)
+
+    r = b - A@x  # residual; this must be all zeros for feasibility
+
+    A[r < 0] = -A[r < 0]  # express problem with RHS positive for trivial BFS
+    b[r < 0] = -b[r < 0]  # to the auxiliary problem
+    r[r < 0] *= -1
+
+    # Rows which we will need to find a trivial way to zero.
+    # This should just be the rows where there is a nonzero residual.
+    # But then we would not necessarily have a column singleton in every row.
+    # This makes it difficult to find an initial basis.
+    if x0 is None:
+        nonzero_constraints = np.arange(m)
+    else:
+        nonzero_constraints = np.where(r > tol)[0]
+
+    # these are (at least some of) the initial basis columns
+    basis = np.where(np.abs(x) > tol)[0]
+
+    if len(nonzero_constraints) == 0 and len(basis) <= m:  # already a BFS
+        c = np.zeros(n)
+        basis = _get_more_basis_columns(A, basis)
+        return A, b, c, basis, x, status
+    elif (len(nonzero_constraints) > m - len(basis) or
+          np.any(x < 0)):  # can't get trivial BFS
+        c = np.zeros(n)
+        status = 6
+        return A, b, c, basis, x, status
+
+    # chooses existing columns appropriate for inclusion in initial basis
+    cols, rows = _select_singleton_columns(A, r)
+
+    # find the rows we need to zero that we _can_ zero with column singletons
+    i_tofix = np.isin(rows, nonzero_constraints)
+    # these columns can't already be in the basis, though
+    # we are going to add them to the basis and change the corresponding x val
+    i_notinbasis = np.logical_not(np.isin(cols, basis))
+    i_fix_without_aux = np.logical_and(i_tofix, i_notinbasis)
+    rows = rows[i_fix_without_aux]
+    cols = cols[i_fix_without_aux]
+
+    # indices of the rows we can only zero with auxiliary variable
+    # these rows will get a one in each auxiliary column
+    arows = nonzero_constraints[np.logical_not(
+                                np.isin(nonzero_constraints, rows))]
+    n_aux = len(arows)
+    acols = n + np.arange(n_aux)          # indices of auxiliary columns
+
+    basis_ng = np.concatenate((cols, acols))   # basis columns not from guess
+    basis_ng_rows = np.concatenate((rows, arows))  # rows we need to zero
+
+    # add auxiliary singleton columns
+    A = np.hstack((A, np.zeros((m, n_aux))))
+    A[arows, acols] = 1
+
+    # generate initial BFS
+    x = np.concatenate((x, np.zeros(n_aux)))
+    x[basis_ng] = r[basis_ng_rows]/A[basis_ng_rows, basis_ng]
+
+    # generate costs to minimize infeasibility
+    c = np.zeros(n_aux + n)
+    c[acols] = 1
+
+    # basis columns correspond with nonzeros in guess, those with column
+    # singletons we used to zero remaining constraints, and any additional
+    # columns to get a full set (m columns)
+    basis = np.concatenate((basis, basis_ng))
+    basis = _get_more_basis_columns(A, basis)  # add columns as needed
+
+    return A, b, c, basis, x, status
+
+
+def _select_singleton_columns(A, b):
+    """
+    Finds singleton columns for which the singleton entry is of the same sign
+    as the right-hand side; these columns are eligible for inclusion in an
+    initial basis. Determines the rows in which the singleton entries are
+    located. For each of these rows, returns the indices of the one singleton
+    column and its corresponding row.
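+
+    Examples
+    --------
+    A small sketch (illustrative data): column 1 is a singleton whose entry
+    agrees in sign with ``b``; column 0 is a singleton of the wrong sign and
+    column 2 is empty, so both are excluded.
+
+    >>> import numpy as np
+    >>> A = np.array([[0., 2., 0.], [1., 0., 0.]])
+    >>> b = np.array([3., -1.])
+    >>> cols, rows = _select_singleton_columns(A, b)
+    >>> # cols == [1], rows == [0]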
+    """
+    # find indices of all singleton columns and corresponding row indices
+    column_indices = np.nonzero(np.sum(np.abs(A) != 0, axis=0) == 1)[0]
+    columns = A[:, column_indices]          # array of singleton columns
+    row_indices = np.zeros(len(column_indices), dtype=int)
+    nonzero_rows, nonzero_columns = np.nonzero(columns)
+    row_indices[nonzero_columns] = nonzero_rows   # corresponding row indices
+
+    # keep only singletons with entries that have same sign as RHS
+    # this is necessary because all elements of BFS must be non-negative
+    same_sign = A[row_indices, column_indices]*b[row_indices] >= 0
+    column_indices = column_indices[same_sign][::-1]
+    row_indices = row_indices[same_sign][::-1]
+    # Reversing the order so that steps below select rightmost columns
+    # for initial basis, which will tend to be slack variables. (If the
+    # guess corresponds with a basic feasible solution but a constraint
+    # is not satisfied with the corresponding slack variable zero, the slack
+    # variable must be basic.)
+
+    # for each row, keep rightmost singleton column with an entry in that row
+    unique_row_indices, first_columns = np.unique(row_indices,
+                                                  return_index=True)
+    return column_indices[first_columns], unique_row_indices
+
+
+def _find_nonzero_rows(A, tol):
+    """
+    Returns logical array indicating the locations of rows with at least
+    one nonzero element.
+    """
+    return np.any(np.abs(A) > tol, axis=1)
+
+
+def _select_enter_pivot(c_hat, bl, a, rule="bland", tol=1e-12):
+    """
+    Selects a pivot to enter the basis. Currently Bland's rule - the smallest
+    index that has a negative reduced cost - is the default.
+    """
+    if rule.lower() == "mrc":  # index with minimum reduced cost
+        return a[~bl][np.argmin(c_hat)]
+    else:  # smallest index w/ negative reduced cost
+        return a[~bl][c_hat < -tol][0]
+
+
+def _display_iter(phase, iteration, slack, con, fun):
+    """
+    Print indicators of optimization status to the console.
+    """
+    header = True if not iteration % 20 else False
+
+    if header:
+        print("Phase",
+              "Iteration",
+              "Minimum Slack      ",
+              "Constraint Residual",
+              "Objective          ")
+
+    # :<20.13 = left-justified, minimum width 20, 13 significant figures
+    fmt = '{0:<6}{1:<10}{2:<20.13}{3:<20.13}{4:<20.13}'
+
+    try:
+        slack = np.min(slack)
+    except ValueError:
+        slack = "NA"
+
+    print(fmt.format(phase, iteration, slack, np.linalg.norm(con), fun))
+
+
+def _display_and_callback(phase_one_n, x, postsolve_args, status,
+                          iteration, disp, callback):
+    if phase_one_n is not None:
+        phase = 1
+        x_postsolve = x[:phase_one_n]
+    else:
+        phase = 2
+        x_postsolve = x
+    x_o, fun, slack, con = _postsolve(x_postsolve, postsolve_args)
+
+    if callback is not None:
+        res = OptimizeResult({'x': x_o, 'fun': fun, 'slack': slack,
+                              'con': con, 'nit': iteration,
+                              'phase': phase, 'complete': False,
+                              'status': 0, 'message': "",
+                              'success': False})
+        callback(res)
+    if disp:
+        _display_iter(phase, iteration, slack, con, fun)
+
+
+def _phase_two(c, A, x, b, callback, postsolve_args, maxiter, tol, disp,
+               maxupdate, mast, pivot, iteration=0, phase_one_n=None):
+    """
+    The heart of the simplex method. Beginning with a basic feasible
+    solution, moves to adjacent basic feasible solutions with successively
+    lower reduced cost. Terminates when there are no basic feasible
+    solutions with lower reduced cost or if the problem is determined
+    to be unbounded.
+    """
+    m, n = A.shape
+    status = 0
+    a = np.arange(n)                    # indices of columns of A
+    ab = np.arange(m)                   # indices of columns of B
+    if maxupdate:
+        # basis matrix factorization object; similar to B = A[:, b]
+        B = BGLU(A, b, maxupdate, mast)
+    else:
+        B = LU(A, b)
+
+    for iteration in range(iteration, iteration + maxiter):
+
+        if disp or callback is not None:
+            _display_and_callback(phase_one_n, x, postsolve_args, status,
+                                  iteration, disp, callback)
+
+        bl = np.zeros(len(a), dtype=bool)
+        bl[b] = 1
+
+        xb = x[b]       # basic variables
+        cb = c[b]       # basic costs
+
+        try:
+            v = B.solve(cb, transposed=True)  # similar to v = solve(B.T, cb)
+        except LinAlgError:
+            status = 4
+            break
+
+        c_hat = c - v.dot(A)    # reduced cost
+        c_hat = c_hat[~bl]
+
+        if np.all(c_hat >= -tol):  # all reduced costs positive -> terminate
+            break
+
+        j = _select_enter_pivot(c_hat, bl, a, rule=pivot, tol=tol)
+        u = B.solve(A[:, j])        # similar to u = solve(B, A[:, j])
+
+        i = u > tol                 # if none of the u are positive, unbounded
+        if not np.any(i):
+            status = 3
+            break
+
+        th = xb[i]/u[i]
+        l = np.argmin(th)           # implicitly selects smallest subscript
+        th_star = th[l]             # step size
+
+        x[b] = x[b] - th_star*u     # take step
+        x[j] = th_star
+        B.update(ab[i][l], j)       # modify basis
+        b = B.b                     # similar to b[ab[i][l]] =
+
+    else:
+        # If the end of the for loop is reached (without a break statement),
+        # then another step has been taken, so the iteration counter should
+        # increment, info should be displayed, and callback should be called.
+        iteration += 1
+        status = 1
+        if disp or callback is not None:
+            _display_and_callback(phase_one_n, x, postsolve_args, status,
+                                  iteration, disp, callback)
+
+    return x, b, status, iteration
+
+
+def _linprog_rs(c, c0, A, b, x0, callback, postsolve_args,
+                maxiter=5000, tol=1e-12, disp=False,
+                maxupdate=10, mast=False, pivot="mrc",
+                **unknown_options):
+    """
+    Solve the following linear programming problem via a two-phase
+    revised simplex algorithm::
+
+        minimize:     c @ x
+
+        subject to:  A @ x == b
+                     0 <= x < oo
+
+    User-facing documentation is in _linprog_doc.py.
+
+    Parameters
+    ----------
+    c : 1-D array
+        Coefficients of the linear objective function to be minimized.
+    c0 : float
+        Constant term in objective function due to fixed (and eliminated)
+        variables. (Currently unused.)
+    A : 2-D array
+        2-D array which, when matrix-multiplied by ``x``, gives the values of
+        the equality constraints at ``x``.
+    b : 1-D array
+        1-D array of values representing the RHS of each equality constraint
+        (row) in ``A``.
+    x0 : 1-D array, optional
+        Starting values of the independent variables, which will be refined by
+        the optimization algorithm. For the revised simplex method, these must
+        correspond with a basic feasible solution.
+    callback : callable, optional
+        If a callback function is provided, it will be called within each
+        iteration of the algorithm. The callback function must accept a single
+        `scipy.optimize.OptimizeResult` consisting of the following fields:
+
+            x : 1-D array
+                Current solution vector.
+            fun : float
+                Current value of the objective function ``c @ x``.
+            success : bool
+                True only when an algorithm has completed successfully,
+                so this is always False as the callback function is called
+                only while the algorithm is still iterating.
+            slack : 1-D array
+                The values of the slack variables. Each slack variable
+                corresponds to an inequality constraint. If the slack is zero,
+                the corresponding constraint is active.
+            con : 1-D array
+                The (nominally zero) residuals of the equality constraints,
+                that is, ``b - A_eq @ x``.
+            phase : int
+                The phase of the algorithm being executed.
+            status : int
+                For revised simplex, this is always 0 because if a different
+                status is detected, the algorithm terminates.
+            nit : int
+                The number of iterations performed.
+            message : str
+                A string descriptor of the exit status of the optimization.
+    postsolve_args : tuple
+        Data needed by _postsolve to convert the solution to the standard-form
+        problem into the solution to the original problem.
+
+    Options
+    -------
+    maxiter : int
+       The maximum number of iterations to perform in either phase.
+    tol : float
+        The tolerance which determines when a solution is "close enough" to
+        zero in Phase 1 to be considered a basic feasible solution or close
+        enough to positive to serve as an optimal solution.
+    disp : bool
+        Set to ``True`` if indicators of optimization status are to be printed
+        to the console each iteration.
+    maxupdate : int
+        The maximum number of updates performed on the LU factorization.
+        After this many updates is reached, the basis matrix is factorized
+        from scratch.
+    mast : bool
+        Minimize Amortized Solve Time. If enabled, the average time to solve
+        a linear system using the basis factorization is measured. Typically,
+        the average solve time will decrease with each successive solve after
+        initial factorization, as factorization takes much more time than the
+        solve operation (and updates). Eventually, however, the updated
+        factorization becomes sufficiently complex that the average solve time
+        begins to increase. When this is detected, the basis is refactorized
+        from scratch. Enable this option to maximize speed at the risk of
+        nondeterministic behavior. Ignored if ``maxupdate`` is 0.
+    pivot : "mrc" or "bland"
+        Pivot rule: Minimum Reduced Cost (default) or Bland's rule. Choose
+        Bland's rule if iteration limit is reached and cycling is suspected.
+    unknown_options : dict
+        Optional arguments not used by this particular solver. If
+        `unknown_options` is non-empty a warning is issued listing all
+        unused options.
+
+    Returns
+    -------
+    x : 1-D array
+        Solution vector.
+    status : int
+        An integer representing the exit status of the optimization::
+
+         0 : Optimization terminated successfully
+         1 : Iteration limit reached
+         2 : Problem appears to be infeasible
+         3 : Problem appears to be unbounded
+         4 : Numerical difficulties encountered
+         5 : No constraints; turn presolve on
+         6 : Guess x0 cannot be converted to a basic feasible solution
+
+    message : str
+        A string descriptor of the exit status of the optimization.
+    iteration : int
+        The number of iterations taken to solve the problem.
+    """
+
+    _check_unknown_options(unknown_options)
+
+    messages = ["Optimization terminated successfully.",
+                "Iteration limit reached.",
+                "The problem appears infeasible, as the phase one auxiliary "
+                "problem terminated successfully with a residual of {0:.1e}, "
+                "greater than the tolerance {1} required for the solution to "
+                "be considered feasible. Consider increasing the tolerance to "
+                "be greater than {0:.1e}. If this tolerance is unnaceptably "
+                "large, the problem is likely infeasible.",
+                "The problem is unbounded, as the simplex algorithm found "
+                "a basic feasible solution from which there is a direction "
+                "with negative reduced cost in which all decision variables "
+                "increase.",
+                "Numerical difficulties encountered; consider trying "
+                "method='interior-point'.",
+                "Problems with no constraints are trivially solved; please "
+                "turn presolve on.",
+                "The guess x0 cannot be converted to a basic feasible "
+                "solution. "
+                ]
+
+    if A.size == 0:  # address test_unbounded_below_no_presolve_corrected
+        return np.zeros(c.shape), 5, messages[5], 0
+
+    x, basis, A, b, residual, status, iteration = (
+        _phase_one(A, b, x0, callback, postsolve_args,
+                   maxiter, tol, disp, maxupdate, mast, pivot))
+
+    if status == 0:
+        x, basis, status, iteration = _phase_two(c, A, x, basis, callback,
+                                                 postsolve_args,
+                                                 maxiter, tol, disp,
+                                                 maxupdate, mast, pivot,
+                                                 iteration)
+
+    return x, status, messages[status].format(residual, tol), iteration
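+
+
+# A usage sketch (illustrative; not part of the vendored SciPy source): this
+# solver is likewise reached through ``scipy.optimize.linprog``, which builds
+# ``postsolve_args`` during presolve. Assuming a SciPy version in which
+# ``method='revised simplex'`` is available:
+#
+#     from scipy.optimize import linprog
+#     res = linprog(c=[-1, -2], A_ub=[[1, 1], [1, 0]], b_ub=[4, 2],
+#                   method='revised simplex',
+#                   options={'maxupdate': 10, 'pivot': 'mrc'})
+#     # res.x is approximately [0, 4] with res.fun approximately -8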
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_linprog_simplex.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_linprog_simplex.py
new file mode 100644
index 00000000..708e3145
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_linprog_simplex.py
@@ -0,0 +1,661 @@
+"""Simplex method for  linear programming
+
+The *simplex* method uses a traditional, full-tableau implementation of
+Dantzig's simplex algorithm [1]_, [2]_ (*not* the Nelder-Mead simplex).
+This algorithm is included for backwards compatibility and educational
+purposes.
+
+    .. versionadded:: 0.15.0
+
+Warnings
+--------
+
+The simplex method may encounter numerical difficulties when pivot
+values are close to the specified tolerance. If this is encountered, try
+removing any redundant constraints, changing the pivot strategy to
+Bland's rule, or increasing the tolerance value.
+
+Alternatively, more robust methods may be used. See
+:ref:`'interior-point' <linprog-interior-point>` and
+:ref:`'revised simplex' <linprog-revised-simplex>`.
+
+References
+----------
+.. [1] Dantzig, George B., Linear programming and extensions. Rand
+       Corporation Research Study Princeton Univ. Press, Princeton, NJ,
+       1963
+.. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to
+       Mathematical Programming", McGraw-Hill, Chapter 4.
+"""
+
+import numpy as np
+from warnings import warn
+from ._optimize import OptimizeResult, OptimizeWarning, _check_unknown_options
+from ._linprog_util import _postsolve
+
+
+def _pivot_col(T, tol=1e-9, bland=False):
+    """
+    Given a linear programming simplex tableau, determine the column
+    of the variable to enter the basis.
+
+    Parameters
+    ----------
+    T : 2-D array
+        A 2-D array representing the simplex tableau, T, corresponding to the
+        linear programming problem. It should have the form:
+
+        [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
+         [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
+         .
+         .
+         .
+         [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
+         [c[0],   c[1], ...,   c[n_total],    0]]
+
+        for a Phase 2 problem, or the form:
+
+        [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
+         [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
+         .
+         .
+         .
+         [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
+         [c[0],   c[1], ...,   c[n_total],   0],
+         [c'[0],  c'[1], ...,  c'[n_total],  0]]
+
+         for a Phase 1 problem (a problem in which a basic feasible solution
+         is sought prior to maximizing the actual objective). ``T`` is
+         modified in place by ``_solve_simplex``.
+    tol : float
+        Elements in the objective row larger than -tol will not be considered
+        for pivoting. Nominally this value is zero, but numerical issues
+        cause a tolerance about zero to be necessary.
+    bland : bool
+        If True, use Bland's rule for selection of the column (select the
+        first column with a negative coefficient in the objective row,
+        regardless of magnitude).
+
+    Returns
+    -------
+    status: bool
+        True if a suitable pivot column was found, otherwise False.
+        A return of False indicates that the linear programming simplex
+        algorithm is complete.
+    col: int
+        The index of the column of the pivot element.
+        If status is False, col will be returned as nan.
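+
+    Examples
+    --------
+    A small sketch (illustrative tableau): the objective row is
+    ``[-1, -2, 0, 0]``, so the most negative coefficient is in column 1.
+
+    >>> import numpy as np
+    >>> T = np.array([[1., 1., 1., 0., 4.],
+    ...               [1., 0., 0., 1., 2.],
+    ...               [-1., -2., 0., 0., 0.]])
+    >>> found, col = _pivot_col(T)
+    >>> # found is True and col == 1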
+    """
+    ma = np.ma.masked_where(T[-1, :-1] >= -tol, T[-1, :-1], copy=False)
+    if ma.count() == 0:
+        return False, np.nan
+    if bland:
+        # ma.mask is sometimes 0d
+        return True, np.nonzero(np.logical_not(np.atleast_1d(ma.mask)))[0][0]
+    return True, np.ma.nonzero(ma == ma.min())[0][0]
+
+
+def _pivot_row(T, basis, pivcol, phase, tol=1e-9, bland=False):
+    """
+    Given a linear programming simplex tableau, determine the row for the
+    pivot operation.
+
+    Parameters
+    ----------
+    T : 2-D array
+        A 2-D array representing the simplex tableau, T, corresponding to the
+        linear programming problem. It should have the form:
+
+        [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
+         [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
+         .
+         .
+         .
+         [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
+         [c[0],   c[1], ...,   c[n_total],    0]]
+
+        for a Phase 2 problem, or the form:
+
+        [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
+         [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
+         .
+         .
+         .
+         [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
+         [c[0],   c[1], ...,   c[n_total],   0],
+         [c'[0],  c'[1], ...,  c'[n_total],  0]]
+
+         for a Phase 1 problem (a problem in which a basic feasible solution
+         is sought prior to maximizing the actual objective). ``T`` is
+         modified in place by ``_solve_simplex``.
+    basis : array
+        A list of the current basic variables.
+    pivcol : int
+        The index of the pivot column.
+    phase : int
+        The phase of the simplex algorithm (1 or 2).
+    tol : float
+        Elements in the pivot column smaller than tol will not be considered
+        for pivoting. Nominally this value is zero, but numerical issues
+        cause a tolerance about zero to be necessary.
+    bland : bool
+        If True, use Bland's rule for selection of the row (if more than one
+        row can be used, choose the one with the lowest variable index).
+
+    Returns
+    -------
+    status: bool
+        True if a suitable pivot row was found, otherwise False. A return
+        of False indicates that the linear programming problem is unbounded.
+    row: int
+        The index of the row of the pivot element. If status is False, row
+        will be returned as nan.
+    """
+    if phase == 1:
+        k = 2
+    else:
+        k = 1
+    ma = np.ma.masked_where(T[:-k, pivcol] <= tol, T[:-k, pivcol], copy=False)
+    if ma.count() == 0:
+        return False, np.nan
+    mb = np.ma.masked_where(T[:-k, pivcol] <= tol, T[:-k, -1], copy=False)
+    q = mb / ma
+    min_rows = np.ma.nonzero(q == q.min())[0]
+    if bland:
+        return True, min_rows[np.argmin(np.take(basis, min_rows))]
+    return True, min_rows[0]
+
+
+def _apply_pivot(T, basis, pivrow, pivcol, tol=1e-9):
+    """
+    Pivot the simplex tableau in place on the element given by
+    (pivrow, pivcol). The entering variable corresponds to the column given
+    by pivcol, forcing the variable basis[pivrow] to leave the basis.
+
+    Parameters
+    ----------
+    T : 2-D array
+        A 2-D array representing the simplex tableau, T, corresponding to the
+        linear programming problem. It should have the form:
+
+        [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
+         [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
+         .
+         .
+         .
+         [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
+         [c[0],   c[1], ...,   c[n_total],    0]]
+
+        for a Phase 2 problem, or the form:
+
+        [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
+         [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
+         .
+         .
+         .
+         [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
+         [c[0],   c[1], ...,   c[n_total],   0],
+         [c'[0],  c'[1], ...,  c'[n_total],  0]]
+
+         for a Phase 1 problem (a problem in which a basic feasible solution
+         is sought prior to maximizing the actual objective). ``T`` is
+         modified in place by ``_solve_simplex``.
+    basis : 1-D array
+        An array of the indices of the basic variables, such that basis[i]
+        contains the column corresponding to the basic variable for row i.
+        Basis is modified in place by _apply_pivot.
+    pivrow : int
+        Row index of the pivot.
+    pivcol : int
+        Column index of the pivot.
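+
+    Examples
+    --------
+    A small sketch (illustrative tableau): pivoting on row 0, column 1
+    brings variable 1 into the basis in place of variable 2.
+
+    >>> import numpy as np
+    >>> T = np.array([[1., 1., 1., 0., 4.],
+    ...               [1., 0., 0., 1., 2.],
+    ...               [-1., -2., 0., 0., 0.]])
+    >>> basis = np.array([2, 3])
+    >>> _apply_pivot(T, basis, pivrow=0, pivcol=1)
+    >>> # basis is now [1, 3] and column 1 of the objective row is zero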
+    """
+    basis[pivrow] = pivcol
+    pivval = T[pivrow, pivcol]
+    T[pivrow] = T[pivrow] / pivval
+    for irow in range(T.shape[0]):
+        if irow != pivrow:
+            T[irow] = T[irow] - T[pivrow] * T[irow, pivcol]
+
+    # The selected pivot should never lead to a pivot value less than the tol.
+    if np.isclose(pivval, tol, atol=0, rtol=1e4):
+        message = (
+            "The pivot operation produces a pivot value of:{0: .1e}, "
+            "which is only slightly greater than the specified "
+            "tolerance{1: .1e}. This may lead to issues regarding the "
+            "numerical stability of the simplex method. "
+            "Removing redundant constraints, changing the pivot strategy "
+            "via Bland's rule or increasing the tolerance may "
+            "help reduce the issue.".format(pivval, tol))
+        warn(message, OptimizeWarning, stacklevel=5)
+
+
+def _solve_simplex(T, n, basis, callback, postsolve_args,
+                   maxiter=1000, tol=1e-9, phase=2, bland=False, nit0=0,
+                   ):
+    """
+    Solve a linear programming problem in "standard form" using the Simplex
+    Method. Linear Programming is intended to solve the following problem form:
+
+    Minimize::
+
+        c @ x
+
+    Subject to::
+
+        A @ x == b
+            x >= 0
+
+    Parameters
+    ----------
+    T : 2-D array
+        A 2-D array representing the simplex tableau, T, corresponding to the
+        linear programming problem. It should have the form:
+
+        [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
+         [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
+         .
+         .
+         .
+         [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
+         [c[0],   c[1], ...,   c[n_total],    0]]
+
+        for a Phase 2 problem, or the form:
+
+        [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
+         [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
+         .
+         .
+         .
+         [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
+         [c[0],   c[1], ...,   c[n_total],   0],
+         [c'[0],  c'[1], ...,  c'[n_total],  0]]
+
+         for a Phase 1 problem (a problem in which a basic feasible solution
+         is sought prior to maximizing the actual objective). ``T`` is
+         modified in place by ``_solve_simplex``.
+    n : int
+        The number of true variables in the problem.
+    basis : 1-D array
+        An array of the indices of the basic variables, such that basis[i]
+        contains the column corresponding to the basic variable for row i.
+        Basis is modified in place by _solve_simplex
+    callback : callable, optional
+        If a callback function is provided, it will be called within each
+        iteration of the algorithm. The callback must accept a
+        `scipy.optimize.OptimizeResult` consisting of the following fields:
+
+            x : 1-D array
+                Current solution vector
+            fun : float
+                Current value of the objective function
+            success : bool
+                True only when a phase has completed successfully. This
+                will be False for most iterations.
+            slack : 1-D array
+                The values of the slack variables. Each slack variable
+                corresponds to an inequality constraint. If the slack is zero,
+                the corresponding constraint is active.
+            con : 1-D array
+                The (nominally zero) residuals of the equality constraints,
+                that is, ``b - A_eq @ x``
+            phase : int
+                The phase of the optimization being executed. In phase 1 a
+                basic feasible solution is sought and the tableau ``T`` has
+                an additional row representing an alternate objective
+                function.
+            status : int
+                An integer representing the exit status of the optimization::
+
+                     0 : Optimization terminated successfully
+                     1 : Iteration limit reached
+                     2 : Problem appears to be infeasible
+                     3 : Problem appears to be unbounded
+                     4 : Serious numerical difficulties encountered
+
+            nit : int
+                The number of iterations performed.
+            message : str
+                A string descriptor of the exit status of the optimization.
+    postsolve_args : tuple
+        Data needed by _postsolve to convert the solution to the standard-form
+        problem into the solution to the original problem.
+    maxiter : int
+        The maximum number of iterations to perform before aborting the
+        optimization.
+    tol : float
+        The tolerance which determines when a solution is "close enough" to
+        zero in Phase 1 to be considered a basic feasible solution or close
+        enough to positive to serve as an optimal solution.
+    phase : int
+        The phase of the optimization being executed. In phase 1 a basic
+        feasible solution is sought and the tableau ``T`` has an additional
+        row representing an alternate objective function.
+    bland : bool
+        If True, choose pivots using Bland's rule [3]_. In problems which
+        fail to converge due to cycling, using Bland's rule can provide
+        convergence at the expense of a less optimal path about the simplex.
+    nit0 : int
+        The initial iteration number used to keep an accurate iteration total
+        in a two-phase problem.
+
+    Returns
+    -------
+    nit : int
+        The number of iterations. Used to keep an accurate iteration total
+        in the two-phase problem.
+    status : int
+        An integer representing the exit status of the optimization::
+
+         0 : Optimization terminated successfully
+         1 : Iteration limit reached
+         2 : Problem appears to be infeasible
+         3 : Problem appears to be unbounded
+         4 : Serious numerical difficulties encountered
+
+    """
+    nit = nit0
+    status = 0
+    message = ''
+    complete = False
+
+    if phase == 1:
+        m = T.shape[1]-2
+    elif phase == 2:
+        m = T.shape[1]-1
+    else:
+        raise ValueError("Argument 'phase' to _solve_simplex must be 1 or 2")
+
+    if phase == 2:
+        # Check if any artificial variables are still in the basis.
+        # If yes, check if any coefficient from this row and a column
+        # corresponding to one of the non-artificial variables is non-zero.
+        # If found, pivot at this term. If not, start phase 2.
+        # Do this for all artificial variables in the basis.
+        # Ref: "An Introduction to Linear Programming and Game Theory"
+        # by Paul R. Thie, Gerard E. Keough, 3rd Ed,
+        # Chapter 3.7 Redundant Systems (page 102)
+        for pivrow in [row for row in range(basis.size)
+                       if basis[row] > T.shape[1] - 2]:
+            non_zero_row = [col for col in range(T.shape[1] - 1)
+                            if abs(T[pivrow, col]) > tol]
+            if len(non_zero_row) > 0:
+                pivcol = non_zero_row[0]
+                _apply_pivot(T, basis, pivrow, pivcol, tol)
+                nit += 1
+
+    if len(basis[:m]) == 0:
+        solution = np.empty(T.shape[1] - 1, dtype=np.float64)
+    else:
+        solution = np.empty(max(T.shape[1] - 1, max(basis[:m]) + 1),
+                            dtype=np.float64)
+
+    while not complete:
+        # Find the pivot column
+        pivcol_found, pivcol = _pivot_col(T, tol, bland)
+        if not pivcol_found:
+            pivcol = np.nan
+            pivrow = np.nan
+            status = 0
+            complete = True
+        else:
+            # Find the pivot row
+            pivrow_found, pivrow = _pivot_row(T, basis, pivcol, phase, tol, bland)
+            if not pivrow_found:
+                status = 3
+                complete = True
+
+        if callback is not None:
+            solution[:] = 0
+            solution[basis[:n]] = T[:n, -1]
+            x = solution[:m]
+            x, fun, slack, con = _postsolve(
+                x, postsolve_args
+            )
+            res = OptimizeResult({
+                'x': x,
+                'fun': fun,
+                'slack': slack,
+                'con': con,
+                'status': status,
+                'message': message,
+                'nit': nit,
+                'success': status == 0 and complete,
+                'phase': phase,
+                'complete': complete,
+                })
+            callback(res)
+
+        if not complete:
+            if nit >= maxiter:
+                # Iteration limit exceeded
+                status = 1
+                complete = True
+            else:
+                _apply_pivot(T, basis, pivrow, pivcol, tol)
+                nit += 1
+    return nit, status
+
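+# An illustrative phase-2 run (not part of the original source). The tableau
+# below encodes: minimize -x1 - 2*x2 subject to x1 + x2 == 2, with x1
+# initially basic; postsolve_args is only consulted when a callback is
+# supplied, so it may be None here. One pivot brings x2 into the basis, and
+# -T[-1, -1] then holds the optimal objective value (-4).
+#
+#     >>> import numpy as np
+#     >>> T = np.array([[1.0, 1.0, 2.0],
+#     ...               [0.0, -1.0, 2.0]])
+#     >>> basis = np.array([0])
+#     >>> _solve_simplex(T, 1, basis, callback=None, postsolve_args=None)
+#     (1, 0)
+#     >>> basis                  # x2 (column 1) is now basic
+#     array([1])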
+
+def _linprog_simplex(c, c0, A, b, callback, postsolve_args,
+                     maxiter=1000, tol=1e-9, disp=False, bland=False,
+                     **unknown_options):
+    """
+    Minimize a linear objective function subject to linear equality and
+    non-negativity constraints using the two phase simplex method.
+    Linear programming is intended to solve problems of the following form:
+
+    Minimize::
+
+        c @ x
+
+    Subject to::
+
+        A @ x == b
+            x >= 0
+
+    User-facing documentation is in _linprog_doc.py.
+
+    Parameters
+    ----------
+    c : 1-D array
+        Coefficients of the linear objective function to be minimized.
+    c0 : float
+        Constant term in objective function due to fixed (and eliminated)
+        variables. (Purely for display.)
+    A : 2-D array
+        2-D array such that ``A @ x``, gives the values of the equality
+        constraints at ``x``.
+    b : 1-D array
+        1-D array of values representing the right hand side of each equality
+        constraint (row) in ``A``.
+    callback : callable, optional
+        If a callback function is provided, it will be called within each
+        iteration of the algorithm. The callback function must accept a single
+        `scipy.optimize.OptimizeResult` consisting of the following fields:
+
+            x : 1-D array
+                Current solution vector
+            fun : float
+                Current value of the objective function
+            success : bool
+                True when an algorithm has completed successfully.
+            slack : 1-D array
+                The values of the slack variables. Each slack variable
+                corresponds to an inequality constraint. If the slack is zero,
+                the corresponding constraint is active.
+            con : 1-D array
+                The (nominally zero) residuals of the equality constraints,
+                that is, ``b - A_eq @ x``
+            phase : int
+                The phase of the algorithm being executed.
+            status : int
+                An integer representing the status of the optimization::
+
+                     0 : Algorithm proceeding nominally
+                     1 : Iteration limit reached
+                     2 : Problem appears to be infeasible
+                     3 : Problem appears to be unbounded
+                     4 : Serious numerical difficulties encountered
+            nit : int
+                The number of iterations performed.
+            message : str
+                A string descriptor of the exit status of the optimization.
+    postsolve_args : tuple
+        Data needed by _postsolve to convert the solution to the standard-form
+        problem into the solution to the original problem.
+
+    Options
+    -------
+    maxiter : int
+       The maximum number of iterations to perform.
+    disp : bool
+        If True, print exit status message to sys.stdout
+    tol : float
+        The tolerance which determines when a solution is "close enough" to
+        zero in Phase 1 to be considered a basic feasible solution or close
+        enough to positive to serve as an optimal solution.
+    bland : bool
+        If True, use Bland's anti-cycling rule [3]_ to choose pivots to
+        prevent cycling. If False, choose pivots which should lead to a
+        converged solution more quickly. The latter method is subject to
+        cycling (non-convergence) in rare instances.
+    unknown_options : dict
+        Optional arguments not used by this particular solver. If
+        `unknown_options` is non-empty a warning is issued listing all
+        unused options.
+
+    Returns
+    -------
+    x : 1-D array
+        Solution vector.
+    status : int
+        An integer representing the exit status of the optimization::
+
+         0 : Optimization terminated successfully
+         1 : Iteration limit reached
+         2 : Problem appears to be infeasible
+         3 : Problem appears to be unbounded
+         4 : Serious numerical difficulties encountered
+
+    message : str
+        A string descriptor of the exit status of the optimization.
+    iteration : int
+        The number of iterations taken to solve the problem.
+
+    References
+    ----------
+    .. [1] Dantzig, George B., Linear programming and extensions. Rand
+           Corporation Research Study Princeton Univ. Press, Princeton, NJ,
+           1963
+    .. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to
+           Mathematical Programming", McGraw-Hill, Chapter 4.
+    .. [3] Bland, Robert G. New finite pivoting rules for the simplex method.
+           Mathematics of Operations Research (2), 1977: pp. 103-107.
+
+
+    Notes
+    -----
+    The expected problem formulation differs between the top level ``linprog``
+    module and the method specific solvers. The method specific solvers expect a
+    problem in standard form:
+
+    Minimize::
+
+        c @ x
+
+    Subject to::
+
+        A @ x == b
+            x >= 0
+
+    Whereas the top level ``linprog`` module expects a problem of form:
+
+    Minimize::
+
+        c @ x
+
+    Subject to::
+
+        A_ub @ x <= b_ub
+        A_eq @ x == b_eq
+         lb <= x <= ub
+
+    where ``lb = 0`` and ``ub = None`` unless set in ``bounds``.
+
+    The original problem contains equality, upper-bound and variable constraints
+    whereas the method specific solver requires equality constraints and
+    variable non-negativity.
+
+    The ``linprog`` module converts the original problem to standard form by
+    converting the simple bounds to upper bound constraints, introducing
+    non-negative slack variables for inequality constraints, and expressing
+    unbounded variables as the difference between two non-negative variables.
+    """
+    _check_unknown_options(unknown_options)
+
+    status = 0
+    messages = {0: "Optimization terminated successfully.",
+                1: "Iteration limit reached.",
+                2: "Optimization failed. Unable to find a feasible"
+                   " starting point.",
+                3: "Optimization failed. The problem appears to be unbounded.",
+                4: "Optimization failed. Singular matrix encountered."}
+
+    n, m = A.shape
+
+    # All constraints must have b >= 0.
+    is_negative_constraint = np.less(b, 0)
+    A[is_negative_constraint] *= -1
+    b[is_negative_constraint] *= -1
+
+    # As all constraints are equality constraints the artificial variables
+    # will also be basic variables.
+    av = np.arange(n) + m
+    basis = av.copy()
+
+    # Format the phase one tableau by adding artificial variables and stacking
+    # the constraints, the objective row and pseudo-objective row.
+    row_constraints = np.hstack((A, np.eye(n), b[:, np.newaxis]))
+    row_objective = np.hstack((c, np.zeros(n), c0))
+    row_pseudo_objective = -row_constraints.sum(axis=0)
+    row_pseudo_objective[av] = 0
+    T = np.vstack((row_constraints, row_objective, row_pseudo_objective))
+
+    nit1, status = _solve_simplex(T, n, basis, callback=callback,
+                                  postsolve_args=postsolve_args,
+                                  maxiter=maxiter, tol=tol, phase=1,
+                                  bland=bland
+                                  )
+    # if pseudo objective is zero, remove the last row from the tableau and
+    # proceed to phase 2
+    nit2 = nit1
+    if abs(T[-1, -1]) < tol:
+        # Remove the pseudo-objective row from the tableau
+        T = T[:-1, :]
+        # Remove the artificial variable columns from the tableau
+        T = np.delete(T, av, 1)
+    else:
+        # Failure to find a feasible starting point
+        status = 2
+        messages[status] = (
+            "Phase 1 of the simplex method failed to find a feasible "
+            "solution. The pseudo-objective function evaluates to {0:.1e} "
+            "which exceeds the required tolerance of {1} for a solution to be "
+            "considered 'close enough' to zero to be a basic solution. "
+            "Consider increasing the tolerance to be greater than {0:.1e}. "
+            "If this tolerance is unacceptably  large the problem may be "
+            "infeasible.".format(abs(T[-1, -1]), tol)
+        )
+
+    if status == 0:
+        # Phase 2
+        nit2, status = _solve_simplex(T, n, basis, callback=callback,
+                                      postsolve_args=postsolve_args,
+                                      maxiter=maxiter, tol=tol, phase=2,
+                                      bland=bland, nit0=nit1
+                                      )
+
+    solution = np.zeros(n + m)
+    solution[basis[:n]] = T[:n, -1]
+    x = solution[:m]
+
+    return x, status, messages[status], int(nit2)
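+
+# An illustrative end-to-end call (not from the original source). The solver
+# expects a problem already in standard form (equality constraints only);
+# the slack-variable conversion described in the Notes above is normally
+# done by the top-level linprog module. callback may be None, in which case
+# postsolve_args is never used; A and b may be modified in place, hence the
+# copies. Minimizing -x1 - 2*x2 subject to x1 + x2 == 2 puts everything
+# into x2:
+#
+#     >>> import numpy as np
+#     >>> c = np.array([-1.0, -2.0])
+#     >>> A = np.array([[1.0, 1.0]])
+#     >>> b = np.array([2.0])
+#     >>> x, status, message, nit = _linprog_simplex(
+#     ...     c, 0, A.copy(), b.copy(), callback=None, postsolve_args=None)
+#     >>> x, status
+#     (array([0., 2.]), 0)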
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_linprog_util.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_linprog_util.py
new file mode 100644
index 00000000..a1e59041
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_linprog_util.py
@@ -0,0 +1,1515 @@
+"""
+Method-agnostic utility functions for linear programming
+"""
+
+import numpy as np
+import scipy.sparse as sps
+from warnings import warn
+from ._optimize import OptimizeWarning
+from scipy.optimize._remove_redundancy import (
+    _remove_redundancy_svd, _remove_redundancy_pivot_sparse,
+    _remove_redundancy_pivot_dense, _remove_redundancy_id
+    )
+from collections import namedtuple
+
+_LPProblem = namedtuple('_LPProblem',
+                        'c A_ub b_ub A_eq b_eq bounds x0 integrality')
+_LPProblem.__new__.__defaults__ = (None,) * 7  # make c the only required arg
+_LPProblem.__doc__ = \
+    """ Represents a linear-programming problem.
+
+    Attributes
+    ----------
+    c : 1D array
+        The coefficients of the linear objective function to be minimized.
+    A_ub : 2D array, optional
+        The inequality constraint matrix. Each row of ``A_ub`` specifies the
+        coefficients of a linear inequality constraint on ``x``.
+    b_ub : 1D array, optional
+        The inequality constraint vector. Each element represents an
+        upper bound on the corresponding value of ``A_ub @ x``.
+    A_eq : 2D array, optional
+        The equality constraint matrix. Each row of ``A_eq`` specifies the
+        coefficients of a linear equality constraint on ``x``.
+    b_eq : 1D array, optional
+        The equality constraint vector. Each element of ``A_eq @ x`` must equal
+        the corresponding element of ``b_eq``.
+    bounds : various valid formats, optional
+        The bounds of ``x``, as ``min`` and ``max`` pairs.
+        If bounds are specified for all N variables separately, valid formats
+        are:
+        * a 2D array (N x 2);
+        * a sequence of N sequences, each with 2 values.
+        If all variables have the same bounds, the bounds can be specified as
+        a 1-D or 2-D array or sequence with 2 scalar values.
+        If all variables have a lower bound of 0 and no upper bound, the bounds
+        parameter can be omitted (or given as None).
+        Absent lower and/or upper bounds can be specified as -numpy.inf (no
+        lower bound), numpy.inf (no upper bound) or None (both).
+    x0 : 1D array, optional
+        Guess values of the decision variables, which will be refined by
+        the optimization algorithm. This argument is currently used only by the
+        'revised simplex' method, and can only be used if `x0` represents a
+        basic feasible solution.
+    integrality : 1-D array or int, optional
+        Indicates the type of integrality constraint on each decision variable.
+
+        ``0`` : Continuous variable; no integrality constraint.
+
+        ``1`` : Integer variable; decision variable must be an integer
+        within `bounds`.
+
+        ``2`` : Semi-continuous variable; decision variable must be within
+        `bounds` or take value ``0``.
+
+        ``3`` : Semi-integer variable; decision variable must be an integer
+        within `bounds` or take value ``0``.
+
+        By default, all variables are continuous.
+
+        For mixed integrality constraints, supply an array of shape `c.shape`.
+        To infer a constraint on each decision variable from shorter inputs,
+        the argument will be broadcasted to `c.shape` using `np.broadcast_to`.
+
+        This argument is currently used only by the ``'highs'`` method and
+        ignored otherwise.
+
+    Notes
+    -----
+    This namedtuple supports 2 ways of initialization:
+    >>> lp1 = _LPProblem(c=[-1, 4], A_ub=[[-3, 1], [1, 2]], b_ub=[6, 4])
+    >>> lp2 = _LPProblem([-1, 4], [[-3, 1], [1, 2]], [6, 4])
+
+    Note that only ``c`` is a required argument here, whereas all other arguments
+    ``A_ub``, ``b_ub``, ``A_eq``, ``b_eq``, ``bounds``, ``x0`` are optional with
+    default values of None.
+    For example, ``A_eq`` and ``b_eq`` can be set without ``A_ub`` or ``b_ub``:
+    >>> lp3 = _LPProblem(c=[-1, 4], A_eq=[[2, 1]], b_eq=[10])
+    """
+
+
+def _check_sparse_inputs(options, meth, A_ub, A_eq):
+    """
+    Check the provided ``A_ub`` and ``A_eq`` matrices conform to the specified
+    optional sparsity variables.
+
+    Parameters
+    ----------
+    A_ub : 2-D array, optional
+        2-D array such that ``A_ub @ x`` gives the values of the upper-bound
+        inequality constraints at ``x``.
+    A_eq : 2-D array, optional
+        2-D array such that ``A_eq @ x`` gives the values of the equality
+        constraints at ``x``.
+    options : dict
+        A dictionary of solver options. All methods accept the following
+        generic options:
+
+            maxiter : int
+                Maximum number of iterations to perform.
+            disp : bool
+                Set to True to print convergence messages.
+
+        For method-specific options, see :func:`show_options('linprog')`.
+    method : str, optional
+        The algorithm used to solve the standard form problem.
+
+    Returns
+    -------
+    A_ub : 2-D array, optional
+        2-D array such that ``A_ub @ x`` gives the values of the upper-bound
+        inequality constraints at ``x``.
+    A_eq : 2-D array, optional
+        2-D array such that ``A_eq @ x`` gives the values of the equality
+        constraints at ``x``.
+    options : dict
+        A dictionary of solver options. All methods accept the following
+        generic options:
+
+            maxiter : int
+                Maximum number of iterations to perform.
+            disp : bool
+                Set to True to print convergence messages.
+
+        For method-specific options, see :func:`show_options('linprog')`.
+    """
+    # This is an undocumented option for unit testing sparse presolve
+    _sparse_presolve = options.pop('_sparse_presolve', False)
+    if _sparse_presolve and A_eq is not None:
+        A_eq = sps.coo_matrix(A_eq)
+    if _sparse_presolve and A_ub is not None:
+        A_ub = sps.coo_matrix(A_ub)
+
+    sparse_constraint = sps.issparse(A_eq) or sps.issparse(A_ub)
+
+    preferred_methods = {"highs", "highs-ds", "highs-ipm"}
+    dense_methods = {"simplex", "revised simplex"}
+    if meth in dense_methods and sparse_constraint:
+        raise ValueError(f"Method '{meth}' does not support sparse "
+                         "constraint matrices. Please consider using one of "
+                         f"{preferred_methods}.")
+
+    sparse = options.get('sparse', False)
+    if not sparse and sparse_constraint and meth == 'interior-point':
+        options['sparse'] = True
+        warn("Sparse constraint matrix detected; setting 'sparse':True.",
+             OptimizeWarning, stacklevel=4)
+    return options, A_ub, A_eq
+
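+# An illustrative call (not from the original source): passing a sparse
+# constraint matrix to 'interior-point' without sparse=True flips the
+# option on (and emits an OptimizeWarning).
+#
+#     >>> import scipy.sparse as sps
+#     >>> opts, A_ub, A_eq = _check_sparse_inputs(
+#     ...     {}, 'interior-point', sps.eye(2).tocsr(), None)
+#     >>> opts
+#     {'sparse': True}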
+
+def _format_A_constraints(A, n_x, sparse_lhs=False):
+    """Format the left hand side of the constraints to a 2-D array
+
+    Parameters
+    ----------
+    A : 2-D array
+        2-D array such that ``A @ x`` gives the values of the upper-bound
+        (in)equality constraints at ``x``.
+    n_x : int
+        The number of variables in the linear programming problem.
+    sparse_lhs : bool
+        Whether either of `A_ub` or `A_eq` is sparse. If True, return a
+        coo_matrix instead of a numpy array.
+
+    Returns
+    -------
+    np.ndarray or sparse.coo_matrix
+        2-D array such that ``A @ x`` gives the values of the upper-bound
+        (in)equality constraints at ``x``.
+
+    """
+    if sparse_lhs:
+        return sps.coo_matrix(
+            (0, n_x) if A is None else A, dtype=float, copy=True
+        )
+    elif A is None:
+        return np.zeros((0, n_x), dtype=float)
+    else:
+        return np.array(A, dtype=float, copy=True)
+
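+# Behavior sketch (illustrative, not from the original source): A=None
+# becomes an empty 2-D array with the right number of columns, so downstream
+# code can treat "no constraints" uniformly.
+#
+#     >>> _format_A_constraints(None, 3).shape
+#     (0, 3)
+#     >>> _format_A_constraints([[1, 2, 3]], 3)
+#     array([[1., 2., 3.]])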
+
+def _format_b_constraints(b):
+    """Format the upper bounds of the constraints to a 1-D array
+
+    Parameters
+    ----------
+    b : 1-D array
+        1-D array of values representing the upper-bound of each (in)equality
+        constraint (row) in ``A``.
+
+    Returns
+    -------
+    1-D np.array
+        1-D array of values representing the upper-bound of each (in)equality
+        constraint (row) in ``A``.
+
+    """
+    if b is None:
+        return np.array([], dtype=float)
+    b = np.array(b, dtype=float, copy=True).squeeze()
+    return b if b.size != 1 else b.reshape((-1))
+
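+# Behavior sketch (illustrative, not from the original source): None becomes
+# an empty array and a scalar is promoted to a 1-element 1-D array.
+#
+#     >>> _format_b_constraints(None)
+#     array([], dtype=float64)
+#     >>> _format_b_constraints(5)
+#     array([5.])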
+
+def _clean_inputs(lp):
+    """
+    Given user inputs for a linear programming problem, return the
+    objective vector, upper bound constraints, equality constraints,
+    and simple bounds in a preferred format.
+
+    Parameters
+    ----------
+    lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
+
+        c : 1D array
+            The coefficients of the linear objective function to be minimized.
+        A_ub : 2D array, optional
+            The inequality constraint matrix. Each row of ``A_ub`` specifies the
+            coefficients of a linear inequality constraint on ``x``.
+        b_ub : 1D array, optional
+            The inequality constraint vector. Each element represents an
+            upper bound on the corresponding value of ``A_ub @ x``.
+        A_eq : 2D array, optional
+            The equality constraint matrix. Each row of ``A_eq`` specifies the
+            coefficients of a linear equality constraint on ``x``.
+        b_eq : 1D array, optional
+            The equality constraint vector. Each element of ``A_eq @ x`` must equal
+            the corresponding element of ``b_eq``.
+        bounds : various valid formats, optional
+            The bounds of ``x``, as ``min`` and ``max`` pairs.
+            If bounds are specified for all N variables separately, valid formats are:
+            * a 2D array (N x 2);
+            * a sequence of N sequences, each with 2 values.
+            If all variables have the same bounds, a single pair of values can
+            be specified. Valid formats are:
+            * a sequence with 2 scalar values;
+            * a sequence with a single element containing 2 scalar values.
+            If all variables have a lower bound of 0 and no upper bound, the bounds
+            parameter can be omitted (or given as None).
+        x0 : 1D array, optional
+            Guess values of the decision variables, which will be refined by
+            the optimization algorithm. This argument is currently used only by the
+            'revised simplex' method, and can only be used if `x0` represents a
+            basic feasible solution.
+
+    Returns
+    -------
+    lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
+
+        c : 1D array
+            The coefficients of the linear objective function to be minimized.
+        A_ub : 2D array, optional
+            The inequality constraint matrix. Each row of ``A_ub`` specifies the
+            coefficients of a linear inequality constraint on ``x``.
+        b_ub : 1D array, optional
+            The inequality constraint vector. Each element represents an
+            upper bound on the corresponding value of ``A_ub @ x``.
+        A_eq : 2D array, optional
+            The equality constraint matrix. Each row of ``A_eq`` specifies the
+            coefficients of a linear equality constraint on ``x``.
+        b_eq : 1D array, optional
+            The equality constraint vector. Each element of ``A_eq @ x`` must equal
+            the corresponding element of ``b_eq``.
+        bounds : 2D array
+            The bounds of ``x``, as ``min`` and ``max`` pairs, one for each of the N
+            elements of ``x``. The N x 2 array contains lower bounds in the first
+            column and upper bounds in the 2nd. Unbounded variables have lower
+            bound -np.inf and/or upper bound np.inf.
+        x0 : 1D array, optional
+            Guess values of the decision variables, which will be refined by
+            the optimization algorithm. This argument is currently used only by the
+            'revised simplex' method, and can only be used if `x0` represents a
+            basic feasible solution.
+
+    """
+    c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = lp
+
+    if c is None:
+        raise TypeError
+
+    try:
+        c = np.array(c, dtype=np.float64, copy=True).squeeze()
+    except ValueError as e:
+        raise TypeError(
+            "Invalid input for linprog: c must be a 1-D array of numerical "
+            "coefficients") from e
+    else:
+        # If c is a single value, convert it to a 1-D array.
+        if c.size == 1:
+            c = c.reshape((-1))
+
+        n_x = len(c)
+        if n_x == 0 or len(c.shape) != 1:
+            raise ValueError(
+                "Invalid input for linprog: c must be a 1-D array and must "
+                "not have more than one non-singleton dimension")
+        if not np.isfinite(c).all():
+            raise ValueError(
+                "Invalid input for linprog: c must not contain values "
+                "inf, nan, or None")
+
+    sparse_lhs = sps.issparse(A_eq) or sps.issparse(A_ub)
+    try:
+        A_ub = _format_A_constraints(A_ub, n_x, sparse_lhs=sparse_lhs)
+    except ValueError as e:
+        raise TypeError(
+            "Invalid input for linprog: A_ub must be a 2-D array "
+            "of numerical values") from e
+    else:
+        n_ub = A_ub.shape[0]
+        if len(A_ub.shape) != 2 or A_ub.shape[1] != n_x:
+            raise ValueError(
+                "Invalid input for linprog: A_ub must have exactly two "
+                "dimensions, and the number of columns in A_ub must be "
+                "equal to the size of c")
+        if (sps.issparse(A_ub) and not np.isfinite(A_ub.data).all()
+                or not sps.issparse(A_ub) and not np.isfinite(A_ub).all()):
+            raise ValueError(
+                "Invalid input for linprog: A_ub must not contain values "
+                "inf, nan, or None")
+
+    try:
+        b_ub = _format_b_constraints(b_ub)
+    except ValueError as e:
+        raise TypeError(
+            "Invalid input for linprog: b_ub must be a 1-D array of "
+            "numerical values, each representing the upper bound of an "
+            "inequality constraint (row) in A_ub") from e
+    else:
+        if b_ub.shape != (n_ub,):
+            raise ValueError(
+                "Invalid input for linprog: b_ub must be a 1-D array; b_ub "
+                "must not have more than one non-singleton dimension and "
+                "the number of rows in A_ub must equal the number of values "
+                "in b_ub")
+        if not np.isfinite(b_ub).all():
+            raise ValueError(
+                "Invalid input for linprog: b_ub must not contain values "
+                "inf, nan, or None")
+
+    try:
+        A_eq = _format_A_constraints(A_eq, n_x, sparse_lhs=sparse_lhs)
+    except ValueError as e:
+        raise TypeError(
+            "Invalid input for linprog: A_eq must be a 2-D array "
+            "of numerical values") from e
+    else:
+        n_eq = A_eq.shape[0]
+        if len(A_eq.shape) != 2 or A_eq.shape[1] != n_x:
+            raise ValueError(
+                "Invalid input for linprog: A_eq must have exactly two "
+                "dimensions, and the number of columns in A_eq must be "
+                "equal to the size of c")
+
+        if (sps.issparse(A_eq) and not np.isfinite(A_eq.data).all()
+                or not sps.issparse(A_eq) and not np.isfinite(A_eq).all()):
+            raise ValueError(
+                "Invalid input for linprog: A_eq must not contain values "
+                "inf, nan, or None")
+
+    try:
+        b_eq = _format_b_constraints(b_eq)
+    except ValueError as e:
+        raise TypeError(
+            "Invalid input for linprog: b_eq must be a dense, 1-D array of "
+            "numerical values, each representing the right hand side of an "
+            "equality constraint (row) in A_eq") from e
+    else:
+        if b_eq.shape != (n_eq,):
+            raise ValueError(
+                "Invalid input for linprog: b_eq must be a 1-D array; b_eq "
+                "must not have more than one non-singleton dimension and "
+                "the number of rows in A_eq must equal the number of values "
+                "in b_eq")
+        if not np.isfinite(b_eq).all():
+            raise ValueError(
+                "Invalid input for linprog: b_eq must not contain values "
+                "inf, nan, or None")
+
+    # x0 gives a (optional) starting solution to the solver. If x0 is None,
+    # skip the checks. Initial solution will be generated automatically.
+    if x0 is not None:
+        try:
+            x0 = np.array(x0, dtype=float, copy=True).squeeze()
+        except ValueError as e:
+            raise TypeError(
+                "Invalid input for linprog: x0 must be a 1-D array of "
+                "numerical coefficients") from e
+        if x0.ndim == 0:
+            x0 = x0.reshape((-1))
+        if len(x0) == 0 or x0.ndim != 1:
+            raise ValueError(
+                "Invalid input for linprog: x0 should be a 1-D array; it "
+                "must not have more than one non-singleton dimension")
+        if not x0.size == c.size:
+            raise ValueError(
+                "Invalid input for linprog: x0 and c should contain the "
+                "same number of elements")
+        if not np.isfinite(x0).all():
+            raise ValueError(
+                "Invalid input for linprog: x0 must not contain values "
+                "inf, nan, or None")
+
+    # Bounds can be one of these formats:
+    # (1) a 2-D array or sequence, with shape N x 2
+    # (2) a 1-D or 2-D sequence or array with 2 scalars
+    # (3) None (or an empty sequence or array)
+    # Unspecified bounds can be represented by None or (-)np.inf.
+    # All formats are converted into a N x 2 np.array with (-)np.inf where
+    # bounds are unspecified.
+
+    # Prepare clean bounds array
+    bounds_clean = np.zeros((n_x, 2), dtype=float)
+
+    # Convert to a numpy array.
+    # np.array(..,dtype=float) raises an error if dimensions are inconsistent
+    # or if there are invalid data types in bounds. Just add a linprog prefix
+    # to the error and re-raise.
+    # Creating at least a 2-D array simplifies the cases to distinguish below.
+    if bounds is None or np.array_equal(bounds, []) or np.array_equal(bounds, [[]]):
+        bounds = (0, np.inf)
+    try:
+        bounds_conv = np.atleast_2d(np.array(bounds, dtype=float))
+    except ValueError as e:
+        raise ValueError(
+            "Invalid input for linprog: unable to interpret bounds, "
+            "check values and dimensions: " + e.args[0]) from e
+    except TypeError as e:
+        raise TypeError(
+            "Invalid input for linprog: unable to interpret bounds, "
+            "check values and dimensions: " + e.args[0]) from e
+
+    # Check bounds options
+    bsh = bounds_conv.shape
+    if len(bsh) > 2:
+        # Do not try to handle multidimensional bounds input
+        raise ValueError(
+            "Invalid input for linprog: provide a 2-D array for bounds, "
+            "not a {:d}-D array.".format(len(bsh)))
+    elif np.all(bsh == (n_x, 2)):
+        # Regular N x 2 array
+        bounds_clean = bounds_conv
+    elif (np.all(bsh == (2, 1)) or np.all(bsh == (1, 2))):
+        # 2 values: interpret as overall lower and upper bound
+        bounds_flat = bounds_conv.flatten()
+        bounds_clean[:, 0] = bounds_flat[0]
+        bounds_clean[:, 1] = bounds_flat[1]
+    elif np.all(bsh == (2, n_x)):
+        # Reject a 2 x N array
+        raise ValueError(
+            "Invalid input for linprog: provide a {:d} x 2 array for bounds, "
+            "not a 2 x {:d} array.".format(n_x, n_x))
+    else:
+        raise ValueError(
+            "Invalid input for linprog: unable to interpret bounds with this "
+            "dimension tuple: {0}.".format(bsh))
+
+    # The process above creates nan-s where the input specified None
+    # Convert the nan-s in the 1st column to -np.inf and in the 2nd column
+    # to np.inf
+    i_none = np.isnan(bounds_clean[:, 0])
+    bounds_clean[i_none, 0] = -np.inf
+    i_none = np.isnan(bounds_clean[:, 1])
+    bounds_clean[i_none, 1] = np.inf
+
+    return _LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds_clean, x0, integrality)
+
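+# An illustrative call (not from the original source): a single (min, max)
+# pair is broadcast to all variables and returned as an N x 2 array.
+#
+#     >>> lp = _LPProblem(c=[1, 2], bounds=(0, 10))
+#     >>> _clean_inputs(lp).bounds
+#     array([[ 0., 10.],
+#            [ 0., 10.]])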
+
+def _presolve(lp, rr, rr_method, tol=1e-9):
+    """
+    Given inputs for a linear programming problem in preferred format,
+    presolve the problem: identify trivial infeasibilities, redundancies,
+    and unboundedness, tighten bounds where possible, and eliminate fixed
+    variables.
+
+    Parameters
+    ----------
+    lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
+
+        c : 1D array
+            The coefficients of the linear objective function to be minimized.
+        A_ub : 2D array, optional
+            The inequality constraint matrix. Each row of ``A_ub`` specifies the
+            coefficients of a linear inequality constraint on ``x``.
+        b_ub : 1D array, optional
+            The inequality constraint vector. Each element represents an
+            upper bound on the corresponding value of ``A_ub @ x``.
+        A_eq : 2D array, optional
+            The equality constraint matrix. Each row of ``A_eq`` specifies the
+            coefficients of a linear equality constraint on ``x``.
+        b_eq : 1D array, optional
+            The equality constraint vector. Each element of ``A_eq @ x`` must equal
+            the corresponding element of ``b_eq``.
+        bounds : 2D array
+            The bounds of ``x``, as ``min`` and ``max`` pairs, one for each of the N
+            elements of ``x``. The N x 2 array contains lower bounds in the first
+            column and upper bounds in the 2nd. Unbounded variables have lower
+            bound -np.inf and/or upper bound np.inf.
+        x0 : 1D array, optional
+            Guess values of the decision variables, which will be refined by
+            the optimization algorithm. This argument is currently used only by the
+            'revised simplex' method, and can only be used if `x0` represents a
+            basic feasible solution.
+
+    rr : bool
+        If ``True`` attempts to eliminate any redundant rows in ``A_eq``.
+        Set False if ``A_eq`` is known to be of full row rank, or if you are
+        looking for a potential speedup (at the expense of reliability).
+    rr_method : string
+        Method used to identify and remove redundant rows from the
+        equality constraint matrix after presolve.
+    tol : float
+        The tolerance which determines when a solution is "close enough" to
+        zero in Phase 1 to be considered a basic feasible solution or close
+        enough to positive to serve as an optimal solution.
+
+    Returns
+    -------
+    lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
+
+        c : 1D array
+            The coefficients of the linear objective function to be minimized.
+        A_ub : 2D array, optional
+            The inequality constraint matrix. Each row of ``A_ub`` specifies the
+            coefficients of a linear inequality constraint on ``x``.
+        b_ub : 1D array, optional
+            The inequality constraint vector. Each element represents an
+            upper bound on the corresponding value of ``A_ub @ x``.
+        A_eq : 2D array, optional
+            The equality constraint matrix. Each row of ``A_eq`` specifies the
+            coefficients of a linear equality constraint on ``x``.
+        b_eq : 1D array, optional
+            The equality constraint vector. Each element of ``A_eq @ x`` must equal
+            the corresponding element of ``b_eq``.
+        bounds : 2D array
+            The bounds of ``x``, as ``min`` and ``max`` pairs, possibly tightened.
+        x0 : 1D array, optional
+            Guess values of the decision variables, which will be refined by
+            the optimization algorithm. This argument is currently used only by the
+            'revised simplex' method, and can only be used if `x0` represents a
+            basic feasible solution.
+
+    c0 : 1D array
+        Constant term in objective function due to fixed (and eliminated)
+        variables.
+    x : 1D array
+        Solution vector (when the solution is trivial and can be determined
+        in presolve)
+    revstack: list of functions
+        The functions in the list reverse the operations of _presolve().
+        Each has signature x_org = f(x_mod), where x_mod is the result of a
+        presolve step and x_org the value at the start of the step.
+        (Currently, the revstack contains at most one function.)
+    complete: bool
+        Whether the solution is complete (solved or determined to be infeasible
+        or unbounded in presolve)
+    status : int
+        An integer representing the exit status of the optimization::
+
+         0 : Optimization terminated successfully
+         1 : Iteration limit reached
+         2 : Problem appears to be infeasible
+         3 : Problem appears to be unbounded
+         4 : Serious numerical difficulties encountered
+
+    message : str
+        A string descriptor of the exit status of the optimization.
+
+    References
+    ----------
+    .. [5] Andersen, Erling D. "Finding all linearly dependent rows in
+           large-scale linear programming." Optimization Methods and Software
+           6.3 (1995): 219-227.
+    .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear
+           programming." Mathematical Programming 71.2 (1995): 221-245.
+
+    """
+    # ideas from Reference [8] by Andersen and Andersen
+    # however, unlike the reference, this is performed before converting
+    # problem to standard form
+    # There are a few advantages:
+    #  * artificial variables have not been added, so matrices are smaller
+    #  * bounds have not been converted to constraints yet. (It is better to
+    #    do that after presolve because presolve may adjust the simple bounds.)
+    # There are many improvements that can be made, namely:
+    #  * implement remaining checks from [8]
+    #  * loop presolve until no additional changes are made
+    #  * implement additional efficiency improvements in redundancy removal [5]
+
+    c, A_ub, b_ub, A_eq, b_eq, bounds, x0, _ = lp
+
+    revstack = []               # record of variables eliminated from problem
+    # constant term in cost function may be added if variables are eliminated
+    c0 = 0
+    complete = False        # complete is True if detected infeasible/unbounded
+    x = np.zeros(c.shape)   # this is solution vector if completed in presolve
+
+    status = 0              # all OK unless determined otherwise
+    message = ""
+
+    # Lower and upper bounds. Copy to prevent feedback.
+    lb = bounds[:, 0].copy()
+    ub = bounds[:, 1].copy()
+
+    m_eq, n = A_eq.shape
+    m_ub, n = A_ub.shape
+
+    if (rr_method is not None
+            and rr_method.lower() not in {"svd", "pivot", "id"}):
+        message = ("'" + str(rr_method) + "' is not a valid option "
+                   "for redundancy removal. Valid options are 'SVD', "
+                   "'pivot', and 'ID'.")
+        raise ValueError(message)
+
+    if sps.issparse(A_eq):
+        A_eq = A_eq.tocsr()
+        A_ub = A_ub.tocsr()
+
+        def where(A):
+            return A.nonzero()
+
+        vstack = sps.vstack
+    else:
+        where = np.where
+        vstack = np.vstack
+
+    # upper bounds > lower bounds
+    if np.any(ub < lb) or np.any(lb == np.inf) or np.any(ub == -np.inf):
+        status = 2
+        message = ("The problem is (trivially) infeasible since one "
+                   "or more upper bounds are smaller than the corresponding "
+                   "lower bounds, a lower bound is np.inf or an upper bound "
+                   "is -np.inf.")
+        complete = True
+        return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
+                c0, x, revstack, complete, status, message)
+
+    # zero row in equality constraints
+    zero_row = np.array(np.sum(A_eq != 0, axis=1) == 0).flatten()
+    if np.any(zero_row):
+        if np.any(
+            np.logical_and(
+                zero_row,
+                np.abs(b_eq) > tol)):  # test_zero_row_1
+            # infeasible if RHS is not zero
+            status = 2
+            message = ("The problem is (trivially) infeasible due to a row "
+                       "of zeros in the equality constraint matrix with a "
+                       "nonzero corresponding constraint value.")
+            complete = True
+            return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
+                    c0, x, revstack, complete, status, message)
+        else:  # test_zero_row_2
+            # if RHS is zero, we can eliminate this equation entirely
+            A_eq = A_eq[np.logical_not(zero_row), :]
+            b_eq = b_eq[np.logical_not(zero_row)]
+
+    # zero row in inequality constraints
+    zero_row = np.array(np.sum(A_ub != 0, axis=1) == 0).flatten()
+    if np.any(zero_row):
+        if np.any(np.logical_and(zero_row, b_ub < -tol)):  # test_zero_row_1
+            # infeasible if RHS is less than zero (because LHS is zero)
+            status = 2
+            message = ("The problem is (trivially) infeasible due to a row "
+                       "of zeros in the equality constraint matrix with a "
+                       "nonzero corresponding  constraint value.")
+            complete = True
+            return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
+                    c0, x, revstack, complete, status, message)
+        else:  # test_zero_row_2
+            # if LHS is >= 0, we can eliminate this constraint entirely
+            A_ub = A_ub[np.logical_not(zero_row), :]
+            b_ub = b_ub[np.logical_not(zero_row)]
+
+    # zero column in (both) constraints
+    # this indicates that a variable isn't constrained and can be removed
+    A = vstack((A_eq, A_ub))
+    if A.shape[0] > 0:
+        zero_col = np.array(np.sum(A != 0, axis=0) == 0).flatten()
+        # variable will be at upper or lower bound, depending on objective
+        x[np.logical_and(zero_col, c < 0)] = ub[
+            np.logical_and(zero_col, c < 0)]
+        x[np.logical_and(zero_col, c > 0)] = lb[
+            np.logical_and(zero_col, c > 0)]
+        if np.any(np.isinf(x)):  # if an unconstrained variable has no bound
+            status = 3
+            message = ("If feasible, the problem is (trivially) unbounded "
+                       "due  to a zero column in the constraint matrices. If "
+                       "you wish to check whether the problem is infeasible, "
+                       "turn presolve off.")
+            complete = True
+            return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
+                    c0, x, revstack, complete, status, message)
+        # variables that will equal their upper/lower bounds will be
+        # removed later
+        lb[np.logical_and(zero_col, c < 0)] = ub[
+            np.logical_and(zero_col, c < 0)]
+        ub[np.logical_and(zero_col, c > 0)] = lb[
+            np.logical_and(zero_col, c > 0)]
+
+    # row singleton in equality constraints
+    # this fixes a variable and removes the constraint
+    singleton_row = np.array(np.sum(A_eq != 0, axis=1) == 1).flatten()
+    rows = where(singleton_row)[0]
+    cols = where(A_eq[rows, :])[1]
+    if len(rows) > 0:
+        for row, col in zip(rows, cols):
+            val = b_eq[row] / A_eq[row, col]
+            if not lb[col] - tol <= val <= ub[col] + tol:
+                # infeasible if fixed value is not within bounds
+                status = 2
+                message = ("The problem is (trivially) infeasible because a "
+                           "singleton row in the equality constraints is "
+                           "inconsistent with the bounds.")
+                complete = True
+                return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
+                        c0, x, revstack, complete, status, message)
+            else:
+                # sets upper and lower bounds at that fixed value - variable
+                # will be removed later
+                lb[col] = val
+                ub[col] = val
+        A_eq = A_eq[np.logical_not(singleton_row), :]
+        b_eq = b_eq[np.logical_not(singleton_row)]
+
+    # row singleton in inequality constraints
+    # this indicates a simple bound and the constraint can be removed
+    # simple bounds may be adjusted here
+    # After all of the simple bound information is combined here, get_Abc will
+    # turn the simple bounds into constraints
+    singleton_row = np.array(np.sum(A_ub != 0, axis=1) == 1).flatten()
+    cols = where(A_ub[singleton_row, :])[1]
+    rows = where(singleton_row)[0]
+    if len(rows) > 0:
+        for row, col in zip(rows, cols):
+            val = b_ub[row] / A_ub[row, col]
+            if A_ub[row, col] > 0:  # upper bound
+                if val < lb[col] - tol:  # infeasible
+                    complete = True
+                elif val < ub[col]:  # new upper bound
+                    ub[col] = val
+            else:  # lower bound
+                if val > ub[col] + tol:  # infeasible
+                    complete = True
+                elif val > lb[col]:  # new lower bound
+                    lb[col] = val
+            if complete:
+                status = 2
+                message = ("The problem is (trivially) infeasible because a "
+                           "singleton row in the upper bound constraints is "
+                           "inconsistent with the bounds.")
+                return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
+                        c0, x, revstack, complete, status, message)
+        A_ub = A_ub[np.logical_not(singleton_row), :]
+        b_ub = b_ub[np.logical_not(singleton_row)]
+
+    # identical bounds indicate that variable can be removed
+    i_f = np.abs(lb - ub) < tol   # indices of "fixed" variables
+    i_nf = np.logical_not(i_f)  # indices of "not fixed" variables
+
+    # test_bounds_equal_but_infeasible
+    if np.all(i_f):  # if bounds define solution, check for consistency
+        residual = b_eq - A_eq.dot(lb)
+        slack = b_ub - A_ub.dot(lb)
+        if ((A_ub.size > 0 and np.any(slack < 0)) or
+                (A_eq.size > 0 and not np.allclose(residual, 0))):
+            status = 2
+            message = ("The problem is (trivially) infeasible because the "
+                       "bounds fix all variables to values inconsistent with "
+                       "the constraints")
+            complete = True
+            return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
+                    c0, x, revstack, complete, status, message)
+
+    ub_mod = ub
+    lb_mod = lb
+    if np.any(i_f):
+        c0 += c[i_f].dot(lb[i_f])
+        b_eq = b_eq - A_eq[:, i_f].dot(lb[i_f])
+        b_ub = b_ub - A_ub[:, i_f].dot(lb[i_f])
+        c = c[i_nf]
+        x_undo = lb[i_f]  # not x[i_f], x is just zeroes
+        x = x[i_nf]
+        # user guess x0 stays separate from presolve solution x
+        if x0 is not None:
+            x0 = x0[i_nf]
+        A_eq = A_eq[:, i_nf]
+        A_ub = A_ub[:, i_nf]
+        # modify bounds
+        lb_mod = lb[i_nf]
+        ub_mod = ub[i_nf]
+
+        def rev(x_mod):
+            # Function to restore x: insert x_undo into x_mod.
+            # When elements have been removed at positions k1, k2, k3, ...
+            # then these must be replaced at (after) positions k1-1, k2-2,
+            # k3-3, ... in the modified array to recreate the original
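+            # For example, with i_f = [False, True, False, True] the fixed
+            # values were removed at original positions 1 and 3, so the
+            # insert indices into the shortened array are
+            # [1, 3] - [0, 1] = [1, 2].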
+            i = np.flatnonzero(i_f)
+            # Number of variables to restore
+            N = len(i)
+            index_offset = np.arange(N)
+            # Create insert indices
+            insert_indices = i - index_offset
+            x_rev = np.insert(x_mod.astype(float), insert_indices, x_undo)
+            return x_rev
+
+        # Use revstack as a list of functions, currently just this one.
+        revstack.append(rev)
+
+    # no constraints indicates that problem is trivial
+    if A_eq.size == 0 and A_ub.size == 0:
+        b_eq = np.array([])
+        b_ub = np.array([])
+        # test_empty_constraint_1
+        if c.size == 0:
+            status = 0
+            message = ("The solution was determined in presolve as there are "
+                       "no non-trivial constraints.")
+        elif (np.any(np.logical_and(c < 0, ub_mod == np.inf)) or
+              np.any(np.logical_and(c > 0, lb_mod == -np.inf))):
+            # test_no_constraints()
+            # test_unbounded_no_nontrivial_constraints_1
+            # test_unbounded_no_nontrivial_constraints_2
+            status = 3
+            message = ("The problem is (trivially) unbounded "
+                       "because there are no non-trivial constraints and "
+                       "a) at least one decision variable is unbounded "
+                       "above and its corresponding cost is negative, or "
+                       "b) at least one decision variable is unbounded below "
+                       "and its corresponding cost is positive. ")
+        else:  # test_empty_constraint_2
+            status = 0
+            message = ("The solution was determined in presolve as there are "
+                       "no non-trivial constraints.")
+        complete = True
+        x[c < 0] = ub_mod[c < 0]
+        x[c > 0] = lb_mod[c > 0]
+        # where c is zero, set x to a finite bound or zero
+        x_zero_c = ub_mod[c == 0]
+        x_zero_c[np.isinf(x_zero_c)] = lb_mod[c == 0][np.isinf(x_zero_c)]
+        x_zero_c[np.isinf(x_zero_c)] = 0
+        x[c == 0] = x_zero_c
+        # if this is not the last step of presolve, should convert bounds back
+        # to array and return here
+
+    # Convert modified lb and ub back into N x 2 bounds
+    bounds = np.hstack((lb_mod[:, np.newaxis], ub_mod[:, np.newaxis]))
+
+    # remove redundant (linearly dependent) rows from equality constraints
+    n_rows_A = A_eq.shape[0]
+    redundancy_warning = ("A_eq does not appear to be of full row rank. To "
+                          "improve performance, check the problem formulation "
+                          "for redundant equality constraints.")
+    if (sps.issparse(A_eq)):
+        if rr and A_eq.size > 0:  # TODO: Fast sparse rank check?
+            rr_res = _remove_redundancy_pivot_sparse(A_eq, b_eq)
+            A_eq, b_eq, status, message = rr_res
+            if A_eq.shape[0] < n_rows_A:
+                warn(redundancy_warning, OptimizeWarning, stacklevel=1)
+            if status != 0:
+                complete = True
+        return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
+                c0, x, revstack, complete, status, message)
+
+    # This is a wild guess for which redundancy removal algorithm will be
+    # faster. More testing would be good.
+    small_nullspace = 5
+    if rr and A_eq.size > 0:
+        try:  # TODO: use results of first SVD in _remove_redundancy_svd
+            rank = np.linalg.matrix_rank(A_eq)
+        # oh well, we'll have to go with _remove_redundancy_pivot_dense
+        except Exception:
+            rank = 0
+    if rr and A_eq.size > 0 and rank < A_eq.shape[0]:
+        warn(redundancy_warning, OptimizeWarning, stacklevel=3)
+        dim_row_nullspace = A_eq.shape[0]-rank
+        if rr_method is None:
+            if dim_row_nullspace <= small_nullspace:
+                rr_res = _remove_redundancy_svd(A_eq, b_eq)
+                A_eq, b_eq, status, message = rr_res
+            if dim_row_nullspace > small_nullspace or status == 4:
+                rr_res = _remove_redundancy_pivot_dense(A_eq, b_eq)
+                A_eq, b_eq, status, message = rr_res
+
+        else:
+            rr_method = rr_method.lower()
+            if rr_method == "svd":
+                rr_res = _remove_redundancy_svd(A_eq, b_eq)
+                A_eq, b_eq, status, message = rr_res
+            elif rr_method == "pivot":
+                rr_res = _remove_redundancy_pivot_dense(A_eq, b_eq)
+                A_eq, b_eq, status, message = rr_res
+            elif rr_method == "id":
+                rr_res = _remove_redundancy_id(A_eq, b_eq, rank)
+                A_eq, b_eq, status, message = rr_res
+            else:  # shouldn't get here; option validity checked above
+                pass
+        if A_eq.shape[0] < rank:
+            message = ("Due to numerical issues, redundant equality "
+                       "constraints could not be removed automatically. "
+                       "Try providing your constraint matrices as sparse "
+                       "matrices to activate sparse presolve, try turning "
+                       "off redundancy removal, or try turning off presolve "
+                       "altogether.")
+            status = 4
+        if status != 0:
+            complete = True
+    return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
+            c0, x, revstack, complete, status, message)
+
+
+def _parse_linprog(lp, options, meth):
+    """
+    Parse the provided linear programming problem
+
+    ``_parse_linprog`` employs two main steps, ``_check_sparse_inputs`` and
+    ``_clean_inputs``. ``_check_sparse_inputs`` checks for sparsity in the
+    provided constraints (``A_ub`` and ``A_eq``) and whether these are
+    consistent with the sparsity-related solver options.
+
+    ``_clean_inputs`` validates the provided inputs. If no violations are
+    identified, the objective vector, upper bound constraints, equality
+    constraints, and simple bounds are returned in the expected format.
+
+    Parameters
+    ----------
+    lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
+
+        c : 1D array
+            The coefficients of the linear objective function to be minimized.
+        A_ub : 2D array, optional
+            The inequality constraint matrix. Each row of ``A_ub`` specifies the
+            coefficients of a linear inequality constraint on ``x``.
+        b_ub : 1D array, optional
+            The inequality constraint vector. Each element represents an
+            upper bound on the corresponding value of ``A_ub @ x``.
+        A_eq : 2D array, optional
+            The equality constraint matrix. Each row of ``A_eq`` specifies the
+            coefficients of a linear equality constraint on ``x``.
+        b_eq : 1D array, optional
+            The equality constraint vector. Each element of ``A_eq @ x`` must equal
+            the corresponding element of ``b_eq``.
+        bounds : various valid formats, optional
+            The bounds of ``x``, as ``min`` and ``max`` pairs.
+            If bounds are specified for all N variables separately, valid formats are:
+            * a 2D array (2 x N or N x 2);
+            * a sequence of N sequences, each with 2 values.
+            If all variables have the same bounds, a single pair of values can
+            be specified. Valid formats are:
+            * a sequence with 2 scalar values;
+            * a sequence with a single element containing 2 scalar values.
+            If all variables have a lower bound of 0 and no upper bound, the bounds
+            parameter can be omitted (or given as None).
+        x0 : 1D array, optional
+            Guess values of the decision variables, which will be refined by
+            the optimization algorithm. This argument is currently used only by the
+            'revised simplex' method, and can only be used if `x0` represents a
+            basic feasible solution.
+
+    options : dict
+        A dictionary of solver options. All methods accept the following
+        generic options:
+
+            maxiter : int
+                Maximum number of iterations to perform.
+            disp : bool
+                Set to True to print convergence messages.
+
+        For method-specific options, see :func:`show_options('linprog')`.
+
+    Returns
+    -------
+    lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
+
+        c : 1D array
+            The coefficients of the linear objective function to be minimized.
+        A_ub : 2D array, optional
+            The inequality constraint matrix. Each row of ``A_ub`` specifies the
+            coefficients of a linear inequality constraint on ``x``.
+        b_ub : 1D array, optional
+            The inequality constraint vector. Each element represents an
+            upper bound on the corresponding value of ``A_ub @ x``.
+        A_eq : 2D array, optional
+            The equality constraint matrix. Each row of ``A_eq`` specifies the
+            coefficients of a linear equality constraint on ``x``.
+        b_eq : 1D array, optional
+            The equality constraint vector. Each element of ``A_eq @ x`` must equal
+            the corresponding element of ``b_eq``.
+        bounds : 2D array
+            The bounds of ``x``, as ``min`` and ``max`` pairs, one for each of the N
+            elements of ``x``. The N x 2 array contains lower bounds in the first
+            column and upper bounds in the 2nd. Unbounded variables have lower
+            bound -np.inf and/or upper bound np.inf.
+        x0 : 1D array, optional
+            Guess values of the decision variables, which will be refined by
+            the optimization algorithm. This argument is currently used only by the
+            'revised simplex' method, and can only be used if `x0` represents a
+            basic feasible solution.
+
+    options : dict, optional
+        A dictionary of solver options. All methods accept the following
+        generic options:
+
+            maxiter : int
+                Maximum number of iterations to perform.
+            disp : bool
+                Set to True to print convergence messages.
+
+        For method-specific options, see :func:`show_options('linprog')`.
+
+    """
+    if options is None:
+        options = {}
+
+    solver_options = {k: v for k, v in options.items()}
+    solver_options, A_ub, A_eq = _check_sparse_inputs(solver_options, meth,
+                                                      lp.A_ub, lp.A_eq)
+    # Convert lists to numpy arrays, etc...
+    lp = _clean_inputs(lp._replace(A_ub=A_ub, A_eq=A_eq))
+    return lp, solver_options
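+
+
+# Illustrative sketch (editor's addition, not part of SciPy): the bounds
+# formats accepted above all normalize to an N x 2 array. A minimal
+# demonstration of that normalization for N = 3 variables, using only NumPy
+# (the function name is hypothetical, for illustration only):
+def _bounds_formats_example():
+    n = 3
+    # equivalent ways to say 0 <= x_i <= 10 for every variable
+    per_variable = np.array([(0, 10)] * n)          # N x 2
+    single_pair = np.broadcast_to((0, 10), (n, 2))  # one pair for all
+    assert np.array_equal(per_variable, single_pair)
+    # omitted bounds mean 0 <= x_i < inf
+    default = np.hstack((np.zeros((n, 1)), np.full((n, 1), np.inf)))
+    return per_variable, default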
+
+
+def _get_Abc(lp, c0):
+    """
+    Given a linear programming problem of the form:
+
+    Minimize::
+
+        c @ x
+
+    Subject to::
+
+        A_ub @ x <= b_ub
+        A_eq @ x == b_eq
+         lb <= x <= ub
+
+    where ``lb = 0`` and ``ub = None`` unless set in ``bounds``.
+
+    Return the problem in standard form:
+
+    Minimize::
+
+        c @ x
+
+    Subject to::
+
+        A @ x == b
+            x >= 0
+
+    by adding slack variables and making variable substitutions as necessary.
+
+    Parameters
+    ----------
+    lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
+
+        c : 1D array
+            The coefficients of the linear objective function to be minimized.
+        A_ub : 2D array, optional
+            The inequality constraint matrix. Each row of ``A_ub`` specifies the
+            coefficients of a linear inequality constraint on ``x``.
+        b_ub : 1D array, optional
+            The inequality constraint vector. Each element represents an
+            upper bound on the corresponding value of ``A_ub @ x``.
+        A_eq : 2D array, optional
+            The equality constraint matrix. Each row of ``A_eq`` specifies the
+            coefficients of a linear equality constraint on ``x``.
+        b_eq : 1D array, optional
+            The equality constraint vector. Each element of ``A_eq @ x`` must equal
+            the corresponding element of ``b_eq``.
+        bounds : 2D array
+            The bounds of ``x``, lower bounds in the 1st column, upper
+            bounds in the 2nd column. The bounds are possibly tightened
+            by the presolve procedure.
+        x0 : 1D array, optional
+            Guess values of the decision variables, which will be refined by
+            the optimization algorithm. This argument is currently used only by the
+            'revised simplex' method, and can only be used if `x0` represents a
+            basic feasible solution.
+
+    c0 : float
+        Constant term in objective function due to fixed (and eliminated)
+        variables.
+
+    Returns
+    -------
+    A : 2-D array
+        2-D array such that ``A @ x`` gives the values of the equality
+        constraints at ``x``.
+    b : 1-D array
+        1-D array of values representing the RHS of each equality constraint
+        (row) in A (for standard form problem).
+    c : 1-D array
+        Coefficients of the linear objective function to be minimized (for
+        standard form problem).
+    c0 : float
+        Constant term in objective function due to fixed (and eliminated)
+        variables.
+    x0 : 1-D array
+        Starting values of the independent variables, which will be refined by
+        the optimization algorithm
+
+    References
+    ----------
+    .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
+           programming." Athena Scientific 1 (1997): 997.
+
+    """
+    c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = lp
+
+    if sps.issparse(A_eq):
+        sparse = True
+        A_eq = sps.csr_matrix(A_eq)
+        A_ub = sps.csr_matrix(A_ub)
+
+        def hstack(blocks):
+            return sps.hstack(blocks, format="csr")
+
+        def vstack(blocks):
+            return sps.vstack(blocks, format="csr")
+
+        zeros = sps.csr_matrix
+        eye = sps.eye
+    else:
+        sparse = False
+        hstack = np.hstack
+        vstack = np.vstack
+        zeros = np.zeros
+        eye = np.eye
+
+    # Variables lbs and ubs (see below) may be changed, which feeds back into
+    # bounds, so copy.
+    bounds = np.array(bounds, copy=True)
+
+    # modify problem such that all variables have only non-negativity bounds
+    lbs = bounds[:, 0]
+    ubs = bounds[:, 1]
+    m_ub, n_ub = A_ub.shape
+
+    lb_none = np.equal(lbs, -np.inf)
+    ub_none = np.equal(ubs, np.inf)
+    lb_some = np.logical_not(lb_none)
+    ub_some = np.logical_not(ub_none)
+
+    # unbounded below: substitute xi = -xi' (unbounded above)
+    # if -inf <= xi <= ub, then -ub <= -xi <= inf, so swap and invert bounds
+    l_nolb_someub = np.logical_and(lb_none, ub_some)
+    i_nolb = np.nonzero(l_nolb_someub)[0]
+    lbs[l_nolb_someub], ubs[l_nolb_someub] = (
+        -ubs[l_nolb_someub], -lbs[l_nolb_someub])
+    lb_none = np.equal(lbs, -np.inf)
+    ub_none = np.equal(ubs, np.inf)
+    lb_some = np.logical_not(lb_none)
+    ub_some = np.logical_not(ub_none)
+    c[i_nolb] *= -1
+    if x0 is not None:
+        x0[i_nolb] *= -1
+    if len(i_nolb) > 0:
+        if A_ub.shape[0] > 0:  # sometimes needed for sparse arrays... weird
+            A_ub[:, i_nolb] *= -1
+        if A_eq.shape[0] > 0:
+            A_eq[:, i_nolb] *= -1
+
+    # upper bound: add inequality constraint
+    i_newub, = ub_some.nonzero()
+    ub_newub = ubs[ub_some]
+    n_bounds = len(i_newub)
+    if n_bounds > 0:
+        shape = (n_bounds, A_ub.shape[1])
+        if sparse:
+            idxs = (np.arange(n_bounds), i_newub)
+            A_ub = vstack((A_ub, sps.csr_matrix((np.ones(n_bounds), idxs),
+                                                shape=shape)))
+        else:
+            A_ub = vstack((A_ub, np.zeros(shape)))
+            A_ub[np.arange(m_ub, A_ub.shape[0]), i_newub] = 1
+        b_ub = np.concatenate((b_ub, np.zeros(n_bounds)))
+        b_ub[m_ub:] = ub_newub
+
+    A1 = vstack((A_ub, A_eq))
+    b = np.concatenate((b_ub, b_eq))
+    c = np.concatenate((c, np.zeros((A_ub.shape[0],))))
+    if x0 is not None:
+        x0 = np.concatenate((x0, np.zeros((A_ub.shape[0],))))
+    # unbounded: substitute xi = xi+ + xi-
+    l_free = np.logical_and(lb_none, ub_none)
+    i_free = np.nonzero(l_free)[0]
+    n_free = len(i_free)
+    c = np.concatenate((c, np.zeros(n_free)))
+    if x0 is not None:
+        x0 = np.concatenate((x0, np.zeros(n_free)))
+    A1 = hstack((A1[:, :n_ub], -A1[:, i_free]))
+    c[n_ub:n_ub+n_free] = -c[i_free]
+    if x0 is not None:
+        i_free_neg = x0[i_free] < 0
+        x0[np.arange(n_ub, A1.shape[1])[i_free_neg]] = -x0[i_free[i_free_neg]]
+        x0[i_free[i_free_neg]] = 0
+
+    # add slack variables
+    A2 = vstack([eye(A_ub.shape[0]), zeros((A_eq.shape[0], A_ub.shape[0]))])
+
+    A = hstack([A1, A2])
+
+    # lower bound: substitute xi = xi' + lb
+    # now there is a constant term in objective
+    i_shift = np.nonzero(lb_some)[0]
+    lb_shift = lbs[lb_some].astype(float)
+    c0 += np.sum(lb_shift * c[i_shift])
+    if sparse:
+        b = b.reshape(-1, 1)
+        A = A.tocsc()
+        b -= (A[:, i_shift] * sps.diags(lb_shift)).sum(axis=1)
+        b = b.ravel()
+    else:
+        b -= (A[:, i_shift] * lb_shift).sum(axis=1)
+    if x0 is not None:
+        x0[i_shift] -= lb_shift
+
+    return A, b, c, c0, x0
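+
+
+# Illustrative sketch (editor's addition, not part of SciPy): the block
+# structure assembled above for a problem with one inequality and one
+# equality constraint. Slack columns pair with inequality rows only
+# (the function name is hypothetical, for illustration only):
+def _standard_form_example():
+    A_ub = np.array([[1.0, 1.0]])   # x0 + x1 <= 4
+    A_eq = np.array([[1.0, -1.0]])  # x0 - x1 == 0
+    m_ub = A_ub.shape[0]
+    A = np.block([[A_ub, np.eye(m_ub)],
+                  [A_eq, np.zeros((A_eq.shape[0], m_ub))]])
+    # A == [[ 1.,  1.,  1.],
+    #       [ 1., -1.,  0.]] -- one slack column for the single <= row
+    return A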
+
+
+def _round_to_power_of_two(x):
+    """
+    Round elements of the array to the nearest power of two.
+    """
+    return 2**np.around(np.log2(x))
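+
+
+# Illustrative check (editor's addition, not part of SciPy): rounding happens
+# in log2 space, so the returned scale factors are exact powers of two;
+# multiplying by them changes only the exponent of a float and therefore
+# introduces no rounding error.
+def _round_to_power_of_two_example():
+    out = _round_to_power_of_two(np.array([3.0, 5.0, 0.1]))
+    assert np.allclose(out, [4.0, 4.0, 0.125])
+    return out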
+
+
+def _autoscale(A, b, c, x0):
+    """
+    Scales the problem according to equilibration from [12].
+    Also normalizes the right hand side vector by its maximum element.
+    """
+    m, n = A.shape
+
+    C = 1
+    R = 1
+
+    if A.size > 0:
+
+        R = np.max(np.abs(A), axis=1)
+        if sps.issparse(A):
+            R = R.toarray().flatten()
+        R[R == 0] = 1
+        R = 1/_round_to_power_of_two(R)
+        A = sps.diags(R)*A if sps.issparse(A) else A*R.reshape(m, 1)
+        b = b*R
+
+        C = np.max(np.abs(A), axis=0)
+        if sps.issparse(A):
+            C = C.toarray().flatten()
+        C[C == 0] = 1
+        C = 1/_round_to_power_of_two(C)
+        A = A*sps.diags(C) if sps.issparse(A) else A*C
+        c = c*C
+
+    b_scale = np.max(np.abs(b)) if b.size > 0 else 1
+    if b_scale == 0:
+        b_scale = 1.
+    b = b/b_scale
+
+    if x0 is not None:
+        x0 = x0/b_scale*(1/C)
+    return A, b, c, x0, C, b_scale
+
+
+def _unscale(x, C, b_scale):
+    """
+    Converts solution to _autoscale problem -> solution to original problem.
+    """
+
+    try:
+        n = len(C)
+        # fails if sparse or scalar; that's OK.
+        # this is only needed for original simplex (never sparse)
+    except TypeError:
+        n = len(x)
+
+    return x[:n]*b_scale*C
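+
+
+# Illustrative round trip (editor's addition, not part of SciPy): a point in
+# the autoscaled variables maps back to the original variables via _unscale,
+# which inverts both the column scaling C and the normalization by b_scale.
+def _autoscale_roundtrip_example():
+    A = np.array([[2.0, 0.0], [0.0, 8.0]])
+    b = np.array([4.0, 16.0])
+    c = np.array([1.0, 1.0])
+    x0 = np.array([1.0, 2.0])
+    _, _, _, x0_scaled, C, b_scale = _autoscale(A, b, c, x0)
+    assert np.allclose(_unscale(x0_scaled, C, b_scale), x0)
+    return C, b_scale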
+
+
+def _display_summary(message, status, fun, iteration):
+    """
+    Print the termination summary of the linear program
+
+    Parameters
+    ----------
+    message : str
+            A string descriptor of the exit status of the optimization.
+    status : int
+        An integer representing the exit status of the optimization::
+
+                0 : Optimization terminated successfully
+                1 : Iteration limit reached
+                2 : Problem appears to be infeasible
+                3 : Problem appears to be unbounded
+                4 : Serious numerical difficulties encountered
+
+    fun : float
+        Value of the objective function.
+    iteration : int
+        The number of iterations performed.
+    """
+    print(message)
+    if status in (0, 1):
+        print("         Current function value: {0: <12.6f}".format(fun))
+    print("         Iterations: {0:d}".format(iteration))
+
+
+def _postsolve(x, postsolve_args, complete=False):
+    """
+    Given the solution ``x`` of the presolved, standard-form linear program,
+    add fixed variables back into the problem and undo the variable
+    substitutions to get the solution to the original linear program. Also,
+    calculate the objective function value, the slack in the original upper
+    bound constraints, and the residuals in the original equality constraints.
+
+    Parameters
+    ----------
+    x : 1-D array
+        Solution vector to the standard-form problem.
+    postsolve_args : tuple
+        Data needed by _postsolve to convert the solution to the standard-form
+        problem into the solution to the original problem, including:
+
+    lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
+
+        c : 1D array
+            The coefficients of the linear objective function to be minimized.
+        A_ub : 2D array, optional
+            The inequality constraint matrix. Each row of ``A_ub`` specifies the
+            coefficients of a linear inequality constraint on ``x``.
+        b_ub : 1D array, optional
+            The inequality constraint vector. Each element represents an
+            upper bound on the corresponding value of ``A_ub @ x``.
+        A_eq : 2D array, optional
+            The equality constraint matrix. Each row of ``A_eq`` specifies the
+            coefficients of a linear equality constraint on ``x``.
+        b_eq : 1D array, optional
+            The equality constraint vector. Each element of ``A_eq @ x`` must equal
+            the corresponding element of ``b_eq``.
+        bounds : 2D array
+            The bounds of ``x``, lower bounds in the 1st column, upper
+            bounds in the 2nd column. The bounds are possibly tightened
+            by the presolve procedure.
+        x0 : 1D array, optional
+            Guess values of the decision variables, which will be refined by
+            the optimization algorithm. This argument is currently used only by the
+            'revised simplex' method, and can only be used if `x0` represents a
+            basic feasible solution.
+
+    revstack : list of functions
+        The functions in the list reverse the operations of ``_presolve()``.
+        The function signature is ``x_org = f(x_mod)``, where ``x_mod`` is the
+        result of a presolve step and ``x_org`` is the value at the start of
+        the step.
+    complete : bool
+        Whether the solution was determined in presolve (``True`` if so).
+
+    Returns
+    -------
+    x : 1-D array
+        Solution vector to original linear programming problem
+    fun: float
+        optimal objective value for original problem
+    slack : 1-D array
+        The (non-negative) slack in the upper bound constraints, that is,
+        ``b_ub - A_ub @ x``
+    con : 1-D array
+        The (nominally zero) residuals of the equality constraints, that is,
+        ``b - A_eq @ x``
+    """
+    # Note that all the inputs are the ORIGINAL, unmodified versions;
+    # no rows or columns have been removed.
+
+    c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality = postsolve_args[0]
+    revstack, C, b_scale = postsolve_args[1:]
+
+    x = _unscale(x, C, b_scale)
+
+    # Undo variable substitutions of _get_Abc()
+    # if "complete", problem was solved in presolve; don't do anything here
+    n_x = bounds.shape[0]
+    if not complete and bounds is not None:  # bounds are never none, probably
+        n_unbounded = 0
+        for i, bi in enumerate(bounds):
+            lbi = bi[0]
+            ubi = bi[1]
+            if lbi == -np.inf and ubi == np.inf:
+                n_unbounded += 1
+                x[i] = x[i] - x[n_x + n_unbounded - 1]
+            else:
+                if lbi == -np.inf:
+                    x[i] = ubi - x[i]
+                else:
+                    x[i] += lbi
+    # all the rest of the variables were artificial
+    x = x[:n_x]
+
+    # If there were variables removed from the problem, add them back into the
+    # solution vector
+    # Apply the functions in revstack (reverse direction)
+    for rev in reversed(revstack):
+        x = rev(x)
+
+    fun = x.dot(c)
+    slack = b_ub - A_ub.dot(x)  # report slack for ORIGINAL UB constraints
+    # report residuals of ORIGINAL EQ constraints
+    con = b_eq - A_eq.dot(x)
+
+    return x, fun, slack, con
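+
+
+# Illustrative sketch (editor's addition, not part of SciPy) of the variable
+# substitutions undone above: with bounds [[-inf, inf], [1, inf]], variable 0
+# was split as x0 = x0+ - x0- and variable 1 was shifted by its lower bound.
+def _undo_substitutions_example():
+    x_std = np.array([3.0, 0.5, 1.0])  # [x0+, x1', x0-]
+    x = x_std.copy()
+    x[0] = x[0] - x_std[2]  # free variable: x0 = x0+ - x0-
+    x[1] = x[1] + 1.0       # shifted variable: x1 = x1' + lb1
+    return x[:2]            # array([2.0, 1.5])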
+
+
+def _check_result(x, fun, status, slack, con, bounds, tol, message):
+    """
+    Check the validity of the provided solution.
+
+    A valid (optimal) solution satisfies all bounds, all slack variables are
+    non-negative, and all equality constraint residuals are (nominally) zero.
+    Further, the lower bounds, upper bounds, slack, and residuals contain
+    no NaN values.
+
+    Parameters
+    ----------
+    x : 1-D array
+        Solution vector to original linear programming problem
+    fun: float
+        optimal objective value for original problem
+    status : int
+        An integer representing the exit status of the optimization::
+
+             0 : Optimization terminated successfully
+             1 : Iteration limit reached
+             2 : Problem appears to be infeasible
+             3 : Problem appears to be unbounded
+             4 : Serious numerical difficulties encountered
+
+    slack : 1-D array
+        The (non-negative) slack in the upper bound constraints, that is,
+        ``b_ub - A_ub @ x``
+    con : 1-D array
+        The (nominally zero) residuals of the equality constraints, that is,
+        ``b - A_eq @ x``
+    bounds : 2D array
+        The bounds on the original variables ``x``
+    message : str
+        A string descriptor of the exit status of the optimization.
+    tol : float
+        Termination tolerance; see [1]_ Section 4.5.
+
+    Returns
+    -------
+    status : int
+        An integer representing the exit status of the optimization::
+
+             0 : Optimization terminated successfully
+             1 : Iteration limit reached
+             2 : Problem appears to be infeasible
+             3 : Problem appears to be unbounded
+             4 : Serious numerical difficulties encountered
+
+    message : str
+        A string descriptor of the exit status of the optimization.
+    """
+    # Somewhat arbitrary
+    tol = np.sqrt(tol) * 10
+
+    if x is None:
+        # HiGHS does not provide x if infeasible/unbounded
+        if status == 0:  # Observed with HiGHS Simplex Primal
+            status = 4
+            message = ("The solver did not provide a solution nor did it "
+                       "report a failure. Please submit a bug report.")
+        return status, message
+
+    contains_nans = (
+        np.isnan(x).any()
+        or np.isnan(fun)
+        or np.isnan(slack).any()
+        or np.isnan(con).any()
+    )
+
+    if contains_nans:
+        is_feasible = False
+    else:
+        invalid_bounds = (x < bounds[:, 0] - tol).any() or (x > bounds[:, 1] + tol).any()
+        invalid_slack = status != 3 and (slack < -tol).any()
+        invalid_con = status != 3 and (np.abs(con) > tol).any()
+        is_feasible = not (invalid_bounds or invalid_slack or invalid_con)
+
+    if status == 0 and not is_feasible:
+        status = 4
+        message = ("The solution does not satisfy the constraints within the "
+                   "required tolerance of " + "{:.2E}".format(tol) + ", yet "
+                   "no errors were raised and there is no certificate of "
+                   "infeasibility or unboundedness. Check whether "
+                   "the slack and constraint residuals are acceptable; "
+                   "if not, consider enabling presolve, adjusting the "
+                   "tolerance option(s), and/or using a different method. "
+                   "Please consider submitting a bug report.")
+    elif status == 2 and is_feasible:
+        # Occurs if the simplex method exits after phase one with a very
+        # nearly basic feasible solution. Postsolving can make the solution
+        # basic, however, this solution is NOT optimal
+        status = 4
+        message = ("The solution is feasible, but the solver did not report "
+                   "that the solution was optimal. Please try a different "
+                   "method.")
+
+    return status, message
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_lsq/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_lsq/__init__.py
new file mode 100644
index 00000000..f60adcc8
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_lsq/__init__.py
@@ -0,0 +1,5 @@
+"""This module contains least-squares algorithms."""
+from .least_squares import least_squares
+from .lsq_linear import lsq_linear
+
+__all__ = ['least_squares', 'lsq_linear']
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_lsq/bvls.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_lsq/bvls.py
new file mode 100644
index 00000000..8f34ead4
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_lsq/bvls.py
@@ -0,0 +1,183 @@
+"""Bounded-variable least-squares algorithm."""
+import numpy as np
+from numpy.linalg import norm, lstsq
+from scipy.optimize import OptimizeResult
+
+from .common import print_header_linear, print_iteration_linear
+
+
+def compute_kkt_optimality(g, on_bound):
+    """Compute the maximum violation of KKT conditions."""
+    g_kkt = g * on_bound
+    free_set = on_bound == 0
+    g_kkt[free_set] = np.abs(g[free_set])
+    return np.max(g_kkt)
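+
+
+# Illustrative check (editor's addition, not part of SciPy): a free variable
+# contributes |g|; a variable at a bound contributes g * on_bound, which is
+# positive only when the descent direction points outside the feasible box.
+def _kkt_example():
+    g = np.array([-0.5, 2.0, -3.0])
+    on_bound = np.array([0, -1, 1])  # free, at lower bound, at upper bound
+    # contributions: |-0.5| = 0.5, 2.0 * (-1) = -2.0, -3.0 * 1 = -3.0
+    assert compute_kkt_optimality(g, on_bound) == 0.5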
+
+
+def bvls(A, b, x_lsq, lb, ub, tol, max_iter, verbose, rcond=None):
+    m, n = A.shape
+
+    x = x_lsq.copy()
+    on_bound = np.zeros(n)
+
+    mask = x <= lb
+    x[mask] = lb[mask]
+    on_bound[mask] = -1
+
+    mask = x >= ub
+    x[mask] = ub[mask]
+    on_bound[mask] = 1
+
+    free_set = on_bound == 0
+    active_set = ~free_set
+    free_set, = np.nonzero(free_set)
+
+    r = A.dot(x) - b
+    cost = 0.5 * np.dot(r, r)
+    initial_cost = cost
+    g = A.T.dot(r)
+
+    cost_change = None
+    step_norm = None
+    iteration = 0
+
+    if verbose == 2:
+        print_header_linear()
+
+    # This is the initialization loop. The requirement is that the
+    # least-squares solution on free variables is feasible before BVLS starts.
+    # One possible initialization is to set all variables to lower or upper
+    # bounds, but many iterations may be required from this state later on.
+    # The ad-hoc procedure implemented here should intuitively give a better
+    # initial state: find the least-squares solution on the current free
+    # variables; if it is feasible, stop; otherwise, set the violating
+    # variables to the corresponding bounds and continue on the reduced set
+    # of free variables.
+
+    while free_set.size > 0:
+        if verbose == 2:
+            optimality = compute_kkt_optimality(g, on_bound)
+            print_iteration_linear(iteration, cost, cost_change, step_norm,
+                                   optimality)
+
+        iteration += 1
+        x_free_old = x[free_set].copy()
+
+        A_free = A[:, free_set]
+        b_free = b - A.dot(x * active_set)
+        z = lstsq(A_free, b_free, rcond=rcond)[0]
+
+        lbv = z < lb[free_set]
+        ubv = z > ub[free_set]
+        v = lbv | ubv
+
+        if np.any(lbv):
+            ind = free_set[lbv]
+            x[ind] = lb[ind]
+            active_set[ind] = True
+            on_bound[ind] = -1
+
+        if np.any(ubv):
+            ind = free_set[ubv]
+            x[ind] = ub[ind]
+            active_set[ind] = True
+            on_bound[ind] = 1
+
+        ind = free_set[~v]
+        x[ind] = z[~v]
+
+        r = A.dot(x) - b
+        cost_new = 0.5 * np.dot(r, r)
+        cost_change = cost - cost_new
+        cost = cost_new
+        g = A.T.dot(r)
+        step_norm = norm(x[free_set] - x_free_old)
+
+        if np.any(v):
+            free_set = free_set[~v]
+        else:
+            break
+
+    if max_iter is None:
+        max_iter = n
+    max_iter += iteration
+
+    termination_status = None
+
+    # Main BVLS loop.
+
+    optimality = compute_kkt_optimality(g, on_bound)
+    for iteration in range(iteration, max_iter):  # BVLS Loop A
+        if verbose == 2:
+            print_iteration_linear(iteration, cost, cost_change,
+                                   step_norm, optimality)
+
+        if optimality < tol:
+            termination_status = 1
+
+        if termination_status is not None:
+            break
+
+        move_to_free = np.argmax(g * on_bound)
+        on_bound[move_to_free] = 0
+
+        while True:   # BVLS Loop B
+
+            free_set = on_bound == 0
+            active_set = ~free_set
+            free_set, = np.nonzero(free_set)
+
+            x_free = x[free_set]
+            x_free_old = x_free.copy()
+            lb_free = lb[free_set]
+            ub_free = ub[free_set]
+
+            A_free = A[:, free_set]
+            b_free = b - A.dot(x * active_set)
+            z = lstsq(A_free, b_free, rcond=rcond)[0]
+
+            lbv, = np.nonzero(z < lb_free)
+            ubv, = np.nonzero(z > ub_free)
+            v = np.hstack((lbv, ubv))
+
+            if v.size > 0:
+                alphas = np.hstack((
+                    lb_free[lbv] - x_free[lbv],
+                    ub_free[ubv] - x_free[ubv])) / (z[v] - x_free[v])
+
+                i = np.argmin(alphas)
+                i_free = v[i]
+                alpha = alphas[i]
+
+                x_free *= 1 - alpha
+                x_free += alpha * z
+                x[free_set] = x_free
+
+                if i < lbv.size:
+                    on_bound[free_set[i_free]] = -1
+                else:
+                    on_bound[free_set[i_free]] = 1
+            else:
+                x_free = z
+                x[free_set] = x_free
+                break
+
+        step_norm = norm(x_free - x_free_old)
+
+        r = A.dot(x) - b
+        cost_new = 0.5 * np.dot(r, r)
+        cost_change = cost - cost_new
+
+        if cost_change < tol * cost:
+            termination_status = 2
+        cost = cost_new
+
+        g = A.T.dot(r)
+        optimality = compute_kkt_optimality(g, on_bound)
+
+    if termination_status is None:
+        termination_status = 0
+
+    return OptimizeResult(
+        x=x, fun=r, cost=cost, optimality=optimality, active_mask=on_bound,
+        nit=iteration + 1, status=termination_status,
+        initial_cost=initial_cost)
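+
+
+# Minimal usage sketch (editor's addition, not part of SciPy): the
+# unconstrained least-squares solution of an identity system is clipped to
+# the box [0, 1]^2, and bvls confirms both variables are optimal on bounds.
+def _bvls_example():
+    A = np.eye(2)
+    b = np.array([2.0, -1.0])
+    x_lsq = lstsq(A, b, rcond=None)[0]    # unconstrained solution [2, -1]
+    lb, ub = np.zeros(2), np.ones(2)
+    res = bvls(A, b, x_lsq, lb, ub, tol=1e-10, max_iter=None, verbose=0)
+    assert np.allclose(res.x, [1.0, 0.0])
+    return res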
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_lsq/common.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_lsq/common.py
new file mode 100644
index 00000000..414bc754
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_lsq/common.py
@@ -0,0 +1,734 @@
+"""Functions used by least-squares algorithms."""
+from math import copysign
+
+import numpy as np
+from numpy.linalg import norm
+
+from scipy.linalg import cho_factor, cho_solve, LinAlgError
+from scipy.sparse import issparse
+from scipy.sparse.linalg import LinearOperator, aslinearoperator
+
+
+EPS = np.finfo(float).eps
+
+
+# Functions related to a trust-region problem.
+
+
+def intersect_trust_region(x, s, Delta):
+    """Find the intersection of a line with the boundary of a trust region.
+
+    This function solves the quadratic equation with respect to t
+    ||(x + s*t)||**2 = Delta**2.
+
+    Returns
+    -------
+    t_neg, t_pos : tuple of float
+        Negative and positive roots.
+
+    Raises
+    ------
+    ValueError
+        If `s` is zero or `x` is not within the trust region.
+    """
+    a = np.dot(s, s)
+    if a == 0:
+        raise ValueError("`s` is zero.")
+
+    b = np.dot(x, s)
+
+    c = np.dot(x, x) - Delta**2
+    if c > 0:
+        raise ValueError("`x` is not within the trust region.")
+
+    d = np.sqrt(b*b - a*c)  # Root from one fourth of the discriminant.
+
+    # Computations below avoid loss of significance, see "Numerical Recipes".
+    q = -(b + copysign(d, b))
+    t1 = q / a
+    t2 = c / q
+
+    if t1 < t2:
+        return t1, t2
+    else:
+        return t2, t1
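+
+
+# Illustrative check (editor's addition, not part of SciPy): from the center
+# of a unit trust region, a unit direction crosses the boundary at t = +/- 1.
+def _intersect_trust_region_example():
+    t_neg, t_pos = intersect_trust_region(np.zeros(2), np.array([1.0, 0.0]),
+                                          Delta=1.0)
+    assert (t_neg, t_pos) == (-1.0, 1.0)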
+
+
+def solve_lsq_trust_region(n, m, uf, s, V, Delta, initial_alpha=None,
+                           rtol=0.01, max_iter=10):
+    """Solve a trust-region problem arising in least-squares minimization.
+
+    This function implements a method described by J. J. More [1]_ and used
+    in MINPACK, but it relies on a single SVD of the Jacobian instead of a
+    series of Cholesky decompositions. Before running this function, compute:
+    ``U, s, VT = svd(J, full_matrices=False)``.
+
+    Parameters
+    ----------
+    n : int
+        Number of variables.
+    m : int
+        Number of residuals.
+    uf : ndarray
+        Computed as U.T.dot(f).
+    s : ndarray
+        Singular values of J.
+    V : ndarray
+        Transpose of VT.
+    Delta : float
+        Radius of a trust region.
+    initial_alpha : float, optional
+        Initial guess for alpha, which might be available from a previous
+        iteration. If None, determined automatically.
+    rtol : float, optional
+        Stopping tolerance for the root-finding procedure. Namely, the
+        solution ``p`` will satisfy ``abs(norm(p) - Delta) < rtol * Delta``.
+    max_iter : int, optional
+        Maximum allowed number of iterations for the root-finding procedure.
+
+    Returns
+    -------
+    p : ndarray, shape (n,)
+        Found solution of a trust-region problem.
+    alpha : float
+        Positive value such that (J.T*J + alpha*I)*p = -J.T*f.
+        Sometimes called Levenberg-Marquardt parameter.
+    n_iter : int
+        Number of iterations made by the root-finding procedure. Zero means
+        that the Gauss-Newton step was selected as the solution.
+
+    References
+    ----------
+    .. [1] More, J. J., "The Levenberg-Marquardt Algorithm: Implementation
+           and Theory," Numerical Analysis, ed. G. A. Watson, Lecture Notes
+           in Mathematics 630, Springer Verlag, pp. 105-116, 1977.
+    """
+    def phi_and_derivative(alpha, suf, s, Delta):
+        """Function of which to find zero.
+
+        It is defined as "norm of regularized (by alpha) least-squares
+        solution minus `Delta`". Refer to [1]_.
+        """
+        denom = s**2 + alpha
+        p_norm = norm(suf / denom)
+        phi = p_norm - Delta
+        phi_prime = -np.sum(suf ** 2 / denom**3) / p_norm
+        return phi, phi_prime
+
+    suf = s * uf
+
+    # Check if J has full rank and try Gauss-Newton step.
+    if m >= n:
+        threshold = EPS * m * s[0]
+        full_rank = s[-1] > threshold
+    else:
+        full_rank = False
+
+    if full_rank:
+        p = -V.dot(uf / s)
+        if norm(p) <= Delta:
+            return p, 0.0, 0
+
+    alpha_upper = norm(suf) / Delta
+
+    if full_rank:
+        phi, phi_prime = phi_and_derivative(0.0, suf, s, Delta)
+        alpha_lower = -phi / phi_prime
+    else:
+        alpha_lower = 0.0
+
+    if initial_alpha is None or not full_rank and initial_alpha == 0:
+        alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5)
+    else:
+        alpha = initial_alpha
+
+    for it in range(max_iter):
+        if alpha < alpha_lower or alpha > alpha_upper:
+            alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5)
+
+        phi, phi_prime = phi_and_derivative(alpha, suf, s, Delta)
+
+        if phi < 0:
+            alpha_upper = alpha
+
+        ratio = phi / phi_prime
+        alpha_lower = max(alpha_lower, alpha - ratio)
+        alpha -= (phi + Delta) * ratio / Delta
+
+        if np.abs(phi) < rtol * Delta:
+            break
+
+    p = -V.dot(suf / (s**2 + alpha))
+
+    # Make the norm of p equal to Delta; p is changed only slightly by this.
+    # It is done to prevent p from lying outside the trust region (which can
+    # cause problems later).
+    p *= Delta / norm(p)
+
+    return p, alpha, it + 1
+
+
+def solve_trust_region_2d(B, g, Delta):
+    """Solve a general trust-region problem in 2 dimensions.
+
+    The problem is reformulated as a 4th order algebraic equation,
+    the solution of which is found by numpy.roots.
+
+    Parameters
+    ----------
+    B : ndarray, shape (2, 2)
+        Symmetric matrix, defines a quadratic term of the function.
+    g : ndarray, shape (2,)
+        Defines a linear term of the function.
+    Delta : float
+        Radius of a trust region.
+
+    Returns
+    -------
+    p : ndarray, shape (2,)
+        Found solution.
+    newton_step : bool
+        Whether the returned solution is the Newton step which lies within
+        the trust region.
+    """
+    try:
+        R, lower = cho_factor(B)
+        p = -cho_solve((R, lower), g)
+        if np.dot(p, p) <= Delta**2:
+            return p, True
+    except LinAlgError:
+        pass
+
+    a = B[0, 0] * Delta**2
+    b = B[0, 1] * Delta**2
+    c = B[1, 1] * Delta**2
+
+    d = g[0] * Delta
+    f = g[1] * Delta
+
+    coeffs = np.array(
+        [-b + d, 2 * (a - c + f), 6 * b, 2 * (-a + c + f), -b - d])
+    t = np.roots(coeffs)  # Can handle leading zeros.
+    t = np.real(t[np.isreal(t)])
+
+    p = Delta * np.vstack((2 * t / (1 + t**2), (1 - t**2) / (1 + t**2)))
+    value = 0.5 * np.sum(p * B.dot(p), axis=0) + np.dot(g, p)
+    i = np.argmin(value)
+    p = p[:, i]
+
+    return p, False
+
+
+def update_tr_radius(Delta, actual_reduction, predicted_reduction,
+                     step_norm, bound_hit):
+    """Update the radius of a trust region based on the cost reduction.
+
+    Returns
+    -------
+    Delta : float
+        New radius.
+    ratio : float
+        Ratio between actual and predicted reductions.
+    """
+    if predicted_reduction > 0:
+        ratio = actual_reduction / predicted_reduction
+    elif predicted_reduction == actual_reduction == 0:
+        ratio = 1
+    else:
+        ratio = 0
+
+    if ratio < 0.25:
+        Delta = 0.25 * step_norm
+    elif ratio > 0.75 and bound_hit:
+        Delta *= 2.0
+
+    return Delta, ratio
+
+
+# Construction and minimization of quadratic functions.
+
+
+def build_quadratic_1d(J, g, s, diag=None, s0=None):
+    """Parameterize a multivariate quadratic function along a line.
+
+    The resulting univariate quadratic function is given as follows::
+
+        f(t) = 0.5 * (s0 + s*t).T * (J.T*J + diag) * (s0 + s*t) +
+               g.T * (s0 + s*t)
+
+    Parameters
+    ----------
+    J : ndarray, sparse matrix or LinearOperator shape (m, n)
+        Jacobian matrix, affects the quadratic term.
+    g : ndarray, shape (n,)
+        Gradient, defines the linear term.
+    s : ndarray, shape (n,)
+        Direction vector of a line.
+    diag : None or ndarray with shape (n,), optional
+        Additional diagonal part, affects the quadratic term.
+        If None, assumed to be 0.
+    s0 : None or ndarray with shape (n,), optional
+        Initial point. If None, assumed to be 0.
+
+    Returns
+    -------
+    a : float
+        Coefficient for t**2.
+    b : float
+        Coefficient for t.
+    c : float
+        Free term. Returned only if `s0` is provided.
+    """
+    v = J.dot(s)
+    a = np.dot(v, v)
+    if diag is not None:
+        a += np.dot(s * diag, s)
+    a *= 0.5
+
+    b = np.dot(g, s)
+
+    if s0 is not None:
+        u = J.dot(s0)
+        b += np.dot(u, v)
+        c = 0.5 * np.dot(u, u) + np.dot(g, s0)
+        if diag is not None:
+            b += np.dot(s0 * diag, s)
+            c += 0.5 * np.dot(s0 * diag, s0)
+        return a, b, c
+    else:
+        return a, b
+
+
+def minimize_quadratic_1d(a, b, lb, ub, c=0):
+    """Minimize a 1-D quadratic function subject to bounds.
+
+    The free term `c` is 0 by default. Bounds must be finite.
+
+    Returns
+    -------
+    t : float
+        Minimum point.
+    y : float
+        Minimum value.
+    """
+    t = [lb, ub]
+    if a != 0:
+        extremum = -0.5 * b / a
+        if lb < extremum < ub:
+            t.append(extremum)
+    t = np.asarray(t)
+    y = t * (a * t + b) + c
+    min_index = np.argmin(y)
+    return t[min_index], y[min_index]
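+
+
+# Illustrative check (editor's addition, not part of SciPy): for
+# f(t) = t**2 - 4*t the unconstrained minimum t = 2 lies outside [0, 1], so
+# the minimum over the interval is attained at the boundary point t = 1.
+def _minimize_quadratic_1d_example():
+    t, y = minimize_quadratic_1d(1.0, -4.0, 0.0, 1.0)
+    assert (t, y) == (1.0, -3.0)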
+
+
+def evaluate_quadratic(J, g, s, diag=None):
+    """Compute values of a quadratic function arising in least squares.
+
+    The function is 0.5 * s.T * (J.T * J + diag) * s + g.T * s.
+
+    Parameters
+    ----------
+    J : ndarray, sparse matrix or LinearOperator, shape (m, n)
+        Jacobian matrix, affects the quadratic term.
+    g : ndarray, shape (n,)
+        Gradient, defines the linear term.
+    s : ndarray, shape (k, n) or (n,)
+        Array containing steps as rows.
+    diag : ndarray, shape (n,), optional
+        Additional diagonal part, affects the quadratic term.
+        If None, assumed to be 0.
+
+    Returns
+    -------
+    values : ndarray with shape (k,) or float
+        Values of the function. If `s` was 2-D, then ndarray is
+        returned, otherwise, float is returned.
+    """
+    if s.ndim == 1:
+        Js = J.dot(s)
+        q = np.dot(Js, Js)
+        if diag is not None:
+            q += np.dot(s * diag, s)
+    else:
+        Js = J.dot(s.T)
+        q = np.sum(Js**2, axis=0)
+        if diag is not None:
+            q += np.sum(diag * s**2, axis=1)
+
+    l = np.dot(s, g)
+
+    return 0.5 * q + l
+
+
+# Utility functions to work with bound constraints.
+
+
+def in_bounds(x, lb, ub):
+    """Check if a point lies within bounds."""
+    return np.all((x >= lb) & (x <= ub))
+
+
+def step_size_to_bound(x, s, lb, ub):
+    """Compute a min_step size required to reach a bound.
+
+    The function computes a positive scalar t, such that x + s * t is on
+    the bound.
+
+    Returns
+    -------
+    step : float
+        Computed step. Non-negative value.
+    hits : ndarray of int with shape of x
+        Each element indicates whether a corresponding variable reaches the
+        bound:
+
+             *  0 - the bound was not hit.
+             * -1 - the lower bound was hit.
+             *  1 - the upper bound was hit.
+    """
+    non_zero = np.nonzero(s)
+    s_non_zero = s[non_zero]
+    steps = np.empty_like(x)
+    steps.fill(np.inf)
+    with np.errstate(over='ignore'):
+        steps[non_zero] = np.maximum((lb - x)[non_zero] / s_non_zero,
+                                     (ub - x)[non_zero] / s_non_zero)
+    min_step = np.min(steps)
+    return min_step, np.equal(steps, min_step) * np.sign(s).astype(int)
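+
+
+# Illustrative check (editor's addition, not part of SciPy): moving from the
+# origin along s = (1, -1) inside the box [-1, 2]^2, variable 1 reaches its
+# lower bound first, after a step of size 1.
+def _step_size_to_bound_example():
+    x = np.zeros(2)
+    s = np.array([1.0, -1.0])
+    lb, ub = np.full(2, -1.0), np.full(2, 2.0)
+    step, hits = step_size_to_bound(x, s, lb, ub)
+    assert step == 1.0 and np.array_equal(hits, [0, -1])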
+
+
+def find_active_constraints(x, lb, ub, rtol=1e-10):
+    """Determine which constraints are active in a given point.
+
+    The threshold is computed using `rtol` and the absolute value of the
+    closest bound.
+
+    Returns
+    -------
+    active : ndarray of int with shape of x
+        Each component shows whether the corresponding constraint is active:
+
+             *  0 - a constraint is not active.
+             * -1 - a lower bound is active.
+             *  1 - an upper bound is active.
+    """
+    active = np.zeros_like(x, dtype=int)
+
+    if rtol == 0:
+        active[x <= lb] = -1
+        active[x >= ub] = 1
+        return active
+
+    lower_dist = x - lb
+    upper_dist = ub - x
+
+    lower_threshold = rtol * np.maximum(1, np.abs(lb))
+    upper_threshold = rtol * np.maximum(1, np.abs(ub))
+
+    lower_active = (np.isfinite(lb) &
+                    (lower_dist <= np.minimum(upper_dist, lower_threshold)))
+    active[lower_active] = -1
+
+    upper_active = (np.isfinite(ub) &
+                    (upper_dist <= np.minimum(lower_dist, upper_threshold)))
+    active[upper_active] = 1
+
+    return active
+
+
+def make_strictly_feasible(x, lb, ub, rstep=1e-10):
+    """Shift a point to the interior of a feasible region.
+
+    Each element of the returned vector is at least at a relative distance
+    `rstep` from the closest bound. If ``rstep=0`` then `np.nextafter` is used.
+    """
+    x_new = x.copy()
+
+    active = find_active_constraints(x, lb, ub, rstep)
+    lower_mask = np.equal(active, -1)
+    upper_mask = np.equal(active, 1)
+
+    if rstep == 0:
+        x_new[lower_mask] = np.nextafter(lb[lower_mask], ub[lower_mask])
+        x_new[upper_mask] = np.nextafter(ub[upper_mask], lb[upper_mask])
+    else:
+        x_new[lower_mask] = (lb[lower_mask] +
+                             rstep * np.maximum(1, np.abs(lb[lower_mask])))
+        x_new[upper_mask] = (ub[upper_mask] -
+                             rstep * np.maximum(1, np.abs(ub[upper_mask])))
+
+    tight_bounds = (x_new < lb) | (x_new > ub)
+    x_new[tight_bounds] = 0.5 * (lb[tight_bounds] + ub[tight_bounds])
+
+    return x_new
+
+
+def CL_scaling_vector(x, g, lb, ub):
+    """Compute Coleman-Li scaling vector and its derivatives.
+
+    Components of a vector v are defined as follows::
+
+               | ub[i] - x[i], if g[i] < 0 and ub[i] < np.inf
+        v[i] = | x[i] - lb[i], if g[i] > 0 and lb[i] > -np.inf
+               | 1,           otherwise
+
+    According to this definition v[i] >= 0 for all i. It differs from the
+    definition in paper [1]_ (eq. (2.2)), where the absolute value of v is
+    used. Both definitions are equivalent down the line.
+    Derivatives of v with respect to x take the value 1, -1, or 0 depending
+    on the case.
+
+    Returns
+    -------
+    v : ndarray with shape of x
+        Scaling vector.
+    dv : ndarray with shape of x
+        Derivatives of v[i] with respect to x[i], diagonal elements of v's
+        Jacobian.
+
+    References
+    ----------
+    .. [1] M.A. Branch, T.F. Coleman, and Y. Li, "A Subspace, Interior,
+           and Conjugate Gradient Method for Large-Scale Bound-Constrained
+           Minimization Problems," SIAM Journal on Scientific Computing,
+           Vol. 21, Number 1, pp 1-23, 1999.
+    """
+    v = np.ones_like(x)
+    dv = np.zeros_like(x)
+
+    mask = (g < 0) & np.isfinite(ub)
+    v[mask] = ub[mask] - x[mask]
+    dv[mask] = -1
+
+    mask = (g > 0) & np.isfinite(lb)
+    v[mask] = x[mask] - lb[mask]
+    dv[mask] = 1
+
+    return v, dv
+
+
+def reflective_transformation(y, lb, ub):
+    """Compute reflective transformation and its gradient."""
+    if in_bounds(y, lb, ub):
+        return y, np.ones_like(y)
+
+    lb_finite = np.isfinite(lb)
+    ub_finite = np.isfinite(ub)
+
+    x = y.copy()
+    g_negative = np.zeros_like(y, dtype=bool)
+
+    mask = lb_finite & ~ub_finite
+    x[mask] = np.maximum(y[mask], 2 * lb[mask] - y[mask])
+    g_negative[mask] = y[mask] < lb[mask]
+
+    mask = ~lb_finite & ub_finite
+    x[mask] = np.minimum(y[mask], 2 * ub[mask] - y[mask])
+    g_negative[mask] = y[mask] > ub[mask]
+
+    mask = lb_finite & ub_finite
+    d = ub - lb
+    t = np.remainder(y[mask] - lb[mask], 2 * d[mask])
+    x[mask] = lb[mask] + np.minimum(t, 2 * d[mask] - t)
+    g_negative[mask] = t > d[mask]
+
+    g = np.ones_like(y)
+    g[g_negative] = -1
+
+    return x, g
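+
+
+# Illustrative check (editor's addition, not part of SciPy): a point below a
+# lower bound of 0 (with no upper bound) is reflected back into the feasible
+# region, and the gradient sign flips for the reflected component.
+def _reflective_transformation_example():
+    y = np.array([-0.5, 0.5])
+    lb, ub = np.zeros(2), np.full(2, np.inf)
+    x, g = reflective_transformation(y, lb, ub)
+    assert np.array_equal(x, [0.5, 0.5]) and np.array_equal(g, [-1.0, 1.0])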
+
+
+# Functions to display algorithm's progress.
+
+
+def print_header_nonlinear():
+    print("{0:^15}{1:^15}{2:^15}{3:^15}{4:^15}{5:^15}"
+          .format("Iteration", "Total nfev", "Cost", "Cost reduction",
+                  "Step norm", "Optimality"))
+
+
+def print_iteration_nonlinear(iteration, nfev, cost, cost_reduction,
+                              step_norm, optimality):
+    if cost_reduction is None:
+        cost_reduction = " " * 15
+    else:
+        cost_reduction = "{0:^15.2e}".format(cost_reduction)
+
+    if step_norm is None:
+        step_norm = " " * 15
+    else:
+        step_norm = "{0:^15.2e}".format(step_norm)
+
+    print("{0:^15}{1:^15}{2:^15.4e}{3}{4}{5:^15.2e}"
+          .format(iteration, nfev, cost, cost_reduction,
+                  step_norm, optimality))
+
+
+def print_header_linear():
+    print("{0:^15}{1:^15}{2:^15}{3:^15}{4:^15}"
+          .format("Iteration", "Cost", "Cost reduction", "Step norm",
+                  "Optimality"))
+
+
+def print_iteration_linear(iteration, cost, cost_reduction, step_norm,
+                           optimality):
+    if cost_reduction is None:
+        cost_reduction = " " * 15
+    else:
+        cost_reduction = "{0:^15.2e}".format(cost_reduction)
+
+    if step_norm is None:
+        step_norm = " " * 15
+    else:
+        step_norm = "{0:^15.2e}".format(step_norm)
+
+    print("{0:^15}{1:^15.4e}{2}{3}{4:^15.2e}".format(
+        iteration, cost, cost_reduction, step_norm, optimality))
+
+
+# Simple helper functions.
+
+
+def compute_grad(J, f):
+    """Compute gradient of the least-squares cost function."""
+    if isinstance(J, LinearOperator):
+        return J.rmatvec(f)
+    else:
+        return J.T.dot(f)
+
+
+def compute_jac_scale(J, scale_inv_old=None):
+    """Compute variables scale based on the Jacobian matrix."""
+    if issparse(J):
+        scale_inv = np.asarray(J.power(2).sum(axis=0)).ravel()**0.5
+    else:
+        scale_inv = np.sum(J**2, axis=0)**0.5
+
+    if scale_inv_old is None:
+        scale_inv[scale_inv == 0] = 1
+    else:
+        scale_inv = np.maximum(scale_inv, scale_inv_old)
+
+    return 1 / scale_inv, scale_inv
+
+
+def left_multiplied_operator(J, d):
+    """Return diag(d) J as LinearOperator."""
+    J = aslinearoperator(J)
+
+    def matvec(x):
+        return d * J.matvec(x)
+
+    def matmat(X):
+        return d[:, np.newaxis] * J.matmat(X)
+
+    def rmatvec(x):
+        return J.rmatvec(x.ravel() * d)
+
+    return LinearOperator(J.shape, matvec=matvec, matmat=matmat,
+                          rmatvec=rmatvec)
+
+
+def right_multiplied_operator(J, d):
+    """Return J diag(d) as LinearOperator."""
+    J = aslinearoperator(J)
+
+    def matvec(x):
+        return J.matvec(np.ravel(x) * d)
+
+    def matmat(X):
+        return J.matmat(X * d[:, np.newaxis])
+
+    def rmatvec(x):
+        return d * J.rmatvec(x)
+
+    return LinearOperator(J.shape, matvec=matvec, matmat=matmat,
+                          rmatvec=rmatvec)
+
+
+def regularized_lsq_operator(J, diag):
+    """Return a matrix arising in regularized least squares as LinearOperator.
+
+    The matrix is
+        [ J ]
+        [ D ]
+    where D is diagonal matrix with elements from `diag`.
+    """
+    J = aslinearoperator(J)
+    m, n = J.shape
+
+    def matvec(x):
+        return np.hstack((J.matvec(x), diag * x))
+
+    def rmatvec(x):
+        x1 = x[:m]
+        x2 = x[m:]
+        return J.rmatvec(x1) + diag * x2
+
+    return LinearOperator((m + n, n), matvec=matvec, rmatvec=rmatvec)
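+
+
+# Illustrative check (editor's addition, not part of SciPy): matvec stacks
+# J @ x on top of diag * x, i.e. the matrix [J; D] applied to x.
+def _regularized_lsq_operator_example():
+    J = np.array([[1.0, 2.0], [3.0, 4.0]])
+    diag = np.array([10.0, 20.0])
+    op = regularized_lsq_operator(J, diag)
+    x = np.array([1.0, 1.0])
+    assert np.array_equal(op.matvec(x), [3.0, 7.0, 10.0, 20.0])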
+
+
+def right_multiply(J, d, copy=True):
+    """Compute J diag(d).
+
+    If `copy` is False, `J` is modified in place (unless being LinearOperator).
+    """
+    if copy and not isinstance(J, LinearOperator):
+        J = J.copy()
+
+    if issparse(J):
+        J.data *= d.take(J.indices, mode='clip')  # scikit-learn recipe.
+    elif isinstance(J, LinearOperator):
+        J = right_multiplied_operator(J, d)
+    else:
+        J *= d
+
+    return J
+
+
+def left_multiply(J, d, copy=True):
+    """Compute diag(d) J.
+
+    If `copy` is False, `J` is modified in place (unless being LinearOperator).
+    """
+    if copy and not isinstance(J, LinearOperator):
+        J = J.copy()
+
+    if issparse(J):
+        J.data *= np.repeat(d, np.diff(J.indptr))  # scikit-learn recipe.
+    elif isinstance(J, LinearOperator):
+        J = left_multiplied_operator(J, d)
+    else:
+        J *= d[:, np.newaxis]
+
+    return J
+
+
+def check_termination(dF, F, dx_norm, x_norm, ratio, ftol, xtol):
+    """Check termination condition for nonlinear least squares."""
+    ftol_satisfied = dF < ftol * F and ratio > 0.25
+    xtol_satisfied = dx_norm < xtol * (xtol + x_norm)
+
+    if ftol_satisfied and xtol_satisfied:
+        return 4
+    elif ftol_satisfied:
+        return 2
+    elif xtol_satisfied:
+        return 3
+    else:
+        return None
+
+
+def scale_for_robust_loss_function(J, f, rho):
+    """Scale Jacobian and residuals for a robust loss function.
+
+    Arrays are modified in place.
+    """
+    J_scale = rho[1] + 2 * rho[2] * f**2
+    J_scale[J_scale < EPS] = EPS
+    J_scale **= 0.5
+
+    f *= rho[1] / J_scale
+
+    return left_multiply(J, J_scale, copy=False), f
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_lsq/dogbox.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_lsq/dogbox.py
new file mode 100644
index 00000000..6bb5abbe
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_lsq/dogbox.py
@@ -0,0 +1,331 @@
+"""
+Dogleg algorithm with rectangular trust regions for least-squares minimization.
+
+The description of the algorithm can be found in [Voglis]_. The algorithm does
+trust-region iterations, but the shape of trust regions is rectangular as
+opposed to the conventional elliptical shape. The intersection of a trust
+region and the initial feasible region is again some rectangle. Thus, on each
+iteration a bound-constrained quadratic optimization problem is solved.
+
+The quadratic problem is solved by the well-known dogleg approach, where the
+function is minimized along a piecewise-linear "dogleg" path [NumOpt]_,
+Chapter 4. If the Jacobian is not rank-deficient, then the function decreases
+along this path, and optimization amounts to simply following along this
+path as long as a point stays within the bounds. A constrained Cauchy step
+(along the anti-gradient) is considered for safety in rank-deficient cases;
+in these situations convergence might be slow.
+
+If during the iterations some variable hits its initial bound and the
+corresponding component of the anti-gradient points outside the feasible
+region, then the next dogleg step won't make any progress. At that point such
+variables satisfy first-order optimality conditions and are excluded before
+computing the next dogleg step.
+
+The Gauss-Newton step can be computed exactly by `numpy.linalg.lstsq` (for
+dense Jacobian matrices) or by the iterative procedure
+`scipy.sparse.linalg.lsmr` (for dense and sparse matrices, or for a Jacobian
+given as a LinearOperator). The second option allows solving very large
+problems (up to a couple of million residuals on a regular PC), provided the
+Jacobian matrix is sufficiently sparse. Note, however, that dogbox is not very
+good at solving problems with a large number of bound constraints, because
+variables are excluded from and included into the active set on each iteration
+(the required number of function evaluations might be high, or the accuracy of
+the solution will be poor); thus its large-scale usage is probably limited to
+unconstrained problems.
+
+References
+----------
+.. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region Dogleg
+            Approach for Unconstrained and Bound Constrained Nonlinear
+            Optimization", WSEAS International Conference on Applied
+            Mathematics, Corfu, Greece, 2004.
+.. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization, 2nd edition".
+"""
+import numpy as np
+from numpy.linalg import lstsq, norm
+
+from scipy.sparse.linalg import LinearOperator, aslinearoperator, lsmr
+from scipy.optimize import OptimizeResult
+
+from .common import (
+    step_size_to_bound, in_bounds, update_tr_radius, evaluate_quadratic,
+    build_quadratic_1d, minimize_quadratic_1d, compute_grad,
+    compute_jac_scale, check_termination, scale_for_robust_loss_function,
+    print_header_nonlinear, print_iteration_nonlinear)
+
+
+def lsmr_operator(Jop, d, active_set):
+    """Compute LinearOperator to use in LSMR by dogbox algorithm.
+
+    `active_set` mask is used to excluded active variables from computations
+    of matrix-vector products.
+    """
+    m, n = Jop.shape
+
+    def matvec(x):
+        x_free = x.ravel().copy()
+        x_free[active_set] = 0
+        return Jop.matvec(x_free * d)
+
+    def rmatvec(x):
+        r = d * Jop.rmatvec(x)
+        r[active_set] = 0
+        return r
+
+    return LinearOperator((m, n), matvec=matvec, rmatvec=rmatvec, dtype=float)
+
+
+def find_intersection(x, tr_bounds, lb, ub):
+    """Find intersection of trust-region bounds and initial bounds.
+
+    Returns
+    -------
+    lb_total, ub_total : ndarray with shape of x
+        Lower and upper bounds of the intersection region.
+    orig_l, orig_u : ndarray of bool with shape of x
+        True means that an original bound is taken as a corresponding bound
+        in the intersection region.
+    tr_l, tr_u : ndarray of bool with shape of x
+        True means that a trust-region bound is taken as a corresponding bound
+        in the intersection region.
+    """
+    lb_centered = lb - x
+    ub_centered = ub - x
+
+    lb_total = np.maximum(lb_centered, -tr_bounds)
+    ub_total = np.minimum(ub_centered, tr_bounds)
+
+    orig_l = np.equal(lb_total, lb_centered)
+    orig_u = np.equal(ub_total, ub_centered)
+
+    tr_l = np.equal(lb_total, -tr_bounds)
+    tr_u = np.equal(ub_total, tr_bounds)
+
+    return lb_total, ub_total, orig_l, orig_u, tr_l, tr_u
+
+
+def dogleg_step(x, newton_step, g, a, b, tr_bounds, lb, ub):
+    """Find dogleg step in a rectangular region.
+
+    Returns
+    -------
+    step : ndarray, shape (n,)
+        Computed dogleg step.
+    bound_hits : ndarray of int, shape (n,)
+        Each component shows whether a corresponding variable hits the
+        initial bound after the step is taken:
+            *  0 - a variable doesn't hit the bound.
+            * -1 - lower bound is hit.
+            *  1 - upper bound is hit.
+    tr_hit : bool
+        Whether the step hit the boundary of the trust-region.
+    """
+    lb_total, ub_total, orig_l, orig_u, tr_l, tr_u = find_intersection(
+        x, tr_bounds, lb, ub
+    )
+    bound_hits = np.zeros_like(x, dtype=int)
+
+    if in_bounds(newton_step, lb_total, ub_total):
+        return newton_step, bound_hits, False
+
+    to_bounds, _ = step_size_to_bound(np.zeros_like(x), -g, lb_total, ub_total)
+
+    # The classical dogleg algorithm would check if the Cauchy step fits into
+    # the bounds and just return its constrained version if not. But in a
+    # rectangular trust region it makes sense to try to improve the
+    # constrained Cauchy step too. Thus, we don't distinguish these two cases.
+
+    cauchy_step = -minimize_quadratic_1d(a, b, 0, to_bounds)[0] * g
+
+    step_diff = newton_step - cauchy_step
+    step_size, hits = step_size_to_bound(cauchy_step, step_diff,
+                                         lb_total, ub_total)
+    bound_hits[(hits < 0) & orig_l] = -1
+    bound_hits[(hits > 0) & orig_u] = 1
+    tr_hit = np.any((hits < 0) & tr_l | (hits > 0) & tr_u)
+
+    return cauchy_step + step_size * step_diff, bound_hits, tr_hit
+
+
+def dogbox(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale,
+           loss_function, tr_solver, tr_options, verbose):
+    f = f0
+    f_true = f.copy()
+    nfev = 1
+
+    J = J0
+    njev = 1
+
+    if loss_function is not None:
+        rho = loss_function(f)
+        cost = 0.5 * np.sum(rho[0])
+        J, f = scale_for_robust_loss_function(J, f, rho)
+    else:
+        cost = 0.5 * np.dot(f, f)
+
+    g = compute_grad(J, f)
+
+    jac_scale = isinstance(x_scale, str) and x_scale == 'jac'
+    if jac_scale:
+        scale, scale_inv = compute_jac_scale(J)
+    else:
+        scale, scale_inv = x_scale, 1 / x_scale
+
+    Delta = norm(x0 * scale_inv, ord=np.inf)
+    if Delta == 0:
+        Delta = 1.0
+
+    on_bound = np.zeros_like(x0, dtype=int)
+    on_bound[np.equal(x0, lb)] = -1
+    on_bound[np.equal(x0, ub)] = 1
+
+    x = x0
+    step = np.empty_like(x0)
+
+    if max_nfev is None:
+        max_nfev = x0.size * 100
+
+    termination_status = None
+    iteration = 0
+    step_norm = None
+    actual_reduction = None
+
+    if verbose == 2:
+        print_header_nonlinear()
+
+    while True:
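+        # A variable is considered active when it sits on a bound and the
+        # anti-gradient points out of the feasible region at that bound.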
+        active_set = on_bound * g < 0
+        free_set = ~active_set
+
+        g_free = g[free_set]
+        g_full = g.copy()
+        g[active_set] = 0
+
+        g_norm = norm(g, ord=np.inf)
+        if g_norm < gtol:
+            termination_status = 1
+
+        if verbose == 2:
+            print_iteration_nonlinear(iteration, nfev, cost, actual_reduction,
+                                      step_norm, g_norm)
+
+        if termination_status is not None or nfev == max_nfev:
+            break
+
+        x_free = x[free_set]
+        lb_free = lb[free_set]
+        ub_free = ub[free_set]
+        scale_free = scale[free_set]
+
+        # Compute the (Gauss-)Newton step and build a 1-D quadratic model
+        # along the steepest descent direction for the Cauchy step.
+        if tr_solver == 'exact':
+            J_free = J[:, free_set]
+            newton_step = lstsq(J_free, -f, rcond=-1)[0]
+
+            # Coefficients for the quadratic model along the anti-gradient.
+            a, b = build_quadratic_1d(J_free, g_free, -g_free)
+        elif tr_solver == 'lsmr':
+            Jop = aslinearoperator(J)
+
+            # We compute the lsmr step in scaled variables and then transform
+            # it back to normal variables. If lsmr gave the exact least-squares
+            # solution this would be equivalent to not doing any
+            # transformations, but from experience it's better this way.
+
+            # We pass active_set to make computations as if we selected
+            # the free subset of J columns, but without actually doing any
+            # slicing, which is expensive for sparse matrices and impossible
+            # for LinearOperator.
+
+            lsmr_op = lsmr_operator(Jop, scale, active_set)
+            newton_step = -lsmr(lsmr_op, f, **tr_options)[0][free_set]
+            newton_step *= scale_free
+
+            # Components of g for active variables were zeroed, so this call
+            # is correct and equivalent to using J_free and g_free.
+            a, b = build_quadratic_1d(Jop, g, -g)
+
+        actual_reduction = -1.0
+        while actual_reduction <= 0 and nfev < max_nfev:
+            tr_bounds = Delta * scale_free
+
+            step_free, on_bound_free, tr_hit = dogleg_step(
+                x_free, newton_step, g_free, a, b, tr_bounds, lb_free, ub_free)
+
+            step.fill(0.0)
+            step[free_set] = step_free
+
+            if tr_solver == 'exact':
+                predicted_reduction = -evaluate_quadratic(J_free, g_free,
+                                                          step_free)
+            elif tr_solver == 'lsmr':
+                predicted_reduction = -evaluate_quadratic(Jop, g, step)
+
+            # gh-11403: ensure that the solution is fully within the bounds.
+            x_new = np.clip(x + step, lb, ub)
+
+            f_new = fun(x_new)
+            nfev += 1
+
+            step_h_norm = norm(step * scale_inv, ord=np.inf)
+
+            if not np.all(np.isfinite(f_new)):
+                Delta = 0.25 * step_h_norm
+                continue
+
+            # Usual trust-region step quality estimation.
+            if loss_function is not None:
+                cost_new = loss_function(f_new, cost_only=True)
+            else:
+                cost_new = 0.5 * np.dot(f_new, f_new)
+            actual_reduction = cost - cost_new
+
+            Delta, ratio = update_tr_radius(
+                Delta, actual_reduction, predicted_reduction,
+                step_h_norm, tr_hit
+            )
+
+            step_norm = norm(step)
+            termination_status = check_termination(
+                actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol)
+
+            if termination_status is not None:
+                break
+
+        if actual_reduction > 0:
+            on_bound[free_set] = on_bound_free
+
+            x = x_new
+            # Set variables exactly at the boundary.
+            mask = on_bound == -1
+            x[mask] = lb[mask]
+            mask = on_bound == 1
+            x[mask] = ub[mask]
+
+            f = f_new
+            f_true = f.copy()
+
+            cost = cost_new
+
+            J = jac(x, f)
+            njev += 1
+
+            if loss_function is not None:
+                rho = loss_function(f)
+                J, f = scale_for_robust_loss_function(J, f, rho)
+
+            g = compute_grad(J, f)
+
+            if jac_scale:
+                scale, scale_inv = compute_jac_scale(J, scale_inv)
+        else:
+            step_norm = 0
+            actual_reduction = 0
+
+        iteration += 1
+
+    if termination_status is None:
+        termination_status = 0
+
+    return OptimizeResult(
+        x=x, cost=cost, fun=f_true, jac=J, grad=g_full, optimality=g_norm,
+        active_mask=on_bound, nfev=nfev, njev=njev, status=termination_status)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_lsq/least_squares.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_lsq/least_squares.py
new file mode 100644
index 00000000..34adad15
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_lsq/least_squares.py
@@ -0,0 +1,963 @@
+"""Generic interface for least-squares minimization."""
+from warnings import warn
+
+import numpy as np
+from numpy.linalg import norm
+
+from scipy.sparse import issparse
+from scipy.sparse.linalg import LinearOperator
+from scipy.optimize import _minpack, OptimizeResult
+from scipy.optimize._numdiff import approx_derivative, group_columns
+from scipy.optimize._minimize import Bounds
+
+from .trf import trf
+from .dogbox import dogbox
+from .common import EPS, in_bounds, make_strictly_feasible
+
+
+TERMINATION_MESSAGES = {
+    -1: "Improper input parameters status returned from `leastsq`",
+    0: "The maximum number of function evaluations is exceeded.",
+    1: "`gtol` termination condition is satisfied.",
+    2: "`ftol` termination condition is satisfied.",
+    3: "`xtol` termination condition is satisfied.",
+    4: "Both `ftol` and `xtol` termination conditions are satisfied."
+}
+
+
+FROM_MINPACK_TO_COMMON = {
+    0: -1,  # Improper input parameters from MINPACK.
+    1: 2,
+    2: 3,
+    3: 4,
+    4: 1,
+    5: 0
+    # There are 6, 7, 8 for too small tolerance parameters,
+    # but we guard against it by checking ftol, xtol, gtol beforehand.
+}
+
+
+def call_minpack(fun, x0, jac, ftol, xtol, gtol, max_nfev, x_scale, diff_step):
+    n = x0.size
+
+    if diff_step is None:
+        epsfcn = EPS
+    else:
+        epsfcn = diff_step**2
+
+    # Compute MINPACK's `diag`, which is the inverse of our `x_scale`;
+    # ``x_scale='jac'`` corresponds to ``diag=None``.
+    if isinstance(x_scale, str) and x_scale == 'jac':
+        diag = None
+    else:
+        diag = 1 / x_scale
+
+    full_output = True
+    col_deriv = False
+    factor = 100.0
+
+    if jac is None:
+        if max_nfev is None:
+            # n squared to account for Jacobian evaluations.
+            max_nfev = 100 * n * (n + 1)
+        x, info, status = _minpack._lmdif(
+            fun, x0, (), full_output, ftol, xtol, gtol,
+            max_nfev, epsfcn, factor, diag)
+    else:
+        if max_nfev is None:
+            max_nfev = 100 * n
+        x, info, status = _minpack._lmder(
+            fun, jac, x0, (), full_output, col_deriv,
+            ftol, xtol, gtol, max_nfev, factor, diag)
+
+    f = info['fvec']
+
+    if callable(jac):
+        J = jac(x)
+    else:
+        J = np.atleast_2d(approx_derivative(fun, x))
+
+    cost = 0.5 * np.dot(f, f)
+    g = J.T.dot(f)
+    g_norm = norm(g, ord=np.inf)
+
+    nfev = info['nfev']
+    njev = info.get('njev', None)
+
+    status = FROM_MINPACK_TO_COMMON[status]
+    active_mask = np.zeros_like(x0, dtype=int)
+
+    return OptimizeResult(
+        x=x, cost=cost, fun=f, jac=J, grad=g, optimality=g_norm,
+        active_mask=active_mask, nfev=nfev, njev=njev, status=status)
+
+
+def prepare_bounds(bounds, n):
+    lb, ub = [np.asarray(b, dtype=float) for b in bounds]
+    if lb.ndim == 0:
+        lb = np.resize(lb, n)
+
+    if ub.ndim == 0:
+        ub = np.resize(ub, n)
+
+    return lb, ub
+
+
+def check_tolerance(ftol, xtol, gtol, method):
+    def check(tol, name):
+        if tol is None:
+            tol = 0
+        elif tol < EPS:
+            warn("Setting `{}` below the machine epsilon ({:.2e}) effectively "
+                 "disables the corresponding termination condition."
+                 .format(name, EPS))
+        return tol
+
+    ftol = check(ftol, "ftol")
+    xtol = check(xtol, "xtol")
+    gtol = check(gtol, "gtol")
+
+    if method == "lm" and (ftol < EPS or xtol < EPS or gtol < EPS):
+        raise ValueError("All tolerances must be higher than machine epsilon "
+                         "({:.2e}) for method 'lm'.".format(EPS))
+    elif ftol < EPS and xtol < EPS and gtol < EPS:
+        raise ValueError("At least one of the tolerances must be higher than "
+                         "machine epsilon ({:.2e}).".format(EPS))
+
+    return ftol, xtol, gtol
+
+
+def check_x_scale(x_scale, x0):
+    if isinstance(x_scale, str) and x_scale == 'jac':
+        return x_scale
+
+    try:
+        x_scale = np.asarray(x_scale, dtype=float)
+        valid = np.all(np.isfinite(x_scale)) and np.all(x_scale > 0)
+    except (ValueError, TypeError):
+        valid = False
+
+    if not valid:
+        raise ValueError("`x_scale` must be 'jac' or array_like with "
+                         "positive numbers.")
+
+    if x_scale.ndim == 0:
+        x_scale = np.resize(x_scale, x0.shape)
+
+    if x_scale.shape != x0.shape:
+        raise ValueError("Inconsistent shapes between `x_scale` and `x0`.")
+
+    return x_scale
+
+
+def check_jac_sparsity(jac_sparsity, m, n):
+    if jac_sparsity is None:
+        return None
+
+    if not issparse(jac_sparsity):
+        jac_sparsity = np.atleast_2d(jac_sparsity)
+
+    if jac_sparsity.shape != (m, n):
+        raise ValueError("`jac_sparsity` has wrong shape.")
+
+    return jac_sparsity, group_columns(jac_sparsity)
+
+
+# Loss functions.
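+# Each loss below fills `rho` (shape (3, m)) in place with the loss value and
+# its first two derivatives, evaluated at z = (f / f_scale)**2; the f_scale
+# factors are applied afterwards by `construct_loss_function`.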
+
+
+def huber(z, rho, cost_only):
+    mask = z <= 1
+    rho[0, mask] = z[mask]
+    rho[0, ~mask] = 2 * z[~mask]**0.5 - 1
+    if cost_only:
+        return
+    rho[1, mask] = 1
+    rho[1, ~mask] = z[~mask]**-0.5
+    rho[2, mask] = 0
+    rho[2, ~mask] = -0.5 * z[~mask]**-1.5
+
+
+def soft_l1(z, rho, cost_only):
+    t = 1 + z
+    rho[0] = 2 * (t**0.5 - 1)
+    if cost_only:
+        return
+    rho[1] = t**-0.5
+    rho[2] = -0.5 * t**-1.5
+
+
+def cauchy(z, rho, cost_only):
+    rho[0] = np.log1p(z)
+    if cost_only:
+        return
+    t = 1 + z
+    rho[1] = 1 / t
+    rho[2] = -1 / t**2
+
+
+def arctan(z, rho, cost_only):
+    rho[0] = np.arctan(z)
+    if cost_only:
+        return
+    t = 1 + z**2
+    rho[1] = 1 / t
+    rho[2] = -2 * z / t**2
+
+
+IMPLEMENTED_LOSSES = dict(linear=None, huber=huber, soft_l1=soft_l1,
+                          cauchy=cauchy, arctan=arctan)
+
+
+def construct_loss_function(m, loss, f_scale):
+    if loss == 'linear':
+        return None
+
+    if not callable(loss):
+        loss = IMPLEMENTED_LOSSES[loss]
+        rho = np.empty((3, m))
+
+        def loss_function(f, cost_only=False):
+            z = (f / f_scale) ** 2
+            loss(z, rho, cost_only=cost_only)
+            if cost_only:
+                return 0.5 * f_scale ** 2 * np.sum(rho[0])
+            rho[0] *= f_scale ** 2
+            rho[2] /= f_scale ** 2
+            return rho
+    else:
+        def loss_function(f, cost_only=False):
+            z = (f / f_scale) ** 2
+            rho = loss(z)
+            if cost_only:
+                return 0.5 * f_scale ** 2 * np.sum(rho[0])
+            rho[0] *= f_scale ** 2
+            rho[2] /= f_scale ** 2
+            return rho
+
+    return loss_function
+
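+# For illustration only, a sketch of the wrapper's behavior with f_scale=1:
+# `construct_loss_function(3, 'soft_l1', 1.0)` returns a function mapping
+# residuals f to rho at z = f**2, with rho[0] = 2*(sqrt(1 + z) - 1):
+#
+#     >>> lf = construct_loss_function(3, 'soft_l1', 1.0)
+#     >>> lf(np.array([0.0, 1.0, 2.0]))[0]
+#     array([0.        , 0.82842712, 2.47213595])
+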
+
+def least_squares(
+        fun, x0, jac='2-point', bounds=(-np.inf, np.inf), method='trf',
+        ftol=1e-8, xtol=1e-8, gtol=1e-8, x_scale=1.0, loss='linear',
+        f_scale=1.0, diff_step=None, tr_solver=None, tr_options={},
+        jac_sparsity=None, max_nfev=None, verbose=0, args=(), kwargs={}):
+    """Solve a nonlinear least-squares problem with bounds on the variables.
+
+    Given the residuals f(x) (an m-D real function of n real
+    variables) and the loss function rho(s) (a scalar function), `least_squares`
+    finds a local minimum of the cost function F(x)::
+
+        minimize F(x) = 0.5 * sum(rho(f_i(x)**2), i = 0, ..., m - 1)
+        subject to lb <= x <= ub
+
+    The purpose of the loss function rho(s) is to reduce the influence of
+    outliers on the solution.
+
+    Parameters
+    ----------
+    fun : callable
+        Function which computes the vector of residuals, with the signature
+        ``fun(x, *args, **kwargs)``, i.e., the minimization proceeds with
+        respect to its first argument. The argument ``x`` passed to this
+        function is an ndarray of shape (n,) (never a scalar, even for n=1).
+        It must allocate and return a 1-D array_like of shape (m,) or a scalar.
+        If the argument ``x`` is complex or the function ``fun`` returns
+        complex residuals, it must be wrapped in a real function of real
+        arguments, as shown at the end of the Examples section.
+    x0 : array_like with shape (n,) or float
+        Initial guess on independent variables. If float, it will be treated
+        as a 1-D array with one element.
+    jac : {'2-point', '3-point', 'cs', callable}, optional
+        Method of computing the Jacobian matrix (an m-by-n matrix, where
+        element (i, j) is the partial derivative of f[i] with respect to
+        x[j]). The keywords select a finite difference scheme for numerical
+        estimation. The scheme '3-point' is more accurate, but requires
+        twice as many operations as '2-point' (default). The scheme 'cs'
+        uses complex steps, and while potentially the most accurate, it is
+        applicable only when `fun` correctly handles complex inputs and
+        can be analytically continued to the complex plane. Method 'lm'
+        always uses the '2-point' scheme. If callable, it is used as
+        ``jac(x, *args, **kwargs)`` and should return a good approximation
+        (or the exact value) for the Jacobian as an array_like (np.atleast_2d
+        is applied), a sparse matrix (csr_matrix preferred for performance) or
+        a `scipy.sparse.linalg.LinearOperator`.
+    bounds : 2-tuple of array_like or `Bounds`, optional
+        There are two ways to specify bounds:
+
+            1. Instance of `Bounds` class
+            2. Lower and upper bounds on independent variables. Defaults to no
+               bounds. Each array must match the size of `x0` or be a scalar,
+               in the latter case a bound will be the same for all variables.
+               Use ``np.inf`` with an appropriate sign to disable bounds on all
+               or some variables.
+    method : {'trf', 'dogbox', 'lm'}, optional
+        Algorithm to perform minimization.
+
+            * 'trf' : Trust Region Reflective algorithm, particularly suitable
+              for large sparse problems with bounds. Generally robust method.
+            * 'dogbox' : dogleg algorithm with rectangular trust regions,
+              typical use case is small problems with bounds. Not recommended
+              for problems with rank-deficient Jacobian.
+            * 'lm' : Levenberg-Marquardt algorithm as implemented in MINPACK.
+              Doesn't handle bounds and sparse Jacobians. Usually the most
+              efficient method for small unconstrained problems.
+
+        Default is 'trf'. See Notes for more information.
+    ftol : float or None, optional
+        Tolerance for termination by the change of the cost function. Default
+        is 1e-8. The optimization process is stopped when ``dF < ftol * F``,
+        and there was an adequate agreement between a local quadratic model and
+        the true model in the last step.
+
+        If None and 'method' is not 'lm', the termination by this condition is
+        disabled. If 'method' is 'lm', this tolerance must be higher than
+        machine epsilon.
+    xtol : float or None, optional
+        Tolerance for termination by the change of the independent variables.
+        Default is 1e-8. The exact condition depends on the `method` used:
+
+            * For 'trf' and 'dogbox' : ``norm(dx) < xtol * (xtol + norm(x))``.
+            * For 'lm' : ``Delta < xtol * norm(xs)``, where ``Delta`` is
+              a trust-region radius and ``xs`` is the value of ``x``
+              scaled according to `x_scale` parameter (see below).
+
+        If None and 'method' is not 'lm', the termination by this condition is
+        disabled. If 'method' is 'lm', this tolerance must be higher than
+        machine epsilon.
+    gtol : float or None, optional
+        Tolerance for termination by the norm of the gradient. Default is 1e-8.
+        The exact condition depends on a `method` used:
+
+            * For 'trf' : ``norm(g_scaled, ord=np.inf) < gtol``, where
+              ``g_scaled`` is the value of the gradient scaled to account for
+              the presence of the bounds [STIR]_.
+            * For 'dogbox' : ``norm(g_free, ord=np.inf) < gtol``, where
+              ``g_free`` is the gradient with respect to the variables which
+              are not in the optimal state on the boundary.
+            * For 'lm' : the maximum absolute value of the cosine of angles
+              between columns of the Jacobian and the residual vector is less
+              than `gtol`, or the residual vector is zero.
+
+        If None and 'method' is not 'lm', the termination by this condition is
+        disabled. If 'method' is 'lm', this tolerance must be higher than
+        machine epsilon.
+    x_scale : array_like or 'jac', optional
+        Characteristic scale of each variable. Setting `x_scale` is equivalent
+        to reformulating the problem in scaled variables ``xs = x / x_scale``.
+        An alternative view is that the size of a trust region along the jth
+        dimension is proportional to ``x_scale[j]``. Improved convergence may
+        be achieved by setting `x_scale` such that a step of a given size
+        along any of the scaled variables has a similar effect on the cost
+        function. If set to 'jac', the scale is iteratively updated using the
+        inverse norms of the columns of the Jacobian matrix (as described in
+        [JJMore]_).
+    loss : str or callable, optional
+        Determines the loss function. The following keyword values are allowed:
+
+            * 'linear' (default) : ``rho(z) = z``. Gives a standard
+              least-squares problem.
+            * 'soft_l1' : ``rho(z) = 2 * ((1 + z)**0.5 - 1)``. The smooth
+              approximation of l1 (absolute value) loss. Usually a good
+              choice for robust least squares.
+            * 'huber' : ``rho(z) = z if z <= 1 else 2*z**0.5 - 1``. Works
+              similarly to 'soft_l1'.
+            * 'cauchy' : ``rho(z) = ln(1 + z)``. Severely weakens the
+              influence of outliers, but may cause difficulties in the
+              optimization process.
+            * 'arctan' : ``rho(z) = arctan(z)``. Limits a maximum loss on
+              a single residual, has properties similar to 'cauchy'.
+
+        If callable, it must take a 1-D ndarray ``z=f**2`` and return an
+        array_like with shape (3, m) where row 0 contains function values,
+        row 1 contains first derivatives and row 2 contains second
+        derivatives. Method 'lm' supports only 'linear' loss.
+    f_scale : float, optional
+        Value of soft margin between inlier and outlier residuals, default
+        is 1.0. The loss function is evaluated as
+        ``rho_(f**2) = C**2 * rho(f**2 / C**2)``, where ``C`` is `f_scale`,
+        and ``rho`` is determined by `loss` parameter. This parameter has
+        no effect with ``loss='linear'``, but for other `loss` values it is
+        of crucial importance.
+    max_nfev : None or int, optional
+        Maximum number of function evaluations before the termination.
+        If None (default), the value is chosen automatically:
+
+            * For 'trf' and 'dogbox' : 100 * n.
+            * For 'lm' :  100 * n if `jac` is callable and 100 * n * (n + 1)
+              otherwise (because 'lm' counts function calls in Jacobian
+              estimation).
+
+    diff_step : None or array_like, optional
+        Determines the relative step size for the finite difference
+        approximation of the Jacobian. The actual step is computed as
+        ``x * diff_step``. If None (default), then `diff_step` is taken to be
+        a conventional "optimal" power of machine epsilon for the finite
+        difference scheme used [NR]_.
+    tr_solver : {None, 'exact', 'lsmr'}, optional
+        Method for solving trust-region subproblems, relevant only for 'trf'
+        and 'dogbox' methods.
+
+            * 'exact' is suitable for not very large problems with dense
+              Jacobian matrices. The computational complexity per iteration is
+              comparable to a singular value decomposition of the Jacobian
+              matrix.
+            * 'lsmr' is suitable for problems with sparse and large Jacobian
+              matrices. It uses the iterative procedure
+              `scipy.sparse.linalg.lsmr` for finding a solution of a linear
+              least-squares problem and only requires matrix-vector product
+              evaluations.
+
+        If None (default), the solver is chosen based on the type of Jacobian
+        returned on the first iteration.
+    tr_options : dict, optional
+        Keyword options passed to trust-region solver.
+
+            * ``tr_solver='exact'``: `tr_options` are ignored.
+            * ``tr_solver='lsmr'``: options for `scipy.sparse.linalg.lsmr`.
+              Additionally, ``method='trf'`` supports the 'regularize' option
+              (bool, default is True), which adds a regularization term to the
+              normal equation, which improves convergence if the Jacobian is
+              rank-deficient [Byrd]_ (eq. 3.4).
+
+    jac_sparsity : {None, array_like, sparse matrix}, optional
+        Defines the sparsity structure of the Jacobian matrix for finite
+        difference estimation, its shape must be (m, n). If the Jacobian has
+        only a few non-zero elements in *each* row, providing the sparsity
+        structure will greatly speed up the computations [Curtis]_. A zero
+        entry means that a corresponding element in the Jacobian is identically
+        zero. If provided, forces the use of 'lsmr' trust-region solver.
+        If None (default), then dense differencing will be used. Has no effect
+        for 'lm' method.
+    verbose : {0, 1, 2}, optional
+        Level of algorithm's verbosity:
+
+            * 0 (default) : work silently.
+            * 1 : display a termination report.
+            * 2 : display progress during iterations (not supported by 'lm'
+              method).
+
+    args, kwargs : tuple and dict, optional
+        Additional arguments passed to `fun` and `jac`. Both empty by default.
+        The calling signature is ``fun(x, *args, **kwargs)`` and the same for
+        `jac`.
+
+    Returns
+    -------
+    result : OptimizeResult
+        `OptimizeResult` with the following fields defined:
+
+            x : ndarray, shape (n,)
+                Solution found.
+            cost : float
+                Value of the cost function at the solution.
+            fun : ndarray, shape (m,)
+                Vector of residuals at the solution.
+            jac : ndarray, sparse matrix or LinearOperator, shape (m, n)
+                Modified Jacobian matrix at the solution, in the sense that J^T J
+                is a Gauss-Newton approximation of the Hessian of the cost function.
+                The type is the same as the one used by the algorithm.
+            grad : ndarray, shape (n,)
+                Gradient of the cost function at the solution.
+            optimality : float
+                First-order optimality measure. In unconstrained problems, it is
+                always the uniform norm of the gradient. In constrained problems,
+                it is the quantity which was compared with `gtol` during iterations.
+            active_mask : ndarray of int, shape (n,)
+                Each component shows whether a corresponding constraint is active
+                (that is, whether a variable is at the bound):
+
+                    *  0 : a constraint is not active.
+                    * -1 : a lower bound is active.
+                    *  1 : an upper bound is active.
+
+                Might be somewhat arbitrary for the 'trf' method as it
+                generates a sequence of strictly feasible iterates and
+                `active_mask` is determined within a tolerance threshold.
+            nfev : int
+                Number of function evaluations done. Methods 'trf' and 'dogbox' do
+                not count function calls for numerical Jacobian approximation, as
+                opposed to 'lm' method.
+            njev : int or None
+                Number of Jacobian evaluations done. If numerical Jacobian
+                approximation is used in 'lm' method, it is set to None.
+            status : int
+                The reason for algorithm termination:
+
+                    * -1 : improper input parameters status returned from MINPACK.
+                    *  0 : the maximum number of function evaluations is exceeded.
+                    *  1 : `gtol` termination condition is satisfied.
+                    *  2 : `ftol` termination condition is satisfied.
+                    *  3 : `xtol` termination condition is satisfied.
+                    *  4 : Both `ftol` and `xtol` termination conditions are satisfied.
+
+            message : str
+                Verbal description of the termination reason.
+            success : bool
+                True if one of the convergence criteria is satisfied (`status` > 0).
+
+    See Also
+    --------
+    leastsq : A legacy wrapper for the MINPACK implementation of the
+              Levenberg-Marquardt algorithm.
+    curve_fit : Least-squares minimization applied to a curve-fitting problem.
+
+    Notes
+    -----
+    Method 'lm' (Levenberg-Marquardt) calls a wrapper over least-squares
+    algorithms implemented in MINPACK (lmder, lmdif). It runs the
+    Levenberg-Marquardt algorithm formulated as a trust-region type algorithm.
+    The implementation is based on paper [JJMore]_; it is very robust and
+    efficient, with a lot of smart tricks. It should be your first choice
+    for unconstrained problems. Note that it doesn't support bounds. Also,
+    it doesn't work when m < n.
+
+    Method 'trf' (Trust Region Reflective) is motivated by the process of
+    solving a system of equations, which constitute the first-order optimality
+    condition for a bound-constrained minimization problem as formulated in
+    [STIR]_. The algorithm iteratively solves trust-region subproblems
+    augmented by a special diagonal quadratic term and with trust-region shape
+    determined by the distance from the bounds and the direction of the
+    gradient. These enhancements help to avoid making steps directly into the
+    bounds and to explore the whole space of variables efficiently. To further
+    convergence, the algorithm considers search directions reflected from the
+    bounds. To obey theoretical requirements, the algorithm keeps iterates
+    strictly feasible. With dense Jacobians trust-region subproblems are
+    solved by an exact method very similar to the one described in [JJMore]_
+    (and implemented in MINPACK). The difference from the MINPACK
+    implementation is that a singular value decomposition of a Jacobian
+    matrix is done once per iteration, instead of a QR decomposition and series
+    of Givens rotation eliminations. For large sparse Jacobians a 2-D subspace
+    approach of solving trust-region subproblems is used [STIR]_, [Byrd]_.
+    The subspace is spanned by a scaled gradient and an approximate
+    Gauss-Newton solution delivered by `scipy.sparse.linalg.lsmr`. When no
+    constraints are imposed the algorithm is very similar to MINPACK and has
+    generally comparable performance. The algorithm works quite robustly on
+    unbounded and bounded problems, and is thus chosen as the default
+    algorithm.
+
+    Method 'dogbox' operates in a trust-region framework, but considers
+    rectangular trust regions as opposed to conventional ellipsoids [Voglis]_.
+    The intersection of a current trust region and initial bounds is again
+    rectangular, so on each iteration a quadratic minimization problem subject
+    to bound constraints is solved approximately by Powell's dogleg method
+    [NumOpt]_. The required Gauss-Newton step can be computed exactly for
+    dense Jacobians or approximately by `scipy.sparse.linalg.lsmr` for large
+    sparse Jacobians. The algorithm is likely to exhibit slow convergence when
+    the rank of the Jacobian is less than the number of variables. The
+    algorithm often outperforms 'trf' in bounded problems with a small number
+    of variables.
+
+    Robust loss functions are implemented as described in [BA]_. The idea
+    is to modify a residual vector and a Jacobian matrix on each iteration
+    such that computed gradient and Gauss-Newton Hessian approximation match
+    the true gradient and Hessian approximation of the cost function. Then
+    the algorithm proceeds in a normal way, i.e., robust loss functions are
+    implemented as a simple wrapper over standard least-squares algorithms.
+
+    .. versionadded:: 0.17.0
+
+    References
+    ----------
+    .. [STIR] M. A. Branch, T. F. Coleman, and Y. Li, "A Subspace, Interior,
+              and Conjugate Gradient Method for Large-Scale Bound-Constrained
+              Minimization Problems," SIAM Journal on Scientific Computing,
+              Vol. 21, Number 1, pp 1-23, 1999.
+    .. [NR] William H. Press et al., "Numerical Recipes. The Art of Scientific
+            Computing. 3rd edition", Sec. 5.7.
+    .. [Byrd] R. H. Byrd, R. B. Schnabel and G. A. Shultz, "Approximate
+              solution of the trust region problem by minimization over
+              two-dimensional subspaces", Math. Programming, 40, pp. 247-263,
+              1988.
+    .. [Curtis] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
+                sparse Jacobian matrices", Journal of the Institute of
+                Mathematics and its Applications, 13, pp. 117-120, 1974.
+    .. [JJMore] J. J. More, "The Levenberg-Marquardt Algorithm: Implementation
+                and Theory," Numerical Analysis, ed. G. A. Watson, Lecture
+                Notes in Mathematics 630, Springer Verlag, pp. 105-116, 1977.
+    .. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region
+                Dogleg Approach for Unconstrained and Bound Constrained
+                Nonlinear Optimization", WSEAS International Conference on
+                Applied Mathematics, Corfu, Greece, 2004.
+    .. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization,
+                2nd edition", Chapter 4.
+    .. [BA] B. Triggs et al., "Bundle Adjustment - A Modern Synthesis",
+            Proceedings of the International Workshop on Vision Algorithms:
+            Theory and Practice, pp. 298-372, 1999.
+
+    Examples
+    --------
+    In this example we find a minimum of the Rosenbrock function without bounds
+    on independent variables.
+
+    >>> import numpy as np
+    >>> def fun_rosenbrock(x):
+    ...     return np.array([10 * (x[1] - x[0]**2), (1 - x[0])])
+
+    Notice that we only provide the vector of the residuals. The algorithm
+    constructs the cost function as a sum of squares of the residuals, which
+    gives the Rosenbrock function. The exact minimum is at ``x = [1.0, 1.0]``.
+
+    >>> from scipy.optimize import least_squares
+    >>> x0_rosenbrock = np.array([2, 2])
+    >>> res_1 = least_squares(fun_rosenbrock, x0_rosenbrock)
+    >>> res_1.x
+    array([ 1.,  1.])
+    >>> res_1.cost
+    9.8669242910846867e-30
+    >>> res_1.optimality
+    8.8928864934219529e-14
+
+    We now constrain the variables, in such a way that the previous solution
+    becomes infeasible. Specifically, we require that ``x[1] >= 1.5``, and
+    leave ``x[0]`` unconstrained. To this end, we specify the `bounds`
+    parameter to `least_squares` in the form
+    ``bounds=([-np.inf, 1.5], np.inf)``.
+
+    We also provide the analytic Jacobian:
+
+    >>> def jac_rosenbrock(x):
+    ...     return np.array([
+    ...         [-20 * x[0], 10],
+    ...         [-1, 0]])
+
+    Putting this all together, we see that the new solution lies on the bound:
+
+    >>> res_2 = least_squares(fun_rosenbrock, x0_rosenbrock, jac_rosenbrock,
+    ...                       bounds=([-np.inf, 1.5], np.inf))
+    >>> res_2.x
+    array([ 1.22437075,  1.5       ])
+    >>> res_2.cost
+    0.025213093946805685
+    >>> res_2.optimality
+    1.5885401433157753e-07
+
+    Now we solve a system of equations (i.e., the cost function should be zero
+    at a minimum) for a Broyden tridiagonal vector-valued function of 100000
+    variables:
+
+    >>> def fun_broyden(x):
+    ...     f = (3 - x) * x + 1
+    ...     f[1:] -= x[:-1]
+    ...     f[:-1] -= 2 * x[1:]
+    ...     return f
+
+    The corresponding Jacobian matrix is sparse. We tell the algorithm to
+    estimate it by finite differences and provide the sparsity structure of
+    the Jacobian to significantly speed up this process.
+
+    >>> from scipy.sparse import lil_matrix
+    >>> def sparsity_broyden(n):
+    ...     sparsity = lil_matrix((n, n), dtype=int)
+    ...     i = np.arange(n)
+    ...     sparsity[i, i] = 1
+    ...     i = np.arange(1, n)
+    ...     sparsity[i, i - 1] = 1
+    ...     i = np.arange(n - 1)
+    ...     sparsity[i, i + 1] = 1
+    ...     return sparsity
+    ...
+    >>> n = 100000
+    >>> x0_broyden = -np.ones(n)
+    ...
+    >>> res_3 = least_squares(fun_broyden, x0_broyden,
+    ...                       jac_sparsity=sparsity_broyden(n))
+    >>> res_3.cost
+    4.5687069299604613e-23
+    >>> res_3.optimality
+    1.1650454296851518e-11
+
+    Let's also solve a curve fitting problem using a robust loss function to
+    take care of outliers in the data. Define the model function as
+    ``y = a + b * exp(c * t)``, where t is a predictor variable, y is an
+    observation and a, b, c are parameters to estimate.
+
+    First, define the function which generates the data with noise and
+    outliers, define the model parameters, and generate data:
+
+    >>> from numpy.random import default_rng
+    >>> rng = default_rng()
+    >>> def gen_data(t, a, b, c, noise=0., n_outliers=0, seed=None):
+    ...     rng = default_rng(seed)
+    ...
+    ...     y = a + b * np.exp(t * c)
+    ...
+    ...     error = noise * rng.standard_normal(t.size)
+    ...     outliers = rng.integers(0, t.size, n_outliers)
+    ...     error[outliers] *= 10
+    ...
+    ...     return y + error
+    ...
+    >>> a = 0.5
+    >>> b = 2.0
+    >>> c = -1
+    >>> t_min = 0
+    >>> t_max = 10
+    >>> n_points = 15
+    ...
+    >>> t_train = np.linspace(t_min, t_max, n_points)
+    >>> y_train = gen_data(t_train, a, b, c, noise=0.1, n_outliers=3)
+
+    Define the function computing residuals and the initial estimate of the
+    parameters.
+
+    >>> def fun(x, t, y):
+    ...     return x[0] + x[1] * np.exp(x[2] * t) - y
+    ...
+    >>> x0 = np.array([1.0, 1.0, 0.0])
+
+    Compute a standard least-squares solution:
+
+    >>> res_lsq = least_squares(fun, x0, args=(t_train, y_train))
+
+    Now compute two solutions with two different robust loss functions. The
+    parameter `f_scale` is set to 0.1, meaning that inlier residuals should
+    not significantly exceed 0.1 (the noise level used).
+
+    >>> res_soft_l1 = least_squares(fun, x0, loss='soft_l1', f_scale=0.1,
+    ...                             args=(t_train, y_train))
+    >>> res_log = least_squares(fun, x0, loss='cauchy', f_scale=0.1,
+    ...                         args=(t_train, y_train))
+
+    And, finally, plot all the curves. We see that by selecting an appropriate
+    `loss` we can get estimates close to optimal even in the presence of
+    strong outliers. But keep in mind that generally it is recommended to try
+    'soft_l1' or 'huber' losses first (if at all necessary), as the other two
+    options may cause difficulties in the optimization process.
+
+    >>> t_test = np.linspace(t_min, t_max, n_points * 10)
+    >>> y_true = gen_data(t_test, a, b, c)
+    >>> y_lsq = gen_data(t_test, *res_lsq.x)
+    >>> y_soft_l1 = gen_data(t_test, *res_soft_l1.x)
+    >>> y_log = gen_data(t_test, *res_log.x)
+    ...
+    >>> import matplotlib.pyplot as plt
+    >>> plt.plot(t_train, y_train, 'o')
+    >>> plt.plot(t_test, y_true, 'k', linewidth=2, label='true')
+    >>> plt.plot(t_test, y_lsq, label='linear loss')
+    >>> plt.plot(t_test, y_soft_l1, label='soft_l1 loss')
+    >>> plt.plot(t_test, y_log, label='cauchy loss')
+    >>> plt.xlabel("t")
+    >>> plt.ylabel("y")
+    >>> plt.legend()
+    >>> plt.show()
+
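+    As a sketch of the callable `loss` interface (this hypothetical
+    ``loss_soft_l1`` reimplements the built-in 'soft_l1'), the callable must
+    return an array of shape (3, m) holding rho and its first two derivatives
+    evaluated at ``z = f**2``:
+
+    >>> def loss_soft_l1(z):
+    ...     rho = np.empty((3, z.size))
+    ...     t = 1 + z
+    ...     rho[0] = 2 * (t**0.5 - 1)  # rho(z)
+    ...     rho[1] = t**-0.5           # rho'(z)
+    ...     rho[2] = -0.5 * t**-1.5    # rho''(z)
+    ...     return rho
+    ...
+    >>> res_custom = least_squares(fun, x0, loss=loss_soft_l1, f_scale=0.1,
+    ...                            args=(t_train, y_train))
+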
+    In the next example, we show how complex-valued residual functions of
+    complex variables can be optimized with ``least_squares()``. Consider the
+    following function:
+
+    >>> def f(z):
+    ...     return z - (0.5 + 0.5j)
+
+    We wrap it into a function of real variables that returns real residuals
+    by simply handling the real and imaginary parts as independent variables:
+
+    >>> def f_wrap(x):
+    ...     fx = f(x[0] + 1j*x[1])
+    ...     return np.array([fx.real, fx.imag])
+
+    Thus, instead of the original m-D complex function of n complex
+    variables we optimize a 2m-D real function of 2n real variables:
+
+    >>> from scipy.optimize import least_squares
+    >>> res_wrapped = least_squares(f_wrap, (0.1, 0.1), bounds=([0, 0], [1, 1]))
+    >>> z = res_wrapped.x[0] + res_wrapped.x[1]*1j
+    >>> z
+    (0.49999999999925893+0.49999999999925893j)
+
+    """
+    if method not in ['trf', 'dogbox', 'lm']:
+        raise ValueError("`method` must be 'trf', 'dogbox' or 'lm'.")
+
+    if jac not in ['2-point', '3-point', 'cs'] and not callable(jac):
+        raise ValueError("`jac` must be '2-point', '3-point', 'cs' or "
+                         "callable.")
+
+    if tr_solver not in [None, 'exact', 'lsmr']:
+        raise ValueError("`tr_solver` must be None, 'exact' or 'lsmr'.")
+
+    if loss not in IMPLEMENTED_LOSSES and not callable(loss):
+        raise ValueError("`loss` must be one of {0} or a callable."
+                         .format(IMPLEMENTED_LOSSES.keys()))
+
+    if method == 'lm' and loss != 'linear':
+        raise ValueError("method='lm' supports only 'linear' loss function.")
+
+    if verbose not in [0, 1, 2]:
+        raise ValueError("`verbose` must be in [0, 1, 2].")
+
+    if max_nfev is not None and max_nfev <= 0:
+        raise ValueError("`max_nfev` must be None or positive integer.")
+
+    if np.iscomplexobj(x0):
+        raise ValueError("`x0` must be real.")
+
+    x0 = np.atleast_1d(x0).astype(float)
+
+    if x0.ndim > 1:
+        raise ValueError("`x0` must have at most 1 dimension.")
+
+    if isinstance(bounds, Bounds):
+        lb, ub = bounds.lb, bounds.ub
+        bounds = (lb, ub)
+    else:
+        if len(bounds) == 2:
+            lb, ub = prepare_bounds(bounds, x0.shape[0])
+        else:
+            raise ValueError("`bounds` must contain 2 elements.")
+
+    if method == 'lm' and not np.all((lb == -np.inf) & (ub == np.inf)):
+        raise ValueError("Method 'lm' doesn't support bounds.")
+
+    if lb.shape != x0.shape or ub.shape != x0.shape:
+        raise ValueError("Inconsistent shapes between bounds and `x0`.")
+
+    if np.any(lb >= ub):
+        raise ValueError("Each lower bound must be strictly less than each "
+                         "upper bound.")
+
+    if not in_bounds(x0, lb, ub):
+        raise ValueError("`x0` is infeasible.")
+
+    x_scale = check_x_scale(x_scale, x0)
+
+    ftol, xtol, gtol = check_tolerance(ftol, xtol, gtol, method)
+
+    def fun_wrapped(x):
+        return np.atleast_1d(fun(x, *args, **kwargs))
+
+    if method == 'trf':
+        x0 = make_strictly_feasible(x0, lb, ub)
+
+    f0 = fun_wrapped(x0)
+
+    if f0.ndim != 1:
+        raise ValueError("`fun` must return at most 1-d array_like. "
+                         "f0.shape: {0}".format(f0.shape))
+
+    if not np.all(np.isfinite(f0)):
+        raise ValueError("Residuals are not finite in the initial point.")
+
+    n = x0.size
+    m = f0.size
+
+    if method == 'lm' and m < n:
+        raise ValueError("Method 'lm' doesn't work when the number of "
+                         "residuals is less than the number of variables.")
+
+    loss_function = construct_loss_function(m, loss, f_scale)
+    if callable(loss):
+        rho = loss_function(f0)
+        if rho.shape != (3, m):
+            raise ValueError("The return value of `loss` callable has wrong "
+                             "shape.")
+        initial_cost = 0.5 * np.sum(rho[0])
+    elif loss_function is not None:
+        initial_cost = loss_function(f0, cost_only=True)
+    else:
+        initial_cost = 0.5 * np.dot(f0, f0)
+
+    if callable(jac):
+        J0 = jac(x0, *args, **kwargs)
+
+        if issparse(J0):
+            J0 = J0.tocsr()
+
+            def jac_wrapped(x, _=None):
+                return jac(x, *args, **kwargs).tocsr()
+
+        elif isinstance(J0, LinearOperator):
+            def jac_wrapped(x, _=None):
+                return jac(x, *args, **kwargs)
+
+        else:
+            J0 = np.atleast_2d(J0)
+
+            def jac_wrapped(x, _=None):
+                return np.atleast_2d(jac(x, *args, **kwargs))
+
+    else:  # Estimate Jacobian by finite differences.
+        if method == 'lm':
+            if jac_sparsity is not None:
+                raise ValueError("method='lm' does not support "
+                                 "`jac_sparsity`.")
+
+            if jac != '2-point':
+                warn("jac='{0}' works equivalently to '2-point' "
+                     "for method='lm'.".format(jac))
+
+            J0 = jac_wrapped = None
+        else:
+            if jac_sparsity is not None and tr_solver == 'exact':
+                raise ValueError("tr_solver='exact' is incompatible "
+                                 "with `jac_sparsity`.")
+
+            jac_sparsity = check_jac_sparsity(jac_sparsity, m, n)
+
+            def jac_wrapped(x, f):
+                J = approx_derivative(fun, x, rel_step=diff_step, method=jac,
+                                      f0=f, bounds=bounds, args=args,
+                                      kwargs=kwargs, sparsity=jac_sparsity)
+                if J.ndim != 2:  # J is guaranteed not sparse.
+                    J = np.atleast_2d(J)
+
+                return J
+
+            J0 = jac_wrapped(x0, f0)
+
+    if J0 is not None:
+        if J0.shape != (m, n):
+            raise ValueError(
+                "The return value of `jac` has wrong shape: expected {0}, "
+                "actual {1}.".format((m, n), J0.shape))
+
+        if not isinstance(J0, np.ndarray):
+            if method == 'lm':
+                raise ValueError("method='lm' works only with dense "
+                                 "Jacobian matrices.")
+
+            if tr_solver == 'exact':
+                raise ValueError(
+                    "tr_solver='exact' works only with dense "
+                    "Jacobian matrices.")
+
+        jac_scale = isinstance(x_scale, str) and x_scale == 'jac'
+        if isinstance(J0, LinearOperator) and jac_scale:
+            raise ValueError("x_scale='jac' can't be used when `jac` "
+                             "returns LinearOperator.")
+
+        if tr_solver is None:
+            if isinstance(J0, np.ndarray):
+                tr_solver = 'exact'
+            else:
+                tr_solver = 'lsmr'
+
+    if method == 'lm':
+        result = call_minpack(fun_wrapped, x0, jac_wrapped, ftol, xtol, gtol,
+                              max_nfev, x_scale, diff_step)
+
+    elif method == 'trf':
+        result = trf(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol, xtol,
+                     gtol, max_nfev, x_scale, loss_function, tr_solver,
+                     tr_options.copy(), verbose)
+
+    elif method == 'dogbox':
+        if tr_solver == 'lsmr' and 'regularize' in tr_options:
+            warn("The keyword 'regularize' in `tr_options` is not relevant "
+                 "for 'dogbox' method.")
+            tr_options = tr_options.copy()
+            del tr_options['regularize']
+
+        result = dogbox(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol,
+                        xtol, gtol, max_nfev, x_scale, loss_function,
+                        tr_solver, tr_options, verbose)
+
+    result.message = TERMINATION_MESSAGES[result.status]
+    result.success = result.status > 0
+
+    if verbose >= 1:
+        print(result.message)
+        print("Function evaluations {0}, initial cost {1:.4e}, final cost "
+              "{2:.4e}, first-order optimality {3:.2e}."
+              .format(result.nfev, initial_cost, result.cost,
+                      result.optimality))
+
+    return result
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_lsq/lsq_linear.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_lsq/lsq_linear.py
new file mode 100644
index 00000000..d8306bab
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_lsq/lsq_linear.py
@@ -0,0 +1,351 @@
+"""Linear least squares with bound constraints on independent variables."""
+import numpy as np
+from numpy.linalg import norm
+from scipy.sparse import issparse, csr_matrix
+from scipy.sparse.linalg import LinearOperator, lsmr
+from scipy.optimize import OptimizeResult
+
+from .common import in_bounds, compute_grad
+from .trf_linear import trf_linear
+from .bvls import bvls
+
+
+def prepare_bounds(bounds, n):
+    lb, ub = [np.asarray(b, dtype=float) for b in bounds]
+
+    if lb.ndim == 0:
+        lb = np.resize(lb, n)
+
+    if ub.ndim == 0:
+        ub = np.resize(ub, n)
+
+    return lb, ub
+
+
+TERMINATION_MESSAGES = {
+    -1: "The algorithm was not able to make progress on the last iteration.",
+    0: "The maximum number of iterations is exceeded.",
+    1: "The first-order optimality measure is less than `tol`.",
+    2: "The relative change of the cost function is less than `tol`.",
+    3: "The unconstrained solution is optimal."
+}
+
+
+def lsq_linear(A, b, bounds=(-np.inf, np.inf), method='trf', tol=1e-10,
+               lsq_solver=None, lsmr_tol=None, max_iter=None,
+               verbose=0, *, lsmr_maxiter=None):
+    r"""Solve a linear least-squares problem with bounds on the variables.
+
+    Given an m-by-n design matrix A and a target vector b with m elements,
+    `lsq_linear` solves the following optimization problem::
+
+        minimize 0.5 * ||A x - b||**2
+        subject to lb <= x <= ub
+
+    This optimization problem is convex, hence a minimum found (if the
+    iterations have converged) is guaranteed to be global.
+
+    Parameters
+    ----------
+    A : array_like, sparse matrix or LinearOperator, shape (m, n)
+        Design matrix. Can be `scipy.sparse.linalg.LinearOperator`.
+    b : array_like, shape (m,)
+        Target vector.
+    bounds : 2-tuple of array_like, optional
+        Lower and upper bounds on independent variables. Defaults to no bounds.
+        Each array must have shape (n,) or be a scalar, in the latter
+        case a bound will be the same for all variables. Use ``np.inf`` with
+        an appropriate sign to disable bounds on all or some variables.
+    method : 'trf' or 'bvls', optional
+        Method to perform minimization.
+
+            * 'trf' : Trust Region Reflective algorithm adapted for a linear
+              least-squares problem. This is an interior-point-like method
+              and the required number of iterations is weakly correlated with
+              the number of variables.
+            * 'bvls' : Bounded-variable least-squares algorithm. This is
+              an active set method, which requires the number of iterations
+              comparable to the number of variables. Can't be used when `A` is
+              sparse or LinearOperator.
+
+        Default is 'trf'.
+    tol : float, optional
+        Tolerance parameter. The algorithm terminates if a relative change
+        of the cost function is less than `tol` on the last iteration.
+        Additionally, the first-order optimality measure is considered:
+
+            * ``method='trf'`` terminates if the uniform norm of the gradient,
+              scaled to account for the presence of the bounds, is less than
+              `tol`.
+            * ``method='bvls'`` terminates if Karush-Kuhn-Tucker conditions
+              are satisfied within `tol` tolerance.
+
+    lsq_solver : {None, 'exact', 'lsmr'}, optional
+        Method of solving unbounded least-squares problems throughout
+        iterations:
+
+            * 'exact' : Use dense QR or SVD decomposition approach. Can't be
+              used when `A` is sparse or LinearOperator.
+            * 'lsmr' : Use `scipy.sparse.linalg.lsmr` iterative procedure
+              which requires only matrix-vector product evaluations. Can't
+              be used with ``method='bvls'``.
+
+        If None (default), the solver is chosen based on type of `A`.
+    lsmr_tol : None, float or 'auto', optional
+        Tolerance parameters 'atol' and 'btol' for `scipy.sparse.linalg.lsmr`.
+        If None (default), it is set to ``1e-2 * tol``. If 'auto', the
+        tolerance will be adjusted based on the optimality of the current
+        iterate, which can speed up the optimization process, but is not always
+        reliable.
+    max_iter : None or int, optional
+        Maximum number of iterations before termination. If None (default), it
+        is set to 100 for ``method='trf'`` or to the number of variables for
+        ``method='bvls'`` (not counting iterations for 'bvls' initialization).
+    verbose : {0, 1, 2}, optional
+        Level of algorithm's verbosity:
+
+            * 0 : work silently (default).
+            * 1 : display a termination report.
+            * 2 : display progress during iterations.
+    lsmr_maxiter : None or int, optional
+        Maximum number of iterations for the lsmr least squares solver,
+        if it is used (by setting ``lsq_solver='lsmr'``). If None (default), it
+        uses lsmr's default of ``min(m, n)`` where ``m`` and ``n`` are the
+        number of rows and columns of `A`, respectively. Has no effect if
+        ``lsq_solver='exact'``.
+
+    Returns
+    -------
+    OptimizeResult with the following fields defined:
+    x : ndarray, shape (n,)
+        Solution found.
+    cost : float
+        Value of the cost function at the solution.
+    fun : ndarray, shape (m,)
+        Vector of residuals at the solution.
+    optimality : float
+        First-order optimality measure. The exact meaning depends on `method`,
+        refer to the description of `tol` parameter.
+    active_mask : ndarray of int, shape (n,)
+        Each component shows whether a corresponding constraint is active
+        (that is, whether a variable is at the bound):
+
+            *  0 : a constraint is not active.
+            * -1 : a lower bound is active.
+            *  1 : an upper bound is active.
+
+        Might be somewhat arbitrary for the `trf` method as it generates a
+        sequence of strictly feasible iterates and `active_mask` is determined
+        within a tolerance threshold.
+    unbounded_sol : tuple
+        Unbounded least squares solution tuple returned by the least squares
+        solver (set with `lsq_solver` option). If `lsq_solver` is not set or is
+        set to ``'exact'``, the tuple contains an ndarray of shape (n,) with
+        the unbounded solution, an ndarray with the sum of squared residuals,
+        an int with the rank of `A`, and an ndarray with the singular values
+        of `A` (see NumPy's ``linalg.lstsq`` for more information). If
+        `lsq_solver` is set to ``'lsmr'``, the tuple contains an ndarray of
+        shape (n,) with the unbounded solution, an int with the exit code,
+        an int with the number of iterations, and five floats with
+        various norms and the condition number of `A` (see SciPy's
+        ``sparse.linalg.lsmr`` for more information). This output can be
+        useful for determining the convergence of the least squares solver,
+        particularly the iterative ``'lsmr'`` solver. The unbounded least
+        squares problem is to minimize ``0.5 * ||A x - b||**2``.
+    nit : int
+        Number of iterations. Zero if the unconstrained solution is optimal.
+    status : int
+        Reason for algorithm termination:
+
+            * -1 : the algorithm was not able to make progress on the last
+              iteration.
+            *  0 : the maximum number of iterations is exceeded.
+            *  1 : the first-order optimality measure is less than `tol`.
+            *  2 : the relative change of the cost function is less than `tol`.
+            *  3 : the unconstrained solution is optimal.
+
+    message : str
+        Verbal description of the termination reason.
+    success : bool
+        True if one of the convergence criteria is satisfied (`status` > 0).
+
+    See Also
+    --------
+    nnls : Linear least squares with non-negativity constraint.
+    least_squares : Nonlinear least squares with bounds on the variables.
+
+    Notes
+    -----
+    The algorithm first computes the unconstrained least-squares solution by
+    `numpy.linalg.lstsq` or `scipy.sparse.linalg.lsmr` depending on
+    `lsq_solver`. This solution is returned as optimal if it lies within the
+    bounds.
+
+    Method 'trf' runs the adaptation of the algorithm described in [STIR]_ for
+    a linear least-squares problem. The iterations are essentially the same as
+    in the nonlinear least-squares algorithm, but as the quadratic function
+    model is always accurate, we don't need to track or modify the radius of
+    a trust region. The line search (backtracking) is used as a safety net
+    when a selected step does not decrease the cost function. A more detailed
+    description of the algorithm can be found in `scipy.optimize.least_squares`.
+
+    Method 'bvls' runs a Python implementation of the algorithm described in
+    [BVLS]_. The algorithm maintains active and free sets of variables, on
+    each iteration chooses a new variable to move from the active set to the
+    free set and then solves the unconstrained least-squares problem on free
+    variables. This algorithm is guaranteed to give an accurate solution
+    eventually, but may require up to n iterations for a problem with n
+    variables. Additionally, an ad-hoc initialization procedure is
+    implemented that determines which variables to set free or active
+    initially. It costs some iterations before the actual BVLS starts, but
+    can significantly reduce the number of further iterations.
+
+    References
+    ----------
+    .. [STIR] M. A. Branch, T. F. Coleman, and Y. Li, "A Subspace, Interior,
+              and Conjugate Gradient Method for Large-Scale Bound-Constrained
+              Minimization Problems," SIAM Journal on Scientific Computing,
+              Vol. 21, Number 1, pp 1-23, 1999.
+    .. [BVLS] P. B. Stark and R. L. Parker, "Bounded-Variable Least-Squares:
+              an Algorithm and Applications", Computational Statistics, 10,
+              129-141, 1995.
+
+    Examples
+    --------
+    In this example, a problem with a large sparse matrix and bounds on the
+    variables is solved.
+
+    >>> import numpy as np
+    >>> from scipy.sparse import rand
+    >>> from scipy.optimize import lsq_linear
+    >>> rng = np.random.default_rng()
+    ...
+    >>> m = 20000
+    >>> n = 10000
+    ...
+    >>> A = rand(m, n, density=1e-4, random_state=rng)
+    >>> b = rng.standard_normal(m)
+    ...
+    >>> lb = rng.standard_normal(n)
+    >>> ub = lb + 1
+    ...
+    >>> res = lsq_linear(A, b, bounds=(lb, ub), lsmr_tol='auto', verbose=1)
+    # may vary
+    The relative change of the cost function is less than `tol`.
+    Number of iterations 16, initial cost 1.5039e+04, final cost 1.1112e+04,
+    first-order optimality 4.66e-08.
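+
+    As a second, minimal illustration (a sketch; the tolerance in the check
+    below is deliberately loose), consider a small dense problem with
+    non-negativity bounds whose unconstrained solution has a negative
+    component:
+
+    >>> A_small = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
+    >>> b_small = np.array([1.0, -1.0, 0.5])
+    >>> res_small = lsq_linear(A_small, b_small, bounds=(0, np.inf))
+    >>> np.allclose(res_small.x, [0.75, 0.0], atol=1e-6)
+    True
+
+    With the default ``lsq_solver='exact'`` for dense input, ``unbounded_sol``
+    holds the full `numpy.linalg.lstsq` output tuple:
+
+    >>> x_unbd, residuals, rank, sv = res_small.unbounded_sol
+    >>> rank
+    2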
+    """
+    if method not in ['trf', 'bvls']:
+        raise ValueError("`method` must be 'trf' or 'bvls'")
+
+    if lsq_solver not in [None, 'exact', 'lsmr']:
+        raise ValueError("`solver` must be None, 'exact' or 'lsmr'.")
+
+    if verbose not in [0, 1, 2]:
+        raise ValueError("`verbose` must be in [0, 1, 2].")
+
+    if issparse(A):
+        A = csr_matrix(A)
+    elif not isinstance(A, LinearOperator):
+        A = np.atleast_2d(np.asarray(A))
+
+    if method == 'bvls':
+        if lsq_solver == 'lsmr':
+            raise ValueError("method='bvls' can't be used with "
+                             "lsq_solver='lsmr'")
+
+        if not isinstance(A, np.ndarray):
+            raise ValueError("method='bvls' can't be used with `A` being "
+                             "sparse or LinearOperator.")
+
+    if lsq_solver is None:
+        if isinstance(A, np.ndarray):
+            lsq_solver = 'exact'
+        else:
+            lsq_solver = 'lsmr'
+    elif lsq_solver == 'exact' and not isinstance(A, np.ndarray):
+        raise ValueError("`exact` solver can't be used when `A` is "
+                         "sparse or LinearOperator.")
+
+    if len(A.shape) != 2:  # No ndim for LinearOperator.
+        raise ValueError("`A` must have at most 2 dimensions.")
+
+    if len(bounds) != 2:
+        raise ValueError("`bounds` must contain 2 elements.")
+
+    if max_iter is not None and max_iter <= 0:
+        raise ValueError("`max_iter` must be None or positive integer.")
+
+    m, n = A.shape
+
+    b = np.atleast_1d(b)
+    if b.ndim != 1:
+        raise ValueError("`b` must have at most 1 dimension.")
+
+    if b.size != m:
+        raise ValueError("Inconsistent shapes between `A` and `b`.")
+
+    lb, ub = prepare_bounds(bounds, n)
+
+    if lb.shape != (n,) or ub.shape != (n,):
+        raise ValueError("Bounds have wrong shape.")
+
+    if np.any(lb >= ub):
+        raise ValueError("Each lower bound must be strictly less than each "
+                         "upper bound.")
+
+    if lsmr_maxiter is not None and lsmr_maxiter < 1:
+        raise ValueError("`lsmr_maxiter` must be None or positive integer.")
+
+    if not ((isinstance(lsmr_tol, float) and lsmr_tol > 0) or
+            lsmr_tol in ('auto', None)):
+        raise ValueError("`lsmr_tol` must be None, 'auto', or positive float.")
+
+    if lsq_solver == 'exact':
+        unbd_lsq = np.linalg.lstsq(A, b, rcond=-1)
+    elif lsq_solver == 'lsmr':
+        first_lsmr_tol = lsmr_tol  # tol of first call to lsmr
+        if lsmr_tol is None or lsmr_tol == 'auto':
+            first_lsmr_tol = 1e-2 * tol  # default if lsmr_tol not defined
+        unbd_lsq = lsmr(A, b, maxiter=lsmr_maxiter,
+                        atol=first_lsmr_tol, btol=first_lsmr_tol)
+    x_lsq = unbd_lsq[0]  # extract the solution from the least squares solver
+
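+    # If the unconstrained solution already satisfies the bounds, it is also
+    # optimal for the bounded problem: report status 3 ("the unconstrained
+    # solution is optimal") with zero iterations.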
+    if in_bounds(x_lsq, lb, ub):
+        r = A @ x_lsq - b
+        cost = 0.5 * np.dot(r, r)
+        termination_status = 3
+        termination_message = TERMINATION_MESSAGES[termination_status]
+        g = compute_grad(A, r)
+        g_norm = norm(g, ord=np.inf)
+
+        if verbose > 0:
+            print(termination_message)
+            print("Final cost {0:.4e}, first-order optimality {1:.2e}"
+                  .format(cost, g_norm))
+
+        return OptimizeResult(
+            x=x_lsq, fun=r, cost=cost, optimality=g_norm,
+            active_mask=np.zeros(n), unbounded_sol=unbd_lsq,
+            nit=0, status=termination_status,
+            message=termination_message, success=True)
+
+    if method == 'trf':
+        res = trf_linear(A, b, x_lsq, lb, ub, tol, lsq_solver, lsmr_tol,
+                         max_iter, verbose, lsmr_maxiter=lsmr_maxiter)
+    elif method == 'bvls':
+        res = bvls(A, b, x_lsq, lb, ub, tol, max_iter, verbose)
+
+    res.unbounded_sol = unbd_lsq
+    res.message = TERMINATION_MESSAGES[res.status]
+    res.success = res.status > 0
+
+    if verbose > 0:
+        print(res.message)
+        print("Number of iterations {0}, initial cost {1:.4e}, "
+              "final cost {2:.4e}, first-order optimality {3:.2e}."
+              .format(res.nit, res.initial_cost, res.cost, res.optimality))
+
+    del res.initial_cost
+
+    return res
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_lsq/trf.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_lsq/trf.py
new file mode 100644
index 00000000..b12c7bfc
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_lsq/trf.py
@@ -0,0 +1,560 @@
+"""Trust Region Reflective algorithm for least-squares optimization.
+
+The algorithm is based on ideas from the paper [STIR]_. The main idea is to
+account for the presence of the bounds by an appropriate scaling of the
+variables (or, equivalently, by changing the trust-region shape). Let's
+introduce a vector v:
+
+           | ub[i] - x[i], if g[i] < 0 and ub[i] < np.inf
+    v[i] = | x[i] - lb[i], if g[i] > 0 and lb[i] > -np.inf
+           | 1,            otherwise
+
+where g is the gradient of a cost function and lb, ub are the bounds. Its
+components are distances to the bounds at which the anti-gradient points (if
+this distance is finite). Define a scaling matrix D = diag(v**0.5).
+First-order optimality conditions can be stated as
+
+    D^2 g(x) = 0.
+
+This means that components of the gradient should be zero for strictly
+interior variables, while for variables on a bound the corresponding
+components must point inside the feasible region.
+
+Now consider this system of equations as a new optimization problem. If the
+point x is strictly interior (not on the bound), then the left-hand side is
+differentiable and the Newton step for it satisfies
+
+    (D^2 H + diag(g) Jv) p = -D^2 g
+
+where H is the Hessian matrix (or its J^T J approximation in least squares),
+Jv is the Jacobian matrix of v with components -1, 1 or 0, such that all
+elements of matrix C = diag(g) Jv are non-negative. Introduce the change
+of the variables x = D x_h (_h would be "hat" in LaTeX). In the new variables,
+we have a Newton step satisfying
+
+    B_h p_h = -g_h,
+
+where B_h = D H D + C, g_h = D g. In least squares B_h = J_h^T J_h, where
+J_h = J D. Note that J_h and g_h are proper Jacobian and gradient with respect
+to "hat" variables. To guarantee global convergence we formulate a
+trust-region problem based on the Newton step in the new variables:
+
+    0.5 * p_h^T B_h p_h + g_h^T p_h -> min, ||p_h|| <= Delta
+
+In the original space B = H + D^{-1} C D^{-1}, and the equivalent trust-region
+problem is
+
+    0.5 * p^T B p + g^T p -> min, ||D^{-1} p|| <= Delta
+
+Here, the meaning of the matrix D becomes clearer: it alters the shape
+of a trust-region, such that large steps towards the bounds are not allowed.
+In the implementation, the trust-region problem is solved in "hat" space,
+but handling of the bounds is done in the original space (see below and read
+the code).
+
+The introduction of the matrix D doesn't mean that the bounds can be
+ignored: the algorithm must keep its iterates strictly feasible (to satisfy
+the aforementioned differentiability); the parameter theta controls the step
+back from the boundary (see the code for details).
+
+The algorithm does another important trick. If the trust-region solution
+doesn't fit into the bounds, then a search direction reflected from the
+first encountered bound is considered. For motivation and analysis refer to
+the [STIR]_ paper (and other papers by the authors). In practice, it doesn't
+need much justification: the algorithm simply chooses the best step among
+three: a constrained trust-region step, a reflected step and a constrained
+Cauchy step (a minimizer along -g_h in "hat" space, or -D^2 g in the original
+space).
+
+Another feature is that a trust-region radius control strategy is modified to
+account for appearance of the diagonal C matrix (called diag_h in the code).
+
+Note that all of the described peculiarities vanish when we consider
+problems without bounds (the algorithm becomes a standard trust-region type
+algorithm, very similar to the ones implemented in MINPACK).
+
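+As an illustration, here is a minimal sketch (not the implementation itself;
+the real code uses ``CL_scaling_vector`` from ``common``, which additionally
+returns derivative information) of how the vector v and the diagonal of D
+defined above could be computed:
+
+    import numpy as np
+
+    def cl_scaling(x, g, lb, ub):
+        # v[i] per the piecewise definition at the top of this docstring.
+        v = np.ones_like(x)
+        mask = (g < 0) & (ub < np.inf)
+        v[mask] = ub[mask] - x[mask]
+        mask = (g > 0) & (lb > -np.inf)
+        v[mask] = x[mask] - lb[mask]
+        return v, v**0.5  # v and the diagonal of D = diag(v**0.5)
+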
+The implementation supports two methods of solving the trust-region problem.
+The first, called 'exact', applies SVD to the Jacobian and then solves the
+problem very accurately using the algorithm described in [JJMore]_. It is
+not applicable to large problems. The second, called 'lsmr', uses the 2-D
+subspace approach (sometimes called "indefinite dogleg"), where the problem
+is solved in a subspace spanned by the gradient and the approximate
+Gauss-Newton step found by ``scipy.sparse.linalg.lsmr``. The 2-D
+trust-region problem is reformulated as a 4th-order algebraic equation and
+solved very accurately by ``numpy.roots``. The subspace approach makes it
+possible to solve very large problems (up to a couple of million residuals
+on a regular PC), provided the Jacobian matrix is sufficiently sparse.
+
+References
+----------
+.. [STIR] Branch, M.A., T.F. Coleman, and Y. Li, "A Subspace, Interior,
+      and Conjugate Gradient Method for Large-Scale Bound-Constrained
+      Minimization Problems," SIAM Journal on Scientific Computing,
+      Vol. 21, Number 1, pp 1-23, 1999.
+.. [JJMore] More, J. J., "The Levenberg-Marquardt Algorithm: Implementation
+    and Theory," Numerical Analysis, ed. G. A. Watson, Lecture Notes
+    in Mathematics 630, Springer Verlag, pp. 105-116, 1977.
+"""
+import numpy as np
+from numpy.linalg import norm
+from scipy.linalg import svd, qr
+from scipy.sparse.linalg import lsmr
+from scipy.optimize import OptimizeResult
+
+from .common import (
+    step_size_to_bound, find_active_constraints, in_bounds,
+    make_strictly_feasible, intersect_trust_region, solve_lsq_trust_region,
+    solve_trust_region_2d, minimize_quadratic_1d, build_quadratic_1d,
+    evaluate_quadratic, right_multiplied_operator, regularized_lsq_operator,
+    CL_scaling_vector, compute_grad, compute_jac_scale, check_termination,
+    update_tr_radius, scale_for_robust_loss_function, print_header_nonlinear,
+    print_iteration_nonlinear)
+
+
+def trf(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale,
+        loss_function, tr_solver, tr_options, verbose):
+    # For efficiency, it makes sense to run the simplified version of the
+    # algorithm when no bounds are imposed. We decided to write two separate
+    # functions. It violates the DRY principle, but it keeps each function
+    # as readable as possible.
+    if np.all(lb == -np.inf) and np.all(ub == np.inf):
+        return trf_no_bounds(
+            fun, jac, x0, f0, J0, ftol, xtol, gtol, max_nfev, x_scale,
+            loss_function, tr_solver, tr_options, verbose)
+    else:
+        return trf_bounds(
+            fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale,
+            loss_function, tr_solver, tr_options, verbose)
+
+
+def select_step(x, J_h, diag_h, g_h, p, p_h, d, Delta, lb, ub, theta):
+    """Select the best step according to Trust Region Reflective algorithm."""
+    if in_bounds(x + p, lb, ub):
+        p_value = evaluate_quadratic(J_h, g_h, p_h, diag=diag_h)
+        return p, p_h, -p_value
+
+    p_stride, hits = step_size_to_bound(x, p, lb, ub)
+
+    # Compute the reflected direction.
+    r_h = np.copy(p_h)
+    r_h[hits.astype(bool)] *= -1
+    r = d * r_h
+
+    # Restrict trust-region step, such that it hits the bound.
+    p *= p_stride
+    p_h *= p_stride
+    x_on_bound = x + p
+
+    # The reflected direction will first cross either the feasible region
+    # boundary or the trust-region boundary.
+    _, to_tr = intersect_trust_region(p_h, r_h, Delta)
+    to_bound, _ = step_size_to_bound(x_on_bound, r, lb, ub)
+
+    # Find lower and upper bounds on a step size along the reflected
+    # direction, considering the strict feasibility requirement. There is no
+    # single correct way to do that; the chosen approach seems to work best
+    # on test problems.
+    r_stride = min(to_bound, to_tr)
+    if r_stride > 0:
+        r_stride_l = (1 - theta) * p_stride / r_stride
+        if r_stride == to_bound:
+            r_stride_u = theta * to_bound
+        else:
+            r_stride_u = to_tr
+    else:
+        r_stride_l = 0
+        r_stride_u = -1
+
+    # Check if reflection step is available.
+    if r_stride_l <= r_stride_u:
+        a, b, c = build_quadratic_1d(J_h, g_h, r_h, s0=p_h, diag=diag_h)
+        r_stride, r_value = minimize_quadratic_1d(
+            a, b, r_stride_l, r_stride_u, c=c)
+        r_h *= r_stride
+        r_h += p_h
+        r = r_h * d
+    else:
+        r_value = np.inf
+
+    # Now correct p_h to make it strictly interior.
+    p *= theta
+    p_h *= theta
+    p_value = evaluate_quadratic(J_h, g_h, p_h, diag=diag_h)
+
+    ag_h = -g_h
+    ag = d * ag_h
+
+    to_tr = Delta / norm(ag_h)
+    to_bound, _ = step_size_to_bound(x, ag, lb, ub)
+    if to_bound < to_tr:
+        ag_stride = theta * to_bound
+    else:
+        ag_stride = to_tr
+
+    a, b = build_quadratic_1d(J_h, g_h, ag_h, diag=diag_h)
+    ag_stride, ag_value = minimize_quadratic_1d(a, b, 0, ag_stride)
+    ag_h *= ag_stride
+    ag *= ag_stride
+
+    if p_value < r_value and p_value < ag_value:
+        return p, p_h, -p_value
+    elif r_value < p_value and r_value < ag_value:
+        return r, r_h, -r_value
+    else:
+        return ag, ag_h, -ag_value
+
+
+def trf_bounds(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev,
+               x_scale, loss_function, tr_solver, tr_options, verbose):
+    x = x0.copy()
+
+    f = f0
+    f_true = f.copy()
+    nfev = 1
+
+    J = J0
+    njev = 1
+    m, n = J.shape
+
+    if loss_function is not None:
+        rho = loss_function(f)
+        cost = 0.5 * np.sum(rho[0])
+        J, f = scale_for_robust_loss_function(J, f, rho)
+    else:
+        cost = 0.5 * np.dot(f, f)
+
+    g = compute_grad(J, f)
+
+    jac_scale = isinstance(x_scale, str) and x_scale == 'jac'
+    if jac_scale:
+        scale, scale_inv = compute_jac_scale(J)
+    else:
+        scale, scale_inv = x_scale, 1 / x_scale
+
+    v, dv = CL_scaling_vector(x, g, lb, ub)
+    v[dv != 0] *= scale_inv[dv != 0]
+    Delta = norm(x0 * scale_inv / v**0.5)
+    if Delta == 0:
+        Delta = 1.0
+
+    g_norm = norm(g * v, ord=np.inf)
+
+    f_augmented = np.zeros((m + n))
+    if tr_solver == 'exact':
+        J_augmented = np.empty((m + n, n))
+    elif tr_solver == 'lsmr':
+        reg_term = 0.0
+        regularize = tr_options.pop('regularize', True)
+
+    if max_nfev is None:
+        max_nfev = x0.size * 100
+
+    alpha = 0.0  # "Levenberg-Marquardt" parameter
+
+    termination_status = None
+    iteration = 0
+    step_norm = None
+    actual_reduction = None
+
+    if verbose == 2:
+        print_header_nonlinear()
+
+    while True:
+        v, dv = CL_scaling_vector(x, g, lb, ub)
+
+        g_norm = norm(g * v, ord=np.inf)
+        if g_norm < gtol:
+            termination_status = 1
+
+        if verbose == 2:
+            print_iteration_nonlinear(iteration, nfev, cost, actual_reduction,
+                                      step_norm, g_norm)
+
+        if termination_status is not None or nfev == max_nfev:
+            break
+
+        # Now compute variables in "hat" space. Here, we also account for
+        # the scaling introduced by the `x_scale` parameter. This part is a
+        # bit tricky: you have to write down the formulas and see how the
+        # trust-region problem is formulated when the two types of scaling
+        # are applied. The idea is that we first apply `x_scale` and then
+        # apply the Coleman-Li approach in the new variables.
+
+        # v is recomputed in the variables after applying `x_scale`; note
+        # that components which were identically 1 are not affected.
+        v[dv != 0] *= scale_inv[dv != 0]
+
+        # Here, we apply two types of scaling.
+        d = v**0.5 * scale
+
+        # C = diag(g * scale) Jv
+        diag_h = g * dv * scale
+
+        # After all this has been done, we continue normally.
+
+        # "hat" gradient.
+        g_h = d * g
+
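+        # The trust-region model uses B_h = J_h^T J_h + diag(diag_h). Both
+        # solvers below handle the diagonal term by augmenting the
+        # least-squares problem: 'exact' appends rows diag(diag_h)**0.5 to
+        # J_h, while 'lsmr' wraps J_h in a regularized operator.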
+        f_augmented[:m] = f
+        if tr_solver == 'exact':
+            J_augmented[:m] = J * d
+            J_h = J_augmented[:m]  # Memory view.
+            J_augmented[m:] = np.diag(diag_h**0.5)
+            U, s, V = svd(J_augmented, full_matrices=False)
+            V = V.T
+            uf = U.T.dot(f_augmented)
+        elif tr_solver == 'lsmr':
+            J_h = right_multiplied_operator(J, d)
+
+            if regularize:
+                a, b = build_quadratic_1d(J_h, g_h, -g_h, diag=diag_h)
+                to_tr = Delta / norm(g_h)
+                ag_value = minimize_quadratic_1d(a, b, 0, to_tr)[1]
+                reg_term = -ag_value / Delta**2
+
+            lsmr_op = regularized_lsq_operator(J_h, (diag_h + reg_term)**0.5)
+            gn_h = lsmr(lsmr_op, f_augmented, **tr_options)[0]
+            S = np.vstack((g_h, gn_h)).T
+            S, _ = qr(S, mode='economic')
+            JS = J_h.dot(S)  # LinearOperator does dot too.
+            B_S = np.dot(JS.T, JS) + np.dot(S.T * diag_h, S)
+            g_S = S.T.dot(g_h)
+
+        # theta controls the step-back ratio from the bounds.
+        theta = max(0.995, 1 - g_norm)
+
+        actual_reduction = -1
+        while actual_reduction <= 0 and nfev < max_nfev:
+            if tr_solver == 'exact':
+                p_h, alpha, n_iter = solve_lsq_trust_region(
+                    n, m, uf, s, V, Delta, initial_alpha=alpha)
+            elif tr_solver == 'lsmr':
+                p_S, _ = solve_trust_region_2d(B_S, g_S, Delta)
+                p_h = S.dot(p_S)
+
+            p = d * p_h  # Trust-region solution in the original space.
+            step, step_h, predicted_reduction = select_step(
+                x, J_h, diag_h, g_h, p, p_h, d, Delta, lb, ub, theta)
+
+            x_new = make_strictly_feasible(x + step, lb, ub, rstep=0)
+            f_new = fun(x_new)
+            nfev += 1
+
+            step_h_norm = norm(step_h)
+
+            if not np.all(np.isfinite(f_new)):
+                Delta = 0.25 * step_h_norm
+                continue
+
+            # Usual trust-region step quality estimation.
+            if loss_function is not None:
+                cost_new = loss_function(f_new, cost_only=True)
+            else:
+                cost_new = 0.5 * np.dot(f_new, f_new)
+            actual_reduction = cost - cost_new
+            Delta_new, ratio = update_tr_radius(
+                Delta, actual_reduction, predicted_reduction,
+                step_h_norm, step_h_norm > 0.95 * Delta)
+
+            step_norm = norm(step)
+            termination_status = check_termination(
+                actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol)
+            if termination_status is not None:
+                break
+
+            alpha *= Delta / Delta_new
+            Delta = Delta_new
+
+        if actual_reduction > 0:
+            x = x_new
+
+            f = f_new
+            f_true = f.copy()
+
+            cost = cost_new
+
+            J = jac(x, f)
+            njev += 1
+
+            if loss_function is not None:
+                rho = loss_function(f)
+                J, f = scale_for_robust_loss_function(J, f, rho)
+
+            g = compute_grad(J, f)
+
+            if jac_scale:
+                scale, scale_inv = compute_jac_scale(J, scale_inv)
+        else:
+            step_norm = 0
+            actual_reduction = 0
+
+        iteration += 1
+
+    if termination_status is None:
+        termination_status = 0
+
+    active_mask = find_active_constraints(x, lb, ub, rtol=xtol)
+    return OptimizeResult(
+        x=x, cost=cost, fun=f_true, jac=J, grad=g, optimality=g_norm,
+        active_mask=active_mask, nfev=nfev, njev=njev,
+        status=termination_status)
+
+
+def trf_no_bounds(fun, jac, x0, f0, J0, ftol, xtol, gtol, max_nfev,
+                  x_scale, loss_function, tr_solver, tr_options, verbose):
+    x = x0.copy()
+
+    f = f0
+    f_true = f.copy()
+    nfev = 1
+
+    J = J0
+    njev = 1
+    m, n = J.shape
+
+    if loss_function is not None:
+        rho = loss_function(f)
+        cost = 0.5 * np.sum(rho[0])
+        J, f = scale_for_robust_loss_function(J, f, rho)
+    else:
+        cost = 0.5 * np.dot(f, f)
+
+    g = compute_grad(J, f)
+
+    jac_scale = isinstance(x_scale, str) and x_scale == 'jac'
+    if jac_scale:
+        scale, scale_inv = compute_jac_scale(J)
+    else:
+        scale, scale_inv = x_scale, 1 / x_scale
+
+    Delta = norm(x0 * scale_inv)
+    if Delta == 0:
+        Delta = 1.0
+
+    if tr_solver == 'lsmr':
+        reg_term = 0
+        damp = tr_options.pop('damp', 0.0)
+        regularize = tr_options.pop('regularize', True)
+
+    if max_nfev is None:
+        max_nfev = x0.size * 100
+
+    alpha = 0.0  # "Levenberg-Marquardt" parameter
+
+    termination_status = None
+    iteration = 0
+    step_norm = None
+    actual_reduction = None
+
+    if verbose == 2:
+        print_header_nonlinear()
+
+    while True:
+        g_norm = norm(g, ord=np.inf)
+        if g_norm < gtol:
+            termination_status = 1
+
+        if verbose == 2:
+            print_iteration_nonlinear(iteration, nfev, cost, actual_reduction,
+                                      step_norm, g_norm)
+
+        if termination_status is not None or nfev == max_nfev:
+            break
+
+        d = scale
+        g_h = d * g
+
+        if tr_solver == 'exact':
+            J_h = J * d
+            U, s, V = svd(J_h, full_matrices=False)
+            V = V.T
+            uf = U.T.dot(f)
+        elif tr_solver == 'lsmr':
+            J_h = right_multiplied_operator(J, d)
+
+            if regularize:
+                a, b = build_quadratic_1d(J_h, g_h, -g_h)
+                to_tr = Delta / norm(g_h)
+                ag_value = minimize_quadratic_1d(a, b, 0, to_tr)[1]
+                reg_term = -ag_value / Delta**2
+
+            damp_full = (damp**2 + reg_term)**0.5
+            gn_h = lsmr(J_h, f, damp=damp_full, **tr_options)[0]
+            S = np.vstack((g_h, gn_h)).T
+            S, _ = qr(S, mode='economic')
+            JS = J_h.dot(S)
+            B_S = np.dot(JS.T, JS)
+            g_S = S.T.dot(g_h)
+
+        actual_reduction = -1
+        while actual_reduction <= 0 and nfev < max_nfev:
+            if tr_solver == 'exact':
+                step_h, alpha, n_iter = solve_lsq_trust_region(
+                    n, m, uf, s, V, Delta, initial_alpha=alpha)
+            elif tr_solver == 'lsmr':
+                p_S, _ = solve_trust_region_2d(B_S, g_S, Delta)
+                step_h = S.dot(p_S)
+
+            predicted_reduction = -evaluate_quadratic(J_h, g_h, step_h)
+            step = d * step_h
+            x_new = x + step
+            f_new = fun(x_new)
+            nfev += 1
+
+            step_h_norm = norm(step_h)
+
+            if not np.all(np.isfinite(f_new)):
+                Delta = 0.25 * step_h_norm
+                continue
+
+            # Usual trust-region step quality estimation.
+            if loss_function is not None:
+                cost_new = loss_function(f_new, cost_only=True)
+            else:
+                cost_new = 0.5 * np.dot(f_new, f_new)
+            actual_reduction = cost - cost_new
+
+            Delta_new, ratio = update_tr_radius(
+                Delta, actual_reduction, predicted_reduction,
+                step_h_norm, step_h_norm > 0.95 * Delta)
+
+            step_norm = norm(step)
+            termination_status = check_termination(
+                actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol)
+            if termination_status is not None:
+                break
+
+            alpha *= Delta / Delta_new
+            Delta = Delta_new
+
+        if actual_reduction > 0:
+            x = x_new
+
+            f = f_new
+            f_true = f.copy()
+
+            cost = cost_new
+
+            J = jac(x, f)
+            njev += 1
+
+            if loss_function is not None:
+                rho = loss_function(f)
+                J, f = scale_for_robust_loss_function(J, f, rho)
+
+            g = compute_grad(J, f)
+
+            if jac_scale:
+                scale, scale_inv = compute_jac_scale(J, scale_inv)
+        else:
+            step_norm = 0
+            actual_reduction = 0
+
+        iteration += 1
+
+    if termination_status is None:
+        termination_status = 0
+
+    active_mask = np.zeros_like(x)
+    return OptimizeResult(
+        x=x, cost=cost, fun=f_true, jac=J, grad=g, optimality=g_norm,
+        active_mask=active_mask, nfev=nfev, njev=njev,
+        status=termination_status)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_lsq/trf_linear.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_lsq/trf_linear.py
new file mode 100644
index 00000000..dd752763
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_lsq/trf_linear.py
@@ -0,0 +1,249 @@
+"""The adaptation of Trust Region Reflective algorithm for a linear
+least-squares problem."""
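+
+# This private module backs `scipy.optimize.lsq_linear` with method='trf';
+# it is not part of the public API.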
+import numpy as np
+from numpy.linalg import norm
+from scipy.linalg import qr, solve_triangular
+from scipy.sparse.linalg import lsmr
+from scipy.optimize import OptimizeResult
+
+from .givens_elimination import givens_elimination
+from .common import (
+    EPS, step_size_to_bound, find_active_constraints, in_bounds,
+    make_strictly_feasible, build_quadratic_1d, evaluate_quadratic,
+    minimize_quadratic_1d, CL_scaling_vector, reflective_transformation,
+    print_header_linear, print_iteration_linear, compute_grad,
+    regularized_lsq_operator, right_multiplied_operator)
+
+
+def regularized_lsq_with_qr(m, n, R, QTb, perm, diag, copy_R=True):
+    """Solve regularized least squares using information from QR-decomposition.
+
+    The initial problem is to solve the following system in a least-squares
+    sense::
+
+        A x = b
+        D x = 0
+
+    where D is a diagonal matrix. The method is based on the QR decomposition
+    of the form A P = Q R, where P is a column permutation matrix, Q is an
+    orthogonal matrix and R is an upper triangular matrix.
+
+    Parameters
+    ----------
+    m, n : int
+        Initial shape of A.
+    R : ndarray, shape (n, n)
+        Upper triangular matrix from QR decomposition of A.
+    QTb : ndarray, shape (n,)
+        First n components of Q^T b.
+    perm : ndarray, shape (n,)
+        Array defining the column permutation of A, such that the ith column
+        of P is the perm[i]-th column of the identity matrix.
+    diag : ndarray, shape (n,)
+        Array containing diagonal elements of D.
+    copy_R : bool, optional
+        If True (default), work on a copy of `R`, leaving the caller's `R`
+        intact; otherwise `R` is modified in place.
+
+    Returns
+    -------
+    x : ndarray, shape (n,)
+        Found least-squares solution.
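+
+    Examples
+    --------
+    A minimal, illustrative sketch, checking the result against a dense
+    solve of the stacked system ``[A; D] x = [b; 0]``:
+
+    >>> import numpy as np
+    >>> from scipy.linalg import qr
+    >>> rng = np.random.default_rng(0)
+    >>> m, n = 6, 4
+    >>> A = rng.standard_normal((m, n))
+    >>> b = rng.standard_normal(m)
+    >>> diag = np.full(n, 0.5)
+    >>> Q, R, perm = qr(A, mode='economic', pivoting=True)
+    >>> x = regularized_lsq_with_qr(m, n, R, Q.T.dot(b), perm, diag)
+    >>> x_ref = np.linalg.lstsq(np.vstack((A, np.diag(diag))),
+    ...                         np.concatenate((b, np.zeros(n))), rcond=None)[0]
+    >>> np.allclose(x, x_ref)
+    True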
+    """
+    if copy_R:
+        R = R.copy()
+    v = QTb.copy()
+
+    givens_elimination(R, v, diag[perm])
+
+    abs_diag_R = np.abs(np.diag(R))
+    threshold = EPS * max(m, n) * np.max(abs_diag_R)
+    nns, = np.nonzero(abs_diag_R > threshold)
+
+    R = R[np.ix_(nns, nns)]
+    v = v[nns]
+
+    x = np.zeros(n)
+    x[perm[nns]] = solve_triangular(R, v)
+
+    return x
+
+
+def backtracking(A, g, x, p, theta, p_dot_g, lb, ub):
+    """Find an appropriate step size using backtracking line search."""
+    alpha = 1
+    while True:
+        x_new, _ = reflective_transformation(x + alpha * p, lb, ub)
+        step = x_new - x
+        cost_change = -evaluate_quadratic(A, g, step)
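+        # Armijo-like sufficient-decrease test: p_dot_g < 0 for a descent
+        # direction, so the right-hand side is one tenth of the decrease
+        # predicted by the linear model along alpha * p.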
+        if cost_change > -0.1 * alpha * p_dot_g:
+            break
+        alpha *= 0.5
+
+    active = find_active_constraints(x_new, lb, ub)
+    if np.any(active != 0):
+        x_new, _ = reflective_transformation(x + theta * alpha * p, lb, ub)
+        x_new = make_strictly_feasible(x_new, lb, ub, rstep=0)
+        step = x_new - x
+        cost_change = -evaluate_quadratic(A, g, step)
+
+    return x_new, step, cost_change
+
+
+def select_step(x, A_h, g_h, c_h, p, p_h, d, lb, ub, theta):
+    """Select the best step according to Trust Region Reflective algorithm."""
+    if in_bounds(x + p, lb, ub):
+        return p
+
+    p_stride, hits = step_size_to_bound(x, p, lb, ub)
+    r_h = np.copy(p_h)
+    r_h[hits.astype(bool)] *= -1
+    r = d * r_h
+
+    # Restrict step, such that it hits the bound.
+    p *= p_stride
+    p_h *= p_stride
+    x_on_bound = x + p
+
+    # Find the step size along reflected direction.
+    r_stride_u, _ = step_size_to_bound(x_on_bound, r, lb, ub)
+
+    # Stay interior.
+    r_stride_l = (1 - theta) * r_stride_u
+    r_stride_u *= theta
+
+    if r_stride_u > 0:
+        a, b, c = build_quadratic_1d(A_h, g_h, r_h, s0=p_h, diag=c_h)
+        r_stride, r_value = minimize_quadratic_1d(
+            a, b, r_stride_l, r_stride_u, c=c)
+        r_h = p_h + r_h * r_stride
+        r = d * r_h
+    else:
+        r_value = np.inf
+
+    # Now correct p_h to make it strictly interior.
+    p_h *= theta
+    p *= theta
+    p_value = evaluate_quadratic(A_h, g_h, p_h, diag=c_h)
+
+    ag_h = -g_h
+    ag = d * ag_h
+    ag_stride_u, _ = step_size_to_bound(x, ag, lb, ub)
+    ag_stride_u *= theta
+    a, b = build_quadratic_1d(A_h, g_h, ag_h, diag=c_h)
+    ag_stride, ag_value = minimize_quadratic_1d(a, b, 0, ag_stride_u)
+    ag *= ag_stride
+
+    if p_value < r_value and p_value < ag_value:
+        return p
+    elif r_value < p_value and r_value < ag_value:
+        return r
+    else:
+        return ag
+
+
+def trf_linear(A, b, x_lsq, lb, ub, tol, lsq_solver, lsmr_tol,
+               max_iter, verbose, *, lsmr_maxiter=None):
+    m, n = A.shape
+    x, _ = reflective_transformation(x_lsq, lb, ub)
+    x = make_strictly_feasible(x, lb, ub, rstep=0.1)
+
+    if lsq_solver == 'exact':
+        Q, R, perm = qr(A, mode='economic', pivoting=True)
+        QT = Q.T
+
+        if m < n:
+            R = np.vstack((R, np.zeros((n - m, n))))
+
+        QTr = np.zeros(n)
+        k = min(m, n)
+    elif lsq_solver == 'lsmr':
+        r_aug = np.zeros(m + n)
+        auto_lsmr_tol = False
+        if lsmr_tol is None:
+            lsmr_tol = 1e-2 * tol
+        elif lsmr_tol == 'auto':
+            auto_lsmr_tol = True
+
+    r = A.dot(x) - b
+    g = compute_grad(A, r)
+    cost = 0.5 * np.dot(r, r)
+    initial_cost = cost
+
+    termination_status = None
+    step_norm = None
+    cost_change = None
+
+    if max_iter is None:
+        max_iter = 100
+
+    if verbose == 2:
+        print_header_linear()
+
+    for iteration in range(max_iter):
+        v, dv = CL_scaling_vector(x, g, lb, ub)
+        g_scaled = g * v
+        g_norm = norm(g_scaled, ord=np.inf)
+        if g_norm < tol:
+            termination_status = 1
+
+        if verbose == 2:
+            print_iteration_linear(iteration, cost, cost_change,
+                                   step_norm, g_norm)
+
+        if termination_status is not None:
+            break
+
+        diag_h = g * dv
+        diag_root_h = diag_h ** 0.5
+        d = v ** 0.5
+        g_h = d * g
+
+        A_h = right_multiplied_operator(A, d)
+        if lsq_solver == 'exact':
+            QTr[:k] = QT.dot(r)
+            p_h = -regularized_lsq_with_qr(m, n, R * d[perm], QTr, perm,
+                                           diag_root_h, copy_R=False)
+        elif lsq_solver == 'lsmr':
+            lsmr_op = regularized_lsq_operator(A_h, diag_root_h)
+            r_aug[:m] = r
+            if auto_lsmr_tol:
+                eta = 1e-2 * min(0.5, g_norm)
+                lsmr_tol = max(EPS, min(0.1, eta * g_norm))
+            p_h = -lsmr(lsmr_op, r_aug, maxiter=lsmr_maxiter,
+                        atol=lsmr_tol, btol=lsmr_tol)[0]
+
+        p = d * p_h
+
+        p_dot_g = np.dot(p, g)
+        if p_dot_g > 0:
+            termination_status = -1
+
+        theta = 1 - min(0.005, g_norm)
+        step = select_step(x, A_h, g_h, diag_h, p, p_h, d, lb, ub, theta)
+        cost_change = -evaluate_quadratic(A, g, step)
+
+        # This is perhaps almost never executed: the idea is that `p` is a
+        # descent direction, thus we must be able to find an acceptable cost
+        # decrease by simple "backtracking"; otherwise the algorithm's logic
+        # would break.
+        if cost_change < 0:
+            x, step, cost_change = backtracking(
+                A, g, x, p, theta, p_dot_g, lb, ub)
+        else:
+            x = make_strictly_feasible(x + step, lb, ub, rstep=0)
+
+        step_norm = norm(step)
+        r = A.dot(x) - b
+        g = compute_grad(A, r)
+
+        if cost_change < tol * cost:
+            termination_status = 2
+
+        cost = 0.5 * np.dot(r, r)
+
+    if termination_status is None:
+        termination_status = 0
+
+    active_mask = find_active_constraints(x, lb, ub, rtol=tol)
+
+    return OptimizeResult(
+        x=x, fun=r, cost=cost, optimality=g_norm, active_mask=active_mask,
+        nit=iteration + 1, status=termination_status,
+        initial_cost=initial_cost)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_milp.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_milp.py
new file mode 100644
index 00000000..558283bc
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_milp.py
@@ -0,0 +1,387 @@
+import warnings
+import numpy as np
+from scipy.sparse import csc_array, vstack
+from ._highs._highs_wrapper import _highs_wrapper  # type: ignore[import]
+from ._constraints import LinearConstraint, Bounds
+from ._optimize import OptimizeResult
+from ._linprog_highs import _highs_to_scipy_status_message
+
+
+def _constraints_to_components(constraints):
+    """
+    Convert sequence of constraints to a single set of components A, b_l, b_u.
+
+    `constraints` could be
+
+    1. A LinearConstraint
+    2. A tuple representing a LinearConstraint
+    3. An invalid object
+    4. A sequence composed entirely of objects of types 1/2
+    5. A sequence containing at least one object of type 3
+
+    We want to accept 1, 2, and 4 and reject 3 and 5.
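+
+    For illustration, a minimal sketch (checking shapes only, to keep the
+    output deterministic):
+
+    >>> import numpy as np
+    >>> from scipy.optimize import LinearConstraint
+    >>> A, b_l, b_u = _constraints_to_components(
+    ...     [LinearConstraint([[1, 2]], -np.inf, 4), ([[3, 4]], 0, 5)])
+    >>> A.shape, b_l.shape, b_u.shape
+    ((2, 2), (2,), (2,))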
+    """
+    message = ("`constraints` (or each element within `constraints`) must be "
+               "convertible into an instance of "
+               "`scipy.optimize.LinearConstraint`.")
+    As = []
+    b_ls = []
+    b_us = []
+
+    # Accept case 1 by standardizing as case 4
+    if isinstance(constraints, LinearConstraint):
+        constraints = [constraints]
+    else:
+        # Reject case 3
+        try:
+            iter(constraints)
+        except TypeError as exc:
+            raise ValueError(message) from exc
+
+        # Accept case 2 by standardizing as case 4
+        if len(constraints) == 3:
+            # argument could be a single tuple representing a LinearConstraint
+            try:
+                constraints = [LinearConstraint(*constraints)]
+            except (TypeError, ValueError, np.VisibleDeprecationWarning):
+                # argument was not a tuple representing a LinearConstraint
+                pass
+
+    # Address cases 4/5
+    for constraint in constraints:
+        # if it's not a LinearConstraint or something that represents a
+        # LinearConstraint at this point, it's invalid
+        if not isinstance(constraint, LinearConstraint):
+            try:
+                constraint = LinearConstraint(*constraint)
+            except TypeError as exc:
+                raise ValueError(message) from exc
+        As.append(csc_array(constraint.A))
+        b_ls.append(np.atleast_1d(constraint.lb).astype(np.double))
+        b_us.append(np.atleast_1d(constraint.ub).astype(np.double))
+
+    if len(As) > 1:
+        A = vstack(As)
+        b_l = np.concatenate(b_ls)
+        b_u = np.concatenate(b_us)
+    else:  # avoid unnecessary copying
+        A = As[0]
+        b_l = b_ls[0]
+        b_u = b_us[0]
+
+    return A, b_l, b_u
+
+
+def _milp_iv(c, integrality, bounds, constraints, options):
+    # objective IV
+    c = np.atleast_1d(c).astype(np.double)
+    if c.ndim != 1 or c.size == 0 or not np.all(np.isfinite(c)):
+        message = ("`c` must be a one-dimensional array of finite numbers "
+                   "with at least one element.")
+        raise ValueError(message)
+
+    # integrality IV
+    message = ("`integrality` must contain integers 0-3 and be broadcastable "
+               "to `c.shape`.")
+    if integrality is None:
+        integrality = 0
+    try:
+        integrality = np.broadcast_to(integrality, c.shape).astype(np.uint8)
+    except ValueError:
+        raise ValueError(message)
+    if integrality.min() < 0 or integrality.max() > 3:
+        raise ValueError(message)
+
+    # bounds IV
+    if bounds is None:
+        bounds = Bounds(0, np.inf)
+    elif not isinstance(bounds, Bounds):
+        message = ("`bounds` must be convertible into an instance of "
+                   "`scipy.optimize.Bounds`.")
+        try:
+            bounds = Bounds(*bounds)
+        except TypeError as exc:
+            raise ValueError(message) from exc
+
+    try:
+        lb = np.broadcast_to(bounds.lb, c.shape).astype(np.double)
+        ub = np.broadcast_to(bounds.ub, c.shape).astype(np.double)
+    except (ValueError, TypeError) as exc:
+        message = ("`bounds.lb` and `bounds.ub` must contain reals and "
+                   "be broadcastable to `c.shape`.")
+        raise ValueError(message) from exc
+
+    # constraints IV
+    if not constraints:
+        constraints = [LinearConstraint(np.empty((0, c.size)),
+                                        np.empty((0,)), np.empty((0,)))]
+    try:
+        A, b_l, b_u = _constraints_to_components(constraints)
+    except ValueError as exc:
+        message = ("`constraints` (or each element within `constraints`) must "
+                   "be convertible into an instance of "
+                   "`scipy.optimize.LinearConstraint`.")
+        raise ValueError(message) from exc
+
+    if A.shape != (b_l.size, c.size):
+        message = "The shape of `A` must be (len(b_l), len(c))."
+        raise ValueError(message)
+    indptr, indices, data = A.indptr, A.indices, A.data.astype(np.double)
+
+    # options IV
+    options = options or {}
+    supported_options = {'disp', 'presolve', 'time_limit', 'node_limit',
+                         'mip_rel_gap'}
+    unsupported_options = set(options).difference(supported_options)
+    if unsupported_options:
+        message = (f"Unrecognized options detected: {unsupported_options}. "
+                   "These will be passed to HiGHS verbatim.")
+        warnings.warn(message, RuntimeWarning, stacklevel=3)
+    options_iv = {'log_to_console': options.pop("disp", False),
+                  'mip_max_nodes': options.pop("node_limit", None)}
+    options_iv.update(options)
+
+    return c, integrality, lb, ub, indptr, indices, data, b_l, b_u, options_iv
+
+
+def milp(c, *, integrality=None, bounds=None, constraints=None, options=None):
+    r"""
+    Mixed-integer linear programming
+
+    Solves problems of the following form:
+
+    .. math::
+
+        \min_x \ & c^T x \\
+        \mbox{such that} \ & b_l \leq A x \leq b_u,\\
+        & l \leq x \leq u, \\
+        & x_i \in \mathbb{Z}, i \in X_i
+
+    where :math:`x` is a vector of decision variables;
+    :math:`c`, :math:`b_l`, :math:`b_u`, :math:`l`, and :math:`u` are vectors;
+    :math:`A` is a matrix, and :math:`X_i` is the set of indices of
+    decision variables that must be integral. (In this context, a
+    variable that can assume only integer values is said to be "integral";
+    it has an "integrality" constraint.)
+
+    Alternatively, that's:
+
+    minimize::
+
+        c @ x
+
+    such that::
+
+        b_l <= A @ x <= b_u
+        l <= x <= u
+        Specified elements of x must be integers
+
+    By default, ``l = 0`` and ``u = np.inf`` unless specified with
+    ``bounds``.
+
+    Parameters
+    ----------
+    c : 1D array_like
+        The coefficients of the linear objective function to be minimized.
+        `c` is converted to a double precision array before the problem is
+        solved.
+    integrality : 1D array_like, optional
+        Indicates the type of integrality constraint on each decision variable.
+
+        ``0`` : Continuous variable; no integrality constraint.
+
+        ``1`` : Integer variable; decision variable must be an integer
+        within `bounds`.
+
+        ``2`` : Semi-continuous variable; decision variable must be within
+        `bounds` or take value ``0``.
+
+        ``3`` : Semi-integer variable; decision variable must be an integer
+        within `bounds` or take value ``0``.
+
+        By default, all variables are continuous. `integrality` is converted
+        to an array of integers before the problem is solved.
+
+    bounds : scipy.optimize.Bounds, optional
+        Bounds on the decision variables. Lower and upper bounds are converted
+        to double precision arrays before the problem is solved. The
+        ``keep_feasible`` parameter of the `Bounds` object is ignored. If
+        not specified, all decision variables are constrained to be
+        non-negative.
+    constraints : sequence of scipy.optimize.LinearConstraint, optional
+        Linear constraints of the optimization problem. Arguments may be
+        one of the following:
+
+        1. A single `LinearConstraint` object
+        2. A single tuple that can be converted to a `LinearConstraint` object
+           as ``LinearConstraint(*constraints)``
+        3. A sequence composed entirely of objects of type 1. and 2.
+
+        Before the problem is solved, all values are converted to double
+        precision, and the matrices of constraint coefficients are converted to
+        instances of `scipy.sparse.csc_array`. The ``keep_feasible`` parameter
+        of `LinearConstraint` objects is ignored.
+    options : dict, optional
+        A dictionary of solver options. The following keys are recognized.
+
+        disp : bool (default: ``False``)
+            Set to ``True`` if indicators of optimization status are to be
+            printed to the console during optimization.
+        node_limit : int, optional
+            The maximum number of nodes (linear program relaxations) to solve
+            before stopping. Default is no maximum number of nodes.
+        presolve : bool (default: ``True``)
+            Presolve attempts to identify trivial infeasibilities,
+            identify trivial unboundedness, and simplify the problem before
+            sending it to the main solver.
+        time_limit : float, optional
+            The maximum number of seconds allotted to solve the problem.
+            Default is no time limit.
+        mip_rel_gap : float, optional
+            Termination criterion for MIP solver: solver will terminate when
+            the gap between the primal objective value and the dual objective
+            bound, scaled by the primal objective value, is <= mip_rel_gap.
+
+    Returns
+    -------
+    res : OptimizeResult
+        An instance of :class:`scipy.optimize.OptimizeResult`. The object
+        is guaranteed to have the following attributes.
+
+        status : int
+            An integer representing the exit status of the algorithm.
+
+            ``0`` : Optimal solution found.
+
+            ``1`` : Iteration or time limit reached.
+
+            ``2`` : Problem is infeasible.
+
+            ``3`` : Problem is unbounded.
+
+            ``4`` : Other; see message for details.
+
+        success : bool
+            ``True`` when an optimal solution is found and ``False`` otherwise.
+
+        message : str
+            A string descriptor of the exit status of the algorithm.
+
+        The following attributes will also be present, but the values may be
+        ``None``, depending on the solution status.
+
+        x : ndarray
+            The values of the decision variables that minimize the
+            objective function while satisfying the constraints.
+        fun : float
+            The optimal value of the objective function ``c @ x``.
+        mip_node_count : int
+            The number of subproblems or "nodes" solved by the MILP solver.
+        mip_dual_bound : float
+            The MILP solver's final estimate of the lower bound on the optimal
+            solution.
+        mip_gap : float
+            The difference between the primal objective value and the dual
+            objective bound, scaled by the primal objective value.
+
+    Notes
+    -----
+    `milp` is a wrapper of the HiGHS linear optimization software [1]_. The
+    algorithm is deterministic, and it typically finds the global optimum of
+    moderately challenging mixed-integer linear programs (when it exists).
+
+    References
+    ----------
+    .. [1] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J.
+           "HiGHS - high performance software for linear optimization."
+           https://highs.dev/
+    .. [2] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised
+           simplex method." Mathematical Programming Computation, 10 (1),
+           119-142, 2018. DOI: 10.1007/s12532-017-0130-5
+
+    Examples
+    --------
+    Consider the problem at
+    https://en.wikipedia.org/wiki/Integer_programming#Example, which is
+    expressed as a maximization problem of two variables. Since `milp` requires
+    that the problem be expressed as a minimization problem, the objective
+    function coefficients on the decision variables are:
+
+    >>> import numpy as np
+    >>> c = -np.array([0, 1])
+
+    Note the negative sign: we maximize the original objective function
+    by minimizing the negative of the objective function.
+
+    We collect the coefficients of the constraints into arrays like:
+
+    >>> A = np.array([[-1, 1], [3, 2], [2, 3]])
+    >>> b_u = np.array([1, 12, 12])
+    >>> b_l = np.full_like(b_u, -np.inf)
+
+    Because there is no lower limit on these constraints, we have defined a
+    variable ``b_l`` full of values representing negative infinity. This may
+    be unfamiliar to users of `scipy.optimize.linprog`, which only accepts
+    "less than" (or "upper bound") inequality constraints of the form
+    ``A_ub @ x <= b_u``. By accepting both ``b_l`` and ``b_u`` of constraints
+    ``b_l <= A_ub @ x <= b_u``, `milp` makes it easy to specify "greater than"
+    inequality constraints, "less than" inequality constraints, and equality
+    constraints concisely.
+
+    These arrays are collected into a single `LinearConstraint` object like:
+
+    >>> from scipy.optimize import LinearConstraint
+    >>> constraints = LinearConstraint(A, b_l, b_u)
+
+    The non-negativity bounds on the decision variables are enforced by
+    default, so we do not need to provide an argument for `bounds`.
+
+    Finally, the problem states that both decision variables must be integers:
+
+    >>> integrality = np.ones_like(c)
+
+    We solve the problem like:
+
+    >>> from scipy.optimize import milp
+    >>> res = milp(c=c, constraints=constraints, integrality=integrality)
+    >>> res.x
+    [1.0, 2.0]
+
+    Note that had we solved the relaxed problem (without integrality
+    constraints):
+
+    >>> res = milp(c=c, constraints=constraints)  # OR:
+    >>> # from scipy.optimize import linprog; res = linprog(c, A, b_u)
+    >>> res.x
+    [1.8, 2.8]
+
+    we would not have obtained the correct solution by rounding to the nearest
+    integers.
+
+    Other examples are given :ref:`in the tutorial <tutorial-optimize_milp>`.
+
+    """
+    args_iv = _milp_iv(c, integrality, bounds, constraints, options)
+    c, integrality, lb, ub, indptr, indices, data, b_l, b_u, options = args_iv
+
+    highs_res = _highs_wrapper(c, indptr, indices, data, b_l, b_u,
+                               lb, ub, integrality, options)
+
+    res = {}
+
+    # Convert to scipy-style status and message
+    highs_status = highs_res.get('status', None)
+    highs_message = highs_res.get('message', None)
+    status, message = _highs_to_scipy_status_message(highs_status,
+                                                     highs_message)
+    res['status'] = status
+    res['message'] = message
+    res['success'] = (status == 0)
+    x = highs_res.get('x', None)
+    res['x'] = np.array(x) if x is not None else None
+    res['fun'] = highs_res.get('fun', None)
+    res['mip_node_count'] = highs_res.get('mip_node_count', None)
+    res['mip_dual_bound'] = highs_res.get('mip_dual_bound', None)
+    res['mip_gap'] = highs_res.get('mip_gap', None)
+
+    return OptimizeResult(res)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_minimize.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_minimize.py
new file mode 100644
index 00000000..8471f07f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_minimize.py
@@ -0,0 +1,1038 @@
+"""
+Unified interfaces to minimization algorithms.
+
+Functions
+---------
+- minimize : minimization of a function of several variables.
+- minimize_scalar : minimization of a function of one variable.
+"""
+
+__all__ = ['minimize', 'minimize_scalar']
+
+
+from warnings import warn
+
+import numpy as np
+
+# unconstrained minimization
+from ._optimize import (_minimize_neldermead, _minimize_powell, _minimize_cg,
+                        _minimize_bfgs, _minimize_newtoncg,
+                        _minimize_scalar_brent, _minimize_scalar_bounded,
+                        _minimize_scalar_golden, MemoizeJac, OptimizeResult)
+from ._trustregion_dogleg import _minimize_dogleg
+from ._trustregion_ncg import _minimize_trust_ncg
+from ._trustregion_krylov import _minimize_trust_krylov
+from ._trustregion_exact import _minimize_trustregion_exact
+from ._trustregion_constr import _minimize_trustregion_constr
+
+# constrained minimization
+from ._lbfgsb_py import _minimize_lbfgsb
+from ._tnc import _minimize_tnc
+from ._cobyla_py import _minimize_cobyla
+from ._slsqp_py import _minimize_slsqp
+from ._constraints import (old_bound_to_new, new_bounds_to_old,
+                           old_constraint_to_new, new_constraint_to_old,
+                           NonlinearConstraint, LinearConstraint, Bounds,
+                           PreparedConstraint)
+from ._differentiable_functions import FD_METHODS
+
+MINIMIZE_METHODS = ['nelder-mead', 'powell', 'cg', 'bfgs', 'newton-cg',
+                    'l-bfgs-b', 'tnc', 'cobyla', 'slsqp', 'trust-constr',
+                    'dogleg', 'trust-ncg', 'trust-exact', 'trust-krylov']
+
+MINIMIZE_SCALAR_METHODS = ['brent', 'bounded', 'golden']
+
+def minimize(fun, x0, args=(), method=None, jac=None, hess=None,
+             hessp=None, bounds=None, constraints=(), tol=None,
+             callback=None, options=None):
+    """Minimization of scalar function of one or more variables.
+
+    Parameters
+    ----------
+    fun : callable
+        The objective function to be minimized.
+
+            ``fun(x, *args) -> float``
+
+        where ``x`` is a 1-D array with shape (n,) and ``args``
+        is a tuple of the fixed parameters needed to completely
+        specify the function.
+    x0 : ndarray, shape (n,)
+        Initial guess. Array of real elements of size (n,),
+        where ``n`` is the number of independent variables.
+    args : tuple, optional
+        Extra arguments passed to the objective function and its
+        derivatives (`fun`, `jac` and `hess` functions).
+    method : str or callable, optional
+        Type of solver.  Should be one of
+
+            - 'Nelder-Mead' :ref:`(see here) <optimize.minimize-neldermead>`
+            - 'Powell'      :ref:`(see here) <optimize.minimize-powell>`
+            - 'CG'          :ref:`(see here) <optimize.minimize-cg>`
+            - 'BFGS'        :ref:`(see here) <optimize.minimize-bfgs>`
+            - 'Newton-CG'   :ref:`(see here) <optimize.minimize-newtoncg>`
+            - 'L-BFGS-B'    :ref:`(see here) <optimize.minimize-lbfgsb>`
+            - 'TNC'         :ref:`(see here) <optimize.minimize-tnc>`
+            - 'COBYLA'      :ref:`(see here) <optimize.minimize-cobyla>`
+            - 'SLSQP'       :ref:`(see here) <optimize.minimize-slsqp>`
+            - 'trust-constr' :ref:`(see here) <optimize.minimize-trustconstr>`
+            - 'dogleg'      :ref:`(see here) <optimize.minimize-dogleg>`
+            - 'trust-ncg'   :ref:`(see here) <optimize.minimize-trustncg>`
+            - 'trust-exact' :ref:`(see here) <optimize.minimize-trustexact>`
+            - 'trust-krylov' :ref:`(see here) <optimize.minimize-trustkrylov>`
+            - custom - a callable object, see below for description.
+
+        If not given, chosen to be one of ``BFGS``, ``L-BFGS-B``, ``SLSQP``,
+        depending on whether or not the problem has constraints or bounds.
+    jac : {callable,  '2-point', '3-point', 'cs', bool}, optional
+        Method for computing the gradient vector. Only for CG, BFGS,
+        Newton-CG, L-BFGS-B, TNC, SLSQP, dogleg, trust-ncg, trust-krylov,
+        trust-exact and trust-constr.
+        If it is a callable, it should be a function that returns the gradient
+        vector:
+
+            ``jac(x, *args) -> array_like, shape (n,)``
+
+        where ``x`` is an array with shape (n,) and ``args`` is a tuple with
+        the fixed parameters. If `jac` is a Boolean and is True, `fun` is
+        assumed to return a tuple ``(f, g)`` containing the objective
+        function and the gradient.
+        Methods 'Newton-CG', 'trust-ncg', 'dogleg', 'trust-exact', and
+        'trust-krylov' require that either a callable be supplied, or that
+        `fun` return the objective and gradient.
+        If None or False, the gradient will be estimated using 2-point finite
+        difference estimation with an absolute step size.
+        Alternatively, the keywords  {'2-point', '3-point', 'cs'} can be used
+        to select a finite difference scheme for numerical estimation of the
+        gradient with a relative step size. These finite difference schemes
+        obey any specified `bounds`.
+    hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy}, optional
+        Method for computing the Hessian matrix. Only for Newton-CG, dogleg,
+        trust-ncg, trust-krylov, trust-exact and trust-constr.
+        If it is callable, it should return the Hessian matrix:
+
+            ``hess(x, *args) -> {LinearOperator, spmatrix, array}, (n, n)``
+
+        where ``x`` is a (n,) ndarray and ``args`` is a tuple with the fixed
+        parameters.
+        The keywords {'2-point', '3-point', 'cs'} can also be used to select
+        a finite difference scheme for numerical estimation of the hessian.
+        Alternatively, objects implementing the `HessianUpdateStrategy`
+        interface can be used to approximate the Hessian. Available
+        quasi-Newton methods implementing this interface are:
+
+            - `BFGS`;
+            - `SR1`.
+
+        Not all of the options are available for each of the methods; for
+        availability refer to the notes.
+    hessp : callable, optional
+        Hessian of objective function times an arbitrary vector p. Only for
+        Newton-CG, trust-ncg, trust-krylov, trust-constr.
+        Only one of `hessp` or `hess` needs to be given. If `hess` is
+        provided, then `hessp` will be ignored. `hessp` must compute the
+        Hessian times an arbitrary vector:
+
+            ``hessp(x, p, *args) ->  ndarray shape (n,)``
+
+        where ``x`` is a (n,) ndarray, ``p`` is an arbitrary vector with
+        dimension (n,) and ``args`` is a tuple with the fixed
+        parameters.
+    bounds : sequence or `Bounds`, optional
+        Bounds on variables for Nelder-Mead, L-BFGS-B, TNC, SLSQP, Powell, and
+        trust-constr methods. There are two ways to specify the bounds:
+
+            1. Instance of `Bounds` class.
+            2. Sequence of ``(min, max)`` pairs for each element in `x`. None
+               is used to specify no bound.
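+
+        For instance, both of the following express ``0 <= x[i] <= 1`` for
+        two variables:
+
+            >>> from scipy.optimize import Bounds
+            >>> bounds = Bounds([0, 0], [1, 1])  # new-style Bounds instance
+            >>> bounds = [(0, 1), (0, 1)]        # sequence of (min, max) pairs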
+
+    constraints : {Constraint, dict} or List of {Constraint, dict}, optional
+        Constraints definition. Only for COBYLA, SLSQP and trust-constr.
+
+        Constraints for 'trust-constr' are defined as a single object or a
+        list of objects specifying constraints to the optimization problem.
+        Available constraints are:
+
+            - `LinearConstraint`
+            - `NonlinearConstraint`
+
+        Constraints for COBYLA, SLSQP are defined as a list of dictionaries.
+        Each dictionary with fields:
+
+            type : str
+                Constraint type: 'eq' for equality, 'ineq' for inequality.
+            fun : callable
+                The function defining the constraint.
+            jac : callable, optional
+                The Jacobian of `fun` (only for SLSQP).
+            args : sequence, optional
+                Extra arguments to be passed to the function and Jacobian.
+
+        Equality constraint means that the constraint function result is to
+        be zero whereas inequality means that it is to be non-negative.
+        Note that COBYLA only supports inequality constraints.
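+
+        As a sketch, the single inequality ``x[0] + x[1] <= 1`` written in
+        both forms:
+
+            >>> import numpy as np
+            >>> from scipy.optimize import NonlinearConstraint
+            >>> con = {'type': 'ineq',
+            ...        'fun': lambda x: 1 - x[0] - x[1]}  # COBYLA, SLSQP
+            >>> con = NonlinearConstraint(lambda x: x[0] + x[1],
+            ...                           -np.inf, 1)  # trust-constr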
+    tol : float, optional
+        Tolerance for termination. When `tol` is specified, the selected
+        minimization algorithm sets some relevant solver-specific tolerance(s)
+        equal to `tol`. For detailed control, use solver-specific
+        options.
+    options : dict, optional
+        A dictionary of solver options. All methods except `TNC` accept the
+        following generic options:
+
+            maxiter : int
+                Maximum number of iterations to perform. Depending on the
+                method each iteration may use several function evaluations.
+
+                For `TNC` use `maxfun` instead of `maxiter`.
+            disp : bool
+                Set to True to print convergence messages.
+
+        For method-specific options, see :func:`show_options()`.
+    callback : callable, optional
+        Called after each iteration. For 'trust-constr' it is a callable with
+        the signature:
+
+            ``callback(xk, OptimizeResult state) -> bool``
+
+        where ``xk`` is the current parameter vector and ``state``
+        is an `OptimizeResult` object, with the same fields
+        as the ones from the return. If callback returns True,
+        the algorithm execution is terminated.
+        For all the other methods, the signature is:
+
+            ``callback(xk)``
+
+        where ``xk`` is the current parameter vector.
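+
+        As a sketch, a callback that records the iterates:
+
+            >>> import numpy as np
+            >>> history = []
+            >>> def cb(xk):
+            ...     history.append(np.copy(xk))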
+
+    Returns
+    -------
+    res : OptimizeResult
+        The optimization result represented as an ``OptimizeResult`` object.
+        Important attributes are: ``x`` the solution array, ``success`` a
+        Boolean flag indicating if the optimizer exited successfully and
+        ``message`` which describes the cause of the termination. See
+        `OptimizeResult` for a description of other attributes.
+
+    See also
+    --------
+    minimize_scalar : Interface to minimization algorithms for scalar
+        univariate functions
+    show_options : Additional options accepted by the solvers
+
+    Notes
+    -----
+    This section describes the available solvers that can be selected by the
+    'method' parameter. The default method is *BFGS*.
+
+    **Unconstrained minimization**
+
+    Method :ref:`CG <optimize.minimize-cg>` uses a nonlinear conjugate
+    gradient algorithm by Polak and Ribiere, a variant of the
+    Fletcher-Reeves method described in [5]_ pp. 120-122. Only the
+    first derivatives are used.
+
+    Method :ref:`BFGS <optimize.minimize-bfgs>` uses the quasi-Newton
+    method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) [5]_
+    pp. 136. It uses the first derivatives only. BFGS has proven good
+    performance even for non-smooth optimizations. This method also
+    returns an approximation of the Hessian inverse, stored as
+    `hess_inv` in the OptimizeResult object.
+
+    Method :ref:`Newton-CG <optimize.minimize-newtoncg>` uses a
+    Newton-CG algorithm [5]_ pp. 168 (also known as the truncated
+    Newton method). It uses a CG method to compute the search
+    direction. See also the *TNC* method for a box-constrained
+    minimization with a similar algorithm. Suitable for large-scale
+    problems.
+
+    Method :ref:`dogleg <optimize.minimize-dogleg>` uses the dog-leg
+    trust-region algorithm [5]_ for unconstrained minimization. This
+    algorithm requires the gradient and Hessian; furthermore the
+    Hessian is required to be positive definite.
+
+    Method :ref:`trust-ncg <optimize.minimize-trustncg>` uses the
+    Newton conjugate gradient trust-region algorithm [5]_ for
+    unconstrained minimization. This algorithm requires the gradient
+    and either the Hessian or a function that computes the product of
+    the Hessian with a given vector. Suitable for large-scale problems.
+
+    Method :ref:`trust-krylov <optimize.minimize-trustkrylov>` uses
+    the Newton GLTR trust-region algorithm [14]_, [15]_ for unconstrained
+    minimization. This algorithm requires the gradient
+    and either the Hessian or a function that computes the product of
+    the Hessian with a given vector. Suitable for large-scale problems.
+    On indefinite problems it usually requires fewer iterations than the
+    `trust-ncg` method and is recommended for medium and large-scale problems.
+
+    Method :ref:`trust-exact <optimize.minimize-trustexact>`
+    is a trust-region method for unconstrained minimization in which
+    quadratic subproblems are solved almost exactly [13]_. This
+    algorithm requires the gradient and the Hessian (which is
+    *not* required to be positive definite). In many situations it
+    converges in fewer iterations than the other Newton-type methods,
+    and it is the most recommended for small and medium-size problems.
+
+    **Bound-Constrained minimization**
+
+    Method :ref:`Nelder-Mead <optimize.minimize-neldermead>` uses the
+    Simplex algorithm [1]_, [2]_. This algorithm is robust in many
+    applications. However, if the numerical computation of derivatives can
+    be trusted, other algorithms using first and/or second derivative
+    information might be preferred for their generally better performance.
+
+    Method :ref:`L-BFGS-B <optimize.minimize-lbfgsb>` uses the L-BFGS-B
+    algorithm [6]_, [7]_ for bound constrained minimization.
+
+    Method :ref:`Powell <optimize.minimize-powell>` is a modification
+    of Powell's method [3]_, [4]_ which is a conjugate direction
+    method. It performs sequential one-dimensional minimizations along
+    each vector of the directions set (`direc` field in `options` and
+    `info`), which is updated at each iteration of the main
+    minimization loop. The function need not be differentiable, and no
+    derivatives are taken. If bounds are not provided, then an
+    unbounded line search will be used. If bounds are provided and
+    the initial guess is within the bounds, then every function
+    evaluation throughout the minimization procedure will be within
+    the bounds. If bounds are provided, the initial guess is outside
+    the bounds, and `direc` is full rank (default has full rank), then
+    some function evaluations during the first iteration may be
+    outside the bounds, but every function evaluation after the first
+    iteration will be within the bounds. If `direc` is not full rank,
+    then some parameters may not be optimized and the solution is not
+    guaranteed to be within the bounds.
+
+    Method :ref:`TNC <optimize.minimize-tnc>` uses a truncated Newton
+    algorithm [5]_, [8]_ to minimize a function with variables subject
+    to bounds. This algorithm uses gradient information; it is also
+    called Newton Conjugate-Gradient. It differs from the *Newton-CG*
+    method described above as it wraps a C implementation and allows
+    each variable to be given upper and lower bounds.
+
+    **Constrained Minimization**
+
+    Method :ref:`COBYLA <optimize.minimize-cobyla>` uses the
+    Constrained Optimization BY Linear Approximation (COBYLA) method
+    [9]_, [10]_, [11]_. The algorithm is based on linear
+    approximations to the objective function and each constraint. The
+    method wraps a FORTRAN implementation of the algorithm. The
+    constraint functions 'fun' may return either a single number
+    or an array or list of numbers.
+
+    Method :ref:`SLSQP <optimize.minimize-slsqp>` uses Sequential
+    Least SQuares Programming to minimize a function of several
+    variables with any combination of bounds, equality and inequality
+    constraints. The method wraps the SLSQP Optimization subroutine
+    originally implemented by Dieter Kraft [12]_. Note that the
+    wrapper handles infinite values in bounds by converting them into
+    large floating values.
+
+    Method :ref:`trust-constr <optimize.minimize-trustconstr>` is a
+    trust-region algorithm for constrained optimization. It switches
+    between two implementations depending on the problem definition.
+    It is the most versatile constrained minimization algorithm
+    implemented in SciPy and the most appropriate for large-scale problems.
+    For equality constrained problems it is an implementation of the
+    Byrd-Omojokun Trust-Region SQP method described in [17]_ and in [5]_,
+    p. 549. When inequality constraints are imposed as well, it switches to
+    the trust-region interior point method described in [16]_. This
+    interior point algorithm, in turn, solves inequality constraints by
+    introducing slack variables and solving a sequence of
+    equality-constrained barrier problems
+    for progressively smaller values of the barrier parameter.
+    The previously described equality constrained SQP method is
+    used to solve the subproblems with increasing levels of accuracy
+    as the iterate gets closer to a solution.
+
+    **Finite-Difference Options**
+
+    For Method :ref:`trust-constr <optimize.minimize-trustconstr>`
+    the gradient and the Hessian may be approximated using
+    three finite-difference schemes: {'2-point', '3-point', 'cs'}.
+    The scheme 'cs' is, potentially, the most accurate but it
+    requires the function to correctly handle complex inputs and to
+    be differentiable in the complex plane. The scheme '3-point' is more
+    accurate than '2-point' but requires twice as many operations. If the
+    gradient is estimated via finite-differences the Hessian must be
+    estimated using one of the quasi-Newton strategies.
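+
+    As a sketch, a finite-difference gradient can be combined with a
+    quasi-Newton Hessian approximation such as `BFGS` for 'trust-constr':
+
+    >>> from scipy.optimize import minimize, rosen, BFGS
+    >>> x0 = [1.3, 0.7, 0.8, 1.9, 1.2]
+    >>> res = minimize(rosen, x0, method='trust-constr',
+    ...                jac='2-point', hess=BFGS())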
+
+    **Method specific options for the** `hess` **keyword**
+
+    +--------------+------+----------+-------------------------+-----+
+    | method/Hess  | None | callable | '2-point'/'3-point'/'cs'| HUS |
+    +==============+======+==========+=========================+=====+
+    | Newton-CG    | x    | (n, n)   | x                       | x   |
+    |              |      | LO       |                         |     |
+    +--------------+------+----------+-------------------------+-----+
+    | dogleg       |      | (n, n)   |                         |     |
+    +--------------+------+----------+-------------------------+-----+
+    | trust-ncg    |      | (n, n)   | x                       | x   |
+    +--------------+------+----------+-------------------------+-----+
+    | trust-krylov |      | (n, n)   | x                       | x   |
+    +--------------+------+----------+-------------------------+-----+
+    | trust-exact  |      | (n, n)   |                         |     |
+    +--------------+------+----------+-------------------------+-----+
+    | trust-constr | x    | (n, n)   |  x                      | x   |
+    |              |      | LO       |                         |     |
+    |              |      | sp       |                         |     |
+    +--------------+------+----------+-------------------------+-----+
+
+    where LO=LinearOperator, sp=Sparse matrix, HUS=HessianUpdateStrategy
+
+    **Custom minimizers**
+
+    It may be useful to pass a custom minimization method, for example
+    when using a frontend to this method such as `scipy.optimize.basinhopping`
+    or a different library.  You can simply pass a callable as the ``method``
+    parameter.
+
+    The callable is called as ``method(fun, x0, args, **kwargs, **options)``
+    where ``kwargs`` corresponds to any other parameters passed to `minimize`
+    (such as `callback`, `hess`, etc.), except the `options` dict, which has
+    its contents also passed as `method` parameters pair by pair.  Also, if
+    `jac` has been passed as a bool type, `jac` and `fun` are mangled so that
+    `fun` returns just the function values and `jac` is converted to a function
+    returning the Jacobian.  The method shall return an `OptimizeResult`
+    object.
+
+    The provided `method` callable must be able to accept (and possibly ignore)
+    arbitrary parameters; the set of parameters accepted by `minimize` may
+    expand in future versions and then these parameters will be passed to
+    the method.  You can find an example in the scipy.optimize tutorial.
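+
+    A minimal sketch of such a callable, patterned after the fixed-step
+    coordinate-descent example in the scipy.optimize tutorial:
+
+    >>> import numpy as np
+    >>> from scipy.optimize import minimize, rosen, OptimizeResult
+    >>> def custmin(fun, x0, args=(), maxfev=None, stepsize=0.1,
+    ...             maxiter=100, callback=None, **options):
+    ...     bestx = np.asarray(x0, dtype=float)
+    ...     besty = fun(bestx, *args)
+    ...     funcalls = 1
+    ...     niter = 0
+    ...     improved = True
+    ...     while improved and niter < maxiter:
+    ...         improved = False
+    ...         niter += 1
+    ...         for dim in range(bestx.size):
+    ...             for s in [bestx[dim] - stepsize, bestx[dim] + stepsize]:
+    ...                 testx = np.copy(bestx)
+    ...                 testx[dim] = s
+    ...                 testy = fun(testx, *args)
+    ...                 funcalls += 1
+    ...                 if testy < besty:
+    ...                     besty, bestx, improved = testy, testx, True
+    ...         if callback is not None:
+    ...             callback(bestx)
+    ...         if maxfev is not None and funcalls >= maxfev:
+    ...             break
+    ...     return OptimizeResult(fun=besty, x=bestx, nit=niter,
+    ...                           nfev=funcalls, success=(niter > 1))
+    >>> res = minimize(rosen, [1.3, 0.7, 0.8, 1.9, 1.2], method=custmin,
+    ...                options=dict(stepsize=0.05))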
+
+    References
+    ----------
+    .. [1] Nelder, J A, and R Mead. 1965. A Simplex Method for Function
+        Minimization. The Computer Journal 7: 308-13.
+    .. [2] Wright M H. 1996. Direct search methods: Once scorned, now
+        respectable, in Numerical Analysis 1995: Proceedings of the 1995
+        Dundee Biennial Conference in Numerical Analysis (Eds. D F
+        Griffiths and G A Watson). Addison Wesley Longman, Harlow, UK.
+        191-208.
+    .. [3] Powell, M J D. 1964. An efficient method for finding the minimum of
+       a function of several variables without calculating derivatives. The
+       Computer Journal 7: 155-162.
+    .. [4] Press W, S A Teukolsky, W T Vetterling and B P Flannery.
+       Numerical Recipes (any edition), Cambridge University Press.
+    .. [5] Nocedal, J, and S J Wright. 2006. Numerical Optimization.
+       Springer New York.
+    .. [6] Byrd, R H and P Lu and J. Nocedal. 1995. A Limited Memory
+       Algorithm for Bound Constrained Optimization. SIAM Journal on
+       Scientific and Statistical Computing 16 (5): 1190-1208.
+    .. [7] Zhu, C and R H Byrd and J Nocedal. 1997. L-BFGS-B: Algorithm
+       778: L-BFGS-B, FORTRAN routines for large scale bound constrained
+       optimization. ACM Transactions on Mathematical Software 23 (4):
+       550-560.
+    .. [8] Nash, S G. Newton-Type Minimization Via the Lanczos Method.
+       1984. SIAM Journal of Numerical Analysis 21: 770-778.
+    .. [9] Powell, M J D. A direct search optimization method that models
+       the objective and constraint functions by linear interpolation.
+       1994. Advances in Optimization and Numerical Analysis, eds. S. Gomez
+       and J-P Hennart, Kluwer Academic (Dordrecht), 51-67.
+    .. [10] Powell M J D. Direct search algorithms for optimization
+       calculations. 1998. Acta Numerica 7: 287-336.
+    .. [11] Powell M J D. A view of algorithms for optimization without
+       derivatives. 2007. Cambridge University Technical Report DAMTP
+       2007/NA03.
+    .. [12] Kraft, D. A software package for sequential quadratic
+       programming. 1988. Tech. Rep. DFVLR-FB 88-28, DLR German Aerospace
+       Center -- Institute for Flight Mechanics, Koln, Germany.
+    .. [13] Conn, A. R., Gould, N. I., and Toint, P. L.
+       Trust region methods. 2000. Siam. pp. 169-200.
+    .. [14] F. Lenders, C. Kirches, A. Potschka: "trlib: A vector-free
+       implementation of the GLTR method for iterative solution of
+       the trust region problem", :arxiv:`1611.04718`
+    .. [15] N. Gould, S. Lucidi, M. Roma, P. Toint: "Solving the
+       Trust-Region Subproblem using the Lanczos Method",
+       SIAM J. Optim., 9(2), 504--525, (1999).
+    .. [16] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal. 1999.
+        An interior point algorithm for large-scale nonlinear  programming.
+        SIAM Journal on Optimization 9.4: 877-900.
+    .. [17] Lalee, Marucha, Jorge Nocedal, and Todd Plantega. 1998. On the
+        implementation of an algorithm for large-scale equality constrained
+        optimization. SIAM Journal on Optimization 8.3: 682-706.
+
+    Examples
+    --------
+    Let us consider the problem of minimizing the Rosenbrock function. This
+    function (and its respective derivatives) is implemented in `rosen`
+    (resp. `rosen_der`, `rosen_hess`) in `scipy.optimize`.
+
+    >>> from scipy.optimize import minimize, rosen, rosen_der
+
+    A simple application of the *Nelder-Mead* method is:
+
+    >>> x0 = [1.3, 0.7, 0.8, 1.9, 1.2]
+    >>> res = minimize(rosen, x0, method='Nelder-Mead', tol=1e-6)
+    >>> res.x
+    array([ 1.,  1.,  1.,  1.,  1.])
+
+    Now using the *BFGS* algorithm, using the first derivative and a few
+    options:
+
+    >>> res = minimize(rosen, x0, method='BFGS', jac=rosen_der,
+    ...                options={'gtol': 1e-6, 'disp': True})
+    Optimization terminated successfully.
+             Current function value: 0.000000
+             Iterations: 26
+             Function evaluations: 31
+             Gradient evaluations: 31
+    >>> res.x
+    array([ 1.,  1.,  1.,  1.,  1.])
+    >>> print(res.message)
+    Optimization terminated successfully.
+    >>> res.hess_inv
+    array([[ 0.00749589,  0.01255155,  0.02396251,  0.04750988,  0.09495377],  # may vary
+           [ 0.01255155,  0.02510441,  0.04794055,  0.09502834,  0.18996269],
+           [ 0.02396251,  0.04794055,  0.09631614,  0.19092151,  0.38165151],
+           [ 0.04750988,  0.09502834,  0.19092151,  0.38341252,  0.7664427 ],
+           [ 0.09495377,  0.18996269,  0.38165151,  0.7664427,   1.53713523]])
+
+
+    Next, consider a minimization problem with several constraints (namely
+    Example 16.4 from [5]_). The objective function is:
+
+    >>> fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2
+
+    There are three constraints defined as:
+
+    >>> cons = ({'type': 'ineq', 'fun': lambda x:  x[0] - 2 * x[1] + 2},
+    ...         {'type': 'ineq', 'fun': lambda x: -x[0] - 2 * x[1] + 6},
+    ...         {'type': 'ineq', 'fun': lambda x: -x[0] + 2 * x[1] + 2})
+
+    And variables must be positive, hence the following bounds:
+
+    >>> bnds = ((0, None), (0, None))
+
+    The optimization problem is solved using the SLSQP method as:
+
+    >>> res = minimize(fun, (2, 0), method='SLSQP', bounds=bnds,
+    ...                constraints=cons)
+
+    It should converge to the theoretical solution (1.4, 1.7).
+
+    """
+    x0 = np.atleast_1d(np.asarray(x0))
+
+    if x0.ndim != 1:
+        message = ('Use of `minimize` with `x0.ndim != 1` is deprecated. '
+                   'Currently, singleton dimensions will be removed from '
+                   '`x0`, but an error will be raised in SciPy 1.11.0.')
+        warn(message, DeprecationWarning, stacklevel=2)
+        x0 = np.atleast_1d(np.squeeze(x0))
+
+    if x0.dtype.kind in np.typecodes["AllInteger"]:
+        x0 = np.asarray(x0, dtype=float)
+
+    if not isinstance(args, tuple):
+        args = (args,)
+
+    if method is None:
+        # Select automatically
+        if constraints:
+            method = 'SLSQP'
+        elif bounds is not None:
+            method = 'L-BFGS-B'
+        else:
+            method = 'BFGS'
+
+    if callable(method):
+        meth = "_custom"
+    else:
+        meth = method.lower()
+
+    if options is None:
+        options = {}
+    # check if optional parameters are supported by the selected method
+    # - jac
+    if meth in ('nelder-mead', 'powell', 'cobyla') and bool(jac):
+        warn('Method %s does not use gradient information (jac).' % method,
+             RuntimeWarning)
+    # - hess
+    if meth not in ('newton-cg', 'dogleg', 'trust-ncg', 'trust-constr',
+                    'trust-krylov', 'trust-exact', '_custom') and hess is not None:
+        warn('Method %s does not use Hessian information (hess).' % method,
+             RuntimeWarning)
+    # - hessp
+    if meth not in ('newton-cg', 'trust-ncg', 'trust-constr',
+                    'trust-krylov', '_custom') \
+       and hessp is not None:
+        warn('Method %s does not use Hessian-vector product '
+             'information (hessp).' % method, RuntimeWarning)
+    # - constraints or bounds
+    if (meth not in ('cobyla', 'slsqp', 'trust-constr', '_custom') and
+            np.any(constraints)):
+        warn('Method %s cannot handle constraints.' % method,
+             RuntimeWarning)
+    if meth not in ('nelder-mead', 'powell', 'l-bfgs-b', 'tnc', 'slsqp',
+                    'trust-constr', '_custom') and bounds is not None:
+        warn('Method %s cannot handle bounds.' % method,
+             RuntimeWarning)
+    # - return_all
+    if (meth in ('l-bfgs-b', 'tnc', 'cobyla', 'slsqp') and
+            options.get('return_all', False)):
+        warn('Method %s does not support the return_all option.' % method,
+             RuntimeWarning)
+
+    # check gradient vector
+    if callable(jac):
+        pass
+    elif jac is True:
+        # fun returns func and grad
+        fun = MemoizeJac(fun)
+        jac = fun.derivative
+    elif (jac in FD_METHODS and
+          meth in ['trust-constr', 'bfgs', 'cg', 'l-bfgs-b', 'tnc', 'slsqp']):
+        # finite differences with relative step
+        pass
+    elif meth in ['trust-constr']:
+        # default jac calculation for this method
+        jac = '2-point'
+    elif jac is None or bool(jac) is False:
+        # this will cause e.g. LBFGS to use forward difference, absolute step
+        jac = None
+    else:
+        # default if jac option is not understood
+        jac = None
+
+    # set default tolerances
+    if tol is not None:
+        options = dict(options)
+        if meth == 'nelder-mead':
+            options.setdefault('xatol', tol)
+            options.setdefault('fatol', tol)
+        if meth in ('newton-cg', 'powell', 'tnc'):
+            options.setdefault('xtol', tol)
+        if meth in ('powell', 'l-bfgs-b', 'tnc', 'slsqp'):
+            options.setdefault('ftol', tol)
+        if meth in ('bfgs', 'cg', 'l-bfgs-b', 'tnc', 'dogleg',
+                    'trust-ncg', 'trust-exact', 'trust-krylov'):
+            options.setdefault('gtol', tol)
+        if meth in ('cobyla', '_custom'):
+            options.setdefault('tol', tol)
+        if meth == 'trust-constr':
+            options.setdefault('xtol', tol)
+            options.setdefault('gtol', tol)
+            options.setdefault('barrier_tol', tol)
+
+    if meth == '_custom':
+        # custom method called before bounds and constraints are 'standardised'
+        # custom method should be able to accept whatever bounds/constraints
+        # are provided to it.
+        return method(fun, x0, args=args, jac=jac, hess=hess, hessp=hessp,
+                      bounds=bounds, constraints=constraints,
+                      callback=callback, **options)
+
+    constraints = standardize_constraints(constraints, x0, meth)
+
+    remove_vars = False
+    if bounds is not None:
+        if meth in {"tnc", "slsqp", "l-bfgs-b"}:
+            # These methods can't take the finite-difference derivatives they
+            # need when a variable is fixed by the bounds. To avoid this issue,
+            # remove fixed variables from the problem.
+            # NOTE: if this list is expanded, then be sure to update the
+            # accompanying tests and test_optimize.eb_data. Consider also if
+            # default OptimizeResult will need updating.
+
+            # convert to new-style bounds so we only have to consider one case
+            bounds = standardize_bounds(bounds, x0, 'new')
+
+            # determine whether any variables are fixed
+            i_fixed = (bounds.lb == bounds.ub)
+
+            if np.all(i_fixed):
+                # all the parameters are fixed, a minimizer is not able to do
+                # anything
+                return _optimize_result_for_equal_bounds(
+                    fun, bounds, meth, args=args, constraints=constraints
+                )
+
+            # determine whether finite differences are needed for any grad/jac
+            fd_needed = (not callable(jac))
+            for con in constraints:
+                if not callable(con.get('jac', None)):
+                    fd_needed = True
+
+            # If finite differences are ever used, remove all fixed variables
+            # Always remove fixed variables for TNC; see gh-14565
+            remove_vars = i_fixed.any() and (fd_needed or meth == "tnc")
+            if remove_vars:
+                x_fixed = (bounds.lb)[i_fixed]
+                x0 = x0[~i_fixed]
+                bounds = _remove_from_bounds(bounds, i_fixed)
+                fun = _remove_from_func(fun, i_fixed, x_fixed)
+                if callable(callback):
+                    callback = _remove_from_func(callback, i_fixed, x_fixed)
+                if callable(jac):
+                    jac = _remove_from_func(jac, i_fixed, x_fixed, remove=1)
+
+                # make a copy of the constraints so the user's version doesn't
+                # get changed. (Shallow copy is ok)
+                constraints = [con.copy() for con in constraints]
+                for con in constraints:  # yes, guaranteed to be a list
+                    con['fun'] = _remove_from_func(con['fun'], i_fixed,
+                                                   x_fixed, min_dim=1,
+                                                   remove=0)
+                    if callable(con.get('jac', None)):
+                        con['jac'] = _remove_from_func(con['jac'], i_fixed,
+                                                       x_fixed, min_dim=2,
+                                                       remove=1)
+        bounds = standardize_bounds(bounds, x0, meth)
+
+    if meth == 'nelder-mead':
+        res = _minimize_neldermead(fun, x0, args, callback, bounds=bounds,
+                                   **options)
+    elif meth == 'powell':
+        res = _minimize_powell(fun, x0, args, callback, bounds, **options)
+    elif meth == 'cg':
+        res = _minimize_cg(fun, x0, args, jac, callback, **options)
+    elif meth == 'bfgs':
+        res = _minimize_bfgs(fun, x0, args, jac, callback, **options)
+    elif meth == 'newton-cg':
+        res = _minimize_newtoncg(fun, x0, args, jac, hess, hessp, callback,
+                                 **options)
+    elif meth == 'l-bfgs-b':
+        res = _minimize_lbfgsb(fun, x0, args, jac, bounds,
+                               callback=callback, **options)
+    elif meth == 'tnc':
+        res = _minimize_tnc(fun, x0, args, jac, bounds, callback=callback,
+                            **options)
+    elif meth == 'cobyla':
+        res = _minimize_cobyla(fun, x0, args, constraints, callback=callback,
+                                **options)
+    elif meth == 'slsqp':
+        res = _minimize_slsqp(fun, x0, args, jac, bounds,
+                              constraints, callback=callback, **options)
+    elif meth == 'trust-constr':
+        res = _minimize_trustregion_constr(fun, x0, args, jac, hess, hessp,
+                                           bounds, constraints,
+                                           callback=callback, **options)
+    elif meth == 'dogleg':
+        res = _minimize_dogleg(fun, x0, args, jac, hess,
+                               callback=callback, **options)
+    elif meth == 'trust-ncg':
+        res = _minimize_trust_ncg(fun, x0, args, jac, hess, hessp,
+                                  callback=callback, **options)
+    elif meth == 'trust-krylov':
+        res = _minimize_trust_krylov(fun, x0, args, jac, hess, hessp,
+                                     callback=callback, **options)
+    elif meth == 'trust-exact':
+        res = _minimize_trustregion_exact(fun, x0, args, jac, hess,
+                                          callback=callback, **options)
+    else:
+        raise ValueError('Unknown solver %s' % method)
+
+    if remove_vars:
+        res.x = _add_to_array(res.x, i_fixed, x_fixed)
+        res.jac = _add_to_array(res.jac, i_fixed, np.nan)
+        if "hess_inv" in res:
+            res.hess_inv = None  # unknown
+
+    return res
+
+
+def minimize_scalar(fun, bracket=None, bounds=None, args=(),
+                    method=None, tol=None, options=None):
+    """Minimization of scalar function of one variable.
+
+    Parameters
+    ----------
+    fun : callable
+        Objective function.
+        Scalar function, must return a scalar.
+    bracket : sequence, optional
+        For methods 'brent' and 'golden', `bracket` defines the bracketing
+        interval and can either have three items ``(a, b, c)`` so that
+        ``a < b < c`` and ``fun(b) < fun(a), fun(c)`` or two items ``a`` and
+        ``c`` which are assumed to be a starting interval for a downhill
+        bracket search (see `bracket`); note that bracketing does not
+        guarantee that the obtained solution will satisfy ``a <= x <= c``.
+    bounds : sequence, optional
+        For method 'bounded', `bounds` is mandatory and must have two finite
+        items corresponding to the optimization bounds.
+    args : tuple, optional
+        Extra arguments passed to the objective function.
+    method : str or callable, optional
+        Type of solver.  Should be one of:
+
+            - :ref:`Brent <optimize.minimize_scalar-brent>`
+            - :ref:`Bounded <optimize.minimize_scalar-bounded>`
+            - :ref:`Golden <optimize.minimize_scalar-golden>`
+            - custom - a callable object (added in version 0.14.0), see below
+
+        Default is "Bounded" if bounds are provided and "Brent" otherwise.
+        See the 'Notes' section for details of each solver.
+
+    tol : float, optional
+        Tolerance for termination. For detailed control, use solver-specific
+        options.
+    options : dict, optional
+        A dictionary of solver options.
+
+            maxiter : int
+                Maximum number of iterations to perform.
+            disp : bool
+                Set to True to print convergence messages.
+
+        See :func:`show_options()` for solver-specific options.
+
+    Returns
+    -------
+    res : OptimizeResult
+        The optimization result represented as an ``OptimizeResult`` object.
+        Important attributes are: ``x`` the solution array, ``success`` a
+        Boolean flag indicating if the optimizer exited successfully and
+        ``message`` which describes the cause of the termination. See
+        `OptimizeResult` for a description of other attributes.
+
+    See also
+    --------
+    minimize : Interface to minimization algorithms for scalar multivariate
+        functions
+    show_options : Additional options accepted by the solvers
+
+    Notes
+    -----
+    This section describes the available solvers that can be selected by the
+    'method' parameter. The default method is the ``"Bounded"`` Brent method if
+    `bounds` are passed and unbounded ``"Brent"`` otherwise.
+
+    Method :ref:`Brent <optimize.minimize_scalar-brent>` uses Brent's
+    algorithm [1]_ to find a local minimum.  The algorithm uses inverse
+    parabolic interpolation when possible to speed up convergence of
+    the golden section method.
+
+    Method :ref:`Golden <optimize.minimize_scalar-golden>` uses the
+    golden section search technique [1]_. It uses an analog of the
+    bisection method to narrow the bracketed interval. It is usually
+    preferable to use the *Brent* method.
+
+    Method :ref:`Bounded <optimize.minimize_scalar-bounded>` can
+    perform bounded minimization [2]_ [3]_. It uses the Brent method to find
+    a local minimum in the interval ``x1 < xopt < x2``.
+
+    **Custom minimizers**
+
+    It may be useful to pass a custom minimization method, for example
+    when using some library frontend to minimize_scalar. You can simply
+    pass a callable as the ``method`` parameter.
+
+    The callable is called as ``method(fun, args, **kwargs, **options)``
+    where ``kwargs`` corresponds to any other parameters passed to
+    `minimize_scalar` (such as `bracket`, `tol`, etc.), except the `options`
+    dict, which has its contents also passed as `method` parameters pair by
+    pair.  The method shall return an `OptimizeResult` object.
+
+    The provided `method` callable must be able to accept (and possibly
+    ignore) arbitrary parameters; the set of parameters accepted by
+    `minimize_scalar` may expand in future versions and these parameters
+    will then be passed to the method. You can find an example in the
+    scipy.optimize tutorial.
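+
+    A minimal sketch of such a callable (a naive grid search over the
+    bracketing interval, purely illustrative):
+
+    >>> import numpy as np
+    >>> from scipy.optimize import minimize_scalar, OptimizeResult
+    >>> def grid_min(fun, args=(), bracket=None, **options):
+    ...     a, c = (bracket[0], bracket[-1]) if bracket else (-10, 10)
+    ...     xs = np.linspace(a, c, 10001)
+    ...     ys = [fun(x, *args) for x in xs]
+    ...     i = int(np.argmin(ys))
+    ...     return OptimizeResult(x=xs[i], fun=ys[i], success=True,
+    ...                           nfev=xs.size)
+    >>> res = minimize_scalar(lambda x: (x - 1)**2, method=grid_min)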
+
+    .. versionadded:: 0.11.0
+
+    References
+    ----------
+    .. [1] Press, W., S.A. Teukolsky, W.T. Vetterling, and B.P. Flannery.
+           Numerical Recipes in C. Cambridge University Press.
+    .. [2] Forsythe, G.E., M. A. Malcolm, and C. B. Moler. "Computer Methods
+           for Mathematical Computations." Prentice-Hall Series in Automatic
+           Computation 259 (1977).
+    .. [3] Brent, Richard P. Algorithms for Minimization Without Derivatives.
+           Courier Corporation, 2013.
+
+    Examples
+    --------
+    Consider the problem of minimizing the following function.
+
+    >>> def f(x):
+    ...     return (x - 2) * x * (x + 2)**2
+
+    Using the *Brent* method, we find the local minimum as:
+
+    >>> from scipy.optimize import minimize_scalar
+    >>> res = minimize_scalar(f)
+    >>> res.fun
+    -9.9149495908
+
+    The minimizer is:
+
+    >>> res.x
+    1.28077640403
+
+    Using the *Bounded* method, we find a local minimum with specified
+    bounds as:
+
+    >>> res = minimize_scalar(f, bounds=(-3, -1), method='bounded')
+    >>> res.fun  # minimum
+    3.28365179850e-13
+    >>> res.x  # minimizer
+    -2.0000002026
+
+    """
+    if not isinstance(args, tuple):
+        args = (args,)
+
+    if callable(method):
+        meth = "_custom"
+    elif method is None:
+        meth = 'brent' if bounds is None else 'bounded'
+    else:
+        meth = method.lower()
+    if options is None:
+        options = {}
+
+    if bounds is not None and meth in {'brent', 'golden'}:
+        message = f"Use of `bounds` is incompatible with 'method={method}'."
+        raise ValueError(message)
+
+    if tol is not None:
+        options = dict(options)
+        if meth == 'bounded' and 'xatol' not in options:
+            warn("Method 'bounded' does not support relative tolerance in x; "
+                 "defaulting to absolute tolerance.", RuntimeWarning)
+            options['xatol'] = tol
+        elif meth == '_custom':
+            options.setdefault('tol', tol)
+        else:
+            options.setdefault('xtol', tol)
+
+    # replace boolean "disp" option, if specified, by an integer value.
+    disp = options.get('disp')
+    if isinstance(disp, bool):
+        options['disp'] = 2 * int(disp)
+
+    if meth == '_custom':
+        return method(fun, args=args, bracket=bracket, bounds=bounds, **options)
+    elif meth == 'brent':
+        return _minimize_scalar_brent(fun, bracket, args, **options)
+    elif meth == 'bounded':
+        if bounds is None:
+            raise ValueError('The `bounds` parameter is mandatory for '
+                             'method `bounded`.')
+        return _minimize_scalar_bounded(fun, bounds, args, **options)
+    elif meth == 'golden':
+        return _minimize_scalar_golden(fun, bracket, args, **options)
+    else:
+        raise ValueError('Unknown solver %s' % method)
+
+
+def _remove_from_bounds(bounds, i_fixed):
+    """Removes fixed variables from a `Bounds` instance"""
+    lb = bounds.lb[~i_fixed]
+    ub = bounds.ub[~i_fixed]
+    return Bounds(lb, ub)  # don't mutate original Bounds object
+
+
+def _remove_from_func(fun_in, i_fixed, x_fixed, min_dim=None, remove=0):
+    """Wraps a function such that fixed variables need not be passed in"""
+    def fun_out(x_in, *args, **kwargs):
+        x_out = np.zeros_like(i_fixed, dtype=x_in.dtype)
+        x_out[i_fixed] = x_fixed
+        x_out[~i_fixed] = x_in
+        y_out = fun_in(x_out, *args, **kwargs)
+        y_out = np.array(y_out)
+
+        if min_dim == 1:
+            y_out = np.atleast_1d(y_out)
+        elif min_dim == 2:
+            y_out = np.atleast_2d(y_out)
+
+        if remove == 1:
+            y_out = y_out[..., ~i_fixed]
+        elif remove == 2:
+            y_out = y_out[~i_fixed, ~i_fixed]
+
+        return y_out
+    return fun_out
+
+
+def _add_to_array(x_in, i_fixed, x_fixed):
+    """Adds fixed variables back to an array"""
+    i_free = ~i_fixed
+    if x_in.ndim == 2:
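+        # a 2-D input (e.g. an approximate inverse Hessian) is free only
+        # where both its row and its column correspond to free variables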
+        i_free = i_free[:, None] @ i_free[None, :]
+    x_out = np.zeros_like(i_free, dtype=x_in.dtype)
+    x_out[~i_free] = x_fixed
+    x_out[i_free] = x_in.ravel()
+    return x_out
+
+
+def standardize_bounds(bounds, x0, meth):
+    """Converts bounds to the form required by the solver."""
+    if meth in {'trust-constr', 'powell', 'nelder-mead', 'new'}:
+        if not isinstance(bounds, Bounds):
+            lb, ub = old_bound_to_new(bounds)
+            bounds = Bounds(lb, ub)
+    elif meth in ('l-bfgs-b', 'tnc', 'slsqp', 'old'):
+        if isinstance(bounds, Bounds):
+            bounds = new_bounds_to_old(bounds.lb, bounds.ub, x0.shape[0])
+    return bounds
+
+
+def standardize_constraints(constraints, x0, meth):
+    """Converts constraints to the form required by the solver."""
+    all_constraint_types = (NonlinearConstraint, LinearConstraint, dict)
+    new_constraint_types = all_constraint_types[:-1]
+    if constraints is None:
+        constraints = []
+    elif isinstance(constraints, all_constraint_types):
+        constraints = [constraints]
+    else:
+        constraints = list(constraints)  # ensure it's a mutable sequence
+
+    if meth in ['trust-constr', 'new']:
+        for i, con in enumerate(constraints):
+            if not isinstance(con, new_constraint_types):
+                constraints[i] = old_constraint_to_new(i, con)
+    else:
+        # iterate over copy, changing original
+        for i, con in enumerate(list(constraints)):
+            if isinstance(con, new_constraint_types):
+                old_constraints = new_constraint_to_old(con, x0)
+                constraints[i] = old_constraints[0]
+                constraints.extend(old_constraints[1:])  # appends 1 if present
+
+    return constraints
+
+
+def _optimize_result_for_equal_bounds(
+        fun, bounds, method, args=(), constraints=()
+):
+    """
+    Provides a default OptimizeResult for when a bounded minimization method
+    has (lb == ub).all().
+
+    Parameters
+    ----------
+    fun : callable
+    bounds : Bounds
+    method : str
+    args : tuple, optional
+    constraints : Constraint or sequence of Constraint, optional
+    """
+    success = True
+    message = 'All independent variables were fixed by bounds.'
+
+    # bounds is new-style
+    x0 = bounds.lb
+
+    if constraints:
+        message = ("All independent variables were fixed by bounds at values"
+                   " that satisfy the constraints.")
+        constraints = standardize_constraints(constraints, x0, 'new')
+
+    maxcv = 0
+    for c in constraints:
+        pc = PreparedConstraint(c, x0)
+        violation = pc.violation(x0)
+        if np.sum(violation):
+            maxcv = max(maxcv, np.max(violation))
+            success = False
+            message = (f"All independent variables were fixed by bounds, but "
+                       f"the independent variables do not satisfy the "
+                       f"constraints exactly. (Maximum violation: {maxcv}).")
+
+    return OptimizeResult(
+        x=x0, fun=fun(x0, *args), success=success, message=message, nfev=1,
+        njev=0, nhev=0,
+    )
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_minpack_py.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_minpack_py.py
new file mode 100644
index 00000000..27030a86
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_minpack_py.py
@@ -0,0 +1,1016 @@
+import warnings
+from . import _minpack
+
+import numpy as np
+from numpy import (atleast_1d, dot, take, triu, shape, eye,
+                   transpose, zeros, prod, greater,
+                   asarray, inf,
+                   finfo, inexact, issubdtype, dtype)
+from scipy import linalg
+from scipy.linalg import svd, cholesky, solve_triangular, LinAlgError, inv
+from scipy._lib._util import _asarray_validated, _lazywhere
+from scipy._lib._util import getfullargspec_no_self as _getfullargspec
+from ._optimize import OptimizeResult, _check_unknown_options, OptimizeWarning
+from ._lsq import least_squares
+# from ._lsq.common import make_strictly_feasible
+from ._lsq.least_squares import prepare_bounds
+from scipy.optimize._minimize import Bounds
+
+error = _minpack.error
+
+__all__ = ['fsolve', 'leastsq', 'fixed_point', 'curve_fit']
+
+
+def _check_func(checker, argname, thefunc, x0, args, numinputs,
+                output_shape=None):
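+    # Evaluate `thefunc` once at `x0` and check that its output matches
+    # `output_shape` (when given); normally returns (shape, dtype).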
+    res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
+    if (output_shape is not None) and (shape(res) != output_shape):
+        if (output_shape[0] != 1):
+            if len(output_shape) > 1:
+                if output_shape[1] == 1:
+                    return shape(res)
+            msg = "%s: there is a mismatch between the input and output " \
+                  "shape of the '%s' argument" % (checker, argname)
+            func_name = getattr(thefunc, '__name__', None)
+            if func_name:
+                msg += " '%s'." % func_name
+            else:
+                msg += "."
+            msg += ' Shape should be %s but it is %s.' % (output_shape, shape(res))
+            raise TypeError(msg)
+    if issubdtype(res.dtype, inexact):
+        dt = res.dtype
+    else:
+        dt = dtype(float)
+    return shape(res), dt
+
+
+def fsolve(func, x0, args=(), fprime=None, full_output=0,
+           col_deriv=0, xtol=1.49012e-8, maxfev=0, band=None,
+           epsfcn=None, factor=100, diag=None):
+    """
+    Find the roots of a function.
+
+    Return the roots of the (non-linear) equations defined by
+    ``func(x) = 0`` given a starting estimate.
+
+    Parameters
+    ----------
+    func : callable ``f(x, *args)``
+        A function that takes at least one (possibly vector) argument,
+        and returns a value of the same length.
+    x0 : ndarray
+        The starting estimate for the roots of ``func(x) = 0``.
+    args : tuple, optional
+        Any extra arguments to `func`.
+    fprime : callable ``f(x, *args)``, optional
+        A function to compute the Jacobian of `func` with derivatives
+        across the rows. By default, the Jacobian will be estimated.
+    full_output : bool, optional
+        If True, return optional outputs.
+    col_deriv : bool, optional
+        Specify whether the Jacobian function computes derivatives down
+        the columns (faster, because there is no transpose operation).
+    xtol : float, optional
+        The calculation will terminate if the relative error between two
+        consecutive iterates is at most `xtol`.
+    maxfev : int, optional
+        The maximum number of calls to the function. If zero, then
+        ``100*(N+1)`` is the maximum where N is the number of elements
+        in `x0`.
+    band : tuple, optional
+        If set to a two-sequence containing the number of sub- and
+        super-diagonals within the band of the Jacobi matrix, the
+        Jacobi matrix is considered banded (only for ``fprime=None``).
+    epsfcn : float, optional
+        A suitable step length for the forward-difference
+        approximation of the Jacobian (for ``fprime=None``). If
+        `epsfcn` is less than the machine precision, it is assumed
+        that the relative errors in the functions are of the order of
+        the machine precision.
+    factor : float, optional
+        A parameter determining the initial step bound
+        (``factor * || diag * x||``). Should be in the interval
+        ``(0.1, 100)``.
+    diag : sequence, optional
+        N positive entries that serve as scale factors for the
+        variables.
+
+    Returns
+    -------
+    x : ndarray
+        The solution (or the result of the last iteration for
+        an unsuccessful call).
+    infodict : dict
+        A dictionary of optional outputs with the keys:
+
+        ``nfev``
+            number of function calls
+        ``njev``
+            number of Jacobian calls
+        ``fvec``
+            function evaluated at the output
+        ``fjac``
+            the orthogonal matrix, q, produced by the QR
+            factorization of the final approximate Jacobian
+            matrix, stored column wise
+        ``r``
+            upper triangular matrix produced by QR factorization
+            of the same matrix
+        ``qtf``
+            the vector ``(transpose(q) * fvec)``
+
+    ier : int
+        An integer flag.  Set to 1 if a solution was found, otherwise refer
+        to `mesg` for more information.
+    mesg : str
+        If no solution is found, `mesg` details the cause of failure.
+
+    See Also
+    --------
+    root : Interface to root finding algorithms for multivariate
+           functions. See the ``method='hybr'`` in particular.
+
+    Notes
+    -----
+    ``fsolve`` is a wrapper around MINPACK's hybrd and hybrj algorithms.
+
+    Examples
+    --------
+    Find a solution to the system of equations:
+    ``x0*cos(x1) = 4,  x1*x0 - x1 = 5``.
+
+    >>> import numpy as np
+    >>> from scipy.optimize import fsolve
+    >>> def func(x):
+    ...     return [x[0] * np.cos(x[1]) - 4,
+    ...             x[1] * x[0] - x[1] - 5]
+    >>> root = fsolve(func, [1, 1])
+    >>> root
+    array([6.50409711, 0.90841421])
+    >>> np.isclose(func(root), [0.0, 0.0])  # func(root) should be almost 0.0.
+    array([ True,  True])
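+
+    A sketch of supplying the Jacobian of the same system explicitly via
+    `fprime` (rows are equations, columns are variables):
+
+    >>> def jac(x):
+    ...     return [[np.cos(x[1]), -x[0] * np.sin(x[1])],
+    ...             [x[1], x[0] - 1]]
+    >>> root = fsolve(func, [1, 1], fprime=jac)  # converges to the same root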
+
+    """
+    options = {'col_deriv': col_deriv,
+               'xtol': xtol,
+               'maxfev': maxfev,
+               'band': band,
+               'eps': epsfcn,
+               'factor': factor,
+               'diag': diag}
+
+    res = _root_hybr(func, x0, args, jac=fprime, **options)
+    if full_output:
+        x = res['x']
+        info = dict((k, res.get(k))
+                    for k in ('nfev', 'njev', 'fjac', 'r', 'qtf') if k in res)
+        info['fvec'] = res['fun']
+        return x, info, res['status'], res['message']
+    else:
+        status = res['status']
+        msg = res['message']
+        if status == 0:
+            raise TypeError(msg)
+        elif status == 1:
+            pass
+        elif status in [2, 3, 4, 5]:
+            warnings.warn(msg, RuntimeWarning)
+        else:
+            raise TypeError(msg)
+        return res['x']
+
+
+def _root_hybr(func, x0, args=(), jac=None,
+               col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, eps=None,
+               factor=100, diag=None, **unknown_options):
+    """
+    Find the roots of a multivariate function using MINPACK's hybrd and
+    hybrj routines (modified Powell method).
+
+    Options
+    -------
+    col_deriv : bool
+        Specify whether the Jacobian function computes derivatives down
+        the columns (faster, because there is no transpose operation).
+    xtol : float
+        The calculation will terminate if the relative error between two
+        consecutive iterates is at most `xtol`.
+    maxfev : int
+        The maximum number of calls to the function. If zero, then
+        ``100*(N+1)`` is the maximum where N is the number of elements
+        in `x0`.
+    band : tuple
+        If set to a two-sequence containing the number of sub- and
+        super-diagonals within the band of the Jacobi matrix, the
+        Jacobi matrix is considered banded (only for ``fprime=None``).
+    eps : float
+        A suitable step length for the forward-difference
+        approximation of the Jacobian (for ``fprime=None``). If
+        `eps` is less than the machine precision, it is assumed
+        that the relative errors in the functions are of the order of
+        the machine precision.
+    factor : float
+        A parameter determining the initial step bound
+        (``factor * || diag * x||``). Should be in the interval
+        ``(0.1, 100)``.
+    diag : sequence
+        N positive entries that serve as scale factors for the
+        variables.
+
+    """
+    _check_unknown_options(unknown_options)
+    epsfcn = eps
+
+    x0 = asarray(x0).flatten()
+    n = len(x0)
+    if not isinstance(args, tuple):
+        args = (args,)
+    shape, dtype = _check_func('fsolve', 'func', func, x0, args, n, (n,))
+    if epsfcn is None:
+        epsfcn = finfo(dtype).eps
+    Dfun = jac
+    if Dfun is None:
+        if band is None:
+            ml, mu = -10, -10
+        else:
+            ml, mu = band[:2]
+        if maxfev == 0:
+            maxfev = 200 * (n + 1)
+        retval = _minpack._hybrd(func, x0, args, 1, xtol, maxfev,
+                                 ml, mu, epsfcn, factor, diag)
+    else:
+        _check_func('fsolve', 'fprime', Dfun, x0, args, n, (n, n))
+        if (maxfev == 0):
+            maxfev = 100 * (n + 1)
+        retval = _minpack._hybrj(func, Dfun, x0, args, 1,
+                                 col_deriv, xtol, maxfev, factor, diag)
+
+    x, status = retval[0], retval[-1]
+
+    errors = {0: "Improper input parameters were entered.",
+              1: "The solution converged.",
+              2: "The number of calls to function has "
+                  "reached maxfev = %d." % maxfev,
+              3: "xtol=%f is too small, no further improvement "
+                  "in the approximate\n  solution "
+                  "is possible." % xtol,
+              4: "The iteration is not making good progress, as measured "
+                  "by the \n  improvement from the last five "
+                  "Jacobian evaluations.",
+              5: "The iteration is not making good progress, "
+                  "as measured by the \n  improvement from the last "
+                  "ten iterations.",
+              'unknown': "An error occurred."}
+
+    info = retval[1]
+    info['fun'] = info.pop('fvec')
+    sol = OptimizeResult(x=x, success=(status == 1), status=status)
+    sol.update(info)
+    try:
+        sol['message'] = errors[status]
+    except KeyError:
+        sol['message'] = errors['unknown']
+
+    return sol
+
+
+LEASTSQ_SUCCESS = [1, 2, 3, 4]
+LEASTSQ_FAILURE = [5, 6, 7, 8]
+
+
+def leastsq(func, x0, args=(), Dfun=None, full_output=0,
+            col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8,
+            gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None):
+    """
+    Minimize the sum of squares of a set of equations.
+
+    ::
+
+        x = arg min(sum(func(y)**2,axis=0))
+                 y
+
+    Parameters
+    ----------
+    func : callable
+        Should take at least one (possibly length ``N`` vector) argument and
+        return ``M`` floating point numbers. It must not return NaNs or
+        fitting might fail. ``M`` must be greater than or equal to ``N``.
+    x0 : ndarray
+        The starting estimate for the minimization.
+    args : tuple, optional
+        Any extra arguments to func are placed in this tuple.
+    Dfun : callable, optional
+        A function or method to compute the Jacobian of func with derivatives
+        across the rows. If this is None, the Jacobian will be estimated.
+    full_output : bool, optional
+        non-zero to return all optional outputs.
+    col_deriv : bool, optional
+        non-zero to specify that the Jacobian function computes derivatives
+        down the columns (faster, because there is no transpose operation).
+    ftol : float, optional
+        Relative error desired in the sum of squares.
+    xtol : float, optional
+        Relative error desired in the approximate solution.
+    gtol : float, optional
+        Orthogonality desired between the function vector and the columns of
+        the Jacobian.
+    maxfev : int, optional
+        The maximum number of calls to the function. If `Dfun` is provided,
+        then the default `maxfev` is 100*(N+1) where N is the number of elements
+        in x0, otherwise the default `maxfev` is 200*(N+1).
+    epsfcn : float, optional
+        A variable used in determining a suitable step length for the forward-
+        difference approximation of the Jacobian (for Dfun=None).
+        Normally the actual step length will be ``sqrt(epsfcn)*x``.
+        If epsfcn is less than the machine precision, it is assumed that the
+        relative errors are of the order of the machine precision.
+    factor : float, optional
+        A parameter determining the initial step bound
+        (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
+    diag : sequence, optional
+        N positive entries that serve as scale factors for the variables.
+
+    Returns
+    -------
+    x : ndarray
+        The solution (or the result of the last iteration for an unsuccessful
+        call).
+    cov_x : ndarray
+        The inverse of the Hessian. `fjac` and `ipvt` are used to construct an
+        estimate of the Hessian. A value of None indicates a singular matrix,
+        which means the curvature in parameters `x` is numerically flat. To
+        obtain the covariance matrix of the parameters `x`, `cov_x` must be
+        multiplied by the variance of the residuals -- see curve_fit.
+    infodict : dict
+        a dictionary of optional outputs with the keys:
+
+        ``nfev``
+            The number of function calls
+        ``fvec``
+            The function evaluated at the output
+        ``fjac``
+            A permutation of the R matrix of a QR
+            factorization of the final approximate
+            Jacobian matrix, stored column wise.
+            Together with ipvt, the covariance of the
+            estimate can be approximated.
+        ``ipvt``
+            An integer array of length N which defines
+            a permutation matrix, p, such that
+            fjac*p = q*r, where r is upper triangular
+            with diagonal elements of nonincreasing
+            magnitude. Column j of p is column ipvt(j)
+            of the identity matrix.
+        ``qtf``
+            The vector (transpose(q) * fvec).
+
+    mesg : str
+        A string message giving information about the cause of failure.
+    ier : int
+        An integer flag. If it is equal to 1, 2, 3 or 4, the solution was
+        found. Otherwise, the solution was not found. In either case, the
+        optional output variable 'mesg' gives more information.
+
+    See Also
+    --------
+    least_squares : Newer interface to solve nonlinear least-squares problems
+        with bounds on the variables. See ``method='lm'`` in particular.
+
+    Notes
+    -----
+    "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.
+
+    cov_x is a Jacobian approximation to the Hessian of the least squares
+    objective function.
+    This approximation assumes that the objective function is based on the
+    difference between some observed target data (ydata) and a (non-linear)
+    function of the parameters `f(xdata, params)` ::
+
+           func(params) = ydata - f(xdata, params)
+
+    so that the objective function is ::
+
+           min   sum((ydata - f(xdata, params))**2, axis=0)
+         params
+
+    The solution, `x`, is always a 1-D array, regardless of the shape of `x0`,
+    or whether `x0` is a scalar.
+
+    Examples
+    --------
+    >>> from scipy.optimize import leastsq
+    >>> def func(x):
+    ...     return 2*(x-3)**2+1
+    >>> leastsq(func, 0)
+    (array([2.99999999]), 1)
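+
+    A sketch of retrieving the optional outputs as well; as noted above,
+    `cov_x` must still be scaled by the variance of the residuals to obtain
+    the covariance of the parameter estimates:
+
+    >>> x, cov_x, infodict, mesg, ier = leastsq(func, 0, full_output=True)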
+
+    """
+    x0 = asarray(x0).flatten()
+    n = len(x0)
+    if not isinstance(args, tuple):
+        args = (args,)
+    shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
+    m = shape[0]
+
+    if n > m:
+        raise TypeError(f"Improper input: func input vector length N={n} must"
+                        f" not exceed func output vector length M={m}")
+
+    if epsfcn is None:
+        epsfcn = finfo(dtype).eps
+
+    if Dfun is None:
+        if maxfev == 0:
+            maxfev = 200*(n + 1)
+        retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol,
+                                 gtol, maxfev, epsfcn, factor, diag)
+    else:
+        if col_deriv:
+            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
+        else:
+            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
+        if maxfev == 0:
+            maxfev = 100 * (n + 1)
+        retval = _minpack._lmder(func, Dfun, x0, args, full_output,
+                                 col_deriv, ftol, xtol, gtol, maxfev,
+                                 factor, diag)
+
+    errors = {0: ["Improper input parameters.", TypeError],
+              1: ["Both actual and predicted relative reductions "
+                  "in the sum of squares\n  are at most %f" % ftol, None],
+              2: ["The relative error between two consecutive "
+                  "iterates is at most %f" % xtol, None],
+              3: ["Both actual and predicted relative reductions in "
+                  "the sum of squares\n  are at most %f and the "
+                  "relative error between two consecutive "
+                  "iterates is at \n  most %f" % (ftol, xtol), None],
+              4: ["The cosine of the angle between func(x) and any "
+                  "column of the\n  Jacobian is at most %f in "
+                  "absolute value" % gtol, None],
+              5: ["Number of calls to function has reached "
+                  "maxfev = %d." % maxfev, ValueError],
+              6: ["ftol=%f is too small, no further reduction "
+                  "in the sum of squares\n  is possible." % ftol,
+                  ValueError],
+              7: ["xtol=%f is too small, no further improvement in "
+                  "the approximate\n  solution is possible." % xtol,
+                  ValueError],
+              8: ["gtol=%f is too small, func(x) is orthogonal to the "
+                  "columns of\n  the Jacobian to machine "
+                  "precision." % gtol, ValueError]}
+
+    # The FORTRAN return value (possible return values are >= 0 and <= 8)
+    info = retval[-1]
+
+    if full_output:
+        cov_x = None
+        if info in LEASTSQ_SUCCESS:
+            # This was
+            # perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
+            # r = triu(transpose(retval[1]['fjac'])[:n, :])
+            # R = dot(r, perm)
+            # cov_x = inv(dot(transpose(R), R))
+            # but the explicit dot product was not necessary and sometimes
+            # the result was not symmetric positive definite. See gh-4555.
+            perm = retval[1]['ipvt'] - 1
+            n = len(perm)
+            r = triu(transpose(retval[1]['fjac'])[:n, :])
+            inv_triu = linalg.get_lapack_funcs('trtri', (r,))
+            try:
+                # inverse of permuted matrix is a permutation of matrix inverse
+                invR, trtri_info = inv_triu(r)  # default: upper, non-unit diag
+                if trtri_info != 0:  # explicit comparison for readability
+                    raise LinAlgError(f'trtri returned info {trtri_info}')
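+                # Since fjac holds R from the QR factorization of the permuted
+                # Jacobian (J P = Q R), row-permuting inv(R) gives P @ inv(R),
+                # and cov_x = (P inv(R)) (P inv(R))^T = (J^T J)^{-1}.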
+                invR[perm] = invR.copy()
+                cov_x = invR @ invR.T
+            except (LinAlgError, ValueError):
+                pass
+        return (retval[0], cov_x) + retval[1:-1] + (errors[info][0], info)
+    else:
+        if info in LEASTSQ_FAILURE:
+            warnings.warn(errors[info][0], RuntimeWarning)
+        elif info == 0:
+            raise errors[info][1](errors[info][0])
+        return retval[0], info
+
+
+def _wrap_func(func, xdata, ydata, transform):
+    if transform is None:
+        def func_wrapped(params):
+            return func(xdata, *params) - ydata
+    elif transform.ndim == 1:
+        def func_wrapped(params):
+            return transform * (func(xdata, *params) - ydata)
+    else:
+        # Chisq = (y - yd)^T C^{-1} (y-yd)
+        # transform = L such that C = L L^T
+        # C^{-1} = L^{-T} L^{-1}
+        # Chisq = (y - yd)^T L^{-T} L^{-1} (y-yd)
+        # Define (y-yd)' = L^{-1} (y-yd)
+        # by solving
+        # L (y-yd)' = (y-yd)
+        # and minimize (y-yd)'^T (y-yd)'
+        def func_wrapped(params):
+            return solve_triangular(transform, func(xdata, *params) - ydata, lower=True)
+    return func_wrapped
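+
+# A minimal illustration (a sketch, not part of the library) of the whitening
+# done above for a 2-D `sigma`: with C = L L^T, the transformed residual
+# r_w = L^{-1} r satisfies r_w^T r_w = r^T C^{-1} r, so an ordinary sum of
+# squares on r_w minimizes the generalized chi-square:
+#
+#     >>> import numpy as np
+#     >>> from scipy.linalg import cholesky, solve_triangular, inv
+#     >>> C = np.array([[2.0, 0.5], [0.5, 1.0]])
+#     >>> r = np.array([1.0, -2.0])
+#     >>> L = cholesky(C, lower=True)
+#     >>> r_w = solve_triangular(L, r, lower=True)
+#     >>> np.allclose(r_w @ r_w, r @ inv(C) @ r)
+#     True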
+
+
+def _wrap_jac(jac, xdata, transform):
+    if transform is None:
+        def jac_wrapped(params):
+            return jac(xdata, *params)
+    elif transform.ndim == 1:
+        def jac_wrapped(params):
+            return transform[:, np.newaxis] * np.asarray(jac(xdata, *params))
+    else:
+        def jac_wrapped(params):
+            return solve_triangular(transform, np.asarray(jac(xdata, *params)), lower=True)
+    return jac_wrapped
+
+
+def _initialize_feasible(lb, ub):
+    p0 = np.ones_like(lb)
+    lb_finite = np.isfinite(lb)
+    ub_finite = np.isfinite(ub)
+
+    mask = lb_finite & ub_finite
+    p0[mask] = 0.5 * (lb[mask] + ub[mask])
+
+    mask = lb_finite & ~ub_finite
+    p0[mask] = lb[mask] + 1
+
+    mask = ~lb_finite & ub_finite
+    p0[mask] = ub[mask] - 1
+
+    return p0
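+
+# Illustration (not part of the library): with lb = [0., 0., -np.inf] and
+# ub = [1., np.inf, np.inf], `_initialize_feasible` returns [0.5, 1., 1.]:
+# the midpoint where both bounds are finite, lb + 1 where only the lower
+# bound is finite, and the all-ones default where neither bound is finite.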
+
+
+def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False,
+              check_finite=True, bounds=(-np.inf, np.inf), method=None,
+              jac=None, *, full_output=False, **kwargs):
+    """
+    Use non-linear least squares to fit a function, f, to data.
+
+    Assumes ``ydata = f(xdata, *params) + eps``.
+
+    Parameters
+    ----------
+    f : callable
+        The model function, f(x, ...). It must take the independent
+        variable as the first argument and the parameters to fit as
+        separate remaining arguments.
+    xdata : array_like
+        The independent variable where the data is measured.
+        Should usually be an M-length sequence or a (k,M)-shaped array for
+        functions with k predictors, and each element should be float
+        convertible if it is an array-like object.
+    ydata : array_like
+        The dependent data, a length M array - nominally ``f(xdata, ...)``.
+    p0 : array_like, optional
+        Initial guess for the parameters (length N). If None, then the
+        initial values will all be 1 (if the number of parameters for the
+        function can be determined using introspection, otherwise a
+        ValueError is raised).
+    sigma : None or M-length sequence or MxM array, optional
+        Determines the uncertainty in `ydata`. If we define residuals as
+        ``r = ydata - f(xdata, *popt)``, then the interpretation of `sigma`
+        depends on its number of dimensions:
+
+            - A 1-D `sigma` should contain values of standard deviations of
+              errors in `ydata`. In this case, the optimized function is
+              ``chisq = sum((r / sigma) ** 2)``.
+
+            - A 2-D `sigma` should contain the covariance matrix of
+              errors in `ydata`. In this case, the optimized function is
+              ``chisq = r.T @ inv(sigma) @ r``.
+
+              .. versionadded:: 0.19
+
+        None (default) is equivalent to a 1-D `sigma` filled with ones.
+    absolute_sigma : bool, optional
+        If True, `sigma` is used in an absolute sense and the estimated parameter
+        covariance `pcov` reflects these absolute values.
+
+        If False (default), only the relative magnitudes of the `sigma` values matter.
+        The returned parameter covariance matrix `pcov` is based on scaling
+        `sigma` by a constant factor. This constant is set by demanding that the
+        reduced `chisq` for the optimal parameters `popt` when using the
+        *scaled* `sigma` equals unity. In other words, `sigma` is scaled to
+        match the sample variance of the residuals after the fit.
+        Mathematically,
+        ``pcov(absolute_sigma=False) = pcov(absolute_sigma=True) * chisq(popt)/(M-N)``
+    check_finite : bool, optional
+        If True, check that the input arrays do not contain NaNs or infs,
+        and raise a ValueError if they do. Setting this parameter to
+        False may silently produce nonsensical results if the input arrays
+        do contain nans. Default is True.
+    bounds : 2-tuple of array_like or `Bounds`, optional
+        Lower and upper bounds on parameters. Defaults to no bounds.
+        There are two ways to specify the bounds:
+
+            - Instance of `Bounds` class.
+
+            - 2-tuple of array_like: Each element of the tuple must be either
+              an array with the length equal to the number of parameters, or a
+              scalar (in which case the bound is taken to be the same for all
+              parameters). Use ``np.inf`` with an appropriate sign to disable
+              bounds on all or some parameters.
+
+    method : {'lm', 'trf', 'dogbox'}, optional
+        Method to use for optimization. See `least_squares` for more details.
+        Default is 'lm' for unconstrained problems and 'trf' if `bounds` are
+        provided. The method 'lm' won't work when the number of observations
+        is less than the number of variables; use 'trf' or 'dogbox' in that
+        case.
+
+        .. versionadded:: 0.17
+    jac : callable, string or None, optional
+        Function with signature ``jac(x, ...)`` which computes the Jacobian
+        matrix of the model function with respect to parameters as a dense
+        array_like structure. It will be scaled according to provided `sigma`.
+        If None (default), the Jacobian will be estimated numerically.
+        String keywords for 'trf' and 'dogbox' methods can be used to select
+        a finite difference scheme, see `least_squares`.
+
+        .. versionadded:: 0.18
+    full_output : boolean, optional
+        If True, this function returns additional information: `infodict`,
+        `mesg`, and `ier`.
+
+        .. versionadded:: 1.9
+    **kwargs
+        Keyword arguments passed to `leastsq` for ``method='lm'`` or
+        `least_squares` otherwise.
+
+    Returns
+    -------
+    popt : array
+        Optimal values for the parameters so that the sum of the squared
+        residuals of ``f(xdata, *popt) - ydata`` is minimized.
+    pcov : 2-D array
+        The estimated covariance of popt. The diagonals provide the variance
+        of the parameter estimate. To compute one standard deviation errors
+        on the parameters use ``perr = np.sqrt(np.diag(pcov))``.
+
+        How the `sigma` parameter affects the estimated covariance
+        depends on the `absolute_sigma` argument, as described above.
+
+        If the Jacobian matrix at the solution doesn't have full rank, then
+        the 'lm' method returns a matrix filled with ``np.inf``; the 'trf'
+        and 'dogbox' methods, on the other hand, use the Moore-Penrose
+        pseudoinverse to compute the covariance matrix.
+    infodict : dict (returned only if `full_output` is True)
+        a dictionary of optional outputs with the keys:
+
+        ``nfev``
+            The number of function calls. Methods 'trf' and 'dogbox' do not
+            count function calls for numerical Jacobian approximation,
+            as opposed to the 'lm' method.
+        ``fvec``
+            The function values evaluated at the solution.
+        ``fjac``
+            A permutation of the R matrix of a QR
+            factorization of the final approximate
+            Jacobian matrix, stored column wise.
+            Together with ipvt, the covariance of the
+            estimate can be approximated.
+            Method 'lm' only provides this information.
+        ``ipvt``
+            An integer array of length N which defines
+            a permutation matrix, p, such that
+            fjac*p = q*r, where r is upper triangular
+            with diagonal elements of nonincreasing
+            magnitude. Column j of p is column ipvt(j)
+            of the identity matrix.
+            Method 'lm' only provides this information.
+        ``qtf``
+            The vector (transpose(q) * fvec).
+            Method 'lm' only provides this information.
+
+        .. versionadded:: 1.9
+    mesg : str (returned only if `full_output` is True)
+        A string message giving information about the solution.
+
+        .. versionadded:: 1.9
+    ier : int (returned only if `full_output` is True)
+        An integer flag. If it is equal to 1, 2, 3 or 4, the solution was
+        found. Otherwise, the solution was not found. In either case, the
+        optional output variable `mesg` gives more information.
+
+        .. versionadded:: 1.9
+
+    Raises
+    ------
+    ValueError
+        if either `ydata` or `xdata` contain NaNs, or if incompatible options
+        are used.
+
+    RuntimeError
+        if the least-squares minimization fails.
+
+    OptimizeWarning
+        if covariance of the parameters can not be estimated.
+
+    See Also
+    --------
+    least_squares : Minimize the sum of squares of nonlinear functions.
+    scipy.stats.linregress : Calculate a linear least squares regression for
+                             two sets of measurements.
+
+    Notes
+    -----
+    Users should ensure that inputs `xdata`, `ydata`, and the output of `f`
+    are ``float64``, or else the optimization may return incorrect results.
+
+    With ``method='lm'``, the algorithm uses the Levenberg-Marquardt algorithm
+    through `leastsq`. Note that this algorithm can only deal with
+    unconstrained problems.
+
+    Box constraints can be handled by methods 'trf' and 'dogbox'. Refer to
+    the docstring of `least_squares` for more information.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.optimize import curve_fit
+
+    >>> def func(x, a, b, c):
+    ...     return a * np.exp(-b * x) + c
+
+    Define the data to be fit with some noise:
+
+    >>> xdata = np.linspace(0, 4, 50)
+    >>> y = func(xdata, 2.5, 1.3, 0.5)
+    >>> rng = np.random.default_rng()
+    >>> y_noise = 0.2 * rng.normal(size=xdata.size)
+    >>> ydata = y + y_noise
+    >>> plt.plot(xdata, ydata, 'b-', label='data')
+
+    Fit for the parameters a, b, c of the function `func`:
+
+    >>> popt, pcov = curve_fit(func, xdata, ydata)
+    >>> popt
+    array([2.56274217, 1.37268521, 0.47427475])
+    >>> plt.plot(xdata, func(xdata, *popt), 'r-',
+    ...          label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))
+
+    Constrain the optimization to the region of ``0 <= a <= 3``,
+    ``0 <= b <= 1`` and ``0 <= c <= 0.5``:
+
+    >>> popt, pcov = curve_fit(func, xdata, ydata, bounds=(0, [3., 1., 0.5]))
+    >>> popt
+    array([2.43736712, 1.        , 0.34463856])
+    >>> plt.plot(xdata, func(xdata, *popt), 'g--',
+    ...          label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))
+
+    >>> plt.xlabel('x')
+    >>> plt.ylabel('y')
+    >>> plt.legend()
+    >>> plt.show()
+
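+    One-standard-deviation error estimates for the parameters can then be
+    obtained from the diagonal of the covariance matrix, as noted above:
+
+    >>> perr = np.sqrt(np.diag(pcov))
+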
+    """
+    if p0 is None:
+        # determine number of parameters by inspecting the function
+        sig = _getfullargspec(f)
+        args = sig.args
+        if len(args) < 2:
+            raise ValueError("Unable to determine number of fit parameters.")
+        n = len(args) - 1
+    else:
+        p0 = np.atleast_1d(p0)
+        n = p0.size
+
+    if isinstance(bounds, Bounds):
+        lb, ub = bounds.lb, bounds.ub
+    else:
+        lb, ub = prepare_bounds(bounds, n)
+    if p0 is None:
+        p0 = _initialize_feasible(lb, ub)
+
+    bounded_problem = np.any((lb > -np.inf) | (ub < np.inf))
+    if method is None:
+        if bounded_problem:
+            method = 'trf'
+        else:
+            method = 'lm'
+
+    if method == 'lm' and bounded_problem:
+        raise ValueError("Method 'lm' only works for unconstrained problems. "
+                         "Use 'trf' or 'dogbox' instead.")
+
+    # optimization may produce garbage for float32 inputs, cast them to float64
+
+    # NaNs cannot be handled
+    if check_finite:
+        ydata = np.asarray_chkfinite(ydata, float)
+    else:
+        ydata = np.asarray(ydata, float)
+
+    if isinstance(xdata, (list, tuple, np.ndarray)):
+        # `xdata` is passed straight to the user-defined `f`, so allow
+        # non-array_like `xdata`.
+        if check_finite:
+            xdata = np.asarray_chkfinite(xdata, float)
+        else:
+            xdata = np.asarray(xdata, float)
+
+    if ydata.size == 0:
+        raise ValueError("`ydata` must not be empty!")
+
+    # Determine type of sigma
+    if sigma is not None:
+        sigma = np.asarray(sigma)
+
+        # if 1-D, sigma are errors, define transform = 1/sigma
+        if sigma.shape == (ydata.size, ):
+            transform = 1.0 / sigma
+        # if 2-D, sigma is the covariance matrix,
+        # define transform = L such that L L^T = C
+        elif sigma.shape == (ydata.size, ydata.size):
+            try:
+                # scipy.linalg.cholesky requires lower=True to return L L^T = A
+                transform = cholesky(sigma, lower=True)
+            except LinAlgError as e:
+                raise ValueError("`sigma` must be positive definite.") from e
+        else:
+            raise ValueError("`sigma` has incorrect shape.")
+    else:
+        transform = None
+
+    func = _wrap_func(f, xdata, ydata, transform)
+    if callable(jac):
+        jac = _wrap_jac(jac, xdata, transform)
+    elif jac is None and method != 'lm':
+        jac = '2-point'
+
+    if 'args' in kwargs:
+        # The specification for the model function `f` does not support
+        # additional arguments. Refer to the `curve_fit` docstring for
+        # acceptable call signatures of `f`.
+        raise ValueError("'args' is not a supported keyword argument.")
+
+    if method == 'lm':
+        # if ydata.size == 1, this might be used for broadcasting.
+        if ydata.size != 1 and n > ydata.size:
+            raise TypeError(f"The number of func parameters={n} must not"
+                            f" exceed the number of data points={ydata.size}")
+        res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)
+        popt, pcov, infodict, errmsg, ier = res
+        ysize = len(infodict['fvec'])
+        cost = np.sum(infodict['fvec'] ** 2)
+        if ier not in [1, 2, 3, 4]:
+            raise RuntimeError("Optimal parameters not found: " + errmsg)
+    else:
+        # Rename maxfev (leastsq) to max_nfev (least_squares), if specified.
+        if 'max_nfev' not in kwargs:
+            kwargs['max_nfev'] = kwargs.pop('maxfev', None)
+
+        res = least_squares(func, p0, jac=jac, bounds=bounds, method=method,
+                            **kwargs)
+
+        if not res.success:
+            raise RuntimeError("Optimal parameters not found: " + res.message)
+
+        infodict = dict(nfev=res.nfev, fvec=res.fun)
+        ier = res.status
+        errmsg = res.message
+
+        ysize = len(res.fun)
+        cost = 2 * res.cost  # res.cost is half sum of squares!
+        popt = res.x
+
+        # Do Moore-Penrose inverse discarding zero singular values.
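+        # With J = U @ diag(s) @ VT, pcov = (J^T J)^+ = VT.T @ diag(s**-2) @ VT;
+        # singular values below the threshold are treated as zero.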
+        _, s, VT = svd(res.jac, full_matrices=False)
+        threshold = np.finfo(float).eps * max(res.jac.shape) * s[0]
+        s = s[s > threshold]
+        VT = VT[:s.size]
+        pcov = np.dot(VT.T / s**2, VT)
+
+    warn_cov = False
+    if pcov is None:
+        # indeterminate covariance
+        pcov = zeros((len(popt), len(popt)), dtype=float)
+        pcov.fill(inf)
+        warn_cov = True
+    elif not absolute_sigma:
+        if ysize > p0.size:
+            s_sq = cost / (ysize - p0.size)
+            pcov = pcov * s_sq
+        else:
+            pcov.fill(inf)
+            warn_cov = True
+
+    if warn_cov:
+        warnings.warn('Covariance of the parameters could not be estimated',
+                      category=OptimizeWarning)
+
+    if full_output:
+        return popt, pcov, infodict, errmsg, ier
+    else:
+        return popt, pcov
+
+
+def check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0):
+    """Perform a simple check on the gradient for correctness.
+
+    """
+
+    x = atleast_1d(x0)
+    n = len(x)
+    x = x.reshape((n,))
+    fvec = atleast_1d(fcn(x, *args))
+    m = len(fvec)
+    fvec = fvec.reshape((m,))
+    ldfjac = m
+    fjac = atleast_1d(Dfcn(x, *args))
+    fjac = fjac.reshape((m, n))
+    if col_deriv == 0:
+        fjac = transpose(fjac)
+
+    xp = zeros((n,), float)
+    err = zeros((m,), float)
+    fvecp = None
+    _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err)
+
+    fvecp = atleast_1d(fcn(xp, *args))
+    fvecp = fvecp.reshape((m,))
+    _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err)
+
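+    # MINPACK's chkder fills `err` with values in [0, 1]: components close to
+    # 1 indicate that the supplied gradient agrees with a finite-difference
+    # estimate, so the check passes only where err > 0.5.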
+    good = (prod(greater(err, 0.5), axis=0))
+
+    return (good, err)
+
+
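+# Aitken's Del^2 step used by `fixed_point` below: given iterates p0,
+# p1 = func(p0) and p2 = func(p1), the accelerated estimate is
+# p0 - (p1 - p0)**2 / (p2 - 2*p1 + p0); `d` is the denominator here.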
+def _del2(p0, p1, d):
+    return p0 - np.square(p1 - p0) / d
+
+
+def _relerr(actual, desired):
+    return (actual - desired) / desired
+
+
+def _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel):
+    p0 = x0
+    for i in range(maxiter):
+        p1 = func(p0, *args)
+        if use_accel:
+            p2 = func(p1, *args)
+            d = p2 - 2.0 * p1 + p0
+            p = _lazywhere(d != 0, (p0, p1, d), f=_del2, fillvalue=p2)
+        else:
+            p = p1
+        relerr = _lazywhere(p0 != 0, (p, p0), f=_relerr, fillvalue=p)
+        if np.all(np.abs(relerr) < xtol):
+            return p
+        p0 = p
+    msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p)
+    raise RuntimeError(msg)
+
+
+def fixed_point(func, x0, args=(), xtol=1e-8, maxiter=500, method='del2'):
+    """
+    Find a fixed point of the function.
+
+    Given a function of one or more variables and a starting point, find a
+    fixed point of the function: i.e., where ``func(x0) == x0``.
+
+    Parameters
+    ----------
+    func : function
+        Function to evaluate.
+    x0 : array_like
+        Initial guess for the fixed point of the function.
+    args : tuple, optional
+        Extra arguments to `func`.
+    xtol : float, optional
+        Convergence tolerance, defaults to 1e-08.
+    maxiter : int, optional
+        Maximum number of iterations, defaults to 500.
+    method : {"del2", "iteration"}, optional
+        Method of finding the fixed-point, defaults to "del2",
+        which uses Steffensen's Method with Aitken's ``Del^2``
+        convergence acceleration [1]_. The "iteration" method simply iterates
+        the function until convergence is detected, without attempting to
+        accelerate the convergence.
+
+    References
+    ----------
+    .. [1] Burden, Faires, "Numerical Analysis", 5th edition, pg. 80
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import optimize
+    >>> def func(x, c1, c2):
+    ...    return np.sqrt(c1/(x+c2))
+    >>> c1 = np.array([10,12.])
+    >>> c2 = np.array([3, 5.])
+    >>> optimize.fixed_point(func, [1.2, 1.3], args=(c1,c2))
+    array([ 1.4920333 ,  1.37228132])
+
+    """
+    use_accel = {'del2': True, 'iteration': False}[method]
+    x0 = _asarray_validated(x0, as_inexact=True)
+    return _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_nnls.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_nnls.py
new file mode 100644
index 00000000..2663311f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_nnls.py
@@ -0,0 +1,85 @@
+from . import __nnls
+from numpy import asarray_chkfinite, zeros, double
+
+__all__ = ['nnls']
+
+
+def nnls(A, b, maxiter=None):
+    """
+    Solve ``argmin_x || Ax - b ||_2`` for ``x>=0``. This is a wrapper
+    for a FORTRAN non-negative least squares solver.
+
+    Parameters
+    ----------
+    A : ndarray
+        Matrix ``A`` as shown above.
+    b : ndarray
+        Right-hand side vector.
+    maxiter : int, optional
+        Maximum number of iterations.
+        Default is ``3 * A.shape[1]``.
+
+    Returns
+    -------
+    x : ndarray
+        Solution vector.
+    rnorm : float
+        The residual, ``|| Ax-b ||_2``.
+
+    See Also
+    --------
+    lsq_linear : Linear least squares with bounds on the variables
+
+    Notes
+    -----
+    The FORTRAN code was published in the book below. The algorithm
+    is an active set method. It solves the KKT (Karush-Kuhn-Tucker)
+    conditions for the non-negative least squares problem.
+
+    References
+    ----------
+    Lawson C., Hanson R.J., (1987) Solving Least Squares Problems, SIAM
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.optimize import nnls
+
+    >>> A = np.array([[1, 0], [1, 0], [0, 1]])
+    >>> b = np.array([2, 1, 1])
+    >>> nnls(A, b)
+    (array([1.5, 1. ]), 0.7071067811865475)
+
+    >>> b = np.array([-1, -1, -1])
+    >>> nnls(A, b)
+    (array([0., 0.]), 1.7320508075688772)
+
+    """
+
+    A, b = map(asarray_chkfinite, (A, b))
+
+    if len(A.shape) != 2:
+        raise ValueError("Expected a two-dimensional array (matrix)" +
+                         ", but the shape of A is %s" % (A.shape, ))
+    if len(b.shape) != 1:
+        raise ValueError("Expected a one-dimensional array (vector)" +
+                         ", but the shape of b is %s" % (b.shape, ))
+
+    m, n = A.shape
+
+    if m != b.shape[0]:
+        raise ValueError(
+                "Incompatible dimensions. The first dimension of " +
+                "A is %s, while the shape of b is %s" % (m, (b.shape[0], )))
+
+    maxiter = -1 if maxiter is None else int(maxiter)
+
+    w = zeros((n,), dtype=double)
+    zz = zeros((m,), dtype=double)
+    index = zeros((n,), dtype=int)
+
+    x, rnorm, mode = __nnls.nnls(A, m, n, b, w, zz, index, maxiter)
+    if mode != 1:
+        raise RuntimeError("too many iterations")
+
+    return x, rnorm
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_nonlin.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_nonlin.py
new file mode 100644
index 00000000..7c6a505c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_nonlin.py
@@ -0,0 +1,1566 @@
+# Copyright (C) 2009, Pauli Virtanen 
+# Distributed under the same license as SciPy.
+
+import sys
+import numpy as np
+from scipy.linalg import norm, solve, inv, qr, svd, LinAlgError
+from numpy import asarray, dot, vdot
+import scipy.sparse.linalg
+import scipy.sparse
+from scipy.linalg import get_blas_funcs
+import inspect
+from scipy._lib._util import getfullargspec_no_self as _getfullargspec
+from ._linesearch import scalar_search_wolfe1, scalar_search_armijo
+
+
+__all__ = [
+    'broyden1', 'broyden2', 'anderson', 'linearmixing',
+    'diagbroyden', 'excitingmixing', 'newton_krylov',
+    'BroydenFirst', 'KrylovJacobian', 'InverseJacobian']
+
+#------------------------------------------------------------------------------
+# Utility functions
+#------------------------------------------------------------------------------
+
+
+class NoConvergence(Exception):
+    pass
+
+
+def maxnorm(x):
+    return np.absolute(x).max()
+
+
+def _as_inexact(x):
+    """Return `x` as an array, of either floats or complex floats"""
+    x = asarray(x)
+    if not np.issubdtype(x.dtype, np.inexact):
+        return asarray(x, dtype=np.float_)
+    return x
+
+
+def _array_like(x, x0):
+    """Return ndarray `x` as same array subclass and shape as `x0`"""
+    x = np.reshape(x, np.shape(x0))
+    wrap = getattr(x0, '__array_wrap__', x.__array_wrap__)
+    return wrap(x)
+
+
+def _safe_norm(v):
+    if not np.isfinite(v).all():
+        return np.array(np.inf)
+    return norm(v)
+
+#------------------------------------------------------------------------------
+# Generic nonlinear solver machinery
+#------------------------------------------------------------------------------
+
+
+_doc_parts = dict(
+    params_basic="""
+    F : function(x) -> f
+        Function whose root to find; should take and return an array-like
+        object.
+    xin : array_like
+        Initial guess for the solution
+    """.strip(),
+    params_extra="""
+    iter : int, optional
+        Number of iterations to make. If omitted (default), make as many
+        as required to meet tolerances.
+    verbose : bool, optional
+        Print status to stdout on every iteration.
+    maxiter : int, optional
+        Maximum number of iterations to make. If more are needed to
+        meet convergence, `NoConvergence` is raised.
+    f_tol : float, optional
+        Absolute tolerance (in max-norm) for the residual.
+        If omitted, default is 6e-6.
+    f_rtol : float, optional
+        Relative tolerance for the residual. If omitted, not used.
+    x_tol : float, optional
+        Absolute minimum step size, as determined from the Jacobian
+        approximation. If the step size is smaller than this, optimization
+        is terminated as successful. If omitted, not used.
+    x_rtol : float, optional
+        Relative minimum step size. If omitted, not used.
+    tol_norm : function(vector) -> scalar, optional
+        Norm to use in convergence check. Default is the maximum norm.
+    line_search : {None, 'armijo' (default), 'wolfe'}, optional
+        Which type of line search to use to determine the step size in the
+        direction given by the Jacobian approximation. Defaults to 'armijo'.
+    callback : function, optional
+        Optional callback function. It is called on every iteration as
+        ``callback(x, f)`` where `x` is the current solution and `f`
+        the corresponding residual.
+
+    Returns
+    -------
+    sol : ndarray
+        An array (of similar array type as `x0`) containing the final solution.
+
+    Raises
+    ------
+    NoConvergence
+        When a solution was not found.
+
+    """.strip()
+)
+
+
+def _set_doc(obj):
+    if obj.__doc__:
+        obj.__doc__ = obj.__doc__ % _doc_parts
+
+
+def nonlin_solve(F, x0, jacobian='krylov', iter=None, verbose=False,
+                 maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
+                 tol_norm=None, line_search='armijo', callback=None,
+                 full_output=False, raise_exception=True):
+    """
+    Find a root of a function, in a way suitable for large-scale problems.
+
+    Parameters
+    ----------
+    %(params_basic)s
+    jacobian : Jacobian
+        A Jacobian approximation: `Jacobian` object or something that
+        `asjacobian` can transform to one. Alternatively, a string specifying
+        which of the builtin Jacobian approximations to use:
+
+            krylov, broyden1, broyden2, anderson
+            diagbroyden, linearmixing, excitingmixing
+
+    %(params_extra)s
+    full_output : bool
+        If true, returns a dictionary `info` containing convergence
+        information.
+    raise_exception : bool
+        If True, a `NoConvergence` exception is raised if no solution is found.
+
+    See Also
+    --------
+    asjacobian, Jacobian
+
+    Notes
+    -----
+    This algorithm implements the inexact Newton method, with
+    backtracking or full line searches. Several Jacobian
+    approximations are available, including Krylov and Quasi-Newton
+    methods.
+
+    References
+    ----------
+    .. [KIM] C. T. Kelley, \"Iterative Methods for Linear and Nonlinear
+       Equations\". Society for Industrial and Applied Mathematics. (1995)
+       https://archive.siam.org/books/kelley/fr16/
+
+    """
+    # Can't use default parameters because it's being explicitly passed as None
+    # from the calling function, so we need to set it here.
+    tol_norm = maxnorm if tol_norm is None else tol_norm
+    condition = TerminationCondition(f_tol=f_tol, f_rtol=f_rtol,
+                                     x_tol=x_tol, x_rtol=x_rtol,
+                                     iter=iter, norm=tol_norm)
+
+    x0 = _as_inexact(x0)
+    func = lambda z: _as_inexact(F(_array_like(z, x0))).flatten()
+    x = x0.flatten()
+
+    dx = np.full_like(x, np.inf)
+    Fx = func(x)
+    Fx_norm = norm(Fx)
+
+    jacobian = asjacobian(jacobian)
+    jacobian.setup(x.copy(), Fx, func)
+
+    if maxiter is None:
+        if iter is not None:
+            maxiter = iter + 1
+        else:
+            maxiter = 100*(x.size+1)
+
+    if line_search is True:
+        line_search = 'armijo'
+    elif line_search is False:
+        line_search = None
+
+    if line_search not in (None, 'armijo', 'wolfe'):
+        raise ValueError("Invalid line search")
+
+    # Solver tolerance selection
+    gamma = 0.9
+    eta_max = 0.9999
+    eta_treshold = 0.1
+    eta = 1e-3
+
+    for n in range(maxiter):
+        status = condition.check(Fx, x, dx)
+        if status:
+            break
+
+        # The tolerance, as computed for scipy.sparse.linalg.* routines
+        tol = min(eta, eta*Fx_norm)
+        dx = -jacobian.solve(Fx, tol=tol)
+
+        if norm(dx) == 0:
+            raise ValueError("Jacobian inversion yielded zero vector. "
+                             "This indicates a bug in the Jacobian "
+                             "approximation.")
+
+        # Line search, or Newton step
+        if line_search:
+            s, x, Fx, Fx_norm_new = _nonlin_line_search(func, x, Fx, dx,
+                                                        line_search)
+        else:
+            s = 1.0
+            x = x + dx
+            Fx = func(x)
+            Fx_norm_new = norm(Fx)
+
+        jacobian.update(x.copy(), Fx)
+
+        if callback:
+            callback(x, Fx)
+
+        # Adjust forcing parameters for inexact methods
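+        # (an Eisenstat-Walker-style forcing choice, stated here as an
+        # interpretation: the inner solve tolerance tightens as the residual
+        # norm drops, with a safeguard against decreasing eta too quickly)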
+        eta_A = gamma * Fx_norm_new**2 / Fx_norm**2
+        if gamma * eta**2 < eta_treshold:
+            eta = min(eta_max, eta_A)
+        else:
+            eta = min(eta_max, max(eta_A, gamma*eta**2))
+
+        Fx_norm = Fx_norm_new
+
+        # Print status
+        if verbose:
+            sys.stdout.write("%d:  |F(x)| = %g; step %g\n" % (
+                n, tol_norm(Fx), s))
+            sys.stdout.flush()
+    else:
+        if raise_exception:
+            raise NoConvergence(_array_like(x, x0))
+        else:
+            status = 2
+
+    if full_output:
+        info = {'nit': condition.iteration,
+                'fun': Fx,
+                'status': status,
+                'success': status == 1,
+                'message': {1: 'A solution was found at the specified '
+                               'tolerance.',
+                            2: 'The maximum number of iterations allowed '
+                               'has been reached.'
+                            }[status]
+                }
+        return _array_like(x, x0), info
+    else:
+        return _array_like(x, x0)
+
+
+_set_doc(nonlin_solve)
+
+
+def _nonlin_line_search(func, x, Fx, dx, search_type='armijo', rdiff=1e-8,
+                        smin=1e-2):
+    tmp_s = [0]
+    tmp_Fx = [Fx]
+    tmp_phi = [norm(Fx)**2]
+    s_norm = norm(x) / norm(dx)
+
+    def phi(s, store=True):
+        if s == tmp_s[0]:
+            return tmp_phi[0]
+        xt = x + s*dx
+        v = func(xt)
+        p = _safe_norm(v)**2
+        if store:
+            tmp_s[0] = s
+            tmp_phi[0] = p
+            tmp_Fx[0] = v
+        return p
+
+    def derphi(s):
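+        # forward-difference estimate of phi'(s), with a step scaled to the
+        # magnitudes of s and norm(x)/norm(dx)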
+        ds = (abs(s) + s_norm + 1) * rdiff
+        return (phi(s+ds, store=False) - phi(s)) / ds
+
+    if search_type == 'wolfe':
+        s, phi1, phi0 = scalar_search_wolfe1(phi, derphi, tmp_phi[0],
+                                             xtol=1e-2, amin=smin)
+    elif search_type == 'armijo':
+        s, phi1 = scalar_search_armijo(phi, tmp_phi[0], -tmp_phi[0],
+                                       amin=smin)
+
+    if s is None:
+        # XXX: No suitable step length found. Take the full Newton step,
+        #      and hope for the best.
+        s = 1.0
+
+    x = x + s*dx
+    if s == tmp_s[0]:
+        Fx = tmp_Fx[0]
+    else:
+        Fx = func(x)
+    Fx_norm = norm(Fx)
+
+    return s, x, Fx, Fx_norm
+
+
+class TerminationCondition:
+    """
+    Termination condition for an iteration. The iteration is terminated if
+
+    - |F| < f_rtol*|F_0|, AND
+    - |F| < f_tol
+
+    AND
+
+    - |dx| < x_rtol*|x|, AND
+    - |dx| < x_tol
+
+    """
+    def __init__(self, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
+                 iter=None, norm=maxnorm):
+
+        if f_tol is None:
+            f_tol = np.finfo(np.float_).eps ** (1./3)
+        if f_rtol is None:
+            f_rtol = np.inf
+        if x_tol is None:
+            x_tol = np.inf
+        if x_rtol is None:
+            x_rtol = np.inf
+
+        self.x_tol = x_tol
+        self.x_rtol = x_rtol
+        self.f_tol = f_tol
+        self.f_rtol = f_rtol
+
+        self.norm = norm
+
+        self.iter = iter
+
+        self.f0_norm = None
+        self.iteration = 0
+
+    def check(self, f, x, dx):
+        self.iteration += 1
+        f_norm = self.norm(f)
+        x_norm = self.norm(x)
+        dx_norm = self.norm(dx)
+
+        if self.f0_norm is None:
+            self.f0_norm = f_norm
+
+        if f_norm == 0:
+            return 1
+
+        if self.iter is not None:
+            # backwards compatibility with SciPy 0.6.0
+            return 2 * (self.iteration > self.iter)
+
+        # NB: condition must succeed for rtol=inf even if norm == 0
+        return int((f_norm <= self.f_tol
+                    and f_norm/self.f_rtol <= self.f0_norm)
+                   and (dx_norm <= self.x_tol
+                        and dx_norm/self.x_rtol <= x_norm))
+
+
+#------------------------------------------------------------------------------
+# Generic Jacobian approximation
+#------------------------------------------------------------------------------
+
+class Jacobian:
+    """
+    Common interface for Jacobians or Jacobian approximations.
+
+    The optional methods are useful when implementing algorithms such as
+    trust-region methods, which often require evaluating transposes of the
+    Jacobian.
+
+    Methods
+    -------
+    solve
+        Returns J^-1 * v
+    update
+        Updates Jacobian to point `x` (where the function has residual `Fx`)
+
+    matvec : optional
+        Returns J * v
+    rmatvec : optional
+        Returns J^H * v
+    rsolve : optional
+        Returns J^-H * v
+    matmat : optional
+        Returns J * V, where V is a dense matrix with dimensions (N,K).
+    todense : optional
+        Form the dense Jacobian matrix. Necessary for dense trust region
+        algorithms, and useful for testing.
+
+    Attributes
+    ----------
+    shape
+        Matrix dimensions (M, N)
+    dtype
+        Data type of the matrix.
+    func : callable, optional
+        Function the Jacobian corresponds to
+
+    """
+
+    def __init__(self, **kw):
+        names = ["solve", "update", "matvec", "rmatvec", "rsolve",
+                 "matmat", "todense", "shape", "dtype"]
+        for name, value in kw.items():
+            if name not in names:
+                raise ValueError("Unknown keyword argument %s" % name)
+            if value is not None:
+                setattr(self, name, kw[name])
+
+        if hasattr(self, 'todense'):
+            self.__array__ = lambda: self.todense()
+
+    def aspreconditioner(self):
+        return InverseJacobian(self)
+
+    def solve(self, v, tol=0):
+        raise NotImplementedError
+
+    def update(self, x, F):
+        pass
+
+    def setup(self, x, F, func):
+        self.func = func
+        self.shape = (F.size, x.size)
+        self.dtype = F.dtype
+        if self.__class__.setup is Jacobian.setup:
+            # Call on the first point unless overridden
+            self.update(x, F)
+
+
+class InverseJacobian:
+    def __init__(self, jacobian):
+        self.jacobian = jacobian
+        self.matvec = jacobian.solve
+        self.update = jacobian.update
+        if hasattr(jacobian, 'setup'):
+            self.setup = jacobian.setup
+        if hasattr(jacobian, 'rsolve'):
+            self.rmatvec = jacobian.rsolve
+
+    @property
+    def shape(self):
+        return self.jacobian.shape
+
+    @property
+    def dtype(self):
+        return self.jacobian.dtype
+
+
+def asjacobian(J):
+    """
+    Convert given object to one suitable for use as a Jacobian.
+    """
+    spsolve = scipy.sparse.linalg.spsolve
+    if isinstance(J, Jacobian):
+        return J
+    elif inspect.isclass(J) and issubclass(J, Jacobian):
+        return J()
+    elif isinstance(J, np.ndarray):
+        if J.ndim > 2:
+            raise ValueError('array must have rank <= 2')
+        J = np.atleast_2d(np.asarray(J))
+        if J.shape[0] != J.shape[1]:
+            raise ValueError('array must be square')
+
+        return Jacobian(matvec=lambda v: dot(J, v),
+                        rmatvec=lambda v: dot(J.conj().T, v),
+                        solve=lambda v: solve(J, v),
+                        rsolve=lambda v: solve(J.conj().T, v),
+                        dtype=J.dtype, shape=J.shape)
+    elif scipy.sparse.isspmatrix(J):
+        if J.shape[0] != J.shape[1]:
+            raise ValueError('matrix must be square')
+        return Jacobian(matvec=lambda v: J*v,
+                        rmatvec=lambda v: J.conj().T * v,
+                        solve=lambda v: spsolve(J, v),
+                        rsolve=lambda v: spsolve(J.conj().T, v),
+                        dtype=J.dtype, shape=J.shape)
+    elif hasattr(J, 'shape') and hasattr(J, 'dtype') and hasattr(J, 'solve'):
+        return Jacobian(matvec=getattr(J, 'matvec'),
+                        rmatvec=getattr(J, 'rmatvec'),
+                        solve=J.solve,
+                        rsolve=getattr(J, 'rsolve'),
+                        update=getattr(J, 'update'),
+                        setup=getattr(J, 'setup'),
+                        dtype=J.dtype,
+                        shape=J.shape)
+    elif callable(J):
+        # Assume it's a function J(x) that returns the Jacobian
+        class Jac(Jacobian):
+            def update(self, x, F):
+                self.x = x
+
+            def solve(self, v, tol=0):
+                m = J(self.x)
+                if isinstance(m, np.ndarray):
+                    return solve(m, v)
+                elif scipy.sparse.isspmatrix(m):
+                    return spsolve(m, v)
+                else:
+                    raise ValueError("Unknown matrix type")
+
+            def matvec(self, v):
+                m = J(self.x)
+                if isinstance(m, np.ndarray):
+                    return dot(m, v)
+                elif scipy.sparse.isspmatrix(m):
+                    return m*v
+                else:
+                    raise ValueError("Unknown matrix type")
+
+            def rsolve(self, v, tol=0):
+                m = J(self.x)
+                if isinstance(m, np.ndarray):
+                    return solve(m.conj().T, v)
+                elif scipy.sparse.isspmatrix(m):
+                    return spsolve(m.conj().T, v)
+                else:
+                    raise ValueError("Unknown matrix type")
+
+            def rmatvec(self, v):
+                m = J(self.x)
+                if isinstance(m, np.ndarray):
+                    return dot(m.conj().T, v)
+                elif scipy.sparse.isspmatrix(m):
+                    return m.conj().T * v
+                else:
+                    raise ValueError("Unknown matrix type")
+        return Jac()
+    elif isinstance(J, str):
+        return dict(broyden1=BroydenFirst,
+                    broyden2=BroydenSecond,
+                    anderson=Anderson,
+                    diagbroyden=DiagBroyden,
+                    linearmixing=LinearMixing,
+                    excitingmixing=ExcitingMixing,
+                    krylov=KrylovJacobian)[J]()
+    else:
+        raise TypeError('Cannot convert object to a Jacobian')
+
+
+#------------------------------------------------------------------------------
+# Broyden
+#------------------------------------------------------------------------------
+
+class GenericBroyden(Jacobian):
+    def setup(self, x0, f0, func):
+        Jacobian.setup(self, x0, f0, func)
+        self.last_f = f0
+        self.last_x = x0
+
+        if hasattr(self, 'alpha') and self.alpha is None:
+            # Autoscale the initial Jacobian parameter
+            # unless we have already guessed the solution.
+            normf0 = norm(f0)
+            if normf0:
+                self.alpha = 0.5*max(norm(x0), 1) / normf0
+            else:
+                self.alpha = 1.0
+
+    def _update(self, x, f, dx, df, dx_norm, df_norm):
+        raise NotImplementedError
+
+    def update(self, x, f):
+        df = f - self.last_f
+        dx = x - self.last_x
+        self._update(x, f, dx, df, norm(dx), norm(df))
+        self.last_f = f
+        self.last_x = x
+
+
+class LowRankMatrix:
+    r"""
+    A matrix represented as
+
+    .. math:: \alpha I + \sum_{n=0}^{n=M} c_n d_n^\dagger
+
+    However, if the rank of the matrix reaches the dimension of the vectors,
+    the full matrix representation is used from then on.
+
+    """
+
+    def __init__(self, alpha, n, dtype):
+        self.alpha = alpha
+        self.cs = []
+        self.ds = []
+        self.n = n
+        self.dtype = dtype
+        self.collapsed = None
+
+    @staticmethod
+    def _matvec(v, alpha, cs, ds):
+        axpy, scal, dotc = get_blas_funcs(['axpy', 'scal', 'dotc'],
+                                          cs[:1] + [v])
+        w = alpha * v
+        for c, d in zip(cs, ds):
+            a = dotc(d, v)
+            w = axpy(c, w, w.size, a)
+        return w
+
+    @staticmethod
+    def _solve(v, alpha, cs, ds):
+        """Evaluate w = M^-1 v"""
+        if len(cs) == 0:
+            return v/alpha
+
+        # (B + C D^H)^-1 = B^-1 - B^-1 C (I + D^H B^-1 C)^-1 D^H B^-1
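+        # Here B = alpha*I, so `A` below is alpha*I + D^H C and the result
+        # is w = v/alpha - C A^{-1} (D^H v)/alpha.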
+
+        axpy, dotc = get_blas_funcs(['axpy', 'dotc'], cs[:1] + [v])
+
+        c0 = cs[0]
+        A = alpha * np.identity(len(cs), dtype=c0.dtype)
+        for i, d in enumerate(ds):
+            for j, c in enumerate(cs):
+                A[i,j] += dotc(d, c)
+
+        q = np.zeros(len(cs), dtype=c0.dtype)
+        for j, d in enumerate(ds):
+            q[j] = dotc(d, v)
+        q /= alpha
+        q = solve(A, q)
+
+        w = v/alpha
+        for c, qc in zip(cs, q):
+            w = axpy(c, w, w.size, -qc)
+
+        return w
+
+    def matvec(self, v):
+        """Evaluate w = M v"""
+        if self.collapsed is not None:
+            return np.dot(self.collapsed, v)
+        return LowRankMatrix._matvec(v, self.alpha, self.cs, self.ds)
+
+    def rmatvec(self, v):
+        """Evaluate w = M^H v"""
+        if self.collapsed is not None:
+            return np.dot(self.collapsed.T.conj(), v)
+        return LowRankMatrix._matvec(v, np.conj(self.alpha), self.ds, self.cs)
+
+    def solve(self, v, tol=0):
+        """Evaluate w = M^-1 v"""
+        if self.collapsed is not None:
+            return solve(self.collapsed, v)
+        return LowRankMatrix._solve(v, self.alpha, self.cs, self.ds)
+
+    def rsolve(self, v, tol=0):
+        """Evaluate w = M^-H v"""
+        if self.collapsed is not None:
+            return solve(self.collapsed.T.conj(), v)
+        return LowRankMatrix._solve(v, np.conj(self.alpha), self.ds, self.cs)
+
+    def append(self, c, d):
+        if self.collapsed is not None:
+            self.collapsed += c[:,None] * d[None,:].conj()
+            return
+
+        self.cs.append(c)
+        self.ds.append(d)
+
+        if len(self.cs) > c.size:
+            self.collapse()
+
+    def __array__(self):
+        if self.collapsed is not None:
+            return self.collapsed
+
+        Gm = self.alpha*np.identity(self.n, dtype=self.dtype)
+        for c, d in zip(self.cs, self.ds):
+            Gm += c[:,None]*d[None,:].conj()
+        return Gm
+
+    def collapse(self):
+        """Collapse the low-rank matrix to a full-rank one."""
+        self.collapsed = np.array(self)
+        self.cs = None
+        self.ds = None
+        self.alpha = None
+
+    def restart_reduce(self, rank):
+        """
+        Reduce the rank of the matrix by dropping all vectors.
+        """
+        if self.collapsed is not None:
+            return
+        assert rank > 0
+        if len(self.cs) > rank:
+            del self.cs[:]
+            del self.ds[:]
+
+    def simple_reduce(self, rank):
+        """
+        Reduce the rank of the matrix by dropping oldest vectors.
+        """
+        if self.collapsed is not None:
+            return
+        assert rank > 0
+        while len(self.cs) > rank:
+            del self.cs[0]
+            del self.ds[0]
+
+    def svd_reduce(self, max_rank, to_retain=None):
+        """
+        Reduce the rank of the matrix by retaining some SVD components.
+
+        This corresponds to the \"Broyden Rank Reduction Inverse\"
+        algorithm described in [1]_.
+
+        Note that the SVD decomposition can be done by solving only a
+        problem whose size is the effective rank of this matrix, which
+        is viable even for large problems.
+
+        Parameters
+        ----------
+        max_rank : int
+            Maximum rank of this matrix after reduction.
+        to_retain : int, optional
+            Number of SVD components to retain when reduction is done
+            (i.e., rank > max_rank). Default is ``max_rank - 2``.
+
+        References
+        ----------
+        .. [1] B.A. van der Rotten, PhD thesis,
+           \"A limited memory Broyden method to solve high-dimensional
+           systems of nonlinear equations\". Mathematisch Instituut,
+           Universiteit Leiden, The Netherlands (2003).
+
+           https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf
+
+        """
+        if self.collapsed is not None:
+            return
+
+        p = max_rank
+        if to_retain is not None:
+            q = to_retain
+        else:
+            q = p - 2
+
+        if self.cs:
+            p = min(p, len(self.cs[0]))
+        q = max(0, min(q, p-1))
+
+        m = len(self.cs)
+        if m < p:
+            # nothing to do
+            return
+
+        C = np.array(self.cs).T
+        D = np.array(self.ds).T
+
+        D, R = qr(D, mode='economic')
+        C = dot(C, R.T.conj())
+
+        U, S, WH = svd(C, full_matrices=False)
+
+        C = dot(C, inv(WH))
+        D = dot(D, WH.T.conj())
+
+        for k in range(q):
+            self.cs[k] = C[:,k].copy()
+            self.ds[k] = D[:,k].copy()
+
+        del self.cs[q:]
+        del self.ds[q:]
+
+
+_doc_parts['broyden_params'] = """
+    alpha : float, optional
+        Initial guess for the Jacobian is ``(-1/alpha)``.
+    reduction_method : str or tuple, optional
+        Method used in ensuring that the rank of the Broyden matrix
+        stays low. Can either be a string giving the name of the method,
+        or a tuple of the form ``(method, param1, param2, ...)``
+        that gives the name of the method and values for additional parameters.
+
+        Methods available:
+
+            - ``restart``: drop all matrix columns. Has no extra parameters.
+            - ``simple``: drop oldest matrix column. Has no extra parameters.
+            - ``svd``: keep only the most significant SVD components.
+              Takes an extra parameter, ``to_retain``, which determines the
+              number of SVD components to retain when rank reduction is done.
+              Default is ``max_rank - 2``.
+
+    max_rank : int, optional
+        Maximum rank for the Broyden matrix.
+        Default is infinity (i.e., no rank reduction).
+    """.strip()
+
+
+class BroydenFirst(GenericBroyden):
+    r"""
+    Find a root of a function, using Broyden's first Jacobian approximation.
+
+    This method is also known as \"Broyden's good method\".
+
+    Parameters
+    ----------
+    %(params_basic)s
+    %(broyden_params)s
+    %(params_extra)s
+
+    See Also
+    --------
+    root : Interface to root finding algorithms for multivariate
+           functions. See ``method='broyden1'`` in particular.
+
+    Notes
+    -----
+    This algorithm implements the inverse Jacobian Quasi-Newton update
+
+    .. math:: H_+ = H + (dx - H df) dx^\dagger H / ( dx^\dagger H df)
+
+    which corresponds to Broyden's first Jacobian update
+
+    .. math:: J_+ = J + (df - J dx) dx^\dagger / dx^\dagger dx
+
+
+    References
+    ----------
+    .. [1] B.A. van der Rotten, PhD thesis,
+       \"A limited memory Broyden method to solve high-dimensional
+       systems of nonlinear equations\". Mathematisch Instituut,
+       Universiteit Leiden, The Netherlands (2003).
+
+       https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf
+
+    Examples
+    --------
+    The following functions define a system of nonlinear equations
+
+    >>> def fun(x):
+    ...     return [x[0]  + 0.5 * (x[0] - x[1])**3 - 1.0,
+    ...             0.5 * (x[1] - x[0])**3 + x[1]]
+
+    A solution can be obtained as follows.
+
+    >>> from scipy import optimize
+    >>> sol = optimize.broyden1(fun, [0, 0])
+    >>> sol
+    array([0.84116396, 0.15883641])
+
+    """
+
+    def __init__(self, alpha=None, reduction_method='restart', max_rank=None):
+        GenericBroyden.__init__(self)
+        self.alpha = alpha
+        self.Gm = None
+
+        if max_rank is None:
+            max_rank = np.inf
+        self.max_rank = max_rank
+
+        if isinstance(reduction_method, str):
+            reduce_params = ()
+        else:
+            reduce_params = reduction_method[1:]
+            reduction_method = reduction_method[0]
+        reduce_params = (max_rank - 1,) + reduce_params
+
+        if reduction_method == 'svd':
+            self._reduce = lambda: self.Gm.svd_reduce(*reduce_params)
+        elif reduction_method == 'simple':
+            self._reduce = lambda: self.Gm.simple_reduce(*reduce_params)
+        elif reduction_method == 'restart':
+            self._reduce = lambda: self.Gm.restart_reduce(*reduce_params)
+        else:
+            raise ValueError("Unknown rank reduction method '%s'" %
+                             reduction_method)
+
+    def setup(self, x, F, func):
+        GenericBroyden.setup(self, x, F, func)
+        self.Gm = LowRankMatrix(-self.alpha, self.shape[0], self.dtype)
+
+    def todense(self):
+        return inv(self.Gm)
+
+    def solve(self, f, tol=0):
+        r = self.Gm.matvec(f)
+        if not np.isfinite(r).all():
+            # singular; reset the Jacobian approximation
+            self.setup(self.last_x, self.last_f, self.func)
+            return self.Gm.matvec(f)
+        return r
+
+    def matvec(self, f):
+        return self.Gm.solve(f)
+
+    def rsolve(self, f, tol=0):
+        return self.Gm.rmatvec(f)
+
+    def rmatvec(self, f):
+        return self.Gm.rsolve(f)
+
+    def _update(self, x, f, dx, df, dx_norm, df_norm):
+        self._reduce()  # reduce first to preserve secant condition
+
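+        # Rank-1 "good Broyden" update H+ = H + (dx - H df) dx^H H/(dx^H H df),
+        # appended as c d^dagger with c = dx - H df and
+        # d = H^H dx / (df^H H^H dx).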
+        v = self.Gm.rmatvec(dx)
+        c = dx - self.Gm.matvec(df)
+        d = v / vdot(df, v)
+
+        self.Gm.append(c, d)
+
+
+class BroydenSecond(BroydenFirst):
+    """
+    Find a root of a function, using Broyden\'s second Jacobian approximation.
+
+    This method is also known as \"Broyden's bad method\".
+
+    Parameters
+    ----------
+    %(params_basic)s
+    %(broyden_params)s
+    %(params_extra)s
+
+    See Also
+    --------
+    root : Interface to root finding algorithms for multivariate
+           functions. See ``method='broyden2'`` in particular.
+
+    Notes
+    -----
+    This algorithm implements the inverse Jacobian Quasi-Newton update
+
+    .. math:: H_+ = H + (dx - H df) df^\\dagger / ( df^\\dagger df)
+
+    corresponding to Broyden's second method.
+
+    References
+    ----------
+    .. [1] B.A. van der Rotten, PhD thesis,
+       \"A limited memory Broyden method to solve high-dimensional
+       systems of nonlinear equations\". Mathematisch Instituut,
+       Universiteit Leiden, The Netherlands (2003).
+
+       https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf
+
+    Examples
+    --------
+    The following functions define a system of nonlinear equations
+
+    >>> def fun(x):
+    ...     return [x[0]  + 0.5 * (x[0] - x[1])**3 - 1.0,
+    ...             0.5 * (x[1] - x[0])**3 + x[1]]
+
+    A solution can be obtained as follows.
+
+    >>> from scipy import optimize
+    >>> sol = optimize.broyden2(fun, [0, 0])
+    >>> sol
+    array([0.84116365, 0.15883529])
+
+    """
+
+    def _update(self, x, f, dx, df, dx_norm, df_norm):
+        self._reduce()  # reduce first to preserve secant condition
+
+        v = df
+        c = dx - self.Gm.matvec(df)
+        d = v / df_norm**2
+        self.Gm.append(c, d)
+
+
+#------------------------------------------------------------------------------
+# Broyden-like (restricted memory)
+#------------------------------------------------------------------------------
+
+class Anderson(GenericBroyden):
+    """
+    Find a root of a function, using (extended) Anderson mixing.
+
+    The Jacobian is formed by searching for the 'best' solution in the
+    space spanned by the last `M` vectors. As a result, only an MxM matrix
+    inversion and MxN multiplications are required. [Ey]_
+
+    Parameters
+    ----------
+    %(params_basic)s
+    alpha : float, optional
+        Initial guess for the Jacobian is (-1/alpha).
+    M : int, optional
+        Number of previous vectors to retain. Defaults to 5.
+    w0 : float, optional
+        Regularization parameter for numerical stability.
+        Good values are of the order of 0.01 (compared to unity).
+    %(params_extra)s
+
+    See Also
+    --------
+    root : Interface to root finding algorithms for multivariate
+           functions. See ``method='anderson'`` in particular.
+
+    References
+    ----------
+    .. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).
+
+    Examples
+    --------
+    The following functions define a system of nonlinear equations
+
+    >>> def fun(x):
+    ...     return [x[0]  + 0.5 * (x[0] - x[1])**3 - 1.0,
+    ...             0.5 * (x[1] - x[0])**3 + x[1]]
+
+    A solution can be obtained as follows.
+
+    >>> from scipy import optimize
+    >>> sol = optimize.anderson(fun, [0, 0])
+    >>> sol
+    array([0.84116588, 0.15883789])
+
+    """
+
+    # Note:
+    #
+    # Anderson method maintains a rank M approximation of the inverse Jacobian,
+    #
+    #     J^-1 v ~ -v*alpha + (dX + alpha dF) A^-1 dF^H v
+    #     A      = W + dF^H dF
+    #     W      = w0^2 diag(dF^H dF)
+    #
+    # so that for w0 = 0 the secant condition applies for last M iterates, i.e.,
+    #
+    #     J^-1 df_j = dx_j
+    #
+    # for all j = 0 ... M-1.
+    #
+    # Moreover, (from Sherman-Morrison-Woodbury formula)
+    #
+    #    J v ~ [ b I - b^2 C (I + b dF^H A^-1 C)^-1 dF^H ] v
+    #    C   = (dX + alpha dF) A^-1
+    #    b   = -1/alpha
+    #
+    # and after simplification
+    #
+    #    J v ~ -v/alpha + (dX/alpha + dF) (dF^H dX - alpha W)^-1 dF^H v
+    #
+
+    def __init__(self, alpha=None, w0=0.01, M=5):
+        GenericBroyden.__init__(self)
+        self.alpha = alpha
+        self.M = M
+        self.dx = []
+        self.df = []
+        self.gamma = None
+        self.w0 = w0
+
+    def solve(self, f, tol=0):
+        dx = -self.alpha*f
+
+        n = len(self.dx)
+        if n == 0:
+            return dx
+
+        df_f = np.empty(n, dtype=f.dtype)
+        for k in range(n):
+            df_f[k] = vdot(self.df[k], f)
+
+        try:
+            gamma = solve(self.a, df_f)
+        except LinAlgError:
+            # singular; reset the Jacobian approximation
+            del self.dx[:]
+            del self.df[:]
+            return dx
+
+        for m in range(n):
+            dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m])
+        return dx
+
+    def matvec(self, f):
+        dx = -f/self.alpha
+
+        n = len(self.dx)
+        if n == 0:
+            return dx
+
+        df_f = np.empty(n, dtype=f.dtype)
+        for k in range(n):
+            df_f[k] = vdot(self.df[k], f)
+
+        b = np.empty((n, n), dtype=f.dtype)
+        for i in range(n):
+            for j in range(n):
+                b[i,j] = vdot(self.df[i], self.dx[j])
+                if i == j and self.w0 != 0:
+                    b[i,j] -= vdot(self.df[i], self.df[i])*self.w0**2*self.alpha
+        gamma = solve(b, df_f)
+
+        for m in range(n):
+            dx += gamma[m]*(self.df[m] + self.dx[m]/self.alpha)
+        return dx
+
+    def _update(self, x, f, dx, df, dx_norm, df_norm):
+        if self.M == 0:
+            return
+
+        self.dx.append(dx)
+        self.df.append(df)
+
+        while len(self.dx) > self.M:
+            self.dx.pop(0)
+            self.df.pop(0)
+
+        n = len(self.dx)
+        a = np.zeros((n, n), dtype=f.dtype)
+
+        for i in range(n):
+            for j in range(i, n):
+                if i == j:
+                    wd = self.w0**2
+                else:
+                    wd = 0
+                a[i,j] = (1+wd)*vdot(self.df[i], self.df[j])
+
+        a += np.triu(a, 1).T.conj()
+        self.a = a
+
+#------------------------------------------------------------------------------
+# Simple iterations
+#------------------------------------------------------------------------------
+
+
+class DiagBroyden(GenericBroyden):
+    """
+    Find a root of a function, using diagonal Broyden Jacobian approximation.
+
+    The Jacobian approximation is derived from previous iterations, by
+    retaining only the diagonal of Broyden matrices.
+
+    .. warning::
+
+       This algorithm may be useful for specific problems, but whether
+       it will work may depend strongly on the problem.
+
+    Parameters
+    ----------
+    %(params_basic)s
+    alpha : float, optional
+        Initial guess for the Jacobian is (-1/alpha).
+    %(params_extra)s
+
+    See Also
+    --------
+    root : Interface to root finding algorithms for multivariate
+           functions. See ``method='diagbroyden'`` in particular.
+
+    Examples
+    --------
+    The following functions define a system of nonlinear equations
+
+    >>> def fun(x):
+    ...     return [x[0]  + 0.5 * (x[0] - x[1])**3 - 1.0,
+    ...             0.5 * (x[1] - x[0])**3 + x[1]]
+
+    A solution can be obtained as follows.
+
+    >>> from scipy import optimize
+    >>> sol = optimize.diagbroyden(fun, [0, 0])
+    >>> sol
+    array([0.84116403, 0.15883384])
+
+    """
+
+    def __init__(self, alpha=None):
+        GenericBroyden.__init__(self)
+        self.alpha = alpha
+
+    def setup(self, x, F, func):
+        GenericBroyden.setup(self, x, F, func)
+        self.d = np.full((self.shape[0],), 1 / self.alpha, dtype=self.dtype)
+
+    def solve(self, f, tol=0):
+        return -f / self.d
+
+    def matvec(self, f):
+        return -f * self.d
+
+    def rsolve(self, f, tol=0):
+        return -f / self.d.conj()
+
+    def rmatvec(self, f):
+        return -f * self.d.conj()
+
+    def todense(self):
+        return np.diag(-self.d)
+
+    def _update(self, x, f, dx, df, dx_norm, df_norm):
+        self.d -= (df + self.d*dx)*dx/dx_norm**2
+
+
+class LinearMixing(GenericBroyden):
+    """
+    Find a root of a function, using a scalar Jacobian approximation.
+
+    .. warning::
+
+       This algorithm may be useful for specific problems, but whether
+       it will work may depend strongly on the problem.
+
+    Parameters
+    ----------
+    %(params_basic)s
+    alpha : float, optional
+        The Jacobian approximation is (-1/alpha).
+    %(params_extra)s
+
+    See Also
+    --------
+    root : Interface to root finding algorithms for multivariate
+           functions. See ``method='linearmixing'`` in particular.
+
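+    Examples
+    --------
+    A minimal sketch, shown via `scipy.optimize.root` (which reports
+    failure to converge in the result instead of raising; this method can
+    be fragile, so no output is displayed):
+
+    >>> from scipy import optimize
+    >>> def fun(x):
+    ...     return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
+    ...             0.5 * (x[1] - x[0])**3 + x[1]]
+    >>> res = optimize.root(fun, [0, 0], method='linearmixing')
+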
+    """
+
+    def __init__(self, alpha=None):
+        GenericBroyden.__init__(self)
+        self.alpha = alpha
+
+    def solve(self, f, tol=0):
+        return -f*self.alpha
+
+    def matvec(self, f):
+        return -f/self.alpha
+
+    def rsolve(self, f, tol=0):
+        return -f*np.conj(self.alpha)
+
+    def rmatvec(self, f):
+        return -f/np.conj(self.alpha)
+
+    def todense(self):
+        return np.diag(np.full(self.shape[0], -1/self.alpha))
+
+    def _update(self, x, f, dx, df, dx_norm, df_norm):
+        pass
+
+
+class ExcitingMixing(GenericBroyden):
+    """
+    Find a root of a function, using a tuned diagonal Jacobian approximation.
+
+    The Jacobian matrix is diagonal and is tuned on each iteration.
+
+    .. warning::
+
+       This algorithm may be useful for specific problems, but whether
+       it will work may depend strongly on the problem.
+
+    Parameters
+    ----------
+    %(params_basic)s
+    alpha : float, optional
+        Initial Jacobian approximation is (-1/alpha).
+    alphamax : float, optional
+        The entries of the diagonal Jacobian are kept in the range
+        ``[alpha, alphamax]``.
+    %(params_extra)s
+
+    See Also
+    --------
+    root : Interface to root finding algorithms for multivariate
+           functions. See ``method='excitingmixing'`` in particular.
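+
+    Examples
+    --------
+    A brief sketch in the same spirit as the other solvers here, again via
+    `scipy.optimize.root` so that non-convergence is reported, not raised:
+
+    >>> from scipy import optimize
+    >>> def fun(x):
+    ...     return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
+    ...             0.5 * (x[1] - x[0])**3 + x[1]]
+    >>> res = optimize.root(fun, [0, 0], method='excitingmixing')
+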
+    """
+
+    def __init__(self, alpha=None, alphamax=1.0):
+        GenericBroyden.__init__(self)
+        self.alpha = alpha
+        self.alphamax = alphamax
+        self.beta = None
+
+    def setup(self, x, F, func):
+        GenericBroyden.setup(self, x, F, func)
+        self.beta = np.full((self.shape[0],), self.alpha, dtype=self.dtype)
+
+    def solve(self, f, tol=0):
+        return -f*self.beta
+
+    def matvec(self, f):
+        return -f/self.beta
+
+    def rsolve(self, f, tol=0):
+        return -f*self.beta.conj()
+
+    def rmatvec(self, f):
+        return -f/self.beta.conj()
+
+    def todense(self):
+        return np.diag(-1/self.beta)
+
+    def _update(self, x, f, dx, df, dx_norm, df_norm):
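+        # Grow beta where the residual kept its sign (still approaching the
+        # root); reset beta to alpha where the sign flipped (overshoot).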
+        incr = f*self.last_f > 0
+        self.beta[incr] += self.alpha
+        self.beta[~incr] = self.alpha
+        np.clip(self.beta, 0, self.alphamax, out=self.beta)
+
+
+#------------------------------------------------------------------------------
+# Iterative/Krylov approximated Jacobians
+#------------------------------------------------------------------------------
+
+class KrylovJacobian(Jacobian):
+    r"""
+    Find a root of a function, using Krylov approximation for inverse Jacobian.
+
+    This method is suitable for solving large-scale problems.
+
+    Parameters
+    ----------
+    %(params_basic)s
+    rdiff : float, optional
+        Relative step size to use in numerical differentiation.
+    method : str or callable, optional
+        Krylov method to use to approximate the Jacobian.  Can be a string,
+        or a function implementing the same interface as the iterative
+        solvers in `scipy.sparse.linalg`. If a string, needs to be one of:
+        ``'lgmres'``, ``'gmres'``, ``'bicgstab'``, ``'cgs'``, ``'minres'``,
+        ``'tfqmr'``.
+
+        The default is `scipy.sparse.linalg.lgmres`.
+    inner_maxiter : int, optional
+        Parameter to pass to the "inner" Krylov solver: maximum number of
+        iterations. Iteration will stop after maxiter steps even if the
+        specified tolerance has not been achieved.
+    inner_M : LinearOperator or InverseJacobian
+        Preconditioner for the inner Krylov iteration.
+        Note that you can also use inverse Jacobians as (adaptive)
+        preconditioners. For example,
+
+        >>> from scipy.optimize import BroydenFirst, KrylovJacobian
+        >>> from scipy.optimize import InverseJacobian
+        >>> jac = BroydenFirst()
+        >>> kjac = KrylovJacobian(inner_M=InverseJacobian(jac))
+
+        If the preconditioner has a method named 'update', it will be called
+        as ``update(x, f)`` after each nonlinear step, with ``x`` giving
+        the current point, and ``f`` the current function value.
+    outer_k : int, optional
+        Size of the subspace kept across LGMRES nonlinear iterations.
+        See `scipy.sparse.linalg.lgmres` for details.
+    inner_kwargs : kwargs
+        Keyword parameters for the "inner" Krylov solver
+        (defined with `method`). Parameter names must start with the
+        `inner_` prefix, which is stripped before the parameter is passed
+        on to the inner method. See, e.g., `scipy.sparse.linalg.gmres`
+        for details.
+    %(params_extra)s
+
+    See Also
+    --------
+    root : Interface to root finding algorithms for multivariate
+           functions. See ``method='krylov'`` in particular.
+    scipy.sparse.linalg.gmres
+    scipy.sparse.linalg.lgmres
+
+    Notes
+    -----
+    This function implements a Newton-Krylov solver. The basic idea is
+    to compute the inverse of the Jacobian with an iterative Krylov
+    method. These methods require only evaluating the Jacobian-vector
+    products, which are conveniently approximated by a finite difference:
+
+    .. math:: J v \approx (f(x + \omega*v/|v|) - f(x)) / \omega
+
+    Due to the use of iterative matrix inverses, these methods can
+    deal with large nonlinear problems.
+
+    SciPy's `scipy.sparse.linalg` module offers a selection of Krylov
+    solvers to choose from. The default here is `lgmres`, which is a
+    variant of restarted GMRES iteration that reuses some of the
+    information obtained in the previous Newton steps to invert
+    Jacobians in subsequent steps.
+
+    For a review on Newton-Krylov methods, see for example [1]_,
+    and for the LGMRES sparse inverse method, see [2]_.
+
+    References
+    ----------
+    .. [1] C. T. Kelley, Solving Nonlinear Equations with Newton's Method,
+           SIAM, pp.57-83, 2003.
+           :doi:`10.1137/1.9780898718898.ch3`
+    .. [2] D.A. Knoll and D.E. Keyes, J. Comp. Phys. 193, 357 (2004).
+           :doi:`10.1016/j.jcp.2003.08.010`
+    .. [3] A.H. Baker and E.R. Jessup and T. Manteuffel,
+           SIAM J. Matrix Anal. Appl. 26, 962 (2005).
+           :doi:`10.1137/S0895479803422014`
+
+    Examples
+    --------
+    The following functions define a system of nonlinear equations
+
+    >>> def fun(x):
+    ...     return [x[0] + 0.5 * x[1] - 1.0,
+    ...             0.5 * (x[1] - x[0]) ** 2]
+
+    A solution can be obtained as follows.
+
+    >>> from scipy import optimize
+    >>> sol = optimize.newton_krylov(fun, [0, 0])
+    >>> sol
+    array([0.66731771, 0.66536458])
+
+    """
+
+    def __init__(self, rdiff=None, method='lgmres', inner_maxiter=20,
+                 inner_M=None, outer_k=10, **kw):
+        self.preconditioner = inner_M
+        self.rdiff = rdiff
+        # Note that this retrieves one of the named functions, or otherwise
+        # uses `method` as is (i.e., for a user-provided callable).
+        self.method = dict(
+            bicgstab=scipy.sparse.linalg.bicgstab,
+            gmres=scipy.sparse.linalg.gmres,
+            lgmres=scipy.sparse.linalg.lgmres,
+            cgs=scipy.sparse.linalg.cgs,
+            minres=scipy.sparse.linalg.minres,
+            tfqmr=scipy.sparse.linalg.tfqmr,
+            ).get(method, method)
+
+        self.method_kw = dict(maxiter=inner_maxiter, M=self.preconditioner)
+
+        if self.method is scipy.sparse.linalg.gmres:
+            # Replace GMRES's outer iteration with Newton steps
+            self.method_kw['restart'] = inner_maxiter
+            self.method_kw['maxiter'] = 1
+            self.method_kw.setdefault('atol', 0)
+        elif self.method in (scipy.sparse.linalg.gcrotmk,
+                             scipy.sparse.linalg.bicgstab,
+                             scipy.sparse.linalg.cgs):
+            self.method_kw.setdefault('atol', 0)
+        elif self.method is scipy.sparse.linalg.lgmres:
+            self.method_kw['outer_k'] = outer_k
+            # Replace LGMRES's outer iteration with Newton steps
+            self.method_kw['maxiter'] = 1
+            # Carry LGMRES's `outer_v` vectors across nonlinear iterations
+            self.method_kw.setdefault('outer_v', [])
+            self.method_kw.setdefault('prepend_outer_v', True)
+            # But don't carry the corresponding Jacobian*v products, in case
+            # the Jacobian changes a lot in the nonlinear step
+            #
+            # XXX: some trust-region inspired ideas might be more efficient...
+            #      See e.g., Brown & Saad. But needs to be implemented separately
+            #      since it's not an inexact Newton method.
+            self.method_kw.setdefault('store_outer_Av', False)
+            self.method_kw.setdefault('atol', 0)
+
+        for key, value in kw.items():
+            if not key.startswith('inner_'):
+                raise ValueError("Unknown parameter %s" % key)
+            self.method_kw[key[6:]] = value
+
+    def _update_diff_step(self):
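+        # Scale the relative step `rdiff` by the magnitudes of x0 and f0 to
+        # obtain the absolute differencing step `omega`.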
+        mx = abs(self.x0).max()
+        mf = abs(self.f0).max()
+        self.omega = self.rdiff * max(1, mx) / max(1, mf)
+
+    def matvec(self, v):
+        nv = norm(v)
+        if nv == 0:
+            return 0*v
+        sc = self.omega / nv
+        r = (self.func(self.x0 + sc*v) - self.f0) / sc
+        if not np.all(np.isfinite(r)) and np.all(np.isfinite(v)):
+            raise ValueError('Function returned non-finite results')
+        return r
+
+    def solve(self, rhs, tol=0):
+        if 'tol' in self.method_kw:
+            sol, info = self.method(self.op, rhs, **self.method_kw)
+        else:
+            sol, info = self.method(self.op, rhs, tol=tol, **self.method_kw)
+        return sol
+
+    def update(self, x, f):
+        self.x0 = x
+        self.f0 = f
+        self._update_diff_step()
+
+        # Update also the preconditioner, if possible
+        if self.preconditioner is not None:
+            if hasattr(self.preconditioner, 'update'):
+                self.preconditioner.update(x, f)
+
+    def setup(self, x, f, func):
+        Jacobian.setup(self, x, f, func)
+        self.x0 = x
+        self.f0 = f
+        self.op = scipy.sparse.linalg.aslinearoperator(self)
+
+        if self.rdiff is None:
+            self.rdiff = np.finfo(x.dtype).eps ** (1./2)
+
+        self._update_diff_step()
+
+        # Setup also the preconditioner, if possible
+        if self.preconditioner is not None:
+            if hasattr(self.preconditioner, 'setup'):
+                self.preconditioner.setup(x, f, func)
+
+
+#------------------------------------------------------------------------------
+# Wrapper functions
+#------------------------------------------------------------------------------
+
+def _nonlin_wrapper(name, jac):
+    """
+    Construct a solver wrapper with given name and Jacobian approx.
+
+    It inspects the keyword arguments of ``jac.__init__`` and allows the
+    same arguments to be used in the wrapper function, in addition to the
+    keyword arguments of `nonlin_solve`.
+
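+    For example, ``_nonlin_wrapper('anderson', Anderson)`` produces a
+    function ``anderson(F, xin, iter=None, alpha=None, w0=0.01, M=5, ...)``
+    whose extra keyword arguments mirror ``Anderson.__init__``.
+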
+    """
+    signature = _getfullargspec(jac.__init__)
+    args, varargs, varkw, defaults, kwonlyargs, kwdefaults, _ = signature
+    kwargs = list(zip(args[-len(defaults):], defaults))
+    kw_str = ", ".join(["%s=%r" % (k, v) for k, v in kwargs])
+    if kw_str:
+        kw_str = ", " + kw_str
+    kwkw_str = ", ".join(["%s=%s" % (k, k) for k, v in kwargs])
+    if kwkw_str:
+        kwkw_str = kwkw_str + ", "
+    if kwonlyargs:
+        raise ValueError('Unexpected signature %s' % signature)
+
+    # Construct the wrapper function so that its keyword arguments
+    # are visible in pydoc.help etc.
+    wrapper = """
+def %(name)s(F, xin, iter=None %(kw)s, verbose=False, maxiter=None,
+             f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
+             tol_norm=None, line_search='armijo', callback=None, **kw):
+    jac = %(jac)s(%(kwkw)s **kw)
+    return nonlin_solve(F, xin, jac, iter, verbose, maxiter,
+                        f_tol, f_rtol, x_tol, x_rtol, tol_norm, line_search,
+                        callback)
+"""
+
+    wrapper = wrapper % dict(name=name, kw=kw_str, jac=jac.__name__,
+                             kwkw=kwkw_str)
+    ns = {}
+    ns.update(globals())
+    exec(wrapper, ns)
+    func = ns[name]
+    func.__doc__ = jac.__doc__
+    _set_doc(func)
+    return func
+
+
+broyden1 = _nonlin_wrapper('broyden1', BroydenFirst)
+broyden2 = _nonlin_wrapper('broyden2', BroydenSecond)
+anderson = _nonlin_wrapper('anderson', Anderson)
+linearmixing = _nonlin_wrapper('linearmixing', LinearMixing)
+diagbroyden = _nonlin_wrapper('diagbroyden', DiagBroyden)
+excitingmixing = _nonlin_wrapper('excitingmixing', ExcitingMixing)
+newton_krylov = _nonlin_wrapper('newton_krylov', KrylovJacobian)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_numdiff.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_numdiff.py
new file mode 100644
index 00000000..b0f2d290
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_numdiff.py
@@ -0,0 +1,761 @@
+"""Routines for numerical differentiation."""
+import functools
+import numpy as np
+from numpy.linalg import norm
+
+from scipy.sparse.linalg import LinearOperator
+from ..sparse import issparse, csc_matrix, csr_matrix, coo_matrix, find
+from ._group_columns import group_dense, group_sparse
+
+
+def _adjust_scheme_to_bounds(x0, h, num_steps, scheme, lb, ub):
+    """Adjust final difference scheme to the presence of bounds.
+
+    Parameters
+    ----------
+    x0 : ndarray, shape (n,)
+        Point at which we wish to estimate derivative.
+    h : ndarray, shape (n,)
+        Desired absolute finite difference steps.
+    num_steps : int
+        Number of `h` steps in one direction required to implement finite
+        difference scheme. For example, 2 means that we need to evaluate
+        f(x0 + 2 * h) or f(x0 - 2 * h).
+    scheme : {'1-sided', '2-sided'}
+        Whether steps in one or both directions are required. In other
+        words '1-sided' applies to forward and backward schemes, '2-sided'
+        applies to center schemes.
+    lb : ndarray, shape (n,)
+        Lower bounds on independent variables.
+    ub : ndarray, shape (n,)
+        Upper bounds on independent variables.
+
+    Returns
+    -------
+    h_adjusted : ndarray, shape (n,)
+        Adjusted absolute step sizes. Step size decreases only if a sign flip
+        or switching to a one-sided scheme does not allow taking a full step.
+    use_one_sided : ndarray of bool, shape (n,)
+        Whether to switch to one-sided scheme. Informative only for
+        ``scheme='2-sided'``.
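+
+    Examples
+    --------
+    An illustrative call, importing the private helper directly: with
+    ``x0`` one step below the upper bound, the forward step is flipped
+    backward:
+
+    >>> import numpy as np
+    >>> from scipy.optimize._numdiff import _adjust_scheme_to_bounds
+    >>> _adjust_scheme_to_bounds(np.array([0.9]), np.array([0.2]), 1,
+    ...                          '1-sided', np.array([0.0]), np.array([1.0]))
+    (array([-0.2]), array([ True]))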
+    """
+    if scheme == '1-sided':
+        use_one_sided = np.ones_like(h, dtype=bool)
+    elif scheme == '2-sided':
+        h = np.abs(h)
+        use_one_sided = np.zeros_like(h, dtype=bool)
+    else:
+        raise ValueError("`scheme` must be '1-sided' or '2-sided'.")
+
+    if np.all((lb == -np.inf) & (ub == np.inf)):
+        return h, use_one_sided
+
+    h_total = h * num_steps
+    h_adjusted = h.copy()
+
+    lower_dist = x0 - lb
+    upper_dist = ub - x0
+
+    if scheme == '1-sided':
+        x = x0 + h_total
+        violated = (x < lb) | (x > ub)
+        fitting = np.abs(h_total) <= np.maximum(lower_dist, upper_dist)
+        h_adjusted[violated & fitting] *= -1
+
+        forward = (upper_dist >= lower_dist) & ~fitting
+        h_adjusted[forward] = upper_dist[forward] / num_steps
+        backward = (upper_dist < lower_dist) & ~fitting
+        h_adjusted[backward] = -lower_dist[backward] / num_steps
+    elif scheme == '2-sided':
+        central = (lower_dist >= h_total) & (upper_dist >= h_total)
+
+        forward = (upper_dist >= lower_dist) & ~central
+        h_adjusted[forward] = np.minimum(
+            h[forward], 0.5 * upper_dist[forward] / num_steps)
+        use_one_sided[forward] = True
+
+        backward = (upper_dist < lower_dist) & ~central
+        h_adjusted[backward] = -np.minimum(
+            h[backward], 0.5 * lower_dist[backward] / num_steps)
+        use_one_sided[backward] = True
+
+        min_dist = np.minimum(upper_dist, lower_dist) / num_steps
+        adjusted_central = (~central & (np.abs(h_adjusted) <= min_dist))
+        h_adjusted[adjusted_central] = min_dist[adjusted_central]
+        use_one_sided[adjusted_central] = False
+
+    return h_adjusted, use_one_sided
+
+
+@functools.lru_cache()
+def _eps_for_method(x0_dtype, f0_dtype, method):
+    """
+    Calculates relative EPS step to use for a given data type
+    and numdiff step method.
+
+    Progressively smaller steps are used for larger floating point types.
+
+    Parameters
+    ----------
+    x0_dtype : np.dtype
+        dtype of parameter vector
+
+    f0_dtype : np.dtype
+        dtype of function evaluation
+
+    method : {'2-point', '3-point', 'cs'}
+
+    Returns
+    -------
+    EPS : float
+        Relative step size. May be np.float16, np.float32, or np.float64.
+
+    Notes
+    -----
+    The default relative step will be np.float64. However, if x0 or f0 are
+    smaller floating point types (np.float16, np.float32), then the smallest
+    floating point type is chosen.
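+
+    Examples
+    --------
+    A quick illustration of the step rules, importing the private helper
+    directly:
+
+    >>> import numpy as np
+    >>> from scipy.optimize._numdiff import _eps_for_method
+    >>> eps = np.finfo(np.float64).eps
+    >>> _eps_for_method(np.float64, np.float64, '2-point') == eps**0.5
+    True
+    >>> _eps_for_method(np.float64, np.float64, '3-point') == eps**(1/3)
+    True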
+    """
+    # the default EPS value
+    EPS = np.finfo(np.float64).eps
+
+    x0_is_fp = False
+    if np.issubdtype(x0_dtype, np.inexact):
+        # if you're a floating point type then over-ride the default EPS
+        EPS = np.finfo(x0_dtype).eps
+        x0_itemsize = np.dtype(x0_dtype).itemsize
+        x0_is_fp = True
+
+    if np.issubdtype(f0_dtype, np.inexact):
+        f0_itemsize = np.dtype(f0_dtype).itemsize
+        # choose the smallest itemsize between x0 and f0
+        if x0_is_fp and f0_itemsize < x0_itemsize:
+            EPS = np.finfo(f0_dtype).eps
+
+    if method in ["2-point", "cs"]:
+        return EPS**0.5
+    elif method in ["3-point"]:
+        return EPS**(1/3)
+    else:
+        raise RuntimeError("Unknown step method, should be one of "
+                           "{'2-point', '3-point', 'cs'}")
+
+
+def _compute_absolute_step(rel_step, x0, f0, method):
+    """
+    Computes an absolute step from a relative step for finite difference
+    calculation.
+
+    Parameters
+    ----------
+    rel_step: None or array-like
+        Relative step for the finite difference calculation
+    x0 : np.ndarray
+        Parameter vector
+    f0 : np.ndarray or scalar
+    method : {'2-point', '3-point', 'cs'}
+
+    Returns
+    -------
+    h : ndarray
+        The absolute step size.
+
+    Notes
+    -----
+    `h` will always be np.float64. However, if `x0` or `f0` are
+    smaller floating point dtypes (e.g. np.float32), then the absolute
+    step size will be calculated from the smallest floating point size.
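+
+    Examples
+    --------
+    With ``rel_step=None`` the step follows
+    ``rstep * sign(x0) * max(1, abs(x0))``; an illustrative check,
+    importing the private helper directly:
+
+    >>> import numpy as np
+    >>> from scipy.optimize._numdiff import _compute_absolute_step
+    >>> x0, f0 = np.array([-2.0]), np.array([0.5])
+    >>> h = _compute_absolute_step(None, x0, f0, '2-point')
+    >>> bool(np.all(h == -2.0 * np.finfo(np.float64).eps**0.5))
+    True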
+    """
+    # this is used instead of np.sign(x0) because we need
+    # sign_x0 to be 1 when x0 == 0.
+    sign_x0 = (x0 >= 0).astype(float) * 2 - 1
+
+    rstep = _eps_for_method(x0.dtype, f0.dtype, method)
+
+    if rel_step is None:
+        abs_step = rstep * sign_x0 * np.maximum(1.0, np.abs(x0))
+    else:
+        # User has requested specific relative steps.
+        # Don't multiply by max(1, abs(x0)) because if x0 < 1 then their
+        # requested step is not used.
+        abs_step = rel_step * sign_x0 * np.abs(x0)
+
+        # however we don't want an abs_step of 0, which can happen if
+        # rel_step is 0, or x0 is 0. Instead, substitute a realistic step
+        dx = ((x0 + abs_step) - x0)
+        abs_step = np.where(dx == 0,
+                            rstep * sign_x0 * np.maximum(1.0, np.abs(x0)),
+                            abs_step)
+
+    return abs_step
+
+
+def _prepare_bounds(bounds, x0):
+    """
+    Prepares new-style bounds from a two-tuple specifying the lower and upper
+    limits for values in x0. If a value is not bound then the lower/upper bound
+    will be expected to be -np.inf/np.inf.
+
+    Examples
+    --------
+    >>> _prepare_bounds([(0, 1, 2), (1, 2, np.inf)], [0.5, 1.5, 2.5])
+    (array([0., 1., 2.]), array([ 1.,  2., inf]))
+    """
+    lb, ub = [np.asarray(b, dtype=float) for b in bounds]
+    if lb.ndim == 0:
+        lb = np.resize(lb, x0.shape)
+
+    if ub.ndim == 0:
+        ub = np.resize(ub, x0.shape)
+
+    return lb, ub
+
+
+def group_columns(A, order=0):
+    """Group columns of a 2-D matrix for sparse finite differencing [1]_.
+
+    Two columns are in the same group if, in each row, at least one of them
+    is zero. A greedy sequential algorithm is used to construct groups.
+
+    Parameters
+    ----------
+    A : array_like or sparse matrix, shape (m, n)
+        Matrix of which to group columns.
+    order : int, iterable of int with shape (n,) or None
+        Permutation array which defines the order of columns enumeration.
+        If int or None, a random permutation is used, with `order` as the
+        random seed. Default is 0; that is, a random permutation is used,
+        but repeatability is guaranteed.
+
+    Returns
+    -------
+    groups : ndarray of int, shape (n,)
+        Contains values from 0 to n_groups-1, where n_groups is the number
+        of groups found. Each value ``groups[i]`` is the index of the group
+        to which the ith column is assigned. The procedure is helpful only
+        if n_groups is significantly less than n.
+
+    References
+    ----------
+    .. [1] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
+           sparse Jacobian matrices", Journal of the Institute of Mathematics
+           and its Applications, 13 (1974), pp. 117-120.
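+
+    Examples
+    --------
+    An illustrative grouping with a fixed `order` for determinism. Columns
+    0 and 1 never overlap in any row, so they share a group; column 2
+    clashes with column 0 in the first row and gets its own group:
+
+    >>> import numpy as np
+    >>> from scipy.optimize._numdiff import group_columns
+    >>> A = np.array([[1, 0, 1],
+    ...               [0, 1, 0]])
+    >>> groups = group_columns(A, order=np.arange(3))
+    >>> bool(np.all(groups == [0, 0, 1]))
+    True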
+    """
+    if issparse(A):
+        A = csc_matrix(A)
+    else:
+        A = np.atleast_2d(A)
+        A = (A != 0).astype(np.int32)
+
+    if A.ndim != 2:
+        raise ValueError("`A` must be 2-dimensional.")
+
+    m, n = A.shape
+
+    if order is None or np.isscalar(order):
+        rng = np.random.RandomState(order)
+        order = rng.permutation(n)
+    else:
+        order = np.asarray(order)
+        if order.shape != (n,):
+            raise ValueError("`order` has incorrect shape.")
+
+    A = A[:, order]
+
+    if issparse(A):
+        groups = group_sparse(m, n, A.indices, A.indptr)
+    else:
+        groups = group_dense(m, n, A)
+
+    groups[order] = groups.copy()
+
+    return groups
+
+
+def approx_derivative(fun, x0, method='3-point', rel_step=None, abs_step=None,
+                      f0=None, bounds=(-np.inf, np.inf), sparsity=None,
+                      as_linear_operator=False, args=(), kwargs={}):
+    """Compute finite difference approximation of the derivatives of a
+    vector-valued function.
+
+    If a function maps from R^n to R^m, its derivatives form an m-by-n matrix
+    called the Jacobian, where element (i, j) is the partial derivative of
+    f[i] with respect to x[j].
+
+    Parameters
+    ----------
+    fun : callable
+        Function of which to estimate the derivatives. The argument x
+        passed to this function is ndarray of shape (n,) (never a scalar
+        even if n=1). It must return 1-D array_like of shape (m,) or a scalar.
+    x0 : array_like of shape (n,) or float
+        Point at which to estimate the derivatives. Float will be converted
+        to a 1-D array.
+    method : {'3-point', '2-point', 'cs'}, optional
+        Finite difference method to use:
+            - '2-point' - use the first order accuracy forward or backward
+                          difference.
+            - '3-point' - use central difference in interior points and the
+                          second order accuracy forward or backward difference
+                          near the boundary.
+            - 'cs' - use a complex-step finite difference scheme. This assumes
+                     that the user function is real-valued and can be
+                     analytically continued to the complex plane. Otherwise,
+                     produces bogus results.
+    rel_step : None or array_like, optional
+        Relative step size to use. If None (default) the absolute step size is
+        computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``, with
+        `rel_step` being selected automatically, see Notes. Otherwise
+        ``h = rel_step * sign(x0) * abs(x0)``. For ``method='3-point'`` the
+        sign of `h` is ignored. The calculated step size is possibly adjusted
+        to fit into the bounds.
+    abs_step : array_like, optional
+        Absolute step size to use, possibly adjusted to fit into the bounds.
+        For ``method='3-point'`` the sign of `abs_step` is ignored. By default
+        relative steps are used; absolute steps are used only if
+        ``abs_step is not None``.
+    f0 : None or array_like, optional
+        If not None, it is assumed to be equal to ``fun(x0)``, in which case
+        ``fun(x0)`` is not called. Default is None.
+    bounds : tuple of array_like, optional
+        Lower and upper bounds on independent variables. Defaults to no bounds.
+        Each bound must match the size of `x0` or be a scalar, in the latter
+        case the bound will be the same for all variables. Use it to limit the
+        range of function evaluation. Bounds checking is not implemented
+        when `as_linear_operator` is True.
+    sparsity : {None, array_like, sparse matrix, 2-tuple}, optional
+        Defines a sparsity structure of the Jacobian matrix. If the Jacobian
+        matrix is known to have only few non-zero elements in each row, then
+        several of its columns can be estimated by a single function
+        evaluation [3]_. To perform such economic computations two ingredients
+        are required:
+
+        * structure : array_like or sparse matrix of shape (m, n). A zero
+          element means that the corresponding element of the Jacobian is
+          identically zero.
+        * groups : array_like of shape (n,). A column grouping for a given
+          sparsity structure, use `group_columns` to obtain it.
+
+        A single array or a sparse matrix is interpreted as a sparsity
+        structure, and groups are computed inside the function. A tuple is
+        interpreted as (structure, groups). If None (default), a standard
+        dense differencing will be used.
+
+        Note that sparse differencing makes sense only for large Jacobian
+        matrices where each row contains few non-zero elements.
+    as_linear_operator : bool, optional
+        When True the function returns a `scipy.sparse.linalg.LinearOperator`.
+        Otherwise it returns a dense array or a sparse matrix depending on
+        `sparsity`. The linear operator provides an efficient way of computing
+        ``J.dot(p)`` for any vector ``p`` of shape (n,), but does not allow
+        direct access to individual elements of the matrix. By default
+        `as_linear_operator` is False.
+    args, kwargs : tuple and dict, optional
+        Additional arguments passed to `fun`. Both empty by default.
+        The calling signature is ``fun(x, *args, **kwargs)``.
+
+    Returns
+    -------
+    J : {ndarray, sparse matrix, LinearOperator}
+        Finite difference approximation of the Jacobian matrix.
+        If `as_linear_operator` is True returns a LinearOperator
+        with shape (m, n). Otherwise it returns a dense array or sparse
+        matrix depending on how `sparsity` is defined. If `sparsity`
+        is None, then an ndarray with shape (m, n) is returned; otherwise
+        a csr_matrix with shape (m, n) is returned.
+        Sparse matrices and linear operators are always returned as 2-D
+        structures; for ndarrays, if m=1 the result is returned as a 1-D
+        gradient array with shape (n,).
+
+    See Also
+    --------
+    check_derivative : Check correctness of a function computing derivatives.
+
+    Notes
+    -----
+    If `rel_step` is not provided, it is assigned as ``EPS**(1/s)``, where EPS
+    is determined from the smallest floating point dtype of `x0` or `fun(x0)`,
+    ``np.finfo(x0.dtype).eps``, with s=2 for the '2-point' method and
+    s=3 for the '3-point' method. Such a relative step approximately minimizes
+    the sum of truncation and round-off errors, see [1]_. Relative steps are
+    used by default. However, absolute steps are used when
+    ``abs_step is not None``. If any of the absolute or relative steps
+    produces an indistinguishable difference from the original `x0`,
+    ``(x0 + dx) - x0 == 0``, then an automatic step size is substituted for
+    that particular entry.
+
+    A finite difference scheme for '3-point' method is selected automatically.
+    The well-known central difference scheme is used for points sufficiently
+    far from the boundary, and 3-point forward or backward scheme is used for
+    points near the boundary. Both schemes have the second-order accuracy in
+    terms of Taylor expansion. Refer to [2]_ for the formulas of 3-point
+    forward and backward difference schemes.
+
+    For dense differencing, when m=1 the Jacobian is returned with shape
+    (n,); on the other hand, when n=1 the Jacobian is returned with shape
+    (m, 1). Our motivation is the following: a) it handles the case of
+    gradient computation (m=1) in a conventional way; b) it clearly
+    separates these two different cases; c) in all cases np.atleast_2d can
+    be called to get a 2-D Jacobian with correct dimensions.
+
+    References
+    ----------
+    .. [1] W. H. Press et. al. "Numerical Recipes. The Art of Scientific
+           Computing. 3rd edition", sec. 5.7.
+
+    .. [2] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
+           sparse Jacobian matrices", Journal of the Institute of Mathematics
+           and its Applications, 13 (1974), pp. 117-120.
+
+    .. [3] B. Fornberg, "Generation of Finite Difference Formulas on
+           Arbitrarily Spaced Grids", Mathematics of Computation 51, 1988.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.optimize._numdiff import approx_derivative
+    >>>
+    >>> def f(x, c1, c2):
+    ...     return np.array([x[0] * np.sin(c1 * x[1]),
+    ...                      x[0] * np.cos(c2 * x[1])])
+    ...
+    >>> x0 = np.array([1.0, 0.5 * np.pi])
+    >>> approx_derivative(f, x0, args=(1, 2))
+    array([[ 1.,  0.],
+           [-1.,  0.]])
+
+    Bounds can be used to limit the region of function evaluation.
+    In the example below we compute left and right derivative at point 1.0.
+
+    >>> def g(x):
+    ...     return x**2 if x >= 1 else x
+    ...
+    >>> x0 = 1.0
+    >>> approx_derivative(g, x0, bounds=(-np.inf, 1.0))
+    array([ 1.])
+    >>> approx_derivative(g, x0, bounds=(1.0, np.inf))
+    array([ 2.])
+    """
+    if method not in ['2-point', '3-point', 'cs']:
+        raise ValueError("Unknown method '%s'. " % method)
+
+    x0 = np.atleast_1d(x0)
+    if x0.ndim > 1:
+        raise ValueError("`x0` must have at most 1 dimension.")
+
+    lb, ub = _prepare_bounds(bounds, x0)
+
+    if lb.shape != x0.shape or ub.shape != x0.shape:
+        raise ValueError("Inconsistent shapes between bounds and `x0`.")
+
+    if as_linear_operator and not (np.all(np.isinf(lb))
+                                   and np.all(np.isinf(ub))):
+        raise ValueError("Bounds not supported when "
+                         "`as_linear_operator` is True.")
+
+    def fun_wrapped(x):
+        f = np.atleast_1d(fun(x, *args, **kwargs))
+        if f.ndim > 1:
+            raise RuntimeError("`fun` return value has "
+                               "more than 1 dimension.")
+        return f
+
+    if f0 is None:
+        f0 = fun_wrapped(x0)
+    else:
+        f0 = np.atleast_1d(f0)
+        if f0.ndim > 1:
+            raise ValueError("`f0` passed has more than 1 dimension.")
+
+    if np.any((x0 < lb) | (x0 > ub)):
+        raise ValueError("`x0` violates bound constraints.")
+
+    if as_linear_operator:
+        if rel_step is None:
+            rel_step = _eps_for_method(x0.dtype, f0.dtype, method)
+
+        return _linear_operator_difference(fun_wrapped, x0,
+                                           f0, rel_step, method)
+    else:
+        # by default we use rel_step
+        if abs_step is None:
+            h = _compute_absolute_step(rel_step, x0, f0, method)
+        else:
+            # user specifies an absolute step
+            sign_x0 = (x0 >= 0).astype(float) * 2 - 1
+            h = abs_step
+
+            # cannot have a zero step. This might happen if x0 is very large
+            # or small. In which case fall back to relative step.
+            dx = ((x0 + h) - x0)
+            h = np.where(dx == 0,
+                         _eps_for_method(x0.dtype, f0.dtype, method) *
+                         sign_x0 * np.maximum(1.0, np.abs(x0)),
+                         h)
+
+        if method == '2-point':
+            h, use_one_sided = _adjust_scheme_to_bounds(
+                x0, h, 1, '1-sided', lb, ub)
+        elif method == '3-point':
+            h, use_one_sided = _adjust_scheme_to_bounds(
+                x0, h, 1, '2-sided', lb, ub)
+        elif method == 'cs':
+            use_one_sided = False
+
+        if sparsity is None:
+            return _dense_difference(fun_wrapped, x0, f0, h,
+                                     use_one_sided, method)
+        else:
+            if not issparse(sparsity) and len(sparsity) == 2:
+                structure, groups = sparsity
+            else:
+                structure = sparsity
+                groups = group_columns(sparsity)
+
+            if issparse(structure):
+                structure = csc_matrix(structure)
+            else:
+                structure = np.atleast_2d(structure)
+
+            groups = np.atleast_1d(groups)
+            return _sparse_difference(fun_wrapped, x0, f0, h,
+                                      use_one_sided, structure,
+                                      groups, method)
+
+
+def _linear_operator_difference(fun, x0, f0, h, method):
+    m = f0.size
+    n = x0.size
+
+    if method == '2-point':
+        def matvec(p):
+            if np.array_equal(p, np.zeros_like(p)):
+                return np.zeros(m)
+            dx = h / norm(p)
+            x = x0 + dx*p
+            df = fun(x) - f0
+            return df / dx
+
+    elif method == '3-point':
+        def matvec(p):
+            if np.array_equal(p, np.zeros_like(p)):
+                return np.zeros(m)
+            dx = 2*h / norm(p)
+            x1 = x0 - (dx/2)*p
+            x2 = x0 + (dx/2)*p
+            f1 = fun(x1)
+            f2 = fun(x2)
+            df = f2 - f1
+            return df / dx
+
+    elif method == 'cs':
+        def matvec(p):
+            if np.array_equal(p, np.zeros_like(p)):
+                return np.zeros(m)
+            dx = h / norm(p)
+            x = x0 + dx*p*1.j
+            f1 = fun(x)
+            df = f1.imag
+            return df / dx
+
+    else:
+        raise RuntimeError("Should never get here.")
+
+    return LinearOperator((m, n), matvec)
+
+
+def _dense_difference(fun, x0, f0, h, use_one_sided, method):
+    m = f0.size
+    n = x0.size
+    J_transposed = np.empty((n, m))
+    h_vecs = np.diag(h)
+
+    for i in range(h.size):
+        if method == '2-point':
+            x = x0 + h_vecs[i]
+            dx = x[i] - x0[i]  # Recompute dx as exactly representable number.
+            df = fun(x) - f0
+        elif method == '3-point' and use_one_sided[i]:
+            x1 = x0 + h_vecs[i]
+            x2 = x0 + 2 * h_vecs[i]
+            dx = x2[i] - x0[i]
+            f1 = fun(x1)
+            f2 = fun(x2)
+            df = -3.0 * f0 + 4 * f1 - f2
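+            # Second-order one-sided difference:
+            # f' ~= (-3 f(x) + 4 f(x+h) - f(x+2h)) / (2h), with dx = 2h.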
+        elif method == '3-point' and not use_one_sided[i]:
+            x1 = x0 - h_vecs[i]
+            x2 = x0 + h_vecs[i]
+            dx = x2[i] - x1[i]
+            f1 = fun(x1)
+            f2 = fun(x2)
+            df = f2 - f1
+        elif method == 'cs':
+            f1 = fun(x0 + h_vecs[i]*1.j)
+            df = f1.imag
+            dx = h_vecs[i, i]
+        else:
+            raise RuntimeError("Should never get here.")
+
+        J_transposed[i] = df / dx
+
+    if m == 1:
+        J_transposed = np.ravel(J_transposed)
+
+    return J_transposed.T
+
+
+def _sparse_difference(fun, x0, f0, h, use_one_sided,
+                       structure, groups, method):
+    m = f0.size
+    n = x0.size
+    row_indices = []
+    col_indices = []
+    fractions = []
+
+    n_groups = np.max(groups) + 1
+    for group in range(n_groups):
+        # Perturb variables which are in the same group simultaneously.
+        e = np.equal(group, groups)
+        h_vec = h * e
+        if method == '2-point':
+            x = x0 + h_vec
+            dx = x - x0
+            df = fun(x) - f0
+            # The result is  written to columns which correspond to perturbed
+            # variables.
+            cols, = np.nonzero(e)
+            # Find all non-zero elements in selected columns of Jacobian.
+            i, j, _ = find(structure[:, cols])
+            # Restore column indices in the full array.
+            j = cols[j]
+        elif method == '3-point':
+            # Here we do conceptually the same but separate one-sided
+            # and two-sided schemes.
+            x1 = x0.copy()
+            x2 = x0.copy()
+
+            mask_1 = use_one_sided & e
+            x1[mask_1] += h_vec[mask_1]
+            x2[mask_1] += 2 * h_vec[mask_1]
+
+            mask_2 = ~use_one_sided & e
+            x1[mask_2] -= h_vec[mask_2]
+            x2[mask_2] += h_vec[mask_2]
+
+            dx = np.zeros(n)
+            dx[mask_1] = x2[mask_1] - x0[mask_1]
+            dx[mask_2] = x2[mask_2] - x1[mask_2]
+
+            f1 = fun(x1)
+            f2 = fun(x2)
+
+            cols, = np.nonzero(e)
+            i, j, _ = find(structure[:, cols])
+            j = cols[j]
+
+            mask = use_one_sided[j]
+            df = np.empty(m)
+
+            rows = i[mask]
+            df[rows] = -3 * f0[rows] + 4 * f1[rows] - f2[rows]
+
+            rows = i[~mask]
+            df[rows] = f2[rows] - f1[rows]
+        elif method == 'cs':
+            f1 = fun(x0 + h_vec*1.j)
+            df = f1.imag
+            dx = h_vec
+            cols, = np.nonzero(e)
+            i, j, _ = find(structure[:, cols])
+            j = cols[j]
+        else:
+            raise RuntimeError("Should never get here.")
+
+        # All that's left is to compute the fraction. We store i, j and
+        # fractions as separate arrays and later construct coo_matrix.
+        row_indices.append(i)
+        col_indices.append(j)
+        fractions.append(df[i] / dx[j])
+
+    row_indices = np.hstack(row_indices)
+    col_indices = np.hstack(col_indices)
+    fractions = np.hstack(fractions)
+    J = coo_matrix((fractions, (row_indices, col_indices)), shape=(m, n))
+    return csr_matrix(J)
+
+
+def check_derivative(fun, jac, x0, bounds=(-np.inf, np.inf), args=(),
+                     kwargs={}):
+    """Check correctness of a function computing derivatives (Jacobian or
+    gradient) by comparison with a finite difference approximation.
+
+    Parameters
+    ----------
+    fun : callable
+        Function of which to estimate the derivatives. The argument x
+        passed to this function is ndarray of shape (n,) (never a scalar
+        even if n=1). It must return 1-D array_like of shape (m,) or a scalar.
+    jac : callable
+        Function which computes Jacobian matrix of `fun`. It must work with
+        argument x the same way as `fun`. The return value must be array_like
+        or sparse matrix with an appropriate shape.
+    x0 : array_like of shape (n,) or float
+        Point at which to estimate the derivatives. Float will be converted
+        to 1-D array.
+    bounds : 2-tuple of array_like, optional
+        Lower and upper bounds on independent variables. Defaults to no bounds.
+        Each bound must match the size of `x0` or be a scalar, in the latter
+        case the bound will be the same for all variables. Use it to limit the
+        range of function evaluation.
+    args, kwargs : tuple and dict, optional
+        Additional arguments passed to `fun` and `jac`. Both empty by default.
+        The calling signature is ``fun(x, *args, **kwargs)`` and the same
+        for `jac`.
+
+    Returns
+    -------
+    accuracy : float
+        The maximum among all relative errors for elements with absolute values
+        higher than 1 and absolute errors for elements with absolute values
+        less than or equal to 1. If `accuracy` is on the order of 1e-6 or lower,
+        then it is likely that your `jac` implementation is correct.
+
+    See Also
+    --------
+    approx_derivative : Compute finite difference approximation of derivative.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.optimize._numdiff import check_derivative
+    >>>
+    >>> def f(x, c1, c2):
+    ...     return np.array([x[0] * np.sin(c1 * x[1]),
+    ...                      x[0] * np.cos(c2 * x[1])])
+    ...
+    >>> def jac(x, c1, c2):
+    ...     return np.array([
+    ...         [np.sin(c1 * x[1]),  c1 * x[0] * np.cos(c1 * x[1])],
+    ...         [np.cos(c2 * x[1]), -c2 * x[0] * np.sin(c2 * x[1])]
+    ...     ])
+    ...
+    >>>
+    >>> x0 = np.array([1.0, 0.5 * np.pi])
+    >>> check_derivative(f, jac, x0, args=(1, 2))
+    2.4492935982947064e-16
+    """
+    J_to_test = jac(x0, *args, **kwargs)
+    if issparse(J_to_test):
+        J_diff = approx_derivative(fun, x0, bounds=bounds, sparsity=J_to_test,
+                                   args=args, kwargs=kwargs)
+        J_to_test = csr_matrix(J_to_test)
+        abs_err = J_to_test - J_diff
+        i, j, abs_err_data = find(abs_err)
+        J_diff_data = np.asarray(J_diff[i, j]).ravel()
+        return np.max(np.abs(abs_err_data) /
+                      np.maximum(1, np.abs(J_diff_data)))
+    else:
+        J_diff = approx_derivative(fun, x0, bounds=bounds,
+                                   args=args, kwargs=kwargs)
+        abs_err = np.abs(J_to_test - J_diff)
+        return np.max(abs_err / np.maximum(1, np.abs(J_diff)))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_optimize.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_optimize.py
new file mode 100644
index 00000000..0a685aec
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_optimize.py
@@ -0,0 +1,3951 @@
+#__docformat__ = "restructuredtext en"
+# ******NOTICE***************
+# optimize.py module by Travis E. Oliphant
+#
+# You may copy and use this module as you see fit with no
+# guarantee implied provided you keep this notice in all copies.
+# *****END NOTICE************
+
+# A collection of optimization algorithms. Version 0.5
+# CHANGES
+#  Added fminbound (July 2001)
+#  Added brute (Aug. 2002)
+#  Finished line search satisfying strong Wolfe conditions (Mar. 2004)
+#  Updated strong Wolfe conditions line search to use
+#  cubic-interpolation (Mar. 2004)
+
+
+# Minimization routines
+
+__all__ = ['fmin', 'fmin_powell', 'fmin_bfgs', 'fmin_ncg', 'fmin_cg',
+           'fminbound', 'brent', 'golden', 'bracket', 'rosen', 'rosen_der',
+           'rosen_hess', 'rosen_hess_prod', 'brute', 'approx_fprime',
+           'line_search', 'check_grad', 'OptimizeResult', 'show_options',
+           'OptimizeWarning']
+
+__docformat__ = "restructuredtext en"
+
+import warnings
+import sys
+from numpy import (atleast_1d, eye, argmin, zeros, shape, squeeze,
+                   asarray, sqrt, Inf, asfarray)
+import numpy as np
+from scipy.sparse.linalg import LinearOperator
+from ._linesearch import (line_search_wolfe1, line_search_wolfe2,
+                          line_search_wolfe2 as line_search,
+                          LineSearchWarning)
+from ._numdiff import approx_derivative
+from ._hessian_update_strategy import HessianUpdateStrategy
+from scipy._lib._util import getfullargspec_no_self as _getfullargspec
+from scipy._lib._util import MapWrapper, check_random_state
+from scipy.optimize._differentiable_functions import ScalarFunction, FD_METHODS
+
+
+# standard status messages of optimizers
+_status_message = {'success': 'Optimization terminated successfully.',
+                   'maxfev': 'Maximum number of function evaluations has '
+                              'been exceeded.',
+                   'maxiter': 'Maximum number of iterations has been '
+                              'exceeded.',
+                   'pr_loss': 'Desired error not necessarily achieved due '
+                              'to precision loss.',
+                   'nan': 'NaN result encountered.',
+                   'out_of_bounds': 'The result is outside of the provided '
+                                    'bounds.'}
+
+
+class MemoizeJac:
+    """ Decorator that caches the return values of a function returning `(fun, grad)`
+        each time it is called. """
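+
+    Examples
+    --------
+    A minimal sketch; the function below is illustrative only:
+
+    >>> import numpy as np
+    >>> def fun_and_grad(x):
+    ...     return x[0]**2, np.array([2.0 * x[0]])
+    >>> memo = MemoizeJac(fun_and_grad)
+    >>> memo(np.array([3.0]))  # evaluates fun_and_grad once, caches both parts
+    9.0
+    >>> memo.derivative(np.array([3.0]))  # served from the cache
+    array([6.])
+    """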
+
+    def __init__(self, fun):
+        self.fun = fun
+        self.jac = None
+        self._value = None
+        self.x = None
+
+    def _compute_if_needed(self, x, *args):
+        if not np.all(x == self.x) or self._value is None or self.jac is None:
+            self.x = np.asarray(x).copy()
+            fg = self.fun(x, *args)
+            self.jac = fg[1]
+            self._value = fg[0]
+
+    def __call__(self, x, *args):
+        """ returns the function value """
+        self._compute_if_needed(x, *args)
+        return self._value
+
+    def derivative(self, x, *args):
+        self._compute_if_needed(x, *args)
+        return self.jac
+
+
+def _indenter(s, n=0):
+    """
+    Ensures that lines after the first are indented by the specified amount
+    """
+    split = s.split("\n")
+    indent = " "*n
+    return ("\n" + indent).join(split)
+
+
+def _float_formatter_10(x):
+    """
+    Returns a string representation of a float with exactly ten characters
+    """
+    if np.isposinf(x):
+        return "       inf"
+    elif np.isneginf(x):
+        return "      -inf"
+    elif np.isnan(x):
+        return "       nan"
+    return np.format_float_scientific(x, precision=3, pad_left=2, unique=False)
+
+
+def _dict_formatter(d, n=0, mplus=1, sorter=None):
+    """
+    Pretty printer for dictionaries
+
+    `n` keeps track of the starting indentation;
+    lines are indented by this much after a line break.
+    `mplus` is additional left padding applied to keys
+    """
+    if isinstance(d, dict):
+        m = max(map(len, list(d.keys()))) + mplus  # width to print keys
+        s = '\n'.join([k.rjust(m) + ': ' +  # right justified, width m
+                       _indenter(_dict_formatter(v, m+n+2, 0, sorter), m+2)
+                       for k, v in sorter(d)])  # +2 for ': '
+    else:
+        # By default, NumPy arrays print with linewidth=76. `n` is
+        # the indent at which a line begins printing, so it is subtracted
+        # from the default to avoid exceeding 76 characters total.
+        # `edgeitems` is the number of elements to include before and after
+        # ellipses when arrays are not shown in full.
+        # `threshold` is the maximum number of elements for which an
+        # array is shown in full.
+        # These values tend to work well for use with OptimizeResult.
+        with np.printoptions(linewidth=76-n, edgeitems=2, threshold=12,
+                             formatter={'float_kind': _float_formatter_10}):
+            s = str(d)
+    return s
+
+
+class OptimizeResult(dict):
+    """ Represents the optimization result.
+
+    Attributes
+    ----------
+    x : ndarray
+        The solution of the optimization.
+    success : bool
+        Whether or not the optimizer exited successfully.
+    status : int
+        Termination status of the optimizer. Its value depends on the
+        underlying solver. Refer to `message` for details.
+    message : str
+        Description of the cause of the termination.
+    fun, jac, hess : ndarray
+        Values of objective function, its Jacobian and its Hessian (if
+        available). The Hessians may be approximations, see the documentation
+        of the function in question.
+    hess_inv : object
+        Inverse of the objective function's Hessian; may be an approximation.
+        Not available for all solvers. The type of this attribute may be
+        either np.ndarray or scipy.sparse.linalg.LinearOperator.
+    nfev, njev, nhev : int
+        Number of evaluations of the objective functions and of its
+        Jacobian and Hessian.
+    nit : int
+        Number of iterations performed by the optimizer.
+    maxcv : float
+        The maximum constraint violation.
+
+    Notes
+    -----
+    `OptimizeResult` may have additional attributes not listed here depending
+    on the specific solver being used. Since this class is essentially a
+    subclass of dict with attribute accessors, one can see which
+    attributes are available using the `OptimizeResult.keys` method.
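+
+    Examples
+    --------
+    A hand-built result illustrating the dict/attribute duality (solvers
+    normally construct these for you):
+
+    >>> from scipy.optimize import OptimizeResult
+    >>> res = OptimizeResult(x=[1.0, 2.0], success=True)
+    >>> res.success
+    True
+    >>> res['x']
+    [1.0, 2.0]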
+    """
+
+    def __getattr__(self, name):
+        try:
+            return self[name]
+        except KeyError as e:
+            raise AttributeError(name) from e
+
+    __setattr__ = dict.__setitem__
+    __delattr__ = dict.__delitem__
+
+    def __repr__(self):
+        order_keys = ['message', 'success', 'status', 'fun', 'funl', 'x', 'xl',
+                      'col_ind', 'nit', 'lower', 'upper', 'eqlin', 'ineqlin']
+        # 'slack', 'con' are redundant with residuals
+        # 'crossover_nit' is probably not interesting to most users
+        omit_keys = {'slack', 'con', 'crossover_nit'}
+
+        def key(item):
+            try:
+                return order_keys.index(item[0].lower())
+            except ValueError:  # item not in list
+                return np.inf
+
+        def omit_redundant(items):
+            for item in items:
+                if item[0] in omit_keys:
+                    continue
+                yield item
+
+        def item_sorter(d):
+            return sorted(omit_redundant(d.items()), key=key)
+
+        if self.keys():
+            return _dict_formatter(self, sorter=item_sorter)
+        else:
+            return self.__class__.__name__ + "()"
+
+    def __dir__(self):
+        return list(self.keys())
+
+
+class OptimizeWarning(UserWarning):
+    pass
+
+
+def _check_unknown_options(unknown_options):
+    if unknown_options:
+        msg = ", ".join(map(str, unknown_options.keys()))
+        # Stack level 4: this is called from _minimize_*, which is
+        # called from another function in SciPy. Level 4 is the first
+        # level in user code.
+        warnings.warn("Unknown solver options: %s" % msg, OptimizeWarning, 4)
+
+
+def is_finite_scalar(x):
+    """Test whether `x` is either a finite scalar or a finite array scalar.
+
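+    For instance, ``is_finite_scalar(3.0)`` is truthy, while
+    ``is_finite_scalar(np.inf)`` and ``is_finite_scalar(np.ones(2))`` are
+    falsy.
+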
+    """
+    return np.size(x) == 1 and np.isfinite(x)
+
+
+_epsilon = sqrt(np.finfo(float).eps)
+
+
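+# A quick sketch of `vecnorm` below: vecnorm([3.0, -4.0]) == 5.0, while
+# vecnorm([3.0, -4.0], ord=np.inf) == 4.0 (the largest absolute entry).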
+def vecnorm(x, ord=2):
+    if ord == Inf:
+        return np.amax(np.abs(x))
+    elif ord == -Inf:
+        return np.amin(np.abs(x))
+    else:
+        return np.sum(np.abs(x)**ord, axis=0)**(1.0 / ord)
+
+
+def _prepare_scalar_function(fun, x0, jac=None, args=(), bounds=None,
+                             epsilon=None, finite_diff_rel_step=None,
+                             hess=None):
+    """
+    Creates a ScalarFunction object for use with scalar minimizers
+    (BFGS/LBFGSB/SLSQP/TNC/CG/etc).
+
+    Parameters
+    ----------
+    fun : callable
+        The objective function to be minimized.
+
+            ``fun(x, *args) -> float``
+
+        where ``x`` is a 1-D array with shape (n,) and ``args``
+        is a tuple of the fixed parameters needed to completely
+        specify the function.
+    x0 : ndarray, shape (n,)
+        Initial guess. Array of real elements of size (n,),
+        where 'n' is the number of independent variables.
+    jac : {callable,  '2-point', '3-point', 'cs', None}, optional
+        Method for computing the gradient vector. If it is a callable, it
+        should be a function that returns the gradient vector:
+
+            ``jac(x, *args) -> array_like, shape (n,)``
+
+        If one of `{'2-point', '3-point', 'cs'}` is selected then the gradient
+        is calculated with a relative step for finite differences. If `None`,
+        then two-point finite differences with an absolute step are used.
+    args : tuple, optional
+        Extra arguments passed to the objective function and its
+        derivatives (`fun`, `jac` functions).
+    bounds : sequence, optional
+        Bounds on variables. 'new-style' bounds are required.
+    epsilon : float or ndarray, optional
+        If `jac is None` the absolute step size used for numerical
+        approximation of the jacobian via forward differences.
+    finite_diff_rel_step : None or array_like, optional
+        If `jac in ['2-point', '3-point', 'cs']` the relative step size to
+        use for numerical approximation of the jacobian. The absolute step
+        size is computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``,
+        possibly adjusted to fit into the bounds. For ``method='3-point'``
+        the sign of `h` is ignored. If None (default) then step is selected
+        automatically.
+    hess : {callable,  '2-point', '3-point', 'cs', None}
+        Computes the Hessian matrix. If it is callable, it should return the
+        Hessian matrix:
+
+            ``hess(x, *args) -> {LinearOperator, spmatrix, array}, (n, n)``
+
+        Alternatively, the keywords {'2-point', '3-point', 'cs'} select a
+        finite difference scheme for numerical estimation.
+        Whenever the gradient is estimated via finite-differences, the Hessian
+        cannot be estimated with options {'2-point', '3-point', 'cs'} and needs
+        to be estimated using one of the quasi-Newton strategies.
+
+    Returns
+    -------
+    sf : ScalarFunction
+    """
+    if callable(jac):
+        grad = jac
+    elif jac in FD_METHODS:
+        # epsilon is set to None so that ScalarFunction is made to use
+        # rel_step
+        epsilon = None
+        grad = jac
+    else:
+        # default (jac is None) is to do 2-point finite differences with
+        # absolute step size. ScalarFunction has to be provided an
+        # epsilon value that is not None to use absolute steps. This is
+        # normally the case from most _minimize* methods.
+        grad = '2-point'
+
+    if hess is None:
+        # ScalarFunction requires something for hess, so if nothing is
+        # provided we supply a dummy implementation that returns None,
+        # signalling downstream minimisers that no Hessian is available.
+        # The results of `fun.hess` should not be used.
+        def hess(x, *args):
+            return None
+
+    if bounds is None:
+        bounds = (-np.inf, np.inf)
+
+    # ScalarFunction caches. Reuse of fun(x) during grad
+    # calculation reduces overall function evaluations.
+    sf = ScalarFunction(fun, x0, args, grad, hess,
+                        finite_diff_rel_step, bounds, epsilon=epsilon)
+
+    return sf
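+
+# A hedged usage sketch for `_prepare_scalar_function` (ScalarFunction is an
+# internal helper; the `fun`/`grad` attribute usage mirrors the minimizers
+# below):
+#
+#     sf = _prepare_scalar_function(rosen, x0=np.array([1.3, 0.7]),
+#                                   jac=rosen_der)
+#     fval = sf.fun(np.array([1.3, 0.7]))   # function value (cached)
+#     gval = sf.grad(np.array([1.3, 0.7]))  # analytic gradient here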
+
+
+def _clip_x_for_func(func, bounds):
+    # ensures that x values sent to func are clipped to bounds
+
+    # this is used as a mitigation for gh11403, slsqp/tnc sometimes
+    # suggest a move that is outside the limits by 1 or 2 ULP. This
+    # unclean fix makes sure x is strictly within bounds.
+    def eval(x):
+        x = _check_clip_x(x, bounds)
+        return func(x)
+
+    return eval
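+
+# For example, ``_clip_x_for_func(rosen, (lb, ub))`` returns a wrapper that
+# clips any slightly out-of-bounds ``x`` proposed by slsqp/tnc back into
+# ``[lb, ub]`` before evaluating ``rosen`` (lb/ub being bound arrays).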
+
+
+def _check_clip_x(x, bounds):
+    if (x < bounds[0]).any() or (x > bounds[1]).any():
+        warnings.warn("Values in x were outside bounds during a "
+                      "minimize step, clipping to bounds", RuntimeWarning)
+        x = np.clip(x, bounds[0], bounds[1])
+        return x
+
+    return x
+
+
+def rosen(x):
+    """
+    The Rosenbrock function.
+
+    The function computed is::
+
+        sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0)
+
+    Parameters
+    ----------
+    x : array_like
+        1-D array of points at which the Rosenbrock function is to be computed.
+
+    Returns
+    -------
+    f : float
+        The value of the Rosenbrock function.
+
+    See Also
+    --------
+    rosen_der, rosen_hess, rosen_hess_prod
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.optimize import rosen
+    >>> X = 0.1 * np.arange(10)
+    >>> rosen(X)
+    76.56
+
+    For higher-dimensional input ``rosen`` broadcasts.
+    In the following example, we use this to plot a 2D landscape.
+    Note that ``rosen_hess`` does not broadcast in this manner.
+
+    >>> import matplotlib.pyplot as plt
+    >>> from mpl_toolkits.mplot3d import Axes3D
+    >>> x = np.linspace(-1, 1, 50)
+    >>> X, Y = np.meshgrid(x, x)
+    >>> ax = plt.subplot(111, projection='3d')
+    >>> ax.plot_surface(X, Y, rosen([X, Y]))
+    >>> plt.show()
+    """
+    x = asarray(x)
+    r = np.sum(100.0 * (x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0,
+               axis=0)
+    return r
+
+
+def rosen_der(x):
+    """
+    The derivative (i.e. gradient) of the Rosenbrock function.
+
+    Parameters
+    ----------
+    x : array_like
+        1-D array of points at which the derivative is to be computed.
+
+    Returns
+    -------
+    rosen_der : (N,) ndarray
+        The gradient of the Rosenbrock function at `x`.
+
+    See Also
+    --------
+    rosen, rosen_hess, rosen_hess_prod
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.optimize import rosen_der
+    >>> X = 0.1 * np.arange(9)
+    >>> rosen_der(X)
+    array([ -2. ,  10.6,  15.6,  13.4,   6.4,  -3. , -12.4, -19.4,  62. ])
+
+    """
+    x = asarray(x)
+    xm = x[1:-1]
+    xm_m1 = x[:-2]
+    xm_p1 = x[2:]
+    der = np.zeros_like(x)
+    der[1:-1] = (200 * (xm - xm_m1**2) -
+                 400 * (xm_p1 - xm**2) * xm - 2 * (1 - xm))
+    der[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0])
+    der[-1] = 200 * (x[-1] - x[-2]**2)
+    return der
+
+
+def rosen_hess(x):
+    """
+    The Hessian matrix of the Rosenbrock function.
+
+    Parameters
+    ----------
+    x : array_like
+        1-D array of points at which the Hessian matrix is to be computed.
+
+    Returns
+    -------
+    rosen_hess : ndarray
+        The Hessian matrix of the Rosenbrock function at `x`.
+
+    See Also
+    --------
+    rosen, rosen_der, rosen_hess_prod
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.optimize import rosen_hess
+    >>> X = 0.1 * np.arange(4)
+    >>> rosen_hess(X)
+    array([[-38.,   0.,   0.,   0.],
+           [  0., 134., -40.,   0.],
+           [  0., -40., 130., -80.],
+           [  0.,   0., -80., 200.]])
+
+    """
+    x = atleast_1d(x)
+    H = np.diag(-400 * x[:-1], 1) - np.diag(400 * x[:-1], -1)
+    diagonal = np.zeros(len(x), dtype=x.dtype)
+    diagonal[0] = 1200 * x[0]**2 - 400 * x[1] + 2
+    diagonal[-1] = 200
+    diagonal[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:]
+    H = H + np.diag(diagonal)
+    return H
+
+
+def rosen_hess_prod(x, p):
+    """
+    Product of the Hessian matrix of the Rosenbrock function with a vector.
+
+    Parameters
+    ----------
+    x : array_like
+        1-D array of points at which the Hessian matrix is to be computed.
+    p : array_like
+        1-D array, the vector to be multiplied by the Hessian matrix.
+
+    Returns
+    -------
+    rosen_hess_prod : ndarray
+        The Hessian matrix of the Rosenbrock function at `x` multiplied
+        by the vector `p`.
+
+    See Also
+    --------
+    rosen, rosen_der, rosen_hess
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.optimize import rosen_hess_prod
+    >>> X = 0.1 * np.arange(9)
+    >>> p = 0.5 * np.arange(9)
+    >>> rosen_hess_prod(X, p)
+    array([  -0.,   27.,  -10.,  -95., -192., -265., -278., -195., -180.])
+
+    """
+    x = atleast_1d(x)
+    Hp = np.zeros(len(x), dtype=x.dtype)
+    Hp[0] = (1200 * x[0]**2 - 400 * x[1] + 2) * p[0] - 400 * x[0] * p[1]
+    Hp[1:-1] = (-400 * x[:-2] * p[:-2] +
+                (202 + 1200 * x[1:-1]**2 - 400 * x[2:]) * p[1:-1] -
+                400 * x[1:-1] * p[2:])
+    Hp[-1] = -400 * x[-2] * p[-2] + 200*p[-1]
+    return Hp
+
+
+def _wrap_scalar_function(function, args):
+    # wraps a minimizer function to count number of evaluations
+    # and to easily provide an args kwd.
+    ncalls = [0]
+    if function is None:
+        return ncalls, None
+
+    def function_wrapper(x, *wrapper_args):
+        ncalls[0] += 1
+        # A copy of x is sent to the user function (gh13740)
+        fx = function(np.copy(x), *(wrapper_args + args))
+        # Ideally, we'd like to have a true scalar returned from f(x). For
+        # backwards-compatibility, also allow np.array([1.3]),
+        # np.array([[1.3]]) etc.
+        if not np.isscalar(fx):
+            try:
+                fx = np.asarray(fx).item()
+            except (TypeError, ValueError) as e:
+                raise ValueError("The user-provided objective function "
+                                 "must return a scalar value.") from e
+        return fx
+
+    return ncalls, function_wrapper
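+
+# Sketch: ``ncalls, wrapped = _wrap_scalar_function(rosen, ())`` yields a
+# wrapper where each ``wrapped(x)`` call increments ``ncalls[0]`` and coerces
+# the result to a scalar.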
+
+
+class _MaxFuncCallError(RuntimeError):
+    pass
+
+
+def _wrap_scalar_function_maxfun_validation(function, args, maxfun):
+    # wraps a minimizer function to count number of evaluations
+    # and to easily provide an args kwd.
+    ncalls = [0]
+    if function is None:
+        return ncalls, None
+
+    def function_wrapper(x, *wrapper_args):
+        if ncalls[0] >= maxfun:
+            raise _MaxFuncCallError("Too many function calls")
+        ncalls[0] += 1
+        # A copy of x is sent to the user function (gh13740)
+        fx = function(np.copy(x), *(wrapper_args + args))
+        # Ideally, we'd like to have a true scalar returned from f(x). For
+        # backwards-compatibility, also allow np.array([1.3]),
+        # np.array([[1.3]]) etc.
+        if not np.isscalar(fx):
+            try:
+                fx = np.asarray(fx).item()
+            except (TypeError, ValueError) as e:
+                raise ValueError("The user-provided objective function "
+                                 "must return a scalar value.") from e
+        return fx
+
+    return ncalls, function_wrapper
+
+
+def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None,
+         full_output=0, disp=1, retall=0, callback=None, initial_simplex=None):
+    """
+    Minimize a function using the downhill simplex algorithm.
+
+    This algorithm only uses function values, not derivatives or second
+    derivatives.
+
+    Parameters
+    ----------
+    func : callable func(x,*args)
+        The objective function to be minimized.
+    x0 : ndarray
+        Initial guess.
+    args : tuple, optional
+        Extra arguments passed to func, i.e., ``f(x,*args)``.
+    xtol : float, optional
+        Absolute error in xopt between iterations that is acceptable for
+        convergence.
+    ftol : number, optional
+        Absolute error in func(xopt) between iterations that is acceptable for
+        convergence.
+    maxiter : int, optional
+        Maximum number of iterations to perform.
+    maxfun : number, optional
+        Maximum number of function evaluations to make.
+    full_output : bool, optional
+        Set to True if fopt and warnflag outputs are desired.
+    disp : bool, optional
+        Set to True to print convergence messages.
+    retall : bool, optional
+        Set to True to return list of solutions at each iteration.
+    callback : callable, optional
+        Called after each iteration, as callback(xk), where xk is the
+        current parameter vector.
+    initial_simplex : array_like of shape (N + 1, N), optional
+        Initial simplex. If given, overrides `x0`.
+        ``initial_simplex[j,:]`` should contain the coordinates of
+        the jth vertex of the ``N+1`` vertices in the simplex, where
+        ``N`` is the dimension.
+
+    Returns
+    -------
+    xopt : ndarray
+        Parameter that minimizes function.
+    fopt : float
+        Value of function at minimum: ``fopt = func(xopt)``.
+    iter : int
+        Number of iterations performed.
+    funcalls : int
+        Number of function calls made.
+    warnflag : int
+        1 : Maximum number of function evaluations made.
+        2 : Maximum number of iterations reached.
+    allvecs : list
+        Solution at each iteration.
+
+    See also
+    --------
+    minimize: Interface to minimization algorithms for multivariate
+        functions. See the 'Nelder-Mead' `method` in particular.
+
+    Notes
+    -----
+    Uses a Nelder-Mead simplex algorithm to find the minimum of a function
+    of one or more variables.
+
+    This algorithm has a long history of successful use in applications.
+    But it will usually be slower than an algorithm that uses first or
+    second derivative information. In practice, it can have poor
+    performance in high-dimensional problems and is not robust to
+    minimizing complicated functions. Additionally, there currently is no
+    complete theory describing when the algorithm will successfully
+    converge to the minimum, or how fast it will if it does. Both the ftol and
+    xtol criteria must be met for convergence.
+
+    Examples
+    --------
+    >>> def f(x):
+    ...     return x**2
+
+    >>> from scipy import optimize
+
+    >>> minimum = optimize.fmin(f, 1)
+    Optimization terminated successfully.
+             Current function value: 0.000000
+             Iterations: 17
+             Function evaluations: 34
+    >>> minimum[0]
+    -8.8817841970012523e-16
+
+    References
+    ----------
+    .. [1] Nelder, J.A. and Mead, R. (1965), "A simplex method for function
+           minimization", The Computer Journal, 7, pp. 308-313
+
+    .. [2] Wright, M.H. (1996), "Direct Search Methods: Once Scorned, Now
+           Respectable", in Numerical Analysis 1995, Proceedings of the
+           1995 Dundee Biennial Conference in Numerical Analysis, D.F.
+           Griffiths and G.A. Watson (Eds.), Addison Wesley Longman,
+           Harlow, UK, pp. 191-208.
+
+    """
+    opts = {'xatol': xtol,
+            'fatol': ftol,
+            'maxiter': maxiter,
+            'maxfev': maxfun,
+            'disp': disp,
+            'return_all': retall,
+            'initial_simplex': initial_simplex}
+
+    res = _minimize_neldermead(func, x0, args, callback=callback, **opts)
+    if full_output:
+        retlist = res['x'], res['fun'], res['nit'], res['nfev'], res['status']
+        if retall:
+            retlist += (res['allvecs'], )
+        return retlist
+    else:
+        if retall:
+            return res['x'], res['allvecs']
+        else:
+            return res['x']
+
+
+def _minimize_neldermead(func, x0, args=(), callback=None,
+                         maxiter=None, maxfev=None, disp=False,
+                         return_all=False, initial_simplex=None,
+                         xatol=1e-4, fatol=1e-4, adaptive=False, bounds=None,
+                         **unknown_options):
+    """
+    Minimization of scalar function of one or more variables using the
+    Nelder-Mead algorithm.
+
+    Options
+    -------
+    disp : bool
+        Set to True to print convergence messages.
+    maxiter, maxfev : int
+        Maximum allowed number of iterations and function evaluations.
+        Will default to ``N*200``, where ``N`` is the number of
+        variables, if neither `maxiter` nor `maxfev` is set. If both
+        `maxiter` and `maxfev` are set, minimization will stop at
+        whichever is reached first.
+    return_all : bool, optional
+        Set to True to return a list of the best solution at each of the
+        iterations.
+    initial_simplex : array_like of shape (N + 1, N)
+        Initial simplex. If given, overrides `x0`.
+        ``initial_simplex[j,:]`` should contain the coordinates of
+        the jth vertex of the ``N+1`` vertices in the simplex, where
+        ``N`` is the dimension.
+    xatol : float, optional
+        Absolute error in xopt between iterations that is acceptable for
+        convergence.
+    fatol : number, optional
+        Absolute error in func(xopt) between iterations that is acceptable for
+        convergence.
+    adaptive : bool, optional
+        Adapt algorithm parameters to dimensionality of problem. Useful for
+        high-dimensional minimization [1]_.
+    bounds : sequence or `Bounds`, optional
+        Bounds on variables. There are two ways to specify the bounds:
+
+            1. Instance of `Bounds` class.
+            2. Sequence of ``(min, max)`` pairs for each element in `x`. None
+               is used to specify no bound.
+
+        Note that this just clips all vertices in simplex based on
+        the bounds.
+
+    References
+    ----------
+    .. [1] Gao, F. and Han, L.
+       Implementing the Nelder-Mead simplex algorithm with adaptive
+       parameters. 2012. Computational Optimization and Applications.
+       51:1, pp. 259-277
+
+    """
+    _check_unknown_options(unknown_options)
+    maxfun = maxfev
+    retall = return_all
+
+    x0 = asfarray(x0).flatten()
+
+    if adaptive:
+        dim = float(len(x0))
+        rho = 1
+        chi = 1 + 2/dim
+        psi = 0.75 - 1/(2*dim)
+        sigma = 1 - 1/dim
+    else:
+        rho = 1
+        chi = 2
+        psi = 0.5
+        sigma = 0.5
+
+    nonzdelt = 0.05
+    zdelt = 0.00025
+
+    if bounds is not None:
+        lower_bound, upper_bound = bounds.lb, bounds.ub
+        # check bounds
+        if (lower_bound > upper_bound).any():
+            raise ValueError("Nelder Mead - one of the lower bounds "
+                             "is greater than an upper bound.")
+        if np.any(lower_bound > x0) or np.any(x0 > upper_bound):
+            warnings.warn("Initial guess is not within the specified bounds",
+                          OptimizeWarning, 3)
+        x0 = np.clip(x0, lower_bound, upper_bound)
+
+    if initial_simplex is None:
+        N = len(x0)
+
+        sim = np.empty((N + 1, N), dtype=x0.dtype)
+        sim[0] = x0
+        for k in range(N):
+            y = np.array(x0, copy=True)
+            if y[k] != 0:
+                y[k] = (1 + nonzdelt)*y[k]
+            else:
+                y[k] = zdelt
+            sim[k + 1] = y
+    else:
+        sim = np.asfarray(initial_simplex).copy()
+        if sim.ndim != 2 or sim.shape[0] != sim.shape[1] + 1:
+            raise ValueError("`initial_simplex` should be an array of shape (N+1,N)")
+        if len(x0) != sim.shape[1]:
+            raise ValueError("Size of `initial_simplex` is not consistent with `x0`")
+        N = sim.shape[1]
+
+    if retall:
+        allvecs = [sim[0]]
+
+    # If neither are set, then set both to default
+    if maxiter is None and maxfun is None:
+        maxiter = N * 200
+        maxfun = N * 200
+    elif maxiter is None:
+        # Convert the remaining None to np.inf, unless the other is np.inf,
+        # in which case use the default to avoid unbounded iteration
+        if maxfun == np.inf:
+            maxiter = N * 200
+        else:
+            maxiter = np.inf
+    elif maxfun is None:
+        if maxiter == np.inf:
+            maxfun = N * 200
+        else:
+            maxfun = np.inf
+
+    if bounds is not None:
+        sim = np.clip(sim, lower_bound, upper_bound)
+
+    one2np1 = list(range(1, N + 1))
+    fsim = np.full((N + 1,), np.inf, dtype=float)
+
+    fcalls, func = _wrap_scalar_function_maxfun_validation(func, args, maxfun)
+
+    try:
+        for k in range(N + 1):
+            fsim[k] = func(sim[k])
+    except _MaxFuncCallError:
+        pass
+    finally:
+        # sort so sim[0, :] has the lowest function value
+        ind = np.argsort(fsim)
+        sim = np.take(sim, ind, 0)
+        fsim = np.take(fsim, ind, 0)
+
+    iterations = 1
+
+    while (fcalls[0] < maxfun and iterations < maxiter):
+        try:
+            if (np.max(np.ravel(np.abs(sim[1:] - sim[0]))) <= xatol and
+                    np.max(np.abs(fsim[0] - fsim[1:])) <= fatol):
+                break
+
+            xbar = np.add.reduce(sim[:-1], 0) / N
+            xr = (1 + rho) * xbar - rho * sim[-1]
+            if bounds is not None:
+                xr = np.clip(xr, lower_bound, upper_bound)
+            fxr = func(xr)
+            doshrink = 0
+
+            if fxr < fsim[0]:
+                xe = (1 + rho * chi) * xbar - rho * chi * sim[-1]
+                if bounds is not None:
+                    xe = np.clip(xe, lower_bound, upper_bound)
+                fxe = func(xe)
+
+                if fxe < fxr:
+                    sim[-1] = xe
+                    fsim[-1] = fxe
+                else:
+                    sim[-1] = xr
+                    fsim[-1] = fxr
+            else:  # fsim[0] <= fxr
+                if fxr < fsim[-2]:
+                    sim[-1] = xr
+                    fsim[-1] = fxr
+                else:  # fxr >= fsim[-2]
+                    # Perform contraction
+                    if fxr < fsim[-1]:
+                        xc = (1 + psi * rho) * xbar - psi * rho * sim[-1]
+                        if bounds is not None:
+                            xc = np.clip(xc, lower_bound, upper_bound)
+                        fxc = func(xc)
+
+                        if fxc <= fxr:
+                            sim[-1] = xc
+                            fsim[-1] = fxc
+                        else:
+                            doshrink = 1
+                    else:
+                        # Perform an inside contraction
+                        xcc = (1 - psi) * xbar + psi * sim[-1]
+                        if bounds is not None:
+                            xcc = np.clip(xcc, lower_bound, upper_bound)
+                        fxcc = func(xcc)
+
+                        if fxcc < fsim[-1]:
+                            sim[-1] = xcc
+                            fsim[-1] = fxcc
+                        else:
+                            doshrink = 1
+
+                    if doshrink:
+                        for j in one2np1:
+                            sim[j] = sim[0] + sigma * (sim[j] - sim[0])
+                            if bounds is not None:
+                                sim[j] = np.clip(
+                                    sim[j], lower_bound, upper_bound)
+                            fsim[j] = func(sim[j])
+            iterations += 1
+        except _MaxFuncCallError:
+            pass
+        finally:
+            ind = np.argsort(fsim)
+            sim = np.take(sim, ind, 0)
+            fsim = np.take(fsim, ind, 0)
+            if callback is not None:
+                callback(sim[0])
+            if retall:
+                allvecs.append(sim[0])
+
+    x = sim[0]
+    fval = np.min(fsim)
+    warnflag = 0
+
+    if fcalls[0] >= maxfun:
+        warnflag = 1
+        msg = _status_message['maxfev']
+        if disp:
+            warnings.warn(msg, RuntimeWarning, 3)
+    elif iterations >= maxiter:
+        warnflag = 2
+        msg = _status_message['maxiter']
+        if disp:
+            warnings.warn(msg, RuntimeWarning, 3)
+    else:
+        msg = _status_message['success']
+        if disp:
+            print(msg)
+            print("         Current function value: %f" % fval)
+            print("         Iterations: %d" % iterations)
+            print("         Function evaluations: %d" % fcalls[0])
+
+    result = OptimizeResult(fun=fval, nit=iterations, nfev=fcalls[0],
+                            status=warnflag, success=(warnflag == 0),
+                            message=msg, x=x, final_simplex=(sim, fsim))
+    if retall:
+        result['allvecs'] = allvecs
+    return result
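+
+# A hedged sketch of reaching this solver through the public interface, with
+# options matching the Options section above:
+#
+#     from scipy.optimize import minimize
+#     res = minimize(rosen, [1.3, 0.7], method='Nelder-Mead',
+#                    options={'xatol': 1e-8, 'fatol': 1e-8})
+#     # res.x is approximately [1., 1.]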
+
+
+def approx_fprime(xk, f, epsilon=_epsilon, *args):
+    """Finite difference approximation of the derivatives of a
+    scalar or vector-valued function.
+
+    If a function maps from :math:`R^n` to :math:`R^m`, its derivatives form
+    an m-by-n matrix called the Jacobian, where an element :math:`(i, j)` is
+    a partial derivative of ``f[i]`` with respect to ``xk[j]``.
+
+    Parameters
+    ----------
+    xk : array_like
+        The coordinate vector at which to determine the gradient of `f`.
+    f : callable
+        Function of which to estimate the derivatives of. Has the signature
+        ``f(xk, *args)`` where `xk` is the argument in the form of a 1-D array
+        and `args` is a tuple of any additional fixed parameters needed to
+        completely specify the function. The argument `xk` passed to this
+        function is an ndarray of shape (n,) (never a scalar even if n=1).
+        It must return a 1-D array_like of shape (m,) or a scalar.
+
+        .. versionchanged:: 1.9.0
+            `f` is now able to return a 1-D array-like, with the :math:`(m, n)`
+            Jacobian being estimated.
+
+    epsilon : {float, array_like}, optional
+        Increment to `xk` to use for determining the function gradient.
+        If a scalar, uses the same finite difference delta for all partial
+        derivatives. If an array, should contain one value per element of
+        `xk`. Defaults to ``sqrt(np.finfo(float).eps)``, which is approximately
+        1.49e-08.
+    \\*args : args, optional
+        Any other arguments that are to be passed to `f`.
+
+    Returns
+    -------
+    jac : ndarray
+        The partial derivatives of `f` with respect to `xk`.
+
+    See Also
+    --------
+    check_grad : Check correctness of gradient function against approx_fprime.
+
+    Notes
+    -----
+    The function gradient is determined by the forward finite difference
+    formula::
+
+                 f(xk[i] + epsilon[i]) - f(xk[i])
+        f'[i] = ---------------------------------
+                            epsilon[i]
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import optimize
+    >>> def func(x, c0, c1):
+    ...     "Coordinate vector `x` should be an array of size two."
+    ...     return c0 * x[0]**2 + c1*x[1]**2
+
+    >>> x = np.ones(2)
+    >>> c0, c1 = (1, 200)
+    >>> eps = np.sqrt(np.finfo(float).eps)
+    >>> optimize.approx_fprime(x, func, [eps, np.sqrt(200) * eps], c0, c1)
+    array([   2.        ,  400.00004198])
+
+    """
+    xk = np.asarray(xk, float)
+    f0 = f(xk, *args)
+
+    return approx_derivative(f, xk, method='2-point', abs_step=epsilon,
+                             args=args, f0=f0)
+
+
+def check_grad(func, grad, x0, *args, epsilon=_epsilon,
+                direction='all', seed=None):
+    """Check the correctness of a gradient function by comparing it against a
+    (forward) finite-difference approximation of the gradient.
+
+    Parameters
+    ----------
+    func : callable ``func(x0, *args)``
+        Function whose derivative is to be checked.
+    grad : callable ``grad(x0, *args)``
+        Jacobian of `func`.
+    x0 : ndarray
+        Point(s) at which to check `grad` against the forward difference
+        approximation of the gradient computed using `func`.
+    args : \\*args, optional
+        Extra arguments passed to `func` and `grad`.
+    epsilon : float, optional
+        Step size used for the finite difference approximation. It defaults to
+        ``sqrt(np.finfo(float).eps)``, which is approximately 1.49e-08.
+    direction : str, optional
+        If set to ``'random'``, then gradients along a random vector
+        are used to check `grad` against forward difference approximation
+        using `func`. By default it is ``'all'``, in which case, all
+        the one hot direction vectors are considered to check `grad`.
+        If `func` is a vector valued function then only ``'all'`` can be used.
+    seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
+        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
+        singleton is used.
+        If `seed` is an int, a new ``RandomState`` instance is used,
+        seeded with `seed`.
+        If `seed` is already a ``Generator`` or ``RandomState`` instance then
+        that instance is used.
+        Specify `seed` for reproducing the return value from this function.
+        The random numbers generated with this seed affect the random vector
+        along which gradients are computed to check ``grad``. Note that `seed`
+        is only used when `direction` argument is set to `'random'`.
+
+    Returns
+    -------
+    err : float
+        The square root of the sum of squares (i.e., the 2-norm) of the
+        difference between ``grad(x0, *args)`` and the finite difference
+        approximation of `grad` using func at the points `x0`.
+
+    See Also
+    --------
+    approx_fprime
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> def func(x):
+    ...     return x[0]**2 - 0.5 * x[1]**3
+    >>> def grad(x):
+    ...     return [2 * x[0], -1.5 * x[1]**2]
+    >>> from scipy.optimize import check_grad
+    >>> check_grad(func, grad, [1.5, -1.5])
+    2.9802322387695312e-08  # may vary
+    >>> rng = np.random.default_rng()
+    >>> check_grad(func, grad, [1.5, -1.5],
+    ...             direction='random', seed=rng)
+    2.9802322387695312e-08
+
+    """
+    step = epsilon
+    x0 = np.asarray(x0)
+
+    def g(w, func, x0, v, *args):
+        return func(x0 + w*v, *args)
+
+    if direction == 'random':
+        _grad = np.asanyarray(grad(x0, *args))
+        if _grad.ndim > 1:
+            raise ValueError("'random' can only be used with scalar valued"
+                             " func")
+        random_state = check_random_state(seed)
+        v = random_state.normal(0, 1, size=(x0.shape))
+        _args = (func, x0, v) + args
+        _func = g
+        vars = np.zeros((1,))
+        analytical_grad = np.dot(_grad, v)
+    elif direction == 'all':
+        _args = args
+        _func = func
+        vars = x0
+        analytical_grad = grad(x0, *args)
+    else:
+        raise ValueError("{} is not a valid string for "
+                         "``direction`` argument".format(direction))
+
+    return np.sqrt(np.sum(np.abs(
+        (analytical_grad - approx_fprime(vars, _func, step, *_args))**2
+    )))
+
+
+def approx_fhess_p(x0, p, fprime, epsilon, *args):
+    # calculate fprime(x0) first, as this may be cached by ScalarFunction
+    f1 = fprime(*((x0,) + args))
+    f2 = fprime(*((x0 + epsilon*p,) + args))
+    return (f2 - f1) / epsilon
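+
+# A hedged check: with the Rosenbrock helpers above,
+# ``approx_fhess_p(x, p, rosen_der, 1e-6)`` should be close to
+# ``rosen_hess_prod(x, p)``, since it forward-differences the gradient
+# along ``p``.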
+
+
+class _LineSearchError(RuntimeError):
+    pass
+
+
+def _line_search_wolfe12(f, fprime, xk, pk, gfk, old_fval, old_old_fval,
+                         **kwargs):
+    """
+    Same as line_search_wolfe1, but falls back to line_search_wolfe2 if a
+    suitable step length is not found there, and raises an exception if
+    neither search finds one.
+
+    Raises
+    ------
+    _LineSearchError
+        If no suitable step size is found
+
+    """
+
+    extra_condition = kwargs.pop('extra_condition', None)
+
+    ret = line_search_wolfe1(f, fprime, xk, pk, gfk,
+                             old_fval, old_old_fval,
+                             **kwargs)
+
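+    # `ret` follows line_search_wolfe1's return layout; callers in this
+    # module unpack it as (alpha_k, fc, gc, old_fval, old_old_fval, gfkp1).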
+    if ret[0] is not None and extra_condition is not None:
+        xp1 = xk + ret[0] * pk
+        if not extra_condition(ret[0], xp1, ret[3], ret[5]):
+            # Reject step if extra_condition fails
+            ret = (None,)
+
+    if ret[0] is None:
+        # line search failed: try different one.
+        with warnings.catch_warnings():
+            warnings.simplefilter('ignore', LineSearchWarning)
+            kwargs2 = {}
+            for key in ('c1', 'c2', 'amax'):
+                if key in kwargs:
+                    kwargs2[key] = kwargs[key]
+            ret = line_search_wolfe2(f, fprime, xk, pk, gfk,
+                                     old_fval, old_old_fval,
+                                     extra_condition=extra_condition,
+                                     **kwargs2)
+
+    if ret[0] is None:
+        raise _LineSearchError()
+
+    return ret
+
+
+def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf,
+              epsilon=_epsilon, maxiter=None, full_output=0, disp=1,
+              retall=0, callback=None, xrtol=0):
+    """
+    Minimize a function using the BFGS algorithm.
+
+    Parameters
+    ----------
+    f : callable ``f(x,*args)``
+        Objective function to be minimized.
+    x0 : ndarray
+        Initial guess.
+    fprime : callable ``f'(x,*args)``, optional
+        Gradient of f.
+    args : tuple, optional
+        Extra arguments passed to f and fprime.
+    gtol : float, optional
+        Terminate successfully if gradient norm is less than `gtol`.
+    norm : float, optional
+        Order of norm (Inf is max, -Inf is min).
+    epsilon : int or ndarray, optional
+        If `fprime` is approximated, use this value for the step size.
+    callback : callable, optional
+        An optional user-supplied function to call after each
+        iteration. Called as ``callback(xk)``, where ``xk`` is the
+        current parameter vector.
+    maxiter : int, optional
+        Maximum number of iterations to perform.
+    full_output : bool, optional
+        If True, return ``fopt``, ``func_calls``, ``grad_calls``, and
+        ``warnflag`` in addition to ``xopt``.
+    disp : bool, optional
+        Print convergence message if True.
+    retall : bool, optional
+        Return a list of results at each iteration if True.
+    xrtol : float, default: 0
+        Relative tolerance for `x`. Terminate successfully if step
+        size is less than ``xk * xrtol`` where ``xk`` is the current
+        parameter vector.
+
+    Returns
+    -------
+    xopt : ndarray
+        Parameters which minimize f, i.e., ``f(xopt) == fopt``.
+    fopt : float
+        Minimum value.
+    gopt : ndarray
+        Value of gradient at minimum, f'(xopt), which should be near 0.
+    Bopt : ndarray
+        Value of 1/f''(xopt), i.e., the inverse Hessian matrix.
+    func_calls : int
+        Number of function calls made.
+    grad_calls : int
+        Number of gradient calls made.
+    warnflag : integer
+        1 : Maximum number of iterations exceeded.
+        2 : Gradient and/or function calls not changing.
+        3 : NaN result encountered.
+    allvecs : list
+        The value of `xopt` at each iteration. Only returned if `retall` is
+        True.
+
+    Notes
+    -----
+    Optimize the function, `f`, whose gradient is given by `fprime`
+    using the quasi-Newton method of Broyden, Fletcher, Goldfarb,
+    and Shanno (BFGS).
+
+    See Also
+    --------
+    minimize: Interface to minimization algorithms for multivariate
+        functions. See ``method='BFGS'`` in particular.
+
+    References
+    ----------
+    Wright, and Nocedal 'Numerical Optimization', 1999, p. 198.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.optimize import fmin_bfgs
+    >>> def quadratic_cost(x, Q):
+    ...     return x @ Q @ x
+    ...
+    >>> x0 = np.array([-3, -4])
+    >>> cost_weight =  np.diag([1., 10.])
+    >>> # Note that a trailing comma is necessary for a tuple with a single element
+    >>> fmin_bfgs(quadratic_cost, x0, args=(cost_weight,))
+    Optimization terminated successfully.
+            Current function value: 0.000000
+            Iterations: 7                   # may vary
+            Function evaluations: 24        # may vary
+            Gradient evaluations: 8         # may vary
+    array([ 2.85169950e-06, -4.61820139e-07])
+
+    >>> def quadratic_cost_grad(x, Q):
+    ...     return 2 * Q @ x
+    ...
+    >>> fmin_bfgs(quadratic_cost, x0, quadratic_cost_grad, args=(cost_weight,))
+    Optimization terminated successfully.
+            Current function value: 0.000000
+            Iterations: 7
+            Function evaluations: 8
+            Gradient evaluations: 8
+    array([ 2.85916637e-06, -4.54371951e-07])
+
+    """
+    opts = {'gtol': gtol,
+            'norm': norm,
+            'eps': epsilon,
+            'disp': disp,
+            'maxiter': maxiter,
+            'return_all': retall}
+
+    res = _minimize_bfgs(f, x0, args, fprime, callback=callback, **opts)
+
+    if full_output:
+        retlist = (res['x'], res['fun'], res['jac'], res['hess_inv'],
+                   res['nfev'], res['njev'], res['status'])
+        if retall:
+            retlist += (res['allvecs'], )
+        return retlist
+    else:
+        if retall:
+            return res['x'], res['allvecs']
+        else:
+            return res['x']
+
+
+def _minimize_bfgs(fun, x0, args=(), jac=None, callback=None,
+                   gtol=1e-5, norm=Inf, eps=_epsilon, maxiter=None,
+                   disp=False, return_all=False, finite_diff_rel_step=None,
+                   xrtol=0, **unknown_options):
+    """
+    Minimization of scalar function of one or more variables using the
+    BFGS algorithm.
+
+    Options
+    -------
+    disp : bool
+        Set to True to print convergence messages.
+    maxiter : int
+        Maximum number of iterations to perform.
+    gtol : float
+        Terminate successfully if gradient norm is less than `gtol`.
+    norm : float
+        Order of norm (Inf is max, -Inf is min).
+    eps : float or ndarray
+        If `jac is None` the absolute step size used for numerical
+        approximation of the jacobian via forward differences.
+    return_all : bool, optional
+        Set to True to return a list of the best solution at each of the
+        iterations.
+    finite_diff_rel_step : None or array_like, optional
+        If `jac in ['2-point', '3-point', 'cs']` the relative step size to
+        use for numerical approximation of the jacobian. The absolute step
+        size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``,
+        possibly adjusted to fit into the bounds. For ``method='3-point'``
+        the sign of `h` is ignored. If None (default) then step is selected
+        automatically.
+    xrtol : float, default: 0
+        Relative tolerance for `x`. Terminate successfully if step size is
+        less than ``xk * xrtol`` where ``xk`` is the current parameter vector.
+    """
+    _check_unknown_options(unknown_options)
+    retall = return_all
+
+    x0 = asarray(x0).flatten()
+    if x0.ndim == 0:
+        x0.shape = (1,)
+    if maxiter is None:
+        maxiter = len(x0) * 200
+
+    sf = _prepare_scalar_function(fun, x0, jac, args=args, epsilon=eps,
+                                  finite_diff_rel_step=finite_diff_rel_step)
+
+    f = sf.fun
+    myfprime = sf.grad
+
+    old_fval = f(x0)
+    gfk = myfprime(x0)
+
+    k = 0
+    N = len(x0)
+    I = np.eye(N, dtype=int)
+    Hk = I
+
+    # Sets the initial step guess to dx ~ 1
+    old_old_fval = old_fval + np.linalg.norm(gfk) / 2
+
+    xk = x0
+    if retall:
+        allvecs = [x0]
+    warnflag = 0
+    gnorm = vecnorm(gfk, ord=norm)
+    while (gnorm > gtol) and (k < maxiter):
+        pk = -np.dot(Hk, gfk)
+        try:
+            alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
+                     _line_search_wolfe12(f, myfprime, xk, pk, gfk,
+                                          old_fval, old_old_fval, amin=1e-100, amax=1e100)
+        except _LineSearchError:
+            # Line search failed to find a better solution.
+            warnflag = 2
+            break
+
+        sk = alpha_k * pk
+        xkp1 = xk + sk
+
+        if retall:
+            allvecs.append(xkp1)
+        xk = xkp1
+        if gfkp1 is None:
+            gfkp1 = myfprime(xkp1)
+
+        yk = gfkp1 - gfk
+        gfk = gfkp1
+        if callback is not None:
+            callback(xk)
+        k += 1
+        gnorm = vecnorm(gfk, ord=norm)
+        if (gnorm <= gtol):
+            break
+
+        #  See Chapter 5 in  P.E. Frandsen, K. Jonasson, H.B. Nielsen,
+        #  O. Tingleff: "Unconstrained Optimization", IMM, DTU.  1999.
+        #  These notes are available here:
+        #  http://www2.imm.dtu.dk/documents/ftp/publlec.html
+        if (alpha_k*vecnorm(pk) <= xrtol*(xrtol + vecnorm(xk))):
+            break
+
+        if not np.isfinite(old_fval):
+            # We correctly found +-Inf as optimal value, or something went
+            # wrong.
+            warnflag = 2
+            break
+
+        rhok_inv = np.dot(yk, sk)
+        # this was handled in numeric, let it remaines for more safety
+        # Cryptic comment above is preserved for posterity. Future reader:
+        # consider change to condition below proposed in gh-1261/gh-17345.
+        if rhok_inv == 0.:
+            rhok = 1000.0
+            if disp:
+                print("Divide-by-zero encountered: rhok assumed large")
+        else:
+            rhok = 1. / rhok_inv
+
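+        # BFGS update of the inverse-Hessian approximation:
+        #     H_{k+1} = (I - rho*s*y.T) @ H_k @ (I - rho*y*s.T) + rho*s*s.T
+        # with s = sk, y = yk and rho = 1 / (y.T @ s).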
+        A1 = I - sk[:, np.newaxis] * yk[np.newaxis, :] * rhok
+        A2 = I - yk[:, np.newaxis] * sk[np.newaxis, :] * rhok
+        Hk = np.dot(A1, np.dot(Hk, A2)) + (rhok * sk[:, np.newaxis] *
+                                                 sk[np.newaxis, :])
+
+    fval = old_fval
+
+    if warnflag == 2:
+        msg = _status_message['pr_loss']
+    elif k >= maxiter:
+        warnflag = 1
+        msg = _status_message['maxiter']
+    elif np.isnan(gnorm) or np.isnan(fval) or np.isnan(xk).any():
+        warnflag = 3
+        msg = _status_message['nan']
+    else:
+        msg = _status_message['success']
+
+    if disp:
+        print("%s%s" % ("Warning: " if warnflag != 0 else "", msg))
+        print("         Current function value: %f" % fval)
+        print("         Iterations: %d" % k)
+        print("         Function evaluations: %d" % sf.nfev)
+        print("         Gradient evaluations: %d" % sf.ngev)
+
+    result = OptimizeResult(fun=fval, jac=gfk, hess_inv=Hk, nfev=sf.nfev,
+                            njev=sf.ngev, status=warnflag,
+                            success=(warnflag == 0), message=msg, x=xk,
+                            nit=k)
+    if retall:
+        result['allvecs'] = allvecs
+    return result
+
+
+def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon,
+            maxiter=None, full_output=0, disp=1, retall=0, callback=None):
+    """
+    Minimize a function using a nonlinear conjugate gradient algorithm.
+
+    Parameters
+    ----------
+    f : callable, ``f(x, *args)``
+        Objective function to be minimized. Here `x` must be a 1-D array of
+        the variables that are to be changed in the search for a minimum, and
+        `args` are the other (fixed) parameters of `f`.
+    x0 : ndarray
+        A user-supplied initial estimate of `xopt`, the optimal value of `x`.
+        It must be a 1-D array of values.
+    fprime : callable, ``fprime(x, *args)``, optional
+        A function that returns the gradient of `f` at `x`. Here `x` and `args`
+        are as described above for `f`. The returned value must be a 1-D array.
+        Defaults to None, in which case the gradient is approximated
+        numerically (see `epsilon`, below).
+    args : tuple, optional
+        Parameter values passed to `f` and `fprime`. Must be supplied whenever
+        additional fixed parameters are needed to completely specify the
+        functions `f` and `fprime`.
+    gtol : float, optional
+        Stop when the norm of the gradient is less than `gtol`.
+    norm : float, optional
+        Order to use for the norm of the gradient
+        (``-np.Inf`` is min, ``np.Inf`` is max).
+    epsilon : float or ndarray, optional
+        Step size(s) to use when `fprime` is approximated numerically. Can be a
+        scalar or a 1-D array. Defaults to ``sqrt(eps)``, with eps the
+        floating point machine precision.  Usually ``sqrt(eps)`` is about
+        1.5e-8.
+    maxiter : int, optional
+        Maximum number of iterations to perform. Default is ``200 * len(x0)``.
+    full_output : bool, optional
+        If True, return `fopt`, `func_calls`, `grad_calls`, and `warnflag` in
+        addition to `xopt`.  See the Returns section below for additional
+        information on optional return values.
+    disp : bool, optional
+        If True, return a convergence message, followed by `xopt`.
+    retall : bool, optional
+        If True, add to the returned values the results of each iteration.
+    callback : callable, optional
+        An optional user-supplied function, called after each iteration.
+        Called as ``callback(xk)``, where ``xk`` is the current parameter
+        vector.
+
+    Returns
+    -------
+    xopt : ndarray
+        Parameters which minimize f, i.e., ``f(xopt) == fopt``.
+    fopt : float, optional
+        Minimum value found, f(xopt). Only returned if `full_output` is True.
+    func_calls : int, optional
+        The number of function_calls made. Only returned if `full_output`
+        is True.
+    grad_calls : int, optional
+        The number of gradient calls made. Only returned if `full_output` is
+        True.
+    warnflag : int, optional
+        Integer value with warning status, only returned if `full_output` is
+        True.
+
+        0 : Success.
+
+        1 : The maximum number of iterations was exceeded.
+
+        2 : Gradient and/or function calls were not changing. May indicate
+            that precision was lost, i.e., the routine did not converge.
+
+        3 : NaN result encountered.
+
+    allvecs : list of ndarray, optional
+        List of arrays, containing the results at each iteration.
+        Only returned if `retall` is True.
+
+    See Also
+    --------
+    minimize : common interface to all `scipy.optimize` algorithms for
+               unconstrained and constrained minimization of multivariate
+               functions. It provides an alternative way to call
+               ``fmin_cg``, by specifying ``method='CG'``.
+
+    Notes
+    -----
+    This conjugate gradient algorithm is based on that of Polak and Ribiere
+    [1]_.
+
+    Conjugate gradient methods tend to work better when:
+
+    1. `f` has a unique global minimizing point, and no local minima or
+       other stationary points,
+    2. `f` is, at least locally, reasonably well approximated by a
+       quadratic function of the variables,
+    3. `f` is continuous and has a continuous gradient,
+    4. `fprime` is not too large, e.g., has a norm less than 1000,
+    5. The initial guess, `x0`, is reasonably close to `f` 's global
+       minimizing point, `xopt`.
+
+    References
+    ----------
+    .. [1] Wright & Nocedal, "Numerical Optimization", 1999, pp. 120-122.
+
+    Examples
+    --------
+    Example 1: seek the minimum value of the expression
+    ``a*u**2 + b*u*v + c*v**2 + d*u + e*v + f`` for given values
+    of the parameters and an initial guess ``(u, v) = (0, 0)``.
+
+    >>> import numpy as np
+    >>> args = (2, 3, 7, 8, 9, 10)  # parameter values
+    >>> def f(x, *args):
+    ...     u, v = x
+    ...     a, b, c, d, e, f = args
+    ...     return a*u**2 + b*u*v + c*v**2 + d*u + e*v + f
+    >>> def gradf(x, *args):
+    ...     u, v = x
+    ...     a, b, c, d, e, f = args
+    ...     gu = 2*a*u + b*v + d     # u-component of the gradient
+    ...     gv = b*u + 2*c*v + e     # v-component of the gradient
+    ...     return np.asarray((gu, gv))
+    >>> x0 = np.asarray((0, 0))  # Initial guess.
+    >>> from scipy import optimize
+    >>> res1 = optimize.fmin_cg(f, x0, fprime=gradf, args=args)
+    Optimization terminated successfully.
+             Current function value: 1.617021
+             Iterations: 4
+             Function evaluations: 8
+             Gradient evaluations: 8
+    >>> res1
+    array([-1.80851064, -0.25531915])
+
+    Example 2: solve the same problem using the `minimize` function.
+    (This `myopts` dictionary shows all of the available options,
+    although in practice only non-default values would be needed.
+    The returned value will be a dictionary.)
+
+    >>> opts = {'maxiter' : None,    # default value.
+    ...         'disp' : True,    # non-default value.
+    ...         'gtol' : 1e-5,    # default value.
+    ...         'norm' : np.inf,  # default value.
+    ...         'eps' : 1.4901161193847656e-08}  # default value.
+    >>> res2 = optimize.minimize(f, x0, jac=gradf, args=args,
+    ...                          method='CG', options=opts)
+    Optimization terminated successfully.
+            Current function value: 1.617021
+            Iterations: 4
+            Function evaluations: 8
+            Gradient evaluations: 8
+    >>> res2.x  # minimum found
+    array([-1.80851064, -0.25531915])
+
+    """
+    opts = {'gtol': gtol,
+            'norm': norm,
+            'eps': epsilon,
+            'disp': disp,
+            'maxiter': maxiter,
+            'return_all': retall}
+
+    res = _minimize_cg(f, x0, args, fprime, callback=callback, **opts)
+
+    if full_output:
+        retlist = res['x'], res['fun'], res['nfev'], res['njev'], res['status']
+        if retall:
+            retlist += (res['allvecs'], )
+        return retlist
+    else:
+        if retall:
+            return res['x'], res['allvecs']
+        else:
+            return res['x']
+
+
+def _minimize_cg(fun, x0, args=(), jac=None, callback=None,
+                 gtol=1e-5, norm=Inf, eps=_epsilon, maxiter=None,
+                 disp=False, return_all=False, finite_diff_rel_step=None,
+                 **unknown_options):
+    """
+    Minimization of scalar function of one or more variables using the
+    conjugate gradient algorithm.
+
+    Options
+    -------
+    disp : bool
+        Set to True to print convergence messages.
+    maxiter : int
+        Maximum number of iterations to perform.
+    gtol : float
+        Gradient norm must be less than `gtol` before successful
+        termination.
+    norm : float
+        Order of norm (Inf is max, -Inf is min).
+    eps : float or ndarray
+        If `jac is None` the absolute step size used for numerical
+        approximation of the jacobian via forward differences.
+    return_all : bool, optional
+        Set to True to return a list of the best solution at each of the
+        iterations.
+    finite_diff_rel_step : None or array_like, optional
+        If `jac in ['2-point', '3-point', 'cs']` the relative step size to
+        use for numerical approximation of the jacobian. The absolute step
+        size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``,
+        possibly adjusted to fit into the bounds. For ``method='3-point'``
+        the sign of `h` is ignored. If None (default) then step is selected
+        automatically.
+    """
+    _check_unknown_options(unknown_options)
+
+    retall = return_all
+
+    x0 = asarray(x0).flatten()
+    if maxiter is None:
+        maxiter = len(x0) * 200
+
+    sf = _prepare_scalar_function(fun, x0, jac=jac, args=args, epsilon=eps,
+                                  finite_diff_rel_step=finite_diff_rel_step)
+
+    f = sf.fun
+    myfprime = sf.grad
+
+    old_fval = f(x0)
+    gfk = myfprime(x0)
+
+    k = 0
+    xk = x0
+    # Sets the initial step guess to dx ~ 1
+    old_old_fval = old_fval + np.linalg.norm(gfk) / 2
+
+    if retall:
+        allvecs = [xk]
+    warnflag = 0
+    pk = -gfk
+    gnorm = vecnorm(gfk, ord=norm)
+
+    sigma_3 = 0.01
+
+    while (gnorm > gtol) and (k < maxiter):
+        deltak = np.dot(gfk, gfk)
+
+        cached_step = [None]
+
+        def polak_ribiere_powell_step(alpha, gfkp1=None):
+            xkp1 = xk + alpha * pk
+            if gfkp1 is None:
+                gfkp1 = myfprime(xkp1)
+            yk = gfkp1 - gfk
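+            # Polak-Ribiere+ coefficient:
+            #     beta_k = max(0, yk . gfkp1 / (gfk . gfk))
+            # (deltak caches gfk . gfk from the top of the outer loop)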
+            beta_k = max(0, np.dot(yk, gfkp1) / deltak)
+            pkp1 = -gfkp1 + beta_k * pk
+            gnorm = vecnorm(gfkp1, ord=norm)
+            return (alpha, xkp1, pkp1, gfkp1, gnorm)
+
+        def descent_condition(alpha, xkp1, fp1, gfkp1):
+            # Polak-Ribiere+ needs an explicit check of a sufficient
+            # descent condition, which is not guaranteed by strong Wolfe.
+            #
+            # See Gilbert & Nocedal, "Global convergence properties of
+            # conjugate gradient methods for optimization",
+            # SIAM J. Optimization 2, 21 (1992).
+            cached_step[:] = polak_ribiere_powell_step(alpha, gfkp1)
+            alpha, xk, pk, gfk, gnorm = cached_step
+
+            # Accept step if it leads to convergence.
+            if gnorm <= gtol:
+                return True
+
+            # Accept step if sufficient descent condition applies.
+            return np.dot(pk, gfk) <= -sigma_3 * np.dot(gfk, gfk)
+
+        try:
+            alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
+                     _line_search_wolfe12(f, myfprime, xk, pk, gfk, old_fval,
+                                          old_old_fval, c2=0.4, amin=1e-100, amax=1e100,
+                                          extra_condition=descent_condition)
+        except _LineSearchError:
+            # Line search failed to find a better solution.
+            warnflag = 2
+            break
+
+        # Reuse already computed results if possible
+        if alpha_k == cached_step[0]:
+            alpha_k, xk, pk, gfk, gnorm = cached_step
+        else:
+            alpha_k, xk, pk, gfk, gnorm = polak_ribiere_powell_step(alpha_k, gfkp1)
+
+        if retall:
+            allvecs.append(xk)
+        if callback is not None:
+            callback(xk)
+        k += 1
+
+    fval = old_fval
+    if warnflag == 2:
+        msg = _status_message['pr_loss']
+    elif k >= maxiter:
+        warnflag = 1
+        msg = _status_message['maxiter']
+    elif np.isnan(gnorm) or np.isnan(fval) or np.isnan(xk).any():
+        warnflag = 3
+        msg = _status_message['nan']
+    else:
+        msg = _status_message['success']
+
+    if disp:
+        print("%s%s" % ("Warning: " if warnflag != 0 else "", msg))
+        print("         Current function value: %f" % fval)
+        print("         Iterations: %d" % k)
+        print("         Function evaluations: %d" % sf.nfev)
+        print("         Gradient evaluations: %d" % sf.ngev)
+
+    result = OptimizeResult(fun=fval, jac=gfk, nfev=sf.nfev,
+                            njev=sf.ngev, status=warnflag,
+                            success=(warnflag == 0), message=msg, x=xk,
+                            nit=k)
+    if retall:
+        result['allvecs'] = allvecs
+    return result
+
+
+def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5,
+             epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0,
+             callback=None):
+    """
+    Unconstrained minimization of a function using the Newton-CG method.
+
+    Parameters
+    ----------
+    f : callable ``f(x, *args)``
+        Objective function to be minimized.
+    x0 : ndarray
+        Initial guess.
+    fprime : callable ``f'(x, *args)``
+        Gradient of f.
+    fhess_p : callable ``fhess_p(x, p, *args)``, optional
+        Function which computes the Hessian of f times an
+        arbitrary vector, p.
+    fhess : callable ``fhess(x, *args)``, optional
+        Function to compute the Hessian matrix of f.
+    args : tuple, optional
+        Extra arguments passed to f, fprime, fhess_p, and fhess
+        (the same set of extra arguments is supplied to all of
+        these functions).
+    epsilon : float or ndarray, optional
+        If fhess is approximated, use this value for the step size.
+    callback : callable, optional
+        An optional user-supplied function which is called after
+        each iteration. Called as callback(xk), where xk is the
+        current parameter vector.
+    avextol : float, optional
+        Convergence is assumed when the average relative error in
+        the minimizer falls below this amount.
+    maxiter : int, optional
+        Maximum number of iterations to perform.
+    full_output : bool, optional
+        If True, return the optional outputs.
+    disp : bool, optional
+        If True, print convergence message.
+    retall : bool, optional
+        If True, return a list of results at each iteration.
+
+    Returns
+    -------
+    xopt : ndarray
+        Parameters which minimize f, i.e., ``f(xopt) == fopt``.
+    fopt : float
+        Value of the function at xopt, i.e., ``fopt = f(xopt)``.
+    fcalls : int
+        Number of function calls made.
+    gcalls : int
+        Number of gradient calls made.
+    hcalls : int
+        Number of Hessian calls made.
+    warnflag : int
+        Warnings generated by the algorithm.
+        1 : Maximum number of iterations exceeded.
+        2 : Line search failure (precision loss).
+        3 : NaN result encountered.
+    allvecs : list
+        The result at each iteration, if retall is True (see below).
+
+    See also
+    --------
+    minimize: Interface to minimization algorithms for multivariate
+        functions. See the 'Newton-CG' `method` in particular.
+
+    Notes
+    -----
+    Only one of `fhess_p` or `fhess` needs to be given. If `fhess`
+    is provided, then `fhess_p` is ignored. `fhess_p` must compute the
+    Hessian times an arbitrary vector. If neither `fhess` nor
+    `fhess_p` is provided, the Hessian product is approximated using
+    finite differences on `fprime`.
+
+    Newton-CG methods are also called truncated Newton methods. This
+    function differs from scipy.optimize.fmin_tnc because
+
+    1. scipy.optimize.fmin_ncg is written purely in Python using NumPy
+        and SciPy, while scipy.optimize.fmin_tnc calls a C function.
+    2. scipy.optimize.fmin_ncg is only for unconstrained minimization
+        while scipy.optimize.fmin_tnc is for unconstrained minimization
+        or box constrained minimization. (Box constraints give
+        lower and upper bounds for each variable separately.)
+
+    References
+    ----------
+    Wright & Nocedal, 'Numerical Optimization', 1999, p. 140.
+
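+    Examples
+    --------
+    A minimal, illustrative sketch; the quadratic objective and its
+    gradient below are assumptions for demonstration only:
+
+    >>> import numpy as np
+    >>> from scipy import optimize
+    >>> def f(x):
+    ...     return x[0] ** 2 + x[1] ** 2
+    >>> def grad(x):
+    ...     return np.array([2.0 * x[0], 2.0 * x[1]])
+    >>> xopt = optimize.fmin_ncg(f, np.array([1.0, 1.0]), grad, disp=0)
+    >>> bool(np.allclose(xopt, 0.0, atol=1e-4))
+    True
+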
+    """
+    opts = {'xtol': avextol,
+            'eps': epsilon,
+            'maxiter': maxiter,
+            'disp': disp,
+            'return_all': retall}
+
+    res = _minimize_newtoncg(f, x0, args, fprime, fhess, fhess_p,
+                             callback=callback, **opts)
+
+    if full_output:
+        retlist = (res['x'], res['fun'], res['nfev'], res['njev'],
+                   res['nhev'], res['status'])
+        if retall:
+            retlist += (res['allvecs'], )
+        return retlist
+    else:
+        if retall:
+            return res['x'], res['allvecs']
+        else:
+            return res['x']
+
+
+def _minimize_newtoncg(fun, x0, args=(), jac=None, hess=None, hessp=None,
+                       callback=None, xtol=1e-5, eps=_epsilon, maxiter=None,
+                       disp=False, return_all=False,
+                       **unknown_options):
+    """
+    Minimization of scalar function of one or more variables using the
+    Newton-CG algorithm.
+
+    Note that the `jac` parameter (Jacobian) is required.
+
+    Options
+    -------
+    disp : bool
+        Set to True to print convergence messages.
+    xtol : float
+        Average relative error in solution `xopt` acceptable for
+        convergence.
+    maxiter : int
+        Maximum number of iterations to perform.
+    eps : float or ndarray
+        If `hessp` is approximated, use this value for the step size.
+    return_all : bool, optional
+        Set to True to return a list of the best solution at each of the
+        iterations.
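+
+    A minimal usage sketch through the public interface (the objective
+    here is an assumption for illustration):
+
+    >>> import numpy as np
+    >>> from scipy.optimize import minimize
+    >>> res = minimize(lambda x: x @ x, np.ones(3), jac=lambda x: 2 * x,
+    ...                method='Newton-CG')
+    >>> bool(res.success)
+    True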
+    """
+    _check_unknown_options(unknown_options)
+    if jac is None:
+        raise ValueError('Jacobian is required for Newton-CG method')
+    fhess_p = hessp
+    fhess = hess
+    avextol = xtol
+    epsilon = eps
+    retall = return_all
+
+    x0 = asarray(x0).flatten()
+    # TODO: add hessp (callable or FD) to ScalarFunction?
+    sf = _prepare_scalar_function(
+        fun, x0, jac, args=args, epsilon=eps, hess=hess
+    )
+    f = sf.fun
+    fprime = sf.grad
+    _h = sf.hess(x0)
+
+    # Logic for hess/hessp
+    # - If a callable(hess) is provided, then use that
+    # - If hess is one of FD_METHODS, or the output from hess(x) is a LinearOperator,
+    #   then create a hessp function using those.
+    # - If hess is None but you have callable(hessp) then use the hessp.
+    # - If hess and hessp are None then approximate hessp using the grad/jac.
+
+    if (hess in FD_METHODS or isinstance(_h, LinearOperator)):
+        fhess = None
+
+        def _hessp(x, p, *args):
+            return sf.hess(x).dot(p)
+
+        fhess_p = _hessp
+
+    def terminate(warnflag, msg):
+        if disp:
+            print(msg)
+            print("         Current function value: %f" % old_fval)
+            print("         Iterations: %d" % k)
+            print("         Function evaluations: %d" % sf.nfev)
+            print("         Gradient evaluations: %d" % sf.ngev)
+            print("         Hessian evaluations: %d" % hcalls)
+        fval = old_fval
+        result = OptimizeResult(fun=fval, jac=gfk, nfev=sf.nfev,
+                                njev=sf.ngev, nhev=hcalls, status=warnflag,
+                                success=(warnflag == 0), message=msg, x=xk,
+                                nit=k)
+        if retall:
+            result['allvecs'] = allvecs
+        return result
+
+    hcalls = 0
+    if maxiter is None:
+        maxiter = len(x0)*200
+    cg_maxiter = 20*len(x0)
+
+    xtol = len(x0) * avextol
+    update = [2 * xtol]
+    xk = x0
+    if retall:
+        allvecs = [xk]
+    k = 0
+    gfk = None
+    old_fval = f(x0)
+    old_old_fval = None
+    float64eps = np.finfo(np.float64).eps
+    while np.add.reduce(np.abs(update)) > xtol:
+        if k >= maxiter:
+            msg = "Warning: " + _status_message['maxiter']
+            return terminate(1, msg)
+        # Compute a search direction pk by applying the CG method to
+        #  del2 f(xk) p = - grad f(xk) starting from 0.
+        b = -fprime(xk)
+        maggrad = np.add.reduce(np.abs(b))
+        eta = np.min([0.5, np.sqrt(maggrad)])
+        termcond = eta * maggrad
+        xsupi = zeros(len(x0), dtype=x0.dtype)
+        ri = -b
+        psupi = -ri
+        i = 0
+        dri0 = np.dot(ri, ri)
+
+        if fhess is not None:  # compute the Hessian only once, outside the CG loop
+            A = sf.hess(xk)
+            hcalls = hcalls + 1
+
+        for k2 in range(cg_maxiter):
+            if np.add.reduce(np.abs(ri)) <= termcond:
+                break
+            if fhess is None:
+                if fhess_p is None:
+                    Ap = approx_fhess_p(xk, psupi, fprime, epsilon)
+                else:
+                    Ap = fhess_p(xk, psupi, *args)
+                    hcalls = hcalls + 1
+            else:
+                if isinstance(A, HessianUpdateStrategy):
+                    # if hess was supplied as a HessianUpdateStrategy
+                    Ap = A.dot(psupi)
+                else:
+                    Ap = np.dot(A, psupi)
+            # check curvature
+            Ap = asarray(Ap).squeeze()  # get rid of matrices...
+            curv = np.dot(psupi, Ap)
+            if 0 <= curv <= 3 * float64eps:
+                break
+            elif curv < 0:
+                if (i > 0):
+                    break
+                else:
+                    # fall back to steepest descent direction
+                    xsupi = dri0 / (-curv) * b
+                    break
+            alphai = dri0 / curv
+            xsupi = xsupi + alphai * psupi
+            ri = ri + alphai * Ap
+            dri1 = np.dot(ri, ri)
+            betai = dri1 / dri0
+            psupi = -ri + betai * psupi
+            i = i + 1
+            dri0 = dri1          # update np.dot(ri,ri) for next time.
+        else:
+            # curvature keeps increasing, bail out
+            msg = ("Warning: CG iterations didn't converge. The Hessian is not "
+                   "positive definite.")
+            return terminate(3, msg)
+
+        pk = xsupi  # search direction is solution to system.
+        gfk = -b    # gradient at xk
+
+        try:
+            alphak, fc, gc, old_fval, old_old_fval, gfkp1 = \
+                     _line_search_wolfe12(f, fprime, xk, pk, gfk,
+                                          old_fval, old_old_fval)
+        except _LineSearchError:
+            # Line search failed to find a better solution.
+            msg = "Warning: " + _status_message['pr_loss']
+            return terminate(2, msg)
+
+        update = alphak * pk
+        xk = xk + update        # upcast if necessary
+        if callback is not None:
+            callback(xk)
+        if retall:
+            allvecs.append(xk)
+        k += 1
+    else:
+        if np.isnan(old_fval) or np.isnan(update).any():
+            return terminate(3, _status_message['nan'])
+
+        msg = _status_message['success']
+        return terminate(0, msg)
+
+
+def fminbound(func, x1, x2, args=(), xtol=1e-5, maxfun=500,
+              full_output=0, disp=1):
+    """Bounded minimization for scalar functions.
+
+    Parameters
+    ----------
+    func : callable f(x,*args)
+        Objective function to be minimized (must accept and return scalars).
+    x1, x2 : float or array scalar
+        Finite optimization bounds.
+    args : tuple, optional
+        Extra arguments passed to function.
+    xtol : float, optional
+        The convergence tolerance.
+    maxfun : int, optional
+        Maximum number of function evaluations allowed.
+    full_output : bool, optional
+        If True, return optional outputs.
+    disp : int, optional
+        If non-zero, print messages.
+            0 : no message printing.
+            1 : non-convergence notification messages only.
+            2 : print a message on convergence too.
+            3 : print iteration results.
+
+    Returns
+    -------
+    xopt : ndarray
+        Parameters (over given interval) which minimize the
+        objective function.
+    fval : number
+        The function value evaluated at the minimizer.
+    ierr : int
+        An error flag (0 if converged, 1 if maximum number of
+        function calls reached).
+    numfunc : int
+        The number of function calls made.
+
+    See also
+    --------
+    minimize_scalar: Interface to minimization algorithms for scalar
+        univariate functions. See the 'Bounded' `method` in particular.
+
+    Notes
+    -----
+    Finds a local minimizer of the scalar function `func` in the
+    interval x1 < xopt < x2 using Brent's method. (See `brent`
+    for auto-bracketing.)
+
+    References
+    ----------
+    .. [1] Forsythe, G.E., M. A. Malcolm, and C. B. Moler. "Computer Methods
+           for Mathematical Computations." Prentice-Hall Series in Automatic
+           Computation 259 (1977).
+    .. [2] Brent, Richard P. Algorithms for Minimization Without Derivatives.
+           Courier Corporation, 2013.
+
+    Examples
+    --------
+    `fminbound` finds the minimizer of the function in the given range.
+    The following examples illustrate this.
+
+    >>> from scipy import optimize
+    >>> def f(x):
+    ...     return (x-1)**2
+    >>> minimizer = optimize.fminbound(f, -4, 4)
+    >>> minimizer
+    1.0
+    >>> minimum = f(minimizer)
+    >>> minimum
+    0.0
+    >>> minimizer = optimize.fminbound(f, 3, 4)
+    >>> minimizer
+    3.000005960860986
+    >>> minimum = f(minimizer)
+    >>> minimum
+    4.000023843479476
+    """
+    options = {'xatol': xtol,
+               'maxiter': maxfun,
+               'disp': disp}
+
+    res = _minimize_scalar_bounded(func, (x1, x2), args, **options)
+    if full_output:
+        return res['x'], res['fun'], res['status'], res['nfev']
+    else:
+        return res['x']
+
+
+def _minimize_scalar_bounded(func, bounds, args=(),
+                             xatol=1e-5, maxiter=500, disp=0,
+                             **unknown_options):
+    """
+    Options
+    -------
+    maxiter : int
+        Maximum number of iterations to perform.
+    disp : int, optional
+        If non-zero, print messages.
+            0 : no message printing.
+            1 : non-convergence notification messages only.
+            2 : print a message on convergence too.
+            3 : print iteration results.
+    xatol : float
+        Absolute error in solution `xopt` acceptable for convergence.
+
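+    A minimal usage sketch through the public interface (the objective
+    is an assumption for illustration):
+
+    >>> from scipy.optimize import minimize_scalar
+    >>> res = minimize_scalar(lambda x: (x - 2.0) ** 2, bounds=(0, 4),
+    ...                       method='bounded')
+    >>> round(res.x, 4)
+    2.0
+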
+    """
+    _check_unknown_options(unknown_options)
+    maxfun = maxiter
+    # Test bounds are of correct form
+    if len(bounds) != 2:
+        raise ValueError('bounds must have two elements.')
+    x1, x2 = bounds
+
+    if not (is_finite_scalar(x1) and is_finite_scalar(x2)):
+        raise ValueError("Optimization bounds must be finite scalars.")
+
+    if x1 > x2:
+        raise ValueError("The lower bound exceeds the upper bound.")
+
+    flag = 0
+    header = ' Func-count     x          f(x)          Procedure'
+    step = '       initial'
+
+    sqrt_eps = sqrt(2.2e-16)
+    golden_mean = 0.5 * (3.0 - sqrt(5.0))
+    a, b = x1, x2
+    fulc = a + golden_mean * (b - a)
+    nfc, xf = fulc, fulc
+    rat = e = 0.0
+    x = xf
+    fx = func(x, *args)
+    num = 1
+    fmin_data = (1, xf, fx)
+    fu = np.inf
+
+    ffulc = fnfc = fx
+    xm = 0.5 * (a + b)
+    tol1 = sqrt_eps * np.abs(xf) + xatol / 3.0
+    tol2 = 2.0 * tol1
+
+    if disp > 2:
+        print(" ")
+        print(header)
+        print("%5.0f   %12.6g %12.6g %s" % (fmin_data + (step,)))
+
+    while (np.abs(xf - xm) > (tol2 - 0.5 * (b - a))):
+        golden = 1
+        # Check for parabolic fit
+        if np.abs(e) > tol1:
+            golden = 0
+            r = (xf - nfc) * (fx - ffulc)
+            q = (xf - fulc) * (fx - fnfc)
+            p = (xf - fulc) * q - (xf - nfc) * r
+            q = 2.0 * (q - r)
+            if q > 0.0:
+                p = -p
+            q = np.abs(q)
+            r = e
+            e = rat
+
+            # Check for acceptability of parabola
+            if ((np.abs(p) < np.abs(0.5*q*r)) and (p > q*(a - xf)) and
+                    (p < q * (b - xf))):
+                rat = (p + 0.0) / q
+                x = xf + rat
+                step = '       parabolic'
+
+                if ((x - a) < tol2) or ((b - x) < tol2):
+                    si = np.sign(xm - xf) + ((xm - xf) == 0)
+                    rat = tol1 * si
+            else:      # do a golden-section step
+                golden = 1
+
+        if golden:  # do a golden-section step
+            if xf >= xm:
+                e = a - xf
+            else:
+                e = b - xf
+            rat = golden_mean*e
+            step = '       golden'
+
+        si = np.sign(rat) + (rat == 0)
+        x = xf + si * np.maximum(np.abs(rat), tol1)
+        fu = func(x, *args)
+        num += 1
+        fmin_data = (num, x, fu)
+        if disp > 2:
+            print("%5.0f   %12.6g %12.6g %s" % (fmin_data + (step,)))
+
+        if fu <= fx:
+            if x >= xf:
+                a = xf
+            else:
+                b = xf
+            fulc, ffulc = nfc, fnfc
+            nfc, fnfc = xf, fx
+            xf, fx = x, fu
+        else:
+            if x < xf:
+                a = x
+            else:
+                b = x
+            if (fu <= fnfc) or (nfc == xf):
+                fulc, ffulc = nfc, fnfc
+                nfc, fnfc = x, fu
+            elif (fu <= ffulc) or (fulc == xf) or (fulc == nfc):
+                fulc, ffulc = x, fu
+
+        xm = 0.5 * (a + b)
+        tol1 = sqrt_eps * np.abs(xf) + xatol / 3.0
+        tol2 = 2.0 * tol1
+
+        if num >= maxfun:
+            flag = 1
+            break
+
+    if np.isnan(xf) or np.isnan(fx) or np.isnan(fu):
+        flag = 2
+
+    fval = fx
+    if disp > 0:
+        _endprint(x, flag, fval, maxfun, xatol, disp)
+
+    result = OptimizeResult(fun=fval, status=flag, success=(flag == 0),
+                            message={0: 'Solution found.',
+                                     1: 'Maximum number of function calls '
+                                        'reached.',
+                                     2: _status_message['nan']}.get(flag, ''),
+                            x=xf, nfev=num, nit=num)
+
+    return result
+
+
+class Brent:
+    # need to rethink design of __init__
+    def __init__(self, func, args=(), tol=1.48e-8, maxiter=500,
+                 full_output=0, disp=0):
+        self.func = func
+        self.args = args
+        self.tol = tol
+        self.maxiter = maxiter
+        self._mintol = 1.0e-11
+        self._cg = 0.3819660
+        self.xmin = None
+        self.fval = None
+        self.iter = 0
+        self.funcalls = 0
+        self.disp = disp
+
+    # need to rethink design of set_bracket (new options, etc.)
+    def set_bracket(self, brack=None):
+        self.brack = brack
+
+    def get_bracket_info(self):
+        #set up
+        func = self.func
+        args = self.args
+        brack = self.brack
+        ### BEGIN core bracket_info code ###
+        ### carefully DOCUMENT any CHANGES in core ##
+        if brack is None:
+            xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args)
+        elif len(brack) == 2:
+            xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0],
+                                                       xb=brack[1], args=args)
+        elif len(brack) == 3:
+            xa, xb, xc = brack
+            if (xa > xc):  # swap so xa < xc can be assumed
+                xc, xa = xa, xc
+            if not ((xa < xb) and (xb < xc)):
+                raise ValueError(
+                    "Bracketing values (xa, xb, xc) do not"
+                    " fulfill this requirement: (xa < xb) and (xb < xc)"
+                )
+            fa = func(*((xa,) + args))
+            fb = func(*((xb,) + args))
+            fc = func(*((xc,) + args))
+            if not ((fb < fa) and (fb < fc)):
+                raise ValueError(
+                    "Bracketing values (xa, xb, xc) do not fulfill"
+                    " this requirement: (f(xb) < f(xa)) and (f(xb) < f(xc))"
+                )
+
+            funcalls = 3
+        else:
+            raise ValueError("Bracketing interval must be "
+                             "length 2 or 3 sequence.")
+        ### END core bracket_info code ###
+
+        return xa, xb, xc, fa, fb, fc, funcalls
+
+    def optimize(self):
+        # set up for optimization
+        func = self.func
+        xa, xb, xc, fa, fb, fc, funcalls = self.get_bracket_info()
+        _mintol = self._mintol
+        _cg = self._cg
+        #################################
+        #BEGIN CORE ALGORITHM
+        #################################
+        x = w = v = xb
+        fw = fv = fx = fb
+        if (xa < xc):
+            a = xa
+            b = xc
+        else:
+            a = xc
+            b = xa
+        deltax = 0.0
+        iter = 0
+
+        if self.disp > 2:
+            print(" ")
+            print(f"{'Func-count':^12} {'x':^12} {'f(x)': ^12}")
+            print(f"{funcalls:^12g} {x:^12.6g} {fx:^12.6g}")
+
+        while (iter < self.maxiter):
+            tol1 = self.tol * np.abs(x) + _mintol
+            tol2 = 2.0 * tol1
+            xmid = 0.5 * (a + b)
+            # check for convergence
+            if np.abs(x - xmid) < (tol2 - 0.5 * (b - a)):
+                break
+            # XXX In the first iteration, rat is only bound in the true case
+            # of this conditional. This used to cause an UnboundLocalError
+            # (gh-4140). It should be set before the if (but to what?).
+            if (np.abs(deltax) <= tol1):
+                if (x >= xmid):
+                    deltax = a - x       # do a golden section step
+                else:
+                    deltax = b - x
+                rat = _cg * deltax
+            else:                              # do a parabolic step
+                tmp1 = (x - w) * (fx - fv)
+                tmp2 = (x - v) * (fx - fw)
+                p = (x - v) * tmp2 - (x - w) * tmp1
+                tmp2 = 2.0 * (tmp2 - tmp1)
+                if (tmp2 > 0.0):
+                    p = -p
+                tmp2 = np.abs(tmp2)
+                dx_temp = deltax
+                deltax = rat
+                # check parabolic fit
+                if ((p > tmp2 * (a - x)) and (p < tmp2 * (b - x)) and
+                        (np.abs(p) < np.abs(0.5 * tmp2 * dx_temp))):
+                    rat = p * 1.0 / tmp2        # if parabolic step is useful.
+                    u = x + rat
+                    if ((u - a) < tol2 or (b - u) < tol2):
+                        if xmid - x >= 0:
+                            rat = tol1
+                        else:
+                            rat = -tol1
+                else:
+                    if (x >= xmid):
+                        deltax = a - x  # if not, do a golden-section step
+                    else:
+                        deltax = b - x
+                    rat = _cg * deltax
+
+            if (np.abs(rat) < tol1):            # update by at least tol1
+                if rat >= 0:
+                    u = x + tol1
+                else:
+                    u = x - tol1
+            else:
+                u = x + rat
+            fu = func(*((u,) + self.args))      # calculate new output value
+            funcalls += 1
+
+            if (fu > fx):                 # if it's bigger than current
+                if (u < x):
+                    a = u
+                else:
+                    b = u
+                if (fu <= fw) or (w == x):
+                    v = w
+                    w = u
+                    fv = fw
+                    fw = fu
+                elif (fu <= fv) or (v == x) or (v == w):
+                    v = u
+                    fv = fu
+            else:
+                if (u >= x):
+                    a = x
+                else:
+                    b = x
+                v = w
+                w = x
+                x = u
+                fv = fw
+                fw = fx
+                fx = fu
+
+            if self.disp > 2:
+                print(f"{funcalls:^12g} {x:^12.6g} {fx:^12.6g}")
+
+            iter += 1
+        #################################
+        #END CORE ALGORITHM
+        #################################
+
+        self.xmin = x
+        self.fval = fx
+        self.iter = iter
+        self.funcalls = funcalls
+
+    def get_result(self, full_output=False):
+        if full_output:
+            return self.xmin, self.fval, self.iter, self.funcalls
+        else:
+            return self.xmin
+
+
+def brent(func, args=(), brack=None, tol=1.48e-8, full_output=0, maxiter=500):
+    """
+    Given a function of one variable and a possible bracket, return
+    the local minimum of the function isolated to a fractional precision
+    of tol.
+
+    Parameters
+    ----------
+    func : callable f(x,*args)
+        Objective function.
+    args : tuple, optional
+        Additional arguments (if present).
+    brack : tuple, optional
+        Either a triple ``(xa, xb, xc)`` satisfying ``xa < xb < xc`` and
+        ``func(xb) < func(xa) and func(xb) < func(xc)``, or a pair
+        ``(xa, xb)`` to be used as initial points for a downhill bracket
+        search (see `scipy.optimize.bracket`).
+        The minimizer ``x`` will not necessarily satisfy ``xa <= x <= xb``.
+    tol : float, optional
+        Relative error in solution `xopt` acceptable for convergence.
+    full_output : bool, optional
+        If True, return all output args (xmin, fval, iter, funcalls).
+    maxiter : int, optional
+        Maximum number of iterations in solution.
+
+    Returns
+    -------
+    xmin : ndarray
+        Optimum point.
+    fval : float
+        Optimum function value.
+    iter : int
+        Number of iterations.
+    funcalls : int
+        Number of objective function evaluations made.
+
+    See also
+    --------
+    minimize_scalar: Interface to minimization algorithms for scalar
+        univariate functions. See the 'Brent' `method` in particular.
+
+    Notes
+    -----
+    Uses inverse parabolic interpolation when possible to speed up
+    convergence of the golden section method.
+
+    Does not ensure that the minimum lies in the range specified by
+    `brack`. See `fminbound`.
+
+    Examples
+    --------
+    We illustrate the behaviour of the function when `brack` is of
+    size 2 and 3, respectively. In the case where `brack` is of the
+    form ``(xa, xb)``, the output need not necessarily lie in the
+    range ``(xa, xb)``.
+
+    >>> def f(x):
+    ...     return x**2
+
+    >>> from scipy import optimize
+
+    >>> minimum = optimize.brent(f,brack=(1,2))
+    >>> minimum
+    0.0
+    >>> minimum = optimize.brent(f,brack=(-1,0.5,2))
+    >>> minimum
+    -2.7755575615628914e-17
+
+    """
+    options = {'xtol': tol,
+               'maxiter': maxiter}
+    res = _minimize_scalar_brent(func, brack, args, **options)
+    if full_output:
+        return res['x'], res['fun'], res['nit'], res['nfev']
+    else:
+        return res['x']
+
+
+def _minimize_scalar_brent(func, brack=None, args=(), xtol=1.48e-8,
+                           maxiter=500, disp=0,
+                           **unknown_options):
+    """
+    Options
+    -------
+    maxiter : int
+        Maximum number of iterations to perform.
+    xtol : float
+        Relative error in solution `xopt` acceptable for convergence.
+    disp : int, optional
+        If non-zero, print messages.
+            0 : no message printing.
+            1 : non-convergence notification messages only.
+            2 : print a message on convergence too.
+            3 : print iteration results.
+
+    Notes
+    -----
+    Uses inverse parabolic interpolation when possible to speed up
+    convergence of the golden section method.
+
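+    A minimal usage sketch through the public interface (the objective
+    is an assumption for illustration):
+
+    >>> from scipy.optimize import minimize_scalar
+    >>> res = minimize_scalar(lambda x: (x - 1.5) ** 2, method='brent')
+    >>> round(res.x, 6)
+    1.5
+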
+    """
+    _check_unknown_options(unknown_options)
+    tol = xtol
+    if tol < 0:
+        raise ValueError('tolerance should be >= 0, got %r' % tol)
+
+    brent = Brent(func=func, args=args, tol=tol,
+                  full_output=True, maxiter=maxiter, disp=disp)
+    brent.set_bracket(brack)
+    brent.optimize()
+    x, fval, nit, nfev = brent.get_result(full_output=True)
+
+    success = nit < maxiter and not (np.isnan(x) or np.isnan(fval))
+
+    if success:
+        message = ("\nOptimization terminated successfully;\n"
+                   "The returned value satisfies the termination criteria\n"
+                   f"(using xtol = {xtol} )")
+    else:
+        if nit >= maxiter:
+            message = "\nMaximum number of iterations exceeded"
+        if np.isnan(x) or np.isnan(fval):
+            message = f"{_status_message['nan']}"
+
+    if disp:
+        print(message)
+
+    return OptimizeResult(fun=fval, x=x, nit=nit, nfev=nfev,
+                          success=success, message=message)
+
+
+def golden(func, args=(), brack=None, tol=_epsilon,
+           full_output=0, maxiter=5000):
+    """
+    Return the minimum of a function of one variable using golden section
+    method.
+
+    Given a function of one variable and a possible bracketing interval,
+    return the minimum of the function isolated to a fractional precision of
+    tol.
+
+    Parameters
+    ----------
+    func : callable func(x,*args)
+        Objective function to minimize.
+    args : tuple, optional
+        Additional arguments (if present), passed to func.
+    brack : tuple, optional
+        Triple ``(a, b, c)``, where ``(a < b < c)`` and
+        ``func(b) < func(a), func(c)``.
+    tol : float, optional
+        x tolerance stop criterion.
+    full_output : bool, optional
+        If True, return optional outputs.
+    maxiter : int
+        Maximum number of iterations to perform.
+
+    Returns
+    -------
+    xmin : ndarray
+        Optimum point.
+    fval : float
+        Optimum function value.
+    funcalls : int
+        Number of objective function evaluations made.
+
+    See also
+    --------
+    minimize_scalar: Interface to minimization algorithms for scalar
+        univariate functions. See the 'Golden' `method` in particular.
+
+    Notes
+    -----
+    Uses an analog of the bisection method to decrease the bracketed
+    interval.
+
+    Examples
+    --------
+    We illustrate the behaviour of the function when `brack` is of
+    size 2 and 3, respectively. In the case where `brack` is of the
+    form ``(xa, xb)``, the output need not necessarily lie in the
+    range ``(xa, xb)``.
+
+    >>> def f(x):
+    ...     return x**2
+
+    >>> from scipy import optimize
+
+    >>> minimum = optimize.golden(f, brack=(1, 2))
+    >>> minimum
+    1.5717277788484873e-162
+    >>> minimum = optimize.golden(f, brack=(-1, 0.5, 2))
+    >>> minimum
+    -1.5717277788484873e-162
+
+    """
+    options = {'xtol': tol, 'maxiter': maxiter}
+    res = _minimize_scalar_golden(func, brack, args, **options)
+    if full_output:
+        return res['x'], res['fun'], res['nfev']
+    else:
+        return res['x']
+
+
+def _minimize_scalar_golden(func, brack=None, args=(),
+                            xtol=_epsilon, maxiter=5000, disp=0,
+                            **unknown_options):
+    """
+    Options
+    -------
+    xtol : float
+        Relative error in solution `xopt` acceptable for convergence.
+    maxiter : int
+        Maximum number of iterations to perform.
+    disp : int, optional
+        If non-zero, print messages.
+            0 : no message printing.
+            1 : non-convergence notification messages only.
+            2 : print a message on convergence too.
+            3 : print iteration results.
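+
+    A minimal usage sketch through the public interface (the objective
+    is an assumption for illustration):
+
+    >>> from scipy.optimize import minimize_scalar
+    >>> res = minimize_scalar(lambda x: (x + 1.0) ** 2, method='golden')
+    >>> round(res.x, 6)
+    -1.0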
+    """
+    _check_unknown_options(unknown_options)
+    tol = xtol
+    if brack is None:
+        xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args)
+    elif len(brack) == 2:
+        xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0],
+                                                   xb=brack[1], args=args)
+    elif len(brack) == 3:
+        xa, xb, xc = brack
+        if (xa > xc):  # swap so xa < xc can be assumed
+            xc, xa = xa, xc
+        if not ((xa < xb) and (xb < xc)):
+            raise ValueError(
+                "Bracketing values (xa, xb, xc) do not"
+                " fulfill this requirement: (xa < xb) and (xb < xc)"
+            )
+        fa = func(*((xa,) + args))
+        fb = func(*((xb,) + args))
+        fc = func(*((xc,) + args))
+        if not ((fb < fa) and (fb < fc)):
+            raise ValueError(
+                "Bracketing values (xa, xb, xc) do not fulfill"
+                " this requirement: (f(xb) < f(xa)) and (f(xb) < f(xc))"
+            )
+        funcalls = 3
+    else:
+        raise ValueError("Bracketing interval must be length 2 or 3 sequence.")
+
+    _gR = 0.61803399  # golden ratio conjugate: 2.0/(1.0+sqrt(5.0))
+    _gC = 1.0 - _gR
+    x3 = xc
+    x0 = xa
+    if (np.abs(xc - xb) > np.abs(xb - xa)):
+        x1 = xb
+        x2 = xb + _gC * (xc - xb)
+    else:
+        x2 = xb
+        x1 = xb - _gC * (xb - xa)
+    f1 = func(*((x1,) + args))
+    f2 = func(*((x2,) + args))
+    funcalls += 2
+    nit = 0
+
+    if disp > 2:
+        print(" ")
+        print(f"{'Func-count':^12} {'x':^12} {'f(x)': ^12}")
+
+    for i in range(maxiter):
+        if np.abs(x3 - x0) <= tol * (np.abs(x1) + np.abs(x2)):
+            break
+        if (f2 < f1):
+            x0 = x1
+            x1 = x2
+            x2 = _gR * x1 + _gC * x3
+            f1 = f2
+            f2 = func(*((x2,) + args))
+        else:
+            x3 = x2
+            x2 = x1
+            x1 = _gR * x2 + _gC * x0
+            f2 = f1
+            f1 = func(*((x1,) + args))
+        funcalls += 1
+        if disp > 2:
+            if (f1 < f2):
+                xmin, fval = x1, f1
+            else:
+                xmin, fval = x2, f2
+            print(f"{funcalls:^12g} {xmin:^12.6g} {fval:^12.6g}")
+
+        nit += 1
+    # end of iteration loop
+
+    if (f1 < f2):
+        xmin = x1
+        fval = f1
+    else:
+        xmin = x2
+        fval = f2
+
+    success = nit < maxiter and not (np.isnan(fval) or np.isnan(xmin))
+
+    if success:
+        message = ("\nOptimization terminated successfully;\n"
+                   "The returned value satisfies the termination criteria\n"
+                   f"(using xtol = {xtol} )")
+    else:
+        if nit >= maxiter:
+            message = "\nMaximum number of iterations exceeded"
+        if np.isnan(xmin) or np.isnan(fval):
+            message = f"{_status_message['nan']}"
+
+    if disp:
+        print(message)
+
+    return OptimizeResult(fun=fval, nfev=funcalls, x=xmin, nit=nit,
+                          success=success, message=message)
+
+
+def bracket(func, xa=0.0, xb=1.0, args=(), grow_limit=110.0, maxiter=1000):
+    """
+    Bracket the minimum of the function.
+
+    Given a function and distinct initial points, search in the
+    downhill direction (as defined by the initial points) and return
+    new points xa, xb, xc that bracket the minimum of the function:
+    ``f(xa) > f(xb) < f(xc)``. Note that the obtained solution will
+    not necessarily satisfy ``xa <= x <= xb``.
+
+    Parameters
+    ----------
+    func : callable f(x,*args)
+        Objective function to minimize.
+    xa, xb : float, optional
+        Bracketing interval. Defaults `xa` to 0.0, and `xb` to 1.0.
+    args : tuple, optional
+        Additional arguments (if present), passed to `func`.
+    grow_limit : float, optional
+        Maximum grow limit.  Defaults to 110.0
+    maxiter : int, optional
+        Maximum number of iterations to perform. Defaults to 1000.
+
+    Returns
+    -------
+    xa, xb, xc : float
+        Bracket.
+    fa, fb, fc : float
+        Objective function values in bracket.
+    funcalls : int
+        Number of function evaluations made.
+
+    Examples
+    --------
+    This function can find a downward convex region of a function:
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.optimize import bracket
+    >>> def f(x):
+    ...     return 10*x**2 + 3*x + 5
+    >>> x = np.linspace(-2, 2)
+    >>> y = f(x)
+    >>> init_xa, init_xb = 0, 1
+    >>> xa, xb, xc, fa, fb, fc, funcalls = bracket(f, xa=init_xa, xb=init_xb)
+    >>> plt.axvline(x=init_xa, color="k", linestyle="--")
+    >>> plt.axvline(x=init_xb, color="k", linestyle="--")
+    >>> plt.plot(x, y, "-k")
+    >>> plt.plot(xa, fa, "bx")
+    >>> plt.plot(xb, fb, "rx")
+    >>> plt.plot(xc, fc, "bx")
+    >>> plt.show()
+
+    """
+    _gold = 1.618034  # golden ratio: (1.0+sqrt(5.0))/2.0
+    _verysmall_num = 1e-21
+    fa = func(*(xa,) + args)
+    fb = func(*(xb,) + args)
+    if (fa < fb):                      # Switch so fa > fb
+        xa, xb = xb, xa
+        fa, fb = fb, fa
+    xc = xb + _gold * (xb - xa)
+    fc = func(*((xc,) + args))
+    funcalls = 3
+    iter = 0
+    while (fc < fb):
+        tmp1 = (xb - xa) * (fb - fc)
+        tmp2 = (xb - xc) * (fb - fa)
+        val = tmp2 - tmp1
+        if np.abs(val) < _verysmall_num:
+            denom = 2.0 * _verysmall_num
+        else:
+            denom = 2.0 * val
+        w = xb - ((xb - xc) * tmp2 - (xb - xa) * tmp1) / denom
+        wlim = xb + grow_limit * (xc - xb)
+        if iter > maxiter:
+            raise RuntimeError("Too many iterations.")
+        iter += 1
+        if (w - xc) * (xb - w) > 0.0:
+            fw = func(*((w,) + args))
+            funcalls += 1
+            if (fw < fc):
+                xa = xb
+                xb = w
+                fa = fb
+                fb = fw
+                return xa, xb, xc, fa, fb, fc, funcalls
+            elif (fw > fb):
+                xc = w
+                fc = fw
+                return xa, xb, xc, fa, fb, fc, funcalls
+            w = xc + _gold * (xc - xb)
+            fw = func(*((w,) + args))
+            funcalls += 1
+        elif (w - wlim)*(wlim - xc) >= 0.0:
+            w = wlim
+            fw = func(*((w,) + args))
+            funcalls += 1
+        elif (w - wlim)*(xc - w) > 0.0:
+            fw = func(*((w,) + args))
+            funcalls += 1
+            if (fw < fc):
+                xb = xc
+                xc = w
+                w = xc + _gold * (xc - xb)
+                fb = fc
+                fc = fw
+                fw = func(*((w,) + args))
+                funcalls += 1
+        else:
+            w = xc + _gold * (xc - xb)
+            fw = func(*((w,) + args))
+            funcalls += 1
+        xa = xb
+        xb = xc
+        xc = w
+        fa = fb
+        fb = fc
+        fc = fw
+    return xa, xb, xc, fa, fb, fc, funcalls
+
+
+def _line_for_search(x0, alpha, lower_bound, upper_bound):
+    """
+    Given a parameter vector ``x0`` with length ``n``, a direction
+    vector ``alpha`` with length ``n``, and lower and upper bounds on
+    each of the ``n`` parameters, find the bounds on a scalar
+    ``l`` such that ``lower_bound <= x0 + alpha * l <= upper_bound``.
+
+    Parameters
+    ----------
+    x0 : np.array.
+        The vector representing the current location.
+        Note ``np.shape(x0) == (n,)``.
+    alpha : np.array.
+        The vector representing the direction.
+        Note ``np.shape(alpha) == (n,)``.
+    lower_bound : np.array.
+        The lower bounds for each parameter in ``x0``. If the ``i``th
+        parameter in ``x0`` is unbounded below, then ``lower_bound[i]``
+        should be ``-np.inf``.
+        Note ``np.shape(lower_bound) == (n,)``.
+    upper_bound : np.array.
+        The upper bounds for each parameter in ``x0``. If the ``i``th
+        parameter in ``x0`` is unbounded above, then ``upper_bound[i]``
+        should be ``np.inf``.
+        Note ``np.shape(upper_bound) == (n,)``.
+
+    Returns
+    -------
+    res : tuple ``(lmin, lmax)``
+        The bounds for ``l`` such that
+            ``lower_bound[i] <= x0[i] + alpha[i] * l <= upper_bound[i]``
+        for all ``i``.
+
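+    Examples
+    --------
+    A small worked sketch (all values below are illustrative):
+
+    >>> import numpy as np
+    >>> x0 = np.array([0.0, 0.0])
+    >>> alpha = np.array([1.0, -1.0])
+    >>> lb, ub = np.array([-1.0, -2.0]), np.array([2.0, 1.0])
+    >>> lmin, lmax = _line_for_search(x0, alpha, lb, ub)
+    >>> float(lmin), float(lmax)
+    (-1.0, 2.0)
+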
+    """
+    # get nonzero indices of alpha so we don't get any zero division errors.
+    # alpha will not be all zero, since it is called from _linesearch_powell
+    # where we have a check for this.
+    nonzero, = alpha.nonzero()
+    lower_bound, upper_bound = lower_bound[nonzero], upper_bound[nonzero]
+    x0, alpha = x0[nonzero], alpha[nonzero]
+    low = (lower_bound - x0) / alpha
+    high = (upper_bound - x0) / alpha
+
+    # positive and negative indices
+    pos = alpha > 0
+
+    lmin_pos = np.where(pos, low, 0)
+    lmin_neg = np.where(pos, 0, high)
+    lmax_pos = np.where(pos, high, 0)
+    lmax_neg = np.where(pos, 0, low)
+
+    lmin = np.max(lmin_pos + lmin_neg)
+    lmax = np.min(lmax_pos + lmax_neg)
+
+    # if x0 is outside the bounds, then it is possible that there is
+    # no way to get back in the bounds for the parameters being updated
+    # with the current direction alpha.
+    # when this happens, lmax < lmin.
+    # If this is the case, then we can just return (0, 0)
+    return (lmin, lmax) if lmax >= lmin else (0, 0)
+
+
+def _linesearch_powell(func, p, xi, tol=1e-3,
+                       lower_bound=None, upper_bound=None, fval=None):
+    """Line-search algorithm using fminbound.
+
+    Find the minimium of the function ``func(x0 + alpha*direc)``.
+
+    lower_bound : np.array.
+        The lower bounds for each parameter in ``x0``. If the ``i``th
+        parameter in ``x0`` is unbounded below, then ``lower_bound[i]``
+        should be ``-np.inf``.
+        Note ``np.shape(lower_bound) == (n,)``.
+    upper_bound : np.array.
+        The upper bounds for each parameter in ``x0``. If the ``i``th
+        parameter in ``x0`` is unbounded above, then ``upper_bound[i]``
+        should be ``np.inf``.
+        Note ``np.shape(upper_bound) == (n,)``.
+    fval : number.
+        ``fval`` is equal to ``func(p)``; passing it in avoids
+        recomputing the value and thereby limits the number of
+        function evaluations.
+
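+    Notes
+    -----
+    When only one side is bounded, the interval for the step length is
+    mapped through ``np.arctan`` to a finite interval inside
+    ``(-pi/2, pi/2)``, minimized there, and the result mapped back with
+    ``np.tan``. A tiny sketch of that round trip (values illustrative):
+
+    >>> import numpy as np
+    >>> bool(np.isclose(np.tan(np.arctan(3.0)), 3.0))
+    True
+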
+    """
+    def myfunc(alpha):
+        return func(p + alpha*xi)
+
+    # if xi is zero, then don't optimize
+    if not np.any(xi):
+        return ((fval, p, xi) if fval is not None else (func(p), p, xi))
+    elif lower_bound is None and upper_bound is None:
+        # non-bounded minimization
+        alpha_min, fret, _, _ = brent(myfunc, full_output=1, tol=tol)
+        xi = alpha_min * xi
+        return squeeze(fret), p + xi, xi
+    else:
+        bound = _line_for_search(p, xi, lower_bound, upper_bound)
+        if np.isneginf(bound[0]) and np.isposinf(bound[1]):
+            # equivalent to unbounded
+            return _linesearch_powell(func, p, xi, fval=fval, tol=tol)
+        elif not np.isneginf(bound[0]) and not np.isposinf(bound[1]):
+            # we can use a bounded scalar minimization
+            res = _minimize_scalar_bounded(myfunc, bound, xatol=tol / 100)
+            xi = res.x * xi
+            return squeeze(res.fun), p + xi, xi
+        else:
+            # only bounded on one side. use the tangent function to convert
+            # the infinity bound to a finite bound. The new bounded region
+            # is a subregion of the region bounded by -np.pi/2 and np.pi/2.
+            bound = np.arctan(bound[0]), np.arctan(bound[1])
+            res = _minimize_scalar_bounded(
+                lambda x: myfunc(np.tan(x)),
+                bound,
+                xatol=tol / 100)
+            xi = np.tan(res.x) * xi
+            return squeeze(res.fun), p + xi, xi
+
+
+def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None,
+                maxfun=None, full_output=0, disp=1, retall=0, callback=None,
+                direc=None):
+    """
+    Minimize a function using modified Powell's method.
+
+    This method only uses function values, not derivatives.
+
+    Parameters
+    ----------
+    func : callable f(x,*args)
+        Objective function to be minimized.
+    x0 : ndarray
+        Initial guess.
+    args : tuple, optional
+        Extra arguments passed to func.
+    xtol : float, optional
+        Line-search error tolerance.
+    ftol : float, optional
+        Relative error in ``func(xopt)`` acceptable for convergence.
+    maxiter : int, optional
+        Maximum number of iterations to perform.
+    maxfun : int, optional
+        Maximum number of function evaluations to make.
+    full_output : bool, optional
+        If True, ``fopt``, ``xi``, ``direc``, ``iter``, ``funcalls``, and
+        ``warnflag`` are returned.
+    disp : bool, optional
+        If True, print convergence messages.
+    retall : bool, optional
+        If True, return a list of the solution at each iteration.
+    callback : callable, optional
+        An optional user-supplied function, called after each
+        iteration.  Called as ``callback(xk)``, where ``xk`` is the
+        current parameter vector.
+    direc : ndarray, optional
+        Initial fitting step and parameter order set as an (N, N) array, where N
+        is the number of fitting parameters in `x0`. Defaults to step size 1.0
+        fitting all parameters simultaneously (``np.eye(N)``). To
+        prevent initial consideration of values in a step or to change initial
+        step size, set to 0 or desired step size in the Jth position in the Mth
+        block, where J is the position in `x0` and M is the desired evaluation
+        step, with steps being evaluated in index order. Step size and ordering
+        will change freely as minimization proceeds.
+
+    Returns
+    -------
+    xopt : ndarray
+        Parameter which minimizes `func`.
+    fopt : number
+        Value of function at minimum: ``fopt = func(xopt)``.
+    direc : ndarray
+        Current direction set.
+    iter : int
+        Number of iterations.
+    funcalls : int
+        Number of function calls made.
+    warnflag : int
+        Integer warning flag:
+            1 : Maximum number of function evaluations.
+            2 : Maximum number of iterations.
+            3 : NaN result encountered.
+            4 : The result is out of the provided bounds.
+    allvecs : list
+        List of solutions at each iteration.
+
+    See also
+    --------
+    minimize: Interface to unconstrained minimization algorithms for
+        multivariate functions. See the 'Powell' method in particular.
+
+    Notes
+    -----
+    Uses a modification of Powell's method to find the minimum of
+    a function of N variables. Powell's method is a conjugate
+    direction method.
+
+    The algorithm has two loops. The outer loop merely iterates over the inner
+    loop. The inner loop minimizes over each current direction in the direction
+    set. At the end of the inner loop, if certain conditions are met, the
+    direction that gave the largest decrease is dropped and replaced with the
+    difference between the current estimated x and the estimated x from the
+    beginning of the inner-loop.
+
+    The technical conditions for replacing the direction of largest
+    decrease amount to checking that
+
+    1. No further gain can be made along the direction of largest decrease
+       from that iteration.
+    2. The direction of largest decrease accounted for a sufficiently large
+       fraction of the decrease in the function value from that iteration of
+       the inner loop.
+
+    References
+    ----------
+    Powell M.J.D. (1964) An efficient method for finding the minimum of a
+    function of several variables without calculating derivatives,
+    Computer Journal, 7 (2):155-162.
+
+    Press W., Teukolsky S.A., Vetterling W.T., and Flannery B.P.:
+    Numerical Recipes (any edition), Cambridge University Press
+
+    Examples
+    --------
+    >>> def f(x):
+    ...     return x**2
+
+    >>> from scipy import optimize
+
+    >>> minimum = optimize.fmin_powell(f, -1)
+    Optimization terminated successfully.
+             Current function value: 0.000000
+             Iterations: 2
+             Function evaluations: 16
+    >>> minimum
+    array(0.0)
+
+    """
+    opts = {'xtol': xtol,
+            'ftol': ftol,
+            'maxiter': maxiter,
+            'maxfev': maxfun,
+            'disp': disp,
+            'direc': direc,
+            'return_all': retall}
+
+    res = _minimize_powell(func, x0, args, callback=callback, **opts)
+
+    if full_output:
+        retlist = (res['x'], res['fun'], res['direc'], res['nit'],
+                   res['nfev'], res['status'])
+        if retall:
+            retlist += (res['allvecs'], )
+        return retlist
+    else:
+        if retall:
+            return res['x'], res['allvecs']
+        else:
+            return res['x']
+
+
+def _minimize_powell(func, x0, args=(), callback=None, bounds=None,
+                     xtol=1e-4, ftol=1e-4, maxiter=None, maxfev=None,
+                     disp=False, direc=None, return_all=False,
+                     **unknown_options):
+    """
+    Minimization of scalar function of one or more variables using the
+    modified Powell algorithm.
+
+    Parameters
+    ----------
+    fun : callable
+        The objective function to be minimized.
+
+            ``fun(x, *args) -> float``
+
+        where ``x`` is a 1-D array with shape (n,) and ``args``
+        is a tuple of the fixed parameters needed to completely
+        specify the function.
+    x0 : ndarray, shape (n,)
+        Initial guess. Array of real elements of size (n,),
+        where ``n`` is the number of independent variables.
+    args : tuple, optional
+        Extra arguments passed to the objective function and its
+        derivatives (`fun`, `jac` and `hess` functions).
+    method : str or callable, optional
+        The present documentation is specific to ``method='powell'``, but other
+        options are available. See documentation for `scipy.optimize.minimize`.
+    bounds : sequence or `Bounds`, optional
+        Bounds on decision variables. There are two ways to specify the bounds:
+
+            1. Instance of `Bounds` class.
+            2. Sequence of ``(min, max)`` pairs for each element in `x`. None
+               is used to specify no bound.
+
+        If bounds are not provided, then an unbounded line search will be used.
+        If bounds are provided and the initial guess is within the bounds, then
+        every function evaluation throughout the minimization procedure will be
+        within the bounds. If bounds are provided, the initial guess is outside
+        the bounds, and `direc` is full rank (or left to default), then some
+        function evaluations during the first iteration may be outside the
+        bounds, but every function evaluation after the first iteration will be
+        within the bounds. If `direc` is not full rank, then some parameters
+        may not be optimized and the solution is not guaranteed to be within
+        the bounds.
+
+    options : dict, optional
+        A dictionary of solver options. All methods accept the following
+        generic options:
+
+            maxiter : int
+                Maximum number of iterations to perform. Depending on the
+                method each iteration may use several function evaluations.
+            disp : bool
+                Set to True to print convergence messages.
+
+        See method-specific options for ``method='powell'`` below.
+    callback : callable, optional
+        Called after each iteration. The signature is:
+
+            ``callback(xk)``
+
+        where ``xk`` is the current parameter vector.
+
+    Returns
+    -------
+    res : OptimizeResult
+        The optimization result represented as a ``OptimizeResult`` object.
+        Important attributes are: ``x`` the solution array, ``success`` a
+        Boolean flag indicating if the optimizer exited successfully and
+        ``message`` which describes the cause of the termination. See
+        `OptimizeResult` for a description of other attributes.
+
+    Options
+    -------
+    disp : bool
+        Set to True to print convergence messages.
+    xtol : float
+        Relative error in solution `xopt` acceptable for convergence.
+    ftol : float
+        Relative error in ``fun(xopt)`` acceptable for convergence.
+    maxiter, maxfev : int
+        Maximum allowed number of iterations and function evaluations.
+        Will default to ``N*1000``, where ``N`` is the number of
+        variables, if neither `maxiter` nor `maxfev` is set. If both
+        `maxiter` and `maxfev` are set, minimization will stop at the
+        first reached.
+    direc : ndarray
+        Initial set of direction vectors for the Powell method.
+    return_all : bool, optional
+        Set to True to return a list of the best solution at each of the
+        iterations.
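+
+    A minimal sketch of reaching this solver with bounds through the
+    public interface (the objective is an assumption for illustration):
+
+    >>> import numpy as np
+    >>> from scipy.optimize import minimize
+    >>> res = minimize(lambda x: (x[0] - 3.0) ** 2, np.array([0.5]),
+    ...                method='Powell', bounds=[(0.0, 2.0)])
+    >>> float(np.round(res.x[0], 3))
+    2.0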
+    """
+    _check_unknown_options(unknown_options)
+    maxfun = maxfev
+    retall = return_all
+
+    x = asarray(x0).flatten()
+    if retall:
+        allvecs = [x]
+    N = len(x)
+    # If neither are set, then set both to default
+    if maxiter is None and maxfun is None:
+        maxiter = N * 1000
+        maxfun = N * 1000
+    elif maxiter is None:
+        # Convert remaining Nones to np.inf, unless the other is np.inf, in
+        # which case use the default to avoid unbounded iteration
+        if maxfun == np.inf:
+            maxiter = N * 1000
+        else:
+            maxiter = np.inf
+    elif maxfun is None:
+        if maxiter == np.inf:
+            maxfun = N * 1000
+        else:
+            maxfun = np.inf
+
+    # we need to use a mutable object here that we can update in the
+    # wrapper function
+    fcalls, func = _wrap_scalar_function_maxfun_validation(func, args, maxfun)
+
+    if direc is None:
+        direc = eye(N, dtype=float)
+    else:
+        direc = asarray(direc, dtype=float)
+        if np.linalg.matrix_rank(direc) != direc.shape[0]:
+            warnings.warn("direc input is not full rank, some parameters may "
+                          "not be optimized",
+                          OptimizeWarning, 3)
+
+    if bounds is None:
+        # don't make these arrays of all +/- inf. because
+        # _linesearch_powell will do an unnecessary check of all the elements.
+        # just keep them None, _linesearch_powell will not have to check
+        # all the elements.
+        lower_bound, upper_bound = None, None
+    else:
+        # bounds is standardized in _minimize.py.
+        lower_bound, upper_bound = bounds.lb, bounds.ub
+        if np.any(lower_bound > x0) or np.any(x0 > upper_bound):
+            warnings.warn("Initial guess is not within the specified bounds",
+                          OptimizeWarning, 3)
+
+    fval = squeeze(func(x))
+    x1 = x.copy()
+    iter = 0
+    while True:
+        try:
+            fx = fval
+            bigind = 0
+            delta = 0.0
+            for i in range(N):
+                direc1 = direc[i]
+                fx2 = fval
+                fval, x, direc1 = _linesearch_powell(func, x, direc1,
+                                                     tol=xtol * 100,
+                                                     lower_bound=lower_bound,
+                                                     upper_bound=upper_bound,
+                                                     fval=fval)
+                if (fx2 - fval) > delta:
+                    delta = fx2 - fval
+                    bigind = i
+            iter += 1
+            if callback is not None:
+                callback(x)
+            if retall:
+                allvecs.append(x)
+            bnd = ftol * (np.abs(fx) + np.abs(fval)) + 1e-20
+            if 2.0 * (fx - fval) <= bnd:
+                break
+            if fcalls[0] >= maxfun:
+                break
+            if iter >= maxiter:
+                break
+            if np.isnan(fx) and np.isnan(fval):
+                # Ended up in a nan-region: bail out
+                break
+
+            # Construct the extrapolated point
+            direc1 = x - x1
+            x1 = x.copy()
+            # make sure that we don't go outside the bounds when extrapolating
+            if lower_bound is None and upper_bound is None:
+                lmax = 1
+            else:
+                _, lmax = _line_for_search(x, direc1, lower_bound, upper_bound)
+            x2 = x + min(lmax, 1) * direc1
+            fx2 = squeeze(func(x2))
+
+            if (fx > fx2):
+                t = 2.0*(fx + fx2 - 2.0*fval)
+                temp = (fx - fval - delta)
+                t *= temp*temp
+                temp = fx - fx2
+                t -= delta*temp*temp
+                if t < 0.0:
+                    fval, x, direc1 = _linesearch_powell(
+                        func, x, direc1,
+                        tol=xtol * 100,
+                        lower_bound=lower_bound,
+                        upper_bound=upper_bound,
+                        fval=fval
+                    )
+                    if np.any(direc1):
+                        direc[bigind] = direc[-1]
+                        direc[-1] = direc1
+        except _MaxFuncCallError:
+            break
+
+    warnflag = 0
+    # out of bounds is more urgent than exceeding function evals or iters,
+    # but I don't want to cause inconsistencies by changing the
+    # established warning flags for maxfev and maxiter, so the out of bounds
+    # warning flag becomes 3, but is checked for first.
+    if bounds and (np.any(lower_bound > x) or np.any(x > upper_bound)):
+        warnflag = 4
+        msg = _status_message['out_of_bounds']
+    elif fcalls[0] >= maxfun:
+        warnflag = 1
+        msg = _status_message['maxfev']
+        if disp:
+            warnings.warn(msg, RuntimeWarning, 3)
+    elif iter >= maxiter:
+        warnflag = 2
+        msg = _status_message['maxiter']
+        if disp:
+            warnings.warn(msg, RuntimeWarning, 3)
+    elif np.isnan(fval) or np.isnan(x).any():
+        warnflag = 3
+        msg = _status_message['nan']
+        if disp:
+            warnings.warn(msg, RuntimeWarning, 3)
+    else:
+        msg = _status_message['success']
+        if disp:
+            print(msg)
+            print("         Current function value: %f" % fval)
+            print("         Iterations: %d" % iter)
+            print("         Function evaluations: %d" % fcalls[0])
+
+    result = OptimizeResult(fun=fval, direc=direc, nit=iter, nfev=fcalls[0],
+                            status=warnflag, success=(warnflag == 0),
+                            message=msg, x=x)
+    if retall:
+        result['allvecs'] = allvecs
+    return result
+
+
+def _endprint(x, flag, fval, maxfun, xtol, disp):
+    if flag == 0:
+        if disp > 1:
+            print("\nOptimization terminated successfully;\n"
+                  "The returned value satisfies the termination criteria\n"
+                  "(using xtol = ", xtol, ")")
+    if flag == 1:
+        if disp:
+            print("\nMaximum number of function evaluations exceeded --- "
+                  "increase maxfun argument.\n")
+    if flag == 2:
+        if disp:
+            print("\n{}".format(_status_message['nan']))
+    return
+
+
+def brute(func, ranges, args=(), Ns=20, full_output=0, finish=fmin,
+          disp=False, workers=1):
+    """Minimize a function over a given range by brute force.
+
+    Uses the "brute force" method, i.e., computes the function's value
+    at each point of a multidimensional grid of points, to find the global
+    minimum of the function.
+
+    The function is evaluated everywhere in the range with the datatype of the
+    first call to the function, as enforced by the ``vectorize`` NumPy
+    function. The value and type of the function evaluation returned when
+    ``full_output=True`` are affected in addition by the ``finish`` argument
+    (see Notes).
+
+    The brute force approach is inefficient because the number of grid points
+    increases exponentially with dimension: the number of grid points to
+    evaluate is ``Ns ** len(x)``. Consequently, even with coarse grid spacing,
+    moderately sized problems can take a long time to run and/or run into
+    memory limitations.
+
+    Parameters
+    ----------
+    func : callable
+        The objective function to be minimized. Must be in the
+        form ``f(x, *args)``, where ``x`` is the argument in
+        the form of a 1-D array and ``args`` is a tuple of any
+        additional fixed parameters needed to completely specify
+        the function.
+    ranges : tuple
+        Each component of the `ranges` tuple must be either a
+        "slice object" or a range tuple of the form ``(low, high)``.
+        The program uses these to create the grid of points on which
+        the objective function will be computed. See `Note 2` for
+        more detail.
+    args : tuple, optional
+        Any additional fixed parameters needed to completely specify
+        the function.
+    Ns : int, optional
+        Number of grid points along the axes, if not otherwise
+        specified. See `Note 2`.
+    full_output : bool, optional
+        If True, return the evaluation grid and the objective function's
+        values on it.
+    finish : callable, optional
+        An optimization function that is called with the result of brute force
+        minimization as initial guess. `finish` should take `func` and
+        the initial guess as positional arguments, and take `args` as
+        keyword arguments. It may additionally take `full_output`
+        and/or `disp` as keyword arguments. Use None if no "polishing"
+        function is to be used. See Notes for more details.
+    disp : bool, optional
+        Set to True to print convergence messages from the `finish` callable.
+    workers : int or map-like callable, optional
+        If `workers` is an int the grid is subdivided into `workers`
+        sections and evaluated in parallel (uses
+        `multiprocessing.Pool <multiprocessing>`).
+        Supply `-1` to use all cores available to the Process.
+        Alternatively supply a map-like callable, such as
+        `multiprocessing.Pool.map` for evaluating the grid in parallel.
+        This evaluation is carried out as ``workers(func, iterable)``.
+        Requires that `func` be pickleable.
+
+        .. versionadded:: 1.3.0
+
+    Returns
+    -------
+    x0 : ndarray
+        A 1-D array containing the coordinates of a point at which the
+        objective function had its minimum value. (See `Note 1` for
+        which point is returned.)
+    fval : float
+        Function value at the point `x0`. (Returned when `full_output` is
+        True.)
+    grid : tuple
+        Representation of the evaluation grid. It has the same
+        length as `x0`. (Returned when `full_output` is True.)
+    Jout : ndarray
+        Function values at each point of the evaluation
+        grid, i.e., ``Jout = func(*grid)``. (Returned
+        when `full_output` is True.)
+
+    See Also
+    --------
+    basinhopping, differential_evolution
+
+    Notes
+    -----
+    *Note 1*: The program finds the gridpoint at which the lowest value
+    of the objective function occurs. If `finish` is None, that is the
+    point returned. When the global minimum occurs within (or not very far
+    outside) the grid's boundaries, and the grid is fine enough, that
+    point will be in the neighborhood of the global minimum.
+
+    However, users often employ some other optimization program to
+    "polish" the gridpoint values, i.e., to seek a more precise
+    (local) minimum near `brute's` best gridpoint.
+    The `brute` function's `finish` option provides a convenient way to do
+    that. Any polishing program used must take `brute's` output as its
+    initial guess as a positional argument, and take `brute's` input values
+    for `args` as keyword arguments, otherwise an error will be raised.
+    It may additionally take `full_output` and/or `disp` as keyword arguments.
+
+    `brute` assumes that the `finish` function returns either an
+    `OptimizeResult` object or a tuple in the form:
+    ``(xmin, Jmin, ... , statuscode)``, where ``xmin`` is the minimizing
+    value of the argument, ``Jmin`` is the minimum value of the objective
+    function, "..." may be some other returned values (which are not used
+    by `brute`), and ``statuscode`` is the status code of the `finish` program.
+
+    Note that when `finish` is not None, the values returned are those
+    of the `finish` program, *not* the gridpoint ones. Consequently,
+    while `brute` confines its search to the input grid points,
+    the `finish` program's results usually will not coincide with any
+    gridpoint, and may fall outside the grid's boundary. Thus, if a
+    minimum only needs to be found over the provided grid points, make
+    sure to pass in `finish=None`.
+
+    *Note 2*: The grid of points is a `numpy.mgrid` object.
+    For `brute` the `ranges` and `Ns` inputs have the following effect.
+    Each component of the `ranges` tuple can be either a slice object or a
+    two-tuple giving a range of values, such as (0, 5). If the component is a
+    slice object, `brute` uses it directly. If the component is a two-tuple
+    range, `brute` internally converts it to a slice object that interpolates
+    `Ns` points from its low-value to its high-value, inclusive.
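+
+    For example (an illustrative sketch of the equivalence), the two-tuple
+    ``(0, 5)`` with ``Ns = 20`` is converted to ``slice(0, 5, complex(20))``;
+    `numpy.mgrid` interprets the complex step as a point count, so that axis
+    of the grid holds 20 points from 0 to 5, inclusive.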
+
+    Examples
+    --------
+    We illustrate the use of `brute` to seek the global minimum of a function
+    of two variables that is given as the sum of a positive-definite
+    quadratic and two deep "Gaussian-shaped" craters. Specifically, define
+    the objective function `f` as the sum of three other functions,
+    ``f = f1 + f2 + f3``. We suppose each of these has a signature
+    ``(z, *params)``, where ``z = (x, y)``,  and ``params`` and the functions
+    are as defined below.
+
+    >>> import numpy as np
+    >>> params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 26, 1, -2, 0.5)
+    >>> def f1(z, *params):
+    ...     x, y = z
+    ...     a, b, c, d, e, f, g, h, i, j, k, l, scale = params
+    ...     return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f)
+
+    >>> def f2(z, *params):
+    ...     x, y = z
+    ...     a, b, c, d, e, f, g, h, i, j, k, l, scale = params
+    ...     return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale))
+
+    >>> def f3(z, *params):
+    ...     x, y = z
+    ...     a, b, c, d, e, f, g, h, i, j, k, l, scale = params
+    ...     return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale))
+
+    >>> def f(z, *params):
+    ...     return f1(z, *params) + f2(z, *params) + f3(z, *params)
+
+    Thus, the objective function may have local minima near the minimum
+    of each of the three functions of which it is composed. To
+    use `fmin` to polish its gridpoint result, we may then continue as
+    follows:
+
+    >>> rranges = (slice(-4, 4, 0.25), slice(-4, 4, 0.25))
+    >>> from scipy import optimize
+    >>> resbrute = optimize.brute(f, rranges, args=params, full_output=True,
+    ...                           finish=optimize.fmin)
+    >>> resbrute[0]  # global minimum
+    array([-1.05665192,  1.80834843])
+    >>> resbrute[1]  # function value at global minimum
+    -3.4085818767
+
+    Note that if `finish` had been set to None, we would have gotten the
+    gridpoint [-1.0 1.75] where the rounded function value is -2.892.
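+
+    As a concrete check of that claim (an illustrative addition; output is
+    shown to the precision quoted above):
+
+    >>> resbrute2 = optimize.brute(f, rranges, args=params, full_output=True,
+    ...                            finish=None)
+    >>> resbrute2[0]  # best gridpoint
+    array([-1.  ,  1.75])
+    >>> print(round(resbrute2[1], 3))  # function value at that gridpoint
+    -2.892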
+
+    """
+    N = len(ranges)
+    if N > 40:
+        raise ValueError("Brute Force not possible with more "
+                         "than 40 variables.")
+    lrange = list(ranges)
+    for k in range(N):
+        if not isinstance(lrange[k], slice):
+            if len(lrange[k]) < 3:
+                lrange[k] = tuple(lrange[k]) + (complex(Ns),)
+            lrange[k] = slice(*lrange[k])
+    if (N == 1):
+        lrange = lrange[0]
+
+    grid = np.mgrid[lrange]
+
+    # obtain an array of parameters that is iterable by a map-like callable
+    inpt_shape = grid.shape
+    if (N > 1):
+        grid = np.reshape(grid, (inpt_shape[0], np.prod(inpt_shape[1:]))).T
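+        # e.g. for N=2 axes of 20 points each, the (2, 20, 20) mgrid output
+        # becomes a (400, 2) array: one row of parameters per grid point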
+
+    if not np.iterable(args):
+        args = (args,)
+
+    wrapped_func = _Brute_Wrapper(func, args)
+
+    # iterate over input arrays, possibly in parallel
+    with MapWrapper(pool=workers) as mapper:
+        Jout = np.array(list(mapper(wrapped_func, grid)))
+        if (N == 1):
+            grid = (grid,)
+            Jout = np.squeeze(Jout)
+        elif (N > 1):
+            Jout = np.reshape(Jout, inpt_shape[1:])
+            grid = np.reshape(grid.T, inpt_shape)
+
+    Nshape = shape(Jout)
+
+    indx = argmin(Jout.ravel(), axis=-1)
+    Nindx = np.empty(N, int)
+    xmin = np.empty(N, float)
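+    # the loop below is a manual equivalent of np.unravel_index(indx, Nshape)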
+    for k in range(N - 1, -1, -1):
+        thisN = Nshape[k]
+        Nindx[k] = indx % thisN
+        indx = indx // thisN
+    for k in range(N):
+        xmin[k] = grid[k][tuple(Nindx)]
+
+    Jmin = Jout[tuple(Nindx)]
+    if (N == 1):
+        grid = grid[0]
+        xmin = xmin[0]
+
+    if callable(finish):
+        # set up kwargs for `finish` function
+        finish_args = _getfullargspec(finish).args
+        finish_kwargs = dict()
+        if 'full_output' in finish_args:
+            finish_kwargs['full_output'] = 1
+        if 'disp' in finish_args:
+            finish_kwargs['disp'] = disp
+        elif 'options' in finish_args:
+            # pass 'disp' as `options`
+            # (e.g., if `finish` is `minimize`)
+            finish_kwargs['options'] = {'disp': disp}
+
+        # run minimizer
+        res = finish(func, xmin, args=args, **finish_kwargs)
+
+        if isinstance(res, OptimizeResult):
+            xmin = res.x
+            Jmin = res.fun
+            success = res.success
+        else:
+            xmin = res[0]
+            Jmin = res[1]
+            success = res[-1] == 0
+        if not success:
+            if disp:
+                warnings.warn(
+                    "Either final optimization did not succeed "
+                    "or `finish` does not return `statuscode` as its last "
+                    "argument.", RuntimeWarning, 2)
+
+    if full_output:
+        return xmin, Jmin, grid, Jout
+    else:
+        return xmin
+
+
+class _Brute_Wrapper:
+    """
+    Object to wrap user cost function for optimize.brute, allowing picklability
+    """
+
+    def __init__(self, f, args):
+        self.f = f
+        self.args = [] if args is None else args
+
+    def __call__(self, x):
+        # flatten needed for one dimensional case.
+        return self.f(np.asarray(x).flatten(), *self.args)
+
+
+def show_options(solver=None, method=None, disp=True):
+    """
+    Show documentation for additional options of optimization solvers.
+
+    These are method-specific options that can be supplied through the
+    ``options`` dict.
+
+    Parameters
+    ----------
+    solver : str
+        Type of optimization solver. One of 'minimize', 'minimize_scalar',
+        'root', 'root_scalar', 'linprog', or 'quadratic_assignment'.
+    method : str, optional
+        If not given, shows all methods of the specified solver. Otherwise,
+        show only the options for the specified method. Valid values
+        corresponds to methods' names of respective solver (e.g., 'BFGS' for
+        'minimize').
+    disp : bool, optional
+        Whether to print the result rather than returning it.
+
+    Returns
+    -------
+    text
+        Either None (for disp=True) or the text string (disp=False)
+
+    Notes
+    -----
+    The solver-specific methods are:
+
+    `scipy.optimize.minimize`
+
+    - :ref:`Nelder-Mead <optimize.minimize-neldermead>`
+    - :ref:`Powell      <optimize.minimize-powell>`
+    - :ref:`CG          <optimize.minimize-cg>`
+    - :ref:`BFGS        <optimize.minimize-bfgs>`
+    - :ref:`Newton-CG   <optimize.minimize-newtoncg>`
+    - :ref:`L-BFGS-B    <optimize.minimize-lbfgsb>`
+    - :ref:`TNC         <optimize.minimize-tnc>`
+    - :ref:`COBYLA      <optimize.minimize-cobyla>`
+    - :ref:`SLSQP       <optimize.minimize-slsqp>`
+    - :ref:`dogleg      <optimize.minimize-dogleg>`
+    - :ref:`trust-ncg   <optimize.minimize-trustncg>`
+
+    `scipy.optimize.root`
+
+    - :ref:`hybr              <optimize.root-hybr>`
+    - :ref:`lm                <optimize.root-lm>`
+    - :ref:`broyden1          <optimize.root-broyden1>`
+    - :ref:`broyden2          <optimize.root-broyden2>`
+    - :ref:`anderson          <optimize.root-anderson>`
+    - :ref:`linearmixing      <optimize.root-linearmixing>`
+    - :ref:`diagbroyden       <optimize.root-diagbroyden>`
+    - :ref:`excitingmixing    <optimize.root-excitingmixing>`
+    - :ref:`krylov            <optimize.root-krylov>`
+    - :ref:`df-sane           <optimize.root-dfsane>`
+
+    `scipy.optimize.minimize_scalar`
+
+    - :ref:`brent       <optimize.minimize_scalar-brent>`
+    - :ref:`golden      <optimize.minimize_scalar-golden>`
+    - :ref:`bounded     <optimize.minimize_scalar-bounded>`
+
+    `scipy.optimize.root_scalar`
+
+    - :ref:`bisect  <optimize.root_scalar-bisect>`
+    - :ref:`brentq  <optimize.root_scalar-brentq>`
+    - :ref:`brenth  <optimize.root_scalar-brenth>`
+    - :ref:`ridder  <optimize.root_scalar-ridder>`
+    - :ref:`toms748 <optimize.root_scalar-toms748>`
+    - :ref:`newton  <optimize.root_scalar-newton>`
+    - :ref:`secant  <optimize.root_scalar-secant>`
+    - :ref:`halley  <optimize.root_scalar-halley>`
+
+    `scipy.optimize.linprog`
+
+    - :ref:`simplex           <optimize.linprog-simplex>`
+    - :ref:`interior-point    <optimize.linprog-interior-point>`
+    - :ref:`revised simplex   <optimize.linprog-revised_simplex>`
+    - :ref:`highs             <optimize.linprog-highs>`
+    - :ref:`highs-ds          <optimize.linprog-highs-ds>`
+    - :ref:`highs-ipm         <optimize.linprog-highs-ipm>`
+
+    `scipy.optimize.quadratic_assignment`
+
+    - :ref:`faq             <optimize.qap-faq>`
+    - :ref:`2opt            <optimize.qap-2opt>`
+
+    Examples
+    --------
+    We can print the documentation of a solver to stdout:
+
+    >>> from scipy.optimize import show_options
+    >>> show_options(solver="minimize")
+    ...
+
+    Specifying a method is possible:
+
+    >>> show_options(solver="minimize", method="Nelder-Mead")
+    ...
+
+    We can also get the documentation as a string:
+
+    >>> show_options(solver="minimize", method="Nelder-Mead", disp=False)
+    Minimization of scalar function of one or more variables using the ...
+
+    """
+    import textwrap
+
+    doc_routines = {
+        'minimize': (
+            ('bfgs', 'scipy.optimize._optimize._minimize_bfgs'),
+            ('cg', 'scipy.optimize._optimize._minimize_cg'),
+            ('cobyla', 'scipy.optimize._cobyla_py._minimize_cobyla'),
+            ('dogleg', 'scipy.optimize._trustregion_dogleg._minimize_dogleg'),
+            ('l-bfgs-b', 'scipy.optimize._lbfgsb_py._minimize_lbfgsb'),
+            ('nelder-mead', 'scipy.optimize._optimize._minimize_neldermead'),
+            ('newton-cg', 'scipy.optimize._optimize._minimize_newtoncg'),
+            ('powell', 'scipy.optimize._optimize._minimize_powell'),
+            ('slsqp', 'scipy.optimize._slsqp_py._minimize_slsqp'),
+            ('tnc', 'scipy.optimize._tnc._minimize_tnc'),
+            ('trust-ncg',
+             'scipy.optimize._trustregion_ncg._minimize_trust_ncg'),
+            ('trust-constr',
+             'scipy.optimize._trustregion_constr.'
+             '_minimize_trustregion_constr'),
+            ('trust-exact',
+             'scipy.optimize._trustregion_exact._minimize_trustregion_exact'),
+            ('trust-krylov',
+             'scipy.optimize._trustregion_krylov._minimize_trust_krylov'),
+        ),
+        'root': (
+            ('hybr', 'scipy.optimize._minpack_py._root_hybr'),
+            ('lm', 'scipy.optimize._root._root_leastsq'),
+            ('broyden1', 'scipy.optimize._root._root_broyden1_doc'),
+            ('broyden2', 'scipy.optimize._root._root_broyden2_doc'),
+            ('anderson', 'scipy.optimize._root._root_anderson_doc'),
+            ('diagbroyden', 'scipy.optimize._root._root_diagbroyden_doc'),
+            ('excitingmixing', 'scipy.optimize._root._root_excitingmixing_doc'),
+            ('linearmixing', 'scipy.optimize._root._root_linearmixing_doc'),
+            ('krylov', 'scipy.optimize._root._root_krylov_doc'),
+            ('df-sane', 'scipy.optimize._spectral._root_df_sane'),
+        ),
+        'root_scalar': (
+            ('bisect', 'scipy.optimize._root_scalar._root_scalar_bisect_doc'),
+            ('brentq', 'scipy.optimize._root_scalar._root_scalar_brentq_doc'),
+            ('brenth', 'scipy.optimize._root_scalar._root_scalar_brenth_doc'),
+            ('ridder', 'scipy.optimize._root_scalar._root_scalar_ridder_doc'),
+            ('toms748', 'scipy.optimize._root_scalar._root_scalar_toms748_doc'),
+            ('secant', 'scipy.optimize._root_scalar._root_scalar_secant_doc'),
+            ('newton', 'scipy.optimize._root_scalar._root_scalar_newton_doc'),
+            ('halley', 'scipy.optimize._root_scalar._root_scalar_halley_doc'),
+        ),
+        'linprog': (
+            ('simplex', 'scipy.optimize._linprog._linprog_simplex_doc'),
+            ('interior-point', 'scipy.optimize._linprog._linprog_ip_doc'),
+            ('revised simplex', 'scipy.optimize._linprog._linprog_rs_doc'),
+            ('highs-ipm', 'scipy.optimize._linprog._linprog_highs_ipm_doc'),
+            ('highs-ds', 'scipy.optimize._linprog._linprog_highs_ds_doc'),
+            ('highs', 'scipy.optimize._linprog._linprog_highs_doc'),
+        ),
+        'quadratic_assignment': (
+            ('faq', 'scipy.optimize._qap._quadratic_assignment_faq'),
+            ('2opt', 'scipy.optimize._qap._quadratic_assignment_2opt'),
+        ),
+        'minimize_scalar': (
+            ('brent', 'scipy.optimize._optimize._minimize_scalar_brent'),
+            ('bounded', 'scipy.optimize._optimize._minimize_scalar_bounded'),
+            ('golden', 'scipy.optimize._optimize._minimize_scalar_golden'),
+        ),
+    }
+
+    if solver is None:
+        text = ["\n\n\n========\n", "minimize\n", "========\n"]
+        text.append(show_options('minimize', disp=False))
+        text.extend(["\n\n===============\n", "minimize_scalar\n",
+                     "===============\n"])
+        text.append(show_options('minimize_scalar', disp=False))
+        text.extend(["\n\n\n====\n", "root\n",
+                     "====\n"])
+        text.append(show_options('root', disp=False))
+        text.extend(['\n\n\n=======\n', 'linprog\n',
+                     '=======\n'])
+        text.append(show_options('linprog', disp=False))
+        text = "".join(text)
+    else:
+        solver = solver.lower()
+        if solver not in doc_routines:
+            raise ValueError('Unknown solver %r' % (solver,))
+
+        if method is None:
+            text = []
+            for name, _ in doc_routines[solver]:
+                text.extend(["\n\n" + name, "\n" + "="*len(name) + "\n\n"])
+                text.append(show_options(solver, name, disp=False))
+            text = "".join(text)
+        else:
+            method = method.lower()
+            methods = dict(doc_routines[solver])
+            if method not in methods:
+                raise ValueError("Unknown method %r" % (method,))
+            name = methods[method]
+
+            # Import function object
+            parts = name.split('.')
+            mod_name = ".".join(parts[:-1])
+            __import__(mod_name)
+            obj = getattr(sys.modules[mod_name], parts[-1])
+
+            # Get doc
+            doc = obj.__doc__
+            if doc is not None:
+                text = textwrap.dedent(doc).strip()
+            else:
+                text = ""
+
+    if disp:
+        print(text)
+        return
+    else:
+        return text
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_qap.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_qap.py
new file mode 100644
index 00000000..26b0db77
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_qap.py
@@ -0,0 +1,724 @@
+import numpy as np
+import operator
+from . import (linear_sum_assignment, OptimizeResult)
+from ._optimize import _check_unknown_options
+
+from scipy._lib._util import check_random_state
+import itertools
+
+QUADRATIC_ASSIGNMENT_METHODS = ['faq', '2opt']
+
+def quadratic_assignment(A, B, method="faq", options=None):
+    r"""
+    Approximates the solution to the quadratic assignment problem and
+    the graph matching problem.
+
+    Quadratic assignment solves problems of the following form:
+
+    .. math::
+
+        \min_P & \ {\ \text{trace}(A^T P B P^T)}\\
+        \mbox{s.t. } & {P \in \mathcal{P}}\\
+
+    where :math:`\mathcal{P}` is the set of all permutation matrices,
+    and :math:`A` and :math:`B` are square matrices.
+
+    Graph matching tries to *maximize* the same objective function.
+    This algorithm can be thought of as finding the alignment of the
+    nodes of two graphs that minimizes the number of induced edge
+    disagreements, or, in the case of weighted graphs, the sum of squared
+    edge weight differences.
+
+    Note that the quadratic assignment problem is NP-hard. The results given
+    here are approximations and are not guaranteed to be optimal.
+
+
+    Parameters
+    ----------
+    A : 2-D array, square
+        The square matrix :math:`A` in the objective function above.
+
+    B : 2-D array, square
+        The square matrix :math:`B` in the objective function above.
+
+    method : str in {'faq', '2opt'} (default: 'faq')
+        The algorithm used to solve the problem.
+        :ref:`'faq' <optimize.qap-faq>` (default) and
+        :ref:`'2opt' <optimize.qap-2opt>` are available.
+
+    options : dict, optional
+        A dictionary of solver options. All solvers support the following:
+
+        maximize : bool (default: False)
+            Maximizes the objective function if ``True``.
+
+        partial_match : 2-D array of integers, optional (default: None)
+            Fixes part of the matching. Also known as a "seed" [2]_.
+
+            Each row of `partial_match` specifies a pair of matched nodes:
+            node ``partial_match[i, 0]`` of `A` is matched to node
+            ``partial_match[i, 1]`` of `B`. The array has shape ``(m, 2)``,
+            where ``m`` is not greater than the number of nodes, :math:`n`.
+
+        rng : {None, int, `numpy.random.Generator`,
+               `numpy.random.RandomState`}, optional
+
+            If `rng` is None (or `np.random`), the `numpy.random.RandomState`
+            singleton is used.
+            If `rng` is an int, a new ``RandomState`` instance is used,
+            seeded with `rng`.
+            If `rng` is already a ``Generator`` or ``RandomState`` instance,
+            then that instance is used.
+
+        For method-specific options, see
+        :func:`show_options('quadratic_assignment') <show_options>`.
+
+    Returns
+    -------
+    res : OptimizeResult
+        `OptimizeResult` containing the following fields.
+
+        col_ind : 1-D array
+            Column indices corresponding to the best permutation found of the
+            nodes of `B`.
+        fun : float
+            The objective value of the solution.
+        nit : int
+            The number of iterations performed during optimization.
+
+    Notes
+    -----
+    The default method :ref:`'faq' <optimize.qap-faq>` uses the Fast
+    Approximate QAP algorithm [1]_; it typically offers the best combination of
+    speed and accuracy.
+    Method :ref:`'2opt' <optimize.qap-2opt>` can be computationally expensive,
+    but may be a useful alternative, or it can be used to refine the solution
+    returned by another method.
+
+    References
+    ----------
+    .. [1] J.T. Vogelstein, J.M. Conroy, V. Lyzinski, L.J. Podrazik,
+           S.G. Kratzer, E.T. Harley, D.E. Fishkind, R.J. Vogelstein, and
+           C.E. Priebe, "Fast approximate quadratic programming for graph
+           matching," PLOS one, vol. 10, no. 4, p. e0121002, 2015,
+           :doi:`10.1371/journal.pone.0121002`
+
+    .. [2] D. Fishkind, S. Adali, H. Patsolic, L. Meng, D. Singh, V. Lyzinski,
+           C. Priebe, "Seeded graph matching", Pattern Recognit. 87 (2019):
+           203-215, :doi:`10.1016/j.patcog.2018.09.014`
+
+    .. [3] "2-opt," Wikipedia.
+           https://en.wikipedia.org/wiki/2-opt
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.optimize import quadratic_assignment
+    >>> A = np.array([[0, 80, 150, 170], [80, 0, 130, 100],
+    ...               [150, 130, 0, 120], [170, 100, 120, 0]])
+    >>> B = np.array([[0, 5, 2, 7], [0, 0, 3, 8],
+    ...               [0, 0, 0, 3], [0, 0, 0, 0]])
+    >>> res = quadratic_assignment(A, B)
+    >>> print(res)
+         fun: 3260
+     col_ind: [0 3 2 1]
+         nit: 9
+
+    To see the relationship between the returned ``col_ind`` and ``fun``,
+    use ``col_ind`` to form the best permutation matrix found, then evaluate
+    the objective function :math:`f(P) = \text{trace}(A^T P B P^T)`.
+
+    >>> perm = res['col_ind']
+    >>> P = np.eye(len(A), dtype=int)[perm]
+    >>> fun = np.trace(A.T @ P @ B @ P.T)
+    >>> print(fun)
+    3260
+
+    Alternatively, to avoid constructing the permutation matrix explicitly,
+    directly permute the rows and columns of the distance matrix.
+
+    >>> fun = np.trace(A.T @ B[perm][:, perm])
+    >>> print(fun)
+    3260
+
+    Although not guaranteed in general, ``quadratic_assignment`` happens to
+    have found the globally optimal solution.
+
+    >>> from itertools import permutations
+    >>> perm_opt, fun_opt = None, np.inf
+    >>> for perm in permutations([0, 1, 2, 3]):
+    ...     perm = np.array(perm)
+    ...     fun = np.trace(A.T @ B[perm][:, perm])
+    ...     if fun < fun_opt:
+    ...         fun_opt, perm_opt = fun, perm
+    >>> print(np.array_equal(perm_opt, res['col_ind']))
+    True
+
+    Here is an example for which the default method,
+    :ref:`'faq' <optimize.qap-faq>`, does not find the global optimum.
+
+    >>> A = np.array([[0, 5, 8, 6], [5, 0, 5, 1],
+    ...               [8, 5, 0, 2], [6, 1, 2, 0]])
+    >>> B = np.array([[0, 1, 8, 4], [1, 0, 5, 2],
+    ...               [8, 5, 0, 5], [4, 2, 5, 0]])
+    >>> res = quadratic_assignment(A, B)
+    >>> print(res)
+         fun: 178
+     col_ind: [1 0 3 2]
+         nit: 13
+
+    If accuracy is important, consider using :ref:`'2opt' <optimize.qap-2opt>`
+    to refine the solution.
+
+    >>> guess = np.array([np.arange(len(A)), res.col_ind]).T
+    >>> res = quadratic_assignment(A, B, method="2opt",
+    ...                            options={'partial_guess': guess})
+    >>> print(res)
+         fun: 176
+     col_ind: [1 2 3 0]
+         nit: 17
+
+    """
+
+    if options is None:
+        options = {}
+
+    method = method.lower()
+    methods = {"faq": _quadratic_assignment_faq,
+               "2opt": _quadratic_assignment_2opt}
+    if method not in methods:
+        raise ValueError(f"method {method} must be in {methods}.")
+    res = methods[method](A, B, **options)
+    return res
+
+
+def _calc_score(A, B, perm):
+    # equivalent to objective function but avoids matmul
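+    # for the permutation matrix P with P[i, perm[i]] = 1,
+    # trace(A.T @ P @ B @ P.T) == (A * B[perm][:, perm]).sum()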
+    return np.sum(A * B[perm][:, perm])
+
+
+def _common_input_validation(A, B, partial_match):
+    A = np.atleast_2d(A)
+    B = np.atleast_2d(B)
+
+    if partial_match is None:
+        partial_match = np.array([[], []]).T
+    partial_match = np.atleast_2d(partial_match).astype(int)
+
+    msg = None
+    if A.shape[0] != A.shape[1]:
+        msg = "`A` must be square"
+    elif B.shape[0] != B.shape[1]:
+        msg = "`B` must be square"
+    elif A.ndim != 2 or B.ndim != 2:
+        msg = "`A` and `B` must have exactly two dimensions"
+    elif A.shape != B.shape:
+        msg = "`A` and `B` matrices must be of equal size"
+    elif partial_match.shape[0] > A.shape[0]:
+        msg = "`partial_match` can have only as many seeds as there are nodes"
+    elif partial_match.shape[1] != 2:
+        msg = "`partial_match` must have two columns"
+    elif partial_match.ndim != 2:
+        msg = "`partial_match` must have exactly two dimensions"
+    elif (partial_match < 0).any():
+        msg = "`partial_match` must contain only positive indices"
+    elif (partial_match >= len(A)).any():
+        msg = "`partial_match` entries must be less than number of nodes"
+    elif (not len(set(partial_match[:, 0])) == len(partial_match[:, 0]) or
+          not len(set(partial_match[:, 1])) == len(partial_match[:, 1])):
+        msg = "`partial_match` column entries must be unique"
+
+    if msg is not None:
+        raise ValueError(msg)
+
+    return A, B, partial_match
+
+
+def _quadratic_assignment_faq(A, B,
+                              maximize=False, partial_match=None, rng=None,
+                              P0="barycenter", shuffle_input=False, maxiter=30,
+                              tol=0.03, **unknown_options):
+    r"""Solve the quadratic assignment problem (approximately).
+
+    This function solves the Quadratic Assignment Problem (QAP) and the
+    Graph Matching Problem (GMP) using the Fast Approximate QAP Algorithm
+    (FAQ) [1]_.
+
+    Quadratic assignment solves problems of the following form:
+
+    .. math::
+
+        \min_P & \ {\ \text{trace}(A^T P B P^T)}\\
+        \mbox{s.t. } & {P \in \mathcal{P}}\\
+
+    where :math:`\mathcal{P}` is the set of all permutation matrices,
+    and :math:`A` and :math:`B` are square matrices.
+
+    Graph matching tries to *maximize* the same objective function.
+    This algorithm can be thought of as finding the alignment of the
+    nodes of two graphs that minimizes the number of induced edge
+    disagreements, or, in the case of weighted graphs, the sum of squared
+    edge weight differences.
+
+    Note that the quadratic assignment problem is NP-hard. The results given
+    here are approximations and are not guaranteed to be optimal.
+
+    Parameters
+    ----------
+    A : 2-D array, square
+        The square matrix :math:`A` in the objective function above.
+    B : 2-D array, square
+        The square matrix :math:`B` in the objective function above.
+    method : str in {'faq', '2opt'} (default: 'faq')
+        The algorithm used to solve the problem. This is the method-specific
+        documentation for 'faq'.
+        :ref:`'2opt' <optimize.qap-2opt>` is also available.
+
+    Options
+    -------
+    maximize : bool (default: False)
+        Maximizes the objective function if ``True``.
+    partial_match : 2-D array of integers, optional (default: None)
+        Fixes part of the matching. Also known as a "seed" [2]_.
+
+        Each row of `partial_match` specifies a pair of matched nodes:
+        node ``partial_match[i, 0]`` of `A` is matched to node
+        ``partial_match[i, 1]`` of `B`. The array has shape ``(m, 2)``, where
+        ``m`` is not greater than the number of nodes, :math:`n`.
+
+    rng : {None, int, `numpy.random.Generator`,
+           `numpy.random.RandomState`}, optional
+
+        If `rng` is None (or `np.random`), the `numpy.random.RandomState`
+        singleton is used.
+        If `rng` is an int, a new ``RandomState`` instance is used,
+        seeded with `rng`.
+        If `rng` is already a ``Generator`` or ``RandomState`` instance,
+        then that instance is used.
+    P0 : 2-D array, "barycenter", or "randomized" (default: "barycenter")
+        Initial position. Must be a doubly-stochastic matrix [3]_.
+
+        If the initial position is an array, it must be a doubly stochastic
+        matrix of size :math:`m' \times m'` where :math:`m' = n - m`.
+
+        If ``"barycenter"`` (default), the initial position is the barycenter
+        of the Birkhoff polytope (the space of doubly stochastic matrices).
+        This is a :math:`m' \times m'` matrix with all entries equal to
+        :math:`1 / m'`.
+
+        If ``"randomized"`` the initial search position is
+        :math:`P_0 = (J + K) / 2`, where :math:`J` is the barycenter and
+        :math:`K` is a random doubly stochastic matrix.
+    shuffle_input : bool (default: False)
+        Set to `True` to resolve degenerate gradients randomly. For
+        non-degenerate gradients this option has no effect.
+    maxiter : int, positive (default: 30)
+        Integer specifying the max number of Frank-Wolfe iterations performed.
+    tol : float (default: 0.03)
+        Tolerance for termination. Frank-Wolfe iteration terminates when
+        :math:`\frac{\|P_{i}-P_{i+1}\|_F}{\sqrt{m'}} \leq tol`,
+        where :math:`i` is the iteration number.
+
+    Returns
+    -------
+    res : OptimizeResult
+        `OptimizeResult` containing the following fields.
+
+        col_ind : 1-D array
+            Column indices corresponding to the best permutation found of the
+            nodes of `B`.
+        fun : float
+            The objective value of the solution.
+        nit : int
+            The number of Frank-Wolfe iterations performed.
+
+    Notes
+    -----
+    The algorithm may be sensitive to the initial permutation matrix (or
+    search "position") due to the possibility of several local minima
+    within the feasible region. A barycenter initialization is more likely to
+    result in a better solution than a single random initialization. However,
+    calling ``quadratic_assignment`` several times with different random
+    initializations may result in a better optimum at the cost of longer
+    total execution time.
+
+    Examples
+    --------
+    As mentioned above, a barycenter initialization often results in a better
+    solution than a single random initialization.
+
+    >>> from numpy.random import default_rng
+    >>> rng = default_rng()
+    >>> n = 15
+    >>> A = rng.random((n, n))
+    >>> B = rng.random((n, n))
+    >>> res = quadratic_assignment(A, B)  # FAQ is default method
+    >>> print(res.fun)
+    46.871483385480545  # may vary
+
+    >>> options = {"P0": "randomized"}  # use randomized initialization
+    >>> res = quadratic_assignment(A, B, options=options)
+    >>> print(res.fun)
+    47.224831071310625 # may vary
+
+    However, consider running from several randomized initializations and
+    keeping the best result.
+
+    >>> res = min([quadratic_assignment(A, B, options=options)
+    ...            for i in range(30)], key=lambda x: x.fun)
+    >>> print(res.fun)
+    46.671852533681516 # may vary
+
+    The '2-opt' method can be used to further refine the results.
+
+    >>> options = {"partial_guess": np.array([np.arange(n), res.col_ind]).T}
+    >>> res = quadratic_assignment(A, B, method="2opt", options=options)
+    >>> print(res.fun)
+    46.47160735721583 # may vary
+
+    References
+    ----------
+    .. [1] J.T. Vogelstein, J.M. Conroy, V. Lyzinski, L.J. Podrazik,
+           S.G. Kratzer, E.T. Harley, D.E. Fishkind, R.J. Vogelstein, and
+           C.E. Priebe, "Fast approximate quadratic programming for graph
+           matching," PLOS one, vol. 10, no. 4, p. e0121002, 2015,
+           :doi:`10.1371/journal.pone.0121002`
+
+    .. [2] D. Fishkind, S. Adali, H. Patsolic, L. Meng, D. Singh, V. Lyzinski,
+           C. Priebe, "Seeded graph matching", Pattern Recognit. 87 (2019):
+           203-215, :doi:`10.1016/j.patcog.2018.09.014`
+
+    .. [3] "Doubly stochastic Matrix," Wikipedia.
+           https://en.wikipedia.org/wiki/Doubly_stochastic_matrix
+
+    """
+
+    _check_unknown_options(unknown_options)
+
+    maxiter = operator.index(maxiter)
+
+    # ValueError check
+    A, B, partial_match = _common_input_validation(A, B, partial_match)
+
+    msg = None
+    if isinstance(P0, str) and P0 not in {'barycenter', 'randomized'}:
+        msg = "Invalid 'P0' parameter string"
+    elif maxiter <= 0:
+        msg = "'maxiter' must be a positive integer"
+    elif tol <= 0:
+        msg = "'tol' must be a positive float"
+    if msg is not None:
+        raise ValueError(msg)
+
+    rng = check_random_state(rng)
+    n = len(A)  # number of vertices in graphs
+    n_seeds = len(partial_match)  # number of seeds
+    n_unseed = n - n_seeds
+
+    # [1] Algorithm 1 Line 1 - choose initialization
+    if not isinstance(P0, str):
+        P0 = np.atleast_2d(P0)
+        if P0.shape != (n_unseed, n_unseed):
+            msg = "`P0` matrix must have shape m' x m', where m'=n-m"
+        elif ((P0 < 0).any() or not np.allclose(np.sum(P0, axis=0), 1)
+              or not np.allclose(np.sum(P0, axis=1), 1)):
+            msg = "`P0` matrix must be doubly stochastic"
+        if msg is not None:
+            raise ValueError(msg)
+    elif P0 == 'barycenter':
+        P0 = np.ones((n_unseed, n_unseed)) / n_unseed
+    elif P0 == 'randomized':
+        J = np.ones((n_unseed, n_unseed)) / n_unseed
+        # generate an m' x m' matrix where each entry is a random number in [0, 1]
+        # would use rand, but Generators don't have it
+        # would use random, but old mtrand.RandomStates don't have it
+        K = _doubly_stochastic(rng.uniform(size=(n_unseed, n_unseed)))
+        P0 = (J + K) / 2
+
+    # check trivial cases
+    if n == 0 or n_seeds == n:
+        score = _calc_score(A, B, partial_match[:, 1])
+        res = {"col_ind": partial_match[:, 1], "fun": score, "nit": 0}
+        return OptimizeResult(res)
+
+    obj_func_scalar = 1
+    if maximize:
+        obj_func_scalar = -1
+
+    nonseed_B = np.setdiff1d(range(n), partial_match[:, 1])
+    if shuffle_input:
+        nonseed_B = rng.permutation(nonseed_B)
+
+    nonseed_A = np.setdiff1d(range(n), partial_match[:, 0])
+    perm_A = np.concatenate([partial_match[:, 0], nonseed_A])
+    perm_B = np.concatenate([partial_match[:, 1], nonseed_B])
+
+    # definitions according to Seeded Graph Matching [2].
+    A11, A12, A21, A22 = _split_matrix(A[perm_A][:, perm_A], n_seeds)
+    B11, B12, B21, B22 = _split_matrix(B[perm_B][:, perm_B], n_seeds)
+    const_sum = A21 @ B21.T + A12.T @ B12
+
+    P = P0
+    # [1] Algorithm 1 Line 2 - loop while stopping criteria not met
+    for n_iter in range(1, maxiter+1):
+        # [1] Algorithm 1 Line 3 - compute the gradient of f(P) = -tr(APB^tP^t)
+        grad_fp = (const_sum + A22 @ P @ B22.T + A22.T @ P @ B22)
+        # [1] Algorithm 1 Line 4 - get direction Q by solving Eq. 8
+        _, cols = linear_sum_assignment(grad_fp, maximize=maximize)
+        Q = np.eye(n_unseed)[cols]
+
+        # [1] Algorithm 1 Line 5 - compute the step size
+        # Noting that e.g. trace(Ax) = trace(A)*x, expand and re-collect
+        # terms as ax**2 + bx + c. c does not affect location of minimum
+        # and can be ignored. Also, note that trace(A@B) = (A.T*B).sum();
+        # apply where possible for efficiency.
+        R = P - Q
+        b21 = ((R.T @ A21) * B21).sum()
+        b12 = ((R.T @ A12.T) * B12.T).sum()
+        AR22 = A22.T @ R
+        BR22 = B22 @ R.T
+        b22a = (AR22 * B22.T[cols]).sum()
+        b22b = (A22 * BR22[cols]).sum()
+        a = (AR22.T * BR22).sum()
+        b = b21 + b12 + b22a + b22b
+        # critical point of ax^2 + bx + c is at x = -b/(2*a)
+        # if a * obj_func_scalar > 0, it is a minimum
+        # if minimum is not in [0, 1], only endpoints need to be considered
+        if a*obj_func_scalar > 0 and 0 <= -b/(2*a) <= 1:
+            alpha = -b/(2*a)
+        else:
+            alpha = np.argmin([0, (b + a)*obj_func_scalar])
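+            # argmin returns 0 or 1, which is exactly the better endpoint x:
+            # ignoring c, the scaled quadratic is 0 at x=0 and a + b at x=1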
+
+        # [1] Algorithm 1 Line 6 - Update P
+        P_i1 = alpha * P + (1 - alpha) * Q
+        if np.linalg.norm(P - P_i1) / np.sqrt(n_unseed) < tol:
+            P = P_i1
+            break
+        P = P_i1
+    # [1] Algorithm 1 Line 7 - end main loop
+
+    # [1] Algorithm 1 Line 8 - project onto the set of permutation matrices
+    _, col = linear_sum_assignment(P, maximize=True)
+    perm = np.concatenate((np.arange(n_seeds), col + n_seeds))
+
+    unshuffled_perm = np.zeros(n, dtype=int)
+    unshuffled_perm[perm_A] = perm_B[perm]
+
+    score = _calc_score(A, B, unshuffled_perm)
+    res = {"col_ind": unshuffled_perm, "fun": score, "nit": n_iter}
+    return OptimizeResult(res)
+
+
+def _split_matrix(X, n):
+    # definitions according to Seeded Graph Matching [2].
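+    # X is partitioned as [[X11, X12], [X21, X22]], with the seed-to-seed
+    # block X11 of shape (n, n)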
+    upper, lower = X[:n], X[n:]
+    return upper[:, :n], upper[:, n:], lower[:, :n], lower[:, n:]
+
+
+def _doubly_stochastic(P, tol=1e-3):
+    # Adapted from @btaba implementation
+    # https://github.com/btaba/sinkhorn_knopp
+    # of Sinkhorn-Knopp algorithm
+    # https://projecteuclid.org/euclid.pjm/1102992505
+
+    max_iter = 1000
+    c = 1 / P.sum(axis=0)
+    r = 1 / (P @ c)
+    P_eps = P
+
+    for it in range(max_iter):
+        if ((np.abs(P_eps.sum(axis=1) - 1) < tol).all() and
+                (np.abs(P_eps.sum(axis=0) - 1) < tol).all()):
+            # All column/row sums ~= 1 within threshold
+            break
+
+        c = 1 / (r @ P)
+        r = 1 / (P @ c)
+        P_eps = r[:, None] * P * c
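+        # i.e. diag(r) @ P @ diag(c): rows rescaled by r, columns by c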
+
+    return P_eps
+
+
+def _quadratic_assignment_2opt(A, B, maximize=False, rng=None,
+                               partial_match=None,
+                               partial_guess=None,
+                               **unknown_options):
+    r"""Solve the quadratic assignment problem (approximately).
+
+    This function solves the Quadratic Assignment Problem (QAP) and the
+    Graph Matching Problem (GMP) using the 2-opt algorithm [1]_.
+
+    Quadratic assignment solves problems of the following form:
+
+    .. math::
+
+        \min_P & \ {\ \text{trace}(A^T P B P^T)}\\
+        \mbox{s.t. } & {P \in \mathcal{P}}\\
+
+    where :math:`\mathcal{P}` is the set of all permutation matrices,
+    and :math:`A` and :math:`B` are square matrices.
+
+    Graph matching tries to *maximize* the same objective function.
+    This algorithm can be thought of as finding the alignment of the
+    nodes of two graphs that minimizes the number of induced edge
+    disagreements, or, in the case of weighted graphs, the sum of squared
+    edge weight differences.
+
+    Note that the quadratic assignment problem is NP-hard. The results given
+    here are approximations and are not guaranteed to be optimal.
+
+    Parameters
+    ----------
+    A : 2-D array, square
+        The square matrix :math:`A` in the objective function above.
+    B : 2-D array, square
+        The square matrix :math:`B` in the objective function above.
+    method : str in {'faq', '2opt'} (default: 'faq')
+        The algorithm used to solve the problem. This is the method-specific
+        documentation for '2opt'.
+        :ref:`'faq' <optimize.qap-faq>` is also available.
+
+    Options
+    -------
+    maximize : bool (default: False)
+        Maximizes the objective function if ``True``.
+    rng : {None, int, `numpy.random.Generator`,
+           `numpy.random.RandomState`}, optional
+
+        If `rng` is None (or `np.random`), the `numpy.random.RandomState`
+        singleton is used.
+        If `rng` is an int, a new ``RandomState`` instance is used,
+        seeded with `rng`.
+        If `rng` is already a ``Generator`` or ``RandomState`` instance,
+        then that instance is used.
+    partial_match : 2-D array of integers, optional (default: None)
+        Fixes part of the matching. Also known as a "seed" [2]_.
+
+        Each row of `partial_match` specifies a pair of matched nodes: node
+        ``partial_match[i, 0]`` of `A` is matched to node
+        ``partial_match[i, 1]`` of `B`. The array has shape ``(m, 2)``,
+        where ``m`` is not greater than the number of nodes, :math:`n`.
+    partial_guess : 2-D array of integers, optional (default: None)
+        A guess for the matching between the two matrices. Unlike
+        `partial_match`, `partial_guess` does not fix the indices; they are
+        still free to be optimized.
+
+        Each row of `partial_guess` specifies a pair of matched nodes: node
+        ``partial_guess[i, 0]`` of `A` is matched to node
+        ``partial_guess[i, 1]`` of `B`. The array has shape ``(m, 2)``,
+        where ``m`` is not greater than the number of nodes, :math:`n`.
+
+    Returns
+    -------
+    res : OptimizeResult
+        `OptimizeResult` containing the following fields.
+
+        col_ind : 1-D array
+            Column indices corresponding to the best permutation found of the
+            nodes of `B`.
+        fun : float
+            The objective value of the solution.
+        nit : int
+            The number of iterations performed during optimization.
+
+    Notes
+    -----
+    This is a greedy algorithm that works similarly to bubble sort: beginning
+    with an initial permutation, it iteratively swaps pairs of indices to
+    improve the objective function until no such improvements are possible.
+
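+    A minimal usage sketch (an illustrative addition; ``A`` and ``B`` here
+    are arbitrary random matrices):
+
+    >>> import numpy as np
+    >>> from scipy.optimize import quadratic_assignment
+    >>> A = np.random.rand(5, 5)
+    >>> B = np.random.rand(5, 5)
+    >>> res = quadratic_assignment(A, B, method="2opt", options={"rng": 0})
+    >>> sorted(res.col_ind) == [0, 1, 2, 3, 4]  # col_ind is a permutation
+    True
+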
+    References
+    ----------
+    .. [1] "2-opt," Wikipedia.
+           https://en.wikipedia.org/wiki/2-opt
+
+    .. [2] D. Fishkind, S. Adali, H. Patsolic, L. Meng, D. Singh, V. Lyzinski,
+           C. Priebe, "Seeded graph matching", Pattern Recognit. 87 (2019):
+           203-215, :doi:`10.1016/j.patcog.2018.09.014`
+
+    """
+    _check_unknown_options(unknown_options)
+    rng = check_random_state(rng)
+    A, B, partial_match = _common_input_validation(A, B, partial_match)
+
+    N = len(A)
+    # check trivial cases
+    if N == 0 or partial_match.shape[0] == N:
+        score = _calc_score(A, B, partial_match[:, 1])
+        res = {"col_ind": partial_match[:, 1], "fun": score, "nit": 0}
+        return OptimizeResult(res)
+
+    if partial_guess is None:
+        partial_guess = np.array([[], []]).T
+    partial_guess = np.atleast_2d(partial_guess).astype(int)
+
+    msg = None
+    if partial_guess.shape[0] > A.shape[0]:
+        msg = ("`partial_guess` can have only as "
+               "many entries as there are nodes")
+    elif partial_guess.shape[1] != 2:
+        msg = "`partial_guess` must have two columns"
+    elif partial_guess.ndim != 2:
+        msg = "`partial_guess` must have exactly two dimensions"
+    elif (partial_guess < 0).any():
+        msg = "`partial_guess` must contain only positive indices"
+    elif (partial_guess >= len(A)).any():
+        msg = "`partial_guess` entries must be less than number of nodes"
+    elif (not len(set(partial_guess[:, 0])) == len(partial_guess[:, 0]) or
+          not len(set(partial_guess[:, 1])) == len(partial_guess[:, 1])):
+        msg = "`partial_guess` column entries must be unique"
+    if msg is not None:
+        raise ValueError(msg)
+
+    fixed_rows = None
+    if partial_match.size or partial_guess.size:
+        # use partial_match and partial_guess for initial permutation,
+        # but randomly permute the rest.
+        guess_rows = np.zeros(N, dtype=bool)
+        guess_cols = np.zeros(N, dtype=bool)
+        fixed_rows = np.zeros(N, dtype=bool)
+        fixed_cols = np.zeros(N, dtype=bool)
+        perm = np.zeros(N, dtype=int)
+
+        rg, cg = partial_guess.T
+        guess_rows[rg] = True
+        guess_cols[cg] = True
+        perm[guess_rows] = cg
+
+        # match overrides guess
+        rf, cf = partial_match.T
+        fixed_rows[rf] = True
+        fixed_cols[cf] = True
+        perm[fixed_rows] = cf
+
+        random_rows = ~fixed_rows & ~guess_rows
+        random_cols = ~fixed_cols & ~guess_cols
+        perm[random_rows] = rng.permutation(np.arange(N)[random_cols])
+    else:
+        perm = rng.permutation(np.arange(N))
+
+    best_score = _calc_score(A, B, perm)
+
+    i_free = np.arange(N)
+    if fixed_rows is not None:
+        i_free = i_free[~fixed_rows]
+
+    better = operator.gt if maximize else operator.lt
+    n_iter = 0
+    done = False
+    while not done:
+        # equivalent to nested for loops i in range(N), j in range(i, N)
+        for i, j in itertools.combinations_with_replacement(i_free, 2):
+            n_iter += 1
+            perm[i], perm[j] = perm[j], perm[i]
+            score = _calc_score(A, B, perm)
+            if better(score, best_score):
+                best_score = score
+                break
+            # faster to swap back than to create a new list every time
+            perm[i], perm[j] = perm[j], perm[i]
+        else:  # no swaps made
+            done = True
+
+    res = {"col_ind": perm, "fun": best_score, "nit": n_iter}
+    return OptimizeResult(res)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_remove_redundancy.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_remove_redundancy.py
new file mode 100644
index 00000000..54f695ef
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_remove_redundancy.py
@@ -0,0 +1,522 @@
+"""
+Routines for removing redundant (linearly dependent) equations from linear
+programming equality constraints.
+"""
+# Author: Matt Haberland
+
+import numpy as np
+from scipy.linalg import svd
+from scipy.linalg.interpolative import interp_decomp
+import scipy
+from scipy.linalg.blas import dtrsm
+
+
+def _row_count(A):
+    """
+    Counts the number of nonzeros in each row of input array A.
+    Nonzeros are defined as any element with absolute value greater than
+    tol = 1e-13. This value should probably be an input to the function.
+
+    Parameters
+    ----------
+    A : 2-D array
+        An array representing a matrix
+
+    Returns
+    -------
+    rowcount : 1-D array
+        Number of nonzeros in each row of A
+
+    """
+    tol = 1e-13
+    return np.array((abs(A) > tol).sum(axis=1)).flatten()
+
+
+def _get_densest(A, eligibleRows):
+    """
+    Returns the index of the densest row of A. Ignores rows that are not
+    eligible for consideration.
+
+    Parameters
+    ----------
+    A : 2-D array
+        An array representing a matrix
+    eligibleRows : 1-D logical array
+        Values indicate whether the corresponding row of A is eligible
+        to be considered
+
+    Returns
+    -------
+    i_densest : int
+        Index of the densest row in A eligible for consideration
+
+    """
+    rowCounts = _row_count(A)
+    return np.argmax(rowCounts * eligibleRows)
+
+
+def _remove_zero_rows(A, b):
+    """
+    Eliminates trivial equations from the system of equations defined by
+    Ax = b and identifies trivial infeasibilities.
+
+    Parameters
+    ----------
+    A : 2-D array
+        An array representing the left-hand side of a system of equations
+    b : 1-D array
+        An array representing the right-hand side of a system of equations
+
+    Returns
+    -------
+    A : 2-D array
+        An array representing the left-hand side of a system of equations
+    b : 1-D array
+        An array representing the right-hand side of a system of equations
+    status: int
+        An integer indicating the status of the removal operation
+        0: No infeasibility identified
+        2: Trivially infeasible
+    message : str
+        A string descriptor of the exit status of the optimization.
+
+    """
+    status = 0
+    message = ""
+    i_zero = _row_count(A) == 0
+    A = A[np.logical_not(i_zero), :]
+    if not np.allclose(b[i_zero], 0):
+        status = 2
+        message = "There is a zero row in A_eq with a nonzero corresponding " \
+                  "entry in b_eq. The problem is infeasible."
+    b = b[np.logical_not(i_zero)]
+    return A, b, status, message
+
+
+def bg_update_dense(plu, perm_r, v, j):
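+    # Update of the packed LU factors after basis column j is replaced by v
+    # (the "Bartels-Golub update idea" noted in _remove_redundancy_pivot_dense):
+    # solve the unit lower triangular system L @ u = v[perm_r] via dtrsm;
+    # u[:j+1] becomes the new column of U and the remaining entries update
+    # the multipliers below the diagonal.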
+    LU, p = plu
+
+    vperm = v[perm_r]
+    u = dtrsm(1, LU, vperm, lower=1, diag=1)
+    LU[:j+1, j] = u[:j+1]
+    l = u[j+1:]
+    piv = LU[j, j]
+    LU[j+1:, j] += (l/piv)
+    return LU, p
+
+
+def _remove_redundancy_pivot_dense(A, rhs, true_rank=None):
+    """
+    Eliminates redundant equations from the system of equations defined by
+    Ax = b and identifies infeasibilities.
+
+    Parameters
+    ----------
+    A : 2-D array
+        A matrix representing the left-hand side of a system of equations
+    rhs : 1-D array
+        An array representing the right-hand side of a system of equations
+
+    Returns
+    -------
+    A : 2-D array
+        A matrix representing the left-hand side of a system of equations
+    rhs : 1-D array
+        An array representing the right-hand side of a system of equations
+    status: int
+        An integer indicating the status of the system
+        0: No infeasibility identified
+        2: Trivially infeasible
+    message : str
+        A string descriptor of the exit status of the optimization.
+
+    References
+    ----------
+    .. [2] Andersen, Erling D. "Finding all linearly dependent rows in
+           large-scale linear programming." Optimization Methods and Software
+           6.3 (1995): 219-227.
+
+    """
+    tolapiv = 1e-8
+    tolprimal = 1e-8
+    status = 0
+    message = ""
+    inconsistent = ("There is a linear combination of rows of A_eq that "
+                    "results in zero, suggesting a redundant constraint. "
+                    "However the same linear combination of b_eq is "
+                    "nonzero, suggesting that the constraints conflict "
+                    "and the problem is infeasible.")
+    A, rhs, status, message = _remove_zero_rows(A, rhs)
+
+    if status != 0:
+        return A, rhs, status, message
+
+    m, n = A.shape
+
+    v = list(range(m))      # Artificial column indices.
+    b = list(v)             # Basis column indices.
+    # This is better as a list than a set because column order of basis matrix
+    # needs to be consistent.
+    d = []                  # Indices of dependent rows
+    perm_r = None
+
+    A_orig = A
+    A = np.zeros((m, m + n), order='F')
+    np.fill_diagonal(A, 1)
+    A[:, m:] = A_orig
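+    # A is now [I | A_orig]; the m artificial columns form the initial basis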
+    e = np.zeros(m)
+
+    js_candidates = np.arange(m, m+n, dtype=int)  # candidate columns for basis
+    # manual masking was faster than masked array
+    js_mask = np.ones(js_candidates.shape, dtype=bool)
+
+    # Implements basic algorithm from [2]
+    # Uses some of the suggested improvements (removing zero rows and
+    # Bartels-Golub update idea).
+    # Removing column singletons would be easy, but it is not as important
+    # because the procedure is performed only on the equality constraint
+    # matrix from the original problem - not on the canonical form matrix,
+    # which would have many more column singletons due to slack variables
+    # from the inequality constraints.
+    # The thoughts on "crashing" the initial basis are only really useful if
+    # the matrix is sparse.
+
+    lu = np.eye(m, order='F'), np.arange(m)  # initial LU is trivial
+    perm_r = lu[1]
+    for i in v:
+
+        e[i] = 1
+        if i > 0:
+            e[i-1] = 0
+
+        try:  # fails for i==0 and any time it gets ill-conditioned
+            j = b[i-1]
+            lu = bg_update_dense(lu, perm_r, A[:, j], i-1)
+        except Exception:
+            lu = scipy.linalg.lu_factor(A[:, b])
+            LU, p = lu
+            perm_r = list(range(m))
+            for i1, i2 in enumerate(p):
+                perm_r[i1], perm_r[i2] = perm_r[i2], perm_r[i1]
+
+        pi = scipy.linalg.lu_solve(lu, e, trans=1)
+
+        js = js_candidates[js_mask]
+        batch = 50
+
+        # This is a tiny bit faster than looping over columns individually,
+        # like for j in js: if abs(A[:,j].transpose().dot(pi)) > tolapiv:
+        for j_index in range(0, len(js), batch):
+            j_indices = js[j_index: min(j_index+batch, len(js))]
+
+            c = abs(A[:, j_indices].transpose().dot(pi))
+            if (c > tolapiv).any():
+                j = js[j_index + np.argmax(c)]  # very independent column
+                b[i] = j
+                js_mask[j-m] = False
+                break
+        else:
+            bibar = pi.T.dot(rhs.reshape(-1, 1))
+            bnorm = np.linalg.norm(rhs)
+            if abs(bibar)/(1+bnorm) > tolprimal:  # inconsistent
+                status = 2
+                message = inconsistent
+                return A_orig, rhs, status, message
+            else:  # dependent
+                d.append(i)
+                if true_rank is not None and len(d) == m - true_rank:
+                    break   # found all redundancies
+
+    keep = set(range(m))
+    keep = list(keep - set(d))
+    return A_orig[keep, :], rhs[keep], status, message
+
+
+def _remove_redundancy_pivot_sparse(A, rhs):
+    """
+    Eliminates redundant equations from the system of equations defined by
+    Ax = b and identifies infeasibilities.
+
+    Parameters
+    ----------
+    A : 2-D sparse matrix
+        A matrix representing the left-hand side of a system of equations
+    rhs : 1-D array
+        An array representing the right-hand side of a system of equations
+
+    Returns
+    -------
+    A : 2-D sparse matrix
+        A matrix representing the left-hand side of a system of equations
+    rhs : 1-D array
+        An array representing the right-hand side of a system of equations
+    status: int
+        An integer indicating the status of the system
+        0: No infeasibility identified
+        2: Trivially infeasible
+    message : str
+        A string descriptor of the exit status of the optimization.
+
+    References
+    ----------
+    .. [2] Andersen, Erling D. "Finding all linearly dependent rows in
+           large-scale linear programming." Optimization Methods and Software
+           6.3 (1995): 219-227.
+
+    """
+
+    tolapiv = 1e-8
+    tolprimal = 1e-8
+    status = 0
+    message = ""
+    inconsistent = ("There is a linear combination of rows of A_eq that "
+                    "results in zero, suggesting a redundant constraint. "
+                    "However the same linear combination of b_eq is "
+                    "nonzero, suggesting that the constraints conflict "
+                    "and the problem is infeasible.")
+    A, rhs, status, message = _remove_zero_rows(A, rhs)
+
+    if status != 0:
+        return A, rhs, status, message
+
+    m, n = A.shape
+
+    v = list(range(m))      # Artificial column indices.
+    b = list(v)             # Basis column indices.
+    # This is better as a list than a set because column order of basis matrix
+    # needs to be consistent.
+    k = set(range(m, m+n))  # Structural column indices.
+    d = []                  # Indices of dependent rows
+
+    A_orig = A
+    A = scipy.sparse.hstack((scipy.sparse.eye(m), A)).tocsc()
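+    # prepend an m x m identity so the artificial columns form the initial basis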
+    e = np.zeros(m)
+
+    # Implements basic algorithm from [2]
+    # Uses only one of the suggested improvements (removing zero rows).
+    # Removing column singletons would be easy, but it is not as important
+    # because the procedure is performed only on the equality constraint
+    # matrix from the original problem - not on the canonical form matrix,
+    # which would have many more column singletons due to slack variables
+    # from the inequality constraints.
+    # The thoughts on "crashing" the initial basis sound useful, but the
+    # description of the procedure seems to assume a lot of familiarity with
+    # the subject; it is not very explicit. I already went through enough
+    # trouble getting the basic algorithm working, so I was not interested in
+    # trying to decipher this, too. (Overall, the paper is fraught with
+    # mistakes and ambiguities - which is strange, because the rest of
+    # Andersen's papers are quite good.)
+    # I tried and tried and tried to improve performance using the
+    # Bartels-Golub update. It works, but it's only practical if the LU
+    # factorization can be specialized as described, and that is not possible
+    # until the SciPy SuperLU interface permits control over column
+    # permutation - see issue #7700.
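+    # Editorial note on the loop below: pi = B^{-T} e_i, so a structural
+    # column j with |A[:, j]^T pi| > tolapiv is independent of the rows
+    # pivoted so far and replaces artificial column i in the basis; if no
+    # such column exists, row i is dependent and removable, unless the same
+    # combination of rhs entries is nonzero, in which case the system is
+    # inconsistent.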
+
+    for i in v:
+        B = A[:, b]
+
+        e[i] = 1
+        if i > 0:
+            e[i-1] = 0
+
+        pi = scipy.sparse.linalg.spsolve(B.transpose(), e).reshape(-1, 1)
+
+        js = list(k-set(b))  # not efficient, but this is not the time sink...
+
+        # Due to overhead, it tends to be faster (for problems tested) to
+        # compute the full matrix-vector product rather than individual
+        # vector-vector products (with the chance of terminating as soon
+        # as any are nonzero). For very large matrices, it might be worth
+        # it to compute, say, 100 or 1000 at a time and stop when a nonzero
+        # is found.
+
+        c = (np.abs(A[:, js].transpose().dot(pi)) > tolapiv).nonzero()[0]
+        if len(c) > 0:  # independent
+            j = js[c[0]]
+            # in a previous commit, the previous line was changed to choose
+            # index j corresponding with the maximum dot product.
+            # While this avoided issues with almost
+            # singular matrices, it slowed the routine in most NETLIB tests.
+            # I think this is because these columns were denser than the
+            # first column with nonzero dot product (c[0]).
+            # It would be nice to have a heuristic that balances sparsity with
+            # high dot product, but I don't think it's worth the time to
+            # develop one right now. Bartels-Golub update is a much higher
+            # priority.
+            b[i] = j  # replace artificial column
+        else:
+            bibar = pi.T.dot(rhs.reshape(-1, 1))
+            bnorm = np.linalg.norm(rhs)
+            if abs(bibar)/(1 + bnorm) > tolprimal:
+                status = 2
+                message = inconsistent
+                return A_orig, rhs, status, message
+            else:  # dependent
+                d.append(i)
+
+    keep = set(range(m))
+    keep = list(keep - set(d))
+    return A_orig[keep, :], rhs[keep], status, message
+
+
+def _remove_redundancy_svd(A, b):
+    """
+    Eliminates redundant equations from system of equations defined by Ax = b
+    and identifies infeasibilities.
+
+    Parameters
+    ----------
+    A : 2-D array
+        An array representing the left-hand side of a system of equations
+    b : 1-D array
+        An array representing the right-hand side of a system of equations
+
+    Returns
+    -------
+    A : 2-D array
+        An array representing the left-hand side of a system of equations
+    b : 1-D array
+        An array representing the right-hand side of a system of equations
+    status: int
+        An integer indicating the status of the system
+        0: No infeasibility identified
+        2: Trivially infeasible
+    message : str
+        A string descriptor of the exit status of the optimization.
+
+    References
+    ----------
+    .. [2] Andersen, Erling D. "Finding all linearly dependent rows in
+           large-scale linear programming." Optimization Methods and Software
+           6.3 (1995): 219-227.
+
+    """
+
+    A, b, status, message = _remove_zero_rows(A, b)
+
+    if status != 0:
+        return A, b, status, message
+
+    U, s, Vh = svd(A)
+    eps = np.finfo(float).eps
+    tol = s.max() * max(A.shape) * eps
+
+    m, n = A.shape
+    s_min = s[-1] if m <= n else 0
+
+    # this algorithm is faster than that of [2] when the nullspace is small
+    # but it could probably be improved by randomized algorithms and with
+    # a sparse implementation.
+    # it relies on repeated singular value decomposition to find linearly
+    # dependent rows (as identified by columns of U that correspond with zero
+    # singular values). Unfortunately, only one row can be removed per
+    # decomposition (I tried otherwise; doing so can cause problems.)
+    # It would be nice if we could do truncated SVD like sp.sparse.linalg.svds
+    # but that function is unreliable at finding singular values near zero.
+    # Finding max eigenvalue L of A A^T, then largest eigenvalue (and
+    # associated eigenvector) of -A A^T + L I (I is identity) via power
+    # iteration would also work in theory, but is only efficient if the
+    # smallest nonzero eigenvalue of A A^T is close to the largest nonzero
+    # eigenvalue.
+
+    while abs(s_min) < tol:
+        v = U[:, -1]  # TODO: return these so user can eliminate from problem?
+        # rows need to be represented with significant magnitude in v
+        eligibleRows = np.abs(v) > tol * 10e6
+        if not np.any(eligibleRows) or np.any(np.abs(v.dot(A)) > tol):
+            status = 4
+            message = ("Due to numerical issues, redundant equality "
+                       "constraints could not be removed automatically. "
+                       "Try providing your constraint matrices as sparse "
+                       "matrices to activate sparse presolve, try turning "
+                       "off redundancy removal, or try turning off presolve "
+                       "altogether.")
+            break
+        if np.any(np.abs(v.dot(b)) > tol * 100):  # factor of 100 to fix 10038 and 10349
+            status = 2
+            message = ("There is a linear combination of rows of A_eq that "
+                       "results in zero, suggesting a redundant constraint. "
+                       "However the same linear combination of b_eq is "
+                       "nonzero, suggesting that the constraints conflict "
+                       "and the problem is infeasible.")
+            break
+
+        i_remove = _get_densest(A, eligibleRows)
+        A = np.delete(A, i_remove, axis=0)
+        b = np.delete(b, i_remove)
+        U, s, Vh = svd(A)
+        m, n = A.shape
+        s_min = s[-1] if m <= n else 0
+
+    return A, b, status, message
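+
+# Editorial illustration (not SciPy code): for
+#     A = np.array([[1., 0.], [0., 1.], [1., 1.]])
+# m > n, so the rows are necessarily dependent; the last column of U from
+# svd(A) is proportional to [1, 1, -1], i.e. row0 + row1 - row2 = 0, and the
+# loop above deletes the densest eligible row (here row 2).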
+
+
+def _remove_redundancy_id(A, rhs, rank=None, randomized=True):
+    """Eliminates redundant equations from a system of equations.
+
+    Eliminates redundant equations from system of equations defined by Ax = b
+    and identifies infeasibilities.
+
+    Parameters
+    ----------
+    A : 2-D array
+        An array representing the left-hand side of a system of equations
+    rhs : 1-D array
+        An array representing the right-hand side of a system of equations
+    rank : int, optional
+        The rank of A
+    randomized: bool, optional
+        True for randomized interpolative decomposition
+
+    Returns
+    -------
+    A : 2-D array
+        An array representing the left-hand side of a system of equations
+    rhs : 1-D array
+        An array representing the right-hand side of a system of equations
+    status: int
+        An integer indicating the status of the system
+        0: No infeasibility identified
+        2: Trivially infeasible
+    message : str
+        A string descriptor of the exit status of the optimization.
+
+    """
+
+    status = 0
+    message = ""
+    inconsistent = ("There is a linear combination of rows of A_eq that "
+                    "results in zero, suggesting a redundant constraint. "
+                    "However the same linear combination of b_eq is "
+                    "nonzero, suggesting that the constraints conflict "
+                    "and the problem is infeasible.")
+
+    A, rhs, status, message = _remove_zero_rows(A, rhs)
+
+    if status != 0:
+        return A, rhs, status, message
+
+    m, n = A.shape
+
+    k = rank
+    if rank is None:
+        k = np.linalg.matrix_rank(A)
+
+    idx, proj = interp_decomp(A.T, k, rand=randomized)
+
+    # first k entries in idx are indices of the independent rows
+    # remaining entries are the indices of the m-k dependent rows
+    # proj provides linear combinations of rows of A2 that form the
+    # remaining m-k (dependent) rows. The same linear combination of entries
+    # in rhs2 must give the remaining m-k entries. If not, the system is
+    # inconsistent, and the problem is infeasible.
+    if not np.allclose(rhs[idx[:k]] @ proj, rhs[idx[k:]]):
+        status = 2
+        message = inconsistent
+
+    # sort indices because the other redundancy removal routines leave rows
+    # in original order and tests were written with that in mind
+    idx = sorted(idx[:k])
+    A2 = A[idx, :]
+    rhs2 = rhs[idx]
+    return A2, rhs2, status, message
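+
+# Editorial sketch of the decomposition used above (interp_decomp as imported
+# in this module): for A = np.array([[1., 0.], [0., 1.], [1., 1.]]) and k = 2,
+#     idx, proj = interp_decomp(A.T, k, rand=False)
+# gives idx whose first k entries index independent rows, and proj such that
+# A[idx[k:]] is (approximately) proj.T @ A[idx[:k]]; applying the same
+# combination to rhs is exactly what the np.allclose check above verifies.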
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_root.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_root.py
new file mode 100644
index 00000000..8dc02f19
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_root.py
@@ -0,0 +1,717 @@
+"""
+Unified interfaces to root finding algorithms.
+
+Functions
+---------
+- root : find a root of a vector function.
+"""
+__all__ = ['root']
+
+import numpy as np
+
+from warnings import warn
+
+from ._optimize import MemoizeJac, OptimizeResult, _check_unknown_options
+from ._minpack_py import _root_hybr, leastsq
+from ._spectral import _root_df_sane
+from . import _nonlin as nonlin
+
+ROOT_METHODS = ['hybr', 'lm', 'broyden1', 'broyden2', 'anderson',
+                'linearmixing', 'diagbroyden', 'excitingmixing', 'krylov',
+                'df-sane']
+
+
+def root(fun, x0, args=(), method='hybr', jac=None, tol=None, callback=None,
+         options=None):
+    r"""
+    Find a root of a vector function.
+
+    Parameters
+    ----------
+    fun : callable
+        A vector function to find a root of.
+    x0 : ndarray
+        Initial guess.
+    args : tuple, optional
+        Extra arguments passed to the objective function and its Jacobian.
+    method : str, optional
+        Type of solver. Should be one of
+
+            - 'hybr'             :ref:`(see here) <optimize.root-hybr>`
+            - 'lm'               :ref:`(see here) <optimize.root-lm>`
+            - 'broyden1'         :ref:`(see here) <optimize.root-broyden1>`
+            - 'broyden2'         :ref:`(see here) <optimize.root-broyden2>`
+            - 'anderson'         :ref:`(see here) <optimize.root-anderson>`
+            - 'linearmixing'     :ref:`(see here) <optimize.root-linearmixing>`
+            - 'diagbroyden'      :ref:`(see here) <optimize.root-diagbroyden>`
+            - 'excitingmixing'   :ref:`(see here) <optimize.root-excitingmixing>`
+            - 'krylov'           :ref:`(see here) <optimize.root-krylov>`
+            - 'df-sane'          :ref:`(see here) <optimize.root-dfsane>`
+
+    jac : bool or callable, optional
+        If `jac` is a Boolean and is True, `fun` is assumed to return the
+        value of Jacobian along with the objective function. If False, the
+        Jacobian will be estimated numerically.
+        `jac` can also be a callable returning the Jacobian of `fun`. In
+        this case, it must accept the same arguments as `fun`.
+    tol : float, optional
+        Tolerance for termination. For detailed control, use solver-specific
+        options.
+    callback : function, optional
+        Optional callback function. It is called on every iteration as
+        ``callback(x, f)`` where `x` is the current solution and `f`
+        the corresponding residual. It is supported by all methods except
+        'hybr' and 'lm'.
+    options : dict, optional
+        A dictionary of solver options. E.g., `xtol` or `maxiter`, see
+        :obj:`show_options()` for details.
+
+    Returns
+    -------
+    sol : OptimizeResult
+        The solution represented as an ``OptimizeResult`` object.
+        Important attributes are: ``x`` the solution array, ``success`` a
+        Boolean flag indicating if the algorithm exited successfully and
+        ``message`` which describes the cause of the termination. See
+        `OptimizeResult` for a description of other attributes.
+
+    See also
+    --------
+    show_options : Additional options accepted by the solvers
+
+    Notes
+    -----
+    This section describes the available solvers that can be selected by the
+    'method' parameter. The default method is *hybr*.
+
+    Method *hybr* uses a modification of the Powell hybrid method as
+    implemented in MINPACK [1]_.
+
+    Method *lm* solves the system of nonlinear equations in a least squares
+    sense using a modification of the Levenberg-Marquardt algorithm as
+    implemented in MINPACK [1]_.
+
+    Method *df-sane* is a derivative-free spectral method. [3]_
+
+    Methods *broyden1*, *broyden2*, *anderson*, *linearmixing*,
+    *diagbroyden*, *excitingmixing*, *krylov* are inexact Newton methods,
+    with backtracking or full line searches [2]_. Each method corresponds
+    to a particular Jacobian approximation.
+
+    - Method *broyden1* uses Broyden's first Jacobian approximation; it is
+      known as Broyden's good method.
+    - Method *broyden2* uses Broyden's second Jacobian approximation; it
+      is known as Broyden's bad method.
+    - Method *anderson* uses (extended) Anderson mixing.
+    - Method *krylov* uses a Krylov approximation for the inverse Jacobian.
+      It is suitable for large-scale problems.
+    - Method *diagbroyden* uses diagonal Broyden Jacobian approximation.
+    - Method *linearmixing* uses a scalar Jacobian approximation.
+    - Method *excitingmixing* uses a tuned diagonal Jacobian
+      approximation.
+
+    .. warning::
+
+        The algorithms implemented for methods *diagbroyden*,
+        *linearmixing* and *excitingmixing* may be useful for specific
+        problems, but whether they will work may depend strongly on the
+        problem.
+
+    .. versionadded:: 0.11.0
+
+    References
+    ----------
+    .. [1] More, Jorge J., Burton S. Garbow, and Kenneth E. Hillstrom.
+       1980. User Guide for MINPACK-1.
+    .. [2] C. T. Kelley. 1995. Iterative Methods for Linear and Nonlinear
+       Equations. Society for Industrial and Applied Mathematics.
+       <https://archive.siam.org/books/kelley/fr16/>
+    .. [3] W. La Cruz, J.M. Martinez, M. Raydan. Math. Comp. 75, 1429 (2006).
+
+    Examples
+    --------
+    The following functions define a system of nonlinear equations and its
+    jacobian.
+
+    >>> import numpy as np
+    >>> def fun(x):
+    ...     return [x[0]  + 0.5 * (x[0] - x[1])**3 - 1.0,
+    ...             0.5 * (x[1] - x[0])**3 + x[1]]
+
+    >>> def jac(x):
+    ...     return np.array([[1 + 1.5 * (x[0] - x[1])**2,
+    ...                       -1.5 * (x[0] - x[1])**2],
+    ...                      [-1.5 * (x[1] - x[0])**2,
+    ...                       1 + 1.5 * (x[1] - x[0])**2]])
+
+    A solution can be obtained as follows.
+
+    >>> from scipy import optimize
+    >>> sol = optimize.root(fun, [0, 0], jac=jac, method='hybr')
+    >>> sol.x
+    array([ 0.8411639,  0.1588361])
+
+    **Large problem**
+
+    Suppose that we needed to solve the following integrodifferential
+    equation on the square :math:`[0,1]\times[0,1]`:
+
+    .. math::
+
+       \nabla^2 P = 10 \left(\int_0^1\int_0^1\cosh(P)\,dx\,dy\right)^2
+
+    with :math:`P(x,1) = 1` and :math:`P=0` elsewhere on the boundary of
+    the square.
+
+    The solution can be found using the ``method='krylov'`` solver:
+
+    >>> from scipy import optimize
+    >>> # parameters
+    >>> nx, ny = 75, 75
+    >>> hx, hy = 1./(nx-1), 1./(ny-1)
+
+    >>> P_left, P_right = 0, 0
+    >>> P_top, P_bottom = 1, 0
+
+    >>> def residual(P):
+    ...    d2x = np.zeros_like(P)
+    ...    d2y = np.zeros_like(P)
+    ...
+    ...    d2x[1:-1] = (P[2:]   - 2*P[1:-1] + P[:-2]) / hx/hx
+    ...    d2x[0]    = (P[1]    - 2*P[0]    + P_left)/hx/hx
+    ...    d2x[-1]   = (P_right - 2*P[-1]   + P[-2])/hx/hx
+    ...
+    ...    d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy
+    ...    d2y[:,0]    = (P[:,1]  - 2*P[:,0]    + P_bottom)/hy/hy
+    ...    d2y[:,-1]   = (P_top   - 2*P[:,-1]   + P[:,-2])/hy/hy
+    ...
+    ...    return d2x + d2y - 10*np.cosh(P).mean()**2
+
+    >>> guess = np.zeros((nx, ny), float)
+    >>> sol = optimize.root(residual, guess, method='krylov')
+    >>> print('Residual: %g' % abs(residual(sol.x)).max())
+    Residual: 5.7972e-06  # may vary
+
+    >>> import matplotlib.pyplot as plt
+    >>> x, y = np.mgrid[0:1:(nx*1j), 0:1:(ny*1j)]
+    >>> plt.pcolormesh(x, y, sol.x, shading='gouraud')
+    >>> plt.colorbar()
+    >>> plt.show()
+
+    """
+    if not isinstance(args, tuple):
+        args = (args,)
+
+    meth = method.lower()
+    if options is None:
+        options = {}
+
+    if callback is not None and meth in ('hybr', 'lm'):
+        warn('Method %s does not accept callback.' % method,
+             RuntimeWarning)
+
+    # fun also returns the Jacobian
+    if not callable(jac) and meth in ('hybr', 'lm'):
+        if bool(jac):
+            fun = MemoizeJac(fun)
+            jac = fun.derivative
+        else:
+            jac = None
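+    # e.g., with jac=True, `fun` is expected to return the pair (f(x), J(x));
+    # the MemoizeJac wrapper caches one evaluation and exposes the Jacobian
+    # part as `fun.derivative` (editorial note).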
+
+    # set default tolerances
+    if tol is not None:
+        options = dict(options)
+        if meth in ('hybr', 'lm'):
+            options.setdefault('xtol', tol)
+        elif meth in ('df-sane',):
+            options.setdefault('ftol', tol)
+        elif meth in ('broyden1', 'broyden2', 'anderson', 'linearmixing',
+                      'diagbroyden', 'excitingmixing', 'krylov'):
+            options.setdefault('xtol', tol)
+            options.setdefault('xatol', np.inf)
+            options.setdefault('ftol', np.inf)
+            options.setdefault('fatol', np.inf)
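+            # e.g., root(fun, x0, method='broyden1', tol=1e-10) maps tol to
+            # xtol=1e-10 and relaxes the remaining criteria to inf, unless
+            # they are already given in `options` (editorial note).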
+
+    if meth == 'hybr':
+        sol = _root_hybr(fun, x0, args=args, jac=jac, **options)
+    elif meth == 'lm':
+        sol = _root_leastsq(fun, x0, args=args, jac=jac, **options)
+    elif meth == 'df-sane':
+        _warn_jac_unused(jac, method)
+        sol = _root_df_sane(fun, x0, args=args, callback=callback,
+                            **options)
+    elif meth in ('broyden1', 'broyden2', 'anderson', 'linearmixing',
+                  'diagbroyden', 'excitingmixing', 'krylov'):
+        _warn_jac_unused(jac, method)
+        sol = _root_nonlin_solve(fun, x0, args=args, jac=jac,
+                                 _method=meth, _callback=callback,
+                                 **options)
+    else:
+        raise ValueError('Unknown solver %s' % method)
+
+    return sol
+
+
+def _warn_jac_unused(jac, method):
+    if jac is not None:
+        warn('Method %s does not use the jacobian (jac).' % (method,),
+             RuntimeWarning)
+
+
+def _root_leastsq(fun, x0, args=(), jac=None,
+                  col_deriv=0, xtol=1.49012e-08, ftol=1.49012e-08,
+                  gtol=0.0, maxiter=0, eps=0.0, factor=100, diag=None,
+                  **unknown_options):
+    """
+    Solve for least squares with Levenberg-Marquardt
+
+    Options
+    -------
+    col_deriv : bool
+        non-zero to specify that the Jacobian function computes derivatives
+        down the columns (faster, because there is no transpose operation).
+    ftol : float
+        Relative error desired in the sum of squares.
+    xtol : float
+        Relative error desired in the approximate solution.
+    gtol : float
+        Orthogonality desired between the function vector and the columns
+        of the Jacobian.
+    maxiter : int
+        The maximum number of calls to the function. If zero, then
+        100*(N+1) is the maximum where N is the number of elements in x0.
+    eps : float
+        A suitable step length for the forward-difference approximation of
+        the Jacobian (when `jac` is None; passed to MINPACK as ``epsfcn``).
+        If `eps` is less than the machine precision, it is assumed that the
+        relative errors in the functions are of the order of the machine
+        precision.
+    factor : float
+        A parameter determining the initial step bound
+        (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
+    diag : sequence
+        N positive entries that serve as scale factors for the variables.
+    """
+
+    _check_unknown_options(unknown_options)
+    x, cov_x, info, msg, ier = leastsq(fun, x0, args=args, Dfun=jac,
+                                       full_output=True,
+                                       col_deriv=col_deriv, xtol=xtol,
+                                       ftol=ftol, gtol=gtol,
+                                       maxfev=maxiter, epsfcn=eps,
+                                       factor=factor, diag=diag)
+    sol = OptimizeResult(x=x, message=msg, status=ier,
+                         success=ier in (1, 2, 3, 4), cov_x=cov_x,
+                         fun=info.pop('fvec'))
+    sol.update(info)
+    return sol
+
+
+def _root_nonlin_solve(fun, x0, args=(), jac=None,
+                       _callback=None, _method=None,
+                       nit=None, disp=False, maxiter=None,
+                       ftol=None, fatol=None, xtol=None, xatol=None,
+                       tol_norm=None, line_search='armijo', jac_options=None,
+                       **unknown_options):
+    _check_unknown_options(unknown_options)
+
+    f_tol = fatol
+    f_rtol = ftol
+    x_tol = xatol
+    x_rtol = xtol
+    verbose = disp
+    if jac_options is None:
+        jac_options = dict()
+
+    jacobian = {'broyden1': nonlin.BroydenFirst,
+                'broyden2': nonlin.BroydenSecond,
+                'anderson': nonlin.Anderson,
+                'linearmixing': nonlin.LinearMixing,
+                'diagbroyden': nonlin.DiagBroyden,
+                'excitingmixing': nonlin.ExcitingMixing,
+                'krylov': nonlin.KrylovJacobian
+                }[_method]
+
+    if args:
+        if jac:
+            def f(x):
+                return fun(x, *args)[0]
+        else:
+            def f(x):
+                return fun(x, *args)
+    else:
+        f = fun
+
+    x, info = nonlin.nonlin_solve(f, x0, jacobian=jacobian(**jac_options),
+                                  iter=nit, verbose=verbose,
+                                  maxiter=maxiter, f_tol=f_tol,
+                                  f_rtol=f_rtol, x_tol=x_tol,
+                                  x_rtol=x_rtol, tol_norm=tol_norm,
+                                  line_search=line_search,
+                                  callback=_callback, full_output=True,
+                                  raise_exception=False)
+    sol = OptimizeResult(x=x)
+    sol.update(info)
+    return sol
+
+def _root_broyden1_doc():
+    """
+    Options
+    -------
+    nit : int, optional
+        Number of iterations to make. If omitted (default), make as many
+        as required to meet tolerances.
+    disp : bool, optional
+        Print status to stdout on every iteration.
+    maxiter : int, optional
+        Maximum number of iterations to make. If more are needed to
+        meet convergence, `NoConvergence` is raised.
+    ftol : float, optional
+        Relative tolerance for the residual. If omitted, not used.
+    fatol : float, optional
+        Absolute tolerance (in max-norm) for the residual.
+        If omitted, default is 6e-6.
+    xtol : float, optional
+        Relative minimum step size. If omitted, not used.
+    xatol : float, optional
+        Absolute minimum step size, as determined from the Jacobian
+        approximation. If the step size is smaller than this, optimization
+        is terminated as successful. If omitted, not used.
+    tol_norm : function(vector) -> scalar, optional
+        Norm to use in convergence check. Default is the maximum norm.
+    line_search : {None, 'armijo' (default), 'wolfe'}, optional
+        Which type of a line search to use to determine the step size in
+        the direction given by the Jacobian approximation. Defaults to
+        'armijo'.
+    jac_options : dict, optional
+        Options for the respective Jacobian approximation.
+            alpha : float, optional
+                Initial guess for the Jacobian is (-1/alpha).
+            reduction_method : str or tuple, optional
+                Method used in ensuring that the rank of the Broyden
+                matrix stays low. Can either be a string giving the
+                name of the method, or a tuple of the form ``(method,
+                param1, param2, ...)`` that gives the name of the
+                method and values for additional parameters.
+
+                Methods available:
+
+                    - ``restart``
+                        Drop all matrix columns. Has no
+                        extra parameters.
+                    - ``simple``
+                        Drop oldest matrix column. Has no
+                        extra parameters.
+                    - ``svd``
+                        Keep only the most significant SVD
+                        components.
+
+                        Extra parameters:
+
+                            - ``to_retain``
+                                Number of SVD components to
+                                retain when rank reduction is done.
+                                Default is ``max_rank - 2``.
+            max_rank : int, optional
+                Maximum rank for the Broyden matrix.
+                Default is infinity (i.e., no rank reduction).
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> def func(x):
+    ...     return np.cos(x) + x[::-1] - [1, 2, 3, 4]
+    ...
+    >>> from scipy import optimize
+    >>> res = optimize.root(func, [1, 1, 1, 1], method='broyden1', tol=1e-14)
+    >>> x = res.x
+    >>> x
+    array([4.04674914, 3.91158389, 2.71791677, 1.61756251])
+    >>> np.cos(x) + x[::-1]
+    array([1., 2., 3., 4.])
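+
+    Rank reduction for the Broyden matrix can be requested through
+    ``jac_options`` (an illustrative, editorial call using the options
+    documented above; convergence behaviour depends on the problem):
+
+    >>> res = optimize.root(func, [1, 1, 1, 1], method='broyden1',
+    ...                     options={'jac_options':
+    ...                              {'reduction_method': 'restart',
+    ...                               'max_rank': 3}})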
+
+    """
+    pass
+
+def _root_broyden2_doc():
+    """
+    Options
+    -------
+    nit : int, optional
+        Number of iterations to make. If omitted (default), make as many
+        as required to meet tolerances.
+    disp : bool, optional
+        Print status to stdout on every iteration.
+    maxiter : int, optional
+        Maximum number of iterations to make. If more are needed to
+        meet convergence, `NoConvergence` is raised.
+    ftol : float, optional
+        Relative tolerance for the residual. If omitted, not used.
+    fatol : float, optional
+        Absolute tolerance (in max-norm) for the residual.
+        If omitted, default is 6e-6.
+    xtol : float, optional
+        Relative minimum step size. If omitted, not used.
+    xatol : float, optional
+        Absolute minimum step size, as determined from the Jacobian
+        approximation. If the step size is smaller than this, optimization
+        is terminated as successful. If omitted, not used.
+    tol_norm : function(vector) -> scalar, optional
+        Norm to use in convergence check. Default is the maximum norm.
+    line_search : {None, 'armijo' (default), 'wolfe'}, optional
+        Which type of a line search to use to determine the step size in
+        the direction given by the Jacobian approximation. Defaults to
+        'armijo'.
+    jac_options : dict, optional
+        Options for the respective Jacobian approximation.
+
+        alpha : float, optional
+            Initial guess for the Jacobian is (-1/alpha).
+        reduction_method : str or tuple, optional
+            Method used in ensuring that the rank of the Broyden
+            matrix stays low. Can either be a string giving the
+            name of the method, or a tuple of the form ``(method,
+            param1, param2, ...)`` that gives the name of the
+            method and values for additional parameters.
+
+            Methods available:
+
+                - ``restart``
+                    Drop all matrix columns. Has no
+                    extra parameters.
+                - ``simple``
+                    Drop oldest matrix column. Has no
+                    extra parameters.
+                - ``svd``
+                    Keep only the most significant SVD
+                    components.
+
+                    Extra parameters:
+
+                        - ``to_retain``
+                            Number of SVD components to
+                            retain when rank reduction is done.
+                            Default is ``max_rank - 2``.
+        max_rank : int, optional
+            Maximum rank for the Broyden matrix.
+            Default is infinity (i.e., no rank reduction).
+    """
+    pass
+
+def _root_anderson_doc():
+    """
+    Options
+    -------
+    nit : int, optional
+        Number of iterations to make. If omitted (default), make as many
+        as required to meet tolerances.
+    disp : bool, optional
+        Print status to stdout on every iteration.
+    maxiter : int, optional
+        Maximum number of iterations to make. If more are needed to
+        meet convergence, `NoConvergence` is raised.
+    ftol : float, optional
+        Relative tolerance for the residual. If omitted, not used.
+    fatol : float, optional
+        Absolute tolerance (in max-norm) for the residual.
+        If omitted, default is 6e-6.
+    xtol : float, optional
+        Relative minimum step size. If omitted, not used.
+    xatol : float, optional
+        Absolute minimum step size, as determined from the Jacobian
+        approximation. If the step size is smaller than this, optimization
+        is terminated as successful. If omitted, not used.
+    tol_norm : function(vector) -> scalar, optional
+        Norm to use in convergence check. Default is the maximum norm.
+    line_search : {None, 'armijo' (default), 'wolfe'}, optional
+        Which type of a line search to use to determine the step size in
+        the direction given by the Jacobian approximation. Defaults to
+        'armijo'.
+    jac_options : dict, optional
+        Options for the respective Jacobian approximation.
+
+        alpha : float, optional
+            Initial guess for the Jacobian is (-1/alpha).
+        M : float, optional
+            Number of previous vectors to retain. Defaults to 5.
+        w0 : float, optional
+            Regularization parameter for numerical stability.
+            Compared to unity, good values of the order of 0.01.
+    """
+    pass
+
+def _root_linearmixing_doc():
+    """
+    Options
+    -------
+    nit : int, optional
+        Number of iterations to make. If omitted (default), make as many
+        as required to meet tolerances.
+    disp : bool, optional
+        Print status to stdout on every iteration.
+    maxiter : int, optional
+        Maximum number of iterations to make. If more are needed to
+        meet convergence, ``NoConvergence`` is raised.
+    ftol : float, optional
+        Relative tolerance for the residual. If omitted, not used.
+    fatol : float, optional
+        Absolute tolerance (in max-norm) for the residual.
+        If omitted, default is 6e-6.
+    xtol : float, optional
+        Relative minimum step size. If omitted, not used.
+    xatol : float, optional
+        Absolute minimum step size, as determined from the Jacobian
+        approximation. If the step size is smaller than this, optimization
+        is terminated as successful. If omitted, not used.
+    tol_norm : function(vector) -> scalar, optional
+        Norm to use in convergence check. Default is the maximum norm.
+    line_search : {None, 'armijo' (default), 'wolfe'}, optional
+        Which type of a line search to use to determine the step size in
+        the direction given by the Jacobian approximation. Defaults to
+        'armijo'.
+    jac_options : dict, optional
+        Options for the respective Jacobian approximation.
+
+        alpha : float, optional
+            Initial guess for the Jacobian is (-1/alpha).
+    """
+    pass
+
+def _root_diagbroyden_doc():
+    """
+    Options
+    -------
+    nit : int, optional
+        Number of iterations to make. If omitted (default), make as many
+        as required to meet tolerances.
+    disp : bool, optional
+        Print status to stdout on every iteration.
+    maxiter : int, optional
+        Maximum number of iterations to make. If more are needed to
+        meet convergence, `NoConvergence` is raised.
+    ftol : float, optional
+        Relative tolerance for the residual. If omitted, not used.
+    fatol : float, optional
+        Absolute tolerance (in max-norm) for the residual.
+        If omitted, default is 6e-6.
+    xtol : float, optional
+        Relative minimum step size. If omitted, not used.
+    xatol : float, optional
+        Absolute minimum step size, as determined from the Jacobian
+        approximation. If the step size is smaller than this, optimization
+        is terminated as successful. If omitted, not used.
+    tol_norm : function(vector) -> scalar, optional
+        Norm to use in convergence check. Default is the maximum norm.
+    line_search : {None, 'armijo' (default), 'wolfe'}, optional
+        Which type of a line search to use to determine the step size in
+        the direction given by the Jacobian approximation. Defaults to
+        'armijo'.
+    jac_options : dict, optional
+        Options for the respective Jacobian approximation.
+
+        alpha : float, optional
+            Initial guess for the Jacobian is (-1/alpha).
+    """
+    pass
+
+def _root_excitingmixing_doc():
+    """
+    Options
+    -------
+    nit : int, optional
+        Number of iterations to make. If omitted (default), make as many
+        as required to meet tolerances.
+    disp : bool, optional
+        Print status to stdout on every iteration.
+    maxiter : int, optional
+        Maximum number of iterations to make. If more are needed to
+        meet convergence, `NoConvergence` is raised.
+    ftol : float, optional
+        Relative tolerance for the residual. If omitted, not used.
+    fatol : float, optional
+        Absolute tolerance (in max-norm) for the residual.
+        If omitted, default is 6e-6.
+    xtol : float, optional
+        Relative minimum step size. If omitted, not used.
+    xatol : float, optional
+        Absolute minimum step size, as determined from the Jacobian
+        approximation. If the step size is smaller than this, optimization
+        is terminated as successful. If omitted, not used.
+    tol_norm : function(vector) -> scalar, optional
+        Norm to use in convergence check. Default is the maximum norm.
+    line_search : {None, 'armijo' (default), 'wolfe'}, optional
+        Which type of a line search to use to determine the step size in
+        the direction given by the Jacobian approximation. Defaults to
+        'armijo'.
+    jac_options : dict, optional
+        Options for the respective Jacobian approximation.
+
+        alpha : float, optional
+            Initial Jacobian approximation is (-1/alpha).
+        alphamax : float, optional
+            The entries of the diagonal Jacobian are kept in the range
+            ``[alpha, alphamax]``.
+    """
+    pass
+
+def _root_krylov_doc():
+    """
+    Options
+    -------
+    nit : int, optional
+        Number of iterations to make. If omitted (default), make as many
+        as required to meet tolerances.
+    disp : bool, optional
+        Print status to stdout on every iteration.
+    maxiter : int, optional
+        Maximum number of iterations to make. If more are needed to
+        meet convergence, `NoConvergence` is raised.
+    ftol : float, optional
+        Relative tolerance for the residual. If omitted, not used.
+    fatol : float, optional
+        Absolute tolerance (in max-norm) for the residual.
+        If omitted, default is 6e-6.
+    xtol : float, optional
+        Relative minimum step size. If omitted, not used.
+    xatol : float, optional
+        Absolute minimum step size, as determined from the Jacobian
+        approximation. If the step size is smaller than this, optimization
+        is terminated as successful. If omitted, not used.
+    tol_norm : function(vector) -> scalar, optional
+        Norm to use in convergence check. Default is the maximum norm.
+    line_search : {None, 'armijo' (default), 'wolfe'}, optional
+        Which type of a line search to use to determine the step size in
+        the direction given by the Jacobian approximation. Defaults to
+        'armijo'.
+    jac_options : dict, optional
+        Options for the respective Jacobian approximation.
+
+        rdiff : float, optional
+            Relative step size to use in numerical differentiation.
+        method : str or callable, optional
+            Krylov method to use to approximate the Jacobian.  Can be a string,
+            or a function implementing the same interface as the iterative
+            solvers in `scipy.sparse.linalg`. If a string, needs to be one of:
+            ``'lgmres'``, ``'gmres'``, ``'bicgstab'``, ``'cgs'``, ``'minres'``,
+            ``'tfqmr'``.
+
+            The default is `scipy.sparse.linalg.lgmres`.
+        inner_M : LinearOperator or InverseJacobian
+            Preconditioner for the inner Krylov iteration.
+            Note that you can use also inverse Jacobians as (adaptive)
+            preconditioners. For example,
+
+            >>> jac = BroydenFirst()
+            >>> kjac = KrylovJacobian(inner_M=jac.inverse)
+
+            If the preconditioner has a method named 'update', it will
+            be called as ``update(x, f)`` after each nonlinear step,
+            with ``x`` giving the current point, and ``f`` the current
+            function value.
+        inner_tol, inner_maxiter, ...
+            Parameters to pass on to the "inner" Krylov solver.
+            See `scipy.sparse.linalg.gmres` for details.
+        outer_k : int, optional
+            Size of the subspace kept across LGMRES nonlinear
+            iterations.
+
+            See `scipy.sparse.linalg.lgmres` for details.
+    """
+    pass
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_root_scalar.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_root_scalar.py
new file mode 100644
index 00000000..54ffd95c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_root_scalar.py
@@ -0,0 +1,502 @@
+"""
+Unified interfaces to root finding algorithms for real or complex
+scalar functions.
+
+Functions
+---------
+- root : find a root of a scalar function.
+"""
+import numpy as np
+
+from . import _zeros_py as optzeros
+
+__all__ = ['root_scalar']
+
+ROOT_SCALAR_METHODS = ['bisect', 'brentq', 'brenth', 'ridder', 'toms748',
+                       'newton', 'secant', 'halley']
+
+
+class MemoizeDer:
+    """Decorator that caches the value and derivative(s) of function each
+    time it is called.
+
+    This is a simplistic memoizer that calls and caches a single value
+    of `f(x, *args)`.
+    It assumes that `args` does not change between invocations.
+    It supports the use case of a root-finder where `args` is fixed,
+    `x` changes, and only rarely, if at all, does x assume the same value
+    more than once."""
+    def __init__(self, fun):
+        self.fun = fun
+        self.vals = None
+        self.x = None
+        self.n_calls = 0
+
+    def __call__(self, x, *args):
+        r"""Calculate f or use cached value if available"""
+        # Derivative may be requested before the function itself, always check
+        if self.vals is None or x != self.x:
+            fg = self.fun(x, *args)
+            self.x = x
+            self.n_calls += 1
+            self.vals = fg[:]
+        return self.vals[0]
+
+    def fprime(self, x, *args):
+        r"""Calculate f' or use a cached value if available"""
+        if self.vals is None or x != self.x:
+            self(x, *args)
+        return self.vals[1]
+
+    def fprime2(self, x, *args):
+        r"""Calculate f'' or use a cached value if available"""
+        if self.vals is None or x != self.x:
+            self(x, *args)
+        return self.vals[2]
+
+    def ncalls(self):
+        return self.n_calls
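+
+# Editorial usage sketch (not part of SciPy): wrapping a function that
+# returns (f, f', f'') so that one evaluation at x serves f, fprime and
+# fprime2:
+#
+#     g = MemoizeDer(lambda x: (x**3 - 1, 3*x**2, 6*x))
+#     g(2.0), g.fprime(2.0), g.fprime2(2.0)  # -> (7.0, 12.0, 12.0)
+#     g.n_calls                              # -> 1, a single underlying call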
+
+
+def root_scalar(f, args=(), method=None, bracket=None,
+                fprime=None, fprime2=None,
+                x0=None, x1=None,
+                xtol=None, rtol=None, maxiter=None,
+                options=None):
+    """
+    Find a root of a scalar function.
+
+    Parameters
+    ----------
+    f : callable
+        A function to find a root of.
+    args : tuple, optional
+        Extra arguments passed to the objective function and its derivative(s).
+    method : str, optional
+        Type of solver.  Should be one of
+
+            - 'bisect'    :ref:`(see here) <optimize.root_scalar-bisect>`
+            - 'brentq'    :ref:`(see here) <optimize.root_scalar-brentq>`
+            - 'brenth'    :ref:`(see here) <optimize.root_scalar-brenth>`
+            - 'ridder'    :ref:`(see here) <optimize.root_scalar-ridder>`
+            - 'toms748'    :ref:`(see here) <optimize.root_scalar-toms748>`
+            - 'newton'    :ref:`(see here) <optimize.root_scalar-newton>`
+            - 'secant'    :ref:`(see here) <optimize.root_scalar-secant>`
+            - 'halley'    :ref:`(see here) <optimize.root_scalar-halley>`
+
+    bracket: A sequence of 2 floats, optional
+        An interval bracketing a root.  `f(x, *args)` must have different
+        signs at the two endpoints.
+    x0 : float, optional
+        Initial guess.
+    x1 : float, optional
+        A second guess.
+    fprime : bool or callable, optional
+        If `fprime` is a boolean and is True, `f` is assumed to return the
+        value of the objective function and of the derivative.
+        `fprime` can also be a callable returning the derivative of `f`. In
+        this case, it must accept the same arguments as `f`.
+    fprime2 : bool or callable, optional
+        If `fprime2` is a boolean and is True, `f` is assumed to return the
+        value of the objective function and of the
+        first and second derivatives.
+        `fprime2` can also be a callable returning the second derivative of `f`.
+        In this case, it must accept the same arguments as `f`.
+    xtol : float, optional
+        Tolerance (absolute) for termination.
+    rtol : float, optional
+        Tolerance (relative) for termination.
+    maxiter : int, optional
+        Maximum number of iterations.
+    options : dict, optional
+        A dictionary of solver options. E.g., ``k``, see
+        :obj:`show_options()` for details.
+
+    Returns
+    -------
+    sol : RootResults
+        The solution represented as a ``RootResults`` object.
+        Important attributes are: ``root`` the solution, ``converged`` a
+        boolean flag indicating if the algorithm exited successfully and
+        ``flag`` which describes the cause of the termination. See
+        `RootResults` for a description of other attributes.
+
+    See also
+    --------
+    show_options : Additional options accepted by the solvers
+    root : Find a root of a vector function.
+
+    Notes
+    -----
+    This section describes the available solvers that can be selected by the
+    'method' parameter.
+
+    The default is to use the best method available for the situation
+    presented.
+    If a bracket is provided, it may use one of the bracketing methods.
+    If a derivative and an initial value are specified, it may
+    select one of the derivative-based methods.
+    If no method is judged applicable, it will raise an Exception.
+
+    Arguments for each method are as follows (x=required, o=optional).
+
+    +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+
+    |                    method                     | f | args | bracket | x0 | x1 | fprime | fprime2 | xtol | rtol | maxiter | options |
+    +===============================================+===+======+=========+====+====+========+=========+======+======+=========+=========+
+    | :ref:`bisect <optimize.root_scalar-bisect>`   | x |  o   |    x    |    |    |        |         |  o   |  o   |    o    |   o     |
+    +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+
+    | :ref:`brentq <optimize.root_scalar-brentq>`   | x |  o   |    x    |    |    |        |         |  o   |  o   |    o    |   o     |
+    +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+
+    | :ref:`brenth <optimize.root_scalar-brenth>`   | x |  o   |    x    |    |    |        |         |  o   |  o   |    o    |   o     |
+    +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+
+    | :ref:`ridder <optimize.root_scalar-ridder>`   | x |  o   |    x    |    |    |        |         |  o   |  o   |    o    |   o     |
+    +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+
+    | :ref:`toms748 <optimize.root_scalar-toms748>` | x |  o   |    x    |    |    |        |         |  o   |  o   |    o    |   o     |
+    +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+
+    | :ref:`newton <optimize.root_scalar-newton>`   | x |  o   |         | x  |    |   x    |         |  o   |  o   |    o    |   o     |
+    +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+
+    | :ref:`secant <optimize.root_scalar-secant>`   | x |  o   |         | x  | x  |        |         |  o   |  o   |    o    |   o     |
+    +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+
+    | :ref:`halley <optimize.root_scalar-halley>`   | x |  o   |         | x  |    |   x    |    x    |  o   |  o   |    o    |   o     |
+    +-----------------------------------------------+---+------+---------+----+----+--------+---------+------+------+---------+---------+
+
+    Examples
+    --------
+
+    Find the root of a simple cubic
+
+    >>> from scipy import optimize
+    >>> def f(x):
+    ...     return (x**3 - 1)  # only one real root at x = 1
+
+    >>> def fprime(x):
+    ...     return 3*x**2
+
+    The `brentq` method takes as input a bracket
+
+    >>> sol = optimize.root_scalar(f, bracket=[0, 3], method='brentq')
+    >>> sol.root, sol.iterations, sol.function_calls
+    (1.0, 10, 11)
+
+    The `newton` method takes as input a single point and uses the
+    derivative(s).
+
+    >>> sol = optimize.root_scalar(f, x0=0.2, fprime=fprime, method='newton')
+    >>> sol.root, sol.iterations, sol.function_calls
+    (1.0, 11, 22)
+
+    The function can provide the value and derivative(s) in a single call.
+
+    >>> def f_p_pp(x):
+    ...     return (x**3 - 1), 3*x**2, 6*x
+
+    >>> sol = optimize.root_scalar(
+    ...     f_p_pp, x0=0.2, fprime=True, method='newton'
+    ... )
+    >>> sol.root, sol.iterations, sol.function_calls
+    (1.0, 11, 11)
+
+    >>> sol = optimize.root_scalar(
+    ...     f_p_pp, x0=0.2, fprime=True, fprime2=True, method='halley'
+    ... )
+    >>> sol.root, sol.iterations, sol.function_calls
+    (1.0, 7, 8)
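+
+    If no method is specified and a bracket is given, a bracketing method
+    (here ``brentq``) is selected automatically, as described in the Notes.
+
+    >>> sol = optimize.root_scalar(f, bracket=[0, 3])
+    >>> sol.converged, sol.root
+    (True, 1.0)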
+
+
+    """
+    if not isinstance(args, tuple):
+        args = (args,)
+
+    if options is None:
+        options = {}
+
+    # fun also returns the derivative(s)
+    is_memoized = False
+    if fprime2 is not None and not callable(fprime2):
+        if bool(fprime2):
+            f = MemoizeDer(f)
+            is_memoized = True
+            fprime2 = f.fprime2
+            fprime = f.fprime
+        else:
+            fprime2 = None
+    if fprime is not None and not callable(fprime):
+        if bool(fprime):
+            f = MemoizeDer(f)
+            is_memoized = True
+            fprime = f.fprime
+        else:
+            fprime = None
+
+    # respect solver-specific default tolerances - only pass in if actually set
+    kwargs = {}
+    for k in ['xtol', 'rtol', 'maxiter']:
+        v = locals().get(k)
+        if v is not None:
+            kwargs[k] = v
+
+    # Set any solver-specific options
+    if options:
+        kwargs.update(options)
+    # Always request full_output from the underlying method as _root_scalar
+    # always returns a RootResults object
+    kwargs.update(full_output=True, disp=False)
+
+    # Pick a method if not specified.
+    # Use the "best" method available for the situation.
+    if not method:
+        if bracket:
+            method = 'brentq'
+        elif x0 is not None:
+            if fprime:
+                if fprime2:
+                    method = 'halley'
+                else:
+                    method = 'newton'
+            else:
+                method = 'secant'
+    if not method:
+        raise ValueError('Unable to select a solver as neither bracket '
+                         'nor starting point provided.')
+
+    meth = method.lower()
+    map2underlying = {'halley': 'newton', 'secant': 'newton'}
+
+    try:
+        methodc = getattr(optzeros, map2underlying.get(meth, meth))
+    except AttributeError as e:
+        raise ValueError('Unknown solver %s' % meth) from e
+
+    if meth in ['bisect', 'ridder', 'brentq', 'brenth', 'toms748']:
+        if not isinstance(bracket, (list, tuple, np.ndarray)):
+            raise ValueError('Bracket needed for %s' % method)
+
+        a, b = bracket[:2]
+        r, sol = methodc(f, a, b, args=args, **kwargs)
+    elif meth in ['secant']:
+        if x0 is None:
+            raise ValueError('x0 must not be None for %s' % method)
+        if x1 is None:
+            raise ValueError('x1 must not be None for %s' % method)
+        if 'xtol' in kwargs:
+            kwargs['tol'] = kwargs.pop('xtol')
+        r, sol = methodc(f, x0, args=args, fprime=None, fprime2=None,
+                         x1=x1, **kwargs)
+    elif meth in ['newton']:
+        if x0 is None:
+            raise ValueError('x0 must not be None for %s' % method)
+        if not fprime:
+            raise ValueError('fprime must be specified for %s' % method)
+        if 'xtol' in kwargs:
+            kwargs['tol'] = kwargs.pop('xtol')
+        r, sol = methodc(f, x0, args=args, fprime=fprime, fprime2=None,
+                         **kwargs)
+    elif meth in ['halley']:
+        if x0 is None:
+            raise ValueError('x0 must not be None for %s' % method)
+        if not fprime:
+            raise ValueError('fprime must be specified for %s' % method)
+        if not fprime2:
+            raise ValueError('fprime2 must be specified for %s' % method)
+        if 'xtol' in kwargs:
+            kwargs['tol'] = kwargs.pop('xtol')
+        r, sol = methodc(f, x0, args=args, fprime=fprime, fprime2=fprime2, **kwargs)
+    else:
+        raise ValueError('Unknown solver %s' % method)
+
+    if is_memoized:
+        # Replace the function_calls count with the memoized count.
+        # Avoids double and triple-counting.
+        n_calls = f.n_calls
+        sol.function_calls = n_calls
+
+    return sol
+
+
+def _root_scalar_brentq_doc():
+    r"""
+    Options
+    -------
+    args : tuple, optional
+        Extra arguments passed to the objective function.
+    bracket: A sequence of 2 floats, optional
+        An interval bracketing a root.  `f(x, *args)` must have different
+        signs at the two endpoints.
+    xtol : float, optional
+        Tolerance (absolute) for termination.
+    rtol : float, optional
+        Tolerance (relative) for termination.
+    maxiter : int, optional
+        Maximum number of iterations.
+    options: dict, optional
+        Specifies any method-specific options not covered above
+
+    """
+    pass
+
+
+def _root_scalar_brenth_doc():
+    r"""
+    Options
+    -------
+    args : tuple, optional
+        Extra arguments passed to the objective function.
+    bracket: A sequence of 2 floats, optional
+        An interval bracketing a root.  `f(x, *args)` must have different
+        signs at the two endpoints.
+    xtol : float, optional
+        Tolerance (absolute) for termination.
+    rtol : float, optional
+        Tolerance (relative) for termination.
+    maxiter : int, optional
+        Maximum number of iterations.
+    options: dict, optional
+        Specifies any method-specific options not covered above.
+
+    """
+    pass
+
+def _root_scalar_toms748_doc():
+    r"""
+    Options
+    -------
+    args : tuple, optional
+        Extra arguments passed to the objective function.
+    bracket: A sequence of 2 floats, optional
+        An interval bracketing a root.  `f(x, *args)` must have different
+        signs at the two endpoints.
+    xtol : float, optional
+        Tolerance (absolute) for termination.
+    rtol : float, optional
+        Tolerance (relative) for termination.
+    maxiter : int, optional
+        Maximum number of iterations.
+    options: dict, optional
+        Specifies any method-specific options not covered above.
+
+    """
+    pass
+
+
+def _root_scalar_secant_doc():
+    r"""
+    Options
+    -------
+    args : tuple, optional
+        Extra arguments passed to the objective function.
+    xtol : float, optional
+        Tolerance (absolute) for termination.
+    rtol : float, optional
+        Tolerance (relative) for termination.
+    maxiter : int, optional
+        Maximum number of iterations.
+    x0 : float, required
+        Initial guess.
+    x1 : float, required
+        A second guess.
+    options: dict, optional
+        Specifies any method-specific options not covered above.
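+
+    Examples
+    --------
+    An illustrative call (editorial addition); `secant` requires two initial
+    guesses:
+
+    >>> from scipy import optimize
+    >>> sol = optimize.root_scalar(lambda x: x**2 - 2, x0=1.0, x1=2.0,
+    ...                            method='secant')
+    >>> round(sol.root, 6)
+    1.414214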
+
+    """
+    pass
+
+
+def _root_scalar_newton_doc():
+    r"""
+    Options
+    -------
+    args : tuple, optional
+        Extra arguments passed to the objective function and its derivative.
+    xtol : float, optional
+        Tolerance (absolute) for termination.
+    rtol : float, optional
+        Tolerance (relative) for termination.
+    maxiter : int, optional
+        Maximum number of iterations.
+    x0 : float, required
+        Initial guess.
+    fprime : bool or callable, optional
+        If `fprime` is a boolean and is True, `f` is assumed to return the
+        value of derivative along with the objective function.
+        `fprime` can also be a callable returning the derivative of `f`. In
+        this case, it must accept the same arguments as `f`.
+    options: dict, optional
+        Specifies any method-specific options not covered above.
+
+    """
+    pass
+
+
+def _root_scalar_halley_doc():
+    r"""
+    Options
+    -------
+    args : tuple, optional
+        Extra arguments passed to the objective function and its derivatives.
+    xtol : float, optional
+        Tolerance (absolute) for termination.
+    rtol : float, optional
+        Tolerance (relative) for termination.
+    maxiter : int, optional
+        Maximum number of iterations.
+    x0 : float, required
+        Initial guess.
+    fprime : bool or callable, required
+        If `fprime` is a boolean and is True, `f` is assumed to return the
+        value of derivative along with the objective function.
+        `fprime` can also be a callable returning the derivative of `f`. In
+        this case, it must accept the same arguments as `f`.
+    fprime2 : bool or callable, required
+        If `fprime2` is a boolean and is True, `f` is assumed to return the
+        value of 1st and 2nd derivatives along with the objective function.
+        `fprime2` can also be a callable returning the 2nd derivative of `f`.
+        In this case, it must accept the same arguments as `f`.
+    options: dict, optional
+        Specifies any method-specific options not covered above.
+
+    """
+    pass
+
+
+def _root_scalar_ridder_doc():
+    r"""
+    Options
+    -------
+    args : tuple, optional
+        Extra arguments passed to the objective function.
+    bracket: A sequence of 2 floats, optional
+        An interval bracketing a root.  `f(x, *args)` must have different
+        signs at the two endpoints.
+    xtol : float, optional
+        Tolerance (absolute) for termination.
+    rtol : float, optional
+        Tolerance (relative) for termination.
+    maxiter : int, optional
+        Maximum number of iterations.
+    options: dict, optional
+        Specifies any method-specific options not covered above.
+
+    """
+    pass
+
+
+def _root_scalar_bisect_doc():
+    r"""
+    Options
+    -------
+    args : tuple, optional
+        Extra arguments passed to the objective function.
+    bracket: A sequence of 2 floats, optional
+        An interval bracketing a root.  `f(x, *args)` must have different
+        signs at the two endpoints.
+    xtol : float, optional
+        Tolerance (absolute) for termination.
+    rtol : float, optional
+        Tolerance (relative) for termination.
+    maxiter : int, optional
+        Maximum number of iterations.
+    options : dict, optional
+        Specifies any method-specific options not covered above.
+
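+    Examples
+    --------
+    A minimal sketch (illustrative only) of bisection through
+    `scipy.optimize.root_scalar`, using a sign-changing bracket:
+
+    >>> from scipy.optimize import root_scalar
+    >>> sol = root_scalar(lambda x: x**3 - x - 2, bracket=[1, 2],
+    ...                   method='bisect')
+    >>> round(sol.root, 6)
+    1.52138
+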
+    """
+    pass
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_shgo.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_shgo.py
new file mode 100644
index 00000000..fc922f9d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_shgo.py
@@ -0,0 +1,1604 @@
+"""
+shgo: The simplicial homology global optimisation algorithm
+"""
+
+import numpy as np
+import time
+import logging
+import warnings
+from scipy import spatial
+from scipy.optimize import OptimizeResult, minimize, Bounds
+from scipy.optimize._constraints import new_bounds_to_old
+from ._optimize import _wrap_scalar_function
+from scipy.optimize._shgo_lib.triangulation import Complex
+
+
+__all__ = ['shgo']
+
+
+def shgo(func, bounds, args=(), constraints=None, n=None, iters=1,
+         callback=None,
+         minimizer_kwargs=None, options=None, sampling_method='simplicial'):
+    """
+    Finds the global minimum of a function using SHG optimization.
+
+    SHGO stands for "simplicial homology global optimization".
+
+    Parameters
+    ----------
+    func : callable
+        The objective function to be minimized.  Must be in the form
+        ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
+        and ``args`` is a tuple of any additional fixed parameters needed to
+        completely specify the function.
+    bounds : sequence or `Bounds`
+        Bounds for variables. There are two ways to specify the bounds:
+
+        1. Instance of `Bounds` class.
+        2. Sequence of ``(min, max)`` pairs for each element in `x`.
+
+    args : tuple, optional
+        Any additional fixed parameters needed to completely specify the
+        objective function.
+    constraints : dict or sequence of dict, optional
+        Constraints definition. Functions on ``R^n``, given in the form::
+
+            g(x) >= 0 applied as g : R^n -> R^m
+            h(x) == 0 applied as h : R^n -> R^p
+
+        Each constraint is defined in a dictionary with fields:
+
+            type : str
+                Constraint type: 'eq' for equality, 'ineq' for inequality.
+            fun : callable
+                The function defining the constraint.
+            jac : callable, optional
+                The Jacobian of `fun` (only for SLSQP).
+            args : sequence, optional
+                Extra arguments to be passed to the function and Jacobian.
+
+        Equality constraint means that the constraint function result is to
+        be zero whereas inequality means that it is to be non-negative.
+        Note that COBYLA only supports inequality constraints.
+
+        .. note::
+
+           Only the COBYLA and SLSQP local minimize methods currently
+           support constraint arguments. If the ``constraints`` sequence
+           used in the local optimization problem is not defined in
+           ``minimizer_kwargs`` and a constrained method is used then the
+           global ``constraints`` will be used.
+           (Defining a ``constraints`` sequence in ``minimizer_kwargs``
+           means that ``constraints`` will not be added so if equality
+           constraints and so forth need to be added then the inequality
+           functions in ``constraints`` need to be added to
+           ``minimizer_kwargs`` too).
+
+    n : int, optional
+        Number of sampling points used in the construction of the simplicial
+        complex. Note that this argument is only used for the ``sobol`` and
+        other arbitrary sampling methods. In the case of ``sobol``, it should
+        be a power of 2 (``n=2**m``); if it is not, it is automatically
+        rounded up to the next higher power of 2. Default is 100 for
+        ``sampling_method='simplicial'`` and 128 for
+        ``sampling_method='sobol'``.
+    iters : int, optional
+        Number of iterations used in the construction of the simplicial
+        complex. Default is 1.
+    callback : callable, optional
+        Called after each iteration, as ``callback(xk)``, where ``xk`` is the
+        current parameter vector.
+    minimizer_kwargs : dict, optional
+        Extra keyword arguments to be passed to the minimizer
+        ``scipy.optimize.minimize``. Some important options include:
+
+            * method : str
+                The minimization method, the default is ``SLSQP``.
+            * args : tuple
+                Extra arguments passed to the objective function (``func``) and
+                its derivatives (Jacobian, Hessian).
+            * options : dict, optional
+                Note that by default the tolerance is specified as
+                ``{'ftol': 1e-12}``
+
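+        As an illustrative sketch only (the values here are arbitrary), the
+        local solver and its tolerance could be selected with::
+
+            minimizer_kwargs = {'method': 'SLSQP',
+                                'options': {'ftol': 1e-10}}
+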
+    options : dict, optional
+        A dictionary of solver options. Many of the options specified for the
+        global routine are also passed to the scipy.optimize.minimize routine.
+        The options that are also passed to the local routine are marked with
+        "(L)".
+
+        Stopping criteria: the algorithm will terminate if any of the
+        specified criteria are met. By default, none need to be specified:
+
+        * maxfev : int (L)
+            Maximum number of function evaluations in the feasible domain.
+            (Note that only methods which support this option will terminate
+            the routine at precisely the specified value. Otherwise the
+            criterion will only be checked once per global iteration.)
+        * f_min : float
+            Specify the minimum objective function value, if it is known.
+        * f_tol : float
+            Precision goal for the value of f in the stopping
+            criterion. Note that the global routine will also
+            terminate if a sampling point in the global routine is
+            within this tolerance.
+        * maxiter : int
+            Maximum number of iterations to perform.
+        * maxev : int
+            Maximum number of sampling evaluations to perform (includes
+            searching in infeasible points).
+        * maxtime : float
+            Maximum processing runtime allowed.
+        * minhgrd : int
+            Minimum homology group rank differential. The homology group of the
+            objective function is calculated (approximately) during every
+            iteration. The rank of this group has a one-to-one correspondence
+            with the number of locally convex subdomains in the objective
+            function (after adequate sampling, each of these subdomains
+            contains a unique minimum). If the growth in this rank between
+            iterations falls to ``minhgrd`` or below, the algorithm will
+            terminate.
+
+        Objective function knowledge:
+
+        * symmetry : bool
+            Specify True if the objective function contains symmetric variables.
+            The search space (and therefore the run time) is reduced by O(n!).
+
+        * jac : bool or callable, optional
+            Jacobian (gradient) of objective function. Only for CG, BFGS,
+            Newton-CG, L-BFGS-B, TNC, SLSQP, dogleg, trust-ncg. If ``jac`` is a
+            boolean and is True, ``fun`` is assumed to return the gradient along
+            with the objective function. If False, the gradient will be
+            estimated numerically. ``jac`` can also be a callable returning the
+            gradient of the objective. In this case, it must accept the same
+            arguments as ``fun``. (Passed to `scipy.optimize.minimize` automatically)
+
+        * hess, hessp : callable, optional
+            Hessian (matrix of second-order derivatives) of objective function
+            or Hessian of objective function times an arbitrary vector p.
+            Only for Newton-CG, dogleg, trust-ncg. Only one of ``hessp`` or
+            ``hess`` needs to be given. If ``hess`` is provided, then
+            ``hessp`` will be ignored. If neither ``hess`` nor ``hessp`` is
+            provided, then the Hessian product will be approximated using
+            finite differences on ``jac``. ``hessp`` must compute the Hessian
+            times an arbitrary vector. (Passed to `scipy.optimize.minimize`
+            automatically)
+
+        Algorithm settings:
+
+        * minimize_every_iter : bool
+            If True then promising global sampling points will be passed to a
+            local minimization routine every iteration. If False then only the
+            final minimizer pool will be run. Defaults to False.
+        * local_iter : int
+            Only evaluate the best ``local_iter`` minimizer pool candidates
+            every iteration. If False, all potential points are passed to the
+            local minimization routine.
+        * infty_constraints : bool
+            If True then any sampling points generated which are outside the
+            feasible domain will be saved and given an objective function
+            value of ``inf``. If False then these points will be discarded.
+            Using this functionality could lead to higher performance with
+            respect to function evaluations before the global minimum is found;
+            specifying False will use less memory at the cost of a slight
+            decrease in performance. Defaults to True.
+
+        Feedback:
+
+        * disp : bool (L)
+            Set to True to print convergence messages.
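+
+        As a sketch with illustrative values only, a known global minimum
+        could be supplied as a stopping criterion::
+
+            options = {'f_min': -959.6407, 'f_tol': 1e-6, 'disp': True}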
+
+    sampling_method : str or function, optional
+        Current built-in sampling method options are ``halton``, ``sobol`` and
+        ``simplicial``. The default ``simplicial`` provides the theoretical
+        guarantee of convergence to the global minimum in finite time.
+        The ``halton`` and ``sobol`` methods are faster in terms of sampling
+        point generation, at the cost of losing guaranteed convergence; they
+        are more appropriate for most "easier" problems where convergence is
+        relatively fast. User-defined sampling functions must accept two
+        arguments, ``n`` sampling points and the dimension ``dim``, per call
+        and output an array of sampling points with shape ``(n, dim)``.
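+
+        As an illustrative sketch (assuming only the documented ``(n, dim)``
+        call signature), a custom sampler could look like::
+
+            import numpy as np
+            rng = np.random.default_rng()
+
+            def my_sampler(n, dim):
+                # Uniform points in the unit hypercube; `shgo` scales them
+                # to the bound limits internally.
+                return rng.random((n, dim))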
+
+    Returns
+    -------
+    res : OptimizeResult
+        The optimization result represented as an `OptimizeResult` object.
+        Important attributes are:
+        ``x`` the solution array corresponding to the global minimum,
+        ``fun`` the function output at the global solution,
+        ``xl`` an ordered list of local minima solutions,
+        ``funl`` the function output at the corresponding local solutions,
+        ``success`` a Boolean flag indicating if the optimizer exited
+        successfully,
+        ``message`` which describes the cause of the termination,
+        ``nfev`` the total number of objective function evaluations including
+        the sampling calls,
+        ``nlfev`` the total number of objective function evaluations
+        culminating from all local search optimizations,
+        ``nit`` number of iterations performed by the global routine.
+
+    Notes
+    -----
+    Global optimization using simplicial homology global optimization [1]_.
+    Appropriate for solving general purpose NLP and blackbox optimization
+    problems to global optimality (low-dimensional problems).
+
+    In general, the optimization problems are of the form::
+
+        minimize f(x) subject to
+
+        g_i(x) >= 0,  i = 1,...,m
+        h_j(x)  = 0,  j = 1,...,p
+
+    where x is a vector of one or more variables. ``f(x)`` is the objective
+    function ``R^n -> R``, ``g_i(x)`` are the inequality constraints, and
+    ``h_j(x)`` are the equality constraints.
+
+    Optionally, the lower and upper bounds for each element in x can also be
+    specified using the `bounds` argument.
+
+    While most of the theoretical advantages of SHGO are only proven for when
+    ``f(x)`` is a Lipschitz smooth function, the algorithm is also proven to
+    converge to the global optimum for the more general case where ``f(x)`` is
+    non-continuous, non-convex and non-smooth, if the default sampling method
+    is used [1]_.
+
+    The local search method may be specified using the ``minimizer_kwargs``
+    parameter which is passed on to ``scipy.optimize.minimize``. By default,
+    the ``SLSQP`` method is used. In general, it is recommended to use the
+    ``SLSQP`` or ``COBYLA`` local minimization if inequality constraints
+    are defined for the problem since the other methods do not use constraints.
+
+    The ``halton`` and ``sobol`` method points are generated using
+    `scipy.stats.qmc`. Any other QMC method could be used.
+
+    References
+    ----------
+    .. [1] Endres, SC, Sandrock, C, Focke, WW (2018) "A simplicial homology
+           algorithm for Lipschitz optimisation", Journal of Global Optimization.
+    .. [2] Joe, SW and Kuo, FY (2008) "Constructing Sobol' sequences with
+           better two-dimensional projections", SIAM J. Sci. Comput. 30,
+           2635-2654.
+    .. [3] Hock, W and Schittkowski, K (1981) "Test examples for nonlinear
+           programming codes", Lecture Notes in Economics and Mathematical
+           Systems, 187. Springer-Verlag, New York.
+           http://www.ai7.uni-bayreuth.de/test_problem_coll.pdf
+    .. [4] Wales, DJ (2015) "Perspective: Insight into reaction coordinates and
+           dynamics from the potential energy landscape",
+           Journal of Chemical Physics, 142(13), 2015.
+
+    Examples
+    --------
+    First consider the problem of minimizing the Rosenbrock function, `rosen`:
+
+    >>> import numpy as np
+    >>> from scipy.optimize import rosen, shgo
+    >>> bounds = [(0,2), (0, 2), (0, 2), (0, 2), (0, 2)]
+    >>> result = shgo(rosen, bounds)
+    >>> result.x, result.fun
+    (array([1., 1., 1., 1., 1.]), 2.920392374190081e-18)
+
+    Note that the bounds determine the dimensionality of the objective
+    function and are therefore a required input; however, you can specify
+    empty bounds using ``None`` or objects like ``np.inf``, which will be
+    converted to large float numbers.
+
+    >>> bounds = [(None, None), ]*4
+    >>> result = shgo(rosen, bounds)
+    >>> result.x
+    array([0.99999851, 0.99999704, 0.99999411, 0.9999882 ])
+
+    Next, we consider the Eggholder function, a problem with several local
+    minima and one global minimum. We will demonstrate the use of arguments and
+    the capabilities of `shgo`.
+    (https://en.wikipedia.org/wiki/Test_functions_for_optimization)
+
+    >>> def eggholder(x):
+    ...     return (-(x[1] + 47.0)
+    ...             * np.sin(np.sqrt(abs(x[0]/2.0 + (x[1] + 47.0))))
+    ...             - x[0] * np.sin(np.sqrt(abs(x[0] - (x[1] + 47.0))))
+    ...             )
+    ...
+    >>> bounds = [(-512, 512), (-512, 512)]
+
+    `shgo` has built-in low discrepancy sampling sequences. First, we will
+    input 64 initial sampling points of the *Sobol'* sequence:
+
+    >>> result = shgo(eggholder, bounds, n=64, sampling_method='sobol')
+    >>> result.x, result.fun
+    (array([512.        , 404.23180824]), -959.6406627208397)
+
+    `shgo` also returns any other local minima that were found; these
+    can be accessed through:
+
+    >>> result.xl
+    array([[ 512.        ,  404.23180824],
+           [ 283.0759062 , -487.12565635],
+           [-294.66820039, -462.01964031],
+           [-105.87688911,  423.15323845],
+           [-242.97926   ,  274.38030925],
+           [-506.25823477,    6.3131022 ],
+           [-408.71980731, -156.10116949],
+           [ 150.23207937,  301.31376595],
+           [  91.00920901, -391.283763  ],
+           [ 202.89662724, -269.38043241],
+           [ 361.66623976, -106.96493868],
+           [-219.40612786, -244.06020508]])
+
+    >>> result.funl
+    array([-959.64066272, -718.16745962, -704.80659592, -565.99778097,
+           -559.78685655, -557.36868733, -507.87385942, -493.9605115 ,
+           -426.48799655, -421.15571437, -419.31194957, -410.98477763])
+
+    These results are useful in applications where there are many global minima
+    and the values of other global minima are desired or where the local minima
+    can provide insight into the system (for example morphologies
+    in physical chemistry [4]_).
+
+    If we want to find a larger number of local minima, we can increase the
+    number of sampling points or the number of iterations. We'll keep the
+    number of sampling points at 64 and increase the number of iterations
+    from the default of 1 to 3; each iteration adds another 64 sampling
+    points, for a total of 64 x 3 = 192 initial sampling points.
+
+    >>> result_2 = shgo(eggholder, bounds, n=64, iters=3, sampling_method='sobol')
+    >>> len(result.xl), len(result_2.xl)
+    (12, 20)
+
+    Note the difference between, e.g., ``n=192, iters=1`` and ``n=64,
+    iters=3``.
+    In the first case the promising points contained in the minimiser pool
+    are processed only once. In the latter case the pool is processed after
+    every 64 sampling points, for a total of 3 times.
+
+    To demonstrate solving problems with non-linear constraints consider the
+    following example from Hock and Schittkowski problem 73 (cattle-feed) [3]_::
+
+        minimize: f = 24.55 * x_1 + 26.75 * x_2 + 39 * x_3 + 40.50 * x_4
+
+        subject to: 2.3 * x_1 + 5.6 * x_2 + 11.1 * x_3 + 1.3 * x_4 - 5     >= 0,
+
+                    12 * x_1 + 11.9 * x_2 + 41.8 * x_3 + 52.1 * x_4 - 21
+                        -1.645 * sqrt(0.28 * x_1**2 + 0.19 * x_2**2 +
+                                      20.5 * x_3**2 + 0.62 * x_4**2)       >= 0,
+
+                    x_1 + x_2 + x_3 + x_4 - 1                              == 0,
+
+                    1 >= x_i >= 0 for all i
+
+    The approximate answer given in [3]_ is::
+
+        f([0.6355216, -0.12e-11, 0.3127019, 0.05177655]) = 29.894378
+
+    >>> def f(x):  # (cattle-feed)
+    ...     return 24.55*x[0] + 26.75*x[1] + 39*x[2] + 40.50*x[3]
+    ...
+    >>> def g1(x):
+    ...     return 2.3*x[0] + 5.6*x[1] + 11.1*x[2] + 1.3*x[3] - 5  # >=0
+    ...
+    >>> def g2(x):
+    ...     return (12*x[0] + 11.9*x[1] +41.8*x[2] + 52.1*x[3] - 21
+    ...             - 1.645 * np.sqrt(0.28*x[0]**2 + 0.19*x[1]**2
+    ...                             + 20.5*x[2]**2 + 0.62*x[3]**2)
+    ...             ) # >=0
+    ...
+    >>> def h1(x):
+    ...     return x[0] + x[1] + x[2] + x[3] - 1  # == 0
+    ...
+    >>> cons = ({'type': 'ineq', 'fun': g1},
+    ...         {'type': 'ineq', 'fun': g2},
+    ...         {'type': 'eq', 'fun': h1})
+    >>> bounds = [(0, 1.0),]*4
+    >>> res = shgo(f, bounds, iters=3, constraints=cons)
+    >>> res
+     message: Optimization terminated successfully.
+     success: True
+         fun: 29.894378159142136
+        funl: [ 2.989e+01]
+           x: [ 6.355e-01  1.137e-13  3.127e-01  5.178e-02]
+          xl: [[ 6.355e-01  1.137e-13  3.127e-01  5.178e-02]]
+         nit: 3
+        nfev: 114
+       nlfev: 35
+       nljev: 5
+       nlhev: 0
+
+    >>> g1(res.x), g2(res.x), h1(res.x)
+    (-5.062616992290714e-14, -2.9594104944408173e-12, 0.0)
+
+    """
+
+    # if necessary, convert bounds class to old bounds
+    if isinstance(bounds, Bounds):
+        bounds = new_bounds_to_old(bounds.lb, bounds.ub, len(bounds.lb))
+
+    # Initiate SHGO class
+    shc = SHGO(func, bounds, args=args, constraints=constraints, n=n,
+               iters=iters, callback=callback,
+               minimizer_kwargs=minimizer_kwargs,
+               options=options, sampling_method=sampling_method)
+
+    # Run the algorithm, process results and test success
+    shc.construct_complex()
+
+    if not shc.break_routine:
+        if shc.disp:
+            print("Successfully completed construction of complex.")
+
+    # Test post iterations success
+    if len(shc.LMC.xl_maps) == 0:
+        # If sampling failed to find pool, return lowest sampled point
+        # with a warning
+        shc.find_lowest_vertex()
+        shc.break_routine = True
+        shc.fail_routine(mes="Failed to find a feasible minimizer point. "
+                             "Lowest sampling point = {}".format(shc.f_lowest))
+        shc.res.fun = shc.f_lowest
+        shc.res.x = shc.x_lowest
+        shc.res.nfev = shc.fn
+
+    # Confirm the routine ran successfully
+    if not shc.break_routine:
+        shc.res.message = 'Optimization terminated successfully.'
+        shc.res.success = True
+
+    # Return the final results
+    return shc.res
+
+
+class SHGO:
+    def __init__(self, func, bounds, args=(), constraints=None, n=None,
+                 iters=None, callback=None, minimizer_kwargs=None,
+                 options=None, sampling_method='sobol'):
+
+        from scipy.stats import qmc
+
+        # Input checks
+        methods = ['halton', 'sobol', 'simplicial']
+        if isinstance(sampling_method, str) and sampling_method not in methods:
+            raise ValueError(("Unknown sampling_method specified."
+                              " Valid methods: {}").format(', '.join(methods)))
+
+        # Initiate class
+        self._raw_func = func  # some methods pass args in (e.g. Complex)
+        _, self.func = _wrap_scalar_function(func, args)
+        self.bounds = bounds
+        self.args = args
+        self.callback = callback
+
+        # Bounds
+        abound = np.array(bounds, float)
+        self.dim = np.shape(abound)[0]  # Dimensionality of problem
+
+        # Set non-finite values to large floats
+        infind = ~np.isfinite(abound)
+        abound[infind[:, 0], 0] = -1e50
+        abound[infind[:, 1], 1] = 1e50
+
+        # Check if bounds are correctly specified
+        bnderr = abound[:, 0] > abound[:, 1]
+        if bnderr.any():
+            raise ValueError('Error: lb > ub in bounds {}.'
+                             .format(', '.join(str(b) for b in bnderr)))
+
+        self.bounds = abound
+
+        # Constraints
+        # Process constraint dict sequence:
+        if constraints is not None:
+            self.min_cons = constraints
+            self.g_cons = []
+            self.g_args = []
+            if not isinstance(constraints, (tuple, list)):
+                constraints = (constraints,)
+
+            for cons in constraints:
+                if cons['type'] == 'ineq':
+                    self.g_cons.append(cons['fun'])
+                    try:
+                        self.g_args.append(cons['args'])
+                    except KeyError:
+                        self.g_args.append(())
+            self.g_cons = tuple(self.g_cons)
+            self.g_args = tuple(self.g_args)
+        else:
+            self.g_cons = None
+            self.g_args = None
+
+        # Define local minimization keyword arguments
+        # Start with defaults
+        self.minimizer_kwargs = {'method': 'SLSQP',
+                                 'bounds': self.bounds,
+                                 'options': {},
+                                 'callback': self.callback
+                                 }
+        if minimizer_kwargs is not None:
+            # Overwrite with supplied values
+            self.minimizer_kwargs.update(minimizer_kwargs)
+
+        else:
+            self.minimizer_kwargs['options'] = {'ftol': 1e-12}
+
+        if (self.minimizer_kwargs['method'] in ('SLSQP', 'COBYLA') and
+                (minimizer_kwargs is not None and
+                 'constraints' not in minimizer_kwargs and
+                 constraints is not None) or
+                (self.g_cons is not None)):
+            self.minimizer_kwargs['constraints'] = self.min_cons
+
+        # Process options dict
+        if options is not None:
+            self.init_options(options)
+        else:  # Default settings:
+            self.f_min_true = None
+            self.minimize_every_iter = False
+
+            # Algorithm limits
+            self.maxiter = None
+            self.maxfev = None
+            self.maxev = None
+            self.maxtime = None
+            self.f_min_true = None
+            self.minhgrd = None
+
+            # Objective function knowledge
+            self.symmetry = False
+
+            # Algorithm functionality
+            self.local_iter = False
+            self.infty_cons_sampl = True
+
+            # Feedback
+            self.disp = False
+
+        # Remove unknown arguments in self.minimizer_kwargs
+        # Start with arguments all the solvers have in common
+        self.min_solver_args = ['fun', 'x0', 'args',
+                                'callback', 'options', 'method']
+        # then add the ones unique to specific solvers
+        solver_args = {
+            '_custom': ['jac', 'hess', 'hessp', 'bounds', 'constraints'],
+            'nelder-mead': [],
+            'powell': [],
+            'cg': ['jac'],
+            'bfgs': ['jac'],
+            'newton-cg': ['jac', 'hess', 'hessp'],
+            'l-bfgs-b': ['jac', 'bounds'],
+            'tnc': ['jac', 'bounds'],
+            'cobyla': ['constraints'],
+            'slsqp': ['jac', 'bounds', 'constraints'],
+            'dogleg': ['jac', 'hess'],
+            'trust-ncg': ['jac', 'hess', 'hessp'],
+            'trust-krylov': ['jac', 'hess', 'hessp'],
+            'trust-exact': ['jac', 'hess'],
+            'trust-constr': ['jac', 'hess', 'hessp'],
+        }
+        method = self.minimizer_kwargs['method']
+        self.min_solver_args += solver_args[method.lower()]
+
+        # Only retain the known arguments
+        def _restrict_to_keys(dictionary, goodkeys):
+            """Remove keys from dictionary if not in goodkeys - inplace"""
+            existingkeys = set(dictionary)
+            for key in existingkeys - set(goodkeys):
+                dictionary.pop(key, None)
+
+        _restrict_to_keys(self.minimizer_kwargs, self.min_solver_args)
+        _restrict_to_keys(self.minimizer_kwargs['options'],
+                          self.min_solver_args + ['ftol'])
+
+        # Algorithm controls
+        # Global controls
+        self.stop_global = False  # Used in the stopping_criteria method
+        self.break_routine = False  # Break the algorithm globally
+        self.iters = iters  # Iterations to be run
+        self.iters_done = 0  # Iterations completed so far
+        self.n = n  # Sampling points per iteration
+        self.nc = n  # Sampling points to sample in current iteration
+        self.n_prc = 0  # Processed points (used to track Delaunay iters)
+        self.n_sampled = 0  # To track number of sampling points already generated
+        self.fn = 0  # Number of feasible sampling points evaluations performed
+        self.hgr = 0  # Homology group rank
+
+        # Default settings if no sampling criteria.
+        if self.iters is None:
+            self.iters = 1
+        if self.n is None:
+            self.n = 100
+            if sampling_method == 'sobol':
+                self.n = 128
+            self.nc = self.n
+
+        if not ((self.maxiter is None) and (self.maxfev is None) and
+                (self.maxev is None) and (self.minhgrd is None) and
+                (self.f_min_true is None)):
+            self.iters = None
+
+        # Set complex construction mode based on a provided stopping criteria:
+        # Choose complex constructor
+        if sampling_method == 'simplicial':
+            self.iterate_complex = self.iterate_hypercube
+            self.minimizers = self.simplex_minimizers
+            self.sampling_method = sampling_method
+
+        elif sampling_method in ['halton', 'sobol'] or \
+                not isinstance(sampling_method, str):
+            self.iterate_complex = self.iterate_delaunay
+            self.minimizers = self.delaunay_complex_minimisers
+            # Sampling method used
+            if sampling_method in ['halton', 'sobol']:
+                if sampling_method == 'sobol':
+                    self.n = int(2 ** np.ceil(np.log2(self.n)))
+                    self.nc = self.n
+                    self.sampling_method = 'sobol'
+                    self.qmc_engine = qmc.Sobol(d=self.dim, scramble=False,
+                                                seed=np.random.RandomState())
+                else:
+                    self.sampling_method = 'halton'
+                    self.qmc_engine = qmc.Halton(d=self.dim, scramble=True,
+                                                 seed=np.random.RandomState())
+                sampling_method = lambda n, d: self.qmc_engine.random(n)
+            else:
+                # A user defined sampling method:
+                self.sampling_method = 'custom'
+
+            self.sampling = self.sampling_custom
+            self.sampling_function = sampling_method  # F(n, d)
+
+        # Local controls
+        self.stop_l_iter = False  # Local minimisation iterations
+        self.stop_complex_iter = False  # Sampling iterations
+
+        # Initiate storage objects used in algorithm classes
+        self.minimizer_pool = []
+
+        # Cache of local minimizers mapped
+        self.LMC = LMapCache()
+
+        # Initialize return object
+        self.res = OptimizeResult()  # scipy.optimize.OptimizeResult object
+        self.res.nfev = 0  # Includes each sampling point as func evaluation
+        self.res.nlfev = 0  # Local function evals for all minimisers
+        self.res.nljev = 0  # Local Jacobian evals for all minimisers
+        self.res.nlhev = 0  # Local Hessian evals for all minimisers
+
+    # Initiation aids
+    def init_options(self, options):
+        """
+        Initiates the options.
+
+        Can also be useful to change parameters after class initiation.
+
+        Parameters
+        ----------
+        options : dict
+
+        Returns
+        -------
+        None
+
+        """
+        # Update 'options' dict passed to optimize.minimize
+        # Do this first so we don't mutate `options` below.
+        self.minimizer_kwargs['options'].update(options)
+
+        # Ensure that 'jac', 'hess', and 'hessp' are passed directly to
+        # `minimize` as keywords, not as part of its 'options' dictionary.
+        for opt in ['jac', 'hess', 'hessp']:
+            if opt in self.minimizer_kwargs['options']:
+                self.minimizer_kwargs[opt] = (
+                    self.minimizer_kwargs['options'].pop(opt))
+
+        # Default settings:
+        self.minimize_every_iter = options.get('minimize_every_iter', False)
+
+        # Algorithm limits
+        # Maximum number of iterations to perform.
+        self.maxiter = options.get('maxiter', None)
+        # Maximum number of function evaluations in the feasible domain
+        self.maxfev = options.get('maxfev', None)
+        # Maximum number of sampling evaluations (includes searching in
+        # infeasible points)
+        self.maxev = options.get('maxev', None)
+        # Maximum processing runtime allowed
+        self.init = time.time()
+        self.maxtime = options.get('maxtime', None)
+        if 'f_min' in options:
+            # Specify the minimum objective function value, if it is known.
+            self.f_min_true = options['f_min']
+            self.f_tol = options.get('f_tol', 1e-4)
+        else:
+            self.f_min_true = None
+
+        self.minhgrd = options.get('minhgrd', None)
+
+        # Objective function knowledge
+        self.symmetry = options.get('symmetry', False)
+
+        # Algorithm functionality
+        # Only evaluate a few of the best candidates
+        self.local_iter = options.get('local_iter', False)
+
+        self.infty_cons_sampl = options.get('infty_constraints', True)
+
+        # Feedback
+        self.disp = options.get('disp', False)
+
+    # Iteration properties
+    # Main construction loop:
+    def construct_complex(self):
+        """
+        Construct for `iters` iterations.
+
+        If uniform sampling is used, every iteration adds ``n`` sampling points.
+
+        Iteration stops early if a stopping criterion (e.g., number of
+        sampling points or processing time) has been met.
+
+        """
+        if self.disp:
+            print('Splitting first generation')
+
+        while not self.stop_global:
+            if self.break_routine:
+                break
+            # Iterate complex, process minimisers
+            self.iterate()
+            self.stopping_criteria()
+
+        # Build minimiser pool
+        # Final iteration only needed if pools weren't minimised every iteration
+        if not self.minimize_every_iter:
+            if not self.break_routine:
+                self.find_minima()
+
+        self.res.nit = self.iters_done + 1
+
+    def find_minima(self):
+        """
+        Construct the minimizer pool, map the minimizers to local minima
+        and sort the results into a global return object.
+        """
+        self.minimizers()
+        if len(self.X_min) != 0:
+            # Minimize the pool of minimizers with local minimization methods
+            # Note that if Options['local_iter'] is an `int` instead of default
+            # value False then only that number of candidates will be minimized
+            self.minimise_pool(self.local_iter)
+            # Sort results and build the global return object
+            self.sort_result()
+
+            # Lowest values used to report in case of failures
+            self.f_lowest = self.res.fun
+            self.x_lowest = self.res.x
+        else:
+            self.find_lowest_vertex()
+
+    def find_lowest_vertex(self):
+        # Find the lowest objective function value on one of
+        # the vertices of the simplicial complex
+        if self.sampling_method == 'simplicial':
+            self.f_lowest = np.inf
+            for x in self.HC.V.cache:
+                if self.HC.V[x].f < self.f_lowest:
+                    self.f_lowest = self.HC.V[x].f
+                    self.x_lowest = self.HC.V[x].x_a
+            if self.f_lowest == np.inf:  # no feasible point
+                self.f_lowest = None
+                self.x_lowest = None
+        else:
+            if self.fn == 0:
+                self.f_lowest = None
+                self.x_lowest = None
+            else:
+                self.f_I = np.argsort(self.F, axis=-1)
+                self.f_lowest = self.F[self.f_I[0]]
+                self.x_lowest = self.C[self.f_I[0]]
+
+    # Stopping criteria functions:
+    def finite_iterations(self):
+        if self.iters is not None:
+            if self.iters_done >= (self.iters - 1):
+                self.stop_global = True
+
+        if self.maxiter is not None:  # Stop for infeasible sampling
+            if self.iters_done >= (self.maxiter - 1):
+                self.stop_global = True
+        return self.stop_global
+
+    def finite_fev(self):
+        # Finite function evals in the feasible domain
+        if self.fn >= self.maxfev:
+            self.stop_global = True
+        return self.stop_global
+
+    def finite_ev(self):
+        # Finite evaluations including infeasible sampling points
+        if self.n_sampled >= self.maxev:
+            self.stop_global = True
+
+    def finite_time(self):
+        if (time.time() - self.init) >= self.maxtime:
+            self.stop_global = True
+
+    def finite_precision(self):
+        """
+        Stop the algorithm if the final function value is known
+
+        Specify in options (with ``self.f_min_true = options['f_min']``)
+        and the tolerance with ``f_tol = options['f_tol']``
+        """
+        # If no minimizer has been found use the lowest sampling value
+        if len(self.LMC.xl_maps) == 0:
+            self.find_lowest_vertex()
+
+        # Function to stop algorithm at specified percentage error:
+        if self.f_lowest == 0.0:
+            if self.f_min_true == 0.0:
+                if self.f_lowest <= self.f_tol:
+                    self.stop_global = True
+        else:
+            pe = (self.f_lowest - self.f_min_true) / abs(self.f_min_true)
+            if self.f_lowest <= self.f_min_true:
+                self.stop_global = True
+                if abs(pe) >= 2 * self.f_tol:
+                    warnings.warn("A much lower value than the expected "
+                                  "f* = {} was found: "
+                                  "f_lowest = {}".format(self.f_min_true,
+                                                         self.f_lowest))
+            if pe <= self.f_tol:
+                self.stop_global = True
+
+        return self.stop_global
+
+    def finite_homology_growth(self):
+        if self.LMC.size == 0:
+            return  # no reason to stop yet
+        self.hgrd = self.LMC.size - self.hgr
+
+        self.hgr = self.LMC.size
+        if self.hgrd <= self.minhgrd:
+            self.stop_global = True
+        return self.stop_global
+
+    def stopping_criteria(self):
+        """
+        Various stopping criteria run every iteration.
+
+        Sets ``self.stop_global`` to True if any criterion is met.
+        """
+        if self.maxiter is not None:
+            self.finite_iterations()
+        if self.iters is not None:
+            self.finite_iterations()
+        if self.maxfev is not None:
+            self.finite_fev()
+        if self.maxev is not None:
+            self.finite_ev()
+        if self.maxtime is not None:
+            self.finite_time()
+        if self.f_min_true is not None:
+            self.finite_precision()
+        if self.minhgrd is not None:
+            self.finite_homology_growth()
+
+    def iterate(self):
+        self.iterate_complex()
+
+        # Build minimizer pool
+        if self.minimize_every_iter:
+            if not self.break_routine:
+                self.find_minima()  # Process minimizer pool
+
+        # Algorithm updates
+        self.iters_done += 1
+
+    def iterate_hypercube(self):
+        """
+        Iterate a subdivision of the complex
+
+        Note: called with ``self.iterate_complex()`` after class initiation
+        """
+        # Iterate the complex
+        if self.n_sampled == 0:
+            # Initial triangulation of the hyper-rectangle. Note that
+            # we use `self._raw_func` as `self.func` is a *wrapped* function
+            # that already takes the original function arguments into
+            # account.
+            self.HC = Complex(self.dim, self._raw_func, self.args,
+                              self.symmetry, self.bounds, self.g_cons,
+                              self.g_args)
+        else:
+            self.HC.split_generation()
+
+        # feasible sampling points counted by the triangulation.py routines
+        self.fn = self.HC.V.nfev
+        self.n_sampled = self.HC.V.size  # nevs counted in triangulation.py
+        return
+
+    def iterate_delaunay(self):
+        """
+        Build a complex of Delaunay triangulated points
+
+        Note: called with ``self.iterate_complex()`` after class initiation
+        """
+        self.sampled_surface(infty_cons_sampl=self.infty_cons_sampl)
+        self.nc += self.n
+        self.n_sampled = self.nc
+
+    # Hypercube minimizers
+    def simplex_minimizers(self):
+        """
+        Returns the indices of all minimizers
+        """
+        self.minimizer_pool = []
+        # Note: Can implement parallelization here
+        for x in self.HC.V.cache:
+            if self.HC.V[x].minimiser():
+                if self.disp:
+                    logging.info('=' * 60)
+                    logging.info(
+                        'v.x = {} is minimizer'.format(self.HC.V[x].x_a))
+                    logging.info('v.f = {} is minimizer'.format(self.HC.V[x].f))
+                    logging.info('=' * 30)
+
+                if self.HC.V[x] not in self.minimizer_pool:
+                    self.minimizer_pool.append(self.HC.V[x])
+
+                if self.disp:
+                    logging.info('Neighbors:')
+                    logging.info('=' * 30)
+                    for vn in self.HC.V[x].nn:
+                        logging.info('x = {} || f = {}'.format(vn.x, vn.f))
+
+                    logging.info('=' * 60)
+
+        self.minimizer_pool_F = []
+        self.X_min = []
+        # normalized tuple in the Vertex cache
+        self.X_min_cache = {}  # Cache used in hypercube sampling
+
+        for v in self.minimizer_pool:
+            self.X_min.append(v.x_a)
+            self.minimizer_pool_F.append(v.f)
+            self.X_min_cache[tuple(v.x_a)] = v.x
+
+        self.minimizer_pool_F = np.array(self.minimizer_pool_F)
+        self.X_min = np.array(self.X_min)
+
+        # TODO: Only do this if global mode
+        self.sort_min_pool()
+
+        return self.X_min
+
+    # Local minimization
+    # Minimizer pool processing
+    def minimise_pool(self, force_iter=False):
+        """
+        This processing method can optionally minimise only the best candidate
+        solutions in the minimizer pool.
+
+        Parameters
+        ----------
+        force_iter : int
+                     Number of starting minimizers to process (can be specified
+                     globally or locally)
+
+        """
+        # Find first local minimum
+        # NOTE: Since we always minimize this value regardless, it would be a
+        # waste to build the topograph before minimizing
+        lres_f_min = self.minimize(self.X_min[0], ind=self.minimizer_pool[0])
+
+        # Trim minimized point from current minimizer set
+        self.trim_min_pool(0)
+
+        # Force processing of only the best `force_iter` candidates
+        if force_iter:
+            self.local_iter = force_iter
+
+        while not self.stop_l_iter:
+            # Global stopping criteria:
+            if self.f_min_true is not None:
+                if (lres_f_min.fun - self.f_min_true) / abs(
+                        self.f_min_true) <= self.f_tol:
+                    self.stop_l_iter = True
+                    break
+            # Note first iteration is outside loop:
+            if self.local_iter is not None:
+                if self.disp:
+                    logging.info(
+                        'SHGO.iters in function minimise_pool = {}'.format(
+                            self.local_iter))
+                self.local_iter -= 1
+                if self.local_iter == 0:
+                    self.stop_l_iter = True
+                    break
+
+            if np.shape(self.X_min)[0] == 0:
+                self.stop_l_iter = True
+                break
+
+            # Construct topograph from current minimizer set
+            # (NOTE: This is a very small topograph using only the minimizer
+            #        pool; it might be worth using some graph theory tools
+            #        instead.)
+            self.g_topograph(lres_f_min.x, self.X_min)
+
+            # Find local minimum at the minimiser with the greatest Euclidean
+            # distance from the current solution
+            ind_xmin_l = self.Z[:, -1]
+            lres_f_min = self.minimize(self.Ss[-1, :], self.minimizer_pool[-1])
+
+            # Trim minimised point from current minimizer set
+            self.trim_min_pool(ind_xmin_l)
+
+        # Reset controls
+        self.stop_l_iter = False
+        return
+
+    def sort_min_pool(self):
+        # Sort to find minimum func value in min_pool
+        self.ind_f_min = np.argsort(self.minimizer_pool_F)
+        self.minimizer_pool = np.array(self.minimizer_pool)[self.ind_f_min]
+        self.minimizer_pool_F = np.array(self.minimizer_pool_F)[
+            self.ind_f_min]
+        return
+
+    def trim_min_pool(self, trim_ind):
+        self.X_min = np.delete(self.X_min, trim_ind, axis=0)
+        self.minimizer_pool_F = np.delete(self.minimizer_pool_F, trim_ind)
+        self.minimizer_pool = np.delete(self.minimizer_pool, trim_ind)
+        return
+
+    def g_topograph(self, x_min, X_min):
+        """
+        Returns the current feasible set ``X_min`` sorted by increasing
+        Euclidean distance from the specified value ``x_min``, and reorders
+        the minimizer pool to match.
+
+        """
+        x_min = np.array([x_min])
+        self.Y = spatial.distance.cdist(x_min, X_min, 'euclidean')
+        # Find sorted indexes of spatial distances:
+        self.Z = np.argsort(self.Y, axis=-1)
+
+        self.Ss = X_min[self.Z][0]
+        self.minimizer_pool = self.minimizer_pool[self.Z]
+        self.minimizer_pool = self.minimizer_pool[0]
+        return self.Ss
+
+    # Local bound functions
+    def construct_lcb_simplicial(self, v_min):
+        """
+        Construct locally (approximately) convex bounds
+
+        Parameters
+        ----------
+        v_min : Vertex object
+                The minimizer vertex
+
+        Returns
+        -------
+        cbounds : list of lists
+            List of length ``dim`` with a ``[lower, upper]`` bound pair for
+            each dimension
+
+        """
+        cbounds = [[x_b_i[0], x_b_i[1]] for x_b_i in self.bounds]
+        # Loop over all bounds
+        for vn in v_min.nn:
+            for i, x_i in enumerate(vn.x_a):
+                # Lower bound
+                if (x_i < v_min.x_a[i]) and (x_i > cbounds[i][0]):
+                    cbounds[i][0] = x_i
+
+                # Upper bound
+                if (x_i > v_min.x_a[i]) and (x_i < cbounds[i][1]):
+                    cbounds[i][1] = x_i
+
+        if self.disp:
+            logging.info('cbounds found for v_min.x_a = {}'.format(v_min.x_a))
+            logging.info('cbounds = {}'.format(cbounds))
+
+        return cbounds
+
+    def construct_lcb_delaunay(self, v_min, ind=None):
+        """
+        Construct locally (approximately) convex bounds
+
+        Parameters
+        ----------
+        v_min : Vertex object
+                The minimizer vertex
+
+        Returns
+        -------
+        cbounds : list of lists
+            List of length ``dim`` with a ``[lower, upper]`` bound pair for
+            each dimension
+        """
+        cbounds = [[x_b_i[0], x_b_i[1]] for x_b_i in self.bounds]
+
+        return cbounds
+
+    # Minimize a starting point locally
+    def minimize(self, x_min, ind=None):
+        """
+        Find a local minimum using the specified sampling point as a
+        starting value.
+
+        Parameters
+        ----------
+        x_min : vector of floats
+            Current starting point to minimize.
+
+        Returns
+        -------
+        lres : OptimizeResult
+            The local optimization result represented as an `OptimizeResult`
+            object.
+        """
+        # Use minima maps if vertex was already run
+        if self.disp:
+            logging.info('Vertex minimiser maps = {}'.format(self.LMC.v_maps))
+
+        if self.LMC[x_min].lres is not None:
+            return self.LMC[x_min].lres
+
+        # TODO: Check discarded bound rules
+
+        if self.callback is not None:
+            print('Callback for '
+                  'minimizer starting at {}:'.format(x_min))
+
+        if self.disp:
+            print('Starting '
+                  'minimization at {}...'.format(x_min))
+
+        if self.sampling_method == 'simplicial':
+            x_min_t = tuple(x_min)
+            # Find the normalized tuple in the Vertex cache:
+            x_min_t_norm = self.X_min_cache[tuple(x_min_t)]
+
+            x_min_t_norm = tuple(x_min_t_norm)
+
+            g_bounds = self.construct_lcb_simplicial(self.HC.V[x_min_t_norm])
+            if 'bounds' in self.min_solver_args:
+                self.minimizer_kwargs['bounds'] = g_bounds
+
+        else:
+            g_bounds = self.construct_lcb_delaunay(x_min, ind=ind)
+            if 'bounds' in self.min_solver_args:
+                self.minimizer_kwargs['bounds'] = g_bounds
+
+        if self.disp and 'bounds' in self.minimizer_kwargs:
+            print('bounds in kwarg:')
+            print(self.minimizer_kwargs['bounds'])
+
+        # Local minimization using scipy.optimize.minimize:
+        lres = minimize(self.func, x_min, **self.minimizer_kwargs)
+
+        if self.disp:
+            print('lres = {}'.format(lres))
+
+        # Local function evals for all minimizers
+        self.res.nlfev += lres.nfev
+        if 'njev' in lres:
+            self.res.nljev += lres.njev
+        if 'nhev' in lres:
+            self.res.nlhev += lres.nhev
+
+        try:  # lres.fun may be a 1x1 NumPy array; unwrap it to a scalar
+            lres.fun = lres.fun[0]
+        except (IndexError, TypeError):
+            pass
+
+        # Append minima maps; indexing LMC creates a cache entry for x_min
+        # if one does not exist yet (side effect of LMapCache.__getitem__)
+        self.LMC[x_min]
+        self.LMC.add_res(x_min, lres, bounds=g_bounds)
+
+        return lres
+
+    # Post local minimization processing
+    def sort_result(self):
+        """
+        Sort results and build the global return object
+        """
+        # Sort results in local minima cache
+        results = self.LMC.sort_cache_result()
+        self.res.xl = results['xl']
+        self.res.funl = results['funl']
+        self.res.x = results['x']
+        self.res.fun = results['fun']
+
+        # Add local func evals to sampling func evals
+        # Count the number of feasible vertices and add to local func evals:
+        self.res.nfev = self.fn + self.res.nlfev
+        return self.res
+
+    # Algorithm controls
+    def fail_routine(self, mes="Failed to converge"):
+        self.break_routine = True
+        self.res.success = False
+        self.X_min = [None]
+        self.res.message = mes
+
+    def sampled_surface(self, infty_cons_sampl=False):
+        """
+        Sample the function surface.
+
+        There are 2 modes. If ``infty_cons_sampl`` is True then the sampled
+        points that are generated outside the feasible domain will be
+        assigned an ``inf`` value in accordance with SHGO rules.
+        This guarantees convergence and usually requires fewer objective
+        function evaluations at the computational cost of more Delaunay
+        triangulation points.
+
+        If ``infty_cons_sampl`` is False, then the infeasible points are
+        discarded and only a subspace of the sampled points is used. This
+        comes at the cost of the loss of guaranteed convergence and usually
+        requires more objective function evaluations.
+        """
+        # Generate sampling points
+        if self.disp:
+            print('Generating sampling points')
+        self.sampling(self.nc, self.dim)
+        self.n = self.nc
+
+        if not infty_cons_sampl:
+            # Find subspace of feasible points
+            if self.g_cons is not None:
+                self.sampling_subspace()
+
+        # Sort remaining samples
+        self.sorted_samples()
+
+        # Find objective function references
+        self.fun_ref()
+
+        self.n_sampled = self.nc
+
+    def delaunay_complex_minimisers(self):
+        # Construct complex minimizers on the current sampling set.
+        # if self.fn >= (self.dim + 1):
+        if self.fn >= (self.dim + 2):
+            # TODO: Check on strange Qhull error where the number of vertices
+            # required for an initial simplex is higher than n + 1?
+            if self.dim < 2:  # Scalar objective functions
+                if self.disp:
+                    print('Constructing 1-D minimizer pool')
+
+                self.ax_subspace()
+                self.surface_topo_ref()
+                self.minimizers_1D()
+
+            else:  # Multivariate functions.
+                if self.disp:
+                    print('Constructing Gabriel graph and minimizer pool')
+
+                if self.iters == 1:
+                    self.delaunay_triangulation(grow=False)
+                else:
+                    self.delaunay_triangulation(grow=True, n_prc=self.n_prc)
+                    self.n_prc = self.C.shape[0]
+
+                if self.disp:
+                    print('Triangulation completed, building minimizer pool')
+
+                self.delaunay_minimizers()
+
+            if self.disp:
+                logging.info(
+                    "Minimizer pool = SHGO.X_min = {}".format(self.X_min))
+        else:
+            if self.disp:
+                print(
+                    'Not enough sampling points found in the feasible domain.')
+            self.minimizer_pool = [None]
+            try:
+                self.X_min
+            except AttributeError:
+                self.X_min = []
+
+    def sampling_custom(self, n, dim):
+        """
+        Generates uniform sampling points in a hypercube and scales the points
+        to the bound limits.
+        """
+        # Generate sampling points.
+        # Generate uniform sample points in [0, 1]^m \subset R^m
+        self.C = self.sampling_function(n, dim)
+        # Distribute over bounds
+        for i in range(len(self.bounds)):
+            self.C[:, i] = (self.C[:, i] *
+                            (self.bounds[i][1] - self.bounds[i][0])
+                            + self.bounds[i][0])
+        return self.C
+
+    def sampling_subspace(self):
+        """Find subspace of feasible points from g_func definition"""
+        # Subspace of feasible points.
+        for ind, g in enumerate(self.g_cons):
+            self.C = self.C[g(self.C.T, *self.g_args[ind]) >= 0.0]
+            if self.C.size == 0:
+                self.res.message = ('No sampling point found within the '
+                                    + 'feasible set. Increasing sampling '
+                                    + 'size.')
+                # sampling correctly for both 1-D and >1-D cases
+                if self.disp:
+                    print(self.res.message)
+
+    def sorted_samples(self):  # Validated
+        """Find indexes of the sorted sampling points"""
+        self.Ind_sorted = np.argsort(self.C, axis=0)
+        self.Xs = self.C[self.Ind_sorted]
+        return self.Ind_sorted, self.Xs
+
+    def ax_subspace(self):  # Validated
+        """
+        Finds the subspace vectors along each component axis.
+        """
+        self.Ci = []
+        self.Xs_i = []
+        self.Ii = []
+        for i in range(self.dim):
+            self.Ci.append(self.C[:, i])
+            self.Ii.append(self.Ind_sorted[:, i])
+            self.Xs_i.append(self.Xs[:, i])
+
+    def fun_ref(self):
+        """
+        Find the objective function output reference table
+        """
+        # TODO: Replace with cached wrapper
+
+        # Note: This process can be pooled easily
+        # Obj. function returns to be used as a reference table:
+        f_cache_bool = False
+        if self.fn > 0:  # Store old function evaluations
+            Ftemp = self.F
+            fn_old = self.fn
+            f_cache_bool = True
+
+        self.F = np.zeros(np.shape(self.C)[0])
+        # NOTE: It might be easier to replace this with a cached
+        #      objective function
+        for i in range(self.fn, np.shape(self.C)[0]):
+            eval_f = True
+            if self.g_cons is not None:
+                for g in self.g_cons:
+                    if g(self.C[i, :], *self.args) < 0.0:
+                        eval_f = False
+                        break  # Breaks the g loop
+
+            if eval_f:
+                self.F[i] = self.func(self.C[i, :])
+                self.fn += 1
+            elif self.infty_cons_sampl:
+                self.F[i] = np.inf
+                self.fn += 1
+        if f_cache_bool:
+            if fn_old > 0:  # Restore saved function evaluations
+                self.F[0:fn_old] = Ftemp
+
+        return self.F
+
+    def surface_topo_ref(self):  # Validated
+        """
+        Find the BD and FD finite differences along each component vector.
+        """
+        # Replace numpy inf, -inf and nan objects with floating point numbers
+        # nan --> float
+        self.F[np.isnan(self.F)] = np.inf
+        # inf, -inf  --> floats
+        self.F = np.nan_to_num(self.F)
+
+        self.Ft = self.F[self.Ind_sorted]
+        self.Ftp = np.diff(self.Ft, axis=0)  # FD
+        self.Ftm = np.diff(self.Ft[::-1], axis=0)[::-1]  # BD
+
+    def sample_topo(self, ind):
+        # Find the position of the sample in the component axial directions
+        self.Xi_ind_pos = []
+        self.Xi_ind_topo_i = []
+
+        for i in range(self.dim):
+            for I_ind, x in enumerate(self.Ii[i]):
+                if x == ind:
+                    self.Xi_ind_pos.append(I_ind)
+
+            # Use the topo reference tables to find if point is a minimizer on
+            # the current axis
+
+            # First check if index is on the boundary of the sampling points:
+            if self.Xi_ind_pos[i] == 0:
+                # if boundary is in basin
+                self.Xi_ind_topo_i.append(self.Ftp[:, i][0] > 0)
+
+            elif self.Xi_ind_pos[i] == self.fn - 1:
+                # Largest value at sample size
+                self.Xi_ind_topo_i.append(self.Ftp[:, i][self.fn - 2] < 0)
+
+            # Find axial reference for other points
+            else:
+                Xi_ind_top_p = self.Ftp[:, i][self.Xi_ind_pos[i]] > 0
+                Xi_ind_top_m = self.Ftm[:, i][self.Xi_ind_pos[i] - 1] > 0
+                self.Xi_ind_topo_i.append(Xi_ind_top_p and Xi_ind_top_m)
+
+        self.Xi_ind_topo = np.array(self.Xi_ind_topo_i).all()
+
+        return self.Xi_ind_topo
+
+    def minimizers_1D(self):
+        """
+        Returns the indices of all minimizers
+        """
+        self.minimizer_pool = []
+        # Note: Can implement parallelization here
+        for ind in range(self.fn):
+            min_bool = self.sample_topo(ind)
+            if min_bool:
+                self.minimizer_pool.append(ind)
+
+        self.minimizer_pool_F = self.F[self.minimizer_pool]
+
+        # Sort to find minimum func value in min_pool
+        self.sort_min_pool()
+        if len(self.minimizer_pool) != 0:
+            self.X_min = self.C[self.minimizer_pool]
+        else:
+            self.X_min = []  # An empty pool breaks the main routine
+
+        return self.X_min
+
+    def delaunay_triangulation(self, grow=False, n_prc=0):
+        if not grow:
+            self.Tri = spatial.Delaunay(self.C)
+        else:
+            if hasattr(self, 'Tri'):
+                self.Tri.add_points(self.C[n_prc:, :])
+            else:
+                self.Tri = spatial.Delaunay(self.C, incremental=True)
+
+        return self.Tri
+
+    @staticmethod
+    def find_neighbors_delaunay(pindex, triang):
+        """
+        Returns the indices of points connected to ``pindex`` on the Gabriel
+        chain subgraph of the Delaunay triangulation.
+        """
+        return triang.vertex_neighbor_vertices[1][
+               triang.vertex_neighbor_vertices[0][pindex]:
+               triang.vertex_neighbor_vertices[0][pindex + 1]]
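+
+    # Illustrative sketch (not part of the original source):
+    # ``Delaunay.vertex_neighbor_vertices`` is a CSR-style pair
+    # ``(indptr, indices)``, so the neighbors of point ``p`` are
+    # ``indices[indptr[p]:indptr[p + 1]]``. For example:
+    #
+    #     import numpy as np
+    #     from scipy import spatial
+    #     pts = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])
+    #     tri = spatial.Delaunay(pts)
+    #     indptr, indices = tri.vertex_neighbor_vertices
+    #     neighbors_of_0 = indices[indptr[0]:indptr[1]]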
+
+    def sample_delaunay_topo(self, ind):
+        self.Xi_ind_topo_i = []
+
+        # Find the position of the sample in the component Gabriel chain
+        G_ind = self.find_neighbors_delaunay(ind, self.Tri)
+
+        # Find the finite difference between each pair of neighboring points
+        for g_i in G_ind:
+            rel_topo_bool = self.F[ind] < self.F[g_i]
+            self.Xi_ind_topo_i.append(rel_topo_bool)
+
+        # Check if minimizer
+        self.Xi_ind_topo = np.array(self.Xi_ind_topo_i).all()
+
+        return self.Xi_ind_topo
+
+    def delaunay_minimizers(self):
+        """
+        Returns the indices of all minimizers
+        """
+        self.minimizer_pool = []
+        # Note: This loop can easily be parallelized
+        if self.disp:
+            logging.info('self.fn = {}'.format(self.fn))
+            logging.info('self.nc = {}'.format(self.nc))
+            logging.info('np.shape(self.C)'
+                         ' = {}'.format(np.shape(self.C)))
+        for ind in range(self.fn):
+            min_bool = self.sample_delaunay_topo(ind)
+            if min_bool:
+                self.minimizer_pool.append(ind)
+
+        self.minimizer_pool_F = self.F[self.minimizer_pool]
+
+        # Sort to find minimum func value in min_pool
+        self.sort_min_pool()
+        if self.disp:
+            logging.info('self.minimizer_pool = {}'.format(self.minimizer_pool))
+        if len(self.minimizer_pool) != 0:
+            self.X_min = self.C[self.minimizer_pool]
+        else:
+            self.X_min = []  # An empty pool breaks the main routine
+        return self.X_min
+
+
+class LMap:
+    def __init__(self, v):
+        self.v = v
+        self.x_l = None
+        self.lres = None
+        self.f_min = None
+        self.lbounds = []
+
+
+class LMapCache:
+    def __init__(self):
+        self.cache = {}
+
+        # Lists for search queries
+        self.v_maps = []
+        self.xl_maps = []
+        self.f_maps = []
+        self.lbound_maps = []
+        self.size = 0
+
+    def __getitem__(self, v):
+        v = np.ndarray.tolist(v)
+        v = tuple(v)
+        try:
+            return self.cache[v]
+        except KeyError:
+            xval = LMap(v)
+            self.cache[v] = xval
+
+            return self.cache[v]
+
+    def add_res(self, v, lres, bounds=None):
+        v = np.ndarray.tolist(v)
+        v = tuple(v)
+        self.cache[v].x_l = lres.x
+        self.cache[v].lres = lres
+        self.cache[v].f_min = lres.fun
+        self.cache[v].lbounds = bounds
+
+        # Update cache size
+        self.size += 1
+
+        # Cache lists for search queries
+        self.v_maps.append(v)
+        self.xl_maps.append(lres.x)
+        self.f_maps.append(lres.fun)
+        self.lbound_maps.append(bounds)
+
+    def sort_cache_result(self):
+        """
+        Sort results and build the global return object
+        """
+        results = {}
+        # Sort results and save
+        self.xl_maps = np.array(self.xl_maps)
+        self.f_maps = np.array(self.f_maps)
+
+        # Sorted indexes in Func_min
+        ind_sorted = np.argsort(self.f_maps)
+
+        # Save ordered list of minima
+        results['xl'] = self.xl_maps[ind_sorted]  # Ordered x values
+        results['funl'] = self.f_maps[ind_sorted].T
+
+        # Find the global minimum among all minimizers
+        results['x'] = self.xl_maps[ind_sorted[0]]  # Save the global minimizer
+        results['fun'] = self.f_maps[ind_sorted[0]]  # Save the global fun value
+
+        self.xl_maps = np.ndarray.tolist(self.xl_maps)
+        self.f_maps = np.ndarray.tolist(self.f_maps)
+        return results
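+
+
+# Usage sketch (illustrative, not part of the original source): the cache is
+# keyed on the sampling point, so repeated local minimizations started from
+# the same point can reuse a stored result instead of re-running the solver.
+#
+#     import numpy as np
+#     lmc = LMapCache()
+#     v = np.array([0.5, 0.5])
+#     entry = lmc[v]             # creates an empty LMap on first access
+#     if entry.f_min is None:    # no cached result yet
+#         pass                   # run a local solver, then lmc.add_res(v, lres)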
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_shgo_lib/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_shgo_lib/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_shgo_lib/triangulation.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_shgo_lib/triangulation.py
new file mode 100644
index 00000000..125f927d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_shgo_lib/triangulation.py
@@ -0,0 +1,661 @@
+import numpy as np
+import copy
+
+
+class Complex:
+    def __init__(self, dim, func, func_args=(), symmetry=False, bounds=None,
+                 g_cons=None, g_args=()):
+        self.dim = dim
+        self.bounds = bounds
+        self.symmetry = symmetry
+        # TODO: Define the functions to be used here in __init__
+        #       to avoid repeated if checks
+        self.gen = 0
+        self.perm_cycle = 0
+
+        # Every cell is stored in a list of its generation,
+        # e.g., the initial cell is stored in self.H[0],
+        # first-generation cells are stored in self.H[1], etc.
+        # When a cell is subgenerated it is removed from this list.
+
+        self.H = []  # Storage structure of cells
+        # Cache of all vertices
+        self.V = VertexCache(func, func_args, bounds, g_cons, g_args)
+
+        # Generate n-cube here:
+        self.n_cube(dim, symmetry=symmetry)
+
+        # TODO: Assign these functions to the complex instead
+        if symmetry:
+            self.generation_cycle = 1
+            # self.centroid = self.C0()[-1].x
+            # self.C0.centroid = self.centroid
+        else:
+            self.add_centroid()
+
+        self.H.append([])
+        self.H[0].append(self.C0)
+        self.hgr = self.C0.homology_group_rank()
+        self.hgrd = 0  # Complex group rank differential
+        # self.hgr = self.C0.hg_n
+
+        # Build initial graph
+        self.graph_map()
+
+        self.performance = []
+        self.performance.append(0)
+        self.performance.append(0)
+
+    def __call__(self):
+        return self.H
+
+    def n_cube(self, dim, symmetry=False, printout=False):
+        """
+        Generate the simplicial triangulation of the N-D hypercube
+        containing 2**n vertices
+        """
+        origin = list(np.zeros(dim, dtype=int))
+        self.origin = origin
+        supremum = list(np.ones(dim, dtype=int))
+        self.supremum = supremum
+
+        # tuple versions for indexing
+        origintuple = tuple(origin)
+        supremumtuple = tuple(supremum)
+
+        x_parents = [origintuple]
+
+        if symmetry:
+            self.C0 = Simplex(0, 0, 0, self.dim)  # Initial cell object
+            self.C0.add_vertex(self.V[origintuple])
+
+            i_s = 0
+            self.perm_symmetry(i_s, x_parents, origin)
+            self.C0.add_vertex(self.V[supremumtuple])
+        else:
+            self.C0 = Cell(0, 0, origin, supremum)  # Initial cell object
+            self.C0.add_vertex(self.V[origintuple])
+            self.C0.add_vertex(self.V[supremumtuple])
+
+            i_parents = []
+            self.perm(i_parents, x_parents, origin)
+
+        if printout:
+            print("Initial hyper cube:")
+            for v in self.C0():
+                v.print_out()
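+
+        # Illustrative note (not part of the original source): for dim=2 the
+        # non-symmetric branch generates the four corners of the unit square,
+        # e.g. (assuming numpy is imported as np):
+        #
+        #     c = Complex(dim=2, func=lambda x: np.sum(x**2))
+        #     [v.x for v in c.C0()]
+        #     # [(0, 0), (1, 1), (1, 0), (0, 1), (0.5, 0.5)]
+        #
+        # where the final (0.5, 0.5) is appended later by add_centroid().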
+
+    def perm(self, i_parents, x_parents, xi):
+        # TODO: Cut out of for if outside linear constraint cutting planes
+        xi_t = tuple(xi)
+
+        # Construct required iterator
+        iter_range = [x for x in range(self.dim) if x not in i_parents]
+
+        for i in iter_range:
+            i2_parents = copy.copy(i_parents)
+            i2_parents.append(i)
+            xi2 = copy.copy(xi)
+            xi2[i] = 1
+            # Make new vertex list a hashable tuple
+            xi2_t = tuple(xi2)
+            # Append to cell
+            self.C0.add_vertex(self.V[xi2_t])
+            # Connect neighbors and vice versa
+            # Parent point
+            self.V[xi2_t].connect(self.V[xi_t])
+
+            # Connect all family of simplices in parent containers
+            for x_ip in x_parents:
+                self.V[xi2_t].connect(self.V[x_ip])
+
+            x_parents2 = copy.copy(x_parents)
+            x_parents2.append(xi_t)
+
+            # Permute
+            self.perm(i2_parents, x_parents2, xi2)
+
+    def perm_symmetry(self, i_s, x_parents, xi):
+        # TODO: Cut out of for if outside linear constraint cutting planes
+        xi_t = tuple(xi)
+        xi2 = copy.copy(xi)
+        xi2[i_s] = 1
+        # Make new vertex list a hashable tuple
+        xi2_t = tuple(xi2)
+        # Append to cell
+        self.C0.add_vertex(self.V[xi2_t])
+        # Connect neighbors and vice versa
+        # Parent point
+        self.V[xi2_t].connect(self.V[xi_t])
+
+        # Connect all family of simplices in parent containers
+        for x_ip in x_parents:
+            self.V[xi2_t].connect(self.V[x_ip])
+
+        x_parents2 = copy.copy(x_parents)
+        x_parents2.append(xi_t)
+
+        i_s += 1
+        if i_s == self.dim:
+            return
+        # Permute
+        self.perm_symmetry(i_s, x_parents2, xi2)
+
+    def add_centroid(self):
+        """Split the central edge between the origin and supremum of
+        a cell and add the new vertex to the complex"""
+        self.centroid = list(
+            (np.array(self.origin) + np.array(self.supremum)) / 2.0)
+        self.C0.add_vertex(self.V[tuple(self.centroid)])
+        self.C0.centroid = self.centroid
+
+        # Disconnect origin and supremum
+        self.V[tuple(self.origin)].disconnect(self.V[tuple(self.supremum)])
+
+        # Connect centroid to all other vertices
+        for v in self.C0():
+            self.V[tuple(self.centroid)].connect(self.V[tuple(v.x)])
+
+        self.centroid_added = True
+        return
+
+    # Construct incidence array:
+    def incidence(self):
+        if self.centroid_added:
+            self.structure = np.zeros([2 ** self.dim + 1, 2 ** self.dim + 1],
+                                         dtype=int)
+        else:
+            self.structure = np.zeros([2 ** self.dim, 2 ** self.dim],
+                                         dtype=int)
+
+        for v in self.HC.C0():
+            for v2 in v.nn:
+                self.structure[v.index, v2.index] = 1
+
+        return
+
+    # A more sparse incidence generator:
+    def graph_map(self):
+        """ Make a list of size 2**n + 1 where an entry is a vertex
+        incidence, each list element contains a list of indexes
+        corresponding to that entries neighbors"""
+
+        self.graph = [[v2.index for v2 in v.nn] for v in self.C0()]
+
+    # Graph structure method:
+    # 0. Capture the indices of the initial cell.
+    # 1. Generate new origin and supremum scalars based on current generation
+    # 2. Generate a new set of vertices corresponding to a new
+    #    "origin" and "supremum"
+    # 3. Connected based on the indices of the previous graph structure
+    # 4. Disconnect the edges in the original cell
+
+    def sub_generate_cell(self, C_i, gen):
+        """Subgenerate a cell `C_i` of generation `gen` and
+        homology group rank `hgr`."""
+        origin_new = tuple(C_i.centroid)
+        centroid_index = len(C_i()) - 1
+
+        # Append a new generation list if it does not already exist
+        try:
+            self.H[gen]
+        except IndexError:
+            self.H.append([])
+
+        # Generate subcubes using every extreme vertex in C_i as a supremum
+        # and the centroid of C_i as the origin
+        H_new = []  # list storing all the new cubes split from C_i
+        for i, v in enumerate(C_i()[:-1]):
+            supremum = tuple(v.x)
+            H_new.append(
+                self.construct_hypercube(origin_new, supremum, gen, C_i.hg_n))
+
+        for i, connections in enumerate(self.graph):
+            # Present vertex V_new[i]; connect to all connections:
+            if i == centroid_index:  # Break out of centroid
+                break
+
+            for j in connections:
+                C_i()[i].disconnect(C_i()[j])
+
+        # Destroy the old cell
+        if C_i is not self.C0:  # Garbage collector does this anyway; not needed
+            del C_i
+
+        # TODO: Recalculate all the homology group ranks of each cell
+        return H_new
+
+    def split_generation(self):
+        """
+        Run sub_generate_cell for every cell in the current complex self.gen
+        """
+        no_splits = False  # USED IN SHGO
+        try:
+            for c in self.H[self.gen]:
+                if self.symmetry:
+                    # self.sub_generate_cell_symmetry(c, self.gen + 1)
+                    self.split_simplex_symmetry(c, self.gen + 1)
+                else:
+                    self.sub_generate_cell(c, self.gen + 1)
+        except IndexError:
+            no_splits = True  # USED IN SHGO
+
+        self.gen += 1
+        return no_splits  # USED IN SHGO
+
+    def construct_hypercube(self, origin, supremum, gen, hgr,
+                            printout=False):
+        """
+        Build a hypercube with triangulations symmetric to C0.
+
+        Parameters
+        ----------
+        origin : vec
+        supremum : vec (tuple)
+        gen : generation
+        hgr : parent homology group rank
+        """
+        # Initiate new cell
+        v_o = np.array(origin)
+        v_s = np.array(supremum)
+
+        C_new = Cell(gen, hgr, origin, supremum)
+        C_new.centroid = tuple((v_o + v_s) * .5)
+
+        # Build new indexed vertex list
+        V_new = []
+
+        for i, v in enumerate(self.C0()[:-1]):
+            v_x = np.array(v.x)
+            sub_cell_t1 = v_o - v_o * v_x
+            sub_cell_t2 = v_s * v_x
+
+            vec = sub_cell_t1 + sub_cell_t2
+
+            vec = tuple(vec)
+            C_new.add_vertex(self.V[vec])
+            V_new.append(vec)
+
+        # Add new centroid
+        C_new.add_vertex(self.V[C_new.centroid])
+        V_new.append(C_new.centroid)
+
+        # Connect new vertices #TODO: Thread into other loop; no need for V_new
+        for i, connections in enumerate(self.graph):
+            # Present vertex V_new[i]; connect to all connections:
+            for j in connections:
+                self.V[V_new[i]].connect(self.V[V_new[j]])
+
+        if printout:
+            print("A sub hyper cube with:")
+            print("origin: {}".format(origin))
+            print("supremum: {}".format(supremum))
+            for v in C_new():
+                v.print_out()
+
+        # Append the new cell to the complex
+        self.H[gen].append(C_new)
+
+        return C_new
+
+    def split_simplex_symmetry(self, S, gen):
+        """
+        Split a hypersimplex S into two sub-simplices by building a hyperplane
+        that connects a new vertex on an edge (the longest edge in
+        dim = {2, 3}) to every other vertex in the simplex that is not
+        connected to the edge being split.
+
+        This function utilizes the knowledge that the problem is specified
+        with symmetric constraints.
+
+        The longest edge is tracked by an ordering of the
+        vertices in every simplex; the edge between the first and second
+        vertices is the longest edge to be split in the next iteration.
+        """
+        # Append a new generation list if it does not already exist
+        try:
+            self.H[gen]
+        except IndexError:
+            self.H.append([])
+
+        # Find new vertex.
+        # V_new_x = tuple((np.array(C()[0].x) + np.array(C()[1].x)) / 2.0)
+        s = S()
+        firstx = s[0].x
+        lastx = s[-1].x
+        V_new = self.V[tuple((np.array(firstx) + np.array(lastx)) / 2.0)]
+
+        # Disconnect old longest edge
+        self.V[firstx].disconnect(self.V[lastx])
+
+        # Connect new vertices to all other vertices
+        for v in s[:]:
+            v.connect(self.V[V_new.x])
+
+        # New "lower" simplex
+        S_new_l = Simplex(gen, S.hg_n, self.generation_cycle,
+                          self.dim)
+        S_new_l.add_vertex(s[0])
+        S_new_l.add_vertex(V_new)  # Add new vertex
+        for v in s[1:-1]:  # Add all other vertices
+            S_new_l.add_vertex(v)
+
+        # New "upper" simplex
+        S_new_u = Simplex(gen, S.hg_n, S.generation_cycle, self.dim)
+
+        # First vertex on new long edge
+        S_new_u.add_vertex(s[S_new_u.generation_cycle + 1])
+
+        for v in s[1:-1]:  # Remaining vertices
+            S_new_u.add_vertex(v)
+
+        for k, v in enumerate(s[1:-1]):  # iterate through inner vertices
+            if k == S.generation_cycle:
+                S_new_u.add_vertex(V_new)
+            else:
+                S_new_u.add_vertex(v)
+
+        S_new_u.add_vertex(s[-1])  # Second vertex on new long edge
+
+        self.H[gen].append(S_new_l)
+        self.H[gen].append(S_new_u)
+
+        return
+
+    # Plots
+    def plot_complex(self):
+        """
+             Here, C is the LIST of simplexes S in the
+             2- or 3-D complex
+
+             To plot a single simplex S in a set C, use e.g., [C[0]]
+        """
+        from matplotlib import pyplot
+        if self.dim == 2:
+            pyplot.figure()
+            for C in self.H:
+                for c in C:
+                    for v in c():
+                        if self.bounds is None:
+                            x_a = np.array(v.x, dtype=float)
+                        else:
+                            x_a = np.array(v.x, dtype=float)
+                            for i in range(len(self.bounds)):
+                                x_a[i] = (x_a[i] * (self.bounds[i][1]
+                                                    - self.bounds[i][0])
+                                          + self.bounds[i][0])
+
+                        # logging.info('v.x_a = {}'.format(x_a))
+
+                        pyplot.plot([x_a[0]], [x_a[1]], 'o')
+
+                        xlines = []
+                        ylines = []
+                        for vn in v.nn:
+                            if self.bounds is None:
+                                xn_a = np.array(vn.x, dtype=float)
+                            else:
+                                xn_a = np.array(vn.x, dtype=float)
+                                for i in range(len(self.bounds)):
+                                    xn_a[i] = (xn_a[i] * (self.bounds[i][1]
+                                                          - self.bounds[i][0])
+                                               + self.bounds[i][0])
+
+                            # logging.info('vn.x = {}'.format(vn.x))
+
+                            xlines.append(xn_a[0])
+                            ylines.append(xn_a[1])
+                            xlines.append(x_a[0])
+                            ylines.append(x_a[1])
+
+                        pyplot.plot(xlines, ylines)
+
+            if self.bounds is None:
+                pyplot.ylim([-1e-2, 1 + 1e-2])
+                pyplot.xlim([-1e-2, 1 + 1e-2])
+            else:
+                pyplot.ylim(
+                    [self.bounds[1][0] - 1e-2, self.bounds[1][1] + 1e-2])
+                pyplot.xlim(
+                    [self.bounds[0][0] - 1e-2, self.bounds[0][1] + 1e-2])
+
+            pyplot.show()
+
+        elif self.dim == 3:
+            fig = pyplot.figure()
+            ax = fig.add_subplot(111, projection='3d')
+
+            for C in self.H:
+                for c in C:
+                    for v in c():
+                        x = []
+                        y = []
+                        z = []
+                        # logging.info('v.x = {}'.format(v.x))
+                        x.append(v.x[0])
+                        y.append(v.x[1])
+                        z.append(v.x[2])
+                        for vn in v.nn:
+                            x.append(vn.x[0])
+                            y.append(vn.x[1])
+                            z.append(vn.x[2])
+                            x.append(v.x[0])
+                            y.append(v.x[1])
+                            z.append(v.x[2])
+                            # logging.info('vn.x = {}'.format(vn.x))
+
+                        ax.plot(x, y, z, label='simplex')
+
+            pyplot.show()
+        else:
+            print("dimension higher than 3 or wrong complex format")
+        return
+
+
+class VertexGroup:
+    def __init__(self, p_gen, p_hgr):
+        self.p_gen = p_gen  # parent generation
+        self.p_hgr = p_hgr  # parent homology group rank
+        self.hg_n = None
+        self.hg_d = None
+
+        # Maybe add parent homology group rank total history
+        # This is the sum of all previously split cells
+        # cumulatively throughout its entire history
+        self.C = []
+
+    def __call__(self):
+        return self.C
+
+    def add_vertex(self, V):
+        if V not in self.C:
+            self.C.append(V)
+
+    def homology_group_rank(self):
+        """
+        Returns the homology group order of the current cell
+        """
+        if self.hg_n is None:
+            self.hg_n = sum(1 for v in self.C if v.minimiser())
+
+        return self.hg_n
+
+    def homology_group_differential(self):
+        """
+        Returns the difference between the current homology group of the
+        cell and its parent group
+        """
+        if self.hg_d is None:
+            self.hg_d = self.hg_n - self.p_hgr
+
+        return self.hg_d
+
+    def polytopial_sperner_lemma(self):
+        """
+        Returns the number of stationary points theoretically contained in
+        the cell, based on the information currently known about the cell.
+        """
+        pass
+
+    def print_out(self):
+        """
+        Print the current cell to console
+        """
+        for v in self():
+            v.print_out()
+
+
+class Cell(VertexGroup):
+    """
+    Contains a cell that is symmetric to the initial hypercube triangulation
+    """
+
+    def __init__(self, p_gen, p_hgr, origin, supremum):
+        super().__init__(p_gen, p_hgr)
+
+        self.origin = origin
+        self.supremum = supremum
+        self.centroid = None  # (Not always used)
+        # TODO: self.bounds
+
+
+class Simplex(VertexGroup):
+    """
+    Contains a simplex that is symmetric to the initial symmetry constrained
+    hypersimplex triangulation
+    """
+
+    def __init__(self, p_gen, p_hgr, generation_cycle, dim):
+        super().__init__(p_gen, p_hgr)
+
+        self.generation_cycle = (generation_cycle + 1) % (dim - 1)
+
+
+class Vertex:
+    def __init__(self, x, bounds=None, func=None, func_args=(), g_cons=None,
+                 g_cons_args=(), nn=None, index=None):
+        self.x = x
+        self.order = sum(x)
+        x_a = np.array(x, dtype=float)
+        if bounds is not None:
+            for i, (lb, ub) in enumerate(bounds):
+                x_a[i] = x_a[i] * (ub - lb) + lb
+
+        # TODO: Make saving the array structure optional
+        self.x_a = x_a
+
+        # Note: a Vertex is only instantiated once for each x, so the
+        # function is only evaluated once
+        if func is not None:
+            self.feasible = True
+            if g_cons is not None:
+                for g, args in zip(g_cons, g_cons_args):
+                    if g(self.x_a, *args) < 0.0:
+                        self.f = np.inf
+                        self.feasible = False
+                        break
+            if self.feasible:
+                self.f = func(x_a, *func_args)
+
+        if nn is not None:
+            self.nn = nn
+        else:
+            self.nn = set()
+
+        self.fval = None
+        self.check_min = True
+
+        # Index:
+        if index is not None:
+            self.index = index
+
+    def __hash__(self):
+        return hash(self.x)
+
+    def connect(self, v):
+        if v is not self and v not in self.nn:
+            self.nn.add(v)
+            v.nn.add(self)
+
+            if self.minimiser():
+                v._min = False
+                v.check_min = False
+
+            # TEMPORARY
+            self.check_min = True
+            v.check_min = True
+
+    def disconnect(self, v):
+        if v in self.nn:
+            self.nn.remove(v)
+            v.nn.remove(self)
+            self.check_min = True
+            v.check_min = True
+
+    def minimiser(self):
+        """Check whether this vertex is strictly less than all its neighbors"""
+        if self.check_min:
+            self._min = all(self.f < v.f for v in self.nn)
+            self.check_min = False
+
+        return self._min
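+
+    # Illustrative sketch (not part of the original source): connect() keeps
+    # the cached flags consistent, so minimiser() is a purely local test on
+    # the stored function values:
+    #
+    #     va = Vertex((0, 0), func=lambda x: 0.0)
+    #     vb = Vertex((1, 0), func=lambda x: 1.0)
+    #     va.connect(vb)
+    #     va.minimiser()   # True:  0.0 < 1.0
+    #     vb.minimiser()   # False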
+
+    def print_out(self):
+        print("Vertex: {}".format(self.x))
+        constr = 'Connections: '
+        for vc in self.nn:
+            constr += '{} '.format(vc.x)
+
+        print(constr)
+        print('Order = {}'.format(self.order))
+
+
+class VertexCache:
+    def __init__(self, func, func_args=(), bounds=None, g_cons=None,
+                 g_cons_args=(), indexed=True):
+
+        self.cache = {}
+        self.func = func
+        self.g_cons = g_cons
+        self.g_cons_args = g_cons_args
+        self.func_args = func_args
+        self.bounds = bounds
+        self.nfev = 0
+        self.size = 0
+
+        if indexed:
+            self.index = -1
+
+    def __getitem__(self, x, indexed=True):
+        try:
+            return self.cache[x]
+        except KeyError:
+            if indexed:
+                self.index += 1
+                xval = Vertex(x, bounds=self.bounds,
+                              func=self.func, func_args=self.func_args,
+                              g_cons=self.g_cons,
+                              g_cons_args=self.g_cons_args,
+                              index=self.index)
+            else:
+                xval = Vertex(x, bounds=self.bounds,
+                              func=self.func, func_args=self.func_args,
+                              g_cons=self.g_cons,
+                              g_cons_args=self.g_cons_args)
+
+            # logging.info("New generated vertex at x = {}".format(x))
+            # NOTE: Surprisingly high performance increase if logging is commented out
+            self.cache[x] = xval
+
+            # TODO: Check
+            if self.func is not None:
+                if self.g_cons is not None:
+                    if xval.feasible:
+                        self.nfev += 1
+                        self.size += 1
+                    else:
+                        self.size += 1
+                else:
+                    self.nfev += 1
+                    self.size += 1
+
+            return self.cache[x]
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_slsqp_py.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_slsqp_py.py
new file mode 100644
index 00000000..e133f3af
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_slsqp_py.py
@@ -0,0 +1,504 @@
+"""
+This module implements the Sequential Least Squares Programming optimization
+algorithm (SLSQP), originally developed by Dieter Kraft.
+See http://www.netlib.org/toms/733
+
+Functions
+---------
+.. autosummary::
+   :toctree: generated/
+
+    approx_jacobian
+    fmin_slsqp
+
+"""
+
+__all__ = ['approx_jacobian', 'fmin_slsqp']
+
+import numpy as np
+from scipy.optimize._slsqp import slsqp
+from numpy import (zeros, array, linalg, append, asfarray, concatenate, finfo,
+                   sqrt, vstack, isfinite, atleast_1d)
+from ._optimize import (OptimizeResult, _check_unknown_options,
+                        _prepare_scalar_function, _clip_x_for_func,
+                        _check_clip_x)
+from ._numdiff import approx_derivative
+from ._constraints import old_bound_to_new, _arr_to_scalar
+
+
+__docformat__ = "restructuredtext en"
+
+_epsilon = sqrt(finfo(float).eps)
+
+
+def approx_jacobian(x, func, epsilon, *args):
+    """
+    Approximate the Jacobian matrix of a callable function.
+
+    Parameters
+    ----------
+    x : array_like
+        The state vector at which to compute the Jacobian matrix.
+    func : callable f(x,*args)
+        The vector-valued function.
+    epsilon : float
+        The perturbation used to determine the partial derivatives.
+    args : sequence
+        Additional arguments passed to func.
+
+    Returns
+    -------
+    An array of dimensions ``(lenf, lenx)`` where ``lenf`` is the length
+    of the outputs of `func`, and ``lenx`` is the number of elements in
+    `x`.
+
+    Notes
+    -----
+    The approximation is done using forward differences.
+
+    """
+    # approx_derivative returns (m, n) == (lenf, lenx)
+    jac = approx_derivative(func, x, method='2-point', abs_step=epsilon,
+                            args=args)
+    # if func returns a scalar jac.shape will be (lenx,). Make sure
+    # it's at least a 2D array.
+    return np.atleast_2d(jac)
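+
+# Usage sketch (illustrative, not part of the original source):
+#
+#     import numpy as np
+#     def fvec(x):
+#         return np.array([x[0]**2, x[0] * x[1]])
+#     jac = approx_jacobian(np.array([1.0, 2.0]), fvec, 1e-8)
+#     # jac.shape == (2, 2); approximately [[2, 0], [2, 1]]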
+
+
+def fmin_slsqp(func, x0, eqcons=(), f_eqcons=None, ieqcons=(), f_ieqcons=None,
+               bounds=(), fprime=None, fprime_eqcons=None,
+               fprime_ieqcons=None, args=(), iter=100, acc=1.0E-6,
+               iprint=1, disp=None, full_output=0, epsilon=_epsilon,
+               callback=None):
+    """
+    Minimize a function using Sequential Least Squares Programming
+
+    Python interface function for the SLSQP Optimization subroutine
+    originally implemented by Dieter Kraft.
+
+    Parameters
+    ----------
+    func : callable f(x,*args)
+        Objective function.  Must return a scalar.
+    x0 : 1-D ndarray of float
+        Initial guess for the independent variable(s).
+    eqcons : list, optional
+        A list of functions of length n such that
+        eqcons[j](x,*args) == 0.0 in a successfully optimized
+        problem.
+    f_eqcons : callable f(x,*args), optional
+        Returns a 1-D array in which each element must equal 0.0 in a
+        successfully optimized problem. If f_eqcons is specified,
+        eqcons is ignored.
+    ieqcons : list, optional
+        A list of functions of length n such that
+        ieqcons[j](x,*args) >= 0.0 in a successfully optimized
+        problem.
+    f_ieqcons : callable f(x,*args), optional
+        Returns a 1-D ndarray in which each element must be greater or
+        equal to 0.0 in a successfully optimized problem. If
+        f_ieqcons is specified, ieqcons is ignored.
+    bounds : list, optional
+        A list of tuples specifying the lower and upper bound
+        for each independent variable [(xl0, xu0),(xl1, xu1),...]
+        Infinite values will be interpreted as large floating values.
+    fprime : callable `f(x,*args)`, optional
+        A function that evaluates the partial derivatives of func.
+    fprime_eqcons : callable `f(x,*args)`, optional
+        A function of the form `f(x, *args)` that returns the m by n
+        array of equality constraint normals. If not provided,
+        the normals will be approximated. The array returned by
+        fprime_eqcons should be sized as ( len(eqcons), len(x0) ).
+    fprime_ieqcons : callable `f(x,*args)`, optional
+        A function of the form `f(x, *args)` that returns the m by n
+        array of inequality constraint normals. If not provided,
+        the normals will be approximated. The array returned by
+        fprime_ieqcons should be sized as ( len(ieqcons), len(x0) ).
+    args : sequence, optional
+        Additional arguments passed to func and fprime.
+    iter : int, optional
+        The maximum number of iterations.
+    acc : float, optional
+        Requested accuracy.
+    iprint : int, optional
+        The verbosity of fmin_slsqp :
+
+        * iprint <= 0 : Silent operation
+        * iprint == 1 : Print summary upon completion (default)
+        * iprint >= 2 : Print status of each iterate and summary
+    disp : int, optional
+        Overrides the iprint interface (preferred).
+    full_output : bool, optional
+        If False, return only the minimizer of func (default).
+        Otherwise, output final objective function and summary
+        information.
+    epsilon : float, optional
+        The step size for finite-difference derivative estimates.
+    callback : callable, optional
+        Called after each iteration, as ``callback(x)``, where ``x`` is the
+        current parameter vector.
+
+    Returns
+    -------
+    out : ndarray of float
+        The final minimizer of func.
+    fx : ndarray of float, if full_output is true
+        The final value of the objective function.
+    its : int, if full_output is true
+        The number of iterations.
+    imode : int, if full_output is true
+        The exit mode from the optimizer (see below).
+    smode : string, if full_output is true
+        Message describing the exit mode from the optimizer.
+
+    See also
+    --------
+    minimize: Interface to minimization algorithms for multivariate
+        functions. See the 'SLSQP' `method` in particular.
+
+    Notes
+    -----
+    Exit modes are defined as follows ::
+
+        -1 : Gradient evaluation required (g & a)
+         0 : Optimization terminated successfully
+         1 : Function evaluation required (f & c)
+         2 : More equality constraints than independent variables
+         3 : More than 3*n iterations in LSQ subproblem
+         4 : Inequality constraints incompatible
+         5 : Singular matrix E in LSQ subproblem
+         6 : Singular matrix C in LSQ subproblem
+         7 : Rank-deficient equality constraint subproblem HFTI
+         8 : Positive directional derivative for linesearch
+         9 : Iteration limit reached
+
+    Examples
+    --------
+    Examples are given :ref:`in the tutorial `.
+
+    """
+    if disp is not None:
+        iprint = disp
+
+    opts = {'maxiter': iter,
+            'ftol': acc,
+            'iprint': iprint,
+            'disp': iprint != 0,
+            'eps': epsilon,
+            'callback': callback}
+
+    # Build the constraints as a tuple of dictionaries
+    cons = ()
+    # 1. constraints of the 1st kind (eqcons, ieqcons); no Jacobian; take
+    #    the same extra arguments as the objective function.
+    cons += tuple({'type': 'eq', 'fun': c, 'args': args} for c in eqcons)
+    cons += tuple({'type': 'ineq', 'fun': c, 'args': args} for c in ieqcons)
+    # 2. constraints of the 2nd kind (f_eqcons, f_ieqcons) and their Jacobian
+    #    (fprime_eqcons, fprime_ieqcons); also take the same extra arguments
+    #    as the objective function.
+    if f_eqcons:
+        cons += ({'type': 'eq', 'fun': f_eqcons, 'jac': fprime_eqcons,
+                  'args': args}, )
+    if f_ieqcons:
+        cons += ({'type': 'ineq', 'fun': f_ieqcons, 'jac': fprime_ieqcons,
+                  'args': args}, )
+
+    res = _minimize_slsqp(func, x0, args, jac=fprime, bounds=bounds,
+                          constraints=cons, **opts)
+    if full_output:
+        return res['x'], res['fun'], res['nit'], res['status'], res['message']
+    else:
+        return res['x']
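+
+# Usage sketch (illustrative, not part of the original source): minimize
+# x0**2 + x1**2 subject to x0 + x1 >= 1; the optimum is at [0.5, 0.5].
+#
+#     import numpy as np
+#     x = fmin_slsqp(lambda x: x[0]**2 + x[1]**2,
+#                    np.array([2.0, 0.0]),
+#                    ieqcons=[lambda x: x[0] + x[1] - 1.0],
+#                    iprint=0)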
+
+
+def _minimize_slsqp(func, x0, args=(), jac=None, bounds=None,
+                    constraints=(),
+                    maxiter=100, ftol=1.0E-6, iprint=1, disp=False,
+                    eps=_epsilon, callback=None, finite_diff_rel_step=None,
+                    **unknown_options):
+    """
+    Minimize a scalar function of one or more variables using Sequential
+    Least Squares Programming (SLSQP).
+
+    Options
+    -------
+    ftol : float
+        Precision goal for the value of f in the stopping criterion.
+    eps : float
+        Step size used for numerical approximation of the Jacobian.
+    disp : bool
+        Set to True to print convergence messages. If False,
+        `verbosity` is ignored and set to 0.
+    maxiter : int
+        Maximum number of iterations.
+    finite_diff_rel_step : None or array_like, optional
+        If `jac in ['2-point', '3-point', 'cs']` the relative step size to
+        use for numerical approximation of `jac`. The absolute step
+        size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``,
+        possibly adjusted to fit into the bounds. For ``method='3-point'``
+        the sign of `h` is ignored. If None (default) then step is selected
+        automatically.
+    """
+    _check_unknown_options(unknown_options)
+    iter = maxiter - 1
+    acc = ftol
+    epsilon = eps
+
+    if not disp:
+        iprint = 0
+
+    # Transform x0 into an array.
+    x = asfarray(x0).flatten()
+
+    # SLSQP is sent 'old-style' bounds, 'new-style' bounds are required by
+    # ScalarFunction
+    if bounds is None or len(bounds) == 0:
+        new_bounds = (-np.inf, np.inf)
+    else:
+        new_bounds = old_bound_to_new(bounds)
+
+    # clip the initial guess to bounds, otherwise ScalarFunction doesn't work
+    x = np.clip(x, new_bounds[0], new_bounds[1])
+
+    # Constraints are triaged per type into a dictionary of tuples
+    if isinstance(constraints, dict):
+        constraints = (constraints, )
+
+    cons = {'eq': (), 'ineq': ()}
+    for ic, con in enumerate(constraints):
+        # check type
+        try:
+            ctype = con['type'].lower()
+        except KeyError as e:
+            raise KeyError('Constraint %d has no type defined.' % ic) from e
+        except TypeError as e:
+            raise TypeError('Constraints must be defined using a '
+                            'dictionary.') from e
+        except AttributeError as e:
+            raise TypeError("Constraint's type must be a string.") from e
+        else:
+            if ctype not in ['eq', 'ineq']:
+                raise ValueError("Unknown constraint type '%s'." % con['type'])
+
+        # check function
+        if 'fun' not in con:
+            raise ValueError('Constraint %d has no function defined.' % ic)
+
+        # check Jacobian
+        cjac = con.get('jac')
+        if cjac is None:
+            # approximate Jacobian function. The factory function is needed
+            # to keep a reference to `fun`, see gh-4240.
+            def cjac_factory(fun):
+                def cjac(x, *args):
+                    x = _check_clip_x(x, new_bounds)
+
+                    if jac in ['2-point', '3-point', 'cs']:
+                        return approx_derivative(fun, x, method=jac, args=args,
+                                                 rel_step=finite_diff_rel_step,
+                                                 bounds=new_bounds)
+                    else:
+                        return approx_derivative(fun, x, method='2-point',
+                                                 abs_step=epsilon, args=args,
+                                                 bounds=new_bounds)
+
+                return cjac
+            cjac = cjac_factory(con['fun'])
+
+        # update constraints' dictionary
+        cons[ctype] += ({'fun': con['fun'],
+                         'jac': cjac,
+                         'args': con.get('args', ())}, )
+
+    exit_modes = {-1: "Gradient evaluation required (g & a)",
+                   0: "Optimization terminated successfully",
+                   1: "Function evaluation required (f & c)",
+                   2: "More equality constraints than independent variables",
+                   3: "More than 3*n iterations in LSQ subproblem",
+                   4: "Inequality constraints incompatible",
+                   5: "Singular matrix E in LSQ subproblem",
+                   6: "Singular matrix C in LSQ subproblem",
+                   7: "Rank-deficient equality constraint subproblem HFTI",
+                   8: "Positive directional derivative for linesearch",
+                   9: "Iteration limit reached"}
+
+    # Set the parameters that SLSQP will need
+    # meq, mieq: number of equality and inequality constraints
+    meq = sum(map(len, [atleast_1d(c['fun'](x, *c['args']))
+              for c in cons['eq']]))
+    mieq = sum(map(len, [atleast_1d(c['fun'](x, *c['args']))
+               for c in cons['ineq']]))
+    # m = The total number of constraints
+    m = meq + mieq
+    # la = The number of constraints, or 1 if there are no constraints
+    la = array([1, m]).max()
+    # n = The number of independent variables
+    n = len(x)
+
+    # Define the workspaces for SLSQP
+    n1 = n + 1
+    mineq = m - meq + n1 + n1
+    len_w = (3*n1+m)*(n1+1)+(n1-meq+1)*(mineq+2) + 2*mineq+(n1+mineq)*(n1-meq) \
+            + 2*meq + n1 + ((n+1)*n)//2 + 2*m + 3*n + 3*n1 + 1
+    len_jw = mineq
+    w = zeros(len_w)
+    jw = zeros(len_jw)
+
+    # Decompose bounds into xl and xu
+    if bounds is None or len(bounds) == 0:
+        xl = np.empty(n, dtype=float)
+        xu = np.empty(n, dtype=float)
+        xl.fill(np.nan)
+        xu.fill(np.nan)
+    else:
+        bnds = array([(_arr_to_scalar(l), _arr_to_scalar(u))
+                      for (l, u) in bounds], float)
+        if bnds.shape[0] != n:
+            raise IndexError('SLSQP Error: the length of bounds is not '
+                             'compatible with that of x0.')
+
+        with np.errstate(invalid='ignore'):
+            bnderr = bnds[:, 0] > bnds[:, 1]
+
+        if bnderr.any():
+            raise ValueError('SLSQP Error: lb > ub in bounds %s.' %
+                             ', '.join(str(b) for b in bnds[bnderr]))
+        xl, xu = bnds[:, 0], bnds[:, 1]
+
+        # Mark infinite bounds with nans; the Fortran code understands this
+        infbnd = ~isfinite(bnds)
+        xl[infbnd[:, 0]] = np.nan
+        xu[infbnd[:, 1]] = np.nan
+
+    # ScalarFunction provides function and gradient evaluation
+    sf = _prepare_scalar_function(func, x, jac=jac, args=args, epsilon=eps,
+                                  finite_diff_rel_step=finite_diff_rel_step,
+                                  bounds=new_bounds)
+    # gh11403 SLSQP sometimes exceeds bounds by 1 or 2 ULP, make sure this
+    # doesn't get sent to the func/grad evaluator.
+    wrapped_fun = _clip_x_for_func(sf.fun, new_bounds)
+    wrapped_grad = _clip_x_for_func(sf.grad, new_bounds)
+
+    # Initialize the iteration counter and the mode value
+    mode = array(0, int)
+    acc = array(acc, float)
+    majiter = array(iter, int)
+    majiter_prev = 0
+
+    # Initialize internal SLSQP state variables
+    alpha = array(0, float)
+    f0 = array(0, float)
+    gs = array(0, float)
+    h1 = array(0, float)
+    h2 = array(0, float)
+    h3 = array(0, float)
+    h4 = array(0, float)
+    t = array(0, float)
+    t0 = array(0, float)
+    tol = array(0, float)
+    iexact = array(0, int)
+    incons = array(0, int)
+    ireset = array(0, int)
+    itermx = array(0, int)
+    line = array(0, int)
+    n1 = array(0, int)
+    n2 = array(0, int)
+    n3 = array(0, int)
+
+    # Print the header if iprint >= 2
+    if iprint >= 2:
+        print("%5s %5s %16s %16s" % ("NIT", "FC", "OBJFUN", "GNORM"))
+
+    # mode is zero on entry, so call objective, constraints and gradients
+    # there should be no func evaluations here because it's cached from
+    # ScalarFunction
+    fx = wrapped_fun(x)
+    g = append(wrapped_grad(x), 0.0)
+    c = _eval_constraint(x, cons)
+    a = _eval_con_normals(x, cons, la, n, m, meq, mieq)
+
+    while 1:
+        # Call SLSQP
+        slsqp(m, meq, x, xl, xu, fx, c, g, a, acc, majiter, mode, w, jw,
+              alpha, f0, gs, h1, h2, h3, h4, t, t0, tol,
+              iexact, incons, ireset, itermx, line,
+              n1, n2, n3)
+
+        if mode == 1:  # objective and constraint evaluation required
+            fx = wrapped_fun(x)
+            c = _eval_constraint(x, cons)
+
+        if mode == -1:  # gradient evaluation required
+            g = append(wrapped_grad(x), 0.0)
+            a = _eval_con_normals(x, cons, la, n, m, meq, mieq)
+
+        if majiter > majiter_prev:
+            # call callback if major iteration has incremented
+            if callback is not None:
+                callback(np.copy(x))
+
+            # Print the status of the current iterate if iprint > 2
+            if iprint >= 2:
+                print("%5i %5i % 16.6E % 16.6E" % (majiter, sf.nfev,
+                                                   fx, linalg.norm(g)))
+
+        # If exit mode is not -1 or 1, slsqp has completed
+        if abs(mode) != 1:
+            break
+
+        majiter_prev = int(majiter)
+
+    # Optimization loop complete. Print status if requested
+    if iprint >= 1:
+        print(exit_modes[int(mode)] + "    (Exit mode " + str(mode) + ')')
+        print("            Current function value:", fx)
+        print("            Iterations:", majiter)
+        print("            Function evaluations:", sf.nfev)
+        print("            Gradient evaluations:", sf.ngev)
+
+    return OptimizeResult(x=x, fun=fx, jac=g[:-1], nit=int(majiter),
+                          nfev=sf.nfev, njev=sf.ngev, status=int(mode),
+                          message=exit_modes[int(mode)], success=(mode == 0))
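+
+# Usage sketch (illustrative, not part of the original source): this private
+# routine is normally reached through the public interface:
+#
+#     from scipy.optimize import minimize
+#     res = minimize(lambda x: (x[0] - 1.0)**2 + (x[1] - 2.5)**2,
+#                    [2.0, 0.0], method='SLSQP',
+#                    bounds=[(0, None), (0, None)])
+#     # res.x is approximately [1.0, 2.5]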
+
+
+def _eval_constraint(x, cons):
+    # Compute constraints
+    if cons['eq']:
+        c_eq = concatenate([atleast_1d(con['fun'](x, *con['args']))
+                            for con in cons['eq']])
+    else:
+        c_eq = zeros(0)
+
+    if cons['ineq']:
+        c_ieq = concatenate([atleast_1d(con['fun'](x, *con['args']))
+                             for con in cons['ineq']])
+    else:
+        c_ieq = zeros(0)
+
+    # Now combine c_eq and c_ieq into a single constraint vector
+    c = concatenate((c_eq, c_ieq))
+    return c
+
+
+def _eval_con_normals(x, cons, la, n, m, meq, mieq):
+    # Compute the normals of the constraints
+    if cons['eq']:
+        a_eq = vstack([con['jac'](x, *con['args'])
+                       for con in cons['eq']])
+    else:  # no equality constraint
+        a_eq = zeros((meq, n))
+
+    if cons['ineq']:
+        a_ieq = vstack([con['jac'](x, *con['args'])
+                        for con in cons['ineq']])
+    else:  # no inequality constraint
+        a_ieq = zeros((mieq, n))
+
+    # Now combine a_eq and a_ieq into a single a matrix
+    if m == 0:  # no constraints
+        a = zeros((la, n))
+    else:
+        a = vstack((a_eq, a_ieq))
+    a = concatenate((a, zeros([la, 1])), 1)
+
+    return a
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_spectral.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_spectral.py
new file mode 100644
index 00000000..06d70b98
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_spectral.py
@@ -0,0 +1,257 @@
+"""
+Spectral Algorithm for Nonlinear Equations
+"""
+import collections
+
+import numpy as np
+from scipy.optimize import OptimizeResult
+from scipy.optimize._optimize import _check_unknown_options
+from ._linesearch import _nonmonotone_line_search_cruz, _nonmonotone_line_search_cheng
+
+class _NoConvergence(Exception):
+    pass
+
+
+def _root_df_sane(func, x0, args=(), ftol=1e-8, fatol=1e-300, maxfev=1000,
+                  fnorm=None, callback=None, disp=False, M=10, eta_strategy=None,
+                  sigma_eps=1e-10, sigma_0=1.0, line_search='cruz', **unknown_options):
+    r"""
+    Solve nonlinear equation with the DF-SANE method
+
+    Options
+    -------
+    ftol : float, optional
+        Relative norm tolerance.
+    fatol : float, optional
+        Absolute norm tolerance.
+        Algorithm terminates when ``||func(x)|| < fatol + ftol ||func(x_0)||``.
+    fnorm : callable, optional
+        Norm to use in the convergence check. If None, 2-norm is used.
+    maxfev : int, optional
+        Maximum number of function evaluations.
+    disp : bool, optional
+        Whether to print convergence process to stdout.
+    eta_strategy : callable, optional
+        Choice of the ``eta_k`` parameter, which gives slack for growth
+        of ``||F||**2``.  Called as ``eta_k = eta_strategy(k, x, F)`` with
+        `k` the iteration number, `x` the current iterate and `F` the current
+        residual. Should satisfy ``eta_k > 0`` and ``sum(eta, k=0..inf) < inf``.
+        Default: ``||F||**2 / (1 + k)**2``.
+    sigma_eps : float, optional
+        The spectral coefficient is constrained to ``sigma_eps < sigma < 1/sigma_eps``.
+        Default: 1e-10
+    sigma_0 : float, optional
+        Initial spectral coefficient.
+        Default: 1.0
+    M : int, optional
+        Number of iterates to include in the nonmonotonic line search.
+        Default: 10
+    line_search : {'cruz', 'cheng'}
+        Type of line search to employ. 'cruz' is the original one defined in
+        [Martinez & Raydan. Math. Comp. 75, 1429 (2006)], 'cheng' is
+        a modified search defined in [Cheng & Li. IMA J. Numer. Anal. 29, 814 (2009)].
+        Default: 'cruz'
+
+    References
+    ----------
+    .. [1] "Spectral residual method without gradient information for solving
+           large-scale nonlinear systems of equations." W. La Cruz,
+           J.M. Martinez, M. Raydan. Math. Comp. **75**, 1429 (2006).
+    .. [2] W. La Cruz, Opt. Meth. Software, 29, 24 (2014).
+    .. [3] W. Cheng, D.-H. Li. IMA J. Numer. Anal. **29**, 814 (2009).
+
+    """
+    _check_unknown_options(unknown_options)
+
+    if line_search not in ('cheng', 'cruz'):
+        raise ValueError("Invalid value %r for 'line_search'" % (line_search,))
+
+    nexp = 2
+
+    if eta_strategy is None:
+        # Different choice from [1], as their eta is not invariant
+        # vs. scaling of F.
+        def eta_strategy(k, x, F):
+            # Obtain squared 2-norm of the initial residual from the outer scope
+            return f_0 / (1 + k)**2
+
+    if fnorm is None:
+        def fnorm(F):
+            # Obtain squared 2-norm of the current residual from the outer scope
+            return f_k**(1.0/nexp)
+
+    def fmerit(F):
+        return np.linalg.norm(F)**nexp
+
+    nfev = [0]
+    f, x_k, x_shape, f_k, F_k, is_complex = _wrap_func(func, x0, fmerit, nfev, maxfev, args)
+
+    k = 0
+    f_0 = f_k
+    sigma_k = sigma_0
+
+    F_0_norm = fnorm(F_k)
+
+    # For the 'cruz' line search
+    prev_fs = collections.deque([f_k], M)
+
+    # For the 'cheng' line search
+    Q = 1.0
+    C = f_0
+
+    converged = False
+    message = "too many function evaluations required"
+
+    while True:
+        F_k_norm = fnorm(F_k)
+
+        if disp:
+            print("iter %d: ||F|| = %g, sigma = %g" % (k, F_k_norm, sigma_k))
+
+        if callback is not None:
+            callback(x_k, F_k)
+
+        if F_k_norm < ftol * F_0_norm + fatol:
+            # Converged!
+            message = "successful convergence"
+            converged = True
+            break
+
+        # Control spectral parameter, from [2]
+        if abs(sigma_k) > 1/sigma_eps:
+            sigma_k = 1/sigma_eps * np.sign(sigma_k)
+        elif abs(sigma_k) < sigma_eps:
+            sigma_k = sigma_eps
+
+        # Line search direction
+        d = -sigma_k * F_k
+
+        # Nonmonotone line search
+        eta = eta_strategy(k, x_k, F_k)
+        try:
+            if line_search == 'cruz':
+                alpha, xp, fp, Fp = _nonmonotone_line_search_cruz(f, x_k, d, prev_fs, eta=eta)
+            elif line_search == 'cheng':
+                alpha, xp, fp, Fp, C, Q = _nonmonotone_line_search_cheng(f, x_k, d, f_k, C, Q, eta=eta)
+        except _NoConvergence:
+            break
+
+        # Update spectral parameter
+        s_k = xp - x_k
+        y_k = Fp - F_k
+        sigma_k = np.vdot(s_k, s_k) / np.vdot(s_k, y_k)
+
+        # Take step
+        x_k = xp
+        F_k = Fp
+        f_k = fp
+
+        # Store function value
+        if line_search == 'cruz':
+            prev_fs.append(fp)
+
+        k += 1
+
+    x = _wrap_result(x_k, is_complex, shape=x_shape)
+    F = _wrap_result(F_k, is_complex)
+
+    result = OptimizeResult(x=x, success=converged,
+                            message=message,
+                            fun=F, nfev=nfev[0], nit=k)
+
+    return result
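+
+# Usage sketch (illustrative, not part of the original source): DF-SANE is
+# exposed through the public interface as ``root(..., method='df-sane')``:
+#
+#     import numpy as np
+#     from scipy.optimize import root
+#     def F(x):
+#         return np.array([x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
+#                          0.5 * (x[1] - x[0])**3 + x[1]])
+#     sol = root(F, [0.0, 0.0], method='df-sane')
+#     # sol.x should be approximately [0.84, 0.16]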
+
+
+def _wrap_func(func, x0, fmerit, nfev_list, maxfev, args=()):
+    """
+    Wrap a function and an initial value so that (i) complex values
+    are mapped to reals, (ii) the value of the merit function
+    ``fmerit(F)`` is computed at the same time, and (iii) the function
+    evaluation count is maintained and an exception is raised if the
+    maximum is exceeded.
+
+    Parameters
+    ----------
+    func : callable
+        Function to wrap
+    x0 : ndarray
+        Initial value
+    fmerit : callable
+        Merit function fmerit(f) for computing merit value from residual.
+    nfev_list : list
+        List to store number of evaluations in. Should be [0] in the beginning.
+    maxfev : int
+        Maximum number of evaluations before _NoConvergence is raised.
+    args : tuple
+        Extra arguments to func
+
+    Returns
+    -------
+    wrap_func : callable
+        Wrapped function, to be called as
+        ``F, fp = wrap_func(x0)``
+    x0_wrap : ndarray of float
+        Wrapped initial value; raveled to 1-D and complex
+        values mapped to reals.
+    x0_shape : tuple
+        Shape of the initial value array
+    f : float
+        Merit function at F
+    F : ndarray of float
+        Residual at x0_wrap
+    is_complex : bool
+        Whether complex values were mapped to reals
+
+    """
+    x0 = np.asarray(x0)
+    x0_shape = x0.shape
+    F = np.asarray(func(x0, *args)).ravel()
+    is_complex = np.iscomplexobj(x0) or np.iscomplexobj(F)
+    x0 = x0.ravel()
+
+    nfev_list[0] = 1
+
+    if is_complex:
+        def wrap_func(x):
+            if nfev_list[0] >= maxfev:
+                raise _NoConvergence()
+            nfev_list[0] += 1
+            z = _real2complex(x).reshape(x0_shape)
+            v = np.asarray(func(z, *args)).ravel()
+            F = _complex2real(v)
+            f = fmerit(F)
+            return f, F
+
+        x0 = _complex2real(x0)
+        F = _complex2real(F)
+    else:
+        def wrap_func(x):
+            if nfev_list[0] >= maxfev:
+                raise _NoConvergence()
+            nfev_list[0] += 1
+            x = x.reshape(x0_shape)
+            F = np.asarray(func(x, *args)).ravel()
+            f = fmerit(F)
+            return f, F
+
+    return wrap_func, x0, x0_shape, fmerit(F), F, is_complex
+
+
+def _wrap_result(result, is_complex, shape=None):
+    """
+    Convert from real to complex and reshape result arrays.
+    """
+    if is_complex:
+        z = _real2complex(result)
+    else:
+        z = result
+    if shape is not None:
+        z = z.reshape(shape)
+    return z
+
+
+def _real2complex(x):
+    return np.ascontiguousarray(x, dtype=float).view(np.complex128)
+
+
+def _complex2real(z):
+    return np.ascontiguousarray(z, dtype=complex).view(np.float64)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_tnc.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_tnc.py
new file mode 100644
index 00000000..ef18b42c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_tnc.py
@@ -0,0 +1,441 @@
+# TNC Python interface
+# @(#) $Jeannot: tnc.py,v 1.11 2005/01/28 18:27:31 js Exp $
+
+# Copyright (c) 2004-2005, Jean-Sebastien Roy (js@jeannot.org)
+
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+"""
+TNC: A Python interface to the TNC non-linear optimizer
+
+TNC is a non-linear optimizer. To use it, you must provide a function to
+minimize. The function must take one argument: the list of coordinates at
+which to evaluate the function. It must return either a tuple whose first
+element is the value of the function and whose second element is the gradient
+of the function (as a list of values), or None to abort the minimization.
+"""
+
+import warnings
+
+from scipy.optimize import _moduleTNC as moduleTNC
+from ._optimize import (MemoizeJac, OptimizeResult, _check_unknown_options,
+                       _prepare_scalar_function)
+from ._constraints import old_bound_to_new
+
+from numpy import inf, array, zeros, asfarray
+
+__all__ = ['fmin_tnc']
+
+
+MSG_NONE = 0  # No messages
+MSG_ITER = 1  # One line per iteration
+MSG_INFO = 2  # Informational messages
+MSG_VERS = 4  # Version info
+MSG_EXIT = 8  # Exit reasons
+MSG_ALL = MSG_ITER + MSG_INFO + MSG_VERS + MSG_EXIT
+
+MSGS = {
+        MSG_NONE: "No messages",
+        MSG_ITER: "One line per iteration",
+        MSG_INFO: "Informational messages",
+        MSG_VERS: "Version info",
+        MSG_EXIT: "Exit reasons",
+        MSG_ALL: "All messages"
+}
+
+INFEASIBLE = -1  # Infeasible (lower bound > upper bound)
+LOCALMINIMUM = 0  # Local minimum reached (|pg| ~= 0)
+FCONVERGED = 1  # Converged (|f_n-f_(n-1)| ~= 0)
+XCONVERGED = 2  # Converged (|x_n-x_(n-1)| ~= 0)
+MAXFUN = 3  # Max. number of function evaluations reached
+LSFAIL = 4  # Linear search failed
+CONSTANT = 5  # All lower bounds are equal to the upper bounds
+NOPROGRESS = 6  # Unable to progress
+USERABORT = 7  # User requested end of minimization
+
+RCSTRINGS = {
+        INFEASIBLE: "Infeasible (lower bound > upper bound)",
+        LOCALMINIMUM: "Local minimum reached (|pg| ~= 0)",
+        FCONVERGED: "Converged (|f_n-f_(n-1)| ~= 0)",
+        XCONVERGED: "Converged (|x_n-x_(n-1)| ~= 0)",
+        MAXFUN: "Max. number of function evaluations reached",
+        LSFAIL: "Linear search failed",
+        CONSTANT: "All lower bounds are equal to the upper bounds",
+        NOPROGRESS: "Unable to progress",
+        USERABORT: "User requested end of minimization"
+}
+
+# Changes to interface made by Travis Oliphant, Apr. 2004 for inclusion in
+#  SciPy
+
+
+def fmin_tnc(func, x0, fprime=None, args=(), approx_grad=0,
+             bounds=None, epsilon=1e-8, scale=None, offset=None,
+             messages=MSG_ALL, maxCGit=-1, maxfun=None, eta=-1,
+             stepmx=0, accuracy=0, fmin=0, ftol=-1, xtol=-1, pgtol=-1,
+             rescale=-1, disp=None, callback=None):
+    """
+    Minimize a function with variables subject to bounds, using
+    gradient information in a truncated Newton algorithm. This
+    method wraps a C implementation of the algorithm.
+
+    Parameters
+    ----------
+    func : callable ``func(x, *args)``
+        Function to minimize.  Must do one of:
+
+        1. Return f and g, where f is the value of the function and g its
+           gradient (a list of floats).
+
+        2. Return the function value but supply gradient function
+           separately as `fprime`.
+
+        3. Return the function value and set ``approx_grad=True``.
+
+        If the function returns None, the minimization
+        is aborted.
+    x0 : array_like
+        Initial estimate of minimum.
+    fprime : callable ``fprime(x, *args)``, optional
+        Gradient of `func`. If None, then either `func` must return the
+        function value and the gradient (``f,g = func(x, *args)``)
+        or `approx_grad` must be True.
+    args : tuple, optional
+        Arguments to pass to function.
+    approx_grad : bool, optional
+        If true, approximate the gradient numerically.
+    bounds : list, optional
+        (min, max) pairs for each element in x0, defining the
+        bounds on that parameter. Use None or +/-inf for one of
+        min or max when there is no bound in that direction.
+    epsilon : float, optional
+        Used if approx_grad is True. The stepsize in a finite
+        difference approximation for fprime.
+    scale : array_like, optional
+        Scaling factors to apply to each variable. If None, the
+        factors are up-low for interval bounded variables and
+        1+|x| for the others. Defaults to None.
+    offset : array_like, optional
+        Value to subtract from each variable. If None, the
+        offsets are (up+low)/2 for interval bounded variables
+        and x for the others.
+    messages : int, optional
+        Bit mask used to select which messages to display during
+        minimization; values are defined in the MSGS dict. Defaults to
+        MSG_ALL.
+    disp : int, optional
+        Integer interface to messages. 0 = no message, 5 = all messages
+    maxCGit : int, optional
+        Maximum number of hessian*vector evaluations per main
+        iteration. If maxCGit == 0, the direction chosen is
+        -gradient. If maxCGit < 0, maxCGit is set to
+        max(1,min(50,n/2)). Defaults to -1.
+    maxfun : int, optional
+        Maximum number of function evaluations. If None, maxfun is
+        set to max(100, 10*len(x0)). Defaults to None. Note that this function
+        may violate the limit because of evaluating gradients by numerical
+        differentiation.
+    eta : float, optional
+        Severity of the line search. If < 0 or > 1, set to 0.25.
+        Defaults to -1.
+    stepmx : float, optional
+        Maximum step for the line search. May be increased during
+        call. If too small, it will be set to 10.0. Defaults to 0.
+    accuracy : float, optional
+        Relative precision for finite difference calculations. If
+        <= machine_precision, set to sqrt(machine_precision).
+        Defaults to 0.
+    fmin : float, optional
+        Minimum function value estimate. Defaults to 0.
+    ftol : float, optional
+        Precision goal for the value of f in the stopping criterion.
+        If ftol < 0.0, ftol is set to 0.0. Defaults to -1.
+    xtol : float, optional
+        Precision goal for the value of x in the stopping
+        criterion (after applying x scaling factors). If xtol <
+        0.0, xtol is set to sqrt(machine_precision). Defaults to
+        -1.
+    pgtol : float, optional
+        Precision goal for the value of the projected gradient in
+        the stopping criterion (after applying x scaling factors).
+        If pgtol < 0.0, pgtol is set to 1e-2 * sqrt(accuracy).
+        Setting it to 0.0 is not recommended. Defaults to -1.
+    rescale : float, optional
+        Scaling factor (in log10) used to trigger f value
+        rescaling. If 0, rescale at each iteration. If a large
+        value, never rescale. If < 0, rescale is set to 1.3.
+    callback : callable, optional
+        Called after each iteration, as callback(xk), where xk is the
+        current parameter vector.
+
+    Returns
+    -------
+    x : ndarray
+        The solution.
+    nfeval : int
+        The number of function evaluations.
+    rc : int
+        Return code, see below
+
+    See also
+    --------
+    minimize: Interface to minimization algorithms for multivariate
+        functions. See the 'TNC' `method` in particular.
+
+    Notes
+    -----
+    The underlying algorithm is truncated Newton, also called
+    Newton Conjugate-Gradient. This method differs from
+    scipy.optimize.fmin_ncg in that
+
+    1. it wraps a C implementation of the algorithm
+    2. it allows each variable to be given an upper and lower bound.
+
+    The algorithm incorporates the bound constraints by determining
+    the descent direction as in an unconstrained truncated Newton,
+    but never taking a step-size large enough to leave the space
+    of feasible x's. The algorithm keeps track of a set of
+    currently active constraints, and ignores them when computing
+    the minimum allowable step size. (The x's associated with the
+    active constraint are kept fixed.) If the maximum allowable
+    step size is zero then a new constraint is added. At the end
+    of each iteration one of the constraints may be deemed no
+    longer active and removed. A constraint is considered
+        no longer active if it is currently active
+    but the gradient for that variable points inward from the
+    constraint. The specific constraint removed is the one
+    associated with the variable of largest index whose
+    constraint is no longer active.
+
+    Return codes are defined as follows::
+
+        -1 : Infeasible (lower bound > upper bound)
+         0 : Local minimum reached (|pg| ~= 0)
+         1 : Converged (|f_n-f_(n-1)| ~= 0)
+         2 : Converged (|x_n-x_(n-1)| ~= 0)
+         3 : Max. number of function evaluations reached
+         4 : Linear search failed
+         5 : All lower bounds are equal to the upper bounds
+         6 : Unable to progress
+         7 : User requested end of minimization
+
+    References
+    ----------
+    Wright S., Nocedal J. (2006), 'Numerical Optimization'
+
+    Nash S.G. (1984), "Newton-Type Minimization Via the Lanczos Method",
+    SIAM Journal on Numerical Analysis 21, pp. 770-778
+
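+    Examples
+    --------
+    A minimal, illustrative sketch (the quadratic objective and its
+    gradient below are not part of this module):
+
+    >>> import numpy as np
+    >>> def f_and_g(x):
+    ...     f = (x[0] - 1.0)**2 + (x[1] + 0.5)**2
+    ...     g = np.array([2.0 * (x[0] - 1.0), 2.0 * (x[1] + 0.5)])
+    ...     return f, g
+    >>> x, nfeval, rc = fmin_tnc(f_and_g, [0.0, 0.0],
+    ...                          bounds=[(-2, 2), (-2, 2)], disp=0)
+    >>> bool(np.allclose(x, [1.0, -0.5], atol=1e-4))
+    True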
+    """
+    # handle fprime/approx_grad
+    if approx_grad:
+        fun = func
+        jac = None
+    elif fprime is None:
+        fun = MemoizeJac(func)
+        jac = fun.derivative
+    else:
+        fun = func
+        jac = fprime
+
+    if disp is not None:  # disp takes precedence over messages
+        mesg_num = disp
+    else:
+        mesg_num = {0:MSG_NONE, 1:MSG_ITER, 2:MSG_INFO, 3:MSG_VERS,
+                    4:MSG_EXIT, 5:MSG_ALL}.get(messages, MSG_ALL)
+    # build options
+    opts = {'eps': epsilon,
+            'scale': scale,
+            'offset': offset,
+            'mesg_num': mesg_num,
+            'maxCGit': maxCGit,
+            'maxfun': maxfun,
+            'eta': eta,
+            'stepmx': stepmx,
+            'accuracy': accuracy,
+            'minfev': fmin,
+            'ftol': ftol,
+            'xtol': xtol,
+            'gtol': pgtol,
+            'rescale': rescale,
+            'disp': False}
+
+    res = _minimize_tnc(fun, x0, args, jac, bounds, callback=callback, **opts)
+
+    return res['x'], res['nfev'], res['status']
+
+
+def _minimize_tnc(fun, x0, args=(), jac=None, bounds=None,
+                  eps=1e-8, scale=None, offset=None, mesg_num=None,
+                  maxCGit=-1, maxiter=None, eta=-1, stepmx=0, accuracy=0,
+                  minfev=0, ftol=-1, xtol=-1, gtol=-1, rescale=-1, disp=False,
+                  callback=None, finite_diff_rel_step=None, maxfun=None,
+                  **unknown_options):
+    """
+    Minimize a scalar function of one or more variables using a truncated
+    Newton (TNC) algorithm.
+
+    Options
+    -------
+    eps : float or ndarray
+        If `jac is None` the absolute step size used for numerical
+        approximation of the jacobian via forward differences.
+    scale : list of floats
+        Scaling factors to apply to each variable. If None, the
+        factors are up-low for interval bounded variables and
+        1+|x| for the others. Defaults to None.
+    offset : float
+        Value to subtract from each variable. If None, the
+        offsets are (up+low)/2 for interval bounded variables
+        and x for the others.
+    disp : bool
+       Set to True to print convergence messages.
+    maxCGit : int
+        Maximum number of hessian*vector evaluations per main
+        iteration. If maxCGit == 0, the direction chosen is
+        -gradient. If maxCGit < 0, maxCGit is set to
+        max(1,min(50,n/2)). Defaults to -1.
+    maxiter : int, optional
+        Maximum number of function evaluations. If `maxfun` is also provided
+        then `maxiter` is ignored.
+        Default is None.
+
+        .. deprecated:: 1.9.0
+            `maxiter` is deprecated in favor of `maxfun` and will be removed
+            in SciPy 1.11.0.
+    eta : float
+        Severity of the line search. If < 0 or > 1, set to 0.25.
+        Defaults to -1.
+    stepmx : float
+        Maximum step for the line search. May be increased during
+        call. If too small, it will be set to 10.0. Defaults to 0.
+    accuracy : float
+        Relative precision for finite difference calculations. If
+        <= machine_precision, set to sqrt(machine_precision).
+        Defaults to 0.
+    minfev : float
+        Minimum function value estimate. Defaults to 0.
+    ftol : float
+        Precision goal for the value of f in the stopping criterion.
+        If ftol < 0.0, ftol is set to 0.0. Defaults to -1.
+    xtol : float
+        Precision goal for the value of x in the stopping
+        criterion (after applying x scaling factors). If xtol <
+        0.0, xtol is set to sqrt(machine_precision). Defaults to
+        -1.
+    gtol : float
+        Precision goal for the value of the projected gradient in
+        the stopping criterion (after applying x scaling factors).
+        If gtol < 0.0, gtol is set to 1e-2 * sqrt(accuracy).
+        Setting it to 0.0 is not recommended. Defaults to -1.
+    rescale : float
+        Scaling factor (in log10) used to trigger f value
+        rescaling.  If 0, rescale at each iteration.  If a large
+        value, never rescale.  If < 0, rescale is set to 1.3.
+    finite_diff_rel_step : None or array_like, optional
+        If `jac in ['2-point', '3-point', 'cs']` the relative step size to
+        use for numerical approximation of the jacobian. The absolute step
+        size is computed as ``h = rel_step * sign(x) * max(1, abs(x))``,
+        possibly adjusted to fit into the bounds. For ``method='3-point'``
+        the sign of `h` is ignored. If None (default) then step is selected
+        automatically.
+    maxfun : int
+        Maximum number of function evaluations. If None, `maxfun` is
+        set to max(100, 10*len(x0)). Defaults to None.
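+
+    Examples
+    --------
+    This solver is reached through the public interface; a minimal,
+    illustrative sketch:
+
+    >>> from scipy.optimize import minimize
+    >>> res = minimize(lambda x: (x[0] - 1.0)**2, [0.0], method='TNC',
+    ...                bounds=[(-2.0, 2.0)])
+    >>> bool(res.success)
+    True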
+    """
+    _check_unknown_options(unknown_options)
+    fmin = minfev
+    pgtol = gtol
+
+    x0 = asfarray(x0).flatten()
+    n = len(x0)
+
+    if bounds is None:
+        bounds = [(None,None)] * n
+    if len(bounds) != n:
+        raise ValueError('length of x0 != length of bounds')
+    new_bounds = old_bound_to_new(bounds)
+
+    if mesg_num is not None:
+        messages = {0:MSG_NONE, 1:MSG_ITER, 2:MSG_INFO, 3:MSG_VERS,
+                    4:MSG_EXIT, 5:MSG_ALL}.get(mesg_num, MSG_ALL)
+    elif disp:
+        messages = MSG_ALL
+    else:
+        messages = MSG_NONE
+
+    sf = _prepare_scalar_function(fun, x0, jac=jac, args=args, epsilon=eps,
+                                  finite_diff_rel_step=finite_diff_rel_step,
+                                  bounds=new_bounds)
+    func_and_grad = sf.fun_and_grad
+
+    """
+    low, up   : the bounds (lists of floats)
+                if low is None, the lower bounds are removed.
+                if up is None, the upper bounds are removed.
+                low and up defaults to None
+    """
+    low = zeros(n)
+    up = zeros(n)
+    for i in range(n):
+        if bounds[i] is None:
+            low[i], up[i] = -inf, inf
+        else:
+            l, u = bounds[i]
+            if l is None:
+                low[i] = -inf
+            else:
+                low[i] = l
+            if u is None:
+                up[i] = inf
+            else:
+                up[i] = u
+
+    if scale is None:
+        scale = array([])
+
+    if offset is None:
+        offset = array([])
+
+    if maxfun is None:
+        if maxiter is not None:
+            warnings.warn(
+                "'maxiter' has been deprecated in favor of 'maxfun'"
+                " and will be removed in SciPy 1.11.0.",
+                DeprecationWarning, stacklevel=3
+            )
+            maxfun = maxiter
+        else:
+            maxfun = max(100, 10*len(x0))
+
+    rc, nf, nit, x, funv, jacv = moduleTNC.tnc_minimize(
+        func_and_grad, x0, low, up, scale,
+        offset, messages, maxCGit, maxfun,
+        eta, stepmx, accuracy, fmin, ftol,
+        xtol, pgtol, rescale, callback
+    )
+    # the TNC documentation states: "On output, x, f and g may be very
+    # slightly out of sync because of scaling". Therefore re-evaluate
+    # func_and_grad so they are synced.
+    funv, jacv = func_and_grad(x)
+
+    return OptimizeResult(x=x, fun=funv, jac=jacv, nfev=sf.nfev,
+                          nit=nit, status=rc, message=RCSTRINGS[rc],
+                          success=(-1 < rc < 3))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_trlib/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trlib/__init__.py
new file mode 100644
index 00000000..537b73b3
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trlib/__init__.py
@@ -0,0 +1,12 @@
+from ._trlib import TRLIBQuadraticSubproblem
+
+__all__ = ['TRLIBQuadraticSubproblem', 'get_trlib_quadratic_subproblem']
+
+
+def get_trlib_quadratic_subproblem(tol_rel_i=-2.0, tol_rel_b=-3.0, disp=False):
+    def subproblem_factory(x, fun, jac, hess, hessp):
+        return TRLIBQuadraticSubproblem(x, fun, jac, hess, hessp,
+                                        tol_rel_i=tol_rel_i,
+                                        tol_rel_b=tol_rel_b,
+                                        disp=disp)
+    return subproblem_factory
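+
+
+# Illustrative note: ``minimize(..., method='trust-krylov')`` obtains its
+# subproblem solver through this factory, roughly as in this sketch:
+#
+#     subproblem = get_trlib_quadratic_subproblem(tol_rel_i=-2.0,
+#                                                 tol_rel_b=-3.0)
+#     # subproblem(x, fun, jac, hess, hessp) -> TRLIBQuadraticSubproblem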
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion.py
new file mode 100644
index 00000000..b54428f4
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion.py
@@ -0,0 +1,301 @@
+"""Trust-region optimization."""
+import math
+import warnings
+
+import numpy as np
+import scipy.linalg
+from ._optimize import (_check_unknown_options, _status_message,
+                       OptimizeResult, _prepare_scalar_function)
+from scipy.optimize._hessian_update_strategy import HessianUpdateStrategy
+from scipy.optimize._differentiable_functions import FD_METHODS
+__all__ = []
+
+
+def _wrap_function(function, args):
+    # wraps a minimizer function to count number of evaluations
+    # and to easily provide an args kwd.
+    ncalls = [0]
+    if function is None:
+        return ncalls, None
+
+    def function_wrapper(x, *wrapper_args):
+        ncalls[0] += 1
+        # A copy of x is sent to the user function (gh13740)
+        return function(np.copy(x), *(wrapper_args + args))
+
+    return ncalls, function_wrapper
+
+
+class BaseQuadraticSubproblem:
+    """
+    Base/abstract class defining the quadratic model for trust-region
+    minimization. Child classes must implement the ``solve`` method.
+
+    Values of the objective function, Jacobian and Hessian (if provided) at
+    the current iterate ``x`` are evaluated on demand and then stored as
+    attributes ``fun``, ``jac``, ``hess``.
+    """
+
+    def __init__(self, x, fun, jac, hess=None, hessp=None):
+        self._x = x
+        self._f = None
+        self._g = None
+        self._h = None
+        self._g_mag = None
+        self._cauchy_point = None
+        self._newton_point = None
+        self._fun = fun
+        self._jac = jac
+        self._hess = hess
+        self._hessp = hessp
+
+    def __call__(self, p):
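+        # Quadratic model m(p) = f + g.T p + 0.5 * p.T H p evaluated at
+        # the step p, using a Hessian-vector product for the last term.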
+        return self.fun + np.dot(self.jac, p) + 0.5 * np.dot(p, self.hessp(p))
+
+    @property
+    def fun(self):
+        """Value of objective function at current iteration."""
+        if self._f is None:
+            self._f = self._fun(self._x)
+        return self._f
+
+    @property
+    def jac(self):
+        """Value of Jacobian of objective function at current iteration."""
+        if self._g is None:
+            self._g = self._jac(self._x)
+        return self._g
+
+    @property
+    def hess(self):
+        """Value of Hessian of objective function at current iteration."""
+        if self._h is None:
+            self._h = self._hess(self._x)
+        return self._h
+
+    def hessp(self, p):
+        if self._hessp is not None:
+            return self._hessp(self._x, p)
+        else:
+            return np.dot(self.hess, p)
+
+    @property
+    def jac_mag(self):
+        """Magnitude of jacobian of objective function at current iteration."""
+        if self._g_mag is None:
+            self._g_mag = scipy.linalg.norm(self.jac)
+        return self._g_mag
+
+    def get_boundaries_intersections(self, z, d, trust_radius):
+        """
+        Solve the scalar quadratic equation ||z + t d|| == trust_radius.
+        This is like a line-sphere intersection.
+        Return the two values of t, sorted from low to high.
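+
+        For instance, with ``z = 0`` and ``d`` a unit vector, the equation
+        reduces to ``|t| == trust_radius`` and the returned values are
+        ``-trust_radius`` and ``+trust_radius``.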
+        """
+        a = np.dot(d, d)
+        b = 2 * np.dot(z, d)
+        c = np.dot(z, z) - trust_radius**2
+        sqrt_discriminant = math.sqrt(b*b - 4*a*c)
+
+        # The following calculation is mathematically
+        # equivalent to:
+        # ta = (-b - sqrt_discriminant) / (2*a)
+        # tb = (-b + sqrt_discriminant) / (2*a)
+        # but produces smaller round-off errors.
+        # See "Matrix Computations" p. 97
+        # for a justification.
+        aux = b + math.copysign(sqrt_discriminant, b)
+        ta = -aux / (2*a)
+        tb = -2*c / aux
+        return sorted([ta, tb])
+
+    def solve(self, trust_radius):
+        raise NotImplementedError('The solve method should be implemented by '
+                                  'the child class')
+
+
+def _minimize_trust_region(fun, x0, args=(), jac=None, hess=None, hessp=None,
+                           subproblem=None, initial_trust_radius=1.0,
+                           max_trust_radius=1000.0, eta=0.15, gtol=1e-4,
+                           maxiter=None, disp=False, return_all=False,
+                           callback=None, inexact=True, **unknown_options):
+    """
+    Minimization of scalar function of one or more variables using a
+    trust-region algorithm.
+
+    Options for the trust-region algorithm are:
+        initial_trust_radius : float
+            Initial trust radius.
+        max_trust_radius : float
+            Never propose steps that are longer than this value.
+        eta : float
+            Acceptance stringency for proposed steps: a step is accepted
+            only when the ratio of actual to predicted reduction exceeds
+            ``eta``.
+        gtol : float
+            Gradient norm must be less than `gtol`
+            before successful termination.
+        maxiter : int
+            Maximum number of iterations to perform.
+        disp : bool
+            If True, print convergence message.
+        inexact : bool
+            Accuracy with which to solve subproblems. If True, fewer
+            nonlinear iterations are typically needed at the cost of more
+            Hessian-vector products. Only effective for method trust-krylov.
+
+    This function is called by the `minimize` function.
+    It is not supposed to be called directly.
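+
+    A minimal, illustrative sketch through the public interface
+    (``trust-ncg`` is one of the methods that dispatches here):
+
+    >>> import numpy as np
+    >>> from scipy.optimize import minimize, rosen, rosen_der, rosen_hess
+    >>> res = minimize(rosen, np.array([1.3, 0.7]), method='trust-ncg',
+    ...                jac=rosen_der, hess=rosen_hess)
+    >>> bool(res.success and np.allclose(res.x, 1.0, atol=1e-3))
+    True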
+    """
+    _check_unknown_options(unknown_options)
+
+    if jac is None:
+        raise ValueError('Jacobian is currently required for trust-region '
+                         'methods')
+    if hess is None and hessp is None:
+        raise ValueError('Either the Hessian or the Hessian-vector product '
+                         'is currently required for trust-region methods')
+    if subproblem is None:
+        raise ValueError('A subproblem solving strategy is required for '
+                         'trust-region methods')
+    if not (0 <= eta < 0.25):
+        raise ValueError('invalid acceptance stringency')
+    if max_trust_radius <= 0:
+        raise ValueError('the max trust radius must be positive')
+    if initial_trust_radius <= 0:
+        raise ValueError('the initial trust radius must be positive')
+    if initial_trust_radius >= max_trust_radius:
+        raise ValueError('the initial trust radius must be less than the '
+                         'max trust radius')
+
+    # force the initial guess into a nice format
+    x0 = np.asarray(x0).flatten()
+
+    # A ScalarFunction representing the problem. This caches calls to fun, jac,
+    # hess.
+    sf = _prepare_scalar_function(fun, x0, jac=jac, hess=hess, args=args)
+    fun = sf.fun
+    jac = sf.grad
+    if callable(hess):
+        hess = sf.hess
+    elif callable(hessp):
+        # this elif statement must come before examining whether hess
+        # is estimated by FD methods or a HessianUpdateStrategy
+        pass
+    elif (hess in FD_METHODS or isinstance(hess, HessianUpdateStrategy)):
+        # If the Hessian is being estimated by finite differences or a
+        # Hessian update strategy then ScalarFunction.hess returns a
+        # LinearOperator or a HessianUpdateStrategy. This enables the
+        # calculation/creation of a hessp. BUT you only want to do this
+        # if the user *hasn't* provided a callable(hessp) function.
+        hess = None
+
+        def hessp(x, p, *args):
+            return sf.hess(x).dot(p)
+    else:
+        raise ValueError('Either the Hessian or the Hessian-vector product '
+                         'is currently required for trust-region methods')
+
+    # ScalarFunction doesn't represent hessp
+    nhessp, hessp = _wrap_function(hessp, args)
+
+    # limit the number of iterations
+    if maxiter is None:
+        maxiter = len(x0)*200
+
+    # init the search status
+    warnflag = 0
+
+    # initialize the search
+    trust_radius = initial_trust_radius
+    x = x0
+    if return_all:
+        allvecs = [x]
+    m = subproblem(x, fun, jac, hess, hessp)
+    k = 0
+
+    # search for the function min
+    # do not even start if the gradient is small enough
+    while m.jac_mag >= gtol:
+
+        # Solve the sub-problem.
+        # This gives us the proposed step relative to the current position
+        # and it tells us whether the proposed step
+        # has reached the trust region boundary or not.
+        try:
+            p, hits_boundary = m.solve(trust_radius)
+        except np.linalg.LinAlgError:
+            warnflag = 3
+            break
+
+        # calculate the predicted value at the proposed point
+        predicted_value = m(p)
+
+        # define the local approximation at the proposed point
+        x_proposed = x + p
+        m_proposed = subproblem(x_proposed, fun, jac, hess, hessp)
+
+        # evaluate the ratio defined in equation (4.4)
+        actual_reduction = m.fun - m_proposed.fun
+        predicted_reduction = m.fun - predicted_value
+        if predicted_reduction <= 0:
+            warnflag = 2
+            break
+        rho = actual_reduction / predicted_reduction
+
+        # update the trust radius according to the actual/predicted ratio
+        if rho < 0.25:
+            trust_radius *= 0.25
+        elif rho > 0.75 and hits_boundary:
+            trust_radius = min(2*trust_radius, max_trust_radius)
+
+        # if the ratio is high enough then accept the proposed step
+        if rho > eta:
+            x = x_proposed
+            m = m_proposed
+
+        # append the best guess, call back, increment the iteration count
+        if return_all:
+            allvecs.append(np.copy(x))
+        if callback is not None:
+            callback(np.copy(x))
+        k += 1
+
+        # check if the gradient is small enough to stop
+        if m.jac_mag < gtol:
+            warnflag = 0
+            break
+
+        # check if we have looked at enough iterations
+        if k >= maxiter:
+            warnflag = 1
+            break
+
+    # print some stuff if requested
+    status_messages = (
+            _status_message['success'],
+            _status_message['maxiter'],
+            'A bad approximation caused failure to predict improvement.',
+            'A linalg error occurred, such as a non-psd Hessian.',
+            )
+    if disp:
+        if warnflag == 0:
+            print(status_messages[warnflag])
+        else:
+            warnings.warn(status_messages[warnflag], RuntimeWarning, 3)
+        print("         Current function value: %f" % m.fun)
+        print("         Iterations: %d" % k)
+        print("         Function evaluations: %d" % sf.nfev)
+        print("         Gradient evaluations: %d" % sf.ngev)
+        print("         Hessian evaluations: %d" % (sf.nhev + nhessp[0]))
+
+    result = OptimizeResult(x=x, success=(warnflag == 0), status=warnflag,
+                            fun=m.fun, jac=m.jac, nfev=sf.nfev, njev=sf.ngev,
+                            nhev=sf.nhev + nhessp[0], nit=k,
+                            message=status_messages[warnflag])
+
+    if hess is not None:
+        result['hess'] = m.hess
+
+    if return_all:
+        result['allvecs'] = allvecs
+
+    return result
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/__init__.py
new file mode 100644
index 00000000..549cfb97
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/__init__.py
@@ -0,0 +1,6 @@
+"""This module contains the equality constrained SQP solver."""
+
+
+from .minimize_trustregion_constr import _minimize_trustregion_constr
+
+__all__ = ['_minimize_trustregion_constr']
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/canonical_constraint.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/canonical_constraint.py
new file mode 100644
index 00000000..e1ad583b
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/canonical_constraint.py
@@ -0,0 +1,390 @@
+import numpy as np
+import scipy.sparse as sps
+
+
+class CanonicalConstraint:
+    """Canonical constraint to use with trust-constr algorithm.
+
+    It represents the set of constraints of the form::
+
+        f_eq(x) = 0
+        f_ineq(x) <= 0
+
+    where ``f_eq`` and ``f_ineq`` are evaluated by a single function, see
+    below.
+
+    The class is supposed to be instantiated by factory methods, which
+    should prepare the parameters listed below.
+
+    Parameters
+    ----------
+    n_eq, n_ineq : int
+        Number of equality and inequality constraints respectively.
+    fun : callable
+        Function defining the constraints. The signature is
+        ``fun(x) -> c_eq, c_ineq``, where ``c_eq`` is ndarray with `n_eq`
+        components and ``c_ineq`` is ndarray with `n_ineq` components.
+    jac : callable
+        Function to evaluate the Jacobian of the constraint. The signature
+        is ``jac(x) -> J_eq, J_ineq``, where ``J_eq`` and ``J_ineq`` are
+        either ndarray or csr_matrix, of shapes (n_eq, n) and (n_ineq, n),
+        respectively.
+    hess : callable
+        Function to evaluate the Hessian of the constraints multiplied
+        by Lagrange multipliers, that is
+        ``dot(f_eq, v_eq) + dot(f_ineq, v_ineq)``. The signature is
+        ``hess(x, v_eq, v_ineq) -> H``, where ``H`` has an implied
+        shape (n, n) and provides a matrix-vector product operation
+        ``H.dot(p)``.
+    keep_feasible : ndarray, shape (n_ineq,)
+        Mask indicating which inequality constraints should be kept feasible.
+    """
+    def __init__(self, n_eq, n_ineq, fun, jac, hess, keep_feasible):
+        self.n_eq = n_eq
+        self.n_ineq = n_ineq
+        self.fun = fun
+        self.jac = jac
+        self.hess = hess
+        self.keep_feasible = keep_feasible
+
+    @classmethod
+    def from_PreparedConstraint(cls, constraint):
+        """Create an instance from `PreparedConstrained` object."""
+        lb, ub = constraint.bounds
+        cfun = constraint.fun
+        keep_feasible = constraint.keep_feasible
+
+        if np.all(lb == -np.inf) and np.all(ub == np.inf):
+            return cls.empty(cfun.n)
+        elif np.all(lb == ub):
+            return cls._equal_to_canonical(cfun, lb)
+        elif np.all(lb == -np.inf):
+            return cls._less_to_canonical(cfun, ub, keep_feasible)
+        elif np.all(ub == np.inf):
+            return cls._greater_to_canonical(cfun, lb, keep_feasible)
+        else:
+            return cls._interval_to_canonical(cfun, lb, ub, keep_feasible)
+
+    @classmethod
+    def empty(cls, n):
+        """Create an "empty" instance.
+
+        This "empty" instance is required to allow working with unconstrained
+        problems as if they have some constraints.
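+
+        For instance (illustrative):
+
+        >>> cc = CanonicalConstraint.empty(3)
+        >>> cc.n_eq, cc.n_ineq
+        (0, 0)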
+        """
+        empty_fun = np.empty(0)
+        empty_jac = np.empty((0, n))
+        empty_hess = sps.csr_matrix((n, n))
+
+        def fun(x):
+            return empty_fun, empty_fun
+
+        def jac(x):
+            return empty_jac, empty_jac
+
+        def hess(x, v_eq, v_ineq):
+            return empty_hess
+
+        return cls(0, 0, fun, jac, hess, np.empty(0, dtype=np.bool_))
+
+    @classmethod
+    def concatenate(cls, canonical_constraints, sparse_jacobian):
+        """Concatenate multiple `CanonicalConstraint` into one.
+
+        `sparse_jacobian` (bool) determines the Jacobian format of the
+        concatenated constraint. Note that items in `canonical_constraints`
+        must have their Jacobians in the same format.
+        """
+        def fun(x):
+            if canonical_constraints:
+                eq_all, ineq_all = zip(
+                        *[c.fun(x) for c in canonical_constraints])
+            else:
+                eq_all, ineq_all = [], []
+
+            return np.hstack(eq_all), np.hstack(ineq_all)
+
+        if sparse_jacobian:
+            vstack = sps.vstack
+        else:
+            vstack = np.vstack
+
+        def jac(x):
+            if canonical_constraints:
+                eq_all, ineq_all = zip(
+                        *[c.jac(x) for c in canonical_constraints])
+            else:
+                eq_all, ineq_all = [], []
+
+            return vstack(eq_all), vstack(ineq_all)
+
+        def hess(x, v_eq, v_ineq):
+            hess_all = []
+            index_eq = 0
+            index_ineq = 0
+            for c in canonical_constraints:
+                vc_eq = v_eq[index_eq:index_eq + c.n_eq]
+                vc_ineq = v_ineq[index_ineq:index_ineq + c.n_ineq]
+                hess_all.append(c.hess(x, vc_eq, vc_ineq))
+                index_eq += c.n_eq
+                index_ineq += c.n_ineq
+
+            def matvec(p):
+                result = np.zeros_like(p)
+                for h in hess_all:
+                    result += h.dot(p)
+                return result
+
+            n = x.shape[0]
+            return sps.linalg.LinearOperator((n, n), matvec, dtype=float)
+
+        n_eq = sum(c.n_eq for c in canonical_constraints)
+        n_ineq = sum(c.n_ineq for c in canonical_constraints)
+        keep_feasible = np.hstack([c.keep_feasible for c in
+                                   canonical_constraints])
+
+        return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible)
+
+    @classmethod
+    def _equal_to_canonical(cls, cfun, value):
+        empty_fun = np.empty(0)
+        n = cfun.n
+
+        n_eq = value.shape[0]
+        n_ineq = 0
+        keep_feasible = np.empty(0, dtype=bool)
+
+        if cfun.sparse_jacobian:
+            empty_jac = sps.csr_matrix((0, n))
+        else:
+            empty_jac = np.empty((0, n))
+
+        def fun(x):
+            return cfun.fun(x) - value, empty_fun
+
+        def jac(x):
+            return cfun.jac(x), empty_jac
+
+        def hess(x, v_eq, v_ineq):
+            return cfun.hess(x, v_eq)
+
+        return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible)
+
+    @classmethod
+    def _less_to_canonical(cls, cfun, ub, keep_feasible):
+        empty_fun = np.empty(0)
+        n = cfun.n
+        if cfun.sparse_jacobian:
+            empty_jac = sps.csr_matrix((0, n))
+        else:
+            empty_jac = np.empty((0, n))
+
+        finite_ub = ub < np.inf
+        n_eq = 0
+        n_ineq = np.sum(finite_ub)
+
+        if np.all(finite_ub):
+            def fun(x):
+                return empty_fun, cfun.fun(x) - ub
+
+            def jac(x):
+                return empty_jac, cfun.jac(x)
+
+            def hess(x, v_eq, v_ineq):
+                return cfun.hess(x, v_ineq)
+        else:
+            finite_ub = np.nonzero(finite_ub)[0]
+            keep_feasible = keep_feasible[finite_ub]
+            ub = ub[finite_ub]
+
+            def fun(x):
+                return empty_fun, cfun.fun(x)[finite_ub] - ub
+
+            def jac(x):
+                return empty_jac, cfun.jac(x)[finite_ub]
+
+            def hess(x, v_eq, v_ineq):
+                v = np.zeros(cfun.m)
+                v[finite_ub] = v_ineq
+                return cfun.hess(x, v)
+
+        return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible)
+
+    @classmethod
+    def _greater_to_canonical(cls, cfun, lb, keep_feasible):
+        empty_fun = np.empty(0)
+        n = cfun.n
+        if cfun.sparse_jacobian:
+            empty_jac = sps.csr_matrix((0, n))
+        else:
+            empty_jac = np.empty((0, n))
+
+        finite_lb = lb > -np.inf
+        n_eq = 0
+        n_ineq = np.sum(finite_lb)
+
+        if np.all(finite_lb):
+            def fun(x):
+                return empty_fun, lb - cfun.fun(x)
+
+            def jac(x):
+                return empty_jac, -cfun.jac(x)
+
+            def hess(x, v_eq, v_ineq):
+                return cfun.hess(x, -v_ineq)
+        else:
+            finite_lb = np.nonzero(finite_lb)[0]
+            keep_feasible = keep_feasible[finite_lb]
+            lb = lb[finite_lb]
+
+            def fun(x):
+                return empty_fun, lb - cfun.fun(x)[finite_lb]
+
+            def jac(x):
+                return empty_jac, -cfun.jac(x)[finite_lb]
+
+            def hess(x, v_eq, v_ineq):
+                v = np.zeros(cfun.m)
+                v[finite_lb] = -v_ineq
+                return cfun.hess(x, v)
+
+        return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible)
+
+    @classmethod
+    def _interval_to_canonical(cls, cfun, lb, ub, keep_feasible):
+        lb_inf = lb == -np.inf
+        ub_inf = ub == np.inf
+        equal = lb == ub
+        less = lb_inf & ~ub_inf
+        greater = ub_inf & ~lb_inf
+        interval = ~equal & ~lb_inf & ~ub_inf
+
+        equal = np.nonzero(equal)[0]
+        less = np.nonzero(less)[0]
+        greater = np.nonzero(greater)[0]
+        interval = np.nonzero(interval)[0]
+        n_less = less.shape[0]
+        n_greater = greater.shape[0]
+        n_interval = interval.shape[0]
+        n_ineq = n_less + n_greater + 2 * n_interval
+        n_eq = equal.shape[0]
+
+        keep_feasible = np.hstack((keep_feasible[less],
+                                   keep_feasible[greater],
+                                   keep_feasible[interval],
+                                   keep_feasible[interval]))
+
+        def fun(x):
+            f = cfun.fun(x)
+            eq = f[equal] - lb[equal]
+            le = f[less] - ub[less]
+            ge = lb[greater] - f[greater]
+            il = f[interval] - ub[interval]
+            ig = lb[interval] - f[interval]
+            return eq, np.hstack((le, ge, il, ig))
+
+        def jac(x):
+            J = cfun.jac(x)
+            eq = J[equal]
+            le = J[less]
+            ge = -J[greater]
+            il = J[interval]
+            ig = -il
+            if sps.issparse(J):
+                ineq = sps.vstack((le, ge, il, ig))
+            else:
+                ineq = np.vstack((le, ge, il, ig))
+            return eq, ineq
+
+        def hess(x, v_eq, v_ineq):
+            n_start = 0
+            v_l = v_ineq[n_start:n_start + n_less]
+            n_start += n_less
+            v_g = v_ineq[n_start:n_start + n_greater]
+            n_start += n_greater
+            v_il = v_ineq[n_start:n_start + n_interval]
+            n_start += n_interval
+            v_ig = v_ineq[n_start:n_start + n_interval]
+
+            v = np.zeros_like(lb)
+            v[equal] = v_eq
+            v[less] = v_l
+            v[greater] = -v_g
+            v[interval] = v_il - v_ig
+
+            return cfun.hess(x, v)
+
+        return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible)
+
+
+def initial_constraints_as_canonical(n, prepared_constraints, sparse_jacobian):
+    """Convert initial values of the constraints to the canonical format.
+
+    The purpose is to avoid one additional call to the constraints at the
+    initial point. It takes the values saved in `PreparedConstraint`, modifies
+    them, and concatenates them in the canonical constraint format.
+    """
+    c_eq = []
+    c_ineq = []
+    J_eq = []
+    J_ineq = []
+
+    for c in prepared_constraints:
+        f = c.fun.f
+        J = c.fun.J
+        lb, ub = c.bounds
+        if np.all(lb == ub):
+            c_eq.append(f - lb)
+            J_eq.append(J)
+        elif np.all(lb == -np.inf):
+            finite_ub = ub < np.inf
+            c_ineq.append(f[finite_ub] - ub[finite_ub])
+            J_ineq.append(J[finite_ub])
+        elif np.all(ub == np.inf):
+            finite_lb = lb > -np.inf
+            c_ineq.append(lb[finite_lb] - f[finite_lb])
+            J_ineq.append(-J[finite_lb])
+        else:
+            lb_inf = lb == -np.inf
+            ub_inf = ub == np.inf
+            equal = lb == ub
+            less = lb_inf & ~ub_inf
+            greater = ub_inf & ~lb_inf
+            interval = ~equal & ~lb_inf & ~ub_inf
+
+            c_eq.append(f[equal] - lb[equal])
+            c_ineq.append(f[less] - ub[less])
+            c_ineq.append(lb[greater] - f[greater])
+            c_ineq.append(f[interval] - ub[interval])
+            c_ineq.append(lb[interval] - f[interval])
+
+            J_eq.append(J[equal])
+            J_ineq.append(J[less])
+            J_ineq.append(-J[greater])
+            J_ineq.append(J[interval])
+            J_ineq.append(-J[interval])
+
+    c_eq = np.hstack(c_eq) if c_eq else np.empty(0)
+    c_ineq = np.hstack(c_ineq) if c_ineq else np.empty(0)
+
+    if sparse_jacobian:
+        vstack = sps.vstack
+        empty = sps.csr_matrix((0, n))
+    else:
+        vstack = np.vstack
+        empty = np.empty((0, n))
+
+    J_eq = vstack(J_eq) if J_eq else empty
+    J_ineq = vstack(J_ineq) if J_ineq else empty
+
+    return c_eq, c_ineq, J_eq, J_ineq
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/equality_constrained_sqp.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/equality_constrained_sqp.py
new file mode 100644
index 00000000..d50e1e79
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/equality_constrained_sqp.py
@@ -0,0 +1,217 @@
+"""Byrd-Omojokun Trust-Region SQP method."""
+
+from scipy.sparse import eye as speye
+from .projections import projections
+from .qp_subproblem import modified_dogleg, projected_cg, box_intersections
+import numpy as np
+from numpy.linalg import norm
+
+__all__ = ['equality_constrained_sqp']
+
+
+def default_scaling(x):
+    n, = np.shape(x)
+    return speye(n)
+
+
+def equality_constrained_sqp(fun_and_constr, grad_and_jac, lagr_hess,
+                             x0, fun0, grad0, constr0,
+                             jac0, stop_criteria,
+                             state,
+                             initial_penalty,
+                             initial_trust_radius,
+                             factorization_method,
+                             trust_lb=None,
+                             trust_ub=None,
+                             scaling=default_scaling):
+    """Solve nonlinear equality-constrained problem using trust-region SQP.
+
+    Solve optimization problem:
+
+        minimize fun(x)
+        subject to: constr(x) = 0
+
+    using Byrd-Omojokun Trust-Region SQP method described in [1]_. Several
+    implementation details are based on [2]_ and [3]_, p. 549.
+
+    References
+    ----------
+    .. [1] Lalee, Marucha, Jorge Nocedal, and Todd Plantenga. "On the
+           implementation of an algorithm for large-scale equality
+           constrained optimization." SIAM Journal on
+           Optimization 8.3 (1998): 682-706.
+    .. [2] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal.
+           "An interior point algorithm for large-scale nonlinear
+           programming." SIAM Journal on Optimization 9.4 (1999): 877-900.
+    .. [3] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization"
+           Second Edition (2006).
+    """
+    PENALTY_FACTOR = 0.3  # Rho from formula (3.51), reference [2]_, p.891.
+    LARGE_REDUCTION_RATIO = 0.9
+    INTERMEDIARY_REDUCTION_RATIO = 0.3
+    SUFFICIENT_REDUCTION_RATIO = 1e-8  # Eta from reference [2]_, p.892.
+    TRUST_ENLARGEMENT_FACTOR_L = 7.0
+    TRUST_ENLARGEMENT_FACTOR_S = 2.0
+    MAX_TRUST_REDUCTION = 0.5
+    MIN_TRUST_REDUCTION = 0.1
+    SOC_THRESHOLD = 0.1
+    TR_FACTOR = 0.8  # Zeta from formula (3.21), reference [2]_, p.885.
+    BOX_FACTOR = 0.5
+
+    n, = np.shape(x0)  # Number of parameters
+
+    # Set default lower and upper bounds.
+    if trust_lb is None:
+        trust_lb = np.full(n, -np.inf)
+    if trust_ub is None:
+        trust_ub = np.full(n, np.inf)
+
+    # Initial values
+    x = np.copy(x0)
+    trust_radius = initial_trust_radius
+    penalty = initial_penalty
+    # Compute Values
+    f = fun0
+    c = grad0
+    b = constr0
+    A = jac0
+    S = scaling(x)
+    # Get projections
+    Z, LS, Y = projections(A, factorization_method)
+    # Compute least-square lagrange multipliers
+    v = -LS.dot(c)
+    # Compute Hessian
+    H = lagr_hess(x, v)
+
+    # Update state parameters
+    optimality = norm(c + A.T.dot(v), np.inf)
+    constr_violation = norm(b, np.inf) if len(b) > 0 else 0
+    cg_info = {'niter': 0, 'stop_cond': 0,
+               'hits_boundary': False}
+
+    last_iteration_failed = False
+    while not stop_criteria(state, x, last_iteration_failed,
+                            optimality, constr_violation,
+                            trust_radius, penalty, cg_info):
+        # Normal Step - `dn`
+        # minimize 1/2*||A dn + b||^2
+        # subject to:
+        # ||dn|| <= TR_FACTOR * trust_radius
+        # BOX_FACTOR * lb <= dn <= BOX_FACTOR * ub.
+        dn = modified_dogleg(A, Y, b,
+                             TR_FACTOR*trust_radius,
+                             BOX_FACTOR*trust_lb,
+                             BOX_FACTOR*trust_ub)
+
+        # Tangential Step - `dt`
+        # Solve the QP problem:
+        # minimize 1/2 dt.T H dt + dt.T (H dn + c)
+        # subject to:
+        # A dt = 0
+        # ||dt|| <= sqrt(trust_radius**2 - ||dn||**2)
+        # lb - dn <= dt <= ub - dn
+        c_t = H.dot(dn) + c
+        b_t = np.zeros_like(b)
+        trust_radius_t = np.sqrt(trust_radius**2 - np.linalg.norm(dn)**2)
+        lb_t = trust_lb - dn
+        ub_t = trust_ub - dn
+        dt, cg_info = projected_cg(H, c_t, Z, Y, b_t,
+                                   trust_radius_t,
+                                   lb_t, ub_t)
+
+        # Compute update (normal + tangential steps).
+        d = dn + dt
+
+        # Compute second order model: 1/2 d H d + c.T d + f.
+        quadratic_model = 1/2*(H.dot(d)).dot(d) + c.T.dot(d)
+        # Compute linearized constraint: l = A d + b.
+        linearized_constr = A.dot(d)+b
+        # Compute new penalty parameter according to formula (3.52),
+        # reference [2]_, p.891.
+        vpred = norm(b) - norm(linearized_constr)
+        # Guarantee `vpred` always positive,
+        # regardless of roundoff errors.
+        vpred = max(1e-16, vpred)
+        previous_penalty = penalty
+        if quadratic_model > 0:
+            new_penalty = quadratic_model / ((1-PENALTY_FACTOR)*vpred)
+            penalty = max(penalty, new_penalty)
+        # Compute predicted reduction according to formula (3.52),
+        # reference [2]_, p.891.
+        predicted_reduction = -quadratic_model + penalty*vpred
+
+        # Compute merit function at current point
+        merit_function = f + penalty*norm(b)
+        # Evaluate function and constraints at trial point
+        x_next = x + S.dot(d)
+        f_next, b_next = fun_and_constr(x_next)
+        # Compute merit function at trial point
+        merit_function_next = f_next + penalty*norm(b_next)
+        # Compute actual reduction according to formula (3.54),
+        # reference [2]_, p.892.
+        actual_reduction = merit_function - merit_function_next
+        # Compute reduction ratio
+        reduction_ratio = actual_reduction / predicted_reduction
+
+        # Second order correction (SOC), reference [2]_, p.892.
+        if reduction_ratio < SUFFICIENT_REDUCTION_RATIO and \
+           norm(dn) <= SOC_THRESHOLD * norm(dt):
+            # Compute second order correction
+            y = -Y.dot(b_next)
+            # Make sure increment is inside box constraints
+            _, t, intersect = box_intersections(d, y, trust_lb, trust_ub)
+            # Compute tentative point
+            x_soc = x + S.dot(d + t*y)
+            f_soc, b_soc = fun_and_constr(x_soc)
+            # Recompute actual reduction
+            merit_function_soc = f_soc + penalty*norm(b_soc)
+            actual_reduction_soc = merit_function - merit_function_soc
+            # Recompute reduction ratio
+            reduction_ratio_soc = actual_reduction_soc / predicted_reduction
+            if intersect and reduction_ratio_soc >= SUFFICIENT_REDUCTION_RATIO:
+                x_next = x_soc
+                f_next = f_soc
+                b_next = b_soc
+                reduction_ratio = reduction_ratio_soc
+
+        # Readjust trust region step, formula (3.55), reference [2]_, p.892.
+        if reduction_ratio >= LARGE_REDUCTION_RATIO:
+            trust_radius = max(TRUST_ENLARGEMENT_FACTOR_L * norm(d),
+                               trust_radius)
+        elif reduction_ratio >= INTERMEDIARY_REDUCTION_RATIO:
+            trust_radius = max(TRUST_ENLARGEMENT_FACTOR_S * norm(d),
+                               trust_radius)
+        # Reduce trust region step, according to reference [3]_, p.696.
+        elif reduction_ratio < SUFFICIENT_REDUCTION_RATIO:
+            trust_reduction = ((1-SUFFICIENT_REDUCTION_RATIO) /
+                               (1-reduction_ratio))
+            new_trust_radius = trust_reduction * norm(d)
+            if new_trust_radius >= MAX_TRUST_REDUCTION * trust_radius:
+                trust_radius *= MAX_TRUST_REDUCTION
+            elif new_trust_radius >= MIN_TRUST_REDUCTION * trust_radius:
+                trust_radius = new_trust_radius
+            else:
+                trust_radius *= MIN_TRUST_REDUCTION
+
+        # Update iteration
+        if reduction_ratio >= SUFFICIENT_REDUCTION_RATIO:
+            x = x_next
+            f, b = f_next, b_next
+            c, A = grad_and_jac(x)
+            S = scaling(x)
+            # Get projections
+            Z, LS, Y = projections(A, factorization_method)
+            # Compute least-square lagrange multipliers
+            v = -LS.dot(c)
+            # Compute Hessian
+            H = lagr_hess(x, v)
+            # Set Flag
+            last_iteration_failed = False
+            # Optimality values
+            optimality = norm(c + A.T.dot(v), np.inf)
+            constr_violation = norm(b, np.inf) if len(b) > 0 else 0
+        else:
+            penalty = previous_penalty
+            last_iteration_failed = True
+
+    return x, state
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py
new file mode 100644
index 00000000..d576e51e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py
@@ -0,0 +1,545 @@
+import time
+import numpy as np
+from scipy.sparse.linalg import LinearOperator
+from .._differentiable_functions import VectorFunction
+from .._constraints import (
+    NonlinearConstraint, LinearConstraint, PreparedConstraint, strict_bounds)
+from .._hessian_update_strategy import BFGS
+from .._optimize import OptimizeResult
+from .._differentiable_functions import ScalarFunction
+from .equality_constrained_sqp import equality_constrained_sqp
+from .canonical_constraint import (CanonicalConstraint,
+                                   initial_constraints_as_canonical)
+from .tr_interior_point import tr_interior_point
+from .report import BasicReport, SQPReport, IPReport
+
+
+TERMINATION_MESSAGES = {
+    0: "The maximum number of function evaluations is exceeded.",
+    1: "`gtol` termination condition is satisfied.",
+    2: "`xtol` termination condition is satisfied.",
+    3: "`callback` function requested termination."
+}
+
+
+class HessianLinearOperator:
+    """Build LinearOperator from hessp"""
+    def __init__(self, hessp, n):
+        self.hessp = hessp
+        self.n = n
+
+    def __call__(self, x, *args):
+        def matvec(p):
+            return self.hessp(x, p, *args)
+
+        return LinearOperator((self.n, self.n), matvec=matvec)
+
+
+class LagrangianHessian:
+    """The Hessian of the Lagrangian as LinearOperator.
+
+    The Lagrangian is computed as the objective function plus the
+    constraints, each multiplied by its Lagrange multiplier.
+    """
+    def __init__(self, n, objective_hess, constraints_hess):
+        self.n = n
+        self.objective_hess = objective_hess
+        self.constraints_hess = constraints_hess
+
+    def __call__(self, x, v_eq=np.empty(0), v_ineq=np.empty(0)):
+        H_objective = self.objective_hess(x)
+        H_constraints = self.constraints_hess(x, v_eq, v_ineq)
+
+        def matvec(p):
+            return H_objective.dot(p) + H_constraints.dot(p)
+
+        return LinearOperator((self.n, self.n), matvec)
+
+
+def update_state_sqp(state, x, last_iteration_failed, objective, prepared_constraints,
+                     start_time, tr_radius, constr_penalty, cg_info):
+    state.nit += 1
+    state.nfev = objective.nfev
+    state.njev = objective.ngev
+    state.nhev = objective.nhev
+    state.constr_nfev = [c.fun.nfev if isinstance(c.fun, VectorFunction) else 0
+                         for c in prepared_constraints]
+    state.constr_njev = [c.fun.njev if isinstance(c.fun, VectorFunction) else 0
+                         for c in prepared_constraints]
+    state.constr_nhev = [c.fun.nhev if isinstance(c.fun, VectorFunction) else 0
+                         for c in prepared_constraints]
+
+    if not last_iteration_failed:
+        state.x = x
+        state.fun = objective.f
+        state.grad = objective.g
+        state.v = [c.fun.v for c in prepared_constraints]
+        state.constr = [c.fun.f for c in prepared_constraints]
+        state.jac = [c.fun.J for c in prepared_constraints]
+        # Compute Lagrangian Gradient
+        state.lagrangian_grad = np.copy(state.grad)
+        for c in prepared_constraints:
+            state.lagrangian_grad += c.fun.J.T.dot(c.fun.v)
+        state.optimality = np.linalg.norm(state.lagrangian_grad, np.inf)
+        # Compute maximum constraint violation
+        state.constr_violation = 0
+        for i in range(len(prepared_constraints)):
+            lb, ub = prepared_constraints[i].bounds
+            c = state.constr[i]
+            state.constr_violation = np.max([state.constr_violation,
+                                             np.max(lb - c),
+                                             np.max(c - ub)])
+
+    state.execution_time = time.time() - start_time
+    state.tr_radius = tr_radius
+    state.constr_penalty = constr_penalty
+    state.cg_niter += cg_info["niter"]
+    state.cg_stop_cond = cg_info["stop_cond"]
+
+    return state
+
+
+def update_state_ip(state, x, last_iteration_failed, objective,
+                    prepared_constraints, start_time,
+                    tr_radius, constr_penalty, cg_info,
+                    barrier_parameter, barrier_tolerance):
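+    """Update ``state`` as in `update_state_sqp`, additionally recording the
+    barrier parameter and barrier tolerance."""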
+    state = update_state_sqp(state, x, last_iteration_failed, objective,
+                             prepared_constraints, start_time, tr_radius,
+                             constr_penalty, cg_info)
+    state.barrier_parameter = barrier_parameter
+    state.barrier_tolerance = barrier_tolerance
+    return state
+
+
+def _minimize_trustregion_constr(fun, x0, args, grad,
+                                 hess, hessp, bounds, constraints,
+                                 xtol=1e-8, gtol=1e-8,
+                                 barrier_tol=1e-8,
+                                 sparse_jacobian=None,
+                                 callback=None, maxiter=1000,
+                                 verbose=0, finite_diff_rel_step=None,
+                                 initial_constr_penalty=1.0, initial_tr_radius=1.0,
+                                 initial_barrier_parameter=0.1,
+                                 initial_barrier_tolerance=0.1,
+                                 factorization_method=None,
+                                 disp=False):
+    """Minimize a scalar function subject to constraints.
+
+    Parameters
+    ----------
+    gtol : float, optional
+        Tolerance for termination by the norm of the Lagrangian gradient.
+        The algorithm will terminate when both the infinity norm (i.e., max
+        abs value) of the Lagrangian gradient and the constraint violation
+        are smaller than ``gtol``. Default is 1e-8.
+    xtol : float, optional
+        Tolerance for termination by the change of the independent variable.
+        The algorithm will terminate when ``tr_radius < xtol``, where
+        ``tr_radius`` is the radius of the trust region used in the algorithm.
+        Default is 1e-8.
+    barrier_tol : float, optional
+        Threshold on the barrier parameter for the algorithm termination.
+        When inequality constraints are present, the algorithm will terminate
+        only when the barrier parameter is less than `barrier_tol`.
+        Default is 1e-8.
+    sparse_jacobian : {bool, None}, optional
+        Determines how to represent Jacobians of the constraints. If bool,
+        then Jacobians of all the constraints will be converted to the
+        corresponding format. If None (default), then Jacobians won't be
+        converted, but the algorithm can proceed only if they all have the
+        same format.
+    initial_tr_radius : float, optional
+        Initial trust radius. The trust radius gives the maximum distance
+        between solution points in consecutive iterations. It reflects the
+        trust the algorithm puts in the local approximation of the optimization
+        problem. For an accurate local approximation the trust region should be
+        large; for an approximation valid only close to the current point it
+        should be small. The trust radius is automatically updated throughout
+        the optimization process, with ``initial_tr_radius`` being its initial value.
+        Default is 1 (recommended in [1]_, p. 19).
+    initial_constr_penalty : float, optional
+        Initial constraints penalty parameter. The penalty parameter is used for
+        balancing the requirements of decreasing the objective function
+        and satisfying the constraints. It is used for defining the merit function:
+        ``merit_function(x) = fun(x) + constr_penalty * constr_norm_l2(x)``,
+        where ``constr_norm_l2(x)`` is the l2 norm of a vector containing all
+        the constraints. The merit function is used for accepting or rejecting
+        trial points and ``constr_penalty`` weights the two conflicting goals
+        of reducing objective function and constraints. The penalty is automatically
+        updated throughout the optimization process, with
+        ``initial_constr_penalty`` being its initial value. Default is 1
+        (recommended in [1]_, p. 19).
+    initial_barrier_parameter, initial_barrier_tolerance : float, optional
+        Initial barrier parameter and initial tolerance for the barrier subproblem.
+        Both are used only when inequality constraints are present. For dealing with
+        optimization problems ``min_x f(x)`` subject to inequality constraints
+        ``c(x) <= 0`` the algorithm introduces slack variables, solving the problem
+        ``min_(x,s) f(x) + barrier_parameter*sum(ln(s))`` subject to the equality
+        constraints ``c(x) + s = 0`` instead of the original problem. This subproblem
+        is solved for decreasing values of ``barrier_parameter`` and with decreasing
+        tolerances for the termination, starting with ``initial_barrier_parameter``
+        for the barrier parameter and ``initial_barrier_tolerance`` for the
+        barrier tolerance. Default is 0.1 for both values (recommended in [1]_ p. 19).
+        Also note that ``barrier_parameter`` and ``barrier_tolerance`` are updated
+        with the same prefactor.
+    factorization_method : string or None, optional
+        Method to factorize the Jacobian of the constraints. Use None (default)
+        for the auto selection or one of:
+
+            - 'NormalEquation' (requires scikit-sparse)
+            - 'AugmentedSystem'
+            - 'QRFactorization'
+            - 'SVDFactorization'
+
+        The methods 'NormalEquation' and 'AugmentedSystem' can be used only
+        with sparse constraints. The projections required by the algorithm
+        will be computed using, respectively, the normal equation and the
+        augmented system approaches explained in [1]_. 'NormalEquation'
+        computes the Cholesky factorization of ``A A.T`` and 'AugmentedSystem'
+        performs the LU factorization of an augmented system. They usually
+        provide similar results. 'AugmentedSystem' is used by default for
+        sparse matrices.
+
+        The methods 'QRFactorization' and 'SVDFactorization' can be used
+        only with dense constraints. They compute the required projections
+        using, respectively, QR and SVD factorizations. The 'SVDFactorization'
+        method can cope with Jacobian matrices with deficient row rank and will
+        be used whenever other factorization methods fail (which may imply the
+        conversion of sparse matrices to a dense format when required).
+        By default, 'QRFactorization' is used for dense matrices.
+    finite_diff_rel_step : None or array_like, optional
+        Relative step size for the finite difference approximation.
+    maxiter : int, optional
+        Maximum number of algorithm iterations. Default is 1000.
+    verbose : {0, 1, 2, 3}, optional
+        Level of algorithm's verbosity:
+
+            * 0 (default) : work silently.
+            * 1 : display a termination report.
+            * 2 : display progress during iterations.
+            * 3 : display progress during iterations (more complete report).
+
+    disp : bool, optional
+        If True, then `verbose` will be set to 1 if it was 0. Default is False.
+
+    Returns
+    -------
+    `OptimizeResult` with the fields documented below. Note the following:
+
+        1. All values corresponding to the constraints are ordered as they
+           were passed to the solver, with values corresponding to `bounds`
+           constraints placed *after* the other constraints.
+        2. All numbers of function, Jacobian or Hessian evaluations correspond
+           to numbers of actual Python function calls. This means, for example,
+           that if a Jacobian is estimated by finite differences, then the
+           number of Jacobian evaluations will be zero and the number of
+           function evaluations will be incremented by all calls during the
+           finite difference estimation.
+
+    x : ndarray, shape (n,)
+        Solution found.
+    optimality : float
+        Infinity norm of the Lagrangian gradient at the solution.
+    constr_violation : float
+        Maximum constraint violation at the solution.
+    fun : float
+        Objective function at the solution.
+    grad : ndarray, shape (n,)
+        Gradient of the objective function at the solution.
+    lagrangian_grad : ndarray, shape (n,)
+        Gradient of the Lagrangian function at the solution.
+    nit : int
+        Total number of iterations.
+    nfev : int
+        Number of the objective function evaluations.
+    njev : int
+        Number of the objective function gradient evaluations.
+    nhev : int
+        Number of the objective function Hessian evaluations.
+    cg_niter : int
+        Total number of the conjugate gradient method iterations.
+    method : {'equality_constrained_sqp', 'tr_interior_point'}
+        Optimization method used.
+    constr : list of ndarray
+        List of constraint values at the solution.
+    jac : list of {ndarray, sparse matrix}
+        List of the Jacobian matrices of the constraints at the solution.
+    v : list of ndarray
+        List of the Lagrange multipliers for the constraints at the solution.
+        For an inequality constraint, a positive multiplier means that the
+        upper bound is active, a negative multiplier means that the lower
+        bound is active, and a zero multiplier means that the constraint is
+        not active.
+    constr_nfev : list of int
+        Number of constraint evaluations for each of the constraints.
+    constr_njev : list of int
+        Number of Jacobian matrix evaluations for each of the constraints.
+    constr_nhev : list of int
+        Number of Hessian evaluations for each of the constraints.
+    tr_radius : float
+        Radius of the trust region at the last iteration.
+    constr_penalty : float
+        Penalty parameter at the last iteration, see `initial_constr_penalty`.
+    barrier_tolerance : float
+        Tolerance for the barrier subproblem at the last iteration.
+        Only for problems with inequality constraints.
+    barrier_parameter : float
+        Barrier parameter at the last iteration. Only for problems
+        with inequality constraints.
+    execution_time : float
+        Total execution time.
+    message : str
+        Termination message.
+    status : {0, 1, 2, 3}
+        Termination status:
+
+            * 0 : The maximum number of function evaluations is exceeded.
+            * 1 : `gtol` termination condition is satisfied.
+            * 2 : `xtol` termination condition is satisfied.
+            * 3 : `callback` function requested termination.
+
+    cg_stop_cond : int
+        Reason for CG subproblem termination at the last iteration:
+
+            * 0 : CG subproblem not evaluated.
+            * 1 : Iteration limit was reached.
+            * 2 : Reached the trust-region boundary.
+            * 3 : Negative curvature detected.
+            * 4 : Tolerance was satisfied.
+
+    References
+    ----------
+    .. [1] Conn, A. R., Gould, N. I., & Toint, P. L.
+           Trust region methods. 2000. SIAM. p. 19.
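+
+    Examples
+    --------
+    A minimal sketch of reaching this routine through the public
+    ``scipy.optimize.minimize`` interface, which dispatches here for
+    ``method='trust-constr'`` (the problem below is illustrative only):
+
+    >>> import numpy as np
+    >>> from scipy.optimize import minimize, LinearConstraint
+    >>> fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2
+    >>> con = LinearConstraint([[1, 1]], -np.inf, 3)  # x0 + x1 <= 3
+    >>> res = minimize(fun, [2, 0], method='trust-constr', constraints=con)
+    >>> np.round(res.x, 2)
+    array([0.75, 2.25])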
+    """
+    x0 = np.atleast_1d(x0).astype(float)
+    n_vars = np.size(x0)
+    if hess is None:
+        if callable(hessp):
+            hess = HessianLinearOperator(hessp, n_vars)
+        else:
+            hess = BFGS()
+    if disp and verbose == 0:
+        verbose = 1
+
+    if bounds is not None:
+        finite_diff_bounds = strict_bounds(bounds.lb, bounds.ub,
+                                           bounds.keep_feasible, n_vars)
+    else:
+        finite_diff_bounds = (-np.inf, np.inf)
+
+    # Define Objective Function
+    objective = ScalarFunction(fun, x0, args, grad, hess,
+                               finite_diff_rel_step, finite_diff_bounds)
+
+    # Put constraints in list format when needed.
+    if isinstance(constraints, (NonlinearConstraint, LinearConstraint)):
+        constraints = [constraints]
+
+    # Prepare constraints.
+    prepared_constraints = [
+        PreparedConstraint(c, x0, sparse_jacobian, finite_diff_bounds)
+        for c in constraints]
+
+    # Check that all constraints are either sparse or dense.
+    n_sparse = sum(c.fun.sparse_jacobian for c in prepared_constraints)
+    if 0 < n_sparse < len(prepared_constraints):
+        raise ValueError("All constraints must have the same kind of the "
+                         "Jacobian --- either all sparse or all dense. "
+                         "You can set the sparsity globally by setting "
+                         "`sparse_jacobian` to either True of False.")
+    if prepared_constraints:
+        sparse_jacobian = n_sparse > 0
+
+    if bounds is not None:
+        if sparse_jacobian is None:
+            sparse_jacobian = True
+        prepared_constraints.append(PreparedConstraint(bounds, x0,
+                                                       sparse_jacobian))
+
+    # Concatenate initial constraints to the canonical form.
+    c_eq0, c_ineq0, J_eq0, J_ineq0 = initial_constraints_as_canonical(
+        n_vars, prepared_constraints, sparse_jacobian)
+
+    # Prepare all canonical constraints and concatenate it into one.
+    canonical_all = [CanonicalConstraint.from_PreparedConstraint(c)
+                     for c in prepared_constraints]
+
+    if len(canonical_all) == 0:
+        canonical = CanonicalConstraint.empty(n_vars)
+    elif len(canonical_all) == 1:
+        canonical = canonical_all[0]
+    else:
+        canonical = CanonicalConstraint.concatenate(canonical_all,
+                                                    sparse_jacobian)
+
+    # Generate the Hessian of the Lagrangian.
+    lagrangian_hess = LagrangianHessian(n_vars, objective.hess, canonical.hess)
+
+    # Choose appropriate method
+    if canonical.n_ineq == 0:
+        method = 'equality_constrained_sqp'
+    else:
+        method = 'tr_interior_point'
+
+    # Construct OptimizeResult
+    state = OptimizeResult(
+        nit=0, nfev=0, njev=0, nhev=0,
+        cg_niter=0, cg_stop_cond=0,
+        fun=objective.f, grad=objective.g,
+        lagrangian_grad=np.copy(objective.g),
+        constr=[c.fun.f for c in prepared_constraints],
+        jac=[c.fun.J for c in prepared_constraints],
+        constr_nfev=[0 for c in prepared_constraints],
+        constr_njev=[0 for c in prepared_constraints],
+        constr_nhev=[0 for c in prepared_constraints],
+        v=[c.fun.v for c in prepared_constraints],
+        method=method)
+
+    # Start counting
+    start_time = time.time()
+
+    # Define stop criteria
+    if method == 'equality_constrained_sqp':
+        def stop_criteria(state, x, last_iteration_failed,
+                          optimality, constr_violation,
+                          tr_radius, constr_penalty, cg_info):
+            state = update_state_sqp(state, x, last_iteration_failed,
+                                     objective, prepared_constraints,
+                                     start_time, tr_radius, constr_penalty,
+                                     cg_info)
+            if verbose == 2:
+                BasicReport.print_iteration(state.nit,
+                                            state.nfev,
+                                            state.cg_niter,
+                                            state.fun,
+                                            state.tr_radius,
+                                            state.optimality,
+                                            state.constr_violation)
+            elif verbose > 2:
+                SQPReport.print_iteration(state.nit,
+                                          state.nfev,
+                                          state.cg_niter,
+                                          state.fun,
+                                          state.tr_radius,
+                                          state.optimality,
+                                          state.constr_violation,
+                                          state.constr_penalty,
+                                          state.cg_stop_cond)
+            state.status = None
+            state.niter = state.nit  # Alias for callback (backward-compatibility)
+            if callback is not None and callback(np.copy(state.x), state):
+                state.status = 3
+            elif state.optimality < gtol and state.constr_violation < gtol:
+                state.status = 1
+            elif state.tr_radius < xtol:
+                state.status = 2
+            elif state.nit >= maxiter:
+                state.status = 0
+            return state.status in (0, 1, 2, 3)
+    elif method == 'tr_interior_point':
+        def stop_criteria(state, x, last_iteration_failed, tr_radius,
+                          constr_penalty, cg_info, barrier_parameter,
+                          barrier_tolerance):
+            state = update_state_ip(state, x, last_iteration_failed,
+                                    objective, prepared_constraints,
+                                    start_time, tr_radius, constr_penalty,
+                                    cg_info, barrier_parameter, barrier_tolerance)
+            if verbose == 2:
+                BasicReport.print_iteration(state.nit,
+                                            state.nfev,
+                                            state.cg_niter,
+                                            state.fun,
+                                            state.tr_radius,
+                                            state.optimality,
+                                            state.constr_violation)
+            elif verbose > 2:
+                IPReport.print_iteration(state.nit,
+                                         state.nfev,
+                                         state.cg_niter,
+                                         state.fun,
+                                         state.tr_radius,
+                                         state.optimality,
+                                         state.constr_violation,
+                                         state.constr_penalty,
+                                         state.barrier_parameter,
+                                         state.cg_stop_cond)
+            state.status = None
+            state.niter = state.nit  # Alias for callback (backward compatibility)
+            if callback is not None and callback(np.copy(state.x), state):
+                state.status = 3
+            elif state.optimality < gtol and state.constr_violation < gtol:
+                state.status = 1
+            elif (state.tr_radius < xtol
+                  and state.barrier_parameter < barrier_tol):
+                state.status = 2
+            elif state.nit >= maxiter:
+                state.status = 0
+            return state.status in (0, 1, 2, 3)
+
+    if verbose == 2:
+        BasicReport.print_header()
+    elif verbose > 2:
+        if method == 'equality_constrained_sqp':
+            SQPReport.print_header()
+        elif method == 'tr_interior_point':
+            IPReport.print_header()
+
+    # Call the appropriate lower-level routine to perform the optimization
+    if method == 'equality_constrained_sqp':
+        def fun_and_constr(x):
+            f = objective.fun(x)
+            c_eq, _ = canonical.fun(x)
+            return f, c_eq
+
+        def grad_and_jac(x):
+            g = objective.grad(x)
+            J_eq, _ = canonical.jac(x)
+            return g, J_eq
+
+        _, result = equality_constrained_sqp(
+            fun_and_constr, grad_and_jac, lagrangian_hess,
+            x0, objective.f, objective.g,
+            c_eq0, J_eq0,
+            stop_criteria, state,
+            initial_constr_penalty, initial_tr_radius,
+            factorization_method)
+
+    elif method == 'tr_interior_point':
+        _, result = tr_interior_point(
+            objective.fun, objective.grad, lagrangian_hess,
+            n_vars, canonical.n_ineq, canonical.n_eq,
+            canonical.fun, canonical.jac,
+            x0, objective.f, objective.g,
+            c_ineq0, J_ineq0, c_eq0, J_eq0,
+            stop_criteria,
+            canonical.keep_feasible,
+            xtol, state, initial_barrier_parameter,
+            initial_barrier_tolerance,
+            initial_constr_penalty, initial_tr_radius,
+            factorization_method)
+
+    # Status 3 occurs when the callback function requests termination,
+    # this is assumed to not be a success.
+    result.success = result.status in (1, 2)
+    result.message = TERMINATION_MESSAGES[result.status]
+
+    # Alias (for backward compatibility with 1.1.0)
+    result.niter = result.nit
+
+    if verbose == 2:
+        BasicReport.print_footer()
+    elif verbose > 2:
+        if method == 'equality_constrained_sqp':
+            SQPReport.print_footer()
+        elif method == 'tr_interior_point':
+            IPReport.print_footer()
+    if verbose >= 1:
+        print(result.message)
+        print("Number of iterations: {}, function evaluations: {}, "
+              "CG iterations: {}, optimality: {:.2e}, "
+              "constraint violation: {:.2e}, execution time: {:4.2} s."
+              .format(result.nit, result.nfev, result.cg_niter,
+                      result.optimality, result.constr_violation,
+                      result.execution_time))
+    return result
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/projections.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/projections.py
new file mode 100644
index 00000000..f8e2ff95
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/projections.py
@@ -0,0 +1,405 @@
+"""Basic linear factorizations needed by the solver."""
+
+from scipy.sparse import (bmat, csc_matrix, eye, issparse)
+from scipy.sparse.linalg import LinearOperator
+import scipy.linalg
+import scipy.sparse.linalg
+import numpy as np
+from warnings import warn
+
+try:
+    from sksparse.cholmod import cholesky_AAt
+    sksparse_available = True
+except ImportError:
+    sksparse_available = False
+
+__all__ = [
+    'orthogonality',
+    'projections',
+]
+
+
+def orthogonality(A, g):
+    """Measure orthogonality between a vector and the null space of a matrix.
+
+    Compute a measure of orthogonality between the null space
+    of the (possibly sparse) matrix ``A`` and a given vector ``g``.
+
+    The formula is a simplified (and cheaper) version of formula (3.13)
+    from [1]_.
+    ``orth = norm(A g, ord=2)/(norm(A, ord='fro')*norm(g, ord=2))``.
+
+    References
+    ----------
+    .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal.
+           "On the solution of equality constrained quadratic
+            programming problems arising in optimization."
+            SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395.
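+
+    Examples
+    --------
+    A hand-checkable sketch: for ``A = [[1, 0]]`` the null space is the
+    second coordinate axis, so a vector lying in the null space has measure
+    0 and a vector orthogonal to it has measure 1:
+
+    >>> import numpy as np
+    >>> float(orthogonality(np.array([[1., 0.]]), np.array([0., 1.])))
+    0.0
+    >>> float(orthogonality(np.array([[1., 0.]]), np.array([1., 0.])))
+    1.0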
+    """
+    # Compute vector norms
+    norm_g = np.linalg.norm(g)
+    # Compute the Frobenius norm of the matrix A
+    if issparse(A):
+        norm_A = scipy.sparse.linalg.norm(A, ord='fro')
+    else:
+        norm_A = np.linalg.norm(A, ord='fro')
+
+    # Check if norms are zero
+    if norm_g == 0 or norm_A == 0:
+        return 0
+
+    norm_A_g = np.linalg.norm(A.dot(g))
+    # Orthogonality measure
+    orth = norm_A_g / (norm_A*norm_g)
+    return orth
+
+
+def normal_equation_projections(A, m, n, orth_tol, max_refin, tol):
+    """Return linear operators for matrix A using ``NormalEquation`` approach.
+    """
+    # Cholesky factorization
+    factor = cholesky_AAt(A)
+
+    # z = x - A.T inv(A A.T) A x
+    def null_space(x):
+        v = factor(A.dot(x))
+        z = x - A.T.dot(v)
+
+        # Iterative refinement to improve roundoff
+        # errors described in [1]_, algorithm 5.1.
+        k = 0
+        while orthogonality(A, z) > orth_tol:
+            if k >= max_refin:
+                break
+            # z_next = z - A.T inv(A A.T) A z
+            v = factor(A.dot(z))
+            z = z - A.T.dot(v)
+            k += 1
+
+        return z
+
+    # z = inv(A A.T) A x
+    def least_squares(x):
+        return factor(A.dot(x))
+
+    # z = A.T inv(A A.T) x
+    def row_space(x):
+        return A.T.dot(factor(x))
+
+    return null_space, least_squares, row_space
+
+
+def augmented_system_projections(A, m, n, orth_tol, max_refin, tol):
+    """Return linear operators for matrix A - ``AugmentedSystem``."""
+    # Form augmented system
+    K = csc_matrix(bmat([[eye(n), A.T], [A, None]]))
+    # LU factorization
+    # TODO: Use a symmetric indefinite factorization
+    #       to solve the system twice as fast (because
+    #       of the symmetry).
+    try:
+        solve = scipy.sparse.linalg.factorized(K)
+    except RuntimeError:
+        warn("Singular Jacobian matrix. Using dense SVD decomposition to "
+             "perform the factorizations.")
+        return svd_factorization_projections(A.toarray(),
+                                             m, n, orth_tol,
+                                             max_refin, tol)
+
+    # z = x - A.T inv(A A.T) A x
+    # is computed solving the extended system:
+    # [I A.T] * [ z ] = [x]
+    # [A  O ]   [aux]   [0]
+    def null_space(x):
+        # v = [x]
+        #     [0]
+        v = np.hstack([x, np.zeros(m)])
+        # lu_sol = [ z ]
+        #          [aux]
+        lu_sol = solve(v)
+        z = lu_sol[:n]
+
+        # Iterative refinement to improve roundoff
+        # errors described in [1]_, algorithm 5.2.
+        k = 0
+        while orthogonality(A, z) > orth_tol:
+            if k >= max_refin:
+                break
+            # new_v = [x] - [I A.T] * [ z ]
+            #         [0]   [A  O ]   [aux]
+            new_v = v - K.dot(lu_sol)
+            # [I A.T] * [delta  z ] = new_v
+            # [A  O ]   [delta aux]
+            lu_update = solve(new_v)
+            #  [ z ] += [delta  z ]
+            #  [aux]    [delta aux]
+            lu_sol += lu_update
+            z = lu_sol[:n]
+            k += 1
+
+        # return z = x - A.T inv(A A.T) A x
+        return z
+
+    # z = inv(A A.T) A x
+    # is computed solving the extended system:
+    # [I A.T] * [aux] = [x]
+    # [A  O ]   [ z ]   [0]
+    def least_squares(x):
+        # v = [x]
+        #     [0]
+        v = np.hstack([x, np.zeros(m)])
+        # lu_sol = [aux]
+        #          [ z ]
+        lu_sol = solve(v)
+        # return z = inv(A A.T) A x
+        return lu_sol[n:m+n]
+
+    # z = A.T inv(A A.T) x
+    # is computed solving the extended system:
+    # [I A.T] * [ z ] = [0]
+    # [A  O ]   [aux]   [x]
+    def row_space(x):
+        # v = [0]
+        #     [x]
+        v = np.hstack([np.zeros(n), x])
+        # lu_sol = [ z ]
+        #          [aux]
+        lu_sol = solve(v)
+        # return z = A.T inv(A A.T) x
+        return lu_sol[:n]
+
+    return null_space, least_squares, row_space
+
+
+def qr_factorization_projections(A, m, n, orth_tol, max_refin, tol):
+    """Return linear operators for matrix A using ``QRFactorization`` approach.
+    """
+    # QRFactorization
+    Q, R, P = scipy.linalg.qr(A.T, pivoting=True, mode='economic')
+
+    if np.linalg.norm(R[-1, :], np.inf) < tol:
+        warn('Singular Jacobian matrix. Using SVD decomposition to ' +
+             'perform the factorizations.')
+        return svd_factorization_projections(A, m, n,
+                                             orth_tol,
+                                             max_refin,
+                                             tol)
+
+    # z = x - A.T inv(A A.T) A x
+    def null_space(x):
+        # v = P inv(R) Q.T x
+        aux1 = Q.T.dot(x)
+        aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)
+        v = np.zeros(m)
+        v[P] = aux2
+        z = x - A.T.dot(v)
+
+        # Iterative refinement to improve roundoff
+        # errors described in [1]_, algorithm 5.1.
+        k = 0
+        while orthogonality(A, z) > orth_tol:
+            if k >= max_refin:
+                break
+            # v = P inv(R) Q.T x
+            aux1 = Q.T.dot(z)
+            aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)
+            v[P] = aux2
+            # z_next = z - A.T v
+            z = z - A.T.dot(v)
+            k += 1
+
+        return z
+
+    # z = inv(A A.T) A x
+    def least_squares(x):
+        # z = P inv(R) Q.T x
+        aux1 = Q.T.dot(x)
+        aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)
+        z = np.zeros(m)
+        z[P] = aux2
+        return z
+
+    # z = A.T inv(A A.T) x
+    def row_space(x):
+        # z = Q inv(R.T) P.T x
+        aux1 = x[P]
+        aux2 = scipy.linalg.solve_triangular(R, aux1,
+                                             lower=False,
+                                             trans='T')
+        z = Q.dot(aux2)
+        return z
+
+    return null_space, least_squares, row_space
+
+
+def svd_factorization_projections(A, m, n, orth_tol, max_refin, tol):
+    """Return linear operators for matrix A using ``SVDFactorization`` approach.
+    """
+    # SVD Factorization
+    U, s, Vt = scipy.linalg.svd(A, full_matrices=False)
+
+    # Remove dimensions related with very small singular values
+    U = U[:, s > tol]
+    Vt = Vt[s > tol, :]
+    s = s[s > tol]
+
+    # z = x - A.T inv(A A.T) A x
+    def null_space(x):
+        # v = U 1/s V.T x = inv(A A.T) A x
+        aux1 = Vt.dot(x)
+        aux2 = 1/s*aux1
+        v = U.dot(aux2)
+        z = x - A.T.dot(v)
+
+        # Iterative refinement to improve roundoff
+        # errors described in [1]_, algorithm 5.1.
+        k = 0
+        while orthogonality(A, z) > orth_tol:
+            if k >= max_refin:
+                break
+            # v = U 1/s V.T x = inv(A A.T) A x
+            aux1 = Vt.dot(z)
+            aux2 = 1/s*aux1
+            v = U.dot(aux2)
+            # z_next = z - A.T v
+            z = z - A.T.dot(v)
+            k += 1
+
+        return z
+
+    # z = inv(A A.T) A x
+    def least_squares(x):
+        # z = U 1/s V.T x = inv(A A.T) A x
+        aux1 = Vt.dot(x)
+        aux2 = 1/s*aux1
+        z = U.dot(aux2)
+        return z
+
+    # z = A.T inv(A A.T) x
+    def row_space(x):
+        # z = V 1/s U.T x
+        aux1 = U.T.dot(x)
+        aux2 = 1/s*aux1
+        z = Vt.T.dot(aux2)
+        return z
+
+    return null_space, least_squares, row_space
+
+
+def projections(A, method=None, orth_tol=1e-12, max_refin=3, tol=1e-15):
+    """Return three linear operators related with a given matrix A.
+
+    Parameters
+    ----------
+    A : sparse matrix (or ndarray), shape (m, n)
+        Matrix ``A`` used in the projection.
+    method : string, optional
+        Method used to compute the given linear
+        operators. Should be one of:
+
+            - 'NormalEquation': The operators
+               will be computed using the
+               so-called normal equation approach
+               explained in [1]_. In order to do
+               so the Cholesky factorization of
+               ``(A A.T)`` is computed. Exclusive
+               for sparse matrices.
+            - 'AugmentedSystem': The operators
+               will be computed using the
+               so-called augmented system approach
+               explained in [1]_. Exclusive
+               for sparse matrices.
+            - 'QRFactorization': Compute projections
+               using QR factorization. Exclusive for
+               dense matrices.
+            - 'SVDFactorization': Compute projections
+               using SVD factorization. Exclusive for
+               dense matrices.
+
+    orth_tol : float, optional
+        Tolerance for iterative refinements.
+    max_refin : int, optional
+        Maximum number of iterative refinements.
+    tol : float, optional
+        Tolerance for singular values.
+
+    Returns
+    -------
+    Z : LinearOperator, shape (n, n)
+        Null-space operator. For a given vector ``x``,
+        the null space operator is equivalent to applying
+        a projection matrix ``P = I - A.T inv(A A.T) A``
+        to the vector. It can be shown that this is
+        equivalent to projecting ``x`` into the null space
+        of A.
+    LS : LinearOperator, shape (m, n)
+        Least-squares operator. For a given vector ``x``,
+        the least-squares operator is equivalent to applying a
+        pseudoinverse matrix ``pinv(A.T) = inv(A A.T) A``
+        to the vector. It can be shown that this vector
+        ``pinv(A.T) x`` is the least-squares solution to
+        ``A.T y = x``.
+    Y : LinearOperator, shape (n, m)
+        Row-space operator. For a given vector ``x``,
+        the row-space operator is equivalent to applying a
+        projection matrix ``Q = A.T inv(A A.T)``
+        to the vector. It can be shown that this
+        vector ``y = Q x`` is the minimum norm solution
+        of ``A y = x``.
+
+    Notes
+    -----
+    Uses iterative refinements described in [1]_
+    during the computation of ``Z`` in order to
+    cope with the possibility of large roundoff errors.
+
+    References
+    ----------
+    .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal.
+        "On the solution of equality constrained quadratic
+        programming problems arising in optimization."
+        SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395.
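+
+    Examples
+    --------
+    A minimal dense sketch: for ``A = [[1, 1]]`` the null space is spanned
+    by ``[1, -1]``, so ``Z`` removes the component of a vector along
+    ``[1, 1]`` and ``Y`` returns minimum norm solutions of ``A y = x``:
+
+    >>> import numpy as np
+    >>> Z, LS, Y = projections(np.array([[1., 1.]]))
+    >>> np.allclose(Z.matvec(np.array([1., 0.])), [0.5, -0.5])
+    True
+    >>> np.allclose(Y.matvec(np.array([1.])), [0.5, 0.5])
+    True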
+    """
+    m, n = np.shape(A)
+
+    # The factorization of an empty matrix
+    # only works for the sparse representation.
+    if m*n == 0:
+        A = csc_matrix(A)
+
+    # Check Argument
+    if issparse(A):
+        if method is None:
+            method = "AugmentedSystem"
+        if method not in ("NormalEquation", "AugmentedSystem"):
+            raise ValueError("Method not allowed for sparse matrix.")
+        if method == "NormalEquation" and not sksparse_available:
+            warn("The 'NormalEquation' option requires scikit-sparse, "
+                 "which is not available. Using the 'AugmentedSystem' "
+                 "option instead.", ImportWarning)
+            method = 'AugmentedSystem'
+    else:
+        if method is None:
+            method = "QRFactorization"
+        if method not in ("QRFactorization", "SVDFactorization"):
+            raise ValueError("Method not allowed for dense array.")
+
+    if method == 'NormalEquation':
+        null_space, least_squares, row_space \
+            = normal_equation_projections(A, m, n, orth_tol, max_refin, tol)
+    elif method == 'AugmentedSystem':
+        null_space, least_squares, row_space \
+            = augmented_system_projections(A, m, n, orth_tol, max_refin, tol)
+    elif method == "QRFactorization":
+        null_space, least_squares, row_space \
+            = qr_factorization_projections(A, m, n, orth_tol, max_refin, tol)
+    elif method == "SVDFactorization":
+        null_space, least_squares, row_space \
+            = svd_factorization_projections(A, m, n, orth_tol, max_refin, tol)
+
+    Z = LinearOperator((n, n), null_space)
+    LS = LinearOperator((m, n), least_squares)
+    Y = LinearOperator((n, m), row_space)
+
+    return Z, LS, Y
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/qp_subproblem.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/qp_subproblem.py
new file mode 100644
index 00000000..a039a773
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/qp_subproblem.py
@@ -0,0 +1,637 @@
+"""Equality-constrained quadratic programming solvers."""
+
+from scipy.sparse import (linalg, bmat, csc_matrix)
+from math import copysign
+import numpy as np
+from numpy.linalg import norm
+
+__all__ = [
+    'eqp_kktfact',
+    'sphere_intersections',
+    'box_intersections',
+    'box_sphere_intersections',
+    'inside_box_boundaries',
+    'modified_dogleg',
+    'projected_cg'
+]
+
+
+# For comparison with the projected CG
+def eqp_kktfact(H, c, A, b):
+    """Solve equality-constrained quadratic programming (EQP) problem.
+
+    Solve ``min 1/2 x.T H x + x.T c`` subject to ``A x + b = 0``
+    using direct factorization of the KKT system.
+
+    Parameters
+    ----------
+    H : sparse matrix, shape (n, n)
+        Hessian matrix of the EQP problem.
+    c : array_like, shape (n,)
+        Gradient of the quadratic objective function.
+    A : sparse matrix
+        Jacobian matrix of the EQP problem.
+    b : array_like, shape (m,)
+        Right-hand side of the constraint equation.
+
+    Returns
+    -------
+    x : array_like, shape (n,)
+        Solution of the KKT problem.
+    lagrange_multipliers : ndarray, shape (m,)
+        Lagrange multipliers of the KKT problem.
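+
+    Examples
+    --------
+    A small illustrative sketch: minimize ``1/2 ||x||^2`` subject to
+    ``x[0] + x[1] = 1`` (that is, ``A x + b = 0`` with ``b = -1``):
+
+    >>> import numpy as np
+    >>> from scipy.sparse import csc_matrix
+    >>> H = csc_matrix(np.eye(2))
+    >>> A = csc_matrix(np.array([[1., 1.]]))
+    >>> x, lmbda = eqp_kktfact(H, np.zeros(2), A, np.array([-1.]))
+    >>> np.allclose(x, [0.5, 0.5])
+    True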
+    """
+    n, = np.shape(c)  # Number of parameters
+    m, = np.shape(b)  # Number of constraints
+
+    # Karush-Kuhn-Tucker matrix of coefficients.
+    # Defined as in Nocedal/Wright "Numerical
+    # Optimization" p.452 in Eq. (16.4).
+    kkt_matrix = csc_matrix(bmat([[H, A.T], [A, None]]))
+    # Vector of coefficients.
+    kkt_vec = np.hstack([-c, -b])
+
+    # TODO: Use a symmetric indefinite factorization
+    #       to solve the system twice as fast (because
+    #       of the symmetry).
+    lu = linalg.splu(kkt_matrix)
+    kkt_sol = lu.solve(kkt_vec)
+    x = kkt_sol[:n]
+    lagrange_multipliers = -kkt_sol[n:n+m]
+
+    return x, lagrange_multipliers
+
+
+def sphere_intersections(z, d, trust_radius,
+                         entire_line=False):
+    """Find the intersection between segment (or line) and spherical constraints.
+
+    Find the intersection between the segment (or line) defined by the
+    parametric equation ``x(t) = z + t*d`` and the ball
+    ``||x|| <= trust_radius``.
+
+    Parameters
+    ----------
+    z : array_like, shape (n,)
+        Initial point.
+    d : array_like, shape (n,)
+        Direction.
+    trust_radius : float
+        Ball radius.
+    entire_line : bool, optional
+        When ``True``, the function returns the intersection between the line
+        ``x(t) = z + t*d`` (``t`` can assume any value) and the ball
+        ``||x|| <= trust_radius``. When ``False``, the function returns the intersection
+        between the segment ``x(t) = z + t*d``, ``0 <= t <= 1``, and the ball.
+
+    Returns
+    -------
+    ta, tb : float
+        The line/segment ``x(t) = z + t*d`` is inside the ball
+        for ``ta <= t <= tb``.
+    intersect : bool
+        When ``True``, there is an intersection between the line/segment
+        and the sphere; when ``False``, there is no intersection.
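+
+    Examples
+    --------
+    A hand-checkable sketch: the segment from the origin along the first
+    axis meets the unit ball for ``0 <= t <= 1``, while the entire line
+    meets it for ``-1 <= t <= 1``:
+
+    >>> import numpy as np
+    >>> ta, tb, ok = sphere_intersections(np.zeros(2), np.array([1., 0.]), 1.0)
+    >>> float(ta), float(tb), ok
+    (0.0, 1.0, True)
+    >>> ta, tb, ok = sphere_intersections(np.zeros(2), np.array([1., 0.]),
+    ...                                   1.0, entire_line=True)
+    >>> float(ta), float(tb), ok
+    (-1.0, 1.0, True)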
+    """
+    # Special case when d=0
+    if norm(d) == 0:
+        return 0, 0, False
+    # Check for inf trust_radius
+    if np.isinf(trust_radius):
+        if entire_line:
+            ta = -np.inf
+            tb = np.inf
+        else:
+            ta = 0
+            tb = 1
+        intersect = True
+        return ta, tb, intersect
+
+    a = np.dot(d, d)
+    b = 2 * np.dot(z, d)
+    c = np.dot(z, z) - trust_radius**2
+    discriminant = b*b - 4*a*c
+    if discriminant < 0:
+        intersect = False
+        return 0, 0, intersect
+    sqrt_discriminant = np.sqrt(discriminant)
+
+    # The following calculation is mathematically
+    # equivalent to:
+    # ta = (-b - sqrt_discriminant) / (2*a)
+    # tb = (-b + sqrt_discriminant) / (2*a)
+    # but produces smaller roundoff errors.
+    # See Matrix Computations, p. 97,
+    # for a better justification.
+    aux = b + copysign(sqrt_discriminant, b)
+    ta = -aux / (2*a)
+    tb = -2*c / aux
+    ta, tb = sorted([ta, tb])
+
+    if entire_line:
+        intersect = True
+    else:
+        # Check whether the intersection happens
+        # within the segment 0 <= t <= 1.
+        if tb < 0 or ta > 1:
+            intersect = False
+            ta = 0
+            tb = 0
+        else:
+            intersect = True
+            # Restrict intersection interval
+            # between 0 and 1.
+            ta = max(0, ta)
+            tb = min(1, tb)
+
+    return ta, tb, intersect
+
+
+def box_intersections(z, d, lb, ub,
+                      entire_line=False):
+    """Find the intersection between segment (or line) and box constraints.
+
+    Find the intersection between the segment (or line) defined by the
+    parametric equation ``x(t) = z + t*d`` and the rectangular box
+    ``lb <= x <= ub``.
+
+    Parameters
+    ----------
+    z : array_like, shape (n,)
+        Initial point.
+    d : array_like, shape (n,)
+        Direction.
+    lb : array_like, shape (n,)
+        Lower bounds to each one of the components of ``x``. Used
+        to delimit the rectangular box.
+    ub : array_like, shape (n, )
+        Upper bounds to each one of the components of ``x``. Used
+        to delimit the rectangular box.
+    entire_line : bool, optional
+        When ``True``, the function returns the intersection between the line
+        ``x(t) = z + t*d`` (``t`` can assume any value) and the rectangular
+        box. When ``False``, the function returns the intersection between the segment
+        ``x(t) = z + t*d``, ``0 <= t <= 1``, and the rectangular box.
+
+    Returns
+    -------
+    ta, tb : float
+        The line/segment ``x(t) = z + t*d`` is inside the box
+        for ``ta <= t <= tb``.
+    intersect : bool
+        When ``True``, there is an intersection between the line (or segment)
+        and the rectangular box; when ``False``, there is no intersection.
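+
+    Examples
+    --------
+    A small sketch: the segment from ``[-1, 0]`` along direction ``[2, 0]``
+    stays inside the box ``[0, 1] x [-1, 1]`` for ``0.5 <= t <= 1``:
+
+    >>> import numpy as np
+    >>> ta, tb, ok = box_intersections(np.array([-1., 0.]), np.array([2., 0.]),
+    ...                                np.array([0., -1.]), np.array([1., 1.]))
+    >>> float(ta), float(tb), ok
+    (0.5, 1.0, True)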
+    """
+    # Make sure it is a numpy array
+    z = np.asarray(z)
+    d = np.asarray(d)
+    lb = np.asarray(lb)
+    ub = np.asarray(ub)
+    # Special case when d=0
+    if norm(d) == 0:
+        return 0, 0, False
+
+    # Get values for which d==0
+    zero_d = (d == 0)
+    # If the boundaries are not satisfied for some coordinate
+    # for which "d" is zero, there is no box-line intersection.
+    if (z[zero_d] < lb[zero_d]).any() or (z[zero_d] > ub[zero_d]).any():
+        intersect = False
+        return 0, 0, intersect
+    # Remove values for which d is zero
+    not_zero_d = np.logical_not(zero_d)
+    z = z[not_zero_d]
+    d = d[not_zero_d]
+    lb = lb[not_zero_d]
+    ub = ub[not_zero_d]
+
+    # Find a series of intervals (t_lb[i], t_ub[i]).
+    t_lb = (lb-z) / d
+    t_ub = (ub-z) / d
+    # Get the intersection of all those intervals.
+    ta = max(np.minimum(t_lb, t_ub))
+    tb = min(np.maximum(t_lb, t_ub))
+
+    # Check if intersection is feasible
+    if ta <= tb:
+        intersect = True
+    else:
+        intersect = False
+    # Check whether the intersection happens within the segment 0 <= t <= 1.
+    if not entire_line:
+        if tb < 0 or ta > 1:
+            intersect = False
+            ta = 0
+            tb = 0
+        else:
+            # Restrict intersection interval between 0 and 1.
+            ta = max(0, ta)
+            tb = min(1, tb)
+
+    return ta, tb, intersect
+
+
+def box_sphere_intersections(z, d, lb, ub, trust_radius,
+                             entire_line=False,
+                             extra_info=False):
+    """Find the intersection between segment (or line) and box/sphere constraints.
+
+    Find the intersection between the segment (or line) defined by the
+    parametric equation ``x(t) = z + t*d``, the rectangular box
+    ``lb <= x <= ub`` and the ball ``||x|| <= trust_radius``.
+
+    Parameters
+    ----------
+    z : array_like, shape (n,)
+        Initial point.
+    d : array_like, shape (n,)
+        Direction.
+    lb : array_like, shape (n,)
+        Lower bounds to each one of the components of ``x``. Used
+        to delimit the rectangular box.
+    ub : array_like, shape (n, )
+        Upper bounds to each one of the components of ``x``. Used
+        to delimit the rectangular box.
+    trust_radius : float
+        Ball radius.
+    entire_line : bool, optional
+        When ``True``, the function returns the intersection between the line
+        ``x(t) = z + t*d`` (``t`` can assume any value) and the constraints.
+        When ``False``, the function returns the intersection between the segment
+        ``x(t) = z + t*d``, ``0 <= t <= 1`` and the constraints.
+    extra_info : bool, optional
+        When ``True``, the function returns ``intersect_sphere`` and ``intersect_box``.
+
+    Returns
+    -------
+    ta, tb : float
+        The line/segment ``x(t) = z + t*d`` is inside the rectangular box and
+        inside the ball for ``ta <= t <= tb``.
+    intersect : bool
+        When ``True``, there is an intersection between the line (or segment)
+        and both constraints; when ``False``, there is no intersection.
+    sphere_info : dict, optional
+        Dictionary ``{ta, tb, intersect}`` containing the interval ``[ta, tb]``
+        for which the line intercepts the ball, and a boolean value indicating
+        whether the sphere is intersected by the line.
+    box_info : dict, optional
+        Dictionary ``{ta, tb, intersect}`` containing the interval ``[ta, tb]``
+        for which the line intercepts the box, and a boolean value indicating
+        whether the box is intersected by the line.
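+
+    Examples
+    --------
+    A small sketch (illustrative values): along the first axis, the box
+    below clips the sphere interval ``[-1, 1]`` down to ``[-0.5, 0.5]``:
+
+    >>> import numpy as np
+    >>> ta, tb, ok = box_sphere_intersections(
+    ...     np.zeros(2), np.array([1., 0.]), np.array([-0.5, -0.5]),
+    ...     np.array([0.5, 0.5]), 1.0, entire_line=True)
+    >>> float(ta), float(tb), ok
+    (-0.5, 0.5, True)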
+    """
+    ta_b, tb_b, intersect_b = box_intersections(z, d, lb, ub,
+                                                entire_line)
+    ta_s, tb_s, intersect_s = sphere_intersections(z, d,
+                                                   trust_radius,
+                                                   entire_line)
+    ta = np.maximum(ta_b, ta_s)
+    tb = np.minimum(tb_b, tb_s)
+    if intersect_b and intersect_s and ta <= tb:
+        intersect = True
+    else:
+        intersect = False
+
+    if extra_info:
+        sphere_info = {'ta': ta_s, 'tb': tb_s, 'intersect': intersect_s}
+        box_info = {'ta': ta_b, 'tb': tb_b, 'intersect': intersect_b}
+        return ta, tb, intersect, sphere_info, box_info
+    else:
+        return ta, tb, intersect
+
+
+def inside_box_boundaries(x, lb, ub):
+    """Check if lb <= x <= ub."""
+    return (lb <= x).all() and (x <= ub).all()
+
+
+def reinforce_box_boundaries(x, lb, ub):
+    """Return clipped value of x"""
+    return np.minimum(np.maximum(x, lb), ub)
+
+
+def modified_dogleg(A, Y, b, trust_radius, lb, ub):
+    """Approximately  minimize ``1/2*|| A x + b ||^2`` inside trust-region.
+
+    Approximately solve the problem of minimizing ``1/2*|| A x + b ||^2``
+    subject to ``||x|| < Delta`` and ``lb <= x <= ub`` using a modification
+    of the classical dogleg approach.
+
+    Parameters
+    ----------
+    A : LinearOperator (or sparse matrix or ndarray), shape (m, n)
+        Matrix ``A`` in the minimization problem. It should have
+        dimension ``(m, n)`` such that ``m < n``.
+    Y : LinearOperator (or sparse matrix or ndarray), shape (n, m)
+        LinearOperator that applies the projection matrix
+        ``Q = A.T inv(A A.T)`` to a vector. The obtained vector
+        ``y = Q x`` is the minimum norm solution of ``A y = x``.
+    b : array_like, shape (m,)
+        Vector ``b`` in the minimization problem.
+    trust_radius : float
+        Trust radius to be considered. Delimits a sphere boundary
+        to the problem.
+    lb : array_like, shape (n,)
+        Lower bounds to each one of the components of ``x``.
+        It is expected that ``lb <= 0``, otherwise the algorithm
+        may fail. If ``lb[i] = -Inf``, the lower
+        bound for the ith component is just ignored.
+    ub : array_like, shape (n, )
+        Upper bounds to each one of the components of ``x``.
+        It is expected that ``ub >= 0``, otherwise the algorithm
+        may fail. If ``ub[i] = Inf``, the upper bound for the ith
+        component is just ignored.
+
+    Returns
+    -------
+    x : array_like, shape (n,)
+        Solution to the problem.
+
+    Notes
+    -----
+    Based on implementations described in pp. 885-886 from [1]_.
+
+    References
+    ----------
+    .. [1] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal.
+           "An interior point algorithm for large-scale nonlinear
+           programming." SIAM Journal on Optimization 9.4 (1999): 877-900.
+    """
+    # Compute minimum norm minimizer of 1/2*|| A x + b ||^2.
+    newton_point = -Y.dot(b)
+    # Check for interior point
+    if inside_box_boundaries(newton_point, lb, ub)  \
+       and norm(newton_point) <= trust_radius:
+        x = newton_point
+        return x
+
+    # Compute gradient vector ``g = A.T b``
+    g = A.T.dot(b)
+    # Compute the Cauchy point
+    # ``cauchy_point = -(g.T g / (g.T A.T A g)) g``.
+    A_g = A.dot(g)
+    cauchy_point = -np.dot(g, g) / np.dot(A_g, A_g) * g
+    # Origin
+    origin_point = np.zeros_like(cauchy_point)
+
+    # Check the segment between cauchy_point and newton_point
+    # for a possible solution.
+    z = cauchy_point
+    p = newton_point - cauchy_point
+    _, alpha, intersect = box_sphere_intersections(z, p, lb, ub,
+                                                   trust_radius)
+    if intersect:
+        x1 = z + alpha*p
+    else:
+        # Check the segment between the origin and cauchy_point
+        # for a possible solution.
+        z = origin_point
+        p = cauchy_point
+        _, alpha, _ = box_sphere_intersections(z, p, lb, ub,
+                                               trust_radius)
+        x1 = z + alpha*p
+
+    # Check the segment between origin and newton_point
+    # for a possible solution.
+    z = origin_point
+    p = newton_point
+    _, alpha, _ = box_sphere_intersections(z, p, lb, ub,
+                                           trust_radius)
+    x2 = z + alpha*p
+
+    # Return the best solution among x1 and x2.
+    if norm(A.dot(x1) + b) < norm(A.dot(x2) + b):
+        return x1
+    else:
+        return x2
+
+
+def projected_cg(H, c, Z, Y, b, trust_radius=np.inf,
+                 lb=None, ub=None, tol=None,
+                 max_iter=None, max_infeasible_iter=None,
+                 return_all=False):
+    """Solve EQP problem with projected CG method.
+
+    Solve equality-constrained quadratic programming problem
+    ``min 1/2 x.T H x + x.T c`` subject to ``A x + b = 0`` and,
+    possibly, to trust region constraints ``||x|| < trust_radius``
+    and box constraints ``lb <= x <= ub``.
+
+    Parameters
+    ----------
+    H : LinearOperator (or sparse matrix or ndarray), shape (n, n)
+        Operator for computing ``H v``.
+    c : array_like, shape (n,)
+        Gradient of the quadratic objective function.
+    Z : LinearOperator (or sparse matrix or ndarray), shape (n, n)
+        Operator for projecting ``x`` into the null space of A.
+    Y : LinearOperator (or sparse matrix or ndarray), shape (n, m)
+        Operator that, for a given vector ``b``, computes the minimum
+        norm solution of ``A x + b = 0``.
+    b : array_like, shape (m,)
+        Right-hand side of the constraint equation.
+    trust_radius : float, optional
+        Trust radius to be considered. By default, uses ``trust_radius=inf``,
+        which means no trust radius at all.
+    lb : array_like, shape (n,), optional
+        Lower bounds to each one of the components of ``x``.
+        If ``lb[i] = -Inf`` the lower bound for the i-th
+        component is just ignored (default).
+    ub : array_like, shape (n, ), optional
+        Upper bounds to each one of the components of ``x``.
+        If ``ub[i] = Inf`` the upper bound for the i-th
+        component is just ignored (default).
+    tol : float, optional
+        Tolerance used to interrupt the algorithm.
+    max_iter : int, optional
+        Maximum number of algorithm iterations, where ``max_iter <= n-m``.
+        By default, uses ``max_iter = n-m``.
+    max_infeasible_iter : int, optional
+        Maximum infeasible (regarding box constraints) iterations the
+        algorithm is allowed to take.
+        By default, uses ``max_infeasible_iter = n-m``.
+    return_all : bool, optional
+        When ``True``, returns the list of all vectors through the iterations.
+
+    Returns
+    -------
+    x : array_like, shape (n,)
+        Solution of the EQP problem.
+    info : dict
+        Dictionary containing the following:
+
+            - niter : Number of iterations.
+            - stop_cond : Reason for algorithm termination:
+                1. Iteration limit was reached;
+                2. Reached the trust-region boundary;
+                3. Negative curvature detected;
+                4. Tolerance was satisfied.
+            - allvecs : List containing all intermediary vectors (optional).
+            - hits_boundary : True if the proposed step is on the boundary
+              of the trust region.
+
+    Notes
+    -----
+    Implementation of Algorithm 6.2 on [1]_.
+
+    In the absence of spherical and box constraints, given sufficiently
+    many iterations, the method returns a truly optimal result.
+    In the presence of those constraints, the value returned is only
+    an inexpensive approximation of the optimal value.
+
+    References
+    ----------
+    .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal.
+           "On the solution of equality constrained quadratic
+            programming problems arising in optimization."
+            SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395.
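+
+    Examples
+    --------
+    A minimal sketch reusing the dense projections from this package:
+    minimize ``1/2 ||x||^2`` subject to ``x[0] + x[1] = 1``; the minimum
+    norm starting point is already optimal, so the tolerance test fires
+    immediately:
+
+    >>> import numpy as np
+    >>> from scipy.optimize._trustregion_constr.projections import projections
+    >>> Z, LS, Y = projections(np.array([[1., 1.]]))
+    >>> x, info = projected_cg(np.eye(2), np.zeros(2), Z, Y, np.array([-1.]))
+    >>> np.allclose(x, [0.5, 0.5])
+    True
+    >>> info['stop_cond']  # tolerance satisfied
+    4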
+    """
+    CLOSE_TO_ZERO = 1e-25
+
+    n, = np.shape(c)  # Number of parameters
+    m, = np.shape(b)  # Number of constraints
+
+    # Initial Values
+    x = Y.dot(-b)
+    r = Z.dot(H.dot(x) + c)
+    g = Z.dot(r)
+    p = -g
+
+    # Store ``x`` value
+    if return_all:
+        allvecs = [x]
+    # Values for the first iteration
+    H_p = H.dot(p)
+    rt_g = norm(g)**2  # g.T g = r.T Z g = r.T g (ref [1]_ p.1389)
+
+    # If norm(x) > trust_radius, the problem does not have a solution.
+    tr_distance = trust_radius - norm(x)
+    if tr_distance < 0:
+        raise ValueError("Trust region problem does not have a solution.")
+    # If x == trust_radius, then x is the solution
+    # to the optimization problem, since x is the
+    # minimum norm solution to Ax=b.
+    elif tr_distance < CLOSE_TO_ZERO:
+        info = {'niter': 0, 'stop_cond': 2, 'hits_boundary': True}
+        if return_all:
+            allvecs.append(x)
+            info['allvecs'] = allvecs
+        return x, info
+
+    # Set default tolerance
+    if tol is None:
+        tol = max(min(0.01 * np.sqrt(rt_g), 0.1 * rt_g), CLOSE_TO_ZERO)
+    # Set default lower and upper bounds
+    if lb is None:
+        lb = np.full(n, -np.inf)
+    if ub is None:
+        ub = np.full(n, np.inf)
+    # Set maximum iterations
+    if max_iter is None:
+        max_iter = n-m
+    max_iter = min(max_iter, n-m)
+    # Set maximum infeasible iterations
+    if max_infeasible_iter is None:
+        max_infeasible_iter = n-m
+
+    hits_boundary = False
+    stop_cond = 1
+    counter = 0
+    last_feasible_x = np.zeros_like(x)
+    k = 0
+    for i in range(max_iter):
+        # Stop criteria - Tolerance : r.T g < tol
+        if rt_g < tol:
+            stop_cond = 4
+            break
+        k += 1
+        # Compute curvature
+        pt_H_p = H_p.dot(p)
+        # Stop criteria - Negative curvature
+        if pt_H_p <= 0:
+            if np.isinf(trust_radius):
+                raise ValueError("Negative curvature not allowed "
+                                 "for unrestricted problems.")
+            else:
+                # Find intersection with constraints
+                _, alpha, intersect = box_sphere_intersections(
+                    x, p, lb, ub, trust_radius, entire_line=True)
+                # Update solution
+                if intersect:
+                    x = x + alpha*p
+                # Reinforce variables are inside box constraints.
+                # This is only necessary because of roundoff errors.
+                x = reinforce_box_boundaries(x, lb, ub)
+                # Attribute information
+                stop_cond = 3
+                hits_boundary = True
+                break
+
+        # Get next step
+        alpha = rt_g / pt_H_p
+        x_next = x + alpha*p
+
+        # Stop criteria - Hits boundary
+        if np.linalg.norm(x_next) >= trust_radius:
+            # Find intersection with box constraints
+            _, theta, intersect = box_sphere_intersections(x, alpha*p, lb, ub,
+                                                           trust_radius)
+            # Update solution
+            if intersect:
+                x = x + theta*alpha*p
+            # Push the variables back inside the box constraints.
+            # This is only necessary because of roundoff errors.
+            x = reinforce_box_boundaries(x, lb, ub)
+            # Attribute information
+            stop_cond = 2
+            hits_boundary = True
+            break
+
+        # Check whether ``x_next`` is inside the box; start counting if not.
+        if inside_box_boundaries(x_next, lb, ub):
+            counter = 0
+        else:
+            counter += 1
+        # While outside the box constraints, keep looking for intersections.
+        if counter > 0:
+            _, theta, intersect = box_sphere_intersections(x, alpha*p, lb, ub,
+                                                           trust_radius)
+            if intersect:
+                last_feasible_x = x + theta*alpha*p
+                # Push the variables back inside the box constraints.
+                # This is only necessary because of roundoff errors.
+                last_feasible_x = reinforce_box_boundaries(last_feasible_x,
+                                                           lb, ub)
+                counter = 0
+        # Stop after too many infeasible (w.r.t. the box constraints) iterations.
+        if counter > max_infeasible_iter:
+            break
+        # Store ``x_next`` value
+        if return_all:
+            allvecs.append(x_next)
+
+        # Update residual
+        r_next = r + alpha*H_p
+        # Project residual g+ = Z r+
+        g_next = Z.dot(r_next)
+        # Compute the next conjugate direction ``p``
+        rt_g_next = norm(g_next)**2  # g.T g = r.T g (ref [1]_ p.1389)
+        beta = rt_g_next / rt_g
+        p = - g_next + beta*p
+        # Prepare for next iteration
+        x = x_next
+        g = g_next
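+        # Note (editorial): keeping the projected residual ``g_next`` in
+        # place of ``r_next`` is equivalent here, since ``Z`` is a
+        # projection (Z.dot(Z.dot(v)) == Z.dot(v)).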
+        r = g_next
+        rt_g = norm(g)**2  # g.T g = r.T Z g = r.T g (ref [1]_ p.1389)
+        H_p = H.dot(p)
+
+    if not inside_box_boundaries(x, lb, ub):
+        x = last_feasible_x
+        hits_boundary = True
+    info = {'niter': k, 'stop_cond': stop_cond,
+            'hits_boundary': hits_boundary}
+    if return_all:
+        info['allvecs'] = allvecs
+    return x, info
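+
+
+# ----------------------------------------------------------------------
+# Editorial usage sketch (not part of upstream SciPy): running this
+# module directly solves the equality-constrained QP of Nocedal & Wright,
+# "Numerical Optimization", Example 16.2 -- the same problem exercised by
+# the test suite -- with the projected CG iteration above.
+if __name__ == "__main__":  # pragma: no cover
+    from scipy.sparse import csc_matrix
+
+    from scipy.optimize._trustregion_constr.projections import projections
+
+    H_demo = csc_matrix([[6, 2, 1], [2, 5, 2], [1, 2, 4]])
+    A_demo = csc_matrix([[1, 0, 1], [0, 1, 1]])
+    c_demo = np.array([-8, -3, -3])
+    b_demo = -np.array([3, 0])
+    # Z projects onto the null space of A_demo; Y maps z to the
+    # minimum-norm solution of A_demo x = z.
+    Z_demo, _, Y_demo = projections(A_demo)
+    x_demo, info_demo = projected_cg(H_demo, c_demo, Z_demo, Y_demo, b_demo)
+    print(x_demo)     # approximately [2, -1, 1]
+    print(info_demo)  # stop_cond == 4: tolerance criterion met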
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/report.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/report.py
new file mode 100644
index 00000000..1631bf21
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/report.py
@@ -0,0 +1,52 @@
+"""Progress report printers."""
+
+from __future__ import annotations
+from typing import List
+
+class ReportBase:
+    COLUMN_NAMES: List[str] = NotImplemented
+    COLUMN_WIDTHS: List[int] = NotImplemented
+    ITERATION_FORMATS: List[str] = NotImplemented
+
+    @classmethod
+    def print_header(cls):
+        fmt = ("|"
+               + "|".join(["{{:^{}}}".format(x) for x in cls.COLUMN_WIDTHS])
+               + "|")
+        separators = ['-' * x for x in cls.COLUMN_WIDTHS]
+        print(fmt.format(*cls.COLUMN_NAMES))
+        print(fmt.format(*separators))
+
+    @classmethod
+    def print_iteration(cls, *args):
+        iteration_format = ["{{:{}}}".format(x) for x in cls.ITERATION_FORMATS]
+        fmt = "|" + "|".join(iteration_format) + "|"
+        print(fmt.format(*args))
+
+    @classmethod
+    def print_footer(cls):
+        print()
+
+
+class BasicReport(ReportBase):
+    COLUMN_NAMES = ["niter", "f evals", "CG iter", "obj func", "tr radius",
+                    "opt", "c viol"]
+    COLUMN_WIDTHS = [7, 7, 7, 13, 10, 10, 10]
+    ITERATION_FORMATS = ["^7", "^7", "^7", "^+13.4e",
+                         "^10.2e", "^10.2e", "^10.2e"]
+
+
+class SQPReport(ReportBase):
+    COLUMN_NAMES = ["niter", "f evals", "CG iter", "obj func", "tr radius",
+                    "opt", "c viol", "penalty", "CG stop"]
+    COLUMN_WIDTHS = [7, 7, 7, 13, 10, 10, 10, 10, 7]
+    ITERATION_FORMATS = ["^7", "^7", "^7", "^+13.4e", "^10.2e", "^10.2e",
+                         "^10.2e", "^10.2e", "^7"]
+
+
+class IPReport(ReportBase):
+    COLUMN_NAMES = ["niter", "f evals", "CG iter", "obj func", "tr radius",
+                    "opt", "c viol", "penalty", "barrier param", "CG stop"]
+    COLUMN_WIDTHS = [7, 7, 7, 13, 10, 10, 10, 10, 13, 7]
+    ITERATION_FORMATS = ["^7", "^7", "^7", "^+13.4e", "^10.2e", "^10.2e",
+                         "^10.2e", "^10.2e", "^13.2e", "^7"]
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/tests/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/tests/test_canonical_constraint.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/tests/test_canonical_constraint.py
new file mode 100644
index 00000000..452b327d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/tests/test_canonical_constraint.py
@@ -0,0 +1,296 @@
+import numpy as np
+from numpy.testing import assert_array_equal, assert_equal
+from scipy.optimize._constraints import (NonlinearConstraint, Bounds,
+                                         PreparedConstraint)
+from scipy.optimize._trustregion_constr.canonical_constraint \
+    import CanonicalConstraint, initial_constraints_as_canonical
+
+
+def create_quadratic_function(n, m, rng):
+    a = rng.rand(m)
+    A = rng.rand(m, n)
+    H = rng.rand(m, n, n)
+    HT = np.transpose(H, (1, 2, 0))
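+    # HT[j, k, i] == H[i, j, k], so HT.dot(v) == sum_i v[i] * H[i].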
+
+    def fun(x):
+        return a + A.dot(x) + 0.5 * H.dot(x).dot(x)
+
+    def jac(x):
+        return A + H.dot(x)
+
+    def hess(x, v):
+        return HT.dot(v)
+
+    return fun, jac, hess
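+
+# Editorial note: ``jac`` is linear in ``x``, so the multiplier-weighted
+# Hessian returned by ``hess`` can be checked against a difference of
+# Jacobians, e.g.:
+#
+#     rng = np.random.RandomState(1)
+#     fun, jac, hess = create_quadratic_function(3, 5, rng)
+#     x, v, d = rng.rand(3), rng.rand(5), rng.rand(3)
+#     lhs = hess(x, v).dot(d)
+#     rhs = v.dot(jac(x + 1e-7 * d) - jac(x)) / 1e-7
+#     assert np.allclose(lhs, rhs, atol=1e-5)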
+
+
+def test_bounds_cases():
+    # Test 1: no constraints.
+    user_constraint = Bounds(-np.inf, np.inf)
+    x0 = np.array([-1, 2])
+    prepared_constraint = PreparedConstraint(user_constraint, x0, False)
+    c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint)
+
+    assert_equal(c.n_eq, 0)
+    assert_equal(c.n_ineq, 0)
+
+    c_eq, c_ineq = c.fun(x0)
+    assert_array_equal(c_eq, [])
+    assert_array_equal(c_ineq, [])
+
+    J_eq, J_ineq = c.jac(x0)
+    assert_array_equal(J_eq, np.empty((0, 2)))
+    assert_array_equal(J_ineq, np.empty((0, 2)))
+
+    assert_array_equal(c.keep_feasible, [])
+
+    # Test 2: infinite lower bound.
+    user_constraint = Bounds(-np.inf, [0, np.inf, 1], [False, True, True])
+    x0 = np.array([-1, -2, -3], dtype=float)
+    prepared_constraint = PreparedConstraint(user_constraint, x0, False)
+    c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint)
+
+    assert_equal(c.n_eq, 0)
+    assert_equal(c.n_ineq, 2)
+
+    c_eq, c_ineq = c.fun(x0)
+    assert_array_equal(c_eq, [])
+    assert_array_equal(c_ineq, [-1, -4])
+
+    J_eq, J_ineq = c.jac(x0)
+    assert_array_equal(J_eq, np.empty((0, 3)))
+    assert_array_equal(J_ineq, np.array([[1, 0, 0], [0, 0, 1]]))
+
+    assert_array_equal(c.keep_feasible, [False, True])
+
+    # Test 3: infinite upper bound.
+    user_constraint = Bounds([0, 1, -np.inf], np.inf, [True, False, True])
+    x0 = np.array([1, 2, 3], dtype=float)
+    prepared_constraint = PreparedConstraint(user_constraint, x0, False)
+    c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint)
+
+    assert_equal(c.n_eq, 0)
+    assert_equal(c.n_ineq, 2)
+
+    c_eq, c_ineq = c.fun(x0)
+    assert_array_equal(c_eq, [])
+    assert_array_equal(c_ineq, [-1, -1])
+
+    J_eq, J_ineq = c.jac(x0)
+    assert_array_equal(J_eq, np.empty((0, 3)))
+    assert_array_equal(J_ineq, np.array([[-1, 0, 0], [0, -1, 0]]))
+
+    assert_array_equal(c.keep_feasible, [True, False])
+
+    # Test 4: interval constraint.
+    user_constraint = Bounds([-1, -np.inf, 2, 3], [1, np.inf, 10, 3],
+                             [False, True, True, True])
+    x0 = np.array([0, 10, 8, 5])
+    prepared_constraint = PreparedConstraint(user_constraint, x0, False)
+    c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint)
+
+    assert_equal(c.n_eq, 1)
+    assert_equal(c.n_ineq, 4)
+
+    c_eq, c_ineq = c.fun(x0)
+    assert_array_equal(c_eq, [2])
+    assert_array_equal(c_ineq, [-1, -2, -1, -6])
+
+    J_eq, J_ineq = c.jac(x0)
+    assert_array_equal(J_eq, [[0, 0, 0, 1]])
+    assert_array_equal(J_ineq, [[1, 0, 0, 0],
+                                [0, 0, 1, 0],
+                                [-1, 0, 0, 0],
+                                [0, 0, -1, 0]])
+
+    assert_array_equal(c.keep_feasible, [False, True, False, True])
+
+
+def test_nonlinear_constraint():
+    n = 3
+    m = 5
+    rng = np.random.RandomState(0)
+    x0 = rng.rand(n)
+
+    fun, jac, hess = create_quadratic_function(n, m, rng)
+    f = fun(x0)
+    J = jac(x0)
+
+    lb = [-10, 3, -np.inf, -np.inf, -5]
+    ub = [10, 3, np.inf, 3, np.inf]
+    user_constraint = NonlinearConstraint(
+        fun, lb, ub, jac, hess, [True, False, False, True, False])
+
+    for sparse_jacobian in [False, True]:
+        prepared_constraint = PreparedConstraint(user_constraint, x0,
+                                                 sparse_jacobian)
+        c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint)
+
+        assert_array_equal(c.n_eq, 1)
+        assert_array_equal(c.n_ineq, 4)
+
+        c_eq, c_ineq = c.fun(x0)
+        assert_array_equal(c_eq, [f[1] - lb[1]])
+        assert_array_equal(c_ineq, [f[3] - ub[3], lb[4] - f[4],
+                                    f[0] - ub[0], lb[0] - f[0]])
+
+        J_eq, J_ineq = c.jac(x0)
+        if sparse_jacobian:
+            J_eq = J_eq.toarray()
+            J_ineq = J_ineq.toarray()
+
+        assert_array_equal(J_eq, J[1, None])
+        assert_array_equal(J_ineq, np.vstack((J[3], -J[4], J[0], -J[0])))
+
+        v_eq = rng.rand(c.n_eq)
+        v_ineq = rng.rand(c.n_ineq)
+        v = np.zeros(m)
+        v[1] = v_eq[0]
+        v[3] = v_ineq[0]
+        v[4] = -v_ineq[1]
+        v[0] = v_ineq[2] - v_ineq[3]
+        assert_array_equal(c.hess(x0, v_eq, v_ineq), hess(x0, v))
+
+        assert_array_equal(c.keep_feasible, [True, False, True, True])
+
+
+def test_concatenation():
+    rng = np.random.RandomState(0)
+    n = 4
+    x0 = rng.rand(n)
+
+    f1 = x0
+    J1 = np.eye(n)
+    lb1 = [-1, -np.inf, -2, 3]
+    ub1 = [1, np.inf, np.inf, 3]
+    bounds = Bounds(lb1, ub1, [False, False, True, False])
+
+    fun, jac, hess = create_quadratic_function(n, 5, rng)
+    f2 = fun(x0)
+    J2 = jac(x0)
+    lb2 = [-10, 3, -np.inf, -np.inf, -5]
+    ub2 = [10, 3, np.inf, 5, np.inf]
+    nonlinear = NonlinearConstraint(
+        fun, lb2, ub2, jac, hess, [True, False, False, True, False])
+
+    for sparse_jacobian in [False, True]:
+        bounds_prepared = PreparedConstraint(bounds, x0, sparse_jacobian)
+        nonlinear_prepared = PreparedConstraint(nonlinear, x0, sparse_jacobian)
+
+        c1 = CanonicalConstraint.from_PreparedConstraint(bounds_prepared)
+        c2 = CanonicalConstraint.from_PreparedConstraint(nonlinear_prepared)
+        c = CanonicalConstraint.concatenate([c1, c2], sparse_jacobian)
+
+        assert_equal(c.n_eq, 2)
+        assert_equal(c.n_ineq, 7)
+
+        c_eq, c_ineq = c.fun(x0)
+        assert_array_equal(c_eq, [f1[3] - lb1[3], f2[1] - lb2[1]])
+        assert_array_equal(c_ineq, [lb1[2] - f1[2], f1[0] - ub1[0],
+                                    lb1[0] - f1[0], f2[3] - ub2[3],
+                                    lb2[4] - f2[4], f2[0] - ub2[0],
+                                    lb2[0] - f2[0]])
+
+        J_eq, J_ineq = c.jac(x0)
+        if sparse_jacobian:
+            J_eq = J_eq.toarray()
+            J_ineq = J_ineq.toarray()
+
+        assert_array_equal(J_eq, np.vstack((J1[3], J2[1])))
+        assert_array_equal(J_ineq, np.vstack((-J1[2], J1[0], -J1[0], J2[3],
+                                              -J2[4], J2[0], -J2[0])))
+
+        v_eq = rng.rand(c.n_eq)
+        v_ineq = rng.rand(c.n_ineq)
+        v = np.zeros(5)
+        v[1] = v_eq[1]
+        v[3] = v_ineq[3]
+        v[4] = -v_ineq[4]
+        v[0] = v_ineq[5] - v_ineq[6]
+        H = c.hess(x0, v_eq, v_ineq).dot(np.eye(n))
+        assert_array_equal(H, hess(x0, v))
+
+        assert_array_equal(c.keep_feasible,
+                           [True, False, False, True, False, True, True])
+
+
+def test_empty():
+    x = np.array([1, 2, 3])
+    c = CanonicalConstraint.empty(3)
+    assert_equal(c.n_eq, 0)
+    assert_equal(c.n_ineq, 0)
+
+    c_eq, c_ineq = c.fun(x)
+    assert_array_equal(c_eq, [])
+    assert_array_equal(c_ineq, [])
+
+    J_eq, J_ineq = c.jac(x)
+    assert_array_equal(J_eq, np.empty((0, 3)))
+    assert_array_equal(J_ineq, np.empty((0, 3)))
+
+    H = c.hess(x, None, None).toarray()
+    assert_array_equal(H, np.zeros((3, 3)))
+
+
+def test_initial_constraints_as_canonical():
+    # rng is only used to generate the coefficients of the quadratic
+    # function that is used by the nonlinear constraint.
+    rng = np.random.RandomState(0)
+
+    x0 = np.array([0.5, 0.4, 0.3, 0.2])
+    n = len(x0)
+
+    lb1 = [-1, -np.inf, -2, 3]
+    ub1 = [1, np.inf, np.inf, 3]
+    bounds = Bounds(lb1, ub1, [False, False, True, False])
+
+    fun, jac, hess = create_quadratic_function(n, 5, rng)
+    lb2 = [-10, 3, -np.inf, -np.inf, -5]
+    ub2 = [10, 3, np.inf, 5, np.inf]
+    nonlinear = NonlinearConstraint(
+        fun, lb2, ub2, jac, hess, [True, False, False, True, False])
+
+    for sparse_jacobian in [False, True]:
+        bounds_prepared = PreparedConstraint(bounds, x0, sparse_jacobian)
+        nonlinear_prepared = PreparedConstraint(nonlinear, x0, sparse_jacobian)
+
+        f1 = bounds_prepared.fun.f
+        J1 = bounds_prepared.fun.J
+        f2 = nonlinear_prepared.fun.f
+        J2 = nonlinear_prepared.fun.J
+
+        c_eq, c_ineq, J_eq, J_ineq = initial_constraints_as_canonical(
+            n, [bounds_prepared, nonlinear_prepared], sparse_jacobian)
+
+        assert_array_equal(c_eq, [f1[3] - lb1[3], f2[1] - lb2[1]])
+        assert_array_equal(c_ineq, [lb1[2] - f1[2], f1[0] - ub1[0],
+                                    lb1[0] - f1[0], f2[3] - ub2[3],
+                                    lb2[4] - f2[4], f2[0] - ub2[0],
+                                    lb2[0] - f2[0]])
+
+        if sparse_jacobian:
+            J1 = J1.toarray()
+            J2 = J2.toarray()
+            J_eq = J_eq.toarray()
+            J_ineq = J_ineq.toarray()
+
+        assert_array_equal(J_eq, np.vstack((J1[3], J2[1])))
+        assert_array_equal(J_ineq, np.vstack((-J1[2], J1[0], -J1[0], J2[3],
+                                              -J2[4], J2[0], -J2[0])))
+
+
+def test_initial_constraints_as_canonical_empty():
+    n = 3
+    for sparse_jacobian in [False, True]:
+        c_eq, c_ineq, J_eq, J_ineq = initial_constraints_as_canonical(
+            n, [], sparse_jacobian)
+
+        assert_array_equal(c_eq, [])
+        assert_array_equal(c_ineq, [])
+
+        if sparse_jacobian:
+            J_eq = J_eq.toarray()
+            J_ineq = J_ineq.toarray()
+
+        assert_array_equal(J_eq, np.empty((0, n)))
+        assert_array_equal(J_ineq, np.empty((0, n)))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/tests/test_projections.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/tests/test_projections.py
new file mode 100644
index 00000000..449c18a4
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/tests/test_projections.py
@@ -0,0 +1,214 @@
+import numpy as np
+import scipy.linalg
+from scipy.sparse import csc_matrix
+from scipy.optimize._trustregion_constr.projections \
+    import projections, orthogonality
+from numpy.testing import (TestCase, assert_array_almost_equal,
+                           assert_equal, assert_allclose)
+
+try:
+    from sksparse.cholmod import cholesky_AAt
+    sksparse_available = True
+    available_sparse_methods = ("NormalEquation", "AugmentedSystem")
+except ImportError:
+    sksparse_available = False
+    available_sparse_methods = ("AugmentedSystem",)
+available_dense_methods = ('QRFactorization', 'SVDFactorization')
+
+
+class TestProjections(TestCase):
+
+    def test_nullspace_and_least_squares_sparse(self):
+        A_dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
+                            [0, 8, 7, 0, 1, 5, 9, 0],
+                            [1, 0, 0, 0, 0, 1, 2, 3]])
+        At_dense = A_dense.T
+        A = csc_matrix(A_dense)
+        test_points = ([1, 2, 3, 4, 5, 6, 7, 8],
+                       [1, 10, 3, 0, 1, 6, 7, 8],
+                       [1.12, 10, 0, 0, 100000, 6, 0.7, 8])
+
+        for method in available_sparse_methods:
+            Z, LS, _ = projections(A, method)
+            for z in test_points:
+                # Test if x is in the null_space
+                x = Z.matvec(z)
+                assert_array_almost_equal(A.dot(x), 0)
+                # Test orthogonality
+                assert_array_almost_equal(orthogonality(A, x), 0)
+                # Test if x is the least square solution
+                x = LS.matvec(z)
+                x2 = scipy.linalg.lstsq(At_dense, z)[0]
+                assert_array_almost_equal(x, x2)
+
+    def test_iterative_refinements_sparse(self):
+        A_dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
+                            [0, 8, 7, 0, 1, 5, 9, 0],
+                            [1, 0, 0, 0, 0, 1, 2, 3]])
+        A = csc_matrix(A_dense)
+        test_points = ([1, 2, 3, 4, 5, 6, 7, 8],
+                       [1, 10, 3, 0, 1, 6, 7, 8],
+                       [1.12, 10, 0, 0, 100000, 6, 0.7, 8],
+                       [1, 0, 0, 0, 0, 1, 2, 3+1e-10])
+
+        for method in available_sparse_methods:
+            Z, LS, _ = projections(A, method, orth_tol=1e-18, max_refin=100)
+            for z in test_points:
+                # Test if x is in the null_space
+                x = Z.matvec(z)
+                atol = 1e-13 * abs(x).max()
+                assert_allclose(A.dot(x), 0, atol=atol)
+                # Test orthogonality
+                assert_allclose(orthogonality(A, x), 0, atol=1e-13)
+
+    def test_rowspace_sparse(self):
+        A_dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
+                            [0, 8, 7, 0, 1, 5, 9, 0],
+                            [1, 0, 0, 0, 0, 1, 2, 3]])
+        A = csc_matrix(A_dense)
+        test_points = ([1, 2, 3],
+                       [1, 10, 3],
+                       [1.12, 10, 0])
+
+        for method in available_sparse_methods:
+            _, _, Y = projections(A, method)
+            for z in test_points:
+                # Test if x is solution of A x = z
+                x = Y.matvec(z)
+                assert_array_almost_equal(A.dot(x), z)
+                # Test if x is in the row space of A
+                A_ext = np.vstack((A_dense, x))
+                assert_equal(np.linalg.matrix_rank(A_dense),
+                             np.linalg.matrix_rank(A_ext))
+
+    def test_nullspace_and_least_squares_dense(self):
+        A = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
+                      [0, 8, 7, 0, 1, 5, 9, 0],
+                      [1, 0, 0, 0, 0, 1, 2, 3]])
+        At = A.T
+        test_points = ([1, 2, 3, 4, 5, 6, 7, 8],
+                       [1, 10, 3, 0, 1, 6, 7, 8],
+                       [1.12, 10, 0, 0, 100000, 6, 0.7, 8])
+
+        for method in available_dense_methods:
+            Z, LS, _ = projections(A, method)
+            for z in test_points:
+                # Test if x is in the null_space
+                x = Z.matvec(z)
+                assert_array_almost_equal(A.dot(x), 0)
+                # Test orthogonality
+                assert_array_almost_equal(orthogonality(A, x), 0)
+                # Test if x is the least square solution
+                x = LS.matvec(z)
+                x2 = scipy.linalg.lstsq(At, z)[0]
+                assert_array_almost_equal(x, x2)
+
+    def test_compare_dense_and_sparse(self):
+        D = np.diag(range(1, 101))
+        A = np.hstack([D, D, D, D])
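+        # A is 100 x 400 with full row rank: Z projects R^400 onto the
+        # 300-dimensional null space of A, while Y maps R^100 to
+        # minimum-norm solutions in the row space.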
+        A_sparse = csc_matrix(A)
+        np.random.seed(0)
+
+        Z, LS, Y = projections(A)
+        Z_sparse, LS_sparse, Y_sparse = projections(A_sparse)
+        for k in range(20):
+            z = np.random.normal(size=(400,))
+            assert_array_almost_equal(Z.dot(z), Z_sparse.dot(z))
+            assert_array_almost_equal(LS.dot(z), LS_sparse.dot(z))
+            x = np.random.normal(size=(100,))
+            assert_array_almost_equal(Y.dot(x), Y_sparse.dot(x))
+
+    def test_compare_dense_and_sparse2(self):
+        D1 = np.diag([-1.7, 1, 0.5])
+        D2 = np.diag([1, -0.6, -0.3])
+        D3 = np.diag([-0.3, -1.5, 2])
+        A = np.hstack([D1, D2, D3])
+        A_sparse = csc_matrix(A)
+        np.random.seed(0)
+
+        Z, LS, Y = projections(A)
+        Z_sparse, LS_sparse, Y_sparse = projections(A_sparse)
+        for k in range(1):
+            z = np.random.normal(size=(9,))
+            assert_array_almost_equal(Z.dot(z), Z_sparse.dot(z))
+            assert_array_almost_equal(LS.dot(z), LS_sparse.dot(z))
+            x = np.random.normal(size=(3,))
+            assert_array_almost_equal(Y.dot(x), Y_sparse.dot(x))
+
+    def test_iterative_refinements_dense(self):
+        A = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
+                      [0, 8, 7, 0, 1, 5, 9, 0],
+                      [1, 0, 0, 0, 0, 1, 2, 3]])
+        test_points = ([1, 2, 3, 4, 5, 6, 7, 8],
+                       [1, 10, 3, 0, 1, 6, 7, 8],
+                       [1, 0, 0, 0, 0, 1, 2, 3+1e-10])
+
+        for method in available_dense_methods:
+            Z, LS, _ = projections(A, method, orth_tol=1e-18, max_refin=10)
+            for z in test_points:
+                # Test if x is in the null_space
+                x = Z.matvec(z)
+                assert_allclose(A.dot(x), 0, rtol=0, atol=2.5e-14)
+                # Test orthogonality
+                assert_allclose(orthogonality(A, x), 0, rtol=0, atol=5e-16)
+
+    def test_rowspace_dense(self):
+        A = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
+                      [0, 8, 7, 0, 1, 5, 9, 0],
+                      [1, 0, 0, 0, 0, 1, 2, 3]])
+        test_points = ([1, 2, 3],
+                       [1, 10, 3],
+                       [1.12, 10, 0])
+
+        for method in available_dense_methods:
+            _, _, Y = projections(A, method)
+            for z in test_points:
+                # Test if x is solution of A x = z
+                x = Y.matvec(z)
+                assert_array_almost_equal(A.dot(x), z)
+                # Test if x is in the row space of A
+                A_ext = np.vstack((A, x))
+                assert_equal(np.linalg.matrix_rank(A),
+                             np.linalg.matrix_rank(A_ext))
+
+
+class TestOrthogonality(TestCase):
+
+    def test_dense_matrix(self):
+        A = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
+                      [0, 8, 7, 0, 1, 5, 9, 0],
+                      [1, 0, 0, 0, 0, 1, 2, 3]])
+        test_vectors = ([-1.98931144, -1.56363389,
+                         -0.84115584, 2.2864762,
+                         5.599141, 0.09286976,
+                         1.37040802, -0.28145812],
+                        [697.92794044, -4091.65114008,
+                         -3327.42316335, 836.86906951,
+                         99434.98929065, -1285.37653682,
+                         -4109.21503806, 2935.29289083])
+        test_expected_orth = (0, 0)
+
+        for i in range(len(test_vectors)):
+            x = test_vectors[i]
+            orth = test_expected_orth[i]
+            assert_array_almost_equal(orthogonality(A, x), orth)
+
+    def test_sparse_matrix(self):
+        A = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
+                      [0, 8, 7, 0, 1, 5, 9, 0],
+                      [1, 0, 0, 0, 0, 1, 2, 3]])
+        A = csc_matrix(A)
+        test_vectors = ([-1.98931144, -1.56363389,
+                         -0.84115584, 2.2864762,
+                         5.599141, 0.09286976,
+                         1.37040802, -0.28145812],
+                        [697.92794044, -4091.65114008,
+                         -3327.42316335, 836.86906951,
+                         99434.98929065, -1285.37653682,
+                         -4109.21503806, 2935.29289083])
+        test_expected_orth = (0, 0)
+
+        for i in range(len(test_vectors)):
+            x = test_vectors[i]
+            orth = test_expected_orth[i]
+            assert_array_almost_equal(orthogonality(A, x), orth)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py
new file mode 100644
index 00000000..70727c22
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py
@@ -0,0 +1,645 @@
+import numpy as np
+from scipy.sparse import csc_matrix
+from scipy.optimize._trustregion_constr.qp_subproblem \
+    import (eqp_kktfact,
+            projected_cg,
+            box_intersections,
+            sphere_intersections,
+            box_sphere_intersections,
+            modified_dogleg)
+from scipy.optimize._trustregion_constr.projections \
+    import projections
+from numpy.testing import TestCase, assert_array_almost_equal, assert_equal
+import pytest
+
+
+class TestEQPDirectFactorization(TestCase):
+
+    # From Example 16.2 Nocedal/Wright "Numerical
+    # Optimization" p.452.
+    def test_nocedal_example(self):
+        H = csc_matrix([[6, 2, 1],
+                        [2, 5, 2],
+                        [1, 2, 4]])
+        A = csc_matrix([[1, 0, 1],
+                        [0, 1, 1]])
+        c = np.array([-8, -3, -3])
+        b = -np.array([3, 0])
+        x, lagrange_multipliers = eqp_kktfact(H, c, A, b)
+        assert_array_almost_equal(x, [2, -1, 1])
+        assert_array_almost_equal(lagrange_multipliers, [3, -2])
+
+
+class TestSphericalBoundariesIntersections(TestCase):
+
+    def test_2d_sphere_constraints(self):
+        # Interior initial point
+        ta, tb, intersect = sphere_intersections([0, 0],
+                                                 [1, 0], 0.5)
+        assert_array_almost_equal([ta, tb], [0, 0.5])
+        assert_equal(intersect, True)
+
+        # No intersection between line and circle
+        ta, tb, intersect = sphere_intersections([2, 0],
+                                                 [0, 1], 1)
+        assert_equal(intersect, False)
+
+        # Outside initial point pointing toward outside the circle
+        ta, tb, intersect = sphere_intersections([2, 0],
+                                                 [1, 0], 1)
+        assert_equal(intersect, False)
+
+        # Outside initial point pointing toward inside the circle
+        ta, tb, intersect = sphere_intersections([2, 0],
+                                                 [-1, 0], 1.5)
+        assert_array_almost_equal([ta, tb], [0.5, 1])
+        assert_equal(intersect, True)
+
+        # Initial point on the boundary
+        ta, tb, intersect = sphere_intersections([2, 0],
+                                                 [1, 0], 2)
+        assert_array_almost_equal([ta, tb], [0, 0])
+        assert_equal(intersect, True)
+
+    def test_2d_sphere_constraints_line_intersections(self):
+        # Interior initial point
+        ta, tb, intersect = sphere_intersections([0, 0],
+                                                 [1, 0], 0.5,
+                                                 entire_line=True)
+        assert_array_almost_equal([ta, tb], [-0.5, 0.5])
+        assert_equal(intersect, True)
+
+        # No intersection between line and circle
+        ta, tb, intersect = sphere_intersections([2, 0],
+                                                 [0, 1], 1,
+                                                 entire_line=True)
+        assert_equal(intersect, False)
+
+        # Outside initial point pointing toward outside the circle
+        ta, tb, intersect = sphere_intersections([2, 0],
+                                                 [1, 0], 1,
+                                                 entire_line=True)
+        assert_array_almost_equal([ta, tb], [-3, -1])
+        assert_equal(intersect, True)
+
+        # Outside initial point pointing toward inside the circle
+        ta, tb, intersect = sphere_intersections([2, 0],
+                                                 [-1, 0], 1.5,
+                                                 entire_line=True)
+        assert_array_almost_equal([ta, tb], [0.5, 3.5])
+        assert_equal(intersect, True)
+
+        # Initial point on the boundary
+        ta, tb, intersect = sphere_intersections([2, 0],
+                                                 [1, 0], 2,
+                                                 entire_line=True)
+        assert_array_almost_equal([ta, tb], [-4, 0])
+        assert_equal(intersect, True)
+
+
+class TestBoxBoundariesIntersections(TestCase):
+
+    def test_2d_box_constraints(self):
+        # Box constraint in the direction of vector d
+        ta, tb, intersect = box_intersections([2, 0], [0, 2],
+                                              [1, 1], [3, 3])
+        assert_array_almost_equal([ta, tb], [0.5, 1])
+        assert_equal(intersect, True)
+
+        # Negative direction
+        ta, tb, intersect = box_intersections([2, 0], [0, 2],
+                                              [1, -3], [3, -1])
+        assert_equal(intersect, False)
+
+        # Some constraints are absent (set to +/- inf)
+        ta, tb, intersect = box_intersections([2, 0], [0, 2],
+                                              [-np.inf, 1],
+                                              [np.inf, np.inf])
+        assert_array_almost_equal([ta, tb], [0.5, 1])
+        assert_equal(intersect, True)
+
+        # Intersect on the face of the box
+        ta, tb, intersect = box_intersections([1, 0], [0, 1],
+                                              [1, 1], [3, 3])
+        assert_array_almost_equal([ta, tb], [1, 1])
+        assert_equal(intersect, True)
+
+        # Interior initial point
+        ta, tb, intersect = box_intersections([0, 0], [4, 4],
+                                              [-2, -3], [3, 2])
+        assert_array_almost_equal([ta, tb], [0, 0.5])
+        assert_equal(intersect, True)
+
+        # No intersection between line and box constraints
+        ta, tb, intersect = box_intersections([2, 0], [0, 2],
+                                              [-3, -3], [-1, -1])
+        assert_equal(intersect, False)
+        ta, tb, intersect = box_intersections([2, 0], [0, 2],
+                                              [-3, 3], [-1, 1])
+        assert_equal(intersect, False)
+        ta, tb, intersect = box_intersections([2, 0], [0, 2],
+                                              [-3, -np.inf],
+                                              [-1, np.inf])
+        assert_equal(intersect, False)
+        ta, tb, intersect = box_intersections([0, 0], [1, 100],
+                                              [1, 1], [3, 3])
+        assert_equal(intersect, False)
+        ta, tb, intersect = box_intersections([0.99, 0], [0, 2],
+                                              [1, 1], [3, 3])
+        assert_equal(intersect, False)
+
+        # Initial point on the boundary
+        ta, tb, intersect = box_intersections([2, 2], [0, 1],
+                                              [-2, -2], [2, 2])
+        assert_array_almost_equal([ta, tb], [0, 0])
+        assert_equal(intersect, True)
+
+    def test_2d_box_constraints_entire_line(self):
+        # Box constraint in the direction of vector d
+        ta, tb, intersect = box_intersections([2, 0], [0, 2],
+                                              [1, 1], [3, 3],
+                                              entire_line=True)
+        assert_array_almost_equal([ta, tb], [0.5, 1.5])
+        assert_equal(intersect, True)
+
+        # Negative direction
+        ta, tb, intersect = box_intersections([2, 0], [0, 2],
+                                              [1, -3], [3, -1],
+                                              entire_line=True)
+        assert_array_almost_equal([ta, tb], [-1.5, -0.5])
+        assert_equal(intersect, True)
+
+        # Some constraints are absent (set to +/- inf)
+        ta, tb, intersect = box_intersections([2, 0], [0, 2],
+                                              [-np.inf, 1],
+                                              [np.inf, np.inf],
+                                              entire_line=True)
+        assert_array_almost_equal([ta, tb], [0.5, np.inf])
+        assert_equal(intersect, True)
+
+        # Intersect on the face of the box
+        ta, tb, intersect = box_intersections([1, 0], [0, 1],
+                                              [1, 1], [3, 3],
+                                              entire_line=True)
+        assert_array_almost_equal([ta, tb], [1, 3])
+        assert_equal(intersect, True)
+
+        # Interior initial point
+        ta, tb, intersect = box_intersections([0, 0], [4, 4],
+                                              [-2, -3], [3, 2],
+                                              entire_line=True)
+        assert_array_almost_equal([ta, tb], [-0.5, 0.5])
+        assert_equal(intersect, True)
+
+        # No intersection between line and box constraints
+        ta, tb, intersect = box_intersections([2, 0], [0, 2],
+                                              [-3, -3], [-1, -1],
+                                              entire_line=True)
+        assert_equal(intersect, False)
+        ta, tb, intersect = box_intersections([2, 0], [0, 2],
+                                              [-3, 3], [-1, 1],
+                                              entire_line=True)
+        assert_equal(intersect, False)
+        ta, tb, intersect = box_intersections([2, 0], [0, 2],
+                                              [-3, -np.inf],
+                                              [-1, np.inf],
+                                              entire_line=True)
+        assert_equal(intersect, False)
+        ta, tb, intersect = box_intersections([0, 0], [1, 100],
+                                              [1, 1], [3, 3],
+                                              entire_line=True)
+        assert_equal(intersect, False)
+        ta, tb, intersect = box_intersections([0.99, 0], [0, 2],
+                                              [1, 1], [3, 3],
+                                              entire_line=True)
+        assert_equal(intersect, False)
+
+        # Initial point on the boundary
+        ta, tb, intersect = box_intersections([2, 2], [0, 1],
+                                              [-2, -2], [2, 2],
+                                              entire_line=True)
+        assert_array_almost_equal([ta, tb], [-4, 0])
+        assert_equal(intersect, True)
+
+    def test_3d_box_constraints(self):
+        # Simple case
+        ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, 1],
+                                              [1, 1, 1], [3, 3, 3])
+        assert_array_almost_equal([ta, tb], [1, 1])
+        assert_equal(intersect, True)
+
+        # Negative direction
+        ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, -1],
+                                              [1, 1, 1], [3, 3, 3])
+        assert_equal(intersect, False)
+
+        # Interior point
+        ta, tb, intersect = box_intersections([2, 2, 2], [0, -1, 1],
+                                              [1, 1, 1], [3, 3, 3])
+        assert_array_almost_equal([ta, tb], [0, 1])
+        assert_equal(intersect, True)
+
+    def test_3d_box_constraints_entire_line(self):
+        # Simple case
+        ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, 1],
+                                              [1, 1, 1], [3, 3, 3],
+                                              entire_line=True)
+        assert_array_almost_equal([ta, tb], [1, 3])
+        assert_equal(intersect, True)
+
+        # Negative direction
+        ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, -1],
+                                              [1, 1, 1], [3, 3, 3],
+                                              entire_line=True)
+        assert_array_almost_equal([ta, tb], [-3, -1])
+        assert_equal(intersect, True)
+
+        # Interior point
+        ta, tb, intersect = box_intersections([2, 2, 2], [0, -1, 1],
+                                              [1, 1, 1], [3, 3, 3],
+                                              entire_line=True)
+        assert_array_almost_equal([ta, tb], [-1, 1])
+        assert_equal(intersect, True)
+
+
+class TestBoxSphereBoundariesIntersections(TestCase):
+
+    def test_2d_box_constraints(self):
+        # Both constraints are active
+        ta, tb, intersect = box_sphere_intersections([1, 1], [-2, 2],
+                                                     [-1, -2], [1, 2], 2,
+                                                     entire_line=False)
+        assert_array_almost_equal([ta, tb], [0, 0.5])
+        assert_equal(intersect, True)
+
+        # None of the constraints are active
+        ta, tb, intersect = box_sphere_intersections([1, 1], [-1, 1],
+                                                     [-1, -3], [1, 3], 10,
+                                                     entire_line=False)
+        assert_array_almost_equal([ta, tb], [0, 1])
+        assert_equal(intersect, True)
+
+        # Box constraints are active
+        ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
+                                                     [-1, -3], [1, 3], 10,
+                                                     entire_line=False)
+        assert_array_almost_equal([ta, tb], [0, 0.5])
+        assert_equal(intersect, True)
+
+        # Spherical constraints are active
+        ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
+                                                     [-1, -3], [1, 3], 2,
+                                                     entire_line=False)
+        assert_array_almost_equal([ta, tb], [0, 0.25])
+        assert_equal(intersect, True)
+
+        # Infeasible problems
+        ta, tb, intersect = box_sphere_intersections([2, 2], [-4, 4],
+                                                     [-1, -3], [1, 3], 2,
+                                                     entire_line=False)
+        assert_equal(intersect, False)
+        ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
+                                                     [2, 4], [2, 4], 2,
+                                                     entire_line=False)
+        assert_equal(intersect, False)
+
+    def test_2d_box_constraints_entire_line(self):
+        # Both constraints are active
+        ta, tb, intersect = box_sphere_intersections([1, 1], [-2, 2],
+                                                     [-1, -2], [1, 2], 2,
+                                                     entire_line=True)
+        assert_array_almost_equal([ta, tb], [0, 0.5])
+        assert_equal(intersect, True)
+
+        # None of the constraints are active
+        ta, tb, intersect = box_sphere_intersections([1, 1], [-1, 1],
+                                                     [-1, -3], [1, 3], 10,
+                                                     entire_line=True)
+        assert_array_almost_equal([ta, tb], [0, 2])
+        assert_equal(intersect, True)
+
+        # Box constraints are active
+        ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
+                                                     [-1, -3], [1, 3], 10,
+                                                     entire_line=True)
+        assert_array_almost_equal([ta, tb], [0, 0.5])
+        assert_equal(intersect, True)
+
+        # Spherical constraints are active
+        ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
+                                                     [-1, -3], [1, 3], 2,
+                                                     entire_line=True)
+        assert_array_almost_equal([ta, tb], [0, 0.25])
+        assert_equal(intersect, True)
+
+        # Infeasible problems
+        ta, tb, intersect = box_sphere_intersections([2, 2], [-4, 4],
+                                                     [-1, -3], [1, 3], 2,
+                                                     entire_line=True)
+        assert_equal(intersect, False)
+        ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
+                                                     [2, 4], [2, 4], 2,
+                                                     entire_line=True)
+        assert_equal(intersect, False)
+
+
+class TestModifiedDogleg(TestCase):
+
+    def test_cauchypoint_equalsto_newtonpoint(self):
+        A = np.array([[1, 8]])
+        b = np.array([-16])
+        _, _, Y = projections(A)
+        newton_point = np.array([0.24615385, 1.96923077])
+
+        # Newton point inside boundaries
+        x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf], [np.inf, np.inf])
+        assert_array_almost_equal(x, newton_point)
+
+        # Spherical constraint active
+        x = modified_dogleg(A, Y, b, 1, [-np.inf, -np.inf], [np.inf, np.inf])
+        assert_array_almost_equal(x, newton_point/np.linalg.norm(newton_point))
+
+        # Box constraints active
+        x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf], [0.1, np.inf])
+        assert_array_almost_equal(x, (newton_point/newton_point[0]) * 0.1)
+
+    def test_3d_example(self):
+        A = np.array([[1, 8, 1],
+                      [4, 2, 2]])
+        b = np.array([-16, 2])
+        Z, LS, Y = projections(A)
+
+        newton_point = np.array([-1.37090909, 2.23272727, -0.49090909])
+        cauchy_point = np.array([0.11165723, 1.73068711, 0.16748585])
+        origin = np.zeros_like(newton_point)
+
+        # newton_point inside boundaries
+        x = modified_dogleg(A, Y, b, 3, [-np.inf, -np.inf, -np.inf],
+                            [np.inf, np.inf, np.inf])
+        assert_array_almost_equal(x, newton_point)
+
+        # line between cauchy_point and newton_point contains best point
+        # (spherical constraint is active).
+        x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf, -np.inf],
+                            [np.inf, np.inf, np.inf])
+        z = cauchy_point
+        d = newton_point-cauchy_point
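+        # componentwise ratio (x - z) / d; identical entries confirm that
+        # x lies on the segment between cauchy_point and newton_point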
+        t = (x - z) / d
+        assert_array_almost_equal(t, np.full(3, 0.40807330))
+        assert_array_almost_equal(np.linalg.norm(x), 2)
+
+        # line between cauchy_point and newton_point contains best point
+        # (box constraint is active).
+        x = modified_dogleg(A, Y, b, 5, [-1, -np.inf, -np.inf],
+                            [np.inf, np.inf, np.inf])
+        z = cauchy_point
+        d = newton_point-cauchy_point
+        t = (x - z) / d
+        assert_array_almost_equal(t, np.full(3, 0.7498195))
+        assert_array_almost_equal(x[0], -1)
+
+        # line between origin and cauchy_point contains best point
+        # (spherical constraint is active).
+        x = modified_dogleg(A, Y, b, 1, [-np.inf, -np.inf, -np.inf],
+                            [np.inf, np.inf, np.inf])
+        z = origin
+        d = cauchy_point
+        t = (x - z) / d
+        assert_array_almost_equal(t, np.full(3, 0.573936265))
+        assert_array_almost_equal(np.linalg.norm(x), 1)
+
+        # line between origin and newton_point contains best point
+        # (box constraint is active).
+        x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf, -np.inf],
+                            [np.inf, 1, np.inf])
+        z = origin
+        d = newton_point
+        t = (x - z) / d
+        assert_array_almost_equal(t, np.full(3, 0.4478827364))
+        assert_array_almost_equal(x[1], 1)
+
+
+class TestProjectCG(TestCase):
+
+    # From Example 16.2 Nocedal/Wright "Numerical
+    # Optimization" p.452.
+    def test_nocedal_example(self):
+        H = csc_matrix([[6, 2, 1],
+                        [2, 5, 2],
+                        [1, 2, 4]])
+        A = csc_matrix([[1, 0, 1],
+                        [0, 1, 1]])
+        c = np.array([-8, -3, -3])
+        b = -np.array([3, 0])
+        Z, _, Y = projections(A)
+        x, info = projected_cg(H, c, Z, Y, b)
+        assert_equal(info["stop_cond"], 4)
+        assert_equal(info["hits_boundary"], False)
+        assert_array_almost_equal(x, [2, -1, 1])
+
+    def test_compare_with_direct_fact(self):
+        H = csc_matrix([[6, 2, 1, 3],
+                        [2, 5, 2, 4],
+                        [1, 2, 4, 5],
+                        [3, 4, 5, 7]])
+        A = csc_matrix([[1, 0, 1, 0],
+                        [0, 1, 1, 1]])
+        c = np.array([-2, -3, -3, 1])
+        b = -np.array([3, 0])
+        Z, _, Y = projections(A)
+        x, info = projected_cg(H, c, Z, Y, b, tol=0)
+        x_kkt, _ = eqp_kktfact(H, c, A, b)
+        assert_equal(info["stop_cond"], 1)
+        assert_equal(info["hits_boundary"], False)
+        assert_array_almost_equal(x, x_kkt)
+
+    def test_trust_region_infeasible(self):
+        H = csc_matrix([[6, 2, 1, 3],
+                        [2, 5, 2, 4],
+                        [1, 2, 4, 5],
+                        [3, 4, 5, 7]])
+        A = csc_matrix([[1, 0, 1, 0],
+                        [0, 1, 1, 1]])
+        c = np.array([-2, -3, -3, 1])
+        b = -np.array([3, 0])
+        trust_radius = 1
+        Z, _, Y = projections(A)
+        with pytest.raises(ValueError):
+            projected_cg(H, c, Z, Y, b, trust_radius=trust_radius)
+
+    def test_trust_region_barely_feasible(self):
+        H = csc_matrix([[6, 2, 1, 3],
+                        [2, 5, 2, 4],
+                        [1, 2, 4, 5],
+                        [3, 4, 5, 7]])
+        A = csc_matrix([[1, 0, 1, 0],
+                        [0, 1, 1, 1]])
+        c = np.array([-2, -3, -3, 1])
+        b = -np.array([3, 0])
+        trust_radius = 2.32379000772445021283
+        Z, _, Y = projections(A)
+        x, info = projected_cg(H, c, Z, Y, b,
+                               tol=0,
+                               trust_radius=trust_radius)
+        assert_equal(info["stop_cond"], 2)
+        assert_equal(info["hits_boundary"], True)
+        assert_array_almost_equal(np.linalg.norm(x), trust_radius)
+        assert_array_almost_equal(x, -Y.dot(b))
+
+    def test_hits_boundary(self):
+        H = csc_matrix([[6, 2, 1, 3],
+                        [2, 5, 2, 4],
+                        [1, 2, 4, 5],
+                        [3, 4, 5, 7]])
+        A = csc_matrix([[1, 0, 1, 0],
+                        [0, 1, 1, 1]])
+        c = np.array([-2, -3, -3, 1])
+        b = -np.array([3, 0])
+        trust_radius = 3
+        Z, _, Y = projections(A)
+        x, info = projected_cg(H, c, Z, Y, b,
+                               tol=0,
+                               trust_radius=trust_radius)
+        assert_equal(info["stop_cond"], 2)
+        assert_equal(info["hits_boundary"], True)
+        assert_array_almost_equal(np.linalg.norm(x), trust_radius)
+
+    def test_negative_curvature_unconstrained(self):
+        H = csc_matrix([[1, 2, 1, 3],
+                        [2, 0, 2, 4],
+                        [1, 2, 0, 2],
+                        [3, 4, 2, 0]])
+        A = csc_matrix([[1, 0, 1, 0],
+                        [0, 1, 0, 1]])
+        c = np.array([-2, -3, -3, 1])
+        b = -np.array([3, 0])
+        Z, _, Y = projections(A)
+        with pytest.raises(ValueError):
+            projected_cg(H, c, Z, Y, b, tol=0)
+
+    def test_negative_curvature(self):
+        H = csc_matrix([[1, 2, 1, 3],
+                        [2, 0, 2, 4],
+                        [1, 2, 0, 2],
+                        [3, 4, 2, 0]])
+        A = csc_matrix([[1, 0, 1, 0],
+                        [0, 1, 0, 1]])
+        c = np.array([-2, -3, -3, 1])
+        b = -np.array([3, 0])
+        Z, _, Y = projections(A)
+        trust_radius = 1000
+        x, info = projected_cg(H, c, Z, Y, b,
+                               tol=0,
+                               trust_radius=trust_radius)
+        assert_equal(info["stop_cond"], 3)
+        assert_equal(info["hits_boundary"], True)
+        assert_array_almost_equal(np.linalg.norm(x), trust_radius)
+
+    # The box constraints are inactive at the solution but
+    # are active during the iterations.
+    def test_inactive_box_constraints(self):
+        H = csc_matrix([[6, 2, 1, 3],
+                        [2, 5, 2, 4],
+                        [1, 2, 4, 5],
+                        [3, 4, 5, 7]])
+        A = csc_matrix([[1, 0, 1, 0],
+                        [0, 1, 1, 1]])
+        c = np.array([-2, -3, -3, 1])
+        b = -np.array([3, 0])
+        Z, _, Y = projections(A)
+        x, info = projected_cg(H, c, Z, Y, b,
+                               tol=0,
+                               lb=[0.5, -np.inf,
+                                   -np.inf, -np.inf],
+                               return_all=True)
+        x_kkt, _ = eqp_kktfact(H, c, A, b)
+        assert_equal(info["stop_cond"], 1)
+        assert_equal(info["hits_boundary"], False)
+        assert_array_almost_equal(x, x_kkt)
+
+    # The box constraints are active and the termination is
+    # by maximum iterations (with infeasible iterations).
+    def test_active_box_constraints_maximum_iterations_reached(self):
+        H = csc_matrix([[6, 2, 1, 3],
+                        [2, 5, 2, 4],
+                        [1, 2, 4, 5],
+                        [3, 4, 5, 7]])
+        A = csc_matrix([[1, 0, 1, 0],
+                        [0, 1, 1, 1]])
+        c = np.array([-2, -3, -3, 1])
+        b = -np.array([3, 0])
+        Z, _, Y = projections(A)
+        x, info = projected_cg(H, c, Z, Y, b,
+                               tol=0,
+                               lb=[0.8, -np.inf,
+                                   -np.inf, -np.inf],
+                               return_all=True)
+        assert_equal(info["stop_cond"], 1)
+        assert_equal(info["hits_boundary"], True)
+        assert_array_almost_equal(A.dot(x), -b)
+        assert_array_almost_equal(x[0], 0.8)
+
+    # The box constraints are active and the termination is
+    # because it hits the boundary (without infeasible iterations).
+    def test_active_box_constraints_hits_boundaries(self):
+        H = csc_matrix([[6, 2, 1, 3],
+                        [2, 5, 2, 4],
+                        [1, 2, 4, 5],
+                        [3, 4, 5, 7]])
+        A = csc_matrix([[1, 0, 1, 0],
+                        [0, 1, 1, 1]])
+        c = np.array([-2, -3, -3, 1])
+        b = -np.array([3, 0])
+        trust_radius = 3
+        Z, _, Y = projections(A)
+        x, info = projected_cg(H, c, Z, Y, b,
+                               tol=0,
+                               ub=[np.inf, np.inf, 1.6, np.inf],
+                               trust_radius=trust_radius,
+                               return_all=True)
+        assert_equal(info["stop_cond"], 2)
+        assert_equal(info["hits_boundary"], True)
+        assert_array_almost_equal(x[2], 1.6)
+
+    # The box constraints are active and the termination is
+    # because it hits the boundary (with infeasible iterations).
+    def test_active_box_constraints_hits_boundaries_infeasible_iter(self):
+        H = csc_matrix([[6, 2, 1, 3],
+                        [2, 5, 2, 4],
+                        [1, 2, 4, 5],
+                        [3, 4, 5, 7]])
+        A = csc_matrix([[1, 0, 1, 0],
+                        [0, 1, 1, 1]])
+        c = np.array([-2, -3, -3, 1])
+        b = -np.array([3, 0])
+        trust_radius = 4
+        Z, _, Y = projections(A)
+        x, info = projected_cg(H, c, Z, Y, b,
+                               tol=0,
+                               ub=[np.inf, 0.1, np.inf, np.inf],
+                               trust_radius=trust_radius,
+                               return_all=True)
+        assert_equal(info["stop_cond"], 2)
+        assert_equal(info["hits_boundary"], True)
+        assert_array_almost_equal(x[1], 0.1)
+
+    # The box constraints are active and the termination is
+    # due to negative curvature (no infeasible iterations).
+    def test_active_box_constraints_negative_curvature(self):
+        H = csc_matrix([[1, 2, 1, 3],
+                        [2, 0, 2, 4],
+                        [1, 2, 0, 2],
+                        [3, 4, 2, 0]])
+        A = csc_matrix([[1, 0, 1, 0],
+                        [0, 1, 0, 1]])
+        c = np.array([-2, -3, -3, 1])
+        b = -np.array([3, 0])
+        Z, _, Y = projections(A)
+        trust_radius = 1000
+        x, info = projected_cg(H, c, Z, Y, b,
+                               tol=0,
+                               ub=[np.inf, np.inf, 100, np.inf],
+                               trust_radius=trust_radius)
+        assert_equal(info["stop_cond"], 3)
+        assert_equal(info["hits_boundary"], True)
+        assert_array_almost_equal(x[2], 100)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/tests/test_report.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/tests/test_report.py
new file mode 100644
index 00000000..eb0c3f4b
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/tests/test_report.py
@@ -0,0 +1,32 @@
+import numpy as np
+from scipy.optimize import minimize, Bounds
+
+def test_gh10880():
+    # checks that verbose reporting works with trust-constr for
+    # bound-constrained problems
+    bnds = Bounds(1, 2)
+    opts = {'maxiter': 1000, 'verbose': 2}
+    minimize(lambda x: x**2, x0=2., method='trust-constr',
+             bounds=bnds, options=opts)
+
+    opts = {'maxiter': 1000, 'verbose': 3}
+    minimize(lambda x: x**2, x0=2., method='trust-constr',
+             bounds=bnds, options=opts)
+
+def test_gh12922():
+    # checks that verbose reporting works with trust-constr for
+    # general constraints
+    def objective(x):
+        return np.array([(np.sum((x+1)**4))])
+
+    cons = {'type': 'ineq', 'fun': lambda x: -x[0]**2}
+    n = 25
+    x0 = np.linspace(-5, 5, n)
+
+    opts = {'maxiter': 1000, 'verbose': 2}
+    result = minimize(objective, x0=x0, method='trust-constr',
+                      constraints=cons, options=opts)
+
+    opts = {'maxiter': 1000, 'verbose': 3}
+    result = minimize(objective, x0=x0, method='trust-constr',
+                      constraints=cons, options=opts)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/tr_interior_point.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/tr_interior_point.py
new file mode 100644
index 00000000..35b8b179
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_constr/tr_interior_point.py
@@ -0,0 +1,346 @@
+"""Trust-region interior point method.
+
+References
+----------
+.. [1] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal.
+       "An interior point algorithm for large-scale nonlinear
+       programming." SIAM Journal on Optimization 9.4 (1999): 877-900.
+.. [2] Byrd, Richard H., Guanghui Liu, and Jorge Nocedal.
+       "On the local behavior of an interior point method for
+       nonlinear programming." Numerical analysis 1997 (1997): 37-56.
+.. [3] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization"
+       Second Edition (2006).
+"""
+
+import scipy.sparse as sps
+import numpy as np
+from .equality_constrained_sqp import equality_constrained_sqp
+from scipy.sparse.linalg import LinearOperator
+
+__all__ = ['tr_interior_point']
+
+
+class BarrierSubproblem:
+    """
+    Barrier optimization problem:
+        minimize fun(x) - barrier_parameter*sum(log(s))
+        subject to: constr_eq(x)     = 0
+                  constr_ineq(x) + s = 0
+    """
+
+    def __init__(self, x0, s0, fun, grad, lagr_hess, n_vars, n_ineq, n_eq,
+                 constr, jac, barrier_parameter, tolerance,
+                 enforce_feasibility, global_stop_criteria,
+                 xtol, fun0, grad0, constr_ineq0, jac_ineq0, constr_eq0,
+                 jac_eq0):
+        # Store parameters
+        self.n_vars = n_vars
+        self.x0 = x0
+        self.s0 = s0
+        self.fun = fun
+        self.grad = grad
+        self.lagr_hess = lagr_hess
+        self.constr = constr
+        self.jac = jac
+        self.barrier_parameter = barrier_parameter
+        self.tolerance = tolerance
+        self.n_eq = n_eq
+        self.n_ineq = n_ineq
+        self.enforce_feasibility = enforce_feasibility
+        self.global_stop_criteria = global_stop_criteria
+        self.xtol = xtol
+        self.fun0 = self._compute_function(fun0, constr_ineq0, s0)
+        self.grad0 = self._compute_gradient(grad0)
+        self.constr0 = self._compute_constr(constr_ineq0, constr_eq0, s0)
+        self.jac0 = self._compute_jacobian(jac_eq0, jac_ineq0, s0)
+        self.terminate = False
+
+    def update(self, barrier_parameter, tolerance):
+        self.barrier_parameter = barrier_parameter
+        self.tolerance = tolerance
+
+    def get_slack(self, z):
+        return z[self.n_vars:self.n_vars+self.n_ineq]
+
+    def get_variables(self, z):
+        return z[:self.n_vars]
+
+    def function_and_constraints(self, z):
+        """Returns barrier function and constraints at given point.
+
+        For z = [x, s], returns barrier function:
+            function(z) = fun(x) - barrier_parameter*sum(log(s))
+        and barrier constraints:
+            constraints(z) = [   constr_eq(x)     ]
+                             [ constr_ineq(x) + s ]
+
+        """
+        # Get variables and slack variables
+        x = self.get_variables(z)
+        s = self.get_slack(z)
+        # Compute function and constraints
+        f = self.fun(x)
+        c_eq, c_ineq = self.constr(x)
+        # Return objective function and constraints
+        return (self._compute_function(f, c_ineq, s),
+                self._compute_constr(c_ineq, c_eq, s))
+
+    def _compute_function(self, f, c_ineq, s):
+        # Use technique from Nocedal and Wright book, ref [3]_, p.576,
+        # to guarantee constraints from `enforce_feasibility`
+        # stay feasible across iterations.
+        s[self.enforce_feasibility] = -c_ineq[self.enforce_feasibility]
+        log_s = [np.log(s_i) if s_i > 0 else -np.inf for s_i in s]
+        # Compute barrier objective function
+        return f - self.barrier_parameter*np.sum(log_s)
+
+    def _compute_constr(self, c_ineq, c_eq, s):
+        # Compute barrier constraint
+        return np.hstack((c_eq,
+                          c_ineq + s))
+
+    def scaling(self, z):
+        """Returns scaling vector.
+        Given by:
+            scaling = [ones(n_vars), s]
+        """
+        s = self.get_slack(z)
+        diag_elements = np.hstack((np.ones(self.n_vars), s))
+
+        # Diagonal matrix
+        def matvec(vec):
+            return diag_elements*vec
+        return LinearOperator((self.n_vars+self.n_ineq,
+                               self.n_vars+self.n_ineq),
+                              matvec)
+
+    def gradient_and_jacobian(self, z):
+        """Returns scaled gradient.
+
+        Return scaled gradient:
+            gradient = [             grad(x)             ]
+                       [ -barrier_parameter*ones(n_ineq) ]
+        and scaled Jacobian matrix:
+            jacobian = [  jac_eq(x)  0  ]
+                       [ jac_ineq(x) S  ]
+        Both of them scaled by the previously defined scaling factor.
+        """
+        # Get variables and slack variables
+        x = self.get_variables(z)
+        s = self.get_slack(z)
+        # Compute first derivatives
+        g = self.grad(x)
+        J_eq, J_ineq = self.jac(x)
+        # Return gradient and Jacobian
+        return (self._compute_gradient(g),
+                self._compute_jacobian(J_eq, J_ineq, s))
+
+    def _compute_gradient(self, g):
+        return np.hstack((g, -self.barrier_parameter*np.ones(self.n_ineq)))
+
+    def _compute_jacobian(self, J_eq, J_ineq, s):
+        if self.n_ineq == 0:
+            return J_eq
+        else:
+            if sps.issparse(J_eq) or sps.issparse(J_ineq):
+                # It is expected that J_eq and J_ineq
+                # are already `csr_matrix` because of
+                # the way ``BoxConstraint``, ``NonlinearConstraint``
+                # and ``LinearConstraint`` are defined.
+                J_eq = sps.csr_matrix(J_eq)
+                J_ineq = sps.csr_matrix(J_ineq)
+                return self._assemble_sparse_jacobian(J_eq, J_ineq, s)
+            else:
+                S = np.diag(s)
+                zeros = np.zeros((self.n_eq, self.n_ineq))
+                # Convert to matrix
+                if sps.issparse(J_ineq):
+                    J_ineq = J_ineq.toarray()
+                if sps.issparse(J_eq):
+                    J_eq = J_eq.toarray()
+                # Concatenate matrices
+                return np.block([[J_eq, zeros],
+                                 [J_ineq, S]])
+
+    def _assemble_sparse_jacobian(self, J_eq, J_ineq, s):
+        """Assemble sparse Jacobian given its components.
+
+        Given ``J_eq``, ``J_ineq`` and ``s`` returns:
+            jacobian = [ J_eq,     0     ]
+                       [ J_ineq, diag(s) ]
+
+        It is equivalent to:
+            sps.bmat([[ J_eq,   None    ],
+                      [ J_ineq, diag(s) ]], "csr")
+        but significantly more efficient for this
+        given structure.
+        """
+        n_vars, n_ineq, n_eq = self.n_vars, self.n_ineq, self.n_eq
+        J_aux = sps.vstack([J_eq, J_ineq], "csr")
+        indptr, indices, data = J_aux.indptr, J_aux.indices, J_aux.data
+        new_indptr = indptr + np.hstack((np.zeros(n_eq, dtype=int),
+                                         np.arange(n_ineq+1, dtype=int)))
+        size = indices.size+n_ineq
+        new_indices = np.empty(size)
+        new_data = np.empty(size)
+        mask = np.full(size, False, bool)
+        mask[new_indptr[-n_ineq:]-1] = True
+        new_indices[mask] = n_vars+np.arange(n_ineq)
+        new_indices[~mask] = indices
+        new_data[mask] = s
+        new_data[~mask] = data
+        J = sps.csr_matrix((new_data, new_indices, new_indptr),
+                           (n_eq + n_ineq, n_vars + n_ineq))
+        return J
+
+    def lagrangian_hessian_x(self, z, v):
+        """Returns Lagrangian Hessian (in relation to `x`) -> Hx"""
+        x = self.get_variables(z)
+        # Get lagrange multipliers relatated to nonlinear equality constraints
+        v_eq = v[:self.n_eq]
+        # Get lagrange multipliers relatated to nonlinear ineq. constraints
+        v_ineq = v[self.n_eq:self.n_eq+self.n_ineq]
+        lagr_hess = self.lagr_hess
+        return lagr_hess(x, v_eq, v_ineq)
+
+    def lagrangian_hessian_s(self, z, v):
+        """Returns scaled Lagrangian Hessian (in relation to`s`) -> S Hs S"""
+        s = self.get_slack(z)
+        # Using the primal formulation:
+        #     S Hs S = diag(s)*diag(barrier_parameter/s**2)*diag(s).
+        # Reference [1]_ p. 882, formula (3.1)
+        primal = self.barrier_parameter
+        # Using the primal-dual formulation
+        #     S Hs S = diag(s)*diag(v/s)*diag(s)
+        # Reference [1]_ p. 883, formula (3.11)
+        primal_dual = v[-self.n_ineq:]*s
+        # Uses the primal-dual formulation for
+        # positive values of v_ineq, and the primal
+        # formulation for the remaining ones.
+        return np.where(v[-self.n_ineq:] > 0, primal_dual, primal)
+
+    def lagrangian_hessian(self, z, v):
+        """Returns scaled Lagrangian Hessian"""
+        # Compute Hessian in relation to x and s
+        Hx = self.lagrangian_hessian_x(z, v)
+        if self.n_ineq > 0:
+            S_Hs_S = self.lagrangian_hessian_s(z, v)
+
+        # The scaled Lagrangian Hessian is:
+        #     [ Hx    0    ]
+        #     [ 0   S Hs S ]
+        def matvec(vec):
+            vec_x = self.get_variables(vec)
+            vec_s = self.get_slack(vec)
+            if self.n_ineq > 0:
+                return np.hstack((Hx.dot(vec_x), S_Hs_S*vec_s))
+            else:
+                return Hx.dot(vec_x)
+        return LinearOperator((self.n_vars+self.n_ineq,
+                               self.n_vars+self.n_ineq),
+                              matvec)
+
+    def stop_criteria(self, state, z, last_iteration_failed,
+                      optimality, constr_violation,
+                      trust_radius, penalty, cg_info):
+        """Stop criteria to the barrier problem.
+        The criteria here proposed is similar to formula (2.3)
+        from [1]_, p.879.
+        """
+        x = self.get_variables(z)
+        if self.global_stop_criteria(state, x,
+                                     last_iteration_failed,
+                                     trust_radius, penalty,
+                                     cg_info,
+                                     self.barrier_parameter,
+                                     self.tolerance):
+            self.terminate = True
+            return True
+        else:
+            g_cond = (optimality < self.tolerance and
+                      constr_violation < self.tolerance)
+            x_cond = trust_radius < self.xtol
+            return g_cond or x_cond
+
+
+def tr_interior_point(fun, grad, lagr_hess, n_vars, n_ineq, n_eq,
+                      constr, jac, x0, fun0, grad0,
+                      constr_ineq0, jac_ineq0, constr_eq0,
+                      jac_eq0, stop_criteria,
+                      enforce_feasibility, xtol, state,
+                      initial_barrier_parameter,
+                      initial_tolerance,
+                      initial_penalty,
+                      initial_trust_radius,
+                      factorization_method):
+    """Trust-region interior points method.
+
+    Solve problem:
+        minimize fun(x)
+        subject to: constr_ineq(x) <= 0
+                    constr_eq(x) = 0
+    using the trust-region interior point method described in [1]_.
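+
+    Through the public API this routine is reached via
+    ``minimize(method='trust-constr')``. A minimal sketch of that entry
+    point (illustrative only, not part of the original docstring)::
+
+        import numpy as np
+        from scipy.optimize import minimize, NonlinearConstraint
+
+        # minimize (x0 - 2)**2 + x1**2 inside the unit disk; the interior
+        # point iterations push the solution to the boundary point (1, 0)
+        cons = NonlinearConstraint(lambda x: x[0]**2 + x[1]**2, -np.inf, 1)
+        res = minimize(lambda x: (x[0] - 2)**2 + x[1]**2, x0=[0.0, 0.0],
+                       method='trust-constr', constraints=[cons])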
+    """
+    # BOUNDARY_PARAMETER controls the decrease of the slack
+    # variables. Represents ``tau`` from [1]_ p.885, formula (3.18).
+    BOUNDARY_PARAMETER = 0.995
+    # BARRIER_DECAY_RATIO controls the decay of the barrier parameter
+    # and of the subproblem tolerance. Represents ``theta`` from [1]_ p.879.
+    BARRIER_DECAY_RATIO = 0.2
+    # TRUST_ENLARGEMENT controls the enlargement of the trust radius
+    # after each iteration.
+    TRUST_ENLARGEMENT = 5
+
+    # Default enforce_feasibility
+    if enforce_feasibility is None:
+        enforce_feasibility = np.zeros(n_ineq, bool)
+    # Initial Values
+    barrier_parameter = initial_barrier_parameter
+    tolerance = initial_tolerance
+    trust_radius = initial_trust_radius
+    # Define initial value for the slack variables
+    s0 = np.maximum(-1.5*constr_ineq0, np.ones(n_ineq))
+    # Define barrier subproblem
+    subprob = BarrierSubproblem(
+        x0, s0, fun, grad, lagr_hess, n_vars, n_ineq, n_eq, constr, jac,
+        barrier_parameter, tolerance, enforce_feasibility,
+        stop_criteria, xtol, fun0, grad0, constr_ineq0, jac_ineq0,
+        constr_eq0, jac_eq0)
+    # Define initial parameter for the first iteration.
+    z = np.hstack((x0, s0))
+    fun0_subprob, constr0_subprob = subprob.fun0, subprob.constr0
+    grad0_subprob, jac0_subprob = subprob.grad0, subprob.jac0
+    # Define trust region bounds
+    trust_lb = np.hstack((np.full(subprob.n_vars, -np.inf),
+                          np.full(subprob.n_ineq, -BOUNDARY_PARAMETER)))
+    trust_ub = np.full(subprob.n_vars+subprob.n_ineq, np.inf)
+
+    # Solves a sequence of barrier problems
+    while True:
+        # Solve SQP subproblem
+        z, state = equality_constrained_sqp(
+            subprob.function_and_constraints,
+            subprob.gradient_and_jacobian,
+            subprob.lagrangian_hessian,
+            z, fun0_subprob, grad0_subprob,
+            constr0_subprob, jac0_subprob, subprob.stop_criteria,
+            state, initial_penalty, trust_radius,
+            factorization_method, trust_lb, trust_ub, subprob.scaling)
+        if subprob.terminate:
+            break
+        # Update parameters
+        trust_radius = max(initial_trust_radius,
+                           TRUST_ENLARGEMENT*state.tr_radius)
+        # TODO: Use more advanced strategies from [2]_
+        # to update these parameters.
+        barrier_parameter *= BARRIER_DECAY_RATIO
+        tolerance *= BARRIER_DECAY_RATIO
+        # Update Barrier Problem
+        subprob.update(barrier_parameter, tolerance)
+        # Compute initial values for next iteration
+        fun0_subprob, constr0_subprob = subprob.function_and_constraints(z)
+        grad0_subprob, jac0_subprob = subprob.gradient_and_jacobian(z)
+
+    # Get x and s
+    x = subprob.get_variables(z)
+    return x, state
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_dogleg.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_dogleg.py
new file mode 100644
index 00000000..a54abd60
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_dogleg.py
@@ -0,0 +1,122 @@
+"""Dog-leg trust-region optimization."""
+import numpy as np
+import scipy.linalg
+from ._trustregion import (_minimize_trust_region, BaseQuadraticSubproblem)
+
+__all__ = []
+
+
+def _minimize_dogleg(fun, x0, args=(), jac=None, hess=None,
+                     **trust_region_options):
+    """
+    Minimization of scalar function of one or more variables using
+    the dog-leg trust-region algorithm.
+
+    Options
+    -------
+    initial_trust_radius : float
+        Initial trust-region radius.
+    max_trust_radius : float
+        Maximum value of the trust-region radius. No steps that are longer
+        than this value will be proposed.
+    eta : float
+        Trust region related acceptance stringency for proposed steps.
+    gtol : float
+        Gradient norm must be less than `gtol` before successful
+        termination.
+
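+    A minimal usage sketch through the public API (illustrative only; a
+    strictly convex quadratic is used so the Hessian stays positive
+    definite, as the dog-leg method requires)::
+
+        import numpy as np
+        from scipy.optimize import minimize
+
+        res = minimize(lambda x: x @ x, x0=np.array([3.0, -4.0]),
+                       method='dogleg',
+                       jac=lambda x: 2 * x,
+                       hess=lambda x: 2 * np.eye(2))
+        # res.x is close to the minimizer at the origin
+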
+    """
+    if jac is None:
+        raise ValueError('Jacobian is required for dogleg minimization')
+    if not callable(hess):
+        raise ValueError('Hessian is required for dogleg minimization')
+    return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess,
+                                  subproblem=DoglegSubproblem,
+                                  **trust_region_options)
+
+
+class DoglegSubproblem(BaseQuadraticSubproblem):
+    """Quadratic subproblem solved by the dogleg method"""
+
+    def cauchy_point(self):
+        """
+        The Cauchy point minimizes the quadratic model along the
+        direction of steepest descent.
+        """
+        if self._cauchy_point is None:
+            g = self.jac
+            Bg = self.hessp(g)
+            self._cauchy_point = -(np.dot(g, g) / np.dot(g, Bg)) * g
+        return self._cauchy_point
+
+    def newton_point(self):
+        """
+        The Newton point is a global minimum of the approximate function.
+        """
+        if self._newton_point is None:
+            g = self.jac
+            B = self.hess
+            cho_info = scipy.linalg.cho_factor(B)
+            self._newton_point = -scipy.linalg.cho_solve(cho_info, g)
+        return self._newton_point
+
+    def solve(self, trust_radius):
+        """
+        Minimize a function using the dog-leg trust-region algorithm.
+
+        This algorithm requires function values and first and second derivatives.
+        It also performs a costly Hessian decomposition for most iterations,
+        and the Hessian is required to be positive definite.
+
+        Parameters
+        ----------
+        trust_radius : float
+            We are allowed to wander only this far away from the origin.
+
+        Returns
+        -------
+        p : ndarray
+            The proposed step.
+        hits_boundary : bool
+            True if the proposed step is on the boundary of the trust region.
+
+        Notes
+        -----
+        The Hessian is required to be positive definite.
+
+        References
+        ----------
+        .. [1] Jorge Nocedal and Stephen Wright,
+               Numerical Optimization, second edition,
+               Springer-Verlag, 2006, page 73.
+        """
+
+        # Compute the Newton point.
+        # This is the optimum for the quadratic model function.
+        # If it is inside the trust radius then return this point.
+        p_best = self.newton_point()
+        if scipy.linalg.norm(p_best) < trust_radius:
+            hits_boundary = False
+            return p_best, hits_boundary
+
+        # Compute the Cauchy point.
+        # This is the predicted optimum along the direction of steepest descent.
+        p_u = self.cauchy_point()
+
+        # If the Cauchy point is outside the trust region,
+        # then return the point where the path intersects the boundary.
+        p_u_norm = scipy.linalg.norm(p_u)
+        if p_u_norm >= trust_radius:
+            p_boundary = p_u * (trust_radius / p_u_norm)
+            hits_boundary = True
+            return p_boundary, hits_boundary
+
+        # Compute the intersection of the trust region boundary
+        # and the line segment connecting the Cauchy and Newton points.
+        # This requires solving a quadratic equation.
+        # ||p_u + t*(p_best - p_u)||**2 == trust_radius**2
+        # Solve this for the positive root t using the quadratic formula.
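+        # Writing a = p_u and b = p_best - p_u, the equation expands to
+        #     ||b||**2 * t**2 + 2*(a.b)*t + (||a||**2 - trust_radius**2) = 0,
+        # and tb below is its positive root.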
+        _, tb = self.get_boundaries_intersections(p_u, p_best - p_u,
+                                                  trust_radius)
+        p_boundary = p_u + tb * (p_best - p_u)
+        hits_boundary = True
+        return p_boundary, hits_boundary
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_exact.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_exact.py
new file mode 100644
index 00000000..b3bd7ff5
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_exact.py
@@ -0,0 +1,430 @@
+"""Nearly exact trust-region optimization subproblem."""
+import numpy as np
+from scipy.linalg import (norm, get_lapack_funcs, solve_triangular,
+                          cho_solve)
+from ._trustregion import (_minimize_trust_region, BaseQuadraticSubproblem)
+
+__all__ = ['_minimize_trustregion_exact',
+           'estimate_smallest_singular_value',
+           'singular_leading_submatrix',
+           'IterativeSubproblem']
+
+
+def _minimize_trustregion_exact(fun, x0, args=(), jac=None, hess=None,
+                                **trust_region_options):
+    """
+    Minimization of scalar function of one or more variables using
+    a nearly exact trust-region algorithm.
+
+    Options
+    -------
+    initial_tr_radius : float
+        Initial trust-region radius.
+    max_tr_radius : float
+        Maximum value of the trust-region radius. No steps that are longer
+        than this value will be proposed.
+    eta : float
+        Trust region related acceptance stringency for proposed steps.
+    gtol : float
+        Gradient norm must be less than ``gtol`` before successful
+        termination.
+    """
+
+    if jac is None:
+        raise ValueError('Jacobian is required for trust region '
+                         'exact minimization.')
+    if not callable(hess):
+        raise ValueError('Hessian matrix is required for trust region '
+                         'exact minimization.')
+    return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess,
+                                  subproblem=IterativeSubproblem,
+                                  **trust_region_options)
+
+
+def estimate_smallest_singular_value(U):
+    """Given upper triangular matrix ``U`` estimate the smallest singular
+    value and the correspondent right singular vector in O(n**2) operations.
+
+    Parameters
+    ----------
+    U : ndarray
+        Square upper triangular matrix.
+
+    Returns
+    -------
+    s_min : float
+        Estimated smallest singular value of the provided matrix.
+    z_min : ndarray
+        Estimated right singular vector.
+
+    Notes
+    -----
+    The procedure is based on [1]_ and is done in two steps. First, it finds
+    a vector ``e`` with components selected from {+1, -1} such that the
+    solution ``w`` of the system ``U.T w = e`` is as large as possible.
+    Next it solves ``U v = w``. The smallest singular value is close
+    to ``norm(w)/norm(v)`` and the right singular vector is close
+    to ``v/norm(v)``.
+
+    The estimate improves the more ill-conditioned the matrix is.
+
+    References
+    ----------
+    .. [1] Cline, A. K., Moler, C. B., Stewart, G. W., Wilkinson, J. H.
+           An estimate for the condition number of a matrix.  1979.
+           SIAM Journal on Numerical Analysis, 16(2), 368-375.
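+
+    Examples
+    --------
+    A minimal sketch (not part of the original docstring); ``s_min`` is an
+    upper bound on, and typically close to, the exact smallest singular
+    value::
+
+        import numpy as np
+
+        U = np.triu(np.random.rand(5, 5)) + 5 * np.eye(5)
+        s_min, z_min = estimate_smallest_singular_value(U)
+        # exact value for comparison:
+        # np.linalg.svd(U, compute_uv=False)[-1]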
+    """
+
+    U = np.atleast_2d(U)
+    m, n = U.shape
+
+    if m != n:
+        raise ValueError("A square triangular matrix should be provided.")
+
+    # A vector `e` with components selected from {+1, -1}
+    # is chosen so that the solution `w` to the system
+    # `U.T w = e` is as large as possible. The implementation
+    # is based on algorithm 3.5.1, p. 142, from the reference
+    # below, adapted for a lower triangular matrix.
+
+    p = np.zeros(n)
+    w = np.empty(n)
+
+    # Implemented according to: Golub, G. H., Van Loan, C. F. (2013).
+    # "Matrix computations". Fourth Edition. JHU press. pp. 140-142.
+    for k in range(n):
+        wp = (1-p[k]) / U.T[k, k]
+        wm = (-1-p[k]) / U.T[k, k]
+        pp = p[k+1:] + U.T[k+1:, k]*wp
+        pm = p[k+1:] + U.T[k+1:, k]*wm
+
+        if abs(wp) + norm(pp, 1) >= abs(wm) + norm(pm, 1):
+            w[k] = wp
+            p[k+1:] = pp
+        else:
+            w[k] = wm
+            p[k+1:] = pm
+
+    # The system `U v = w` is solved using backward substitution.
+    v = solve_triangular(U, w)
+
+    v_norm = norm(v)
+    w_norm = norm(w)
+
+    # Smallest singular value
+    s_min = w_norm / v_norm
+
+    # Associated vector
+    z_min = v / v_norm
+
+    return s_min, z_min
+
+
+def gershgorin_bounds(H):
+    """
+    Given a square matrix ``H``, compute upper
+    and lower bounds for its eigenvalues (Gershgorin bounds),
+    as defined in [1]_.
+
+    References
+    ----------
+    .. [1] Conn, A. R., Gould, N. I., & Toint, P. L.
+           Trust region methods. 2000. Siam. pp. 19.
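+
+    Examples
+    --------
+    A minimal sketch (not part of the original docstring)::
+
+        import numpy as np
+
+        H = np.array([[2., 1.], [1., 3.]])
+        lb, ub = gershgorin_bounds(H)
+        # the eigenvalues of H are (5 +/- sqrt(5))/2, roughly 1.38 and
+        # 3.62, and both lie inside [lb, ub] = [1.0, 4.0]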
+    """
+
+    H_diag = np.diag(H)
+    H_diag_abs = np.abs(H_diag)
+    H_row_sums = np.sum(np.abs(H), axis=1)
+    lb = np.min(H_diag + H_diag_abs - H_row_sums)
+    ub = np.max(H_diag - H_diag_abs + H_row_sums)
+
+    return lb, ub
+
+
+def singular_leading_submatrix(A, U, k):
+    """
+    Compute term that makes the leading ``k`` by ``k``
+    submatrix from ``A`` singular.
+
+    Parameters
+    ----------
+    A : ndarray
+        Symmetric matrix that is not positive definite.
+    U : ndarray
+        Upper triangular matrix resulting from an incomplete
+        Cholesky decomposition of matrix ``A``.
+    k : int
+        Positive integer such that the leading k by k submatrix from
+        `A` is the first non-positive definite leading submatrix.
+
+    Returns
+    -------
+    delta : float
+        Amount that should be added to the element (k, k) of the
+        leading k by k submatrix of ``A`` to make it singular.
+    v : ndarray
+        A vector such that ``v.T B v = 0``, where ``B`` is the matrix ``A``
+        after ``delta`` is added to its element (k, k).
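+
+    Examples
+    --------
+    A minimal sketch (not part of the original docstring)::
+
+        import numpy as np
+        from scipy.linalg import get_lapack_funcs
+
+        A = np.array([[1., 2.], [2., 1.]])           # indefinite matrix
+        potrf, = get_lapack_funcs(('potrf',), (A,))
+        U, info = potrf(A, lower=False, clean=True)  # fails with info == 2
+        delta, v = singular_leading_submatrix(A, U, info)
+        # delta == 3, so B = [[1, 2], [2, 4]] is singular, and
+        # v == [-2, 1] satisfies v.T @ B @ v == 0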
+    """
+
+    # Compute delta
+    delta = np.sum(U[:k-1, k-1]**2) - A[k-1, k-1]
+
+    n = len(A)
+
+    # Initialize v
+    v = np.zeros(n)
+    v[k-1] = 1
+
+    # Compute the remaining values of v by solving a triangular system.
+    if k != 1:
+        v[:k-1] = solve_triangular(U[:k-1, :k-1], -U[:k-1, k-1])
+
+    return delta, v
+
+
+class IterativeSubproblem(BaseQuadraticSubproblem):
+    """Quadratic subproblem solved by nearly exact iterative method.
+
+    Notes
+    -----
+    This subproblem solver was based on [1]_, [2]_ and [3]_,
+    which implement similar algorithms. The algorithm is basically
+    that of [1]_ but ideas from [2]_ and [3]_ were also used.
+
+    References
+    ----------
+    .. [1] A.R. Conn, N.I. Gould, and P.L. Toint, "Trust region methods",
+           Siam, pp. 169-200, 2000.
+    .. [2] J. Nocedal and  S. Wright, "Numerical optimization",
+           Springer Science & Business Media. pp. 83-91, 2006.
+    .. [3] J.J. More and D.C. Sorensen, "Computing a trust region step",
+           SIAM Journal on Scientific and Statistical Computing, vol. 4(3),
+           pp. 553-572, 1983.
+    """
+
+    # UPDATE_COEFF appears in reference [1]_
+    # in formula 7.3.14 (p. 190), where it is named "theta".
+    # As recommended there, its value is fixed at 0.01.
+    UPDATE_COEFF = 0.01
+
+    EPS = np.finfo(float).eps
+
+    def __init__(self, x, fun, jac, hess, hessp=None,
+                 k_easy=0.1, k_hard=0.2):
+
+        super().__init__(x, fun, jac, hess)
+
+        # When the trust region shrinks in two consecutive
+        # calculations (``tr_radius < previous_tr_radius``)
+        # the lower bound ``lambda_lb`` may be reused,
+        # facilitating convergence. To indicate that no
+        # previous value is known, ``previous_tr_radius``
+        # is initially set to -1 and ``lambda_lb`` to None.
+        self.previous_tr_radius = -1
+        self.lambda_lb = None
+
+        self.niter = 0
+
+        # ``k_easy`` and ``k_hard`` are parameters used
+        # to determine the stop criteria of the iterative
+        # subproblem solver. Take a look at pp. 194-197
+        # from reference [1]_ for a more detailed description.
+        self.k_easy = k_easy
+        self.k_hard = k_hard
+
+        # Get the LAPACK function for Cholesky decomposition.
+        # The implemented SciPy wrapper does not return
+        # the incomplete factorization needed by the method.
+        self.cholesky, = get_lapack_funcs(('potrf',), (self.hess,))
+
+        # Get info about Hessian
+        self.dimension = len(self.hess)
+        self.hess_gershgorin_lb,\
+            self.hess_gershgorin_ub = gershgorin_bounds(self.hess)
+        self.hess_inf = norm(self.hess, np.inf)
+        self.hess_fro = norm(self.hess, 'fro')
+
+        # A constant such that for vectors smaller than that
+        # backward substitution is not reliable. It was established
+        # based on Golub, G. H., Van Loan, C. F. (2013).
+        # "Matrix computations". Fourth Edition. JHU press., p.165.
+        self.CLOSE_TO_ZERO = self.dimension * self.EPS * self.hess_inf
+
+    def _initial_values(self, tr_radius):
+        """Given a trust radius, return a good initial guess for
+        the damping factor, the lower bound and the upper bound.
+        The values were chosen according to the guidelines in
+        section 7.3.8 (p. 192) from [1]_.
+        """
+
+        # Upper bound for the damping factor
+        lambda_ub = max(0, self.jac_mag/tr_radius + min(-self.hess_gershgorin_lb,
+                                                        self.hess_fro,
+                                                        self.hess_inf))
+
+        # Lower bound for the damping factor
+        lambda_lb = max(0, -min(self.hess.diagonal()),
+                        self.jac_mag/tr_radius - min(self.hess_gershgorin_ub,
+                                                     self.hess_fro,
+                                                     self.hess_inf))
+
+        # Improve bounds with previous info
+        if tr_radius < self.previous_tr_radius:
+            lambda_lb = max(self.lambda_lb, lambda_lb)
+
+        # Initial guess for the damping factor
+        if lambda_lb == 0:
+            lambda_initial = 0
+        else:
+            lambda_initial = max(np.sqrt(lambda_lb * lambda_ub),
+                                 lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb))
+
+        return lambda_initial, lambda_lb, lambda_ub
+
+    def solve(self, tr_radius):
+        """Solve quadratic subproblem"""
+
+        lambda_current, lambda_lb, lambda_ub = self._initial_values(tr_radius)
+        n = self.dimension
+        hits_boundary = True
+        already_factorized = False
+        self.niter = 0
+
+        while True:
+
+            # Compute Cholesky factorization
+            if already_factorized:
+                already_factorized = False
+            else:
+                H = self.hess+lambda_current*np.eye(n)
+                U, info = self.cholesky(H, lower=False,
+                                        overwrite_a=False,
+                                        clean=True)
+
+            self.niter += 1
+
+            # Check if factorization succeeded
+            if info == 0 and self.jac_mag > self.CLOSE_TO_ZERO:
+                # Successful factorization
+
+                # Solve `U.T U p = -g`
+                p = cho_solve((U, False), -self.jac)
+
+                p_norm = norm(p)
+
+                # Check for interior convergence
+                if p_norm <= tr_radius and lambda_current == 0:
+                    hits_boundary = False
+                    break
+
+                # Solve `U.T w = p`
+                w = solve_triangular(U, p, trans='T')
+
+                w_norm = norm(w)
+
+                # Compute Newton step according to
+                # formula (4.44) p.87 from ref [2]_.
+                delta_lambda = (p_norm/w_norm)**2 * (p_norm-tr_radius)/tr_radius
+                lambda_new = lambda_current + delta_lambda
+
+                if p_norm < tr_radius:  # Inside boundary
+                    s_min, z_min = estimate_smallest_singular_value(U)
+
+                    ta, tb = self.get_boundaries_intersections(p, z_min,
+                                                               tr_radius)
+
+                    # Choose `step_len` with the smallest magnitude.
+                    # The reason for this choice is explained at
+                    # ref [3]_, p. 6 (Immediately before the formula
+                    # for `tau`).
+                    step_len = min([ta, tb], key=abs)
+
+                    # Compute the quadratic term  (p.T*H*p)
+                    quadratic_term = np.dot(p, np.dot(H, p))
+
+                    # Check stop criteria
+                    relative_error = (step_len**2 * s_min**2) / (quadratic_term + lambda_current*tr_radius**2)
+                    if relative_error <= self.k_hard:
+                        p += step_len * z_min
+                        break
+
+                    # Update uncertainty bounds
+                    lambda_ub = lambda_current
+                    lambda_lb = max(lambda_lb, lambda_current - s_min**2)
+
+                    # Compute Cholesky factorization
+                    H = self.hess + lambda_new*np.eye(n)
+                    c, info = self.cholesky(H, lower=False,
+                                            overwrite_a=False,
+                                            clean=True)
+
+                    # Check if the factorization has succeeded
+                    if info == 0:  # Successful factorization
+                        # Update damping factor
+                        lambda_current = lambda_new
+                        already_factorized = True
+                    else:  # Unsuccessful factorization
+                        # Update uncertainty bounds
+                        lambda_lb = max(lambda_lb, lambda_new)
+
+                        # Update damping factor
+                        lambda_current = max(np.sqrt(lambda_lb * lambda_ub),
+                                             lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb))
+
+                else:  # Outside boundary
+                    # Check stop criteria
+                    relative_error = abs(p_norm - tr_radius) / tr_radius
+                    if relative_error <= self.k_easy:
+                        break
+
+                    # Update uncertainty bounds
+                    lambda_lb = lambda_current
+
+                    # Update damping factor
+                    lambda_current = lambda_new
+
+            elif info == 0 and self.jac_mag <= self.CLOSE_TO_ZERO:
+                # jac_mag very close to zero
+
+                # Check for interior convergence
+                if lambda_current == 0:
+                    p = np.zeros(n)
+                    hits_boundary = False
+                    break
+
+                s_min, z_min = estimate_smallest_singular_value(U)
+                step_len = tr_radius
+
+                # Check stop criteria
+                if step_len**2 * s_min**2 <= self.k_hard * lambda_current * tr_radius**2:
+                    p = step_len * z_min
+                    break
+
+                # Update uncertainty bounds
+                lambda_ub = lambda_current
+                lambda_lb = max(lambda_lb, lambda_current - s_min**2)
+
+                # Update damping factor
+                lambda_current = max(np.sqrt(lambda_lb * lambda_ub),
+                                     lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb))
+
+            else:  # Unsuccessful factorization
+
+                # Compute auxiliary terms
+                delta, v = singular_leading_submatrix(H, U, info)
+                v_norm = norm(v)
+
+                # Update uncertainty interval
+                lambda_lb = max(lambda_lb, lambda_current + delta/v_norm**2)
+
+                # Update damping factor
+                lambda_current = max(np.sqrt(lambda_lb * lambda_ub),
+                                     lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb))
+
+        self.lambda_lb = lambda_lb
+        self.lambda_current = lambda_current
+        self.previous_tr_radius = tr_radius
+
+        return p, hits_boundary
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_krylov.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_krylov.py
new file mode 100644
index 00000000..54e861ae
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_krylov.py
@@ -0,0 +1,65 @@
+from ._trustregion import (_minimize_trust_region)
+from ._trlib import (get_trlib_quadratic_subproblem)
+
+__all__ = ['_minimize_trust_krylov']
+
+def _minimize_trust_krylov(fun, x0, args=(), jac=None, hess=None, hessp=None,
+                           inexact=True, **trust_region_options):
+    """
+    Minimization of a scalar function of one or more variables using
+    a nearly exact trust-region algorithm that only requires matrix-vector
+    products with the Hessian matrix.
+
+    .. versionadded:: 1.0.0
+
+    Options
+    -------
+    inexact : bool, optional
+        Accuracy to solve subproblems. If True, requires fewer nonlinear
+        iterations but more matrix-vector products.
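+
+    A minimal usage sketch through the public API (illustrative only)::
+
+        import numpy as np
+        from scipy.optimize import minimize
+
+        res = minimize(lambda x: x @ x, x0=np.array([3.0, -4.0]),
+                       method='trust-krylov',
+                       jac=lambda x: 2 * x,
+                       hessp=lambda x, p: 2 * p,
+                       options={'inexact': True})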
+    """
+
+    if jac is None:
+        raise ValueError('Jacobian is required for Krylov trust-region '
+                         'minimization')
+    if hess is None and hessp is None:
+        raise ValueError('Either the Hessian or the Hessian-vector product '
+                         'is required for Krylov trust-region minimization')
+
+    # tol_rel specifies the termination tolerance relative to the initial
+    # gradient norm in the Krylov subspace iteration.
+
+    # - tol_rel_i specifies the tolerance for interior convergence.
+    # - tol_rel_b specifies the tolerance for boundary convergence.
+    #   In nonlinear programming applications it is not necessary to solve
+    #   the boundary case as exactly as the interior case.
+
+    # - setting tol_rel_i=-2 leads to a forcing sequence in the Krylov
+    #   subspace iteration that gives quadratic convergence if the
+    #   trust region eventually becomes inactive.
+    # - setting tol_rel_b=-3 leads to a forcing sequence in the Krylov
+    #   subspace iteration that gives superlinear convergence as long
+    #   as the iterates hit the trust region boundary.
+
+    # For details consult the documentation of trlib_krylov_min
+    # in _trlib/trlib_krylov.h
+    #
+    # Optimality of this choice of parameters among a range of possibilities
+    # has been tested on the unconstrained subset of the CUTEst library.
+
+    if inexact:
+        return _minimize_trust_region(fun, x0, args=args, jac=jac,
+                                      hess=hess, hessp=hessp,
+                                      subproblem=get_trlib_quadratic_subproblem(
+                                          tol_rel_i=-2.0, tol_rel_b=-3.0,
+                                          disp=trust_region_options.get('disp', False)
+                                          ),
+                                      **trust_region_options)
+    else:
+        return _minimize_trust_region(fun, x0, args=args, jac=jac,
+                                      hess=hess, hessp=hessp,
+                                      subproblem=get_trlib_quadratic_subproblem(
+                                          tol_rel_i=1e-8, tol_rel_b=1e-6,
+                                          disp=trust_region_options.get('disp', False)
+                                          ),
+                                      **trust_region_options)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_ncg.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_ncg.py
new file mode 100644
index 00000000..fed17ff8
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_trustregion_ncg.py
@@ -0,0 +1,126 @@
+"""Newton-CG trust-region optimization."""
+import math
+
+import numpy as np
+import scipy.linalg
+from ._trustregion import (_minimize_trust_region, BaseQuadraticSubproblem)
+
+__all__ = []
+
+
+def _minimize_trust_ncg(fun, x0, args=(), jac=None, hess=None, hessp=None,
+                        **trust_region_options):
+    """
+    Minimization of scalar function of one or more variables using
+    the Newton conjugate gradient trust-region algorithm.
+
+    Options
+    -------
+    initial_trust_radius : float
+        Initial trust-region radius.
+    max_trust_radius : float
+        Maximum value of the trust-region radius. No steps that are longer
+        than this value will be proposed.
+    eta : float
+        Trust region related acceptance stringency for proposed steps.
+    gtol : float
+        Gradient norm must be less than `gtol` before successful
+        termination.
+
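+    A minimal usage sketch through the public API (illustrative only; only
+    a Hessian-vector product is supplied)::
+
+        import numpy as np
+        from scipy.optimize import minimize
+
+        res = minimize(lambda x: x @ x, x0=np.array([3.0, -4.0]),
+                       method='trust-ncg',
+                       jac=lambda x: 2 * x,
+                       hessp=lambda x, p: 2 * p)
+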
+    """
+    if jac is None:
+        raise ValueError('Jacobian is required for Newton-CG trust-region '
+                         'minimization')
+    if hess is None and hessp is None:
+        raise ValueError('Either the Hessian or the Hessian-vector product '
+                         'is required for Newton-CG trust-region minimization')
+    return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess,
+                                  hessp=hessp, subproblem=CGSteihaugSubproblem,
+                                  **trust_region_options)
+
+
+class CGSteihaugSubproblem(BaseQuadraticSubproblem):
+    """Quadratic subproblem solved by a conjugate gradient method"""
+    def solve(self, trust_radius):
+        """
+        Solve the subproblem using a conjugate gradient method.
+
+        Parameters
+        ----------
+        trust_radius : float
+            We are allowed to wander only this far away from the origin.
+
+        Returns
+        -------
+        p : ndarray
+            The proposed step.
+        hits_boundary : bool
+            True if the proposed step is on the boundary of the trust region.
+
+        Notes
+        -----
+        This is algorithm (7.2) of Nocedal and Wright 2nd edition.
+        Only the function that computes the Hessian-vector product is required.
+        The Hessian itself is not required, and the Hessian does
+        not need to be positive semidefinite.
+        """
+
+        # define the origin (the zero step)
+        p_origin = np.zeros_like(self.jac)
+
+        # define a default tolerance
+        tolerance = min(0.5, math.sqrt(self.jac_mag)) * self.jac_mag
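+        # (a standard inexact-Newton forcing sequence,
+        #  eta = min(0.5, sqrt(||g||)); CG stops once the residual norm
+        #  falls below eta * ||g||, giving superlinear convergence;
+        #  cf. Nocedal & Wright, Ch. 7)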
+
+        # Stop immediately, returning the zero step, if the
+        # gradient norm is already below the tolerance.
+        if self.jac_mag < tolerance:
+            hits_boundary = False
+            return p_origin, hits_boundary
+
+        # init the state for the first iteration
+        z = p_origin
+        r = self.jac
+        d = -r
+
+        # Search for the min of the approximation of the objective function.
+        while True:
+
+            # do an iteration
+            Bd = self.hessp(d)
+            dBd = np.dot(d, Bd)
+            if dBd <= 0:
+                # Look at the two boundary points.
+                # Find both values of t to get the boundary points such that
+                # ||z + t d|| == trust_radius
+                # and then choose the one with the predicted min value.
+                ta, tb = self.get_boundaries_intersections(z, d, trust_radius)
+                pa = z + ta * d
+                pb = z + tb * d
+                if self(pa) < self(pb):
+                    p_boundary = pa
+                else:
+                    p_boundary = pb
+                hits_boundary = True
+                return p_boundary, hits_boundary
+            r_squared = np.dot(r, r)
+            alpha = r_squared / dBd
+            z_next = z + alpha * d
+            if scipy.linalg.norm(z_next) >= trust_radius:
+                # Find t >= 0 to get the boundary point such that
+                # ||z + t d|| == trust_radius
+                ta, tb = self.get_boundaries_intersections(z, d, trust_radius)
+                p_boundary = z + tb * d
+                hits_boundary = True
+                return p_boundary, hits_boundary
+            r_next = r + alpha * Bd
+            r_next_squared = np.dot(r_next, r_next)
+            if math.sqrt(r_next_squared) < tolerance:
+                hits_boundary = False
+                return z_next, hits_boundary
+            beta_next = r_next_squared / r_squared
+            d_next = -r_next + beta_next * d
+
+            # update the state for the next iteration
+            z = z_next
+            r = r_next
+            d = d_next
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_tstutils.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_tstutils.py
new file mode 100644
index 00000000..4ab69df5
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_tstutils.py
@@ -0,0 +1,676 @@
+r"""
+Parameters used in test and benchmark methods.
+
+Collections of test cases suitable for testing 1-D root-finders:
+  'original': The original benchmarking functions.
+     Real-valued functions of real-valued inputs on an interval
+     with a zero.
+     f1, ..., f3 are continuous and infinitely differentiable.
+     f4 has a left and right discontinuity at the root.
+     f5 has a root at 1 replacing a 1st order pole.
+     f6 is randomly positive on one side of the root,
+     randomly negative on the other.
+     f4 - f6 are not continuous at the root.
+
+  'aps': The test problems from the 1995 TOMS paper
+     "Algorithm 748: Enclosing Zeros of Continuous Functions"
+     by Alefeld, Potra and Shi. Real-valued functions of
+     real-valued inputs on an interval with a zero.
+     Suitable for methods that start with an enclosing interval
+     and can use derivatives up to 2nd order.
+
+  'complex': Some complex-valued functions of complex-valued inputs.
+     No enclosing bracket is provided.
+     Suitable for methods that use one or more starting values
+     and can use derivatives up to 2nd order.
+
+  The test cases are provided as a list of dictionaries. The dictionary
+  keys will be a subset of:
+  ["f", "fprime", "fprime2", "args", "bracket", "smoothness",
+  "a", "b", "x0", "x1", "root", "ID"]
+"""
+
+# Sources:
+#  [1] Alefeld, G. E. and Potra, F. A. and Shi, Yixun,
+#      "Algorithm 748: Enclosing Zeros of Continuous Functions",
+#      ACM Trans. Math. Softw. Volume 21 (1995),
+#      doi = {10.1145/210089.210111}
+
+from random import random
+
+import numpy as np
+
+from scipy.optimize import _zeros_py as cc
+
+# "description" refers to the original functions
+description = """
+f2 is a symmetric parabola, x**2 - 1
+f3 is a quartic polynomial with large hump in interval
+f4 is a step function with a discontinuity at 1
+f5 is a hyperbola with a vertical asymptote at 1
+f6 has random values positive to the left of 1, negative to the right
+
+Of course, these are not real problems. They just test how the
+'good' solvers behave in bad circumstances where bisection is
+really the best. A good solver should not be much worse than
+bisection in such circumstances, while being faster for smooth
+monotone sorts of functions.
+"""
+
+
+def f1(x):
+    r"""f1 is a quadratic with roots at 0 and 1"""
+    return x * (x - 1.)
+
+
+def f1_fp(x):
+    return 2 * x - 1
+
+
+def f1_fpp(x):
+    return 2
+
+
+def f2(x):
+    r"""f2 is a symmetric parabola, x**2 - 1"""
+    return x**2 - 1
+
+
+def f2_fp(x):
+    return 2 * x
+
+
+def f2_fpp(x):
+    return 2
+
+
+def f3(x):
+    r"""A quartic with roots at 0, 1, 2 and 3"""
+    return x * (x - 1.) * (x - 2.) * (x - 3.)  # x**4 - 6x**3 + 11x**2 - 6x
+
+
+def f3_fp(x):
+    return 4 * x**3 - 18 * x**2 + 22 * x - 6
+
+
+def f3_fpp(x):
+    return 12 * x**2 - 36 * x + 22
+
+
+def f4(x):
+    r"""Piecewise linear, left- and right- discontinuous at x=1, the root."""
+    if x > 1:
+        return 1.0 + .1 * x
+    if x < 1:
+        return -1.0 + .1 * x
+    return 0
+
+
+def f5(x):
+    r"""Hyperbola with a pole at x=1, but pole replaced with 0. Not continuous at root."""
+    if x != 1:
+        return 1.0 / (1. - x)
+    return 0
+
+
+# f6(x) returns a memoized random value. Without memoization, calling f6
+# twice with the same x would return different values, making it a source
+# of random values rather than a function (with random values).
+_f6_cache = {}
+def f6(x):
+    v = _f6_cache.get(x, None)
+    if v is None:
+        if x > 1:
+            v = random()
+        elif x < 1:
+            v = -random()
+        else:
+            v = 0
+        _f6_cache[x] = v
+    return v
+
+
+# Each Original test case has
+# - a function and its two derivatives,
+# - additional arguments,
+# - a bracket enclosing a root,
+# - the order of differentiability (smoothness) on this interval
+# - a starting value for methods which don't require a bracket
+# - the root (inside the bracket)
+# - an Identifier of the test case
+
+_ORIGINAL_TESTS_KEYS = ["f", "fprime", "fprime2", "args", "bracket", "smoothness", "x0", "root", "ID"]
+_ORIGINAL_TESTS = [
+    [f1, f1_fp, f1_fpp, (), [0.5, np.sqrt(3)], np.inf, 0.6, 1.0, "original.01.00"],
+    [f2, f2_fp, f2_fpp, (), [0.5, np.sqrt(3)], np.inf, 0.6, 1.0, "original.02.00"],
+    [f3, f3_fp, f3_fpp, (), [0.5, np.sqrt(3)], np.inf, 0.6, 1.0, "original.03.00"],
+    [f4, None, None, (), [0.5, np.sqrt(3)], -1, 0.6, 1.0, "original.04.00"],
+    [f5, None, None, (), [0.5, np.sqrt(3)], -1, 0.6, 1.0, "original.05.00"],
+    [f6, None, None, (), [0.5, np.sqrt(3)], -np.inf, 0.6, 1.0, "original.06.00"]
+]
+
+_ORIGINAL_TESTS_DICTS = [dict(zip(_ORIGINAL_TESTS_KEYS, testcase)) for testcase in _ORIGINAL_TESTS]
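+
+# A minimal sketch (not part of the original source) of how these test-case
+# dictionaries can drive a bracketing solver such as `brentq`:
+#
+#     from scipy.optimize import brentq
+#     for case in _ORIGINAL_TESTS_DICTS:
+#         a, b = case['bracket']
+#         root = brentq(case['f'], a, b, args=case['args'])
+#         # each computed root should be close to case['root']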
+
+#   ##################
+#   "APS" test cases
+#   Functions and test cases that appear in [1]
+
+
+def aps01_f(x):
+    r"""Straightforward sum of trigonometric function and polynomial"""
+    return np.sin(x) - x / 2
+
+
+def aps01_fp(x):
+    return np.cos(x) - 1.0 / 2
+
+
+def aps01_fpp(x):
+    return -np.sin(x)
+
+
+def aps02_f(x):
+    r"""poles at x=n**2, 1st and 2nd derivatives at root are also close to 0"""
+    ii = np.arange(1, 21)
+    return -2 * np.sum((2 * ii - 5)**2 / (x - ii**2)**3)
+
+
+def aps02_fp(x):
+    ii = np.arange(1, 21)
+    return 6 * np.sum((2 * ii - 5)**2 / (x - ii**2)**4)
+
+
+def aps02_fpp(x):
+    ii = np.arange(1, 21)
+    return -24 * np.sum((2 * ii - 5)**2 / (x - ii**2)**5)
+
+
+def aps03_f(x, a, b):
+    r"""Rapidly changing at the root"""
+    return a * x * np.exp(b * x)
+
+
+def aps03_fp(x, a, b):
+    return a * (b * x + 1) * np.exp(b * x)
+
+
+def aps03_fpp(x, a, b):
+    return a * (b * (b * x + 1) + b) * np.exp(b * x)
+
+
+def aps04_f(x, n, a):
+    r"""Medium-degree polynomial"""
+    return x**n - a
+
+
+def aps04_fp(x, n, a):
+    return n * x**(n - 1)
+
+
+def aps04_fpp(x, n, a):
+    return n * (n - 1) * x**(n - 2)
+
+
+def aps05_f(x):
+    r"""Simple Trigonometric function"""
+    return np.sin(x) - 1.0 / 2
+
+
+def aps05_fp(x):
+    return np.cos(x)
+
+
+def aps05_fpp(x):
+    return -np.sin(x)
+
+
+def aps06_f(x, n):
+    r"""Exponential rapidly changing from -1 to 1 at x=0"""
+    return 2 * x * np.exp(-n) - 2 * np.exp(-n * x) + 1
+
+
+def aps06_fp(x, n):
+    return 2 * np.exp(-n) + 2 * n * np.exp(-n * x)
+
+
+def aps06_fpp(x, n):
+    return -2 * n * n * np.exp(-n * x)
+
+
+def aps07_f(x, n):
+    r"""Upside down parabola with parametrizable height"""
+    return (1 + (1 - n)**2) * x - (1 - n * x)**2
+
+
+def aps07_fp(x, n):
+    return (1 + (1 - n)**2) + 2 * n * (1 - n * x)
+
+
+def aps07_fpp(x, n):
+    return -2 * n * n
+
+
+def aps08_f(x, n):
+    r"""Degree n polynomial"""
+    return x * x - (1 - x)**n
+
+
+def aps08_fp(x, n):
+    return 2 * x + n * (1 - x)**(n - 1)
+
+
+def aps08_fpp(x, n):
+    return 2 - n * (n - 1) * (1 - x)**(n - 2)
+
+
+def aps09_f(x, n):
+    r"""Upside down quartic with parametrizable height"""
+    return (1 + (1 - n)**4) * x - (1 - n * x)**4
+
+
+def aps09_fp(x, n):
+    return (1 + (1 - n)**4) + 4 * n * (1 - n * x)**3
+
+
+def aps09_fpp(x, n):
+    return -12 * n**2 * (1 - n * x)**2
+
+
+def aps10_f(x, n):
+    r"""Exponential plus a polynomial"""
+    return np.exp(-n * x) * (x - 1) + x**n
+
+
+def aps10_fp(x, n):
+    return np.exp(-n * x) * (-n * (x - 1) + 1) + n * x**(n - 1)
+
+
+def aps10_fpp(x, n):
+    return np.exp(-n * x) * (-n * (-n * (x - 1) + 1) - n) + n * (n - 1) * x**(n - 2)
+
+
+def aps11_f(x, n):
+    r"""Rational function with a zero at x=1/n and a pole at x=0"""
+    return (n * x - 1) / ((n - 1) * x)
+
+
+def aps11_fp(x, n):
+    return 1 / (n - 1) / x**2
+
+
+def aps11_fpp(x, n):
+    return -2 / (n - 1) / x**3
+
+
+def aps12_f(x, n):
+    r"""nth root of x, with a zero at x=n"""
+    return np.power(x, 1.0 / n) - np.power(n, 1.0 / n)
+
+
+def aps12_fp(x, n):
+    return np.power(x, (1.0 - n) / n) / n
+
+
+def aps12_fpp(x, n):
+    return np.power(x, (1.0 - 2 * n) / n) * (1.0 / n) * (1.0 - n) / n
+
+
+_MAX_EXPABLE = np.log(np.finfo(float).max)
+
+
+def aps13_f(x):
+    r"""Function with *all* derivatives 0 at the root"""
+    if x == 0:
+        return 0
+    # x2 = 1.0/x**2
+    # if x2 > 708:
+    #     return 0
+    y = 1 / x**2
+    if y > _MAX_EXPABLE:
+        return 0
+    return x / np.exp(y)
+
+
+def aps13_fp(x):
+    if x == 0:
+        return 0
+    y = 1 / x**2
+    if y > _MAX_EXPABLE:
+        return 0
+    return (1 + 2 / x**2) / np.exp(y)
+
+
+def aps13_fpp(x):
+    if x == 0:
+        return 0
+    y = 1 / x**2
+    if y > _MAX_EXPABLE:
+        return 0
+    return 2 * (2 - x**2) / x**5 / np.exp(y)
+
+
+def aps14_f(x, n):
+    r"""0 for negative x-values, trigonometric+linear for x positive"""
+    if x <= 0:
+        return -n / 20.0
+    return n / 20.0 * (x / 1.5 + np.sin(x) - 1)
+
+
+def aps14_fp(x, n):
+    if x <= 0:
+        return 0
+    return n / 20.0 * (1.0 / 1.5 + np.cos(x))
+
+
+def aps14_fpp(x, n):
+    if x <= 0:
+        return 0
+    return -n / 20.0 * (np.sin(x))
+
+
+def aps15_f(x, n):
+    r"""piecewise linear, constant outside of [0, 0.002/(1+n)]"""
+    if x < 0:
+        return -0.859
+    if x > 2 * 1e-3 / (1 + n):
+        return np.e - 1.859
+    return np.exp((n + 1) * x / 2 * 1000) - 1.859
+
+
+def aps15_fp(x, n):
+    if not 0 <= x <= 2 * 1e-3 / (1 + n):
+        return 0  # the function is constant outside the interval
+    return np.exp((n + 1) * x / 2 * 1000) * (n + 1) / 2 * 1000
+
+
+def aps15_fpp(x, n):
+    if not 0 <= x <= 2 * 1e-3 / (1 + n):
+        return 0  # the function is constant outside the interval
+    return np.exp((n + 1) * x / 2 * 1000) * (n + 1) / 2 * 1000 * (n + 1) / 2 * 1000
+
+
+# Each APS test case has
+# - a function and its two derivatives,
+# - additional arguments,
+# - a bracket enclosing a root,
+# - the order of differentiability of the function on this interval
+# - a starting value for methods which don't require a bracket
+# - the root (inside the bracket)
+# - an Identifier of the test case
+#
+# Algorithm 748 is a bracketing algorithm so a bracketing interval was provided
+# in [1] for each test case. Newton and Halley methods need a single
+# starting point x0, which was chosen to be near the middle of the interval,
+# unless that would have made the problem too easy.
+
+_APS_TESTS_KEYS = ["f", "fprime", "fprime2", "args", "bracket", "smoothness", "x0", "root", "ID"]
+_APS_TESTS = [
+    [aps01_f, aps01_fp, aps01_fpp, (), [np.pi / 2, np.pi], np.inf, 3, 1.89549426703398094e+00, "aps.01.00"],
+    [aps02_f, aps02_fp, aps02_fpp, (), [1 + 1e-9, 4 - 1e-9], np.inf, 2, 3.02291534727305677e+00, "aps.02.00"],
+    [aps02_f, aps02_fp, aps02_fpp, (), [4 + 1e-9, 9 - 1e-9], np.inf, 5, 6.68375356080807848e+00, "aps.02.01"],
+    [aps02_f, aps02_fp, aps02_fpp, (), [9 + 1e-9, 16 - 1e-9], np.inf, 10, 1.12387016550022114e+01, "aps.02.02"],
+    [aps02_f, aps02_fp, aps02_fpp, (), [16 + 1e-9, 25 - 1e-9], np.inf, 17, 1.96760000806234103e+01, "aps.02.03"],
+    [aps02_f, aps02_fp, aps02_fpp, (), [25 + 1e-9, 36 - 1e-9], np.inf, 26, 2.98282273265047557e+01, "aps.02.04"],
+    [aps02_f, aps02_fp, aps02_fpp, (), [36 + 1e-9, 49 - 1e-9], np.inf, 37, 4.19061161952894139e+01, "aps.02.05"],
+    [aps02_f, aps02_fp, aps02_fpp, (), [49 + 1e-9, 64 - 1e-9], np.inf, 50, 5.59535958001430913e+01, "aps.02.06"],
+    [aps02_f, aps02_fp, aps02_fpp, (), [64 + 1e-9, 81 - 1e-9], np.inf, 65, 7.19856655865877997e+01, "aps.02.07"],
+    [aps02_f, aps02_fp, aps02_fpp, (), [81 + 1e-9, 100 - 1e-9], np.inf, 82, 9.00088685391666701e+01, "aps.02.08"],
+    [aps02_f, aps02_fp, aps02_fpp, (), [100 + 1e-9, 121 - 1e-9], np.inf, 101, 1.10026532748330197e+02, "aps.02.09"],
+    [aps03_f, aps03_fp, aps03_fpp, (-40, -1), [-9, 31], np.inf, -2, 0, "aps.03.00"],
+    [aps03_f, aps03_fp, aps03_fpp, (-100, -2), [-9, 31], np.inf, -2, 0, "aps.03.01"],
+    [aps03_f, aps03_fp, aps03_fpp, (-200, -3), [-9, 31], np.inf, -2, 0, "aps.03.02"],
+    [aps04_f, aps04_fp, aps04_fpp, (4, 0.2), [0, 5], np.inf, 2.5, 6.68740304976422006e-01, "aps.04.00"],
+    [aps04_f, aps04_fp, aps04_fpp, (6, 0.2), [0, 5], np.inf, 2.5, 7.64724491331730039e-01, "aps.04.01"],
+    [aps04_f, aps04_fp, aps04_fpp, (8, 0.2), [0, 5], np.inf, 2.5, 8.17765433957942545e-01, "aps.04.02"],
+    [aps04_f, aps04_fp, aps04_fpp, (10, 0.2), [0, 5], np.inf, 2.5, 8.51339922520784609e-01, "aps.04.03"],
+    [aps04_f, aps04_fp, aps04_fpp, (12, 0.2), [0, 5], np.inf, 2.5, 8.74485272221167897e-01, "aps.04.04"],
+    [aps04_f, aps04_fp, aps04_fpp, (4, 1), [0, 5], np.inf, 2.5, 1, "aps.04.05"],
+    [aps04_f, aps04_fp, aps04_fpp, (6, 1), [0, 5], np.inf, 2.5, 1, "aps.04.06"],
+    [aps04_f, aps04_fp, aps04_fpp, (8, 1), [0, 5], np.inf, 2.5, 1, "aps.04.07"],
+    [aps04_f, aps04_fp, aps04_fpp, (10, 1), [0, 5], np.inf, 2.5, 1, "aps.04.08"],
+    [aps04_f, aps04_fp, aps04_fpp, (12, 1), [0, 5], np.inf, 2.5, 1, "aps.04.09"],
+    [aps04_f, aps04_fp, aps04_fpp, (8, 1), [-0.95, 4.05], np.inf, 1.5, 1, "aps.04.10"],
+    [aps04_f, aps04_fp, aps04_fpp, (10, 1), [-0.95, 4.05], np.inf, 1.5, 1, "aps.04.11"],
+    [aps04_f, aps04_fp, aps04_fpp, (12, 1), [-0.95, 4.05], np.inf, 1.5, 1, "aps.04.12"],
+    [aps04_f, aps04_fp, aps04_fpp, (14, 1), [-0.95, 4.05], np.inf, 1.5, 1, "aps.04.13"],
+    [aps05_f, aps05_fp, aps05_fpp, (), [0, 1.5], np.inf, 1.3, np.pi / 6, "aps.05.00"],
+    [aps06_f, aps06_fp, aps06_fpp, (1,), [0, 1], np.inf, 0.5, 4.22477709641236709e-01, "aps.06.00"],
+    [aps06_f, aps06_fp, aps06_fpp, (2,), [0, 1], np.inf, 0.5, 3.06699410483203705e-01, "aps.06.01"],
+    [aps06_f, aps06_fp, aps06_fpp, (3,), [0, 1], np.inf, 0.5, 2.23705457654662959e-01, "aps.06.02"],
+    [aps06_f, aps06_fp, aps06_fpp, (4,), [0, 1], np.inf, 0.5, 1.71719147519508369e-01, "aps.06.03"],
+    [aps06_f, aps06_fp, aps06_fpp, (5,), [0, 1], np.inf, 0.4, 1.38257155056824066e-01, "aps.06.04"],
+    [aps06_f, aps06_fp, aps06_fpp, (20,), [0, 1], np.inf, 0.1, 3.46573590208538521e-02, "aps.06.05"],
+    [aps06_f, aps06_fp, aps06_fpp, (40,), [0, 1], np.inf, 5e-02, 1.73286795139986315e-02, "aps.06.06"],
+    [aps06_f, aps06_fp, aps06_fpp, (60,), [0, 1], np.inf, 1.0 / 30, 1.15524530093324210e-02, "aps.06.07"],
+    [aps06_f, aps06_fp, aps06_fpp, (80,), [0, 1], np.inf, 2.5e-02, 8.66433975699931573e-03, "aps.06.08"],
+    [aps06_f, aps06_fp, aps06_fpp, (100,), [0, 1], np.inf, 2e-02, 6.93147180559945415e-03, "aps.06.09"],
+    [aps07_f, aps07_fp, aps07_fpp, (5,), [0, 1], np.inf, 0.4, 3.84025518406218985e-02, "aps.07.00"],
+    [aps07_f, aps07_fp, aps07_fpp, (10,), [0, 1], np.inf, 0.4, 9.90000999800049949e-03, "aps.07.01"],
+    [aps07_f, aps07_fp, aps07_fpp, (20,), [0, 1], np.inf, 0.4, 2.49375003906201174e-03, "aps.07.02"],
+    [aps08_f, aps08_fp, aps08_fpp, (2,), [0, 1], np.inf, 0.9, 0.5, "aps.08.00"],
+    [aps08_f, aps08_fp, aps08_fpp, (5,), [0, 1], np.inf, 0.9, 3.45954815848242059e-01, "aps.08.01"],
+    [aps08_f, aps08_fp, aps08_fpp, (10,), [0, 1], np.inf, 0.9, 2.45122333753307220e-01, "aps.08.02"],
+    [aps08_f, aps08_fp, aps08_fpp, (15,), [0, 1], np.inf, 0.9, 1.95547623536565629e-01, "aps.08.03"],
+    [aps08_f, aps08_fp, aps08_fpp, (20,), [0, 1], np.inf, 0.9, 1.64920957276440960e-01, "aps.08.04"],
+    [aps09_f, aps09_fp, aps09_fpp, (1,), [0, 1], np.inf, 0.5, 2.75508040999484394e-01, "aps.09.00"],
+    [aps09_f, aps09_fp, aps09_fpp, (2,), [0, 1], np.inf, 0.5, 1.37754020499742197e-01, "aps.09.01"],
+    [aps09_f, aps09_fp, aps09_fpp, (4,), [0, 1], np.inf, 0.5, 1.03052837781564422e-02, "aps.09.02"],
+    [aps09_f, aps09_fp, aps09_fpp, (5,), [0, 1], np.inf, 0.5, 3.61710817890406339e-03, "aps.09.03"],
+    [aps09_f, aps09_fp, aps09_fpp, (8,), [0, 1], np.inf, 0.5, 4.10872918496395375e-04, "aps.09.04"],
+    [aps09_f, aps09_fp, aps09_fpp, (15,), [0, 1], np.inf, 0.5, 2.59895758929076292e-05, "aps.09.05"],
+    [aps09_f, aps09_fp, aps09_fpp, (20,), [0, 1], np.inf, 0.5, 7.66859512218533719e-06, "aps.09.06"],
+    [aps10_f, aps10_fp, aps10_fpp, (1,), [0, 1], np.inf, 0.9, 4.01058137541547011e-01, "aps.10.00"],
+    [aps10_f, aps10_fp, aps10_fpp, (5,), [0, 1], np.inf, 0.9, 5.16153518757933583e-01, "aps.10.01"],
+    [aps10_f, aps10_fp, aps10_fpp, (10,), [0, 1], np.inf, 0.9, 5.39522226908415781e-01, "aps.10.02"],
+    [aps10_f, aps10_fp, aps10_fpp, (15,), [0, 1], np.inf, 0.9, 5.48182294340655241e-01, "aps.10.03"],
+    [aps10_f, aps10_fp, aps10_fpp, (20,), [0, 1], np.inf, 0.9, 5.52704666678487833e-01, "aps.10.04"],
+    [aps11_f, aps11_fp, aps11_fpp, (2,), [0.01, 1], np.inf, 1e-02, 1.0 / 2, "aps.11.00"],
+    [aps11_f, aps11_fp, aps11_fpp, (5,), [0.01, 1], np.inf, 1e-02, 1.0 / 5, "aps.11.01"],
+    [aps11_f, aps11_fp, aps11_fpp, (15,), [0.01, 1], np.inf, 1e-02, 1.0 / 15, "aps.11.02"],
+    [aps11_f, aps11_fp, aps11_fpp, (20,), [0.01, 1], np.inf, 1e-02, 1.0 / 20, "aps.11.03"],
+    [aps12_f, aps12_fp, aps12_fpp, (2,), [1, 100], np.inf, 1.1, 2, "aps.12.00"],
+    [aps12_f, aps12_fp, aps12_fpp, (3,), [1, 100], np.inf, 1.1, 3, "aps.12.01"],
+    [aps12_f, aps12_fp, aps12_fpp, (4,), [1, 100], np.inf, 1.1, 4, "aps.12.02"],
+    [aps12_f, aps12_fp, aps12_fpp, (5,), [1, 100], np.inf, 1.1, 5, "aps.12.03"],
+    [aps12_f, aps12_fp, aps12_fpp, (6,), [1, 100], np.inf, 1.1, 6, "aps.12.04"],
+    [aps12_f, aps12_fp, aps12_fpp, (7,), [1, 100], np.inf, 1.1, 7, "aps.12.05"],
+    [aps12_f, aps12_fp, aps12_fpp, (9,), [1, 100], np.inf, 1.1, 9, "aps.12.06"],
+    [aps12_f, aps12_fp, aps12_fpp, (11,), [1, 100], np.inf, 1.1, 11, "aps.12.07"],
+    [aps12_f, aps12_fp, aps12_fpp, (13,), [1, 100], np.inf, 1.1, 13, "aps.12.08"],
+    [aps12_f, aps12_fp, aps12_fpp, (15,), [1, 100], np.inf, 1.1, 15, "aps.12.09"],
+    [aps12_f, aps12_fp, aps12_fpp, (17,), [1, 100], np.inf, 1.1, 17, "aps.12.10"],
+    [aps12_f, aps12_fp, aps12_fpp, (19,), [1, 100], np.inf, 1.1, 19, "aps.12.11"],
+    [aps12_f, aps12_fp, aps12_fpp, (21,), [1, 100], np.inf, 1.1, 21, "aps.12.12"],
+    [aps12_f, aps12_fp, aps12_fpp, (23,), [1, 100], np.inf, 1.1, 23, "aps.12.13"],
+    [aps12_f, aps12_fp, aps12_fpp, (25,), [1, 100], np.inf, 1.1, 25, "aps.12.14"],
+    [aps12_f, aps12_fp, aps12_fpp, (27,), [1, 100], np.inf, 1.1, 27, "aps.12.15"],
+    [aps12_f, aps12_fp, aps12_fpp, (29,), [1, 100], np.inf, 1.1, 29, "aps.12.16"],
+    [aps12_f, aps12_fp, aps12_fpp, (31,), [1, 100], np.inf, 1.1, 31, "aps.12.17"],
+    [aps12_f, aps12_fp, aps12_fpp, (33,), [1, 100], np.inf, 1.1, 33, "aps.12.18"],
+    [aps13_f, aps13_fp, aps13_fpp, (), [-1, 4], np.inf, 1.5, 1.54720911915117165e-02, "aps.13.00"],
+    [aps14_f, aps14_fp, aps14_fpp, (1,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.00"],
+    [aps14_f, aps14_fp, aps14_fpp, (2,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.01"],
+    [aps14_f, aps14_fp, aps14_fpp, (3,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.02"],
+    [aps14_f, aps14_fp, aps14_fpp, (4,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.03"],
+    [aps14_f, aps14_fp, aps14_fpp, (5,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.04"],
+    [aps14_f, aps14_fp, aps14_fpp, (6,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.05"],
+    [aps14_f, aps14_fp, aps14_fpp, (7,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.06"],
+    [aps14_f, aps14_fp, aps14_fpp, (8,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.07"],
+    [aps14_f, aps14_fp, aps14_fpp, (9,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.08"],
+    [aps14_f, aps14_fp, aps14_fpp, (10,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.09"],
+    [aps14_f, aps14_fp, aps14_fpp, (11,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.10"],
+    [aps14_f, aps14_fp, aps14_fpp, (12,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.11"],
+    [aps14_f, aps14_fp, aps14_fpp, (13,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.12"],
+    [aps14_f, aps14_fp, aps14_fpp, (14,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.13"],
+    [aps14_f, aps14_fp, aps14_fpp, (15,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.14"],
+    [aps14_f, aps14_fp, aps14_fpp, (16,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.15"],
+    [aps14_f, aps14_fp, aps14_fpp, (17,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.16"],
+    [aps14_f, aps14_fp, aps14_fpp, (18,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.17"],
+    [aps14_f, aps14_fp, aps14_fpp, (19,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.18"],
+    [aps14_f, aps14_fp, aps14_fpp, (20,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.19"],
+    [aps14_f, aps14_fp, aps14_fpp, (21,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.20"],
+    [aps14_f, aps14_fp, aps14_fpp, (22,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.21"],
+    [aps14_f, aps14_fp, aps14_fpp, (23,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.22"],
+    [aps14_f, aps14_fp, aps14_fpp, (24,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.23"],
+    [aps14_f, aps14_fp, aps14_fpp, (25,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.24"],
+    [aps14_f, aps14_fp, aps14_fpp, (26,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.25"],
+    [aps14_f, aps14_fp, aps14_fpp, (27,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.26"],
+    [aps14_f, aps14_fp, aps14_fpp, (28,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.27"],
+    [aps14_f, aps14_fp, aps14_fpp, (29,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.28"],
+    [aps14_f, aps14_fp, aps14_fpp, (30,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.29"],
+    [aps14_f, aps14_fp, aps14_fpp, (31,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.30"],
+    [aps14_f, aps14_fp, aps14_fpp, (32,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.31"],
+    [aps14_f, aps14_fp, aps14_fpp, (33,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.32"],
+    [aps14_f, aps14_fp, aps14_fpp, (34,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.33"],
+    [aps14_f, aps14_fp, aps14_fpp, (35,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.34"],
+    [aps14_f, aps14_fp, aps14_fpp, (36,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.35"],
+    [aps14_f, aps14_fp, aps14_fpp, (37,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.36"],
+    [aps14_f, aps14_fp, aps14_fpp, (38,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.37"],
+    [aps14_f, aps14_fp, aps14_fpp, (39,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.38"],
+    [aps14_f, aps14_fp, aps14_fpp, (40,), [-1000, np.pi / 2], 0, 1, 6.23806518961612433e-01, "aps.14.39"],
+    [aps15_f, aps15_fp, aps15_fpp, (20,), [-1000, 1e-4], 0, -2, 5.90513055942197166e-05, "aps.15.00"],
+    [aps15_f, aps15_fp, aps15_fpp, (21,), [-1000, 1e-4], 0, -2, 5.63671553399369967e-05, "aps.15.01"],
+    [aps15_f, aps15_fp, aps15_fpp, (22,), [-1000, 1e-4], 0, -2, 5.39164094555919196e-05, "aps.15.02"],
+    [aps15_f, aps15_fp, aps15_fpp, (23,), [-1000, 1e-4], 0, -2, 5.16698923949422470e-05, "aps.15.03"],
+    [aps15_f, aps15_fp, aps15_fpp, (24,), [-1000, 1e-4], 0, -2, 4.96030966991445609e-05, "aps.15.04"],
+    [aps15_f, aps15_fp, aps15_fpp, (25,), [-1000, 1e-4], 0, -2, 4.76952852876389951e-05, "aps.15.05"],
+    [aps15_f, aps15_fp, aps15_fpp, (26,), [-1000, 1e-4], 0, -2, 4.59287932399486662e-05, "aps.15.06"],
+    [aps15_f, aps15_fp, aps15_fpp, (27,), [-1000, 1e-4], 0, -2, 4.42884791956647841e-05, "aps.15.07"],
+    [aps15_f, aps15_fp, aps15_fpp, (28,), [-1000, 1e-4], 0, -2, 4.27612902578832391e-05, "aps.15.08"],
+    [aps15_f, aps15_fp, aps15_fpp, (29,), [-1000, 1e-4], 0, -2, 4.13359139159538030e-05, "aps.15.09"],
+    [aps15_f, aps15_fp, aps15_fpp, (30,), [-1000, 1e-4], 0, -2, 4.00024973380198076e-05, "aps.15.10"],
+    [aps15_f, aps15_fp, aps15_fpp, (31,), [-1000, 1e-4], 0, -2, 3.87524192962066869e-05, "aps.15.11"],
+    [aps15_f, aps15_fp, aps15_fpp, (32,), [-1000, 1e-4], 0, -2, 3.75781035599579910e-05, "aps.15.12"],
+    [aps15_f, aps15_fp, aps15_fpp, (33,), [-1000, 1e-4], 0, -2, 3.64728652199592355e-05, "aps.15.13"],
+    [aps15_f, aps15_fp, aps15_fpp, (34,), [-1000, 1e-4], 0, -2, 3.54307833565318273e-05, "aps.15.14"],
+    [aps15_f, aps15_fp, aps15_fpp, (35,), [-1000, 1e-4], 0, -2, 3.44465949299614980e-05, "aps.15.15"],
+    [aps15_f, aps15_fp, aps15_fpp, (36,), [-1000, 1e-4], 0, -2, 3.35156058778003705e-05, "aps.15.16"],
+    [aps15_f, aps15_fp, aps15_fpp, (37,), [-1000, 1e-4], 0, -2, 3.26336162494372125e-05, "aps.15.17"],
+    [aps15_f, aps15_fp, aps15_fpp, (38,), [-1000, 1e-4], 0, -2, 3.17968568584260013e-05, "aps.15.18"],
+    [aps15_f, aps15_fp, aps15_fpp, (39,), [-1000, 1e-4], 0, -2, 3.10019354369653455e-05, "aps.15.19"],
+    [aps15_f, aps15_fp, aps15_fpp, (40,), [-1000, 1e-4], 0, -2, 3.02457906702100968e-05, "aps.15.20"],
+    [aps15_f, aps15_fp, aps15_fpp, (100,), [-1000, 1e-4], 0, -2, 1.22779942324615231e-05, "aps.15.21"],
+    [aps15_f, aps15_fp, aps15_fpp, (200,), [-1000, 1e-4], 0, -2, 6.16953939044086617e-06, "aps.15.22"],
+    [aps15_f, aps15_fp, aps15_fpp, (300,), [-1000, 1e-4], 0, -2, 4.11985852982928163e-06, "aps.15.23"],
+    [aps15_f, aps15_fp, aps15_fpp, (400,), [-1000, 1e-4], 0, -2, 3.09246238772721682e-06, "aps.15.24"],
+    [aps15_f, aps15_fp, aps15_fpp, (500,), [-1000, 1e-4], 0, -2, 2.47520442610501789e-06, "aps.15.25"],
+    [aps15_f, aps15_fp, aps15_fpp, (600,), [-1000, 1e-4], 0, -2, 2.06335676785127107e-06, "aps.15.26"],
+    [aps15_f, aps15_fp, aps15_fpp, (700,), [-1000, 1e-4], 0, -2, 1.76901200781542651e-06, "aps.15.27"],
+    [aps15_f, aps15_fp, aps15_fpp, (800,), [-1000, 1e-4], 0, -2, 1.54816156988591016e-06, "aps.15.28"],
+    [aps15_f, aps15_fp, aps15_fpp, (900,), [-1000, 1e-4], 0, -2, 1.37633453660223511e-06, "aps.15.29"],
+    [aps15_f, aps15_fp, aps15_fpp, (1000,), [-1000, 1e-4], 0, -2, 1.23883857889971403e-06, "aps.15.30"]
+]
+
+_APS_TESTS_DICTS = [dict(zip(_APS_TESTS_KEYS, testcase)) for testcase in _APS_TESTS]
+
+
+#   ##################
+#   "complex" test cases
+#   A few simple complex-valued functions defined on the complex plane.
+
+
+def cplx01_f(z, n, a):
+    r"""z**n - a: Use to find the nth root of a"""
+    return z**n - a
+
+
+def cplx01_fp(z, n, a):
+    return n * z**(n - 1)
+
+
+def cplx01_fpp(z, n, a):
+    return n * (n - 1) * z**(n - 2)
+
+
+def cplx02_f(z, a):
+    r"""e**z - a: Use to find the log of a"""
+    return np.exp(z) - a
+
+
+def cplx02_fp(z, a):
+    return np.exp(z)
+
+
+def cplx02_fpp(z, a):
+    return np.exp(z)
+
+
+# Each "complex" test case has
+# - a function and its two derivatives,
+# - additional arguments,
+# - the order of differentiability of the function on this interval
+# - two starting values x0 and x1
+# - the root
+# - an Identifier of the test case
+#
+# Algorithm 748 is a bracketing algorithm, so a bracketing interval was provided
+# in [1] for each test case. Newton and Halley need a single starting point
+# x0, which was chosen to be near the middle of the interval, unless that
+# would make the problem too easy.
+
+
+_COMPLEX_TESTS_KEYS = ["f", "fprime", "fprime2", "args", "smoothness", "x0", "x1", "root", "ID"]
+_COMPLEX_TESTS = [
+    [cplx01_f, cplx01_fp, cplx01_fpp, (2, -1), np.inf, (1 + 1j), (0.5 + 0.5j), 1j, "complex.01.00"],
+    [cplx01_f, cplx01_fp, cplx01_fpp, (3, 1), np.inf, (-1 + 1j), (-0.5 + 2.0j), (-0.5 + np.sqrt(3) / 2 * 1.0j),
+     "complex.01.01"],
+    [cplx01_f, cplx01_fp, cplx01_fpp, (3, -1), np.inf, 1j, (0.5 + 0.5j), (0.5 + np.sqrt(3) / 2 * 1.0j),
+     "complex.01.02"],
+    [cplx01_f, cplx01_fp, cplx01_fpp, (3, 8), np.inf, 5, 4, 2, "complex.01.03"],
+    [cplx02_f, cplx02_fp, cplx02_fpp, (-1,), np.inf, (1 + 2j), (0.5 + 0.5j), np.pi * 1.0j, "complex.02.00"],
+    [cplx02_f, cplx02_fp, cplx02_fpp, (1j,), np.inf, (1 + 2j), (0.5 + 0.5j), np.pi * 0.5j, "complex.02.01"],
+]
+
+_COMPLEX_TESTS_DICTS = [dict(zip(_COMPLEX_TESTS_KEYS, testcase)) for testcase in _COMPLEX_TESTS]
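+
+
+def _demo_complex_newton(tc=None, maxiter=50, tol=1e-12):
+    # Hypothetical usage sketch, not part of the test data: the "complex"
+    # cases carry starting points and derivatives but no bracket, so they
+    # suit open (derivative-based) methods. A hand-rolled Newton iteration
+    # is enough to illustrate how the dict fields fit together.
+    tc = tc or _COMPLEX_TESTS_DICTS[0]
+    f, fp, args, z = tc["f"], tc["fprime"], tc["args"], tc["x0"]
+    for _ in range(maxiter):
+        step = f(z, *args) / fp(z, *args)
+        z = z - step
+        if abs(step) < tol:
+            break
+    return z  # approximates tc["root"], e.g. 1j for "complex.01.00"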
+
+
+def _add_a_b(tests):
+    r"""Add "a" and "b" keys to each test from the "bracket" value"""
+    for d in tests:
+        for k, v in zip(['a', 'b'], d.get('bracket', [])):
+            d[k] = v
+
+
+_add_a_b(_ORIGINAL_TESTS_DICTS)
+_add_a_b(_APS_TESTS_DICTS)
+_add_a_b(_COMPLEX_TESTS_DICTS)
+
+
+def get_tests(collection='original', smoothness=None):
+    r"""Return the requested collection of test cases, as a list of dicts with subset-specific keys.
+
+    Allowed values of collection:
+    'original': The original benchmarking functions.
+         Real-valued functions of real-valued inputs on an interval with a zero.
+         f1, .., f3 are continuous and infinitely differentiable
+         f4 has a single discontinuity at the root
+         f5 has a root at 1 replacing a 1st order pole
+         f6 is randomly positive on one side of the root, randomly negative on the other
+    'aps': The test problems in the TOMS "Algorithm 748: Enclosing Zeros of Continuous Functions"
+         paper by Alefeld, Potra and Shi. Real-valued functions of
+         real-valued inputs on an interval with a zero.
+         Suitable for methods which start with an enclosing interval, and
+         derivatives up to 2nd order.
+    'complex': Some complex-valued functions of complex-valued inputs.
+         No enclosing bracket is provided.
+         Suitable for methods which use one or more starting values, and
+         derivatives up to 2nd order.
+
+    The dictionary keys will be a subset of
+    ["f", "fprime", "fprime2", "args", "bracket", "a", "b", "smoothness", "x0", "x1", "root", "ID"]
+    """
+    collection = collection or "original"
+    subsets = {"aps": _APS_TESTS_DICTS,
+               "complex": _COMPLEX_TESTS_DICTS,
+               "original": _ORIGINAL_TESTS_DICTS}
+    tests = subsets.get(collection, [])
+    if smoothness is not None:
+        tests = [tc for tc in tests if tc['smoothness'] >= smoothness]
+    return tests
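+
+
+def _demo_get_tests():
+    # Hypothetical usage sketch, not part of the module API: solve every
+    # infinitely differentiable APS case with a bracketing solver. Assumes
+    # the solver module imported above as `cc` (see the backwards-compatibility
+    # aliases below) provides `brentq`.
+    for tc in get_tests('aps', smoothness=np.inf):
+        root = cc.brentq(tc['f'], tc['a'], tc['b'], args=tc['args'])
+        assert abs(root - tc['root']) < 1e-6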
+
+
+# Backwards compatibility
+methods = [cc.bisect, cc.ridder, cc.brenth, cc.brentq]
+mstrings = ['cc.bisect', 'cc.ridder', 'cc.brenth', 'cc.brentq']
+functions = [f2, f3, f4, f5, f6]
+fstrings = ['f2', 'f3', 'f4', 'f5', 'f6']
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/_zeros_py.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/_zeros_py.py
new file mode 100644
index 00000000..b74db99f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/_zeros_py.py
@@ -0,0 +1,1377 @@
+import warnings
+from collections import namedtuple
+import operator
+from . import _zeros
+import numpy as np
+
+
+_iter = 100
+_xtol = 2e-12
+_rtol = 4 * np.finfo(float).eps
+
+__all__ = ['newton', 'bisect', 'ridder', 'brentq', 'brenth', 'toms748',
+           'RootResults']
+
+# Must agree with CONVERGED, SIGNERR, CONVERR, ...  in zeros.h
+_ECONVERGED = 0
+_ESIGNERR = -1
+_ECONVERR = -2
+_EVALUEERR = -3
+_EINPROGRESS = 1
+
+CONVERGED = 'converged'
+SIGNERR = 'sign error'
+CONVERR = 'convergence error'
+VALUEERR = 'value error'
+INPROGRESS = 'No error'
+
+
+flag_map = {_ECONVERGED: CONVERGED, _ESIGNERR: SIGNERR, _ECONVERR: CONVERR,
+            _EVALUEERR: VALUEERR, _EINPROGRESS: INPROGRESS}
+
+
+class RootResults:
+    """Represents the root finding result.
+
+    Attributes
+    ----------
+    root : float
+        Estimated root location.
+    iterations : int
+        Number of iterations needed to find the root.
+    function_calls : int
+        Number of times the function was called.
+    converged : bool
+        True if the routine converged.
+    flag : str
+        Description of the cause of termination.
+
+    """
+
+    def __init__(self, root, iterations, function_calls, flag):
+        self.root = root
+        self.iterations = iterations
+        self.function_calls = function_calls
+        self.converged = flag == _ECONVERGED
+        self.flag = None
+        try:
+            self.flag = flag_map[flag]
+        except KeyError:
+            self.flag = 'unknown error %d' % (flag,)
+
+    def __repr__(self):
+        attrs = ['converged', 'flag', 'function_calls',
+                 'iterations', 'root']
+        m = max(map(len, attrs)) + 1
+        return '\n'.join([a.rjust(m) + ': ' + repr(getattr(self, a))
+                          for a in attrs])
+
+
+def results_c(full_output, r):
+    if full_output:
+        x, funcalls, iterations, flag = r
+        results = RootResults(root=x,
+                              iterations=iterations,
+                              function_calls=funcalls,
+                              flag=flag)
+        return x, results
+    else:
+        return r
+
+
+def _results_select(full_output, r):
+    """Select from a tuple of (root, funccalls, iterations, flag)"""
+    x, funcalls, iterations, flag = r
+    if full_output:
+        results = RootResults(root=x,
+                              iterations=iterations,
+                              function_calls=funcalls,
+                              flag=flag)
+        return x, results
+    return x
+
+
+def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50,
+           fprime2=None, x1=None, rtol=0.0,
+           full_output=False, disp=True):
+    """
+    Find a zero of a real or complex function using the Newton-Raphson
+    (or secant or Halley's) method.
+
+    Find a zero of the scalar-valued function `func` given a nearby scalar
+    starting point `x0`.
+    The Newton-Raphson method is used if the derivative `fprime` of `func`
+    is provided, otherwise the secant method is used. If the second order
+    derivative `fprime2` of `func` is also provided, then Halley's method is
+    used.
+
+    If `x0` is a sequence with more than one item, `newton` returns an array:
+    the zeros of the function from each (scalar) starting point in `x0`.
+    In this case, `func` must be vectorized to return a sequence or array of
+    the same shape as its first argument. If `fprime` (`fprime2`) is given,
+    then its return must also have the same shape: each element is the first
+    (second) derivative of `func` with respect to its only variable evaluated
+    at each element of its first argument.
+
+    `newton` is for finding roots of scalar-valued functions of a single
+    variable. For problems involving several variables, see `root`.
+
+    Parameters
+    ----------
+    func : callable
+        The function whose zero is wanted. It must be a function of a
+        single variable of the form ``f(x,a,b,c...)``, where ``a,b,c...``
+        are extra arguments that can be passed in the `args` parameter.
+    x0 : float, sequence, or ndarray
+        An initial estimate of the zero that should be somewhere near the
+        actual zero. If not scalar, then `func` must be vectorized and return
+        a sequence or array of the same shape as its first argument.
+    fprime : callable, optional
+        The derivative of the function when available and convenient. If it
+        is None (default), then the secant method is used.
+    args : tuple, optional
+        Extra arguments to be used in the function call.
+    tol : float, optional
+        The allowable error of the zero value. If `func` is complex-valued,
+        a larger `tol` is recommended as both the real and imaginary parts
+        of `x` contribute to ``|x - x0|``.
+    maxiter : int, optional
+        Maximum number of iterations.
+    fprime2 : callable, optional
+        The second order derivative of the function when available and
+        convenient. If it is None (default), then the normal Newton-Raphson
+        or the secant method is used. If it is not None, then Halley's method
+        is used.
+    x1 : float, optional
+        Another estimate of the zero that should be somewhere near the
+        actual zero. Used if `fprime` is not provided.
+    rtol : float, optional
+        Tolerance (relative) for termination.
+    full_output : bool, optional
+        If `full_output` is False (default), the root is returned.
+        If True and `x0` is scalar, the return value is ``(x, r)``, where ``x``
+        is the root and ``r`` is a `RootResults` object.
+        If True and `x0` is non-scalar, the return value is ``(x, converged,
+        zero_der)`` (see Returns section for details).
+    disp : bool, optional
+        If True, raise a RuntimeError if the algorithm didn't converge, with
+        the error message containing the number of iterations and current
+        function value. Otherwise, the convergence status is recorded in a
+        `RootResults` return object.
+        Ignored if `x0` is not scalar.
+        *Note: this has little to do with displaying; however, the `disp`
+        keyword cannot be renamed for backwards compatibility.*
+
+    Returns
+    -------
+    root : float, sequence, or ndarray
+        Estimated location where function is zero.
+    r : `RootResults`, optional
+        Present if ``full_output=True`` and `x0` is scalar.
+        Object containing information about the convergence. In particular,
+        ``r.converged`` is True if the routine converged.
+    converged : ndarray of bool, optional
+        Present if ``full_output=True`` and `x0` is non-scalar.
+        For vector functions, indicates which elements converged successfully.
+    zero_der : ndarray of bool, optional
+        Present if ``full_output=True`` and `x0` is non-scalar.
+        For vector functions, indicates which elements had a zero derivative.
+
+    See Also
+    --------
+    root_scalar : interface to root solvers for scalar functions
+    root : interface to root solvers for multi-input, multi-output functions
+
+    Notes
+    -----
+    The convergence rate of the Newton-Raphson method is quadratic,
+    the Halley method is cubic, and the secant method is
+    sub-quadratic. This means that if the function is well-behaved
+    the actual error in the estimated zero after the nth iteration
+    is approximately the square (cube for Halley) of the error
+    after the (n-1)th step. However, the stopping criterion used
+    here is the step size and there is no guarantee that a zero
+    has been found. Consequently, the result should be verified.
+    Safer algorithms are brentq, brenth, ridder, and bisect,
+    but they all require that the root first be bracketed in an
+    interval where the function changes sign. The brentq algorithm
+    is recommended for general use in one dimensional problems
+    when such an interval has been found.
+
+    When `newton` is used with arrays, it is best suited for the following
+    types of problems:
+
+    * The initial guesses, `x0`, are all relatively the same distance from
+      the roots.
+    * Some or all of the extra arguments, `args`, are also arrays so that a
+      class of similar problems can be solved together.
+    * The size of the initial guesses, `x0`, is larger than O(100) elements.
+      Otherwise, a naive loop may perform as well or better than a vector.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy import optimize
+
+    >>> def f(x):
+    ...     return (x**3 - 1)  # only one real root at x = 1
+
+    ``fprime`` is not provided, use the secant method:
+
+    >>> root = optimize.newton(f, 1.5)
+    >>> root
+    1.0000000000000016
+    >>> root = optimize.newton(f, 1.5, fprime2=lambda x: 6 * x)
+    >>> root
+    1.0000000000000016
+
+    Only ``fprime`` is provided, use the Newton-Raphson method:
+
+    >>> root = optimize.newton(f, 1.5, fprime=lambda x: 3 * x**2)
+    >>> root
+    1.0
+
+    Both ``fprime2`` and ``fprime`` are provided, use Halley's method:
+
+    >>> root = optimize.newton(f, 1.5, fprime=lambda x: 3 * x**2,
+    ...                        fprime2=lambda x: 6 * x)
+    >>> root
+    1.0
+
+    When we want to find zeros for a set of related starting values and/or
+    function parameters, we can provide both of those as an array of inputs:
+
+    >>> f = lambda x, a: x**3 - a
+    >>> fder = lambda x, a: 3 * x**2
+    >>> rng = np.random.default_rng()
+    >>> x = rng.standard_normal(100)
+    >>> a = np.arange(-50, 50)
+    >>> vec_res = optimize.newton(f, x, fprime=fder, args=(a, ), maxiter=200)
+
+    The above is the equivalent of solving for each value in ``(x, a)``
+    separately in a for-loop, just faster:
+
+    >>> loop_res = [optimize.newton(f, x0, fprime=fder, args=(a0,),
+    ...                             maxiter=200)
+    ...             for x0, a0 in zip(x, a)]
+    >>> np.allclose(vec_res, loop_res)
+    True
+
+    Plot the results found for all values of ``a``:
+
+    >>> analytical_result = np.sign(a) * np.abs(a)**(1/3)
+    >>> fig, ax = plt.subplots()
+    >>> ax.plot(a, analytical_result, 'o')
+    >>> ax.plot(a, vec_res, '.')
+    >>> ax.set_xlabel('$a$')
+    >>> ax.set_ylabel('$x$ where $f(x, a)=0$')
+    >>> plt.show()
+
+    """
+    if tol <= 0:
+        raise ValueError("tol too small (%g <= 0)" % tol)
+    maxiter = operator.index(maxiter)
+    if maxiter < 1:
+        raise ValueError("maxiter must be greater than 0")
+    if np.size(x0) > 1:
+        return _array_newton(func, x0, fprime, args, tol, maxiter, fprime2,
+                             full_output)
+
+    # Convert to float (don't use float(x0); this works also for complex x0)
+    p0 = 1.0 * x0
+    funcalls = 0
+    if fprime is not None:
+        # Newton-Raphson method
+        for itr in range(maxiter):
+            # first evaluate fval
+            fval = func(p0, *args)
+            funcalls += 1
+            # If fval is 0, a root has been found, then terminate
+            if fval == 0:
+                return _results_select(
+                    full_output, (p0, funcalls, itr, _ECONVERGED))
+            fder = fprime(p0, *args)
+            funcalls += 1
+            if fder == 0:
+                msg = "Derivative was zero."
+                if disp:
+                    msg += (
+                        " Failed to converge after %d iterations, value is %s."
+                        % (itr + 1, p0))
+                    raise RuntimeError(msg)
+                warnings.warn(msg, RuntimeWarning)
+                return _results_select(
+                    full_output, (p0, funcalls, itr + 1, _ECONVERR))
+            newton_step = fval / fder
+            if fprime2:
+                fder2 = fprime2(p0, *args)
+                funcalls += 1
+                # Halley's method:
+                #   newton_step /= (1.0 - 0.5 * newton_step * fder2 / fder)
+                # Only do it if denominator stays close enough to 1
+                # Rationale: If 1-adj < 0, then Halley sends x in the
+                # opposite direction to Newton. Doesn't happen if x is close
+                # enough to root.
+                adj = newton_step * fder2 / fder / 2
+                if np.abs(adj) < 1:
+                    newton_step /= 1.0 - adj
+            p = p0 - newton_step
+            if np.isclose(p, p0, rtol=rtol, atol=tol):
+                return _results_select(
+                    full_output, (p, funcalls, itr + 1, _ECONVERGED))
+            p0 = p
+    else:
+        # Secant method
+        if x1 is not None:
+            if x1 == x0:
+                raise ValueError("x1 and x0 must be different")
+            p1 = x1
+        else:
+            eps = 1e-4
+            p1 = x0 * (1 + eps)
+            p1 += (eps if p1 >= 0 else -eps)
+        q0 = func(p0, *args)
+        funcalls += 1
+        q1 = func(p1, *args)
+        funcalls += 1
+        if abs(q1) < abs(q0):
+            p0, p1, q0, q1 = p1, p0, q1, q0
+        for itr in range(maxiter):
+            if q1 == q0:
+                if p1 != p0:
+                    msg = "Tolerance of %s reached." % (p1 - p0)
+                    if disp:
+                        msg += (
+                            " Failed to converge after %d iterations, value is %s."
+                            % (itr + 1, p1))
+                        raise RuntimeError(msg)
+                    warnings.warn(msg, RuntimeWarning)
+                p = (p1 + p0) / 2.0
+                return _results_select(
+                    full_output, (p, funcalls, itr + 1, _ECONVERGED))
+            else:
+                if abs(q1) > abs(q0):
+                    p = (-q0 / q1 * p1 + p0) / (1 - q0 / q1)
+                else:
+                    p = (-q1 / q0 * p0 + p1) / (1 - q1 / q0)
+            if np.isclose(p, p1, rtol=rtol, atol=tol):
+                return _results_select(
+                    full_output, (p, funcalls, itr + 1, _ECONVERGED))
+            p0, q0 = p1, q1
+            p1 = p
+            q1 = func(p1, *args)
+            funcalls += 1
+
+    if disp:
+        msg = ("Failed to converge after %d iterations, value is %s."
+               % (itr + 1, p))
+        raise RuntimeError(msg)
+
+    return _results_select(full_output, (p, funcalls, itr + 1, _ECONVERR))
+
+
+def _array_newton(func, x0, fprime, args, tol, maxiter, fprime2, full_output):
+    """
+    A vectorized version of Newton, Halley, and secant methods for arrays.
+
+    Do not use this method directly. This method is called from `newton`
+    when ``np.size(x0) > 1`` is ``True``. For docstring, see `newton`.
+    """
+    # Explicitly copy `x0` as `p` will be modified inplace, but the
+    # user's array should not be altered.
+    p = np.array(x0, copy=True)
+
+    failures = np.ones_like(p, dtype=bool)
+    nz_der = np.ones_like(failures)
+    if fprime is not None:
+        # Newton-Raphson method
+        for iteration in range(maxiter):
+            # first evaluate fval
+            fval = np.asarray(func(p, *args))
+            # If all fval are 0, all roots have been found, then terminate
+            if not fval.any():
+                failures = fval.astype(bool)
+                break
+            fder = np.asarray(fprime(p, *args))
+            nz_der = (fder != 0)
+            # stop iterating if all derivatives are zero
+            if not nz_der.any():
+                break
+            # Newton step
+            dp = fval[nz_der] / fder[nz_der]
+            if fprime2 is not None:
+                fder2 = np.asarray(fprime2(p, *args))
+                dp = dp / (1.0 - 0.5 * dp * fder2[nz_der] / fder[nz_der])
+            # only update nonzero derivatives
+            p = np.asarray(p, dtype=np.result_type(p, dp, np.float64))
+            p[nz_der] -= dp
+            failures[nz_der] = np.abs(dp) >= tol  # items not yet converged
+            # stop iterating if there aren't any failures, not incl zero der
+            if not failures[nz_der].any():
+                break
+    else:
+        # Secant method
+        dx = np.finfo(float).eps**0.33
+        p1 = p * (1 + dx) + np.where(p >= 0, dx, -dx)
+        q0 = np.asarray(func(p, *args))
+        q1 = np.asarray(func(p1, *args))
+        active = np.ones_like(p, dtype=bool)
+        for iteration in range(maxiter):
+            nz_der = (q1 != q0)
+            # stop iterating if all derivatives are zero
+            if not nz_der.any():
+                p = (p1 + p) / 2.0
+                break
+            # Secant Step
+            dp = (q1 * (p1 - p))[nz_der] / (q1 - q0)[nz_der]
+            # only update nonzero derivatives
+            p = np.asarray(p, dtype=np.result_type(p, p1, dp, np.float64))
+            p[nz_der] = p1[nz_der] - dp
+            active_zero_der = ~nz_der & active
+            p[active_zero_der] = (p1 + p)[active_zero_der] / 2.0
+            active &= nz_der  # don't assign zero derivatives again
+            failures[nz_der] = np.abs(dp) >= tol  # not yet converged
+            # stop iterating if there aren't any failures, not incl zero der
+            if not failures[nz_der].any():
+                break
+            p1, p = p, p1
+            q0 = q1
+            q1 = np.asarray(func(p1, *args))
+
+    zero_der = ~nz_der & failures  # don't include converged with zero-ders
+    if zero_der.any():
+        # Secant warnings
+        if fprime is None:
+            nonzero_dp = (p1 != p)
+            # non-zero dp, but infinite newton step
+            zero_der_nz_dp = (zero_der & nonzero_dp)
+            if zero_der_nz_dp.any():
+                rms = np.sqrt(
+                    sum((p1[zero_der_nz_dp] - p[zero_der_nz_dp]) ** 2)
+                )
+                warnings.warn(
+                    'RMS of {:g} reached'.format(rms), RuntimeWarning)
+        # Newton or Halley warnings
+        else:
+            all_or_some = 'all' if zero_der.all() else 'some'
+            msg = '{:s} derivatives were zero'.format(all_or_some)
+            warnings.warn(msg, RuntimeWarning)
+    elif failures.any():
+        all_or_some = 'all' if failures.all() else 'some'
+        msg = '{0:s} failed to converge after {1:d} iterations'.format(
+            all_or_some, maxiter
+        )
+        if failures.all():
+            raise RuntimeError(msg)
+        warnings.warn(msg, RuntimeWarning)
+
+    if full_output:
+        result = namedtuple('result', ('root', 'converged', 'zero_der'))
+        p = result(p, ~failures, zero_der)
+
+    return p
+
+
+def bisect(f, a, b, args=(),
+           xtol=_xtol, rtol=_rtol, maxiter=_iter,
+           full_output=False, disp=True):
+    """
+    Find root of a function within an interval using bisection.
+
+    Basic bisection routine to find a zero of the function `f` between the
+    arguments `a` and `b`. `f(a)` and `f(b)` cannot have the same signs.
+    Slow but sure.
+
+    Parameters
+    ----------
+    f : function
+        Python function returning a number.  `f` must be continuous, and
+        f(a) and f(b) must have opposite signs.
+    a : scalar
+        One end of the bracketing interval [a,b].
+    b : scalar
+        The other end of the bracketing interval [a,b].
+    xtol : number, optional
+        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
+        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
+        parameter must be nonnegative.
+    rtol : number, optional
+        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
+        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
+        parameter cannot be smaller than its default value of
+        ``4*np.finfo(float).eps``.
+    maxiter : int, optional
+        If convergence is not achieved in `maxiter` iterations, an error is
+        raised. Must be >= 0.
+    args : tuple, optional
+        Containing extra arguments for the function `f`.
+        `f` is called as ``f(x, *args)``.
+    full_output : bool, optional
+        If `full_output` is False, the root is returned. If `full_output` is
+        True, the return value is ``(x, r)``, where x is the root, and r is
+        a `RootResults` object.
+    disp : bool, optional
+        If True, raise RuntimeError if the algorithm didn't converge.
+        Otherwise, the convergence status is recorded in a `RootResults`
+        return object.
+
+    Returns
+    -------
+    x0 : float
+        Zero of `f` between `a` and `b`.
+    r : `RootResults` (present if ``full_output = True``)
+        Object containing information about the convergence. In particular,
+        ``r.converged`` is True if the routine converged.
+
+    Examples
+    --------
+
+    >>> def f(x):
+    ...     return (x**2 - 1)
+
+    >>> from scipy import optimize
+
+    >>> root = optimize.bisect(f, 0, 2)
+    >>> root
+    1.0
+
+    >>> root = optimize.bisect(f, -2, 0)
+    >>> root
+    -1.0
+
+    See Also
+    --------
+    brentq, brenth, ridder, newton
+    fixed_point : scalar fixed-point finder
+    fsolve : n-dimensional root-finding
+
+    """
+    if not isinstance(args, tuple):
+        args = (args,)
+    maxiter = operator.index(maxiter)
+    if xtol <= 0:
+        raise ValueError("xtol too small (%g <= 0)" % xtol)
+    if rtol < _rtol:
+        raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
+    r = _zeros._bisect(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
+    return results_c(full_output, r)
+
+
+def ridder(f, a, b, args=(),
+           xtol=_xtol, rtol=_rtol, maxiter=_iter,
+           full_output=False, disp=True):
+    """
+    Find a root of a function in an interval using Ridder's method.
+
+    Parameters
+    ----------
+    f : function
+        Python function returning a number. f must be continuous, and f(a) and
+        f(b) must have opposite signs.
+    a : scalar
+        One end of the bracketing interval [a,b].
+    b : scalar
+        The other end of the bracketing interval [a,b].
+    xtol : number, optional
+        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
+        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
+        parameter must be nonnegative.
+    rtol : number, optional
+        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
+        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
+        parameter cannot be smaller than its default value of
+        ``4*np.finfo(float).eps``.
+    maxiter : int, optional
+        If convergence is not achieved in `maxiter` iterations, an error is
+        raised. Must be >= 0.
+    args : tuple, optional
+        Containing extra arguments for the function `f`.
+        `f` is called as ``f(x, *args)``.
+    full_output : bool, optional
+        If `full_output` is False, the root is returned. If `full_output` is
+        True, the return value is ``(x, r)``, where `x` is the root, and `r` is
+        a `RootResults` object.
+    disp : bool, optional
+        If True, raise RuntimeError if the algorithm didn't converge.
+        Otherwise, the convergence status is recorded in a `RootResults`
+        return object.
+
+    Returns
+    -------
+    x0 : float
+        Zero of `f` between `a` and `b`.
+    r : `RootResults` (present if ``full_output = True``)
+        Object containing information about the convergence.
+        In particular, ``r.converged`` is True if the routine converged.
+
+    See Also
+    --------
+    brentq, brenth, bisect, newton : 1-D root-finding
+    fixed_point : scalar fixed-point finder
+
+    Notes
+    -----
+    Uses [Ridders1979]_ method to find a zero of the function `f` between the
+    arguments `a` and `b`. Ridders' method is faster than bisection, but not
+    generally as fast as the Brent routines. [Ridders1979]_ provides the
+    classic description and source of the algorithm. A description can also be
+    found in any recent edition of Numerical Recipes.
+
+    The routine used here diverges slightly from standard presentations in
+    order to be a bit more careful of tolerance.
+
+    References
+    ----------
+    .. [Ridders1979]
+       Ridders, C. F. J. "A New Algorithm for Computing a
+       Single Root of a Real Continuous Function."
+       IEEE Trans. Circuits Systems 26, 979-980, 1979.
+
+    Examples
+    --------
+
+    >>> def f(x):
+    ...     return (x**2 - 1)
+
+    >>> from scipy import optimize
+
+    >>> root = optimize.ridder(f, 0, 2)
+    >>> root
+    1.0
+
+    >>> root = optimize.ridder(f, -2, 0)
+    >>> root
+    -1.0
+    """
+    if not isinstance(args, tuple):
+        args = (args,)
+    maxiter = operator.index(maxiter)
+    if xtol <= 0:
+        raise ValueError("xtol too small (%g <= 0)" % xtol)
+    if rtol < _rtol:
+        raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
+    r = _zeros._ridder(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
+    return results_c(full_output, r)
+
+
+def brentq(f, a, b, args=(),
+           xtol=_xtol, rtol=_rtol, maxiter=_iter,
+           full_output=False, disp=True):
+    """
+    Find a root of a function in a bracketing interval using Brent's method.
+
+    Uses the classic Brent's method to find a zero of the function `f` on
+    the sign changing interval [a , b]. Generally considered the best of the
+    rootfinding routines here. It is a safe version of the secant method that
+    uses inverse quadratic extrapolation. Brent's method combines root
+    bracketing, interval bisection, and inverse quadratic interpolation. It is
+    sometimes known as the van Wijngaarden-Dekker-Brent method. Brent (1973)
+    claims convergence is guaranteed for functions computable within [a,b].
+
+    [Brent1973]_ provides the classic description of the algorithm. Another
+    description can be found in a recent edition of Numerical Recipes, including
+    [PressEtal1992]_. A third description is at
+    http://mathworld.wolfram.com/BrentsMethod.html. It should be easy to
+    understand the algorithm just by reading our code. Our code diverges a bit
+    from standard presentations: we choose a different formula for the
+    extrapolation step.
+
+    Parameters
+    ----------
+    f : function
+        Python function returning a number. The function :math:`f`
+        must be continuous, and :math:`f(a)` and :math:`f(b)` must
+        have opposite signs.
+    a : scalar
+        One end of the bracketing interval :math:`[a, b]`.
+    b : scalar
+        The other end of the bracketing interval :math:`[a, b]`.
+    xtol : number, optional
+        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
+        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
+        parameter must be nonnegative. For nice functions, Brent's
+        method will often satisfy the above condition with ``xtol/2``
+        and ``rtol/2``. [Brent1973]_
+    rtol : number, optional
+        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
+        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
+        parameter cannot be smaller than its default value of
+        ``4*np.finfo(float).eps``. For nice functions, Brent's
+        method will often satisfy the above condition with ``xtol/2``
+        and ``rtol/2``. [Brent1973]_
+    maxiter : int, optional
+        If convergence is not achieved in `maxiter` iterations, an error is
+        raised. Must be >= 0.
+    args : tuple, optional
+        Containing extra arguments for the function `f`.
+        `f` is called as ``f(x, *args)``.
+    full_output : bool, optional
+        If `full_output` is False, the root is returned. If `full_output` is
+        True, the return value is ``(x, r)``, where `x` is the root, and `r` is
+        a `RootResults` object.
+    disp : bool, optional
+        If True, raise RuntimeError if the algorithm didn't converge.
+        Otherwise, the convergence status is recorded in a `RootResults`
+        return object.
+
+    Returns
+    -------
+    x0 : float
+        Zero of `f` between `a` and `b`.
+    r : `RootResults` (present if ``full_output = True``)
+        Object containing information about the convergence. In particular,
+        ``r.converged`` is True if the routine converged.
+
+    Notes
+    -----
+    `f` must be continuous.  f(a) and f(b) must have opposite signs.
+
+    Related functions fall into several classes:
+
+    multivariate local optimizers
+      `fmin`, `fmin_powell`, `fmin_cg`, `fmin_bfgs`, `fmin_ncg`
+    nonlinear least squares minimizer
+      `leastsq`
+    constrained multivariate optimizers
+      `fmin_l_bfgs_b`, `fmin_tnc`, `fmin_cobyla`
+    global optimizers
+      `basinhopping`, `brute`, `differential_evolution`
+    local scalar minimizers
+      `fminbound`, `brent`, `golden`, `bracket`
+    N-D root-finding
+      `fsolve`
+    1-D root-finding
+      `brenth`, `ridder`, `bisect`, `newton`
+    scalar fixed-point finder
+      `fixed_point`
+
+    References
+    ----------
+    .. [Brent1973]
+       Brent, R. P.,
+       *Algorithms for Minimization Without Derivatives*.
+       Englewood Cliffs, NJ: Prentice-Hall, 1973. Ch. 3-4.
+
+    .. [PressEtal1992]
+       Press, W. H.; Flannery, B. P.; Teukolsky, S. A.; and Vetterling, W. T.
+       *Numerical Recipes in FORTRAN: The Art of Scientific Computing*, 2nd ed.
+       Cambridge, England: Cambridge University Press, pp. 352-355, 1992.
+       Section 9.3:  "Van Wijngaarden-Dekker-Brent Method."
+
+    Examples
+    --------
+    >>> def f(x):
+    ...     return (x**2 - 1)
+
+    >>> from scipy import optimize
+
+    >>> root = optimize.brentq(f, -2, 0)
+    >>> root
+    -1.0
+
+    >>> root = optimize.brentq(f, 0, 2)
+    >>> root
+    1.0
+    """
+    if not isinstance(args, tuple):
+        args = (args,)
+    maxiter = operator.index(maxiter)
+    if xtol <= 0:
+        raise ValueError("xtol too small (%g <= 0)" % xtol)
+    if rtol < _rtol:
+        raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
+    r = _zeros._brentq(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
+    return results_c(full_output, r)
+
+
+def brenth(f, a, b, args=(),
+           xtol=_xtol, rtol=_rtol, maxiter=_iter,
+           full_output=False, disp=True):
+    """Find a root of a function in a bracketing interval using Brent's
+    method with hyperbolic extrapolation.
+
+    A variation on the classic Brent routine to find a zero of the function f
+    between the arguments a and b that uses hyperbolic extrapolation instead of
+    inverse quadratic extrapolation. Bus & Dekker (1975) guarantee convergence
+    for this method, claiming that the upper bound of function evaluations here
+    is 4 or 5 times less than that for bisection.
+    f(a) and f(b) cannot have the same signs. Generally, on a par with the
+    brent routine, but not as heavily tested. It is a safe version of the
+    secant method that uses hyperbolic extrapolation.
+    The version here is by Chuck Harris, and implements Algorithm M of
+    [BusAndDekker1975]_, where further details (convergence properties,
+    additional remarks and such) can be found.
+
+    Parameters
+    ----------
+    f : function
+        Python function returning a number. f must be continuous, and f(a) and
+        f(b) must have opposite signs.
+    a : scalar
+        One end of the bracketing interval [a,b].
+    b : scalar
+        The other end of the bracketing interval [a,b].
+    xtol : number, optional
+        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
+        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
+        parameter must be nonnegative. As with `brentq`, for nice
+        functions the method will often satisfy the above condition
+        with ``xtol/2`` and ``rtol/2``.
+    rtol : number, optional
+        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
+        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
+        parameter cannot be smaller than its default value of
+        ``4*np.finfo(float).eps``. As with `brentq`, for nice functions
+        the method will often satisfy the above condition with
+        ``xtol/2`` and ``rtol/2``.
+    maxiter : int, optional
+        If convergence is not achieved in `maxiter` iterations, an error is
+        raised. Must be >= 0.
+    args : tuple, optional
+        Containing extra arguments for the function `f`.
+        `f` is called as ``f(x, *args)``.
+    full_output : bool, optional
+        If `full_output` is False, the root is returned. If `full_output` is
+        True, the return value is ``(x, r)``, where `x` is the root, and `r` is
+        a `RootResults` object.
+    disp : bool, optional
+        If True, raise RuntimeError if the algorithm didn't converge.
+        Otherwise, the convergence status is recorded in a `RootResults`
+        return object.
+
+    Returns
+    -------
+    x0 : float
+        Zero of `f` between `a` and `b`.
+    r : `RootResults` (present if ``full_output = True``)
+        Object containing information about the convergence. In particular,
+        ``r.converged`` is True if the routine converged.
+
+    See Also
+    --------
+    fmin, fmin_powell, fmin_cg, fmin_bfgs, fmin_ncg : multivariate local optimizers
+    leastsq : nonlinear least squares minimizer
+    fmin_l_bfgs_b, fmin_tnc, fmin_cobyla : constrained multivariate optimizers
+    basinhopping, differential_evolution, brute : global optimizers
+    fminbound, brent, golden, bracket : local scalar minimizers
+    fsolve : N-D root-finding
+    brentq, brenth, ridder, bisect, newton : 1-D root-finding
+    fixed_point : scalar fixed-point finder
+
+    References
+    ----------
+    .. [BusAndDekker1975]
+       Bus, J. C. P., Dekker, T. J.,
+       "Two Efficient Algorithms with Guaranteed Convergence for Finding a Zero
+       of a Function", ACM Transactions on Mathematical Software, Vol. 1, Issue
+       4, Dec. 1975, pp. 330-345. Section 3: "Algorithm M".
+       :doi:`10.1145/355656.355659`
+
+    Examples
+    --------
+    >>> def f(x):
+    ...     return (x**2 - 1)
+
+    >>> from scipy import optimize
+
+    >>> root = optimize.brenth(f, -2, 0)
+    >>> root
+    -1.0
+
+    >>> root = optimize.brenth(f, 0, 2)
+    >>> root
+    1.0
+
+    """
+    if not isinstance(args, tuple):
+        args = (args,)
+    maxiter = operator.index(maxiter)
+    if xtol <= 0:
+        raise ValueError("xtol too small (%g <= 0)" % xtol)
+    if rtol < _rtol:
+        raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
+    r = _zeros._brenth(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
+    return results_c(full_output, r)
+
+
+################################
+# TOMS "Algorithm 748: Enclosing Zeros of Continuous Functions", by
+#  Alefeld, G. E. and Potra, F. A. and Shi, Yixun,
+#  See [1]
+
+
+def _notclose(fs, rtol=_rtol, atol=_xtol):
+    # Ensure not None, not 0, all finite, and not very close to each other
+    notclosefvals = (
+            all(fs) and all(np.isfinite(fs)) and
+            not any(any(np.isclose(_f, fs[i + 1:], rtol=rtol, atol=atol))
+                    for i, _f in enumerate(fs[:-1])))
+    return notclosefvals
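+
+# For example, _notclose([1.0, 2.0, 3.0]) is True, while
+# _notclose([1.0, 1.0 + 1e-15, 3.0]) is False: the first two values agree
+# within the default absolute tolerance, so they count as "close".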
+
+
+def _secant(xvals, fvals):
+    """Perform a secant step, taking a little care"""
+    # Secant has many "mathematically" equivalent formulations
+    # x2 = x0 - (x1 - x0)/(f1 - f0) * f0
+    #    = x1 - (x1 - x0)/(f1 - f0) * f1
+    #    = (-x1 * f0 + x0 * f1) / (f1 - f0)
+    #    = (-f0 / f1 * x1 + x0) / (1 - f0 / f1)
+    #    = (-f1 / f0 * x0 + x1) / (1 - f1 / f0)
+    x0, x1 = xvals[:2]
+    f0, f1 = fvals[:2]
+    if f0 == f1:
+        return np.nan
+    if np.abs(f1) > np.abs(f0):
+        x2 = (-f0 / f1 * x1 + x0) / (1 - f0 / f1)
+    else:
+        x2 = (-f1 / f0 * x0 + x1) / (1 - f1 / f0)
+    return x2
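+
+# For example, _secant([1.0, 2.0], [-1.0, 3.0]) returns 1.25, the x-intercept
+# of the line through (1, -1) and (2, 3), using the better-conditioned of the
+# rearrangements above (the one that divides by the larger |f|).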
+
+
+def _update_bracket(ab, fab, c, fc):
+    """Update a bracket given (c, fc), return the discarded endpoints."""
+    fa, fb = fab
+    idx = (0 if np.sign(fa) * np.sign(fc) > 0 else 1)
+    rx, rfx = ab[idx], fab[idx]
+    fab[idx] = fc
+    ab[idx] = c
+    return rx, rfx
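+
+# For example, with ab=[0, 2] and fab=[-1, 3], _update_bracket(ab, fab, 1.0, 0.5)
+# replaces the endpoint whose f-value shares the sign of fc=0.5, leaving
+# ab=[0, 1.0] and fab=[-1, 0.5], and returns the discarded pair (2, 3).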
+
+
+def _compute_divided_differences(xvals, fvals, N=None, full=True,
+                                 forward=True):
+    """Return a matrix of divided differences for the xvals, fvals pairs
+
+    DD[i, j] = f[x_{i-j}, ..., x_i] for 0 <= j <= i
+
+    If full is False, just return the main diagonal (or last row):
+      f[a], f[a, b] and f[a, b, c].
+    If forward is False, return f[c], f[b, c], f[a, b, c]."""
+    if full:
+        if forward:
+            xvals = np.asarray(xvals)
+        else:
+            xvals = np.array(xvals)[::-1]
+        M = len(xvals)
+        N = M if N is None else min(N, M)
+        DD = np.zeros([M, N])
+        DD[:, 0] = fvals[:]
+        for i in range(1, N):
+            DD[i:, i] = (np.diff(DD[i - 1:, i - 1]) /
+                         (xvals[i:] - xvals[:M - i]))
+        return DD
+
+    xvals = np.asarray(xvals)
+    dd = np.array(fvals)
+    row = np.array(fvals)
+    idx2Use = (0 if forward else -1)
+    dd[0] = fvals[idx2Use]
+    for i in range(1, len(xvals)):
+        denom = xvals[i:i + len(row) - 1] - xvals[:len(row) - 1]
+        row = np.diff(row)[:] / denom
+        dd[i] = row[idx2Use]
+    return dd
+
+
+def _interpolated_poly(xvals, fvals, x):
+    """Compute p(x) for the polynomial passing through the specified locations.
+
+    Use Neville's algorithm to compute p(x) where p is the minimal degree
+    polynomial passing through the points xvals, fvals"""
+    xvals = np.asarray(xvals)
+    N = len(xvals)
+    Q = np.zeros([N, N])
+    D = np.zeros([N, N])
+    Q[:, 0] = fvals[:]
+    D[:, 0] = fvals[:]
+    for k in range(1, N):
+        alpha = D[k:, k - 1] - Q[k - 1:N - 1, k - 1]
+        diffik = xvals[0:N - k] - xvals[k:N]
+        Q[k:, k] = (xvals[k:] - x) / diffik * alpha
+        D[k:, k] = (xvals[:N - k] - x) / diffik * alpha
+    # Expect Q[-1, 1:] to be small relative to Q[-1, 0] as x approaches a root
+    return np.sum(Q[-1, 1:]) + Q[-1, 0]
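+
+# For example, _interpolated_poly([0.0, 1.0, 2.0], [1.0, 3.0, 7.0], 3.0)
+# evaluates the unique quadratic through those points (x**2 + x + 1) at x=3,
+# returning 13.0 up to floating-point rounding.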
+
+
+def _inverse_poly_zero(a, b, c, d, fa, fb, fc, fd):
+    """Inverse cubic interpolation f-values -> x-values
+
+    Given four points (fa, a), (fb, b), (fc, c), (fd, d) with
+    fa, fb, fc, fd all distinct, find poly IP(y) through the 4 points
+    and compute x=IP(0).
+    """
+    return _interpolated_poly([fa, fb, fc, fd], [a, b, c, d], 0)
+
+
+def _newton_quadratic(ab, fab, d, fd, k):
+    """Apply Newton-Raphson like steps, using divided differences to approximate f'
+
+    ab is a real interval [a, b] containing a root,
+    fab holds the real values of f(a), f(b),
+    d is a real number outside [a, b],
+    k is the number of steps to apply.
+    """
+    a, b = ab
+    fa, fb = fab
+    _, B, A = _compute_divided_differences([a, b, d], [fa, fb, fd],
+                                           forward=True, full=False)
+
+    # _P  is the quadratic polynomial through the 3 points
+    def _P(x):
+        # Horner evaluation of fa + B * (x - a) + A * (x - a) * (x - b)
+        return (A * (x - b) + B) * (x - a) + fa
+
+    if A == 0:
+        r = a - fa / B
+    else:
+        r = (a if np.sign(A) * np.sign(fa) > 0 else b)
+    # Apply k Newton-Raphson steps to _P(x), starting from x=r
+    for i in range(k):
+        r1 = r - _P(r) / (B + A * (2 * r - a - b))
+        if not (ab[0] < r1 < ab[1]):
+            if (ab[0] < r < ab[1]):
+                return r
+            r = sum(ab) / 2.0
+            break
+        r = r1
+
+    return r
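+
+# For example, with ab=[0, 1], fab=[-1, 1], d=2, fd=5 the quadratic through
+# the three points is _P(x) = x**2 + x - 1; the start is r=b=1 (since sign(A)
+# and sign(fa) differ), and successive steps 1 -> 2/3 -> ~0.6190 approach the
+# bracketed root (sqrt(5) - 1)/2 ~ 0.6180.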
+
+
+class TOMS748Solver:
+    """Solve f(x, *args) == 0 using Algorithm 748 of Alefeld, Potra & Shi.
+    """
+    _MU = 0.5
+    _K_MIN = 1
+    _K_MAX = 100  # A very high value for real usage. Expect 1, 2, maybe 3.
+
+    def __init__(self):
+        self.f = None
+        self.args = None
+        self.function_calls = 0
+        self.iterations = 0
+        self.k = 2
+        # ab=[a,b] is a global interval containing a root
+        self.ab = [np.nan, np.nan]
+        # fab is function values at a, b
+        self.fab = [np.nan, np.nan]
+        self.d = None
+        self.fd = None
+        self.e = None
+        self.fe = None
+        self.disp = False
+        self.xtol = _xtol
+        self.rtol = _rtol
+        self.maxiter = _iter
+
+    def configure(self, xtol, rtol, maxiter, disp, k):
+        self.disp = disp
+        self.xtol = xtol
+        self.rtol = rtol
+        self.maxiter = maxiter
+        # Silently replace a low value of k with 1
+        self.k = max(k, self._K_MIN)
+        # Noisily replace a high value of k with self._K_MAX
+        if self.k > self._K_MAX:
+            msg = "toms748: Overriding k: ->%d" % self._K_MAX
+            warnings.warn(msg, RuntimeWarning)
+            self.k = self._K_MAX
+
+    def _callf(self, x, error=True):
+        """Call the user-supplied function, update book-keeping"""
+        fx = self.f(x, *self.args)
+        self.function_calls += 1
+        if not np.isfinite(fx) and error:
+            raise ValueError("Invalid function value: f(%f) -> %s " % (x, fx))
+        return fx
+
+    def get_result(self, x, flag=_ECONVERGED):
+        r"""Package the result and statistics into a tuple."""
+        return (x, self.function_calls, self.iterations, flag)
+
+    def _update_bracket(self, c, fc):
+        return _update_bracket(self.ab, self.fab, c, fc)
+
+    def start(self, f, a, b, args=()):
+        r"""Prepare for the iterations."""
+        self.function_calls = 0
+        self.iterations = 0
+
+        self.f = f
+        self.args = args
+        self.ab[:] = [a, b]
+        if not np.isfinite(a) or np.imag(a) != 0:
+            raise ValueError("Invalid x value: %s " % (a))
+        if not np.isfinite(b) or np.imag(b) != 0:
+            raise ValueError("Invalid x value: %s " % (b))
+
+        fa = self._callf(a)
+        if not np.isfinite(fa) or np.imag(fa) != 0:
+            raise ValueError("Invalid function value: f(%f) -> %s " % (a, fa))
+        if fa == 0:
+            return _ECONVERGED, a
+        fb = self._callf(b)
+        if not np.isfinite(fb) or np.imag(fb) != 0:
+            raise ValueError("Invalid function value: f(%f) -> %s " % (b, fb))
+        if fb == 0:
+            return _ECONVERGED, b
+
+        if np.sign(fb) * np.sign(fa) > 0:
+            raise ValueError("a, b must bracket a root f(%e)=%e, f(%e)=%e " %
+                             (a, fa, b, fb))
+        self.fab[:] = [fa, fb]
+
+        return _EINPROGRESS, sum(self.ab) / 2.0
+
+    def get_status(self):
+        """Determine the current status."""
+        a, b = self.ab[:2]
+        if np.isclose(a, b, rtol=self.rtol, atol=self.xtol):
+            return _ECONVERGED, sum(self.ab) / 2.0
+        if self.iterations >= self.maxiter:
+            return _ECONVERR, sum(self.ab) / 2.0
+        return _EINPROGRESS, sum(self.ab) / 2.0
+
+    def iterate(self):
+        """Perform one step in the algorithm.
+
+        Implements Algorithm 4.1(k=1) or 4.2(k=2) in [APS1995]
+        """
+        self.iterations += 1
+        eps = np.finfo(float).eps
+        d, fd, e, fe = self.d, self.fd, self.e, self.fe
+        ab_width = self.ab[1] - self.ab[0]  # Need the start width below
+        c = None
+
+        for nsteps in range(2, self.k+2):
+            # If the f-values are sufficiently separated, perform an inverse
+            # polynomial interpolation step. Otherwise, perform nsteps
+            # repeats of an approximate Newton-Raphson step.
+            if _notclose(self.fab + [fd, fe], rtol=0, atol=32*eps):
+                c0 = _inverse_poly_zero(self.ab[0], self.ab[1], d, e,
+                                        self.fab[0], self.fab[1], fd, fe)
+                if self.ab[0] < c0 < self.ab[1]:
+                    c = c0
+            if c is None:
+                c = _newton_quadratic(self.ab, self.fab, d, fd, nsteps)
+
+            fc = self._callf(c)
+            if fc == 0:
+                return _ECONVERGED, c
+
+            # re-bracket
+            e, fe = d, fd
+            d, fd = self._update_bracket(c, fc)
+
+        # u is the endpoint with the smallest f-value
+        uix = (0 if np.abs(self.fab[0]) < np.abs(self.fab[1]) else 1)
+        u, fu = self.ab[uix], self.fab[uix]
+
+        _, A = _compute_divided_differences(self.ab, self.fab,
+                                            forward=(uix == 0), full=False)
+        c = u - 2 * fu / A
+        if np.abs(c - u) > 0.5 * (self.ab[1] - self.ab[0]):
+            c = sum(self.ab) / 2.0
+        else:
+            if np.isclose(c, u, rtol=eps, atol=0):
+                # c didn't change (much).
+                # Either because the f-values at the endpoints have vastly
+                # differing magnitudes, or because the root is very close to
+                # that endpoint
+                frs = np.frexp(self.fab)[1]
+                if frs[uix] < frs[1 - uix] - 50:  # Differ by more than 2**50
+                    c = (31 * self.ab[uix] + self.ab[1 - uix]) / 32
+                else:
+                    # Make a bigger adjustment, about the
+                    # size of the requested tolerance.
+                    mm = (1 if uix == 0 else -1)
+                    adj = mm * np.abs(c) * self.rtol + mm * self.xtol
+                    c = u + adj
+                if not self.ab[0] < c < self.ab[1]:
+                    c = sum(self.ab) / 2.0
+
+        fc = self._callf(c)
+        if fc == 0:
+            return _ECONVERGED, c
+
+        e, fe = d, fd
+        d, fd = self._update_bracket(c, fc)
+
+        # If the width of the new interval did not decrease enough, bisect
+        if self.ab[1] - self.ab[0] > self._MU * ab_width:
+            e, fe = d, fd
+            z = sum(self.ab) / 2.0
+            fz = self._callf(z)
+            if fz == 0:
+                return _ECONVERGED, z
+            d, fd = self._update_bracket(z, fz)
+
+        # Record d and e for next iteration
+        self.d, self.fd = d, fd
+        self.e, self.fe = e, fe
+
+        status, xn = self.get_status()
+        return status, xn
+
+    def solve(self, f, a, b, args=(),
+              xtol=_xtol, rtol=_rtol, k=2, maxiter=_iter, disp=True):
+        r"""Solve f(x) = 0 given an interval containing a zero."""
+        self.configure(xtol=xtol, rtol=rtol, maxiter=maxiter, disp=disp, k=k)
+        status, xn = self.start(f, a, b, args)
+        if status == _ECONVERGED:
+            return self.get_result(xn)
+
+        # The first step only has two x-values.
+        c = _secant(self.ab, self.fab)
+        if not self.ab[0] < c < self.ab[1]:
+            c = sum(self.ab) / 2.0
+        fc = self._callf(c)
+        if fc == 0:
+            return self.get_result(c)
+
+        self.d, self.fd = self._update_bracket(c, fc)
+        self.e, self.fe = None, None
+        self.iterations += 1
+
+        while True:
+            status, xn = self.iterate()
+            if status == _ECONVERGED:
+                return self.get_result(xn)
+            if status == _ECONVERR:
+                fmt = "Failed to converge after %d iterations, bracket is %s"
+                if disp:
+                    msg = fmt % (self.iterations + 1, self.ab)
+                    raise RuntimeError(msg)
+                return self.get_result(xn, _ECONVERR)
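+
+    # A minimal usage sketch (illustrative; the module-level toms748() below
+    # is the intended entry point):
+    #
+    #     >>> s = TOMS748Solver()
+    #     >>> x, calls, iters, flag = s.solve(lambda x: x**2 - 2, 1, 2)
+    #     >>> abs(x - 2 ** 0.5) < 1e-8
+    #     True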
+
+
+def toms748(f, a, b, args=(), k=1,
+            xtol=_xtol, rtol=_rtol, maxiter=_iter,
+            full_output=False, disp=True):
+    """
+    Find a zero using TOMS Algorithm 748 method.
+
+    Implements the Algorithm 748 method of Alefeld, Potra and Shi to find a
+    zero of the function `f` on the interval `[a , b]`, where `f(a)` and
+    `f(b)` must have opposite signs.
+
+    It uses a mixture of inverse cubic interpolation and
+    "Newton-quadratic" steps [APS1995]_.
+
+    Parameters
+    ----------
+    f : function
+        Python function returning a scalar. The function :math:`f`
+        must be continuous, and :math:`f(a)` and :math:`f(b)`
+        have opposite signs.
+    a : scalar
+        Lower boundary of the search interval.
+    b : scalar
+        Upper boundary of the search interval.
+    args : tuple, optional
+        containing extra arguments for the function `f`.
+        `f` is called by ``f(x, *args)``.
+    k : int, optional
+        The number of Newton quadratic steps to perform each
+        iteration. ``k>=1``.
+    xtol : scalar, optional
+        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
+        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
+        parameter must be nonnegative.
+    rtol : scalar, optional
+        The computed root ``x0`` will satisfy ``np.allclose(x, x0,
+        atol=xtol, rtol=rtol)``, where ``x`` is the exact root.
+    maxiter : int, optional
+        If convergence is not achieved in `maxiter` iterations, an error is
+        raised. Must be >= 0.
+    full_output : bool, optional
+        If `full_output` is False, the root is returned. If `full_output` is
+        True, the return value is ``(x, r)``, where `x` is the root, and `r` is
+        a `RootResults` object.
+    disp : bool, optional
+        If True, raise RuntimeError if the algorithm didn't converge.
+        Otherwise, the convergence status is recorded in the `RootResults`
+        return object.
+
+    Returns
+    -------
+    x0 : float
+        Approximate zero of `f`.
+    r : `RootResults` (present if ``full_output = True``)
+        Object containing information about the convergence. In particular,
+        ``r.converged`` is True if the routine converged.
+
+    See Also
+    --------
+    brentq, brenth, ridder, bisect, newton
+    fsolve : find zeroes in N dimensions.
+
+    Notes
+    -----
+    `f` must be continuous.
+    Algorithm 748 with ``k=2`` is asymptotically the most efficient
+    algorithm known for finding roots of a four times continuously
+    differentiable function.
+    In contrast with Brent's algorithm, which may only decrease the length of
+    the enclosing bracket on the last step, Algorithm 748 decreases it each
+    iteration with the same asymptotic efficiency as it finds the root.
+
+    For easy statement of efficiency indices, assume that `f` has 4
+    continuous derivatives.
+    For ``k=1``, the convergence order is at least 2.7, and with
+    asymptotically about 2 function evaluations per iteration, the
+    efficiency index is approximately 1.65 (``2.7**(1/2)``).
+    For ``k=2``, the order is about 4.6 with asymptotically 3 function
+    evaluations per iteration, and the efficiency index is 1.66
+    (``4.6**(1/3)``).
+    For higher values of `k`, the efficiency index approaches
+    the kth root of ``(3k-2)``, hence ``k=1`` or ``k=2`` are
+    usually appropriate.
+
+    References
+    ----------
+    .. [APS1995]
+       Alefeld, G. E., Potra, F. A. and Shi, Yixun,
+       *Algorithm 748: Enclosing Zeros of Continuous Functions*,
+       ACM Trans. Math. Softw., Volume 21, Issue 3 (1995), pp. 327-344,
+       doi:10.1145/210089.210111
+
+    Examples
+    --------
+    >>> def f(x):
+    ...     return (x**3 - 1)  # only one real root at x = 1
+
+    >>> from scipy import optimize
+    >>> root, results = optimize.toms748(f, 0, 2, full_output=True)
+    >>> root
+    1.0
+    >>> results
+          converged: True
+               flag: 'converged'
+     function_calls: 11
+         iterations: 5
+               root: 1.0
+    """
+    if xtol <= 0:
+        raise ValueError("xtol too small (%g <= 0)" % xtol)
+    if rtol < _rtol / 4:
+        raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol / 4))
+    maxiter = operator.index(maxiter)
+    if maxiter < 1:
+        raise ValueError("maxiter must be greater than 0")
+    if not np.isfinite(a):
+        raise ValueError("a is not finite %s" % a)
+    if not np.isfinite(b):
+        raise ValueError("b is not finite %s" % b)
+    if a >= b:
+        raise ValueError("a and b are not an interval [{}, {}]".format(a, b))
+    if not k >= 1:
+        raise ValueError("k too small (%s < 1)" % k)
+
+    if not isinstance(args, tuple):
+        args = (args,)
+    solver = TOMS748Solver()
+    result = solver.solve(f, a, b, args=args, k=k, xtol=xtol, rtol=rtol,
+                          maxiter=maxiter, disp=disp)
+    x, function_calls, iterations, flag = result
+    return _results_select(full_output, (x, function_calls, iterations, flag))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/cobyla.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/cobyla.py
new file mode 100644
index 00000000..cd013fe4
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/cobyla.py
@@ -0,0 +1,31 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.optimize` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _cobyla_py
+
+
+__all__ = [  # noqa: F822
+    'OptimizeResult',
+    'RLock',
+    'fmin_cobyla',
+    'functools',
+    'izip',
+    'synchronized',
+]
+
+def __dir__():
+    return __all__
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.optimize.cobyla is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.optimize instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.optimize` namespace, "
+                  "the `scipy.optimize.cobyla` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_cobyla_py, name)
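+
+# A minimal sketch (illustrative, not part of this module) of how the
+# PEP 562 shim above behaves for a caller:
+#
+#     >>> import warnings
+#     >>> from scipy.optimize import cobyla
+#     >>> with warnings.catch_warnings(record=True) as w:
+#     ...     warnings.simplefilter("always")
+#     ...     func = cobyla.fmin_cobyla  # resolved via __getattr__
+#     >>> issubclass(w[-1].category, DeprecationWarning)
+#     True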
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/cython_optimize.pxd b/__packaged__/coreml/.python_dependencies/scipy/optimize/cython_optimize.pxd
new file mode 100644
index 00000000..d5a0bdd7
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/cython_optimize.pxd
@@ -0,0 +1,11 @@
+# Public Cython API declarations
+#
+# See doc/source/dev/contributor/public_cython_api.rst for guidelines
+
+
+# The following cimport statement provides legacy ABI
+# support. Changing it causes an ABI forward-compatibility break
+# (gh-11793), so we currently leave it as is (no further cimport
+# statements should be used in this file).
+from .cython_optimize._zeros cimport (
+    brentq, brenth, ridder, bisect, zeros_full_output)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/cython_optimize/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/cython_optimize/__init__.py
new file mode 100644
index 00000000..3822b839
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/cython_optimize/__init__.py
@@ -0,0 +1,132 @@
+"""
+Cython optimize zeros API
+=========================
+The underlying C functions for the following root finders can be accessed
+directly using Cython:
+
+- `~scipy.optimize.bisect`
+- `~scipy.optimize.ridder`
+- `~scipy.optimize.brenth`
+- `~scipy.optimize.brentq`
+
+The Cython API for the zeros functions is similar except there is no ``disp``
+argument. Import the zeros functions using ``cimport`` from
+`scipy.optimize.cython_optimize`. ::
+
+    from scipy.optimize.cython_optimize cimport bisect, ridder, brentq, brenth
+
+
+Callback signature
+------------------
+The zeros functions in `~scipy.optimize.cython_optimize` expect a callback that
+takes a double for the scalar independent variable as the 1st argument and a
+user defined ``struct`` with any extra parameters as the 2nd argument. ::
+
+    double (*callback_type)(double, void*)
+
+
+Examples
+--------
+Usage of `~scipy.optimize.cython_optimize` requires Cython to write callbacks
+that are compiled into C. For more information on compiling Cython, see the
+`Cython Documentation <http://docs.cython.org/en/latest/src/reference/compilation.html>`_.
+
+These are the basic steps:
+
+1. Create a Cython ``.pyx`` file, for example: ``myexample.pyx``.
+2. Import the desired root finder from `~scipy.optimize.cython_optimize`.
+3. Write the callback function, and call the selected zeros function passing
+   the callback, any extra arguments, and the other solver parameters. ::
+
+       from scipy.optimize.cython_optimize cimport brentq
+
+       # import math from Cython
+       from libc cimport math
+
+       myargs = {'C0': 1.0, 'C1': 0.7}  # a dictionary of extra arguments
+       XLO, XHI = 0.5, 1.0  # lower and upper search boundaries
+       XTOL, RTOL, MITR = 1e-3, 1e-3, 10  # other solver parameters
+
+       # user-defined struct for extra parameters
+       ctypedef struct test_params:
+           double C0
+           double C1
+
+
+       # user-defined callback
+       cdef double f(double x, void *args):
+           cdef test_params *myargs = <test_params *> args
+           return myargs.C0 - math.exp(-(x - myargs.C1))
+
+
+       # Cython wrapper function
+       cdef double brentq_wrapper_example(dict args, double xa, double xb,
+                                          double xtol, double rtol, int mitr):
+           # Cython automatically casts dictionary to struct
+           cdef test_params myargs = args
+           return brentq(
+               f, xa, xb, <test_params *> &myargs, xtol, rtol, mitr, NULL)
+
+
+       # Python function
+       def brentq_example(args=myargs, xa=XLO, xb=XHI, xtol=XTOL, rtol=RTOL,
+                          mitr=MITR):
+           '''Calls Cython wrapper from Python.'''
+           return brentq_wrapper_example(args, xa, xb, xtol, rtol, mitr)
+
+4. If you want to call your function from Python, create a Cython wrapper, and
+   a Python function that calls the wrapper, or use ``cpdef``. Then, in Python,
+   you can import and run the example. ::
+
+       from myexample import brentq_example
+
+       x = brentq_example()
+       # 0.6999942848231314
+
+5. Create a Cython ``.pxd`` file if you need to export any Cython functions.
+
+
+Full output
+-----------
+The functions in `~scipy.optimize.cython_optimize` can also copy the full
+output from the solver to a C ``struct`` that is passed as its last argument.
+If you don't want the full output, just pass ``NULL``. The full output
+``struct`` must be type ``zeros_full_output``, which is defined in
+`scipy.optimize.cython_optimize` with the following fields:
+
+- ``int funcalls``: number of function calls
+- ``int iterations``: number of iterations
+- ``int error_num``: error number
+- ``double root``: root of function
+
+The root is copied by `~scipy.optimize.cython_optimize` to the full output
+``struct``. An error number of -1 means a sign error, -2 means a convergence
+error, and 0 means the solver converged. Continuing from the previous example::
+
+    from scipy.optimize.cython_optimize cimport zeros_full_output
+
+
+    # cython brentq solver with full output
+    cdef zeros_full_output brentq_full_output_wrapper_example(
+            dict args, double xa, double xb, double xtol, double rtol,
+            int mitr):
+        cdef test_params myargs = args
+        cdef zeros_full_output my_full_output
+        # use my_full_output instead of NULL
+        brentq(f, xa, xb, &myargs, xtol, rtol, mitr, &my_full_output)
+        return my_full_output
+
+
+    # Python function
+    def brent_full_output_example(args=myargs, xa=XLO, xb=XHI, xtol=XTOL,
+                                  rtol=RTOL, mitr=MITR):
+        '''Returns full output'''
+        return brentq_full_output_wrapper_example(args, xa, xb, xtol, rtol,
+                                                  mitr)
+
+    result = brent_full_output_example()
+    # {'error_num': 0,
+    #  'funcalls': 6,
+    #  'iterations': 5,
+    #  'root': 0.6999942848231314}
+"""
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/cython_optimize/_zeros.pxd b/__packaged__/coreml/.python_dependencies/scipy/optimize/cython_optimize/_zeros.pxd
new file mode 100644
index 00000000..1ae32c9f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/cython_optimize/_zeros.pxd
@@ -0,0 +1,33 @@
+# Legacy public Cython API declarations
+#
+# NOTE: due to the way Cython ABI compatibility works, **no changes
+# should be made to this file** --- any API additions/changes should be
+# done in `cython_optimize.pxd` (see gh-11793).
+
+ctypedef double (*callback_type)(double, void*)
+
+ctypedef struct zeros_parameters:
+    callback_type function
+    void* args
+
+ctypedef struct zeros_full_output:
+    int funcalls
+    int iterations
+    int error_num
+    double root
+
+cdef double bisect(callback_type f, double xa, double xb, void* args,
+                   double xtol, double rtol, int iter,
+                   zeros_full_output *full_output) nogil
+
+cdef double ridder(callback_type f, double xa, double xb, void* args,
+                   double xtol, double rtol, int iter,
+                   zeros_full_output *full_output) nogil
+
+cdef double brenth(callback_type f, double xa, double xb, void* args,
+                   double xtol, double rtol, int iter,
+                   zeros_full_output *full_output) nogil
+
+cdef double brentq(callback_type f, double xa, double xb, void* args,
+                   double xtol, double rtol, int iter,
+                   zeros_full_output *full_output) nogil
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/cython_optimize/c_zeros.pxd b/__packaged__/coreml/.python_dependencies/scipy/optimize/cython_optimize/c_zeros.pxd
new file mode 100644
index 00000000..723e479e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/cython_optimize/c_zeros.pxd
@@ -0,0 +1,26 @@
+cdef extern from "../Zeros/zeros.h":
+    ctypedef double (*callback_type)(double, void*)
+    ctypedef struct scipy_zeros_info:
+        int funcalls
+        int iterations
+        int error_num
+
+cdef extern from "../Zeros/bisect.c" nogil:
+    double bisect(callback_type f, double xa, double xb, double xtol,
+                  double rtol, int iter, void *func_data_param,
+                  scipy_zeros_info *solver_stats)
+
+cdef extern from "../Zeros/ridder.c" nogil:
+    double ridder(callback_type f, double xa, double xb, double xtol,
+                  double rtol, int iter, void *func_data_param,
+                  scipy_zeros_info *solver_stats)
+
+cdef extern from "../Zeros/brenth.c" nogil:
+    double brenth(callback_type f, double xa, double xb, double xtol,
+                  double rtol, int iter, void *func_data_param,
+                  scipy_zeros_info *solver_stats)
+
+cdef extern from "../Zeros/brentq.c" nogil:
+    double brentq(callback_type f, double xa, double xb, double xtol,
+                  double rtol, int iter, void *func_data_param,
+                  scipy_zeros_info *solver_stats)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/lbfgsb.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/lbfgsb.py
new file mode 100644
index 00000000..9b7b1b62
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/lbfgsb.py
@@ -0,0 +1,37 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.optimize` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _lbfgsb_py
+
+
+__all__ = [  # noqa: F822
+    'LbfgsInvHessProduct',
+    'LinearOperator',
+    'MemoizeJac',
+    'OptimizeResult',
+    'array',
+    'asarray',
+    'float64',
+    'fmin_l_bfgs_b',
+    'old_bound_to_new',
+    'zeros',
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.optimize.lbfgsb is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.optimize instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.optimize` namespace, "
+                  "the `scipy.optimize.lbfgsb` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_lbfgsb_py, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/linesearch.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/linesearch.py
new file mode 100644
index 00000000..90569303
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/linesearch.py
@@ -0,0 +1,38 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.optimize` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _linesearch
+
+
+__all__ = [  # noqa: F822
+    'LineSearchWarning',
+    'line_search',
+    'line_search_BFGS',
+    'line_search_armijo',
+    'line_search_wolfe1',
+    'line_search_wolfe2',
+    'minpack2',
+    'scalar_search_armijo',
+    'scalar_search_wolfe1',
+    'scalar_search_wolfe2',
+    'warn',
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.optimize.linesearch is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.optimize instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.optimize` namespace, "
+                  "the `scipy.optimize.linesearch` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_linesearch, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/minpack.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/minpack.py
new file mode 100644
index 00000000..cf15278f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/minpack.py
@@ -0,0 +1,60 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.optimize` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _minpack_py
+
+
+__all__ = [  # noqa: F822
+    'LEASTSQ_FAILURE',
+    'LEASTSQ_SUCCESS',
+    'LinAlgError',
+    'OptimizeResult',
+    'OptimizeWarning',
+    'asarray',
+    'atleast_1d',
+    'check_gradient',
+    'cholesky',
+    'curve_fit',
+    'dot',
+    'dtype',
+    'error',
+    'eye',
+    'finfo',
+    'fixed_point',
+    'fsolve',
+    'greater',
+    'inexact',
+    'inf',
+    'inv',
+    'issubdtype',
+    'least_squares',
+    'leastsq',
+    'prepare_bounds',
+    'prod',
+    'shape',
+    'solve_triangular',
+    'svd',
+    'take',
+    'transpose',
+    'triu',
+    'zeros',
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.optimize.minpack is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.optimize instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.optimize` namespace, "
+                  "the `scipy.optimize.minpack` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_minpack_py, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/minpack2.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/minpack2.py
new file mode 100644
index 00000000..e2820966
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/minpack2.py
@@ -0,0 +1,29 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.optimize` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _minpack2
+
+
+__all__ = [  # noqa: F822
+    'dcsrch',
+    'dcstep',
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.optimize.minpack2 is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.optimize instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.optimize` namespace, "
+                  "the `scipy.optimize.minpack2` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_minpack2, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/moduleTNC.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/moduleTNC.py
new file mode 100644
index 00000000..42e3ae19
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/moduleTNC.py
@@ -0,0 +1,28 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.optimize` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _moduleTNC
+
+
+__all__ = [  # noqa: F822
+
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.optimize.moduleTNC is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.optimize instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.optimize` namespace, "
+                  "the `scipy.optimize.moduleTNC` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_moduleTNC, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/nonlin.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/nonlin.py
new file mode 100644
index 00000000..245d6c2a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/nonlin.py
@@ -0,0 +1,65 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.optimize` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _nonlin
+
+
+__all__ = [  # noqa: F822
+    'Anderson',
+    'BroydenFirst',
+    'BroydenSecond',
+    'DiagBroyden',
+    'ExcitingMixing',
+    'GenericBroyden',
+    'InverseJacobian',
+    'Jacobian',
+    'KrylovJacobian',
+    'LinAlgError',
+    'LinearMixing',
+    'LowRankMatrix',
+    'NoConvergence',
+    'TerminationCondition',
+    'anderson',
+    'asarray',
+    'asjacobian',
+    'broyden1',
+    'broyden2',
+    'diagbroyden',
+    'dot',
+    'excitingmixing',
+    'get_blas_funcs',
+    'inspect',
+    'inv',
+    'linearmixing',
+    'maxnorm',
+    'newton_krylov',
+    'nonlin_solve',
+    'norm',
+    'qr',
+    'scalar_search_armijo',
+    'scalar_search_wolfe1',
+    'scipy',
+    'solve',
+    'svd',
+    'sys',
+    'vdot',
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.optimize.nonlin is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.optimize instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.optimize` namespace, "
+                  "the `scipy.optimize.nonlin` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_nonlin, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/optimize.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/optimize.py
new file mode 100644
index 00000000..2aac3667
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/optimize.py
@@ -0,0 +1,72 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.optimize` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _optimize
+
+
+__all__ = [  # noqa: F822
+    'Brent',
+    'FD_METHODS',
+    'Inf',
+    'LineSearchWarning',
+    'MapWrapper',
+    'MemoizeJac',
+    'OptimizeResult',
+    'OptimizeWarning',
+    'ScalarFunction',
+    'approx_derivative',
+    'approx_fhess_p',
+    'approx_fprime',
+    'argmin',
+    'asarray',
+    'asfarray',
+    'atleast_1d',
+    'bracket',
+    'brent',
+    'brute',
+    'check_grad',
+    'check_random_state',
+    'eye',
+    'fmin',
+    'fmin_bfgs',
+    'fmin_cg',
+    'fmin_ncg',
+    'fmin_powell',
+    'fminbound',
+    'golden',
+    'is_array_scalar',
+    'line_search',
+    'line_search_wolfe1',
+    'line_search_wolfe2',
+    'main',
+    'rosen',
+    'rosen_der',
+    'rosen_hess',
+    'rosen_hess_prod',
+    'shape',
+    'show_options',
+    'sqrt',
+    'squeeze',
+    'sys',
+    'vecnorm',
+    'zeros',
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.optimize.optimize is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.optimize instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.optimize` namespace, "
+                  "the `scipy.optimize.optimize` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_optimize, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/slsqp.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/slsqp.py
new file mode 100644
index 00000000..ff6ac415
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/slsqp.py
@@ -0,0 +1,46 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.optimize` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _slsqp_py
+
+
+__all__ = [  # noqa: F822
+    'OptimizeResult',
+    'append',
+    'approx_derivative',
+    'approx_jacobian',
+    'array',
+    'asfarray',
+    'atleast_1d',
+    'concatenate',
+    'exp',
+    'finfo',
+    'fmin_slsqp',
+    'inf',
+    'isfinite',
+    'linalg',
+    'old_bound_to_new',
+    'slsqp',
+    'sqrt',
+    'vstack',
+    'zeros',
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.optimize.slsqp is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.optimize instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.optimize` namespace, "
+                  "the `scipy.optimize.slsqp` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_slsqp_py, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test__basinhopping.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test__basinhopping.py
new file mode 100644
index 00000000..b43d6810
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test__basinhopping.py
@@ -0,0 +1,480 @@
+"""
+Unit tests for the basin hopping global minimization algorithm.
+"""
+import copy
+
+from numpy.testing import assert_almost_equal, assert_equal, assert_
+import pytest
+from pytest import raises as assert_raises
+import numpy as np
+from numpy import cos, sin
+
+from scipy.optimize import basinhopping, OptimizeResult
+from scipy.optimize._basinhopping import (
+    Storage, RandomDisplacement, Metropolis, AdaptiveStepsize)
+from scipy._lib._pep440 import Version
+
+
+def func1d(x):
+    f = cos(14.5 * x - 0.3) + (x + 0.2) * x
+    df = np.array(-14.5 * sin(14.5 * x - 0.3) + 2. * x + 0.2)
+    return f, df
+
+
+def func2d_nograd(x):
+    f = cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + 0.2) * x[0]
+    return f
+
+
+def func2d(x):
+    f = cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + 0.2) * x[0]
+    df = np.zeros(2)
+    df[0] = -14.5 * sin(14.5 * x[0] - 0.3) + 2. * x[0] + 0.2
+    df[1] = 2. * x[1] + 0.2
+    return f, df
+
+
+def func2d_easyderiv(x):
+    f = 2.0*x[0]**2 + 2.0*x[0]*x[1] + 2.0*x[1]**2 - 6.0*x[0]
+    df = np.zeros(2)
+    df[0] = 4.0*x[0] + 2.0*x[1] - 6.0
+    df[1] = 2.0*x[0] + 4.0*x[1]
+
+    return f, df
+
+
+class MyTakeStep1(RandomDisplacement):
+    """use a copy of displace, but have it set a special parameter to
+    make sure it's actually being used."""
+    def __init__(self):
+        self.been_called = False
+        super().__init__()
+
+    def __call__(self, x):
+        self.been_called = True
+        return super().__call__(x)
+
+
+def myTakeStep2(x):
+    """redo RandomDisplacement in function form without the attribute stepsize
+    to make sure everything still works ok
+    """
+    s = 0.5
+    x += np.random.uniform(-s, s, np.shape(x))
+    return x
+
+
+class MyAcceptTest:
+    """pass a custom accept test
+
+    This does nothing but make sure it's being used and ensure all the
+    possible return values are accepted
+    """
+    def __init__(self):
+        self.been_called = False
+        self.ncalls = 0
+        self.testres = [False, 'force accept', True, np.bool_(True),
+                        np.bool_(False), [], {}, 0, 1]
+
+    def __call__(self, **kwargs):
+        self.been_called = True
+        self.ncalls += 1
+        if self.ncalls - 1 < len(self.testres):
+            return self.testres[self.ncalls - 1]
+        else:
+            return True
+
+
+class MyCallBack:
+    """pass a custom callback function
+
+    This makes sure it's being used. It also returns True after 10
+    steps to ensure that it's stopping early.
+
+    """
+    def __init__(self):
+        self.been_called = False
+        self.ncalls = 0
+
+    def __call__(self, x, f, accepted):
+        self.been_called = True
+        self.ncalls += 1
+        if self.ncalls == 10:
+            return True
+
+
+class TestBasinHopping:
+
+    def setup_method(self):
+        """ Tests setup.
+
+        Run tests based on the 1-D and 2-D functions described above.
+        """
+        self.x0 = (1.0, [1.0, 1.0])
+        self.sol = (-0.195, np.array([-0.195, -0.1]))
+
+        self.tol = 3  # number of decimal places
+
+        self.niter = 100
+        self.disp = False
+
+        # fix random seed
+        np.random.seed(1234)
+
+        self.kwargs = {"method": "L-BFGS-B", "jac": True}
+        self.kwargs_nograd = {"method": "L-BFGS-B"}
+
+    def test_TypeError(self):
+        # test the TypeErrors are raised on bad input
+        i = 1
+        # if take_step is passed, it must be callable
+        assert_raises(TypeError, basinhopping, func2d, self.x0[i],
+                      take_step=1)
+        # if accept_test is passed, it must be callable
+        assert_raises(TypeError, basinhopping, func2d, self.x0[i],
+                      accept_test=1)
+
+    def test_input_validation(self):
+        msg = 'target_accept_rate has to be in range \\(0, 1\\)'
+        with assert_raises(ValueError, match=msg):
+            basinhopping(func1d, self.x0[0], target_accept_rate=0.)
+        with assert_raises(ValueError, match=msg):
+            basinhopping(func1d, self.x0[0], target_accept_rate=1.)
+
+        msg = 'stepwise_factor has to be in range \\(0, 1\\)'
+        with assert_raises(ValueError, match=msg):
+            basinhopping(func1d, self.x0[0], stepwise_factor=0.)
+        with assert_raises(ValueError, match=msg):
+            basinhopping(func1d, self.x0[0], stepwise_factor=1.)
+
+    def test_1d_grad(self):
+        # test 1-D minimizations with gradient
+        i = 0
+        res = basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,
+                           niter=self.niter, disp=self.disp)
+        assert_almost_equal(res.x, self.sol[i], self.tol)
+
+    def test_2d(self):
+        # test 2d minimizations with gradient
+        i = 1
+        res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
+                           niter=self.niter, disp=self.disp)
+        assert_almost_equal(res.x, self.sol[i], self.tol)
+        assert_(res.nfev > 0)
+
+    def test_njev(self):
+        # test njev is returned correctly
+        i = 1
+        minimizer_kwargs = self.kwargs.copy()
+        # L-BFGS-B doesn't use njev, but BFGS does
+        minimizer_kwargs["method"] = "BFGS"
+        res = basinhopping(func2d, self.x0[i],
+                           minimizer_kwargs=minimizer_kwargs, niter=self.niter,
+                           disp=self.disp)
+        assert_(res.nfev > 0)
+        assert_equal(res.nfev, res.njev)
+
+    def test_jac(self):
+        # test Jacobian returned
+        minimizer_kwargs = self.kwargs.copy()
+        # BFGS returns a Jacobian
+        minimizer_kwargs["method"] = "BFGS"
+
+        res = basinhopping(func2d_easyderiv, [0.0, 0.0],
+                           minimizer_kwargs=minimizer_kwargs, niter=self.niter,
+                           disp=self.disp)
+
+        assert_(hasattr(res.lowest_optimization_result, "jac"))
+
+        # in this case, the Jacobian is just [df/dx, df/dy]
+        _, jacobian = func2d_easyderiv(res.x)
+        assert_almost_equal(res.lowest_optimization_result.jac, jacobian,
+                            self.tol)
+
+    def test_2d_nograd(self):
+        # test 2-D minimizations without gradient
+        i = 1
+        res = basinhopping(func2d_nograd, self.x0[i],
+                           minimizer_kwargs=self.kwargs_nograd,
+                           niter=self.niter, disp=self.disp)
+        assert_almost_equal(res.x, self.sol[i], self.tol)
+
+    def test_all_minimizers(self):
+        # Test 2-D minimizations with gradient. Nelder-Mead, Powell, and COBYLA
+        # don't accept jac=True, so aren't included here.
+        i = 1
+        methods = ['CG', 'BFGS', 'Newton-CG', 'L-BFGS-B', 'TNC', 'SLSQP']
+        minimizer_kwargs = copy.copy(self.kwargs)
+        for method in methods:
+            minimizer_kwargs["method"] = method
+            res = basinhopping(func2d, self.x0[i],
+                               minimizer_kwargs=minimizer_kwargs,
+                               niter=self.niter, disp=self.disp)
+            assert_almost_equal(res.x, self.sol[i], self.tol)
+
+    def test_all_nograd_minimizers(self):
+        # Test 2-D minimizations without gradient. Newton-CG requires jac=True,
+        # so not included here.
+        i = 1
+        methods = ['CG', 'BFGS', 'L-BFGS-B', 'TNC', 'SLSQP',
+                   'Nelder-Mead', 'Powell', 'COBYLA']
+        minimizer_kwargs = copy.copy(self.kwargs_nograd)
+        for method in methods:
+            minimizer_kwargs["method"] = method
+            res = basinhopping(func2d_nograd, self.x0[i],
+                               minimizer_kwargs=minimizer_kwargs,
+                               niter=self.niter, disp=self.disp)
+            tol = self.tol
+            if method == 'COBYLA':
+                tol = 2
+            assert_almost_equal(res.x, self.sol[i], decimal=tol)
+
+    def test_pass_takestep(self):
+        # test that passing a custom takestep works
+        # also test that the stepsize is being adjusted
+        takestep = MyTakeStep1()
+        initial_step_size = takestep.stepsize
+        i = 1
+        res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
+                           niter=self.niter, disp=self.disp,
+                           take_step=takestep)
+        assert_almost_equal(res.x, self.sol[i], self.tol)
+        assert_(takestep.been_called)
+        # make sure that the built-in adaptive step size has been used
+        assert_(initial_step_size != takestep.stepsize)
+
+    def test_pass_simple_takestep(self):
+        # test that passing a custom takestep without the attribute stepsize
+        # still works
+        takestep = myTakeStep2
+        i = 1
+        res = basinhopping(func2d_nograd, self.x0[i],
+                           minimizer_kwargs=self.kwargs_nograd,
+                           niter=self.niter, disp=self.disp,
+                           take_step=takestep)
+        assert_almost_equal(res.x, self.sol[i], self.tol)
+
+    def test_pass_accept_test(self):
+        # test passing a custom accept test
+        # makes sure it's being used and ensures all the possible return values
+        # are accepted.
+        accept_test = MyAcceptTest()
+        i = 1
+        # there's no point in running it more than a few steps.
+        basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
+                     niter=10, disp=self.disp, accept_test=accept_test)
+        assert_(accept_test.been_called)
+
+    def test_pass_callback(self):
+        # test passing a custom callback function
+        # This makes sure it's being used. It also returns True after 10 steps
+        # to ensure that it's stopping early.
+        callback = MyCallBack()
+        i = 1
+        # there's no point in running it more than a few steps.
+        res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
+                           niter=30, disp=self.disp, callback=callback)
+        assert_(callback.been_called)
+        assert_("callback" in res.message[0])
+        # One of the calls of MyCallBack is during BasinHoppingRunner
+        # construction, so there are only 9 remaining before MyCallBack stops
+        # the minimization.
+        assert_equal(res.nit, 9)
+
+    def test_minimizer_fail(self):
+        # test if a minimizer fails
+        i = 1
+        self.kwargs["options"] = dict(maxiter=0)
+        self.niter = 10
+        res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
+                           niter=self.niter, disp=self.disp)
+        # the number of failed minimizations should be the number of
+        # iterations + 1
+        assert_equal(res.nit + 1, res.minimization_failures)
+
+    def test_niter_zero(self):
+        # gh5915, what happens if you call basinhopping with niter=0
+        i = 0
+        basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,
+                     niter=0, disp=self.disp)
+
+    def test_seed_reproducibility(self):
+        # seed should ensure reproducibility between runs
+        minimizer_kwargs = {"method": "L-BFGS-B", "jac": True}
+
+        f_1 = []
+
+        def callback(x, f, accepted):
+            f_1.append(f)
+
+        basinhopping(func2d, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs,
+                     niter=10, callback=callback, seed=10)
+
+        f_2 = []
+
+        def callback2(x, f, accepted):
+            f_2.append(f)
+
+        basinhopping(func2d, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs,
+                     niter=10, callback=callback2, seed=10)
+        assert_equal(np.array(f_1), np.array(f_2))
+
+    def test_random_gen(self):
+        # check that np.random.Generator can be used (numpy >= 1.17)
+        rng = np.random.default_rng(1)
+
+        minimizer_kwargs = {"method": "L-BFGS-B", "jac": True}
+
+        res1 = basinhopping(func2d, [1.0, 1.0],
+                            minimizer_kwargs=minimizer_kwargs,
+                            niter=10, seed=rng)
+
+        rng = np.random.default_rng(1)
+        res2 = basinhopping(func2d, [1.0, 1.0],
+                            minimizer_kwargs=minimizer_kwargs,
+                            niter=10, seed=rng)
+        assert_equal(res1.x, res2.x)
+
+    def test_monotonic_basin_hopping(self):
+        # test 1-D minimizations with gradient and T=0
+        i = 0
+        res = basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,
+                           niter=self.niter, disp=self.disp, T=0)
+        assert_almost_equal(res.x, self.sol[i], self.tol)
+
+
+class Test_Storage:
+    def setup_method(self):
+        self.x0 = np.array(1)
+        self.f0 = 0
+
+        minres = OptimizeResult()
+        minres.x = self.x0
+        minres.fun = self.f0
+
+        self.storage = Storage(minres)
+
+    def test_higher_f_rejected(self):
+        new_minres = OptimizeResult()
+        new_minres.x = self.x0 + 1
+        new_minres.fun = self.f0 + 1
+
+        ret = self.storage.update(new_minres)
+        minres = self.storage.get_lowest()
+        assert_equal(self.x0, minres.x)
+        assert_equal(self.f0, minres.fun)
+        assert_(not ret)
+
+    def test_lower_f_accepted(self):
+        new_minres = OptimizeResult()
+        new_minres.x = self.x0 + 1
+        new_minres.fun = self.f0 - 1
+
+        ret = self.storage.update(new_minres)
+        minres = self.storage.get_lowest()
+        assert_(self.x0 != minres.x)
+        assert_(self.f0 != minres.fun)
+        assert_(ret)
+
+
+class Test_RandomDisplacement:
+    def setup_method(self):
+        self.stepsize = 1.0
+        self.displace = RandomDisplacement(stepsize=self.stepsize)
+        self.N = 300000
+        self.x0 = np.zeros([self.N])
+
+    def test_random(self):
+        # the mean should be 0
+        # the variance should be (2*stepsize)**2 / 12
+        # note these tests are random, they will fail from time to time
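+        # (RandomDisplacement draws each coordinate from U(-s, s) with
+        # s = stepsize, so the variance is (2*s)**2 / 12 = s**2 / 3,
+        # i.e. 1/3 here)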
+        x = self.displace(self.x0)
+        v = (2. * self.stepsize) ** 2 / 12
+        assert_almost_equal(np.mean(x), 0., 1)
+        assert_almost_equal(np.var(x), v, 1)
+
+
+class Test_Metropolis:
+    def setup_method(self):
+        self.T = 2.
+        self.met = Metropolis(self.T)
+
+    def test_boolean_return(self):
+        # the return must be a bool, else an error will be raised in
+        # basinhopping
+        ret = self.met(f_new=0., f_old=1.)
+        assert isinstance(ret, bool)
+
+    def test_lower_f_accepted(self):
+        assert_(self.met(f_new=0., f_old=1.))
+
+    def test_KeyError(self):
+        # should raise KeyError if kwargs f_old or f_new is not passed
+        assert_raises(KeyError, self.met, f_old=1.)
+        assert_raises(KeyError, self.met, f_new=1.)
+
+    def test_accept(self):
+        # test that steps are randomly accepted for f_new > f_old
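+        # (with the canonical Metropolis rule the acceptance probability
+        # here is min(1, exp(-(1.0 - 0.5) / T)) = exp(-0.25) ~= 0.78, so
+        # both branches are effectively certain to occur in 1000 trials)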
+        one_accept = False
+        one_reject = False
+        for i in range(1000):
+            if one_accept and one_reject:
+                break
+            ret = self.met(f_new=1., f_old=0.5)
+            if ret:
+                one_accept = True
+            else:
+                one_reject = True
+        assert_(one_accept)
+        assert_(one_reject)
+
+    def test_GH7495(self):
+        # an overflow in exp was producing a RuntimeWarning
+        # create own object here in case someone changes self.T
+        met = Metropolis(2)
+        with np.errstate(over='raise'):
+            met.accept_reject(0, 2000)
+
+
+class Test_AdaptiveStepsize:
+    def setup_method(self):
+        self.stepsize = 1.
+        self.ts = RandomDisplacement(stepsize=self.stepsize)
+        self.target_accept_rate = 0.5
+        self.takestep = AdaptiveStepsize(takestep=self.ts, verbose=False,
+                                         accept_rate=self.target_accept_rate)
+
+    def test_adaptive_increase(self):
+        # if few steps are rejected, the stepsize should increase
+        x = 0.
+        self.takestep(x)
+        self.takestep.report(False)
+        for i in range(self.takestep.interval):
+            self.takestep(x)
+            self.takestep.report(True)
+        assert_(self.ts.stepsize > self.stepsize)
+
+    def test_adaptive_decrease(self):
+        # if few steps are accepted, the stepsize should decrease
+        x = 0.
+        self.takestep(x)
+        self.takestep.report(True)
+        for i in range(self.takestep.interval):
+            self.takestep(x)
+            self.takestep.report(False)
+        assert_(self.ts.stepsize < self.stepsize)
+
+    def test_all_accepted(self):
+        # test that everything works OK if all steps were accepted
+        x = 0.
+        for i in range(self.takestep.interval + 1):
+            self.takestep(x)
+            self.takestep.report(True)
+        assert_(self.ts.stepsize > self.stepsize)
+
+    def test_all_rejected(self):
+        # test that everything works OK if all steps were rejected
+        x = 0.
+        for i in range(self.takestep.interval + 1):
+            self.takestep(x)
+            self.takestep.report(False)
+        assert_(self.ts.stepsize < self.stepsize)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test__differential_evolution.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test__differential_evolution.py
new file mode 100644
index 00000000..fd159572
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test__differential_evolution.py
@@ -0,0 +1,1485 @@
+"""
+Unit tests for the differential global minimization algorithm.
+"""
+import multiprocessing
+import platform
+
+from scipy.optimize._differentialevolution import (DifferentialEvolutionSolver,
+                                                   _ConstraintWrapper)
+from scipy.optimize import differential_evolution
+from scipy.optimize._constraints import (Bounds, NonlinearConstraint,
+                                         LinearConstraint)
+from scipy.optimize import rosen, minimize
+from scipy.sparse import csr_matrix
+from scipy import stats
+from scipy._lib._pep440 import Version
+
+import numpy as np
+from numpy.testing import (assert_equal, assert_allclose, assert_almost_equal,
+                           assert_string_equal, assert_, suppress_warnings)
+from pytest import raises as assert_raises, warns
+import pytest
+
+
+class TestDifferentialEvolutionSolver:
+
+    def setup_method(self):
+        self.old_seterr = np.seterr(invalid='raise')
+        self.limits = np.array([[0., 0.],
+                                [2., 2.]])
+        self.bounds = [(0., 2.), (0., 2.)]
+
+        self.dummy_solver = DifferentialEvolutionSolver(self.quadratic,
+                                                        [(0, 100)])
+
+        # dummy_solver2 will be used to test mutation strategies
+        self.dummy_solver2 = DifferentialEvolutionSolver(self.quadratic,
+                                                         [(0, 1)],
+                                                         popsize=7,
+                                                         mutation=0.5)
+        # create a population that's only 7 members long
+        # [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
+        population = np.atleast_2d(np.arange(0.1, 0.8, 0.1)).T
+        self.dummy_solver2.population = population
+
+    def teardown_method(self):
+        np.seterr(**self.old_seterr)
+
+    def quadratic(self, x):
+        return x[0]**2
+
+    def test__strategy_resolves(self):
+        # test that the correct mutation function is resolved by
+        # different requested strategy arguments
+        solver = DifferentialEvolutionSolver(rosen,
+                                             self.bounds,
+                                             strategy='best1exp')
+        assert_equal(solver.strategy, 'best1exp')
+        assert_equal(solver.mutation_func.__name__, '_best1')
+
+        solver = DifferentialEvolutionSolver(rosen,
+                                             self.bounds,
+                                             strategy='best1bin')
+        assert_equal(solver.strategy, 'best1bin')
+        assert_equal(solver.mutation_func.__name__, '_best1')
+
+        solver = DifferentialEvolutionSolver(rosen,
+                                             self.bounds,
+                                             strategy='rand1bin')
+        assert_equal(solver.strategy, 'rand1bin')
+        assert_equal(solver.mutation_func.__name__, '_rand1')
+
+        solver = DifferentialEvolutionSolver(rosen,
+                                             self.bounds,
+                                             strategy='rand1exp')
+        assert_equal(solver.strategy, 'rand1exp')
+        assert_equal(solver.mutation_func.__name__, '_rand1')
+
+        solver = DifferentialEvolutionSolver(rosen,
+                                             self.bounds,
+                                             strategy='rand2exp')
+        assert_equal(solver.strategy, 'rand2exp')
+        assert_equal(solver.mutation_func.__name__, '_rand2')
+
+        solver = DifferentialEvolutionSolver(rosen,
+                                             self.bounds,
+                                             strategy='best2bin')
+        assert_equal(solver.strategy, 'best2bin')
+        assert_equal(solver.mutation_func.__name__, '_best2')
+
+        solver = DifferentialEvolutionSolver(rosen,
+                                             self.bounds,
+                                             strategy='rand2bin')
+        assert_equal(solver.strategy, 'rand2bin')
+        assert_equal(solver.mutation_func.__name__, '_rand2')
+
+        solver = DifferentialEvolutionSolver(rosen,
+                                             self.bounds,
+                                             strategy='rand2exp')
+        assert_equal(solver.strategy, 'rand2exp')
+        assert_equal(solver.mutation_func.__name__, '_rand2')
+
+        solver = DifferentialEvolutionSolver(rosen,
+                                             self.bounds,
+                                             strategy='randtobest1bin')
+        assert_equal(solver.strategy, 'randtobest1bin')
+        assert_equal(solver.mutation_func.__name__, '_randtobest1')
+
+        solver = DifferentialEvolutionSolver(rosen,
+                                             self.bounds,
+                                             strategy='randtobest1exp')
+        assert_equal(solver.strategy, 'randtobest1exp')
+        assert_equal(solver.mutation_func.__name__, '_randtobest1')
+
+        solver = DifferentialEvolutionSolver(rosen,
+                                             self.bounds,
+                                             strategy='currenttobest1bin')
+        assert_equal(solver.strategy, 'currenttobest1bin')
+        assert_equal(solver.mutation_func.__name__, '_currenttobest1')
+
+        solver = DifferentialEvolutionSolver(rosen,
+                                             self.bounds,
+                                             strategy='currenttobest1exp')
+        assert_equal(solver.strategy, 'currenttobest1exp')
+        assert_equal(solver.mutation_func.__name__, '_currenttobest1')
+
+    def test__mutate1(self):
+        # strategies */1/*, i.e. rand/1/bin, best/1/exp, etc.
+        result = np.array([0.05])
+        trial = self.dummy_solver2._best1((2, 3, 4, 5, 6))
+        assert_allclose(trial, result)
+
+        result = np.array([0.25])
+        trial = self.dummy_solver2._rand1((2, 3, 4, 5, 6))
+        assert_allclose(trial, result)
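+
+        # Worked check (illustrative), assuming the conventional DE formulas
+        # and that index 0 holds the current best, with F = 0.5 and
+        # samples (2, 3, 4, 5, 6):
+        #   best1: x_best + F*(x_2 - x_3) = 0.1 + 0.5*(0.3 - 0.4) = 0.05
+        #   rand1: x_2 + F*(x_3 - x_4) = 0.3 + 0.5*(0.4 - 0.5) = 0.25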
+
+    def test__mutate2(self):
+        # strategies */2/*, i.e. rand/2/bin, best/2/exp, etc.
+        # [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
+
+        result = np.array([-0.1])
+        trial = self.dummy_solver2._best2((2, 3, 4, 5, 6))
+        assert_allclose(trial, result)
+
+        result = np.array([0.1])
+        trial = self.dummy_solver2._rand2((2, 3, 4, 5, 6))
+        assert_allclose(trial, result)
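+
+        # Worked check (illustrative), with the same population and F = 0.5:
+        #   best2: x_best + F*(x_2 + x_3 - x_4 - x_5)
+        #        = 0.1 + 0.5*(0.3 + 0.4 - 0.5 - 0.6) = -0.1
+        #   rand2: x_2 + F*(x_3 + x_4 - x_5 - x_6)
+        #        = 0.3 + 0.5*(0.4 + 0.5 - 0.6 - 0.7) = 0.1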
+
+    def test__randtobest1(self):
+        # strategies randtobest/1/*
+        result = np.array([0.15])
+        trial = self.dummy_solver2._randtobest1((2, 3, 4, 5, 6))
+        assert_allclose(trial, result)
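+
+        # Worked check (illustrative): randtobest1 appears to move a random
+        # member towards the best one,
+        #   x_2 + F*(x_best - x_2) + F*(x_3 - x_4)
+        #   = 0.3 + 0.5*(0.1 - 0.3) + 0.5*(0.4 - 0.5) = 0.15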
+
+    def test__currenttobest1(self):
+        # strategies currenttobest/1/*
+        result = np.array([0.1])
+        trial = self.dummy_solver2._currenttobest1(1, (2, 3, 4, 5, 6))
+        assert_allclose(trial, result)
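+
+        # Worked check (illustrative): with candidate = 1 (x_1 = 0.2),
+        #   x_1 + F*(x_best - x_1 + x_2 - x_3)
+        #   = 0.2 + 0.5*(0.1 - 0.2 + 0.3 - 0.4) = 0.1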
+
+    def test_can_init_with_dithering(self):
+        mutation = (0.5, 1)
+        solver = DifferentialEvolutionSolver(self.quadratic,
+                                             self.bounds,
+                                             mutation=mutation)
+
+        assert_equal(solver.dither, list(mutation))
+
+    def test_invalid_mutation_values_arent_accepted(self):
+        func = rosen
+        mutation = (0.5, 3)
+        assert_raises(ValueError,
+                      DifferentialEvolutionSolver,
+                      func,
+                      self.bounds,
+                      mutation=mutation)
+
+        mutation = (-1, 1)
+        assert_raises(ValueError,
+                      DifferentialEvolutionSolver,
+                      func,
+                      self.bounds,
+                      mutation=mutation)
+
+        mutation = (0.1, np.nan)
+        assert_raises(ValueError,
+                      DifferentialEvolutionSolver,
+                      func,
+                      self.bounds,
+                      mutation=mutation)
+
+        mutation = 0.5
+        solver = DifferentialEvolutionSolver(func,
+                                             self.bounds,
+                                             mutation=mutation)
+        assert_equal(0.5, solver.scale)
+        assert_equal(None, solver.dither)
+
+    def test_invalid_functional(self):
+        def func(x):
+            return np.array([np.sum(x ** 2), np.sum(x)])
+
+        with assert_raises(
+                RuntimeError,
+                match=r"func\(x, \*args\) must return a scalar value"):
+            differential_evolution(func, [(-2, 2), (-2, 2)])
+
+    def test__scale_parameters(self):
+        trial = np.array([0.3])
+        assert_equal(30, self.dummy_solver._scale_parameters(trial))
+
+        # it should also work with the limits reversed
+        self.dummy_solver.limits = np.array([[100], [0.]])
+        assert_equal(30, self.dummy_solver._scale_parameters(trial))
+
+    def test__unscale_parameters(self):
+        trial = np.array([30])
+        assert_equal(0.3, self.dummy_solver._unscale_parameters(trial))
+
+        # it should also work with the limits reversed
+        self.dummy_solver.limits = np.array([[100], [0.]])
+        assert_equal(0.3, self.dummy_solver._unscale_parameters(trial))
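+
+        # Illustrative note: the mapping appears to be the affine transform
+        #   x = 0.5 * (lb + ub) + (u - 0.5) * abs(ub - lb)
+        # so with limits (0, 100) the unit-interval value u = 0.3 maps to
+        # 50 + (-0.2) * 100 = 30; _unscale_parameters inverts this, which is
+        # why the reversed-limits cases above give the same answers.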
+
+    def test__ensure_constraint(self):
+        trial = np.array([1.1, -100, 0.9, 2., 300., -0.00001])
+        self.dummy_solver._ensure_constraint(trial)
+
+        assert_equal(trial[2], 0.9)
+        assert_(np.logical_and(trial >= 0, trial <= 1).all())
+
+    def test_differential_evolution(self):
+        # test that the Jmin of DifferentialEvolutionSolver
+        # is the same as the function evaluation
+        solver = DifferentialEvolutionSolver(
+            self.quadratic, [(-2, 2)], maxiter=1, polish=False
+        )
+        result = solver.solve()
+        assert_equal(result.fun, self.quadratic(result.x))
+
+        solver = DifferentialEvolutionSolver(
+            self.quadratic, [(-2, 2)], maxiter=1, polish=True
+        )
+        result = solver.solve()
+        assert_equal(result.fun, self.quadratic(result.x))
+
+    def test_best_solution_retrieval(self):
+        # test that the getter property method for the best solution works.
+        solver = DifferentialEvolutionSolver(self.quadratic, [(-2, 2)])
+        result = solver.solve()
+        assert_equal(result.x, solver.x)
+
+    def test_callback_terminates(self):
+        # test that if the callback returns true, then the minimization halts
+        bounds = [(0, 2), (0, 2)]
+        expected_msg = 'callback function requested stop early by returning True'
+
+        def callback_python_true(param, convergence=0.):
+            return True
+
+        result = differential_evolution(rosen, bounds, callback=callback_python_true)
+        assert_string_equal(result.message, expected_msg)
+
+        def callback_evaluates_true(param, convergence=0.):
+            # DE should stop if bool(self.callback) is True
+            return [10]
+
+        result = differential_evolution(rosen, bounds, callback=callback_evaluates_true)
+        assert_string_equal(result.message, expected_msg)
+
+        def callback_evaluates_false(param, convergence=0.):
+            return []
+
+        result = differential_evolution(rosen, bounds, callback=callback_evaluates_false)
+        assert result.success
+
+    def test_args_tuple_is_passed(self):
+        # test that the args tuple is passed to the cost function properly.
+        bounds = [(-10, 10)]
+        args = (1., 2., 3.)
+
+        def quadratic(x, *args):
+            if not isinstance(args, tuple):
+                raise ValueError('args should be a tuple')
+            return args[0] + args[1] * x + args[2] * x**2.
+
+        result = differential_evolution(quadratic,
+                                        bounds,
+                                        args=args,
+                                        polish=True)
+        assert_almost_equal(result.fun, 2 / 3.)
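+
+        # Worked check (illustrative): the minimum of 1 + 2*x + 3*x**2
+        # is at x = -1/3 (setting 2 + 6*x = 0), where
+        # f = 1 - 2/3 + 1/3 = 2/3, the value asserted above.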
+
+    def test_init_with_invalid_strategy(self):
+        # test that passing an invalid strategy raises ValueError
+        func = rosen
+        bounds = [(-3, 3)]
+        assert_raises(ValueError,
+                      differential_evolution,
+                      func,
+                      bounds,
+                      strategy='abc')
+
+    def test_bounds_checking(self):
+        # test that the bounds checking works
+        func = rosen
+        bounds = [(-3)]  # not a sequence of (min, max) pairs
+        assert_raises(ValueError,
+                      differential_evolution,
+                      func,
+                      bounds)
+        bounds = [(-3, 3), (3, 4, 5)]
+        assert_raises(ValueError,
+                      differential_evolution,
+                      func,
+                      bounds)
+
+        # test that we can use a new-type Bounds object
+        result = differential_evolution(rosen, Bounds([0, 0], [2, 2]))
+        assert_almost_equal(result.x, (1., 1.))
+
+    def test_select_samples(self):
+        # select_samples should return 5 separate random numbers.
+        limits = np.arange(12., dtype='float64').reshape(2, 6)
+        bounds = list(zip(limits[0, :], limits[1, :]))
+        solver = DifferentialEvolutionSolver(None, bounds, popsize=1)
+        candidate = 0
+        r1, r2, r3, r4, r5 = solver._select_samples(candidate, 5)
+        assert_equal(
+            len(np.unique(np.array([candidate, r1, r2, r3, r4, r5]))), 6)
+
+    def test_maxiter_stops_solve(self):
+        # test that if the maximum number of iterations is exceeded
+        # the solver stops.
+        solver = DifferentialEvolutionSolver(rosen, self.bounds, maxiter=1)
+        result = solver.solve()
+        assert_equal(result.success, False)
+        assert_equal(result.message,
+                     'Maximum number of iterations has been exceeded.')
+
+    def test_maxfun_stops_solve(self):
+        # test that if the maximum number of function evaluations is exceeded
+        # during initialisation the solver stops
+        solver = DifferentialEvolutionSolver(rosen, self.bounds, maxfun=1,
+                                             polish=False)
+        result = solver.solve()
+
+        assert_equal(result.nfev, 2)
+        assert_equal(result.success, False)
+        assert_equal(result.message,
+                     'Maximum number of function evaluations has '
+                     'been exceeded.')
+
+        # test that the solver stops if the maximum number of function
+        # evaluations is exceeded during the actual minimisation.
+        # Polishing has to be turned off, as it still occurs even if maxfun
+        # is reached. With popsize=5 and len(bounds)=2 there are only 10
+        # function evaluations during initialisation.
+        solver = DifferentialEvolutionSolver(rosen,
+                                             self.bounds,
+                                             popsize=5,
+                                             polish=False,
+                                             maxfun=40)
+        result = solver.solve()
+
+        assert_equal(result.nfev, 41)
+        assert_equal(result.success, False)
+        assert_equal(result.message,
+                     'Maximum number of function evaluations has '
+                     'been exceeded.')
+
+        # now repeat for the updating='deferred' version.
+        # 47 function evaluations is not a multiple of the population size,
+        # so maxfun is reached partway through a population evaluation.
+        solver = DifferentialEvolutionSolver(rosen,
+                                             self.bounds,
+                                             popsize=5,
+                                             polish=False,
+                                             maxfun=47,
+                                             updating='deferred')
+        result = solver.solve()
+
+        assert_equal(result.nfev, 47)
+        assert_equal(result.success, False)
+        assert_equal(result.message,
+                     'Maximum number of function evaluations has '
+                     'been reached.')
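+
+        # Illustrative accounting, assuming the population holds
+        # popsize * len(bounds) = 10 members: initialisation costs 10 nfev
+        # and each generation a further 10, so immediate updating first
+        # exceeds maxfun=40 at nfev=41, whereas deferred updating can stop
+        # at exactly nfev=47, partway through a population evaluation.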
+
+    def test_quadratic(self):
+        # test the quadratic function from object
+        solver = DifferentialEvolutionSolver(self.quadratic,
+                                             [(-100, 100)],
+                                             tol=0.02)
+        solver.solve()
+        assert_equal(np.argmin(solver.population_energies), 0)
+
+    def test_quadratic_from_diff_ev(self):
+        # test the quadratic function from differential_evolution function
+        differential_evolution(self.quadratic,
+                               [(-100, 100)],
+                               tol=0.02)
+
+    def test_seed_gives_repeatability(self):
+        result = differential_evolution(self.quadratic,
+                                        [(-100, 100)],
+                                        polish=False,
+                                        seed=1,
+                                        tol=0.5)
+        result2 = differential_evolution(self.quadratic,
+                                         [(-100, 100)],
+                                         polish=False,
+                                         seed=1,
+                                         tol=0.5)
+        assert_equal(result.x, result2.x)
+        assert_equal(result.nfev, result2.nfev)
+
+    def test_random_generator(self):
+        # check that np.random.Generator can be used (numpy >= 1.17)
+        # obtain a np.random.Generator object
+        rng = np.random.default_rng()
+
+        inits = ['random', 'latinhypercube', 'sobol', 'halton']
+        for init in inits:
+            differential_evolution(self.quadratic,
+                                   [(-100, 100)],
+                                   polish=False,
+                                   seed=rng,
+                                   tol=0.5,
+                                   init=init)
+
+    def test_exp_runs(self):
+        # test whether exponential mutation loop runs
+        solver = DifferentialEvolutionSolver(rosen,
+                                             self.bounds,
+                                             strategy='best1exp',
+                                             maxiter=1)
+
+        solver.solve()
+
+    def test_gh_4511_regression(self):
+        # This modification of the differential evolution docstring example
+        # uses a custom popsize that had triggered an off-by-one error.
+        # Because we do not care about solving the optimization problem in
+        # this test, we use maxiter=1 to reduce the testing time.
+        bounds = [(-5, 5), (-5, 5)]
+        # result = differential_evolution(rosen, bounds, popsize=1815,
+        #                                 maxiter=1)
+
+        # the original issue arose because of a rounding error in arange,
+        # with linspace being a much better solution. 1815 is quite a large
+        # popsize to use and results in a long test time (~13s). The original
+        # issue was used to figure out the lowest number of samples that
+        # triggers this rounding error, which is 49.
+        differential_evolution(rosen, bounds, popsize=49, maxiter=1)
+
+    def test_calculate_population_energies(self):
+        # if popsize is 3, then the overall generation has size (6,)
+        solver = DifferentialEvolutionSolver(rosen, self.bounds, popsize=3)
+        solver._calculate_population_energies(solver.population)
+        solver._promote_lowest_energy()
+        assert_equal(np.argmin(solver.population_energies), 0)
+
+        # initial calculation of the energies should require 6 nfev.
+        assert_equal(solver._nfev, 6)
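+
+        # (the factor of two arises because the population appears to hold
+        # popsize * len(bounds) members, here 3 * 2 = 6)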
+
+    def test_iteration(self):
+        # test that DifferentialEvolutionSolver is iterable
+        # if popsize is 3, then the overall generation has size (6,)
+        solver = DifferentialEvolutionSolver(rosen, self.bounds, popsize=3,
+                                             maxfun=12)
+        x, fun = next(solver)
+        assert_equal(np.size(x, 0), 2)
+
+        # 6 nfev are required for initial calculation of energies, 6 nfev are
+        # required for the evolution of the 6 population members.
+        assert_equal(solver._nfev, 12)
+
+        # the next generation should halt because it exceeds maxfun
+        assert_raises(StopIteration, next, solver)
+
+        # check a proper minimisation can be done by an iterable solver
+        solver = DifferentialEvolutionSolver(rosen, self.bounds)
+        _, fun_prev = next(solver)
+        for i, soln in enumerate(solver):
+            x_current, fun_current = soln
+            assert fun_prev >= fun_current
+            _, fun_prev = x_current, fun_current
+            # need to have this otherwise the solver would never stop.
+            if i == 50:
+                break
+
+    def test_convergence(self):
+        solver = DifferentialEvolutionSolver(rosen, self.bounds, tol=0.2,
+                                             polish=False)
+        solver.solve()
+        assert_(solver.convergence < 0.2)
+
+    def test_maxiter_none_GH5731(self):
+        # Pre 0.17 the default for maxiter and maxfun was None.
+        # The numerical defaults are now 1000 and np.inf, but some scripts
+        # still supply None for both of those, which used to raise a
+        # TypeError in the solve method.
+        solver = DifferentialEvolutionSolver(rosen, self.bounds, maxiter=None,
+                                             maxfun=None)
+        solver.solve()
+
+    def test_population_initiation(self):
+        # test the different modes of population initiation
+
+        # init must be one of 'latinhypercube', 'sobol', 'halton', or
+        # 'random'; a ValueError is raised if something else is passed in
+        assert_raises(ValueError,
+                      DifferentialEvolutionSolver,
+                      *(rosen, self.bounds),
+                      **{'init': 'rubbish'})
+
+        solver = DifferentialEvolutionSolver(rosen, self.bounds)
+
+        # check that population initiation:
+        # 1) resets _nfev to 0
+        # 2) all population energies are np.inf
+        solver.init_population_random()
+        assert_equal(solver._nfev, 0)
+        assert_(np.all(np.isinf(solver.population_energies)))
+
+        solver.init_population_lhs()
+        assert_equal(solver._nfev, 0)
+        assert_(np.all(np.isinf(solver.population_energies)))
+
+        solver.init_population_qmc(qmc_engine='halton')
+        assert_equal(solver._nfev, 0)
+        assert_(np.all(np.isinf(solver.population_energies)))
+
+        solver = DifferentialEvolutionSolver(rosen, self.bounds, init='sobol')
+        solver.init_population_qmc(qmc_engine='sobol')
+        assert_equal(solver._nfev, 0)
+        assert_(np.all(np.isinf(solver.population_energies)))
+
+        # we should be able to initialize with our own array
+        population = np.linspace(-1, 3, 10).reshape(5, 2)
+        solver = DifferentialEvolutionSolver(rosen, self.bounds,
+                                             init=population,
+                                             strategy='best2bin',
+                                             atol=0.01, seed=1, popsize=5)
+
+        assert_equal(solver._nfev, 0)
+        assert_(np.all(np.isinf(solver.population_energies)))
+        assert_(solver.num_population_members == 5)
+        assert_(solver.population_shape == (5, 2))
+
+        # check that the population was initialized correctly
+        unscaled_population = np.clip(solver._unscale_parameters(population),
+                                      0, 1)
+        assert_almost_equal(solver.population[:5], unscaled_population)
+
+        # population values need to be clipped to bounds
+        assert_almost_equal(np.min(solver.population[:5]), 0)
+        assert_almost_equal(np.max(solver.population[:5]), 1)
+
+        # shouldn't be able to initialize with an array if it's the wrong shape
+        # this would have too many parameters
+        population = np.linspace(-1, 3, 15).reshape(5, 3)
+        assert_raises(ValueError,
+                      DifferentialEvolutionSolver,
+                      *(rosen, self.bounds),
+                      **{'init': population})
+
+        # provide an initial solution
+        # bounds are [(0, 2), (0, 2)]
+        x0 = np.random.uniform(low=0.0, high=2.0, size=2)
+        solver = DifferentialEvolutionSolver(
+            rosen, self.bounds, x0=x0
+        )
+        # parameters are scaled to unit interval
+        assert_allclose(solver.population[0], x0 / 2.0)
+
+    def test_x0(self):
+        # smoke test that checks that x0 is usable.
+        res = differential_evolution(rosen, self.bounds, x0=[0.2, 0.8])
+        assert res.success
+
+        # check what happens if some of the x0 lie outside the bounds
+        with assert_raises(ValueError):
+            differential_evolution(rosen, self.bounds, x0=[0.2, 2.1])
+
+    def test_infinite_objective_function(self):
+        # Test that there are no problems if the objective function
+        # returns inf on some runs
+        def sometimes_inf(x):
+            if x[0] < .5:
+                return np.inf
+            return x[1]
+        bounds = [(0, 1), (0, 1)]
+        differential_evolution(sometimes_inf, bounds=bounds, disp=False)
+
+    def test_deferred_updating(self):
+        # check setting of deferred updating, with default workers
+        bounds = [(0., 2.), (0., 2.)]
+        solver = DifferentialEvolutionSolver(rosen, bounds, updating='deferred')
+        assert_(solver._updating == 'deferred')
+        assert_(solver._mapwrapper._mapfunc is map)
+        solver.solve()
+
+    def test_immediate_updating(self):
+        # check setting of immediate updating, with default workers
+        bounds = [(0., 2.), (0., 2.)]
+        solver = DifferentialEvolutionSolver(rosen, bounds)
+        assert_(solver._updating == 'immediate')
+
+        # should raise a UserWarning because the updating='immediate'
+        # is being overridden by the workers keyword
+        with warns(UserWarning):
+            with DifferentialEvolutionSolver(rosen, bounds, workers=2) as solver:
+                pass
+        assert_(solver._updating == 'deferred')
+
+    def test_parallel(self):
+        # smoke test for parallelization with deferred updating
+        bounds = [(0., 2.), (0., 2.)]
+        with multiprocessing.Pool(2) as p, DifferentialEvolutionSolver(
+                rosen, bounds, updating='deferred', workers=p.map) as solver:
+            assert_(solver._mapwrapper.pool is not None)
+            assert_(solver._updating == 'deferred')
+            solver.solve()
+
+        with DifferentialEvolutionSolver(rosen, bounds, updating='deferred',
+                                         workers=2) as solver:
+            assert_(solver._mapwrapper.pool is not None)
+            assert_(solver._updating == 'deferred')
+            solver.solve()
+
+    def test_converged(self):
+        solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)])
+        solver.solve()
+        assert_(solver.converged())
+
+    def test_constraint_violation_fn(self):
+        def constr_f(x):
+            return [x[0] + x[1]]
+
+        def constr_f2(x):
+            return np.array([x[0]**2 + x[1], x[0] - x[1]])
+
+        nlc = NonlinearConstraint(constr_f, -np.inf, 1.9)
+
+        solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
+                                             constraints=(nlc))
+
+        cv = solver._constraint_violation_fn(np.array([1.0, 1.0]))
+        assert_almost_equal(cv, 0.1)
+
+        nlc2 = NonlinearConstraint(constr_f2, -np.inf, 1.8)
+        solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
+                                             constraints=(nlc, nlc2))
+
+        # for multiple constraints the constraint violations should
+        # be concatenated.
+        xs = [(1.2, 1), (2.0, 2.0), (0.5, 0.5)]
+        vs = [(0.3, 0.64, 0.0), (2.1, 4.2, 0.0), (0, 0, 0)]
+
+        for x, v in zip(xs, vs):
+            cv = solver._constraint_violation_fn(np.array(x))
+            assert_allclose(cv, np.atleast_2d(v))
+
+        # vectorized calculation of a series of solutions
+        assert_allclose(
+            solver._constraint_violation_fn(np.array(xs)), np.array(vs)
+        )
+
+        # the following line is used in _calculate_population_feasibilities.
+        # _constraint_violation_fn returns a (1, M) array when
+        # x.shape == (N,), i.e. a single solution. Therefore this list
+        # comprehension should generate an (S, 1, M) array.
+        constraint_violation = np.array([solver._constraint_violation_fn(x)
+                                         for x in np.array(xs)])
+        assert constraint_violation.shape == (3, 1, 3)
+
+        # we need reasonable error messages if the constraint function doesn't
+        # return the right thing
+        def constr_f3(x):
+            # returns (S, M), rather than (M, S)
+            return constr_f2(x).T
+
+        nlc2 = NonlinearConstraint(constr_f3, -np.inf, 1.8)
+        solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
+                                             constraints=(nlc, nlc2),
+                                             vectorized=False)
+        solver.vectorized = True
+        with pytest.raises(
+                RuntimeError, match="An array returned from a Constraint"
+        ):
+            solver._constraint_violation_fn(np.array(xs))
+
+    def test_constraint_population_feasibilities(self):
+        def constr_f(x):
+            return [x[0] + x[1]]
+
+        def constr_f2(x):
+            return [x[0]**2 + x[1], x[0] - x[1]]
+
+        nlc = NonlinearConstraint(constr_f, -np.inf, 1.9)
+
+        solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
+                                             constraints=(nlc))
+
+        # are population feasibilities correct
+        # [0.5, 0.5] corresponds to scaled values of [1., 1.]
+        feas, cv = solver._calculate_population_feasibilities(
+            np.array([[0.5, 0.5], [1., 1.]]))
+        assert_equal(feas, [False, False])
+        assert_almost_equal(cv, np.array([[0.1], [2.1]]))
+        assert cv.shape == (2, 1)
+
+        nlc2 = NonlinearConstraint(constr_f2, -np.inf, 1.8)
+
+        for vectorize in [False, True]:
+            solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
+                                                 constraints=(nlc, nlc2),
+                                                 vectorized=vectorize,
+                                                 updating='deferred')
+
+            feas, cv = solver._calculate_population_feasibilities(
+                np.array([[0.5, 0.5], [0.6, 0.5]]))
+            assert_equal(feas, [False, False])
+            assert_almost_equal(cv, np.array([[0.1, 0.2, 0], [0.3, 0.64, 0]]))
+
+            feas, cv = solver._calculate_population_feasibilities(
+                np.array([[0.5, 0.5], [1., 1.]]))
+            assert_equal(feas, [False, False])
+            assert_almost_equal(cv, np.array([[0.1, 0.2, 0], [2.1, 4.2, 0]]))
+            assert cv.shape == (2, 3)
+
+            feas, cv = solver._calculate_population_feasibilities(
+                np.array([[0.25, 0.25], [1., 1.]]))
+            assert_equal(feas, [True, False])
+            assert_almost_equal(cv, np.array([[0.0, 0.0, 0.], [2.1, 4.2, 0]]))
+            assert cv.shape == (2, 3)
+
+    def test_constraint_solve(self):
+        def constr_f(x):
+            return np.array([x[0] + x[1]])
+
+        nlc = NonlinearConstraint(constr_f, -np.inf, 1.9)
+
+        solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
+                                             constraints=(nlc))
+
+        # trust-constr warns if the constraint function is linear
+        with warns(UserWarning):
+            res = solver.solve()
+
+        assert constr_f(res.x) <= 1.9
+        assert res.success
+
+    def test_impossible_constraint(self):
+        def constr_f(x):
+            return np.array([x[0] + x[1]])
+
+        nlc = NonlinearConstraint(constr_f, -np.inf, -1)
+
+        solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
+                                             constraints=(nlc), popsize=3,
+                                             seed=1)
+
+        # a UserWarning is issued because the 'trust-constr' polishing is
+        # attempted on the least infeasible solution found.
+        with warns(UserWarning):
+            res = solver.solve()
+
+        assert res.maxcv > 0
+        assert not res.success
+
+        # test _promote_lowest_energy works when none of the population is
+        # feasible. In this case, the solution with the lowest constraint
+        # violation should be promoted.
+        solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
+                                             constraints=(nlc), polish=False)
+        next(solver)
+        assert not solver.feasible.all()
+        assert not np.isfinite(solver.population_energies).all()
+
+        # now swap two of the entries in the population
+        idx = 20
+        cv = solver.constraint_violation[0]
+
+        solver.population_energies[[0, idx]] = (
+            solver.population_energies[[idx, 0]])
+        solver.population[[0, idx], :] = solver.population[[idx, 0], :]
+        solver.constraint_violation[[0, idx], :] = (
+            solver.constraint_violation[[idx, 0], :])
+
+        solver._promote_lowest_energy()
+        assert_equal(solver.constraint_violation[0], cv)
+
+    def test_accept_trial(self):
+        # _accept_trial(self, energy_trial, feasible_trial, cv_trial,
+        #               energy_orig, feasible_orig, cv_orig)
+        def constr_f(x):
+            return [x[0] + x[1]]
+        nlc = NonlinearConstraint(constr_f, -np.inf, 1.9)
+        solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
+                                             constraints=(nlc))
+        fn = solver._accept_trial
+        # both solutions are feasible, select lower energy
+        assert fn(0.1, True, np.array([0.]), 1.0, True, np.array([0.]))
+        assert not fn(1.0, True, np.array([0.]), 0.1, True, np.array([0.]))
+        assert fn(0.1, True, np.array([0.]), 0.1, True, np.array([0.]))
+
+        # trial is feasible, original is not
+        assert fn(9.9, True, np.array([0.]), 1.0, False, np.array([1.]))
+
+        # trial and original are infeasible
+        # cv_trial have to be <= cv_original to be better
+        assert fn(0.1, False, np.array([0.5, 0.5]),
+                  1.0, False, np.array([1., 1.0]))
+        assert fn(0.1, False, np.array([0.5, 0.5]),
+                  1.0, False, np.array([1., 0.50]))
+        assert not fn(1.0, False, np.array([0.5, 0.5]),
+                      1.0, False, np.array([1., 0.4]))
+
+    def test_constraint_wrapper(self):
+        lb = np.array([0, 20, 30])
+        ub = np.array([0.5, np.inf, 70])
+        x0 = np.array([1, 2, 3])
+        pc = _ConstraintWrapper(Bounds(lb, ub), x0)
+        assert (pc.violation(x0) > 0).any()
+        assert (pc.violation([0.25, 21, 31]) == 0).all()
+
+        # check vectorized Bounds constraint
+        xs = np.arange(1, 16).reshape(5, 3)
+        violations = []
+        for x in xs:
+            violations.append(pc.violation(x))
+        np.testing.assert_allclose(pc.violation(xs.T), np.array(violations).T)
+
+        x0 = np.array([1, 2, 3, 4])
+        A = np.array([[1, 2, 3, 4], [5, 0, 0, 6], [7, 0, 8, 0]])
+        pc = _ConstraintWrapper(LinearConstraint(A, -np.inf, 0), x0)
+        assert (pc.violation(x0) > 0).any()
+        assert (pc.violation([-10, 2, -10, 4]) == 0).all()
+
+        # check vectorized LinearConstraint, for 7 lots of parameter vectors
+        # with each parameter vector being 4 long, with 3 constraints
+        # xs is the same shape as stored in the differential evolution
+        # population, but it's sent to the violation function as (len(x), S)
+        xs = np.arange(1, 29).reshape(7, 4)
+        violations = []
+        for x in xs:
+            violations.append(pc.violation(x))
+        np.testing.assert_allclose(pc.violation(xs.T), np.array(violations).T)
+
+        pc = _ConstraintWrapper(LinearConstraint(csr_matrix(A), -np.inf, 0),
+                                x0)
+        assert (pc.violation(x0) > 0).any()
+        assert (pc.violation([-10, 2, -10, 4]) == 0).all()
+
+        def fun(x):
+            return A.dot(x)
+
+        nonlinear = NonlinearConstraint(fun, -np.inf, 0)
+        pc = _ConstraintWrapper(nonlinear, [-10, 2, -10, 4])
+        assert (pc.violation(x0) > 0).any()
+        assert (pc.violation([-10, 2, -10, 4]) == 0).all()
+
+    def test_constraint_wrapper_violation(self):
+        def cons_f(x):
+            # written in vectorised form to accept an array of (N, S)
+            # returning (M, S)
+            # where N is the number of parameters,
+            # S is the number of solution vectors to be examined,
+            # and M is the number of constraint components
+            return np.array([x[0] ** 2 + x[1],
+                             x[0] ** 2 - x[1]])
+
+        nlc = NonlinearConstraint(cons_f, [-1, -0.8500], [2, 2])
+        pc = _ConstraintWrapper(nlc, [0.5, 1])
+        assert np.size(pc.bounds[0]) == 2
+
+        xs = [(0.5, 1), (0.5, 1.2), (1.2, 1.2), (0.1, -1.2), (0.1, 2.0)]
+        vs = [(0, 0), (0, 0.1), (0.64, 0), (0.19, 0), (0.01, 1.14)]
+
+        for x, v in zip(xs, vs):
+            assert_allclose(pc.violation(x), v)
+
+        # now check that we can vectorize the constraint wrapper
+        assert_allclose(pc.violation(np.array(xs).T),
+                        np.array(vs).T)
+        assert pc.fun(np.array(xs).T).shape == (2, len(xs))
+        assert pc.violation(np.array(xs).T).shape == (2, len(xs))
+        assert pc.num_constr == 2
+        assert pc.parameter_count == 2
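+
+        # Worked check (illustrative) for x = (0.5, 1.2): cons_f gives
+        # (0.25 + 1.2, 0.25 - 1.2) = (1.45, -0.95). The first component lies
+        # inside [-1, 2] (violation 0), while the second falls below the
+        # lower bound of -0.85 by 0.1, matching vs[1] above.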
+
+    def test_L1(self):
+        # Lampinen ([5]) test problem 1
+
+        def f(x):
+            x = np.hstack(([0], x))  # 1-indexed to match reference
+            fun = np.sum(5*x[1:5]) - 5*x[1:5]@x[1:5] - np.sum(x[5:])
+            return fun
+
+        A = np.zeros((10, 14))  # 1-indexed to match reference
+        A[1, [1, 2, 10, 11]] = 2, 2, 1, 1
+        A[2, [1, 10]] = -8, 1
+        A[3, [4, 5, 10]] = -2, -1, 1
+        A[4, [1, 3, 10, 11]] = 2, 2, 1, 1
+        A[5, [2, 11]] = -8, 1
+        A[6, [6, 7, 11]] = -2, -1, 1
+        A[7, [2, 3, 11, 12]] = 2, 2, 1, 1
+        A[8, [3, 12]] = -8, 1
+        A[9, [8, 9, 12]] = -2, -1, 1
+        A = A[1:, 1:]
+
+        b = np.array([10, 0, 0, 10, 0, 0, 10, 0, 0])
+
+        L = LinearConstraint(A, -np.inf, b)
+
+        bounds = [(0, 1)]*9 + [(0, 100)]*3 + [(0, 1)]
+
+        # using a lower popsize to speed the test up
+        res = differential_evolution(f, bounds, strategy='best1bin', seed=1234,
+                                     constraints=(L), popsize=2)
+
+        x_opt = (1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 1)
+        f_opt = -15
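+
+        # Worked check (illustrative): at x_opt, np.sum(5*x[1:5]) = 20,
+        # 5*x[1:5]@x[1:5] = 20 and np.sum(x[5:]) = 5*1 + 3*3 + 1 = 15,
+        # so f(x_opt) = 20 - 20 - 15 = -15.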
+
+        assert_allclose(f(x_opt), f_opt)
+        assert res.success
+        assert_allclose(res.x, x_opt, atol=5e-4)
+        assert_allclose(res.fun, f_opt, atol=5e-3)
+        assert_(np.all(A@res.x <= b))
+        assert_(np.all(res.x >= np.array(bounds)[:, 0]))
+        assert_(np.all(res.x <= np.array(bounds)[:, 1]))
+
+        # now repeat the same solve, using the same overall constraints,
+        # but using a sparse matrix for the LinearConstraint instead of an
+        # array
+
+        L = LinearConstraint(csr_matrix(A), -np.inf, b)
+
+        # using a lower popsize to speed the test up
+        res = differential_evolution(f, bounds, strategy='best1bin', seed=1234,
+                                     constraints=(L), popsize=2)
+
+        assert_allclose(f(x_opt), f_opt)
+        assert res.success
+        assert_allclose(res.x, x_opt, atol=5e-4)
+        assert_allclose(res.fun, f_opt, atol=5e-3)
+        assert_(np.all(A@res.x <= b))
+        assert_(np.all(res.x >= np.array(bounds)[:, 0]))
+        assert_(np.all(res.x <= np.array(bounds)[:, 1]))
+
+        # now repeat the same solve, using the same overall constraints,
+        # but specify half the constraints in terms of LinearConstraint,
+        # and the other half by NonlinearConstraint
+        def c1(x):
+            x = np.hstack(([0], x))
+            return [2*x[2] + 2*x[3] + x[11] + x[12],
+                    -8*x[3] + x[12]]
+
+        def c2(x):
+            x = np.hstack(([0], x))
+            return -2*x[8] - x[9] + x[12]
+
+        L = LinearConstraint(A[:5, :], -np.inf, b[:5])
+        L2 = LinearConstraint(A[5:6, :], -np.inf, b[5:6])
+        N = NonlinearConstraint(c1, -np.inf, b[6:8])
+        N2 = NonlinearConstraint(c2, -np.inf, b[8:9])
+        constraints = (L, N, L2, N2)
+
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning)
+            res = differential_evolution(f, bounds, strategy='rand1bin',
+                                         seed=1234, constraints=constraints,
+                                         popsize=2)
+
+        assert_allclose(res.x, x_opt, atol=5e-4)
+        assert_allclose(res.fun, f_opt, atol=5e-3)
+        assert_(np.all(A@res.x <= b))
+        assert_(np.all(res.x >= np.array(bounds)[:, 0]))
+        assert_(np.all(res.x <= np.array(bounds)[:, 1]))
+
+    def test_L2(self):
+        # Lampinen ([5]) test problem 2
+
+        def f(x):
+            x = np.hstack(([0], x))  # 1-indexed to match reference
+            fun = ((x[1]-10)**2 + 5*(x[2]-12)**2 + x[3]**4 + 3*(x[4]-11)**2 +
+                   10*x[5]**6 + 7*x[6]**2 + x[7]**4 - 4*x[6]*x[7] - 10*x[6] -
+                   8*x[7])
+            return fun
+
+        def c1(x):
+            x = np.hstack(([0], x))  # 1-indexed to match reference
+            return [127 - 2*x[1]**2 - 3*x[2]**4 - x[3] - 4*x[4]**2 - 5*x[5],
+                    196 - 23*x[1] - x[2]**2 - 6*x[6]**2 + 8*x[7],
+                    282 - 7*x[1] - 3*x[2] - 10*x[3]**2 - x[4] + x[5],
+                    -4*x[1]**2 - x[2]**2 + 3*x[1]*x[2] - 2*x[3]**2 -
+                    5*x[6] + 11*x[7]]
+
+        N = NonlinearConstraint(c1, 0, np.inf)
+        bounds = [(-10, 10)]*7
+        constraints = (N)
+
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning)
+            res = differential_evolution(f, bounds, strategy='rand1bin',
+                                         seed=1234, constraints=constraints)
+
+        f_opt = 680.6300599487869
+        x_opt = (2.330499, 1.951372, -0.4775414, 4.365726,
+                 -0.6244870, 1.038131, 1.594227)
+
+        assert_allclose(f(x_opt), f_opt)
+        assert_allclose(res.fun, f_opt)
+        assert_allclose(res.x, x_opt, atol=1e-5)
+        assert res.success
+        assert_(np.all(np.array(c1(res.x)) >= 0))
+        assert_(np.all(res.x >= np.array(bounds)[:, 0]))
+        assert_(np.all(res.x <= np.array(bounds)[:, 1]))
+
+    def test_L3(self):
+        # Lampinen ([5]) test problem 3
+
+        def f(x):
+            x = np.hstack(([0], x))  # 1-indexed to match reference
+            fun = (x[1]**2 + x[2]**2 + x[1]*x[2] - 14*x[1] - 16*x[2] +
+                   (x[3]-10)**2 + 4*(x[4]-5)**2 + (x[5]-3)**2 + 2*(x[6]-1)**2 +
+                   5*x[7]**2 + 7*(x[8]-11)**2 + 2*(x[9]-10)**2 +
+                   (x[10] - 7)**2 + 45
+                   )
+            return fun  # maximize
+
+        A = np.zeros((4, 11))
+        A[1, [1, 2, 7, 8]] = -4, -5, 3, -9
+        A[2, [1, 2, 7, 8]] = -10, 8, 17, -2
+        A[3, [1, 2, 9, 10]] = 8, -2, -5, 2
+        A = A[1:, 1:]
+        b = np.array([-105, 0, -12])
+
+        def c1(x):
+            x = np.hstack(([0], x))  # 1-indexed to match reference
+            return [3*x[1] - 6*x[2] - 12*(x[9]-8)**2 + 7*x[10],
+                    -3*(x[1]-2)**2 - 4*(x[2]-3)**2 - 2*x[3]**2 + 7*x[4] + 120,
+                    -x[1]**2 - 2*(x[2]-2)**2 + 2*x[1]*x[2] - 14*x[5] + 6*x[6],
+                    -5*x[1]**2 - 8*x[2] - (x[3]-6)**2 + 2*x[4] + 40,
+                    -0.5*(x[1]-8)**2 - 2*(x[2]-4)**2 - 3*x[5]**2 + x[6] + 30]
+
+        L = LinearConstraint(A, b, np.inf)
+        N = NonlinearConstraint(c1, 0, np.inf)
+        bounds = [(-10, 10)]*10
+        constraints = (L, N)
+
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning)
+            res = differential_evolution(f, bounds, seed=1234,
+                                         constraints=constraints, popsize=3)
+
+        x_opt = (2.171996, 2.363683, 8.773926, 5.095984, 0.9906548,
+                 1.430574, 1.321644, 9.828726, 8.280092, 8.375927)
+        f_opt = 24.3062091
+
+        assert_allclose(f(x_opt), f_opt, atol=1e-5)
+        assert_allclose(res.x, x_opt, atol=1e-6)
+        assert_allclose(res.fun, f_opt, atol=1e-5)
+        assert res.success
+        assert_(np.all(A @ res.x >= b))
+        assert_(np.all(np.array(c1(res.x)) >= 0))
+        assert_(np.all(res.x >= np.array(bounds)[:, 0]))
+        assert_(np.all(res.x <= np.array(bounds)[:, 1]))
+
+    def test_L4(self):
+        # Lampinen ([5]) test problem 4
+        def f(x):
+            return np.sum(x[:3])
+
+        A = np.zeros((4, 9))
+        A[1, [4, 6]] = 0.0025, 0.0025
+        A[2, [5, 7, 4]] = 0.0025, 0.0025, -0.0025
+        A[3, [8, 5]] = 0.01, -0.01
+        A = A[1:, 1:]
+        b = np.array([1, 1, 1])
+
+        def c1(x):
+            x = np.hstack(([0], x))  # 1-indexed to match reference
+            return [x[1]*x[6] - 833.33252*x[4] - 100*x[1] + 83333.333,
+                    x[2]*x[7] - 1250*x[5] - x[2]*x[4] + 1250*x[4],
+                    x[3]*x[8] - 1250000 - x[3]*x[5] + 2500*x[5]]
+
+        L = LinearConstraint(A, -np.inf, 1)
+        N = NonlinearConstraint(c1, 0, np.inf)
+
+        bounds = [(100, 10000)] + [(1000, 10000)]*2 + [(10, 1000)]*5
+        constraints = (L, N)
+
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning)
+            res = differential_evolution(f, bounds, strategy='rand1bin',
+                                         seed=1234, constraints=constraints,
+                                         popsize=3)
+
+        f_opt = 7049.248
+
+        x_opt = [579.306692, 1359.97063, 5109.9707, 182.0177, 295.601172,
+                 217.9823, 286.416528, 395.601172]
+
+        assert_allclose(f(x_opt), f_opt, atol=0.001)
+        assert_allclose(res.fun, f_opt, atol=0.001)
+
+        # use higher tol here for 32-bit Windows, see gh-11693
+        if (platform.system() == 'Windows' and np.dtype(np.intp).itemsize < 8):
+            assert_allclose(res.x, x_opt, rtol=2.4e-6, atol=0.0035)
+        else:
+            # tolerance determined from macOS + MKL failure, see gh-12701
+            assert_allclose(res.x, x_opt, rtol=5e-6, atol=0.0024)
+
+        assert res.success
+        assert_(np.all(A @ res.x <= b))
+        assert_(np.all(np.array(c1(res.x)) >= 0))
+        assert_(np.all(res.x >= np.array(bounds)[:, 0]))
+        assert_(np.all(res.x <= np.array(bounds)[:, 1]))
+
+    def test_L5(self):
+        # Lampinen ([5]) test problem 5
+
+        def f(x):
+            x = np.hstack(([0], x))  # 1-indexed to match reference
+            fun = (np.sin(2*np.pi*x[1])**3*np.sin(2*np.pi*x[2]) /
+                   (x[1]**3*(x[1]+x[2])))
+            return -fun  # maximize
+
+        def c1(x):
+            x = np.hstack(([0], x))  # 1-indexed to match reference
+            return [x[1]**2 - x[2] + 1,
+                    1 - x[1] + (x[2]-4)**2]
+
+        N = NonlinearConstraint(c1, -np.inf, 0)
+        bounds = [(0, 10)]*2
+        constraints = (N)
+
+        res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234,
+                                     constraints=constraints)
+
+        x_opt = (1.22797135, 4.24537337)
+        f_opt = -0.095825
+        assert_allclose(f(x_opt), f_opt, atol=2e-5)
+        assert_allclose(res.fun, f_opt, atol=1e-4)
+        assert res.success
+        assert_(np.all(np.array(c1(res.x)) <= 0))
+        assert_(np.all(res.x >= np.array(bounds)[:, 0]))
+        assert_(np.all(res.x <= np.array(bounds)[:, 1]))
+
+    def test_L6(self):
+        # Lampinen ([5]) test problem 6
+        def f(x):
+            x = np.hstack(([0], x))  # 1-indexed to match reference
+            fun = (x[1]-10)**3 + (x[2] - 20)**3
+            return fun
+
+        def c1(x):
+            x = np.hstack(([0], x))  # 1-indexed to match reference
+            return [(x[1]-5)**2 + (x[2] - 5)**2 - 100,
+                    -(x[1]-6)**2 - (x[2] - 5)**2 + 82.81]
+
+        N = NonlinearConstraint(c1, 0, np.inf)
+        bounds = [(13, 100), (0, 100)]
+        constraints = (N)
+        res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234,
+                                     constraints=constraints, tol=1e-7)
+        x_opt = (14.095, 0.84296)
+        f_opt = -6961.814744
+
+        assert_allclose(f(x_opt), f_opt, atol=1e-6)
+        assert_allclose(res.fun, f_opt, atol=0.001)
+        assert_allclose(res.x, x_opt, atol=1e-4)
+        assert res.success
+        assert_(np.all(np.array(c1(res.x)) >= 0))
+        assert_(np.all(res.x >= np.array(bounds)[:, 0]))
+        assert_(np.all(res.x <= np.array(bounds)[:, 1]))
+
+    def test_L7(self):
+        # Lampinen ([5]) test problem 7
+        def f(x):
+            x = np.hstack(([0], x))  # 1-indexed to match reference
+            fun = (5.3578547*x[3]**2 + 0.8356891*x[1]*x[5] +
+                   37.293239*x[1] - 40792.141)
+            return fun
+
+        def c1(x):
+            x = np.hstack(([0], x))  # 1-indexed to match reference
+            return [
+                    85.334407 + 0.0056858*x[2]*x[5] + 0.0006262*x[1]*x[4] -
+                    0.0022053*x[3]*x[5],
+
+                    80.51249 + 0.0071317*x[2]*x[5] + 0.0029955*x[1]*x[2] +
+                    0.0021813*x[3]**2,
+
+                    9.300961 + 0.0047026*x[3]*x[5] + 0.0012547*x[1]*x[3] +
+                    0.0019085*x[3]*x[4]
+                    ]
+
+        N = NonlinearConstraint(c1, [0, 90, 20], [92, 110, 25])
+
+        bounds = [(78, 102), (33, 45)] + [(27, 45)]*3
+        constraints = (N)
+
+        res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234,
+                                     constraints=constraints)
+
+        # using our best solution, rather than Lampinen/Koziel. Koziel solution
+        # doesn't satisfy constraints, Lampinen f_opt just plain wrong.
+        x_opt = [78.00000686, 33.00000362, 29.99526064, 44.99999971,
+                 36.77579979]
+
+        f_opt = -30665.537578
+
+        assert_allclose(f(x_opt), f_opt)
+        assert_allclose(res.x, x_opt, atol=1e-3)
+        assert_allclose(res.fun, f_opt, atol=1e-3)
+
+        assert res.success
+        assert_(np.all(np.array(c1(res.x)) >= np.array([0, 90, 20])))
+        assert_(np.all(np.array(c1(res.x)) <= np.array([92, 110, 25])))
+        assert_(np.all(res.x >= np.array(bounds)[:, 0]))
+        assert_(np.all(res.x <= np.array(bounds)[:, 1]))
+
+    @pytest.mark.slow
+    @pytest.mark.xfail(platform.machine() == 'ppc64le',
+                       reason="fails on ppc64le")
+    def test_L8(self):
+        def f(x):
+            x = np.hstack(([0], x))  # 1-indexed to match reference
+            fun = 3*x[1] + 0.000001*x[1]**3 + 2*x[2] + 0.000002/3*x[2]**3
+            return fun
+
+        A = np.zeros((3, 5))
+        A[1, [4, 3]] = 1, -1
+        A[2, [3, 4]] = 1, -1
+        A = A[1:, 1:]
+        b = np.array([-.55, -.55])
+
+        def c1(x):
+            x = np.hstack(([0], x))  # 1-indexed to match reference
+            return [
+                    1000*np.sin(-x[3]-0.25) + 1000*np.sin(-x[4]-0.25) +
+                    894.8 - x[1],
+                    1000*np.sin(x[3]-0.25) + 1000*np.sin(x[3]-x[4]-0.25) +
+                    894.8 - x[2],
+                    1000*np.sin(x[4]-0.25) + 1000*np.sin(x[4]-x[3]-0.25) +
+                    1294.8
+                    ]
+        L = LinearConstraint(A, b, np.inf)
+        N = NonlinearConstraint(c1, np.full(3, -0.001), np.full(3, 0.001))
+
+        bounds = [(0, 1200)]*2+[(-.55, .55)]*2
+        constraints = (L, N)
+
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning)
+            # original Lampinen test was with rand1bin, but that takes a
+            # huge amount of CPU time. Changing strategy to best1bin speeds
+            # things up a lot
+            res = differential_evolution(f, bounds, strategy='best1bin',
+                                         seed=1234, constraints=constraints,
+                                         maxiter=5000)
+
+        x_opt = (679.9453, 1026.067, 0.1188764, -0.3962336)
+        f_opt = 5126.4981
+
+        assert_allclose(f(x_opt), f_opt, atol=1e-3)
+        assert_allclose(res.x[:2], x_opt[:2], atol=2e-3)
+        assert_allclose(res.x[2:], x_opt[2:], atol=2e-3)
+        assert_allclose(res.fun, f_opt, atol=2e-2)
+        assert res.success
+        assert_(np.all(A@res.x >= b))
+        assert_(np.all(np.array(c1(res.x)) >= -0.001))
+        assert_(np.all(np.array(c1(res.x)) <= 0.001))
+        assert_(np.all(res.x >= np.array(bounds)[:, 0]))
+        assert_(np.all(res.x <= np.array(bounds)[:, 1]))
+
+    def test_L9(self):
+        # Lampinen ([5]) test problem 9
+
+        def f(x):
+            x = np.hstack(([0], x))  # 1-indexed to match reference
+            return x[1]**2 + (x[2]-1)**2
+
+        def c1(x):
+            x = np.hstack(([0], x))  # 1-indexed to match reference
+            return [x[2] - x[1]**2]
+
+        N = NonlinearConstraint(c1, [-.001], [0.001])
+
+        bounds = [(-1, 1)]*2
+        constraints = (N)
+        res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234,
+                                     constraints=constraints)
+
+        x_opt = [np.sqrt(2)/2, 0.5]
+        f_opt = 0.75
+
+        assert_allclose(f(x_opt), f_opt)
+        assert_allclose(np.abs(res.x), x_opt, atol=1e-3)
+        assert_allclose(res.fun, f_opt, atol=1e-3)
+        assert res.success
+        assert_(np.all(np.array(c1(res.x)) >= -0.001))
+        assert_(np.all(np.array(c1(res.x)) <= 0.001))
+        assert_(np.all(res.x >= np.array(bounds)[:, 0]))
+        assert_(np.all(res.x <= np.array(bounds)[:, 1]))
+
+    def test_integrality(self):
+        # test fitting discrete distribution to data
+        rng = np.random.default_rng(6519843218105)
+        dist = stats.nbinom
+        shapes = (5, 0.5)
+        x = dist.rvs(*shapes, size=10000, random_state=rng)
+
+        def func(p, *args):
+            dist, x = args
+            # negative log-likelihood function
+            ll = -np.log(dist.pmf(x, *p)).sum(axis=-1)
+            if np.isnan(ll):  # occurs when x is outside of support
+                ll = np.inf  # we don't want that
+            return ll
+
+        integrality = [True, False]
+        bounds = [(1, 18), (0, 0.95)]
+
+        res = differential_evolution(func, bounds, args=(dist, x),
+                                     integrality=integrality, polish=False,
+                                     seed=rng)
+        # tolerance has to be fairly relaxed for the second parameter
+        # because we're fitting a distribution to random variates.
+        assert res.x[0] == 5
+        assert_allclose(res.x, shapes, rtol=0.02)
+
+        # check that we can still use integrality constraints with polishing
+        res2 = differential_evolution(func, bounds, args=(dist, x),
+                                      integrality=integrality, polish=True,
+                                      seed=rng)
+
+        def func2(p, *args):
+            n, dist, x = args
+            return func(np.array([n, p[0]]), dist, x)
+
+        # compare the DE derived solution to an LBFGSB solution (that doesn't
+        # have to find the integral values). Note we're setting x0 to be the
+        # output from the first DE result, thereby making the polishing step
+        # and this minimisation pretty much equivalent.
+        LBFGSB = minimize(func2, res2.x[1], args=(5, dist, x),
+                          bounds=[(0, 0.95)])
+        assert_allclose(res2.x[1], LBFGSB.x)
+        assert res2.fun <= res.fun
+
+    def test_integrality_limits(self):
+        def f(x):
+            return x
+
+        integrality = [True, False, True]
+        bounds = [(0.2, 1.1), (0.9, 2.2), (3.3, 4.9)]
+
+        # no integrality constraints
+        solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False,
+                                             integrality=False)
+        assert_allclose(solver.limits[0], [0.2, 0.9, 3.3])
+        assert_allclose(solver.limits[1], [1.1, 2.2, 4.9])
+
+        # with integrality constraints
+        solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False,
+                                             integrality=integrality)
+        assert_allclose(solver.limits[0], [0.5, 0.9, 3.5])
+        assert_allclose(solver.limits[1], [1.5, 2.2, 4.5])
+        assert_equal(solver.integrality, [True, False, True])
+        assert solver.polish is False
+
+        bounds = [(-1.2, -0.9), (0.9, 2.2), (-10.3, 4.1)]
+        solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False,
+                                             integrality=integrality)
+        assert_allclose(solver.limits[0], [-1.5, 0.9, -10.5])
+        assert_allclose(solver.limits[1], [-0.5, 2.2, 4.5])
+
+        # A lower bound of -1.2 is converted to
+        # np.nextafter(np.ceil(-1.2) - 0.5, np.inf)
+        # with a similar process to the upper bound. Check that the
+        # conversions work
+        assert_allclose(np.round(solver.limits[0]), [-1.0, 1.0, -10.0])
+        assert_allclose(np.round(solver.limits[1]), [-1.0, 2.0, 4.0])
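+
+        # Worked check (illustrative): for the lower bound of -1.2,
+        # np.ceil(-1.2) - 0.5 = -1.5, and the smallest integer reachable by
+        # rounding values just above -1.5 is -1.0, which is what the
+        # assertion above recovers.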
+
+        bounds = [(-10.2, -8.1), (0.9, 2.2), (-10.9, -9.9999)]
+        solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False,
+                                             integrality=integrality)
+        assert_allclose(solver.limits[0], [-10.5, 0.9, -10.5])
+        assert_allclose(solver.limits[1], [-8.5, 2.2, -9.5])
+
+        bounds = [(-10.2, -10.1), (0.9, 2.2), (-10.9, -9.9999)]
+        with pytest.raises(ValueError, match='One of the integrality'):
+            DifferentialEvolutionSolver(f, bounds=bounds, polish=False,
+                                        integrality=integrality)
+
+    def test_vectorized(self):
+        def quadratic(x):
+            return np.sum(x**2)
+
+        def quadratic_vec(x):
+            return np.sum(x**2, axis=0)
+
+        # A vectorized function needs to accept (len(x), S) and return (S,)
+        with pytest.raises(RuntimeError, match='The vectorized function'):
+            differential_evolution(quadratic, self.bounds,
+                                   vectorized=True, updating='deferred')
+
+        # vectorized overrides the updating keyword, check for warning
+        with warns(UserWarning, match="differential_evolution: the 'vector"):
+            differential_evolution(quadratic_vec, self.bounds,
+                                   vectorized=True)
+
+        # vectorized defers to the workers keyword, check for warning
+        with warns(UserWarning, match="differential_evolution: the 'workers"):
+            differential_evolution(quadratic_vec, self.bounds,
+                                   vectorized=True, workers=map,
+                                   updating='deferred')
+
+        ncalls = [0]
+
+        def rosen_vec(x):
+            ncalls[0] += 1
+            return rosen(x)
+
+        bounds = [(0, 10), (0, 10)]
+        res1 = differential_evolution(rosen, bounds, updating='deferred',
+                                      seed=1)
+        res2 = differential_evolution(rosen_vec, bounds, vectorized=True,
+                                      updating='deferred', seed=1)
+
+        # the two minimisation runs should be functionally equivalent
+        assert_allclose(res1.x, res2.x)
+        assert ncalls[0] == res2.nfev
+        assert res1.nit == res2.nit
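+
+        # Illustrative shape note: a vectorized objective receives the whole
+        # population at once, e.g. quadratic_vec applied to an array of shape
+        # (2, S) (two parameters, S trial solutions) returns an (S,) array.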
+
+    def test_vectorized_constraints(self):
+        def constr_f(x):
+            return np.array([x[0] + x[1]])
+
+        def constr_f2(x):
+            return np.array([x[0]**2 + x[1], x[0] - x[1]])
+
+        nlc1 = NonlinearConstraint(constr_f, -np.inf, 1.9)
+        nlc2 = NonlinearConstraint(constr_f2, (0.9, 0.5), (2.0, 2.0))
+
+        def rosen_vec(x):
+            # accept an (len(x0), S) array, returning a (S,) array
+            v = 100 * (x[1:] - x[:-1]**2.0)**2.0
+            v += (1 - x[:-1])**2.0
+            return np.squeeze(v)
+
+        bounds = [(0, 10), (0, 10)]
+
+        res1 = differential_evolution(rosen, bounds, updating='deferred',
+                                      seed=1, constraints=[nlc1, nlc2],
+                                      polish=False)
+        res2 = differential_evolution(rosen_vec, bounds, vectorized=True,
+                                      updating='deferred', seed=1,
+                                      constraints=[nlc1, nlc2],
+                                      polish=False)
+        # the two minimisation runs should be functionally equivalent
+        assert_allclose(res1.x, res2.x)
+
+    def test_constraint_violation_error_message(self):
+
+        def func(x):
+            return np.cos(x[0]) + np.sin(x[1])
+
+        # Intentionally infeasible constraints.
+        c0 = NonlinearConstraint(lambda x: x[1] - (x[0]-1)**2, 0, np.inf)
+        c1 = NonlinearConstraint(lambda x: x[1] + x[0]**2, -np.inf, 0)
+
+        result = differential_evolution(func,
+                                        bounds=[(-1, 2), (-1, 1)],
+                                        constraints=[c0, c1],
+                                        maxiter=10,
+                                        polish=False,
+                                        seed=864197532)
+        assert result.success is False
+        # The numerical value in the error message might be sensitive to
+        # changes in the implementation.  It can be updated if the code is
+        # changed.  The essential part of the test is that there is a number
+        # after the '=', so if necessary, the text could be reduced to, say,
+        # "MAXCV = 0.".
+        assert "MAXCV = 0.404" in result.message
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test__dual_annealing.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test__dual_annealing.py
new file mode 100644
index 00000000..f607bcd2
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test__dual_annealing.py
@@ -0,0 +1,360 @@
+# Dual annealing unit tests implementation.
+# Copyright (c) 2018 Sylvain Gubian, Yang Xiang
+# Author: Sylvain Gubian, PMP S.A.
+"""
+Unit tests for the dual annealing global optimizer
+"""
+from scipy.optimize import dual_annealing, Bounds
+
+from scipy.optimize._dual_annealing import EnergyState
+from scipy.optimize._dual_annealing import LocalSearchWrapper
+from scipy.optimize._dual_annealing import ObjectiveFunWrapper
+from scipy.optimize._dual_annealing import StrategyChain
+from scipy.optimize._dual_annealing import VisitingDistribution
+from scipy.optimize import rosen, rosen_der
+import pytest
+import numpy as np
+from numpy.testing import assert_equal, assert_allclose, assert_array_less
+from pytest import raises as assert_raises
+from scipy._lib._util import check_random_state
+from scipy._lib._pep440 import Version
+
+
+class TestDualAnnealing:
+
+    def setup_method(self):
+        # A function that always returns infinity, for initialization tests
+        self.weirdfunc = lambda x: np.inf
+        # 2-D bounds for testing function
+        self.ld_bounds = [(-5.12, 5.12)] * 2
+        # 8-D bounds for testing function (the 2-D bounds repeated 4 times)
+        self.hd_bounds = self.ld_bounds * 4
+        # Number of values to be generated for testing visit function
+        self.nbtestvalues = 5000
+        self.high_temperature = 5230
+        self.low_temperature = 0.1
+        self.qv = 2.62
+        self.seed = 1234
+        self.rs = check_random_state(self.seed)
+        self.nb_fun_call = 0
+        self.ngev = 0
+
+    def callback(self, x, f, context):
+        # For testing the callback mechanism. The search should stop once
+        # f <= 1.0, because the callback then returns True
+        if f <= 1.0:
+            return True
+
+    def func(self, x, args=()):
+        # Using Rastrigin function for performing tests
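+        # Editorial note: `shift` is also added to the return value, so
+        # the global minimum of this shifted Rastrigin function equals
+        # `shift` (attained at x = shift); the args tests below rely on
+        # exactly that.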
+        if args:
+            shift = args
+        else:
+            shift = 0
+        y = np.sum((x - shift) ** 2 - 10 * np.cos(2 * np.pi * (
+            x - shift))) + 10 * np.size(x) + shift
+        self.nb_fun_call += 1
+        return y
+
+    def rosen_der_wrapper(self, x, args=()):
+        self.ngev += 1
+        return rosen_der(x, *args)
+
+    # FIXME: there are some discontinuities in behaviour as a function of `qv`,
+    #        this needs investigating - see gh-12384
+    @pytest.mark.parametrize('qv', [1.1, 1.41, 2, 2.62, 2.9])
+    def test_visiting_stepping(self, qv):
+        lu = list(zip(*self.ld_bounds))
+        lower = np.array(lu[0])
+        upper = np.array(lu[1])
+        dim = lower.size
+        vd = VisitingDistribution(lower, upper, qv, self.rs)
+        values = np.zeros(dim)
+        x_step_low = vd.visiting(values, 0, self.high_temperature)
+        # Make sure that all components are changed
+        assert_equal(np.not_equal(x_step_low, 0), True)
+        values = np.zeros(dim)
+        x_step_high = vd.visiting(values, dim, self.high_temperature)
+        # Make sure that the single component at index step - dim has changed
+        assert_equal(np.not_equal(x_step_high[0], 0), True)
+
+    @pytest.mark.parametrize('qv', [2.25, 2.62, 2.9])
+    def test_visiting_dist_high_temperature(self, qv):
+        lu = list(zip(*self.ld_bounds))
+        lower = np.array(lu[0])
+        upper = np.array(lu[1])
+        vd = VisitingDistribution(lower, upper, qv, self.rs)
+        values = vd.visit_fn(self.high_temperature, self.nbtestvalues)
+
+        # The visiting distribution is a distorted version of the
+        # Cauchy-Lorentz distribution and has no first or higher
+        # moments (no mean defined, no variance defined).
+        # Check that values with big tails are generated
+        assert_array_less(np.min(values), 1e-10)
+        assert_array_less(1e+10, np.max(values))
+
+    def test_reset(self):
+        owf = ObjectiveFunWrapper(self.weirdfunc)
+        lu = list(zip(*self.ld_bounds))
+        lower = np.array(lu[0])
+        upper = np.array(lu[1])
+        es = EnergyState(lower, upper)
+        assert_raises(ValueError, es.reset, owf, check_random_state(None))
+
+    def test_low_dim(self):
+        ret = dual_annealing(
+            self.func, self.ld_bounds, seed=self.seed)
+        assert_allclose(ret.fun, 0., atol=1e-12)
+        assert ret.success
+
+    def test_high_dim(self):
+        ret = dual_annealing(self.func, self.hd_bounds, seed=self.seed)
+        assert_allclose(ret.fun, 0., atol=1e-12)
+        assert ret.success
+
+    def test_low_dim_no_ls(self):
+        ret = dual_annealing(self.func, self.ld_bounds,
+                             no_local_search=True, seed=self.seed)
+        assert_allclose(ret.fun, 0., atol=1e-4)
+
+    def test_high_dim_no_ls(self):
+        ret = dual_annealing(self.func, self.hd_bounds,
+                             no_local_search=True, seed=self.seed)
+        assert_allclose(ret.fun, 0., atol=1e-4)
+
+    def test_nb_fun_call(self):
+        ret = dual_annealing(self.func, self.ld_bounds, seed=self.seed)
+        assert_equal(self.nb_fun_call, ret.nfev)
+
+    def test_nb_fun_call_no_ls(self):
+        ret = dual_annealing(self.func, self.ld_bounds,
+                             no_local_search=True, seed=self.seed)
+        assert_equal(self.nb_fun_call, ret.nfev)
+
+    def test_max_reinit(self):
+        assert_raises(ValueError, dual_annealing, self.weirdfunc,
+                      self.ld_bounds)
+
+    def test_reproduce(self):
+        res1 = dual_annealing(self.func, self.ld_bounds, seed=self.seed)
+        res2 = dual_annealing(self.func, self.ld_bounds, seed=self.seed)
+        res3 = dual_annealing(self.func, self.ld_bounds, seed=self.seed)
+        # If the results are reproducible, the x components found have to
+        # be exactly the same, which is not the case without seeding
+        assert_equal(res1.x, res2.x)
+        assert_equal(res1.x, res3.x)
+
+    def test_rand_gen(self):
+        # check that np.random.Generator can be used (numpy >= 1.17)
+        # obtain a np.random.Generator object
+        rng = np.random.default_rng(1)
+
+        res1 = dual_annealing(self.func, self.ld_bounds, seed=rng)
+        # seed again
+        rng = np.random.default_rng(1)
+        res2 = dual_annealing(self.func, self.ld_bounds, seed=rng)
+        # If the results are reproducible, the x components found have to
+        # be exactly the same, which is not the case without seeding
+        assert_equal(res1.x, res2.x)
+
+    def test_bounds_integrity(self):
+        wrong_bounds = [(-5.12, 5.12), (1, 0), (5.12, 5.12)]
+        assert_raises(ValueError, dual_annealing, self.func,
+                      wrong_bounds)
+
+    def test_bound_validity(self):
+        invalid_bounds = [(-5, 5), (-np.inf, 0), (-5, 5)]
+        assert_raises(ValueError, dual_annealing, self.func,
+                      invalid_bounds)
+        invalid_bounds = [(-5, 5), (0, np.inf), (-5, 5)]
+        assert_raises(ValueError, dual_annealing, self.func,
+                      invalid_bounds)
+        invalid_bounds = [(-5, 5), (0, np.nan), (-5, 5)]
+        assert_raises(ValueError, dual_annealing, self.func,
+                      invalid_bounds)
+
+    def test_deprecated_local_search_options_bounds(self):
+        func = lambda x: np.sum((x-5) * (x-1))
+        bounds = list(zip([-6, -5], [6, 5]))
+        # Test bounds can be passed (see gh-10831)
+
+        with pytest.warns(RuntimeWarning, match=r"Method CG cannot handle "):
+            dual_annealing(
+                func,
+                bounds=bounds,
+                minimizer_kwargs={"method": "CG", "bounds": bounds})
+
+    def test_minimizer_kwargs_bounds(self):
+        func = lambda x: np.sum((x-5) * (x-1))
+        bounds = list(zip([-6, -5], [6, 5]))
+        # Test bounds can be passed (see gh-10831)
+        dual_annealing(
+            func,
+            bounds=bounds,
+            minimizer_kwargs={"method": "SLSQP", "bounds": bounds})
+
+        with pytest.warns(RuntimeWarning, match=r"Method CG cannot handle "):
+            dual_annealing(
+                func,
+                bounds=bounds,
+                minimizer_kwargs={"method": "CG", "bounds": bounds})
+
+    def test_max_fun_ls(self):
+        ret = dual_annealing(self.func, self.ld_bounds, maxfun=100,
+                             seed=self.seed)
+
+        ls_max_iter = min(max(
+            len(self.ld_bounds) * LocalSearchWrapper.LS_MAXITER_RATIO,
+            LocalSearchWrapper.LS_MAXITER_MIN),
+            LocalSearchWrapper.LS_MAXITER_MAX)
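+        # Editorial note: with the 2-D bounds used here this evaluates to
+        # min(max(2 * LS_MAXITER_RATIO, LS_MAXITER_MIN), LS_MAXITER_MAX);
+        # assuming the upstream constants (6, 100, 1000) that is 100, so
+        # nfev may overshoot maxfun by at most one local-search budget.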
+        assert ret.nfev <= 100 + ls_max_iter
+        assert not ret.success
+
+    def test_max_fun_no_ls(self):
+        ret = dual_annealing(self.func, self.ld_bounds,
+                             no_local_search=True, maxfun=500, seed=self.seed)
+        assert ret.nfev <= 500
+        assert not ret.success
+
+    def test_maxiter(self):
+        ret = dual_annealing(self.func, self.ld_bounds, maxiter=700,
+                             seed=self.seed)
+        assert ret.nit <= 700
+
+    # Testing that args are passed correctly for dual_annealing
+    def test_fun_args_ls(self):
+        ret = dual_annealing(self.func, self.ld_bounds,
+                             args=((3.14159,)), seed=self.seed)
+        assert_allclose(ret.fun, 3.14159, atol=1e-6)
+
+    # Testing that args are passed correctly for pure simulated annealing
+    def test_fun_args_no_ls(self):
+        ret = dual_annealing(self.func, self.ld_bounds,
+                             args=((3.14159, )), no_local_search=True,
+                             seed=self.seed)
+        assert_allclose(ret.fun, 3.14159, atol=1e-4)
+
+    def test_callback_stop(self):
+        # Testing that callback make the algorithm stop for
+        # fun value <= 1.0 (see callback method)
+        ret = dual_annealing(self.func, self.ld_bounds,
+                             callback=self.callback, seed=self.seed)
+        assert ret.fun <= 1.0
+        assert 'stop early' in ret.message[0]
+        assert not ret.success
+
+    @pytest.mark.parametrize('method, atol', [
+        ('Nelder-Mead', 2e-5),
+        ('COBYLA', 1e-5),
+        ('Powell', 1e-8),
+        ('CG', 1e-8),
+        ('BFGS', 1e-8),
+        ('TNC', 1e-8),
+        ('SLSQP', 2e-7),
+    ])
+    def test_multi_ls_minimizer(self, method, atol):
+        ret = dual_annealing(self.func, self.ld_bounds,
+                             minimizer_kwargs=dict(method=method),
+                             seed=self.seed)
+        assert_allclose(ret.fun, 0., atol=atol)
+
+    def test_wrong_restart_temp(self):
+        assert_raises(ValueError, dual_annealing, self.func,
+                      self.ld_bounds, restart_temp_ratio=1)
+        assert_raises(ValueError, dual_annealing, self.func,
+                      self.ld_bounds, restart_temp_ratio=0)
+
+    def test_gradient_gnev(self):
+        minimizer_opts = {
+            'jac': self.rosen_der_wrapper,
+        }
+        ret = dual_annealing(rosen, self.ld_bounds,
+                             minimizer_kwargs=minimizer_opts,
+                             seed=self.seed)
+        assert ret.njev == self.ngev
+
+    def test_from_docstring(self):
+        func = lambda x: np.sum(x * x - 10 * np.cos(2 * np.pi * x)) + 10 * np.size(x)
+        lw = [-5.12] * 10
+        up = [5.12] * 10
+        ret = dual_annealing(func, bounds=list(zip(lw, up)), seed=1234)
+        assert_allclose(ret.x,
+                        [-4.26437714e-09, -3.91699361e-09, -1.86149218e-09,
+                         -3.97165720e-09, -6.29151648e-09, -6.53145322e-09,
+                         -3.93616815e-09, -6.55623025e-09, -6.05775280e-09,
+                         -5.00668935e-09], atol=4e-8)
+        assert_allclose(ret.fun, 0.000000, atol=5e-13)
+
+    @pytest.mark.parametrize('new_e, temp_step, accepted, accept_rate', [
+        (0, 100, 1000, 1.0097587941791923),
+        (0, 2, 1000, 1.2599210498948732),
+        (10, 100, 878, 0.8786035869128718),
+        (10, 60, 695, 0.6812920690579612),
+        (2, 100, 990, 0.9897404249173424),
+    ])
+    def test_accept_reject_probabilistic(
+            self, new_e, temp_step, accepted, accept_rate):
+        # Test that updates are accepted unconditionally when
+        # e < current_energy, and probabilistically when e > current_energy
+
+        rs = check_random_state(123)
+
+        count_accepted = 0
+        iterations = 1000
+
+        accept_param = -5
+        current_energy = 1
+        for _ in range(iterations):
+            energy_state = EnergyState(lower=None, upper=None)
+            # Set energy state with current_energy, any location.
+            energy_state.update_current(current_energy, [0])
+
+            chain = StrategyChain(
+                accept_param, None, None, None, rs, energy_state)
+            # Normally this is set in run()
+            chain.temperature_step = temp_step
+
+            # Check if update is accepted.
+            chain.accept_reject(j=1, e=new_e, x_visit=[2])
+            if energy_state.current_energy == new_e:
+                count_accepted += 1
+
+        assert count_accepted == accepted
+
+        # Check accept rate
+        pqv = 1 - (1 - accept_param) * (new_e - current_energy) / temp_step
+        rate = 0 if pqv <= 0 else np.exp(np.log(pqv) / (1 - accept_param))
+
+        assert_allclose(rate, accept_rate)
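+
+        # Worked example (editorial): for new_e=10, temp_step=100 and
+        # accept_param=-5, pqv = 1 - 6 * 9 / 100 = 0.46 and the rate is
+        # 0.46 ** (1 / 6) ~= 0.8786, matching the parametrized
+        # accept_rate of 0.8786035869128718 above.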
+
+    def test_bounds_class(self):
+        # test that result does not depend on the bounds type
+        def func(x):
+            f = np.sum(x * x - 10 * np.cos(2 * np.pi * x)) + 10 * np.size(x)
+            return f
+        lw = [-5.12] * 5
+        up = [5.12] * 5
+
+        # The unbounded global minimum is at all zeros. Most of the bounds
+        # below force a decision variable away from the unbounded minimum
+        # and are active at the solution.
+        up[0] = -2.0
+        up[1] = -1.0
+        lw[3] = 1.0
+        lw[4] = 2.0
+
+        # run optimizations
+        bounds = Bounds(lw, up)
+        ret_bounds_class = dual_annealing(func, bounds=bounds, seed=1234)
+
+        bounds_old = list(zip(lw, up))
+        ret_bounds_list = dual_annealing(func, bounds=bounds_old, seed=1234)
+
+        # test that found minima, function evaluations and iterations match
+        assert_allclose(ret_bounds_class.x, ret_bounds_list.x, atol=1e-8)
+        assert_allclose(ret_bounds_class.x, np.arange(-2, 3), atol=1e-7)
+        assert_allclose(ret_bounds_list.fun, ret_bounds_class.fun, atol=1e-9)
+        assert ret_bounds_list.nfev == ret_bounds_class.nfev
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test__linprog_clean_inputs.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test__linprog_clean_inputs.py
new file mode 100644
index 00000000..784906bd
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test__linprog_clean_inputs.py
@@ -0,0 +1,297 @@
+"""
+Unit tests for `_clean_inputs`, the linear programming input-validation
+routine.
+"""
+import numpy as np
+from numpy.testing import assert_, assert_allclose, assert_equal
+from pytest import raises as assert_raises
+from scipy.optimize._linprog_util import _clean_inputs, _LPProblem
+from copy import deepcopy
+from datetime import date
+
+
+def test_aliasing():
+    """
+    Test that none of the objects referred to by the `lp` attributes
+    (`c`, `A_ub`, `b_ub`, `A_eq`, `b_eq`, `bounds`) are modified by
+    `_clean_inputs` as a side effect.
+    """
+    lp = _LPProblem(
+        c=1,
+        A_ub=[[1]],
+        b_ub=[1],
+        A_eq=[[1]],
+        b_eq=[1],
+        bounds=(-np.inf, np.inf)
+    )
+    lp_copy = deepcopy(lp)
+
+    _clean_inputs(lp)
+
+    assert_(lp.c == lp_copy.c, "c modified by _clean_inputs")
+    assert_(lp.A_ub == lp_copy.A_ub, "A_ub modified by _clean_inputs")
+    assert_(lp.b_ub == lp_copy.b_ub, "b_ub modified by _clean_inputs")
+    assert_(lp.A_eq == lp_copy.A_eq, "A_eq modified by _clean_inputs")
+    assert_(lp.b_eq == lp_copy.b_eq, "b_eq modified by _clean_inputs")
+    assert_(lp.bounds == lp_copy.bounds, "bounds modified by _clean_inputs")
+
+
+def test_aliasing2():
+    """
+    Same purpose as `test_aliasing` above, but with array inputs.
+    """
+    lp = _LPProblem(
+        c=np.array([1, 1]),
+        A_ub=np.array([[1, 1], [2, 2]]),
+        b_ub=np.array([[1], [1]]),
+        A_eq=np.array([[1, 1]]),
+        b_eq=np.array([1]),
+        bounds=[(-np.inf, np.inf), (None, 1)]
+    )
+    lp_copy = deepcopy(lp)
+
+    _clean_inputs(lp)
+
+    assert_allclose(lp.c, lp_copy.c, err_msg="c modified by _clean_inputs")
+    assert_allclose(lp.A_ub, lp_copy.A_ub, err_msg="A_ub modified by _clean_inputs")
+    assert_allclose(lp.b_ub, lp_copy.b_ub, err_msg="b_ub modified by _clean_inputs")
+    assert_allclose(lp.A_eq, lp_copy.A_eq, err_msg="A_eq modified by _clean_inputs")
+    assert_allclose(lp.b_eq, lp_copy.b_eq, err_msg="b_eq modified by _clean_inputs")
+    assert_(lp.bounds == lp_copy.bounds, "bounds modified by _clean_inputs")
+
+
+def test_missing_inputs():
+    c = [1, 2]
+    A_ub = np.array([[1, 1], [2, 2]])
+    b_ub = np.array([1, 1])
+    A_eq = np.array([[1, 1], [2, 2]])
+    b_eq = np.array([1, 1])
+
+    assert_raises(TypeError, _clean_inputs)
+    assert_raises(TypeError, _clean_inputs, _LPProblem(c=None))
+    assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=A_ub))
+    assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=A_ub, b_ub=None))
+    assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, b_ub=b_ub))
+    assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=None, b_ub=b_ub))
+    assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=A_eq))
+    assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=A_eq, b_eq=None))
+    assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, b_eq=b_eq))
+    assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=None, b_eq=b_eq))
+
+
+def test_too_many_dimensions():
+    cb = [1, 2, 3, 4]
+    A = np.random.rand(4, 4)
+    bad2D = [[1, 2], [3, 4]]
+    bad3D = np.random.rand(4, 4, 4)
+    assert_raises(ValueError, _clean_inputs, _LPProblem(c=bad2D, A_ub=A, b_ub=cb))
+    assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_ub=bad3D, b_ub=cb))
+    assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_ub=A, b_ub=bad2D))
+    assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_eq=bad3D, b_eq=cb))
+    assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_eq=A, b_eq=bad2D))
+
+
+def test_too_few_dimensions():
+    bad = np.random.rand(4, 4).ravel()
+    cb = np.random.rand(4)
+    assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_ub=bad, b_ub=cb))
+    assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_eq=bad, b_eq=cb))
+
+
+def test_inconsistent_dimensions():
+    m = 2
+    n = 4
+    c = [1, 2, 3, 4]
+
+    Agood = np.random.rand(m, n)
+    Abad = np.random.rand(m, n + 1)
+    bgood = np.random.rand(m)
+    bbad = np.random.rand(m + 1)
+    boundsbad = [(0, 1)] * (n + 1)
+    assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=Abad, b_ub=bgood))
+    assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=Agood, b_ub=bbad))
+    assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=Abad, b_eq=bgood))
+    assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=Agood, b_eq=bbad))
+    assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, bounds=boundsbad))
+    assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, bounds=[[1, 2], [2, 3], [3, 4], [4, 5, 6]]))
+
+
+def test_type_errors():
+    lp = _LPProblem(
+        c=[1, 2],
+        A_ub=np.array([[1, 1], [2, 2]]),
+        b_ub=np.array([1, 1]),
+        A_eq=np.array([[1, 1], [2, 2]]),
+        b_eq=np.array([1, 1]),
+        bounds=[(0, 1)]
+    )
+    bad = "hello"
+
+    assert_raises(TypeError, _clean_inputs, lp._replace(c=bad))
+    assert_raises(TypeError, _clean_inputs, lp._replace(A_ub=bad))
+    assert_raises(TypeError, _clean_inputs, lp._replace(b_ub=bad))
+    assert_raises(TypeError, _clean_inputs, lp._replace(A_eq=bad))
+    assert_raises(TypeError, _clean_inputs, lp._replace(b_eq=bad))
+
+    assert_raises(ValueError, _clean_inputs, lp._replace(bounds=bad))
+    assert_raises(ValueError, _clean_inputs, lp._replace(bounds="hi"))
+    assert_raises(ValueError, _clean_inputs, lp._replace(bounds=["hi"]))
+    assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[("hi")]))
+    assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, "")]))
+    assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, 2), (1, "")]))
+    assert_raises(TypeError, _clean_inputs, lp._replace(bounds=[(1, date(2020, 2, 29))]))
+    assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[[[1, 2]]]))
+
+
+def test_non_finite_errors():
+    lp = _LPProblem(
+        c=[1, 2],
+        A_ub=np.array([[1, 1], [2, 2]]),
+        b_ub=np.array([1, 1]),
+        A_eq=np.array([[1, 1], [2, 2]]),
+        b_eq=np.array([1, 1]),
+        bounds=[(0, 1)]
+    )
+    assert_raises(ValueError, _clean_inputs, lp._replace(c=[0, None]))
+    assert_raises(ValueError, _clean_inputs, lp._replace(c=[np.inf, 0]))
+    assert_raises(ValueError, _clean_inputs, lp._replace(c=[0, -np.inf]))
+    assert_raises(ValueError, _clean_inputs, lp._replace(c=[np.nan, 0]))
+
+    assert_raises(ValueError, _clean_inputs, lp._replace(A_ub=[[1, 2], [None, 1]]))
+    assert_raises(ValueError, _clean_inputs, lp._replace(b_ub=[np.inf, 1]))
+    assert_raises(ValueError, _clean_inputs, lp._replace(A_eq=[[1, 2], [1, -np.inf]]))
+    assert_raises(ValueError, _clean_inputs, lp._replace(b_eq=[1, np.nan]))
+
+
+def test__clean_inputs1():
+    lp = _LPProblem(
+        c=[1, 2],
+        A_ub=[[1, 1], [2, 2]],
+        b_ub=[1, 1],
+        A_eq=[[1, 1], [2, 2]],
+        b_eq=[1, 1],
+        bounds=None
+    )
+
+    lp_cleaned = _clean_inputs(lp)
+
+    assert_allclose(lp_cleaned.c, np.array(lp.c))
+    assert_allclose(lp_cleaned.A_ub, np.array(lp.A_ub))
+    assert_allclose(lp_cleaned.b_ub, np.array(lp.b_ub))
+    assert_allclose(lp_cleaned.A_eq, np.array(lp.A_eq))
+    assert_allclose(lp_cleaned.b_eq, np.array(lp.b_eq))
+    assert_equal(lp_cleaned.bounds, [(0, np.inf)] * 2)
+
+    assert_(lp_cleaned.c.shape == (2,), "")
+    assert_(lp_cleaned.A_ub.shape == (2, 2), "")
+    assert_(lp_cleaned.b_ub.shape == (2,), "")
+    assert_(lp_cleaned.A_eq.shape == (2, 2), "")
+    assert_(lp_cleaned.b_eq.shape == (2,), "")
+
+
+def test__clean_inputs2():
+    lp = _LPProblem(
+        c=1,
+        A_ub=[[1]],
+        b_ub=1,
+        A_eq=[[1]],
+        b_eq=1,
+        bounds=(0, 1)
+    )
+
+    lp_cleaned = _clean_inputs(lp)
+
+    assert_allclose(lp_cleaned.c, np.array(lp.c))
+    assert_allclose(lp_cleaned.A_ub, np.array(lp.A_ub))
+    assert_allclose(lp_cleaned.b_ub, np.array(lp.b_ub))
+    assert_allclose(lp_cleaned.A_eq, np.array(lp.A_eq))
+    assert_allclose(lp_cleaned.b_eq, np.array(lp.b_eq))
+    assert_equal(lp_cleaned.bounds, [(0, 1)])
+
+    assert_(lp_cleaned.c.shape == (1,), "")
+    assert_(lp_cleaned.A_ub.shape == (1, 1), "")
+    assert_(lp_cleaned.b_ub.shape == (1,), "")
+    assert_(lp_cleaned.A_eq.shape == (1, 1), "")
+    assert_(lp_cleaned.b_eq.shape == (1,), "")
+
+
+def test__clean_inputs3():
+    lp = _LPProblem(
+        c=[[1, 2]],
+        A_ub=np.random.rand(2, 2),
+        b_ub=[[1], [2]],
+        A_eq=np.random.rand(2, 2),
+        b_eq=[[1], [2]],
+        bounds=[(0, 1)]
+    )
+
+    lp_cleaned = _clean_inputs(lp)
+
+    assert_allclose(lp_cleaned.c, np.array([1, 2]))
+    assert_allclose(lp_cleaned.b_ub, np.array([1, 2]))
+    assert_allclose(lp_cleaned.b_eq, np.array([1, 2]))
+    assert_equal(lp_cleaned.bounds, [(0, 1)] * 2)
+
+    assert_(lp_cleaned.c.shape == (2,), "")
+    assert_(lp_cleaned.b_ub.shape == (2,), "")
+    assert_(lp_cleaned.b_eq.shape == (2,), "")
+
+
+def test_bad_bounds():
+    lp = _LPProblem(c=[1, 2])
+
+    assert_raises(ValueError, _clean_inputs, lp._replace(bounds=(1, 2, 2)))
+    assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, 2, 2)]))
+    assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, 2), (1, 2, 2)]))
+    assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, 2), (1, 2), (1, 2)]))
+
+    lp = _LPProblem(c=[1, 2, 3, 4])
+
+    assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, 2, 3, 4), (1, 2, 3, 4)]))
+
+
+def test_good_bounds():
+    lp = _LPProblem(c=[1, 2])
+
+    lp_cleaned = _clean_inputs(lp)  # lp.bounds is None by default
+    assert_equal(lp_cleaned.bounds, [(0, np.inf)] * 2)
+
+    lp_cleaned = _clean_inputs(lp._replace(bounds=[]))
+    assert_equal(lp_cleaned.bounds, [(0, np.inf)] * 2)
+
+    lp_cleaned = _clean_inputs(lp._replace(bounds=[[]]))
+    assert_equal(lp_cleaned.bounds, [(0, np.inf)] * 2)
+
+    lp_cleaned = _clean_inputs(lp._replace(bounds=(1, 2)))
+    assert_equal(lp_cleaned.bounds, [(1, 2)] * 2)
+
+    lp_cleaned = _clean_inputs(lp._replace(bounds=[(1, 2)]))
+    assert_equal(lp_cleaned.bounds, [(1, 2)] * 2)
+
+    lp_cleaned = _clean_inputs(lp._replace(bounds=[(1, None)]))
+    assert_equal(lp_cleaned.bounds, [(1, np.inf)] * 2)
+
+    lp_cleaned = _clean_inputs(lp._replace(bounds=[(None, 1)]))
+    assert_equal(lp_cleaned.bounds, [(-np.inf, 1)] * 2)
+
+    lp_cleaned = _clean_inputs(lp._replace(bounds=[(None, None), (-np.inf, None)]))
+    assert_equal(lp_cleaned.bounds, [(-np.inf, np.inf)] * 2)
+
+    lp = _LPProblem(c=[1, 2, 3, 4])
+
+    lp_cleaned = _clean_inputs(lp)  # lp.bounds is None by default
+    assert_equal(lp_cleaned.bounds, [(0, np.inf)] * 4)
+
+    lp_cleaned = _clean_inputs(lp._replace(bounds=(1, 2)))
+    assert_equal(lp_cleaned.bounds, [(1, 2)] * 4)
+
+    lp_cleaned = _clean_inputs(lp._replace(bounds=[(1, 2)]))
+    assert_equal(lp_cleaned.bounds, [(1, 2)] * 4)
+
+    lp_cleaned = _clean_inputs(lp._replace(bounds=[(1, None)]))
+    assert_equal(lp_cleaned.bounds, [(1, np.inf)] * 4)
+
+    lp_cleaned = _clean_inputs(lp._replace(bounds=[(None, 1)]))
+    assert_equal(lp_cleaned.bounds, [(-np.inf, 1)] * 4)
+
+    lp_cleaned = _clean_inputs(lp._replace(bounds=[(None, None), (-np.inf, None), (None, np.inf), (-np.inf, np.inf)]))
+    assert_equal(lp_cleaned.bounds, [(-np.inf, np.inf)] * 4)
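+
+
+# Editorial summary of the broadcasting exercised above: a single (lb, ub)
+# pair or a one-element sequence is applied to every variable, None maps to
+# -inf/+inf, and bounds=None defaults to (0, inf) for each variable, e.g.:
+#
+#     lp = _LPProblem(c=[1, 2, 3])
+#     _clean_inputs(lp._replace(bounds=[(None, 5)])).bounds
+#     # -> [(-inf, 5), (-inf, 5), (-inf, 5)]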
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test__numdiff.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test__numdiff.py
new file mode 100644
index 00000000..a2ff2e46
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test__numdiff.py
@@ -0,0 +1,813 @@
+import math
+from itertools import product
+
+import numpy as np
+from numpy.testing import assert_allclose, assert_equal, assert_
+from pytest import raises as assert_raises
+
+from scipy.sparse import csr_matrix, csc_matrix, lil_matrix
+
+from scipy.optimize._numdiff import (
+    _adjust_scheme_to_bounds, approx_derivative, check_derivative,
+    group_columns, _eps_for_method, _compute_absolute_step)
+
+
+def test_group_columns():
+    structure = [
+        [1, 1, 0, 0, 0, 0],
+        [1, 1, 1, 0, 0, 0],
+        [0, 1, 1, 1, 0, 0],
+        [0, 0, 1, 1, 1, 0],
+        [0, 0, 0, 1, 1, 1],
+        [0, 0, 0, 0, 1, 1],
+        [0, 0, 0, 0, 0, 0]
+    ]
+    for transform in [np.asarray, csr_matrix, csc_matrix, lil_matrix]:
+        A = transform(structure)
+        order = np.arange(6)
+        groups_true = np.array([0, 1, 2, 0, 1, 2])
+        groups = group_columns(A, order)
+        assert_equal(groups, groups_true)
+
+        order = [1, 2, 4, 3, 5, 0]
+        groups_true = np.array([2, 0, 1, 2, 0, 1])
+        groups = group_columns(A, order)
+        assert_equal(groups, groups_true)
+
+    # Test repeatability.
+    groups_1 = group_columns(A)
+    groups_2 = group_columns(A)
+    assert_equal(groups_1, groups_2)
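+
+# Editorial note: group_columns implements the classic column-grouping idea
+# for sparse finite differences: columns that never share a nonzero row can
+# be perturbed simultaneously. In the banded structure above, 6 columns
+# collapse into 3 groups, so estimating the Jacobian needs only 3 extra
+# function evaluations instead of 6.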
+
+
+def test_correct_fp_eps():
+    # check that relative step size is correct for FP size
+    EPS = np.finfo(np.float64).eps
+    relative_step = {"2-point": EPS**0.5,
+                    "3-point": EPS**(1/3),
+                     "cs": EPS**0.5}
+    for method in ['2-point', '3-point', 'cs']:
+        assert_allclose(
+            _eps_for_method(np.float64, np.float64, method),
+            relative_step[method])
+        assert_allclose(
+            _eps_for_method(np.complex128, np.complex128, method),
+            relative_step[method]
+        )
+
+    # check another FP size
+    EPS = np.finfo(np.float32).eps
+    relative_step = {"2-point": EPS**0.5,
+                    "3-point": EPS**(1/3),
+                     "cs": EPS**0.5}
+
+    for method in ['2-point', '3-point', 'cs']:
+        assert_allclose(
+            _eps_for_method(np.float64, np.float32, method),
+            relative_step[method]
+        )
+        assert_allclose(
+            _eps_for_method(np.float32, np.float64, method),
+            relative_step[method]
+        )
+        assert_allclose(
+            _eps_for_method(np.float32, np.float32, method),
+            relative_step[method]
+        )
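+
+# Editorial note: these defaults reflect the standard truncation/roundoff
+# trade-off for finite differences: the total error of a first-order
+# (2-point) scheme is minimised near h ~ sqrt(EPS), while the second-order
+# 3-point scheme is optimal near h ~ EPS**(1/3).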
+
+
+class TestAdjustSchemeToBounds:
+    def test_no_bounds(self):
+        x0 = np.zeros(3)
+        h = np.full(3, 1e-2)
+        inf_lower = np.empty_like(x0)
+        inf_upper = np.empty_like(x0)
+        inf_lower.fill(-np.inf)
+        inf_upper.fill(np.inf)
+
+        h_adjusted, one_sided = _adjust_scheme_to_bounds(
+            x0, h, 1, '1-sided', inf_lower, inf_upper)
+        assert_allclose(h_adjusted, h)
+        assert_(np.all(one_sided))
+
+        h_adjusted, one_sided = _adjust_scheme_to_bounds(
+            x0, h, 2, '1-sided', inf_lower, inf_upper)
+        assert_allclose(h_adjusted, h)
+        assert_(np.all(one_sided))
+
+        h_adjusted, one_sided = _adjust_scheme_to_bounds(
+            x0, h, 1, '2-sided', inf_lower, inf_upper)
+        assert_allclose(h_adjusted, h)
+        assert_(np.all(~one_sided))
+
+        h_adjusted, one_sided = _adjust_scheme_to_bounds(
+            x0, h, 2, '2-sided', inf_lower, inf_upper)
+        assert_allclose(h_adjusted, h)
+        assert_(np.all(~one_sided))
+
+    def test_with_bound(self):
+        x0 = np.array([0.0, 0.85, -0.85])
+        lb = -np.ones(3)
+        ub = np.ones(3)
+        h = np.array([1, 1, -1]) * 1e-1
+
+        h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 1, '1-sided', lb, ub)
+        assert_allclose(h_adjusted, h)
+
+        h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 2, '1-sided', lb, ub)
+        assert_allclose(h_adjusted, np.array([1, -1, 1]) * 1e-1)
+
+        h_adjusted, one_sided = _adjust_scheme_to_bounds(
+            x0, h, 1, '2-sided', lb, ub)
+        assert_allclose(h_adjusted, np.abs(h))
+        assert_(np.all(~one_sided))
+
+        h_adjusted, one_sided = _adjust_scheme_to_bounds(
+            x0, h, 2, '2-sided', lb, ub)
+        assert_allclose(h_adjusted, np.array([1, -1, 1]) * 1e-1)
+        assert_equal(one_sided, np.array([False, True, True]))
+
+    def test_tight_bounds(self):
+        lb = np.array([-0.03, -0.03])
+        ub = np.array([0.05, 0.05])
+        x0 = np.array([0.0, 0.03])
+        h = np.array([-0.1, -0.1])
+
+        h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 1, '1-sided', lb, ub)
+        assert_allclose(h_adjusted, np.array([0.05, -0.06]))
+
+        h_adjusted, _ = _adjust_scheme_to_bounds(x0, h, 2, '1-sided', lb, ub)
+        assert_allclose(h_adjusted, np.array([0.025, -0.03]))
+
+        h_adjusted, one_sided = _adjust_scheme_to_bounds(
+            x0, h, 1, '2-sided', lb, ub)
+        assert_allclose(h_adjusted, np.array([0.03, -0.03]))
+        assert_equal(one_sided, np.array([False, True]))
+
+        h_adjusted, one_sided = _adjust_scheme_to_bounds(
+            x0, h, 2, '2-sided', lb, ub)
+        assert_allclose(h_adjusted, np.array([0.015, -0.015]))
+        assert_equal(one_sided, np.array([False, True]))
+
+
+class TestApproxDerivativesDense:
+    def fun_scalar_scalar(self, x):
+        return np.sinh(x)
+
+    def jac_scalar_scalar(self, x):
+        return np.cosh(x)
+
+    def fun_scalar_vector(self, x):
+        return np.array([x[0]**2, np.tan(x[0]), np.exp(x[0])])
+
+    def jac_scalar_vector(self, x):
+        return np.array(
+            [2 * x[0], np.cos(x[0]) ** -2, np.exp(x[0])]).reshape(-1, 1)
+
+    def fun_vector_scalar(self, x):
+        return np.sin(x[0] * x[1]) * np.log(x[0])
+
+    def wrong_dimensions_fun(self, x):
+        return np.array([x**2, np.tan(x), np.exp(x)])
+
+    def jac_vector_scalar(self, x):
+        return np.array([
+            x[1] * np.cos(x[0] * x[1]) * np.log(x[0]) +
+            np.sin(x[0] * x[1]) / x[0],
+            x[0] * np.cos(x[0] * x[1]) * np.log(x[0])
+        ])
+
+    def fun_vector_vector(self, x):
+        return np.array([
+            x[0] * np.sin(x[1]),
+            x[1] * np.cos(x[0]),
+            x[0] ** 3 * x[1] ** -0.5
+        ])
+
+    def jac_vector_vector(self, x):
+        return np.array([
+            [np.sin(x[1]), x[0] * np.cos(x[1])],
+            [-x[1] * np.sin(x[0]), np.cos(x[0])],
+            [3 * x[0] ** 2 * x[1] ** -0.5, -0.5 * x[0] ** 3 * x[1] ** -1.5]
+        ])
+
+    def fun_parametrized(self, x, c0, c1=1.0):
+        return np.array([np.exp(c0 * x[0]), np.exp(c1 * x[1])])
+
+    def jac_parametrized(self, x, c0, c1=0.1):
+        return np.array([
+            [c0 * np.exp(c0 * x[0]), 0],
+            [0, c1 * np.exp(c1 * x[1])]
+        ])
+
+    def fun_with_nan(self, x):
+        return x if np.abs(x) <= 1e-8 else np.nan
+
+    def jac_with_nan(self, x):
+        return 1.0 if np.abs(x) <= 1e-8 else np.nan
+
+    def fun_zero_jacobian(self, x):
+        return np.array([x[0] * x[1], np.cos(x[0] * x[1])])
+
+    def jac_zero_jacobian(self, x):
+        return np.array([
+            [x[1], x[0]],
+            [-x[1] * np.sin(x[0] * x[1]), -x[0] * np.sin(x[0] * x[1])]
+        ])
+
+    def fun_non_numpy(self, x):
+        return math.exp(x)
+
+    def jac_non_numpy(self, x):
+        # x can be a scalar or an array [val].
+        # Cast to a true scalar before handing it to math.exp
+        xp = np.asarray(x).item()
+        return math.exp(xp)
+
+    def test_scalar_scalar(self):
+        x0 = 1.0
+        jac_diff_2 = approx_derivative(self.fun_scalar_scalar, x0,
+                                       method='2-point')
+        jac_diff_3 = approx_derivative(self.fun_scalar_scalar, x0)
+        jac_diff_4 = approx_derivative(self.fun_scalar_scalar, x0,
+                                       method='cs')
+        jac_true = self.jac_scalar_scalar(x0)
+        assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
+        assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
+        assert_allclose(jac_diff_4, jac_true, rtol=1e-12)
+
+    def test_scalar_scalar_abs_step(self):
+        # can approx_derivative use abs_step?
+        x0 = 1.0
+        jac_diff_2 = approx_derivative(self.fun_scalar_scalar, x0,
+                                       method='2-point', abs_step=1.49e-8)
+        jac_diff_3 = approx_derivative(self.fun_scalar_scalar, x0,
+                                       abs_step=1.49e-8)
+        jac_diff_4 = approx_derivative(self.fun_scalar_scalar, x0,
+                                       method='cs', abs_step=1.49e-8)
+        jac_true = self.jac_scalar_scalar(x0)
+        assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
+        assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
+        assert_allclose(jac_diff_4, jac_true, rtol=1e-12)
+
+    def test_scalar_vector(self):
+        x0 = 0.5
+        jac_diff_2 = approx_derivative(self.fun_scalar_vector, x0,
+                                       method='2-point')
+        jac_diff_3 = approx_derivative(self.fun_scalar_vector, x0)
+        jac_diff_4 = approx_derivative(self.fun_scalar_vector, x0,
+                                       method='cs')
+        jac_true = self.jac_scalar_vector(np.atleast_1d(x0))
+        assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
+        assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
+        assert_allclose(jac_diff_4, jac_true, rtol=1e-12)
+
+    def test_vector_scalar(self):
+        x0 = np.array([100.0, -0.5])
+        jac_diff_2 = approx_derivative(self.fun_vector_scalar, x0,
+                                       method='2-point')
+        jac_diff_3 = approx_derivative(self.fun_vector_scalar, x0)
+        jac_diff_4 = approx_derivative(self.fun_vector_scalar, x0,
+                                       method='cs')
+        jac_true = self.jac_vector_scalar(x0)
+        assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
+        assert_allclose(jac_diff_3, jac_true, rtol=1e-7)
+        assert_allclose(jac_diff_4, jac_true, rtol=1e-12)
+
+    def test_vector_scalar_abs_step(self):
+        # can approx_derivative use abs_step?
+        x0 = np.array([100.0, -0.5])
+        jac_diff_2 = approx_derivative(self.fun_vector_scalar, x0,
+                                       method='2-point', abs_step=1.49e-8)
+        jac_diff_3 = approx_derivative(self.fun_vector_scalar, x0,
+                                       abs_step=1.49e-8, rel_step=np.inf)
+        jac_diff_4 = approx_derivative(self.fun_vector_scalar, x0,
+                                       method='cs', abs_step=1.49e-8)
+        jac_true = self.jac_vector_scalar(x0)
+        assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
+        assert_allclose(jac_diff_3, jac_true, rtol=3e-9)
+        assert_allclose(jac_diff_4, jac_true, rtol=1e-12)
+
+    def test_vector_vector(self):
+        x0 = np.array([-100.0, 0.2])
+        jac_diff_2 = approx_derivative(self.fun_vector_vector, x0,
+                                       method='2-point')
+        jac_diff_3 = approx_derivative(self.fun_vector_vector, x0)
+        jac_diff_4 = approx_derivative(self.fun_vector_vector, x0,
+                                       method='cs')
+        jac_true = self.jac_vector_vector(x0)
+        assert_allclose(jac_diff_2, jac_true, rtol=1e-5)
+        assert_allclose(jac_diff_3, jac_true, rtol=1e-6)
+        assert_allclose(jac_diff_4, jac_true, rtol=1e-12)
+
+    def test_wrong_dimensions(self):
+        x0 = 1.0
+        assert_raises(RuntimeError, approx_derivative,
+                      self.wrong_dimensions_fun, x0)
+        f0 = self.wrong_dimensions_fun(np.atleast_1d(x0))
+        assert_raises(ValueError, approx_derivative,
+                      self.wrong_dimensions_fun, x0, f0=f0)
+
+    def test_custom_rel_step(self):
+        x0 = np.array([-0.1, 0.1])
+        jac_diff_2 = approx_derivative(self.fun_vector_vector, x0,
+                                       method='2-point', rel_step=1e-4)
+        jac_diff_3 = approx_derivative(self.fun_vector_vector, x0,
+                                       rel_step=1e-4)
+        jac_true = self.jac_vector_vector(x0)
+        assert_allclose(jac_diff_2, jac_true, rtol=1e-2)
+        assert_allclose(jac_diff_3, jac_true, rtol=1e-4)
+
+    def test_options(self):
+        x0 = np.array([1.0, 1.0])
+        c0 = -1.0
+        c1 = 1.0
+        lb = 0.0
+        ub = 2.0
+        f0 = self.fun_parametrized(x0, c0, c1=c1)
+        rel_step = np.array([-1e-6, 1e-7])
+        jac_true = self.jac_parametrized(x0, c0, c1)
+        jac_diff_2 = approx_derivative(
+            self.fun_parametrized, x0, method='2-point', rel_step=rel_step,
+            f0=f0, args=(c0,), kwargs=dict(c1=c1), bounds=(lb, ub))
+        jac_diff_3 = approx_derivative(
+            self.fun_parametrized, x0, rel_step=rel_step,
+            f0=f0, args=(c0,), kwargs=dict(c1=c1), bounds=(lb, ub))
+        assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
+        assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
+
+    def test_with_bounds_2_point(self):
+        lb = -np.ones(2)
+        ub = np.ones(2)
+
+        x0 = np.array([-2.0, 0.2])
+        assert_raises(ValueError, approx_derivative,
+                      self.fun_vector_vector, x0, bounds=(lb, ub))
+
+        x0 = np.array([-1.0, 1.0])
+        jac_diff = approx_derivative(self.fun_vector_vector, x0,
+                                     method='2-point', bounds=(lb, ub))
+        jac_true = self.jac_vector_vector(x0)
+        assert_allclose(jac_diff, jac_true, rtol=1e-6)
+
+    def test_with_bounds_3_point(self):
+        lb = np.array([1.0, 1.0])
+        ub = np.array([2.0, 2.0])
+
+        x0 = np.array([1.0, 2.0])
+        jac_true = self.jac_vector_vector(x0)
+
+        jac_diff = approx_derivative(self.fun_vector_vector, x0)
+        assert_allclose(jac_diff, jac_true, rtol=1e-9)
+
+        jac_diff = approx_derivative(self.fun_vector_vector, x0,
+                                     bounds=(lb, np.inf))
+        assert_allclose(jac_diff, jac_true, rtol=1e-9)
+
+        jac_diff = approx_derivative(self.fun_vector_vector, x0,
+                                     bounds=(-np.inf, ub))
+        assert_allclose(jac_diff, jac_true, rtol=1e-9)
+
+        jac_diff = approx_derivative(self.fun_vector_vector, x0,
+                                     bounds=(lb, ub))
+        assert_allclose(jac_diff, jac_true, rtol=1e-9)
+
+    def test_tight_bounds(self):
+        x0 = np.array([10.0, 10.0])
+        lb = x0 - 3e-9
+        ub = x0 + 2e-9
+        jac_true = self.jac_vector_vector(x0)
+        jac_diff = approx_derivative(
+            self.fun_vector_vector, x0, method='2-point', bounds=(lb, ub))
+        assert_allclose(jac_diff, jac_true, rtol=1e-6)
+        jac_diff = approx_derivative(
+            self.fun_vector_vector, x0, method='2-point',
+            rel_step=1e-6, bounds=(lb, ub))
+        assert_allclose(jac_diff, jac_true, rtol=1e-6)
+
+        jac_diff = approx_derivative(
+            self.fun_vector_vector, x0, bounds=(lb, ub))
+        assert_allclose(jac_diff, jac_true, rtol=1e-6)
+        jac_diff = approx_derivative(
+            self.fun_vector_vector, x0, rel_step=1e-6, bounds=(lb, ub))
+        assert_allclose(jac_true, jac_diff, rtol=1e-6)
+
+    def test_bound_switches(self):
+        lb = -1e-8
+        ub = 1e-8
+        x0 = 0.0
+        jac_true = self.jac_with_nan(x0)
+        jac_diff_2 = approx_derivative(
+            self.fun_with_nan, x0, method='2-point', rel_step=1e-6,
+            bounds=(lb, ub))
+        jac_diff_3 = approx_derivative(
+            self.fun_with_nan, x0, rel_step=1e-6, bounds=(lb, ub))
+        assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
+        assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
+
+        x0 = 1e-8
+        jac_true = self.jac_with_nan(x0)
+        jac_diff_2 = approx_derivative(
+            self.fun_with_nan, x0, method='2-point', rel_step=1e-6,
+            bounds=(lb, ub))
+        jac_diff_3 = approx_derivative(
+            self.fun_with_nan, x0, rel_step=1e-6, bounds=(lb, ub))
+        assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
+        assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
+
+    def test_non_numpy(self):
+        x0 = 1.0
+        jac_true = self.jac_non_numpy(x0)
+        jac_diff_2 = approx_derivative(self.jac_non_numpy, x0,
+                                       method='2-point')
+        jac_diff_3 = approx_derivative(self.jac_non_numpy, x0)
+        assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
+        assert_allclose(jac_diff_3, jac_true, rtol=1e-8)
+
+        # math.exp cannot handle complex arguments, hence this raises
+        assert_raises(TypeError, approx_derivative, self.jac_non_numpy, x0,
+                      **dict(method='cs'))
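+
+        # Editorial note: the 'cs' (complex-step) scheme evaluates
+        # f(x0 + ih) and takes imag(f)/h, so the objective must accept
+        # complex input; math.exp does not, hence the TypeError.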
+
+    def test_fp(self):
+        # checks that approx_derivative works for FP sizes other than 64-bit.
+        # The example is derived from the minimal working example in gh12991.
+        np.random.seed(1)
+
+        def func(p, x):
+            return p[0] + p[1] * x
+
+        def err(p, x, y):
+            return func(p, x) - y
+
+        x = np.linspace(0, 1, 100, dtype=np.float64)
+        y = np.random.random(100).astype(np.float64)
+        p0 = np.array([-1.0, -1.0])
+
+        jac_fp64 = approx_derivative(err, p0, method='2-point', args=(x, y))
+
+        # parameter vector is float32, func output is float64
+        jac_fp = approx_derivative(err, p0.astype(np.float32),
+                                   method='2-point', args=(x, y))
+        assert err(p0, x, y).dtype == np.float64
+        assert_allclose(jac_fp, jac_fp64, atol=1e-3)
+
+        # parameter vector is float64, func output is float32
+        err_fp32 = lambda p: err(p, x, y).astype(np.float32)
+        jac_fp = approx_derivative(err_fp32, p0,
+                                   method='2-point')
+        assert err_fp32(p0).dtype == np.float32
+        assert_allclose(jac_fp, jac_fp64, atol=1e-3)
+
+        # check upper bound of error on the derivative for 2-point
+        f = lambda x: np.sin(x)
+        g = lambda x: np.cos(x)
+        hess = lambda x: -np.sin(x)
+
+        def calc_atol(h, x0, f, hess, EPS):
+            # truncation error
+            t0 = h / 2 * max(np.abs(hess(x0)), np.abs(hess(x0 + h)))
+            # roundoff error. There may be a divisor (>1) missing from
+            # the following line, so this contribution is possibly
+            # overestimated
+            t1 = EPS / h * max(np.abs(f(x0)), np.abs(f(x0 + h)))
+            return t0 + t1
+
+        for dtype in [np.float16, np.float32, np.float64]:
+            EPS = np.finfo(dtype).eps
+            x0 = np.array(1.0).astype(dtype)
+            h = _compute_absolute_step(None, x0, f(x0), '2-point')
+            atol = calc_atol(h, x0, f, hess, EPS)
+            err = approx_derivative(f, x0, method='2-point',
+                                    abs_step=h) - g(x0)
+            assert abs(err) < atol
+
+    def test_check_derivative(self):
+        x0 = np.array([-10.0, 10])
+        accuracy = check_derivative(self.fun_vector_vector,
+                                    self.jac_vector_vector, x0)
+        assert_(accuracy < 1e-9)
+        accuracy = check_derivative(self.fun_vector_vector,
+                                    self.jac_vector_vector, x0)
+        assert_(accuracy < 1e-6)
+
+        x0 = np.array([0.0, 0.0])
+        accuracy = check_derivative(self.fun_zero_jacobian,
+                                    self.jac_zero_jacobian, x0)
+        assert_(accuracy == 0)
+        accuracy = check_derivative(self.fun_zero_jacobian,
+                                    self.jac_zero_jacobian, x0)
+        assert_(accuracy == 0)
+
+
+class TestApproxDerivativeSparse:
+    # Example from Numerical Optimization 2nd edition, p. 198.
+    def setup_method(self):
+        np.random.seed(0)
+        self.n = 50
+        self.lb = -0.1 * (1 + np.arange(self.n))
+        self.ub = 0.1 * (1 + np.arange(self.n))
+        self.x0 = np.empty(self.n)
+        self.x0[::2] = (1 - 1e-7) * self.lb[::2]
+        self.x0[1::2] = (1 - 1e-7) * self.ub[1::2]
+
+        self.J_true = self.jac(self.x0)
+
+    def fun(self, x):
+        e = x[1:]**3 - x[:-1]**2
+        return np.hstack((0, 3 * e)) + np.hstack((2 * e, 0))
+
+    def jac(self, x):
+        n = x.size
+        J = np.zeros((n, n))
+        J[0, 0] = -4 * x[0]
+        J[0, 1] = 6 * x[1]**2
+        for i in range(1, n - 1):
+            J[i, i - 1] = -6 * x[i-1]
+            J[i, i] = 9 * x[i]**2 - 4 * x[i]
+            J[i, i + 1] = 6 * x[i+1]**2
+        J[-1, -1] = 9 * x[-1]**2
+        J[-1, -2] = -6 * x[-2]
+
+        return J
+
+    def structure(self, n):
+        A = np.zeros((n, n), dtype=int)
+        A[0, 0] = 1
+        A[0, 1] = 1
+        for i in range(1, n - 1):
+            A[i, i - 1: i + 2] = 1
+        A[-1, -1] = 1
+        A[-1, -2] = 1
+
+        return A
+
+    def test_all(self):
+        A = self.structure(self.n)
+        order = np.arange(self.n)
+        groups_1 = group_columns(A, order)
+        np.random.shuffle(order)
+        groups_2 = group_columns(A, order)
+
+        for method, groups, l, u in product(
+                ['2-point', '3-point', 'cs'], [groups_1, groups_2],
+                [-np.inf, self.lb], [np.inf, self.ub]):
+            J = approx_derivative(self.fun, self.x0, method=method,
+                                  bounds=(l, u), sparsity=(A, groups))
+            assert_(isinstance(J, csr_matrix))
+            assert_allclose(J.toarray(), self.J_true, rtol=1e-6)
+
+            rel_step = np.full_like(self.x0, 1e-8)
+            rel_step[::2] *= -1
+            J = approx_derivative(self.fun, self.x0, method=method,
+                                  rel_step=rel_step, sparsity=(A, groups))
+            assert_allclose(J.toarray(), self.J_true, rtol=1e-5)
+
+    def test_no_precomputed_groups(self):
+        A = self.structure(self.n)
+        J = approx_derivative(self.fun, self.x0, sparsity=A)
+        assert_allclose(J.toarray(), self.J_true, rtol=1e-6)
+
+    def test_equivalence(self):
+        structure = np.ones((self.n, self.n), dtype=int)
+        groups = np.arange(self.n)
+        for method in ['2-point', '3-point', 'cs']:
+            J_dense = approx_derivative(self.fun, self.x0, method=method)
+            J_sparse = approx_derivative(
+                self.fun, self.x0, sparsity=(structure, groups), method=method)
+            assert_allclose(J_dense, J_sparse.toarray(),
+                            rtol=5e-16, atol=7e-15)
+
+    def test_check_derivative(self):
+        def jac(x):
+            return csr_matrix(self.jac(x))
+
+        accuracy = check_derivative(self.fun, jac, self.x0,
+                                    bounds=(self.lb, self.ub))
+        assert_(accuracy < 1e-9)
+
+        accuracy = check_derivative(self.fun, jac, self.x0,
+                                    bounds=(self.lb, self.ub))
+        assert_(accuracy < 1e-9)
+
+
+class TestApproxDerivativeLinearOperator:
+
+    def fun_scalar_scalar(self, x):
+        return np.sinh(x)
+
+    def jac_scalar_scalar(self, x):
+        return np.cosh(x)
+
+    def fun_scalar_vector(self, x):
+        return np.array([x[0]**2, np.tan(x[0]), np.exp(x[0])])
+
+    def jac_scalar_vector(self, x):
+        return np.array(
+            [2 * x[0], np.cos(x[0]) ** -2, np.exp(x[0])]).reshape(-1, 1)
+
+    def fun_vector_scalar(self, x):
+        return np.sin(x[0] * x[1]) * np.log(x[0])
+
+    def jac_vector_scalar(self, x):
+        return np.array([
+            x[1] * np.cos(x[0] * x[1]) * np.log(x[0]) +
+            np.sin(x[0] * x[1]) / x[0],
+            x[0] * np.cos(x[0] * x[1]) * np.log(x[0])
+        ])
+
+    def fun_vector_vector(self, x):
+        return np.array([
+            x[0] * np.sin(x[1]),
+            x[1] * np.cos(x[0]),
+            x[0] ** 3 * x[1] ** -0.5
+        ])
+
+    def jac_vector_vector(self, x):
+        return np.array([
+            [np.sin(x[1]), x[0] * np.cos(x[1])],
+            [-x[1] * np.sin(x[0]), np.cos(x[0])],
+            [3 * x[0] ** 2 * x[1] ** -0.5, -0.5 * x[0] ** 3 * x[1] ** -1.5]
+        ])
+
+    def test_scalar_scalar(self):
+        x0 = 1.0
+        jac_diff_2 = approx_derivative(self.fun_scalar_scalar, x0,
+                                       method='2-point',
+                                       as_linear_operator=True)
+        jac_diff_3 = approx_derivative(self.fun_scalar_scalar, x0,
+                                       as_linear_operator=True)
+        jac_diff_4 = approx_derivative(self.fun_scalar_scalar, x0,
+                                       method='cs',
+                                       as_linear_operator=True)
+        jac_true = self.jac_scalar_scalar(x0)
+        np.random.seed(1)
+        for i in range(10):
+            p = np.random.uniform(-10, 10, size=(1,))
+            assert_allclose(jac_diff_2.dot(p), jac_true*p,
+                            rtol=1e-5)
+            assert_allclose(jac_diff_3.dot(p), jac_true*p,
+                            rtol=5e-6)
+            assert_allclose(jac_diff_4.dot(p), jac_true*p,
+                            rtol=5e-6)
+
+    def test_scalar_vector(self):
+        x0 = 0.5
+        jac_diff_2 = approx_derivative(self.fun_scalar_vector, x0,
+                                       method='2-point',
+                                       as_linear_operator=True)
+        jac_diff_3 = approx_derivative(self.fun_scalar_vector, x0,
+                                       as_linear_operator=True)
+        jac_diff_4 = approx_derivative(self.fun_scalar_vector, x0,
+                                       method='cs',
+                                       as_linear_operator=True)
+        jac_true = self.jac_scalar_vector(np.atleast_1d(x0))
+        np.random.seed(1)
+        for i in range(10):
+            p = np.random.uniform(-10, 10, size=(1,))
+            assert_allclose(jac_diff_2.dot(p), jac_true.dot(p),
+                            rtol=1e-5)
+            assert_allclose(jac_diff_3.dot(p), jac_true.dot(p),
+                            rtol=5e-6)
+            assert_allclose(jac_diff_4.dot(p), jac_true.dot(p),
+                            rtol=5e-6)
+
+    def test_vector_scalar(self):
+        x0 = np.array([100.0, -0.5])
+        jac_diff_2 = approx_derivative(self.fun_vector_scalar, x0,
+                                       method='2-point',
+                                       as_linear_operator=True)
+        jac_diff_3 = approx_derivative(self.fun_vector_scalar, x0,
+                                       as_linear_operator=True)
+        jac_diff_4 = approx_derivative(self.fun_vector_scalar, x0,
+                                       method='cs',
+                                       as_linear_operator=True)
+        jac_true = self.jac_vector_scalar(x0)
+        np.random.seed(1)
+        for i in range(10):
+            p = np.random.uniform(-10, 10, size=x0.shape)
+            assert_allclose(jac_diff_2.dot(p), np.atleast_1d(jac_true.dot(p)),
+                            rtol=1e-5)
+            assert_allclose(jac_diff_3.dot(p), np.atleast_1d(jac_true.dot(p)),
+                            rtol=5e-6)
+            assert_allclose(jac_diff_4.dot(p), np.atleast_1d(jac_true.dot(p)),
+                            rtol=1e-7)
+
+    def test_vector_vector(self):
+        x0 = np.array([-100.0, 0.2])
+        jac_diff_2 = approx_derivative(self.fun_vector_vector, x0,
+                                       method='2-point',
+                                       as_linear_operator=True)
+        jac_diff_3 = approx_derivative(self.fun_vector_vector, x0,
+                                       as_linear_operator=True)
+        jac_diff_4 = approx_derivative(self.fun_vector_vector, x0,
+                                       method='cs',
+                                       as_linear_operator=True)
+        jac_true = self.jac_vector_vector(x0)
+        np.random.seed(1)
+        for i in range(10):
+            p = np.random.uniform(-10, 10, size=x0.shape)
+            assert_allclose(jac_diff_2.dot(p), jac_true.dot(p), rtol=1e-5)
+            assert_allclose(jac_diff_3.dot(p), jac_true.dot(p), rtol=1e-6)
+            assert_allclose(jac_diff_4.dot(p), jac_true.dot(p), rtol=1e-7)
+
+    def test_exception(self):
+        x0 = np.array([-100.0, 0.2])
+        assert_raises(ValueError, approx_derivative,
+                      self.fun_vector_vector, x0,
+                      method='2-point', bounds=(1, np.inf))
+
+
+def test_absolute_step_sign():
+    # test for gh12487
+    # if an absolute step is specified for 2-point differences, make sure
+    # that the side corresponds to the sign of the step: if the step is
+    # positive then forward differences should be used; if the step is
+    # negative then backward differences should be used.
+
+    # function has double discontinuity at x = [-1, -1]
+    # first component is \/, second component is /\
+    def f(x):
+        return -np.abs(x[0] + 1) + np.abs(x[1] + 1)
+
+    # check that the forward difference is used
+    grad = approx_derivative(f, [-1, -1], method='2-point', abs_step=1e-8)
+    assert_allclose(grad, [-1.0, 1.0])
+
+    # check that the backward difference is used
+    grad = approx_derivative(f, [-1, -1], method='2-point', abs_step=-1e-8)
+    assert_allclose(grad, [1.0, -1.0])
+
+    # check that the forward difference is used with a step for both
+    # parameters
+    grad = approx_derivative(
+        f, [-1, -1], method='2-point', abs_step=[1e-8, 1e-8]
+    )
+    assert_allclose(grad, [-1.0, 1.0])
+
+    # check that we can mix forward and backward steps.
+    grad = approx_derivative(
+        f, [-1, -1], method='2-point', abs_step=[1e-8, -1e-8]
+    )
+    assert_allclose(grad, [-1.0, -1.0])
+    grad = approx_derivative(
+        f, [-1, -1], method='2-point', abs_step=[-1e-8, 1e-8]
+    )
+    assert_allclose(grad, [1.0, 1.0])
+
+    # the forward step should reverse to a backward step if it runs into a
+    # bound.
+    # This is also covered by TestAdjustSchemeToBounds, but only for a
+    # lower-level function.
+    grad = approx_derivative(
+        f, [-1, -1], method='2-point', abs_step=1e-8,
+        bounds=(-np.inf, -1)
+    )
+    assert_allclose(grad, [1.0, -1.0])
+
+    grad = approx_derivative(
+        f, [-1, -1], method='2-point', abs_step=-1e-8, bounds=(-1, np.inf)
+    )
+    assert_allclose(grad, [-1.0, 1.0])
+
+
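+# A minimal sketch (illustrative names, not part of the scipy suite) of the
+# behaviour exercised by test_absolute_step_sign above: with a '2-point'
+# scheme, the sign of the absolute step selects the side of the quotient.
+def _demo_step_sign(f=lambda x: -abs(x + 1.0), x=-1.0, h=1e-8):
+    forward = (f(x + h) - f(x)) / h       # positive step: right-hand slope
+    backward = (f(x - h) - f(x)) / -h     # negative step: left-hand slope
+    return forward, backward              # (-1.0, 1.0) at the kink x = -1
+
+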
+def test__compute_absolute_step():
+    # tests calculation of absolute step from rel_step
+    methods = ['2-point', '3-point', 'cs']
+
+    x0 = np.array([1e-5, 0, 1, 1e5])
+
+    EPS = np.finfo(np.float64).eps
+    relative_step = {
+        "2-point": EPS**0.5,
+        "3-point": EPS**(1/3),
+        "cs": EPS**0.5
+    }
+    f0 = np.array(1.0)
+
+    for method in methods:
+        rel_step = relative_step[method]
+        correct_step = np.array([rel_step,
+                                 rel_step * 1.,
+                                 rel_step * 1.,
+                                 rel_step * np.abs(x0[3])])
+
+        abs_step = _compute_absolute_step(None, x0, f0, method)
+        assert_allclose(abs_step, correct_step)
+
+        sign_x0 = (-x0 >= 0).astype(float) * 2 - 1
+        abs_step = _compute_absolute_step(None, -x0, f0, method)
+        assert_allclose(abs_step, sign_x0 * correct_step)
+
+    # if a relative step is provided it should be used
+    rel_step = np.array([0.1, 1, 10, 100])
+    correct_step = np.array([rel_step[0] * x0[0],
+                             relative_step['2-point'],
+                             rel_step[2] * 1.,
+                             rel_step[3] * np.abs(x0[3])])
+
+    abs_step = _compute_absolute_step(rel_step, x0, f0, '2-point')
+    assert_allclose(abs_step, correct_step)
+
+    sign_x0 = (-x0 >= 0).astype(float) * 2 - 1
+    abs_step = _compute_absolute_step(rel_step, -x0, f0, '2-point')
+    assert_allclose(abs_step, sign_x0 * correct_step)
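+
+
+# A small illustration, not part of the suite, of the default-step rule the
+# assertions above encode: h = rel_step * sign(x0) * max(1, |x0|), with
+# sign(0) taken as +1 and rel_step defaulting to EPS**0.5 ('2-point', 'cs')
+# or EPS**(1/3) ('3-point').
+def _demo_default_abs_step(x0, method='2-point'):
+    eps = np.finfo(np.float64).eps
+    rel = {'2-point': eps**0.5, '3-point': eps**(1/3), 'cs': eps**0.5}[method]
+    sign = np.where(np.asarray(x0) >= 0, 1.0, -1.0)
+    return rel * sign * np.maximum(1.0, np.abs(x0))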
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test__remove_redundancy.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test__remove_redundancy.py
new file mode 100644
index 00000000..f1354be4
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test__remove_redundancy.py
@@ -0,0 +1,255 @@
+"""
+Unit test for Linear Programming via Simplex Algorithm.
+"""
+
+# TODO: add tests for:
+# https://github.com/scipy/scipy/issues/5400
+# https://github.com/scipy/scipy/issues/6690
+
+import numpy as np
+from numpy.testing import (
+    assert_,
+    assert_allclose,
+    assert_equal)
+
+from .test_linprog import magic_square
+from scipy.optimize._remove_redundancy import _remove_redundancy_svd
+from scipy.optimize._remove_redundancy import _remove_redundancy_pivot_dense
+from scipy.optimize._remove_redundancy import _remove_redundancy_pivot_sparse
+from scipy.optimize._remove_redundancy import _remove_redundancy_id
+
+from scipy.sparse import csc_matrix
+
+
+def setup_module():
+    np.random.seed(2017)
+
+
+def _assert_success(
+        res,
+        desired_fun=None,
+        desired_x=None,
+        rtol=1e-7,
+        atol=1e-7):
+    # res: linprog result object
+    # desired_fun: desired objective function value or None
+    # desired_x: desired solution or None
+    assert_(res.success)
+    assert_equal(res.status, 0)
+    if desired_fun is not None:
+        assert_allclose(
+            res.fun,
+            desired_fun,
+            err_msg="converged to an unexpected objective value",
+            rtol=rtol,
+            atol=atol)
+    if desired_x is not None:
+        assert_allclose(
+            res.x,
+            desired_x,
+            err_msg="converged to an unexpected solution",
+            rtol=rtol,
+            atol=atol)
+
+
+def redundancy_removed(A, B):
+    """Checks whether a matrix contains only independent rows of another"""
+    for rowA in A:
+        # `rowA in B` is not a reliable check
+        for rowB in B:
+            if np.all(rowA == rowB):
+                break
+        else:
+            return False
+    return A.shape[0] == np.linalg.matrix_rank(A) == np.linalg.matrix_rank(B)
+
+
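+def _demo_redundancy_removed():
+    # Hypothetical example, not part of the suite: B is A with a repeated
+    # row, so A holds exactly the independent rows of B.
+    A = np.array([[1., 0.], [0., 1.]])
+    B = np.array([[1., 0.], [0., 1.], [1., 0.]])
+    return redundancy_removed(A, B)  # expected: True
+
+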
+class RRCommonTests:
+    def test_no_redundancy(self):
+        m, n = 10, 10
+        A0 = np.random.rand(m, n)
+        b0 = np.random.rand(m)
+        A1, b1, status, message = self.rr(A0, b0)
+        assert_allclose(A0, A1)
+        assert_allclose(b0, b1)
+        assert_equal(status, 0)
+
+    def test_infeasible_zero_row(self):
+        A = np.eye(3)
+        A[1, :] = 0
+        b = np.random.rand(3)
+        A1, b1, status, message = self.rr(A, b)
+        assert_equal(status, 2)
+
+    def test_remove_zero_row(self):
+        A = np.eye(3)
+        A[1, :] = 0
+        b = np.random.rand(3)
+        b[1] = 0
+        A1, b1, status, message = self.rr(A, b)
+        assert_equal(status, 0)
+        assert_allclose(A1, A[[0, 2], :])
+        assert_allclose(b1, b[[0, 2]])
+
+    def test_infeasible_m_gt_n(self):
+        m, n = 20, 10
+        A0 = np.random.rand(m, n)
+        b0 = np.random.rand(m)
+        A1, b1, status, message = self.rr(A0, b0)
+        assert_equal(status, 2)
+
+    def test_infeasible_m_eq_n(self):
+        m, n = 10, 10
+        A0 = np.random.rand(m, n)
+        b0 = np.random.rand(m)
+        A0[-1, :] = 2 * A0[-2, :]
+        A1, b1, status, message = self.rr(A0, b0)
+        assert_equal(status, 2)
+
+    def test_infeasible_m_lt_n(self):
+        m, n = 9, 10
+        A0 = np.random.rand(m, n)
+        b0 = np.random.rand(m)
+        A0[-1, :] = np.arange(m - 1).dot(A0[:-1])
+        A1, b1, status, message = self.rr(A0, b0)
+        assert_equal(status, 2)
+
+    def test_m_gt_n(self):
+        np.random.seed(2032)
+        m, n = 20, 10
+        A0 = np.random.rand(m, n)
+        b0 = np.random.rand(m)
+        x = np.linalg.solve(A0[:n, :], b0[:n])
+        b0[n:] = A0[n:, :].dot(x)
+        A1, b1, status, message = self.rr(A0, b0)
+        assert_equal(status, 0)
+        assert_equal(A1.shape[0], n)
+        assert_equal(np.linalg.matrix_rank(A1), n)
+
+    def test_m_gt_n_rank_deficient(self):
+        m, n = 20, 10
+        A0 = np.zeros((m, n))
+        A0[:, 0] = 1
+        b0 = np.ones(m)
+        A1, b1, status, message = self.rr(A0, b0)
+        assert_equal(status, 0)
+        assert_allclose(A1, A0[0:1, :])
+        assert_allclose(b1, b0[0])
+
+    def test_m_lt_n_rank_deficient(self):
+        m, n = 9, 10
+        A0 = np.random.rand(m, n)
+        b0 = np.random.rand(m)
+        A0[-1, :] = np.arange(m - 1).dot(A0[:-1])
+        b0[-1] = np.arange(m - 1).dot(b0[:-1])
+        A1, b1, status, message = self.rr(A0, b0)
+        assert_equal(status, 0)
+        assert_equal(A1.shape[0], 8)
+        assert_equal(np.linalg.matrix_rank(A1), 8)
+
+    def test_dense1(self):
+        A = np.ones((6, 6))
+        A[0, :3] = 0
+        A[1, 3:] = 0
+        A[3:, ::2] = -1
+        A[3, :2] = 0
+        A[4, 2:] = 0
+        b = np.zeros(A.shape[0])
+
+        A1, b1, status, message = self.rr(A, b)
+        assert_(redundancy_removed(A1, A))
+        assert_equal(status, 0)
+
+    def test_dense2(self):
+        A = np.eye(6)
+        A[-2, -1] = 1
+        A[-1, :] = 1
+        b = np.zeros(A.shape[0])
+        A1, b1, status, message = self.rr(A, b)
+        assert_(redundancy_removed(A1, A))
+        assert_equal(status, 0)
+
+    def test_dense3(self):
+        A = np.eye(6)
+        A[-2, -1] = 1
+        A[-1, :] = 1
+        b = np.random.rand(A.shape[0])
+        b[-1] = np.sum(b[:-1])
+        A1, b1, status, message = self.rr(A, b)
+        assert_(redundancy_removed(A1, A))
+        assert_equal(status, 0)
+
+    def test_m_gt_n_sparse(self):
+        np.random.seed(2013)
+        m, n = 20, 5
+        p = 0.1
+        A = np.random.rand(m, n)
+        A[np.random.rand(m, n) > p] = 0
+        rank = np.linalg.matrix_rank(A)
+        b = np.zeros(A.shape[0])
+        A1, b1, status, message = self.rr(A, b)
+        assert_equal(status, 0)
+        assert_equal(A1.shape[0], rank)
+        assert_equal(np.linalg.matrix_rank(A1), rank)
+
+    def test_m_lt_n_sparse(self):
+        np.random.seed(2017)
+        m, n = 20, 50
+        p = 0.05
+        A = np.random.rand(m, n)
+        A[np.random.rand(m, n) > p] = 0
+        rank = np.linalg.matrix_rank(A)
+        b = np.zeros(A.shape[0])
+        A1, b1, status, message = self.rr(A, b)
+        assert_equal(status, 0)
+        assert_equal(A1.shape[0], rank)
+        assert_equal(np.linalg.matrix_rank(A1), rank)
+
+    def test_m_eq_n_sparse(self):
+        np.random.seed(2017)
+        m, n = 100, 100
+        p = 0.01
+        A = np.random.rand(m, n)
+        A[np.random.rand(m, n) > p] = 0
+        rank = np.linalg.matrix_rank(A)
+        b = np.zeros(A.shape[0])
+        A1, b1, status, message = self.rr(A, b)
+        assert_equal(status, 0)
+        assert_equal(A1.shape[0], rank)
+        assert_equal(np.linalg.matrix_rank(A1), rank)
+
+    def test_magic_square(self):
+        A, b, c, numbers, _ = magic_square(3)
+        A1, b1, status, message = self.rr(A, b)
+        assert_equal(status, 0)
+        assert_equal(A1.shape[0], 23)
+        assert_equal(np.linalg.matrix_rank(A1), 23)
+
+    def test_magic_square2(self):
+        A, b, c, numbers, _ = magic_square(4)
+        A1, b1, status, message = self.rr(A, b)
+        assert_equal(status, 0)
+        assert_equal(A1.shape[0], 39)
+        assert_equal(np.linalg.matrix_rank(A1), 39)
+
+
+class TestRRSVD(RRCommonTests):
+    def rr(self, A, b):
+        return _remove_redundancy_svd(A, b)
+
+
+class TestRRPivotDense(RRCommonTests):
+    def rr(self, A, b):
+        return _remove_redundancy_pivot_dense(A, b)
+
+
+class TestRRID(RRCommonTests):
+    def rr(self, A, b):
+        return _remove_redundancy_id(A, b)
+
+
+class TestRRPivotSparse(RRCommonTests):
+    def rr(self, A, b):
+        rr_res = _remove_redundancy_pivot_sparse(csc_matrix(A), b)
+        A1, b1, status, message = rr_res
+        return A1.toarray(), b1, status, message
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test__root.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test__root.py
new file mode 100644
index 00000000..f8d12e1e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test__root.py
@@ -0,0 +1,85 @@
+"""
+Unit tests for optimization routines from _root.py.
+"""
+from numpy.testing import assert_
+from pytest import raises as assert_raises
+import numpy as np
+
+from scipy.optimize import root
+
+
+class TestRoot:
+    def test_tol_parameter(self):
+        # Check that the root() tol= argument does something
+        def func(z):
+            x, y = z
+            return np.array([x**3 - 1, y**3 - 1])
+
+        def dfunc(z):
+            x, y = z
+            return np.array([[3*x**2, 0], [0, 3*y**2]])
+
+        for method in ['hybr', 'lm', 'broyden1', 'broyden2', 'anderson',
+                       'diagbroyden', 'krylov']:
+            if method in ('linearmixing', 'excitingmixing'):
+                # doesn't converge
+                continue
+
+            if method in ('hybr', 'lm'):
+                jac = dfunc
+            else:
+                jac = None
+
+            sol1 = root(func, [1.1, 1.1], jac=jac, tol=1e-4, method=method)
+            sol2 = root(func, [1.1, 1.1], jac=jac, tol=0.5, method=method)
+            msg = "%s: %s vs. %s" % (method, func(sol1.x), func(sol2.x))
+            assert_(sol1.success, msg)
+            assert_(sol2.success, msg)
+            assert_(abs(func(sol1.x)).max() < abs(func(sol2.x)).max(),
+                    msg)
+
+    def test_tol_norm(self):
+
+        def norm(x):
+            return abs(x[0])
+
+        for method in ['excitingmixing',
+                       'diagbroyden',
+                       'linearmixing',
+                       'anderson',
+                       'broyden1',
+                       'broyden2',
+                       'krylov']:
+
+            root(np.zeros_like, np.zeros(2), method=method,
+                 options={"tol_norm": norm})
+
+    def test_minimize_scalar_coerce_args_param(self):
+        # github issue #3503
+        def func(z, f=1):
+            x, y = z
+            return np.array([x**3 - 1, y**3 - f])
+        root(func, [1.1, 1.1], args=1.5)
+
+    def test_f_size(self):
+        # gh-8320
+        # check that decreasing the size of the returned array raises an error
+        # and doesn't segfault
+        class fun:
+            def __init__(self):
+                self.count = 0
+
+            def __call__(self, x):
+                self.count += 1
+
+                if not (self.count % 5):
+                    ret = x[0] + 0.5 * (x[0] - x[1]) ** 3 - 1.0
+                else:
+                    ret = ([x[0] + 0.5 * (x[0] - x[1]) ** 3 - 1.0,
+                           0.5 * (x[1] - x[0]) ** 3 + x[1]])
+
+                return ret
+
+        F = fun()
+        with assert_raises(ValueError):
+            root(F, [0.1, 0.0], method='lm')
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test__shgo.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test__shgo.py
new file mode 100644
index 00000000..f8be86e6
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test__shgo.py
@@ -0,0 +1,812 @@
+import logging
+import numpy
+from numpy.testing import assert_allclose
+import pytest
+from pytest import raises as assert_raises, warns
+from scipy.optimize import shgo, Bounds, minimize
+from scipy.optimize._shgo import SHGO
+
+
+class StructTestFunction:
+    def __init__(self, bounds, expected_x, expected_fun=None,
+                 expected_xl=None, expected_funl=None):
+        self.bounds = bounds
+        self.expected_x = expected_x
+        self.expected_fun = expected_fun
+        self.expected_xl = expected_xl
+        self.expected_funl = expected_funl
+
+
+def wrap_constraints(g):
+    cons = []
+    if g is not None:
+        if not isinstance(g, (tuple, list)):
+            g = (g,)
+        for g in g:
+            cons.append({'type': 'ineq',
+                         'fun': g})
+        cons = tuple(cons)
+    else:
+        cons = None
+    return cons
+
+
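+# Illustrative only, not part of the suite: wrap_constraints turns one or
+# more scalar inequality functions g(x) >= 0 into the tuple-of-dicts form
+# accepted by shgo's `constraints` argument.
+def _demo_wrap_constraints():
+    cons = wrap_constraints(lambda x: 1.0 - x[0])
+    return cons  # ({'type': 'ineq', 'fun': <lambda>},)
+
+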
+class StructTest1(StructTestFunction):
+    def f(self, x):
+        return x[0] ** 2 + x[1] ** 2
+
+    def g(x):
+        return -(numpy.sum(x, axis=0) - 6.0)
+
+    cons = wrap_constraints(g)
+
+
+test1_1 = StructTest1(bounds=[(-1, 6), (-1, 6)],
+                      expected_x=[0, 0])
+test1_2 = StructTest1(bounds=[(0, 1), (0, 1)],
+                      expected_x=[0, 0])
+test1_3 = StructTest1(bounds=[(None, None), (None, None)],
+                      expected_x=[0, 0])
+
+
+class StructTest2(StructTestFunction):
+    """
+    Scalar function with several minima, used to test retrieval of all minimizers
+    """
+
+    def f(self, x):
+        return (x - 30) * numpy.sin(x)
+
+    def g(x):
+        return 58 - numpy.sum(x, axis=0)
+
+    cons = wrap_constraints(g)
+
+
+test2_1 = StructTest2(bounds=[(0, 60)],
+                      expected_x=[1.53567906],
+                      expected_fun=-28.44677132,
+                      # Important: test that funl return is in the correct order
+                      expected_xl=numpy.array([[1.53567906],
+                                               [55.01782167],
+                                               [7.80894889],
+                                               [48.74797493],
+                                               [14.07445705],
+                                               [42.4913859],
+                                               [20.31743841],
+                                               [36.28607535],
+                                               [26.43039605],
+                                               [30.76371366]]),
+
+                      expected_funl=numpy.array([-28.44677132, -24.99785984,
+                                                 -22.16855376, -18.72136195,
+                                                 -15.89423937, -12.45154942,
+                                                 -9.63133158, -6.20801301,
+                                                 -3.43727232, -0.46353338])
+                      )
+
+test2_2 = StructTest2(bounds=[(0, 4.5)],
+                      expected_x=[1.53567906],
+                      expected_fun=[-28.44677132],
+                      expected_xl=numpy.array([[1.53567906]]),
+                      expected_funl=numpy.array([-28.44677132])
+                      )
+
+
+class StructTest3(StructTestFunction):
+    """
+    Hock and Schittkowski 18 problem (HS18). Hock and Schittkowski (1981)
+    http://www.ai7.uni-bayreuth.de/test_problem_coll.pdf
+    Minimize: f = 0.01 * (x_1)**2 + (x_2)**2
+
+    Subject to: x_1 * x_2 - 25.0 >= 0,
+                (x_1)**2 + (x_2)**2 - 25.0 >= 0,
+                2 <= x_1 <= 50,
+                0 <= x_2 <= 50.
+
+    Approx. Answer:
+        f([(250)**0.5 , (2.5)**0.5]) = 5.0
+
+
+    """
+
+    def f(self, x):
+        return 0.01 * (x[0]) ** 2 + (x[1]) ** 2
+
+    def g1(x):
+        return x[0] * x[1] - 25.0
+
+    def g2(x):
+        return x[0] ** 2 + x[1] ** 2 - 25.0
+
+    g = (g1, g2)
+
+    cons = wrap_constraints(g)
+
+
+test3_1 = StructTest3(bounds=[(2, 50), (0, 50)],
+                      expected_x=[250 ** 0.5, 2.5 ** 0.5],
+                      expected_fun=5.0
+                      )
+
+
+class StructTest4(StructTestFunction):
+    """
+    Hock and Schittkowski 11 problem (HS11). Hock and Schittkowski (1981)
+
+    NOTE: Not found in the original reference to the HS collection; refer to
+          Henderson (2015), problem 7, instead. 02.03.2016
+    """
+
+    def f(self, x):
+        return ((x[0] - 10) ** 2 + 5 * (x[1] - 12) ** 2 + x[2] ** 4
+                + 3 * (x[3] - 11) ** 2 + 10 * x[4] ** 6 + 7 * x[5] ** 2 + x[
+                    6] ** 4
+                - 4 * x[5] * x[6] - 10 * x[5] - 8 * x[6]
+                )
+
+    def g1(x):
+        return -(2 * x[0] ** 2 + 3 * x[1] ** 4 + x[2] + 4 * x[3] ** 2
+                 + 5 * x[4] - 127)
+
+    def g2(x):
+        return -(7 * x[0] + 3 * x[1] + 10 * x[2] ** 2 + x[3] - x[4] - 282.0)
+
+    def g3(x):
+        return -(23 * x[0] + x[1] ** 2 + 6 * x[5] ** 2 - 8 * x[6] - 196)
+
+    def g4(x):
+        return -(4 * x[0] ** 2 + x[1] ** 2 - 3 * x[0] * x[1] + 2 * x[2] ** 2
+                 + 5 * x[5] - 11 * x[6])
+
+    g = (g1, g2, g3, g4)
+
+    cons = wrap_constraints(g)
+
+
+test4_1 = StructTest4(bounds=[(-10, 10), ] * 7,
+                      expected_x=[2.330499, 1.951372, -0.4775414,
+                                  4.365726, -0.6244870, 1.038131, 1.594227],
+                      expected_fun=680.6300573
+                      )
+
+
+class StructTest5(StructTestFunction):
+    def f(self, x):
+        return (-(x[1] + 47.0)
+                * numpy.sin(numpy.sqrt(abs(x[0] / 2.0 + (x[1] + 47.0))))
+                - x[0] * numpy.sin(numpy.sqrt(abs(x[0] - (x[1] + 47.0))))
+                )
+
+    g = None
+    cons = wrap_constraints(g)
+
+
+test5_1 = StructTest5(bounds=[(-512, 512), (-512, 512)],
+                      expected_fun=[-959.64066272085051],
+                      expected_x=[512., 404.23180542])
+
+
+class StructTestLJ(StructTestFunction):
+    """
+    Lennard-Jones objective function. Used to test symmetry constraint settings.
+    """
+
+    def f(self, x, *args):
+        self.N = args[0]
+        k = int(self.N / 3)
+        s = 0.0
+
+        for i in range(k - 1):
+            for j in range(i + 1, k):
+                a = 3 * i
+                b = 3 * j
+                xd = x[a] - x[b]
+                yd = x[a + 1] - x[b + 1]
+                zd = x[a + 2] - x[b + 2]
+                ed = xd * xd + yd * yd + zd * zd
+                ud = ed * ed * ed
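+                # ud = ed**3 = r**6 for pair distance r, so each pair adds
+                # r**-12 - 2*r**-6: a Lennard-Jones term with unit well depth.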
+                if ed > 0.0:
+                    s += (1.0 / ud - 2.0) / ud
+
+        return s
+
+    g = None
+    cons = wrap_constraints(g)
+
+
+N = 6
+boundsLJ = list(zip([-4.0] * 6, [4.0] * 6))
+
+testLJ = StructTestLJ(bounds=boundsLJ,
+                      expected_fun=[-1.0],
+                      expected_x=[-2.71247337e-08,
+                                  -2.71247337e-08,
+                                  -2.50000222e+00,
+                                  -2.71247337e-08,
+                                  -2.71247337e-08,
+                                  -1.50000222e+00]
+                      )
+
+
+class StructTestTable(StructTestFunction):
+    def f(self, x):
+        if x[0] == 3.0 and x[1] == 3.0:
+            return 50
+        else:
+            return 100
+
+    g = None
+    cons = wrap_constraints(g)
+
+
+test_table = StructTestTable(bounds=[(-10, 10), (-10, 10)],
+                             expected_fun=[50],
+                             expected_x=[3.0, 3.0])
+
+
+class StructTestInfeasible(StructTestFunction):
+    """
+    Test function with no feasible domain.
+    """
+
+    def f(self, x, *args):
+        return x[0] ** 2 + x[1] ** 2
+
+    def g1(x):
+        return x[0] + x[1] - 1
+
+    def g2(x):
+        return -(x[0] + x[1] - 1)
+
+    def g3(x):
+        return -x[0] + x[1] - 1
+
+    def g4(x):
+        return -(-x[0] + x[1] - 1)
+
+    g = (g1, g2, g3, g4)
+    cons = wrap_constraints(g)
+
+
+test_infeasible = StructTestInfeasible(bounds=[(2, 50), (-1, 1)],
+                                       expected_fun=None,
+                                       expected_x=None
+                                       )
+
+
+def run_test(test, args=(), test_atol=1e-5, n=128, iters=None,
+             callback=None, minimizer_kwargs=None, options=None,
+             sampling_method='sobol'):
+    res = shgo(test.f, test.bounds, args=args, constraints=test.cons,
+               n=n, iters=iters, callback=callback,
+               minimizer_kwargs=minimizer_kwargs, options=options,
+               sampling_method=sampling_method)
+
+    logging.info(res)
+
+    if test.expected_x is not None:
+        numpy.testing.assert_allclose(res.x, test.expected_x,
+                                      rtol=test_atol,
+                                      atol=test_atol)
+
+    # (Optional tests)
+    if test.expected_fun is not None:
+        numpy.testing.assert_allclose(res.fun,
+                                      test.expected_fun,
+                                      atol=test_atol)
+
+    if test.expected_xl is not None:
+        numpy.testing.assert_allclose(res.xl,
+                                      test.expected_xl,
+                                      atol=test_atol)
+
+    if test.expected_funl is not None:
+        numpy.testing.assert_allclose(res.funl,
+                                      test.expected_funl,
+                                      atol=test_atol)
+    return
+
+
+# Base test functions:
+class TestShgoSobolTestFunctions:
+    """
+    Global optimization tests with Sobol sampling:
+    """
+
+    # Sobol algorithm
+    def test_f1_1_sobol(self):
+        """Multivariate test function 1:
+        x[0]**2 + x[1]**2 with bounds=[(-1, 6), (-1, 6)]"""
+        run_test(test1_1)
+
+    def test_f1_2_sobol(self):
+        """Multivariate test function 1:
+         x[0]**2 + x[1]**2 with bounds=[(0, 1), (0, 1)]"""
+        run_test(test1_2)
+
+    def test_f1_3_sobol(self):
+        """Multivariate test function 1:
+        x[0]**2 + x[1]**2 with bounds=[(None, None),(None, None)]"""
+        run_test(test1_3)
+
+    def test_f2_1_sobol(self):
+        """Univariate test function on
+        f(x) = (x - 30) * sin(x) with bounds=[(0, 60)]"""
+        run_test(test2_1)
+
+    def test_f2_2_sobol(self):
+        """Univariate test function on
+        f(x) = (x - 30) * sin(x) bounds=[(0, 4.5)]"""
+        run_test(test2_2)
+
+    def test_f3_sobol(self):
+        """NLP: Hock and Schittkowski problem 18"""
+        run_test(test3_1)
+
+    @pytest.mark.slow
+    def test_f4_sobol(self):
+        """NLP: (High-dimensional) Hock and Schittkowski 11 problem (HS11)"""
+        # run_test(test4_1, n=500)
+        # run_test(test4_1, n=800)
+        options = {'infty_constraints': False}
+        run_test(test4_1, n=2048, options=options)
+
+    def test_f5_1_sobol(self):
+        """NLP: Eggholder, multimodal"""
+        run_test(test5_1, n=64)
+
+    def test_f5_2_sobol(self):
+        """NLP: Eggholder, multimodal"""
+        # run_test(test5_1, n=60, iters=5)
+        run_test(test5_1, n=128, iters=5)
+
+        # def test_t911(self):
+        #    """1-D tabletop function"""
+        #    run_test(test11_1)
+
+
+class TestShgoSimplicialTestFunctions:
+    """
+    Global optimization tests with Simplicial sampling:
+    """
+
+    def test_f1_1_simplicial(self):
+        """Multivariate test function 1:
+        x[0]**2 + x[1]**2 with bounds=[(-1, 6), (-1, 6)]"""
+        run_test(test1_1, n=1, sampling_method='simplicial')
+
+    def test_f1_2_simplicial(self):
+        """Multivariate test function 1:
+        x[0]**2 + x[1]**2 with bounds=[(0, 1), (0, 1)]"""
+        run_test(test1_2, n=1, sampling_method='simplicial')
+
+    def test_f1_3_simplicial(self):
+        """Multivariate test function 1: x[0]**2 + x[1]**2
+        with bounds=[(None, None),(None, None)]"""
+        run_test(test1_3, n=1, sampling_method='simplicial')
+
+    def test_f2_1_simplicial(self):
+        """Univariate test function on
+        f(x) = (x - 30) * sin(x) with bounds=[(0, 60)]"""
+        options = {'minimize_every_iter': False}
+        run_test(test2_1, iters=7, options=options,
+                 sampling_method='simplicial')
+
+    def test_f2_2_simplicial(self):
+        """Univariate test function on
+        f(x) = (x - 30) * sin(x) bounds=[(0, 4.5)]"""
+        run_test(test2_2, n=1, sampling_method='simplicial')
+
+    def test_f3_simplicial(self):
+        """NLP: Hock and Schittkowski problem 18"""
+        run_test(test3_1, n=1, sampling_method='simplicial')
+
+    @pytest.mark.slow
+    def test_f4_simplicial(self):
+        """NLP: (High-dimensional) Hock and Schittkowski 11 problem (HS11)"""
+        run_test(test4_1, n=1, sampling_method='simplicial')
+
+    def test_lj_symmetry(self):
+        """LJ: Symmetry-constrained test function"""
+        options = {'symmetry': True,
+                   'disp': True}
+        args = (6,)  # Number of atoms
+        run_test(testLJ, args=args, n=None,
+                 options=options, iters=4,
+                 sampling_method='simplicial')
+
+
+# Argument test functions
+class TestShgoArguments:
+    def test_1_1_simpl_iter(self):
+        """Iterative simplicial sampling on TestFunction 1 (multivariate)"""
+        run_test(test1_2, n=None, iters=2, sampling_method='simplicial')
+
+    def test_1_2_simpl_iter(self):
+        """Iterative simplicial on TestFunction 2 (univariate)"""
+        options = {'minimize_every_iter': False}
+        run_test(test2_1, n=None, iters=7, options=options,
+                 sampling_method='simplicial')
+
+    def test_2_1_sobol_iter(self):
+        """Iterative Sobol sampling on TestFunction 1 (multivariate)"""
+        run_test(test1_2, n=None, iters=1, sampling_method='sobol')
+
+    def test_2_2_sobol_iter(self):
+        """Iterative Sobol sampling on TestFunction 2 (univariate)"""
+        res = shgo(test2_1.f, test2_1.bounds, constraints=test2_1.cons,
+                   n=None, iters=1, sampling_method='sobol')
+
+        numpy.testing.assert_allclose(res.x, test2_1.expected_x, rtol=1e-5,
+                                      atol=1e-5)
+        numpy.testing.assert_allclose(res.fun, test2_1.expected_fun, atol=1e-5)
+
+    def test_3_1_disp_simplicial(self):
+        """Iterative sampling on TestFunction 1 and 2  (multi- and univariate)"""
+
+        def callback_func(x):
+            print("Local minimization callback test")
+
+        for test in [test1_1, test2_1]:
+            shgo(test.f, test.bounds, iters=1,
+                 sampling_method='simplicial',
+                 callback=callback_func, options={'disp': True})
+            shgo(test.f, test.bounds, n=1, sampling_method='simplicial',
+                 callback=callback_func, options={'disp': True})
+
+    def test_3_2_disp_sobol(self):
+        """Iterative sampling on TestFunction 1 and 2 (multi- and univariate)"""
+
+        def callback_func(x):
+            print("Local minimization callback test")
+
+        for test in [test1_1, test2_1]:
+            shgo(test.f, test.bounds, iters=1, sampling_method='sobol',
+                 callback=callback_func, options={'disp': True})
+
+            shgo(test.f, test.bounds, n=1, sampling_method='simplicial',
+                 callback=callback_func, options={'disp': True})
+
+    def test_args_gh14589(self):
+        # Using `args` used to cause `shgo` to fail; see #14589, #15986, #16506
+        res = shgo(func=lambda x, y, z: x*z + y, bounds=[(0, 3)], args=(1, 2))
+        ref = shgo(func=lambda x: 2*x + 1, bounds=[(0, 3)])
+        assert_allclose(res.fun, ref.fun)
+        assert_allclose(res.x, ref.x)
+
+    @pytest.mark.slow
+    def test_4_1_known_f_min(self):
+        """Test known function minima stopping criteria"""
+        # Specify known function value
+        options = {'f_min': test4_1.expected_fun,
+                   'f_tol': 1e-6,
+                   'minimize_every_iter': True}
+        # TODO: Make default n higher for faster tests
+        run_test(test4_1, n=None, test_atol=1e-5, options=options,
+                 sampling_method='simplicial')
+
+    @pytest.mark.slow
+    def test_4_2_known_f_min(self):
+        """Test Global mode limiting local evalutions"""
+        options = {  # Specify known function value
+            'f_min': test4_1.expected_fun,
+            'f_tol': 1e-6,
+            # Specify number of local iterations to perform
+            'minimize_every_iter': True,
+            'local_iter': 1}
+
+        run_test(test4_1, n=None, test_atol=1e-5, options=options,
+                 sampling_method='simplicial')
+
+    @pytest.mark.slow
+    def test_4_3_known_f_min(self):
+        """Test Global mode limiting local evalutions"""
+        options = {  # Specify known function value
+            'f_min': test4_1.expected_fun,
+            'f_tol': 1e-6,
+            # Specify number of local iterations to perform
+            'minimize_every_iter': True,
+            'local_iter': 1,
+            'infty_constraints': False}
+
+        run_test(test4_1, n=1024, test_atol=1e-5, options=options,
+                 sampling_method='sobol')
+
+    def test_4_4_known_f_min(self):
+        """Test Global mode limiting local evalutions for 1-D functions"""
+        options = {  # Specify known function value
+            'f_min': test2_1.expected_fun,
+            'f_tol': 1e-6,
+            # Specify number of local iterations to perform
+            'minimize_every_iter': True,
+            'local_iter': 1,
+            'infty_constraints': False}
+
+        res = shgo(test2_1.f, test2_1.bounds, constraints=test2_1.cons,
+                   n=None, iters=None, options=options,
+                   sampling_method='sobol')
+        numpy.testing.assert_allclose(res.x, test2_1.expected_x, rtol=1e-5,
+                                      atol=1e-5)
+
+    def test_5_1_simplicial_argless(self):
+        """Test Default simplicial sampling settings on TestFunction 1"""
+        res = shgo(test1_1.f, test1_1.bounds, constraints=test1_1.cons)
+        numpy.testing.assert_allclose(res.x, test1_1.expected_x, rtol=1e-5,
+                                      atol=1e-5)
+
+    def test_5_2_sobol_argless(self):
+        """Test Default sobol sampling settings on TestFunction 1"""
+        res = shgo(test1_1.f, test1_1.bounds, constraints=test1_1.cons,
+                   sampling_method='sobol')
+        numpy.testing.assert_allclose(res.x, test1_1.expected_x, rtol=1e-5,
+                                      atol=1e-5)
+
+    def test_6_1_simplicial_max_iter(self):
+        """Test that maximum iteration option works on TestFunction 3"""
+        options = {'max_iter': 2}
+        res = shgo(test3_1.f, test3_1.bounds, constraints=test3_1.cons,
+                   options=options, sampling_method='simplicial')
+        numpy.testing.assert_allclose(res.x, test3_1.expected_x, rtol=1e-5,
+                                      atol=1e-5)
+        numpy.testing.assert_allclose(res.fun, test3_1.expected_fun, atol=1e-5)
+
+    def test_6_2_simplicial_min_iter(self):
+        """Test that maximum iteration option works on TestFunction 3"""
+        options = {'min_iter': 2}
+        res = shgo(test3_1.f, test3_1.bounds, constraints=test3_1.cons,
+                   options=options, sampling_method='simplicial')
+        numpy.testing.assert_allclose(res.x, test3_1.expected_x, rtol=1e-5,
+                                      atol=1e-5)
+        numpy.testing.assert_allclose(res.fun, test3_1.expected_fun, atol=1e-5)
+
+    def test_7_1_minkwargs(self):
+        """Test the minimizer_kwargs arguments for solvers with constraints"""
+        # Test solvers
+        for solver in ['COBYLA', 'SLSQP']:
+            # Note that passing global constraints to SLSQP is tested in other
+            # unittests which run test4_1 normally
+            minimizer_kwargs = {'method': solver,
+                                'constraints': test3_1.cons}
+            print("Solver = {}".format(solver))
+            print("=" * 100)
+            run_test(test3_1, n=128, test_atol=1e-3,
+                     minimizer_kwargs=minimizer_kwargs, sampling_method='sobol')
+
+    def test_7_2_minkwargs(self):
+        """Test the minimizer_kwargs default inits"""
+        minimizer_kwargs = {'ftol': 1e-5}
+        options = {'disp': True}  # For coverage purposes
+        SHGO(test3_1.f, test3_1.bounds, constraints=test3_1.cons[0],
+             minimizer_kwargs=minimizer_kwargs, options=options)
+
+    def test_7_3_minkwargs(self):
+        """Test minimizer_kwargs arguments for solvers without constraints"""
+        for solver in ['Nelder-Mead', 'Powell', 'CG', 'BFGS', 'Newton-CG',
+                       'L-BFGS-B', 'TNC', 'dogleg', 'trust-ncg', 'trust-exact',
+                       'trust-krylov']:
+            def jac(x):
+                return numpy.array([2 * x[0], 2 * x[1]]).T
+
+            def hess(x):
+                return numpy.array([[2, 0], [0, 2]])
+
+            minimizer_kwargs = {'method': solver,
+                                'jac': jac,
+                                'hess': hess}
+            logging.info("Solver = {}".format(solver))
+            logging.info("=" * 100)
+            run_test(test1_1, n=128, test_atol=1e-3,
+                     minimizer_kwargs=minimizer_kwargs, sampling_method='sobol')
+
+    def test_8_homology_group_diff(self):
+        options = {'minhgrd': 1,
+                   'minimize_every_iter': True}
+
+        run_test(test1_1, n=None, iters=None, options=options,
+                 sampling_method='simplicial')
+
+    def test_9_cons_g(self):
+        """Test single function constraint passing"""
+        SHGO(test3_1.f, test3_1.bounds, constraints=test3_1.cons[0])
+
+    def test_10_finite_time(self):
+        """Test single function constraint passing"""
+        options = {'maxtime': 1e-15}
+        shgo(test1_1.f, test1_1.bounds, n=1, iters=None,
+             options=options, sampling_method='sobol')
+
+    def test_11_f_min_time(self):
+        """Test to cover the case where f_lowest == 0"""
+        options = {'maxtime': 1e-15,
+                   'f_min': 0.0}
+        shgo(test1_2.f, test1_2.bounds, n=1, iters=None,
+             options=options, sampling_method='sobol')
+
+    def test_12_sobol_inf_cons(self):
+        """Test to cover the case where f_lowest == 0"""
+        options = {'maxtime': 1e-15,
+                   'f_min': 0.0}
+        shgo(test1_2.f, test1_2.bounds, n=1, iters=None,
+             options=options, sampling_method='sobol')
+
+    def test_14_local_iter(self):
+        """Test limited local iterations for a pseudo-global mode"""
+        options = {'local_iter': 4}
+        run_test(test5_1, n=64, options=options)
+
+    def test_15_min_every_iter(self):
+        """Test minimize every iter options and cover function cache"""
+        options = {'minimize_every_iter': True}
+        run_test(test1_1, n=1, iters=7, options=options,
+                 sampling_method='sobol')
+
+    def test_16_disp_bounds_minimizer(self):
+        """Test disp=True with minimizers that do not support bounds """
+        options = {'disp': True}
+        minimizer_kwargs = {'method': 'nelder-mead'}
+        run_test(test1_2, sampling_method='simplicial',
+                 options=options, minimizer_kwargs=minimizer_kwargs)
+
+    def test_17_custom_sampling(self):
+        """Test the functionality to add custom sampling methods to shgo"""
+        def sample(n, d):
+            return numpy.random.uniform(size=(n, d))
+
+        run_test(test1_1, n=30, sampling_method=sample)
+
+    def test_18_bounds_class(self):
+        # test that new and old bounds yield same result
+        def f(x):
+            return numpy.square(x).sum()
+
+        lb = [-6., 1., -5.]
+        ub = [-1., 3., 5.]
+        bounds_old = list(zip(lb, ub))
+        bounds_new = Bounds(lb, ub)
+
+        res_old_bounds = shgo(f, bounds_old)
+        res_new_bounds = shgo(f, bounds_new)
+
+        assert res_new_bounds.nfev == res_old_bounds.nfev
+        assert res_new_bounds.message == res_old_bounds.message
+        assert res_new_bounds.success == res_old_bounds.success
+        x_opt = numpy.array([-1., 1., 0.])
+        numpy.testing.assert_allclose(res_new_bounds.x, x_opt)
+        numpy.testing.assert_allclose(res_new_bounds.x,
+                                      res_old_bounds.x)
+
+
+# Failure test functions
+class TestShgoFailures:
+    def test_1_maxiter(self):
+        """Test failure on insufficient iterations"""
+        options = {'maxiter': 2}
+        res = shgo(test4_1.f, test4_1.bounds, n=4, iters=None,
+                   options=options, sampling_method='sobol')
+
+        numpy.testing.assert_equal(False, res.success)
+        numpy.testing.assert_equal(4, res.nfev)
+
+    def test_2_sampling(self):
+        """Rejection of unknown sampling method"""
+        assert_raises(ValueError, shgo, test1_1.f, test1_1.bounds,
+                      sampling_method='not_Sobol')
+
+    def test_3_1_no_min_pool_sobol(self):
+        """Check that the routine stops when no minimiser is found
+           after maximum specified function evaluations"""
+        options = {'maxfev': 10,
+                   'disp': True}
+        res = shgo(test_table.f, test_table.bounds, n=4, options=options,
+                   sampling_method='sobol')
+        numpy.testing.assert_equal(False, res.success)
+
+        numpy.testing.assert_equal(16, res.nfev)
+
+    def test_3_2_no_min_pool_simplicial(self):
+        """Check that the routine stops when no minimiser is found
+           after maximum specified sampling evaluations"""
+        options = {'maxev': 10,
+                   'disp': True}
+        res = shgo(test_table.f, test_table.bounds, n=3, options=options,
+                   sampling_method='simplicial')
+        numpy.testing.assert_equal(False, res.success)
+
+    def test_4_1_bound_err(self):
+        """Specified bounds ub > lb"""
+        bounds = [(6, 3), (3, 5)]
+        assert_raises(ValueError, shgo, test1_1.f, bounds)
+
+    def test_4_2_bound_err(self):
+        """Specified bounds are of the form (lb, ub)"""
+        bounds = [(3, 5, 5), (3, 5)]
+        assert_raises(ValueError, shgo, test1_1.f, bounds)
+
+    def test_5_1_1_infeasible_sobol(self):
+        """Ensures the algorithm terminates on infeasible problems
+           after maxev is exceeded. Use infty constraints option"""
+        options = {'maxev': 64,
+                   'disp': True}
+
+        res = shgo(test_infeasible.f, test_infeasible.bounds,
+                   constraints=test_infeasible.cons, n=64, options=options,
+                   sampling_method='sobol')
+
+        numpy.testing.assert_equal(False, res.success)
+
+    def test_5_1_2_infeasible_sobol(self):
+        """Ensures the algorithm terminates on infeasible problems
+           after maxev is exceeded. Do not use infty constraints option"""
+        options = {'maxev': 64,
+                   'disp': True,
+                   'infty_constraints': False}
+
+        res = shgo(test_infeasible.f, test_infeasible.bounds,
+                   constraints=test_infeasible.cons, n=64, options=options,
+                   sampling_method='sobol')
+
+        numpy.testing.assert_equal(False, res.success)
+
+    def test_5_2_infeasible_simplicial(self):
+        """Ensures the algorithm terminates on infeasible problems
+           after maxev is exceeded."""
+        options = {'maxev': 1000,
+                   'disp': False}
+
+        res = shgo(test_infeasible.f, test_infeasible.bounds,
+                   constraints=test_infeasible.cons, n=100, options=options,
+                   sampling_method='simplicial')
+
+        numpy.testing.assert_equal(False, res.success)
+
+    def test_6_1_lower_known_f_min(self):
+        """Test Global mode limiting local evalutions with f* too high"""
+        options = {  # Specify known function value
+            'f_min': test2_1.expected_fun + 2.0,
+            'f_tol': 1e-6,
+            # Specify number of local iterations to perform
+            'minimize_every_iter': True,
+            'local_iter': 1,
+            'infty_constraints': False}
+        args = (test2_1.f, test2_1.bounds)
+        kwargs = {'constraints': test2_1.cons,
+                  'n': None,
+                  'iters': None,
+                  'options': options,
+                  'sampling_method': 'sobol'
+                  }
+        warns(UserWarning, shgo, *args, **kwargs)
+
+    @pytest.mark.parametrize('derivative', ['jac', 'hess', 'hessp'])
+    def test_21_2_derivative_options(self, derivative):
+        """shgo used to raise an error when passing `options` with 'jac'
+        # see gh-12829. check that this is resolved
+        """
+        def objective(x):
+            return 3 * x[0] * x[0] + 2 * x[0] + 5
+
+        def gradient(x):
+            return 6 * x[0] + 2
+
+        def hess(x):
+            return 6
+
+        def hessp(x, p):
+            return 6 * p
+
+        derivative_funcs = {'jac': gradient, 'hess': hess, 'hessp': hessp}
+        options = {derivative: derivative_funcs[derivative]}
+        minimizer_kwargs = {'method': 'trust-constr'}
+
+        bounds = [(-100, 100)]
+        res = shgo(objective, bounds, minimizer_kwargs=minimizer_kwargs,
+                   options=options)
+        ref = minimize(objective, x0=[0], bounds=bounds, **minimizer_kwargs,
+                       **options)
+
+        assert res.success
+        numpy.testing.assert_allclose(res.fun, ref.fun)
+        numpy.testing.assert_allclose(res.x, ref.x)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test__spectral.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test__spectral.py
new file mode 100644
index 00000000..49c14369
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test__spectral.py
@@ -0,0 +1,208 @@
+import itertools
+
+import numpy as np
+from numpy import exp
+from numpy.testing import assert_, assert_equal
+
+from scipy.optimize import root
+
+
+def test_performance():
+    # Compare performance results to those listed in
+    # [Cheng & Li, IMA J. Num. An. 29, 814 (2008)]
+    # and
+    # [W. La Cruz, J.M. Martinez, M. Raydan, Math. Comp. 75, 1429 (2006)].
+    # and those produced by dfsane.f from M. Raydan's website.
+    #
+    # Where the results disagree, the largest limits are taken.
+
+    e_a = 1e-5
+    e_r = 1e-4
+
+    table_1 = [
+        dict(F=F_1, x0=x0_1, n=1000, nit=5, nfev=5),
+        dict(F=F_1, x0=x0_1, n=10000, nit=2, nfev=2),
+        dict(F=F_2, x0=x0_2, n=500, nit=11, nfev=11),
+        dict(F=F_2, x0=x0_2, n=2000, nit=11, nfev=11),
+        # dict(F=F_4, x0=x0_4, n=999, nit=243, nfev=1188),  removed: too sensitive to rounding errors
+        dict(F=F_6, x0=x0_6, n=100, nit=6, nfev=6),  # Results from dfsane.f; papers list nit=3, nfev=3
+        dict(F=F_7, x0=x0_7, n=99, nit=23, nfev=29),  # Must have n%3==0, typo in papers?
+        dict(F=F_7, x0=x0_7, n=999, nit=23, nfev=29),  # Must have n%3==0, typo in papers?
+        dict(F=F_9, x0=x0_9, n=100, nit=12, nfev=18),  # Results from dfsane.f; papers list nit=nfev=6?
+        dict(F=F_9, x0=x0_9, n=1000, nit=12, nfev=18),
+        dict(F=F_10, x0=x0_10, n=1000, nit=5, nfev=5),  # Results from dfsane.f; papers list nit=2, nfev=12
+    ]
+
+    # Check also scaling invariance
+    for xscale, yscale, line_search in itertools.product([1.0, 1e-10, 1e10], [1.0, 1e-10, 1e10],
+                                                         ['cruz', 'cheng']):
+        for problem in table_1:
+            n = problem['n']
+            func = lambda x, n: yscale*problem['F'](x/xscale, n)
+            args = (n,)
+            x0 = problem['x0'](n) * xscale
+
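+            # Target tolerance: an absolute part scaled by problem size and
+            # output scale plus a part relative to the initial residual norm.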
+            fatol = np.sqrt(n) * e_a * yscale + e_r * np.linalg.norm(func(x0, n))
+
+            sigma_eps = 1e-10 * min(yscale/xscale, xscale/yscale)
+            sigma_0 = xscale/yscale
+
+            with np.errstate(over='ignore'):
+                sol = root(func, x0, args=args,
+                           options=dict(ftol=0, fatol=fatol, maxfev=problem['nfev'] + 1,
+                                        sigma_0=sigma_0, sigma_eps=sigma_eps,
+                                        line_search=line_search),
+                           method='DF-SANE')
+
+            err_msg = repr([xscale, yscale, line_search, problem, np.linalg.norm(func(sol.x, n)),
+                            fatol, sol.success, sol.nit, sol.nfev])
+            assert_(sol.success, err_msg)
+            assert_(sol.nfev <= problem['nfev'] + 1, err_msg)  # nfev+1: dfsane.f doesn't count first eval
+            assert_(sol.nit <= problem['nit'], err_msg)
+            assert_(np.linalg.norm(func(sol.x, n)) <= fatol, err_msg)
+
+
+def test_complex():
+    def func(z):
+        return z**2 - 1 + 2j
+    x0 = 2.0j
+
+    ftol = 1e-4
+    sol = root(func, x0, tol=ftol, method='DF-SANE')
+
+    assert_(sol.success)
+
+    f0 = np.linalg.norm(func(x0))
+    fx = np.linalg.norm(func(sol.x))
+    assert_(fx <= ftol*f0)
+
+
+def test_linear_definite():
+    # The DF-SANE paper proves convergence for "strongly isolated"
+    # solutions.
+    #
+    # For linear systems F(x) = A x - b = 0, with A positive or
+    # negative definite, the solution is strongly isolated.
+
+    def check_solvability(A, b, line_search='cruz'):
+        func = lambda x: A.dot(x) - b
+        xp = np.linalg.solve(A, b)
+        eps = np.linalg.norm(func(xp)) * 1e3
+        sol = root(func, b, options=dict(fatol=eps, ftol=0, maxfev=17523, line_search=line_search),
+                   method='DF-SANE')
+        assert_(sol.success)
+        assert_(np.linalg.norm(func(sol.x)) <= eps)
+
+    n = 90
+
+    # Test linear pos.def. system
+    np.random.seed(1234)
+    A = np.arange(n*n).reshape(n, n)
+    A = A + n*n * np.diag(1 + np.arange(n))
+    assert_(np.linalg.eigvals(A).min() > 0)
+    b = np.arange(n) * 1.0
+    check_solvability(A, b, 'cruz')
+    check_solvability(A, b, 'cheng')
+
+    # Test linear neg.def. system
+    check_solvability(-A, b, 'cruz')
+    check_solvability(-A, b, 'cheng')
+
+
+def test_shape():
+    def f(x, arg):
+        return x - arg
+
+    for dt in [float, complex]:
+        x = np.zeros([2,2])
+        arg = np.ones([2,2], dtype=dt)
+
+        sol = root(f, x, args=(arg,), method='DF-SANE')
+        assert_(sol.success)
+        assert_equal(sol.x.shape, x.shape)
+
+
+# Some of the test functions and initial guesses listed in
+# [W. La Cruz, M. Raydan. Optimization Methods and Software, 18, 583 (2003)]
+
+def F_1(x, n):
+    g = np.zeros([n])
+    i = np.arange(2, n+1)
+    g[0] = exp(x[0] - 1) - 1
+    g[1:] = i*(exp(x[1:] - 1) - x[1:])
+    return g
+
+def x0_1(n):
+    x0 = np.empty([n])
+    x0.fill(n/(n-1))
+    return x0
+
+def F_2(x, n):
+    g = np.zeros([n])
+    i = np.arange(2, n+1)
+    g[0] = exp(x[0]) - 1
+    g[1:] = 0.1*i*(exp(x[1:]) + x[:-1] - 1)
+    return g
+
+def x0_2(n):
+    x0 = np.empty([n])
+    x0.fill(1/n**2)
+    return x0
+
+def F_4(x, n):
+    assert_equal(n % 3, 0)
+    g = np.zeros([n])
+    # Note: the first line is typoed in some of the references;
+    # correct in original [Gasparo, Optimization Meth. 13, 79 (2000)]
+    g[::3] = 0.6 * x[::3] + 1.6 * x[1::3]**3 - 7.2 * x[1::3]**2 + 9.6 * x[1::3] - 4.8
+    g[1::3] = 0.48 * x[::3] - 0.72 * x[1::3]**3 + 3.24 * x[1::3]**2 - 4.32 * x[1::3] - x[2::3] + 0.2 * x[2::3]**3 + 2.16
+    g[2::3] = 1.25 * x[2::3] - 0.25*x[2::3]**3
+    return g
+
+def x0_4(n):
+    assert_equal(n % 3, 0)
+    x0 = np.array([-1, 1/2, -1] * (n//3))
+    return x0
+
+def F_6(x, n):
+    c = 0.9
+    mu = (np.arange(1, n+1) - 0.5)/n
+    return x - 1/(1 - c/(2*n) * (mu[:,None]*x / (mu[:,None] + mu)).sum(axis=1))
+
+def x0_6(n):
+    return np.ones([n])
+
+def F_7(x, n):
+    assert_equal(n % 3, 0)
+
+    def phi(t):
+        v = 0.5*t - 2
+        v[t > -1] = ((-592*t**3 + 888*t**2 + 4551*t - 1924)/1998)[t > -1]
+        v[t >= 2] = (0.5*t + 2)[t >= 2]
+        return v
+    g = np.zeros([n])
+    g[::3] = 1e4 * x[1::3]**2 - 1
+    g[1::3] = exp(-x[::3]) + exp(-x[1::3]) - 1.0001
+    g[2::3] = phi(x[2::3])
+    return g
+
+def x0_7(n):
+    assert_equal(n % 3, 0)
+    return np.array([1e-3, 18, 1] * (n//3))
+
+def F_9(x, n):
+    g = np.zeros([n])
+    i = np.arange(2, n)
+    g[0] = x[0]**3/3 + x[1]**2/2
+    g[1:-1] = -x[1:-1]**2/2 + i*x[1:-1]**3/3 + x[2:]**2/2
+    g[-1] = -x[-1]**2/2 + n*x[-1]**3/3
+    return g
+
+def x0_9(n):
+    return np.ones([n])
+
+def F_10(x, n):
+    return np.log(1 + x) - x/n
+
+def x0_10(n):
+    return np.ones([n])
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_cobyla.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_cobyla.py
new file mode 100644
index 00000000..c902e9f7
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_cobyla.py
@@ -0,0 +1,131 @@
+import math
+import numpy as np
+import pytest
+
+from numpy.testing import assert_allclose, assert_, assert_array_equal
+
+from scipy.optimize import fmin_cobyla, minimize
+
+
+class TestCobyla:
+    def setup_method(self):
+        self.x0 = [4.95, 0.66]
+        self.solution = [math.sqrt(25 - (2.0/3)**2), 2.0/3]
+        self.opts = {'disp': False, 'rhobeg': 1, 'tol': 1e-5,
+                     'maxiter': 100}
+
+    def fun(self, x):
+        return x[0]**2 + abs(x[1])**3
+
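+    # con1 >= 0 and con2 >= 0 together pin feasible points to the circle
+    # x[0]**2 + x[1]**2 == 25: an equality expressed as two inequalities.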
+    def con1(self, x):
+        return x[0]**2 + x[1]**2 - 25
+
+    def con2(self, x):
+        return -self.con1(x)
+
+    def test_simple(self):
+        # use disp=True as smoke test for gh-8118
+        x = fmin_cobyla(self.fun, self.x0, [self.con1, self.con2], rhobeg=1,
+                        rhoend=1e-5, maxfun=100, disp=True)
+        assert_allclose(x, self.solution, atol=1e-4)
+
+    def test_minimize_simple(self):
+        class Callback:
+            def __init__(self):
+                self.n_calls = 0
+                self.last_x = None
+
+            def __call__(self, x):
+                self.n_calls += 1
+                self.last_x = x
+
+        callback = Callback()
+
+        # Minimize with method='COBYLA'
+        cons = ({'type': 'ineq', 'fun': self.con1},
+                {'type': 'ineq', 'fun': self.con2})
+        sol = minimize(self.fun, self.x0, method='cobyla', constraints=cons,
+                       callback=callback, options=self.opts)
+        assert_allclose(sol.x, self.solution, atol=1e-4)
+        assert_(sol.success, sol.message)
+        assert_(sol.maxcv < 1e-5, sol)
+        assert_(sol.nfev < 70, sol)
+        assert_(sol.fun < self.fun(self.solution) + 1e-3, sol)
+        assert_(sol.nfev == callback.n_calls,
+                "Callback is not called exactly once for every function eval.")
+        assert_array_equal(sol.x, callback.last_x,
+                           "Last design vector sent to the callback is not equal to returned value.")
+
+    def test_minimize_constraint_violation(self):
+        np.random.seed(1234)
+        pb = np.random.rand(10, 10)
+        spread = np.random.rand(10)
+
+        def p(w):
+            return pb.dot(w)
+
+        def f(w):
+            return -(w * spread).sum()
+
+        def c1(w):
+            return 500 - abs(p(w)).sum()
+
+        def c2(w):
+            return 5 - abs(p(w).sum())
+
+        def c3(w):
+            return 5 - abs(p(w)).max()
+
+        cons = ({'type': 'ineq', 'fun': c1},
+                {'type': 'ineq', 'fun': c2},
+                {'type': 'ineq', 'fun': c3})
+        w0 = np.zeros((10, 1))
+        message = 'Use of `minimize` with `x0.ndim != 1` is deprecated.'
+        with pytest.warns(DeprecationWarning, match=message):
+            sol = minimize(f, w0, method='cobyla', constraints=cons,
+                           options={'catol': 1e-6})
+            assert_(sol.maxcv > 1e-6)
+            assert_(not sol.success)
+
+
+def test_vector_constraints():
+    # test that fmin_cobyla and minimize can take a combination
+    # of constraints, some returning a number and others an array
+    def fun(x):
+        return (x[0] - 1)**2 + (x[1] - 2.5)**2
+
+    def fmin(x):
+        return fun(x) - 1
+
+    def cons1(x):
+        a = np.array([[1, -2, 2], [-1, -2, 6], [-1, 2, 2]])
+        return np.array([a[i, 0] * x[0] + a[i, 1] * x[1] +
+                         a[i, 2] for i in range(len(a))])
+
+    def cons2(x):
+        return x     # identity, acts as bounds x > 0
+
+    x0 = np.array([2, 0])
+    cons_list = [fun, cons1, cons2]
+
+    xsol = [1.4, 1.7]
+    fsol = 0.8
+
+    # testing fmin_cobyla
+    sol = fmin_cobyla(fun, x0, cons_list, rhoend=1e-5)
+    assert_allclose(sol, xsol, atol=1e-4)
+
+    sol = fmin_cobyla(fun, x0, fmin, rhoend=1e-5)
+    assert_allclose(fun(sol), 1, atol=1e-4)
+
+    # testing minimize
+    constraints = [{'type': 'ineq', 'fun': cons} for cons in cons_list]
+    sol = minimize(fun, x0, constraints=constraints, tol=1e-5)
+    assert_allclose(sol.x, xsol, atol=1e-4)
+    assert_(sol.success, sol.message)
+    assert_allclose(sol.fun, fsol, atol=1e-4)
+
+    constraints = {'type': 'ineq', 'fun': fmin}
+    sol = minimize(fun, x0, constraints=constraints, tol=1e-5)
+    assert_allclose(sol.fun, 1, atol=1e-4)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_constraint_conversion.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_constraint_conversion.py
new file mode 100644
index 00000000..73c70799
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_constraint_conversion.py
@@ -0,0 +1,267 @@
+"""
+Unit test for constraint conversion
+"""
+
+import numpy as np
+from numpy.testing import (assert_array_almost_equal,
+                           assert_allclose, assert_warns, suppress_warnings)
+import pytest
+from scipy.optimize import (NonlinearConstraint, LinearConstraint,
+                            OptimizeWarning, minimize, BFGS)
+from .test_minimize_constrained import (Maratos, HyperbolicIneq, Rosenbrock,
+                                        IneqRosenbrock, EqIneqRosenbrock,
+                                        BoundedRosenbrock, Elec)
+
+
+class TestOldToNew:
+    x0 = (2, 0)
+    bnds = ((0, None), (0, None))
+    method = "trust-constr"
+
+    def test_constraint_dictionary_1(self):
+        fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2
+        cons = ({'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2},
+                {'type': 'ineq', 'fun': lambda x: -x[0] - 2 * x[1] + 6},
+                {'type': 'ineq', 'fun': lambda x: -x[0] + 2 * x[1] + 2})
+
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning, "delta_grad == 0.0")
+            res = minimize(fun, self.x0, method=self.method,
+                           bounds=self.bnds, constraints=cons)
+        assert_allclose(res.x, [1.4, 1.7], rtol=1e-4)
+        assert_allclose(res.fun, 0.8, rtol=1e-4)
+
+    def test_constraint_dictionary_2(self):
+        fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2
+        cons = {'type': 'eq',
+                'fun': lambda x, p1, p2: p1*x[0] - p2*x[1],
+                'args': (1, 1.1),
+                'jac': lambda x, p1, p2: np.array([[p1, -p2]])}
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning, "delta_grad == 0.0")
+            res = minimize(fun, self.x0, method=self.method,
+                           bounds=self.bnds, constraints=cons)
+        assert_allclose(res.x, [1.7918552, 1.62895927])
+        assert_allclose(res.fun, 1.3857466063348418)
+
+    def test_constraint_dictionary_3(self):
+        fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2
+        cons = [{'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2},
+                NonlinearConstraint(lambda x: x[0] - x[1], 0, 0)]
+
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning, "delta_grad == 0.0")
+            res = minimize(fun, self.x0, method=self.method,
+                           bounds=self.bnds, constraints=cons)
+        assert_allclose(res.x, [1.75, 1.75], rtol=1e-4)
+        assert_allclose(res.fun, 1.125, rtol=1e-4)
+
+
+class TestNewToOld:
+
+    def test_multiple_constraint_objects(self):
+        fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2 + (x[2] - 0.75)**2
+        x0 = [2, 0, 1]
+        coni = []  # only inequality constraints (can use cobyla)
+        methods = ["slsqp", "cobyla", "trust-constr"]
+
+        # mixed old and new
+        coni.append([{'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2},
+                     NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)])
+
+        coni.append([LinearConstraint([1, -2, 0], -2, np.inf),
+                     NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)])
+
+        coni.append([NonlinearConstraint(lambda x: x[0] - 2 * x[1] + 2, 0, np.inf),
+                     NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)])
+
+        for con in coni:
+            funs = {}
+            for method in methods:
+                with suppress_warnings() as sup:
+                    sup.filter(UserWarning)
+                    result = minimize(fun, x0, method=method, constraints=con)
+                    funs[method] = result.fun
+            assert_allclose(funs['slsqp'], funs['trust-constr'], rtol=1e-4)
+            assert_allclose(funs['cobyla'], funs['trust-constr'], rtol=1e-4)
+
+    def test_individual_constraint_objects(self):
+        fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2 + (x[2] - 0.75)**2
+        x0 = [2, 0, 1]
+
+        cone = []  # with equality constraints (can't use cobyla)
+        coni = []  # only inequality constraints (can use cobyla)
+        methods = ["slsqp", "cobyla", "trust-constr"]
+
+        # nonstandard data types for constraint equality bounds
+        cone.append(NonlinearConstraint(lambda x: x[0] - x[1], 1, 1))
+        cone.append(NonlinearConstraint(lambda x: x[0] - x[1], [1.21], [1.21]))
+        cone.append(NonlinearConstraint(lambda x: x[0] - x[1],
+                                        1.21, np.array([1.21])))
+
+        # multiple equalities
+        cone.append(NonlinearConstraint(
+                    lambda x: [x[0] - x[1], x[1] - x[2]],
+                    1.21, 1.21))  # two same equalities
+        cone.append(NonlinearConstraint(
+                    lambda x: [x[0] - x[1], x[1] - x[2]],
+                    [1.21, 1.4], [1.21, 1.4]))  # two different equalities
+        cone.append(NonlinearConstraint(
+                    lambda x: [x[0] - x[1], x[1] - x[2]],
+                    [1.21, 1.21], 1.21))  # equality specified two ways
+        cone.append(NonlinearConstraint(
+                    lambda x: [x[0] - x[1], x[1] - x[2]],
+                    [1.21, -np.inf], [1.21, np.inf]))  # equality + unbounded
+
+        # nonstandard data types for constraint inequality bounds
+        coni.append(NonlinearConstraint(lambda x: x[0] - x[1], 1.21, np.inf))
+        coni.append(NonlinearConstraint(lambda x: x[0] - x[1], [1.21], np.inf))
+        coni.append(NonlinearConstraint(lambda x: x[0] - x[1],
+                                        1.21, np.array([np.inf])))
+        coni.append(NonlinearConstraint(lambda x: x[0] - x[1], -np.inf, -3))
+        coni.append(NonlinearConstraint(lambda x: x[0] - x[1],
+                                        np.array(-np.inf), -3))
+
+        # multiple inequalities/equalities
+        coni.append(NonlinearConstraint(
+                    lambda x: [x[0] - x[1], x[1] - x[2]],
+                    1.21, np.inf))  # two same inequalities
+        cone.append(NonlinearConstraint(
+                    lambda x: [x[0] - x[1], x[1] - x[2]],
+                    [1.21, -np.inf], [1.21, 1.4]))  # mixed equality/inequality
+        coni.append(NonlinearConstraint(
+                    lambda x: [x[0] - x[1], x[1] - x[2]],
+                    [1.1, .8], [1.2, 1.4]))  # bounded above and below
+        coni.append(NonlinearConstraint(
+                    lambda x: [x[0] - x[1], x[1] - x[2]],
+                    [-1.2, -1.4], [-1.1, -.8]))  # - bounded above and below
+
+        # quick check of LinearConstraint class (very little new code to test)
+        cone.append(LinearConstraint([1, -1, 0], 1.21, 1.21))
+        cone.append(LinearConstraint([[1, -1, 0], [0, 1, -1]], 1.21, 1.21))
+        cone.append(LinearConstraint([[1, -1, 0], [0, 1, -1]],
+                                     [1.21, -np.inf], [1.21, 1.4]))
+
+        for con in coni:
+            funs = {}
+            for method in methods:
+                with suppress_warnings() as sup:
+                    sup.filter(UserWarning)
+                    result = minimize(fun, x0, method=method, constraints=con)
+                    funs[method] = result.fun
+            assert_allclose(funs['slsqp'], funs['trust-constr'], rtol=1e-3)
+            assert_allclose(funs['cobyla'], funs['trust-constr'], rtol=1e-3)
+
+        for con in cone:
+            funs = {}
+            for method in methods[::2]:  # skip cobyla
+                with suppress_warnings() as sup:
+                    sup.filter(UserWarning)
+                    result = minimize(fun, x0, method=method, constraints=con)
+                    funs[method] = result.fun
+            assert_allclose(funs['slsqp'], funs['trust-constr'], rtol=1e-3)
+
+
+class TestNewToOldSLSQP:
+    method = 'slsqp'
+    elec = Elec(n_electrons=2)
+    elec.x_opt = np.array([-0.58438468, 0.58438466, 0.73597047,
+                           -0.73597044, 0.34180668, -0.34180667])
+    brock = BoundedRosenbrock()
+    brock.x_opt = [0, 0]
+    list_of_problems = [Maratos(),
+                        HyperbolicIneq(),
+                        Rosenbrock(),
+                        IneqRosenbrock(),
+                        EqIneqRosenbrock(),
+                        elec,
+                        brock
+                        ]
+
+    def test_list_of_problems(self):
+
+        for prob in self.list_of_problems:
+
+            with suppress_warnings() as sup:
+                sup.filter(UserWarning)
+                result = minimize(prob.fun, prob.x0,
+                                  method=self.method,
+                                  bounds=prob.bounds,
+                                  constraints=prob.constr)
+
+            assert_array_almost_equal(result.x, prob.x_opt, decimal=3)
+
+    def test_warn_mixed_constraints(self):
+        # warns about inefficiency of mixed equality/inequality constraints
+        fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2 + (x[2] - 0.75)**2
+        cons = NonlinearConstraint(lambda x: [x[0]**2 - x[1], x[1] - x[2]],
+                                   [1.1, .8], [1.1, 1.4])
+        bnds = ((0, None), (0, None), (0, None))
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning, "delta_grad == 0.0")
+            assert_warns(OptimizeWarning, minimize, fun, (2, 0, 1),
+                         method=self.method, bounds=bnds, constraints=cons)
+
+    def test_warn_ignored_options(self):
+        # warns about constraint options being ignored
+        fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2 + (x[2] - 0.75)**2
+        x0 = (2, 0, 1)
+
+        if self.method == "slsqp":
+            bnds = ((0, None), (0, None), (0, None))
+        else:
+            bnds = None
+
+        cons = NonlinearConstraint(lambda x: x[0], 2, np.inf)
+        res = minimize(fun, x0, method=self.method,
+                       bounds=bnds, constraints=cons)
+        # no warnings without constraint options
+        assert_allclose(res.fun, 1)
+
+        cons = LinearConstraint([1, 0, 0], 2, np.inf)
+        res = minimize(fun, x0, method=self.method,
+                       bounds=bnds, constraints=cons)
+        # no warnings without constraint options
+        assert_allclose(res.fun, 1)
+
+        cons = []
+        cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf,
+                                        keep_feasible=True))
+        cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf,
+                                        hess=BFGS()))
+        cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf,
+                                        finite_diff_jac_sparsity=42))
+        cons.append(NonlinearConstraint(lambda x: x[0]**2, 2, np.inf,
+                                        finite_diff_rel_step=42))
+        cons.append(LinearConstraint([1, 0, 0], 2, np.inf,
+                                     keep_feasible=True))
+        for con in cons:
+            assert_warns(OptimizeWarning, minimize, fun, x0,
+                         method=self.method, bounds=bnds, constraints=con)
+
+
+class TestNewToOldCobyla:
+    method = 'cobyla'
+
+    list_of_problems = [
+                        Elec(n_electrons=2),
+                        Elec(n_electrons=4),
+                        ]
+
+    @pytest.mark.slow
+    def test_list_of_problems(self):
+
+        for prob in self.list_of_problems:
+
+            with suppress_warnings() as sup:
+                sup.filter(UserWarning)
+                truth = minimize(prob.fun, prob.x0,
+                                 method='trust-constr',
+                                 bounds=prob.bounds,
+                                 constraints=prob.constr)
+                result = minimize(prob.fun, prob.x0,
+                                  method=self.method,
+                                  bounds=prob.bounds,
+                                  constraints=prob.constr)
+
+            assert_allclose(result.fun, truth.fun, rtol=1e-3)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_constraints.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_constraints.py
new file mode 100644
index 00000000..e54e86dd
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_constraints.py
@@ -0,0 +1,234 @@
+import pytest
+import numpy as np
+from numpy.testing import TestCase, assert_array_equal
+import scipy.sparse as sps
+from scipy.optimize._constraints import (
+    Bounds, LinearConstraint, NonlinearConstraint, PreparedConstraint,
+    new_bounds_to_old, old_bound_to_new, strict_bounds)
+
+
+class TestStrictBounds(TestCase):
+    def test_scalarvalue_unique_enforce_feasibility(self):
+        m = 3
+        lb = 2
+        ub = 4
+        enforce_feasibility = False
+        strict_lb, strict_ub = strict_bounds(lb, ub,
+                                             enforce_feasibility,
+                                             m)
+        assert_array_equal(strict_lb, [-np.inf, -np.inf, -np.inf])
+        assert_array_equal(strict_ub, [np.inf, np.inf, np.inf])
+
+        enforce_feasibility = True
+        strict_lb, strict_ub = strict_bounds(lb, ub,
+                                             enforce_feasibility,
+                                             m)
+        assert_array_equal(strict_lb, [2, 2, 2])
+        assert_array_equal(strict_ub, [4, 4, 4])
+
+    def test_vectorvalue_unique_enforce_feasibility(self):
+        m = 3
+        lb = [1, 2, 3]
+        ub = [4, 5, 6]
+        enforce_feasibility = False
+        strict_lb, strict_ub = strict_bounds(lb, ub,
+                                             enforce_feasibility,
+                                             m)
+        assert_array_equal(strict_lb, [-np.inf, -np.inf, -np.inf])
+        assert_array_equal(strict_ub, [np.inf, np.inf, np.inf])
+
+        enforce_feasibility = True
+        strict_lb, strict_ub = strict_bounds(lb, ub,
+                                             enforce_feasibility,
+                                             m)
+        assert_array_equal(strict_lb, [1, 2, 3])
+        assert_array_equal(strict_ub, [4, 5, 6])
+
+    def test_scalarvalue_vector_enforce_feasibility(self):
+        m = 3
+        lb = 2
+        ub = 4
+        enforce_feasibility = [False, True, False]
+        strict_lb, strict_ub = strict_bounds(lb, ub,
+                                             enforce_feasibility,
+                                             m)
+        assert_array_equal(strict_lb, [-np.inf, 2, -np.inf])
+        assert_array_equal(strict_ub, [np.inf, 4, np.inf])
+
+    def test_vectorvalue_vector_enforce_feasibility(self):
+        m = 3
+        lb = [1, 2, 3]
+        ub = [4, 6, np.inf]
+        enforce_feasibility = [True, False, True]
+        strict_lb, strict_ub = strict_bounds(lb, ub,
+                                             enforce_feasibility,
+                                             m)
+        assert_array_equal(strict_lb, [1, -np.inf, 3])
+        assert_array_equal(strict_ub, [4, np.inf, np.inf])
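+
+    # Summary of the semantics exercised above (informal note): lb, ub and
+    # enforce_feasibility are broadcast to length m, and every component
+    # whose feasibility is not enforced is relaxed to (-inf, inf), e.g.
+    #     strict_bounds(2, 4, [False, True, False], 3)
+    #     -> ([-inf, 2, -inf], [inf, 4, inf])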
+
+
+def test_prepare_constraint_infeasible_x0():
+    lb = np.array([0, 20, 30])
+    ub = np.array([0.5, np.inf, 70])
+    x0 = np.array([1, 2, 3])
+    enforce_feasibility = np.array([False, True, True], dtype=bool)
+    bounds = Bounds(lb, ub, enforce_feasibility)
+    pytest.raises(ValueError, PreparedConstraint, bounds, x0)
+
+    pc = PreparedConstraint(Bounds(lb, ub), [1, 2, 3])
+    assert (pc.violation([1, 2, 3]) > 0).any()
+    assert (pc.violation([0.25, 21, 31]) == 0).all()
+
+    x0 = np.array([1, 2, 3, 4])
+    A = np.array([[1, 2, 3, 4], [5, 0, 0, 6], [7, 0, 8, 0]])
+    enforce_feasibility = np.array([True, True, True], dtype=bool)
+    linear = LinearConstraint(A, -np.inf, 0, enforce_feasibility)
+    pytest.raises(ValueError, PreparedConstraint, linear, x0)
+
+    pc = PreparedConstraint(LinearConstraint(A, -np.inf, 0),
+                            [1, 2, 3, 4])
+    assert (pc.violation([1, 2, 3, 4]) > 0).any()
+    assert (pc.violation([-10, 2, -10, 4]) == 0).all()
+
+    def fun(x):
+        return A.dot(x)
+
+    def jac(x):
+        return A
+
+    def hess(x, v):
+        return sps.csr_matrix((4, 4))
+
+    nonlinear = NonlinearConstraint(fun, -np.inf, 0, jac, hess,
+                                    enforce_feasibility)
+    pytest.raises(ValueError, PreparedConstraint, nonlinear, x0)
+
+    pc = PreparedConstraint(nonlinear, [-10, 2, -10, 4])
+    assert (pc.violation([1, 2, 3, 4]) > 0).any()
+    assert (pc.violation([-10, 2, -10, 4]) == 0).all()
+
+
+def test_violation():
+    def cons_f(x):
+        return np.array([x[0] ** 2 + x[1], x[0] ** 2 - x[1]])
+
+    nlc = NonlinearConstraint(cons_f, [-1, -0.8500], [2, 2])
+    pc = PreparedConstraint(nlc, [0.5, 1])
+
+    assert_array_equal(pc.violation([0.5, 1]), [0., 0.])
+
+    np.testing.assert_almost_equal(pc.violation([0.5, 1.2]), [0., 0.1])
+
+    np.testing.assert_almost_equal(pc.violation([1.2, 1.2]), [0.64, 0])
+
+    np.testing.assert_almost_equal(pc.violation([0.1, -1.2]), [0.19, 0])
+
+    np.testing.assert_almost_equal(pc.violation([0.1, 2]), [0.01, 1.14])
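+
+    # Worked check for one of the cases above: cons_f([0.5, 1.2]) =
+    # [1.45, -0.95]; the second component falls 0.1 below the lower bound
+    # -0.85, hence the expected violation [0., 0.1].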
+
+
+def test_new_bounds_to_old():
+    lb = np.array([-np.inf, 2, 3])
+    ub = np.array([3, np.inf, 10])
+
+    bounds = [(None, 3), (2, None), (3, 10)]
+    assert_array_equal(new_bounds_to_old(lb, ub, 3), bounds)
+
+    bounds_single_lb = [(-1, 3), (-1, None), (-1, 10)]
+    assert_array_equal(new_bounds_to_old(-1, ub, 3), bounds_single_lb)
+
+    bounds_no_lb = [(None, 3), (None, None), (None, 10)]
+    assert_array_equal(new_bounds_to_old(-np.inf, ub, 3), bounds_no_lb)
+
+    bounds_single_ub = [(None, 20), (2, 20), (3, 20)]
+    assert_array_equal(new_bounds_to_old(lb, 20, 3), bounds_single_ub)
+
+    bounds_no_ub = [(None, None), (2, None), (3, None)]
+    assert_array_equal(new_bounds_to_old(lb, np.inf, 3), bounds_no_ub)
+
+    bounds_single_both = [(1, 2), (1, 2), (1, 2)]
+    assert_array_equal(new_bounds_to_old(1, 2, 3), bounds_single_both)
+
+    bounds_no_both = [(None, None), (None, None), (None, None)]
+    assert_array_equal(new_bounds_to_old(-np.inf, np.inf, 3), bounds_no_both)
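+
+    # In short: new-style (lb, ub) arrays with +/-inf map to old-style
+    # (min, max) pairs with None on unbounded sides; scalar bounds are
+    # broadcast to the requested length.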
+
+
+def test_old_bounds_to_new():
+    bounds = ([1, 2], (None, 3), (-1, None))
+    lb_true = np.array([1, -np.inf, -1])
+    ub_true = np.array([2, 3, np.inf])
+
+    lb, ub = old_bound_to_new(bounds)
+    assert_array_equal(lb, lb_true)
+    assert_array_equal(ub, ub_true)
+
+    bounds = [(-np.inf, np.inf), (np.array([1]), np.array([1]))]
+    lb, ub = old_bound_to_new(bounds)
+
+    assert_array_equal(lb, [-np.inf, 1])
+    assert_array_equal(ub, [np.inf, 1])
+
+
+class TestBounds:
+    def test_repr(self):
+        # so that eval works
+        from numpy import array, inf  # noqa
+        for args in (
+            (-1.0, 5.0),
+            (-1.0, np.inf, True),
+            (np.array([1.0, -np.inf]), np.array([2.0, np.inf])),
+            (np.array([1.0, -np.inf]), np.array([2.0, np.inf]),
+             np.array([True, False])),
+        ):
+            bounds = Bounds(*args)
+            bounds2 = eval(repr(Bounds(*args)))
+            assert_array_equal(bounds.lb, bounds2.lb)
+            assert_array_equal(bounds.ub, bounds2.ub)
+            assert_array_equal(bounds.keep_feasible, bounds2.keep_feasible)
+
+    def test_array(self):
+        # gh13501
+        b = Bounds(lb=[0.0, 0.0], ub=[1.0, 1.0])
+        assert isinstance(b.lb, np.ndarray)
+        assert isinstance(b.ub, np.ndarray)
+
+    def test_defaults(self):
+        b1 = Bounds()
+        b2 = Bounds(np.asarray(-np.inf), np.asarray(np.inf))
+        assert b1.lb == b2.lb
+        assert b1.ub == b2.ub
+
+    def test_input_validation(self):
+        message = "`lb`, `ub`, and `keep_feasible` must be broadcastable."
+        with pytest.raises(ValueError, match=message):
+            Bounds([1, 2], [1, 2, 3])
+
+    def test_residual(self):
+        bounds = Bounds(-2, 4)
+        x0 = [-1, 2]
+        np.testing.assert_allclose(bounds.residual(x0), ([1, 4], [5, 2]))
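+        # residual returns (x - lb, ub - x): here [-1, 2] - (-2) = [1, 4]
+        # and 4 - [-1, 2] = [5, 2].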
+
+
+class TestLinearConstraint:
+    def test_defaults(self):
+        A = np.eye(4)
+        lc = LinearConstraint(A)
+        lc2 = LinearConstraint(A, -np.inf, np.inf)
+        assert_array_equal(lc.lb, lc2.lb)
+        assert_array_equal(lc.ub, lc2.ub)
+
+    def test_input_validation(self):
+        A = np.eye(4)
+        message = "`lb`, `ub`, and `keep_feasible` must be broadcastable"
+        with pytest.raises(ValueError, match=message):
+            LinearConstraint(A, [1, 2], [1, 2, 3])
+
+        A = np.empty((4, 3, 5))
+        message = "`A` must have exactly two dimensions."
+        with pytest.raises(ValueError, match=message):
+            LinearConstraint(A)
+
+    def test_residual(self):
+        A = np.eye(2)
+        lc = LinearConstraint(A, -2, 4)
+        x0 = [-1, 2]
+        np.testing.assert_allclose(lc.residual(x0), ([1, 4], [5, 2]))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_cython_optimize.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_cython_optimize.py
new file mode 100644
index 00000000..2f859c11
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_cython_optimize.py
@@ -0,0 +1,92 @@
+"""
+Test Cython optimize zeros API functions: ``bisect``, ``ridder``, ``brenth``,
+and ``brentq`` in `scipy.optimize.cython_optimize`, by finding the roots of a
+3rd order polynomial given a sequence of constant terms, ``a0``, and fixed 1st,
+2nd, and 3rd order terms in ``args``.
+
+.. math::
+
+    f(x, a0, args) = ((args[2]*x + args[1])*x + args[0])*x + a0
+
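+which expands to the cubic ``args[2]*x**3 + args[1]*x**2 + args[0]*x + a0``.
+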
+The 3rd order polynomial function is written in Cython and called in a Python
+wrapper named after the zero function. See the private ``_zeros`` Cython module
+in `scipy.optimize.cython_optimize` for more information.
+"""
+
+import numpy.testing as npt
+from scipy.optimize.cython_optimize import _zeros
+
+# CONSTANTS
+# Solve x**3 + a0 = 0 for a0 in A0 = [-2.0, -2.1, ..., -2.9], i.e. x**3 = 2.0 ... 2.9.
+# The ARGS have 3 elements just to show how this could be done for any cubic
+# polynomial.
+A0 = tuple(-2.0 - x/10.0 for x in range(10))  # constant term
+ARGS = (0.0, 0.0, 1.0)  # 1st, 2nd, and 3rd order terms
+XLO, XHI = 0.0, 2.0  # first and second bounds of zeros functions
+# absolute and relative tolerances and max iterations for zeros functions
+XTOL, RTOL, MITR = 0.001, 0.001, 10
+EXPECTED = [(-a0) ** (1.0/3.0) for a0 in A0]
+# = [1.2599210498948732,
+#    1.2805791649874942,
+#    1.300591446851387,
+#    1.3200061217959123,
+#    1.338865900164339,
+#    1.3572088082974532,
+#    1.375068867074141,
+#    1.3924766500838337,
+#    1.4094597464129783,
+#    1.4260431471424087]
+
+
+# test bisect
+def test_bisect():
+    npt.assert_allclose(
+        EXPECTED,
+        list(
+            _zeros.loop_example('bisect', A0, ARGS, XLO, XHI, XTOL, RTOL, MITR)
+        ),
+        rtol=RTOL, atol=XTOL
+    )
+
+
+# test ridder
+def test_ridder():
+    npt.assert_allclose(
+        EXPECTED,
+        list(
+            _zeros.loop_example('ridder', A0, ARGS, XLO, XHI, XTOL, RTOL, MITR)
+        ),
+        rtol=RTOL, atol=XTOL
+    )
+
+
+# test brenth
+def test_brenth():
+    npt.assert_allclose(
+        EXPECTED,
+        list(
+            _zeros.loop_example('brenth', A0, ARGS, XLO, XHI, XTOL, RTOL, MITR)
+        ),
+        rtol=RTOL, atol=XTOL
+    )
+
+
+# test brentq
+def test_brentq():
+    npt.assert_allclose(
+        EXPECTED,
+        list(
+            _zeros.loop_example('brentq', A0, ARGS, XLO, XHI, XTOL, RTOL, MITR)
+        ),
+        rtol=RTOL, atol=XTOL
+    )
+
+
+# test brentq with full output
+def test_brentq_full_output():
+    output = _zeros.full_output_example(
+        (A0[0],) + ARGS, XLO, XHI, XTOL, RTOL, MITR)
+    npt.assert_allclose(EXPECTED[0], output['root'], rtol=RTOL, atol=XTOL)
+    npt.assert_equal(6, output['iterations'])
+    npt.assert_equal(7, output['funcalls'])
+    npt.assert_equal(0, output['error_num'])
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_differentiable_functions.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_differentiable_functions.py
new file mode 100644
index 00000000..6123b769
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_differentiable_functions.py
@@ -0,0 +1,731 @@
+import pytest
+import numpy as np
+from numpy.testing import (TestCase, assert_array_almost_equal,
+                           assert_array_equal, assert_, assert_allclose,
+                           assert_equal)
+from scipy.sparse import csr_matrix
+from scipy.sparse.linalg import LinearOperator
+from scipy.optimize._differentiable_functions import (ScalarFunction,
+                                                      VectorFunction,
+                                                      LinearVectorFunction,
+                                                      IdentityVectorFunction)
+from scipy.optimize import rosen, rosen_der, rosen_hess
+from scipy.optimize._hessian_update_strategy import BFGS
+
+
+class ExScalarFunction:
+
+    def __init__(self):
+        self.nfev = 0
+        self.ngev = 0
+        self.nhev = 0
+
+    def fun(self, x):
+        self.nfev += 1
+        return 2*(x[0]**2 + x[1]**2 - 1) - x[0]
+
+    def grad(self, x):
+        self.ngev += 1
+        return np.array([4*x[0]-1, 4*x[1]])
+
+    def hess(self, x):
+        self.nhev += 1
+        return 4*np.eye(2)
+
+
+class TestScalarFunction(TestCase):
+
+    def test_finite_difference_grad(self):
+        ex = ExScalarFunction()
+        nfev = 0
+        ngev = 0
+
+        x0 = [1.0, 0.0]
+        analit = ScalarFunction(ex.fun, x0, (), ex.grad,
+                                ex.hess, None, (-np.inf, np.inf))
+        nfev += 1
+        ngev += 1
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev, nfev)
+        assert_array_equal(ex.ngev, ngev)
+        assert_array_equal(analit.ngev, ngev)
+        approx = ScalarFunction(ex.fun, x0, (), '2-point',
+                                ex.hess, None, (-np.inf, np.inf))
+        nfev += 3
+        ngev += 1
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(analit.ngev+approx.ngev, ngev)
+        assert_array_equal(analit.f, approx.f)
+        assert_array_almost_equal(analit.g, approx.g)
+
+        x = [10, 0.3]
+        f_analit = analit.fun(x)
+        g_analit = analit.grad(x)
+        nfev += 1
+        ngev += 1
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(analit.ngev+approx.ngev, ngev)
+        f_approx = approx.fun(x)
+        g_approx = approx.grad(x)
+        nfev += 3
+        ngev += 1
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(analit.ngev+approx.ngev, ngev)
+        assert_array_almost_equal(f_analit, f_approx)
+        assert_array_almost_equal(g_analit, g_approx)
+
+        x = [2.0, 1.0]
+        g_analit = analit.grad(x)
+        ngev += 1
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(analit.ngev+approx.ngev, ngev)
+
+        g_approx = approx.grad(x)
+        nfev += 3
+        ngev += 1
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(analit.ngev+approx.ngev, ngev)
+        assert_array_almost_equal(g_analit, g_approx)
+
+        x = [2.5, 0.3]
+        f_analit = analit.fun(x)
+        g_analit = analit.grad(x)
+        nfev += 1
+        ngev += 1
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(analit.ngev+approx.ngev, ngev)
+        f_approx = approx.fun(x)
+        g_approx = approx.grad(x)
+        nfev += 3
+        ngev += 1
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(analit.ngev+approx.ngev, ngev)
+        assert_array_almost_equal(f_analit, f_approx)
+        assert_array_almost_equal(g_analit, g_approx)
+
+        x = [2, 0.3]
+        f_analit = analit.fun(x)
+        g_analit = analit.grad(x)
+        nfev += 1
+        ngev += 1
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(analit.ngev+approx.ngev, ngev)
+        f_approx = approx.fun(x)
+        g_approx = approx.grad(x)
+        nfev += 3
+        ngev += 1
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(analit.ngev+approx.ngev, ngev)
+        assert_array_almost_equal(f_analit, f_approx)
+        assert_array_almost_equal(g_analit, g_approx)
+
+    def test_fun_and_grad(self):
+        ex = ExScalarFunction()
+
+        def fg_allclose(x, y):
+            assert_allclose(x[0], y[0])
+            assert_allclose(x[1], y[1])
+
+        # with analytic gradient
+        x0 = [2.0, 0.3]
+        analit = ScalarFunction(ex.fun, x0, (), ex.grad,
+                                ex.hess, None, (-np.inf, np.inf))
+
+        fg = ex.fun(x0), ex.grad(x0)
+        fg_allclose(analit.fun_and_grad(x0), fg)
+        assert analit.ngev == 1
+
+        x0[1] = 1.
+        fg = ex.fun(x0), ex.grad(x0)
+        fg_allclose(analit.fun_and_grad(x0), fg)
+
+        # with finite difference gradient
+        x0 = [2.0, 0.3]
+        sf = ScalarFunction(ex.fun, x0, (), '3-point',
+                            ex.hess, None, (-np.inf, np.inf))
+        assert sf.ngev == 1
+        fg = ex.fun(x0), ex.grad(x0)
+        fg_allclose(sf.fun_and_grad(x0), fg)
+        assert sf.ngev == 1
+
+        x0[1] = 1.
+        fg = ex.fun(x0), ex.grad(x0)
+        fg_allclose(sf.fun_and_grad(x0), fg)
+
+    def test_finite_difference_hess_linear_operator(self):
+        ex = ExScalarFunction()
+        nfev = 0
+        ngev = 0
+        nhev = 0
+
+        x0 = [1.0, 0.0]
+        analit = ScalarFunction(ex.fun, x0, (), ex.grad,
+                                ex.hess, None, (-np.inf, np.inf))
+        nfev += 1
+        ngev += 1
+        nhev += 1
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev, nfev)
+        assert_array_equal(ex.ngev, ngev)
+        assert_array_equal(analit.ngev, ngev)
+        assert_array_equal(ex.nhev, nhev)
+        assert_array_equal(analit.nhev, nhev)
+        approx = ScalarFunction(ex.fun, x0, (), ex.grad,
+                                '2-point', None, (-np.inf, np.inf))
+        assert_(isinstance(approx.H, LinearOperator))
+        for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
+            assert_array_equal(analit.f, approx.f)
+            assert_array_almost_equal(analit.g, approx.g)
+            assert_array_almost_equal(analit.H.dot(v), approx.H.dot(v))
+        nfev += 1
+        ngev += 4
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(ex.ngev, ngev)
+        assert_array_equal(analit.ngev+approx.ngev, ngev)
+        assert_array_equal(ex.nhev, nhev)
+        assert_array_equal(analit.nhev+approx.nhev, nhev)
+
+        x = [2.0, 1.0]
+        H_analit = analit.hess(x)
+        nhev += 1
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(ex.ngev, ngev)
+        assert_array_equal(analit.ngev+approx.ngev, ngev)
+        assert_array_equal(ex.nhev, nhev)
+        assert_array_equal(analit.nhev+approx.nhev, nhev)
+        H_approx = approx.hess(x)
+        assert_(isinstance(H_approx, LinearOperator))
+        for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
+            assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v))
+        ngev += 4
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(ex.ngev, ngev)
+        assert_array_equal(analit.ngev+approx.ngev, ngev)
+        assert_array_equal(ex.nhev, nhev)
+        assert_array_equal(analit.nhev+approx.nhev, nhev)
+
+        x = [2.1, 1.2]
+        H_analit = analit.hess(x)
+        nhev += 1
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(ex.ngev, ngev)
+        assert_array_equal(analit.ngev+approx.ngev, ngev)
+        assert_array_equal(ex.nhev, nhev)
+        assert_array_equal(analit.nhev+approx.nhev, nhev)
+        H_approx = approx.hess(x)
+        assert_(isinstance(H_approx, LinearOperator))
+        for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
+            assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v))
+        ngev += 4
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(ex.ngev, ngev)
+        assert_array_equal(analit.ngev+approx.ngev, ngev)
+        assert_array_equal(ex.nhev, nhev)
+        assert_array_equal(analit.nhev+approx.nhev, nhev)
+
+        x = [2.5, 0.3]
+        _ = analit.grad(x)
+        H_analit = analit.hess(x)
+        ngev += 1
+        nhev += 1
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(ex.ngev, ngev)
+        assert_array_equal(analit.ngev+approx.ngev, ngev)
+        assert_array_equal(ex.nhev, nhev)
+        assert_array_equal(analit.nhev+approx.nhev, nhev)
+        _ = approx.grad(x)
+        H_approx = approx.hess(x)
+        assert_(isinstance(H_approx, LinearOperator))
+        for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
+            assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v))
+        ngev += 4
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(ex.ngev, ngev)
+        assert_array_equal(analit.ngev+approx.ngev, ngev)
+        assert_array_equal(ex.nhev, nhev)
+        assert_array_equal(analit.nhev+approx.nhev, nhev)
+
+        x = [5.2, 2.3]
+        _ = analit.grad(x)
+        H_analit = analit.hess(x)
+        ngev += 1
+        nhev += 1
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(ex.ngev, ngev)
+        assert_array_equal(analit.ngev+approx.ngev, ngev)
+        assert_array_equal(ex.nhev, nhev)
+        assert_array_equal(analit.nhev+approx.nhev, nhev)
+        _ = approx.grad(x)
+        H_approx = approx.hess(x)
+        assert_(isinstance(H_approx, LinearOperator))
+        for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
+            assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v))
+        ngev += 4
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(ex.ngev, ngev)
+        assert_array_equal(analit.ngev+approx.ngev, ngev)
+        assert_array_equal(ex.nhev, nhev)
+        assert_array_equal(analit.nhev+approx.nhev, nhev)
+
+    def test_x_storage_overlap(self):
+        # ScalarFunction should not store references to arrays; it should
+        # store copies. This checks that in-place updates to the caller's
+        # array are picked up on the next evaluation without aliasing
+        # ScalarFunction.x.
+
+        def f(x):
+            return np.sum(np.asarray(x) ** 2)
+
+        x = np.array([1., 2., 3.])
+        sf = ScalarFunction(f, x, (), '3-point', lambda x: x, None, (-np.inf, np.inf))
+
+        assert x is not sf.x
+        assert_equal(sf.fun(x), 14.0)
+        assert x is not sf.x
+
+        x[0] = 0.
+        f1 = sf.fun(x)
+        assert_equal(f1, 13.0)
+
+        x[0] = 1
+        f2 = sf.fun(x)
+        assert_equal(f2, 14.0)
+        assert x is not sf.x
+
+        # now test with a HessianUpdate strategy specified
+        hess = BFGS()
+        x = np.array([1., 2., 3.])
+        sf = ScalarFunction(f, x, (), '3-point', hess, None, (-np.inf, np.inf))
+
+        assert x is not sf.x
+        assert_equal(sf.fun(x), 14.0)
+        assert x is not sf.x
+
+        x[0] = 0.
+        f1 = sf.fun(x)
+        assert_equal(f1, 13.0)
+
+        x[0] = 1
+        f2 = sf.fun(x)
+        assert_equal(f2, 14.0)
+        assert x is not sf.x
+
+        # gh13740: x is changed in-place by the user function
+        def ff(x):
+            x *= x    # overwrite x
+            return np.sum(x)
+
+        x = np.array([1., 2., 3.])
+        sf = ScalarFunction(
+            ff, x, (), '3-point', lambda x: x, None, (-np.inf, np.inf)
+        )
+        assert x is not sf.x
+        assert_equal(sf.fun(x), 14.0)
+        assert_equal(sf.x, np.array([1., 2., 3.]))
+        assert x is not sf.x
+
+    def test_lowest_x(self):
+        # ScalarFunction should remember the lowest func(x) visited.
+        x0 = np.array([2, 3, 4])
+        sf = ScalarFunction(rosen, x0, (), rosen_der, rosen_hess,
+                            None, None)
+        sf.fun([1, 1, 1])
+        sf.fun(x0)
+        sf.fun([1.01, 1, 1.0])
+        sf.grad([1.01, 1, 1.0])
+        assert_equal(sf._lowest_f, 0.0)
+        assert_equal(sf._lowest_x, [1.0, 1.0, 1.0])
+
+        sf = ScalarFunction(rosen, x0, (), '2-point', rosen_hess,
+                            None, (-np.inf, np.inf))
+        sf.fun([1, 1, 1])
+        sf.fun(x0)
+        sf.fun([1.01, 1, 1.0])
+        sf.grad([1.01, 1, 1.0])
+        assert_equal(sf._lowest_f, 0.0)
+        assert_equal(sf._lowest_x, [1.0, 1.0, 1.0])
+
+
+class ExVectorialFunction:
+
+    def __init__(self):
+        self.nfev = 0
+        self.njev = 0
+        self.nhev = 0
+
+    def fun(self, x):
+        self.nfev += 1
+        return np.array([2*(x[0]**2 + x[1]**2 - 1) - x[0],
+                         4*(x[0]**3 + x[1]**2 - 4) - 3*x[0]])
+
+    def jac(self, x):
+        self.njev += 1
+        return np.array([[4*x[0]-1, 4*x[1]],
+                         [12*x[0]**2-3, 8*x[1]]])
+
+    def hess(self, x, v):
+        self.nhev += 1
+        return v[0]*4*np.eye(2) + v[1]*np.array([[24*x[0], 0],
+                                                 [0, 8]])
+
+
+class TestVectorialFunction(TestCase):
+
+    def test_finite_difference_jac(self):
+        ex = ExVectorialFunction()
+        nfev = 0
+        njev = 0
+
+        x0 = [1.0, 0.0]
+        analit = VectorFunction(ex.fun, x0, ex.jac, ex.hess, None, None,
+                                (-np.inf, np.inf), None)
+        nfev += 1
+        njev += 1
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev, nfev)
+        assert_array_equal(ex.njev, njev)
+        assert_array_equal(analit.njev, njev)
+        approx = VectorFunction(ex.fun, x0, '2-point', ex.hess, None, None,
+                                (-np.inf, np.inf), None)
+        nfev += 3
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(ex.njev, njev)
+        assert_array_equal(analit.njev+approx.njev, njev)
+        assert_array_equal(analit.f, approx.f)
+        assert_array_almost_equal(analit.J, approx.J)
+
+        x = [10, 0.3]
+        f_analit = analit.fun(x)
+        J_analit = analit.jac(x)
+        nfev += 1
+        njev += 1
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(ex.njev, njev)
+        assert_array_equal(analit.njev+approx.njev, njev)
+        f_approx = approx.fun(x)
+        J_approx = approx.jac(x)
+        nfev += 3
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(ex.njev, njev)
+        assert_array_equal(analit.njev+approx.njev, njev)
+        assert_array_almost_equal(f_analit, f_approx)
+        assert_array_almost_equal(J_analit, J_approx, decimal=4)
+
+        x = [2.0, 1.0]
+        J_analit = analit.jac(x)
+        njev += 1
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(ex.njev, njev)
+        assert_array_equal(analit.njev+approx.njev, njev)
+        J_approx = approx.jac(x)
+        nfev += 3
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(ex.njev, njev)
+        assert_array_equal(analit.njev+approx.njev, njev)
+        assert_array_almost_equal(J_analit, J_approx)
+
+        x = [2.5, 0.3]
+        f_analit = analit.fun(x)
+        J_analit = analit.jac(x)
+        nfev += 1
+        njev += 1
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(ex.njev, njev)
+        assert_array_equal(analit.njev+approx.njev, njev)
+        f_approx = approx.fun(x)
+        J_approx = approx.jac(x)
+        nfev += 3
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(ex.njev, njev)
+        assert_array_equal(analit.njev+approx.njev, njev)
+        assert_array_almost_equal(f_analit, f_approx)
+        assert_array_almost_equal(J_analit, J_approx)
+
+        x = [2, 0.3]
+        f_analit = analit.fun(x)
+        J_analit = analit.jac(x)
+        nfev += 1
+        njev += 1
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(ex.njev, njev)
+        assert_array_equal(analit.njev+approx.njev, njev)
+        f_approx = approx.fun(x)
+        J_approx = approx.jac(x)
+        nfev += 3
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(ex.njev, njev)
+        assert_array_equal(analit.njev+approx.njev, njev)
+        assert_array_almost_equal(f_analit, f_approx)
+        assert_array_almost_equal(J_analit, J_approx)
+
+    def test_finite_difference_hess_linear_operator(self):
+        ex = ExVectorialFunction()
+        nfev = 0
+        njev = 0
+        nhev = 0
+
+        x0 = [1.0, 0.0]
+        v0 = [1.0, 2.0]
+        analit = VectorFunction(ex.fun, x0, ex.jac, ex.hess, None, None,
+                                (-np.inf, np.inf), None)
+        nfev += 1
+        njev += 1
+        nhev += 1
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev, nfev)
+        assert_array_equal(ex.njev, njev)
+        assert_array_equal(analit.njev, njev)
+        assert_array_equal(ex.nhev, nhev)
+        assert_array_equal(analit.nhev, nhev)
+        approx = VectorFunction(ex.fun, x0, ex.jac, '2-point', None, None,
+                                (-np.inf, np.inf), None)
+        assert_(isinstance(approx.H, LinearOperator))
+        for p in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
+            assert_array_equal(analit.f, approx.f)
+            assert_array_almost_equal(analit.J, approx.J)
+            assert_array_almost_equal(analit.H.dot(p), approx.H.dot(p))
+        nfev += 1
+        njev += 4
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(ex.njev, njev)
+        assert_array_equal(analit.njev+approx.njev, njev)
+        assert_array_equal(ex.nhev, nhev)
+        assert_array_equal(analit.nhev+approx.nhev, nhev)
+
+        x = [2.0, 1.0]
+        H_analit = analit.hess(x, v0)
+        nhev += 1
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(ex.njev, njev)
+        assert_array_equal(analit.njev+approx.njev, njev)
+        assert_array_equal(ex.nhev, nhev)
+        assert_array_equal(analit.nhev+approx.nhev, nhev)
+        H_approx = approx.hess(x, v0)
+        assert_(isinstance(H_approx, LinearOperator))
+        for p in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
+            assert_array_almost_equal(H_analit.dot(p), H_approx.dot(p),
+                                      decimal=5)
+        njev += 4
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(ex.njev, njev)
+        assert_array_equal(analit.njev+approx.njev, njev)
+        assert_array_equal(ex.nhev, nhev)
+        assert_array_equal(analit.nhev+approx.nhev, nhev)
+
+        x = [2.1, 1.2]
+        v = [1.0, 1.0]
+        H_analit = analit.hess(x, v)
+        nhev += 1
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(ex.njev, njev)
+        assert_array_equal(analit.njev+approx.njev, njev)
+        assert_array_equal(ex.nhev, nhev)
+        assert_array_equal(analit.nhev+approx.nhev, nhev)
+        H_approx = approx.hess(x, v)
+        assert_(isinstance(H_approx, LinearOperator))
+        for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
+            assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v))
+        njev += 4
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(ex.njev, njev)
+        assert_array_equal(analit.njev+approx.njev, njev)
+        assert_array_equal(ex.nhev, nhev)
+        assert_array_equal(analit.nhev+approx.nhev, nhev)
+
+        x = [2.5, 0.3]
+        _ = analit.jac(x)
+        H_analit = analit.hess(x, v0)
+        njev += 1
+        nhev += 1
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(ex.njev, njev)
+        assert_array_equal(analit.njev+approx.njev, njev)
+        assert_array_equal(ex.nhev, nhev)
+        assert_array_equal(analit.nhev+approx.nhev, nhev)
+        _ = approx.jac(x)
+        H_approx = approx.hess(x, v0)
+        assert_(isinstance(H_approx, LinearOperator))
+        for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
+            assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v), decimal=4)
+        njev += 4
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(ex.njev, njev)
+        assert_array_equal(analit.njev+approx.njev, njev)
+        assert_array_equal(ex.nhev, nhev)
+        assert_array_equal(analit.nhev+approx.nhev, nhev)
+
+        x = [5.2, 2.3]
+        v = [2.3, 5.2]
+        _ = analit.jac(x)
+        H_analit = analit.hess(x, v)
+        njev += 1
+        nhev += 1
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(ex.njev, njev)
+        assert_array_equal(analit.njev+approx.njev, njev)
+        assert_array_equal(ex.nhev, nhev)
+        assert_array_equal(analit.nhev+approx.nhev, nhev)
+        _ = approx.jac(x)
+        H_approx = approx.hess(x, v)
+        assert_(isinstance(H_approx, LinearOperator))
+        for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
+            assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v), decimal=4)
+        njev += 4
+        assert_array_equal(ex.nfev, nfev)
+        assert_array_equal(analit.nfev+approx.nfev, nfev)
+        assert_array_equal(ex.njev, njev)
+        assert_array_equal(analit.njev+approx.njev, njev)
+        assert_array_equal(ex.nhev, nhev)
+        assert_array_equal(analit.nhev+approx.nhev, nhev)
+
+    def test_x_storage_overlap(self):
+        # VectorFunction should not store references to arrays; it should
+        # store copies. This checks that in-place updates to the caller's
+        # array are picked up on the next evaluation without aliasing
+        # VectorFunction.x.
+        ex = ExVectorialFunction()
+        x0 = np.array([1.0, 0.0])
+
+        vf = VectorFunction(ex.fun, x0, '3-point', ex.hess, None, None,
+                            (-np.inf, np.inf), None)
+
+        assert x0 is not vf.x
+        assert_equal(vf.fun(x0), ex.fun(x0))
+        assert x0 is not vf.x
+
+        x0[0] = 2.
+        assert_equal(vf.fun(x0), ex.fun(x0))
+        assert x0 is not vf.x
+
+        x0[0] = 1.
+        assert_equal(vf.fun(x0), ex.fun(x0))
+        assert x0 is not vf.x
+
+        # now test with a HessianUpdate strategy specified
+        hess = BFGS()
+        x0 = np.array([1.0, 0.0])
+        vf = VectorFunction(ex.fun, x0, '3-point', hess, None, None,
+                            (-np.inf, np.inf), None)
+
+        with pytest.warns(UserWarning):
+            # catch the UserWarning emitted by the quasi-Newton (BFGS)
+            # Hessian approximation during these repeated evaluations.
+            assert x0 is not vf.x
+            assert_equal(vf.fun(x0), ex.fun(x0))
+            assert x0 is not vf.x
+
+            x0[0] = 2.
+            assert_equal(vf.fun(x0), ex.fun(x0))
+            assert x0 is not vf.x
+
+            x0[0] = 1.
+            assert_equal(vf.fun(x0), ex.fun(x0))
+            assert x0 is not vf.x
+
+
+def test_LinearVectorFunction():
+    A_dense = np.array([
+        [-1, 2, 0],
+        [0, 4, 2]
+    ])
+    x0 = np.zeros(3)
+    A_sparse = csr_matrix(A_dense)
+    x = np.array([1, -1, 0])
+    v = np.array([-1, 1])
+    Ax = np.array([-3, -4])
+
+    f1 = LinearVectorFunction(A_dense, x0, None)
+    assert_(not f1.sparse_jacobian)
+
+    f2 = LinearVectorFunction(A_dense, x0, True)
+    assert_(f2.sparse_jacobian)
+
+    f3 = LinearVectorFunction(A_dense, x0, False)
+    assert_(not f3.sparse_jacobian)
+
+    f4 = LinearVectorFunction(A_sparse, x0, None)
+    assert_(f4.sparse_jacobian)
+
+    f5 = LinearVectorFunction(A_sparse, x0, True)
+    assert_(f5.sparse_jacobian)
+
+    f6 = LinearVectorFunction(A_sparse, x0, False)
+    assert_(not f6.sparse_jacobian)
+
+    assert_array_equal(f1.fun(x), Ax)
+    assert_array_equal(f2.fun(x), Ax)
+    assert_array_equal(f1.jac(x), A_dense)
+    assert_array_equal(f2.jac(x).toarray(), A_sparse.toarray())
+    assert_array_equal(f1.hess(x, v).toarray(), np.zeros((3, 3)))
+
+
+def test_LinearVectorFunction_memoization():
+    A = np.array([[-1, 2, 0], [0, 4, 2]])
+    x0 = np.array([1, 2, -1])
+    fun = LinearVectorFunction(A, x0, False)
+
+    assert_array_equal(x0, fun.x)
+    assert_array_equal(A.dot(x0), fun.f)
+
+    x1 = np.array([-1, 3, 10])
+    assert_array_equal(A, fun.jac(x1))
+    assert_array_equal(x1, fun.x)
+    assert_array_equal(A.dot(x0), fun.f)
+    assert_array_equal(A.dot(x1), fun.fun(x1))
+    assert_array_equal(A.dot(x1), fun.f)
+
+
+def test_IdentityVectorFunction():
+    x0 = np.zeros(3)
+
+    f1 = IdentityVectorFunction(x0, None)
+    f2 = IdentityVectorFunction(x0, False)
+    f3 = IdentityVectorFunction(x0, True)
+
+    assert_(f1.sparse_jacobian)
+    assert_(not f2.sparse_jacobian)
+    assert_(f3.sparse_jacobian)
+
+    x = np.array([-1, 2, 1])
+    v = np.array([-2, 3, 0])
+
+    assert_array_equal(f1.fun(x), x)
+    assert_array_equal(f2.fun(x), x)
+
+    assert_array_equal(f1.jac(x).toarray(), np.eye(3))
+    assert_array_equal(f2.jac(x), np.eye(3))
+
+    assert_array_equal(f1.hess(x, v).toarray(), np.zeros((3, 3)))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_direct.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_direct.py
new file mode 100644
index 00000000..f131527d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_direct.py
@@ -0,0 +1,318 @@
+"""
+Unit test for DIRECT optimization algorithm.
+"""
+from numpy.testing import (assert_allclose,
+                           assert_array_less)
+import pytest
+import numpy as np
+from scipy.optimize import direct, Bounds
+
+
+class TestDIRECT:
+
+    def setup_method(self):
+        self.fun_calls = 0
+        self.bounds_sphere = 4*[(-2, 3)]
+        self.optimum_sphere_pos = np.zeros((4, ))
+        self.optimum_sphere = 0.0
+        self.bounds_stylinski_tang = Bounds([-4., -4.], [4., 4.])
+        self.maxiter = 1000
+
+    # test functions
+    def sphere(self, x):
+        self.fun_calls += 1
+        return np.square(x).sum()
+
+    def inv(self, x):
+        if np.sum(x) == 0:
+            raise ZeroDivisionError()
+        return 1/np.sum(x)
+
+    def nan_fun(self, x):
+        return np.nan
+
+    def inf_fun(self, x):
+        return np.inf
+
+    def styblinski_tang(self, pos):
+        x, y = pos
+        return 0.5 * (x**4 - 16 * x**2 + 5 * x + y**4 - 16 * y**2 + 5 * y)
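+    # (informal reference note: the 2-D Styblinski-Tang function attains its
+    # global minimum near x = y = -2.903534, with f about -78.332)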
+
+    @pytest.mark.parametrize("locally_biased", [True, False])
+    def test_direct(self, locally_biased):
+        res = direct(self.sphere, self.bounds_sphere,
+                     locally_biased=locally_biased)
+
+        # test accuracy
+        assert_allclose(res.x, self.optimum_sphere_pos,
+                        rtol=1e-3, atol=1e-3)
+        assert_allclose(res.fun, self.optimum_sphere, atol=1e-5, rtol=1e-5)
+
+        # test that result lies within bounds
+        _bounds = np.asarray(self.bounds_sphere)
+        assert_array_less(_bounds[:, 0], res.x)
+        assert_array_less(res.x, _bounds[:, 1])
+
+        # test the number of function evaluations. Original DIRECT overshoots
+        # by up to 500 evaluations in the last iteration.
+        assert res.nfev <= 1000 * (len(self.bounds_sphere) + 1)
+        # test that number of function evaluations is correct
+        assert res.nfev == self.fun_calls
+
+        # test that number of iterations is below supplied maximum
+        assert res.nit <= self.maxiter
+
+    @pytest.mark.parametrize("locally_biased", [True, False])
+    def test_direct_callback(self, locally_biased):
+        # test that callback does not change the result
+        res = direct(self.sphere, self.bounds_sphere,
+                     locally_biased=locally_biased)
+
+        def callback(x):
+            x = 2*x
+            dummy = np.square(x)
+            print("DIRECT minimization algorithm callback test")
+            return dummy
+
+        res_callback = direct(self.sphere, self.bounds_sphere,
+                              locally_biased=locally_biased,
+                              callback=callback)
+
+        assert_allclose(res.x, res_callback.x)
+
+        assert res.nit == res_callback.nit
+        assert res.nfev == res_callback.nfev
+        assert res.status == res_callback.status
+        assert res.success == res_callback.success
+        assert res.fun == res_callback.fun
+        assert_allclose(res.x, res_callback.x)
+        assert res.message == res_callback.message
+
+        # test accuracy
+        assert_allclose(res_callback.x, self.optimum_sphere_pos,
+                        rtol=1e-3, atol=1e-3)
+        assert_allclose(res_callback.fun, self.optimum_sphere,
+                        atol=1e-5, rtol=1e-5)
+
+    @pytest.mark.parametrize("locally_biased", [True, False])
+    def test_exception(self, locally_biased):
+        bounds = 4*[(-10, 10)]
+        with pytest.raises(ZeroDivisionError):
+            direct(self.inv, bounds=bounds,
+                   locally_biased=locally_biased)
+
+    @pytest.mark.parametrize("locally_biased", [True, False])
+    def test_nan(self, locally_biased):
+        bounds = 4*[(-10, 10)]
+        direct(self.nan_fun, bounds=bounds,
+               locally_biased=locally_biased)
+
+    @pytest.mark.parametrize("len_tol", [1e-3, 1e-4])
+    @pytest.mark.parametrize("locally_biased", [True, False])
+    def test_len_tol(self, len_tol, locally_biased):
+        bounds = 4*[(-10., 10.)]
+        res = direct(self.sphere, bounds=bounds, len_tol=len_tol,
+                     vol_tol=1e-30, locally_biased=locally_biased)
+        assert res.status == 5
+        assert res.success
+        assert_allclose(res.x, np.zeros((4, )))
+        message = ("The side length measure of the hyperrectangle containing "
+                   "the lowest function value found is below "
+                   f"len_tol={len_tol}")
+        assert res.message == message
+
+    @pytest.mark.parametrize("vol_tol", [1e-6, 1e-8])
+    @pytest.mark.parametrize("locally_biased", [True, False])
+    def test_vol_tol(self, vol_tol, locally_biased):
+        bounds = 4*[(-10., 10.)]
+        res = direct(self.sphere, bounds=bounds, vol_tol=vol_tol,
+                     len_tol=0., locally_biased=locally_biased)
+        assert res.status == 4
+        assert res.success
+        assert_allclose(res.x, np.zeros((4, )))
+        message = ("The volume of the hyperrectangle containing the lowest "
+                   f"function value found is below vol_tol={vol_tol}")
+        assert res.message == message
+
+    @pytest.mark.parametrize("f_min_rtol", [1e-3, 1e-5, 1e-7])
+    @pytest.mark.parametrize("locally_biased", [True, False])
+    def test_f_min(self, f_min_rtol, locally_biased):
+        # test that desired function value is reached within
+        # relative tolerance of f_min_rtol
+        f_min = 1.
+        bounds = 4*[(-2., 10.)]
+        res = direct(self.sphere, bounds=bounds, f_min=f_min,
+                     f_min_rtol=f_min_rtol,
+                     locally_biased=locally_biased)
+        assert res.status == 3
+        assert res.success
+        assert res.fun < f_min * (1. + f_min_rtol)
+        message = ("The best function value found is within a relative "
+                   f"error={f_min_rtol} of the (known) global optimum f_min")
+        assert res.message == message
+
+    def circle_with_args(self, x, a, b):
+        return np.square(x[0] - a) + np.square(x[1] - b).sum()
+
+    @pytest.mark.parametrize("locally_biased", [True, False])
+    def test_f_circle_with_args(self, locally_biased):
+        bounds = 2*[(-2.0, 2.0)]
+
+        res = direct(self.circle_with_args, bounds, args=(1, 1), maxfun=1250,
+                     locally_biased=locally_biased)
+        assert_allclose(res.x, np.array([1., 1.]), rtol=1e-5)
+
+    @pytest.mark.parametrize("locally_biased", [True, False])
+    def test_failure_maxfun(self, locally_biased):
+        # test that if optimization runs for the maximal number of
+        # evaluations, success = False is returned
+
+        maxfun = 100
+        result = direct(self.styblinski_tang, self.bounds_stylinski_tang,
+                        maxfun=maxfun, locally_biased=locally_biased)
+        assert result.success is False
+        assert result.status == 1
+        assert result.nfev >= maxfun
+        message = ("Number of function evaluations done is "
+                   f"larger than maxfun={maxfun}")
+        assert result.message == message
+
+    @pytest.mark.parametrize("locally_biased", [True, False])
+    def test_failure_maxiter(self, locally_biased):
+        # test that if optimization runs for the maximal number of
+        # iterations, success = False is returned
+
+        maxiter = 10
+        result = direct(self.styblinski_tang, self.bounds_stylinski_tang,
+                        maxiter=maxiter, locally_biased=locally_biased)
+        assert result.success is False
+        assert result.status == 2
+        assert result.nit >= maxiter
+        message = f"Number of iterations is larger than maxiter={maxiter}"
+        assert result.message == message
+
+    @pytest.mark.parametrize("locally_biased", [True, False])
+    def test_bounds_variants(self, locally_biased):
+        # test that new and old bounds yield same result
+
+        lb = [-6., 1., -5.]
+        ub = [-1., 3., 5.]
+        x_opt = np.array([-1., 1., 0.])
+        bounds_old = list(zip(lb, ub))
+        bounds_new = Bounds(lb, ub)
+
+        res_old_bounds = direct(self.sphere, bounds_old,
+                                locally_biased=locally_biased)
+        res_new_bounds = direct(self.sphere, bounds_new,
+                                locally_biased=locally_biased)
+
+        assert res_new_bounds.nfev == res_old_bounds.nfev
+        assert res_new_bounds.message == res_old_bounds.message
+        assert res_new_bounds.success == res_old_bounds.success
+        assert res_new_bounds.nit == res_old_bounds.nit
+        assert_allclose(res_new_bounds.x, res_old_bounds.x)
+        assert_allclose(res_new_bounds.x, x_opt, rtol=1e-2)
+
+    @pytest.mark.parametrize("locally_biased", [True, False])
+    @pytest.mark.parametrize("eps", [1e-5, 1e-4, 1e-3])
+    def test_epsilon(self, eps, locally_biased):
+        result = direct(self.styblinski_tang, self.bounds_stylinski_tang,
+                        eps=eps, vol_tol=1e-6,
+                        locally_biased=locally_biased)
+        assert result.status == 4
+        assert result.success
+
+    @pytest.mark.xslow
+    @pytest.mark.parametrize("locally_biased", [True, False])
+    def test_no_segmentation_fault(self, locally_biased):
+        # test that an excessive number of function evaluations
+        # does not result in segmentation fault
+        bounds = [(-5., 20.)] * 100
+        result = direct(self.sphere, bounds, maxfun=10000000,
+                        maxiter=1000000, locally_biased=locally_biased)
+        assert result is not None
+
+    @pytest.mark.parametrize("locally_biased", [True, False])
+    def test_inf_fun(self, locally_biased):
+        # test that an objective value of infinity does not crash DIRECT
+        bounds = [(-5., 5.)] * 2
+        result = direct(self.inf_fun, bounds,
+                        locally_biased=locally_biased)
+        assert result is not None
+
+    @pytest.mark.parametrize("len_tol", [-1, 2])
+    def test_len_tol_validation(self, len_tol):
+        error_msg = "len_tol must be between 0 and 1."
+        with pytest.raises(ValueError, match=error_msg):
+            direct(self.styblinski_tang, self.bounds_stylinski_tang,
+                   len_tol=len_tol)
+
+    @pytest.mark.parametrize("vol_tol", [-1, 2])
+    def test_vol_tol_validation(self, vol_tol):
+        error_msg = "vol_tol must be between 0 and 1."
+        with pytest.raises(ValueError, match=error_msg):
+            direct(self.styblinski_tang, self.bounds_stylinski_tang,
+                   vol_tol=vol_tol)
+
+    @pytest.mark.parametrize("f_min_rtol", [-1, 2])
+    def test_fmin_rtol_validation(self, f_min_rtol):
+        error_msg = "f_min_rtol must be between 0 and 1."
+        with pytest.raises(ValueError, match=error_msg):
+            direct(self.styblinski_tang, self.bounds_stylinski_tang,
+                   f_min_rtol=f_min_rtol, f_min=0.)
+
+    @pytest.mark.parametrize("maxfun", [1.5, "string", (1, 2)])
+    def test_maxfun_wrong_type(self, maxfun):
+        error_msg = "maxfun must be of type int."
+        with pytest.raises(ValueError, match=error_msg):
+            direct(self.styblinski_tang, self.bounds_stylinski_tang,
+                   maxfun=maxfun)
+
+    @pytest.mark.parametrize("maxiter", [1.5, "string", (1, 2)])
+    def test_maxiter_wrong_type(self, maxiter):
+        error_msg = "maxiter must be of type int."
+        with pytest.raises(ValueError, match=error_msg):
+            direct(self.styblinski_tang, self.bounds_stylinski_tang,
+                   maxiter=maxiter)
+
+    def test_negative_maxiter(self):
+        error_msg = "maxiter must be > 0."
+        with pytest.raises(ValueError, match=error_msg):
+            direct(self.styblinski_tang, self.bounds_stylinski_tang,
+                   maxiter=-1)
+
+    def test_negative_maxfun(self):
+        error_msg = "maxfun must be > 0."
+        with pytest.raises(ValueError, match=error_msg):
+            direct(self.styblinski_tang, self.bounds_stylinski_tang,
+                   maxfun=-1)
+
+    @pytest.mark.parametrize("bounds", ["bounds", 2., 0])
+    def test_invalid_bounds_type(self, bounds):
+        error_msg = ("bounds must be a sequence or "
+                     "instance of Bounds class")
+        with pytest.raises(ValueError, match=error_msg):
+            direct(self.styblinski_tang, bounds)
+
+    @pytest.mark.parametrize("bounds",
+                             [Bounds([-1., -1], [-2, 1]),
+                              Bounds([-np.nan, -1], [-2, np.nan]),
+                              ]
+                             )
+    def test_incorrect_bounds(self, bounds):
+        error_msg = 'Bounds are not consistent min < max'
+        with pytest.raises(ValueError, match=error_msg):
+            direct(self.styblinski_tang, bounds)
+
+    def test_inf_bounds(self):
+        error_msg = 'Bounds must not be inf.'
+        bounds = Bounds([-np.inf, -1], [-2, np.inf])
+        with pytest.raises(ValueError, match=error_msg):
+            direct(self.styblinski_tang, bounds)
+
+    @pytest.mark.parametrize("locally_biased", ["bias", [0, 0], 2.])
+    def test_locally_biased_validation(self, locally_biased):
+        error_msg = 'locally_biased must be True or False.'
+        with pytest.raises(ValueError, match=error_msg):
+            direct(self.styblinski_tang, self.bounds_stylinski_tang,
+                   locally_biased=locally_biased)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_hessian_update_strategy.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_hessian_update_strategy.py
new file mode 100644
index 00000000..39593aa9
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_hessian_update_strategy.py
@@ -0,0 +1,208 @@
+import numpy as np
+from copy import deepcopy
+from numpy.linalg import norm
+from numpy.testing import (TestCase, assert_array_almost_equal,
+                           assert_array_equal, assert_array_less)
+from scipy.optimize import (BFGS, SR1)
+
+
+class Rosenbrock:
+    """Rosenbrock function.
+
+    The following optimization problem:
+        minimize sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0)
+    """
+
+    def __init__(self, n=2, random_state=0):
+        rng = np.random.RandomState(random_state)
+        self.x0 = rng.uniform(-1, 1, n)
+        self.x_opt = np.ones(n)
+
+    def fun(self, x):
+        x = np.asarray(x)
+        r = np.sum(100.0 * (x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0,
+                   axis=0)
+        return r
+
+    def grad(self, x):
+        x = np.asarray(x)
+        xm = x[1:-1]
+        xm_m1 = x[:-2]
+        xm_p1 = x[2:]
+        der = np.zeros_like(x)
+        der[1:-1] = (200 * (xm - xm_m1**2) -
+                     400 * (xm_p1 - xm**2) * xm - 2 * (1 - xm))
+        der[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0])
+        der[-1] = 200 * (x[-1] - x[-2]**2)
+        return der
+
+    def hess(self, x):
+        x = np.atleast_1d(x)
+        H = np.diag(-400 * x[:-1], 1) - np.diag(400 * x[:-1], -1)
+        diagonal = np.zeros(len(x), dtype=x.dtype)
+        diagonal[0] = 1200 * x[0]**2 - 400 * x[1] + 2
+        diagonal[-1] = 200
+        diagonal[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:]
+        H = H + np.diag(diagonal)
+        return H
+
+
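+# A minimal editorial sketch (not part of the original suite): the analytic
+# grad() above can be sanity-checked against central finite differences,
+# assuming float64 and a step h ~ 1e-6.
+def _check_rosenbrock_grad(h=1e-6):
+    prob = Rosenbrock(n=5)
+    x = prob.x0
+    num_grad = np.zeros_like(x)
+    for i in range(x.size):
+        e = np.zeros_like(x)
+        e[i] = h
+        # central difference approximation of the i-th partial derivative
+        num_grad[i] = (prob.fun(x + e) - prob.fun(x - e)) / (2 * h)
+    return np.allclose(num_grad, prob.grad(x), atol=1e-4)
+
+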
+class TestHessianUpdateStrategy(TestCase):
+
+    def test_hessian_initialization(self):
+        quasi_newton = (BFGS(), SR1())
+
+        for qn in quasi_newton:
+            qn.initialize(5, 'hess')
+            B = qn.get_matrix()
+
+            assert_array_equal(B, np.eye(5))
+
+    # For this list of points, it is known that no exception occurs
+    # during the Hessian update, hence no update is skipped or damped.
+    def test_rosenbrock_with_no_exception(self):
+        # Define auxiliary problem
+        prob = Rosenbrock(n=5)
+        # Define iteration points
+        x_list = [[0.0976270, 0.4303787, 0.2055267, 0.0897663, -0.15269040],
+                  [0.1847239, 0.0505757, 0.2123832, 0.0255081, 0.00083286],
+                  [0.2142498, -0.0188480, 0.0503822, 0.0347033, 0.03323606],
+                  [0.2071680, -0.0185071, 0.0341337, -0.0139298, 0.02881750],
+                  [0.1533055, -0.0322935, 0.0280418, -0.0083592, 0.01503699],
+                  [0.1382378, -0.0276671, 0.0266161, -0.0074060, 0.02801610],
+                  [0.1651957, -0.0049124, 0.0269665, -0.0040025, 0.02138184],
+                  [0.2354930, 0.0443711, 0.0173959, 0.0041872, 0.00794563],
+                  [0.4168118, 0.1433867, 0.0111714, 0.0126265, -0.00658537],
+                  [0.4681972, 0.2153273, 0.0225249, 0.0152704, -0.00463809],
+                  [0.6023068, 0.3346815, 0.0731108, 0.0186618, -0.00371541],
+                  [0.6415743, 0.3985468, 0.1324422, 0.0214160, -0.00062401],
+                  [0.7503690, 0.5447616, 0.2804541, 0.0539851, 0.00242230],
+                  [0.7452626, 0.5644594, 0.3324679, 0.0865153, 0.00454960],
+                  [0.8059782, 0.6586838, 0.4229577, 0.1452990, 0.00976702],
+                  [0.8549542, 0.7226562, 0.4991309, 0.2420093, 0.02772661],
+                  [0.8571332, 0.7285741, 0.5279076, 0.2824549, 0.06030276],
+                  [0.8835633, 0.7727077, 0.5957984, 0.3411303, 0.09652185],
+                  [0.9071558, 0.8299587, 0.6771400, 0.4402896, 0.17469338],
+                  [0.9190793, 0.8486480, 0.7163332, 0.5083780, 0.26107691],
+                  [0.9371223, 0.8762177, 0.7653702, 0.5773109, 0.32181041],
+                  [0.9554613, 0.9119893, 0.8282687, 0.6776178, 0.43162744],
+                  [0.9545744, 0.9099264, 0.8270244, 0.6822220, 0.45237623],
+                  [0.9688112, 0.9351710, 0.8730961, 0.7546601, 0.56622448],
+                  [0.9743227, 0.9491953, 0.9005150, 0.8086497, 0.64505437],
+                  [0.9807345, 0.9638853, 0.9283012, 0.8631675, 0.73812581],
+                  [0.9886746, 0.9777760, 0.9558950, 0.9123417, 0.82726553],
+                  [0.9899096, 0.9803828, 0.9615592, 0.9255600, 0.85822149],
+                  [0.9969510, 0.9935441, 0.9864657, 0.9726775, 0.94358663],
+                  [0.9979533, 0.9960274, 0.9921724, 0.9837415, 0.96626288],
+                  [0.9995981, 0.9989171, 0.9974178, 0.9949954, 0.99023356],
+                  [1.0002640, 1.0005088, 1.0010594, 1.0021161, 1.00386912],
+                  [0.9998903, 0.9998459, 0.9997795, 0.9995484, 0.99916305],
+                  [1.0000008, 0.9999905, 0.9999481, 0.9998903, 0.99978047],
+                  [1.0000004, 0.9999983, 1.0000001, 1.0000031, 1.00000297],
+                  [0.9999995, 1.0000003, 1.0000005, 1.0000001, 1.00000032],
+                  [0.9999999, 0.9999997, 0.9999994, 0.9999989, 0.99999786],
+                  [0.9999999, 0.9999999, 0.9999999, 0.9999999, 0.99999991]]
+        # Compute gradients and differences between iteration points
+        grad_list = [prob.grad(x) for x in x_list]
+        delta_x = [np.array(x_list[i+1])-np.array(x_list[i])
+                   for i in range(len(x_list)-1)]
+        delta_grad = [grad_list[i+1]-grad_list[i]
+                      for i in range(len(grad_list)-1)]
+        # Check curvature condition
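+        # (s.T y > 0 is what keeps the BFGS approximation positive
+        # definite; were it violated, updates could be skipped or damped,
+        # which this test assumes never happens)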
+        for s, y in zip(delta_x, delta_grad):
+            if np.dot(s, y) <= 0:
+                raise ArithmeticError()
+        # Define QuasiNewton update
+        for quasi_newton in (BFGS(init_scale=1, min_curvature=1e-4),
+                             SR1(init_scale=1)):
+            hess = deepcopy(quasi_newton)
+            inv_hess = deepcopy(quasi_newton)
+            hess.initialize(len(x_list[0]), 'hess')
+            inv_hess.initialize(len(x_list[0]), 'inv_hess')
+            # Compare the Hessian and its inverse
+            for s, y in zip(delta_x, delta_grad):
+                hess.update(s, y)
+                inv_hess.update(s, y)
+                B = hess.get_matrix()
+                H = inv_hess.get_matrix()
+                assert_array_almost_equal(np.linalg.inv(B), H, decimal=10)
+            B_true = prob.hess(x_list[len(delta_x)])
+            assert_array_less(norm(B - B_true)/norm(B_true), 0.1)
+
+    def test_SR1_skip_update(self):
+        # Define auxiliary problem
+        prob = Rosenbrock(n=5)
+        # Define iteration points
+        x_list = [[0.0976270, 0.4303787, 0.2055267, 0.0897663, -0.15269040],
+                  [0.1847239, 0.0505757, 0.2123832, 0.0255081, 0.00083286],
+                  [0.2142498, -0.0188480, 0.0503822, 0.0347033, 0.03323606],
+                  [0.2071680, -0.0185071, 0.0341337, -0.0139298, 0.02881750],
+                  [0.1533055, -0.0322935, 0.0280418, -0.0083592, 0.01503699],
+                  [0.1382378, -0.0276671, 0.0266161, -0.0074060, 0.02801610],
+                  [0.1651957, -0.0049124, 0.0269665, -0.0040025, 0.02138184],
+                  [0.2354930, 0.0443711, 0.0173959, 0.0041872, 0.00794563],
+                  [0.4168118, 0.1433867, 0.0111714, 0.0126265, -0.00658537],
+                  [0.4681972, 0.2153273, 0.0225249, 0.0152704, -0.00463809],
+                  [0.6023068, 0.3346815, 0.0731108, 0.0186618, -0.00371541],
+                  [0.6415743, 0.3985468, 0.1324422, 0.0214160, -0.00062401],
+                  [0.7503690, 0.5447616, 0.2804541, 0.0539851, 0.00242230],
+                  [0.7452626, 0.5644594, 0.3324679, 0.0865153, 0.00454960],
+                  [0.8059782, 0.6586838, 0.4229577, 0.1452990, 0.00976702],
+                  [0.8549542, 0.7226562, 0.4991309, 0.2420093, 0.02772661],
+                  [0.8571332, 0.7285741, 0.5279076, 0.2824549, 0.06030276],
+                  [0.8835633, 0.7727077, 0.5957984, 0.3411303, 0.09652185],
+                  [0.9071558, 0.8299587, 0.6771400, 0.4402896, 0.17469338]]
+        # Compute gradients and differences between iteration points
+        grad_list = [prob.grad(x) for x in x_list]
+        delta_x = [np.array(x_list[i+1])-np.array(x_list[i])
+                   for i in range(len(x_list)-1)]
+        delta_grad = [grad_list[i+1]-grad_list[i]
+                      for i in range(len(grad_list)-1)]
+        hess = SR1(init_scale=1, min_denominator=1e-2)
+        hess.initialize(len(x_list[0]), 'hess')
+        # Apply all but the last update
+        for i in range(len(delta_x)-1):
+            s = delta_x[i]
+            y = delta_grad[i]
+            hess.update(s, y)
+        # Test skip update
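+        # (SR1 skips an update when the denominator s.T (y - B s) is small
+        # relative to min_denominator; step 17 is assumed to trigger that
+        # condition, leaving B unchanged)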
+        B = np.copy(hess.get_matrix())
+        s = delta_x[17]
+        y = delta_grad[17]
+        hess.update(s, y)
+        B_updated = np.copy(hess.get_matrix())
+        assert_array_equal(B, B_updated)
+
+    def test_BFGS_skip_update(self):
+        # Define auxiliary problem
+        prob = Rosenbrock(n=5)
+        # Define iteration points
+        x_list = [[0.0976270, 0.4303787, 0.2055267, 0.0897663, -0.15269040],
+                  [0.1847239, 0.0505757, 0.2123832, 0.0255081, 0.00083286],
+                  [0.2142498, -0.0188480, 0.0503822, 0.0347033, 0.03323606],
+                  [0.2071680, -0.0185071, 0.0341337, -0.0139298, 0.02881750],
+                  [0.1533055, -0.0322935, 0.0280418, -0.0083592, 0.01503699],
+                  [0.1382378, -0.0276671, 0.0266161, -0.0074060, 0.02801610],
+                  [0.1651957, -0.0049124, 0.0269665, -0.0040025, 0.02138184]]
+        # Compute gradients and differences between iteration points
+        grad_list = [prob.grad(x) for x in x_list]
+        delta_x = [np.array(x_list[i+1])-np.array(x_list[i])
+                   for i in range(len(x_list)-1)]
+        delta_grad = [grad_list[i+1]-grad_list[i]
+                      for i in range(len(grad_list)-1)]
+        hess = BFGS(init_scale=1, min_curvature=10)
+        hess.initialize(len(x_list[0]), 'hess')
+        # Apply all but the last update
+        for i in range(len(delta_x)-1):
+            s = delta_x[i]
+            y = delta_grad[i]
+            hess.update(s, y)
+        # Test skip update
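+        # (with min_curvature=10 the curvature condition is assumed to fail
+        # for step 5, so BFGS's default 'skip_update' strategy should leave
+        # B untouched)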
+        B = np.copy(hess.get_matrix())
+        s = delta_x[5]
+        y = delta_grad[5]
+        hess.update(s, y)
+        B_updated = np.copy(hess.get_matrix())
+        assert_array_equal(B, B_updated)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_lbfgsb_hessinv.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_lbfgsb_hessinv.py
new file mode 100644
index 00000000..8e4452cd
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_lbfgsb_hessinv.py
@@ -0,0 +1,43 @@
+import numpy as np
+from numpy.testing import assert_allclose
+import scipy.linalg
+from scipy.optimize import minimize
+
+
+def test_1():
+    def f(x):
+        return x**4, 4*x**3
+
+    for gtol in [1e-8, 1e-12, 1e-20]:
+        for maxcor in range(20, 35):
+            result = minimize(fun=f, jac=True, method='L-BFGS-B', x0=20,
+                options={'gtol': gtol, 'maxcor': maxcor})
+
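+            # (result.hess_inv for L-BFGS-B is a LinearOperator-like
+            # LbfgsInvHessProduct: calling it applies the inverse-Hessian
+            # approximation to a vector, todense() builds the full matrix)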
+            H1 = result.hess_inv(np.array([1])).reshape(1,1)
+            H2 = result.hess_inv.todense()
+
+            assert_allclose(H1, H2)
+
+
+def test_2():
+    H0 = [[3, 0], [1, 2]]
+
+    def f(x):
+        return np.dot(x, np.dot(scipy.linalg.inv(H0), x))
+
+    result1 = minimize(fun=f, method='L-BFGS-B', x0=[10, 20])
+    result2 = minimize(fun=f, method='BFGS', x0=[10, 20])
+
+    H1 = result1.hess_inv.todense()
+
+    H2 = np.vstack((
+        result1.hess_inv(np.array([1, 0])),
+        result1.hess_inv(np.array([0, 1]))))
+
+    assert_allclose(
+        result1.hess_inv(np.array([1, 0]).reshape(2,1)).reshape(-1),
+        result1.hess_inv(np.array([1, 0])))
+    assert_allclose(H1, H2)
+    assert_allclose(H1, result2.hess_inv, rtol=1e-2, atol=0.03)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_lbfgsb_setulb.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_lbfgsb_setulb.py
new file mode 100644
index 00000000..3d9fc254
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_lbfgsb_setulb.py
@@ -0,0 +1,116 @@
+import numpy as np
+from scipy.optimize import _lbfgsb
+
+
+def objfun(x):
+    """simplified objective func to test lbfgsb bound violation"""
+    x0 = [0.8750000000000278,
+          0.7500000000000153,
+          0.9499999999999722,
+          0.8214285714285992,
+          0.6363636363636085]
+    x1 = [1.0, 0.0, 1.0, 0.0, 0.0]
+    x2 = [1.0,
+          0.0,
+          0.9889733043149325,
+          0.0,
+          0.026353554421041155]
+    x3 = [1.0,
+          0.0,
+          0.9889917442915558,
+          0.0,
+          0.020341986743231205]
+
+    f0 = 5163.647901211178
+    f1 = 5149.8181642072905
+    f2 = 5149.379332309634
+    f3 = 5149.374490771297
+
+    g0 = np.array([-0.5934820547965749,
+                   1.6251549718258351,
+                   -71.99168459202559,
+                   5.346636965797545,
+                   37.10732723092604])
+    g1 = np.array([-0.43295349282641515,
+                   1.008607936794592,
+                   18.223666726602975,
+                   31.927010036981997,
+                   -19.667512518739386])
+    g2 = np.array([-0.4699874455100256,
+                   0.9466285353668347,
+                   -0.016874360242016825,
+                   48.44999161133457,
+                   5.819631620590712])
+    g3 = np.array([-0.46970678696829116,
+                   0.9612719312174818,
+                   0.006129809488833699,
+                   48.43557729419473,
+                   6.005481418498221])
+
+    if np.allclose(x, x0):
+        f = f0
+        g = g0
+    elif np.allclose(x, x1):
+        f = f1
+        g = g1
+    elif np.allclose(x, x2):
+        f = f2
+        g = g2
+    elif np.allclose(x, x3):
+        f = f3
+        g = g3
+    else:
+        raise ValueError(
+            'Simplified objective function not defined '
+            'at requested point')
+    return (np.copy(f), np.copy(g))
+
+
+def test_setulb_floatround():
+    """test if setulb() violates bounds
+
+    checks for violation due to floating point rounding error
+    """
+
+    n = 5
+    m = 10
+    factr = 1e7
+    pgtol = 1e-5
+    maxls = 20
+    iprint = -1
+    nbd = np.full((n,), 2)
+    low_bnd = np.zeros(n, np.float64)
+    upper_bnd = np.ones(n, np.float64)
+
+    x0 = np.array(
+        [0.8750000000000278,
+         0.7500000000000153,
+         0.9499999999999722,
+         0.8214285714285992,
+         0.6363636363636085])
+    x = np.copy(x0)
+
+    f = np.array(0.0, np.float64)
+    g = np.zeros(n, np.float64)
+
+    fortran_int = _lbfgsb.types.intvar.dtype
+
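+    # (workspace sizes follow the L-BFGS-B documentation for setulb:
+    # 2*m*n + 5*n + 11*m*m + 8*m doubles for wa and 3*n ints for iwa)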
+    wa = np.zeros(2*m*n + 5*n + 11*m*m + 8*m, np.float64)
+    iwa = np.zeros(3*n, fortran_int)
+    task = np.zeros(1, 'S60')
+    csave = np.zeros(1, 'S60')
+    lsave = np.zeros(4, fortran_int)
+    isave = np.zeros(44, fortran_int)
+    dsave = np.zeros(29, np.float64)
+
+    task[:] = b'START'
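+    # (setulb is a reverse-communication routine: it rewrites `task` to
+    # request fresh f and g evaluations, which the loop below supplies)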
+
+    for n_iter in range(7):  # 7 steps required to reproduce error
+        f, g = objfun(x)
+
+        _lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr,
+                       pgtol, wa, iwa, task, iprint, csave, lsave,
+                       isave, dsave, maxls)
+
+        assert (x <= upper_bnd).all() and (x >= low_bnd).all(), (
+            "_lbfgsb.setulb() stepped to a point outside of the bounds")
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_least_squares.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_least_squares.py
new file mode 100644
index 00000000..d419a828
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_least_squares.py
@@ -0,0 +1,811 @@
+from itertools import product
+
+import numpy as np
+from numpy.linalg import norm
+from numpy.testing import (assert_, assert_allclose,
+                           assert_equal, suppress_warnings)
+from pytest import raises as assert_raises
+from scipy.sparse import issparse, lil_matrix
+from scipy.sparse.linalg import aslinearoperator
+
+from scipy.optimize import least_squares, Bounds
+from scipy.optimize._lsq.least_squares import IMPLEMENTED_LOSSES
+from scipy.optimize._lsq.common import EPS, make_strictly_feasible
+
+
+def fun_trivial(x, a=0):
+    return (x - a)**2 + 5.0
+
+
+def jac_trivial(x, a=0.0):
+    return 2 * (x - a)
+
+
+def fun_2d_trivial(x):
+    return np.array([x[0], x[1]])
+
+
+def jac_2d_trivial(x):
+    return np.identity(2)
+
+
+def fun_rosenbrock(x):
+    return np.array([10 * (x[1] - x[0]**2), (1 - x[0])])
+
+
+def jac_rosenbrock(x):
+    return np.array([
+        [-20 * x[0], 10],
+        [-1, 0]
+    ])
+
+
+def jac_rosenbrock_bad_dim(x):
+    return np.array([
+        [-20 * x[0], 10],
+        [-1, 0],
+        [0.0, 0.0]
+    ])
+
+
+def fun_rosenbrock_cropped(x):
+    return fun_rosenbrock(x)[0]
+
+
+def jac_rosenbrock_cropped(x):
+    return jac_rosenbrock(x)[0]
+
+
+# When x is 1-D array, return is 2-D array.
+def fun_wrong_dimensions(x):
+    return np.array([x, x**2, x**3])
+
+
+def jac_wrong_dimensions(x, a=0.0):
+    return np.atleast_3d(jac_trivial(x, a=a))
+
+
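+# (fun_bvp is the residual of a discrete boundary value problem: the
+# 5-point Laplacian of u on an n x n interior grid with zero boundary
+# values, plus the nonlinear term u**3)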
+def fun_bvp(x):
+    n = int(np.sqrt(x.shape[0]))
+    u = np.zeros((n + 2, n + 2))
+    x = x.reshape((n, n))
+    u[1:-1, 1:-1] = x
+    y = u[:-2, 1:-1] + u[2:, 1:-1] + u[1:-1, :-2] + u[1:-1, 2:] - 4 * x + x**3
+    return y.ravel()
+
+
+class BroydenTridiagonal:
+    def __init__(self, n=100, mode='sparse'):
+        np.random.seed(0)
+
+        self.n = n
+
+        self.x0 = -np.ones(n)
+        self.lb = np.linspace(-2, -1.5, n)
+        self.ub = np.linspace(-0.8, 0.0, n)
+
+        self.lb += 0.1 * np.random.randn(n)
+        self.ub += 0.1 * np.random.randn(n)
+
+        self.x0 += 0.1 * np.random.randn(n)
+        self.x0 = make_strictly_feasible(self.x0, self.lb, self.ub)
+
+        if mode == 'sparse':
+            self.sparsity = lil_matrix((n, n), dtype=int)
+            i = np.arange(n)
+            self.sparsity[i, i] = 1
+            i = np.arange(1, n)
+            self.sparsity[i, i - 1] = 1
+            i = np.arange(n - 1)
+            self.sparsity[i, i + 1] = 1
+
+            self.jac = self._jac
+        elif mode == 'operator':
+            self.jac = lambda x: aslinearoperator(self._jac(x))
+        elif mode == 'dense':
+            self.sparsity = None
+            self.jac = lambda x: self._jac(x).toarray()
+        else:
+            assert_(False)
+
+    def fun(self, x):
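+        # Broyden tridiagonal residuals:
+        #   f_i = (3 - x_i) * x_i + 1 - x_{i-1} - 2 * x_{i+1}
+        # (terms with out-of-range indices drop out at the ends)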
+        f = (3 - x) * x + 1
+        f[1:] -= x[:-1]
+        f[:-1] -= 2 * x[1:]
+        return f
+
+    def _jac(self, x):
+        J = lil_matrix((self.n, self.n))
+        i = np.arange(self.n)
+        J[i, i] = 3 - 2 * x
+        i = np.arange(1, self.n)
+        J[i, i - 1] = -1
+        i = np.arange(self.n - 1)
+        J[i, i + 1] = -2
+        return J
+
+
+class ExponentialFittingProblem:
+    """Provide data and function for exponential fitting in the form
+    y = a + exp(b * x) + noise."""
+
+    def __init__(self, a, b, noise, n_outliers=1, x_range=(-1, 1),
+                 n_points=11, random_seed=None):
+        np.random.seed(random_seed)
+        self.m = n_points
+        self.n = 2
+
+        self.p0 = np.zeros(2)
+        self.x = np.linspace(x_range[0], x_range[1], n_points)
+
+        self.y = a + np.exp(b * self.x)
+        self.y += noise * np.random.randn(self.m)
+
+        outliers = np.random.randint(0, self.m, n_outliers)
+        self.y[outliers] += 50 * noise * np.random.rand(n_outliers)
+
+        self.p_opt = np.array([a, b])
+
+    def fun(self, p):
+        return p[0] + np.exp(p[1] * self.x) - self.y
+
+    def jac(self, p):
+        J = np.empty((self.m, self.n))
+        J[:, 0] = 1
+        J[:, 1] = self.x * np.exp(p[1] * self.x)
+        return J
+
+
+def cubic_soft_l1(z):
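+    # (a custom loss for least_squares must return rho(z), rho'(z) and
+    # rho''(z) stacked as three rows; here rho(z) = 3*((1 + z)**(1/3) - 1))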
+    rho = np.empty((3, z.size))
+
+    t = 1 + z
+    rho[0] = 3 * (t**(1/3) - 1)
+    rho[1] = t ** (-2/3)
+    rho[2] = -2/3 * t**(-5/3)
+
+    return rho
+
+
+LOSSES = list(IMPLEMENTED_LOSSES.keys()) + [cubic_soft_l1]
+
+
+class BaseMixin:
+    def test_basic(self):
+        # Test that the basic calling sequence works.
+        res = least_squares(fun_trivial, 2., method=self.method)
+        assert_allclose(res.x, 0, atol=1e-4)
+        assert_allclose(res.fun, fun_trivial(res.x))
+
+    def test_args_kwargs(self):
+        # Test that args and kwargs are passed correctly to the functions.
+        a = 3.0
+        for jac in ['2-point', '3-point', 'cs', jac_trivial]:
+            with suppress_warnings() as sup:
+                sup.filter(UserWarning,
+                           "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'")
+                res = least_squares(fun_trivial, 2.0, jac, args=(a,),
+                                    method=self.method)
+                res1 = least_squares(fun_trivial, 2.0, jac, kwargs={'a': a},
+                                     method=self.method)
+
+            assert_allclose(res.x, a, rtol=1e-4)
+            assert_allclose(res1.x, a, rtol=1e-4)
+
+            assert_raises(TypeError, least_squares, fun_trivial, 2.0,
+                          args=(3, 4,), method=self.method)
+            assert_raises(TypeError, least_squares, fun_trivial, 2.0,
+                          kwargs={'kaboom': 3}, method=self.method)
+
+    def test_jac_options(self):
+        for jac in ['2-point', '3-point', 'cs', jac_trivial]:
+            with suppress_warnings() as sup:
+                sup.filter(UserWarning,
+                           "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'")
+                res = least_squares(fun_trivial, 2.0, jac, method=self.method)
+            assert_allclose(res.x, 0, atol=1e-4)
+
+        assert_raises(ValueError, least_squares, fun_trivial, 2.0, jac='oops',
+                      method=self.method)
+
+    def test_nfev_options(self):
+        for max_nfev in [None, 20]:
+            res = least_squares(fun_trivial, 2.0, max_nfev=max_nfev,
+                                method=self.method)
+            assert_allclose(res.x, 0, atol=1e-4)
+
+    def test_x_scale_options(self):
+        for x_scale in [1.0, np.array([0.5]), 'jac']:
+            res = least_squares(fun_trivial, 2.0, x_scale=x_scale)
+            assert_allclose(res.x, 0)
+        assert_raises(ValueError, least_squares, fun_trivial,
+                      2.0, x_scale='auto', method=self.method)
+        assert_raises(ValueError, least_squares, fun_trivial,
+                      2.0, x_scale=-1.0, method=self.method)
+        assert_raises(ValueError, least_squares, fun_trivial,
+                      2.0, x_scale=None, method=self.method)
+        assert_raises(ValueError, least_squares, fun_trivial,
+                      2.0, x_scale=1.0+2.0j, method=self.method)
+
+    def test_diff_step(self):
+        # res1 and res2 should be equivalent.
+        # res2 and res3 should be different.
+        res1 = least_squares(fun_trivial, 2.0, diff_step=1e-1,
+                             method=self.method)
+        res2 = least_squares(fun_trivial, 2.0, diff_step=-1e-1,
+                             method=self.method)
+        res3 = least_squares(fun_trivial, 2.0,
+                             diff_step=None, method=self.method)
+        assert_allclose(res1.x, 0, atol=1e-4)
+        assert_allclose(res2.x, 0, atol=1e-4)
+        assert_allclose(res3.x, 0, atol=1e-4)
+        assert_equal(res1.x, res2.x)
+        assert_equal(res1.nfev, res2.nfev)
+
+    def test_incorrect_options_usage(self):
+        assert_raises(TypeError, least_squares, fun_trivial, 2.0,
+                      method=self.method, options={'no_such_option': 100})
+        assert_raises(TypeError, least_squares, fun_trivial, 2.0,
+                      method=self.method, options={'max_nfev': 100})
+
+    def test_full_result(self):
+        # MINPACK doesn't work very well with factor=100 on this problem,
+        # hence the loose 'atol' used below.
+        res = least_squares(fun_trivial, 2.0, method=self.method)
+        assert_allclose(res.x, 0, atol=1e-4)
+        assert_allclose(res.cost, 12.5)
+        assert_allclose(res.fun, 5)
+        assert_allclose(res.jac, 0, atol=1e-4)
+        assert_allclose(res.grad, 0, atol=1e-2)
+        assert_allclose(res.optimality, 0, atol=1e-2)
+        assert_equal(res.active_mask, 0)
+        if self.method == 'lm':
+            assert_(res.nfev < 30)
+            assert_(res.njev is None)
+        else:
+            assert_(res.nfev < 10)
+            assert_(res.njev < 10)
+        assert_(res.status > 0)
+        assert_(res.success)
+
+    def test_full_result_single_fev(self):
+        # MINPACK checks the number of nfev after the iteration,
+        # so it's hard to tell what it is going to compute.
+        if self.method == 'lm':
+            return
+
+        res = least_squares(fun_trivial, 2.0, method=self.method,
+                            max_nfev=1)
+        assert_equal(res.x, np.array([2]))
+        assert_equal(res.cost, 40.5)
+        assert_equal(res.fun, np.array([9]))
+        assert_equal(res.jac, np.array([[4]]))
+        assert_equal(res.grad, np.array([36]))
+        assert_equal(res.optimality, 36)
+        assert_equal(res.active_mask, np.array([0]))
+        assert_equal(res.nfev, 1)
+        assert_equal(res.njev, 1)
+        assert_equal(res.status, 0)
+        assert_equal(res.success, 0)
+
+    def test_rosenbrock(self):
+        x0 = [-2, 1]
+        x_opt = [1, 1]
+        for jac, x_scale, tr_solver in product(
+                ['2-point', '3-point', 'cs', jac_rosenbrock],
+                [1.0, np.array([1.0, 0.2]), 'jac'],
+                ['exact', 'lsmr']):
+            with suppress_warnings() as sup:
+                sup.filter(UserWarning,
+                           "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'")
+                res = least_squares(fun_rosenbrock, x0, jac, x_scale=x_scale,
+                                    tr_solver=tr_solver, method=self.method)
+            assert_allclose(res.x, x_opt)
+
+    def test_rosenbrock_cropped(self):
+        x0 = [-2, 1]
+        if self.method == 'lm':
+            assert_raises(ValueError, least_squares, fun_rosenbrock_cropped,
+                          x0, method='lm')
+        else:
+            for jac, x_scale, tr_solver in product(
+                    ['2-point', '3-point', 'cs', jac_rosenbrock_cropped],
+                    [1.0, np.array([1.0, 0.2]), 'jac'],
+                    ['exact', 'lsmr']):
+                res = least_squares(
+                    fun_rosenbrock_cropped, x0, jac, x_scale=x_scale,
+                    tr_solver=tr_solver, method=self.method)
+                assert_allclose(res.cost, 0, atol=1e-14)
+
+    def test_fun_wrong_dimensions(self):
+        assert_raises(ValueError, least_squares, fun_wrong_dimensions,
+                      2.0, method=self.method)
+
+    def test_jac_wrong_dimensions(self):
+        assert_raises(ValueError, least_squares, fun_trivial,
+                      2.0, jac_wrong_dimensions, method=self.method)
+
+    def test_fun_and_jac_inconsistent_dimensions(self):
+        x0 = [1, 2]
+        assert_raises(ValueError, least_squares, fun_rosenbrock, x0,
+                      jac_rosenbrock_bad_dim, method=self.method)
+
+    def test_x0_multidimensional(self):
+        x0 = np.ones(4).reshape(2, 2)
+        assert_raises(ValueError, least_squares, fun_trivial, x0,
+                      method=self.method)
+
+    def test_x0_complex_scalar(self):
+        x0 = 2.0 + 0.0*1j
+        assert_raises(ValueError, least_squares, fun_trivial, x0,
+                      method=self.method)
+
+    def test_x0_complex_array(self):
+        x0 = [1.0, 2.0 + 0.0*1j]
+        assert_raises(ValueError, least_squares, fun_trivial, x0,
+                      method=self.method)
+
+    def test_bvp(self):
+        # This test was introduced with fix #5556. It turned out that the
+        # dogbox solver had a bug in its trust-region radius update which
+        # could block progress and create an infinite loop, and this
+        # discrete boundary value problem is the one that triggers it.
+        n = 10
+        x0 = np.ones(n**2)
+        if self.method == 'lm':
+            max_nfev = 5000  # To account for Jacobian estimation.
+        else:
+            max_nfev = 100
+        res = least_squares(fun_bvp, x0, ftol=1e-2, method=self.method,
+                            max_nfev=max_nfev)
+
+        assert_(res.nfev < max_nfev)
+        assert_(res.cost < 0.5)
+
+    def test_error_raised_when_all_tolerances_below_eps(self):
+        # Test that all 0 tolerances are not allowed.
+        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
+                      method=self.method, ftol=None, xtol=None, gtol=None)
+
+    def test_convergence_with_only_one_tolerance_enabled(self):
+        if self.method == 'lm':
+            return  # should not do test
+        x0 = [-2, 1]
+        x_opt = [1, 1]
+        for ftol, xtol, gtol in [(1e-8, None, None),
+                                  (None, 1e-8, None),
+                                  (None, None, 1e-8)]:
+            res = least_squares(fun_rosenbrock, x0, jac=jac_rosenbrock,
+                                ftol=ftol, gtol=gtol, xtol=xtol,
+                                method=self.method)
+            assert_allclose(res.x, x_opt)
+
+
+class BoundsMixin:
+    def test_inconsistent(self):
+        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
+                      bounds=(10.0, 0.0), method=self.method)
+
+    def test_infeasible(self):
+        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
+                      bounds=(3., 4), method=self.method)
+
+    def test_wrong_number(self):
+        assert_raises(ValueError, least_squares, fun_trivial, 2.,
+                      bounds=(1., 2, 3), method=self.method)
+
+    def test_inconsistent_shape(self):
+        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
+                      bounds=(1.0, [2.0, 3.0]), method=self.method)
+        # A 1-D array won't be broadcast
+        assert_raises(ValueError, least_squares, fun_rosenbrock, [1.0, 2.0],
+                      bounds=([0.0], [3.0, 4.0]), method=self.method)
+
+    def test_in_bounds(self):
+        for jac in ['2-point', '3-point', 'cs', jac_trivial]:
+            res = least_squares(fun_trivial, 2.0, jac=jac,
+                                bounds=(-1.0, 3.0), method=self.method)
+            assert_allclose(res.x, 0.0, atol=1e-4)
+            assert_equal(res.active_mask, [0])
+            assert_(-1 <= res.x <= 3)
+            res = least_squares(fun_trivial, 2.0, jac=jac,
+                                bounds=(0.5, 3.0), method=self.method)
+            assert_allclose(res.x, 0.5, atol=1e-4)
+            assert_equal(res.active_mask, [-1])
+            assert_(0.5 <= res.x <= 3)
+
+    def test_bounds_shape(self):
+        def get_bounds_direct(lb, ub):
+            return lb, ub
+
+        def get_bounds_instances(lb, ub):
+            return Bounds(lb, ub)
+
+        for jac in ['2-point', '3-point', 'cs', jac_2d_trivial]:
+            for bounds_func in [get_bounds_direct, get_bounds_instances]:
+                x0 = [1.0, 1.0]
+                res = least_squares(fun_2d_trivial, x0, jac=jac)
+                assert_allclose(res.x, [0.0, 0.0])
+                res = least_squares(fun_2d_trivial, x0, jac=jac,
+                                    bounds=bounds_func(0.5, [2.0, 2.0]),
+                                    method=self.method)
+                assert_allclose(res.x, [0.5, 0.5])
+                res = least_squares(fun_2d_trivial, x0, jac=jac,
+                                    bounds=bounds_func([0.3, 0.2], 3.0),
+                                    method=self.method)
+                assert_allclose(res.x, [0.3, 0.2])
+                res = least_squares(
+                    fun_2d_trivial, x0, jac=jac,
+                    bounds=bounds_func([-1, 0.5], [1.0, 3.0]),
+                    method=self.method)
+                assert_allclose(res.x, [0.0, 0.5], atol=1e-5)
+
+    def test_bounds_instances(self):
+        res = least_squares(fun_trivial, 0.5, bounds=Bounds())
+        assert_allclose(res.x, 0.0, atol=1e-4)
+
+        res = least_squares(fun_trivial, 3.0, bounds=Bounds(lb=1.0))
+        assert_allclose(res.x, 1.0, atol=1e-4)
+
+        res = least_squares(fun_trivial, 0.5, bounds=Bounds(lb=-1.0, ub=1.0))
+        assert_allclose(res.x, 0.0, atol=1e-4)
+
+        res = least_squares(fun_trivial, -3.0, bounds=Bounds(ub=-1.0))
+        assert_allclose(res.x, -1.0, atol=1e-4)
+
+        res = least_squares(fun_2d_trivial, [0.5, 0.5],
+                            bounds=Bounds(lb=[-1.0, -1.0], ub=1.0))
+        assert_allclose(res.x, [0.0, 0.0], atol=1e-5)
+
+        res = least_squares(fun_2d_trivial, [0.5, 0.5],
+                            bounds=Bounds(lb=[0.1, 0.1]))
+        assert_allclose(res.x, [0.1, 0.1], atol=1e-5)
+
+    def test_rosenbrock_bounds(self):
+        x0_1 = np.array([-2.0, 1.0])
+        x0_2 = np.array([2.0, 2.0])
+        x0_3 = np.array([-2.0, 2.0])
+        x0_4 = np.array([0.0, 2.0])
+        x0_5 = np.array([-1.2, 1.0])
+        problems = [
+            (x0_1, ([-np.inf, -1.5], np.inf)),
+            (x0_2, ([-np.inf, 1.5], np.inf)),
+            (x0_3, ([-np.inf, 1.5], np.inf)),
+            (x0_4, ([-np.inf, 1.5], [1.0, np.inf])),
+            (x0_2, ([1.0, 1.5], [3.0, 3.0])),
+            (x0_5, ([-50.0, 0.0], [0.5, 100]))
+        ]
+        for x0, bounds in problems:
+            for jac, x_scale, tr_solver in product(
+                    ['2-point', '3-point', 'cs', jac_rosenbrock],
+                    [1.0, [1.0, 0.5], 'jac'],
+                    ['exact', 'lsmr']):
+                res = least_squares(fun_rosenbrock, x0, jac, bounds,
+                                    x_scale=x_scale, tr_solver=tr_solver,
+                                    method=self.method)
+                assert_allclose(res.optimality, 0.0, atol=1e-5)
+
+
+class SparseMixin:
+    def test_exact_tr_solver(self):
+        p = BroydenTridiagonal()
+        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
+                      tr_solver='exact', method=self.method)
+        assert_raises(ValueError, least_squares, p.fun, p.x0,
+                      tr_solver='exact', jac_sparsity=p.sparsity,
+                      method=self.method)
+
+    def test_equivalence(self):
+        sparse = BroydenTridiagonal(mode='sparse')
+        dense = BroydenTridiagonal(mode='dense')
+        res_sparse = least_squares(
+            sparse.fun, sparse.x0, jac=sparse.jac,
+            method=self.method)
+        res_dense = least_squares(
+            dense.fun, dense.x0, jac=sparse.jac,
+            method=self.method)
+        assert_equal(res_sparse.nfev, res_dense.nfev)
+        assert_allclose(res_sparse.x, res_dense.x, atol=1e-20)
+        assert_allclose(res_sparse.cost, 0, atol=1e-20)
+        assert_allclose(res_dense.cost, 0, atol=1e-20)
+
+    def test_tr_options(self):
+        p = BroydenTridiagonal()
+        res = least_squares(p.fun, p.x0, p.jac, method=self.method,
+                            tr_options={'btol': 1e-10})
+        assert_allclose(res.cost, 0, atol=1e-20)
+
+    def test_wrong_parameters(self):
+        p = BroydenTridiagonal()
+        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
+                      tr_solver='best', method=self.method)
+        assert_raises(TypeError, least_squares, p.fun, p.x0, p.jac,
+                      tr_solver='lsmr', tr_options={'tol': 1e-10})
+
+    def test_solver_selection(self):
+        sparse = BroydenTridiagonal(mode='sparse')
+        dense = BroydenTridiagonal(mode='dense')
+        res_sparse = least_squares(sparse.fun, sparse.x0, jac=sparse.jac,
+                                   method=self.method)
+        res_dense = least_squares(dense.fun, dense.x0, jac=dense.jac,
+                                  method=self.method)
+        assert_allclose(res_sparse.cost, 0, atol=1e-20)
+        assert_allclose(res_dense.cost, 0, atol=1e-20)
+        assert_(issparse(res_sparse.jac))
+        assert_(isinstance(res_dense.jac, np.ndarray))
+
+    def test_numerical_jac(self):
+        p = BroydenTridiagonal()
+        for jac in ['2-point', '3-point', 'cs']:
+            res_dense = least_squares(p.fun, p.x0, jac, method=self.method)
+            res_sparse = least_squares(
+                p.fun, p.x0, jac, method=self.method,
+                jac_sparsity=p.sparsity)
+            assert_equal(res_dense.nfev, res_sparse.nfev)
+            assert_allclose(res_dense.x, res_sparse.x, atol=1e-20)
+            assert_allclose(res_dense.cost, 0, atol=1e-20)
+            assert_allclose(res_sparse.cost, 0, atol=1e-20)
+
+    def test_with_bounds(self):
+        p = BroydenTridiagonal()
+        for jac, jac_sparsity in product(
+                [p.jac, '2-point', '3-point', 'cs'], [None, p.sparsity]):
+            res_1 = least_squares(
+                p.fun, p.x0, jac, bounds=(p.lb, np.inf),
+                method=self.method, jac_sparsity=jac_sparsity)
+            res_2 = least_squares(
+                p.fun, p.x0, jac, bounds=(-np.inf, p.ub),
+                method=self.method, jac_sparsity=jac_sparsity)
+            res_3 = least_squares(
+                p.fun, p.x0, jac, bounds=(p.lb, p.ub),
+                method=self.method, jac_sparsity=jac_sparsity)
+            assert_allclose(res_1.optimality, 0, atol=1e-10)
+            assert_allclose(res_2.optimality, 0, atol=1e-10)
+            assert_allclose(res_3.optimality, 0, atol=1e-10)
+
+    def test_wrong_jac_sparsity(self):
+        p = BroydenTridiagonal()
+        sparsity = p.sparsity[:-1]
+        assert_raises(ValueError, least_squares, p.fun, p.x0,
+                      jac_sparsity=sparsity, method=self.method)
+
+    def test_linear_operator(self):
+        p = BroydenTridiagonal(mode='operator')
+        res = least_squares(p.fun, p.x0, p.jac, method=self.method)
+        assert_allclose(res.cost, 0.0, atol=1e-20)
+        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
+                      method=self.method, tr_solver='exact')
+
+    def test_x_scale_jac_scale(self):
+        p = BroydenTridiagonal()
+        res = least_squares(p.fun, p.x0, p.jac, method=self.method,
+                            x_scale='jac')
+        assert_allclose(res.cost, 0.0, atol=1e-20)
+
+        p = BroydenTridiagonal(mode='operator')
+        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
+                      method=self.method, x_scale='jac')
+
+
+class LossFunctionMixin:
+    def test_options(self):
+        for loss in LOSSES:
+            res = least_squares(fun_trivial, 2.0, loss=loss,
+                                method=self.method)
+            assert_allclose(res.x, 0, atol=1e-15)
+
+        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
+                      loss='hinge', method=self.method)
+
+    def test_fun(self):
+        # Test that res.fun is actual residuals, and not modified by loss
+        # function stuff.
+        for loss in LOSSES:
+            res = least_squares(fun_trivial, 2.0, loss=loss,
+                                method=self.method)
+            assert_equal(res.fun, fun_trivial(res.x))
+
+    def test_grad(self):
+        # Test that res.grad is true gradient of loss function at the
+        # solution. Use max_nfev = 1, to avoid reaching minimum.
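+        # (for cost = 0.5 * sum(rho(f**2)) the gradient is
+        # J.T * (rho'(f**2) * f); with f = x**2 + 5 and J = 2*x this gives
+        # the closed forms asserted below)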
+        x = np.array([2.0])  # res.x will be this.
+
+        res = least_squares(fun_trivial, x, jac_trivial, loss='linear',
+                            max_nfev=1, method=self.method)
+        assert_equal(res.grad, 2 * x * (x**2 + 5))
+
+        res = least_squares(fun_trivial, x, jac_trivial, loss='huber',
+                            max_nfev=1, method=self.method)
+        assert_equal(res.grad, 2 * x)
+
+        res = least_squares(fun_trivial, x, jac_trivial, loss='soft_l1',
+                            max_nfev=1, method=self.method)
+        assert_allclose(res.grad,
+                        2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2)**0.5)
+
+        res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy',
+                            max_nfev=1, method=self.method)
+        assert_allclose(res.grad, 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2))
+
+        res = least_squares(fun_trivial, x, jac_trivial, loss='arctan',
+                            max_nfev=1, method=self.method)
+        assert_allclose(res.grad, 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**4))
+
+        res = least_squares(fun_trivial, x, jac_trivial, loss=cubic_soft_l1,
+                            max_nfev=1, method=self.method)
+        assert_allclose(res.grad,
+                        2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2)**(2/3))
+
+    def test_jac(self):
+        # Test that res.jac.T.dot(res.jac) gives Gauss-Newton approximation
+        # of Hessian. This approximation is computed by doubly differentiating
+        # the cost function and dropping the part containing second derivative
+        # of f. For a scalar function it is computed as
+        # H = (rho' + 2 * rho'' * f**2) * f'**2, if the expression inside the
+        # brackets is less than EPS it is replaced by EPS. Here, we check
+        # against the root of H.
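+        # (worked instance, assuming the formula above: for loss='linear'
+        # rho' = 1 and rho'' = 0, so H = f'**2 = (2*x)**2 and the reported
+        # jac is sqrt(H) = 2*x, matching the first assertion below)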
+
+        x = 2.0  # res.x will be this.
+        f = x**2 + 5  # res.fun will be this.
+
+        res = least_squares(fun_trivial, x, jac_trivial, loss='linear',
+                            max_nfev=1, method=self.method)
+        assert_equal(res.jac, 2 * x)
+
+        # For `huber` loss the Jacobian correction is identically zero in
+        # the outlier region; in such cases it is modified to equal EPS**0.5.
+        res = least_squares(fun_trivial, x, jac_trivial, loss='huber',
+                            max_nfev=1, method=self.method)
+        assert_equal(res.jac, 2 * x * EPS**0.5)
+
+        # Now, let's apply `f_scale` to turn the residual into an inlier.
+        # The loss function becomes linear.
+        res = least_squares(fun_trivial, x, jac_trivial, loss='huber',
+                            f_scale=10, max_nfev=1)
+        assert_equal(res.jac, 2 * x)
+
+        # 'soft_l1' always gives a positive scaling.
+        res = least_squares(fun_trivial, x, jac_trivial, loss='soft_l1',
+                            max_nfev=1, method=self.method)
+        assert_allclose(res.jac, 2 * x * (1 + f**2)**-0.75)
+
+        # For 'cauchy' the correction term turns out to be negative, and it
+        # is replaced by EPS**0.5.
+        res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy',
+                            max_nfev=1, method=self.method)
+        assert_allclose(res.jac, 2 * x * EPS**0.5)
+
+        # Now use scaling to turn the residual to inlier.
+        res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy',
+                            f_scale=10, max_nfev=1, method=self.method)
+        fs = f / 10
+        assert_allclose(res.jac, 2 * x * (1 - fs**2)**0.5 / (1 + fs**2))
+
+        # 'arctan' gives an outlier.
+        res = least_squares(fun_trivial, x, jac_trivial, loss='arctan',
+                            max_nfev=1, method=self.method)
+        assert_allclose(res.jac, 2 * x * EPS**0.5)
+
+        # Turn to inlier.
+        res = least_squares(fun_trivial, x, jac_trivial, loss='arctan',
+                            f_scale=20.0, max_nfev=1, method=self.method)
+        fs = f / 20
+        assert_allclose(res.jac, 2 * x * (1 - 3 * fs**4)**0.5 / (1 + fs**4))
+
+        # cubic_soft_l1 will give an outlier.
+        res = least_squares(fun_trivial, x, jac_trivial, loss=cubic_soft_l1,
+                            max_nfev=1)
+        assert_allclose(res.jac, 2 * x * EPS**0.5)
+
+        # Turn to inlier.
+        res = least_squares(fun_trivial, x, jac_trivial,
+                            loss=cubic_soft_l1, f_scale=6, max_nfev=1)
+        fs = f / 6
+        assert_allclose(res.jac,
+                        2 * x * (1 - fs**2 / 3)**0.5 * (1 + fs**2)**(-5/6))
+
+    def test_robustness(self):
+        for noise in [0.1, 1.0]:
+            p = ExponentialFittingProblem(1, 0.1, noise, random_seed=0)
+
+            for jac in ['2-point', '3-point', 'cs', p.jac]:
+                res_lsq = least_squares(p.fun, p.p0, jac=jac,
+                                        method=self.method)
+                assert_allclose(res_lsq.optimality, 0, atol=1e-2)
+                for loss in LOSSES:
+                    if loss == 'linear':
+                        continue
+                    res_robust = least_squares(
+                        p.fun, p.p0, jac=jac, loss=loss, f_scale=noise,
+                        method=self.method)
+                    assert_allclose(res_robust.optimality, 0, atol=1e-2)
+                    assert_(norm(res_robust.x - p.p_opt) <
+                            norm(res_lsq.x - p.p_opt))
+
+
+class TestDogbox(BaseMixin, BoundsMixin, SparseMixin, LossFunctionMixin):
+    method = 'dogbox'
+
+
+class TestTRF(BaseMixin, BoundsMixin, SparseMixin, LossFunctionMixin):
+    method = 'trf'
+
+    def test_lsmr_regularization(self):
+        p = BroydenTridiagonal()
+        for regularize in [True, False]:
+            res = least_squares(p.fun, p.x0, p.jac, method='trf',
+                                tr_options={'regularize': regularize})
+            assert_allclose(res.cost, 0, atol=1e-20)
+
+
+class TestLM(BaseMixin):
+    method = 'lm'
+
+    def test_bounds_not_supported(self):
+        assert_raises(ValueError, least_squares, fun_trivial,
+                      2.0, bounds=(-3.0, 3.0), method='lm')
+
+    def test_m_less_n_not_supported(self):
+        x0 = [-2, 1]
+        assert_raises(ValueError, least_squares, fun_rosenbrock_cropped, x0,
+                      method='lm')
+
+    def test_sparse_not_supported(self):
+        p = BroydenTridiagonal()
+        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
+                      method='lm')
+
+    def test_jac_sparsity_not_supported(self):
+        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
+                      jac_sparsity=[1], method='lm')
+
+    def test_LinearOperator_not_supported(self):
+        p = BroydenTridiagonal(mode="operator")
+        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
+                      method='lm')
+
+    def test_loss(self):
+        res = least_squares(fun_trivial, 2.0, loss='linear', method='lm')
+        assert_allclose(res.x, 0.0, atol=1e-4)
+
+        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
+                      method='lm', loss='huber')
+
+
+def test_basic():
+    # test that 'method' arg is really optional
+    res = least_squares(fun_trivial, 2.0)
+    assert_allclose(res.x, 0, atol=1e-10)
+
+
+def test_small_tolerances_for_lm():
+    for ftol, xtol, gtol in [(None, 1e-13, 1e-13),
+                             (1e-13, None, 1e-13),
+                             (1e-13, 1e-13, None)]:
+        assert_raises(ValueError, least_squares, fun_trivial, 2.0, xtol=xtol,
+                      ftol=ftol, gtol=gtol, method='lm')
+
+
+def test_fp32_gh12991():
+    # checks that smaller FP sizes can be used in least_squares
+    # this is the minimum working example reported for gh12991
+    np.random.seed(1)
+
+    x = np.linspace(0, 1, 100).astype("float32")
+    y = np.random.random(100).astype("float32")
+
+    def func(p, x):
+        return p[0] + p[1] * x
+
+    def err(p, x, y):
+        return func(p, x) - y
+
+    res = least_squares(err, [-1.0, -1.0], args=(x, y))
+    # previously the initial Jacobian calculated for this would be all 0
+    # and the minimizer would terminate immediately with nfev=1, reporting
+    # a successful minimization (which it shouldn't have) while remaining
+    # unchanged from the initial solution.
+    # It was terminating early because the underlying approx_derivative
+    # used a step size suited to FP64 while the working space was FP32.
+    assert res.nfev > 2
+    assert_allclose(res.x, np.array([0.4082241, 0.15530563]), atol=5e-5)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_linear_assignment.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_linear_assignment.py
new file mode 100644
index 00000000..d59792da
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_linear_assignment.py
@@ -0,0 +1,116 @@
+# Author: Brian M. Clapper, G. Varoquaux, Lars Buitinck
+# License: BSD
+
+from numpy.testing import assert_array_equal
+import pytest
+
+import numpy as np
+
+from scipy.optimize import linear_sum_assignment
+from scipy.sparse import random
+from scipy.sparse._sputils import matrix
+from scipy.sparse.csgraph import min_weight_full_bipartite_matching
+from scipy.sparse.csgraph.tests.test_matching import (
+    linear_sum_assignment_assertions, linear_sum_assignment_test_cases
+)
+
+
+def test_linear_sum_assignment_input_shape():
+    with pytest.raises(ValueError, match="expected a matrix"):
+        linear_sum_assignment([1, 2, 3])
+
+
+def test_linear_sum_assignment_input_object():
+    C = [[1, 2, 3], [4, 5, 6]]
+    assert_array_equal(linear_sum_assignment(C),
+                       linear_sum_assignment(np.asarray(C)))
+    assert_array_equal(linear_sum_assignment(C),
+                       linear_sum_assignment(matrix(C)))
+
+
+def test_linear_sum_assignment_input_bool():
+    I = np.identity(3)
+    assert_array_equal(linear_sum_assignment(I.astype(np.bool_)),
+                       linear_sum_assignment(I))
+
+
+def test_linear_sum_assignment_input_string():
+    I = np.identity(3)
+    with pytest.raises(TypeError, match="Cannot cast array data"):
+        linear_sum_assignment(I.astype(str))
+
+
+def test_linear_sum_assignment_input_nan():
+    I = np.diag([np.nan, 1, 1])
+    with pytest.raises(ValueError, match="contains invalid numeric entries"):
+        linear_sum_assignment(I)
+
+
+def test_linear_sum_assignment_input_neginf():
+    I = np.diag([1, -np.inf, 1])
+    with pytest.raises(ValueError, match="contains invalid numeric entries"):
+        linear_sum_assignment(I)
+
+
+def test_linear_sum_assignment_input_inf():
+    I = np.identity(3)
+    I[:, 0] = np.inf
+    with pytest.raises(ValueError, match="cost matrix is infeasible"):
+        linear_sum_assignment(I)
+
+
+def test_constant_cost_matrix():
+    # Fixes #11602
+    n = 8
+    C = np.ones((n, n))
+    row_ind, col_ind = linear_sum_assignment(C)
+    assert_array_equal(row_ind, np.arange(n))
+    assert_array_equal(col_ind, np.arange(n))
+
+
+@pytest.mark.parametrize('num_rows,num_cols', [(0, 0), (2, 0), (0, 3)])
+def test_linear_sum_assignment_trivial_cost(num_rows, num_cols):
+    C = np.empty(shape=(num_cols, num_rows))
+    row_ind, col_ind = linear_sum_assignment(C)
+    assert len(row_ind) == 0
+    assert len(col_ind) == 0
+
+
+@pytest.mark.parametrize('sign,test_case', linear_sum_assignment_test_cases)
+def test_linear_sum_assignment_small_inputs(sign, test_case):
+    linear_sum_assignment_assertions(
+        linear_sum_assignment, np.array, sign, test_case)
+
+
+# Tests that combine scipy.optimize.linear_sum_assignment and
+# scipy.sparse.csgraph.min_weight_full_bipartite_matching
+def test_two_methods_give_same_result_on_many_sparse_inputs():
+    # As opposed to the test above, here we do not spell out the expected
+    # output; only assert that the two methods give the same result.
+    # Concretely, the below tests 100 cases of size 100x100, out of which
+    # 36 are infeasible.
+    np.random.seed(1234)
+    for _ in range(100):
+        lsa_raises = False
+        mwfbm_raises = False
+        sparse = random(100, 100, density=0.06,
+                        data_rvs=lambda size: np.random.randint(1, 100, size))
+        # In csgraph, zeros correspond to missing edges, so we explicitly
+        # replace those with infinities
+        dense = np.full(sparse.shape, np.inf)
+        dense[sparse.row, sparse.col] = sparse.data
+        sparse = sparse.tocsr()
+        try:
+            row_ind, col_ind = linear_sum_assignment(dense)
+            lsa_cost = dense[row_ind, col_ind].sum()
+        except ValueError:
+            lsa_raises = True
+        try:
+            row_ind, col_ind = min_weight_full_bipartite_matching(sparse)
+            mwfbm_cost = sparse[row_ind, col_ind].sum()
+        except ValueError:
+            mwfbm_raises = True
+        # Ensure that if one method raises, so does the other one.
+        assert lsa_raises == mwfbm_raises
+        if not lsa_raises:
+            assert lsa_cost == mwfbm_cost
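+
+
+def _sketch_missing_edge_convention():
+    # Illustrative only, not exercised by the suite: the conversion used
+    # above in isolation. In csgraph a structural zero means "no edge", so
+    # the dense equivalent of a sparse cost matrix places +inf wherever no
+    # explicit entry is stored.
+    sparse_cost = random(2, 2, density=0.5, random_state=0)
+    dense_cost = np.full(sparse_cost.shape, np.inf)
+    dense_cost[sparse_cost.row, sparse_cost.col] = sparse_cost.data
+    return dense_cost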
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_linesearch.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_linesearch.py
new file mode 100644
index 00000000..003e2a0a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_linesearch.py
@@ -0,0 +1,312 @@
+"""
+Tests for line search routines
+"""
+from numpy.testing import (assert_equal, assert_array_almost_equal,
+                           assert_array_almost_equal_nulp, assert_warns,
+                           suppress_warnings)
+import scipy.optimize._linesearch as ls
+from scipy.optimize._linesearch import LineSearchWarning
+import numpy as np
+
+
+def assert_wolfe(s, phi, derphi, c1=1e-4, c2=0.9, err_msg=""):
+    """
+    Check that strong Wolfe conditions apply
+    """
+    phi1 = phi(s)
+    phi0 = phi(0)
+    derphi0 = derphi(0)
+    derphi1 = derphi(s)
+    msg = "s = %s; phi(0) = %s; phi(s) = %s; phi'(0) = %s; phi'(s) = %s; %s" % (
+        s, phi0, phi1, derphi0, derphi1, err_msg)
+
+    assert phi1 <= phi0 + c1*s*derphi0, "Wolfe 1 failed: " + msg
+    assert abs(derphi1) <= abs(c2*derphi0), "Wolfe 2 failed: " + msg
+
+
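+def _sketch_wolfe_example():
+    # Illustrative only, not exercised by the suite: for the quadratic
+    # phi(s) = (s - 1)**2, phi'(0) = -2 < 0 and the exact minimizer s = 1
+    # satisfies both strong Wolfe conditions at the defaults c1=1e-4, c2=0.9:
+    #   sufficient decrease: phi(1) = 0 <= phi(0) + c1*1*phi'(0) = 1 - 2e-4
+    #   curvature:           |phi'(1)| = 0 <= c2*|phi'(0)| = 1.8
+    assert_wolfe(1.0, phi=lambda s: (s - 1)**2,
+                 derphi=lambda s: 2*(s - 1))
+
+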
+def assert_armijo(s, phi, c1=1e-4, err_msg=""):
+    """
+    Check that Armijo condition applies
+    """
+    phi1 = phi(s)
+    phi0 = phi(0)
+    msg = "s = %s; phi(0) = %s; phi(s) = %s; %s" % (s, phi0, phi1, err_msg)
+    assert phi1 <= (1 - c1*s)*phi0, msg
+
+
+def assert_line_wolfe(x, p, s, f, fprime, **kw):
+    assert_wolfe(s, phi=lambda sp: f(x + p*sp),
+                 derphi=lambda sp: np.dot(fprime(x + p*sp), p), **kw)
+
+
+def assert_line_armijo(x, p, s, f, **kw):
+    assert_armijo(s, phi=lambda sp: f(x + p*sp), **kw)
+
+
+def assert_fp_equal(x, y, err_msg="", nulp=50):
+    """Assert two arrays are equal, up to some floating-point rounding error"""
+    try:
+        assert_array_almost_equal_nulp(x, y, nulp)
+    except AssertionError as e:
+        raise AssertionError("%s\n%s" % (e, err_msg)) from e
+
+
+class TestLineSearch:
+    # -- scalar functions; must have dphi(0.) < 0
+    def _scalar_func_1(self, s):
+        self.fcount += 1
+        p = -s - s**3 + s**4
+        dp = -1 - 3*s**2 + 4*s**3
+        return p, dp
+
+    def _scalar_func_2(self, s):
+        self.fcount += 1
+        p = np.exp(-4*s) + s**2
+        dp = -4*np.exp(-4*s) + 2*s
+        return p, dp
+
+    def _scalar_func_3(self, s):
+        self.fcount += 1
+        p = -np.sin(10*s)
+        dp = -10*np.cos(10*s)
+        return p, dp
+
+    # -- n-d functions
+
+    def _line_func_1(self, x):
+        self.fcount += 1
+        f = np.dot(x, x)
+        df = 2*x
+        return f, df
+
+    def _line_func_2(self, x):
+        self.fcount += 1
+        f = np.dot(x, np.dot(self.A, x)) + 1
+        df = np.dot(self.A + self.A.T, x)
+        return f, df
+
+    # --
+
+    def setup_method(self):
+        self.scalar_funcs = []
+        self.line_funcs = []
+        self.N = 20
+        self.fcount = 0
+
+        def bind_index(func, idx):
+            # Remember Python's closure semantics!
+            return lambda *a, **kw: func(*a, **kw)[idx]
+
+        for name in sorted(dir(self)):
+            if name.startswith('_scalar_func_'):
+                value = getattr(self, name)
+                self.scalar_funcs.append(
+                    (name, bind_index(value, 0), bind_index(value, 1)))
+            elif name.startswith('_line_func_'):
+                value = getattr(self, name)
+                self.line_funcs.append(
+                    (name, bind_index(value, 0), bind_index(value, 1)))
+
+        np.random.seed(1234)
+        self.A = np.random.randn(self.N, self.N)
+
+    def scalar_iter(self):
+        for name, phi, derphi in self.scalar_funcs:
+            for old_phi0 in np.random.randn(3):
+                yield name, phi, derphi, old_phi0
+
+    def line_iter(self):
+        for name, f, fprime in self.line_funcs:
+            k = 0
+            while k < 9:
+                x = np.random.randn(self.N)
+                p = np.random.randn(self.N)
+                if np.dot(p, fprime(x)) >= 0:
+                    # always pick a descent direction
+                    continue
+                k += 1
+                old_fv = float(np.random.randn())
+                yield name, f, fprime, x, p, old_fv
+
+    # -- Generic scalar searches
+
+    def test_scalar_search_wolfe1(self):
+        c = 0
+        for name, phi, derphi, old_phi0 in self.scalar_iter():
+            c += 1
+            s, phi1, phi0 = ls.scalar_search_wolfe1(phi, derphi, phi(0),
+                                                    old_phi0, derphi(0))
+            assert_fp_equal(phi0, phi(0), name)
+            assert_fp_equal(phi1, phi(s), name)
+            assert_wolfe(s, phi, derphi, err_msg=name)
+
+        assert c > 3  # check that the iterator really works...
+
+    def test_scalar_search_wolfe2(self):
+        for name, phi, derphi, old_phi0 in self.scalar_iter():
+            s, phi1, phi0, derphi1 = ls.scalar_search_wolfe2(
+                phi, derphi, phi(0), old_phi0, derphi(0))
+            assert_fp_equal(phi0, phi(0), name)
+            assert_fp_equal(phi1, phi(s), name)
+            if derphi1 is not None:
+                assert_fp_equal(derphi1, derphi(s), name)
+            assert_wolfe(s, phi, derphi, err_msg="%s %g" % (name, old_phi0))
+
+    def test_scalar_search_wolfe2_with_low_amax(self):
+        def phi(alpha):
+            return (alpha - 5) ** 2
+
+        def derphi(alpha):
+            return 2 * (alpha - 5)
+
+        s, _, _, _ = assert_warns(LineSearchWarning,
+                                  ls.scalar_search_wolfe2, phi, derphi, amax=0.001)
+        assert s is None
+
+    def test_scalar_search_wolfe2_regression(self):
+        # Regression test for gh-12157
+        # This phi has its minimum at alpha=4/3 ~ 1.333.
+        def phi(alpha):
+            if alpha < 1:
+                return - 3*np.pi/2 * (alpha - 1)
+            else:
+                return np.cos(3*np.pi/2 * alpha - np.pi)
+
+        def derphi(alpha):
+            if alpha < 1:
+                return - 3*np.pi/2
+            else:
+                return - 3*np.pi/2 * np.sin(3*np.pi/2 * alpha - np.pi)
+
+        s, _, _, _ = ls.scalar_search_wolfe2(phi, derphi)
+        # Without the fix in gh-13073, the scalar_search_wolfe2
+        # returned s=2.0 instead.
+        assert s < 1.5
+
+    def test_scalar_search_armijo(self):
+        for name, phi, derphi, old_phi0 in self.scalar_iter():
+            s, phi1 = ls.scalar_search_armijo(phi, phi(0), derphi(0))
+            assert_fp_equal(phi1, phi(s), name)
+            assert_armijo(s, phi, err_msg="%s %g" % (name, old_phi0))
+
+    # -- Generic line searches
+
+    def test_line_search_wolfe1(self):
+        c = 0
+        smax = 100
+        for name, f, fprime, x, p, old_f in self.line_iter():
+            f0 = f(x)
+            g0 = fprime(x)
+            self.fcount = 0
+            s, fc, gc, fv, ofv, gv = ls.line_search_wolfe1(f, fprime, x, p,
+                                                           g0, f0, old_f,
+                                                           amax=smax)
+            assert_equal(self.fcount, fc+gc)
+            assert_fp_equal(ofv, f(x))
+            if s is None:
+                continue
+            assert_fp_equal(fv, f(x + s*p))
+            assert_array_almost_equal(gv, fprime(x + s*p), decimal=14)
+            if s < smax:
+                c += 1
+                assert_line_wolfe(x, p, s, f, fprime, err_msg=name)
+
+        assert c > 3  # check that the iterator really works...
+
+    def test_line_search_wolfe2(self):
+        c = 0
+        smax = 512
+        for name, f, fprime, x, p, old_f in self.line_iter():
+            f0 = f(x)
+            g0 = fprime(x)
+            self.fcount = 0
+            with suppress_warnings() as sup:
+                sup.filter(LineSearchWarning,
+                           "The line search algorithm could not find a solution")
+                sup.filter(LineSearchWarning,
+                           "The line search algorithm did not converge")
+                s, fc, gc, fv, ofv, gv = ls.line_search_wolfe2(f, fprime, x, p,
+                                                               g0, f0, old_f,
+                                                               amax=smax)
+            assert_equal(self.fcount, fc+gc)
+            assert_fp_equal(ofv, f(x))
+            assert_fp_equal(fv, f(x + s*p))
+            if gv is not None:
+                assert_array_almost_equal(gv, fprime(x + s*p), decimal=14)
+            if s < smax:
+                c += 1
+                assert_line_wolfe(x, p, s, f, fprime, err_msg=name)
+        assert c > 3  # check that the iterator really works...
+
+    def test_line_search_wolfe2_bounds(self):
+        # See gh-7475
+
+        # For this f and p, starting at a point on axis 0, the strong Wolfe
+        # condition 2 is met if and only if the step length s satisfies
+        # |x + s| <= c2 * |x|
+        f = lambda x: np.dot(x, x)
+        fp = lambda x: 2 * x
+        p = np.array([1, 0])
+
+        # Smallest s satisfying strong Wolfe conditions for these arguments is 30
+        x = -60 * p
+        c2 = 0.5
+
+        s, _, _, _, _, _ = ls.line_search_wolfe2(f, fp, x, p, amax=30, c2=c2)
+        assert_line_wolfe(x, p, s, f, fp)
+
+        s, _, _, _, _, _ = assert_warns(LineSearchWarning,
+                                        ls.line_search_wolfe2, f, fp, x, p,
+                                        amax=29, c2=c2)
+        assert s is None
+
+        # s=30 will only be tried on the 6th iteration, so this won't converge
+        assert_warns(LineSearchWarning, ls.line_search_wolfe2, f, fp, x, p,
+                     c2=c2, maxiter=5)
+
+    def test_line_search_armijo(self):
+        c = 0
+        for name, f, fprime, x, p, old_f in self.line_iter():
+            f0 = f(x)
+            g0 = fprime(x)
+            self.fcount = 0
+            s, fc, fv = ls.line_search_armijo(f, x, p, g0, f0)
+            c += 1
+            assert_equal(self.fcount, fc)
+            assert_fp_equal(fv, f(x + s*p))
+            assert_line_armijo(x, p, s, f, err_msg=name)
+        assert c >= 9
+
+    # -- More specific tests
+
+    def test_armijo_terminate_1(self):
+        # Armijo should evaluate the function only once if the trial step
+        # is already suitable
+        count = [0]
+
+        def phi(s):
+            count[0] += 1
+            return -s + 0.01*s**2
+        s, phi1 = ls.scalar_search_armijo(phi, phi(0), -1, alpha0=1)
+        assert_equal(s, 1)
+        assert_equal(count[0], 2)
+        assert_armijo(s, phi)
+
+    def test_wolfe_terminate(self):
+        # wolfe1 and wolfe2 should also evaluate the function only a few
+        # times if the trial step is already suitable
+
+        def phi(s):
+            count[0] += 1
+            return -s + 0.05*s**2
+
+        def derphi(s):
+            count[0] += 1
+            return -1 + 0.05*2*s
+
+        for func in [ls.scalar_search_wolfe1, ls.scalar_search_wolfe2]:
+            count = [0]
+            r = func(phi, derphi, phi(0), None, derphi(0))
+            assert r[0] is not None, (r, func)
+            assert count[0] <= 2 + 2, (count, func)
+            assert_wolfe(r[0], phi, derphi, err_msg=str(func))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_linprog.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_linprog.py
new file mode 100644
index 00000000..08677778
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_linprog.py
@@ -0,0 +1,2437 @@
+"""
+Unit test for Linear Programming
+"""
+import sys
+import platform
+
+import numpy as np
+from numpy.testing import (assert_, assert_allclose, assert_equal,
+                           assert_array_less, assert_warns, suppress_warnings)
+from pytest import raises as assert_raises
+from scipy.optimize import linprog, OptimizeWarning
+from scipy.optimize._numdiff import approx_derivative
+from scipy.sparse.linalg import MatrixRankWarning
+from scipy.linalg import LinAlgWarning
+import scipy.sparse
+import pytest
+
+has_umfpack = True
+try:
+    from scikits.umfpack import UmfpackWarning
+except ImportError:
+    has_umfpack = False
+
+has_cholmod = True
+try:
+    import sksparse
+    from sksparse.cholmod import cholesky as cholmod
+except ImportError:
+    has_cholmod = False
+
+
+def _assert_iteration_limit_reached(res, maxiter):
+    assert_(not res.success, "Incorrectly reported success")
+    assert_(res.nit <= maxiter, "Incorrectly reported number of iterations")
+    assert_equal(res.status, 1, "Failed to report iteration limit reached")
+
+
+def _assert_infeasible(res):
+    # res: linprog result object
+    assert_(not res.success, "incorrectly reported success")
+    assert_equal(res.status, 2, "failed to report infeasible status")
+
+
+def _assert_unbounded(res):
+    # res: linprog result object
+    assert_(not res.success, "incorrectly reported success")
+    assert_equal(res.status, 3, "failed to report unbounded status")
+
+
+def _assert_unable_to_find_basic_feasible_sol(res):
+    # res: linprog result object
+
+    # The status may be either 2 or 4 depending on why a basic feasible
+    # solution could not be found. If the underlying problem is expected
+    # not to have a feasible solution, _assert_infeasible should be used.
+    assert_(not res.success, "incorrectly reported success")
+    assert_(res.status in (2, 4), "failed to report optimization failure")
+
+
+def _assert_success(res, desired_fun=None, desired_x=None,
+                    rtol=1e-8, atol=1e-8):
+    # res: linprog result object
+    # desired_fun: desired objective function value or None
+    # desired_x: desired solution or None
+    if not res.success:
+        msg = "linprog status {0}, message: {1}".format(res.status,
+                                                        res.message)
+        raise AssertionError(msg)
+
+    assert_equal(res.status, 0)
+    if desired_fun is not None:
+        assert_allclose(res.fun, desired_fun,
+                        err_msg="converged to an unexpected objective value",
+                        rtol=rtol, atol=atol)
+    if desired_x is not None:
+        assert_allclose(res.x, desired_x,
+                        err_msg="converged to an unexpected solution",
+                        rtol=rtol, atol=atol)
+
+
+def magic_square(n):
+    """
+    Generates a linear program for which integer solutions represent an
+    n x n magic square; binary decision variables represent the presence
+    (or absence) of an integer 1 to n^2 in each position of the square.
+    """
+
+    np.random.seed(0)
+    M = n * (n**2 + 1) / 2
+
+    numbers = np.arange(n**4) // n**2 + 1
+
+    numbers = numbers.reshape(n**2, n, n)
+
+    zeros = np.zeros((n**2, n, n))
+
+    A_list = []
+    b_list = []
+
+    # Rule 1: use every number exactly once
+    for i in range(n**2):
+        A_row = zeros.copy()
+        A_row[i, :, :] = 1
+        A_list.append(A_row.flatten())
+        b_list.append(1)
+
+    # Rule 2: Only one number per square
+    for i in range(n):
+        for j in range(n):
+            A_row = zeros.copy()
+            A_row[:, i, j] = 1
+            A_list.append(A_row.flatten())
+            b_list.append(1)
+
+    # Rule 3: sum of rows is M
+    for i in range(n):
+        A_row = zeros.copy()
+        A_row[:, i, :] = numbers[:, i, :]
+        A_list.append(A_row.flatten())
+        b_list.append(M)
+
+    # Rule 4: sum of columns is M
+    for i in range(n):
+        A_row = zeros.copy()
+        A_row[:, :, i] = numbers[:, :, i]
+        A_list.append(A_row.flatten())
+        b_list.append(M)
+
+    # Rule 5: sum of diagonals is M
+    A_row = zeros.copy()
+    A_row[:, range(n), range(n)] = numbers[:, range(n), range(n)]
+    A_list.append(A_row.flatten())
+    b_list.append(M)
+    A_row = zeros.copy()
+    A_row[:, range(n), range(-1, -n - 1, -1)] = \
+        numbers[:, range(n), range(-1, -n - 1, -1)]
+    A_list.append(A_row.flatten())
+    b_list.append(M)
+
+    A = np.array(np.vstack(A_list), dtype=float)
+    b = np.array(b_list, dtype=float)
+    c = np.random.rand(A.shape[1])
+
+    return A, b, c, numbers, M
+
+
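+def _sketch_magic_square_usage():
+    # Illustrative only (assumes a HiGHS build with MIP support, as used by
+    # test_highs_status_message above): solving with binary integrality
+    # recovers an actual magic square; the indicator solution is folded back
+    # into an n x n array of integers.
+    A, b, c, numbers, M = magic_square(3)
+    res = linprog(c, A_eq=A, b_eq=b, bounds=(0, 1),
+                  method='highs', integrality=1)
+    square = np.sum(numbers * res.x.reshape(numbers.shape), axis=0)
+    return square  # each row, column, and diagonal sums to M = 15
+
+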
+def lpgen_2d(m, n):
+    """ -> A b c LP test: m*n vars, m+n constraints
+        row sums == n/m, col sums == 1
+        https://gist.github.com/denis-bz/8647461
+    """
+    np.random.seed(0)
+    c = - np.random.exponential(size=(m, n))
+    Arow = np.zeros((m, m * n))
+    brow = np.zeros(m)
+    for j in range(m):
+        j1 = j + 1
+        Arow[j, j * n:j1 * n] = 1
+        brow[j] = n / m
+
+    Acol = np.zeros((n, m * n))
+    bcol = np.zeros(n)
+    for j in range(n):
+        j1 = j + 1
+        Acol[j, j::n] = 1
+        bcol[j] = 1
+
+    A = np.vstack((Arow, Acol))
+    b = np.hstack((brow, bcol))
+
+    return A, b, c.ravel()
+
+
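+def _sketch_lpgen_2d_structure():
+    # Illustrative check of the documented structure, not run by the suite:
+    # the uniform point x = 1/m satisfies every constraint with equality,
+    # since each row block sums n entries of 1/m (= n/m) and each column
+    # block sums m entries of 1/m (= 1).
+    A, b, c = lpgen_2d(3, 4)
+    x = np.full(3 * 4, 1 / 3)
+    assert_allclose(A @ x, b)
+
+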
+def very_random_gen(seed=0):
+    np.random.seed(seed)
+    m_eq, m_ub, n = 10, 20, 50
+    c = np.random.rand(n)-0.5
+    A_ub = np.random.rand(m_ub, n)-0.5
+    b_ub = np.random.rand(m_ub)-0.5
+    A_eq = np.random.rand(m_eq, n)-0.5
+    b_eq = np.random.rand(m_eq)-0.5
+    lb = -np.random.rand(n)
+    ub = np.random.rand(n)
+    lb[lb < -np.random.rand()] = -np.inf
+    ub[ub > np.random.rand()] = np.inf
+    bounds = np.vstack((lb, ub)).T
+    return c, A_ub, b_ub, A_eq, b_eq, bounds
+
+
+def nontrivial_problem():
+    c = [-1, 8, 4, -6]
+    A_ub = [[-7, -7, 6, 9],
+            [1, -1, -3, 0],
+            [10, -10, -7, 7],
+            [6, -1, 3, 4]]
+    b_ub = [-3, 6, -6, 6]
+    A_eq = [[-10, 1, 1, -8]]
+    b_eq = [-4]
+    x_star = [101 / 1391, 1462 / 1391, 0, 752 / 1391]
+    f_star = 7083 / 1391
+    return c, A_ub, b_ub, A_eq, b_eq, x_star, f_star
+
+
+def l1_regression_prob(seed=0, m=8, d=9, n=100):
+    '''
+    Training data is {(x0, y0), (x1, y1), ..., (xn-1, yn-1)}
+        x in R^d
+        y in R
+    n: number of training samples
+    d: dimension of x, i.e. x in R^d
+    phi: feature map R^d -> R^m
+    m: dimension of feature space
+    '''
+    np.random.seed(seed)
+    phi = np.random.normal(0, 1, size=(m, d))  # random feature mapping
+    w_true = np.random.randn(m)
+    x = np.random.normal(0, 1, size=(d, n))  # features
+    y = w_true @ (phi @ x) + np.random.normal(0, 1e-5, size=n)  # measurements
+
+    # construct the problem
+    c = np.ones(m+n)
+    c[:m] = 0
+    A_ub = scipy.sparse.lil_matrix((2*n, n+m))
+    idx = 0
+    for ii in range(n):
+        A_ub[idx, :m] = phi @ x[:, ii]
+        A_ub[idx, m+ii] = -1
+        A_ub[idx+1, :m] = -1*phi @ x[:, ii]
+        A_ub[idx+1, m+ii] = -1
+        idx += 2
+    A_ub = A_ub.tocsc()
+    b_ub = np.zeros(2*n)
+    b_ub[0::2] = y
+    b_ub[1::2] = -y
+    bnds = [(None, None)]*m + [(0, None)]*n
+    return c, A_ub, b_ub, bnds
+
+
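+def _sketch_l1_regression_usage():
+    # Illustrative only: how the epigraph formulation above is consumed.
+    # The first m entries of the solution are the feature weights; the
+    # remaining n entries are the absolute residuals whose sum is minimized.
+    m = 8
+    c, A_ub, b_ub, bnds = l1_regression_prob(m=m)
+    res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bnds, method='highs')
+    return res.x[:m]  # fitted weights, close to w_true up to the noise
+
+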
+def generic_callback_test(self):
+    # Check that callback is as advertised
+    last_cb = {}
+
+    def cb(res):
+        message = res.pop('message')
+        complete = res.pop('complete')
+
+        assert_(res.pop('phase') in (1, 2))
+        assert_(res.pop('status') in range(4))
+        assert_(isinstance(res.pop('nit'), int))
+        assert_(isinstance(complete, bool))
+        assert_(isinstance(message, str))
+
+        last_cb['x'] = res['x']
+        last_cb['fun'] = res['fun']
+        last_cb['slack'] = res['slack']
+        last_cb['con'] = res['con']
+
+    c = np.array([-3, -2])
+    A_ub = [[2, 1], [1, 1], [1, 0]]
+    b_ub = [10, 8, 4]
+    res = linprog(c, A_ub=A_ub, b_ub=b_ub, callback=cb, method=self.method)
+
+    _assert_success(res, desired_fun=-18.0, desired_x=[2, 6])
+    assert_allclose(last_cb['fun'], res['fun'])
+    assert_allclose(last_cb['x'], res['x'])
+    assert_allclose(last_cb['con'], res['con'])
+    assert_allclose(last_cb['slack'], res['slack'])
+
+
+def test_unknown_solvers_and_options():
+    c = np.array([-3, -2])
+    A_ub = [[2, 1], [1, 1], [1, 0]]
+    b_ub = [10, 8, 4]
+
+    assert_raises(ValueError, linprog,
+                  c, A_ub=A_ub, b_ub=b_ub, method='ekki-ekki-ekki')
+    assert_raises(ValueError, linprog,
+                  c, A_ub=A_ub, b_ub=b_ub, method='highs-ekki')
+    with pytest.warns(OptimizeWarning, match="Unknown solver options:"):
+        linprog(c, A_ub=A_ub, b_ub=b_ub,
+                options={"rr_method": 'ekki-ekki-ekki'})
+
+
+def test_choose_solver():
+    # 'highs' chooses 'dual'
+    c = np.array([-3, -2])
+    A_ub = [[2, 1], [1, 1], [1, 0]]
+    b_ub = [10, 8, 4]
+
+    res = linprog(c, A_ub, b_ub, method='highs')
+    _assert_success(res, desired_fun=-18.0, desired_x=[2, 6])
+
+
+def test_deprecation():
+    with pytest.warns(DeprecationWarning):
+        linprog(1, method='interior-point')
+    with pytest.warns(DeprecationWarning):
+        linprog(1, method='revised simplex')
+    with pytest.warns(DeprecationWarning):
+        linprog(1, method='simplex')
+
+
+def test_highs_status_message():
+    res = linprog(1, method='highs')
+    msg = "Optimization terminated successfully. (HiGHS Status 7:"
+    assert res.status == 0
+    assert res.message.startswith(msg)
+
+    A, b, c, numbers, M = magic_square(6)
+    bounds = [(0, 1)] * len(c)
+    integrality = [1] * len(c)
+    options = {"time_limit": 0.1}
+    res = linprog(c=c, A_eq=A, b_eq=b, bounds=bounds, method='highs',
+                  options=options, integrality=integrality)
+    msg = "Time limit reached. (HiGHS Status 13:"
+    assert res.status == 1
+    assert res.message.startswith(msg)
+
+    options = {"maxiter": 10}
+    res = linprog(c=c, A_eq=A, b_eq=b, bounds=bounds, method='highs-ds',
+                  options=options)
+    msg = "Iteration limit reached. (HiGHS Status 14:"
+    assert res.status == 1
+    assert res.message.startswith(msg)
+
+    res = linprog(1, bounds=(1, -1), method='highs')
+    msg = "The problem is infeasible. (HiGHS Status 8:"
+    assert res.status == 2
+    assert res.message.startswith(msg)
+
+    res = linprog(-1, method='highs')
+    msg = "The problem is unbounded. (HiGHS Status 10:"
+    assert res.status == 3
+    assert res.message.startswith(msg)
+
+    from scipy.optimize._linprog_highs import _highs_to_scipy_status_message
+    status, message = _highs_to_scipy_status_message(58, "Hello!")
+    msg = "The HiGHS status code was not recognized. (HiGHS Status 58:"
+    assert status == 4
+    assert message.startswith(msg)
+
+    status, message = _highs_to_scipy_status_message(None, None)
+    msg = "HiGHS did not provide a status code. (HiGHS Status None: None)"
+    assert status == 4
+    assert message.startswith(msg)
+
+
+def test_bug_17380():
+    # smoke test: should complete without raising or hanging (see gh-17380)
+    linprog([1, 1], A_ub=[[-1, 0]], b_ub=[-2.5], integrality=[1, 1])
+
+
+# Default values for the optional linprog arguments, shared by the common
+# tests below; individual tests shadow these locally as needed.
+A_ub = None
+b_ub = None
+A_eq = None
+b_eq = None
+bounds = None
+
+################
+# Common Tests #
+################
+
+
+class LinprogCommonTests:
+    """
+    Base class for `linprog` tests. Generally, each test will be performed
+    once for every derived class of LinprogCommonTests, each of which will
+    typically change self.options and/or self.method. Effectively, these tests
+    are run for many combinations of method (simplex, revised simplex, and
+    interior point) and options (such as pivoting rule or sparse treatment).
+    """
+
+    ##################
+    # Targeted Tests #
+    ##################
+
+    def test_callback(self):
+        generic_callback_test(self)
+
+    def test_disp(self):
+        # test that display option does not break anything.
+        A, b, c = lpgen_2d(20, 20)
+        res = linprog(c, A_ub=A, b_ub=b, method=self.method,
+                      options={"disp": True})
+        _assert_success(res, desired_fun=-64.049494229)
+
+    def test_docstring_example(self):
+        # Example from linprog docstring.
+        c = [-1, 4]
+        A = [[-3, 1], [1, 2]]
+        b = [6, 4]
+        x0_bounds = (None, None)
+        x1_bounds = (-3, None)
+        res = linprog(c, A_ub=A, b_ub=b, bounds=(x0_bounds, x1_bounds),
+                      options=self.options, method=self.method)
+        _assert_success(res, desired_fun=-22)
+
+    def test_type_error(self):
+        # (presumably) checks that linprog recognizes type errors
+        # This is tested more carefully in test__linprog_clean_inputs.py
+        c = [1]
+        A_eq = [[1]]
+        b_eq = "hello"
+        assert_raises(TypeError, linprog,
+                      c, A_eq=A_eq, b_eq=b_eq,
+                      method=self.method, options=self.options)
+
+    def test_aliasing_b_ub(self):
+        # (presumably) checks that linprog does not modify b_ub
+        # This is tested more carefully in test__linprog_clean_inputs.py
+        c = np.array([1.0])
+        A_ub = np.array([[1.0]])
+        b_ub_orig = np.array([3.0])
+        b_ub = b_ub_orig.copy()
+        bounds = (-4.0, np.inf)
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_success(res, desired_fun=-4, desired_x=[-4])
+        assert_allclose(b_ub_orig, b_ub)
+
+    def test_aliasing_b_eq(self):
+        # (presumably) checks that linprog does not modify b_eq
+        # This is tested more carefully in test__linprog_clean_inputs.py
+        c = np.array([1.0])
+        A_eq = np.array([[1.0]])
+        b_eq_orig = np.array([3.0])
+        b_eq = b_eq_orig.copy()
+        bounds = (-4.0, np.inf)
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_success(res, desired_fun=3, desired_x=[3])
+        assert_allclose(b_eq_orig, b_eq)
+
+    def test_non_ndarray_args(self):
+        # (presumably) checks that linprog accepts list in place of arrays
+        # This is tested more carefully in test__linprog_clean_inputs.py
+        c = [1.0]
+        A_ub = [[1.0]]
+        b_ub = [3.0]
+        A_eq = [[1.0]]
+        b_eq = [2.0]
+        bounds = (-1.0, 10.0)
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_success(res, desired_fun=2, desired_x=[2])
+
+    def test_unknown_options(self):
+        c = np.array([-3, -2])
+        A_ub = [[2, 1], [1, 1], [1, 0]]
+        b_ub = [10, 8, 4]
+
+        def f(c, A_ub=None, b_ub=None, A_eq=None,
+              b_eq=None, bounds=None, options={}):
+            linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                    method=self.method, options=options)
+
+        o = {key: self.options[key] for key in self.options}
+        o['spam'] = 42
+
+        assert_warns(OptimizeWarning, f,
+                     c, A_ub=A_ub, b_ub=b_ub, options=o)
+
+    def test_integrality_without_highs(self):
+        # ensure that using `integrality` parameter without `method='highs'`
+        # raises warning and produces correct solution to relaxed problem
+        # source: https://en.wikipedia.org/wiki/Integer_programming#Example
+        A_ub = np.array([[-1, 1], [3, 2], [2, 3]])
+        b_ub = np.array([1, 12, 12])
+        c = -np.array([0, 1])
+
+        bounds = [(0, np.inf)] * len(c)
+        integrality = [1] * len(c)
+
+        with np.testing.assert_warns(OptimizeWarning):
+            res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, bounds=bounds,
+                          method=self.method, integrality=integrality)
+
+        np.testing.assert_allclose(res.x, [1.8, 2.8])
+        np.testing.assert_allclose(res.fun, -2.8)
+
+    def test_invalid_inputs(self):
+
+        def f(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None):
+            linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                    method=self.method, options=self.options)
+
+        # Test ill-formatted bounds
+        assert_raises(ValueError, f, [1, 2, 3], bounds=[(1, 2), (3, 4)])
+        assert_raises(ValueError, f, [1, 2, 3], bounds=[(1, 2), (3, 4), (3, 4, 5)])
+        assert_raises(ValueError, f, [1, 2, 3], bounds=[(1, -2), (1, 2)])
+
+        # Test other invalid inputs
+        assert_raises(ValueError, f, [1, 2], A_ub=[[1, 2]], b_ub=[1, 2])
+        assert_raises(ValueError, f, [1, 2], A_ub=[[1]], b_ub=[1])
+        assert_raises(ValueError, f, [1, 2], A_eq=[[1, 2]], b_eq=[1, 2])
+        assert_raises(ValueError, f, [1, 2], A_eq=[[1]], b_eq=[1])
+        assert_raises(ValueError, f, [1, 2], A_eq=[1], b_eq=1)
+
+        # this last check doesn't make sense for sparse presolve:
+        # there are no 3-D sparse matrices
+        if ("_sparse_presolve" in self.options and
+                self.options["_sparse_presolve"]):
+            return
+
+        assert_raises(ValueError, f, [1, 2], A_ub=np.zeros((1, 1, 3)), b_eq=1)
+
+    def test_sparse_constraints(self):
+        # gh-13559: improve error message for sparse inputs when unsupported
+        def f(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None):
+            linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                    method=self.method, options=self.options)
+
+        np.random.seed(0)
+        m = 100
+        n = 150
+        A_eq = scipy.sparse.rand(m, n, 0.5)
+        x_valid = np.random.randn(n)
+        c = np.random.randn(n)
+        ub = x_valid + np.random.rand(n)
+        lb = x_valid - np.random.rand(n)
+        bounds = np.column_stack((lb, ub))
+        b_eq = A_eq * x_valid
+
+        if self.method in {'simplex', 'revised simplex'}:
+            # simplex and revised simplex should raise error
+            with assert_raises(ValueError, match=f"Method '{self.method}' "
+                               "does not support sparse constraint matrices."):
+                linprog(c=c, A_eq=A_eq, b_eq=b_eq, bounds=bounds,
+                        method=self.method, options=self.options)
+        else:
+            # other methods should succeed
+            options = {**self.options}
+            if self.method in {'interior-point'}:
+                options['sparse'] = True
+
+            res = linprog(c=c, A_eq=A_eq, b_eq=b_eq, bounds=bounds,
+                          method=self.method, options=options)
+            assert res.success
+
+    def test_maxiter(self):
+        # test iteration limit w/ Enzo example
+        c = [4, 8, 3, 0, 0, 0]
+        A = [
+            [2, 5, 3, -1, 0, 0],
+            [3, 2.5, 8, 0, -1, 0],
+            [8, 10, 4, 0, 0, -1]]
+        b = [185, 155, 600]
+        np.random.seed(0)
+        maxiter = 3
+        res = linprog(c, A_eq=A, b_eq=b, method=self.method,
+                      options={"maxiter": maxiter})
+        _assert_iteration_limit_reached(res, maxiter)
+        assert_equal(res.nit, maxiter)
+
+    def test_bounds_fixed(self):
+
+        # Test fixed bounds (upper equal to lower)
+        # If presolve option True, test if solution found in presolve (i.e.
+        # number of iterations is 0).
+        do_presolve = self.options.get('presolve', True)
+
+        res = linprog([1], bounds=(1, 1),
+                      method=self.method, options=self.options)
+        _assert_success(res, 1, 1)
+        if do_presolve:
+            assert_equal(res.nit, 0)
+
+        res = linprog([1, 2, 3], bounds=[(5, 5), (-1, -1), (3, 3)],
+                      method=self.method, options=self.options)
+        _assert_success(res, 12, [5, -1, 3])
+        if do_presolve:
+            assert_equal(res.nit, 0)
+
+        res = linprog([1, 1], bounds=[(1, 1), (1, 3)],
+                      method=self.method, options=self.options)
+        _assert_success(res, 2, [1, 1])
+        if do_presolve:
+            assert_equal(res.nit, 0)
+
+        res = linprog([1, 1, 2], A_eq=[[1, 0, 0], [0, 1, 0]], b_eq=[1, 7],
+                      bounds=[(-5, 5), (0, 10), (3.5, 3.5)],
+                      method=self.method, options=self.options)
+        _assert_success(res, 15, [1, 7, 3.5])
+        if do_presolve:
+            assert_equal(res.nit, 0)
+
+    def test_bounds_infeasible(self):
+
+        # Test ill-valued bounds (upper less than lower)
+        # If presolve option True, test if solution found in presolve (i.e.
+        # number of iterations is 0).
+        do_presolve = self.options.get('presolve', True)
+
+        res = linprog([1], bounds=(1, -2),
+                      method=self.method, options=self.options)
+        _assert_infeasible(res)
+        if do_presolve:
+            assert_equal(res.nit, 0)
+
+        res = linprog([1], bounds=[(1, -2)],
+                      method=self.method, options=self.options)
+        _assert_infeasible(res)
+        if do_presolve:
+            assert_equal(res.nit, 0)
+
+        res = linprog([1, 2, 3], bounds=[(5, 0), (1, 2), (3, 4)],
+                      method=self.method, options=self.options)
+        _assert_infeasible(res)
+        if do_presolve:
+            assert_equal(res.nit, 0)
+
+    def test_bounds_infeasible_2(self):
+
+        # Test ill-valued bounds (lower inf, upper -inf)
+        # If presolve option True, test if solution found in presolve (i.e.
+        # number of iterations is 0).
+        # For the simplex method, the cases do not result in an
+        # infeasible status, but in a RuntimeWarning. This is a
+        # consequence of having _presolve() take care of feasibility
+        # checks. See issue gh-11618.
+        do_presolve = self.options.get('presolve', True)
+        simplex_without_presolve = not do_presolve and self.method == 'simplex'
+
+        c = [1, 2, 3]
+        bounds_1 = [(1, 2), (np.inf, np.inf), (3, 4)]
+        bounds_2 = [(1, 2), (-np.inf, -np.inf), (3, 4)]
+
+        if simplex_without_presolve:
+            def g(c, bounds):
+                res = linprog(c, bounds=bounds,
+                              method=self.method, options=self.options)
+                return res
+
+            with pytest.warns(RuntimeWarning):
+                with pytest.raises(IndexError):
+                    g(c, bounds=bounds_1)
+
+            with pytest.warns(RuntimeWarning):
+                with pytest.raises(IndexError):
+                    g(c, bounds=bounds_2)
+        else:
+            res = linprog(c=c, bounds=bounds_1,
+                          method=self.method, options=self.options)
+            _assert_infeasible(res)
+            if do_presolve:
+                assert_equal(res.nit, 0)
+            res = linprog(c=c, bounds=bounds_2,
+                          method=self.method, options=self.options)
+            _assert_infeasible(res)
+            if do_presolve:
+                assert_equal(res.nit, 0)
+
+    def test_empty_constraint_1(self):
+        c = [-1, -2]
+        res = linprog(c, method=self.method, options=self.options)
+        _assert_unbounded(res)
+
+    def test_empty_constraint_2(self):
+        c = [-1, 1, -1, 1]
+        bounds = [(0, np.inf), (-np.inf, 0), (-1, 1), (-1, 1)]
+        res = linprog(c, bounds=bounds,
+                      method=self.method, options=self.options)
+        _assert_unbounded(res)
+        # Unboundedness detected in presolve requires no iterations
+        if self.options.get('presolve', True):
+            assert_equal(res.nit, 0)
+
+    def test_empty_constraint_3(self):
+        c = [1, -1, 1, -1]
+        bounds = [(0, np.inf), (-np.inf, 0), (-1, 1), (-1, 1)]
+        res = linprog(c, bounds=bounds,
+                      method=self.method, options=self.options)
+        _assert_success(res, desired_x=[0, 0, -1, 1], desired_fun=-2)
+
+    def test_inequality_constraints(self):
+        # Minimize linear function subject to linear inequality constraints.
+        #  http://www.dam.brown.edu/people/huiwang/classes/am121/Archive/simplex_121_c.pdf
+        c = np.array([3, 2]) * -1  # maximize
+        A_ub = [[2, 1],
+                [1, 1],
+                [1, 0]]
+        b_ub = [10, 8, 4]
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_success(res, desired_fun=-18, desired_x=[2, 6])
+
+    def test_inequality_constraints2(self):
+        # Minimize linear function subject to linear inequality constraints.
+        # http://www.statslab.cam.ac.uk/~ff271/teaching/opt/notes/notes8.pdf
+        # (dead link)
+        c = [6, 3]
+        A_ub = [[0, 3],
+                [-1, -1],
+                [-2, 1]]
+        b_ub = [2, -1, -1]
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_success(res, desired_fun=5, desired_x=[2 / 3, 1 / 3])
+
+    def test_bounds_simple(self):
+        c = [1, 2]
+        bounds = (1, 2)
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_success(res, desired_x=[1, 1])
+
+        bounds = [(1, 2), (1, 2)]
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_success(res, desired_x=[1, 1])
+
+    def test_bounded_below_only_1(self):
+        c = np.array([1.0])
+        A_eq = np.array([[1.0]])
+        b_eq = np.array([3.0])
+        bounds = (1.0, None)
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_success(res, desired_fun=3, desired_x=[3])
+
+    def test_bounded_below_only_2(self):
+        c = np.ones(3)
+        A_eq = np.eye(3)
+        b_eq = np.array([1, 2, 3])
+        bounds = (0.5, np.inf)
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_success(res, desired_x=b_eq, desired_fun=np.sum(b_eq))
+
+    def test_bounded_above_only_1(self):
+        c = np.array([1.0])
+        A_eq = np.array([[1.0]])
+        b_eq = np.array([3.0])
+        bounds = (None, 10.0)
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_success(res, desired_fun=3, desired_x=[3])
+
+    def test_bounded_above_only_2(self):
+        c = np.ones(3)
+        A_eq = np.eye(3)
+        b_eq = np.array([1, 2, 3])
+        bounds = (-np.inf, 4)
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_success(res, desired_x=b_eq, desired_fun=np.sum(b_eq))
+
+    def test_bounds_infinity(self):
+        c = np.ones(3)
+        A_eq = np.eye(3)
+        b_eq = np.array([1, 2, 3])
+        bounds = (-np.inf, np.inf)
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_success(res, desired_x=b_eq, desired_fun=np.sum(b_eq))
+
+    def test_bounds_mixed(self):
+        # Problem has one unbounded variable and
+        # another with a negative lower bound.
+        c = np.array([-1, 4]) * -1  # maximize
+        A_ub = np.array([[-3, 1],
+                         [1, 2]], dtype=np.float64)
+        b_ub = [6, 4]
+        x0_bounds = (-np.inf, np.inf)
+        x1_bounds = (-3, np.inf)
+        bounds = (x0_bounds, x1_bounds)
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_success(res, desired_fun=-80 / 7, desired_x=[-8 / 7, 18 / 7])
+
+    def test_bounds_equal_but_infeasible(self):
+        c = [-4, 1]
+        A_ub = [[7, -2], [0, 1], [2, -2]]
+        b_ub = [14, 0, 3]
+        bounds = [(2, 2), (0, None)]
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_infeasible(res)
+
+    def test_bounds_equal_but_infeasible2(self):
+        c = [-4, 1]
+        A_eq = [[7, -2], [0, 1], [2, -2]]
+        b_eq = [14, 0, 3]
+        bounds = [(2, 2), (0, None)]
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_infeasible(res)
+
+    def test_bounds_equal_no_presolve(self):
+        # There was a bug when a lower and upper bound were equal but
+        # presolve was not on to eliminate the variable. The bound
+        # was being converted to an equality constraint, but the bound
+        # was not eliminated, leading to issues in postprocessing.
+        c = [1, 2]
+        A_ub = [[1, 2], [1.1, 2.2]]
+        b_ub = [4, 8]
+        bounds = [(1, 2), (2, 2)]
+
+        o = {key: self.options[key] for key in self.options}
+        o["presolve"] = False
+
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=o)
+        _assert_infeasible(res)
+
+    def test_zero_column_1(self):
+        m, n = 3, 4
+        np.random.seed(0)
+        c = np.random.rand(n)
+        c[1] = 1
+        A_eq = np.random.rand(m, n)
+        A_eq[:, 1] = 0
+        b_eq = np.random.rand(m)
+        A_ub = [[1, 0, 1, 1]]
+        b_ub = 3
+        bounds = [(-10, 10), (-10, 10), (-10, None), (None, None)]
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_success(res, desired_fun=-9.7087836730413404)
+
+    def test_zero_column_2(self):
+        if self.method in {'highs-ds', 'highs-ipm'}:
+            # See upstream issue https://github.com/ERGO-Code/HiGHS/issues/648
+            pytest.xfail()
+
+        np.random.seed(0)
+        m, n = 2, 4
+        c = np.random.rand(n)
+        c[1] = -1
+        A_eq = np.random.rand(m, n)
+        A_eq[:, 1] = 0
+        b_eq = np.random.rand(m)
+
+        A_ub = np.random.rand(m, n)
+        A_ub[:, 1] = 0
+        b_ub = np.random.rand(m)
+        bounds = (None, None)
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_unbounded(res)
+        # Unboundedness detected in presolve
+        if self.options.get('presolve', True) and "highs" not in self.method:
+            # HiGHS detects unboundedness or infeasibility in presolve
+            # It needs an iteration of simplex to be sure of unboundedness
+            # Other solvers report that the problem is unbounded if feasible
+            assert_equal(res.nit, 0)
+
+    def test_zero_row_1(self):
+        c = [1, 2, 3]
+        A_eq = [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
+        b_eq = [0, 3, 0]
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_success(res, desired_fun=3)
+
+    def test_zero_row_2(self):
+        A_ub = [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
+        b_ub = [0, 3, 0]
+        c = [1, 2, 3]
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_success(res, desired_fun=0)
+
+    def test_zero_row_3(self):
+        m, n = 2, 4
+        c = np.random.rand(n)
+        A_eq = np.random.rand(m, n)
+        A_eq[0, :] = 0
+        b_eq = np.random.rand(m)
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_infeasible(res)
+
+        # Infeasibility detected in presolve
+        if self.options.get('presolve', True):
+            assert_equal(res.nit, 0)
+
+    def test_zero_row_4(self):
+        m, n = 2, 4
+        c = np.random.rand(n)
+        A_ub = np.random.rand(m, n)
+        A_ub[0, :] = 0
+        b_ub = -np.random.rand(m)
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_infeasible(res)
+
+        # Infeasibility detected in presolve
+        if self.options.get('presolve', True):
+            assert_equal(res.nit, 0)
+
+    def test_singleton_row_eq_1(self):
+        c = [1, 1, 1, 2]
+        A_eq = [[1, 0, 0, 0], [0, 2, 0, 0], [1, 0, 0, 0], [1, 1, 1, 1]]
+        b_eq = [1, 2, 2, 4]
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_infeasible(res)
+
+        # Infeasibility detected in presolve
+        if self.options.get('presolve', True):
+            assert_equal(res.nit, 0)
+
+    def test_singleton_row_eq_2(self):
+        c = [1, 1, 1, 2]
+        A_eq = [[1, 0, 0, 0], [0, 2, 0, 0], [1, 0, 0, 0], [1, 1, 1, 1]]
+        b_eq = [1, 2, 1, 4]
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_success(res, desired_fun=4)
+
+    def test_singleton_row_ub_1(self):
+        c = [1, 1, 1, 2]
+        A_ub = [[1, 0, 0, 0], [0, 2, 0, 0], [-1, 0, 0, 0], [1, 1, 1, 1]]
+        b_ub = [1, 2, -2, 4]
+        bounds = [(None, None), (0, None), (0, None), (0, None)]
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_infeasible(res)
+
+        # Infeasibility detected in presolve
+        if self.options.get('presolve', True):
+            assert_equal(res.nit, 0)
+
+    def test_singleton_row_ub_2(self):
+        c = [1, 1, 1, 2]
+        A_ub = [[1, 0, 0, 0], [0, 2, 0, 0], [-1, 0, 0, 0], [1, 1, 1, 1]]
+        b_ub = [1, 2, -0.5, 4]
+        bounds = [(None, None), (0, None), (0, None), (0, None)]
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_success(res, desired_fun=0.5)
+
+    def test_infeasible(self):
+        # Test linprog response to an infeasible problem
+        c = [-1, -1]
+        A_ub = [[1, 0],
+                [0, 1],
+                [-1, -1]]
+        b_ub = [2, 2, -5]
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_infeasible(res)
+
+    def test_infeasible_inequality_bounds(self):
+        c = [1]
+        A_ub = [[2]]
+        b_ub = 4
+        bounds = (5, 6)
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_infeasible(res)
+
+        # Infeasibility detected in presolve
+        if self.options.get('presolve', True):
+            assert_equal(res.nit, 0)
+
+    def test_unbounded(self):
+        # Test linprog response to an unbounded problem
+        c = np.array([1, 1]) * -1  # maximize
+        A_ub = [[-1, 1],
+                [-1, -1]]
+        b_ub = [-1, -2]
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_unbounded(res)
+
+    def test_unbounded_below_no_presolve_corrected(self):
+        c = [1]
+        bounds = [(None, 1)]
+
+        o = {key: self.options[key] for key in self.options}
+        o["presolve"] = False
+
+        res = linprog(c=c, bounds=bounds,
+                      method=self.method,
+                      options=o)
+        if self.method == "revised simplex":
+            # Revised simplex has a special pathway for no constraints.
+            assert_equal(res.status, 5)
+        else:
+            _assert_unbounded(res)
+
+    def test_unbounded_no_nontrivial_constraints_1(self):
+        """
+        Test whether presolve pathway for detecting unboundedness after
+        constraint elimination is working.
+        """
+        c = np.array([0, 0, 0, 1, -1, -1])
+        A_ub = np.array([[1, 0, 0, 0, 0, 0],
+                         [0, 1, 0, 0, 0, 0],
+                         [0, 0, 0, 0, 0, -1]])
+        b_ub = np.array([2, -2, 0])
+        bounds = [(None, None), (None, None), (None, None),
+                  (-1, 1), (-1, 1), (0, None)]
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_unbounded(res)
+        if not self.method.lower().startswith("highs"):
+            assert_equal(res.x[-1], np.inf)
+            assert_equal(res.message[:36],
+                         "The problem is (trivially) unbounded")
+
+    def test_unbounded_no_nontrivial_constraints_2(self):
+        """
+        Test whether presolve pathway for detecting unboundedness after
+        constraint elimination is working.
+        """
+        c = np.array([0, 0, 0, 1, -1, 1])
+        A_ub = np.array([[1, 0, 0, 0, 0, 0],
+                         [0, 1, 0, 0, 0, 0],
+                         [0, 0, 0, 0, 0, 1]])
+        b_ub = np.array([2, -2, 0])
+        bounds = [(None, None), (None, None), (None, None),
+                  (-1, 1), (-1, 1), (None, 0)]
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_unbounded(res)
+        if not self.method.lower().startswith("highs"):
+            assert_equal(res.x[-1], -np.inf)
+            assert_equal(res.message[:36],
+                         "The problem is (trivially) unbounded")
+
+    def test_cyclic_recovery(self):
+        # Test linprogs recovery from cycling using the Klee-Minty problem
+        # Klee-Minty  https://www.math.ubc.ca/~israel/m340/kleemin3.pdf
+        c = np.array([100, 10, 1]) * -1  # maximize
+        A_ub = [[1, 0, 0],
+                [20, 1, 0],
+                [200, 20, 1]]
+        b_ub = [1, 100, 10000]
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_success(res, desired_x=[0, 0, 10000], atol=5e-6, rtol=1e-7)
+
+    def test_cyclic_bland(self):
+        # Test the effect of Bland's rule on a cycling problem
+        c = np.array([-10, 57, 9, 24.])
+        A_ub = np.array([[0.5, -5.5, -2.5, 9],
+                         [0.5, -1.5, -0.5, 1],
+                         [1, 0, 0, 0]])
+        b_ub = [0, 0, 1]
+
+        # copy the existing options dictionary but change maxiter
+        maxiter = 100
+        o = {key: val for key, val in self.options.items()}
+        o['maxiter'] = maxiter
+
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=o)
+
+        if self.method == 'simplex' and not self.options.get('bland'):
+            # simplex cycles without Bland's rule
+            _assert_iteration_limit_reached(res, o['maxiter'])
+        else:
+            # other methods, including simplex with Bland's rule, succeed
+            _assert_success(res, desired_x=[1, 0, 1, 0])
+        # note that revised simplex skips this test because it may or may not
+        # cycle depending on the initial basis
+
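+    def _sketch_bland_option(self):
+        # Illustrative only (not collected by pytest): Bland's anti-cycling
+        # pivot rule is requested for the legacy simplex method through the
+        # options dict, e.g.
+        #     linprog(c, A_ub=A_ub, b_ub=b_ub, method='simplex',
+        #             options={'bland': True})
+        pass
+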
+    def test_remove_redundancy_infeasibility(self):
+        # mostly a test of redundancy removal, which is carefully tested in
+        # test__remove_redundancy.py
+        m, n = 10, 10
+        c = np.random.rand(n)
+        A_eq = np.random.rand(m, n)
+        b_eq = np.random.rand(m)
+        A_eq[-1, :] = 2 * A_eq[-2, :]
+        b_eq[-1] *= -1
+        with suppress_warnings() as sup:
+            sup.filter(OptimizeWarning, "A_eq does not appear...")
+            res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                          method=self.method, options=self.options)
+        _assert_infeasible(res)
+
+    #################
+    # General Tests #
+    #################
+
+    def test_nontrivial_problem(self):
+        # Problem involves all constraint types,
+        # negative resource limits, and rounding issues.
+        c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_success(res, desired_fun=f_star, desired_x=x_star)
+
+    def test_lpgen_problem(self):
+        # Test linprog  with a rather large problem (400 variables,
+        # 40 constraints) generated by https://gist.github.com/denis-bz/8647461
+        A_ub, b_ub, c = lpgen_2d(20, 20)
+
+        with suppress_warnings() as sup:
+            sup.filter(OptimizeWarning, "Solving system with option 'sym_pos'")
+            sup.filter(RuntimeWarning, "invalid value encountered")
+            sup.filter(LinAlgWarning)
+            res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                          method=self.method, options=self.options)
+        _assert_success(res, desired_fun=-64.049494229)
+
+    def test_network_flow(self):
+        # A network flow problem with supply and demand at nodes
+        # and with costs along directed edges; a minimal version of this
+        # incidence formulation is sketched in a helper after this test.
+        # https://www.princeton.edu/~rvdb/542/lectures/lec10.pdf
+        c = [2, 4, 9, 11, 4, 3, 8, 7, 0, 15, 16, 18]
+        n, p = -1, 1
+        A_eq = [
+            [n, n, p, 0, p, 0, 0, 0, 0, p, 0, 0],
+            [p, 0, 0, p, 0, p, 0, 0, 0, 0, 0, 0],
+            [0, 0, n, n, 0, 0, 0, 0, 0, 0, 0, 0],
+            [0, 0, 0, 0, 0, 0, p, p, 0, 0, p, 0],
+            [0, 0, 0, 0, n, n, n, 0, p, 0, 0, 0],
+            [0, 0, 0, 0, 0, 0, 0, n, n, 0, 0, p],
+            [0, 0, 0, 0, 0, 0, 0, 0, 0, n, n, n]]
+        b_eq = [0, 19, -16, 33, 0, 0, -36]
+        with suppress_warnings() as sup:
+            sup.filter(LinAlgWarning)
+            res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                          method=self.method, options=self.options)
+        _assert_success(res, desired_fun=755, atol=1e-6, rtol=1e-7)
+
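+    # A minimal sketch of the node-arc incidence formulation used above, for
+    # a tiny three-node graph (helper is illustrative and not called by the
+    # tests; module-level `linprog` is assumed). Row k of A_eq is node k's
+    # flow balance: -1 for arcs leaving the node, +1 for arcs entering it,
+    # and b_eq holds the net demand (negative for supply nodes).
+    def _network_flow_demo(self):
+        # arcs: 0->1, 0->2, 1->2 with unit costs 1, 4, 2
+        c = [1, 4, 2]
+        A_eq = [[-1, -1, 0],   # node 0 supplies 10 units
+                [1, 0, -1],    # node 1 is a transshipment node
+                [0, 1, 1]]     # node 2 demands 10 units
+        b_eq = [-10, 0, 10]
+        # default bounds (0, None) keep the flows nonnegative; the optimum
+        # routes all 10 units along 0->1->2 for a cost of 30
+        return linprog(c, A_eq=A_eq, b_eq=b_eq,
+                       method=self.method, options=self.options)
+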
+    def test_network_flow_limited_capacity(self):
+        # A network flow problem with supply and demand at nodes
+        # and with costs and capacities along directed edges.
+        # http://blog.sommer-forst.de/2013/04/10/
+        c = [2, 2, 1, 3, 1]
+        bounds = [
+            [0, 4],
+            [0, 2],
+            [0, 2],
+            [0, 3],
+            [0, 5]]
+        n, p = -1, 1
+        A_eq = [
+            [n, n, 0, 0, 0],
+            [p, 0, n, n, 0],
+            [0, p, p, 0, n],
+            [0, 0, 0, p, p]]
+        b_eq = [-4, 0, 0, 4]
+
+        with suppress_warnings() as sup:
+            # this is an UmfpackWarning but I had trouble importing it
+            if has_umfpack:
+                sup.filter(UmfpackWarning)
+            sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...")
+            sup.filter(OptimizeWarning, "A_eq does not appear...")
+            sup.filter(OptimizeWarning, "Solving system with option...")
+            sup.filter(LinAlgWarning)
+            res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                          method=self.method, options=self.options)
+        _assert_success(res, desired_fun=14)
+
+    def test_simplex_algorithm_wikipedia_example(self):
+        # https://en.wikipedia.org/wiki/Simplex_algorithm#Example
+        c = [-2, -3, -4]
+        A_ub = [
+            [3, 2, 1],
+            [2, 5, 3]]
+        b_ub = [10, 15]
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_success(res, desired_fun=-20)
+
+    def test_enzo_example(self):
+        # https://github.com/scipy/scipy/issues/1779 lp2.py
+        #
+        # Translated from Octave code at:
+        # http://www.ecs.shimane-u.ac.jp/~kyoshida/lpeng.htm
+        # and placed under MIT licence by Enzo Michelangeli
+        # with permission explicitly granted by the original author,
+        # Prof. Kazunobu Yoshida
+        c = [4, 8, 3, 0, 0, 0]
+        A_eq = [
+            [2, 5, 3, -1, 0, 0],
+            [3, 2.5, 8, 0, -1, 0],
+            [8, 10, 4, 0, 0, -1]]
+        b_eq = [185, 155, 600]
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_success(res, desired_fun=317.5,
+                        desired_x=[66.25, 0, 17.5, 0, 183.75, 0],
+                        atol=6e-6, rtol=1e-7)
+
+    def test_enzo_example_b(self):
+        # rescued from https://github.com/scipy/scipy/pull/218
+        c = [2.8, 6.3, 10.8, -2.8, -6.3, -10.8]
+        A_eq = [[-1, -1, -1, 0, 0, 0],
+                [0, 0, 0, 1, 1, 1],
+                [1, 0, 0, 1, 0, 0],
+                [0, 1, 0, 0, 1, 0],
+                [0, 0, 1, 0, 0, 1]]
+        b_eq = [-0.5, 0.4, 0.3, 0.3, 0.3]
+
+        with suppress_warnings() as sup:
+            sup.filter(OptimizeWarning, "A_eq does not appear...")
+            res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                          method=self.method, options=self.options)
+        _assert_success(res, desired_fun=-1.77,
+                        desired_x=[0.3, 0.2, 0.0, 0.0, 0.1, 0.3])
+
+    def test_enzo_example_c_with_degeneracy(self):
+        # rescued from https://github.com/scipy/scipy/pull/218
+        m = 20
+        c = -np.ones(m)
+        tmp = 2 * np.pi * np.arange(1, m + 1) / (m + 1)
+        A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp)))
+        b_eq = [0, 0]
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_success(res, desired_fun=0, desired_x=np.zeros(m))
+
+    def test_enzo_example_c_with_unboundedness(self):
+        # rescued from https://github.com/scipy/scipy/pull/218
+        m = 50
+        c = -np.ones(m)
+        tmp = 2 * np.pi * np.arange(m) / (m + 1)
+        A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp)))
+        b_eq = [0, 0]
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_unbounded(res)
+
+    def test_enzo_example_c_with_infeasibility(self):
+        # rescued from https://github.com/scipy/scipy/pull/218
+        m = 50
+        c = -np.ones(m)
+        tmp = 2 * np.pi * np.arange(m) / (m + 1)
+        A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp)))
+        b_eq = [1, 1]
+
+        o = {key: self.options[key] for key in self.options}
+        o["presolve"] = False
+
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=o)
+        _assert_infeasible(res)
+
+    def test_basic_artificial_vars(self):
+        # Problem is chosen to test two phase simplex methods when at the end
+        # of phase 1 some artificial variables remain in the basis.
+        # Also, for `method='simplex'`, the row in the tableau corresponding
+        # to the artificial variables is not all zero.
+        c = np.array([-0.1, -0.07, 0.004, 0.004, 0.004, 0.004])
+        A_ub = np.array([[1.0, 0, 0, 0, 0, 0], [-1.0, 0, 0, 0, 0, 0],
+                         [0, -1.0, 0, 0, 0, 0], [0, 1.0, 0, 0, 0, 0],
+                         [1.0, 1.0, 0, 0, 0, 0]])
+        b_ub = np.array([3.0, 3.0, 3.0, 3.0, 20.0])
+        A_eq = np.array([[1.0, 0, -1, 1, -1, 1], [0, -1.0, -1, 1, -1, 1]])
+        b_eq = np.array([0, 0])
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_success(res, desired_fun=0, desired_x=np.zeros_like(c),
+                        atol=2e-6)
+
+    def test_optimize_result(self):
+        # check all fields in OptimizeResult
+        c, A_ub, b_ub, A_eq, b_eq, bounds = very_random_gen(0)
+        res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,
+                      bounds=bounds, method=self.method, options=self.options)
+        assert_(res.success)
+        assert_(res.nit)
+        assert_(not res.status)
+        if 'highs' not in self.method:
+            # HiGHS status/message tested separately
+            assert_(res.message == "Optimization terminated successfully.")
+        assert_allclose(c @ res.x, res.fun)
+        assert_allclose(b_eq - A_eq @ res.x, res.con, atol=1e-11)
+        assert_allclose(b_ub - A_ub @ res.x, res.slack, atol=1e-11)
+        for key in ['eqlin', 'ineqlin', 'lower', 'upper']:
+            if key in res.keys():
+                assert isinstance(res[key]['marginals'], np.ndarray)
+                assert isinstance(res[key]['residual'], np.ndarray)
+
+    #################
+    # Bug Fix Tests #
+    #################
+
+    def test_bug_5400(self):
+        # https://github.com/scipy/scipy/issues/5400
+        bounds = [
+            (0, None),
+            (0, 100), (0, 100), (0, 100), (0, 100), (0, 100), (0, 100),
+            (0, 900), (0, 900), (0, 900), (0, 900), (0, 900), (0, 900),
+            (0, None), (0, None), (0, None), (0, None), (0, None), (0, None)]
+
+        f = 1 / 9
+        g = -1e4
+        h = -3.1
+        A_ub = np.array([
+            [1, -2.99, 0, 0, -3, 0, 0, 0, -1, -1, 0, -1, -1, 1, 1, 0, 0, 0, 0],
+            [1, 0, -2.9, h, 0, -3, 0, -1, 0, 0, -1, 0, -1, 0, 0, 1, 1, 0, 0],
+            [1, 0, 0, h, 0, 0, -3, -1, -1, 0, -1, -1, 0, 0, 0, 0, 0, 1, 1],
+            [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+            [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+            [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+            [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+            [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+            [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+            [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+            [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+            [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
+            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
+            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
+            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0],
+            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0],
+            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0],
+            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0],
+            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0],
+            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1],
+            [0, 1.99, -1, -1, 0, 0, 0, -1, f, f, 0, 0, 0, g, 0, 0, 0, 0, 0],
+            [0, 0, 0, 0, 2, -1, -1, 0, 0, 0, -1, f, f, 0, g, 0, 0, 0, 0],
+            [0, -1, 1.9, 2.1, 0, 0, 0, f, -1, -1, 0, 0, 0, 0, 0, g, 0, 0, 0],
+            [0, 0, 0, 0, -1, 2, -1, 0, 0, 0, f, -1, f, 0, 0, 0, g, 0, 0],
+            [0, -1, -1, 2.1, 0, 0, 0, f, f, -1, 0, 0, 0, 0, 0, 0, 0, g, 0],
+            [0, 0, 0, 0, -1, -1, 2, 0, 0, 0, f, f, -1, 0, 0, 0, 0, 0, g]])
+
+        b_ub = np.array([
+            0.0, 0, 0, 100, 100, 100, 100, 100, 100, 900, 900, 900, 900, 900,
+            900, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
+
+        c = np.array([-1.0, 1, 1, 1, 1, 1, 1, 1, 1,
+                      1, 1, 1, 1, 0, 0, 0, 0, 0, 0])
+        with suppress_warnings() as sup:
+            sup.filter(OptimizeWarning,
+                       "Solving system with option 'sym_pos'")
+            sup.filter(RuntimeWarning, "invalid value encountered")
+            sup.filter(LinAlgWarning)
+            res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                          method=self.method, options=self.options)
+        _assert_success(res, desired_fun=-106.63507541835018)
+
+    def test_bug_6139(self):
+        # linprog(method='simplex') fails to find a basic feasible solution
+        # if phase 1 pseudo-objective function is outside the provided tol.
+        # https://github.com/scipy/scipy/issues/6139
+
+        # Note: This is not strictly a bug, as the default tolerance
+        # determines whether a result is "close enough" to zero; the check
+        # should not be expected to work for all cases.
+
+        c = np.array([1, 1, 1])
+        A_eq = np.array([[1., 0., 0.], [-1000., 0., - 1000.]])
+        b_eq = np.array([5.00000000e+00, -1.00000000e+04])
+        A_ub = -np.array([[0., 1000000., 1010000.]])
+        b_ub = -np.array([10000000.])
+        bounds = (None, None)
+
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+
+        _assert_success(res, desired_fun=14.95,
+                        desired_x=np.array([5, 4.95, 5]))
+
+    def test_bug_6690(self):
+        # linprog simplex used to violate bound constraint despite reporting
+        # success.
+        # https://github.com/scipy/scipy/issues/6690
+
+        A_eq = np.array([[0, 0, 0, 0.93, 0, 0.65, 0, 0, 0.83, 0]])
+        b_eq = np.array([0.9626])
+        A_ub = np.array([
+            [0, 0, 0, 1.18, 0, 0, 0, -0.2, 0, -0.22],
+            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+            [0, 0, 0, 0.43, 0, 0, 0, 0, 0, 0],
+            [0, -1.22, -0.25, 0, 0, 0, -2.06, 0, 0, 1.37],
+            [0, 0, 0, 0, 0, 0, 0, -0.25, 0, 0]
+        ])
+        b_ub = np.array([0.615, 0, 0.172, -0.869, -0.022])
+        bounds = np.array([
+            [-0.84, -0.97, 0.34, 0.4, -0.33, -0.74, 0.47, 0.09, -1.45, -0.73],
+            [0.37, 0.02, 2.86, 0.86, 1.18, 0.5, 1.76, 0.17, 0.32, -0.15]
+        ]).T
+        c = np.array([
+            -1.64, 0.7, 1.8, -1.06, -1.16, 0.26, 2.13, 1.53, 0.66, 0.28
+            ])
+
+        with suppress_warnings() as sup:
+            if has_umfpack:
+                sup.filter(UmfpackWarning)
+            sup.filter(OptimizeWarning,
+                       "Solving system with option 'cholesky'")
+            sup.filter(OptimizeWarning, "Solving system with option 'sym_pos'")
+            sup.filter(RuntimeWarning, "invalid value encountered")
+            sup.filter(LinAlgWarning)
+            res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                          method=self.method, options=self.options)
+
+        desired_fun = -1.19099999999
+        desired_x = np.array([0.3700, -0.9700, 0.3400, 0.4000, 1.1800,
+                              0.5000, 0.4700, 0.0900, 0.3200, -0.7300])
+        _assert_success(res, desired_fun=desired_fun, desired_x=desired_x)
+
+        # Add a small tolerance so the strict "less than" checks below
+        # also accept equality.
+        atol = 1e-6
+        assert_array_less(bounds[:, 0] - atol, res.x)
+        assert_array_less(res.x, bounds[:, 1] + atol)
+
+    def test_bug_7044(self):
+        # linprog simplex failed to identify the correct constraints,
+        # leading to a non-optimal solution if A_eq is rank-deficient.
+        # https://github.com/scipy/scipy/issues/7044
+
+        A_eq, b_eq, c, _, _ = magic_square(3)
+        with suppress_warnings() as sup:
+            sup.filter(OptimizeWarning, "A_eq does not appear...")
+            sup.filter(RuntimeWarning, "invalid value encountered")
+            sup.filter(LinAlgWarning)
+            res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                          method=self.method, options=self.options)
+
+        desired_fun = 1.730550597
+        _assert_success(res, desired_fun=desired_fun)
+        assert_allclose(A_eq.dot(res.x), b_eq)
+        assert_array_less(np.zeros(res.x.size) - 1e-5, res.x)
+
+    def test_bug_7237(self):
+        # https://github.com/scipy/scipy/issues/7237
+        # linprog simplex "explodes" when the pivot value is very
+        # close to zero.
+
+        c = np.array([-1, 0, 0, 0, 0, 0, 0, 0, 0])
+        A_ub = np.array([
+            [1., -724., 911., -551., -555., -896., 478., -80., -293.],
+            [1., 566., 42., 937., 233., 883., 392., -909., 57.],
+            [1., -208., -894., 539., 321., 532., -924., 942., 55.],
+            [1., 857., -859., 83., 462., -265., -971., 826., 482.],
+            [1., 314., -424., 245., -424., 194., -443., -104., -429.],
+            [1., 540., 679., 361., 149., -827., 876., 633., 302.],
+            [0., -1., -0., -0., -0., -0., -0., -0., -0.],
+            [0., -0., -1., -0., -0., -0., -0., -0., -0.],
+            [0., -0., -0., -1., -0., -0., -0., -0., -0.],
+            [0., -0., -0., -0., -1., -0., -0., -0., -0.],
+            [0., -0., -0., -0., -0., -1., -0., -0., -0.],
+            [0., -0., -0., -0., -0., -0., -1., -0., -0.],
+            [0., -0., -0., -0., -0., -0., -0., -1., -0.],
+            [0., -0., -0., -0., -0., -0., -0., -0., -1.],
+            [0., 1., 0., 0., 0., 0., 0., 0., 0.],
+            [0., 0., 1., 0., 0., 0., 0., 0., 0.],
+            [0., 0., 0., 1., 0., 0., 0., 0., 0.],
+            [0., 0., 0., 0., 1., 0., 0., 0., 0.],
+            [0., 0., 0., 0., 0., 1., 0., 0., 0.],
+            [0., 0., 0., 0., 0., 0., 1., 0., 0.],
+            [0., 0., 0., 0., 0., 0., 0., 1., 0.],
+            [0., 0., 0., 0., 0., 0., 0., 0., 1.]
+            ])
+        b_ub = np.array([
+            0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
+            0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1.])
+        A_eq = np.array([[0., 1., 1., 1., 1., 1., 1., 1., 1.]])
+        b_eq = np.array([[1.]])
+        bounds = [(None, None)] * 9
+
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_success(res, desired_fun=108.568535, atol=1e-6)
+
+    def test_bug_8174(self):
+        # https://github.com/scipy/scipy/issues/8174
+        # The simplex method sometimes "explodes" if the pivot value is very
+        # close to zero.
+        A_ub = np.array([
+            [22714, 1008, 13380, -2713.5, -1116],
+            [-4986, -1092, -31220, 17386.5, 684],
+            [-4986, 0, 0, -2713.5, 0],
+            [22714, 0, 0, 17386.5, 0]])
+        b_ub = np.zeros(A_ub.shape[0])
+        c = -np.ones(A_ub.shape[1])
+        bounds = [(0, 1)] * A_ub.shape[1]
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "invalid value encountered")
+            sup.filter(LinAlgWarning)
+            res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                          method=self.method, options=self.options)
+
+        if self.options.get('tol', 1e-9) < 1e-10 and self.method == 'simplex':
+            _assert_unable_to_find_basic_feasible_sol(res)
+        else:
+            _assert_success(res, desired_fun=-2.0080717488789235, atol=1e-6)
+
+    def test_bug_8174_2(self):
+        # Test supplementary example from issue 8174.
+        # https://github.com/scipy/scipy/issues/8174
+        # https://stackoverflow.com/questions/47717012/linprog-in-scipy-optimize-checking-solution
+        c = np.array([1, 0, 0, 0, 0, 0, 0])
+        A_ub = -np.identity(7)
+        b_ub = np.array([[-2], [-2], [-2], [-2], [-2], [-2], [-2]])
+        A_eq = np.array([
+            [1, 1, 1, 1, 1, 1, 0],
+            [0.3, 1.3, 0.9, 0, 0, 0, -1],
+            [0.3, 0, 0, 0, 0, 0, -2/3],
+            [0, 0.65, 0, 0, 0, 0, -1/15],
+            [0, 0, 0.3, 0, 0, 0, -1/15]
+        ])
+        b_eq = np.array([[100], [0], [0], [0], [0]])
+
+        with suppress_warnings() as sup:
+            if has_umfpack:
+                sup.filter(UmfpackWarning)
+            sup.filter(OptimizeWarning, "A_eq does not appear...")
+            res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                          method=self.method, options=self.options)
+        _assert_success(res, desired_fun=43.3333333331385)
+
+    def test_bug_8561(self):
+        # Test that pivot row is chosen correctly when using Bland's rule
+        # This was originally written for the simplex method with
+        # Bland's rule only, but it doesn't hurt to test all methods/options
+        # https://github.com/scipy/scipy/issues/8561
+        c = np.array([7, 0, -4, 1.5, 1.5])
+        A_ub = np.array([
+            [4, 5.5, 1.5, 1.0, -3.5],
+            [1, -2.5, -2, 2.5, 0.5],
+            [3, -0.5, 4, -12.5, -7],
+            [-1, 4.5, 2, -3.5, -2],
+            [5.5, 2, -4.5, -1, 9.5]])
+        b_ub = np.array([0, 0, 0, 0, 1])
+        res = linprog(c, A_ub=A_ub, b_ub=b_ub, options=self.options,
+                      method=self.method)
+        _assert_success(res, desired_x=[0, 0, 19, 16/3, 29/3])
+
+    def test_bug_8662(self):
+        # linprog simplex used to report incorrect optimal results
+        # https://github.com/scipy/scipy/issues/8662
+        c = [-10, 10, 6, 3]
+        A_ub = [[8, -8, -4, 6],
+                [-8, 8, 4, -6],
+                [-4, 4, 8, -4],
+                [3, -3, -3, -10]]
+        b_ub = [9, -9, -9, -4]
+        bounds = [(0, None), (0, None), (0, None), (0, None)]
+        desired_fun = 36.0000000000
+
+        with suppress_warnings() as sup:
+            if has_umfpack:
+                sup.filter(UmfpackWarning)
+            sup.filter(RuntimeWarning, "invalid value encountered")
+            sup.filter(LinAlgWarning)
+            res1 = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                           method=self.method, options=self.options)
+
+        # Impose the lower bound on x[2] as an explicit inequality
+        # constraint instead (this rewrite is sketched in a helper after
+        # this test)
+        A_ub.append([0, 0, -1, 0])
+        b_ub.append(0)
+        bounds[2] = (None, None)
+
+        with suppress_warnings() as sup:
+            if has_umfpack:
+                sup.filter(UmfpackWarning)
+            sup.filter(RuntimeWarning, "invalid value encountered")
+            sup.filter(LinAlgWarning)
+            res2 = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                           method=self.method, options=self.options)
+        rtol = 1e-5
+        _assert_success(res1, desired_fun=desired_fun, rtol=rtol)
+        _assert_success(res2, desired_fun=desired_fun, rtol=rtol)
+
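+    # A generic sketch of the bound-to-constraint rewrite used above (helper
+    # name illustrative, not called by the tests): the bound l <= x_k <= u
+    # is equivalent to the rows -x_k <= -l and x_k <= u, after which the
+    # variable can be declared free.
+    @staticmethod
+    def _bounds_as_constraints(A_ub, b_ub, bounds, k):
+        n = len(bounds)
+        lb, ub = bounds[k]
+        if lb is not None:
+            A_ub.append([-1 if j == k else 0 for j in range(n)])
+            b_ub.append(-lb)
+        if ub is not None:
+            A_ub.append([1 if j == k else 0 for j in range(n)])
+            b_ub.append(ub)
+        bounds[k] = (None, None)
+        return A_ub, b_ub, bounds
+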
+    def test_bug_8663(self):
+        # exposed a bug in presolve
+        # https://github.com/scipy/scipy/issues/8663
+        c = [1, 5]
+        A_eq = [[0, -7]]
+        b_eq = [-6]
+        bounds = [(0, None), (None, None)]
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_success(res, desired_x=[0, 6./7], desired_fun=5*6./7)
+
+    def test_bug_8664(self):
+        # interior-point has trouble with this when presolve is off
+        # tested for interior-point with presolve off in TestLinprogIPSpecific
+        # https://github.com/scipy/scipy/issues/8664
+        c = [4]
+        A_ub = [[2], [5]]
+        b_ub = [4, 4]
+        A_eq = [[0], [-8], [9]]
+        b_eq = [3, 2, 10]
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning)
+            sup.filter(OptimizeWarning, "Solving system with option...")
+            res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                          method=self.method, options=self.options)
+        _assert_infeasible(res)
+
+    def test_bug_8973(self):
+        """
+        Test whether bug described at:
+        https://github.com/scipy/scipy/issues/8973
+        was fixed.
+        """
+        c = np.array([0, 0, 0, 1, -1])
+        A_ub = np.array([[1, 0, 0, 0, 0], [0, 1, 0, 0, 0]])
+        b_ub = np.array([2, -2])
+        bounds = [(None, None), (None, None), (None, None), (-1, 1), (-1, 1)]
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        # solution vector x is not unique
+        _assert_success(res, desired_fun=-2)
+        # HiGHS IPM had an issue where the following wasn't true!
+        assert_equal(c @ res.x, res.fun)
+
+    def test_bug_8973_2(self):
+        """
+        Additional test for:
+        https://github.com/scipy/scipy/issues/8973
+        suggested in
+        https://github.com/scipy/scipy/pull/8985
+        review by @antonior92
+        """
+        c = np.zeros(1)
+        A_ub = np.array([[1]])
+        b_ub = np.array([-2])
+        bounds = (None, None)
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options)
+        _assert_success(res, desired_x=[-2], desired_fun=0)
+
+    def test_bug_10124(self):
+        """
+        Test for the linprog docstring example problem:
+        'disp'=True caused a revised simplex failure
+        """
+        c = [-1, 4]
+        A_ub = [[-3, 1], [1, 2]]
+        b_ub = [6, 4]
+        bounds = [(None, None), (-3, None)]
+        o = {"disp": True}
+        o.update(self.options)
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=o)
+        _assert_success(res, desired_x=[10, -3], desired_fun=-22)
+
+    def test_bug_10349(self):
+        """
+        Test for redundancy removal tolerance issue
+        https://github.com/scipy/scipy/issues/10349
+        """
+        A_eq = np.array([[1, 1, 0, 0, 0, 0],
+                         [0, 0, 1, 1, 0, 0],
+                         [0, 0, 0, 0, 1, 1],
+                         [1, 0, 1, 0, 0, 0],
+                         [0, 0, 0, 1, 1, 0],
+                         [0, 1, 0, 0, 0, 1]])
+        b_eq = np.array([221, 210, 10, 141, 198, 102])
+        c = np.concatenate((0, 1, np.zeros(4)), axis=None)
+        with suppress_warnings() as sup:
+            sup.filter(OptimizeWarning, "A_eq does not appear...")
+            res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                          method=self.method, options=self.options)
+        _assert_success(res, desired_x=[129, 92, 12, 198, 0, 10], desired_fun=92)
+
+    @pytest.mark.skipif(sys.platform == 'darwin',
+                        reason=("Failing on some local macOS builds, "
+                                "see gh-13846"))
+    def test_bug_10466(self):
+        """
+        Test that autoscale fixes a poorly-scaled problem; a rough sketch
+        of the idea follows this class.
+        """
+        c = [-8., -0., -8., -0., -8., -0., -0., -0., -0., -0., -0., -0., -0.]
+        A_eq = [[1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
+                [0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
+                [0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0.],
+                [1., 0., 1., 0., 1., 0., -1., 0., 0., 0., 0., 0., 0.],
+                [1., 0., 1., 0., 1., 0., 0., 1., 0., 0., 0., 0., 0.],
+                [1., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],
+                [1., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.],
+                [1., 0., 1., 0., 1., 0., 0., 0., 0., 0., 1., 0., 0.],
+                [0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 1., 0.],
+                [0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 0., 1.]]
+
+        b_eq = [3.14572800e+08, 4.19430400e+08, 5.24288000e+08,
+                1.00663296e+09, 1.07374182e+09, 1.07374182e+09,
+                1.07374182e+09, 1.07374182e+09, 1.07374182e+09,
+                1.07374182e+09]
+
+        o = {}
+        # HiGHS methods don't use autoscale option
+        if not self.method.startswith("highs"):
+            o = {"autoscale": True}
+        o.update(self.options)
+
+        with suppress_warnings() as sup:
+            sup.filter(OptimizeWarning, "Solving system with option...")
+            if has_umfpack:
+                sup.filter(UmfpackWarning)
+            sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...")
+            sup.filter(RuntimeWarning, "divide by zero encountered...")
+            sup.filter(RuntimeWarning, "overflow encountered...")
+            sup.filter(RuntimeWarning, "invalid value encountered...")
+            sup.filter(LinAlgWarning, "Ill-conditioned matrix...")
+            res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                          method=self.method, options=o)
+        assert_allclose(res.fun, -8589934560)
+
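+
+# A rough sketch of what an autoscale-style equilibration does (illustrative
+# only; this is not the implementation behind the "autoscale" option): scale
+# the rows and columns of the constraint matrix toward unit magnitude so
+# that fixed tolerances remain meaningful for badly-scaled problems such as
+# the one in test_bug_10466 above. Module-level `np` is assumed.
+def _equilibrate_demo(A, b, c):
+    A = np.asarray(A, dtype=float)
+    r = np.max(np.abs(A), axis=1)          # row scale factors
+    r[r == 0] = 1
+    A, b = A / r[:, np.newaxis], np.asarray(b, dtype=float) / r
+    s = np.max(np.abs(A), axis=0)          # column scale factors
+    s[s == 0] = 1
+    # with x' = s * x, the objective becomes (c / s) @ x'; any bounds (and
+    # the recovered solution) must be rescaled by s as well
+    return A / s, b, np.asarray(c, dtype=float) / s
+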
+#########################
+# Method-specific Tests #
+#########################
+
+
+@pytest.mark.filterwarnings("ignore::DeprecationWarning")
+class LinprogSimplexTests(LinprogCommonTests):
+    method = "simplex"
+
+
+@pytest.mark.filterwarnings("ignore::DeprecationWarning")
+class LinprogIPTests(LinprogCommonTests):
+    method = "interior-point"
+
+    def test_bug_10466(self):
+        pytest.skip("Test is failing, but solver is deprecated.")
+
+
+@pytest.mark.filterwarnings("ignore::DeprecationWarning")
+class LinprogRSTests(LinprogCommonTests):
+    method = "revised simplex"
+
+    # Revised simplex does not reliably solve these problems.
+    # Failure is intermittent due to the random choice of elements to complete
+    # the basis after phase 1 terminates. In any case, linprog exits
+    # gracefully, reporting numerical difficulties. I do not think this should
+    # prevent revised simplex from being merged, as it solves the problems
+    # most of the time and solves a broader range of problems than the existing
+    # simplex implementation.
+    # I believe that the root cause is the same for all three and that this
+    # same issue prevents revised simplex from solving many other problems
+    # reliably. Somehow the pivoting rule allows the algorithm to pivot into
+    # a singular basis. I haven't been able to find a reference that
+    # acknowledges this possibility, suggesting that there is a bug. On the
+    # other hand, the pivoting rule is quite simple, and I can't find a
+    # mistake, which suggests that this is a possibility with the pivoting
+    # rule. Hopefully, a better pivoting rule will fix the issue.
+
+    def test_bug_5400(self):
+        pytest.skip("Intermittent failure acceptable.")
+
+    def test_bug_8662(self):
+        pytest.skip("Intermittent failure acceptable.")
+
+    def test_network_flow(self):
+        pytest.skip("Intermittent failure acceptable.")
+
+
+class LinprogHiGHSTests(LinprogCommonTests):
+    def test_callback(self):
+        # this is the problem from test_callback
+        cb = lambda res: None
+        c = np.array([-3, -2])
+        A_ub = [[2, 1], [1, 1], [1, 0]]
+        b_ub = [10, 8, 4]
+        assert_raises(NotImplementedError, linprog, c, A_ub=A_ub, b_ub=b_ub,
+                      callback=cb, method=self.method)
+        res = linprog(c, A_ub=A_ub, b_ub=b_ub, method=self.method)
+        _assert_success(res, desired_fun=-18.0, desired_x=[2, 6])
+
+    @pytest.mark.parametrize("options",
+                             [{"maxiter": -1},
+                              {"disp": -1},
+                              {"presolve": -1},
+                              {"time_limit": -1},
+                              {"dual_feasibility_tolerance": -1},
+                              {"primal_feasibility_tolerance": -1},
+                              {"ipm_optimality_tolerance": -1},
+                              {"simplex_dual_edge_weight_strategy": "ekki"},
+                              ])
+    def test_invalid_option_values(self, options):
+        def f(options):
+            linprog(1, method=self.method, options=options)
+        options.update(self.options)
+        assert_warns(OptimizeWarning, f, options=options)
+
+    def test_crossover(self):
+        A_eq, b_eq, c, _, _ = magic_square(4)
+        bounds = (0, 1)
+        res = linprog(c, A_eq=A_eq, b_eq=b_eq,
+                      bounds=bounds, method=self.method, options=self.options)
+        # there should be nonzero crossover iterations for IPM (only)
+        assert_equal(res.crossover_nit == 0, self.method != "highs-ipm")
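+        # (crossover converts the interior-point iterate into a vertex
+        # solution, so a nonzero count indicates the IPM path actually ran)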
+
+    def test_marginals(self):
+        # Ensure Lagrange multipliers are correct by comparing the derivative
+        # w.r.t. b_ub/b_eq/ub/lb to the reported duals.
+        c, A_ub, b_ub, A_eq, b_eq, bounds = very_random_gen(seed=0)
+        res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,
+                      bounds=bounds, method=self.method, options=self.options)
+        lb, ub = bounds.T
+
+        # sensitivity w.r.t. b_ub
+        def f_bub(x):
+            return linprog(c, A_ub, x, A_eq, b_eq, bounds,
+                           method=self.method).fun
+
+        dfdbub = approx_derivative(f_bub, b_ub, method='3-point', f0=res.fun)
+        assert_allclose(res.ineqlin.marginals, dfdbub)
+
+        # sensitivity w.r.t. b_eq
+        def f_beq(x):
+            return linprog(c, A_ub, b_ub, A_eq, x, bounds,
+                           method=self.method).fun
+
+        dfdbeq = approx_derivative(f_beq, b_eq, method='3-point', f0=res.fun)
+        assert_allclose(res.eqlin.marginals, dfdbeq)
+
+        # sensitivity w.r.t. lb
+        def f_lb(x):
+            bounds = np.array([x, ub]).T
+            return linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                           method=self.method).fun
+
+        with np.errstate(invalid='ignore'):
+            # approx_derivative has trouble where lb is infinite
+            dfdlb = approx_derivative(f_lb, lb, method='3-point', f0=res.fun)
+            dfdlb[~np.isfinite(lb)] = 0
+
+        assert_allclose(res.lower.marginals, dfdlb)
+
+        # sensitivity w.r.t. ub
+        def f_ub(x):
+            bounds = np.array([lb, x]).T
+            return linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                           method=self.method).fun
+
+        with np.errstate(invalid='ignore'):
+            dfdub = approx_derivative(f_ub, ub, method='3-point', f0=res.fun)
+            dfdub[~np.isfinite(ub)] = 0
+
+        assert_allclose(res.upper.marginals, dfdub)
+
+    def test_dual_feasibility(self):
+        # Ensure solution is dual feasible using marginals
+        c, A_ub, b_ub, A_eq, b_eq, bounds = very_random_gen(seed=42)
+        res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,
+                      bounds=bounds, method=self.method, options=self.options)
+
+        # KKT dual feasibility equation from Theorem 1 from
+        # http://www.personal.psu.edu/cxg286/LPKKT.pdf
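+        # i.e. stationarity: c = A_ub.T @ y_ineq + A_eq.T @ y_eq + z_upper
+        # + z_lower, with the dual vectors reported via the `marginals`
+        # fields of the result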
+        resid = (-c + A_ub.T @ res.ineqlin.marginals +
+                 A_eq.T @ res.eqlin.marginals +
+                 res.upper.marginals +
+                 res.lower.marginals)
+        assert_allclose(resid, 0, atol=1e-12)
+
+    def test_complementary_slackness(self):
+        # Ensure that the complementary slackness condition is satisfied.
+        c, A_ub, b_ub, A_eq, b_eq, bounds = very_random_gen(seed=42)
+        res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,
+                      bounds=bounds, method=self.method, options=self.options)
+
+        # KKT complementary slackness equation from Theorem 1 from
+        # http://www.personal.psu.edu/cxg286/LPKKT.pdf modified for
+        # non-zero RHS; a companion check for the variable bounds is
+        # sketched in a helper below
+        assert np.allclose(res.ineqlin.marginals @ (b_ub - A_ub @ res.x), 0)
+
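+    # A companion sketch for the variable-bound slackness conditions
+    # (illustrative helper, not called by the tests): each bound multiplier
+    # must vanish unless its bound is active; restricted to finite bounds
+    # to avoid inf * 0.
+    def _check_bound_slackness(self, res, lb, ub):
+        finite_l, finite_u = np.isfinite(lb), np.isfinite(ub)
+        assert np.allclose(res.lower.marginals[finite_l]
+                           * (res.x - lb)[finite_l], 0)
+        assert np.allclose(res.upper.marginals[finite_u]
+                           * (ub - res.x)[finite_u], 0)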
+
+#################################
+# Simplex Option-Specific Tests #
+#################################
+
+
+class TestLinprogSimplexDefault(LinprogSimplexTests):
+
+    def setup_method(self):
+        self.options = {}
+
+    def test_bug_5400(self):
+        pytest.skip("Simplex fails on this problem.")
+
+    def test_bug_7237_low_tol(self):
+        # Fails if the tolerance is too strict. Here, we test that
+        # even if the solution is wrong, the appropriate error is raised.
+        pytest.skip("Simplex fails on this problem.")
+
+    def test_bug_8174_low_tol(self):
+        # Fails if the tolerance is too strict. Here, we test that
+        # even if the solution is wrong, the appropriate warning is issued.
+        self.options.update({'tol': 1e-12})
+        with pytest.warns(OptimizeWarning):
+            super().test_bug_8174()
+
+
+class TestLinprogSimplexBland(LinprogSimplexTests):
+
+    def setup_method(self):
+        self.options = {'bland': True}
+
+    def test_bug_5400(self):
+        pytest.skip("Simplex fails on this problem.")
+
+    def test_bug_8174_low_tol(self):
+        # Fails if the tolerance is too strict. Here, we test that
+        # even if the solution is wrong, the appropriate error is raised.
+        self.options.update({'tol': 1e-12})
+        with pytest.raises(AssertionError):
+            with pytest.warns(OptimizeWarning):
+                super().test_bug_8174()
+
+
+class TestLinprogSimplexNoPresolve(LinprogSimplexTests):
+
+    def setup_method(self):
+        self.options = {'presolve': False}
+
+    is_32_bit = np.intp(0).itemsize < 8
+    is_linux = sys.platform.startswith('linux')
+
+    @pytest.mark.xfail(
+        condition=is_32_bit and is_linux,
+        reason='Fails with warning on 32-bit linux')
+    def test_bug_5400(self):
+        super().test_bug_5400()
+
+    def test_bug_6139_low_tol(self):
+        # linprog(method='simplex') fails to find a basic feasible solution
+        # if phase 1 pseudo-objective function is outside the provided tol.
+        # https://github.com/scipy/scipy/issues/6139
+        # Without ``presolve`` eliminating such rows the result is incorrect.
+        self.options.update({'tol': 1e-12})
+        with pytest.raises(AssertionError, match='linprog status 4'):
+            return super().test_bug_6139()
+
+    def test_bug_7237_low_tol(self):
+        pytest.skip("Simplex fails on this problem.")
+
+    def test_bug_8174_low_tol(self):
+        # Fails if the tolerance is too strict. Here, we test that
+        # even if the solution is wrong, the appropriate warning is issued.
+        self.options.update({'tol': 1e-12})
+        with pytest.warns(OptimizeWarning):
+            super().test_bug_8174()
+
+    def test_unbounded_no_nontrivial_constraints_1(self):
+        pytest.skip("Tests behavior specific to presolve")
+
+    def test_unbounded_no_nontrivial_constraints_2(self):
+        pytest.skip("Tests behavior specific to presolve")
+
+
+########################################
+# Interior-Point Option-Specific Tests #
+########################################
+
+
+class TestLinprogIPDense(LinprogIPTests):
+    options = {"sparse": False}
+
+
+if has_cholmod:
+    class TestLinprogIPSparseCholmod(LinprogIPTests):
+        options = {"sparse": True, "cholesky": True}
+
+
+if has_umfpack:
+    class TestLinprogIPSparseUmfpack(LinprogIPTests):
+        options = {"sparse": True, "cholesky": False}
+
+        def test_network_flow_limited_capacity(self):
+            pytest.skip("Failing due to numerical issues on some platforms.")
+
+
+class TestLinprogIPSparse(LinprogIPTests):
+    options = {"sparse": True, "cholesky": False, "sym_pos": False}
+
+    @pytest.mark.xfail_on_32bit("This test is sensitive to machine epsilon level "
+                                "perturbations in linear system solution in "
+                                "_linprog_ip._sym_solve.")
+    def test_bug_6139(self):
+        super().test_bug_6139()
+
+    @pytest.mark.xfail(reason='Fails with ATLAS, see gh-7877')
+    def test_bug_6690(self):
+        # Test defined in base class, but can't mark as xfail there
+        super().test_bug_6690()
+
+    def test_magic_square_sparse_no_presolve(self):
+        # test linprog with a problem with a rank-deficient A_eq matrix
+        A_eq, b_eq, c, _, _ = magic_square(3)
+        bounds = (0, 1)
+
+        with suppress_warnings() as sup:
+            if has_umfpack:
+                sup.filter(UmfpackWarning)
+            sup.filter(MatrixRankWarning, "Matrix is exactly singular")
+            sup.filter(OptimizeWarning, "Solving system with option...")
+
+            o = {key: self.options[key] for key in self.options}
+            o["presolve"] = False
+
+            res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                          method=self.method, options=o)
+        _assert_success(res, desired_fun=1.730550597)
+
+    def test_sparse_solve_options(self):
+        # checking that problem is solved with all column permutation options
+        A_eq, b_eq, c, _, _ = magic_square(3)
+        with suppress_warnings() as sup:
+            sup.filter(OptimizeWarning, "A_eq does not appear...")
+            sup.filter(OptimizeWarning, "Invalid permc_spec option")
+            o = {key: self.options[key] for key in self.options}
+            permc_specs = ('NATURAL', 'MMD_ATA', 'MMD_AT_PLUS_A',
+                           'COLAMD', 'ekki-ekki-ekki')
+            # 'ekki-ekki-ekki' raises warning about invalid permc_spec option
+            # and uses default
+            for permc_spec in permc_specs:
+                o["permc_spec"] = permc_spec
+                res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                              method=self.method, options=o)
+                _assert_success(res, desired_fun=1.730550597)
+
+
+class TestLinprogIPSparsePresolve(LinprogIPTests):
+    options = {"sparse": True, "_sparse_presolve": True}
+
+    @pytest.mark.xfail_on_32bit("This test is sensitive to machine epsilon level "
+                                "perturbations in linear system solution in "
+                                "_linprog_ip._sym_solve.")
+    def test_bug_6139(self):
+        super().test_bug_6139()
+
+    def test_enzo_example_c_with_infeasibility(self):
+        pytest.skip('_sparse_presolve=True incompatible with presolve=False')
+
+    @pytest.mark.xfail(reason='Fails with ATLAS, see gh-7877')
+    def test_bug_6690(self):
+        # Test defined in base class, but can't mark as xfail there
+        super().test_bug_6690()
+
+
+@pytest.mark.filterwarnings("ignore::DeprecationWarning")
+class TestLinprogIPSpecific:
+    method = "interior-point"
+    # the following tests don't need to be performed separately for
+    # sparse presolve, sparse after presolve, and dense
+
+    def test_solver_select(self):
+        # check that default solver is selected as expected
+        if has_cholmod:
+            options = {'sparse': True, 'cholesky': True}
+        elif has_umfpack:
+            options = {'sparse': True, 'cholesky': False}
+        else:
+            options = {'sparse': True, 'cholesky': False, 'sym_pos': False}
+        A, b, c = lpgen_2d(20, 20)
+        res1 = linprog(c, A_ub=A, b_ub=b, method=self.method, options=options)
+        res2 = linprog(c, A_ub=A, b_ub=b, method=self.method)  # default solver
+        assert_allclose(res1.fun, res2.fun,
+                        err_msg="linprog default solver unexpected result",
+                        rtol=2e-15, atol=1e-15)
+
+    def test_unbounded_below_no_presolve_original(self):
+        # formerly caused segfault in TravisCI w/ "cholesky":True
+        c = [-1]
+        bounds = [(None, 1)]
+        res = linprog(c=c, bounds=bounds,
+                      method=self.method,
+                      options={"presolve": False, "cholesky": True})
+        _assert_success(res, desired_fun=-1)
+
+    def test_cholesky(self):
+        # use cholesky factorization and triangular solves
+        A, b, c = lpgen_2d(20, 20)
+        res = linprog(c, A_ub=A, b_ub=b, method=self.method,
+                      options={"cholesky": True})  # only for dense
+        _assert_success(res, desired_fun=-64.049494229)
+
+    def test_alternate_initial_point(self):
+        # use "improved" initial point
+        A, b, c = lpgen_2d(20, 20)
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...")
+            sup.filter(OptimizeWarning, "Solving system with option...")
+            sup.filter(LinAlgWarning, "Ill-conditioned matrix...")
+            res = linprog(c, A_ub=A, b_ub=b, method=self.method,
+                          options={"ip": True, "disp": True})
+            # ip code is independent of sparse/dense
+        _assert_success(res, desired_fun=-64.049494229)
+
+    def test_bug_8664(self):
+        # interior-point has trouble with this when presolve is off
+        c = [4]
+        A_ub = [[2], [5]]
+        b_ub = [4, 4]
+        A_eq = [[0], [-8], [9]]
+        b_eq = [3, 2, 10]
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning)
+            sup.filter(OptimizeWarning, "Solving system with option...")
+            res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                          method=self.method, options={"presolve": False})
+        assert_(not res.success, "Incorrectly reported success")
+
+
+#########################################
+# Revised Simplex Option-Specific Tests #
+#########################################
+
+
+class TestLinprogRSCommon(LinprogRSTests):
+    options = {}
+
+    def test_cyclic_bland(self):
+        pytest.skip("Intermittent failure acceptable.")
+
+    def test_nontrivial_problem_with_guess(self):
+        c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options, x0=x_star)
+        _assert_success(res, desired_fun=f_star, desired_x=x_star)
+        assert_equal(res.nit, 0)
+
+    def test_nontrivial_problem_with_unbounded_variables(self):
+        c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
+        bounds = [(None, None), (None, None), (0, None), (None, None)]
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options, x0=x_star)
+        _assert_success(res, desired_fun=f_star, desired_x=x_star)
+        assert_equal(res.nit, 0)
+
+    def test_nontrivial_problem_with_bounded_variables(self):
+        c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
+        bounds = [(None, 1), (1, None), (0, None), (.4, .6)]
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options, x0=x_star)
+        _assert_success(res, desired_fun=f_star, desired_x=x_star)
+        assert_equal(res.nit, 0)
+
+    def test_nontrivial_problem_with_negative_unbounded_variable(self):
+        c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
+        b_eq = [4]
+        x_star = np.array([-219/385, 582/385, 0, 4/10])
+        f_star = 3951/385
+        bounds = [(None, None), (1, None), (0, None), (.4, .6)]
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options, x0=x_star)
+        _assert_success(res, desired_fun=f_star, desired_x=x_star)
+        assert_equal(res.nit, 0)
+
+    def test_nontrivial_problem_with_bad_guess(self):
+        c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
+        bad_guess = [1, 2, 3, .5]
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options, x0=bad_guess)
+        assert_equal(res.status, 6)
+
+    def test_redundant_constraints_with_guess(self):
+        A, b, c, _, _ = magic_square(3)
+        p = np.random.rand(*c.shape)
+        with suppress_warnings() as sup:
+            sup.filter(OptimizeWarning, "A_eq does not appear...")
+            sup.filter(RuntimeWarning, "invalid value encountered")
+            sup.filter(LinAlgWarning)
+            res = linprog(c, A_eq=A, b_eq=b, method=self.method)
+            res2 = linprog(c, A_eq=A, b_eq=b, method=self.method, x0=res.x)
+            res3 = linprog(c + p, A_eq=A, b_eq=b, method=self.method, x0=res.x)
+        _assert_success(res2, desired_fun=1.730550597)
+        assert_equal(res2.nit, 0)
+        _assert_success(res3)
+        assert_(res3.nit < res.nit)  # hot start reduces iterations
+
+
+class TestLinprogRSBland(LinprogRSTests):
+    options = {"pivot": "bland"}
+
+
+############################################
+# HiGHS-Simplex-Dual Option-Specific Tests #
+############################################
+
+
+class TestLinprogHiGHSSimplexDual(LinprogHiGHSTests):
+    method = "highs-ds"
+    options = {}
+
+    def test_lad_regression(self):
+        '''
+        The scaled model should be reported optimal, i.e. scaling must not
+        make the unscaled model appear infeasible.
+        See https://github.com/ERGO-Code/HiGHS/issues/494.
+        '''
+        # Test to ensure gh-13610 is resolved (mismatch between HiGHS scaled
+        # and unscaled model statuses)
+        c, A_ub, b_ub, bnds = l1_regression_prob()
+        res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bnds,
+                      method=self.method, options=self.options)
+        assert_equal(res.status, 0)
+        assert_(res.x is not None)
+        assert_(np.all(res.slack > -1e-6))
+        assert_(np.all(res.x <= [np.inf if ub is None else ub
+                                 for lb, ub in bnds]))
+        assert_(np.all(res.x >= [-np.inf if lb is None else lb - 1e-7
+                                 for lb, ub in bnds]))
+
+
+###################################
+# HiGHS-IPM Option-Specific Tests #
+###################################
+
+
+class TestLinprogHiGHSIPM(LinprogHiGHSTests):
+    method = "highs-ipm"
+    options = {}
+
+
+###################################
+# HiGHS-MIP Option-Specific Tests #
+###################################
+
+
+class TestLinprogHiGHSMIP:
+    method = "highs"
+    options = {}
+
+    @pytest.mark.xfail(condition=(sys.maxsize < 2 ** 32 and
+                       platform.system() == "Linux"),
+                       run=False,
+                       reason="gh-16347")
+    def test_mip1(self):
+        # solve non-relaxed magic square problem (finally!)
+        # also check that values are all integers - they don't always
+        # come out of HiGHS that way
+        n = 4
+        A, b, c, numbers, M = magic_square(n)
+        bounds = [(0, 1)] * len(c)
+        integrality = [1] * len(c)
+
+        res = linprog(c=c*0, A_eq=A, b_eq=b, bounds=bounds,
+                      method=self.method, integrality=integrality)
+
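+        # res.x is a 0/1 indicator over (number, row, column) triples;
+        # weight each indicator by its number and sum over the first axis
+        # to reassemble the square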
+        s = (numbers.flatten() * res.x).reshape(n**2, n, n)
+        square = np.sum(s, axis=0)
+        np.testing.assert_allclose(square.sum(axis=0), M)
+        np.testing.assert_allclose(square.sum(axis=1), M)
+        np.testing.assert_allclose(np.diag(square).sum(), M)
+        np.testing.assert_allclose(np.diag(square[:, ::-1]).sum(), M)
+
+        np.testing.assert_allclose(res.x, np.round(res.x), atol=1e-12)
+
+    def test_mip2(self):
+        # solve MIP with inequality constraints and all integer constraints
+        # source: slide 5,
+        # https://www.cs.upc.edu/~erodri/webpage/cps/theory/lp/milp/slides.pdf
+
+        # use all array inputs to test gh-16681 (integrality couldn't be array)
+        A_ub = np.array([[2, -2], [-8, 10]])
+        b_ub = np.array([-1, 13])
+        c = -np.array([1, 1])
+
+        bounds = np.array([(0, np.inf)] * len(c))
+        integrality = np.ones_like(c)
+
+        res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, bounds=bounds,
+                      method=self.method, integrality=integrality)
+
+        np.testing.assert_allclose(res.x, [1, 2])
+        np.testing.assert_allclose(res.fun, -3)
+
+    def test_mip3(self):
+        # solve MIP with inequality constraints and all integer constraints
+        # source: https://en.wikipedia.org/wiki/Integer_programming#Example
+        A_ub = np.array([[-1, 1], [3, 2], [2, 3]])
+        b_ub = np.array([1, 12, 12])
+        c = -np.array([0, 1])
+
+        bounds = [(0, np.inf)] * len(c)
+        integrality = [1] * len(c)
+
+        res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, bounds=bounds,
+                      method=self.method, integrality=integrality)
+
+        np.testing.assert_allclose(res.fun, -2)
+        # two optimal solutions possible, just need one of them
+        assert np.allclose(res.x, [1, 2]) or np.allclose(res.x, [2, 2])
+
+    def test_mip4(self):
+        # solve MIP with inequality constraints and only one integer constraint
+        # source: https://www.mathworks.com/help/optim/ug/intlinprog.html
+        A_ub = np.array([[-1, -2], [-4, -1], [2, 1]])
+        b_ub = np.array([14, -33, 20])
+        c = np.array([8, 1])
+
+        bounds = [(0, np.inf)] * len(c)
+        integrality = [0, 1]
+
+        res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, bounds=bounds,
+                      method=self.method, integrality=integrality)
+
+        np.testing.assert_allclose(res.x, [6.5, 7])
+        np.testing.assert_allclose(res.fun, 59)
+
+    def test_mip5(self):
+        # solve MIP with inequality and equality constraints
+        # source: https://www.mathworks.com/help/optim/ug/intlinprog.html
+        A_ub = np.array([[1, 1, 1]])
+        b_ub = np.array([7])
+        A_eq = np.array([[4, 2, 1]])
+        b_eq = np.array([12])
+        c = np.array([-3, -2, -1])
+
+        bounds = [(0, np.inf), (0, np.inf), (0, 1)]
+        integrality = [0, 1, 0]
+
+        res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,
+                      bounds=bounds, method=self.method,
+                      integrality=integrality)
+
+        np.testing.assert_allclose(res.x, [0, 6, 0])
+        np.testing.assert_allclose(res.fun, -12)
+
+        # gh-16897: these fields were not present, ensure that they are now
+        assert res.get("mip_node_count", None) is not None
+        assert res.get("mip_dual_bound", None) is not None
+        assert res.get("mip_gap", None) is not None
+
+    @pytest.mark.slow
+    @pytest.mark.timeout(120)  # prerelease_deps_coverage_64bit_blas job
+    def test_mip6(self):
+        # solve a larger MIP with only equality constraints
+        # source: https://www.mathworks.com/help/optim/ug/intlinprog.html
+        A_eq = np.array([[22, 13, 26, 33, 21, 3, 14, 26],
+                         [39, 16, 22, 28, 26, 30, 23, 24],
+                         [18, 14, 29, 27, 30, 38, 26, 26],
+                         [41, 26, 28, 36, 18, 38, 16, 26]])
+        b_eq = np.array([7872, 10466, 11322, 12058])
+        c = np.array([2, 10, 13, 17, 7, 5, 7, 3])
+
+        bounds = [(0, np.inf)]*8
+        integrality = [1]*8
+
+        res = linprog(c=c, A_eq=A_eq, b_eq=b_eq, bounds=bounds,
+                      method=self.method, integrality=integrality)
+
+        np.testing.assert_allclose(res.fun, 1854)
+
+    @pytest.mark.xslow
+    def test_mip_rel_gap_passdown(self):
+        # MIP taken from test_mip6, solved with different values of mip_rel_gap
+        # solve a larger MIP with only equality constraints
+        # source: https://www.mathworks.com/help/optim/ug/intlinprog.html
+        A_eq = np.array([[22, 13, 26, 33, 21, 3, 14, 26],
+                         [39, 16, 22, 28, 26, 30, 23, 24],
+                         [18, 14, 29, 27, 30, 38, 26, 26],
+                         [41, 26, 28, 36, 18, 38, 16, 26]])
+        b_eq = np.array([7872, 10466, 11322, 12058])
+        c = np.array([2, 10, 13, 17, 7, 5, 7, 3])
+
+        bounds = [(0, np.inf)]*8
+        integrality = [1]*8
+
+        mip_rel_gaps = [0.5, 0.25, 0.01, 0.001]
+        sol_mip_gaps = []
+        for mip_rel_gap in mip_rel_gaps:
+            res = linprog(c=c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,
+                          bounds=bounds, method=self.method,
+                          integrality=integrality,
+                          options={"mip_rel_gap": mip_rel_gap})
+            final_mip_gap = res["mip_gap"]
+            # assert that the solution actually has mip_gap lower than the
+            # required mip_rel_gap supplied
+            assert final_mip_gap <= mip_rel_gap
+            sol_mip_gaps.append(final_mip_gap)
+
+        # make sure that the mip_rel_gap parameter is actually doing
+        # something: the solution gaps should decline monotonically as
+        # mip_rel_gap tightens. np.diff does x[i+1] - x[i], so flip the
+        # (decreasing) array before differencing so the differences come
+        # out nonnegative
+        gap_diffs = np.diff(np.flip(sol_mip_gaps))
+        assert np.all(gap_diffs >= 0)
+        assert not np.all(gap_diffs == 0)
+
+
+############################
+# Autoscale-Specific Tests #
+############################
+
+
+@pytest.mark.filterwarnings("ignore::DeprecationWarning")
+class AutoscaleTests:
+    options = {"autoscale": True}
+
+    test_bug_6139 = LinprogCommonTests.test_bug_6139
+    test_bug_6690 = LinprogCommonTests.test_bug_6690
+    test_bug_7237 = LinprogCommonTests.test_bug_7237
+
+
+class TestAutoscaleIP(AutoscaleTests):
+    method = "interior-point"
+
+    def test_bug_6139(self):
+        self.options['tol'] = 1e-10
+        return AutoscaleTests.test_bug_6139(self)
+
+
+class TestAutoscaleSimplex(AutoscaleTests):
+    method = "simplex"
+
+
+class TestAutoscaleRS(AutoscaleTests):
+    method = "revised simplex"
+
+    def test_nontrivial_problem_with_guess(self):
+        c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options, x0=x_star)
+        _assert_success(res, desired_fun=f_star, desired_x=x_star)
+        assert_equal(res.nit, 0)
+
+    def test_nontrivial_problem_with_bad_guess(self):
+        c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
+        bad_guess = [1, 2, 3, .5]
+        res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
+                      method=self.method, options=self.options, x0=bad_guess)
+        assert_equal(res.status, 6)
+
+
+############################
+# Redundancy Removal Tests #
+############################
+
+
+@pytest.mark.filterwarnings("ignore::DeprecationWarning")
+class RRTests:
+    method = "interior-point"
+    LCT = LinprogCommonTests
+    # these are a few of the existing tests that have redundancy
+    test_RR_infeasibility = LCT.test_remove_redundancy_infeasibility
+    test_bug_10349 = LCT.test_bug_10349
+    test_bug_7044 = LCT.test_bug_7044
+    test_NFLC = LCT.test_network_flow_limited_capacity
+    test_enzo_example_b = LCT.test_enzo_example_b
+
+
+class TestRRSVD(RRTests):
+    options = {"rr_method": "SVD"}
+
+
+class TestRRPivot(RRTests):
+    options = {"rr_method": "pivot"}
+
+
+class TestRRID(RRTests):
+    options = {"rr_method": "ID"}
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_lsq_common.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_lsq_common.py
new file mode 100644
index 00000000..650deedc
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_lsq_common.py
@@ -0,0 +1,297 @@
+from numpy.testing import assert_, assert_allclose, assert_equal
+from pytest import raises as assert_raises
+import numpy as np
+
+from scipy.optimize._lsq.common import (
+    step_size_to_bound, find_active_constraints, make_strictly_feasible,
+    CL_scaling_vector, intersect_trust_region, build_quadratic_1d,
+    minimize_quadratic_1d, evaluate_quadratic, reflective_transformation,
+    left_multiplied_operator, right_multiplied_operator)
+
+
+class TestBounds:
+    def test_step_size_to_bounds(self):
+        lb = np.array([-1.0, 2.5, 10.0])
+        ub = np.array([1.0, 5.0, 100.0])
+        x = np.array([0.0, 2.5, 12.0])
+
+        s = np.array([0.1, 0.0, 0.0])
+        step, hits = step_size_to_bound(x, s, lb, ub)
+        assert_equal(step, 10)
+        assert_equal(hits, [1, 0, 0])
+
+        s = np.array([0.01, 0.05, -1.0])
+        step, hits = step_size_to_bound(x, s, lb, ub)
+        assert_equal(step, 2)
+        assert_equal(hits, [0, 0, -1])
+
+        s = np.array([10.0, -0.0001, 100.0])
+        step, hits = step_size_to_bound(x, s, lb, ub)
+        assert_equal(step, np.array(-0))
+        assert_equal(hits, [0, -1, 0])
+
+        s = np.array([1.0, 0.5, -2.0])
+        step, hits = step_size_to_bound(x, s, lb, ub)
+        assert_equal(step, 1.0)
+        assert_equal(hits, [1, 0, -1])
+
+        s = np.zeros(3)
+        step, hits = step_size_to_bound(x, s, lb, ub)
+        assert_equal(step, np.inf)
+        assert_equal(hits, [0, 0, 0])
+
+    def test_find_active_constraints(self):
+        lb = np.array([0.0, -10.0, 1.0])
+        ub = np.array([1.0, 0.0, 100.0])
+
+        x = np.array([0.5, -5.0, 2.0])
+        active = find_active_constraints(x, lb, ub)
+        assert_equal(active, [0, 0, 0])
+
+        x = np.array([0.0, 0.0, 10.0])
+        active = find_active_constraints(x, lb, ub)
+        assert_equal(active, [-1, 1, 0])
+
+        active = find_active_constraints(x, lb, ub, rtol=0)
+        assert_equal(active, [-1, 1, 0])
+
+        x = np.array([1e-9, -1e-8, 100 - 1e-9])
+        active = find_active_constraints(x, lb, ub)
+        assert_equal(active, [0, 0, 1])
+
+        active = find_active_constraints(x, lb, ub, rtol=1.5e-9)
+        assert_equal(active, [-1, 0, 1])
+
+        lb = np.array([1.0, -np.inf, -np.inf])
+        ub = np.array([np.inf, 10.0, np.inf])
+
+        x = np.ones(3)
+        active = find_active_constraints(x, lb, ub)
+        assert_equal(active, [-1, 0, 0])
+
+        # Handles out-of-bound cases.
+        x = np.array([0.0, 11.0, 0.0])
+        active = find_active_constraints(x, lb, ub)
+        assert_equal(active, [-1, 1, 0])
+
+        active = find_active_constraints(x, lb, ub, rtol=0)
+        assert_equal(active, [-1, 1, 0])
+
+    def test_make_strictly_feasible(self):
+        lb = np.array([-0.5, -0.8, 2.0])
+        ub = np.array([0.8, 1.0, 3.0])
+
+        x = np.array([-0.5, 0.0, 2 + 1e-10])
+
+        x_new = make_strictly_feasible(x, lb, ub, rstep=0)
+        assert_(x_new[0] > -0.5)
+        assert_equal(x_new[1:], x[1:])
+
+        x_new = make_strictly_feasible(x, lb, ub, rstep=1e-4)
+        assert_equal(x_new, [-0.5 + 1e-4, 0.0, 2 * (1 + 1e-4)])
+
+        x = np.array([-0.5, -1, 3.1])
+        x_new = make_strictly_feasible(x, lb, ub)
+        assert_(np.all((x_new >= lb) & (x_new <= ub)))
+
+        x_new = make_strictly_feasible(x, lb, ub, rstep=0)
+        assert_(np.all((x_new >= lb) & (x_new <= ub)))
+
+        lb = np.array([-1, 100.0])
+        ub = np.array([1, 100.0 + 1e-10])
+        x = np.array([0, 100.0])
+        x_new = make_strictly_feasible(x, lb, ub, rstep=1e-8)
+        assert_equal(x_new, [0, 100.0 + 0.5e-10])
+
+    def test_scaling_vector(self):
+        lb = np.array([-np.inf, -5.0, 1.0, -np.inf])
+        ub = np.array([1.0, np.inf, 10.0, np.inf])
+        x = np.array([0.5, 2.0, 5.0, 0.0])
+        g = np.array([1.0, 0.1, -10.0, 0.0])
+        v, dv = CL_scaling_vector(x, g, lb, ub)
+        assert_equal(v, [1.0, 7.0, 5.0, 1.0])
+        assert_equal(dv, [0.0, 1.0, -1.0, 0.0])
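+
+
+# The expected values above follow from simple closed forms.  The two
+# sketches below restate them in plain NumPy as illustrations; they are
+# not the implementations in scipy.optimize._lsq.common.
+def _reference_step_to_bound(x, s, lb, ub):
+    # Largest step t >= 0 keeping lb <= x + t*s <= ub, plus a -1/0/+1
+    # vector marking which bound each component hits at that step.
+    with np.errstate(divide='ignore', invalid='ignore'):
+        steps = np.where(s > 0, (ub - x) / s,
+                         np.where(s < 0, (lb - x) / s, np.inf))
+    step = np.min(steps)
+    hits = np.where(steps == step, np.sign(s).astype(int), 0)
+    return step, hits
+
+
+def _reference_cl_scaling(x, g, lb, ub):
+    # Coleman-Li scaling: v[i] is the distance to the bound that the
+    # descent direction -g points toward (1 if that bound is infinite),
+    # and dv[i] is its derivative with respect to x[i].
+    v, dv = np.ones_like(x), np.zeros_like(x)
+    mask = (g < 0) & np.isfinite(ub)
+    v[mask], dv[mask] = ub[mask] - x[mask], -1
+    mask = (g > 0) & np.isfinite(lb)
+    v[mask], dv[mask] = x[mask] - lb[mask], 1
+    return v, dv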
+
+
+class TestQuadraticFunction:
+    def setup_method(self):
+        self.J = np.array([
+            [0.1, 0.2],
+            [-1.0, 1.0],
+            [0.5, 0.2]])
+        self.g = np.array([0.8, -2.0])
+        self.diag = np.array([1.0, 2.0])
+
+    def test_build_quadratic_1d(self):
+        s = np.zeros(2)
+        a, b = build_quadratic_1d(self.J, self.g, s)
+        assert_equal(a, 0)
+        assert_equal(b, 0)
+
+        a, b = build_quadratic_1d(self.J, self.g, s, diag=self.diag)
+        assert_equal(a, 0)
+        assert_equal(b, 0)
+
+        s = np.array([1.0, -1.0])
+        a, b = build_quadratic_1d(self.J, self.g, s)
+        assert_equal(a, 2.05)
+        assert_equal(b, 2.8)
+
+        a, b = build_quadratic_1d(self.J, self.g, s, diag=self.diag)
+        assert_equal(a, 3.55)
+        assert_equal(b, 2.8)
+
+        s0 = np.array([0.5, 0.5])
+        a, b, c = build_quadratic_1d(self.J, self.g, s, diag=self.diag, s0=s0)
+        assert_equal(a, 3.55)
+        assert_allclose(b, 2.39)
+        assert_allclose(c, -0.1525)
+
+    def test_minimize_quadratic_1d(self):
+        a = 5
+        b = -1
+
+        t, y = minimize_quadratic_1d(a, b, 1, 2)
+        assert_equal(t, 1)
+        assert_allclose(y, a * t**2 + b * t, rtol=1e-15)
+
+        t, y = minimize_quadratic_1d(a, b, -2, -1)
+        assert_equal(t, -1)
+        assert_allclose(y, a * t**2 + b * t, rtol=1e-15)
+
+        t, y = minimize_quadratic_1d(a, b, -1, 1)
+        assert_equal(t, 0.1)
+        assert_allclose(y, a * t**2 + b * t, rtol=1e-15)
+
+        c = 10
+        t, y = minimize_quadratic_1d(a, b, -1, 1, c=c)
+        assert_equal(t, 0.1)
+        assert_allclose(y, a * t**2 + b * t + c, rtol=1e-15)
+
+        t, y = minimize_quadratic_1d(a, b, -np.inf, np.inf, c=c)
+        assert_equal(t, 0.1)
+        assert_allclose(y, a * t ** 2 + b * t + c, rtol=1e-15)
+
+        t, y = minimize_quadratic_1d(a, b, 0, np.inf, c=c)
+        assert_equal(t, 0.1)
+        assert_allclose(y, a * t ** 2 + b * t + c, rtol=1e-15)
+
+        t, y = minimize_quadratic_1d(a, b, -np.inf, 0, c=c)
+        assert_equal(t, 0)
+        assert_allclose(y, a * t ** 2 + b * t + c, rtol=1e-15)
+
+        a = -1
+        b = 0.2
+        t, y = minimize_quadratic_1d(a, b, -np.inf, np.inf)
+        assert_equal(y, -np.inf)
+
+        t, y = minimize_quadratic_1d(a, b, 0, np.inf)
+        assert_equal(t, np.inf)
+        assert_equal(y, -np.inf)
+
+        t, y = minimize_quadratic_1d(a, b, -np.inf, 0)
+        assert_equal(t, -np.inf)
+        assert_equal(y, -np.inf)
+
+    def test_evaluate_quadratic(self):
+        s = np.array([1.0, -1.0])
+
+        value = evaluate_quadratic(self.J, self.g, s)
+        assert_equal(value, 4.85)
+
+        value = evaluate_quadratic(self.J, self.g, s, diag=self.diag)
+        assert_equal(value, 6.35)
+
+        s = np.array([[1.0, -1.0],
+                     [1.0, 1.0],
+                     [0.0, 0.0]])
+
+        values = evaluate_quadratic(self.J, self.g, s)
+        assert_allclose(values, [4.85, -0.91, 0.0])
+
+        values = evaluate_quadratic(self.J, self.g, s, diag=self.diag)
+        assert_allclose(values, [6.35, 0.59, 0.0])
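+
+
+# Illustrative closed form behind minimize_quadratic_1d for a *finite*
+# interval (the library version also handles the infinite bounds that
+# are exercised above); not the library implementation.
+def _reference_min_quad_1d(a, b, lb, ub, c=0):
+    # Minimize a*t**2 + b*t + c over [lb, ub]: compare the endpoints
+    # with the unconstrained vertex t = -b/(2*a) when it lies inside.
+    candidates = [lb, ub]
+    if a != 0 and lb < -b / (2 * a) < ub:
+        candidates.append(-b / (2 * a))
+    values = [a * t**2 + b * t + c for t in candidates]
+    i = np.argmin(values)
+    return candidates[i], values[i]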
+
+
+class TestTrustRegion:
+    def test_intersect(self):
+        Delta = 1.0
+
+        x = np.zeros(3)
+        s = np.array([1.0, 0.0, 0.0])
+        t_neg, t_pos = intersect_trust_region(x, s, Delta)
+        assert_equal(t_neg, -1)
+        assert_equal(t_pos, 1)
+
+        s = np.array([-1.0, 1.0, -1.0])
+        t_neg, t_pos = intersect_trust_region(x, s, Delta)
+        assert_allclose(t_neg, -3**-0.5)
+        assert_allclose(t_pos, 3**-0.5)
+
+        x = np.array([0.5, -0.5, 0])
+        s = np.array([0, 0, 1.0])
+        t_neg, t_pos = intersect_trust_region(x, s, Delta)
+        assert_allclose(t_neg, -2**-0.5)
+        assert_allclose(t_pos, 2**-0.5)
+
+        x = np.ones(3)
+        assert_raises(ValueError, intersect_trust_region, x, s, Delta)
+
+        x = np.zeros(3)
+        s = np.zeros(3)
+        assert_raises(ValueError, intersect_trust_region, x, s, Delta)
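+
+
+# Illustrative sketch of the quadratic behind intersect_trust_region,
+# as checked above; not the library implementation.
+def _reference_trust_region_bounds(x, s, Delta):
+    # Solve ||x + t*s||**2 = Delta**2 for t, i.e. a*t**2 + b*t + c = 0
+    # with a = s.s, b = 2*x.s and c = x.x - Delta**2.
+    a = np.dot(s, s)
+    b = 2 * np.dot(x, s)
+    c = np.dot(x, x) - Delta**2
+    d = b**2 - 4 * a * c
+    if a == 0 or d < 0:
+        raise ValueError("no intersection with the trust region")
+    return (-b - d**0.5) / (2 * a), (-b + d**0.5) / (2 * a)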
+
+
+def test_reflective_transformation():
+    lb = np.array([-1, -2], dtype=float)
+    ub = np.array([5, 3], dtype=float)
+
+    y = np.array([0, 0])
+    x, g = reflective_transformation(y, lb, ub)
+    assert_equal(x, y)
+    assert_equal(g, np.ones(2))
+
+    y = np.array([-4, 4], dtype=float)
+
+    x, g = reflective_transformation(y, lb, np.array([np.inf, np.inf]))
+    assert_equal(x, [2, 4])
+    assert_equal(g, [-1, 1])
+
+    x, g = reflective_transformation(y, np.array([-np.inf, -np.inf]), ub)
+    assert_equal(x, [-4, 2])
+    assert_equal(g, [1, -1])
+
+    x, g = reflective_transformation(y, lb, ub)
+    assert_equal(x, [2, 2])
+    assert_equal(g, [-1, -1])
+
+    lb = np.array([-np.inf, -2])
+    ub = np.array([5, np.inf])
+    y = np.array([10, 10], dtype=float)
+    x, g = reflective_transformation(y, lb, ub)
+    assert_equal(x, [0, 10])
+    assert_equal(g, [-1, 1])
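+
+
+# Scalar restatement of reflective_transformation matching the cases
+# above: fold y into [lb, ub] by reflecting it off whichever bounds are
+# finite; g = dx/dy is +1 or -1.  Illustrative sketch only.
+def _reference_reflect_scalar(y, lb, ub):
+    if lb <= y <= ub:
+        return y, 1
+    if np.isinf(lb):          # only the upper bound can reflect y
+        return 2 * ub - y, -1
+    if np.isinf(ub):          # only the lower bound can reflect y
+        return 2 * lb - y, -1
+    # both bounds finite: reflections repeat with period 2*(ub - lb)
+    d = ub - lb
+    t = (y - lb) % (2 * d)
+    return (lb + t, 1) if t <= d else (lb + 2 * d - t, -1)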
+
+
+def test_linear_operators():
+    A = np.arange(6).reshape((3, 2))
+
+    d_left = np.array([-1, 2, 5])
+    DA = np.diag(d_left).dot(A)
+    J_left = left_multiplied_operator(A, d_left)
+
+    d_right = np.array([5, 10])
+    AD = A.dot(np.diag(d_right))
+    J_right = right_multiplied_operator(A, d_right)
+
+    x = np.array([-2, 3])
+    X = -2 * np.arange(2, 8).reshape((2, 3))
+    xt = np.array([0, -2, 15])
+
+    assert_allclose(DA.dot(x), J_left.matvec(x))
+    assert_allclose(DA.dot(X), J_left.matmat(X))
+    assert_allclose(DA.T.dot(xt), J_left.rmatvec(xt))
+
+    assert_allclose(AD.dot(x), J_right.matvec(x))
+    assert_allclose(AD.dot(X), J_right.matmat(X))
+    assert_allclose(AD.T.dot(xt), J_right.rmatvec(xt))
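+
+
+# What left_multiplied_operator represents, per the checks above: the
+# operator diag(d) @ A applied without forming the product.  A sketch
+# built on scipy.sparse.linalg.LinearOperator, not the implementation
+# in scipy.optimize._lsq.common.
+def _reference_left_multiplied(A, d):
+    from scipy.sparse.linalg import LinearOperator
+    return LinearOperator(
+        A.shape,
+        matvec=lambda v: d * A.dot(v),           # diag(d) @ (A @ v)
+        rmatvec=lambda v: A.T.dot(d * v),        # A.T @ diag(d) @ v
+        matmat=lambda V: d[:, None] * A.dot(V))  # diag(d) @ (A @ V)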
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_lsq_linear.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_lsq_linear.py
new file mode 100644
index 00000000..5858fde0
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_lsq_linear.py
@@ -0,0 +1,269 @@
+import pytest
+
+import numpy as np
+from numpy.linalg import lstsq
+from numpy.testing import assert_allclose, assert_equal, assert_
+
+from scipy.sparse import rand, coo_matrix
+from scipy.sparse.linalg import aslinearoperator
+from scipy.optimize import lsq_linear
+
+
+A = np.array([
+    [0.171, -0.057],
+    [-0.049, -0.248],
+    [-0.166, 0.054],
+])
+b = np.array([0.074, 1.014, -0.383])
+
+
+class BaseMixin:
+    def setup_method(self):
+        self.rnd = np.random.RandomState(0)
+
+    def test_dense_no_bounds(self):
+        for lsq_solver in self.lsq_solvers:
+            res = lsq_linear(A, b, method=self.method, lsq_solver=lsq_solver)
+            assert_allclose(res.x, lstsq(A, b, rcond=-1)[0])
+            assert_allclose(res.x, res.unbounded_sol[0])
+
+    def test_dense_bounds(self):
+        # Solutions for comparison are taken from MATLAB.
+        lb = np.array([-1, -10])
+        ub = np.array([1, 0])
+        unbounded_sol = lstsq(A, b, rcond=-1)[0]
+        for lsq_solver in self.lsq_solvers:
+            res = lsq_linear(A, b, (lb, ub), method=self.method,
+                             lsq_solver=lsq_solver)
+            assert_allclose(res.x, lstsq(A, b, rcond=-1)[0])
+            assert_allclose(res.unbounded_sol[0], unbounded_sol)
+
+        lb = np.array([0.0, -np.inf])
+        for lsq_solver in self.lsq_solvers:
+            res = lsq_linear(A, b, (lb, np.inf), method=self.method,
+                             lsq_solver=lsq_solver)
+            assert_allclose(res.x, np.array([0.0, -4.084174437334673]),
+                            atol=1e-6)
+            assert_allclose(res.unbounded_sol[0], unbounded_sol)
+
+        lb = np.array([-1, 0])
+        for lsq_solver in self.lsq_solvers:
+            res = lsq_linear(A, b, (lb, np.inf), method=self.method,
+                             lsq_solver=lsq_solver)
+            assert_allclose(res.x, np.array([0.448427311733504, 0]),
+                            atol=1e-15)
+            assert_allclose(res.unbounded_sol[0], unbounded_sol)
+
+        ub = np.array([np.inf, -5])
+        for lsq_solver in self.lsq_solvers:
+            res = lsq_linear(A, b, (-np.inf, ub), method=self.method,
+                             lsq_solver=lsq_solver)
+            assert_allclose(res.x, np.array([-0.105560998682388, -5]))
+            assert_allclose(res.unbounded_sol[0], unbounded_sol)
+
+        ub = np.array([-1, np.inf])
+        for lsq_solver in self.lsq_solvers:
+            res = lsq_linear(A, b, (-np.inf, ub), method=self.method,
+                             lsq_solver=lsq_solver)
+            assert_allclose(res.x, np.array([-1, -4.181102129483254]))
+            assert_allclose(res.unbounded_sol[0], unbounded_sol)
+
+        lb = np.array([0, -4])
+        ub = np.array([1, 0])
+        for lsq_solver in self.lsq_solvers:
+            res = lsq_linear(A, b, (lb, ub), method=self.method,
+                             lsq_solver=lsq_solver)
+            assert_allclose(res.x, np.array([0.005236663400791, -4]))
+            assert_allclose(res.unbounded_sol[0], unbounded_sol)
+
+    def test_np_matrix(self):
+        # gh-10711
+        with np.testing.suppress_warnings() as sup:
+            sup.filter(PendingDeprecationWarning)
+            A = np.matrix([[20, -4, 0, 2, 3], [10, -2, 1, 0, -1]])
+        k = np.array([20, 15])
+        _ = lsq_linear(A, k)  # should not raise for np.matrix input
+
+    def test_dense_rank_deficient(self):
+        A = np.array([[-0.307, -0.184]])
+        b = np.array([0.773])
+        lb = [-0.1, -0.1]
+        ub = [0.1, 0.1]
+        for lsq_solver in self.lsq_solvers:
+            res = lsq_linear(A, b, (lb, ub), method=self.method,
+                             lsq_solver=lsq_solver)
+            assert_allclose(res.x, [-0.1, -0.1])
+            assert_allclose(res.unbounded_sol[0], lstsq(A, b, rcond=-1)[0])
+
+        A = np.array([
+            [0.334, 0.668],
+            [-0.516, -1.032],
+            [0.192, 0.384],
+        ])
+        b = np.array([-1.436, 0.135, 0.909])
+        lb = [0, -1]
+        ub = [1, -0.5]
+        for lsq_solver in self.lsq_solvers:
+            res = lsq_linear(A, b, (lb, ub), method=self.method,
+                             lsq_solver=lsq_solver)
+            assert_allclose(res.optimality, 0, atol=1e-11)
+            assert_allclose(res.unbounded_sol[0], lstsq(A, b, rcond=-1)[0])
+
+    def test_full_result(self):
+        lb = np.array([0, -4])
+        ub = np.array([1, 0])
+        res = lsq_linear(A, b, (lb, ub), method=self.method)
+
+        assert_allclose(res.x, [0.005236663400791, -4])
+        assert_allclose(res.unbounded_sol[0], lstsq(A, b, rcond=-1)[0])
+
+        r = A.dot(res.x) - b
+        assert_allclose(res.cost, 0.5 * np.dot(r, r))
+        assert_allclose(res.fun, r)
+
+        assert_allclose(res.optimality, 0.0, atol=1e-12)
+        assert_equal(res.active_mask, [0, -1])
+        assert_(res.nit < 15)
+        assert_(res.status == 1 or res.status == 3)
+        assert_(isinstance(res.message, str))
+        assert_(res.success)
+
+    # This is a test for issue #9982.
+    def test_almost_singular(self):
+        A = np.array(
+            [[0.8854232310355122, 0.0365312146937765, 0.0365312146836789],
+             [0.3742460132129041, 0.0130523214078376, 0.0130523214077873],
+             [0.9680633871281361, 0.0319366128718639, 0.0319366128718388]])
+
+        b = np.array(
+            [0.0055029366538097, 0.0026677442422208, 0.0066612514782381])
+
+        result = lsq_linear(A, b, method=self.method)
+        assert_(result.cost < 1.1e-8)
+
+    def test_large_rank_deficient(self):
+        np.random.seed(0)
+        n, m = np.sort(np.random.randint(2, 1000, size=2))
+        m *= 2   # make m >> n
+        A = 1.0 * np.random.randint(-99, 99, size=[m, n])
+        b = 1.0 * np.random.randint(-99, 99, size=[m])
+        bounds = 1.0 * np.sort(np.random.randint(-99, 99, size=(2, n)), axis=0)
+        bounds[1, :] += 1.0  # ensure ub > lb
+
+        # Make the A matrix strongly rank deficient by replicating some columns
+        w = np.random.choice(n, n)  # Select random columns with duplicates
+        A = A[:, w]
+
+        x_bvls = lsq_linear(A, b, bounds=bounds, method='bvls').x
+        x_trf = lsq_linear(A, b, bounds=bounds, method='trf').x
+
+        cost_bvls = np.sum((A @ x_bvls - b)**2)
+        cost_trf = np.sum((A @ x_trf - b)**2)
+
+        assert_(abs(cost_bvls - cost_trf) < cost_trf*1e-10)
+
+    def test_convergence_small_matrix(self):
+        A = np.array([[49.0, 41.0, -32.0],
+                      [-19.0, -32.0, -8.0],
+                      [-13.0, 10.0, 69.0]])
+        b = np.array([-41.0, -90.0, 47.0])
+        bounds = np.array([[31.0, -44.0, 26.0],
+                           [54.0, -32.0, 28.0]])
+
+        x_bvls = lsq_linear(A, b, bounds=bounds, method='bvls').x
+        x_trf = lsq_linear(A, b, bounds=bounds, method='trf').x
+
+        cost_bvls = np.sum((A @ x_bvls - b)**2)
+        cost_trf = np.sum((A @ x_trf - b)**2)
+
+        assert_(abs(cost_bvls - cost_trf) < cost_trf*1e-10)
+
+
+class SparseMixin:
+    def test_sparse_and_LinearOperator(self):
+        m = 5000
+        n = 1000
+        A = rand(m, n, random_state=0)
+        b = self.rnd.randn(m)
+        res = lsq_linear(A, b)
+        assert_allclose(res.optimality, 0, atol=1e-6)
+
+        A = aslinearoperator(A)
+        res = lsq_linear(A, b)
+        assert_allclose(res.optimality, 0, atol=1e-6)
+
+    def test_sparse_bounds(self):
+        m = 5000
+        n = 1000
+        A = rand(m, n, random_state=0)
+        b = self.rnd.randn(m)
+        lb = self.rnd.randn(n)
+        ub = lb + 1
+        res = lsq_linear(A, b, (lb, ub))
+        assert_allclose(res.optimality, 0.0, atol=1e-6)
+
+        res = lsq_linear(A, b, (lb, ub), lsmr_tol=1e-13,
+                         lsmr_maxiter=1500)
+        assert_allclose(res.optimality, 0.0, atol=1e-6)
+
+        res = lsq_linear(A, b, (lb, ub), lsmr_tol='auto')
+        assert_allclose(res.optimality, 0.0, atol=1e-6)
+
+    def test_sparse_ill_conditioned(self):
+        # Sparse matrix with condition number of ~4 million
+        data = np.array([1., 1., 1., 1. + 1e-6, 1.])
+        row = np.array([0, 0, 1, 2, 2])
+        col = np.array([0, 2, 1, 0, 2])
+        A = coo_matrix((data, (row, col)), shape=(3, 3))
+
+        # Get the exact solution
+        exact_sol = lsq_linear(A.toarray(), b, lsq_solver='exact')
+
+        # Default lsmr arguments should not fully converge the solution
+        default_lsmr_sol = lsq_linear(A, b, lsq_solver='lsmr')
+        with pytest.raises(AssertionError, match=""):
+            assert_allclose(exact_sol.x, default_lsmr_sol.x)
+
+        # By increasing the maximum lsmr iters, it will converge
+        conv_lsmr = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=10)
+        assert_allclose(exact_sol.x, conv_lsmr.x)
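+
+
+def _demo_condition_number():
+    # Illustrative check of the "condition number of ~4 million" claim
+    # in test_sparse_ill_conditioned above; not part of the test suite.
+    data = np.array([1., 1., 1., 1. + 1e-6, 1.])
+    row = np.array([0, 0, 1, 2, 2])
+    col = np.array([0, 2, 1, 0, 2])
+    A_dense = coo_matrix((data, (row, col)), shape=(3, 3)).toarray()
+    assert 1e6 < np.linalg.cond(A_dense) < 1e7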
+
+
+class TestTRF(BaseMixin, SparseMixin):
+    method = 'trf'
+    lsq_solvers = ['exact', 'lsmr']
+
+
+class TestBVLS(BaseMixin):
+    method = 'bvls'
+    lsq_solvers = ['exact']
+
+
+class TestErrorChecking:
+    def test_option_lsmr_tol(self):
+        # Should work with a positive float, string equal to 'auto', or None
+        _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol=1e-2)
+        _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol='auto')
+        _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol=None)
+
+        # Should raise error with negative float, strings
+        # other than 'auto', and integers
+        err_message = "`lsmr_tol` must be None, 'auto', or positive float."
+        with pytest.raises(ValueError, match=err_message):
+            _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol=-0.1)
+        with pytest.raises(ValueError, match=err_message):
+            _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol='foo')
+        with pytest.raises(ValueError, match=err_message):
+            _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol=1)
+
+    def test_option_lsmr_maxiter(self):
+        # Should work with positive integers or None
+        _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=1)
+        _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=None)
+
+        # Should raise error with 0 or negative max iter
+        err_message = "`lsmr_maxiter` must be None or positive integer."
+        with pytest.raises(ValueError, match=err_message):
+            _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=0)
+        with pytest.raises(ValueError, match=err_message):
+            _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=-1)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_milp.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_milp.py
new file mode 100644
index 00000000..51996642
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_milp.py
@@ -0,0 +1,370 @@
+"""
+Unit test for Mixed Integer Linear Programming
+"""
+import re
+
+import numpy as np
+from numpy.testing import assert_allclose, assert_array_equal
+import pytest
+
+from .test_linprog import magic_square
+from scipy.optimize import milp, Bounds, LinearConstraint
+
+
+def test_milp_iv():
+
+    message = "`c` must be a one-dimensional array of finite numbers with"
+    with pytest.raises(ValueError, match=message):
+        milp(np.zeros((3, 4)))
+    with pytest.raises(ValueError, match=message):
+        milp([])
+    with pytest.raises(ValueError, match=message):
+        milp(None)
+
+    message = "`bounds` must be convertible into an instance of..."
+    with pytest.raises(ValueError, match=message):
+        milp(1, bounds=10)
+
+    message = "`constraints` (or each element within `constraints`) must be"
+    with pytest.raises(ValueError, match=re.escape(message)):
+        milp(1, constraints=10)
+    with pytest.raises(ValueError, match=re.escape(message)):
+        milp(np.zeros(3), constraints=([[1, 2, 3]], [2, 3], [2, 3]))
+
+    message = "The shape of `A` must be (len(b_l), len(c))."
+    with pytest.raises(ValueError, match=re.escape(message)):
+        milp(np.zeros(3), constraints=([[1, 2]], [2], [2]))
+
+    message = ("`integrality` must contain integers 0-3 and be broadcastable "
+               "to `c.shape`.")
+    with pytest.raises(ValueError, match=message):
+        milp([1, 2, 3], integrality=[1, 2])
+    with pytest.raises(ValueError, match=message):
+        milp([1, 2, 3], integrality=[1, 5, 3])
+
+    message = "`lb`, `ub`, and `keep_feasible` must be broadcastable."
+    with pytest.raises(ValueError, match=message):
+        milp([1, 2, 3], bounds=([1, 2], [3, 4, 5]))
+    with pytest.raises(ValueError, match=message):
+        milp([1, 2, 3], bounds=([1, 2, 3], [4, 5]))
+
+    message = "`bounds.lb` and `bounds.ub` must contain reals and..."
+    with pytest.raises(ValueError, match=message):
+        milp([1, 2, 3], bounds=([1, 2], [3, 4]))
+    with pytest.raises(ValueError, match=message):
+        milp([1, 2, 3], bounds=([1, 2, 3], ["3+4", 4, 5]))
+    with pytest.raises(ValueError, match=message):
+        milp([1, 2, 3], bounds=([1, 2, 3], [set(), 4, 5]))
+
+
+@pytest.mark.xfail(run=False,
+                   reason="Needs to be fixed in `_highs_wrapper`")
+def test_milp_options(capsys):
+    # run=False now because of gh-16347
+    message = "Unrecognized options detected: {'ekki'}..."
+    options = {'ekki': True}
+    with pytest.warns(RuntimeWarning, match=message):
+        milp(1, options=options)
+
+    A, b, c, numbers, M = magic_square(3)
+    options = {"disp": True, "presolve": False, "time_limit": 0.05}
+    res = milp(c=c, constraints=(A, b, b), bounds=(0, 1), integrality=1,
+               options=options)
+
+    captured = capsys.readouterr()
+    assert "Presolve is switched off" in captured.out
+    assert "Time Limit Reached" in captured.out
+    assert not res.success
+
+
+def test_result():
+    A, b, c, numbers, M = magic_square(3)
+    res = milp(c=c, constraints=(A, b, b), bounds=(0, 1), integrality=1)
+    assert res.status == 0
+    assert res.success
+    msg = "Optimization terminated successfully. (HiGHS Status 7:"
+    assert res.message.startswith(msg)
+    assert isinstance(res.x, np.ndarray)
+    assert isinstance(res.fun, float)
+    assert isinstance(res.mip_node_count, int)
+    assert isinstance(res.mip_dual_bound, float)
+    assert isinstance(res.mip_gap, float)
+
+    A, b, c, numbers, M = magic_square(6)
+    res = milp(c=c*0, constraints=(A, b, b), bounds=(0, 1), integrality=1,
+               options={'time_limit': 0.05})
+    assert res.status == 1
+    assert not res.success
+    msg = "Time limit reached. (HiGHS Status 13:"
+    assert res.message.startswith(msg)
+    assert (res.fun is res.mip_dual_bound is res.mip_gap
+            is res.mip_node_count is res.x is None)
+
+    res = milp(1, bounds=(1, -1))
+    assert res.status == 2
+    assert not res.success
+    msg = "The problem is infeasible. (HiGHS Status 8:"
+    assert res.message.startswith(msg)
+    assert (res.fun is res.mip_dual_bound is res.mip_gap
+            is res.mip_node_count is res.x is None)
+
+    res = milp(-1)
+    assert res.status == 3
+    assert not res.success
+    msg = "The problem is unbounded. (HiGHS Status 10:"
+    assert res.message.startswith(msg)
+    assert (res.fun is res.mip_dual_bound is res.mip_gap
+            is res.mip_node_count is res.x is None)
+
+
+def test_milp_optional_args():
+    # check that arguments other than `c` are indeed optional
+    res = milp(1)
+    assert res.fun == 0
+    assert_array_equal(res.x, [0])
+
+
+def test_milp_1():
+    # solve magic square problem
+    n = 3
+    A, b, c, numbers, M = magic_square(n)
+    res = milp(c=c*0, constraints=(A, b, b), bounds=(0, 1), integrality=1)
+
+    # check that solution is a magic square
+    x = np.round(res.x)
+    s = (numbers.flatten() * x).reshape(n**2, n, n)
+    square = np.sum(s, axis=0)
+    np.testing.assert_allclose(square.sum(axis=0), M)
+    np.testing.assert_allclose(square.sum(axis=1), M)
+    np.testing.assert_allclose(np.diag(square).sum(), M)
+    np.testing.assert_allclose(np.diag(square[:, ::-1]).sum(), M)
+
+
+def test_milp_2():
+    # solve MIP with inequality constraints and all integer constraints
+    # source: slide 5,
+    # https://www.cs.upc.edu/~erodri/webpage/cps/theory/lp/milp/slides.pdf
+    # also check that `milp` accepts all valid ways of specifying constraints
+    c = -np.ones(2)
+    A = [[-2, 2], [-8, 10]]
+    b_l = [1, -np.inf]
+    b_u = [np.inf, 13]
+    linear_constraint = LinearConstraint(A, b_l, b_u)
+
+    # solve original problem
+    res1 = milp(c=c, constraints=(A, b_l, b_u), integrality=True)
+    res2 = milp(c=c, constraints=linear_constraint, integrality=True)
+    res3 = milp(c=c, constraints=[(A, b_l, b_u)], integrality=True)
+    res4 = milp(c=c, constraints=[linear_constraint], integrality=True)
+    res5 = milp(c=c, integrality=True,
+                constraints=[(A[:1], b_l[:1], b_u[:1]),
+                             (A[1:], b_l[1:], b_u[1:])])
+    res6 = milp(c=c, integrality=True,
+                constraints=[LinearConstraint(A[:1], b_l[:1], b_u[:1]),
+                             LinearConstraint(A[1:], b_l[1:], b_u[1:])])
+    res7 = milp(c=c, integrality=True,
+                constraints=[(A[:1], b_l[:1], b_u[:1]),
+                             LinearConstraint(A[1:], b_l[1:], b_u[1:])])
+    xs = np.array([res1.x, res2.x, res3.x, res4.x, res5.x, res6.x, res7.x])
+    funs = np.array([res1.fun, res2.fun, res3.fun,
+                     res4.fun, res5.fun, res6.fun, res7.fun])
+    np.testing.assert_allclose(xs, np.broadcast_to([1, 2], xs.shape))
+    np.testing.assert_allclose(funs, -3)
+
+    # solve relaxed problem
+    res = milp(c=c, constraints=(A, b_l, b_u))
+    np.testing.assert_allclose(res.x, [4, 4.5])
+    np.testing.assert_allclose(res.fun, -8.5)
+
+
+def test_milp_3():
+    # solve MIP with inequality constraints and all integer constraints
+    # source: https://en.wikipedia.org/wiki/Integer_programming#Example
+    c = [0, -1]
+    A = [[-1, 1], [3, 2], [2, 3]]
+    b_u = [1, 12, 12]
+    b_l = np.full_like(b_u, -np.inf, dtype=np.float64)
+    constraints = LinearConstraint(A, b_l, b_u)
+
+    integrality = np.ones_like(c)
+
+    # solve original problem
+    res = milp(c=c, constraints=constraints, integrality=integrality)
+    assert_allclose(res.fun, -2)
+    # two optimal solutions possible, just need one of them
+    assert np.allclose(res.x, [1, 2]) or np.allclose(res.x, [2, 2])
+
+    # solve relaxed problem
+    res = milp(c=c, constraints=constraints)
+    assert_allclose(res.fun, -2.8)
+    assert_allclose(res.x, [1.8, 2.8])
+
+
+def test_milp_4():
+    # solve MIP with inequality constraints and only one integer constraint
+    # source: https://www.mathworks.com/help/optim/ug/intlinprog.html
+    c = [8, 1]
+    integrality = [0, 1]
+    A = [[1, 2], [-4, -1], [2, 1]]
+    b_l = [-14, -np.inf, -np.inf]
+    b_u = [np.inf, -33, 20]
+    constraints = LinearConstraint(A, b_l, b_u)
+    bounds = Bounds(-np.inf, np.inf)
+
+    res = milp(c, integrality=integrality, bounds=bounds,
+               constraints=constraints)
+    assert_allclose(res.fun, 59)
+    assert_allclose(res.x, [6.5, 7])
+
+
+def test_milp_5():
+    # solve MIP with inequality and equality constraints
+    # source: https://www.mathworks.com/help/optim/ug/intlinprog.html
+    c = [-3, -2, -1]
+    integrality = [0, 0, 1]
+    lb = [0, 0, 0]
+    ub = [np.inf, np.inf, 1]
+    bounds = Bounds(lb, ub)
+    A = [[1, 1, 1], [4, 2, 1]]
+    b_l = [-np.inf, 12]
+    b_u = [7, 12]
+    constraints = LinearConstraint(A, b_l, b_u)
+
+    res = milp(c, integrality=integrality, bounds=bounds,
+               constraints=constraints)
+    # there are multiple solutions
+    assert_allclose(res.fun, -12)
+
+
+@pytest.mark.slow
+@pytest.mark.timeout(120)  # prerelease_deps_coverage_64bit_blas job
+def test_milp_6():
+    # solve a larger MIP with only equality constraints
+    # source: https://www.mathworks.com/help/optim/ug/intlinprog.html
+    integrality = 1
+    A_eq = np.array([[22, 13, 26, 33, 21, 3, 14, 26],
+                     [39, 16, 22, 28, 26, 30, 23, 24],
+                     [18, 14, 29, 27, 30, 38, 26, 26],
+                     [41, 26, 28, 36, 18, 38, 16, 26]])
+    b_eq = np.array([7872, 10466, 11322, 12058])
+    c = np.array([2, 10, 13, 17, 7, 5, 7, 3])
+
+    res = milp(c=c, constraints=(A_eq, b_eq, b_eq), integrality=integrality)
+
+    np.testing.assert_allclose(res.fun, 1854)
+
+
+def test_infeasible_prob_16609():
+    # Ensure presolve does not mark trivially infeasible problems
+    # as Optimal -- see gh-16609
+    c = [1.0, 0.0]
+    integrality = [0, 1]
+
+    lb = [0, -np.inf]
+    ub = [np.inf, np.inf]
+    bounds = Bounds(lb, ub)
+
+    A_eq = [[0.0, 1.0]]
+    b_eq = [0.5]
+    constraints = LinearConstraint(A_eq, b_eq, b_eq)
+
+    res = milp(c, integrality=integrality, bounds=bounds,
+               constraints=constraints)
+    np.testing.assert_equal(res.status, 2)
+
+
+_msg_time = "Time limit reached. (HiGHS Status 13:"
+_msg_iter = "Iteration limit reached. (HiGHS Status 14:"
+
+
+@pytest.mark.skipif(np.intp(0).itemsize < 8,
+                    reason="Unhandled 32-bit GCC FP bug")
+@pytest.mark.slow
+@pytest.mark.timeout(360)
+@pytest.mark.parametrize(["options", "msg"], [({"time_limit": 10}, _msg_time),
+                                              ({"node_limit": 1}, _msg_iter)])
+def test_milp_timeout_16545(options, msg):
+    # Ensure solution is not thrown away if MILP solver times out
+    # -- see gh-16545
+    rng = np.random.default_rng(5123833489170494244)
+    A = rng.integers(0, 5, size=(100, 100))
+    b_lb = np.full(100, fill_value=-np.inf)
+    b_ub = np.full(100, fill_value=25)
+    constraints = LinearConstraint(A, b_lb, b_ub)
+    variable_lb = np.zeros(100)
+    variable_ub = np.ones(100)
+    variable_bounds = Bounds(variable_lb, variable_ub)
+    integrality = np.ones(100)
+    c_vector = -np.ones(100)
+    res = milp(
+        c_vector,
+        integrality=integrality,
+        bounds=variable_bounds,
+        constraints=constraints,
+        options=options,
+    )
+
+    assert res.message.startswith(msg)
+    assert res["x"] is not None
+
+    # ensure solution is feasible
+    x = res["x"]
+    tol = 1e-8  # sometimes needed due to finite numerical precision
+    assert np.all(b_lb - tol <= A @ x) and np.all(A @ x <= b_ub + tol)
+    assert np.all(variable_lb - tol <= x) and np.all(x <= variable_ub + tol)
+    assert np.allclose(x, np.round(x))
+
+
+def test_three_constraints_16878():
+    # `milp` failed when exactly three constraints were passed
+    # Ensure that this is no longer the case.
+    rng = np.random.default_rng(5123833489170494244)
+    A = rng.integers(0, 5, size=(6, 6))
+    bl = np.full(6, fill_value=-np.inf)
+    bu = np.full(6, fill_value=10)
+    constraints = [LinearConstraint(A[:2], bl[:2], bu[:2]),
+                   LinearConstraint(A[2:4], bl[2:4], bu[2:4]),
+                   LinearConstraint(A[4:], bl[4:], bu[4:])]
+    constraints2 = [(A[:2], bl[:2], bu[:2]),
+                    (A[2:4], bl[2:4], bu[2:4]),
+                    (A[4:], bl[4:], bu[4:])]
+    lb = np.zeros(6)
+    ub = np.ones(6)
+    variable_bounds = Bounds(lb, ub)
+    c = -np.ones(6)
+    res1 = milp(c, bounds=variable_bounds, constraints=constraints)
+    res2 = milp(c, bounds=variable_bounds, constraints=constraints2)
+    ref = milp(c, bounds=variable_bounds, constraints=(A, bl, bu))
+    assert res1.success and res2.success
+    assert_allclose(res1.x, ref.x)
+    assert_allclose(res2.x, ref.x)
+
+
+@pytest.mark.xslow
+def test_mip_rel_gap_passdown():
+    # Solve problem with decreasing mip_gap to make sure mip_rel_gap decreases
+    # Adapted from test_linprog::TestLinprogHiGHSMIP::test_mip_rel_gap_passdown
+    # MIP taken from test_milp_6 above
+    A_eq = np.array([[22, 13, 26, 33, 21, 3, 14, 26],
+                     [39, 16, 22, 28, 26, 30, 23, 24],
+                     [18, 14, 29, 27, 30, 38, 26, 26],
+                     [41, 26, 28, 36, 18, 38, 16, 26]])
+    b_eq = np.array([7872, 10466, 11322, 12058])
+    c = np.array([2, 10, 13, 17, 7, 5, 7, 3])
+
+    mip_rel_gaps = [0.25, 0.01, 0.001]
+    sol_mip_gaps = []
+    for mip_rel_gap in mip_rel_gaps:
+        res = milp(c=c, bounds=(0, np.inf), constraints=(A_eq, b_eq, b_eq),
+                   integrality=True, options={"mip_rel_gap": mip_rel_gap})
+        # assert that the solution actually has mip_gap lower than the
+        # required mip_rel_gap supplied
+        assert res.mip_gap <= mip_rel_gap
+        # check that `res.mip_gap` is as defined in the documentation
+        assert res.mip_gap == (res.fun - res.mip_dual_bound)/res.fun
+        sol_mip_gaps.append(res.mip_gap)
+
+    # make sure that the mip_rel_gap parameter is actually doing something
+    # check that differences between solution gaps are declining
+    # monotonically with the mip_rel_gap parameter.
+    assert np.all(np.diff(sol_mip_gaps) < 0)
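+
+
+def _demo_mip_gap_arithmetic():
+    # Worked instance, with made-up numbers, of the mip_gap identity
+    # asserted above: the gap is the relative distance between the
+    # incumbent objective and the dual bound.
+    fun, dual_bound = 1854.0, 1836.0        # hypothetical values
+    mip_gap = (fun - dual_bound) / fun      # ~0.0097, i.e. under 1%
+    assert mip_gap <= 0.01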
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_minimize_constrained.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_minimize_constrained.py
new file mode 100644
index 00000000..704dc895
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_minimize_constrained.py
@@ -0,0 +1,781 @@
+import numpy as np
+import pytest
+from scipy.linalg import block_diag
+from scipy.sparse import csc_matrix
+from numpy.testing import (TestCase, assert_array_almost_equal,
+                           assert_array_less, assert_, assert_allclose,
+                           suppress_warnings)
+from pytest import raises
+from scipy.optimize import (NonlinearConstraint,
+                            LinearConstraint,
+                            Bounds,
+                            minimize,
+                            BFGS,
+                            SR1)
+
+
+class Maratos:
+    """Problem 15.4 from Nocedal and Wright
+
+    The following optimization problem:
+        minimize 2*(x[0]**2 + x[1]**2 - 1) - x[0]
+        Subject to: x[0]**2 + x[1]**2 - 1 = 0
+    """
+
+    def __init__(self, degrees=60, constr_jac=None, constr_hess=None):
+        rads = degrees/180*np.pi
+        self.x0 = [np.cos(rads), np.sin(rads)]
+        self.x_opt = np.array([1.0, 0.0])
+        self.constr_jac = constr_jac
+        self.constr_hess = constr_hess
+        self.bounds = None
+
+    def fun(self, x):
+        return 2*(x[0]**2 + x[1]**2 - 1) - x[0]
+
+    def grad(self, x):
+        return np.array([4*x[0]-1, 4*x[1]])
+
+    def hess(self, x):
+        return 4*np.eye(2)
+
+    @property
+    def constr(self):
+        def fun(x):
+            return x[0]**2 + x[1]**2
+
+        if self.constr_jac is None:
+            def jac(x):
+                return [[2*x[0], 2*x[1]]]
+        else:
+            jac = self.constr_jac
+
+        if self.constr_hess is None:
+            def hess(x, v):
+                return 2*v[0]*np.eye(2)
+        else:
+            hess = self.constr_hess
+
+        return NonlinearConstraint(fun, 1, 1, jac, hess)
+
+
+class MaratosTestArgs:
+    """Problem 15.4 from Nocedal and Wright
+
+    The following optimization problem:
+        minimize 2*(x[0]**2 + x[1]**2 - 1) - x[0]
+        Subject to: x[0]**2 + x[1]**2 - 1 = 0
+    """
+
+    def __init__(self, a, b, degrees=60, constr_jac=None, constr_hess=None):
+        rads = degrees/180*np.pi
+        self.x0 = [np.cos(rads), np.sin(rads)]
+        self.x_opt = np.array([1.0, 0.0])
+        self.constr_jac = constr_jac
+        self.constr_hess = constr_hess
+        self.a = a
+        self.b = b
+        self.bounds = None
+
+    def _test_args(self, a, b):
+        if self.a != a or self.b != b:
+            raise ValueError()
+
+    def fun(self, x, a, b):
+        self._test_args(a, b)
+        return 2*(x[0]**2 + x[1]**2 - 1) - x[0]
+
+    def grad(self, x, a, b):
+        self._test_args(a, b)
+        return np.array([4*x[0]-1, 4*x[1]])
+
+    def hess(self, x, a, b):
+        self._test_args(a, b)
+        return 4*np.eye(2)
+
+    @property
+    def constr(self):
+        def fun(x):
+            return x[0]**2 + x[1]**2
+
+        if self.constr_jac is None:
+            def jac(x):
+                return [[4*x[0], 4*x[1]]]
+        else:
+            jac = self.constr_jac
+
+        if self.constr_hess is None:
+            def hess(x, v):
+                return 2*v[0]*np.eye(2)
+        else:
+            hess = self.constr_hess
+
+        return NonlinearConstraint(fun, 1, 1, jac, hess)
+
+
+class MaratosGradInFunc:
+    """Problem 15.4 from Nocedal and Wright
+
+    The following optimization problem:
+        minimize 2*(x[0]**2 + x[1]**2 - 1) - x[0]
+        Subject to: x[0]**2 + x[1]**2 - 1 = 0
+    """
+
+    def __init__(self, degrees=60, constr_jac=None, constr_hess=None):
+        rads = degrees/180*np.pi
+        self.x0 = [np.cos(rads), np.sin(rads)]
+        self.x_opt = np.array([1.0, 0.0])
+        self.constr_jac = constr_jac
+        self.constr_hess = constr_hess
+        self.bounds = None
+
+    def fun(self, x):
+        return (2*(x[0]**2 + x[1]**2 - 1) - x[0],
+                np.array([4*x[0]-1, 4*x[1]]))
+
+    @property
+    def grad(self):
+        return True
+
+    def hess(self, x):
+        return 4*np.eye(2)
+
+    @property
+    def constr(self):
+        def fun(x):
+            return x[0]**2 + x[1]**2
+
+        if self.constr_jac is None:
+            def jac(x):
+                return [[4*x[0], 4*x[1]]]
+        else:
+            jac = self.constr_jac
+
+        if self.constr_hess is None:
+            def hess(x, v):
+                return 2*v[0]*np.eye(2)
+        else:
+            hess = self.constr_hess
+
+        return NonlinearConstraint(fun, 1, 1, jac, hess)
+
+
+class HyperbolicIneq:
+    """Problem 15.1 from Nocedal and Wright
+
+    The following optimization problem:
+        minimize 1/2*(x[0] - 2)**2 + 1/2*(x[1] - 1/2)**2
+        Subject to: 1/(x[0] + 1) - x[1] >= 1/4
+                                   x[0] >= 0
+                                   x[1] >= 0
+    """
+    def __init__(self, constr_jac=None, constr_hess=None):
+        self.x0 = [0, 0]
+        self.x_opt = [1.952823, 0.088659]
+        self.constr_jac = constr_jac
+        self.constr_hess = constr_hess
+        self.bounds = Bounds(0, np.inf)
+
+    def fun(self, x):
+        return 1/2*(x[0] - 2)**2 + 1/2*(x[1] - 1/2)**2
+
+    def grad(self, x):
+        return [x[0] - 2, x[1] - 1/2]
+
+    def hess(self, x):
+        return np.eye(2)
+
+    @property
+    def constr(self):
+        def fun(x):
+            return 1/(x[0] + 1) - x[1]
+
+        if self.constr_jac is None:
+            def jac(x):
+                return [[-1/(x[0] + 1)**2, -1]]
+        else:
+            jac = self.constr_jac
+
+        if self.constr_hess is None:
+            def hess(x, v):
+                return 2*v[0]*np.array([[1/(x[0] + 1)**3, 0],
+                                        [0, 0]])
+        else:
+            hess = self.constr_hess
+
+        return NonlinearConstraint(fun, 0.25, np.inf, jac, hess)
+
+
+class Rosenbrock:
+    """Rosenbrock function.
+
+    The following optimization problem:
+        minimize sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0)
+    """
+
+    def __init__(self, n=2, random_state=0):
+        rng = np.random.RandomState(random_state)
+        self.x0 = rng.uniform(-1, 1, n)
+        self.x_opt = np.ones(n)
+        self.bounds = None
+
+    def fun(self, x):
+        x = np.asarray(x)
+        r = np.sum(100.0 * (x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0,
+                   axis=0)
+        return r
+
+    def grad(self, x):
+        x = np.asarray(x)
+        xm = x[1:-1]
+        xm_m1 = x[:-2]
+        xm_p1 = x[2:]
+        der = np.zeros_like(x)
+        der[1:-1] = (200 * (xm - xm_m1**2) -
+                     400 * (xm_p1 - xm**2) * xm - 2 * (1 - xm))
+        der[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0])
+        der[-1] = 200 * (x[-1] - x[-2]**2)
+        return der
+
+    def hess(self, x):
+        x = np.atleast_1d(x)
+        H = np.diag(-400 * x[:-1], 1) - np.diag(400 * x[:-1], -1)
+        diagonal = np.zeros(len(x), dtype=x.dtype)
+        diagonal[0] = 1200 * x[0]**2 - 400 * x[1] + 2
+        diagonal[-1] = 200
+        diagonal[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:]
+        H = H + np.diag(diagonal)
+        return H
+
+    @property
+    def constr(self):
+        return ()
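+
+
+def _demo_check_rosenbrock_gradient():
+    # Illustrative finite-difference sanity check (not part of the
+    # suite) that Rosenbrock.grad is consistent with Rosenbrock.fun.
+    prob = Rosenbrock(n=4)
+    x, eps = prob.x0, 1e-6
+    num_grad = np.array([
+        (prob.fun(x + eps * e) - prob.fun(x - eps * e)) / (2 * eps)
+        for e in np.eye(len(x))])
+    assert_allclose(num_grad, prob.grad(x), rtol=1e-4)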
+
+
+class IneqRosenbrock(Rosenbrock):
+    """Rosenbrock subject to inequality constraints.
+
+    The following optimization problem:
+        minimize sum(100.0*(x[1] - x[0]**2)**2.0 + (1 - x[0])**2)
+        subject to: x[0] + 2 x[1] <= 1
+
+    Taken from matlab ``fmincon`` documentation.
+    """
+    def __init__(self, random_state=0):
+        Rosenbrock.__init__(self, 2, random_state)
+        self.x0 = [-1, -0.5]
+        self.x_opt = [0.5022, 0.2489]
+        self.bounds = None
+
+    @property
+    def constr(self):
+        A = [[1, 2]]
+        b = 1
+        return LinearConstraint(A, -np.inf, b)
+
+
+class BoundedRosenbrock(Rosenbrock):
+    """Rosenbrock subject to inequality constraints.
+
+    The following optimization problem:
+        minimize sum(100.0*(x[1] - x[0]**2)**2.0 + (1 - x[0])**2)
+        subject to:  -2 <= x[0] <= 0
+                      0 <= x[1] <= 2
+
+    Taken from matlab ``fmincon`` documentation.
+    """
+    def __init__(self, random_state=0):
+        Rosenbrock.__init__(self, 2, random_state)
+        self.x0 = [-0.2, 0.2]
+        self.x_opt = None
+        self.bounds = Bounds([-2, 0], [0, 2])
+
+
+class EqIneqRosenbrock(Rosenbrock):
+    """Rosenbrock subject to equality and inequality constraints.
+
+    The following optimization problem:
+        minimize sum(100.0*(x[1] - x[0]**2)**2.0 + (1 - x[0])**2)
+        subject to: x[0] + 2 x[1] <= 1
+                    2 x[0] + x[1] = 1
+
+    Taken from matlab ``fmincon`` documentation.
+    """
+    def __init__(self, random_state=0):
+        Rosenbrock.__init__(self, 2, random_state)
+        self.x0 = [-1, -0.5]
+        self.x_opt = [0.41494, 0.17011]
+        self.bounds = None
+
+    @property
+    def constr(self):
+        A_ineq = [[1, 2]]
+        b_ineq = 1
+        A_eq = [[2, 1]]
+        b_eq = 1
+        return (LinearConstraint(A_ineq, -np.inf, b_ineq),
+                LinearConstraint(A_eq, b_eq, b_eq))
+
+
+class Elec:
+    """Distribution of electrons on a sphere.
+
+    Problem no. 2 from the COPS collection [1]_. Find
+    the equilibrium state distribution (of minimal
+    potential) of the electrons positioned on a
+    conducting sphere.
+
+    References
+    ----------
+    .. [1] E. D. Dolan, J. J. Moré, and T. S. Munson,
+           "Benchmarking optimization software with COPS 3.0.",
+            Argonne National Lab., Argonne, IL (US), 2004.
+    """
+    def __init__(self, n_electrons=200, random_state=0,
+                 constr_jac=None, constr_hess=None):
+        self.n_electrons = n_electrons
+        self.rng = np.random.RandomState(random_state)
+        # Initial Guess
+        phi = self.rng.uniform(0, 2 * np.pi, self.n_electrons)
+        theta = self.rng.uniform(-np.pi, np.pi, self.n_electrons)
+        x = np.cos(theta) * np.cos(phi)
+        y = np.cos(theta) * np.sin(phi)
+        z = np.sin(theta)
+        self.x0 = np.hstack((x, y, z))
+        self.x_opt = None
+        self.constr_jac = constr_jac
+        self.constr_hess = constr_hess
+        self.bounds = None
+
+    def _get_cordinates(self, x):
+        x_coord = x[:self.n_electrons]
+        y_coord = x[self.n_electrons:2 * self.n_electrons]
+        z_coord = x[2 * self.n_electrons:]
+        return x_coord, y_coord, z_coord
+
+    def _compute_coordinate_deltas(self, x):
+        x_coord, y_coord, z_coord = self._get_cordinates(x)
+        dx = x_coord[:, None] - x_coord
+        dy = y_coord[:, None] - y_coord
+        dz = z_coord[:, None] - z_coord
+        return dx, dy, dz
+
+    def fun(self, x):
+        dx, dy, dz = self._compute_coordinate_deltas(x)
+        with np.errstate(divide='ignore'):
+            dm1 = (dx**2 + dy**2 + dz**2) ** -0.5
+        dm1[np.diag_indices_from(dm1)] = 0
+        return 0.5 * np.sum(dm1)
+
+    def grad(self, x):
+        dx, dy, dz = self._compute_coordinate_deltas(x)
+
+        with np.errstate(divide='ignore'):
+            dm3 = (dx**2 + dy**2 + dz**2) ** -1.5
+        dm3[np.diag_indices_from(dm3)] = 0
+
+        grad_x = -np.sum(dx * dm3, axis=1)
+        grad_y = -np.sum(dy * dm3, axis=1)
+        grad_z = -np.sum(dz * dm3, axis=1)
+
+        return np.hstack((grad_x, grad_y, grad_z))
+
+    def hess(self, x):
+        dx, dy, dz = self._compute_coordinate_deltas(x)
+        d = (dx**2 + dy**2 + dz**2) ** 0.5
+
+        with np.errstate(divide='ignore'):
+            dm3 = d ** -3
+            dm5 = d ** -5
+
+        i = np.arange(self.n_electrons)
+        dm3[i, i] = 0
+        dm5[i, i] = 0
+
+        Hxx = dm3 - 3 * dx**2 * dm5
+        Hxx[i, i] = -np.sum(Hxx, axis=1)
+
+        Hxy = -3 * dx * dy * dm5
+        Hxy[i, i] = -np.sum(Hxy, axis=1)
+
+        Hxz = -3 * dx * dz * dm5
+        Hxz[i, i] = -np.sum(Hxz, axis=1)
+
+        Hyy = dm3 - 3 * dy**2 * dm5
+        Hyy[i, i] = -np.sum(Hyy, axis=1)
+
+        Hyz = -3 * dy * dz * dm5
+        Hyz[i, i] = -np.sum(Hyz, axis=1)
+
+        Hzz = dm3 - 3 * dz**2 * dm5
+        Hzz[i, i] = -np.sum(Hzz, axis=1)
+
+        H = np.vstack((
+            np.hstack((Hxx, Hxy, Hxz)),
+            np.hstack((Hxy, Hyy, Hyz)),
+            np.hstack((Hxz, Hyz, Hzz))
+        ))
+
+        return H
+
+    @property
+    def constr(self):
+        def fun(x):
+            x_coord, y_coord, z_coord = self._get_cordinates(x)
+            return x_coord**2 + y_coord**2 + z_coord**2 - 1
+
+        if self.constr_jac is None:
+            def jac(x):
+                x_coord, y_coord, z_coord = self._get_cordinates(x)
+                Jx = 2 * np.diag(x_coord)
+                Jy = 2 * np.diag(y_coord)
+                Jz = 2 * np.diag(z_coord)
+                return csc_matrix(np.hstack((Jx, Jy, Jz)))
+        else:
+            jac = self.constr_jac
+
+        if self.constr_hess is None:
+            def hess(x, v):
+                D = 2 * np.diag(v)
+                return block_diag(D, D, D)
+        else:
+            hess = self.constr_hess
+
+        return NonlinearConstraint(fun, -np.inf, 0, jac, hess)
+
+
+class TestTrustRegionConstr(TestCase):
+
+    @pytest.mark.slow
+    def test_list_of_problems(self):
+        list_of_problems = [Maratos(),
+                            Maratos(constr_hess='2-point'),
+                            Maratos(constr_hess=SR1()),
+                            Maratos(constr_jac='2-point', constr_hess=SR1()),
+                            MaratosGradInFunc(),
+                            HyperbolicIneq(),
+                            HyperbolicIneq(constr_hess='3-point'),
+                            HyperbolicIneq(constr_hess=BFGS()),
+                            HyperbolicIneq(constr_jac='3-point',
+                                           constr_hess=BFGS()),
+                            Rosenbrock(),
+                            IneqRosenbrock(),
+                            EqIneqRosenbrock(),
+                            BoundedRosenbrock(),
+                            Elec(n_electrons=2),
+                            Elec(n_electrons=2, constr_hess='2-point'),
+                            Elec(n_electrons=2, constr_hess=SR1()),
+                            Elec(n_electrons=2, constr_jac='3-point',
+                                 constr_hess=SR1())]
+
+        for prob in list_of_problems:
+            for grad in (prob.grad, '3-point', False):
+                for hess in (prob.hess,
+                             '3-point',
+                             SR1(),
+                             BFGS(exception_strategy='damp_update'),
+                             BFGS(exception_strategy='skip_update')):
+
+                    # Remove exceptions
+                    if grad in ('2-point', '3-point', 'cs', False) and \
+                       hess in ('2-point', '3-point', 'cs'):
+                        continue
+                    if prob.grad is True and grad in ('3-point', False):
+                        continue
+                    with suppress_warnings() as sup:
+                        sup.filter(UserWarning, "delta_grad == 0.0")
+                        result = minimize(prob.fun, prob.x0,
+                                          method='trust-constr',
+                                          jac=grad, hess=hess,
+                                          bounds=prob.bounds,
+                                          constraints=prob.constr)
+
+                    if prob.x_opt is not None:
+                        assert_array_almost_equal(result.x, prob.x_opt,
+                                                  decimal=5)
+                        # gtol
+                        if result.status == 1:
+                            assert_array_less(result.optimality, 1e-8)
+                    # xtol
+                    if result.status == 2:
+                        assert_array_less(result.tr_radius, 1e-8)
+
+                        if result.method == "tr_interior_point":
+                            assert_array_less(result.barrier_parameter, 1e-8)
+                    # max iter
+                    if result.status in (0, 3):
+                        raise RuntimeError("Invalid termination condition.")
+
+    def test_default_jac_and_hess(self):
+        def fun(x):
+            return (x - 1) ** 2
+        bounds = [(-2, 2)]
+        res = minimize(fun, x0=[-1.5], bounds=bounds, method='trust-constr')
+        assert_array_almost_equal(res.x, 1, decimal=5)
+
+    def test_default_hess(self):
+        def fun(x):
+            return (x - 1) ** 2
+        bounds = [(-2, 2)]
+        res = minimize(fun, x0=[-1.5], bounds=bounds, method='trust-constr',
+                       jac='2-point')
+        assert_array_almost_equal(res.x, 1, decimal=5)
+
+    def test_no_constraints(self):
+        prob = Rosenbrock()
+        result = minimize(prob.fun, prob.x0,
+                          method='trust-constr',
+                          jac=prob.grad, hess=prob.hess)
+        result1 = minimize(prob.fun, prob.x0,
+                           method='L-BFGS-B',
+                           jac='2-point')
+
+        result2 = minimize(prob.fun, prob.x0,
+                           method='L-BFGS-B',
+                           jac='3-point')
+        assert_array_almost_equal(result.x, prob.x_opt, decimal=5)
+        assert_array_almost_equal(result1.x, prob.x_opt, decimal=5)
+        assert_array_almost_equal(result2.x, prob.x_opt, decimal=5)
+
+    def test_hessp(self):
+        prob = Maratos()
+
+        def hessp(x, p):
+            H = prob.hess(x)
+            return H.dot(p)
+
+        result = minimize(prob.fun, prob.x0,
+                          method='trust-constr',
+                          jac=prob.grad, hessp=hessp,
+                          bounds=prob.bounds,
+                          constraints=prob.constr)
+
+        if prob.x_opt is not None:
+            assert_array_almost_equal(result.x, prob.x_opt, decimal=2)
+
+        # gtol
+        if result.status == 1:
+            assert_array_less(result.optimality, 1e-8)
+        # xtol
+        if result.status == 2:
+            assert_array_less(result.tr_radius, 1e-8)
+
+            if result.method == "tr_interior_point":
+                assert_array_less(result.barrier_parameter, 1e-8)
+        # max iter
+        if result.status in (0, 3):
+            raise RuntimeError("Invalid termination condition.")
+
+    def test_args(self):
+        prob = MaratosTestArgs("a", 234)
+
+        result = minimize(prob.fun, prob.x0, ("a", 234),
+                          method='trust-constr',
+                          jac=prob.grad, hess=prob.hess,
+                          bounds=prob.bounds,
+                          constraints=prob.constr)
+
+        if prob.x_opt is not None:
+            assert_array_almost_equal(result.x, prob.x_opt, decimal=2)
+
+        # gtol
+        if result.status == 1:
+            assert_array_less(result.optimality, 1e-8)
+        # xtol
+        if result.status == 2:
+            assert_array_less(result.tr_radius, 1e-8)
+            if result.method == "tr_interior_point":
+                assert_array_less(result.barrier_parameter, 1e-8)
+        # max iter
+        if result.status in (0, 3):
+            raise RuntimeError("Invalid termination condition.")
+
+    def test_raise_exception(self):
+        prob = Maratos()
+
+        raises(ValueError, minimize, prob.fun, prob.x0, method='trust-constr',
+               jac='2-point', hess='2-point', constraints=prob.constr)
+
+    def test_issue_9044(self):
+        # https://github.com/scipy/scipy/issues/9044
+        # Test the returned `OptimizeResult` contains keys consistent with
+        # other solvers.
+
+        def callback(x, info):
+            assert_('nit' in info)
+            assert_('niter' in info)
+
+        result = minimize(lambda x: x**2, [0], jac=lambda x: 2*x,
+                          hess=lambda x: 2, callback=callback,
+                          method='trust-constr')
+        assert_(result.get('success'))
+        assert_(result.get('nit', -1) == 1)
+
+        # Also check existence of the 'niter' attribute, for backward
+        # compatibility
+        assert_(result.get('niter', -1) == 1)
+
+class TestEmptyConstraint(TestCase):
+    """
+    Here we minimize x^2+y^2 subject to x^2-y^2>1.
+    The actual minimum is at (0, 0) which fails the constraint.
+    Therefore we will find a minimum on the boundary at (+/-1, 0).
+
+    When minimizing on the boundary, optimize uses a set of
+    constraints that removes the constraint that sets that
+    boundary.  In our case, there's only one constraint, so
+    the result is an empty constraint.
+
+    This tests that the empty constraint works.
+    """
+    def test_empty_constraint(self):
+
+        def function(x):
+            return x[0]**2 + x[1]**2
+
+        def functionjacobian(x):
+            return np.array([2.*x[0], 2.*x[1]])
+
+        def functionhvp(x, v):
+            return 2.*v
+
+        def constraint(x):
+            return np.array([x[0]**2 - x[1]**2])
+
+        def constraintjacobian(x):
+            return np.array([[2*x[0], -2*x[1]]])
+
+        def constraintlcoh(x, v):
+            return np.array([[2., 0.], [0., -2.]]) * v[0]
+
+        constraint = NonlinearConstraint(constraint, 1., np.inf,
+                                         constraintjacobian, constraintlcoh)
+
+        startpoint = [1., 2.]
+
+        bounds = Bounds([-np.inf, -np.inf], [np.inf, np.inf])
+
+        result = minimize(
+          function,
+          startpoint,
+          method='trust-constr',
+          jac=functionjacobian,
+          hessp=functionhvp,
+          constraints=[constraint],
+          bounds=bounds,
+        )
+
+        assert_array_almost_equal(abs(result.x), np.array([1, 0]), decimal=4)
+
+
+def test_bug_11886():
+    def opt(x):
+        return x[0]**2+x[1]**2
+
+    with np.testing.suppress_warnings() as sup:
+        sup.filter(PendingDeprecationWarning)
+        A = np.matrix(np.diag([1, 1]))
+    lin_cons = LinearConstraint(A, -1, np.inf)
+    minimize(opt, 2*[1], constraints=lin_cons)  # just checking that there are no errors
+
+
+# Remove xfail when gh-11649 is resolved
+@pytest.mark.xfail(reason="Known bug in trust-constr; see gh-11649.",
+                   strict=True)
+def test_gh11649():
+    bnds = Bounds(lb=[-1, -1], ub=[1, 1], keep_feasible=True)
+
+    def assert_inbounds(x):
+        assert np.all(x >= bnds.lb)
+        assert np.all(x <= bnds.ub)
+
+    def obj(x):
+        assert_inbounds(x)
+        return np.exp(x[0])*(4*x[0]**2 + 2*x[1]**2 + 4*x[0]*x[1] + 2*x[1] + 1)
+
+    def nce(x):
+        assert_inbounds(x)
+        return x[0]**2 + x[1]
+
+    def nci(x):
+        assert_inbounds(x)
+        return x[0]*x[1]
+
+    x0 = np.array((0.99, -0.99))
+    nlcs = [NonlinearConstraint(nci, -10, np.inf),
+            NonlinearConstraint(nce, 1, 1)]
+
+    res = minimize(fun=obj, x0=x0, method='trust-constr',
+                   bounds=bnds, constraints=nlcs)
+    assert res.success
+    assert_inbounds(res.x)
+    assert nlcs[0].lb < nlcs[0].fun(res.x) < nlcs[0].ub
+    assert_allclose(nce(res.x), nlcs[1].ub)
+
+    ref = minimize(fun=obj, x0=x0, method='slsqp',
+                   bounds=bnds, constraints=nlcs)
+    assert_allclose(res.fun, ref.fun)
+
+
+class TestBoundedNelderMead:
+
+    @pytest.mark.parametrize('bounds, x_opt',
+                             [(Bounds(-np.inf, np.inf), Rosenbrock().x_opt),
+                              (Bounds(-np.inf, -0.8), [-0.8, -0.8]),
+                              (Bounds(3.0, np.inf), [3.0, 9.0]),
+                              (Bounds([3.0, 1.0], [4.0, 5.0]), [3., 5.]),
+                              ])
+    def test_rosen_brock_with_bounds(self, bounds, x_opt):
+        prob = Rosenbrock()
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning, "Initial guess is not within "
+                                    "the specified bounds")
+            result = minimize(prob.fun, [-10, -10],
+                              method='Nelder-Mead',
+                              bounds=bounds)
+            assert np.less_equal(bounds.lb, result.x).all()
+            assert np.less_equal(result.x, bounds.ub).all()
+            assert np.allclose(prob.fun(result.x), result.fun)
+            assert np.allclose(result.x, x_opt, atol=1.e-3)
+
+    def test_equal_all_bounds(self):
+        prob = Rosenbrock()
+        bounds = Bounds([4.0, 5.0], [4.0, 5.0])
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning, "Initial guess is not within "
+                                    "the specified bounds")
+            result = minimize(prob.fun, [-10, 8],
+                              method='Nelder-Mead',
+                              bounds=bounds)
+            assert np.allclose(result.x, [4.0, 5.0])
+
+    def test_equal_one_bounds(self):
+        prob = Rosenbrock()
+        bounds = Bounds([4.0, 5.0], [4.0, 20.0])
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning, "Initial guess is not within "
+                                    "the specified bounds")
+            result = minimize(prob.fun, [-10, 8],
+                              method='Nelder-Mead',
+                              bounds=bounds)
+            assert np.allclose(result.x, [4.0, 16.0])
+
+    def test_invalid_bounds(self):
+        prob = Rosenbrock()
+        with raises(ValueError, match=r"one of the lower bounds is greater "
+                                      r"than an upper bound."):
+            bounds = Bounds([-np.inf, 1.0], [4.0, -5.0])
+            minimize(prob.fun, [-10, 3],
+                     method='Nelder-Mead',
+                     bounds=bounds)
+
+    @pytest.mark.xfail(reason="Failing on Azure Linux and macOS builds, "
+                              "see gh-13846")
+    def test_outside_bounds_warning(self):
+        prob = Rosenbrock()
+        with raises(UserWarning, match=r"Initial guess is not within "
+                                       r"the specified bounds"):
+            bounds = Bounds([-np.inf, 1.0], [4.0, 5.0])
+            minimize(prob.fun, [-10, 8],
+                     method='Nelder-Mead',
+                     bounds=bounds)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_minpack.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_minpack.py
new file mode 100644
index 00000000..99b696bc
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_minpack.py
@@ -0,0 +1,973 @@
+"""
+Unit tests for optimization routines from minpack.py.
+"""
+import warnings
+import pytest
+
+from numpy.testing import (assert_, assert_almost_equal, assert_array_equal,
+                           assert_array_almost_equal, assert_allclose,
+                           assert_warns, suppress_warnings)
+from pytest import raises as assert_raises
+import numpy as np
+from numpy import array, float64
+from multiprocessing.pool import ThreadPool
+
+from scipy import optimize, linalg
+from scipy.special import lambertw
+from scipy.optimize._minpack_py import leastsq, curve_fit, fixed_point
+from scipy.optimize import OptimizeWarning
+from scipy.optimize._minimize import Bounds
+
+
+class ReturnShape:
+    """This class exists to create a callable that does not have a '__name__' attribute.
+
+    __init__ takes the argument 'shape', which should be a tuple of ints. When an instance
+    is called with a single argument 'x', it returns numpy.ones(shape).
+    """
+
+    def __init__(self, shape):
+        self.shape = shape
+
+    def __call__(self, x):
+        return np.ones(self.shape)
+
+
+def dummy_func(x, shape):
+    """A function that returns an array of ones of the given shape.
+    `x` is ignored.
+    """
+    return np.ones(shape)
+
+
+def sequence_parallel(fs):
+    """Run each zero-argument callable in `fs` on its own thread and
+    return the list of results; used to check that the solvers are
+    re-entrant and thread-safe."""
+    with ThreadPool(len(fs)) as pool:
+        return pool.map(lambda f: f(), fs)
+
+
+# Function and Jacobian for tests of solvers for systems of nonlinear
+# equations
+
+
+def pressure_network(flow_rates, Qtot, k):
+    """Evaluate non-linear equation system representing
+    the pressures and flows in a system of n parallel pipes::
+
+        f_i = P_{i+1} - P_0,   for i = 0..n-2
+        f_{n-1} = sum(Q_i) - Qtot
+
+    where Q_i is the flow rate in pipe i and P_i the pressure in that
+    pipe. The pressure drop is modeled as P = k*Q**2, where k is a valve
+    coefficient and Q is the flow rate.
+
+    Parameters
+    ----------
+    flow_rates : ndarray
+        A 1-D array of n flow rates [kg/s].
+    Qtot : float
+        A scalar, the total input flow rate [kg/s].
+    k : ndarray
+        A 1-D array of n valve coefficients [1/kg m].
+
+    Returns
+    -------
+    F : ndarray
+        A 1-D array with F[i] == f_i.
+
+    """
+    P = k * flow_rates**2
+    F = np.hstack((P[1:] - P[0], flow_rates.sum() - Qtot))
+    return F
+
+
+def pressure_network_jacobian(flow_rates, Qtot, k):
+    """Return the jacobian of the equation system F(flow_rates)
+    computed by `pressure_network` with respect to
+    *flow_rates*. See `pressure_network` for the detailed
+    description of parrameters.
+
+    Returns
+    -------
+    jac : float
+        *n* by *n* matrix ``df_i/dQ_i`` where ``n = len(flow_rates)``
+        and *f_i* and *Q_i* are described in the doc for `pressure_network`
+    """
+    n = len(flow_rates)
+    pdiff = np.diag(flow_rates[1:] * 2 * k[1:])
+
+    jac = np.empty((n, n))
+    # Rows 0..n-2: df_i/dQ_{i+1} = 2*k_{i+1}*Q_{i+1} on the shifted
+    # diagonal and df_i/dQ_0 = -2*k_0*Q_0 in the first column.
+    jac[:n-1, 1:] = pdiff
+    jac[:n-1, 0] = -2 * k[0] * flow_rates[0]
+    jac[n-1, :] = np.ones(n)
+
+    return jac
+
+
+def pressure_network_fun_and_grad(flow_rates, Qtot, k):
+    return (pressure_network(flow_rates, Qtot, k),
+            pressure_network_jacobian(flow_rates, Qtot, k))
+
+
+class TestFSolve:
+    def test_pressure_network_no_gradient(self):
+        # fsolve without gradient, equal pipes -> equal flows.
+        k = np.full(4, 0.5)
+        Qtot = 4
+        initial_guess = array([2., 0., 2., 0.])
+        final_flows, info, ier, mesg = optimize.fsolve(
+            pressure_network, initial_guess, args=(Qtot, k),
+            full_output=True)
+        assert_array_almost_equal(final_flows, np.ones(4))
+        assert_(ier == 1, mesg)
+
+    def test_pressure_network_with_gradient(self):
+        # fsolve with gradient, equal pipes -> equal flows
+        k = np.full(4, 0.5)
+        Qtot = 4
+        initial_guess = array([2., 0., 2., 0.])
+        final_flows = optimize.fsolve(
+            pressure_network, initial_guess, args=(Qtot, k),
+            fprime=pressure_network_jacobian)
+        assert_array_almost_equal(final_flows, np.ones(4))
+
+    def test_wrong_shape_func_callable(self):
+        func = ReturnShape(1)
+        # x0 is a list of two elements, but func will return an array with
+        # length 1, so this should result in a TypeError.
+        x0 = [1.5, 2.0]
+        assert_raises(TypeError, optimize.fsolve, func, x0)
+
+    def test_wrong_shape_func_function(self):
+        # x0 is a list of two elements, but func will return an array with
+        # length 1, so this should result in a TypeError.
+        x0 = [1.5, 2.0]
+        assert_raises(TypeError, optimize.fsolve, dummy_func, x0, args=((1,),))
+
+    def test_wrong_shape_fprime_callable(self):
+        func = ReturnShape(1)
+        deriv_func = ReturnShape((2,2))
+        assert_raises(TypeError, optimize.fsolve, func, x0=[0,1], fprime=deriv_func)
+
+    def test_wrong_shape_fprime_function(self):
+        func = lambda x: dummy_func(x, (2,))
+        deriv_func = lambda x: dummy_func(x, (3,3))
+        assert_raises(TypeError, optimize.fsolve, func, x0=[0,1], fprime=deriv_func)
+
+    def test_func_can_raise(self):
+        def func(*args):
+            raise ValueError('I raised')
+
+        with assert_raises(ValueError, match='I raised'):
+            optimize.fsolve(func, x0=[0])
+
+    def test_Dfun_can_raise(self):
+        func = lambda x: x - np.array([10])
+
+        def deriv_func(*args):
+            raise ValueError('I raised')
+
+        with assert_raises(ValueError, match='I raised'):
+            optimize.fsolve(func, x0=[0], fprime=deriv_func)
+
+    def test_float32(self):
+        func = lambda x: np.array([x[0] - 100, x[1] - 1000], dtype=np.float32)**2
+        p = optimize.fsolve(func, np.array([1, 1], np.float32))
+        assert_allclose(func(p), [0, 0], atol=1e-3)
+
+    def test_reentrant_func(self):
+        def func(*args):
+            self.test_pressure_network_no_gradient()
+            return pressure_network(*args)
+
+        # fsolve without gradient, equal pipes -> equal flows.
+        k = np.full(4, 0.5)
+        Qtot = 4
+        initial_guess = array([2., 0., 2., 0.])
+        final_flows, info, ier, mesg = optimize.fsolve(
+            func, initial_guess, args=(Qtot, k),
+            full_output=True)
+        assert_array_almost_equal(final_flows, np.ones(4))
+        assert_(ier == 1, mesg)
+
+    def test_reentrant_Dfunc(self):
+        def deriv_func(*args):
+            self.test_pressure_network_with_gradient()
+            return pressure_network_jacobian(*args)
+
+        # fsolve with gradient, equal pipes -> equal flows
+        k = np.full(4, 0.5)
+        Qtot = 4
+        initial_guess = array([2., 0., 2., 0.])
+        final_flows = optimize.fsolve(
+            pressure_network, initial_guess, args=(Qtot, k),
+            fprime=deriv_func)
+        assert_array_almost_equal(final_flows, np.ones(4))
+
+    def test_concurrent_no_gradient(self):
+        v = sequence_parallel([self.test_pressure_network_no_gradient] * 10)
+        assert all([result is None for result in v])
+
+    def test_concurrent_with_gradient(self):
+        v = sequence_parallel([self.test_pressure_network_with_gradient] * 10)
+        assert all([result is None for result in v])
+
+
+class TestRootHybr:
+    def test_pressure_network_no_gradient(self):
+        # root/hybr without gradient, equal pipes -> equal flows
+        k = np.full(4, 0.5)
+        Qtot = 4
+        initial_guess = array([2., 0., 2., 0.])
+        final_flows = optimize.root(pressure_network, initial_guess,
+                                    method='hybr', args=(Qtot, k)).x
+        assert_array_almost_equal(final_flows, np.ones(4))
+
+    def test_pressure_network_with_gradient(self):
+        # root/hybr with gradient, equal pipes -> equal flows
+        k = np.full(4, 0.5)
+        Qtot = 4
+        initial_guess = array([[2., 0., 2., 0.]])
+        final_flows = optimize.root(pressure_network, initial_guess,
+                                    args=(Qtot, k), method='hybr',
+                                    jac=pressure_network_jacobian).x
+        assert_array_almost_equal(final_flows, np.ones(4))
+
+    def test_pressure_network_with_gradient_combined(self):
+        # root/hybr with gradient and function combined, equal pipes -> equal
+        # flows
+        k = np.full(4, 0.5)
+        Qtot = 4
+        initial_guess = array([2., 0., 2., 0.])
+        final_flows = optimize.root(pressure_network_fun_and_grad,
+                                    initial_guess, args=(Qtot, k),
+                                    method='hybr', jac=True).x
+        assert_array_almost_equal(final_flows, np.ones(4))
+
+
+class TestRootLM:
+    def test_pressure_network_no_gradient(self):
+        # root/lm without gradient, equal pipes -> equal flows
+        k = np.full(4, 0.5)
+        Qtot = 4
+        initial_guess = array([2., 0., 2., 0.])
+        final_flows = optimize.root(pressure_network, initial_guess,
+                                    method='lm', args=(Qtot, k)).x
+        assert_array_almost_equal(final_flows, np.ones(4))
+
+
+class TestLeastSq:
+    def setup_method(self):
+        x = np.linspace(0, 10, 40)
+        a,b,c = 3.1, 42, -304.2
+        self.x = x
+        self.abc = a,b,c
+        y_true = a*x**2 + b*x + c
+        np.random.seed(0)
+        self.y_meas = y_true + 0.01*np.random.standard_normal(y_true.shape)
+
+    def residuals(self, p, y, x):
+        a,b,c = p
+        err = y-(a*x**2 + b*x + c)
+        return err
+
+    def residuals_jacobian(self, _p, _y, x):
+        return -np.vstack([x**2, x, np.ones_like(x)]).T
+
+    def test_basic(self):
+        p0 = array([0,0,0])
+        params_fit, ier = leastsq(self.residuals, p0,
+                                  args=(self.y_meas, self.x))
+        assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier)
+        # low precision due to random noise
+        assert_array_almost_equal(params_fit, self.abc, decimal=2)
+
+    def test_basic_with_gradient(self):
+        p0 = array([0,0,0])
+        params_fit, ier = leastsq(self.residuals, p0,
+                                  args=(self.y_meas, self.x),
+                                  Dfun=self.residuals_jacobian)
+        assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier)
+        # low precision due to random noise
+        assert_array_almost_equal(params_fit, self.abc, decimal=2)
+
+    def test_full_output(self):
+        p0 = array([[0,0,0]])
+        full_output = leastsq(self.residuals, p0,
+                              args=(self.y_meas, self.x),
+                              full_output=True)
+        params_fit, cov_x, infodict, mesg, ier = full_output
+        assert_(ier in (1,2,3,4), 'solution not found: %s' % mesg)
+
+    def test_input_untouched(self):
+        p0 = array([0,0,0],dtype=float64)
+        p0_copy = array(p0, copy=True)
+        full_output = leastsq(self.residuals, p0,
+                              args=(self.y_meas, self.x),
+                              full_output=True)
+        params_fit, cov_x, infodict, mesg, ier = full_output
+        assert_(ier in (1,2,3,4), 'solution not found: %s' % mesg)
+        assert_array_equal(p0, p0_copy)
+
+    def test_wrong_shape_func_callable(self):
+        func = ReturnShape(1)
+        # x0 is a list of two elements, but func will return an array with
+        # length 1, so this should result in a TypeError.
+        x0 = [1.5, 2.0]
+        assert_raises(TypeError, optimize.leastsq, func, x0)
+
+    def test_wrong_shape_func_function(self):
+        # x0 is a list of two elements, but func will return an array with
+        # length 1, so this should result in a TypeError.
+        x0 = [1.5, 2.0]
+        assert_raises(TypeError, optimize.leastsq, dummy_func, x0, args=((1,),))
+
+    def test_wrong_shape_Dfun_callable(self):
+        func = ReturnShape(1)
+        deriv_func = ReturnShape((2,2))
+        assert_raises(TypeError, optimize.leastsq, func, x0=[0,1], Dfun=deriv_func)
+
+    def test_wrong_shape_Dfun_function(self):
+        func = lambda x: dummy_func(x, (2,))
+        deriv_func = lambda x: dummy_func(x, (3,3))
+        assert_raises(TypeError, optimize.leastsq, func, x0=[0,1], Dfun=deriv_func)
+
+    def test_float32(self):
+        # Regression test for gh-1447
+        def func(p,x,y):
+            q = p[0]*np.exp(-(x-p[1])**2/(2.0*p[2]**2))+p[3]
+            return q - y
+
+        x = np.array([1.475,1.429,1.409,1.419,1.455,1.519,1.472, 1.368,1.286,
+                       1.231], dtype=np.float32)
+        y = np.array([0.0168,0.0193,0.0211,0.0202,0.0171,0.0151,0.0185,0.0258,
+                      0.034,0.0396], dtype=np.float32)
+        p0 = np.array([1.0,1.0,1.0,1.0])
+        p1, success = optimize.leastsq(func, p0, args=(x,y))
+
+        assert_(success in [1,2,3,4])
+        assert_((func(p1,x,y)**2).sum() < 1e-4 * (func(p0,x,y)**2).sum())
+
+    def test_func_can_raise(self):
+        def func(*args):
+            raise ValueError('I raised')
+
+        with assert_raises(ValueError, match='I raised'):
+            optimize.leastsq(func, x0=[0])
+
+    def test_Dfun_can_raise(self):
+        func = lambda x: x - np.array([10])
+
+        def deriv_func(*args):
+            raise ValueError('I raised')
+
+        with assert_raises(ValueError, match='I raised'):
+            optimize.leastsq(func, x0=[0], Dfun=deriv_func)
+
+    def test_reentrant_func(self):
+        def func(*args):
+            self.test_basic()
+            return self.residuals(*args)
+
+        p0 = array([0,0,0])
+        params_fit, ier = leastsq(func, p0,
+                                  args=(self.y_meas, self.x))
+        assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier)
+        # low precision due to random noise
+        assert_array_almost_equal(params_fit, self.abc, decimal=2)
+
+    def test_reentrant_Dfun(self):
+        def deriv_func(*args):
+            self.test_basic()
+            return self.residuals_jacobian(*args)
+
+        p0 = array([0,0,0])
+        params_fit, ier = leastsq(self.residuals, p0,
+                                  args=(self.y_meas, self.x),
+                                  Dfun=deriv_func)
+        assert_(ier in (1,2,3,4), 'solution not found (ier=%d)' % ier)
+        # low precision due to random noise
+        assert_array_almost_equal(params_fit, self.abc, decimal=2)
+
+    def test_concurrent_no_gradient(self):
+        v = sequence_parallel([self.test_basic] * 10)
+        assert all([result is None for result in v])
+
+    def test_concurrent_with_gradient(self):
+        v = sequence_parallel([self.test_basic_with_gradient] * 10)
+        assert all([result is None for result in v])
+
+    def test_func_input_output_length_check(self):
+
+        def func(x):
+            return 2 * (x[0] - 3) ** 2 + 1
+
+        with assert_raises(TypeError,
+                           match='Improper input: func input vector length N='):
+            optimize.leastsq(func, x0=[0, 1])
+
+
+class TestCurveFit:
+    def setup_method(self):
+        self.y = array([1.0, 3.2, 9.5, 13.7])
+        self.x = array([1.0, 2.0, 3.0, 4.0])
+
+    def test_one_argument(self):
+        def func(x,a):
+            return x**a
+        popt, pcov = curve_fit(func, self.x, self.y)
+        assert_(len(popt) == 1)
+        assert_(pcov.shape == (1,1))
+        assert_almost_equal(popt[0], 1.9149, decimal=4)
+        assert_almost_equal(pcov[0,0], 0.0016, decimal=4)
+
+        # Test if we get the same with full_output. Regression test for #1415.
+        # Also test if check_finite can be turned off.
+        res = curve_fit(func, self.x, self.y,
+                        full_output=1, check_finite=False)
+        (popt2, pcov2, infodict, errmsg, ier) = res
+        assert_array_almost_equal(popt, popt2)
+
+    def test_two_argument(self):
+        def func(x, a, b):
+            return b*x**a
+        popt, pcov = curve_fit(func, self.x, self.y)
+        assert_(len(popt) == 2)
+        assert_(pcov.shape == (2,2))
+        assert_array_almost_equal(popt, [1.7989, 1.1642], decimal=4)
+        assert_array_almost_equal(pcov, [[0.0852, -0.1260], [-0.1260, 0.1912]],
+                                  decimal=4)
+
+    def test_func_is_classmethod(self):
+        class test_self:
+            """This class tests if curve_fit passes the correct number of
+               arguments when the model function is a class instance method.
+            """
+
+            def func(self, x, a, b):
+                return b * x**a
+
+        test_self_inst = test_self()
+        popt, pcov = curve_fit(test_self_inst.func, self.x, self.y)
+        assert_(pcov.shape == (2,2))
+        assert_array_almost_equal(popt, [1.7989, 1.1642], decimal=4)
+        assert_array_almost_equal(pcov, [[0.0852, -0.1260], [-0.1260, 0.1912]],
+                                  decimal=4)
+
+    def test_regression_2639(self):
+        # This test fails if epsfcn in leastsq is too large.
+        x = [574.14200000000005, 574.154, 574.16499999999996,
+             574.17700000000002, 574.18799999999999, 574.19899999999996,
+             574.21100000000001, 574.22199999999998, 574.23400000000004,
+             574.245]
+        y = [859.0, 997.0, 1699.0, 2604.0, 2013.0, 1964.0, 2435.0,
+             1550.0, 949.0, 841.0]
+        guess = [574.1861428571428, 574.2155714285715, 1302.0, 1302.0,
+                 0.0035019999999983615, 859.0]
+        good = [5.74177150e+02, 5.74209188e+02, 1.74187044e+03, 1.58646166e+03,
+                1.0068462e-02, 8.57450661e+02]
+
+        def f_double_gauss(x, x0, x1, A0, A1, sigma, c):
+            return (A0*np.exp(-(x-x0)**2/(2.*sigma**2))
+                    + A1*np.exp(-(x-x1)**2/(2.*sigma**2)) + c)
+        popt, pcov = curve_fit(f_double_gauss, x, y, guess, maxfev=10000)
+        assert_allclose(popt, good, rtol=1e-5)
+
+    def test_pcov(self):
+        xdata = np.array([0, 1, 2, 3, 4, 5])
+        ydata = np.array([1, 1, 5, 7, 8, 12])
+        sigma = np.array([1, 2, 1, 2, 1, 2])
+
+        def f(x, a, b):
+            return a*x + b
+
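+        # With absolute_sigma=False, pcov is rescaled by the reduced
+        # chi-squared, so multiplying sigma by a constant leaves the
+        # parameter errors unchanged; with absolute_sigma=True they
+        # scale linearly with sigma (compare the 3*sigma cases below).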
+        for method in ['lm', 'trf', 'dogbox']:
+            popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=sigma,
+                                   method=method)
+            perr_scaled = np.sqrt(np.diag(pcov))
+            assert_allclose(perr_scaled, [0.20659803, 0.57204404], rtol=1e-3)
+
+            popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=3*sigma,
+                                   method=method)
+            perr_scaled = np.sqrt(np.diag(pcov))
+            assert_allclose(perr_scaled, [0.20659803, 0.57204404], rtol=1e-3)
+
+            popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=sigma,
+                                   absolute_sigma=True, method=method)
+            perr = np.sqrt(np.diag(pcov))
+            assert_allclose(perr, [0.30714756, 0.85045308], rtol=1e-3)
+
+            popt, pcov = curve_fit(f, xdata, ydata, p0=[2, 0], sigma=3*sigma,
+                                   absolute_sigma=True, method=method)
+            perr = np.sqrt(np.diag(pcov))
+            assert_allclose(perr, [3*0.30714756, 3*0.85045308], rtol=1e-3)
+
+        # infinite variances
+
+        def f_flat(x, a, b):
+            return a*x
+
+        pcov_expected = np.array([np.inf]*4).reshape(2, 2)
+
+        with suppress_warnings() as sup:
+            sup.filter(OptimizeWarning,
+                       "Covariance of the parameters could not be estimated")
+            popt, pcov = curve_fit(f_flat, xdata, ydata, p0=[2, 0], sigma=sigma)
+            popt1, pcov1 = curve_fit(f, xdata[:2], ydata[:2], p0=[2, 0])
+
+        assert_(pcov.shape == (2, 2))
+        assert_array_equal(pcov, pcov_expected)
+
+        assert_(pcov1.shape == (2, 2))
+        assert_array_equal(pcov1, pcov_expected)
+
+    def test_array_like(self):
+        # Test sequence input. Regression test for gh-3037.
+        def f_linear(x, a, b):
+            return a*x + b
+
+        x = [1, 2, 3, 4]
+        y = [3, 5, 7, 9]
+        assert_allclose(curve_fit(f_linear, x, y)[0], [2, 1], atol=1e-10)
+
+    def test_indeterminate_covariance(self):
+        # Test that a warning is returned when pcov is indeterminate
+        xdata = np.array([1, 2, 3, 4, 5, 6])
+        ydata = np.array([1, 2, 3, 4, 5.5, 6])
+        assert_warns(OptimizeWarning, curve_fit,
+                     lambda x, a, b: a*x, xdata, ydata)
+
+    def test_NaN_handling(self):
+        # Test for correct handling of NaNs in input data: gh-3422
+
+        # create input with NaNs
+        xdata = np.array([1, np.nan, 3])
+        ydata = np.array([1, 2, 3])
+
+        assert_raises(ValueError, curve_fit,
+                      lambda x, a, b: a*x + b, xdata, ydata)
+        assert_raises(ValueError, curve_fit,
+                      lambda x, a, b: a*x + b, ydata, xdata)
+
+        assert_raises(ValueError, curve_fit, lambda x, a, b: a*x + b,
+                      xdata, ydata, **{"check_finite": True})
+
+    def test_empty_inputs(self):
+        # Test both with and without bounds (regression test for gh-9864)
+        assert_raises(ValueError, curve_fit, lambda x, a: a*x, [], [])
+        assert_raises(ValueError, curve_fit, lambda x, a: a*x, [], [],
+                      bounds=(1, 2))
+        assert_raises(ValueError, curve_fit, lambda x, a: a*x, [1], [])
+        assert_raises(ValueError, curve_fit, lambda x, a: a*x, [2], [],
+                      bounds=(1, 2))
+
+    def test_function_zero_params(self):
+        # Fit args is zero, so "Unable to determine number of fit parameters."
+        assert_raises(ValueError, curve_fit, lambda x: x, [1, 2], [3, 4])
+
+    def test_None_x(self):  # Added in GH10196
+        popt, pcov = curve_fit(lambda _, a: a * np.arange(10),
+                               None, 2 * np.arange(10))
+        assert_allclose(popt, [2.])
+
+    def test_method_argument(self):
+        def f(x, a, b):
+            return a * np.exp(-b*x)
+
+        xdata = np.linspace(0, 1, 11)
+        ydata = f(xdata, 2., 2.)
+
+        for method in ['trf', 'dogbox', 'lm', None]:
+            popt, pcov = curve_fit(f, xdata, ydata, method=method)
+            assert_allclose(popt, [2., 2.])
+
+        assert_raises(ValueError, curve_fit, f, xdata, ydata, method='unknown')
+
+    def test_full_output(self):
+        def f(x, a, b):
+            return a * np.exp(-b * x)
+
+        xdata = np.linspace(0, 1, 11)
+        ydata = f(xdata, 2., 2.)
+
+        for method in ['trf', 'dogbox', 'lm', None]:
+            popt, pcov, infodict, errmsg, ier = curve_fit(
+                f, xdata, ydata, method=method, full_output=True)
+            assert_allclose(popt, [2., 2.])
+            assert "nfev" in infodict
+            assert "fvec" in infodict
+            if method == 'lm' or method is None:
+                assert "fjac" in infodict
+                assert "ipvt" in infodict
+                assert "qtf" in infodict
+            assert isinstance(errmsg, str)
+            assert ier in (1, 2, 3, 4)
+
+    def test_bounds(self):
+        def f(x, a, b):
+            return a * np.exp(-b*x)
+
+        xdata = np.linspace(0, 1, 11)
+        ydata = f(xdata, 2., 2.)
+
+        # The minimum w/out bounds is at [2., 2.],
+        # and with bounds it's at [1.5, something].
+        lb = [1., 0]
+        ub = [1.5, 3.]
+
+        # Test that both variants of the bounds yield the same result
+        bounds = (lb, ub)
+        bounds_class = Bounds(lb, ub)
+        for method in [None, 'trf', 'dogbox']:
+            popt, pcov = curve_fit(f, xdata, ydata, bounds=bounds,
+                                   method=method)
+            assert_allclose(popt[0], 1.5)
+
+            popt_class, pcov_class = curve_fit(f, xdata, ydata,
+                                               bounds=bounds_class,
+                                               method=method)
+            assert_allclose(popt_class, popt)
+
+        # With bounds, the starting estimate is feasible.
+        popt, pcov = curve_fit(f, xdata, ydata, method='trf',
+                               bounds=([0., 0], [0.6, np.inf]))
+        assert_allclose(popt[0], 0.6)
+
+        # method='lm' doesn't support bounds.
+        assert_raises(ValueError, curve_fit, f, xdata, ydata, bounds=bounds,
+                      method='lm')
+
+    def test_bounds_p0(self):
+        # This test is for issue #5719. The problem was that an initial guess
+        # was ignored when 'trf' or 'dogbox' methods were invoked.
+        def f(x, a):
+            return np.sin(x + a)
+
+        xdata = np.linspace(-2*np.pi, 2*np.pi, 40)
+        ydata = np.sin(xdata)
+        bounds = (-3 * np.pi, 3 * np.pi)
+        for method in ['trf', 'dogbox']:
+            popt_1, _ = curve_fit(f, xdata, ydata, p0=2.1*np.pi)
+            popt_2, _ = curve_fit(f, xdata, ydata, p0=2.1*np.pi,
+                                  bounds=bounds, method=method)
+
+            # If the initial guess is ignored, then popt_2 would be close to 0.
+            assert_allclose(popt_1, popt_2)
+
+    def test_jac(self):
+        # Test that Jacobian callable is handled correctly and
+        # weighted if sigma is provided.
+        def f(x, a, b):
+            return a * np.exp(-b*x)
+
+        def jac(x, a, b):
+            e = np.exp(-b*x)
+            return np.vstack((e, -a * x * e)).T
+
+        xdata = np.linspace(0, 1, 11)
+        ydata = f(xdata, 2., 2.)
+
+        # Test numerical options for least_squares backend.
+        for method in ['trf', 'dogbox']:
+            for scheme in ['2-point', '3-point', 'cs']:
+                popt, pcov = curve_fit(f, xdata, ydata, jac=scheme,
+                                       method=method)
+                assert_allclose(popt, [2, 2])
+
+        # Test the analytic option.
+        for method in ['lm', 'trf', 'dogbox']:
+            popt, pcov = curve_fit(f, xdata, ydata, method=method, jac=jac)
+            assert_allclose(popt, [2, 2])
+
+        # Now add an outlier and provide sigma.
+        ydata[5] = 100
+        sigma = np.ones(xdata.shape[0])
+        sigma[5] = 200
+        for method in ['lm', 'trf', 'dogbox']:
+            popt, pcov = curve_fit(f, xdata, ydata, sigma=sigma, method=method,
+                                   jac=jac)
+            # Still the optimization process is influenced somehow,
+            # have to set rtol=1e-3.
+            assert_allclose(popt, [2, 2], rtol=1e-3)
+
+    def test_maxfev_and_bounds(self):
+        # gh-6340: with no bounds, curve_fit accepts parameter maxfev (via leastsq)
+        # but with bounds, the parameter is `max_nfev` (via least_squares)
+        x = np.arange(0, 10)
+        y = 2*x
+        popt1, _ = curve_fit(lambda x,p: p*x, x, y, bounds=(0, 3), maxfev=100)
+        popt2, _ = curve_fit(lambda x,p: p*x, x, y, bounds=(0, 3), max_nfev=100)
+
+        assert_allclose(popt1, 2, atol=1e-14)
+        assert_allclose(popt2, 2, atol=1e-14)
+
+    def test_curvefit_simplecovariance(self):
+
+        def func(x, a, b):
+            return a * np.exp(-b*x)
+
+        def jac(x, a, b):
+            e = np.exp(-b*x)
+            return np.vstack((e, -a * x * e)).T
+
+        np.random.seed(0)
+        xdata = np.linspace(0, 4, 50)
+        y = func(xdata, 2.5, 1.3)
+        ydata = y + 0.2 * np.random.normal(size=len(xdata))
+
+        sigma = np.zeros(len(xdata)) + 0.2
+        covar = np.diag(sigma**2)
+
+        for jac1, jac2 in [(jac, jac), (None, None)]:
+            for absolute_sigma in [False, True]:
+                popt1, pcov1 = curve_fit(func, xdata, ydata, sigma=sigma,
+                        jac=jac1, absolute_sigma=absolute_sigma)
+                popt2, pcov2 = curve_fit(func, xdata, ydata, sigma=covar,
+                        jac=jac2, absolute_sigma=absolute_sigma)
+
+                assert_allclose(popt1, popt2, atol=1e-14)
+                assert_allclose(pcov1, pcov2, atol=1e-14)
+
+    def test_curvefit_covariance(self):
+
+        def funcp(x, a, b):
+            rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0],
+                             [1./np.sqrt(2), 1./np.sqrt(2), 0],
+                             [0, 0, 1.0]])
+            return rotn.dot(a * np.exp(-b*x))
+
+        def jacp(x, a, b):
+            rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0],
+                             [1./np.sqrt(2), 1./np.sqrt(2), 0],
+                             [0, 0, 1.0]])
+            e = np.exp(-b*x)
+            return rotn.dot(np.vstack((e, -a * x * e)).T)
+
+        def func(x, a, b):
+            return a * np.exp(-b*x)
+
+        def jac(x, a, b):
+            e = np.exp(-b*x)
+            return np.vstack((e, -a * x * e)).T
+
+        np.random.seed(0)
+        xdata = np.arange(1, 4)
+        y = func(xdata, 2.5, 1.0)
+        ydata = y + 0.2 * np.random.normal(size=len(xdata))
+        sigma = np.zeros(len(xdata)) + 0.2
+        covar = np.diag(sigma**2)
+        # Get a rotation matrix, and obtain ydatap = R ydata
+        # Chisq = ydata^T C^{-1} ydata
+        #       = ydata^T R^T R C^{-1} R^T R ydata
+        #       = ydatap^T Cp^{-1} ydatap
+        # Cp^{-1} = R C^{-1} R^T
+        # Cp      = R C R^T, since R^-1 = R^T
+        rotn = np.array([[1./np.sqrt(2), -1./np.sqrt(2), 0],
+                         [1./np.sqrt(2), 1./np.sqrt(2), 0],
+                         [0, 0, 1.0]])
+        ydatap = rotn.dot(ydata)
+        covarp = rotn.dot(covar).dot(rotn.T)
+
+        for jac1, jac2 in [(jac, jacp), (None, None)]:
+            for absolute_sigma in [False, True]:
+                popt1, pcov1 = curve_fit(func, xdata, ydata, sigma=sigma,
+                        jac=jac1, absolute_sigma=absolute_sigma)
+                popt2, pcov2 = curve_fit(funcp, xdata, ydatap, sigma=covarp,
+                        jac=jac2, absolute_sigma=absolute_sigma)
+
+                assert_allclose(popt1, popt2, rtol=1.2e-7, atol=1e-14)
+                assert_allclose(pcov1, pcov2, rtol=1.2e-7, atol=1e-14)
+
+    def test_dtypes(self):
+        # regression test for gh-9581: curve_fit fails if x and y dtypes differ
+        x = np.arange(-3, 5)
+        y = 1.5*x + 3.0 + 0.5*np.sin(x)
+
+        def func(x, a, b):
+            return a*x + b
+
+        for method in ['lm', 'trf', 'dogbox']:
+            for dtx in [np.float32, np.float64]:
+                for dty in [np.float32, np.float64]:
+                    x = x.astype(dtx)
+                    y = y.astype(dty)
+
+                    # keep the fit inside the dtype loops so every
+                    # (dtx, dty) combination is exercised
+                    with warnings.catch_warnings():
+                        warnings.simplefilter("error", OptimizeWarning)
+                        p, cov = curve_fit(func, x, y, method=method)
+
+                        assert np.isfinite(cov).all()
+                        assert not np.allclose(p, 1)  # curve_fit's initial value
+
+    def test_dtypes2(self):
+        # regression test for gh-7117: curve_fit fails if
+        # both inputs are float32
+        def hyperbola(x, s_1, s_2, o_x, o_y, c):
+            b_2 = (s_1 + s_2) / 2
+            b_1 = (s_2 - s_1) / 2
+            return o_y + b_1*(x-o_x) + b_2*np.sqrt((x-o_x)**2 + c**2/4)
+
+        min_fit = np.array([-3.0, 0.0, -2.0, -10.0, 0.0])
+        max_fit = np.array([0.0, 3.0, 3.0, 0.0, 10.0])
+        guess = np.array([-2.5/3.0, 4/3.0, 1.0, -4.0, 0.5])
+
+        params = [-2, .4, -1, -5, 9.5]
+        xdata = np.array([-32, -16, -8, 4, 4, 8, 16, 32])
+        ydata = hyperbola(xdata, *params)
+
+        # run optimization twice, with xdata being float32 and float64
+        popt_64, _ = curve_fit(f=hyperbola, xdata=xdata, ydata=ydata, p0=guess,
+                               bounds=(min_fit, max_fit))
+
+        xdata = xdata.astype(np.float32)
+        ydata = hyperbola(xdata, *params)
+
+        popt_32, _ = curve_fit(f=hyperbola, xdata=xdata, ydata=ydata, p0=guess,
+                               bounds=(min_fit, max_fit))
+
+        assert_allclose(popt_32, popt_64, atol=2e-5)
+
+    def test_broadcast_y(self):
+        xdata = np.arange(10)
+        target = 4.7 * xdata ** 2 + 3.5 * xdata + np.random.rand(len(xdata))
+        fit_func = lambda x, a, b: a*x**2 + b*x - target
+        for method in ['lm', 'trf', 'dogbox']:
+            popt0, pcov0 = curve_fit(fit_func,
+                                     xdata=xdata,
+                                     ydata=np.zeros_like(xdata),
+                                     method=method)
+            popt1, pcov1 = curve_fit(fit_func,
+                                     xdata=xdata,
+                                     ydata=0,
+                                     method=method)
+            assert_allclose(pcov0, pcov1)
+
+    def test_args_in_kwargs(self):
+        # Ensure that `args` cannot be passed as keyword argument to `curve_fit`
+
+        def func(x, a, b):
+            return a * x + b
+
+        with assert_raises(ValueError):
+            curve_fit(func,
+                      xdata=[1, 2, 3, 4],
+                      ydata=[5, 9, 13, 17],
+                      p0=[1],
+                      args=(1,))
+
+    def test_data_point_number_validation(self):
+        def func(x, a, b, c, d, e):
+            return a * np.exp(-b * x) + c + d + e
+
+        with assert_raises(TypeError, match="The number of func parameters="):
+            curve_fit(func,
+                      xdata=[1, 2, 3, 4],
+                      ydata=[5, 9, 13, 17])
+
+    @pytest.mark.filterwarnings('ignore::RuntimeWarning')
+    def test_gh4555(self):
+        # gh-4555 reported that covariance matrices returned by `leastsq`
+        # can have negative diagonal elements and eigenvalues. (In fact,
+        # they can also be asymmetric.) This shows up in the output of
+        # `scipy.optimize.curve_fit`. Check that it has been resolved.
+        def f(x, a, b, c, d, e):
+            return a*np.log(x + 1 + b) + c*np.log(x + 1 + d) + e
+
+        rng = np.random.default_rng(408113519974467917)
+        n = 100
+        x = np.arange(n)
+        y = np.linspace(2, 7, n) + rng.random(n)
+        p, cov = optimize.curve_fit(f, x, y, maxfev=100000)
+        assert np.all(np.diag(cov) > 0)
+        eigs = linalg.eigh(cov)[0]  # separate line for debugging
+        # some platforms see a small negative eigenvalue
+        assert np.all(eigs > -1e-2)
+        assert_allclose(cov, cov.T)
+
+    def test_gh4555b(self):
+        # check that PR gh-17247 did not significantly change covariance matrix
+        # for simple cases
+        rng = np.random.default_rng(408113519974467917)
+
+        def func(x, a, b, c):
+            return a * np.exp(-b * x) + c
+
+        xdata = np.linspace(0, 4, 50)
+        y = func(xdata, 2.5, 1.3, 0.5)
+        y_noise = 0.2 * rng.normal(size=xdata.size)
+        ydata = y + y_noise
+        _, res = curve_fit(func, xdata, ydata)
+        # reference from commit 1d80a2f254380d2b45733258ca42eb6b55c8755b
+        ref = [[+0.0158972536486215, 0.0069207183284242, -0.0007474400714749],
+               [+0.0069207183284242, 0.0205057958128679, +0.0053997711275403],
+               [-0.0007474400714749, 0.0053997711275403, +0.0027833930320877]]
+        # Linux_Python_38_32bit_full fails with default tolerance
+        assert_allclose(res, ref, 2e-7)
+
+
+class TestFixedPoint:
+
+    def test_scalar_trivial(self):
+        # f(x) = 2x; fixed point should be x=0
+        def func(x):
+            return 2.0*x
+        x0 = 1.0
+        x = fixed_point(func, x0)
+        assert_almost_equal(x, 0.0)
+
+    def test_scalar_basic1(self):
+        # f(x) = x**2; x0=1.05; fixed point should be x=1
+        def func(x):
+            return x**2
+        x0 = 1.05
+        x = fixed_point(func, x0)
+        assert_almost_equal(x, 1.0)
+
+    def test_scalar_basic2(self):
+        # f(x) = x**0.5; x0=1.05; fixed point should be x=1
+        def func(x):
+            return x**0.5
+        x0 = 1.05
+        x = fixed_point(func, x0)
+        assert_almost_equal(x, 1.0)
+
+    def test_array_trivial(self):
+        def func(x):
+            return 2.0*x
+        x0 = [0.3, 0.15]
+        with np.errstate(all='ignore'):
+            x = fixed_point(func, x0)
+        assert_almost_equal(x, [0.0, 0.0])
+
+    def test_array_basic1(self):
+        # f(x) = c * x**2; fixed point should be x=1/c
+        def func(x, c):
+            return c * x**2
+        c = array([0.75, 1.0, 1.25])
+        x0 = [1.1, 1.15, 0.9]
+        with np.errstate(all='ignore'):
+            x = fixed_point(func, x0, args=(c,))
+        assert_almost_equal(x, 1.0/c)
+
+    def test_array_basic2(self):
+        # f(x) = c * x**0.5; fixed point should be x=c**2
+        def func(x, c):
+            return c * x**0.5
+        c = array([0.75, 1.0, 1.25])
+        x0 = [0.8, 1.1, 1.1]
+        x = fixed_point(func, x0, args=(c,))
+        assert_almost_equal(x, c**2)
+
+    def test_lambertw(self):
+        # python-list/2010-December/594592.html
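+        # The fixed point x = exp(-2x)/2 satisfies 2x*exp(2x) = 1,
+        # i.e. 2x = W(1), so the root should equal lambertw(1)/2.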
+        xxroot = fixed_point(lambda xx: np.exp(-2.0*xx)/2.0, 1.0,
+                args=(), xtol=1e-12, maxiter=500)
+        assert_allclose(xxroot, np.exp(-2.0*xxroot)/2.0)
+        assert_allclose(xxroot, lambertw(1)/2)
+
+    def test_no_acceleration(self):
+        # github issue 5460
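+        # fixed_point's default method='del2' applies Steffensen's
+        # (Aitken del-squared) acceleration, which fails on this mapping;
+        # method='iteration' falls back to plain fixed-point iteration.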
+        ks = 2
+        kl = 6
+        m = 1.3
+        n0 = 1.001
+        i0 = ((m-1)/m)*(kl/ks/m)**(1/(m-1))
+
+        def func(n):
+            return np.log(kl/ks/n) / np.log((i0*n/(n - 1))) + 1
+
+        n = fixed_point(func, n0, method='iteration')
+        assert_allclose(n, m)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_nnls.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_nnls.py
new file mode 100644
index 00000000..33a9f1f3
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_nnls.py
@@ -0,0 +1,34 @@
+""" Unit tests for nonnegative least squares
+Author: Uwe Schmitt
+Sep 2008
+"""
+import numpy as np
+
+from numpy.testing import assert_
+from pytest import raises as assert_raises
+
+from scipy.optimize import nnls
+from numpy import arange, dot
+from numpy.linalg import norm
+
+
+class TestNNLS:
+
+    def test_nnls(self):
+        a = arange(25.0).reshape(-1,5)
+        x = arange(5.0)
+        y = dot(a,x)
+        x, res = nnls(a,y)
+        assert_(res < 1e-7)
+        assert_(norm(dot(a,x)-y) < 1e-7)
+
+    def test_maxiter(self):
+        # test that maxiter argument does stop iterations
+        # NB: did not manage to find a test case where the default value
+        # of maxiter is not sufficient, so use a too-small value
+        rndm = np.random.RandomState(1234)
+        a = rndm.uniform(size=(100, 100))
+        b = rndm.uniform(size=100)
+        with assert_raises(RuntimeError):
+            nnls(a, b, maxiter=1)
+
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_nonlin.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_nonlin.py
new file mode 100644
index 00000000..9b659869
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_nonlin.py
@@ -0,0 +1,490 @@
+""" Unit tests for nonlinear solvers
+Author: Ondrej Certik
+May 2007
+"""
+from numpy.testing import assert_
+import pytest
+
+from scipy.optimize import _nonlin as nonlin, root
+from numpy import diag, dot
+from numpy.linalg import inv
+import numpy as np
+
+from .test_minpack import pressure_network
+
+SOLVERS = {'anderson': nonlin.anderson, 'diagbroyden': nonlin.diagbroyden,
+           'linearmixing': nonlin.linearmixing, 'excitingmixing': nonlin.excitingmixing,
+           'broyden1': nonlin.broyden1, 'broyden2': nonlin.broyden2,
+           'krylov': nonlin.newton_krylov}
+MUST_WORK = {'anderson': nonlin.anderson, 'broyden1': nonlin.broyden1,
+             'broyden2': nonlin.broyden2, 'krylov': nonlin.newton_krylov}
+
+#-------------------------------------------------------------------------------
+# Test problems
+#-------------------------------------------------------------------------------
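+
+# Each test function below carries a starting guess `xin` plus bookkeeping
+# attributes: KNOWN_BAD maps the solvers expected to fail on it, while
+# JAC_KSP_BAD / ROOT_JAC_KSP_BAD list the Krylov subspace methods that are
+# skipped when the Jacobian is approximated (see _check_nonlin_func and
+# _check_root below).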
+
+
+def F(x):
+    x = np.asarray(x).T
+    d = diag([3,2,1.5,1,0.5])
+    c = 0.01
+    f = -d @ x - c * float(x.T @ x) * x
+    return f
+
+
+F.xin = [1,1,1,1,1]
+F.KNOWN_BAD = {}
+F.JAC_KSP_BAD = {}
+F.ROOT_JAC_KSP_BAD = {}
+
+
+def F2(x):
+    return x
+
+
+F2.xin = [1,2,3,4,5,6]
+F2.KNOWN_BAD = {'linearmixing': nonlin.linearmixing,
+                'excitingmixing': nonlin.excitingmixing}
+F2.JAC_KSP_BAD = {}
+F2.ROOT_JAC_KSP_BAD = {}
+
+
+def F2_lucky(x):
+    return x
+
+
+F2_lucky.xin = [0,0,0,0,0,0]
+F2_lucky.KNOWN_BAD = {}
+F2_lucky.JAC_KSP_BAD = {}
+F2_lucky.ROOT_JAC_KSP_BAD = {}
+
+
+def F3(x):
+    A = np.array([[-2, 1, 0.], [1, -2, 1], [0, 1, -2]])
+    b = np.array([1, 2, 3.])
+    return A @ x - b
+
+
+F3.xin = [1,2,3]
+F3.KNOWN_BAD = {}
+F3.JAC_KSP_BAD = {}
+F3.ROOT_JAC_KSP_BAD = {}
+
+
+def F4_powell(x):
+    A = 1e4
+    return [A*x[0]*x[1] - 1, np.exp(-x[0]) + np.exp(-x[1]) - (1 + 1/A)]
+
+
+F4_powell.xin = [-1, -2]
+F4_powell.KNOWN_BAD = {'linearmixing': nonlin.linearmixing,
+                       'excitingmixing': nonlin.excitingmixing,
+                       'diagbroyden': nonlin.diagbroyden}
+# In the extreme case, this does not converge for the nonlinear problem
+# solved by MINRES, nor for the root problem solved by
+# GMRES/BiCGStab/CGS/MINRES/TFQMR, when a Krylov method is used to
+# approximate the Jacobian.
+F4_powell.JAC_KSP_BAD = {'minres'}
+F4_powell.ROOT_JAC_KSP_BAD = {'gmres', 'bicgstab', 'cgs', 'minres', 'tfqmr'}
+
+
+def F5(x):
+    return pressure_network(x, 4, np.array([.5, .5, .5, .5]))
+
+
+F5.xin = [2., 0, 2, 0]
+F5.KNOWN_BAD = {'excitingmixing': nonlin.excitingmixing,
+                'linearmixing': nonlin.linearmixing,
+                'diagbroyden': nonlin.diagbroyden}
+# In the extreme case, the Jacobian inversion yields a zero vector for the
+# nonlinear problem solved by CGS/MINRES, and the root problem solved by
+# MINRES does not converge, when a Krylov method is used to approximate
+# the Jacobian.
+F5.JAC_KSP_BAD = {'cgs', 'minres'}
+F5.ROOT_JAC_KSP_BAD = {'minres'}
+
+
+def F6(x):
+    x1, x2 = x
+    J0 = np.array([[-4.256, 14.7],
+                   [0.8394989, 0.59964207]])
+    v = np.array([(x1 + 3) * (x2**5 - 7) + 3*6,
+                  np.sin(x2 * np.exp(x1) - 1)])
+    return -np.linalg.solve(J0, v)
+
+
+F6.xin = [-0.5, 1.4]
+F6.KNOWN_BAD = {'excitingmixing': nonlin.excitingmixing,
+                'linearmixing': nonlin.linearmixing,
+                'diagbroyden': nonlin.diagbroyden}
+F6.JAC_KSP_BAD = {}
+F6.ROOT_JAC_KSP_BAD = {}
+
+
+#-------------------------------------------------------------------------------
+# Tests
+#-------------------------------------------------------------------------------
+
+
+class TestNonlin:
+    """
+    Check the Broyden methods for a few test problems.
+
+    broyden1, broyden2, and newton_krylov must succeed for
+    all functions. Some of the others don't -- tests in KNOWN_BAD are skipped.
+
+    """
+
+    def _check_nonlin_func(self, f, func, f_tol=1e-2):
+        # Test all methods mentioned in the class `KrylovJacobian`
+        if func == SOLVERS['krylov']:
+            for method in ['gmres', 'bicgstab', 'cgs', 'minres', 'tfqmr']:
+                if method in f.JAC_KSP_BAD:
+                    continue
+
+                x = func(f, f.xin, method=method, line_search=None,
+                         f_tol=f_tol, maxiter=200, verbose=0)
+                assert_(np.absolute(f(x)).max() < f_tol)
+
+        x = func(f, f.xin, f_tol=f_tol, maxiter=200, verbose=0)
+        assert_(np.absolute(f(x)).max() < f_tol)
+
+    def _check_root(self, f, method, f_tol=1e-2):
+        # Test Krylov methods
+        if method == 'krylov':
+            for jac_method in ['gmres', 'bicgstab', 'cgs', 'minres', 'tfqmr']:
+                if jac_method in f.ROOT_JAC_KSP_BAD:
+                    continue
+
+                res = root(f, f.xin, method=method,
+                           options={'ftol': f_tol, 'maxiter': 200,
+                                    'disp': 0,
+                                    'jac_options': {'method': jac_method}})
+                assert_(np.absolute(res.fun).max() < f_tol)
+
+        res = root(f, f.xin, method=method,
+                   options={'ftol': f_tol, 'maxiter': 200, 'disp': 0})
+        assert_(np.absolute(res.fun).max() < f_tol)
+
+    @pytest.mark.xfail
+    def _check_func_fail(self, *a, **kw):
+        pass
+
+    def test_problem_nonlin(self):
+        for f in [F, F2, F2_lucky, F3, F4_powell, F5, F6]:
+            for func in SOLVERS.values():
+                if func in f.KNOWN_BAD.values():
+                    if func in MUST_WORK.values():
+                        self._check_func_fail(f, func)
+                    continue
+                self._check_nonlin_func(f, func)
+
+    @pytest.mark.parametrize("method", ['lgmres', 'gmres', 'bicgstab', 'cgs',
+                                        'minres', 'tfqmr'])
+    def test_tol_norm_called(self, method):
+        # Check that supplying tol_norm keyword to nonlin_solve works
+        self._tol_norm_used = False
+
+        def local_norm_func(x):
+            self._tol_norm_used = True
+            return np.absolute(x).max()
+
+        nonlin.newton_krylov(F, F.xin, method=method, f_tol=1e-2,
+                             maxiter=200, verbose=0,
+                             tol_norm=local_norm_func)
+        assert_(self._tol_norm_used)
+
+    def test_problem_root(self):
+        for f in [F, F2, F2_lucky, F3, F4_powell, F5, F6]:
+            for meth in SOLVERS:
+                if meth in f.KNOWN_BAD:
+                    if meth in MUST_WORK:
+                        self._check_func_fail(f, meth)
+                    continue
+                self._check_root(f, meth)
+
+
+class TestSecant:
+    """Check that some Jacobian approximations satisfy the secant condition"""
+
+    xs = [np.array([1,2,3,4,5], float),
+          np.array([2,3,4,5,1], float),
+          np.array([3,4,5,1,2], float),
+          np.array([4,5,1,2,3], float),
+          np.array([9,1,9,1,3], float),
+          np.array([0,1,9,1,3], float),
+          np.array([5,5,7,1,1], float),
+          np.array([1,2,7,5,1], float),]
+    fs = [x**2 - 1 for x in xs]
+
+    def _check_secant(self, jac_cls, npoints=1, **kw):
+        """
+        Check that the given Jacobian approximation satisfies the secant
+        condition for the last `npoints` points.
+        """
+        jac = jac_cls(**kw)
+        jac.setup(self.xs[0], self.fs[0], None)
+        for j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
+            jac.update(x, f)
+
+            for k in range(min(npoints, j+1)):
+                dx = self.xs[j-k+1] - self.xs[j-k]
+                df = self.fs[j-k+1] - self.fs[j-k]
+                assert_(np.allclose(dx, jac.solve(df)))
+
+            # Check that the `npoints` secant bound is strict
+            if j >= npoints:
+                dx = self.xs[j-npoints+1] - self.xs[j-npoints]
+                df = self.fs[j-npoints+1] - self.fs[j-npoints]
+                assert_(not np.allclose(dx, jac.solve(df)))
+
+    def test_broyden1(self):
+        self._check_secant(nonlin.BroydenFirst)
+
+    def test_broyden2(self):
+        self._check_secant(nonlin.BroydenSecond)
+
+    def test_broyden1_update(self):
+        # Check that BroydenFirst update works as for a dense matrix
+        jac = nonlin.BroydenFirst(alpha=0.1)
+        jac.setup(self.xs[0], self.fs[0], None)
+
+        B = np.identity(5) * (-1/0.1)
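+        # B0 = -I/alpha; each step applies the "good" Broyden rank-1
+        # update B <- B + (df - B@dx) dx^T / (dx^T dx), which the
+        # BroydenFirst object must reproduce.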
+
+        for last_j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
+            df = f - self.fs[last_j]
+            dx = x - self.xs[last_j]
+            B += (df - dot(B, dx))[:,None] * dx[None,:] / dot(dx, dx)
+            jac.update(x, f)
+            assert_(np.allclose(jac.todense(), B, rtol=1e-10, atol=1e-13))
+
+    def test_broyden2_update(self):
+        # Check that BroydenSecond update works as for a dense matrix
+        jac = nonlin.BroydenSecond(alpha=0.1)
+        jac.setup(self.xs[0], self.fs[0], None)
+
+        H = np.identity(5) * (-0.1)
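+        # H0 = -alpha*I approximates the *inverse* Jacobian; the "bad"
+        # Broyden update is H <- H + (dx - H@df) df^T / (df^T df), so
+        # jac.todense() should match inv(H).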
+
+        for last_j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
+            df = f - self.fs[last_j]
+            dx = x - self.xs[last_j]
+            H += (dx - dot(H, df))[:,None] * df[None,:] / dot(df, df)
+            jac.update(x, f)
+            assert_(np.allclose(jac.todense(), inv(H), rtol=1e-10, atol=1e-13))
+
+    def test_anderson(self):
+        # Anderson mixing (with w0=0) satisfies secant conditions
+        # for the last M iterates, see [Ey]_
+        #
+        # .. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).
+        self._check_secant(nonlin.Anderson, M=3, w0=0, npoints=3)
+
+
+class TestLinear:
+    """Solve a linear equation;
+    some methods find the exact solution in a finite number of steps"""
+
+    def _check(self, jac, N, maxiter, complex=False, **kw):
+        np.random.seed(123)
+
+        A = np.random.randn(N, N)
+        if complex:
+            A = A + 1j*np.random.randn(N, N)
+        b = np.random.randn(N)
+        if complex:
+            b = b + 1j*np.random.randn(N)
+
+        def func(x):
+            return dot(A, x) - b
+
+        sol = nonlin.nonlin_solve(func, np.zeros(N), jac, maxiter=maxiter,
+                                  f_tol=1e-6, line_search=None, verbose=0)
+        assert_(np.allclose(dot(A, sol), b, atol=1e-6))
+
+    def test_broyden1(self):
+        # Broyden methods solve linear systems exactly in 2*N steps
+        self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, False)
+        self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, True)
+
+    def test_broyden2(self):
+        # Broyden methods solve linear systems exactly in 2*N steps
+        self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, False)
+        self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, True)
+
+    def test_anderson(self):
+        # Anderson is rather similar to Broyden, if given enough storage space
+        self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, False)
+        self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, True)
+
+    def test_krylov(self):
+        # Krylov methods solve linear systems exactly in N inner steps
+        self._check(nonlin.KrylovJacobian, 20, 2, False, inner_m=10)
+        self._check(nonlin.KrylovJacobian, 20, 2, True, inner_m=10)
+
+
+class TestJacobianDotSolve:
+    """Check that solve/dot methods in Jacobian approximations are consistent"""
+
+    def _func(self, x):
+        return x**2 - 1 + np.dot(self.A, x)
+
+    def _check_dot(self, jac_cls, complex=False, tol=1e-6, **kw):
+        np.random.seed(123)
+
+        N = 7
+
+        def rand(*a):
+            q = np.random.rand(*a)
+            if complex:
+                q = q + 1j*np.random.rand(*a)
+            return q
+
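+        # mixed absolute/relative tolerance: |a - b|_max <= tol*(1 + |b|_max)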
+        def assert_close(a, b, msg):
+            d = abs(a - b).max()
+            f = tol + abs(b).max()*tol
+            if d > f:
+                raise AssertionError('%s: err %g' % (msg, d))
+
+        self.A = rand(N, N)
+
+        # initialize
+        x0 = np.random.rand(N)
+        jac = jac_cls(**kw)
+        jac.setup(x0, self._func(x0), self._func)
+
+        # check consistency
+        for k in range(2*N):
+            v = rand(N)
+
+            if hasattr(jac, '__array__'):
+                Jd = np.array(jac)
+                if hasattr(jac, 'solve'):
+                    Gv = jac.solve(v)
+                    Gv2 = np.linalg.solve(Jd, v)
+                    assert_close(Gv, Gv2, 'solve vs array')
+                if hasattr(jac, 'rsolve'):
+                    Gv = jac.rsolve(v)
+                    Gv2 = np.linalg.solve(Jd.T.conj(), v)
+                    assert_close(Gv, Gv2, 'rsolve vs array')
+                if hasattr(jac, 'matvec'):
+                    Jv = jac.matvec(v)
+                    Jv2 = np.dot(Jd, v)
+                    assert_close(Jv, Jv2, 'dot vs array')
+                if hasattr(jac, 'rmatvec'):
+                    Jv = jac.rmatvec(v)
+                    Jv2 = np.dot(Jd.T.conj(), v)
+                    assert_close(Jv, Jv2, 'rmatvec vs array')
+
+            if hasattr(jac, 'matvec') and hasattr(jac, 'solve'):
+                Jv = jac.matvec(v)
+                Jv2 = jac.solve(jac.matvec(Jv))
+                assert_close(Jv, Jv2, 'dot vs solve')
+
+            if hasattr(jac, 'rmatvec') and hasattr(jac, 'rsolve'):
+                Jv = jac.rmatvec(v)
+                Jv2 = jac.rmatvec(jac.rsolve(Jv))
+                assert_close(Jv, Jv2, 'rmatvec vs rsolve')
+
+            x = rand(N)
+            jac.update(x, self._func(x))
+
+    def test_broyden1(self):
+        self._check_dot(nonlin.BroydenFirst, complex=False)
+        self._check_dot(nonlin.BroydenFirst, complex=True)
+
+    def test_broyden2(self):
+        self._check_dot(nonlin.BroydenSecond, complex=False)
+        self._check_dot(nonlin.BroydenSecond, complex=True)
+
+    def test_anderson(self):
+        self._check_dot(nonlin.Anderson, complex=False)
+        self._check_dot(nonlin.Anderson, complex=True)
+
+    def test_diagbroyden(self):
+        self._check_dot(nonlin.DiagBroyden, complex=False)
+        self._check_dot(nonlin.DiagBroyden, complex=True)
+
+    def test_linearmixing(self):
+        self._check_dot(nonlin.LinearMixing, complex=False)
+        self._check_dot(nonlin.LinearMixing, complex=True)
+
+    def test_excitingmixing(self):
+        self._check_dot(nonlin.ExcitingMixing, complex=False)
+        self._check_dot(nonlin.ExcitingMixing, complex=True)
+
+    def test_krylov(self):
+        self._check_dot(nonlin.KrylovJacobian, complex=False, tol=1e-3)
+        self._check_dot(nonlin.KrylovJacobian, complex=True, tol=1e-3)
+
+
+class TestNonlinOldTests:
+    """ Test case for a simple constrained entropy maximization problem
+    (the machine translation example of Berger et al in
+    Computational Linguistics, vol 22, num 1, pp 39--72, 1996.)
+    """
+
+    def test_broyden1(self):
+        x = nonlin.broyden1(F, F.xin, iter=12, alpha=1)
+        assert_(nonlin.norm(x) < 1e-9)
+        assert_(nonlin.norm(F(x)) < 1e-9)
+
+    def test_broyden2(self):
+        x = nonlin.broyden2(F, F.xin, iter=12, alpha=1)
+        assert_(nonlin.norm(x) < 1e-9)
+        assert_(nonlin.norm(F(x)) < 1e-9)
+
+    def test_anderson(self):
+        x = nonlin.anderson(F, F.xin, iter=12, alpha=0.03, M=5)
+        assert_(nonlin.norm(x) < 0.33)
+
+    def test_linearmixing(self):
+        x = nonlin.linearmixing(F, F.xin, iter=60, alpha=0.5)
+        assert_(nonlin.norm(x) < 1e-7)
+        assert_(nonlin.norm(F(x)) < 1e-7)
+
+    def test_exciting(self):
+        x = nonlin.excitingmixing(F, F.xin, iter=20, alpha=0.5)
+        assert_(nonlin.norm(x) < 1e-5)
+        assert_(nonlin.norm(F(x)) < 1e-5)
+
+    def test_diagbroyden(self):
+        x = nonlin.diagbroyden(F, F.xin, iter=11, alpha=1)
+        assert_(nonlin.norm(x) < 1e-8)
+        assert_(nonlin.norm(F(x)) < 1e-8)
+
+    def test_root_broyden1(self):
+        res = root(F, F.xin, method='broyden1',
+                   options={'nit': 12, 'jac_options': {'alpha': 1}})
+        assert_(nonlin.norm(res.x) < 1e-9)
+        assert_(nonlin.norm(res.fun) < 1e-9)
+
+    def test_root_broyden2(self):
+        res = root(F, F.xin, method='broyden2',
+                   options={'nit': 12, 'jac_options': {'alpha': 1}})
+        assert_(nonlin.norm(res.x) < 1e-9)
+        assert_(nonlin.norm(res.fun) < 1e-9)
+
+    def test_root_anderson(self):
+        res = root(F, F.xin, method='anderson',
+                   options={'nit': 12,
+                            'jac_options': {'alpha': 0.03, 'M': 5}})
+        assert_(nonlin.norm(res.x) < 0.33)
+
+    def test_root_linearmixing(self):
+        res = root(F, F.xin, method='linearmixing',
+                   options={'nit': 60,
+                            'jac_options': {'alpha': 0.5}})
+        assert_(nonlin.norm(res.x) < 1e-7)
+        assert_(nonlin.norm(res.fun) < 1e-7)
+
+    def test_root_excitingmixing(self):
+        res = root(F, F.xin, method='excitingmixing',
+                   options={'nit': 20,
+                            'jac_options': {'alpha': 0.5}})
+        assert_(nonlin.norm(res.x) < 1e-5)
+        assert_(nonlin.norm(res.fun) < 1e-5)
+
+    def test_root_diagbroyden(self):
+        res = root(F, F.xin, method='diagbroyden',
+                   options={'nit': 11,
+                            'jac_options': {'alpha': 1}})
+        assert_(nonlin.norm(res.x) < 1e-8)
+        assert_(nonlin.norm(res.fun) < 1e-8)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_optimize.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_optimize.py
new file mode 100644
index 00000000..f9cbf6bf
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_optimize.py
@@ -0,0 +1,2855 @@
+"""
+Unit tests for optimization routines from optimize.py
+
+Authors:
+   Ed Schofield, Nov 2005
+   Andrew Straw, April 2008
+
+To run it in its simplest form::
+  pytest test_optimize.py
+
+"""
+import itertools
+import platform
+import numpy as np
+from numpy.testing import (assert_allclose, assert_equal,
+                           assert_almost_equal,
+                           assert_no_warnings, assert_warns,
+                           assert_array_less, suppress_warnings)
+import pytest
+from pytest import raises as assert_raises
+
+from scipy import optimize
+from scipy.optimize._minimize import Bounds, NonlinearConstraint
+from scipy.optimize._minimize import MINIMIZE_METHODS, MINIMIZE_SCALAR_METHODS
+from scipy.optimize._linprog import LINPROG_METHODS
+from scipy.optimize._root import ROOT_METHODS
+from scipy.optimize._root_scalar import ROOT_SCALAR_METHODS
+from scipy.optimize._qap import QUADRATIC_ASSIGNMENT_METHODS
+from scipy.optimize._differentiable_functions import ScalarFunction, FD_METHODS
+from scipy.optimize._optimize import MemoizeJac, show_options
+
+
+def test_check_grad():
+    # Verify that check_grad is able to estimate the derivative of the
+    # expit (logistic sigmoid) function.
+
+    def expit(x):
+        return 1 / (1 + np.exp(-x))
+
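+    # analytic derivative: expit'(x) = expit(x)*(1 - expit(x))
+    #                                = exp(-x) / (1 + exp(-x))**2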
+    def der_expit(x):
+        return np.exp(-x) / (1 + np.exp(-x))**2
+
+    x0 = np.array([1.5])
+
+    r = optimize.check_grad(expit, der_expit, x0)
+    assert_almost_equal(r, 0)
+    r = optimize.check_grad(expit, der_expit, x0,
+                            direction='random', seed=1234)
+    assert_almost_equal(r, 0)
+
+    r = optimize.check_grad(expit, der_expit, x0, epsilon=1e-6)
+    assert_almost_equal(r, 0)
+    r = optimize.check_grad(expit, der_expit, x0, epsilon=1e-6,
+                            direction='random', seed=1234)
+    assert_almost_equal(r, 0)
+
+    # Check that the epsilon parameter is actually taken into account.
+    r = abs(optimize.check_grad(expit, der_expit, x0, epsilon=1e-1) - 0)
+    assert r > 1e-7
+    r = abs(optimize.check_grad(expit, der_expit, x0, epsilon=1e-1,
+                                direction='random', seed=1234) - 0)
+    assert r > 1e-7
+
+    def x_sinx(x):
+        return (x*np.sin(x)).sum()
+
+    def der_x_sinx(x):
+        return np.sin(x) + x*np.cos(x)
+
+    x0 = np.arange(0, 2, 0.2)
+
+    r = optimize.check_grad(x_sinx, der_x_sinx, x0,
+                            direction='random', seed=1234)
+    assert_almost_equal(r, 0)
+
+    assert_raises(ValueError, optimize.check_grad,
+                  x_sinx, der_x_sinx, x0,
+                  direction='random_projection', seed=1234)
+
+    # checking can also be done for derivatives of vector-valued functions
+    r = optimize.check_grad(himmelblau_grad, himmelblau_hess, himmelblau_x0,
+                            direction='all', seed=1234)
+    assert r < 5e-7
+
+
+class CheckOptimize:
+    """ Base test case for a simple constrained entropy maximization problem
+    (the machine translation example of Berger et al in
+    Computational Linguistics, vol 22, num 1, pp 39--72, 1996.)
+    """
+
+    def setup_method(self):
+        self.F = np.array([[1, 1, 1],
+                           [1, 1, 0],
+                           [1, 0, 1],
+                           [1, 0, 0],
+                           [1, 0, 0]])
+        self.K = np.array([1., 0.3, 0.5])
+        self.startparams = np.zeros(3, np.float64)
+        self.solution = np.array([0., -0.524869316, 0.487525860])
+        self.maxiter = 1000
+        self.funccalls = 0
+        self.gradcalls = 0
+        self.trace = []
+
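+    # The objective below is f(x) = log Z(x) - K . x with
+    # Z(x) = sum_i exp((F x)_i); `grad` and `hess` are its analytic
+    # first and second derivatives (with p = softmax(F x),
+    # grad = F^T p - K and hess = F^T (diag(p) - p p^T) F).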
+    def func(self, x):
+        self.funccalls += 1
+        if self.funccalls > 6000:
+            raise RuntimeError("too many iterations in optimization routine")
+        log_pdot = np.dot(self.F, x)
+        logZ = np.log(sum(np.exp(log_pdot)))
+        f = logZ - np.dot(self.K, x)
+        self.trace.append(np.copy(x))
+        return f
+
+    def grad(self, x):
+        self.gradcalls += 1
+        log_pdot = np.dot(self.F, x)
+        logZ = np.log(sum(np.exp(log_pdot)))
+        p = np.exp(log_pdot - logZ)
+        return np.dot(self.F.transpose(), p) - self.K
+
+    def hess(self, x):
+        log_pdot = np.dot(self.F, x)
+        logZ = np.log(sum(np.exp(log_pdot)))
+        p = np.exp(log_pdot - logZ)
+        return np.dot(self.F.T,
+                      np.dot(np.diag(p), self.F - np.dot(self.F.T, p)))
+
+    def hessp(self, x, p):
+        return np.dot(self.hess(x), p)
+
+
+class CheckOptimizeParameterized(CheckOptimize):
+
+    def test_cg(self):
+        # conjugate gradient optimization routine
+        if self.use_wrapper:
+            opts = {'maxiter': self.maxiter, 'disp': self.disp,
+                    'return_all': False}
+            res = optimize.minimize(self.func, self.startparams, args=(),
+                                    method='CG', jac=self.grad,
+                                    options=opts)
+            params, fopt, func_calls, grad_calls, warnflag = \
+                res['x'], res['fun'], res['nfev'], res['njev'], res['status']
+        else:
+            retval = optimize.fmin_cg(self.func, self.startparams,
+                                      self.grad, (), maxiter=self.maxiter,
+                                      full_output=True, disp=self.disp,
+                                      retall=False)
+            (params, fopt, func_calls, grad_calls, warnflag) = retval
+
+        assert_allclose(self.func(params), self.func(self.solution),
+                        atol=1e-6)
+
+        # Ensure that function call counts are 'known good'; these are from
+        # SciPy 0.7.0. Don't allow them to increase.
+        assert self.funccalls == 9, self.funccalls
+        assert self.gradcalls == 7, self.gradcalls
+
+        # Ensure that the function behaves the same; this is from SciPy 0.7.0
+        assert_allclose(self.trace[2:4],
+                        [[0, -0.5, 0.5],
+                         [0, -5.05700028e-01, 4.95985862e-01]],
+                        atol=1e-14, rtol=1e-7)
+
+    def test_cg_cornercase(self):
+        def f(r):
+            return 2.5 * (1 - np.exp(-1.5*(r - 0.5)))**2
+
+        # Check several initial guesses. (Too far away from the
+        # minimum, the function ends up in the flat region of exp.)
+        for x0 in np.linspace(-0.75, 3, 71):
+            sol = optimize.minimize(f, [x0], method='CG')
+            assert sol.success
+            assert_allclose(sol.x, [0.5], rtol=1e-5)
+
+    def test_bfgs(self):
+        # Broyden-Fletcher-Goldfarb-Shanno optimization routine
+        if self.use_wrapper:
+            opts = {'maxiter': self.maxiter, 'disp': self.disp,
+                    'return_all': False}
+            res = optimize.minimize(self.func, self.startparams,
+                                    jac=self.grad, method='BFGS', args=(),
+                                    options=opts)
+
+            params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag = (
+                    res['x'], res['fun'], res['jac'], res['hess_inv'],
+                    res['nfev'], res['njev'], res['status'])
+        else:
+            retval = optimize.fmin_bfgs(self.func, self.startparams, self.grad,
+                                        args=(), maxiter=self.maxiter,
+                                        full_output=True, disp=self.disp,
+                                        retall=False)
+            (params, fopt, gopt, Hopt,
+             func_calls, grad_calls, warnflag) = retval
+
+        assert_allclose(self.func(params), self.func(self.solution),
+                        atol=1e-6)
+
+        # Ensure that function call counts are 'known good'; these are from
+        # SciPy 0.7.0. Don't allow them to increase.
+        assert self.funccalls == 10, self.funccalls
+        assert self.gradcalls == 8, self.gradcalls
+
+        # Ensure that the function behaves the same; this is from SciPy 0.7.0
+        assert_allclose(self.trace[6:8],
+                        [[0, -5.25060743e-01, 4.87748473e-01],
+                         [0, -5.24885582e-01, 4.87530347e-01]],
+                        atol=1e-14, rtol=1e-7)
+
+    def test_bfgs_infinite(self):
+        # Test corner case where -Inf is the minimum.  See gh-2019.
+        func = lambda x: -np.e**-x
+        fprime = lambda x: -func(x)
+        x0 = [0]
+        with np.errstate(over='ignore'):
+            if self.use_wrapper:
+                opts = {'disp': self.disp}
+                x = optimize.minimize(func, x0, jac=fprime, method='BFGS',
+                                      args=(), options=opts)['x']
+            else:
+                x = optimize.fmin_bfgs(func, x0, fprime, disp=self.disp)
+            assert not np.isfinite(func(x))
+
+    def test_bfgs_xrtol(self):
+        # regression test for gh-17345: exercise the xrtol option
+        x0 = [1.3, 0.7, 0.8, 1.9, 1.2]
+        res = optimize.minimize(optimize.rosen,
+                                x0, method='bfgs', options={'xrtol': 1e-3})
+        ref = optimize.minimize(optimize.rosen,
+                                x0, method='bfgs', options={'gtol': 1e-3})
+        assert res.nit != ref.nit
+
+    def test_powell(self):
+        # Powell (direction set) optimization routine
+        if self.use_wrapper:
+            opts = {'maxiter': self.maxiter, 'disp': self.disp,
+                    'return_all': False}
+            res = optimize.minimize(self.func, self.startparams, args=(),
+                                    method='Powell', options=opts)
+            params, fopt, direc, numiter, func_calls, warnflag = (
+                    res['x'], res['fun'], res['direc'], res['nit'],
+                    res['nfev'], res['status'])
+        else:
+            retval = optimize.fmin_powell(self.func, self.startparams,
+                                          args=(), maxiter=self.maxiter,
+                                          full_output=True, disp=self.disp,
+                                          retall=False)
+            (params, fopt, direc, numiter, func_calls, warnflag) = retval
+
+        assert_allclose(self.func(params), self.func(self.solution),
+                        atol=1e-6)
+        # params[0] does not affect the objective function
+        assert_allclose(params[1:], self.solution[1:], atol=5e-6)
+
+        # Ensure that function call counts are 'known good'; these are from
+        # SciPy 0.7.0. Don't allow them to increase.
+        #
+        # However, some leeway must be added: the exact evaluation
+        # count is sensitive to numerical error, floating-point
+        # computations are not bit-for-bit reproducible across
+        # machines, and details such as the BLAS in use (e.g. MKL)
+        # and data alignment affect the rounding error.
+        #
+        assert self.funccalls <= 116 + 20, self.funccalls
+        assert self.gradcalls == 0, self.gradcalls
+
+    @pytest.mark.xfail(reason="This part of test_powell fails on some "
+                       "platforms, but the solution returned by powell is "
+                       "still valid.")
+    def test_powell_gh14014(self):
+        # This part of test_powell started failing on some CI platforms;
+        # see gh-14014. Since the solution is still correct and the comments
+        # in test_powell suggest that small differences in the bits are known
+        # to change the "trace" of the solution, seems safe to xfail to get CI
+        # green now and investigate later.
+
+        # Powell (direction set) optimization routine
+        if self.use_wrapper:
+            opts = {'maxiter': self.maxiter, 'disp': self.disp,
+                    'return_all': False}
+            res = optimize.minimize(self.func, self.startparams, args=(),
+                                    method='Powell', options=opts)
+            params, fopt, direc, numiter, func_calls, warnflag = (
+                    res['x'], res['fun'], res['direc'], res['nit'],
+                    res['nfev'], res['status'])
+        else:
+            retval = optimize.fmin_powell(self.func, self.startparams,
+                                          args=(), maxiter=self.maxiter,
+                                          full_output=True, disp=self.disp,
+                                          retall=False)
+            (params, fopt, direc, numiter, func_calls, warnflag) = retval
+
+        # Ensure that the function behaves the same; this is from SciPy 0.7.0
+        assert_allclose(self.trace[34:39],
+                        [[0.72949016, -0.44156936, 0.47100962],
+                         [0.72949016, -0.44156936, 0.48052496],
+                         [1.45898031, -0.88313872, 0.95153458],
+                         [0.72949016, -0.44156936, 0.47576729],
+                         [1.72949016, -0.44156936, 0.47576729]],
+                        atol=1e-14, rtol=1e-7)
+
+    def test_powell_bounded(self):
+        # Powell (direction set) optimization routine
+        # same as test_powell above, but with bounds
+        bounds = [(-np.pi, np.pi) for _ in self.startparams]
+        if self.use_wrapper:
+            opts = {'maxiter': self.maxiter, 'disp': self.disp,
+                    'return_all': False}
+            res = optimize.minimize(self.func, self.startparams, args=(),
+                                    bounds=bounds,
+                                    method='Powell', options=opts)
+            params, fopt, direc, numiter, func_calls, warnflag = (
+                    res['x'], res['fun'], res['direc'], res['nit'],
+                    res['nfev'], res['status'])
+
+            assert func_calls == self.funccalls
+            assert_allclose(self.func(params), self.func(self.solution),
+                            atol=1e-6, rtol=1e-5)
+
+            # The exact evaluation count is sensitive to numerical error;
+            # floating-point computations are not bit-for-bit reproducible
+            # across machines, and details such as the BLAS in use
+            # (e.g. MKL) and data alignment affect the rounding error.
+            # It takes 155 calls on my machine, but we allow the same +20
+            # margin as is used in `test_powell`.
+            assert self.funccalls <= 155 + 20
+            assert self.gradcalls == 0
+
+    def test_neldermead(self):
+        # Nelder-Mead simplex algorithm
+        if self.use_wrapper:
+            opts = {'maxiter': self.maxiter, 'disp': self.disp,
+                    'return_all': False}
+            res = optimize.minimize(self.func, self.startparams, args=(),
+                                    method='Nelder-mead', options=opts)
+            params, fopt, numiter, func_calls, warnflag = (
+                    res['x'], res['fun'], res['nit'], res['nfev'],
+                    res['status'])
+        else:
+            retval = optimize.fmin(self.func, self.startparams,
+                                   args=(), maxiter=self.maxiter,
+                                   full_output=True, disp=self.disp,
+                                   retall=False)
+            (params, fopt, numiter, func_calls, warnflag) = retval
+
+        assert_allclose(self.func(params), self.func(self.solution),
+                        atol=1e-6)
+
+        # Ensure that function call counts are 'known good'; these are from
+        # SciPy 0.7.0. Don't allow them to increase.
+        assert self.funccalls == 167, self.funccalls
+        assert self.gradcalls == 0, self.gradcalls
+
+        # Ensure that the function behaves the same; this is from SciPy 0.7.0
+        assert_allclose(self.trace[76:78],
+                        [[0.1928968, -0.62780447, 0.35166118],
+                         [0.19572515, -0.63648426, 0.35838135]],
+                        atol=1e-14, rtol=1e-7)
+
+    def test_neldermead_initial_simplex(self):
+        # Nelder-Mead simplex algorithm
+        simplex = np.zeros((4, 3))
+        simplex[...] = self.startparams
+        for j in range(3):
+            simplex[j+1, j] += 0.1
+
+        if self.use_wrapper:
+            opts = {'maxiter': self.maxiter, 'disp': False,
+                    'return_all': True, 'initial_simplex': simplex}
+            res = optimize.minimize(self.func, self.startparams, args=(),
+                                    method='Nelder-mead', options=opts)
+            params, fopt, numiter, func_calls, warnflag = (res['x'],
+                                                           res['fun'],
+                                                           res['nit'],
+                                                           res['nfev'],
+                                                           res['status'])
+            assert_allclose(res['allvecs'][0], simplex[0])
+        else:
+            retval = optimize.fmin(self.func, self.startparams,
+                                   args=(), maxiter=self.maxiter,
+                                   full_output=True, disp=False, retall=False,
+                                   initial_simplex=simplex)
+
+            (params, fopt, numiter, func_calls, warnflag) = retval
+
+        assert_allclose(self.func(params), self.func(self.solution),
+                        atol=1e-6)
+
+        # Ensure that function call counts are 'known good'; these are from
+        # SciPy 0.17.0. Don't allow them to increase.
+        assert self.funccalls == 100, self.funccalls
+        assert self.gradcalls == 0, self.gradcalls
+
+        # Ensure that the function behaves the same; this is from SciPy 0.15.0
+        assert_allclose(self.trace[50:52],
+                        [[0.14687474, -0.5103282, 0.48252111],
+                         [0.14474003, -0.5282084, 0.48743951]],
+                        atol=1e-14, rtol=1e-7)
+
+    def test_neldermead_initial_simplex_bad(self):
+        # Check that it fails with bad simplices
+        bad_simplices = []
+
+        simplex = np.zeros((3, 2))
+        simplex[...] = self.startparams[:2]
+        for j in range(2):
+            simplex[j+1, j] += 0.1
+        bad_simplices.append(simplex)
+
+        simplex = np.zeros((3, 3))
+        bad_simplices.append(simplex)
+
+        for simplex in bad_simplices:
+            if self.use_wrapper:
+                opts = {'maxiter': self.maxiter, 'disp': False,
+                        'return_all': False, 'initial_simplex': simplex}
+                assert_raises(ValueError,
+                              optimize.minimize,
+                              self.func,
+                              self.startparams,
+                              args=(),
+                              method='Nelder-mead',
+                              options=opts)
+            else:
+                assert_raises(ValueError, optimize.fmin,
+                              self.func, self.startparams,
+                              args=(), maxiter=self.maxiter,
+                              full_output=True, disp=False, retall=False,
+                              initial_simplex=simplex)
+
+    def test_ncg_negative_maxiter(self):
+        # Regression test for gh-8241
+        opts = {'maxiter': -1}
+        result = optimize.minimize(self.func, self.startparams,
+                                   method='Newton-CG', jac=self.grad,
+                                   args=(), options=opts)
+        assert result.status == 1
+
+    def test_ncg(self):
+        # line-search Newton conjugate gradient optimization routine
+        if self.use_wrapper:
+            opts = {'maxiter': self.maxiter, 'disp': self.disp,
+                    'return_all': False}
+            retval = optimize.minimize(self.func, self.startparams,
+                                       method='Newton-CG', jac=self.grad,
+                                       args=(), options=opts)['x']
+        else:
+            retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
+                                       args=(), maxiter=self.maxiter,
+                                       full_output=False, disp=self.disp,
+                                       retall=False)
+
+        params = retval
+
+        assert_allclose(self.func(params), self.func(self.solution),
+                        atol=1e-6)
+
+        # Ensure that function call counts are 'known good'; these are from
+        # SciPy 0.7.0. Don't allow them to increase.
+        assert self.funccalls == 7, self.funccalls
+        assert self.gradcalls <= 22, self.gradcalls  # 0.13.0
+        # assert self.gradcalls <= 18, self.gradcalls  # 0.9.0
+        # assert self.gradcalls == 18, self.gradcalls  # 0.8.0
+        # assert self.gradcalls == 22, self.gradcalls  # 0.7.0
+
+        # Ensure that the function behaves the same; this is from SciPy 0.7.0
+        assert_allclose(self.trace[3:5],
+                        [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
+                         [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
+                        atol=1e-6, rtol=1e-7)
+
+    def test_ncg_hess(self):
+        # Newton conjugate gradient with Hessian
+        if self.use_wrapper:
+            opts = {'maxiter': self.maxiter, 'disp': self.disp,
+                    'return_all': False}
+            retval = optimize.minimize(self.func, self.startparams,
+                                       method='Newton-CG', jac=self.grad,
+                                       hess=self.hess,
+                                       args=(), options=opts)['x']
+        else:
+            retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
+                                       fhess=self.hess,
+                                       args=(), maxiter=self.maxiter,
+                                       full_output=False, disp=self.disp,
+                                       retall=False)
+
+        params = retval
+
+        assert_allclose(self.func(params), self.func(self.solution),
+                        atol=1e-6)
+
+        # Ensure that function call counts are 'known good'; these are from
+        # SciPy 0.7.0. Don't allow them to increase.
+        assert self.funccalls <= 7, self.funccalls  # gh10673
+        assert self.gradcalls <= 18, self.gradcalls  # 0.9.0
+        # assert self.gradcalls == 18, self.gradcalls  # 0.8.0
+        # assert self.gradcalls == 22, self.gradcalls  # 0.7.0
+
+        # Ensure that the function behaves the same; this is from SciPy 0.7.0
+        assert_allclose(self.trace[3:5],
+                        [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
+                         [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
+                        atol=1e-6, rtol=1e-7)
+
+    def test_ncg_hessp(self):
+        # Newton conjugate gradient with Hessian times a vector p.
+        if self.use_wrapper:
+            opts = {'maxiter': self.maxiter, 'disp': self.disp,
+                    'return_all': False}
+            retval = optimize.minimize(self.func, self.startparams,
+                                       method='Newton-CG', jac=self.grad,
+                                       hessp=self.hessp,
+                                       args=(), options=opts)['x']
+        else:
+            retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
+                                       fhess_p=self.hessp,
+                                       args=(), maxiter=self.maxiter,
+                                       full_output=False, disp=self.disp,
+                                       retall=False)
+
+        params = retval
+
+        assert_allclose(self.func(params), self.func(self.solution),
+                        atol=1e-6)
+
+        # Ensure that function call counts are 'known good'; these are from
+        # SciPy 0.7.0. Don't allow them to increase.
+        assert self.funccalls <= 7, self.funccalls  # gh10673
+        assert self.gradcalls <= 18, self.gradcalls  # 0.9.0
+        # assert self.gradcalls == 18, self.gradcalls  # 0.8.0
+        # assert self.gradcalls == 22, self.gradcalls  # 0.7.0
+
+        # Ensure that the function behaves the same; this is from SciPy 0.7.0
+        assert_allclose(self.trace[3:5],
+                        [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
+                         [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
+                        atol=1e-6, rtol=1e-7)
+
+
+def test_maxfev_test():
+    rng = np.random.default_rng(271707100830272976862395227613146332411)
+
+    def cost(x):
+        return rng.random(1) * 1000  # a problem that never converges
+
+    for imaxfev in [1, 10, 50]:
+        # "TNC" and "L-BFGS-B" also supports max function evaluation, but
+        # these may violate the limit because of evaluating gradients
+        # by numerical differentiation. See the discussion in PR #14805.
+        for method in ['Powell', 'Nelder-Mead']:
+            result = optimize.minimize(cost, rng.random(10),
+                                       method=method,
+                                       options={'maxfev': imaxfev})
+            assert result["nfev"] == imaxfev
+
+
+def test_wrap_scalar_function_with_validation():
+
+    def func_(x):
+        return x
+
+    fcalls, func = optimize._optimize.\
+        _wrap_scalar_function_maxfun_validation(func_, np.asarray(1), 5)
+
+    for i in range(5):
+        func(np.asarray(i))
+        assert fcalls[0] == i+1
+
+    msg = "Too many function calls"
+    with assert_raises(optimize._optimize._MaxFuncCallError, match=msg):
+        func(np.asarray(i))  # exceeded maximum function call
+
+    fcalls, func = optimize._optimize.\
+        _wrap_scalar_function_maxfun_validation(func_, np.asarray(1), 5)
+
+    msg = "The user-provided objective function must return a scalar value."
+    with assert_raises(ValueError, match=msg):
+        func(np.array([1, 1]))
+
+
+def test_obj_func_returns_scalar():
+    match = ("The user-provided "
+             "objective function must "
+             "return a scalar value.")
+    with assert_raises(ValueError, match=match):
+        optimize.minimize(lambda x: x, np.array([1, 1]), method='BFGS')
+
+
+def test_neldermead_iteration_num():
+    x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
+    res = optimize._minimize._minimize_neldermead(optimize.rosen, x0,
+                                                  xatol=1e-8)
+    assert res.nit <= 339
+
+
+def test_neldermead_xatol_fatol():
+    # gh-4484
+    # test that we can call with fatol and xatol specified
+    func = lambda x: x[0]**2 + x[1]**2
+
+    optimize._minimize._minimize_neldermead(func, [1, 1], maxiter=2,
+                                            xatol=1e-3, fatol=1e-3)
+
+
+def test_neldermead_adaptive():
+    func = lambda x: np.sum(x**2)
+    p0 = [0.15746215, 0.48087031, 0.44519198, 0.4223638, 0.61505159,
+          0.32308456, 0.9692297, 0.4471682, 0.77411992, 0.80441652,
+          0.35994957, 0.75487856, 0.99973421, 0.65063887, 0.09626474]
+
+    res = optimize.minimize(func, p0, method='Nelder-Mead')
+    assert_equal(res.success, False)
+
+    res = optimize.minimize(func, p0, method='Nelder-Mead',
+                            options={'adaptive': True})
+    assert_equal(res.success, True)
+
+
+def test_bounded_powell_outsidebounds():
+    # With the bounded Powell method, if you start outside the bounds the
+    # final solution should still be within the bounds (provided that the
+    # user doesn't make a bad choice for the `direc` argument).
+    func = lambda x: np.sum(x**2)
+    bounds = (-1, 1), (-1, 1), (-1, 1)
+    x0 = [-4, .5, -.8]
+
+    # we're starting outside the bounds, so we should get a warning
+    with assert_warns(optimize.OptimizeWarning):
+        res = optimize.minimize(func, x0, bounds=bounds, method="Powell")
+    assert_allclose(res.x, np.array([0.] * len(x0)), atol=1e-6)
+    assert_equal(res.success, True)
+    assert_equal(res.status, 0)
+
+    # However, if we now change the `direc` argument so that the set of
+    # vectors does not span the parameter space, then we may not end up
+    # back within the bounds. Here we see that the first parameter cannot
+    # be updated!
+    direc = [[0, 0, 0], [0, 1, 0], [0, 0, 1]]
+    # we're starting outside the bounds, so we should get a warning
+    with assert_warns(optimize.OptimizeWarning):
+        res = optimize.minimize(func, x0,
+                                bounds=bounds, method="Powell",
+                                options={'direc': direc})
+    assert_allclose(res.x, np.array([-4., 0, 0]), atol=1e-6)
+    assert_equal(res.success, False)
+    assert_equal(res.status, 4)
+
+
+def test_bounded_powell_vs_powell():
+    # here we test an example where the bounded Powell method
+    # returns a different result from the standard Powell method.
+
+    # first we test a simple example where the unconstrained minimum is
+    # at the origin and the best value attainable within the bounds is
+    # larger than the value at the origin.
+    func = lambda x: np.sum(x**2)
+    bounds = (-5, -1), (-10, -0.1), (1, 9.2), (-4, 7.6), (-15.9, -2)
+    x0 = [-2.1, -5.2, 1.9, 0, -2]
+
+    options = {'ftol': 1e-10, 'xtol': 1e-10}
+
+    res_powell = optimize.minimize(func, x0, method="Powell", options=options)
+    assert_allclose(res_powell.x, 0., atol=1e-6)
+    assert_allclose(res_powell.fun, 0., atol=1e-6)
+
+    res_bounded_powell = optimize.minimize(func, x0, options=options,
+                                           bounds=bounds,
+                                           method="Powell")
+    p = np.array([-1, -0.1, 1, 0, -2])
+    assert_allclose(res_bounded_powell.x, p, atol=1e-6)
+    assert_allclose(res_bounded_powell.fun, func(p), atol=1e-6)
+
+    # now we test bounded Powell but with a mix of inf bounds.
+    bounds = (None, -1), (-np.inf, -.1), (1, np.inf), (-4, None), (-15.9, -2)
+    res_bounded_powell = optimize.minimize(func, x0, options=options,
+                                           bounds=bounds,
+                                           method="Powell")
+    p = np.array([-1, -0.1, 1, 0, -2])
+    assert_allclose(res_bounded_powell.x, p, atol=1e-6)
+    assert_allclose(res_bounded_powell.fun, func(p), atol=1e-6)
+
+    # next we test an example where the global minimum is within
+    # the bounds, but the bounded Powell method performs better
+    # than the standard Powell method.
+    def func(x):
+        t = np.sin(-x[0]) * np.cos(x[1]) * np.sin(-x[0] * x[1]) * np.cos(x[1])
+        t -= np.cos(np.sin(x[1] * x[2]) * np.cos(x[2]))
+        return t**2
+
+    bounds = [(-2, 5)] * 3
+    x0 = [-0.5, -0.5, -0.5]
+
+    res_powell = optimize.minimize(func, x0, method="Powell")
+    res_bounded_powell = optimize.minimize(func, x0,
+                                           bounds=bounds,
+                                           method="Powell")
+    assert_allclose(res_powell.fun, 0.007136253919761627, atol=1e-6)
+    assert_allclose(res_bounded_powell.fun, 0, atol=1e-6)
+
+    # next we rerun the previous example, providing Powell with
+    # (-inf, inf) bounds, and compare it to providing Powell
+    # with no bounds. They should end up the same.
+    bounds = [(-np.inf, np.inf)] * 3
+
+    res_bounded_powell = optimize.minimize(func, x0,
+                                           bounds=bounds,
+                                           method="Powell")
+    assert_allclose(res_powell.fun, res_bounded_powell.fun, atol=1e-6)
+    assert_allclose(res_powell.nfev, res_bounded_powell.nfev, atol=1e-6)
+    assert_allclose(res_powell.x, res_bounded_powell.x, atol=1e-6)
+
+    # now test when x0 starts outside of the bounds.
+    x0 = [45.46254415, -26.52351498, 31.74830248]
+    bounds = [(-2, 5)] * 3
+    # we're starting outside the bounds, so we should get a warning
+    with assert_warns(optimize.OptimizeWarning):
+        res_bounded_powell = optimize.minimize(func, x0,
+                                               bounds=bounds,
+                                               method="Powell")
+    assert_allclose(res_bounded_powell.fun, 0, atol=1e-6)
+
+
+def test_onesided_bounded_powell_stability():
+    # When the Powell method is bounded on only one side, an np.tan
+    # transform is applied to convert the problem into a completely
+    # bounded one. Here we run some simple tests of one-sided bounded
+    # Powell in which the optimal solutions are large, to probe the
+    # stability of the transformation.
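+    # (For intuition only; the exact mapping is an implementation detail.
+    # arctan maps a half-open interval such as (-inf, ub] onto the bounded
+    # interval (-pi/2, arctan(ub)], and np.tan maps it back, so very large
+    # optima end up near the edge of the transformed interval, which is
+    # what these checks stress.)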
+    kwargs = {'method': 'Powell',
+              'bounds': [(-np.inf, 1e6)] * 3,
+              'options': {'ftol': 1e-8, 'xtol': 1e-8}}
+    x0 = [1, 1, 1]
+
+    # df/dx is constant.
+    f = lambda x: -np.sum(x)
+    res = optimize.minimize(f, x0, **kwargs)
+    assert_allclose(res.fun, -3e6, atol=1e-4)
+
+    # df/dx gets smaller and smaller.
+    def f(x):
+        return -np.abs(np.sum(x)) ** (0.1) * (1 if np.all(x > 0) else -1)
+
+    res = optimize.minimize(f, x0, **kwargs)
+    assert_allclose(res.fun, -(3e6) ** (0.1))
+
+    # df/dx gets larger and larger.
+    def f(x):
+        return -np.abs(np.sum(x)) ** 10 * (1 if np.all(x > 0) else -1)
+
+    res = optimize.minimize(f, x0, **kwargs)
+    assert_allclose(res.fun, -(3e6) ** 10, rtol=1e-7)
+
+    # df/dx gets larger for some of the variables and smaller for others.
+    def f(x):
+        t = -np.abs(np.sum(x[:2])) ** 5 - np.abs(np.sum(x[2:])) ** (0.1)
+        t *= (1 if np.all(x > 0) else -1)
+        return t
+
+    kwargs['bounds'] = [(-np.inf, 1e3)] * 3
+    res = optimize.minimize(f, x0, **kwargs)
+    assert_allclose(res.fun, -(2e3) ** 5 - (1e6) ** (0.1), rtol=1e-7)
+
+
+class TestOptimizeWrapperDisp(CheckOptimizeParameterized):
+    use_wrapper = True
+    disp = True
+
+
+class TestOptimizeWrapperNoDisp(CheckOptimizeParameterized):
+    use_wrapper = True
+    disp = False
+
+
+class TestOptimizeNoWrapperDisp(CheckOptimizeParameterized):
+    use_wrapper = False
+    disp = True
+
+
+class TestOptimizeNoWrapperNoDisp(CheckOptimizeParameterized):
+    use_wrapper = False
+    disp = False
+
+
+class TestOptimizeSimple(CheckOptimize):
+
+    def test_bfgs_nan(self):
+        # Test corner case where nan is fed to optimizer.  See gh-2067.
+        func = lambda x: x
+        fprime = lambda x: np.ones_like(x)
+        x0 = [np.nan]
+        with np.errstate(over='ignore', invalid='ignore'):
+            x = optimize.fmin_bfgs(func, x0, fprime, disp=False)
+            assert np.isnan(func(x))
+
+    def test_bfgs_nan_return(self):
+        # Test corner cases where fun returns NaN. See gh-4793.
+
+        # First case: NaN from first call.
+        func = lambda x: np.nan
+        with np.errstate(invalid='ignore'):
+            result = optimize.minimize(func, 0)
+
+        assert np.isnan(result['fun'])
+        assert result['success'] is False
+
+        # Second case: NaN from second call.
+        func = lambda x: 0 if x == 0 else np.nan
+        fprime = lambda x: np.ones_like(x)  # Steer away from zero.
+        with np.errstate(invalid='ignore'):
+            result = optimize.minimize(func, 0, jac=fprime)
+
+        assert np.isnan(result['fun'])
+        assert result['success'] is False
+
+    def test_bfgs_numerical_jacobian(self):
+        # BFGS with numerical Jacobian and a vector epsilon parameter.
+        # define the epsilon parameter using a random vector
+        epsilon = np.sqrt(np.spacing(1.)) * np.random.rand(len(self.solution))
+
+        params = optimize.fmin_bfgs(self.func, self.startparams,
+                                    epsilon=epsilon, args=(),
+                                    maxiter=self.maxiter, disp=False)
+
+        assert_allclose(self.func(params), self.func(self.solution),
+                        atol=1e-6)
+
+    def test_finite_differences_jac(self):
+        methods = ['BFGS', 'CG', 'TNC']
+        jacs = ['2-point', '3-point', None]
+        for method, jac in itertools.product(methods, jacs):
+            result = optimize.minimize(self.func, self.startparams,
+                                       method=method, jac=jac)
+            assert_allclose(self.func(result.x), self.func(self.solution),
+                            atol=1e-6)
+
+    def test_finite_differences_hess(self):
+        # test that all the methods that take a Hessian argument can use a
+        # finite-difference approximation. For Newton-CG, trust-ncg and
+        # trust-krylov the FD-estimated Hessian is wrapped in a hessp
+        # function. dogleg and trust-exact actually require true Hessians
+        # at the moment, so they're excluded.
+        methods = ['trust-constr', 'Newton-CG', 'trust-ncg', 'trust-krylov']
+        hesses = FD_METHODS + (optimize.BFGS,)
+        for method, hess in itertools.product(methods, hesses):
+            if hess is optimize.BFGS:
+                hess = hess()
+            result = optimize.minimize(self.func, self.startparams,
+                                       method=method, jac=self.grad,
+                                       hess=hess)
+            assert result.success
+
+        # check that the methods demand some sort of Hessian specification
+        # Newton-CG creates its own hessp, and trust-constr doesn't need a hess
+        # specified either
+        methods = ['trust-ncg', 'trust-krylov', 'dogleg', 'trust-exact']
+        for method in methods:
+            with pytest.raises(ValueError):
+                optimize.minimize(self.func, self.startparams,
+                                  method=method, jac=self.grad,
+                                  hess=None)
+
+    def test_bfgs_gh_2169(self):
+        def f(x):
+            if x < 0:
+                return 1.79769313e+308
+            else:
+                return x + 1./x
+        xs = optimize.fmin_bfgs(f, [10.], disp=False)
+        assert_allclose(xs, 1.0, rtol=1e-4, atol=1e-4)
+
+    def test_bfgs_double_evaluations(self):
+        # check that BFGS does not evaluate twice in a row at the same point
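+        # (jac=True below tells `minimize` that f returns both the
+        # objective value and its gradient)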
+        def f(x):
+            xp = x[0]
+            assert xp not in seen
+            seen.add(xp)
+            return 10*x**2, 20*x
+
+        seen = set()
+        optimize.minimize(f, -100, method='bfgs', jac=True, tol=1e-7)
+
+    def test_l_bfgs_b(self):
+        # limited-memory bound-constrained BFGS algorithm
+        retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,
+                                        self.grad, args=(),
+                                        maxiter=self.maxiter)
+
+        (params, fopt, d) = retval
+
+        assert_allclose(self.func(params), self.func(self.solution),
+                        atol=1e-6)
+
+        # Ensure that function call counts are 'known good'; these are from
+        # SciPy 0.7.0. Don't allow them to increase.
+        assert self.funccalls == 7, self.funccalls
+        assert self.gradcalls == 5, self.gradcalls
+
+        # Ensure that the function behaves the same; this is from SciPy 0.7.0
+        # test fixed in gh10673
+        assert_allclose(self.trace[3:5],
+                        [[8.117083e-16, -5.196198e-01, 4.897617e-01],
+                         [0., -0.52489628, 0.48753042]],
+                        atol=1e-14, rtol=1e-7)
+
+    def test_l_bfgs_b_numjac(self):
+        # L-BFGS-B with numerical Jacobian
+        retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,
+                                        approx_grad=True,
+                                        maxiter=self.maxiter)
+
+        (params, fopt, d) = retval
+
+        assert_allclose(self.func(params), self.func(self.solution),
+                        atol=1e-6)
+
+    def test_l_bfgs_b_funjac(self):
+        # L-BFGS-B with combined objective function and Jacobian
+        def fun(x):
+            return self.func(x), self.grad(x)
+
+        retval = optimize.fmin_l_bfgs_b(fun, self.startparams,
+                                        maxiter=self.maxiter)
+
+        (params, fopt, d) = retval
+
+        assert_allclose(self.func(params), self.func(self.solution),
+                        atol=1e-6)
+
+    def test_l_bfgs_b_maxiter(self):
+        # gh-7854
+        # Ensure that no more than maxiter iterations are ever run.
+        class Callback:
+            def __init__(self):
+                self.nit = 0
+                self.fun = None
+                self.x = None
+
+            def __call__(self, x):
+                self.x = x
+                self.fun = optimize.rosen(x)
+                self.nit += 1
+
+        c = Callback()
+        res = optimize.minimize(optimize.rosen, [0., 0.], method='l-bfgs-b',
+                                callback=c, options={'maxiter': 5})
+
+        assert_equal(res.nit, 5)
+        assert_almost_equal(res.x, c.x)
+        assert_almost_equal(res.fun, c.fun)
+        assert_equal(res.status, 1)
+        assert res.success is False
+        assert_equal(res.message,
+                     'STOP: TOTAL NO. of ITERATIONS REACHED LIMIT')
+
+    def test_minimize_l_bfgs_b(self):
+        # Minimize with L-BFGS-B method
+        opts = {'disp': False, 'maxiter': self.maxiter}
+        r = optimize.minimize(self.func, self.startparams,
+                              method='L-BFGS-B', jac=self.grad,
+                              options=opts)
+        assert_allclose(self.func(r.x), self.func(self.solution),
+                        atol=1e-6)
+        assert self.gradcalls == r.njev
+
+        self.funccalls = self.gradcalls = 0
+        # approximate jacobian
+        ra = optimize.minimize(self.func, self.startparams,
+                               method='L-BFGS-B', options=opts)
+        # check that function evaluations in approximate jacobian are counted
+        # assert_(ra.nfev > r.nfev)
+        assert self.funccalls == ra.nfev
+        assert_allclose(self.func(ra.x), self.func(self.solution),
+                        atol=1e-6)
+
+        self.funccalls = self.gradcalls = 0
+        # approximate jacobian
+        ra = optimize.minimize(self.func, self.startparams, jac='3-point',
+                               method='L-BFGS-B', options=opts)
+        assert self.funccalls == ra.nfev
+        assert_allclose(self.func(ra.x), self.func(self.solution),
+                        atol=1e-6)
+
+    def test_minimize_l_bfgs_b_ftol(self):
+        # Check that the `ftol` parameter in l_bfgs_b works as expected
+        v0 = None
+        for tol in [1e-1, 1e-4, 1e-7, 1e-10]:
+            opts = {'disp': False, 'maxiter': self.maxiter, 'ftol': tol}
+            sol = optimize.minimize(self.func, self.startparams,
+                                    method='L-BFGS-B', jac=self.grad,
+                                    options=opts)
+            v = self.func(sol.x)
+
+            if v0 is None:
+                v0 = v
+            else:
+                assert v < v0
+
+            assert_allclose(v, self.func(self.solution), rtol=tol)
+
+    def test_minimize_l_bfgs_maxls(self):
+        # check that maxls is passed down to the Fortran routine
+        sol = optimize.minimize(optimize.rosen, np.array([-1.2, 1.0]),
+                                method='L-BFGS-B', jac=optimize.rosen_der,
+                                options={'disp': False, 'maxls': 1})
+        assert not sol.success
+
+    def test_minimize_l_bfgs_b_maxfun_interruption(self):
+        # gh-6162
+        f = optimize.rosen
+        g = optimize.rosen_der
+        values = []
+        x0 = np.full(7, 1000)
+
+        def objfun(x):
+            value = f(x)
+            values.append(value)
+            return value
+
+        # Look for an interesting test case.
+        # Request a maxfun that stops at a particularly bad function
+        # evaluation somewhere between 100 and 300 evaluations.
+        low, medium, high = 30, 100, 300
+        optimize.fmin_l_bfgs_b(objfun, x0, fprime=g, maxfun=high)
+        v, k = max((y, i) for i, y in enumerate(values[medium:]))
+        maxfun = medium + k
+        # If the minimization strategy is reasonable,
+        # the minimize() result should not be worse than the best
+        # of the first 30 function evaluations.
+        target = min(values[:low])
+        xmin, fmin, d = optimize.fmin_l_bfgs_b(f, x0, fprime=g, maxfun=maxfun)
+        assert_array_less(fmin, target)
+
+    def test_custom(self):
+        # This function comes from the documentation example.
+        def custmin(fun, x0, args=(), maxfev=None, stepsize=0.1,
+                    maxiter=100, callback=None, **options):
+            bestx = x0
+            besty = fun(x0)
+            funcalls = 1
+            niter = 0
+            improved = True
+            stop = False
+
+            while improved and not stop and niter < maxiter:
+                improved = False
+                niter += 1
+                for dim in range(np.size(x0)):
+                    for s in [bestx[dim] - stepsize, bestx[dim] + stepsize]:
+                        testx = np.copy(bestx)
+                        testx[dim] = s
+                        testy = fun(testx, *args)
+                        funcalls += 1
+                        if testy < besty:
+                            besty = testy
+                            bestx = testx
+                            improved = True
+                    if callback is not None:
+                        callback(bestx)
+                    if maxfev is not None and funcalls >= maxfev:
+                        stop = True
+                        break
+
+            return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter,
+                                           nfev=funcalls, success=(niter > 1))
+
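+        # `minimize` accepts a callable as `method`: it is invoked with the
+        # objective, x0 and the entries of `options` (plus any jac, bounds,
+        # constraints or callback that were passed) as keyword arguments,
+        # and must return an OptimizeResult.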
+        x0 = [1.35, 0.9, 0.8, 1.1, 1.2]
+        res = optimize.minimize(optimize.rosen, x0, method=custmin,
+                                options=dict(stepsize=0.05))
+        assert_allclose(res.x, 1.0, rtol=1e-4, atol=1e-4)
+
+    @pytest.mark.xfail(reason="output not reliable on all platforms")
+    def test_gh13321(self, capfd):
+        # gh-13321 reported issues with console output in fmin_l_bfgs_b;
+        # check that iprint=0 works.
+        kwargs = {'func': optimize.rosen, 'x0': [4, 3],
+                  'fprime': optimize.rosen_der, 'bounds': ((3, 5), (3, 5))}
+
+        # "L-BFGS-B" is always in output; should show when iprint >= 0
+        # "At iterate" is iterate info; should show when iprint >= 1
+
+        optimize.fmin_l_bfgs_b(**kwargs, iprint=-1)
+        out, _ = capfd.readouterr()
+        assert "L-BFGS-B" not in out and "At iterate" not in out
+
+        optimize.fmin_l_bfgs_b(**kwargs, iprint=0)
+        out, _ = capfd.readouterr()
+        assert "L-BFGS-B" in out and "At iterate" not in out
+
+        optimize.fmin_l_bfgs_b(**kwargs, iprint=1)
+        out, _ = capfd.readouterr()
+        assert "L-BFGS-B" in out and "At iterate" in out
+
+        # `disp is not None` overrides `iprint` behavior
+        # `disp=0` should suppress all output
+        # `disp=1` should be the same as `iprint = 1`
+
+        optimize.fmin_l_bfgs_b(**kwargs, iprint=1, disp=False)
+        out, _ = capfd.readouterr()
+        assert "L-BFGS-B" not in out and "At iterate" not in out
+
+        optimize.fmin_l_bfgs_b(**kwargs, iprint=-1, disp=True)
+        out, _ = capfd.readouterr()
+        assert "L-BFGS-B" in out and "At iterate" in out
+
+    def test_gh10771(self):
+        # check that minimize passes bounds and constraints to a custom
+        # minimizer without altering them.
+        bounds = [(-2, 2), (0, 3)]
+        constraints = 'constraints'
+
+        def custmin(fun, x0, **options):
+            assert options['bounds'] is bounds
+            assert options['constraints'] is constraints
+            return optimize.OptimizeResult()
+
+        x0 = [1, 1]
+        optimize.minimize(optimize.rosen, x0, method=custmin,
+                          bounds=bounds, constraints=constraints)
+
+    def test_minimize_tol_parameter(self):
+        # Check that the minimize() tol= argument does something
+        def func(z):
+            x, y = z
+            return x**2*y**2 + x**4 + 1
+
+        def dfunc(z):
+            x, y = z
+            return np.array([2*x*y**2 + 4*x**3, 2*x**2*y])
+
+        for method in ['nelder-mead', 'powell', 'cg', 'bfgs',
+                       'newton-cg', 'l-bfgs-b', 'tnc',
+                       'cobyla', 'slsqp']:
+            if method in ('nelder-mead', 'powell', 'cobyla'):
+                jac = None
+            else:
+                jac = dfunc
+
+            sol1 = optimize.minimize(func, [1, 1], jac=jac, tol=1e-10,
+                                     method=method)
+            sol2 = optimize.minimize(func, [1, 1], jac=jac, tol=1.0,
+                                     method=method)
+            assert func(sol1.x) < func(sol2.x), \
+                "%s: %s vs. %s" % (method, func(sol1.x), func(sol2.x))
+
+    @pytest.mark.parametrize('method',
+                             ['fmin', 'fmin_powell', 'fmin_cg', 'fmin_bfgs',
+                              'fmin_ncg', 'fmin_l_bfgs_b', 'fmin_tnc',
+                              'fmin_slsqp'] + MINIMIZE_METHODS)
+    def test_minimize_callback_copies_array(self, method):
+        # Check that arrays passed to callbacks are not modified
+        # in-place by the optimizer afterward
+
+        if method in ('fmin_tnc', 'fmin_l_bfgs_b'):
+            func = lambda x: (optimize.rosen(x), optimize.rosen_der(x))
+        else:
+            func = optimize.rosen
+            jac = optimize.rosen_der
+            hess = optimize.rosen_hess
+
+        x0 = np.zeros(10)
+
+        # Set options
+        kwargs = {}
+        if method.startswith('fmin'):
+            routine = getattr(optimize, method)
+            if method == 'fmin_slsqp':
+                kwargs['iter'] = 5
+            elif method == 'fmin_tnc':
+                kwargs['maxfun'] = 100
+            elif method in ('fmin', 'fmin_powell'):
+                kwargs['maxiter'] = 3500
+            else:
+                kwargs['maxiter'] = 5
+        else:
+            def routine(*a, **kw):
+                kw['method'] = method
+                return optimize.minimize(*a, **kw)
+
+            if method == 'tnc':
+                kwargs['options'] = dict(maxfun=100)
+            else:
+                kwargs['options'] = dict(maxiter=5)
+
+        if method in ('fmin_ncg',):
+            kwargs['fprime'] = jac
+        elif method in ('newton-cg',):
+            kwargs['jac'] = jac
+        elif method in ('trust-krylov', 'trust-exact', 'trust-ncg', 'dogleg',
+                        'trust-constr'):
+            kwargs['jac'] = jac
+            kwargs['hess'] = hess
+
+        # Run with callback
+        results = []
+
+        def callback(x, *args, **kwargs):
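+            # record the array object and an immediate copy; if the optimizer
+            # later mutated x in place, the pair would no longer compare equal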
+            results.append((x, np.copy(x)))
+
+        routine(func, x0, callback=callback, **kwargs)
+
+        # Check returned arrays coincide with their copies
+        # and have no memory overlap
+        assert len(results) > 2
+        assert all(np.all(x == y) for x, y in results)
+        assert not any(np.may_share_memory(x[0], y[0])
+                       for x, y in itertools.combinations(results, 2))
+
+    @pytest.mark.parametrize('method', ['nelder-mead', 'powell', 'cg',
+                                        'bfgs', 'newton-cg', 'l-bfgs-b',
+                                        'tnc', 'cobyla', 'slsqp'])
+    def test_no_increase(self, method):
+        # Check that the solver doesn't return a value worse than the
+        # initial point.
+
+        def func(x):
+            return (x - 1)**2
+
+        def bad_grad(x):
+            # purposefully invalid gradient function, simulates a case
+            # where line searches start failing
+            return 2*(x - 1) * (-1) - 2
+
+        x0 = np.array([2.0])
+        f0 = func(x0)
+        jac = bad_grad
+        options = dict(maxfun=20) if method == 'tnc' else dict(maxiter=20)
+        if method in ['nelder-mead', 'powell', 'cobyla']:
+            jac = None
+        sol = optimize.minimize(func, x0, jac=jac, method=method,
+                                options=options)
+        assert_equal(func(sol.x), sol.fun)
+
+        if method == 'slsqp':
+            pytest.xfail("SLSQP returns slightly worse")
+        assert func(sol.x) <= f0
+
+    def test_slsqp_respect_bounds(self):
+        # Regression test for gh-3108
+        def f(x):
+            return sum((x - np.array([1., 2., 3., 4.]))**2)
+
+        def cons(x):
+            a = np.array([[-1, -1, -1, -1], [-3, -3, -2, -1]])
+            return np.concatenate([np.dot(a, x) + np.array([5, 10]), x])
+
+        x0 = np.array([0.5, 1., 1.5, 2.])
+        res = optimize.minimize(f, x0, method='slsqp',
+                                constraints={'type': 'ineq', 'fun': cons})
+        assert_allclose(res.x, np.array([0., 2, 5, 8])/3, atol=1e-12)
+
+    @pytest.mark.parametrize('method', ['Nelder-Mead', 'Powell', 'CG', 'BFGS',
+                                        'Newton-CG', 'L-BFGS-B', 'SLSQP',
+                                        'trust-constr', 'dogleg', 'trust-ncg',
+                                        'trust-exact', 'trust-krylov'])
+    def test_respect_maxiter(self, method):
+        # Check that the number of iterations equals maxiter, assuming
+        # convergence is not reached before the limit
+        MAXITER = 4
+
+        x0 = np.zeros(10)
+
+        sf = ScalarFunction(optimize.rosen, x0, (), optimize.rosen_der,
+                            optimize.rosen_hess, None, None)
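+        # ScalarFunction keeps its own evaluation counters (nfev/ngev),
+        # which the counts reported by the solver are compared against below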
+
+        # Set options
+        kwargs = {'method': method, 'options': dict(maxiter=MAXITER)}
+
+        if method in ('Newton-CG',):
+            kwargs['jac'] = sf.grad
+        elif method in ('trust-krylov', 'trust-exact', 'trust-ncg', 'dogleg',
+                        'trust-constr'):
+            kwargs['jac'] = sf.grad
+            kwargs['hess'] = sf.hess
+
+        sol = optimize.minimize(sf.fun, x0, **kwargs)
+        assert sol.nit == MAXITER
+        assert sol.nfev >= sf.nfev
+        if hasattr(sol, 'njev'):
+            assert sol.njev >= sf.ngev
+
+        # method specific tests
+        if method == 'SLSQP':
+            assert sol.status == 9  # Iteration limit reached
+
+    @pytest.mark.parametrize('method', ['Nelder-Mead', 'Powell',
+                                        'fmin', 'fmin_powell'])
+    def test_runtime_warning(self, method):
+        x0 = np.zeros(10)
+        sf = ScalarFunction(optimize.rosen, x0, (), optimize.rosen_der,
+                            optimize.rosen_hess, None, None)
+        options = {"maxiter": 1, "disp": True}
+        with pytest.warns(RuntimeWarning,
+                          match=r'Maximum number of iterations'):
+            if method.startswith('fmin'):
+                routine = getattr(optimize, method)
+                routine(sf.fun, x0, **options)
+            else:
+                optimize.minimize(sf.fun, x0, method=method, options=options)
+
+    def test_respect_maxiter_trust_constr_ineq_constraints(self):
+        # special case of minimization with trust-constr and inequality
+        # constraints, to check that the maxiter limit is obeyed when the
+        # internal method 'tr_interior_point' is used
+        MAXITER = 4
+        f = optimize.rosen
+        jac = optimize.rosen_der
+        hess = optimize.rosen_hess
+
+        fun = lambda x: np.array([0.2 * x[0] - 0.4 * x[1] - 0.33 * x[2]])
+        cons = ({'type': 'ineq',
+                 'fun': fun},)
+
+        x0 = np.zeros(10)
+        sol = optimize.minimize(f, x0, constraints=cons, jac=jac, hess=hess,
+                                method='trust-constr',
+                                options=dict(maxiter=MAXITER))
+        assert sol.nit == MAXITER
+
+    def test_minimize_automethod(self):
+        def f(x):
+            return x**2
+
+        def cons(x):
+            return x - 2
+
+        x0 = np.array([10.])
+        sol_0 = optimize.minimize(f, x0)
+        sol_1 = optimize.minimize(f, x0, constraints=[{'type': 'ineq',
+                                                       'fun': cons}])
+        sol_2 = optimize.minimize(f, x0, bounds=[(5, 10)])
+        sol_3 = optimize.minimize(f, x0,
+                                  constraints=[{'type': 'ineq', 'fun': cons}],
+                                  bounds=[(5, 10)])
+        sol_4 = optimize.minimize(f, x0,
+                                  constraints=[{'type': 'ineq', 'fun': cons}],
+                                  bounds=[(1, 10)])
+        for sol in [sol_0, sol_1, sol_2, sol_3, sol_4]:
+            assert sol.success
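+        # The expected minimizers follow from minimize's documented default
+        # method selection: BFGS when unconstrained, L-BFGS-B when only
+        # bounds are given, and SLSQP when constraints are present.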
+        assert_allclose(sol_0.x, 0, atol=1e-7)
+        assert_allclose(sol_1.x, 2, atol=1e-7)
+        assert_allclose(sol_2.x, 5, atol=1e-7)
+        assert_allclose(sol_3.x, 5, atol=1e-7)
+        assert_allclose(sol_4.x, 2, atol=1e-7)
+
+    def test_minimize_coerce_args_param(self):
+        # Regression test for gh-3503
+        def Y(x, c):
+            return np.sum((x-c)**2)
+
+        def dY_dx(x, c=None):
+            return 2*(x-c)
+
+        c = np.array([3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5])
+        xinit = np.random.randn(len(c))
+        optimize.minimize(Y, xinit, jac=dY_dx, args=(c), method="BFGS")
+
+    def test_initial_step_scaling(self):
+        # Check that optimizer initial step is not huge even if the
+        # function and gradients are
+
+        scales = [1e-50, 1, 1e50]
+        methods = ['CG', 'BFGS', 'L-BFGS-B', 'Newton-CG']
+
+        def f(x):
+            if first_step_size[0] is None and x[0] != x0[0]:
+                first_step_size[0] = abs(x[0] - x0[0])
+            if abs(x).max() > 1e4:
+                raise AssertionError("Optimization stepped far away!")
+            return scale*(x[0] - 1)**2
+
+        def g(x):
+            return np.array([scale*(x[0] - 1)])
+
+        for scale, method in itertools.product(scales, methods):
+            if method in ('CG', 'BFGS'):
+                options = dict(gtol=scale*1e-8)
+            else:
+                options = dict()
+
+            if scale < 1e-10 and method in ('L-BFGS-B', 'Newton-CG'):
+                # XXX: these methods return the initial point if they see a
+                # small gradient, so skip them here
+                continue
+
+            x0 = [-1.0]
+            first_step_size = [None]
+            res = optimize.minimize(f, x0, jac=g, method=method,
+                                    options=options)
+
+            err_msg = "{0} {1}: {2}: {3}".format(method, scale,
+                                                 first_step_size,
+                                                 res)
+
+            assert res.success, err_msg
+            assert_allclose(res.x, [1.0], err_msg=err_msg)
+            assert res.nit <= 3, err_msg
+
+            if scale > 1e-10:
+                if method in ('CG', 'BFGS'):
+                    assert_allclose(first_step_size[0], 1.01, err_msg=err_msg)
+                else:
+                    # Newton-CG and L-BFGS-B use different logic for the first
+                    # step, but are both scaling invariant with step sizes ~ 1
+                    assert 0.5 < first_step_size[0] < 3, err_msg
+            else:
+                # step size has upper bound of ||grad||, so line
+                # search makes many small steps
+                pass
+
+    @pytest.mark.parametrize('method', ['nelder-mead', 'powell', 'cg', 'bfgs',
+                                        'newton-cg', 'l-bfgs-b', 'tnc',
+                                        'cobyla', 'slsqp', 'trust-constr',
+                                        'dogleg', 'trust-ncg', 'trust-exact',
+                                        'trust-krylov'])
+    def test_nan_values(self, method):
+        # Check that nan values result in a failed exit status
+        np.random.seed(1234)
+
+        count = [0]
+
+        def func(x):
+            return np.nan
+
+        def func2(x):
+            count[0] += 1
+            if count[0] > 2:
+                return np.nan
+            else:
+                return np.random.rand()
+
+        def grad(x):
+            return np.array([1.0])
+
+        def hess(x):
+            return np.array([[1.0]])
+
+        x0 = np.array([1.0])
+
+        needs_grad = method in ('newton-cg', 'trust-krylov', 'trust-exact',
+                                'trust-ncg', 'dogleg')
+        needs_hess = method in ('trust-krylov', 'trust-exact', 'trust-ncg',
+                                'dogleg')
+
+        funcs = [func, func2]
+        grads = [grad] if needs_grad else [grad, None]
+        hesss = [hess] if needs_hess else [hess, None]
+        options = dict(maxfun=20) if method == 'tnc' else dict(maxiter=20)
+
+        with np.errstate(invalid='ignore'), suppress_warnings() as sup:
+            sup.filter(UserWarning, "delta_grad == 0.*")
+            sup.filter(RuntimeWarning, ".*does not use Hessian.*")
+            sup.filter(RuntimeWarning, ".*does not use gradient.*")
+
+            for f, g, h in itertools.product(funcs, grads, hesss):
+                count = [0]
+                sol = optimize.minimize(f, x0, jac=g, hess=h, method=method,
+                                        options=options)
+                assert_equal(sol.success, False)
+
+    @pytest.mark.parametrize('method', ['nelder-mead', 'cg', 'bfgs',
+                                        'l-bfgs-b', 'tnc',
+                                        'cobyla', 'slsqp', 'trust-constr',
+                                        'dogleg', 'trust-ncg', 'trust-exact',
+                                        'trust-krylov'])
+    def test_duplicate_evaluations(self, method):
+        # check that there are no duplicate evaluations for any methods
+        jac = hess = None
+        if method in ('newton-cg', 'trust-krylov', 'trust-exact',
+                      'trust-ncg', 'dogleg'):
+            jac = self.grad
+        if method in ('trust-krylov', 'trust-exact', 'trust-ncg',
+                      'dogleg'):
+            hess = self.hess
+
+        with np.errstate(invalid='ignore'), suppress_warnings() as sup:
+            # for trust-constr
+            sup.filter(UserWarning, "delta_grad == 0.*")
+            optimize.minimize(self.func, self.startparams,
+                              method=method, jac=jac, hess=hess)
+
+        for i in range(1, len(self.trace)):
+            if np.array_equal(self.trace[i - 1], self.trace[i]):
+                raise RuntimeError(
+                    "Duplicate evaluations made by {}".format(method))
+
+
+@pytest.mark.parametrize(
+    'method',
+    ['l-bfgs-b', 'tnc', 'Powell', 'Nelder-Mead']
+)
+def test_minimize_with_scalar(method):
+    # checks that minimize works when a scalar is provided as x0.
+    def f(x):
+        return np.sum(x ** 2)
+
+    res = optimize.minimize(f, 17, bounds=[(-100, 100)], method=method)
+    assert res.success
+    assert_allclose(res.x, [0.0], atol=1e-5)
+
+
+class TestLBFGSBBounds:
+    def setup_method(self):
+        self.bounds = ((1, None), (None, None))
+        self.solution = (1, 0)
+
+    def fun(self, x, p=2.0):
+        return 1.0 / p * (x[0]**p + x[1]**p)
+
+    def jac(self, x, p=2.0):
+        return x**(p - 1)
+
+    def fj(self, x, p=2.0):
+        return self.fun(x, p), self.jac(x, p)
+
+    def test_l_bfgs_b_bounds(self):
+        x, f, d = optimize.fmin_l_bfgs_b(self.fun, [0, -1],
+                                         fprime=self.jac,
+                                         bounds=self.bounds)
+        assert d['warnflag'] == 0, d['task']
+        assert_allclose(x, self.solution, atol=1e-6)
+
+    def test_l_bfgs_b_funjac(self):
+        # L-BFGS-B with fun and jac combined and extra arguments
+        x, f, d = optimize.fmin_l_bfgs_b(self.fj, [0, -1], args=(2.0, ),
+                                         bounds=self.bounds)
+        assert d['warnflag'] == 0, d['task']
+        assert_allclose(x, self.solution, atol=1e-6)
+
+    def test_minimize_l_bfgs_b_bounds(self):
+        # Minimize with method='L-BFGS-B' with bounds
+        res = optimize.minimize(self.fun, [0, -1], method='L-BFGS-B',
+                                jac=self.jac, bounds=self.bounds)
+        assert res['success'], res['message']
+        assert_allclose(res.x, self.solution, atol=1e-6)
+
+    @pytest.mark.parametrize('bounds', [
+        ([(10, 1), (1, 10)]),
+        ([(1, 10), (10, 1)]),
+        ([(10, 1), (10, 1)])
+    ])
+    def test_minimize_l_bfgs_b_incorrect_bounds(self, bounds):
+        with pytest.raises(ValueError, match='.*bounds.*'):
+            optimize.minimize(self.fun, [0, -1], method='L-BFGS-B',
+                              jac=self.jac, bounds=bounds)
+
+    def test_minimize_l_bfgs_b_bounds_FD(self):
+        # test that an initial value outside the bounds doesn't raise an
+        # error (the value is clipped into the bounds).
+        # test all finite-difference jac combos, with and without args
+
+        jacs = ['2-point', '3-point', None]
+        argss = [(2.,), ()]
+        for jac, args in itertools.product(jacs, argss):
+            res = optimize.minimize(self.fun, [0, -1], args=args,
+                                    method='L-BFGS-B',
+                                    jac=jac, bounds=self.bounds,
+                                    options={'finite_diff_rel_step': None})
+            assert res['success'], res['message']
+            assert_allclose(res.x, self.solution, atol=1e-6)
+
+
+class TestOptimizeScalar:
+    def setup_method(self):
+        self.solution = 1.5
+
+    def fun(self, x, a=1.5):
+        """Objective function"""
+        return (x - a)**2 - 0.8
+
+    def test_brent(self):
+        x = optimize.brent(self.fun)
+        assert_allclose(x, self.solution, atol=1e-6)
+
+        x = optimize.brent(self.fun, brack=(-3, -2))
+        assert_allclose(x, self.solution, atol=1e-6)
+
+        x = optimize.brent(self.fun, full_output=True)
+        assert_allclose(x[0], self.solution, atol=1e-6)
+
+        x = optimize.brent(self.fun, brack=(-15, -1, 15))
+        assert_allclose(x, self.solution, atol=1e-6)
+
+        message = r"\(f\(xb\) < f\(xa\)\) and \(f\(xb\) < f\(xc\)\)"
+        with pytest.raises(ValueError, match=message):
+            optimize.brent(self.fun, brack=(-1, 0, 1))
+
+        message = r"\(xa < xb\) and \(xb < xc\)"
+        with pytest.raises(ValueError, match=message):
+            optimize.brent(self.fun, brack=(0, -1, 1))
+
+    def test_golden(self):
+        x = optimize.golden(self.fun)
+        assert_allclose(x, self.solution, atol=1e-6)
+
+        x = optimize.golden(self.fun, brack=(-3, -2))
+        assert_allclose(x, self.solution, atol=1e-6)
+
+        x = optimize.golden(self.fun, full_output=True)
+        assert_allclose(x[0], self.solution, atol=1e-6)
+
+        x = optimize.golden(self.fun, brack=(-15, -1, 15))
+        assert_allclose(x, self.solution, atol=1e-6)
+
+        x = optimize.golden(self.fun, tol=0)
+        assert_allclose(x, self.solution)
+
+        maxiter_test_cases = [0, 1, 5]
+        for maxiter in maxiter_test_cases:
+            x0 = optimize.golden(self.fun, maxiter=0, full_output=True)
+            x = optimize.golden(self.fun, maxiter=maxiter, full_output=True)
+            nfev0, nfev = x0[2], x[2]
+            assert_equal(nfev - nfev0, maxiter)
+
+        message = r"\(f\(xb\) < f\(xa\)\) and \(f\(xb\) < f\(xc\)\)"
+        with pytest.raises(ValueError, match=message):
+            optimize.golden(self.fun, brack=(-1, 0, 1))
+
+        message = r"\(xa < xb\) and \(xb < xc\)"
+        with pytest.raises(ValueError, match=message):
+            optimize.golden(self.fun, brack=(0, -1, 1))
+
+    def test_fminbound(self):
+        x = optimize.fminbound(self.fun, 0, 1)
+        assert_allclose(x, 1, atol=1e-4)
+
+        x = optimize.fminbound(self.fun, 1, 5)
+        assert_allclose(x, self.solution, atol=1e-6)
+
+        x = optimize.fminbound(self.fun, np.array([1]), np.array([5]))
+        assert_allclose(x, self.solution, atol=1e-6)
+        assert_raises(ValueError, optimize.fminbound, self.fun, 5, 1)
+
+    def test_fminbound_scalar(self):
+        with pytest.raises(ValueError, match='.*must be finite scalars.*'):
+            optimize.fminbound(self.fun, np.zeros((1, 2)), 1)
+
+        x = optimize.fminbound(self.fun, 1, np.array(5))
+        assert_allclose(x, self.solution, atol=1e-6)
+
+    def test_gh11207(self):
+        # smoke test for gh-11207: fminbound with equal lower and upper
+        # bounds should not raise an error
+        def fun(x):
+            return x**2
+        optimize.fminbound(fun, 0, 0)
+
+    def test_minimize_scalar(self):
+        # combine all tests above for the minimize_scalar wrapper
+        x = optimize.minimize_scalar(self.fun).x
+        assert_allclose(x, self.solution, atol=1e-6)
+
+        x = optimize.minimize_scalar(self.fun, method='Brent')
+        assert x.success
+
+        x = optimize.minimize_scalar(self.fun, method='Brent',
+                                     options=dict(maxiter=3))
+        assert not x.success
+
+        x = optimize.minimize_scalar(self.fun, bracket=(-3, -2),
+                                     args=(1.5, ), method='Brent').x
+        assert_allclose(x, self.solution, atol=1e-6)
+
+        x = optimize.minimize_scalar(self.fun, method='Brent',
+                                     args=(1.5,)).x
+        assert_allclose(x, self.solution, atol=1e-6)
+
+        x = optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15),
+                                     args=(1.5, ), method='Brent').x
+        assert_allclose(x, self.solution, atol=1e-6)
+
+        x = optimize.minimize_scalar(self.fun, bracket=(-3, -2),
+                                     args=(1.5, ), method='golden').x
+        assert_allclose(x, self.solution, atol=1e-6)
+
+        x = optimize.minimize_scalar(self.fun, method='golden',
+                                     args=(1.5,)).x
+        assert_allclose(x, self.solution, atol=1e-6)
+
+        x = optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15),
+                                     args=(1.5, ), method='golden').x
+        assert_allclose(x, self.solution, atol=1e-6)
+
+        x = optimize.minimize_scalar(self.fun, bounds=(0, 1), args=(1.5,),
+                                     method='Bounded').x
+        assert_allclose(x, 1, atol=1e-4)
+
+        x = optimize.minimize_scalar(self.fun, bounds=(1, 5), args=(1.5, ),
+                                     method='bounded').x
+        assert_allclose(x, self.solution, atol=1e-6)
+
+        x = optimize.minimize_scalar(self.fun, bounds=(np.array([1]),
+                                                       np.array([5])),
+                                     args=(np.array([1.5]), ),
+                                     method='bounded').x
+        assert_allclose(x, self.solution, atol=1e-6)
+
+        assert_raises(ValueError, optimize.minimize_scalar, self.fun,
+                      bounds=(5, 1), method='bounded', args=(1.5, ))
+
+        assert_raises(ValueError, optimize.minimize_scalar, self.fun,
+                      bounds=(np.zeros(2), 1), method='bounded', args=(1.5, ))
+
+        x = optimize.minimize_scalar(self.fun, bounds=(1, np.array(5)),
+                                     method='bounded').x
+        assert_allclose(x, self.solution, atol=1e-6)
+
+    def test_minimize_scalar_custom(self):
+        # This function comes from the documentation example.
+        def custmin(fun, bracket, args=(), maxfev=None, stepsize=0.1,
+                    maxiter=100, callback=None, **options):
+            bestx = (bracket[1] + bracket[0]) / 2.0
+            besty = fun(bestx)
+            funcalls = 1
+            niter = 0
+            improved = True
+            stop = False
+
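+            # greedy fixed-step search: probe both neighbours of bestx and
+            # keep moving while either one improves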
+            while improved and not stop and niter < maxiter:
+                improved = False
+                niter += 1
+                for testx in [bestx - stepsize, bestx + stepsize]:
+                    testy = fun(testx, *args)
+                    funcalls += 1
+                    if testy < besty:
+                        besty = testy
+                        bestx = testx
+                        improved = True
+                if callback is not None:
+                    callback(bestx)
+                if maxfev is not None and funcalls >= maxfev:
+                    stop = True
+                    break
+
+            return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter,
+                                           nfev=funcalls, success=(niter > 1))
+
+        res = optimize.minimize_scalar(self.fun, bracket=(0, 4),
+                                       method=custmin,
+                                       options=dict(stepsize=0.05))
+        assert_allclose(res.x, self.solution, atol=1e-6)
+
+    def test_minimize_scalar_coerce_args_param(self):
+        # Regression test for gh-3503
+        optimize.minimize_scalar(self.fun, args=1.5)
+
+    @pytest.mark.parametrize('method', ['brent', 'bounded', 'golden'])
+    def test_disp(self, method):
+        # test that all minimize_scalar methods accept a disp option.
+        kwargs = {"bounds": [-10, 10]} if method == 'bounded' else {}
+        for disp in [0, 1, 2, 3]:
+            optimize.minimize_scalar(self.fun, method=method,
+                                     options={"disp": disp}, **kwargs)
+
+    @pytest.mark.parametrize('method', ['brent', 'bounded', 'golden'])
+    def test_result_attributes(self, method):
+        kwargs = {"bounds": [-10, 10]} if method == 'bounded' else {}
+        result = optimize.minimize_scalar(self.fun, method=method, **kwargs)
+        assert hasattr(result, "x")
+        assert hasattr(result, "success")
+        assert hasattr(result, "message")
+        assert hasattr(result, "fun")
+        assert hasattr(result, "nfev")
+        assert hasattr(result, "nit")
+
+    @pytest.mark.parametrize('method', ['brent', 'bounded', 'golden'])
+    def test_nan_values(self, method):
+        # Check that nan values result in a failed exit status
+        np.random.seed(1234)
+
+        count = [0]
+
+        def func(x):
+            count[0] += 1
+            if count[0] > 4:
+                return np.nan
+            else:
+                return x**2 + 0.1 * np.sin(x)
+
+        bracket = (-1, 0, 1)
+        bounds = (-1, 1)
+
+        with np.errstate(invalid='ignore'), suppress_warnings() as sup:
+            sup.filter(UserWarning, "delta_grad == 0.*")
+            sup.filter(RuntimeWarning, ".*does not use Hessian.*")
+            sup.filter(RuntimeWarning, ".*does not use gradient.*")
+
+            count = [0]
+
+            kwargs = {"bounds": bounds} if method == 'bounded' else {}
+            sol = optimize.minimize_scalar(func, bracket=bracket,
+                                           **kwargs, method=method,
+                                           options=dict(maxiter=20))
+            assert_equal(sol.success, False)
+
+    def test_minimize_scalar_defaults_gh10911(self):
+        # Previously, bounds were silently ignored unless `method='bounded'`
+        # was chosen. See gh-10911. Check that this is no longer the case.
+        def f(x):
+            return x**2
+
+        res = optimize.minimize_scalar(f)
+        assert_allclose(res.x, 0, atol=1e-8)
+
+        res = optimize.minimize_scalar(f, bounds=(1, 100),
+                                       options={'xatol': 1e-10})
+        assert_allclose(res.x, 1)
+
+    def test_minimize_non_finite_bounds_gh10911(self):
+        # Previously, minimize_scalar misbehaved with infinite bounds.
+        # See gh-10911. Check that it now raises an error, instead.
+        msg = "Optimization bounds must be finite scalars."
+        with pytest.raises(ValueError, match=msg):
+            optimize.minimize_scalar(np.sin, bounds=(1, np.inf))
+        with pytest.raises(ValueError, match=msg):
+            optimize.minimize_scalar(np.sin, bounds=(np.nan, 1))
+
+    @pytest.mark.parametrize("method", ['brent', 'golden'])
+    def test_minimize_unbounded_method_with_bounds_gh10911(self, method):
+        # Previously, `bounds` were silently ignored when `method='brent'` or
+        # `method='golden'`. See gh-10911. Check that error is now raised.
+        msg = "Use of `bounds` is incompatible with..."
+        with pytest.raises(ValueError, match=msg):
+            optimize.minimize_scalar(np.sin, method=method, bounds=(1, 2))
+
+
+def test_brent_negative_tolerance():
+    assert_raises(ValueError, optimize.brent, np.cos, tol=-.01)
+
+
+class TestNewtonCg:
+    def test_rosenbrock(self):
+        x0 = np.array([-1.2, 1.0])
+        sol = optimize.minimize(optimize.rosen, x0,
+                                jac=optimize.rosen_der,
+                                hess=optimize.rosen_hess,
+                                tol=1e-5,
+                                method='Newton-CG')
+        assert sol.success, sol.message
+        assert_allclose(sol.x, np.array([1, 1]), rtol=1e-4)
+
+    def test_himmelblau(self):
+        x0 = np.array(himmelblau_x0)
+        sol = optimize.minimize(himmelblau,
+                                x0,
+                                jac=himmelblau_grad,
+                                hess=himmelblau_hess,
+                                method='Newton-CG',
+                                tol=1e-6)
+        assert sol.success, sol.message
+        assert_allclose(sol.x, himmelblau_xopt, rtol=1e-4)
+        assert_allclose(sol.fun, himmelblau_min, atol=1e-4)
+
+    def test_finite_difference(self):
+        x0 = np.array([-1.2, 1.0])
+        sol = optimize.minimize(optimize.rosen, x0,
+                                jac=optimize.rosen_der,
+                                hess='2-point',
+                                tol=1e-5,
+                                method='Newton-CG')
+        assert sol.success, sol.message
+        assert_allclose(sol.x, np.array([1, 1]), rtol=1e-4)
+
+    def test_hessian_update_strategy(self):
+        x0 = np.array([-1.2, 1.0])
+        sol = optimize.minimize(optimize.rosen, x0,
+                                jac=optimize.rosen_der,
+                                hess=optimize.BFGS(),
+                                tol=1e-5,
+                                method='Newton-CG')
+        assert sol.success, sol.message
+        assert_allclose(sol.x, np.array([1, 1]), rtol=1e-4)
+
+
+def test_line_for_search():
+    # _line_for_search is only used in _linesearch_powell, which is also
+    # tested below. Thus there are more tests of _line_for_search in the
+    # test_linesearch_powell_bounded function.
+
+    line_for_search = optimize._optimize._line_for_search
+    # args are x0, alpha, lower_bound, upper_bound
+    # returns lmin, lmax
+
+    lower_bound = np.array([-5.3, -1, -1.5, -3])
+    upper_bound = np.array([1.9, 1, 2.8, 3])
+
+    # test when starting in the bounds
+    x0 = np.array([0., 0, 0, 0])
+    # and when starting outside of the bounds
+    x1 = np.array([0., 2, -3, 0])
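+    # for a direction alpha, _line_for_search returns the interval of step
+    # sizes t with lower_bound <= x + t*alpha <= upper_bound in every
+    # coordinate, e.g. from x0 along [1., 0, 0, 0] that interval is
+    # [-5.3, 1.9]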
+
+    all_tests = (
+        (x0, np.array([1., 0, 0, 0]), -5.3, 1.9),
+        (x0, np.array([0., 1, 0, 0]), -1, 1),
+        (x0, np.array([0., 0, 1, 0]), -1.5, 2.8),
+        (x0, np.array([0., 0, 0, 1]), -3, 3),
+        (x0, np.array([1., 1, 0, 0]), -1, 1),
+        (x0, np.array([1., 0, -1, 2]), -1.5, 1.5),
+        (x0, np.array([2., 0, -1, 2]), -1.5, 0.95),
+        (x1, np.array([1., 0, 0, 0]), -5.3, 1.9),
+        (x1, np.array([0., 1, 0, 0]), -3, -1),
+        (x1, np.array([0., 0, 1, 0]), 1.5, 5.8),
+        (x1, np.array([0., 0, 0, 1]), -3, 3),
+        (x1, np.array([1., 1, 0, 0]), -3, -1),
+        (x1, np.array([1., 0, -1, 0]), -5.3, -1.5),
+    )
+
+    for x, alpha, lmin, lmax in all_tests:
+        mi, ma = line_for_search(x, alpha, lower_bound, upper_bound)
+        assert_allclose(mi, lmin, atol=1e-6)
+        assert_allclose(ma, lmax, atol=1e-6)
+
+    # now with infinite bounds
+    lower_bound = np.array([-np.inf, -1, -np.inf, -3])
+    upper_bound = np.array([np.inf, 1, 2.8, np.inf])
+
+    all_tests = (
+        (x0, np.array([1., 0, 0, 0]), -np.inf, np.inf),
+        (x0, np.array([0., 1, 0, 0]), -1, 1),
+        (x0, np.array([0., 0, 1, 0]), -np.inf, 2.8),
+        (x0, np.array([0., 0, 0, 1]), -3, np.inf),
+        (x0, np.array([1., 1, 0, 0]), -1, 1),
+        (x0, np.array([1., 0, -1, 2]), -1.5, np.inf),
+        (x1, np.array([1., 0, 0, 0]), -np.inf, np.inf),
+        (x1, np.array([0., 1, 0, 0]), -3, -1),
+        (x1, np.array([0., 0, 1, 0]), -np.inf, 5.8),
+        (x1, np.array([0., 0, 0, 1]), -3, np.inf),
+        (x1, np.array([1., 1, 0, 0]), -3, -1),
+        (x1, np.array([1., 0, -1, 0]), -5.8, np.inf),
+    )
+
+    for x, alpha, lmin, lmax in all_tests:
+        mi, ma = line_for_search(x, alpha, lower_bound, upper_bound)
+        assert_allclose(mi, lmin, atol=1e-6)
+        assert_allclose(ma, lmax, atol=1e-6)
+
+
+def test_linesearch_powell():
+    # helper function in optimize.py, not a public function.
+    linesearch_powell = optimize._optimize._linesearch_powell
+    # args are func, p, xi, fval, lower_bound=None, upper_bound=None, tol=1e-3
+    # returns new_fval, p + direction, direction
+    func = lambda x: np.sum((x - np.array([-1., 2., 1.5, -.4]))**2)
+    p0 = np.array([0., 0, 0, 0])
+    fval = func(p0)
+    lower_bound = np.array([-np.inf] * 4)
+    upper_bound = np.array([np.inf] * 4)
+
+    all_tests = (
+        (np.array([1., 0, 0, 0]), -1),
+        (np.array([0., 1, 0, 0]), 2),
+        (np.array([0., 0, 1, 0]), 1.5),
+        (np.array([0., 0, 0, 1]), -.4),
+        (np.array([-1., 0, 1, 0]), 1.25),
+        (np.array([0., 0, 1, 1]), .55),
+        (np.array([2., 0, -1, 1]), -.65),
+    )
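+    # for this quadratic the exact minimizer along xi from p0 = 0 is
+    # l = (xi . target) / (xi . xi) with target = [-1, 2, 1.5, -.4], which
+    # is how the expected l values above were obtained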
+
+    for xi, l in all_tests:
+        f, p, direction = linesearch_powell(func, p0, xi,
+                                            fval=fval, tol=1e-5)
+        assert_allclose(f, func(l * xi), atol=1e-6)
+        assert_allclose(p, l * xi, atol=1e-6)
+        assert_allclose(direction, l * xi, atol=1e-6)
+
+        f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5,
+                                            lower_bound=lower_bound,
+                                            upper_bound=upper_bound,
+                                            fval=fval)
+        assert_allclose(f, func(l * xi), atol=1e-6)
+        assert_allclose(p, l * xi, atol=1e-6)
+        assert_allclose(direction, l * xi, atol=1e-6)
+
+
+def test_linesearch_powell_bounded():
+    # helper function in optimize.py, not a public function.
+    linesearch_powell = optimize._optimize._linesearch_powell
+    # args are func, p, xi, fval, lower_bound=None, upper_bound=None, tol=1e-3
+    # returns new_fval, p+direction, direction
+    func = lambda x: np.sum((x-np.array([-1., 2., 1.5, -.4]))**2)
+    p0 = np.array([0., 0, 0, 0])
+    fval = func(p0)
+
+    # first choose bounds such that the same tests from
+    # test_linesearch_powell should pass.
+    lower_bound = np.array([-2.]*4)
+    upper_bound = np.array([2.]*4)
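+    # each unconstrained minimizer l * xi lies within [-2, 2]^4, so the
+    # bounded search should find the same points as the unbounded one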
+
+    all_tests = (
+        (np.array([1., 0, 0, 0]), -1),
+        (np.array([0., 1, 0, 0]), 2),
+        (np.array([0., 0, 1, 0]), 1.5),
+        (np.array([0., 0, 0, 1]), -.4),
+        (np.array([-1., 0, 1, 0]), 1.25),
+        (np.array([0., 0, 1, 1]), .55),
+        (np.array([2., 0, -1, 1]), -.65),
+    )
+
+    for xi, l in all_tests:
+        f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5,
+                                            lower_bound=lower_bound,
+                                            upper_bound=upper_bound,
+                                            fval=fval)
+        assert_allclose(f, func(l * xi), atol=1e-6)
+        assert_allclose(p, l * xi, atol=1e-6)
+        assert_allclose(direction, l * xi, atol=1e-6)
+
+    # now choose bounds such that unbounded vs bounded gives different results
+    lower_bound = np.array([-.3]*3 + [-1])
+    upper_bound = np.array([.45]*3 + [.9])
+
+    all_tests = (
+        (np.array([1., 0, 0, 0]), -.3),
+        (np.array([0., 1, 0, 0]), .45),
+        (np.array([0., 0, 1, 0]), .45),
+        (np.array([0., 0, 0, 1]), -.4),
+        (np.array([-1., 0, 1, 0]), .3),
+        (np.array([0., 0, 1, 1]), .45),
+        (np.array([2., 0, -1, 1]), -.15),
+    )
+
+    for xi, l in all_tests:
+        f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5,
+                                            lower_bound=lower_bound,
+                                            upper_bound=upper_bound,
+                                            fval=fval)
+        assert_allclose(f, func(l * xi), atol=1e-6)
+        assert_allclose(p, l * xi, atol=1e-6)
+        assert_allclose(direction, l * xi, atol=1e-6)
+
+    # now choose as above but start outside the bounds
+    p0 = np.array([-1., 0, 0, 2])
+    fval = func(p0)
+
+    all_tests = (
+        (np.array([1., 0, 0, 0]), .7),
+        (np.array([0., 1, 0, 0]), .45),
+        (np.array([0., 0, 1, 0]), .45),
+        (np.array([0., 0, 0, 1]), -2.4),
+    )
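+    # with p0 infeasible, the step moves back inside the box: e.g. l = .7
+    # takes x[0] from -1 up to the lower bound -.3, and l = -2.4 takes x[3]
+    # from 2 down to the target -.4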
+
+    for xi, l in all_tests:
+        f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5,
+                                            lower_bound=lower_bound,
+                                            upper_bound=upper_bound,
+                                            fval=fval)
+        assert_allclose(f, func(p0 + l * xi), atol=1e-6)
+        assert_allclose(p, p0 + l * xi, atol=1e-6)
+        assert_allclose(direction, l * xi, atol=1e-6)
+
+    # now mix in inf
+    p0 = np.array([0., 0, 0, 0])
+    fval = func(p0)
+
+    # now choose bounds that mix inf
+    lower_bound = np.array([-.3, -np.inf, -np.inf, -1])
+    upper_bound = np.array([np.inf, .45, np.inf, .9])
+
+    all_tests = (
+        (np.array([1., 0, 0, 0]), -.3),
+        (np.array([0., 1, 0, 0]), .45),
+        (np.array([0., 0, 1, 0]), 1.5),
+        (np.array([0., 0, 0, 1]), -.4),
+        (np.array([-1., 0, 1, 0]), .3),
+        (np.array([0., 0, 1, 1]), .55),
+        (np.array([2., 0, -1, 1]), -.15),
+    )
+
+    for xi, l in all_tests:
+        f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5,
+                                            lower_bound=lower_bound,
+                                            upper_bound=upper_bound,
+                                            fval=fval)
+        assert_allclose(f, func(l * xi), atol=1e-6)
+        assert_allclose(p, l * xi, atol=1e-6)
+        assert_allclose(direction, l * xi, atol=1e-6)
+
+    # now choose as above but start outside the bounds
+    p0 = np.array([-1., 0, 0, 2])
+    fval = func(p0)
+
+    all_tests = (
+        (np.array([1., 0, 0, 0]), .7),
+        (np.array([0., 1, 0, 0]), .45),
+        (np.array([0., 0, 1, 0]), 1.5),
+        (np.array([0., 0, 0, 1]), -2.4),
+    )
+
+    for xi, l in all_tests:
+        f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5,
+                                            lower_bound=lower_bound,
+                                            upper_bound=upper_bound,
+                                            fval=fval)
+        assert_allclose(f, func(p0 + l * xi), atol=1e-6)
+        assert_allclose(p, p0 + l * xi, atol=1e-6)
+        assert_allclose(direction, l * xi, atol=1e-6)
+
+
+def test_powell_limits():
+    # gh15342 - powell was going outside bounds for some function evaluations.
+    bounds = optimize.Bounds([0, 0], [0.6, 20])
+
+    def fun(x):
+        a, b = x
+        assert (x >= bounds.lb).all() and (x <= bounds.ub).all()
+        return a ** 2 + b ** 2
+
+    optimize.minimize(fun, x0=[0.6, 20], method='Powell', bounds=bounds)
+
+    # Another test from the original report - gh-13411
+    bounds = optimize.Bounds(lb=[0,], ub=[1,], keep_feasible=[True,])
+
+    def func(x):
+        assert x >= 0 and x <= 1
+        return np.exp(x)
+
+    optimize.minimize(fun=func, x0=[0.5], method='powell', bounds=bounds)
+
+
+class TestRosen:
+
+    def test_hess(self):
+        # Compare rosen_hess(x) times p with rosen_hess_prod(x,p). See gh-1775.
+        x = np.array([3, 4, 5])
+        p = np.array([2, 2, 2])
+        hp = optimize.rosen_hess_prod(x, p)
+        dothp = np.dot(optimize.rosen_hess(x), p)
+        assert_equal(hp, dothp)
+
+
+def himmelblau(p):
+    """
+    R^2 -> R^1 test function for optimization. The function has four local
+    minima where himmelblau(xopt) == 0.
+    """
+    x, y = p
+    a = x*x + y - 11
+    b = x + y*y - 7
+    return a*a + b*b
+
+
+def himmelblau_grad(p):
+    x, y = p
+    return np.array([4*x**3 + 4*x*y - 42*x + 2*y**2 - 14,
+                     2*x**2 + 4*x*y + 4*y**3 - 26*y - 22])
+
+
+def himmelblau_hess(p):
+    x, y = p
+    return np.array([[12*x**2 + 4*y - 42, 4*x + 4*y],
+                     [4*x + 4*y, 4*x + 12*y**2 - 26]])
+
+
+himmelblau_x0 = [-0.27, -0.9]
+himmelblau_xopt = [3, 2]
+himmelblau_min = 0.0
+
+
+def test_minimize_multiple_constraints():
+    # Regression test for gh-4240.
+    def func(x):
+        return np.array([25 - 0.2 * x[0] - 0.4 * x[1] - 0.33 * x[2]])
+
+    def func1(x):
+        return np.array([x[1]])
+
+    def func2(x):
+        return np.array([x[2]])
+
+    cons = ({'type': 'ineq', 'fun': func},
+            {'type': 'ineq', 'fun': func1},
+            {'type': 'ineq', 'fun': func2})
+
+    f = lambda x: -1 * (x[0] + x[1] + x[2])
+
+    res = optimize.minimize(f, [0, 0, 0], method='SLSQP', constraints=cons)
+    assert_allclose(res.x, [125, 0, 0], atol=1e-10)
+
+
+class TestOptimizeResultAttributes:
+    # Test that all minimizers return an OptimizeResult containing
+    # all the OptimizeResult attributes
+    def setup_method(self):
+        self.x0 = [5, 5]
+        self.func = optimize.rosen
+        self.jac = optimize.rosen_der
+        self.hess = optimize.rosen_hess
+        self.hessp = optimize.rosen_hess_prod
+        self.bounds = [(0., 10.), (0., 10.)]
+
+    def test_attributes_present(self):
+        attributes = ['nit', 'nfev', 'x', 'success', 'status', 'fun',
+                      'message']
+        skip = {'cobyla': ['nit']}
+        for method in MINIMIZE_METHODS:
+            with suppress_warnings() as sup:
+                sup.filter(RuntimeWarning,
+                           ("Method .+ does not use (gradient|Hessian.*)"
+                            " information"))
+                res = optimize.minimize(self.func, self.x0, method=method,
+                                        jac=self.jac, hess=self.hess,
+                                        hessp=self.hessp)
+            for attribute in attributes:
+                if method in skip and attribute in skip[method]:
+                    continue
+
+                assert hasattr(res, attribute)
+                assert attribute in dir(res)
+
+            # gh13001, OptimizeResult.message should be a str
+            assert isinstance(res.message, str)
+
+
+def f1(z, *params):
+    x, y = z
+    a, b, c, d, e, f, g, h, i, j, k, l, scale = params
+    return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f)
+
+
+def f2(z, *params):
+    x, y = z
+    a, b, c, d, e, f, g, h, i, j, k, l, scale = params
+    return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale))
+
+
+def f3(z, *params):
+    x, y = z
+    a, b, c, d, e, f, g, h, i, j, k, l, scale = params
+    return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale))
+
+
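+# brute_func combines a quadratic bowl (f1) with two Gaussian wells (f2 and
+# f3) carved into it, giving a surface with several local minima for the
+# grid search to resolve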
+def brute_func(z, *params):
+    return f1(z, *params) + f2(z, *params) + f3(z, *params)
+
+
+class TestBrute:
+    # Test the "brute force" method
+    def setup_method(self):
+        self.params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 26, 1, -2, 0.5)
+        self.rranges = (slice(-4, 4, 0.25), slice(-4, 4, 0.25))
+        self.solution = np.array([-1.05665192, 1.80834843])
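+        # brute expands each slice like np.mgrid, so the two
+        # slice(-4, 4, 0.25) ranges above define a 32 x 32 search grid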
+
+    def brute_func(self, z, *params):
+        # an instance-method version of brute_func, used to check that
+        # brute can optimize instance methods
+        return brute_func(z, *params)
+
+    def test_brute(self):
+        # test fmin
+        resbrute = optimize.brute(brute_func, self.rranges, args=self.params,
+                                  full_output=True, finish=optimize.fmin)
+        assert_allclose(resbrute[0], self.solution, atol=1e-3)
+        assert_allclose(resbrute[1], brute_func(self.solution, *self.params),
+                        atol=1e-3)
+
+        # test minimize
+        resbrute = optimize.brute(brute_func, self.rranges, args=self.params,
+                                  full_output=True,
+                                  finish=optimize.minimize)
+        assert_allclose(resbrute[0], self.solution, atol=1e-3)
+        assert_allclose(resbrute[1], brute_func(self.solution, *self.params),
+                        atol=1e-3)
+
+        # test that brute can optimize an instance method (the other tests
+        # use a non-class-based function)
+        resbrute = optimize.brute(self.brute_func, self.rranges,
+                                  args=self.params, full_output=True,
+                                  finish=optimize.minimize)
+        assert_allclose(resbrute[0], self.solution, atol=1e-3)
+
+    def test_1D(self):
+        # test that for a 1-D problem the test function is passed an array,
+        # not a scalar.
+        def f(x):
+            assert len(x.shape) == 1
+            assert x.shape[0] == 1
+            return x ** 2
+
+        optimize.brute(f, [(-1, 1)], Ns=3, finish=None)
+
+    def test_workers(self):
+        # check that parallel evaluation works
+        resbrute = optimize.brute(brute_func, self.rranges, args=self.params,
+                                  full_output=True, finish=None)
+
+        resbrute1 = optimize.brute(brute_func, self.rranges, args=self.params,
+                                   full_output=True, finish=None, workers=2)
+
+        assert_allclose(resbrute1[-1], resbrute[-1])
+        assert_allclose(resbrute1[0], resbrute[0])
+
+    def test_runtime_warning(self):
+        rng = np.random.default_rng(1234)
+
+        def func(z, *params):
+            return rng.random(1) * 1000  # a problem that never converges
+
+        with pytest.warns(RuntimeWarning,
+                          match=r'Either final optimization did not succeed'):
+            optimize.brute(func, self.rranges, args=self.params, disp=True)
+
+    def test_coerce_args_param(self):
+        # optimize.brute should coerce non-iterable args to a tuple.
+        def f(x, *args):
+            return x ** args[0]
+
+        resbrute = optimize.brute(f, (slice(-4, 4, .25),), args=2)
+        assert_allclose(resbrute, 0)
+
+
+def test_cobyla_threadsafe():
+
+    # Verify that cobyla is threadsafe. Will segfault if it is not.
+
+    import concurrent.futures
+    import time
+
+    def objective1(x):
+        time.sleep(0.1)
+        return x[0]**2
+
+    def objective2(x):
+        time.sleep(0.1)
+        return (x[0]-1)**2
+
+    min_method = "COBYLA"
+
+    def minimizer1():
+        return optimize.minimize(objective1,
+                                 [0.0],
+                                 method=min_method)
+
+    def minimizer2():
+        return optimize.minimize(objective2,
+                                 [0.0],
+                                 method=min_method)
+
+    with concurrent.futures.ThreadPoolExecutor() as pool:
+        tasks = []
+        tasks.append(pool.submit(minimizer1))
+        tasks.append(pool.submit(minimizer2))
+        for t in tasks:
+            t.result()
+
+
+class TestIterationLimits:
+    # Tests that optimization does not give up before trying the requested
+    # number of iterations or evaluations, and that it does not report
+    # success by exceeding those limits.
+    def setup_method(self):
+        self.funcalls = 0
+
+    def slow_func(self, v):
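+        # an oscillatory, spiral-shaped surface in polar coordinates; its
+        # many local wiggles keep the derivative-free solvers iterating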
+        self.funcalls += 1
+        r, t = np.sqrt(v[0]**2+v[1]**2), np.arctan2(v[0], v[1])
+        return np.sin(r*20 + t)+r*0.5
+
+    def test_neldermead_limit(self):
+        self.check_limits("Nelder-Mead", 200)
+
+    def test_powell_limit(self):
+        self.check_limits("powell", 1000)
+
+    def check_limits(self, method, default_iters):
+        for start_v in [[0.1, 0.1], [1, 1], [2, 2]]:
+            for mfev in [50, 500, 5000]:
+                self.funcalls = 0
+                res = optimize.minimize(self.slow_func, start_v,
+                                        method=method,
+                                        options={"maxfev": mfev})
+                assert self.funcalls == res["nfev"]
+                if res["success"]:
+                    assert res["nfev"] < mfev
+                else:
+                    assert res["nfev"] >= mfev
+            for mit in [50, 500, 5000]:
+                res = optimize.minimize(self.slow_func, start_v,
+                                        method=method,
+                                        options={"maxiter": mit})
+                if res["success"]:
+                    assert res["nit"] <= mit
+                else:
+                    assert res["nit"] >= mit
+            for mfev, mit in [[50, 50], [5000, 5000], [5000, np.inf]]:
+                self.funcalls = 0
+                res = optimize.minimize(self.slow_func, start_v,
+                                        method=method,
+                                        options={"maxiter": mit,
+                                                 "maxfev": mfev})
+                assert self.funcalls == res["nfev"]
+                if res["success"]:
+                    assert res["nfev"] < mfev and res["nit"] <= mit
+                else:
+                    assert res["nfev"] >= mfev or res["nit"] >= mit
+            for mfev, mit in [[np.inf, None], [None, np.inf]]:
+                self.funcalls = 0
+                res = optimize.minimize(self.slow_func, start_v,
+                                        method=method,
+                                        options={"maxiter": mit,
+                                                 "maxfev": mfev})
+                assert self.funcalls == res["nfev"]
+                if res["success"]:
+                    if mfev is None:
+                        assert res["nfev"] < default_iters*2
+                    else:
+                        assert res["nit"] <= default_iters*2
+                else:
+                    assert res["nfev"] >= default_iters*2 or res["nit"] >= default_iters*2
+
+
+def test_result_x_shape_when_len_x_is_one():
+    def fun(x):
+        return x * x
+
+    def jac(x):
+        return 2. * x
+
+    def hess(x):
+        return np.array([[2.]])
+
+    methods = ['Nelder-Mead', 'Powell', 'CG', 'BFGS', 'L-BFGS-B', 'TNC',
+               'COBYLA', 'SLSQP']
+    for method in methods:
+        res = optimize.minimize(fun, np.array([0.1]), method=method)
+        assert res.x.shape == (1,)
+
+    # use jac + hess
+    methods = ['trust-constr', 'dogleg', 'trust-ncg', 'trust-exact',
+               'trust-krylov', 'Newton-CG']
+    for method in methods:
+        res = optimize.minimize(fun, np.array([0.1]), method=method, jac=jac,
+                                hess=hess)
+        assert res.x.shape == (1,)
+
+
+class FunctionWithGradient:
+    def __init__(self):
+        self.number_of_calls = 0
+
+    def __call__(self, x):
+        self.number_of_calls += 1
+        return np.sum(x**2), 2 * x
+
+
+@pytest.fixture
+def function_with_gradient():
+    return FunctionWithGradient()
+
+
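+# MemoizeJac wraps a callable that returns a (value, gradient) pair so that
+# the separate `fun` and `jac` arguments of a solver can share a single
+# evaluation. A minimal sketch of the intended usage:
+#
+#     mf = MemoizeJac(function_with_gradient)
+#     optimize.minimize(mf, x0, jac=mf.derivative, method="BFGS")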
+def test_memoize_jac_function_before_gradient(function_with_gradient):
+    memoized_function = MemoizeJac(function_with_gradient)
+
+    x0 = np.array([1.0, 2.0])
+    assert_allclose(memoized_function(x0), 5.0)
+    assert function_with_gradient.number_of_calls == 1
+
+    assert_allclose(memoized_function.derivative(x0), 2 * x0)
+    assert function_with_gradient.number_of_calls == 1, \
+        "function is not recomputed " \
+        "if gradient is requested after function value"
+
+    assert_allclose(
+        memoized_function(2 * x0), 20.0,
+        err_msg="different input triggers new computation")
+    assert function_with_gradient.number_of_calls == 2, \
+        "different input triggers new computation"
+
+
+def test_memoize_jac_gradient_before_function(function_with_gradient):
+    memoized_function = MemoizeJac(function_with_gradient)
+
+    x0 = np.array([1.0, 2.0])
+    assert_allclose(memoized_function.derivative(x0), 2 * x0)
+    assert function_with_gradient.number_of_calls == 1
+
+    assert_allclose(memoized_function(x0), 5.0)
+    assert function_with_gradient.number_of_calls == 1, \
+        "function is not recomputed " \
+        "if function value is requested after gradient"
+
+    assert_allclose(
+        memoized_function.derivative(2 * x0), 4 * x0,
+        err_msg="different input triggers new computation")
+    assert function_with_gradient.number_of_calls == 2, \
+        "different input triggers new computation"
+
+
+def test_memoize_jac_with_bfgs(function_with_gradient):
+    """ Tests that using MemoizedJac in combination with ScalarFunction
+        and BFGS does not lead to repeated function evaluations.
+        Tests changes made in response to GH11868.
+    """
+    memoized_function = MemoizeJac(function_with_gradient)
+    jac = memoized_function.derivative
+    hess = optimize.BFGS()
+
+    x0 = np.array([1.0, 0.5])
+    scalar_function = ScalarFunction(
+        memoized_function, x0, (), jac, hess, None, None)
+    assert function_with_gradient.number_of_calls == 1
+
+    scalar_function.fun(x0 + 0.1)
+    assert function_with_gradient.number_of_calls == 2
+
+    scalar_function.fun(x0 + 0.2)
+    assert function_with_gradient.number_of_calls == 3
+
+
+def test_gh12696():
+    # Test that optimize doesn't throw warning gh-12696
+    with assert_no_warnings():
+        optimize.fminbound(
+            lambda x: np.array([x**2]), -np.pi, np.pi, disp=False)
+
+
+# --- Test minimize with equal upper and lower bounds --- #
+
+def setup_test_equal_bounds():
+
+    np.random.seed(0)
+    x0 = np.random.rand(4)
+    lb = np.array([0, 2, -1, -1.0])
+    ub = np.array([3, 2, 2, -1.0])
+    i_eb = (lb == ub)
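+    # i_eb marks the "equal bounds" entries (here x[1] == 2 and x[3] == -1),
+    # which the minimizers under test factor out as fixed variables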
+
+    def check_x(x, check_size=True, check_values=True):
+        if check_size:
+            assert x.size == 4
+        if check_values:
+            assert_allclose(x[i_eb], lb[i_eb])
+
+    def func(x):
+        check_x(x)
+        return optimize.rosen(x)
+
+    def grad(x):
+        check_x(x)
+        return optimize.rosen_der(x)
+
+    def callback(x, *args):
+        check_x(x)
+
+    def constraint1(x):
+        check_x(x, check_values=False)
+        return x[0:1] - 1
+
+    def jacobian1(x):
+        check_x(x, check_values=False)
+        dc = np.zeros_like(x)
+        dc[0] = 1
+        return dc
+
+    def constraint2(x):
+        check_x(x, check_values=False)
+        return x[2:3] - 0.5
+
+    def jacobian2(x):
+        check_x(x, check_values=False)
+        dc = np.zeros_like(x)
+        dc[2] = 1
+        return dc
+
+    c1a = NonlinearConstraint(constraint1, -np.inf, 0)
+    c1b = NonlinearConstraint(constraint1, -np.inf, 0, jacobian1)
+    c2a = NonlinearConstraint(constraint2, -np.inf, 0)
+    c2b = NonlinearConstraint(constraint2, -np.inf, 0, jacobian2)
+
+    # test using the three methods that accept bounds, use derivatives, and
+    # have some trouble when bounds fix variables
+    methods = ('L-BFGS-B', 'SLSQP', 'TNC')
+
+    # test w/out gradient, w/ gradient, and w/ combined objective/gradient
+    kwds = ({"fun": func, "jac": False},
+            {"fun": func, "jac": grad},
+            {"fun": (lambda x: (func(x), grad(x))),
+             "jac": True})
+
+    # test with both old- and new-style bounds
+    bound_types = (lambda lb, ub: list(zip(lb, ub)),
+                   Bounds)
+
+    # Test for many combinations of constraints w/ and w/out jacobian
+    # Pairs in format: (test constraints, reference constraints)
+    # (always use analytical jacobian in reference)
+    constraints = ((None, None), ([], []),
+                   (c1a, c1b), (c2b, c2b),
+                   ([c1b], [c1b]), ([c2a], [c2b]),
+                   ([c1a, c2a], [c1b, c2b]),
+                   ([c1a, c2b], [c1b, c2b]),
+                   ([c1b, c2b], [c1b, c2b]))
+
+    # test with and without callback function
+    callbacks = (None, callback)
+
+    data = {"methods": methods, "kwds": kwds, "bound_types": bound_types,
+            "constraints": constraints, "callbacks": callbacks,
+            "lb": lb, "ub": ub, "x0": x0, "i_eb": i_eb}
+
+    return data
+
+
+eb_data = setup_test_equal_bounds()
+
+
+# This test is about handling fixed variables, not the accuracy of the solvers
+@pytest.mark.xfail_on_32bit("Failures due to floating point issues, not logic")
+@pytest.mark.parametrize('method', eb_data["methods"])
+@pytest.mark.parametrize('kwds', eb_data["kwds"])
+@pytest.mark.parametrize('bound_type', eb_data["bound_types"])
+@pytest.mark.parametrize('constraints', eb_data["constraints"])
+@pytest.mark.parametrize('callback', eb_data["callbacks"])
+def test_equal_bounds(method, kwds, bound_type, constraints, callback):
+    """
+    Tests that minimizers still work if (bounds.lb == bounds.ub).any()
+    gh12502 - Divide by zero in Jacobian numerical differentiation when
+    equality bounds constraints are used
+    """
+    # GH-15051; slightly more skips than necessary; hopefully fixed by GH-14882
+    if (platform.machine() == 'aarch64' and method == "TNC"
+            and kwds["jac"] is False and callback is not None):
+        pytest.skip('Tolerance violation on aarch')
+
+    lb, ub = eb_data["lb"], eb_data["ub"]
+    x0, i_eb = eb_data["x0"], eb_data["i_eb"]
+
+    test_constraints, reference_constraints = constraints
+    if test_constraints and method != 'SLSQP':
+        pytest.skip('Only SLSQP supports nonlinear constraints')
+    # reference constraints always have analytical jacobian
+    # if test constraints are not the same, we'll need finite differences
+    fd_needed = (test_constraints != reference_constraints)
+
+    bounds = bound_type(lb, ub)  # old- or new-style
+
+    kwds.update({"x0": x0, "method": method, "bounds": bounds,
+                 "constraints": test_constraints, "callback": callback})
+    res = optimize.minimize(**kwds)
+
+    expected = optimize.minimize(optimize.rosen, x0, method=method,
+                                 jac=optimize.rosen_der, bounds=bounds,
+                                 constraints=reference_constraints)
+
+    # compare the output of a solution with FD vs that of an analytic grad
+    assert res.success
+    assert_allclose(res.fun, expected.fun, rtol=1e-6)
+    assert_allclose(res.x, expected.x, rtol=5e-4)
+
+    if fd_needed or kwds['jac'] is False:
+        expected.jac[i_eb] = np.nan
+    assert res.jac.shape[0] == 4
+    assert_allclose(res.jac[i_eb], expected.jac[i_eb], rtol=1e-6)
+
+    if not (kwds['jac'] or test_constraints or isinstance(bounds, Bounds)):
+        # compare the output to an equivalent FD minimization that doesn't
+        # need factorization
+        def fun(x):
+            new_x = np.array([np.nan, 2, np.nan, -1])
+            new_x[[0, 2]] = x
+            return optimize.rosen(new_x)
+
+        fd_res = optimize.minimize(fun,
+                                   x0[[0, 2]],
+                                   method=method,
+                                   bounds=bounds[::2])
+        assert_allclose(res.fun, fd_res.fun)
+        # TODO this test should really be equivalent to the factorized
+        # version above, down to res.nfev. However, testing found that the
+        # TNC output differs depending on whether a callback is supplied.
+        # The two should be the same! This indicates that the TNC callback
+        # may be mutating something when it shouldn't.
+        assert_allclose(res.x[[0, 2]], fd_res.x, rtol=2e-6)
+
+
+@pytest.mark.parametrize('method', eb_data["methods"])
+def test_all_bounds_equal(method):
+    # this only tests methods that have parameters factored out when lb==ub
+    # it does not test other methods that work with bounds
+    def f(x, p1=1):
+        return np.linalg.norm(x) + p1
+
+    bounds = [(1, 1), (2, 2)]
+    x0 = (1.0, 3.0)
+    res = optimize.minimize(f, x0, bounds=bounds, method=method)
+    assert res.success
+    assert_allclose(res.fun, f([1.0, 2.0]))
+    assert res.nfev == 1
+    assert res.message == 'All independent variables were fixed by bounds.'
+
+    args = (2,)
+    res = optimize.minimize(f, x0, bounds=bounds, method=method, args=args)
+    assert res.success
+    assert_allclose(res.fun, f([1.0, 2.0], 2))
+
+    if method.upper() == 'SLSQP':
+        def con(x):
+            return np.sum(x)
+        nlc = NonlinearConstraint(con, -np.inf, 0.0)
+        res = optimize.minimize(
+            f, x0, bounds=bounds, method=method, constraints=[nlc]
+        )
+        assert res.success is False
+        assert_allclose(res.fun, f([1.0, 2.0]))
+        assert res.nfev == 1
+        message = "All independent variables were fixed by bounds, but"
+        assert res.message.startswith(message)
+
+        nlc = NonlinearConstraint(con, -np.inf, 4)
+        res = optimize.minimize(
+            f, x0, bounds=bounds, method=method, constraints=[nlc]
+        )
+        assert res.success is True
+        assert_allclose(res.fun, f([1.0, 2.0]))
+        assert res.nfev == 1
+        message = "All independent variables were fixed by bounds at values"
+        assert res.message.startswith(message)
+
+
+def test_eb_constraints():
+    # make sure constraint functions aren't overwritten when equal bounds
+    # are employed, and a parameter is factored out. GH14859
+    def f(x):
+        return x[0]**3 + x[1]**2 + x[2]*x[3]
+
+    def cfun(x):
+        return x[0] + x[1] + x[2] + x[3] - 40
+
+    constraints = [{'type': 'ineq', 'fun': cfun}]
+
+    bounds = [(0, 20)] * 4
+    bounds[1] = (5, 5)
+    optimize.minimize(
+        f,
+        x0=[1, 2, 3, 4],
+        method='SLSQP',
+        bounds=bounds,
+        constraints=constraints,
+    )
+    assert constraints[0]['fun'] == cfun
+
+
+def test_show_options():
+    solver_methods = {
+        'minimize': MINIMIZE_METHODS,
+        'minimize_scalar': MINIMIZE_SCALAR_METHODS,
+        'root': ROOT_METHODS,
+        'root_scalar': ROOT_SCALAR_METHODS,
+        'linprog': LINPROG_METHODS,
+        'quadratic_assignment': QUADRATIC_ASSIGNMENT_METHODS,
+    }
+    for solver, methods in solver_methods.items():
+        for method in methods:
+            # testing that `show_options` works without error
+            show_options(solver, method)
+
+    unknown_solver_method = {
+        'minimize': "ekki",  # unknown method
+        'maximize': "cg",  # unknown solver
+        'maximize_scalar': "ekki",  # unknown solver and method
+    }
+    for solver, method in unknown_solver_method.items():
+        # testing that `show_options` raises ValueError
+        assert_raises(ValueError, show_options, solver, method)
+
+
+def test_bounds_with_list():
+    # gh13501. Bounds created with lists weren't working for Powell.
+    bounds = optimize.Bounds(lb=[5., 5.], ub=[10., 10.])
+    optimize.minimize(
+        optimize.rosen, x0=np.array([9, 9]), method='Powell', bounds=bounds
+    )
+
+
+def test_x_overwritten_user_function():
+    # If the user function overwrites the x-array in place, it's likely
+    # that the minimizer will stop working properly.
+    # gh13740
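+    # fquad/fquad_jac below deliberately mutate their argument in place; a
+    # robust minimizer must therefore hand the user function a copy of x
+    # (e.g. np.copy(x)) rather than its internal working array.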
+    def fquad(x):
+        a = np.arange(np.size(x))
+        x -= a
+        x *= x
+        return np.sum(x)
+
+    def fquad_jac(x):
+        a = np.arange(np.size(x))
+        x *= 2
+        x -= 2 * a
+        return x
+
+    fquad_hess = lambda x: np.eye(np.size(x)) * 2.0
+
+    meth_jac = [
+        'newton-cg', 'dogleg', 'trust-ncg', 'trust-exact',
+        'trust-krylov', 'trust-constr'
+    ]
+    meth_hess = [
+        'dogleg', 'trust-ncg', 'trust-exact', 'trust-krylov', 'trust-constr'
+    ]
+
+    x0 = np.ones(5) * 1.5
+
+    for meth in MINIMIZE_METHODS:
+        jac = None
+        hess = None
+        if meth in meth_jac:
+            jac = fquad_jac
+        if meth in meth_hess:
+            hess = fquad_hess
+        res = optimize.minimize(fquad, x0, method=meth, jac=jac, hess=hess)
+        assert_allclose(res.x, np.arange(np.size(x0)), atol=2e-4)
+
+
+class TestGlobalOptimization:
+
+    def test_optimize_result_attributes(self):
+        def func(x):
+            return x ** 2
+
+        # Note that `brute` solver does not return `OptimizeResult`
+        results = [optimize.basinhopping(func, x0=1),
+                   optimize.differential_evolution(func, [(-4, 4)]),
+                   optimize.shgo(func, [(-4, 4)]),
+                   optimize.dual_annealing(func, [(-4, 4)]),
+                   optimize.direct(func, [(-4, 4)]),
+                   ]
+
+        for result in results:
+            assert isinstance(result, optimize.OptimizeResult)
+            assert hasattr(result, "x")
+            assert hasattr(result, "success")
+            assert hasattr(result, "message")
+            assert hasattr(result, "fun")
+            assert hasattr(result, "nfev")
+            assert hasattr(result, "nit")
+
+
+def test_approx_fprime():
+    # check that approx_fprime (serviced by approx_derivative) works for
+    # jac and hess
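+    # (numerically differentiating the gradient function row-by-row yields
+    # the Hessian, which is what the second call below checks)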
+    g = optimize.approx_fprime(himmelblau_x0, himmelblau)
+    assert_allclose(g, himmelblau_grad(himmelblau_x0), rtol=5e-6)
+
+    h = optimize.approx_fprime(himmelblau_x0, himmelblau_grad)
+    assert_allclose(h, himmelblau_hess(himmelblau_x0), rtol=5e-6)
+
+
+def test_gh12594():
+    # gh-12594 reported an error in `_linesearch_powell` and
+    # `_line_for_search` when `Bounds` was passed lists instead of arrays.
+    # Check that results are the same whether the inputs are lists or arrays.
+
+    def f(x):
+        return x[0]**2 + (x[1] - 1)**2
+
+    bounds = Bounds(lb=[-10, -10], ub=[10, 10])
+    res = optimize.minimize(f, x0=(0, 0), method='Powell', bounds=bounds)
+    bounds = Bounds(lb=np.array([-10, -10]), ub=np.array([10, 10]))
+    ref = optimize.minimize(f, x0=(0, 0), method='Powell', bounds=bounds)
+
+    assert_allclose(res.fun, ref.fun)
+    assert_allclose(res.x, ref.x)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_quadratic_assignment.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_quadratic_assignment.py
new file mode 100644
index 00000000..baa4886f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_quadratic_assignment.py
@@ -0,0 +1,431 @@
+import pytest
+import numpy as np
+from scipy.optimize import quadratic_assignment, OptimizeWarning
+from scipy.optimize._qap import _calc_score as _score
+from numpy.testing import assert_equal, assert_, assert_warns
+
+
+################
+# Common Tests #
+################
+
+def chr12c():
+    A = [
+        [0, 90, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+        [90, 0, 0, 23, 0, 0, 0, 0, 0, 0, 0, 0],
+        [10, 0, 0, 0, 43, 0, 0, 0, 0, 0, 0, 0],
+        [0, 23, 0, 0, 0, 88, 0, 0, 0, 0, 0, 0],
+        [0, 0, 43, 0, 0, 0, 26, 0, 0, 0, 0, 0],
+        [0, 0, 0, 88, 0, 0, 0, 16, 0, 0, 0, 0],
+        [0, 0, 0, 0, 26, 0, 0, 0, 1, 0, 0, 0],
+        [0, 0, 0, 0, 0, 16, 0, 0, 0, 96, 0, 0],
+        [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 29, 0],
+        [0, 0, 0, 0, 0, 0, 0, 96, 0, 0, 0, 37],
+        [0, 0, 0, 0, 0, 0, 0, 0, 29, 0, 0, 0],
+        [0, 0, 0, 0, 0, 0, 0, 0, 0, 37, 0, 0],
+    ]
+    B = [
+        [0, 36, 54, 26, 59, 72, 9, 34, 79, 17, 46, 95],
+        [36, 0, 73, 35, 90, 58, 30, 78, 35, 44, 79, 36],
+        [54, 73, 0, 21, 10, 97, 58, 66, 69, 61, 54, 63],
+        [26, 35, 21, 0, 93, 12, 46, 40, 37, 48, 68, 85],
+        [59, 90, 10, 93, 0, 64, 5, 29, 76, 16, 5, 76],
+        [72, 58, 97, 12, 64, 0, 96, 55, 38, 54, 0, 34],
+        [9, 30, 58, 46, 5, 96, 0, 83, 35, 11, 56, 37],
+        [34, 78, 66, 40, 29, 55, 83, 0, 44, 12, 15, 80],
+        [79, 35, 69, 37, 76, 38, 35, 44, 0, 64, 39, 33],
+        [17, 44, 61, 48, 16, 54, 11, 12, 64, 0, 70, 86],
+        [46, 79, 54, 68, 5, 0, 56, 15, 39, 70, 0, 18],
+        [95, 36, 63, 85, 76, 34, 37, 80, 33, 86, 18, 0],
+    ]
+    A, B = np.array(A), np.array(B)
+    n = A.shape[0]
+
+    # QAPLIB permutations are 1-based; subtract 1 for 0-based indexing
+    opt_perm = np.array([7, 5, 1, 3, 10, 4, 8, 6, 9, 11, 2, 12]) - [1] * n
+
+    return A, B, opt_perm
+
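+# For reference, the objective value (`res.fun`) asserted throughout these
+# tests is the standard QAP trace objective; a minimal sketch of the score
+# computation (an illustrative re-implementation, not scipy's internal
+# `_calc_score`):
+#
+#     def qap_score(A, B, perm):
+#         A, B, perm = np.asarray(A), np.asarray(B), np.asarray(perm)
+#         return np.sum(A * B[perm][:, perm])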
+
+class QAPCommonTests:
+    """
+    Base class for `quadratic_assignment` tests.
+    """
+    def setup_method(self):
+        np.random.seed(0)
+
+    # Test global optima of problem from Umeyama IVB
+    # https://pcl.sitehost.iu.edu/rgoldsto/papers/weighted%20graph%20match2.pdf
+    # Graph matching maximum is in the paper
+    # QAP minimum determined by brute force
+    def test_accuracy_1(self):
+        # besides testing accuracy, check that A and B can be lists
+        A = [[0, 3, 4, 2],
+             [0, 0, 1, 2],
+             [1, 0, 0, 1],
+             [0, 0, 1, 0]]
+
+        B = [[0, 4, 2, 4],
+             [0, 0, 1, 0],
+             [0, 2, 0, 2],
+             [0, 1, 2, 0]]
+
+        res = quadratic_assignment(A, B, method=self.method,
+                                   options={"rng": 0, "maximize": False})
+        assert_equal(res.fun, 10)
+        assert_equal(res.col_ind, np.array([1, 2, 3, 0]))
+
+        res = quadratic_assignment(A, B, method=self.method,
+                                   options={"rng": 0, "maximize": True})
+
+        if self.method == 'faq':
+            # Global optimum is 40, but FAQ gets 37
+            assert_equal(res.fun, 37)
+            assert_equal(res.col_ind, np.array([0, 2, 3, 1]))
+        else:
+            assert_equal(res.fun, 40)
+            assert_equal(res.col_ind, np.array([0, 3, 1, 2]))
+
+    # Test global optima of problem from Umeyama IIIB
+    # https://pcl.sitehost.iu.edu/rgoldsto/papers/weighted%20graph%20match2.pdf
+    # Graph matching maximum is in the paper
+    # QAP minimum determined by brute force
+    def test_accuracy_2(self):
+
+        A = np.array([[0, 5, 8, 6],
+                      [5, 0, 5, 1],
+                      [8, 5, 0, 2],
+                      [6, 1, 2, 0]])
+
+        B = np.array([[0, 1, 8, 4],
+                      [1, 0, 5, 2],
+                      [8, 5, 0, 5],
+                      [4, 2, 5, 0]])
+
+        res = quadratic_assignment(A, B, method=self.method,
+                                   options={"rng": 0, "maximize": False})
+        if self.method == 'faq':
+            # Global optimum is 176, but FAQ gets 178
+            assert_equal(res.fun, 178)
+            assert_equal(res.col_ind, np.array([1, 0, 3, 2]))
+        else:
+            assert_equal(res.fun, 176)
+            assert_equal(res.col_ind, np.array([1, 2, 3, 0]))
+
+        res = quadratic_assignment(A, B, method=self.method,
+                                   options={"rng": 0, "maximize": True})
+        assert_equal(res.fun, 286)
+        assert_equal(res.col_ind, np.array([2, 3, 0, 1]))
+
+    def test_accuracy_3(self):
+
+        A, B, opt_perm = chr12c()
+
+        # basic minimization
+        res = quadratic_assignment(A, B, method=self.method,
+                                   options={"rng": 0})
+        assert_(11156 <= res.fun < 21000)
+        assert_equal(res.fun, _score(A, B, res.col_ind))
+
+        # basic maximization
+        res = quadratic_assignment(A, B, method=self.method,
+                                   options={"rng": 0, 'maximize': True})
+        assert_(74000 <= res.fun < 85000)
+        assert_equal(res.fun, _score(A, B, res.col_ind))
+
+        # check objective function value (ofv) with strictly partial match
+        seed_cost = np.array([4, 8, 10])
+        seed = np.asarray([seed_cost, opt_perm[seed_cost]]).T
+        res = quadratic_assignment(A, B, method=self.method,
+                                   options={'partial_match': seed})
+        assert_(11156 <= res.fun < 21000)
+        assert_equal(res.col_ind[seed_cost], opt_perm[seed_cost])
+
+        # check performance when partial match is the global optimum
+        seed = np.asarray([np.arange(len(A)), opt_perm]).T
+        res = quadratic_assignment(A, B, method=self.method,
+                                   options={'partial_match': seed})
+        assert_equal(res.col_ind, seed[:, 1].T)
+        assert_equal(res.fun, 11156)
+        assert_equal(res.nit, 0)
+
+        # check performance with zero sized matrix inputs
+        empty = np.empty((0, 0))
+        res = quadratic_assignment(empty, empty, method=self.method,
+                                   options={"rng": 0})
+        assert_equal(res.nit, 0)
+        assert_equal(res.fun, 0)
+
+    def test_unknown_options(self):
+        A, B, opt_perm = chr12c()
+
+        def f():
+            quadratic_assignment(A, B, method=self.method,
+                                 options={"ekki-ekki": True})
+        assert_warns(OptimizeWarning, f)
+
+
+class TestFAQ(QAPCommonTests):
+    method = "faq"
+
+    def test_options(self):
+        # cost and distance matrices of QAPLIB instance chr12c
+        A, B, opt_perm = chr12c()
+        n = len(A)
+
+        # check that 'maxiter' is obeyed with a low input value
+        res = quadratic_assignment(A, B,
+                                   options={'maxiter': 5})
+        assert_equal(res.nit, 5)
+
+        # test with shuffle
+        res = quadratic_assignment(A, B,
+                                   options={'shuffle_input': True})
+        assert_(11156 <= res.fun < 21000)
+
+        # test with randomized init
+        res = quadratic_assignment(A, B,
+                                   options={'rng': 1, 'P0': "randomized"})
+        assert_(11156 <= res.fun < 21000)
+
+        # check with specified P0
+        K = np.ones((n, n)) / float(n)
+        K = _doubly_stochastic(K)
+        res = quadratic_assignment(A, B,
+                                   options={'P0': K})
+        assert_(11156 <= res.fun < 21000)
+
+    def test_specific_input_validation(self):
+
+        A = np.identity(2)
+        B = A
+
+        # method is implicitly faq
+
+        # ValueError Checks: making sure single value parameters are of
+        # correct value
+        with pytest.raises(ValueError, match="Invalid 'P0' parameter"):
+            quadratic_assignment(A, B, options={'P0': "random"})
+        with pytest.raises(
+                ValueError, match="'maxiter' must be a positive integer"):
+            quadratic_assignment(A, B, options={'maxiter': -1})
+        with pytest.raises(ValueError, match="'tol' must be a positive float"):
+            quadratic_assignment(A, B, options={'tol': -1})
+
+        # TypeError Checks: making sure single value parameters are of
+        # correct type
+        with pytest.raises(TypeError):
+            quadratic_assignment(A, B, options={'maxiter': 1.5})
+
+        # test P0 matrix input
+        with pytest.raises(
+                ValueError,
+                match="`P0` matrix must have shape m' x m', where m'=n-m"):
+            quadratic_assignment(
+                np.identity(4), np.identity(4),
+                options={'P0': np.ones((3, 3))}
+            )
+
+        K = [[0.4, 0.2, 0.3],
+             [0.3, 0.6, 0.2],
+             [0.2, 0.2, 0.7]]
+        # matrix that isn't quite doubly stochastic
+        with pytest.raises(
+                ValueError, match="`P0` matrix must be doubly stochastic"):
+            quadratic_assignment(
+                np.identity(3), np.identity(3), options={'P0': K}
+            )
+
+
+class Test2opt(QAPCommonTests):
+    method = "2opt"
+
+    def test_deterministic(self):
+        # np.random.seed(0) executes before every method
+        n = 20
+
+        A = np.random.rand(n, n)
+        B = np.random.rand(n, n)
+        res1 = quadratic_assignment(A, B, method=self.method)
+
+        np.random.seed(0)
+
+        A = np.random.rand(n, n)
+        B = np.random.rand(n, n)
+        res2 = quadratic_assignment(A, B, method=self.method)
+
+        assert_equal(res1.nit, res2.nit)
+
+    def test_partial_guess(self):
+        n = 5
+        A = np.random.rand(n, n)
+        B = np.random.rand(n, n)
+
+        res1 = quadratic_assignment(A, B, method=self.method,
+                                    options={'rng': 0})
+        guess = np.array([np.arange(5), res1.col_ind]).T
+        res2 = quadratic_assignment(A, B, method=self.method,
+                                    options={'rng': 0, 'partial_guess': guess})
+        fix = [2, 4]
+        match = np.array([np.arange(5)[fix], res1.col_ind[fix]]).T
+        res3 = quadratic_assignment(A, B, method=self.method,
+                                    options={'rng': 0, 'partial_guess': guess,
+                                             'partial_match': match})
+        assert_(res1.nit != n*(n+1)/2)
+        assert_equal(res2.nit, n*(n+1)/2)      # tests each swap exactly once
+        assert_equal(res3.nit, (n-2)*(n-1)/2)  # tests free swaps exactly once
+
+    def test_specific_input_validation(self):
+        # can't have more seed nodes than cost/dist nodes
+        _rm = _range_matrix
+        with pytest.raises(
+                ValueError,
+                match="`partial_guess` can have only as many entries as"):
+            quadratic_assignment(np.identity(3), np.identity(3),
+                                 method=self.method,
+                                 options={'partial_guess': _rm(5, 2)})
+        # test for only two seed columns
+        with pytest.raises(
+                ValueError, match="`partial_guess` must have two columns"):
+            quadratic_assignment(
+                np.identity(3), np.identity(3), method=self.method,
+                options={'partial_guess': _range_matrix(2, 3)}
+            )
+        # test that seed has no more than two dimensions
+        with pytest.raises(
+                ValueError, match="`partial_guess` must have exactly two"):
+            quadratic_assignment(
+                np.identity(3), np.identity(3), method=self.method,
+                options={'partial_guess': np.random.rand(3, 2, 2)}
+            )
+        # seeds cannot be negative valued
+        with pytest.raises(
+                ValueError, match="`partial_guess` must contain only pos"):
+            quadratic_assignment(
+                np.identity(3), np.identity(3), method=self.method,
+                options={'partial_guess': -1 * _range_matrix(2, 2)}
+            )
+        # seeds can't have values greater than number of nodes
+        with pytest.raises(
+                ValueError,
+                match="`partial_guess` entries must be less than number"):
+            quadratic_assignment(
+                np.identity(5), np.identity(5), method=self.method,
+                options={'partial_guess': 2 * _range_matrix(4, 2)}
+            )
+        # columns of seed matrix must be unique
+        with pytest.raises(
+                ValueError,
+                match="`partial_guess` column entries must be unique"):
+            quadratic_assignment(
+                np.identity(3), np.identity(3), method=self.method,
+                options={'partial_guess': np.ones((2, 2))}
+            )
+
+
+class TestQAPOnce:
+    def setup_method(self):
+        np.random.seed(0)
+
+    # these don't need to be repeated for each method
+    def test_common_input_validation(self):
+        # test that non square matrices return error
+        with pytest.raises(ValueError, match="`A` must be square"):
+            quadratic_assignment(
+                np.random.random((3, 4)),
+                np.random.random((3, 3)),
+            )
+        with pytest.raises(ValueError, match="`B` must be square"):
+            quadratic_assignment(
+                np.random.random((3, 3)),
+                np.random.random((3, 4)),
+            )
+        # test that cost and dist matrices have no more than two dimensions
+        with pytest.raises(
+                ValueError, match="`A` and `B` must have exactly two"):
+            quadratic_assignment(
+                np.random.random((3, 3, 3)),
+                np.random.random((3, 3, 3)),
+            )
+        # test that cost and dist matrices of different sizes return error
+        with pytest.raises(
+                ValueError,
+                match="`A` and `B` matrices must be of equal size"):
+            quadratic_assignment(
+                np.random.random((3, 3)),
+                np.random.random((4, 4)),
+            )
+        # can't have more seed nodes than cost/dist nodes
+        _rm = _range_matrix
+        with pytest.raises(
+                ValueError,
+                match="`partial_match` can have only as many seeds as"):
+            quadratic_assignment(np.identity(3), np.identity(3),
+                                 options={'partial_match': _rm(5, 2)})
+        # test for only two seed columns
+        with pytest.raises(
+                ValueError, match="`partial_match` must have two columns"):
+            quadratic_assignment(
+                np.identity(3), np.identity(3),
+                options={'partial_match': _range_matrix(2, 3)}
+            )
+        # test that seed has no more than two dimensions
+        with pytest.raises(
+                ValueError, match="`partial_match` must have exactly two"):
+            quadratic_assignment(
+                np.identity(3), np.identity(3),
+                options={'partial_match': np.random.rand(3, 2, 2)}
+            )
+        # seeds cannot be negative valued
+        with pytest.raises(
+                ValueError, match="`partial_match` must contain only pos"):
+            quadratic_assignment(
+                np.identity(3), np.identity(3),
+                options={'partial_match': -1 * _range_matrix(2, 2)}
+            )
+        # seeds can't have values greater than number of nodes
+        with pytest.raises(
+                ValueError,
+                match="`partial_match` entries must be less than number"):
+            quadratic_assignment(
+                np.identity(5), np.identity(5),
+                options={'partial_match': 2 * _range_matrix(4, 2)}
+            )
+        # columns of seed matrix must be unique
+        with pytest.raises(
+                ValueError,
+                match="`partial_match` column entries must be unique"):
+            quadratic_assignment(
+                np.identity(3), np.identity(3),
+                options={'partial_match': np.ones((2, 2))}
+            )
+
+
+def _range_matrix(a, b):
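+    # helper: each of the b columns is arange(a),
+    # e.g. _range_matrix(3, 2) -> [[0, 0], [1, 1], [2, 2]]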
+    mat = np.zeros((a, b))
+    for i in range(b):
+        mat[:, i] = np.arange(a)
+    return mat
+
+
+def _doubly_stochastic(P, tol=1e-3):
+    # cleaner implementation of btaba/sinkhorn_knopp
+    # Sinkhorn-Knopp iteration: alternately rescale columns and rows until
+    # every row and column sum is within `tol` of 1.
+
+    max_iter = 1000
+    c = 1 / P.sum(axis=0)  # column scaling factors
+    r = 1 / (P @ c)        # row scaling factors
+    P_eps = P
+
+    for it in range(max_iter):
+        if ((np.abs(P_eps.sum(axis=1) - 1) < tol).all() and
+                (np.abs(P_eps.sum(axis=0) - 1) < tol).all()):
+            # All column/row sums ~= 1 within threshold
+            break
+
+        c = 1 / (r @ P)
+        r = 1 / (P @ c)
+        P_eps = r[:, None] * P * c
+
+    return P_eps
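+
+# Illustration of the post-condition (not part of the test suite): for a
+# random positive matrix the result is approximately doubly stochastic, e.g.
+#
+#     P = _doubly_stochastic(np.random.rand(4, 4))
+#     assert np.allclose(P.sum(axis=0), 1, atol=1e-3)
+#     assert np.allclose(P.sum(axis=1), 1, atol=1e-3)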
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_regression.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_regression.py
new file mode 100644
index 00000000..44916ba9
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_regression.py
@@ -0,0 +1,40 @@
+"""Regression tests for optimize.
+
+"""
+import numpy as np
+from numpy.testing import assert_almost_equal
+from pytest import raises as assert_raises
+
+import scipy.optimize
+
+
+class TestRegression:
+
+    def test_newton_x0_is_0(self):
+        # Regression test for gh-1601
+        tgt = 1
+        res = scipy.optimize.newton(lambda x: x - 1, 0)
+        assert_almost_equal(res, tgt)
+
+    def test_newton_integers(self):
+        # Regression test for gh-1741
+        root = scipy.optimize.newton(lambda x: x**2 - 1, x0=2,
+                                    fprime=lambda x: 2*x)
+        assert_almost_equal(root, 1.0)
+
+    def test_lmdif_errmsg(self):
+        # This shouldn't cause a crash on Python 3
+        class SomeError(Exception):
+            pass
+        counter = [0]
+
+        def func(x):
+            counter[0] += 1
+            if counter[0] < 3:
+                return x**2 - np.array([9, 10, 11])
+            else:
+                raise SomeError()
+        assert_raises(SomeError,
+                      scipy.optimize.leastsq,
+                      func, [1, 2, 3])
+
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_slsqp.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_slsqp.py
new file mode 100644
index 00000000..1a7f2b37
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_slsqp.py
@@ -0,0 +1,604 @@
+"""
+Unit test for SLSQP optimization.
+"""
+from numpy.testing import (assert_, assert_array_almost_equal,
+                           assert_allclose, assert_equal)
+from pytest import raises as assert_raises
+import pytest
+import numpy as np
+
+from scipy.optimize import fmin_slsqp, minimize, Bounds, NonlinearConstraint
+
+
+class MyCallBack:
+    """pass a custom callback function
+
+    This makes sure it's being used.
+    """
+    def __init__(self):
+        self.been_called = False
+        self.ncalls = 0
+
+    def __call__(self, x):
+        self.been_called = True
+        self.ncalls += 1
+
+
+class TestSLSQP:
+    """
+    Test SLSQP algorithm using Example 14.4 from Numerical Methods for
+    Engineers by Steven Chapra and Raymond Canale.
+    This example maximizes the function f(x) = 2*x*y + 2*x - x**2 - 2*y**2,
+    which has a maximum at x=2, y=1.
+    """
+    def setup_method(self):
+        self.opts = {'disp': False}
+
+    def fun(self, d, sign=1.0):
+        """
+        Arguments:
+        d    - A list of two elements, where d[0] represents x and
+               d[1] represents y in the following equation.
+        sign - A multiplier for f. Since we want to maximize f, but the
+               SciPy optimizers can only minimize functions, we multiply
+               it by -1 to achieve the desired solution.
+        Returns:
+        2*x*y + 2*x - x**2 - 2*y**2
+
+        """
+        x = d[0]
+        y = d[1]
+        return sign*(2*x*y + 2*x - x**2 - 2*y**2)
+
+    def jac(self, d, sign=1.0):
+        """
+        This is the derivative of fun, returning a NumPy array
+        representing df/dx and df/dy.
+
+        """
+        x = d[0]
+        y = d[1]
+        dfdx = sign*(-2*x + 2*y + 2)
+        dfdy = sign*(2*x - 4*y)
+        return np.array([dfdx, dfdy], float)
+
+    def fun_and_jac(self, d, sign=1.0):
+        return self.fun(d, sign), self.jac(d, sign)
+
+    def f_eqcon(self, x, sign=1.0):
+        """ Equality constraint """
+        return np.array([x[0] - x[1]])
+
+    def fprime_eqcon(self, x, sign=1.0):
+        """ Equality constraint, derivative """
+        return np.array([[1, -1]])
+
+    def f_eqcon_scalar(self, x, sign=1.0):
+        """ Scalar equality constraint """
+        return self.f_eqcon(x, sign)[0]
+
+    def fprime_eqcon_scalar(self, x, sign=1.0):
+        """ Scalar equality constraint, derivative """
+        return self.fprime_eqcon(x, sign)[0].tolist()
+
+    def f_ieqcon(self, x, sign=1.0):
+        """ Inequality constraint """
+        return np.array([x[0] - x[1] - 1.0])
+
+    def fprime_ieqcon(self, x, sign=1.0):
+        """ Inequality constraint, derivative """
+        return np.array([[1, -1]])
+
+    def f_ieqcon2(self, x):
+        """ Vector inequality constraint """
+        return np.asarray(x)
+
+    def fprime_ieqcon2(self, x):
+        """ Vector inequality constraint, derivative """
+        return np.identity(x.shape[0])
+
+    # minimize
+    def test_minimize_unbounded_approximated(self):
+        # Minimize, method='SLSQP': unbounded, approximated jacobian.
+        jacs = [None, False, '2-point', '3-point']
+        for jac in jacs:
+            res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ),
+                           jac=jac, method='SLSQP',
+                           options=self.opts)
+            assert_(res['success'], res['message'])
+            assert_allclose(res.x, [2, 1])
+
+    def test_minimize_unbounded_given(self):
+        # Minimize, method='SLSQP': unbounded, given Jacobian.
+        res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ),
+                       jac=self.jac, method='SLSQP', options=self.opts)
+        assert_(res['success'], res['message'])
+        assert_allclose(res.x, [2, 1])
+
+    def test_minimize_bounded_approximated(self):
+        # Minimize, method='SLSQP': bounded, approximated jacobian.
+        jacs = [None, False, '2-point', '3-point']
+        for jac in jacs:
+            with np.errstate(invalid='ignore'):
+                res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ),
+                               jac=jac,
+                               bounds=((2.5, None), (None, 0.5)),
+                               method='SLSQP', options=self.opts)
+            assert_(res['success'], res['message'])
+            assert_allclose(res.x, [2.5, 0.5])
+            assert_(2.5 <= res.x[0])
+            assert_(res.x[1] <= 0.5)
+
+    def test_minimize_unbounded_combined(self):
+        # Minimize, method='SLSQP': unbounded, combined function and Jacobian.
+        res = minimize(self.fun_and_jac, [-1.0, 1.0], args=(-1.0, ),
+                       jac=True, method='SLSQP', options=self.opts)
+        assert_(res['success'], res['message'])
+        assert_allclose(res.x, [2, 1])
+
+    def test_minimize_equality_approximated(self):
+        # Minimize with method='SLSQP': equality constraint, approx. jacobian.
+        jacs = [None, False, '2-point', '3-point']
+        for jac in jacs:
+            res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ),
+                           jac=jac,
+                           constraints={'type': 'eq',
+                                        'fun': self.f_eqcon,
+                                        'args': (-1.0, )},
+                           method='SLSQP', options=self.opts)
+            assert_(res['success'], res['message'])
+            assert_allclose(res.x, [1, 1])
+
+    def test_minimize_equality_given(self):
+        # Minimize with method='SLSQP': equality constraint, given Jacobian.
+        res = minimize(self.fun, [-1.0, 1.0], jac=self.jac,
+                       method='SLSQP', args=(-1.0,),
+                       constraints={'type': 'eq', 'fun':self.f_eqcon,
+                                    'args': (-1.0, )},
+                       options=self.opts)
+        assert_(res['success'], res['message'])
+        assert_allclose(res.x, [1, 1])
+
+    def test_minimize_equality_given2(self):
+        # Minimize with method='SLSQP': equality constraint, given Jacobian
+        # for fun and const.
+        res = minimize(self.fun, [-1.0, 1.0], method='SLSQP',
+                       jac=self.jac, args=(-1.0,),
+                       constraints={'type': 'eq',
+                                    'fun': self.f_eqcon,
+                                    'args': (-1.0, ),
+                                    'jac': self.fprime_eqcon},
+                       options=self.opts)
+        assert_(res['success'], res['message'])
+        assert_allclose(res.x, [1, 1])
+
+    def test_minimize_equality_given_cons_scalar(self):
+        # Minimize with method='SLSQP': scalar equality constraint, given
+        # Jacobian for fun and const.
+        res = minimize(self.fun, [-1.0, 1.0], method='SLSQP',
+                       jac=self.jac, args=(-1.0,),
+                       constraints={'type': 'eq',
+                                    'fun': self.f_eqcon_scalar,
+                                    'args': (-1.0, ),
+                                    'jac': self.fprime_eqcon_scalar},
+                       options=self.opts)
+        assert_(res['success'], res['message'])
+        assert_allclose(res.x, [1, 1])
+
+    def test_minimize_inequality_given(self):
+        # Minimize with method='SLSQP': inequality constraint, given Jacobian.
+        res = minimize(self.fun, [-1.0, 1.0], method='SLSQP',
+                       jac=self.jac, args=(-1.0, ),
+                       constraints={'type': 'ineq',
+                                    'fun': self.f_ieqcon,
+                                    'args': (-1.0, )},
+                       options=self.opts)
+        assert_(res['success'], res['message'])
+        assert_allclose(res.x, [2, 1], atol=1e-3)
+
+    def test_minimize_inequality_given_vector_constraints(self):
+        # Minimize with method='SLSQP': vector inequality constraint, given
+        # Jacobian.
+        res = minimize(self.fun, [-1.0, 1.0], jac=self.jac,
+                       method='SLSQP', args=(-1.0,),
+                       constraints={'type': 'ineq',
+                                    'fun': self.f_ieqcon2,
+                                    'jac': self.fprime_ieqcon2},
+                       options=self.opts)
+        assert_(res['success'], res['message'])
+        assert_allclose(res.x, [2, 1])
+
+    def test_minimize_bounded_constraint(self):
+        # when the constraint makes the solver go up against a parameter
+        # bound make sure that the numerical differentiation of the
+        # jacobian doesn't try to exceed that bound using a finite difference.
+        # gh11403
+        def c(x):
+            assert 0 <= x[0] <= 1 and 0 <= x[1] <= 1, x
+            return x[0] ** 0.5 + x[1]
+
+        def f(x):
+            assert 0 <= x[0] <= 1 and 0 <= x[1] <= 1, x
+            return -x[0] ** 2 + x[1] ** 2
+
+        cns = [NonlinearConstraint(c, 0, 1.5)]
+        x0 = np.asarray([0.9, 0.5])
+        bnd = Bounds([0., 0.], [1.0, 1.0])
+        minimize(f, x0, method='SLSQP', bounds=bnd, constraints=cns)
+
+    def test_minimize_bound_equality_given2(self):
+        # Minimize with method='SLSQP': bounds, eq. const., given jac. for
+        # fun. and const.
+        res = minimize(self.fun, [-1.0, 1.0], method='SLSQP',
+                       jac=self.jac, args=(-1.0, ),
+                       bounds=[(-0.8, 1.), (-1, 0.8)],
+                       constraints={'type': 'eq',
+                                    'fun': self.f_eqcon,
+                                    'args': (-1.0, ),
+                                    'jac': self.fprime_eqcon},
+                       options=self.opts)
+        assert_(res['success'], res['message'])
+        assert_allclose(res.x, [0.8, 0.8], atol=1e-3)
+        assert_(-0.8 <= res.x[0] <= 1)
+        assert_(-1 <= res.x[1] <= 0.8)
+
+    # fmin_slsqp
+    def test_unbounded_approximated(self):
+        # SLSQP: unbounded, approximated Jacobian.
+        res = fmin_slsqp(self.fun, [-1.0, 1.0], args=(-1.0, ),
+                         iprint = 0, full_output = 1)
+        x, fx, its, imode, smode = res
+        assert_(imode == 0, imode)
+        assert_array_almost_equal(x, [2, 1])
+
+    def test_unbounded_given(self):
+        # SLSQP: unbounded, given Jacobian.
+        res = fmin_slsqp(self.fun, [-1.0, 1.0], args=(-1.0, ),
+                         fprime = self.jac, iprint = 0,
+                         full_output = 1)
+        x, fx, its, imode, smode = res
+        assert_(imode == 0, imode)
+        assert_array_almost_equal(x, [2, 1])
+
+    def test_equality_approximated(self):
+        # SLSQP: equality constraint, approximated Jacobian.
+        res = fmin_slsqp(self.fun,[-1.0,1.0], args=(-1.0,),
+                         eqcons = [self.f_eqcon],
+                         iprint = 0, full_output = 1)
+        x, fx, its, imode, smode = res
+        assert_(imode == 0, imode)
+        assert_array_almost_equal(x, [1, 1])
+
+    def test_equality_given(self):
+        # SLSQP: equality constraint, given Jacobian.
+        res = fmin_slsqp(self.fun, [-1.0, 1.0],
+                         fprime=self.jac, args=(-1.0,),
+                         eqcons = [self.f_eqcon], iprint = 0,
+                         full_output = 1)
+        x, fx, its, imode, smode = res
+        assert_(imode == 0, imode)
+        assert_array_almost_equal(x, [1, 1])
+
+    def test_equality_given2(self):
+        # SLSQP: equality constraint, given Jacobian for fun and const.
+        res = fmin_slsqp(self.fun, [-1.0, 1.0],
+                         fprime=self.jac, args=(-1.0,),
+                         f_eqcons = self.f_eqcon,
+                         fprime_eqcons = self.fprime_eqcon,
+                         iprint = 0,
+                         full_output = 1)
+        x, fx, its, imode, smode = res
+        assert_(imode == 0, imode)
+        assert_array_almost_equal(x, [1, 1])
+
+    def test_inequality_given(self):
+        # SLSQP: inequality constraint, given Jacobian.
+        res = fmin_slsqp(self.fun, [-1.0, 1.0],
+                         fprime=self.jac, args=(-1.0, ),
+                         ieqcons = [self.f_ieqcon],
+                         iprint = 0, full_output = 1)
+        x, fx, its, imode, smode = res
+        assert_(imode == 0, imode)
+        assert_array_almost_equal(x, [2, 1], decimal=3)
+
+    def test_bound_equality_given2(self):
+        # SLSQP: bounds, eq. const., given jac. for fun. and const.
+        res = fmin_slsqp(self.fun, [-1.0, 1.0],
+                         fprime=self.jac, args=(-1.0, ),
+                         bounds = [(-0.8, 1.), (-1, 0.8)],
+                         f_eqcons = self.f_eqcon,
+                         fprime_eqcons = self.fprime_eqcon,
+                         iprint = 0, full_output = 1)
+        x, fx, its, imode, smode = res
+        assert_(imode == 0, imode)
+        assert_array_almost_equal(x, [0.8, 0.8], decimal=3)
+        assert_(-0.8 <= x[0] <= 1)
+        assert_(-1 <= x[1] <= 0.8)
+
+    def test_scalar_constraints(self):
+        # Regression test for gh-2182
+        x = fmin_slsqp(lambda z: z**2, [3.],
+                       ieqcons=[lambda z: z[0] - 1],
+                       iprint=0)
+        assert_array_almost_equal(x, [1.])
+
+        x = fmin_slsqp(lambda z: z**2, [3.],
+                       f_ieqcons=lambda z: [z[0] - 1],
+                       iprint=0)
+        assert_array_almost_equal(x, [1.])
+
+    def test_integer_bounds(self):
+        # This should not raise an exception
+        fmin_slsqp(lambda z: z**2 - 1, [0], bounds=[[0, 1]], iprint=0)
+
+    def test_array_bounds(self):
+        # NumPy used to treat n-dimensional 1-element arrays as scalars
+        # in some cases.  The handling of `bounds` by `fmin_slsqp` still
+        # supports this behavior.
+        bounds = [(-np.inf, np.inf), (np.array([2]), np.array([3]))]
+        x = fmin_slsqp(lambda z: np.sum(z**2 - 1), [2.5, 2.5], bounds=bounds,
+                       iprint=0)
+        assert_array_almost_equal(x, [0, 2])
+
+    def test_obj_must_return_scalar(self):
+        # Regression test for Github Issue #5433
+        # If objective function does not return a scalar, raises ValueError
+        with assert_raises(ValueError):
+            fmin_slsqp(lambda x: [0, 1], [1, 2, 3])
+
+    def test_obj_returns_scalar_in_list(self):
+        # Test for Github Issue #5433 and PR #6691
+        # Objective function should be able to return length-1 Python list
+        #  containing the scalar
+        fmin_slsqp(lambda x: [0], [1, 2, 3], iprint=0)
+
+    def test_callback(self):
+        # Minimize, method='SLSQP': unbounded, approximated jacobian. Check for callback
+        callback = MyCallBack()
+        res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ),
+                       method='SLSQP', callback=callback, options=self.opts)
+        assert_(res['success'], res['message'])
+        assert_(callback.been_called)
+        assert_equal(callback.ncalls, res['nit'])
+
+    def test_inconsistent_linearization(self):
+        # SLSQP must be able to solve this problem, even if the
+        # linearized problem at the starting point is infeasible.
+
+        # Linearized constraints are
+        #
+        #    2*x0[0]*x[0] >= 1
+        #
+        # At x0 = [0, 1], the second constraint is clearly infeasible.
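+        # (substituting x0[0] = 0 gives 0 >= 1, which cannot hold).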
+        # This triggers a call with n2==1 in the LSQ subroutine.
+        x = [0, 1]
+        f1 = lambda x: x[0] + x[1] - 2
+        f2 = lambda x: x[0]**2 - 1
+        sol = minimize(
+            lambda x: x[0]**2 + x[1]**2,
+            x,
+            constraints=({'type':'eq','fun': f1},
+                         {'type':'ineq','fun': f2}),
+            bounds=((0,None), (0,None)),
+            method='SLSQP')
+        x = sol.x
+
+        assert_allclose(f1(x), 0, atol=1e-8)
+        assert_(f2(x) >= -1e-8)
+        assert_(sol.success, sol)
+
+    def test_regression_5743(self):
+        # SLSQP must not indicate success for this problem,
+        # which is infeasible.
+        x = [1, 2]
+        sol = minimize(
+            lambda x: x[0]**2 + x[1]**2,
+            x,
+            constraints=({'type':'eq','fun': lambda x: x[0]+x[1]-1},
+                         {'type':'ineq','fun': lambda x: x[0]-2}),
+            bounds=((0,None), (0,None)),
+            method='SLSQP')
+        assert_(not sol.success, sol)
+
+    def test_gh_6676(self):
+        def func(x):
+            return (x[0] - 1)**2 + 2*(x[1] - 1)**2 + 0.5*(x[2] - 1)**2
+
+        sol = minimize(func, [0, 0, 0], method='SLSQP')
+        assert_(sol.jac.shape == (3,))
+
+    def test_invalid_bounds(self):
+        # Raise correct error when lower bound is greater than upper bound.
+        # See Github issue 6875.
+        bounds_list = [
+            ((1, 2), (2, 1)),
+            ((2, 1), (1, 2)),
+            ((2, 1), (2, 1)),
+            ((np.inf, 0), (np.inf, 0)),
+            ((1, -np.inf), (0, 1)),
+        ]
+        for bounds in bounds_list:
+            with assert_raises(ValueError):
+                minimize(self.fun, [-1.0, 1.0], bounds=bounds, method='SLSQP')
+
+    def test_bounds_clipping(self):
+        #
+        # SLSQP returns bogus results for initial guess out of bounds, gh-6859
+        #
+        def f(x):
+            return (x[0] - 1)**2
+
+        sol = minimize(f, [10], method='slsqp', bounds=[(None, 0)])
+        assert_(sol.success)
+        assert_allclose(sol.x, 0, atol=1e-10)
+
+        sol = minimize(f, [-10], method='slsqp', bounds=[(2, None)])
+        assert_(sol.success)
+        assert_allclose(sol.x, 2, atol=1e-10)
+
+        sol = minimize(f, [-10], method='slsqp', bounds=[(None, 0)])
+        assert_(sol.success)
+        assert_allclose(sol.x, 0, atol=1e-10)
+
+        sol = minimize(f, [10], method='slsqp', bounds=[(2, None)])
+        assert_(sol.success)
+        assert_allclose(sol.x, 2, atol=1e-10)
+
+        sol = minimize(f, [-0.5], method='slsqp', bounds=[(-1, 0)])
+        assert_(sol.success)
+        assert_allclose(sol.x, 0, atol=1e-10)
+
+        sol = minimize(f, [10], method='slsqp', bounds=[(-1, 0)])
+        assert_(sol.success)
+        assert_allclose(sol.x, 0, atol=1e-10)
+
+    def test_infeasible_initial(self):
+        # Check SLSQP behavior with infeasible initial point
+        def f(x):
+            x, = x
+            return x*x - 2*x + 1
+
+        cons_u = [{'type': 'ineq', 'fun': lambda x: 0 - x}]
+        cons_l = [{'type': 'ineq', 'fun': lambda x: x - 2}]
+        cons_ul = [{'type': 'ineq', 'fun': lambda x: 0 - x},
+                   {'type': 'ineq', 'fun': lambda x: x + 1}]
+
+        sol = minimize(f, [10], method='slsqp', constraints=cons_u)
+        assert_(sol.success)
+        assert_allclose(sol.x, 0, atol=1e-10)
+
+        sol = minimize(f, [-10], method='slsqp', constraints=cons_l)
+        assert_(sol.success)
+        assert_allclose(sol.x, 2, atol=1e-10)
+
+        sol = minimize(f, [-10], method='slsqp', constraints=cons_u)
+        assert_(sol.success)
+        assert_allclose(sol.x, 0, atol=1e-10)
+
+        sol = minimize(f, [10], method='slsqp', constraints=cons_l)
+        assert_(sol.success)
+        assert_allclose(sol.x, 2, atol=1e-10)
+
+        sol = minimize(f, [-0.5], method='slsqp', constraints=cons_ul)
+        assert_(sol.success)
+        assert_allclose(sol.x, 0, atol=1e-10)
+
+        sol = minimize(f, [10], method='slsqp', constraints=cons_ul)
+        assert_(sol.success)
+        assert_allclose(sol.x, 0, atol=1e-10)
+
+    def test_inconsistent_inequalities(self):
+        # gh-7618
+
+        def cost(x):
+            return -1 * x[0] + 4 * x[1]
+
+        def ineqcons1(x):
+            return x[1] - x[0] - 1
+
+        def ineqcons2(x):
+            return x[0] - x[1]
+
+        # The inequalities are inconsistent, so no solution can exist:
+        #
+        # x1 >= x0 + 1
+        # x0 >= x1
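+        # Adding them gives x1 >= x1 + 1, i.e. 0 >= 1: a contradiction.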
+
+        x0 = (1,5)
+        bounds = ((-5, 5), (-5, 5))
+        cons = (dict(type='ineq', fun=ineqcons1), dict(type='ineq', fun=ineqcons2))
+        res = minimize(cost, x0, method='SLSQP', bounds=bounds, constraints=cons)
+
+        assert_(not res.success)
+
+    def test_new_bounds_type(self):
+        f = lambda x: x[0]**2 + x[1]**2
+        bounds = Bounds([1, 0], [np.inf, np.inf])
+        sol = minimize(f, [0, 0], method='slsqp', bounds=bounds)
+        assert_(sol.success)
+        assert_allclose(sol.x, [1, 0])
+
+    def test_nested_minimization(self):
+
+        class NestedProblem:
+
+            def __init__(self):
+                self.F_outer_count = 0
+
+            def F_outer(self, x):
+                self.F_outer_count += 1
+                if self.F_outer_count > 1000:
+                    raise Exception("Nested minimization failed to terminate.")
+                inner_res = minimize(self.F_inner, (3, 4), method="SLSQP")
+                assert_(inner_res.success)
+                assert_allclose(inner_res.x, [1, 1])
+                return x[0]**2 + x[1]**2 + x[2]**2
+
+            def F_inner(self, x):
+                return (x[0] - 1)**2 + (x[1] - 1)**2
+
+            def solve(self):
+                outer_res = minimize(self.F_outer, (5, 5, 5), method="SLSQP")
+                assert_(outer_res.success)
+                assert_allclose(outer_res.x, [0, 0, 0])
+
+        problem = NestedProblem()
+        problem.solve()
+
+    def test_gh1758(self):
+        # the test suggested in gh1758
+        # https://nlopt.readthedocs.io/en/latest/NLopt_Tutorial/
+        # implement two equality constraints, in R^2.
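+        # The analytic optimum is x = (1/3, 8/27) with
+        # fun = sqrt(8/27) ~= 0.5443310539518, as asserted below.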
+        def fun(x):
+            return np.sqrt(x[1])
+
+        def f_eqcon(x):
+            """ Equality constraint """
+            return x[1] - (2 * x[0]) ** 3
+
+        def f_eqcon2(x):
+            """ Equality constraint """
+            return x[1] - (-x[0] + 1) ** 3
+
+        c1 = {'type': 'eq', 'fun': f_eqcon}
+        c2 = {'type': 'eq', 'fun': f_eqcon2}
+
+        res = minimize(fun, [8, 0.25], method='SLSQP',
+                       constraints=[c1, c2], bounds=[(-0.5, 1), (0, 8)])
+
+        np.testing.assert_allclose(res.fun, 0.5443310539518)
+        np.testing.assert_allclose(res.x, [0.33333333, 0.2962963])
+        assert res.success
+
+    def test_gh9640(self):
+        np.random.seed(10)
+        cons = ({'type': 'ineq', 'fun': lambda x: -x[0] - x[1] - 3},
+                {'type': 'ineq', 'fun': lambda x: x[1] + x[2] - 2})
+        bnds = ((-2, 2), (-2, 2), (-2, 2))
+
+        target = lambda x: 1
+        x0 = [-1.8869783504471584, -0.640096352696244, -0.8174212253407696]
+        res = minimize(target, x0, method='SLSQP', bounds=bnds, constraints=cons,
+                       options={'disp':False, 'maxiter':10000})
+
+        # The problem is infeasible, so it cannot succeed
+        assert not res.success
+
+    def test_parameters_stay_within_bounds(self):
+        # gh11403. For some problems the SLSQP Fortran code suggests a step
+        # outside one of the lower/upper bounds. When this happens
+        # approx_derivative complains because it's being asked to evaluate
+        # a gradient outside its domain.
+        np.random.seed(1)
+        bounds = Bounds(np.array([0.1]), np.array([1.0]))
+        n_inputs = len(bounds.lb)
+        x0 = np.array(bounds.lb + (bounds.ub - bounds.lb) *
+                      np.random.random(n_inputs))
+
+        def f(x):
+            assert (x >= bounds.lb).all()
+            return np.linalg.norm(x)
+
+        with pytest.warns(RuntimeWarning, match='x were outside bounds'):
+            res = minimize(f, x0, method='SLSQP', bounds=bounds)
+            assert res.success
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_tnc.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_tnc.py
new file mode 100644
index 00000000..82839476
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_tnc.py
@@ -0,0 +1,355 @@
+"""
+Unit tests for TNC optimization routine from tnc.py
+"""
+import pytest
+from numpy.testing import assert_allclose, assert_equal
+
+import numpy as np
+from math import pow
+
+from scipy import optimize
+from scipy.sparse._sputils import matrix
+
+
+class TestTnc:
+    """TNC non-linear optimization.
+
+    These tests are taken from Prof. K. Schittkowski's test examples
+    for constrained non-linear programming.
+
+    http://www.uni-bayreuth.de/departments/math/~kschittkowski/home.htm
+
+    """
+    def setup_method(self):
+        # options for minimize
+        self.opts = {'disp': False, 'maxfun': 200}
+
+    # objective functions and Jacobian for each test
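+    # (f1/g1 is the classic Rosenbrock function and its gradient;
+    # unconstrained minimum at x = [1, 1])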
+    def f1(self, x, a=100.0):
+        return a * pow((x[1] - pow(x[0], 2)), 2) + pow(1.0 - x[0], 2)
+
+    def g1(self, x, a=100.0):
+        dif = [0, 0]
+        dif[1] = 2 * a * (x[1] - pow(x[0], 2))
+        dif[0] = -2.0 * (x[0] * (dif[1] - 1.0) + 1.0)
+        return dif
+
+    def fg1(self, x, a=100.0):
+        return self.f1(x, a), self.g1(x, a)
+
+    def f3(self, x):
+        return x[1] + pow(x[1] - x[0], 2) * 1.0e-5
+
+    def g3(self, x):
+        dif = [0, 0]
+        dif[0] = -2.0 * (x[1] - x[0]) * 1.0e-5
+        dif[1] = 1.0 - dif[0]
+        return dif
+
+    def fg3(self, x):
+        return self.f3(x), self.g3(x)
+
+    def f4(self, x):
+        return pow(x[0] + 1.0, 3) / 3.0 + x[1]
+
+    def g4(self, x):
+        dif = [0, 0]
+        dif[0] = pow(x[0] + 1.0, 2)
+        dif[1] = 1.0
+        return dif
+
+    def fg4(self, x):
+        return self.f4(x), self.g4(x)
+
+    def f5(self, x):
+        return np.sin(x[0] + x[1]) + pow(x[0] - x[1], 2) - \
+                1.5 * x[0] + 2.5 * x[1] + 1.0
+
+    def g5(self, x):
+        dif = [0, 0]
+        v1 = np.cos(x[0] + x[1])
+        v2 = 2.0*(x[0] - x[1])
+
+        dif[0] = v1 + v2 - 1.5
+        dif[1] = v1 - v2 + 2.5
+        return dif
+
+    def fg5(self, x):
+        return self.f5(x), self.g5(x)
+
+    def f38(self, x):
+        return (100.0 * pow(x[1] - pow(x[0], 2), 2) +
+                pow(1.0 - x[0], 2) + 90.0 * pow(x[3] - pow(x[2], 2), 2) +
+                pow(1.0 - x[2], 2) + 10.1 * (pow(x[1] - 1.0, 2) +
+                                             pow(x[3] - 1.0, 2)) +
+                19.8 * (x[1] - 1.0) * (x[3] - 1.0)) * 1.0e-5
+
+    def g38(self, x):
+        dif = [0, 0, 0, 0]
+        dif[0] = (-400.0 * x[0] * (x[1] - pow(x[0], 2)) -
+                  2.0 * (1.0 - x[0])) * 1.0e-5
+        dif[1] = (200.0 * (x[1] - pow(x[0], 2)) + 20.2 * (x[1] - 1.0) +
+                  19.8 * (x[3] - 1.0)) * 1.0e-5
+        dif[2] = (- 360.0 * x[2] * (x[3] - pow(x[2], 2)) -
+                  2.0 * (1.0 - x[2])) * 1.0e-5
+        dif[3] = (180.0 * (x[3] - pow(x[2], 2)) + 20.2 * (x[3] - 1.0) +
+                  19.8 * (x[1] - 1.0)) * 1.0e-5
+        return dif
+
+    def fg38(self, x):
+        return self.f38(x), self.g38(x)
+
+    def f45(self, x):
+        return 2.0 - x[0] * x[1] * x[2] * x[3] * x[4] / 120.0
+
+    def g45(self, x):
+        dif = [0] * 5
+        dif[0] = - x[1] * x[2] * x[3] * x[4] / 120.0
+        dif[1] = - x[0] * x[2] * x[3] * x[4] / 120.0
+        dif[2] = - x[0] * x[1] * x[3] * x[4] / 120.0
+        dif[3] = - x[0] * x[1] * x[2] * x[4] / 120.0
+        dif[4] = - x[0] * x[1] * x[2] * x[3] / 120.0
+        return dif
+
+    def fg45(self, x):
+        return self.f45(x), self.g45(x)
+
+    # tests
+    # minimize with method=TNC
+    def test_minimize_tnc1(self):
+        x0, bnds = [-2, 1], ([-np.inf, None], [-1.5, None])
+        xopt = [1, 1]
+        iterx = []  # to test callback
+
+        res = optimize.minimize(self.f1, x0, method='TNC', jac=self.g1,
+                                bounds=bnds, options=self.opts,
+                                callback=iterx.append)
+        assert_allclose(res.fun, self.f1(xopt), atol=1e-8)
+        assert_equal(len(iterx), res.nit)
+
+    def test_minimize_tnc1b(self):
+        x0, bnds = matrix([-2, 1]), ([-np.inf, None],[-1.5, None])
+        xopt = [1, 1]
+        message = 'Use of `minimize` with `x0.ndim != 1` is deprecated.'
+        with pytest.warns(DeprecationWarning, match=message):
+            x = optimize.minimize(self.f1, x0, method='TNC',
+                                  bounds=bnds, options=self.opts).x
+            assert_allclose(self.f1(x), self.f1(xopt), atol=1e-4)
+
+    def test_minimize_tnc1c(self):
+        x0, bnds = [-2, 1], ([-np.inf, None],[-1.5, None])
+        xopt = [1, 1]
+        x = optimize.minimize(self.fg1, x0, method='TNC',
+                              jac=True, bounds=bnds,
+                              options=self.opts).x
+        assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8)
+
+    def test_minimize_tnc2(self):
+        x0, bnds = [-2, 1], ([-np.inf, None], [1.5, None])
+        xopt = [-1.2210262419616387, 1.5]
+        x = optimize.minimize(self.f1, x0, method='TNC',
+                              jac=self.g1, bounds=bnds,
+                              options=self.opts).x
+        assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8)
+
+    def test_minimize_tnc3(self):
+        x0, bnds = [10, 1], ([-np.inf, None], [0.0, None])
+        xopt = [0, 0]
+        x = optimize.minimize(self.f3, x0, method='TNC',
+                              jac=self.g3, bounds=bnds,
+                              options=self.opts).x
+        assert_allclose(self.f3(x), self.f3(xopt), atol=1e-8)
+
+    def test_minimize_tnc4(self):
+        x0,bnds = [1.125, 0.125], [(1, None), (0, None)]
+        xopt = [1, 0]
+        x = optimize.minimize(self.f4, x0, method='TNC',
+                              jac=self.g4, bounds=bnds,
+                              options=self.opts).x
+        assert_allclose(self.f4(x), self.f4(xopt), atol=1e-8)
+
+    def test_minimize_tnc5(self):
+        x0, bnds = [0, 0], [(-1.5, 4),(-3, 3)]
+        xopt = [-0.54719755119659763, -1.5471975511965976]
+        x = optimize.minimize(self.f5, x0, method='TNC',
+                              jac=self.g5, bounds=bnds,
+                              options=self.opts).x
+        assert_allclose(self.f5(x), self.f5(xopt), atol=1e-8)
+
+    def test_minimize_tnc38(self):
+        x0, bnds = np.array([-3, -1, -3, -1]), [(-10, 10)]*4
+        xopt = [1]*4
+        x = optimize.minimize(self.f38, x0, method='TNC',
+                              jac=self.g38, bounds=bnds,
+                              options=self.opts).x
+        assert_allclose(self.f38(x), self.f38(xopt), atol=1e-8)
+
+    def test_minimize_tnc45(self):
+        x0, bnds = [2] * 5, [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5)]
+        xopt = [1, 2, 3, 4, 5]
+        x = optimize.minimize(self.f45, x0, method='TNC',
+                              jac=self.g45, bounds=bnds,
+                              options=self.opts).x
+        assert_allclose(self.f45(x), self.f45(xopt), atol=1e-8)
+
+    # fmin_tnc
+    def test_tnc1(self):
+        fg, x, bounds = self.fg1, [-2, 1], ([-np.inf, None], [-1.5, None])
+        xopt = [1, 1]
+
+        x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds, args=(100.0, ),
+                                      messages=optimize._tnc.MSG_NONE,
+                                      maxfun=200)
+
+        assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8,
+                        err_msg="TNC failed with status: " +
+                                optimize._tnc.RCSTRINGS[rc])
+
+    def test_tnc1b(self):
+        x, bounds = [-2, 1], ([-np.inf, None], [-1.5, None])
+        xopt = [1, 1]
+
+        x, nf, rc = optimize.fmin_tnc(self.f1, x, approx_grad=True,
+                                      bounds=bounds,
+                                      messages=optimize._tnc.MSG_NONE,
+                                      maxfun=200)
+
+        assert_allclose(self.f1(x), self.f1(xopt), atol=1e-4,
+                        err_msg="TNC failed with status: " +
+                                optimize._tnc.RCSTRINGS[rc])
+
+    def test_tnc1c(self):
+        x, bounds = [-2, 1], ([-np.inf, None], [-1.5, None])
+        xopt = [1, 1]
+
+        x, nf, rc = optimize.fmin_tnc(self.f1, x, fprime=self.g1,
+                                      bounds=bounds,
+                                      messages=optimize._tnc.MSG_NONE,
+                                      maxfun=200)
+
+        assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8,
+                        err_msg="TNC failed with status: " +
+                                optimize._tnc.RCSTRINGS[rc])
+
+    def test_tnc2(self):
+        fg, x, bounds = self.fg1, [-2, 1], ([-np.inf, None], [1.5, None])
+        xopt = [-1.2210262419616387, 1.5]
+
+        x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
+                                      messages=optimize._tnc.MSG_NONE,
+                                      maxfun=200)
+
+        assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8,
+                        err_msg="TNC failed with status: " +
+                                optimize._tnc.RCSTRINGS[rc])
+
+    def test_tnc3(self):
+        fg, x, bounds = self.fg3, [10, 1], ([-np.inf, None], [0.0, None])
+        xopt = [0, 0]
+
+        x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
+                                      messages=optimize._tnc.MSG_NONE,
+                                      maxfun=200)
+
+        assert_allclose(self.f3(x), self.f3(xopt), atol=1e-8,
+                        err_msg="TNC failed with status: " +
+                                optimize._tnc.RCSTRINGS[rc])
+
+    def test_tnc4(self):
+        fg, x, bounds = self.fg4, [1.125, 0.125], [(1, None), (0, None)]
+        xopt = [1, 0]
+
+        x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
+                                      messages=optimize._tnc.MSG_NONE,
+                                      maxfun=200)
+
+        assert_allclose(self.f4(x), self.f4(xopt), atol=1e-8,
+                        err_msg="TNC failed with status: " +
+                                optimize._tnc.RCSTRINGS[rc])
+
+    def test_tnc5(self):
+        fg, x, bounds = self.fg5, [0, 0], [(-1.5, 4), (-3, 3)]
+        xopt = [-0.54719755119659763, -1.5471975511965976]
+
+        x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
+                                      messages=optimize._tnc.MSG_NONE,
+                                      maxfun=200)
+
+        assert_allclose(self.f5(x), self.f5(xopt), atol=1e-8,
+                        err_msg="TNC failed with status: " +
+                                optimize._tnc.RCSTRINGS[rc])
+
+    def test_tnc38(self):
+        fg, x, bounds = self.fg38, np.array([-3, -1, -3, -1]), [(-10, 10)]*4
+        xopt = [1]*4
+
+        x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
+                                      messages=optimize._tnc.MSG_NONE,
+                                      maxfun=200)
+
+        assert_allclose(self.f38(x), self.f38(xopt), atol=1e-8,
+                        err_msg="TNC failed with status: " +
+                                optimize._tnc.RCSTRINGS[rc])
+
+    def test_tnc45(self):
+        fg, x, bounds = self.fg45, [2] * 5, [(0, 1), (0, 2), (0, 3),
+                                             (0, 4), (0, 5)]
+        xopt = [1, 2, 3, 4, 5]
+
+        x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
+                                      messages=optimize._tnc.MSG_NONE,
+                                      maxfun=200)
+
+        assert_allclose(self.f45(x), self.f45(xopt), atol=1e-8,
+                        err_msg="TNC failed with status: " +
+                                optimize._tnc.RCSTRINGS[rc])
+
+    def test_raising_exceptions(self):
+        # TNC was ported to Cython from hand-crafted CPython code;
+        # check that exception handling still works.
+        def myfunc(x):
+            raise RuntimeError("myfunc")
+
+        def myfunc1(x):
+            return optimize.rosen(x)
+
+        def callback(x):
+            raise ValueError("callback")
+
+        with pytest.raises(RuntimeError):
+            optimize.minimize(myfunc, [0, 1], method="TNC")
+
+        with pytest.raises(ValueError):
+            optimize.minimize(
+                myfunc1, [0, 1], method="TNC", callback=callback
+            )
+
+    def test_callback_shouldnt_affect_minimization(self):
+        # gh14879. The output of a TNC minimization was different depending
+        # on whether a callback was used or not. The two should be equivalent.
+        # The issue was that TNC was unscaling/scaling x, and this process was
+        # altering x in the process. Now the callback uses an unscaled
+        # temporary copy of x.
+        def callback(x):
+            pass
+
+        fun = optimize.rosen
+        bounds = [(0, 10)] * 4
+        x0 = [1, 2, 3, 4.]
+        res = optimize.minimize(
+            fun, x0, bounds=bounds, method="TNC", options={"maxfun": 1000}
+        )
+        res2 = optimize.minimize(
+            fun, x0, bounds=bounds, method="TNC", options={"maxfun": 1000},
+            callback=callback
+        )
+        assert_allclose(res2.x, res.x)
+        assert_allclose(res2.fun, res.fun)
+        assert_equal(res2.nfev, res.nfev)
+
+    def test_maxiter_deprecations(self):
+        msg = "'maxiter' has been deprecated in favor of 'maxfun'"
+        with pytest.warns(DeprecationWarning, match=msg):
+            optimize.minimize(
+                self.f1, [1, 3], method="TNC", options={"maxiter": 1}
+            )
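+
+    # Editorial usage sketch (added for illustration; not in the original
+    # suite): `fmin_tnc` and `minimize(..., method='TNC')` drive the same
+    # underlying solver. The legacy interface returns (x, nfeval, rc) while
+    # `minimize` returns an OptimizeResult; both should land on the same
+    # minimizer here.
+    def test_interfaces_agree_sketch(self):
+        bnds = [(-np.inf, None), (-1.5, None)]
+        res = optimize.minimize(self.f1, [-2, 1], method='TNC',
+                                jac=self.g1, bounds=bnds, options=self.opts)
+        x, nf, rc = optimize.fmin_tnc(self.fg1, [-2, 1], bounds=bnds,
+                                      messages=optimize._tnc.MSG_NONE,
+                                      maxfun=200)
+        assert_allclose(self.f1(res.x), self.f1(x), atol=1e-6)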
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_trustregion.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_trustregion.py
new file mode 100644
index 00000000..24663f18
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_trustregion.py
@@ -0,0 +1,112 @@
+"""
+Unit tests for trust-region optimization routines.
+
+To run it in its simplest form::
+  pytest test_trustregion.py
+
+"""
+import pytest
+import numpy as np
+from numpy.testing import assert_, assert_equal, assert_allclose
+from scipy.optimize import (minimize, rosen, rosen_der, rosen_hess,
+                            rosen_hess_prod)
+
+
+class Accumulator:
+    """ This is for testing callbacks."""
+    def __init__(self):
+        self.count = 0
+        self.accum = None
+
+    def __call__(self, x):
+        self.count += 1
+        if self.accum is None:
+            self.accum = np.array(x)
+        else:
+            self.accum += x
+
+
+class TestTrustRegionSolvers:
+
+    def setup_method(self):
+        self.x_opt = [1.0, 1.0]
+        self.easy_guess = [2.0, 2.0]
+        self.hard_guess = [-1.2, 1.0]
+
+    def test_dogleg_accuracy(self):
+        # test the accuracy and the return_all option
+        x0 = self.hard_guess
+        r = minimize(rosen, x0, jac=rosen_der, hess=rosen_hess, tol=1e-8,
+                     method='dogleg', options={'return_all': True},)
+        assert_allclose(x0, r['allvecs'][0])
+        assert_allclose(r['x'], r['allvecs'][-1])
+        assert_allclose(r['x'], self.x_opt)
+
+    def test_dogleg_callback(self):
+        # test the callback mechanism and the maxiter and return_all options
+        accumulator = Accumulator()
+        maxiter = 5
+        r = minimize(rosen, self.hard_guess, jac=rosen_der, hess=rosen_hess,
+                     callback=accumulator, method='dogleg',
+                     options={'return_all': True, 'maxiter': maxiter},)
+        assert_equal(accumulator.count, maxiter)
+        assert_equal(len(r['allvecs']), maxiter+1)
+        assert_allclose(r['x'], r['allvecs'][-1])
+        assert_allclose(sum(r['allvecs'][1:]), accumulator.accum)
+
+    def test_dogleg_user_warning(self):
+        with pytest.warns(RuntimeWarning,
+                          match=r'Maximum number of iterations'):
+            minimize(rosen, self.hard_guess, jac=rosen_der,
+                     hess=rosen_hess, method='dogleg',
+                     options={'disp': True, 'maxiter': 1}, )
+
+    def test_solver_concordance(self):
+        # Assert that dogleg uses fewer iterations than ncg on the Rosenbrock
+        # test function, although this does not necessarily mean
+        # that dogleg is faster or better than ncg even for this function
+        # and especially not for other test functions.
+        f = rosen
+        g = rosen_der
+        h = rosen_hess
+        for x0 in (self.easy_guess, self.hard_guess):
+            r_dogleg = minimize(f, x0, jac=g, hess=h, tol=1e-8,
+                                method='dogleg', options={'return_all': True})
+            r_trust_ncg = minimize(f, x0, jac=g, hess=h, tol=1e-8,
+                                   method='trust-ncg',
+                                   options={'return_all': True})
+            r_trust_krylov = minimize(f, x0, jac=g, hess=h, tol=1e-8,
+                                      method='trust-krylov',
+                                      options={'return_all': True})
+            r_ncg = minimize(f, x0, jac=g, hess=h, tol=1e-8,
+                             method='newton-cg', options={'return_all': True})
+            r_iterative = minimize(f, x0, jac=g, hess=h, tol=1e-8,
+                                   method='trust-exact',
+                                   options={'return_all': True})
+            assert_allclose(self.x_opt, r_dogleg['x'])
+            assert_allclose(self.x_opt, r_trust_ncg['x'])
+            assert_allclose(self.x_opt, r_trust_krylov['x'])
+            assert_allclose(self.x_opt, r_ncg['x'])
+            assert_allclose(self.x_opt, r_iterative['x'])
+            assert_(len(r_dogleg['allvecs']) < len(r_ncg['allvecs']))
+
+    def test_trust_ncg_hessp(self):
+        for x0 in (self.easy_guess, self.hard_guess, self.x_opt):
+            r = minimize(rosen, x0, jac=rosen_der, hessp=rosen_hess_prod,
+                         tol=1e-8, method='trust-ncg')
+            assert_allclose(self.x_opt, r['x'])
+
+    def test_trust_ncg_start_in_optimum(self):
+        r = minimize(rosen, x0=self.x_opt, jac=rosen_der, hess=rosen_hess,
+                     tol=1e-8, method='trust-ncg')
+        assert_allclose(self.x_opt, r['x'])
+
+    def test_trust_krylov_start_in_optimum(self):
+        r = minimize(rosen, x0=self.x_opt, jac=rosen_der, hess=rosen_hess,
+                     tol=1e-8, method='trust-krylov')
+        assert_allclose(self.x_opt, r['x'])
+
+    def test_trust_exact_start_in_optimum(self):
+        r = minimize(rosen, x0=self.x_opt, jac=rosen_der, hess=rosen_hess,
+                     tol=1e-8, method='trust-exact')
+        assert_allclose(self.x_opt, r['x'])
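+
+
+# Editorial usage sketch (added for illustration; not in the original suite):
+# 'dogleg' and 'trust-exact' require the full Hessian via `hess`, while
+# 'trust-ncg' and 'trust-krylov' also accept a Hessian-vector product via
+# `hessp`, which avoids forming the dense Hessian on large problems.
+def _trust_krylov_hessp_sketch():
+    r = minimize(rosen, [2.0, 2.0], jac=rosen_der, hessp=rosen_hess_prod,
+                 tol=1e-8, method='trust-krylov')
+    assert_allclose(r['x'], [1.0, 1.0])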
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_trustregion_exact.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_trustregion_exact.py
new file mode 100644
index 00000000..beace0a5
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_trustregion_exact.py
@@ -0,0 +1,352 @@
+"""
+Unit tests for trust-region iterative subproblem.
+
+To run it in its simplest form::
+  pytest test_trustregion_exact.py
+
+"""
+import numpy as np
+from scipy.optimize._trustregion_exact import (
+    estimate_smallest_singular_value,
+    singular_leading_submatrix,
+    IterativeSubproblem)
+from scipy.linalg import (svd, get_lapack_funcs, det, qr, norm)
+from numpy.testing import (assert_array_equal,
+                           assert_equal, assert_array_almost_equal)
+
+
+def random_entry(n, min_eig, max_eig, case):
+
+    # Generate random matrix
+    rand = np.random.uniform(-1, 1, (n, n))
+
+    # QR decomposition
+    Q, _, _ = qr(rand, pivoting=True)
+
+    # Generate random eigenvalues
+    eigvalues = np.random.uniform(min_eig, max_eig, n)
+    eigvalues = np.sort(eigvalues)[::-1]
+
+    # Generate matrix
+    Qaux = np.multiply(eigvalues, Q)
+    A = np.dot(Qaux, Q.T)
+
+    # Generate the gradient vector according
+    # to the case being tested.
+    if case == 'hard':
+        g = np.zeros(n)
+        g[:-1] = np.random.uniform(-1, 1, n-1)
+        g = np.dot(Q, g)
+    elif case == 'jac_equal_zero':
+        g = np.zeros(n)
+    else:
+        g = np.random.uniform(-1, 1, n)
+
+    return A, g
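+
+
+# Editorial sanity check (added for illustration; not used by the tests):
+# random_entry builds A = Q @ diag(eigvalues) @ Q.T with orthogonal Q, so A
+# is symmetric and its spectrum lies inside [min_eig, max_eig].
+def _random_entry_sketch():
+    np.random.seed(0)
+    A, g = random_entry(4, -2.0, 2.0, case='easy')
+    assert_array_almost_equal(A, A.T)
+    w = np.linalg.eigvalsh(A)
+    assert_equal(((w >= -2.0 - 1e-12) & (w <= 2.0 + 1e-12)).all(), True)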
+
+
+class TestEstimateSmallestSingularValue:
+
+    def test_for_ill_conditioned_matrix(self):
+
+        # Ill-conditioned triangular matrix
+        C = np.array([[1, 2, 3, 4],
+                      [0, 0.05, 60, 7],
+                      [0, 0, 0.8, 9],
+                      [0, 0, 0, 10]])
+
+        # Get svd decomposition
+        U, s, Vt = svd(C)
+
+        # Get smallest singular value and corresponding right singular vector.
+        smin_svd = s[-1]
+        zmin_svd = Vt[-1, :]
+
+        # Estimate smallest singular value
+        smin, zmin = estimate_smallest_singular_value(C)
+
+        # Check the estimation
+        assert_array_almost_equal(smin, smin_svd, decimal=8)
+        assert_array_almost_equal(abs(zmin), abs(zmin_svd), decimal=8)
+
+
+class TestSingularLeadingSubmatrix:
+
+    def test_for_already_singular_leading_submatrix(self):
+
+        # Define test matrix A.
+        # Note that the leading 2x2 submatrix is singular.
+        A = np.array([[1, 2, 3],
+                      [2, 4, 5],
+                      [3, 5, 6]])
+
+        # Get Cholesky from lapack functions
+        cholesky, = get_lapack_funcs(('potrf',), (A,))
+
+        # Compute Cholesky Decomposition
+        c, k = cholesky(A, lower=False, overwrite_a=False, clean=True)
+
+        delta, v = singular_leading_submatrix(A, c, k)
+
+        A[k-1, k-1] += delta
+
+        # Check if the leading submatrix is singular.
+        assert_array_almost_equal(det(A[:k, :k]), 0)
+
+        # Check that `v` fulfills the specified properties
+        quadratic_term = np.dot(v, np.dot(A, v))
+        assert_array_almost_equal(quadratic_term, 0)
+
+    def test_for_symmetric_indefinite_matrix(self):
+
+        # Define test matrix A.
+        # Note that the leading 5x5 submatrix is indefinite.
+        A = np.asarray([[1, 2, 3, 7, 8],
+                        [2, 5, 5, 9, 0],
+                        [3, 5, 11, 1, 2],
+                        [7, 9, 1, 7, 5],
+                        [8, 0, 2, 5, 8]])
+
+        # Get Cholesky from lapack functions
+        cholesky, = get_lapack_funcs(('potrf',), (A,))
+
+        # Compute Cholesky Decomposition
+        c, k = cholesky(A, lower=False, overwrite_a=False, clean=True)
+
+        delta, v = singular_leading_submatrix(A, c, k)
+
+        A[k-1, k-1] += delta
+
+        # Check if the leading submatrix is singular.
+        assert_array_almost_equal(det(A[:k, :k]), 0)
+
+        # Check that `v` fulfills the specified properties
+        quadratic_term = np.dot(v, np.dot(A, v))
+        assert_array_almost_equal(quadratic_term, 0)
+
+    def test_for_first_element_equal_to_zero(self):
+
+        # Define test matrix A.
+        # Note that the leading 2x2 submatrix is singular.
+        A = np.array([[0, 3, 11],
+                      [3, 12, 5],
+                      [11, 5, 6]])
+
+        # Get Cholesky from lapack functions
+        cholesky, = get_lapack_funcs(('potrf',), (A,))
+
+        # Compute Cholesky Decomposition
+        c, k = cholesky(A, lower=False, overwrite_a=False, clean=True)
+
+        delta, v = singular_leading_submatrix(A, c, k)
+
+        A[k-1, k-1] += delta
+
+        # Check if the leading submatrix is singular
+        assert_array_almost_equal(det(A[:k, :k]), 0)
+
+        # Check that `v` fulfills the specified properties
+        quadratic_term = np.dot(v, np.dot(A, v))
+        assert_array_almost_equal(quadratic_term, 0)
+
+
+class TestIterativeSubproblem:
+
+    def test_for_the_easy_case(self):
+
+        # `H` is chosen such that `g` is not orthogonal to the
+        # eigenvector associated with the smallest eigenvalue `s`.
+        H = [[10, 2, 3, 4],
+             [2, 1, 7, 1],
+             [3, 7, 1, 7],
+             [4, 1, 7, 2]]
+        g = [1, 1, 1, 1]
+
+        # Trust Radius
+        trust_radius = 1
+
+        # Solve Subproblem
+        subprob = IterativeSubproblem(x=0,
+                                      fun=lambda x: 0,
+                                      jac=lambda x: np.array(g),
+                                      hess=lambda x: np.array(H),
+                                      k_easy=1e-10,
+                                      k_hard=1e-10)
+        p, hits_boundary = subprob.solve(trust_radius)
+
+        assert_array_almost_equal(p, [0.00393332, -0.55260862,
+                                      0.67065477, -0.49480341])
+        assert_array_almost_equal(hits_boundary, True)
+
+    def test_for_the_hard_case(self):
+
+        # `H` is chosen such that `g` is orthogonal to the
+        # eigenvector associated with the smallest eigenvalue `s`.
+        H = [[10, 2, 3, 4],
+             [2, 1, 7, 1],
+             [3, 7, 1, 7],
+             [4, 1, 7, 2]]
+        g = [6.4852641521327437, 1, 1, 1]
+        s = -8.2151519874416614
+
+        # Trust Radius
+        trust_radius = 1
+
+        # Solve Subproblem
+        subprob = IterativeSubproblem(x=0,
+                                      fun=lambda x: 0,
+                                      jac=lambda x: np.array(g),
+                                      hess=lambda x: np.array(H),
+                                      k_easy=1e-10,
+                                      k_hard=1e-10)
+        p, hits_boundary = subprob.solve(trust_radius)
+
+        assert_array_almost_equal(-s, subprob.lambda_current)
+
+    def test_for_interior_convergence(self):
+
+        H = [[1.812159, 0.82687265, 0.21838879, -0.52487006, 0.25436988],
+             [0.82687265, 2.66380283, 0.31508988, -0.40144163, 0.08811588],
+             [0.21838879, 0.31508988, 2.38020726, -0.3166346, 0.27363867],
+             [-0.52487006, -0.40144163, -0.3166346, 1.61927182, -0.42140166],
+             [0.25436988, 0.08811588, 0.27363867, -0.42140166, 1.33243101]]
+
+        g = [0.75798952, 0.01421945, 0.33847612, 0.83725004, -0.47909534]
+
+        # Solve Subproblem
+        subprob = IterativeSubproblem(x=0,
+                                      fun=lambda x: 0,
+                                      jac=lambda x: np.array(g),
+                                      hess=lambda x: np.array(H))
+        p, hits_boundary = subprob.solve(1.1)
+
+        assert_array_almost_equal(p, [-0.68585435, 0.1222621, -0.22090999,
+                                      -0.67005053, 0.31586769])
+        assert_array_almost_equal(hits_boundary, False)
+        assert_array_almost_equal(subprob.lambda_current, 0)
+        assert_array_almost_equal(subprob.niter, 1)
+
+    def test_for_jac_equal_zero(self):
+
+        H = [[0.88547534, 2.90692271, 0.98440885, -0.78911503, -0.28035809],
+             [2.90692271, -0.04618819, 0.32867263, -0.83737945, 0.17116396],
+             [0.98440885, 0.32867263, -0.87355957, -0.06521957, -1.43030957],
+             [-0.78911503, -0.83737945, -0.06521957, -1.645709, -0.33887298],
+             [-0.28035809, 0.17116396, -1.43030957, -0.33887298, -1.68586978]]
+
+        g = [0, 0, 0, 0, 0]
+
+        # Solve Subproblem
+        subprob = IterativeSubproblem(x=0,
+                                      fun=lambda x: 0,
+                                      jac=lambda x: np.array(g),
+                                      hess=lambda x: np.array(H),
+                                      k_easy=1e-10,
+                                      k_hard=1e-10)
+        p, hits_boundary = subprob.solve(1.1)
+
+        assert_array_almost_equal(p, [0.06910534, -0.01432721,
+                                      -0.65311947, -0.23815972,
+                                      -0.84954934])
+        assert_array_almost_equal(hits_boundary, True)
+
+    def test_for_jac_very_close_to_zero(self):
+
+        H = [[0.88547534, 2.90692271, 0.98440885, -0.78911503, -0.28035809],
+             [2.90692271, -0.04618819, 0.32867263, -0.83737945, 0.17116396],
+             [0.98440885, 0.32867263, -0.87355957, -0.06521957, -1.43030957],
+             [-0.78911503, -0.83737945, -0.06521957, -1.645709, -0.33887298],
+             [-0.28035809, 0.17116396, -1.43030957, -0.33887298, -1.68586978]]
+
+        g = [0, 0, 0, 0, 1e-15]
+
+        # Solve Subproblem
+        subprob = IterativeSubproblem(x=0,
+                                      fun=lambda x: 0,
+                                      jac=lambda x: np.array(g),
+                                      hess=lambda x: np.array(H),
+                                      k_easy=1e-10,
+                                      k_hard=1e-10)
+        p, hits_boundary = subprob.solve(1.1)
+
+        assert_array_almost_equal(p, [0.06910534, -0.01432721,
+                                      -0.65311947, -0.23815972,
+                                      -0.84954934])
+        assert_array_almost_equal(hits_boundary, True)
+
+    def test_for_random_entries(self):
+        # Seed
+        np.random.seed(1)
+
+        # Dimension
+        n = 5
+
+        for case in ('easy', 'hard', 'jac_equal_zero'):
+
+            eig_limits = [(-20, -15),
+                          (-10, -5),
+                          (-10, 0),
+                          (-5, 5),
+                          (-10, 10),
+                          (0, 10),
+                          (5, 10),
+                          (15, 20)]
+
+            for min_eig, max_eig in eig_limits:
+                # Generate random symmetric matrix H with
+                # eigenvalues between min_eig and max_eig.
+                H, g = random_entry(n, min_eig, max_eig, case)
+
+                # Trust radius
+                trust_radius_list = [0.1, 0.3, 0.6, 0.8, 1, 1.2, 3.3, 5.5, 10]
+
+                for trust_radius in trust_radius_list:
+                    # Solve subproblem with very high accuracy
+                    subprob_ac = IterativeSubproblem(0,
+                                                     lambda x: 0,
+                                                     lambda x: g,
+                                                     lambda x: H,
+                                                     k_easy=1e-10,
+                                                     k_hard=1e-10)
+
+                    p_ac, hits_boundary_ac = subprob_ac.solve(trust_radius)
+
+                    # Compute objective function value
+                    J_ac = 1/2*np.dot(p_ac, np.dot(H, p_ac))+np.dot(g, p_ac)
+
+                    stop_criteria = [(0.1, 2),
+                                     (0.5, 1.1),
+                                     (0.9, 1.01)]
+
+                    for k_opt, k_trf in stop_criteria:
+
+                        # k_easy and k_hard computed as a function
+                        # of k_opt and k_trf according to
+                        # Conn, A. R., Gould, N. I., & Toint, P. L. (2000).
+                        # "Trust region methods". Siam. p. 197.
+                        k_easy = min(k_trf-1,
+                                     1-np.sqrt(k_opt))
+                        k_hard = 1-k_opt
+
+                        # Solve subproblem
+                        subprob = IterativeSubproblem(0,
+                                                      lambda x: 0,
+                                                      lambda x: g,
+                                                      lambda x: H,
+                                                      k_easy=k_easy,
+                                                      k_hard=k_hard)
+                        p, hits_boundary = subprob.solve(trust_radius)
+
+                        # Compute objective function value
+                        J = 1/2*np.dot(p, np.dot(H, p))+np.dot(g, p)
+
+                        # Check that it respects k_trf
+                        if hits_boundary:
+                            assert_array_equal(np.abs(norm(p)-trust_radius) <=
+                                               (k_trf-1)*trust_radius, True)
+                        else:
+                            assert_equal(norm(p) <= trust_radius, True)
+
+                        # Check that it respects k_opt
+                        assert_equal(J <= k_opt*J_ac, True)
+
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_trustregion_krylov.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_trustregion_krylov.py
new file mode 100644
index 00000000..73081ec5
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_trustregion_krylov.py
@@ -0,0 +1,170 @@
+"""
+Unit tests for Krylov space trust-region subproblem solver.
+
+To run it in its simplest form::
+  pytest test_trustregion_krylov.py
+
+"""
+import numpy as np
+from scipy.optimize._trlib import (get_trlib_quadratic_subproblem)
+from numpy.testing import (assert_,
+                           assert_almost_equal,
+                           assert_equal, assert_array_almost_equal)
+
+KrylovQP = get_trlib_quadratic_subproblem(tol_rel_i=1e-8, tol_rel_b=1e-6)
+KrylovQP_disp = get_trlib_quadratic_subproblem(tol_rel_i=1e-8, tol_rel_b=1e-6,
+                                               disp=True)
+
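+
+# Editorial helper (added for illustration; the tests below inline the same
+# expression): the trust-region subproblem
+#     min_p  g.T @ p + 0.5 * p.T @ H @ p   s.t.  ||p|| <= trust_radius
+# has KKT stationarity condition (H + lam*I) @ p = -g with lam >= 0, and
+# each test checks it by asserting this residual is ~0.
+def _kkt_residual(H, g, p, lam):
+    return np.linalg.norm(H.dot(p) + lam * p + g)
+
+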
+class TestKrylovQuadraticSubproblem:
+
+    def test_for_the_easy_case(self):
+
+        # `H` is chosen such that `g` is not orthogonal to the
+        # eigenvector associated with the smallest eigenvalue.
+        H = np.array([[1.0, 0.0, 4.0],
+                      [0.0, 2.0, 0.0],
+                      [4.0, 0.0, 3.0]])
+        g = np.array([5.0, 0.0, 4.0])
+
+        # Trust Radius
+        trust_radius = 1.0
+
+        # Solve Subproblem
+        subprob = KrylovQP(x=0,
+                           fun=lambda x: 0,
+                           jac=lambda x: g,
+                           hess=lambda x: None,
+                           hessp=lambda x, y: H.dot(y))
+        p, hits_boundary = subprob.solve(trust_radius)
+
+        assert_array_almost_equal(p, np.array([-1.0, 0.0, 0.0]))
+        assert_equal(hits_boundary, True)
+        # check kkt satisfaction
+        assert_almost_equal(
+                np.linalg.norm(H.dot(p) + subprob.lam * p + g),
+                0.0)
+        # check trust region constraint
+        assert_almost_equal(np.linalg.norm(p), trust_radius)
+
+        trust_radius = 0.5
+        p, hits_boundary = subprob.solve(trust_radius)
+
+        assert_array_almost_equal(p,
+                np.array([-0.46125446, 0., -0.19298788]))
+        assert_equal(hits_boundary, True)
+        # check kkt satisfaction
+        assert_almost_equal(
+                np.linalg.norm(H.dot(p) + subprob.lam * p + g),
+                0.0)
+        # check trust region constraint
+        assert_almost_equal(np.linalg.norm(p), trust_radius)
+
+    def test_for_the_hard_case(self):
+
+        # `H` is chosen such that `g` is orthogonal to the
+        # eigenvector associated with the smallest eigenvalue.
+        H = np.array([[1.0, 0.0, 4.0],
+                      [0.0, 2.0, 0.0],
+                      [4.0, 0.0, 3.0]])
+        g = np.array([0.0, 2.0, 0.0])
+
+        # Trust Radius
+        trust_radius = 1.0
+
+        # Solve Subproblem
+        subprob = KrylovQP(x=0,
+                           fun=lambda x: 0,
+                           jac=lambda x: g,
+                           hess=lambda x: None,
+                           hessp=lambda x, y: H.dot(y))
+        p, hits_boundary = subprob.solve(trust_radius)
+
+        assert_array_almost_equal(p, np.array([0.0, -1.0, 0.0]))
+        # check kkt satisfaction
+        assert_almost_equal(
+                np.linalg.norm(H.dot(p) + subprob.lam * p + g),
+                0.0)
+        # check trust region constraint
+        assert_almost_equal(np.linalg.norm(p), trust_radius)
+
+        trust_radius = 0.5
+        p, hits_boundary = subprob.solve(trust_radius)
+
+        assert_array_almost_equal(p, np.array([0.0, -0.5, 0.0]))
+        # check kkt satisfaction
+        assert_almost_equal(
+                np.linalg.norm(H.dot(p) + subprob.lam * p + g),
+                0.0)
+        # check trust region constraint
+        assert_almost_equal(np.linalg.norm(p), trust_radius)
+
+    def test_for_interior_convergence(self):
+
+        H = np.array([[1.812159, 0.82687265, 0.21838879, -0.52487006, 0.25436988],
+                      [0.82687265, 2.66380283, 0.31508988, -0.40144163, 0.08811588],
+                      [0.21838879, 0.31508988, 2.38020726, -0.3166346, 0.27363867],
+                      [-0.52487006, -0.40144163, -0.3166346, 1.61927182, -0.42140166],
+                      [0.25436988, 0.08811588, 0.27363867, -0.42140166, 1.33243101]])
+        g = np.array([0.75798952, 0.01421945, 0.33847612, 0.83725004, -0.47909534])
+        trust_radius = 1.1
+
+        # Solve Subproblem
+        subprob = KrylovQP(x=0,
+                           fun=lambda x: 0,
+                           jac=lambda x: g,
+                           hess=lambda x: None,
+                           hessp=lambda x, y: H.dot(y))
+        p, hits_boundary = subprob.solve(trust_radius)
+
+        # check kkt satisfaction
+        assert_almost_equal(
+                np.linalg.norm(H.dot(p) + subprob.lam * p + g),
+                0.0)
+
+        assert_array_almost_equal(p, [-0.68585435, 0.1222621, -0.22090999,
+                                      -0.67005053, 0.31586769])
+        assert_array_almost_equal(hits_boundary, False)
+
+    def test_for_very_close_to_zero(self):
+
+        H = np.array([[0.88547534, 2.90692271, 0.98440885, -0.78911503, -0.28035809],
+                      [2.90692271, -0.04618819, 0.32867263, -0.83737945, 0.17116396],
+                      [0.98440885, 0.32867263, -0.87355957, -0.06521957, -1.43030957],
+                      [-0.78911503, -0.83737945, -0.06521957, -1.645709, -0.33887298],
+                      [-0.28035809, 0.17116396, -1.43030957, -0.33887298, -1.68586978]])
+        g = np.array([0, 0, 0, 0, 1e-6])
+        trust_radius = 1.1
+
+        # Solve Subproblem
+        subprob = KrylovQP(x=0,
+                           fun=lambda x: 0,
+                           jac=lambda x: g,
+                           hess=lambda x: None,
+                           hessp=lambda x, y: H.dot(y))
+        p, hits_boundary = subprob.solve(trust_radius)
+
+        # check kkt satisfaction
+        assert_almost_equal(
+                np.linalg.norm(H.dot(p) + subprob.lam * p + g),
+                0.0)
+        # check trust region constraint
+        assert_almost_equal(np.linalg.norm(p), trust_radius)
+
+        assert_array_almost_equal(p, [0.06910534, -0.01432721,
+                                      -0.65311947, -0.23815972,
+                                      -0.84954934])
+        assert_array_almost_equal(hits_boundary, True)
+
+    def test_disp(self, capsys):
+        H = -np.eye(5)
+        g = np.array([0, 0, 0, 0, 1e-6])
+        trust_radius = 1.1
+
+        subprob = KrylovQP_disp(x=0,
+                                fun=lambda x: 0,
+                                jac=lambda x: g,
+                                hess=lambda x: None,
+                                hessp=lambda x, y: H.dot(y))
+        p, hits_boundary = subprob.solve(trust_radius)
+        out, err = capsys.readouterr()
+        assert_(out.startswith(' TR Solving trust region problem'), repr(out))
+
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_zeros.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_zeros.py
new file mode 100644
index 00000000..928acc04
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tests/test_zeros.py
@@ -0,0 +1,770 @@
+import pytest
+
+from math import sqrt, exp, sin, cos
+from functools import lru_cache
+
+from numpy.testing import (assert_warns, assert_,
+                           assert_allclose,
+                           assert_equal,
+                           assert_array_equal,
+                           suppress_warnings)
+import numpy as np
+from numpy import finfo, power, nan, isclose
+
+
+from scipy.optimize import _zeros_py as zeros, newton, root_scalar
+
+from scipy._lib._util import getfullargspec_no_self as _getfullargspec
+
+# Import testing parameters
+from scipy.optimize._tstutils import (
+    get_tests, functions as tstutils_functions, fstrings as tstutils_fstrings)
+
+TOL = 4*np.finfo(float).eps  # tolerance
+
+_FLOAT_EPS = finfo(float).eps
+
+# A few test functions used frequently:
+# A simple quadratic, (x-1)**2 - 2
+def f1(x):
+    return x ** 2 - 2 * x - 1
+
+
+def f1_1(x):
+    return 2 * x - 2
+
+
+def f1_2(x):
+    return 2.0 + 0 * x
+
+
+def f1_and_p_and_pp(x):
+    return f1(x), f1_1(x), f1_2(x)
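+# (Editorial note: the combined form above returns (f, f', f'') from a single
+# call; `root_scalar` accepts such a callable via fprime=True / fprime2=True,
+# as exercised in test_newton_combined below.)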
+
+
+# Simple transcendental function
+def f2(x):
+    return exp(x) - cos(x)
+
+
+def f2_1(x):
+    return exp(x) + sin(x)
+
+
+def f2_2(x):
+    return exp(x) + cos(x)
+
+
+# lru cached function
+@lru_cache()
+def f_lrucached(x):
+    return x
+
+
+class TestBasic:
+
+    def run_check_by_name(self, name, smoothness=0, **kwargs):
+        a = .5
+        b = sqrt(3)
+        xtol = 4*np.finfo(float).eps
+        rtol = 4*np.finfo(float).eps
+        for function, fname in zip(tstutils_functions, tstutils_fstrings):
+            if smoothness > 0 and fname in ['f4', 'f5', 'f6']:
+                continue
+            r = root_scalar(function, method=name, bracket=[a, b], x0=a,
+                            xtol=xtol, rtol=rtol, **kwargs)
+            zero = r.root
+            assert_(r.converged)
+            assert_allclose(zero, 1.0, atol=xtol, rtol=rtol,
+                            err_msg='method %s, function %s' % (name, fname))
+
+    def run_check(self, method, name):
+        a = .5
+        b = sqrt(3)
+        xtol = 4 * _FLOAT_EPS
+        rtol = 4 * _FLOAT_EPS
+        for function, fname in zip(tstutils_functions, tstutils_fstrings):
+            zero, r = method(function, a, b, xtol=xtol, rtol=rtol,
+                             full_output=True)
+            assert_(r.converged)
+            assert_allclose(zero, 1.0, atol=xtol, rtol=rtol,
+                            err_msg='method %s, function %s' % (name, fname))
+
+    def run_check_lru_cached(self, method, name):
+        # check that https://github.com/scipy/scipy/issues/10846 is fixed
+        a = -1
+        b = 1
+        zero, r = method(f_lrucached, a, b, full_output=True)
+        assert_(r.converged)
+        assert_allclose(zero, 0,
+                        err_msg='method %s, function %s' % (name, 'f_lrucached'))
+
+    def _run_one_test(self, tc, method, sig_args_keys=None,
+                      sig_kwargs_keys=None, **kwargs):
+        method_args = []
+        for k in sig_args_keys or []:
+            if k not in tc:
+                # If a,b not present use x0, x1. Similarly for f and func
+                k = {'a': 'x0', 'b': 'x1', 'func': 'f'}.get(k, k)
+            method_args.append(tc[k])
+
+        method_kwargs = dict(**kwargs)
+        method_kwargs.update({'full_output': True, 'disp': False})
+        for k in sig_kwargs_keys or []:
+            method_kwargs[k] = tc[k]
+
+        root = tc.get('root')
+        func_args = tc.get('args', ())
+
+        try:
+            r, rr = method(*method_args, args=func_args, **method_kwargs)
+            return root, rr, tc
+        except Exception:
+            return root, zeros.RootResults(nan, -1, -1, zeros._EVALUEERR), tc
+
+    def run_tests(self, tests, method, name,
+                  xtol=4 * _FLOAT_EPS, rtol=4 * _FLOAT_EPS,
+                  known_fail=None, **kwargs):
+        r"""Run test-cases using the specified method and the supplied signature.
+
+        Extract the arguments for the method call from the test case
+        dictionary using the supplied keys for the method's signature."""
+        # The methods have one of two base signatures:
+        # (f, a, b, **kwargs)  # newton
+        # (func, x0, **kwargs)  # bisect/brentq/...
+        sig = _getfullargspec(method)  # FullArgSpec with args, varargs, varkw, defaults, ...
+        assert_(not sig.kwonlyargs)
+        nDefaults = len(sig.defaults)
+        nRequired = len(sig.args) - nDefaults
+        sig_args_keys = sig.args[:nRequired]
+        sig_kwargs_keys = []
+        if name in ['secant', 'newton', 'halley']:
+            if name in ['newton', 'halley']:
+                sig_kwargs_keys.append('fprime')
+                if name in ['halley']:
+                    sig_kwargs_keys.append('fprime2')
+            kwargs['tol'] = xtol
+        else:
+            kwargs['xtol'] = xtol
+            kwargs['rtol'] = rtol
+
+        results = [list(self._run_one_test(
+            tc, method, sig_args_keys=sig_args_keys,
+            sig_kwargs_keys=sig_kwargs_keys, **kwargs)) for tc in tests]
+        # results= [[true root, full output, tc], ...]
+
+        known_fail = known_fail or []
+        notcvgd = [elt for elt in results if not elt[1].converged]
+        notcvgd = [elt for elt in notcvgd if elt[-1]['ID'] not in known_fail]
+        notcvged_IDS = [elt[-1]['ID'] for elt in notcvgd]
+        assert_equal([len(notcvged_IDS), notcvged_IDS], [0, []])
+
+        # The usable xtol and rtol depend on the test
+        tols = {'xtol': 4 * _FLOAT_EPS, 'rtol': 4 * _FLOAT_EPS}
+        tols.update(**kwargs)
+        rtol = tols['rtol']
+        atol = tols.get('tol', tols['xtol'])
+
+        cvgd = [elt for elt in results if elt[1].converged]
+        approx = [elt[1].root for elt in cvgd]
+        correct = [elt[0] for elt in cvgd]
+        notclose = [[a] + elt for a, c, elt in zip(approx, correct, cvgd) if
+                    not isclose(a, c, rtol=rtol, atol=atol)
+                    and elt[-1]['ID'] not in known_fail]
+        # Evaluate the function and see if it is 0 at the purported root
+        fvs = [tc['f'](aroot, *(tc['args'])) for aroot, c, fullout, tc in notclose]
+        notclose = [[fv] + elt for fv, elt in zip(fvs, notclose) if fv != 0]
+        assert_equal([notclose, len(notclose)], [[], 0])
+
+    def run_collection(self, collection, method, name, smoothness=None,
+                       known_fail=None,
+                       xtol=4 * _FLOAT_EPS, rtol=4 * _FLOAT_EPS,
+                       **kwargs):
+        r"""Run a collection of tests using the specified method.
+
+        The name is used to determine some optional arguments."""
+        tests = get_tests(collection, smoothness=smoothness)
+        self.run_tests(tests, method, name, xtol=xtol, rtol=rtol,
+                       known_fail=known_fail, **kwargs)
+
+    def test_bisect(self):
+        self.run_check(zeros.bisect, 'bisect')
+        self.run_check_lru_cached(zeros.bisect, 'bisect')
+        self.run_check_by_name('bisect')
+        self.run_collection('aps', zeros.bisect, 'bisect', smoothness=1)
+
+    def test_ridder(self):
+        self.run_check(zeros.ridder, 'ridder')
+        self.run_check_lru_cached(zeros.ridder, 'ridder')
+        self.run_check_by_name('ridder')
+        self.run_collection('aps', zeros.ridder, 'ridder', smoothness=1)
+
+    def test_brentq(self):
+        self.run_check(zeros.brentq, 'brentq')
+        self.run_check_lru_cached(zeros.brentq, 'brentq')
+        self.run_check_by_name('brentq')
+        # brentq/brenth need a tighter tolerance to be specified
+        self.run_collection('aps', zeros.brentq, 'brentq', smoothness=1,
+                            xtol=1e-14, rtol=1e-14)
+
+    def test_brenth(self):
+        self.run_check(zeros.brenth, 'brenth')
+        self.run_check_lru_cached(zeros.brenth, 'brenth')
+        self.run_check_by_name('brenth')
+        self.run_collection('aps', zeros.brenth, 'brenth', smoothness=1,
+                            xtol=1e-14, rtol=1e-14)
+
+    def test_toms748(self):
+        self.run_check(zeros.toms748, 'toms748')
+        self.run_check_lru_cached(zeros.toms748, 'toms748')
+        self.run_check_by_name('toms748')
+        self.run_collection('aps', zeros.toms748, 'toms748', smoothness=1)
+
+    def test_newton_collections(self):
+        known_fail = ['aps.13.00']
+        known_fail += ['aps.12.05', 'aps.12.17']  # fails under Windows Py27
+        for collection in ['aps', 'complex']:
+            self.run_collection(collection, zeros.newton, 'newton',
+                                smoothness=2, known_fail=known_fail)
+
+    def test_halley_collections(self):
+        known_fail = ['aps.12.06', 'aps.12.07', 'aps.12.08', 'aps.12.09',
+                      'aps.12.10', 'aps.12.11', 'aps.12.12', 'aps.12.13',
+                      'aps.12.14', 'aps.12.15', 'aps.12.16', 'aps.12.17',
+                      'aps.12.18', 'aps.13.00']
+        for collection in ['aps', 'complex']:
+            self.run_collection(collection, zeros.newton, 'halley',
+                                smoothness=2, known_fail=known_fail)
+
+    @staticmethod
+    def f1(x):
+        return x**2 - 2*x - 1  # == (x-1)**2 - 2
+
+    @staticmethod
+    def f1_1(x):
+        return 2*x - 2
+
+    @staticmethod
+    def f1_2(x):
+        return 2.0 + 0*x
+
+    @staticmethod
+    def f2(x):
+        return exp(x) - cos(x)
+
+    @staticmethod
+    def f2_1(x):
+        return exp(x) + sin(x)
+
+    @staticmethod
+    def f2_2(x):
+        return exp(x) + cos(x)
+
+    def test_newton(self):
+        for f, f_1, f_2 in [(self.f1, self.f1_1, self.f1_2),
+                            (self.f2, self.f2_1, self.f2_2)]:
+            x = zeros.newton(f, 3, tol=1e-6)
+            assert_allclose(f(x), 0, atol=1e-6)
+            x = zeros.newton(f, 3, x1=5, tol=1e-6)  # secant, x0 and x1
+            assert_allclose(f(x), 0, atol=1e-6)
+            x = zeros.newton(f, 3, fprime=f_1, tol=1e-6)   # newton
+            assert_allclose(f(x), 0, atol=1e-6)
+            x = zeros.newton(f, 3, fprime=f_1, fprime2=f_2, tol=1e-6)  # halley
+            assert_allclose(f(x), 0, atol=1e-6)
+
+    def test_newton_by_name(self):
+        r"""Invoke newton through root_scalar()"""
+        for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]:
+            r = root_scalar(f, method='newton', x0=3, fprime=f_1, xtol=1e-6)
+            assert_allclose(f(r.root), 0, atol=1e-6)
+
+    def test_secant_by_name(self):
+        r"""Invoke secant through root_scalar()"""
+        for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]:
+            r = root_scalar(f, method='secant', x0=3, x1=2, xtol=1e-6)
+            assert_allclose(f(r.root), 0, atol=1e-6)
+            r = root_scalar(f, method='secant', x0=3, x1=5, xtol=1e-6)
+            assert_allclose(f(r.root), 0, atol=1e-6)
+
+    def test_halley_by_name(self):
+        r"""Invoke halley through root_scalar()"""
+        for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]:
+            r = root_scalar(f, method='halley', x0=3,
+                            fprime=f_1, fprime2=f_2, xtol=1e-6)
+            assert_allclose(f(r.root), 0, atol=1e-6)
+
+    def test_root_scalar_fail(self):
+        with pytest.raises(ValueError):
+            root_scalar(f1, method='secant', x0=3, xtol=1e-6)  # no x1
+        with pytest.raises(ValueError):
+            root_scalar(f1, method='newton', x0=3, xtol=1e-6)  # no fprime
+        with pytest.raises(ValueError):
+            root_scalar(f1, method='halley', fprime=f1_1, x0=3, xtol=1e-6)  # no fprime2
+        with pytest.raises(ValueError):
+            root_scalar(f1, method='halley', fprime2=f1_2, x0=3, xtol=1e-6)  # no fprime
+
+    def test_array_newton(self):
+        """test newton with array"""
+
+        def f1(x, *a):
+            b = a[0] + x * a[3]
+            return a[1] - a[2] * (np.exp(b / a[5]) - 1.0) - b / a[4] - x
+
+        def f1_1(x, *a):
+            b = a[3] / a[5]
+            return -a[2] * np.exp(a[0] / a[5] + x * b) * b - a[3] / a[4] - 1
+
+        def f1_2(x, *a):
+            b = a[3] / a[5]
+            return -a[2] * np.exp(a[0] / a[5] + x * b) * b**2
+
+        a0 = np.array([
+            5.32725221, 5.48673747, 5.49539973,
+            5.36387202, 4.80237316, 1.43764452,
+            5.23063958, 5.46094772, 5.50512718,
+            5.42046290
+        ])
+        a1 = (np.sin(range(10)) + 1.0) * 7.0
+        args = (a0, a1, 1e-09, 0.004, 10, 0.27456)
+        x0 = [7.0] * 10
+        x = zeros.newton(f1, x0, f1_1, args)
+        x_expected = (
+            6.17264965, 11.7702805, 12.2219954,
+            7.11017681, 1.18151293, 0.143707955,
+            4.31928228, 10.5419107, 12.7552490,
+            8.91225749
+        )
+        assert_allclose(x, x_expected)
+        # test halley's
+        x = zeros.newton(f1, x0, f1_1, args, fprime2=f1_2)
+        assert_allclose(x, x_expected)
+        # test secant
+        x = zeros.newton(f1, x0, args=args)
+        assert_allclose(x, x_expected)
+
+    def test_array_newton_complex(self):
+        def f(x):
+            return x + 1+1j
+
+        def fprime(x):
+            return 1.0
+
+        t = np.full(4, 1j)
+        x = zeros.newton(f, t, fprime=fprime)
+        assert_allclose(f(x), 0.)
+
+        # should work even if x0 is not complex
+        t = np.ones(4)
+        x = zeros.newton(f, t, fprime=fprime)
+        assert_allclose(f(x), 0.)
+
+        x = zeros.newton(f, t)
+        assert_allclose(f(x), 0.)
+
+    def test_array_secant_active_zero_der(self):
+        """test secant doesn't continue to iterate zero derivatives"""
+        x = zeros.newton(lambda x, *a: x*x - a[0], x0=[4.123, 5],
+                         args=[np.array([17, 25])])
+        assert_allclose(x, (4.123105625617661, 5.0))
+
+    def test_array_newton_integers(self):
+        # test secant with float
+        x = zeros.newton(lambda y, z: z - y ** 2, [4.0] * 2,
+                         args=([15.0, 17.0],))
+        assert_allclose(x, (3.872983346207417, 4.123105625617661))
+        # test integer becomes float
+        x = zeros.newton(lambda y, z: z - y ** 2, [4] * 2, args=([15, 17],))
+        assert_allclose(x, (3.872983346207417, 4.123105625617661))
+
+    def test_array_newton_zero_der_failures(self):
+        # test derivative zero warning
+        assert_warns(RuntimeWarning, zeros.newton,
+                     lambda y: y**2 - 2, [0., 0.], lambda y: 2 * y)
+        # test failures and zero_der
+        with pytest.warns(RuntimeWarning):
+            results = zeros.newton(lambda y: y**2 - 2, [0., 0.],
+                                   lambda y: 2*y, full_output=True)
+            assert_allclose(results.root, 0)
+            assert results.zero_der.all()
+            assert not results.converged.any()
+
+    def test_newton_combined(self):
+        f1 = lambda x: x**2 - 2*x - 1
+        f1_1 = lambda x: 2*x - 2
+        f1_2 = lambda x: 2.0 + 0*x
+
+        def f1_and_p_and_pp(x):
+            return x**2 - 2*x-1, 2*x-2, 2.0
+
+        sol0 = root_scalar(f1, method='newton', x0=3, fprime=f1_1)
+        sol = root_scalar(f1_and_p_and_pp, method='newton', x0=3, fprime=True)
+        assert_allclose(sol0.root, sol.root, atol=1e-8)
+        assert_equal(2*sol.function_calls, sol0.function_calls)
+
+        sol0 = root_scalar(f1, method='halley', x0=3, fprime=f1_1, fprime2=f1_2)
+        sol = root_scalar(f1_and_p_and_pp, method='halley', x0=3, fprime2=True)
+        assert_allclose(sol0.root, sol.root, atol=1e-8)
+        assert_equal(3*sol.function_calls, sol0.function_calls)
+
+    def test_newton_full_output(self):
+        # Test the full_output capability, both when converging and not.
+        # Use simple polynomials, to avoid hitting platform dependencies
+        # (e.g., exp & trig) in number of iterations
+
+        x0 = 3
+        expected_counts = [(6, 7), (5, 10), (3, 9)]
+
+        for derivs in range(3):
+            kwargs = {'tol': 1e-6, 'full_output': True, }
+            for k, v in [['fprime', self.f1_1], ['fprime2', self.f1_2]][:derivs]:
+                kwargs[k] = v
+
+            x, r = zeros.newton(self.f1, x0, disp=False, **kwargs)
+            assert_(r.converged)
+            assert_equal(x, r.root)
+            assert_equal((r.iterations, r.function_calls), expected_counts[derivs])
+            if derivs == 0:
+                assert r.function_calls <= r.iterations + 1
+            else:
+                assert_equal(r.function_calls, (derivs + 1) * r.iterations)
+
+            # Now repeat, allowing one fewer iteration to force convergence failure
+            iters = r.iterations - 1
+            x, r = zeros.newton(self.f1, x0, maxiter=iters, disp=False, **kwargs)
+            assert_(not r.converged)
+            assert_equal(x, r.root)
+            assert_equal(r.iterations, iters)
+
+            if derivs == 1:
+                # Check that the correct Exception is raised and
+                # validate the start of the message.
+                with pytest.raises(
+                    RuntimeError,
+                    match='Failed to converge after %d iterations, value is .*' % (iters)):
+                    x, r = zeros.newton(self.f1, x0, maxiter=iters, disp=True, **kwargs)
+
+    def test_deriv_zero_warning(self):
+        func = lambda x: x**2 - 2.0
+        dfunc = lambda x: 2*x
+        assert_warns(RuntimeWarning, zeros.newton, func, 0.0, dfunc, disp=False)
+        with pytest.raises(RuntimeError, match='Derivative was zero'):
+            zeros.newton(func, 0.0, dfunc)
+
+    def test_newton_does_not_modify_x0(self):
+        # https://github.com/scipy/scipy/issues/9964
+        x0 = np.array([0.1, 3])
+        x0_copy = x0.copy()  # Copy to test for equality.
+        newton(np.sin, x0, np.cos)
+        assert_array_equal(x0, x0_copy)
+
+    def test_maxiter_int_check(self):
+        for method in [zeros.bisect, zeros.newton, zeros.ridder, zeros.brentq,
+                       zeros.brenth, zeros.toms748]:
+            with pytest.raises(TypeError,
+                    match="'float' object cannot be interpreted as an integer"):
+                method(f1, 0.0, 1.0, maxiter=72.45)
+
+
+def test_gh_5555():
+    root = 0.1
+
+    def f(x):
+        return x - root
+
+    methods = [zeros.bisect, zeros.ridder]
+    xtol = rtol = TOL
+    for method in methods:
+        res = method(f, -1e8, 1e7, xtol=xtol, rtol=rtol)
+        assert_allclose(root, res, atol=xtol, rtol=rtol,
+                        err_msg='method %s' % method.__name__)
+
+
+def test_gh_5557():
+    # Show that without the changes in 5557 brentq and brenth might
+    # only achieve a tolerance of 2*(xtol + rtol*|res|).
+
+    # f linearly interpolates (0, -0.1), (0.5, -0.1), and (1,
+    # 0.4). The important parts are that |f(0)| < |f(1)| (so that
+    # brent takes 0 as the initial guess), |f(0)| < atol (so that
+    # brent accepts 0 as the root), and that the exact root of f lies
+    # more than atol away from 0 (so that brent doesn't achieve the
+    # desired tolerance).
+    def f(x):
+        if x < 0.5:
+            return -0.1
+        else:
+            return x - 0.6
+
+    atol = 0.51
+    rtol = 4 * _FLOAT_EPS
+    methods = [zeros.brentq, zeros.brenth]
+    for method in methods:
+        res = method(f, 0, 1, xtol=atol, rtol=rtol)
+        assert_allclose(0.6, res, atol=atol, rtol=rtol)
+
+
+def test_brent_underflow_in_root_bracketing():
+    # Testing whether an interval [a, b] brackets a zero of a function
+    # by checking f(a)*f(b) < 0 is not reliable when the product
+    # underflows/overflows (reported in issue #13737).
+
+    underflow_scenario = (-450.0, -350.0, -400.0)
+    overflow_scenario = (350.0, 450.0, 400.0)
+
+    for a, b, root in [underflow_scenario, overflow_scenario]:
+        c = np.exp(root)
+        for method in [zeros.brenth, zeros.brentq]:
+            res = method(lambda x: np.exp(x)-c, a, b)
+            assert_allclose(root, res)
+
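+
+# Editorial demonstration (added for illustration; not part of the original
+# suite) of the failure mode above: both factors are representable floats,
+# but their product underflows, so the naive sign test f(a)*f(b) < 0 misses
+# the bracket.
+def _bracket_product_underflow_sketch():
+    fa = np.exp(-450.0) - np.exp(-400.0)   # negative, roughly -1.9e-174
+    fb = np.exp(-350.0) - np.exp(-400.0)   # positive, roughly  1.0e-152
+    assert fa < 0 < fb                     # the interval brackets a root
+    assert not (fa * fb < 0)               # yet the product underflows to -0.0
+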
+
+class TestRootResults:
+    def test_repr(self):
+        r = zeros.RootResults(root=1.0,
+                              iterations=44,
+                              function_calls=46,
+                              flag=0)
+        expected_repr = ("      converged: True\n           flag: 'converged'"
+                         "\n function_calls: 46\n     iterations: 44\n"
+                         "           root: 1.0")
+        assert_equal(repr(r), expected_repr)
+
+
+def test_complex_halley():
+    """Test Halley's works with complex roots"""
+    def f(x, *a):
+        return a[0] * x**2 + a[1] * x + a[2]
+
+    def f_1(x, *a):
+        return 2 * a[0] * x + a[1]
+
+    def f_2(x, *a):
+        retval = 2 * a[0]
+        try:
+            size = len(x)
+        except TypeError:
+            return retval
+        else:
+            return [retval] * size
+
+    z = complex(1.0, 2.0)
+    coeffs = (2.0, 3.0, 4.0)
+    y = zeros.newton(f, z, args=coeffs, fprime=f_1, fprime2=f_2, tol=1e-6)
+    # (-0.75000000000000078+1.1989578808281789j)
+    assert_allclose(f(y, *coeffs), 0, atol=1e-6)
+    z = [z] * 10
+    coeffs = (2.0, 3.0, 4.0)
+    y = zeros.newton(f, z, args=coeffs, fprime=f_1, fprime2=f_2, tol=1e-6)
+    assert_allclose(f(y, *coeffs), 0, atol=1e-6)
+
+
+def test_zero_der_nz_dp():
+    """Test secant method with a non-zero dp, but an infinite newton step"""
+    # Pick a symmetrical function and choose a point that, together with dx,
+    # makes a secant that is a flat line with zero slope, e.g.
+    # f = (x - 100)**2, which has a root at x = 100 and is symmetric about
+    # the line x = 100. We have to pick a really big number so that this
+    # holds consistently; then find a point on each side so that the secant
+    # has zero slope.
+    dx = np.finfo(float).eps ** 0.33
+    # 100 - p0 = p1 - 100 = p0 * (1 + dx) + dx - 100
+    # -> 200 = p0 * (2 + dx) + dx
+    p0 = (200.0 - dx) / (2.0 + dx)
+    with suppress_warnings() as sup:
+        sup.filter(RuntimeWarning, "RMS of")
+        x = zeros.newton(lambda y: (y - 100.0)**2, x0=[p0] * 10)
+    assert_allclose(x, [100] * 10)
+    # test scalar cases too
+    p0 = (2.0 - 1e-4) / (2.0 + 1e-4)
+    with suppress_warnings() as sup:
+        sup.filter(RuntimeWarning, "Tolerance of")
+        x = zeros.newton(lambda y: (y - 1.0) ** 2, x0=p0, disp=False)
+    assert_allclose(x, 1)
+    with pytest.raises(RuntimeError, match='Tolerance of'):
+        x = zeros.newton(lambda y: (y - 1.0) ** 2, x0=p0, disp=True)
+    p0 = (-2.0 + 1e-4) / (2.0 + 1e-4)
+    with suppress_warnings() as sup:
+        sup.filter(RuntimeWarning, "Tolerance of")
+        x = zeros.newton(lambda y: (y + 1.0) ** 2, x0=p0, disp=False)
+    assert_allclose(x, -1)
+    with pytest.raises(RuntimeError, match='Tolerance of'):
+        x = zeros.newton(lambda y: (y + 1.0) ** 2, x0=p0, disp=True)
+
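+
+# Verification sketch for the construction in test_zero_der_nz_dp (added for
+# illustration): with p1 = p0*(1 + dx) + dx, the second point described in
+# the comments above, p0 and p1 are symmetric about the root x = 100, so the
+# secant through (p0, f(p0)) and (p1, f(p1)) has zero slope.
+def _zero_slope_secant_sketch():
+    dx = np.finfo(float).eps ** 0.33
+    p0 = (200.0 - dx) / (2.0 + dx)
+    p1 = p0 * (1 + dx) + dx
+
+    def f(y):
+        return (y - 100.0) ** 2
+
+    assert_allclose(f(p0), f(p1))
+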
+
+def test_array_newton_failures():
+    """Test that array newton fails as expected"""
+    # p = 0.68  # [MPa]
+    # dp = -0.068 * 1e6  # [Pa]
+    # T = 323  # [K]
+    diameter = 0.10  # [m]
+    # L = 100  # [m]
+    roughness = 0.00015  # [m]
+    rho = 988.1  # [kg/m**3]
+    mu = 5.4790e-04  # [Pa*s]
+    u = 2.488  # [m/s]
+    reynolds_number = rho * u * diameter / mu  # Reynolds number
+
+    def colebrook_eqn(darcy_friction, re, dia):
+        return (1 / np.sqrt(darcy_friction) +
+                2 * np.log10(roughness / 3.7 / dia +
+                             2.51 / re / np.sqrt(darcy_friction)))
+
+    # only some failures
+    with pytest.warns(RuntimeWarning):
+        result = zeros.newton(
+            colebrook_eqn, x0=[0.01, 0.2, 0.02223, 0.3], maxiter=2,
+            args=[reynolds_number, diameter], full_output=True
+        )
+        assert not result.converged.all()
+    # they all fail
+    with pytest.raises(RuntimeError):
+        result = zeros.newton(
+            colebrook_eqn, x0=[0.01] * 2, maxiter=2,
+            args=[reynolds_number, diameter], full_output=True
+        )
+
+
+# this test should **not** raise a RuntimeWarning
+def test_gh8904_zeroder_at_root_fails():
+    """Test that Newton or Halley don't warn if zero derivative at root"""
+
+    # a function that has a zero derivative at its root
+    def f_zeroder_root(x):
+        return x**3 - x**2
+
+    # should work with secant
+    r = zeros.newton(f_zeroder_root, x0=0)
+    assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
+    # test again with array
+    r = zeros.newton(f_zeroder_root, x0=[0]*10)
+    assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
+
+    # 1st derivative
+    def fder(x):
+        return 3 * x**2 - 2 * x
+
+    # 2nd derivative
+    def fder2(x):
+        return 6*x - 2
+
+    # should work with newton and halley
+    r = zeros.newton(f_zeroder_root, x0=0, fprime=fder)
+    assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
+    r = zeros.newton(f_zeroder_root, x0=0, fprime=fder,
+                     fprime2=fder2)
+    assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
+    # test again with array
+    r = zeros.newton(f_zeroder_root, x0=[0]*10, fprime=fder)
+    assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
+    r = zeros.newton(f_zeroder_root, x0=[0]*10, fprime=fder,
+                     fprime2=fder2)
+    assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
+
+    # also test that if a root is found we do not raise RuntimeWarning even
+    # if the derivative is zero, e.g. at x = 0.5, fval = -0.125 and
+    # fder = -0.25, so the next guess is 0.5 - (-0.125/-0.25) = 0, which is
+    # the root; but if the solver continued from that guess, it would
+    # calculate a zero derivative, so it should return the root w/o a
+    # RuntimeWarning
+    r = zeros.newton(f_zeroder_root, x0=0.5, fprime=fder)
+    assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
+    # test again with array
+    r = zeros.newton(f_zeroder_root, x0=[0.5]*10, fprime=fder)
+    assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
+    # doesn't apply to halley
+
+
+def test_gh_8881():
+    r"""Test that Halley's method realizes that the 2nd order adjustment
+    is too big and drops off to the 1st order adjustment."""
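+    # For reference: each Halley iteration divides the Newton step f/f' by
+    # (1 - 0.5 * (f/f') * (f''/f')); when that second-order factor would
+    # overcorrect, a robust implementation falls back to the plain Newton
+    # step instead of stepping in the wrong direction.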
+    n = 9
+
+    def f(x):
+        return power(x, 1.0/n) - power(n, 1.0/n)
+
+    def fp(x):
+        return power(x, (1.0-n)/n)/n
+
+    def fpp(x):
+        return power(x, (1.0-2*n)/n) * (1.0/n) * (1.0-n)/n
+
+    x0 = 0.1
+    # The root is at x=9.
+    # The function has positive slope, x0 < root.
+    # Newton succeeds in 8 iterations
+    rt, r = newton(f, x0, fprime=fp, full_output=True)
+    assert r.converged
+    # Before Issue 8881/PR 8882, Halley's method would send x in the wrong direction.
+    # Check that it now succeeds.
+    rt, r = newton(f, x0, fprime=fp, fprime2=fpp, full_output=True)
+    assert r.converged
+
+
+def test_gh_9608_preserve_array_shape():
+    """
+    Test that shape is preserved for array inputs even if fprime or fprime2 is
+    scalar
+    """
+    def f(x):
+        return x**2
+
+    def fp(x):
+        return 2 * x
+
+    def fpp(x):
+        return 2
+
+    x0 = np.array([-2], dtype=np.float32)
+    rt, r = newton(f, x0, fprime=fp, fprime2=fpp, full_output=True)
+    assert r.converged
+
+    x0_array = np.array([-2, -3], dtype=np.float32)
+    # This next invocation should fail because fpp returns a scalar,
+    # while the array code path expects fprime2 to match the shape of x0
+    with pytest.raises(IndexError):
+        result = zeros.newton(
+            f, x0_array, fprime=fp, fprime2=fpp, full_output=True
+        )
+
+    def fpp_array(x):
+        return np.full(np.shape(x), 2, dtype=np.float32)
+
+    result = zeros.newton(
+        f, x0_array, fprime=fp, fprime2=fpp_array, full_output=True
+    )
+    assert result.converged.all()
+
+
+@pytest.mark.parametrize(
+    "maximum_iterations,flag_expected",
+    [(10, zeros.CONVERR), (100, zeros.CONVERGED)])
+def test_gh9254_flag_if_maxiter_exceeded(maximum_iterations, flag_expected):
+    """
+    Test that if the maximum iterations is exceeded that the flag is not
+    converged.
+    """
+    result = zeros.brentq(
+        lambda x: ((1.2*x - 2.3)*x + 3.4)*x - 4.5,
+        -30, 30, (), 1e-6, 1e-6, maximum_iterations,
+        full_output=True, disp=False)
+    assert result[1].flag == flag_expected
+    if flag_expected == zeros.CONVERR:
+        # didn't converge because exceeded maximum iterations
+        assert result[1].iterations == maximum_iterations
+    elif flag_expected == zeros.CONVERGED:
+        # converged before maximum iterations
+        assert result[1].iterations < maximum_iterations
+
+
+def test_gh9551_raise_error_if_disp_true():
+    """Test that if disp is true then zero derivative raises RuntimeError"""
+
+    def f(x):
+        return x*x + 1
+
+    def f_p(x):
+        return 2*x
+
+    assert_warns(RuntimeWarning, zeros.newton, f, 1.0, f_p, disp=False)
+    with pytest.raises(
+            RuntimeError,
+            match=r'^Derivative was zero\. Failed to converge after \d+ iterations, value is [+-]?\d*\.\d+\.$'):
+        zeros.newton(f, 1.0, f_p)
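+    # a complex initial guess lets the iteration leave the real axis and
+    # converge to the complex root +1j of x**2 + 1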
+    root = zeros.newton(f, complex(10.0, 10.0), f_p)
+    assert_allclose(root, complex(0.0, 1.0))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/tnc.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/tnc.py
new file mode 100644
index 00000000..9188ddb0
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/tnc.py
@@ -0,0 +1,53 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.optimize` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _tnc
+
+
+__all__ = [  # noqa: F822
+    'CONSTANT',
+    'FCONVERGED',
+    'INFEASIBLE',
+    'LOCALMINIMUM',
+    'LSFAIL',
+    'MAXFUN',
+    'MSGS',
+    'MSG_ALL',
+    'MSG_EXIT',
+    'MSG_INFO',
+    'MSG_ITER',
+    'MSG_NONE',
+    'MSG_VERS',
+    'MemoizeJac',
+    'NOPROGRESS',
+    'OptimizeResult',
+    'RCSTRINGS',
+    'USERABORT',
+    'XCONVERGED',
+    'array',
+    'asfarray',
+    'fmin_tnc',
+    'inf',
+    'moduleTNC',
+    'old_bound_to_new',
+    'zeros',
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.optimize.tnc is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.optimize instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.optimize` namespace, "
+                  "the `scipy.optimize.tnc` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_tnc, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/optimize/zeros.py b/__packaged__/coreml/.python_dependencies/scipy/optimize/zeros.py
new file mode 100644
index 00000000..239265a1
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/optimize/zeros.py
@@ -0,0 +1,44 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.optimize` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _zeros_py
+
+
+__all__ = [  # noqa: F822
+    'CONVERGED',
+    'CONVERR',
+    'INPROGRESS',
+    'RootResults',
+    'SIGNERR',
+    'TOMS748Solver',
+    'VALUEERR',
+    'bisect',
+    'brenth',
+    'brentq',
+    'flag_map',
+    'namedtuple',
+    'newton',
+    'operator',
+    'results_c',
+    'ridder',
+    'toms748',
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.optimize.zeros is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.optimize instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.optimize` namespace, "
+                  "the `scipy.optimize.zeros` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_zeros_py, name)
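+
+# Illustrative behaviour of this shim (comment only): accessing a name such
+# as `scipy.optimize.zeros.newton` emits the DeprecationWarning above and
+# then returns the object from the private `_zeros_py` module, so legacy
+# imports keep working until their removal in SciPy v2.0.0.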
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/signal/__init__.py
new file mode 100644
index 00000000..d3efc5bb
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/__init__.py
@@ -0,0 +1,386 @@
+"""
+=======================================
+Signal processing (:mod:`scipy.signal`)
+=======================================
+
+Convolution
+===========
+
+.. autosummary::
+   :toctree: generated/
+
+   convolve           -- N-D convolution.
+   correlate          -- N-D correlation.
+   fftconvolve        -- N-D convolution using the FFT.
+   oaconvolve         -- N-D convolution using the overlap-add method.
+   convolve2d         -- 2-D convolution (more options).
+   correlate2d        -- 2-D correlation (more options).
+   sepfir2d           -- Convolve with a 2-D separable FIR filter.
+   choose_conv_method -- Chooses the faster of FFT and direct convolution methods.
+   correlation_lags   -- Determines lag indices for 1D cross-correlation.
+
+B-splines
+=========
+
+.. autosummary::
+   :toctree: generated/
+
+   bspline        -- B-spline basis function of order n.
+   cubic          -- B-spline basis function of order 3.
+   quadratic      -- B-spline basis function of order 2.
+   gauss_spline   -- Gaussian approximation to the B-spline basis function.
+   cspline1d      -- Coefficients for 1-D cubic (3rd order) B-spline.
+   qspline1d      -- Coefficients for 1-D quadratic (2nd order) B-spline.
+   cspline2d      -- Coefficients for 2-D cubic (3rd order) B-spline.
+   qspline2d      -- Coefficients for 2-D quadratic (2nd order) B-spline.
+   cspline1d_eval -- Evaluate a cubic spline at the given points.
+   qspline1d_eval -- Evaluate a quadratic spline at the given points.
+   spline_filter  -- Smoothing spline (cubic) filtering of a rank-2 array.
+
+Filtering
+=========
+
+.. autosummary::
+   :toctree: generated/
+
+   order_filter  -- N-D order filter.
+   medfilt       -- N-D median filter.
+   medfilt2d     -- 2-D median filter (faster).
+   wiener        -- N-D Wiener filter.
+
+   symiirorder1  -- 2nd-order IIR filter (cascade of first-order systems).
+   symiirorder2  -- 4th-order IIR filter (cascade of second-order systems).
+   lfilter       -- 1-D FIR and IIR digital linear filtering.
+   lfiltic       -- Construct initial conditions for `lfilter`.
+   lfilter_zi    -- Compute an initial state zi for the lfilter function that
+                 -- corresponds to the steady state of the step response.
+   filtfilt      -- A forward-backward filter.
+   savgol_filter -- Filter a signal using the Savitzky-Golay filter.
+
+   deconvolve    -- 1-D deconvolution using lfilter.
+
+   sosfilt       -- 1-D IIR digital linear filtering using
+                 -- a second-order sections filter representation.
+   sosfilt_zi    -- Compute an initial state zi for the sosfilt function that
+                 -- corresponds to the steady state of the step response.
+   sosfiltfilt   -- A forward-backward filter for second-order sections.
+   hilbert       -- Compute 1-D analytic signal, using the Hilbert transform.
+   hilbert2      -- Compute 2-D analytic signal, using the Hilbert transform.
+
+   decimate      -- Downsample a signal.
+   detrend       -- Remove linear and/or constant trends from data.
+   resample      -- Resample using Fourier method.
+   resample_poly -- Resample using polyphase filtering method.
+   upfirdn       -- Upsample, apply FIR filter, downsample.
+
+Filter design
+=============
+
+.. autosummary::
+   :toctree: generated/
+
+   bilinear      -- Digital filter from an analog filter using
+                    -- the bilinear transform.
+   bilinear_zpk  -- Digital filter from an analog filter using
+                    -- the bilinear transform.
+   findfreqs     -- Find array of frequencies for computing filter response.
+   firls         -- FIR filter design using least-squares error minimization.
+   firwin        -- Windowed FIR filter design, with frequency response
+                    -- defined as pass and stop bands.
+   firwin2       -- Windowed FIR filter design, with arbitrary frequency
+                    -- response.
+   freqs         -- Analog filter frequency response from TF coefficients.
+   freqs_zpk     -- Analog filter frequency response from ZPK coefficients.
+   freqz         -- Digital filter frequency response from TF coefficients.
+   freqz_zpk     -- Digital filter frequency response from ZPK coefficients.
+   sosfreqz      -- Digital filter frequency response for SOS format filter.
+   gammatone     -- FIR and IIR gammatone filter design.
+   group_delay   -- Digital filter group delay.
+   iirdesign     -- IIR filter design given bands and gains.
+   iirfilter     -- IIR filter design given order and critical frequencies.
+   kaiser_atten  -- Compute the attenuation of a Kaiser FIR filter, given
+                    -- the number of taps and the transition width at
+                    -- discontinuities in the frequency response.
+   kaiser_beta   -- Compute the Kaiser parameter beta, given the desired
+                    -- FIR filter attenuation.
+   kaiserord     -- Design a Kaiser window to limit ripple and width of
+                    -- transition region.
+   minimum_phase -- Convert a linear phase FIR filter to minimum phase.
+   savgol_coeffs -- Compute the FIR filter coefficients for a Savitzky-Golay
+                    -- filter.
+   remez         -- Optimal FIR filter design.
+
+   unique_roots  -- Unique roots and their multiplicities.
+   residue       -- Partial fraction expansion of b(s) / a(s).
+   residuez      -- Partial fraction expansion of b(z) / a(z).
+   invres        -- Inverse partial fraction expansion for analog filter.
+   invresz       -- Inverse partial fraction expansion for digital filter.
+   BadCoefficients  -- Warning on badly conditioned filter coefficients.
+
+Lower-level filter design functions:
+
+.. autosummary::
+   :toctree: generated/
+
+   abcd_normalize -- Check state-space matrices and ensure they are rank-2.
+   band_stop_obj  -- Band Stop Objective Function for order minimization.
+   besselap       -- Return (z,p,k) for analog prototype of Bessel filter.
+   buttap         -- Return (z,p,k) for analog prototype of Butterworth filter.
+   cheb1ap        -- Return (z,p,k) for type I Chebyshev filter.
+   cheb2ap        -- Return (z,p,k) for type II Chebyshev filter.
+   cmplx_sort     -- Sort roots based on magnitude.
+   ellipap        -- Return (z,p,k) for analog prototype of elliptic filter.
+   lp2bp          -- Transform a lowpass filter prototype to a bandpass filter.
+   lp2bp_zpk      -- Transform a lowpass filter prototype to a bandpass filter.
+   lp2bs          -- Transform a lowpass filter prototype to a bandstop filter.
+   lp2bs_zpk      -- Transform a lowpass filter prototype to a bandstop filter.
+   lp2hp          -- Transform a lowpass filter prototype to a highpass filter.
+   lp2hp_zpk      -- Transform a lowpass filter prototype to a highpass filter.
+   lp2lp          -- Transform a lowpass filter prototype to a lowpass filter.
+   lp2lp_zpk      -- Transform a lowpass filter prototype to a lowpass filter.
+   normalize      -- Normalize polynomial representation of a transfer function.
+
+
+
+Matlab-style IIR filter design
+==============================
+
+.. autosummary::
+   :toctree: generated/
+
+   butter -- Butterworth
+   buttord
+   cheby1 -- Chebyshev Type I
+   cheb1ord
+   cheby2 -- Chebyshev Type II
+   cheb2ord
+   ellip -- Elliptic (Cauer)
+   ellipord
+   bessel -- Bessel (no order selection available -- try buttord)
+   iirnotch      -- Design second-order IIR notch digital filter.
+   iirpeak       -- Design second-order IIR peak (resonant) digital filter.
+   iircomb       -- Design IIR comb filter.
+
+Continuous-time linear systems
+==============================
+
+.. autosummary::
+   :toctree: generated/
+
+   lti              -- Continuous-time linear time invariant system base class.
+   StateSpace       -- Linear time invariant system in state space form.
+   TransferFunction -- Linear time invariant system in transfer function form.
+   ZerosPolesGain   -- Linear time invariant system in zeros, poles, gain form.
+   lsim             -- Continuous-time simulation of output to linear system.
+   lsim2            -- Like lsim, but `scipy.integrate.odeint` is used.
+   impulse          -- Impulse response of linear, time-invariant (LTI) system.
+   impulse2         -- Like impulse, but `scipy.integrate.odeint` is used.
+   step             -- Step response of continuous-time LTI system.
+   step2            -- Like step, but `scipy.integrate.odeint` is used.
+   freqresp         -- Frequency response of a continuous-time LTI system.
+   bode             -- Bode magnitude and phase data (continuous-time LTI).
+
+Discrete-time linear systems
+============================
+
+.. autosummary::
+   :toctree: generated/
+
+   dlti             -- Discrete-time linear time invariant system base class.
+   StateSpace       -- Linear time invariant system in state space form.
+   TransferFunction -- Linear time invariant system in transfer function form.
+   ZerosPolesGain   -- Linear time invariant system in zeros, poles, gain form.
+   dlsim            -- Simulation of output to a discrete-time linear system.
+   dimpulse         -- Impulse response of a discrete-time LTI system.
+   dstep            -- Step response of a discrete-time LTI system.
+   dfreqresp        -- Frequency response of a discrete-time LTI system.
+   dbode            -- Bode magnitude and phase data (discrete-time LTI).
+
+LTI representations
+===================
+
+.. autosummary::
+   :toctree: generated/
+
+   tf2zpk        -- Transfer function to zero-pole-gain.
+   tf2sos        -- Transfer function to second-order sections.
+   tf2ss         -- Transfer function to state-space.
+   zpk2tf        -- Zero-pole-gain to transfer function.
+   zpk2sos       -- Zero-pole-gain to second-order sections.
+   zpk2ss        -- Zero-pole-gain to state-space.
+   ss2tf         -- State-space to transfer function.
+   ss2zpk        -- State-space to zero-pole-gain.
+   sos2zpk       -- Second-order sections to zero-pole-gain.
+   sos2tf        -- Second-order sections to transfer function.
+   cont2discrete -- Continuous-time to discrete-time LTI conversion.
+   place_poles   -- Pole placement.
+
+Waveforms
+=========
+
+.. autosummary::
+   :toctree: generated/
+
+   chirp        -- Frequency swept cosine signal, with several freq functions.
+   gausspulse   -- Gaussian modulated sinusoid.
+   max_len_seq  -- Maximum length sequence.
+   sawtooth     -- Periodic sawtooth.
+   square       -- Square wave.
+   sweep_poly   -- Frequency swept cosine signal; freq is arbitrary polynomial.
+   unit_impulse -- Discrete unit impulse.
+
+Window functions
+================
+
+For window functions, see the `scipy.signal.windows` namespace.
+
+In the `scipy.signal` namespace, there is a convenience function to
+obtain these windows by name:
+
+.. autosummary::
+   :toctree: generated/
+
+   get_window -- Return a window of a given length and type.
+
+Wavelets
+========
+
+.. autosummary::
+   :toctree: generated/
+
+   cascade      -- Compute scaling function and wavelet from coefficients.
+   daub         -- Return low-pass.
+   morlet       -- Complex Morlet wavelet.
+   qmf          -- Return quadrature mirror filter from low-pass.
+   ricker       -- Return ricker wavelet.
+   morlet2      -- Return Morlet wavelet, compatible with cwt.
+   cwt          -- Perform continuous wavelet transform.
+
+Peak finding
+============
+
+.. autosummary::
+   :toctree: generated/
+
+   argrelmin        -- Calculate the relative minima of data.
+   argrelmax        -- Calculate the relative maxima of data.
+   argrelextrema    -- Calculate the relative extrema of data.
+   find_peaks       -- Find a subset of peaks inside a signal.
+   find_peaks_cwt   -- Find peaks in a 1-D array with wavelet transformation.
+   peak_prominences -- Calculate the prominence of each peak in a signal.
+   peak_widths      -- Calculate the width of each peak in a signal.
+
+Spectral analysis
+=================
+
+.. autosummary::
+   :toctree: generated/
+
+   periodogram    -- Compute a (modified) periodogram.
+   welch          -- Compute a periodogram using Welch's method.
+   csd            -- Compute the cross spectral density, using Welch's method.
+   coherence      -- Compute the magnitude squared coherence, using Welch's method.
+   spectrogram    -- Compute the spectrogram.
+   lombscargle    -- Computes the Lomb-Scargle periodogram.
+   vectorstrength -- Computes the vector strength.
+   stft           -- Compute the Short Time Fourier Transform.
+   istft          -- Compute the Inverse Short Time Fourier Transform.
+   check_COLA     -- Check the COLA constraint for iSTFT reconstruction.
+   check_NOLA     -- Check the NOLA constraint for iSTFT reconstruction.
+
+Chirp Z-transform and Zoom FFT
+============================================
+
+.. autosummary::
+   :toctree: generated/
+
+   czt        -- Chirp z-transform convenience function.
+   zoom_fft   -- Zoom FFT convenience function.
+   CZT        -- Chirp z-transform function generator.
+   ZoomFFT    -- Zoom FFT function generator.
+   czt_points -- Output the z-plane points sampled by a chirp z-transform.
+
+The functions are simpler to use than the classes, but are less efficient when
+using the same transform on many arrays of the same length, since they
+repeatedly generate the same chirp signal with every call.  In these cases,
+use the classes to create a reusable function instead.
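+
+For instance (an illustrative sketch), a transform can be built once and
+reused on many same-length arrays::
+
+    >>> import numpy as np
+    >>> from scipy.signal import CZT
+    >>> czt_128 = CZT(n=128)           # precompute the constant chirps
+    >>> x, y = np.random.rand(128), np.random.rand(128)
+    >>> X, Y = czt_128(x), czt_128(y)  # reuse the same transform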
+
+"""
+from . import _sigtools, windows
+from ._waveforms import *
+from ._max_len_seq import max_len_seq
+from ._upfirdn import upfirdn
+
+from ._spline import (  # noqa: F401
+    cspline2d,
+    qspline2d,
+    sepfir2d,
+    symiirorder1,
+    symiirorder2,
+)
+
+from ._bsplines import *
+from ._filter_design import *
+from ._fir_filter_design import *
+from ._ltisys import *
+from ._lti_conversion import *
+from ._signaltools import *
+from ._savitzky_golay import savgol_coeffs, savgol_filter
+from ._spectral_py import *
+from ._wavelets import *
+from ._peak_finding import *
+from ._czt import *
+from .windows import get_window  # keep this one in signal namespace
+
+# Deprecated namespaces, to be removed in v2.0.0
+from . import (
+    bsplines, filter_design, fir_filter_design, lti_conversion, ltisys,
+    spectral, signaltools, waveforms, wavelets, spline
+)
+
+# deal with * -> windows.* doc-only soft-deprecation
+deprecated_windows = ('boxcar', 'triang', 'parzen', 'bohman', 'blackman',
+                      'nuttall', 'blackmanharris', 'flattop', 'bartlett',
+                      'barthann', 'hamming', 'kaiser', 'gaussian',
+                      'general_gaussian', 'chebwin', 'cosine',
+                      'hann', 'exponential', 'tukey')
+
+
+def deco(name):
+    f = getattr(windows, name)
+    # Add deprecation to docstring
+
+    def wrapped(*args, **kwargs):
+        return f(*args, **kwargs)
+
+    wrapped.__name__ = name
+    wrapped.__module__ = 'scipy.signal'
+    if hasattr(f, '__qualname__'):
+        wrapped.__qualname__ = f.__qualname__
+
+    if f.__doc__:
+        lines = f.__doc__.splitlines()
+        for li, line in enumerate(lines):
+            if line.strip() == 'Parameters':
+                break
+        else:
+            raise RuntimeError('dev error: badly formatted doc')
+        spacing = ' ' * line.find('P')
+        lines.insert(li, ('{0}.. warning:: scipy.signal.{1} is deprecated,\n'
+                          '{0}             use scipy.signal.windows.{1} '
+                          'instead.\n'.format(spacing, name)))
+        wrapped.__doc__ = '\n'.join(lines)
+
+    return wrapped
+
+
+for name in deprecated_windows:
+    locals()[name] = deco(name)
+
+del deprecated_windows, name, deco
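+
+# After this loop, legacy names such as `scipy.signal.hann` resolve to thin
+# wrappers around the corresponding `scipy.signal.windows` functions, with
+# the deprecation notice inserted into their docstrings above.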
+
+__all__ = [s for s in dir() if not s.startswith('_')]
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/_arraytools.py b/__packaged__/coreml/.python_dependencies/scipy/signal/_arraytools.py
new file mode 100644
index 00000000..6458aefb
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/_arraytools.py
@@ -0,0 +1,241 @@
+"""
+Functions for acting on an axis of an array.
+"""
+import numpy as np
+
+
+def axis_slice(a, start=None, stop=None, step=None, axis=-1):
+    """Take a slice along axis 'axis' from 'a'.
+
+    Parameters
+    ----------
+    a : numpy.ndarray
+        The array to be sliced.
+    start, stop, step : int or None
+        The slice parameters.
+    axis : int, optional
+        The axis of `a` to be sliced.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.signal._arraytools import axis_slice
+    >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+    >>> axis_slice(a, start=0, stop=1, axis=1)
+    array([[1],
+           [4],
+           [7]])
+    >>> axis_slice(a, start=1, axis=0)
+    array([[4, 5, 6],
+           [7, 8, 9]])
+
+    Notes
+    -----
+    The keyword arguments start, stop and step are used by calling
+    slice(start, stop, step). This implies axis_slice() does not
+    handle its arguments exactly the same as indexing. To select
+    a single index k, for example, use
+        axis_slice(a, start=k, stop=k+1)
+    In this case, the length of the axis 'axis' in the result will
+    be 1; the trivial dimension is not removed. (Use numpy.squeeze()
+    to remove trivial axes.)
+    """
+    a_slice = [slice(None)] * a.ndim
+    a_slice[axis] = slice(start, stop, step)
+    b = a[tuple(a_slice)]
+    return b
+
+
+def axis_reverse(a, axis=-1):
+    """Reverse the 1-D slices of `a` along axis `axis`.
+
+    Returns axis_slice(a, step=-1, axis=axis).
+    """
+    return axis_slice(a, step=-1, axis=axis)
+
+
+def odd_ext(x, n, axis=-1):
+    """
+    Odd extension at the boundaries of an array
+
+    Generate a new ndarray by making an odd extension of `x` along an axis.
+
+    Parameters
+    ----------
+    x : ndarray
+        The array to be extended.
+    n : int
+        The number of elements by which to extend `x` at each end of the axis.
+    axis : int, optional
+        The axis along which to extend `x`. Default is -1.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.signal._arraytools import odd_ext
+    >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
+    >>> odd_ext(a, 2)
+    array([[-1,  0,  1,  2,  3,  4,  5,  6,  7],
+           [-4, -1,  0,  1,  4,  9, 16, 23, 28]])
+
+    Odd extension is a "180 degree rotation" at the endpoints of the original
+    array:
+
+    >>> t = np.linspace(0, 1.5, 100)
+    >>> a = 0.9 * np.sin(2 * np.pi * t**2)
+    >>> b = odd_ext(a, 40)
+    >>> import matplotlib.pyplot as plt
+    >>> plt.plot(np.arange(-40, 140), b, 'b', lw=1, label='odd extension')
+    >>> plt.plot(np.arange(100), a, 'r', lw=2, label='original')
+    >>> plt.legend(loc='best')
+    >>> plt.show()
+    """
+    if n < 1:
+        return x
+    if n > x.shape[axis] - 1:
+        raise ValueError(("The extension length n (%d) is too big. " +
+                         "It must not exceed x.shape[axis]-1, which is %d.")
+                         % (n, x.shape[axis] - 1))
+    left_end = axis_slice(x, start=0, stop=1, axis=axis)
+    left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis)
+    right_end = axis_slice(x, start=-1, axis=axis)
+    right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis)
+    ext = np.concatenate((2 * left_end - left_ext,
+                          x,
+                          2 * right_end - right_ext),
+                         axis=axis)
+    return ext
+
+
+def even_ext(x, n, axis=-1):
+    """
+    Even extension at the boundaries of an array
+
+    Generate a new ndarray by making an even extension of `x` along an axis.
+
+    Parameters
+    ----------
+    x : ndarray
+        The array to be extended.
+    n : int
+        The number of elements by which to extend `x` at each end of the axis.
+    axis : int, optional
+        The axis along which to extend `x`. Default is -1.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.signal._arraytools import even_ext
+    >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
+    >>> even_ext(a, 2)
+    array([[ 3,  2,  1,  2,  3,  4,  5,  4,  3],
+           [ 4,  1,  0,  1,  4,  9, 16,  9,  4]])
+
+    Even extension is a "mirror image" at the boundaries of the original array:
+
+    >>> t = np.linspace(0, 1.5, 100)
+    >>> a = 0.9 * np.sin(2 * np.pi * t**2)
+    >>> b = even_ext(a, 40)
+    >>> import matplotlib.pyplot as plt
+    >>> plt.plot(np.arange(-40, 140), b, 'b', lw=1, label='even extension')
+    >>> plt.plot(np.arange(100), a, 'r', lw=2, label='original')
+    >>> plt.legend(loc='best')
+    >>> plt.show()
+    """
+    if n < 1:
+        return x
+    if n > x.shape[axis] - 1:
+        raise ValueError(("The extension length n (%d) is too big. " +
+                         "It must not exceed x.shape[axis]-1, which is %d.")
+                         % (n, x.shape[axis] - 1))
+    left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis)
+    right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis)
+    ext = np.concatenate((left_ext,
+                          x,
+                          right_ext),
+                         axis=axis)
+    return ext
+
+
+def const_ext(x, n, axis=-1):
+    """
+    Constant extension at the boundaries of an array
+
+    Generate a new ndarray that is a constant extension of `x` along an axis.
+
+    The extension repeats the values at the first and last element of
+    the axis.
+
+    Parameters
+    ----------
+    x : ndarray
+        The array to be extended.
+    n : int
+        The number of elements by which to extend `x` at each end of the axis.
+    axis : int, optional
+        The axis along which to extend `x`. Default is -1.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.signal._arraytools import const_ext
+    >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
+    >>> const_ext(a, 2)
+    array([[ 1,  1,  1,  2,  3,  4,  5,  5,  5],
+           [ 0,  0,  0,  1,  4,  9, 16, 16, 16]])
+
+    Constant extension continues with the same values as the endpoints of the
+    array:
+
+    >>> t = np.linspace(0, 1.5, 100)
+    >>> a = 0.9 * np.sin(2 * np.pi * t**2)
+    >>> b = const_ext(a, 40)
+    >>> import matplotlib.pyplot as plt
+    >>> plt.plot(np.arange(-40, 140), b, 'b', lw=1, label='constant extension')
+    >>> plt.plot(np.arange(100), a, 'r', lw=2, label='original')
+    >>> plt.legend(loc='best')
+    >>> plt.show()
+    """
+    if n < 1:
+        return x
+    left_end = axis_slice(x, start=0, stop=1, axis=axis)
+    ones_shape = [1] * x.ndim
+    ones_shape[axis] = n
+    ones = np.ones(ones_shape, dtype=x.dtype)
+    left_ext = ones * left_end
+    right_end = axis_slice(x, start=-1, axis=axis)
+    right_ext = ones * right_end
+    ext = np.concatenate((left_ext,
+                          x,
+                          right_ext),
+                         axis=axis)
+    return ext
+
+
+def zero_ext(x, n, axis=-1):
+    """
+    Zero padding at the boundaries of an array
+
+    Generate a new ndarray that is a zero-padded extension of `x` along
+    an axis.
+
+    Parameters
+    ----------
+    x : ndarray
+        The array to be extended.
+    n : int
+        The number of elements by which to extend `x` at each end of the
+        axis.
+    axis : int, optional
+        The axis along which to extend `x`. Default is -1.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.signal._arraytools import zero_ext
+    >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
+    >>> zero_ext(a, 2)
+    array([[ 0,  0,  1,  2,  3,  4,  5,  0,  0],
+           [ 0,  0,  0,  1,  4,  9, 16,  0,  0]])
+    """
+    if n < 1:
+        return x
+    zeros_shape = list(x.shape)
+    zeros_shape[axis] = n
+    zeros = np.zeros(zeros_shape, dtype=x.dtype)
+    ext = np.concatenate((zeros, x, zeros), axis=axis)
+    return ext
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/_bsplines.py b/__packaged__/coreml/.python_dependencies/scipy/signal/_bsplines.py
new file mode 100644
index 00000000..34fe3540
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/_bsplines.py
@@ -0,0 +1,683 @@
+from numpy import (logical_and, asarray, pi, zeros_like,
+                   piecewise, array, arctan2, tan, zeros, arange, floor)
+from numpy.core.umath import (sqrt, exp, greater, less, cos, add, sin,
+                              less_equal, greater_equal)
+
+# From splinemodule.c
+from ._spline import cspline2d, sepfir2d
+
+from scipy.special import comb
+from scipy._lib._util import float_factorial
+
+__all__ = ['spline_filter', 'bspline', 'gauss_spline', 'cubic', 'quadratic',
+           'cspline1d', 'qspline1d', 'cspline1d_eval', 'qspline1d_eval']
+
+
+def spline_filter(Iin, lmbda=5.0):
+    """Smoothing spline (cubic) filtering of a rank-2 array.
+
+    Filter an input data set, `Iin`, using a (cubic) smoothing spline of
+    fall-off `lmbda`.
+
+    Parameters
+    ----------
+    Iin : array_like
+        input data set
+    lmbda : float, optional
+        spline smoothing fall-off value, default is `5.0`.
+
+    Returns
+    -------
+    res : ndarray
+        filtered input data
+
+    Examples
+    --------
+    We can filter a multidimensional signal (e.g. a 2D image) using a cubic
+    B-spline filter:
+
+    >>> import numpy as np
+    >>> from scipy.signal import spline_filter
+    >>> import matplotlib.pyplot as plt
+    >>> orig_img = np.eye(20)  # create an image
+    >>> orig_img[10, :] = 1.0
+    >>> sp_filter = spline_filter(orig_img, lmbda=0.1)
+    >>> f, ax = plt.subplots(1, 2, sharex=True)
+    >>> for ind, data in enumerate([[orig_img, "original image"],
+    ...                             [sp_filter, "spline filter"]]):
+    ...     ax[ind].imshow(data[0], cmap='gray_r')
+    ...     ax[ind].set_title(data[1])
+    >>> plt.tight_layout()
+    >>> plt.show()
+
+    """
+    intype = Iin.dtype.char
+    hcol = array([1.0, 4.0, 1.0], 'f') / 6.0
+    if intype in ['F', 'D']:
+        Iin = Iin.astype('F')
+        ckr = cspline2d(Iin.real, lmbda)
+        cki = cspline2d(Iin.imag, lmbda)
+        outr = sepfir2d(ckr, hcol, hcol)
+        outi = sepfir2d(cki, hcol, hcol)
+        out = (outr + 1j * outi).astype(intype)
+    elif intype in ['f', 'd']:
+        ckr = cspline2d(Iin, lmbda)
+        out = sepfir2d(ckr, hcol, hcol)
+        out = out.astype(intype)
+    else:
+        raise TypeError("Invalid data type for Iin")
+    return out
+
+
+_splinefunc_cache = {}
+
+
+def _bspline_piecefunctions(order):
+    """Returns the function defined over the left-side pieces for a bspline of
+    a given order.
+
+    The 0th piece is the first one less than 0. The last piece is a function
+    identical to 0 (returned as the constant 0). (There are order//2 + 2 total
+    pieces).
+
+    Also returns the condition functions that when evaluated return boolean
+    arrays for use with `numpy.piecewise`.
+    """
+    try:
+        return _splinefunc_cache[order]
+    except KeyError:
+        pass
+
+    def condfuncgen(num, val1, val2):
+        if num == 0:
+            return lambda x: logical_and(less_equal(x, val1),
+                                         greater_equal(x, val2))
+        elif num == 2:
+            return lambda x: less_equal(x, val2)
+        else:
+            return lambda x: logical_and(less(x, val1),
+                                         greater_equal(x, val2))
+
+    last = order // 2 + 2
+    if order % 2:
+        startbound = -1.0
+    else:
+        startbound = -0.5
+    condfuncs = [condfuncgen(0, 0, startbound)]
+    bound = startbound
+    for num in range(1, last - 1):
+        condfuncs.append(condfuncgen(1, bound, bound - 1))
+        bound = bound - 1
+    condfuncs.append(condfuncgen(2, 0, -(order + 1) / 2.0))
+
+    # final value of bound is used in piecefuncgen below
+
+    # the functions to evaluate are taken from the left-hand side
+    #  in the general expression derived from the central difference
+    #  operator (because they involve fewer terms).
+
+    fval = float_factorial(order)
+
+    def piecefuncgen(num):
+        Mk = order // 2 - num
+        if (Mk < 0):
+            return 0  # final function is 0
+        coeffs = [(1 - 2 * (k % 2)) * float(comb(order + 1, k, exact=1)) / fval
+                  for k in range(Mk + 1)]
+        shifts = [-bound - k for k in range(Mk + 1)]
+
+        def thefunc(x):
+            res = 0.0
+            for k in range(Mk + 1):
+                res += coeffs[k] * (x + shifts[k]) ** order
+            return res
+        return thefunc
+
+    funclist = [piecefuncgen(k) for k in range(last)]
+
+    _splinefunc_cache[order] = (funclist, condfuncs)
+
+    return funclist, condfuncs
+
+
+def bspline(x, n):
+    """B-spline basis function of order n.
+
+    Parameters
+    ----------
+    x : array_like
+        a knot vector
+    n : int
+        The order of the spline. Must be non-negative, i.e., n >= 0
+
+    Returns
+    -------
+    res : ndarray
+        B-spline basis function values
+
+    See Also
+    --------
+    cubic : A cubic B-spline.
+    quadratic : A quadratic B-spline.
+
+    Notes
+    -----
+    Uses numpy.piecewise and an automatic function generator.
+
+    Examples
+    --------
+    We can calculate B-spline basis functions of several orders:
+
+    >>> import numpy as np
+    >>> from scipy.signal import bspline, cubic, quadratic
+    >>> bspline(0.0, 1)
+    1
+
+    >>> knots = [-1.0, 0.0, -1.0]
+    >>> bspline(knots, 2)
+    array([0.125, 0.75, 0.125])
+
+    >>> np.array_equal(bspline(knots, 2), quadratic(knots))
+    True
+
+    >>> np.array_equal(bspline(knots, 3), cubic(knots))
+    True
+
+    """
+    ax = -abs(asarray(x))
+    # number of pieces on the left-side is (n+1)/2
+    funclist, condfuncs = _bspline_piecefunctions(n)
+    condlist = [func(ax) for func in condfuncs]
+    return piecewise(ax, condlist, funclist)
+
+
+def gauss_spline(x, n):
+    r"""Gaussian approximation to B-spline basis function of order n.
+
+    Parameters
+    ----------
+    x : array_like
+        a knot vector
+    n : int
+        The order of the spline. Must be non-negative, i.e., n >= 0
+
+    Returns
+    -------
+    res : ndarray
+        B-spline basis function values approximated by a zero-mean Gaussian
+        function.
+
+    Notes
+    -----
+    The B-spline basis function can be approximated well by a zero-mean
+    Gaussian function with variance :math:`\sigma^2 = (n+1)/12` for large
+    `n`:
+
+    .. math::  \frac{1}{\sqrt{2\pi\sigma^2}}\exp\left(-\frac{x^2}{2\sigma^2}\right)
+
+    References
+    ----------
+    .. [1] Bouma H., Vilanova A., Bescos J.O., ter Haar Romeny B.M., Gerritsen
+       F.A. (2007) Fast and Accurate Gaussian Derivatives Based on B-Splines. In:
+       Sgallari F., Murli A., Paragios N. (eds) Scale Space and Variational
+       Methods in Computer Vision. SSVM 2007. Lecture Notes in Computer
+       Science, vol 4485. Springer, Berlin, Heidelberg
+    .. [2] http://folk.uio.no/inf3330/scripting/doc/python/SciPy/tutorial/old/node24.html
+
+    Examples
+    --------
+    We can calculate B-spline basis functions approximated by a Gaussian
+    distribution:
+
+    >>> import numpy as np
+    >>> from scipy.signal import gauss_spline, bspline
+    >>> knots = np.array([-1.0, 0.0, -1.0])
+    >>> gauss_spline(knots, 3)
+    array([0.15418033, 0.6909883, 0.15418033])  # may vary
+
+    >>> bspline(knots, 3)
+    array([0.16666667, 0.66666667, 0.16666667])  # may vary
+
+    """
+    x = asarray(x)
+    signsq = (n + 1) / 12.0
+    return 1 / sqrt(2 * pi * signsq) * exp(-x ** 2 / 2 / signsq)
+
+
+def cubic(x):
+    """A cubic B-spline.
+
+    This is a special case of `bspline`, and equivalent to ``bspline(x, 3)``.
+
+    Parameters
+    ----------
+    x : array_like
+        a knot vector
+
+    Returns
+    -------
+    res : ndarray
+        Cubic B-spline basis function values
+
+    See Also
+    --------
+    bspline : B-spline basis function of order n
+    quadratic : A quadratic B-spline.
+
+    Examples
+    --------
+    We can calculate B-spline basis functions of several orders:
+
+    >>> import numpy as np
+    >>> from scipy.signal import bspline, cubic, quadratic
+    >>> bspline(0.0, 1)
+    1
+
+    >>> knots = [-1.0, 0.0, -1.0]
+    >>> bspline(knots, 2)
+    array([0.125, 0.75, 0.125])
+
+    >>> np.array_equal(bspline(knots, 2), quadratic(knots))
+    True
+
+    >>> np.array_equal(bspline(knots, 3), cubic(knots))
+    True
+
+    """
+    ax = abs(asarray(x))
+    res = zeros_like(ax)
+    cond1 = less(ax, 1)
+    if cond1.any():
+        ax1 = ax[cond1]
+        res[cond1] = 2.0 / 3 - 1.0 / 2 * ax1 ** 2 * (2 - ax1)
+    cond2 = ~cond1 & less(ax, 2)
+    if cond2.any():
+        ax2 = ax[cond2]
+        res[cond2] = 1.0 / 6 * (2 - ax2) ** 3
+    return res
+
+
+def quadratic(x):
+    """A quadratic B-spline.
+
+    This is a special case of `bspline`, and equivalent to ``bspline(x, 2)``.
+
+    Parameters
+    ----------
+    x : array_like
+        a knot vector
+
+    Returns
+    -------
+    res : ndarray
+        Quadratic B-spline basis function values
+
+    See Also
+    --------
+    bspline : B-spline basis function of order n
+    cubic : A cubic B-spline.
+
+    Examples
+    --------
+    We can calculate B-spline basis functions of several orders:
+
+    >>> import numpy as np
+    >>> from scipy.signal import bspline, cubic, quadratic
+    >>> bspline(0.0, 1)
+    1
+
+    >>> knots = [-1.0, 0.0, -1.0]
+    >>> bspline(knots, 2)
+    array([0.125, 0.75, 0.125])
+
+    >>> np.array_equal(bspline(knots, 2), quadratic(knots))
+    True
+
+    >>> np.array_equal(bspline(knots, 3), cubic(knots))
+    True
+
+    """
+    ax = abs(asarray(x))
+    res = zeros_like(ax)
+    cond1 = less(ax, 0.5)
+    if cond1.any():
+        ax1 = ax[cond1]
+        res[cond1] = 0.75 - ax1 ** 2
+    cond2 = ~cond1 & less(ax, 1.5)
+    if cond2.any():
+        ax2 = ax[cond2]
+        res[cond2] = (ax2 - 1.5) ** 2 / 2.0
+    return res
+
+
+def _coeff_smooth(lam):
+    xi = 1 - 96 * lam + 24 * lam * sqrt(3 + 144 * lam)
+    omeg = arctan2(sqrt(144 * lam - 1), sqrt(xi))
+    rho = (24 * lam - 1 - sqrt(xi)) / (24 * lam)
+    rho = rho * sqrt((48 * lam + 24 * lam * sqrt(3 + 144 * lam)) / xi)
+    return rho, omeg
+
+
+def _hc(k, cs, rho, omega):
+    return (cs / sin(omega) * (rho ** k) * sin(omega * (k + 1)) *
+            greater(k, -1))
+
+
+def _hs(k, cs, rho, omega):
+    c0 = (cs * cs * (1 + rho * rho) / (1 - rho * rho) /
+          (1 - 2 * rho * rho * cos(2 * omega) + rho ** 4))
+    gamma = (1 - rho * rho) / (1 + rho * rho) / tan(omega)
+    ak = abs(k)
+    return c0 * rho ** ak * (cos(omega * ak) + gamma * sin(omega * ak))
+
+
+def _cubic_smooth_coeff(signal, lamb):
+    rho, omega = _coeff_smooth(lamb)
+    cs = 1 - 2 * rho * cos(omega) + rho * rho
+    K = len(signal)
+    yp = zeros((K,), signal.dtype.char)
+    k = arange(K)
+    yp[0] = (_hc(0, cs, rho, omega) * signal[0] +
+             add.reduce(_hc(k + 1, cs, rho, omega) * signal))
+
+    yp[1] = (_hc(0, cs, rho, omega) * signal[0] +
+             _hc(1, cs, rho, omega) * signal[1] +
+             add.reduce(_hc(k + 2, cs, rho, omega) * signal))
+
+    for n in range(2, K):
+        yp[n] = (cs * signal[n] + 2 * rho * cos(omega) * yp[n - 1] -
+                 rho * rho * yp[n - 2])
+
+    y = zeros((K,), signal.dtype.char)
+
+    y[K - 1] = add.reduce((_hs(k, cs, rho, omega) +
+                           _hs(k + 1, cs, rho, omega)) * signal[::-1])
+    y[K - 2] = add.reduce((_hs(k - 1, cs, rho, omega) +
+                           _hs(k + 2, cs, rho, omega)) * signal[::-1])
+
+    for n in range(K - 3, -1, -1):
+        y[n] = (cs * yp[n] + 2 * rho * cos(omega) * y[n + 1] -
+                rho * rho * y[n + 2])
+
+    return y
+
+
+def _cubic_coeff(signal):
+    zi = -2 + sqrt(3)
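+    # -2 + sqrt(3) is the single pole of the cubic B-spline prefilter; the
+    # causal and anti-causal recursions below invert the FIR kernel
+    # [1., 4., 1.] / 6. under mirror-symmetric boundary conditions
+    # (recursive B-spline filtering in the style of Unser et al.)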
+    K = len(signal)
+    yplus = zeros((K,), signal.dtype.char)
+    powers = zi ** arange(K)
+    yplus[0] = signal[0] + zi * add.reduce(powers * signal)
+    for k in range(1, K):
+        yplus[k] = signal[k] + zi * yplus[k - 1]
+    output = zeros((K,), signal.dtype)
+    output[K - 1] = zi / (zi - 1) * yplus[K - 1]
+    for k in range(K - 2, -1, -1):
+        output[k] = zi * (output[k + 1] - yplus[k])
+    return output * 6.0
+
+
+def _quadratic_coeff(signal):
+    zi = -3 + 2 * sqrt(2.0)
+    K = len(signal)
+    yplus = zeros((K,), signal.dtype.char)
+    powers = zi ** arange(K)
+    yplus[0] = signal[0] + zi * add.reduce(powers * signal)
+    for k in range(1, K):
+        yplus[k] = signal[k] + zi * yplus[k - 1]
+    output = zeros((K,), signal.dtype.char)
+    output[K - 1] = zi / (zi - 1) * yplus[K - 1]
+    for k in range(K - 2, -1, -1):
+        output[k] = zi * (output[k + 1] - yplus[k])
+    return output * 8.0
+
+
+def cspline1d(signal, lamb=0.0):
+    """
+    Compute cubic spline coefficients for rank-1 array.
+
+    Find the cubic spline coefficients for a 1-D signal assuming
+    mirror-symmetric boundary conditions. To obtain the signal back from the
+    spline representation, mirror-symmetric-convolve these coefficients with
+    a length-3 FIR window [1.0, 4.0, 1.0] / 6.0.
+
+    Parameters
+    ----------
+    signal : ndarray
+        A rank-1 array representing samples of a signal.
+    lamb : float, optional
+        Smoothing coefficient, default is 0.0.
+
+    Returns
+    -------
+    c : ndarray
+        Cubic spline coefficients.
+
+    See Also
+    --------
+    cspline1d_eval : Evaluate a cubic spline at the new set of points.
+
+    Examples
+    --------
+    We can filter a signal to reduce and smooth out high-frequency noise with
+    a cubic spline:
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.signal import cspline1d, cspline1d_eval
+    >>> rng = np.random.default_rng()
+    >>> sig = np.repeat([0., 1., 0.], 100)
+    >>> sig += rng.standard_normal(len(sig))*0.05  # add noise
+    >>> time = np.linspace(0, len(sig))
+    >>> filtered = cspline1d_eval(cspline1d(sig), time)
+    >>> plt.plot(sig, label="signal")
+    >>> plt.plot(time, filtered, label="filtered")
+    >>> plt.legend()
+    >>> plt.show()
+
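+    As an illustrative consistency check, evaluating the spline at the
+    original sample points recovers the input signal:
+
+    >>> c = cspline1d(sig)
+    >>> np.allclose(cspline1d_eval(c, np.arange(len(sig))), sig)
+    True
+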
+    """
+    if lamb != 0.0:
+        return _cubic_smooth_coeff(signal, lamb)
+    else:
+        return _cubic_coeff(signal)
+
+
+def qspline1d(signal, lamb=0.0):
+    """Compute quadratic spline coefficients for rank-1 array.
+
+    Parameters
+    ----------
+    signal : ndarray
+        A rank-1 array representing samples of a signal.
+    lamb : float, optional
+        Smoothing coefficient (must be zero for now).
+
+    Returns
+    -------
+    c : ndarray
+        Quadratic spline coefficients.
+
+    See Also
+    --------
+    qspline1d_eval : Evaluate a quadratic spline at the new set of points.
+
+    Notes
+    -----
+    Find the quadratic spline coefficients for a 1-D signal assuming
+    mirror-symmetric boundary conditions. To obtain the signal back from the
+    spline representation, mirror-symmetric-convolve these coefficients with
+    a length-3 FIR window [1.0, 6.0, 1.0] / 8.0.
+
+    Examples
+    --------
+    We can filter a signal to reduce and smooth out high-frequency noise with
+    a quadratic spline:
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.signal import qspline1d, qspline1d_eval
+    >>> rng = np.random.default_rng()
+    >>> sig = np.repeat([0., 1., 0.], 100)
+    >>> sig += rng.standard_normal(len(sig))*0.05  # add noise
+    >>> time = np.linspace(0, len(sig))
+    >>> filtered = qspline1d_eval(qspline1d(sig), time)
+    >>> plt.plot(sig, label="signal")
+    >>> plt.plot(time, filtered, label="filtered")
+    >>> plt.legend()
+    >>> plt.show()
+
+    """
+    if lamb != 0.0:
+        raise ValueError("Smoothing quadratic splines not supported yet.")
+    else:
+        return _quadratic_coeff(signal)
+
+
+def cspline1d_eval(cj, newx, dx=1.0, x0=0):
+    """Evaluate a cubic spline at the new set of points.
+
+    `dx` is the old sample-spacing while `x0` was the old origin. In
+    other words, the old sample points (knot points) for which `cj`
+    represents the spline coefficients were at the equally spaced points::
+
+      oldx = x0 + j*dx  j=0...N-1, with N=len(cj)
+
+    Edges are handled using mirror-symmetric boundary conditions.
+
+    Parameters
+    ----------
+    cj : ndarray
+        cubic spline coefficients
+    newx : ndarray
+        New set of points.
+    dx : float, optional
+        Old sample-spacing, the default value is 1.0.
+    x0 : int, optional
+        Old origin, the default value is 0.
+
+    Returns
+    -------
+    res : ndarray
+        The cubic spline evaluated at the new points.
+
+    See Also
+    --------
+    cspline1d : Compute cubic spline coefficients for rank-1 array.
+
+    Examples
+    --------
+    We can filter a signal to reduce and smooth out high-frequency noise with
+    a cubic spline:
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.signal import cspline1d, cspline1d_eval
+    >>> rng = np.random.default_rng()
+    >>> sig = np.repeat([0., 1., 0.], 100)
+    >>> sig += rng.standard_normal(len(sig))*0.05  # add noise
+    >>> time = np.linspace(0, len(sig))
+    >>> filtered = cspline1d_eval(cspline1d(sig), time)
+    >>> plt.plot(sig, label="signal")
+    >>> plt.plot(time, filtered, label="filtered")
+    >>> plt.legend()
+    >>> plt.show()
+
+    """
+    newx = (asarray(newx) - x0) / float(dx)
+    res = zeros_like(newx, dtype=cj.dtype)
+    if res.size == 0:
+        return res
+    N = len(cj)
+    cond1 = newx < 0
+    cond2 = newx > (N - 1)
+    cond3 = ~(cond1 | cond2)
+    # handle general mirror-symmetry
+    res[cond1] = cspline1d_eval(cj, -newx[cond1])
+    res[cond2] = cspline1d_eval(cj, 2 * (N - 1) - newx[cond2])
+    newx = newx[cond3]
+    if newx.size == 0:
+        return res
+    result = zeros_like(newx, dtype=cj.dtype)
+    jlower = floor(newx - 2).astype(int) + 1
+    for i in range(4):
+        thisj = jlower + i
+        indj = thisj.clip(0, N - 1)  # handle edge cases
+        result += cj[indj] * cubic(newx - thisj)
+    res[cond3] = result
+    return res
+
+
+def qspline1d_eval(cj, newx, dx=1.0, x0=0):
+    """Evaluate a quadratic spline at the new set of points.
+
+    Parameters
+    ----------
+    cj : ndarray
+        Quadratic spline coefficients
+    newx : ndarray
+        New set of points.
+    dx : float, optional
+        Old sample-spacing, the default value is 1.0.
+    x0 : int, optional
+        Old origin, the default value is 0.
+
+    Returns
+    -------
+    res : ndarray
+        The quadratic spline evaluated at the new points.
+
+    See Also
+    --------
+    qspline1d : Compute quadratic spline coefficients for rank-1 array.
+
+    Notes
+    -----
+    `dx` is the old sample-spacing while `x0` was the old origin. In
+    other words, the old sample points (knot points) for which `cj`
+    represents the spline coefficients were at the equally spaced points::
+
+      oldx = x0 + j*dx  j=0...N-1, with N=len(cj)
+
+    Edges are handled using mirror-symmetric boundary conditions.
+
+    Examples
+    --------
+    We can filter a signal to reduce and smooth out high-frequency noise with
+    a quadratic spline:
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.signal import qspline1d, qspline1d_eval
+    >>> rng = np.random.default_rng()
+    >>> sig = np.repeat([0., 1., 0.], 100)
+    >>> sig += rng.standard_normal(len(sig))*0.05  # add noise
+    >>> time = np.linspace(0, len(sig))
+    >>> filtered = qspline1d_eval(qspline1d(sig), time)
+    >>> plt.plot(sig, label="signal")
+    >>> plt.plot(time, filtered, label="filtered")
+    >>> plt.legend()
+    >>> plt.show()
+
+    """
+    newx = (asarray(newx) - x0) / dx
+    res = zeros_like(newx)
+    if res.size == 0:
+        return res
+    N = len(cj)
+    cond1 = newx < 0
+    cond2 = newx > (N - 1)
+    cond3 = ~(cond1 | cond2)
+    # handle general mirror-symmetry
+    res[cond1] = qspline1d_eval(cj, -newx[cond1])
+    res[cond2] = qspline1d_eval(cj, 2 * (N - 1) - newx[cond2])
+    newx = newx[cond3]
+    if newx.size == 0:
+        return res
+    result = zeros_like(newx)
+    jlower = floor(newx - 1.5).astype(int) + 1
+    for i in range(3):
+        thisj = jlower + i
+        indj = thisj.clip(0, N - 1)  # handle edge cases
+        result += cj[indj] * quadratic(newx - thisj)
+    res[cond3] = result
+    return res
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/_czt.py b/__packaged__/coreml/.python_dependencies/scipy/signal/_czt.py
new file mode 100644
index 00000000..c5e5715b
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/_czt.py
@@ -0,0 +1,575 @@
+# This program is public domain
+# Authors: Paul Kienzle, Nadav Horesh
+"""
+Chirp z-transform.
+
+We provide two interfaces to the chirp z-transform: an object interface
+which precalculates part of the transform and can be applied efficiently
+to many different data sets, and a functional interface which is applied
+only to the given data set.
+
+Transforms
+----------
+
+CZT : callable (x, axis=-1) -> array
+   Define a chirp z-transform that can be applied to different signals.
+ZoomFFT : callable (x, axis=-1) -> array
+   Define a Fourier transform on a range of frequencies.
+
+Functions
+---------
+
+czt : array
+   Compute the chirp z-transform for a signal.
+zoom_fft : array
+   Compute the Fourier transform on a range of frequencies.
+"""
+
+import cmath
+import numbers
+import numpy as np
+from numpy import pi, arange
+from scipy.fft import fft, ifft, next_fast_len
+
+__all__ = ['czt', 'zoom_fft', 'CZT', 'ZoomFFT', 'czt_points']
+
+
+def _validate_sizes(n, m):
+    if n < 1 or not isinstance(n, numbers.Integral):
+        raise ValueError('Invalid number of CZT data '
+                         f'points ({n}) specified. '
+                         'n must be positive and integer type.')
+
+    if m is None:
+        m = n
+    elif m < 1 or not isinstance(m, numbers.Integral):
+        raise ValueError('Invalid number of CZT output '
+                         f'points ({m}) specified. '
+                         'm must be positive and integer type.')
+
+    return m
+
+
+def czt_points(m, w=None, a=1+0j):
+    """
+    Return the points at which the chirp z-transform is computed.
+
+    Parameters
+    ----------
+    m : int
+        The number of points desired.
+    w : complex, optional
+        The ratio between points in each step.
+        Defaults to equally spaced points around the entire unit circle.
+    a : complex, optional
+        The starting point in the complex plane.  Default is 1+0j.
+
+    Returns
+    -------
+    out : ndarray
+        The points in the Z plane at which `CZT` samples the z-transform,
+        when called with arguments `m`, `w`, and `a`, as complex numbers.
+
+    See Also
+    --------
+    CZT : Class that creates a callable chirp z-transform function.
+    czt : Convenience function for quickly calculating CZT.
+
+    Examples
+    --------
+    Plot the points of a 16-point FFT:
+
+    >>> import numpy as np
+    >>> from scipy.signal import czt_points
+    >>> points = czt_points(16)
+    >>> import matplotlib.pyplot as plt
+    >>> plt.plot(points.real, points.imag, 'o')
+    >>> plt.gca().add_patch(plt.Circle((0,0), radius=1, fill=False, alpha=.3))
+    >>> plt.axis('equal')
+    >>> plt.show()
+
+    and a 91-point logarithmic spiral that crosses the unit circle:
+
+    >>> m, w, a = 91, 0.995*np.exp(-1j*np.pi*.05), 0.8*np.exp(1j*np.pi/6)
+    >>> points = czt_points(m, w, a)
+    >>> plt.plot(points.real, points.imag, 'o')
+    >>> plt.gca().add_patch(plt.Circle((0,0), radius=1, fill=False, alpha=.3))
+    >>> plt.axis('equal')
+    >>> plt.show()
+    """
+    m = _validate_sizes(1, m)
+
+    k = arange(m)
+
+    a = 1.0 * a  # at least float
+
+    if w is None:
+        # Nothing specified, default to FFT
+        return a * np.exp(2j * pi * k / m)
+    else:
+        # w specified
+        w = 1.0 * w  # at least float
+        return a * w**-k
+
+
+class CZT:
+    """
+    Create a callable chirp z-transform function.
+
+    Transform to compute the frequency response around a spiral.
+    Objects of this class are callables which can compute the
+    chirp z-transform on their inputs.  This object precalculates the constant
+    chirps used in the given transform.
+
+    Parameters
+    ----------
+    n : int
+        The size of the signal.
+    m : int, optional
+        The number of output points desired.  Default is `n`.
+    w : complex, optional
+        The ratio between points in each step.  This must be precise or the
+        accumulated error will degrade the tail of the output sequence.
+        Defaults to equally spaced points around the entire unit circle.
+    a : complex, optional
+        The starting point in the complex plane.  Default is 1+0j.
+
+    Returns
+    -------
+    f : CZT
+        Callable object ``f(x, axis=-1)`` for computing the chirp z-transform
+        on `x`.
+
+    See Also
+    --------
+    czt : Convenience function for quickly calculating CZT.
+    ZoomFFT : Class that creates a callable partial FFT function.
+
+    Notes
+    -----
+    The defaults are chosen such that ``CZT(n)(x)`` is equivalent to
+    ``fft.fft(x)`` and, if ``m > len(x)``, that ``CZT(n, m)(x)`` is
+    equivalent to ``fft.fft(x, m)``.
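+
+    For instance, this equivalence can be checked numerically (a quick
+    sketch with assumed sizes)::
+
+        import numpy as np
+        from scipy.fft import fft
+        from scipy.signal import CZT
+
+        x = np.random.rand(100)
+        np.allclose(CZT(len(x))(x), fft(x))  # True, up to rounding error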
+
+    If `w` does not lie on the unit circle, then the transform will be
+    around a spiral with exponentially-increasing radius.  Regardless,
+    the angle will increase linearly.
+
+    For transforms that do lie on the unit circle, accuracy is better when
+    using `ZoomFFT`, since any numerical error in `w` is
+    accumulated for long data lengths, drifting away from the unit circle.
+
+    The chirp z-transform can be faster than an equivalent FFT with
+    zero padding.  Try it with your own array sizes to see.
+
+    However, the chirp z-transform is considerably less precise than the
+    equivalent zero-padded FFT.
+
+    As this CZT is implemented using the Bluestein algorithm, it can compute
+    large prime-length Fourier transforms in O(N log N) time, rather than the
+    O(N**2) time required by the direct DFT calculation.  (`scipy.fft` also
+    uses Bluestein's algorithm.)
+
+    (The name "chirp z-transform" comes from the use of a chirp in the
+    Bluestein algorithm.  It does not decompose signals into chirps, like
+    other transforms with "chirp" in the name.)
+
+    References
+    ----------
+    .. [1] Leo I. Bluestein, "A linear filtering approach to the computation
+           of the discrete Fourier transform," Northeast Electronics Research
+           and Engineering Meeting Record 10, 218-219 (1968).
+    .. [2] Rabiner, Schafer, and Rader, "The chirp z-transform algorithm and
+           its application," Bell Syst. Tech. J. 48, 1249-1292 (1969).
+
+    Examples
+    --------
+    Compute multiple prime-length FFTs:
+
+    >>> from scipy.signal import CZT
+    >>> import numpy as np
+    >>> a = np.random.rand(7)
+    >>> b = np.random.rand(7)
+    >>> c = np.random.rand(7)
+    >>> czt_7 = CZT(n=7)
+    >>> A = czt_7(a)
+    >>> B = czt_7(b)
+    >>> C = czt_7(c)
+
+    Display the points at which the FFT is calculated:
+
+    >>> czt_7.points()
+    array([ 1.00000000+0.j        ,  0.62348980+0.78183148j,
+           -0.22252093+0.97492791j, -0.90096887+0.43388374j,
+           -0.90096887-0.43388374j, -0.22252093-0.97492791j,
+            0.62348980-0.78183148j])
+    >>> import matplotlib.pyplot as plt
+    >>> plt.plot(czt_7.points().real, czt_7.points().imag, 'o')
+    >>> plt.gca().add_patch(plt.Circle((0,0), radius=1, fill=False, alpha=.3))
+    >>> plt.axis('equal')
+    >>> plt.show()
+    """
+
+    def __init__(self, n, m=None, w=None, a=1+0j):
+        m = _validate_sizes(n, m)
+
+        # Use an integer dtype wide enough to hold k**2 without overflow
+        k = arange(max(m, n), dtype=np.min_scalar_type(-max(m, n)**2))
+
+        if w is None:
+            # Nothing specified, default to FFT-like
+            w = cmath.exp(-2j*pi/m)
+            wk2 = np.exp(-(1j * pi * ((k**2) % (2*m))) / m)
+        else:
+            # w specified
+            wk2 = w**(k**2/2.)
+
+        a = 1.0 * a  # at least float
+
+        self.w, self.a = w, a
+        self.m, self.n = m, n
+
+        # Precompute the constant chirps for Bluestein's algorithm: the CZT
+        # becomes (input premultiply) -> FFT convolution with the chirp
+        # filter -> (output postmultiply).
+        nfft = next_fast_len(n + m - 1)  # linear convolution length
+        self._Awk2 = a**-k[:n] * wk2[:n]  # input premultiplier
+        self._nfft = nfft
+        self._Fwk2 = fft(1/np.hstack((wk2[n-1:0:-1], wk2[:m])), nfft)  # chirp filter spectrum
+        self._wk2 = wk2[:m]  # output postmultiplier
+        self._yidx = slice(n-1, n+m-1)  # valid samples of the convolution
+
+    def __call__(self, x, *, axis=-1):
+        """
+        Calculate the chirp z-transform of a signal.
+
+        Parameters
+        ----------
+        x : array
+            The signal to transform.
+        axis : int, optional
+            Axis over which to compute the FFT. If not given, the last axis is
+            used.
+
+        Returns
+        -------
+        out : ndarray
+            An array of the same dimensions as `x`, but with the length of the
+            transformed axis set to `m`.
+        """
+        x = np.asarray(x)
+        if x.shape[axis] != self.n:
+            raise ValueError(f"CZT defined for length {self.n}, not "
+                             f"{x.shape[axis]}")
+        # Calculate transpose coordinates, to allow operation on any given axis
+        trnsp = np.arange(x.ndim)
+        trnsp[[axis, -1]] = [-1, axis]
+        x = x.transpose(*trnsp)
+        y = ifft(self._Fwk2 * fft(x*self._Awk2, self._nfft))
+        y = y[..., self._yidx] * self._wk2
+        return y.transpose(*trnsp)
+
+    def points(self):
+        """
+        Return the points at which the chirp z-transform is computed.
+        """
+        return czt_points(self.m, self.w, self.a)
+
+
+class ZoomFFT(CZT):
+    """
+    Create a callable zoom FFT transform function.
+
+    This is a specialization of the chirp z-transform (`CZT`) for a set of
+    equally-spaced frequencies around the unit circle, used to calculate a
+    section of the FFT more efficiently than calculating the entire FFT and
+    truncating.
+
+    Parameters
+    ----------
+    n : int
+        The size of the signal.
+    fn : array_like
+        A length-2 sequence [`f1`, `f2`] giving the frequency range, or a
+        scalar, for which the range [0, `fn`] is assumed.
+    m : int, optional
+        The number of points to evaluate.  Default is `n`.
+    fs : float, optional
+        The sampling frequency.  If ``fs=10`` represented 10 kHz, for example,
+        then `f1` and `f2` would also be given in kHz.
+        The default sampling frequency is 2, so `f1` and `f2` should be
+        in the range [0, 1] to keep the transform below the Nyquist
+        frequency.
+    endpoint : bool, optional
+        If True, `f2` is the last sample. Otherwise, it is not included.
+        Default is False.
+
+    Returns
+    -------
+    f : ZoomFFT
+        Callable object ``f(x, axis=-1)`` for computing the zoom FFT on `x`.
+
+    See Also
+    --------
+    zoom_fft : Convenience function for calculating a zoom FFT.
+
+    Notes
+    -----
+    The defaults are chosen such that ``ZoomFFT(n, 2)(x)`` is equivalent to
+    ``fft.fft(x)`` and, if ``m > len(x)``, that ``ZoomFFT(n, 2, m)(x)`` is
+    equivalent to ``fft.fft(x, m)``.
+
+    Sampling frequency is 1/dt, the time step between samples in the
+    signal `x`.  The unit circle corresponds to frequencies from 0 up
+    to the sampling frequency.  The default sampling frequency of 2
+    means that `f1`, `f2` values up to the Nyquist frequency are in the
+    range [0, 1). For `f1`, `f2` values expressed in radians, a sampling
+    frequency of 2*pi should be used.
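+
+    For example, to evaluate a band given in rad/sample (a sketch with
+    assumed values)::
+
+        import numpy as np
+        from scipy.signal import ZoomFFT
+
+        n = 128
+        f = ZoomFFT(n, [0.2*np.pi, 0.4*np.pi], fs=2*np.pi)
+        X = f(np.random.rand(n))  # spectrum from 0.2*pi to 0.4*pi rad/sample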
+
+    Remember that a zoom FFT can only interpolate the points of the existing
+    FFT.  It cannot help to resolve two separate nearby frequencies.
+    Frequency resolution can only be increased by increasing acquisition
+    time.
+
+    These functions are implemented using Bluestein's algorithm (as is
+    `scipy.fft`). [2]_
+
+    References
+    ----------
+    .. [1] Steve Alan Shilling, "A study of the chirp z-transform and its
+           applications", pg 29 (1970)
+           https://krex.k-state.edu/dspace/bitstream/handle/2097/7844/LD2668R41972S43.pdf
+    .. [2] Leo I. Bluestein, "A linear filtering approach to the computation
+           of the discrete Fourier transform," Northeast Electronics Research
+           and Engineering Meeting Record 10, 218-219 (1968).
+
+    Examples
+    --------
+    To plot the transform results use something like the following:
+
+    >>> import numpy as np
+    >>> from scipy.signal import ZoomFFT
+    >>> t = np.linspace(0, 1, 1021)
+    >>> x = np.cos(2*np.pi*15*t) + np.sin(2*np.pi*17*t)
+    >>> f1, f2 = 5, 27
+    >>> transform = ZoomFFT(len(x), [f1, f2], len(x), fs=1021)
+    >>> X = transform(x)
+    >>> f = np.linspace(f1, f2, len(x))
+    >>> import matplotlib.pyplot as plt
+    >>> plt.plot(f, 20*np.log10(np.abs(X)))
+    >>> plt.show()
+    """
+
+    def __init__(self, n, fn, m=None, *, fs=2, endpoint=False):
+        m = _validate_sizes(n, m)
+
+        k = arange(max(m, n), dtype=np.min_scalar_type(-max(m, n)**2))
+
+        if np.size(fn) == 2:
+            f1, f2 = fn
+        elif np.size(fn) == 1:
+            f1, f2 = 0.0, fn
+        else:
+            raise ValueError('fn must be a scalar or 2-length sequence')
+
+        self.f1, self.f2, self.fs = f1, f2, fs
+
+        if endpoint:
+            # Step sized so that f2 is the last of the m points
+            scale = ((f2 - f1) * m) / (fs * (m - 1))
+        else:
+            scale = (f2 - f1) / fs
+        a = cmath.exp(2j * pi * f1/fs)
+        wk2 = np.exp(-(1j * pi * scale * k**2) / m)
+
+        self.w = cmath.exp(-2j*pi/m * scale)
+        self.a = a
+        self.m, self.n = m, n
+
+        ak = np.exp(-2j * pi * f1/fs * k[:n])
+        self._Awk2 = ak * wk2[:n]
+
+        nfft = next_fast_len(n + m - 1)
+        self._nfft = nfft
+        self._Fwk2 = fft(1/np.hstack((wk2[n-1:0:-1], wk2[:m])), nfft)
+        self._wk2 = wk2[:m]
+        self._yidx = slice(n-1, n+m-1)
+
+
+def czt(x, m=None, w=None, a=1+0j, *, axis=-1):
+    """
+    Compute the frequency response around a spiral in the Z plane.
+
+    Parameters
+    ----------
+    x : array
+        The signal to transform.
+    m : int, optional
+        The number of output points desired.  Default is the length of the
+        input data.
+    w : complex, optional
+        The ratio between points in each step.  This must be precise or the
+        accumulated error will degrade the tail of the output sequence.
+        Defaults to equally spaced points around the entire unit circle.
+    a : complex, optional
+        The starting point in the complex plane.  Default is 1+0j.
+    axis : int, optional
+        Axis over which to compute the FFT. If not given, the last axis is
+        used.
+
+    Returns
+    -------
+    out : ndarray
+        An array of the same dimensions as `x`, but with the length of the
+        transformed axis set to `m`.
+
+    See Also
+    --------
+    CZT : Class that creates a callable chirp z-transform function.
+    zoom_fft : Convenience function for partial FFT calculations.
+
+    Notes
+    -----
+    The defaults are chosen such that ``signal.czt(x)`` is equivalent to
+    ``fft.fft(x)`` and, if ``m > len(x)``, that ``signal.czt(x, m)`` is
+    equivalent to ``fft.fft(x, m)``.
+
+    If the transform needs to be repeated, use `CZT` to construct a
+    specialized transform function which can be reused without
+    recomputing constants.
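+
+    A minimal sketch of such reuse (with assumed signal shapes)::
+
+        import numpy as np
+        from scipy.signal import CZT
+
+        transform = CZT(n=1000)            # constants computed once
+        batch = np.random.rand(5, 1000)    # five signals of length 1000
+        spectra = transform(batch)         # applied along the last axis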
+
+    An example application is in system identification, repeatedly evaluating
+    small slices of the z-transform of a system, around where a pole is
+    expected to exist, to refine the estimate of the pole's true location. [1]_
+
+    References
+    ----------
+    .. [1] Steve Alan Shilling, "A study of the chirp z-transform and its
+           applications", pg 20 (1970)
+           https://krex.k-state.edu/dspace/bitstream/handle/2097/7844/LD2668R41972S43.pdf
+
+    Examples
+    --------
+    Generate a sinusoid:
+
+    >>> import numpy as np
+    >>> f1, f2, fs = 8, 10, 200  # Hz
+    >>> t = np.linspace(0, 1, fs, endpoint=False)
+    >>> x = np.sin(2*np.pi*t*f2)
+    >>> import matplotlib.pyplot as plt
+    >>> plt.plot(t, x)
+    >>> plt.axis([0, 1, -1.1, 1.1])
+    >>> plt.show()
+
+    Its discrete Fourier transform has all of its energy in a single frequency
+    bin:
+
+    >>> from scipy.fft import rfft, rfftfreq
+    >>> from scipy.signal import czt, czt_points
+    >>> plt.plot(rfftfreq(fs, 1/fs), abs(rfft(x)))
+    >>> plt.margins(0, 0.1)
+    >>> plt.show()
+
+    However, if the sinusoid is exponentially decaying:
+
+    >>> x = np.exp(-t*f1) * np.sin(2*np.pi*t*f2)
+    >>> plt.plot(t, x)
+    >>> plt.axis([0, 1, -1.1, 1.1])
+    >>> plt.show()
+
+    the DFT will have spectral leakage:
+
+    >>> plt.plot(rfftfreq(fs, 1/fs), abs(rfft(x)))
+    >>> plt.margins(0, 0.1)
+    >>> plt.show()
+
+    While the DFT always samples the z-transform around the unit circle, the
+    chirp z-transform allows us to sample the Z-transform along any
+    logarithmic spiral, such as a circle with radius smaller than unity:
+
+    >>> M = fs // 2  # Just positive frequencies, like rfft
+    >>> a = np.exp(-f1/fs)  # Starting point of the circle, radius < 1
+    >>> w = np.exp(-1j*np.pi/M)  # "Step size" of circle
+    >>> points = czt_points(M + 1, w, a)  # M + 1 to include Nyquist
+    >>> plt.plot(points.real, points.imag, '.')
+    >>> plt.gca().add_patch(plt.Circle((0,0), radius=1, fill=False, alpha=.3))
+    >>> plt.axis('equal'); plt.axis([-1.05, 1.05, -0.05, 1.05])
+    >>> plt.show()
+
+    With the correct radius, this transforms the decaying sinusoid (and others
+    with the same decay rate) without spectral leakage:
+
+    >>> z_vals = czt(x, M + 1, w, a)  # Include Nyquist for comparison to rfft
+    >>> freqs = np.angle(points)*fs/(2*np.pi)  # angle = omega, radius = sigma
+    >>> plt.plot(freqs, abs(z_vals))
+    >>> plt.margins(0, 0.1)
+    >>> plt.show()
+    """
+    x = np.asarray(x)
+    transform = CZT(x.shape[axis], m=m, w=w, a=a)
+    return transform(x, axis=axis)
+
+
+def zoom_fft(x, fn, m=None, *, fs=2, endpoint=False, axis=-1):
+    """
+    Compute the DFT of `x` only for frequencies in range `fn`.
+
+    Parameters
+    ----------
+    x : array
+        The signal to transform.
+    fn : array_like
+        A length-2 sequence [`f1`, `f2`] giving the frequency range, or a
+        scalar, for which the range [0, `fn`] is assumed.
+    m : int, optional
+        The number of points to evaluate.  The default is the length of `x`.
+    fs : float, optional
+        The sampling frequency.  If ``fs=10`` represented 10 kHz, for example,
+        then `f1` and `f2` would also be given in kHz.
+        The default sampling frequency is 2, so `f1` and `f2` should be
+        in the range [0, 1] to keep the transform below the Nyquist
+        frequency.
+    endpoint : bool, optional
+        If True, `f2` is the last sample. Otherwise, it is not included.
+        Default is False.
+    axis : int, optional
+        Axis over which to compute the FFT. If not given, the last axis is
+        used.
+
+    Returns
+    -------
+    out : ndarray
+        The transformed signal.  The Fourier transform will be calculated
+        at the points f1, f1+df, f1+2df, ..., f2-df, where df=(f2-f1)/m
+        (or, with ``endpoint=True``, up to and including f2, with
+        df=(f2-f1)/(m-1)).
+
+    See Also
+    --------
+    ZoomFFT : Class that creates a callable partial FFT function.
+
+    Notes
+    -----
+    The defaults are chosen such that ``signal.zoom_fft(x, 2)`` is equivalent
+    to ``fft.fft(x)`` and, if ``m > len(x)``, that ``signal.zoom_fft(x, 2, m)``
+    is equivalent to ``fft.fft(x, m)``.
+
+    To graph the magnitude of the resulting transform, use::
+
+        plot(linspace(f1, f2, m, endpoint=False), abs(zoom_fft(x, [f1, f2], m)))
+
+    If the transform needs to be repeated, use `ZoomFFT` to construct
+    a specialized transform function which can be reused without
+    recomputing constants.
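+
+    As a concrete check (a sketch with assumed sizes), zooming on integer
+    bins with ``fs`` equal to the signal length reproduces a slice of the
+    plain FFT::
+
+        import numpy as np
+        from scipy.fft import fft
+        from scipy.signal import zoom_fft
+
+        n = 64
+        x = np.random.rand(n)
+        f1, f2 = 10, 20
+        X = zoom_fft(x, [f1, f2], m=f2 - f1, fs=n)
+        np.allclose(X, fft(x)[f1:f2])  # True: exactly those FFT bins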
+
+    Examples
+    --------
+    To plot the transform results use something like the following:
+
+    >>> import numpy as np
+    >>> from scipy.signal import zoom_fft
+    >>> t = np.linspace(0, 1, 1021)
+    >>> x = np.cos(2*np.pi*15*t) + np.sin(2*np.pi*17*t)
+    >>> f1, f2 = 5, 27
+    >>> X = zoom_fft(x, [f1, f2], len(x), fs=1021)
+    >>> f = np.linspace(f1, f2, len(x))
+    >>> import matplotlib.pyplot as plt
+    >>> plt.plot(f, 20*np.log10(np.abs(X)))
+    >>> plt.show()
+    """
+    x = np.asarray(x)
+    transform = ZoomFFT(x.shape[axis], fn, m=m, fs=fs, endpoint=endpoint)
+    return transform(x, axis=axis)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/_filter_design.py b/__packaged__/coreml/.python_dependencies/scipy/signal/_filter_design.py
new file mode 100644
index 00000000..c5de84c9
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/_filter_design.py
@@ -0,0 +1,5615 @@
+"""Filter design."""
+import math
+import operator
+import warnings
+
+import numpy
+import numpy as np
+from numpy import (atleast_1d, poly, polyval, roots, real, asarray,
+                   resize, pi, absolute, logspace, r_, sqrt, tan, log10,
+                   arctan, arcsinh, sin, exp, cosh, arccosh, ceil, conjugate,
+                   zeros, sinh, append, concatenate, prod, ones, full, array,
+                   mintypecode)
+from numpy.polynomial.polynomial import polyval as npp_polyval
+from numpy.polynomial.polynomial import polyvalfromroots
+
+from scipy import special, optimize, fft as sp_fft
+from scipy.special import comb
+from scipy._lib._util import float_factorial
+
+
+__all__ = ['findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize',
+           'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign',
+           'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel',
+           'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord',
+           'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap',
+           'BadCoefficients', 'freqs_zpk', 'freqz_zpk',
+           'tf2sos', 'sos2tf', 'zpk2sos', 'sos2zpk', 'group_delay',
+           'sosfreqz', 'iirnotch', 'iirpeak', 'bilinear_zpk',
+           'lp2lp_zpk', 'lp2hp_zpk', 'lp2bp_zpk', 'lp2bs_zpk',
+           'gammatone', 'iircomb']
+
+
+class BadCoefficients(UserWarning):
+    """Warning about badly conditioned filter coefficients"""
+    pass
+
+
+abs = absolute
+
+
+def _is_int_type(x):
+    """
+    Check if input is of a scalar integer type (so ``5`` and ``array(5)`` will
+    pass, while ``5.0`` and ``array([5])`` will fail).
+    """
+    if np.ndim(x) != 0:
+        # Older versions of NumPy did not raise for np.array([1]).__index__()
+        # This is safe to remove when support for those versions is dropped
+        return False
+    try:
+        operator.index(x)
+    except TypeError:
+        return False
+    else:
+        return True
+
+
+def findfreqs(num, den, N, kind='ba'):
+    """
+    Find array of frequencies for computing the response of an analog filter.
+
+    Parameters
+    ----------
+    num, den : array_like, 1-D
+        The polynomial coefficients of the numerator and denominator of the
+        transfer function of the filter or LTI system, where the coefficients
+        are ordered from highest to lowest degree. Or, the roots  of the
+        transfer function numerator and denominator (i.e., zeroes and poles).
+    N : int
+        The length of the array to be computed.
+    kind : str {'ba', 'zp'}, optional
+        Specifies whether the numerator and denominator are specified by their
+        polynomial coefficients ('ba'), or their roots ('zp').
+
+    Returns
+    -------
+    w : (N,) ndarray
+        A 1-D array of frequencies, logarithmically spaced.
+
+    Examples
+    --------
+    Find a set of nine frequencies that span the "interesting part" of the
+    frequency response for the filter with the transfer function
+
+        H(s) = s / (s^2 + 8s + 25)
+
+    >>> from scipy import signal
+    >>> signal.findfreqs([1, 0], [1, 8, 25], N=9)
+    array([  1.00000000e-02,   3.16227766e-02,   1.00000000e-01,
+             3.16227766e-01,   1.00000000e+00,   3.16227766e+00,
+             1.00000000e+01,   3.16227766e+01,   1.00000000e+02])
+    """
+    if kind == 'ba':
+        ep = atleast_1d(roots(den)) + 0j
+        tz = atleast_1d(roots(num)) + 0j
+    elif kind == 'zp':
+        ep = atleast_1d(den) + 0j
+        tz = atleast_1d(num) + 0j
+    else:
+        raise ValueError("input must be one of {'ba', 'zp'}")
+
+    if len(ep) == 0:
+        ep = atleast_1d(-1000) + 0j
+
+    ez = r_['-1',
+            numpy.compress(ep.imag >= 0, ep, axis=-1),
+            numpy.compress((abs(tz) < 1e5) & (tz.imag >= 0), tz, axis=-1)]
+
+    # Flag near-zero roots (integrators), which would otherwise drive the
+    # bounds to zero, and pick decade bounds that bracket the remaining
+    # pole/zero magnitudes.
+    integ = abs(ez) < 1e-10
+    hfreq = numpy.around(numpy.log10(numpy.max(3 * abs(ez.real + integ) +
+                                               1.5 * ez.imag)) + 0.5)
+    lfreq = numpy.around(numpy.log10(0.1 * numpy.min(abs(real(ez + integ)) +
+                                                     2 * ez.imag)) - 0.5)
+
+    w = logspace(lfreq, hfreq, N)
+    return w
+
+
+def freqs(b, a, worN=200, plot=None):
+    """
+    Compute frequency response of analog filter.
+
+    Given the M-order numerator `b` and N-order denominator `a` of an analog
+    filter, compute its frequency response::
+
+             b[0]*(jw)**M + b[1]*(jw)**(M-1) + ... + b[M]
+     H(w) = ----------------------------------------------
+             a[0]*(jw)**N + a[1]*(jw)**(N-1) + ... + a[N]
+
+    Parameters
+    ----------
+    b : array_like
+        Numerator of a linear filter.
+    a : array_like
+        Denominator of a linear filter.
+    worN : {None, int, array_like}, optional
+        If None, then compute at 200 frequencies around the interesting parts
+        of the response curve (determined by pole-zero locations). If a single
+        integer, then compute at that many frequencies. Otherwise, compute the
+        response at the angular frequencies (e.g., rad/s) given in `worN`.
+    plot : callable, optional
+        A callable that takes two arguments. If given, the return parameters
+        `w` and `h` are passed to plot. Useful for plotting the frequency
+        response inside `freqs`.
+
+    Returns
+    -------
+    w : ndarray
+        The angular frequencies at which `h` was computed.
+    h : ndarray
+        The frequency response.
+
+    See Also
+    --------
+    freqz : Compute the frequency response of a digital filter.
+
+    Notes
+    -----
+    Using Matplotlib's "plot" function as the callable for `plot` produces
+    unexpected results, as this plots the real part of the complex transfer
+    function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``.
+
+    Examples
+    --------
+    >>> from scipy.signal import freqs, iirfilter
+    >>> import numpy as np
+
+    >>> b, a = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1')
+
+    >>> w, h = freqs(b, a, worN=np.logspace(-1, 2, 1000))
+
+    >>> import matplotlib.pyplot as plt
+    >>> plt.semilogx(w, 20 * np.log10(abs(h)))
+    >>> plt.xlabel('Frequency')
+    >>> plt.ylabel('Amplitude response [dB]')
+    >>> plt.grid(True)
+    >>> plt.show()
+
+    """
+    if worN is None:
+        # For backwards compatibility
+        w = findfreqs(b, a, 200)
+    elif _is_int_type(worN):
+        w = findfreqs(b, a, worN)
+    else:
+        w = atleast_1d(worN)
+
+    s = 1j * w
+    h = polyval(b, s) / polyval(a, s)
+    if plot is not None:
+        plot(w, h)
+
+    return w, h
+
+
+def freqs_zpk(z, p, k, worN=200):
+    """
+    Compute frequency response of analog filter.
+
+    Given the zeros `z`, poles `p`, and gain `k` of a filter, compute its
+    frequency response::
+
+                (jw-z[0]) * (jw-z[1]) * ... * (jw-z[-1])
+     H(w) = k * ----------------------------------------
+                (jw-p[0]) * (jw-p[1]) * ... * (jw-p[-1])
+
+    Parameters
+    ----------
+    z : array_like
+        Zeroes of a linear filter
+    p : array_like
+        Poles of a linear filter
+    k : scalar
+        Gain of a linear filter
+    worN : {None, int, array_like}, optional
+        If None, then compute at 200 frequencies around the interesting parts
+        of the response curve (determined by pole-zero locations). If a single
+        integer, then compute at that many frequencies. Otherwise, compute the
+        response at the angular frequencies (e.g., rad/s) given in `worN`.
+
+    Returns
+    -------
+    w : ndarray
+        The angular frequencies at which `h` was computed.
+    h : ndarray
+        The frequency response.
+
+    See Also
+    --------
+    freqs : Compute the frequency response of an analog filter in TF form
+    freqz : Compute the frequency response of a digital filter in TF form
+    freqz_zpk : Compute the frequency response of a digital filter in ZPK form
+
+    Notes
+    -----
+    .. versionadded:: 0.19.0
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.signal import freqs_zpk, iirfilter
+
+    >>> z, p, k = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1',
+    ...                     output='zpk')
+
+    >>> w, h = freqs_zpk(z, p, k, worN=np.logspace(-1, 2, 1000))
+
+    >>> import matplotlib.pyplot as plt
+    >>> plt.semilogx(w, 20 * np.log10(abs(h)))
+    >>> plt.xlabel('Frequency')
+    >>> plt.ylabel('Amplitude response [dB]')
+    >>> plt.grid(True)
+    >>> plt.show()
+
+    """
+    k = np.asarray(k)
+    if k.size > 1:
+        raise ValueError('k must be a single scalar gain')
+
+    if worN is None:
+        # For backwards compatibility
+        w = findfreqs(z, p, 200, kind='zp')
+    elif _is_int_type(worN):
+        w = findfreqs(z, p, worN, kind='zp')
+    else:
+        w = worN
+
+    w = atleast_1d(w)
+    s = 1j * w
+    num = polyvalfromroots(s, z)
+    den = polyvalfromroots(s, p)
+    h = k * num/den
+    return w, h
+
+
+def freqz(b, a=1, worN=512, whole=False, plot=None, fs=2*pi,
+          include_nyquist=False):
+    """
+    Compute the frequency response of a digital filter.
+
+    Given the M-order numerator `b` and N-order denominator `a` of a digital
+    filter, compute its frequency response::
+
+                 jw                 -jw              -jwM
+        jw    B(e  )    b[0] + b[1]e    + ... + b[M]e
+     H(e  ) = ------ = -----------------------------------
+                 jw                 -jw              -jwN
+              A(e  )    a[0] + a[1]e    + ... + a[N]e
+
+    Parameters
+    ----------
+    b : array_like
+        Numerator of a linear filter. If `b` has dimension greater than 1,
+        it is assumed that the coefficients are stored in the first dimension,
+        and ``b.shape[1:]``, ``a.shape[1:]``, and the shape of the frequencies
+        array must be compatible for broadcasting.
+    a : array_like
+        Denominator of a linear filter. If `b` has dimension greater than 1,
+        it is assumed that the coefficients are stored in the first dimension,
+        and ``b.shape[1:]``, ``a.shape[1:]``, and the shape of the frequencies
+        array must be compatible for broadcasting.
+    worN : {None, int, array_like}, optional
+        If a single integer, then compute at that many frequencies (default is
+        N=512). This is a convenient alternative to::
+
+            np.linspace(0, fs if whole else fs/2, N, endpoint=include_nyquist)
+
+        Using a number that is fast for FFT computations can result in
+        faster computations (see Notes).
+
+        If an array_like, compute the response at the frequencies given.
+        These are in the same units as `fs`.
+    whole : bool, optional
+        Normally, frequencies are computed from 0 to the Nyquist frequency,
+        fs/2 (upper-half of unit-circle). If `whole` is True, compute
+        frequencies from 0 to fs. Ignored if worN is array_like.
+    plot : callable
+        A callable that takes two arguments. If given, the return parameters
+        `w` and `h` are passed to plot. Useful for plotting the frequency
+        response inside `freqz`.
+    fs : float, optional
+        The sampling frequency of the digital system. Defaults to 2*pi
+        radians/sample (so w is from 0 to pi).
+
+        .. versionadded:: 1.2.0
+    include_nyquist : bool, optional
+        If `whole` is False and `worN` is an integer, setting `include_nyquist`
+        to True will include the last frequency (Nyquist frequency) and is
+        otherwise ignored.
+
+        .. versionadded:: 1.5.0
+
+    Returns
+    -------
+    w : ndarray
+        The frequencies at which `h` was computed, in the same units as `fs`.
+        By default, `w` is normalized to the range [0, pi) (radians/sample).
+    h : ndarray
+        The frequency response, as complex numbers.
+
+    See Also
+    --------
+    freqz_zpk
+    sosfreqz
+
+    Notes
+    -----
+    Using Matplotlib's :func:`matplotlib.pyplot.plot` function as the callable
+    for `plot` produces unexpected results, as this plots the real part of the
+    complex transfer function, not the magnitude.
+    Try ``lambda w, h: plot(w, np.abs(h))``.
+
+    A direct computation via (R)FFT is used to compute the frequency response
+    when the following conditions are met:
+
+    1. An integer value is given for `worN`.
+    2. `worN` is fast to compute via FFT (i.e.,
+       ``next_fast_len(worN)`` equals `worN`).
+    3. The denominator coefficients are a single value (``a.shape[0] == 1``).
+    4. `worN` is at least as long as the numerator coefficients
+       (``worN >= b.shape[0]``).
+    5. If ``b.ndim > 1``, then ``b.shape[-1] == 1``.
+
+    For long FIR filters, the FFT approach can have lower error and be much
+    faster than the equivalent direct polynomial calculation.
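+
+    For example, a fast ``worN`` for a long FIR filter can be chosen like
+    this (a sketch with assumed values)::
+
+        from scipy.fft import next_fast_len
+
+        n_taps = 10001
+        worN = next_fast_len(n_taps)  # smallest fast FFT length >= n_taps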
+
+    Examples
+    --------
+    >>> from scipy import signal
+    >>> import numpy as np
+    >>> b = signal.firwin(80, 0.5, window=('kaiser', 8))
+    >>> w, h = signal.freqz(b)
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax1 = plt.subplots()
+    >>> ax1.set_title('Digital filter frequency response')
+
+    >>> ax1.plot(w, 20 * np.log10(abs(h)), 'b')
+    >>> ax1.set_ylabel('Amplitude [dB]', color='b')
+    >>> ax1.set_xlabel('Frequency [rad/sample]')
+
+    >>> ax2 = ax1.twinx()
+    >>> angles = np.unwrap(np.angle(h))
+    >>> ax2.plot(w, angles, 'g')
+    >>> ax2.set_ylabel('Angle (radians)', color='g')
+    >>> ax2.grid(True)
+    >>> ax2.axis('tight')
+    >>> plt.show()
+
+    Broadcasting Examples
+
+    Suppose we have two FIR filters whose coefficients are stored in the
+    rows of an array with shape (2, 25). For this demonstration, we'll
+    use random data:
+
+    >>> rng = np.random.default_rng()
+    >>> b = rng.random((2, 25))
+
+    To compute the frequency response for these two filters with one call
+    to `freqz`, we must pass in ``b.T``, because `freqz` expects the first
+    axis to hold the coefficients. We must then extend the shape with a
+    trivial dimension of length 1 to allow broadcasting with the array
+    of frequencies.  That is, we pass in ``b.T[..., np.newaxis]``, which has
+    shape (25, 2, 1):
+
+    >>> w, h = signal.freqz(b.T[..., np.newaxis], worN=1024)
+    >>> w.shape
+    (1024,)
+    >>> h.shape
+    (2, 1024)
+
+    Now, suppose we have two transfer functions, with the same numerator
+    coefficients ``b = [0.5, 0.5]``. The coefficients for the two denominators
+    are stored in the first dimension of the 2-D array `a`::
+
+        a = [   1      1  ]
+            [ -0.25, -0.5 ]
+
+    >>> b = np.array([0.5, 0.5])
+    >>> a = np.array([[1, 1], [-0.25, -0.5]])
+
+    Only `a` is more than 1-D. To make it compatible for
+    broadcasting with the frequencies, we extend it with a trivial dimension
+    in the call to `freqz`:
+
+    >>> w, h = signal.freqz(b, a[..., np.newaxis], worN=1024)
+    >>> w.shape
+    (1024,)
+    >>> h.shape
+    (2, 1024)
+
+    """
+    b = atleast_1d(b)
+    a = atleast_1d(a)
+
+    if worN is None:
+        # For backwards compatibility
+        worN = 512
+
+    h = None
+
+    if _is_int_type(worN):
+        N = operator.index(worN)
+        del worN
+        if N < 0:
+            raise ValueError('worN must be nonnegative, got %s' % (N,))
+        lastpoint = 2 * pi if whole else pi
+        # if include_nyquist is true and whole is false, w should
+        # include end point
+        w = np.linspace(0, lastpoint, N, endpoint=include_nyquist and not whole)
+        if (a.size == 1 and N >= b.shape[0] and
+                sp_fft.next_fast_len(N) == N and
+                (b.ndim == 1 or (b.shape[-1] == 1))):
+            # if N is fast, 2 * N will be fast, too, so no need to check
+            n_fft = N if whole else N * 2
+            if np.isrealobj(b) and np.isrealobj(a):
+                fft_func = sp_fft.rfft
+            else:
+                fft_func = sp_fft.fft
+            h = fft_func(b, n=n_fft, axis=0)[:N]
+            h /= a
+            if fft_func is sp_fft.rfft and whole:
+                # exclude DC and maybe Nyquist (no need to use axis_reverse
+                # here because we can build reversal with the truncation)
+                stop = -1 if n_fft % 2 == 1 else -2
+                h_flip = slice(stop, 0, -1)
+                h = np.concatenate((h, h[h_flip].conj()))
+            if b.ndim > 1:
+                # Last axis of h has length 1, so drop it.
+                h = h[..., 0]
+                # Move the first axis of h to the end.
+                h = np.moveaxis(h, 0, -1)
+    else:
+        w = atleast_1d(worN)
+        del worN
+        w = 2*pi*w/fs
+
+    if h is None:  # still need to compute using freqs w
+        zm1 = exp(-1j * w)
+        h = (npp_polyval(zm1, b, tensor=False) /
+             npp_polyval(zm1, a, tensor=False))
+
+    w = w*fs/(2*pi)
+
+    if plot is not None:
+        plot(w, h)
+
+    return w, h
+
+
+def freqz_zpk(z, p, k, worN=512, whole=False, fs=2*pi):
+    r"""
+    Compute the frequency response of a digital filter in ZPK form.
+
+    Given the Zeros, Poles and Gain of a digital filter, compute its frequency
+    response:
+
+    :math:`H(z)=k \prod_i (z - Z[i]) / \prod_j (z - P[j])`
+
+    where :math:`k` is the `gain`, :math:`Z` are the `zeros` and :math:`P` are
+    the `poles`.
+
+    Parameters
+    ----------
+    z : array_like
+        Zeroes of a linear filter
+    p : array_like
+        Poles of a linear filter
+    k : scalar
+        Gain of a linear filter
+    worN : {None, int, array_like}, optional
+        If a single integer, then compute at that many frequencies (default is
+        N=512).
+
+        If an array_like, compute the response at the frequencies given.
+        These are in the same units as `fs`.
+    whole : bool, optional
+        Normally, frequencies are computed from 0 to the Nyquist frequency,
+        fs/2 (upper-half of unit-circle). If `whole` is True, compute
+        frequencies from 0 to fs. Ignored if `worN` is array_like.
+    fs : float, optional
+        The sampling frequency of the digital system. Defaults to 2*pi
+        radians/sample (so w is from 0 to pi).
+
+        .. versionadded:: 1.2.0
+
+    Returns
+    -------
+    w : ndarray
+        The frequencies at which `h` was computed, in the same units as `fs`.
+        By default, `w` is normalized to the range [0, pi) (radians/sample).
+    h : ndarray
+        The frequency response, as complex numbers.
+
+    See Also
+    --------
+    freqs : Compute the frequency response of an analog filter in TF form
+    freqs_zpk : Compute the frequency response of an analog filter in ZPK form
+    freqz : Compute the frequency response of a digital filter in TF form
+
+    Notes
+    -----
+    .. versionadded:: 0.19.0
+
+    Examples
+    --------
+    Design a 4th-order digital Butterworth filter with cut-off of 100 Hz in a
+    system with sample rate of 1000 Hz, and plot the frequency response:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> z, p, k = signal.butter(4, 100, output='zpk', fs=1000)
+    >>> w, h = signal.freqz_zpk(z, p, k, fs=1000)
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig = plt.figure()
+    >>> ax1 = fig.add_subplot(1, 1, 1)
+    >>> ax1.set_title('Digital filter frequency response')
+
+    >>> ax1.plot(w, 20 * np.log10(abs(h)), 'b')
+    >>> ax1.set_ylabel('Amplitude [dB]', color='b')
+    >>> ax1.set_xlabel('Frequency [Hz]')
+    >>> ax1.grid(True)
+
+    >>> ax2 = ax1.twinx()
+    >>> angles = np.unwrap(np.angle(h))
+    >>> ax2.plot(w, angles, 'g')
+    >>> ax2.set_ylabel('Angle [radians]', color='g')
+
+    >>> plt.axis('tight')
+    >>> plt.show()
+
+    """
+    z, p = map(atleast_1d, (z, p))
+
+    if whole:
+        lastpoint = 2 * pi
+    else:
+        lastpoint = pi
+
+    if worN is None:
+        # For backwards compatibility
+        w = numpy.linspace(0, lastpoint, 512, endpoint=False)
+    elif _is_int_type(worN):
+        w = numpy.linspace(0, lastpoint, worN, endpoint=False)
+    else:
+        w = atleast_1d(worN)
+        w = 2*pi*w/fs
+
+    zm1 = exp(1j * w)
+    h = k * polyvalfromroots(zm1, z) / polyvalfromroots(zm1, p)
+
+    w = w*fs/(2*pi)
+
+    return w, h
+
+
+def group_delay(system, w=512, whole=False, fs=2*pi):
+    r"""Compute the group delay of a digital filter.
+
+    The group delay measures by how many samples amplitude envelopes of
+    various spectral components of a signal are delayed by a filter.
+    It is formally defined as the negative derivative of the continuous
+    (unwrapped) phase::
+
+               d        jw
+     D(w) = - -- arg H(e)
+              dw
+
+    Parameters
+    ----------
+    system : tuple of array_like (b, a)
+        Numerator and denominator coefficients of a filter transfer function.
+    w : {None, int, array_like}, optional
+        If a single integer, then compute at that many frequencies (default is
+        N=512).
+
+        If an array_like, compute the delay at the frequencies given. These
+        are in the same units as `fs`.
+    whole : bool, optional
+        Normally, frequencies are computed from 0 to the Nyquist frequency,
+        fs/2 (upper-half of unit-circle). If `whole` is True, compute
+        frequencies from 0 to fs. Ignored if w is array_like.
+    fs : float, optional
+        The sampling frequency of the digital system. Defaults to 2*pi
+        radians/sample (so w is from 0 to pi).
+
+        .. versionadded:: 1.2.0
+
+    Returns
+    -------
+    w : ndarray
+        The frequencies at which group delay was computed, in the same units
+        as `fs`.  By default, `w` is normalized to the range [0, pi)
+        (radians/sample).
+    gd : ndarray
+        The group delay.
+
+    See Also
+    --------
+    freqz : Frequency response of a digital filter
+
+    Notes
+    -----
+    The equivalent function in MATLAB is called `grpdelay`.
+
+    If the transfer function :math:`H(z)` has zeros or poles on the unit
+    circle, the group delay at the corresponding frequencies is undefined.
+    When such a case arises, a warning is raised and the group delay
+    is set to 0 at those frequencies.
+
+    For the details of numerical computation of the group delay refer to [1]_.
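+
+    As a quick sanity check (a sketch, with assumed coefficients), a pure
+    3-sample delay has a constant group delay of 3 samples::
+
+        from scipy.signal import group_delay
+
+        w, gd = group_delay(([0, 0, 0, 1], [1]))
+        # gd is approximately 3 at every frequency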
+
+    .. versionadded:: 0.16.0
+
+    References
+    ----------
+    .. [1] Richard G. Lyons, "Understanding Digital Signal Processing,
+           3rd edition", p. 830.
+
+    Examples
+    --------
+    >>> from scipy import signal
+    >>> b, a = signal.iirdesign(0.1, 0.3, 5, 50, ftype='cheby1')
+    >>> w, gd = signal.group_delay((b, a))
+
+    >>> import matplotlib.pyplot as plt
+    >>> plt.title('Digital filter group delay')
+    >>> plt.plot(w, gd)
+    >>> plt.ylabel('Group delay [samples]')
+    >>> plt.xlabel('Frequency [rad/sample]')
+    >>> plt.show()
+
+    """
+    if w is None:
+        # For backwards compatibility
+        w = 512
+
+    if _is_int_type(w):
+        if whole:
+            w = np.linspace(0, 2 * pi, w, endpoint=False)
+        else:
+            w = np.linspace(0, pi, w, endpoint=False)
+    else:
+        w = np.atleast_1d(w)
+        w = 2*pi*w/fs
+
+    b, a = map(np.atleast_1d, system)
+    # Compute gd = Re(DFT(n*c[n]) / DFT(c[n])) with c = b * reversed(a);
+    # multiplying by n in time differentiates the phase in frequency [1].
+    c = np.convolve(b, a[::-1])
+    cr = c * np.arange(c.size)
+    z = np.exp(-1j * w)
+    num = np.polyval(cr[::-1], z)
+    den = np.polyval(c[::-1], z)
+    gd = np.real(num / den) - a.size + 1
+    singular = ~np.isfinite(gd)
+    near_singular = np.absolute(den) < 10 * EPSILON
+
+    if np.any(singular):
+        gd[singular] = 0
+        warnings.warn(
+            "The group delay is singular at frequencies [{0}], setting to 0".
+            format(", ".join("{0:.3f}".format(ws) for ws in w[singular])),
+            stacklevel=2
+        )
+
+    elif np.any(near_singular):
+        warnings.warn(
+            "The filter's denominator is extremely small at frequencies "
+            "[{0}], around which a singularity may be present".
+            format(", ".join("{0:.3f}".format(ws) for ws in w[near_singular])),
+            stacklevel=2
+        )
+
+    w = w*fs/(2*pi)
+
+    return w, gd
+
+
+def _validate_sos(sos):
+    """Helper to validate a SOS input"""
+    sos = np.atleast_2d(sos)
+    if sos.ndim != 2:
+        raise ValueError('sos array must be 2D')
+    n_sections, m = sos.shape
+    if m != 6:
+        raise ValueError('sos array must be shape (n_sections, 6)')
+    if not (sos[:, 3] == 1).all():
+        raise ValueError('sos[:, 3] should be all ones')
+    return sos, n_sections
+
+
+def sosfreqz(sos, worN=512, whole=False, fs=2*pi):
+    r"""
+    Compute the frequency response of a digital filter in SOS format.
+
+    Given `sos`, an array with shape (n, 6) of second order sections of
+    a digital filter, compute the frequency response of the system function::
+
+               B0(z)   B1(z)         B{n-1}(z)
+        H(z) = ----- * ----- * ... * ---------
+               A0(z)   A1(z)         A{n-1}(z)
+
+    for z = exp(omega*1j), where B{k}(z) and A{k}(z) are numerator and
+    denominator of the transfer function of the k-th second order section.
+
+    Parameters
+    ----------
+    sos : array_like
+        Array of second-order filter coefficients, must have shape
+        ``(n_sections, 6)``. Each row corresponds to a second-order
+        section, with the first three columns providing the numerator
+        coefficients and the last three providing the denominator
+        coefficients.
+    worN : {None, int, array_like}, optional
+        If a single integer, then compute at that many frequencies (default is
+        N=512).  Using a number that is fast for FFT computations can result
+        in faster computations (see Notes of `freqz`).
+
+        If an array_like, compute the response at the frequencies given (must
+        be 1-D). These are in the same units as `fs`.
+    whole : bool, optional
+        Normally, frequencies are computed from 0 to the Nyquist frequency,
+        fs/2 (upper-half of unit-circle). If `whole` is True, compute
+        frequencies from 0 to fs.
+    fs : float, optional
+        The sampling frequency of the digital system. Defaults to 2*pi
+        radians/sample (so w is from 0 to pi).
+
+        .. versionadded:: 1.2.0
+
+    Returns
+    -------
+    w : ndarray
+        The frequencies at which `h` was computed, in the same units as `fs`.
+        By default, `w` is normalized to the range [0, pi) (radians/sample).
+    h : ndarray
+        The frequency response, as complex numbers.
+
+    See Also
+    --------
+    freqz, sosfilt
+
+    Notes
+    -----
+    .. versionadded:: 0.19.0
+
+    Examples
+    --------
+    Design a 15th-order bandpass filter in SOS format.
+
+    >>> from scipy import signal
+    >>> import numpy as np
+    >>> sos = signal.ellip(15, 0.5, 60, (0.2, 0.4), btype='bandpass',
+    ...                    output='sos')
+
+    Compute the frequency response at 1500 points from DC to Nyquist.
+
+    >>> w, h = signal.sosfreqz(sos, worN=1500)
+
+    Plot the response.
+
+    >>> import matplotlib.pyplot as plt
+    >>> plt.subplot(2, 1, 1)
+    >>> db = 20*np.log10(np.maximum(np.abs(h), 1e-5))
+    >>> plt.plot(w/np.pi, db)
+    >>> plt.ylim(-75, 5)
+    >>> plt.grid(True)
+    >>> plt.yticks([0, -20, -40, -60])
+    >>> plt.ylabel('Gain [dB]')
+    >>> plt.title('Frequency Response')
+    >>> plt.subplot(2, 1, 2)
+    >>> plt.plot(w/np.pi, np.angle(h))
+    >>> plt.grid(True)
+    >>> plt.yticks([-np.pi, -0.5*np.pi, 0, 0.5*np.pi, np.pi],
+    ...            [r'$-\pi$', r'$-\pi/2$', '0', r'$\pi/2$', r'$\pi$'])
+    >>> plt.ylabel('Phase [rad]')
+    >>> plt.xlabel('Normalized frequency (1.0 = Nyquist)')
+    >>> plt.show()
+
+    If the same filter is implemented as a single transfer function,
+    numerical error corrupts the frequency response:
+
+    >>> b, a = signal.ellip(15, 0.5, 60, (0.2, 0.4), btype='bandpass',
+    ...                    output='ba')
+    >>> w, h = signal.freqz(b, a, worN=1500)
+    >>> plt.subplot(2, 1, 1)
+    >>> db = 20*np.log10(np.maximum(np.abs(h), 1e-5))
+    >>> plt.plot(w/np.pi, db)
+    >>> plt.ylim(-75, 5)
+    >>> plt.grid(True)
+    >>> plt.yticks([0, -20, -40, -60])
+    >>> plt.ylabel('Gain [dB]')
+    >>> plt.title('Frequency Response')
+    >>> plt.subplot(2, 1, 2)
+    >>> plt.plot(w/np.pi, np.angle(h))
+    >>> plt.grid(True)
+    >>> plt.yticks([-np.pi, -0.5*np.pi, 0, 0.5*np.pi, np.pi],
+    ...            [r'$-\pi$', r'$-\pi/2$', '0', r'$\pi/2$', r'$\pi$'])
+    >>> plt.ylabel('Phase [rad]')
+    >>> plt.xlabel('Normalized frequency (1.0 = Nyquist)')
+    >>> plt.show()
+
+    """
+
+    sos, n_sections = _validate_sos(sos)
+    if n_sections == 0:
+        raise ValueError('Cannot compute frequencies with no sections')
+    h = 1.
+    for row in sos:
+        w, rowh = freqz(row[:3], row[3:], worN=worN, whole=whole, fs=fs)
+        h *= rowh
+    return w, h
+
+
+def _cplxreal(z, tol=None):
+    """
+    Split into complex and real parts, combining conjugate pairs.
+
+    The 1-D input vector `z` is split up into its complex (`zc`) and real (`zr`)
+    elements. Every complex element must be part of a complex-conjugate pair,
+    which are combined into a single number (with positive imaginary part) in
+    the output. Two complex numbers are considered a conjugate pair if their
+    real and imaginary parts differ in magnitude by less than ``tol * abs(z)``.
+
+    Parameters
+    ----------
+    z : array_like
+        Vector of complex numbers to be sorted and split
+    tol : float, optional
+        Relative tolerance for testing realness and conjugate equality.
+        Default is ``100 * spacing(1)`` of `z`'s data type (i.e., 2e-14 for
+        float64)
+
+    Returns
+    -------
+    zc : ndarray
+        Complex elements of `z`, with each pair represented by a single value
+        having positive imaginary part, sorted first by real part, and then
+        by magnitude of imaginary part. The pairs are averaged when combined
+        to reduce error.
+    zr : ndarray
+        Real elements of `z` (those having imaginary part less than
+        `tol` times their magnitude), sorted by value.
+
+    Raises
+    ------
+    ValueError
+        If there are any complex numbers in `z` for which a conjugate
+        cannot be found.
+
+    See Also
+    --------
+    _cplxpair
+
+    Examples
+    --------
+    >>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
+    >>> zc, zr = _cplxreal(a)
+    >>> print(zc)
+    [ 1.+1.j  2.+1.j  2.+1.j  2.+2.j]
+    >>> print(zr)
+    [ 1.  3.  4.]
+    """
+
+    z = atleast_1d(z)
+    if z.size == 0:
+        return z, z
+    elif z.ndim != 1:
+        raise ValueError('_cplxreal only accepts 1-D input')
+
+    if tol is None:
+        # Get tolerance from dtype of input
+        tol = 100 * np.finfo((1.0 * z).dtype).eps
+
+    # Sort by real part, magnitude of imaginary part (speed up further sorting)
+    z = z[np.lexsort((abs(z.imag), z.real))]
+
+    # Split reals from conjugate pairs
+    real_indices = abs(z.imag) <= tol * abs(z)
+    zr = z[real_indices].real
+
+    if len(zr) == len(z):
+        # Input is entirely real
+        return array([]), zr
+
+    # Split positive and negative halves of conjugates
+    z = z[~real_indices]
+    zp = z[z.imag > 0]
+    zn = z[z.imag < 0]
+
+    if len(zp) != len(zn):
+        raise ValueError('Array contains complex value with no matching '
+                         'conjugate.')
+
+    # Find runs of (approximately) the same real part
+    same_real = np.diff(zp.real) <= tol * abs(zp[:-1])
+    diffs = numpy.diff(concatenate(([0], same_real, [0])))
+    run_starts = numpy.nonzero(diffs > 0)[0]
+    run_stops = numpy.nonzero(diffs < 0)[0]
+
+    # Sort each run by their imaginary parts
+    for i in range(len(run_starts)):
+        start = run_starts[i]
+        stop = run_stops[i] + 1
+        for chunk in (zp[start:stop], zn[start:stop]):
+            chunk[...] = chunk[np.lexsort([abs(chunk.imag)])]
+
+    # Check that negatives match positives
+    if any(abs(zp - zn.conj()) > tol * abs(zn)):
+        raise ValueError('Array contains complex value with no matching '
+                         'conjugate.')
+
+    # Average out numerical inaccuracy in real vs imag parts of pairs
+    zc = (zp + zn.conj()) / 2
+
+    return zc, zr
+
+
+def _cplxpair(z, tol=None):
+    """
+    Sort into pairs of complex conjugates.
+
+    Complex conjugates in `z` are sorted by increasing real part. In each
+    pair, the number with negative imaginary part appears first.
+
+    If pairs have identical real parts, they are sorted by increasing
+    imaginary magnitude.
+
+    Two complex numbers are considered a conjugate pair if their real and
+    imaginary parts differ in magnitude by less than ``tol * abs(z)``.  The
+    pairs are forced to be exact complex conjugates by averaging the positive
+    and negative values.
+
+    Purely real numbers are also sorted, but placed after the complex
+    conjugate pairs. A number is considered real if its imaginary part is
+    smaller than `tol` times the magnitude of the number.
+
+    Parameters
+    ----------
+    z : array_like
+        1-D input array to be sorted.
+    tol : float, optional
+        Relative tolerance for testing realness and conjugate equality.
+        Default is ``100 * spacing(1)`` of `z`'s data type (i.e., 2e-14 for
+        float64)
+
+    Returns
+    -------
+    y : ndarray
+        Complex conjugate pairs followed by real numbers.
+
+    Raises
+    ------
+    ValueError
+        If there are any complex numbers in `z` for which a conjugate
+        cannot be found.
+
+    See Also
+    --------
+    _cplxreal
+
+    Examples
+    --------
+    >>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
+    >>> z = _cplxpair(a)
+    >>> print(z)
+    [ 1.-1.j  1.+1.j  2.-1.j  2.+1.j  2.-1.j  2.+1.j  2.-2.j  2.+2.j  1.+0.j
+      3.+0.j  4.+0.j]
+    """
+
+    z = atleast_1d(z)
+    if z.size == 0 or np.isrealobj(z):
+        return np.sort(z)
+
+    if z.ndim != 1:
+        raise ValueError('z must be 1-D')
+
+    zc, zr = _cplxreal(z, tol)
+
+    # Interleave complex values and their conjugates, with negative imaginary
+    # parts first in each pair
+    zc = np.dstack((zc.conj(), zc)).flatten()
+    z = np.append(zc, zr)
+    return z
+
+
+def tf2zpk(b, a):
+    r"""Return zero, pole, gain (z, p, k) representation from a numerator,
+    denominator representation of a linear filter.
+
+    Parameters
+    ----------
+    b : array_like
+        Numerator polynomial coefficients.
+    a : array_like
+        Denominator polynomial coefficients.
+
+    Returns
+    -------
+    z : ndarray
+        Zeros of the transfer function.
+    p : ndarray
+        Poles of the transfer function.
+    k : float
+        System gain.
+
+    Notes
+    -----
+    If some values of `b` are too close to 0, they are removed. In that case,
+    a BadCoefficients warning is emitted.
+
+    The `b` and `a` arrays are interpreted as coefficients for positive,
+    descending powers of the transfer function variable. So the inputs
+    :math:`b = [b_0, b_1, ..., b_M]` and :math:`a =[a_0, a_1, ..., a_N]`
+    can represent an analog filter of the form:
+
+    .. math::
+
+        H(s) = \frac
+        {b_0 s^M + b_1 s^{(M-1)} + \cdots + b_M}
+        {a_0 s^N + a_1 s^{(N-1)} + \cdots + a_N}
+
+    or a discrete-time filter of the form:
+
+    .. math::
+
+        H(z) = \frac
+        {b_0 z^M + b_1 z^{(M-1)} + \cdots + b_M}
+        {a_0 z^N + a_1 z^{(N-1)} + \cdots + a_N}
+
+    This "positive powers" form is found more commonly in controls
+    engineering. If `M` and `N` are equal (which is true for all filters
+    generated by the bilinear transform), then this happens to be equivalent
+    to the "negative powers" discrete-time form preferred in DSP:
+
+    .. math::
+
+        H(z) = \frac
+        {b_0 + b_1 z^{-1} + \cdots + b_M z^{-M}}
+        {a_0 + a_1 z^{-1} + \cdots + a_N z^{-N}}
+
+    Although this is true for common filters, remember that this is not true
+    in the general case. If `M` and `N` are not equal, the discrete-time
+    transfer function coefficients must first be converted to the "positive
+    powers" form before finding the poles and zeros.
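+
+    For example, a first-order discrete-time filter with equal numerator and
+    denominator order (a sketch with assumed coefficients)::
+
+        from scipy.signal import tf2zpk
+
+        z, p, k = tf2zpk([1, 0.5], [1, -0.3])
+        # z = [-0.5], p = [0.3], k = 1.0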
+
+    """
+    b, a = normalize(b, a)
+    b = (b + 0.0) / a[0]
+    a = (a + 0.0) / a[0]
+    k = b[0]
+    b /= b[0]
+    z = roots(b)
+    p = roots(a)
+    return z, p, k
+
+
+def zpk2tf(z, p, k):
+    """
+    Return polynomial transfer function representation from zeros and poles
+
+    Parameters
+    ----------
+    z : array_like
+        Zeros of the transfer function.
+    p : array_like
+        Poles of the transfer function.
+    k : float
+        System gain.
+
+    Returns
+    -------
+    b : ndarray
+        Numerator polynomial coefficients.
+    a : ndarray
+        Denominator polynomial coefficients.
+
+    """
+    z = atleast_1d(z)
+    k = atleast_1d(k)
+    if len(z.shape) > 1:
+        # 2-D z: build one numerator polynomial per row of zeros
+        temp = poly(z[0])
+        b = np.empty((z.shape[0], z.shape[1] + 1), temp.dtype.char)
+        if len(k) == 1:
+            k = [k[0]] * z.shape[0]
+        for i in range(z.shape[0]):
+            b[i] = k[i] * poly(z[i])
+    else:
+        b = k * poly(z)
+    a = atleast_1d(poly(p))
+
+    # Use real output if possible. Copied from numpy.poly, since
+    # we can't depend on a specific version of numpy.
+    if issubclass(b.dtype.type, numpy.complexfloating):
+        # if complex roots are all complex conjugates, the roots are real.
+        roots = numpy.asarray(z, complex)
+        pos_roots = numpy.compress(roots.imag > 0, roots)
+        neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))
+        if len(pos_roots) == len(neg_roots):
+            if numpy.all(numpy.sort_complex(neg_roots) ==
+                         numpy.sort_complex(pos_roots)):
+                b = b.real.copy()
+
+    if issubclass(a.dtype.type, numpy.complexfloating):
+        # if complex roots are all complex conjugates, the roots are real.
+        roots = numpy.asarray(p, complex)
+        pos_roots = numpy.compress(roots.imag > 0, roots)
+        neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))
+        if len(pos_roots) == len(neg_roots):
+            if numpy.all(numpy.sort_complex(neg_roots) ==
+                         numpy.sort_complex(pos_roots)):
+                a = a.real.copy()
+
+    return b, a
+
+
+def tf2sos(b, a, pairing=None, *, analog=False):
+    """
+    Return second-order sections from transfer function representation
+
+    Parameters
+    ----------
+    b : array_like
+        Numerator polynomial coefficients.
+    a : array_like
+        Denominator polynomial coefficients.
+    pairing : {None, 'nearest', 'keep_odd', 'minimal'}, optional
+        The method to use to combine pairs of poles and zeros into sections.
+        See `zpk2sos` for information and restrictions on `pairing` and
+        `analog` arguments.
+    analog : bool, optional
+        If True, system is analog, otherwise discrete.
+
+        .. versionadded:: 1.8.0
+
+    Returns
+    -------
+    sos : ndarray
+        Array of second-order filter coefficients, with shape
+        ``(n_sections, 6)``. See `sosfilt` for the SOS filter format
+        specification.
+
+    See Also
+    --------
+    zpk2sos, sosfilt
+
+    Notes
+    -----
+    It is generally discouraged to convert from TF to SOS format, since doing
+    so usually will not improve numerical precision errors. Instead, consider
+    designing filters in ZPK format and converting directly to SOS. TF is
+    converted to SOS by first converting to ZPK format, then converting
+    ZPK to SOS.
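+
+    A minimal usage sketch (with assumed coefficients; prefer designing in
+    ZPK format when possible)::
+
+        from scipy.signal import tf2sos
+
+        sos = tf2sos([0.0625, 0.125, 0.0625], [1, -0.6, 0.2])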
+
+    .. versionadded:: 0.16.0
+    """
+    return zpk2sos(*tf2zpk(b, a), pairing=pairing, analog=analog)
+
+
+def sos2tf(sos):
+    """
+    Return a single transfer function from a series of second-order sections
+
+    Parameters
+    ----------
+    sos : array_like
+        Array of second-order filter coefficients, must have shape
+        ``(n_sections, 6)``. See `sosfilt` for the SOS filter format
+        specification.
+
+    Returns
+    -------
+    b : ndarray
+        Numerator polynomial coefficients.
+    a : ndarray
+        Denominator polynomial coefficients.
+
+    Notes
+    -----
+    .. versionadded:: 0.16.0
+    """
+    sos = np.asarray(sos)
+    result_type = sos.dtype
+    if result_type.kind in 'bui':
+        result_type = np.float64
+
+    b = np.array([1], dtype=result_type)
+    a = np.array([1], dtype=result_type)
+    n_sections = sos.shape[0]
+    for section in range(n_sections):
+        b = np.polymul(b, sos[section, :3])
+        a = np.polymul(a, sos[section, 3:])
+    return b, a
+
+
+def sos2zpk(sos):
+    """
+    Return zeros, poles, and gain of a series of second-order sections
+
+    Parameters
+    ----------
+    sos : array_like
+        Array of second-order filter coefficients, must have shape
+        ``(n_sections, 6)``. See `sosfilt` for the SOS filter format
+        specification.
+
+    Returns
+    -------
+    z : ndarray
+        Zeros of the transfer function.
+    p : ndarray
+        Poles of the transfer function.
+    k : float
+        System gain.
+
+    Notes
+    -----
+    The number of zeros and poles returned will be ``n_sections * 2``
+    even if some of these are (effectively) zero.
+
+    .. versionadded:: 0.16.0
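+
+    Examples
+    --------
+    A minimal sketch: a single section always yields two zeros and two
+    poles, padded with zeros where the section's order is lower.
+
+    >>> from scipy.signal import sos2zpk
+    >>> z, p, k = sos2zpk([[1., 1., 0., 1., 0.5, 0.]])
+    >>> len(z), len(p), float(k)
+    (2, 2, 1.0)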
+    """
+    sos = np.asarray(sos)
+    n_sections = sos.shape[0]
+    z = np.zeros(n_sections*2, np.complex128)
+    p = np.zeros(n_sections*2, np.complex128)
+    k = 1.
+    for section in range(n_sections):
+        zpk = tf2zpk(sos[section, :3], sos[section, 3:])
+        z[2*section:2*section+len(zpk[0])] = zpk[0]
+        p[2*section:2*section+len(zpk[1])] = zpk[1]
+        k *= zpk[2]
+    return z, p, k
+
+
+def _nearest_real_complex_idx(fro, to, which):
+    """Get the next closest real or complex element based on distance"""
+    assert which in ('real', 'complex', 'any')
+    order = np.argsort(np.abs(fro - to))
+    if which == 'any':
+        return order[0]
+    else:
+        mask = np.isreal(fro[order])
+        if which == 'complex':
+            mask = ~mask
+        return order[np.nonzero(mask)[0][0]]
+
+
+def _single_zpksos(z, p, k):
+    """Create one second-order section from up to two zeros and poles"""
+    sos = np.zeros(6)
+    b, a = zpk2tf(z, p, k)
+    sos[3-len(b):3] = b
+    sos[6-len(a):6] = a
+    return sos
+
+
+def zpk2sos(z, p, k, pairing=None, *, analog=False):
+    """Return second-order sections from zeros, poles, and gain of a system
+
+    Parameters
+    ----------
+    z : array_like
+        Zeros of the transfer function.
+    p : array_like
+        Poles of the transfer function.
+    k : float
+        System gain.
+    pairing : {None, 'nearest', 'keep_odd', 'minimal'}, optional
+        The method to use to combine pairs of poles and zeros into sections.
+        If analog is False and pairing is None, pairing is set to 'nearest';
+        if analog is True, pairing must be 'minimal', and is set to that if
+        it is None.
+    analog : bool, optional
+        If True, system is analog, otherwise discrete.
+
+        .. versionadded:: 1.8.0
+
+    Returns
+    -------
+    sos : ndarray
+        Array of second-order filter coefficients, with shape
+        ``(n_sections, 6)``. See `sosfilt` for the SOS filter format
+        specification.
+
+    See Also
+    --------
+    sosfilt
+
+    Notes
+    -----
+    The algorithm used to convert ZPK to SOS format is designed to
+    minimize errors due to numerical precision issues. The pairing
+    algorithm attempts to minimize the peak gain of each biquadratic
+    section. This is done by pairing poles with the nearest zeros, starting
+    with the poles closest to the unit circle for discrete-time systems, and
+    poles closest to the imaginary axis for continuous-time systems.
+
+    ``pairing='minimal'`` outputs may not be suitable for `sosfilt`,
+    and ``analog=True`` outputs will never be suitable for `sosfilt`.
+
+    *Algorithms*
+
+    The steps in the ``pairing='nearest'``, ``pairing='keep_odd'``,
+    and ``pairing='minimal'`` algorithms are mostly shared. The
+    ``'nearest'`` algorithm attempts to minimize the peak gain, while
+    ``'keep_odd'`` minimizes peak gain under the constraint that
+    odd-order systems should retain one section as first order.
+    ``'minimal'`` is similar to ``'keep_odd'``, but no additional
+    poles or zeros are introduced.
+
+    The algorithm steps are as follows:
+
+    As a pre-processing step for ``pairing='nearest'`` or
+    ``pairing='keep_odd'``, add poles or zeros to the origin as
+    necessary to obtain the same number of poles and zeros for
+    pairing.  If ``pairing == 'nearest'`` and there are an odd number
+    of poles, add an additional pole and a zero at the origin.
+
+    The following steps are then iterated over until no more poles or
+    zeros remain:
+
+    1. Take the (next remaining) pole (complex or real) closest to the
+       unit circle (or imaginary axis, for ``analog=True``) to
+       begin a new filter section.
+
+    2. If the pole is real and there are no other remaining real poles [#]_,
+       add the closest real zero to the section and leave it as a first
+       order section. Note that after this step we are guaranteed to be
+       left with an even number of real poles, complex poles, real zeros,
+       and complex zeros for subsequent pairing iterations.
+
+    3. Else:
+
+        1. If the pole is complex and the zero is the only remaining real
+           zero, then pair the pole with the *next* closest zero
+           (guaranteed to be complex). This is necessary to ensure that
+           there will be a real zero remaining to eventually create a
+           first-order section (thus keeping the odd order).
+
+        2. Else pair the pole with the closest remaining zero (complex or
+           real).
+
+        3. Proceed to complete the second-order section by adding another
+           pole and zero to the current pole and zero in the section:
+
+            1. If the current pole and zero are both complex, add their
+               conjugates.
+
+            2. Else if the pole is complex and the zero is real, add the
+               conjugate pole and the next closest real zero.
+
+            3. Else if the pole is real and the zero is complex, add the
+               conjugate zero and the real pole closest to those zeros.
+
+            4. Else (we must have a real pole and real zero) add the next
+               real pole closest to the unit circle, and then add the real
+               zero closest to that pole.
+
+    .. [#] This conditional can only be met for specific odd-order inputs
+           with the ``pairing = 'keep_odd'`` or ``'minimal'`` methods.
+
+    .. versionadded:: 0.16.0
+
+    Examples
+    --------
+
+    Design a 6th order low-pass elliptic digital filter for a system with a
+    sampling rate of 8000 Hz that has a pass-band corner frequency of
+    1000 Hz. The ripple in the pass-band should not exceed 0.087 dB, and
+    the attenuation in the stop-band should be at least 90 dB.
+
+    In the following call to `ellip`, we could use ``output='sos'``,
+    but for this example, we'll use ``output='zpk'``, and then convert
+    to SOS format with `zpk2sos`:
+
+    >>> from scipy import signal
+    >>> import numpy as np
+    >>> z, p, k = signal.ellip(6, 0.087, 90, 1000/(0.5*8000), output='zpk')
+
+    Now convert to SOS format.
+
+    >>> sos = signal.zpk2sos(z, p, k)
+
+    The coefficients of the numerators of the sections:
+
+    >>> sos[:, :3]
+    array([[0.0014152 , 0.00248677, 0.0014152 ],
+           [1.        , 0.72976874, 1.        ],
+           [1.        , 0.17607852, 1.        ]])
+
+    The symmetry in the coefficients occurs because all the zeros are on the
+    unit circle.
+
+    The coefficients of the denominators of the sections:
+
+    >>> sos[:, 3:]
+    array([[ 1.        , -1.32544025,  0.46989976],
+           [ 1.        , -1.26118294,  0.62625924],
+           [ 1.        , -1.2570723 ,  0.8619958 ]])
+
+    The next example shows the effect of the `pairing` option.  We have a
+    system with three poles and three zeros, so the SOS array will have
+    shape (2, 6). This means there is, in effect, an extra pole and an extra
+    zero at the origin in the SOS representation.
+
+    >>> z1 = np.array([-1, -0.5-0.5j, -0.5+0.5j])
+    >>> p1 = np.array([0.75, 0.8+0.1j, 0.8-0.1j])
+
+    With ``pairing='nearest'`` (the default), we obtain
+
+    >>> signal.zpk2sos(z1, p1, 1)
+    array([[ 1.  ,  1.  ,  0.5 ,  1.  , -0.75,  0.  ],
+           [ 1.  ,  1.  ,  0.  ,  1.  , -1.6 ,  0.65]])
+
+    The first section has the zeros {-0.5-0.5j, -0.5+0.5j} and the poles
+    {0, 0.75}, and the second section has the zeros {-1, 0} and poles
+    {0.8+0.1j, 0.8-0.1j}. Note that the extra pole and zero at the origin
+    have been assigned to different sections.
+
+    With ``pairing='keep_odd'``, we obtain:
+
+    >>> signal.zpk2sos(z1, p1, 1, pairing='keep_odd')
+    array([[ 1.  ,  1.  ,  0.  ,  1.  , -0.75,  0.  ],
+           [ 1.  ,  1.  ,  0.5 ,  1.  , -1.6 ,  0.65]])
+
+    The extra pole and zero at the origin are in the same section.
+    The first section is, in effect, a first-order section.
+
+    With ``pairing='minimal'``, the first-order section doesn't have
+    the extra pole and zero at the origin:
+
+    >>> signal.zpk2sos(z1, p1, 1, pairing='minimal')
+    array([[ 0.  ,  1.  ,  1.  ,  0.  ,  1.  , -0.75],
+           [ 1.  ,  1.  ,  0.5 ,  1.  , -1.6 ,  0.65]])
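+
+    With ``analog=True`` (which requires ``pairing='minimal'``), an
+    illustrative purely analog conversion; note the leading zeros padding
+    the numerator, and that the result is not suitable for `sosfilt`:
+
+    >>> signal.zpk2sos([], [-1, -2], 1, analog=True)
+    array([[0., 0., 1., 1., 3., 2.]])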
+
+    """
+    # TODO in the near future:
+    # 1. Add SOS capability to `filtfilt`, `freqz`, etc. somehow (#3259).
+    # 2. Make `decimate` use `sosfilt` instead of `lfilter`.
+    # 3. Make sosfilt automatically simplify sections to first order
+    #    when possible. Note this might make `sosfiltfilt` a bit harder (ICs).
+    # 4. Further optimizations of the section ordering / pole-zero pairing.
+    # See the wiki for other potential issues.
+
+    if pairing is None:
+        pairing = 'minimal' if analog else 'nearest'
+
+    valid_pairings = ['nearest', 'keep_odd', 'minimal']
+    if pairing not in valid_pairings:
+        raise ValueError('pairing must be one of %s, not %s'
+                         % (valid_pairings, pairing))
+
+    if analog and pairing != 'minimal':
+        raise ValueError('for analog zpk2sos conversion, '
+                         'pairing must be "minimal"')
+
+    if len(z) == len(p) == 0:
+        if not analog:
+            return np.array([[k, 0., 0., 1., 0., 0.]])
+        else:
+            return np.array([[0., 0., k, 0., 0., 1.]])
+
+    if pairing != 'minimal':
+        # ensure we have the same number of poles and zeros, and make copies
+        p = np.concatenate((p, np.zeros(max(len(z) - len(p), 0))))
+        z = np.concatenate((z, np.zeros(max(len(p) - len(z), 0))))
+        n_sections = (max(len(p), len(z)) + 1) // 2
+
+        if len(p) % 2 == 1 and pairing == 'nearest':
+            p = np.concatenate((p, [0.]))
+            z = np.concatenate((z, [0.]))
+        assert len(p) == len(z)
+    else:
+        if len(p) < len(z):
+            raise ValueError('for analog zpk2sos conversion, '
+                             'must have len(p)>=len(z)')
+
+        n_sections = (len(p) + 1) // 2
+
+    # Ensure we have complex conjugate pairs
+    # (note that _cplxreal only gives us one element of each complex pair):
+    z = np.concatenate(_cplxreal(z))
+    p = np.concatenate(_cplxreal(p))
+    if not np.isreal(k):
+        raise ValueError('k must be real')
+    k = k.real
+
+    if not analog:
+        # digital: "worst" is the closest to the unit circle
+        def idx_worst(p):
+            return np.argmin(np.abs(1 - np.abs(p)))
+    else:
+        # analog: "worst" is the closest to the imaginary axis
+        def idx_worst(p):
+            return np.argmin(np.abs(np.real(p)))
+
+    sos = np.zeros((n_sections, 6))
+
+    # Construct the system, reversing order so the "worst" are last
+    for si in range(n_sections-1, -1, -1):
+        # Select the next "worst" pole
+        p1_idx = idx_worst(p)
+        p1 = p[p1_idx]
+        p = np.delete(p, p1_idx)
+
+        # Pair that pole with a zero
+
+        if np.isreal(p1) and np.isreal(p).sum() == 0:
+            # Special case (1): last remaining real pole
+            if pairing != 'minimal':
+                z1_idx = _nearest_real_complex_idx(z, p1, 'real')
+                z1 = z[z1_idx]
+                z = np.delete(z, z1_idx)
+                sos[si] = _single_zpksos([z1, 0], [p1, 0], 1)
+            elif len(z) > 0:
+                z1_idx = _nearest_real_complex_idx(z, p1, 'real')
+                z1 = z[z1_idx]
+                z = np.delete(z, z1_idx)
+                sos[si] = _single_zpksos([z1], [p1], 1)
+            else:
+                sos[si] = _single_zpksos([], [p1], 1)
+
+        elif (len(p) + 1 == len(z)
+              and not np.isreal(p1)
+              and np.isreal(p).sum() == 1
+              and np.isreal(z).sum() == 1):
+
+            # Special case (2): there's one real pole and one real zero
+            # left, and an equal number of poles and zeros to pair up.
+            # We *must* pair with a complex zero
+
+            z1_idx = _nearest_real_complex_idx(z, p1, 'complex')
+            z1 = z[z1_idx]
+            z = np.delete(z, z1_idx)
+            sos[si] = _single_zpksos([z1, z1.conj()], [p1, p1.conj()], 1)
+
+        else:
+            if np.isreal(p1):
+                prealidx = np.flatnonzero(np.isreal(p))
+                p2_idx = prealidx[idx_worst(p[prealidx])]
+                p2 = p[p2_idx]
+                p = np.delete(p, p2_idx)
+            else:
+                p2 = p1.conj()
+
+            # find closest zero
+            if len(z) > 0:
+                z1_idx = _nearest_real_complex_idx(z, p1, 'any')
+                z1 = z[z1_idx]
+                z = np.delete(z, z1_idx)
+
+                if not np.isreal(z1):
+                    sos[si] = _single_zpksos([z1, z1.conj()], [p1, p2], 1)
+                else:
+                    if len(z) > 0:
+                        z2_idx = _nearest_real_complex_idx(z, p1, 'real')
+                        z2 = z[z2_idx]
+                        assert np.isreal(z2)
+                        z = np.delete(z, z2_idx)
+                        sos[si] = _single_zpksos([z1, z2], [p1, p2], 1)
+                    else:
+                        sos[si] = _single_zpksos([z1], [p1, p2], 1)
+            else:
+                # no more zeros
+                sos[si] = _single_zpksos([], [p1, p2], 1)
+
+    assert len(p) == len(z) == 0  # we've consumed all poles and zeros
+    del p, z
+
+    # put gain in first sos
+    sos[0][:3] *= k
+    return sos
+
+
+def _align_nums(nums):
+    """Aligns the shapes of multiple numerators.
+
+    Given an array of numerator coefficient arrays [[a_1, a_2,...,
+    a_n],..., [b_1, b_2,..., b_m]], this function pads shorter numerator
+    arrays with zeros so that all numerators have the same length. Such
+    alignment is necessary for functions like 'tf2ss', which require
+    equal-length numerators when dealing with SIMO transfer functions.
+
+    Parameters
+    ----------
+    nums : array_like
+        Numerator or list of numerators. Not necessarily of the same length.
+
+    Returns
+    -------
+    nums : array
+        The numerator. If `nums` input was a list of numerators then a 2-D
+        array with padded zeros for shorter numerators is returned. Otherwise
+        returns ``np.asarray(nums)``.
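+
+    Examples
+    --------
+    A minimal sketch, padding a scalar numerator on the left with zeros:
+
+    >>> _align_nums([5, [1, 2, 3]])
+    array([[0., 0., 5.],
+           [1., 2., 3.]])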
+    """
+    try:
+        # The statement can throw a ValueError if one
+        # of the numerators is a scalar and another
+        # is array-like, e.g. if nums = [5, [1, 2, 3]]
+        nums = asarray(nums)
+
+        if not np.issubdtype(nums.dtype, np.number):
+            raise ValueError("dtype of numerator is non-numeric")
+
+        return nums
+
+    except ValueError:
+        nums = [np.atleast_1d(num) for num in nums]
+        max_width = max(num.size for num in nums)
+
+        # pre-allocate
+        aligned_nums = np.zeros((len(nums), max_width))
+
+        # Create numerators with padded zeros
+        for index, num in enumerate(nums):
+            aligned_nums[index, -num.size:] = num
+
+        return aligned_nums
+
+
+def normalize(b, a):
+    """Normalize numerator/denominator of a continuous-time transfer function.
+
+    If values of `b` are too close to 0, they are removed. In that case, a
+    BadCoefficients warning is emitted.
+
+    Parameters
+    ----------
+    b : array_like
+        Numerator of the transfer function. Can be a 2-D array to normalize
+        multiple transfer functions.
+    a : array_like
+        Denominator of the transfer function. At most 1-D.
+
+    Returns
+    -------
+    num : array
+        The numerator of the normalized transfer function. At least a 1-D
+        array. A 2-D array if the input `num` is a 2-D array.
+    den : 1-D array
+        The denominator of the normalized transfer function.
+
+    Notes
+    -----
+    Coefficients for both the numerator and denominator should be specified in
+    descending exponent order (e.g., ``s^2 + 3s + 5`` would be represented as
+    ``[1, 3, 5]``).
+
+    Examples
+    --------
+    >>> from scipy.signal import normalize
+
+    Normalize the coefficients of the transfer function
+    ``(3*s^2 - 2*s + 5) / (2*s^2 + 3*s + 1)``:
+
+    >>> b = [3, -2, 5]
+    >>> a = [2, 3, 1]
+    >>> normalize(b, a)
+    (array([ 1.5, -1. ,  2.5]), array([1. , 1.5, 0.5]))
+
+    A warning is generated if, for example, the first coefficient of
+    `b` is 0.  In the following example, the result is as expected:
+
+    >>> import warnings
+    >>> with warnings.catch_warnings(record=True) as w:
+    ...     num, den = normalize([0, 3, 6], [2, -5, 4])
+
+    >>> num
+    array([1.5, 3. ])
+    >>> den
+    array([ 1. , -2.5,  2. ])
+
+    >>> print(w[0].message)
+    Badly conditioned filter coefficients (numerator): the results may be meaningless
+
+    """
+    num, den = b, a
+
+    den = np.atleast_1d(den)
+    num = np.atleast_2d(_align_nums(num))
+
+    if den.ndim != 1:
+        raise ValueError("Denominator polynomial must be rank-1 array.")
+    if num.ndim > 2:
+        raise ValueError("Numerator polynomial must be rank-1 or"
+                         " rank-2 array.")
+    if np.all(den == 0):
+        raise ValueError("Denominator must have at least on nonzero element.")
+
+    # Trim leading zeros in denominator, leave at least one.
+    den = np.trim_zeros(den, 'f')
+
+    # Normalize transfer function
+    num, den = num / den[0], den / den[0]
+
+    # Count numerator columns that are all zero
+    leading_zeros = 0
+    for col in num.T:
+        if np.allclose(col, 0, atol=1e-14):
+            leading_zeros += 1
+        else:
+            break
+
+    # Trim leading zeros of numerator
+    if leading_zeros > 0:
+        warnings.warn("Badly conditioned filter coefficients (numerator): the "
+                      "results may be meaningless", BadCoefficients)
+        # Make sure at least one column remains
+        if leading_zeros == num.shape[1]:
+            leading_zeros -= 1
+        num = num[:, leading_zeros:]
+
+    # Squeeze first dimension if singular
+    if num.shape[0] == 1:
+        num = num[0, :]
+
+    return num, den
+
+
+def lp2lp(b, a, wo=1.0):
+    r"""
+    Transform a lowpass filter prototype to a different frequency.
+
+    Return an analog low-pass filter with cutoff frequency `wo`
+    from an analog low-pass filter prototype with unity cutoff frequency, in
+    transfer function ('ba') representation.
+
+    Parameters
+    ----------
+    b : array_like
+        Numerator polynomial coefficients.
+    a : array_like
+        Denominator polynomial coefficients.
+    wo : float
+        Desired cutoff, as angular frequency (e.g., rad/s).
+        Defaults to no change.
+
+    Returns
+    -------
+    b : array_like
+        Numerator polynomial coefficients of the transformed low-pass filter.
+    a : array_like
+        Denominator polynomial coefficients of the transformed low-pass filter.
+
+    See Also
+    --------
+    lp2hp, lp2bp, lp2bs, bilinear
+    lp2lp_zpk
+
+    Notes
+    -----
+    This is derived from the s-plane substitution
+
+    .. math:: s \rightarrow \frac{s}{\omega_0}
+
+    Examples
+    --------
+
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+
+    >>> lp = signal.lti([1.0], [1.0, 1.0])
+    >>> lp2 = signal.lti(*signal.lp2lp(lp.num, lp.den, 2))
+    >>> w, mag_lp, p_lp = lp.bode()
+    >>> w, mag_lp2, p_lp2 = lp2.bode(w)
+
+    >>> plt.plot(w, mag_lp, label='Lowpass')
+    >>> plt.plot(w, mag_lp2, label='Transformed Lowpass')
+    >>> plt.semilogx()
+    >>> plt.grid(True)
+    >>> plt.xlabel('Frequency [rad/s]')
+    >>> plt.ylabel('Magnitude [dB]')
+    >>> plt.legend()
+
+    """
+    a, b = map(atleast_1d, (a, b))
+    try:
+        wo = float(wo)
+    except TypeError:
+        wo = float(wo[0])
+    d = len(a)
+    n = len(b)
+    M = max((d, n))
+    pwo = pow(wo, numpy.arange(M - 1, -1, -1))
+    start1 = max((n - d, 0))
+    start2 = max((d - n, 0))
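+    # Substituting s -> s/wo divides the coefficient of s**m by wo**m.
+    # pwo holds descending powers of wo; the common factor pwo[start1]
+    # only rescales both polynomials and is removed again by normalize().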
+    b = b * pwo[start1] / pwo[start2:]
+    a = a * pwo[start1] / pwo[start1:]
+    return normalize(b, a)
+
+
+def lp2hp(b, a, wo=1.0):
+    r"""
+    Transform a lowpass filter prototype to a highpass filter.
+
+    Return an analog high-pass filter with cutoff frequency `wo`
+    from an analog low-pass filter prototype with unity cutoff frequency, in
+    transfer function ('ba') representation.
+
+    Parameters
+    ----------
+    b : array_like
+        Numerator polynomial coefficients.
+    a : array_like
+        Denominator polynomial coefficients.
+    wo : float
+        Desired cutoff, as angular frequency (e.g., rad/s).
+        Defaults to no change.
+
+    Returns
+    -------
+    b : array_like
+        Numerator polynomial coefficients of the transformed high-pass filter.
+    a : array_like
+        Denominator polynomial coefficients of the transformed high-pass filter.
+
+    See Also
+    --------
+    lp2lp, lp2bp, lp2bs, bilinear
+    lp2hp_zpk
+
+    Notes
+    -----
+    This is derived from the s-plane substitution
+
+    .. math:: s \rightarrow \frac{\omega_0}{s}
+
+    This maintains symmetry of the lowpass and highpass responses on a
+    logarithmic scale.
+
+    Examples
+    --------
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+
+    >>> lp = signal.lti([1.0], [1.0, 1.0])
+    >>> hp = signal.lti(*signal.lp2hp(lp.num, lp.den))
+    >>> w, mag_lp, p_lp = lp.bode()
+    >>> w, mag_hp, p_hp = hp.bode(w)
+
+    >>> plt.plot(w, mag_lp, label='Lowpass')
+    >>> plt.plot(w, mag_hp, label='Highpass')
+    >>> plt.semilogx()
+    >>> plt.grid(True)
+    >>> plt.xlabel('Frequency [rad/s]')
+    >>> plt.ylabel('Magnitude [dB]')
+    >>> plt.legend()
+
+    """
+    a, b = map(atleast_1d, (a, b))
+    try:
+        wo = float(wo)
+    except TypeError:
+        wo = float(wo[0])
+    d = len(a)
+    n = len(b)
+    if wo != 1:
+        pwo = pow(wo, numpy.arange(max((d, n))))
+    else:
+        pwo = numpy.ones(max((d, n)), b.dtype.char)
+    if d >= n:
+        outa = a[::-1] * pwo
+        outb = resize(b, (d,))
+        outb[n:] = 0.0
+        outb[:n] = b[::-1] * pwo[:n]
+    else:
+        outb = b[::-1] * pwo
+        outa = resize(a, (n,))
+        outa[d:] = 0.0
+        outa[:d] = a[::-1] * pwo[:d]
+
+    return normalize(outb, outa)
+
+
+def lp2bp(b, a, wo=1.0, bw=1.0):
+    r"""
+    Transform a lowpass filter prototype to a bandpass filter.
+
+    Return an analog band-pass filter with center frequency `wo` and
+    bandwidth `bw` from an analog low-pass filter prototype with unity
+    cutoff frequency, in transfer function ('ba') representation.
+
+    Parameters
+    ----------
+    b : array_like
+        Numerator polynomial coefficients.
+    a : array_like
+        Denominator polynomial coefficients.
+    wo : float
+        Desired passband center, as angular frequency (e.g., rad/s).
+        Defaults to no change.
+    bw : float
+        Desired passband width, as angular frequency (e.g., rad/s).
+        Defaults to 1.
+
+    Returns
+    -------
+    b : array_like
+        Numerator polynomial coefficients of the transformed band-pass filter.
+    a : array_like
+        Denominator polynomial coefficients of the transformed band-pass filter.
+
+    See Also
+    --------
+    lp2lp, lp2hp, lp2bs, bilinear
+    lp2bp_zpk
+
+    Notes
+    -----
+    This is derived from the s-plane substitution
+
+    .. math:: s \rightarrow \frac{s^2 + {\omega_0}^2}{s \cdot \mathrm{BW}}
+
+    This is the "wideband" transformation, producing a passband with
+    geometric (log frequency) symmetry about `wo`.
+
+    Examples
+    --------
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+
+    >>> lp = signal.lti([1.0], [1.0, 1.0])
+    >>> bp = signal.lti(*signal.lp2bp(lp.num, lp.den))
+    >>> w, mag_lp, p_lp = lp.bode()
+    >>> w, mag_bp, p_bp = bp.bode(w)
+
+    >>> plt.plot(w, mag_lp, label='Lowpass')
+    >>> plt.plot(w, mag_bp, label='Bandpass')
+    >>> plt.semilogx()
+    >>> plt.grid(True)
+    >>> plt.xlabel('Frequency [rad/s]')
+    >>> plt.ylabel('Magnitude [dB]')
+    >>> plt.legend()
+    """
+
+    a, b = map(atleast_1d, (a, b))
+    D = len(a) - 1
+    N = len(b) - 1
+    artype = mintypecode((a, b))
+    ma = max([N, D])
+    Np = N + ma
+    Dp = D + ma
+    bprime = numpy.empty(Np + 1, artype)
+    aprime = numpy.empty(Dp + 1, artype)
+    wosq = wo * wo
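+    # Substitute s -> (s**2 + wo**2) / (s * bw) and clear denominators by
+    # multiplying through by s**ma: each input term b[N - i] * s**i then
+    # contributes comb(i, k) * wosq**(i - k) / bw**i to the coefficient
+    # of s**(ma - i + 2*k).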
+    for j in range(Np + 1):
+        val = 0.0
+        for i in range(0, N + 1):
+            for k in range(0, i + 1):
+                if ma - i + 2 * k == j:
+                    val += comb(i, k) * b[N - i] * (wosq) ** (i - k) / bw ** i
+        bprime[Np - j] = val
+    for j in range(Dp + 1):
+        val = 0.0
+        for i in range(0, D + 1):
+            for k in range(0, i + 1):
+                if ma - i + 2 * k == j:
+                    val += comb(i, k) * a[D - i] * (wosq) ** (i - k) / bw ** i
+        aprime[Dp - j] = val
+
+    return normalize(bprime, aprime)
+
+
+def lp2bs(b, a, wo=1.0, bw=1.0):
+    r"""
+    Transform a lowpass filter prototype to a bandstop filter.
+
+    Return an analog band-stop filter with center frequency `wo` and
+    bandwidth `bw` from an analog low-pass filter prototype with unity
+    cutoff frequency, in transfer function ('ba') representation.
+
+    Parameters
+    ----------
+    b : array_like
+        Numerator polynomial coefficients.
+    a : array_like
+        Denominator polynomial coefficients.
+    wo : float
+        Desired stopband center, as angular frequency (e.g., rad/s).
+        Defaults to no change.
+    bw : float
+        Desired stopband width, as angular frequency (e.g., rad/s).
+        Defaults to 1.
+
+    Returns
+    -------
+    b : array_like
+        Numerator polynomial coefficients of the transformed band-stop filter.
+    a : array_like
+        Denominator polynomial coefficients of the transformed band-stop filter.
+
+    See Also
+    --------
+    lp2lp, lp2hp, lp2bp, bilinear
+    lp2bs_zpk
+
+    Notes
+    -----
+    This is derived from the s-plane substitution
+
+    .. math:: s \rightarrow \frac{s \cdot \mathrm{BW}}{s^2 + {\omega_0}^2}
+
+    This is the "wideband" transformation, producing a stopband with
+    geometric (log frequency) symmetry about `wo`.
+
+    Examples
+    --------
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+
+    >>> lp = signal.lti([1.0], [1.0, 1.5])
+    >>> bs = signal.lti(*signal.lp2bs(lp.num, lp.den))
+    >>> w, mag_lp, p_lp = lp.bode()
+    >>> w, mag_bs, p_bs = bs.bode(w)
+    >>> plt.plot(w, mag_lp, label='Lowpass')
+    >>> plt.plot(w, mag_bs, label='Bandstop')
+    >>> plt.semilogx()
+    >>> plt.grid(True)
+    >>> plt.xlabel('Frequency [rad/s]')
+    >>> plt.ylabel('Magnitude [dB]')
+    >>> plt.legend()
+    """
+    a, b = map(atleast_1d, (a, b))
+    D = len(a) - 1
+    N = len(b) - 1
+    artype = mintypecode((a, b))
+    M = max([N, D])
+    Np = M + M
+    Dp = M + M
+    bprime = numpy.empty(Np + 1, artype)
+    aprime = numpy.empty(Dp + 1, artype)
+    wosq = wo * wo
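+    # Substitute s -> (s * bw) / (s**2 + wo**2) and clear denominators by
+    # multiplying through by (s**2 + wo**2)**M: each input term
+    # b[N - i] * s**i then contributes comb(M - i, k) * wosq**(M - i - k)
+    # * bw**i to the coefficient of s**(i + 2*k).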
+    for j in range(Np + 1):
+        val = 0.0
+        for i in range(0, N + 1):
+            for k in range(0, M - i + 1):
+                if i + 2 * k == j:
+                    val += (comb(M - i, k) * b[N - i] *
+                            (wosq) ** (M - i - k) * bw ** i)
+        bprime[Np - j] = val
+    for j in range(Dp + 1):
+        val = 0.0
+        for i in range(0, D + 1):
+            for k in range(0, M - i + 1):
+                if i + 2 * k == j:
+                    val += (comb(M - i, k) * a[D - i] *
+                            (wosq) ** (M - i - k) * bw ** i)
+        aprime[Dp - j] = val
+
+    return normalize(bprime, aprime)
+
+
+def bilinear(b, a, fs=1.0):
+    r"""
+    Return a digital IIR filter from an analog one using a bilinear transform.
+
+    Transform a set of poles and zeros from the analog s-plane to the digital
+    z-plane using Tustin's method, which substitutes ``(z-1) / (z+1)`` for
+    ``s``, maintaining the shape of the frequency response.
+
+    Parameters
+    ----------
+    b : array_like
+        Numerator of the analog filter transfer function.
+    a : array_like
+        Denominator of the analog filter transfer function.
+    fs : float
+        Sample rate, as ordinary frequency (e.g., hertz). No prewarping is
+        done in this function.
+
+    Returns
+    -------
+    b : ndarray
+        Numerator of the transformed digital filter transfer function.
+    a : ndarray
+        Denominator of the transformed digital filter transfer function.
+
+    See Also
+    --------
+    lp2lp, lp2hp, lp2bp, lp2bs
+    bilinear_zpk
+
+    Examples
+    --------
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+    >>> import numpy as np
+
+    >>> fs = 100
+    >>> bf = 2 * np.pi * np.array([7, 13])
+    >>> filts = signal.lti(*signal.butter(4, bf, btype='bandpass',
+    ...                                   analog=True))
+    >>> filtz = signal.lti(*signal.bilinear(filts.num, filts.den, fs))
+    >>> wz, hz = signal.freqz(filtz.num, filtz.den)
+    >>> ws, hs = signal.freqs(filts.num, filts.den, worN=fs*wz)
+
+    >>> plt.semilogx(wz*fs/(2*np.pi), 20*np.log10(np.abs(hz).clip(1e-15)),
+    ...              label=r'$|H_z(e^{j \omega})|$')
+    >>> plt.semilogx(wz*fs/(2*np.pi), 20*np.log10(np.abs(hs).clip(1e-15)),
+    ...              label=r'$|H(j \omega)|$')
+    >>> plt.legend()
+    >>> plt.xlabel('Frequency [Hz]')
+    >>> plt.ylabel('Magnitude [dB]')
+    >>> plt.grid(True)
+    """
+    fs = float(fs)
+    a, b = map(atleast_1d, (a, b))
+    D = len(a) - 1
+    N = len(b) - 1
+    artype = float
+    M = max([N, D])
+    Np = M
+    Dp = M
+    bprime = numpy.empty(Np + 1, artype)
+    aprime = numpy.empty(Dp + 1, artype)
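+    # Substitute s -> 2*fs*(z - 1)/(z + 1) and multiply through by
+    # (z + 1)**M: expanding (z - 1)**i * (z + 1)**(M - i) binomially,
+    # every term with k + l == j contributes to the coefficient of
+    # z**(M - j), stored in descending order.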
+    for j in range(Np + 1):
+        val = 0.0
+        for i in range(N + 1):
+            for k in range(i + 1):
+                for l in range(M - i + 1):
+                    if k + l == j:
+                        val += (comb(i, k) * comb(M - i, l) * b[N - i] *
+                                pow(2 * fs, i) * (-1) ** k)
+        bprime[j] = real(val)
+    for j in range(Dp + 1):
+        val = 0.0
+        for i in range(D + 1):
+            for k in range(i + 1):
+                for l in range(M - i + 1):
+                    if k + l == j:
+                        val += (comb(i, k) * comb(M - i, l) * a[D - i] *
+                                pow(2 * fs, i) * (-1) ** k)
+        aprime[j] = real(val)
+
+    return normalize(bprime, aprime)
+
+
+def _validate_gpass_gstop(gpass, gstop):
+
+    if gpass <= 0.0:
+        raise ValueError("gpass should be larger than 0.0")
+    elif gstop <= 0.0:
+        raise ValueError("gstop should be larger than 0.0")
+    elif gpass > gstop:
+        raise ValueError("gpass should be smaller than gstop")
+
+
+def iirdesign(wp, ws, gpass, gstop, analog=False, ftype='ellip', output='ba',
+              fs=None):
+    """Complete IIR digital and analog filter design.
+
+    Given passband and stopband frequencies and gains, construct an analog or
+    digital IIR filter of minimum order for a given basic type. Return the
+    output in numerator, denominator ('ba'), pole-zero ('zpk') or second order
+    sections ('sos') form.
+
+    Parameters
+    ----------
+    wp, ws : float or array_like, shape (2,)
+        Passband and stopband edge frequencies. Possible values are scalars
+        (for lowpass and highpass filters) or ranges (for bandpass and bandstop
+        filters).
+        For digital filters, these are in the same units as `fs`. By default,
+        `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
+        where 1 is the Nyquist frequency. For example:
+
+            - Lowpass:   wp = 0.2,          ws = 0.3
+            - Highpass:  wp = 0.3,          ws = 0.2
+            - Bandpass:  wp = [0.2, 0.5],   ws = [0.1, 0.6]
+            - Bandstop:  wp = [0.1, 0.6],   ws = [0.2, 0.5]
+
+        For analog filters, `wp` and `ws` are angular frequencies (e.g., rad/s).
+        Note that for bandpass and bandstop filters, the passband must lie
+        strictly inside the stopband, or vice versa.
+    gpass : float
+        The maximum loss in the passband (dB).
+    gstop : float
+        The minimum attenuation in the stopband (dB).
+    analog : bool, optional
+        When True, return an analog filter, otherwise a digital filter is
+        returned.
+    ftype : str, optional
+        The type of IIR filter to design:
+
+            - Butterworth   : 'butter'
+            - Chebyshev I   : 'cheby1'
+            - Chebyshev II  : 'cheby2'
+            - Cauer/elliptic: 'ellip'
+
+    output : {'ba', 'zpk', 'sos'}, optional
+        Filter form of the output:
+
+            - second-order sections (recommended): 'sos'
+            - numerator/denominator (default)    : 'ba'
+            - pole-zero                          : 'zpk'
+
+        In general the second-order sections ('sos') form is
+        recommended because inferring the coefficients for the
+        numerator/denominator form ('ba') suffers from numerical
+        instabilities. For reasons of backward compatibility the default
+        form is the numerator/denominator form ('ba'), where the 'b'
+        and the 'a' in 'ba' refer to the commonly used names of the
+        coefficients.
+
+        Note: Using the second-order sections form ('sos') is sometimes
+        associated with additional computational costs: for
+        data-intensive use cases it is therefore recommended to also
+        investigate the numerator/denominator form ('ba').
+
+    fs : float, optional
+        The sampling frequency of the digital system.
+
+        .. versionadded:: 1.2.0
+
+    Returns
+    -------
+    b, a : ndarray, ndarray
+        Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
+        Only returned if ``output='ba'``.
+    z, p, k : ndarray, ndarray, float
+        Zeros, poles, and system gain of the IIR filter transfer
+        function.  Only returned if ``output='zpk'``.
+    sos : ndarray
+        Second-order sections representation of the IIR filter.
+        Only returned if ``output='sos'``.
+
+    See Also
+    --------
+    butter : Filter design using order and critical points
+    cheby1, cheby2, ellip, bessel
+    buttord : Find order and critical points from passband and stopband spec
+    cheb1ord, cheb2ord, ellipord
+    iirfilter : General filter design using order and critical frequencies
+
+    Notes
+    -----
+    The ``'sos'`` output parameter was added in 0.16.0.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+    >>> import matplotlib.ticker
+
+    >>> wp = 0.2
+    >>> ws = 0.3
+    >>> gpass = 1
+    >>> gstop = 40
+
+    >>> system = signal.iirdesign(wp, ws, gpass, gstop)
+    >>> w, h = signal.freqz(*system)
+
+    >>> fig, ax1 = plt.subplots()
+    >>> ax1.set_title('Digital filter frequency response')
+    >>> ax1.plot(w, 20 * np.log10(abs(h)), 'b')
+    >>> ax1.set_ylabel('Amplitude [dB]', color='b')
+    >>> ax1.set_xlabel('Frequency [rad/sample]')
+    >>> ax1.grid(True)
+    >>> ax1.set_ylim([-120, 20])
+    >>> ax2 = ax1.twinx()
+    >>> angles = np.unwrap(np.angle(h))
+    >>> ax2.plot(w, angles, 'g')
+    >>> ax2.set_ylabel('Angle (radians)', color='g')
+    >>> ax2.grid(True)
+    >>> ax2.axis('tight')
+    >>> ax2.set_ylim([-6, 1])
+    >>> nticks = 8
+    >>> ax1.yaxis.set_major_locator(matplotlib.ticker.LinearLocator(nticks))
+    >>> ax2.yaxis.set_major_locator(matplotlib.ticker.LinearLocator(nticks))
+
+    """
+    try:
+        ordfunc = filter_dict[ftype][1]
+    except KeyError as e:
+        raise ValueError("Invalid IIR filter type: %s" % ftype) from e
+    except IndexError as e:
+        raise ValueError(("%s does not have order selection. Use "
+                          "iirfilter function.") % ftype) from e
+
+    _validate_gpass_gstop(gpass, gstop)
+
+    wp = atleast_1d(wp)
+    ws = atleast_1d(ws)
+
+    if wp.shape[0] != ws.shape[0] or wp.shape not in [(1,), (2,)]:
+        raise ValueError("wp and ws must have one or two elements each, and"
+                         "the same shape, got %s and %s"
+                         % (wp.shape, ws.shape))
+
+    if any(wp <= 0) or any(ws <= 0):
+        raise ValueError("Values for wp, ws must be greater than 0")
+
+    if not analog:
+        if fs is None:
+            if any(wp >= 1) or any(ws >= 1):
+                raise ValueError("Values for wp, ws must be less than 1")
+        elif any(wp >= fs/2) or any(ws >= fs/2):
+            raise ValueError("Values for wp, ws must be less than fs/2"
+                             " (fs={} -> fs/2={})".format(fs, fs/2))
+
+    if wp.shape[0] == 2:
+        if not ((ws[0] < wp[0] and wp[1] < ws[1]) or
+               (wp[0] < ws[0] and ws[1] < wp[1])):
+            raise ValueError("Passband must lie strictly inside stopband"
+                             " or vice versa")
+
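+    # Infer the band type from the edge layout: scalar edges give lowpass
+    # (wp < ws) or highpass (wp >= ws); two-element edges give bandstop
+    # (stopband inside passband) or bandpass (passband inside stopband).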
+    band_type = 2 * (len(wp) - 1)
+    band_type += 1
+    if wp[0] >= ws[0]:
+        band_type += 1
+
+    btype = {1: 'lowpass', 2: 'highpass',
+             3: 'bandstop', 4: 'bandpass'}[band_type]
+
+    N, Wn = ordfunc(wp, ws, gpass, gstop, analog=analog, fs=fs)
+    return iirfilter(N, Wn, rp=gpass, rs=gstop, analog=analog, btype=btype,
+                     ftype=ftype, output=output, fs=fs)
+
+
+def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=False,
+              ftype='butter', output='ba', fs=None):
+    """
+    IIR digital and analog filter design given order and critical points.
+
+    Design an Nth-order digital or analog filter and return the filter
+    coefficients.
+
+    Parameters
+    ----------
+    N : int
+        The order of the filter.
+    Wn : array_like
+        A scalar or length-2 sequence giving the critical frequencies.
+
+        For digital filters, `Wn` are in the same units as `fs`. By default,
+        `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
+        where 1 is the Nyquist frequency. (`Wn` is thus in
+        half-cycles / sample.)
+
+        For analog filters, `Wn` is an angular frequency (e.g., rad/s).
+
+        When Wn is a length-2 sequence, ``Wn[0]`` must be less than ``Wn[1]``.
+    rp : float, optional
+        For Chebyshev and elliptic filters, provides the maximum ripple
+        in the passband. (dB)
+    rs : float, optional
+        For Chebyshev and elliptic filters, provides the minimum attenuation
+        in the stop band. (dB)
+    btype : {'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional
+        The type of filter.  Default is 'bandpass'.
+    analog : bool, optional
+        When True, return an analog filter, otherwise a digital filter is
+        returned.
+    ftype : str, optional
+        The type of IIR filter to design:
+
+            - Butterworth   : 'butter'
+            - Chebyshev I   : 'cheby1'
+            - Chebyshev II  : 'cheby2'
+            - Cauer/elliptic: 'ellip'
+            - Bessel/Thomson: 'bessel'
+
+    output : {'ba', 'zpk', 'sos'}, optional
+        Filter form of the output:
+
+            - second-order sections (recommended): 'sos'
+            - numerator/denominator (default)    : 'ba'
+            - pole-zero                          : 'zpk'
+
+        In general the second-order sections ('sos') form is
+        recommended because inferring the coefficients for the
+        numerator/denominator form ('ba') suffers from numerical
+        instabilities. For reasons of backward compatibility the default
+        form is the numerator/denominator form ('ba'), where the 'b'
+        and the 'a' in 'ba' refer to the commonly used names of the
+        coefficients.
+
+        Note: Using the second-order sections form ('sos') is sometimes
+        associated with additional computational costs: for
+        data-intensive use cases it is therefore recommended to also
+        investigate the numerator/denominator form ('ba').
+
+    fs : float, optional
+        The sampling frequency of the digital system.
+
+        .. versionadded:: 1.2.0
+
+    Returns
+    -------
+    b, a : ndarray, ndarray
+        Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
+        Only returned if ``output='ba'``.
+    z, p, k : ndarray, ndarray, float
+        Zeros, poles, and system gain of the IIR filter transfer
+        function.  Only returned if ``output='zpk'``.
+    sos : ndarray
+        Second-order sections representation of the IIR filter.
+        Only returned if ``output='sos'``.
+
+    See Also
+    --------
+    butter : Filter design using order and critical points
+    cheby1, cheby2, ellip, bessel
+    buttord : Find order and critical points from passband and stopband spec
+    cheb1ord, cheb2ord, ellipord
+    iirdesign : General filter design using passband and stopband spec
+
+    Notes
+    -----
+    The ``'sos'`` output parameter was added in 0.16.0.
+
+    Examples
+    --------
+    Generate a 17th-order Chebyshev II analog bandpass filter from 50 Hz to
+    200 Hz and plot the frequency response:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+
+    >>> b, a = signal.iirfilter(17, [2*np.pi*50, 2*np.pi*200], rs=60,
+    ...                         btype='band', analog=True, ftype='cheby2')
+    >>> w, h = signal.freqs(b, a, 1000)
+    >>> fig = plt.figure()
+    >>> ax = fig.add_subplot(1, 1, 1)
+    >>> ax.semilogx(w / (2*np.pi), 20 * np.log10(np.maximum(abs(h), 1e-5)))
+    >>> ax.set_title('Chebyshev Type II bandpass frequency response')
+    >>> ax.set_xlabel('Frequency [Hz]')
+    >>> ax.set_ylabel('Amplitude [dB]')
+    >>> ax.axis((10, 1000, -100, 10))
+    >>> ax.grid(which='both', axis='both')
+    >>> plt.show()
+
+    Create a digital filter with the same properties, in a system with
+    sampling rate of 2000 Hz, and plot the frequency response. (Second-order
+    sections implementation is required to ensure stability of a filter of
+    this order):
+
+    >>> sos = signal.iirfilter(17, [50, 200], rs=60, btype='band',
+    ...                        analog=False, ftype='cheby2', fs=2000,
+    ...                        output='sos')
+    >>> w, h = signal.sosfreqz(sos, 2000, fs=2000)
+    >>> fig = plt.figure()
+    >>> ax = fig.add_subplot(1, 1, 1)
+    >>> ax.semilogx(w, 20 * np.log10(np.maximum(abs(h), 1e-5)))
+    >>> ax.set_title('Chebyshev Type II bandpass frequency response')
+    >>> ax.set_xlabel('Frequency [Hz]')
+    >>> ax.set_ylabel('Amplitude [dB]')
+    >>> ax.axis((10, 1000, -100, 10))
+    >>> ax.grid(which='both', axis='both')
+    >>> plt.show()
+
+    """
+    ftype, btype, output = [x.lower() for x in (ftype, btype, output)]
+    Wn = asarray(Wn)
+    if fs is not None:
+        if analog:
+            raise ValueError("fs cannot be specified for an analog filter")
+        Wn = 2*Wn/fs
+
+    if numpy.any(Wn <= 0):
+        raise ValueError("filter critical frequencies must be greater than 0")
+
+    if Wn.size > 1 and not Wn[0] < Wn[1]:
+        raise ValueError("Wn[0] must be less than Wn[1]")
+
+    try:
+        btype = band_dict[btype]
+    except KeyError as e:
+        raise ValueError("'%s' is an invalid bandtype for filter." % btype) from e
+
+    try:
+        typefunc = filter_dict[ftype][0]
+    except KeyError as e:
+        raise ValueError("'%s' is not a valid basic IIR filter." % ftype) from e
+
+    if output not in ['ba', 'zpk', 'sos']:
+        raise ValueError("'%s' is not a valid output form." % output)
+
+    if rp is not None and rp < 0:
+        raise ValueError("passband ripple (rp) must be positive")
+
+    if rs is not None and rs < 0:
+        raise ValueError("stopband attenuation (rs) must be positive")
+
+    # Get analog lowpass prototype
+    if typefunc == buttap:
+        z, p, k = typefunc(N)
+    elif typefunc == besselap:
+        z, p, k = typefunc(N, norm=bessel_norms[ftype])
+    elif typefunc == cheb1ap:
+        if rp is None:
+            raise ValueError("passband ripple (rp) must be provided to "
+                             "design a Chebyshev I filter.")
+        z, p, k = typefunc(N, rp)
+    elif typefunc == cheb2ap:
+        if rs is None:
+            raise ValueError("stopband attenuation (rs) must be provided to "
+                             "design a Chebyshev II filter.")
+        z, p, k = typefunc(N, rs)
+    elif typefunc == ellipap:
+        if rs is None or rp is None:
+            raise ValueError("Both rp and rs must be provided to design an "
+                             "elliptic filter.")
+        z, p, k = typefunc(N, rp, rs)
+    else:
+        raise NotImplementedError("'%s' not implemented in iirfilter." % ftype)
+
+    # Pre-warp frequencies for digital filter design
+    if not analog:
+        if numpy.any(Wn <= 0) or numpy.any(Wn >= 1):
+            if fs is not None:
+                raise ValueError("Digital filter critical frequencies must "
+                                 f"be 0 < Wn < fs/2 (fs={fs} -> fs/2={fs/2})")
+            raise ValueError("Digital filter critical frequencies "
+                             "must be 0 < Wn < 1")
+        fs = 2.0
+        warped = 2 * fs * tan(pi * Wn / fs)
+    else:
+        warped = Wn
+
+    # transform to lowpass, bandpass, highpass, or bandstop
+    if btype in ('lowpass', 'highpass'):
+        if numpy.size(Wn) != 1:
+            raise ValueError('Must specify a single critical frequency Wn '
+                             'for lowpass or highpass filter')
+
+        if btype == 'lowpass':
+            z, p, k = lp2lp_zpk(z, p, k, wo=warped)
+        elif btype == 'highpass':
+            z, p, k = lp2hp_zpk(z, p, k, wo=warped)
+    elif btype in ('bandpass', 'bandstop'):
+        try:
+            bw = warped[1] - warped[0]
+            wo = sqrt(warped[0] * warped[1])
+        except IndexError as e:
+            raise ValueError('Wn must specify start and stop frequencies for '
+                             'bandpass or bandstop filter') from e
+
+        if btype == 'bandpass':
+            z, p, k = lp2bp_zpk(z, p, k, wo=wo, bw=bw)
+        elif btype == 'bandstop':
+            z, p, k = lp2bs_zpk(z, p, k, wo=wo, bw=bw)
+    else:
+        raise NotImplementedError("'%s' not implemented in iirfilter." % btype)
+
+    # Find discrete equivalent if necessary
+    if not analog:
+        z, p, k = bilinear_zpk(z, p, k, fs=fs)
+
+    # Transform to proper out type (pole-zero, state-space, numer-denom)
+    if output == 'zpk':
+        return z, p, k
+    elif output == 'ba':
+        return zpk2tf(z, p, k)
+    elif output == 'sos':
+        return zpk2sos(z, p, k, analog=analog)
+
+
+def _relative_degree(z, p):
+    """
+    Return relative degree of transfer function from zeros and poles
+    """
+    degree = len(p) - len(z)
+    if degree < 0:
+        raise ValueError("Improper transfer function. "
+                         "Must have at least as many poles as zeros.")
+    else:
+        return degree
+
+
+def bilinear_zpk(z, p, k, fs):
+    r"""
+    Return a digital IIR filter from an analog one using a bilinear transform.
+
+    Transform a set of poles and zeros from the analog s-plane to the digital
+    z-plane using Tustin's method, which substitutes ``(z-1) / (z+1)`` for
+    ``s``, maintaining the shape of the frequency response.
+
+    Parameters
+    ----------
+    z : array_like
+        Zeros of the analog filter transfer function.
+    p : array_like
+        Poles of the analog filter transfer function.
+    k : float
+        System gain of the analog filter transfer function.
+    fs : float
+        Sample rate, as ordinary frequency (e.g., hertz). No prewarping is
+        done in this function.
+
+    Returns
+    -------
+    z : ndarray
+        Zeros of the transformed digital filter transfer function.
+    p : ndarray
+        Poles of the transformed digital filter transfer function.
+    k : float
+        System gain of the transformed digital filter.
+
+    See Also
+    --------
+    lp2lp_zpk, lp2hp_zpk, lp2bp_zpk, lp2bs_zpk
+    bilinear
+
+    Notes
+    -----
+    .. versionadded:: 1.1.0
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+
+    >>> fs = 100
+    >>> bf = 2 * np.pi * np.array([7, 13])
+    >>> filts = signal.lti(*signal.butter(4, bf, btype='bandpass', analog=True,
+    ...                                   output='zpk'))
+    >>> filtz = signal.lti(*signal.bilinear_zpk(filts.zeros, filts.poles,
+    ...                                         filts.gain, fs))
+    >>> wz, hz = signal.freqz_zpk(filtz.zeros, filtz.poles, filtz.gain)
+    >>> ws, hs = signal.freqs_zpk(filts.zeros, filts.poles, filts.gain,
+    ...                           worN=fs*wz)
+    >>> plt.semilogx(wz*fs/(2*np.pi), 20*np.log10(np.abs(hz).clip(1e-15)),
+    ...              label=r'$|H_z(e^{j \omega})|$')
+    >>> plt.semilogx(wz*fs/(2*np.pi), 20*np.log10(np.abs(hs).clip(1e-15)),
+    ...              label=r'$|H(j \omega)|$')
+    >>> plt.legend()
+    >>> plt.xlabel('Frequency [Hz]')
+    >>> plt.ylabel('Magnitude [dB]')
+    >>> plt.grid(True)
+    """
+    z = atleast_1d(z)
+    p = atleast_1d(p)
+
+    degree = _relative_degree(z, p)
+
+    fs2 = 2.0*fs
+
+    # Bilinear transform the poles and zeros
+    z_z = (fs2 + z) / (fs2 - z)
+    p_z = (fs2 + p) / (fs2 - p)
+
+    # Any zeros that were at infinity get moved to the Nyquist frequency
+    z_z = append(z_z, -ones(degree))
+
+    # Compensate for gain change
+    k_z = k * real(prod(fs2 - z) / prod(fs2 - p))
+
+    return z_z, p_z, k_z
+
+
+def lp2lp_zpk(z, p, k, wo=1.0):
+    r"""
+    Transform a lowpass filter prototype to a different frequency.
+
+    Return an analog low-pass filter with cutoff frequency `wo`
+    from an analog low-pass filter prototype with unity cutoff frequency,
+    using zeros, poles, and gain ('zpk') representation.
+
+    Parameters
+    ----------
+    z : array_like
+        Zeros of the analog filter transfer function.
+    p : array_like
+        Poles of the analog filter transfer function.
+    k : float
+        System gain of the analog filter transfer function.
+    wo : float
+        Desired cutoff, as angular frequency (e.g., rad/s).
+        Defaults to no change.
+
+    Returns
+    -------
+    z : ndarray
+        Zeros of the transformed low-pass filter transfer function.
+    p : ndarray
+        Poles of the transformed low-pass filter transfer function.
+    k : float
+        System gain of the transformed low-pass filter.
+
+    See Also
+    --------
+    lp2hp_zpk, lp2bp_zpk, lp2bs_zpk, bilinear
+    lp2lp
+
+    Notes
+    -----
+    This is derived from the s-plane substitution
+
+    .. math:: s \rightarrow \frac{s}{\omega_0}
+
+    .. versionadded:: 1.1.0
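+
+    Examples
+    --------
+    A minimal sketch, assuming a unity-cutoff prototype with one pole:
+
+    >>> from scipy.signal import lp2lp_zpk
+    >>> z, p, k = lp2lp_zpk([], [-1.0], 1.0, wo=2.0)
+    >>> p, k
+    (array([-2.]), 2.0)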
+
+    """
+    z = atleast_1d(z)
+    p = atleast_1d(p)
+    wo = float(wo)  # Avoid int wraparound
+
+    degree = _relative_degree(z, p)
+
+    # Scale all points radially from origin to shift cutoff frequency
+    z_lp = wo * z
+    p_lp = wo * p
+
+    # Each shifted pole decreases gain by wo, each shifted zero increases it.
+    # Cancel out the net change to keep overall gain the same
+    k_lp = k * wo**degree
+
+    return z_lp, p_lp, k_lp
+
+
+def lp2hp_zpk(z, p, k, wo=1.0):
+    r"""
+    Transform a lowpass filter prototype to a highpass filter.
+
+    Return an analog high-pass filter with cutoff frequency `wo`
+    from an analog low-pass filter prototype with unity cutoff frequency,
+    using zeros, poles, and gain ('zpk') representation.
+
+    Parameters
+    ----------
+    z : array_like
+        Zeros of the analog filter transfer function.
+    p : array_like
+        Poles of the analog filter transfer function.
+    k : float
+        System gain of the analog filter transfer function.
+    wo : float
+        Desired cutoff, as angular frequency (e.g., rad/s).
+        Defaults to no change.
+
+    Returns
+    -------
+    z : ndarray
+        Zeros of the transformed high-pass filter transfer function.
+    p : ndarray
+        Poles of the transformed high-pass filter transfer function.
+    k : float
+        System gain of the transformed high-pass filter.
+
+    See Also
+    --------
+    lp2lp_zpk, lp2bp_zpk, lp2bs_zpk, bilinear
+    lp2hp
+
+    Notes
+    -----
+    This is derived from the s-plane substitution
+
+    .. math:: s \rightarrow \frac{\omega_0}{s}
+
+    This maintains symmetry of the lowpass and highpass responses on a
+    logarithmic scale.
+
+    .. versionadded:: 1.1.0
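+
+    Examples
+    --------
+    A minimal sketch: the prototype's zero at infinity moves to the origin.
+
+    >>> from scipy.signal import lp2hp_zpk
+    >>> z, p, k = lp2hp_zpk([], [-1.0], 1.0, wo=2.0)
+    >>> z, p, float(k)
+    (array([0.]), array([-2.]), 1.0)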
+
+    """
+    z = atleast_1d(z)
+    p = atleast_1d(p)
+    wo = float(wo)
+
+    degree = _relative_degree(z, p)
+
+    # Invert positions radially about unit circle to convert LPF to HPF
+    # Scale all points radially from origin to shift cutoff frequency
+    z_hp = wo / z
+    p_hp = wo / p
+
+    # If lowpass had zeros at infinity, inverting moves them to origin.
+    z_hp = append(z_hp, zeros(degree))
+
+    # Cancel out gain change caused by inversion
+    k_hp = k * real(prod(-z) / prod(-p))
+
+    return z_hp, p_hp, k_hp
+
+
+def lp2bp_zpk(z, p, k, wo=1.0, bw=1.0):
+    r"""
+    Transform a lowpass filter prototype to a bandpass filter.
+
+    Return an analog band-pass filter with center frequency `wo` and
+    bandwidth `bw` from an analog low-pass filter prototype with unity
+    cutoff frequency, using zeros, poles, and gain ('zpk') representation.
+
+    Parameters
+    ----------
+    z : array_like
+        Zeros of the analog filter transfer function.
+    p : array_like
+        Poles of the analog filter transfer function.
+    k : float
+        System gain of the analog filter transfer function.
+    wo : float
+        Desired passband center, as angular frequency (e.g., rad/s).
+        Defaults to no change.
+    bw : float
+        Desired passband width, as angular frequency (e.g., rad/s).
+        Defaults to 1.
+
+    Returns
+    -------
+    z : ndarray
+        Zeros of the transformed band-pass filter transfer function.
+    p : ndarray
+        Poles of the transformed band-pass filter transfer function.
+    k : float
+        System gain of the transformed band-pass filter.
+
+    See Also
+    --------
+    lp2lp_zpk, lp2hp_zpk, lp2bs_zpk, bilinear
+    lp2bp
+
+    Notes
+    -----
+    This is derived from the s-plane substitution
+
+    .. math:: s \rightarrow \frac{s^2 + {\omega_0}^2}{s \cdot \mathrm{BW}}
+
+    This is the "wideband" transformation, producing a passband with
+    geometric (log frequency) symmetry about `wo`.
+
+    .. versionadded:: 1.1.0
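+
+    Examples
+    --------
+    A minimal sketch: one prototype pole splits into a pair around the
+    passband center, and a zero appears at the origin.
+
+    >>> from scipy.signal import lp2bp_zpk
+    >>> z, p, k = lp2bp_zpk([], [-1.0], 1.0, wo=2.0, bw=2.0)
+    >>> len(z), len(p), k
+    (1, 2, 2.0)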
+
+    """
+    z = atleast_1d(z)
+    p = atleast_1d(p)
+    wo = float(wo)
+    bw = float(bw)
+
+    degree = _relative_degree(z, p)
+
+    # Scale poles and zeros to desired bandwidth
+    z_lp = z * bw/2
+    p_lp = p * bw/2
+
+    # Square root needs to produce complex result, not NaN
+    z_lp = z_lp.astype(complex)
+    p_lp = p_lp.astype(complex)
+
+    # Duplicate poles and zeros and shift from baseband to +wo and -wo
+    z_bp = concatenate((z_lp + sqrt(z_lp**2 - wo**2),
+                        z_lp - sqrt(z_lp**2 - wo**2)))
+    p_bp = concatenate((p_lp + sqrt(p_lp**2 - wo**2),
+                        p_lp - sqrt(p_lp**2 - wo**2)))
+
+    # Move degree zeros to origin, leaving degree zeros at infinity for BPF
+    z_bp = append(z_bp, zeros(degree))
+
+    # Cancel out gain change from frequency scaling
+    k_bp = k * bw**degree
+
+    return z_bp, p_bp, k_bp
+
+
+def lp2bs_zpk(z, p, k, wo=1.0, bw=1.0):
+    r"""
+    Transform a lowpass filter prototype to a bandstop filter.
+
+    Return an analog band-stop filter with center frequency `wo` and
+    stopband width `bw` from an analog low-pass filter prototype with unity
+    cutoff frequency, using zeros, poles, and gain ('zpk') representation.
+
+    Parameters
+    ----------
+    z : array_like
+        Zeros of the analog filter transfer function.
+    p : array_like
+        Poles of the analog filter transfer function.
+    k : float
+        System gain of the analog filter transfer function.
+    wo : float
+        Desired stopband center, as angular frequency (e.g., rad/s).
+        Defaults to no change.
+    bw : float
+        Desired stopband width, as angular frequency (e.g., rad/s).
+        Defaults to 1.
+
+    Returns
+    -------
+    z : ndarray
+        Zeros of the transformed band-stop filter transfer function.
+    p : ndarray
+        Poles of the transformed band-stop filter transfer function.
+    k : float
+        System gain of the transformed band-stop filter.
+
+    See Also
+    --------
+    lp2lp_zpk, lp2hp_zpk, lp2bp_zpk, bilinear
+    lp2bs
+
+    Notes
+    -----
+    This is derived from the s-plane substitution
+
+    .. math:: s \rightarrow \frac{s \cdot \mathrm{BW}}{s^2 + {\omega_0}^2}
+
+    This is the "wideband" transformation, producing a stopband with
+    geometric (log frequency) symmetry about `wo`.
+
+    .. versionadded:: 1.1.0
+
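+    Examples
+    --------
+    As a quick numerical check (a minimal sketch using a one-pole
+    lowpass prototype), the transmission zeros land exactly at
+    ``+1j*wo`` and ``-1j*wo``, the center of the stopband:
+
+    >>> from scipy.signal import lp2bs_zpk
+    >>> z, p, k = lp2bs_zpk([], [-1.0], 1.0, wo=3, bw=2)
+    >>> z
+    array([0.+3.j, 0.-3.j])
+    >>> k
+    1.0
+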
+    """
+    z = atleast_1d(z)
+    p = atleast_1d(p)
+    wo = float(wo)
+    bw = float(bw)
+
+    degree = _relative_degree(z, p)
+
+    # Invert to a highpass filter with desired bandwidth
+    z_hp = (bw/2) / z
+    p_hp = (bw/2) / p
+
+    # Square root needs to produce complex result, not NaN
+    z_hp = z_hp.astype(complex)
+    p_hp = p_hp.astype(complex)
+
+    # Duplicate poles and zeros and shift from baseband to +wo and -wo
+    z_bs = concatenate((z_hp + sqrt(z_hp**2 - wo**2),
+                        z_hp - sqrt(z_hp**2 - wo**2)))
+    p_bs = concatenate((p_hp + sqrt(p_hp**2 - wo**2),
+                        p_hp - sqrt(p_hp**2 - wo**2)))
+
+    # Move any zeros that were at infinity to the center of the stopband
+    z_bs = append(z_bs, full(degree, +1j*wo))
+    z_bs = append(z_bs, full(degree, -1j*wo))
+
+    # Cancel out gain change caused by inversion
+    k_bs = k * real(prod(-z) / prod(-p))
+
+    return z_bs, p_bs, k_bs
+
+
+def butter(N, Wn, btype='low', analog=False, output='ba', fs=None):
+    """
+    Butterworth digital and analog filter design.
+
+    Design an Nth-order digital or analog Butterworth filter and return
+    the filter coefficients.
+
+    Parameters
+    ----------
+    N : int
+        The order of the filter. For 'bandpass' and 'bandstop' filters,
+        the resulting order of the final second-order sections ('sos')
+        matrix is ``2*N``, with `N` the number of biquad sections
+        of the desired system.
+    Wn : array_like
+        The critical frequency or frequencies. For lowpass and highpass
+        filters, Wn is a scalar; for bandpass and bandstop filters,
+        Wn is a length-2 sequence.
+
+        For a Butterworth filter, this is the point at which the gain
+        drops to 1/sqrt(2) that of the passband (the "-3 dB point").
+
+        For digital filters, if `fs` is not specified, `Wn` units are
+        normalized from 0 to 1, where 1 is the Nyquist frequency (`Wn` is
+        thus in half cycles / sample and defined as 2*critical frequencies
+        / `fs`). If `fs` is specified, `Wn` is in the same units as `fs`.
+
+        For analog filters, `Wn` is an angular frequency (e.g. rad/s).
+    btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
+        The type of filter.  Default is 'lowpass'.
+    analog : bool, optional
+        When True, return an analog filter, otherwise a digital filter is
+        returned.
+    output : {'ba', 'zpk', 'sos'}, optional
+        Type of output:  numerator/denominator ('ba'), pole-zero ('zpk'), or
+        second-order sections ('sos'). Default is 'ba' for backwards
+        compatibility, but 'sos' should be used for general-purpose filtering.
+    fs : float, optional
+        The sampling frequency of the digital system.
+
+        .. versionadded:: 1.2.0
+
+    Returns
+    -------
+    b, a : ndarray, ndarray
+        Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
+        Only returned if ``output='ba'``.
+    z, p, k : ndarray, ndarray, float
+        Zeros, poles, and system gain of the IIR filter transfer
+        function.  Only returned if ``output='zpk'``.
+    sos : ndarray
+        Second-order sections representation of the IIR filter.
+        Only returned if ``output='sos'``.
+
+    See Also
+    --------
+    buttord, buttap
+
+    Notes
+    -----
+    The Butterworth filter has maximally flat frequency response in the
+    passband.
+
+    The ``'sos'`` output parameter was added in 0.16.0.
+
+    If the transfer function form ``[b, a]`` is requested, numerical
+    problems can occur since the conversion between roots and
+    the polynomial coefficients is a numerically sensitive operation,
+    even for N >= 4. It is recommended to work with the SOS
+    representation.
+
+    Examples
+    --------
+    Design an analog filter and plot its frequency response, showing the
+    critical points:
+
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+    >>> import numpy as np
+
+    >>> b, a = signal.butter(4, 100, 'low', analog=True)
+    >>> w, h = signal.freqs(b, a)
+    >>> plt.semilogx(w, 20 * np.log10(abs(h)))
+    >>> plt.title('Butterworth filter frequency response')
+    >>> plt.xlabel('Frequency [radians / second]')
+    >>> plt.ylabel('Amplitude [dB]')
+    >>> plt.margins(0, 0.1)
+    >>> plt.grid(which='both', axis='both')
+    >>> plt.axvline(100, color='green') # cutoff frequency
+    >>> plt.show()
+
+    Generate a signal made up of 10 Hz and 20 Hz, sampled at 1 kHz
+
+    >>> t = np.linspace(0, 1, 1000, False)  # 1 second
+    >>> sig = np.sin(2*np.pi*10*t) + np.sin(2*np.pi*20*t)
+    >>> fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
+    >>> ax1.plot(t, sig)
+    >>> ax1.set_title('10 Hz and 20 Hz sinusoids')
+    >>> ax1.axis([0, 1, -2, 2])
+
+    Design a digital high-pass filter at 15 Hz to remove the 10 Hz tone, and
+    apply it to the signal. (It's recommended to use second-order sections
+    format when filtering, to avoid numerical error with transfer function
+    (``ba``) format):
+
+    >>> sos = signal.butter(10, 15, 'hp', fs=1000, output='sos')
+    >>> filtered = signal.sosfilt(sos, sig)
+    >>> ax2.plot(t, filtered)
+    >>> ax2.set_title('After 15 Hz high-pass filter')
+    >>> ax2.axis([0, 1, -2, 2])
+    >>> ax2.set_xlabel('Time [seconds]')
+    >>> plt.tight_layout()
+    >>> plt.show()
+    """
+    return iirfilter(N, Wn, btype=btype, analog=analog,
+                     output=output, ftype='butter', fs=fs)
+
+
+def cheby1(N, rp, Wn, btype='low', analog=False, output='ba', fs=None):
+    """
+    Chebyshev type I digital and analog filter design.
+
+    Design an Nth-order digital or analog Chebyshev type I filter and
+    return the filter coefficients.
+
+    Parameters
+    ----------
+    N : int
+        The order of the filter.
+    rp : float
+        The maximum ripple allowed below unity gain in the passband.
+        Specified in decibels, as a positive number.
+    Wn : array_like
+        A scalar or length-2 sequence giving the critical frequencies.
+        For Type I filters, this is the point in the transition band at which
+        the gain first drops below -`rp`.
+
+        For digital filters, `Wn` are in the same units as `fs`. By default,
+        `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
+        where 1 is the Nyquist frequency. (`Wn` is thus in
+        half-cycles / sample.)
+
+        For analog filters, `Wn` is an angular frequency (e.g., rad/s).
+    btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
+        The type of filter.  Default is 'lowpass'.
+    analog : bool, optional
+        When True, return an analog filter, otherwise a digital filter is
+        returned.
+    output : {'ba', 'zpk', 'sos'}, optional
+        Type of output:  numerator/denominator ('ba'), pole-zero ('zpk'), or
+        second-order sections ('sos'). Default is 'ba' for backwards
+        compatibility, but 'sos' should be used for general-purpose filtering.
+    fs : float, optional
+        The sampling frequency of the digital system.
+
+        .. versionadded:: 1.2.0
+
+    Returns
+    -------
+    b, a : ndarray, ndarray
+        Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
+        Only returned if ``output='ba'``.
+    z, p, k : ndarray, ndarray, float
+        Zeros, poles, and system gain of the IIR filter transfer
+        function.  Only returned if ``output='zpk'``.
+    sos : ndarray
+        Second-order sections representation of the IIR filter.
+        Only returned if ``output='sos'``.
+
+    See Also
+    --------
+    cheb1ord, cheb1ap
+
+    Notes
+    -----
+    The Chebyshev type I filter maximizes the rate of cutoff between the
+    frequency response's passband and stopband, at the expense of ripple in
+    the passband and increased ringing in the step response.
+
+    Type I filters roll off faster than Type II (`cheby2`), but Type II
+    filters do not have any ripple in the passband.
+
+    The equiripple passband has N maxima or minima (for example, a
+    5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is
+    unity for odd-order filters, or -rp dB for even-order filters.
+
+    The ``'sos'`` output parameter was added in 0.16.0.
+
+    Examples
+    --------
+    Design an analog filter and plot its frequency response, showing the
+    critical points:
+
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+    >>> import numpy as np
+
+    >>> b, a = signal.cheby1(4, 5, 100, 'low', analog=True)
+    >>> w, h = signal.freqs(b, a)
+    >>> plt.semilogx(w, 20 * np.log10(abs(h)))
+    >>> plt.title('Chebyshev Type I frequency response (rp=5)')
+    >>> plt.xlabel('Frequency [radians / second]')
+    >>> plt.ylabel('Amplitude [dB]')
+    >>> plt.margins(0, 0.1)
+    >>> plt.grid(which='both', axis='both')
+    >>> plt.axvline(100, color='green') # cutoff frequency
+    >>> plt.axhline(-5, color='green') # rp
+    >>> plt.show()
+
+    Generate a signal made up of 10 Hz and 20 Hz, sampled at 1 kHz
+
+    >>> t = np.linspace(0, 1, 1000, False)  # 1 second
+    >>> sig = np.sin(2*np.pi*10*t) + np.sin(2*np.pi*20*t)
+    >>> fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
+    >>> ax1.plot(t, sig)
+    >>> ax1.set_title('10 Hz and 20 Hz sinusoids')
+    >>> ax1.axis([0, 1, -2, 2])
+
+    Design a digital high-pass filter at 15 Hz to remove the 10 Hz tone, and
+    apply it to the signal. (It's recommended to use second-order sections
+    format when filtering, to avoid numerical error with transfer function
+    (``ba``) format):
+
+    >>> sos = signal.cheby1(10, 1, 15, 'hp', fs=1000, output='sos')
+    >>> filtered = signal.sosfilt(sos, sig)
+    >>> ax2.plot(t, filtered)
+    >>> ax2.set_title('After 15 Hz high-pass filter')
+    >>> ax2.axis([0, 1, -2, 2])
+    >>> ax2.set_xlabel('Time [seconds]')
+    >>> plt.tight_layout()
+    >>> plt.show()
+    """
+    return iirfilter(N, Wn, rp=rp, btype=btype, analog=analog,
+                     output=output, ftype='cheby1', fs=fs)
+
+
+def cheby2(N, rs, Wn, btype='low', analog=False, output='ba', fs=None):
+    """
+    Chebyshev type II digital and analog filter design.
+
+    Design an Nth-order digital or analog Chebyshev type II filter and
+    return the filter coefficients.
+
+    Parameters
+    ----------
+    N : int
+        The order of the filter.
+    rs : float
+        The minimum attenuation required in the stop band.
+        Specified in decibels, as a positive number.
+    Wn : array_like
+        A scalar or length-2 sequence giving the critical frequencies.
+        For Type II filters, this is the point in the transition band at which
+        the gain first reaches -`rs`.
+
+        For digital filters, `Wn` are in the same units as `fs`. By default,
+        `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
+        where 1 is the Nyquist frequency. (`Wn` is thus in
+        half-cycles / sample.)
+
+        For analog filters, `Wn` is an angular frequency (e.g., rad/s).
+    btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
+        The type of filter.  Default is 'lowpass'.
+    analog : bool, optional
+        When True, return an analog filter, otherwise a digital filter is
+        returned.
+    output : {'ba', 'zpk', 'sos'}, optional
+        Type of output:  numerator/denominator ('ba'), pole-zero ('zpk'), or
+        second-order sections ('sos'). Default is 'ba' for backwards
+        compatibility, but 'sos' should be used for general-purpose filtering.
+    fs : float, optional
+        The sampling frequency of the digital system.
+
+        .. versionadded:: 1.2.0
+
+    Returns
+    -------
+    b, a : ndarray, ndarray
+        Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
+        Only returned if ``output='ba'``.
+    z, p, k : ndarray, ndarray, float
+        Zeros, poles, and system gain of the IIR filter transfer
+        function.  Only returned if ``output='zpk'``.
+    sos : ndarray
+        Second-order sections representation of the IIR filter.
+        Only returned if ``output='sos'``.
+
+    See Also
+    --------
+    cheb2ord, cheb2ap
+
+    Notes
+    -----
+    The Chebyshev type II filter maximizes the rate of cutoff between the
+    frequency response's passband and stopband, at the expense of ripple in
+    the stopband and increased ringing in the step response.
+
+    Type II filters do not roll off as fast as Type I (`cheby1`).
+
+    The ``'sos'`` output parameter was added in 0.16.0.
+
+    Examples
+    --------
+    Design an analog filter and plot its frequency response, showing the
+    critical points:
+
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+    >>> import numpy as np
+
+    >>> b, a = signal.cheby2(4, 40, 100, 'low', analog=True)
+    >>> w, h = signal.freqs(b, a)
+    >>> plt.semilogx(w, 20 * np.log10(abs(h)))
+    >>> plt.title('Chebyshev Type II frequency response (rs=40)')
+    >>> plt.xlabel('Frequency [radians / second]')
+    >>> plt.ylabel('Amplitude [dB]')
+    >>> plt.margins(0, 0.1)
+    >>> plt.grid(which='both', axis='both')
+    >>> plt.axvline(100, color='green') # cutoff frequency
+    >>> plt.axhline(-40, color='green') # rs
+    >>> plt.show()
+
+    Generate a signal made up of 10 Hz and 20 Hz, sampled at 1 kHz
+
+    >>> t = np.linspace(0, 1, 1000, False)  # 1 second
+    >>> sig = np.sin(2*np.pi*10*t) + np.sin(2*np.pi*20*t)
+    >>> fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
+    >>> ax1.plot(t, sig)
+    >>> ax1.set_title('10 Hz and 20 Hz sinusoids')
+    >>> ax1.axis([0, 1, -2, 2])
+
+    Design a digital high-pass filter at 17 Hz to remove the 10 Hz tone, and
+    apply it to the signal. (It's recommended to use second-order sections
+    format when filtering, to avoid numerical error with transfer function
+    (``ba``) format):
+
+    >>> sos = signal.cheby2(12, 20, 17, 'hp', fs=1000, output='sos')
+    >>> filtered = signal.sosfilt(sos, sig)
+    >>> ax2.plot(t, filtered)
+    >>> ax2.set_title('After 17 Hz high-pass filter')
+    >>> ax2.axis([0, 1, -2, 2])
+    >>> ax2.set_xlabel('Time [seconds]')
+    >>> plt.show()
+    """
+    return iirfilter(N, Wn, rs=rs, btype=btype, analog=analog,
+                     output=output, ftype='cheby2', fs=fs)
+
+
+def ellip(N, rp, rs, Wn, btype='low', analog=False, output='ba', fs=None):
+    """
+    Elliptic (Cauer) digital and analog filter design.
+
+    Design an Nth-order digital or analog elliptic filter and return
+    the filter coefficients.
+
+    Parameters
+    ----------
+    N : int
+        The order of the filter.
+    rp : float
+        The maximum ripple allowed below unity gain in the passband.
+        Specified in decibels, as a positive number.
+    rs : float
+        The minimum attenuation required in the stop band.
+        Specified in decibels, as a positive number.
+    Wn : array_like
+        A scalar or length-2 sequence giving the critical frequencies.
+        For elliptic filters, this is the point in the transition band at
+        which the gain first drops below -`rp`.
+
+        For digital filters, `Wn` are in the same units as `fs`. By default,
+        `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
+        where 1 is the Nyquist frequency. (`Wn` is thus in
+        half-cycles / sample.)
+
+        For analog filters, `Wn` is an angular frequency (e.g., rad/s).
+    btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
+        The type of filter. Default is 'lowpass'.
+    analog : bool, optional
+        When True, return an analog filter, otherwise a digital filter is
+        returned.
+    output : {'ba', 'zpk', 'sos'}, optional
+        Type of output:  numerator/denominator ('ba'), pole-zero ('zpk'), or
+        second-order sections ('sos'). Default is 'ba' for backwards
+        compatibility, but 'sos' should be used for general-purpose filtering.
+    fs : float, optional
+        The sampling frequency of the digital system.
+
+        .. versionadded:: 1.2.0
+
+    Returns
+    -------
+    b, a : ndarray, ndarray
+        Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
+        Only returned if ``output='ba'``.
+    z, p, k : ndarray, ndarray, float
+        Zeros, poles, and system gain of the IIR filter transfer
+        function.  Only returned if ``output='zpk'``.
+    sos : ndarray
+        Second-order sections representation of the IIR filter.
+        Only returned if ``output='sos'``.
+
+    See Also
+    --------
+    ellipord, ellipap
+
+    Notes
+    -----
+    Also known as Cauer or Zolotarev filters, the elliptical filter maximizes
+    the rate of transition between the frequency response's passband and
+    stopband, at the expense of ripple in both, and increased ringing in the
+    step response.
+
+    As `rp` approaches 0, the elliptical filter becomes a Chebyshev
+    type II filter (`cheby2`). As `rs` approaches 0, it becomes a Chebyshev
+    type I filter (`cheby1`). As both approach 0, it becomes a Butterworth
+    filter (`butter`).
+
+    The equiripple passband has N maxima or minima (for example, a
+    5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is
+    unity for odd-order filters, or -rp dB for even-order filters.
+
+    The ``'sos'`` output parameter was added in 0.16.0.
+
+    Examples
+    --------
+    Design an analog filter and plot its frequency response, showing the
+    critical points:
+
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+    >>> import numpy as np
+
+    >>> b, a = signal.ellip(4, 5, 40, 100, 'low', analog=True)
+    >>> w, h = signal.freqs(b, a)
+    >>> plt.semilogx(w, 20 * np.log10(abs(h)))
+    >>> plt.title('Elliptic filter frequency response (rp=5, rs=40)')
+    >>> plt.xlabel('Frequency [radians / second]')
+    >>> plt.ylabel('Amplitude [dB]')
+    >>> plt.margins(0, 0.1)
+    >>> plt.grid(which='both', axis='both')
+    >>> plt.axvline(100, color='green') # cutoff frequency
+    >>> plt.axhline(-40, color='green') # rs
+    >>> plt.axhline(-5, color='green') # rp
+    >>> plt.show()
+
+    Generate a signal made up of 10 Hz and 20 Hz, sampled at 1 kHz
+
+    >>> t = np.linspace(0, 1, 1000, False)  # 1 second
+    >>> sig = np.sin(2*np.pi*10*t) + np.sin(2*np.pi*20*t)
+    >>> fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
+    >>> ax1.plot(t, sig)
+    >>> ax1.set_title('10 Hz and 20 Hz sinusoids')
+    >>> ax1.axis([0, 1, -2, 2])
+
+    Design a digital high-pass filter at 17 Hz to remove the 10 Hz tone, and
+    apply it to the signal. (It's recommended to use second-order sections
+    format when filtering, to avoid numerical error with transfer function
+    (``ba``) format):
+
+    >>> sos = signal.ellip(8, 1, 100, 17, 'hp', fs=1000, output='sos')
+    >>> filtered = signal.sosfilt(sos, sig)
+    >>> ax2.plot(t, filtered)
+    >>> ax2.set_title('After 17 Hz high-pass filter')
+    >>> ax2.axis([0, 1, -2, 2])
+    >>> ax2.set_xlabel('Time [seconds]')
+    >>> plt.tight_layout()
+    >>> plt.show()
+    """
+    return iirfilter(N, Wn, rs=rs, rp=rp, btype=btype, analog=analog,
+                     output=output, ftype='elliptic', fs=fs)
+
+
+def bessel(N, Wn, btype='low', analog=False, output='ba', norm='phase',
+           fs=None):
+    """
+    Bessel/Thomson digital and analog filter design.
+
+    Design an Nth-order digital or analog Bessel filter and return the
+    filter coefficients.
+
+    Parameters
+    ----------
+    N : int
+        The order of the filter.
+    Wn : array_like
+        A scalar or length-2 sequence giving the critical frequencies (defined
+        by the `norm` parameter).
+        For analog filters, `Wn` is an angular frequency (e.g., rad/s).
+
+        For digital filters, `Wn` are in the same units as `fs`.  By default,
+        `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
+        where 1 is the Nyquist frequency. (`Wn` is thus in
+        half-cycles / sample.)
+    btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
+        The type of filter.  Default is 'lowpass'.
+    analog : bool, optional
+        When True, return an analog filter, otherwise a digital filter is
+        returned. (See Notes.)
+    output : {'ba', 'zpk', 'sos'}, optional
+        Type of output:  numerator/denominator ('ba'), pole-zero ('zpk'), or
+        second-order sections ('sos'). Default is 'ba'.
+    norm : {'phase', 'delay', 'mag'}, optional
+        Critical frequency normalization:
+
+        ``phase``
+            The filter is normalized such that the phase response reaches its
+            midpoint at angular (e.g. rad/s) frequency `Wn`. This happens for
+            both low-pass and high-pass filters, so this is the
+            "phase-matched" case.
+
+            The magnitude response asymptotes are the same as a Butterworth
+            filter of the same order with a cutoff of `Wn`.
+
+            This is the default, and matches MATLAB's implementation.
+
+        ``delay``
+            The filter is normalized such that the group delay in the passband
+            is 1/`Wn` (e.g., seconds). This is the "natural" type obtained by
+            solving Bessel polynomials.
+
+        ``mag``
+            The filter is normalized such that the gain magnitude is -3 dB at
+            angular frequency `Wn`.
+
+        .. versionadded:: 0.18.0
+    fs : float, optional
+        The sampling frequency of the digital system.
+
+        .. versionadded:: 1.2.0
+
+    Returns
+    -------
+    b, a : ndarray, ndarray
+        Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
+        Only returned if ``output='ba'``.
+    z, p, k : ndarray, ndarray, float
+        Zeros, poles, and system gain of the IIR filter transfer
+        function.  Only returned if ``output='zpk'``.
+    sos : ndarray
+        Second-order sections representation of the IIR filter.
+        Only returned if ``output='sos'``.
+
+    Notes
+    -----
+    Also known as a Thomson filter, the analog Bessel filter has maximally
+    flat group delay and maximally linear phase response, with very little
+    ringing in the step response. [1]_
+
+    The Bessel is inherently an analog filter. This function generates digital
+    Bessel filters using the bilinear transform, which does not preserve the
+    phase response of the analog filter. As such, it is only approximately
+    correct at frequencies below about fs/4. To get maximally-flat group
+    delay at higher frequencies, the analog Bessel filter must be transformed
+    using phase-preserving techniques.
+
+    See `besselap` for implementation details and references.
+
+    The ``'sos'`` output parameter was added in 0.16.0.
+
+    References
+    ----------
+    .. [1] Thomson, W.E., "Delay Networks having Maximally Flat Frequency
+           Characteristics", Proceedings of the Institution of Electrical
+           Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490.
+
+    Examples
+    --------
+    Plot the phase-normalized frequency response, showing the relationship
+    to the Butterworth filter's cutoff frequency (green):
+
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+    >>> import numpy as np
+
+    >>> b, a = signal.butter(4, 100, 'low', analog=True)
+    >>> w, h = signal.freqs(b, a)
+    >>> plt.semilogx(w, 20 * np.log10(np.abs(h)), color='silver', ls='dashed')
+    >>> b, a = signal.bessel(4, 100, 'low', analog=True, norm='phase')
+    >>> w, h = signal.freqs(b, a)
+    >>> plt.semilogx(w, 20 * np.log10(np.abs(h)))
+    >>> plt.title('Bessel filter magnitude response (with Butterworth)')
+    >>> plt.xlabel('Frequency [radians / second]')
+    >>> plt.ylabel('Amplitude [dB]')
+    >>> plt.margins(0, 0.1)
+    >>> plt.grid(which='both', axis='both')
+    >>> plt.axvline(100, color='green')  # cutoff frequency
+    >>> plt.show()
+
+    and the phase midpoint:
+
+    >>> plt.figure()
+    >>> plt.semilogx(w, np.unwrap(np.angle(h)))
+    >>> plt.axvline(100, color='green')  # cutoff frequency
+    >>> plt.axhline(-np.pi, color='red')  # phase midpoint
+    >>> plt.title('Bessel filter phase response')
+    >>> plt.xlabel('Frequency [radians / second]')
+    >>> plt.ylabel('Phase [radians]')
+    >>> plt.margins(0, 0.1)
+    >>> plt.grid(which='both', axis='both')
+    >>> plt.show()
+
+    Plot the magnitude-normalized frequency response, showing the -3 dB cutoff:
+
+    >>> b, a = signal.bessel(3, 10, 'low', analog=True, norm='mag')
+    >>> w, h = signal.freqs(b, a)
+    >>> plt.semilogx(w, 20 * np.log10(np.abs(h)))
+    >>> plt.axhline(-3, color='red')  # -3 dB magnitude
+    >>> plt.axvline(10, color='green')  # cutoff frequency
+    >>> plt.title('Magnitude-normalized Bessel filter frequency response')
+    >>> plt.xlabel('Frequency [radians / second]')
+    >>> plt.ylabel('Amplitude [dB]')
+    >>> plt.margins(0, 0.1)
+    >>> plt.grid(which='both', axis='both')
+    >>> plt.show()
+
+    Plot the delay-normalized filter, showing the maximally-flat group delay
+    at 0.1 seconds:
+
+    >>> b, a = signal.bessel(5, 1/0.1, 'low', analog=True, norm='delay')
+    >>> w, h = signal.freqs(b, a)
+    >>> plt.figure()
+    >>> plt.semilogx(w[1:], -np.diff(np.unwrap(np.angle(h)))/np.diff(w))
+    >>> plt.axhline(0.1, color='red')  # 0.1 seconds group delay
+    >>> plt.title('Bessel filter group delay')
+    >>> plt.xlabel('Frequency [radians / second]')
+    >>> plt.ylabel('Group delay [seconds]')
+    >>> plt.margins(0, 0.1)
+    >>> plt.grid(which='both', axis='both')
+    >>> plt.show()
+
+    """
+    return iirfilter(N, Wn, btype=btype, analog=analog,
+                     output=output, ftype='bessel_'+norm, fs=fs)
+
+
+def maxflat():
+    # Placeholder: not implemented in this module.
+    pass
+
+
+def yulewalk():
+    # Placeholder: not implemented in this module.
+    pass
+
+
+def band_stop_obj(wp, ind, passb, stopb, gpass, gstop, type):
+    """
+    Band Stop Objective Function for order minimization.
+
+    Returns the non-integer order for an analog band stop filter.
+
+    Parameters
+    ----------
+    wp : scalar
+        Edge of passband `passb`.
+    ind : int, {0, 1}
+        Index specifying which `passb` edge to vary (0 or 1).
+    passb : ndarray
+        Two element sequence of fixed passband edges.
+    stopb : ndarray
+        Two element sequence of fixed stopband edges.
+    gpass : float
+        Amount of ripple in the passband in dB.
+    gstop : float
+        Amount of attenuation in stopband in dB.
+    type : {'butter', 'cheby', 'ellip'}
+        Type of filter.
+
+    Returns
+    -------
+    n : scalar
+        Filter order (possibly non-integer).
+
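+    Examples
+    --------
+    A rough sketch with made-up (already pre-warped) band edges:
+    passband [1, 6] rad/s, stopband [2, 5] rad/s, 3 dB of ripple and
+    40 dB of attenuation. The returned order is non-integer:
+
+    >>> import numpy as np
+    >>> from scipy.signal import band_stop_obj
+    >>> n = band_stop_obj(1.0, 0, np.array([1.0, 6.0]),
+    ...                   np.array([2.0, 5.0]), 3, 40, 'butter')
+    >>> bool(16 < n < 17)
+    True
+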
+    """
+
+    _validate_gpass_gstop(gpass, gstop)
+
+    passbC = passb.copy()
+    passbC[ind] = wp
+    nat = (stopb * (passbC[0] - passbC[1]) /
+           (stopb ** 2 - passbC[0] * passbC[1]))
+    nat = min(abs(nat))
+
+    if type == 'butter':
+        GSTOP = 10 ** (0.1 * abs(gstop))
+        GPASS = 10 ** (0.1 * abs(gpass))
+        n = (log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat)))
+    elif type == 'cheby':
+        GSTOP = 10 ** (0.1 * abs(gstop))
+        GPASS = 10 ** (0.1 * abs(gpass))
+        n = arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) / arccosh(nat)
+    elif type == 'ellip':
+        GSTOP = 10 ** (0.1 * gstop)
+        GPASS = 10 ** (0.1 * gpass)
+        arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0))
+        arg0 = 1.0 / nat
+        d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
+        d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
+        n = (d0[0] * d1[1] / (d0[1] * d1[0]))
+    else:
+        raise ValueError("Incorrect type: %s" % type)
+    return n
+
+
+def buttord(wp, ws, gpass, gstop, analog=False, fs=None):
+    """Butterworth filter order selection.
+
+    Return the order of the lowest order digital or analog Butterworth filter
+    that loses no more than `gpass` dB in the passband and has at least
+    `gstop` dB attenuation in the stopband.
+
+    Parameters
+    ----------
+    wp, ws : float
+        Passband and stopband edge frequencies.
+
+        For digital filters, these are in the same units as `fs`. By default,
+        `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
+        where 1 is the Nyquist frequency. (`wp` and `ws` are thus in
+        half-cycles / sample.) For example:
+
+            - Lowpass:   wp = 0.2,          ws = 0.3
+            - Highpass:  wp = 0.3,          ws = 0.2
+            - Bandpass:  wp = [0.2, 0.5],   ws = [0.1, 0.6]
+            - Bandstop:  wp = [0.1, 0.6],   ws = [0.2, 0.5]
+
+        For analog filters, `wp` and `ws` are angular frequencies (e.g., rad/s).
+    gpass : float
+        The maximum loss in the passband (dB).
+    gstop : float
+        The minimum attenuation in the stopband (dB).
+    analog : bool, optional
+        When True, return an analog filter, otherwise a digital filter is
+        returned.
+    fs : float, optional
+        The sampling frequency of the digital system.
+
+        .. versionadded:: 1.2.0
+
+    Returns
+    -------
+    ord : int
+        The lowest order for a Butterworth filter which meets specs.
+    wn : ndarray or float
+        The Butterworth natural frequency (i.e. the "3dB frequency"). Should
+        be used with `butter` to give filter results. If `fs` is specified,
+        this is in the same units, and `fs` must also be passed to `butter`.
+
+    See Also
+    --------
+    butter : Filter design using order and critical points
+    cheb1ord : Find order and critical points from passband and stopband spec
+    cheb2ord, ellipord
+    iirfilter : General filter design using order and critical frequencies
+    iirdesign : General filter design using passband and stopband spec
+
+    Examples
+    --------
+    Design an analog bandpass filter with a passband within 3 dB from 20 to
+    50 rad/s, while attenuating by at least 40 dB below 14 rad/s and above
+    60 rad/s.
+    Plot its frequency response, showing the passband and stopband
+    constraints in gray.
+
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+    >>> import numpy as np
+
+    >>> N, Wn = signal.buttord([20, 50], [14, 60], 3, 40, True)
+    >>> b, a = signal.butter(N, Wn, 'band', True)
+    >>> w, h = signal.freqs(b, a, np.logspace(1, 2, 500))
+    >>> plt.semilogx(w, 20 * np.log10(abs(h)))
+    >>> plt.title('Butterworth bandpass filter fit to constraints')
+    >>> plt.xlabel('Frequency [radians / second]')
+    >>> plt.ylabel('Amplitude [dB]')
+    >>> plt.grid(which='both', axis='both')
+    >>> plt.fill([1,  14,  14,   1], [-40, -40, 99, 99], '0.9', lw=0) # stop
+    >>> plt.fill([20, 20,  50,  50], [-99, -3, -3, -99], '0.9', lw=0) # pass
+    >>> plt.fill([60, 60, 1e9, 1e9], [99, -40, -40, 99], '0.9', lw=0) # stop
+    >>> plt.axis([10, 100, -60, 3])
+    >>> plt.show()
+
+    """
+
+    _validate_gpass_gstop(gpass, gstop)
+
+    wp = atleast_1d(wp)
+    ws = atleast_1d(ws)
+    if fs is not None:
+        if analog:
+            raise ValueError("fs cannot be specified for an analog filter")
+        wp = 2*wp/fs
+        ws = 2*ws/fs
+
+    # Classify the band type: 1 = lowpass, 2 = highpass,
+    # 3 = bandstop, 4 = bandpass
+    filter_type = 2 * (len(wp) - 1)
+    filter_type += 1
+    if wp[0] >= ws[0]:
+        filter_type += 1
+
+    # Pre-warp frequencies for digital filter design
+    if not analog:
+        passb = tan(pi * wp / 2.0)
+        stopb = tan(pi * ws / 2.0)
+    else:
+        passb = wp * 1.0
+        stopb = ws * 1.0
+
+    if filter_type == 1:            # low
+        nat = stopb / passb
+    elif filter_type == 2:          # high
+        nat = passb / stopb
+    elif filter_type == 3:          # stop
+        wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
+                                 args=(0, passb, stopb, gpass, gstop,
+                                       'butter'),
+                                 disp=0)
+        passb[0] = wp0
+        wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
+                                 args=(1, passb, stopb, gpass, gstop,
+                                       'butter'),
+                                 disp=0)
+        passb[1] = wp1
+        nat = ((stopb * (passb[0] - passb[1])) /
+               (stopb ** 2 - passb[0] * passb[1]))
+    elif filter_type == 4:          # pass
+        nat = ((stopb ** 2 - passb[0] * passb[1]) /
+               (stopb * (passb[0] - passb[1])))
+
+    nat = min(abs(nat))
+
+    GSTOP = 10 ** (0.1 * abs(gstop))
+    GPASS = 10 ** (0.1 * abs(gpass))
+    ord = int(ceil(log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat))))
+
+    # Find the Butterworth natural frequency WN (or the "3dB frequency")
+    # to give exactly gpass at passb.
+    try:
+        W0 = (GPASS - 1.0) ** (-1.0 / (2.0 * ord))
+    except ZeroDivisionError:
+        W0 = 1.0
+        warnings.warn("Order is zero...check input parameters.",
+                      RuntimeWarning, 2)
+
+    # now convert this frequency back from lowpass prototype
+    # to the original analog filter
+
+    if filter_type == 1:  # low
+        WN = W0 * passb
+    elif filter_type == 2:  # high
+        WN = passb / W0
+    elif filter_type == 3:  # stop
+        WN = numpy.empty(2, float)
+        discr = sqrt((passb[1] - passb[0]) ** 2 +
+                     4 * W0 ** 2 * passb[0] * passb[1])
+        WN[0] = ((passb[1] - passb[0]) + discr) / (2 * W0)
+        WN[1] = ((passb[1] - passb[0]) - discr) / (2 * W0)
+        WN = numpy.sort(abs(WN))
+    elif filter_type == 4:  # pass
+        W0 = numpy.array([-W0, W0], float)
+        WN = (-W0 * (passb[1] - passb[0]) / 2.0 +
+              sqrt(W0 ** 2 / 4.0 * (passb[1] - passb[0]) ** 2 +
+                   passb[0] * passb[1]))
+        WN = numpy.sort(abs(WN))
+    else:
+        raise ValueError("Bad type: %s" % filter_type)
+
+    if not analog:
+        wn = (2.0 / pi) * arctan(WN)
+    else:
+        wn = WN
+
+    if len(wn) == 1:
+        wn = wn[0]
+
+    if fs is not None:
+        wn = wn*fs/2
+
+    return ord, wn
+
+
+def cheb1ord(wp, ws, gpass, gstop, analog=False, fs=None):
+    """Chebyshev type I filter order selection.
+
+    Return the order of the lowest order digital or analog Chebyshev Type I
+    filter that loses no more than `gpass` dB in the passband and has at
+    least `gstop` dB attenuation in the stopband.
+
+    Parameters
+    ----------
+    wp, ws : float
+        Passband and stopband edge frequencies.
+
+        For digital filters, these are in the same units as `fs`. By default,
+        `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
+        where 1 is the Nyquist frequency. (`wp` and `ws` are thus in
+        half-cycles / sample.)  For example:
+
+            - Lowpass:   wp = 0.2,          ws = 0.3
+            - Highpass:  wp = 0.3,          ws = 0.2
+            - Bandpass:  wp = [0.2, 0.5],   ws = [0.1, 0.6]
+            - Bandstop:  wp = [0.1, 0.6],   ws = [0.2, 0.5]
+
+        For analog filters, `wp` and `ws` are angular frequencies (e.g., rad/s).
+    gpass : float
+        The maximum loss in the passband (dB).
+    gstop : float
+        The minimum attenuation in the stopband (dB).
+    analog : bool, optional
+        When True, return an analog filter, otherwise a digital filter is
+        returned.
+    fs : float, optional
+        The sampling frequency of the digital system.
+
+        .. versionadded:: 1.2.0
+
+    Returns
+    -------
+    ord : int
+        The lowest order for a Chebyshev type I filter that meets specs.
+    wn : ndarray or float
+        The Chebyshev natural frequency (the "3dB frequency") for use with
+        `cheby1` to give filter results. If `fs` is specified,
+        this is in the same units, and `fs` must also be passed to `cheby1`.
+
+    See Also
+    --------
+    cheby1 : Filter design using order and critical points
+    buttord : Find order and critical points from passband and stopband spec
+    cheb2ord, ellipord
+    iirfilter : General filter design using order and critical frequencies
+    iirdesign : General filter design using passband and stopband spec
+
+    Examples
+    --------
+    Design a digital lowpass filter such that the passband is within 3 dB up
+    to 0.2*(fs/2), while attenuating by at least 40 dB above 0.3*(fs/2).
+    Plot its
+    frequency response, showing the passband and stopband constraints in gray.
+
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+    >>> import numpy as np
+
+    >>> N, Wn = signal.cheb1ord(0.2, 0.3, 3, 40)
+    >>> b, a = signal.cheby1(N, 3, Wn, 'low')
+    >>> w, h = signal.freqz(b, a)
+    >>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h)))
+    >>> plt.title('Chebyshev I lowpass filter fit to constraints')
+    >>> plt.xlabel('Normalized frequency')
+    >>> plt.ylabel('Amplitude [dB]')
+    >>> plt.grid(which='both', axis='both')
+    >>> plt.fill([.01, 0.2, 0.2, .01], [-3, -3, -99, -99], '0.9', lw=0) # pass
+    >>> plt.fill([0.3, 0.3,   2,   2], [ 9, -40, -40,  9], '0.9', lw=0) # stop
+    >>> plt.axis([0.08, 1, -60, 3])
+    >>> plt.show()
+
+    """
+
+    _validate_gpass_gstop(gpass, gstop)
+
+    wp = atleast_1d(wp)
+    ws = atleast_1d(ws)
+    if fs is not None:
+        if analog:
+            raise ValueError("fs cannot be specified for an analog filter")
+        wp = 2*wp/fs
+        ws = 2*ws/fs
+
+    # Classify the band type: 1 = lowpass, 2 = highpass,
+    # 3 = bandstop, 4 = bandpass
+    filter_type = 2 * (len(wp) - 1)
+    if wp[0] < ws[0]:
+        filter_type += 1
+    else:
+        filter_type += 2
+
+    # Pre-warp frequencies for digital filter design
+    if not analog:
+        passb = tan(pi * wp / 2.0)
+        stopb = tan(pi * ws / 2.0)
+    else:
+        passb = wp * 1.0
+        stopb = ws * 1.0
+
+    if filter_type == 1:           # low
+        nat = stopb / passb
+    elif filter_type == 2:          # high
+        nat = passb / stopb
+    elif filter_type == 3:     # stop
+        wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
+                                 args=(0, passb, stopb, gpass, gstop, 'cheby'),
+                                 disp=0)
+        passb[0] = wp0
+        wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
+                                 args=(1, passb, stopb, gpass, gstop, 'cheby'),
+                                 disp=0)
+        passb[1] = wp1
+        nat = ((stopb * (passb[0] - passb[1])) /
+               (stopb ** 2 - passb[0] * passb[1]))
+    elif filter_type == 4:  # pass
+        nat = ((stopb ** 2 - passb[0] * passb[1]) /
+               (stopb * (passb[0] - passb[1])))
+
+    nat = min(abs(nat))
+
+    GSTOP = 10 ** (0.1 * abs(gstop))
+    GPASS = 10 ** (0.1 * abs(gpass))
+    ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) /
+                   arccosh(nat)))
+
+    # Natural frequencies are just the passband edges
+    if not analog:
+        wn = (2.0 / pi) * arctan(passb)
+    else:
+        wn = passb
+
+    if len(wn) == 1:
+        wn = wn[0]
+
+    if fs is not None:
+        wn = wn*fs/2
+
+    return ord, wn
+
+
+def cheb2ord(wp, ws, gpass, gstop, analog=False, fs=None):
+    """Chebyshev type II filter order selection.
+
+    Return the order of the lowest order digital or analog Chebyshev Type II
+    filter that loses no more than `gpass` dB in the passband and has at least
+    `gstop` dB attenuation in the stopband.
+
+    Parameters
+    ----------
+    wp, ws : float
+        Passband and stopband edge frequencies.
+
+        For digital filters, these are in the same units as `fs`. By default,
+        `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
+        where 1 is the Nyquist frequency. (`wp` and `ws` are thus in
+        half-cycles / sample.)  For example:
+
+            - Lowpass:   wp = 0.2,          ws = 0.3
+            - Highpass:  wp = 0.3,          ws = 0.2
+            - Bandpass:  wp = [0.2, 0.5],   ws = [0.1, 0.6]
+            - Bandstop:  wp = [0.1, 0.6],   ws = [0.2, 0.5]
+
+        For analog filters, `wp` and `ws` are angular frequencies (e.g., rad/s).
+    gpass : float
+        The maximum loss in the passband (dB).
+    gstop : float
+        The minimum attenuation in the stopband (dB).
+    analog : bool, optional
+        When True, return an analog filter, otherwise a digital filter is
+        returned.
+    fs : float, optional
+        The sampling frequency of the digital system.
+
+        .. versionadded:: 1.2.0
+
+    Returns
+    -------
+    ord : int
+        The lowest order for a Chebyshev type II filter that meets specs.
+    wn : ndarray or float
+        The Chebyshev natural frequency (the "3dB frequency") for use with
+        `cheby2` to give filter results. If `fs` is specified,
+        this is in the same units, and `fs` must also be passed to `cheby2`.
+
+    See Also
+    --------
+    cheby2 : Filter design using order and critical points
+    buttord : Find order and critical points from passband and stopband spec
+    cheb1ord, ellipord
+    iirfilter : General filter design using order and critical frequencies
+    iirdesign : General filter design using passband and stopband spec
+
+    Examples
+    --------
+    Design a digital bandstop filter that attenuates by at least 60 dB from
+    0.2*(fs/2) to 0.5*(fs/2), while staying within 3 dB below 0.1*(fs/2) and
+    above 0.6*(fs/2). Plot its frequency response, showing the passband and
+    stopband constraints in gray.
+
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+    >>> import numpy as np
+
+    >>> N, Wn = signal.cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60)
+    >>> b, a = signal.cheby2(N, 60, Wn, 'stop')
+    >>> w, h = signal.freqz(b, a)
+    >>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h)))
+    >>> plt.title('Chebyshev II bandstop filter fit to constraints')
+    >>> plt.xlabel('Normalized frequency')
+    >>> plt.ylabel('Amplitude [dB]')
+    >>> plt.grid(which='both', axis='both')
+    >>> plt.fill([.01, .1, .1, .01], [-3,  -3, -99, -99], '0.9', lw=0) # pass
+    >>> plt.fill([.2,  .2, .5,  .5], [ 9, -60, -60,   9], '0.9', lw=0) # stop
+    >>> plt.fill([.6,  .6,  2,   2], [-99, -3,  -3, -99], '0.9', lw=0) # pass
+    >>> plt.axis([0.06, 1, -80, 3])
+    >>> plt.show()
+
+    """
+
+    _validate_gpass_gstop(gpass, gstop)
+
+    wp = atleast_1d(wp)
+    ws = atleast_1d(ws)
+    if fs is not None:
+        if analog:
+            raise ValueError("fs cannot be specified for an analog filter")
+        wp = 2*wp/fs
+        ws = 2*ws/fs
+
+    # Classify the band type: 1 = lowpass, 2 = highpass,
+    # 3 = bandstop, 4 = bandpass
+    filter_type = 2 * (len(wp) - 1)
+    if wp[0] < ws[0]:
+        filter_type += 1
+    else:
+        filter_type += 2
+
+    # Pre-warp frequencies for digital filter design
+    if not analog:
+        passb = tan(pi * wp / 2.0)
+        stopb = tan(pi * ws / 2.0)
+    else:
+        passb = wp * 1.0
+        stopb = ws * 1.0
+
+    if filter_type == 1:           # low
+        nat = stopb / passb
+    elif filter_type == 2:          # high
+        nat = passb / stopb
+    elif filter_type == 3:     # stop
+        wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
+                                 args=(0, passb, stopb, gpass, gstop, 'cheby'),
+                                 disp=0)
+        passb[0] = wp0
+        wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
+                                 args=(1, passb, stopb, gpass, gstop, 'cheby'),
+                                 disp=0)
+        passb[1] = wp1
+        nat = ((stopb * (passb[0] - passb[1])) /
+               (stopb ** 2 - passb[0] * passb[1]))
+    elif filter_type == 4:  # pass
+        nat = ((stopb ** 2 - passb[0] * passb[1]) /
+               (stopb * (passb[0] - passb[1])))
+
+    nat = min(abs(nat))
+
+    GSTOP = 10 ** (0.1 * abs(gstop))
+    GPASS = 10 ** (0.1 * abs(gpass))
+    ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) /
+                   arccosh(nat)))
+
+    # Find frequency where analog response is -gpass dB.
+    # Then convert back from low-pass prototype to the original filter.
+
+    new_freq = cosh(1.0 / ord * arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))))
+    new_freq = 1.0 / new_freq
+
+    if filter_type == 1:
+        nat = passb / new_freq
+    elif filter_type == 2:
+        nat = passb * new_freq
+    elif filter_type == 3:
+        nat = numpy.empty(2, float)
+        nat[0] = (new_freq / 2.0 * (passb[0] - passb[1]) +
+                  sqrt(new_freq ** 2 * (passb[1] - passb[0]) ** 2 / 4.0 +
+                       passb[1] * passb[0]))
+        nat[1] = passb[1] * passb[0] / nat[0]
+    elif filter_type == 4:
+        nat = numpy.empty(2, float)
+        nat[0] = (1.0 / (2.0 * new_freq) * (passb[0] - passb[1]) +
+                  sqrt((passb[1] - passb[0]) ** 2 / (4.0 * new_freq ** 2) +
+                       passb[1] * passb[0]))
+        nat[1] = passb[0] * passb[1] / nat[0]
+
+    if not analog:
+        wn = (2.0 / pi) * arctan(nat)
+    else:
+        wn = nat
+
+    if len(wn) == 1:
+        wn = wn[0]
+
+    if fs is not None:
+        wn = wn*fs/2
+
+    return ord, wn
+
+
+_POW10_LOG10 = np.log(10)
+
+
+def _pow10m1(x):
+    """10 ** x - 1 for x near 0"""
+    return np.expm1(_POW10_LOG10 * x)
+
+
+def ellipord(wp, ws, gpass, gstop, analog=False, fs=None):
+    """Elliptic (Cauer) filter order selection.
+
+    Return the order of the lowest order digital or analog elliptic filter
+    that loses no more than `gpass` dB in the passband and has at least
+    `gstop` dB attenuation in the stopband.
+
+    Parameters
+    ----------
+    wp, ws : float
+        Passband and stopband edge frequencies.
+
+        For digital filters, these are in the same units as `fs`. By default,
+        `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1,
+        where 1 is the Nyquist frequency. (`wp` and `ws` are thus in
+        half-cycles / sample.) For example:
+
+            - Lowpass:   wp = 0.2,          ws = 0.3
+            - Highpass:  wp = 0.3,          ws = 0.2
+            - Bandpass:  wp = [0.2, 0.5],   ws = [0.1, 0.6]
+            - Bandstop:  wp = [0.1, 0.6],   ws = [0.2, 0.5]
+
+        For analog filters, `wp` and `ws` are angular frequencies (e.g., rad/s).
+    gpass : float
+        The maximum loss in the passband (dB).
+    gstop : float
+        The minimum attenuation in the stopband (dB).
+    analog : bool, optional
+        When True, return an analog filter, otherwise a digital filter is
+        returned.
+    fs : float, optional
+        The sampling frequency of the digital system.
+
+        .. versionadded:: 1.2.0
+
+    Returns
+    -------
+    ord : int
+        The lowest order for an Elliptic (Cauer) filter that meets specs.
+    wn : ndarray or float
+        The elliptic natural frequency (the "3dB frequency") for use with
+        `ellip` to give filter results. If `fs` is specified,
+        this is in the same units, and `fs` must also be passed to `ellip`.
+
+    See Also
+    --------
+    ellip : Filter design using order and critical points
+    buttord : Find order and critical points from passband and stopband spec
+    cheb1ord, cheb2ord
+    iirfilter : General filter design using order and critical frequencies
+    iirdesign : General filter design using passband and stopband spec
+
+    Examples
+    --------
+    Design an analog highpass filter such that the passband is within 3 dB
+    above 30 rad/s, while attenuating by at least 60 dB at 10 rad/s.
+    Plot its
+    frequency response, showing the passband and stopband constraints in gray.
+
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+    >>> import numpy as np
+
+    >>> N, Wn = signal.ellipord(30, 10, 3, 60, True)
+    >>> b, a = signal.ellip(N, 3, 60, Wn, 'high', True)
+    >>> w, h = signal.freqs(b, a, np.logspace(0, 3, 500))
+    >>> plt.semilogx(w, 20 * np.log10(abs(h)))
+    >>> plt.title('Elliptical highpass filter fit to constraints')
+    >>> plt.xlabel('Frequency [radians / second]')
+    >>> plt.ylabel('Amplitude [dB]')
+    >>> plt.grid(which='both', axis='both')
+    >>> plt.fill([.1, 10,  10,  .1], [1e4, 1e4, -60, -60], '0.9', lw=0) # stop
+    >>> plt.fill([30, 30, 1e9, 1e9], [-99,  -3,  -3, -99], '0.9', lw=0) # pass
+    >>> plt.axis([1, 300, -80, 3])
+    >>> plt.show()
+
+    """
+
+    _validate_gpass_gstop(gpass, gstop)
+
+    wp = atleast_1d(wp)
+    ws = atleast_1d(ws)
+    if fs is not None:
+        if analog:
+            raise ValueError("fs cannot be specified for an analog filter")
+        wp = 2*wp/fs
+        ws = 2*ws/fs
+
+    # Classify the band type: 1 = lowpass, 2 = highpass,
+    # 3 = bandstop, 4 = bandpass
+    filter_type = 2 * (len(wp) - 1)
+    filter_type += 1
+    if wp[0] >= ws[0]:
+        filter_type += 1
+
+    # Pre-warp frequencies for digital filter design
+    if not analog:
+        passb = tan(pi * wp / 2.0)
+        stopb = tan(pi * ws / 2.0)
+    else:
+        passb = wp * 1.0
+        stopb = ws * 1.0
+
+    if filter_type == 1:           # low
+        nat = stopb / passb
+    elif filter_type == 2:          # high
+        nat = passb / stopb
+    elif filter_type == 3:     # stop
+        wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
+                                 args=(0, passb, stopb, gpass, gstop, 'ellip'),
+                                 disp=0)
+        passb[0] = wp0
+        wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
+                                 args=(1, passb, stopb, gpass, gstop, 'ellip'),
+                                 disp=0)
+        passb[1] = wp1
+        nat = ((stopb * (passb[0] - passb[1])) /
+               (stopb ** 2 - passb[0] * passb[1]))
+    elif filter_type == 4:  # pass
+        nat = ((stopb ** 2 - passb[0] * passb[1]) /
+               (stopb * (passb[0] - passb[1])))
+
+    nat = min(abs(nat))
+
+    arg1_sq = _pow10m1(0.1 * gpass) / _pow10m1(0.1 * gstop)
+    arg0 = 1.0 / nat
+    d0 = special.ellipk(arg0 ** 2), special.ellipkm1(arg0 ** 2)
+    d1 = special.ellipk(arg1_sq), special.ellipkm1(arg1_sq)
+    ord = int(ceil(d0[0] * d1[1] / (d0[1] * d1[0])))
+
+    if not analog:
+        wn = arctan(passb) * 2.0 / pi
+    else:
+        wn = passb
+
+    if len(wn) == 1:
+        wn = wn[0]
+
+    if fs is not None:
+        wn = wn*fs/2
+
+    return ord, wn
+
+
+def buttap(N):
+    """Return (z,p,k) for analog prototype of Nth-order Butterworth filter.
+
+    The filter will have an angular (e.g., rad/s) cutoff frequency of 1.
+
+    See Also
+    --------
+    butter : Filter design function using this prototype
+
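+    Examples
+    --------
+    As a quick numerical check, the prototype has no zeros, unit gain,
+    and all poles on the unit circle in the left half-plane:
+
+    >>> import numpy as np
+    >>> from scipy.signal import buttap
+    >>> z, p, k = buttap(4)
+    >>> len(z), k
+    (0, 1)
+    >>> bool(np.allclose(np.abs(p), 1) and np.all(p.real < 0))
+    True
+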
+    """
+    if abs(int(N)) != N:
+        raise ValueError("Filter order must be a nonnegative integer")
+    z = numpy.array([])
+    m = numpy.arange(-N+1, N, 2)
+    # Middle value is 0 to ensure an exactly real pole
+    p = -numpy.exp(1j * pi * m / (2 * N))
+    k = 1
+    return z, p, k
+
+
+def cheb1ap(N, rp):
+    """
+    Return (z,p,k) for Nth-order Chebyshev type I analog lowpass filter.
+
+    The returned filter prototype has `rp` decibels of ripple in the passband.
+
+    The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
+    defined as the point at which the gain first drops below ``-rp``.
+
+    See Also
+    --------
+    cheby1 : Filter design function using this prototype
+
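+    Examples
+    --------
+    As a quick numerical check (values rounded), the DC gain of an
+    even-order prototype is ``-rp`` dB:
+
+    >>> import numpy as np
+    >>> from scipy.signal import cheb1ap
+    >>> z, p, k = cheb1ap(2, 1)
+    >>> dc_gain_db = 20 * np.log10(np.abs(k / np.prod(-p)))
+    >>> float(np.round(dc_gain_db, 4))
+    -1.0
+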
+    """
+    if abs(int(N)) != N:
+        raise ValueError("Filter order must be a nonnegative integer")
+    elif N == 0:
+        # Avoid divide-by-zero error
+        # Even order filters have DC gain of -rp dB
+        return numpy.array([]), numpy.array([]), 10**(-rp/20)
+    z = numpy.array([])
+
+    # Ripple factor (epsilon)
+    eps = numpy.sqrt(10 ** (0.1 * rp) - 1.0)
+    mu = 1.0 / N * arcsinh(1 / eps)
+
+    # Arrange poles in an ellipse on the left half of the S-plane
+    m = numpy.arange(-N+1, N, 2)
+    theta = pi * m / (2*N)
+    p = -sinh(mu + 1j*theta)
+
+    k = numpy.prod(-p, axis=0).real
+    if N % 2 == 0:
+        k = k / sqrt((1 + eps * eps))
+
+    return z, p, k
+
+
+def cheb2ap(N, rs):
+    """
+    Return (z,p,k) for Nth-order Chebyshev type II analog lowpass filter.
+
+    The returned filter prototype has `rs` decibels of ripple in the stopband.
+
+    The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
+    defined as the point at which the gain first reaches ``-rs``.
+
+    See Also
+    --------
+    cheby2 : Filter design function using this prototype
+
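+    Examples
+    --------
+    As a quick numerical check (values rounded), the gain at the
+    normalized cutoff ``w = 1`` is ``-rs`` dB:
+
+    >>> import numpy as np
+    >>> from scipy.signal import cheb2ap
+    >>> z, p, k = cheb2ap(2, 40)
+    >>> H1 = k * np.prod(1j - z) / np.prod(1j - p)
+    >>> float(np.round(20 * np.log10(np.abs(H1)), 4))
+    -40.0
+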
+    """
+    if abs(int(N)) != N:
+        raise ValueError("Filter order must be a nonnegative integer")
+    elif N == 0:
+        # Avoid divide-by-zero warning
+        return numpy.array([]), numpy.array([]), 1
+
+    # Ripple factor (epsilon)
+    de = 1.0 / sqrt(10 ** (0.1 * rs) - 1)
+    mu = arcsinh(1.0 / de) / N
+
+    if N % 2:
+        m = numpy.concatenate((numpy.arange(-N+1, 0, 2),
+                               numpy.arange(2, N, 2)))
+    else:
+        m = numpy.arange(-N+1, N, 2)
+
+    z = -conjugate(1j / sin(m * pi / (2.0 * N)))
+
+    # Poles around the unit circle like Butterworth
+    p = -exp(1j * pi * numpy.arange(-N+1, N, 2) / (2 * N))
+    # Warp into Chebyshev II
+    p = sinh(mu) * p.real + 1j * cosh(mu) * p.imag
+    p = 1.0 / p
+
+    k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
+    return z, p, k
+
+
+EPSILON = 2e-16
+
+# number of terms in solving degree equation
+_ELLIPDEG_MMAX = 7
+
+
+def _ellipdeg(n, m1):
+    """Solve degree equation using nomes
+
+    Given n, m1, solve
+       n * K'(m) / K(m) = K1'(m1) / K1(m1)
+    for m
+
+    See [1], Eq. (49)
+
+    References
+    ----------
+    .. [1] Orfanidis, "Lecture Notes on Elliptic Filter Design",
+           https://www.ece.rutgers.edu/~orfanidi/ece521/notes.pdf
+    """
+    K1 = special.ellipk(m1)
+    K1p = special.ellipkm1(m1)
+
+    q1 = np.exp(-np.pi * K1p / K1)
+    q = q1 ** (1/n)
+
+    mnum = np.arange(_ELLIPDEG_MMAX + 1)
+    mden = np.arange(1, _ELLIPDEG_MMAX + 2)
+
+    num = np.sum(q ** (mnum * (mnum+1)))
+    den = 1 + 2 * np.sum(q ** (mden**2))
+
+    return 16 * q * (num / den) ** 4
+
+
+# Maximum number of iterations in Landen transformation recursion
+# sequence.  10 is conservative; unit tests pass with 4, Orfanidis
+# (see _arc_jac_sn [1]) suggests 5.
+_ARC_JAC_SN_MAXITER = 10
+
+
+def _arc_jac_sn(w, m):
+    """Inverse Jacobian elliptic sn
+
+    Solve for z in w = sn(z, m)
+
+    Parameters
+    ----------
+    w : complex scalar
+        argument
+
+    m : scalar
+        modulus; in interval [0, 1]
+
+
+    See [1], Eq. (56)
+
+    References
+    ----------
+    .. [1] Orfanidis, "Lecture Notes on Elliptic Filter Design",
+           https://www.ece.rutgers.edu/~orfanidi/ece521/notes.pdf
+
+    """
+
+    def _complement(kx):
+        # (1 - kx**2) ** 0.5; the factored form below avoids
+        # cancellation when kx is close to 1
+        return ((1 - kx) * (1 + kx)) ** 0.5
+
+    k = m ** 0.5
+
+    if k > 1:
+        return np.nan
+    elif k == 1:
+        return np.arctanh(w)
+
+    ks = [k]
+    niter = 0
+    while ks[-1] != 0:
+        k_ = ks[-1]
+        k_p = _complement(k_)
+        ks.append((1 - k_p) / (1 + k_p))
+        niter += 1
+        if niter > _ARC_JAC_SN_MAXITER:
+            raise ValueError('Landen transformation not converging')
+
+    K = np.prod(1 + np.array(ks[1:])) * np.pi/2
+
+    wns = [w]
+
+    for kn, knext in zip(ks[:-1], ks[1:]):
+        wn = wns[-1]
+        wnext = (2 * wn /
+                 ((1 + knext) * (1 + _complement(kn * wn))))
+        wns.append(wnext)
+
+    u = 2 / np.pi * np.arcsin(wns[-1])
+
+    z = K * u
+    return z
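+
+# A minimal round-trip sketch (using this module's imports): for real w in
+# (-1, 1), the forward Jacobi sn from scipy.special.ellipj should undo the
+# inverse computed above.
+#
+#     >>> w, m = 0.5, 0.3
+#     >>> z = _arc_jac_sn(w, m)
+#     >>> sn = special.ellipj(z, m)[0]
+#     >>> bool(np.isclose(sn, w))
+#     True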
+
+
+def _arc_jac_sc1(w, m):
+    """Real inverse Jacobian sc, with complementary modulus
+
+    Solve for z in w = sc(z, 1-m)
+
+    w - real scalar
+
+    m - modulus
+
+    From [1], sc(z, m) = -i * sn(i * z, 1 - m)
+
+    References
+    ----------
+    # noqa: E501
+    .. [1] https://functions.wolfram.com/EllipticFunctions/JacobiSC/introductions/JacobiPQs/ShowAll.html,
+       "Representations through other Jacobi functions"
+
+    """
+
+    zcomplex = _arc_jac_sn(1j * w, m)
+    if abs(zcomplex.real) > 1e-14:
+        raise ValueError("_arc_jac_sc1: expected a real-valued result")
+
+    return zcomplex.imag
+
+
+def ellipap(N, rp, rs):
+    """Return (z,p,k) of Nth-order elliptic analog lowpass filter.
+
+    The filter is a normalized prototype that has `rp` decibels of ripple
+    in the passband and a stopband `rs` decibels down.
+
+    The filter's angular (e.g., rad/s) cutoff frequency is normalized to 1,
+    defined as the point at which the gain first drops below ``-rp``.
+
+    See Also
+    --------
+    ellip : Filter design function using this prototype
+
+    References
+    ----------
+    .. [1] Lutova, Tosic, and Evans, "Filter Design for Signal Processing",
+           Chapters 5 and 12.
+
+    .. [2] Orfanidis, "Lecture Notes on Elliptic Filter Design",
+           https://www.ece.rutgers.edu/~orfanidi/ece521/notes.pdf
+
+    """
+    if abs(int(N)) != N:
+        raise ValueError("Filter order must be a nonnegative integer")
+    elif N == 0:
+        # Avoid divide-by-zero warning
+        # Even order filters have DC gain of -rp dB
+        return numpy.array([]), numpy.array([]), 10**(-rp/20)
+    elif N == 1:
+        p = -sqrt(1.0 / _pow10m1(0.1 * rp))
+        k = -p
+        z = []
+        return asarray(z), asarray(p), k
+
+    eps_sq = _pow10m1(0.1 * rp)
+
+    eps = np.sqrt(eps_sq)
+    ck1_sq = eps_sq / _pow10m1(0.1 * rs)
+    if ck1_sq == 0:
+        raise ValueError("Cannot design a filter with given rp and rs"
+                         " specifications.")
+
+    val = special.ellipk(ck1_sq), special.ellipkm1(ck1_sq)
+
+    m = _ellipdeg(N, ck1_sq)
+
+    capk = special.ellipk(m)
+
+    j = numpy.arange(1 - N % 2, N, 2)
+    jj = len(j)
+
+    [s, c, d, phi] = special.ellipj(j * capk / N, m * numpy.ones(jj))
+    snew = numpy.compress(abs(s) > EPSILON, s, axis=-1)
+    z = 1.0 / (sqrt(m) * snew)
+    z = 1j * z
+    z = numpy.concatenate((z, conjugate(z)))
+
+    r = _arc_jac_sc1(1. / eps, ck1_sq)
+    v0 = capk * r / (N * val[0])
+
+    [sv, cv, dv, phi] = special.ellipj(v0, 1 - m)
+    p = -(c * d * sv * cv + 1j * s * dv) / (1 - (d * sv) ** 2.0)
+
+    if N % 2:
+        newp = numpy.compress(abs(p.imag) > EPSILON *
+                              numpy.sqrt(numpy.sum(p * numpy.conjugate(p),
+                                                   axis=0).real),
+                              p, axis=-1)
+        p = numpy.concatenate((p, conjugate(newp)))
+    else:
+        p = numpy.concatenate((p, conjugate(p)))
+
+    k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
+    if N % 2 == 0:
+        k = k / numpy.sqrt((1 + eps_sq))
+
+    return z, p, k
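+
+# A minimal usage sketch (doctest-style), assuming the public scipy.signal
+# namespace: the passband edge w = 1 rad/s sits exactly at -rp dB.
+#
+#     >>> from scipy import signal
+#     >>> import numpy as np
+#     >>> z, p, k = signal.ellipap(4, rp=1, rs=40)
+#     >>> w, h = signal.freqs_zpk(z, p, k, worN=[1.0])
+#     >>> bool(np.isclose(20 * np.log10(abs(h[0])), -1.0))
+#     True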
+
+
+# TODO: Make this a real public function scipy.misc.ff
+def _falling_factorial(x, n):
+    r"""
+    Return the factorial of `x` to the `n` falling.
+
+    This is defined as:
+
+    .. math::   x^\underline n = (x)_n = x (x-1) \cdots (x-n+1)
+
+    This can calculate ratios of factorials more efficiently, since::
+
+        n!/m! == falling_factorial(n, n-m)
+
+    where ``n >= m``, skipping the factors that cancel out.
+
+    The usual factorial is ``n! == ff(n, n)``.
+    """
+    val = 1
+    for k in range(x - n + 1, x + 1):
+        val *= k
+    return val
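+
+# A quick example, following directly from the definition above:
+#
+#     >>> _falling_factorial(6, 3)   # 6 * 5 * 4
+#     120
+#     >>> _falling_factorial(6, 6) == math.factorial(6)
+#     True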
+
+
+def _bessel_poly(n, reverse=False):
+    """
+    Return the coefficients of Bessel polynomial of degree `n`
+
+    If `reverse` is true, a reverse Bessel polynomial is output.
+
+    Output is a list of coefficients:
+    [1]                   = 1
+    [1,  1]               = 1*s   +  1
+    [1,  3,  3]           = 1*s^2 +  3*s   +  3
+    [1,  6, 15, 15]       = 1*s^3 +  6*s^2 + 15*s   +  15
+    [1, 10, 45, 105, 105] = 1*s^4 + 10*s^3 + 45*s^2 + 105*s + 105
+    etc.
+
+    Output is a Python list of arbitrary precision long ints, so n is only
+    limited by your hardware's memory.
+
+    Sequence is http://oeis.org/A001498, and output can be confirmed to
+    match http://oeis.org/A001498/b001498.txt :
+
+    >>> i = 0
+    >>> for n in range(51):
+    ...     for x in _bessel_poly(n, reverse=True):
+    ...         print(i, x)
+    ...         i += 1
+
+    """
+    if abs(int(n)) != n:
+        raise ValueError("Polynomial order must be a nonnegative integer")
+    else:
+        n = int(n)  # np.int32 doesn't work, for instance
+
+    out = []
+    for k in range(n + 1):
+        num = _falling_factorial(2*n - k, n)
+        den = 2**(n - k) * math.factorial(k)
+        out.append(num // den)
+
+    if reverse:
+        return out[::-1]
+    else:
+        return out
+
+
+def _campos_zeros(n):
+    """
+    Return approximate zero locations of Bessel polynomials y_n(x) for order
+    `n` using polynomial fit (Campos-Calderon 2011)
+    """
+    if n == 1:
+        return asarray([-1+0j])
+
+    s = npp_polyval(n, [0, 0, 2, 0, -3, 1])
+    b3 = npp_polyval(n, [16, -8]) / s
+    b2 = npp_polyval(n, [-24, -12, 12]) / s
+    b1 = npp_polyval(n, [8, 24, -12, -2]) / s
+    b0 = npp_polyval(n, [0, -6, 0, 5, -1]) / s
+
+    r = npp_polyval(n, [0, 0, 2, 1])
+    a1 = npp_polyval(n, [-6, -6]) / r
+    a2 = 6 / r
+
+    k = np.arange(1, n+1)
+    x = npp_polyval(k, [0, a1, a2])
+    y = npp_polyval(k, [b0, b1, b2, b3])
+
+    return x + 1j*y
+
+
+def _aberth(f, fp, x0, tol=1e-15, maxiter=50):
+    """
+    Given a function `f`, its first derivative `fp`, and a set of initial
+    guesses `x0`, simultaneously find the roots of the polynomial using the
+    Aberth-Ehrlich method.
+
+    ``len(x0)`` should equal the number of roots of `f`.
+
+    (This is not a complete implementation of Bini's algorithm.)
+    """
+
+    N = len(x0)
+
+    x = array(x0, complex)
+    beta = np.empty_like(x0)
+
+    for iteration in range(maxiter):
+        alpha = -f(x) / fp(x)  # Newton's method
+
+        # Model "repulsion" between zeros
+        for k in range(N):
+            beta[k] = np.sum(1/(x[k] - x[k+1:]))
+            beta[k] += np.sum(1/(x[k] - x[:k]))
+
+        x += alpha / (1 + alpha * beta)
+
+        if not all(np.isfinite(x)):
+            raise RuntimeError('Root-finding calculation failed')
+
+        # Mekwi: The iterative process can be stopped when |hn| has become
+        # less than the largest error one is willing to permit in the root.
+        if all(abs(alpha) <= tol):
+            break
+    else:
+        raise RuntimeError('Zeros failed to converge')
+
+    return x
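+
+# A minimal sketch on a plain cubic (numpy only): the Aberth iteration
+# recovers the three roots of x**3 - 1 from rough, distinct guesses.
+#
+#     >>> f = lambda x: x**3 - 1
+#     >>> fp = lambda x: 3 * x**2
+#     >>> x0 = np.array([1.2 + 0.1j, -0.4 + 0.8j, -0.4 - 0.8j])
+#     >>> r = np.sort_complex(_aberth(f, fp, x0))
+#     >>> bool(np.allclose(r, np.sort_complex(np.roots([1.0, 0, 0, -1.0]))))
+#     True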
+
+
+def _bessel_zeros(N):
+    """
+    Find zeros of ordinary Bessel polynomial of order `N`, by root-finding of
+    modified Bessel function of the second kind
+    """
+    if N == 0:
+        return asarray([])
+
+    # Generate starting points
+    x0 = _campos_zeros(N)
+
+    # Zeros are the same for exp(1/x)*K_{N+0.5}(1/x) and Nth-order ordinary
+    # Bessel polynomial y_N(x)
+    def f(x):
+        return special.kve(N+0.5, 1/x)
+
+    # First derivative of above
+    def fp(x):
+        return (special.kve(N-0.5, 1/x)/(2*x**2) -
+                special.kve(N+0.5, 1/x)/(x**2) +
+                special.kve(N+1.5, 1/x)/(2*x**2))
+
+    # Starting points converge to true zeros
+    x = _aberth(f, fp, x0)
+
+    # Improve precision using Newton's method on each
+    for i in range(len(x)):
+        x[i] = optimize.newton(f, x[i], fp, tol=1e-15)
+
+    # Average complex conjugates to make them exactly symmetrical
+    x = np.mean((x, x[::-1].conj()), 0)
+
+    # Zeros should sum to -1
+    if abs(np.sum(x) + 1) > 1e-15:
+        raise RuntimeError('Generated zeros are inaccurate')
+
+    return x
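+
+# A minimal usage sketch: the refined zeros come back conjugate-symmetric
+# and, as asserted by the function itself, sum to -1 (the ratio of the two
+# leading y_N coefficients).
+#
+#     >>> x = _bessel_zeros(5)
+#     >>> bool(np.isclose(np.sum(x), -1))
+#     True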
+
+
+def _norm_factor(p, k):
+    """
+    Numerically find frequency shift to apply to delay-normalized filter such
+    that -3 dB point is at 1 rad/sec.
+
+    `p` is an array_like of polynomial poles
+    `k` is a float gain
+
+    First 10 values are listed in the "Bessel Scale Factors" table of
+    "Bessel Filters Polynomials, Poles and Circuit Elements", C. Bond, 2003.
+    """
+    p = asarray(p, dtype=complex)
+
+    def G(w):
+        """
+        Gain of filter
+        """
+        return abs(k / prod(1j*w - p))
+
+    def cutoff(w):
+        """
+        When gain = -3 dB, return 0
+        """
+        return G(w) - 1/np.sqrt(2)
+
+    return optimize.newton(cutoff, 1.5)
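+
+# A minimal self-check sketch: for a delay-normalized Bessel prototype the
+# returned shift puts the gain at 1/sqrt(2), i.e. -3 dB, since that is
+# exactly the equation Newton's method solves above.
+#
+#     >>> N = 4
+#     >>> p = 1 / _bessel_zeros(N)
+#     >>> k = _falling_factorial(2 * N, N) // 2 ** N
+#     >>> wc = _norm_factor(p, k)
+#     >>> bool(np.isclose(abs(k / np.prod(1j * wc - p)), 1 / np.sqrt(2)))
+#     True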
+
+
+def besselap(N, norm='phase'):
+    """
+    Return (z,p,k) for analog prototype of an Nth-order Bessel filter.
+
+    Parameters
+    ----------
+    N : int
+        The order of the filter.
+    norm : {'phase', 'delay', 'mag'}, optional
+        Frequency normalization:
+
+        ``phase``
+            The filter is normalized such that the phase response reaches its
+            midpoint at an angular (e.g., rad/s) cutoff frequency of 1. This
+            happens for both low-pass and high-pass filters, so this is the
+            "phase-matched" case. [6]_
+
+            The magnitude response asymptotes are the same as a Butterworth
+            filter of the same order with a cutoff of `Wn`.
+
+            This is the default, and matches MATLAB's implementation.
+
+        ``delay``
+            The filter is normalized such that the group delay in the passband
+            is 1 (e.g., 1 second). This is the "natural" type obtained by
+            solving Bessel polynomials.
+
+        ``mag``
+            The filter is normalized such that the gain magnitude is -3 dB at
+            angular frequency 1. This is called "frequency normalization" by
+            Bond. [1]_
+
+        .. versionadded:: 0.18.0
+
+    Returns
+    -------
+    z : ndarray
+        Zeros of the transfer function. Is always an empty array.
+    p : ndarray
+        Poles of the transfer function.
+    k : scalar
+        Gain of the transfer function. For phase-normalized, this is always 1.
+
+    See Also
+    --------
+    bessel : Filter design function using this prototype
+
+    Notes
+    -----
+    To find the pole locations, approximate starting points are generated [2]_
+    for the zeros of the ordinary Bessel polynomial [3]_, then the
+    Aberth-Ehrlich method [4]_ [5]_ is used on the Kv(x) Bessel function to
+    calculate more accurate zeros, and these locations are then inverted about
+    the unit circle.
+
+    References
+    ----------
+    .. [1] C.R. Bond, "Bessel Filter Constants",
+           http://www.crbond.com/papers/bsf.pdf
+    .. [2] Campos and Calderon, "Approximate closed-form formulas for the
+           zeros of the Bessel Polynomials", :arXiv:`1105.0957`.
+    .. [3] Thomson, W.E., "Delay Networks having Maximally Flat Frequency
+           Characteristics", Proceedings of the Institution of Electrical
+           Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490.
+    .. [4] Aberth, "Iteration Methods for Finding all Zeros of a Polynomial
+           Simultaneously", Mathematics of Computation, Vol. 27, No. 122,
+           April 1973
+    .. [5] Ehrlich, "A modified Newton method for polynomials", Communications
+           of the ACM, Vol. 10, Issue 2, pp. 107-108, Feb. 1967,
+           :DOI:`10.1145/363067.363115`
+    .. [6] Miller and Bohn, "A Bessel Filter Crossover, and Its Relation to
+           Others", RaneNote 147, 1998,
+           https://www.ranecommercial.com/legacy/note147.html
+
+    """
+    if abs(int(N)) != N:
+        raise ValueError("Filter order must be a nonnegative integer")
+
+    N = int(N)  # calculation below doesn't always fit in np.int64
+    if N == 0:
+        p = []
+        k = 1
+    else:
+        # Find roots of reverse Bessel polynomial
+        p = 1/_bessel_zeros(N)
+
+        a_last = _falling_factorial(2*N, N) // 2**N
+
+        # Shift them to a different normalization if required
+        if norm in ('delay', 'mag'):
+            # Normalized for group delay of 1
+            k = a_last
+            if norm == 'mag':
+                # -3 dB magnitude point is at 1 rad/sec
+                norm_factor = _norm_factor(p, k)
+                p /= norm_factor
+                k = norm_factor**-N * a_last
+        elif norm == 'phase':
+            # Phase-matched (1/2 max phase shift at 1 rad/sec)
+            # Asymptotes are same as Butterworth filter
+            p *= 10**(-math.log10(a_last)/N)
+            k = 1
+        else:
+            raise ValueError('normalization not understood')
+
+    return asarray([]), asarray(p, dtype=complex), float(k)
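+
+# A minimal usage sketch (doctest-style), assuming the public scipy.signal
+# namespace: with norm='mag' the magnitude response at w = 1 rad/s is -3 dB.
+#
+#     >>> from scipy import signal
+#     >>> import numpy as np
+#     >>> z, p, k = signal.besselap(4, norm='mag')
+#     >>> w, h = signal.freqs_zpk(z, p, k, worN=[1.0])
+#     >>> bool(np.isclose(abs(h[0]), 1 / np.sqrt(2)))
+#     True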
+
+
+def iirnotch(w0, Q, fs=2.0):
+    """
+    Design second-order IIR notch digital filter.
+
+    A notch filter is a band-stop filter with a narrow bandwidth
+    (high quality factor). It rejects a narrow frequency band and
+    leaves the rest of the spectrum little changed.
+
+    Parameters
+    ----------
+    w0 : float
+        Frequency to remove from a signal. If `fs` is specified, this is in
+        the same units as `fs`. By default, it is a normalized scalar that must
+        satisfy  ``0 < w0 < 1``, with ``w0 = 1`` corresponding to half of the
+        sampling frequency.
+    Q : float
+        Quality factor. Dimensionless parameter that characterizes
+        notch filter -3 dB bandwidth ``bw`` relative to its center
+        frequency, ``Q = w0/bw``.
+    fs : float, optional
+        The sampling frequency of the digital system.
+
+        .. versionadded:: 1.2.0
+
+    Returns
+    -------
+    b, a : ndarray, ndarray
+        Numerator (``b``) and denominator (``a``) polynomials
+        of the IIR filter.
+
+    See Also
+    --------
+    iirpeak
+
+    Notes
+    -----
+    .. versionadded:: 0.19.0
+
+    References
+    ----------
+    .. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing",
+           Prentice-Hall, 1996
+
+    Examples
+    --------
+    Design and plot filter to remove the 60 Hz component from a
+    signal sampled at 200 Hz, using a quality factor Q = 30
+
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+    >>> import numpy as np
+
+    >>> fs = 200.0  # Sample frequency (Hz)
+    >>> f0 = 60.0  # Frequency to be removed from signal (Hz)
+    >>> Q = 30.0  # Quality factor
+    >>> # Design notch filter
+    >>> b, a = signal.iirnotch(f0, Q, fs)
+
+    >>> # Frequency response
+    >>> freq, h = signal.freqz(b, a, fs=fs)
+    >>> # Plot
+    >>> fig, ax = plt.subplots(2, 1, figsize=(8, 6))
+    >>> ax[0].plot(freq, 20*np.log10(abs(h)), color='blue')
+    >>> ax[0].set_title("Frequency Response")
+    >>> ax[0].set_ylabel("Amplitude (dB)", color='blue')
+    >>> ax[0].set_xlim([0, 100])
+    >>> ax[0].set_ylim([-25, 10])
+    >>> ax[0].grid(True)
+    >>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green')
+    >>> ax[1].set_ylabel("Angle (degrees)", color='green')
+    >>> ax[1].set_xlabel("Frequency (Hz)")
+    >>> ax[1].set_xlim([0, 100])
+    >>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
+    >>> ax[1].set_ylim([-90, 90])
+    >>> ax[1].grid(True)
+    >>> plt.show()
+    """
+
+    return _design_notch_peak_filter(w0, Q, "notch", fs)
+
+
+def iirpeak(w0, Q, fs=2.0):
+    """
+    Design second-order IIR peak (resonant) digital filter.
+
+    A peak filter is a band-pass filter with a narrow bandwidth
+    (high quality factor). It rejects components outside a narrow
+    frequency band.
+
+    Parameters
+    ----------
+    w0 : float
+        Frequency to be retained in a signal. If `fs` is specified, this is in
+        the same units as `fs`. By default, it is a normalized scalar that must
+        satisfy  ``0 < w0 < 1``, with ``w0 = 1`` corresponding to half of the
+        sampling frequency.
+    Q : float
+        Quality factor. Dimensionless parameter that characterizes
+        peak filter -3 dB bandwidth ``bw`` relative to its center
+        frequency, ``Q = w0/bw``.
+    fs : float, optional
+        The sampling frequency of the digital system.
+
+        .. versionadded:: 1.2.0
+
+    Returns
+    -------
+    b, a : ndarray, ndarray
+        Numerator (``b``) and denominator (``a``) polynomials
+        of the IIR filter.
+
+    See Also
+    --------
+    iirnotch
+
+    Notes
+    -----
+    .. versionadded:: 0.19.0
+
+    References
+    ----------
+    .. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing",
+           Prentice-Hall, 1996
+
+    Examples
+    --------
+    Design and plot filter to remove the frequencies other than the 300 Hz
+    component from a signal sampled at 1000 Hz, using a quality factor Q = 30
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+
+    >>> fs = 1000.0  # Sample frequency (Hz)
+    >>> f0 = 300.0  # Frequency to be retained (Hz)
+    >>> Q = 30.0  # Quality factor
+    >>> # Design peak filter
+    >>> b, a = signal.iirpeak(f0, Q, fs)
+
+    >>> # Frequency response
+    >>> freq, h = signal.freqz(b, a, fs=fs)
+    >>> # Plot
+    >>> fig, ax = plt.subplots(2, 1, figsize=(8, 6))
+    >>> ax[0].plot(freq, 20*np.log10(np.maximum(abs(h), 1e-5)), color='blue')
+    >>> ax[0].set_title("Frequency Response")
+    >>> ax[0].set_ylabel("Amplitude (dB)", color='blue')
+    >>> ax[0].set_xlim([0, 500])
+    >>> ax[0].set_ylim([-50, 10])
+    >>> ax[0].grid(True)
+    >>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green')
+    >>> ax[1].set_ylabel("Angle (degrees)", color='green')
+    >>> ax[1].set_xlabel("Frequency (Hz)")
+    >>> ax[1].set_xlim([0, 500])
+    >>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
+    >>> ax[1].set_ylim([-90, 90])
+    >>> ax[1].grid(True)
+    >>> plt.show()
+    """
+
+    return _design_notch_peak_filter(w0, Q, "peak", fs)
+
+
+def _design_notch_peak_filter(w0, Q, ftype, fs=2.0):
+    """
+    Design notch or peak digital filter.
+
+    Parameters
+    ----------
+    w0 : float
+        Normalized frequency to remove from a signal. If `fs` is specified,
+        this is in the same units as `fs`. By default, it is a normalized
+        scalar that must satisfy  ``0 < w0 < 1``, with ``w0 = 1``
+        corresponding to half of the sampling frequency.
+    Q : float
+        Quality factor. Dimensionless parameter that characterizes
+        notch filter -3 dB bandwidth ``bw`` relative to its center
+        frequency, ``Q = w0/bw``.
+    ftype : str
+        The type of IIR filter to design:
+
+            - notch filter : ``notch``
+            - peak filter  : ``peak``
+    fs : float, optional
+        The sampling frequency of the digital system.
+
+        .. versionadded:: 1.2.0
+
+    Returns
+    -------
+    b, a : ndarray, ndarray
+        Numerator (``b``) and denominator (``a``) polynomials
+        of the IIR filter.
+    """
+
+    # Guarantee that the inputs are floats
+    w0 = float(w0)
+    Q = float(Q)
+    w0 = 2*w0/fs
+
+    # Checks if w0 is within the range
+    if w0 > 1.0 or w0 < 0.0:
+        raise ValueError("w0 should be such that 0 < w0 < 1")
+
+    # Get bandwidth
+    bw = w0/Q
+
+    # Normalize inputs
+    bw = bw*np.pi
+    w0 = w0*np.pi
+
+    # Compute -3dB attenuation
+    gb = 1/np.sqrt(2)
+
+    if ftype == "notch":
+        # Compute beta: formula 11.3.4 (p.575) from reference [1]
+        beta = (np.sqrt(1.0-gb**2.0)/gb)*np.tan(bw/2.0)
+    elif ftype == "peak":
+        # Compute beta: formula 11.3.19 (p.579) from reference [1]
+        beta = (gb/np.sqrt(1.0-gb**2.0))*np.tan(bw/2.0)
+    else:
+        raise ValueError("Unknown ftype.")
+
+    # Compute gain: formula 11.3.6 (p.575) from reference [1]
+    gain = 1.0/(1.0+beta)
+
+    # Compute numerator b and denominator a
+    # formulas 11.3.7 (p.575) and 11.3.21 (p.579)
+    # from reference [1]
+    if ftype == "notch":
+        b = gain*np.array([1.0, -2.0*np.cos(w0), 1.0])
+    else:
+        b = (1.0-gain)*np.array([1.0, 0.0, -1.0])
+    a = np.array([1.0, -2.0*gain*np.cos(w0), (2.0*gain-1.0)])
+
+    return b, a
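+
+# A minimal self-check sketch of the coefficients above: a notch keeps unit
+# gain at DC (H(1) = sum(b)/sum(a) = 1), while a peak blocks DC entirely.
+#
+#     >>> b, a = _design_notch_peak_filter(0.3, 30, "notch")
+#     >>> bool(np.isclose(np.sum(b) / np.sum(a), 1.0))
+#     True
+#     >>> b, a = _design_notch_peak_filter(0.3, 30, "peak")
+#     >>> float(np.sum(b))   # numerator of H(1); the peak has a zero at DC
+#     0.0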
+
+
+def iircomb(w0, Q, ftype='notch', fs=2.0, *, pass_zero=False):
+    """
+    Design IIR notching or peaking digital comb filter.
+
+    A notching comb filter consists of regularly-spaced band-stop filters with
+    a narrow bandwidth (high quality factor). Each rejects a narrow frequency
+    band and leaves the rest of the spectrum little changed.
+
+    A peaking comb filter consists of regularly-spaced band-pass filters with
+    a narrow bandwidth (high quality factor). Each rejects components outside
+    a narrow frequency band.
+
+    Parameters
+    ----------
+    w0 : float
+        The fundamental frequency of the comb filter (the spacing between its
+        peaks). This must evenly divide the sampling frequency. If `fs` is
+        specified, this is in the same units as `fs`. By default, it is
+        a normalized scalar that must satisfy  ``0 < w0 < 1``, with
+        ``w0 = 1`` corresponding to half of the sampling frequency.
+    Q : float
+        Quality factor. Dimensionless parameter that characterizes
+        notch filter -3 dB bandwidth ``bw`` relative to its center
+        frequency, ``Q = w0/bw``.
+    ftype : {'notch', 'peak'}
+        The type of comb filter generated by the function. If 'notch', then
+        the Q factor applies to the notches. If 'peak', then the Q factor
+        applies to the peaks.  Default is 'notch'.
+    fs : float, optional
+        The sampling frequency of the signal. Default is 2.0.
+    pass_zero : bool, optional
+        If False (default), the notches (nulls) of the filter are centered on
+        frequencies [0, w0, 2*w0, ...], and the peaks are centered on the
+        midpoints [w0/2, 3*w0/2, 5*w0/2, ...].  If True, the peaks are centered
+        on [0, w0, 2*w0, ...] (passing zero frequency) and vice versa.
+
+        .. versionadded:: 1.9.0
+
+    Returns
+    -------
+    b, a : ndarray, ndarray
+        Numerator (``b``) and denominator (``a``) polynomials
+        of the IIR filter.
+
+    Raises
+    ------
+    ValueError
+        If `w0` is less than or equal to 0 or greater than or equal to
+        ``fs/2``, if `fs` is not divisible by `w0`, or if `ftype`
+        is not 'notch' or 'peak'.
+
+    See Also
+    --------
+    iirnotch
+    iirpeak
+
+    Notes
+    -----
+    For implementation details, see [1]_. The transfer-function ("TF")
+    implementation of the comb filter is numerically stable even at higher
+    orders because it uses a single repeated pole, which does not suffer
+    from precision loss.
+
+    References
+    ----------
+    .. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing",
+           Prentice-Hall, 1996, ch. 11, "Digital Filter Design"
+
+    Examples
+    --------
+    Design and plot notching comb filter at 20 Hz for a
+    signal sampled at 200 Hz, using quality factor Q = 30
+
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+    >>> import numpy as np
+
+    >>> fs = 200.0  # Sample frequency (Hz)
+    >>> f0 = 20.0  # Frequency to be removed from signal (Hz)
+    >>> Q = 30.0  # Quality factor
+    >>> # Design notching comb filter
+    >>> b, a = signal.iircomb(f0, Q, ftype='notch', fs=fs)
+
+    >>> # Frequency response
+    >>> freq, h = signal.freqz(b, a, fs=fs)
+    >>> response = abs(h)
+    >>> # To avoid divide by zero when graphing
+    >>> response[response == 0] = 1e-20
+    >>> # Plot
+    >>> fig, ax = plt.subplots(2, 1, figsize=(8, 6), sharex=True)
+    >>> ax[0].plot(freq, 20*np.log10(response), color='blue')
+    >>> ax[0].set_title("Frequency Response")
+    >>> ax[0].set_ylabel("Amplitude (dB)", color='blue')
+    >>> ax[0].set_xlim([0, 100])
+    >>> ax[0].set_ylim([-30, 10])
+    >>> ax[0].grid(True)
+    >>> ax[1].plot(freq, (np.angle(h)*180/np.pi+180)%360 - 180, color='green')
+    >>> ax[1].set_ylabel("Angle (degrees)", color='green')
+    >>> ax[1].set_xlabel("Frequency (Hz)")
+    >>> ax[1].set_xlim([0, 100])
+    >>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
+    >>> ax[1].set_ylim([-90, 90])
+    >>> ax[1].grid(True)
+    >>> plt.show()
+
+    Design and plot peaking comb filter at 250 Hz for a
+    signal sampled at 1000 Hz, using quality factor Q = 30
+
+    >>> fs = 1000.0  # Sample frequency (Hz)
+    >>> f0 = 250.0  # Frequency to be retained (Hz)
+    >>> Q = 30.0  # Quality factor
+    >>> # Design peaking filter
+    >>> b, a = signal.iircomb(f0, Q, ftype='peak', fs=fs, pass_zero=True)
+
+    >>> # Frequency response
+    >>> freq, h = signal.freqz(b, a, fs=fs)
+    >>> response = abs(h)
+    >>> # To avoid divide by zero when graphing
+    >>> response[response == 0] = 1e-20
+    >>> # Plot
+    >>> fig, ax = plt.subplots(2, 1, figsize=(8, 6), sharex=True)
+    >>> ax[0].plot(freq, 20*np.log10(response), color='blue')
+    >>> ax[0].set_title("Frequency Response")
+    >>> ax[0].set_ylabel("Amplitude (dB)", color='blue')
+    >>> ax[0].set_xlim([0, 500])
+    >>> ax[0].set_ylim([-80, 10])
+    >>> ax[0].grid(True)
+    >>> ax[1].plot(freq, (np.angle(h)*180/np.pi+180)%360 - 180, color='green')
+    >>> ax[1].set_ylabel("Angle (degrees)", color='green')
+    >>> ax[1].set_xlabel("Frequency (Hz)")
+    >>> ax[1].set_xlim([0, 500])
+    >>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
+    >>> ax[1].set_ylim([-90, 90])
+    >>> ax[1].grid(True)
+    >>> plt.show()
+    """
+
+    # Convert w0, Q, and fs to float
+    w0 = float(w0)
+    Q = float(Q)
+    fs = float(fs)
+
+    # Check for invalid cutoff frequency or filter type
+    ftype = ftype.lower()
+    if not 0 < w0 < fs / 2:
+        raise ValueError("w0 must be between 0 and {}"
+                         " (nyquist), but given {}.".format(fs / 2, w0))
+    if ftype not in ('notch', 'peak'):
+        raise ValueError('ftype must be either notch or peak.')
+
+    # Compute the order of the filter
+    N = round(fs / w0)
+
+    # Check for cutoff frequency divisibility
+    if abs(w0 - fs/N)/fs > 1e-14:
+        raise ValueError('fs must be divisible by w0.')
+
+    # Compute frequency in radians and filter bandwidth
+    # Eq. 11.3.1 (p. 574) from reference [1]
+    w0 = (2 * np.pi * w0) / fs
+    w_delta = w0 / Q
+
+    # Define base gain values depending on notch or peak filter
+    # Compute -3dB attenuation
+    # Eqs. 11.4.1 and 11.4.2 (p. 582) from reference [1]
+    if ftype == 'notch':
+        G0, G = 1, 0
+    elif ftype == 'peak':
+        G0, G = 0, 1
+    GB = 1 / np.sqrt(2)
+
+    # Compute beta
+    # Eq. 11.5.3 (p. 591) from reference [1]
+    beta = np.sqrt((GB**2 - G0**2) / (G**2 - GB**2)) * np.tan(N * w_delta / 4)
+
+    # Compute filter coefficients
+    # Eq 11.5.1 (p. 590) variables a, b, c from reference [1]
+    ax = (1 - beta) / (1 + beta)
+    bx = (G0 + G * beta) / (1 + beta)
+    cx = (G0 - G * beta) / (1 + beta)
+
+    # Last coefficients are negative to get peaking comb that passes zero or
+    # notching comb that doesn't.
+    negative_coef = ((ftype == 'peak' and pass_zero) or
+                     (ftype == 'notch' and not pass_zero))
+
+    # Compute numerator coefficients
+    # Eq 11.5.1 (p. 590) or Eq 11.5.4 (p. 591) from reference [1]
+    # b - cz^-N or b + cz^-N
+    b = np.zeros(N + 1)
+    b[0] = bx
+    if negative_coef:
+        b[-1] = -cx
+    else:
+        b[-1] = +cx
+
+    # Compute denominator coefficients
+    # Eq 11.5.1 (p. 590) or Eq 11.5.4 (p. 591) from reference [1]
+    # 1 - az^-N or 1 + az^-N
+    a = np.zeros(N + 1)
+    a[0] = 1
+    if negative_coef:
+        a[-1] = -ax
+    else:
+        a[-1] = +ax
+
+    return b, a
+
+
+def _hz_to_erb(hz):
+    """
+    Utility for converting from frequency (Hz) to the
+    Equivalent Rectangular Bandwidth (ERB) scale
+    ERB = frequency / EarQ + minBW
+    """
+    EarQ = 9.26449
+    minBW = 24.7
+    return hz / EarQ + minBW
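+
+# A quick example of the constants above (as used by `gammatone` below):
+#
+#     >>> round(_hz_to_erb(1000.0), 2)
+#     132.64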
+
+
+def gammatone(freq, ftype, order=None, numtaps=None, fs=None):
+    """
+    Gammatone filter design.
+
+    This function computes the coefficients of an FIR or IIR gammatone
+    digital filter [1]_.
+
+    Parameters
+    ----------
+    freq : float
+        Center frequency of the filter (expressed in the same units
+        as `fs`).
+    ftype : {'fir', 'iir'}
+        The type of filter the function generates. If 'fir', the function
+        will generate an Nth order FIR gammatone filter. If 'iir', the
+        function will generate an 8th order digital IIR filter, modeled
+        as a 4th order gammatone filter.
+    order : int, optional
+        The order of the filter. Only used when ``ftype='fir'``.
+        Default is 4 to model the human auditory system. Must be between
+        0 and 24.
+    numtaps : int, optional
+        Length of the filter. Only used when ``ftype='fir'``.
+        Default is ``fs*0.015`` if `fs` is greater than 1000,
+        15 if `fs` is less than or equal to 1000.
+    fs : float, optional
+        The sampling frequency of the signal. `freq` must be between
+        0 and ``fs/2``. Default is 2.
+
+    Returns
+    -------
+    b, a : ndarray, ndarray
+        Numerator (``b``) and denominator (``a``) polynomials of the filter.
+
+    Raises
+    ------
+    ValueError
+        If `freq` is less than or equal to 0 or greater than or equal to
+        ``fs/2``, if `ftype` is not 'fir' or 'iir', or if `order` is less
+        than or equal to 0 or greater than 24 when ``ftype='fir'``.
+
+    See Also
+    --------
+    firwin
+    iirfilter
+
+    References
+    ----------
+    .. [1] Slaney, Malcolm, "An Efficient Implementation of the
+        Patterson-Holdsworth Auditory Filter Bank", Apple Computer
+        Technical Report 35, 1993, pp.3-8, 34-39.
+
+    Examples
+    --------
+    16-sample 4th order FIR Gammatone filter centered at 440 Hz
+
+    >>> from scipy import signal
+    >>> signal.gammatone(440, 'fir', numtaps=16, fs=16000)
+    (array([ 0.00000000e+00,  2.22196719e-07,  1.64942101e-06,  4.99298227e-06,
+        1.01993969e-05,  1.63125770e-05,  2.14648940e-05,  2.29947263e-05,
+        1.76776931e-05,  2.04980537e-06, -2.72062858e-05, -7.28455299e-05,
+       -1.36651076e-04, -2.19066855e-04, -3.18905076e-04, -4.33156712e-04]),
+       [1.0])
+
+    IIR Gammatone filter centered at 440 Hz
+
+    >>> import matplotlib.pyplot as plt
+    >>> import numpy as np
+
+    >>> b, a = signal.gammatone(440, 'iir', fs=16000)
+    >>> w, h = signal.freqz(b, a)
+    >>> plt.plot(w / ((2 * np.pi) / 16000), 20 * np.log10(abs(h)))
+    >>> plt.xscale('log')
+    >>> plt.title('Gammatone filter frequency response')
+    >>> plt.xlabel('Frequency')
+    >>> plt.ylabel('Amplitude [dB]')
+    >>> plt.margins(0, 0.1)
+    >>> plt.grid(which='both', axis='both')
+    >>> plt.axvline(440, color='green') # cutoff frequency
+    >>> plt.show()
+    """
+    # Converts freq to float
+    freq = float(freq)
+
+    # Set sampling rate if not passed
+    if fs is None:
+        fs = 2
+    fs = float(fs)
+
+    # Check for invalid cutoff frequency or filter type
+    ftype = ftype.lower()
+    filter_types = ['fir', 'iir']
+    if not 0 < freq < fs / 2:
+        raise ValueError("The frequency must be between 0 and {}"
+                         " (nyquist), but given {}.".format(fs / 2, freq))
+    if ftype not in filter_types:
+        raise ValueError('ftype must be either fir or iir.')
+
+    # Calculate FIR gammatone filter
+    if ftype == 'fir':
+        # Set order and numtaps if not passed
+        if order is None:
+            order = 4
+        order = operator.index(order)
+
+        if numtaps is None:
+            numtaps = max(int(fs * 0.015), 15)
+        numtaps = operator.index(numtaps)
+
+        # Check for invalid order
+        if not 0 < order <= 24:
+            raise ValueError("Invalid order: order must be > 0 and <= 24.")
+
+        # Gammatone impulse response settings
+        t = np.arange(numtaps) / fs
+        bw = 1.019 * _hz_to_erb(freq)
+
+        # Calculate the FIR gammatone filter
+        b = (t ** (order - 1)) * np.exp(-2 * np.pi * bw * t)
+        b *= np.cos(2 * np.pi * freq * t)
+
+        # Scale the FIR filter so the frequency response is 1 at cutoff
+        scale_factor = 2 * (2 * np.pi * bw) ** (order)
+        scale_factor /= float_factorial(order - 1)
+        scale_factor /= fs
+        b *= scale_factor
+        a = [1.0]
+
+    # Calculate IIR gammatone filter
+    elif ftype == 'iir':
+        # Raise warning if order and/or numtaps is passed
+        if order is not None:
+            warnings.warn('order is not used for IIR gammatone filter.')
+        if numtaps is not None:
+            warnings.warn('numtaps is not used for IIR gammatone filter.')
+
+        # Gammatone impulse response settings
+        T = 1./fs
+        bw = 2 * np.pi * 1.019 * _hz_to_erb(freq)
+        fr = 2 * freq * np.pi * T
+        bwT = bw * T
+
+        # Calculate the gain to normalize the volume at the center frequency
+        g1 = -2 * np.exp(2j * fr) * T
+        g2 = 2 * np.exp(-(bwT) + 1j * fr) * T
+        g3 = np.sqrt(3 + 2 ** (3 / 2)) * np.sin(fr)
+        g4 = np.sqrt(3 - 2 ** (3 / 2)) * np.sin(fr)
+        g5 = np.exp(2j * fr)
+
+        g = g1 + g2 * (np.cos(fr) - g4)
+        g *= (g1 + g2 * (np.cos(fr) + g4))
+        g *= (g1 + g2 * (np.cos(fr) - g3))
+        g *= (g1 + g2 * (np.cos(fr) + g3))
+        g /= ((-2 / np.exp(2 * bwT) - 2 * g5 + 2 * (1 + g5) / np.exp(bwT)) ** 4)
+        g = np.abs(g)
+
+        # Create empty filter coefficient lists
+        b = np.empty(5)
+        a = np.empty(9)
+
+        # Calculate the numerator coefficients
+        b[0] = (T ** 4) / g
+        b[1] = -4 * T ** 4 * np.cos(fr) / np.exp(bw * T) / g
+        b[2] = 6 * T ** 4 * np.cos(2 * fr) / np.exp(2 * bw * T) / g
+        b[3] = -4 * T ** 4 * np.cos(3 * fr) / np.exp(3 * bw * T) / g
+        b[4] = T ** 4 * np.cos(4 * fr) / np.exp(4 * bw * T) / g
+
+        # Calculate the denominator coefficients
+        a[0] = 1
+        a[1] = -8 * np.cos(fr) / np.exp(bw * T)
+        a[2] = 4 * (4 + 3 * np.cos(2 * fr)) / np.exp(2 * bw * T)
+        a[3] = -8 * (6 * np.cos(fr) + np.cos(3 * fr))
+        a[3] /= np.exp(3 * bw * T)
+        a[4] = 2 * (18 + 16 * np.cos(2 * fr) + np.cos(4 * fr))
+        a[4] /= np.exp(4 * bw * T)
+        a[5] = -8 * (6 * np.cos(fr) + np.cos(3 * fr))
+        a[5] /= np.exp(5 * bw * T)
+        a[6] = 4 * (4 + 3 * np.cos(2 * fr)) / np.exp(6 * bw * T)
+        a[7] = -8 * np.cos(fr) / np.exp(7 * bw * T)
+        a[8] = np.exp(-8 * bw * T)
+
+    return b, a
+
+
+filter_dict = {'butter': [buttap, buttord],
+               'butterworth': [buttap, buttord],
+
+               'cauer': [ellipap, ellipord],
+               'elliptic': [ellipap, ellipord],
+               'ellip': [ellipap, ellipord],
+
+               'bessel': [besselap],
+               'bessel_phase': [besselap],
+               'bessel_delay': [besselap],
+               'bessel_mag': [besselap],
+
+               'cheby1': [cheb1ap, cheb1ord],
+               'chebyshev1': [cheb1ap, cheb1ord],
+               'chebyshevi': [cheb1ap, cheb1ord],
+
+               'cheby2': [cheb2ap, cheb2ord],
+               'chebyshev2': [cheb2ap, cheb2ord],
+               'chebyshevii': [cheb2ap, cheb2ord],
+               }
+
+band_dict = {'band': 'bandpass',
+             'bandpass': 'bandpass',
+             'pass': 'bandpass',
+             'bp': 'bandpass',
+
+             'bs': 'bandstop',
+             'bandstop': 'bandstop',
+             'bands': 'bandstop',
+             'stop': 'bandstop',
+
+             'l': 'lowpass',
+             'low': 'lowpass',
+             'lowpass': 'lowpass',
+             'lp': 'lowpass',
+
+             'high': 'highpass',
+             'highpass': 'highpass',
+             'h': 'highpass',
+             'hp': 'highpass',
+             }
+
+bessel_norms = {'bessel': 'phase',
+                'bessel_phase': 'phase',
+                'bessel_delay': 'delay',
+                'bessel_mag': 'mag'}
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/_fir_filter_design.py b/__packaged__/coreml/.python_dependencies/scipy/signal/_fir_filter_design.py
new file mode 100644
index 00000000..20216073
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/_fir_filter_design.py
@@ -0,0 +1,1296 @@
+# -*- coding: utf-8 -*-
+"""Functions for FIR filter design."""
+
+from math import ceil, log
+import operator
+import warnings
+
+import numpy as np
+from numpy.fft import irfft, fft, ifft
+from scipy.special import sinc
+from scipy.linalg import (toeplitz, hankel, solve, LinAlgError, LinAlgWarning,
+                          lstsq)
+
+from . import _sigtools
+
+__all__ = ['kaiser_beta', 'kaiser_atten', 'kaiserord',
+           'firwin', 'firwin2', 'remez', 'firls', 'minimum_phase']
+
+
+def _get_fs(fs, nyq):
+    """
+    Utility for replacing the argument 'nyq' (with default 1) with 'fs'.
+    """
+    if nyq is None and fs is None:
+        fs = 2
+    elif nyq is not None:
+        if fs is not None:
+            raise ValueError("Values cannot be given for both 'nyq' and 'fs'.")
+        msg = ("Keyword argument 'nyq' is deprecated in favour of 'fs' and "
+               "will be removed in SciPy 1.12.0.")
+        warnings.warn(msg, DeprecationWarning, stacklevel=3)
+        fs = 2*nyq
+    return fs
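+
+# A minimal usage sketch: with neither argument supplied the sampling
+# frequency defaults to 2 (so band edges are fractions of Nyquist); a
+# legacy `nyq` is doubled into the equivalent `fs` (with a
+# DeprecationWarning).
+#
+#     >>> _get_fs(fs=None, nyq=None)
+#     2
+#     >>> _get_fs(fs=48000, nyq=None)
+#     48000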
+
+
+# Some notes on function parameters:
+#
+# `cutoff` and `width` are given as numbers between 0 and 1.  These are
+# relative frequencies, expressed as a fraction of the Nyquist frequency.
+# For example, if the Nyquist frequency is 2 kHz, then width=0.15 is a width
+# of 300 Hz.
+#
+# The `order` of a FIR filter is one less than the number of taps.
+# This is a potential source of confusion, so in the following code,
+# we will always use the number of taps as the parameterization of
+# the 'size' of the filter. The "number of taps" means the number
+# of coefficients, which is the same as the length of the impulse
+# response of the filter.
+
+
+def kaiser_beta(a):
+    """Compute the Kaiser parameter `beta`, given the attenuation `a`.
+
+    Parameters
+    ----------
+    a : float
+        The desired attenuation in the stopband and maximum ripple in
+        the passband, in dB.  This should be a *positive* number.
+
+    Returns
+    -------
+    beta : float
+        The `beta` parameter to be used in the formula for a Kaiser window.
+
+    References
+    ----------
+    Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
+
+    Examples
+    --------
+    Suppose we want to design a lowpass filter, with 65 dB attenuation
+    in the stop band.  The Kaiser window parameter to be used in the
+    window method is computed by ``kaiser_beta(65)``:
+
+    >>> from scipy.signal import kaiser_beta
+    >>> kaiser_beta(65)
+    6.20426
+
+    """
+    if a > 50:
+        beta = 0.1102 * (a - 8.7)
+    elif a > 21:
+        beta = 0.5842 * (a - 21) ** 0.4 + 0.07886 * (a - 21)
+    else:
+        beta = 0.0
+    return beta
+
+
+def kaiser_atten(numtaps, width):
+    """Compute the attenuation of a Kaiser FIR filter.
+
+    Given the number of taps `N` and the transition width `width`, compute the
+    attenuation `a` in dB, given by Kaiser's formula:
+
+        a = 2.285 * (N - 1) * pi * width + 7.95
+
+    Parameters
+    ----------
+    numtaps : int
+        The number of taps in the FIR filter.
+    width : float
+        The desired width of the transition region between passband and
+        stopband (or, in general, at any discontinuity) for the filter,
+        expressed as a fraction of the Nyquist frequency.
+
+    Returns
+    -------
+    a : float
+        The attenuation of the ripple, in dB.
+
+    See Also
+    --------
+    kaiserord, kaiser_beta
+
+    Examples
+    --------
+    Suppose we want to design a FIR filter using the Kaiser window method
+    that will have 211 taps and a transition width of 9 Hz for a signal that
+    is sampled at 480 Hz. Expressed as a fraction of the Nyquist frequency,
+    the width is 9/(0.5*480) = 0.0375. The approximate attenuation (in dB)
+    is computed as follows:
+
+    >>> from scipy.signal import kaiser_atten
+    >>> kaiser_atten(211, 0.0375)
+    64.48099630593983
+
+    """
+    a = 2.285 * (numtaps - 1) * np.pi * width + 7.95
+    return a
+
+
+def kaiserord(ripple, width):
+    """
+    Determine the filter window parameters for the Kaiser window method.
+
+    The parameters returned by this function are generally used to create
+    a finite impulse response filter using the window method, with either
+    `firwin` or `firwin2`.
+
+    Parameters
+    ----------
+    ripple : float
+        Upper bound for the deviation (in dB) of the magnitude of the
+        filter's frequency response from that of the desired filter (not
+        including frequencies in any transition intervals). That is, if w
+        is the frequency expressed as a fraction of the Nyquist frequency,
+        A(w) is the actual frequency response of the filter and D(w) is the
+        desired frequency response, the design requirement is that::
+
+            abs(A(w) - D(w)) < 10**(-ripple/20)
+
+        for 0 <= w <= 1 and w not in a transition interval.
+    width : float
+        Width of transition region, normalized so that 1 corresponds to pi
+        radians / sample. That is, the frequency is expressed as a fraction
+        of the Nyquist frequency.
+
+    Returns
+    -------
+    numtaps : int
+        The length of the Kaiser window.
+    beta : float
+        The beta parameter for the Kaiser window.
+
+    See Also
+    --------
+    kaiser_beta, kaiser_atten
+
+    Notes
+    -----
+    There are several ways to obtain the Kaiser window:
+
+    - ``signal.windows.kaiser(numtaps, beta, sym=True)``
+    - ``signal.get_window(beta, numtaps)``
+    - ``signal.get_window(('kaiser', beta), numtaps)``
+
+    The empirical equations discovered by Kaiser are used.
+
+    References
+    ----------
+    Oppenheim, Schafer, "Discrete-Time Signal Processing", pp.475-476.
+
+    Examples
+    --------
+    We will use the Kaiser window method to design a lowpass FIR filter
+    for a signal that is sampled at 1000 Hz.
+
+    We want at least 65 dB rejection in the stop band, and in the pass
+    band the gain should vary no more than 0.5%.
+
+    We want a cutoff frequency of 175 Hz, with a transition between the
+    pass band and the stop band of 24 Hz. That is, in the band [0, 163],
+    the gain varies no more than 0.5%, and in the band [187, 500], the
+    signal is attenuated by at least 65 dB.
+
+    >>> import numpy as np
+    >>> from scipy.signal import kaiserord, firwin, freqz
+    >>> import matplotlib.pyplot as plt
+    >>> fs = 1000.0
+    >>> cutoff = 175
+    >>> width = 24
+
+    The Kaiser method accepts just a single parameter to control the pass
+    band ripple and the stop band rejection, so we use the more restrictive
+    of the two. In this case, the pass band ripple is 0.005, or 46.02 dB,
+    so we will use 65 dB as the design parameter.
+
+    Use `kaiserord` to determine the length of the filter and the
+    parameter for the Kaiser window.
+
+    >>> numtaps, beta = kaiserord(65, width/(0.5*fs))
+    >>> numtaps
+    167
+    >>> beta
+    6.20426
+
+    Use `firwin` to create the FIR filter.
+
+    >>> taps = firwin(numtaps, cutoff, window=('kaiser', beta),
+    ...               scale=False, fs=fs)
+
+    Compute the frequency response of the filter.  ``w`` is the array of
+    frequencies, and ``h`` is the corresponding complex array of frequency
+    responses.
+
+    >>> w, h = freqz(taps, worN=8000)
+    >>> w *= 0.5*fs/np.pi  # Convert w to Hz.
+
+    Compute the deviation of the magnitude of the filter's response from
+    that of the ideal lowpass filter. Values in the transition region are
+    set to ``nan``, so they won't appear in the plot.
+
+    >>> ideal = w < cutoff  # The "ideal" frequency response.
+    >>> deviation = np.abs(np.abs(h) - ideal)
+    >>> deviation[(w > cutoff - 0.5*width) & (w < cutoff + 0.5*width)] = np.nan
+
+    Plot the deviation. A close look at the left end of the stop band shows
+    that the requirement for 65 dB attenuation is violated in the first lobe
+    by about 0.125 dB. This is not unusual for the Kaiser window method.
+
+    >>> plt.plot(w, 20*np.log10(np.abs(deviation)))
+    >>> plt.xlim(0, 0.5*fs)
+    >>> plt.ylim(-90, -60)
+    >>> plt.grid(alpha=0.25)
+    >>> plt.axhline(-65, color='r', ls='--', alpha=0.3)
+    >>> plt.xlabel('Frequency (Hz)')
+    >>> plt.ylabel('Deviation from ideal (dB)')
+    >>> plt.title('Lowpass Filter Frequency Response')
+    >>> plt.show()
+
+    """
+    A = abs(ripple)  # in case somebody is confused as to what's meant
+    if A < 8:
+        # Formula for N is not valid in this range.
+        raise ValueError("Requested maximum ripple attentuation %f is too "
+                         "small for the Kaiser formula." % A)
+    beta = kaiser_beta(A)
+
+    # Kaiser's formula (as given in Oppenheim and Schafer) is for the filter
+    # order, so we have to add 1 to get the number of taps.
+    numtaps = (A - 7.95) / 2.285 / (np.pi * width) + 1
+
+    return int(ceil(numtaps)), beta
+
+
+def firwin(numtaps, cutoff, width=None, window='hamming', pass_zero=True,
+           scale=True, nyq=None, fs=None):
+    """
+    FIR filter design using the window method.
+
+    This function computes the coefficients of a finite impulse response
+    filter. The filter will have linear phase; it will be Type I if
+    `numtaps` is odd and Type II if `numtaps` is even.
+
+    Type II filters always have zero response at the Nyquist frequency, so a
+    ValueError exception is raised if firwin is called with `numtaps` even
+    and a passband whose right end is at the Nyquist frequency.
+
+    Parameters
+    ----------
+    numtaps : int
+        Length of the filter (number of coefficients, i.e. the filter
+        order + 1).  `numtaps` must be odd if a passband includes the
+        Nyquist frequency.
+    cutoff : float or 1-D array_like
+        Cutoff frequency of filter (expressed in the same units as `fs`)
+        OR an array of cutoff frequencies (that is, band edges). In the
+        latter case, the frequencies in `cutoff` should be positive and
+        monotonically increasing between 0 and `fs/2`. The values 0 and
+        `fs/2` must not be included in `cutoff`.
+    width : float or None, optional
+        If `width` is not None, then assume it is the approximate width
+        of the transition region (expressed in the same units as `fs`)
+        for use in Kaiser FIR filter design. In this case, the `window`
+        argument is ignored.
+    window : string or tuple of string and parameter values, optional
+        Desired window to use. See `scipy.signal.get_window` for a list
+        of windows and required parameters.
+    pass_zero : {True, False, 'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional
+        If True, the gain at the frequency 0 (i.e., the "DC gain") is 1.
+        If False, the DC gain is 0. Can also be a string argument for the
+        desired filter type (equivalent to ``btype`` in IIR design functions).
+
+        .. versionadded:: 1.3.0
+           Support for string arguments.
+    scale : bool, optional
+        Set to True to scale the coefficients so that the frequency
+        response is exactly unity at a certain frequency.
+        That frequency is either:
+
+        - 0 (DC) if the first passband starts at 0 (i.e. pass_zero
+          is True)
+        - `fs/2` (the Nyquist frequency) if the first passband ends at
+          `fs/2` (i.e., the filter is a single band highpass filter)
+        - center of the first passband otherwise
+
+    nyq : float, optional, deprecated
+        This is the Nyquist frequency. Each frequency in `cutoff` must be
+        between 0 and `nyq`. Default is 1.
+
+        .. deprecated:: 1.0.0
+           `firwin` keyword argument `nyq` is deprecated in favour of `fs` and
+           will be removed in SciPy 1.12.0.
+    fs : float, optional
+        The sampling frequency of the signal. Each frequency in `cutoff`
+        must be between 0 and ``fs/2``.  Default is 2.
+
+    Returns
+    -------
+    h : (numtaps,) ndarray
+        Coefficients of length `numtaps` FIR filter.
+
+    Raises
+    ------
+    ValueError
+        If any value in `cutoff` is less than or equal to 0 or greater
+        than or equal to ``fs/2``, if the values in `cutoff` are not strictly
+        monotonically increasing, or if `numtaps` is even but a passband
+        includes the Nyquist frequency.
+
+    See Also
+    --------
+    firwin2
+    firls
+    minimum_phase
+    remez
+
+    Examples
+    --------
+    Low-pass from 0 to f:
+
+    >>> from scipy import signal
+    >>> numtaps = 3
+    >>> f = 0.1
+    >>> signal.firwin(numtaps, f)
+    array([ 0.06799017,  0.86401967,  0.06799017])
+
+    Use a specific window function:
+
+    >>> signal.firwin(numtaps, f, window='nuttall')
+    array([  3.56607041e-04,   9.99286786e-01,   3.56607041e-04])
+
+    High-pass ('stop' from 0 to f):
+
+    >>> signal.firwin(numtaps, f, pass_zero=False)
+    array([-0.00859313,  0.98281375, -0.00859313])
+
+    Band-pass:
+
+    >>> f1, f2 = 0.1, 0.2
+    >>> signal.firwin(numtaps, [f1, f2], pass_zero=False)
+    array([ 0.06301614,  0.88770441,  0.06301614])
+
+    Band-stop:
+
+    >>> signal.firwin(numtaps, [f1, f2])
+    array([-0.00801395,  1.0160279 , -0.00801395])
+
+    Multi-band (passbands are [0, f1], [f2, f3] and [f4, 1]):
+
+    >>> f3, f4 = 0.3, 0.4
+    >>> signal.firwin(numtaps, [f1, f2, f3, f4])
+    array([-0.01376344,  1.02752689, -0.01376344])
+
+    Multi-band (passbands are [f1, f2] and [f3,f4]):
+
+    >>> signal.firwin(numtaps, [f1, f2, f3, f4], pass_zero=False)
+    array([ 0.04890915,  0.91284326,  0.04890915])
+
+    """  # noqa: E501
+    # The major enhancements to this function added in November 2010 were
+    # developed by Tom Krauss (see ticket #902).
+
+    nyq = 0.5 * _get_fs(fs, nyq)
+
+    cutoff = np.atleast_1d(cutoff) / float(nyq)
+
+    # Check for invalid input.
+    if cutoff.ndim > 1:
+        raise ValueError("The cutoff argument must be at most "
+                         "one-dimensional.")
+    if cutoff.size == 0:
+        raise ValueError("At least one cutoff frequency must be given.")
+    if cutoff.min() <= 0 or cutoff.max() >= 1:
+        raise ValueError("Invalid cutoff frequency: frequencies must be "
+                         "greater than 0 and less than fs/2.")
+    if np.any(np.diff(cutoff) <= 0):
+        raise ValueError("Invalid cutoff frequencies: the frequencies "
+                         "must be strictly increasing.")
+
+    if width is not None:
+        # A width was given.  Find the beta parameter of the Kaiser window
+        # and set `window`.  This overrides the value of `window` passed in.
+        atten = kaiser_atten(numtaps, float(width) / nyq)
+        beta = kaiser_beta(atten)
+        window = ('kaiser', beta)
+
+    if isinstance(pass_zero, str):
+        if pass_zero in ('bandstop', 'lowpass'):
+            if pass_zero == 'lowpass':
+                if cutoff.size != 1:
+                    raise ValueError('cutoff must have one element if '
+                                     'pass_zero=="lowpass", got %s'
+                                     % (cutoff.shape,))
+            elif cutoff.size <= 1:
+                raise ValueError('cutoff must have at least two elements if '
+                                 'pass_zero=="bandstop", got %s'
+                                 % (cutoff.shape,))
+            pass_zero = True
+        elif pass_zero in ('bandpass', 'highpass'):
+            if pass_zero == 'highpass':
+                if cutoff.size != 1:
+                    raise ValueError('cutoff must have one element if '
+                                     'pass_zero=="highpass", got %s'
+                                     % (cutoff.shape,))
+            elif cutoff.size <= 1:
+                raise ValueError('cutoff must have at least two elements if '
+                                 'pass_zero=="bandpass", got %s'
+                                 % (cutoff.shape,))
+            pass_zero = False
+        else:
+            raise ValueError('pass_zero must be True, False, "bandpass", '
+                             '"lowpass", "highpass", or "bandstop", got '
+                             '%s' % (pass_zero,))
+    pass_zero = bool(operator.index(pass_zero))  # ensure bool-like
+
+    pass_nyquist = bool(cutoff.size & 1) ^ pass_zero
+    if pass_nyquist and numtaps % 2 == 0:
+        raise ValueError("A filter with an even number of coefficients must "
+                         "have zero response at the Nyquist frequency.")
+
+    # Insert 0 and/or 1 at the ends of cutoff so that the length of cutoff
+    # is even, and each pair in cutoff corresponds to passband.
+    cutoff = np.hstack(([0.0] * pass_zero, cutoff, [1.0] * pass_nyquist))
+
+    # `bands` is a 2-D array; each row gives the left and right edges of
+    # a passband.
+    bands = cutoff.reshape(-1, 2)
+
+    # Build up the coefficients.
+    alpha = 0.5 * (numtaps - 1)
+    m = np.arange(0, numtaps) - alpha
+    h = 0
+    for left, right in bands:
+        h += right * sinc(right * m)
+        h -= left * sinc(left * m)
+
+    # Get and apply the window function.
+    from .windows import get_window
+    win = get_window(window, numtaps, fftbins=False)
+    h *= win
+
+    # Now handle scaling if desired.
+    if scale:
+        # Get the first passband.
+        left, right = bands[0]
+        if left == 0:
+            scale_frequency = 0.0
+        elif right == 1:
+            scale_frequency = 1.0
+        else:
+            scale_frequency = 0.5 * (left + right)
+        c = np.cos(np.pi * m * scale_frequency)
+        s = np.sum(h * c)
+        h /= s
+
+    return h
+
+
+# Original version of firwin2 from scipy ticket #457, submitted by "tash".
+#
+# Rewritten by Warren Weckesser, 2010.
+
+def firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=None,
+            antisymmetric=False, fs=None):
+    """
+    FIR filter design using the window method.
+
+    From the given frequencies `freq` and corresponding gains `gain`,
+    this function constructs an FIR filter with linear phase and
+    (approximately) the given frequency response.
+
+    Parameters
+    ----------
+    numtaps : int
+        The number of taps in the FIR filter.  `numtaps` must be less than
+        `nfreqs`.
+    freq : array_like, 1-D
+        The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being
+        Nyquist.  The Nyquist frequency is half `fs`.
+        The values in `freq` must be nondecreasing. A value can be repeated
+        once to implement a discontinuity. The first value in `freq` must
+        be 0, and the last value must be ``fs/2``. Values 0 and ``fs/2`` must
+        not be repeated.
+    gain : array_like
+        The filter gains at the frequency sampling points. Certain
+        constraints to gain values, depending on the filter type, are applied,
+        see Notes for details.
+    nfreqs : int, optional
+        The size of the interpolation mesh used to construct the filter.
+        For most efficient behavior, this should be a power of 2 plus 1
+        (e.g., 129, 257, etc.). The default is one more than the smallest
+        power of 2 that is not less than `numtaps`. `nfreqs` must be greater
+        than `numtaps`.
+    window : string or (string, float) or float, or None, optional
+        Window function to use. Default is "hamming". See
+        `scipy.signal.get_window` for the complete list of possible values.
+        If None, no window function is applied.
+    nyq : float, optional, deprecated
+        This is the Nyquist frequency. Each frequency in `freq` must be
+        between 0 and `nyq`. Default is 1.
+
+        .. deprecated:: 1.0.0
+           `firwin2` keyword argument `nyq` is deprecated in favour of `fs` and
+           will be removed in SciPy 1.12.0.
+    antisymmetric : bool, optional
+        Whether resulting impulse response is symmetric/antisymmetric.
+        See Notes for more details.
+    fs : float, optional
+        The sampling frequency of the signal. Each frequency in `freq`
+        must be between 0 and ``fs/2``. Default is 2.
+
+    Returns
+    -------
+    taps : ndarray
+        The filter coefficients of the FIR filter, as a 1-D array of length
+        `numtaps`.
+
+    See Also
+    --------
+    firls
+    firwin
+    minimum_phase
+    remez
+
+    Notes
+    -----
+    From the given set of frequencies and gains, the desired response is
+    constructed in the frequency domain. The inverse FFT is applied to the
+    desired response to create the associated convolution kernel, and the
+    first `numtaps` coefficients of this kernel, scaled by `window`, are
+    returned.
+
+    The FIR filter will have linear phase. The type of filter is determined by
+    the values of `numtaps` and the `antisymmetric` flag.
+    There are four possible combinations:
+
+       - odd  `numtaps`, `antisymmetric` is False, type I filter is produced
+       - even `numtaps`, `antisymmetric` is False, type II filter is produced
+       - odd  `numtaps`, `antisymmetric` is True, type III filter is produced
+       - even `numtaps`, `antisymmetric` is True, type IV filter is produced
+
+    The magnitude responses of all but type I filters are subject to the
+    following constraints:
+
+       - type II  -- zero at the Nyquist frequency
+       - type III -- zero at zero and Nyquist frequencies
+       - type IV  -- zero at zero frequency
+
+    .. versionadded:: 0.9.0
+
+    References
+    ----------
+    .. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal
+       Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989).
+       (See, for example, Section 7.4.)
+
+    .. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital
+       Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm
+
+    Examples
+    --------
+    A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and
+    that decreases linearly on [0.5, 1.0] from 1 to 0:
+
+    >>> from scipy import signal
+    >>> taps = signal.firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
+    >>> print(taps[72:78])
+    [-0.02286961 -0.06362756  0.57310236  0.57310236 -0.06362756 -0.02286961]
+
+    """
+    nyq = 0.5 * _get_fs(fs, nyq)
+
+    if len(freq) != len(gain):
+        raise ValueError('freq and gain must be of same length.')
+
+    if nfreqs is not None and numtaps >= nfreqs:
+        raise ValueError(('numtaps must be less than nfreqs, but firwin2 was '
+                          'called with numtaps=%d and nfreqs=%s') %
+                         (numtaps, nfreqs))
+
+    if freq[0] != 0 or freq[-1] != nyq:
+        raise ValueError('freq must start with 0 and end with fs/2.')
+    d = np.diff(freq)
+    if (d < 0).any():
+        raise ValueError('The values in freq must be nondecreasing.')
+    d2 = d[:-1] + d[1:]
+    if (d2 == 0).any():
+        raise ValueError('A value in freq must not occur more than twice.')
+    if freq[1] == 0:
+        raise ValueError('Value 0 must not be repeated in freq')
+    if freq[-2] == nyq:
+        raise ValueError('Value fs/2 must not be repeated in freq')
+
+    if antisymmetric:
+        if numtaps % 2 == 0:
+            ftype = 4
+        else:
+            ftype = 3
+    else:
+        if numtaps % 2 == 0:
+            ftype = 2
+        else:
+            ftype = 1
+
+    if ftype == 2 and gain[-1] != 0.0:
+        raise ValueError("A Type II filter must have zero gain at the "
+                         "Nyquist frequency.")
+    elif ftype == 3 and (gain[0] != 0.0 or gain[-1] != 0.0):
+        raise ValueError("A Type III filter must have zero gain at zero "
+                         "and Nyquist frequencies.")
+    elif ftype == 4 and gain[0] != 0.0:
+        raise ValueError("A Type IV filter must have zero gain at zero "
+                         "frequency.")
+
+    if nfreqs is None:
+        nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2)))
+
+    if (d == 0).any():
+        # Tweak any repeated values in freq so that interp works.
+        freq = np.array(freq, copy=True)
+        eps = np.finfo(float).eps * nyq
+        for k in range(len(freq) - 1):
+            if freq[k] == freq[k + 1]:
+                freq[k] = freq[k] - eps
+                freq[k + 1] = freq[k + 1] + eps
+        # Check if freq is strictly increasing after tweak
+        d = np.diff(freq)
+        if (d <= 0).any():
+            raise ValueError("freq cannot contain numbers that are too close "
+                             "(within eps * (fs/2): "
+                             "{}) to a repeated value".format(eps))
+
+    # Linearly interpolate the desired response on a uniform mesh `x`.
+    x = np.linspace(0.0, nyq, nfreqs)
+    fx = np.interp(x, freq, gain)
+
+    # Adjust the phases of the coefficients so that the first `ntaps` of the
+    # inverse FFT are the desired filter coefficients.
+    shift = np.exp(-(numtaps - 1) / 2. * 1.j * np.pi * x / nyq)
+    if ftype > 2:
+        shift *= 1j
+
+    fx2 = fx * shift
+
+    # Use irfft to compute the inverse FFT.
+    out_full = irfft(fx2)
+
+    if window is not None:
+        # Create the window to apply to the filter coefficients.
+        from .windows import get_window
+        wind = get_window(window, numtaps, fftbins=False)
+    else:
+        wind = 1
+
+    # Keep only the first `numtaps` coefficients in `out`, and multiply by
+    # the window.
+    out = out_full[:numtaps] * wind
+
+    if ftype == 3:
+        out[out.size // 2] = 0.0
+
+    return out
+
+
+def remez(numtaps, bands, desired, weight=None, Hz=None, type='bandpass',
+          maxiter=25, grid_density=16, fs=None):
+    """
+    Calculate the minimax optimal filter using the Remez exchange algorithm.
+
+    Calculate the filter-coefficients for the finite impulse response
+    (FIR) filter whose transfer function minimizes the maximum error
+    between the desired gain and the realized gain in the specified
+    frequency bands using the Remez exchange algorithm.
+
+    Parameters
+    ----------
+    numtaps : int
+        The desired number of taps in the filter. The number of taps is
+        the number of terms in the filter, or the filter order plus one.
+    bands : array_like
+        A monotonic sequence containing the band edges.
+        All elements must be non-negative and less than half the sampling
+        frequency as given by `fs`.
+    desired : array_like
+        A sequence half the size of bands containing the desired gain
+        in each of the specified bands.
+    weight : array_like, optional
+        A relative weighting to give to each band region. The length of
+        `weight` has to be half the length of `bands`.
+    Hz : scalar, optional, deprecated
+        The sampling frequency in Hz. Default is 1.
+
+        .. deprecated:: 1.0.0
+           `remez` keyword argument `Hz` is deprecated in favour of `fs` and
+           will be removed in SciPy 1.12.0.
+    type : {'bandpass', 'differentiator', 'hilbert'}, optional
+        The type of filter:
+
+          * 'bandpass' : flat response in bands. This is the default.
+
+          * 'differentiator' : frequency proportional response in bands.
+
+          * 'hilbert' : filter with odd symmetry, that is, type III
+                        (for even order) or type IV (for odd order)
+                        linear phase filters.
+
+    maxiter : int, optional
+        Maximum number of iterations of the algorithm. Default is 25.
+    grid_density : int, optional
+        Grid density. The dense grid used in `remez` is of size
+        ``(numtaps + 1) * grid_density``. Default is 16.
+    fs : float, optional
+        The sampling frequency of the signal.  Default is 1.
+
+    Returns
+    -------
+    out : ndarray
+        A rank-1 array containing the coefficients of the optimal
+        (in a minimax sense) filter.
+
+    See Also
+    --------
+    firls
+    firwin
+    firwin2
+    minimum_phase
+
+    References
+    ----------
+    .. [1] J. H. McClellan and T. W. Parks, "A unified approach to the
+           design of optimum FIR linear phase digital filters",
+           IEEE Trans. Circuit Theory, vol. CT-20, pp. 697-701, 1973.
+    .. [2] J. H. McClellan, T. W. Parks and L. R. Rabiner, "A Computer
+           Program for Designing Optimum FIR Linear Phase Digital
+           Filters", IEEE Trans. Audio Electroacoust., vol. AU-21,
+           pp. 506-525, 1973.
+
+    Examples
+    --------
+    In these examples, `remez` is used to design low-pass, high-pass,
+    band-pass and band-stop filters.  The parameters that define each filter
+    are the filter order, the band boundaries, the transition widths of the
+    boundaries, the desired gains in each band, and the sampling frequency.
+
+    We'll use a sample frequency of 22050 Hz in all the examples.  In each
+    example, the desired gain in each band is either 0 (for a stop band)
+    or 1 (for a pass band).
+
+    `freqz` is used to compute the frequency response of each filter, and
+    the utility function ``plot_response`` defined below is used to plot
+    the response.
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+
+    >>> fs = 22050   # Sample rate, Hz
+
+    >>> def plot_response(w, h, title):
+    ...     "Utility function to plot response functions"
+    ...     fig = plt.figure()
+    ...     ax = fig.add_subplot(111)
+    ...     ax.plot(w, 20*np.log10(np.abs(h)))
+    ...     ax.set_ylim(-40, 5)
+    ...     ax.grid(True)
+    ...     ax.set_xlabel('Frequency (Hz)')
+    ...     ax.set_ylabel('Gain (dB)')
+    ...     ax.set_title(title)
+
+    The first example is a low-pass filter, with cutoff frequency 8 kHz.
+    The filter length is 325, and the transition width from pass to stop
+    is 100 Hz.
+
+    >>> cutoff = 8000.0    # Desired cutoff frequency, Hz
+    >>> trans_width = 100  # Width of transition from pass to stop, Hz
+    >>> numtaps = 325      # Size of the FIR filter.
+    >>> taps = signal.remez(numtaps, [0, cutoff, cutoff + trans_width, 0.5*fs],
+    ...                     [1, 0], fs=fs)
+    >>> w, h = signal.freqz(taps, [1], worN=2000, fs=fs)
+    >>> plot_response(w, h, "Low-pass Filter")
+    >>> plt.show()
+
+    This example shows a high-pass filter:
+
+    >>> cutoff = 2000.0    # Desired cutoff frequency, Hz
+    >>> trans_width = 250  # Width of transition from pass to stop, Hz
+    >>> numtaps = 125      # Size of the FIR filter.
+    >>> taps = signal.remez(numtaps, [0, cutoff - trans_width, cutoff, 0.5*fs],
+    ...                     [0, 1], fs=fs)
+    >>> w, h = signal.freqz(taps, [1], worN=2000, fs=fs)
+    >>> plot_response(w, h, "High-pass Filter")
+    >>> plt.show()
+
+    This example shows a band-pass filter with a pass-band from 2 kHz to
+    5 kHz.  The transition width is 260 Hz and the length of the filter
+    is 63, which is smaller than in the other examples:
+
+    >>> band = [2000, 5000]  # Desired pass band, Hz
+    >>> trans_width = 260    # Width of transition from pass to stop, Hz
+    >>> numtaps = 63         # Size of the FIR filter.
+    >>> edges = [0, band[0] - trans_width, band[0], band[1],
+    ...          band[1] + trans_width, 0.5*fs]
+    >>> taps = signal.remez(numtaps, edges, [0, 1, 0], fs=fs)
+    >>> w, h = signal.freqz(taps, [1], worN=2000, fs=fs)
+    >>> plot_response(w, h, "Band-pass Filter")
+    >>> plt.show()
+
+    The low order leads to higher ripple and less steep transitions.
+
+    The next example shows a band-stop filter.
+
+    >>> band = [6000, 8000]  # Desired stop band, Hz
+    >>> trans_width = 200    # Width of transition from pass to stop, Hz
+    >>> numtaps = 175        # Size of the FIR filter.
+    >>> edges = [0, band[0] - trans_width, band[0], band[1],
+    ...          band[1] + trans_width, 0.5*fs]
+    >>> taps = signal.remez(numtaps, edges, [1, 0, 1], fs=fs)
+    >>> w, h = signal.freqz(taps, [1], worN=2000, fs=fs)
+    >>> plot_response(w, h, "Band-stop Filter")
+    >>> plt.show()
+
+    """
+    if Hz is None and fs is None:
+        fs = 1.0
+    elif Hz is not None:
+        if fs is not None:
+            raise ValueError("Values cannot be given for both 'Hz' and 'fs'.")
+        msg = ("'remez' keyword argument 'Hz' is deprecated in favour of 'fs'"
+               " and will be removed in SciPy 1.12.0.")
+        warnings.warn(msg, DeprecationWarning, stacklevel=2)
+        fs = Hz
+
+    # Convert type
+    try:
+        tnum = {'bandpass': 1, 'differentiator': 2, 'hilbert': 3}[type]
+    except KeyError as e:
+        raise ValueError("Type must be 'bandpass', 'differentiator', "
+                         "or 'hilbert'") from e
+
+    # Convert weight
+    if weight is None:
+        weight = [1] * len(desired)
+
+    bands = np.asarray(bands).copy()
+    return _sigtools._remez(numtaps, bands, desired, weight, tnum, fs,
+                            maxiter, grid_density)
+
+
+def firls(numtaps, bands, desired, weight=None, nyq=None, fs=None):
+    """
+    FIR filter design using least-squares error minimization.
+
+    Calculate the filter coefficients for the linear-phase finite
+    impulse response (FIR) filter which has the best approximation
+    to the desired frequency response described by `bands` and
+    `desired` in the least squares sense (i.e., the integral of the
+    weighted mean-squared error within the specified bands is
+    minimized).
+
+    Parameters
+    ----------
+    numtaps : int
+        The number of taps in the FIR filter. `numtaps` must be odd.
+    bands : array_like
+        A monotonic nondecreasing sequence containing the band edges in
+        Hz. All elements must be non-negative and less than or equal to
+        the Nyquist frequency given by `nyq`. The bands are specified as
+        frequency pairs, thus, if using a 1D array, its length must be
+        even, e.g., `np.array([0, 1, 2, 3, 4, 5])`. Alternatively, the
+        bands can be specified as an nx2 sized 2D array, where n is the
+        number of bands, e.g., `np.array([[0, 1], [2, 3], [4, 5]])`.
+    desired : array_like
+        A sequence the same size as `bands` containing the desired gain
+        at the start and end point of each band.
+    weight : array_like, optional
+        A relative weighting to give to each band region when solving
+        the least squares problem. `weight` has to be half the size of
+        `bands`.
+    nyq : float, optional, deprecated
+        This is the Nyquist frequency. Each frequency in `bands` must be
+        between 0 and `nyq` (inclusive). Default is 1.
+
+        .. deprecated:: 1.0.0
+           `firls` keyword argument `nyq` is deprecated in favour of `fs` and
+           will be removed in SciPy 1.12.0.
+    fs : float, optional
+        The sampling frequency of the signal. Each frequency in `bands`
+        must be between 0 and ``fs/2`` (inclusive). Default is 2.
+
+    Returns
+    -------
+    coeffs : ndarray
+        Coefficients of the optimal (in a least squares sense) FIR filter.
+
+    See Also
+    --------
+    firwin
+    firwin2
+    minimum_phase
+    remez
+
+    Notes
+    -----
+    This implementation follows the algorithm given in [1]_.
+    As noted there, least squares design has multiple advantages:
+
+        1. Optimal in a least-squares sense.
+        2. Simple, non-iterative method.
+        3. The general solution can be obtained by solving a linear
+           system of equations.
+        4. Allows the use of a frequency dependent weighting function.
+
+    This function constructs a Type I linear phase FIR filter, which
+    contains an odd number of `coeffs` satisfying for :math:`n < numtaps`:
+
+    .. math:: coeffs(n) = coeffs(numtaps - 1 - n)
+
+    The odd number of coefficients and filter symmetry avoid boundary
+    conditions that could otherwise occur at the Nyquist and 0 frequencies
+    (e.g., for Type II, III, or IV variants).
+
+    .. versionadded:: 0.18
+
+    References
+    ----------
+    .. [1] Ivan Selesnick, Linear-Phase FIR Filter Design By Least Squares.
+           OpenStax CNX. Aug 9, 2005.
+           http://cnx.org/contents/eb1ecb35-03a9-4610-ba87-41cd771c95f2@7
+
+    Examples
+    --------
+    We want to construct a band-pass filter. Note that the behavior in the
+    frequency ranges between our stop bands and pass bands is unspecified,
+    and thus may overshoot depending on the parameters of our filter:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+    >>> fig, axs = plt.subplots(2)
+    >>> fs = 10.0  # Hz
+    >>> desired = (0, 0, 1, 1, 0, 0)
+    >>> for bi, bands in enumerate(((0, 1, 2, 3, 4, 5), (0, 1, 2, 4, 4.5, 5))):
+    ...     fir_firls = signal.firls(73, bands, desired, fs=fs)
+    ...     fir_remez = signal.remez(73, bands, desired[::2], fs=fs)
+    ...     fir_firwin2 = signal.firwin2(73, bands, desired, fs=fs)
+    ...     hs = list()
+    ...     ax = axs[bi]
+    ...     for fir in (fir_firls, fir_remez, fir_firwin2):
+    ...         freq, response = signal.freqz(fir)
+    ...         hs.append(ax.semilogy(0.5*fs*freq/np.pi, np.abs(response))[0])
+    ...     for band, gains in zip(zip(bands[::2], bands[1::2]),
+    ...                            zip(desired[::2], desired[1::2])):
+    ...         ax.semilogy(band, np.maximum(gains, 1e-7), 'k--', linewidth=2)
+    ...     if bi == 0:
+    ...         ax.legend(hs, ('firls', 'remez', 'firwin2'),
+    ...                   loc='lower center', frameon=False)
+    ...     else:
+    ...         ax.set_xlabel('Frequency (Hz)')
+    ...     ax.grid(True)
+    ...     ax.set(title='Band-pass %d-%d Hz' % bands[2:4], ylabel='Magnitude')
+    ...
+    >>> fig.tight_layout()
+    >>> plt.show()
+
+    """  # noqa
+    nyq = 0.5 * _get_fs(fs, nyq)
+
+    numtaps = int(numtaps)
+    if numtaps % 2 == 0 or numtaps < 1:
+        raise ValueError("numtaps must be odd and >= 1")
+    M = (numtaps-1) // 2
+
+    # normalize bands 0->1 and make it 2 columns
+    nyq = float(nyq)
+    if nyq <= 0:
+        raise ValueError('nyq must be positive, got %s <= 0.' % nyq)
+    bands = np.asarray(bands).flatten() / nyq
+    if len(bands) % 2 != 0:
+        raise ValueError("bands must contain frequency pairs.")
+    if (bands < 0).any() or (bands > 1).any():
+        raise ValueError("bands must be between 0 and 1 relative to Nyquist")
+    bands.shape = (-1, 2)
+
+    # check remaining params
+    desired = np.asarray(desired).flatten()
+    if bands.size != desired.size:
+        raise ValueError("desired must have one entry per frequency, got %s "
+                         "gains for %s frequencies."
+                         % (desired.size, bands.size))
+    desired.shape = (-1, 2)
+    if (np.diff(bands) <= 0).any() or (np.diff(bands[:, 0]) < 0).any():
+        raise ValueError("bands must be monotonically nondecreasing and have "
+                         "width > 0.")
+    if (bands[:-1, 1] > bands[1:, 0]).any():
+        raise ValueError("bands must not overlap.")
+    if (desired < 0).any():
+        raise ValueError("desired must be non-negative.")
+    if weight is None:
+        weight = np.ones(len(desired))
+    weight = np.asarray(weight).flatten()
+    if len(weight) != len(desired):
+        raise ValueError("weight must be the same size as the number of "
+                         "band pairs (%s)." % (len(bands),))
+    if (weight < 0).any():
+        raise ValueError("weight must be non-negative.")
+
+    # Set up the linear matrix equation to be solved, Qa = b
+
+    # We can express Q(k,n) = 0.5 Q1(k,n) + 0.5 Q2(k,n)
+    # where Q1(k,n)=q(k-n) and Q2(k,n)=q(k+n), i.e. a Toeplitz plus Hankel.
+
+    # We omit the factor of 0.5 above, instead adding it during coefficient
+    # calculation.
+
+    # We also omit the 1/π from both Q and b equations, as they cancel
+    # during solving.
+
+    # We have that:
+    #     q(n) = 1/π ∫W(ω)cos(nω)dω (over 0->π)
+    # Using our normalization ω=πf and with a constant weight W over each
+    # interval f1->f2 we get:
+    #     q(n) = W∫cos(πnf)df (0->1) = Wf sin(πnf)/πnf
+    # integrated over each f1->f2 pair (i.e., value at f2 - value at f1).
+    n = np.arange(numtaps)[:, np.newaxis, np.newaxis]
+    q = np.dot(np.diff(np.sinc(bands * n) * bands, axis=2)[:, :, 0], weight)
+
+    # Now we assemble our sum of Toeplitz and Hankel
+    Q1 = toeplitz(q[:M+1])
+    Q2 = hankel(q[:M+1], q[M:])
+    Q = Q1 + Q2
+
+    # Now for b(n) we have that:
+    #     b(n) = 1/π ∫ W(ω)D(ω)cos(nω)dω (over 0->π)
+    # Using our normalization ω=πf and with a constant weight W over each
+    # interval and a linear term for D(ω) we get (over each f1->f2 interval):
+    #     b(n) = W ∫ (mf+c)cos(πnf)df
+    #          = f(mf+c)sin(πnf)/πnf + mf**2 cos(nπf)/(πnf)**2
+    # integrated over each f1->f2 pair (i.e., value at f2 - value at f1).
+    n = n[:M + 1]  # only need this many coefficients here
+    # Choose m and c such that we are at the start and end weights
+    m = (np.diff(desired, axis=1) / np.diff(bands, axis=1))
+    c = desired[:, [0]] - bands[:, [0]] * m
+    b = bands * (m*bands + c) * np.sinc(bands * n)
+    # Use L'Hospital's rule here for cos(nπf)/(πnf)**2 @ n=0
+    b[0] -= m * bands * bands / 2.
+    b[1:] += m * np.cos(n[1:] * np.pi * bands) / (np.pi * n[1:]) ** 2
+    b = np.dot(np.diff(b, axis=2)[:, :, 0], weight)
+
+    # Now we can solve the equation
+    try:  # try the fast way
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter('always')
+            a = solve(Q, b, assume_a="pos", check_finite=False)
+        for ww in w:
+            if (ww.category == LinAlgWarning and
+                    str(ww.message).startswith('Ill-conditioned matrix')):
+                raise LinAlgError(str(ww.message))
+    except LinAlgError:  # in case Q is rank deficient
+        # This is faster than pinvh, even though we don't explicitly use
+        # the symmetry here. gelsy was faster than gelsd and gelss in
+        # some non-exhaustive tests.
+        a = lstsq(Q, b, lapack_driver='gelsy')[0]
+
+    # make coefficients symmetric (linear phase)
+    coeffs = np.hstack((a[:0:-1], 2 * a[0], a[1:]))
+    return coeffs
+
+
+def _dhtm(mag):
+    """Compute the modified 1-D discrete Hilbert transform
+
+    Parameters
+    ----------
+    mag : ndarray
+        The magnitude spectrum. Should be 1-D with an even length, and
+        preferably a fast length for FFT/IFFT.
+
+    Returns
+    -------
+    recon : ndarray
+        The minimum-phase impulse response whose magnitude spectrum
+        matches `mag`.
+    """
+    # Adapted based on code by Niranjan Damera-Venkata,
+    # Brian L. Evans and Shawn R. McCaslin (see refs for `minimum_phase`)
+    sig = np.zeros(len(mag))
+    # Leave Nyquist and DC at 0, knowing np.abs(fftfreq(N)[midpt]) == 0.5
+    midpt = len(mag) // 2
+    sig[1:midpt] = 1
+    sig[midpt+1:] = -1
+    # eventually if we want to support complex filters, we will need a
+    # np.abs() on the mag inside the log, and should remove the .real
+    recon = ifft(mag * np.exp(fft(sig * ifft(np.log(mag))))).real
+    return recon
+
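+# Illustration (a sketch, not part of the module's code path): `_dhtm` takes
+# a strictly positive, even-length magnitude spectrum and returns a
+# minimum-phase impulse response with that magnitude, e.g.:
+#
+#     >>> import numpy as np
+#     >>> mag = np.abs(np.fft.fft([1., 2., 2., 1.], 16)) + 1e-10
+#     >>> h_min = _dhtm(mag)   # ndarray of length 16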
+
+def minimum_phase(h, method='homomorphic', n_fft=None):
+    """Convert a linear-phase FIR filter to minimum phase
+
+    Parameters
+    ----------
+    h : array
+        Linear-phase FIR filter coefficients.
+    method : {'hilbert', 'homomorphic'}
+        The method to use:
+
+            'homomorphic' (default)
+                This method [4]_ [5]_ works best with filters with an
+                odd number of taps, and the resulting minimum phase filter
+                will have a magnitude response that approximates the square
+                root of the original filter's magnitude response.
+
+            'hilbert'
+                This method [1]_ is designed to be used with equiripple
+                filters (e.g., from `remez`) with unity or zero gain
+                regions.
+
+    n_fft : int
+        The number of points to use for the FFT. Should be at least a
+        few times larger than the signal length (see Notes).
+
+    Returns
+    -------
+    h_minimum : array
+        The minimum-phase version of the filter, with length
+        ``(length(h) + 1) // 2``.
+
+    See Also
+    --------
+    firwin
+    firwin2
+    remez
+
+    Notes
+    -----
+    Both the Hilbert [1]_ and homomorphic [4]_ [5]_ methods require selection
+    of an FFT length to estimate the complex cepstrum of the filter.
+
+    In the case of the Hilbert method, the deviation from the ideal
+    spectrum ``epsilon`` is related to the number of stopband zeros
+    ``n_stop`` and FFT length ``n_fft`` as::
+
+        epsilon = 2. * n_stop / n_fft
+
+    For example, with 100 stopband zeros and a FFT length of 2048,
+    ``epsilon = 0.0976``. If we conservatively assume that the number of
+    stopband zeros is one less than the filter length, we can take the FFT
+    length to be the next power of 2 that satisfies ``epsilon=0.01`` as::
+
+        n_fft = 2 ** int(np.ceil(np.log2(2 * (len(h) - 1) / 0.01)))
+
+    This gives reasonable results for both the Hilbert and homomorphic
+    methods, and gives the value used when ``n_fft=None``.
+
+    Alternative implementations exist for creating minimum-phase filters,
+    including zero inversion [2]_ and spectral factorization [3]_ [4]_.
+    For more information, see:
+
+        http://dspguru.com/dsp/howtos/how-to-design-minimum-phase-fir-filters
+
+    References
+    ----------
+    .. [1] N. Damera-Venkata and B. L. Evans, "Optimal design of real and
+           complex minimum phase digital FIR filters," Acoustics, Speech,
+           and Signal Processing, 1999. Proceedings., 1999 IEEE International
+           Conference on, Phoenix, AZ, 1999, pp. 1145-1148 vol.3.
+           :doi:`10.1109/ICASSP.1999.756179`
+    .. [2] X. Chen and T. W. Parks, "Design of optimal minimum phase FIR
+           filters by direct factorization," Signal Processing,
+           vol. 10, no. 4, pp. 369-383, Jun. 1986.
+    .. [3] T. Saramaki, "Finite Impulse Response Filter Design," in
+           Handbook for Digital Signal Processing, chapter 4,
+           New York: Wiley-Interscience, 1993.
+    .. [4] J. S. Lim, Advanced Topics in Signal Processing.
+           Englewood Cliffs, N.J.: Prentice Hall, 1988.
+    .. [5] A. V. Oppenheim, R. W. Schafer, and J. R. Buck,
+           "Discrete-Time Signal Processing," 2nd edition.
+           Upper Saddle River, N.J.: Prentice Hall, 1999.
+
+    Examples
+    --------
+    Create an optimal linear-phase filter, then convert it to minimum phase:
+
+    >>> import numpy as np
+    >>> from scipy.signal import remez, minimum_phase, freqz, group_delay
+    >>> import matplotlib.pyplot as plt
+    >>> freq = [0, 0.2, 0.3, 1.0]
+    >>> desired = [1, 0]
+    >>> h_linear = remez(151, freq, desired, fs=2.)
+
+    Convert it to minimum phase:
+
+    >>> h_min_hom = minimum_phase(h_linear, method='homomorphic')
+    >>> h_min_hil = minimum_phase(h_linear, method='hilbert')
+
+    Compare the three filters:
+
+    >>> fig, axs = plt.subplots(4, figsize=(4, 8))
+    >>> for h, style, color in zip((h_linear, h_min_hom, h_min_hil),
+    ...                            ('-', '-', '--'), ('k', 'r', 'c')):
+    ...     w, H = freqz(h)
+    ...     w, gd = group_delay((h, 1))
+    ...     w /= np.pi
+    ...     axs[0].plot(h, color=color, linestyle=style)
+    ...     axs[1].plot(w, np.abs(H), color=color, linestyle=style)
+    ...     axs[2].plot(w, 20 * np.log10(np.abs(H)), color=color, linestyle=style)
+    ...     axs[3].plot(w, gd, color=color, linestyle=style)
+    >>> for ax in axs:
+    ...     ax.grid(True, color='0.5')
+    ...     ax.fill_between(freq[1:3], *ax.get_ylim(), color='#ffeeaa', zorder=1)
+    >>> axs[0].set(xlim=[0, len(h_linear) - 1], ylabel='Amplitude', xlabel='Samples')
+    >>> axs[1].legend(['Linear', 'Min-Hom', 'Min-Hil'], title='Phase')
+    >>> for ax, ylim in zip(axs[1:], ([0, 1.1], [-150, 10], [-60, 60])):
+    ...     ax.set(xlim=[0, 1], ylim=ylim, xlabel='Frequency')
+    >>> axs[1].set(ylabel='Magnitude')
+    >>> axs[2].set(ylabel='Magnitude (dB)')
+    >>> axs[3].set(ylabel='Group delay')
+    >>> plt.tight_layout()
+
+    """  # noqa
+    h = np.asarray(h)
+    if np.iscomplexobj(h):
+        raise ValueError('Complex filters not supported')
+    if h.ndim != 1 or h.size <= 2:
+        raise ValueError('h must be 1-D and more than 2 samples long')
+    n_half = len(h) // 2
+    if not np.allclose(h[-n_half:][::-1], h[:n_half]):
+        warnings.warn('h does not appear to be symmetric, conversion may '
+                      'fail', RuntimeWarning)
+    if not isinstance(method, str) or method not in \
+            ('homomorphic', 'hilbert',):
+        raise ValueError('method must be "homomorphic" or "hilbert", got %r'
+                         % (method,))
+    if n_fft is None:
+        n_fft = 2 ** int(np.ceil(np.log2(2 * (len(h) - 1) / 0.01)))
+    n_fft = int(n_fft)
+    if n_fft < len(h):
+        raise ValueError('n_fft must be at least len(h)==%s' % len(h))
+    if method == 'hilbert':
+        w = np.arange(n_fft) * (2 * np.pi / n_fft * n_half)
+        H = np.real(fft(h, n_fft) * np.exp(1j * w))
+        dp = max(H) - 1
+        ds = 0 - min(H)
+        S = 4. / (np.sqrt(1+dp+ds) + np.sqrt(1-dp+ds)) ** 2
+        H += ds
+        H *= S
+        H = np.sqrt(H, out=H)
+        H += 1e-10  # ensure that the log does not explode
+        h_minimum = _dhtm(H)
+    else:  # method == 'homomorphic'
+        # zero-pad; calculate the DFT
+        h_temp = np.abs(fft(h, n_fft))
+        # take 0.25*log(|H|**2) = 0.5*log(|H|)
+        h_temp += 1e-7 * h_temp[h_temp > 0].min()  # don't let log blow up
+        np.log(h_temp, out=h_temp)
+        h_temp *= 0.5
+        # IDFT
+        h_temp = ifft(h_temp).real
+        # multiply pointwise by the homomorphic filter
+        # lmin[n] = 2u[n] - d[n]
+        win = np.zeros(n_fft)
+        win[0] = 1
+        stop = (len(h) + 1) // 2
+        win[1:stop] = 2
+        if len(h) % 2:
+            win[stop] = 1
+        h_temp *= win
+        h_temp = ifft(np.exp(fft(h_temp)))
+        h_minimum = h_temp.real
+    n_out = n_half + len(h) % 2
+    return h_minimum[:n_out]
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/_lti_conversion.py b/__packaged__/coreml/.python_dependencies/scipy/signal/_lti_conversion.py
new file mode 100644
index 00000000..38b15e18
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/_lti_conversion.py
@@ -0,0 +1,533 @@
+"""
+ltisys -- a collection of functions to convert linear time invariant systems
+from one representation to another.
+"""
+import numpy
+import numpy as np
+from numpy import (r_, eye, atleast_2d, poly, dot,
+                   asarray, prod, zeros, array, outer)
+from scipy import linalg
+
+from ._filter_design import tf2zpk, zpk2tf, normalize
+
+
+__all__ = ['tf2ss', 'abcd_normalize', 'ss2tf', 'zpk2ss', 'ss2zpk',
+           'cont2discrete']
+
+
+def tf2ss(num, den):
+    r"""Transfer function to state-space representation.
+
+    Parameters
+    ----------
+    num, den : array_like
+        Sequences representing the coefficients of the numerator and
+        denominator polynomials, in order of descending degree. The
+        denominator needs to be at least as long as the numerator.
+
+    Returns
+    -------
+    A, B, C, D : ndarray
+        State space representation of the system, in controller canonical
+        form.
+
+    Examples
+    --------
+    Convert the transfer function:
+
+    .. math:: H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1}
+
+    >>> num = [1, 3, 3]
+    >>> den = [1, 2, 1]
+
+    to the state-space representation:
+
+    .. math::
+
+        \dot{\textbf{x}}(t) =
+        \begin{bmatrix} -2 & -1 \\ 1 & 0 \end{bmatrix} \textbf{x}(t) +
+        \begin{bmatrix} 1 \\ 0 \end{bmatrix} \textbf{u}(t) \\
+
+        \textbf{y}(t) = \begin{bmatrix} 1 & 2 \end{bmatrix} \textbf{x}(t) +
+        \begin{bmatrix} 1 \end{bmatrix} \textbf{u}(t)
+
+    >>> from scipy.signal import tf2ss
+    >>> A, B, C, D = tf2ss(num, den)
+    >>> A
+    array([[-2., -1.],
+           [ 1.,  0.]])
+    >>> B
+    array([[ 1.],
+           [ 0.]])
+    >>> C
+    array([[ 1.,  2.]])
+    >>> D
+    array([[ 1.]])
+    """
+    # Controller canonical state-space representation.
+    #  if M+1 = len(num) and K+1 = len(den) then we must have M <= K
+    #  states are found by asserting that X(s) = U(s) / D(s)
+    #  then Y(s) = N(s) * X(s)
+    #
+    #   A, B, C, and D follow quite naturally.
+    #
+    num, den = normalize(num, den)   # Strips zeros, checks arrays
+    nn = len(num.shape)
+    if nn == 1:
+        num = asarray([num], num.dtype)
+    M = num.shape[1]
+    K = len(den)
+    if M > K:
+        msg = "Improper transfer function. `num` is longer than `den`."
+        raise ValueError(msg)
+    if M == 0 or K == 0:  # Null system
+        return (array([], float), array([], float), array([], float),
+                array([], float))
+
+    # pad numerator to have the same number of columns as the denominator
+    num = r_['-1', zeros((num.shape[0], K - M), num.dtype), num]
+
+    if num.shape[-1] > 0:
+        D = atleast_2d(num[:, 0])
+
+    else:
+        # We don't assign it an empty array because this system
+        # is not 'null'. It just doesn't have a non-zero D
+        # matrix. Thus, it should have a non-zero shape so that
+        # it can be operated on by functions like 'ss2tf'
+        D = array([[0]], float)
+
+    if K == 1:
+        D = D.reshape(num.shape)
+
+        return (zeros((1, 1)), zeros((1, D.shape[1])),
+                zeros((D.shape[0], 1)), D)
+
+    frow = -array([den[1:]])
+    A = r_[frow, eye(K - 2, K - 1)]
+    B = eye(K - 1, 1)
+    C = num[:, 1:] - outer(num[:, 0], den[1:])
+    D = D.reshape((C.shape[0], B.shape[1]))
+
+    return A, B, C, D
+
+
+def _none_to_empty_2d(arg):
+    if arg is None:
+        return zeros((0, 0))
+    else:
+        return arg
+
+
+def _atleast_2d_or_none(arg):
+    if arg is not None:
+        return atleast_2d(arg)
+
+
+def _shape_or_none(M):
+    if M is not None:
+        return M.shape
+    else:
+        return (None,) * 2
+
+
+def _choice_not_none(*args):
+    for arg in args:
+        if arg is not None:
+            return arg
+
+
+def _restore(M, shape):
+    if M.shape == (0, 0):
+        return zeros(shape)
+    else:
+        if M.shape != shape:
+            raise ValueError("The input arrays have incompatible shapes.")
+        return M
+
+
+def abcd_normalize(A=None, B=None, C=None, D=None):
+    """Check state-space matrices and ensure they are 2-D.
+
+    If enough information on the system is provided, that is, enough
+    properly-shaped arrays are passed to the function, the missing ones
+    are built from this information, ensuring the correct number of
+    rows and columns. Otherwise a ValueError is raised.
+
+    Parameters
+    ----------
+    A, B, C, D : array_like, optional
+        State-space matrices. All of them are None (missing) by default.
+        See `ss2tf` for format.
+
+    Returns
+    -------
+    A, B, C, D : array
+        Properly shaped state-space matrices.
+
+    Raises
+    ------
+    ValueError
+        If not enough information on the system was provided.
+
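+    Examples
+    --------
+    A minimal sketch: with compatible `A`, `B` and `C`, the missing `D`
+    is built as an all-zeros matrix of the correct shape:
+
+    >>> from scipy.signal import abcd_normalize
+    >>> A, B, C, D = abcd_normalize(A=[[0, 1], [-2, -3]],
+    ...                             B=[[0], [1]], C=[[1, 0]])
+    >>> D
+    array([[0.]])
+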
+    """
+    A, B, C, D = map(_atleast_2d_or_none, (A, B, C, D))
+
+    MA, NA = _shape_or_none(A)
+    MB, NB = _shape_or_none(B)
+    MC, NC = _shape_or_none(C)
+    MD, ND = _shape_or_none(D)
+
+    p = _choice_not_none(MA, MB, NC)
+    q = _choice_not_none(NB, ND)
+    r = _choice_not_none(MC, MD)
+    if p is None or q is None or r is None:
+        raise ValueError("Not enough information on the system.")
+
+    A, B, C, D = map(_none_to_empty_2d, (A, B, C, D))
+    A = _restore(A, (p, p))
+    B = _restore(B, (p, q))
+    C = _restore(C, (r, p))
+    D = _restore(D, (r, q))
+
+    return A, B, C, D
+
+
+def ss2tf(A, B, C, D, input=0):
+    r"""State-space to transfer function.
+
+    A, B, C, D defines a linear state-space system with `p` inputs,
+    `q` outputs, and `n` state variables.
+
+    Parameters
+    ----------
+    A : array_like
+        State (or system) matrix of shape ``(n, n)``
+    B : array_like
+        Input matrix of shape ``(n, p)``
+    C : array_like
+        Output matrix of shape ``(q, n)``
+    D : array_like
+        Feedthrough (or feedforward) matrix of shape ``(q, p)``
+    input : int, optional
+        For multiple-input systems, the index of the input to use.
+
+    Returns
+    -------
+    num : 2-D ndarray
+        Numerator(s) of the resulting transfer function(s). `num` has one row
+        for each of the system's outputs. Each row is a sequence representation
+        of the numerator polynomial.
+    den : 1-D ndarray
+        Denominator of the resulting transfer function(s). `den` is a sequence
+        representation of the denominator polynomial.
+
+    Examples
+    --------
+    Convert the state-space representation:
+
+    .. math::
+
+        \dot{\textbf{x}}(t) =
+        \begin{bmatrix} -2 & -1 \\ 1 & 0 \end{bmatrix} \textbf{x}(t) +
+        \begin{bmatrix} 1 \\ 0 \end{bmatrix} \textbf{u}(t) \\
+
+        \textbf{y}(t) = \begin{bmatrix} 1 & 2 \end{bmatrix} \textbf{x}(t) +
+        \begin{bmatrix} 1 \end{bmatrix} \textbf{u}(t)
+
+    >>> A = [[-2, -1], [1, 0]]
+    >>> B = [[1], [0]]  # 2-D column vector
+    >>> C = [[1, 2]]    # 2-D row vector
+    >>> D = 1
+
+    to the transfer function:
+
+    .. math:: H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1}
+
+    >>> from scipy.signal import ss2tf
+    >>> ss2tf(A, B, C, D)
+    (array([[1., 3., 3.]]), array([ 1.,  2.,  1.]))
+    """
+    # transfer function is C (sI - A)**(-1) B + D
+
+    # Check consistency and make them all rank-2 arrays
+    A, B, C, D = abcd_normalize(A, B, C, D)
+
+    nout, nin = D.shape
+    if input >= nin:
+        raise ValueError("System does not have the input specified.")
+
+    # make SIMO from possibly MIMO system.
+    B = B[:, input:input + 1]
+    D = D[:, input:input + 1]
+
+    try:
+        den = poly(A)
+    except ValueError:
+        den = 1
+
+    if (prod(B.shape, axis=0) == 0) and (prod(C.shape, axis=0) == 0):
+        num = numpy.ravel(D)
+        if (prod(D.shape, axis=0) == 0) and (prod(A.shape, axis=0) == 0):
+            den = []
+        return num, den
+
+    num_states = A.shape[0]
+    type_test = A[:, 0] + B[:, 0] + C[0, :] + D + 0.0
+    num = numpy.empty((nout, num_states + 1), type_test.dtype)
+    for k in range(nout):
+        Ck = atleast_2d(C[k, :])
+        num[k] = poly(A - dot(B, Ck)) + (D[k] - 1) * den
+
+    return num, den
+
+
+def zpk2ss(z, p, k):
+    """Zero-pole-gain representation to state-space representation
+
+    Parameters
+    ----------
+    z, p : sequence
+        Zeros and poles.
+    k : float
+        System gain.
+
+    Returns
+    -------
+    A, B, C, D : ndarray
+        State space representation of the system, in controller canonical
+        form.
+
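+    Examples
+    --------
+    A minimal sketch: convert ``H(s) = 4(s + 1) / ((s + 2)(s + 3))``
+    (zeros ``[-1]``, poles ``[-2, -3]``, gain ``4``) to controller
+    canonical form:
+
+    >>> from scipy.signal import zpk2ss
+    >>> A, B, C, D = zpk2ss([-1], [-2, -3], 4)
+    >>> A
+    array([[-5., -6.],
+           [ 1.,  0.]])
+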
+    """
+    return tf2ss(*zpk2tf(z, p, k))
+
+
+def ss2zpk(A, B, C, D, input=0):
+    """State-space representation to zero-pole-gain representation.
+
+    A, B, C, D defines a linear state-space system with `p` inputs,
+    `q` outputs, and `n` state variables.
+
+    Parameters
+    ----------
+    A : array_like
+        State (or system) matrix of shape ``(n, n)``
+    B : array_like
+        Input matrix of shape ``(n, p)``
+    C : array_like
+        Output matrix of shape ``(q, n)``
+    D : array_like
+        Feedthrough (or feedforward) matrix of shape ``(q, p)``
+    input : int, optional
+        For multiple-input systems, the index of the input to use.
+
+    Returns
+    -------
+    z, p : sequence
+        Zeros and poles.
+    k : float
+        System gain.
+
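+    Examples
+    --------
+    A minimal sketch, inverting the `zpk2ss` example
+    (``H(s) = 4(s + 1) / ((s + 2)(s + 3))``):
+
+    >>> from scipy.signal import ss2zpk
+    >>> z, p, k = ss2zpk([[-5, -6], [1, 0]], [[1], [0]], [[4, 4]], [[0]])
+    >>> z
+    array([-1.])
+    >>> float(k)
+    4.0
+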
+    """
+    return tf2zpk(*ss2tf(A, B, C, D, input=input))
+
+
+def cont2discrete(system, dt, method="zoh", alpha=None):
+    """
+    Transform a continuous to a discrete state-space system.
+
+    Parameters
+    ----------
+    system : a tuple describing the system or an instance of `lti`
+        The following gives the number of elements in the tuple and
+        the interpretation:
+
+            * 1: (instance of `lti`)
+            * 2: (num, den)
+            * 3: (zeros, poles, gain)
+            * 4: (A, B, C, D)
+
+    dt : float
+        The discretization time step.
+    method : str, optional
+        Which method to use:
+
+            * gbt: generalized bilinear transformation
+            * bilinear: Tustin's approximation ("gbt" with alpha=0.5)
+            * euler: Euler (or forward differencing) method ("gbt" with alpha=0)
+            * backward_diff: Backwards differencing ("gbt" with alpha=1.0)
+            * zoh: zero-order hold (default)
+            * foh: first-order hold (*versionadded: 1.3.0*)
+            * impulse: equivalent impulse response (*versionadded: 1.3.0*)
+
+    alpha : float within [0, 1], optional
+        The generalized bilinear transformation weighting parameter, which
+        should only be specified with method="gbt", and is ignored otherwise.
+
+    Returns
+    -------
+    sysd : tuple containing the discrete system
+        Based on the input type, the output will be of the form
+
+        * (num, den, dt)   for transfer function input
+        * (zeros, poles, gain, dt)   for zeros-poles-gain input
+        * (A, B, C, D, dt) for state-space system input
+
+    Notes
+    -----
+    By default, the routine uses a Zero-Order Hold (zoh) method to perform
+    the transformation. Alternatively, a generalized bilinear transformation
+    may be used, which includes the common Tustin's bilinear approximation,
+    an Euler's method technique, or a backwards differencing technique.
+
+    The Zero-Order Hold (zoh) method is based on [1]_, the generalized bilinear
+    approximation is based on [2]_ and [3]_, the First-Order Hold (foh) method
+    is based on [4]_.
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Discretization#Discretization_of_linear_state_space_models
+
+    .. [2] http://techteach.no/publications/discretetime_signals_systems/discrete.pdf
+
+    .. [3] G. Zhang, X. Chen, and T. Chen, Digital redesign via the generalized
+        bilinear transformation, Int. J. Control, vol. 82, no. 4, pp. 741-754,
+        2009.
+        (https://www.mypolyuweb.hk/~magzhang/Research/ZCC09_IJC.pdf)
+
+    .. [4] G. F. Franklin, J. D. Powell, and M. L. Workman, Digital control
+        of dynamic systems, 3rd ed. Menlo Park, Calif: Addison-Wesley,
+        pp. 204-206, 1998.
+
+    Examples
+    --------
+    We can transform a continuous state-space system to a discrete one:
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.signal import cont2discrete, lti, dlti, dstep
+
+    Define a continuous state-space system.
+
+    >>> A = np.array([[0, 1],[-10., -3]])
+    >>> B = np.array([[0],[10.]])
+    >>> C = np.array([[1., 0]])
+    >>> D = np.array([[0.]])
+    >>> l_system = lti(A, B, C, D)
+    >>> t, x = l_system.step(T=np.linspace(0, 5, 100))
+    >>> fig, ax = plt.subplots()
+    >>> ax.plot(t, x, label='Continuous', linewidth=3)
+
+    Transform it to a discrete state-space system using several methods.
+
+    >>> dt = 0.1
+    >>> for method in ['zoh', 'bilinear', 'euler', 'backward_diff', 'foh', 'impulse']:
+    ...    d_system = cont2discrete((A, B, C, D), dt, method=method)
+    ...    s, x_d = dstep(d_system)
+    ...    ax.step(s, np.squeeze(x_d), label=method, where='post')
+    >>> ax.axis([t[0], t[-1], x[0], 1.4])
+    >>> ax.legend(loc='best')
+    >>> fig.tight_layout()
+    >>> plt.show()
+
+    """
+    if len(system) == 1:
+        return system.to_discrete()
+    if len(system) == 2:
+        sysd = cont2discrete(tf2ss(system[0], system[1]), dt, method=method,
+                             alpha=alpha)
+        return ss2tf(sysd[0], sysd[1], sysd[2], sysd[3]) + (dt,)
+    elif len(system) == 3:
+        sysd = cont2discrete(zpk2ss(system[0], system[1], system[2]), dt,
+                             method=method, alpha=alpha)
+        return ss2zpk(sysd[0], sysd[1], sysd[2], sysd[3]) + (dt,)
+    elif len(system) == 4:
+        a, b, c, d = system
+    else:
+        raise ValueError("First argument must either be a tuple of 2 (tf), "
+                         "3 (zpk), or 4 (ss) arrays.")
+
+    if method == 'gbt':
+        if alpha is None:
+            raise ValueError("Alpha parameter must be specified for the "
+                             "generalized bilinear transform (gbt) method")
+        elif alpha < 0 or alpha > 1:
+            raise ValueError("Alpha parameter must be within the interval "
+                             "[0,1] for the gbt method")
+
+    if method == 'gbt':
+        # This parameter is used repeatedly - compute once here
+        ima = np.eye(a.shape[0]) - alpha*dt*a
+        ad = linalg.solve(ima, np.eye(a.shape[0]) + (1.0-alpha)*dt*a)
+        bd = linalg.solve(ima, dt*b)
+
+        # Similarly solve for the output equation matrices
+        cd = linalg.solve(ima.transpose(), c.transpose())
+        cd = cd.transpose()
+        dd = d + alpha*np.dot(c, bd)
+
+    elif method == 'bilinear' or method == 'tustin':
+        return cont2discrete(system, dt, method="gbt", alpha=0.5)
+
+    elif method == 'euler' or method == 'forward_diff':
+        return cont2discrete(system, dt, method="gbt", alpha=0.0)
+
+    elif method == 'backward_diff':
+        return cont2discrete(system, dt, method="gbt", alpha=1.0)
+
+    elif method == 'zoh':
+        # Build an exponential matrix
+        em_upper = np.hstack((a, b))
+
+        # Need to stack zeros under the a and b matrices
+        em_lower = np.hstack((np.zeros((b.shape[1], a.shape[0])),
+                              np.zeros((b.shape[1], b.shape[1]))))
+
+        em = np.vstack((em_upper, em_lower))
+        ms = linalg.expm(dt * em)
+
+        # Dispose of the lower rows
+        ms = ms[:a.shape[0], :]
+
+        ad = ms[:, 0:a.shape[1]]
+        bd = ms[:, a.shape[1]:]
+
+        cd = c
+        dd = d
+
+    elif method == 'foh':
+        # Size parameters for convenience
+        n = a.shape[0]
+        m = b.shape[1]
+
+        # Build an exponential matrix similar to 'zoh' method
+        em_upper = linalg.block_diag(np.block([a, b]) * dt, np.eye(m))
+        em_lower = zeros((m, n + 2 * m))
+        em = np.block([[em_upper], [em_lower]])
+
+        ms = linalg.expm(em)
+
+        # Get the three blocks from upper rows
+        ms11 = ms[:n, 0:n]
+        ms12 = ms[:n, n:n + m]
+        ms13 = ms[:n, n + m:]
+
+        ad = ms11
+        bd = ms12 - ms13 + ms11 @ ms13
+        cd = c
+        dd = d + c @ ms13
+
+    elif method == 'impulse':
+        if not np.allclose(d, 0):
+            raise ValueError("Impulse method is only applicable"
+                             "to strictly proper systems")
+
+        ad = linalg.expm(a * dt)
+        bd = ad @ b * dt
+        cd = c
+        dd = c @ b * dt
+
+    else:
+        raise ValueError("Unknown transformation method '%s'" % method)
+
+    return ad, bd, cd, dd, dt
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/_ltisys.py b/__packaged__/coreml/.python_dependencies/scipy/signal/_ltisys.py
new file mode 100644
index 00000000..259b84a5
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/_ltisys.py
@@ -0,0 +1,3872 @@
+"""
+ltisys -- a collection of classes and functions for modeling linear
+time invariant systems.
+"""
+#
+# Author: Travis Oliphant 2001
+#
+# Feb 2010: Warren Weckesser
+#   Rewrote lsim2 and added impulse2.
+# Apr 2011: Jeffrey Armstrong 
+#   Added dlsim, dstep, dimpulse, cont2discrete
+# Aug 2013: Juan Luis Cano
+#   Rewrote abcd_normalize.
+# Jan 2015: Irvin Probst irvin DOT probst AT ensta-bretagne DOT fr
+#   Added pole placement
+# Mar 2015: Clancy Rowley
+#   Rewrote lsim
+# May 2015: Felix Berkenkamp
+#   Split lti class into subclasses
+#   Merged discrete systems and added dlti
+
+import warnings
+
+# np.linalg.qr fails on some tests with LinAlgError: zgeqrf returns -7
+# use scipy's qr until this is solved
+
+from scipy.linalg import qr as s_qr
+from scipy import integrate, interpolate, linalg
+from scipy.interpolate import interp1d
+from ._filter_design import (tf2zpk, zpk2tf, normalize, freqs, freqz, freqs_zpk,
+                            freqz_zpk)
+from ._lti_conversion import (tf2ss, abcd_normalize, ss2tf, zpk2ss, ss2zpk,
+                             cont2discrete)
+
+import numpy
+import numpy as np
+from numpy import (real, atleast_1d, atleast_2d, squeeze, asarray, zeros,
+                   dot, transpose, ones, zeros_like, linspace, nan_to_num)
+import copy
+
+__all__ = ['lti', 'dlti', 'TransferFunction', 'ZerosPolesGain', 'StateSpace',
+           'lsim', 'lsim2', 'impulse', 'impulse2', 'step', 'step2', 'bode',
+           'freqresp', 'place_poles', 'dlsim', 'dstep', 'dimpulse',
+           'dfreqresp', 'dbode']
+
+
+class LinearTimeInvariant:
+    def __new__(cls, *system, **kwargs):
+        """Create a new object, don't allow direct instances."""
+        if cls is LinearTimeInvariant:
+            raise NotImplementedError('The LinearTimeInvariant class is not '
+                                      'meant to be used directly, use `lti` '
+                                      'or `dlti` instead.')
+        return super(LinearTimeInvariant, cls).__new__(cls)
+
+    def __init__(self):
+        """
+        Initialize the `lti` baseclass.
+
+        The heavy lifting is done by the subclasses.
+        """
+        super().__init__()
+
+        self.inputs = None
+        self.outputs = None
+        self._dt = None
+
+    @property
+    def dt(self):
+        """Return the sampling time of the system, `None` for `lti` systems."""
+        return self._dt
+
+    @property
+    def _dt_dict(self):
+        if self.dt is None:
+            return {}
+        else:
+            return {'dt': self.dt}
+
+    @property
+    def zeros(self):
+        """Zeros of the system."""
+        return self.to_zpk().zeros
+
+    @property
+    def poles(self):
+        """Poles of the system."""
+        return self.to_zpk().poles
+
+    def _as_ss(self):
+        """Convert to `StateSpace` system, without copying.
+
+        Returns
+        -------
+        sys: StateSpace
+            The `StateSpace` system. If the class is already an instance of
+            `StateSpace` then this instance is returned.
+        """
+        if isinstance(self, StateSpace):
+            return self
+        else:
+            return self.to_ss()
+
+    def _as_zpk(self):
+        """Convert to `ZerosPolesGain` system, without copying.
+
+        Returns
+        -------
+        sys: ZerosPolesGain
+            The `ZerosPolesGain` system. If the class is already an instance of
+            `ZerosPolesGain` then this instance is returned.
+        """
+        if isinstance(self, ZerosPolesGain):
+            return self
+        else:
+            return self.to_zpk()
+
+    def _as_tf(self):
+        """Convert to `TransferFunction` system, without copying.
+
+        Returns
+        -------
+        sys: TransferFunction
+            The `TransferFunction` system. If the class is already an instance of
+            `TransferFunction` then this instance is returned.
+        """
+        if isinstance(self, TransferFunction):
+            return self
+        else:
+            return self.to_tf()
+
+
+class lti(LinearTimeInvariant):
+    r"""
+    Continuous-time linear time invariant system base class.
+
+    Parameters
+    ----------
+    *system : arguments
+        The `lti` class can be instantiated with either 2, 3 or 4 arguments.
+        The following gives the number of arguments and the corresponding
+        continuous-time subclass that is created:
+
+            * 2: `TransferFunction`:  (numerator, denominator)
+            * 3: `ZerosPolesGain`: (zeros, poles, gain)
+            * 4: `StateSpace`:  (A, B, C, D)
+
+        Each argument can be an array or a sequence.
+
+    See Also
+    --------
+    ZerosPolesGain, StateSpace, TransferFunction, dlti
+
+    Notes
+    -----
+    `lti` instances do not exist directly. Instead, `lti` creates an instance
+    of one of its subclasses: `StateSpace`, `TransferFunction` or
+    `ZerosPolesGain`.
+
+    If (numerator, denominator) is passed in for ``*system``, coefficients for
+    both the numerator and denominator should be specified in descending
+    exponent order (e.g., ``s^2 + 3s + 5`` would be represented as ``[1, 3,
+    5]``).
+
+    Changing the value of properties that are not directly part of the current
+    system representation (such as the `zeros` of a `StateSpace` system) is
+    very inefficient and may lead to numerical inaccuracies. It is better to
+    convert to the specific system representation first. For example, call
+    ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
+
+    Examples
+    --------
+    >>> from scipy import signal
+
+    >>> signal.lti(1, 2, 3, 4)
+    StateSpaceContinuous(
+    array([[1]]),
+    array([[2]]),
+    array([[3]]),
+    array([[4]]),
+    dt: None
+    )
+
+    Construct the transfer function
+    :math:`H(s) = \frac{5(s - 1)(s - 2)}{(s - 3)(s - 4)}`:
+
+    >>> signal.lti([1, 2], [3, 4], 5)
+    ZerosPolesGainContinuous(
+    array([1, 2]),
+    array([3, 4]),
+    5,
+    dt: None
+    )
+
+    Construct the transfer function :math:`H(s) = \frac{3s + 4}{1s + 2}`:
+
+    >>> signal.lti([3, 4], [1, 2])
+    TransferFunctionContinuous(
+    array([3., 4.]),
+    array([1., 2.]),
+    dt: None
+    )
+
+    """
+    def __new__(cls, *system):
+        """Create an instance of the appropriate subclass."""
+        if cls is lti:
+            N = len(system)
+            if N == 2:
+                return TransferFunctionContinuous.__new__(
+                    TransferFunctionContinuous, *system)
+            elif N == 3:
+                return ZerosPolesGainContinuous.__new__(
+                    ZerosPolesGainContinuous, *system)
+            elif N == 4:
+                return StateSpaceContinuous.__new__(StateSpaceContinuous,
+                                                    *system)
+            else:
+                raise ValueError("`system` needs to be an instance of `lti` "
+                                 "or have 2, 3 or 4 arguments.")
+        # __new__ was called from a subclass, let it call its own functions
+        return super(lti, cls).__new__(cls)
+
+    def __init__(self, *system):
+        """
+        Initialize the `lti` baseclass.
+
+        The heavy lifting is done by the subclasses.
+        """
+        super().__init__(*system)
+
+    def impulse(self, X0=None, T=None, N=None):
+        """
+        Return the impulse response of a continuous-time system.
+        See `impulse` for details.
+        """
+        return impulse(self, X0=X0, T=T, N=N)
+
+    def step(self, X0=None, T=None, N=None):
+        """
+        Return the step response of a continuous-time system.
+        See `step` for details.
+        """
+        return step(self, X0=X0, T=T, N=N)
+
+    def output(self, U, T, X0=None):
+        """
+        Return the response of a continuous-time system to input `U`.
+        See `lsim` for details.
+        """
+        return lsim(self, U, T, X0=X0)
+
+    def bode(self, w=None, n=100):
+        """
+        Calculate Bode magnitude and phase data of a continuous-time system.
+
+        Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude
+        [dB] and phase [deg]. See `bode` for details.
+
+        Examples
+        --------
+        >>> from scipy import signal
+        >>> import matplotlib.pyplot as plt
+
+        >>> sys = signal.TransferFunction([1], [1, 1])
+        >>> w, mag, phase = sys.bode()
+
+        >>> plt.figure()
+        >>> plt.semilogx(w, mag)    # Bode magnitude plot
+        >>> plt.figure()
+        >>> plt.semilogx(w, phase)  # Bode phase plot
+        >>> plt.show()
+
+        """
+        return bode(self, w=w, n=n)
+
+    def freqresp(self, w=None, n=10000):
+        """
+        Calculate the frequency response of a continuous-time system.
+
+        Returns a 2-tuple containing arrays of frequencies [rad/s] and
+        complex magnitude.
+        See `freqresp` for details.
+        """
+        return freqresp(self, w=w, n=n)
+
+    def to_discrete(self, dt, method='zoh', alpha=None):
+        """Return a discretized version of the current system.
+
+        Parameters: See `cont2discrete` for details.
+
+        Returns
+        -------
+        sys: instance of `dlti`
+        """
+        raise NotImplementedError('to_discrete is not implemented for this '
+                                  'system class.')
+
+
+class dlti(LinearTimeInvariant):
+    r"""
+    Discrete-time linear time invariant system base class.
+
+    Parameters
+    ----------
+    *system: arguments
+        The `dlti` class can be instantiated with either 2, 3 or 4 arguments.
+        The following gives the number of arguments and the corresponding
+        discrete-time subclass that is created:
+
+            * 2: `TransferFunction`:  (numerator, denominator)
+            * 3: `ZerosPolesGain`: (zeros, poles, gain)
+            * 4: `StateSpace`:  (A, B, C, D)
+
+        Each argument can be an array or a sequence.
+    dt: float, optional
+        Sampling time [s] of the discrete-time systems. Defaults to ``True``
+        (unspecified sampling time). Must be specified as a keyword argument,
+        for example, ``dt=0.1``.
+
+    See Also
+    --------
+    ZerosPolesGain, StateSpace, TransferFunction, lti
+
+    Notes
+    -----
+    `dlti` instances do not exist directly. Instead, `dlti` creates an instance
+    of one of its subclasses: `StateSpace`, `TransferFunction` or
+    `ZerosPolesGain`.
+
+    Changing the value of properties that are not directly part of the current
+    system representation (such as the `zeros` of a `StateSpace` system) is
+    very inefficient and may lead to numerical inaccuracies.  It is better to
+    convert to the specific system representation first. For example, call
+    ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
+
+    If (numerator, denominator) is passed in for ``*system``, coefficients for
+    both the numerator and denominator should be specified in descending
+    exponent order (e.g., ``z^2 + 3z + 5`` would be represented as ``[1, 3,
+    5]``).
+
+    .. versionadded:: 0.18.0
+
+    Examples
+    --------
+    >>> from scipy import signal
+
+    >>> signal.dlti(1, 2, 3, 4)
+    StateSpaceDiscrete(
+    array([[1]]),
+    array([[2]]),
+    array([[3]]),
+    array([[4]]),
+    dt: True
+    )
+
+    >>> signal.dlti(1, 2, 3, 4, dt=0.1)
+    StateSpaceDiscrete(
+    array([[1]]),
+    array([[2]]),
+    array([[3]]),
+    array([[4]]),
+    dt: 0.1
+    )
+
+    Construct the transfer function
+    :math:`H(z) = \frac{5(z - 1)(z - 2)}{(z - 3)(z - 4)}` with a sampling time
+    of 0.1 seconds:
+
+    >>> signal.dlti([1, 2], [3, 4], 5, dt=0.1)
+    ZerosPolesGainDiscrete(
+    array([1, 2]),
+    array([3, 4]),
+    5,
+    dt: 0.1
+    )
+
+    Construct the transfer function :math:`H(z) = \frac{3z + 4}{z + 2}` with
+    a sampling time of 0.1 seconds:
+
+    >>> signal.dlti([3, 4], [1, 2], dt=0.1)
+    TransferFunctionDiscrete(
+    array([3., 4.]),
+    array([1., 2.]),
+    dt: 0.1
+    )
+
+    """
+    def __new__(cls, *system, **kwargs):
+        """Create an instance of the appropriate subclass."""
+        if cls is dlti:
+            N = len(system)
+            if N == 2:
+                return TransferFunctionDiscrete.__new__(
+                    TransferFunctionDiscrete, *system, **kwargs)
+            elif N == 3:
+                return ZerosPolesGainDiscrete.__new__(ZerosPolesGainDiscrete,
+                                                      *system, **kwargs)
+            elif N == 4:
+                return StateSpaceDiscrete.__new__(StateSpaceDiscrete, *system,
+                                                  **kwargs)
+            else:
+                raise ValueError("`system` needs to be an instance of `dlti` "
+                                 "or have 2, 3 or 4 arguments.")
+        # __new__ was called from a subclass, let it call its own functions
+        return super(dlti, cls).__new__(cls)
+
+    def __init__(self, *system, **kwargs):
+        """
+        Initialize the `dlti` baseclass.
+
+        The heavy lifting is done by the subclasses.
+        """
+        dt = kwargs.pop('dt', True)
+        super().__init__(*system, **kwargs)
+
+        self.dt = dt
+
+    @property
+    def dt(self):
+        """Return the sampling time of the system."""
+        return self._dt
+
+    @dt.setter
+    def dt(self, dt):
+        self._dt = dt
+
+    def impulse(self, x0=None, t=None, n=None):
+        """
+        Return the impulse response of the discrete-time `dlti` system.
+        See `dimpulse` for details.
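+
+        Examples
+        --------
+        A minimal sketch for a simple first-order recursion (illustrative
+        values only):
+
+        >>> from scipy import signal
+        >>> sys = signal.dlti([1], [1, -0.5], dt=0.1)
+        >>> t, y = sys.impulse(n=10)
+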
+        """
+        return dimpulse(self, x0=x0, t=t, n=n)
+
+    def step(self, x0=None, t=None, n=None):
+        """
+        Return the step response of the discrete-time `dlti` system.
+        See `dstep` for details.
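+
+        Examples
+        --------
+        A minimal sketch for the same first-order recursion (illustrative
+        values only):
+
+        >>> from scipy import signal
+        >>> sys = signal.dlti([1], [1, -0.5], dt=0.1)
+        >>> t, y = sys.step(n=10)
+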
+        """
+        return dstep(self, x0=x0, t=t, n=n)
+
+    def output(self, u, t, x0=None):
+        """
+        Return the response of the discrete-time system to input `u`.
+        See `dlsim` for details.
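+
+        Examples
+        --------
+        A minimal sketch, driving a first-order recursion with a unit
+        input (illustrative values only; note that for transfer-function
+        systems `dlsim` returns a 2-tuple):
+
+        >>> import numpy as np
+        >>> from scipy import signal
+        >>> sys = signal.dlti([1], [1, -0.5], dt=0.1)
+        >>> t = np.arange(10) * sys.dt
+        >>> tout, y = sys.output(np.ones_like(t), t)
+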
+        """
+        return dlsim(self, u, t, x0=x0)
+
+    def bode(self, w=None, n=100):
+        r"""
+        Calculate Bode magnitude and phase data of a discrete-time system.
+
+        Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude
+        [dB] and phase [deg]. See `dbode` for details.
+
+        Examples
+        --------
+        >>> from scipy import signal
+        >>> import matplotlib.pyplot as plt
+
+        Construct the transfer function :math:`H(z) = \frac{1}{z^2 + 2z + 3}`
+        with sampling time 0.5s:
+
+        >>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.5)
+
+        Equivalent: ``signal.dbode(sys)``
+
+        >>> w, mag, phase = sys.bode()
+
+        >>> plt.figure()
+        >>> plt.semilogx(w, mag)    # Bode magnitude plot
+        >>> plt.figure()
+        >>> plt.semilogx(w, phase)  # Bode phase plot
+        >>> plt.show()
+
+        """
+        return dbode(self, w=w, n=n)
+
+    def freqresp(self, w=None, n=10000, whole=False):
+        """
+        Calculate the frequency response of a discrete-time system.
+
+        Returns a 2-tuple containing arrays of frequencies [rad/s] and the
+        complex-valued frequency response.
+        See `dfreqresp` for details.
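+
+        Examples
+        --------
+        A minimal sketch (illustrative values only):
+
+        >>> from scipy import signal
+        >>> sys = signal.dlti([1], [1, -0.5], dt=0.1)
+        >>> w, H = sys.freqresp()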
+
+        """
+        return dfreqresp(self, w=w, n=n, whole=whole)
+
+
+class TransferFunction(LinearTimeInvariant):
+    r"""Linear Time Invariant system class in transfer function form.
+
+    Represents the system as the continuous-time transfer function
+    :math:`H(s)=\sum_{i=0}^N b[N-i] s^i / \sum_{j=0}^M a[M-j] s^j` or the
+    discrete-time transfer function
+    :math:`H(z)=\sum_{i=0}^N b[N-i] z^i / \sum_{j=0}^M a[M-j] z^j`, where
+    :math:`b` are elements of the numerator `num`, :math:`a` are elements of
+    the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``.
+    `TransferFunction` systems inherit additional functionality from either
+    the `lti` or the `dlti` class, depending on which system representation
+    is used.
+
+    Parameters
+    ----------
+    *system: arguments
+        The `TransferFunction` class can be instantiated with 1 or 2
+        arguments. The following gives the number of input arguments and their
+        interpretation:
+
+            * 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or
+              `ZerosPolesGain`)
+            * 2: array_like: (numerator, denominator)
+    dt: float, optional
+        Sampling time [s] of the discrete-time systems. Defaults to `None`
+        (continuous-time). Must be specified as a keyword argument, for
+        example, ``dt=0.1``.
+
+    See Also
+    --------
+    ZerosPolesGain, StateSpace, lti, dlti
+    tf2ss, tf2zpk, tf2sos
+
+    Notes
+    -----
+    Changing the value of properties that are not part of the
+    `TransferFunction` system representation (such as the `A`, `B`, `C`, `D`
+    state-space matrices) is very inefficient and may lead to numerical
+    inaccuracies.  It is better to convert to the specific system
+    representation first. For example, call ``sys = sys.to_ss()`` before
+    accessing/changing the A, B, C, D system matrices.
+
+    If (numerator, denominator) is passed in for ``*system``, coefficients
+    for both the numerator and denominator should be specified in descending
+    exponent order (e.g. ``s^2 + 3s + 5`` or ``z^2 + 3z + 5`` would be
+    represented as ``[1, 3, 5]``).
+
+    Examples
+    --------
+    Construct the transfer function
+    :math:`H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1}`:
+
+    >>> from scipy import signal
+
+    >>> num = [1, 3, 3]
+    >>> den = [1, 2, 1]
+
+    >>> signal.TransferFunction(num, den)
+    TransferFunctionContinuous(
+    array([1., 3., 3.]),
+    array([1., 2., 1.]),
+    dt: None
+    )
+
+    Construct the transfer function
+    :math:`H(z) = \frac{z^2 + 3z + 3}{z^2 + 2z + 1}` with a sampling time of
+    0.1 seconds:
+
+    >>> signal.TransferFunction(num, den, dt=0.1)
+    TransferFunctionDiscrete(
+    array([1., 3., 3.]),
+    array([1., 2., 1.]),
+    dt: 0.1
+    )
+
+    """
+    def __new__(cls, *system, **kwargs):
+        """Handle object conversion if input is an instance of lti."""
+        if len(system) == 1 and isinstance(system[0], LinearTimeInvariant):
+            return system[0].to_tf()
+
+        # Choose whether to inherit from `lti` or from `dlti`
+        if cls is TransferFunction:
+            if kwargs.get('dt') is None:
+                return TransferFunctionContinuous.__new__(
+                    TransferFunctionContinuous,
+                    *system,
+                    **kwargs)
+            else:
+                return TransferFunctionDiscrete.__new__(
+                    TransferFunctionDiscrete,
+                    *system,
+                    **kwargs)
+
+        # No special conversion needed
+        return super(TransferFunction, cls).__new__(cls)
+
+    def __init__(self, *system, **kwargs):
+        """Initialize the state space LTI system."""
+        # Conversion of lti instances is handled in __new__
+        if isinstance(system[0], LinearTimeInvariant):
+            return
+
+        # Remove system arguments, not needed by parents anymore
+        super().__init__(**kwargs)
+
+        self._num = None
+        self._den = None
+
+        self.num, self.den = normalize(*system)
+
+    def __repr__(self):
+        """Return representation of the system's transfer function"""
+        return '{0}(\n{1},\n{2},\ndt: {3}\n)'.format(
+            self.__class__.__name__,
+            repr(self.num),
+            repr(self.den),
+            repr(self.dt),
+            )
+
+    @property
+    def num(self):
+        """Numerator of the `TransferFunction` system."""
+        return self._num
+
+    @num.setter
+    def num(self, num):
+        self._num = atleast_1d(num)
+
+        # Update dimensions
+        if len(self.num.shape) > 1:
+            self.outputs, self.inputs = self.num.shape
+        else:
+            self.outputs = 1
+            self.inputs = 1
+
+    @property
+    def den(self):
+        """Denominator of the `TransferFunction` system."""
+        return self._den
+
+    @den.setter
+    def den(self, den):
+        self._den = atleast_1d(den)
+
+    def _copy(self, system):
+        """
+        Copy the parameters of another `TransferFunction` object
+
+        Parameters
+        ----------
+        system : `TransferFunction`
+            The `TransferFunction` system that is to be copied
+
+        """
+        self.num = system.num
+        self.den = system.den
+
+    def to_tf(self):
+        """
+        Return a copy of the current `TransferFunction` system.
+
+        Returns
+        -------
+        sys : instance of `TransferFunction`
+            The current system (copy)
+
+        """
+        return copy.deepcopy(self)
+
+    def to_zpk(self):
+        """
+        Convert system representation to `ZerosPolesGain`.
+
+        Returns
+        -------
+        sys : instance of `ZerosPolesGain`
+            Zeros, poles, gain representation of the current system
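+
+        Examples
+        --------
+        A minimal sketch (illustrative coefficients):
+
+        >>> from scipy import signal
+        >>> zpk = signal.TransferFunction([1, 3, 2], [1, 5, 6]).to_zpk()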
+
+        """
+        return ZerosPolesGain(*tf2zpk(self.num, self.den),
+                              **self._dt_dict)
+
+    def to_ss(self):
+        """
+        Convert system representation to `StateSpace`.
+
+        Returns
+        -------
+        sys : instance of `StateSpace`
+            State space model of the current system
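+
+        Examples
+        --------
+        A minimal sketch (illustrative coefficients):
+
+        >>> from scipy import signal
+        >>> ss = signal.TransferFunction([1], [1, 2, 1]).to_ss()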
+
+        """
+        return StateSpace(*tf2ss(self.num, self.den),
+                          **self._dt_dict)
+
+    @staticmethod
+    def _z_to_zinv(num, den):
+        """Change a transfer function from the variable `z` to `z**-1`.
+
+        Parameters
+        ----------
+        num, den: 1d array_like
+            Sequences representing the coefficients of the numerator and
+            denominator polynomials, in order of descending degree of 'z'.
+            That is, ``5z**2 + 3z + 2`` is presented as ``[5, 3, 2]``.
+
+        Returns
+        -------
+        num, den: 1d array_like
+            Sequences representing the coefficients of the numerator and
+            denominator polynomials, in order of ascending degree of 'z**-1'.
+            That is, ``5 + 3 z**-1 + 2 z**-2`` is presented as ``[5, 3, 2]``.
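+
+        A minimal sketch of the zero-padding (hypothetical coefficients):
+
+        >>> from scipy.signal import TransferFunction
+        >>> num, den = TransferFunction._z_to_zinv([5, 3, 2], [1.0])
+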
+        """
+        diff = len(num) - len(den)
+        if diff > 0:
+            den = np.hstack((np.zeros(diff), den))
+        elif diff < 0:
+            num = np.hstack((np.zeros(-diff), num))
+        return num, den
+
+    @staticmethod
+    def _zinv_to_z(num, den):
+        """Change a transfer function from the variable `z` to `z**-1`.
+
+        Parameters
+        ----------
+        num, den: 1d array_like
+            Sequences representing the coefficients of the numerator and
+            denominator polynomials, in order of ascending degree of 'z**-1'.
+            That is, ``5 + 3 z**-1 + 2 z**-2`` is presented as ``[5, 3, 2]``.
+
+        Returns
+        -------
+        num, den: 1d array_like
+            Sequences representing the coefficients of the numerator and
+            denominator polynomials, in order of descending degree of 'z'.
+            That is, ``5z**2 + 3z + 2`` is presented as ``[5, 3, 2]``.
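+
+        A minimal sketch of the inverse padding (hypothetical coefficients):
+
+        >>> from scipy.signal import TransferFunction
+        >>> num, den = TransferFunction._zinv_to_z([5, 3, 2], [1.0])
+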
+        """
+        diff = len(num) - len(den)
+        if diff > 0:
+            den = np.hstack((den, np.zeros(diff)))
+        elif diff < 0:
+            num = np.hstack((num, np.zeros(-diff)))
+        return num, den
+
+
+class TransferFunctionContinuous(TransferFunction, lti):
+    r"""
+    Continuous-time Linear Time Invariant system in transfer function form.
+
+    Represents the system as the transfer function
+    :math:`H(s)=\sum_{i=0}^N b[N-i] s^i / \sum_{j=0}^M a[M-j] s^j`, where
+    :math:`b` are elements of the numerator `num`, :math:`a` are elements of
+    the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``.
+    Continuous-time `TransferFunction` systems inherit additional
+    functionality from the `lti` class.
+
+    Parameters
+    ----------
+    *system: arguments
+        The `TransferFunction` class can be instantiated with 1 or 2
+        arguments. The following gives the number of input arguments and their
+        interpretation:
+
+            * 1: `lti` system: (`StateSpace`, `TransferFunction` or
+              `ZerosPolesGain`)
+            * 2: array_like: (numerator, denominator)
+
+    See Also
+    --------
+    ZerosPolesGain, StateSpace, lti
+    tf2ss, tf2zpk, tf2sos
+
+    Notes
+    -----
+    Changing the value of properties that are not part of the
+    `TransferFunction` system representation (such as the `A`, `B`, `C`, `D`
+    state-space matrices) is very inefficient and may lead to numerical
+    inaccuracies.  It is better to convert to the specific system
+    representation first. For example, call ``sys = sys.to_ss()`` before
+    accessing/changing the A, B, C, D system matrices.
+
+    If (numerator, denominator) is passed in for ``*system``, coefficients
+    for both the numerator and denominator should be specified in descending
+    exponent order (e.g. ``s^2 + 3s + 5`` would be represented as
+    ``[1, 3, 5]``).
+
+    Examples
+    --------
+    Construct the transfer function
+    :math:`H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1}`:
+
+    >>> from scipy import signal
+
+    >>> num = [1, 3, 3]
+    >>> den = [1, 2, 1]
+
+    >>> signal.TransferFunction(num, den)
+    TransferFunctionContinuous(
+    array([1., 3., 3.]),
+    array([1., 2., 1.]),
+    dt: None
+    )
+
+    """
+
+    def to_discrete(self, dt, method='zoh', alpha=None):
+        """
+        Return the discretized `TransferFunction` system.
+
+        Parameters: See `cont2discrete` for details.
+
+        Returns
+        -------
+        sys: instance of `dlti` and `TransferFunction`
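+
+        Examples
+        --------
+        A minimal sketch, discretizing a first-order lag with a 0.1 s
+        sampling time (illustrative values only):
+
+        >>> from scipy import signal
+        >>> dsys = signal.TransferFunction([1], [1, 1]).to_discrete(0.1)
+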
+        """
+        return TransferFunction(*cont2discrete((self.num, self.den),
+                                               dt,
+                                               method=method,
+                                               alpha=alpha)[:-1],
+                                dt=dt)
+
+
+class TransferFunctionDiscrete(TransferFunction, dlti):
+    r"""
+    Discrete-time Linear Time Invariant system in transfer function form.
+
+    Represents the system as the transfer function
+    :math:`H(z)=\sum_{i=0}^N b[N-i] z^i / \sum_{j=0}^M a[M-j] z^j`, where
+    :math:`b` are elements of the numerator `num`, :math:`a` are elements of
+    the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``.
+    Discrete-time `TransferFunction` systems inherit additional functionality
+    from the `dlti` class.
+
+    Parameters
+    ----------
+    *system: arguments
+        The `TransferFunction` class can be instantiated with 1 or 2
+        arguments. The following gives the number of input arguments and their
+        interpretation:
+
+            * 1: `dlti` system: (`StateSpace`, `TransferFunction` or
+              `ZerosPolesGain`)
+            * 2: array_like: (numerator, denominator)
+    dt: float, optional
+        Sampling time [s] of the discrete-time systems. Defaults to `True`
+        (unspecified sampling time). Must be specified as a keyword argument,
+        for example, ``dt=0.1``.
+
+    See Also
+    --------
+    ZerosPolesGain, StateSpace, dlti
+    tf2ss, tf2zpk, tf2sos
+
+    Notes
+    -----
+    Changing the value of properties that are not part of the
+    `TransferFunction` system representation (such as the `A`, `B`, `C`, `D`
+    state-space matrices) is very inefficient and may lead to numerical
+    inaccuracies.
+
+    If (numerator, denominator) is passed in for ``*system``, coefficients
+    for both the numerator and denominator should be specified in descending
+    exponent order (e.g., ``z^2 + 3z + 5`` would be represented as
+    ``[1, 3, 5]``).
+
+    Examples
+    --------
+    Construct the transfer function
+    :math:`H(z) = \frac{z^2 + 3z + 3}{z^2 + 2z + 1}` with a sampling time of
+    0.5 seconds:
+
+    >>> from scipy import signal
+
+    >>> num = [1, 3, 3]
+    >>> den = [1, 2, 1]
+
+    >>> signal.TransferFunction(num, den, dt=0.5)
+    TransferFunctionDiscrete(
+    array([1., 3., 3.]),
+    array([1., 2., 1.]),
+    dt: 0.5
+    )
+
+    """
+    pass
+
+
+class ZerosPolesGain(LinearTimeInvariant):
+    r"""
+    Linear Time Invariant system class in zeros, poles, gain form.
+
+    Represents the system as the continuous- or discrete-time transfer function
+    :math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is
+    the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`.
+    `ZerosPolesGain` systems inherit additional functionality from either the
+    `lti` or the `dlti` class, depending on which system representation is
+    used.
+
+    Parameters
+    ----------
+    *system : arguments
+        The `ZerosPolesGain` class can be instantiated with 1 or 3
+        arguments. The following gives the number of input arguments and their
+        interpretation:
+
+            * 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or
+              `ZerosPolesGain`)
+            * 3: array_like: (zeros, poles, gain)
+    dt: float, optional
+        Sampling time [s] of the discrete-time systems. Defaults to `None`
+        (continuous-time). Must be specified as a keyword argument, for
+        example, ``dt=0.1``.
+
+
+    See Also
+    --------
+    TransferFunction, StateSpace, lti, dlti
+    zpk2ss, zpk2tf, zpk2sos
+
+    Notes
+    -----
+    Changing the value of properties that are not part of the
+    `ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D`
+    state-space matrices) is very inefficient and may lead to numerical
+    inaccuracies.  It is better to convert to the specific system
+    representation first. For example, call ``sys = sys.to_ss()`` before
+    accessing/changing the A, B, C, D system matrices.
+
+    Examples
+    --------
+    Construct the transfer function
+    :math:`H(s) = \frac{5(s - 1)(s - 2)}{(s - 3)(s - 4)}`:
+
+    >>> from scipy import signal
+
+    >>> signal.ZerosPolesGain([1, 2], [3, 4], 5)
+    ZerosPolesGainContinuous(
+    array([1, 2]),
+    array([3, 4]),
+    5,
+    dt: None
+    )
+
+    Construct the transfer function
+    :math:`H(z) = \frac{5(z - 1)(z - 2)}{(z - 3)(z - 4)}` with a sampling time
+    of 0.1 seconds:
+
+    >>> signal.ZerosPolesGain([1, 2], [3, 4], 5, dt=0.1)
+    ZerosPolesGainDiscrete(
+    array([1, 2]),
+    array([3, 4]),
+    5,
+    dt: 0.1
+    )
+
+    """
+    def __new__(cls, *system, **kwargs):
+        """Handle object conversion if input is an instance of `lti`"""
+        if len(system) == 1 and isinstance(system[0], LinearTimeInvariant):
+            return system[0].to_zpk()
+
+        # Choose whether to inherit from `lti` or from `dlti`
+        if cls is ZerosPolesGain:
+            if kwargs.get('dt') is None:
+                return ZerosPolesGainContinuous.__new__(
+                    ZerosPolesGainContinuous,
+                    *system,
+                    **kwargs)
+            else:
+                return ZerosPolesGainDiscrete.__new__(
+                    ZerosPolesGainDiscrete,
+                    *system,
+                    **kwargs)
+
+        # No special conversion needed
+        return super(ZerosPolesGain, cls).__new__(cls)
+
+    def __init__(self, *system, **kwargs):
+        """Initialize the zeros, poles, gain system."""
+        # Conversion of lti instances is handled in __new__
+        if isinstance(system[0], LinearTimeInvariant):
+            return
+
+        super().__init__(**kwargs)
+
+        self._zeros = None
+        self._poles = None
+        self._gain = None
+
+        self.zeros, self.poles, self.gain = system
+
+    def __repr__(self):
+        """Return representation of the `ZerosPolesGain` system."""
+        return '{0}(\n{1},\n{2},\n{3},\ndt: {4}\n)'.format(
+            self.__class__.__name__,
+            repr(self.zeros),
+            repr(self.poles),
+            repr(self.gain),
+            repr(self.dt),
+            )
+
+    @property
+    def zeros(self):
+        """Zeros of the `ZerosPolesGain` system."""
+        return self._zeros
+
+    @zeros.setter
+    def zeros(self, zeros):
+        self._zeros = atleast_1d(zeros)
+
+        # Update dimensions
+        if len(self.zeros.shape) > 1:
+            self.outputs, self.inputs = self.zeros.shape
+        else:
+            self.outputs = 1
+            self.inputs = 1
+
+    @property
+    def poles(self):
+        """Poles of the `ZerosPolesGain` system."""
+        return self._poles
+
+    @poles.setter
+    def poles(self, poles):
+        self._poles = atleast_1d(poles)
+
+    @property
+    def gain(self):
+        """Gain of the `ZerosPolesGain` system."""
+        return self._gain
+
+    @gain.setter
+    def gain(self, gain):
+        self._gain = gain
+
+    def _copy(self, system):
+        """
+        Copy the parameters of another `ZerosPolesGain` system.
+
+        Parameters
+        ----------
+        system : instance of `ZerosPolesGain`
+            The zeros, poles gain system that is to be copied
+
+        """
+        self.poles = system.poles
+        self.zeros = system.zeros
+        self.gain = system.gain
+
+    def to_tf(self):
+        """
+        Convert system representation to `TransferFunction`.
+
+        Returns
+        -------
+        sys : instance of `TransferFunction`
+            Transfer function of the current system
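+
+        Examples
+        --------
+        A minimal sketch (illustrative zeros, poles and gain):
+
+        >>> from scipy import signal
+        >>> tf = signal.ZerosPolesGain([-1], [-2, -3], 4).to_tf()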
+
+        """
+        return TransferFunction(*zpk2tf(self.zeros, self.poles, self.gain),
+                                **self._dt_dict)
+
+    def to_zpk(self):
+        """
+        Return a copy of the current `ZerosPolesGain` system.
+
+        Returns
+        -------
+        sys : instance of `ZerosPolesGain`
+            The current system (copy)
+
+        """
+        return copy.deepcopy(self)
+
+    def to_ss(self):
+        """
+        Convert system representation to `StateSpace`.
+
+        Returns
+        -------
+        sys : instance of `StateSpace`
+            State space model of the current system
+
+        """
+        return StateSpace(*zpk2ss(self.zeros, self.poles, self.gain),
+                          **self._dt_dict)
+
+
+class ZerosPolesGainContinuous(ZerosPolesGain, lti):
+    r"""
+    Continuous-time Linear Time Invariant system in zeros, poles, gain form.
+
+    Represents the system as the continuous time transfer function
+    :math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is
+    the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`.
+    Continuous-time `ZerosPolesGain` systems inherit additional functionality
+    from the `lti` class.
+
+    Parameters
+    ----------
+    *system : arguments
+        The `ZerosPolesGain` class can be instantiated with 1 or 3
+        arguments. The following gives the number of input arguments and their
+        interpretation:
+
+            * 1: `lti` system: (`StateSpace`, `TransferFunction` or
+              `ZerosPolesGain`)
+            * 3: array_like: (zeros, poles, gain)
+
+    See Also
+    --------
+    TransferFunction, StateSpace, lti
+    zpk2ss, zpk2tf, zpk2sos
+
+    Notes
+    -----
+    Changing the value of properties that are not part of the
+    `ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D`
+    state-space matrices) is very inefficient and may lead to numerical
+    inaccuracies.  It is better to convert to the specific system
+    representation first. For example, call ``sys = sys.to_ss()`` before
+    accessing/changing the A, B, C, D system matrices.
+
+    Examples
+    --------
+    Construct the transfer function
+    :math:`H(s)=\frac{5(s - 1)(s - 2)}{(s - 3)(s - 4)}`:
+
+    >>> from scipy import signal
+
+    >>> signal.ZerosPolesGain([1, 2], [3, 4], 5)
+    ZerosPolesGainContinuous(
+    array([1, 2]),
+    array([3, 4]),
+    5,
+    dt: None
+    )
+
+    """
+
+    def to_discrete(self, dt, method='zoh', alpha=None):
+        """
+        Return the discretized `ZerosPolesGain` system.
+
+        Parameters: See `cont2discrete` for details.
+
+        Returns
+        -------
+        sys: instance of `dlti` and `ZerosPolesGain`
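+
+        Examples
+        --------
+        A minimal sketch (illustrative zeros, poles and gain):
+
+        >>> from scipy import signal
+        >>> dsys = signal.ZerosPolesGain([-1], [-2], 1).to_discrete(0.1)
+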
+        """
+        return ZerosPolesGain(
+            *cont2discrete((self.zeros, self.poles, self.gain),
+                           dt,
+                           method=method,
+                           alpha=alpha)[:-1],
+            dt=dt)
+
+
+class ZerosPolesGainDiscrete(ZerosPolesGain, dlti):
+    r"""
+    Discrete-time Linear Time Invariant system in zeros, poles, gain form.
+
+    Represents the system as the discrete-time transfer function
+    :math:`H(z)=k \prod_i (z - q[i]) / \prod_j (z - p[j])`, where :math:`k` is
+    the `gain`, :math:`q` are the `zeros` and :math:`p` are the `poles`.
+    Discrete-time `ZerosPolesGain` systems inherit additional functionality
+    from the `dlti` class.
+
+    Parameters
+    ----------
+    *system : arguments
+        The `ZerosPolesGain` class can be instantiated with 1 or 3
+        arguments. The following gives the number of input arguments and their
+        interpretation:
+
+            * 1: `dlti` system: (`StateSpace`, `TransferFunction` or
+              `ZerosPolesGain`)
+            * 3: array_like: (zeros, poles, gain)
+    dt: float, optional
+        Sampling time [s] of the discrete-time systems. Defaults to `True`
+        (unspecified sampling time). Must be specified as a keyword argument,
+        for example, ``dt=0.1``.
+
+    See Also
+    --------
+    TransferFunction, StateSpace, dlti
+    zpk2ss, zpk2tf, zpk2sos
+
+    Notes
+    -----
+    Changing the value of properties that are not part of the
+    `ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D`
+    state-space matrices) is very inefficient and may lead to numerical
+    inaccuracies.  It is better to convert to the specific system
+    representation first. For example, call ``sys = sys.to_ss()`` before
+    accessing/changing the A, B, C, D system matrices.
+
+    Examples
+    --------
+    Construct the transfer function
+    :math:`H(s) = \frac{5(s - 1)(s - 2)}{(s - 3)(s - 4)}` (without ``dt``,
+    the continuous-time subclass is returned):
+
+    >>> from scipy import signal
+
+    >>> signal.ZerosPolesGain([1, 2], [3, 4], 5)
+    ZerosPolesGainContinuous(
+    array([1, 2]),
+    array([3, 4]),
+    5,
+    dt: None
+    )
+
+    Construct the transfer function
+    :math:`H(z) = \frac{5(z - 1)(z - 2)}{(z - 3)(z - 4)}` with a sampling time
+    of 0.1 seconds:
+
+    >>> signal.ZerosPolesGain([1, 2], [3, 4], 5, dt=0.1)
+    ZerosPolesGainDiscrete(
+    array([1, 2]),
+    array([3, 4]),
+    5,
+    dt: 0.1
+    )
+
+    """
+    pass
+
+
+def _atleast_2d_or_none(arg):
+    if arg is not None:
+        return atleast_2d(arg)
+
+
+class StateSpace(LinearTimeInvariant):
+    r"""
+    Linear Time Invariant system in state-space form.
+
+    Represents the system as the continuous-time, first order differential
+    equation :math:`\dot{x} = A x + B u` or the discrete-time difference
+    equation :math:`x[k+1] = A x[k] + B u[k]`. `StateSpace` systems
+    inherit additional functionality from either the `lti` or the `dlti`
+    class, depending on which system representation is used.
+
+    Parameters
+    ----------
+    *system: arguments
+        The `StateSpace` class can be instantiated with 1 or 4 arguments.
+        The following gives the number of input arguments and their
+        interpretation:
+
+            * 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or
+              `ZerosPolesGain`)
+            * 4: array_like: (A, B, C, D)
+    dt: float, optional
+        Sampling time [s] of the discrete-time systems. Defaults to `None`
+        (continuous-time). Must be specified as a keyword argument, for
+        example, ``dt=0.1``.
+
+    See Also
+    --------
+    TransferFunction, ZerosPolesGain, lti, dlti
+    ss2zpk, ss2tf, zpk2sos
+
+    Notes
+    -----
+    Changing the value of properties that are not part of the
+    `StateSpace` system representation (such as `zeros` or `poles`) is very
+    inefficient and may lead to numerical inaccuracies.  It is better to
+    convert to the specific system representation first. For example, call
+    ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
+
+    Examples
+    --------
+    >>> from scipy import signal
+    >>> import numpy as np
+    >>> a = np.array([[0, 1], [0, 0]])
+    >>> b = np.array([[0], [1]])
+    >>> c = np.array([[1, 0]])
+    >>> d = np.array([[0]])
+
+    >>> sys = signal.StateSpace(a, b, c, d)
+    >>> print(sys)
+    StateSpaceContinuous(
+    array([[0, 1],
+           [0, 0]]),
+    array([[0],
+           [1]]),
+    array([[1, 0]]),
+    array([[0]]),
+    dt: None
+    )
+
+    >>> sys.to_discrete(0.1)
+    StateSpaceDiscrete(
+    array([[1. , 0.1],
+           [0. , 1. ]]),
+    array([[0.005],
+           [0.1  ]]),
+    array([[1, 0]]),
+    array([[0]]),
+    dt: 0.1
+    )
+
+    >>> a = np.array([[1, 0.1], [0, 1]])
+    >>> b = np.array([[0.005], [0.1]])
+
+    >>> signal.StateSpace(a, b, c, d, dt=0.1)
+    StateSpaceDiscrete(
+    array([[1. , 0.1],
+           [0. , 1. ]]),
+    array([[0.005],
+           [0.1  ]]),
+    array([[1, 0]]),
+    array([[0]]),
+    dt: 0.1
+    )
+
+    """
+
+    # Override NumPy binary operations and ufuncs
+    __array_priority__ = 100.0
+    __array_ufunc__ = None
+
+    def __new__(cls, *system, **kwargs):
+        """Create new StateSpace object and settle inheritance."""
+        # Handle object conversion if input is an instance of `lti`
+        if len(system) == 1 and isinstance(system[0], LinearTimeInvariant):
+            return system[0].to_ss()
+
+        # Choose whether to inherit from `lti` or from `dlti`
+        if cls is StateSpace:
+            if kwargs.get('dt') is None:
+                return StateSpaceContinuous.__new__(StateSpaceContinuous,
+                                                    *system, **kwargs)
+            else:
+                return StateSpaceDiscrete.__new__(StateSpaceDiscrete,
+                                                  *system, **kwargs)
+
+        # No special conversion needed
+        return super(StateSpace, cls).__new__(cls)
+
+    def __init__(self, *system, **kwargs):
+        """Initialize the state space lti/dlti system."""
+        # Conversion of lti instances is handled in __new__
+        if isinstance(system[0], LinearTimeInvariant):
+            return
+
+        # Remove system arguments, not needed by parents anymore
+        super().__init__(**kwargs)
+
+        self._A = None
+        self._B = None
+        self._C = None
+        self._D = None
+
+        self.A, self.B, self.C, self.D = abcd_normalize(*system)
+
+    def __repr__(self):
+        """Return representation of the `StateSpace` system."""
+        return '{0}(\n{1},\n{2},\n{3},\n{4},\ndt: {5}\n)'.format(
+            self.__class__.__name__,
+            repr(self.A),
+            repr(self.B),
+            repr(self.C),
+            repr(self.D),
+            repr(self.dt),
+            )
+
+    def _check_binop_other(self, other):
+        return isinstance(other, (StateSpace, np.ndarray, float, complex,
+                                  np.number, int))
+
+    def __mul__(self, other):
+        """
+        Post-multiply another system or a scalar.
+
+        Handles multiplication of systems in the sense of a frequency-domain
+        multiplication: given two systems E1(s) and E2(s), applying their
+        product H(s) = E1(s) * E2(s) to an input U(s) is equivalent to first
+        applying E2(s) and then E1(s).
+
+        Notes
+        -----
+        For SISO systems the order of system application does not matter.
+        However, for MIMO systems, where the two systems are matrices, the
+        order above ensures standard matrix multiplication rules apply.
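+
+        Examples
+        --------
+        A minimal sketch (illustrative matrices only): cascading an
+        integrator with itself yields a double integrator.
+
+        >>> from scipy import signal
+        >>> integrator = signal.StateSpace([[0.]], [[1.]], [[1.]], [[0.]])
+        >>> double_integrator = integrator * integrator
+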
+        """
+        if not self._check_binop_other(other):
+            return NotImplemented
+
+        if isinstance(other, StateSpace):
+            # Disallow mix of discrete and continuous systems.
+            if type(other) is not type(self):
+                return NotImplemented
+
+            if self.dt != other.dt:
+                raise TypeError('Cannot multiply systems with different `dt`.')
+
+            n1 = self.A.shape[0]
+            n2 = other.A.shape[0]
+
+            # Interconnection of systems
+            # x1' = A1 x1 + B1 u1
+            # y1  = C1 x1 + D1 u1
+            # x2' = A2 x2 + B2 y1
+            # y2  = C2 x2 + D2 y1
+            #
+            # Plugging in with u1 = y2 yields
+            # [x1']   [A1 B1*C2 ] [x1]   [B1*D2]
+            # [x2'] = [0  A2    ] [x2] + [B2   ] u2
+            #                    [x1]
+            #  y2   = [C1 D1*C2] [x2] + D1*D2 u2
+            a = np.vstack((np.hstack((self.A, np.dot(self.B, other.C))),
+                           np.hstack((zeros((n2, n1)), other.A))))
+            b = np.vstack((np.dot(self.B, other.D), other.B))
+            c = np.hstack((self.C, np.dot(self.D, other.C)))
+            d = np.dot(self.D, other.D)
+        else:
+            # Assume that other is a scalar / matrix
+            # For post multiplication the input gets scaled
+            a = self.A
+            b = np.dot(self.B, other)
+            c = self.C
+            d = np.dot(self.D, other)
+
+        common_dtype = np.result_type(a.dtype, b.dtype, c.dtype, d.dtype)
+        return StateSpace(np.asarray(a, dtype=common_dtype),
+                          np.asarray(b, dtype=common_dtype),
+                          np.asarray(c, dtype=common_dtype),
+                          np.asarray(d, dtype=common_dtype),
+                          **self._dt_dict)
+
+    def __rmul__(self, other):
+        """Pre-multiply a scalar or matrix (but not StateSpace)"""
+        if not self._check_binop_other(other) or isinstance(other, StateSpace):
+            return NotImplemented
+
+        # For pre-multiplication only the output gets scaled
+        a = self.A
+        b = self.B
+        c = np.dot(other, self.C)
+        d = np.dot(other, self.D)
+
+        common_dtype = np.result_type(a.dtype, b.dtype, c.dtype, d.dtype)
+        return StateSpace(np.asarray(a, dtype=common_dtype),
+                          np.asarray(b, dtype=common_dtype),
+                          np.asarray(c, dtype=common_dtype),
+                          np.asarray(d, dtype=common_dtype),
+                          **self._dt_dict)
+
+    def __neg__(self):
+        """Negate the system (equivalent to pre-multiplying by -1)."""
+        return StateSpace(self.A, self.B, -self.C, -self.D, **self._dt_dict)
+
+    def __add__(self, other):
+        """
+        Adds two systems in the sense of frequency domain addition.
+        """
+        if not self._check_binop_other(other):
+            return NotImplemented
+
+        if isinstance(other, StateSpace):
+            # Disallow mix of discrete and continuous systems.
+            if type(other) is not type(self):
+                raise TypeError('Cannot add {} and {}'.format(type(self),
+                                                              type(other)))
+
+            if self.dt != other.dt:
+                raise TypeError('Cannot add systems with different `dt`.')
+            # Interconnection of systems
+            # x1' = A1 x1 + B1 u
+            # y1  = C1 x1 + D1 u
+            # x2' = A2 x2 + B2 u
+            # y2  = C2 x2 + D2 u
+            # y   = y1 + y2
+            #
+            # Plugging in yields
+            # [x1']   [A1 0 ] [x1]   [B1]
+            # [x2'] = [0  A2] [x2] + [B2] u
+            #                 [x1]
+            #  y    = [C1 C2] [x2] + [D1 + D2] u
+            a = linalg.block_diag(self.A, other.A)
+            b = np.vstack((self.B, other.B))
+            c = np.hstack((self.C, other.C))
+            d = self.D + other.D
+        else:
+            other = np.atleast_2d(other)
+            if self.D.shape == other.shape:
+                # A scalar/matrix is really just a static system (A=0, B=0, C=0)
+                a = self.A
+                b = self.B
+                c = self.C
+                d = self.D + other
+            else:
+                raise ValueError("Cannot add systems with incompatible "
+                                 "dimensions ({} and {})"
+                                 .format(self.D.shape, other.shape))
+
+        common_dtype = np.result_type(a.dtype, b.dtype, c.dtype, d.dtype)
+        return StateSpace(np.asarray(a, dtype=common_dtype),
+                          np.asarray(b, dtype=common_dtype),
+                          np.asarray(c, dtype=common_dtype),
+                          np.asarray(d, dtype=common_dtype),
+                          **self._dt_dict)
+
+    def __sub__(self, other):
+        if not self._check_binop_other(other):
+            return NotImplemented
+
+        return self.__add__(-other)
+
+    def __radd__(self, other):
+        if not self._check_binop_other(other):
+            return NotImplemented
+
+        return self.__add__(other)
+
+    def __rsub__(self, other):
+        if not self._check_binop_other(other):
+            return NotImplemented
+
+        return (-self).__add__(other)
+
+    def __truediv__(self, other):
+        """
+        Divide by a scalar.
+        """
+        # Division by non-StateSpace scalars
+        if not self._check_binop_other(other) or isinstance(other, StateSpace):
+            return NotImplemented
+
+        if isinstance(other, np.ndarray) and other.ndim > 0:
+            # It's ambiguous what this means, so disallow it
+            raise ValueError("Cannot divide StateSpace by non-scalar numpy arrays")
+
+        return self.__mul__(1/other)
+
+    @property
+    def A(self):
+        """State matrix of the `StateSpace` system."""
+        return self._A
+
+    @A.setter
+    def A(self, A):
+        self._A = _atleast_2d_or_none(A)
+
+    @property
+    def B(self):
+        """Input matrix of the `StateSpace` system."""
+        return self._B
+
+    @B.setter
+    def B(self, B):
+        self._B = _atleast_2d_or_none(B)
+        self.inputs = self.B.shape[-1]
+
+    @property
+    def C(self):
+        """Output matrix of the `StateSpace` system."""
+        return self._C
+
+    @C.setter
+    def C(self, C):
+        self._C = _atleast_2d_or_none(C)
+        self.outputs = self.C.shape[0]
+
+    @property
+    def D(self):
+        """Feedthrough matrix of the `StateSpace` system."""
+        return self._D
+
+    @D.setter
+    def D(self, D):
+        self._D = _atleast_2d_or_none(D)
+
+    def _copy(self, system):
+        """
+        Copy the parameters of another `StateSpace` system.
+
+        Parameters
+        ----------
+        system : instance of `StateSpace`
+            The state-space system that is to be copied
+
+        """
+        self.A = system.A
+        self.B = system.B
+        self.C = system.C
+        self.D = system.D
+
+    def to_tf(self, **kwargs):
+        """
+        Convert system representation to `TransferFunction`.
+
+        Parameters
+        ----------
+        kwargs : dict, optional
+            Additional keywords passed to `ss2zpk`
+
+        Returns
+        -------
+        sys : instance of `TransferFunction`
+            Transfer function of the current system
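+
+        Examples
+        --------
+        A minimal sketch for the double-integrator matrices (illustrative
+        values only):
+
+        >>> from scipy import signal
+        >>> sys = signal.StateSpace([[0, 1], [0, 0]], [[0], [1]],
+        ...                         [[1, 0]], [[0]])
+        >>> tf = sys.to_tf()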
+
+        """
+        return TransferFunction(*ss2tf(self._A, self._B, self._C, self._D,
+                                       **kwargs), **self._dt_dict)
+
+    def to_zpk(self, **kwargs):
+        """
+        Convert system representation to `ZerosPolesGain`.
+
+        Parameters
+        ----------
+        kwargs : dict, optional
+            Additional keywords passed to `ss2zpk`
+
+        Returns
+        -------
+        sys : instance of `ZerosPolesGain`
+            Zeros, poles, gain representation of the current system
+
+        """
+        return ZerosPolesGain(*ss2zpk(self._A, self._B, self._C, self._D,
+                                      **kwargs), **self._dt_dict)
+
+    def to_ss(self):
+        """
+        Return a copy of the current `StateSpace` system.
+
+        Returns
+        -------
+        sys : instance of `StateSpace`
+            The current system (copy)
+
+        """
+        return copy.deepcopy(self)
+
+
+class StateSpaceContinuous(StateSpace, lti):
+    r"""
+    Continuous-time Linear Time Invariant system in state-space form.
+
+    Represents the system as the continuous-time, first order differential
+    equation :math:`\dot{x} = A x + B u`.
+    Continuous-time `StateSpace` systems inherit additional functionality
+    from the `lti` class.
+
+    Parameters
+    ----------
+    *system: arguments
+        The `StateSpace` class can be instantiated with 1 or 4 arguments.
+        The following gives the number of input arguments and their
+        interpretation:
+
+            * 1: `lti` system: (`StateSpace`, `TransferFunction` or
+              `ZerosPolesGain`)
+            * 4: array_like: (A, B, C, D)
+
+    See Also
+    --------
+    TransferFunction, ZerosPolesGain, lti
+    ss2zpk, ss2tf, zpk2sos
+
+    Notes
+    -----
+    Changing the value of properties that are not part of the
+    `StateSpace` system representation (such as `zeros` or `poles`) is very
+    inefficient and may lead to numerical inaccuracies.  It is better to
+    convert to the specific system representation first. For example, call
+    ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
+
+    Examples
+    --------
+    >>> from scipy import signal
+    >>> import numpy as np
+
+    >>> a = np.array([[0, 1], [0, 0]])
+    >>> b = np.array([[0], [1]])
+    >>> c = np.array([[1, 0]])
+    >>> d = np.array([[0]])
+
+    >>> sys = signal.StateSpace(a, b, c, d)
+    >>> print(sys)
+    StateSpaceContinuous(
+    array([[0, 1],
+           [0, 0]]),
+    array([[0],
+           [1]]),
+    array([[1, 0]]),
+    array([[0]]),
+    dt: None
+    )
+
+    """
+
+    def to_discrete(self, dt, method='zoh', alpha=None):
+        """
+        Return the discretized `StateSpace` system.
+
+        Parameters: See `cont2discrete` for details.
+
+        Returns
+        -------
+        sys: instance of `dlti` and `StateSpace`
+        """
+        return StateSpace(*cont2discrete((self.A, self.B, self.C, self.D),
+                                         dt,
+                                         method=method,
+                                         alpha=alpha)[:-1],
+                          dt=dt)
+
+
+class StateSpaceDiscrete(StateSpace, dlti):
+    r"""
+    Discrete-time Linear Time Invariant system in state-space form.
+
+    Represents the system as the discrete-time difference equation
+    :math:`x[k+1] = A x[k] + B u[k]`.
+    `StateSpace` systems inherit additional functionality from the `dlti`
+    class.
+
+    Parameters
+    ----------
+    *system: arguments
+        The `StateSpace` class can be instantiated with 1 or 4 arguments.
+        The following gives the number of input arguments and their
+        interpretation:
+
+            * 1: `dlti` system: (`StateSpace`, `TransferFunction` or
+              `ZerosPolesGain`)
+            * 4: array_like: (A, B, C, D)
+    dt: float, optional
+        Sampling time [s] of the discrete-time systems. Defaults to `True`
+        (unspecified sampling time). Must be specified as a keyword argument,
+        for example, ``dt=0.1``.
+
+    See Also
+    --------
+    TransferFunction, ZerosPolesGain, dlti
+    ss2zpk, ss2tf, zpk2sos
+
+    Notes
+    -----
+    Changing the value of properties that are not part of the
+    `StateSpace` system representation (such as `zeros` or `poles`) is very
+    inefficient and may lead to numerical inaccuracies.  It is better to
+    convert to the specific system representation first. For example, call
+    ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
+
+    Examples
+    --------
+    >>> from scipy import signal
+    >>> import numpy as np
+
+    >>> a = np.array([[1, 0.1], [0, 1]])
+    >>> b = np.array([[0.005], [0.1]])
+    >>> c = np.array([[1, 0]])
+    >>> d = np.array([[0]])
+
+    >>> signal.StateSpace(a, b, c, d, dt=0.1)
+    StateSpaceDiscrete(
+    array([[1. , 0.1],
+           [0. , 1. ]]),
+    array([[0.005],
+           [0.1  ]]),
+    array([[1, 0]]),
+    array([[0]]),
+    dt: 0.1
+    )
+
+    """
+    pass
+
+
+def lsim2(system, U=None, T=None, X0=None, **kwargs):
+    """
+    Simulate output of a continuous-time linear system by using the ODE
+    solver `scipy.integrate.odeint`.
+
+    Parameters
+    ----------
+    system : an instance of the `lti` class or a tuple describing the system.
+        The following gives the number of elements in the tuple and
+        the interpretation:
+
+        * 1: (instance of `lti`)
+        * 2: (num, den)
+        * 3: (zeros, poles, gain)
+        * 4: (A, B, C, D)
+
+    U : array_like (1D or 2D), optional
+        An input array describing the input at each time T.  Linear
+        interpolation is used between given times.  If there are
+        multiple inputs, then each column of the rank-2 array
+        represents an input.  If U is not given, the input is assumed
+        to be zero.
+    T : array_like (1D or 2D), optional
+        The time steps at which the input is defined and at which the
+        output is desired.  The default is 101 evenly spaced points on
+        the interval [0,10.0].
+    X0 : array_like (1D), optional
+        The initial condition of the state vector.  If `X0` is not
+        given, the initial conditions are assumed to be 0.
+    kwargs : dict
+        Additional keyword arguments are passed on to the function
+        `odeint`.  See the notes below for more details.
+
+    Returns
+    -------
+    T : 1D ndarray
+        The time values for the output.
+    yout : ndarray
+        The response of the system.
+    xout : ndarray
+        The time-evolution of the state-vector.
+
+    See Also
+    --------
+    lsim
+
+    Notes
+    -----
+    This function uses `scipy.integrate.odeint` to solve the
+    system's differential equations.  Additional keyword arguments
+    given to `lsim2` are passed on to `odeint`.  See the documentation
+    for `scipy.integrate.odeint` for the full list of arguments.
+
+    If (num, den) is passed in for ``system``, coefficients for both the
+    numerator and denominator should be specified in descending exponent
+    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
+
+    Examples
+    --------
+    We'll use `lsim2` to simulate an analog Bessel filter applied to
+    a signal.
+
+    >>> import numpy as np
+    >>> from scipy.signal import bessel, lsim2
+    >>> import matplotlib.pyplot as plt
+
+    Create a low-pass Bessel filter with a cutoff of 12 Hz.
+
+    >>> b, a = bessel(N=5, Wn=2*np.pi*12, btype='lowpass', analog=True)
+
+    Generate data to which the filter is applied.
+
+    >>> t = np.linspace(0, 1.25, 500, endpoint=False)
+
+    The input signal is the sum of three sinusoidal curves, with
+    frequencies 4 Hz, 40 Hz, and 80 Hz.  The filter should mostly
+    eliminate the 40 Hz and 80 Hz components, leaving just the 4 Hz signal.
+
+    >>> u = (np.cos(2*np.pi*4*t) + 0.6*np.sin(2*np.pi*40*t) +
+    ...      0.5*np.cos(2*np.pi*80*t))
+
+    Simulate the filter with `lsim2`.
+
+    >>> tout, yout, xout = lsim2((b, a), U=u, T=t)
+
+    Plot the result.
+
+    >>> plt.plot(t, u, 'r', alpha=0.5, linewidth=1, label='input')
+    >>> plt.plot(tout, yout, 'k', linewidth=1.5, label='output')
+    >>> plt.legend(loc='best', shadow=True, framealpha=1)
+    >>> plt.grid(alpha=0.3)
+    >>> plt.xlabel('t')
+    >>> plt.show()
+
+    In a second example, we simulate a double integrator ``y'' = u``, with
+    a constant input ``u = 1``.  We'll use the state space representation
+    of the integrator.
+
+    >>> from scipy.signal import lti
+    >>> A = np.array([[0, 1], [0, 0]])
+    >>> B = np.array([[0], [1]])
+    >>> C = np.array([[1, 0]])
+    >>> D = 0
+    >>> system = lti(A, B, C, D)
+
+    `t` and `u` define the time and input signal for the system to
+    be simulated.
+
+    >>> t = np.linspace(0, 5, num=50)
+    >>> u = np.ones_like(t)
+
+    Compute the simulation, and then plot `y`.  As expected, the plot shows
+    the curve ``y = 0.5*t**2``.
+
+    >>> tout, y, x = lsim2(system, u, t)
+    >>> plt.plot(t, y)
+    >>> plt.grid(alpha=0.3)
+    >>> plt.xlabel('t')
+    >>> plt.show()
+
+    """
+    if isinstance(system, lti):
+        sys = system._as_ss()
+    elif isinstance(system, dlti):
+        raise AttributeError('lsim2 can only be used with continuous-time '
+                             'systems.')
+    else:
+        sys = lti(*system)._as_ss()
+
+    if X0 is None:
+        X0 = zeros(sys.B.shape[0], sys.A.dtype)
+
+    if T is None:
+        # XXX T should really be a required argument, but U was
+        # changed from a required positional argument to a keyword,
+        # and T is after U in the argument list.  So we either: change
+        # the API and move T in front of U; check here for T being
+        # None and raise an exception; or assign a default value to T
+        # here.  This code implements the latter.
+        T = linspace(0, 10.0, 101)
+
+    T = atleast_1d(T)
+    if len(T.shape) != 1:
+        raise ValueError("T must be a rank-1 array.")
+
+    if U is not None:
+        U = atleast_1d(U)
+        if len(U.shape) == 1:
+            U = U.reshape(-1, 1)
+        sU = U.shape
+        if sU[0] != len(T):
+            raise ValueError("U must have the same number of rows "
+                             "as elements in T.")
+
+        if sU[1] != sys.inputs:
+            raise ValueError("The number of inputs in U (%d) is not "
+                             "compatible with the number of system "
+                             "inputs (%d)" % (sU[1], sys.inputs))
+        # Create a callable that uses linear interpolation to
+        # calculate the input at any time.
+        ufunc = interpolate.interp1d(T, U, kind='linear',
+                                     axis=0, bounds_error=False)
+
+        def fprime(x, t, sys, ufunc):
+            """The vector field of the linear system."""
+            return dot(sys.A, x) + squeeze(dot(sys.B, nan_to_num(ufunc([t]))))
+        xout = integrate.odeint(fprime, X0, T, args=(sys, ufunc), **kwargs)
+        yout = dot(sys.C, transpose(xout)) + dot(sys.D, transpose(U))
+    else:
+        def fprime(x, t, sys):
+            """The vector field of the linear system."""
+            return dot(sys.A, x)
+        xout = integrate.odeint(fprime, X0, T, args=(sys,), **kwargs)
+        yout = dot(sys.C, transpose(xout))
+
+    return T, squeeze(transpose(yout)), xout
+
+
+def _cast_to_array_dtype(in1, in2):
+    """Cast array to dtype of other array, while avoiding ComplexWarning.
+
+    Such warnings can be raised when casting complex to real.
+    """
+    if numpy.issubdtype(in2.dtype, numpy.float64):
+        # dtype to cast to is not complex, so use .real
+        in1 = in1.real.astype(in2.dtype)
+    else:
+        in1 = in1.astype(in2.dtype)
+
+    return in1
+
+
+def lsim(system, U, T, X0=None, interp=True):
+    """
+    Simulate output of a continuous-time linear system.
+
+    Parameters
+    ----------
+    system : an instance of the LTI class or a tuple describing the system.
+        The following gives the number of elements in the tuple and
+        the interpretation:
+
+        * 1: (instance of `lti`)
+        * 2: (num, den)
+        * 3: (zeros, poles, gain)
+        * 4: (A, B, C, D)
+
+    U : array_like
+        An input array describing the input at each time `T`
+        (interpolation is assumed between given times).  If there are
+        multiple inputs, then each column of the rank-2 array
+        represents an input.  If U = 0 or None, a zero input is used.
+    T : array_like
+        The time steps at which the input is defined and at which the
+        output is desired.  Must be nonnegative, increasing, and equally spaced.
+    X0 : array_like, optional
+        The initial conditions on the state vector (zero by default).
+    interp : bool, optional
+        Whether to use linear (True, the default) or zero-order-hold (False)
+        interpolation for the input array.
+
+    Returns
+    -------
+    T : 1D ndarray
+        Time values for the output.
+    yout : 1D ndarray
+        System response.
+    xout : ndarray
+        Time evolution of the state vector.
+
+    Notes
+    -----
+    If (num, den) is passed in for ``system``, coefficients for both the
+    numerator and denominator should be specified in descending exponent
+    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
+
+    Examples
+    --------
+    We'll use `lsim` to simulate an analog Bessel filter applied to
+    a signal.
+
+    >>> import numpy as np
+    >>> from scipy.signal import bessel, lsim
+    >>> import matplotlib.pyplot as plt
+
+    Create a low-pass Bessel filter with a cutoff of 12 Hz.
+
+    >>> b, a = bessel(N=5, Wn=2*np.pi*12, btype='lowpass', analog=True)
+
+    Generate data to which the filter is applied.
+
+    >>> t = np.linspace(0, 1.25, 500, endpoint=False)
+
+    The input signal is the sum of three sinusoidal curves, with
+    frequencies 4 Hz, 40 Hz, and 80 Hz.  The filter should mostly
+    eliminate the 40 Hz and 80 Hz components, leaving just the 4 Hz signal.
+
+    >>> u = (np.cos(2*np.pi*4*t) + 0.6*np.sin(2*np.pi*40*t) +
+    ...      0.5*np.cos(2*np.pi*80*t))
+
+    Simulate the filter with `lsim`.
+
+    >>> tout, yout, xout = lsim((b, a), U=u, T=t)
+
+    Plot the result.
+
+    >>> plt.plot(t, u, 'r', alpha=0.5, linewidth=1, label='input')
+    >>> plt.plot(tout, yout, 'k', linewidth=1.5, label='output')
+    >>> plt.legend(loc='best', shadow=True, framealpha=1)
+    >>> plt.grid(alpha=0.3)
+    >>> plt.xlabel('t')
+    >>> plt.show()
+
+    In a second example, we simulate a double integrator ``y'' = u``, with
+    a constant input ``u = 1``.  We'll use the state space representation
+    of the integrator.
+
+    >>> from scipy.signal import lti
+    >>> A = np.array([[0.0, 1.0], [0.0, 0.0]])
+    >>> B = np.array([[0.0], [1.0]])
+    >>> C = np.array([[1.0, 0.0]])
+    >>> D = 0.0
+    >>> system = lti(A, B, C, D)
+
+    `t` and `u` define the time and input signal for the system to
+    be simulated.
+
+    >>> t = np.linspace(0, 5, num=50)
+    >>> u = np.ones_like(t)
+
+    Compute the simulation, and then plot `y`.  As expected, the plot shows
+    the curve ``y = 0.5*t**2``.
+
+    >>> tout, y, x = lsim(system, u, t)
+    >>> plt.plot(t, y)
+    >>> plt.grid(alpha=0.3)
+    >>> plt.xlabel('t')
+    >>> plt.show()
+
+    """
+    if isinstance(system, lti):
+        sys = system._as_ss()
+    elif isinstance(system, dlti):
+        raise AttributeError('lsim can only be used with continuous-time '
+                             'systems.')
+    else:
+        sys = lti(*system)._as_ss()
+    T = atleast_1d(T)
+    if len(T.shape) != 1:
+        raise ValueError("T must be a rank-1 array.")
+
+    A, B, C, D = map(np.asarray, (sys.A, sys.B, sys.C, sys.D))
+    n_states = A.shape[0]
+    n_inputs = B.shape[1]
+
+    n_steps = T.size
+    if X0 is None:
+        X0 = zeros(n_states, sys.A.dtype)
+    xout = np.empty((n_steps, n_states), sys.A.dtype)
+
+    if T[0] == 0:
+        xout[0] = X0
+    elif T[0] > 0:
+        # step forward to initial time, with zero input
+        xout[0] = dot(X0, linalg.expm(transpose(A) * T[0]))
+    else:
+        raise ValueError("Initial time must be nonnegative")
+
+    no_input = (U is None or
+                (isinstance(U, (int, float)) and U == 0.) or
+                not np.any(U))
+
+    if n_steps == 1:
+        yout = squeeze(dot(xout, transpose(C)))
+        if not no_input:
+            yout += squeeze(dot(U, transpose(D)))
+        return T, squeeze(yout), squeeze(xout)
+
+    dt = T[1] - T[0]
+    if not np.allclose((T[1:] - T[:-1]) / dt, 1.0):
+        warnings.warn("Non-uniform timesteps are deprecated. Results may be "
+                      "slow and/or inaccurate.", DeprecationWarning)
+        return lsim2(system, U, T, X0)
+
+    if no_input:
+        # Zero input: just use matrix exponential
+        # take transpose because state is a row vector
+        expAT_dt = linalg.expm(transpose(A) * dt)
+        for i in range(1, n_steps):
+            xout[i] = dot(xout[i-1], expAT_dt)
+        yout = squeeze(dot(xout, transpose(C)))
+        return T, squeeze(yout), squeeze(xout)
+
+    # Nonzero input
+    U = atleast_1d(U)
+    if U.ndim == 1:
+        U = U[:, np.newaxis]
+
+    if U.shape[0] != n_steps:
+        raise ValueError("U must have the same number of rows "
+                         "as elements in T.")
+
+    if U.shape[1] != n_inputs:
+        raise ValueError("System does not define that many inputs.")
+
+    if not interp:
+        # Zero-order hold
+        # Algorithm: to integrate from time 0 to time dt, we solve
+        #   xdot = A x + B u,  x(0) = x0
+        #   udot = 0,          u(0) = u0.
+        #
+        # Solution is
+        #   [ x(dt) ]       [ A*dt   B*dt ] [ x0 ]
+        #   [ u(dt) ] = exp [  0     0    ] [ u0 ]
+        M = np.vstack([np.hstack([A * dt, B * dt]),
+                       np.zeros((n_inputs, n_states + n_inputs))])
+        # transpose everything because the state and input are row vectors
+        expMT = linalg.expm(transpose(M))
+        Ad = expMT[:n_states, :n_states]
+        Bd = expMT[n_states:, :n_states]
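+        # Ad and Bd are the discrete-time state-transition and input blocks
+        # read off from expm(M.T)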
+        for i in range(1, n_steps):
+            xout[i] = dot(xout[i-1], Ad) + dot(U[i-1], Bd)
+    else:
+        # Linear interpolation between steps
+        # Algorithm: to integrate from time 0 to time dt, with linear
+        # interpolation between inputs u(0) = u0 and u(dt) = u1, we solve
+        #   xdot = A x + B u,        x(0) = x0
+        #   udot = (u1 - u0) / dt,   u(0) = u0.
+        #
+        # Solution is
+        #   [ x(dt) ]       [ A*dt  B*dt  0 ] [  x0   ]
+        #   [ u(dt) ] = exp [  0     0    I ] [  u0   ]
+        #   [u1 - u0]       [  0     0    0 ] [u1 - u0]
+        M = np.vstack([np.hstack([A * dt, B * dt,
+                                  np.zeros((n_states, n_inputs))]),
+                       np.hstack([np.zeros((n_inputs, n_states + n_inputs)),
+                                  np.identity(n_inputs)]),
+                       np.zeros((n_inputs, n_states + 2 * n_inputs))])
+        expMT = linalg.expm(transpose(M))
+        Ad = expMT[:n_states, :n_states]
+        Bd1 = expMT[n_states+n_inputs:, :n_states]
+        Bd0 = expMT[n_states:n_states + n_inputs, :n_states] - Bd1
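+        # Bd0 weights the input at the start of each step and Bd1 the input
+        # at the end, as read off from the augmented-matrix solution above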
+        for i in range(1, n_steps):
+            xout[i] = (dot(xout[i-1], Ad) + dot(U[i-1], Bd0) + dot(U[i], Bd1))
+
+    yout = (squeeze(dot(xout, transpose(C))) + squeeze(dot(U, transpose(D))))
+    return T, squeeze(yout), squeeze(xout)
+
+
+def _default_response_times(A, n):
+    """Compute a reasonable set of time samples for the response time.
+
+    This function is used by `impulse`, `impulse2`, `step` and `step2`
+    to compute the response time when the `T` argument to the function
+    is None.
+
+    Parameters
+    ----------
+    A : array_like
+        The system matrix, which is square.
+    n : int
+        The number of time samples to generate.
+
+    Returns
+    -------
+    t : ndarray
+        The 1-D array of length `n` of time samples at which the response
+        is to be computed.
+    """
+    # Create a reasonable time interval.
+    # TODO: This could use some more work.
+    # For example, what is expected when the system is unstable?
+    vals = linalg.eigvals(A)
+    r = min(abs(real(vals)))
+    if r == 0.0:
+        r = 1.0
+    tc = 1.0 / r
+    t = linspace(0.0, 7 * tc, n)
+    return t
+
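+# Illustrative sketch (not part of the library): for a system with a single
+# pole at s = -1, the slowest time constant is 1/|-1| = 1, so the heuristic
+# above spans seven time constants:
+#
+#   >>> _default_response_times(np.array([[-1.0]]), 5)
+#   array([0.  , 1.75, 3.5 , 5.25, 7.  ])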
+
+def impulse(system, X0=None, T=None, N=None):
+    """Impulse response of continuous-time system.
+
+    Parameters
+    ----------
+    system : an instance of the LTI class or a tuple of array_like
+        describing the system.
+        The following gives the number of elements in the tuple and
+        the interpretation:
+
+            * 1 (instance of `lti`)
+            * 2 (num, den)
+            * 3 (zeros, poles, gain)
+            * 4 (A, B, C, D)
+
+    X0 : array_like, optional
+        Initial state-vector.  Defaults to zero.
+    T : array_like, optional
+        Time points.  Computed if not given.
+    N : int, optional
+        The number of time points to compute (if `T` is not given).
+
+    Returns
+    -------
+    T : ndarray
+        A 1-D array of time points.
+    yout : ndarray
+        A 1-D array containing the impulse response of the system (except for
+        singularities at zero).
+
+    Notes
+    -----
+    If (num, den) is passed in for ``system``, coefficients for both the
+    numerator and denominator should be specified in descending exponent
+    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
+
+    Examples
+    --------
+    Compute the impulse response of a second order system with a repeated
+    root: ``x''(t) + 2*x'(t) + x(t) = u(t)``
+
+    >>> from scipy import signal
+    >>> system = ([1.0], [1.0, 2.0, 1.0])
+    >>> t, y = signal.impulse(system)
+    >>> import matplotlib.pyplot as plt
+    >>> plt.plot(t, y)
+
+    """
+    if isinstance(system, lti):
+        sys = system._as_ss()
+    elif isinstance(system, dlti):
+        raise AttributeError('impulse can only be used with continuous-time '
+                             'systems.')
+    else:
+        sys = lti(*system)._as_ss()
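+    # A unit impulse at t=0 is equivalent to setting the initial state to
+    # sys.B (plus any user-supplied X0) and then letting the system evolve
+    # with zero input, which is what the lsim call below does.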
+    if X0 is None:
+        X = squeeze(sys.B)
+    else:
+        X = squeeze(sys.B + X0)
+    if N is None:
+        N = 100
+    if T is None:
+        T = _default_response_times(sys.A, N)
+    else:
+        T = asarray(T)
+
+    _, h, _ = lsim(sys, 0., T, X, interp=False)
+    return T, h
+
+
+def impulse2(system, X0=None, T=None, N=None, **kwargs):
+    """
+    Impulse response of a single-input, continuous-time linear system.
+
+    Parameters
+    ----------
+    system : an instance of the LTI class or a tuple of array_like
+        describing the system.
+        The following gives the number of elements in the tuple and
+        the interpretation:
+
+            * 1 (instance of `lti`)
+            * 2 (num, den)
+            * 3 (zeros, poles, gain)
+            * 4 (A, B, C, D)
+
+    X0 : 1-D array_like, optional
+        The initial condition of the state vector.  Default: 0 (the
+        zero vector).
+    T : 1-D array_like, optional
+        The time steps at which the input is defined and at which the
+        output is desired.  If `T` is not given, the function will
+        generate a set of time samples automatically.
+    N : int, optional
+        Number of time points to compute.  Default: 100.
+    kwargs : various types
+        Additional keyword arguments are passed on to the function
+        `scipy.signal.lsim2`, which in turn passes them on to
+        `scipy.integrate.odeint`; see the latter's documentation for
+        information about these arguments.
+
+    Returns
+    -------
+    T : ndarray
+        The time values for the output.
+    yout : ndarray
+        The output response of the system.
+
+    See Also
+    --------
+    impulse, lsim2, scipy.integrate.odeint
+
+    Notes
+    -----
+    The solution is generated by calling `scipy.signal.lsim2`, which uses
+    the differential equation solver `scipy.integrate.odeint`.
+
+    If (num, den) is passed in for ``system``, coefficients for both the
+    numerator and denominator should be specified in descending exponent
+    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
+
+    .. versionadded:: 0.8.0
+
+    Examples
+    --------
+    Compute the impulse response of a second order system with a repeated
+    root: ``x''(t) + 2*x'(t) + x(t) = u(t)``
+
+    >>> from scipy import signal
+    >>> system = ([1.0], [1.0, 2.0, 1.0])
+    >>> t, y = signal.impulse2(system)
+    >>> import matplotlib.pyplot as plt
+    >>> plt.plot(t, y)
+
+    """
+    if isinstance(system, lti):
+        sys = system._as_ss()
+    elif isinstance(system, dlti):
+        raise AttributeError('impulse2 can only be used with continuous-time '
+                             'systems.')
+    else:
+        sys = lti(*system)._as_ss()
+    B = sys.B
+    if B.shape[-1] != 1:
+        raise ValueError("impulse2() requires a single-input system.")
+    B = B.squeeze()
+    if X0 is None:
+        X0 = zeros_like(B)
+    if N is None:
+        N = 100
+    if T is None:
+        T = _default_response_times(sys.A, N)
+
+    # Move the impulse in the input to the initial conditions, and then
+    # solve using lsim2().
+    ic = B + X0
+    Tr, Yr, Xr = lsim2(sys, T=T, X0=ic, **kwargs)
+    return Tr, Yr
+
+
+def step(system, X0=None, T=None, N=None):
+    """Step response of continuous-time system.
+
+    Parameters
+    ----------
+    system : an instance of the LTI class or a tuple of array_like
+        describing the system.
+        The following gives the number of elements in the tuple and
+        the interpretation:
+
+            * 1 (instance of `lti`)
+            * 2 (num, den)
+            * 3 (zeros, poles, gain)
+            * 4 (A, B, C, D)
+
+    X0 : array_like, optional
+        Initial state-vector (default is zero).
+    T : array_like, optional
+        Time points (computed if not given).
+    N : int, optional
+        Number of time points to compute if `T` is not given.
+
+    Returns
+    -------
+    T : 1D ndarray
+        Output time points.
+    yout : 1D ndarray
+        Step response of system.
+
+    See Also
+    --------
+    scipy.signal.step2
+
+    Notes
+    -----
+    If (num, den) is passed in for ``system``, coefficients for both the
+    numerator and denominator should be specified in descending exponent
+    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
+
+    Examples
+    --------
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+    >>> lti = signal.lti([1.0], [1.0, 1.0])
+    >>> t, y = signal.step(lti)
+    >>> plt.plot(t, y)
+    >>> plt.xlabel('Time [s]')
+    >>> plt.ylabel('Amplitude')
+    >>> plt.title('Step response for 1st-order lowpass')
+    >>> plt.grid()
+
+    """
+    if isinstance(system, lti):
+        sys = system._as_ss()
+    elif isinstance(system, dlti):
+        raise AttributeError('step can only be used with continuous-time '
+                             'systems.')
+    else:
+        sys = lti(*system)._as_ss()
+    if N is None:
+        N = 100
+    if T is None:
+        T = _default_response_times(sys.A, N)
+    else:
+        T = asarray(T)
+    U = ones(T.shape, sys.A.dtype)
+    vals = lsim(sys, U, T, X0=X0, interp=False)
+    return vals[0], vals[1]
+
+
+def step2(system, X0=None, T=None, N=None, **kwargs):
+    """Step response of continuous-time system.
+
+    This function is functionally the same as `scipy.signal.step`, but
+    it uses the function `scipy.signal.lsim2` to compute the step
+    response.
+
+    Parameters
+    ----------
+    system : an instance of the LTI class or a tuple of array_like
+        describing the system.
+        The following gives the number of elements in the tuple and
+        the interpretation:
+
+            * 1 (instance of `lti`)
+            * 2 (num, den)
+            * 3 (zeros, poles, gain)
+            * 4 (A, B, C, D)
+
+    X0 : array_like, optional
+        Initial state-vector (default is zero).
+    T : array_like, optional
+        Time points (computed if not given).
+    N : int, optional
+        Number of time points to compute if `T` is not given.
+    kwargs : various types
+        Additional keyword arguments are passed on to the function
+        `scipy.signal.lsim2`, which in turn passes them on to
+        `scipy.integrate.odeint`.  See the documentation for
+        `scipy.integrate.odeint` for information about these arguments.
+
+    Returns
+    -------
+    T : 1D ndarray
+        Output time points.
+    yout : 1D ndarray
+        Step response of system.
+
+    See Also
+    --------
+    scipy.signal.step
+
+    Notes
+    -----
+    If (num, den) is passed in for ``system``, coefficients for both the
+    numerator and denominator should be specified in descending exponent
+    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
+
+    .. versionadded:: 0.8.0
+
+    Examples
+    --------
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+    >>> lti = signal.lti([1.0], [1.0, 1.0])
+    >>> t, y = signal.step2(lti)
+    >>> plt.plot(t, y)
+    >>> plt.xlabel('Time [s]')
+    >>> plt.ylabel('Amplitude')
+    >>> plt.title('Step response for 1st-order lowpass')
+    >>> plt.grid()
+
+    """
+    if isinstance(system, lti):
+        sys = system._as_ss()
+    elif isinstance(system, dlti):
+        raise AttributeError('step2 can only be used with continuous-time '
+                             'systems.')
+    else:
+        sys = lti(*system)._as_ss()
+    if N is None:
+        N = 100
+    if T is None:
+        T = _default_response_times(sys.A, N)
+    else:
+        T = asarray(T)
+    U = ones(T.shape, sys.A.dtype)
+    vals = lsim2(sys, U, T, X0=X0, **kwargs)
+    return vals[0], vals[1]
+
+
+def bode(system, w=None, n=100):
+    """
+    Calculate Bode magnitude and phase data of a continuous-time system.
+
+    Parameters
+    ----------
+    system : an instance of the LTI class or a tuple describing the system.
+        The following gives the number of elements in the tuple and
+        the interpretation:
+
+            * 1 (instance of `lti`)
+            * 2 (num, den)
+            * 3 (zeros, poles, gain)
+            * 4 (A, B, C, D)
+
+    w : array_like, optional
+        Array of frequencies (in rad/s). Magnitude and phase data is calculated
+        for every value in this array. If not given, a reasonable set will be
+        calculated.
+    n : int, optional
+        Number of frequency points to compute if `w` is not given. The `n`
+        frequencies are logarithmically spaced in an interval chosen to
+        include the influence of the poles and zeros of the system.
+
+    Returns
+    -------
+    w : 1D ndarray
+        Frequency array [rad/s]
+    mag : 1D ndarray
+        Magnitude array [dB]
+    phase : 1D ndarray
+        Phase array [deg]
+
+    Notes
+    -----
+    If (num, den) is passed in for ``system``, coefficients for both the
+    numerator and denominator should be specified in descending exponent
+    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
+
+    .. versionadded:: 0.11.0
+
+    Examples
+    --------
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+
+    >>> sys = signal.TransferFunction([1], [1, 1])
+    >>> w, mag, phase = signal.bode(sys)
+
+    >>> plt.figure()
+    >>> plt.semilogx(w, mag)    # Bode magnitude plot
+    >>> plt.figure()
+    >>> plt.semilogx(w, phase)  # Bode phase plot
+    >>> plt.show()
+
+    """
+    w, y = freqresp(system, w=w, n=n)
+
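+    # Magnitude in decibels; phase via the four-quadrant arctangent,
+    # unwrapped so it stays continuous across the -pi/pi branch cut,
+    # then converted to degrees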
+    mag = 20.0 * numpy.log10(abs(y))
+    phase = numpy.unwrap(numpy.arctan2(y.imag, y.real)) * 180.0 / numpy.pi
+
+    return w, mag, phase
+
+
+def freqresp(system, w=None, n=10000):
+    r"""Calculate the frequency response of a continuous-time system.
+
+    Parameters
+    ----------
+    system : an instance of the `lti` class or a tuple describing the system.
+        The following gives the number of elements in the tuple and
+        the interpretation:
+
+            * 1 (instance of `lti`)
+            * 2 (num, den)
+            * 3 (zeros, poles, gain)
+            * 4 (A, B, C, D)
+
+    w : array_like, optional
+        Array of frequencies (in rad/s). Magnitude and phase data is
+        calculated for every value in this array. If not given, a reasonable
+        set will be calculated.
+    n : int, optional
+        Number of frequency points to compute if `w` is not given. The `n`
+        frequencies are logarithmically spaced in an interval chosen to
+        include the influence of the poles and zeros of the system.
+
+    Returns
+    -------
+    w : 1D ndarray
+        Frequency array [rad/s]
+    H : 1D ndarray
+        Array of complex magnitude values
+
+    Notes
+    -----
+    If (num, den) is passed in for ``system``, coefficients for both the
+    numerator and denominator should be specified in descending exponent
+    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
+
+    Examples
+    --------
+    Generating the Nyquist plot of a transfer function
+
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+
+    Construct the transfer function :math:`H(s) = \frac{5}{(s-1)^3}`:
+
+    >>> s1 = signal.ZerosPolesGain([], [1, 1, 1], [5])
+
+    >>> w, H = signal.freqresp(s1)
+
+    >>> plt.figure()
+    >>> plt.plot(H.real, H.imag, "b")
+    >>> plt.plot(H.real, -H.imag, "r")
+    >>> plt.show()
+    """
+    if isinstance(system, lti):
+        if isinstance(system, (TransferFunction, ZerosPolesGain)):
+            sys = system
+        else:
+            sys = system._as_zpk()
+    elif isinstance(system, dlti):
+        raise AttributeError('freqresp can only be used with continuous-time '
+                             'systems.')
+    else:
+        sys = lti(*system)._as_zpk()
+
+    if sys.inputs != 1 or sys.outputs != 1:
+        raise ValueError("freqresp() requires a SISO (single input, single "
+                         "output) system.")
+
+    if w is not None:
+        worN = w
+    else:
+        worN = n
+
+    if isinstance(sys, TransferFunction):
+        # In the call to freqs(), sys.num.ravel() is used because there are
+        # cases where sys.num is a 2-D array with a single row.
+        w, h = freqs(sys.num.ravel(), sys.den, worN=worN)
+
+    elif isinstance(sys, ZerosPolesGain):
+        w, h = freqs_zpk(sys.zeros, sys.poles, sys.gain, worN=worN)
+
+    return w, h
+
+
+# This class will be used by place_poles to return its results
+# see https://code.activestate.com/recipes/52308/
+class Bunch:
+    def __init__(self, **kwds):
+        self.__dict__.update(kwds)
+
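+# Illustrative usage sketch (not part of the library): Bunch simply exposes
+# its keyword arguments as attributes, e.g.
+#
+#   >>> b = Bunch(gain_matrix=None, nb_iter=3)
+#   >>> b.nb_iter
+#   3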
+
+def _valid_inputs(A, B, poles, method, rtol, maxiter):
+    """
+    Check the poles come in complex conjugate pairs.
+    Check shapes of A, B and poles are compatible.
+    Check the method chosen is compatible with provided poles.
+    Return the update method to use and the ordered poles.
+
+    """
+    poles = np.asarray(poles)
+    if poles.ndim > 1:
+        raise ValueError("Poles must be a 1D array like.")
+    # Will raise ValueError if poles do not come in complex conjugates pairs
+    poles = _order_complex_poles(poles)
+    if A.ndim > 2:
+        raise ValueError("A must be a 2D array/matrix.")
+    if B.ndim > 2:
+        raise ValueError("B must be a 2D array/matrix")
+    if A.shape[0] != A.shape[1]:
+        raise ValueError("A must be square")
+    if len(poles) > A.shape[0]:
+        raise ValueError("maximum number of poles is %d but you asked for %d" %
+                         (A.shape[0], len(poles)))
+    if len(poles) < A.shape[0]:
+        raise ValueError("number of poles is %d but you should provide %d" %
+                         (len(poles), A.shape[0]))
+    r = np.linalg.matrix_rank(B)
+    for p in poles:
+        if sum(p == poles) > r:
+            raise ValueError("at least one of the requested poles is "
+                             "repeated more than rank(B) times")
+    # Choose update method
+    update_loop = _YT_loop
+    if method not in ('KNV0','YT'):
+        raise ValueError("The method keyword must be one of 'YT' or 'KNV0'")
+
+    if method == "KNV0":
+        update_loop = _KNV0_loop
+        if not all(np.isreal(poles)):
+            raise ValueError("Complex poles are not supported by KNV0")
+
+    if maxiter < 1:
+        raise ValueError("maxiter must be at least equal to 1")
+
+    # We do not check rtol <= 0 as the user can use a negative rtol to
+    # force maxiter iterations
+    if rtol > 1:
+        raise ValueError("rtol can not be greater than 1")
+
+    return update_loop, poles
+
+
+def _order_complex_poles(poles):
+    """
+    Check that complex poles come in conjugate pairs and reorder P according
+    to YT, i.e. real_poles, complex_i, conjugate complex_i, ...
+    The lexicographic sort on the complex poles is added to help the user
+    compare sets of poles.
+    """
+    ordered_poles = np.sort(poles[np.isreal(poles)])
+    im_poles = []
+    for p in np.sort(poles[np.imag(poles) < 0]):
+        if np.conj(p) in poles:
+            im_poles.extend((p, np.conj(p)))
+
+    ordered_poles = np.hstack((ordered_poles, im_poles))
+
+    if poles.shape[0] != len(ordered_poles):
+        raise ValueError("Complex poles must come with their conjugates")
+    return ordered_poles
+
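+# Illustrative sketch (not part of the library) of the ordering: real poles
+# first in increasing order, then each conjugate pair with the
+# negative-imaginary member first:
+#
+#   >>> _order_complex_poles(np.array([-1+1j, 0.5, -1-1j, -2.0]))
+#   array([-2. +0.j,  0.5+0.j, -1. -1.j, -1. +1.j])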
+
+def _KNV0(B, ker_pole, transfer_matrix, j, poles):
+    """
+    Algorithm "KNV0" Kautsky et Al. Robust pole
+    assignment in linear state feedback, Int journal of Control
+    1985, vol 41 p 1129->1155
+    https://la.epfl.ch/files/content/sites/la/files/
+        users/105941/public/KautskyNicholsDooren
+
+    """
+    # Remove xj from the basis
+    transfer_matrix_not_j = np.delete(transfer_matrix, j, axis=1)
+    # If we QR this matrix in full mode Q=Q0|Q1,
+    # then Q1 will be a single column orthogonal to
+    # Q0; that's what we are looking for!
+
+    # After merge of gh-4249 great speed improvements could be achieved
+    # using QR updates instead of full QR in the line below
+
+    # To debug with numpy qr uncomment the line below
+    # Q, R = np.linalg.qr(transfer_matrix_not_j, mode="complete")
+    Q, R = s_qr(transfer_matrix_not_j, mode="full")
+
+    mat_ker_pj = np.dot(ker_pole[j], ker_pole[j].T)
+    yj = np.dot(mat_ker_pj, Q[:, -1])
+
+    # If Q[:, -1] is "almost" orthogonal to ker_pole[j], its
+    # projection onto ker_pole[j] will yield a vector
+    # close to 0.  As we are looking for a vector in ker_pole[j],
+    # simply stick with transfer_matrix[:, j] (unless someone provides
+    # a better choice?)
+
+    if not np.allclose(yj, 0):
+        xj = yj/np.linalg.norm(yj)
+        transfer_matrix[:, j] = xj
+
+        # KNV does not support complex poles; using the YT technique, the two
+        # lines below seem to work 9 times out of 10, but it is not reliable
+        # enough:
+        # transfer_matrix[:, j]=real(xj)
+        # transfer_matrix[:, j+1]=imag(xj)
+
+        # Add this at the beginning of this function if you wish to test
+        # complex support:
+        #    if ~np.isreal(P[j]) and (j>=B.shape[0]-1 or P[j]!=np.conj(P[j+1])):
+        #        return
+        # Problems arise when imag(xj) is close to 0; I have no idea how to
+        # fix this
+
+
+def _YT_real(ker_pole, Q, transfer_matrix, i, j):
+    """
+    Applies algorithm from YT section 6.1 page 19 related to real pairs
+    """
+    # step 1 page 19
+    u = Q[:, -2, np.newaxis]
+    v = Q[:, -1, np.newaxis]
+
+    # step 2 page 19
+    m = np.dot(np.dot(ker_pole[i].T, np.dot(u, v.T) -
+        np.dot(v, u.T)), ker_pole[j])
+
+    # step 3 page 19
+    um, sm, vm = np.linalg.svd(m)
+    # mu1, mu2: the first two columns of U => the first two rows of U.T
+    mu1, mu2 = um.T[:2, :, np.newaxis]
+    # vm is V.T; with numpy we want the first two rows of V.T
+    nu1, nu2 = vm[:2, :, np.newaxis]
+
+    # what follows is a rough python translation of the formulas
+    # in section 6.2 page 20 (step 4)
+    transfer_matrix_j_mo_transfer_matrix_j = np.vstack((
+            transfer_matrix[:, i, np.newaxis],
+            transfer_matrix[:, j, np.newaxis]))
+
+    if not np.allclose(sm[0], sm[1]):
+        ker_pole_imo_mu1 = np.dot(ker_pole[i], mu1)
+        ker_pole_i_nu1 = np.dot(ker_pole[j], nu1)
+        ker_pole_mu_nu = np.vstack((ker_pole_imo_mu1, ker_pole_i_nu1))
+    else:
+        ker_pole_ij = np.vstack((
+                                np.hstack((ker_pole[i],
+                                           np.zeros(ker_pole[i].shape))),
+                                np.hstack((np.zeros(ker_pole[j].shape),
+                                                    ker_pole[j]))
+                                ))
+        mu_nu_matrix = np.vstack(
+            (np.hstack((mu1, mu2)), np.hstack((nu1, nu2)))
+            )
+        ker_pole_mu_nu = np.dot(ker_pole_ij, mu_nu_matrix)
+    transfer_matrix_ij = np.dot(np.dot(ker_pole_mu_nu, ker_pole_mu_nu.T),
+                             transfer_matrix_j_mo_transfer_matrix_j)
+    if not np.allclose(transfer_matrix_ij, 0):
+        transfer_matrix_ij = (np.sqrt(2)*transfer_matrix_ij /
+                              np.linalg.norm(transfer_matrix_ij))
+        transfer_matrix[:, i] = transfer_matrix_ij[
+            :transfer_matrix[:, i].shape[0], 0
+            ]
+        transfer_matrix[:, j] = transfer_matrix_ij[
+            transfer_matrix[:, i].shape[0]:, 0
+            ]
+    else:
+        # As in _KNV0, if transfer_matrix_j_mo_transfer_matrix_j is orthogonal
+        # to Vect{ker_pole_mu_nu}, assign transfer_matrix_i/transfer_matrix_j
+        # to ker_pole_mu_nu and iterate.  As we are looking for a vector in
+        # Vect{ker_pole_mu_nu} (see section 6.1 page 19) this might help
+        # (that's a guess, not a claim!)
+        transfer_matrix[:, i] = ker_pole_mu_nu[
+            :transfer_matrix[:, i].shape[0], 0
+            ]
+        transfer_matrix[:, j] = ker_pole_mu_nu[
+            transfer_matrix[:, i].shape[0]:, 0
+            ]
+
+
+def _YT_complex(ker_pole, Q, transfer_matrix, i, j):
+    """
+    Applies algorithm from YT section 6.2 page 20 related to complex pairs
+    """
+    # step 1 page 20
+    ur = np.sqrt(2)*Q[:, -2, np.newaxis]
+    ui = np.sqrt(2)*Q[:, -1, np.newaxis]
+    u = ur + 1j*ui
+
+    # step 2 page 20
+    ker_pole_ij = ker_pole[i]
+    m = np.dot(np.dot(np.conj(ker_pole_ij.T), np.dot(u, np.conj(u).T) -
+               np.dot(np.conj(u), u.T)), ker_pole_ij)
+
+    # step 3 page 20
+    e_val, e_vec = np.linalg.eig(m)
+    # sort eigenvalues according to their modulus
+    e_val_idx = np.argsort(np.abs(e_val))
+    mu1 = e_vec[:, e_val_idx[-1], np.newaxis]
+    mu2 = e_vec[:, e_val_idx[-2], np.newaxis]
+
+    # what follows is a rough python translation of the formulas
+    # in section 6.2 page 20 (step 4)
+
+    # remember transfer_matrix_i has been split as
+    # transfer_matrix[i]=real(transfer_matrix_i) and
+    # transfer_matrix[j]=imag(transfer_matrix_i)
+    transfer_matrix_j_mo_transfer_matrix_j = (
+        transfer_matrix[:, i, np.newaxis] +
+        1j*transfer_matrix[:, j, np.newaxis]
+        )
+    if not np.allclose(np.abs(e_val[e_val_idx[-1]]),
+                              np.abs(e_val[e_val_idx[-2]])):
+        ker_pole_mu = np.dot(ker_pole_ij, mu1)
+    else:
+        mu1_mu2_matrix = np.hstack((mu1, mu2))
+        ker_pole_mu = np.dot(ker_pole_ij, mu1_mu2_matrix)
+    transfer_matrix_i_j = np.dot(np.dot(ker_pole_mu, np.conj(ker_pole_mu.T)),
+                              transfer_matrix_j_mo_transfer_matrix_j)
+
+    if not np.allclose(transfer_matrix_i_j, 0):
+        transfer_matrix_i_j = (transfer_matrix_i_j /
+            np.linalg.norm(transfer_matrix_i_j))
+        transfer_matrix[:, i] = np.real(transfer_matrix_i_j[:, 0])
+        transfer_matrix[:, j] = np.imag(transfer_matrix_i_j[:, 0])
+    else:
+        # same idea as in YT_real
+        transfer_matrix[:, i] = np.real(ker_pole_mu[:, 0])
+        transfer_matrix[:, j] = np.imag(ker_pole_mu[:, 0])
+
+
+def _YT_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol):
+    """
+    Algorithm "YT" Tits, Yang. Globally Convergent
+    Algorithms for Robust Pole Assignment by State Feedback
+    https://hdl.handle.net/1903/5598
+    The poles P have to be sorted according to section 6.2 page 20
+
+    """
+    # The IEEE edition of the YT paper gives useful information on the
+    # optimal update order for the real poles in order to minimize the number
+    # of times we have to loop over all poles, see page 1442
+    nb_real = poles[np.isreal(poles)].shape[0]
+    # hnb => Half Nb Real
+    hnb = nb_real // 2
+
+    # Stick to the indices in the paper and then subtract one to get numpy
+    # array indices; it is a bit easier to link the code to the paper this
+    # way, even if it is not very clean.  The paper is unclear about what
+    # should be done when there is only one real pole => using KNV0 on this
+    # real pole seems to work
+    if nb_real > 0:
+        # update the biggest real pole with the smallest one
+        update_order = [[nb_real], [1]]
+    else:
+        update_order = [[], []]
+
+    r_comp = np.arange(nb_real+1, len(poles)+1, 2)
+    # step 1.a
+    r_p = np.arange(1, hnb+nb_real % 2)
+    update_order[0].extend(2*r_p)
+    update_order[1].extend(2*r_p+1)
+    # step 1.b
+    update_order[0].extend(r_comp)
+    update_order[1].extend(r_comp+1)
+    # step 1.c
+    r_p = np.arange(1, hnb+1)
+    update_order[0].extend(2*r_p-1)
+    update_order[1].extend(2*r_p)
+    # step 1.d
+    if hnb == 0 and np.isreal(poles[0]):
+        update_order[0].append(1)
+        update_order[1].append(1)
+    update_order[0].extend(r_comp)
+    update_order[1].extend(r_comp+1)
+    # step 2.a
+    r_j = np.arange(2, hnb+nb_real % 2)
+    for j in r_j:
+        for i in range(1, hnb+1):
+            update_order[0].append(i)
+            update_order[1].append(i+j)
+    # step 2.b
+    if hnb == 0 and np.isreal(poles[0]):
+        update_order[0].append(1)
+        update_order[1].append(1)
+    update_order[0].extend(r_comp)
+    update_order[1].extend(r_comp+1)
+    # step 2.c
+    r_j = np.arange(2, hnb+nb_real % 2)
+    for j in r_j:
+        for i in range(hnb+1, nb_real+1):
+            idx_1 = i+j
+            if idx_1 > nb_real:
+                idx_1 = i+j-nb_real
+            update_order[0].append(i)
+            update_order[1].append(idx_1)
+    # step 2.d
+    if hnb == 0 and np.isreal(poles[0]):
+        update_order[0].append(1)
+        update_order[1].append(1)
+    update_order[0].extend(r_comp)
+    update_order[1].extend(r_comp+1)
+    # step 3.a
+    for i in range(1, hnb+1):
+        update_order[0].append(i)
+        update_order[1].append(i+hnb)
+    # step 3.b
+    if hnb == 0 and np.isreal(poles[0]):
+        update_order[0].append(1)
+        update_order[1].append(1)
+    update_order[0].extend(r_comp)
+    update_order[1].extend(r_comp+1)
+
+    update_order = np.array(update_order).T-1
+    stop = False
+    nb_try = 0
+    while nb_try < maxiter and not stop:
+        det_transfer_matrixb = np.abs(np.linalg.det(transfer_matrix))
+        for i, j in update_order:
+            if i == j:
+                assert i == 0, "i!=0 for KNV call in YT"
+                assert np.isreal(poles[i]), "calling KNV on a complex pole"
+                _KNV0(B, ker_pole, transfer_matrix, i, poles)
+            else:
+                transfer_matrix_not_i_j = np.delete(transfer_matrix, (i, j),
+                                                    axis=1)
+                # after merge of gh-4249 great speed improvements could be
+                # achieved using QR updates instead of full QR in the line below
+
+                #to debug with numpy qr uncomment the line below
+                #Q, _ = np.linalg.qr(transfer_matrix_not_i_j, mode="complete")
+                Q, _ = s_qr(transfer_matrix_not_i_j, mode="full")
+
+                if np.isreal(poles[i]):
+                    assert np.isreal(poles[j]), "mixing real and complex " + \
+                        "in YT_real" + str(poles)
+                    _YT_real(ker_pole, Q, transfer_matrix, i, j)
+                else:
+                    # the pair must be complex; check the partner pole too
+                    assert ~np.isreal(poles[j]), "mixing real and complex " + \
+                        "in YT_complex" + str(poles)
+                    _YT_complex(ker_pole, Q, transfer_matrix, i, j)
+
+        det_transfer_matrix = np.max((np.sqrt(np.spacing(1)),
+                                  np.abs(np.linalg.det(transfer_matrix))))
+        cur_rtol = np.abs(
+            (det_transfer_matrix -
+             det_transfer_matrixb) /
+            det_transfer_matrix)
+        if cur_rtol < rtol and det_transfer_matrix > np.sqrt(np.spacing(1)):
+            # Convergence test from YT page 21
+            stop = True
+        nb_try += 1
+    return stop, cur_rtol, nb_try
+
+
+def _KNV0_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol):
+    """
+    Loop over all poles one by one and apply KNV method 0 algorithm
+    """
+    # This method is useful only because we need to be able to call
+    # _KNV0 from YT without looping over all poles, otherwise it would
+    # have been fine to mix _KNV0_loop and _KNV0 in a single function
+    stop = False
+    nb_try = 0
+    while nb_try < maxiter and not stop:
+        det_transfer_matrixb = np.abs(np.linalg.det(transfer_matrix))
+        for j in range(B.shape[0]):
+            _KNV0(B, ker_pole, transfer_matrix, j, poles)
+
+        det_transfer_matrix = np.max((np.sqrt(np.spacing(1)),
+                                  np.abs(np.linalg.det(transfer_matrix))))
+        cur_rtol = np.abs((det_transfer_matrix - det_transfer_matrixb) /
+                       det_transfer_matrix)
+        if cur_rtol < rtol and det_transfer_matrix > np.sqrt(np.spacing(1)):
+            # Convergence test from YT page 21
+            stop = True
+
+        nb_try += 1
+    return stop, cur_rtol, nb_try
+
+
+def place_poles(A, B, poles, method="YT", rtol=1e-3, maxiter=30):
+    """
+    Compute K such that eigenvalues(A - dot(B, K)) == poles.
+
+    K is the gain matrix such that the plant described by the linear system
+    ``AX+BU`` will have its closed-loop poles, i.e. the eigenvalues of
+    ``A - B*K``, as close as possible to those asked for in poles.
+
+    SISO, MISO and MIMO systems are supported.
+
+    Parameters
+    ----------
+    A, B : ndarray
+        State-space representation of linear system ``AX + BU``.
+    poles : array_like
+        Desired real poles and/or complex conjugates poles.
+        Complex poles are only supported with ``method="YT"`` (default).
+    method: {'YT', 'KNV0'}, optional
+        Which method to choose to find the gain matrix K. One of:
+
+            - 'YT': Yang Tits
+            - 'KNV0': Kautsky, Nichols, Van Dooren update method 0
+
+        See References and Notes for details on the algorithms.
+    rtol: float, optional
+        After each iteration the determinant of the eigenvectors of
+        ``A - B*K`` is compared to its previous value, when the relative
+        error between these two values becomes lower than `rtol` the algorithm
+        stops.  Default is 1e-3.
+    maxiter: int, optional
+        Maximum number of iterations to compute the gain matrix.
+        Default is 30.
+
+    Returns
+    -------
+    full_state_feedback : Bunch object
+        full_state_feedback is composed of:
+            gain_matrix : 1-D ndarray
+                The closed-loop gain matrix K such that the eigenvalues of
+                ``A-BK`` are as close as possible to the requested poles.
+            computed_poles : 1-D ndarray
+                The poles corresponding to ``A-BK`` sorted as first the real
+                poles in increasing order, then the complex conjugates in
+                lexicographic order.
+            requested_poles : 1-D ndarray
+                The poles the algorithm was asked to place, sorted as above;
+                they may differ from what was achieved.
+            X : 2-D ndarray
+                The transfer matrix such that ``X * diag(poles) = (A - B*K)*X``
+                (see Notes)
+            rtol : float
+                The relative tolerance achieved on ``det(X)`` (see Notes).
+                `rtol` will be NaN if it is possible to solve the system
+                ``diag(poles) = (A - B*K)``, or 0 when the optimization
+                algorithms can't do anything, i.e. when ``B.shape[1] == 1``.
+            nb_iter : int
+                The number of iterations performed before converging.
+                `nb_iter` will be NaN if it is possible to solve the system
+                ``diag(poles) = (A - B*K)``, or 0 when the optimization
+                algorithms can't do anything, i.e. when ``B.shape[1] == 1``.
+
+    Notes
+    -----
+    The Tits and Yang (YT), [2]_ paper is an update of the original Kautsky et
+    al. (KNV) paper [1]_.  KNV relies on rank-1 updates to find the transfer
+    matrix X such that ``X * diag(poles) = (A - B*K)*X``, whereas YT uses
+    rank-2 updates. This yields on average more robust solutions (see [2]_
+    pp 21-22), furthermore the YT algorithm supports complex poles whereas KNV
+    does not in its original version.  Only update method 0 proposed by KNV has
+    been implemented here, hence the name ``'KNV0'``.
+
+    KNV extended to complex poles is used in Matlab's ``place`` function, YT is
+    distributed under a non-free licence by Slicot under the name ``robpole``.
+    It is unclear and undocumented how KNV0 has been extended to complex poles
+    (Tits and Yang claim on page 14 of their paper that their method cannot be
+    used to extend KNV to complex poles), therefore only YT supports them in
+    this implementation.
+
+    As the solution to the problem of pole placement is not unique for MIMO
+    systems, both methods start with a tentative transfer matrix which is
+    altered in various ways to increase its determinant.  Both methods have
+    been proven to converge to a stable solution; however, depending on the
+    way the initial transfer matrix is chosen, they will converge to different
+    solutions, and therefore there is absolutely no guarantee that using
+    ``'KNV0'`` will yield results similar to Matlab's or any other
+    implementation of these algorithms.
+
+    Using the default method ``'YT'`` should be fine in most cases; ``'KNV0'``
+    is only provided because it is needed by ``'YT'`` in some specific cases.
+    Furthermore ``'YT'`` gives on average more robust results than ``'KNV0'``
+    when ``abs(det(X))`` is used as a robustness indicator.
+
+    [2]_ is available as a technical report on the following URL:
+    https://hdl.handle.net/1903/5598
+
+    References
+    ----------
+    .. [1] J. Kautsky, N.K. Nichols and P. van Dooren, "Robust pole assignment
+           in linear state feedback", International Journal of Control, Vol. 41
+           pp. 1129-1155, 1985.
+    .. [2] A.L. Tits and Y. Yang, "Globally convergent algorithms for robust
+           pole assignment by state feedback", IEEE Transactions on Automatic
+           Control, Vol. 41, pp. 1432-1452, 1996.
+
+    Examples
+    --------
+    A simple example demonstrating real pole placement using both KNV and YT
+    algorithms.  This is example number 1 from section 4 of the reference KNV
+    publication ([1]_):
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+
+    >>> A = np.array([[ 1.380,  -0.2077,  6.715, -5.676  ],
+    ...               [-0.5814, -4.290,   0,      0.6750 ],
+    ...               [ 1.067,   4.273,  -6.654,  5.893  ],
+    ...               [ 0.0480,  4.273,   1.343, -2.104  ]])
+    >>> B = np.array([[ 0,      5.679 ],
+    ...               [ 1.136,  1.136 ],
+    ...               [ 0,      0,    ],
+    ...               [-3.146,  0     ]])
+    >>> P = np.array([-0.2, -0.5, -5.0566, -8.6659])
+
+    Now compute K with KNV method 0, with the default YT method and with the YT
+    method while forcing 100 iterations of the algorithm and print some results
+    after each call.
+
+    >>> fsf1 = signal.place_poles(A, B, P, method='KNV0')
+    >>> fsf1.gain_matrix
+    array([[ 0.20071427, -0.96665799,  0.24066128, -0.10279785],
+           [ 0.50587268,  0.57779091,  0.51795763, -0.41991442]])
+
+    >>> fsf2 = signal.place_poles(A, B, P)  # uses YT method
+    >>> fsf2.computed_poles
+    array([-8.6659, -5.0566, -0.5   , -0.2   ])
+
+    >>> fsf3 = signal.place_poles(A, B, P, rtol=-1, maxiter=100)
+    >>> fsf3.X
+    array([[ 0.52072442+0.j, -0.08409372+0.j, -0.56847937+0.j,  0.74823657+0.j],
+           [-0.04977751+0.j, -0.80872954+0.j,  0.13566234+0.j, -0.29322906+0.j],
+           [-0.82266932+0.j, -0.19168026+0.j, -0.56348322+0.j, -0.43815060+0.j],
+           [ 0.22267347+0.j,  0.54967577+0.j, -0.58387806+0.j, -0.40271926+0.j]])
+
+    The absolute value of the determinant of X is a good indicator to check the
+    robustness of the results, both ``'KNV0'`` and ``'YT'`` aim at maximizing
+    it.  Below a comparison of the robustness of the results above:
+
+    >>> abs(np.linalg.det(fsf1.X)) < abs(np.linalg.det(fsf2.X))
+    True
+    >>> abs(np.linalg.det(fsf2.X)) < abs(np.linalg.det(fsf3.X))
+    True
+
+    Now a simple example for complex poles:
+
+    >>> A = np.array([[ 0,  7/3.,  0,   0   ],
+    ...               [ 0,   0,    0,  7/9. ],
+    ...               [ 0,   0,    0,   0   ],
+    ...               [ 0,   0,    0,   0   ]])
+    >>> B = np.array([[ 0,  0 ],
+    ...               [ 0,  0 ],
+    ...               [ 1,  0 ],
+    ...               [ 0,  1 ]])
+    >>> P = np.array([-3, -1, -2-1j, -2+1j]) / 3.
+    >>> fsf = signal.place_poles(A, B, P, method='YT')
+
+    We can plot the desired and computed poles in the complex plane:
+
+    >>> t = np.linspace(0, 2*np.pi, 401)
+    >>> plt.plot(np.cos(t), np.sin(t), 'k--')  # unit circle
+    >>> plt.plot(fsf.requested_poles.real, fsf.requested_poles.imag,
+    ...          'wo', label='Desired')
+    >>> plt.plot(fsf.computed_poles.real, fsf.computed_poles.imag, 'bx',
+    ...          label='Placed')
+    >>> plt.grid()
+    >>> plt.axis('image')
+    >>> plt.axis([-1.1, 1.1, -1.1, 1.1])
+    >>> plt.legend(bbox_to_anchor=(1.05, 1), loc=2, numpoints=1)
+
+    """
+    # Input checking is kept out in _valid_inputs, as inlining it here
+    # would only add noise to the code
+    update_loop, poles = _valid_inputs(A, B, poles, method, rtol, maxiter)
+
+    # The current value of the relative tolerance we achieved
+    cur_rtol = 0
+    # The number of iterations needed before converging
+    nb_iter = 0
+
+    # Step A: QR decomposition of B page 1132 KN
+    # to debug with numpy qr uncomment the line below
+    # u, z = np.linalg.qr(B, mode="complete")
+    u, z = s_qr(B, mode="full")
+    rankB = np.linalg.matrix_rank(B)
+    u0 = u[:, :rankB]
+    u1 = u[:, rankB:]
+    z = z[:rankB, :]
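+    # u0 spans the range of B, u1 its orthogonal complement; z keeps the
+    # corresponding (nonzero) rows of the triangular factor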
+
+    # If we can use the identity matrix as X the solution is obvious
+    if B.shape[0] == rankB:
+        # if B is square and full rank there is only one solution,
+        # such that (A+BK)=inv(X)*diag(P)*X with X=eye(A.shape[0]),
+        # i.e. K=inv(B)*(diag(P)-A)
+        # if B has as many rows as its rank (but is not square) there are
+        # many solutions and we can choose one using least squares
+        # => use lstsq in both cases
+        # In both cases the transfer matrix X will be eye(A.shape[0]) and I
+        # can hardly think of a better one so there is nothing to optimize
+        #
+        # for complex poles we use the following trick
+        #
+        # |a -b| has eigenvalues a+bi and a-bi
+        # |b  a|
+        #
+        # |a+bi 0   | has the obvious eigenvalues a+bi and a-bi
+        # |0    a-bi|
+        #
+        # e.g. solving the first one in R gives the solution
+        # for the second one in C
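+        #
+        # (quick check of the trick: [[0, -1], [1, 0]] has eigenvalues
+        # +1j and -1j, matching the conjugate pair 0 +/- 1j)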
+        diag_poles = np.zeros(A.shape)
+        idx = 0
+        while idx < poles.shape[0]:
+            p = poles[idx]
+            diag_poles[idx, idx] = np.real(p)
+            if ~np.isreal(p):
+                diag_poles[idx, idx+1] = -np.imag(p)
+                diag_poles[idx+1, idx+1] = np.real(p)
+                diag_poles[idx+1, idx] = np.imag(p)
+                idx += 1  # skip next one
+            idx += 1
+        gain_matrix = np.linalg.lstsq(B, diag_poles-A, rcond=-1)[0]
+        transfer_matrix = np.eye(A.shape[0])
+        cur_rtol = np.nan
+        nb_iter = np.nan
+    else:
+        # step A (p1144 KNV) and beginning of step F: decompose
+        # dot(U1.T, A-P[i]*I).T and build our set of transfer_matrix vectors
+        # in the same loop
+        ker_pole = []
+
+        # flag to skip the conjugate of a complex pole
+        skip_conjugate = False
+        # select orthonormal base ker_pole for each Pole and vectors for
+        # transfer_matrix
+        for j in range(B.shape[0]):
+            if skip_conjugate:
+                skip_conjugate = False
+                continue
+            pole_space_j = np.dot(u1.T, A-poles[j]*np.eye(B.shape[0])).T
+
+            # after QR, Q=Q0|Q1
+            # only Q0 is used to reconstruct the QR'ed (dot(Q, R)) matrix.
+            # Q1 is orthogonal to Q0 and will be multiplied by the zeros in
+            # R when using mode "complete". In default mode Q1 and the zeros
+            # in R are not computed
+
+            # To debug with numpy qr uncomment the line below
+            # Q, _ = np.linalg.qr(pole_space_j, mode="complete")
+            Q, _ = s_qr(pole_space_j, mode="full")
+
+            ker_pole_j = Q[:, pole_space_j.shape[1]:]
+
+            # We want to select one vector in ker_pole_j to build the transfer
+            # matrix.  However, qr sometimes returns vectors with zeros on
+            # the same row for each pole, and this yields very long
+            # convergence times.
+            # Other times it returns a set of vectors, one with zero imaginary
+            # part and one (or several) with nonzero imaginary parts.  After
+            # trying many ways to select the best possible one (e.g. ditch
+            # vectors with zero imaginary part for complex poles) I ended up
+            # summing all the vectors in ker_pole_j; this solves 100% of the
+            # problems and is a valid choice for transfer_matrix.
+            # This way, for complex poles we are sure to have a nonzero
+            # imaginary part, and the problem of rows full of zeros in
+            # transfer_matrix is solved too: when one vector from ker_pole_j
+            # has a zero, the other one(s) (when ker_pole_j.shape[1] > 1)
+            # for sure won't have a zero there.
+
+            transfer_matrix_j = np.sum(ker_pole_j, axis=1)[:, np.newaxis]
+            transfer_matrix_j = (transfer_matrix_j /
+                                 np.linalg.norm(transfer_matrix_j))
+            if ~np.isreal(poles[j]):  # complex pole
+                transfer_matrix_j = np.hstack([np.real(transfer_matrix_j),
+                                               np.imag(transfer_matrix_j)])
+                ker_pole.extend([ker_pole_j, ker_pole_j])
+
+                # Skip next pole as it is the conjugate
+                skip_conjugate = True
+            else:  # real pole, nothing to do
+                ker_pole.append(ker_pole_j)
+
+            if j == 0:
+                transfer_matrix = transfer_matrix_j
+            else:
+                transfer_matrix = np.hstack((transfer_matrix, transfer_matrix_j))
+
+        if rankB > 1:  # otherwise there is nothing we can optimize
+            stop, cur_rtol, nb_iter = update_loop(ker_pole, transfer_matrix,
+                                                  poles, B, maxiter, rtol)
+            if not stop and rtol > 0:
+                # if rtol<=0 the user has probably done that on purpose;
+                # don't warn in that case
+                err_msg = (
+                    "Convergence was not reached after maxiter iterations.\n"
+                    "You asked for a relative tolerance of %f, we got %f" %
+                    (rtol, cur_rtol)
+                    )
+                warnings.warn(err_msg)
+
+        # reconstruct transfer_matrix to match complex conjugate pairs,
+        # i.e. transfer_matrix_j/transfer_matrix_j+1 are
+        # Re(complex_pole), Im(complex_pole) now and will be Re-Im/Re+Im after
+        transfer_matrix = transfer_matrix.astype(complex)
+        idx = 0
+        while idx < poles.shape[0]-1:
+            if ~np.isreal(poles[idx]):
+                rel = transfer_matrix[:, idx].copy()
+                img = transfer_matrix[:, idx+1]
+                # without the copy(), rel would be a view into a column of
+                # transfer_matrix; it would change after the next line and
+                # the line after would then not yield the correct value
+                transfer_matrix[:, idx] = rel-1j*img
+                transfer_matrix[:, idx+1] = rel+1j*img
+                idx += 1  # skip next one
+            idx += 1
+
+        try:
+            m = np.linalg.solve(transfer_matrix.T, np.dot(np.diag(poles),
+                                                          transfer_matrix.T)).T
+            gain_matrix = np.linalg.solve(z, np.dot(u0.T, m-A))
+        except np.linalg.LinAlgError as e:
+            raise ValueError("The poles you've chosen can't be placed. "
+                             "Check the controllability matrix and try "
+                             "another set of poles") from e
+
+    # Beware: Kautsky solves A+BK but the usual form is A-BK
+    gain_matrix = -gain_matrix
+    # K still contains complex values with ~0j imaginary parts; get rid of them
+    gain_matrix = np.real(gain_matrix)
+
+    full_state_feedback = Bunch()
+    full_state_feedback.gain_matrix = gain_matrix
+    full_state_feedback.computed_poles = _order_complex_poles(
+        np.linalg.eig(A - np.dot(B, gain_matrix))[0]
+        )
+    full_state_feedback.requested_poles = poles
+    full_state_feedback.X = transfer_matrix
+    full_state_feedback.rtol = cur_rtol
+    full_state_feedback.nb_iter = nb_iter
+
+    return full_state_feedback
+
+
+def dlsim(system, u, t=None, x0=None):
+    """
+    Simulate output of a discrete-time linear system.
+
+    Parameters
+    ----------
+    system : tuple of array_like or instance of `dlti`
+        A tuple describing the system.
+        The following gives the number of elements in the tuple and
+        the interpretation:
+
+            * 1: (instance of `dlti`)
+            * 3: (num, den, dt)
+            * 4: (zeros, poles, gain, dt)
+            * 5: (A, B, C, D, dt)
+
+    u : array_like
+        An input array describing the input at each time `t` (interpolation is
+        assumed between given times).  If there are multiple inputs, then each
+        column of the rank-2 array represents an input.
+    t : array_like, optional
+        The time steps at which the input is defined.  If `t` is given, it
+        must be the same length as `u`, and the final value in `t` determines
+        the number of steps returned in the output.
+    x0 : array_like, optional
+        The initial conditions on the state vector (zero by default).
+
+    Returns
+    -------
+    tout : ndarray
+        Time values for the output, as a 1-D array.
+    yout : ndarray
+        System response, as a 1-D array.
+    xout : ndarray, optional
+        Time-evolution of the state-vector.  Only generated if the input is a
+        `StateSpace` system.
+
+    See Also
+    --------
+    lsim, dstep, dimpulse, cont2discrete
+
+    Examples
+    --------
+    A simple integrator transfer function with a discrete time step of 1.0
+    could be implemented as:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> tf = ([1.0,], [1.0, -1.0], 1.0)
+    >>> t_in = [0.0, 1.0, 2.0, 3.0]
+    >>> u = np.asarray([0.0, 0.0, 1.0, 1.0])
+    >>> t_out, y = signal.dlsim(tf, u, t=t_in)
+    >>> y.T
+    array([[ 0.,  0.,  0.,  1.]])
+
+    """
+    # Convert system to dlti-StateSpace
+    if isinstance(system, lti):
+        raise AttributeError('dlsim can only be used with discrete-time dlti '
+                             'systems.')
+    elif not isinstance(system, dlti):
+        system = dlti(*system[:-1], dt=system[-1])
+
+    # Condition needed to ensure output remains compatible
+    is_ss_input = isinstance(system, StateSpace)
+    system = system._as_ss()
+
+    u = np.atleast_1d(u)
+
+    if u.ndim == 1:
+        u = np.atleast_2d(u).T
+
+    if t is None:
+        out_samples = len(u)
+        stoptime = (out_samples - 1) * system.dt
+    else:
+        stoptime = t[-1]
+        out_samples = int(np.floor(stoptime / system.dt)) + 1
+
+    # Pre-build output arrays
+    xout = np.zeros((out_samples, system.A.shape[0]))
+    yout = np.zeros((out_samples, system.C.shape[0]))
+    tout = np.linspace(0.0, stoptime, num=out_samples)
+
+    # Check initial condition
+    if x0 is None:
+        xout[0, :] = np.zeros((system.A.shape[1],))
+    else:
+        xout[0, :] = np.asarray(x0)
+
+    # Pre-interpolate inputs into the desired time steps
+    if t is None:
+        u_dt = u
+    else:
+        if len(u.shape) == 1:
+            u = u[:, np.newaxis]
+
+        u_dt_interp = interp1d(t, u.transpose(), copy=False, bounds_error=True)
+        u_dt = u_dt_interp(tout).transpose()
+
+    # Simulate the system
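+    # using the standard discrete-time state-space recurrence:
+    #   x[k+1] = A x[k] + B u[k],   y[k] = C x[k] + D u[k]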
+    for i in range(0, out_samples - 1):
+        xout[i+1, :] = (np.dot(system.A, xout[i, :]) +
+                        np.dot(system.B, u_dt[i, :]))
+        yout[i, :] = (np.dot(system.C, xout[i, :]) +
+                      np.dot(system.D, u_dt[i, :]))
+
+    # Last point
+    yout[out_samples-1, :] = (np.dot(system.C, xout[out_samples-1, :]) +
+                              np.dot(system.D, u_dt[out_samples-1, :]))
+
+    if is_ss_input:
+        return tout, yout, xout
+    else:
+        return tout, yout
+
+
+def dimpulse(system, x0=None, t=None, n=None):
+    """
+    Impulse response of discrete-time system.
+
+    Parameters
+    ----------
+    system : tuple of array_like or instance of `dlti`
+        A tuple describing the system.
+        The following gives the number of elements in the tuple and
+        the interpretation:
+
+            * 1: (instance of `dlti`)
+            * 3: (num, den, dt)
+            * 4: (zeros, poles, gain, dt)
+            * 5: (A, B, C, D, dt)
+
+    x0 : array_like, optional
+        Initial state-vector.  Defaults to zero.
+    t : array_like, optional
+        Time points.  Computed if not given.
+    n : int, optional
+        The number of time points to compute (if `t` is not given).
+
+    Returns
+    -------
+    tout : ndarray
+        Time values for the output, as a 1-D array.
+    yout : tuple of ndarray
+        Impulse response of system.  Each element of the tuple represents
+        the output of the system based on an impulse in each input.
+
+    See Also
+    --------
+    impulse, dstep, dlsim, cont2discrete
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+
+    >>> butter = signal.dlti(*signal.butter(3, 0.5))
+    >>> t, y = signal.dimpulse(butter, n=25)
+    >>> plt.step(t, np.squeeze(y))
+    >>> plt.grid()
+    >>> plt.xlabel('n [samples]')
+    >>> plt.ylabel('Amplitude')
+    >>> plt.show()
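+
+    The integrator from the `dlsim` example (``num=[1]``, ``den=[1, -1]``,
+    ``dt=1``) responds to a unit impulse with a step delayed by one sample:
+
+    >>> t, y = signal.dimpulse(([1.0], [1.0, -1.0], 1.0), n=4)
+    >>> np.squeeze(y)
+    array([0., 1., 1., 1.])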
+
+    """
+    # Convert system to dlti-StateSpace
+    if isinstance(system, dlti):
+        system = system._as_ss()
+    elif isinstance(system, lti):
+        raise AttributeError('dimpulse can only be used with discrete-time '
+                             'dlti systems.')
+    else:
+        system = dlti(*system[:-1], dt=system[-1])._as_ss()
+
+    # Default to 100 samples if unspecified
+    if n is None:
+        n = 100
+
+    # If time is not specified, use the number of samples
+    # and system dt
+    if t is None:
+        t = np.linspace(0, n * system.dt, n, endpoint=False)
+    else:
+        t = np.asarray(t)
+
+    # For each input, implement a step change
+    yout = None
+    for i in range(0, system.inputs):
+        u = np.zeros((t.shape[0], system.inputs))
+        u[0, i] = 1.0
+
+        one_output = dlsim(system, u, t=t, x0=x0)
+
+        if yout is None:
+            yout = (one_output[1],)
+        else:
+            yout = yout + (one_output[1],)
+
+        tout = one_output[0]
+
+    return tout, yout
+
+
+def dstep(system, x0=None, t=None, n=None):
+    """
+    Step response of discrete-time system.
+
+    Parameters
+    ----------
+    system : tuple of array_like or instance of `dlti`
+        A tuple describing the system.
+        The following gives the number of elements in the tuple and
+        the interpretation:
+
+            * 1: (instance of `dlti`)
+            * 3: (num, den, dt)
+            * 4: (zeros, poles, gain, dt)
+            * 5: (A, B, C, D, dt)
+
+    x0 : array_like, optional
+        Initial state-vector.  Defaults to zero.
+    t : array_like, optional
+        Time points.  Computed if not given.
+    n : int, optional
+        The number of time points to compute (if `t` is not given).
+
+    Returns
+    -------
+    tout : ndarray
+        Output time points, as a 1-D array.
+    yout : tuple of ndarray
+        Step response of system.  Each element of the tuple represents
+        the output of the system for a step applied to the corresponding
+        input.
+
+    See Also
+    --------
+    step, dimpulse, dlsim, cont2discrete
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+
+    >>> butter = signal.dlti(*signal.butter(3, 0.5))
+    >>> t, y = signal.dstep(butter, n=25)
+    >>> plt.step(t, np.squeeze(y))
+    >>> plt.grid()
+    >>> plt.xlabel('n [samples]')
+    >>> plt.ylabel('Amplitude')
+    >>> plt.show()
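+
+    The integrator from the `dlsim` example turns a step into a ramp:
+
+    >>> t, y = signal.dstep(([1.0], [1.0, -1.0], 1.0), n=4)
+    >>> np.squeeze(y)
+    array([0., 1., 2., 3.])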
+    """
+    # Convert system to dlti-StateSpace
+    if isinstance(system, dlti):
+        system = system._as_ss()
+    elif isinstance(system, lti):
+        raise AttributeError('dstep can only be used with discrete-time dlti '
+                             'systems.')
+    else:
+        system = dlti(*system[:-1], dt=system[-1])._as_ss()
+
+    # Default to 100 samples if unspecified
+    if n is None:
+        n = 100
+
+    # If time is not specified, use the number of samples
+    # and system dt
+    if t is None:
+        t = np.linspace(0, n * system.dt, n, endpoint=False)
+    else:
+        t = np.asarray(t)
+
+    # For each input, implement a step change
+    yout = None
+    for i in range(0, system.inputs):
+        u = np.zeros((t.shape[0], system.inputs))
+        u[:, i] = np.ones((t.shape[0],))
+
+        one_output = dlsim(system, u, t=t, x0=x0)
+
+        if yout is None:
+            yout = (one_output[1],)
+        else:
+            yout = yout + (one_output[1],)
+
+        tout = one_output[0]
+
+    return tout, yout
+
+
+def dfreqresp(system, w=None, n=10000, whole=False):
+    r"""
+    Calculate the frequency response of a discrete-time system.
+
+    Parameters
+    ----------
+    system : an instance of the `dlti` class or a tuple describing the system.
+        The following gives the number of elements in the tuple and
+        the interpretation:
+
+            * 1 (instance of `dlti`)
+            * 3 (numerator, denominator, dt)
+            * 4 (zeros, poles, gain, dt)
+            * 5 (A, B, C, D, dt)
+
+    w : array_like, optional
+        Array of frequencies (in radians/sample). Magnitude and phase data
+        are calculated for every value in this array. If not given, a
+        reasonable set will be calculated.
+    n : int, optional
+        Number of frequency points to compute if `w` is not given. The `n`
+        frequencies are logarithmically spaced in an interval chosen to
+        include the influence of the poles and zeros of the system.
+    whole : bool, optional
+        Normally, if `w` is not given, frequencies are computed from 0 to the
+        Nyquist frequency, pi radians/sample (upper half of the unit circle).
+        If `whole` is True, frequencies are computed from 0 to 2*pi
+        radians/sample.
+
+    Returns
+    -------
+    w : 1D ndarray
+        Frequency array [radians/sample]
+    H : 1D ndarray
+        Array of complex frequency response values
+
+    Notes
+    -----
+    If (num, den) is passed in for ``system``, coefficients for both the
+    numerator and denominator should be specified in descending exponent
+    order (e.g. ``z^2 + 3z + 5`` would be represented as ``[1, 3, 5]``).
+
+    .. versionadded:: 0.18.0
+
+    Examples
+    --------
+    Generating the Nyquist plot of a transfer function
+
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+
+    Construct the transfer function
+    :math:`H(z) = \frac{1}{z^2 + 2z + 3}` with a sampling time of 0.05
+    seconds:
+
+    >>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.05)
+
+    >>> w, H = signal.dfreqresp(sys)
+
+    >>> plt.figure()
+    >>> plt.plot(H.real, H.imag, "b")
+    >>> plt.plot(H.real, -H.imag, "r")
+    >>> plt.show()
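+
+    The tuple forms listed above behave identically; internally they are
+    converted with ``dlti(*system[:-1], dt=system[-1])``:
+
+    >>> w, H = signal.dfreqresp(([1], [1, 2, 3], 0.05))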
+
+    """
+    if not isinstance(system, dlti):
+        if isinstance(system, lti):
+            raise AttributeError('dfreqresp can only be used with '
+                                 'discrete-time systems.')
+
+        system = dlti(*system[:-1], dt=system[-1])
+
+    if isinstance(system, StateSpace):
+        # No SS->ZPK code exists right now, just SS->TF->ZPK
+        system = system._as_tf()
+
+    if not isinstance(system, (TransferFunction, ZerosPolesGain)):
+        raise ValueError('Unknown system type')
+
+    if system.inputs != 1 or system.outputs != 1:
+        raise ValueError("dfreqresp requires a SISO (single input, single "
+                         "output) system.")
+
+    if w is not None:
+        worN = w
+    else:
+        worN = n
+
+    if isinstance(system, TransferFunction):
+        # Convert numerator and denominator from polynomials in the variable
+        # 'z' to polynomials in the variable 'z^-1', as freqz expects.
+        num, den = TransferFunction._z_to_zinv(system.num.ravel(), system.den)
+        w, h = freqz(num, den, worN=worN, whole=whole)
+
+    elif isinstance(system, ZerosPolesGain):
+        w, h = freqz_zpk(system.zeros, system.poles, system.gain, worN=worN,
+                         whole=whole)
+
+    return w, h
+
+
+def dbode(system, w=None, n=100):
+    r"""
+    Calculate Bode magnitude and phase data of a discrete-time system.
+
+    Parameters
+    ----------
+    system : an instance of the `dlti` class or a tuple describing the system.
+        The following gives the number of elements in the tuple and
+        the interpretation:
+
+            * 1 (instance of `dlti`)
+            * 3 (num, den, dt)
+            * 4 (zeros, poles, gain, dt)
+            * 5 (A, B, C, D, dt)
+
+    w : array_like, optional
+        Array of frequencies (in radians/sample). Magnitude and phase data
+        are calculated for every value in this array. If not given, a
+        reasonable set will be calculated.
+    n : int, optional
+        Number of frequency points to compute if `w` is not given. The `n`
+        frequencies are logarithmically spaced in an interval chosen to
+        include the influence of the poles and zeros of the system.
+
+    Returns
+    -------
+    w : 1D ndarray
+        Frequency array [rad/time_unit]
+    mag : 1D ndarray
+        Magnitude array [dB]
+    phase : 1D ndarray
+        Phase array [deg]
+
+    Notes
+    -----
+    If (num, den) is passed in for ``system``, coefficients for both the
+    numerator and denominator should be specified in descending exponent
+    order (e.g. ``z^2 + 3z + 5`` would be represented as ``[1, 3, 5]``).
+
+    .. versionadded:: 0.18.0
+
+    Examples
+    --------
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+
+    Construct the transfer function :math:`H(z) = \frac{1}{z^2 + 2z + 3}` with
+    a sampling time of 0.05 seconds:
+
+    >>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.05)
+
+    Equivalent: ``sys.bode()``
+
+    >>> w, mag, phase = signal.dbode(sys)
+
+    >>> plt.figure()
+    >>> plt.semilogx(w, mag)    # Bode magnitude plot
+    >>> plt.figure()
+    >>> plt.semilogx(w, phase)  # Bode phase plot
+    >>> plt.show()
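+
+    Note that the returned frequency array is divided by ``dt`` (see the
+    final ``return`` statement below), so ``w`` is in radians per second
+    here rather than radians per sample:
+
+    >>> w_per_sample = w * 0.05  # convert back to radians/sample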
+
+    """
+    w, y = dfreqresp(system, w=w, n=n)
+
+    if isinstance(system, dlti):
+        dt = system.dt
+    else:
+        dt = system[-1]
+
+    mag = 20.0 * numpy.log10(abs(y))
+    phase = numpy.rad2deg(numpy.unwrap(numpy.angle(y)))
+
+    return w / dt, mag, phase
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/_max_len_seq.py b/__packaged__/coreml/.python_dependencies/scipy/signal/_max_len_seq.py
new file mode 100644
index 00000000..ce730043
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/_max_len_seq.py
@@ -0,0 +1,139 @@
+# Author: Eric Larson
+# 2014
+
+"""Tools for MLS generation"""
+
+import numpy as np
+
+from ._max_len_seq_inner import _max_len_seq_inner
+
+__all__ = ['max_len_seq']
+
+
+# These are definitions of linear shift register taps for use in max_len_seq()
+_mls_taps = {2: [1], 3: [2], 4: [3], 5: [3], 6: [5], 7: [6], 8: [7, 6, 1],
+             9: [5], 10: [7], 11: [9], 12: [11, 10, 4], 13: [12, 11, 8],
+             14: [13, 12, 2], 15: [14], 16: [15, 13, 4], 17: [14],
+             18: [11], 19: [18, 17, 14], 20: [17], 21: [19], 22: [21],
+             23: [18], 24: [23, 22, 17], 25: [22], 26: [25, 24, 20],
+             27: [26, 25, 22], 28: [25], 29: [27], 30: [29, 28, 7],
+             31: [28], 32: [31, 30, 10]}
+
+
+def max_len_seq(nbits, state=None, length=None, taps=None):
+    """
+    Maximum length sequence (MLS) generator.
+
+    Parameters
+    ----------
+    nbits : int
+        Number of bits to use. Length of the resulting sequence will
+        be ``(2**nbits) - 1``. Note that generating long sequences
+        (e.g., greater than ``nbits == 16``) can take a long time.
+    state : array_like, optional
+        If array, must be of length ``nbits``, and will be cast to binary
+        (bool) representation. If None, a seed of ones will be used,
+        producing a repeatable representation. If ``state`` is all
+        zeros, an error is raised as this is invalid. Default: None.
+    length : int, optional
+        Number of samples to compute. If None, the entire length
+        ``(2**nbits) - 1`` is computed.
+    taps : array_like, optional
+        Polynomial taps to use (e.g., ``[7, 6, 1]`` for an 8-bit sequence).
+        If None, taps will be automatically selected (for up to
+        ``nbits == 32``).
+
+    Returns
+    -------
+    seq : array
+        Resulting MLS sequence of 0's and 1's.
+    state : array
+        The final state of the shift register.
+
+    Notes
+    -----
+    The algorithm for MLS generation is generically described in:
+
+        https://en.wikipedia.org/wiki/Maximum_length_sequence
+
+    The default values for taps are specifically taken from the first
+    option listed for each value of ``nbits`` in:
+
+        https://web.archive.org/web/20181001062252/http://www.newwaveinstruments.com/resources/articles/m_sequence_linear_feedback_shift_register_lfsr.htm
+
+    .. versionadded:: 0.15.0
+
+    Examples
+    --------
+    MLS uses binary convention:
+
+    >>> from scipy.signal import max_len_seq
+    >>> max_len_seq(4)[0]
+    array([1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0], dtype=int8)
+
+    MLS has a white spectrum (except for DC):
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from numpy.fft import fft, ifft, fftshift, fftfreq
+    >>> seq = max_len_seq(6)[0]*2-1  # +1 and -1
+    >>> spec = fft(seq)
+    >>> N = len(seq)
+    >>> plt.plot(fftshift(fftfreq(N)), fftshift(np.abs(spec)), '.-')
+    >>> plt.margins(0.1, 0.1)
+    >>> plt.grid(True)
+    >>> plt.show()
+
+    Circular autocorrelation of MLS is an impulse:
+
+    >>> acorrcirc = ifft(spec * np.conj(spec)).real
+    >>> plt.figure()
+    >>> plt.plot(np.arange(-N/2+1, N/2+1), fftshift(acorrcirc), '.-')
+    >>> plt.margins(0.1, 0.1)
+    >>> plt.grid(True)
+    >>> plt.show()
+
+    Linear autocorrelation of MLS is approximately an impulse:
+
+    >>> acorr = np.correlate(seq, seq, 'full')
+    >>> plt.figure()
+    >>> plt.plot(np.arange(-N+1, N), acorr, '.-')
+    >>> plt.margins(0.1, 0.1)
+    >>> plt.grid(True)
+    >>> plt.show()
+
+    """
+    taps_dtype = np.int32 if np.intp().itemsize == 4 else np.int64
+    if taps is None:
+        if nbits not in _mls_taps:
+            known_taps = np.array(list(_mls_taps.keys()))
+            raise ValueError('nbits must be between %s and %s if taps is None'
+                             % (known_taps.min(), known_taps.max()))
+        taps = np.array(_mls_taps[nbits], taps_dtype)
+    else:
+        taps = np.unique(np.array(taps, taps_dtype))[::-1]
+        if np.any(taps < 0) or np.any(taps > nbits) or taps.size < 1:
+            raise ValueError('taps must be non-empty with values between '
+                             'zero and nbits (inclusive)')
+        taps = np.array(taps)  # needed for Cython and Pythran
+    n_max = (2**nbits) - 1
+    if length is None:
+        length = n_max
+    else:
+        length = int(length)
+        if length < 0:
+            raise ValueError('length must be greater than or equal to 0')
+    # We use int8 instead of bool here because NumPy arrays of bools
+    # don't seem to work nicely with Cython
+    if state is None:
+        state = np.ones(nbits, dtype=np.int8, order='c')
+    else:
+        # makes a copy if need be, ensuring it's 0's and 1's
+        state = np.array(state, dtype=bool, order='c').astype(np.int8)
+    if state.ndim != 1 or state.size != nbits:
+        raise ValueError('state must be a 1-D array of size nbits')
+    if np.all(state == 0):
+        raise ValueError('state must not be all zeros')
+
+    seq = np.empty(length, dtype=np.int8, order='c')
+    state = _max_len_seq_inner(taps, state, nbits, length, seq)
+    return seq, state
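+
+
+# For reference, a pure-Python sketch of the ring-buffer LFSR that the Cython
+# helper `_max_len_seq_inner` implements. This is illustrative only; the
+# indexing convention shown is an assumption, though it reproduces the
+# documented ``max_len_seq(4)`` doctest output above:
+#
+#     def _lfsr_sketch(taps, state, nbits, length):
+#         seq = []
+#         idx = 0
+#         for _ in range(length):
+#             feedback = state[idx]
+#             seq.append(feedback)              # output bit
+#             for tap in taps:                  # XOR the tapped bits
+#                 feedback ^= state[(tap + idx) % nbits]
+#             state[idx] = feedback             # write back, advance ring
+#             idx = (idx + 1) % nbits
+#         return seq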
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/_peak_finding.py b/__packaged__/coreml/.python_dependencies/scipy/signal/_peak_finding.py
new file mode 100644
index 00000000..42c20735
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/_peak_finding.py
@@ -0,0 +1,1311 @@
+"""
+Functions for identifying peaks in signals.
+"""
+import math
+import numpy as np
+
+from scipy.signal._wavelets import cwt, ricker
+from scipy.stats import scoreatpercentile
+
+from ._peak_finding_utils import (
+    _local_maxima_1d,
+    _select_by_peak_distance,
+    _peak_prominences,
+    _peak_widths
+)
+
+
+__all__ = ['argrelmin', 'argrelmax', 'argrelextrema', 'peak_prominences',
+           'peak_widths', 'find_peaks', 'find_peaks_cwt']
+
+
+def _boolrelextrema(data, comparator, axis=0, order=1, mode='clip'):
+    """
+    Calculate the relative extrema of `data`.
+
+    Relative extrema are calculated by finding locations where
+    ``comparator(data[n], data[n+1:n+order+1])`` is True.
+
+    Parameters
+    ----------
+    data : ndarray
+        Array in which to find the relative extrema.
+    comparator : callable
+        Function to use to compare two data points.
+        Should take two arrays as arguments.
+    axis : int, optional
+        Axis over which to select from `data`. Default is 0.
+    order : int, optional
+        How many points on each side to use for the comparison
+        to consider ``comparator(n,n+x)`` to be True.
+    mode : str, optional
+        How the edges of the vector are treated. 'wrap' (wrap around) or
+        'clip' (treat overflow as the same as the last (or first) element).
+        Default is 'clip'. See `numpy.take`.
+
+    Returns
+    -------
+    extrema : ndarray
+        Boolean array of the same shape as `data` that is True at an extrema,
+        False otherwise.
+
+    See also
+    --------
+    argrelmax, argrelmin
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> testdata = np.array([1,2,3,2,1])
+    >>> _boolrelextrema(testdata, np.greater, axis=0)
+    array([False, False,  True, False, False])
+
+    """
+    if (int(order) != order) or (order < 1):
+        raise ValueError('Order must be an int >= 1')
+
+    datalen = data.shape[axis]
+    locs = np.arange(0, datalen)
+
+    results = np.ones(data.shape, dtype=bool)
+    main = data.take(locs, axis=axis, mode=mode)
+    for shift in range(1, order + 1):
+        plus = data.take(locs + shift, axis=axis, mode=mode)
+        minus = data.take(locs - shift, axis=axis, mode=mode)
+        results &= comparator(main, plus)
+        results &= comparator(main, minus)
+        if not results.any():
+            return results
+    return results
+
+
+def argrelmin(data, axis=0, order=1, mode='clip'):
+    """
+    Calculate the relative minima of `data`.
+
+    Parameters
+    ----------
+    data : ndarray
+        Array in which to find the relative minima.
+    axis : int, optional
+        Axis over which to select from `data`. Default is 0.
+    order : int, optional
+        How many points on each side to use for the comparison
+        to consider ``comparator(n, n+x)`` to be True.
+    mode : str, optional
+        How the edges of the vector are treated.
+        Available options are 'wrap' (wrap around) or 'clip' (treat overflow
+        as the same as the last (or first) element).
+        Default is 'clip'. See `numpy.take`.
+
+    Returns
+    -------
+    extrema : tuple of ndarrays
+        Indices of the minima in arrays of integers. ``extrema[k]`` is
+        the array of indices of axis `k` of `data`. Note that the
+        return value is a tuple even when `data` is 1-D.
+
+    See Also
+    --------
+    argrelextrema, argrelmax, find_peaks
+
+    Notes
+    -----
+    This function uses `argrelextrema` with np.less as comparator. Therefore,
+    it requires a strict inequality on both sides of a value to consider it a
+    minimum. This means flat minima (more than one sample wide) are not
+    detected. For 1-D `data`, `find_peaks` can be used to detect all local
+    minima, including flat ones, by calling it with negated `data`.
+
+    .. versionadded:: 0.11.0
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.signal import argrelmin
+    >>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0])
+    >>> argrelmin(x)
+    (array([1, 5]),)
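+
+    As noted above, `find_peaks` on the negated signal finds the same minima
+    (and would also report flat ones):
+
+    >>> from scipy.signal import find_peaks
+    >>> find_peaks(-x)[0]
+    array([1, 5])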
+    >>> y = np.array([[1, 2, 1, 2],
+    ...               [2, 2, 0, 0],
+    ...               [5, 3, 4, 4]])
+    ...
+    >>> argrelmin(y, axis=1)
+    (array([0, 2]), array([2, 1]))
+
+    """
+    return argrelextrema(data, np.less, axis, order, mode)
+
+
+def argrelmax(data, axis=0, order=1, mode='clip'):
+    """
+    Calculate the relative maxima of `data`.
+
+    Parameters
+    ----------
+    data : ndarray
+        Array in which to find the relative maxima.
+    axis : int, optional
+        Axis over which to select from `data`. Default is 0.
+    order : int, optional
+        How many points on each side to use for the comparison
+        to consider ``comparator(n, n+x)`` to be True.
+    mode : str, optional
+        How the edges of the vector are treated.
+        Available options are 'wrap' (wrap around) or 'clip' (treat overflow
+        as the same as the last (or first) element).
+        Default 'clip'. See `numpy.take`.
+
+    Returns
+    -------
+    extrema : tuple of ndarrays
+        Indices of the maxima in arrays of integers. ``extrema[k]`` is
+        the array of indices of axis `k` of `data`. Note that the
+        return value is a tuple even when `data` is 1-D.
+
+    See Also
+    --------
+    argrelextrema, argrelmin, find_peaks
+
+    Notes
+    -----
+    This function uses `argrelextrema` with np.greater as comparator.
+    Therefore, it requires a strict inequality on both sides of a value to
+    consider it a maximum. This means flat maxima (more than one sample wide)
+    are not detected. For 1-D `data`, `find_peaks` can be used to detect all
+    local maxima, including flat ones.
+
+    .. versionadded:: 0.11.0
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.signal import argrelmax
+    >>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0])
+    >>> argrelmax(x)
+    (array([3, 6]),)
+    >>> y = np.array([[1, 2, 1, 2],
+    ...               [2, 2, 0, 0],
+    ...               [5, 3, 4, 4]])
+    ...
+    >>> argrelmax(y, axis=1)
+    (array([0]), array([1]))
+    """
+    return argrelextrema(data, np.greater, axis, order, mode)
+
+
+def argrelextrema(data, comparator, axis=0, order=1, mode='clip'):
+    """
+    Calculate the relative extrema of `data`.
+
+    Parameters
+    ----------
+    data : ndarray
+        Array in which to find the relative extrema.
+    comparator : callable
+        Function to use to compare two data points.
+        Should take two arrays as arguments.
+    axis : int, optional
+        Axis over which to select from `data`. Default is 0.
+    order : int, optional
+        How many points on each side to use for the comparison
+        to consider ``comparator(n, n+x)`` to be True.
+    mode : str, optional
+        How the edges of the vector are treated. 'wrap' (wrap around) or
+        'clip' (treat overflow as the same as the last (or first) element).
+        Default is 'clip'. See `numpy.take`.
+
+    Returns
+    -------
+    extrema : tuple of ndarrays
+        Indices of the extrema in arrays of integers. ``extrema[k]`` is
+        the array of indices of axis `k` of `data`. Note that the
+        return value is a tuple even when `data` is 1-D.
+
+    See Also
+    --------
+    argrelmin, argrelmax
+
+    Notes
+    -----
+
+    .. versionadded:: 0.11.0
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.signal import argrelextrema
+    >>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0])
+    >>> argrelextrema(x, np.greater)
+    (array([3, 6]),)
+    >>> y = np.array([[1, 2, 1, 2],
+    ...               [2, 2, 0, 0],
+    ...               [5, 3, 4, 4]])
+    ...
+    >>> argrelextrema(y, np.less, axis=1)
+    (array([0, 2]), array([2, 1]))
+
+    """
+    results = _boolrelextrema(data, comparator,
+                              axis, order, mode)
+    return np.nonzero(results)
+
+
+def _arg_x_as_expected(value):
+    """Ensure argument `x` is a 1-D C-contiguous array of dtype('float64').
+
+    Used in `find_peaks`, `peak_prominences` and `peak_widths` to make `x`
+    compatible with the signature of the wrapped Cython functions.
+
+    Returns
+    -------
+    value : ndarray
+        A 1-D C-contiguous array with dtype('float64').
+    """
+    value = np.asarray(value, order='C', dtype=np.float64)
+    if value.ndim != 1:
+        raise ValueError('`x` must be a 1-D array')
+    return value
+
+
+def _arg_peaks_as_expected(value):
+    """Ensure argument `peaks` is a 1-D C-contiguous array of dtype('intp').
+
+    Used in `peak_prominences` and `peak_widths` to make `peaks` compatible
+    with the signature of the wrapped Cython functions.
+
+    Returns
+    -------
+    value : ndarray
+        A 1-D C-contiguous array with dtype('intp').
+    """
+    value = np.asarray(value)
+    if value.size == 0:
+        # Empty arrays default to np.float64 but are valid input
+        value = np.array([], dtype=np.intp)
+    try:
+        # Safely convert to C-contiguous array of type np.intp
+        value = value.astype(np.intp, order='C', casting='safe',
+                             subok=False, copy=False)
+    except TypeError as e:
+        raise TypeError("cannot safely cast `peaks` to dtype('intp')") from e
+    if value.ndim != 1:
+        raise ValueError('`peaks` must be a 1-D array')
+    return value
+
+
+def _arg_wlen_as_expected(value):
+    """Ensure argument `wlen` is of type `np.intp` and larger than 1.
+
+    Used in `peak_prominences` and `peak_widths`.
+
+    Returns
+    -------
+    value : np.intp
+        The original `value` rounded up to an integer or -1 if `value` was
+        None.
+    """
+    if value is None:
+        # _peak_prominences expects an intp; -1 signals that no value was
+        # supplied by the user
+        value = -1
+    elif 1 < value:
+        # Round up to a positive integer
+        if not np.can_cast(value, np.intp, "safe"):
+            value = math.ceil(value)
+        value = np.intp(value)
+    else:
+        raise ValueError('`wlen` must be larger than 1, was {}'
+                         .format(value))
+    return value
+
+
+def peak_prominences(x, peaks, wlen=None):
+    """
+    Calculate the prominence of each peak in a signal.
+
+    The prominence of a peak measures how much a peak stands out from the
+    surrounding baseline of the signal and is defined as the vertical distance
+    between the peak and its lowest contour line.
+
+    Parameters
+    ----------
+    x : sequence
+        A signal with peaks.
+    peaks : sequence
+        Indices of peaks in `x`.
+    wlen : int, optional
+        A window length in samples that optionally limits the evaluated area
+        for each peak to a subset of `x`. The peak is always placed in the
+        middle of the window; therefore, the given length is rounded up to the
+        next odd integer. This parameter can speed up the calculation (see
+        Notes).
+
+    Returns
+    -------
+    prominences : ndarray
+        The calculated prominences for each peak in `peaks`.
+    left_bases, right_bases : ndarray
+        The peaks' bases as indices in `x` to the left and right of each peak.
+        The higher base of each pair is a peak's lowest contour line.
+
+    Raises
+    ------
+    ValueError
+        If a value in `peaks` is an invalid index for `x`.
+
+    Warns
+    -----
+    PeakPropertyWarning
+        For indices in `peaks` that don't point to valid local maxima in `x`,
+        the returned prominence will be 0 and this warning is raised. This
+        also happens if `wlen` is smaller than the plateau size of a peak.
+
+    Warnings
+    --------
+    This function may return unexpected results for data containing NaNs. To
+    avoid this, NaNs should either be removed or replaced.
+
+    See Also
+    --------
+    find_peaks
+        Find peaks inside a signal based on peak properties.
+    peak_widths
+        Calculate the width of peaks.
+
+    Notes
+    -----
+    Strategy to compute a peak's prominence:
+
+    1. Extend a horizontal line from the current peak to the left and right
+       until the line either reaches the window border (see `wlen`) or
+       intersects the signal again at the slope of a higher peak. An
+       intersection with a peak of the same height is ignored.
+    2. On each side find the minimal signal value within the interval defined
+       above. These points are the peak's bases.
+    3. The higher one of the two bases marks the peak's lowest contour line.
+       The prominence can then be calculated as the vertical difference
+       between the peak's height and its lowest contour line.
+
+    Searching for the peak's bases can be slow for large `x` with periodic
+    behavior because large chunks or even the full signal need to be evaluated
+    for the first algorithmic step. This evaluation area can be limited with the
+    parameter `wlen` which restricts the algorithm to a window around the
+    current peak and can shorten the calculation time if the window length is
+    short in relation to `x`.
+    However, this may stop the algorithm from finding the true global contour
+    line if the peak's true bases are outside this window. Instead, a higher
+    contour line is found within the restricted window leading to a smaller
+    calculated prominence. In practice, this is only relevant for the highest set
+    of peaks in `x`. This behavior may even be used intentionally to calculate
+    "local" prominences.
+
+    .. versionadded:: 1.1.0
+
+    References
+    ----------
+    .. [1] Wikipedia Article for Topographic Prominence:
+       https://en.wikipedia.org/wiki/Topographic_prominence
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.signal import find_peaks, peak_prominences
+    >>> import matplotlib.pyplot as plt
+
+    Create a test signal with two overlaid harmonics
+
+    >>> x = np.linspace(0, 6 * np.pi, 1000)
+    >>> x = np.sin(x) + 0.6 * np.sin(2.6 * x)
+
+    Find all peaks and calculate prominences
+
+    >>> peaks, _ = find_peaks(x)
+    >>> prominences = peak_prominences(x, peaks)[0]
+    >>> prominences
+    array([1.24159486, 0.47840168, 0.28470524, 3.10716793, 0.284603  ,
+           0.47822491, 2.48340261, 0.47822491])
+
+    Calculate the height of each peak's contour line and plot the results
+
+    >>> contour_heights = x[peaks] - prominences
+    >>> plt.plot(x)
+    >>> plt.plot(peaks, x[peaks], "x")
+    >>> plt.vlines(x=peaks, ymin=contour_heights, ymax=x[peaks])
+    >>> plt.show()
+
+    Let's evaluate a second example that demonstrates several edge cases for
+    one peak at index 5.
+
+    >>> x = np.array([0, 1, 0, 3, 1, 3, 0, 4, 0])
+    >>> peaks = np.array([5])
+    >>> plt.plot(x)
+    >>> plt.plot(peaks, x[peaks], "x")
+    >>> plt.show()
+    >>> peak_prominences(x, peaks)  # -> (prominences, left_bases, right_bases)
+    (array([3.]), array([2]), array([6]))
+
+    Note how the peak at index 3 of the same height is not considered as a
+    border while searching for the left base. Instead, two minima at 0 and 2
+    are found in which case the one closer to the evaluated peak is always
+    chosen. On the right side, however, the base must be placed at 6 because the
+    higher peak represents the right border to the evaluated area.
+
+    >>> peak_prominences(x, peaks, wlen=3.1)
+    (array([2.]), array([4]), array([6]))
+
+    Here, we restricted the algorithm to a window from 3 to 7 (the length is 5
+    samples because `wlen` was rounded up to the next odd integer). Thus, the
+    only two candidates in the evaluated area are the two neighboring samples
+    and a smaller prominence is calculated.
+    """
+    x = _arg_x_as_expected(x)
+    peaks = _arg_peaks_as_expected(peaks)
+    wlen = _arg_wlen_as_expected(wlen)
+    return _peak_prominences(x, peaks, wlen)
+
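+
+# A pure-Python sketch of the three-step strategy documented in
+# `peak_prominences` above (the actual computation happens in the Cython
+# helper `_peak_prominences`). `_prominence_sketch` is a hypothetical name,
+# and `wlen` and the returned bases are ignored for brevity:
+#
+#     def _prominence_sketch(x, peak):
+#         # steps 1 + 2: scan outwards until a strictly higher sample (or
+#         # the border), tracking the minimum seen on each side; samples of
+#         # equal height do not stop the scan
+#         i, left_min = peak, x[peak]
+#         while i > 0 and x[i - 1] <= x[peak]:
+#             i -= 1
+#             left_min = min(left_min, x[i])
+#         j, right_min = peak, x[peak]
+#         while j < len(x) - 1 and x[j + 1] <= x[peak]:
+#             j += 1
+#             right_min = min(right_min, x[j])
+#         # step 3: the higher of the two bases is the lowest contour line
+#         return x[peak] - max(left_min, right_min)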
+
+def peak_widths(x, peaks, rel_height=0.5, prominence_data=None, wlen=None):
+    """
+    Calculate the width of each peak in a signal.
+
+    This function calculates the width of a peak in samples at a relative
+    distance to the peak's height and prominence.
+
+    Parameters
+    ----------
+    x : sequence
+        A signal with peaks.
+    peaks : sequence
+        Indices of peaks in `x`.
+    rel_height : float, optional
+        Chooses the relative height at which the peak width is measured as a
+        percentage of its prominence. 1.0 calculates the width of the peak at
+        its lowest contour line while 0.5 evaluates at half the prominence
+        height. Must be at least 0. See notes for further explanation.
+    prominence_data : tuple, optional
+        A tuple of three arrays matching the output of `peak_prominences` when
+        called with the same arguments `x` and `peaks`. These data are
+        calculated internally if not provided.
+    wlen : int, optional
+        A window length in samples passed to `peak_prominences` as an optional
+        argument for internal calculation of `prominence_data`. This argument
+        is ignored if `prominence_data` is given.
+
+    Returns
+    -------
+    widths : ndarray
+        The widths for each peak in samples.
+    width_heights : ndarray
+        The height of the contour lines at which the `widths` were evaluated.
+    left_ips, right_ips : ndarray
+        Interpolated positions of left and right intersection points of a
+        horizontal line at the respective evaluation height.
+
+    Raises
+    ------
+    ValueError
+        If `prominence_data` is supplied but doesn't satisfy the condition
+        ``0 <= left_base <= peak <= right_base < x.shape[0]`` for each peak,
+        has the wrong dtype, is not C-contiguous or does not have the same
+        shape.
+
+    Warns
+    -----
+    PeakPropertyWarning
+        Raised if any calculated width is 0. This may stem from the supplied
+        `prominence_data` or if `rel_height` is set to 0.
+
+    Warnings
+    --------
+    This function may return unexpected results for data containing NaNs. To
+    avoid this, NaNs should either be removed or replaced.
+
+    See Also
+    --------
+    find_peaks
+        Find peaks inside a signal based on peak properties.
+    peak_prominences
+        Calculate the prominence of peaks.
+
+    Notes
+    -----
+    The basic algorithm to calculate a peak's width is as follows:
+
+    * Calculate the evaluation height :math:`h_{eval}` with the formula
+      :math:`h_{eval} = h_{Peak} - P \\cdot R`, where :math:`h_{Peak}` is the
+      height of the peak itself, :math:`P` is the peak's prominence and
+      :math:`R` a positive ratio specified with the argument `rel_height`.
+    * Draw a horizontal line at the evaluation height to both sides, starting
+      at the peak's current vertical position, until the lines either
+      intersect a slope, reach the signal border, or cross the vertical
+      position of the peak's base (see `peak_prominences` for a definition).
+      For the first case, intersection with the signal, the true intersection
+      point is estimated with linear interpolation.
+    * Calculate the width as the horizontal distance between the chosen
+      endpoints on both sides. As a consequence of this, the maximal possible
+      width for each peak is the horizontal distance between its bases.
+
+    As shown above, to calculate a peak's width, its prominence and bases must
+    be known. You can supply these yourself with the argument
+    `prominence_data`. Otherwise, they are internally calculated (see
+    `peak_prominences`).
+
+    .. versionadded:: 1.1.0
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.signal import chirp, find_peaks, peak_widths
+    >>> import matplotlib.pyplot as plt
+
+    Create a test signal with two overlaid harmonics
+
+    >>> x = np.linspace(0, 6 * np.pi, 1000)
+    >>> x = np.sin(x) + 0.6 * np.sin(2.6 * x)
+
+    Find all peaks and calculate their widths at the relative height of 0.5
+    (contour line at half the prominence height) and 1 (at the lowest contour
+    line at full prominence height).
+
+    >>> peaks, _ = find_peaks(x)
+    >>> results_half = peak_widths(x, peaks, rel_height=0.5)
+    >>> results_half[0]  # widths
+    array([ 64.25172825,  41.29465463,  35.46943289, 104.71586081,
+            35.46729324,  41.30429622, 181.93835853,  45.37078546])
+    >>> results_full = peak_widths(x, peaks, rel_height=1)
+    >>> results_full[0]  # widths
+    array([181.9396084 ,  72.99284945,  61.28657872, 373.84622694,
+            61.78404617,  72.48822812, 253.09161876,  79.36860878])
+
+    Plot the signal, peaks, and contour lines at which the widths were
+    calculated
+
+    >>> plt.plot(x)
+    >>> plt.plot(peaks, x[peaks], "x")
+    >>> plt.hlines(*results_half[1:], color="C2")
+    >>> plt.hlines(*results_full[1:], color="C3")
+    >>> plt.show()
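+
+    As a quick check of the formula above: a symmetric triangular peak of
+    height 2 has prominence 2, so at ``rel_height=0.5`` the evaluation height
+    is ``2 - 2 * 0.5 = 1`` and the interpolated crossings sit one sample from
+    the peak on either side:
+
+    >>> peak_widths([0, 1, 2, 1, 0], [2], rel_height=0.5)[0]
+    array([2.])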
+    """
+    x = _arg_x_as_expected(x)
+    peaks = _arg_peaks_as_expected(peaks)
+    if prominence_data is None:
+        # Calculate prominence if not supplied and use wlen if supplied.
+        wlen = _arg_wlen_as_expected(wlen)
+        prominence_data = _peak_prominences(x, peaks, wlen)
+    return _peak_widths(x, peaks, rel_height, *prominence_data)
+
+
+def _unpack_condition_args(interval, x, peaks):
+    """
+    Parse condition arguments for `find_peaks`.
+
+    Parameters
+    ----------
+    interval : number or ndarray or sequence
+        Either a number or ndarray or a 2-element sequence of the former. The
+        first value is always interpreted as `imin` and the second, if supplied,
+        as `imax`.
+    x : ndarray
+        The signal with `peaks`.
+    peaks : ndarray
+        An array with indices used to reduce `imin` and / or `imax` if those are
+        arrays.
+
+    Returns
+    -------
+    imin, imax : number or ndarray or None
+        Minimal and maximal value in `interval`.
+
+    Raises
+    ------
+    ValueError
+        If an interval border is given as an array and its size does not match
+        the size of `x`.
+
+    Notes
+    -----
+
+    .. versionadded:: 1.1.0
+    """
+    try:
+        imin, imax = interval
+    except (TypeError, ValueError):
+        imin, imax = (interval, None)
+
+    # Reduce arrays if arrays
+    if isinstance(imin, np.ndarray):
+        if imin.size != x.size:
+            raise ValueError('array size of lower interval border must match x')
+        imin = imin[peaks]
+    if isinstance(imax, np.ndarray):
+        if imax.size != x.size:
+            raise ValueError('array size of upper interval border must match x')
+        imax = imax[peaks]
+
+    return imin, imax
+
+
+def _select_by_property(peak_properties, pmin, pmax):
+    """
+    Evaluate where the generic property of peaks conforms to an interval.
+
+    Parameters
+    ----------
+    peak_properties : ndarray
+        An array with properties for each peak.
+    pmin : None or number or ndarray
+        Lower interval boundary for `peak_properties`. ``None`` is interpreted as
+        an open border.
+    pmax : None or number or ndarray
+        Upper interval boundary for `peak_properties`. ``None`` is interpreted as
+        an open border.
+
+    Returns
+    -------
+    keep : ndarray
+        A boolean mask evaluating to true where `peak_properties` conforms to
+        the interval.
+
+    See Also
+    --------
+    find_peaks
+
+    Notes
+    -----
+
+    .. versionadded:: 1.1.0
+    """
+    keep = np.ones(peak_properties.size, dtype=bool)
+    if pmin is not None:
+        keep &= (pmin <= peak_properties)
+    if pmax is not None:
+        keep &= (peak_properties <= pmax)
+    return keep
+
+
+def _select_by_peak_threshold(x, peaks, tmin, tmax):
+    """
+    Evaluate which peaks fulfill the threshold condition.
+
+    Parameters
+    ----------
+    x : ndarray
+        A 1-D array which is indexable by `peaks`.
+    peaks : ndarray
+        Indices of peaks in `x`.
+    tmin, tmax : scalar or ndarray or None
+         Minimal and / or maximal required thresholds. If supplied as ndarrays
+         their size must match `peaks`. ``None`` is interpreted as an open
+         border.
+
+    Returns
+    -------
+    keep : ndarray
+        A boolean mask evaluating to true where `peaks` fulfill the threshold
+        condition.
+    left_thresholds, right_thresholds : ndarray
+        Arrays matching `peaks`, containing the thresholds of each peak on
+        both sides.
+
+    Notes
+    -----
+
+    .. versionadded:: 1.1.0
+    """
+    # Stack thresholds on both sides to make min / max operations easier:
+    # tmin is compared with the smaller, and tmax with the greater threshold
+    # on each peak's side
+    stacked_thresholds = np.vstack([x[peaks] - x[peaks - 1],
+                                    x[peaks] - x[peaks + 1]])
+    keep = np.ones(peaks.size, dtype=bool)
+    if tmin is not None:
+        min_thresholds = np.min(stacked_thresholds, axis=0)
+        keep &= (tmin <= min_thresholds)
+    if tmax is not None:
+        max_thresholds = np.max(stacked_thresholds, axis=0)
+        keep &= (max_thresholds <= tmax)
+
+    return keep, stacked_thresholds[0], stacked_thresholds[1]
+
+
+def find_peaks(x, height=None, threshold=None, distance=None,
+               prominence=None, width=None, wlen=None, rel_height=0.5,
+               plateau_size=None):
+    """
+    Find peaks inside a signal based on peak properties.
+
+    This function takes a 1-D array and finds all local maxima by
+    simple comparison of neighboring values. Optionally, a subset of these
+    peaks can be selected by specifying conditions for a peak's properties.
+
+    Parameters
+    ----------
+    x : sequence
+        A signal with peaks.
+    height : number or ndarray or sequence, optional
+        Required height of peaks. Either a number, ``None``, an array matching
+        `x` or a 2-element sequence of the former. The first element is
+        always interpreted as the minimal and the second, if supplied, as the
+        maximal required height.
+    threshold : number or ndarray or sequence, optional
+        Required threshold of peaks, the vertical distance to its neighboring
+        samples. Either a number, ``None``, an array matching `x` or a
+        2-element sequence of the former. The first element is always
+        interpreted as the minimal and the second, if supplied, as the maximal
+        required threshold.
+    distance : number, optional
+        Required minimal horizontal distance (>= 1) in samples between
+        neighbouring peaks. Smaller peaks are removed first until the condition
+        is fulfilled for all remaining peaks.
+    prominence : number or ndarray or sequence, optional
+        Required prominence of peaks. Either a number, ``None``, an array
+        matching `x` or a 2-element sequence of the former. The first
+        element is always interpreted as the minimal and the second, if
+        supplied, as the maximal required prominence.
+    width : number or ndarray or sequence, optional
+        Required width of peaks in samples. Either a number, ``None``, an array
+        matching `x` or a 2-element sequence of the former. The first
+        element is always interpreted as the minimal and the second, if
+        supplied, as the maximal required width.
+    wlen : int, optional
+        Used for calculation of the peaks prominences, thus it is only used if
+        one of the arguments `prominence` or `width` is given. See argument
+        `wlen` in `peak_prominences` for a full description of its effects.
+    rel_height : float, optional
+        Used for calculation of the peaks width, thus it is only used if `width`
+        is given. See argument  `rel_height` in `peak_widths` for a full
+        description of its effects.
+    plateau_size : number or ndarray or sequence, optional
+        Required size of the flat top of peaks in samples. Either a number,
+        ``None``, an array matching `x` or a 2-element sequence of the former.
+        The first element is always interpreted as the minimal and the second,
+        if supplied, as the maximal required plateau size.
+
+        .. versionadded:: 1.2.0
+
+    Returns
+    -------
+    peaks : ndarray
+        Indices of peaks in `x` that satisfy all given conditions.
+    properties : dict
+        A dictionary containing properties of the returned peaks which were
+        calculated as intermediate results during evaluation of the specified
+        conditions:
+
+        * 'peak_heights'
+              If `height` is given, the height of each peak in `x`.
+        * 'left_thresholds', 'right_thresholds'
+              If `threshold` is given, these keys contain a peak's vertical
+              distance to its neighbouring samples.
+        * 'prominences', 'right_bases', 'left_bases'
+              If `prominence` is given, these keys are accessible. See
+              `peak_prominences` for a description of their content.
+        * 'width_heights', 'left_ips', 'right_ips'
+              If `width` is given, these keys are accessible. See `peak_widths`
+              for a description of their content.
+        * 'plateau_sizes', 'left_edges', 'right_edges'
+              If `plateau_size` is given, these keys are accessible and contain
+              the indices of a peak's edges (edges are still part of the
+              plateau) and the calculated plateau sizes.
+
+              .. versionadded:: 1.2.0
+
+        To calculate and return properties without excluding peaks, provide the
+        open interval ``(None, None)`` as a value to the appropriate argument
+        (excluding `distance`).
+
+    Warns
+    -----
+    PeakPropertyWarning
+        Raised if a peak's properties have unexpected values (see
+        `peak_prominences` and `peak_widths`).
+
+    Warnings
+    --------
+    This function may return unexpected results for data containing NaNs. To
+    avoid this, NaNs should either be removed or replaced.
+
+    See Also
+    --------
+    find_peaks_cwt
+        Find peaks using the wavelet transformation.
+    peak_prominences
+        Directly calculate the prominence of peaks.
+    peak_widths
+        Directly calculate the width of peaks.
+
+    Notes
+    -----
+    In the context of this function, a peak or local maximum is defined as any
+    sample whose two direct neighbours have a smaller amplitude. For flat peaks
+    (more than one sample of equal amplitude wide) the index of the middle
+    sample is returned (rounded down in case the number of samples is even).
+    For noisy signals the peak locations can be off because the noise might
+    change the position of local maxima. In those cases consider smoothing the
+    signal before searching for peaks or use other peak finding and fitting
+    methods (like `find_peaks_cwt`).
+
+    Some additional comments on specifying conditions:
+
+    * Almost all conditions (excluding `distance`) can be given as half-open or
+      closed intervals, e.g., ``1`` or ``(1, None)`` defines the half-open
+      interval :math:`[1, \\infty)` while ``(None, 1)`` defines the interval
+      :math:`(-\\infty, 1]`. The open interval ``(None, None)`` can be specified
+      as well, which returns the matching properties without exclusion of peaks.
+    * The border is always included in the interval used to select valid peaks.
+    * For several conditions the interval borders can be specified with
+      arrays matching `x` in shape, which enables dynamic constraints based on
+      the sample position.
+    * The conditions are evaluated in the following order: `plateau_size`,
+      `height`, `threshold`, `distance`, `prominence`, `width`. In most cases
+      this order is the fastest one because faster operations are applied first
+      to reduce the number of peaks that need to be evaluated later.
+    * While indices in `peaks` are guaranteed to be at least `distance` samples
+      apart, edges of flat peaks may be closer than the allowed `distance`.
+    * Use `wlen` to reduce the time it takes to evaluate the conditions for
+      `prominence` or `width` if `x` is large or has many local maxima
+      (see `peak_prominences`).
+
+    .. versionadded:: 1.1.0
+
+    Examples
+    --------
+    To demonstrate this function's usage we use a signal `x` supplied with
+    SciPy (see `scipy.datasets.electrocardiogram`). Let's find all peaks (local
+    maxima) in `x` whose amplitude lies above 0.
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.datasets import electrocardiogram
+    >>> from scipy.signal import find_peaks
+    >>> x = electrocardiogram()[2000:4000]
+    >>> peaks, _ = find_peaks(x, height=0)
+    >>> plt.plot(x)
+    >>> plt.plot(peaks, x[peaks], "x")
+    >>> plt.plot(np.zeros_like(x), "--", color="gray")
+    >>> plt.show()
+
+    We can select peaks below 0 with ``height=(None, 0)`` or use arrays matching
+    `x` in size to reflect a changing condition for different parts of the
+    signal.
+
+    >>> border = np.sin(np.linspace(0, 3 * np.pi, x.size))
+    >>> peaks, _ = find_peaks(x, height=(-border, border))
+    >>> plt.plot(x)
+    >>> plt.plot(-border, "--", color="gray")
+    >>> plt.plot(border, ":", color="gray")
+    >>> plt.plot(peaks, x[peaks], "x")
+    >>> plt.show()
+
+    Another useful condition for periodic signals can be given with the
+    `distance` argument. In this case, we can easily select the positions of
+    QRS complexes within the electrocardiogram (ECG) by demanding a distance of
+    at least 150 samples.
+
+    >>> peaks, _ = find_peaks(x, distance=150)
+    >>> np.diff(peaks)
+    array([186, 180, 177, 171, 177, 169, 167, 164, 158, 162, 172])
+    >>> plt.plot(x)
+    >>> plt.plot(peaks, x[peaks], "x")
+    >>> plt.show()
+
+    Especially for noisy signals peaks can be easily grouped by their
+    prominence (see `peak_prominences`). E.g., we can select all peaks except
+    for the mentioned QRS complexes by limiting the allowed prominence to 0.6.
+
+    >>> peaks, properties = find_peaks(x, prominence=(None, 0.6))
+    >>> properties["prominences"].max()
+    0.5049999999999999
+    >>> plt.plot(x)
+    >>> plt.plot(peaks, x[peaks], "x")
+    >>> plt.show()
+
+    And, finally, let's examine a different section of the ECG which contains
+    beat forms of different shape. To select only the atypical heart beats, we
+    combine two conditions: a minimal prominence of 1 and width of at least 20
+    samples.
+
+    >>> x = electrocardiogram()[17000:18000]
+    >>> peaks, properties = find_peaks(x, prominence=1, width=20)
+    >>> properties["prominences"], properties["widths"]
+    (array([1.495, 2.3  ]), array([36.93773946, 39.32723577]))
+    >>> plt.plot(x)
+    >>> plt.plot(peaks, x[peaks], "x")
+    >>> plt.vlines(x=peaks, ymin=x[peaks] - properties["prominences"],
+    ...            ymax = x[peaks], color = "C1")
+    >>> plt.hlines(y=properties["width_heights"], xmin=properties["left_ips"],
+    ...            xmax=properties["right_ips"], color = "C1")
+    >>> plt.show()
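+
+    As noted above, providing the open interval ``(None, None)`` excludes no
+    peaks but still computes and returns the corresponding properties:
+
+    >>> _, properties = find_peaks(x, plateau_size=(None, None))
+    >>> sorted(properties)
+    ['left_edges', 'plateau_sizes', 'right_edges']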
+    """
+    # _argmaxima1d expects array of dtype 'float64'
+    x = _arg_x_as_expected(x)
+    if distance is not None and distance < 1:
+        raise ValueError('`distance` must be greater than or equal to 1')
+
+    peaks, left_edges, right_edges = _local_maxima_1d(x)
+    properties = {}
+
+    if plateau_size is not None:
+        # Evaluate plateau size
+        plateau_sizes = right_edges - left_edges + 1
+        pmin, pmax = _unpack_condition_args(plateau_size, x, peaks)
+        keep = _select_by_property(plateau_sizes, pmin, pmax)
+        peaks = peaks[keep]
+        properties["plateau_sizes"] = plateau_sizes
+        properties["left_edges"] = left_edges
+        properties["right_edges"] = right_edges
+        properties = {key: array[keep] for key, array in properties.items()}
+
+    if height is not None:
+        # Evaluate height condition
+        peak_heights = x[peaks]
+        hmin, hmax = _unpack_condition_args(height, x, peaks)
+        keep = _select_by_property(peak_heights, hmin, hmax)
+        peaks = peaks[keep]
+        properties["peak_heights"] = peak_heights
+        properties = {key: array[keep] for key, array in properties.items()}
+
+    if threshold is not None:
+        # Evaluate threshold condition
+        tmin, tmax = _unpack_condition_args(threshold, x, peaks)
+        keep, left_thresholds, right_thresholds = _select_by_peak_threshold(
+            x, peaks, tmin, tmax)
+        peaks = peaks[keep]
+        properties["left_thresholds"] = left_thresholds
+        properties["right_thresholds"] = right_thresholds
+        properties = {key: array[keep] for key, array in properties.items()}
+
+    if distance is not None:
+        # Evaluate distance condition
+        keep = _select_by_peak_distance(peaks, x[peaks], distance)
+        peaks = peaks[keep]
+        properties = {key: array[keep] for key, array in properties.items()}
+
+    if prominence is not None or width is not None:
+        # Calculate prominence (required for both conditions)
+        wlen = _arg_wlen_as_expected(wlen)
+        properties.update(zip(
+            ['prominences', 'left_bases', 'right_bases'],
+            _peak_prominences(x, peaks, wlen=wlen)
+        ))
+
+    if prominence is not None:
+        # Evaluate prominence condition
+        pmin, pmax = _unpack_condition_args(prominence, x, peaks)
+        keep = _select_by_property(properties['prominences'], pmin, pmax)
+        peaks = peaks[keep]
+        properties = {key: array[keep] for key, array in properties.items()}
+
+    if width is not None:
+        # Calculate widths
+        properties.update(zip(
+            ['widths', 'width_heights', 'left_ips', 'right_ips'],
+            _peak_widths(x, peaks, rel_height, properties['prominences'],
+                         properties['left_bases'], properties['right_bases'])
+        ))
+        # Evaluate width condition
+        wmin, wmax = _unpack_condition_args(width, x, peaks)
+        keep = _select_by_property(properties['widths'], wmin, wmax)
+        peaks = peaks[keep]
+        properties = {key: array[keep] for key, array in properties.items()}
+
+    return peaks, properties
+
+
+def _identify_ridge_lines(matr, max_distances, gap_thresh):
+    """
+    Identify ridges in the 2-D matrix.
+
+    Expect that the width of the wavelet feature increases with increasing row
+    number.
+
+    Parameters
+    ----------
+    matr : 2-D ndarray
+        Matrix in which to identify ridge lines.
+    max_distances : 1-D sequence
+        At each row, a ridge line is only connected
+        if the relative max at row ``n`` is within
+        ``max_distances[n]`` of the relative max at row ``n+1``.
+    gap_thresh : int
+        If a relative maximum is not found within `max_distances`,
+        there will be a gap. A ridge line is discontinued if
+        there are more than `gap_thresh` points without connecting
+        a new relative maximum.
+
+    Returns
+    -------
+    ridge_lines : tuple
+        Tuple of 2 1-D sequences. ``ridge_lines[ii][0]`` are the rows of the
+        ii-th ridge-line, ``ridge_lines[ii][1]`` are the columns. Empty if
+        none found. Each ridge-line will be sorted by row (increasing), but the
+        order of the ridge lines is not specified.
+
+    References
+    ----------
+    .. [1] Bioinformatics (2006) 22 (17): 2059-2065.
+       :doi:`10.1093/bioinformatics/btl355`
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> rng = np.random.default_rng()
+    >>> data = rng.random((5,5))
+    >>> max_dist = 3
+    >>> max_distances = np.full(20, max_dist)
+    >>> ridge_lines = _identify_ridge_lines(data, max_distances, 1)
+
+    Notes
+    -----
+    This function is intended to be used in conjunction with `cwt`
+    as part of `find_peaks_cwt`.
+
+    """
+    if len(max_distances) < matr.shape[0]:
+        raise ValueError('max_distances must have at least as many rows '
+                         'as matr')
+
+    all_max_cols = _boolrelextrema(matr, np.greater, axis=1, order=1)
+    # Highest row for which there are any relative maxima
+    has_relmax = np.nonzero(all_max_cols.any(axis=1))[0]
+    if len(has_relmax) == 0:
+        return []
+    start_row = has_relmax[-1]
+    # Each ridge line is a 3-tuple:
+    # rows, cols, gap number
+    ridge_lines = [[[start_row],
+                   [col],
+                   0] for col in np.nonzero(all_max_cols[start_row])[0]]
+    final_lines = []
+    rows = np.arange(start_row - 1, -1, -1)
+    cols = np.arange(0, matr.shape[1])
+    for row in rows:
+        this_max_cols = cols[all_max_cols[row]]
+
+        # Increment gap number of each line,
+        # set it to zero later if appropriate
+        for line in ridge_lines:
+            line[2] += 1
+
+        # XXX These should always be all_max_cols[row]
+        # But the order might be different. Might be an efficiency gain
+        # to make sure the order is the same and avoid this iteration
+        prev_ridge_cols = np.array([line[1][-1] for line in ridge_lines])
+        # Look through every relative maximum found at current row
+        # Attempt to connect them with existing ridge lines.
+        for ind, col in enumerate(this_max_cols):
+            # If there is a previous ridge line within
+            # the max_distance to connect to, do so.
+            # Otherwise start a new one.
+            line = None
+            if len(prev_ridge_cols) > 0:
+                diffs = np.abs(col - prev_ridge_cols)
+                closest = np.argmin(diffs)
+                if diffs[closest] <= max_distances[row]:
+                    line = ridge_lines[closest]
+            if line is not None:
+                # Found a point close enough, extend current ridge line
+                line[1].append(col)
+                line[0].append(row)
+                line[2] = 0
+            else:
+                new_line = [[row],
+                            [col],
+                            0]
+                ridge_lines.append(new_line)
+
+        # Remove the ridge lines with gap_number too high
+        # XXX Modifying a list while iterating over it.
+        # Should be safe, since we iterate backwards, but
+        # still tacky.
+        for ind in range(len(ridge_lines) - 1, -1, -1):
+            line = ridge_lines[ind]
+            if line[2] > gap_thresh:
+                final_lines.append(line)
+                del ridge_lines[ind]
+
+    out_lines = []
+    for line in (final_lines + ridge_lines):
+        sortargs = np.array(np.argsort(line[0]))
+        rows, cols = np.zeros_like(sortargs), np.zeros_like(sortargs)
+        rows[sortargs] = line[0]
+        cols[sortargs] = line[1]
+        out_lines.append([rows, cols])
+
+    return out_lines
+
+
+def _filter_ridge_lines(cwt, ridge_lines, window_size=None, min_length=None,
+                        min_snr=1, noise_perc=10):
+    """
+    Filter ridge lines according to prescribed criteria. Intended
+    to be used for finding relative maxima.
+
+    Parameters
+    ----------
+    cwt : 2-D ndarray
+        Continuous wavelet transform from which the `ridge_lines` were defined.
+    ridge_lines : 1-D sequence
+        Each element should contain 2 sequences, the rows and columns
+        of the ridge line (respectively).
+    window_size : int, optional
+        Size of window to use to calculate noise floor.
+        Default is ``cwt.shape[1] / 20``.
+    min_length : int, optional
+        Minimum length a ridge line needs to be acceptable.
+        Default is ``cwt.shape[0] / 4``, i.e. one quarter of the number of widths.
+    min_snr : float, optional
+        Minimum SNR ratio. Default 1. The signal is the value of
+        the cwt matrix at the shortest length scale (``cwt[0, loc]``), the
+        noise is the `noise_perc`th percentile of datapoints contained within a
+        window of `window_size` around ``cwt[0, loc]``.
+    noise_perc : float, optional
+        When calculating the noise floor, percentile of data points
+        examined below which to consider noise. Calculated using
+        scipy.stats.scoreatpercentile.
+
+    References
+    ----------
+    .. [1] Bioinformatics (2006) 22 (17): 2059-2065.
+       :doi:`10.1093/bioinformatics/btl355`
+
+    """
+    num_points = cwt.shape[1]
+    if min_length is None:
+        min_length = np.ceil(cwt.shape[0] / 4)
+    if window_size is None:
+        window_size = np.ceil(num_points / 20)
+
+    window_size = int(window_size)
+    hf_window, odd = divmod(window_size, 2)
+
+    # Filter based on SNR
+    row_one = cwt[0, :]
+    noises = np.empty_like(row_one)
+    for ind, val in enumerate(row_one):
+        window_start = max(ind - hf_window, 0)
+        window_end = min(ind + hf_window + odd, num_points)
+        noises[ind] = scoreatpercentile(row_one[window_start:window_end],
+                                        per=noise_perc)
+
+    def filt_func(line):
+        if len(line[0]) < min_length:
+            return False
+        snr = abs(cwt[line[0][0], line[1][0]] / noises[line[1][0]])
+        if snr < min_snr:
+            return False
+        return True
+
+    return list(filter(filt_func, ridge_lines))
+
+
+def find_peaks_cwt(vector, widths, wavelet=None, max_distances=None,
+                   gap_thresh=None, min_length=None,
+                   min_snr=1, noise_perc=10, window_size=None):
+    """
+    Find peaks in a 1-D array with wavelet transformation.
+
+    The general approach is to smooth `vector` by convolving it with
+    `wavelet(width)` for each width in `widths`. Relative maxima which
+    appear at enough length scales, and with sufficiently high SNR, are
+    accepted.
+
+    Parameters
+    ----------
+    vector : ndarray
+        1-D array in which to find the peaks.
+    widths : float or sequence
+        Single width or 1-D array-like of widths to use for calculating
+        the CWT matrix. In general,
+        this range should cover the expected width of peaks of interest.
+    wavelet : callable, optional
+        Should take two parameters and return a 1-D array to convolve
+        with `vector`. The first parameter determines the number of points
+        of the returned wavelet array, the second parameter is the scale
+        (`width`) of the wavelet. Should be normalized and symmetric.
+        Default is the ricker wavelet.
+    max_distances : ndarray, optional
+        At each row, a ridge line is only connected if the relative max at
+        row[n] is within ``max_distances[n]`` from the relative max at
+        ``row[n+1]``.  Default value is ``widths/4``.
+    gap_thresh : float, optional
+        If a relative maximum is not found within `max_distances`,
+        there will be a gap. A ridge line is discontinued if there are more
+        than `gap_thresh` points without connecting a new relative maximum.
+        Default is the first value of the widths array, i.e. ``widths[0]``.
+    min_length : int, optional
+        Minimum length a ridge line needs to be acceptable.
+        Default is ``cwt.shape[0] / 4``, i.e. one quarter of the number of widths.
+    min_snr : float, optional
+        Minimum SNR ratio. Default 1. The signal is the value of the cwt
+        matrix at the shortest length scale (``cwt[0, loc]``); the noise is
+        the `noise_perc`th percentile of datapoints contained within a
+        window of `window_size` around ``cwt[0, loc]``.
+    noise_perc : float, optional
+        When calculating the noise floor, percentile of data points
+        examined below which to consider noise. Calculated using
+        `stats.scoreatpercentile`.  Default is 10.
+    window_size : int, optional
+        Size of window to use to calculate noise floor.
+        Default is ``cwt.shape[1] / 20``.
+
+    Returns
+    -------
+    peaks_indices : ndarray
+        Indices of the locations in the `vector` where peaks were found.
+        The list is sorted.
+
+    See Also
+    --------
+    cwt
+        Continuous wavelet transform.
+    find_peaks
+        Find peaks inside a signal based on peak properties.
+
+    Notes
+    -----
+    This approach was designed for finding sharp peaks among noisy data,
+    however with proper parameter selection it should function well for
+    different peak shapes.
+
+    The algorithm is as follows:
+     1. Perform a continuous wavelet transform on `vector`, for the supplied
+        `widths`. This is a convolution of `vector` with `wavelet(width)` for
+        each width in `widths`. See `cwt`.
+     2. Identify "ridge lines" in the cwt matrix. These are relative maxima
+        at each row, connected across adjacent rows. See
+        `_identify_ridge_lines`.
+     3. Filter the ridge lines using `_filter_ridge_lines`.
+
+    .. versionadded:: 0.11.0
+
+    References
+    ----------
+    .. [1] Bioinformatics (2006) 22 (17): 2059-2065.
+       :doi:`10.1093/bioinformatics/btl355`
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> xs = np.arange(0, np.pi, 0.05)
+    >>> data = np.sin(xs)
+    >>> peakind = signal.find_peaks_cwt(data, np.arange(1,10))
+    >>> peakind, xs[peakind], data[peakind]
+    (array([32]), array([1.6]), array([0.9995736]))
+
+    """
+    widths = np.array(widths, copy=False, ndmin=1)
+
+    if gap_thresh is None:
+        gap_thresh = np.ceil(widths[0])
+    if max_distances is None:
+        max_distances = widths / 4.0
+    if wavelet is None:
+        wavelet = ricker
+
+    cwt_dat = cwt(vector, wavelet, widths)
+    ridge_lines = _identify_ridge_lines(cwt_dat, max_distances, gap_thresh)
+    filtered = _filter_ridge_lines(cwt_dat, ridge_lines, min_length=min_length,
+                                   window_size=window_size, min_snr=min_snr,
+                                   noise_perc=noise_perc)
+    max_locs = np.asarray([x[1][0] for x in filtered])
+    max_locs.sort()
+
+    return max_locs
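+
+
+# Editor's illustrative sketch, not part of the upstream scipy source: it
+# replays the three algorithm stages listed in the `find_peaks_cwt` Notes by
+# hand and checks that they reproduce the public function. The helper name
+# and the test signal are assumptions made purely for illustration.
+def _sketch_find_peaks_cwt_stages():
+    vector = np.sin(np.arange(0, 6 * np.pi, 0.05))     # three sine peaks
+    widths = np.arange(1, 10)
+    cwt_dat = cwt(vector, ricker, widths)              # 1. wavelet transform
+    ridge_lines = _identify_ridge_lines(cwt_dat, widths / 4.0,
+                                        np.ceil(widths[0]))  # 2. ridge lines
+    filtered = _filter_ridge_lines(cwt_dat, ridge_lines)     # 3. SNR filter
+    by_hand = np.sort([line[1][0] for line in filtered])
+    assert np.array_equal(by_hand, find_peaks_cwt(vector, widths))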
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/_savitzky_golay.py b/__packaged__/coreml/.python_dependencies/scipy/signal/_savitzky_golay.py
new file mode 100644
index 00000000..339178fb
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/_savitzky_golay.py
@@ -0,0 +1,357 @@
+import numpy as np
+from scipy.linalg import lstsq
+from scipy._lib._util import float_factorial
+from scipy.ndimage import convolve1d
+from ._arraytools import axis_slice
+
+
+def savgol_coeffs(window_length, polyorder, deriv=0, delta=1.0, pos=None,
+                  use="conv"):
+    """Compute the coefficients for a 1-D Savitzky-Golay FIR filter.
+
+    Parameters
+    ----------
+    window_length : int
+        The length of the filter window (i.e., the number of coefficients).
+    polyorder : int
+        The order of the polynomial used to fit the samples.
+        `polyorder` must be less than `window_length`.
+    deriv : int, optional
+        The order of the derivative to compute. This must be a
+        nonnegative integer. The default is 0, which means to filter
+        the data without differentiating.
+    delta : float, optional
+        The spacing of the samples to which the filter will be applied.
+        This is only used if deriv > 0.
+    pos : int or None, optional
+        If pos is not None, it specifies the evaluation position within the
+        window. The default is the middle of the window.
+    use : str, optional
+        Either 'conv' or 'dot'. This argument chooses the order of the
+        coefficients. The default is 'conv', which means that the
+        coefficients are ordered to be used in a convolution. With
+        use='dot', the order is reversed, so the filter is applied by
+        dotting the coefficients with the data set.
+
+    Returns
+    -------
+    coeffs : 1-D ndarray
+        The filter coefficients.
+
+    See Also
+    --------
+    savgol_filter
+
+    Notes
+    -----
+    .. versionadded:: 0.14.0
+
+    References
+    ----------
+    A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of Data by
+    Simplified Least Squares Procedures. Analytical Chemistry, 1964, 36 (8),
+    pp 1627-1639.
+    Jianwen Luo, Kui Ying, and Jing Bai. 2005. Savitzky-Golay smoothing and
+    differentiation filter for even number data. Signal Process.
+    85, 7 (July 2005), 1429-1434.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.signal import savgol_coeffs
+    >>> savgol_coeffs(5, 2)
+    array([-0.08571429,  0.34285714,  0.48571429,  0.34285714, -0.08571429])
+    >>> savgol_coeffs(5, 2, deriv=1)
+    array([ 2.00000000e-01,  1.00000000e-01,  2.07548111e-16, -1.00000000e-01,
+           -2.00000000e-01])
+
+    Note that use='dot' simply reverses the coefficients.
+
+    >>> savgol_coeffs(5, 2, pos=3)
+    array([ 0.25714286,  0.37142857,  0.34285714,  0.17142857, -0.14285714])
+    >>> savgol_coeffs(5, 2, pos=3, use='dot')
+    array([-0.14285714,  0.17142857,  0.34285714,  0.37142857,  0.25714286])
+    >>> savgol_coeffs(4, 2, pos=3, deriv=1, use='dot')
+    array([0.45,  -0.85,  -0.65,  1.05])
+
+    `x` contains data from the parabola x = t**2, sampled at
+    t = -1, 0, 1, 2, 3.  `c` holds the coefficients that will compute the
+    derivative at the last position.  When dotted with `x` the result should
+    be 6.
+
+    >>> x = np.array([1, 0, 1, 4, 9])
+    >>> c = savgol_coeffs(5, 2, pos=4, deriv=1, use='dot')
+    >>> c.dot(x)
+    6.0
+    """
+
+    # An alternative method for finding the coefficients when deriv=0 is
+    #    t = np.arange(window_length)
+    #    unit = (t == pos).astype(int)
+    #    coeffs = np.polyval(np.polyfit(t, unit, polyorder), t)
+    # The method implemented here is faster.
+
+    # To recreate the table of sample coefficients shown in the chapter on
+    # the Savitzky-Golay filter in the Numerical Recipes book, use
+    #    window_length = nL + nR + 1
+    #    pos = nL + 1
+    #    c = savgol_coeffs(window_length, M, pos=pos, use='dot')
+
+    if polyorder >= window_length:
+        raise ValueError("polyorder must be less than window_length.")
+
+    halflen, rem = divmod(window_length, 2)
+
+    if pos is None:
+        if rem == 0:
+            pos = halflen - 0.5
+        else:
+            pos = halflen
+
+    if not (0 <= pos < window_length):
+        raise ValueError("pos must be nonnegative and less than "
+                         "window_length.")
+
+    if use not in ['conv', 'dot']:
+        raise ValueError("`use` must be 'conv' or 'dot'")
+
+    if deriv > polyorder:
+        coeffs = np.zeros(window_length)
+        return coeffs
+
+    # Form the design matrix A. The columns of A are powers of the integers
+    # from -pos to window_length - pos - 1. The powers (i.e., rows) range
+    # from 0 to polyorder. (That is, A is a Vandermonde matrix, but not
+    # necessarily square.)
+    x = np.arange(-pos, window_length - pos, dtype=float)
+
+    if use == "conv":
+        # Reverse so that result can be used in a convolution.
+        x = x[::-1]
+
+    order = np.arange(polyorder + 1).reshape(-1, 1)
+    A = x ** order
+
+    # y determines which order derivative is returned.
+    y = np.zeros(polyorder + 1)
+    # The coefficient assigned to y[deriv] scales the result to take into
+    # account the order of the derivative and the sample spacing.
+    y[deriv] = float_factorial(deriv) / (delta ** deriv)
+
+    # Find the least-squares solution of A*c = y
+    coeffs, _, _, _ = lstsq(A, y)
+
+    return coeffs
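+
+
+# Editor's illustrative sketch, not part of the upstream scipy source: it
+# numerically confirms the "alternative method" mentioned in the comments
+# above -- for deriv=0 the 'dot'-ordered coefficients equal a polynomial fit
+# evaluated on a unit impulse. The helper name and the window/order/position
+# values are assumptions chosen only for illustration.
+def _sketch_savgol_coeffs_via_polyfit():
+    window_length, polyorder, pos = 7, 2, 4
+    t = np.arange(window_length)
+    unit = (t == pos).astype(float)
+    via_polyfit = np.polyval(np.polyfit(t, unit, polyorder), t)
+    direct = savgol_coeffs(window_length, polyorder, pos=pos, use='dot')
+    assert np.allclose(via_polyfit, direct)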
+
+
+def _polyder(p, m):
+    """Differentiate polynomials represented with coefficients.
+
+    p must be a 1-D or 2-D array.  In the 2-D case, each column gives
+    the coefficients of a polynomial; the first row holds the coefficients
+    associated with the highest power. m must be a nonnegative integer.
+    (numpy.polyder doesn't handle the 2-D case.)
+    """
+
+    if m == 0:
+        result = p
+    else:
+        n = len(p)
+        if n <= m:
+            result = np.zeros_like(p[:1, ...])
+        else:
+            dp = p[:-m].copy()
+            for k in range(m):
+                rng = np.arange(n - k - 1, m - k - 1, -1)
+                dp *= rng.reshape((n - m,) + (1,) * (p.ndim - 1))
+            result = dp
+    return result
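+
+
+# Editor's illustrative sketch, not part of the upstream scipy source: for
+# the 1-D case `_polyder` should agree with `numpy.polyder`, and the 2-D
+# case applies the same rule column by column. The helper name and the
+# example polynomial are assumptions made for illustration.
+def _sketch_polyder_matches_numpy():
+    p = np.array([3.0, 2.0, 1.0, 5.0])       # 3x**3 + 2x**2 + x + 5
+    assert np.allclose(_polyder(p, 1), np.polyder(p, 1))   # 9x**2 + 4x + 1
+    assert np.allclose(_polyder(p, 2), np.polyder(p, 2))   # 18x + 4
+    p2 = np.column_stack([p, 2 * p])          # two polynomials side by side
+    assert np.allclose(_polyder(p2, 1)[:, 1], np.polyder(2 * p, 1))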
+
+
+def _fit_edge(x, window_start, window_stop, interp_start, interp_stop,
+              axis, polyorder, deriv, delta, y):
+    """
+    Given an N-d array `x` and the specification of a slice of `x` from
+    `window_start` to `window_stop` along `axis`, create an interpolating
+    polynomial of each 1-D slice, and evaluate that polynomial in the slice
+    from `interp_start` to `interp_stop`. Put the result into the
+    corresponding slice of `y`.
+    """
+
+    # Get the edge into a (window_length, -1) array.
+    x_edge = axis_slice(x, start=window_start, stop=window_stop, axis=axis)
+    if axis == 0 or axis == -x.ndim:
+        xx_edge = x_edge
+        swapped = False
+    else:
+        xx_edge = x_edge.swapaxes(axis, 0)
+        swapped = True
+    xx_edge = xx_edge.reshape(xx_edge.shape[0], -1)
+
+    # Fit the edges.  poly_coeffs has shape (polyorder + 1, -1),
+    # where '-1' is the same as in xx_edge.
+    poly_coeffs = np.polyfit(np.arange(0, window_stop - window_start),
+                             xx_edge, polyorder)
+
+    if deriv > 0:
+        poly_coeffs = _polyder(poly_coeffs, deriv)
+
+    # Compute the interpolated values for the edge.
+    i = np.arange(interp_start - window_start, interp_stop - window_start)
+    values = np.polyval(poly_coeffs, i.reshape(-1, 1)) / (delta ** deriv)
+
+    # Now put the values into the appropriate slice of y.
+    # First reshape values to match y.
+    shp = list(y.shape)
+    shp[0], shp[axis] = shp[axis], shp[0]
+    values = values.reshape(interp_stop - interp_start, *shp[1:])
+    if swapped:
+        values = values.swapaxes(0, axis)
+    # Get a view of the data to be replaced by values.
+    y_edge = axis_slice(y, start=interp_start, stop=interp_stop, axis=axis)
+    y_edge[...] = values
+
+
+def _fit_edges_polyfit(x, window_length, polyorder, deriv, delta, axis, y):
+    """
+    Use polynomial interpolation of x at the low and high ends of the axis
+    to fill in the halflen values in y.
+
+    This function just calls _fit_edge twice, once for each end of the axis.
+    """
+    halflen = window_length // 2
+    _fit_edge(x, 0, window_length, 0, halflen, axis,
+              polyorder, deriv, delta, y)
+    n = x.shape[axis]
+    _fit_edge(x, n - window_length, n, n - halflen, n, axis,
+              polyorder, deriv, delta, y)
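+
+
+# Editor's illustrative sketch, not part of the upstream scipy source: with
+# mode='interp', both the interior least-squares fit and the polynomial edge
+# fits above reproduce any input that is itself a polynomial of degree at
+# most `polyorder`, so a quadratic should pass through unchanged. The helper
+# name and the test values are assumptions made for illustration.
+def _sketch_interp_mode_preserves_polynomials():
+    t = np.arange(20, dtype=float)
+    x = 3.0 * t**2 - 2.0 * t + 1.0
+    assert np.allclose(savgol_filter(x, 5, 2), x)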
+
+
+def savgol_filter(x, window_length, polyorder, deriv=0, delta=1.0,
+                  axis=-1, mode='interp', cval=0.0):
+    """ Apply a Savitzky-Golay filter to an array.
+
+    This is a 1-D filter. If `x`  has dimension greater than 1, `axis`
+    determines the axis along which the filter is applied.
+
+    Parameters
+    ----------
+    x : array_like
+        The data to be filtered. If `x` is not a single or double precision
+        floating point array, it will be converted to type ``numpy.float64``
+        before filtering.
+    window_length : int
+        The length of the filter window (i.e., the number of coefficients).
+        If `mode` is 'interp', `window_length` must be less than or equal
+        to the size of `x`.
+    polyorder : int
+        The order of the polynomial used to fit the samples.
+        `polyorder` must be less than `window_length`.
+    deriv : int, optional
+        The order of the derivative to compute. This must be a
+        nonnegative integer. The default is 0, which means to filter
+        the data without differentiating.
+    delta : float, optional
+        The spacing of the samples to which the filter will be applied.
+        This is only used if deriv > 0. Default is 1.0.
+    axis : int, optional
+        The axis of the array `x` along which the filter is to be applied.
+        Default is -1.
+    mode : str, optional
+        Must be 'mirror', 'constant', 'nearest', 'wrap' or 'interp'. This
+        determines the type of extension to use for the padded signal to
+        which the filter is applied.  When `mode` is 'constant', the padding
+        value is given by `cval`.  See the Notes for more details on 'mirror',
+        'constant', 'wrap', and 'nearest'.
+        When the 'interp' mode is selected (the default), no extension
+        is used.  Instead, a degree `polyorder` polynomial is fit to the
+        last `window_length` values of the edges, and this polynomial is
+        used to evaluate the last `window_length // 2` output values.
+    cval : scalar, optional
+        Value to fill past the edges of the input if `mode` is 'constant'.
+        Default is 0.0.
+
+    Returns
+    -------
+    y : ndarray, same shape as `x`
+        The filtered data.
+
+    See Also
+    --------
+    savgol_coeffs
+
+    Notes
+    -----
+    Details on the `mode` options:
+
+        'mirror':
+            Repeats the values at the edges in reverse order. The value
+            closest to the edge is not included.
+        'nearest':
+            The extension contains the nearest input value.
+        'constant':
+            The extension contains the value given by the `cval` argument.
+        'wrap':
+            The extension contains the values from the other end of the array.
+
+    For example, if the input is [1, 2, 3, 4, 5, 6, 7, 8], and
+    `window_length` is 7, the following shows the extended data for
+    the various `mode` options (assuming `cval` is 0)::
+
+        mode       |   Ext   |         Input          |   Ext
+        -----------+---------+------------------------+---------
+        'mirror'   | 4  3  2 | 1  2  3  4  5  6  7  8 | 7  6  5
+        'nearest'  | 1  1  1 | 1  2  3  4  5  6  7  8 | 8  8  8
+        'constant' | 0  0  0 | 1  2  3  4  5  6  7  8 | 0  0  0
+        'wrap'     | 6  7  8 | 1  2  3  4  5  6  7  8 | 1  2  3
+
+    .. versionadded:: 0.14.0
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.signal import savgol_filter
+    >>> np.set_printoptions(precision=2)  # For compact display.
+    >>> x = np.array([2, 2, 5, 2, 1, 0, 1, 4, 9])
+
+    Filter with a window length of 5 and a degree 2 polynomial.  Use
+    the defaults for all other parameters.
+
+    >>> savgol_filter(x, 5, 2)
+    array([1.66, 3.17, 3.54, 2.86, 0.66, 0.17, 1.  , 4.  , 9.  ])
+
+    Note that the last five values in x are samples of a parabola, so
+    when mode='interp' (the default) is used with polyorder=2, the last
+    three values are unchanged. Compare that to, for example,
+    `mode='nearest'`:
+
+    >>> savgol_filter(x, 5, 2, mode='nearest')
+    array([1.74, 3.03, 3.54, 2.86, 0.66, 0.17, 1.  , 4.6 , 7.97])
+
+    """
+    if mode not in ["mirror", "constant", "nearest", "interp", "wrap"]:
+        raise ValueError("mode must be 'mirror', 'constant', 'nearest', "
+                         "'wrap', or 'interp'.")
+
+    x = np.asarray(x)
+    # Ensure that x is either single or double precision floating point.
+    if x.dtype != np.float64 and x.dtype != np.float32:
+        x = x.astype(np.float64)
+
+    coeffs = savgol_coeffs(window_length, polyorder, deriv=deriv, delta=delta)
+
+    if mode == "interp":
+        if window_length > x.shape[axis]:
+            raise ValueError("If mode is 'interp', window_length must be less "
+                             "than or equal to the size of x.")
+
+        # Do not pad. Instead, for the elements within `window_length // 2`
+        # of the ends of the sequence, use the polynomial that is fitted to
+        # the last `window_length` elements.
+        y = convolve1d(x, coeffs, axis=axis, mode="constant")
+        _fit_edges_polyfit(x, window_length, polyorder, deriv, delta, axis, y)
+    else:
+        # Any mode other than 'interp' is passed on to ndimage.convolve1d.
+        y = convolve1d(x, coeffs, axis=axis, mode=mode, cval=cval)
+
+    return y
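+
+
+# Editor's illustrative sketch, not part of the upstream scipy source: the
+# deriv/delta arguments turn the filter into a smoothed differentiator, so
+# the first derivative of a sampled sine should approximate the cosine away
+# from the edges. The helper name, window, order, and tolerance are
+# assumptions made for illustration.
+def _sketch_savgol_derivative():
+    dt = 0.05
+    t = np.arange(0, 2 * np.pi, dt)
+    dx = savgol_filter(np.sin(t), window_length=11, polyorder=3,
+                       deriv=1, delta=dt)
+    assert np.allclose(dx[10:-10], np.cos(t)[10:-10], atol=1e-3)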
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/_signaltools.py b/__packaged__/coreml/.python_dependencies/scipy/signal/_signaltools.py
new file mode 100644
index 00000000..31baba24
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/_signaltools.py
@@ -0,0 +1,4565 @@
+# Author: Travis Oliphant
+# 1999 -- 2002
+
+import operator
+import math
+import timeit
+from scipy.spatial import cKDTree
+from . import _sigtools
+from ._ltisys import dlti
+from ._upfirdn import upfirdn, _output_len, _upfirdn_modes
+from scipy import linalg, fft as sp_fft
+from scipy.fft._helper import _init_nd_shape_and_axes
+from scipy._lib._util import prod as _prod
+import numpy as np
+from scipy.special import lambertw
+from .windows import get_window
+from ._arraytools import axis_slice, axis_reverse, odd_ext, even_ext, const_ext
+from ._filter_design import cheby1, _validate_sos, zpk2sos
+from ._fir_filter_design import firwin
+from ._sosfilt import _sosfilt
+import warnings
+
+
+__all__ = ['correlate', 'correlation_lags', 'correlate2d',
+           'convolve', 'convolve2d', 'fftconvolve', 'oaconvolve',
+           'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter',
+           'lfiltic', 'sosfilt', 'deconvolve', 'hilbert', 'hilbert2',
+           'cmplx_sort', 'unique_roots', 'invres', 'invresz', 'residue',
+           'residuez', 'resample', 'resample_poly', 'detrend',
+           'lfilter_zi', 'sosfilt_zi', 'sosfiltfilt', 'choose_conv_method',
+           'filtfilt', 'decimate', 'vectorstrength']
+
+
+_modedict = {'valid': 0, 'same': 1, 'full': 2}
+
+_boundarydict = {'fill': 0, 'pad': 0, 'wrap': 2, 'circular': 2, 'symm': 1,
+                 'symmetric': 1, 'reflect': 4}
+
+
+def _valfrommode(mode):
+    try:
+        return _modedict[mode]
+    except KeyError as e:
+        raise ValueError("Acceptable mode flags are 'valid',"
+                         " 'same', or 'full'.") from e
+
+
+def _bvalfromboundary(boundary):
+    try:
+        return _boundarydict[boundary] << 2
+    except KeyError as e:
+        raise ValueError("Acceptable boundary flags are 'fill', 'circular' "
+                         "(or 'wrap'), and 'symmetric' (or 'symm').") from e
+
+
+def _inputs_swap_needed(mode, shape1, shape2, axes=None):
+    """Determine if inputs arrays need to be swapped in `"valid"` mode.
+
+    If in `"valid"` mode, returns whether or not the input arrays need to be
+    swapped depending on whether `shape1` is at least as large as `shape2` in
+    every calculated dimension.
+
+    This is important for some of the correlation and convolution
+    implementations in this module, where the larger array input needs to come
+    before the smaller array input when operating in this mode.
+
+    Note that if the mode provided is not 'valid', False is immediately
+    returned.
+
+    """
+    if mode != 'valid':
+        return False
+
+    if not shape1:
+        return False
+
+    if axes is None:
+        axes = range(len(shape1))
+
+    ok1 = all(shape1[i] >= shape2[i] for i in axes)
+    ok2 = all(shape2[i] >= shape1[i] for i in axes)
+
+    if not (ok1 or ok2):
+        raise ValueError("For 'valid' mode, one must be at least "
+                         "as large as the other in every dimension")
+
+    return not ok1
+
+
+def correlate(in1, in2, mode='full', method='auto'):
+    r"""
+    Cross-correlate two N-dimensional arrays.
+
+    Cross-correlate `in1` and `in2`, with the output size determined by the
+    `mode` argument.
+
+    Parameters
+    ----------
+    in1 : array_like
+        First input.
+    in2 : array_like
+        Second input. Should have the same number of dimensions as `in1`.
+    mode : str {'full', 'valid', 'same'}, optional
+        A string indicating the size of the output:
+
+        ``full``
+           The output is the full discrete linear cross-correlation
+           of the inputs. (Default)
+        ``valid``
+           The output consists only of those elements that do not
+           rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
+           must be at least as large as the other in every dimension.
+        ``same``
+           The output is the same size as `in1`, centered
+           with respect to the 'full' output.
+    method : str {'auto', 'direct', 'fft'}, optional
+        A string indicating which method to use to calculate the correlation.
+
+        ``direct``
+           The correlation is determined directly from sums, the definition of
+           correlation.
+        ``fft``
+           The Fast Fourier Transform is used to perform the correlation more
+           quickly (only available for numerical arrays.)
+        ``auto``
+           Automatically chooses direct or Fourier method based on an estimate
+           of which is faster (default).  See `convolve` Notes for more detail.
+
+           .. versionadded:: 0.19.0
+
+    Returns
+    -------
+    correlate : array
+        An N-dimensional array containing a subset of the discrete linear
+        cross-correlation of `in1` with `in2`.
+
+    See Also
+    --------
+    choose_conv_method : contains more documentation on `method`.
+    correlation_lags : calculates the lag / displacement indices array for 1D
+        cross-correlation.
+
+    Notes
+    -----
+    The correlation z of two d-dimensional arrays x and y is defined as::
+
+        z[...,k,...] = sum[..., i_l, ...] x[..., i_l,...] * conj(y[..., i_l - k,...])
+
+    This way, if x and y are 1-D arrays and ``z = correlate(x, y, 'full')``
+    then
+
+    .. math::
+
+          z[k] = (x * y)(k - N + 1)
+               = \sum_{l=0}^{||x||-1}x_l y_{l-k+N-1}^{*}
+
+    for :math:`k = 0, 1, ..., ||x|| + ||y|| - 2`
+
+    where :math:`||x||` is the length of ``x``, :math:`N = \max(||x||,||y||)`,
+    and :math:`y_m` is 0 when m is outside the range of y.
+
+    ``method='fft'`` only works for numerical arrays as it relies on
+    `fftconvolve`. In certain cases (i.e., arrays of objects or when
+    rounding integers can lose precision), ``method='direct'`` is always used.
+
+    When using "same" mode with even-length inputs, the outputs of `correlate`
+    and `correlate2d` differ: There is a 1-index offset between them.
+
+    Examples
+    --------
+    Implement a matched filter using cross-correlation, to recover a signal
+    that has passed through a noisy channel.
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+    >>> rng = np.random.default_rng()
+
+    >>> sig = np.repeat([0., 1., 1., 0., 1., 0., 0., 1.], 128)
+    >>> sig_noise = sig + rng.standard_normal(len(sig))
+    >>> corr = signal.correlate(sig_noise, np.ones(128), mode='same') / 128
+
+    >>> clock = np.arange(64, len(sig), 128)
+    >>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, sharex=True)
+    >>> ax_orig.plot(sig)
+    >>> ax_orig.plot(clock, sig[clock], 'ro')
+    >>> ax_orig.set_title('Original signal')
+    >>> ax_noise.plot(sig_noise)
+    >>> ax_noise.set_title('Signal with noise')
+    >>> ax_corr.plot(corr)
+    >>> ax_corr.plot(clock, corr[clock], 'ro')
+    >>> ax_corr.axhline(0.5, ls=':')
+    >>> ax_corr.set_title('Cross-correlated with rectangular pulse')
+    >>> ax_orig.margins(0, 0.1)
+    >>> fig.tight_layout()
+    >>> plt.show()
+
+    Compute the cross-correlation of a noisy signal with the original signal.
+
+    >>> x = np.arange(128) / 128
+    >>> sig = np.sin(2 * np.pi * x)
+    >>> sig_noise = sig + rng.standard_normal(len(sig))
+    >>> corr = signal.correlate(sig_noise, sig)
+    >>> lags = signal.correlation_lags(len(sig), len(sig_noise))
+    >>> corr /= np.max(corr)
+
+    >>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, figsize=(4.8, 4.8))
+    >>> ax_orig.plot(sig)
+    >>> ax_orig.set_title('Original signal')
+    >>> ax_orig.set_xlabel('Sample Number')
+    >>> ax_noise.plot(sig_noise)
+    >>> ax_noise.set_title('Signal with noise')
+    >>> ax_noise.set_xlabel('Sample Number')
+    >>> ax_corr.plot(lags, corr)
+    >>> ax_corr.set_title('Cross-correlated signal')
+    >>> ax_corr.set_xlabel('Lag')
+    >>> ax_orig.margins(0, 0.1)
+    >>> ax_noise.margins(0, 0.1)
+    >>> ax_corr.margins(0, 0.1)
+    >>> fig.tight_layout()
+    >>> plt.show()
+
+    """
+    in1 = np.asarray(in1)
+    in2 = np.asarray(in2)
+
+    if in1.ndim == in2.ndim == 0:
+        return in1 * in2.conj()
+    elif in1.ndim != in2.ndim:
+        raise ValueError("in1 and in2 should have the same dimensionality")
+
+    # Don't use _valfrommode, since correlate should not accept numeric modes
+    try:
+        val = _modedict[mode]
+    except KeyError as e:
+        raise ValueError("Acceptable mode flags are 'valid',"
+                         " 'same', or 'full'.") from e
+
+    # this either calls fftconvolve or this function with method=='direct'
+    if method in ('fft', 'auto'):
+        return convolve(in1, _reverse_and_conj(in2), mode, method)
+
+    elif method == 'direct':
+        # fastpath to faster numpy.correlate for 1d inputs when possible
+        if _np_conv_ok(in1, in2, mode):
+            return np.correlate(in1, in2, mode)
+
+        # _correlateND is far slower when in2.size > in1.size, so swap them
+        # and then undo the effect afterward if mode == 'full'.  Also, it fails
+        # with 'valid' mode if in2 is larger than in1, so swap those, too.
+        # Don't swap inputs for 'same' mode, since shape of in1 matters.
+        swapped_inputs = ((mode == 'full') and (in2.size > in1.size) or
+                          _inputs_swap_needed(mode, in1.shape, in2.shape))
+
+        if swapped_inputs:
+            in1, in2 = in2, in1
+
+        if mode == 'valid':
+            ps = [i - j + 1 for i, j in zip(in1.shape, in2.shape)]
+            out = np.empty(ps, in1.dtype)
+
+            z = _sigtools._correlateND(in1, in2, out, val)
+
+        else:
+            ps = [i + j - 1 for i, j in zip(in1.shape, in2.shape)]
+
+            # zero pad input
+            in1zpadded = np.zeros(ps, in1.dtype)
+            sc = tuple(slice(0, i) for i in in1.shape)
+            in1zpadded[sc] = in1.copy()
+
+            if mode == 'full':
+                out = np.empty(ps, in1.dtype)
+            elif mode == 'same':
+                out = np.empty(in1.shape, in1.dtype)
+
+            z = _sigtools._correlateND(in1zpadded, in2, out, val)
+
+        if swapped_inputs:
+            # Reverse and conjugate to undo the effect of swapping inputs
+            z = _reverse_and_conj(z)
+
+        return z
+
+    else:
+        raise ValueError("Acceptable method flags are 'auto',"
+                         " 'direct', or 'fft'.")
+
+
+def correlation_lags(in1_len, in2_len, mode='full'):
+    r"""
+    Calculates the lag / displacement indices array for 1D cross-correlation.
+
+    Parameters
+    ----------
+    in1_len : int
+        First input size.
+    in2_len : int
+        Second input size.
+    mode : str {'full', 'valid', 'same'}, optional
+        A string indicating the size of the output.
+        See the documentation of `correlate` for more information.
+
+    Returns
+    -------
+    lags : array
+        Returns an array containing cross-correlation lag/displacement
+        indices. The lag array can be indexed with the ``np.argmax`` of the
+        correlation to return the lag/displacement.
+
+    See Also
+    --------
+    correlate : Compute the N-dimensional cross-correlation.
+
+    Notes
+    -----
+    Cross-correlation for continuous functions :math:`f` and :math:`g` is
+    defined as:
+
+    .. math::
+
+        \left ( f\star g \right )\left ( \tau \right )
+        \triangleq \int_{t_0}^{t_0 +T}
+        \overline{f\left ( t \right )}g\left ( t+\tau \right )dt
+
+    Where :math:`\tau` is defined as the displacement, also known as the lag.
+
+    Cross correlation for discrete functions :math:`f` and :math:`g` is
+    defined as:
+
+    .. math::
+        \left ( f\star g \right )\left [ n \right ]
+        \triangleq \sum_{-\infty}^{\infty}
+        \overline{f\left [ m \right ]}g\left [ m+n \right ]
+
+    Where :math:`n` is the lag.
+
+    Examples
+    --------
+    Cross-correlation of a signal with its time-delayed self.
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> rng = np.random.default_rng()
+    >>> x = rng.standard_normal(1000)
+    >>> y = np.concatenate([rng.standard_normal(100), x])
+    >>> correlation = signal.correlate(x, y, mode="full")
+    >>> lags = signal.correlation_lags(x.size, y.size, mode="full")
+    >>> lag = lags[np.argmax(correlation)]
+    """
+
+    # calculate lag ranges in different modes of operation
+    if mode == "full":
+        # the output is the full discrete linear convolution
+        # of the inputs. (Default)
+        lags = np.arange(-in2_len + 1, in1_len)
+    elif mode == "same":
+        # the output is the same size as `in1`, centered
+        # with respect to the 'full' output.
+        # calculate the full output
+        lags = np.arange(-in2_len + 1, in1_len)
+        # determine the midpoint in the full output
+        mid = lags.size // 2
+        # determine lag_bound to be used with respect
+        # to the midpoint
+        lag_bound = in1_len // 2
+        # calculate lag ranges for even and odd scenarios
+        if in1_len % 2 == 0:
+            lags = lags[(mid-lag_bound):(mid+lag_bound)]
+        else:
+            lags = lags[(mid-lag_bound):(mid+lag_bound)+1]
+    elif mode == "valid":
+        # the output consists only of those elements that do not
+        # rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
+        # must be at least as large as the other in every dimension.
+
+        # the lag_bound will be either negative or positive
+        # this lets us infer how to present the lag range
+        lag_bound = in1_len - in2_len
+        if lag_bound >= 0:
+            lags = np.arange(lag_bound + 1)
+        else:
+            lags = np.arange(lag_bound, 1)
+    return lags
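+
+
+# Editor's illustrative sketch, not part of the upstream scipy source:
+# pairing `correlate` with `correlation_lags` recovers how far one signal is
+# shifted relative to another. The helper name, the 100-sample offset, and
+# the seed are assumptions made for illustration.
+def _sketch_recover_known_delay():
+    rng = np.random.default_rng(1)
+    x = rng.standard_normal(1000)
+    y = np.concatenate([np.zeros(100), x])   # x delayed by 100 samples
+    correlation = correlate(x, y, mode='full')
+    lags = correlation_lags(x.size, y.size, mode='full')
+    assert lags[np.argmax(correlation)] == -100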
+
+
+def _centered(arr, newshape):
+    # Return the center newshape portion of the array.
+    newshape = np.asarray(newshape)
+    currshape = np.array(arr.shape)
+    startind = (currshape - newshape) // 2
+    endind = startind + newshape
+    myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
+    return arr[tuple(myslice)]
+
+
+def _init_freq_conv_axes(in1, in2, mode, axes, sorted_axes=False):
+    """Handle the axes argument for frequency-domain convolution.
+
+    Returns the inputs and axes in a standard form, eliminating redundant axes,
+    swapping the inputs if necessary, and checking for various potential
+    errors.
+
+    Parameters
+    ----------
+    in1 : array
+        First input.
+    in2 : array
+        Second input.
+    mode : str {'full', 'valid', 'same'}, optional
+        A string indicating the size of the output.
+        See the documentation of `fftconvolve` for more information.
+    axes : list of ints
+        Axes over which to compute the FFTs.
+    sorted_axes : bool, optional
+        If `True`, sort the axes.
+        Default is `False`, do not sort.
+
+    Returns
+    -------
+    in1 : array
+        The first input, possibly swapped with the second input.
+    in2 : array
+        The second input, possibly swapped with the first input.
+    axes : list of ints
+        Axes over which to compute the FFTs.
+
+    """
+    s1 = in1.shape
+    s2 = in2.shape
+    noaxes = axes is None
+
+    _, axes = _init_nd_shape_and_axes(in1, shape=None, axes=axes)
+
+    if not noaxes and not len(axes):
+        raise ValueError("when provided, axes cannot be empty")
+
+    # Axes of length 1 can rely on broadcasting rules for multiply,
+    # no fft needed.
+    axes = [a for a in axes if s1[a] != 1 and s2[a] != 1]
+
+    if sorted_axes:
+        axes.sort()
+
+    if not all(s1[a] == s2[a] or s1[a] == 1 or s2[a] == 1
+               for a in range(in1.ndim) if a not in axes):
+        raise ValueError("incompatible shapes for in1 and in2:"
+                         " {0} and {1}".format(s1, s2))
+
+    # Check that input sizes are compatible with 'valid' mode.
+    if _inputs_swap_needed(mode, s1, s2, axes=axes):
+        # Convolution is commutative; order doesn't have any effect on output.
+        in1, in2 = in2, in1
+
+    return in1, in2, axes
+
+
+def _freq_domain_conv(in1, in2, axes, shape, calc_fast_len=False):
+    """Convolve two arrays in the frequency domain.
+
+    This function implements only the basic FFT-related operations.
+    Specifically, it converts the signals to the frequency domain, multiplies
+    them, then converts them back to the time domain.  Calculations of axes,
+    shapes, convolution mode, etc. are implemented in higher level-functions,
+    such as `fftconvolve` and `oaconvolve`.  Those functions should be used
+    instead of this one.
+
+    Parameters
+    ----------
+    in1 : array_like
+        First input.
+    in2 : array_like
+        Second input. Should have the same number of dimensions as `in1`.
+    axes : array_like of ints
+        Axes over which to compute the FFTs.
+    shape : array_like of ints
+        The sizes of the FFTs.
+    calc_fast_len : bool, optional
+        If `True`, set each value of `shape` to the next fast FFT length.
+        Default is `False`, use `shape` as-is.
+
+    Returns
+    -------
+    out : array
+        An N-dimensional array containing the discrete linear convolution of
+        `in1` with `in2`.
+
+    """
+    if not len(axes):
+        return in1 * in2
+
+    complex_result = (in1.dtype.kind == 'c' or in2.dtype.kind == 'c')
+
+    if calc_fast_len:
+        # Speed up FFT by padding to optimal size.
+        fshape = [
+            sp_fft.next_fast_len(shape[a], not complex_result) for a in axes]
+    else:
+        fshape = shape
+
+    if not complex_result:
+        fft, ifft = sp_fft.rfftn, sp_fft.irfftn
+    else:
+        fft, ifft = sp_fft.fftn, sp_fft.ifftn
+
+    sp1 = fft(in1, fshape, axes=axes)
+    sp2 = fft(in2, fshape, axes=axes)
+
+    ret = ifft(sp1 * sp2, fshape, axes=axes)
+
+    if calc_fast_len:
+        fslice = tuple([slice(sz) for sz in shape])
+        ret = ret[fslice]
+
+    return ret
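+
+
+# Editor's illustrative sketch, not part of the upstream scipy source: the
+# helper above is just FFT -> pointwise multiply -> inverse FFT, so for 1-D
+# real inputs it should reproduce numpy's direct convolution. The helper
+# name, sizes, and seed are assumptions made for illustration.
+def _sketch_freq_domain_conv_matches_numpy():
+    rng = np.random.default_rng(2)
+    a, b = rng.standard_normal(40), rng.standard_normal(15)
+    full_shape = [a.size + b.size - 1]
+    out = _freq_domain_conv(a, b, axes=[0], shape=full_shape,
+                            calc_fast_len=True)
+    assert np.allclose(out, np.convolve(a, b, mode='full'))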
+
+
+def _apply_conv_mode(ret, s1, s2, mode, axes):
+    """Calculate the convolution result shape based on the `mode` argument.
+
+    Returns the result sliced to the correct size for the given mode.
+
+    Parameters
+    ----------
+    ret : array
+        The result array, with the appropriate shape for the 'full' mode.
+    s1 : list of int
+        The shape of the first input.
+    s2 : list of int
+        The shape of the second input.
+    mode : str {'full', 'valid', 'same'}
+        A string indicating the size of the output.
+        See the documentation of `fftconvolve` for more information.
+    axes : list of ints
+        Axes over which to compute the convolution.
+
+    Returns
+    -------
+    ret : array
+        A copy of `ret`, sliced to the correct size for the given `mode`.
+
+    """
+    if mode == "full":
+        return ret.copy()
+    elif mode == "same":
+        return _centered(ret, s1).copy()
+    elif mode == "valid":
+        shape_valid = [ret.shape[a] if a not in axes else s1[a] - s2[a] + 1
+                       for a in range(ret.ndim)]
+        return _centered(ret, shape_valid).copy()
+    else:
+        raise ValueError("acceptable mode flags are 'valid',"
+                         " 'same', or 'full'")
+
+
+def fftconvolve(in1, in2, mode="full", axes=None):
+    """Convolve two N-dimensional arrays using FFT.
+
+    Convolve `in1` and `in2` using the fast Fourier transform method, with
+    the output size determined by the `mode` argument.
+
+    This is generally much faster than `convolve` for large arrays (n > ~500),
+    but can be slower when only a few output values are needed, and can only
+    output float arrays (int or object array inputs will be cast to float).
+
+    As of v0.19, `convolve` automatically chooses this method or the direct
+    method based on an estimation of which is faster.
+
+    Parameters
+    ----------
+    in1 : array_like
+        First input.
+    in2 : array_like
+        Second input. Should have the same number of dimensions as `in1`.
+    mode : str {'full', 'valid', 'same'}, optional
+        A string indicating the size of the output:
+
+        ``full``
+           The output is the full discrete linear convolution
+           of the inputs. (Default)
+        ``valid``
+           The output consists only of those elements that do not
+           rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
+           must be at least as large as the other in every dimension.
+        ``same``
+           The output is the same size as `in1`, centered
+           with respect to the 'full' output.
+    axes : int or array_like of ints or None, optional
+        Axes over which to compute the convolution.
+        The default is over all axes.
+
+    Returns
+    -------
+    out : array
+        An N-dimensional array containing a subset of the discrete linear
+        convolution of `in1` with `in2`.
+
+    See Also
+    --------
+    convolve : Uses the direct convolution or FFT convolution algorithm
+               depending on which is faster.
+    oaconvolve : Uses the overlap-add method to do convolution, which is
+                 generally faster when the input arrays are large and
+                 significantly different in size.
+
+    Examples
+    --------
+    Autocorrelation of white noise is an impulse.
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> rng = np.random.default_rng()
+    >>> sig = rng.standard_normal(1000)
+    >>> autocorr = signal.fftconvolve(sig, sig[::-1], mode='full')
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1)
+    >>> ax_orig.plot(sig)
+    >>> ax_orig.set_title('White noise')
+    >>> ax_mag.plot(np.arange(-len(sig)+1,len(sig)), autocorr)
+    >>> ax_mag.set_title('Autocorrelation')
+    >>> fig.tight_layout()
+    >>> fig.show()
+
+    Gaussian blur implemented using FFT convolution.  Notice the dark borders
+    around the image, due to the zero-padding beyond its boundaries.
+    The `convolve2d` function allows for other types of image boundaries,
+    but is far slower.
+
+    >>> from scipy import datasets
+    >>> face = datasets.face(gray=True)
+    >>> kernel = np.outer(signal.windows.gaussian(70, 8),
+    ...                   signal.windows.gaussian(70, 8))
+    >>> blurred = signal.fftconvolve(face, kernel, mode='same')
+
+    >>> fig, (ax_orig, ax_kernel, ax_blurred) = plt.subplots(3, 1,
+    ...                                                      figsize=(6, 15))
+    >>> ax_orig.imshow(face, cmap='gray')
+    >>> ax_orig.set_title('Original')
+    >>> ax_orig.set_axis_off()
+    >>> ax_kernel.imshow(kernel, cmap='gray')
+    >>> ax_kernel.set_title('Gaussian kernel')
+    >>> ax_kernel.set_axis_off()
+    >>> ax_blurred.imshow(blurred, cmap='gray')
+    >>> ax_blurred.set_title('Blurred')
+    >>> ax_blurred.set_axis_off()
+    >>> fig.show()
+
+    """
+    in1 = np.asarray(in1)
+    in2 = np.asarray(in2)
+
+    if in1.ndim == in2.ndim == 0:  # scalar inputs
+        return in1 * in2
+    elif in1.ndim != in2.ndim:
+        raise ValueError("in1 and in2 should have the same dimensionality")
+    elif in1.size == 0 or in2.size == 0:  # empty arrays
+        return np.array([])
+
+    in1, in2, axes = _init_freq_conv_axes(in1, in2, mode, axes,
+                                          sorted_axes=False)
+
+    s1 = in1.shape
+    s2 = in2.shape
+
+    shape = [max((s1[i], s2[i])) if i not in axes else s1[i] + s2[i] - 1
+             for i in range(in1.ndim)]
+
+    ret = _freq_domain_conv(in1, in2, axes, shape, calc_fast_len=True)
+
+    return _apply_conv_mode(ret, s1, s2, mode, axes)
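+
+
+# Editor's illustrative sketch, not part of the upstream scipy source: FFT
+# convolution is exact up to floating-point rounding, so it should agree
+# with numpy's direct implementation in every mode. The helper name, sizes,
+# and seed are assumptions made for illustration.
+def _sketch_fftconvolve_matches_direct():
+    rng = np.random.default_rng(3)
+    a, b = rng.standard_normal(100), rng.standard_normal(31)
+    for mode in ('full', 'same', 'valid'):
+        assert np.allclose(fftconvolve(a, b, mode=mode),
+                           np.convolve(a, b, mode=mode))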
+
+
+def _calc_oa_lens(s1, s2):
+    """Calculate the optimal FFT lengths for overlapp-add convolution.
+
+    The calculation is done for a single dimension.
+
+    Parameters
+    ----------
+    s1 : int
+        Size of the dimension for the first array.
+    s2 : int
+        Size of the dimension for the second array.
+
+    Returns
+    -------
+    block_size : int
+        The size of the FFT blocks.
+    overlap : int
+        The amount of overlap between two blocks.
+    in1_step : int
+        The size of each step for the first array.
+    in2_step : int
+        The size of each step for the second array.
+
+    """
+    # Set up the arguments for the conventional FFT approach.
+    fallback = (s1+s2-1, None, s1, s2)
+
+    # Use conventional FFT convolve if the sizes are the same.
+    if s1 == s2 or s1 == 1 or s2 == 1:
+        return fallback
+
+    if s2 > s1:
+        s1, s2 = s2, s1
+        swapped = True
+    else:
+        swapped = False
+
+    # There cannot be a useful block size if s2 is more than half of s1.
+    if s2 >= s1/2:
+        return fallback
+
+    # Derivation of optimal block length
+    # For original formula see:
+    # https://en.wikipedia.org/wiki/Overlap-add_method
+    #
+    # Formula:
+    # K = overlap = s2-1
+    # N = block_size
+    # C = complexity
+    # e = exponential, exp(1)
+    #
+    # C = (N*(log2(N)+1))/(N-K)
+    # C = (N*log2(2N))/(N-K)
+    # C = N/(N-K) * log2(2N)
+    # C1 = N/(N-K)
+    # C2 = log2(2N) = ln(2N)/ln(2)
+    #
+    # dC1/dN = (1*(N-K)-N)/(N-K)^2 = -K/(N-K)^2
+    # dC2/dN = 2/(2*N*ln(2)) = 1/(N*ln(2))
+    #
+    # dC/dN = dC1/dN*C2 + dC2/dN*C1
+    # dC/dN = -K*ln(2N)/(ln(2)*(N-K)^2) + N/(N*ln(2)*(N-K))
+    # dC/dN = -K*ln(2N)/(ln(2)*(N-K)^2) + 1/(ln(2)*(N-K))
+    # dC/dN = -K*ln(2N)/(ln(2)*(N-K)^2) + (N-K)/(ln(2)*(N-K)^2)
+    # dC/dN = (-K*ln(2N) + (N-K))/(ln(2)*(N-K)^2)
+    # dC/dN = (N - K*ln(2N) - K)/(ln(2)*(N-K)^2)
+    #
+    # Solve for minimum, where dC/dN = 0
+    # 0 = (N - K*ln(2N) - K)/(ln(2)*(N-K)^2)
+    # 0 * ln(2)*(N-K)^2 = N - K*ln(2N) - K
+    # 0 = N - K*ln(2N) - K
+    # 0 = N - K*(ln(2N) + 1)
+    # 0 = N - K*ln(2Ne)
+    # N = K*ln(2Ne)
+    # N/K = ln(2Ne)
+    #
+    # e^(N/K) = e^ln(2Ne)
+    # e^(N/K) = 2Ne
+    # 1/e^(N/K) = 1/(2*N*e)
+    # e^(N/-K) = 1/(2*N*e)
+    # e^(N/-K) = K/N*1/(2*K*e)
+    # N/K*e^(N/-K) = 1/(2*e*K)
+    # N/-K*e^(N/-K) = -1/(2*e*K)
+    #
+    # Using Lambert W function
+    # https://en.wikipedia.org/wiki/Lambert_W_function
+    # x = W(y) It is the solution to y = x*e^x
+    # x = N/-K
+    # y = -1/(2*e*K)
+    #
+    # N/-K = W(-1/(2*e*K))
+    #
+    # N = -K*W(-1/(2*e*K))
+    overlap = s2-1
+    opt_size = -overlap*lambertw(-1/(2*math.e*overlap), k=-1).real
+    block_size = sp_fft.next_fast_len(math.ceil(opt_size))
+
+    # Use conventional FFT convolve if there is only going to be one block.
+    if block_size >= s1:
+        return fallback
+
+    if not swapped:
+        in1_step = block_size-s2+1
+        in2_step = s2
+    else:
+        in1_step = s2
+        in2_step = block_size-s2+1
+
+    return block_size, overlap, in1_step, in2_step
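+
+
+# Editor's illustrative sketch, not part of the upstream scipy source: for a
+# long signal and a short filter, the helper above should return a block
+# size derived from the Lambert W optimum, with overlap = s2 - 1 and steps
+# consistent with that block size. The helper name and sizes are assumptions
+# made for illustration.
+def _sketch_calc_oa_lens():
+    block_size, overlap, in1_step, in2_step = _calc_oa_lens(2**16, 128)
+    assert overlap == 127                    # K = s2 - 1
+    assert in2_step == 128                   # whole filter per block
+    assert in1_step == block_size - overlap  # signal samples per block
+    assert block_size < 2**16                # more than one block is used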
+
+
+def oaconvolve(in1, in2, mode="full", axes=None):
+    """Convolve two N-dimensional arrays using the overlap-add method.
+
+    Convolve `in1` and `in2` using the overlap-add method, with
+    the output size determined by the `mode` argument.
+
+    This is generally much faster than `convolve` for large arrays (n > ~500),
+    and generally much faster than `fftconvolve` when one array is much
+    larger than the other, but can be slower when only a few output values are
+    needed or when the arrays are very similar in shape, and can only
+    output float arrays (int or object array inputs will be cast to float).
+
+    Parameters
+    ----------
+    in1 : array_like
+        First input.
+    in2 : array_like
+        Second input. Should have the same number of dimensions as `in1`.
+    mode : str {'full', 'valid', 'same'}, optional
+        A string indicating the size of the output:
+
+        ``full``
+           The output is the full discrete linear convolution
+           of the inputs. (Default)
+        ``valid``
+           The output consists only of those elements that do not
+           rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
+           must be at least as large as the other in every dimension.
+        ``same``
+           The output is the same size as `in1`, centered
+           with respect to the 'full' output.
+    axes : int or array_like of ints or None, optional
+        Axes over which to compute the convolution.
+        The default is over all axes.
+
+    Returns
+    -------
+    out : array
+        An N-dimensional array containing a subset of the discrete linear
+        convolution of `in1` with `in2`.
+
+    See Also
+    --------
+    convolve : Uses the direct convolution or FFT convolution algorithm
+               depending on which is faster.
+    fftconvolve : An implementation of convolution using FFT.
+
+    Notes
+    -----
+    .. versionadded:: 1.4.0
+
+    References
+    ----------
+    .. [1] Wikipedia, "Overlap-add_method".
+           https://en.wikipedia.org/wiki/Overlap-add_method
+    .. [2] Richard G. Lyons. Understanding Digital Signal Processing,
+           Third Edition, 2011. Chapter 13.10.
+           ISBN 13: 978-0137-02741-5
+
+    Examples
+    --------
+    Convolve a 100,000 sample signal with a 512-sample filter.
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> rng = np.random.default_rng()
+    >>> sig = rng.standard_normal(100000)
+    >>> filt = signal.firwin(512, 0.01)
+    >>> fsig = signal.oaconvolve(sig, filt)
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1)
+    >>> ax_orig.plot(sig)
+    >>> ax_orig.set_title('White noise')
+    >>> ax_mag.plot(fsig)
+    >>> ax_mag.set_title('Filtered noise')
+    >>> fig.tight_layout()
+    >>> fig.show()
+
+    """
+    in1 = np.asarray(in1)
+    in2 = np.asarray(in2)
+
+    if in1.ndim == in2.ndim == 0:  # scalar inputs
+        return in1 * in2
+    elif in1.ndim != in2.ndim:
+        raise ValueError("in1 and in2 should have the same dimensionality")
+    elif in1.size == 0 or in2.size == 0:  # empty arrays
+        return np.array([])
+    elif in1.shape == in2.shape:  # Equivalent to fftconvolve
+        return fftconvolve(in1, in2, mode=mode, axes=axes)
+
+    in1, in2, axes = _init_freq_conv_axes(in1, in2, mode, axes,
+                                          sorted_axes=True)
+
+    s1 = in1.shape
+    s2 = in2.shape
+
+    if not axes:
+        ret = in1 * in2
+        return _apply_conv_mode(ret, s1, s2, mode, axes)
+
+    # Calculate this now since in1 is changed later
+    shape_final = [None if i not in axes else
+                   s1[i] + s2[i] - 1 for i in range(in1.ndim)]
+
+    # Calculate the block sizes for the output, steps, first and second inputs.
+    # It is simpler to calculate them all together than doing them in separate
+    # loops due to all the special cases that need to be handled.
+    optimal_sizes = ((-1, -1, s1[i], s2[i]) if i not in axes else
+                     _calc_oa_lens(s1[i], s2[i]) for i in range(in1.ndim))
+    block_size, overlaps, \
+        in1_step, in2_step = zip(*optimal_sizes)
+
+    # Fall back to fftconvolve if there is only one block in every dimension.
+    if in1_step == s1 and in2_step == s2:
+        return fftconvolve(in1, in2, mode=mode, axes=axes)
+
+    # Figure out the number of steps and padding.
+    # This would get too complicated in a list comprehension.
+    nsteps1 = []
+    nsteps2 = []
+    pad_size1 = []
+    pad_size2 = []
+    for i in range(in1.ndim):
+        if i not in axes:
+            pad_size1 += [(0, 0)]
+            pad_size2 += [(0, 0)]
+            continue
+
+        if s1[i] > in1_step[i]:
+            curnstep1 = math.ceil((s1[i]+1)/in1_step[i])
+            if (block_size[i] - overlaps[i])*curnstep1 < shape_final[i]:
+                curnstep1 += 1
+
+            curpad1 = curnstep1*in1_step[i] - s1[i]
+        else:
+            curnstep1 = 1
+            curpad1 = 0
+
+        if s2[i] > in2_step[i]:
+            curnstep2 = math.ceil((s2[i]+1)/in2_step[i])
+            if (block_size[i] - overlaps[i])*curnstep2 < shape_final[i]:
+                curnstep2 += 1
+
+            curpad2 = curnstep2*in2_step[i] - s2[i]
+        else:
+            curnstep2 = 1
+            curpad2 = 0
+
+        nsteps1 += [curnstep1]
+        nsteps2 += [curnstep2]
+        pad_size1 += [(0, curpad1)]
+        pad_size2 += [(0, curpad2)]
+
+    # Pad the array to a size that can be reshaped to the desired shape
+    # if necessary.
+    if not all(curpad == (0, 0) for curpad in pad_size1):
+        in1 = np.pad(in1, pad_size1, mode='constant', constant_values=0)
+
+    if not all(curpad == (0, 0) for curpad in pad_size2):
+        in2 = np.pad(in2, pad_size2, mode='constant', constant_values=0)
+
+    # Reshape the overlap-add parts to input block sizes.
+    split_axes = [iax+i for i, iax in enumerate(axes)]
+    fft_axes = [iax+1 for iax in split_axes]
+
+    # We need to put each new dimension before the corresponding dimension
+    # being reshaped in order to get the data in the right layout at the end.
+    reshape_size1 = list(in1_step)
+    reshape_size2 = list(in2_step)
+    for i, iax in enumerate(split_axes):
+        reshape_size1.insert(iax, nsteps1[i])
+        reshape_size2.insert(iax, nsteps2[i])
+
+    in1 = in1.reshape(*reshape_size1)
+    in2 = in2.reshape(*reshape_size2)
+
+    # Do the convolution.
+    fft_shape = [block_size[i] for i in axes]
+    ret = _freq_domain_conv(in1, in2, fft_axes, fft_shape, calc_fast_len=False)
+
+    # Do the overlap-add.
+    for ax, ax_fft, ax_split in zip(axes, fft_axes, split_axes):
+        overlap = overlaps[ax]
+        if overlap is None:
+            continue
+
+        ret, overpart = np.split(ret, [-overlap], ax_fft)
+        overpart = np.split(overpart, [-1], ax_split)[0]
+
+        ret_overpart = np.split(ret, [overlap], ax_fft)[0]
+        ret_overpart = np.split(ret_overpart, [1], ax_split)[1]
+        ret_overpart += overpart
+
+    # Reshape back to the correct dimensionality.
+    shape_ret = [ret.shape[i] if i not in fft_axes else
+                 ret.shape[i]*ret.shape[i-1]
+                 for i in range(ret.ndim) if i not in split_axes]
+    ret = ret.reshape(*shape_ret)
+
+    # Slice to the correct size.
+    slice_final = tuple([slice(islice) for islice in shape_final])
+    ret = ret[slice_final]
+
+    return _apply_conv_mode(ret, s1, s2, mode, axes)
+
+
+def _numeric_arrays(arrays, kinds='buifc'):
+    """
+    See if a list of arrays are all numeric.
+
+    Parameters
+    ----------
+    arrays : array or list of arrays
+        arrays to check if numeric.
+    kinds : string-like
+        The dtypes of the arrays to be checked. If the dtype.kind of
+        the ndarrays are not in this string the function returns False and
+        otherwise returns True.
+    """
+    if isinstance(arrays, np.ndarray):
+        return arrays.dtype.kind in kinds
+    for array_ in arrays:
+        if array_.dtype.kind not in kinds:
+            return False
+    return True
+
+
+def _conv_ops(x_shape, h_shape, mode):
+    """
+    Find the number of operations required for direct/fft methods of
+    convolution. The direct-method counts were obtained with a dummy class
+    that tallies operations by overriding ``__mul__`` and ``__add__``.
+    The FFT counts rely on the (well-known) computational complexity of the
+    FFT (and the implementation of ``_freq_domain_conv``).
+
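+    For example, for two length-4 inputs in 'full' mode the direct estimate
+    is ``4 * 4 = 16`` operations, while the FFT estimate is
+    ``3 * 7 * log(7) ~= 41`` for the three length-7 transforms.
+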
+    """
+    if mode == "full":
+        out_shape = [n + k - 1 for n, k in zip(x_shape, h_shape)]
+    elif mode == "valid":
+        out_shape = [abs(n - k) + 1 for n, k in zip(x_shape, h_shape)]
+    elif mode == "same":
+        out_shape = x_shape
+    else:
+        raise ValueError("Acceptable mode flags are 'valid',"
+                         " 'same', or 'full', not mode={}".format(mode))
+
+    s1, s2 = x_shape, h_shape
+    if len(x_shape) == 1:
+        s1, s2 = s1[0], s2[0]
+        if mode == "full":
+            direct_ops = s1 * s2
+        elif mode == "valid":
+            direct_ops = (s2 - s1 + 1) * s1 if s2 >= s1 else (s1 - s2 + 1) * s2
+        elif mode == "same":
+            direct_ops = (s1 * s2 if s1 < s2 else
+                          s1 * s2 - (s2 // 2) * ((s2 + 1) // 2))
+    else:
+        if mode == "full":
+            direct_ops = min(_prod(s1), _prod(s2)) * _prod(out_shape)
+        elif mode == "valid":
+            direct_ops = min(_prod(s1), _prod(s2)) * _prod(out_shape)
+        elif mode == "same":
+            direct_ops = _prod(s1) * _prod(s2)
+
+    full_out_shape = [n + k - 1 for n, k in zip(x_shape, h_shape)]
+    N = _prod(full_out_shape)
+    fft_ops = 3 * N * np.log(N)  # 3 separate FFTs of size full_out_shape
+    return fft_ops, direct_ops
+
+
+def _fftconv_faster(x, h, mode):
+    """
+    See if using fftconvolve or convolve is faster.
+
+    Parameters
+    ----------
+    x : np.ndarray
+        Signal
+    h : np.ndarray
+        Kernel
+    mode : str
+        Mode passed to convolve
+
+    Returns
+    -------
+    fft_faster : bool
+
+    Notes
+    -----
+    See docstring of `choose_conv_method` for details on tuning hardware.
+
+    See pull request 11031 for more detail:
+    https://github.com/scipy/scipy/pull/11031.
+
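+    The prediction is a fitted linear cost model: 'fft' is reported faster
+    when ``O_fft * fft_ops < O_direct * direct_ops + O_offset``, with the
+    constants measured separately per mode and dimensionality.
+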
+    """
+    fft_ops, direct_ops = _conv_ops(x.shape, h.shape, mode)
+    offset = -1e-3 if x.ndim == 1 else -1e-4
+    constants = {
+            "valid": (1.89095737e-9, 2.1364985e-10, offset),
+            "full": (1.7649070e-9, 2.1414831e-10, offset),
+            "same": (3.2646654e-9, 2.8478277e-10, offset)
+            if h.size <= x.size
+            else (3.21635404e-9, 1.1773253e-8, -1e-5),
+    } if x.ndim == 1 else {
+            "valid": (1.85927e-9, 2.11242e-8, offset),
+            "full": (1.99817e-9, 1.66174e-8, offset),
+            "same": (2.04735e-9, 1.55367e-8, offset),
+    }
+    O_fft, O_direct, O_offset = constants[mode]
+    return O_fft * fft_ops < O_direct * direct_ops + O_offset
+
+
+def _reverse_and_conj(x):
+    """
+    Reverse array `x` in all dimensions and perform the complex conjugate
+    """
+    reverse = (slice(None, None, -1),) * x.ndim
+    return x[reverse].conj()
+
+
+def _np_conv_ok(volume, kernel, mode):
+    """
+    See if numpy supports convolution of `volume` and `kernel` (i.e. both are
+    1D ndarrays and of the appropriate shape).  NumPy's 'same' mode uses the
+    size of the larger input, while SciPy's uses the size of the first input.
+
+    Invalid mode strings will return False and be caught by the calling
+    function.
+    """
+    if volume.ndim == kernel.ndim == 1:
+        if mode in ('full', 'valid'):
+            return True
+        elif mode == 'same':
+            return volume.size >= kernel.size
+    else:
+        return False
+
+
+def _timeit_fast(stmt="pass", setup="pass", repeat=3):
+    """
+    Returns the time the statement/function took, in seconds.
+
+    Faster, less precise version of IPython's timeit. `stmt` can be a statement
+    written as a string or a callable.
+
+    For very slow functions, will do only 1 loop (like IPython's timeit)
+    with no repetitions (unlike IPython).  For fast functions, does only
+    enough loops to take about 5 ms, which produces similar results (on
+    Windows at least) while avoiding an extra, unmeasured warm-up cycle.
+
+    """
+    timer = timeit.Timer(stmt, setup)
+
+    # determine number of calls per rep so total time for 1 rep >= 5 ms
+    x = 0
+    for p in range(0, 10):
+        number = 10**p
+        x = timer.timeit(number)  # seconds
+        if x >= 5e-3 / 10:  # 5 ms for final test, 1/10th that for this one
+            break
+    if x > 1:  # second
+        # If it's macroscopic, don't bother with repetitions
+        best = x
+    else:
+        number *= 10
+        r = timer.repeat(repeat, number)
+        best = min(r)
+
+    sec = best / number
+    return sec
+
+
+def choose_conv_method(in1, in2, mode='full', measure=False):
+    """
+    Find the fastest convolution/correlation method.
+
+    This primarily exists to be called during the ``method='auto'`` option in
+    `convolve` and `correlate`. It can also be used to determine the value of
+    ``method`` for many different convolutions of the same dtype/shape.
+    In addition, it supports timing the convolution to adapt the value of
+    ``method`` to a particular set of inputs and/or hardware.
+
+    Parameters
+    ----------
+    in1 : array_like
+        The first argument passed into the convolution function.
+    in2 : array_like
+        The second argument passed into the convolution function.
+    mode : str {'full', 'valid', 'same'}, optional
+        A string indicating the size of the output:
+
+        ``full``
+           The output is the full discrete linear convolution
+           of the inputs. (Default)
+        ``valid``
+           The output consists only of those elements that do not
+           rely on the zero-padding.
+        ``same``
+           The output is the same size as `in1`, centered
+           with respect to the 'full' output.
+    measure : bool, optional
+        If True, run and time the convolution of `in1` and `in2` with both
+        methods and return the fastest. If False (default), predict the fastest
+        method using precomputed values.
+
+    Returns
+    -------
+    method : str
+        A string indicating which convolution method is fastest, either
+        'direct' or 'fft'
+    times : dict, optional
+        A dictionary containing the times (in seconds) needed for each method.
+        This value is only returned if ``measure=True``.
+
+    See Also
+    --------
+    convolve
+    correlate
+
+    Notes
+    -----
+    Generally, this method is 99% accurate for 2D signals and 85% accurate
+    for 1D signals for randomly chosen input sizes. For precision, use
+    ``measure=True`` to find the fastest method by timing the convolution.
+    This can be used to avoid the minimal overhead of finding the fastest
+    ``method`` later, or to adapt the value of ``method`` to a particular set
+    of inputs.
+
+    Experiments were run on an Amazon EC2 r5a.2xlarge machine to test this
+    function. These experiments measured the ratio between the time required
+    when using ``method='auto'`` and the time required for the fastest method
+    (i.e., ``ratio = time_auto / min(time_fft, time_direct)``). In these
+    experiments, we found:
+
+    * There is a 95% chance of this ratio being less than 1.5 for 1D signals
+      and a 99% chance of being less than 2.5 for 2D signals.
+    * The ratio was always less than 2.5 for 1D signals and less than 5 for
+      2D signals.
+    * This function is most inaccurate for 1D convolutions that take between 1
+      and 10 milliseconds with ``method='direct'``. A good proxy for this
+      (at least in our experiments) is ``1e6 <= in1.size * in2.size <= 1e7``.
+
+    The 2D results almost certainly generalize to 3D/4D/etc because the
+    implementation is the same (the 1D implementation is different).
+
+    All the numbers above are specific to the EC2 machine. However, we did find
+    that this function generalizes fairly decently across hardware. The speed
+    tests were of similar quality (and even slightly better) than the same
+    tests performed on the machine to tune this function's numbers (a mid-2014
+    15-inch MacBook Pro with 16GB RAM and a 2.5GHz Intel i7 processor).
+
+    There are cases when `fftconvolve` supports the inputs but this function
+    returns `direct` (e.g., to protect integer inputs from floating point
+    precision loss).
+
+    .. versionadded:: 0.19
+
+    Examples
+    --------
+    Estimate the fastest method for a given input:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> rng = np.random.default_rng()
+    >>> img = rng.random((32, 32))
+    >>> filter = rng.random((8, 8))
+    >>> method = signal.choose_conv_method(img, filter, mode='same')
+    >>> method
+    'fft'
+
+    This can then be applied to other arrays of the same dtype and shape:
+
+    >>> img2 = rng.random((32, 32))
+    >>> filter2 = rng.random((8, 8))
+    >>> corr2 = signal.correlate(img2, filter2, mode='same', method=method)
+    >>> conv2 = signal.convolve(img2, filter2, mode='same', method=method)
+
+    The output of this function (``method``) works with `correlate` and
+    `convolve`.
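+
+    With ``measure=True``, both methods are timed on the actual inputs and a
+    dict of timings is returned alongside the chosen method (the measured
+    times depend on the hardware):
+
+    >>> method, times = signal.choose_conv_method(img, filter, mode='same',
+    ...                                           measure=True)
+    >>> sorted(times)
+    ['direct', 'fft']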
+
+    """
+    volume = np.asarray(in1)
+    kernel = np.asarray(in2)
+
+    if measure:
+        times = {}
+        for method in ['fft', 'direct']:
+            times[method] = _timeit_fast(lambda: convolve(volume, kernel,
+                                         mode=mode, method=method))
+
+        chosen_method = 'fft' if times['fft'] < times['direct'] else 'direct'
+        return chosen_method, times
+
+    # for integer input,
+    # catch when more precision required than float provides (representing an
+    # integer as float can lose precision in fftconvolve if larger than 2**52)
+    if any([_numeric_arrays([x], kinds='ui') for x in [volume, kernel]]):
+        max_value = int(np.abs(volume).max()) * int(np.abs(kernel).max())
+        max_value *= int(min(volume.size, kernel.size))
+        if max_value > 2**np.finfo('float').nmant - 1:
+            return 'direct'
+
+    if _numeric_arrays([volume, kernel], kinds='b'):
+        return 'direct'
+
+    if _numeric_arrays([volume, kernel]):
+        if _fftconv_faster(volume, kernel, mode):
+            return 'fft'
+
+    return 'direct'
+
+
+def convolve(in1, in2, mode='full', method='auto'):
+    """
+    Convolve two N-dimensional arrays.
+
+    Convolve `in1` and `in2`, with the output size determined by the
+    `mode` argument.
+
+    Parameters
+    ----------
+    in1 : array_like
+        First input.
+    in2 : array_like
+        Second input. Should have the same number of dimensions as `in1`.
+    mode : str {'full', 'valid', 'same'}, optional
+        A string indicating the size of the output:
+
+        ``full``
+           The output is the full discrete linear convolution
+           of the inputs. (Default)
+        ``valid``
+           The output consists only of those elements that do not
+           rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
+           must be at least as large as the other in every dimension.
+        ``same``
+           The output is the same size as `in1`, centered
+           with respect to the 'full' output.
+    method : str {'auto', 'direct', 'fft'}, optional
+        A string indicating which method to use to calculate the convolution.
+
+        ``direct``
+           The convolution is determined directly from sums, the definition of
+           convolution.
+        ``fft``
+           The Fourier Transform is used to perform the convolution by calling
+           `fftconvolve`.
+        ``auto``
+           Automatically chooses direct or Fourier method based on an estimate
+           of which is faster (default).  See Notes for more detail.
+
+           .. versionadded:: 0.19.0
+
+    Returns
+    -------
+    convolve : array
+        An N-dimensional array containing a subset of the discrete linear
+        convolution of `in1` with `in2`.
+
+    Warns
+    -----
+    RuntimeWarning
+        Use of the FFT convolution on input containing NaN or Inf will lead
+        to the entire output being NaN or Inf. Use ``method='direct'`` when
+        your input contains NaN or Inf values.
+
+    See Also
+    --------
+    numpy.polymul : performs polynomial multiplication (same operation, but
+                    also accepts poly1d objects)
+    choose_conv_method : chooses the fastest appropriate convolution method
+    fftconvolve : Always uses the FFT method.
+    oaconvolve : Uses the overlap-add method to do convolution, which is
+                 generally faster when the input arrays are large and
+                 significantly different in size.
+
+    Notes
+    -----
+    By default, `convolve` and `correlate` use ``method='auto'``, which calls
+    `choose_conv_method` to choose the fastest method using pre-computed
+    values (`choose_conv_method` can also measure real-world timing with a
+    keyword argument). Because `fftconvolve` relies on floating point numbers,
+    there are certain constraints that may force `method=direct` (more detail
+    in `choose_conv_method` docstring).
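+
+    For small arrays the two methods agree to floating point tolerance,
+    which gives a quick way to sanity-check a forced ``method``:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> a, b = np.arange(4.0), np.array([1.0, 0.5])
+    >>> np.allclose(signal.convolve(a, b, method='direct'),
+    ...             signal.convolve(a, b, method='fft'))
+    True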
+
+    Examples
+    --------
+    Smooth a square pulse using a Hann window:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> sig = np.repeat([0., 1., 0.], 100)
+    >>> win = signal.windows.hann(50)
+    >>> filtered = signal.convolve(sig, win, mode='same') / sum(win)
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, (ax_orig, ax_win, ax_filt) = plt.subplots(3, 1, sharex=True)
+    >>> ax_orig.plot(sig)
+    >>> ax_orig.set_title('Original pulse')
+    >>> ax_orig.margins(0, 0.1)
+    >>> ax_win.plot(win)
+    >>> ax_win.set_title('Filter impulse response')
+    >>> ax_win.margins(0, 0.1)
+    >>> ax_filt.plot(filtered)
+    >>> ax_filt.set_title('Filtered signal')
+    >>> ax_filt.margins(0, 0.1)
+    >>> fig.tight_layout()
+    >>> fig.show()
+
+    """
+    volume = np.asarray(in1)
+    kernel = np.asarray(in2)
+
+    if volume.ndim == kernel.ndim == 0:
+        return volume * kernel
+    elif volume.ndim != kernel.ndim:
+        raise ValueError("volume and kernel should have the same "
+                         "dimensionality")
+
+    if _inputs_swap_needed(mode, volume.shape, kernel.shape):
+        # Convolution is commutative; order doesn't have any effect on output
+        volume, kernel = kernel, volume
+
+    if method == 'auto':
+        method = choose_conv_method(volume, kernel, mode=mode)
+
+    if method == 'fft':
+        out = fftconvolve(volume, kernel, mode=mode)
+        result_type = np.result_type(volume, kernel)
+        if result_type.kind in {'u', 'i'}:
+            out = np.around(out)
+
+        if np.isnan(out.flat[0]) or np.isinf(out.flat[0]):
+            warnings.warn("Use of fft convolution on input with NAN or inf"
+                          " results in NAN or inf output. Consider using"
+                          " method='direct' instead.",
+                          category=RuntimeWarning, stacklevel=2)
+
+        return out.astype(result_type)
+    elif method == 'direct':
+        # fastpath to faster numpy.convolve for 1d inputs when possible
+        if _np_conv_ok(volume, kernel, mode):
+            return np.convolve(volume, kernel, mode)
+
+        return correlate(volume, _reverse_and_conj(kernel), mode, 'direct')
+    else:
+        raise ValueError("Acceptable method flags are 'auto',"
+                         " 'direct', or 'fft'.")
+
+
+def order_filter(a, domain, rank):
+    """
+    Perform an order filter on an N-D array.
+
+    Perform an order filter on the input array. The domain argument acts as a
+    mask centered over each pixel. The non-zero elements of domain are
+    used to select elements surrounding each input pixel which are placed
+    in a list. The list is sorted, and the output for that pixel is the
+    element corresponding to rank in the sorted list.
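+
+    With an all-ones domain, choosing ``rank = domain.size // 2`` yields a
+    median filter; this is how `medfilt` in this module is implemented.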
+
+    Parameters
+    ----------
+    a : ndarray
+        The N-dimensional input array.
+    domain : array_like
+        A mask array with the same number of dimensions as `a`.
+        Each dimension should have an odd number of elements.
+    rank : int
+        A non-negative integer which selects the element from the
+        sorted list (0 corresponds to the smallest element, 1 is the
+        next smallest element, etc.).
+
+    Returns
+    -------
+    out : ndarray
+        The results of the order filter in an array with the same
+        shape as `a`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> x = np.arange(25).reshape(5, 5)
+    >>> domain = np.identity(3)
+    >>> x
+    array([[ 0,  1,  2,  3,  4],
+           [ 5,  6,  7,  8,  9],
+           [10, 11, 12, 13, 14],
+           [15, 16, 17, 18, 19],
+           [20, 21, 22, 23, 24]])
+    >>> signal.order_filter(x, domain, 0)
+    array([[  0.,   0.,   0.,   0.,   0.],
+           [  0.,   0.,   1.,   2.,   0.],
+           [  0.,   5.,   6.,   7.,   0.],
+           [  0.,  10.,  11.,  12.,   0.],
+           [  0.,   0.,   0.,   0.,   0.]])
+    >>> signal.order_filter(x, domain, 2)
+    array([[  6.,   7.,   8.,   9.,   4.],
+           [ 11.,  12.,  13.,  14.,   9.],
+           [ 16.,  17.,  18.,  19.,  14.],
+           [ 21.,  22.,  23.,  24.,  19.],
+           [ 20.,  21.,  22.,  23.,  24.]])
+
+    """
+    domain = np.asarray(domain)
+    for dimsize in domain.shape:
+        if (dimsize % 2) != 1:
+            raise ValueError("Each dimension of domain argument "
+                             "should have an odd number of elements.")
+    return _sigtools._order_filterND(a, domain, rank)
+
+
+def medfilt(volume, kernel_size=None):
+    """
+    Perform a median filter on an N-dimensional array.
+
+    Apply a median filter to the input array using a local window-size
+    given by `kernel_size`. The array will automatically be zero-padded.
+
+    Parameters
+    ----------
+    volume : array_like
+        An N-dimensional input array.
+    kernel_size : array_like, optional
+        A scalar or an N-length list giving the size of the median filter
+        window in each dimension.  Elements of `kernel_size` should be odd.
+        If `kernel_size` is a scalar, then this scalar is used as the size in
+        each dimension. Default size is 3 for each dimension.
+
+    Returns
+    -------
+    out : ndarray
+        An array the same size as input containing the median filtered
+        result.
+
+    Warns
+    -----
+    UserWarning
+        If array size is smaller than kernel size along any dimension
+
+    See Also
+    --------
+    scipy.ndimage.median_filter
+    scipy.signal.medfilt2d
+
+    Notes
+    -----
+    The more general function `scipy.ndimage.median_filter` has a more
+    efficient implementation of a median filter and therefore runs much faster.
+
+    For 2-dimensional images with ``uint8``, ``float32`` or ``float64`` dtypes,
+    the specialised function `scipy.signal.medfilt2d` may be faster.
+
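+    Examples
+    --------
+    A minimal 1-D example; the input is zero-padded at both ends:
+
+    >>> from scipy import signal
+    >>> signal.medfilt([1., 9., 2., 8., 3.], kernel_size=3).tolist()
+    [1.0, 2.0, 8.0, 3.0, 3.0]
+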
+    """
+    volume = np.atleast_1d(volume)
+    if kernel_size is None:
+        kernel_size = [3] * volume.ndim
+    kernel_size = np.asarray(kernel_size)
+    if kernel_size.shape == ():
+        kernel_size = np.repeat(kernel_size.item(), volume.ndim)
+
+    for k in range(volume.ndim):
+        if (kernel_size[k] % 2) != 1:
+            raise ValueError("Each element of kernel_size should be odd.")
+    if any(k > s for k, s in zip(kernel_size, volume.shape)):
+        warnings.warn('kernel_size exceeds volume extent: the volume will be '
+                      'zero-padded.')
+
+    domain = np.ones(kernel_size, dtype=volume.dtype)
+
+    numels = np.prod(kernel_size, axis=0)
+    order = numels // 2
+    return _sigtools._order_filterND(volume, domain, order)
+
+
+def wiener(im, mysize=None, noise=None):
+    """
+    Perform a Wiener filter on an N-dimensional array.
+
+    Apply a Wiener filter to the N-dimensional array `im`.
+
+    Parameters
+    ----------
+    im : ndarray
+        An N-dimensional array.
+    mysize : int or array_like, optional
+        A scalar or an N-length list giving the size of the Wiener filter
+        window in each dimension.  Elements of mysize should be odd.
+        If mysize is a scalar, then this scalar is used as the size
+        in each dimension.
+    noise : float, optional
+        The noise-power to use. If None, then noise is estimated as the
+        average of the local variance of the input.
+
+    Returns
+    -------
+    out : ndarray
+        Wiener filtered result with the same shape as `im`.
+
+    Notes
+    -----
+    This implementation is similar to wiener2 in Matlab/Octave.
+    For more details see [1]_.
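+
+    In terms of the local mean ``mu`` and local variance ``s2`` estimated
+    over the window, the output at each sample is::
+
+        mu + (s2 - noise) / s2 * (im - mu)
+
+    and the local mean alone is returned wherever ``s2 < noise``.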
+
+    References
+    ----------
+    .. [1] Lim, Jae S., Two-Dimensional Signal and Image Processing,
+           Englewood Cliffs, NJ, Prentice Hall, 1990, p. 548.
+
+    Examples
+    --------
+    >>> from scipy.datasets import face
+    >>> from scipy.signal import wiener
+    >>> import matplotlib.pyplot as plt
+    >>> import numpy as np
+    >>> rng = np.random.default_rng()
+    >>> img = rng.random((40, 40))    #Create a random image
+    >>> filtered_img = wiener(img, (5, 5))  #Filter the image
+    >>> f, (plot1, plot2) = plt.subplots(1, 2)
+    >>> plot1.imshow(img)
+    >>> plot2.imshow(filtered_img)
+    >>> plt.show()
+
+    """
+    im = np.asarray(im)
+    if mysize is None:
+        mysize = [3] * im.ndim
+    mysize = np.asarray(mysize)
+    if mysize.shape == ():
+        mysize = np.repeat(mysize.item(), im.ndim)
+
+    # Estimate the local mean
+    lMean = correlate(im, np.ones(mysize), 'same') / np.prod(mysize, axis=0)
+
+    # Estimate the local variance
+    lVar = (correlate(im ** 2, np.ones(mysize), 'same') /
+            np.prod(mysize, axis=0) - lMean ** 2)
+
+    # Estimate the noise power if needed.
+    if noise is None:
+        noise = np.mean(np.ravel(lVar), axis=0)
+
+    res = (im - lMean)
+    res *= (1 - noise / lVar)
+    res += lMean
+    out = np.where(lVar < noise, lMean, res)
+
+    return out
+
+
+def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
+    """
+    Convolve two 2-dimensional arrays.
+
+    Convolve `in1` and `in2` with output size determined by `mode`, and
+    boundary conditions determined by `boundary` and `fillvalue`.
+
+    Parameters
+    ----------
+    in1 : array_like
+        First input.
+    in2 : array_like
+        Second input. Should have the same number of dimensions as `in1`.
+    mode : str {'full', 'valid', 'same'}, optional
+        A string indicating the size of the output:
+
+        ``full``
+           The output is the full discrete linear convolution
+           of the inputs. (Default)
+        ``valid``
+           The output consists only of those elements that do not
+           rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
+           must be at least as large as the other in every dimension.
+        ``same``
+           The output is the same size as `in1`, centered
+           with respect to the 'full' output.
+    boundary : str {'fill', 'wrap', 'symm'}, optional
+        A flag indicating how to handle boundaries:
+
+        ``fill``
+           pad input arrays with fillvalue. (default)
+        ``wrap``
+           circular boundary conditions.
+        ``symm``
+           symmetrical boundary conditions.
+
+    fillvalue : scalar, optional
+        Value to fill pad input arrays with. Default is 0.
+
+    Returns
+    -------
+    out : ndarray
+        A 2-dimensional array containing a subset of the discrete linear
+        convolution of `in1` with `in2`.
+
+    Examples
+    --------
+    Compute the gradient of an image by 2D convolution with a complex Scharr
+    operator.  (Horizontal operator is real, vertical is imaginary.)  Use
+    symmetric boundary condition to avoid creating edges at the image
+    boundaries.
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> from scipy import datasets
+    >>> ascent = datasets.ascent()
+    >>> scharr = np.array([[ -3-3j, 0-10j,  +3 -3j],
+    ...                    [-10+0j, 0+ 0j, +10 +0j],
+    ...                    [ -3+3j, 0+10j,  +3 +3j]]) # Gx + j*Gy
+    >>> grad = signal.convolve2d(ascent, scharr, boundary='symm', mode='same')
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, (ax_orig, ax_mag, ax_ang) = plt.subplots(3, 1, figsize=(6, 15))
+    >>> ax_orig.imshow(ascent, cmap='gray')
+    >>> ax_orig.set_title('Original')
+    >>> ax_orig.set_axis_off()
+    >>> ax_mag.imshow(np.absolute(grad), cmap='gray')
+    >>> ax_mag.set_title('Gradient magnitude')
+    >>> ax_mag.set_axis_off()
+    >>> ax_ang.imshow(np.angle(grad), cmap='hsv') # hsv is cyclic, like angles
+    >>> ax_ang.set_title('Gradient orientation')
+    >>> ax_ang.set_axis_off()
+    >>> fig.show()
+
+    """
+    in1 = np.asarray(in1)
+    in2 = np.asarray(in2)
+
+    if not in1.ndim == in2.ndim == 2:
+        raise ValueError('convolve2d inputs must both be 2-D arrays')
+
+    if _inputs_swap_needed(mode, in1.shape, in2.shape):
+        in1, in2 = in2, in1
+
+    val = _valfrommode(mode)
+    bval = _bvalfromboundary(boundary)
+    out = _sigtools._convolve2d(in1, in2, 1, val, bval, fillvalue)
+    return out
+
+
+def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
+    """
+    Cross-correlate two 2-dimensional arrays.
+
+    Cross correlate `in1` and `in2` with output size determined by `mode`, and
+    boundary conditions determined by `boundary` and `fillvalue`.
+
+    Parameters
+    ----------
+    in1 : array_like
+        First input.
+    in2 : array_like
+        Second input. Should have the same number of dimensions as `in1`.
+    mode : str {'full', 'valid', 'same'}, optional
+        A string indicating the size of the output:
+
+        ``full``
+           The output is the full discrete linear cross-correlation
+           of the inputs. (Default)
+        ``valid``
+           The output consists only of those elements that do not
+           rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
+           must be at least as large as the other in every dimension.
+        ``same``
+           The output is the same size as `in1`, centered
+           with respect to the 'full' output.
+    boundary : str {'fill', 'wrap', 'symm'}, optional
+        A flag indicating how to handle boundaries:
+
+        ``fill``
+           pad input arrays with fillvalue. (default)
+        ``wrap``
+           circular boundary conditions.
+        ``symm``
+           symmetrical boundary conditions.
+
+    fillvalue : scalar, optional
+        Value to fill pad input arrays with. Default is 0.
+
+    Returns
+    -------
+    correlate2d : ndarray
+        A 2-dimensional array containing a subset of the discrete linear
+        cross-correlation of `in1` with `in2`.
+
+    Notes
+    -----
+    When using "same" mode with even-length inputs, the outputs of `correlate`
+    and `correlate2d` differ: There is a 1-index offset between them.
+
+    Examples
+    --------
+    Use 2D cross-correlation to find the location of a template in a noisy
+    image:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> from scipy import datasets
+    >>> rng = np.random.default_rng()
+    >>> face = datasets.face(gray=True) - datasets.face(gray=True).mean()
+    >>> template = np.copy(face[300:365, 670:750])  # right eye
+    >>> template -= template.mean()
+    >>> face = face + rng.standard_normal(face.shape) * 50  # add noise
+    >>> corr = signal.correlate2d(face, template, boundary='symm', mode='same')
+    >>> y, x = np.unravel_index(np.argmax(corr), corr.shape)  # find the match
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, (ax_orig, ax_template, ax_corr) = plt.subplots(3, 1,
+    ...                                                     figsize=(6, 15))
+    >>> ax_orig.imshow(face, cmap='gray')
+    >>> ax_orig.set_title('Original')
+    >>> ax_orig.set_axis_off()
+    >>> ax_template.imshow(template, cmap='gray')
+    >>> ax_template.set_title('Template')
+    >>> ax_template.set_axis_off()
+    >>> ax_corr.imshow(corr, cmap='gray')
+    >>> ax_corr.set_title('Cross-correlation')
+    >>> ax_corr.set_axis_off()
+    >>> ax_orig.plot(x, y, 'ro')
+    >>> fig.show()
+
+    """
+    in1 = np.asarray(in1)
+    in2 = np.asarray(in2)
+
+    if not in1.ndim == in2.ndim == 2:
+        raise ValueError('correlate2d inputs must both be 2-D arrays')
+
+    swapped_inputs = _inputs_swap_needed(mode, in1.shape, in2.shape)
+    if swapped_inputs:
+        in1, in2 = in2, in1
+
+    val = _valfrommode(mode)
+    bval = _bvalfromboundary(boundary)
+    out = _sigtools._convolve2d(in1, in2.conj(), 0, val, bval, fillvalue)
+
+    if swapped_inputs:
+        out = out[::-1, ::-1]
+
+    return out
+
+
+def medfilt2d(input, kernel_size=3):
+    """
+    Median filter a 2-dimensional array.
+
+    Apply a median filter to the `input` array using a local window-size
+    given by `kernel_size` (must be odd). The array is zero-padded
+    automatically.
+
+    Parameters
+    ----------
+    input : array_like
+        A 2-dimensional input array.
+    kernel_size : array_like, optional
+        A scalar or a list of length 2, giving the size of the
+        median filter window in each dimension.  Elements of
+        `kernel_size` should be odd.  If `kernel_size` is a scalar,
+        then this scalar is used as the size in each dimension.
+        Default is a kernel of size (3, 3).
+
+    Returns
+    -------
+    out : ndarray
+        An array the same size as input containing the median filtered
+        result.
+
+    See Also
+    --------
+    scipy.ndimage.median_filter
+
+    Notes
+    -----
+    This is faster than `medfilt` when the input dtype is ``uint8``,
+    ``float32``, or ``float64``; for other types, this falls back to
+    `medfilt`. In some situations, `scipy.ndimage.median_filter` may be
+    faster than this function.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> x = np.arange(25).reshape(5, 5)
+    >>> x
+    array([[ 0,  1,  2,  3,  4],
+           [ 5,  6,  7,  8,  9],
+           [10, 11, 12, 13, 14],
+           [15, 16, 17, 18, 19],
+           [20, 21, 22, 23, 24]])
+
+    Replace each element with the median of the surrounding 5x5 window:
+
+    >>> signal.medfilt2d(x, kernel_size=5)
+    array([[ 0,  0,  2,  0,  0],
+           [ 0,  3,  7,  4,  0],
+           [ 2,  8, 12,  9,  4],
+           [ 0,  8, 12,  9,  0],
+           [ 0,  0, 12,  0,  0]])
+
+    Replace each element with the median of the default 3x3 window:
+
+    >>> signal.medfilt2d(x)
+    array([[ 0,  1,  2,  3,  0],
+           [ 1,  6,  7,  8,  4],
+           [ 6, 11, 12, 13,  9],
+           [11, 16, 17, 18, 14],
+           [ 0, 16, 17, 18,  0]])
+
+    Replace each element with the median of a 5x3 window:
+
+    >>> signal.medfilt2d(x, kernel_size=[5,3])
+    array([[ 0,  1,  2,  3,  0],
+           [ 0,  6,  7,  8,  3],
+           [ 5, 11, 12, 13,  8],
+           [ 5, 11, 12, 13,  8],
+           [ 0, 11, 12, 13,  0]])
+
+    Replace each element with the median of a 3x5 window:
+
+    >>> signal.medfilt2d(x, kernel_size=[3,5])
+    array([[ 0,  0,  2,  1,  0],
+           [ 1,  5,  7,  6,  3],
+           [ 6, 10, 12, 11,  8],
+           [11, 15, 17, 16, 13],
+           [ 0, 15, 17, 16,  0]])
+
+    As these examples show, each element of `kernel_size` must be odd and
+    must not exceed the corresponding dimension of the input array.
+
+    """
+    image = np.asarray(input)
+
+    # checking dtype.type, rather than just dtype, is necessary for
+    # excluding np.longdouble with MS Visual C.
+    if image.dtype.type not in (np.ubyte, np.single, np.double):
+        return medfilt(image, kernel_size)
+
+    if kernel_size is None:
+        kernel_size = [3] * 2
+    kernel_size = np.asarray(kernel_size)
+    if kernel_size.shape == ():
+        kernel_size = np.repeat(kernel_size.item(), 2)
+
+    for size in kernel_size:
+        if (size % 2) != 1:
+            raise ValueError("Each element of kernel_size should be odd.")
+
+    return _sigtools._medfilt2d(image, kernel_size)
+
+
+def lfilter(b, a, x, axis=-1, zi=None):
+    """
+    Filter data along one-dimension with an IIR or FIR filter.
+
+    Filter a data sequence, `x`, using a digital filter.  This works for many
+    fundamental data types (including Object type).  The filter is a direct
+    form II transposed implementation of the standard difference equation
+    (see Notes).
+
+    The function `sosfilt` (and filter design using ``output='sos'``) should be
+    preferred over `lfilter` for most filtering tasks, as second-order sections
+    have fewer numerical problems.
+
+    Parameters
+    ----------
+    b : array_like
+        The numerator coefficient vector in a 1-D sequence.
+    a : array_like
+        The denominator coefficient vector in a 1-D sequence.  If ``a[0]``
+        is not 1, then both `a` and `b` are normalized by ``a[0]``.
+    x : array_like
+        An N-dimensional input array.
+    axis : int, optional
+        The axis of the input data array along which to apply the
+        linear filter. The filter is applied to each subarray along
+        this axis.  Default is -1.
+    zi : array_like, optional
+        Initial conditions for the filter delays.  It is a vector
+        (or array of vectors for an N-dimensional input) of length
+        ``max(len(a), len(b)) - 1``.  If `zi` is None or is not given then
+        initial rest is assumed.  See `lfiltic` for more information.
+
+    Returns
+    -------
+    y : array
+        The output of the digital filter.
+    zf : array, optional
+        If `zi` is None, this is not returned, otherwise, `zf` holds the
+        final filter delay values.
+
+    See Also
+    --------
+    lfiltic : Construct initial conditions for `lfilter`.
+    lfilter_zi : Compute initial state (steady state of step response) for
+                 `lfilter`.
+    filtfilt : A forward-backward filter, to obtain a filter with zero phase.
+    savgol_filter : A Savitzky-Golay filter.
+    sosfilt: Filter data using cascaded second-order sections.
+    sosfiltfilt: A forward-backward filter using second-order sections.
+
+    Notes
+    -----
+    The filter function is implemented as a direct II transposed structure.
+    This means that the filter implements::
+
+       a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[M]*x[n-M]
+                             - a[1]*y[n-1] - ... - a[N]*y[n-N]
+
+    where `M` is the degree of the numerator, `N` is the degree of the
+    denominator, and `n` is the sample number.  It is implemented using
+    the following difference equations (assuming M = N)::
+
+         a[0]*y[n] = b[0] * x[n]               + d[0][n-1]
+           d[0][n] = b[1] * x[n] - a[1] * y[n] + d[1][n-1]
+           d[1][n] = b[2] * x[n] - a[2] * y[n] + d[2][n-1]
+         ...
+         d[N-2][n] = b[N-1]*x[n] - a[N-1]*y[n] + d[N-1][n-1]
+         d[N-1][n] = b[N] * x[n] - a[N] * y[n]
+
+    where `d` are the state variables.
+
+    The rational transfer function describing this filter in the
+    z-transform domain is::
+
+                             -1              -M
+                 b[0] + b[1]z  + ... + b[M] z
+         Y(z) = -------------------------------- X(z)
+                             -1              -N
+                 a[0] + a[1]z  + ... + a[N] z
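+
+    As a sanity check, the difference equation can be evaluated directly and
+    compared against `lfilter` (a minimal sketch for a first-order section):
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> b, a = [0.5, 0.5], [1.0, -0.3]
+    >>> x = np.array([1.0, 0.0, 0.0, 0.0])
+    >>> y = np.zeros_like(x)
+    >>> for n in range(len(x)):
+    ...     y[n] = (b[0]*x[n] + (b[1]*x[n-1] if n > 0 else 0.0)
+    ...             - (a[1]*y[n-1] if n > 0 else 0.0))
+    >>> np.allclose(y, signal.lfilter(b, a, x))
+    True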
+
+    Examples
+    --------
+    Generate a noisy signal to be filtered:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+    >>> rng = np.random.default_rng()
+    >>> t = np.linspace(-1, 1, 201)
+    >>> x = (np.sin(2*np.pi*0.75*t*(1-t) + 2.1) +
+    ...      0.1*np.sin(2*np.pi*1.25*t + 1) +
+    ...      0.18*np.cos(2*np.pi*3.85*t))
+    >>> xn = x + rng.standard_normal(len(t)) * 0.08
+
+    Create an order 3 lowpass butterworth filter:
+
+    >>> b, a = signal.butter(3, 0.05)
+
+    Apply the filter to xn.  Use lfilter_zi to choose the initial condition of
+    the filter:
+
+    >>> zi = signal.lfilter_zi(b, a)
+    >>> z, _ = signal.lfilter(b, a, xn, zi=zi*xn[0])
+
+    Apply the filter again, to have a result filtered at an order the same as
+    filtfilt:
+
+    >>> z2, _ = signal.lfilter(b, a, z, zi=zi*z[0])
+
+    Use filtfilt to apply the filter:
+
+    >>> y = signal.filtfilt(b, a, xn)
+
+    Plot the original signal and the various filtered versions:
+
+    >>> plt.figure
+    >>> plt.plot(t, xn, 'b', alpha=0.75)
+    >>> plt.plot(t, z, 'r--', t, z2, 'r', t, y, 'k')
+    >>> plt.legend(('noisy signal', 'lfilter, once', 'lfilter, twice',
+    ...             'filtfilt'), loc='best')
+    >>> plt.grid(True)
+    >>> plt.show()
+
+    """
+    a = np.atleast_1d(a)
+    if len(a) == 1:
+        # This path only supports types fdgFDGO to mirror _linear_filter below.
+        # Any of b, a, x, or zi can set the dtype, but there is no default
+        # casting of other types; instead a NotImplementedError is raised.
+        b = np.asarray(b)
+        a = np.asarray(a)
+        if b.ndim != 1 and a.ndim != 1:
+            raise ValueError('object of too small depth for desired array')
+        x = _validate_x(x)
+        inputs = [b, a, x]
+        if zi is not None:
+            # _linear_filter does not broadcast zi, but does do expansion of
+            # singleton dims.
+            zi = np.asarray(zi)
+            if zi.ndim != x.ndim:
+                raise ValueError('object of too small depth for desired array')
+            expected_shape = list(x.shape)
+            expected_shape[axis] = b.shape[0] - 1
+            expected_shape = tuple(expected_shape)
+            # check the trivial case where zi is the right shape first
+            if zi.shape != expected_shape:
+                strides = zi.ndim * [None]
+                if axis < 0:
+                    axis += zi.ndim
+                for k in range(zi.ndim):
+                    if k == axis and zi.shape[k] == expected_shape[k]:
+                        strides[k] = zi.strides[k]
+                    elif k != axis and zi.shape[k] == expected_shape[k]:
+                        strides[k] = zi.strides[k]
+                    elif k != axis and zi.shape[k] == 1:
+                        strides[k] = 0
+                    else:
+                        raise ValueError('Unexpected shape for zi: expected '
+                                         '%s, found %s.' %
+                                         (expected_shape, zi.shape))
+                zi = np.lib.stride_tricks.as_strided(zi, expected_shape,
+                                                     strides)
+            inputs.append(zi)
+        dtype = np.result_type(*inputs)
+
+        if dtype.char not in 'fdgFDGO':
+            raise NotImplementedError("input type '%s' not supported" % dtype)
+
+        b = np.array(b, dtype=dtype)
+        a = np.array(a, dtype=dtype, copy=False)
+        b /= a[0]
+        x = np.array(x, dtype=dtype, copy=False)
+
+        out_full = np.apply_along_axis(lambda y: np.convolve(b, y), axis, x)
+        ind = out_full.ndim * [slice(None)]
+        if zi is not None:
+            ind[axis] = slice(zi.shape[axis])
+            out_full[tuple(ind)] += zi
+
+        ind[axis] = slice(out_full.shape[axis] - len(b) + 1)
+        out = out_full[tuple(ind)]
+
+        if zi is None:
+            return out
+        else:
+            ind[axis] = slice(out_full.shape[axis] - len(b) + 1, None)
+            zf = out_full[tuple(ind)]
+            return out, zf
+    else:
+        if zi is None:
+            return _sigtools._linear_filter(b, a, x, axis)
+        else:
+            return _sigtools._linear_filter(b, a, x, axis, zi)
+
+
+def lfiltic(b, a, y, x=None):
+    """
+    Construct initial conditions for lfilter given input and output vectors.
+
+    Given a linear filter (b, a) and initial conditions on the output `y`
+    and the input `x`, return the initial conditions on the state vector zi
+    which is used by `lfilter` to generate the output given the input.
+
+    Parameters
+    ----------
+    b : array_like
+        Linear filter term.
+    a : array_like
+        Linear filter term.
+    y : array_like
+        Initial conditions.
+
+        If ``N = len(a) - 1``, then ``y = {y[-1], y[-2], ..., y[-N]}``.
+
+        If `y` is too short, it is padded with zeros.
+    x : array_like, optional
+        Initial conditions.
+
+        If ``M = len(b) - 1``, then ``x = {x[-1], x[-2], ..., x[-M]}``.
+
+        If `x` is not given, its initial conditions are assumed zero.
+
+        If `x` is too short, it is padded with zeros.
+
+    Returns
+    -------
+    zi : ndarray
+        The state vector ``zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]}``,
+        where ``K = max(M, N)``.
+
+    See Also
+    --------
+    lfilter, lfilter_zi
+
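+    Examples
+    --------
+    Filter a signal in two pieces, rebuilding the filter state from past
+    inputs and outputs (a minimal sketch):
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> b, a = signal.butter(2, 0.25)
+    >>> x = np.arange(10.0)
+    >>> y = signal.lfilter(b, a, x)
+    >>> zi = signal.lfiltic(b, a, y[4::-1], x[4::-1])
+    >>> y2, _ = signal.lfilter(b, a, x[5:], zi=zi)
+    >>> np.allclose(y[5:], y2)
+    True
+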
+    """
+    N = np.size(a) - 1
+    M = np.size(b) - 1
+    K = max(M, N)
+    y = np.asarray(y)
+
+    if x is None:
+        result_type = np.result_type(np.asarray(b), np.asarray(a), y)
+        if result_type.kind in 'bui':
+            result_type = np.float64
+        x = np.zeros(M, dtype=result_type)
+    else:
+        x = np.asarray(x)
+
+        result_type = np.result_type(np.asarray(b), np.asarray(a), y, x)
+        if result_type.kind in 'bui':
+            result_type = np.float64
+        x = x.astype(result_type)
+
+        L = np.size(x)
+        if L < M:
+            x = np.r_[x, np.zeros(M - L)]
+
+    y = y.astype(result_type)
+    zi = np.zeros(K, result_type)
+
+    L = np.size(y)
+    if L < N:
+        y = np.r_[y, np.zeros(N - L)]
+
+    for m in range(M):
+        zi[m] = np.sum(b[m + 1:] * x[:M - m], axis=0)
+
+    for m in range(N):
+        zi[m] -= np.sum(a[m + 1:] * y[:N - m], axis=0)
+
+    return zi
+
+
+def deconvolve(signal, divisor):
+    """Deconvolves ``divisor`` out of ``signal`` using inverse filtering.
+
+    Returns the quotient and remainder such that
+    ``signal = convolve(divisor, quotient) + remainder``
+
+    Parameters
+    ----------
+    signal : (N,) array_like
+        Signal data, typically a recorded signal
+    divisor : (N,) array_like
+        Divisor data, typically an impulse response or filter that was
+        applied to the original signal
+
+    Returns
+    -------
+    quotient : ndarray
+        Quotient, typically the recovered original signal
+    remainder : ndarray
+        Remainder
+
+    See Also
+    --------
+    numpy.polydiv : performs polynomial division (same operation, but
+                    also accepts poly1d objects)
+
+    Examples
+    --------
+    Deconvolve a signal that's been filtered:
+
+    >>> from scipy import signal
+    >>> original = [0, 1, 0, 0, 1, 1, 0, 0]
+    >>> impulse_response = [2, 1]
+    >>> recorded = signal.convolve(impulse_response, original)
+    >>> recorded
+    array([0, 2, 1, 0, 2, 3, 1, 0, 0])
+    >>> recovered, remainder = signal.deconvolve(recorded, impulse_response)
+    >>> recovered
+    array([ 0.,  1.,  0.,  0.,  1.,  1.,  0.,  0.])
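+
+    The defining identity can be checked numerically:
+
+    >>> import numpy as np
+    >>> np.allclose(recorded,
+    ...             signal.convolve(impulse_response, recovered) + remainder)
+    True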
+
+    """
+    num = np.atleast_1d(signal)
+    den = np.atleast_1d(divisor)
+    if num.ndim > 1:
+        raise ValueError("signal must be 1-D.")
+    if den.ndim > 1:
+        raise ValueError("divisor must be 1-D.")
+    N = len(num)
+    D = len(den)
+    if D > N:
+        quot = []
+        rem = num
+    else:
+        input = np.zeros(N - D + 1, float)
+        input[0] = 1
+        quot = lfilter(num, den, input)
+        rem = num - convolve(den, quot, mode='full')
+    return quot, rem
+
+
+def hilbert(x, N=None, axis=-1):
+    """
+    Compute the analytic signal, using the Hilbert transform.
+
+    The transformation is done along the last axis by default.
+
+    Parameters
+    ----------
+    x : array_like
+        Signal data.  Must be real.
+    N : int, optional
+        Number of Fourier components.  Default: ``x.shape[axis]``
+    axis : int, optional
+        Axis along which to do the transformation.  Default: -1.
+
+    Returns
+    -------
+    xa : ndarray
+        Analytic signal of `x`, of each 1-D array along `axis`
+
+    Notes
+    -----
+    The analytic signal ``x_a(t)`` of signal ``x(t)`` is:
+
+    .. math:: x_a = F^{-1}(F(x) 2U) = x + i y
+
+    where `F` is the Fourier transform, `U` the unit step function,
+    and `y` the Hilbert transform of `x`. [1]_
+
+    In other words, the negative half of the frequency spectrum is zeroed
+    out, turning the real-valued signal into a complex signal.  The Hilbert
+    transformed signal can be obtained from ``np.imag(hilbert(x))``, and the
+    original signal from ``np.real(hilbert(x))``.
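+
+    For a pure tone with a whole number of periods in the window, the
+    amplitude envelope is constant, which gives a quick check of this
+    construction:
+
+    >>> import numpy as np
+    >>> from scipy.signal import hilbert
+    >>> t = np.arange(1024) / 1024
+    >>> np.allclose(np.abs(hilbert(np.cos(2*np.pi*64*t))), 1.0)
+    True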
+
+    References
+    ----------
+    .. [1] Wikipedia, "Analytic signal".
+           https://en.wikipedia.org/wiki/Analytic_signal
+    .. [2] Leon Cohen, "Time-Frequency Analysis", 1995. Chapter 2.
+    .. [3] Alan V. Oppenheim, Ronald W. Schafer. Discrete-Time Signal
+           Processing, Third Edition, 2009. Chapter 12.
+           ISBN 13: 978-1292-02572-8
+
+    Examples
+    --------
+    In this example we use the Hilbert transform to determine the amplitude
+    envelope and instantaneous frequency of an amplitude-modulated signal.
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.signal import hilbert, chirp
+
+    >>> duration = 1.0
+    >>> fs = 400.0
+    >>> samples = int(fs*duration)
+    >>> t = np.arange(samples) / fs
+
+    We create a chirp whose frequency increases from 20 Hz to 100 Hz and
+    apply an amplitude modulation.
+
+    >>> signal = chirp(t, 20.0, t[-1], 100.0)
+    >>> signal *= (1.0 + 0.5 * np.sin(2.0*np.pi*3.0*t) )
+
+    The amplitude envelope is given by the magnitude of the analytic signal.
+    The instantaneous frequency can be obtained by differentiating the
+    instantaneous phase with respect to time. The instantaneous phase
+    corresponds to the phase angle of the analytic signal.
+
+    >>> analytic_signal = hilbert(signal)
+    >>> amplitude_envelope = np.abs(analytic_signal)
+    >>> instantaneous_phase = np.unwrap(np.angle(analytic_signal))
+    >>> instantaneous_frequency = (np.diff(instantaneous_phase) /
+    ...                            (2.0*np.pi) * fs)
+
+    >>> fig, (ax0, ax1) = plt.subplots(nrows=2)
+    >>> ax0.plot(t, signal, label='signal')
+    >>> ax0.plot(t, amplitude_envelope, label='envelope')
+    >>> ax0.set_xlabel("time in seconds")
+    >>> ax0.legend()
+    >>> ax1.plot(t[1:], instantaneous_frequency)
+    >>> ax1.set_xlabel("time in seconds")
+    >>> ax1.set_ylim(0.0, 120.0)
+    >>> fig.tight_layout()
+
+    """
+    x = np.asarray(x)
+    if np.iscomplexobj(x):
+        raise ValueError("x must be real.")
+    if N is None:
+        N = x.shape[axis]
+    if N <= 0:
+        raise ValueError("N must be positive.")
+
+    Xf = sp_fft.fft(x, N, axis=axis)
+    h = np.zeros(N, dtype=Xf.dtype)
+    if N % 2 == 0:
+        h[0] = h[N // 2] = 1
+        h[1:N // 2] = 2
+    else:
+        h[0] = 1
+        h[1:(N + 1) // 2] = 2
+
+    if x.ndim > 1:
+        ind = [np.newaxis] * x.ndim
+        ind[axis] = slice(None)
+        h = h[tuple(ind)]
+    x = sp_fft.ifft(Xf * h, axis=axis)
+    return x
+
+
+def hilbert2(x, N=None):
+    """
+    Compute the '2-D' analytic signal of `x`.
+
+    Parameters
+    ----------
+    x : array_like
+        2-D signal data.
+    N : int or tuple of two ints, optional
+        Number of Fourier components. Default is ``x.shape``
+
+    Returns
+    -------
+    xa : ndarray
+        Analytic signal of `x` taken along axes (0,1).
+
+    References
+    ----------
+    .. [1] Wikipedia, "Analytic signal",
+        https://en.wikipedia.org/wiki/Analytic_signal
+
+    """
+    x = np.atleast_2d(x)
+    if x.ndim > 2:
+        raise ValueError("x must be 2-D.")
+    if np.iscomplexobj(x):
+        raise ValueError("x must be real.")
+    if N is None:
+        N = x.shape
+    elif isinstance(N, int):
+        if N <= 0:
+            raise ValueError("N must be positive.")
+        N = (N, N)
+    elif len(N) != 2 or np.any(np.asarray(N) <= 0):
+        raise ValueError("When given as a tuple, N must hold exactly "
+                         "two positive integers")
+
+    Xf = sp_fft.fft2(x, N, axes=(0, 1))
+    h1 = np.zeros(N[0], dtype=Xf.dtype)
+    h2 = np.zeros(N[1], dtype=Xf.dtype)
+    # Build the spectral step weighting for each axis in place.
+    for h, N1 in zip((h1, h2), N):
+        if N1 % 2 == 0:
+            h[0] = h[N1 // 2] = 1
+            h[1:N1 // 2] = 2
+        else:
+            h[0] = 1
+            h[1:(N1 + 1) // 2] = 2
+
+    h = h1[:, np.newaxis] * h2[np.newaxis, :]
+    k = x.ndim
+    while k > 2:
+        h = h[:, np.newaxis]
+        k -= 1
+    x = sp_fft.ifft2(Xf * h, axes=(0, 1))
+    return x
+
+
+def cmplx_sort(p):
+    """Sort roots based on magnitude.
+
+    Parameters
+    ----------
+    p : array_like
+        The roots to sort, as a 1-D array.
+
+    Returns
+    -------
+    p_sorted : ndarray
+        Sorted roots.
+    indx : ndarray
+        Array of indices needed to sort the input `p`.
+
+    Examples
+    --------
+    >>> from scipy import signal
+    >>> vals = [1, 4, 1+1.j, 3]
+    >>> p_sorted, indx = signal.cmplx_sort(vals)
+    >>> p_sorted
+    array([1.+0.j, 1.+1.j, 3.+0.j, 4.+0.j])
+    >>> indx
+    array([0, 2, 3, 1])
+    """
+    p = np.asarray(p)
+    indx = np.argsort(abs(p))
+    return np.take(p, indx, 0), indx
+
+
+def unique_roots(p, tol=1e-3, rtype='min'):
+    """Determine unique roots and their multiplicities from a list of roots.
+
+    Parameters
+    ----------
+    p : array_like
+        The list of roots.
+    tol : float, optional
+        The tolerance for two roots to be considered equal in terms of
+        the distance between them. Default is 1e-3. Refer to Notes about
+        the details on roots grouping.
+    rtype : {'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}, optional
+        How to determine the returned root if multiple roots are within
+        `tol` of each other.
+
+          - 'max', 'maximum': pick the maximum of those roots
+          - 'min', 'minimum': pick the minimum of those roots
+          - 'avg', 'mean': take the average of those roots
+
+        When finding the minimum or maximum among complex roots, they are
+        compared first by the real part and then by the imaginary part.
+
+    Returns
+    -------
+    unique : ndarray
+        The list of unique roots.
+    multiplicity : ndarray
+        The multiplicity of each root.
+
+    Notes
+    -----
+    If we have 3 roots ``a``, ``b`` and ``c``, such that ``a`` is close to
+    ``b`` and ``b`` is close to ``c`` (distance is less than `tol`), then it
+    doesn't necessarily mean that ``a`` is close to ``c``. It means that roots
+    grouping is not unique. In this function we use "greedy" grouping going
+    through the roots in the order they are given in the input `p`.
+
+    This utility function is not specific to roots but can be used for any
+    sequence of values for which uniqueness and multiplicity have to be
+    determined. For a more general routine, see `numpy.unique`.
+
+    Examples
+    --------
+    >>> from scipy import signal
+    >>> vals = [0, 1.3, 1.31, 2.8, 1.25, 2.2, 10.3]
+    >>> uniq, mult = signal.unique_roots(vals, tol=2e-2, rtype='avg')
+
+    Check which roots have multiplicity larger than 1:
+
+    >>> uniq[mult > 1]
+    array([ 1.305])
+    """
+    if rtype in ['max', 'maximum']:
+        reduce = np.max
+    elif rtype in ['min', 'minimum']:
+        reduce = np.min
+    elif rtype in ['avg', 'mean']:
+        reduce = np.mean
+    else:
+        raise ValueError("`rtype` must be one of "
+                         "{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}")
+
+    p = np.asarray(p)
+
+    points = np.empty((len(p), 2))
+    points[:, 0] = np.real(p)
+    points[:, 1] = np.imag(p)
+    tree = cKDTree(points)
+
+    p_unique = []
+    p_multiplicity = []
+    used = np.zeros(len(p), dtype=bool)
+    for i in range(len(p)):
+        if used[i]:
+            continue
+
+        group = tree.query_ball_point(points[i], tol)
+        group = [x for x in group if not used[x]]
+
+        p_unique.append(reduce(p[group]))
+        p_multiplicity.append(len(group))
+
+        used[group] = True
+
+    return np.asarray(p_unique), np.asarray(p_multiplicity)
+
+
+def invres(r, p, k, tol=1e-3, rtype='avg'):
+    """Compute b(s) and a(s) from partial fraction expansion.
+
+    If `M` is the degree of numerator `b` and `N` the degree of denominator
+    `a`::
+
+              b(s)     b[0] s**(M) + b[1] s**(M-1) + ... + b[M]
+      H(s) = ------ = ------------------------------------------
+              a(s)     a[0] s**(N) + a[1] s**(N-1) + ... + a[N]
+
+    then the partial-fraction expansion H(s) is defined as::
+
+               r[0]       r[1]             r[-1]
+           = -------- + -------- + ... + --------- + k(s)
+             (s-p[0])   (s-p[1])         (s-p[-1])
+
+    If there are any repeated roots (closer together than `tol`), then H(s)
+    has terms like::
+
+          r[i]      r[i+1]              r[i+n-1]
+        -------- + ----------- + ... + -----------
+        (s-p[i])  (s-p[i])**2          (s-p[i])**n
+
+    This function is used for polynomials in positive powers of s or z,
+    such as analog filters or digital filters in controls engineering.  For
+    negative powers of z (typical for digital filters in DSP), use `invresz`.
+
+    Parameters
+    ----------
+    r : array_like
+        Residues corresponding to the poles. For repeated poles, the
+        residues must be ordered to match the partial fractions in
+        ascending powers of the repeated factor.
+    p : array_like
+        Poles. Equal poles must be adjacent.
+    k : array_like
+        Coefficients of the direct polynomial term.
+    tol : float, optional
+        The tolerance for two roots to be considered equal in terms of
+        the distance between them. Default is 1e-3. See `unique_roots`
+        for further details.
+    rtype : {'avg', 'min', 'max'}, optional
+        Method for computing a root to represent a group of identical roots.
+        Default is 'avg'. See `unique_roots` for further details.
+
+    Returns
+    -------
+    b : ndarray
+        Numerator polynomial coefficients.
+    a : ndarray
+        Denominator polynomial coefficients.
+
+    See Also
+    --------
+    residue, invresz, unique_roots
+
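+    Examples
+    --------
+    Round-tripping through `residue` recovers the transfer function (a
+    minimal sketch with two distinct real poles):
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> r, p, k = signal.residue([1.0, 0.0], [1.0, 3.0, 2.0])
+    >>> b, a = signal.invres(r, p, k)
+    >>> np.allclose(b, [1.0, 0.0]) and np.allclose(a, [1.0, 3.0, 2.0])
+    True
+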
+    """
+    r = np.atleast_1d(r)
+    p = np.atleast_1d(p)
+    k = np.trim_zeros(np.atleast_1d(k), 'f')
+
+    unique_poles, multiplicity = _group_poles(p, tol, rtype)
+    factors, denominator = _compute_factors(unique_poles, multiplicity,
+                                            include_powers=True)
+
+    if len(k) == 0:
+        numerator = 0
+    else:
+        numerator = np.polymul(k, denominator)
+
+    for residue, factor in zip(r, factors):
+        numerator = np.polyadd(numerator, residue * factor)
+
+    return numerator, denominator
+
+
+def _compute_factors(roots, multiplicity, include_powers=False):
+    """Compute the total polynomial divided by factors for each root."""
+    current = np.array([1])
+    suffixes = [current]
+    for pole, mult in zip(roots[-1:0:-1], multiplicity[-1:0:-1]):
+        monomial = np.array([1, -pole])
+        for _ in range(mult):
+            current = np.polymul(current, monomial)
+        suffixes.append(current)
+    suffixes = suffixes[::-1]
+
+    factors = []
+    current = np.array([1])
+    for pole, mult, suffix in zip(roots, multiplicity, suffixes):
+        monomial = np.array([1, -pole])
+        block = []
+        for i in range(mult):
+            if i == 0 or include_powers:
+                block.append(np.polymul(current, suffix))
+            current = np.polymul(current, monomial)
+        factors.extend(reversed(block))
+
+    return factors, current
+
+
+def _compute_residues(poles, multiplicity, numerator):
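+    """Compute the residues at each pole by deflation through subtraction."""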
+    denominator_factors, _ = _compute_factors(poles, multiplicity)
+    numerator = numerator.astype(poles.dtype)
+
+    residues = []
+    for pole, mult, factor in zip(poles, multiplicity,
+                                  denominator_factors):
+        if mult == 1:
+            residues.append(np.polyval(numerator, pole) /
+                            np.polyval(factor, pole))
+        else:
+            numer = numerator.copy()
+            monomial = np.array([1, -pole])
+            factor, d = np.polydiv(factor, monomial)
+
+            block = []
+            for _ in range(mult):
+                numer, n = np.polydiv(numer, monomial)
+                r = n[0] / d[0]
+                numer = np.polysub(numer, r * factor)
+                block.append(r)
+
+            residues.extend(reversed(block))
+
+    return np.asarray(residues)
+
+
+def residue(b, a, tol=1e-3, rtype='avg'):
+    """Compute partial-fraction expansion of b(s) / a(s).
+
+    If `M` is the degree of numerator `b` and `N` the degree of denominator
+    `a`::
+
+              b(s)     b[0] s**(M) + b[1] s**(M-1) + ... + b[M]
+      H(s) = ------ = ------------------------------------------
+              a(s)     a[0] s**(N) + a[1] s**(N-1) + ... + a[N]
+
+    then the partial-fraction expansion H(s) is defined as::
+
+               r[0]       r[1]             r[-1]
+           = -------- + -------- + ... + --------- + k(s)
+             (s-p[0])   (s-p[1])         (s-p[-1])
+
+    If there are any repeated roots (closer together than `tol`), then H(s)
+    has terms like::
+
+          r[i]      r[i+1]              r[i+n-1]
+        -------- + ----------- + ... + -----------
+        (s-p[i])  (s-p[i])**2          (s-p[i])**n
+
+    This function is used for polynomials in positive powers of s or z,
+    such as analog filters or digital filters in controls engineering.  For
+    negative powers of z (typical for digital filters in DSP), use `residuez`.
+
+    See Notes for details about the algorithm.
+
+    Parameters
+    ----------
+    b : array_like
+        Numerator polynomial coefficients.
+    a : array_like
+        Denominator polynomial coefficients.
+    tol : float, optional
+        The tolerance for two roots to be considered equal in terms of
+        the distance between them. Default is 1e-3. See `unique_roots`
+        for further details.
+    rtype : {'avg', 'min', 'max'}, optional
+        Method for computing a root to represent a group of identical roots.
+        Default is 'avg'. See `unique_roots` for further details.
+
+    Returns
+    -------
+    r : ndarray
+        Residues corresponding to the poles. For repeated poles, the residues
+        are ordered to correspond to the fractions in ascending order of
+        denominator power.
+    p : ndarray
+        Poles ordered by magnitude in ascending order.
+    k : ndarray
+        Coefficients of the direct polynomial term.
+
+    See Also
+    --------
+    invres, residuez, numpy.poly, unique_roots
+
+    Notes
+    -----
+    The "deflation through subtraction" algorithm is used for
+    computations --- method 6 in [1]_.
+
+    The form of the partial fraction expansion depends on the multiplicity
+    of the poles in the exact mathematical sense. However, there is no way
+    to determine the multiplicity of a polynomial's roots exactly in
+    numerical computing. Thus you should think of the result of `residue`
+    with a given `tol` as the partial fraction expansion computed for a
+    denominator composed of the computed poles with empirically determined
+    multiplicities. The choice of `tol` can drastically change the result
+    if there are close poles.
+
+    References
+    ----------
+    .. [1] J. F. Mahoney, B. D. Sivazlian, "Partial fractions expansion: a
+           review of computational methodology and efficiency", Journal of
+           Computational and Applied Mathematics, Vol. 9, 1983.
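+
+    Examples
+    --------
+    A small sketch for ``H(s) = s / (s**2 + 3*s + 2)``, which has simple
+    poles at -1 and -2 with residues -1 and 2 (default `tol` and `rtype`):
+
+    >>> import numpy as np
+    >>> from scipy.signal import residue
+    >>> r, p, k = residue([1, 0], [1, 3, 2])
+    >>> np.allclose(r, [-1, 2]) and np.allclose(p, [-1, -2])
+    True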
+    """
+    b = np.asarray(b)
+    a = np.asarray(a)
+    if (np.issubdtype(b.dtype, np.complexfloating)
+            or np.issubdtype(a.dtype, np.complexfloating)):
+        b = b.astype(complex)
+        a = a.astype(complex)
+    else:
+        b = b.astype(float)
+        a = a.astype(float)
+
+    b = np.trim_zeros(np.atleast_1d(b), 'f')
+    a = np.trim_zeros(np.atleast_1d(a), 'f')
+
+    if a.size == 0:
+        raise ValueError("Denominator `a` is zero.")
+
+    poles = np.roots(a)
+    if b.size == 0:
+        return np.zeros(poles.shape), cmplx_sort(poles)[0], np.array([])
+
+    if len(b) < len(a):
+        k = np.empty(0)
+    else:
+        k, b = np.polydiv(b, a)
+
+    unique_poles, multiplicity = unique_roots(poles, tol=tol, rtype=rtype)
+    unique_poles, order = cmplx_sort(unique_poles)
+    multiplicity = multiplicity[order]
+
+    residues = _compute_residues(unique_poles, multiplicity, b)
+
+    index = 0
+    for pole, mult in zip(unique_poles, multiplicity):
+        poles[index:index + mult] = pole
+        index += mult
+
+    return residues / a[0], poles, k
+
+
+def residuez(b, a, tol=1e-3, rtype='avg'):
+    """Compute partial-fraction expansion of b(z) / a(z).
+
+    If `M` is the degree of numerator `b` and `N` the degree of denominator
+    `a`::
+
+                b(z)     b[0] + b[1] z**(-1) + ... + b[M] z**(-M)
+        H(z) = ------ = ------------------------------------------
+                a(z)     a[0] + a[1] z**(-1) + ... + a[N] z**(-N)
+
+    then the partial-fraction expansion H(z) is defined as::
+
+                 r[0]                   r[-1]
+         = --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
+           (1-p[0]z**(-1))         (1-p[-1]z**(-1))
+
+    If there are any repeated roots (closer than `tol`), then the partial
+    fraction expansion has terms like::
+
+             r[i]              r[i+1]                    r[i+n-1]
+        -------------- + ------------------ + ... + ------------------
+        (1-p[i]z**(-1))  (1-p[i]z**(-1))**2         (1-p[i]z**(-1))**n
+
+    This function is used for polynomials in negative powers of z,
+    such as digital filters in DSP.  For positive powers, use `residue`.
+
+    See Notes of `residue` for details about the algorithm.
+
+    Parameters
+    ----------
+    b : array_like
+        Numerator polynomial coefficients.
+    a : array_like
+        Denominator polynomial coefficients.
+    tol : float, optional
+        The tolerance for two roots to be considered equal in terms of
+        the distance between them. Default is 1e-3. See `unique_roots`
+        for further details.
+    rtype : {'avg', 'min', 'max'}, optional
+        Method for computing a root to represent a group of identical roots.
+        Default is 'avg'. See `unique_roots` for further details.
+
+    Returns
+    -------
+    r : ndarray
+        Residues corresponding to the poles. For repeated poles, the residues
+        are ordered to correspond to the fractions in ascending order of
+        denominator power.
+    p : ndarray
+        Poles ordered by magnitude in ascending order.
+    k : ndarray
+        Coefficients of the direct polynomial term.
+
+    See Also
+    --------
+    invresz, residue, unique_roots
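+
+    Examples
+    --------
+    A small sketch for the one-pole filter ``H(z) = 1 / (1 - 0.5*z**-1)``
+    (default `tol` and `rtype`):
+
+    >>> import numpy as np
+    >>> from scipy.signal import residuez
+    >>> r, p, k = residuez([1], [1, -0.5])
+    >>> np.allclose(r, [1]) and np.allclose(p, [0.5])
+    True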
+    """
+    b = np.asarray(b)
+    a = np.asarray(a)
+    if (np.issubdtype(b.dtype, np.complexfloating)
+            or np.issubdtype(a.dtype, np.complexfloating)):
+        b = b.astype(complex)
+        a = a.astype(complex)
+    else:
+        b = b.astype(float)
+        a = a.astype(float)
+
+    b = np.trim_zeros(np.atleast_1d(b), 'b')
+    a = np.trim_zeros(np.atleast_1d(a), 'b')
+
+    if a.size == 0:
+        raise ValueError("Denominator `a` is zero.")
+    elif a[0] == 0:
+        raise ValueError("First coefficient of determinant `a` must be "
+                         "non-zero.")
+
+    poles = np.roots(a)
+    if b.size == 0:
+        return np.zeros(poles.shape), cmplx_sort(poles)[0], np.array([])
+
+    b_rev = b[::-1]
+    a_rev = a[::-1]
+
+    if len(b_rev) < len(a_rev):
+        k_rev = np.empty(0)
+    else:
+        k_rev, b_rev = np.polydiv(b_rev, a_rev)
+
+    unique_poles, multiplicity = unique_roots(poles, tol=tol, rtype=rtype)
+    unique_poles, order = cmplx_sort(unique_poles)
+    multiplicity = multiplicity[order]
+
+    residues = _compute_residues(1 / unique_poles, multiplicity, b_rev)
+
+    index = 0
+    powers = np.empty(len(residues), dtype=int)
+    for pole, mult in zip(unique_poles, multiplicity):
+        poles[index:index + mult] = pole
+        powers[index:index + mult] = 1 + np.arange(mult)
+        index += mult
+
+    residues *= (-poles) ** powers / a_rev[0]
+
+    return residues, poles, k_rev[::-1]
+
+
+def _group_poles(poles, tol, rtype):
+    if rtype in ['max', 'maximum']:
+        reduce = np.max
+    elif rtype in ['min', 'minimum']:
+        reduce = np.min
+    elif rtype in ['avg', 'mean']:
+        reduce = np.mean
+    else:
+        raise ValueError("`rtype` must be one of "
+                         "{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}")
+
+    unique = []
+    multiplicity = []
+
+    pole = poles[0]
+    block = [pole]
+    for i in range(1, len(poles)):
+        if abs(poles[i] - pole) <= tol:
+            # Append the actual pole value so that `reduce` (min/max/mean)
+            # produces a meaningful representative for the group.
+            block.append(poles[i])
+        else:
+            unique.append(reduce(block))
+            multiplicity.append(len(block))
+            pole = poles[i]
+            block = [pole]
+
+    unique.append(reduce(block))
+    multiplicity.append(len(block))
+
+    return np.asarray(unique), np.asarray(multiplicity)
+
+
+def invresz(r, p, k, tol=1e-3, rtype='avg'):
+    """Compute b(z) and a(z) from partial fraction expansion.
+
+    If `M` is the degree of numerator `b` and `N` the degree of denominator
+    `a`::
+
+                b(z)     b[0] + b[1] z**(-1) + ... + b[M] z**(-M)
+        H(z) = ------ = ------------------------------------------
+                a(z)     a[0] + a[1] z**(-1) + ... + a[N] z**(-N)
+
+    then the partial-fraction expansion H(z) is defined as::
+
+                 r[0]                   r[-1]
+         = --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
+           (1-p[0]z**(-1))         (1-p[-1]z**(-1))
+
+    If there are any repeated roots (closer than `tol`), then the partial
+    fraction expansion has terms like::
+
+             r[i]              r[i+1]                    r[i+n-1]
+        -------------- + ------------------ + ... + ------------------
+        (1-p[i]z**(-1))  (1-p[i]z**(-1))**2         (1-p[i]z**(-1))**n
+
+    This function is used for polynomials in negative powers of z,
+    such as digital filters in DSP.  For positive powers, use `invres`.
+
+    Parameters
+    ----------
+    r : array_like
+        Residues corresponding to the poles. For repeated poles, the residues
+        must be ordered to correspond to the fractions in ascending order of
+        denominator power.
+    p : array_like
+        Poles. Equal poles must be adjacent.
+    k : array_like
+        Coefficients of the direct polynomial term.
+    tol : float, optional
+        The tolerance for two roots to be considered equal in terms of
+        the distance between them. Default is 1e-3. See `unique_roots`
+        for further details.
+    rtype : {'avg', 'min', 'max'}, optional
+        Method for computing a root to represent a group of identical roots.
+        Default is 'avg'. See `unique_roots` for further details.
+
+    Returns
+    -------
+    b : ndarray
+        Numerator polynomial coefficients.
+    a : ndarray
+        Denominator polynomial coefficients.
+
+    See Also
+    --------
+    residuez, unique_roots, invres
+
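+    Examples
+    --------
+    A minimal sketch inverting the expansion of
+    ``H(z) = 1 / (1 - 0.5*z**-1)`` (default `tol` and `rtype`):
+
+    >>> import numpy as np
+    >>> from scipy.signal import invresz
+    >>> b, a = invresz([1], [0.5], [])
+    >>> np.allclose(b, [1]) and np.allclose(a, [1, -0.5])
+    True
+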
+    """
+    r = np.atleast_1d(r)
+    p = np.atleast_1d(p)
+    k = np.trim_zeros(np.atleast_1d(k), 'b')
+
+    unique_poles, multiplicity = _group_poles(p, tol, rtype)
+    factors, denominator = _compute_factors(unique_poles, multiplicity,
+                                            include_powers=True)
+
+    if len(k) == 0:
+        numerator = 0
+    else:
+        numerator = np.polymul(k[::-1], denominator[::-1])
+
+    for residue, factor in zip(r, factors):
+        numerator = np.polyadd(numerator, residue * factor[::-1])
+
+    return numerator[::-1], denominator
+
+
+def resample(x, num, t=None, axis=0, window=None, domain='time'):
+    """
+    Resample `x` to `num` samples using Fourier method along the given axis.
+
+    The resampled signal starts at the same value as `x` but is sampled
+    with a spacing of ``len(x) / num * (spacing of x)``.  Because a
+    Fourier method is used, the signal is assumed to be periodic.
+
+    Parameters
+    ----------
+    x : array_like
+        The data to be resampled.
+    num : int
+        The number of samples in the resampled signal.
+    t : array_like, optional
+        If `t` is given, it is assumed to be the equally spaced sample
+        positions associated with the signal data in `x`.
+    axis : int, optional
+        The axis of `x` that is resampled.  Default is 0.
+    window : array_like, callable, string, float, or tuple, optional
+        Specifies the window applied to the signal in the Fourier
+        domain.  See below for details.
+    domain : string, optional
+        A string indicating the domain of the input `x`:
+        ``time`` Consider the input `x` as time-domain (Default),
+        ``freq`` Consider the input `x` as frequency-domain.
+
+    Returns
+    -------
+    resampled_x or (resampled_x, resampled_t)
+        Either the resampled array, or, if `t` was given, a tuple
+        containing the resampled array and the corresponding resampled
+        positions.
+
+    See Also
+    --------
+    decimate : Downsample the signal after applying an FIR or IIR filter.
+    resample_poly : Resample using polyphase filtering and an FIR filter.
+
+    Notes
+    -----
+    The argument `window` controls a Fourier-domain window that tapers the
+    Fourier spectrum before zero-padding, which alleviates ringing in the
+    resampled values for signals that were not meant to be interpreted as
+    band-limited.
+
+    If `window` is a function, then it is called with a vector of inputs
+    indicating the frequency bins (i.e., ``fftfreq(x.shape[axis])``).
+
+    If `window` is an array of the same length as `x.shape[axis]` it is
+    assumed to be the window to be applied directly in the Fourier
+    domain (with dc and low-frequency first).
+
+    For any other type of `window`, the function `scipy.signal.get_window`
+    is called to generate the window.
+
+    The first sample of the returned vector is the same as the first
+    sample of the input vector.  The spacing between samples is changed
+    from ``dx`` to ``dx * len(x) / num``.
+
+    If `t` is not None, then it is used solely to calculate the resampled
+    positions `resampled_t`.
+
+    As noted, `resample` uses FFT transformations, which can be very
+    slow if the number of input or output samples is large and prime;
+    see `scipy.fft.fft`.
+
+    Examples
+    --------
+    Note that the end of the resampled data rises to meet the first
+    sample of the next cycle:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+
+    >>> x = np.linspace(0, 10, 20, endpoint=False)
+    >>> y = np.cos(-x**2/6.0)
+    >>> f = signal.resample(y, 100)
+    >>> xnew = np.linspace(0, 10, 100, endpoint=False)
+
+    >>> import matplotlib.pyplot as plt
+    >>> plt.plot(x, y, 'go-', xnew, f, '.-', 10, y[0], 'ro')
+    >>> plt.legend(['data', 'resampled'], loc='best')
+    >>> plt.show()
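+
+    A Fourier-domain taper can be requested through `window`; anything
+    accepted by `scipy.signal.get_window` works, e.g. a Kaiser window:
+
+    >>> f_win = signal.resample(y, 100, window=('kaiser', 5.0))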
+    """
+
+    if domain not in ('time', 'freq'):
+        raise ValueError("Acceptable domain flags are 'time' or"
+                         " 'freq', not domain={}".format(domain))
+
+    x = np.asarray(x)
+    Nx = x.shape[axis]
+
+    # Check if we can use faster real FFT
+    real_input = np.isrealobj(x)
+
+    if domain == 'time':
+        # Forward transform
+        if real_input:
+            X = sp_fft.rfft(x, axis=axis)
+        else:  # Full complex FFT
+            X = sp_fft.fft(x, axis=axis)
+    else:  # domain == 'freq'
+        X = x
+
+    # Apply window to spectrum
+    if window is not None:
+        if callable(window):
+            W = window(sp_fft.fftfreq(Nx))
+        elif isinstance(window, np.ndarray):
+            if window.shape != (Nx,):
+                raise ValueError('window must have the same length as data')
+            W = window
+        else:
+            W = sp_fft.ifftshift(get_window(window, Nx))
+
+        newshape_W = [1] * x.ndim
+        newshape_W[axis] = X.shape[axis]
+        if real_input:
+            # Fold the window back on itself to mimic complex behavior
+            W_real = W.copy()
+            W_real[1:] += W_real[-1:0:-1]
+            W_real[1:] *= 0.5
+            X *= W_real[:newshape_W[axis]].reshape(newshape_W)
+        else:
+            X *= W.reshape(newshape_W)
+
+    # Copy each half of the original spectrum to the output spectrum, either
+    # truncating high frequencies (downsampling) or zero-padding them
+    # (upsampling)
+
+    # Placeholder array for output spectrum
+    newshape = list(x.shape)
+    if real_input:
+        newshape[axis] = num // 2 + 1
+    else:
+        newshape[axis] = num
+    Y = np.zeros(newshape, X.dtype)
+
+    # Copy positive frequency components (and Nyquist, if present)
+    N = min(num, Nx)
+    nyq = N // 2 + 1  # Slice index that includes Nyquist if present
+    sl = [slice(None)] * x.ndim
+    sl[axis] = slice(0, nyq)
+    Y[tuple(sl)] = X[tuple(sl)]
+    if not real_input:
+        # Copy negative frequency components
+        if N > 2:  # (slice expression doesn't collapse to empty array)
+            sl[axis] = slice(nyq - N, None)
+            Y[tuple(sl)] = X[tuple(sl)]
+
+    # Split/join Nyquist component(s) if present
+    # So far we have set Y[+N/2]=X[+N/2]
+    if N % 2 == 0:
+        if num < Nx:  # downsampling
+            if real_input:
+                sl[axis] = slice(N//2, N//2 + 1)
+                Y[tuple(sl)] *= 2.
+            else:
+                # select the component of Y at frequency +N/2,
+                # add the component of X at -N/2
+                sl[axis] = slice(-N//2, -N//2 + 1)
+                Y[tuple(sl)] += X[tuple(sl)]
+        elif Nx < num:  # upsampling
+            # select the component at frequency +N/2 and halve it
+            sl[axis] = slice(N//2, N//2 + 1)
+            Y[tuple(sl)] *= 0.5
+            if not real_input:
+                temp = Y[tuple(sl)]
+                # set the component at -N/2 equal to the component at +N/2
+                sl[axis] = slice(num-N//2, num-N//2 + 1)
+                Y[tuple(sl)] = temp
+
+    # Inverse transform
+    if real_input:
+        y = sp_fft.irfft(Y, num, axis=axis)
+    else:
+        y = sp_fft.ifft(Y, axis=axis, overwrite_x=True)
+
+    y *= (float(num) / float(Nx))
+
+    if t is None:
+        return y
+    else:
+        new_t = np.arange(0, num) * (t[1] - t[0]) * Nx / float(num) + t[0]
+        return y, new_t
+
+
+def resample_poly(x, up, down, axis=0, window=('kaiser', 5.0),
+                  padtype='constant', cval=None):
+    """
+    Resample `x` along the given axis using polyphase filtering.
+
+    The signal `x` is upsampled by the factor `up`, a zero-phase low-pass
+    FIR filter is applied, and then it is downsampled by the factor `down`.
+    The resulting sample rate is ``up / down`` times the original sample
+    rate. By default, values beyond the boundary of the signal are assumed
+    to be zero during the filtering step.
+
+    Parameters
+    ----------
+    x : array_like
+        The data to be resampled.
+    up : int
+        The upsampling factor.
+    down : int
+        The downsampling factor.
+    axis : int, optional
+        The axis of `x` that is resampled. Default is 0.
+    window : string, tuple, or array_like, optional
+        Desired window to use to design the low-pass filter, or the FIR filter
+        coefficients to employ. See below for details.
+    padtype : string, optional
+        `constant`, `line`, `mean`, `median`, `maximum`, `minimum` or any of
+        the other signal extension modes supported by `scipy.signal.upfirdn`.
+        Changes assumptions on values beyond the boundary. If `constant`,
+        assumed to be `cval` (default zero). If `line` assumed to continue a
+        linear trend defined by the first and last points. `mean`, `median`,
+        `maximum` and `minimum` work as in `np.pad` and assume that the values
+        beyond the boundary are the mean, median, maximum or minimum
+        respectively of the array along the axis.
+
+        .. versionadded:: 1.4.0
+    cval : float, optional
+        Value to use if `padtype='constant'`. Default is zero.
+
+        .. versionadded:: 1.4.0
+
+    Returns
+    -------
+    resampled_x : array
+        The resampled array.
+
+    See Also
+    --------
+    decimate : Downsample the signal after applying an FIR or IIR filter.
+    resample : Resample up or down using the FFT method.
+
+    Notes
+    -----
+    This polyphase method will likely be faster than the Fourier method
+    in `scipy.signal.resample` when the number of samples is large and
+    prime, or when the number of samples is large and `up` and `down`
+    share a large greatest common divisor. The length of the FIR
+    filter used will depend on ``max(up, down) // gcd(up, down)``, and
+    the number of operations during polyphase filtering will depend on
+    the filter length and `down` (see `scipy.signal.upfirdn` for details).
+
+    The argument `window` specifies the FIR low-pass filter design.
+
+    If `window` is an array_like it is assumed to be the FIR filter
+    coefficients. Note that the FIR filter is applied after the upsampling
+    step, so it should be designed to operate on a signal at a sampling
+    frequency higher than the original by a factor of `up//gcd(up, down)`.
+    This function's output will be centered with respect to this array, so it
+    is best to pass a symmetric filter with an odd number of samples if, as
+    is usually the case, a zero-phase filter is desired.
+
+    For any other type of `window`, the functions `scipy.signal.get_window`
+    and `scipy.signal.firwin` are called to generate the appropriate filter
+    coefficients.
+
+    The first sample of the returned vector is the same as the first
+    sample of the input vector. The spacing between samples is changed
+    from ``dx`` to ``dx * down / float(up)``.
+
+    Examples
+    --------
+    By default, the end of the resampled data rises to meet the first
+    sample of the next cycle for the FFT method, and gets closer to zero
+    for the polyphase method:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+
+    >>> x = np.linspace(0, 10, 20, endpoint=False)
+    >>> y = np.cos(-x**2/6.0)
+    >>> f_fft = signal.resample(y, 100)
+    >>> f_poly = signal.resample_poly(y, 100, 20)
+    >>> xnew = np.linspace(0, 10, 100, endpoint=False)
+
+    >>> plt.plot(xnew, f_fft, 'b.-', xnew, f_poly, 'r.-')
+    >>> plt.plot(x, y, 'ko-')
+    >>> plt.plot(10, y[0], 'bo', 10, 0., 'ro')  # boundaries
+    >>> plt.legend(['resample', 'resamp_poly', 'data'], loc='best')
+    >>> plt.show()
+
+    This default behaviour can be changed by using the padtype option:
+
+    >>> N = 5
+    >>> x = np.linspace(0, 1, N, endpoint=False)
+    >>> y = 2 + x**2 - 1.7*np.sin(x) + .2*np.cos(11*x)
+    >>> y2 = 1 + x**3 + 0.1*np.sin(x) + .1*np.cos(11*x)
+    >>> Y = np.stack([y, y2], axis=-1)
+    >>> up = 4
+    >>> xr = np.linspace(0, 1, N*up, endpoint=False)
+
+    >>> y2 = signal.resample_poly(Y, up, 1, padtype='constant')
+    >>> y3 = signal.resample_poly(Y, up, 1, padtype='mean')
+    >>> y4 = signal.resample_poly(Y, up, 1, padtype='line')
+
+    >>> for i in [0,1]:
+    ...     plt.figure()
+    ...     plt.plot(xr, y4[:,i], 'g.', label='line')
+    ...     plt.plot(xr, y3[:,i], 'y.', label='mean')
+    ...     plt.plot(xr, y2[:,i], 'r.', label='constant')
+    ...     plt.plot(x, Y[:,i], 'k-')
+    ...     plt.legend()
+    >>> plt.show()
+
+    """
+    x = np.asarray(x)
+    if up != int(up):
+        raise ValueError("up must be an integer")
+    if down != int(down):
+        raise ValueError("down must be an integer")
+    up = int(up)
+    down = int(down)
+    if up < 1 or down < 1:
+        raise ValueError('up and down must be >= 1')
+    if cval is not None and padtype != 'constant':
+        raise ValueError('cval has no effect when padtype is %r' % padtype)
+
+    # Determine our up and down factors
+    # Use a rational approximation to save computation time on really long
+    # signals
+    g_ = math.gcd(up, down)
+    up //= g_
+    down //= g_
+    if up == down == 1:
+        return x.copy()
+    n_in = x.shape[axis]
+    n_out = n_in * up
+    n_out = n_out // down + bool(n_out % down)
+
+    if isinstance(window, (list, np.ndarray)):
+        window = np.array(window)  # use array to force a copy (we modify it)
+        if window.ndim > 1:
+            raise ValueError('window must be 1-D')
+        half_len = (window.size - 1) // 2
+        h = window
+    else:
+        # Design a linear-phase low-pass FIR filter
+        max_rate = max(up, down)
+        f_c = 1. / max_rate  # cutoff of FIR filter (rel. to Nyquist)
+        half_len = 10 * max_rate  # reasonable cutoff for sinc-like function
+        h = firwin(2 * half_len + 1, f_c,
+                   window=window).astype(x.dtype)  # match dtype of x
+    h *= up
+
+    # Zero-pad our filter to put the output samples at the center
+    n_pre_pad = (down - half_len % down)
+    n_post_pad = 0
+    n_pre_remove = (half_len + n_pre_pad) // down
+    # We should rarely need to do this given our filter lengths...
+    while _output_len(len(h) + n_pre_pad + n_post_pad, n_in,
+                      up, down) < n_out + n_pre_remove:
+        n_post_pad += 1
+    h = np.concatenate((np.zeros(n_pre_pad, dtype=h.dtype), h,
+                        np.zeros(n_post_pad, dtype=h.dtype)))
+    n_pre_remove_end = n_pre_remove + n_out
+
+    # Remove background depending on the padtype option
+    funcs = {'mean': np.mean, 'median': np.median,
+             'minimum': np.amin, 'maximum': np.amax}
+    upfirdn_kwargs = {'mode': 'constant', 'cval': 0}
+    if padtype in funcs:
+        background_values = funcs[padtype](x, axis=axis, keepdims=True)
+    elif padtype in _upfirdn_modes:
+        upfirdn_kwargs = {'mode': padtype}
+        if padtype == 'constant':
+            if cval is None:
+                cval = 0
+            upfirdn_kwargs['cval'] = cval
+    else:
+        raise ValueError(
+            'padtype must be one of: maximum, mean, median, minimum, ' +
+            ', '.join(_upfirdn_modes))
+
+    if padtype in funcs:
+        x = x - background_values
+
+    # filter then remove excess
+    y = upfirdn(h, x, up, down, axis=axis, **upfirdn_kwargs)
+    keep = [slice(None), ]*x.ndim
+    keep[axis] = slice(n_pre_remove, n_pre_remove_end)
+    y_keep = y[tuple(keep)]
+
+    # Add background back
+    if padtype in funcs:
+        y_keep += background_values
+
+    return y_keep
+
+
+def vectorstrength(events, period):
+    '''
+    Determine the vector strength of the events corresponding to the given
+    period.
+
+    The vector strength is a measure of phase synchrony, how well the
+    timing of the events is synchronized to a single period of a periodic
+    signal.
+
+    If multiple periods are used, calculate the vector strength of each.
+    This is called the "resonating vector strength".
+
+    Parameters
+    ----------
+    events : 1D array_like
+        An array of time points containing the timing of the events.
+    period : float or array_like
+        The period of the signal that the events should synchronize to.
+        The period is in the same units as `events`.  It can also be an array
+        of periods, in which case the outputs are arrays of the same length.
+
+    Returns
+    -------
+    strength : float or 1D array
+        The strength of the synchronization.  1.0 is perfect synchronization
+        and 0.0 is no synchronization.  If `period` is an array, this is also
+        an array with each element containing the vector strength at the
+        corresponding period.
+    phase : float or array
+        The phase that the events are most strongly synchronized to in radians.
+        If `period` is an array, this is also an array with each element
+        containing the phase for the corresponding period.
+
+    References
+    ----------
+    van Hemmen, JL, Longtin, A, and Vollmayr, AN. Testing resonating vector
+        strength: Auditory system, electric fish, and noise.
+        Chaos 21, 047508 (2011);
+        :doi:`10.1063/1.3670512`.
+    van Hemmen, JL.  Vector strength after Goldberg, Brown, and von Mises:
+        biological and mathematical perspectives.  Biol Cybern.
+        2013 Aug;107(4):385-96. :doi:`10.1007/s00422-013-0561-7`.
+    van Hemmen, JL and Vollmayr, AN.  Resonating vector strength: what happens
+        when we vary the "probing" frequency while keeping the spike times
+        fixed.  Biol Cybern. 2013 Aug;107(4):491-94.
+        :doi:`10.1007/s00422-013-0560-8`.
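+
+    Examples
+    --------
+    Events that are exactly periodic at the probed period give a strength
+    of 1; a small sketch (the corresponding `phase` is 0 up to rounding):
+
+    >>> import numpy as np
+    >>> from scipy.signal import vectorstrength
+    >>> strength, phase = vectorstrength([0.0, 0.5, 1.0, 1.5], 0.5)
+    >>> bool(np.isclose(strength, 1.0))
+    True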
+    '''
+    events = np.asarray(events)
+    period = np.asarray(period)
+    if events.ndim > 1:
+        raise ValueError('events cannot have more than 1 dimension')
+    if period.ndim > 1:
+        raise ValueError('period cannot have more than 1 dimension')
+
+    # we need to know later if period was originally a scalar
+    scalarperiod = not period.ndim
+
+    events = np.atleast_2d(events)
+    period = np.atleast_2d(period)
+    if (period <= 0).any():
+        raise ValueError('periods must be positive')
+
+    # this converts the times to vectors
+    vectors = np.exp(np.dot(2j*np.pi/period.T, events))
+
+    # the vector strength is just the magnitude of the mean of the vectors
+    # the vector phase is the angle of the mean of the vectors
+    vectormean = np.mean(vectors, axis=1)
+    strength = abs(vectormean)
+    phase = np.angle(vectormean)
+
+    # if the original period was a scalar, return scalars
+    if scalarperiod:
+        strength = strength[0]
+        phase = phase[0]
+    return strength, phase
+
+
+def detrend(data, axis=-1, type='linear', bp=0, overwrite_data=False):
+    """
+    Remove linear trend along axis from data.
+
+    Parameters
+    ----------
+    data : array_like
+        The input data.
+    axis : int, optional
+        The axis along which to detrend the data. By default this is the
+        last axis (-1).
+    type : {'linear', 'constant'}, optional
+        The type of detrending. If ``type == 'linear'`` (default),
+        the result of a linear least-squares fit to `data` is subtracted
+        from `data`.
+        If ``type == 'constant'``, only the mean of `data` is subtracted.
+    bp : array_like of ints, optional
+        A sequence of break points. If given, an individual linear fit is
+        performed for each part of `data` between two break points.
+        Break points are specified as indices into `data`. This parameter
+        only has an effect when ``type == 'linear'``.
+    overwrite_data : bool, optional
+        If True, perform in-place detrending and avoid a copy. Default is
+        False.
+
+    Returns
+    -------
+    ret : ndarray
+        The detrended input data.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> rng = np.random.default_rng()
+    >>> npoints = 1000
+    >>> noise = rng.standard_normal(npoints)
+    >>> x = 3 + 2*np.linspace(0, 1, npoints) + noise
+    >>> (signal.detrend(x) - noise).max()
+    0.06  # random
+
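+    A piecewise-linear trend can be removed by supplying break points; a
+    small sketch using an exactly tent-shaped series:
+
+    >>> x = np.r_[np.arange(50), 50 - np.arange(50)]
+    >>> np.allclose(signal.detrend(x, bp=[50]), 0, atol=1e-8)
+    True
+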
+    """
+    if type not in ['linear', 'l', 'constant', 'c']:
+        raise ValueError("Trend type must be 'linear' or 'constant'.")
+    data = np.asarray(data)
+    dtype = data.dtype.char
+    if dtype not in 'dfDF':
+        dtype = 'd'
+    if type in ['constant', 'c']:
+        ret = data - np.mean(data, axis, keepdims=True)
+        return ret
+    else:
+        dshape = data.shape
+        N = dshape[axis]
+        bp = np.sort(np.unique(np.r_[0, bp, N]))
+        if np.any(bp > N):
+            raise ValueError("Breakpoints must be less than length "
+                             "of data along given axis.")
+        Nreg = len(bp) - 1
+        # Restructure data so that axis is along first dimension and
+        #  all other dimensions are collapsed into second dimension
+        rnk = len(dshape)
+        if axis < 0:
+            axis = axis + rnk
+        newdims = np.r_[axis, 0:axis, axis + 1:rnk]
+        newdata = np.reshape(np.transpose(data, tuple(newdims)),
+                             (N, _prod(dshape) // N))
+        if not overwrite_data:
+            newdata = newdata.copy()  # make sure we have a copy
+        if newdata.dtype.char not in 'dfDF':
+            newdata = newdata.astype(dtype)
+        # Find leastsq fit and remove it for each piece
+        for m in range(Nreg):
+            Npts = bp[m + 1] - bp[m]
+            A = np.ones((Npts, 2), dtype)
+            A[:, 0] = np.arange(1, Npts + 1, dtype=dtype) / Npts
+            sl = slice(bp[m], bp[m + 1])
+            coef, resids, rank, s = linalg.lstsq(A, newdata[sl])
+            newdata[sl] = newdata[sl] - np.dot(A, coef)
+        # Put data back in original shape.
+        tdshape = np.take(dshape, newdims, 0)
+        ret = np.reshape(newdata, tuple(tdshape))
+        vals = list(range(1, rnk))
+        olddims = vals[:axis] + [0] + vals[axis:]
+        ret = np.transpose(ret, tuple(olddims))
+        return ret
+
+
+def lfilter_zi(b, a):
+    """
+    Construct initial conditions for lfilter for step response steady-state.
+
+    Compute an initial state `zi` for the `lfilter` function that corresponds
+    to the steady state of the step response.
+
+    A typical use of this function is to set the initial state so that the
+    output of the filter starts at the same value as the first element of
+    the signal to be filtered.
+
+    Parameters
+    ----------
+    b, a : array_like (1-D)
+        The IIR filter coefficients. See `lfilter` for more
+        information.
+
+    Returns
+    -------
+    zi : 1-D ndarray
+        The initial state for the filter.
+
+    See Also
+    --------
+    lfilter, lfiltic, filtfilt
+
+    Notes
+    -----
+    A linear filter with order m has a state space representation (A, B, C, D),
+    for which the output y of the filter can be expressed as::
+
+        z(n+1) = A*z(n) + B*x(n)
+        y(n)   = C*z(n) + D*x(n)
+
+    where z(n) is a vector of length m, A has shape (m, m), B has shape
+    (m, 1), C has shape (1, m) and D has shape (1, 1) (assuming x(n) is
+    a scalar).  lfilter_zi solves::
+
+        zi = A*zi + B
+
+    In other words, it finds the initial condition for which the response
+    to an input of all ones is a constant.
+
+    Given the filter coefficients `a` and `b`, the state space matrices
+    for the transposed direct form II implementation of the linear filter,
+    which is the implementation used by scipy.signal.lfilter, are::
+
+        A = scipy.linalg.companion(a).T
+        B = b[1:] - a[1:]*b[0]
+
+    assuming `a[0]` is 1.0; if `a[0]` is not 1, `a` and `b` are first
+    divided by a[0].
+
+    Examples
+    --------
+    The following code creates a lowpass Butterworth filter. Then it
+    applies that filter to an array whose values are all 1.0; the
+    output is also all 1.0, as expected for a lowpass filter.  If the
+    `zi` argument of `lfilter` had not been given, the output would have
+    shown the transient signal.
+
+    >>> from numpy import array, ones
+    >>> from scipy.signal import lfilter, lfilter_zi, butter
+    >>> b, a = butter(5, 0.25)
+    >>> zi = lfilter_zi(b, a)
+    >>> y, zo = lfilter(b, a, ones(10), zi=zi)
+    >>> y
+    array([1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.])
+
+    Another example:
+
+    >>> x = array([0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0])
+    >>> y, zf = lfilter(b, a, x, zi=zi*x[0])
+    >>> y
+    array([ 0.5       ,  0.5       ,  0.5       ,  0.49836039,  0.48610528,
+            0.44399389,  0.35505241])
+
+    Note that the `zi` argument to `lfilter` was computed using
+    `lfilter_zi` and scaled by `x[0]`.  Then the output `y` has no
+    transient until the input drops from 0.5 to 0.0.
+
+    """
+
+    # FIXME: Can this function be replaced with an appropriate
+    # use of lfiltic?  For example, when b,a = butter(N,Wn),
+    #    lfiltic(b, a, y=numpy.ones_like(a), x=numpy.ones_like(b)).
+    #
+
+    # We could use scipy.signal.normalize, but it uses warnings in
+    # cases where a ValueError is more appropriate, and it allows
+    # b to be 2D.
+    b = np.atleast_1d(b)
+    if b.ndim != 1:
+        raise ValueError("Numerator b must be 1-D.")
+    a = np.atleast_1d(a)
+    if a.ndim != 1:
+        raise ValueError("Denominator a must be 1-D.")
+
+    while len(a) > 1 and a[0] == 0.0:
+        a = a[1:]
+    if a.size < 1 or a[0] == 0.0:
+        raise ValueError("There must be at least one nonzero `a` coefficient.")
+
+    if a[0] != 1.0:
+        # Normalize the coefficients so a[0] == 1.
+        b = b / a[0]
+        a = a / a[0]
+
+    n = max(len(a), len(b))
+
+    # Pad a or b with zeros so they are the same length.
+    if len(a) < n:
+        a = np.r_[a, np.zeros(n - len(a), dtype=a.dtype)]
+    elif len(b) < n:
+        b = np.r_[b, np.zeros(n - len(b), dtype=b.dtype)]
+
+    IminusA = np.eye(n - 1, dtype=np.result_type(a, b)) - linalg.companion(a).T
+    B = b[1:] - a[1:] * b[0]
+    # Solve zi = A*zi + B
+    zi = np.linalg.solve(IminusA, B)
+
+    # For future reference: we could also use the following
+    # explicit formulas to solve the linear system:
+    #
+    # zi = np.zeros(n - 1)
+    # zi[0] = B.sum() / IminusA[:,0].sum()
+    # asum = 1.0
+    # csum = 0.0
+    # for k in range(1,n-1):
+    #     asum += a[k]
+    #     csum += b[k] - a[k]*b[0]
+    #     zi[k] = asum*zi[0] - csum
+
+    return zi
+
+
+def sosfilt_zi(sos):
+    """
+    Construct initial conditions for sosfilt for step response steady-state.
+
+    Compute an initial state `zi` for the `sosfilt` function that corresponds
+    to the steady state of the step response.
+
+    A typical use of this function is to set the initial state so that the
+    output of the filter starts at the same value as the first element of
+    the signal to be filtered.
+
+    Parameters
+    ----------
+    sos : array_like
+        Array of second-order filter coefficients, must have shape
+        ``(n_sections, 6)``. See `sosfilt` for the SOS filter format
+        specification.
+
+    Returns
+    -------
+    zi : ndarray
+        Initial conditions suitable for use with ``sosfilt``, shape
+        ``(n_sections, 2)``.
+
+    See Also
+    --------
+    sosfilt, zpk2sos
+
+    Notes
+    -----
+    .. versionadded:: 0.16.0
+
+    Examples
+    --------
+    Filter a rectangular pulse that begins at time 0, with and without
+    the use of the `zi` argument of `scipy.signal.sosfilt`.
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+
+    >>> sos = signal.butter(9, 0.125, output='sos')
+    >>> zi = signal.sosfilt_zi(sos)
+    >>> x = (np.arange(250) < 100).astype(int)
+    >>> f1 = signal.sosfilt(sos, x)
+    >>> f2, zo = signal.sosfilt(sos, x, zi=zi)
+
+    >>> plt.plot(x, 'k--', label='x')
+    >>> plt.plot(f1, 'b', alpha=0.5, linewidth=2, label='filtered')
+    >>> plt.plot(f2, 'g', alpha=0.25, linewidth=4, label='filtered with zi')
+    >>> plt.legend(loc='best')
+    >>> plt.show()
+
+    """
+    sos = np.asarray(sos)
+    if sos.ndim != 2 or sos.shape[1] != 6:
+        raise ValueError('sos must be shape (n_sections, 6)')
+
+    if sos.dtype.kind in 'bui':
+        sos = sos.astype(np.float64)
+
+    n_sections = sos.shape[0]
+    zi = np.empty((n_sections, 2), dtype=sos.dtype)
+    scale = 1.0
+    for section in range(n_sections):
+        b = sos[section, :3]
+        a = sos[section, 3:]
+        zi[section] = scale * lfilter_zi(b, a)
+        # If H(z) = B(z)/A(z) is this section's transfer function, then
+        # b.sum()/a.sum() is H(1), the gain at omega=0.  That's the steady
+        # state value of this section's step response.
+        scale *= b.sum() / a.sum()
+
+    return zi
+
+
+def _filtfilt_gust(b, a, x, axis=-1, irlen=None):
+    """Forward-backward IIR filter that uses Gustafsson's method.
+
+    Apply the IIR filter defined by `(b,a)` to `x` twice, first forward
+    then backward, using Gustafsson's initial conditions [1]_.
+
+    Let ``y_fb`` be the result of filtering first forward and then backward,
+    and let ``y_bf`` be the result of filtering first backward then forward.
+    Gustafsson's method is to compute initial conditions for the forward
+    pass and the backward pass such that ``y_fb == y_bf``.
+
+    Parameters
+    ----------
+    b : scalar or 1-D ndarray
+        Numerator coefficients of the filter.
+    a : scalar or 1-D ndarray
+        Denominator coefficients of the filter.
+    x : ndarray
+        Data to be filtered.
+    axis : int, optional
+        Axis of `x` to be filtered.  Default is -1.
+    irlen : int or None, optional
+        The length of the nonnegligible part of the impulse response.
+        If `irlen` is None, or if the length of the signal is less than
+        ``2 * irlen``, then no part of the impulse response is ignored.
+
+    Returns
+    -------
+    y : ndarray
+        The filtered data.
+    x0 : ndarray
+        Initial condition for the forward filter.
+    x1 : ndarray
+        Initial condition for the backward filter.
+
+    Notes
+    -----
+    Typically the return values `x0` and `x1` are not needed by the
+    caller.  The intended use of these return values is in unit tests.
+
+    References
+    ----------
+    .. [1] F. Gustafsson. Determining the initial states in forward-backward
+           filtering. Transactions on Signal Processing, 46(4):988-992, 1996.
+
+    """
+    # In the comments, "Gustafsson's paper" and [1] refer to the
+    # paper referenced in the docstring.
+
+    b = np.atleast_1d(b)
+    a = np.atleast_1d(a)
+
+    order = max(len(b), len(a)) - 1
+    if order == 0:
+        # The filter is just scalar multiplication, with no state.
+        scale = (b[0] / a[0])**2
+        y = scale * x
+        return y, np.array([]), np.array([])
+
+    if axis != -1 and axis != x.ndim - 1:
+        # Move the axis containing the data to the end.
+        x = np.swapaxes(x, axis, x.ndim - 1)
+
+    # n is the number of samples in the data to be filtered.
+    n = x.shape[-1]
+
+    if irlen is None or n <= 2*irlen:
+        m = n
+    else:
+        m = irlen
+
+    # Create Obs, the observability matrix (called O in the paper).
+    # This matrix can be interpreted as the operator that propagates
+    # an arbitrary initial state to the output, assuming the input is
+    # zero.
+    # In Gustafsson's paper, the forward and backward filters are not
+    # necessarily the same, so he has both O_f and O_b.  We use the same
+    # filter in both directions, so we only need O. The same comment
+    # applies to S below.
+    Obs = np.zeros((m, order))
+    zi = np.zeros(order)
+    zi[0] = 1
+    Obs[:, 0] = lfilter(b, a, np.zeros(m), zi=zi)[0]
+    for k in range(1, order):
+        Obs[k:, k] = Obs[:-k, 0]
+
+    # Obsr is O^R (Gustafsson's notation for row-reversed O)
+    Obsr = Obs[::-1]
+
+    # Create S.  S is the matrix that applies the filter to the reversed
+    # propagated initial conditions.  That is,
+    #     out = S.dot(zi)
+    # is the same as
+    #     tmp, _ = lfilter(b, a, zeros(), zi=zi)  # Propagate ICs.
+    #     out = lfilter(b, a, tmp[::-1])          # Reverse and filter.
+
+    # Equations (5) & (6) of [1]
+    S = lfilter(b, a, Obs[::-1], axis=0)
+
+    # Sr is S^R (row-reversed S)
+    Sr = S[::-1]
+
+    # M is [(S^R - O), (O^R - S)]
+    if m == n:
+        M = np.hstack((Sr - Obs, Obsr - S))
+    else:
+        # Matrix described in section IV of [1].
+        M = np.zeros((2*m, 2*order))
+        M[:m, :order] = Sr - Obs
+        M[m:, order:] = Obsr - S
+
+    # Naive forward-backward and backward-forward filters.
+    # These have large transients because the filters use zero initial
+    # conditions.
+    y_f = lfilter(b, a, x)
+    y_fb = lfilter(b, a, y_f[..., ::-1])[..., ::-1]
+
+    y_b = lfilter(b, a, x[..., ::-1])[..., ::-1]
+    y_bf = lfilter(b, a, y_b)
+
+    delta_y_bf_fb = y_bf - y_fb
+    if m == n:
+        delta = delta_y_bf_fb
+    else:
+        start_m = delta_y_bf_fb[..., :m]
+        end_m = delta_y_bf_fb[..., -m:]
+        delta = np.concatenate((start_m, end_m), axis=-1)
+
+    # ic_opt holds the "optimal" initial conditions.
+    # The following code computes the result shown in the formula
+    # of the paper between equations (6) and (7).
+    if delta.ndim == 1:
+        ic_opt = linalg.lstsq(M, delta)[0]
+    else:
+        # Reshape delta so it can be used as an array of multiple
+        # right-hand-sides in linalg.lstsq.
+        delta2d = delta.reshape(-1, delta.shape[-1]).T
+        ic_opt0 = linalg.lstsq(M, delta2d)[0].T
+        ic_opt = ic_opt0.reshape(delta.shape[:-1] + (M.shape[-1],))
+
+    # Now compute the filtered signal using equation (7) of [1].
+    # First, form [S^R, O^R] and call it W.
+    if m == n:
+        W = np.hstack((Sr, Obsr))
+    else:
+        W = np.zeros((2*m, 2*order))
+        W[:m, :order] = Sr
+        W[m:, order:] = Obsr
+
+    # Equation (7) of [1] says
+    #     Y_fb^opt = Y_fb^0 + W * [x_0^opt; x_{N-1}^opt]
+    # `wic` is (almost) the product on the right.
+    # W has shape (m, 2*order), and ic_opt has shape (..., 2*order),
+    # so we can't use W.dot(ic_opt).  Instead, we dot ic_opt with W.T,
+    # so wic has shape (..., m).
+    wic = ic_opt.dot(W.T)
+
+    # `wic` is "almost" the product of W and the optimal ICs in equation
+    # (7)--if we're using a truncated impulse response (m < n), `wic`
+    # contains only the adjustments required for the ends of the signal.
+    # Here we form y_opt, taking this into account if necessary.
+    y_opt = y_fb
+    if m == n:
+        y_opt += wic
+    else:
+        y_opt[..., :m] += wic[..., :m]
+        y_opt[..., -m:] += wic[..., -m:]
+
+    x0 = ic_opt[..., :order]
+    x1 = ic_opt[..., -order:]
+    if axis != -1 and axis != x.ndim - 1:
+        # Restore the data axis to its original position.
+        x0 = np.swapaxes(x0, axis, x.ndim - 1)
+        x1 = np.swapaxes(x1, axis, x.ndim - 1)
+        y_opt = np.swapaxes(y_opt, axis, x.ndim - 1)
+
+    return y_opt, x0, x1
+
+
+def filtfilt(b, a, x, axis=-1, padtype='odd', padlen=None, method='pad',
+             irlen=None):
+    """
+    Apply a digital filter forward and backward to a signal.
+
+    This function applies a linear digital filter twice, once forward and
+    once backwards.  The combined filter has zero phase and a filter order
+    twice that of the original.
+
+    The function provides options for handling the edges of the signal.
+
+    The function `sosfiltfilt` (and filter design using ``output='sos'``)
+    should be preferred over `filtfilt` for most filtering tasks, as
+    second-order sections have fewer numerical problems.
+
+    Parameters
+    ----------
+    b : (N,) array_like
+        The numerator coefficient vector of the filter.
+    a : (N,) array_like
+        The denominator coefficient vector of the filter.  If ``a[0]``
+        is not 1, then both `a` and `b` are normalized by ``a[0]``.
+    x : array_like
+        The array of data to be filtered.
+    axis : int, optional
+        The axis of `x` to which the filter is applied.
+        Default is -1.
+    padtype : str or None, optional
+        Must be 'odd', 'even', 'constant', or None.  This determines the
+        type of extension to use for the padded signal to which the filter
+        is applied.  If `padtype` is None, no padding is used.  The default
+        is 'odd'.
+    padlen : int or None, optional
+        The number of elements by which to extend `x` at both ends of
+        `axis` before applying the filter.  This value must be less than
+        ``x.shape[axis] - 1``.  ``padlen=0`` implies no padding.
+        The default value is ``3 * max(len(a), len(b))``.
+    method : str, optional
+        Determines the method for handling the edges of the signal, either
+        "pad" or "gust".  When `method` is "pad", the signal is padded; the
+        type of padding is determined by `padtype` and `padlen`, and `irlen`
+        is ignored.  When `method` is "gust", Gustafsson's method is used,
+        and `padtype` and `padlen` are ignored.
+    irlen : int or None, optional
+        When `method` is "gust", `irlen` specifies the length of the
+        impulse response of the filter.  If `irlen` is None, no part
+        of the impulse response is ignored.  For a long signal, specifying
+        `irlen` can significantly improve the performance of the filter.
+
+    Returns
+    -------
+    y : ndarray
+        The filtered output with the same shape as `x`.
+
+    See Also
+    --------
+    sosfiltfilt, lfilter_zi, lfilter, lfiltic, savgol_filter, sosfilt
+
+    Notes
+    -----
+    When `method` is "pad", the function pads the data along the given axis
+    in one of three ways: odd, even or constant.  The odd and even extensions
+    have the corresponding symmetry about the end point of the data.  The
+    constant extension extends the data with the values at the end points. On
+    both the forward and backward passes, the initial condition of the
+    filter is found by using `lfilter_zi` and scaling it by the end point of
+    the extended data.
+
+    When `method` is "gust", Gustafsson's method [1]_ is used.  Initial
+    conditions are chosen for the forward and backward passes so that the
+    forward-backward filter gives the same result as the backward-forward
+    filter.
+
+    The option to use Gustafsson's method was added in scipy version 0.16.0.
+
+    References
+    ----------
+    .. [1] F. Gustafsson, "Determining the initial states in forward-backward
+           filtering", Transactions on Signal Processing, Vol. 46, pp. 988-992,
+           1996.
+
+    Examples
+    --------
+    The examples will use several functions from `scipy.signal`.
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+
+    First we create a one second signal that is the sum of two pure sine
+    waves, with frequencies 5 Hz and 250 Hz, sampled at 2000 Hz.
+
+    >>> t = np.linspace(0, 1.0, 2001)
+    >>> xlow = np.sin(2 * np.pi * 5 * t)
+    >>> xhigh = np.sin(2 * np.pi * 250 * t)
+    >>> x = xlow + xhigh
+
+    Now create a lowpass Butterworth filter with a cutoff of 0.125 times
+    the Nyquist frequency, or 125 Hz, and apply it to ``x`` with `filtfilt`.
+    The result should be approximately ``xlow``, with no phase shift.
+
+    >>> b, a = signal.butter(8, 0.125)
+    >>> y = signal.filtfilt(b, a, x, padlen=150)
+    >>> np.abs(y - xlow).max()
+    9.1086182074789912e-06
+
+    We get a fairly clean result for this artificial example because
+    the odd extension is exact, and with the moderately long padding,
+    the filter's transients have dissipated by the time the actual data
+    is reached.  In general, transient effects at the edges are
+    unavoidable.
+
+    The following example demonstrates the option ``method="gust"``.
+
+    First, create a filter.
+
+    >>> b, a = signal.ellip(4, 0.01, 120, 0.125)  # Filter to be applied.
+
+    `sig` is a random input signal to be filtered.
+
+    >>> rng = np.random.default_rng()
+    >>> n = 60
+    >>> sig = rng.standard_normal(n)**3 + 3*rng.standard_normal(n).cumsum()
+
+    Apply `filtfilt` to `sig`, once using the Gustafsson method, and
+    once using padding, and plot the results for comparison.
+
+    >>> fgust = signal.filtfilt(b, a, sig, method="gust")
+    >>> fpad = signal.filtfilt(b, a, sig, padlen=50)
+    >>> plt.plot(sig, 'k-', label='input')
+    >>> plt.plot(fgust, 'b-', linewidth=4, label='gust')
+    >>> plt.plot(fpad, 'c-', linewidth=1.5, label='pad')
+    >>> plt.legend(loc='best')
+    >>> plt.show()
+
+    The `irlen` argument can be used to improve the performance
+    of Gustafsson's method.
+
+    Estimate the impulse response length of the filter.
+
+    >>> z, p, k = signal.tf2zpk(b, a)
+    >>> eps = 1e-9
+    >>> r = np.max(np.abs(p))
+    >>> approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r)))
+    >>> approx_impulse_len
+    137
+
+    Apply the filter to a longer signal, with and without the `irlen`
+    argument.  The difference between `y1` and `y2` is small.  For long
+    signals, using `irlen` gives a significant performance improvement.
+
+    >>> x = rng.standard_normal(5000)
+    >>> y1 = signal.filtfilt(b, a, x, method='gust')
+    >>> y2 = signal.filtfilt(b, a, x, method='gust', irlen=approx_impulse_len)
+    >>> print(np.max(np.abs(y1 - y2)))
+    1.80056858312e-10
+
+    """
+    b = np.atleast_1d(b)
+    a = np.atleast_1d(a)
+    x = np.asarray(x)
+
+    if method not in ["pad", "gust"]:
+        raise ValueError("method must be 'pad' or 'gust'.")
+
+    if method == "gust":
+        y, z1, z2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen)
+        return y
+
+    # method == "pad"
+    edge, ext = _validate_pad(padtype, padlen, x, axis,
+                              ntaps=max(len(a), len(b)))
+
+    # Get the steady state of the filter's step response.
+    zi = lfilter_zi(b, a)
+
+    # Reshape zi and create x0 so that zi*x0 broadcasts
+    # to the correct value for the 'zi' keyword argument
+    # to lfilter.
+    zi_shape = [1] * x.ndim
+    zi_shape[axis] = zi.size
+    zi = np.reshape(zi, zi_shape)
+    x0 = axis_slice(ext, stop=1, axis=axis)
+
+    # Forward filter.
+    (y, zf) = lfilter(b, a, ext, axis=axis, zi=zi * x0)
+
+    # Backward filter.
+    # Create y0 so zi*y0 broadcasts appropriately.
+    y0 = axis_slice(y, start=-1, axis=axis)
+    (y, zf) = lfilter(b, a, axis_reverse(y, axis=axis), axis=axis, zi=zi * y0)
+
+    # Reverse y.
+    y = axis_reverse(y, axis=axis)
+
+    if edge > 0:
+        # Slice the actual signal from the extended signal.
+        y = axis_slice(y, start=edge, stop=-edge, axis=axis)
+
+    return y
+
+
+def _validate_pad(padtype, padlen, x, axis, ntaps):
+    """Helper to validate padding for filtfilt"""
+    if padtype not in ['even', 'odd', 'constant', None]:
+        raise ValueError(("Unknown value '%s' given to padtype.  padtype "
+                          "must be 'even', 'odd', 'constant', or None.") %
+                         padtype)
+
+    if padtype is None:
+        padlen = 0
+
+    if padlen is None:
+        # Original padding; preserved for backwards compatibility.
+        edge = ntaps * 3
+    else:
+        edge = padlen
+
+    # x's 'axis' dimension must be bigger than edge.
+    if x.shape[axis] <= edge:
+        raise ValueError("The length of the input vector x must be greater "
+                         "than padlen, which is %d." % edge)
+
+    if padtype is not None and edge > 0:
+        # Make an extension of length `edge` at each
+        # end of the input array.
+        if padtype == 'even':
+            ext = even_ext(x, edge, axis=axis)
+        elif padtype == 'odd':
+            ext = odd_ext(x, edge, axis=axis)
+        else:
+            ext = const_ext(x, edge, axis=axis)
+    else:
+        ext = x
+    return edge, ext
+
+
+def _validate_x(x):
+    x = np.asarray(x)
+    if x.ndim == 0:
+        raise ValueError('x must be at least 1-D')
+    return x
+
+
+def sosfilt(sos, x, axis=-1, zi=None):
+    """
+    Filter data along one dimension using cascaded second-order sections.
+
+    Filter a data sequence, `x`, using a digital IIR filter defined by
+    `sos`.
+
+    Parameters
+    ----------
+    sos : array_like
+        Array of second-order filter coefficients, must have shape
+        ``(n_sections, 6)``. Each row corresponds to a second-order
+        section, with the first three columns providing the numerator
+        coefficients and the last three providing the denominator
+        coefficients.
+    x : array_like
+        An N-dimensional input array.
+    axis : int, optional
+        The axis of the input data array along which to apply the
+        linear filter. The filter is applied to each subarray along
+        this axis.  Default is -1.
+    zi : array_like, optional
+        Initial conditions for the cascaded filter delays.  It is an array
+        (at least 2-D) of shape ``(n_sections, ..., 2, ...)``, where
+        ``..., 2, ...`` denotes the shape of `x`, but with ``x.shape[axis]``
+        replaced by 2.  If `zi` is None or is not given, initial rest
+        (i.e. all zeros) is assumed.
+        Note that these initial conditions are *not* the same as the initial
+        conditions given by `lfiltic` or `lfilter_zi`.
+
+    Returns
+    -------
+    y : ndarray
+        The output of the digital filter.
+    zf : ndarray, optional
+        If `zi` is None, this is not returned, otherwise, `zf` holds the
+        final filter delay values.
+
+    See Also
+    --------
+    zpk2sos, sos2zpk, sosfilt_zi, sosfiltfilt, sosfreqz
+
+    Notes
+    -----
+    The filter function is implemented as a series of second-order filters
+    with direct-form II transposed structure. It is designed to minimize
+    numerical precision errors for high-order filters.
+
+    .. versionadded:: 0.16.0
+
+    Examples
+    --------
+    Plot a 13th-order filter's impulse response using both `lfilter` and
+    `sosfilt`, showing the instability that results from trying to do a
+    13th-order filter in a single stage (the numerical error pushes some poles
+    outside of the unit circle):
+
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy import signal
+    >>> b, a = signal.ellip(13, 0.009, 80, 0.05, output='ba')
+    >>> sos = signal.ellip(13, 0.009, 80, 0.05, output='sos')
+    >>> x = signal.unit_impulse(700)
+    >>> y_tf = signal.lfilter(b, a, x)
+    >>> y_sos = signal.sosfilt(sos, x)
+    >>> plt.plot(y_tf, 'r', label='TF')
+    >>> plt.plot(y_sos, 'k', label='SOS')
+    >>> plt.legend(loc='best')
+    >>> plt.show()
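+
+    As a minimal sketch of block-wise (streaming) use, the `zi` argument can
+    carry the filter state across calls, so filtering a signal in two chunks
+    matches filtering it in one pass:
+
+    >>> import numpy as np
+    >>> sos = signal.butter(4, 0.25, output='sos')
+    >>> x = np.arange(16.0)
+    >>> y_full = signal.sosfilt(sos, x)
+    >>> zi = np.zeros((sos.shape[0], 2))  # initial rest
+    >>> y1, zf = signal.sosfilt(sos, x[:8], zi=zi)
+    >>> y2, _ = signal.sosfilt(sos, x[8:], zi=zf)
+    >>> np.allclose(y_full, np.concatenate([y1, y2]))
+    True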
+
+    """
+    x = _validate_x(x)
+    sos, n_sections = _validate_sos(sos)
+    x_zi_shape = list(x.shape)
+    x_zi_shape[axis] = 2
+    x_zi_shape = tuple([n_sections] + x_zi_shape)
+    inputs = [sos, x]
+    if zi is not None:
+        inputs.append(np.asarray(zi))
+    dtype = np.result_type(*inputs)
+    if dtype.char not in 'fdgFDGO':
+        raise NotImplementedError("input type '%s' not supported" % dtype)
+    if zi is not None:
+        zi = np.array(zi, dtype)  # make a copy so that we can operate in place
+        if zi.shape != x_zi_shape:
+            raise ValueError('Invalid zi shape. With axis=%r, an input with '
+                             'shape %r, and an sos array with %d sections, zi '
+                             'must have shape %r, got %r.' %
+                             (axis, x.shape, n_sections, x_zi_shape, zi.shape))
+        return_zi = True
+    else:
+        zi = np.zeros(x_zi_shape, dtype=dtype)
+        return_zi = False
+    axis = axis % x.ndim  # make positive
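+    # Move the filtered axis to the end and flatten the remaining axes so
+    # the low-level _sosfilt routine sees a contiguous (n_signals, n_samples)
+    # array it can filter in place.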
+    x = np.moveaxis(x, axis, -1)
+    zi = np.moveaxis(zi, [0, axis + 1], [-2, -1])
+    x_shape, zi_shape = x.shape, zi.shape
+    x = np.reshape(x, (-1, x.shape[-1]))
+    x = np.array(x, dtype, order='C')  # make a copy, can modify in place
+    zi = np.ascontiguousarray(np.reshape(zi, (-1, n_sections, 2)))
+    sos = sos.astype(dtype, copy=False)
+    _sosfilt(sos, x, zi)
+    x.shape = x_shape
+    x = np.moveaxis(x, -1, axis)
+    if return_zi:
+        zi.shape = zi_shape
+        zi = np.moveaxis(zi, [-2, -1], [0, axis + 1])
+        out = (x, zi)
+    else:
+        out = x
+    return out
+
+
+def sosfiltfilt(sos, x, axis=-1, padtype='odd', padlen=None):
+    """
+    A forward-backward digital filter using cascaded second-order sections.
+
+    See `filtfilt` for more complete information about this method.
+
+    Parameters
+    ----------
+    sos : array_like
+        Array of second-order filter coefficients, must have shape
+        ``(n_sections, 6)``. Each row corresponds to a second-order
+        section, with the first three columns providing the numerator
+        coefficients and the last three providing the denominator
+        coefficients.
+    x : array_like
+        The array of data to be filtered.
+    axis : int, optional
+        The axis of `x` to which the filter is applied.
+        Default is -1.
+    padtype : str or None, optional
+        Must be 'odd', 'even', 'constant', or None.  This determines the
+        type of extension to use for the padded signal to which the filter
+        is applied.  If `padtype` is None, no padding is used.  The default
+        is 'odd'.
+    padlen : int or None, optional
+        The number of elements by which to extend `x` at both ends of
+        `axis` before applying the filter.  This value must be less than
+        ``x.shape[axis] - 1``.  ``padlen=0`` implies no padding.
+        The default value is::
+
+            3 * (2 * len(sos) + 1 - min((sos[:, 2] == 0).sum(),
+                                        (sos[:, 5] == 0).sum()))
+
+        The extra subtraction at the end attempts to compensate for poles
+        and zeros at the origin (e.g. for odd-order filters) to yield
+        equivalent estimates of `padlen` to those of `filtfilt` for
+        second-order section filters built with `scipy.signal` functions.
+
+    Returns
+    -------
+    y : ndarray
+        The filtered output with the same shape as `x`.
+
+    See Also
+    --------
+    filtfilt, sosfilt, sosfilt_zi, sosfreqz
+
+    Notes
+    -----
+    .. versionadded:: 0.18.0
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.signal import sosfiltfilt, butter
+    >>> import matplotlib.pyplot as plt
+    >>> rng = np.random.default_rng()
+
+    Create an interesting signal to filter.
+
+    >>> n = 201
+    >>> t = np.linspace(0, 1, n)
+    >>> x = 1 + (t < 0.5) - 0.25*t**2 + 0.05*rng.standard_normal(n)
+
+    Create a lowpass Butterworth filter, and use it to filter `x`.
+
+    >>> sos = butter(4, 0.125, output='sos')
+    >>> y = sosfiltfilt(sos, x)
+
+    For comparison, apply an 8th order filter using `sosfilt`.  The filter
+    is initialized using the mean of the first four values of `x`.
+
+    >>> from scipy.signal import sosfilt, sosfilt_zi
+    >>> sos8 = butter(8, 0.125, output='sos')
+    >>> zi = x[:4].mean() * sosfilt_zi(sos8)
+    >>> y2, zo = sosfilt(sos8, x, zi=zi)
+
+    Plot the results.  Note that the phase of `y` matches the input, while
+    `y2` has a significant phase delay.
+
+    >>> plt.plot(t, x, alpha=0.5, label='x(t)')
+    >>> plt.plot(t, y, label='y(t)')
+    >>> plt.plot(t, y2, label='y2(t)')
+    >>> plt.legend(framealpha=1, shadow=True)
+    >>> plt.grid(alpha=0.25)
+    >>> plt.xlabel('t')
+    >>> plt.show()
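+
+    As a rough sketch of N-D use, the filter is applied independently along
+    `axis` and the output keeps the shape of the input:
+
+    >>> x2 = rng.standard_normal((4, 201))
+    >>> y2d = sosfiltfilt(sos, x2, axis=-1)
+    >>> y2d.shape
+    (4, 201)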
+
+    """
+    sos, n_sections = _validate_sos(sos)
+    x = _validate_x(x)
+
+    # `method` is "pad"...
+    ntaps = 2 * n_sections + 1
+    ntaps -= min((sos[:, 2] == 0).sum(), (sos[:, 5] == 0).sum())
+    edge, ext = _validate_pad(padtype, padlen, x, axis,
+                              ntaps=ntaps)
+
+    # These steps follow the same form as filtfilt with modifications
+    zi = sosfilt_zi(sos)  # shape (n_sections, 2) --> (n_sections, ..., 2, ...)
+    zi_shape = [1] * x.ndim
+    zi_shape[axis] = 2
+    zi.shape = [n_sections] + zi_shape
+    x_0 = axis_slice(ext, stop=1, axis=axis)
+    (y, zf) = sosfilt(sos, ext, axis=axis, zi=zi * x_0)
+    y_0 = axis_slice(y, start=-1, axis=axis)
+    (y, zf) = sosfilt(sos, axis_reverse(y, axis=axis), axis=axis, zi=zi * y_0)
+    y = axis_reverse(y, axis=axis)
+    if edge > 0:
+        y = axis_slice(y, start=edge, stop=-edge, axis=axis)
+    return y
+
+
+def decimate(x, q, n=None, ftype='iir', axis=-1, zero_phase=True):
+    """
+    Downsample the signal after applying an anti-aliasing filter.
+
+    By default, an order 8 Chebyshev type I filter is used. If `ftype` is
+    'fir', a Hamming-windowed FIR filter of order ``20 * q`` is used instead.
+
+    Parameters
+    ----------
+    x : array_like
+        The signal to be downsampled, as an N-dimensional array.
+    q : int
+        The downsampling factor. When using IIR downsampling, it is recommended
+        to call `decimate` multiple times for downsampling factors higher than
+        13.
+    n : int, optional
+        The order of the filter (1 less than the length for 'fir'). Defaults to
+        8 for 'iir' and 20 times the downsampling factor for 'fir'.
+    ftype : str {'iir', 'fir'} or ``dlti`` instance, optional
+        If 'iir' or 'fir', specifies the type of lowpass filter. If an
+        instance of a `dlti` object, uses that object to filter before
+        downsampling.
+    axis : int, optional
+        The axis along which to decimate.
+    zero_phase : bool, optional
+        Prevent phase shift by filtering with `filtfilt` instead of `lfilter`
+        when using an IIR filter, and shifting the outputs back by the filter's
+        group delay when using an FIR filter. The default value of ``True`` is
+        recommended, since a phase shift is generally not desired.
+
+        .. versionadded:: 0.18.0
+
+    Returns
+    -------
+    y : ndarray
+        The down-sampled signal.
+
+    See Also
+    --------
+    resample : Resample up or down using the FFT method.
+    resample_poly : Resample using polyphase filtering and an FIR filter.
+
+    Notes
+    -----
+    The ``zero_phase`` keyword was added in 0.18.0.
+    The possibility to use instances of ``dlti`` as ``ftype`` was added in
+    0.18.0.
+
+    Examples
+    --------
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+
+    Define wave parameters.
+
+    >>> wave_duration = 3
+    >>> sample_rate = 100
+    >>> freq = 2
+    >>> q = 5
+
+    Calculate number of samples.
+
+    >>> samples = wave_duration*sample_rate
+    >>> samples_decimated = int(samples/q)
+
+    Create cosine wave.
+
+    >>> x = np.linspace(0, wave_duration, samples, endpoint=False)
+    >>> y = np.cos(x*np.pi*freq*2)
+
+    Decimate cosine wave.
+
+    >>> ydem = signal.decimate(y, q)
+    >>> xnew = np.linspace(0, wave_duration, samples_decimated, endpoint=False)
+
+    Plot original and decimated waves.
+
+    >>> plt.plot(x, y, '.-', xnew, ydem, 'o-')
+    >>> plt.xlabel('Time, Seconds')
+    >>> plt.legend(['data', 'decimated'], loc='best')
+    >>> plt.show()
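+
+    As recommended above for large downsampling factors, decimation can be
+    done in stages; a quick sketch for an overall factor of 25 using two
+    stages of 5:
+
+    >>> ydem2 = signal.decimate(signal.decimate(y, 5), 5)
+    >>> ydem2.shape
+    (12,)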
+
+    """
+
+    x = np.asarray(x)
+    q = operator.index(q)
+
+    if n is not None:
+        n = operator.index(n)
+
+    result_type = x.dtype
+    if not np.issubdtype(result_type, np.inexact) \
+       or result_type.type == np.float16:
+        # upcast integers and float16 to float64
+        result_type = np.float64
+
+    if ftype == 'fir':
+        if n is None:
+            half_len = 10 * q  # reasonable cutoff for our sinc-like function
+            n = 2 * half_len
+        b, a = firwin(n+1, 1. / q, window='hamming'), 1.
+        b = np.asarray(b, dtype=result_type)
+        a = np.asarray(a, dtype=result_type)
+    elif ftype == 'iir':
+        if n is None:
+            n = 8
+        sos = cheby1(n, 0.05, 0.8 / q, output='sos')
+        sos = np.asarray(sos, dtype=result_type)
+    elif isinstance(ftype, dlti):
+        system = ftype._as_zpk()
+        sos = zpk2sos(system.zeros, system.poles, system.gain)
+        sos = np.asarray(sos, dtype=result_type)
+    else:
+        raise ValueError('invalid ftype')
+
+    sl = [slice(None)] * x.ndim
+
+    if ftype == 'fir':
+        b = b / a
+        if zero_phase:
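+            # resample_poly applies the FIR filter and shifts the output back
+            # by the filter's group delay, giving (approximately) zero phase.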
+            y = resample_poly(x, 1, q, axis=axis, window=b)
+        else:
+            # upfirdn is generally faster than lfilter by a factor equal to the
+            # downsampling factor, since it only calculates the needed outputs
+            n_out = x.shape[axis] // q + bool(x.shape[axis] % q)
+            y = upfirdn(b, x, up=1, down=q, axis=axis)
+            sl[axis] = slice(None, n_out, None)
+
+    else:  # IIR case
+        if zero_phase:
+            y = sosfiltfilt(sos, x, axis=axis)
+        else:
+            y = sosfilt(sos, x, axis=axis)
+        sl[axis] = slice(None, None, q)
+
+    return y[tuple(sl)]
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/_spectral.py b/__packaged__/coreml/.python_dependencies/scipy/signal/_spectral.py
new file mode 100644
index 00000000..5fc37c5d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/_spectral.py
@@ -0,0 +1,83 @@
+# Author: Pim Schellart
+# 2010 - 2011
+
+"""Tools for spectral analysis of unequally sampled signals."""
+
+import numpy as np
+
+#pythran export _lombscargle(float64[], float64[], float64[])
+def _lombscargle(x, y, freqs):
+    """
+    _lombscargle(x, y, freqs)
+
+    Computes the Lomb-Scargle periodogram.
+
+    Parameters
+    ----------
+    x : array_like
+        Sample times.
+    y : array_like
+        Measurement values (must be centered so that their mean is zero).
+    freqs : array_like
+        Angular frequencies for output periodogram.
+
+    Returns
+    -------
+    pgram : ndarray
+        Lomb-Scargle periodogram.
+
+    Raises
+    ------
+    ValueError
+        If the input arrays `x` and `y` do not have the same shape.
+
+    See also
+    --------
+    lombscargle
+
+    """
+
+    # Check input sizes
+    if x.shape != y.shape:
+        raise ValueError("Input arrays do not have the same size.")
+
+    # Create empty array for output periodogram
+    pgram = np.empty_like(freqs)
+
+    c = np.empty_like(x)
+    s = np.empty_like(x)
+
+    for i in range(freqs.shape[0]):
+
+        xc = 0.
+        xs = 0.
+        cc = 0.
+        ss = 0.
+        cs = 0.
+
+        c[:] = np.cos(freqs[i] * x)
+        s[:] = np.sin(freqs[i] * x)
+
+        for j in range(x.shape[0]):
+            xc += y[j] * c[j]
+            xs += y[j] * s[j]
+            cc += c[j] * c[j]
+            ss += s[j] * s[j]
+            cs += c[j] * s[j]
+
+        if freqs[i] == 0:
+            raise ZeroDivisionError("freqs cannot contain a zero frequency")
+
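+        # The time offset tau makes the cosine and sine terms orthogonal
+        # at this frequency (the standard Lomb-Scargle time shift).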
+        tau = np.arctan2(2 * cs, cc - ss) / (2 * freqs[i])
+        c_tau = np.cos(freqs[i] * tau)
+        s_tau = np.sin(freqs[i] * tau)
+        c_tau2 = c_tau * c_tau
+        s_tau2 = s_tau * s_tau
+        cs_tau = 2 * c_tau * s_tau
+
+        pgram[i] = 0.5 * (((c_tau * xc + s_tau * xs)**2 /
+            (c_tau2 * cc + cs_tau * cs + s_tau2 * ss)) +
+            ((c_tau * xs - s_tau * xc)**2 /
+             (c_tau2 * ss - cs_tau * cs + s_tau2 * cc)))
+
+    return pgram
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/_spectral_py.py b/__packaged__/coreml/.python_dependencies/scipy/signal/_spectral_py.py
new file mode 100644
index 00000000..29a14da5
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/_spectral_py.py
@@ -0,0 +1,2059 @@
+"""Tools for spectral analysis.
+"""
+
+import numpy as np
+from scipy import fft as sp_fft
+from . import _signaltools
+from .windows import get_window
+from ._spectral import _lombscargle
+from ._arraytools import const_ext, even_ext, odd_ext, zero_ext
+import warnings
+
+
+__all__ = ['periodogram', 'welch', 'lombscargle', 'csd', 'coherence',
+           'spectrogram', 'stft', 'istft', 'check_COLA', 'check_NOLA']
+
+
+def lombscargle(x,
+                y,
+                freqs,
+                precenter=False,
+                normalize=False):
+    """
+    lombscargle(x, y, freqs, precenter=False, normalize=False)
+
+    Computes the Lomb-Scargle periodogram.
+
+    The Lomb-Scargle periodogram was developed by Lomb [1]_ and further
+    extended by Scargle [2]_ to find, and to test the significance of, weak
+    periodic signals with uneven temporal sampling.
+
+    When *normalize* is False (default) the computed periodogram is
+    unnormalized: it takes the value ``(A**2) * N/4`` for a harmonic
+    signal with amplitude A and sufficiently large N.
+
+    When *normalize* is True the computed periodogram is normalized by
+    the residuals of the data around a constant reference model (at zero).
+
+    Input arrays should be 1-D and will be cast to float64.
+
+    Parameters
+    ----------
+    x : array_like
+        Sample times.
+    y : array_like
+        Measurement values.
+    freqs : array_like
+        Angular frequencies for output periodogram.
+    precenter : bool, optional
+        Pre-center measurement values by subtracting the mean.
+    normalize : bool, optional
+        Compute normalized periodogram.
+
+    Returns
+    -------
+    pgram : ndarray
+        Lomb-Scargle periodogram.
+
+    Raises
+    ------
+    ValueError
+        If the input arrays `x` and `y` do not have the same shape.
+
+    See Also
+    --------
+    istft: Inverse Short Time Fourier Transform
+    check_COLA: Check whether the Constant OverLap Add (COLA) constraint is met
+    welch: Power spectral density by Welch's method
+    spectrogram: Spectrogram by Welch's method
+    csd: Cross spectral density by Welch's method
+
+    Notes
+    -----
+    This subroutine calculates the periodogram using a slightly
+    modified algorithm due to Townsend [3]_ which allows the
+    periodogram to be calculated using only a single pass through
+    the input arrays for each frequency.
+
+    The running time scales roughly as ``O(len(x) * len(freqs))``, i.e.
+    O(N^2) when the numbers of samples and frequencies are comparable.
+
+    References
+    ----------
+    .. [1] N.R. Lomb "Least-squares frequency analysis of unequally spaced
+           data", Astrophysics and Space Science, vol 39, pp. 447-462, 1976
+
+    .. [2] J.D. Scargle "Studies in astronomical time series analysis. II -
+           Statistical aspects of spectral analysis of unevenly spaced data",
+           The Astrophysical Journal, vol 263, pp. 835-853, 1982
+
+    .. [3] R.H.D. Townsend, "Fast calculation of the Lomb-Scargle
+           periodogram using graphics processing units.", The Astrophysical
+           Journal Supplement Series, vol 191, pp. 247-253, 2010
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> rng = np.random.default_rng()
+
+    First define some input parameters for the signal:
+
+    >>> A = 2.
+    >>> w0 = 1.  # rad/sec
+    >>> nin = 150
+    >>> nout = 100000
+
+    Randomly generate sample times:
+
+    >>> x = rng.uniform(0, 10*np.pi, nin)
+
+    Plot a sine wave for the selected times:
+
+    >>> y = A * np.cos(w0*x)
+
+    Define the array of frequencies for which to compute the periodogram:
+
+    >>> w = np.linspace(0.01, 10, nout)
+
+    Calculate Lomb-Scargle periodogram:
+
+    >>> import scipy.signal as signal
+    >>> pgram = signal.lombscargle(x, y, w, normalize=True)
+
+    Now make a plot of the input data:
+
+    >>> fig, (ax_t, ax_w) = plt.subplots(2, 1, constrained_layout=True)
+    >>> ax_t.plot(x, y, 'b+')
+    >>> ax_t.set_xlabel('Time [s]')
+
+    Then plot the normalized periodogram:
+
+    >>> ax_w.plot(w, pgram)
+    >>> ax_w.set_xlabel('Angular frequency [rad/s]')
+    >>> ax_w.set_ylabel('Normalized amplitude')
+    >>> plt.show()
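+
+    As a small sketch of the `precenter` option, it is equivalent to
+    subtracting the mean of `y` before the call:
+
+    >>> pg1 = signal.lombscargle(x, y - y.mean(), w)
+    >>> pg2 = signal.lombscargle(x, y, w, precenter=True)
+    >>> np.allclose(pg1, pg2)
+    True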
+
+    """
+    x = np.ascontiguousarray(x, dtype=np.float64)
+    y = np.ascontiguousarray(y, dtype=np.float64)
+    freqs = np.ascontiguousarray(freqs, dtype=np.float64)
+
+    if x.ndim != 1 or y.ndim != 1 or freqs.ndim != 1:
+        raise ValueError("x, y, and freqs must be 1-D arrays")
+
+    if precenter:
+        pgram = _lombscargle(x, y - y.mean(), freqs)
+    else:
+        pgram = _lombscargle(x, y, freqs)
+
+    if normalize:
+        pgram *= 2 / np.dot(y, y)
+
+    return pgram
+
+
+def periodogram(x, fs=1.0, window='boxcar', nfft=None, detrend='constant',
+                return_onesided=True, scaling='density', axis=-1):
+    """
+    Estimate power spectral density using a periodogram.
+
+    Parameters
+    ----------
+    x : array_like
+        Time series of measurement values
+    fs : float, optional
+        Sampling frequency of the `x` time series. Defaults to 1.0.
+    window : str or tuple or array_like, optional
+        Desired window to use. If `window` is a string or tuple, it is
+        passed to `get_window` to generate the window values, which are
+        DFT-even by default. See `get_window` for a list of windows and
+        required parameters. If `window` is array_like it will be used
+        directly as the window and its length must be equal to the length
+        of the axis over which the periodogram is computed. Defaults
+        to 'boxcar'.
+    nfft : int, optional
+        Length of the FFT used. If `None` the length of `x` will be
+        used.
+    detrend : str or function or `False`, optional
+        Specifies how to detrend each segment. If `detrend` is a
+        string, it is passed as the `type` argument to the `detrend`
+        function. If it is a function, it takes a segment and returns a
+        detrended segment. If `detrend` is `False`, no detrending is
+        done. Defaults to 'constant'.
+    return_onesided : bool, optional
+        If `True`, return a one-sided spectrum for real data. If
+        `False` return a two-sided spectrum. Defaults to `True`, but for
+        complex data, a two-sided spectrum is always returned.
+    scaling : { 'density', 'spectrum' }, optional
+        Selects between computing the power spectral density ('density')
+        where `Pxx` has units of V**2/Hz and computing the power
+        spectrum ('spectrum') where `Pxx` has units of V**2, if `x`
+        is measured in V and `fs` is measured in Hz. Defaults to
+        'density'.
+    axis : int, optional
+        Axis along which the periodogram is computed; the default is
+        over the last axis (i.e. ``axis=-1``).
+
+    Returns
+    -------
+    f : ndarray
+        Array of sample frequencies.
+    Pxx : ndarray
+        Power spectral density or power spectrum of `x`.
+
+    See Also
+    --------
+    welch: Estimate power spectral density using Welch's method
+    lombscargle: Lomb-Scargle periodogram for unevenly sampled data
+
+    Notes
+    -----
+    .. versionadded:: 0.12.0
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+    >>> rng = np.random.default_rng()
+
+    Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
+    0.001 V**2/Hz of white noise sampled at 10 kHz.
+
+    >>> fs = 10e3
+    >>> N = 1e5
+    >>> amp = 2*np.sqrt(2)
+    >>> freq = 1234.0
+    >>> noise_power = 0.001 * fs / 2
+    >>> time = np.arange(N) / fs
+    >>> x = amp*np.sin(2*np.pi*freq*time)
+    >>> x += rng.normal(scale=np.sqrt(noise_power), size=time.shape)
+
+    Compute and plot the power spectral density.
+
+    >>> f, Pxx_den = signal.periodogram(x, fs)
+    >>> plt.semilogy(f, Pxx_den)
+    >>> plt.ylim([1e-7, 1e2])
+    >>> plt.xlabel('frequency [Hz]')
+    >>> plt.ylabel('PSD [V**2/Hz]')
+    >>> plt.show()
+
+    If we average the last half of the spectral density, to exclude the
+    peak, we can recover the noise power on the signal.
+
+    >>> np.mean(Pxx_den[25000:])
+    0.000985320699252543
+
+    Now compute and plot the power spectrum.
+
+    >>> f, Pxx_spec = signal.periodogram(x, fs, 'flattop', scaling='spectrum')
+    >>> plt.figure()
+    >>> plt.semilogy(f, np.sqrt(Pxx_spec))
+    >>> plt.ylim([1e-4, 1e1])
+    >>> plt.xlabel('frequency [Hz]')
+    >>> plt.ylabel('Linear spectrum [V RMS]')
+    >>> plt.show()
+
+    The peak height in the power spectrum is an estimate of the RMS
+    amplitude.
+
+    >>> np.sqrt(Pxx_spec.max())
+    2.0077340678640727
+
+    """
+    x = np.asarray(x)
+
+    if x.size == 0:
+        return np.empty(x.shape), np.empty(x.shape)
+
+    if window is None:
+        window = 'boxcar'
+
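+    # An nfft longer than the signal requests zero-padding (delegated to
+    # `welch` below); an nfft shorter than the signal truncates x to its
+    # first nfft samples along `axis`.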
+    if nfft is None:
+        nperseg = x.shape[axis]
+    elif nfft == x.shape[axis]:
+        nperseg = nfft
+    elif nfft > x.shape[axis]:
+        nperseg = x.shape[axis]
+    elif nfft < x.shape[axis]:
+        s = [np.s_[:]]*len(x.shape)
+        s[axis] = np.s_[:nfft]
+        x = x[tuple(s)]
+        nperseg = nfft
+        nfft = None
+
+    if hasattr(window, 'size'):
+        if window.size != nperseg:
+            raise ValueError('the size of the window must be the same size '
+                             'of the input on the specified axis')
+
+    return welch(x, fs=fs, window=window, nperseg=nperseg, noverlap=0,
+                 nfft=nfft, detrend=detrend, return_onesided=return_onesided,
+                 scaling=scaling, axis=axis)
+
+
+def welch(x, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
+          detrend='constant', return_onesided=True, scaling='density',
+          axis=-1, average='mean'):
+    r"""
+    Estimate power spectral density using Welch's method.
+
+    Welch's method [1]_ computes an estimate of the power spectral
+    density by dividing the data into overlapping segments, computing a
+    modified periodogram for each segment and averaging the
+    periodograms.
+
+    Parameters
+    ----------
+    x : array_like
+        Time series of measurement values
+    fs : float, optional
+        Sampling frequency of the `x` time series. Defaults to 1.0.
+    window : str or tuple or array_like, optional
+        Desired window to use. If `window` is a string or tuple, it is
+        passed to `get_window` to generate the window values, which are
+        DFT-even by default. See `get_window` for a list of windows and
+        required parameters. If `window` is array_like it will be used
+        directly as the window and its length must be nperseg. Defaults
+        to a Hann window.
+    nperseg : int, optional
+        Length of each segment. Defaults to None, but if window is str or
+        tuple, is set to 256, and if window is array_like, is set to the
+        length of the window.
+    noverlap : int, optional
+        Number of points to overlap between segments. If `None`,
+        ``noverlap = nperseg // 2``. Defaults to `None`.
+    nfft : int, optional
+        Length of the FFT used, if a zero padded FFT is desired. If
+        `None`, the FFT length is `nperseg`. Defaults to `None`.
+    detrend : str or function or `False`, optional
+        Specifies how to detrend each segment. If `detrend` is a
+        string, it is passed as the `type` argument to the `detrend`
+        function. If it is a function, it takes a segment and returns a
+        detrended segment. If `detrend` is `False`, no detrending is
+        done. Defaults to 'constant'.
+    return_onesided : bool, optional
+        If `True`, return a one-sided spectrum for real data. If
+        `False` return a two-sided spectrum. Defaults to `True`, but for
+        complex data, a two-sided spectrum is always returned.
+    scaling : { 'density', 'spectrum' }, optional
+        Selects between computing the power spectral density ('density')
+        where `Pxx` has units of V**2/Hz and computing the power
+        spectrum ('spectrum') where `Pxx` has units of V**2, if `x`
+        is measured in V and `fs` is measured in Hz. Defaults to
+        'density'.
+    axis : int, optional
+        Axis along which the periodogram is computed; the default is
+        over the last axis (i.e. ``axis=-1``).
+    average : { 'mean', 'median' }, optional
+        Method to use when averaging periodograms. Defaults to 'mean'.
+
+        .. versionadded:: 1.2.0
+
+    Returns
+    -------
+    f : ndarray
+        Array of sample frequencies.
+    Pxx : ndarray
+        Power spectral density or power spectrum of x.
+
+    See Also
+    --------
+    periodogram: Simple, optionally modified periodogram
+    lombscargle: Lomb-Scargle periodogram for unevenly sampled data
+
+    Notes
+    -----
+    An appropriate amount of overlap will depend on the choice of window
+    and on your requirements. For the default Hann window an overlap of
+    50% is a reasonable trade-off between accurately estimating the
+    signal power and not over-counting any of the data. Narrower
+    windows may require a larger overlap.
+
+    If `noverlap` is 0, this method is equivalent to Bartlett's method
+    [2]_.
+
+    .. versionadded:: 0.12.0
+
+    References
+    ----------
+    .. [1] P. Welch, "The use of the fast Fourier transform for the
+           estimation of power spectra: A method based on time averaging
+           over short, modified periodograms", IEEE Trans. Audio
+           Electroacoust. vol. 15, pp. 70-73, 1967.
+    .. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
+           Biometrika, vol. 37, pp. 1-16, 1950.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+    >>> rng = np.random.default_rng()
+
+    Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
+    0.001 V**2/Hz of white noise sampled at 10 kHz.
+
+    >>> fs = 10e3
+    >>> N = 1e5
+    >>> amp = 2*np.sqrt(2)
+    >>> freq = 1234.0
+    >>> noise_power = 0.001 * fs / 2
+    >>> time = np.arange(N) / fs
+    >>> x = amp*np.sin(2*np.pi*freq*time)
+    >>> x += rng.normal(scale=np.sqrt(noise_power), size=time.shape)
+
+    Compute and plot the power spectral density.
+
+    >>> f, Pxx_den = signal.welch(x, fs, nperseg=1024)
+    >>> plt.semilogy(f, Pxx_den)
+    >>> plt.ylim([0.5e-3, 1])
+    >>> plt.xlabel('frequency [Hz]')
+    >>> plt.ylabel('PSD [V**2/Hz]')
+    >>> plt.show()
+
+    If we average the last half of the spectral density, to exclude the
+    peak, we can recover the noise power on the signal.
+
+    >>> np.mean(Pxx_den[256:])
+    0.0009924865443739191
+
+    Now compute and plot the power spectrum.
+
+    >>> f, Pxx_spec = signal.welch(x, fs, 'flattop', 1024, scaling='spectrum')
+    >>> plt.figure()
+    >>> plt.semilogy(f, np.sqrt(Pxx_spec))
+    >>> plt.xlabel('frequency [Hz]')
+    >>> plt.ylabel('Linear spectrum [V RMS]')
+    >>> plt.show()
+
+    The peak height in the power spectrum is an estimate of the RMS
+    amplitude.
+
+    >>> np.sqrt(Pxx_spec.max())
+    2.0077340678640727
+
+    If we now introduce a discontinuity in the signal, by increasing the
+    amplitude of a small portion of the signal by 50, we can see the
+    corruption of the mean average power spectral density, but using a
+    median average better estimates the normal behaviour.
+
+    >>> x[int(N//2):int(N//2)+10] *= 50.
+    >>> f, Pxx_den = signal.welch(x, fs, nperseg=1024)
+    >>> f_med, Pxx_den_med = signal.welch(x, fs, nperseg=1024, average='median')
+    >>> plt.semilogy(f, Pxx_den, label='mean')
+    >>> plt.semilogy(f_med, Pxx_den_med, label='median')
+    >>> plt.ylim([0.5e-3, 1])
+    >>> plt.xlabel('frequency [Hz]')
+    >>> plt.ylabel('PSD [V**2/Hz]')
+    >>> plt.legend()
+    >>> plt.show()
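+
+    As a minimal sketch of the Bartlett equivalence noted above, averaging
+    the periodograms of non-overlapping boxcar segments reproduces
+    ``welch(..., noverlap=0)``:
+
+    >>> f0, P0 = signal.welch(x, fs, window='boxcar', nperseg=1000, noverlap=0)
+    >>> f1, P1 = signal.periodogram(x.reshape(-1, 1000), fs, window='boxcar')
+    >>> np.allclose(P0, P1.mean(axis=0))
+    True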
+
+    """
+    freqs, Pxx = csd(x, x, fs=fs, window=window, nperseg=nperseg,
+                     noverlap=noverlap, nfft=nfft, detrend=detrend,
+                     return_onesided=return_onesided, scaling=scaling,
+                     axis=axis, average=average)
+
+    return freqs, Pxx.real
+
+
+def csd(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
+        detrend='constant', return_onesided=True, scaling='density',
+        axis=-1, average='mean'):
+    r"""
+    Estimate the cross power spectral density, Pxy, using Welch's method.
+
+    Parameters
+    ----------
+    x : array_like
+        Time series of measurement values
+    y : array_like
+        Time series of measurement values
+    fs : float, optional
+        Sampling frequency of the `x` and `y` time series. Defaults
+        to 1.0.
+    window : str or tuple or array_like, optional
+        Desired window to use. If `window` is a string or tuple, it is
+        passed to `get_window` to generate the window values, which are
+        DFT-even by default. See `get_window` for a list of windows and
+        required parameters. If `window` is array_like it will be used
+        directly as the window and its length must be nperseg. Defaults
+        to a Hann window.
+    nperseg : int, optional
+        Length of each segment. Defaults to None, but if window is str or
+        tuple, is set to 256, and if window is array_like, is set to the
+        length of the window.
+    noverlap : int, optional
+        Number of points to overlap between segments. If `None`,
+        ``noverlap = nperseg // 2``. Defaults to `None`.
+    nfft : int, optional
+        Length of the FFT used, if a zero padded FFT is desired. If
+        `None`, the FFT length is `nperseg`. Defaults to `None`.
+    detrend : str or function or `False`, optional
+        Specifies how to detrend each segment. If `detrend` is a
+        string, it is passed as the `type` argument to the `detrend`
+        function. If it is a function, it takes a segment and returns a
+        detrended segment. If `detrend` is `False`, no detrending is
+        done. Defaults to 'constant'.
+    return_onesided : bool, optional
+        If `True`, return a one-sided spectrum for real data. If
+        `False` return a two-sided spectrum. Defaults to `True`, but for
+        complex data, a two-sided spectrum is always returned.
+    scaling : { 'density', 'spectrum' }, optional
+        Selects between computing the cross spectral density ('density')
+        where `Pxy` has units of V**2/Hz and computing the cross spectrum
+        ('spectrum') where `Pxy` has units of V**2, if `x` and `y` are
+        measured in V and `fs` is measured in Hz. Defaults to 'density'.
+    axis : int, optional
+        Axis along which the CSD is computed for both inputs; the
+        default is over the last axis (i.e. ``axis=-1``).
+    average : { 'mean', 'median' }, optional
+        Method to use when averaging periodograms. If the spectrum is
+        complex, the average is computed separately for the real and
+        imaginary parts. Defaults to 'mean'.
+
+        .. versionadded:: 1.2.0
+
+    Returns
+    -------
+    f : ndarray
+        Array of sample frequencies.
+    Pxy : ndarray
+        Cross spectral density or cross power spectrum of x,y.
+
+    See Also
+    --------
+    periodogram: Simple, optionally modified periodogram
+    lombscargle: Lomb-Scargle periodogram for unevenly sampled data
+    welch: Power spectral density by Welch's method. [Equivalent to
+           csd(x,x)]
+    coherence: Magnitude squared coherence by Welch's method.
+
+    Notes
+    -----
+    By convention, Pxy is computed with the conjugate FFT of X
+    multiplied by the FFT of Y.
+
+    If the input series differ in length, the shorter series will be
+    zero-padded to match.
+
+    An appropriate amount of overlap will depend on the choice of window
+    and on your requirements. For the default Hann window an overlap of
+    50% is a reasonable trade-off between accurately estimating the
+    signal power and not over-counting any of the data. Narrower
+    windows may require a larger overlap.
+
+    .. versionadded:: 0.16.0
+
+    References
+    ----------
+    .. [1] P. Welch, "The use of the fast Fourier transform for the
+           estimation of power spectra: A method based on time averaging
+           over short, modified periodograms", IEEE Trans. Audio
+           Electroacoust. vol. 15, pp. 70-73, 1967.
+    .. [2] Rabiner, Lawrence R., and B. Gold. "Theory and Application of
+           Digital Signal Processing" Prentice-Hall, pp. 414-419, 1975
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+    >>> rng = np.random.default_rng()
+
+    Generate two test signals with some common features.
+
+    >>> fs = 10e3
+    >>> N = 1e5
+    >>> amp = 20
+    >>> freq = 1234.0
+    >>> noise_power = 0.001 * fs / 2
+    >>> time = np.arange(N) / fs
+    >>> b, a = signal.butter(2, 0.25, 'low')
+    >>> x = rng.normal(scale=np.sqrt(noise_power), size=time.shape)
+    >>> y = signal.lfilter(b, a, x)
+    >>> x += amp*np.sin(2*np.pi*freq*time)
+    >>> y += rng.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
+
+    Compute and plot the magnitude of the cross spectral density.
+
+    >>> f, Pxy = signal.csd(x, y, fs, nperseg=1024)
+    >>> plt.semilogy(f, np.abs(Pxy))
+    >>> plt.xlabel('frequency [Hz]')
+    >>> plt.ylabel('CSD [V**2/Hz]')
+    >>> plt.show()
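+
+    As a quick sketch of the relation noted in the See Also section,
+    ``csd(x, x)`` reduces to the (complex-valued) auto-spectrum, whose real
+    part is the `welch` estimate:
+
+    >>> f_w, Pxx = signal.welch(x, fs, nperseg=1024)
+    >>> f_c, Pxx_c = signal.csd(x, x, fs, nperseg=1024)
+    >>> np.allclose(Pxx, Pxx_c.real)
+    True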
+
+    """
+    freqs, _, Pxy = _spectral_helper(x, y, fs, window, nperseg, noverlap, nfft,
+                                     detrend, return_onesided, scaling, axis,
+                                     mode='psd')
+
+    # Average over windows.
+    if len(Pxy.shape) >= 2 and Pxy.size > 0:
+        if Pxy.shape[-1] > 1:
+            if average == 'median':
+                # np.median must be passed real arrays for the desired result
+                bias = _median_bias(Pxy.shape[-1])
+                if np.iscomplexobj(Pxy):
+                    Pxy = (np.median(np.real(Pxy), axis=-1)
+                           + 1j * np.median(np.imag(Pxy), axis=-1))
+                else:
+                    Pxy = np.median(Pxy, axis=-1)
+                Pxy /= bias
+            elif average == 'mean':
+                Pxy = Pxy.mean(axis=-1)
+            else:
+                raise ValueError('average must be "median" or "mean", got %s'
+                                 % (average,))
+        else:
+            Pxy = np.reshape(Pxy, Pxy.shape[:-1])
+
+    return freqs, Pxy
+
+
+def spectrogram(x, fs=1.0, window=('tukey', .25), nperseg=None, noverlap=None,
+                nfft=None, detrend='constant', return_onesided=True,
+                scaling='density', axis=-1, mode='psd'):
+    """Compute a spectrogram with consecutive Fourier transforms.
+
+    Spectrograms can be used as a way of visualizing the change of a
+    nonstationary signal's frequency content over time.
+
+    Parameters
+    ----------
+    x : array_like
+        Time series of measurement values
+    fs : float, optional
+        Sampling frequency of the `x` time series. Defaults to 1.0.
+    window : str or tuple or array_like, optional
+        Desired window to use. If `window` is a string or tuple, it is
+        passed to `get_window` to generate the window values, which are
+        DFT-even by default. See `get_window` for a list of windows and
+        required parameters. If `window` is array_like it will be used
+        directly as the window and its length must be nperseg.
+        Defaults to a Tukey window with shape parameter of 0.25.
+    nperseg : int, optional
+        Length of each segment. Defaults to None, but if window is str or
+        tuple, is set to 256, and if window is array_like, is set to the
+        length of the window.
+    noverlap : int, optional
+        Number of points to overlap between segments. If `None`,
+        ``noverlap = nperseg // 8``. Defaults to `None`.
+    nfft : int, optional
+        Length of the FFT used, if a zero padded FFT is desired. If
+        `None`, the FFT length is `nperseg`. Defaults to `None`.
+    detrend : str or function or `False`, optional
+        Specifies how to detrend each segment. If `detrend` is a
+        string, it is passed as the `type` argument to the `detrend`
+        function. If it is a function, it takes a segment and returns a
+        detrended segment. If `detrend` is `False`, no detrending is
+        done. Defaults to 'constant'.
+    return_onesided : bool, optional
+        If `True`, return a one-sided spectrum for real data. If
+        `False` return a two-sided spectrum. Defaults to `True`, but for
+        complex data, a two-sided spectrum is always returned.
+    scaling : { 'density', 'spectrum' }, optional
+        Selects between computing the power spectral density ('density')
+        where `Sxx` has units of V**2/Hz and computing the power
+        spectrum ('spectrum') where `Sxx` has units of V**2, if `x`
+        is measured in V and `fs` is measured in Hz. Defaults to
+        'density'.
+    axis : int, optional
+        Axis along which the spectrogram is computed; the default is over
+        the last axis (i.e. ``axis=-1``).
+    mode : str, optional
+        Defines what kind of return values are expected. Options are
+        ['psd', 'complex', 'magnitude', 'angle', 'phase']. 'complex' is
+        equivalent to the output of `stft` with no padding or boundary
+        extension. 'magnitude' returns the absolute magnitude of the
+        STFT. 'angle' and 'phase' return the complex angle of the STFT,
+        with and without unwrapping, respectively.
+
+    Returns
+    -------
+    f : ndarray
+        Array of sample frequencies.
+    t : ndarray
+        Array of segment times.
+    Sxx : ndarray
+        Spectrogram of x. By default, the last axis of Sxx corresponds
+        to the segment times.
+
+    See Also
+    --------
+    periodogram: Simple, optionally modified periodogram
+    lombscargle: Lomb-Scargle periodogram for unevenly sampled data
+    welch: Power spectral density by Welch's method.
+    csd: Cross spectral density by Welch's method.
+
+    Notes
+    -----
+    An appropriate amount of overlap will depend on the choice of window
+    and on your requirements. In contrast to welch's method, where the
+    entire data stream is averaged over, one may wish to use a smaller
+    overlap (or perhaps none at all) when computing a spectrogram, to
+    maintain some statistical independence between individual segments.
+    It is for this reason that the default window is a Tukey window with
+    1/8th of a window's length overlap at each end.
+
+    .. versionadded:: 0.16.0
+
+    References
+    ----------
+    .. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
+           "Discrete-Time Signal Processing", Prentice Hall, 1999.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> from scipy.fft import fftshift
+    >>> import matplotlib.pyplot as plt
+    >>> rng = np.random.default_rng()
+
+    Generate a test signal, a 2 Vrms sine wave whose frequency is slowly
+    modulated around 3kHz, corrupted by white noise of exponentially
+    decreasing magnitude sampled at 10 kHz.
+
+    >>> fs = 10e3
+    >>> N = 1e5
+    >>> amp = 2 * np.sqrt(2)
+    >>> noise_power = 0.01 * fs / 2
+    >>> time = np.arange(N) / float(fs)
+    >>> mod = 500*np.cos(2*np.pi*0.25*time)
+    >>> carrier = amp * np.sin(2*np.pi*3e3*time + mod)
+    >>> noise = rng.normal(scale=np.sqrt(noise_power), size=time.shape)
+    >>> noise *= np.exp(-time/5)
+    >>> x = carrier + noise
+
+    Compute and plot the spectrogram.
+
+    >>> f, t, Sxx = signal.spectrogram(x, fs)
+    >>> plt.pcolormesh(t, f, Sxx, shading='gouraud')
+    >>> plt.ylabel('Frequency [Hz]')
+    >>> plt.xlabel('Time [sec]')
+    >>> plt.show()
+
+    Note: if the output is not one-sided, use the following:
+
+    >>> f, t, Sxx = signal.spectrogram(x, fs, return_onesided=False)
+    >>> plt.pcolormesh(t, fftshift(f), fftshift(Sxx, axes=0), shading='gouraud')
+    >>> plt.ylabel('Frequency [Hz]')
+    >>> plt.xlabel('Time [sec]')
+    >>> plt.show()
+
+    """
+    modelist = ['psd', 'complex', 'magnitude', 'angle', 'phase']
+    if mode not in modelist:
+        raise ValueError('unknown value for mode {}, must be one of {}'
+                         .format(mode, modelist))
+
+    # need to set default for nperseg before setting default for noverlap below
+    window, nperseg = _triage_segments(window, nperseg,
+                                       input_length=x.shape[axis])
+
+    # Less overlap than welch, so samples are more statistically independent
+    if noverlap is None:
+        noverlap = nperseg // 8
+
+    if mode == 'psd':
+        freqs, time, Sxx = _spectral_helper(x, x, fs, window, nperseg,
+                                            noverlap, nfft, detrend,
+                                            return_onesided, scaling, axis,
+                                            mode='psd')
+
+    else:
+        freqs, time, Sxx = _spectral_helper(x, x, fs, window, nperseg,
+                                            noverlap, nfft, detrend,
+                                            return_onesided, scaling, axis,
+                                            mode='stft')
+
+        if mode == 'magnitude':
+            Sxx = np.abs(Sxx)
+        elif mode in ['angle', 'phase']:
+            Sxx = np.angle(Sxx)
+            if mode == 'phase':
+                # Sxx has one additional dimension for time strides
+                if axis < 0:
+                    axis -= 1
+                Sxx = np.unwrap(Sxx, axis=axis)
+
+        # mode =='complex' is same as `stft`, doesn't need modification
+
+    return freqs, time, Sxx
+
+
+def check_COLA(window, nperseg, noverlap, tol=1e-10):
+    r"""Check whether the Constant OverLap Add (COLA) constraint is met.
+
+    Parameters
+    ----------
+    window : str or tuple or array_like
+        Desired window to use. If `window` is a string or tuple, it is
+        passed to `get_window` to generate the window values, which are
+        DFT-even by default. See `get_window` for a list of windows and
+        required parameters. If `window` is array_like it will be used
+        directly as the window and its length must be nperseg.
+    nperseg : int
+        Length of each segment.
+    noverlap : int
+        Number of points to overlap between segments.
+    tol : float, optional
+        The allowed deviation of a bin's weighted sum from the median bin
+        sum.
+
+    Returns
+    -------
+    verdict : bool
+        `True` if chosen combination satisfies COLA within `tol`,
+        `False` otherwise
+
+    See Also
+    --------
+    check_NOLA: Check whether the Nonzero Overlap Add (NOLA) constraint is met
+    stft: Short Time Fourier Transform
+    istft: Inverse Short Time Fourier Transform
+
+    Notes
+    -----
+    In order to enable inversion of an STFT via the inverse STFT in
+    `istft`, it is sufficient that the signal windowing obeys the constraint of
+    "Constant OverLap Add" (COLA). This ensures that every point in the input
+    data is equally weighted, thereby avoiding aliasing and allowing full
+    reconstruction.
+
+    Some examples of windows that satisfy COLA:
+        - Rectangular window at overlap of 0, 1/2, 2/3, 3/4, ...
+        - Bartlett window at overlap of 1/2, 3/4, 5/6, ...
+        - Hann window at 1/2, 2/3, 3/4, ...
+        - Any Blackman family window at 2/3 overlap
+        - Any window with ``noverlap = nperseg-1``
+
+    A very comprehensive list of other windows may be found in [2]_,
+    wherein the COLA condition is satisfied when the "Amplitude
+    Flatness" is unity.
+
+    .. versionadded:: 0.19.0
+
+    References
+    ----------
+    .. [1] Julius O. Smith III, "Spectral Audio Signal Processing", W3K
+           Publishing, 2011, ISBN 978-0-9745607-3-1.
+    .. [2] G. Heinzel, A. Ruediger and R. Schilling, "Spectrum and
+           spectral density estimation by the Discrete Fourier transform
+           (DFT), including a comprehensive list of window functions and
+           some new at-top windows", 2002,
+           http://hdl.handle.net/11858/00-001M-0000-0013-557A-5
+
+    Examples
+    --------
+    >>> from scipy import signal
+
+    Confirm COLA condition for rectangular window of 75% (3/4) overlap:
+
+    >>> signal.check_COLA(signal.windows.boxcar(100), 100, 75)
+    True
+
+    COLA is not true for 25% (1/4) overlap, though:
+
+    >>> signal.check_COLA(signal.windows.boxcar(100), 100, 25)
+    False
+
+    "Symmetrical" Hann window (for filter design) is not COLA:
+
+    >>> signal.check_COLA(signal.windows.hann(120, sym=True), 120, 60)
+    False
+
+    "Periodic" or "DFT-even" Hann window (for FFT analysis) is COLA for
+    overlap of 1/2, 2/3, 3/4, etc.:
+
+    >>> signal.check_COLA(signal.windows.hann(120, sym=False), 120, 60)
+    True
+
+    >>> signal.check_COLA(signal.windows.hann(120, sym=False), 120, 80)
+    True
+
+    >>> signal.check_COLA(signal.windows.hann(120, sym=False), 120, 90)
+    True
+
+    """
+    nperseg = int(nperseg)
+
+    if nperseg < 1:
+        raise ValueError('nperseg must be a positive integer')
+
+    if noverlap >= nperseg:
+        raise ValueError('noverlap must be less than nperseg.')
+    noverlap = int(noverlap)
+
+    if isinstance(window, str) or type(window) is tuple:
+        win = get_window(window, nperseg)
+    else:
+        win = np.asarray(window)
+        if len(win.shape) != 1:
+            raise ValueError('window must be 1-D')
+        if win.shape[0] != nperseg:
+            raise ValueError('window must have length of nperseg')
+
+    step = nperseg - noverlap
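+    # Overlap-add the window with itself at stride `step`; COLA requires the
+    # resulting per-sample sums to be (nearly) constant.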
+    binsums = sum(win[ii*step:(ii+1)*step] for ii in range(nperseg//step))
+
+    if nperseg % step != 0:
+        binsums[:nperseg % step] += win[-(nperseg % step):]
+
+    deviation = binsums - np.median(binsums)
+    return np.max(np.abs(deviation)) < tol
+
+
+def check_NOLA(window, nperseg, noverlap, tol=1e-10):
+    r"""Check whether the Nonzero Overlap Add (NOLA) constraint is met.
+
+    Parameters
+    ----------
+    window : str or tuple or array_like
+        Desired window to use. If `window` is a string or tuple, it is
+        passed to `get_window` to generate the window values, which are
+        DFT-even by default. See `get_window` for a list of windows and
+        required parameters. If `window` is array_like it will be used
+        directly as the window and its length must be nperseg.
+    nperseg : int
+        Length of each segment.
+    noverlap : int
+        Number of points to overlap between segments.
+    tol : float, optional
+        The smallest value the sum of squared window values in a bin may
+        take while still being considered nonzero.
+
+    Returns
+    -------
+    verdict : bool
+        `True` if chosen combination satisfies the NOLA constraint within
+        `tol`, `False` otherwise
+
+    See Also
+    --------
+    check_COLA: Check whether the Constant OverLap Add (COLA) constraint is met
+    stft: Short Time Fourier Transform
+    istft: Inverse Short Time Fourier Transform
+
+    Notes
+    -----
+    In order to enable inversion of an STFT via the inverse STFT in
+    `istft`, the signal windowing must obey the constraint of "nonzero
+    overlap add" (NOLA):
+
+    .. math:: \sum_{t}w^{2}[n-tH] \ne 0
+
+    for all :math:`n`, where :math:`w` is the window function, :math:`t` is the
+    frame index, and :math:`H` is the hop size (:math:`H` = `nperseg` -
+    `noverlap`).
+
+    This ensures that the normalization factors in the denominator of the
+    overlap-add inversion equation are not zero. Only very pathological windows
+    will fail the NOLA constraint.
+
+    .. versionadded:: 1.2.0
+
+    References
+    ----------
+    .. [1] Julius O. Smith III, "Spectral Audio Signal Processing", W3K
+           Publishing, 2011, ISBN 978-0-9745607-3-1.
+    .. [2] G. Heinzel, A. Ruediger and R. Schilling, "Spectrum and
+           spectral density estimation by the Discrete Fourier transform
+           (DFT), including a comprehensive list of window functions and
+           some new at-top windows", 2002,
+           http://hdl.handle.net/11858/00-001M-0000-0013-557A-5
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import signal
+
+    Confirm NOLA condition for rectangular window of 75% (3/4) overlap:
+
+    >>> signal.check_NOLA(signal.windows.boxcar(100), 100, 75)
+    True
+
+    NOLA is also true for 25% (1/4) overlap:
+
+    >>> signal.check_NOLA(signal.windows.boxcar(100), 100, 25)
+    True
+
+    "Symmetrical" Hann window (for filter design) is also NOLA:
+
+    >>> signal.check_NOLA(signal.windows.hann(120, sym=True), 120, 60)
+    True
+
+    As long as there is overlap, it takes quite a pathological window to fail
+    NOLA:
+
+    >>> w = np.ones(64, dtype="float")
+    >>> w[::2] = 0
+    >>> signal.check_NOLA(w, 64, 32)
+    False
+
+    If there is not enough overlap, a window with zeros at the ends will not
+    work:
+
+    >>> signal.check_NOLA(signal.windows.hann(64), 64, 0)
+    False
+    >>> signal.check_NOLA(signal.windows.hann(64), 64, 1)
+    False
+    >>> signal.check_NOLA(signal.windows.hann(64), 64, 2)
+    True
+
+    """
+    nperseg = int(nperseg)
+
+    if nperseg < 1:
+        raise ValueError('nperseg must be a positive integer')
+
+    if noverlap >= nperseg:
+        raise ValueError('noverlap must be less than nperseg')
+    if noverlap < 0:
+        raise ValueError('noverlap must be a nonnegative integer')
+    noverlap = int(noverlap)
+
+    if isinstance(window, str) or type(window) is tuple:
+        win = get_window(window, nperseg)
+    else:
+        win = np.asarray(window)
+        if len(win.shape) != 1:
+            raise ValueError('window must be 1-D')
+        if win.shape[0] != nperseg:
+            raise ValueError('window must have length of nperseg')
+
+    step = nperseg - noverlap
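+    # Overlap-add the squared window at stride `step`; NOLA requires every
+    # per-sample sum to stay above `tol`, i.e. to be strictly nonzero.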
+    binsums = sum(win[ii*step:(ii+1)*step]**2 for ii in range(nperseg//step))
+
+    if nperseg % step != 0:
+        binsums[:nperseg % step] += win[-(nperseg % step):]**2
+
+    return np.min(binsums) > tol
+
+
+def stft(x, fs=1.0, window='hann', nperseg=256, noverlap=None, nfft=None,
+         detrend=False, return_onesided=True, boundary='zeros', padded=True,
+         axis=-1, scaling='spectrum'):
+    r"""Compute the Short Time Fourier Transform (STFT).
+
+    STFTs can be used as a way of quantifying the change of a
+    nonstationary signal's frequency and phase content over time.
+
+    Parameters
+    ----------
+    x : array_like
+        Time series of measurement values
+    fs : float, optional
+        Sampling frequency of the `x` time series. Defaults to 1.0.
+    window : str or tuple or array_like, optional
+        Desired window to use. If `window` is a string or tuple, it is
+        passed to `get_window` to generate the window values, which are
+        DFT-even by default. See `get_window` for a list of windows and
+        required parameters. If `window` is array_like it will be used
+        directly as the window and its length must be nperseg. Defaults
+        to a Hann window.
+    nperseg : int, optional
+        Length of each segment. Defaults to 256.
+    noverlap : int, optional
+        Number of points to overlap between segments. If `None`,
+        ``noverlap = nperseg // 2``. Defaults to `None`. When
+        specified, the COLA constraint must be met (see Notes below).
+    nfft : int, optional
+        Length of the FFT used, if a zero padded FFT is desired. If
+        `None`, the FFT length is `nperseg`. Defaults to `None`.
+    detrend : str or function or `False`, optional
+        Specifies how to detrend each segment. If `detrend` is a
+        string, it is passed as the `type` argument to the `detrend`
+        function. If it is a function, it takes a segment and returns a
+        detrended segment. If `detrend` is `False`, no detrending is
+        done. Defaults to `False`.
+    return_onesided : bool, optional
+        If `True`, return a one-sided spectrum for real data. If
+        `False` return a two-sided spectrum. Defaults to `True`, but for
+        complex data, a two-sided spectrum is always returned.
+    boundary : str or None, optional
+        Specifies whether the input signal is extended at both ends, and
+        how to generate the new values, in order to center the first
+        windowed segment on the first input point. This has the benefit
+        of enabling reconstruction of the first input point when the
+        employed window function starts at zero. Valid options are
+        ``['even', 'odd', 'constant', 'zeros', None]``. Defaults to
+        'zeros', for zero padding extension. I.e. ``[1, 2, 3, 4]`` is
+        extended to ``[0, 1, 2, 3, 4, 0]`` for ``nperseg=3``.
+    padded : bool, optional
+        Specifies whether the input signal is zero-padded at the end to
+        make the signal fit exactly into an integer number of window
+        segments, so that all of the signal is included in the output.
+        Defaults to `True`. Padding occurs after boundary extension, if
+        `boundary` is not `None`, and `padded` is `True`, as is the
+        default.
+    axis : int, optional
+        Axis along which the STFT is computed; the default is over the
+        last axis (i.e. ``axis=-1``).
+    scaling : {'spectrum', 'psd'}, optional
+        The default 'spectrum' scaling allows each frequency line of `Zxx` to
+        be interpreted as a magnitude spectrum. The 'psd' option scales each
+        line to a power spectral density, making it possible to calculate the
+        signal's energy by numerically integrating over ``abs(Zxx)**2``.
+
+        .. versionadded:: 1.9.0
+
+    Returns
+    -------
+    f : ndarray
+        Array of sample frequencies.
+    t : ndarray
+        Array of segment times.
+    Zxx : ndarray
+        STFT of `x`. By default, the last axis of `Zxx` corresponds
+        to the segment times.
+
+    See Also
+    --------
+    istft: Inverse Short Time Fourier Transform
+    check_COLA: Check whether the Constant OverLap Add (COLA) constraint
+                is met
+    check_NOLA: Check whether the Nonzero Overlap Add (NOLA) constraint is met
+    welch: Power spectral density by Welch's method.
+    spectrogram: Spectrogram by Welch's method.
+    csd: Cross spectral density by Welch's method.
+    lombscargle: Lomb-Scargle periodogram for unevenly sampled data
+
+    Notes
+    -----
+    In order to enable inversion of an STFT via the inverse STFT in
+    `istft`, the signal windowing must obey the constraint of "Nonzero
+    OverLap Add" (NOLA), and the input signal must have complete
+    windowing coverage (i.e. ``(x.shape[axis] - nperseg) %
+    (nperseg-noverlap) == 0``). The `padded` argument may be used to
+    accomplish this.
+
+    Given a time-domain signal :math:`x[n]`, a window :math:`w[n]`, and a hop
+    size :math:`H` = ``nperseg - noverlap``, the windowed frame at time index
+    :math:`t` is given by
+
+    .. math:: x_{t}[n]=x[n]w[n-tH]
+
+    The overlap-add (OLA) reconstruction equation is given by
+
+    .. math:: x[n]=\frac{\sum_{t}x_{t}[n]w[n-tH]}{\sum_{t}w^{2}[n-tH]}
+
+    The NOLA constraint ensures that every normalization term that appears
+    in the denominator of the OLA reconstruction equation is nonzero. Whether a
+    choice of `window`, `nperseg`, and `noverlap` satisfy this constraint can
+    be tested with `check_NOLA`.
+
+    .. versionadded:: 0.19.0
+
+    References
+    ----------
+    .. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
+           "Discrete-Time Signal Processing", Prentice Hall, 1999.
+    .. [2] Daniel W. Griffin, Jae S. Lim "Signal Estimation from
+           Modified Short-Time Fourier Transform", IEEE 1984,
+           10.1109/TASSP.1984.1164317
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+    >>> rng = np.random.default_rng()
+
+    Generate a test signal, a 2 Vrms sine wave whose frequency is slowly
+    modulated around 3kHz, corrupted by white noise of exponentially
+    decreasing magnitude sampled at 10 kHz.
+
+    >>> fs = 10e3
+    >>> N = 1e5
+    >>> amp = 2 * np.sqrt(2)
+    >>> noise_power = 0.01 * fs / 2
+    >>> time = np.arange(N) / float(fs)
+    >>> mod = 500*np.cos(2*np.pi*0.25*time)
+    >>> carrier = amp * np.sin(2*np.pi*3e3*time + mod)
+    >>> noise = rng.normal(scale=np.sqrt(noise_power),
+    ...                    size=time.shape)
+    >>> noise *= np.exp(-time/5)
+    >>> x = carrier + noise
+
+    Compute and plot the STFT's magnitude.
+
+    >>> f, t, Zxx = signal.stft(x, fs, nperseg=1000)
+    >>> plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp, shading='gouraud')
+    >>> plt.title('STFT Magnitude')
+    >>> plt.ylabel('Frequency [Hz]')
+    >>> plt.xlabel('Time [sec]')
+    >>> plt.show()
+
+    Compare the energy of the signal `x` with the energy of its STFT:
+
+    >>> E_x = sum(x**2) / fs  # Energy of x
+    >>> # Calculate a two-sided STFT with PSD scaling:
+    >>> f, t, Zxx = signal.stft(x, fs, nperseg=1000, return_onesided=False,
+    ...                         scaling='psd')
+    >>> # Integrate numerically over abs(Zxx)**2:
+    >>> df, dt = f[1] - f[0], t[1] - t[0]
+    >>> E_Zxx = sum(np.sum(Zxx.real**2 + Zxx.imag**2, axis=0) * df) * dt
+    >>> # The energy is the same, but the numerical errors are quite large:
+    >>> np.isclose(E_x, E_Zxx, rtol=1e-2)
+    True
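+
+    The invertibility condition discussed in the Notes can be verified with
+    `check_NOLA`; as a quick sanity check, the Hann window with 50% overlap
+    used above satisfies it:
+
+    >>> signal.check_NOLA('hann', 1000, 500)
+    True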
+
+    """
+    if scaling == 'psd':
+        scaling = 'density'
+    elif scaling != 'spectrum':
+        raise ValueError(f"Parameter {scaling=} not in ['spectrum', 'psd']!")
+
+    freqs, time, Zxx = _spectral_helper(x, x, fs, window, nperseg, noverlap,
+                                        nfft, detrend, return_onesided,
+                                        scaling=scaling, axis=axis,
+                                        mode='stft', boundary=boundary,
+                                        padded=padded)
+
+    return freqs, time, Zxx
+
+
+def istft(Zxx, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
+          input_onesided=True, boundary=True, time_axis=-1, freq_axis=-2,
+          scaling='spectrum'):
+    r"""Perform the inverse Short Time Fourier transform (iSTFT).
+
+    Parameters
+    ----------
+    Zxx : array_like
+        STFT of the signal to be reconstructed. If a purely real array
+        is passed, it will be cast to a complex data type.
+    fs : float, optional
+        Sampling frequency of the time series. Defaults to 1.0.
+    window : str or tuple or array_like, optional
+        Desired window to use. If `window` is a string or tuple, it is
+        passed to `get_window` to generate the window values, which are
+        DFT-even by default. See `get_window` for a list of windows and
+        required parameters. If `window` is array_like it will be used
+        directly as the window and its length must be nperseg. Defaults
+        to a Hann window. Must match the window used to generate the
+        STFT for faithful inversion.
+    nperseg : int, optional
+        Number of data points corresponding to each STFT segment. This
+        parameter must be specified if the number of data points per
+        segment is odd, or if the STFT was padded via ``nfft >
+        nperseg``. If `None`, the value depends on the shape of
+        `Zxx` and `input_onesided`. If `input_onesided` is `True`,
+        ``nperseg=2*(Zxx.shape[freq_axis] - 1)``. Otherwise,
+        ``nperseg=Zxx.shape[freq_axis]``. Defaults to `None`.
+    noverlap : int, optional
+        Number of points to overlap between segments. If `None`, half
+        of the segment length. When specified, the COLA constraint must
+        be met (see Notes below), and it should match the parameter used
+        to generate the STFT. Defaults to `None`.
+    nfft : int, optional
+        Number of FFT points corresponding to each STFT segment. This
+        parameter must be specified if the STFT was padded via ``nfft >
+        nperseg``. If `None`, the default values are the same as for
+        `nperseg`, detailed above, with one exception: if
+        `input_onesided` is True and
+        ``nperseg==2*Zxx.shape[freq_axis] - 1``, `nfft` also takes on
+        that value. This case allows the proper inversion of an
+        odd-length unpadded STFT using ``nfft=None``. Defaults to
+        `None`.
+    input_onesided : bool, optional
+        If `True`, interpret the input array as one-sided FFTs, such
+        as is returned by `stft` with ``return_onesided=True`` and
+        `numpy.fft.rfft`. If `False`, interpret the input as a
+        two-sided FFT. Defaults to `True`.
+    boundary : bool, optional
+        Specifies whether the input signal was extended at its
+        boundaries by supplying a non-`None` ``boundary`` argument to
+        `stft`. Defaults to `True`.
+    time_axis : int, optional
+        Where the time segments of the STFT is located; the default is
+        the last axis (i.e. ``axis=-1``).
+    freq_axis : int, optional
+        Where the frequency axis of the STFT is located; the default is
+        the penultimate axis (i.e. ``axis=-2``).
+    scaling : {'spectrum', 'psd'}
+        The default 'spectrum' scaling allows each frequency line of `Zxx` to
+        be interpreted as a magnitude spectrum. The 'psd' option scales each
+        line to a power spectral density - it makes it possible to calculate
+        the signal's energy by numerically integrating over ``abs(Zxx)**2``.
+
+    Returns
+    -------
+    t : ndarray
+        Array of output data times.
+    x : ndarray
+        iSTFT of `Zxx`.
+
+    See Also
+    --------
+    stft: Short Time Fourier Transform
+    check_COLA: Check whether the Constant OverLap Add (COLA) constraint
+                is met
+    check_NOLA: Check whether the Nonzero Overlap Add (NOLA) constraint is met
+
+    Notes
+    -----
+    In order to enable inversion of an STFT via the inverse STFT with
+    `istft`, the signal windowing must obey the constraint of "nonzero
+    overlap add" (NOLA):
+
+    .. math:: \sum_{t}w^{2}[n-tH] \ne 0
+
+    This ensures that the normalization factors that appear in the denominator
+    of the overlap-add reconstruction equation
+
+    .. math:: x[n]=\frac{\sum_{t}x_{t}[n]w[n-tH]}{\sum_{t}w^{2}[n-tH]}
+
+    are not zero. The NOLA constraint can be checked with the `check_NOLA`
+    function.
+
+    An STFT which has been modified (via masking or otherwise) is not
+    guaranteed to correspond to an exactly realizable signal. This
+    function implements the iSTFT via the least-squares estimation
+    algorithm detailed in [2]_, which produces a signal that minimizes
+    the mean squared error between the STFT of the returned signal and
+    the modified STFT.
+
+    .. versionadded:: 0.19.0
+
+    References
+    ----------
+    .. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
+           "Discrete-Time Signal Processing", Prentice Hall, 1999.
+    .. [2] Daniel W. Griffin, Jae S. Lim "Signal Estimation from
+           Modified Short-Time Fourier Transform", IEEE 1984,
+           10.1109/TASSP.1984.1164317
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+    >>> rng = np.random.default_rng()
+
+    Generate a test signal, a 2 Vrms sine wave at 50Hz corrupted by
+    0.001 V**2/Hz of white noise sampled at 1024 Hz.
+
+    >>> fs = 1024
+    >>> N = 10*fs
+    >>> nperseg = 512
+    >>> amp = 2 * np.sqrt(2)
+    >>> noise_power = 0.001 * fs / 2
+    >>> time = np.arange(N) / float(fs)
+    >>> carrier = amp * np.sin(2*np.pi*50*time)
+    >>> noise = rng.normal(scale=np.sqrt(noise_power),
+    ...                    size=time.shape)
+    >>> x = carrier + noise
+
+    Compute the STFT, and plot its magnitude
+
+    >>> f, t, Zxx = signal.stft(x, fs=fs, nperseg=nperseg)
+    >>> plt.figure()
+    >>> plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp, shading='gouraud')
+    >>> plt.ylim([f[1], f[-1]])
+    >>> plt.title('STFT Magnitude')
+    >>> plt.ylabel('Frequency [Hz]')
+    >>> plt.xlabel('Time [sec]')
+    >>> plt.yscale('log')
+    >>> plt.show()
+
+    Zero the components that are 10% or less of the carrier magnitude,
+    then convert back to a time series via inverse STFT
+
+    >>> Zxx = np.where(np.abs(Zxx) >= amp/10, Zxx, 0)
+    >>> _, xrec = signal.istft(Zxx, fs)
+
+    Compare the cleaned signal with the original and true carrier signals.
+
+    >>> plt.figure()
+    >>> plt.plot(time, x, time, xrec, time, carrier)
+    >>> plt.xlim([2, 2.1])
+    >>> plt.xlabel('Time [sec]')
+    >>> plt.ylabel('Signal')
+    >>> plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier'])
+    >>> plt.show()
+
+    Note that the cleaned signal does not start as abruptly as the original,
+    since some of the coefficients of the transient were also removed:
+
+    >>> plt.figure()
+    >>> plt.plot(time, x, time, xrec, time, carrier)
+    >>> plt.xlim([0, 0.1])
+    >>> plt.xlabel('Time [sec]')
+    >>> plt.ylabel('Signal')
+    >>> plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier'])
+    >>> plt.show()
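+
+    A minimal round-trip sanity check (a sketch; recovery up to floating
+    point error is expected here, since the default Hann window with 50%
+    overlap satisfies the NOLA condition):
+
+    >>> _, _, Zxx_clean = signal.stft(x, fs=fs, nperseg=nperseg)
+    >>> _, x_rt = signal.istft(Zxx_clean, fs=fs, nperseg=nperseg)
+    >>> np.allclose(x, x_rt[:x.size])
+    True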
+
+    """
+    # Make sure input is an ndarray of appropriate complex dtype
+    Zxx = np.asarray(Zxx) + 0j
+    freq_axis = int(freq_axis)
+    time_axis = int(time_axis)
+
+    if Zxx.ndim < 2:
+        raise ValueError('Input stft must be at least 2d!')
+
+    if freq_axis == time_axis:
+        raise ValueError('Must specify differing time and frequency axes!')
+
+    nseg = Zxx.shape[time_axis]
+
+    if input_onesided:
+        # Assume even segment length
+        n_default = 2*(Zxx.shape[freq_axis] - 1)
+    else:
+        n_default = Zxx.shape[freq_axis]
+
+    # Check windowing parameters
+    if nperseg is None:
+        nperseg = n_default
+    else:
+        nperseg = int(nperseg)
+        if nperseg < 1:
+            raise ValueError('nperseg must be a positive integer')
+
+    if nfft is None:
+        if input_onesided and nperseg == n_default + 1:
+            # Odd nperseg, no FFT padding
+            nfft = nperseg
+        else:
+            nfft = n_default
+    elif nfft < nperseg:
+        raise ValueError('nfft must be greater than or equal to nperseg.')
+    else:
+        nfft = int(nfft)
+
+    if noverlap is None:
+        noverlap = nperseg//2
+    else:
+        noverlap = int(noverlap)
+    if noverlap >= nperseg:
+        raise ValueError('noverlap must be less than nperseg.')
+    nstep = nperseg - noverlap
+
+    # Rearrange axes if necessary
+    if time_axis != Zxx.ndim-1 or freq_axis != Zxx.ndim-2:
+        # Turn negative indices to positive for the call to transpose
+        if freq_axis < 0:
+            freq_axis = Zxx.ndim + freq_axis
+        if time_axis < 0:
+            time_axis = Zxx.ndim + time_axis
+        zouter = list(range(Zxx.ndim))
+        for ax in sorted([time_axis, freq_axis], reverse=True):
+            zouter.pop(ax)
+        Zxx = np.transpose(Zxx, zouter+[freq_axis, time_axis])
+
+    # Get window as array
+    if isinstance(window, (str, tuple)):
+        win = get_window(window, nperseg)
+    else:
+        win = np.asarray(window)
+        if len(win.shape) != 1:
+            raise ValueError('window must be 1-D')
+        if win.shape[0] != nperseg:
+            raise ValueError('window must have length of {0}'.format(nperseg))
+
+    ifunc = sp_fft.irfft if input_onesided else sp_fft.ifft
+    xsubs = ifunc(Zxx, axis=-2, n=nfft)[..., :nperseg, :]
+
+    # Initialize output and normalization arrays
+    outputlength = nperseg + (nseg-1)*nstep
+    x = np.zeros(list(Zxx.shape[:-2])+[outputlength], dtype=xsubs.dtype)
+    norm = np.zeros(outputlength, dtype=xsubs.dtype)
+
+    if np.result_type(win, xsubs) != xsubs.dtype:
+        win = win.astype(xsubs.dtype)
+
+    if scaling == 'spectrum':
+        xsubs *= win.sum()
+    elif scaling == 'psd':
+        xsubs *= np.sqrt(fs * sum(win**2))
+    else:
+        raise ValueError(f"Parameter {scaling=} not in ['spectrum', 'psd']!")
+
+    # Construct the output from the ifft segments
+    # This loop could perhaps be vectorized/strided somehow...
+    for ii in range(nseg):
+        # Window the ifft
+        x[..., ii*nstep:ii*nstep+nperseg] += xsubs[..., ii] * win
+        norm[..., ii*nstep:ii*nstep+nperseg] += win**2
+
+    # Remove extension points
+    if boundary:
+        x = x[..., nperseg//2:-(nperseg//2)]
+        norm = norm[..., nperseg//2:-(nperseg//2)]
+
+    # Divide out normalization where non-tiny
+    if np.sum(norm > 1e-10) != len(norm):
+        warnings.warn("NOLA condition failed, STFT may not be invertible")
+    x /= np.where(norm > 1e-10, norm, 1.0)
+
+    if input_onesided:
+        x = x.real
+
+    # Put axes back
+    if x.ndim > 1:
+        if time_axis != Zxx.ndim-1:
+            if freq_axis < time_axis:
+                time_axis -= 1
+            x = np.moveaxis(x, -1, time_axis)
+
+    time = np.arange(x.shape[0])/float(fs)
+    return time, x
+
+
+def coherence(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
+              nfft=None, detrend='constant', axis=-1):
+    r"""
+    Estimate the magnitude squared coherence, Cxy, of discrete-time
+    signals X and Y using Welch's method.
+
+    ``Cxy = abs(Pxy)**2/(Pxx*Pyy)``, where `Pxx` and `Pyy` are power
+    spectral density estimates of X and Y, and `Pxy` is the cross
+    spectral density estimate of X and Y.
+
+    Parameters
+    ----------
+    x : array_like
+        Time series of measurement values
+    y : array_like
+        Time series of measurement values
+    fs : float, optional
+        Sampling frequency of the `x` and `y` time series. Defaults
+        to 1.0.
+    window : str or tuple or array_like, optional
+        Desired window to use. If `window` is a string or tuple, it is
+        passed to `get_window` to generate the window values, which are
+        DFT-even by default. See `get_window` for a list of windows and
+        required parameters. If `window` is array_like it will be used
+        directly as the window and its length must be nperseg. Defaults
+        to a Hann window.
+    nperseg : int, optional
+        Length of each segment. Defaults to None, but if window is str or
+        tuple, is set to 256, and if window is array_like, is set to the
+        length of the window.
+    noverlap : int, optional
+        Number of points to overlap between segments. If `None`,
+        ``noverlap = nperseg // 2``. Defaults to `None`.
+    nfft : int, optional
+        Length of the FFT used, if a zero padded FFT is desired. If
+        `None`, the FFT length is `nperseg`. Defaults to `None`.
+    detrend : str or function or `False`, optional
+        Specifies how to detrend each segment. If `detrend` is a
+        string, it is passed as the `type` argument to the `detrend`
+        function. If it is a function, it takes a segment and returns a
+        detrended segment. If `detrend` is `False`, no detrending is
+        done. Defaults to 'constant'.
+    axis : int, optional
+        Axis along which the coherence is computed for both inputs; the
+        default is over the last axis (i.e. ``axis=-1``).
+
+    Returns
+    -------
+    f : ndarray
+        Array of sample frequencies.
+    Cxy : ndarray
+        Magnitude squared coherence of x and y.
+
+    See Also
+    --------
+    periodogram: Simple, optionally modified periodogram
+    lombscargle: Lomb-Scargle periodogram for unevenly sampled data
+    welch: Power spectral density by Welch's method.
+    csd: Cross spectral density by Welch's method.
+
+    Notes
+    -----
+    An appropriate amount of overlap will depend on the choice of window
+    and on your requirements. For the default Hann window, an overlap of
+    50% is a reasonable trade-off between accurately estimating the
+    signal power and not over-counting any of the data. Narrower
+    windows may require a larger overlap.
+
+    .. versionadded:: 0.16.0
+
+    References
+    ----------
+    .. [1] P. Welch, "The use of the fast Fourier transform for the
+           estimation of power spectra: A method based on time averaging
+           over short, modified periodograms", IEEE Trans. Audio
+           Electroacoust. vol. 15, pp. 70-73, 1967.
+    .. [2] Stoica, Petre, and Randolph Moses, "Spectral Analysis of
+           Signals" Prentice Hall, 2005
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+    >>> rng = np.random.default_rng()
+
+    Generate two test signals with some common features.
+
+    >>> fs = 10e3
+    >>> N = 1e5
+    >>> amp = 20
+    >>> freq = 1234.0
+    >>> noise_power = 0.001 * fs / 2
+    >>> time = np.arange(N) / fs
+    >>> b, a = signal.butter(2, 0.25, 'low')
+    >>> x = rng.normal(scale=np.sqrt(noise_power), size=time.shape)
+    >>> y = signal.lfilter(b, a, x)
+    >>> x += amp*np.sin(2*np.pi*freq*time)
+    >>> y += rng.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
+
+    Compute and plot the coherence.
+
+    >>> f, Cxy = signal.coherence(x, y, fs, nperseg=1024)
+    >>> plt.semilogy(f, Cxy)
+    >>> plt.xlabel('frequency [Hz]')
+    >>> plt.ylabel('Coherence')
+    >>> plt.show()
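+
+    As a quick sanity check, the estimate is bounded by one at every
+    frequency; this follows from the Cauchy-Schwarz inequality applied to
+    the averaged segment spectra:
+
+    >>> np.all(Cxy <= 1.0)
+    True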
+
+    """
+    freqs, Pxx = welch(x, fs=fs, window=window, nperseg=nperseg,
+                       noverlap=noverlap, nfft=nfft, detrend=detrend,
+                       axis=axis)
+    _, Pyy = welch(y, fs=fs, window=window, nperseg=nperseg, noverlap=noverlap,
+                   nfft=nfft, detrend=detrend, axis=axis)
+    _, Pxy = csd(x, y, fs=fs, window=window, nperseg=nperseg,
+                 noverlap=noverlap, nfft=nfft, detrend=detrend, axis=axis)
+
+    Cxy = np.abs(Pxy)**2 / Pxx / Pyy
+
+    return freqs, Cxy
+
+
+def _spectral_helper(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
+                     nfft=None, detrend='constant', return_onesided=True,
+                     scaling='density', axis=-1, mode='psd', boundary=None,
+                     padded=False):
+    """Calculate various forms of windowed FFTs for PSD, CSD, etc.
+
+    This is a helper function that implements the commonality between
+    the stft, psd, csd, and spectrogram functions. It is not designed to
+    be called externally. The windows are not averaged over; the result
+    from each window is returned.
+
+    Parameters
+    ----------
+    x : array_like
+        Array or sequence containing the data to be analyzed.
+    y : array_like
+        Array or sequence containing the data to be analyzed. If this is
+        the same object in memory as `x` (i.e. ``_spectral_helper(x,
+        x, ...)``), the extra computations are spared.
+    fs : float, optional
+        Sampling frequency of the time series. Defaults to 1.0.
+    window : str or tuple or array_like, optional
+        Desired window to use. If `window` is a string or tuple, it is
+        passed to `get_window` to generate the window values, which are
+        DFT-even by default. See `get_window` for a list of windows and
+        required parameters. If `window` is array_like it will be used
+        directly as the window and its length must be nperseg. Defaults
+        to a Hann window.
+    nperseg : int, optional
+        Length of each segment. Defaults to None, but if window is str or
+        tuple, is set to 256, and if window is array_like, is set to the
+        length of the window.
+    noverlap : int, optional
+        Number of points to overlap between segments. If `None`,
+        ``noverlap = nperseg // 2``. Defaults to `None`.
+    nfft : int, optional
+        Length of the FFT used, if a zero padded FFT is desired. If
+        `None`, the FFT length is `nperseg`. Defaults to `None`.
+    detrend : str or function or `False`, optional
+        Specifies how to detrend each segment. If `detrend` is a
+        string, it is passed as the `type` argument to the `detrend`
+        function. If it is a function, it takes a segment and returns a
+        detrended segment. If `detrend` is `False`, no detrending is
+        done. Defaults to 'constant'.
+    return_onesided : bool, optional
+        If `True`, return a one-sided spectrum for real data. If
+        `False`, return a two-sided spectrum. Defaults to `True`, but for
+        complex data, a two-sided spectrum is always returned.
+    scaling : {'density', 'spectrum'}, optional
+        Selects between computing the cross spectral density ('density')
+        where `Pxy` has units of V**2/Hz and computing the cross
+        spectrum ('spectrum') where `Pxy` has units of V**2, if `x`
+        and `y` are measured in V and `fs` is measured in Hz.
+        Defaults to 'density'.
+    axis : int, optional
+        Axis along which the FFTs are computed; the default is over the
+        last axis (i.e. ``axis=-1``).
+    mode : {'psd', 'stft'}, optional
+        Defines what kind of return values are expected. Defaults to
+        'psd'.
+    boundary : str or None, optional
+        Specifies whether the input signal is extended at both ends, and
+        how to generate the new values, in order to center the first
+        windowed segment on the first input point. This has the benefit
+        of enabling reconstruction of the first input point when the
+        employed window function starts at zero. Valid options are
+        ``['even', 'odd', 'constant', 'zeros', None]``. Defaults to
+        `None`.
+    padded : bool, optional
+        Specifies whether the input signal is zero-padded at the end to
+        make the signal fit exactly into an integer number of window
+        segments, so that all of the signal is included in the output.
+        Defaults to `False`. Padding occurs after boundary extension if
+        `boundary` is not `None` and `padded` is `True`.
+
+    Returns
+    -------
+    freqs : ndarray
+        Array of sample frequencies.
+    t : ndarray
+        Array of times corresponding to each data segment
+    result : ndarray
+        Array of output data, contents dependent on *mode* kwarg.
+
+    Notes
+    -----
+    Adapted from matplotlib.mlab
+
+    .. versionadded:: 0.16.0
+    """
+    if mode not in ['psd', 'stft']:
+        raise ValueError("Unknown value for mode %s, must be one of: "
+                         "{'psd', 'stft'}" % mode)
+
+    boundary_funcs = {'even': even_ext,
+                      'odd': odd_ext,
+                      'constant': const_ext,
+                      'zeros': zero_ext,
+                      None: None}
+
+    if boundary not in boundary_funcs:
+        raise ValueError("Unknown boundary option '{0}', must be one of: {1}"
+                         .format(boundary, list(boundary_funcs.keys())))
+
+    # If x and y are the same object we can save ourselves some computation.
+    same_data = y is x
+
+    if not same_data and mode != 'psd':
+        raise ValueError("x and y must be equal if mode is 'stft'")
+
+    axis = int(axis)
+
+    # Ensure we have np.arrays, get outdtype
+    x = np.asarray(x)
+    if not same_data:
+        y = np.asarray(y)
+        outdtype = np.result_type(x, y, np.complex64)
+    else:
+        outdtype = np.result_type(x, np.complex64)
+
+    if not same_data:
+        # Check if we can broadcast the outer axes together
+        xouter = list(x.shape)
+        youter = list(y.shape)
+        xouter.pop(axis)
+        youter.pop(axis)
+        try:
+            outershape = np.broadcast(np.empty(xouter), np.empty(youter)).shape
+        except ValueError as e:
+            raise ValueError('x and y cannot be broadcast together.') from e
+
+    if same_data:
+        if x.size == 0:
+            return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape)
+    else:
+        if x.size == 0 or y.size == 0:
+            outshape = outershape + (min([x.shape[axis], y.shape[axis]]),)
+            emptyout = np.moveaxis(np.empty(outshape), -1, axis)
+            return emptyout, emptyout, emptyout
+
+    if x.ndim > 1:
+        if axis != -1:
+            x = np.moveaxis(x, axis, -1)
+            if not same_data and y.ndim > 1:
+                y = np.moveaxis(y, axis, -1)
+
+    # Check if x and y are the same length, zero-pad if necessary
+    if not same_data:
+        if x.shape[-1] != y.shape[-1]:
+            if x.shape[-1] < y.shape[-1]:
+                pad_shape = list(x.shape)
+                pad_shape[-1] = y.shape[-1] - x.shape[-1]
+                x = np.concatenate((x, np.zeros(pad_shape)), -1)
+            else:
+                pad_shape = list(y.shape)
+                pad_shape[-1] = x.shape[-1] - y.shape[-1]
+                y = np.concatenate((y, np.zeros(pad_shape)), -1)
+
+    if nperseg is not None:  # if specified by user
+        nperseg = int(nperseg)
+        if nperseg < 1:
+            raise ValueError('nperseg must be a positive integer')
+
+    # parse window; if array like, then set nperseg = win.shape
+    win, nperseg = _triage_segments(window, nperseg, input_length=x.shape[-1])
+
+    if nfft is None:
+        nfft = nperseg
+    elif nfft < nperseg:
+        raise ValueError('nfft must be greater than or equal to nperseg.')
+    else:
+        nfft = int(nfft)
+
+    if noverlap is None:
+        noverlap = nperseg//2
+    else:
+        noverlap = int(noverlap)
+    if noverlap >= nperseg:
+        raise ValueError('noverlap must be less than nperseg.')
+    nstep = nperseg - noverlap
+
+    # Padding occurs after boundary extension, so that the extended signal ends
+    # in zeros, instead of introducing an impulse at the end.
+    # I.e. if x = [..., 3, 2]
+    # extend then pad -> [..., 3, 2, 2, 3, 0, 0, 0]
+    # pad then extend -> [..., 3, 2, 0, 0, 0, 2, 3]
+
+    if boundary is not None:
+        ext_func = boundary_funcs[boundary]
+        x = ext_func(x, nperseg//2, axis=-1)
+        if not same_data:
+            y = ext_func(y, nperseg//2, axis=-1)
+
+    if padded:
+        # Pad to integer number of windowed segments
+    # I.e. make x.shape[-1] = nperseg + (nseg-1)*nstep, with integer nseg
+        nadd = (-(x.shape[-1]-nperseg) % nstep) % nperseg
+        zeros_shape = list(x.shape[:-1]) + [nadd]
+        x = np.concatenate((x, np.zeros(zeros_shape)), axis=-1)
+        if not same_data:
+            zeros_shape = list(y.shape[:-1]) + [nadd]
+            y = np.concatenate((y, np.zeros(zeros_shape)), axis=-1)
+
+    # Handle detrending and window functions
+    if not detrend:
+        def detrend_func(d):
+            return d
+    elif not hasattr(detrend, '__call__'):
+        def detrend_func(d):
+            return _signaltools.detrend(d, type=detrend, axis=-1)
+    elif axis != -1:
+        # Wrap this function so that it receives a shape that it could
+        # reasonably expect to receive.
+        def detrend_func(d):
+            d = np.moveaxis(d, -1, axis)
+            d = detrend(d)
+            return np.moveaxis(d, axis, -1)
+    else:
+        detrend_func = detrend
+
+    if np.result_type(win, np.complex64) != outdtype:
+        win = win.astype(outdtype)
+
+    if scaling == 'density':
+        scale = 1.0 / (fs * (win*win).sum())
+    elif scaling == 'spectrum':
+        scale = 1.0 / win.sum()**2
+    else:
+        raise ValueError('Unknown scaling: %r' % scaling)
+
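+    # In 'stft' mode the result stores amplitudes rather than powers (no
+    # conjugate product is taken below), so apply the square root of the
+    # power scale.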
+    if mode == 'stft':
+        scale = np.sqrt(scale)
+
+    if return_onesided:
+        if np.iscomplexobj(x):
+            sides = 'twosided'
+            warnings.warn('Input data is complex, switching to '
+                          'return_onesided=False')
+        else:
+            sides = 'onesided'
+            if not same_data:
+                if np.iscomplexobj(y):
+                    sides = 'twosided'
+                    warnings.warn('Input data is complex, switching to '
+                                  'return_onesided=False')
+    else:
+        sides = 'twosided'
+
+    if sides == 'twosided':
+        freqs = sp_fft.fftfreq(nfft, 1/fs)
+    elif sides == 'onesided':
+        freqs = sp_fft.rfftfreq(nfft, 1/fs)
+
+    # Perform the windowed FFTs
+    result = _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides)
+
+    if not same_data:
+        # All the same operations on the y data
+        result_y = _fft_helper(y, win, detrend_func, nperseg, noverlap, nfft,
+                               sides)
+        result = np.conjugate(result) * result_y
+    elif mode == 'psd':
+        result = np.conjugate(result) * result
+
+    result *= scale
+    if sides == 'onesided' and mode == 'psd':
+        if nfft % 2:
+            result[..., 1:] *= 2
+        else:
+            # Last point is unpaired Nyquist freq point, don't double
+            result[..., 1:-1] *= 2
+
+    time = np.arange(nperseg/2, x.shape[-1] - nperseg/2 + 1,
+                     nperseg - noverlap)/float(fs)
+    if boundary is not None:
+        time -= (nperseg/2) / fs
+
+    result = result.astype(outdtype)
+
+    # All imaginary parts are zero anyways
+    if same_data and mode != 'stft':
+        result = result.real
+
+    # Output is going to have new last axis for time/window index, so a
+    # negative axis index shifts down one
+    if axis < 0:
+        axis -= 1
+
+    # Roll frequency axis back to axis where the data came from
+    result = np.moveaxis(result, -1, axis)
+
+    return freqs, time, result
+
+
+def _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides):
+    """
+    Calculate windowed FFT, for internal use by
+    `scipy.signal._spectral_helper`.
+
+    This is a helper function that does the main FFT calculation for
+    `_spectral_helper`. All input validation is performed there, and the
+    data axis is assumed to be the last axis of x. It is not designed to
+    be called externally. The windows are not averaged over; the result
+    from each window is returned.
+
+    Returns
+    -------
+    result : ndarray
+        Array of FFT data
+
+    Notes
+    -----
+    Adapted from matplotlib.mlab
+
+    .. versionadded:: 0.16.0
+    """
+    # Create strided array of data segments
+    if nperseg == 1 and noverlap == 0:
+        result = x[..., np.newaxis]
+    else:
+        # https://stackoverflow.com/a/5568169
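+        # E.g. nperseg=4, noverlap=2 on a length-8 last axis gives step=2
+        # and shape (..., 3, 4): the segments x[0:4], x[2:6], x[4:8].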
+        step = nperseg - noverlap
+        shape = x.shape[:-1]+((x.shape[-1]-noverlap)//step, nperseg)
+        strides = x.strides[:-1]+(step*x.strides[-1], x.strides[-1])
+        result = np.lib.stride_tricks.as_strided(x, shape=shape,
+                                                 strides=strides)
+
+    # Detrend each data segment individually
+    result = detrend_func(result)
+
+    # Apply window by multiplication
+    result = win * result
+
+    # Perform the fft. Acts on last axis by default. Zero-pads automatically
+    if sides == 'twosided':
+        func = sp_fft.fft
+    else:
+        result = result.real
+        func = sp_fft.rfft
+    result = func(result, n=nfft)
+
+    return result
+
+
+def _triage_segments(window, nperseg, input_length):
+    """
+    Parses window and nperseg arguments for spectrogram and _spectral_helper.
+    This is a helper function, not meant to be called externally.
+
+    Parameters
+    ----------
+    window : string, tuple, or ndarray
+        If window is specified by a string or tuple and nperseg is not
+        specified, nperseg is set to the default of 256 and a window of
+        that length is returned.
+        If instead the window is array_like and nperseg is not specified, then
+        nperseg is set to the length of the window. A ValueError is raised if
+        the user supplies both an array_like window and a value for nperseg but
+        nperseg does not equal the length of the window.
+
+    nperseg : int
+        Length of each segment
+
+    input_length : int
+        Length of input signal, i.e. x.shape[-1]. Used to test for errors.
+
+    Returns
+    -------
+    win : ndarray
+        The window. If the function was called with a string or tuple,
+        this holds the actual array used as the window.
+
+    nperseg : int
+        Length of each segment. If window is str or tuple and nperseg was
+        `None`, nperseg is set to 256 (capped at the input length). If
+        window is array_like, nperseg is set to the length of the window.
+    """
+    # parse window; if array like, then set nperseg = win.shape
+    if isinstance(window, (str, tuple)):
+        # if nperseg not specified
+        if nperseg is None:
+            nperseg = 256  # then change to default
+        if nperseg > input_length:
+            warnings.warn('nperseg = {0:d} is greater than input length '
+                          '= {1:d}, using nperseg = {1:d}'
+                          .format(nperseg, input_length))
+            nperseg = input_length
+        win = get_window(window, nperseg)
+    else:
+        win = np.asarray(window)
+        if len(win.shape) != 1:
+            raise ValueError('window must be 1-D')
+        if input_length < win.shape[-1]:
+            raise ValueError('window is longer than input signal')
+        if nperseg is None:
+            nperseg = win.shape[0]
+        elif nperseg != win.shape[0]:
+            raise ValueError("value specified for nperseg is different"
+                             " from length of window")
+    return win, nperseg
+
+
+def _median_bias(n):
+    """
+    Returns the bias of the median of a set of periodograms relative to
+    the mean.
+
+    See Appendix B from [1]_ for details.
+
+    Parameters
+    ----------
+    n : int
+        Numbers of periodograms being averaged.
+
+    Returns
+    -------
+    bias : float
+        Calculated bias.
+
+    References
+    ----------
+    .. [1] B. Allen, W.G. Anderson, P.R. Brady, D.A. Brown, J.D.E. Creighton.
+           "FINDCHIRP: an algorithm for detection of gravitational waves from
+           inspiraling compact binaries", Physical Review D 85, 2012,
+           :arxiv:`gr-qc/0509116`
+    """
+    ii_2 = 2 * np.arange(1., (n-1) // 2 + 1)
+    return 1 + np.sum(1. / (ii_2 + 1) - 1. / ii_2)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/_upfirdn.py b/__packaged__/coreml/.python_dependencies/scipy/signal/_upfirdn.py
new file mode 100644
index 00000000..aad808f4
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/_upfirdn.py
@@ -0,0 +1,216 @@
+# Code adapted from "upfirdn" python library with permission:
+#
+# Copyright (c) 2009, Motorola, Inc
+#
+# All Rights Reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# * Neither the name of Motorola nor the names of its contributors may be
+# used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import numpy as np
+
+from ._upfirdn_apply import _output_len, _apply, mode_enum
+
+__all__ = ['upfirdn', '_output_len']
+
+_upfirdn_modes = [
+    'constant', 'wrap', 'edge', 'smooth', 'symmetric', 'reflect',
+    'antisymmetric', 'antireflect', 'line',
+]
+
+
+def _pad_h(h, up):
+    """Store coefficients in a transposed, flipped arrangement.
+
+    For example, suppose upRate is 3, and the
+    input number of coefficients is 10, represented as h[0], ..., h[9].
+
+    Then the internal buffer will look like this::
+
+       h[9], h[6], h[3], h[0],   // flipped phase 0 coefs
+       0,    h[7], h[4], h[1],   // flipped phase 1 coefs (zero-padded)
+       0,    h[8], h[5], h[2],   // flipped phase 2 coefs (zero-padded)
+
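+    As a quick check of this layout (ten taps, ``up=3``, mirroring the
+    illustration above):
+
+    >>> import numpy as np
+    >>> from scipy.signal._upfirdn import _pad_h
+    >>> _pad_h(np.arange(10.), 3).tolist()
+    [9.0, 6.0, 3.0, 0.0, 0.0, 7.0, 4.0, 1.0, 0.0, 8.0, 5.0, 2.0]
+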
+    """
+    h_padlen = len(h) + (-len(h) % up)
+    h_full = np.zeros(h_padlen, h.dtype)
+    h_full[:len(h)] = h
+    h_full = h_full.reshape(-1, up).T[:, ::-1].ravel()
+    return h_full
+
+
+def _check_mode(mode):
+    mode = mode.lower()
+    enum = mode_enum(mode)
+    return enum
+
+
+class _UpFIRDn:
+    """Helper for resampling."""
+
+    def __init__(self, h, x_dtype, up, down):
+        h = np.asarray(h)
+        if h.ndim != 1 or h.size == 0:
+            raise ValueError('h must be 1-D with non-zero length')
+        self._output_type = np.result_type(h.dtype, x_dtype, np.float32)
+        h = np.asarray(h, self._output_type)
+        self._up = int(up)
+        self._down = int(down)
+        if self._up < 1 or self._down < 1:
+            raise ValueError('Both up and down must be >= 1')
+        # This both transposes, and "flips" each phase for filtering
+        self._h_trans_flip = _pad_h(h, self._up)
+        self._h_trans_flip = np.ascontiguousarray(self._h_trans_flip)
+        self._h_len_orig = len(h)
+
+    def apply_filter(self, x, axis=-1, mode='constant', cval=0):
+        """Apply the prepared filter to the specified axis of N-D signal x."""
+        output_len = _output_len(self._h_len_orig, x.shape[axis],
+                                 self._up, self._down)
+        # Explicit use of np.int64 for output_shape dtype avoids OverflowError
+        # when allocating large array on platforms where np.int_ is 32 bits
+        output_shape = np.asarray(x.shape, dtype=np.int64)
+        output_shape[axis] = output_len
+        out = np.zeros(output_shape, dtype=self._output_type, order='C')
+        axis = axis % x.ndim
+        mode = _check_mode(mode)
+        _apply(np.asarray(x, self._output_type),
+               self._h_trans_flip, out,
+               self._up, self._down, axis, mode, cval)
+        return out
+
+
+def upfirdn(h, x, up=1, down=1, axis=-1, mode='constant', cval=0):
+    """Upsample, FIR filter, and downsample.
+
+    Parameters
+    ----------
+    h : array_like
+        1-D FIR (finite-impulse response) filter coefficients.
+    x : array_like
+        Input signal array.
+    up : int, optional
+        Upsampling rate. Default is 1.
+    down : int, optional
+        Downsampling rate. Default is 1.
+    axis : int, optional
+        The axis of the input data array along which to apply the
+        linear filter. The filter is applied to each subarray along
+        this axis. Default is -1.
+    mode : str, optional
+        The signal extension mode to use. The set
+        ``{"constant", "symmetric", "reflect", "edge", "wrap"}`` correspond to
+        modes provided by `numpy.pad`. ``"smooth"`` implements a smooth
+        extension by extending based on the slope of the last 2 points at each
+        end of the array. ``"antireflect"`` and ``"antisymmetric"`` are
+        anti-symmetric versions of ``"reflect"`` and ``"symmetric"``. The mode
+        ``"line"`` extends the signal based on a linear trend defined by the
+        first and last points along the ``axis``.
+
+        .. versionadded:: 1.4.0
+    cval : float, optional
+        The constant value to use when ``mode == "constant"``.
+
+        .. versionadded:: 1.4.0
+
+    Returns
+    -------
+    y : ndarray
+        The output signal array. Dimensions will be the same as `x` except
+        for along `axis`, which will change size according to the `h`,
+        `up`, and `down` parameters.
+
+    Notes
+    -----
+    The algorithm is an implementation of the block diagram shown on page 129
+    of the Vaidyanathan text [1]_ (Figure 4.3-8d).
+
+    The direct approach of upsampling by factor of P with zero insertion,
+    FIR filtering of length ``N``, and downsampling by factor of Q is
+    O(N*Q) per output sample. The polyphase implementation used here is
+    O(N/P).
+
+    .. versionadded:: 0.18
+
+    References
+    ----------
+    .. [1] P. P. Vaidyanathan, Multirate Systems and Filter Banks,
+           Prentice Hall, 1993.
+
+    Examples
+    --------
+    Simple operations:
+
+    >>> import numpy as np
+    >>> from scipy.signal import upfirdn
+    >>> upfirdn([1, 1, 1], [1, 1, 1])   # FIR filter
+    array([ 1.,  2.,  3.,  2.,  1.])
+    >>> upfirdn([1], [1, 2, 3], 3)  # upsampling with zeros insertion
+    array([ 1.,  0.,  0.,  2.,  0.,  0.,  3.])
+    >>> upfirdn([1, 1, 1], [1, 2, 3], 3)  # upsampling with sample-and-hold
+    array([ 1.,  1.,  1.,  2.,  2.,  2.,  3.,  3.,  3.])
+    >>> upfirdn([.5, 1, .5], [1, 1, 1], 2)  # linear interpolation
+    array([ 0.5,  1. ,  1. ,  1. ,  1. ,  1. ,  0.5])
+    >>> upfirdn([1], np.arange(10), 1, 3)  # decimation by 3
+    array([ 0.,  3.,  6.,  9.])
+    >>> upfirdn([.5, 1, .5], np.arange(10), 2, 3)  # linear interp, rate 2/3
+    array([ 0. ,  1. ,  2.5,  4. ,  5.5,  7. ,  8.5])
+
+    Apply a single filter to multiple signals:
+
+    >>> x = np.reshape(np.arange(8), (4, 2))
+    >>> x
+    array([[0, 1],
+           [2, 3],
+           [4, 5],
+           [6, 7]])
+
+    Apply along the last dimension of ``x``:
+
+    >>> h = [1, 1]
+    >>> upfirdn(h, x, 2)
+    array([[ 0.,  0.,  1.,  1.],
+           [ 2.,  2.,  3.,  3.],
+           [ 4.,  4.,  5.,  5.],
+           [ 6.,  6.,  7.,  7.]])
+
+    Apply along the 0th dimension of ``x``:
+
+    >>> upfirdn(h, x, 2, axis=0)
+    array([[ 0.,  1.],
+           [ 0.,  1.],
+           [ 2.,  3.],
+           [ 2.,  3.],
+           [ 4.,  5.],
+           [ 4.,  5.],
+           [ 6.,  7.],
+           [ 6.,  7.]])
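+
+    The polyphase result agrees with the naive approach of zero insertion,
+    full-rate FIR filtering, and decimation (a sketch using
+    `scipy.signal.lfilter`):
+
+    >>> from scipy.signal import lfilter
+    >>> x1, h1 = np.arange(10.), [.5, 1, .5]
+    >>> up, down = 2, 3
+    >>> xe = np.zeros(len(x1) * up)
+    >>> xe[::up] = x1                        # zero insertion
+    >>> naive = lfilter(h1, 1, xe)[::down]   # filter at high rate, decimate
+    >>> np.allclose(upfirdn(h1, x1, up, down), naive)
+    True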
+    """
+    x = np.asarray(x)
+    ufd = _UpFIRDn(h, x.dtype, up, down)
+    # This is equivalent to (but faster than) using np.apply_along_axis
+    return ufd.apply_filter(x, axis, mode, cval)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/_waveforms.py b/__packaged__/coreml/.python_dependencies/scipy/signal/_waveforms.py
new file mode 100644
index 00000000..55bd045c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/_waveforms.py
@@ -0,0 +1,672 @@
+# Author: Travis Oliphant
+# 2003
+#
+# Feb. 2010: Updated by Warren Weckesser:
+#   Rewrote much of chirp()
+#   Added sweep_poly()
+import numpy as np
+from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \
+    exp, cos, sin, polyval, polyint
+
+
+__all__ = ['sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly',
+           'unit_impulse']
+
+
+def sawtooth(t, width=1):
+    """
+    Return a periodic sawtooth or triangle waveform.
+
+    The sawtooth waveform has a period ``2*pi``, rises from -1 to 1 on the
+    interval 0 to ``width*2*pi``, then drops from 1 to -1 on the interval
+    ``width*2*pi`` to ``2*pi``. `width` must be in the interval [0, 1].
+
+    Note that this is not band-limited.  It produces an infinite number
+    of harmonics, which are aliased back and forth across the frequency
+    spectrum.
+
+    Parameters
+    ----------
+    t : array_like
+        Time.
+    width : array_like, optional
+        Width of the rising ramp as a proportion of the total cycle.
+        Default is 1, producing a rising ramp, while 0 produces a falling
+        ramp.  `width` = 0.5 produces a triangle wave.
+        If an array, causes wave shape to change over time, and must be the
+        same length as t.
+
+    Returns
+    -------
+    y : ndarray
+        Output array containing the sawtooth waveform.
+
+    Examples
+    --------
+    A 5 Hz waveform sampled at 500 Hz for 1 second:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+    >>> t = np.linspace(0, 1, 500)
+    >>> plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t))
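+
+    A quick numeric check of the ramp formulas for a triangle wave
+    (``width=0.5``): the wave rises from -1 at t=0 through 0 to +1 at the
+    middle of the cycle:
+
+    >>> np.allclose(signal.sawtooth(np.array([0, np.pi/2, np.pi]), width=0.5),
+    ...             [-1, 0, 1])
+    True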
+
+    """
+    t, w = asarray(t), asarray(width)
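+    # Broadcast t and width to a common shape: adding (t - t) expands w and
+    # adding (w - w) expands t, without changing any values.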
+    w = asarray(w + (t - t))
+    t = asarray(t + (w - w))
+    if t.dtype.char in 'fFdD':  # membership in the string, not a one-item list
+        ytype = t.dtype.char
+    else:
+        ytype = 'd'
+    y = zeros(t.shape, ytype)
+
+    # width must be between 0 and 1 inclusive
+    mask1 = (w > 1) | (w < 0)
+    place(y, mask1, nan)
+
+    # take t modulo 2*pi
+    tmod = mod(t, 2 * pi)
+
+    # on the interval 0 to width*2*pi function is
+    #  tmod / (pi*w) - 1
+    mask2 = (1 - mask1) & (tmod < w * 2 * pi)
+    tsub = extract(mask2, tmod)
+    wsub = extract(mask2, w)
+    place(y, mask2, tsub / (pi * wsub) - 1)
+
+    # on the interval width*2*pi to 2*pi function is
+    #  (pi*(w+1)-tmod) / (pi*(1-w))
+
+    mask3 = (1 - mask1) & (1 - mask2)
+    tsub = extract(mask3, tmod)
+    wsub = extract(mask3, w)
+    place(y, mask3, (pi * (wsub + 1) - tsub) / (pi * (1 - wsub)))
+    return y
+
+
+def square(t, duty=0.5):
+    """
+    Return a periodic square-wave waveform.
+
+    The square wave has a period ``2*pi``, has value +1 from 0 to
+    ``2*pi*duty`` and -1 from ``2*pi*duty`` to ``2*pi``. `duty` must be in
+    the interval [0,1].
+
+    Note that this is not band-limited.  It produces an infinite number
+    of harmonics, which are aliased back and forth across the frequency
+    spectrum.
+
+    Parameters
+    ----------
+    t : array_like
+        The input time array.
+    duty : array_like, optional
+        Duty cycle.  Default is 0.5 (50% duty cycle).
+        If an array, causes wave shape to change over time, and must be the
+        same length as t.
+
+    Returns
+    -------
+    y : ndarray
+        Output array containing the square waveform.
+
+    Examples
+    --------
+    A 5 Hz waveform sampled at 500 Hz for 1 second:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+    >>> t = np.linspace(0, 1, 500, endpoint=False)
+    >>> plt.plot(t, signal.square(2 * np.pi * 5 * t))
+    >>> plt.ylim(-2, 2)
+
+    A pulse-width modulated sine wave:
+
+    >>> plt.figure()
+    >>> sig = np.sin(2 * np.pi * t)
+    >>> pwm = signal.square(2 * np.pi * 30 * t, duty=(sig + 1)/2)
+    >>> plt.subplot(2, 1, 1)
+    >>> plt.plot(t, sig)
+    >>> plt.subplot(2, 1, 2)
+    >>> plt.plot(t, pwm)
+    >>> plt.ylim(-1.5, 1.5)
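+
+    A quick numeric check of the default 50% duty cycle: the wave is +1 on
+    the first half of the period and -1 on the second half:
+
+    >>> np.allclose(signal.square(np.array([0, np.pi/2, np.pi, 1.5*np.pi])),
+    ...             [1, 1, -1, -1])
+    True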
+
+    """
+    t, w = asarray(t), asarray(duty)
+    w = asarray(w + (t - t))
+    t = asarray(t + (w - w))
+    if t.dtype.char in 'fFdD':  # membership in the string, not a one-item list
+        ytype = t.dtype.char
+    else:
+        ytype = 'd'
+
+    y = zeros(t.shape, ytype)
+
+    # width must be between 0 and 1 inclusive
+    mask1 = (w > 1) | (w < 0)
+    place(y, mask1, nan)
+
+    # on the interval 0 to duty*2*pi function is 1
+    tmod = mod(t, 2 * pi)
+    mask2 = (1 - mask1) & (tmod < w * 2 * pi)
+    place(y, mask2, 1)
+
+    # on the interval duty*2*pi to 2*pi function is
+    #  (pi*(w+1)-tmod) / (pi*(1-w))
+    mask3 = (1 - mask1) & (1 - mask2)
+    place(y, mask3, -1)
+    return y
+
+
+def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False,
+               retenv=False):
+    """
+    Return a Gaussian modulated sinusoid:
+
+        ``exp(-a t^2) exp(1j*2*pi*fc*t)``.
+
+    If `retquad` is True, then return the real and imaginary parts
+    (in-phase and quadrature).
+    If `retenv` is True, then return the envelope (unmodulated signal).
+    Otherwise, return the real part of the modulated sinusoid.
+
+    Parameters
+    ----------
+    t : ndarray or the string 'cutoff'
+        Input array.
+    fc : float, optional
+        Center frequency (e.g. Hz).  Default is 1000.
+    bw : float, optional
+        Fractional bandwidth in frequency domain of pulse (e.g. Hz).
+        Default is 0.5.
+    bwr : float, optional
+        Reference level at which fractional bandwidth is calculated (dB).
+        Default is -6.
+    tpr : float, optional
+        If `t` is 'cutoff', then the function returns the cutoff
+        time for when the pulse amplitude falls below `tpr` (in dB).
+        Default is -60.
+    retquad : bool, optional
+        If True, return the quadrature (imaginary) as well as the real part
+        of the signal.  Default is False.
+    retenv : bool, optional
+        If True, return the envelope of the signal.  Default is False.
+
+    Returns
+    -------
+    yI : ndarray
+        Real part of signal.  Always returned.
+    yQ : ndarray
+        Imaginary part of signal.  Only returned if `retquad` is True.
+    yenv : ndarray
+        Envelope of signal.  Only returned if `retenv` is True.
+
+    See Also
+    --------
+    scipy.signal.morlet
+
+    Examples
+    --------
+    Plot real component, imaginary component, and envelope for a 5 Hz pulse,
+    sampled at 100 Hz for 2 seconds:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+    >>> t = np.linspace(-1, 1, 2 * 100, endpoint=False)
+    >>> i, q, e = signal.gausspulse(t, fc=5, retquad=True, retenv=True)
+    >>> plt.plot(t, i, t, q, t, e, '--')
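+
+    A quick numeric check that the time returned for ``t='cutoff'`` is where
+    the envelope has decayed to `tpr` (here the default -60 dB):
+
+    >>> tc = signal.gausspulse('cutoff', fc=5)
+    >>> _, env = signal.gausspulse(tc, fc=5, retenv=True)
+    >>> np.allclose(env, 10 ** (-60 / 20))
+    True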
+
+    """
+    if fc < 0:
+        raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc)
+    if bw <= 0:
+        raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." % bw)
+    if bwr >= 0:
+        raise ValueError("Reference level for bandwidth (bwr=%.2f) must "
+                         "be < 0 dB" % bwr)
+
+    # exp(-a t^2) <->  sqrt(pi/a) exp(-pi^2/a * f^2)  = g(f)
+
+    ref = pow(10.0, bwr / 20.0)
+    # fdel = fc*bw/2:  g(fdel) = ref --- solve this for a
+    #
+    # pi^2/a * fc^2 * bw^2 /4=-log(ref)
+    a = -(pi * fc * bw) ** 2 / (4.0 * log(ref))
+
+    if isinstance(t, str):
+        if t == 'cutoff':  # compute cut_off point
+            #  Solve exp(-a tc**2) = tref  for tc
+            #   tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20)
+            if tpr >= 0:
+                raise ValueError("Reference level for time cutoff must "
+                                 "be < 0 dB")
+            tref = pow(10.0, tpr / 20.0)
+            return sqrt(-log(tref) / a)
+        else:
+            raise ValueError("If `t` is a string, it must be 'cutoff'")
+
+    yenv = exp(-a * t * t)
+    yI = yenv * cos(2 * pi * fc * t)
+    yQ = yenv * sin(2 * pi * fc * t)
+    if not retquad and not retenv:
+        return yI
+    if not retquad and retenv:
+        return yI, yenv
+    if retquad and not retenv:
+        return yI, yQ
+    if retquad and retenv:
+        return yI, yQ, yenv
+
+
+def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True):
+    """Frequency-swept cosine generator.
+
+    In the following, 'Hz' should be interpreted as 'cycles per unit';
+    there is no requirement here that the unit is one second.  The
+    important distinction is that the units of rotation are cycles, not
+    radians. Likewise, `t` could be a measurement of space instead of time.
+
+    Parameters
+    ----------
+    t : array_like
+        Times at which to evaluate the waveform.
+    f0 : float
+        Frequency (e.g. Hz) at time t=0.
+    t1 : float
+        Time at which `f1` is specified.
+    f1 : float
+        Frequency (e.g. Hz) of the waveform at time `t1`.
+    method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional
+        Kind of frequency sweep.  If not given, `linear` is assumed.  See
+        Notes below for more details.
+    phi : float, optional
+        Phase offset, in degrees. Default is 0.
+    vertex_zero : bool, optional
+        This parameter is only used when `method` is 'quadratic'.
+        It determines whether the vertex of the parabola that is the graph
+        of the frequency is at t=0 or t=t1.
+
+    Returns
+    -------
+    y : ndarray
+        A numpy array containing the signal evaluated at `t` with the
+        requested time-varying frequency.  More precisely, the function
+        returns ``cos(phase + (pi/180)*phi)`` where `phase` is the integral
+        (from 0 to `t`) of ``2*pi*f(t)``. ``f(t)`` is defined below.
+
+    See Also
+    --------
+    sweep_poly
+
+    Notes
+    -----
+    There are four options for the `method`.  The following formulas give
+    the instantaneous frequency (in Hz) of the signal generated by
+    `chirp()`.  For convenience, the shorter names shown below may also be
+    used.
+
+    linear, lin, li:
+
+        ``f(t) = f0 + (f1 - f0) * t / t1``
+
+    quadratic, quad, q:
+
+        The graph of the frequency f(t) is a parabola through (0, f0) and
+        (t1, f1).  By default, the vertex of the parabola is at (0, f0).
+        If `vertex_zero` is False, then the vertex is at (t1, f1).  The
+        formula is:
+
+        if vertex_zero is True:
+
+            ``f(t) = f0 + (f1 - f0) * t**2 / t1**2``
+
+        else:
+
+            ``f(t) = f1 - (f1 - f0) * (t1 - t)**2 / t1**2``
+
+        To use a more general quadratic function, or an arbitrary
+        polynomial, use the function `scipy.signal.sweep_poly`.
+
+    logarithmic, log, lo:
+
+        ``f(t) = f0 * (f1/f0)**(t/t1)``
+
+        f0 and f1 must be nonzero and have the same sign.
+
+        This signal is also known as a geometric or exponential chirp.
+
+    hyperbolic, hyp:
+
+        ``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)``
+
+        f0 and f1 must be nonzero.
+
+    Examples
+    --------
+    The following will be used in the examples:
+
+    >>> import numpy as np
+    >>> from scipy.signal import chirp, spectrogram
+    >>> import matplotlib.pyplot as plt
+
+    For the first example, we'll plot the waveform for a linear chirp
+    from 6 Hz to 1 Hz over 10 seconds:
+
+    >>> t = np.linspace(0, 10, 1500)
+    >>> w = chirp(t, f0=6, f1=1, t1=10, method='linear')
+    >>> plt.plot(t, w)
+    >>> plt.title("Linear Chirp, f(0)=6, f(10)=1")
+    >>> plt.xlabel('t (sec)')
+    >>> plt.show()
+
+    For the remaining examples, we'll use higher frequency ranges,
+    and demonstrate the result using `scipy.signal.spectrogram`.
+    We'll use a 4 second interval sampled at 7200 Hz.
+
+    >>> fs = 7200
+    >>> T = 4
+    >>> t = np.arange(0, int(T*fs)) / fs
+
+    We'll use this function to plot the spectrogram in each example.
+
+    >>> def plot_spectrogram(title, w, fs):
+    ...     ff, tt, Sxx = spectrogram(w, fs=fs, nperseg=256, nfft=576)
+    ...     fig, ax = plt.subplots()
+    ...     ax.pcolormesh(tt, ff[:145], Sxx[:145], cmap='gray_r',
+    ...                   shading='gouraud')
+    ...     ax.set_title(title)
+    ...     ax.set_xlabel('t (sec)')
+    ...     ax.set_ylabel('Frequency (Hz)')
+    ...     ax.grid(True)
+    ...
+
+    Quadratic chirp from 1500 Hz to 250 Hz
+    (vertex of the parabolic curve of the frequency is at t=0):
+
+    >>> w = chirp(t, f0=1500, f1=250, t1=T, method='quadratic')
+    >>> plot_spectrogram(f'Quadratic Chirp, f(0)=1500, f({T})=250', w, fs)
+    >>> plt.show()
+
+    Quadratic chirp from 1500 Hz to 250 Hz
+    (vertex of the parabolic curve of the frequency is at t=T):
+
+    >>> w = chirp(t, f0=1500, f1=250, t1=T, method='quadratic',
+    ...           vertex_zero=False)
+    >>> plot_spectrogram(f'Quadratic Chirp, f(0)=1500, f({T})=250\\n' +
+    ...                  '(vertex_zero=False)', w, fs)
+    >>> plt.show()
+
+    Logarithmic chirp from 1500 Hz to 250 Hz:
+
+    >>> w = chirp(t, f0=1500, f1=250, t1=T, method='logarithmic')
+    >>> plot_spectrogram(f'Logarithmic Chirp, f(0)=1500, f({T})=250', w, fs)
+    >>> plt.show()
+
+    Hyperbolic chirp from 1500 Hz to 250 Hz:
+
+    >>> w = chirp(t, f0=1500, f1=250, t1=T, method='hyperbolic')
+    >>> plot_spectrogram(f'Hyperbolic Chirp, f(0)=1500, f({T})=250', w, fs)
+    >>> plt.show()
+
+    """
+    # 'phase' is computed in _chirp_phase, to make testing easier.
+    phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero)
+    # Convert phi to radians.
+    phi *= pi / 180
+    return cos(phase + phi)
+
+
+def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True):
+    """
+    Calculate the phase used by `chirp` to generate its output.
+
+    See `chirp` for a description of the arguments.
+
+    """
+    t = asarray(t)
+    f0 = float(f0)
+    t1 = float(t1)
+    f1 = float(f1)
+    if method in ['linear', 'lin', 'li']:
+        beta = (f1 - f0) / t1
+        phase = 2 * pi * (f0 * t + 0.5 * beta * t * t)
+
+    elif method in ['quadratic', 'quad', 'q']:
+        beta = (f1 - f0) / (t1 ** 2)
+        if vertex_zero:
+            phase = 2 * pi * (f0 * t + beta * t ** 3 / 3)
+        else:
+            phase = 2 * pi * (f1 * t + beta * ((t1 - t) ** 3 - t1 ** 3) / 3)
+
+    elif method in ['logarithmic', 'log', 'lo']:
+        if f0 * f1 <= 0.0:
+            raise ValueError("For a logarithmic chirp, f0 and f1 must be "
+                             "nonzero and have the same sign.")
+        if f0 == f1:
+            phase = 2 * pi * f0 * t
+        else:
+            beta = t1 / log(f1 / f0)
+            phase = 2 * pi * beta * f0 * (pow(f1 / f0, t / t1) - 1.0)
+
+    elif method in ['hyperbolic', 'hyp']:
+        if f0 == 0 or f1 == 0:
+            raise ValueError("For a hyperbolic chirp, f0 and f1 must be "
+                             "nonzero.")
+        if f0 == f1:
+            # Degenerate case: constant frequency.
+            phase = 2 * pi * f0 * t
+        else:
+            # Singular point: the instantaneous frequency blows up
+            # when t == sing.
+            sing = -f1 * t1 / (f0 - f1)
+            phase = 2 * pi * (-sing * f0) * log(np.abs(1 - t/sing))
+
+    else:
+        raise ValueError("method must be 'linear', 'quadratic', 'logarithmic',"
+                         " or 'hyperbolic', but a value of %r was given."
+                         % method)
+
+    return phase
+
+
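+# A minimal illustrative sanity check (hypothetical helper, not part of
+# SciPy). It assumes only numpy and `_chirp_phase` above, and verifies that
+# the numerical derivative of a linear chirp's phase matches the documented
+# instantaneous frequency ``f(t) = f0 + (f1 - f0) * t / t1``.
+def _demo_linear_chirp_frequency():
+    t = np.linspace(0, 10, 100001)
+    f0, f1, t1 = 6.0, 1.0, 10.0
+    phase = _chirp_phase(t, f0, t1, f1, method='linear')
+    # Instantaneous frequency is the derivative of the phase over 2*pi.
+    f_inst = np.gradient(phase, t) / (2 * np.pi)
+    f_expected = f0 + (f1 - f0) * t / t1
+    assert np.allclose(f_inst, f_expected, atol=1e-3)
+
+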
+def sweep_poly(t, poly, phi=0):
+    """
+    Frequency-swept cosine generator, with a time-dependent frequency.
+
+    This function generates a sinusoidal function whose instantaneous
+    frequency varies with time.  The frequency at time `t` is given by
+    the polynomial `poly`.
+
+    Parameters
+    ----------
+    t : ndarray
+        Times at which to evaluate the waveform.
+    poly : 1-D array_like or instance of numpy.poly1d
+        The desired frequency expressed as a polynomial.  If `poly` is
+        a list or ndarray of length n, then the elements of `poly` are
+        the coefficients of the polynomial, and the instantaneous
+        frequency is
+
+          ``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
+
+        If `poly` is an instance of numpy.poly1d, then the
+        instantaneous frequency is
+
+          ``f(t) = poly(t)``
+
+    phi : float, optional
+        Phase offset, in degrees. Default is 0.
+
+    Returns
+    -------
+    sweep_poly : ndarray
+        A numpy array containing the signal evaluated at `t` with the
+        requested time-varying frequency.  More precisely, the function
+        returns ``cos(phase + (pi/180)*phi)``, where `phase` is the integral
+        (from 0 to t) of ``2 * pi * f(t)``; ``f(t)`` is defined above.
+
+    See Also
+    --------
+    chirp
+
+    Notes
+    -----
+    .. versionadded:: 0.8.0
+
+    If `poly` is a list or ndarray of length `n`, then the elements of
+    `poly` are the coefficients of the polynomial, and the instantaneous
+    frequency is:
+
+        ``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
+
+    If `poly` is an instance of `numpy.poly1d`, then the instantaneous
+    frequency is:
+
+          ``f(t) = poly(t)``
+
+    Finally, the output `s` is:
+
+        ``cos(phase + (pi/180)*phi)``
+
+    where `phase` is the integral from 0 to `t` of ``2 * pi * f(t)``,
+    ``f(t)`` as defined above.
+
+    Examples
+    --------
+    Compute the waveform with instantaneous frequency::
+
+        f(t) = 0.025*t**3 - 0.36*t**2 + 1.25*t + 2
+
+    over the interval 0 <= t <= 10.
+
+    >>> import numpy as np
+    >>> from scipy.signal import sweep_poly
+    >>> p = np.poly1d([0.025, -0.36, 1.25, 2.0])
+    >>> t = np.linspace(0, 10, 5001)
+    >>> w = sweep_poly(t, p)
+
+    Plot it:
+
+    >>> import matplotlib.pyplot as plt
+    >>> plt.subplot(2, 1, 1)
+    >>> plt.plot(t, w)
+    >>> plt.title("Sweep Poly\\nwith frequency " +
+    ...           "$f(t) = 0.025t^3 - 0.36t^2 + 1.25t + 2$")
+    >>> plt.subplot(2, 1, 2)
+    >>> plt.plot(t, p(t), 'r', label='f(t)')
+    >>> plt.legend()
+    >>> plt.xlabel('t')
+    >>> plt.tight_layout()
+    >>> plt.show()
+
+    """
+    # 'phase' is computed in _sweep_poly_phase, to make testing easier.
+    phase = _sweep_poly_phase(t, poly)
+    # Convert to radians.
+    phi *= pi / 180
+    return cos(phase + phi)
+
+
+def _sweep_poly_phase(t, poly):
+    """
+    Calculate the phase used by sweep_poly to generate its output.
+
+    See `sweep_poly` for a description of the arguments.
+
+    """
+    # polyint handles lists, ndarrays and instances of poly1d automatically.
+    intpoly = polyint(poly)
+    phase = 2 * pi * polyval(intpoly, t)
+    return phase
+
+
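+# A small illustrative check (hypothetical helper, not part of SciPy),
+# assuming only numpy. For ``f(t) = 2*t + 3`` the phase returned by
+# `_sweep_poly_phase` equals ``2*pi*(t**2 + 3*t)``, i.e. the integral of
+# ``2*pi*f(t)`` from 0 to t.
+def _demo_sweep_poly_phase():
+    t = np.linspace(0, 5, 11)
+    phase = _sweep_poly_phase(t, [2.0, 3.0])  # f(t) = 2*t + 3
+    assert np.allclose(phase, 2 * np.pi * (t**2 + 3 * t))
+
+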
+def unit_impulse(shape, idx=None, dtype=float):
+    """
+    Unit impulse signal (discrete delta function) or unit basis vector.
+
+    Parameters
+    ----------
+    shape : int or tuple of int
+        Number of samples in the output (1-D), or a tuple that represents the
+        shape of the output (N-D).
+    idx : None or int or tuple of int or 'mid', optional
+        Index at which the value is 1.  If None, defaults to the 0th element.
+        If ``idx='mid'``, the impulse will be centered at ``shape // 2`` in
+        all dimensions.  If an int, the impulse will be at `idx` in all
+        dimensions.
+    dtype : data-type, optional
+        The desired data-type for the array, e.g., ``numpy.int8``.  Default is
+        ``numpy.float64``.
+
+    Returns
+    -------
+    y : ndarray
+        Output array containing an impulse signal.
+
+    Notes
+    -----
+    The 1D case is also known as the Kronecker delta.
+
+    .. versionadded:: 0.19.0
+
+    Examples
+    --------
+    An impulse at the 0th element (:math:`\\delta[n]`):
+
+    >>> from scipy import signal
+    >>> signal.unit_impulse(8)
+    array([ 1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.])
+
+    Impulse offset by 2 samples (:math:`\\delta[n-2]`):
+
+    >>> signal.unit_impulse(7, 2)
+    array([ 0.,  0.,  1.,  0.,  0.,  0.,  0.])
+
+    2-dimensional impulse, centered:
+
+    >>> signal.unit_impulse((3, 3), 'mid')
+    array([[ 0.,  0.,  0.],
+           [ 0.,  1.,  0.],
+           [ 0.,  0.,  0.]])
+
+    Impulse at (2, 2), using broadcasting:
+
+    >>> signal.unit_impulse((4, 4), 2)
+    array([[ 0.,  0.,  0.,  0.],
+           [ 0.,  0.,  0.,  0.],
+           [ 0.,  0.,  1.,  0.],
+           [ 0.,  0.,  0.,  0.]])
+
+    Plot the impulse response of a 4th-order Butterworth lowpass filter:
+
+    >>> imp = signal.unit_impulse(100, 'mid')
+    >>> b, a = signal.butter(4, 0.2)
+    >>> response = signal.lfilter(b, a, imp)
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> plt.plot(np.arange(-50, 50), imp)
+    >>> plt.plot(np.arange(-50, 50), response)
+    >>> plt.margins(0.1, 0.1)
+    >>> plt.xlabel('Time [samples]')
+    >>> plt.ylabel('Amplitude')
+    >>> plt.grid(True)
+    >>> plt.show()
+
+    """
+    out = zeros(shape, dtype)
+
+    shape = np.atleast_1d(shape)
+
+    if idx is None:
+        idx = (0,) * len(shape)
+    elif idx == 'mid':
+        idx = tuple(shape // 2)
+    elif not hasattr(idx, "__iter__"):
+        idx = (idx,) * len(shape)
+
+    out[idx] = 1
+    return out
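+
+
+# A tiny illustrative check (hypothetical helper, not part of SciPy): with
+# ``idx='mid'`` the single 1 lands at ``shape // 2`` in every dimension.
+def _demo_unit_impulse_mid():
+    imp = unit_impulse((5, 5), 'mid')
+    assert imp[2, 2] == 1 and imp.sum() == 1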
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/_wavelets.py b/__packaged__/coreml/.python_dependencies/scipy/signal/_wavelets.py
new file mode 100644
index 00000000..279654ec
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/_wavelets.py
@@ -0,0 +1,492 @@
+import numpy as np
+from scipy.linalg import eig
+from scipy.special import comb
+from scipy.signal import convolve
+
+__all__ = ['daub', 'qmf', 'cascade', 'morlet', 'ricker', 'morlet2', 'cwt']
+
+
+def daub(p):
+    """
+    The coefficients for the FIR low-pass filter producing Daubechies wavelets.
+
+    p>=1 gives the order of the zero at f=1/2.
+    There are 2p filter coefficients.
+
+    Parameters
+    ----------
+    p : int
+        Order of the zero at f=1/2, can have values from 1 to 34.
+
+    Returns
+    -------
+    daub : ndarray
+        The 2p coefficients of the FIR low-pass filter.
+
+    """
+    sqrt = np.sqrt
+    if p < 1:
+        raise ValueError("p must be at least 1.")
+    if p == 1:
+        c = 1 / sqrt(2)
+        return np.array([c, c])
+    elif p == 2:
+        f = sqrt(2) / 8
+        c = sqrt(3)
+        return f * np.array([1 + c, 3 + c, 3 - c, 1 - c])
+    elif p == 3:
+        tmp = 12 * sqrt(10)
+        z1 = 1.5 + sqrt(15 + tmp) / 6 - 1j * (sqrt(15) + sqrt(tmp - 15)) / 6
+        z1c = np.conj(z1)
+        f = sqrt(2) / 8
+        d0 = np.real((1 - z1) * (1 - z1c))
+        a0 = np.real(z1 * z1c)
+        a1 = 2 * np.real(z1)
+        return f / d0 * np.array([a0, 3 * a0 - a1, 3 * a0 - 3 * a1 + 1,
+                                  a0 - 3 * a1 + 3, 3 - a1, 1])
+    elif p < 35:
+        # construct polynomial and factor it
+        P = [comb(p - 1 + k, k, exact=1) for k in range(p)][::-1]
+        yj = np.roots(P)
+        # for each root, compute two z roots, select the one with |z|>1
+        # Build up final polynomial
+        c = np.poly1d([1, 1])**p
+        q = np.poly1d([1])
+        for k in range(p - 1):
+            yval = yj[k]
+            part = 2 * sqrt(yval * (yval - 1))
+            const = 1 - 2 * yval
+            z1 = const + part
+            if (abs(z1)) < 1:
+                z1 = const - part
+            q = q * [1, -z1]
+
+        q = c * np.real(q)
+        # Normalize result
+        q = q / np.sum(q) * sqrt(2)
+        return q.c[::-1]
+    else:
+        raise ValueError("Polynomial factorization does not work "
+                         "well for p too large.")
+
+
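+# An illustrative sketch (hypothetical helper, not part of SciPy), assuming
+# only numpy. Daubechies low-pass coefficients satisfy ``sum(h) == sqrt(2)``
+# and, by orthonormality, ``sum(h**2) == 1``.
+def _demo_daub_normalization():
+    for p in (1, 2, 3, 4):
+        h = daub(p)
+        assert np.isclose(np.sum(h), np.sqrt(2))
+        assert np.isclose(np.sum(h * h), 1.0)
+
+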
+def qmf(hk):
+    """
+    Return the high-pass QMF filter from a low-pass filter.
+
+    Parameters
+    ----------
+    hk : array_like
+        Coefficients of the low-pass filter.
+
+    Returns
+    -------
+    ndarray
+        High-pass filter coefficients.
+
+    """
+    N = len(hk) - 1
+    asgn = [{0: 1, 1: -1}[k % 2] for k in range(N + 1)]
+    return hk[::-1] * np.array(asgn)
+
+
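+# An illustrative sketch (hypothetical helper, not part of SciPy). The QMF
+# high-pass filter is the reversed low-pass filter with alternating signs,
+# and the two are orthogonal for an even-length filter such as daub(2).
+def _demo_qmf_orthogonality():
+    hk = daub(2)
+    gk = qmf(hk)
+    assert np.allclose(np.dot(hk, gk), 0.0)
+
+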
+def cascade(hk, J=7):
+    """
+    Return (x, phi, psi) at dyadic points ``K/2**J`` from filter coefficients.
+
+    Parameters
+    ----------
+    hk : array_like
+        Coefficients of low-pass filter.
+    J : int, optional
+        Values will be computed at grid points ``K/2**J``. Default is 7.
+
+    Returns
+    -------
+    x : ndarray
+        The dyadic points ``K/2**J`` for ``K=0...N * (2**J)-1`` where
+        ``len(hk) = len(gk) = N+1``.
+    phi : ndarray
+        The scaling function ``phi(x)`` at `x`:
+        ``phi(x) = sum(hk * phi(2x-k))``, where k is from 0 to N.
+    psi : ndarray
+        The wavelet function ``psi(x)`` at `x`:
+        ``psi(x) = sum(gk * phi(2x-k))``, where k is from 0 to N and `gk`
+        is the high-pass QMF filter derived from `hk`.
+
+    Notes
+    -----
+    The algorithm uses the vector cascade algorithm described by Strang and
+    Nguyen in "Wavelets and Filter Banks".  It builds a dictionary of values
+    and slices for quick reuse, then inserts the computed vectors into the
+    final output arrays at the end.
+
+    """
+    N = len(hk) - 1
+
+    if (J > 30 - np.log2(N + 1)):
+        raise ValueError("Too many levels.")
+    if (J < 1):
+        raise ValueError("Too few levels.")
+
+    # construct matrices needed
+    nn, kk = np.ogrid[:N, :N]
+    s2 = np.sqrt(2)
+    # append a zero so that take works
+    thk = np.r_[hk, 0]
+    gk = qmf(hk)
+    tgk = np.r_[gk, 0]
+
+    indx1 = np.clip(2 * nn - kk, -1, N + 1)
+    indx2 = np.clip(2 * nn - kk + 1, -1, N + 1)
+    m = np.empty((2, 2, N, N), 'd')
+    m[0, 0] = np.take(thk, indx1, 0)
+    m[0, 1] = np.take(thk, indx2, 0)
+    m[1, 0] = np.take(tgk, indx1, 0)
+    m[1, 1] = np.take(tgk, indx2, 0)
+    m *= s2
+
+    # construct the grid of points
+    x = np.arange(0, N * (1 << J), dtype=float) / (1 << J)
+    phi = 0 * x
+
+    psi = 0 * x
+
+    # find phi0, and phi1
+    lam, v = eig(m[0, 0])
+    ind = np.argmin(np.absolute(lam - 1))
+    # a dictionary with a binary representation of the
+    #   evaluation points x < 1 -- i.e. position is 0.xxxx
+    v = np.real(v[:, ind])
+    # need scaling function to integrate to 1 so find
+    #  eigenvector normalized to sum(v,axis=0)=1
+    sm = np.sum(v)
+    if sm < 0:  # need scaling function to integrate to 1
+        v = -v
+        sm = -sm
+    bitdic = {'0': v / sm}
+    bitdic['1'] = np.dot(m[0, 1], bitdic['0'])
+    step = 1 << J
+    phi[::step] = bitdic['0']
+    phi[(1 << (J - 1))::step] = bitdic['1']
+    psi[::step] = np.dot(m[1, 0], bitdic['0'])
+    psi[(1 << (J - 1))::step] = np.dot(m[1, 1], bitdic['0'])
+    # descend down the levels inserting more and more values
+    #  into bitdic -- store the values in the correct location once we
+    #  have computed them -- stored in the dictionary
+    #  for quicker use later.
+    prevkeys = ['1']
+    for level in range(2, J + 1):
+        newkeys = ['%d%s' % (xx, yy) for xx in [0, 1] for yy in prevkeys]
+        fac = 1 << (J - level)
+        for key in newkeys:
+            # convert key to number
+            num = 0
+            for pos in range(level):
+                if key[pos] == '1':
+                    num += (1 << (level - 1 - pos))
+            pastphi = bitdic[key[1:]]
+            ii = int(key[0])
+            temp = np.dot(m[0, ii], pastphi)
+            bitdic[key] = temp
+            phi[num * fac::step] = temp
+            psi[num * fac::step] = np.dot(m[1, ii], pastphi)
+        prevkeys = newkeys
+
+    return x, phi, psi
+
+
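+# An illustrative check (hypothetical helper, not part of SciPy). Daubechies
+# scaling functions form a partition of unity, so ``sum(phi) * dx`` over the
+# dyadic grid returned by `cascade` approximates the integral of phi, which
+# is 1.
+def _demo_cascade_integral():
+    x, phi, psi = cascade(daub(2), J=7)
+    dx = x[1] - x[0]
+    assert abs(np.sum(phi) * dx - 1.0) < 1e-6
+
+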
+def morlet(M, w=5.0, s=1.0, complete=True):
+    """
+    Complex Morlet wavelet.
+
+    Parameters
+    ----------
+    M : int
+        Length of the wavelet.
+    w : float, optional
+        Omega0. Default is 5
+    s : float, optional
+        Scaling factor, windowed from ``-s*2*pi`` to ``+s*2*pi``. Default is 1.
+    complete : bool, optional
+        Whether to use the complete or the standard version.
+
+    Returns
+    -------
+    morlet : (M,) ndarray
+
+    See Also
+    --------
+    morlet2 : Implementation of Morlet wavelet, compatible with `cwt`.
+    scipy.signal.gausspulse
+
+    Notes
+    -----
+    The standard version::
+
+        pi**-0.25 * exp(1j*w*x) * exp(-0.5*(x**2))
+
+    This commonly used wavelet is often referred to simply as the
+    Morlet wavelet.  Note that this simplified version can cause
+    admissibility problems at low values of `w`.
+
+    The complete version::
+
+        pi**-0.25 * (exp(1j*w*x) - exp(-0.5*(w**2))) * exp(-0.5*(x**2))
+
+    This version has a correction
+    term to improve admissibility. For `w` greater than 5, the
+    correction term is negligible.
+
+    Note that the energy of the returned wavelet is not normalised
+    according to `s`.
+
+    The fundamental frequency of this wavelet in Hz is given
+    by ``f = 2*s*w*r / M`` where `r` is the sampling rate.
+
+    Note: This function was created before `cwt` and is not compatible
+    with it.
+
+    Examples
+    --------
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+
+    >>> M = 100
+    >>> s = 4.0
+    >>> w = 2.0
+    >>> wavelet = signal.morlet(M, w=w, s=s)
+    >>> plt.plot(wavelet)
+    >>> plt.show()
+
+    """
+    x = np.linspace(-s * 2 * np.pi, s * 2 * np.pi, M)
+    output = np.exp(1j * w * x)
+
+    if complete:
+        output -= np.exp(-0.5 * (w**2))
+
+    output *= np.exp(-0.5 * (x**2)) * np.pi**(-0.25)
+
+    return output
+
+
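+# An illustrative sketch (hypothetical helper, not part of SciPy). For
+# ``w >= 5`` the admissibility correction term ``exp(-0.5*w**2)`` is tiny,
+# so the complete and standard Morlet wavelets nearly coincide.
+def _demo_morlet_correction_term():
+    complete = morlet(256, w=5.0, complete=True)
+    standard = morlet(256, w=5.0, complete=False)
+    assert np.max(np.abs(complete - standard)) < 1e-5
+
+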
+def ricker(points, a):
+    """
+    Return a Ricker wavelet, also known as the "Mexican hat wavelet".
+
+    It models the function:
+
+        ``A * (1 - (x/a)**2) * exp(-0.5*(x/a)**2)``,
+
+    where ``A = 2/(sqrt(3*a)*(pi**0.25))``.
+
+    Parameters
+    ----------
+    points : int
+        Number of points in `vector`.
+        Will be centered around 0.
+    a : scalar
+        Width parameter of the wavelet.
+
+    Returns
+    -------
+    vector : (N,) ndarray
+        Array of length `points` in the shape of a Ricker curve.
+
+    Examples
+    --------
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+
+    >>> points = 100
+    >>> a = 4.0
+    >>> vec2 = signal.ricker(points, a)
+    >>> print(len(vec2))
+    100
+    >>> plt.plot(vec2)
+    >>> plt.show()
+
+    """
+    A = 2 / (np.sqrt(3 * a) * (np.pi**0.25))
+    wsq = a**2
+    vec = np.arange(0, points) - (points - 1.0) / 2
+    xsq = vec**2
+    mod = (1 - xsq / wsq)
+    gauss = np.exp(-xsq / (2 * wsq))
+    total = A * mod * gauss
+    return total
+
+
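+# An illustrative check (hypothetical helper, not part of SciPy). At its
+# center (x = 0) the Ricker wavelet attains the documented peak amplitude
+# ``A = 2 / (sqrt(3*a) * pi**0.25)``.
+def _demo_ricker_peak():
+    points, a = 101, 4.0
+    vec = ricker(points, a)
+    A = 2 / (np.sqrt(3 * a) * np.pi**0.25)
+    assert np.isclose(vec[points // 2], A)
+
+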
+def morlet2(M, s, w=5):
+    """
+    Complex Morlet wavelet, designed to work with `cwt`.
+
+    Returns the complete version of the Morlet wavelet, normalised
+    according to `s`::
+
+        exp(1j*w*x/s) * exp(-0.5*(x/s)**2) * pi**(-0.25) * sqrt(1/s)
+
+    Parameters
+    ----------
+    M : int
+        Length of the wavelet.
+    s : float
+        Width parameter of the wavelet.
+    w : float, optional
+        Omega0. Default is 5
+
+    Returns
+    -------
+    morlet : (M,) ndarray
+
+    See Also
+    --------
+    morlet : Implementation of Morlet wavelet, incompatible with `cwt`
+
+    Notes
+    -----
+
+    .. versionadded:: 1.4.0
+
+    This function was designed to work with `cwt`. Because `morlet2`
+    returns an array of complex numbers, the `dtype` argument of `cwt`
+    should be set to `complex128` for best results.
+
+    Note the difference in implementation with `morlet`.
+    The fundamental frequency of this wavelet in Hz is given by::
+
+        f = w*fs / (2*s*np.pi)
+
+    where ``fs`` is the sampling rate and `s` is the wavelet width parameter.
+    Similarly we can get the wavelet width parameter at ``f``::
+
+        s = w*fs / (2*f*np.pi)
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+
+    >>> M = 100
+    >>> s = 4.0
+    >>> w = 2.0
+    >>> wavelet = signal.morlet2(M, s, w)
+    >>> plt.plot(abs(wavelet))
+    >>> plt.show()
+
+    This example shows basic use of `morlet2` with `cwt` in time-frequency
+    analysis:
+
+    >>> t, dt = np.linspace(0, 1, 200, retstep=True)
+    >>> fs = 1/dt
+    >>> w = 6.
+    >>> sig = np.cos(2*np.pi*(50 + 10*t)*t) + np.sin(40*np.pi*t)
+    >>> freq = np.linspace(1, fs/2, 100)
+    >>> widths = w*fs / (2*freq*np.pi)
+    >>> cwtm = signal.cwt(sig, signal.morlet2, widths, w=w)
+    >>> plt.pcolormesh(t, freq, np.abs(cwtm), cmap='viridis', shading='gouraud')
+    >>> plt.show()
+
+    """
+    x = np.arange(0, M) - (M - 1.0) / 2
+    x = x / s
+    wavelet = np.exp(1j * w * x) * np.exp(-0.5 * x**2) * np.pi**(-0.25)
+    output = np.sqrt(1/s) * wavelet
+    return output
+
+
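+# An illustrative sketch (hypothetical helper, not part of SciPy). Choosing
+# ``s = w*fs / (2*f*np.pi)`` as in the `morlet2` docstring centers the
+# wavelet's spectrum at frequency f, which an FFT peak confirms.
+def _demo_morlet2_center_frequency():
+    M, fs, f, w = 1024, 1.0, 0.05, 6.0
+    s = w * fs / (2 * f * np.pi)
+    spec = np.abs(np.fft.fft(morlet2(M, s, w)))
+    freqs = np.fft.fftfreq(M, d=1 / fs)
+    peak = freqs[np.argmax(spec)]
+    assert abs(peak - f) < 2 * fs / M
+
+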
+def cwt(data, wavelet, widths, dtype=None, **kwargs):
+    """
+    Continuous wavelet transform.
+
+    Performs a continuous wavelet transform on `data`,
+    using the `wavelet` function. A CWT performs a convolution
+    with `data` using the `wavelet` function, which is characterized
+    by a width parameter and length parameter. The `wavelet` function
+    is allowed to be complex.
+
+    Parameters
+    ----------
+    data : (N,) ndarray
+        Data on which to perform the transform.
+    wavelet : function
+        Wavelet function, which should take 2 arguments.
+        The first argument is the number of points that the returned vector
+        will have (len(wavelet(length,width)) == length).
+        The second is a width parameter, defining the size of the wavelet
+        (e.g. standard deviation of a gaussian). See `ricker`, which
+        satisfies these requirements.
+    widths : (M,) sequence
+        Widths to use for transform.
+    dtype : data-type, optional
+        The desired data type of output. Defaults to ``float64`` if the
+        output of `wavelet` is real and ``complex128`` if it is complex.
+
+        .. versionadded:: 1.4.0
+
+    kwargs
+        Keyword arguments passed to wavelet function.
+
+        .. versionadded:: 1.4.0
+
+    Returns
+    -------
+    cwt : (M, N) ndarray
+        Will have shape of (len(widths), len(data)).
+
+    Notes
+    -----
+
+    .. versionadded:: 1.4.0
+
+    For non-symmetric, complex-valued wavelets, the input signal is convolved
+    with the time-reversed complex-conjugate of the wavelet data [1]_.
+
+    ::
+
+        length = min(10 * width[ii], len(data))
+        cwt[ii,:] = signal.convolve(data, np.conj(wavelet(length, width[ii],
+                                        **kwargs))[::-1], mode='same')
+
+    References
+    ----------
+    .. [1] S. Mallat, "A Wavelet Tour of Signal Processing (3rd Edition)",
+        Academic Press, 2009.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+    >>> t = np.linspace(-1, 1, 200, endpoint=False)
+    >>> sig  = np.cos(2 * np.pi * 7 * t) + signal.gausspulse(t - 0.4, fc=2)
+    >>> widths = np.arange(1, 31)
+    >>> cwtmatr = signal.cwt(sig, signal.ricker, widths)
+
+    .. note:: For cwt matrix plotting it is advisable to flip the y-axis
+
+    >>> cwtmatr_yflip = np.flipud(cwtmatr)
+    >>> plt.imshow(cwtmatr_yflip, extent=[-1, 1, 1, 31], cmap='PRGn', aspect='auto',
+    ...            vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max())
+    >>> plt.show()
+    """
+    # Determine output type
+    if dtype is None:
+        if np.asarray(wavelet(1, widths[0], **kwargs)).dtype.char in 'FDG':
+            dtype = np.complex128
+        else:
+            dtype = np.float64
+
+    output = np.empty((len(widths), len(data)), dtype=dtype)
+    for ind, width in enumerate(widths):
+        N = np.min([10 * width, len(data)])
+        wavelet_data = np.conj(wavelet(N, width, **kwargs)[::-1])
+        output[ind] = convolve(data, wavelet_data, mode='same')
+    return output
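+
+
+# An illustrative shape check (hypothetical helper, not part of SciPy):
+# `cwt` returns one row of length ``len(data)`` per entry in `widths`.
+def _demo_cwt_shape():
+    data = np.cos(2 * np.pi * 0.05 * np.arange(128))
+    widths = np.arange(1, 6)
+    assert cwt(data, ricker, widths).shape == (5, 128)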
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/bsplines.py b/__packaged__/coreml/.python_dependencies/scipy/signal/bsplines.py
new file mode 100644
index 00000000..d9043f5f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/bsplines.py
@@ -0,0 +1,32 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.signal` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _bsplines
+
+__all__ = [  # noqa: F822
+    'spline_filter', 'bspline', 'gauss_spline', 'cubic', 'quadratic',
+    'cspline1d', 'qspline1d', 'cspline1d_eval', 'qspline1d_eval',
+    'logical_and', 'zeros_like', 'piecewise', 'array', 'arctan2',
+    'tan', 'arange', 'floor', 'exp', 'greater', 'less', 'add',
+    'less_equal', 'greater_equal', 'cspline2d', 'sepfir2d', 'comb',
+    'float_factorial'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.signal.bsplines is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.signal instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.signal` namespace, "
+                  "the `scipy.signal.bsplines` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_bsplines, name)
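+
+
+# An illustrative sketch (hypothetical helper, not part of SciPy). Attribute
+# access through this deprecated shim emits a DeprecationWarning:
+def _demo_deprecation_warning():
+    with warnings.catch_warnings(record=True) as caught:
+        warnings.simplefilter("always")
+        __getattr__('gauss_spline')
+    assert any(issubclass(c.category, DeprecationWarning) for c in caught)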
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/filter_design.py b/__packaged__/coreml/.python_dependencies/scipy/signal/filter_design.py
new file mode 100644
index 00000000..18bbb137
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/filter_design.py
@@ -0,0 +1,42 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.signal` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _filter_design
+
+__all__ = [  # noqa: F822
+    'findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize',
+    'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign',
+    'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel',
+    'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord',
+    'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap',
+    'BadCoefficients', 'freqs_zpk', 'freqz_zpk',
+    'tf2sos', 'sos2tf', 'zpk2sos', 'sos2zpk', 'group_delay',
+    'sosfreqz', 'iirnotch', 'iirpeak', 'bilinear_zpk',
+    'lp2lp_zpk', 'lp2hp_zpk', 'lp2bp_zpk', 'lp2bs_zpk',
+    'gammatone', 'iircomb',
+    'atleast_1d', 'poly', 'polyval', 'roots', 'resize', 'absolute',
+    'logspace', 'tan', 'log10', 'arctan', 'arcsinh', 'exp', 'arccosh',
+    'ceil', 'conjugate', 'append', 'prod', 'full', 'array', 'mintypecode',
+    'npp_polyval', 'polyvalfromroots', 'optimize', 'sp_fft', 'comb',
+    'float_factorial', 'abs', 'maxflat', 'yulewalk',
+    'EPSILON', 'filter_dict', 'band_dict', 'bessel_norms'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.signal.filter_design is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.signal instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.signal` namespace, "
+                  "the `scipy.signal.filter_design` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_filter_design, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/fir_filter_design.py b/__packaged__/coreml/.python_dependencies/scipy/signal/fir_filter_design.py
new file mode 100644
index 00000000..2fe752c2
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/fir_filter_design.py
@@ -0,0 +1,33 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.signal` namespace for importing the functions
+# included below.
+
+
+import warnings
+from . import _fir_filter_design
+
+
+__all__ = [  # noqa: F822
+    'kaiser_beta', 'kaiser_atten', 'kaiserord',
+    'firwin', 'firwin2', 'remez', 'firls', 'minimum_phase',
+    'ceil', 'log', 'irfft', 'fft', 'ifft', 'sinc', 'toeplitz',
+    'hankel', 'solve', 'LinAlgError', 'LinAlgWarning', 'lstsq'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.signal.fir_filter_design is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.signal instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.signal` namespace, "
+                  "the `scipy.signal.fir_filter_design` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_fir_filter_design, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/lti_conversion.py b/__packaged__/coreml/.python_dependencies/scipy/signal/lti_conversion.py
new file mode 100644
index 00000000..e8676516
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/lti_conversion.py
@@ -0,0 +1,30 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.signal` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _lti_conversion
+
+
+__all__ = [  # noqa: F822
+    'tf2ss', 'abcd_normalize', 'ss2tf', 'zpk2ss', 'ss2zpk',
+    'cont2discrete', 'eye', 'atleast_2d',
+    'poly', 'prod', 'array', 'outer', 'linalg', 'tf2zpk', 'zpk2tf', 'normalize'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.signal.lti_conversion is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.signal instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.signal` namespace, "
+                  "the `scipy.signal.lti_conversion` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_lti_conversion, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/ltisys.py b/__packaged__/coreml/.python_dependencies/scipy/signal/ltisys.py
new file mode 100644
index 00000000..d55e605b
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/ltisys.py
@@ -0,0 +1,38 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.signal` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _ltisys
+
+__all__ = [  # noqa: F822
+    'lti', 'dlti', 'TransferFunction', 'ZerosPolesGain', 'StateSpace',
+    'lsim', 'lsim2', 'impulse', 'impulse2', 'step', 'step2', 'bode',
+    'freqresp', 'place_poles', 'dlsim', 'dstep', 'dimpulse',
+    'dfreqresp', 'dbode', 's_qr', 'integrate', 'interpolate', 'linalg',
+    'interp1d', 'tf2zpk', 'zpk2tf', 'normalize', 'freqs',
+    'freqz', 'freqs_zpk', 'freqz_zpk', 'tf2ss', 'abcd_normalize',
+    'ss2tf', 'zpk2ss', 'ss2zpk', 'cont2discrete', 'atleast_1d',
+    'atleast_2d', 'squeeze', 'transpose', 'zeros_like', 'linspace',
+    'nan_to_num', 'LinearTimeInvariant', 'TransferFunctionContinuous',
+    'TransferFunctionDiscrete', 'ZerosPolesGainContinuous',
+    'ZerosPolesGainDiscrete', 'StateSpaceContinuous',
+    'StateSpaceDiscrete', 'Bunch'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.signal.ltisys is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.signal instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.signal` namespace, "
+                  "the `scipy.signal.ltisys` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_ltisys, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/signaltools.py b/__packaged__/coreml/.python_dependencies/scipy/signal/signaltools.py
new file mode 100644
index 00000000..c979ef7d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/signaltools.py
@@ -0,0 +1,37 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.signal` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _signaltools
+
+__all__ = [  # noqa: F822
+    'correlate', 'correlation_lags', 'correlate2d',
+    'convolve', 'convolve2d', 'fftconvolve', 'oaconvolve',
+    'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter',
+    'lfiltic', 'sosfilt', 'deconvolve', 'hilbert', 'hilbert2',
+    'cmplx_sort', 'unique_roots', 'invres', 'invresz', 'residue',
+    'residuez', 'resample', 'resample_poly', 'detrend',
+    'lfilter_zi', 'sosfilt_zi', 'sosfiltfilt', 'choose_conv_method',
+    'filtfilt', 'decimate', 'vectorstrength',
+    'timeit', 'cKDTree', 'dlti', 'upfirdn', 'linalg',
+    'sp_fft', 'lambertw', 'get_window', 'axis_slice', 'axis_reverse',
+    'odd_ext', 'even_ext', 'const_ext', 'cheby1', 'firwin'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.signal.signaltools is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.signal instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.signal` namespace, "
+                  "the `scipy.signal.signaltools` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_signaltools, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/spectral.py b/__packaged__/coreml/.python_dependencies/scipy/signal/spectral.py
new file mode 100644
index 00000000..b90ea4f0
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/spectral.py
@@ -0,0 +1,32 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.signal` namespace for importing the functions
+# included below.
+
+
+import warnings
+from . import _spectral_py
+
+
+__all__ = [  # noqa: F822
+    'periodogram', 'welch', 'lombscargle', 'csd', 'coherence',
+    'spectrogram', 'stft', 'istft', 'check_COLA', 'check_NOLA',
+    'sp_fft', 'get_window', 'const_ext', 'even_ext',
+    'odd_ext', 'zero_ext'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.signal.spectral is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.signal instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.signal` namespace, "
+                  "the `scipy.signal.spectral` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_spectral_py, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/spline.py b/__packaged__/coreml/.python_dependencies/scipy/signal/spline.py
new file mode 100644
index 00000000..cc15b2e5
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/spline.py
@@ -0,0 +1,26 @@
+# This file is not meant for public use and will be removed in future
+# versions of SciPy. Use the `scipy.signal` namespace for importing the
+# functions included below.
+
+import warnings
+
+from . import _spline
+
+__all__ = [  # noqa: F822
+    'cspline2d', 'qspline2d', 'sepfir2d', 'symiirorder1', 'symiirorder2']
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            f"scipy.signal.spline is deprecated and has no attribute {name}. "
+            "Try looking in scipy.signal instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.signal` namespace, "
+                  "the `scipy.signal.spline` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+    return getattr(_spline, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/tests/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/tests/mpsig.py b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/mpsig.py
new file mode 100644
index 00000000..d129de74
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/mpsig.py
@@ -0,0 +1,122 @@
+"""
+Some signal functions implemented using mpmath.
+"""
+
+try:
+    import mpmath
+except ImportError:
+    mpmath = None
+
+
+def _prod(seq):
+    """Returns the product of the elements in the sequence `seq`."""
+    p = 1
+    for elem in seq:
+        p *= elem
+    return p
+
+
+def _relative_degree(z, p):
+    """
+    Return relative degree of transfer function from zeros and poles.
+
+    This is simply len(p) - len(z), which must be nonnegative.
+    A ValueError is raised if len(p) < len(z).
+    """
+    degree = len(p) - len(z)
+    if degree < 0:
+        raise ValueError("Improper transfer function. "
+                         "Must have at least as many poles as zeros.")
+    return degree
+
+
+def _zpkbilinear(z, p, k, fs):
+    """Bilinear transformation to convert a filter from analog to digital."""
+
+    degree = _relative_degree(z, p)
+
+    fs2 = 2*fs
+
+    # Bilinear transform the poles and zeros
+    z_z = [(fs2 + z1) / (fs2 - z1) for z1 in z]
+    p_z = [(fs2 + p1) / (fs2 - p1) for p1 in p]
+
+    # Any zeros that were at infinity get moved to the Nyquist frequency
+    z_z.extend([-1] * degree)
+
+    # Compensate for gain change
+    numer = _prod(fs2 - z1 for z1 in z)
+    denom = _prod(fs2 - p1 for p1 in p)
+    k_z = k * numer / denom
+
+    return z_z, p_z, k_z.real
+
+
+def _zpklp2lp(z, p, k, wo=1):
+    """Transform a lowpass filter to a different cutoff frequency."""
+
+    degree = _relative_degree(z, p)
+
+    # Scale all points radially from origin to shift cutoff frequency
+    z_lp = [wo * z1 for z1 in z]
+    p_lp = [wo * p1 for p1 in p]
+
+    # Each shifted pole decreases gain by wo, each shifted zero increases it.
+    # Cancel out the net change to keep overall gain the same
+    k_lp = k * wo**degree
+
+    return z_lp, p_lp, k_lp
+
+
+def _butter_analog_poles(n):
+    """
+    Poles of an analog Butterworth lowpass filter.
+
+    This is the same calculation as scipy.signal.buttap(n) or
+    scipy.signal.butter(n, 1, analog=True, output='zpk'), but mpmath is used,
+    and only the poles are returned.
+    """
+    poles = [-mpmath.exp(1j*mpmath.pi*k/(2*n)) for k in range(-n+1, n, 2)]
+    return poles
+
+
+def butter_lp(n, Wn):
+    """
+    Lowpass Butterworth digital filter design.
+
+    This computes the same result as scipy.signal.butter(n, Wn, output='zpk'),
+    but it uses mpmath, and the results are returned in lists instead of NumPy
+    arrays.
+    """
+    zeros = []
+    poles = _butter_analog_poles(n)
+    k = 1
+    fs = 2
+    warped = 2 * fs * mpmath.tan(mpmath.pi * Wn / fs)
+    z, p, k = _zpklp2lp(zeros, poles, k, wo=warped)
+    z, p, k = _zpkbilinear(z, p, k, fs=fs)
+    return z, p, k
+
+
+def zpkfreqz(z, p, k, worN=None):
+    """
+    Frequency response of a filter in zpk format, using mpmath.
+
+    This is the same calculation as scipy.signal.freqz, but the input is in
+    zpk format, the calculation is performed using mpmath, and the results are
+    returned in lists instead of NumPy arrays.
+    """
+    if worN is None or isinstance(worN, int):
+        N = worN or 512
+        ws = [mpmath.pi * mpmath.mpf(j) / N for j in range(N)]
+    else:
+        ws = worN
+
+    h = []
+    for wk in ws:
+        zm1 = mpmath.exp(1j * wk)
+        numer = _prod([zm1 - t for t in z])
+        denom = _prod([zm1 - t for t in p])
+        hk = k * numer / denom
+        h.append(hk)
+    return ws, h
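+
+
+# An illustrative usage sketch (hypothetical helper, not part of SciPy's
+# tests), guarded because mpmath is optional here. A Butterworth lowpass
+# designed by `butter_lp` has unit gain at DC (z = 1).
+def _demo_butter_lp_dc_gain():
+    if mpmath is None:
+        return
+    z, p, k = butter_lp(4, mpmath.mpf('0.25'))
+    ws, h = zpkfreqz(z, p, k, worN=[mpmath.mpf(0)])
+    assert abs(h[0] - 1) < 1e-9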
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_array_tools.py b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_array_tools.py
new file mode 100644
index 00000000..81503b7e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_array_tools.py
@@ -0,0 +1,111 @@
+import numpy as np
+
+from numpy.testing import assert_array_equal
+from pytest import raises as assert_raises
+
+from scipy.signal._arraytools import (axis_slice, axis_reverse, odd_ext,
+                                      even_ext, const_ext, zero_ext)
+
+
+class TestArrayTools:
+
+    def test_axis_slice(self):
+        a = np.arange(12).reshape(3, 4)
+
+        s = axis_slice(a, start=0, stop=1, axis=0)
+        assert_array_equal(s, a[0:1, :])
+
+        s = axis_slice(a, start=-1, axis=0)
+        assert_array_equal(s, a[-1:, :])
+
+        s = axis_slice(a, start=0, stop=1, axis=1)
+        assert_array_equal(s, a[:, 0:1])
+
+        s = axis_slice(a, start=-1, axis=1)
+        assert_array_equal(s, a[:, -1:])
+
+        s = axis_slice(a, start=0, step=2, axis=0)
+        assert_array_equal(s, a[::2, :])
+
+        s = axis_slice(a, start=0, step=2, axis=1)
+        assert_array_equal(s, a[:, ::2])
+
+    def test_axis_reverse(self):
+        a = np.arange(12).reshape(3, 4)
+
+        r = axis_reverse(a, axis=0)
+        assert_array_equal(r, a[::-1, :])
+
+        r = axis_reverse(a, axis=1)
+        assert_array_equal(r, a[:, ::-1])
+
+    def test_odd_ext(self):
+        a = np.array([[1, 2, 3, 4, 5],
+                      [9, 8, 7, 6, 5]])
+
+        odd = odd_ext(a, 2, axis=1)
+        expected = np.array([[-1, 0, 1, 2, 3, 4, 5, 6, 7],
+                             [11, 10, 9, 8, 7, 6, 5, 4, 3]])
+        assert_array_equal(odd, expected)
+
+        odd = odd_ext(a, 1, axis=0)
+        expected = np.array([[-7, -4, -1, 2, 5],
+                             [1, 2, 3, 4, 5],
+                             [9, 8, 7, 6, 5],
+                             [17, 14, 11, 8, 5]])
+        assert_array_equal(odd, expected)
+
+        assert_raises(ValueError, odd_ext, a, 2, axis=0)
+        assert_raises(ValueError, odd_ext, a, 5, axis=1)
+
+    def test_even_ext(self):
+        a = np.array([[1, 2, 3, 4, 5],
+                      [9, 8, 7, 6, 5]])
+
+        even = even_ext(a, 2, axis=1)
+        expected = np.array([[3, 2, 1, 2, 3, 4, 5, 4, 3],
+                             [7, 8, 9, 8, 7, 6, 5, 6, 7]])
+        assert_array_equal(even, expected)
+
+        even = even_ext(a, 1, axis=0)
+        expected = np.array([[9, 8, 7, 6, 5],
+                             [1, 2, 3, 4, 5],
+                             [9, 8, 7, 6, 5],
+                             [1, 2, 3, 4, 5]])
+        assert_array_equal(even, expected)
+
+        assert_raises(ValueError, even_ext, a, 2, axis=0)
+        assert_raises(ValueError, even_ext, a, 5, axis=1)
+
+    def test_const_ext(self):
+        a = np.array([[1, 2, 3, 4, 5],
+                      [9, 8, 7, 6, 5]])
+
+        const = const_ext(a, 2, axis=1)
+        expected = np.array([[1, 1, 1, 2, 3, 4, 5, 5, 5],
+                             [9, 9, 9, 8, 7, 6, 5, 5, 5]])
+        assert_array_equal(const, expected)
+
+        const = const_ext(a, 1, axis=0)
+        expected = np.array([[1, 2, 3, 4, 5],
+                             [1, 2, 3, 4, 5],
+                             [9, 8, 7, 6, 5],
+                             [9, 8, 7, 6, 5]])
+        assert_array_equal(const, expected)
+
+    def test_zero_ext(self):
+        a = np.array([[1, 2, 3, 4, 5],
+                      [9, 8, 7, 6, 5]])
+
+        zero = zero_ext(a, 2, axis=1)
+        expected = np.array([[0, 0, 1, 2, 3, 4, 5, 0, 0],
+                             [0, 0, 9, 8, 7, 6, 5, 0, 0]])
+        assert_array_equal(zero, expected)
+
+        zero = zero_ext(a, 1, axis=0)
+        expected = np.array([[0, 0, 0, 0, 0],
+                             [1, 2, 3, 4, 5],
+                             [9, 8, 7, 6, 5],
+                             [0, 0, 0, 0, 0]])
+        assert_array_equal(zero, expected)
+
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_bsplines.py b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_bsplines.py
new file mode 100644
index 00000000..7861c3ec
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_bsplines.py
@@ -0,0 +1,267 @@
+# pylint: disable=missing-docstring
+import numpy as np
+from numpy import array
+from numpy.testing import (assert_allclose, assert_array_equal,
+                           assert_almost_equal)
+import pytest
+from pytest import raises
+
+import scipy.signal._bsplines as bsp
+from scipy import signal
+
+
+class TestBSplines:
+    """Test behaviors of B-splines. The values tested against were returned as of
+    SciPy 1.1.0 and are included for regression testing purposes"""
+
+    def test_spline_filter(self):
+        np.random.seed(12457)
+        # Test the type-error branch
+        raises(TypeError, bsp.spline_filter, array([0]), 0)
+        # Test the complex branch
+        data_array_complex = np.random.rand(7, 7) + np.random.rand(7, 7)*1j
+        # make the magnitude exceed 1, and make some negative
+        data_array_complex = 10*(1+1j-2*data_array_complex)
+        result_array_complex = array(
+            [[-4.61489230e-01-1.92994022j, 8.33332443+6.25519943j,
+              6.96300745e-01-9.05576038j, 5.28294849+3.97541356j,
+              5.92165565+7.68240595j, 6.59493160-1.04542804j,
+              9.84503460-5.85946894j],
+             [-8.78262329-8.4295969j, 7.20675516+5.47528982j,
+              -8.17223072+2.06330729j, -4.38633347-8.65968037j,
+              9.89916801-8.91720295j, 2.67755103+8.8706522j,
+              6.24192142+3.76879835j],
+             [-3.15627527+2.56303072j, 9.87658501-0.82838702j,
+              -9.96930313+8.72288895j, 3.17193985+6.42474651j,
+              -4.50919819-6.84576082j, 5.75423431+9.94723988j,
+              9.65979767+6.90665293j],
+             [-8.28993416-6.61064005j, 9.71416473e-01-9.44907284j,
+              -2.38331890+9.25196648j, -7.08868170-0.77403212j,
+              4.89887714+7.05371094j, -1.37062311-2.73505688j,
+              7.70705748+2.5395329j],
+             [2.51528406-1.82964492j, 3.65885472+2.95454836j,
+              5.16786575-1.66362023j, -8.77737999e-03+5.72478867j,
+              4.10533333-3.10287571j, 9.04761887+1.54017115j,
+              -5.77960968e-01-7.87758923j],
+             [9.86398506-3.98528528j, -4.71444130-2.44316983j,
+              -1.68038976-1.12708664j, 2.84695053+1.01725709j,
+              1.14315915-8.89294529j, -3.17127085-5.42145538j,
+              1.91830420-6.16370344j],
+             [7.13875294+2.91851187j, -5.35737514+9.64132309j,
+              -9.66586399+0.70250005j, -9.87717438-2.0262239j,
+              9.93160629+1.5630846j, 4.71948051-2.22050714j,
+              9.49550819+7.8995142j]])
+        # FIXME: for complex types, the computations are done in
+        # single precision (reason unclear). When this is changed,
+        # this test needs updating.
+        assert_allclose(bsp.spline_filter(data_array_complex, 0),
+                        result_array_complex, rtol=1e-6)
+        # Test the real branch
+        np.random.seed(12457)
+        data_array_real = np.random.rand(12, 12)
+        # make the magnitude exceed 1, and make some negative
+        data_array_real = 10*(1-2*data_array_real)
+        result_array_real = array(
+            [[-.463312621, 8.33391222, .697290949, 5.28390836,
+              5.92066474, 6.59452137, 9.84406950, -8.78324188,
+              7.20675750, -8.17222994, -4.38633345, 9.89917069],
+             [2.67755154, 6.24192170, -3.15730578, 9.87658581,
+              -9.96930425, 3.17194115, -4.50919947, 5.75423446,
+              9.65979824, -8.29066885, .971416087, -2.38331897],
+             [-7.08868346, 4.89887705, -1.37062289, 7.70705838,
+              2.51526461, 3.65885497, 5.16786604, -8.77715342e-03,
+              4.10533325, 9.04761993, -.577960351, 9.86382519],
+             [-4.71444301, -1.68038985, 2.84695116, 1.14315938,
+              -3.17127091, 1.91830461, 7.13779687, -5.35737482,
+              -9.66586425, -9.87717456, 9.93160672, 4.71948144],
+             [9.49551194, -1.92958436, 6.25427993, -9.05582911,
+              3.97562282, 7.68232426, -1.04514824, -5.86021443,
+              -8.43007451, 5.47528997, 2.06330736, -8.65968112],
+             [-8.91720100, 8.87065356, 3.76879937, 2.56222894,
+              -.828387146, 8.72288903, 6.42474741, -6.84576083,
+              9.94724115, 6.90665380, -6.61084494, -9.44907391],
+             [9.25196790, -.774032030, 7.05371046, -2.73505725,
+              2.53953305, -1.82889155, 2.95454824, -1.66362046,
+              5.72478916, -3.10287679, 1.54017123, -7.87759020],
+             [-3.98464539, -2.44316992, -1.12708657, 1.01725672,
+              -8.89294671, -5.42145629, -6.16370321, 2.91775492,
+              9.64132208, .702499998, -2.02622392, 1.56308431],
+             [-2.22050773, 7.89951554, 5.98970713, -7.35861835,
+              5.45459283, -7.76427957, 3.67280490, -4.05521315,
+              4.51967507, -3.22738749, -3.65080177, 3.05630155],
+             [-6.21240584, -.296796126, -8.34800163, 9.21564563,
+              -3.61958784, -4.77120006, -3.99454057, 1.05021988e-03,
+              -6.95982829, 6.04380797, 8.43181250, -2.71653339],
+             [1.19638037, 6.99718842e-02, 6.72020394, -2.13963198,
+              3.75309875, -5.70076744, 5.92143551, -7.22150575,
+              -3.77114594, -1.11903194, -5.39151466, 3.06620093],
+             [9.86326886, 1.05134482, -7.75950607, -3.64429655,
+              7.81848957, -9.02270373, 3.73399754, -4.71962549,
+              -7.71144306, 3.78263161, 6.46034818, -4.43444731]])
+        assert_allclose(bsp.spline_filter(data_array_real, 0),
+                        result_array_real)
+
+    def test_bspline(self):
+        np.random.seed(12458)
+        assert_allclose(bsp.bspline(np.random.rand(1, 1), 2),
+                        array([[0.73694695]]))
+        data_array_complex = np.random.rand(4, 4) + np.random.rand(4, 4)*1j
+        data_array_complex = 0.1*data_array_complex
+        result_array_complex = array(
+            [[0.40882362, 0.41021151, 0.40886708, 0.40905103],
+             [0.40829477, 0.41021230, 0.40966097, 0.40939871],
+             [0.41036803, 0.40901724, 0.40965331, 0.40879513],
+             [0.41032862, 0.40925287, 0.41037754, 0.41027477]])
+        assert_allclose(bsp.bspline(data_array_complex, 10),
+                        result_array_complex)
+
+    def test_gauss_spline(self):
+        np.random.seed(12459)
+        assert_almost_equal(bsp.gauss_spline(0, 0), 1.381976597885342)
+        assert_allclose(bsp.gauss_spline(array([1.]), 1), array([0.04865217]))
+
+    def test_gauss_spline_list(self):
+        # regression test for gh-12152 (accept array_like)
+        knots = [-1.0, 0.0, -1.0]
+        assert_almost_equal(bsp.gauss_spline(knots, 3),
+                            array([0.15418033, 0.6909883, 0.15418033]))
+
+    def test_cubic(self):
+        np.random.seed(12460)
+        assert_array_equal(bsp.cubic([0]), array([0]))
+        data_array_complex = np.random.rand(4, 4) + np.random.rand(4, 4)*1j
+        data_array_complex = 1+1j-2*data_array_complex
+        # scaling the magnitude by 10 makes the results close enough to zero,
+        # that the assertion fails, so just make the elements have a mix of
+        # positive and negative imaginary components...
+        result_array_complex = array(
+            [[0.23056563, 0.38414406, 0.08342987, 0.06904847],
+             [0.17240848, 0.47055447, 0.63896278, 0.39756424],
+             [0.12672571, 0.65862632, 0.1116695, 0.09700386],
+             [0.3544116, 0.17856518, 0.1528841, 0.17285762]])
+        assert_allclose(bsp.cubic(data_array_complex), result_array_complex)
+
+    def test_quadratic(self):
+        np.random.seed(12461)
+        assert_array_equal(bsp.quadratic([0]), array([0]))
+        data_array_complex = np.random.rand(4, 4) + np.random.rand(4, 4)*1j
+        # scaling the magnitude by 10 makes the results all zero,
+        # so just make the elements have a mix of positive and negative
+        # imaginary components...
+        data_array_complex = (1+1j-2*data_array_complex)
+        result_array_complex = array(
+            [[0.23062746, 0.06338176, 0.34902312, 0.31944105],
+             [0.14701256, 0.13277773, 0.29428615, 0.09814697],
+             [0.52873842, 0.06484157, 0.09517566, 0.46420389],
+             [0.09286829, 0.09371954, 0.1422526, 0.16007024]])
+        assert_allclose(bsp.quadratic(data_array_complex),
+                        result_array_complex)
+
+    def test_cspline1d(self):
+        np.random.seed(12462)
+        assert_array_equal(bsp.cspline1d(array([0])), [0.])
+        c1d = array([1.21037185, 1.86293902, 2.98834059, 4.11660378,
+                     4.78893826])
+        # test lamb != 0
+        assert_allclose(bsp.cspline1d(array([1., 2, 3, 4, 5]), 1), c1d)
+        c1d0 = array([0.78683946, 2.05333735, 2.99981113, 3.94741812,
+                      5.21051638])
+        assert_allclose(bsp.cspline1d(array([1., 2, 3, 4, 5])), c1d0)
+
+    def test_qspline1d(self):
+        np.random.seed(12463)
+        assert_array_equal(bsp.qspline1d(array([0])), [0.])
+        # test lamb != 0
+        raises(ValueError, bsp.qspline1d, array([1., 2, 3, 4, 5]), 1.)
+        raises(ValueError, bsp.qspline1d, array([1., 2, 3, 4, 5]), -1.)
+        q1d0 = array([0.85350007, 2.02441743, 2.99999534, 3.97561055,
+                      5.14634135])
+        assert_allclose(bsp.qspline1d(array([1., 2, 3, 4, 5])), q1d0)
+
+    def test_cspline1d_eval(self):
+        np.random.seed(12464)
+        assert_allclose(bsp.cspline1d_eval(array([0., 0]), [0.]), array([0.]))
+        assert_array_equal(bsp.cspline1d_eval(array([1., 0, 1]), []),
+                           array([]))
+        x = [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6]
+        dx = x[1]-x[0]
+        newx = [-6., -5.5, -5., -4.5, -4., -3.5, -3., -2.5, -2., -1.5, -1.,
+                -0.5, 0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5, 5., 5.5, 6.,
+                6.5, 7., 7.5, 8., 8.5, 9., 9.5, 10., 10.5, 11., 11.5, 12.,
+                12.5]
+        y = array([4.216, 6.864, 3.514, 6.203, 6.759, 7.433, 7.874, 5.879,
+                   1.396, 4.094])
+        cj = bsp.cspline1d(y)
+        newy = array([6.203, 4.41570658, 3.514, 5.16924703, 6.864, 6.04643068,
+                      4.21600281, 6.04643068, 6.864, 5.16924703, 3.514,
+                      4.41570658, 6.203, 6.80717667, 6.759, 6.98971173, 7.433,
+                      7.79560142, 7.874, 7.41525761, 5.879, 3.18686814, 1.396,
+                      2.24889482, 4.094, 2.24889482, 1.396, 3.18686814, 5.879,
+                      7.41525761, 7.874, 7.79560142, 7.433, 6.98971173, 6.759,
+                      6.80717667, 6.203, 4.41570658])
+        assert_allclose(bsp.cspline1d_eval(cj, newx, dx=dx, x0=x[0]), newy)
+
+    def test_qspline1d_eval(self):
+        np.random.seed(12465)
+        assert_allclose(bsp.qspline1d_eval(array([0., 0]), [0.]), array([0.]))
+        assert_array_equal(bsp.qspline1d_eval(array([1., 0, 1]), []),
+                           array([]))
+        x = [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6]
+        dx = x[1]-x[0]
+        newx = [-6., -5.5, -5., -4.5, -4., -3.5, -3., -2.5, -2., -1.5, -1.,
+                -0.5, 0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5, 5., 5.5, 6.,
+                6.5, 7., 7.5, 8., 8.5, 9., 9.5, 10., 10.5, 11., 11.5, 12.,
+                12.5]
+        y = array([4.216, 6.864, 3.514, 6.203, 6.759, 7.433, 7.874, 5.879,
+                   1.396, 4.094])
+        cj = bsp.qspline1d(y)
+        newy = array([6.203, 4.49418159, 3.514, 5.18390821, 6.864, 5.91436915,
+                      4.21600002, 5.91436915, 6.864, 5.18390821, 3.514,
+                      4.49418159, 6.203, 6.71900226, 6.759, 7.03980488, 7.433,
+                      7.81016848, 7.874, 7.32718426, 5.879, 3.23872593, 1.396,
+                      2.34046013, 4.094, 2.34046013, 1.396, 3.23872593, 5.879,
+                      7.32718426, 7.874, 7.81016848, 7.433, 7.03980488, 6.759,
+                      6.71900226, 6.203, 4.49418159])
+        assert_allclose(bsp.qspline1d_eval(cj, newx, dx=dx, x0=x[0]), newy)
+
+
+def test_sepfir2d_invalid_filter():
+    filt = np.array([1.0, 2.0, 4.0, 2.0, 1.0])
+    image = np.random.rand(7, 9)
+    # No error for odd lengths
+    signal.sepfir2d(image, filt, filt[2:])
+
+    # Row or column filter must be odd
+    with pytest.raises(ValueError, match="odd length"):
+        signal.sepfir2d(image, filt, filt[1:])
+    with pytest.raises(ValueError, match="odd length"):
+        signal.sepfir2d(image, filt[1:], filt)
+
+    # Filters must be 1-dimensional
+    with pytest.raises(ValueError, match="object too deep"):
+        signal.sepfir2d(image, filt.reshape(1, -1), filt)
+    with pytest.raises(ValueError, match="object too deep"):
+        signal.sepfir2d(image, filt, filt.reshape(1, -1))
+
+def test_sepfir2d_invalid_image():
+    filt = np.array([1.0, 2.0, 4.0, 2.0, 1.0])
+    image = np.random.rand(8, 8)
+
+    # Image must be 2 dimensional
+    with pytest.raises(ValueError, match="object too deep"):
+        signal.sepfir2d(image.reshape(4, 4, 4), filt, filt)
+
+    with pytest.raises(ValueError, match="object of too small depth"):
+        signal.sepfir2d(image[0], filt, filt)
+
+
+def test_cspline2d():
+    np.random.seed(181819142)
+    image = np.random.rand(71, 73)
+    signal.cspline2d(image, 8.0)
+
+
+def test_qspline2d():
+    np.random.seed(181819143)
+    image = np.random.rand(71, 73)
+    signal.qspline2d(image)
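+
+
+# A minimal standalone sketch (added for illustration, not part of the
+# original suite; the _demo_* name is ours): with the default lamb=0 the
+# cubic spline interpolates, so evaluating the returned coefficients at the
+# knot positions reproduces the input samples.
+def _demo_cspline1d_interpolates():
+    y = array([4.216, 6.864, 3.514, 6.203, 6.759])
+    cj = bsp.cspline1d(y)
+    # cspline1d_eval defaults to knot spacing dx=1 starting at x0=0
+    assert_allclose(bsp.cspline1d_eval(cj, np.arange(len(y))), y)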
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_cont2discrete.py b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_cont2discrete.py
new file mode 100644
index 00000000..6f95d149
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_cont2discrete.py
@@ -0,0 +1,420 @@
+import numpy as np
+from numpy.testing import (assert_array_almost_equal, assert_almost_equal,
+                           assert_allclose, assert_equal)
+
+import pytest
+from scipy.signal import cont2discrete as c2d
+from scipy.signal import dlsim, ss2tf, ss2zpk, lsim2, lti
+from scipy.signal import tf2ss, impulse2, dimpulse, step2, dstep
+
+# Author: Jeffrey Armstrong 
+# March 29, 2011
+
+
+class TestC2D:
+    def test_zoh(self):
+        ac = np.eye(2)
+        bc = np.full((2, 1), 0.5)
+        cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
+        dc = np.array([[0.0], [0.0], [-0.33]])
+
+        ad_truth = 1.648721270700128 * np.eye(2)
+        bd_truth = np.full((2, 1), 0.324360635350064)
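+        # (for zoh, ad = expm(ac * dt); with ac = I and dt = 0.5 this is
+        # exp(0.5) * I ~= 1.648721 * I, and bd = (exp(0.5) - 1) * bc)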
+        # c and d in discrete should be equal to their continuous counterparts
+        dt_requested = 0.5
+
+        ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, method='zoh')
+
+        assert_array_almost_equal(ad_truth, ad)
+        assert_array_almost_equal(bd_truth, bd)
+        assert_array_almost_equal(cc, cd)
+        assert_array_almost_equal(dc, dd)
+        assert_almost_equal(dt_requested, dt)
+
+    def test_foh(self):
+        ac = np.eye(2)
+        bc = np.full((2, 1), 0.5)
+        cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
+        dc = np.array([[0.0], [0.0], [-0.33]])
+
+        # True values are verified with Matlab
+        ad_truth = 1.648721270700128 * np.eye(2)
+        bd_truth = np.full((2, 1), 0.420839287058789)
+        cd_truth = cc
+        dd_truth = np.array([[0.260262223725224],
+                             [0.297442541400256],
+                             [-0.144098411624840]])
+        dt_requested = 0.5
+
+        ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, method='foh')
+
+        assert_array_almost_equal(ad_truth, ad)
+        assert_array_almost_equal(bd_truth, bd)
+        assert_array_almost_equal(cd_truth, cd)
+        assert_array_almost_equal(dd_truth, dd)
+        assert_almost_equal(dt_requested, dt)
+
+    def test_impulse(self):
+        ac = np.eye(2)
+        bc = np.full((2, 1), 0.5)
+        cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
+        dc = np.array([[0.0], [0.0], [0.0]])
+
+        # True values are verified with Matlab
+        ad_truth = 1.648721270700128 * np.eye(2)
+        bd_truth = np.full((2, 1), 0.412180317675032)
+        cd_truth = cc
+        dd_truth = np.array([[0.4375], [0.5], [0.3125]])
+        dt_requested = 0.5
+
+        ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
+                                 method='impulse')
+
+        assert_array_almost_equal(ad_truth, ad)
+        assert_array_almost_equal(bd_truth, bd)
+        assert_array_almost_equal(cd_truth, cd)
+        assert_array_almost_equal(dd_truth, dd)
+        assert_almost_equal(dt_requested, dt)
+
+    def test_gbt(self):
+        ac = np.eye(2)
+        bc = np.full((2, 1), 0.5)
+        cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
+        dc = np.array([[0.0], [0.0], [-0.33]])
+
+        dt_requested = 0.5
+        alpha = 1.0 / 3.0
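+        # gbt generalizes the other rules: alpha=0 is forward Euler,
+        # alpha=0.5 the bilinear (Tustin) rule, and alpha=1 backward
+        # differencing.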
+
+        ad_truth = 1.6 * np.eye(2)
+        bd_truth = np.full((2, 1), 0.3)
+        cd_truth = np.array([[0.9, 1.2],
+                             [1.2, 1.2],
+                             [1.2, 0.3]])
+        dd_truth = np.array([[0.175],
+                             [0.2],
+                             [-0.205]])
+
+        ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
+                                 method='gbt', alpha=alpha)
+
+        assert_array_almost_equal(ad_truth, ad)
+        assert_array_almost_equal(bd_truth, bd)
+        assert_array_almost_equal(cd_truth, cd)
+        assert_array_almost_equal(dd_truth, dd)
+
+    def test_euler(self):
+        ac = np.eye(2)
+        bc = np.full((2, 1), 0.5)
+        cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
+        dc = np.array([[0.0], [0.0], [-0.33]])
+
+        dt_requested = 0.5
+
+        ad_truth = 1.5 * np.eye(2)
+        bd_truth = np.full((2, 1), 0.25)
+        cd_truth = np.array([[0.75, 1.0],
+                             [1.0, 1.0],
+                             [1.0, 0.25]])
+        dd_truth = dc
+
+        ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
+                                 method='euler')
+
+        assert_array_almost_equal(ad_truth, ad)
+        assert_array_almost_equal(bd_truth, bd)
+        assert_array_almost_equal(cd_truth, cd)
+        assert_array_almost_equal(dd_truth, dd)
+        assert_almost_equal(dt_requested, dt)
+
+    def test_backward_diff(self):
+        ac = np.eye(2)
+        bc = np.full((2, 1), 0.5)
+        cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
+        dc = np.array([[0.0], [0.0], [-0.33]])
+
+        dt_requested = 0.5
+
+        ad_truth = 2.0 * np.eye(2)
+        bd_truth = np.full((2, 1), 0.5)
+        cd_truth = np.array([[1.5, 2.0],
+                             [2.0, 2.0],
+                             [2.0, 0.5]])
+        dd_truth = np.array([[0.875],
+                             [1.0],
+                             [0.295]])
+
+        ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
+                                 method='backward_diff')
+
+        assert_array_almost_equal(ad_truth, ad)
+        assert_array_almost_equal(bd_truth, bd)
+        assert_array_almost_equal(cd_truth, cd)
+        assert_array_almost_equal(dd_truth, dd)
+
+    def test_bilinear(self):
+        ac = np.eye(2)
+        bc = np.full((2, 1), 0.5)
+        cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
+        dc = np.array([[0.0], [0.0], [-0.33]])
+
+        dt_requested = 0.5
+
+        ad_truth = (5.0 / 3.0) * np.eye(2)
+        bd_truth = np.full((2, 1), 1.0 / 3.0)
+        cd_truth = np.array([[1.0, 4.0 / 3.0],
+                             [4.0 / 3.0, 4.0 / 3.0],
+                             [4.0 / 3.0, 1.0 / 3.0]])
+        dd_truth = np.array([[0.291666666666667],
+                             [1.0 / 3.0],
+                             [-0.121666666666667]])
+
+        ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
+                                 method='bilinear')
+
+        assert_array_almost_equal(ad_truth, ad)
+        assert_array_almost_equal(bd_truth, bd)
+        assert_array_almost_equal(cd_truth, cd)
+        assert_array_almost_equal(dd_truth, dd)
+        assert_almost_equal(dt_requested, dt)
+
+        # Same continuous system again, but change sampling rate
+
+        ad_truth = 1.4 * np.eye(2)
+        bd_truth = np.full((2, 1), 0.2)
+        cd_truth = np.array([[0.9, 1.2], [1.2, 1.2], [1.2, 0.3]])
+        dd_truth = np.array([[0.175], [0.2], [-0.205]])
+
+        dt_requested = 1.0 / 3.0
+
+        ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
+                                 method='bilinear')
+
+        assert_array_almost_equal(ad_truth, ad)
+        assert_array_almost_equal(bd_truth, bd)
+        assert_array_almost_equal(cd_truth, cd)
+        assert_array_almost_equal(dd_truth, dd)
+        assert_almost_equal(dt_requested, dt)
+
+    def test_transferfunction(self):
+        numc = np.array([0.25, 0.25, 0.5])
+        denc = np.array([0.75, 0.75, 1.0])
+
+        numd = np.array([[1.0 / 3.0, -0.427419169438754, 0.221654141101125]])
+        dend = np.array([1.0, -1.351394049721225, 0.606530659712634])
+
+        dt_requested = 0.5
+
+        num, den, dt = c2d((numc, denc), dt_requested, method='zoh')
+
+        assert_array_almost_equal(numd, num)
+        assert_array_almost_equal(dend, den)
+        assert_almost_equal(dt_requested, dt)
+
+    def test_zerospolesgain(self):
+        zeros_c = np.array([0.5, -0.5])
+        poles_c = np.array([1.j / np.sqrt(2), -1.j / np.sqrt(2)])
+        k_c = 1.0
+
+        zeros_d = [1.23371727305860, 0.735356894461267]
+        poles_d = [0.938148335039729 + 0.346233593780536j,
+                   0.938148335039729 - 0.346233593780536j]
+        k_d = 1.0
+
+        dt_requested = 0.5
+
+        zeros, poles, k, dt = c2d((zeros_c, poles_c, k_c), dt_requested,
+                                  method='zoh')
+
+        assert_array_almost_equal(zeros_d, zeros)
+        assert_array_almost_equal(poles_d, poles)
+        assert_almost_equal(k_d, k)
+        assert_almost_equal(dt_requested, dt)
+
+    def test_gbt_with_sio_tf_and_zpk(self):
+        """Test method='gbt' with alpha=0.25 for tf and zpk cases."""
+        # State-space coefficients for the continuous SISO system.
+        A = -1.0
+        B = 1.0
+        C = 1.0
+        D = 0.5
+
+        # The continuous transfer function coefficients.
+        cnum, cden = ss2tf(A, B, C, D)
+
+        # Continuous zpk representation
+        cz, cp, ck = ss2zpk(A, B, C, D)
+
+        h = 1.0
+        alpha = 0.25
+
+        # Explicit formulas, in the scalar case.
+        Ad = (1 + (1 - alpha) * h * A) / (1 - alpha * h * A)
+        Bd = h * B / (1 - alpha * h * A)
+        Cd = C / (1 - alpha * h * A)
+        Dd = D + alpha * C * Bd
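+        # (these are the scalar forms of the matrix rules
+        # Ad = inv(I - alpha*h*A) @ (I + (1 - alpha)*h*A) and
+        # Bd = inv(I - alpha*h*A) @ (h*B))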
+
+        # Convert the explicit solution to tf
+        dnum, dden = ss2tf(Ad, Bd, Cd, Dd)
+
+        # Compute the discrete tf using cont2discrete.
+        c2dnum, c2dden, dt = c2d((cnum, cden), h, method='gbt', alpha=alpha)
+
+        assert_allclose(dnum, c2dnum)
+        assert_allclose(dden, c2dden)
+
+        # Convert explicit solution to zpk.
+        dz, dp, dk = ss2zpk(Ad, Bd, Cd, Dd)
+
+        # Compute the discrete zpk using cont2discrete.
+        c2dz, c2dp, c2dk, dt = c2d((cz, cp, ck), h, method='gbt', alpha=alpha)
+
+        assert_allclose(dz, c2dz)
+        assert_allclose(dp, c2dp)
+        assert_allclose(dk, c2dk)
+
+    def test_discrete_approx(self):
+        """
+        Test that the solution to the discrete approximation of a continuous
+        system actually approximates the solution to the continuous system.
+        This is an indirect test of the correctness of the implementation
+        of cont2discrete.
+        """
+
+        def u(t):
+            return np.sin(2.5 * t)
+
+        a = np.array([[-0.01]])
+        b = np.array([[1.0]])
+        c = np.array([[1.0]])
+        d = np.array([[0.2]])
+        x0 = 1.0
+
+        t = np.linspace(0, 10.0, 101)
+        dt = t[1] - t[0]
+        u1 = u(t)
+
+        # Use lsim2 to compute the solution to the continuous system.
+        t, yout, xout = lsim2((a, b, c, d), T=t, U=u1, X0=x0,
+                              rtol=1e-9, atol=1e-11)
+
+        # Convert the continuous system to a discrete approximation.
+        dsys = c2d((a, b, c, d), dt, method='bilinear')
+
+        # Use dlsim with the pairwise averaged input to compute the output
+        # of the discrete system.
+        u2 = 0.5 * (u1[:-1] + u1[1:])
+        t2 = t[:-1]
+        td2, yd2, xd2 = dlsim(dsys, u=u2.reshape(-1, 1), t=t2, x0=x0)
+
+        # ymid is the average of consecutive terms of the "exact" output
+        # computed by lsim2.  This is what the discrete approximation
+        # actually approximates.
+        ymid = 0.5 * (yout[:-1] + yout[1:])
+
+        assert_allclose(yd2.ravel(), ymid, rtol=1e-4)
+
+    def test_simo_tf(self):
+        # See gh-5753
+        tf = ([[1, 0], [1, 1]], [1, 1])
+        num, den, dt = c2d(tf, 0.01)
+
+        assert_equal(dt, 0.01)  # sanity check
+        assert_allclose(den, [1, -0.990404983], rtol=1e-3)
+        assert_allclose(num, [[1, -1], [1, -0.99004983]], rtol=1e-3)
+
+    def test_multioutput(self):
+        ts = 0.01  # time step
+
+        tf = ([[1, -3], [1, 5]], [1, 1])
+        num, den, dt = c2d(tf, ts)
+
+        tf1 = (tf[0][0], tf[1])
+        num1, den1, dt1 = c2d(tf1, ts)
+
+        tf2 = (tf[0][1], tf[1])
+        num2, den2, dt2 = c2d(tf2, ts)
+
+        # Sanity checks
+        assert_equal(dt, dt1)
+        assert_equal(dt, dt2)
+
+        # Check that we get the same results
+        assert_allclose(num, np.vstack((num1, num2)), rtol=1e-13)
+
+        # Single input, so the denominator should
+        # not be multidimensional like the numerator
+        assert_allclose(den, den1, rtol=1e-13)
+        assert_allclose(den, den2, rtol=1e-13)
+
+
+class TestC2dLti:
+    def test_c2d_ss(self):
+        # StateSpace
+        A = np.array([[-0.3, 0.1], [0.2, -0.7]])
+        B = np.array([[0], [1]])
+        C = np.array([[1, 0]])
+        D = 0
+
+        A_res = np.array([[0.985136404135682, 0.004876671474795],
+                          [0.009753342949590, 0.965629718236502]])
+        B_res = np.array([[0.000122937599964], [0.049135527547844]])
+
+        sys_ssc = lti(A, B, C, D)
+        sys_ssd = sys_ssc.to_discrete(0.05)
+
+        assert_allclose(sys_ssd.A, A_res)
+        assert_allclose(sys_ssd.B, B_res)
+        assert_allclose(sys_ssd.C, C)
+        assert_allclose(sys_ssd.D, D)
+
+    def test_c2d_tf(self):
+        sys = lti([0.5, 0.3], [1.0, 0.4])
+        sys = sys.to_discrete(0.005)
+
+        # Matlab results
+        num_res = np.array([0.5, -0.485149004980066])
+        den_res = np.array([1.0, -0.980198673306755])
+
+        # Loose tolerances are needed here to absorb accumulated
+        # numerical error
+        assert_allclose(sys.den, den_res, atol=0.02)
+        assert_allclose(sys.num, num_res, atol=0.02)
+
+
+class TestC2dInvariants:
+    # Some test cases for checking the invariants.
+    # List of triplets: (system, sample time, number of samples)
+    cases = [
+        (tf2ss([1, 1], [1, 1.5, 1]), 0.25, 10),
+        (tf2ss([1, 2], [1, 1.5, 3, 1]), 0.5, 10),
+        (tf2ss(0.1, [1, 1, 2, 1]), 0.5, 10),
+    ]
+
+    # Some options for lsim2 and derived routines
+    tolerances = {'rtol': 1e-9, 'atol': 1e-11}
+
+    # Check that systems discretized with the impulse-invariant
+    # method really hold the invariant
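+    # (the discrete impulse response equals dt times samples of the
+    # continuous impulse response)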
+    @pytest.mark.parametrize("sys,sample_time,samples_number", cases)
+    def test_impulse_invariant(self, sys, sample_time, samples_number):
+        time = np.arange(samples_number) * sample_time
+        _, yout_cont = impulse2(sys, T=time, **self.tolerances)
+        _, yout_disc = dimpulse(c2d(sys, sample_time, method='impulse'),
+                                n=len(time))
+        assert_allclose(sample_time * yout_cont.ravel(), yout_disc[0].ravel())
+
+    # Step invariant should hold for ZOH discretized systems
+    @pytest.mark.parametrize("sys,sample_time,samples_number", cases)
+    def test_step_invariant(self, sys, sample_time, samples_number):
+        time = np.arange(samples_number) * sample_time
+        _, yout_cont = step2(sys, T=time, **self.tolerances)
+        _, yout_disc = dstep(c2d(sys, sample_time, method='zoh'), n=len(time))
+        assert_allclose(yout_cont.ravel(), yout_disc[0].ravel())
+
+    # Linear invariant should hold for FOH discretized systems
+    @pytest.mark.parametrize("sys,sample_time,samples_number", cases)
+    def test_linear_invariant(self, sys, sample_time, samples_number):
+        time = np.arange(samples_number) * sample_time
+        _, yout_cont, _ = lsim2(sys, T=time, U=time, **self.tolerances)
+        _, yout_disc, _ = dlsim(c2d(sys, sample_time, method='foh'), u=time)
+        assert_allclose(yout_cont.ravel(), yout_disc.ravel())
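+
+
+# A minimal standalone sketch (added for illustration, not part of the
+# original suite; the _demo_* name is ours): for a scalar system the
+# generalized bilinear transform reduces to the closed-form expressions used
+# in test_gbt_with_sio_tf_and_zpk above, and c2d(method='gbt') matches them.
+def _demo_scalar_gbt(a=-1.0, b=1.0, c=1.0, d=0.5, h=0.1, alpha=0.5):
+    sys_c = (np.array([[a]]), np.array([[b]]),
+             np.array([[c]]), np.array([[d]]))
+    ad, bd, cd, dd, _ = c2d(sys_c, h, method='gbt', alpha=alpha)
+    den = 1 - alpha * h * a
+    assert_allclose(ad, [[(1 + (1 - alpha) * h * a) / den]])
+    assert_allclose(bd, [[h * b / den]])
+    assert_allclose(cd, [[c / den]])
+    assert_allclose(dd, [[d + alpha * c * h * b / den]])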
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_czt.py b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_czt.py
new file mode 100644
index 00000000..b4a3c0e3
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_czt.py
@@ -0,0 +1,219 @@
+# This program is public domain
+# Authors: Paul Kienzle, Nadav Horesh
+'''
+A unit test module for czt.py
+'''
+import pytest
+from numpy.testing import assert_allclose
+from scipy.fft import fft
+from scipy.signal import (czt, zoom_fft, czt_points, CZT, ZoomFFT)
+import numpy as np
+
+
+def check_czt(x):
+    # Check that czt is the equivalent of normal fft
+    y = fft(x)
+    y1 = czt(x)
+    assert_allclose(y1, y, rtol=1e-13)
+
+    # Check that interpolated czt is the equivalent of normal fft
+    y = fft(x, 100*len(x))
+    y1 = czt(x, 100*len(x))
+    assert_allclose(y1, y, rtol=1e-12)
+
+
+def check_zoom_fft(x):
+    # Check that zoom_fft is the equivalent of normal fft
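+    # (with the default fs=2, the band [0, 2) spans one full period of the
+    # DFT, so sampling it at len(x) points reproduces fft)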
+    y = fft(x)
+    y1 = zoom_fft(x, [0, 2-2./len(y)], endpoint=True)
+    assert_allclose(y1, y, rtol=1e-11, atol=1e-14)
+    y1 = zoom_fft(x, [0, 2])
+    assert_allclose(y1, y, rtol=1e-11, atol=1e-14)
+
+    # Test fn scalar
+    y1 = zoom_fft(x, 2-2./len(y), endpoint=True)
+    assert_allclose(y1, y, rtol=1e-11, atol=1e-14)
+    y1 = zoom_fft(x, 2)
+    assert_allclose(y1, y, rtol=1e-11, atol=1e-14)
+
+    # Check that zoom_fft with oversampling is equivalent to zero padding
+    over = 10
+    yover = fft(x, over*len(x))
+    y2 = zoom_fft(x, [0, 2-2./len(yover)], m=len(yover), endpoint=True)
+    assert_allclose(y2, yover, rtol=1e-12, atol=1e-10)
+    y2 = zoom_fft(x, [0, 2], m=len(yover))
+    assert_allclose(y2, yover, rtol=1e-12, atol=1e-10)
+
+    # Check that zoom_fft works on a subrange
+    w = np.linspace(0, 2-2./len(x), len(x))
+    f1, f2 = w[3], w[6]
+    y3 = zoom_fft(x, [f1, f2], m=3*over+1, endpoint=True)
+    idx3 = slice(3*over, 6*over+1)
+    assert_allclose(y3, yover[idx3], rtol=1e-13)
+
+
+def test_1D():
+    # Test of 1D version of the transforms
+
+    np.random.seed(0)  # Deterministic randomness
+
+    # Random signals
+    lengths = np.random.randint(8, 200, 20)
+    lengths = np.append(lengths, 1)  # np.append returns a copy, so rebind
+    for length in lengths:
+        x = np.random.random(length)
+        check_zoom_fft(x)
+        check_czt(x)
+
+    # Gauss
+    t = np.linspace(-2, 2, 128)
+    x = np.exp(-t**2/0.01)
+    check_zoom_fft(x)
+
+    # Linear
+    x = [1, 2, 3, 4, 5, 6, 7]
+    check_zoom_fft(x)
+
+    # Check near powers of two
+    check_zoom_fft(range(126-31))
+    check_zoom_fft(range(127-31))
+    check_zoom_fft(range(128-31))
+    check_zoom_fft(range(129-31))
+    check_zoom_fft(range(130-31))
+
+    # Check transform on n-D array input
+    x = np.reshape(np.arange(3*2*28), (3, 2, 28))
+    y1 = zoom_fft(x, [0, 2-2./28])
+    y2 = zoom_fft(x[2, 0, :], [0, 2-2./28])
+    assert_allclose(y1[2, 0], y2, rtol=1e-13, atol=1e-12)
+
+    y1 = zoom_fft(x, [0, 2], endpoint=False)
+    y2 = zoom_fft(x[2, 0, :], [0, 2], endpoint=False)
+    assert_allclose(y1[2, 0], y2, rtol=1e-13, atol=1e-12)
+
+    # Random (not a test condition)
+    x = np.random.rand(101)
+    check_zoom_fft(x)
+
+    # Spikes
+    t = np.linspace(0, 1, 128)
+    x = np.sin(2*np.pi*t*5)+np.sin(2*np.pi*t*13)
+    check_zoom_fft(x)
+
+    # Sines
+    x = np.zeros(100, dtype=complex)
+    x[[1, 5, 21]] = 1
+    check_zoom_fft(x)
+
+    # Sines plus complex component
+    x += 1j*np.linspace(0, 0.5, x.shape[0])
+    check_zoom_fft(x)
+
+
+def test_large_prime_lengths():
+    np.random.seed(0)  # Deterministic randomness
+    for N in (101, 1009, 10007):
+        x = np.random.rand(N)
+        y = fft(x)
+        y1 = czt(x)
+        assert_allclose(y, y1, rtol=1e-12)
+
+
+@pytest.mark.slow
+def test_czt_vs_fft():
+    np.random.seed(123)
+    random_lengths = np.random.exponential(100000, size=10).astype('int')
+    for n in random_lengths:
+        a = np.random.randn(n)
+        assert_allclose(czt(a), fft(a), rtol=1e-11)
+
+
+def test_empty_input():
+    with pytest.raises(ValueError, match='Invalid number of CZT'):
+        czt([])
+    with pytest.raises(ValueError, match='Invalid number of CZT'):
+        zoom_fft([], 0.5)
+
+
+def test_0_rank_input():
+    with pytest.raises(IndexError, match='tuple index out of range'):
+        czt(5)
+    with pytest.raises(IndexError, match='tuple index out of range'):
+        zoom_fft(5, 0.5)
+
+
+@pytest.mark.parametrize('impulse', ([0, 0, 1], [0, 0, 1, 0, 0],
+                                     np.concatenate((np.array([0, 0, 1]),
+                                                     np.zeros(100)))))
+@pytest.mark.parametrize('m', (1, 3, 5, 8, 101, 1021))
+@pytest.mark.parametrize('a', (1, 2, 0.5, 1.1))
+# Step that tests away from the unit circle, but not so far it explodes from
+# numerical error
+@pytest.mark.parametrize('w', (None, 0.98534 + 0.17055j))
+def test_czt_math(impulse, m, w, a):
+    # z-transform of an impulse is 1 everywhere
+    assert_allclose(czt(impulse[2:], m=m, w=w, a=a),
+                    np.ones(m), rtol=1e-10)
+
+    # z-transform of a delayed impulse is z**-1
+    assert_allclose(czt(impulse[1:], m=m, w=w, a=a),
+                    czt_points(m=m, w=w, a=a)**-1, rtol=1e-10)
+
+    # z-transform of a 2-delayed impulse is z**-2
+    assert_allclose(czt(impulse, m=m, w=w, a=a),
+                    czt_points(m=m, w=w, a=a)**-2, rtol=1e-10)
+
+
+def test_int_args():
+    # Integer argument `a` was producing all 0s
+    assert_allclose(abs(czt([0, 1], m=10, a=2)), 0.5*np.ones(10), rtol=1e-15)
+    assert_allclose(czt_points(11, w=2), 1/(2**np.arange(11)), rtol=1e-30)
+
+
+def test_czt_points():
+    for N in (1, 2, 3, 8, 11, 100, 101, 10007):
+        assert_allclose(czt_points(N), np.exp(2j*np.pi*np.arange(N)/N),
+                        rtol=1e-30)
+
+    assert_allclose(czt_points(7, w=1), np.ones(7), rtol=1e-30)
+    assert_allclose(czt_points(11, w=2.), 1/(2**np.arange(11)), rtol=1e-30)
+
+    func = CZT(12, m=11, w=2., a=1)
+    assert_allclose(func.points(), 1/(2**np.arange(11)), rtol=1e-30)
+
+
+@pytest.mark.parametrize('cls, args', [(CZT, (100,)), (ZoomFFT, (100, 0.2))])
+def test_CZT_size_mismatch(cls, args):
+    # Data size doesn't match function's expected size
+    myfunc = cls(*args)
+    with pytest.raises(ValueError, match='CZT defined for'):
+        myfunc(np.arange(5))
+
+
+def test_invalid_range():
+    with pytest.raises(ValueError, match='2-length sequence'):
+        ZoomFFT(100, [1, 2, 3])
+
+
+@pytest.mark.parametrize('m', [0, -11, 5.5, 4.0])
+def test_czt_points_errors(m):
+    # Invalid number of points
+    with pytest.raises(ValueError, match='Invalid number of CZT'):
+        czt_points(m)
+
+
+@pytest.mark.parametrize('size', [0, -5, 3.5, 4.0])
+def test_nonsense_size(size):
+    # NumPy and SciPy fft() raise ValueError for zero output size, so we do too
+    with pytest.raises(ValueError, match='Invalid number of CZT'):
+        CZT(size, 3)
+    with pytest.raises(ValueError, match='Invalid number of CZT'):
+        ZoomFFT(size, 0.2, 3)
+    with pytest.raises(ValueError, match='Invalid number of CZT'):
+        CZT(3, size)
+    with pytest.raises(ValueError, match='Invalid number of CZT'):
+        ZoomFFT(3, 0.2, size)
+    with pytest.raises(ValueError, match='Invalid number of CZT'):
+        czt([1, 2, 3], size)
+    with pytest.raises(ValueError, match='Invalid number of CZT'):
+        zoom_fft([1, 2, 3], 0.2, size)
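+
+
+# A standalone illustration (a sketch, not part of the original suite; the
+# _demo_* name is ours): czt evaluates the z-transform sum x[n] * z**-n at
+# the points returned by czt_points, which for the defaults are the DFT
+# sample points on the unit circle.
+def _demo_czt_direct(n=16, m=8):
+    rng = np.random.RandomState(1234)
+    x = rng.rand(n)
+    points = czt_points(m)  # defaults: m points on the unit circle
+    direct = np.array([(x * p ** -np.arange(n)).sum() for p in points])
+    assert_allclose(czt(x, m), direct, rtol=1e-10, atol=1e-12)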
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_dltisys.py b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_dltisys.py
new file mode 100644
index 00000000..e4f01efc
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_dltisys.py
@@ -0,0 +1,598 @@
+# Author: Jeffrey Armstrong 
+# April 4, 2011
+
+import numpy as np
+from numpy.testing import (assert_equal,
+                           assert_array_almost_equal, assert_array_equal,
+                           assert_allclose, assert_, assert_almost_equal,
+                           suppress_warnings)
+from pytest import raises as assert_raises
+from scipy.signal import (dlsim, dstep, dimpulse, tf2zpk, lti, dlti,
+                          StateSpace, TransferFunction, ZerosPolesGain,
+                          dfreqresp, dbode, BadCoefficients)
+
+
+class TestDLTI:
+
+    def test_dlsim(self):
+
+        a = np.asarray([[0.9, 0.1], [-0.2, 0.9]])
+        b = np.asarray([[0.4, 0.1, -0.1], [0.0, 0.05, 0.0]])
+        c = np.asarray([[0.1, 0.3]])
+        d = np.asarray([[0.0, -0.1, 0.0]])
+        dt = 0.5
+
+        # Create an input matrix with inputs down the columns (3 cols) and its
+        # respective time input vector
+        u = np.hstack((np.linspace(0, 4.0, num=5)[:, np.newaxis],
+                       np.full((5, 1), 0.01),
+                       np.full((5, 1), -0.002)))
+        t_in = np.linspace(0, 2.0, num=5)
+
+        # Define the known result
+        yout_truth = np.array([[-0.001,
+                                -0.00073,
+                                0.039446,
+                                0.0915387,
+                                0.13195948]]).T
+        xout_truth = np.asarray([[0, 0],
+                                 [0.0012, 0.0005],
+                                 [0.40233, 0.00071],
+                                 [1.163368, -0.079327],
+                                 [2.2402985, -0.3035679]])
+
+        tout, yout, xout = dlsim((a, b, c, d, dt), u, t_in)
+
+        assert_array_almost_equal(yout_truth, yout)
+        assert_array_almost_equal(xout_truth, xout)
+        assert_array_almost_equal(t_in, tout)
+
+        # Make sure input with single-dimension doesn't raise error
+        dlsim((1, 2, 3), 4)
+
+        # Interpolated control - inputs should have different time steps
+        # than the discrete model uses internally
+        u_sparse = u[[0, 4], :]
+        t_sparse = np.asarray([0.0, 2.0])
+
+        tout, yout, xout = dlsim((a, b, c, d, dt), u_sparse, t_sparse)
+
+        assert_array_almost_equal(yout_truth, yout)
+        assert_array_almost_equal(xout_truth, xout)
+        assert_equal(len(tout), yout.shape[0])
+
+        # Transfer functions (assume dt = 0.5)
+        num = np.asarray([1.0, -0.1])
+        den = np.asarray([0.3, 1.0, 0.2])
+        yout_truth = np.array([[0.0,
+                                0.0,
+                                3.33333333333333,
+                                -4.77777777777778,
+                                23.0370370370370]]).T
+
+        # Assume use of the first column of the control input built earlier
+        tout, yout = dlsim((num, den, 0.5), u[:, 0], t_in)
+
+        assert_array_almost_equal(yout, yout_truth)
+        assert_array_almost_equal(t_in, tout)
+
+        # Retest the same with a 1-D input vector
+        uflat = np.asarray(u[:, 0])
+        uflat = uflat.reshape((5,))
+        tout, yout = dlsim((num, den, 0.5), uflat, t_in)
+
+        assert_array_almost_equal(yout, yout_truth)
+        assert_array_almost_equal(t_in, tout)
+
+        # zeros-poles-gain representation
+        zd = np.array([0.5, -0.5])
+        pd = np.array([1.j / np.sqrt(2), -1.j / np.sqrt(2)])
+        k = 1.0
+        yout_truth = np.array([[0.0, 1.0, 2.0, 2.25, 2.5]]).T
+
+        tout, yout = dlsim((zd, pd, k, 0.5), u[:, 0], t_in)
+
+        assert_array_almost_equal(yout, yout_truth)
+        assert_array_almost_equal(t_in, tout)
+
+        # Raise an error for continuous-time systems
+        system = lti([1], [1, 1])
+        assert_raises(AttributeError, dlsim, system, u)
+
+    def test_dstep(self):
+
+        a = np.asarray([[0.9, 0.1], [-0.2, 0.9]])
+        b = np.asarray([[0.4, 0.1, -0.1], [0.0, 0.05, 0.0]])
+        c = np.asarray([[0.1, 0.3]])
+        d = np.asarray([[0.0, -0.1, 0.0]])
+        dt = 0.5
+
+        # Because b.shape[1] == 3, dstep should result in a tuple of three
+        # result vectors
+        yout_step_truth = (np.asarray([0.0, 0.04, 0.052, 0.0404, 0.00956,
+                                       -0.036324, -0.093318, -0.15782348,
+                                       -0.226628324, -0.2969374948]),
+                           np.asarray([-0.1, -0.075, -0.058, -0.04815,
+                                       -0.04453, -0.0461895, -0.0521812,
+                                       -0.061588875, -0.073549579,
+                                       -0.08727047595]),
+                           np.asarray([0.0, -0.01, -0.013, -0.0101, -0.00239,
+                                       0.009081, 0.0233295, 0.03945587,
+                                       0.056657081, 0.0742343737]))
+
+        tout, yout = dstep((a, b, c, d, dt), n=10)
+
+        assert_equal(len(yout), 3)
+
+        for i in range(0, len(yout)):
+            assert_equal(yout[i].shape[0], 10)
+            assert_array_almost_equal(yout[i].flatten(), yout_step_truth[i])
+
+        # Check that the other two inputs (tf, zpk) will work as well
+        tfin = ([1.0], [1.0, 1.0], 0.5)
+        yout_tfstep = np.asarray([0.0, 1.0, 0.0])
+        tout, yout = dstep(tfin, n=3)
+        assert_equal(len(yout), 1)
+        assert_array_almost_equal(yout[0].flatten(), yout_tfstep)
+
+        zpkin = tf2zpk(tfin[0], tfin[1]) + (0.5,)
+        tout, yout = dstep(zpkin, n=3)
+        assert_equal(len(yout), 1)
+        assert_array_almost_equal(yout[0].flatten(), yout_tfstep)
+
+        # Raise an error for continuous-time systems
+        system = lti([1], [1, 1])
+        assert_raises(AttributeError, dstep, system)
+
+    def test_dimpulse(self):
+
+        a = np.asarray([[0.9, 0.1], [-0.2, 0.9]])
+        b = np.asarray([[0.4, 0.1, -0.1], [0.0, 0.05, 0.0]])
+        c = np.asarray([[0.1, 0.3]])
+        d = np.asarray([[0.0, -0.1, 0.0]])
+        dt = 0.5
+
+        # Because b.shape[1] == 3, dimpulse should result in a tuple of three
+        # result vectors
+        yout_imp_truth = (np.asarray([0.0, 0.04, 0.012, -0.0116, -0.03084,
+                                      -0.045884, -0.056994, -0.06450548,
+                                      -0.068804844, -0.0703091708]),
+                          np.asarray([-0.1, 0.025, 0.017, 0.00985, 0.00362,
+                                      -0.0016595, -0.0059917, -0.009407675,
+                                      -0.011960704, -0.01372089695]),
+                          np.asarray([0.0, -0.01, -0.003, 0.0029, 0.00771,
+                                      0.011471, 0.0142485, 0.01612637,
+                                      0.017201211, 0.0175772927]))
+
+        tout, yout = dimpulse((a, b, c, d, dt), n=10)
+
+        assert_equal(len(yout), 3)
+
+        for i in range(0, len(yout)):
+            assert_equal(yout[i].shape[0], 10)
+            assert_array_almost_equal(yout[i].flatten(), yout_imp_truth[i])
+
+        # Check that the other two inputs (tf, zpk) will work as well
+        tfin = ([1.0], [1.0, 1.0], 0.5)
+        yout_tfimpulse = np.asarray([0.0, 1.0, -1.0])
+        tout, yout = dimpulse(tfin, n=3)
+        assert_equal(len(yout), 1)
+        assert_array_almost_equal(yout[0].flatten(), yout_tfimpulse)
+
+        zpkin = tf2zpk(tfin[0], tfin[1]) + (0.5,)
+        tout, yout = dimpulse(zpkin, n=3)
+        assert_equal(len(yout), 1)
+        assert_array_almost_equal(yout[0].flatten(), yout_tfimpulse)
+
+        # Raise an error for continuous-time systems
+        system = lti([1], [1, 1])
+        assert_raises(AttributeError, dimpulse, system)
+
+    def test_dlsim_trivial(self):
+        a = np.array([[0.0]])
+        b = np.array([[0.0]])
+        c = np.array([[0.0]])
+        d = np.array([[0.0]])
+        n = 5
+        u = np.zeros(n).reshape(-1, 1)
+        tout, yout, xout = dlsim((a, b, c, d, 1), u)
+        assert_array_equal(tout, np.arange(float(n)))
+        assert_array_equal(yout, np.zeros((n, 1)))
+        assert_array_equal(xout, np.zeros((n, 1)))
+
+    def test_dlsim_simple1d(self):
+        a = np.array([[0.5]])
+        b = np.array([[0.0]])
+        c = np.array([[1.0]])
+        d = np.array([[0.0]])
+        n = 5
+        u = np.zeros(n).reshape(-1, 1)
+        tout, yout, xout = dlsim((a, b, c, d, 1), u, x0=1)
+        assert_array_equal(tout, np.arange(float(n)))
+        expected = (0.5 ** np.arange(float(n))).reshape(-1, 1)
+        assert_array_equal(yout, expected)
+        assert_array_equal(xout, expected)
+
+    def test_dlsim_simple2d(self):
+        lambda1 = 0.5
+        lambda2 = 0.25
+        a = np.array([[lambda1, 0.0],
+                      [0.0, lambda2]])
+        b = np.array([[0.0],
+                      [0.0]])
+        c = np.array([[1.0, 0.0],
+                      [0.0, 1.0]])
+        d = np.array([[0.0],
+                      [0.0]])
+        n = 5
+        u = np.zeros(n).reshape(-1, 1)
+        tout, yout, xout = dlsim((a, b, c, d, 1), u, x0=1)
+        assert_array_equal(tout, np.arange(float(n)))
+        # The analytical solution:
+        expected = (np.array([lambda1, lambda2]) **
+                                np.arange(float(n)).reshape(-1, 1))
+        assert_array_equal(yout, expected)
+        assert_array_equal(xout, expected)
+
+    def test_more_step_and_impulse(self):
+        lambda1 = 0.5
+        lambda2 = 0.75
+        a = np.array([[lambda1, 0.0],
+                      [0.0, lambda2]])
+        b = np.array([[1.0, 0.0],
+                      [0.0, 1.0]])
+        c = np.array([[1.0, 1.0]])
+        d = np.array([[0.0, 0.0]])
+
+        n = 10
+
+        # Check a step response.
+        ts, ys = dstep((a, b, c, d, 1), n=n)
+
+        # Create the exact step response.
+        stp0 = (1.0 / (1 - lambda1)) * (1.0 - lambda1 ** np.arange(n))
+        stp1 = (1.0 / (1 - lambda2)) * (1.0 - lambda2 ** np.arange(n))
+
+        assert_allclose(ys[0][:, 0], stp0)
+        assert_allclose(ys[1][:, 0], stp1)
+
+        # Check an impulse response with an initial condition.
+        x0 = np.array([1.0, 1.0])
+        ti, yi = dimpulse((a, b, c, d, 1), n=n, x0=x0)
+
+        # Create the exact impulse response.
+        imp = (np.array([lambda1, lambda2]) **
+                            np.arange(-1, n + 1).reshape(-1, 1))
+        imp[0, :] = 0.0
+        # Analytical solution to impulse response
+        y0 = imp[:n, 0] + np.dot(imp[1:n + 1, :], x0)
+        y1 = imp[:n, 1] + np.dot(imp[1:n + 1, :], x0)
+
+        assert_allclose(yi[0][:, 0], y0)
+        assert_allclose(yi[1][:, 0], y1)
+
+        # Check that dt=0.1, n=3 gives 3 time values.
+        system = ([1.0], [1.0, -0.5], 0.1)
+        t, (y,) = dstep(system, n=3)
+        assert_allclose(t, [0, 0.1, 0.2])
+        assert_array_equal(y.T, [[0, 1.0, 1.5]])
+        t, (y,) = dimpulse(system, n=3)
+        assert_allclose(t, [0, 0.1, 0.2])
+        assert_array_equal(y.T, [[0, 1, 0.5]])
+
+
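+# A compact cross-check (an illustrative sketch, not part of the original
+# suite; the _demo_* name is ours): for a discrete-time LTI system the step
+# response is the cumulative sum of the impulse response, which ties dstep
+# and dimpulse together.
+def _demo_step_is_cumsum_of_impulse():
+    system = ([1.0], [1.0, -0.5], 0.1)
+    _, (yi,) = dimpulse(system, n=10)
+    _, (ys,) = dstep(system, n=10)
+    assert_allclose(ys.ravel(), np.cumsum(yi.ravel()))
+
+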
+class TestDlti:
+    def test_dlti_instantiation(self):
+        # Test that lti can be instantiated.
+
+        dt = 0.05
+        # TransferFunction
+        s = dlti([1], [-1], dt=dt)
+        assert_(isinstance(s, TransferFunction))
+        assert_(isinstance(s, dlti))
+        assert_(not isinstance(s, lti))
+        assert_equal(s.dt, dt)
+
+        # ZerosPolesGain
+        s = dlti(np.array([]), np.array([-1]), 1, dt=dt)
+        assert_(isinstance(s, ZerosPolesGain))
+        assert_(isinstance(s, dlti))
+        assert_(not isinstance(s, lti))
+        assert_equal(s.dt, dt)
+
+        # StateSpace
+        s = dlti([1], [-1], 1, 3, dt=dt)
+        assert_(isinstance(s, StateSpace))
+        assert_(isinstance(s, dlti))
+        assert_(not isinstance(s, lti))
+        assert_equal(s.dt, dt)
+
+        # Number of inputs
+        assert_raises(ValueError, dlti, 1)
+        assert_raises(ValueError, dlti, 1, 1, 1, 1, 1)
+
+
+class TestStateSpaceDisc:
+    def test_initialization(self):
+        # Check that all initializations work
+        dt = 0.05
+        StateSpace(1, 1, 1, 1, dt=dt)
+        StateSpace([1], [2], [3], [4], dt=dt)
+        StateSpace(np.array([[1, 2], [3, 4]]), np.array([[1], [2]]),
+                   np.array([[1, 0]]), np.array([[0]]), dt=dt)
+        StateSpace(1, 1, 1, 1, dt=True)
+
+    def test_conversion(self):
+        # Check the conversion functions
+        s = StateSpace(1, 2, 3, 4, dt=0.05)
+        assert_(isinstance(s.to_ss(), StateSpace))
+        assert_(isinstance(s.to_tf(), TransferFunction))
+        assert_(isinstance(s.to_zpk(), ZerosPolesGain))
+
+        # Make sure copies work
+        assert_(StateSpace(s) is not s)
+        assert_(s.to_ss() is not s)
+
+    def test_properties(self):
+        # Test setters/getters for cross class properties.
+        # This implicitly tests to_tf() and to_zpk()
+
+        # Getters
+        s = StateSpace(1, 1, 1, 1, dt=0.05)
+        assert_equal(s.poles, [1])
+        assert_equal(s.zeros, [0])
+
+
+class TestTransferFunction:
+    def test_initialization(self):
+        # Check that all initializations work
+        dt = 0.05
+        TransferFunction(1, 1, dt=dt)
+        TransferFunction([1], [2], dt=dt)
+        TransferFunction(np.array([1]), np.array([2]), dt=dt)
+        TransferFunction(1, 1, dt=True)
+
+    def test_conversion(self):
+        # Check the conversion functions
+        s = TransferFunction([1, 0], [1, -1], dt=0.05)
+        assert_(isinstance(s.to_ss(), StateSpace))
+        assert_(isinstance(s.to_tf(), TransferFunction))
+        assert_(isinstance(s.to_zpk(), ZerosPolesGain))
+
+        # Make sure copies work
+        assert_(TransferFunction(s) is not s)
+        assert_(s.to_tf() is not s)
+
+    def test_properties(self):
+        # Test setters/getters for cross class properties.
+        # This implicitly tests to_ss() and to_zpk()
+
+        # Getters
+        s = TransferFunction([1, 0], [1, -1], dt=0.05)
+        assert_equal(s.poles, [1])
+        assert_equal(s.zeros, [0])
+
+
+class TestZerosPolesGain:
+    def test_initialization(self):
+        # Check that all initializations work
+        dt = 0.05
+        ZerosPolesGain(1, 1, 1, dt=dt)
+        ZerosPolesGain([1], [2], 1, dt=dt)
+        ZerosPolesGain(np.array([1]), np.array([2]), 1, dt=dt)
+        ZerosPolesGain(1, 1, 1, dt=True)
+
+    def test_conversion(self):
+        # Check the conversion functions
+        s = ZerosPolesGain(1, 2, 3, dt=0.05)
+        assert_(isinstance(s.to_ss(), StateSpace))
+        assert_(isinstance(s.to_tf(), TransferFunction))
+        assert_(isinstance(s.to_zpk(), ZerosPolesGain))
+
+        # Make sure copies work
+        assert_(ZerosPolesGain(s) is not s)
+        assert_(s.to_zpk() is not s)
+
+
+class Test_dfreqresp:
+
+    def test_manual(self):
+        # Test dfreqresp() real part calculation (manual sanity check).
+        # 1st order low-pass filter: H(z) = 1 / (z - 0.2),
+        system = TransferFunction(1, [1, -0.2], dt=0.1)
+        w = [0.1, 1, 10]
+        w, H = dfreqresp(system, w=w)
+
+        # test real
+        expected_re = [1.2383, 0.4130, -0.7553]
+        assert_almost_equal(H.real, expected_re, decimal=4)
+
+        # test imag
+        expected_im = [-0.1555, -1.0214, 0.3955]
+        assert_almost_equal(H.imag, expected_im, decimal=4)
+
+    def test_auto(self):
+        # Test dfreqresp() against a direct polynomial evaluation.
+        # 1st order low-pass filter: H(z) = 1 / (z - 0.2)
+        system = TransferFunction(1, [1, -0.2], dt=0.1)
+        w = [0.1, 1, 10, 100]
+        w, H = dfreqresp(system, w=w)
+        jw = np.exp(w * 1j)
+        y = np.polyval(system.num, jw) / np.polyval(system.den, jw)
+
+        # test real
+        expected_re = y.real
+        assert_almost_equal(H.real, expected_re)
+
+        # test imag
+        expected_im = y.imag
+        assert_almost_equal(H.imag, expected_im)
+
+    def test_freq_range(self):
+        # Test that dfreqresp() finds a reasonable frequency range.
+        # 1st order low-pass filter: H(z) = 1 / (z - 0.2)
+        # Expected range is [0, pi), i.e. n points below the Nyquist frequency.
+        system = TransferFunction(1, [1, -0.2], dt=0.1)
+        n = 10
+        expected_w = np.linspace(0, np.pi, 10, endpoint=False)
+        w, H = dfreqresp(system, n=n)
+        assert_almost_equal(w, expected_w)
+
+    def test_pole_one(self):
+        # Test that dfreqresp() doesn't fail on a system with a pole at
+        # frequency zero: discrete integrator H(z) = 1 / (z - 1), pole at z = 1.
+        system = TransferFunction([1], [1, -1], dt=0.1)
+
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, message="divide by zero")
+            sup.filter(RuntimeWarning, message="invalid value encountered")
+            w, H = dfreqresp(system, n=2)
+        assert_equal(w[0], 0.)  # a failure would produce NaN here
+
+    def test_error(self):
+        # Raise an error for continuous-time systems
+        system = lti([1], [1, 1])
+        assert_raises(AttributeError, dfreqresp, system)
+
+    def test_from_state_space(self):
+        # H(z) = 2 / (z^3 - 0.5 * z^2)
+
+        system_TF = dlti([2], [1, -0.5, 0, 0])
+
+        A = np.array([[0.5, 0, 0],
+                      [1, 0, 0],
+                      [0, 1, 0]])
+        B = np.array([[1, 0, 0]]).T
+        C = np.array([[0, 0, 2]])
+        D = 0
+
+        system_SS = dlti(A, B, C, D)
+        w = 10.0 ** np.arange(-3, 0, 0.5)
+        with suppress_warnings() as sup:
+            sup.filter(BadCoefficients)
+            w1, H1 = dfreqresp(system_TF, w=w)
+            w2, H2 = dfreqresp(system_SS, w=w)
+
+        assert_almost_equal(H1, H2)
+
+    def test_from_zpk(self):
+        # 1st order low-pass filter: H(z) = 0.3 / (z - 0.2)
+        system_ZPK = dlti([], [0.2], 0.3)
+        system_TF = dlti(0.3, [1, -0.2])
+        w = [0.1, 1, 10, 100]
+        w1, H1 = dfreqresp(system_ZPK, w=w)
+        w2, H2 = dfreqresp(system_TF, w=w)
+        assert_almost_equal(H1, H2)
+
+
+class Test_bode:
+
+    def test_manual(self):
+        # Test dbode() magnitude calculation (manual sanity check).
+        # 1st order low-pass filter: H(z) = 0.3 / (z - 0.2)
+        dt = 0.1
+        system = TransferFunction(0.3, [1, -0.2], dt=dt)
+        w = [0.1, 0.5, 1, np.pi]
+        w2, mag, phase = dbode(system, w=w)
+
+        # Test mag
+        expected_mag = [-8.5329, -8.8396, -9.6162, -12.0412]
+        assert_almost_equal(mag, expected_mag, decimal=4)
+
+        # Test phase
+        expected_phase = [-7.1575, -35.2814, -67.9809, -180.0000]
+        assert_almost_equal(phase, expected_phase, decimal=4)
+
+        # Test frequency
+        assert_equal(np.array(w) / dt, w2)
+
+    def test_auto(self):
+        # Test dbode() magnitude calculation.
+        # 1st order low-pass filter: H(z) = 0.3 / (z - 0.2)
+        system = TransferFunction(0.3, [1, -0.2], dt=0.1)
+        w = np.array([0.1, 0.5, 1, np.pi])
+        w2, mag, phase = dbode(system, w=w)
+        jw = np.exp(w * 1j)
+        y = np.polyval(system.num, jw) / np.polyval(system.den, jw)
+
+        # Test mag
+        expected_mag = 20.0 * np.log10(abs(y))
+        assert_almost_equal(mag, expected_mag)
+
+        # Test phase
+        expected_phase = np.rad2deg(np.angle(y))
+        assert_almost_equal(phase, expected_phase)
+
+    def test_range(self):
+        # Test that dbode() finds a reasonable frequency range.
+        # 1st order low-pass filter: H(z) = 0.3 / (z - 0.2)
+        dt = 0.1
+        system = TransferFunction(0.3, [1, -0.2], dt=dt)
+        n = 10
+        # Expected range is [0, pi/dt), i.e. up to the Nyquist rate.
+        expected_w = np.linspace(0, np.pi, n, endpoint=False) / dt
+        w, mag, phase = dbode(system, n=n)
+        assert_almost_equal(w, expected_w)
+
+    def test_pole_one(self):
+        # Test that dbode() doesn't fail on a system with a pole at frequency
+        # zero: discrete integrator H(z) = 1 / (z - 1), pole at z = 1.
+        system = TransferFunction([1], [1, -1], dt=0.1)
+
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, message="divide by zero")
+            sup.filter(RuntimeWarning, message="invalid value encountered")
+            w, mag, phase = dbode(system, n=2)
+        assert_equal(w[0], 0.)  # a failure would produce NaN here
+
+    def test_imaginary(self):
+        # dbode() should not fail on a system with pure imaginary poles.
+        # The test passes if bode doesn't raise an exception.
+        system = TransferFunction([1], [1, 0, 100], dt=0.1)
+        dbode(system, n=2)
+
+    def test_error(self):
+        # Raise an error for continuous-time systems
+        system = lti([1], [1, 1])
+        assert_raises(AttributeError, dbode, system)
+
+
+class TestTransferFunctionZConversion:
+    """Test private conversions between 'z' and 'z**-1' polynomials."""
+
+    def test_full(self):
+        # Numerator and denominator same order
+        num = [2, 3, 4]
+        den = [5, 6, 7]
+        num2, den2 = TransferFunction._z_to_zinv(num, den)
+        assert_equal(num, num2)
+        assert_equal(den, den2)
+
+        num2, den2 = TransferFunction._zinv_to_z(num, den)
+        assert_equal(num, num2)
+        assert_equal(den, den2)
+
+    def test_numerator(self):
+        # Numerator lower order than denominator
+        num = [2, 3]
+        den = [5, 6, 7]
+        num2, den2 = TransferFunction._z_to_zinv(num, den)
+        assert_equal([0, 2, 3], num2)
+        assert_equal(den, den2)
+
+        num2, den2 = TransferFunction._zinv_to_z(num, den)
+        assert_equal([2, 3, 0], num2)
+        assert_equal(den, den2)
+
+    def test_denominator(self):
+        # Numerator higher order than denominator
+        num = [2, 3, 4]
+        den = [5, 6]
+        num2, den2 = TransferFunction._z_to_zinv(num, den)
+        assert_equal(num, num2)
+        assert_equal([0, 5, 6], den2)
+
+        num2, den2 = TransferFunction._zinv_to_z(num, den)
+        assert_equal(num, num2)
+        assert_equal([5, 6, 0], den2)
+
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_filter_design.py b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_filter_design.py
new file mode 100644
index 00000000..18d78552
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_filter_design.py
@@ -0,0 +1,4156 @@
+import warnings
+
+from scipy._lib import _pep440
+import numpy as np
+from numpy.testing import (assert_array_almost_equal,
+                           assert_array_equal, assert_array_less,
+                           assert_equal, assert_,
+                           assert_allclose, assert_warns, suppress_warnings)
+import pytest
+from pytest import raises as assert_raises
+
+from numpy import array, spacing, sin, pi, sort, sqrt
+from scipy.signal import (argrelextrema, BadCoefficients, bessel, besselap, bilinear,
+                          buttap, butter, buttord, cheb1ap, cheb1ord, cheb2ap,
+                          cheb2ord, cheby1, cheby2, ellip, ellipap, ellipord,
+                          firwin, freqs_zpk, freqs, freqz, freqz_zpk,
+                          gammatone, group_delay, iircomb, iirdesign, iirfilter,
+                          iirnotch, iirpeak, lp2bp, lp2bs, lp2hp, lp2lp, normalize,
+                          sos2tf, sos2zpk, sosfreqz, tf2sos, tf2zpk, zpk2sos,
+                          zpk2tf, bilinear_zpk, lp2lp_zpk, lp2hp_zpk, lp2bp_zpk,
+                          lp2bs_zpk)
+from scipy.signal._filter_design import (_cplxreal, _cplxpair, _norm_factor,
+                                         _bessel_poly, _bessel_zeros)
+
+try:
+    import mpmath
+except ImportError:
+    mpmath = None
+
+
+def mpmath_check(min_ver):
+    return pytest.mark.skipif(mpmath is None or
+                              _pep440.parse(mpmath.__version__) < _pep440.Version(min_ver),
+                              reason="mpmath version >= %s required" % min_ver)
+
+
+class TestCplxPair:
+
+    def test_trivial_input(self):
+        assert_equal(_cplxpair([]).size, 0)
+        assert_equal(_cplxpair(1), 1)
+
+    def test_output_order(self):
+        assert_allclose(_cplxpair([1+1j, 1-1j]), [1-1j, 1+1j])
+
+        a = [1+1j, 1+1j, 1, 1-1j, 1-1j, 2]
+        b = [1-1j, 1+1j, 1-1j, 1+1j, 1, 2]
+        assert_allclose(_cplxpair(a), b)
+
+        # points spaced around the unit circle
+        z = np.exp(2j*pi*array([4, 3, 5, 2, 6, 1, 0])/7)
+        z1 = np.copy(z)
+        np.random.shuffle(z)
+        assert_allclose(_cplxpair(z), z1)
+        np.random.shuffle(z)
+        assert_allclose(_cplxpair(z), z1)
+        np.random.shuffle(z)
+        assert_allclose(_cplxpair(z), z1)
+
+        # Should be able to pair up all the conjugates
+        x = np.random.rand(10000) + 1j * np.random.rand(10000)
+        y = x.conj()
+        z = np.random.rand(10000)
+        x = np.concatenate((x, y, z))
+        np.random.shuffle(x)
+        c = _cplxpair(x)
+
+        # Every other element of head should be conjugates:
+        assert_allclose(c[0:20000:2], np.conj(c[1:20000:2]))
+        # Real parts of head should be in sorted order:
+        assert_allclose(c[0:20000:2].real, np.sort(c[0:20000:2].real))
+        # Tail should be sorted real numbers:
+        assert_allclose(c[20000:], np.sort(c[20000:]))
+
+    def test_real_integer_input(self):
+        assert_array_equal(_cplxpair([2, 0, 1]), [0, 1, 2])
+
+    def test_tolerances(self):
+        eps = spacing(1)
+        assert_allclose(_cplxpair([1j, -1j, 1+1j*eps], tol=2*eps),
+                        [-1j, 1j, 1+1j*eps])
+
+        # sorting close to 0
+        assert_allclose(_cplxpair([-eps+1j, +eps-1j]), [-1j, +1j])
+        assert_allclose(_cplxpair([+eps+1j, -eps-1j]), [-1j, +1j])
+        assert_allclose(_cplxpair([+1j, -1j]), [-1j, +1j])
+
+    def test_unmatched_conjugates(self):
+        # 1+2j is unmatched
+        assert_raises(ValueError, _cplxpair, [1+3j, 1-3j, 1+2j])
+
+        # 1+2j and 1-3j are unmatched
+        assert_raises(ValueError, _cplxpair, [1+3j, 1-3j, 1+2j, 1-3j])
+
+        # 1+3j is unmatched
+        assert_raises(ValueError, _cplxpair, [1+3j, 1-3j, 1+3j])
+
+        # Not conjugates
+        assert_raises(ValueError, _cplxpair, [4+5j, 4+5j])
+        assert_raises(ValueError, _cplxpair, [1-7j, 1-7j])
+
+        # No pairs
+        assert_raises(ValueError, _cplxpair, [1+3j])
+        assert_raises(ValueError, _cplxpair, [1-3j])
+
+
+class TestCplxReal:
+
+    def test_trivial_input(self):
+        assert_equal(_cplxreal([]), ([], []))
+        assert_equal(_cplxreal(1), ([], [1]))
+
+    def test_output_order(self):
+        zc, zr = _cplxreal(np.roots(array([1, 0, 0, 1])))
+        assert_allclose(np.append(zc, zr), [1/2 + 1j*sin(pi/3), -1])
+
+        eps = spacing(1)
+
+        a = [0+1j, 0-1j, eps + 1j, eps - 1j, -eps + 1j, -eps - 1j,
+             1, 4, 2, 3, 0, 0,
+             2+3j, 2-3j,
+             1-eps + 1j, 1+2j, 1-2j, 1+eps - 1j,  # sorts out of order
+             3+1j, 3+1j, 3+1j, 3-1j, 3-1j, 3-1j,
+             2-3j, 2+3j]
+        zc, zr = _cplxreal(a)
+        assert_allclose(zc, [1j, 1j, 1j, 1+1j, 1+2j, 2+3j, 2+3j, 3+1j, 3+1j,
+                             3+1j])
+        assert_allclose(zr, [0, 0, 1, 2, 3, 4])
+
+        z = array([1-eps + 1j, 1+2j, 1-2j, 1+eps - 1j, 1+eps+3j, 1-2*eps-3j,
+                   0+1j, 0-1j, 2+4j, 2-4j, 2+3j, 2-3j, 3+7j, 3-7j, 4-eps+1j,
+                   4+eps-2j, 4-1j, 4-eps+2j])
+
+        zc, zr = _cplxreal(z)
+        assert_allclose(zc, [1j, 1+1j, 1+2j, 1+3j, 2+3j, 2+4j, 3+7j, 4+1j,
+                             4+2j])
+        assert_equal(zr, [])
+
+    def test_unmatched_conjugates(self):
+        # 1+2j is unmatched
+        assert_raises(ValueError, _cplxreal, [1+3j, 1-3j, 1+2j])
+
+        # 1+2j and 1-3j are unmatched
+        assert_raises(ValueError, _cplxreal, [1+3j, 1-3j, 1+2j, 1-3j])
+
+        # 1+3j is unmatched
+        assert_raises(ValueError, _cplxreal, [1+3j, 1-3j, 1+3j])
+
+        # No pairs
+        assert_raises(ValueError, _cplxreal, [1+3j])
+        assert_raises(ValueError, _cplxreal, [1-3j])
+
+    def test_real_integer_input(self):
+        zc, zr = _cplxreal([2, 0, 1, 4])
+        assert_array_equal(zc, [])
+        assert_array_equal(zr, [0, 1, 2, 4])
+
+
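+# A brief illustrative sketch (not part of the original suite; the _demo_*
+# name is ours): _cplxreal splits a set of roots into conjugate pairs
+# (keeping the representative with positive imaginary part) and the purely
+# real roots, each sorted by real part.
+def _demo_cplxreal_split():
+    zc, zr = _cplxreal([1 + 2j, 1 - 2j, 3.0, -0.5])
+    assert_allclose(zc, [1 + 2j])
+    assert_allclose(zr, [-0.5, 3.0])
+
+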
+class TestTf2zpk:
+
+    @pytest.mark.parametrize('dt', (np.float64, np.complex128))
+    def test_simple(self, dt):
+        z_r = np.array([0.5, -0.5])
+        p_r = np.array([1.j / np.sqrt(2), -1.j / np.sqrt(2)])
+        # Sort the zeros/poles so that we don't fail the test if the order
+        # changes
+        z_r.sort()
+        p_r.sort()
+        b = np.poly(z_r).astype(dt)
+        a = np.poly(p_r).astype(dt)
+
+        z, p, k = tf2zpk(b, a)
+        z.sort()
+        # The real part of `p` is ~0.0, so sort by imaginary part
+        p = p[np.argsort(p.imag)]
+
+        assert_array_almost_equal(z, z_r)
+        assert_array_almost_equal(p, p_r)
+        assert_array_almost_equal(k, 1.)
+        assert k.dtype == dt
+
+    def test_bad_filter(self):
+        # Regression test for #651: better handling of badly conditioned
+        # filter coefficients.
+        with suppress_warnings():
+            warnings.simplefilter("error", BadCoefficients)
+            assert_raises(BadCoefficients, tf2zpk, [1e-15], [1.0, 1.0])
+
+
+class TestZpk2Tf:
+
+    def test_identity(self):
+        """Test the identity transfer function."""
+        z = []
+        p = []
+        k = 1.
+        b, a = zpk2tf(z, p, k)
+        b_r = np.array([1.])  # desired result
+        a_r = np.array([1.])  # desired result
+        # The test for the *type* of the return values is a regression
+        # test for ticket #1095. In the case p=[], zpk2tf used to
+        # return the scalar 1.0 instead of array([1.0]).
+        assert_array_equal(b, b_r)
+        assert_(isinstance(b, np.ndarray))
+        assert_array_equal(a, a_r)
+        assert_(isinstance(a, np.ndarray))
+
+
+class TestSos2Zpk:
+
+    def test_basic(self):
+        sos = [[1, 0, 1, 1, 0, -0.81],
+               [1, 0, 0, 1, 0, +0.49]]
+        z, p, k = sos2zpk(sos)
+        z2 = [1j, -1j, 0, 0]
+        p2 = [0.9, -0.9, 0.7j, -0.7j]
+        k2 = 1
+        assert_array_almost_equal(sort(z), sort(z2), decimal=4)
+        assert_array_almost_equal(sort(p), sort(p2), decimal=4)
+        assert_array_almost_equal(k, k2)
+
+        sos = [[1.00000, +0.61803, 1.0000, 1.00000, +0.60515, 0.95873],
+               [1.00000, -1.61803, 1.0000, 1.00000, -1.58430, 0.95873],
+               [1.00000, +1.00000, 0.0000, 1.00000, +0.97915, 0.00000]]
+        z, p, k = sos2zpk(sos)
+        z2 = [-0.3090 + 0.9511j, -0.3090 - 0.9511j, 0.8090 + 0.5878j,
+              0.8090 - 0.5878j, -1.0000 + 0.0000j, 0]
+        p2 = [-0.3026 + 0.9312j, -0.3026 - 0.9312j, 0.7922 + 0.5755j,
+              0.7922 - 0.5755j, -0.9791 + 0.0000j, 0]
+        k2 = 1
+        assert_array_almost_equal(sort(z), sort(z2), decimal=4)
+        assert_array_almost_equal(sort(p), sort(p2), decimal=4)
+
+        sos = array([[1, 2, 3, 1, 0.2, 0.3],
+                     [4, 5, 6, 1, 0.4, 0.5]])
+        z = array([-1 - 1.41421356237310j, -1 + 1.41421356237310j,
+                  -0.625 - 1.05326872164704j, -0.625 + 1.05326872164704j])
+        p = array([-0.2 - 0.678232998312527j, -0.2 + 0.678232998312527j,
+                  -0.1 - 0.538516480713450j, -0.1 + 0.538516480713450j])
+        k = 4
+        z2, p2, k2 = sos2zpk(sos)
+        assert_allclose(_cplxpair(z2), z)
+        assert_allclose(_cplxpair(p2), p)
+        assert_allclose(k2, k)
+
+    def test_fewer_zeros(self):
+        """Test not the expected number of p/z (effectively at origin)."""
+        sos = butter(3, 0.1, output='sos')
+        z, p, k = sos2zpk(sos)
+        assert len(z) == 4
+        assert len(p) == 4
+
+        sos = butter(12, [5., 30.], 'bandpass', fs=1200., analog=False,
+                    output='sos')
+        with pytest.warns(BadCoefficients, match='Badly conditioned'):
+            z, p, k = sos2zpk(sos)
+        assert len(z) == 24
+        assert len(p) == 24
+
+
+class TestSos2Tf:
+
+    def test_basic(self):
+        sos = [[1, 1, 1, 1, 0, -1],
+               [-2, 3, 1, 1, 10, 1]]
+        b, a = sos2tf(sos)
+        assert_array_almost_equal(b, [-2, 1, 2, 4, 1])
+        assert_array_almost_equal(a, [1, 10, 0, -10, -1])
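+
+
+# A minimal illustrative sketch of what sos2tf computes: the overall transfer
+# function is the product of the per-section biquads, i.e. the convolution of
+# the numerator rows and of the denominator rows (assumes numpy as np and the
+# scipy.signal imports used in this file):
+def _sos2tf_by_hand_sketch():
+    from scipy.signal import sos2tf
+    sos = np.array([[1., 1., 1., 1., 0., -1.],
+                    [-2., 3., 1., 1., 10., 1.]])
+    b = np.convolve(sos[0, :3], sos[1, :3])  # [-2., 1., 2., 4., 1.]
+    a = np.convolve(sos[0, 3:], sos[1, 3:])  # [1., 10., 0., -10., -1.]
+    b2, a2 = sos2tf(sos)
+    return np.allclose(b, b2) and np.allclose(a, a2)  # True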
+
+
+class TestTf2Sos:
+
+    def test_basic(self):
+        num = [2, 16, 44, 56, 32]
+        den = [3, 3, -15, 18, -12]
+        sos = tf2sos(num, den)
+        sos2 = [[0.6667, 4.0000, 5.3333, 1.0000, +2.0000, -4.0000],
+                [1.0000, 2.0000, 2.0000, 1.0000, -1.0000, +1.0000]]
+        assert_array_almost_equal(sos, sos2, decimal=4)
+
+        b = [1, -3, 11, -27, 18]
+        a = [16, 12, 2, -4, -1]
+        sos = tf2sos(b, a)
+        sos2 = [[0.0625, -0.1875, 0.1250, 1.0000, -0.2500, -0.1250],
+                [1.0000, +0.0000, 9.0000, 1.0000, +1.0000, +0.5000]]
+        # The ordering of the computed sections can differ from this MATLAB
+        # reference (the sections themselves are equivalent), so the strict
+        # comparison below is left disabled:
+        # assert_array_almost_equal(sos, sos2, decimal=4)
+
+    @pytest.mark.parametrize('b, a, analog, sos',
+                             [([1], [1], False, [[1., 0., 0., 1., 0., 0.]]),
+                              ([1], [1], True, [[0., 0., 1., 0., 0., 1.]]),
+                              ([1], [1., 0., -1.01, 0, 0.01], False,
+                               [[1., 0., 0., 1., 0., -0.01],
+                                [1., 0., 0., 1., 0., -1]]),
+                              ([1], [1., 0., -1.01, 0, 0.01], True,
+                               [[0., 0., 1., 1., 0., -1],
+                                [0., 0., 1., 1., 0., -0.01]])])
+    def test_analog(self, b, a, analog, sos):
+        sos2 = tf2sos(b, a, analog=analog)
+        assert_array_almost_equal(sos, sos2, decimal=4)
+
+
+class TestZpk2Sos:
+
+    @pytest.mark.parametrize('dt', 'fdgFDG')
+    @pytest.mark.parametrize('pairing, analog',
+                             [('nearest', False),
+                              ('keep_odd', False),
+                              ('minimal', False),
+                              ('minimal', True)])
+    def test_dtypes(self, dt, pairing, analog):
+        z = np.array([-1, -1]).astype(dt)
+        ct = dt.upper()  # the poles have to be complex
+        p = np.array([0.57149 + 0.29360j, 0.57149 - 0.29360j]).astype(ct)
+        k = np.array(1).astype(dt)
+        sos = zpk2sos(z, p, k, pairing=pairing, analog=analog)
+        sos2 = [[1, 2, 1, 1, -1.14298, 0.41280]]  # octave & MATLAB
+        assert_array_almost_equal(sos, sos2, decimal=4)
+
+    def test_basic(self):
+        for pairing in ('nearest', 'keep_odd'):
+            #
+            # Cases that match octave
+            #
+
+            z = [-1, -1]
+            p = [0.57149 + 0.29360j, 0.57149 - 0.29360j]
+            k = 1
+            sos = zpk2sos(z, p, k, pairing=pairing)
+            sos2 = [[1, 2, 1, 1, -1.14298, 0.41280]]  # octave & MATLAB
+            assert_array_almost_equal(sos, sos2, decimal=4)
+
+            z = [1j, -1j]
+            p = [0.9, -0.9, 0.7j, -0.7j]
+            k = 1
+            sos = zpk2sos(z, p, k, pairing=pairing)
+            sos2 = [[1, 0, 1, 1, 0, +0.49],
+                    [1, 0, 0, 1, 0, -0.81]]  # octave
+            # sos2 = [[0, 0, 1, 1, -0.9, 0],
+            #         [1, 0, 1, 1, 0.9, 0]]  # MATLAB
+            assert_array_almost_equal(sos, sos2, decimal=4)
+
+            z = []
+            p = [0.8, -0.5+0.25j, -0.5-0.25j]
+            k = 1.
+            sos = zpk2sos(z, p, k, pairing=pairing)
+            sos2 = [[1., 0., 0., 1., 1., 0.3125],
+                    [1., 0., 0., 1., -0.8, 0.]]  # octave, MATLAB fails
+            assert_array_almost_equal(sos, sos2, decimal=4)
+
+            z = [1., 1., 0.9j, -0.9j]
+            p = [0.99+0.01j, 0.99-0.01j, 0.1+0.9j, 0.1-0.9j]
+            k = 1
+            sos = zpk2sos(z, p, k, pairing=pairing)
+            sos2 = [[1, 0, 0.81, 1, -0.2, 0.82],
+                    [1, -2, 1, 1, -1.98, 0.9802]]  # octave
+            # sos2 = [[1, -2, 1, 1,  -0.2, 0.82],
+            #         [1, 0, 0.81, 1, -1.98, 0.9802]]  # MATLAB
+            assert_array_almost_equal(sos, sos2, decimal=4)
+
+            z = [0.9+0.1j, 0.9-0.1j, -0.9]
+            p = [0.75+0.25j, 0.75-0.25j, 0.9]
+            k = 1
+            sos = zpk2sos(z, p, k, pairing=pairing)
+            if pairing == 'keep_odd':
+                sos2 = [[1, -1.8, 0.82, 1, -1.5, 0.625],
+                        [1, 0.9, 0, 1, -0.9, 0]]  # octave; MATLAB fails
+                assert_array_almost_equal(sos, sos2, decimal=4)
+            else:  # pairing == 'nearest'
+                sos2 = [[1, 0.9, 0, 1, -1.5, 0.625],
+                        [1, -1.8, 0.82, 1, -0.9, 0]]  # our algorithm
+                assert_array_almost_equal(sos, sos2, decimal=4)
+
+            #
+            # Cases that differ from octave:
+            #
+
+            z = [-0.3090 + 0.9511j, -0.3090 - 0.9511j, 0.8090 + 0.5878j,
+                 +0.8090 - 0.5878j, -1.0000 + 0.0000j]
+            p = [-0.3026 + 0.9312j, -0.3026 - 0.9312j, 0.7922 + 0.5755j,
+                 +0.7922 - 0.5755j, -0.9791 + 0.0000j]
+            k = 1
+            sos = zpk2sos(z, p, k, pairing=pairing)
+            # sos2 = [[1, 0.618, 1, 1, 0.6052, 0.95870],
+            #         [1, -1.618, 1, 1, -1.5844, 0.95878],
+            #         [1, 1, 0, 1, 0.9791, 0]]  # octave, MATLAB fails
+            sos2 = [[1, 1, 0, 1, +0.97915, 0],
+                    [1, 0.61803, 1, 1, +0.60515, 0.95873],
+                    [1, -1.61803, 1, 1, -1.58430, 0.95873]]
+            assert_array_almost_equal(sos, sos2, decimal=4)
+
+            z = [-1 - 1.4142j, -1 + 1.4142j,
+                 -0.625 - 1.0533j, -0.625 + 1.0533j]
+            p = [-0.2 - 0.6782j, -0.2 + 0.6782j,
+                 -0.1 - 0.5385j, -0.1 + 0.5385j]
+            k = 4
+            sos = zpk2sos(z, p, k, pairing=pairing)
+            sos2 = [[4, 8, 12, 1, 0.2, 0.3],
+                    [1, 1.25, 1.5, 1, 0.4, 0.5]]  # MATLAB
+            # sos2 = [[4, 8, 12, 1, 0.4, 0.5],
+            #         [1, 1.25, 1.5, 1, 0.2, 0.3]]  # octave
+            assert_allclose(sos, sos2, rtol=1e-4, atol=1e-4)
+
+            z = []
+            p = [0.2, -0.5+0.25j, -0.5-0.25j]
+            k = 1.
+            sos = zpk2sos(z, p, k, pairing=pairing)
+            sos2 = [[1., 0., 0., 1., -0.2, 0.],
+                    [1., 0., 0., 1., 1., 0.3125]]
+            # sos2 = [[1., 0., 0., 1., 1., 0.3125],
+            #         [1., 0., 0., 1., -0.2, 0]]  # octave, MATLAB fails
+            assert_array_almost_equal(sos, sos2, decimal=4)
+
+            # The next two examples are adapted from Leland B. Jackson,
+            # "Digital Filters and Signal Processing (1995) p.400:
+            # http://books.google.com/books?id=VZ8uabI1pNMC&lpg=PA400&ots=gRD9pi8Jua&dq=Pole%2Fzero%20pairing%20for%20minimum%20roundoff%20noise%20in%20BSF.&pg=PA400#v=onepage&q=Pole%2Fzero%20pairing%20for%20minimum%20roundoff%20noise%20in%20BSF.&f=false
+
+            deg2rad = np.pi / 180.
+            k = 1.
+
+            # first example
+            thetas = [22.5, 45, 77.5]
+            mags = [0.8, 0.6, 0.9]
+            z = np.array([np.exp(theta * deg2rad * 1j) for theta in thetas])
+            z = np.concatenate((z, np.conj(z)))
+            p = np.array([mag * np.exp(theta * deg2rad * 1j)
+                          for theta, mag in zip(thetas, mags)])
+            p = np.concatenate((p, np.conj(p)))
+            sos = zpk2sos(z, p, k)
+            # sos2 = [[1, -0.43288, 1, 1, -0.38959, 0.81],  # octave,
+            #         [1, -1.41421, 1, 1, -0.84853, 0.36],  # MATLAB fails
+            #         [1, -1.84776, 1, 1, -1.47821, 0.64]]
+            # Note that pole-zero pairing matches, but ordering is different
+            sos2 = [[1, -1.41421, 1, 1, -0.84853, 0.36],
+                    [1, -1.84776, 1, 1, -1.47821, 0.64],
+                    [1, -0.43288, 1, 1, -0.38959, 0.81]]
+            assert_array_almost_equal(sos, sos2, decimal=4)
+
+            # second example
+            z = np.array([np.exp(theta * deg2rad * 1j)
+                          for theta in (85., 10.)])
+            z = np.concatenate((z, np.conj(z), [1, -1]))
+            sos = zpk2sos(z, p, k)
+
+            # sos2 = [[1, -0.17431, 1, 1, -0.38959, 0.81],  # octave "wrong",
+            #         [1, -1.96962, 1, 1, -0.84853, 0.36],  # MATLAB fails
+            #         [1, 0, -1, 1, -1.47821, 0.64000]]
+            # Our pole-zero pairing matches the text, Octave does not
+            sos2 = [[1, 0, -1, 1, -0.84853, 0.36],
+                    [1, -1.96962, 1, 1, -1.47821, 0.64],
+                    [1, -0.17431, 1, 1, -0.38959, 0.81]]
+            assert_array_almost_equal(sos, sos2, decimal=4)
+
+    # These examples are taken from the zpk2sos docstring and show the
+    # effect of the 'pairing' argument.
+    @pytest.mark.parametrize('pairing, sos',
+                             [('nearest',
+                               np.array([[1., 1., 0.5, 1., -0.75, 0.],
+                                         [1., 1., 0., 1., -1.6, 0.65]])),
+                              ('keep_odd',
+                               np.array([[1., 1., 0, 1., -0.75, 0.],
+                                         [1., 1., 0.5, 1., -1.6, 0.65]])),
+                              ('minimal',
+                               np.array([[0., 1., 1., 0., 1., -0.75],
+                                         [1., 1., 0.5, 1., -1.6, 0.65]]))])
+    def test_pairing(self, pairing, sos):
+        z1 = np.array([-1, -0.5-0.5j, -0.5+0.5j])
+        p1 = np.array([0.75, 0.8+0.1j, 0.8-0.1j])
+        sos2 = zpk2sos(z1, p1, 1, pairing=pairing)
+        assert_array_almost_equal(sos, sos2, decimal=4)
+
+    @pytest.mark.parametrize('p, sos_dt',
+                             [([-1, 1, -0.1, 0.1],
+                               [[0., 0., 1., 1., 0., -0.01],
+                                [0., 0., 1., 1., 0., -1]]),
+                              ([-0.7071+0.7071j, -0.7071-0.7071j, -0.1j, 0.1j],
+                               [[0., 0., 1., 1., 0., 0.01],
+                                [0., 0., 1., 1., 1.4142, 1.]])])
+    def test_analog(self, p, sos_dt):
+        # test `analog` argument
+        # for discrete time, poles closest to unit circle should appear last
+        # for cont. time, poles closest to imaginary axis should appear last
+        sos2_dt = zpk2sos([], p, 1, pairing='minimal', analog=False)
+        sos2_ct = zpk2sos([], p, 1, pairing='minimal', analog=True)
+        assert_array_almost_equal(sos_dt, sos2_dt, decimal=4)
+        assert_array_almost_equal(sos_dt[::-1], sos2_ct, decimal=4)
+
+    def test_bad_args(self):
+        with pytest.raises(ValueError, match=r'pairing must be one of'):
+            zpk2sos([1], [2], 1, pairing='no_such_pairing')
+
+        with pytest.raises(ValueError, match=r'.*pairing must be "minimal"'):
+            zpk2sos([1], [2], 1, pairing='keep_odd', analog=True)
+
+        with pytest.raises(ValueError,
+                           match=r'.*must have len\(p\)>=len\(z\)'):
+            zpk2sos([1, 1], [2], 1, analog=True)
+
+        with pytest.raises(ValueError, match=r'k must be real'):
+            zpk2sos([1], [2], k=1j)
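+
+
+# A minimal illustrative sketch of the three 'pairing' modes compared in
+# test_pairing above; the same zeros/poles/gain yield differently paired and
+# ordered second-order sections (assumes numpy as np and scipy.signal):
+def _zpk2sos_pairing_sketch():
+    from scipy.signal import zpk2sos
+    z = np.array([-1, -0.5 - 0.5j, -0.5 + 0.5j])
+    p = np.array([0.75, 0.8 + 0.1j, 0.8 - 0.1j])
+    # 'minimal' keeps the odd pole/zero in a first-order section, while
+    # 'nearest' and 'keep_odd' pad every section to a full biquad.
+    return {pairing: zpk2sos(z, p, 1, pairing=pairing)
+            for pairing in ('nearest', 'keep_odd', 'minimal')}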
+
+
+class TestFreqs:
+
+    def test_basic(self):
+        _, h = freqs([1.0], [1.0], worN=8)
+        assert_array_almost_equal(h, np.ones(8))
+
+    def test_output(self):
+        # 1st order low-pass filter: H(s) = 1 / (s + 1)
+        w = [0.1, 1, 10, 100]
+        num = [1]
+        den = [1, 1]
+        w, H = freqs(num, den, worN=w)
+        s = w * 1j
+        expected = 1 / (s + 1)
+        assert_array_almost_equal(H.real, expected.real)
+        assert_array_almost_equal(H.imag, expected.imag)
+
+    def test_freq_range(self):
+        # Test that freqs() finds a reasonable frequency range.
+        # 1st order low-pass filter: H(s) = 1 / (s + 1)
+        # Expected range is from 0.01 to 10.
+        num = [1]
+        den = [1, 1]
+        n = 10
+        expected_w = np.logspace(-2, 1, n)
+        w, H = freqs(num, den, worN=n)
+        assert_array_almost_equal(w, expected_w)
+
+    def test_plot(self):
+
+        def plot(w, h):
+            assert_array_almost_equal(h, np.ones(8))
+
+        assert_raises(ZeroDivisionError, freqs, [1.0], [1.0], worN=8,
+                      plot=lambda w, h: 1 / 0)
+        freqs([1.0], [1.0], worN=8, plot=plot)
+
+    def test_backward_compat(self):
+        # For backward compatibility, check that worN=None behaves like the default.
+        w1, h1 = freqs([1.0], [1.0])
+        w2, h2 = freqs([1.0], [1.0], None)
+        assert_array_almost_equal(w1, w2)
+        assert_array_almost_equal(h1, h2)
+
+    def test_w_or_N_types(self):
+        # Measure at 8 equally-spaced points
+        for N in (8, np.int8(8), np.int16(8), np.int32(8), np.int64(8),
+                  np.array(8)):
+            w, h = freqs([1.0], [1.0], worN=N)
+            assert_equal(len(w), 8)
+            assert_array_almost_equal(h, np.ones(8))
+
+        # Measure at frequency 8 rad/sec
+        for w in (8.0, 8.0+0j):
+            w_out, h = freqs([1.0], [1.0], worN=w)
+            assert_array_almost_equal(w_out, [8])
+            assert_array_almost_equal(h, [1])
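+
+
+# A minimal illustrative sketch of the default frequency grid checked by
+# test_freq_range above: with an integer worN, freqs picks a log-spaced range
+# around the interesting region implied by the filter's poles and zeros
+# (assumes numpy as np and scipy.signal):
+def _freqs_default_range_sketch():
+    from scipy.signal import freqs
+    # H(s) = 1 / (s + 1): the pole at s = -1 puts the range at 1e-2..1e1
+    w, h = freqs([1], [1, 1], worN=10)
+    return np.allclose(w, np.logspace(-2, 1, 10))  # True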
+
+
+class TestFreqs_zpk:
+
+    def test_basic(self):
+        _, h = freqs_zpk([1.0], [1.0], [1.0], worN=8)
+        assert_array_almost_equal(h, np.ones(8))
+
+    def test_output(self):
+        # 1st order low-pass filter: H(s) = 1 / (s + 1)
+        w = [0.1, 1, 10, 100]
+        z = []
+        p = [-1]
+        k = 1
+        w, H = freqs_zpk(z, p, k, worN=w)
+        s = w * 1j
+        expected = 1 / (s + 1)
+        assert_array_almost_equal(H.real, expected.real)
+        assert_array_almost_equal(H.imag, expected.imag)
+
+    def test_freq_range(self):
+        # Test that freqs_zpk() finds a reasonable frequency range.
+        # 1st order low-pass filter: H(s) = 1 / (s + 1)
+        # Expected range is from 0.01 to 10.
+        z = []
+        p = [-1]
+        k = 1
+        n = 10
+        expected_w = np.logspace(-2, 1, n)
+        w, H = freqs_zpk(z, p, k, worN=n)
+        assert_array_almost_equal(w, expected_w)
+
+    def test_vs_freqs(self):
+        b, a = cheby1(4, 5, 100, analog=True, output='ba')
+        z, p, k = cheby1(4, 5, 100, analog=True, output='zpk')
+
+        w1, h1 = freqs(b, a)
+        w2, h2 = freqs_zpk(z, p, k)
+        assert_allclose(w1, w2)
+        assert_allclose(h1, h2, rtol=1e-6)
+
+    def test_backward_compat(self):
+        # For backward compatibility, check that worN=None behaves like the default.
+        w1, h1 = freqs_zpk([1.0], [1.0], [1.0])
+        w2, h2 = freqs_zpk([1.0], [1.0], [1.0], None)
+        assert_array_almost_equal(w1, w2)
+        assert_array_almost_equal(h1, h2)
+
+    def test_w_or_N_types(self):
+        # Measure at 8 equally-spaced points
+        for N in (8, np.int8(8), np.int16(8), np.int32(8), np.int64(8),
+                  np.array(8)):
+            w, h = freqs_zpk([], [], 1, worN=N)
+            assert_equal(len(w), 8)
+            assert_array_almost_equal(h, np.ones(8))
+
+        # Measure at frequency 8 rad/sec
+        for w in (8.0, 8.0+0j):
+            w_out, h = freqs_zpk([], [], 1, worN=w)
+            assert_array_almost_equal(w_out, [8])
+            assert_array_almost_equal(h, [1])
+
+
+class TestFreqz:
+
+    def test_ticket1441(self):
+        """Regression test for ticket 1441."""
+        # Because freqz previously used arange instead of linspace,
+        # when N was large, it would return one more point than
+        # requested.
+        N = 100000
+        w, h = freqz([1.0], worN=N)
+        assert_equal(w.shape, (N,))
+
+    def test_basic(self):
+        w, h = freqz([1.0], worN=8)
+        assert_array_almost_equal(w, np.pi * np.arange(8) / 8.)
+        assert_array_almost_equal(h, np.ones(8))
+        w, h = freqz([1.0], worN=9)
+        assert_array_almost_equal(w, np.pi * np.arange(9) / 9.)
+        assert_array_almost_equal(h, np.ones(9))
+
+        for a in [1, np.ones(2)]:
+            w, h = freqz(np.ones(2), a, worN=0)
+            assert_equal(w.shape, (0,))
+            assert_equal(h.shape, (0,))
+            assert_equal(h.dtype, np.dtype('complex128'))
+
+        t = np.linspace(0, 1, 4, endpoint=False)
+        for b, a, h_whole in zip(
+                ([1., 0, 0, 0], np.sin(2 * np.pi * t)),
+                ([1., 0, 0, 0], [0.5, 0, 0, 0]),
+                ([1., 1., 1., 1.], [0, -4j, 0, 4j])):
+            w, h = freqz(b, a, worN=4, whole=True)
+            expected_w = np.linspace(0, 2 * np.pi, 4, endpoint=False)
+            assert_array_almost_equal(w, expected_w)
+            assert_array_almost_equal(h, h_whole)
+            # simultaneously check int-like support
+            w, h = freqz(b, a, worN=np.int32(4), whole=True)
+            assert_array_almost_equal(w, expected_w)
+            assert_array_almost_equal(h, h_whole)
+            w, h = freqz(b, a, worN=w, whole=True)
+            assert_array_almost_equal(w, expected_w)
+            assert_array_almost_equal(h, h_whole)
+
+    def test_basic_whole(self):
+        w, h = freqz([1.0], worN=8, whole=True)
+        assert_array_almost_equal(w, 2 * np.pi * np.arange(8.0) / 8)
+        assert_array_almost_equal(h, np.ones(8))
+
+    def test_plot(self):
+
+        def plot(w, h):
+            assert_array_almost_equal(w, np.pi * np.arange(8.0) / 8)
+            assert_array_almost_equal(h, np.ones(8))
+
+        assert_raises(ZeroDivisionError, freqz, [1.0], worN=8,
+                      plot=lambda w, h: 1 / 0)
+        freqz([1.0], worN=8, plot=plot)
+
+    def test_fft_wrapping(self):
+        # Some simple real FIR filters
+        bs = list()  # filters
+        as_ = list()
+        hs_whole = list()
+        hs_half = list()
+        # 3 taps
+        t = np.linspace(0, 1, 3, endpoint=False)
+        bs.append(np.sin(2 * np.pi * t))
+        as_.append(3.)
+        hs_whole.append([0, -0.5j, 0.5j])
+        hs_half.append([0, np.sqrt(1./12.), -0.5j])
+        # 4 taps
+        t = np.linspace(0, 1, 4, endpoint=False)
+        bs.append(np.sin(2 * np.pi * t))
+        as_.append(0.5)
+        hs_whole.append([0, -4j, 0, 4j])
+        hs_half.append([0, np.sqrt(8), -4j, -np.sqrt(8)])
+        del t
+        for ii, b in enumerate(bs):
+            # whole
+            a = as_[ii]
+            expected_w = np.linspace(0, 2 * np.pi, len(b), endpoint=False)
+            w, h = freqz(b, a, worN=expected_w, whole=True)  # polyval
+            err_msg = 'b = %s, a=%s' % (b, a)
+            assert_array_almost_equal(w, expected_w, err_msg=err_msg)
+            assert_array_almost_equal(h, hs_whole[ii], err_msg=err_msg)
+            w, h = freqz(b, a, worN=len(b), whole=True)  # FFT
+            assert_array_almost_equal(w, expected_w, err_msg=err_msg)
+            assert_array_almost_equal(h, hs_whole[ii], err_msg=err_msg)
+            # non-whole
+            expected_w = np.linspace(0, np.pi, len(b), endpoint=False)
+            w, h = freqz(b, a, worN=expected_w, whole=False)  # polyval
+            assert_array_almost_equal(w, expected_w, err_msg=err_msg)
+            assert_array_almost_equal(h, hs_half[ii], err_msg=err_msg)
+            w, h = freqz(b, a, worN=len(b), whole=False)  # FFT
+            assert_array_almost_equal(w, expected_w, err_msg=err_msg)
+            assert_array_almost_equal(h, hs_half[ii], err_msg=err_msg)
+
+        # some random FIR filters (real + complex)
+        # assume polyval is accurate
+        rng = np.random.RandomState(0)
+        for ii in range(2, 10):  # number of taps
+            b = rng.randn(ii)
+            for kk in range(2):
+                a = rng.randn(1) if kk == 0 else rng.randn(3)
+                for jj in range(2):
+                    if jj == 1:
+                        b = b + rng.randn(ii) * 1j
+                    # whole
+                    expected_w = np.linspace(0, 2 * np.pi, ii, endpoint=False)
+                    w, expected_h = freqz(b, a, worN=expected_w, whole=True)
+                    assert_array_almost_equal(w, expected_w)
+                    w, h = freqz(b, a, worN=ii, whole=True)
+                    assert_array_almost_equal(w, expected_w)
+                    assert_array_almost_equal(h, expected_h)
+                    # half
+                    expected_w = np.linspace(0, np.pi, ii, endpoint=False)
+                    w, expected_h = freqz(b, a, worN=expected_w, whole=False)
+                    assert_array_almost_equal(w, expected_w)
+                    w, h = freqz(b, a, worN=ii, whole=False)
+                    assert_array_almost_equal(w, expected_w)
+                    assert_array_almost_equal(h, expected_h)
+
+    def test_broadcasting1(self):
+        # Test broadcasting with worN an integer or a 1-D array,
+        # b and a are n-dimensional arrays.
+        np.random.seed(123)
+        b = np.random.rand(3, 5, 1)
+        a = np.random.rand(2, 1)
+        for whole in [False, True]:
+            # Test with worN being integers (one fast for FFT and one not),
+            # a 1-D array, and an empty array.
+            for worN in [16, 17, np.linspace(0, 1, 10), np.array([])]:
+                w, h = freqz(b, a, worN=worN, whole=whole)
+                for k in range(b.shape[1]):
+                    bk = b[:, k, 0]
+                    ak = a[:, 0]
+                    ww, hh = freqz(bk, ak, worN=worN, whole=whole)
+                    assert_allclose(ww, w)
+                    assert_allclose(hh, h[k])
+
+    def test_broadcasting2(self):
+        # Test broadcasting with worN an integer or a 1-D array,
+        # b is an n-dimensional array, and a is left at the default value.
+        np.random.seed(123)
+        b = np.random.rand(3, 5, 1)
+        for whole in [False, True]:
+            for worN in [16, 17, np.linspace(0, 1, 10)]:
+                w, h = freqz(b, worN=worN, whole=whole)
+                for k in range(b.shape[1]):
+                    bk = b[:, k, 0]
+                    ww, hh = freqz(bk, worN=worN, whole=whole)
+                    assert_allclose(ww, w)
+                    assert_allclose(hh, h[k])
+
+    def test_broadcasting3(self):
+        # Test broadcasting where b.shape[-1] is the same length
+        # as worN, and a is left at the default value.
+        np.random.seed(123)
+        N = 16
+        b = np.random.rand(3, N)
+        for whole in [False, True]:
+            for worN in [N, np.linspace(0, 1, N)]:
+                w, h = freqz(b, worN=worN, whole=whole)
+                assert_equal(w.size, N)
+                for k in range(N):
+                    bk = b[:, k]
+                    ww, hh = freqz(bk, worN=w[k], whole=whole)
+                    assert_allclose(ww, w[k])
+                    assert_allclose(hh, h[k])
+
+    def test_broadcasting4(self):
+        # Test broadcasting with worN a 2-D array.
+        np.random.seed(123)
+        b = np.random.rand(4, 2, 1, 1)
+        a = np.random.rand(5, 2, 1, 1)
+        for whole in [False, True]:
+            for worN in [np.random.rand(6, 7), np.empty((6, 0))]:
+                w, h = freqz(b, a, worN=worN, whole=whole)
+                assert_allclose(w, worN, rtol=1e-14)
+                assert_equal(h.shape, (2,) + worN.shape)
+                for k in range(2):
+                    ww, hh = freqz(b[:, k, 0, 0], a[:, k, 0, 0],
+                                   worN=worN.ravel(),
+                                   whole=whole)
+                    assert_allclose(ww, worN.ravel(), rtol=1e-14)
+                    assert_allclose(hh, h[k, :, :].ravel())
+
+    def test_backward_compat(self):
+        # For backward compatibility, check that worN=None behaves like the default.
+        w1, h1 = freqz([1.0], 1)
+        w2, h2 = freqz([1.0], 1, None)
+        assert_array_almost_equal(w1, w2)
+        assert_array_almost_equal(h1, h2)
+
+    def test_fs_param(self):
+        fs = 900
+        b = [0.039479155677484369, 0.11843746703245311, 0.11843746703245311,
+             0.039479155677484369]
+        a = [1.0, -1.3199152021838287, 0.80341991081938424,
+             -0.16767146321568049]
+
+        # N = None, whole=False
+        w1, h1 = freqz(b, a, fs=fs)
+        w2, h2 = freqz(b, a)
+        assert_allclose(h1, h2)
+        assert_allclose(w1, np.linspace(0, fs/2, 512, endpoint=False))
+
+        # N = None, whole=True
+        w1, h1 = freqz(b, a, whole=True, fs=fs)
+        w2, h2 = freqz(b, a, whole=True)
+        assert_allclose(h1, h2)
+        assert_allclose(w1, np.linspace(0, fs, 512, endpoint=False))
+
+        # N = 5, whole=False
+        w1, h1 = freqz(b, a, 5, fs=fs)
+        w2, h2 = freqz(b, a, 5)
+        assert_allclose(h1, h2)
+        assert_allclose(w1, np.linspace(0, fs/2, 5, endpoint=False))
+
+        # N = 5, whole=True
+        w1, h1 = freqz(b, a, 5, whole=True, fs=fs)
+        w2, h2 = freqz(b, a, 5, whole=True)
+        assert_allclose(h1, h2)
+        assert_allclose(w1, np.linspace(0, fs, 5, endpoint=False))
+
+        # w is an array_like
+        for w in ([123], (123,), np.array([123]), (50, 123, 230),
+                  np.array([50, 123, 230])):
+            w1, h1 = freqz(b, a, w, fs=fs)
+            w2, h2 = freqz(b, a, 2*pi*np.array(w)/fs)
+            assert_allclose(h1, h2)
+            assert_allclose(w, w1)
+
+    def test_w_or_N_types(self):
+        # Measure at 7 (polyval) or 8 (fft) equally-spaced points
+        for N in (7, np.int8(7), np.int16(7), np.int32(7), np.int64(7),
+                  np.array(7),
+                  8, np.int8(8), np.int16(8), np.int32(8), np.int64(8),
+                  np.array(8)):
+
+            w, h = freqz([1.0], worN=N)
+            assert_array_almost_equal(w, np.pi * np.arange(N) / N)
+            assert_array_almost_equal(h, np.ones(N))
+
+            w, h = freqz([1.0], worN=N, fs=100)
+            assert_array_almost_equal(w, np.linspace(0, 50, N, endpoint=False))
+            assert_array_almost_equal(h, np.ones(N))
+
+        # Measure at frequency 8 Hz
+        for w in (8.0, 8.0+0j):
+            # Only makes sense when fs is specified
+            w_out, h = freqz([1.0], worN=w, fs=100)
+            assert_array_almost_equal(w_out, [8])
+            assert_array_almost_equal(h, [1])
+
+    def test_nyquist(self):
+        w, h = freqz([1.0], worN=8, include_nyquist=True)
+        assert_array_almost_equal(w, np.pi * np.arange(8) / 7.)
+        assert_array_almost_equal(h, np.ones(8))
+        w, h = freqz([1.0], worN=9, include_nyquist=True)
+        assert_array_almost_equal(w, np.pi * np.arange(9) / 8.)
+        assert_array_almost_equal(h, np.ones(9))
+
+        for a in [1, np.ones(2)]:
+            w, h = freqz(np.ones(2), a, worN=0, include_nyquist=True)
+            assert_equal(w.shape, (0,))
+            assert_equal(h.shape, (0,))
+            assert_equal(h.dtype, np.dtype('complex128'))
+
+        w1, h1 = freqz([1.0], worN=8, whole=True, include_nyquist=True)
+        w2, h2 = freqz([1.0], worN=8, whole=True, include_nyquist=False)
+        assert_array_almost_equal(w1, w2)
+        assert_array_almost_equal(h1, h2)
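+
+
+# A minimal illustrative sketch of the fs handling verified in test_fs_param
+# above: frequencies given in Hz together with fs are equivalent to the
+# normalized radian frequencies 2*pi*f/fs (assumes numpy as np and
+# scipy.signal):
+def _freqz_fs_mapping_sketch():
+    from scipy.signal import freqz
+    b, fs = [0.5, 0.5], 900
+    f_hz = np.array([50.0, 123.0, 230.0])
+    w1, h1 = freqz(b, 1, worN=f_hz, fs=fs)
+    w2, h2 = freqz(b, 1, worN=2 * np.pi * f_hz / fs)
+    return np.allclose(h1, h2)  # True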
+
+
+class TestSOSFreqz:
+
+    def test_sosfreqz_basic(self):
+        # Compare the results of freqz and sosfreqz for a low order
+        # Butterworth filter.
+
+        N = 500
+
+        b, a = butter(4, 0.2)
+        sos = butter(4, 0.2, output='sos')
+        w, h = freqz(b, a, worN=N)
+        w2, h2 = sosfreqz(sos, worN=N)
+        assert_equal(w2, w)
+        assert_allclose(h2, h, rtol=1e-10, atol=1e-14)
+
+        b, a = ellip(3, 1, 30, (0.2, 0.3), btype='bandpass')
+        sos = ellip(3, 1, 30, (0.2, 0.3), btype='bandpass', output='sos')
+        w, h = freqz(b, a, worN=N)
+        w2, h2 = sosfreqz(sos, worN=N)
+        assert_equal(w2, w)
+        assert_allclose(h2, h, rtol=1e-10, atol=1e-14)
+        # must have at least one section
+        assert_raises(ValueError, sosfreqz, sos[:0])
+
+    def test_sosfreqz_design(self):
+        # Compare sosfreqz output against expected values for different
+        # filter types
+
+        # from cheb2ord
+        N, Wn = cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60)
+        sos = cheby2(N, 60, Wn, 'stop', output='sos')
+        w, h = sosfreqz(sos)
+        h = np.abs(h)
+        w /= np.pi
+        assert_allclose(20 * np.log10(h[w <= 0.1]), 0, atol=3.01)
+        assert_allclose(20 * np.log10(h[w >= 0.6]), 0., atol=3.01)
+        assert_allclose(h[(w >= 0.2) & (w <= 0.5)], 0., atol=1e-3)  # <= -60 dB
+
+        N, Wn = cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 150)
+        sos = cheby2(N, 150, Wn, 'stop', output='sos')
+        w, h = sosfreqz(sos)
+        dB = 20*np.log10(np.abs(h))
+        w /= np.pi
+        assert_allclose(dB[w <= 0.1], 0, atol=3.01)
+        assert_allclose(dB[w >= 0.6], 0., atol=3.01)
+        assert_array_less(dB[(w >= 0.2) & (w <= 0.5)], -149.9)
+
+        # from cheb1ord
+        N, Wn = cheb1ord(0.2, 0.3, 3, 40)
+        sos = cheby1(N, 3, Wn, 'low', output='sos')
+        w, h = sosfreqz(sos)
+        h = np.abs(h)
+        w /= np.pi
+        assert_allclose(20 * np.log10(h[w <= 0.2]), 0, atol=3.01)
+        assert_allclose(h[w >= 0.3], 0., atol=1e-2)  # <= -40 dB
+
+        N, Wn = cheb1ord(0.2, 0.3, 1, 150)
+        sos = cheby1(N, 1, Wn, 'low', output='sos')
+        w, h = sosfreqz(sos)
+        dB = 20*np.log10(np.abs(h))
+        w /= np.pi
+        assert_allclose(dB[w <= 0.2], 0, atol=1.01)
+        assert_array_less(dB[w >= 0.3], -149.9)
+
+        # adapted from ellipord
+        N, Wn = ellipord(0.3, 0.2, 3, 60)
+        sos = ellip(N, 0.3, 60, Wn, 'high', output='sos')
+        w, h = sosfreqz(sos)
+        h = np.abs(h)
+        w /= np.pi
+        assert_allclose(20 * np.log10(h[w >= 0.3]), 0, atol=3.01)
+        assert_allclose(h[w <= 0.1], 0., atol=1.5e-3)  # <= -60 dB (approx)
+
+        # adapted from buttord
+        N, Wn = buttord([0.2, 0.5], [0.14, 0.6], 3, 40)
+        sos = butter(N, Wn, 'band', output='sos')
+        w, h = sosfreqz(sos)
+        h = np.abs(h)
+        w /= np.pi
+        assert_allclose(h[w <= 0.14], 0., atol=1e-2)  # <= -40 dB
+        assert_allclose(h[w >= 0.6], 0., atol=1e-2)  # <= -40 dB
+        assert_allclose(20 * np.log10(h[(w >= 0.2) & (w <= 0.5)]),
+                        0, atol=3.01)
+
+        N, Wn = buttord([0.2, 0.5], [0.14, 0.6], 3, 100)
+        sos = butter(N, Wn, 'band', output='sos')
+        w, h = sosfreqz(sos)
+        dB = 20*np.log10(np.maximum(np.abs(h), 1e-10))
+        w /= np.pi
+        assert_array_less(dB[(w > 0) & (w <= 0.14)], -99.9)
+        assert_array_less(dB[w >= 0.6], -99.9)
+        assert_allclose(dB[(w >= 0.2) & (w <= 0.5)], 0, atol=3.01)
+
+    def test_sosfreqz_design_ellip(self):
+        N, Wn = ellipord(0.3, 0.1, 3, 60)
+        sos = ellip(N, 0.3, 60, Wn, 'high', output='sos')
+        w, h = sosfreqz(sos)
+        h = np.abs(h)
+        w /= np.pi
+        assert_allclose(20 * np.log10(h[w >= 0.3]), 0, atol=3.01)
+        assert_allclose(h[w <= 0.1], 0., atol=1.5e-3)  # <= -60 dB (approx)
+
+        N, Wn = ellipord(0.3, 0.2, .5, 150)
+        sos = ellip(N, .5, 150, Wn, 'high', output='sos')
+        w, h = sosfreqz(sos)
+        dB = 20*np.log10(np.maximum(np.abs(h), 1e-10))
+        w /= np.pi
+        assert_allclose(dB[w >= 0.3], 0, atol=.55)
+        # Allow some numerical slop in the upper bound -150, so this is
+        # a check that dB[w <= 0.2] is less than or almost equal to -150.
+        assert dB[w <= 0.2].max() < -150*(1 - 1e-12)
+
+    @mpmath_check("0.10")
+    def test_sos_freqz_against_mp(self):
+        # Compare the result of sosfreqz applied to a high order Butterworth
+        # filter against the result computed using mpmath.  (signal.freqz fails
+        # miserably with such high order filters.)
+        from . import mpsig
+        N = 500
+        order = 25
+        Wn = 0.15
+        with mpmath.workdps(80):
+            z_mp, p_mp, k_mp = mpsig.butter_lp(order, Wn)
+            w_mp, h_mp = mpsig.zpkfreqz(z_mp, p_mp, k_mp, N)
+        w_mp = np.array([float(x) for x in w_mp])
+        h_mp = np.array([complex(x) for x in h_mp])
+
+        sos = butter(order, Wn, output='sos')
+        w, h = sosfreqz(sos, worN=N)
+        assert_allclose(w, w_mp, rtol=1e-12, atol=1e-14)
+        assert_allclose(h, h_mp, rtol=1e-12, atol=1e-14)
+
+    def test_fs_param(self):
+        fs = 900
+        sos = [[0.03934683014103762, 0.07869366028207524, 0.03934683014103762,
+                1.0, -0.37256600288916636, 0.0],
+               [1.0, 1.0, 0.0, 1.0, -0.9495739996946778, 0.45125966317124144]]
+
+        # N = None, whole=False
+        w1, h1 = sosfreqz(sos, fs=fs)
+        w2, h2 = sosfreqz(sos)
+        assert_allclose(h1, h2)
+        assert_allclose(w1, np.linspace(0, fs/2, 512, endpoint=False))
+
+        # N = None, whole=True
+        w1, h1 = sosfreqz(sos, whole=True, fs=fs)
+        w2, h2 = sosfreqz(sos, whole=True)
+        assert_allclose(h1, h2)
+        assert_allclose(w1, np.linspace(0, fs, 512, endpoint=False))
+
+        # N = 5, whole=False
+        w1, h1 = sosfreqz(sos, 5, fs=fs)
+        w2, h2 = sosfreqz(sos, 5)
+        assert_allclose(h1, h2)
+        assert_allclose(w1, np.linspace(0, fs/2, 5, endpoint=False))
+
+        # N = 5, whole=True
+        w1, h1 = sosfreqz(sos, 5, whole=True, fs=fs)
+        w2, h2 = sosfreqz(sos, 5, whole=True)
+        assert_allclose(h1, h2)
+        assert_allclose(w1, np.linspace(0, fs, 5, endpoint=False))
+
+        # w is an array_like
+        for w in ([123], (123,), np.array([123]), (50, 123, 230),
+                  np.array([50, 123, 230])):
+            w1, h1 = sosfreqz(sos, w, fs=fs)
+            w2, h2 = sosfreqz(sos, 2*pi*np.array(w)/fs)
+            assert_allclose(h1, h2)
+            assert_allclose(w, w1)
+
+    def test_w_or_N_types(self):
+        # Measure at 7 (polyval) or 8 (fft) equally-spaced points
+        for N in (7, np.int8(7), np.int16(7), np.int32(7), np.int64(7),
+                  np.array(7),
+                  8, np.int8(8), np.int16(8), np.int32(8), np.int64(8),
+                  np.array(8)):
+
+            w, h = sosfreqz([1, 0, 0, 1, 0, 0], worN=N)
+            assert_array_almost_equal(w, np.pi * np.arange(N) / N)
+            assert_array_almost_equal(h, np.ones(N))
+
+            w, h = sosfreqz([1, 0, 0, 1, 0, 0], worN=N, fs=100)
+            assert_array_almost_equal(w, np.linspace(0, 50, N, endpoint=False))
+            assert_array_almost_equal(h, np.ones(N))
+
+        # Measure at frequency 8 Hz
+        for w in (8.0, 8.0+0j):
+            # Only makes sense when fs is specified
+            w_out, h = sosfreqz([1, 0, 0, 1, 0, 0], worN=w, fs=100)
+            assert_array_almost_equal(w_out, [8])
+            assert_array_almost_equal(h, [1])
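+
+
+# A minimal illustrative sketch of why the tests above prefer sosfreqz for
+# high orders: the expanded (b, a) polynomials of a high-order design are
+# numerically ill-conditioned, so freqz's response diverges from the robust
+# sosfreqz result (assumes numpy as np and scipy.signal):
+def _sos_vs_tf_high_order_sketch():
+    from scipy.signal import butter, freqz, sosfreqz
+    b, a = butter(25, 0.15)
+    sos = butter(25, 0.15, output='sos')
+    _, h_tf = freqz(b, a, worN=500)
+    _, h_sos = sosfreqz(sos, worN=500)
+    return np.max(np.abs(h_tf - h_sos))  # large for the ill-conditioned tf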
+
+
+class TestFreqz_zpk:
+
+    def test_ticket1441(self):
+        """Regression test for ticket 1441."""
+        # Because freqz previously used arange instead of linspace,
+        # when N was large, it would return one more point than
+        # requested.
+        N = 100000
+        w, h = freqz_zpk([0.5], [0.5], 1.0, worN=N)
+        assert_equal(w.shape, (N,))
+
+    def test_basic(self):
+        w, h = freqz_zpk([0.5], [0.5], 1.0, worN=8)
+        assert_array_almost_equal(w, np.pi * np.arange(8.0) / 8)
+        assert_array_almost_equal(h, np.ones(8))
+
+    def test_basic_whole(self):
+        w, h = freqz_zpk([0.5], [0.5], 1.0, worN=8, whole=True)
+        assert_array_almost_equal(w, 2 * np.pi * np.arange(8.0) / 8)
+        assert_array_almost_equal(h, np.ones(8))
+
+    def test_vs_freqz(self):
+        b, a = cheby1(4, 5, 0.5, analog=False, output='ba')
+        z, p, k = cheby1(4, 5, 0.5, analog=False, output='zpk')
+
+        w1, h1 = freqz(b, a)
+        w2, h2 = freqz_zpk(z, p, k)
+        assert_allclose(w1, w2)
+        assert_allclose(h1, h2, rtol=1e-6)
+
+    def test_backward_compat(self):
+        # For backward compatibility, check that worN=None behaves like the default.
+        w1, h1 = freqz_zpk([0.5], [0.5], 1.0)
+        w2, h2 = freqz_zpk([0.5], [0.5], 1.0, None)
+        assert_array_almost_equal(w1, w2)
+        assert_array_almost_equal(h1, h2)
+
+    def test_fs_param(self):
+        fs = 900
+        z = [-1, -1, -1]
+        p = [0.4747869998473389+0.4752230717749344j, 0.37256600288916636,
+             0.4747869998473389-0.4752230717749344j]
+        k = 0.03934683014103762
+
+        # N = None, whole=False
+        w1, h1 = freqz_zpk(z, p, k, whole=False, fs=fs)
+        w2, h2 = freqz_zpk(z, p, k, whole=False)
+        assert_allclose(h1, h2)
+        assert_allclose(w1, np.linspace(0, fs/2, 512, endpoint=False))
+
+        # N = None, whole=True
+        w1, h1 = freqz_zpk(z, p, k, whole=True, fs=fs)
+        w2, h2 = freqz_zpk(z, p, k, whole=True)
+        assert_allclose(h1, h2)
+        assert_allclose(w1, np.linspace(0, fs, 512, endpoint=False))
+
+        # N = 5, whole=False
+        w1, h1 = freqz_zpk(z, p, k, 5, fs=fs)
+        w2, h2 = freqz_zpk(z, p, k, 5)
+        assert_allclose(h1, h2)
+        assert_allclose(w1, np.linspace(0, fs/2, 5, endpoint=False))
+
+        # N = 5, whole=True
+        w1, h1 = freqz_zpk(z, p, k, 5, whole=True, fs=fs)
+        w2, h2 = freqz_zpk(z, p, k, 5, whole=True)
+        assert_allclose(h1, h2)
+        assert_allclose(w1, np.linspace(0, fs, 5, endpoint=False))
+
+        # w is an array_like
+        for w in ([123], (123,), np.array([123]), (50, 123, 230),
+                  np.array([50, 123, 230])):
+            w1, h1 = freqz_zpk(z, p, k, w, fs=fs)
+            w2, h2 = freqz_zpk(z, p, k, 2*pi*np.array(w)/fs)
+            assert_allclose(h1, h2)
+            assert_allclose(w, w1)
+
+    def test_w_or_N_types(self):
+        # Measure at 8 equally-spaced points
+        for N in (8, np.int8(8), np.int16(8), np.int32(8), np.int64(8),
+                  np.array(8)):
+
+            w, h = freqz_zpk([], [], 1, worN=N)
+            assert_array_almost_equal(w, np.pi * np.arange(8) / 8.)
+            assert_array_almost_equal(h, np.ones(8))
+
+            w, h = freqz_zpk([], [], 1, worN=N, fs=100)
+            assert_array_almost_equal(w, np.linspace(0, 50, 8, endpoint=False))
+            assert_array_almost_equal(h, np.ones(8))
+
+        # Measure at frequency 8 Hz
+        for w in (8.0, 8.0+0j):
+            # Only makes sense when fs is specified
+            w_out, h = freqz_zpk([], [], 1, worN=w, fs=100)
+            assert_array_almost_equal(w_out, [8])
+            assert_array_almost_equal(h, [1])
+
+
+class TestNormalize:
+
+    def test_allclose(self):
+        """Test for false positive on allclose in normalize() in
+        filter_design.py"""
+        # Test to make sure the allclose call within signal.normalize does not
+        # choose false positives. Then check against a known output from MATLAB
+        # to make sure the fix doesn't break anything.
+
+        # These are the coefficients returned from
+        #   `[b,a] = cheby1(8, 0.5, 0.048)`
+        # in MATLAB. There are at least 15 significant figures in each
+        # coefficient, so it makes sense to test for errors on the order of
+        # 1e-13 (this can always be relaxed if different platforms have
+        # different rounding errors)
+        b_matlab = np.array([2.150733144728282e-11, 1.720586515782626e-10,
+                             6.022052805239190e-10, 1.204410561047838e-09,
+                             1.505513201309798e-09, 1.204410561047838e-09,
+                             6.022052805239190e-10, 1.720586515782626e-10,
+                             2.150733144728282e-11])
+        a_matlab = np.array([1.000000000000000e+00, -7.782402035027959e+00,
+                             2.654354569747454e+01, -5.182182531666387e+01,
+                             6.334127355102684e+01, -4.963358186631157e+01,
+                             2.434862182949389e+01, -6.836925348604676e+00,
+                             8.412934944449140e-01])
+
+        # This is the input to signal.normalize after passing through the
+        # equivalent steps in signal.iirfilter as was done for MATLAB
+        b_norm_in = np.array([1.5543135865293012e-06, 1.2434508692234413e-05,
+                              4.3520780422820447e-05, 8.7041560845640893e-05,
+                              1.0880195105705122e-04, 8.7041560845640975e-05,
+                              4.3520780422820447e-05, 1.2434508692234413e-05,
+                              1.5543135865293012e-06])
+        a_norm_in = np.array([7.2269025909127173e+04, -5.6242661430467968e+05,
+                              1.9182761917308895e+06, -3.7451128364682454e+06,
+                              4.5776121393762771e+06, -3.5869706138592605e+06,
+                              1.7596511818472347e+06, -4.9409793515707983e+05,
+                              6.0799461347219651e+04])
+
+        b_output, a_output = normalize(b_norm_in, a_norm_in)
+
+        # The test on b works for decimal=14 but the one for a does not. For
+        # the sake of consistency, both of these are decimal=13. If something
+        # breaks on another platform, it is probably fine to relax this to a
+        # lower decimal.
+        assert_array_almost_equal(b_matlab, b_output, decimal=13)
+        assert_array_almost_equal(a_matlab, a_output, decimal=13)
+
+    def test_errors(self):
+        """Test the error cases."""
+        # all zero denominator
+        assert_raises(ValueError, normalize, [1, 2], 0)
+
+        # denominator not 1 dimensional
+        assert_raises(ValueError, normalize, [1, 2], [[1]])
+
+        # numerator too many dimensions
+        assert_raises(ValueError, normalize, [[[1, 2]]], 1)
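+
+
+# A minimal illustrative sketch of what normalize() does (assumes numpy as np
+# and scipy.signal): it rescales the numerator and denominator so that
+# a[0] == 1, trimming leading numerator coefficients that are negligible:
+def _normalize_sketch():
+    from scipy.signal import normalize
+    b, a = normalize([2.0, 4.0], [2.0, 1.0])
+    return b, a  # b == [1., 2.], a == [1., 0.5]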
+
+
+class TestLp2lp:
+
+    def test_basic(self):
+        b = [1]
+        a = [1, np.sqrt(2), 1]
+        b_lp, a_lp = lp2lp(b, a, 0.38574256627112119)
+        assert_array_almost_equal(b_lp, [0.1488], decimal=4)
+        assert_array_almost_equal(a_lp, [1, 0.5455, 0.1488], decimal=4)
+
+
+class TestLp2hp:
+
+    def test_basic(self):
+        b = [0.25059432325190018]
+        a = [1, 0.59724041654134863, 0.92834805757524175, 0.25059432325190018]
+        b_hp, a_hp = lp2hp(b, a, 2*np.pi*5000)
+        assert_allclose(b_hp, [1, 0, 0, 0])
+        assert_allclose(a_hp, [1, 1.1638e5, 2.3522e9, 1.2373e14], rtol=1e-4)
+
+
+class TestLp2bp:
+
+    def test_basic(self):
+        b = [1]
+        a = [1, 2, 2, 1]
+        b_bp, a_bp = lp2bp(b, a, 2*np.pi*4000, 2*np.pi*2000)
+        assert_allclose(b_bp, [1.9844e12, 0, 0, 0], rtol=1e-6)
+        assert_allclose(a_bp, [1, 2.5133e4, 2.2108e9, 3.3735e13,
+                               1.3965e18, 1.0028e22, 2.5202e26], rtol=1e-4)
+
+
+class TestLp2bs:
+
+    def test_basic(self):
+        b = [1]
+        a = [1, 1]
+        b_bs, a_bs = lp2bs(b, a, 0.41722257286366754, 0.18460575326152251)
+        assert_array_almost_equal(b_bs, [1, 0, 0.17407], decimal=5)
+        assert_array_almost_equal(a_bs, [1, 0.18461, 0.17407], decimal=5)
+
+
+class TestBilinear:
+
+    def test_basic(self):
+        b = [0.14879732743343033]
+        a = [1, 0.54552236880522209, 0.14879732743343033]
+        b_z, a_z = bilinear(b, a, 0.5)
+        assert_array_almost_equal(b_z, [0.087821, 0.17564, 0.087821],
+                                  decimal=5)
+        assert_array_almost_equal(a_z, [1, -1.0048, 0.35606], decimal=4)
+
+        b = [1, 0, 0.17407467530697837]
+        a = [1, 0.18460575326152251, 0.17407467530697837]
+        b_z, a_z = bilinear(b, a, 0.5)
+        assert_array_almost_equal(b_z, [0.86413, -1.2158, 0.86413],
+                                  decimal=4)
+        assert_array_almost_equal(a_z, [1, -1.2158, 0.72826],
+                                  decimal=4)
+
+
+class TestLp2lp_zpk:
+
+    def test_basic(self):
+        z = []
+        p = [(-1+1j)/np.sqrt(2), (-1-1j)/np.sqrt(2)]
+        k = 1
+        z_lp, p_lp, k_lp = lp2lp_zpk(z, p, k, 5)
+        assert_array_equal(z_lp, [])
+        assert_allclose(sort(p_lp), sort(p)*5)
+        assert_allclose(k_lp, 25)
+
+        # Pseudo-Chebyshev with both poles and zeros
+        z = [-2j, +2j]
+        p = [-0.75, -0.5-0.5j, -0.5+0.5j]
+        k = 3
+        z_lp, p_lp, k_lp = lp2lp_zpk(z, p, k, 20)
+        assert_allclose(sort(z_lp), sort([-40j, +40j]))
+        assert_allclose(sort(p_lp), sort([-15, -10-10j, -10+10j]))
+        assert_allclose(k_lp, 60)
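+
+
+# A minimal illustrative sketch of the lowpass-to-lowpass transform verified
+# above: substituting s -> s/wo multiplies every zero and pole by wo and
+# scales the gain by wo**(len(p) - len(z)) (assumes numpy as np and
+# scipy.signal):
+def _lp2lp_zpk_scaling_sketch():
+    from scipy.signal import lp2lp_zpk
+    p = np.array([(-1 + 1j) / np.sqrt(2), (-1 - 1j) / np.sqrt(2)])
+    z_lp, p_lp, k_lp = lp2lp_zpk([], p, 1, wo=3)
+    return np.allclose(p_lp, 3 * p) and np.isclose(k_lp, 9.0)  # both True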
+
+
+class TestLp2hp_zpk:
+
+    def test_basic(self):
+        z = []
+        p = [(-1+1j)/np.sqrt(2), (-1-1j)/np.sqrt(2)]
+        k = 1
+
+        z_hp, p_hp, k_hp = lp2hp_zpk(z, p, k, 5)
+        assert_array_equal(z_hp, [0, 0])
+        assert_allclose(sort(p_hp), sort(p)*5)
+        assert_allclose(k_hp, 1)
+
+        z = [-2j, +2j]
+        p = [-0.75, -0.5-0.5j, -0.5+0.5j]
+        k = 3
+        z_hp, p_hp, k_hp = lp2hp_zpk(z, p, k, 6)
+        assert_allclose(sort(z_hp), sort([-3j, 0, +3j]))
+        assert_allclose(sort(p_hp), sort([-8, -6-6j, -6+6j]))
+        assert_allclose(k_hp, 32)
+
+
+class TestLp2bp_zpk:
+
+    def test_basic(self):
+        z = [-2j, +2j]
+        p = [-0.75, -0.5-0.5j, -0.5+0.5j]
+        k = 3
+        z_bp, p_bp, k_bp = lp2bp_zpk(z, p, k, 15, 8)
+        assert_allclose(sort(z_bp), sort([-25j, -9j, 0, +9j, +25j]))
+        assert_allclose(sort(p_bp), sort([-3 + 6j*sqrt(6),
+                                          -3 - 6j*sqrt(6),
+                                          +2j+sqrt(-8j-225)-2,
+                                          -2j+sqrt(+8j-225)-2,
+                                          +2j-sqrt(-8j-225)-2,
+                                          -2j-sqrt(+8j-225)-2, ]))
+        assert_allclose(k_bp, 24)
+
+
+class TestLp2bs_zpk:
+
+    def test_basic(self):
+        z = [-2j, +2j]
+        p = [-0.75, -0.5-0.5j, -0.5+0.5j]
+        k = 3
+
+        z_bs, p_bs, k_bs = lp2bs_zpk(z, p, k, 35, 12)
+
+        assert_allclose(sort(z_bs), sort([+35j, -35j,
+                                          +3j+sqrt(1234)*1j,
+                                          -3j+sqrt(1234)*1j,
+                                          +3j-sqrt(1234)*1j,
+                                          -3j-sqrt(1234)*1j]))
+        assert_allclose(sort(p_bs), sort([+3j*sqrt(129) - 8,
+                                          -3j*sqrt(129) - 8,
+                                          (-6 + 6j) - sqrt(-1225 - 72j),
+                                          (-6 - 6j) - sqrt(-1225 + 72j),
+                                          (-6 + 6j) + sqrt(-1225 - 72j),
+                                          (-6 - 6j) + sqrt(-1225 + 72j), ]))
+        assert_allclose(k_bs, 32)
+
+
+class TestBilinear_zpk:
+
+    def test_basic(self):
+        z = [-2j, +2j]
+        p = [-0.75, -0.5-0.5j, -0.5+0.5j]
+        k = 3
+
+        z_d, p_d, k_d = bilinear_zpk(z, p, k, 10)
+
+        assert_allclose(sort(z_d), sort([(20-2j)/(20+2j), (20+2j)/(20-2j),
+                                         -1]))
+        assert_allclose(sort(p_d), sort([77/83,
+                                         (1j/2 + 39/2) / (41/2 - 1j/2),
+                                         (39/2 - 1j/2) / (1j/2 + 41/2), ]))
+        assert_allclose(k_d, 9696/69803)
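+
+
+# A minimal illustrative sketch of the bilinear transform verified above:
+# each analog zero/pole s maps to z = (2*fs + s) / (2*fs - s) (assumes numpy
+# as np and scipy.signal):
+def _bilinear_zpk_by_hand_sketch():
+    from scipy.signal import bilinear_zpk
+    fs = 10
+    p = np.array([-0.75, -0.5 - 0.5j, -0.5 + 0.5j])
+    z_d, p_d, k_d = bilinear_zpk([], p, 1, fs)
+    return np.allclose(p_d, (2 * fs + p) / (2 * fs - p))  # True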
+
+
+class TestPrototypeType:
+
+    def test_output_type(self):
+        # Prototypes should consistently output arrays, not lists
+        # https://github.com/scipy/scipy/pull/441
+        for func in (buttap,
+                     besselap,
+                     lambda N: cheb1ap(N, 1),
+                     lambda N: cheb2ap(N, 20),
+                     lambda N: ellipap(N, 1, 20)):
+            for N in range(7):
+                z, p, k = func(N)
+                assert_(isinstance(z, np.ndarray))
+                assert_(isinstance(p, np.ndarray))
+
+
+def dB(x):
+    # Return magnitude in decibels, avoiding divide-by-zero warnings
+    # (and deal with some "not less-ordered" errors when -inf shows up)
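+    # e.g. dB(np.array([1.0, 0.0])) -> roughly [0., -6153.] rather than a
+    # divide-by-zero warning, since 0 is clipped to float64 tiny (~2.2e-308)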
+    return 20 * np.log10(np.maximum(np.abs(x), np.finfo(np.float64).tiny))
+
+
+class TestButtord:
+
+    def test_lowpass(self):
+        wp = 0.2
+        ws = 0.3
+        rp = 3
+        rs = 60
+        N, Wn = buttord(wp, ws, rp, rs, False)
+        b, a = butter(N, Wn, 'lowpass', False)
+        w, h = freqz(b, a)
+        w /= np.pi
+        assert_array_less(-rp, dB(h[w <= wp]))
+        assert_array_less(dB(h[ws <= w]), -rs)
+
+        assert_equal(N, 16)
+        assert_allclose(Wn, 2.0002776782743284e-01, rtol=1e-15)
+
+    def test_highpass(self):
+        wp = 0.3
+        ws = 0.2
+        rp = 3
+        rs = 70
+        N, Wn = buttord(wp, ws, rp, rs, False)
+        b, a = butter(N, Wn, 'highpass', False)
+        w, h = freqz(b, a)
+        w /= np.pi
+        assert_array_less(-rp, dB(h[wp <= w]))
+        assert_array_less(dB(h[w <= ws]), -rs)
+
+        assert_equal(N, 18)
+        assert_allclose(Wn, 2.9996603079132672e-01, rtol=1e-15)
+
+    def test_bandpass(self):
+        wp = [0.2, 0.5]
+        ws = [0.1, 0.6]
+        rp = 3
+        rs = 80
+        N, Wn = buttord(wp, ws, rp, rs, False)
+        b, a = butter(N, Wn, 'bandpass', False)
+        w, h = freqz(b, a)
+        w /= np.pi
+        assert_array_less(-rp - 0.1,
+                          dB(h[np.logical_and(wp[0] <= w, w <= wp[1])]))
+        assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]),
+                          -rs + 0.1)
+
+        assert_equal(N, 18)
+        assert_allclose(Wn, [1.9998742411409134e-01, 5.0002139595676276e-01],
+                        rtol=1e-15)
+
+    def test_bandstop(self):
+        wp = [0.1, 0.6]
+        ws = [0.2, 0.5]
+        rp = 3
+        rs = 90
+        N, Wn = buttord(wp, ws, rp, rs, False)
+        b, a = butter(N, Wn, 'bandstop', False)
+        w, h = freqz(b, a)
+        w /= np.pi
+        assert_array_less(-rp,
+                          dB(h[np.logical_or(w <= wp[0], wp[1] <= w)]))
+        assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]),
+                          -rs)
+
+        assert_equal(N, 20)
+        assert_allclose(Wn, [1.4759432329294042e-01, 5.9997365985276407e-01],
+                        rtol=1e-6)
+
+    def test_analog(self):
+        wp = 200
+        ws = 600
+        rp = 3
+        rs = 60
+        N, Wn = buttord(wp, ws, rp, rs, True)
+        b, a = butter(N, Wn, 'lowpass', True)
+        w, h = freqs(b, a)
+        assert_array_less(-rp, dB(h[w <= wp]))
+        assert_array_less(dB(h[ws <= w]), -rs)
+
+        assert_equal(N, 7)
+        assert_allclose(Wn, 2.0006785355671877e+02, rtol=1e-15)
+
+        n, Wn = buttord(1, 550/450, 1, 26, analog=True)
+        assert_equal(n, 19)
+        assert_allclose(Wn, 1.0361980524629517, rtol=1e-15)
+
+        assert_equal(buttord(1, 1.2, 1, 80, analog=True)[0], 55)
+
+    def test_fs_param(self):
+        wp = [4410, 11025]
+        ws = [2205, 13230]
+        rp = 3
+        rs = 80
+        fs = 44100
+        N, Wn = buttord(wp, ws, rp, rs, False, fs=fs)
+        b, a = butter(N, Wn, 'bandpass', False, fs=fs)
+        w, h = freqz(b, a, fs=fs)
+        assert_array_less(-rp - 0.1,
+                          dB(h[np.logical_and(wp[0] <= w, w <= wp[1])]))
+        assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]),
+                          -rs + 0.1)
+
+        assert_equal(N, 18)
+        assert_allclose(Wn, [4409.722701715714, 11025.47178084662],
+                        rtol=1e-15)
+
+    def test_invalid_input(self):
+        with pytest.raises(ValueError) as exc_info:
+            buttord([20, 50], [14, 60], 3, 2)
+        assert "gpass should be smaller than gstop" in str(exc_info.value)
+
+        with pytest.raises(ValueError) as exc_info:
+            buttord([20, 50], [14, 60], -1, 2)
+        assert "gpass should be larger than 0.0" in str(exc_info.value)
+
+        with pytest.raises(ValueError) as exc_info:
+            buttord([20, 50], [14, 60], 1, -2)
+        assert "gstop should be larger than 0.0" in str(exc_info.value)
+
+    def test_runtime_warnings(self):
+        with pytest.warns(RuntimeWarning, match=r'Order is zero'):
+            buttord(0.0, 1.0, 3, 60)
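+
+
+# A minimal illustrative sketch of the order selection checked above: buttord
+# returns the smallest Butterworth order N (and natural frequency Wn) that
+# keeps passband loss within gpass dB and stopband attenuation at least
+# gstop dB (assumes numpy as np, scipy.signal, and the dB helper above):
+def _buttord_selection_sketch():
+    from scipy.signal import butter, buttord, freqz
+    N, Wn = buttord(wp=0.2, ws=0.3, gpass=3, gstop=60)  # N == 16
+    w, h = freqz(*butter(N, Wn))
+    w /= np.pi
+    return dB(h[w >= 0.3]).max()  # below -60 dB throughout the stopband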
+
+
+class TestCheb1ord:
+
+    def test_lowpass(self):
+        wp = 0.2
+        ws = 0.3
+        rp = 3
+        rs = 60
+        N, Wn = cheb1ord(wp, ws, rp, rs, False)
+        b, a = cheby1(N, rp, Wn, 'low', False)
+        w, h = freqz(b, a)
+        w /= np.pi
+        assert_array_less(-rp - 0.1, dB(h[w <= wp]))
+        assert_array_less(dB(h[ws <= w]), -rs + 0.1)
+
+        assert_equal(N, 8)
+        assert_allclose(Wn, 0.2, rtol=1e-15)
+
+    def test_highpass(self):
+        wp = 0.3
+        ws = 0.2
+        rp = 3
+        rs = 70
+        N, Wn = cheb1ord(wp, ws, rp, rs, False)
+        b, a = cheby1(N, rp, Wn, 'high', False)
+        w, h = freqz(b, a)
+        w /= np.pi
+        assert_array_less(-rp - 0.1, dB(h[wp <= w]))
+        assert_array_less(dB(h[w <= ws]), -rs + 0.1)
+
+        assert_equal(N, 9)
+        assert_allclose(Wn, 0.3, rtol=1e-15)
+
+    def test_bandpass(self):
+        wp = [0.2, 0.5]
+        ws = [0.1, 0.6]
+        rp = 3
+        rs = 80
+        N, Wn = cheb1ord(wp, ws, rp, rs, False)
+        b, a = cheby1(N, rp, Wn, 'band', False)
+        w, h = freqz(b, a)
+        w /= np.pi
+        assert_array_less(-rp - 0.1,
+                          dB(h[np.logical_and(wp[0] <= w, w <= wp[1])]))
+        assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]),
+                          -rs + 0.1)
+
+        assert_equal(N, 9)
+        assert_allclose(Wn, [0.2, 0.5], rtol=1e-15)
+
+    def test_bandstop(self):
+        wp = [0.1, 0.6]
+        ws = [0.2, 0.5]
+        rp = 3
+        rs = 90
+        N, Wn = cheb1ord(wp, ws, rp, rs, False)
+        b, a = cheby1(N, rp, Wn, 'stop', False)
+        w, h = freqz(b, a)
+        w /= np.pi
+        assert_array_less(-rp - 0.1,
+                          dB(h[np.logical_or(w <= wp[0], wp[1] <= w)]))
+        assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]),
+                          -rs + 0.1)
+
+        assert_equal(N, 10)
+        assert_allclose(Wn, [0.14758232569947785, 0.6], rtol=1e-5)
+
+    def test_analog(self):
+        wp = 700
+        ws = 100
+        rp = 3
+        rs = 70
+        N, Wn = cheb1ord(wp, ws, rp, rs, True)
+        b, a = cheby1(N, rp, Wn, 'high', True)
+        w, h = freqs(b, a)
+        assert_array_less(-rp - 0.1, dB(h[wp <= w]))
+        assert_array_less(dB(h[w <= ws]), -rs + 0.1)
+
+        assert_equal(N, 4)
+        assert_allclose(Wn, 700, rtol=1e-15)
+
+        assert_equal(cheb1ord(1, 1.2, 1, 80, analog=True)[0], 17)
+
+    def test_fs_param(self):
+        wp = 4800
+        ws = 7200
+        rp = 3
+        rs = 60
+        fs = 48000
+        N, Wn = cheb1ord(wp, ws, rp, rs, False, fs=fs)
+        b, a = cheby1(N, rp, Wn, 'low', False, fs=fs)
+        w, h = freqz(b, a, fs=fs)
+        assert_array_less(-rp - 0.1, dB(h[w <= wp]))
+        assert_array_less(dB(h[ws <= w]), -rs + 0.1)
+
+        assert_equal(N, 8)
+        assert_allclose(Wn, 4800, rtol=1e-15)
+
+    def test_invalid_input(self):
+        with pytest.raises(ValueError) as exc_info:
+            cheb1ord(0.2, 0.3, 3, 2)
+        assert "gpass should be smaller than gstop" in str(exc_info.value)
+
+        with pytest.raises(ValueError) as exc_info:
+            cheb1ord(0.2, 0.3, -1, 2)
+        assert "gpass should be larger than 0.0" in str(exc_info.value)
+
+        with pytest.raises(ValueError) as exc_info:
+            cheb1ord(0.2, 0.3, 1, -2)
+        assert "gstop should be larger than 0.0" in str(exc_info.value)
+
+
+class TestCheb2ord:
+
+    def test_lowpass(self):
+        wp = 0.2
+        ws = 0.3
+        rp = 3
+        rs = 60
+        N, Wn = cheb2ord(wp, ws, rp, rs, False)
+        b, a = cheby2(N, rs, Wn, 'lp', False)
+        w, h = freqz(b, a)
+        w /= np.pi
+        assert_array_less(-rp - 0.1, dB(h[w <= wp]))
+        assert_array_less(dB(h[ws <= w]), -rs + 0.1)
+
+        assert_equal(N, 8)
+        assert_allclose(Wn, 0.28647639976553163, rtol=1e-15)
+
+    def test_highpass(self):
+        wp = 0.3
+        ws = 0.2
+        rp = 3
+        rs = 70
+        N, Wn = cheb2ord(wp, ws, rp, rs, False)
+        b, a = cheby2(N, rs, Wn, 'hp', False)
+        w, h = freqz(b, a)
+        w /= np.pi
+        assert_array_less(-rp - 0.1, dB(h[wp <= w]))
+        assert_array_less(dB(h[w <= ws]), -rs + 0.1)
+
+        assert_equal(N, 9)
+        assert_allclose(Wn, 0.20697492182903282, rtol=1e-15)
+
+    def test_bandpass(self):
+        wp = [0.2, 0.5]
+        ws = [0.1, 0.6]
+        rp = 3
+        rs = 80
+        N, Wn = cheb2ord(wp, ws, rp, rs, False)
+        b, a = cheby2(N, rs, Wn, 'bp', False)
+        w, h = freqz(b, a)
+        w /= np.pi
+        assert_array_less(-rp - 0.1,
+                          dB(h[np.logical_and(wp[0] <= w, w <= wp[1])]))
+        assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]),
+                          -rs + 0.1)
+
+        assert_equal(N, 9)
+        assert_allclose(Wn, [0.14876937565923479, 0.59748447842351482],
+                        rtol=1e-15)
+
+    def test_bandstop(self):
+        wp = [0.1, 0.6]
+        ws = [0.2, 0.5]
+        rp = 3
+        rs = 90
+        N, Wn = cheb2ord(wp, ws, rp, rs, False)
+        b, a = cheby2(N, rs, Wn, 'bs', False)
+        w, h = freqz(b, a)
+        w /= np.pi
+        assert_array_less(-rp - 0.1,
+                          dB(h[np.logical_or(w <= wp[0], wp[1] <= w)]))
+        assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]),
+                          -rs + 0.1)
+
+        assert_equal(N, 10)
+        assert_allclose(Wn, [0.19926249974781743, 0.50125246585567362],
+                        rtol=1e-6)
+
+    def test_analog(self):
+        wp = [20, 50]
+        ws = [10, 60]
+        rp = 3
+        rs = 80
+        N, Wn = cheb2ord(wp, ws, rp, rs, True)
+        b, a = cheby2(N, rs, Wn, 'bp', True)
+        w, h = freqs(b, a)
+        assert_array_less(-rp - 0.1,
+                          dB(h[np.logical_and(wp[0] <= w, w <= wp[1])]))
+        assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]),
+                          -rs + 0.1)
+
+        assert_equal(N, 11)
+        assert_allclose(Wn, [1.673740595370124e+01, 5.974641487254268e+01],
+                        rtol=1e-15)
+
+    def test_fs_param(self):
+        wp = 150
+        ws = 100
+        rp = 3
+        rs = 70
+        fs = 1000
+        N, Wn = cheb2ord(wp, ws, rp, rs, False, fs=fs)
+        b, a = cheby2(N, rs, Wn, 'hp', False, fs=fs)
+        w, h = freqz(b, a, fs=fs)
+        assert_array_less(-rp - 0.1, dB(h[wp <= w]))
+        assert_array_less(dB(h[w <= ws]), -rs + 0.1)
+
+        assert_equal(N, 9)
+        assert_allclose(Wn, 103.4874609145164, rtol=1e-15)
+
+    def test_invalid_input(self):
+        with pytest.raises(ValueError) as exc_info:
+            cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 2)
+        assert "gpass should be smaller than gstop" in str(exc_info.value)
+
+        with pytest.raises(ValueError) as exc_info:
+            cheb2ord([0.1, 0.6], [0.2, 0.5], -1, 2)
+        assert "gpass should be larger than 0.0" in str(exc_info.value)
+
+        with pytest.raises(ValueError) as exc_info:
+            cheb2ord([0.1, 0.6], [0.2, 0.5], 1, -2)
+        assert "gstop should be larger than 0.0" in str(exc_info.value)
+
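+# Editor's note (illustrative): cheb2ord mirrors cheb1ord, but cheby2 takes
+# the stopband attenuation rs where cheby1 takes the passband ripple rp, and
+# the returned Wn (where the attenuation first reaches rs) falls between wp
+# and ws rather than on the passband edge.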
+
+class TestEllipord:
+
+    def test_lowpass(self):
+        wp = 0.2
+        ws = 0.3
+        rp = 3
+        rs = 60
+        N, Wn = ellipord(wp, ws, rp, rs, False)
+        b, a = ellip(N, rp, rs, Wn, 'lp', False)
+        w, h = freqz(b, a)
+        w /= np.pi
+        assert_array_less(-rp - 0.1, dB(h[w <= wp]))
+        assert_array_less(dB(h[ws <= w]), -rs + 0.1)
+
+        assert_equal(N, 5)
+        assert_allclose(Wn, 0.2, rtol=1e-15)
+
+    def test_lowpass_1000dB(self):
+        # Regression test: failed before ellipkm1 was used in ellipord and
+        # ellipap
+        wp = 0.2
+        ws = 0.3
+        rp = 3
+        rs = 1000
+        N, Wn = ellipord(wp, ws, rp, rs, False)
+        sos = ellip(N, rp, rs, Wn, 'lp', False, output='sos')
+        w, h = sosfreqz(sos)
+        w /= np.pi
+        assert_array_less(-rp - 0.1, dB(h[w <= wp]))
+        assert_array_less(dB(h[ws <= w]), -rs + 0.1)
+
+    def test_highpass(self):
+        wp = 0.3
+        ws = 0.2
+        rp = 3
+        rs = 70
+        N, Wn = ellipord(wp, ws, rp, rs, False)
+        b, a = ellip(N, rp, rs, Wn, 'hp', False)
+        w, h = freqz(b, a)
+        w /= np.pi
+        assert_array_less(-rp - 0.1, dB(h[wp <= w]))
+        assert_array_less(dB(h[w <= ws]), -rs + 0.1)
+
+        assert_equal(N, 6)
+        assert_allclose(Wn, 0.3, rtol=1e-15)
+
+    def test_bandpass(self):
+        wp = [0.2, 0.5]
+        ws = [0.1, 0.6]
+        rp = 3
+        rs = 80
+        N, Wn = ellipord(wp, ws, rp, rs, False)
+        b, a = ellip(N, rp, rs, Wn, 'bp', False)
+        w, h = freqz(b, a)
+        w /= np.pi
+        assert_array_less(-rp - 0.1,
+                          dB(h[np.logical_and(wp[0] <= w, w <= wp[1])]))
+        assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]),
+                          -rs + 0.1)
+
+        assert_equal(N, 6)
+        assert_allclose(Wn, [0.2, 0.5], rtol=1e-15)
+
+    def test_bandstop(self):
+        wp = [0.1, 0.6]
+        ws = [0.2, 0.5]
+        rp = 3
+        rs = 90
+        N, Wn = ellipord(wp, ws, rp, rs, False)
+        b, a = ellip(N, rp, rs, Wn, 'bs', False)
+        w, h = freqz(b, a)
+        w /= np.pi
+        assert_array_less(-rp - 0.1,
+                          dB(h[np.logical_or(w <= wp[0], wp[1] <= w)]))
+        assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]),
+                          -rs + 0.1)
+
+        assert_equal(N, 7)
+        assert_allclose(Wn, [0.14758232794342988, 0.6], rtol=1e-5)
+
+    def test_analog(self):
+        wp = [1000, 6000]
+        ws = [2000, 5000]
+        rp = 3
+        rs = 90
+        N, Wn = ellipord(wp, ws, rp, rs, True)
+        b, a = ellip(N, rp, rs, Wn, 'bs', True)
+        w, h = freqs(b, a)
+        assert_array_less(-rp - 0.1,
+                          dB(h[np.logical_or(w <= wp[0], wp[1] <= w)]))
+        assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]),
+                          -rs + 0.1)
+
+        assert_equal(N, 8)
+        assert_allclose(Wn, [1666.6666, 6000])
+
+        assert_equal(ellipord(1, 1.2, 1, 80, analog=True)[0], 9)
+
+    def test_fs_param(self):
+        wp = [400, 2400]
+        ws = [800, 2000]
+        rp = 3
+        rs = 90
+        fs = 8000
+        N, Wn = ellipord(wp, ws, rp, rs, False, fs=fs)
+        b, a = ellip(N, rp, rs, Wn, 'bs', False, fs=fs)
+        w, h = freqz(b, a, fs=fs)
+        assert_array_less(-rp - 0.1,
+                          dB(h[np.logical_or(w <= wp[0], wp[1] <= w)]))
+        assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]),
+                          -rs + 0.1)
+
+        assert_equal(N, 7)
+        assert_allclose(Wn, [590.3293117737195, 2400], rtol=1e-5)
+
+    def test_invalid_input(self):
+        with pytest.raises(ValueError) as exc_info:
+            ellipord(0.2, 0.5, 3, 2)
+        assert "gpass should be smaller than gstop" in str(exc_info.value)
+
+        with pytest.raises(ValueError) as exc_info:
+            ellipord(0.2, 0.5, -1, 2)
+        assert "gpass should be larger than 0.0" in str(exc_info.value)
+
+        with pytest.raises(ValueError) as exc_info:
+            ellipord(0.2, 0.5, 1, -2)
+        assert "gstop should be larger than 0.0" in str(exc_info.value)
+
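+# Editor's note (illustrative): ellip(N, rp, rs, Wn, ...) takes both the
+# passband ripple and the stopband attenuation, and an elliptic design meets
+# a given spec with the lowest order of the classic families -- compare the
+# N values asserted above with those in the Chebyshev classes.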
+
+class TestBessel:
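+    # Note: besselap/bessel support three normalizations -- 'phase' fixes the
+    # phase midpoint at w0, 'delay' the group delay at DC, and 'mag' the
+    # -3 dB point -- each exercised by a test_norm_* method below.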
+
+    def test_degenerate(self):
+        for norm in ('delay', 'phase', 'mag'):
+            # 0-order filter is just a passthrough
+            b, a = bessel(0, 1, analog=True, norm=norm)
+            assert_array_equal(b, [1])
+            assert_array_equal(a, [1])
+
+            # A 1st-order filter is the same for every norm
+            b, a = bessel(1, 1, analog=True, norm=norm)
+            assert_allclose(b, [1], rtol=1e-15)
+            assert_allclose(a, [1, 1], rtol=1e-15)
+
+            z, p, k = bessel(1, 0.3, analog=True, output='zpk', norm=norm)
+            assert_array_equal(z, [])
+            assert_allclose(p, [-0.3], rtol=1e-14)
+            assert_allclose(k, 0.3, rtol=1e-14)
+
+    def test_high_order(self):
+        # high even order, 'phase'
+        z, p, k = bessel(24, 100, analog=True, output='zpk')
+        z2 = []
+        p2 = [
+             -9.055312334014323e+01 + 4.844005815403969e+00j,
+             -8.983105162681878e+01 + 1.454056170018573e+01j,
+             -8.837357994162065e+01 + 2.426335240122282e+01j,
+             -8.615278316179575e+01 + 3.403202098404543e+01j,
+             -8.312326467067703e+01 + 4.386985940217900e+01j,
+             -7.921695461084202e+01 + 5.380628489700191e+01j,
+             -7.433392285433246e+01 + 6.388084216250878e+01j,
+             -6.832565803501586e+01 + 7.415032695116071e+01j,
+             -6.096221567378025e+01 + 8.470292433074425e+01j,
+             -5.185914574820616e+01 + 9.569048385258847e+01j,
+             -4.027853855197555e+01 + 1.074195196518679e+02j,
+             -2.433481337524861e+01 + 1.207298683731973e+02j,
+             ]
+        k2 = 9.999999999999989e+47
+        assert_array_equal(z, z2)
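+        # p2 lists only the upper-half-plane poles; the full pole set is p2
+        # together with its conjugates, since real filters have poles in
+        # conjugate pairs.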
+        assert_allclose(sorted(p, key=np.imag),
+                        sorted(np.union1d(p2, np.conj(p2)), key=np.imag))
+        assert_allclose(k, k2, rtol=1e-14)
+
+        # high odd order, 'phase'
+        z, p, k = bessel(23, 1000, analog=True, output='zpk')
+        z2 = []
+        p2 = [
+             -2.497697202208956e+02 + 1.202813187870698e+03j,
+             -4.126986617510172e+02 + 1.065328794475509e+03j,
+             -5.304922463809596e+02 + 9.439760364018479e+02j,
+             -9.027564978975828e+02 + 1.010534334242318e+02j,
+             -8.909283244406079e+02 + 2.023024699647598e+02j,
+             -8.709469394347836e+02 + 3.039581994804637e+02j,
+             -8.423805948131370e+02 + 4.062657947488952e+02j,
+             -8.045561642249877e+02 + 5.095305912401127e+02j,
+             -7.564660146766259e+02 + 6.141594859516342e+02j,
+             -6.965966033906477e+02 + 7.207341374730186e+02j,
+             -6.225903228776276e+02 + 8.301558302815096e+02j,
+             -9.066732476324988e+02]
+        k2 = 9.999999999999983e+68
+        assert_array_equal(z, z2)
+        assert_allclose(sorted(p, key=np.imag),
+                        sorted(np.union1d(p2, np.conj(p2)), key=np.imag))
+        assert_allclose(k, k2, rtol=1e-14)
+
+        # high even order, 'delay' (Orchard 1965 "The Roots of the
+        # Maximally Flat-Delay Polynomials" Table 1)
+        z, p, k = bessel(31, 1, analog=True, output='zpk', norm='delay')
+        p2 = [-20.876706,
+              -20.826543 + 1.735732j,
+              -20.675502 + 3.473320j,
+              -20.421895 + 5.214702j,
+              -20.062802 + 6.961982j,
+              -19.593895 + 8.717546j,
+              -19.009148 + 10.484195j,
+              -18.300400 + 12.265351j,
+              -17.456663 + 14.065350j,
+              -16.463032 + 15.889910j,
+              -15.298849 + 17.746914j,
+              -13.934466 + 19.647827j,
+              -12.324914 + 21.610519j,
+              -10.395893 + 23.665701j,
+              - 8.005600 + 25.875019j,
+              - 4.792045 + 28.406037j,
+              ]
+        assert_allclose(sorted(p, key=np.imag),
+                        sorted(np.union1d(p2, np.conj(p2)), key=np.imag))
+
+        # high odd order, 'delay'
+        z, p, k = bessel(30, 1, analog=True, output='zpk', norm='delay')
+        p2 = [-20.201029 + 0.867750j,
+              -20.097257 + 2.604235j,
+              -19.888485 + 4.343721j,
+              -19.572188 + 6.088363j,
+              -19.144380 + 7.840570j,
+              -18.599342 + 9.603147j,
+              -17.929195 + 11.379494j,
+              -17.123228 + 13.173901j,
+              -16.166808 + 14.992008j,
+              -15.039580 + 16.841580j,
+              -13.712245 + 18.733902j,
+              -12.140295 + 20.686563j,
+              -10.250119 + 22.729808j,
+              - 7.901170 + 24.924391j,
+              - 4.734679 + 27.435615j,
+              ]
+        assert_allclose(sorted(p, key=np.imag),
+                        sorted(np.union1d(p2, np.conj(p2)), key=np.imag))
+
+    def test_refs(self):
+        # Compare to http://www.crbond.com/papers/bsf2.pdf
+        # "Delay Normalized Bessel Polynomial Coefficients"
+        bond_b = 10395
+        bond_a = [1, 21, 210, 1260, 4725, 10395, 10395]
+        b, a = bessel(6, 1, norm='delay', analog=True)
+        assert_allclose(bond_b, b)
+        assert_allclose(bond_a, a)
+
+        # "Delay Normalized Bessel Pole Locations"
+        bond_poles = {
+            1: [-1.0000000000],
+            2: [-1.5000000000 + 0.8660254038j],
+            3: [-1.8389073227 + 1.7543809598j, -2.3221853546],
+            4: [-2.1037893972 + 2.6574180419j, -2.8962106028 + 0.8672341289j],
+            5: [-2.3246743032 + 3.5710229203j, -3.3519563992 + 1.7426614162j,
+                -3.6467385953],
+            6: [-2.5159322478 + 4.4926729537j, -3.7357083563 + 2.6262723114j,
+                -4.2483593959 + 0.8675096732j],
+            7: [-2.6856768789 + 5.4206941307j, -4.0701391636 + 3.5171740477j,
+                -4.7582905282 + 1.7392860611j, -4.9717868585],
+            8: [-2.8389839489 + 6.3539112986j, -4.3682892172 + 4.4144425005j,
+                -5.2048407906 + 2.6161751526j, -5.5878860433 + 0.8676144454j],
+            9: [-2.9792607982 + 7.2914636883j, -4.6384398872 + 5.3172716754j,
+                -5.6044218195 + 3.4981569179j, -6.1293679043 + 1.7378483835j,
+                -6.2970191817],
+            10: [-3.1089162336 + 8.2326994591j, -4.8862195669 + 6.2249854825j,
+                 -5.9675283286 + 4.3849471889j, -6.6152909655 + 2.6115679208j,
+                 -6.9220449054 + 0.8676651955j]
+            }
+
+        for N in range(1, 11):
+            p1 = np.sort(bond_poles[N])
+            p2 = np.sort(np.concatenate(_cplxreal(besselap(N, 'delay')[1])))
+            assert_array_almost_equal(p1, p2, decimal=10)
+
+        # "Frequency Normalized Bessel Pole Locations"
+        bond_poles = {
+            1: [-1.0000000000],
+            2: [-1.1016013306 + 0.6360098248j],
+            3: [-1.0474091610 + 0.9992644363j, -1.3226757999],
+            4: [-0.9952087644 + 1.2571057395j, -1.3700678306 + 0.4102497175j],
+            5: [-0.9576765486 + 1.4711243207j, -1.3808773259 + 0.7179095876j,
+                -1.5023162714],
+            6: [-0.9306565229 + 1.6618632689j, -1.3818580976 + 0.9714718907j,
+                -1.5714904036 + 0.3208963742j],
+            7: [-0.9098677806 + 1.8364513530j, -1.3789032168 + 1.1915667778j,
+                -1.6120387662 + 0.5892445069j, -1.6843681793],
+            8: [-0.8928697188 + 1.9983258436j, -1.3738412176 + 1.3883565759j,
+                -1.6369394181 + 0.8227956251j, -1.7574084004 + 0.2728675751j],
+            9: [-0.8783992762 + 2.1498005243j, -1.3675883098 + 1.5677337122j,
+                -1.6523964846 + 1.0313895670j, -1.8071705350 + 0.5123837306j,
+                -1.8566005012],
+            10: [-0.8657569017 + 2.2926048310j, -1.3606922784 + 1.7335057427j,
+                 -1.6618102414 + 1.2211002186j, -1.8421962445 + 0.7272575978j,
+                 -1.9276196914 + 0.2416234710j]
+            }
+
+        for N in range(1, 11):
+            p1 = np.sort(bond_poles[N])
+            p2 = np.sort(np.concatenate(_cplxreal(besselap(N, 'mag')[1])))
+            assert_array_almost_equal(p1, p2, decimal=10)
+
+        # Compare to https://www.ranecommercial.com/legacy/note147.html
+        # "Table 1 - Bessel Crossovers of Second, Third, and Fourth-Order"
+        a = [1, 1, 1/3]
+        b2, a2 = bessel(2, 1, norm='delay', analog=True)
+        assert_allclose(a[::-1], a2/b2)
+
+        a = [1, 1, 2/5, 1/15]
+        b2, a2 = bessel(3, 1, norm='delay', analog=True)
+        assert_allclose(a[::-1], a2/b2)
+
+        a = [1, 1, 9/21, 2/21, 1/105]
+        b2, a2 = bessel(4, 1, norm='delay', analog=True)
+        assert_allclose(a[::-1], a2/b2)
+
+        a = [1, np.sqrt(3), 1]
+        b2, a2 = bessel(2, 1, norm='phase', analog=True)
+        assert_allclose(a[::-1], a2/b2)
+
+        # TODO: Why so inaccurate?  Is reference flawed?
+        a = [1, 2.481, 2.463, 1.018]
+        b2, a2 = bessel(3, 1, norm='phase', analog=True)
+        assert_array_almost_equal(a[::-1], a2/b2, decimal=1)
+
+        # TODO: Why so inaccurate?  Is reference flawed?
+        a = [1, 3.240, 4.5, 3.240, 1.050]
+        b2, a2 = bessel(4, 1, norm='phase', analog=True)
+        assert_array_almost_equal(a[::-1], a2/b2, decimal=1)
+
+        # Table of -3 dB factors:
+        N, scale = 2, 1.272
+        scale2 = besselap(N, 'mag')[1] / besselap(N, 'phase')[1]
+        assert_array_almost_equal(scale, scale2, decimal=3)
+
+        # TODO: Why so inaccurate?  Is reference flawed?
+        N, scale = 3, 1.413
+        scale2 = besselap(N, 'mag')[1] / besselap(N, 'phase')[1]
+        assert_array_almost_equal(scale, scale2, decimal=2)
+
+        # TODO: Why so inaccurate?  Is reference flawed?
+        N, scale = 4, 1.533
+        scale2 = besselap(N, 'mag')[1] / besselap(N, 'phase')[1]
+        assert_array_almost_equal(scale, scale2, decimal=1)
+
+    def test_hardcoded(self):
+        # Compare to values from original hardcoded implementation
+        originals = {
+            0: [],
+            1: [-1],
+            2: [-.8660254037844386467637229 + .4999999999999999999999996j],
+            3: [-.9416000265332067855971980,
+                -.7456403858480766441810907 + .7113666249728352680992154j],
+            4: [-.6572111716718829545787788 + .8301614350048733772399715j,
+                -.9047587967882449459642624 + .2709187330038746636700926j],
+            5: [-.9264420773877602247196260,
+                -.8515536193688395541722677 + .4427174639443327209850002j,
+                -.5905759446119191779319432 + .9072067564574549539291747j],
+            6: [-.9093906830472271808050953 + .1856964396793046769246397j,
+                -.7996541858328288520243325 + .5621717346937317988594118j,
+                -.5385526816693109683073792 + .9616876881954277199245657j],
+            7: [-.9194871556490290014311619,
+                -.8800029341523374639772340 + .3216652762307739398381830j,
+                -.7527355434093214462291616 + .6504696305522550699212995j,
+                -.4966917256672316755024763 + 1.002508508454420401230220j],
+            8: [-.9096831546652910216327629 + .1412437976671422927888150j,
+                -.8473250802359334320103023 + .4259017538272934994996429j,
+                -.7111381808485399250796172 + .7186517314108401705762571j,
+                -.4621740412532122027072175 + 1.034388681126901058116589j],
+            9: [-.9154957797499037686769223,
+                -.8911217017079759323183848 + .2526580934582164192308115j,
+                -.8148021112269012975514135 + .5085815689631499483745341j,
+                -.6743622686854761980403401 + .7730546212691183706919682j,
+                -.4331415561553618854685942 + 1.060073670135929666774323j],
+            10: [-.9091347320900502436826431 + .1139583137335511169927714j,
+                 -.8688459641284764527921864 + .3430008233766309973110589j,
+                 -.7837694413101441082655890 + .5759147538499947070009852j,
+                 -.6417513866988316136190854 + .8175836167191017226233947j,
+                 -.4083220732868861566219785 + 1.081274842819124562037210j],
+            11: [-.9129067244518981934637318,
+                 -.8963656705721166099815744 + .2080480375071031919692341j,
+                 -.8453044014712962954184557 + .4178696917801248292797448j,
+                 -.7546938934722303128102142 + .6319150050721846494520941j,
+                 -.6126871554915194054182909 + .8547813893314764631518509j,
+                 -.3868149510055090879155425 + 1.099117466763120928733632j],
+            12: [-.9084478234140682638817772 + 95506365213450398415258360e-27j,
+                 -.8802534342016826507901575 + .2871779503524226723615457j,
+                 -.8217296939939077285792834 + .4810212115100676440620548j,
+                 -.7276681615395159454547013 + .6792961178764694160048987j,
+                 -.5866369321861477207528215 + .8863772751320727026622149j,
+                 -.3679640085526312839425808 + 1.114373575641546257595657j],
+            13: [-.9110914665984182781070663,
+                 -.8991314665475196220910718 + .1768342956161043620980863j,
+                 -.8625094198260548711573628 + .3547413731172988997754038j,
+                 -.7987460692470972510394686 + .5350752120696801938272504j,
+                 -.7026234675721275653944062 + .7199611890171304131266374j,
+                 -.5631559842430199266325818 + .9135900338325109684927731j,
+                 -.3512792323389821669401925 + 1.127591548317705678613239j],
+            14: [-.9077932138396487614720659 + 82196399419401501888968130e-27j,
+                 -.8869506674916445312089167 + .2470079178765333183201435j,
+                 -.8441199160909851197897667 + .4131653825102692595237260j,
+                 -.7766591387063623897344648 + .5819170677377608590492434j,
+                 -.6794256425119233117869491 + .7552857305042033418417492j,
+                 -.5418766775112297376541293 + .9373043683516919569183099j,
+                 -.3363868224902037330610040 + 1.139172297839859991370924j],
+            15: [-.9097482363849064167228581,
+                 -.9006981694176978324932918 + .1537681197278439351298882j,
+                 -.8731264620834984978337843 + .3082352470564267657715883j,
+                 -.8256631452587146506294553 + .4642348752734325631275134j,
+                 -.7556027168970728127850416 + .6229396358758267198938604j,
+                 -.6579196593110998676999362 + .7862895503722515897065645j,
+                 -.5224954069658330616875186 + .9581787261092526478889345j,
+                 -.3229963059766444287113517 + 1.149416154583629539665297j],
+            16: [-.9072099595087001356491337 + 72142113041117326028823950e-27j,
+                 -.8911723070323647674780132 + .2167089659900576449410059j,
+                 -.8584264231521330481755780 + .3621697271802065647661080j,
+                 -.8074790293236003885306146 + .5092933751171800179676218j,
+                 -.7356166304713115980927279 + .6591950877860393745845254j,
+                 -.6379502514039066715773828 + .8137453537108761895522580j,
+                 -.5047606444424766743309967 + .9767137477799090692947061j,
+                 -.3108782755645387813283867 + 1.158552841199330479412225j],
+            17: [-.9087141161336397432860029,
+                 -.9016273850787285964692844 + .1360267995173024591237303j,
+                 -.8801100704438627158492165 + .2725347156478803885651973j,
+                 -.8433414495836129204455491 + .4100759282910021624185986j,
+                 -.7897644147799708220288138 + .5493724405281088674296232j,
+                 -.7166893842372349049842743 + .6914936286393609433305754j,
+                 -.6193710717342144521602448 + .8382497252826992979368621j,
+                 -.4884629337672704194973683 + .9932971956316781632345466j,
+                 -.2998489459990082015466971 + 1.166761272925668786676672j],
+            18: [-.9067004324162775554189031 + 64279241063930693839360680e-27j,
+                 -.8939764278132455733032155 + .1930374640894758606940586j,
+                 -.8681095503628830078317207 + .3224204925163257604931634j,
+                 -.8281885016242836608829018 + .4529385697815916950149364j,
+                 -.7726285030739558780127746 + .5852778162086640620016316j,
+                 -.6987821445005273020051878 + .7204696509726630531663123j,
+                 -.6020482668090644386627299 + .8602708961893664447167418j,
+                 -.4734268069916151511140032 + 1.008234300314801077034158j,
+                 -.2897592029880489845789953 + 1.174183010600059128532230j],
+            19: [-.9078934217899404528985092,
+                 -.9021937639390660668922536 + .1219568381872026517578164j,
+                 -.8849290585034385274001112 + .2442590757549818229026280j,
+                 -.8555768765618421591093993 + .3672925896399872304734923j,
+                 -.8131725551578197705476160 + .4915365035562459055630005j,
+                 -.7561260971541629355231897 + .6176483917970178919174173j,
+                 -.6818424412912442033411634 + .7466272357947761283262338j,
+                 -.5858613321217832644813602 + .8801817131014566284786759j,
+                 -.4595043449730988600785456 + 1.021768776912671221830298j,
+                 -.2804866851439370027628724 + 1.180931628453291873626003j],
+            20: [-.9062570115576771146523497 + 57961780277849516990208850e-27j,
+                 -.8959150941925768608568248 + .1740317175918705058595844j,
+                 -.8749560316673332850673214 + .2905559296567908031706902j,
+                 -.8427907479956670633544106 + .4078917326291934082132821j,
+                 -.7984251191290606875799876 + .5264942388817132427317659j,
+                 -.7402780309646768991232610 + .6469975237605228320268752j,
+                 -.6658120544829934193890626 + .7703721701100763015154510j,
+                 -.5707026806915714094398061 + .8982829066468255593407161j,
+                 -.4465700698205149555701841 + 1.034097702560842962315411j,
+                 -.2719299580251652601727704 + 1.187099379810885886139638j],
+            21: [-.9072262653142957028884077,
+                 -.9025428073192696303995083 + .1105252572789856480992275j,
+                 -.8883808106664449854431605 + .2213069215084350419975358j,
+                 -.8643915813643204553970169 + .3326258512522187083009453j,
+                 -.8299435470674444100273463 + .4448177739407956609694059j,
+                 -.7840287980408341576100581 + .5583186348022854707564856j,
+                 -.7250839687106612822281339 + .6737426063024382240549898j,
+                 -.6506315378609463397807996 + .7920349342629491368548074j,
+                 -.5564766488918562465935297 + .9148198405846724121600860j,
+                 -.4345168906815271799687308 + 1.045382255856986531461592j,
+                 -.2640041595834031147954813 + 1.192762031948052470183960j],
+            22: [-.9058702269930872551848625 + 52774908289999045189007100e-27j,
+                 -.8972983138153530955952835 + .1584351912289865608659759j,
+                 -.8799661455640176154025352 + .2644363039201535049656450j,
+                 -.8534754036851687233084587 + .3710389319482319823405321j,
+                 -.8171682088462720394344996 + .4785619492202780899653575j,
+                 -.7700332930556816872932937 + .5874255426351153211965601j,
+                 -.7105305456418785989070935 + .6982266265924524000098548j,
+                 -.6362427683267827226840153 + .8118875040246347267248508j,
+                 -.5430983056306302779658129 + .9299947824439872998916657j,
+                 -.4232528745642628461715044 + 1.055755605227545931204656j,
+                 -.2566376987939318038016012 + 1.197982433555213008346532j],
+            23: [-.9066732476324988168207439,
+                 -.9027564979912504609412993 + .1010534335314045013252480j,
+                 -.8909283242471251458653994 + .2023024699381223418195228j,
+                 -.8709469395587416239596874 + .3039581993950041588888925j,
+                 -.8423805948021127057054288 + .4062657948237602726779246j,
+                 -.8045561642053176205623187 + .5095305912227258268309528j,
+                 -.7564660146829880581478138 + .6141594859476032127216463j,
+                 -.6965966033912705387505040 + .7207341374753046970247055j,
+                 -.6225903228771341778273152 + .8301558302812980678845563j,
+                 -.5304922463810191698502226 + .9439760364018300083750242j,
+                 -.4126986617510148836149955 + 1.065328794475513585531053j,
+                 -.2497697202208956030229911 + 1.202813187870697831365338j],
+            24: [-.9055312363372773709269407 + 48440066540478700874836350e-27j,
+                 -.8983105104397872954053307 + .1454056133873610120105857j,
+                 -.8837358034555706623131950 + .2426335234401383076544239j,
+                 -.8615278304016353651120610 + .3403202112618624773397257j,
+                 -.8312326466813240652679563 + .4386985933597305434577492j,
+                 -.7921695462343492518845446 + .5380628490968016700338001j,
+                 -.7433392285088529449175873 + .6388084216222567930378296j,
+                 -.6832565803536521302816011 + .7415032695091650806797753j,
+                 -.6096221567378335562589532 + .8470292433077202380020454j,
+                 -.5185914574820317343536707 + .9569048385259054576937721j,
+                 -.4027853855197518014786978 + 1.074195196518674765143729j,
+                 -.2433481337524869675825448 + 1.207298683731972524975429j],
+            25: [-.9062073871811708652496104,
+                 -.9028833390228020537142561 + 93077131185102967450643820e-27j,
+                 -.8928551459883548836774529 + .1863068969804300712287138j,
+                 -.8759497989677857803656239 + .2798521321771408719327250j,
+                 -.8518616886554019782346493 + .3738977875907595009446142j,
+                 -.8201226043936880253962552 + .4686668574656966589020580j,
+                 -.7800496278186497225905443 + .5644441210349710332887354j,
+                 -.7306549271849967721596735 + .6616149647357748681460822j,
+                 -.6704827128029559528610523 + .7607348858167839877987008j,
+                 -.5972898661335557242320528 + .8626676330388028512598538j,
+                 -.5073362861078468845461362 + .9689006305344868494672405j,
+                 -.3934529878191079606023847 + 1.082433927173831581956863j,
+                 -.2373280669322028974199184 + 1.211476658382565356579418j],
+            }
+        for N in originals:
+            p1 = sorted(np.union1d(originals[N],
+                                   np.conj(originals[N])), key=np.imag)
+            p2 = sorted(besselap(N)[1], key=np.imag)
+            assert_allclose(p1, p2, rtol=1e-14)
+
+    def test_norm_phase(self):
+        # Test some orders and frequencies and check that phase-normalized
+        # filters have zero phase at DC and reach -N*pi/4 (the phase
+        # midpoint) at w0
+        for N in (1, 2, 3, 4, 5, 51, 72):
+            for w0 in (1, 100):
+                b, a = bessel(N, w0, analog=True, norm='phase')
+                w = np.linspace(0, w0, 100)
+                w, h = freqs(b, a, w)
+                phase = np.unwrap(np.angle(h))
+                assert_allclose(phase[[0, -1]], (0, -N*pi/4), rtol=1e-1)
+
+    def test_norm_mag(self):
+        # Test some orders and frequencies and check that 'mag'-normalized
+        # filters have gain 1 at DC and -3 dB (1/sqrt(2)) at w0
+        for N in (1, 2, 3, 4, 5, 51, 72):
+            for w0 in (1, 100):
+                b, a = bessel(N, w0, analog=True, norm='mag')
+                w = (0, w0)
+                w, h = freqs(b, a, w)
+                mag = abs(h)
+                assert_allclose(mag, (1, 1/np.sqrt(2)))
+
+    def test_norm_delay(self):
+        # Test some orders and frequencies and check that 'delay'-normalized
+        # filters have a group delay of 1/w0 at DC
+        for N in (1, 2, 3, 4, 5, 51, 72):
+            for w0 in (1, 100):
+                b, a = bessel(N, w0, analog=True, norm='delay')
+                w = np.linspace(0, 10*w0, 1000)
+                w, h = freqs(b, a, w)
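+                # Group delay is -d(phase)/d(omega), approximated here by
+                # finite differences of the unwrapped phase.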
+                delay = -np.diff(np.unwrap(np.angle(h)))/np.diff(w)
+                assert_allclose(delay[0], 1/w0, rtol=1e-4)
+
+    def test_norm_factor(self):
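+        # Reference values were computed with mpmath; _norm_factor should
+        # give the frequency scale that moves a delay-normalized Bessel
+        # prototype's -3 dB point to w=1.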
+        mpmath_values = {
+            1: 1, 2: 1.361654128716130520, 3: 1.755672368681210649,
+            4: 2.113917674904215843, 5: 2.427410702152628137,
+            6: 2.703395061202921876, 7: 2.951722147038722771,
+            8: 3.179617237510651330, 9: 3.391693138911660101,
+            10: 3.590980594569163482, 11: 3.779607416439620092,
+            12: 3.959150821144285315, 13: 4.130825499383535980,
+            14: 4.295593409533637564, 15: 4.454233021624377494,
+            16: 4.607385465472647917, 17: 4.755586548961147727,
+            18: 4.899289677284488007, 19: 5.038882681488207605,
+            20: 5.174700441742707423, 21: 5.307034531360917274,
+            22: 5.436140703250035999, 23: 5.562244783787878196,
+            24: 5.685547371295963521, 25: 5.806227623775418541,
+            50: 8.268963160013226298, 51: 8.352374541546012058,
+            }
+        for N in mpmath_values:
+            z, p, k = besselap(N, 'delay')
+            assert_allclose(mpmath_values[N], _norm_factor(p, k), rtol=1e-13)
+
+    def test_bessel_poly(self):
+        assert_array_equal(_bessel_poly(5), [945, 945, 420, 105, 15, 1])
+        assert_array_equal(_bessel_poly(4, True), [1, 10, 45, 105, 105])
+
+    def test_bessel_zeros(self):
+        assert_array_equal(_bessel_zeros(0), [])
+
+    def test_invalid(self):
+        assert_raises(ValueError, besselap, 5, 'nonsense')
+        assert_raises(ValueError, besselap, -5)
+        assert_raises(ValueError, besselap, 3.2)
+        assert_raises(ValueError, _bessel_poly, -3)
+        assert_raises(ValueError, _bessel_poly, 3.3)
+
+    def test_fs_param(self):
+        for norm in ('phase', 'mag', 'delay'):
+            for fs in (900, 900.1, 1234.567):
+                for N in (0, 1, 2, 3, 10):
+                    for fc in (100, 100.1, 432.12345):
+                        for btype in ('lp', 'hp'):
+                            ba1 = bessel(N, fc, btype, norm=norm, fs=fs)
+                            ba2 = bessel(N, fc/(fs/2), btype, norm=norm)
+                            assert_allclose(ba1, ba2)
+                    for fc in ((100, 200), (100.1, 200.2), (321.123, 432.123)):
+                        for btype in ('bp', 'bs'):
+                            ba1 = bessel(N, fc, btype, norm=norm, fs=fs)
+                            for seq in (list, tuple, array):
+                                fcnorm = seq([f/(fs/2) for f in fc])
+                                ba2 = bessel(N, fcnorm, btype, norm=norm)
+                                assert_allclose(ba1, ba2)
+
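+# Editor's note (illustrative): the test_fs_param methods rely on the
+# documented equivalence between passing a sample rate and pre-normalizing
+# critical frequencies to Nyquist, e.g. bessel(4, 100, 'lp', fs=1000) should
+# match bessel(4, 100 / (1000 / 2), 'lp').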
+
+class TestButter:
+
+    def test_degenerate(self):
+        # 0-order filter is just a passthrough
+        b, a = butter(0, 1, analog=True)
+        assert_array_equal(b, [1])
+        assert_array_equal(a, [1])
+
+        # A 1st-order filter is the same for all filter types
+        b, a = butter(1, 1, analog=True)
+        assert_array_almost_equal(b, [1])
+        assert_array_almost_equal(a, [1, 1])
+
+        z, p, k = butter(1, 0.3, output='zpk')
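+        # The bilinear transform maps the analog zero at infinity to z = -1
+        # (Nyquist), hence the zero below.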
+        assert_array_equal(z, [-1])
+        assert_allclose(p, [3.249196962329063e-01], rtol=1e-14)
+        assert_allclose(k, 3.375401518835469e-01, rtol=1e-14)
+
+    def test_basic(self):
+        # analog s-plane
+        for N in range(25):
+            wn = 0.01
+            z, p, k = butter(N, wn, 'low', analog=True, output='zpk')
+            assert_array_almost_equal([], z)
+            assert_(len(p) == N)
+            # All poles should be at distance wn from origin
+            assert_array_almost_equal(wn, abs(p))
+            assert_(all(np.real(p) <= 0))  # No poles in right half of S-plane
+            assert_array_almost_equal(wn**N, k)
+
+        # digital z-plane
+        for N in range(25):
+            wn = 0.01
+            z, p, k = butter(N, wn, 'high', analog=False, output='zpk')
+            assert_array_equal(np.ones(N), z)  # All zeros exactly at DC
+            assert_(all(np.abs(p) <= 1))  # No poles outside unit circle
+
+        b1, a1 = butter(2, 1, analog=True)
+        assert_array_almost_equal(b1, [1])
+        assert_array_almost_equal(a1, [1, np.sqrt(2), 1])
+
+        b2, a2 = butter(5, 1, analog=True)
+        assert_array_almost_equal(b2, [1])
+        assert_array_almost_equal(a2, [1, 3.2361, 5.2361,
+                                       5.2361, 3.2361, 1], decimal=4)
+
+        b3, a3 = butter(10, 1, analog=True)
+        assert_array_almost_equal(b3, [1])
+        assert_array_almost_equal(a3, [1, 6.3925, 20.4317, 42.8021, 64.8824,
+                                       74.2334, 64.8824, 42.8021, 20.4317,
+                                       6.3925, 1], decimal=4)
+
+        b2, a2 = butter(19, 1.0441379169150726, analog=True)
+        assert_array_almost_equal(b2, [2.2720], decimal=4)
+        assert_array_almost_equal(a2, 1.0e+004 * np.array([
+                        0.0001, 0.0013, 0.0080, 0.0335, 0.1045, 0.2570,
+                        0.5164, 0.8669, 1.2338, 1.5010, 1.5672, 1.4044,
+                        1.0759, 0.6986, 0.3791, 0.1681, 0.0588, 0.0153,
+                        0.0026, 0.0002]), decimal=0)
+
+        b, a = butter(5, 0.4)
+        assert_array_almost_equal(b, [0.0219, 0.1097, 0.2194,
+                                      0.2194, 0.1097, 0.0219], decimal=4)
+        assert_array_almost_equal(a, [1.0000, -0.9853, 0.9738,
+                                      -0.3864, 0.1112, -0.0113], decimal=4)
+
+    def test_highpass(self):
+        # highpass, high even order
+        z, p, k = butter(28, 0.43, 'high', output='zpk')
+        z2 = np.ones(28)
+        p2 = [
+            2.068257195514592e-01 + 9.238294351481734e-01j,
+            2.068257195514592e-01 - 9.238294351481734e-01j,
+            1.874933103892023e-01 + 8.269455076775277e-01j,
+            1.874933103892023e-01 - 8.269455076775277e-01j,
+            1.717435567330153e-01 + 7.383078571194629e-01j,
+            1.717435567330153e-01 - 7.383078571194629e-01j,
+            1.588266870755982e-01 + 6.564623730651094e-01j,
+            1.588266870755982e-01 - 6.564623730651094e-01j,
+            1.481881532502603e-01 + 5.802343458081779e-01j,
+            1.481881532502603e-01 - 5.802343458081779e-01j,
+            1.394122576319697e-01 + 5.086609000582009e-01j,
+            1.394122576319697e-01 - 5.086609000582009e-01j,
+            1.321840881809715e-01 + 4.409411734716436e-01j,
+            1.321840881809715e-01 - 4.409411734716436e-01j,
+            1.262633413354405e-01 + 3.763990035551881e-01j,
+            1.262633413354405e-01 - 3.763990035551881e-01j,
+            1.214660449478046e-01 + 3.144545234797277e-01j,
+            1.214660449478046e-01 - 3.144545234797277e-01j,
+            1.104868766650320e-01 + 2.771505404367791e-02j,
+            1.104868766650320e-01 - 2.771505404367791e-02j,
+            1.111768629525075e-01 + 8.331369153155753e-02j,
+            1.111768629525075e-01 - 8.331369153155753e-02j,
+            1.125740630842972e-01 + 1.394219509611784e-01j,
+            1.125740630842972e-01 - 1.394219509611784e-01j,
+            1.147138487992747e-01 + 1.963932363793666e-01j,
+            1.147138487992747e-01 - 1.963932363793666e-01j,
+            1.176516491045901e-01 + 2.546021573417188e-01j,
+            1.176516491045901e-01 - 2.546021573417188e-01j,
+            ]
+        k2 = 1.446671081817286e-06
+        assert_array_equal(z, z2)
+        assert_allclose(sorted(p, key=np.imag),
+                        sorted(p2, key=np.imag), rtol=1e-7)
+        assert_allclose(k, k2, rtol=1e-10)
+
+        # highpass, high odd order
+        z, p, k = butter(27, 0.56, 'high', output='zpk')
+        z2 = np.ones(27)
+        p2 = [
+            -1.772572785680147e-01 + 9.276431102995948e-01j,
+            -1.772572785680147e-01 - 9.276431102995948e-01j,
+            -1.600766565322114e-01 + 8.264026279893268e-01j,
+            -1.600766565322114e-01 - 8.264026279893268e-01j,
+            -1.461948419016121e-01 + 7.341841939120078e-01j,
+            -1.461948419016121e-01 - 7.341841939120078e-01j,
+            -1.348975284762046e-01 + 6.493235066053785e-01j,
+            -1.348975284762046e-01 - 6.493235066053785e-01j,
+            -1.256628210712206e-01 + 5.704921366889227e-01j,
+            -1.256628210712206e-01 - 5.704921366889227e-01j,
+            -1.181038235962314e-01 + 4.966120551231630e-01j,
+            -1.181038235962314e-01 - 4.966120551231630e-01j,
+            -1.119304913239356e-01 + 4.267938916403775e-01j,
+            -1.119304913239356e-01 - 4.267938916403775e-01j,
+            -1.069237739782691e-01 + 3.602914879527338e-01j,
+            -1.069237739782691e-01 - 3.602914879527338e-01j,
+            -1.029178030691416e-01 + 2.964677964142126e-01j,
+            -1.029178030691416e-01 - 2.964677964142126e-01j,
+            -9.978747500816100e-02 + 2.347687643085738e-01j,
+            -9.978747500816100e-02 - 2.347687643085738e-01j,
+            -9.743974496324025e-02 + 1.747028739092479e-01j,
+            -9.743974496324025e-02 - 1.747028739092479e-01j,
+            -9.580754551625957e-02 + 1.158246860771989e-01j,
+            -9.580754551625957e-02 - 1.158246860771989e-01j,
+            -9.484562207782568e-02 + 5.772118357151691e-02j,
+            -9.484562207782568e-02 - 5.772118357151691e-02j,
+            -9.452783117928215e-02
+            ]
+        k2 = 9.585686688851069e-09
+        assert_array_equal(z, z2)
+        assert_allclose(sorted(p, key=np.imag),
+                        sorted(p2, key=np.imag), rtol=1e-8)
+        assert_allclose(k, k2)
+
+    def test_bandpass(self):
+        z, p, k = butter(8, [0.25, 0.33], 'band', output='zpk')
+        z2 = [1, 1, 1, 1, 1, 1, 1, 1,
+              -1, -1, -1, -1, -1, -1, -1, -1]
+        p2 = [
+            4.979909925436156e-01 + 8.367609424799387e-01j,
+            4.979909925436156e-01 - 8.367609424799387e-01j,
+            4.913338722555539e-01 + 7.866774509868817e-01j,
+            4.913338722555539e-01 - 7.866774509868817e-01j,
+            5.035229361778706e-01 + 7.401147376726750e-01j,
+            5.035229361778706e-01 - 7.401147376726750e-01j,
+            5.307617160406101e-01 + 7.029184459442954e-01j,
+            5.307617160406101e-01 - 7.029184459442954e-01j,
+            5.680556159453138e-01 + 6.788228792952775e-01j,
+            5.680556159453138e-01 - 6.788228792952775e-01j,
+            6.100962560818854e-01 + 6.693849403338664e-01j,
+            6.100962560818854e-01 - 6.693849403338664e-01j,
+            6.904694312740631e-01 + 6.930501690145245e-01j,
+            6.904694312740631e-01 - 6.930501690145245e-01j,
+            6.521767004237027e-01 + 6.744414640183752e-01j,
+            6.521767004237027e-01 - 6.744414640183752e-01j,
+            ]
+        k2 = 3.398854055800844e-08
+        assert_array_equal(z, z2)
+        assert_allclose(sorted(p, key=np.imag),
+                        sorted(p2, key=np.imag), rtol=1e-13)
+        assert_allclose(k, k2, rtol=1e-13)
+
+        # bandpass analog
+        z, p, k = butter(4, [90.5, 110.5], 'bp', analog=True, output='zpk')
+        z2 = np.zeros(4)
+        p2 = [
+            -4.179137760733086e+00 + 1.095935899082837e+02j,
+            -4.179137760733086e+00 - 1.095935899082837e+02j,
+            -9.593598668443835e+00 + 1.034745398029734e+02j,
+            -9.593598668443835e+00 - 1.034745398029734e+02j,
+            -8.883991981781929e+00 + 9.582087115567160e+01j,
+            -8.883991981781929e+00 - 9.582087115567160e+01j,
+            -3.474530886568715e+00 + 9.111599925805801e+01j,
+            -3.474530886568715e+00 - 9.111599925805801e+01j,
+            ]
+        k2 = 1.600000000000001e+05
+        assert_array_equal(z, z2)
+        assert_allclose(sorted(p, key=np.imag), sorted(p2, key=np.imag))
+        assert_allclose(k, k2, rtol=1e-15)
+
+    def test_bandstop(self):
+        z, p, k = butter(7, [0.45, 0.56], 'stop', output='zpk')
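+        # A digital Butterworth bandstop places an N-fold conjugate zero pair
+        # on the unit circle at the notch frequency, so z2 is one pair
+        # repeated seven times.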
+        z2 = [-1.594474531383421e-02 + 9.998728744679880e-01j,
+              -1.594474531383421e-02 - 9.998728744679880e-01j,
+              -1.594474531383421e-02 + 9.998728744679880e-01j,
+              -1.594474531383421e-02 - 9.998728744679880e-01j,
+              -1.594474531383421e-02 + 9.998728744679880e-01j,
+              -1.594474531383421e-02 - 9.998728744679880e-01j,
+              -1.594474531383421e-02 + 9.998728744679880e-01j,
+              -1.594474531383421e-02 - 9.998728744679880e-01j,
+              -1.594474531383421e-02 + 9.998728744679880e-01j,
+              -1.594474531383421e-02 - 9.998728744679880e-01j,
+              -1.594474531383421e-02 + 9.998728744679880e-01j,
+              -1.594474531383421e-02 - 9.998728744679880e-01j,
+              -1.594474531383421e-02 + 9.998728744679880e-01j,
+              -1.594474531383421e-02 - 9.998728744679880e-01j]
+        p2 = [-1.766850742887729e-01 + 9.466951258673900e-01j,
+              -1.766850742887729e-01 - 9.466951258673900e-01j,
+               1.467897662432886e-01 + 9.515917126462422e-01j,
+               1.467897662432886e-01 - 9.515917126462422e-01j,
+              -1.370083529426906e-01 + 8.880376681273993e-01j,
+              -1.370083529426906e-01 - 8.880376681273993e-01j,
+               1.086774544701390e-01 + 8.915240810704319e-01j,
+               1.086774544701390e-01 - 8.915240810704319e-01j,
+              -7.982704457700891e-02 + 8.506056315273435e-01j,
+              -7.982704457700891e-02 - 8.506056315273435e-01j,
+               5.238812787110331e-02 + 8.524011102699969e-01j,
+               5.238812787110331e-02 - 8.524011102699969e-01j,
+              -1.357545000491310e-02 + 8.382287744986582e-01j,
+              -1.357545000491310e-02 - 8.382287744986582e-01j]
+        k2 = 4.577122512960063e-01
+        assert_allclose(sorted(z, key=np.imag), sorted(z2, key=np.imag))
+        assert_allclose(sorted(p, key=np.imag), sorted(p2, key=np.imag))
+        assert_allclose(k, k2, rtol=1e-14)
+
+    def test_ba_output(self):
+        b, a = butter(4, [100, 300], 'bandpass', analog=True)
+        b2 = [1.6e+09, 0, 0, 0, 0]
+        a2 = [1.000000000000000e+00, 5.226251859505511e+02,
+              2.565685424949238e+05, 6.794127417357160e+07,
+              1.519411254969542e+10, 2.038238225207147e+12,
+              2.309116882454312e+14, 1.411088002066486e+16,
+              8.099999999999991e+17]
+        assert_allclose(b, b2, rtol=1e-14)
+        assert_allclose(a, a2, rtol=1e-14)
+
+    def test_fs_param(self):
+        for fs in (900, 900.1, 1234.567):
+            for N in (0, 1, 2, 3, 10):
+                for fc in (100, 100.1, 432.12345):
+                    for btype in ('lp', 'hp'):
+                        ba1 = butter(N, fc, btype, fs=fs)
+                        ba2 = butter(N, fc/(fs/2), btype)
+                        assert_allclose(ba1, ba2)
+                for fc in ((100, 200), (100.1, 200.2), (321.123, 432.123)):
+                    for btype in ('bp', 'bs'):
+                        ba1 = butter(N, fc, btype, fs=fs)
+                        for seq in (list, tuple, array):
+                            fcnorm = seq([f/(fs/2) for f in fc])
+                            ba2 = butter(N, fcnorm, btype)
+                            assert_allclose(ba1, ba2)
+
+
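+def _sketch_butter_pole_geometry():
+    # Illustrative helper (not an original test; the name is made up): analog
+    # Butterworth poles lie equally spaced on a circle of radius Wn in the
+    # left half plane, which test_basic above checks numerically.
+    z, p, k = butter(4, 2.0, 'low', analog=True, output='zpk')
+    assert np.allclose(np.abs(p), 2.0) and np.all(np.real(p) <= 0)
+    return z, p, k
+
+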
+class TestCheby1:
+
+    def test_degenerate(self):
+        # 0-order filter is just a passthrough
+        # Even-order filters have DC gain of -rp dB
+        b, a = cheby1(0, 10*np.log10(2), 1, analog=True)
+        assert_array_almost_equal(b, [1/np.sqrt(2)])
+        assert_array_equal(a, [1])
+
+        # A 1st-order filter is the same for all filter types
+        b, a = cheby1(1, 10*np.log10(2), 1, analog=True)
+        assert_array_almost_equal(b, [1])
+        assert_array_almost_equal(a, [1, 1])
+
+        z, p, k = cheby1(1, 0.1, 0.3, output='zpk')
+        assert_array_equal(z, [-1])
+        assert_allclose(p, [-5.390126972799615e-01], rtol=1e-14)
+        assert_allclose(k, 7.695063486399808e-01, rtol=1e-14)
+
+    def test_basic(self):
+        for N in range(25):
+            wn = 0.01
+            z, p, k = cheby1(N, 1, wn, 'low', analog=True, output='zpk')
+            assert_array_almost_equal([], z)
+            assert_(len(p) == N)
+            assert_(all(np.real(p) <= 0))  # No poles in right half of S-plane
+
+        for N in range(25):
+            wn = 0.01
+            z, p, k = cheby1(N, 1, wn, 'high', analog=False, output='zpk')
+            assert_array_equal(np.ones(N), z)  # All zeros exactly at DC
+            assert_(all(np.abs(p) <= 1))  # No poles outside unit circle
+
+        # Same test as TestNormalize
+        b, a = cheby1(8, 0.5, 0.048)
+        assert_array_almost_equal(b, [
+                             2.150733144728282e-11, 1.720586515782626e-10,
+                             6.022052805239190e-10, 1.204410561047838e-09,
+                             1.505513201309798e-09, 1.204410561047838e-09,
+                             6.022052805239190e-10, 1.720586515782626e-10,
+                             2.150733144728282e-11], decimal=14)
+        assert_array_almost_equal(a, [
+                             1.000000000000000e+00, -7.782402035027959e+00,
+                             2.654354569747454e+01, -5.182182531666387e+01,
+                             6.334127355102684e+01, -4.963358186631157e+01,
+                             2.434862182949389e+01, -6.836925348604676e+00,
+                             8.412934944449140e-01], decimal=14)
+
+        b, a = cheby1(4, 1, [0.4, 0.7], btype='band')
+        assert_array_almost_equal(b, [0.0084, 0, -0.0335, 0, 0.0502, 0,
+                                      -0.0335, 0, 0.0084], decimal=4)
+        assert_array_almost_equal(a, [1.0, 1.1191, 2.862, 2.2986, 3.4137,
+                                      1.8653, 1.8982, 0.5676, 0.4103],
+                                  decimal=4)
+
+        b2, a2 = cheby1(5, 3, 1, analog=True)
+        assert_array_almost_equal(b2, [0.0626], decimal=4)
+        assert_array_almost_equal(a2, [1, 0.5745, 1.4150, 0.5489, 0.4080,
+                                       0.0626], decimal=4)
+
+        b, a = cheby1(8, 0.5, 0.1)
+        assert_array_almost_equal(b, 1.0e-006 * np.array([
+            0.00703924326028, 0.05631394608227, 0.19709881128793,
+            0.39419762257586, 0.49274702821983, 0.39419762257586,
+            0.19709881128793, 0.05631394608227, 0.00703924326028]),
+            decimal=13)
+        assert_array_almost_equal(a, [
+              1.00000000000000, -7.44912258934158, 24.46749067762108,
+              -46.27560200466141, 55.11160187999928, -42.31640010161038,
+              20.45543300484147, -5.69110270561444, 0.69770374759022],
+            decimal=13)
+
+        b, a = cheby1(8, 0.5, 0.25)
+        assert_array_almost_equal(b, 1.0e-003 * np.array([
+            0.00895261138923, 0.07162089111382, 0.25067311889837,
+            0.50134623779673, 0.62668279724591, 0.50134623779673,
+            0.25067311889837, 0.07162089111382, 0.00895261138923]),
+            decimal=13)
+        assert_array_almost_equal(a, [1.00000000000000, -5.97529229188545,
+                                      16.58122329202101, -27.71423273542923,
+                                      30.39509758355313, -22.34729670426879,
+                                      10.74509800434910, -3.08924633697497,
+                                      0.40707685889802], decimal=13)
+
+    def test_highpass(self):
+        # high even order
+        z, p, k = cheby1(24, 0.7, 0.2, 'high', output='zpk')
+        z2 = np.ones(24)
+        p2 = [-6.136558509657073e-01 + 2.700091504942893e-01j,
+              -6.136558509657073e-01 - 2.700091504942893e-01j,
+              -3.303348340927516e-01 + 6.659400861114254e-01j,
+              -3.303348340927516e-01 - 6.659400861114254e-01j,
+              8.779713780557169e-03 + 8.223108447483040e-01j,
+              8.779713780557169e-03 - 8.223108447483040e-01j,
+              2.742361123006911e-01 + 8.356666951611864e-01j,
+              2.742361123006911e-01 - 8.356666951611864e-01j,
+              4.562984557158206e-01 + 7.954276912303594e-01j,
+              4.562984557158206e-01 - 7.954276912303594e-01j,
+              5.777335494123628e-01 + 7.435821817961783e-01j,
+              5.777335494123628e-01 - 7.435821817961783e-01j,
+              6.593260977749194e-01 + 6.955390907990932e-01j,
+              6.593260977749194e-01 - 6.955390907990932e-01j,
+              7.149590948466562e-01 + 6.559437858502012e-01j,
+              7.149590948466562e-01 - 6.559437858502012e-01j,
+              7.532432388188739e-01 + 6.256158042292060e-01j,
+              7.532432388188739e-01 - 6.256158042292060e-01j,
+              7.794365244268271e-01 + 6.042099234813333e-01j,
+              7.794365244268271e-01 - 6.042099234813333e-01j,
+              7.967253874772997e-01 + 5.911966597313203e-01j,
+              7.967253874772997e-01 - 5.911966597313203e-01j,
+              8.069756417293870e-01 + 5.862214589217275e-01j,
+              8.069756417293870e-01 - 5.862214589217275e-01j]
+        k2 = 6.190427617192018e-04
+        assert_array_equal(z, z2)
+        assert_allclose(sorted(p, key=np.imag),
+                        sorted(p2, key=np.imag), rtol=1e-10)
+        assert_allclose(k, k2, rtol=1e-10)
+
+        # high odd order
+        z, p, k = cheby1(23, 0.8, 0.3, 'high', output='zpk')
+        z2 = np.ones(23)
+        p2 = [-7.676400532011010e-01,
+              -6.754621070166477e-01 + 3.970502605619561e-01j,
+              -6.754621070166477e-01 - 3.970502605619561e-01j,
+              -4.528880018446727e-01 + 6.844061483786332e-01j,
+              -4.528880018446727e-01 - 6.844061483786332e-01j,
+              -1.986009130216447e-01 + 8.382285942941594e-01j,
+              -1.986009130216447e-01 - 8.382285942941594e-01j,
+              2.504673931532608e-02 + 8.958137635794080e-01j,
+              2.504673931532608e-02 - 8.958137635794080e-01j,
+              2.001089429976469e-01 + 9.010678290791480e-01j,
+              2.001089429976469e-01 - 9.010678290791480e-01j,
+              3.302410157191755e-01 + 8.835444665962544e-01j,
+              3.302410157191755e-01 - 8.835444665962544e-01j,
+              4.246662537333661e-01 + 8.594054226449009e-01j,
+              4.246662537333661e-01 - 8.594054226449009e-01j,
+              4.919620928120296e-01 + 8.366772762965786e-01j,
+              4.919620928120296e-01 - 8.366772762965786e-01j,
+              5.385746917494749e-01 + 8.191616180796720e-01j,
+              5.385746917494749e-01 - 8.191616180796720e-01j,
+              5.855636993537203e-01 + 8.060680937701062e-01j,
+              5.855636993537203e-01 - 8.060680937701062e-01j,
+              5.688812849391721e-01 + 8.086497795114683e-01j,
+              5.688812849391721e-01 - 8.086497795114683e-01j]
+        k2 = 1.941697029206324e-05
+        assert_array_equal(z, z2)
+        assert_allclose(sorted(p, key=np.imag),
+                        sorted(p2, key=np.imag), rtol=1e-10)
+        assert_allclose(k, k2, rtol=1e-10)
+
+        z, p, k = cheby1(10, 1, 1000, 'high', analog=True, output='zpk')
+        z2 = np.zeros(10)
+        p2 = [-3.144743169501551e+03 + 3.511680029092744e+03j,
+              -3.144743169501551e+03 - 3.511680029092744e+03j,
+              -5.633065604514602e+02 + 2.023615191183945e+03j,
+              -5.633065604514602e+02 - 2.023615191183945e+03j,
+              -1.946412183352025e+02 + 1.372309454274755e+03j,
+              -1.946412183352025e+02 - 1.372309454274755e+03j,
+              -7.987162953085479e+01 + 1.105207708045358e+03j,
+              -7.987162953085479e+01 - 1.105207708045358e+03j,
+              -2.250315039031946e+01 + 1.001723931471477e+03j,
+              -2.250315039031946e+01 - 1.001723931471477e+03j]
+        k2 = 8.912509381337453e-01
+        assert_array_equal(z, z2)
+        assert_allclose(sorted(p, key=np.imag),
+                        sorted(p2, key=np.imag), rtol=1e-13)
+        assert_allclose(k, k2, rtol=1e-15)
+
+    def test_bandpass(self):
+        z, p, k = cheby1(8, 1, [0.3, 0.4], 'bp', output='zpk')
+        z2 = [1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1]
+        p2 = [3.077784854851463e-01 + 9.453307017592942e-01j,
+              3.077784854851463e-01 - 9.453307017592942e-01j,
+              3.280567400654425e-01 + 9.272377218689016e-01j,
+              3.280567400654425e-01 - 9.272377218689016e-01j,
+              3.677912763284301e-01 + 9.038008865279966e-01j,
+              3.677912763284301e-01 - 9.038008865279966e-01j,
+              4.194425632520948e-01 + 8.769407159656157e-01j,
+              4.194425632520948e-01 - 8.769407159656157e-01j,
+              4.740921994669189e-01 + 8.496508528630974e-01j,
+              4.740921994669189e-01 - 8.496508528630974e-01j,
+              5.234866481897429e-01 + 8.259608422808477e-01j,
+              5.234866481897429e-01 - 8.259608422808477e-01j,
+              5.844717632289875e-01 + 8.052901363500210e-01j,
+              5.844717632289875e-01 - 8.052901363500210e-01j,
+              5.615189063336070e-01 + 8.100667803850766e-01j,
+              5.615189063336070e-01 - 8.100667803850766e-01j]
+        k2 = 5.007028718074307e-09
+        assert_array_equal(z, z2)
+        assert_allclose(sorted(p, key=np.imag),
+                        sorted(p2, key=np.imag), rtol=1e-13)
+        assert_allclose(k, k2, rtol=1e-13)
+
+    def test_bandstop(self):
+        z, p, k = cheby1(7, 1, [0.5, 0.6], 'stop', output='zpk')
+        z2 = [-1.583844403245361e-01 + 9.873775210440450e-01j,
+              -1.583844403245361e-01 - 9.873775210440450e-01j,
+              -1.583844403245361e-01 + 9.873775210440450e-01j,
+              -1.583844403245361e-01 - 9.873775210440450e-01j,
+              -1.583844403245361e-01 + 9.873775210440450e-01j,
+              -1.583844403245361e-01 - 9.873775210440450e-01j,
+              -1.583844403245361e-01 + 9.873775210440450e-01j,
+              -1.583844403245361e-01 - 9.873775210440450e-01j,
+              -1.583844403245361e-01 + 9.873775210440450e-01j,
+              -1.583844403245361e-01 - 9.873775210440450e-01j,
+              -1.583844403245361e-01 + 9.873775210440450e-01j,
+              -1.583844403245361e-01 - 9.873775210440450e-01j,
+              -1.583844403245361e-01 + 9.873775210440450e-01j,
+              -1.583844403245361e-01 - 9.873775210440450e-01j]
+        p2 = [-8.942974551472813e-02 + 3.482480481185926e-01j,
+              -8.942974551472813e-02 - 3.482480481185926e-01j,
+               1.293775154041798e-01 + 8.753499858081858e-01j,
+               1.293775154041798e-01 - 8.753499858081858e-01j,
+               3.399741945062013e-02 + 9.690316022705607e-01j,
+               3.399741945062013e-02 - 9.690316022705607e-01j,
+               4.167225522796539e-04 + 9.927338161087488e-01j,
+               4.167225522796539e-04 - 9.927338161087488e-01j,
+              -3.912966549550960e-01 + 8.046122859255742e-01j,
+              -3.912966549550960e-01 - 8.046122859255742e-01j,
+              -3.307805547127368e-01 + 9.133455018206508e-01j,
+              -3.307805547127368e-01 - 9.133455018206508e-01j,
+              -3.072658345097743e-01 + 9.443589759799366e-01j,
+              -3.072658345097743e-01 - 9.443589759799366e-01j]
+        k2 = 3.619438310405028e-01
+        assert_allclose(sorted(z, key=np.imag),
+                        sorted(z2, key=np.imag), rtol=1e-13)
+        assert_allclose(sorted(p, key=np.imag),
+                        sorted(p2, key=np.imag), rtol=1e-13)
+        assert_allclose(k, k2, rtol=0, atol=5e-16)
+
+    def test_ba_output(self):
+        # with transfer function conversion, without digital conversion
+        b, a = cheby1(5, 0.9, [210, 310], 'stop', analog=True)
+        b2 = [1.000000000000006e+00, 0,
+              3.255000000000020e+05, 0,
+              4.238010000000026e+10, 0,
+              2.758944510000017e+15, 0,
+              8.980364380050052e+19, 0,
+              1.169243442282517e+24
+              ]
+        a2 = [1.000000000000000e+00, 4.630555945694342e+02,
+              4.039266454794788e+05, 1.338060988610237e+08,
+              5.844333551294591e+10, 1.357346371637638e+13,
+              3.804661141892782e+15, 5.670715850340080e+17,
+              1.114411200988328e+20, 8.316815934908471e+21,
+              1.169243442282517e+24
+              ]
+        assert_allclose(b, b2, rtol=1e-14)
+        assert_allclose(a, a2, rtol=1e-14)
+
+    def test_fs_param(self):
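+        # Passing fs should be equivalent to pre-normalizing the critical
+        # frequency by the Nyquist rate fs/2.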
+        for fs in (900, 900.1, 1234.567):
+            for N in (0, 1, 2, 3, 10):
+                for fc in (100, 100.1, 432.12345):
+                    for btype in ('lp', 'hp'):
+                        ba1 = cheby1(N, 1, fc, btype, fs=fs)
+                        ba2 = cheby1(N, 1, fc/(fs/2), btype)
+                        assert_allclose(ba1, ba2)
+                for fc in ((100, 200), (100.1, 200.2), (321.123, 432.123)):
+                    for btype in ('bp', 'bs'):
+                        ba1 = cheby1(N, 1, fc, btype, fs=fs)
+                        for seq in (list, tuple, array):
+                            fcnorm = seq([f/(fs/2) for f in fc])
+                            ba2 = cheby1(N, 1, fcnorm, btype)
+                            assert_allclose(ba1, ba2)
+
+
+class TestCheby2:
+
+    def test_degenerate(self):
+        # 0-order filter is just a passthrough
+        # Stopband ripple factor doesn't matter
+        b, a = cheby2(0, 123.456, 1, analog=True)
+        assert_array_equal(b, [1])
+        assert_array_equal(a, [1])
+
+        # 1-order filter is same for all types
+        b, a = cheby2(1, 10*np.log10(2), 1, analog=True)
+        assert_array_almost_equal(b, [1])
+        assert_array_almost_equal(a, [1, 1])
+
+        z, p, k = cheby2(1, 50, 0.3, output='zpk')
+        assert_array_equal(z, [-1])
+        assert_allclose(p, [9.967826460175649e-01], rtol=1e-14)
+        assert_allclose(k, 1.608676991217512e-03, rtol=1e-14)
+
+    def test_basic(self):
+        for N in range(25):
+            wn = 0.01
+            z, p, k = cheby2(N, 40, wn, 'low', analog=True, output='zpk')
+            assert_(len(p) == N)
+            assert_(all(np.real(p) <= 0))  # No poles in right half of S-plane
+
+        for N in range(25):
+            wn = 0.01
+            z, p, k = cheby2(N, 40, wn, 'high', analog=False, output='zpk')
+            assert_(all(np.abs(p) <= 1))  # No poles outside unit circle
+
+        B, A = cheby2(18, 100, 0.5)
+        assert_array_almost_equal(B, [
+            0.00167583914216, 0.01249479541868, 0.05282702120282,
+            0.15939804265706, 0.37690207631117, 0.73227013789108,
+            1.20191856962356, 1.69522872823393, 2.07598674519837,
+            2.21972389625291, 2.07598674519838, 1.69522872823395,
+            1.20191856962359, 0.73227013789110, 0.37690207631118,
+            0.15939804265707, 0.05282702120282, 0.01249479541868,
+            0.00167583914216], decimal=13)
+        assert_array_almost_equal(A, [
+            1.00000000000000, -0.27631970006174, 3.19751214254060,
+            -0.15685969461355, 4.13926117356269, 0.60689917820044,
+            2.95082770636540, 0.89016501910416, 1.32135245849798,
+            0.51502467236824, 0.38906643866660, 0.15367372690642,
+            0.07255803834919, 0.02422454070134, 0.00756108751837,
+            0.00179848550988, 0.00033713574499, 0.00004258794833,
+            0.00000281030149], decimal=13)
+
+    def test_highpass(self):
+        # high even order
+        z, p, k = cheby2(26, 60, 0.3, 'high', output='zpk')
+        z2 = [9.981088955489852e-01 + 6.147058341984388e-02j,
+              9.981088955489852e-01 - 6.147058341984388e-02j,
+              9.832702870387426e-01 + 1.821525257215483e-01j,
+              9.832702870387426e-01 - 1.821525257215483e-01j,
+              9.550760158089112e-01 + 2.963609353922882e-01j,
+              9.550760158089112e-01 - 2.963609353922882e-01j,
+              9.162054748821922e-01 + 4.007087817803773e-01j,
+              9.162054748821922e-01 - 4.007087817803773e-01j,
+              8.700619897368064e-01 + 4.929423232136168e-01j,
+              8.700619897368064e-01 - 4.929423232136168e-01j,
+              5.889791753434985e-01 + 8.081482110427953e-01j,
+              5.889791753434985e-01 - 8.081482110427953e-01j,
+              5.984900456570295e-01 + 8.011302423760501e-01j,
+              5.984900456570295e-01 - 8.011302423760501e-01j,
+              6.172880888914629e-01 + 7.867371958365343e-01j,
+              6.172880888914629e-01 - 7.867371958365343e-01j,
+              6.448899971038180e-01 + 7.642754030030161e-01j,
+              6.448899971038180e-01 - 7.642754030030161e-01j,
+              6.804845629637927e-01 + 7.327624168637228e-01j,
+              6.804845629637927e-01 - 7.327624168637228e-01j,
+              8.202619107108660e-01 + 5.719881098737678e-01j,
+              8.202619107108660e-01 - 5.719881098737678e-01j,
+              7.228410452536148e-01 + 6.910143437705678e-01j,
+              7.228410452536148e-01 - 6.910143437705678e-01j,
+              7.702121399578629e-01 + 6.377877856007792e-01j,
+              7.702121399578629e-01 - 6.377877856007792e-01j]
+        p2 = [7.365546198286450e-01 + 4.842085129329526e-02j,
+              7.365546198286450e-01 - 4.842085129329526e-02j,
+              7.292038510962885e-01 + 1.442201672097581e-01j,
+              7.292038510962885e-01 - 1.442201672097581e-01j,
+              7.151293788040354e-01 + 2.369925800458584e-01j,
+              7.151293788040354e-01 - 2.369925800458584e-01j,
+              6.955051820787286e-01 + 3.250341363856910e-01j,
+              6.955051820787286e-01 - 3.250341363856910e-01j,
+              6.719122956045220e-01 + 4.070475750638047e-01j,
+              6.719122956045220e-01 - 4.070475750638047e-01j,
+              6.461722130611300e-01 + 4.821965916689270e-01j,
+              6.461722130611300e-01 - 4.821965916689270e-01j,
+              5.528045062872224e-01 + 8.162920513838372e-01j,
+              5.528045062872224e-01 - 8.162920513838372e-01j,
+              5.464847782492791e-01 + 7.869899955967304e-01j,
+              5.464847782492791e-01 - 7.869899955967304e-01j,
+              5.488033111260949e-01 + 7.520442354055579e-01j,
+              5.488033111260949e-01 - 7.520442354055579e-01j,
+              6.201874719022955e-01 + 5.500894392527353e-01j,
+              6.201874719022955e-01 - 5.500894392527353e-01j,
+              5.586478152536709e-01 + 7.112676877332921e-01j,
+              5.586478152536709e-01 - 7.112676877332921e-01j,
+              5.958145844148228e-01 + 6.107074340842115e-01j,
+              5.958145844148228e-01 - 6.107074340842115e-01j,
+              5.747812938519067e-01 + 6.643001536914696e-01j,
+              5.747812938519067e-01 - 6.643001536914696e-01j]
+        k2 = 9.932997786497189e-02
+        assert_allclose(sorted(z, key=np.angle),
+                        sorted(z2, key=np.angle), rtol=1e-13)
+        assert_allclose(sorted(p, key=np.angle),
+                        sorted(p2, key=np.angle), rtol=1e-12)
+        assert_allclose(k, k2, rtol=1e-11)
+
+        # high odd order
+        z, p, k = cheby2(25, 80, 0.5, 'high', output='zpk')
+        z2 = [9.690690376586687e-01 + 2.467897896011971e-01j,
+              9.690690376586687e-01 - 2.467897896011971e-01j,
+              9.999999999999492e-01,
+              8.835111277191199e-01 + 4.684101698261429e-01j,
+              8.835111277191199e-01 - 4.684101698261429e-01j,
+              7.613142857900539e-01 + 6.483830335935022e-01j,
+              7.613142857900539e-01 - 6.483830335935022e-01j,
+              6.232625173626231e-01 + 7.820126817709752e-01j,
+              6.232625173626231e-01 - 7.820126817709752e-01j,
+              4.864456563413621e-01 + 8.737108351316745e-01j,
+              4.864456563413621e-01 - 8.737108351316745e-01j,
+              3.618368136816749e-01 + 9.322414495530347e-01j,
+              3.618368136816749e-01 - 9.322414495530347e-01j,
+              2.549486883466794e-01 + 9.669545833752675e-01j,
+              2.549486883466794e-01 - 9.669545833752675e-01j,
+              1.676175432109457e-01 + 9.858520980390212e-01j,
+              1.676175432109457e-01 - 9.858520980390212e-01j,
+              1.975218468277521e-03 + 9.999980492540941e-01j,
+              1.975218468277521e-03 - 9.999980492540941e-01j,
+              1.786959496651858e-02 + 9.998403260399917e-01j,
+              1.786959496651858e-02 - 9.998403260399917e-01j,
+              9.967933660557139e-02 + 9.950196127985684e-01j,
+              9.967933660557139e-02 - 9.950196127985684e-01j,
+              5.013970951219547e-02 + 9.987422137518890e-01j,
+              5.013970951219547e-02 - 9.987422137518890e-01j]
+        p2 = [4.218866331906864e-01,
+              4.120110200127552e-01 + 1.361290593621978e-01j,
+              4.120110200127552e-01 - 1.361290593621978e-01j,
+              3.835890113632530e-01 + 2.664910809911026e-01j,
+              3.835890113632530e-01 - 2.664910809911026e-01j,
+              3.399195570456499e-01 + 3.863983538639875e-01j,
+              3.399195570456499e-01 - 3.863983538639875e-01j,
+              2.855977834508353e-01 + 4.929444399540688e-01j,
+              2.855977834508353e-01 - 4.929444399540688e-01j,
+              2.255765441339322e-01 + 5.851631870205766e-01j,
+              2.255765441339322e-01 - 5.851631870205766e-01j,
+              1.644087535815792e-01 + 6.637356937277153e-01j,
+              1.644087535815792e-01 - 6.637356937277153e-01j,
+              -7.293633845273095e-02 + 9.739218252516307e-01j,
+              -7.293633845273095e-02 - 9.739218252516307e-01j,
+              1.058259206358626e-01 + 7.304739464862978e-01j,
+              1.058259206358626e-01 - 7.304739464862978e-01j,
+              -5.703971947785402e-02 + 9.291057542169088e-01j,
+              -5.703971947785402e-02 - 9.291057542169088e-01j,
+              5.263875132656864e-02 + 7.877974334424453e-01j,
+              5.263875132656864e-02 - 7.877974334424453e-01j,
+              -3.007943405982616e-02 + 8.846331716180016e-01j,
+              -3.007943405982616e-02 - 8.846331716180016e-01j,
+              6.857277464483946e-03 + 8.383275456264492e-01j,
+              6.857277464483946e-03 - 8.383275456264492e-01j]
+        k2 = 6.507068761705037e-03
+        assert_allclose(sorted(z, key=np.angle),
+                        sorted(z2, key=np.angle), rtol=1e-13)
+        assert_allclose(sorted(p, key=np.angle),
+                        sorted(p2, key=np.angle), rtol=1e-12)
+        assert_allclose(k, k2, rtol=1e-11)
+
+    def test_bandpass(self):
+        z, p, k = cheby2(9, 40, [0.07, 0.2], 'pass', output='zpk')
+        z2 = [-9.999999999999999e-01,
+               3.676588029658514e-01 + 9.299607543341383e-01j,
+               3.676588029658514e-01 - 9.299607543341383e-01j,
+               7.009689684982283e-01 + 7.131917730894889e-01j,
+               7.009689684982283e-01 - 7.131917730894889e-01j,
+               7.815697973765858e-01 + 6.238178033919218e-01j,
+               7.815697973765858e-01 - 6.238178033919218e-01j,
+               8.063793628819866e-01 + 5.913986160941200e-01j,
+               8.063793628819866e-01 - 5.913986160941200e-01j,
+               1.000000000000001e+00,
+               9.944493019920448e-01 + 1.052168511576739e-01j,
+               9.944493019920448e-01 - 1.052168511576739e-01j,
+               9.854674703367308e-01 + 1.698642543566085e-01j,
+               9.854674703367308e-01 - 1.698642543566085e-01j,
+               9.762751735919308e-01 + 2.165335665157851e-01j,
+               9.762751735919308e-01 - 2.165335665157851e-01j,
+               9.792277171575134e-01 + 2.027636011479496e-01j,
+               9.792277171575134e-01 - 2.027636011479496e-01j]
+        p2 = [8.143803410489621e-01 + 5.411056063397541e-01j,
+              8.143803410489621e-01 - 5.411056063397541e-01j,
+              7.650769827887418e-01 + 5.195412242095543e-01j,
+              7.650769827887418e-01 - 5.195412242095543e-01j,
+              6.096241204063443e-01 + 3.568440484659796e-01j,
+              6.096241204063443e-01 - 3.568440484659796e-01j,
+              6.918192770246239e-01 + 4.770463577106911e-01j,
+              6.918192770246239e-01 - 4.770463577106911e-01j,
+              6.986241085779207e-01 + 1.146512226180060e-01j,
+              6.986241085779207e-01 - 1.146512226180060e-01j,
+              8.654645923909734e-01 + 1.604208797063147e-01j,
+              8.654645923909734e-01 - 1.604208797063147e-01j,
+              9.164831670444591e-01 + 1.969181049384918e-01j,
+              9.164831670444591e-01 - 1.969181049384918e-01j,
+              9.630425777594550e-01 + 2.317513360702271e-01j,
+              9.630425777594550e-01 - 2.317513360702271e-01j,
+              9.438104703725529e-01 + 2.193509900269860e-01j,
+              9.438104703725529e-01 - 2.193509900269860e-01j]
+        k2 = 9.345352824659604e-03
+        assert_allclose(sorted(z, key=np.angle),
+                        sorted(z2, key=np.angle), rtol=1e-13)
+        assert_allclose(sorted(p, key=np.angle),
+                        sorted(p2, key=np.angle), rtol=1e-13)
+        assert_allclose(k, k2, rtol=1e-11)
+
+    def test_bandstop(self):
+        z, p, k = cheby2(6, 55, [0.1, 0.9], 'stop', output='zpk')
+        z2 = [6.230544895101009e-01 + 7.821784343111114e-01j,
+              6.230544895101009e-01 - 7.821784343111114e-01j,
+              9.086608545660115e-01 + 4.175349702471991e-01j,
+              9.086608545660115e-01 - 4.175349702471991e-01j,
+              9.478129721465802e-01 + 3.188268649763867e-01j,
+              9.478129721465802e-01 - 3.188268649763867e-01j,
+              -6.230544895100982e-01 + 7.821784343111109e-01j,
+              -6.230544895100982e-01 - 7.821784343111109e-01j,
+              -9.086608545660116e-01 + 4.175349702472088e-01j,
+              -9.086608545660116e-01 - 4.175349702472088e-01j,
+              -9.478129721465784e-01 + 3.188268649763897e-01j,
+              -9.478129721465784e-01 - 3.188268649763897e-01j]
+        p2 = [-9.464094036167638e-01 + 1.720048695084344e-01j,
+              -9.464094036167638e-01 - 1.720048695084344e-01j,
+              -8.715844103386737e-01 + 1.370665039509297e-01j,
+              -8.715844103386737e-01 - 1.370665039509297e-01j,
+              -8.078751204586425e-01 + 5.729329866682983e-02j,
+              -8.078751204586425e-01 - 5.729329866682983e-02j,
+               9.464094036167665e-01 + 1.720048695084332e-01j,
+               9.464094036167665e-01 - 1.720048695084332e-01j,
+               8.078751204586447e-01 + 5.729329866683007e-02j,
+               8.078751204586447e-01 - 5.729329866683007e-02j,
+               8.715844103386721e-01 + 1.370665039509331e-01j,
+               8.715844103386721e-01 - 1.370665039509331e-01j]
+        k2 = 2.917823332763358e-03
+        assert_allclose(sorted(z, key=np.angle),
+                        sorted(z2, key=np.angle), rtol=1e-13)
+        assert_allclose(sorted(p, key=np.angle),
+                        sorted(p2, key=np.angle), rtol=1e-13)
+        assert_allclose(k, k2, rtol=1e-11)
+
+    def test_ba_output(self):
+        # with transfer function conversion, without digital conversion
+        b, a = cheby2(5, 20, [2010, 2100], 'stop', True)
+        b2 = [1.000000000000000e+00, 0,  # Matlab: 6.683253076978249e-12,
+              2.111512500000000e+07, 0,  # Matlab: 1.134325604589552e-04,
+              1.782966433781250e+14, 0,  # Matlab: 7.216787944356781e+02,
+              7.525901316990656e+20, 0,  # Matlab: 2.039829265789886e+09,
+              1.587960565565748e+27, 0,  # Matlab: 2.161236218626134e+15,
+              1.339913493808585e+33]
+        a2 = [1.000000000000000e+00, 1.849550755473371e+02,
+              2.113222918998538e+07, 3.125114149732283e+09,
+              1.785133457155609e+14, 1.979158697776348e+16,
+              7.535048322653831e+20, 5.567966191263037e+22,
+              1.589246884221346e+27, 5.871210648525566e+28,
+              1.339913493808590e+33]
+        assert_allclose(b, b2, rtol=1e-14)
+        assert_allclose(a, a2, rtol=1e-14)
+
+    def test_fs_param(self):
+        for fs in (900, 900.1, 1234.567):
+            for N in (0, 1, 2, 3, 10):
+                for fc in (100, 100.1, 432.12345):
+                    for btype in ('lp', 'hp'):
+                        ba1 = cheby2(N, 20, fc, btype, fs=fs)
+                        ba2 = cheby2(N, 20, fc/(fs/2), btype)
+                        assert_allclose(ba1, ba2)
+                for fc in ((100, 200), (100.1, 200.2), (321.123, 432.123)):
+                    for btype in ('bp', 'bs'):
+                        ba1 = cheby2(N, 20, fc, btype, fs=fs)
+                        for seq in (list, tuple, array):
+                            fcnorm = seq([f/(fs/2) for f in fc])
+                            ba2 = cheby2(N, 20, fcnorm, btype)
+                            assert_allclose(ba1, ba2)
+
+
+class TestEllip:
+
+    def test_degenerate(self):
+        # 0-order filter is just a passthrough
+        # Even-order filters have DC gain of -rp dB
+        # Stopband ripple factor doesn't matter
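+        # (a DC gain of -rp dB is 10**(-rp/20); with rp = 10*log10(2) dB
+        #  that equals 1/sqrt(2))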
+        b, a = ellip(0, 10*np.log10(2), 123.456, 1, analog=True)
+        assert_array_almost_equal(b, [1/np.sqrt(2)])
+        assert_array_equal(a, [1])
+
+        # 1-order filter is same for all types
+        b, a = ellip(1, 10*np.log10(2), 1, 1, analog=True)
+        assert_array_almost_equal(b, [1])
+        assert_array_almost_equal(a, [1, 1])
+
+        z, p, k = ellip(1, 1, 55, 0.3, output='zpk')
+        assert_allclose(z, [-9.999999999999998e-01], rtol=1e-14)
+        assert_allclose(p, [-6.660721153525525e-04], rtol=1e-10)
+        assert_allclose(k, 5.003330360576763e-01, rtol=1e-14)
+
+    def test_basic(self):
+        for N in range(25):
+            wn = 0.01
+            z, p, k = ellip(N, 1, 40, wn, 'low', analog=True, output='zpk')
+            assert_(len(p) == N)
+            assert_(all(np.real(p) <= 0))  # No poles in right half of S-plane
+
+        for N in range(25):
+            wn = 0.01
+            z, p, k = ellip(N, 1, 40, wn, 'high', analog=False, output='zpk')
+            assert_(all(np.abs(p) <= 1))  # No poles outside unit circle
+
+        b3, a3 = ellip(5, 3, 26, 1, analog=True)
+        assert_array_almost_equal(b3, [0.1420, 0, 0.3764, 0,
+                                       0.2409], decimal=4)
+        assert_array_almost_equal(a3, [1, 0.5686, 1.8061, 0.8017, 0.8012,
+                                       0.2409], decimal=4)
+
+        b, a = ellip(3, 1, 60, [0.4, 0.7], 'stop')
+        assert_array_almost_equal(b, [0.3310, 0.3469, 1.1042, 0.7044, 1.1042,
+                                      0.3469, 0.3310], decimal=4)
+        assert_array_almost_equal(a, [1.0000, 0.6973, 1.1441, 0.5878, 0.7323,
+                                      0.1131, -0.0060], decimal=4)
+
+    def test_highpass(self):
+        # high even order
+        z, p, k = ellip(24, 1, 80, 0.3, 'high', output='zpk')
+        z2 = [9.761875332501075e-01 + 2.169283290099910e-01j,
+              9.761875332501075e-01 - 2.169283290099910e-01j,
+              8.413503353963494e-01 + 5.404901600661900e-01j,
+              8.413503353963494e-01 - 5.404901600661900e-01j,
+              7.160082576305009e-01 + 6.980918098681732e-01j,
+              7.160082576305009e-01 - 6.980918098681732e-01j,
+              6.456533638965329e-01 + 7.636306264739803e-01j,
+              6.456533638965329e-01 - 7.636306264739803e-01j,
+              6.127321820971366e-01 + 7.902906256703928e-01j,
+              6.127321820971366e-01 - 7.902906256703928e-01j,
+              5.983607817490196e-01 + 8.012267936512676e-01j,
+              5.983607817490196e-01 - 8.012267936512676e-01j,
+              5.922577552594799e-01 + 8.057485658286990e-01j,
+              5.922577552594799e-01 - 8.057485658286990e-01j,
+              5.896952092563588e-01 + 8.076258788449631e-01j,
+              5.896952092563588e-01 - 8.076258788449631e-01j,
+              5.886248765538837e-01 + 8.084063054565607e-01j,
+              5.886248765538837e-01 - 8.084063054565607e-01j,
+              5.881802711123132e-01 + 8.087298490066037e-01j,
+              5.881802711123132e-01 - 8.087298490066037e-01j,
+              5.879995719101164e-01 + 8.088612386766461e-01j,
+              5.879995719101164e-01 - 8.088612386766461e-01j,
+              5.879354086709576e-01 + 8.089078780868164e-01j,
+              5.879354086709576e-01 - 8.089078780868164e-01j]
+        p2 = [-3.184805259081650e-01 + 4.206951906775851e-01j,
+              -3.184805259081650e-01 - 4.206951906775851e-01j,
+               1.417279173459985e-01 + 7.903955262836452e-01j,
+               1.417279173459985e-01 - 7.903955262836452e-01j,
+               4.042881216964651e-01 + 8.309042239116594e-01j,
+               4.042881216964651e-01 - 8.309042239116594e-01j,
+               5.128964442789670e-01 + 8.229563236799665e-01j,
+               5.128964442789670e-01 - 8.229563236799665e-01j,
+               5.569614712822724e-01 + 8.155957702908510e-01j,
+               5.569614712822724e-01 - 8.155957702908510e-01j,
+               5.750478870161392e-01 + 8.118633973883931e-01j,
+               5.750478870161392e-01 - 8.118633973883931e-01j,
+               5.825314018170804e-01 + 8.101960910679270e-01j,
+               5.825314018170804e-01 - 8.101960910679270e-01j,
+               5.856397379751872e-01 + 8.094825218722543e-01j,
+               5.856397379751872e-01 - 8.094825218722543e-01j,
+               5.869326035251949e-01 + 8.091827531557583e-01j,
+               5.869326035251949e-01 - 8.091827531557583e-01j,
+               5.874697218855733e-01 + 8.090593298213502e-01j,
+               5.874697218855733e-01 - 8.090593298213502e-01j,
+               5.876904783532237e-01 + 8.090127161018823e-01j,
+               5.876904783532237e-01 - 8.090127161018823e-01j,
+               5.877753105317594e-01 + 8.090050577978136e-01j,
+               5.877753105317594e-01 - 8.090050577978136e-01j]
+        k2 = 4.918081266957108e-02
+        assert_allclose(sorted(z, key=np.angle),
+                        sorted(z2, key=np.angle), rtol=1e-4)
+        assert_allclose(sorted(p, key=np.angle),
+                        sorted(p2, key=np.angle), rtol=1e-4)
+        assert_allclose(k, k2, rtol=1e-3)
+
+        # high odd order
+        z, p, k = ellip(23, 1, 70, 0.5, 'high', output='zpk')
+        z2 = [9.999999999998661e-01,
+              6.603717261750994e-01 + 7.509388678638675e-01j,
+              6.603717261750994e-01 - 7.509388678638675e-01j,
+              2.788635267510325e-01 + 9.603307416968041e-01j,
+              2.788635267510325e-01 - 9.603307416968041e-01j,
+              1.070215532544218e-01 + 9.942567008268131e-01j,
+              1.070215532544218e-01 - 9.942567008268131e-01j,
+              4.049427369978163e-02 + 9.991797705105507e-01j,
+              4.049427369978163e-02 - 9.991797705105507e-01j,
+              1.531059368627931e-02 + 9.998827859909265e-01j,
+              1.531059368627931e-02 - 9.998827859909265e-01j,
+              5.808061438534933e-03 + 9.999831330689181e-01j,
+              5.808061438534933e-03 - 9.999831330689181e-01j,
+              2.224277847754599e-03 + 9.999975262909676e-01j,
+              2.224277847754599e-03 - 9.999975262909676e-01j,
+              8.731857107534554e-04 + 9.999996187732845e-01j,
+              8.731857107534554e-04 - 9.999996187732845e-01j,
+              3.649057346914968e-04 + 9.999999334218996e-01j,
+              3.649057346914968e-04 - 9.999999334218996e-01j,
+              1.765538109802615e-04 + 9.999999844143768e-01j,
+              1.765538109802615e-04 - 9.999999844143768e-01j,
+              1.143655290967426e-04 + 9.999999934602630e-01j,
+              1.143655290967426e-04 - 9.999999934602630e-01j]
+        p2 = [-6.322017026545028e-01,
+              -4.648423756662754e-01 + 5.852407464440732e-01j,
+              -4.648423756662754e-01 - 5.852407464440732e-01j,
+              -2.249233374627773e-01 + 8.577853017985717e-01j,
+              -2.249233374627773e-01 - 8.577853017985717e-01j,
+              -9.234137570557621e-02 + 9.506548198678851e-01j,
+              -9.234137570557621e-02 - 9.506548198678851e-01j,
+              -3.585663561241373e-02 + 9.821494736043981e-01j,
+              -3.585663561241373e-02 - 9.821494736043981e-01j,
+              -1.363917242312723e-02 + 9.933844128330656e-01j,
+              -1.363917242312723e-02 - 9.933844128330656e-01j,
+              -5.131505238923029e-03 + 9.975221173308673e-01j,
+              -5.131505238923029e-03 - 9.975221173308673e-01j,
+              -1.904937999259502e-03 + 9.990680819857982e-01j,
+              -1.904937999259502e-03 - 9.990680819857982e-01j,
+              -6.859439885466834e-04 + 9.996492201426826e-01j,
+              -6.859439885466834e-04 - 9.996492201426826e-01j,
+              -2.269936267937089e-04 + 9.998686250679161e-01j,
+              -2.269936267937089e-04 - 9.998686250679161e-01j,
+              -5.687071588789117e-05 + 9.999527573294513e-01j,
+              -5.687071588789117e-05 - 9.999527573294513e-01j,
+              -6.948417068525226e-07 + 9.999882737700173e-01j,
+              -6.948417068525226e-07 - 9.999882737700173e-01j]
+        k2 = 1.220910020289434e-02
+        assert_allclose(sorted(z, key=np.angle),
+                        sorted(z2, key=np.angle), rtol=1e-4)
+        assert_allclose(sorted(p, key=np.angle),
+                        sorted(p2, key=np.angle), rtol=1e-4)
+        assert_allclose(k, k2, rtol=1e-3)
+
+    def test_bandpass(self):
+        z, p, k = ellip(7, 1, 40, [0.07, 0.2], 'pass', output='zpk')
+        z2 = [-9.999999999999991e-01,
+               6.856610961780020e-01 + 7.279209168501619e-01j,
+               6.856610961780020e-01 - 7.279209168501619e-01j,
+               7.850346167691289e-01 + 6.194518952058737e-01j,
+               7.850346167691289e-01 - 6.194518952058737e-01j,
+               7.999038743173071e-01 + 6.001281461922627e-01j,
+               7.999038743173071e-01 - 6.001281461922627e-01j,
+               9.999999999999999e-01,
+               9.862938983554124e-01 + 1.649980183725925e-01j,
+               9.862938983554124e-01 - 1.649980183725925e-01j,
+               9.788558330548762e-01 + 2.045513580850601e-01j,
+               9.788558330548762e-01 - 2.045513580850601e-01j,
+               9.771155231720003e-01 + 2.127093189691258e-01j,
+               9.771155231720003e-01 - 2.127093189691258e-01j]
+        p2 = [8.063992755498643e-01 + 5.858071374778874e-01j,
+              8.063992755498643e-01 - 5.858071374778874e-01j,
+              8.050395347071724e-01 + 5.639097428109795e-01j,
+              8.050395347071724e-01 - 5.639097428109795e-01j,
+              8.113124936559144e-01 + 4.855241143973142e-01j,
+              8.113124936559144e-01 - 4.855241143973142e-01j,
+              8.665595314082394e-01 + 3.334049560919331e-01j,
+              8.665595314082394e-01 - 3.334049560919331e-01j,
+              9.412369011968871e-01 + 2.457616651325908e-01j,
+              9.412369011968871e-01 - 2.457616651325908e-01j,
+              9.679465190411238e-01 + 2.228772501848216e-01j,
+              9.679465190411238e-01 - 2.228772501848216e-01j,
+              9.747235066273385e-01 + 2.178937926146544e-01j,
+              9.747235066273385e-01 - 2.178937926146544e-01j]
+        k2 = 8.354782670263239e-03
+        assert_allclose(sorted(z, key=np.angle),
+                        sorted(z2, key=np.angle), rtol=1e-4)
+        assert_allclose(sorted(p, key=np.angle),
+                        sorted(p2, key=np.angle), rtol=1e-4)
+        assert_allclose(k, k2, rtol=1e-3)
+
+        z, p, k = ellip(5, 1, 75, [90.5, 110.5], 'pass', True, 'zpk')
+        z2 = [-5.583607317695175e-14 + 1.433755965989225e+02j,
+              -5.583607317695175e-14 - 1.433755965989225e+02j,
+               5.740106416459296e-14 + 1.261678754570291e+02j,
+               5.740106416459296e-14 - 1.261678754570291e+02j,
+              -2.199676239638652e-14 + 6.974861996895196e+01j,
+              -2.199676239638652e-14 - 6.974861996895196e+01j,
+              -3.372595657044283e-14 + 7.926145989044531e+01j,
+              -3.372595657044283e-14 - 7.926145989044531e+01j,
+              0]
+        p2 = [-8.814960004852743e-01 + 1.104124501436066e+02j,
+              -8.814960004852743e-01 - 1.104124501436066e+02j,
+              -2.477372459140184e+00 + 1.065638954516534e+02j,
+              -2.477372459140184e+00 - 1.065638954516534e+02j,
+              -3.072156842945799e+00 + 9.995404870405324e+01j,
+              -3.072156842945799e+00 - 9.995404870405324e+01j,
+              -2.180456023925693e+00 + 9.379206865455268e+01j,
+              -2.180456023925693e+00 - 9.379206865455268e+01j,
+              -7.230484977485752e-01 + 9.056598800801140e+01j,
+              -7.230484977485752e-01 - 9.056598800801140e+01j]
+        k2 = 3.774571622827070e-02
+        assert_allclose(sorted(z, key=np.imag),
+                        sorted(z2, key=np.imag), rtol=1e-4)
+        assert_allclose(sorted(p, key=np.imag),
+                        sorted(p2, key=np.imag), rtol=1e-6)
+        assert_allclose(k, k2, rtol=1e-3)
+
+    def test_bandstop(self):
+        z, p, k = ellip(8, 1, 65, [0.2, 0.4], 'stop', output='zpk')
+        z2 = [3.528578094286510e-01 + 9.356769561794296e-01j,
+              3.528578094286510e-01 - 9.356769561794296e-01j,
+              3.769716042264783e-01 + 9.262248159096587e-01j,
+              3.769716042264783e-01 - 9.262248159096587e-01j,
+              4.406101783111199e-01 + 8.976985411420985e-01j,
+              4.406101783111199e-01 - 8.976985411420985e-01j,
+              5.539386470258847e-01 + 8.325574907062760e-01j,
+              5.539386470258847e-01 - 8.325574907062760e-01j,
+              6.748464963023645e-01 + 7.379581332490555e-01j,
+              6.748464963023645e-01 - 7.379581332490555e-01j,
+              7.489887970285254e-01 + 6.625826604475596e-01j,
+              7.489887970285254e-01 - 6.625826604475596e-01j,
+              7.913118471618432e-01 + 6.114127579150699e-01j,
+              7.913118471618432e-01 - 6.114127579150699e-01j,
+              7.806804740916381e-01 + 6.249303940216475e-01j,
+              7.806804740916381e-01 - 6.249303940216475e-01j]
+
+        p2 = [-1.025299146693730e-01 + 5.662682444754943e-01j,
+              -1.025299146693730e-01 - 5.662682444754943e-01j,
+               1.698463595163031e-01 + 8.926678667070186e-01j,
+               1.698463595163031e-01 - 8.926678667070186e-01j,
+               2.750532687820631e-01 + 9.351020170094005e-01j,
+               2.750532687820631e-01 - 9.351020170094005e-01j,
+               3.070095178909486e-01 + 9.457373499553291e-01j,
+               3.070095178909486e-01 - 9.457373499553291e-01j,
+               7.695332312152288e-01 + 2.792567212705257e-01j,
+               7.695332312152288e-01 - 2.792567212705257e-01j,
+               8.083818999225620e-01 + 4.990723496863960e-01j,
+               8.083818999225620e-01 - 4.990723496863960e-01j,
+               8.066158014414928e-01 + 5.649811440393374e-01j,
+               8.066158014414928e-01 - 5.649811440393374e-01j,
+               8.062787978834571e-01 + 5.855780880424964e-01j,
+               8.062787978834571e-01 - 5.855780880424964e-01j]
+        k2 = 2.068622545291259e-01
+        assert_allclose(sorted(z, key=np.angle),
+                        sorted(z2, key=np.angle), rtol=1e-6)
+        assert_allclose(sorted(p, key=np.angle),
+                        sorted(p2, key=np.angle), rtol=1e-5)
+        assert_allclose(k, k2, rtol=1e-5)
+
+    def test_ba_output(self):
+        # with transfer function conversion, without digital conversion
+        b, a = ellip(5, 1, 40, [201, 240], 'stop', True)
+        b2 = [
+             1.000000000000000e+00, 0,  # Matlab: 1.743506051190569e-13,
+             2.426561778314366e+05, 0,  # Matlab: 3.459426536825722e-08,
+             2.348218683400168e+10, 0,  # Matlab: 2.559179747299313e-03,
+             1.132780692872241e+15, 0,  # Matlab: 8.363229375535731e+01,
+             2.724038554089566e+19, 0,  # Matlab: 1.018700994113120e+06,
+             2.612380874940186e+23
+             ]
+        a2 = [
+             1.000000000000000e+00, 1.337266601804649e+02,
+             2.486725353510667e+05, 2.628059713728125e+07,
+             2.436169536928770e+10, 1.913554568577315e+12,
+             1.175208184614438e+15, 6.115751452473410e+16,
+             2.791577695211466e+19, 7.241811142725384e+20,
+             2.612380874940182e+23
+             ]
+        assert_allclose(b, b2, rtol=1e-6)
+        assert_allclose(a, a2, rtol=1e-4)
+
+    def test_fs_param(self):
+        for fs in (900, 900.1, 1234.567):
+            for N in (0, 1, 2, 3, 10):
+                for fc in (100, 100.1, 432.12345):
+                    for btype in ('lp', 'hp'):
+                        ba1 = ellip(N, 1, 20, fc, btype, fs=fs)
+                        ba2 = ellip(N, 1, 20, fc/(fs/2), btype)
+                        assert_allclose(ba1, ba2)
+                for fc in ((100, 200), (100.1, 200.2), (321.123, 432.123)):
+                    for btype in ('bp', 'bs'):
+                        ba1 = ellip(N, 1, 20, fc, btype, fs=fs)
+                        for seq in (list, tuple, array):
+                            fcnorm = seq([f/(fs/2) for f in fc])
+                            ba2 = ellip(N, 1, 20, fcnorm, btype)
+                            assert_allclose(ba1, ba2)
+
+
+def test_sos_consistency():
+    # Consistency checks of output='sos' for the specialized IIR filter
+    # design functions.
+    design_funcs = [(bessel, (0.1,)),
+                    (butter, (0.1,)),
+                    (cheby1, (45.0, 0.1)),
+                    (cheby2, (0.087, 0.1)),
+                    (ellip, (0.087, 45, 0.1))]
+    for func, args in design_funcs:
+        name = func.__name__
+
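+        # For a 2nd-order design, b and a each hold three coefficients, so
+        # the single SOS row [b0, b1, b2, a0, a1, a2] is their concatenation.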
+        b, a = func(2, *args, output='ba')
+        sos = func(2, *args, output='sos')
+        assert_allclose(sos, [np.hstack((b, a))], err_msg="%s(2,...)" % name)
+
+        zpk = func(3, *args, output='zpk')
+        sos = func(3, *args, output='sos')
+        assert_allclose(sos, zpk2sos(*zpk), err_msg="%s(3,...)" % name)
+
+        zpk = func(4, *args, output='zpk')
+        sos = func(4, *args, output='sos')
+        assert_allclose(sos, zpk2sos(*zpk), err_msg="%s(4,...)" % name)
+
+
+class TestIIRNotch:
+
+    def test_ba_output(self):
+        # Compare coefficients with MATLAB's
+        # for the equivalent input:
+        b, a = iirnotch(0.06, 30)
+        b2 = [
+             9.9686824e-01, -1.9584219e+00,
+             9.9686824e-01
+             ]
+        a2 = [
+             1.0000000e+00, -1.9584219e+00,
+             9.9373647e-01
+             ]
+
+        assert_allclose(b, b2, rtol=1e-8)
+        assert_allclose(a, a2, rtol=1e-8)
+
+    def test_frequency_response(self):
+        # Get the filter coefficients
+        b, a = iirnotch(0.3, 30)
+
+        # Get frequency response
+        w, h = freqz(b, a, 1000)
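+        # (freqz samples 1000 points on [0, pi), so index p corresponds to
+        #  the normalized frequency p/1000)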
+
+        # Pick 5 points
+        p = [200,  # w0 = 0.200
+             295,  # w0 = 0.295
+             300,  # w0 = 0.300
+             305,  # w0 = 0.305
+             400]  # w0 = 0.400
+
+        # Get the frequency response at each of those points
+        hp = h[p]
+
+        # Check that the frequency response fulfills the specifications:
+        # hp[0] and hp[4] correspond to frequencies distant from
+        # w0 = 0.3 and should be close to 1
+        assert_allclose(abs(hp[0]), 1, rtol=1e-2)
+        assert_allclose(abs(hp[4]), 1, rtol=1e-2)
+
+        # hp[1] and hp[3] correspond to frequencies approximately
+        # on the edges of the passband and should be close to -3 dB
+        assert_allclose(abs(hp[1]), 1/np.sqrt(2), rtol=1e-2)
+        assert_allclose(abs(hp[3]), 1/np.sqrt(2), rtol=1e-2)
+
+        # hp[2] corresponds to the frequency that should be removed;
+        # the frequency response should be very close to 0
+        assert_allclose(abs(hp[2]), 0, atol=1e-10)
+
+    def test_errors(self):
+        # Exception should be raised if w0 > 1 or w0 < 0
+        assert_raises(ValueError, iirnotch, w0=2, Q=30)
+        assert_raises(ValueError, iirnotch, w0=-1, Q=30)
+
+        # Exception should be raised if any of the parameters
+        # is not a float (or cannot be converted to one)
+        assert_raises(ValueError, iirnotch, w0="blabla", Q=30)
+        assert_raises(TypeError, iirnotch, w0=-1, Q=[1, 2, 3])
+
+    def test_fs_param(self):
+        # Get the filter coefficients
+        b, a = iirnotch(1500, 30, fs=10000)
+
+        # Get frequency response
+        w, h = freqz(b, a, 1000, fs=10000)
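+        # (with fs=10000, the 1000 points span [0, 5000) Hz, so index p
+        #  corresponds to p * 5 Hz)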
+
+        # Pick 5 points
+        p = [200,  # w0 = 1000
+             295,  # w0 = 1475
+             300,  # w0 = 1500
+             305,  # w0 = 1525
+             400]  # w0 = 2000
+
+        # Get the frequency response at each of those points
+        hp = h[p]
+
+        # Check that the frequency response fulfills the specifications:
+        # hp[0] and hp[4] correspond to frequencies distant from
+        # w0 = 1500 and should be close to 1
+        assert_allclose(abs(hp[0]), 1, rtol=1e-2)
+        assert_allclose(abs(hp[4]), 1, rtol=1e-2)
+
+        # hp[1] and hp[3] correspond to frequencies approximately
+        # on the edges of the passband and should be close to -3 dB
+        assert_allclose(abs(hp[1]), 1/np.sqrt(2), rtol=1e-2)
+        assert_allclose(abs(hp[3]), 1/np.sqrt(2), rtol=1e-2)
+
+        # hp[2] corresponds to the frequency that should be removed;
+        # the frequency response should be very close to 0
+        assert_allclose(abs(hp[2]), 0, atol=1e-10)
+
+
+class TestIIRPeak:
+
+    def test_ba_output(self):
+        # Compare coefficients with MATLAB's
+        # for the equivalent input:
+        b, a = iirpeak(0.06, 30)
+        b2 = [
+             3.131764229e-03, 0,
+             -3.131764229e-03
+             ]
+        a2 = [
+             1.0000000e+00, -1.958421917e+00,
+             9.9373647e-01
+             ]
+        assert_allclose(b, b2, rtol=1e-8)
+        assert_allclose(a, a2, rtol=1e-8)
+
+    def test_frequency_response(self):
+        # Get the filter coefficients
+        b, a = iirpeak(0.3, 30)
+
+        # Get frequency response
+        w, h = freqz(b, a, 1000)
+
+        # Pick 5 points
+        p = [30,  # w0 = 0.030
+             295,  # w0 = 0.295
+             300,  # w0 = 0.300
+             305,  # w0 = 0.305
+             800]  # w0 = 0.800
+
+        # Get the frequency response at each of those points
+        hp = h[p]
+
+        # Check that the frequency response fulfills the specifications:
+        # hp[0] and hp[4] correspond to frequencies distant from
+        # w0 = 0.3 and should be close to 0
+        assert_allclose(abs(hp[0]), 0, atol=1e-2)
+        assert_allclose(abs(hp[4]), 0, atol=1e-2)
+
+        # hp[1] and hp[3] correspond to frequencies approximately
+        # on the edges of the passband and should be close to -3 dB,
+        # i.e. 10**(-3/20) ~= 1/sqrt(2)
+        assert_allclose(abs(hp[1]), 1/np.sqrt(2), rtol=1e-2)
+        assert_allclose(abs(hp[3]), 1/np.sqrt(2), rtol=1e-2)
+
+        # hp[2] corresponds to the frequency that should be retained and
+        # the frequency response should be very close to 1
+        assert_allclose(abs(hp[2]), 1, rtol=1e-10)
+
+    def test_errors(self):
+        # Exception should be raised if w0 > 1 or w0 < 0
+        assert_raises(ValueError, iirpeak, w0=2, Q=30)
+        assert_raises(ValueError, iirpeak, w0=-1, Q=30)
+
+        # Exception should be raised if any of the parameters
+        # is not a float (or cannot be converted to one)
+        assert_raises(ValueError, iirpeak, w0="blabla", Q=30)
+        assert_raises(TypeError, iirpeak, w0=-1, Q=[1, 2, 3])
+
+    def test_fs_param(self):
+        # Get the filter coefficients
+        b, a = iirpeak(1200, 30, fs=8000)
+
+        # Get frequency response
+        w, h = freqz(b, a, 1000, fs=8000)
+
+        # Pick 5 points
+        p = [30,  # w0 = 120
+             295,  # w0 = 1180
+             300,  # w0 = 1200
+             305,  # w0 = 1220
+             800]  # w0 = 3200
+
+        # Get the frequency response at each of those points
+        hp = h[p]
+
+        # Check that the frequency response fulfills the specifications:
+        # hp[0] and hp[4] correspond to frequencies distant from
+        # w0 = 1200 and should be close to 0
+        assert_allclose(abs(hp[0]), 0, atol=1e-2)
+        assert_allclose(abs(hp[4]), 0, atol=1e-2)
+
+        # hp[1] and hp[3] correspond to frequencies approximately
+        # on the edges of the passband and should be close to -3 dB,
+        # i.e. 10**(-3/20) ~= 1/sqrt(2)
+        assert_allclose(abs(hp[1]), 1/np.sqrt(2), rtol=1e-2)
+        assert_allclose(abs(hp[3]), 1/np.sqrt(2), rtol=1e-2)
+
+        # hp[2] corresponds to the frequency that should be retained and
+        # the frequency response should be very close to 1
+        assert_allclose(abs(hp[2]), 1, rtol=1e-10)
+
+
+class TestIIRComb:
+    # Test erroneous input cases
+    def test_invalid_input(self):
+        # w0 is <= 0 or >= fs / 2
+        fs = 1000
+        for args in [(-fs, 30), (0, 35), (fs / 2, 40), (fs, 35)]:
+            with pytest.raises(ValueError, match='w0 must be between '):
+                iircomb(*args, fs=fs)
+
+        # fs is not divisible by w0
+        for args in [(120, 30), (157, 35)]:
+            with pytest.raises(ValueError, match='fs must be divisible '):
+                iircomb(*args, fs=fs)
+
+        # https://github.com/scipy/scipy/issues/14043#issuecomment-1107349140
+        # Previously, fs=44100, w0=49.999 was rejected, but fs=2,
+        # w0=49.999/int(44100/2) was accepted. Now it is rejected, too.
+        with pytest.raises(ValueError, match='fs must be divisible '):
+            iircomb(w0=49.999/int(44100/2), Q=30)
+
+        with pytest.raises(ValueError, match='fs must be divisible '):
+            iircomb(w0=49.999, Q=30, fs=44100)
+
+        # Filter type is not notch or peak
+        for args in [(0.2, 30, 'natch'), (0.5, 35, 'comb')]:
+            with pytest.raises(ValueError, match='ftype must be '):
+                iircomb(*args)
+
+    # Verify that the filter's frequency response contains a
+    # notch at the cutoff frequency
+    @pytest.mark.parametrize('ftype', ('notch', 'peak'))
+    def test_frequency_response(self, ftype):
+        # Create a notching or peaking comb filter at 1000 Hz
+        b, a = iircomb(1000, 30, ftype=ftype, fs=10000)
+
+        # Compute the frequency response
+        freqs, response = freqz(b, a, 1000, fs=10000)
+
+        # Find the notch using argrelextrema
+        comb_points = argrelextrema(abs(response), np.less)[0]
+
+        # Verify that the first notch sits at 1000 Hz
+        comb1 = comb_points[0]
+        assert_allclose(freqs[comb1], 1000)
+
+    # Verify pass_zero parameter
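+    # (per the table below, pass_zero shifts the comb by half a tooth
+    #  spacing, w0/2, swapping where the peaks and notches fall)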
+    @pytest.mark.parametrize('ftype,pass_zero,peak,notch',
+                             [('peak', True, 123.45, 61.725),
+                              ('peak', False, 61.725, 123.45),
+                              ('peak', None, 61.725, 123.45),
+                              ('notch', None, 61.725, 123.45),
+                              ('notch', True, 123.45, 61.725),
+                              ('notch', False, 61.725, 123.45)])
+    def test_pass_zero(self, ftype, pass_zero, peak, notch):
+        # Create a notching or peaking comb filter
+        b, a = iircomb(123.45, 30, ftype=ftype, fs=1234.5, pass_zero=pass_zero)
+
+        # Compute the frequency response
+        freqs, response = freqz(b, a, [peak, notch], fs=1234.5)
+
+        # Verify that expected notches are notches and peaks are peaks
+        assert abs(response[0]) > 0.99
+        assert abs(response[1]) < 1e-10
+
+    # All built-in IIR filters are real, so they should have perfectly
+    # symmetrical poles and zeros. The ba representation (using
+    # numpy.poly) is then purely real instead of having negligible
+    # imaginary parts.
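+    # (e.g. np.poly([0.5 + 0.5j, 0.5 - 0.5j]) gives the purely real
+    #  [1., -1., 0.5])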
+    def test_iir_symmetry(self):
+        b, a = iircomb(400, 30, fs=24000)
+        z, p, k = tf2zpk(b, a)
+        assert_array_equal(sorted(z), sorted(z.conj()))
+        assert_array_equal(sorted(p), sorted(p.conj()))
+        assert_equal(k, np.real(k))
+
+        assert issubclass(b.dtype.type, np.floating)
+        assert issubclass(a.dtype.type, np.floating)
+
+    # Verify filter coefficients with MATLAB's iircomb function
+    def test_ba_output(self):
+        b_notch, a_notch = iircomb(60, 35, ftype='notch', fs=600)
+        b_notch2 = [0.957020174408697, 0.0, 0.0, 0.0, 0.0, 0.0,
+                    0.0, 0.0, 0.0, 0.0, -0.957020174408697]
+        a_notch2 = [1.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+                    0.0, 0.0, 0.0, 0.0, -0.914040348817395]
+        assert_allclose(b_notch, b_notch2)
+        assert_allclose(a_notch, a_notch2)
+
+        b_peak, a_peak = iircomb(60, 35, ftype='peak', fs=600)
+        b_peak2 = [0.0429798255913026, 0.0, 0.0, 0.0, 0.0, 0.0,
+                   0.0, 0.0, 0.0, 0.0, -0.0429798255913026]
+        a_peak2 = [1.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+                   0.0, 0.0, 0.0, 0.0, 0.914040348817395]
+        assert_allclose(b_peak, b_peak2)
+        assert_allclose(a_peak, a_peak2)
+
+    # Verify that https://github.com/scipy/scipy/issues/14043 is fixed
+    def test_nearest_divisor(self):
+        # Create a notching comb filter
+        b, a = iircomb(50/int(44100/2), 50.0, ftype='notch')
+
+        # Compute the frequency response at an upper harmonic of 50
+        freqs, response = freqz(b, a, [22000], fs=44100)
+
+        # Before bug fix, this would produce N = 881, so that 22 kHz was ~0 dB.
+        # Now N = 882 is chosen correctly, and 22 kHz should be a notch < -220 dB
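+        # (assuming the order is chosen as the nearest divisor,
+        #  N = round(fs / w0) = round(44100 / 50) = 882, the 440th notch
+        #  lands exactly on 22 kHz)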
+        assert abs(response[0]) < 1e-10
+
+
+class TestIIRDesign:
+
+    def test_exceptions(self):
+        with pytest.raises(ValueError, match="the same shape"):
+            iirdesign(0.2, [0.1, 0.3], 1, 40)
+        with pytest.raises(ValueError, match="the same shape"):
+            iirdesign(np.array([[0.3, 0.6], [0.3, 0.6]]),
+                      np.array([[0.4, 0.5], [0.4, 0.5]]), 1, 40)
+
+        # discrete filter with non-positive frequency
+        with pytest.raises(ValueError, match="must be greater than 0"):
+            iirdesign(0, 0.5, 1, 40)
+        with pytest.raises(ValueError, match="must be greater than 0"):
+            iirdesign(-0.1, 0.5, 1, 40)
+        with pytest.raises(ValueError, match="must be greater than 0"):
+            iirdesign(0.1, 0, 1, 40)
+        with pytest.raises(ValueError, match="must be greater than 0"):
+            iirdesign(0.1, -0.5, 1, 40)
+        with pytest.raises(ValueError, match="must be greater than 0"):
+            iirdesign([0, 0.3], [0.1, 0.5], 1, 40)
+        with pytest.raises(ValueError, match="must be greater than 0"):
+            iirdesign([-0.1, 0.3], [0.1, 0.5], 1, 40)
+        with pytest.raises(ValueError, match="must be greater than 0"):
+            iirdesign([0.1, 0], [0.1, 0.5], 1, 40)
+        with pytest.raises(ValueError, match="must be greater than 0"):
+            iirdesign([0.1, -0.3], [0.1, 0.5], 1, 40)
+        with pytest.raises(ValueError, match="must be greater than 0"):
+            iirdesign([0.1, 0.3], [0, 0.5], 1, 40)
+        with pytest.raises(ValueError, match="must be greater than 0"):
+            iirdesign([0.1, 0.3], [-0.1, 0.5], 1, 40)
+        with pytest.raises(ValueError, match="must be greater than 0"):
+            iirdesign([0.1, 0.3], [0.1, 0], 1, 40)
+        with pytest.raises(ValueError, match="must be greater than 0"):
+            iirdesign([0.1, 0.3], [0.1, -0.5], 1, 40)
+
+        # analog filter with negative frequency
+        with pytest.raises(ValueError, match="must be greater than 0"):
+            iirdesign(-0.1, 0.5, 1, 40, analog=True)
+        with pytest.raises(ValueError, match="must be greater than 0"):
+            iirdesign(0.1, -0.5, 1, 40, analog=True)
+        with pytest.raises(ValueError, match="must be greater than 0"):
+            iirdesign([-0.1, 0.3], [0.1, 0.5], 1, 40, analog=True)
+        with pytest.raises(ValueError, match="must be greater than 0"):
+            iirdesign([0.1, -0.3], [0.1, 0.5], 1, 40, analog=True)
+        with pytest.raises(ValueError, match="must be greater than 0"):
+            iirdesign([0.1, 0.3], [-0.1, 0.5], 1, 40, analog=True)
+        with pytest.raises(ValueError, match="must be greater than 0"):
+            iirdesign([0.1, 0.3], [0.1, -0.5], 1, 40, analog=True)
+
+        # discrete filter with fs=None, freq > 1
+        with pytest.raises(ValueError, match="must be less than 1"):
+            iirdesign(1, 0.5, 1, 40)
+        with pytest.raises(ValueError, match="must be less than 1"):
+            iirdesign(1.1, 0.5, 1, 40)
+        with pytest.raises(ValueError, match="must be less than 1"):
+            iirdesign(0.1, 1, 1, 40)
+        with pytest.raises(ValueError, match="must be less than 1"):
+            iirdesign(0.1, 1.5, 1, 40)
+        with pytest.raises(ValueError, match="must be less than 1"):
+            iirdesign([1, 0.3], [0.1, 0.5], 1, 40)
+        with pytest.raises(ValueError, match="must be less than 1"):
+            iirdesign([1.1, 0.3], [0.1, 0.5], 1, 40)
+        with pytest.raises(ValueError, match="must be less than 1"):
+            iirdesign([0.1, 1], [0.1, 0.5], 1, 40)
+        with pytest.raises(ValueError, match="must be less than 1"):
+            iirdesign([0.1, 1.1], [0.1, 0.5], 1, 40)
+        with pytest.raises(ValueError, match="must be less than 1"):
+            iirdesign([0.1, 0.3], [1, 0.5], 1, 40)
+        with pytest.raises(ValueError, match="must be less than 1"):
+            iirdesign([0.1, 0.3], [1.1, 0.5], 1, 40)
+        with pytest.raises(ValueError, match="must be less than 1"):
+            iirdesign([0.1, 0.3], [0.1, 1], 1, 40)
+        with pytest.raises(ValueError, match="must be less than 1"):
+            iirdesign([0.1, 0.3], [0.1, 1.5], 1, 40)
+
+        # discrete filters with fs > 2 and wp, ws < fs/2 must pass
+        iirdesign(100, 500, 1, 40, fs=2000)
+        iirdesign(500, 100, 1, 40, fs=2000)
+        iirdesign([200, 400], [100, 500], 1, 40, fs=2000)
+        iirdesign([100, 500], [200, 400], 1, 40, fs=2000)
+
+        # discrete filters with fs > 2 and freq > fs/2: these must raise
+        with pytest.raises(ValueError, match="must be less than fs/2"):
+            iirdesign(1000, 400, 1, 40, fs=2000)
+        with pytest.raises(ValueError, match="must be less than fs/2"):
+            iirdesign(1100, 500, 1, 40, fs=2000)
+        with pytest.raises(ValueError, match="must be less than fs/2"):
+            iirdesign(100, 1000, 1, 40, fs=2000)
+        with pytest.raises(ValueError, match="must be less than fs/2"):
+            iirdesign(100, 1100, 1, 40, fs=2000)
+        with pytest.raises(ValueError, match="must be less than fs/2"):
+            iirdesign([1000, 400], [100, 500], 1, 40, fs=2000)
+        with pytest.raises(ValueError, match="must be less than fs/2"):
+            iirdesign([1100, 400], [100, 500], 1, 40, fs=2000)
+        with pytest.raises(ValueError, match="must be less than fs/2"):
+            iirdesign([200, 1000], [100, 500], 1, 40, fs=2000)
+        with pytest.raises(ValueError, match="must be less than fs/2"):
+            iirdesign([200, 1100], [100, 500], 1, 40, fs=2000)
+        with pytest.raises(ValueError, match="must be less than fs/2"):
+            iirdesign([200, 400], [1000, 500], 1, 40, fs=2000)
+        with pytest.raises(ValueError, match="must be less than fs/2"):
+            iirdesign([200, 400], [1100, 500], 1, 40, fs=2000)
+        with pytest.raises(ValueError, match="must be less than fs/2"):
+            iirdesign([200, 400], [100, 1000], 1, 40, fs=2000)
+        with pytest.raises(ValueError, match="must be less than fs/2"):
+            iirdesign([200, 400], [100, 1100], 1, 40, fs=2000)
+
+        with pytest.raises(ValueError, match="strictly inside stopband"):
+            iirdesign([0.1, 0.4], [0.5, 0.6], 1, 40)
+        with pytest.raises(ValueError, match="strictly inside stopband"):
+            iirdesign([0.5, 0.6], [0.1, 0.4], 1, 40)
+        with pytest.raises(ValueError, match="strictly inside stopband"):
+            iirdesign([0.3, 0.6], [0.4, 0.7], 1, 40)
+        with pytest.raises(ValueError, match="strictly inside stopband"):
+            iirdesign([0.4, 0.7], [0.3, 0.6], 1, 40)
+
+
+class TestIIRFilter:
+
+    def test_symmetry(self):
+        # All built-in IIR filters are real, so they should have perfectly
+        # symmetrical poles and zeros. The ba representation (using
+        # numpy.poly) is then purely real instead of having negligible
+        # imaginary parts.
+        for N in np.arange(1, 26):
+            for ftype in ('butter', 'bessel', 'cheby1', 'cheby2', 'ellip'):
+                z, p, k = iirfilter(N, 1.1, 1, 20, 'low', analog=True,
+                                    ftype=ftype, output='zpk')
+                assert_array_equal(sorted(z), sorted(z.conj()))
+                assert_array_equal(sorted(p), sorted(p.conj()))
+                assert_equal(k, np.real(k))
+
+                b, a = iirfilter(N, 1.1, 1, 20, 'low', analog=True,
+                                 ftype=ftype, output='ba')
+                assert_(issubclass(b.dtype.type, np.floating))
+                assert_(issubclass(a.dtype.type, np.floating))
+
+    def test_int_inputs(self):
+        # Using integer frequency arguments and large N should not produce
+        # numpy integers that wrap around to negative numbers
+        k = iirfilter(24, 100, btype='low', analog=True, ftype='bessel',
+                      output='zpk')[2]
+        k2 = 9.999999999999989e+47
+        assert_allclose(k, k2)
+
+    def test_invalid_wn_size(self):
+        # low and high have 1 Wn, band and stop have 2 Wn
+        assert_raises(ValueError, iirfilter, 1, [0.1, 0.9], btype='low')
+        assert_raises(ValueError, iirfilter, 1, [0.2, 0.5], btype='high')
+        assert_raises(ValueError, iirfilter, 1, 0.2, btype='bp')
+        assert_raises(ValueError, iirfilter, 1, 400, btype='bs', analog=True)
+
+    def test_invalid_wn_range(self):
+        # For digital filters, 0 <= Wn <= 1
+        assert_raises(ValueError, iirfilter, 1, 2, btype='low')
+        assert_raises(ValueError, iirfilter, 1, [0.5, 1], btype='band')
+        assert_raises(ValueError, iirfilter, 1, [0., 0.5], btype='band')
+        assert_raises(ValueError, iirfilter, 1, -1, btype='high')
+        assert_raises(ValueError, iirfilter, 1, [1, 2], btype='band')
+        assert_raises(ValueError, iirfilter, 1, [10, 20], btype='stop')
+
+        # analog=True with non-positive critical frequencies
+        with pytest.raises(ValueError, match="must be greater than 0"):
+            iirfilter(2, 0, btype='low', analog=True)
+        with pytest.raises(ValueError, match="must be greater than 0"):
+            iirfilter(2, -1, btype='low', analog=True)
+        with pytest.raises(ValueError, match="must be greater than 0"):
+            iirfilter(2, [0, 100], analog=True)
+        with pytest.raises(ValueError, match="must be greater than 0"):
+            iirfilter(2, [-1, 100], analog=True)
+        with pytest.raises(ValueError, match="must be greater than 0"):
+            iirfilter(2, [10, 0], analog=True)
+        with pytest.raises(ValueError, match="must be greater than 0"):
+            iirfilter(2, [10, -1], analog=True)
+
+    def test_analog_sos(self):
+        # first-order Butterworth filter with Wn = 1 has H(s) = 1/(s+1)
+        sos = [[0., 0., 1., 0., 1., 1.]]
+        sos2 = iirfilter(N=1, Wn=1, btype='low', analog=True, output='sos')
+        assert_array_almost_equal(sos, sos2)
+
+    def test_wn1_ge_wn0(self):
+        # gh-15773: should raise error if Wn[0] >= Wn[1]
+        with pytest.raises(ValueError,
+                           match=r"Wn\[0\] must be less than Wn\[1\]"):
+            iirfilter(2, [0.5, 0.5])
+        with pytest.raises(ValueError,
+                           match=r"Wn\[0\] must be less than Wn\[1\]"):
+            iirfilter(2, [0.6, 0.5])
+
+
+class TestGroupDelay:
+    def test_identity_filter(self):
+        w, gd = group_delay((1, 1))
+        assert_array_almost_equal(w, pi * np.arange(512) / 512)
+        assert_array_almost_equal(gd, np.zeros(512))
+        w, gd = group_delay((1, 1), whole=True)
+        assert_array_almost_equal(w, 2 * pi * np.arange(512) / 512)
+        assert_array_almost_equal(gd, np.zeros(512))
+
+    def test_fir(self):
+        # Let's design linear phase FIR and check that the group delay
+        # is constant.
+        N = 100
+        b = firwin(N + 1, 0.1)
+        w, gd = group_delay((b, 1))
+        assert_allclose(gd, 0.5 * N)
+
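+    def test_fir_phase_slope_sketch(self):
+        # Illustrative sketch, not part of the original suite: group delay is
+        # the negative derivative of the unwrapped phase, so for the linear-
+        # phase FIR above a finite-difference estimate of the phase slope in
+        # the passband should match the constant 0.5 * N samples.
+        N = 100
+        b = firwin(N + 1, 0.1)
+        w, H = freqz(b, worN=512)
+        phase = np.unwrap(np.angle(H))
+        gd_est = -np.diff(phase) / np.diff(w)
+        # stay well inside the passband, away from magnitude nulls
+        assert_allclose(gd_est[:20], 0.5 * N, rtol=1e-3)
+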
+    def test_iir(self):
+        # Let's design Butterworth filter and test the group delay at
+        # some points against MATLAB answer.
+        b, a = butter(4, 0.1)
+        w = np.linspace(0, pi, num=10, endpoint=False)
+        w, gd = group_delay((b, a), w=w)
+        matlab_gd = np.array([8.249313898506037, 11.958947880907104,
+                              2.452325615326005, 1.048918665702008,
+                              0.611382575635897, 0.418293269460578,
+                              0.317932917836572, 0.261371844762525,
+                              0.229038045801298, 0.212185774208521])
+        assert_array_almost_equal(gd, matlab_gd)
+
+    def test_singular(self):
+        # Let's create a filter with zeros and poles on the unit circle and
+        # check if warnings are raised at those frequencies.
+        z1 = np.exp(1j * 0.1 * pi)
+        z2 = np.exp(1j * 0.25 * pi)
+        p1 = np.exp(1j * 0.5 * pi)
+        p2 = np.exp(1j * 0.8 * pi)
+        b = np.convolve([1, -z1], [1, -z2])
+        a = np.convolve([1, -p1], [1, -p2])
+        w = np.array([0.1 * pi, 0.25 * pi, -0.5 * pi, -0.8 * pi])
+
+        w, gd = assert_warns(UserWarning, group_delay, (b, a), w=w)
+
+    def test_backward_compat(self):
+        # For backward compatibility, test that None acts as a wrapper for
+        # the default
+        w1, gd1 = group_delay((1, 1))
+        w2, gd2 = group_delay((1, 1), None)
+        assert_array_almost_equal(w1, w2)
+        assert_array_almost_equal(gd1, gd2)
+
+    def test_fs_param(self):
+        # Let's design Butterworth filter and test the group delay at
+        # some points against the normalized frequency answer.
+        b, a = butter(4, 4800, fs=96000)
+        w = np.linspace(0, 96000/2, num=10, endpoint=False)
+        w, gd = group_delay((b, a), w=w, fs=96000)
+        norm_gd = np.array([8.249313898506037, 11.958947880907104,
+                            2.452325615326005, 1.048918665702008,
+                            0.611382575635897, 0.418293269460578,
+                            0.317932917836572, 0.261371844762525,
+                            0.229038045801298, 0.212185774208521])
+        assert_array_almost_equal(gd, norm_gd)
+
+    def test_w_or_N_types(self):
+        # Measure at 8 equally-spaced points
+        for N in (8, np.int8(8), np.int16(8), np.int32(8), np.int64(8),
+                  np.array(8)):
+            w, gd = group_delay((1, 1), N)
+            assert_array_almost_equal(w, pi * np.arange(8) / 8)
+            assert_array_almost_equal(gd, np.zeros(8))
+
+        # Measure at frequency 8 rad/sec
+        for w in (8.0, 8.0+0j):
+            w_out, gd = group_delay((1, 1), w)
+            assert_array_almost_equal(w_out, [8])
+            assert_array_almost_equal(gd, [0])
+
+
+class TestGammatone:
+    # Test erroneous input cases.
+    def test_invalid_input(self):
+        # Center frequency is <= 0 or >= fs / 2.
+        fs = 16000
+        for args in [(-fs, 'iir'), (0, 'fir'), (fs / 2, 'iir'), (fs, 'fir')]:
+            with pytest.raises(ValueError, match='The frequency must be '
+                               'between '):
+                gammatone(*args, fs=fs)
+
+        # Filter type is not fir or iir
+        for args in [(440, 'fie'), (220, 'it')]:
+            with pytest.raises(ValueError, match='ftype must be '):
+                gammatone(*args, fs=fs)
+
+        # Order is <= 0 or > 24 for FIR filter.
+        for args in [(440, 'fir', -50), (220, 'fir', 0), (110, 'fir', 25),
+                     (55, 'fir', 50)]:
+            with pytest.raises(ValueError, match='Invalid order: '):
+                gammatone(*args, numtaps=None, fs=fs)
+
+    # Verify that the magnitude of the filter's frequency response peaks
+    # at approximately 1 at the center frequency.
+    def test_frequency_response(self):
+        fs = 16000
+        ftypes = ['fir', 'iir']
+        for ftype in ftypes:
+            # Create a gammatone filter centered at 1000 Hz.
+            b, a = gammatone(1000, ftype, fs=fs)
+
+            # Calculate the frequency response.
+            freqs, response = freqz(b, a)
+
+            # Determine peak magnitude of the response
+            # and corresponding frequency.
+            response_max = np.max(np.abs(response))
+            freq_hz = freqs[np.argmax(np.abs(response))] / ((2 * np.pi) / fs)
+
+            # Check that the peak magnitude is 1 and the frequency is 1000 Hz.
+            assert response_max == pytest.approx(1, rel=1e-2)
+            assert freq_hz == pytest.approx(1000, rel=1e-2)
+
+    # All built-in IIR filters are real, so they should have perfectly
+    # conjugate-symmetric poles and zeros. The ba representation (using
+    # numpy.poly) will then be purely real instead of having negligible
+    # imaginary parts.
+    def test_iir_symmetry(self):
+        b, a = gammatone(440, 'iir', fs=24000)
+        z, p, k = tf2zpk(b, a)
+        assert_array_equal(sorted(z), sorted(z.conj()))
+        assert_array_equal(sorted(p), sorted(p.conj()))
+        assert_equal(k, np.real(k))
+
+        assert_(issubclass(b.dtype.type, np.floating))
+        assert_(issubclass(a.dtype.type, np.floating))
+
+    # Verify FIR filter coefficients with the paper's
+    # Mathematica implementation
+    def test_fir_ba_output(self):
+        b, _ = gammatone(15, 'fir', fs=1000)
+        b2 = [0.0, 2.2608075649884e-04,
+              1.5077903981357e-03, 4.2033687753998e-03,
+              8.1508962726503e-03, 1.2890059089154e-02,
+              1.7833890391666e-02, 2.2392613558564e-02,
+              2.6055195863104e-02, 2.8435872863284e-02,
+              2.9293319149544e-02, 2.852976858014e-02,
+              2.6176557156294e-02, 2.2371510270395e-02,
+              1.7332485267759e-02]
+        assert_allclose(b, b2)
+
+    # Verify IIR filter coefficients with the paper's MATLAB implementation
+    def test_iir_ba_output(self):
+        b, a = gammatone(440, 'iir', fs=16000)
+        b2 = [1.31494461367464e-06, -5.03391196645395e-06,
+              7.00649426000897e-06, -4.18951968419854e-06,
+              9.02614910412011e-07]
+        a2 = [1.0, -7.65646235454218,
+              25.7584699322366, -49.7319214483238,
+              60.2667361289181, -46.9399590980486,
+              22.9474798808461, -6.43799381299034,
+              0.793651554625368]
+        assert_allclose(b, b2)
+        assert_allclose(a, a2)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_fir_filter_design.py b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_fir_filter_design.py
new file mode 100644
index 00000000..db9a7938
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_fir_filter_design.py
@@ -0,0 +1,674 @@
+import numpy as np
+from numpy.testing import (assert_almost_equal, assert_array_almost_equal,
+                           assert_equal, assert_,
+                           assert_allclose, assert_warns)
+from pytest import raises as assert_raises
+import pytest
+
+from scipy.fft import fft
+from scipy.special import sinc
+from scipy.signal import kaiser_beta, kaiser_atten, kaiserord, \
+    firwin, firwin2, freqz, remez, firls, minimum_phase
+
+
+def test_kaiser_beta():
+    b = kaiser_beta(58.7)
+    assert_almost_equal(b, 0.1102 * 50.0)
+    b = kaiser_beta(22.0)
+    assert_almost_equal(b, 0.5842 + 0.07886)
+    b = kaiser_beta(21.0)
+    assert_equal(b, 0.0)
+    b = kaiser_beta(10.0)
+    assert_equal(b, 0.0)
+
+
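+def test_kaiser_beta_formula_sketch():
+    # Illustrative sketch, not part of the original suite: the magic numbers
+    # above follow Kaiser's empirical formula for the window shape parameter,
+    #   beta = 0.1102 * (A - 8.7)                          for A > 50
+    #   beta = 0.5842 * (A - 21)**0.4 + 0.07886 * (A - 21) for 21 <= A <= 50
+    #   beta = 0                                           otherwise,
+    # where A is the desired stopband attenuation in dB.
+    for A in (58.7, 50.0, 22.0, 21.0, 10.0):
+        if A > 50:
+            expected = 0.1102 * (A - 8.7)
+        elif A >= 21:
+            expected = 0.5842 * (A - 21) ** 0.4 + 0.07886 * (A - 21)
+        else:
+            expected = 0.0
+        assert_allclose(kaiser_beta(A), expected)
+
+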
+def test_kaiser_atten():
+    a = kaiser_atten(1, 1.0)
+    assert_equal(a, 7.95)
+    a = kaiser_atten(2, 1/np.pi)
+    assert_equal(a, 2.285 + 7.95)
+
+
+def test_kaiserord():
+    assert_raises(ValueError, kaiserord, 1.0, 1.0)
+    numtaps, beta = kaiserord(2.285 + 7.95 - 0.001, 1/np.pi)
+    assert_equal((numtaps, beta), (2, 0.0))
+
+
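+def test_kaiserord_formula_sketch():
+    # Illustrative sketch, not part of the original suite, assuming the
+    # Kaiser design relation documented for kaiserord:
+    #   numtaps = ceil((A - 7.95) / (2.285 * pi * width) + 1),
+    # which explains the magic value 2.285 + 7.95 - 0.001 used above.
+    A, width = 2.285 + 7.95 - 0.001, 1 / np.pi
+    expected_numtaps = int(np.ceil((A - 7.95) / (2.285 * np.pi * width) + 1))
+    numtaps, beta = kaiserord(A, width)
+    assert_equal(numtaps, expected_numtaps)
+    assert_equal(beta, kaiser_beta(A))
+
+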
+class TestFirwin:
+
+    def check_response(self, h, expected_response, tol=.05):
+        N = len(h)
+        alpha = 0.5 * (N-1)
+        m = np.arange(0,N) - alpha   # time indices of taps
+        for freq, expected in expected_response:
+            actual = abs(np.sum(h*np.exp(-1.j*np.pi*m*freq)))
+            mse = abs(actual-expected)**2
+            assert_(mse < tol, 'response not as expected, mse=%g > %g'
+                    % (mse, tol))
+
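+    def test_check_response_matches_freqz(self):
+        # Illustrative sketch, not part of the original suite: the manual DFT
+        # sum in check_response differs from freqz only by the pure delay
+        # factor exp(1j*pi*alpha*freq), so the magnitudes must agree.
+        h = firwin(51, 0.5)
+        freq = 0.25
+        _, resp = freqz(h, worN=[np.pi * freq])
+        self.check_response(h, [(freq, np.abs(resp[0]))], tol=1e-12)
+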
+    def test_response(self):
+        N = 51
+        f = .5
+        # increase length just to try even/odd
+        h = firwin(N, f)  # low-pass from 0 to f
+        self.check_response(h, [(.25,1), (.75,0)])
+
+        h = firwin(N+1, f, window='nuttall')  # specific window
+        self.check_response(h, [(.25,1), (.75,0)])
+
+        h = firwin(N+2, f, pass_zero=False)  # stop from 0 to f --> high-pass
+        self.check_response(h, [(.25,0), (.75,1)])
+
+        f1, f2, f3, f4 = .2, .4, .6, .8
+        h = firwin(N+3, [f1, f2], pass_zero=False)  # band-pass filter
+        self.check_response(h, [(.1,0), (.3,1), (.5,0)])
+
+        h = firwin(N+4, [f1, f2])  # band-stop filter
+        self.check_response(h, [(.1,1), (.3,0), (.5,1)])
+
+        h = firwin(N+5, [f1, f2, f3, f4], pass_zero=False, scale=False)
+        self.check_response(h, [(.1,0), (.3,1), (.5,0), (.7,1), (.9,0)])
+
+        h = firwin(N+6, [f1, f2, f3, f4])  # multiband filter
+        self.check_response(h, [(.1,1), (.3,0), (.5,1), (.7,0), (.9,1)])
+
+        h = firwin(N+7, 0.1, width=.03)  # low-pass
+        self.check_response(h, [(.05,1), (.75,0)])
+
+        h = firwin(N+8, 0.1, pass_zero=False)  # high-pass
+        self.check_response(h, [(.05,0), (.75,1)])
+
+    def mse(self, h, bands):
+        """Compute mean squared error versus ideal response across frequency
+        band.
+          h -- coefficients
+          bands -- list of (left, right) tuples relative to 1==Nyquist of
+            passbands
+        """
+        w, H = freqz(h, worN=1024)
+        f = w/np.pi
+        passIndicator = np.zeros(len(w), bool)
+        for left, right in bands:
+            passIndicator |= (f >= left) & (f < right)
+        Hideal = np.where(passIndicator, 1, 0)
+        mse = np.mean(abs(abs(H)-Hideal)**2)
+        return mse
+
+    def test_scaling(self):
+        """
+        For one lowpass, bandpass, and highpass example filter, this test
+        checks two things:
+          - the mean squared error over the frequency domain of the unscaled
+            filter is smaller than the scaled filter (true for rectangular
+            window)
+          - the response of the scaled filter is exactly unity at the center
+            of the first passband
+        """
+        N = 11
+        cases = [
+            ([.5], True, (0, 1)),
+            ([0.2, .6], False, (.4, 1)),
+            ([.5], False, (1, 1)),
+        ]
+        for cutoff, pass_zero, expected_response in cases:
+            h = firwin(N, cutoff, scale=False, pass_zero=pass_zero, window='ones')
+            hs = firwin(N, cutoff, scale=True, pass_zero=pass_zero, window='ones')
+            if len(cutoff) == 1:
+                if pass_zero:
+                    cutoff = [0] + cutoff
+                else:
+                    cutoff = cutoff + [1]
+            assert_(self.mse(h, [cutoff]) < self.mse(hs, [cutoff]),
+                'least squares violation')
+            self.check_response(hs, [expected_response], 1e-12)
+
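+    def test_scaling_dc_gain_sketch(self):
+        # Illustrative sketch, not part of the original suite, assuming the
+        # documented behaviour of scale=True: for a pass_zero lowpass design
+        # the taps are normalized so the DC gain, i.e. the plain sum of the
+        # taps, is unity.
+        hs = firwin(11, 0.5, scale=True, window='ones')
+        assert_allclose(np.sum(hs), 1.0, rtol=1e-12)
+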
+
+class TestFirWinMore:
+    """Different author, different style, different tests..."""
+
+    def test_lowpass(self):
+        width = 0.04
+        ntaps, beta = kaiserord(120, width)
+        kwargs = dict(cutoff=0.5, window=('kaiser', beta), scale=False)
+        taps = firwin(ntaps, **kwargs)
+
+        # Check the symmetry of taps.
+        assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
+
+        # Check the gain at a few samples where we know it should be approximately 0 or 1.
+        freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2, 0.75, 1.0])
+        freqs, response = freqz(taps, worN=np.pi*freq_samples)
+        assert_array_almost_equal(np.abs(response),
+                                    [1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5)
+
+        taps_str = firwin(ntaps, pass_zero='lowpass', **kwargs)
+        assert_allclose(taps, taps_str)
+
+    def test_highpass(self):
+        width = 0.04
+        ntaps, beta = kaiserord(120, width)
+
+        # Ensure that ntaps is odd.
+        ntaps |= 1
+
+        kwargs = dict(cutoff=0.5, window=('kaiser', beta), scale=False)
+        taps = firwin(ntaps, pass_zero=False, **kwargs)
+
+        # Check the symmetry of taps.
+        assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
+
+        # Check the gain at a few samples where we know it should be approximately 0 or 1.
+        freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2, 0.75, 1.0])
+        freqs, response = freqz(taps, worN=np.pi*freq_samples)
+        assert_array_almost_equal(np.abs(response),
+                                    [0.0, 0.0, 0.0, 1.0, 1.0, 1.0], decimal=5)
+
+        taps_str = firwin(ntaps, pass_zero='highpass', **kwargs)
+        assert_allclose(taps, taps_str)
+
+    def test_bandpass(self):
+        width = 0.04
+        ntaps, beta = kaiserord(120, width)
+        kwargs = dict(cutoff=[0.3, 0.7], window=('kaiser', beta), scale=False)
+        taps = firwin(ntaps, pass_zero=False, **kwargs)
+
+        # Check the symmetry of taps.
+        assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
+
+        # Check the gain at a few samples where we know it should be approximately 0 or 1.
+        freq_samples = np.array([0.0, 0.2, 0.3-width/2, 0.3+width/2, 0.5,
+                                0.7-width/2, 0.7+width/2, 0.8, 1.0])
+        freqs, response = freqz(taps, worN=np.pi*freq_samples)
+        assert_array_almost_equal(np.abs(response),
+                [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5)
+
+        taps_str = firwin(ntaps, pass_zero='bandpass', **kwargs)
+        assert_allclose(taps, taps_str)
+
+    def test_bandstop_multi(self):
+        width = 0.04
+        ntaps, beta = kaiserord(120, width)
+        kwargs = dict(cutoff=[0.2, 0.5, 0.8], window=('kaiser', beta),
+                      scale=False)
+        taps = firwin(ntaps, **kwargs)
+
+        # Check the symmetry of taps.
+        assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
+
+        # Check the gain at a few samples where we know it should be approximately 0 or 1.
+        freq_samples = np.array([0.0, 0.1, 0.2-width/2, 0.2+width/2, 0.35,
+                                0.5-width/2, 0.5+width/2, 0.65,
+                                0.8-width/2, 0.8+width/2, 0.9, 1.0])
+        freqs, response = freqz(taps, worN=np.pi*freq_samples)
+        assert_array_almost_equal(np.abs(response),
+                [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0],
+                decimal=5)
+
+        taps_str = firwin(ntaps, pass_zero='bandstop', **kwargs)
+        assert_allclose(taps, taps_str)
+
+    def test_fs_nyq(self):
+        """Test the fs and nyq keywords."""
+        nyquist = 1000
+        width = 40.0
+        relative_width = width/nyquist
+        ntaps, beta = kaiserord(120, relative_width)
+        taps = firwin(ntaps, cutoff=[300, 700], window=('kaiser', beta),
+                        pass_zero=False, scale=False, fs=2*nyquist)
+
+        # Check the symmetry of taps.
+        assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
+
+        # Check the gain at a few samples where we know it should be approximately 0 or 1.
+        freq_samples = np.array([0.0, 200, 300-width/2, 300+width/2, 500,
+                                700-width/2, 700+width/2, 800, 1000])
+        freqs, response = freqz(taps, worN=np.pi*freq_samples/nyquist)
+        assert_array_almost_equal(np.abs(response),
+                [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5)
+        with np.testing.suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, "Keyword argument 'nyq'")
+            taps2 = firwin(ntaps, cutoff=[300, 700], window=('kaiser', beta),
+                           pass_zero=False, scale=False, nyq=nyquist)
+        assert_allclose(taps2, taps)
+
+    def test_bad_cutoff(self):
+        """Test that invalid cutoff argument raises ValueError."""
+        # cutoff values must be greater than 0 and less than 1.
+        assert_raises(ValueError, firwin, 99, -0.5)
+        assert_raises(ValueError, firwin, 99, 1.5)
+        # Don't allow 0 or 1 in cutoff.
+        assert_raises(ValueError, firwin, 99, [0, 0.5])
+        assert_raises(ValueError, firwin, 99, [0.5, 1])
+        # cutoff values must be strictly increasing.
+        assert_raises(ValueError, firwin, 99, [0.1, 0.5, 0.2])
+        assert_raises(ValueError, firwin, 99, [0.1, 0.5, 0.5])
+        # Must have at least one cutoff value.
+        assert_raises(ValueError, firwin, 99, [])
+        # 2D array not allowed.
+        assert_raises(ValueError, firwin, 99, [[0.1, 0.2],[0.3, 0.4]])
+        # cutoff values must be less than nyq.
+        with np.testing.suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, "Keyword argument 'nyq'")
+            assert_raises(ValueError, firwin, 99, 50.0, nyq=40)
+            assert_raises(ValueError, firwin, 99, [10, 20, 30], nyq=25)
+        assert_raises(ValueError, firwin, 99, 50.0, fs=80)
+        assert_raises(ValueError, firwin, 99, [10, 20, 30], fs=50)
+
+    def test_even_highpass_raises_value_error(self):
+        """Test that attempt to create a highpass filter with an even number
+        of taps raises a ValueError exception."""
+        assert_raises(ValueError, firwin, 40, 0.5, pass_zero=False)
+        assert_raises(ValueError, firwin, 40, [.25, 0.5])
+
+    def test_bad_pass_zero(self):
+        """Test degenerate pass_zero cases."""
+        with assert_raises(ValueError, match='pass_zero must be'):
+            firwin(41, 0.5, pass_zero='foo')
+        with assert_raises(TypeError, match='cannot be interpreted'):
+            firwin(41, 0.5, pass_zero=1.)
+        for pass_zero in ('lowpass', 'highpass'):
+            with assert_raises(ValueError, match='cutoff must have one'):
+                firwin(41, [0.5, 0.6], pass_zero=pass_zero)
+        for pass_zero in ('bandpass', 'bandstop'):
+            with assert_raises(ValueError, match='must have at least two'):
+                firwin(41, [0.5], pass_zero=pass_zero)
+
+    def test_nyq_deprecation(self):
+        with pytest.warns(DeprecationWarning,
+                          match="Keyword argument 'nyq' is deprecated in "
+                          ):
+            firwin(1, 1, nyq=10)
+
+
+class TestFirwin2:
+
+    def test_invalid_args(self):
+        # `freq` and `gain` have different lengths.
+        with assert_raises(ValueError, match='must be of same length'):
+            firwin2(50, [0, 0.5, 1], [0.0, 1.0])
+        # `nfreqs` is less than `ntaps`.
+        with assert_raises(ValueError, match='ntaps must be less than nfreqs'):
+            firwin2(50, [0, 0.5, 1], [0.0, 1.0, 1.0], nfreqs=33)
+        # Decreasing value in `freq`
+        with assert_raises(ValueError, match='must be nondecreasing'):
+            firwin2(50, [0, 0.5, 0.4, 1.0], [0, .25, .5, 1.0])
+        # Value in `freq` repeated more than once.
+        with assert_raises(ValueError, match='must not occur more than twice'):
+            firwin2(50, [0, .1, .1, .1, 1.0], [0.0, 0.5, 0.75, 1.0, 1.0])
+        # `freq` does not start at 0.0.
+        with assert_raises(ValueError, match='start with 0'):
+            firwin2(50, [0.5, 1.0], [0.0, 1.0])
+        # `freq` does not end at fs/2.
+        with assert_raises(ValueError, match='end with fs/2'):
+            firwin2(50, [0.0, 0.5], [0.0, 1.0])
+        # Value 0 is repeated in `freq`
+        with assert_raises(ValueError, match='0 must not be repeated'):
+            firwin2(50, [0.0, 0.0, 0.5, 1.0], [1.0, 1.0, 0.0, 0.0])
+        # Value fs/2 is repeated in `freq`
+        with assert_raises(ValueError, match='fs/2 must not be repeated'):
+            firwin2(50, [0.0, 0.5, 1.0, 1.0], [1.0, 1.0, 0.0, 0.0])
+        # Value in `freq` that is too close to a repeated number
+        with assert_raises(ValueError, match='cannot contain numbers '
+                                             'that are too close'):
+            firwin2(50, [0.0, 0.5 - np.finfo(float).eps * 0.5, 0.5, 0.5, 1.0],
+                        [1.0, 1.0, 1.0, 0.0, 0.0])
+
+        # Type II filter, but the gain at the Nyquist frequency is not zero.
+        with assert_raises(ValueError, match='Type II filter'):
+            firwin2(16, [0.0, 0.5, 1.0], [0.0, 1.0, 1.0])
+
+        # Type III filter, but the gains at the Nyquist and zero frequencies
+        # are not zero.
+        with assert_raises(ValueError, match='Type III filter'):
+            firwin2(17, [0.0, 0.5, 1.0], [0.0, 1.0, 1.0], antisymmetric=True)
+        with assert_raises(ValueError, match='Type III filter'):
+            firwin2(17, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0], antisymmetric=True)
+        with assert_raises(ValueError, match='Type III filter'):
+            firwin2(17, [0.0, 0.5, 1.0], [1.0, 1.0, 1.0], antisymmetric=True)
+
+        # Type IV filter, but the gain at zero frequency is not zero.
+        with assert_raises(ValueError, match='Type IV filter'):
+            firwin2(16, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0], antisymmetric=True)
+
+    def test01(self):
+        width = 0.04
+        beta = 12.0
+        ntaps = 400
+        # Filter is 1 from w=0 to w=0.5, then decreases linearly from 1 to 0 as w
+        # increases from w=0.5 to w=1  (w=1 is the Nyquist frequency).
+        freq = [0.0, 0.5, 1.0]
+        gain = [1.0, 1.0, 0.0]
+        taps = firwin2(ntaps, freq, gain, window=('kaiser', beta))
+        freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2,
+                                                        0.75, 1.0-width/2])
+        freqs, response = freqz(taps, worN=np.pi*freq_samples)
+        assert_array_almost_equal(np.abs(response),
+                        [1.0, 1.0, 1.0, 1.0-width, 0.5, width], decimal=5)
+
+    def test02(self):
+        width = 0.04
+        beta = 12.0
+        # ntaps must be odd for positive gain at Nyquist.
+        ntaps = 401
+        # An ideal highpass filter.
+        freq = [0.0, 0.5, 0.5, 1.0]
+        gain = [0.0, 0.0, 1.0, 1.0]
+        taps = firwin2(ntaps, freq, gain, window=('kaiser', beta))
+        freq_samples = np.array([0.0, 0.25, 0.5-width, 0.5+width, 0.75, 1.0])
+        freqs, response = freqz(taps, worN=np.pi*freq_samples)
+        assert_array_almost_equal(np.abs(response),
+                                [0.0, 0.0, 0.0, 1.0, 1.0, 1.0], decimal=5)
+
+    def test03(self):
+        width = 0.02
+        ntaps, beta = kaiserord(120, width)
+        # ntaps must be odd for positive gain at Nyquist.
+        ntaps = int(ntaps) | 1
+        freq = [0.0, 0.4, 0.4, 0.5, 0.5, 1.0]
+        gain = [1.0, 1.0, 0.0, 0.0, 1.0, 1.0]
+        taps = firwin2(ntaps, freq, gain, window=('kaiser', beta))
+        freq_samples = np.array([0.0, 0.4-width, 0.4+width, 0.45,
+                                    0.5-width, 0.5+width, 0.75, 1.0])
+        freqs, response = freqz(taps, worN=np.pi*freq_samples)
+        assert_array_almost_equal(np.abs(response),
+                    [1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0], decimal=5)
+
+    def test04(self):
+        """Test firwin2 when window=None."""
+        ntaps = 5
+        # Ideal lowpass: gain is 1 on [0,0.5], and 0 on [0.5, 1.0]
+        freq = [0.0, 0.5, 0.5, 1.0]
+        gain = [1.0, 1.0, 0.0, 0.0]
+        taps = firwin2(ntaps, freq, gain, window=None, nfreqs=8193)
+        alpha = 0.5 * (ntaps - 1)
+        m = np.arange(0, ntaps) - alpha
+        h = 0.5 * sinc(0.5 * m)
+        assert_array_almost_equal(h, taps)
+
+    def test05(self):
+        """Test firwin2 for calculating Type IV filters"""
+        ntaps = 1500
+
+        freq = [0.0, 1.0]
+        gain = [0.0, 1.0]
+        taps = firwin2(ntaps, freq, gain, window=None, antisymmetric=True)
+        assert_array_almost_equal(taps[: ntaps // 2], -taps[ntaps // 2:][::-1])
+
+        freqs, response = freqz(taps, worN=2048)
+        assert_array_almost_equal(abs(response), freqs / np.pi, decimal=4)
+
+    def test06(self):
+        """Test firwin2 for calculating Type III filters"""
+        ntaps = 1501
+
+        freq = [0.0, 0.5, 0.55, 1.0]
+        gain = [0.0, 0.5, 0.0, 0.0]
+        taps = firwin2(ntaps, freq, gain, window=None, antisymmetric=True)
+        assert_equal(taps[ntaps // 2], 0.0)
+        assert_array_almost_equal(taps[: ntaps // 2], -taps[ntaps // 2 + 1:][::-1])
+
+        freqs, response1 = freqz(taps, worN=2048)
+        response2 = np.interp(freqs / np.pi, freq, gain)
+        assert_array_almost_equal(abs(response1), response2, decimal=3)
+
+    def test_fs_nyq(self):
+        taps1 = firwin2(80, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
+        taps2 = firwin2(80, [0.0, 30.0, 60.0], [1.0, 1.0, 0.0], fs=120.0)
+        assert_array_almost_equal(taps1, taps2)
+        with np.testing.suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, "Keyword argument 'nyq'")
+            taps2 = firwin2(80, [0.0, 30.0, 60.0], [1.0, 1.0, 0.0], nyq=60.0)
+        assert_array_almost_equal(taps1, taps2)
+
+    def test_tuple(self):
+        taps1 = firwin2(150, (0.0, 0.5, 0.5, 1.0), (1.0, 1.0, 0.0, 0.0))
+        taps2 = firwin2(150, [0.0, 0.5, 0.5, 1.0], [1.0, 1.0, 0.0, 0.0])
+        assert_array_almost_equal(taps1, taps2)
+
+    def test_input_modification(self):
+        freq1 = np.array([0.0, 0.5, 0.5, 1.0])
+        freq2 = np.array(freq1)
+        firwin2(80, freq1, [1.0, 1.0, 0.0, 0.0])
+        assert_equal(freq1, freq2)
+
+    def test_nyq_deprecation(self):
+        with pytest.warns(DeprecationWarning,
+                          match="Keyword argument 'nyq' is deprecated in "
+                          ):
+            firwin2(1, [0, 10], [1, 1], nyq=10)
+
+
+class TestRemez:
+
+    def test_bad_args(self):
+        assert_raises(ValueError, remez, 11, [0.1, 0.4], [1], type='pooka')
+
+    def test_hilbert(self):
+        N = 11  # number of taps in the filter
+        a = 0.1  # width of the transition band
+
+        # design a unity-gain Hilbert bandpass filter from a to 0.5-a
+        h = remez(N, [a, 0.5-a], [1], type='hilbert')
+
+        # make sure the filter has correct # of taps
+        assert_(len(h) == N, "Number of Taps")
+
+        # make sure it is type III (anti-symmetric tap coefficients)
+        assert_array_almost_equal(h[:(N-1)//2], -h[:-(N-1)//2-1:-1])
+
+        # Since the requested response is symmetric, all even coefficients
+        # should be zero (or in this case really small)
+        assert_((abs(h[1::2]) < 1e-15).all(), "Even Coefficients Equal Zero")
+
+        # now check the frequency response
+        w, H = freqz(h, 1)
+        f = w/2/np.pi
+        Hmag = abs(H)
+
+        # should have a zero at 0 and pi (in this case close to zero)
+        assert_((Hmag[[0, -1]] < 0.02).all(), "Zero at zero and pi")
+
+        # check that the pass band is close to unity
+        idx = np.logical_and(f > a, f < 0.5-a)
+        assert_((abs(Hmag[idx] - 1) < 0.015).all(), "Pass Band Close To Unity")
+
+    def test_compare(self):
+        # test comparison to MATLAB
+        k = [0.024590270518440, -0.041314581814658, -0.075943803756711,
+             -0.003530911231040, 0.193140296954975, 0.373400753484939,
+             0.373400753484939, 0.193140296954975, -0.003530911231040,
+             -0.075943803756711, -0.041314581814658, 0.024590270518440]
+        with np.testing.suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, "'remez'")
+            h = remez(12, [0, 0.3, 0.5, 1], [1, 0], Hz=2.)
+        assert_allclose(h, k)
+        h = remez(12, [0, 0.3, 0.5, 1], [1, 0], fs=2.)
+        assert_allclose(h, k)
+
+        h = [-0.038976016082299, 0.018704846485491, -0.014644062687875,
+             0.002879152556419, 0.016849978528150, -0.043276706138248,
+             0.073641298245579, -0.103908158578635, 0.129770906801075,
+             -0.147163447297124, 0.153302248456347, -0.147163447297124,
+             0.129770906801075, -0.103908158578635, 0.073641298245579,
+             -0.043276706138248, 0.016849978528150, 0.002879152556419,
+             -0.014644062687875, 0.018704846485491, -0.038976016082299]
+        with np.testing.suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, "'remez'")
+            assert_allclose(remez(21, [0, 0.8, 0.9, 1], [0, 1], Hz=2.), h)
+        assert_allclose(remez(21, [0, 0.8, 0.9, 1], [0, 1], fs=2.), h)
+
+    def test_Hz_deprecation(self):
+        with pytest.warns(DeprecationWarning,
+                          match="'remez' keyword argument 'Hz'"
+                          ):
+            remez(12, [0, 0.3, 0.5, 1], [1, 0], Hz=2.)
+
+
+class TestFirls:
+
+    def test_bad_args(self):
+        # even numtaps
+        assert_raises(ValueError, firls, 10, [0.1, 0.2], [0, 0])
+        # odd bands
+        assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.4], [0, 0, 0])
+        # len(bands) != len(desired)
+        assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.3, 0.4], [0, 0, 0])
+        # non-monotonic bands
+        assert_raises(ValueError, firls, 11, [0.2, 0.1], [0, 0])
+        assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.3, 0.3], [0] * 4)
+        assert_raises(ValueError, firls, 11, [0.3, 0.4, 0.1, 0.2], [0] * 4)
+        assert_raises(ValueError, firls, 11, [0.1, 0.3, 0.2, 0.4], [0] * 4)
+        # negative desired
+        assert_raises(ValueError, firls, 11, [0.1, 0.2], [-1, 1])
+        # len(weight) != len(pairs)
+        assert_raises(ValueError, firls, 11, [0.1, 0.2], [0, 0], [1, 2])
+        # negative weight
+        assert_raises(ValueError, firls, 11, [0.1, 0.2], [0, 0], [-1])
+
+    def test_firls(self):
+        N = 11  # number of taps in the filter
+        a = 0.1  # width of the transition band
+
+        # design a halfband symmetric low-pass filter
+        h = firls(N, [0, a, 0.5-a, 0.5], [1, 1, 0, 0], fs=1.0)
+
+        # make sure the filter has correct # of taps
+        assert_equal(len(h), N)
+
+        # make sure it is symmetric
+        midx = (N-1) // 2
+        assert_array_almost_equal(h[:midx], h[:-midx-1:-1])
+
+        # make sure the center tap is 0.5
+        assert_almost_equal(h[midx], 0.5)
+
+        # For halfband symmetric, odd coefficients (except the center)
+        # should be zero (really small)
+        hodd = np.hstack((h[1:midx:2], h[-midx+1::2]))
+        assert_array_almost_equal(hodd, 0)
+
+        # now check the frequency response
+        w, H = freqz(h, 1)
+        f = w/2/np.pi
+        Hmag = np.abs(H)
+
+        # check that the pass band is close to unity
+        idx = np.logical_and(f > 0, f < a)
+        assert_array_almost_equal(Hmag[idx], 1, decimal=3)
+
+        # check that the stop band is close to zero
+        idx = np.logical_and(f > 0.5-a, f < 0.5)
+        assert_array_almost_equal(Hmag[idx], 0, decimal=3)
+
+    def test_compare(self):
+        # compare to OCTAVE output
+        taps = firls(9, [0, 0.5, 0.55, 1], [1, 1, 0, 0], [1, 2])
+        # >> taps = firls(8, [0 0.5 0.55 1], [1 1 0 0], [1, 2]);
+        known_taps = [-6.26930101730182e-04, -1.03354450635036e-01,
+                      -9.81576747564301e-03, 3.17271686090449e-01,
+                      5.11409425599933e-01, 3.17271686090449e-01,
+                      -9.81576747564301e-03, -1.03354450635036e-01,
+                      -6.26930101730182e-04]
+        assert_allclose(taps, known_taps)
+
+        # compare to MATLAB output
+        taps = firls(11, [0, 0.5, 0.5, 1], [1, 1, 0, 0], [1, 2])
+        # >> taps = firls(10, [0 0.5 0.5 1], [1 1 0 0], [1, 2]);
+        known_taps = [
+            0.058545300496815, -0.014233383714318, -0.104688258464392,
+            0.012403323025279, 0.317930861136062, 0.488047220029700,
+            0.317930861136062, 0.012403323025279, -0.104688258464392,
+            -0.014233383714318, 0.058545300496815]
+        assert_allclose(taps, known_taps)
+
+        # With linear changes:
+        taps = firls(7, (0, 1, 2, 3, 4, 5), [1, 0, 0, 1, 1, 0], fs=20)
+        # >> taps = firls(6, [0, 0.1, 0.2, 0.3, 0.4, 0.5], [1, 0, 0, 1, 1, 0])
+        known_taps = [
+            1.156090832768218, -4.1385894727395849, 7.5288619164321826,
+            -8.5530572592947856, 7.5288619164321826, -4.1385894727395849,
+            1.156090832768218]
+        assert_allclose(taps, known_taps)
+
+        with np.testing.suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, "Keyword argument 'nyq'")
+            taps = firls(7, (0, 1, 2, 3, 4, 5), [1, 0, 0, 1, 1, 0], nyq=10)
+            assert_allclose(taps, known_taps)
+
+            with pytest.raises(ValueError, match='between 0 and 1'):
+                firls(7, [0, 1], [0, 1], nyq=0.5)
+
+    def test_rank_deficient(self):
+        # solve() runs but warns (only sometimes, so here we don't use match)
+        x = firls(21, [0, 0.1, 0.9, 1], [1, 1, 0, 0])
+        w, h = freqz(x, fs=2.)
+        assert_allclose(np.abs(h[:2]), 1., atol=1e-5)
+        assert_allclose(np.abs(h[-2:]), 0., atol=1e-6)
+        # switch to pinvh (tolerances could be higher with longer
+        # filters, but using shorter ones is faster computationally and
+        # the idea is the same)
+        x = firls(101, [0, 0.01, 0.99, 1], [1, 1, 0, 0])
+        w, h = freqz(x, fs=2.)
+        mask = w < 0.01
+        assert mask.sum() > 3
+        assert_allclose(np.abs(h[mask]), 1., atol=1e-4)
+        mask = w > 0.99
+        assert mask.sum() > 3
+        assert_allclose(np.abs(h[mask]), 0., atol=1e-4)
+
+    def test_nyq_deprecation(self):
+        with pytest.warns(DeprecationWarning,
+                          match="Keyword argument 'nyq' is deprecated in "
+                          ):
+            firls(1, (0, 1), (0, 0), nyq=10)
+
+
+class TestMinimumPhase:
+
+    def test_bad_args(self):
+        # not enough taps
+        assert_raises(ValueError, minimum_phase, [1.])
+        assert_raises(ValueError, minimum_phase, [1., 1.])
+        assert_raises(ValueError, minimum_phase, np.full(10, 1j))
+        assert_raises(ValueError, minimum_phase, 'foo')
+        assert_raises(ValueError, minimum_phase, np.ones(10), n_fft=8)
+        assert_raises(ValueError, minimum_phase, np.ones(10), method='foo')
+        assert_warns(RuntimeWarning, minimum_phase, np.arange(3))
+
+    def test_homomorphic(self):
+        # check that it can recover frequency responses of arbitrary
+        # linear-phase filters
+
+        # for some cases we can get the actual filter back
+        h = [1, -1]
+        h_new = minimum_phase(np.convolve(h, h[::-1]))
+        assert_allclose(h_new, h, rtol=0.05)
+
+        # but in general we only guarantee we get the magnitude back
+        rng = np.random.RandomState(0)
+        for n in (2, 3, 10, 11, 15, 16, 17, 20, 21, 100, 101):
+            h = rng.randn(n)
+            h_new = minimum_phase(np.convolve(h, h[::-1]))
+            assert_allclose(np.abs(fft(h_new)),
+                            np.abs(fft(h)), rtol=1e-4)
+
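+    def test_homomorphic_energy_sketch(self):
+        # Illustrative sketch, not part of the original suite:
+        # np.convolve(h, h[::-1]) is the autocorrelation of h, whose spectrum
+        # is |H|^2, so the recovered minimum-phase factor should carry
+        # roughly the same total energy as h (Parseval), even though its
+        # taps differ.
+        rng = np.random.RandomState(0)
+        h = rng.randn(15)
+        h_min = minimum_phase(np.convolve(h, h[::-1]))
+        assert_equal(len(h_min), len(h))
+        assert_allclose(np.sum(h_min ** 2), np.sum(h ** 2), rtol=1e-2)
+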
+    def test_hilbert(self):
+        # compare to MATLAB output of reference implementation
+
+        # f=[0 0.3 0.5 1];
+        # a=[1 1 0 0];
+        # h=remez(11,f,a);
+        h = remez(12, [0, 0.3, 0.5, 1], [1, 0], fs=2.)
+        k = [0.349585548646686, 0.373552164395447, 0.326082685363438,
+             0.077152207480935, -0.129943946349364, -0.059355880509749]
+        m = minimum_phase(h, 'hilbert')
+        assert_allclose(m, k, rtol=5e-3)
+
+        # f=[0 0.8 0.9 1];
+        # a=[0 0 1 1];
+        # h=remez(20,f,a);
+        h = remez(21, [0, 0.8, 0.9, 1], [0, 1], fs=2.)
+        k = [0.232486803906329, -0.133551833687071, 0.151871456867244,
+             -0.157957283165866, 0.151739294892963, -0.129293146705090,
+             0.100787844523204, -0.065832656741252, 0.035361328741024,
+             -0.014977068692269, -0.158416139047557]
+        m = minimum_phase(h, 'hilbert', n_fft=2**19)
+        assert_allclose(m, k, rtol=2e-3)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_ltisys.py b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_ltisys.py
new file mode 100644
index 00000000..4ac4ec21
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_ltisys.py
@@ -0,0 +1,1290 @@
+import warnings
+
+import numpy as np
+from numpy.testing import (assert_almost_equal, assert_equal, assert_allclose,
+                           assert_, suppress_warnings)
+from pytest import raises as assert_raises
+
+from scipy.signal import (ss2tf, tf2ss, lsim2, impulse2, step2, lti,
+                          dlti, bode, freqresp, lsim, impulse, step,
+                          abcd_normalize, place_poles,
+                          TransferFunction, StateSpace, ZerosPolesGain)
+from scipy.signal._filter_design import BadCoefficients
+import scipy.linalg as linalg
+from scipy.sparse._sputils import matrix
+
+
+def _assert_poles_close(P1,P2, rtol=1e-8, atol=1e-8):
+    """
+    Check each pole in P1 is close to a pole in P2 with a 1e-8
+    relative tolerance or 1e-8 absolute tolerance (useful for zero poles).
+    These tolerances are very strict but the systems tested are known to
+    accept these poles so we should not be far from what is requested.
+    """
+    P2 = P2.copy()
+    for p1 in P1:
+        found = False
+        for p2_idx in range(P2.shape[0]):
+            if np.allclose([np.real(p1), np.imag(p1)],
+                           [np.real(P2[p2_idx]), np.imag(P2[p2_idx])],
+                           rtol, atol):
+                found = True
+                # np.delete returns a new array, so rebind P2 to make sure a
+                # matched pole cannot be matched twice
+                P2 = np.delete(P2, p2_idx)
+                break
+        if not found:
+            raise ValueError("Can't find pole " + str(p1) + " in " + str(P2))
+
+
+class TestPlacePoles:
+
+    def _check(self, A, B, P, **kwargs):
+        """
+        Perform the most common tests on the poles computed by place_poles
+        and return the Bunch object for further specific tests
+        """
+        fsf = place_poles(A, B, P, **kwargs)
+        expected, _ = np.linalg.eig(A - np.dot(B, fsf.gain_matrix))
+        _assert_poles_close(expected, fsf.requested_poles)
+        _assert_poles_close(expected, fsf.computed_poles)
+        _assert_poles_close(P,fsf.requested_poles)
+        return fsf
+
+    def test_real(self):
+        # Test real pole placement using the KNV0 and YT algorithms and
+        # example 1 in section 4 of the reference publication (see the
+        # place_poles docstring)
+        A = np.array([1.380, -0.2077, 6.715, -5.676, -0.5814, -4.290, 0,
+                      0.6750, 1.067, 4.273, -6.654, 5.893, 0.0480, 4.273,
+                      1.343, -2.104]).reshape(4, 4)
+        B = np.array([0, 5.679, 1.136, 1.136, 0, 0, -3.146,0]).reshape(4, 2)
+        P = np.array([-0.2, -0.5, -5.0566, -8.6659])
+
+        # Check that both KNV and YT compute correct K matrix
+        self._check(A, B, P, method='KNV0')
+        self._check(A, B, P, method='YT')
+
+        # Try to reach the specific case in _YT_real where two singular
+        # values are almost equal. This is to improve code coverage but I
+        # have no way to be sure this code is really reached
+
+        # on some architectures this can lead to a RuntimeWarning invalid
+        # value in divide (see gh-7590), so suppress it for now
+        with np.errstate(invalid='ignore'):
+            self._check(A, B, (2,2,3,3))
+
+    def test_complex(self):
+        # Test complex pole placement on a linearized car model, taken from L.
+        # Jaulin, Automatique pour la robotique, Cours et Exercices, iSTE
+        # editions p 184/185
+        A = np.array([[0, 7, 0, 0],
+                      [0, 0, 0, 7/3.],
+                      [0, 0, 0, 0],
+                      [0, 0, 0, 0]])
+        B = np.array([[0, 0],
+                      [0, 0],
+                      [1, 0],
+                      [0, 1]])
+        # Test complex poles on YT
+        P = np.array([-3, -1, -2-1j, -2+1j])
+        # on macOS arm64 this can lead to a RuntimeWarning invalid
+        # value in divide, so suppress it for now
+        with np.errstate(divide='ignore', invalid='ignore'):
+            self._check(A, B, P)
+
+        # Try to reach the specific case in _YT_complex where two singular
+        # values are almost equal. This is to improve code coverage but I
+        # have no way to be sure this code is really reached
+
+        P = [0-1e-6j,0+1e-6j,-10,10]
+        with np.errstate(divide='ignore', invalid='ignore'):
+            self._check(A, B, P, maxiter=1000)
+
+        # Try to reach the specific case in _YT_complex where the rank two
+        # update yields two null vectors. This test was found via Monte Carlo.
+
+        A = np.array(
+                    [-2148,-2902, -2267, -598, -1722, -1829, -165, -283, -2546,
+                   -167, -754, -2285, -543, -1700, -584, -2978, -925, -1300,
+                   -1583, -984, -386, -2650, -764, -897, -517, -1598, 2, -1709,
+                   -291, -338, -153, -1804, -1106, -1168, -867, -2297]
+                   ).reshape(6,6)
+
+        B = np.array(
+                    [-108, -374, -524, -1285, -1232, -161, -1204, -672, -637,
+                     -15, -483, -23, -931, -780, -1245, -1129, -1290, -1502,
+                     -952, -1374, -62, -964, -930, -939, -792, -756, -1437,
+                     -491, -1543, -686]
+                     ).reshape(6,5)
+        P = [-25.-29.j, -25.+29.j, 31.-42.j, 31.+42.j, 33.-41.j, 33.+41.j]
+        self._check(A, B, P)
+
+        # Use a lot of poles to go through all cases for update_order
+        # in _YT_loop
+
+        big_A = np.ones((11,11))-np.eye(11)
+        big_B = np.ones((11,10))-np.diag([1]*10,1)[:,1:]
+        big_A[:6,:6] = A
+        big_B[:6,:5] = B
+
+        P = [-10,-20,-30,40,50,60,70,-20-5j,-20+5j,5+3j,5-3j]
+        with np.errstate(divide='ignore', invalid='ignore'):
+            self._check(big_A, big_B, P)
+
+        #check with only complex poles and only real poles
+        P = [-10,-20,-30,-40,-50,-60,-70,-80,-90,-100]
+        self._check(big_A[:-1,:-1], big_B[:-1,:-1], P)
+        P = [-10+10j,-20+20j,-30+30j,-40+40j,-50+50j,
+             -10-10j,-20-20j,-30-30j,-40-40j,-50-50j]
+        self._check(big_A[:-1,:-1], big_B[:-1,:-1], P)
+
+        # need a 5x5 array to ensure YT handles the case of only one
+        # real pole and several complex ones properly
+        A = np.array([0,7,0,0,0,0,0,7/3.,0,0,0,0,0,0,0,0,
+                      0,0,0,5,0,0,0,0,9]).reshape(5,5)
+        B = np.array([0,0,0,0,1,0,0,1,2,3]).reshape(5,2)
+        P = np.array([-2, -3+1j, -3-1j, -1+1j, -1-1j])
+        with np.errstate(divide='ignore', invalid='ignore'):
+            place_poles(A, B, P)
+
+        # same test with an odd number of real poles > 1
+        # this is another specific case of YT
+        P = np.array([-2, -3, -4, -1+1j, -1-1j])
+        with np.errstate(divide='ignore', invalid='ignore'):
+            self._check(A, B, P)
+
+    def test_tricky_B(self):
+        # check that we correctly handle B matrices with a single column
+        # and with n columns (where A has shape (n, n))
+        A = np.array([1.380, -0.2077, 6.715, -5.676, -0.5814, -4.290, 0,
+                      0.6750, 1.067, 4.273, -6.654, 5.893, 0.0480, 4.273,
+                      1.343, -2.104]).reshape(4, 4)
+        B = np.array([0, 5.679, 1.136, 1.136, 0, 0, -3.146, 0, 1, 2, 3, 4,
+                      5, 6, 7, 8]).reshape(4, 4)
+
+        # Neither KNV nor YT is called here; it's a special case with a
+        # unique solution
+        P = np.array([-0.2, -0.5, -5.0566, -8.6659])
+        fsf = self._check(A, B, P)
+        # rtol and nb_iter should be set to np.nan as the identity matrix
+        # can be used as the transfer matrix
+        assert_equal(fsf.rtol, np.nan)
+        assert_equal(fsf.nb_iter, np.nan)
+
+        # check with complex poles too as they trigger a specific case in
+        # the specific case :-)
+        P = np.array((-2+1j,-2-1j,-3,-2))
+        fsf = self._check(A, B, P)
+        assert_equal(fsf.rtol, np.nan)
+        assert_equal(fsf.nb_iter, np.nan)
+
+        # now test with a B matrix with only one column (no optimisation)
+        B = B[:,0].reshape(4,1)
+        P = np.array((-2+1j,-2-1j,-3,-2))
+        fsf = self._check(A, B, P)
+
+        # we can't optimize anything, so check they are set to 0 as expected
+        assert_equal(fsf.rtol, 0)
+        assert_equal(fsf.nb_iter, 0)
+
+    def test_errors(self):
+        # Test input mistakes from user
+        A = np.array([0,7,0,0,0,0,0,7/3.,0,0,0,0,0,0,0,0]).reshape(4,4)
+        B = np.array([0,0,0,0,1,0,0,1]).reshape(4,2)
+
+        # should fail as the method keyword is invalid
+        assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3,-2.4),
+                      method="foo")
+
+        # should fail as poles is not a 1D array
+        assert_raises(ValueError, place_poles, A, B,
+                      np.array((-2.1,-2.2,-2.3,-2.4)).reshape(4,1))
+
+        # should fail as A is not a 2D array
+        assert_raises(ValueError, place_poles, A[:,:,np.newaxis], B,
+                      (-2.1,-2.2,-2.3,-2.4))
+
+        # should fail as B is not a 2D array
+        assert_raises(ValueError, place_poles, A, B[:,:,np.newaxis],
+                      (-2.1,-2.2,-2.3,-2.4))
+
+        # should fail as there are too many poles
+        assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3,-2.4,-3))
+
+        # should fail as there are not enough poles
+        assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3))
+
+        # should fail as rtol is greater than 1
+        assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3,-2.4),
+                      rtol=42)
+
+        # should fail as maxiter is smaller than 1
+        assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3,-2.4),
+                      maxiter=-42)
+
+        # should fail as the pole -2 is repeated more often than rank(B)
+        assert_raises(ValueError, place_poles, A, B, (-2,-2,-2,-2))
+
+        # uncontrollable system
+        assert_raises(ValueError, place_poles, np.ones((4,4)),
+                      np.ones((4,2)), (1,2,3,4))
+
+        # Should not raise ValueError as the poles can be placed but should
+        # raise a warning as the convergence is not reached
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter("always")
+            fsf = place_poles(A, B, (-1,-2,-3,-4), rtol=1e-16, maxiter=42)
+            assert_(len(w) == 1)
+            assert_(issubclass(w[-1].category, UserWarning))
+            assert_("Convergence was not reached after maxiter iterations"
+                    in str(w[-1].message))
+            assert_equal(fsf.nb_iter, 42)
+
+        # should fail as a complex pole is missing its conjugate
+        assert_raises(ValueError, place_poles, A, B, (-2+1j,-2-1j,-2+3j,-2))
+
+        # should fail as A is not square
+        assert_raises(ValueError, place_poles, A[:,:3], B, (-2,-3,-4,-5))
+
+        # should fail as B does not have the same number of rows as A
+        assert_raises(ValueError, place_poles, A, B[:3,:], (-2,-3,-4,-5))
+
+        # should fail as KNV0 does not support complex poles
+        assert_raises(ValueError, place_poles, A, B,
+                      (-2+1j,-2-1j,-2+3j,-2-3j), method="KNV0")
+
+
+class TestSS2TF:
+
+    def check_matrix_shapes(self, p, q, r):
+        ss2tf(np.zeros((p, p)),
+              np.zeros((p, q)),
+              np.zeros((r, p)),
+              np.zeros((r, q)), 0)
+
+    def test_shapes(self):
+        # Each tuple holds:
+        #   number of states, number of inputs, number of outputs
+        for p, q, r in [(3, 3, 3), (1, 3, 3), (1, 1, 1)]:
+            self.check_matrix_shapes(p, q, r)
+
+    def test_basic(self):
+        # Test a round trip through tf2ss and ss2tf.
+        b = np.array([1.0, 3.0, 5.0])
+        a = np.array([1.0, 2.0, 3.0])
+
+        A, B, C, D = tf2ss(b, a)
+        assert_allclose(A, [[-2, -3], [1, 0]], rtol=1e-13)
+        assert_allclose(B, [[1], [0]], rtol=1e-13)
+        assert_allclose(C, [[1, 2]], rtol=1e-13)
+        assert_allclose(D, [[1]], rtol=1e-14)
+
+        bb, aa = ss2tf(A, B, C, D)
+        assert_allclose(bb[0], b, rtol=1e-13)
+        assert_allclose(aa, a, rtol=1e-13)
+
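+    def test_companion_char_poly_sketch(self):
+        # Illustrative sketch, not part of the original suite: tf2ss returns
+        # a companion-form A (consistent with the matrix asserted above), so
+        # the characteristic polynomial of A reproduces the denominator.
+        b = np.array([1.0, 3.0, 5.0])
+        a = np.array([1.0, 2.0, 3.0])
+        A, B, C, D = tf2ss(b, a)
+        assert_allclose(np.poly(A), a, rtol=1e-13)
+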
+    def test_zero_order_round_trip(self):
+        # See gh-5760
+        tf = (2, 1)
+        A, B, C, D = tf2ss(*tf)
+        assert_allclose(A, [[0]], rtol=1e-13)
+        assert_allclose(B, [[0]], rtol=1e-13)
+        assert_allclose(C, [[0]], rtol=1e-13)
+        assert_allclose(D, [[2]], rtol=1e-13)
+
+        num, den = ss2tf(A, B, C, D)
+        assert_allclose(num, [[2, 0]], rtol=1e-13)
+        assert_allclose(den, [1, 0], rtol=1e-13)
+
+        tf = ([[5], [2]], 1)
+        A, B, C, D = tf2ss(*tf)
+        assert_allclose(A, [[0]], rtol=1e-13)
+        assert_allclose(B, [[0]], rtol=1e-13)
+        assert_allclose(C, [[0], [0]], rtol=1e-13)
+        assert_allclose(D, [[5], [2]], rtol=1e-13)
+
+        num, den = ss2tf(A, B, C, D)
+        assert_allclose(num, [[5, 0], [2, 0]], rtol=1e-13)
+        assert_allclose(den, [1, 0], rtol=1e-13)
+
+    def test_simo_round_trip(self):
+        # See gh-5753
+        tf = ([[1, 2], [1, 1]], [1, 2])
+        A, B, C, D = tf2ss(*tf)
+        assert_allclose(A, [[-2]], rtol=1e-13)
+        assert_allclose(B, [[1]], rtol=1e-13)
+        assert_allclose(C, [[0], [-1]], rtol=1e-13)
+        assert_allclose(D, [[1], [1]], rtol=1e-13)
+
+        num, den = ss2tf(A, B, C, D)
+        assert_allclose(num, [[1, 2], [1, 1]], rtol=1e-13)
+        assert_allclose(den, [1, 2], rtol=1e-13)
+
+        tf = ([[1, 0, 1], [1, 1, 1]], [1, 1, 1])
+        A, B, C, D = tf2ss(*tf)
+        assert_allclose(A, [[-1, -1], [1, 0]], rtol=1e-13)
+        assert_allclose(B, [[1], [0]], rtol=1e-13)
+        assert_allclose(C, [[-1, 0], [0, 0]], rtol=1e-13)
+        assert_allclose(D, [[1], [1]], rtol=1e-13)
+
+        num, den = ss2tf(A, B, C, D)
+        assert_allclose(num, [[1, 0, 1], [1, 1, 1]], rtol=1e-13)
+        assert_allclose(den, [1, 1, 1], rtol=1e-13)
+
+        tf = ([[1, 2, 3], [1, 2, 3]], [1, 2, 3, 4])
+        A, B, C, D = tf2ss(*tf)
+        assert_allclose(A, [[-2, -3, -4], [1, 0, 0], [0, 1, 0]], rtol=1e-13)
+        assert_allclose(B, [[1], [0], [0]], rtol=1e-13)
+        assert_allclose(C, [[1, 2, 3], [1, 2, 3]], rtol=1e-13)
+        assert_allclose(D, [[0], [0]], rtol=1e-13)
+
+        num, den = ss2tf(A, B, C, D)
+        assert_allclose(num, [[0, 1, 2, 3], [0, 1, 2, 3]], rtol=1e-13)
+        assert_allclose(den, [1, 2, 3, 4], rtol=1e-13)
+
+        tf = (np.array([1, [2, 3]], dtype=object), [1, 6])
+        A, B, C, D = tf2ss(*tf)
+        assert_allclose(A, [[-6]], rtol=1e-31)
+        assert_allclose(B, [[1]], rtol=1e-31)
+        assert_allclose(C, [[1], [-9]], rtol=1e-31)
+        assert_allclose(D, [[0], [2]], rtol=1e-31)
+
+        num, den = ss2tf(A, B, C, D)
+        assert_allclose(num, [[0, 1], [2, 3]], rtol=1e-13)
+        assert_allclose(den, [1, 6], rtol=1e-13)
+
+        tf = (np.array([[1, -3], [1, 2, 3]], dtype=object), [1, 6, 5])
+        A, B, C, D = tf2ss(*tf)
+        assert_allclose(A, [[-6, -5], [1, 0]], rtol=1e-13)
+        assert_allclose(B, [[1], [0]], rtol=1e-13)
+        assert_allclose(C, [[1, -3], [-4, -2]], rtol=1e-13)
+        assert_allclose(D, [[0], [1]], rtol=1e-13)
+
+        num, den = ss2tf(A, B, C, D)
+        assert_allclose(num, [[0, 1, -3], [1, 2, 3]], rtol=1e-13)
+        assert_allclose(den, [1, 6, 5], rtol=1e-13)
+
+    def test_all_int_arrays(self):
+        A = [[0, 1, 0], [0, 0, 1], [-3, -4, -2]]
+        B = [[0], [0], [1]]
+        C = [[5, 1, 0]]
+        D = [[0]]
+        num, den = ss2tf(A, B, C, D)
+        assert_allclose(num, [[0.0, 0.0, 1.0, 5.0]], rtol=1e-13, atol=1e-14)
+        assert_allclose(den, [1.0, 2.0, 4.0, 3.0], rtol=1e-13)
+
+    def test_multioutput(self):
+        # Regression test for gh-2669.
+
+        # 4 states
+        A = np.array([[-1.0, 0.0, 1.0, 0.0],
+                      [-1.0, 0.0, 2.0, 0.0],
+                      [-4.0, 0.0, 3.0, 0.0],
+                      [-8.0, 8.0, 0.0, 4.0]])
+
+        # 1 input
+        B = np.array([[0.3],
+                      [0.0],
+                      [7.0],
+                      [0.0]])
+
+        # 3 outputs
+        C = np.array([[0.0, 1.0, 0.0, 0.0],
+                      [0.0, 0.0, 0.0, 1.0],
+                      [8.0, 8.0, 0.0, 0.0]])
+
+        D = np.array([[0.0],
+                      [0.0],
+                      [1.0]])
+
+        # Get the transfer functions for all the outputs in one call.
+        b_all, a = ss2tf(A, B, C, D)
+
+        # Get the transfer functions for each output separately.
+        b0, a0 = ss2tf(A, B, C[0], D[0])
+        b1, a1 = ss2tf(A, B, C[1], D[1])
+        b2, a2 = ss2tf(A, B, C[2], D[2])
+
+        # Check that we got the same results.
+        assert_allclose(a0, a, rtol=1e-13)
+        assert_allclose(a1, a, rtol=1e-13)
+        assert_allclose(a2, a, rtol=1e-13)
+        assert_allclose(b_all, np.vstack((b0, b1, b2)), rtol=1e-13, atol=1e-14)
+
+
+class TestLsim:
+    def lti_nowarn(self, *args):
+        with suppress_warnings() as sup:
+            sup.filter(BadCoefficients)
+            system = lti(*args)
+        return system
+
+    def test_first_order(self):
+        # y' = -y
+        # exact solution is y(t) = exp(-t)
+        system = self.lti_nowarn(-1.,1.,1.,0.)
+        t = np.linspace(0,5)
+        u = np.zeros_like(t)
+        tout, y, x = lsim(system, u, t, X0=[1.0])
+        expected_x = np.exp(-tout)
+        assert_almost_equal(x, expected_x)
+        assert_almost_equal(y, expected_x)
+
+    def test_integrator(self):
+        # integrator: y' = u
+        system = self.lti_nowarn(0., 1., 1., 0.)
+        t = np.linspace(0,5)
+        u = t
+        tout, y, x = lsim(system, u, t)
+        expected_x = 0.5 * tout**2
+        assert_almost_equal(x, expected_x)
+        assert_almost_equal(y, expected_x)
+
+    def test_double_integrator(self):
+        # double integrator: y'' = 2u
+        A = matrix([[0., 1.], [0., 0.]])
+        B = matrix([[0.], [1.]])
+        C = matrix([[2., 0.]])
+        system = self.lti_nowarn(A, B, C, 0.)
+        t = np.linspace(0,5)
+        u = np.ones_like(t)
+        tout, y, x = lsim(system, u, t)
+        expected_x = np.transpose(np.array([0.5 * tout**2, tout]))
+        expected_y = tout**2
+        assert_almost_equal(x, expected_x)
+        assert_almost_equal(y, expected_y)
+
+    def test_jordan_block(self):
+        # Non-diagonalizable A matrix
+        #   x1' + x1 = x2
+        #   x2' + x2 = u
+        #   y = x1
+        # Exact solution with u = 0 is y(t) = t exp(-t)
+        A = matrix([[-1., 1.], [0., -1.]])
+        B = matrix([[0.], [1.]])
+        C = matrix([[1., 0.]])
+        system = self.lti_nowarn(A, B, C, 0.)
+        t = np.linspace(0,5)
+        u = np.zeros_like(t)
+        tout, y, x = lsim(system, u, t, X0=[0.0, 1.0])
+        expected_y = tout * np.exp(-tout)
+        assert_almost_equal(y, expected_y)
+
+    def test_miso(self):
+        # A system with two state variables, two inputs, and one output.
+        A = np.array([[-1.0, 0.0], [0.0, -2.0]])
+        B = np.array([[1.0, 0.0], [0.0, 1.0]])
+        C = np.array([1.0, 0.0])
+        D = np.zeros((1,2))
+        system = self.lti_nowarn(A, B, C, D)
+
+        t = np.linspace(0, 5.0, 101)
+        u = np.zeros_like(t)
+        tout, y, x = lsim(system, u, t, X0=[1.0, 1.0])
+        expected_y = np.exp(-tout)
+        expected_x0 = np.exp(-tout)
+        expected_x1 = np.exp(-2.0*tout)
+        assert_almost_equal(y, expected_y)
+        assert_almost_equal(x[:,0], expected_x0)
+        assert_almost_equal(x[:,1], expected_x1)
+
+    def test_nonzero_initial_time(self):
+        system = self.lti_nowarn(-1.,1.,1.,0.)
+        t = np.linspace(1,2)
+        u = np.zeros_like(t)
+        tout, y, x = lsim(system, u, t, X0=[1.0])
+        expected_y = np.exp(-tout)
+        assert_almost_equal(y, expected_y)
+
+
+class Test_lsim2:
+
+    def test_01(self):
+        t = np.linspace(0,10,1001)
+        u = np.zeros_like(t)
+        # First order system: x'(t) + x(t) = u(t), x(0) = 1.
+        # Exact solution is x(t) = exp(-t).
+        system = ([1.0],[1.0,1.0])
+        tout, y, x = lsim2(system, u, t, X0=[1.0])
+        expected_x = np.exp(-tout)
+        assert_almost_equal(x[:,0], expected_x)
+
+    def test_02(self):
+        t = np.array([0.0, 1.0, 1.0, 3.0])
+        u = np.array([0.0, 0.0, 1.0, 1.0])
+        # Simple integrator: x'(t) = u(t)
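+        # The repeated time value t=1.0 encodes the jump in u at t=1, so the
+        # state should hold at 1.0 until t=1 and then grow linearly, i.e.
+        # x(t) = max(1, t).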
+        system = ([1.0],[1.0,0.0])
+        tout, y, x = lsim2(system, u, t, X0=[1.0])
+        expected_x = np.maximum(1.0, tout)
+        assert_almost_equal(x[:,0], expected_x)
+
+    def test_03(self):
+        t = np.array([0.0, 1.0, 1.0, 1.1, 1.1, 2.0])
+        u = np.array([0.0, 0.0, 1.0, 1.0, 0.0, 0.0])
+        # Simple integrator:  x'(t) = u(t)
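+        # u is a unit pulse of width 0.1 (again encoded with a repeated time
+        # value), so the integrator ramps from 0 to 0.1 and then holds.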
+        system = ([1.0],[1.0, 0.0])
+        tout, y, x = lsim2(system, u, t, hmax=0.01)
+        expected_x = np.array([0.0, 0.0, 0.0, 0.1, 0.1, 0.1])
+        assert_almost_equal(x[:,0], expected_x)
+
+    def test_04(self):
+        t = np.linspace(0, 10, 1001)
+        u = np.zeros_like(t)
+        # Second order system with a repeated root:
+        #     x''(t) + 2*x'(t) + x(t) = 0.
+        # With the state vector initialized as X0 = [1.0, 0.0], the first
+        # state of the canonical realization decays as (1-t)*exp(-t).
+        system = ([1.0], [1.0, 2.0, 1.0])
+        tout, y, x = lsim2(system, u, t, X0=[1.0, 0.0])
+        expected_x = (1.0 - tout) * np.exp(-tout)
+        assert_almost_equal(x[:,0], expected_x)
+
+    def test_05(self):
+        # The call to lsim2 triggers a "BadCoefficients" warning from
+        # scipy.signal._filter_design, but the test passes regardless.  The
+        # warning appears to stem from the incomplete handling of multi-input
+        # systems in scipy.signal.
+
+        # A system with two state variables, two inputs, and one output.
+        A = np.array([[-1.0, 0.0], [0.0, -2.0]])
+        B = np.array([[1.0, 0.0], [0.0, 1.0]])
+        C = np.array([1.0, 0.0])
+        D = np.zeros((1, 2))
+
+        t = np.linspace(0, 10.0, 101)
+        with suppress_warnings() as sup:
+            sup.filter(BadCoefficients)
+            tout, y, x = lsim2((A,B,C,D), T=t, X0=[1.0, 1.0])
+        expected_y = np.exp(-tout)
+        expected_x0 = np.exp(-tout)
+        expected_x1 = np.exp(-2.0 * tout)
+        assert_almost_equal(y, expected_y)
+        assert_almost_equal(x[:,0], expected_x0)
+        assert_almost_equal(x[:,1], expected_x1)
+
+    def test_06(self):
+        # Test use of the default values of the arguments `T` and `U`.
+        # Second order system with a repeated root:
+        #     x''(t) + 2*x'(t) + x(t) = 0.
+        # With the state vector initialized as X0 = [1.0, 0.0], the first
+        # state of the canonical realization decays as (1-t)*exp(-t).
+        system = ([1.0], [1.0, 2.0, 1.0])
+        tout, y, x = lsim2(system, X0=[1.0, 0.0])
+        expected_x = (1.0 - tout) * np.exp(-tout)
+        assert_almost_equal(x[:,0], expected_x)
+
+
+class _TestImpulseFuncs:
+    # Common tests for impulse/impulse2 (= self.func)
+
+    def test_01(self):
+        # First order system: x'(t) + x(t) = u(t)
+        # Exact impulse response is x(t) = exp(-t).
+        system = ([1.0], [1.0,1.0])
+        tout, y = self.func(system)
+        expected_y = np.exp(-tout)
+        assert_almost_equal(y, expected_y)
+
+    def test_02(self):
+        # Specify the desired time values for the output.
+
+        # First order system: x'(t) + x(t) = u(t)
+        # Exact impulse response is x(t) = exp(-t).
+        system = ([1.0], [1.0,1.0])
+        n = 21
+        t = np.linspace(0, 2.0, n)
+        tout, y = self.func(system, T=t)
+        assert_equal(tout.shape, (n,))
+        assert_almost_equal(tout, t)
+        expected_y = np.exp(-t)
+        assert_almost_equal(y, expected_y)
+
+    def test_03(self):
+        # Specify an initial condition as a scalar.
+
+        # First order system: x'(t) + x(t) = u(t), x(0)=3.0
+        # Exact impulse response is x(t) = 4*exp(-t).
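+        # (the unit impulse adds B = 1 to the initial state, so the response
+        # decays from x(0+) = 3 + 1 = 4)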
+        system = ([1.0], [1.0,1.0])
+        tout, y = self.func(system, X0=3.0)
+        expected_y = 4.0 * np.exp(-tout)
+        assert_almost_equal(y, expected_y)
+
+    def test_04(self):
+        # Specify an initial condition as a list.
+
+        # First order system: x'(t) + x(t) = u(t), x(0)=3.0
+        # Exact impulse response is x(t) = 4*exp(-t).
+        system = ([1.0], [1.0,1.0])
+        tout, y = self.func(system, X0=[3.0])
+        expected_y = 4.0 * np.exp(-tout)
+        assert_almost_equal(y, expected_y)
+
+    def test_05(self):
+        # Simple integrator: x'(t) = u(t)
+        system = ([1.0], [1.0,0.0])
+        tout, y = self.func(system)
+        expected_y = np.ones_like(tout)
+        assert_almost_equal(y, expected_y)
+
+    def test_06(self):
+        # Second order system with a repeated root:
+        #     x''(t) + 2*x'(t) + x(t) = u(t)
+        # The exact impulse response is t*exp(-t).
+        system = ([1.0], [1.0, 2.0, 1.0])
+        tout, y = self.func(system)
+        expected_y = tout * np.exp(-tout)
+        assert_almost_equal(y, expected_y)
+
+    def test_array_like(self):
+        # Test that function can accept sequences, scalars.
+        system = ([1.0], [1.0, 2.0, 1.0])
+        # TODO: add meaningful test where X0 is a list
+        tout, y = self.func(system, X0=[3], T=[5, 6])
+        tout, y = self.func(system, X0=[3], T=[5])
+
+    def test_array_like2(self):
+        system = ([1.0], [1.0, 2.0, 1.0])
+        tout, y = self.func(system, X0=3, T=5)
+
+
+class TestImpulse2(_TestImpulseFuncs):
+    def setup_method(self):
+        self.func = impulse2
+
+
+class TestImpulse(_TestImpulseFuncs):
+    def setup_method(self):
+        self.func = impulse
+
+
+class _TestStepFuncs:
+    def test_01(self):
+        # First order system: x'(t) + x(t) = u(t)
+        # Exact step response is x(t) = 1 - exp(-t).
+        system = ([1.0], [1.0,1.0])
+        tout, y = self.func(system)
+        expected_y = 1.0 - np.exp(-tout)
+        assert_almost_equal(y, expected_y)
+
+    def test_02(self):
+        # Specify the desired time values for the output.
+
+        # First order system: x'(t) + x(t) = u(t)
+        # Exact step response is x(t) = 1 - exp(-t).
+        system = ([1.0], [1.0,1.0])
+        n = 21
+        t = np.linspace(0, 2.0, n)
+        tout, y = self.func(system, T=t)
+        assert_equal(tout.shape, (n,))
+        assert_almost_equal(tout, t)
+        expected_y = 1 - np.exp(-t)
+        assert_almost_equal(y, expected_y)
+
+    def test_03(self):
+        # Specify an initial condition as a scalar.
+
+        # First order system: x'(t) + x(t) = u(t), x(0)=3.0
+        # Exact step response is x(t) = 1 + 2*exp(-t).
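+        # (the steady state is 1 and the transient decays from x(0) = 3,
+        # hence the 2*exp(-t) term)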
+        system = ([1.0], [1.0,1.0])
+        tout, y = self.func(system, X0=3.0)
+        expected_y = 1 + 2.0*np.exp(-tout)
+        assert_almost_equal(y, expected_y)
+
+    def test_04(self):
+        # Specify an initial condition as a list.
+
+        # First order system: x'(t) + x(t) = u(t), x(0)=3.0
+        # Exact step response is x(t) = 1 + 2*exp(-t).
+        system = ([1.0], [1.0,1.0])
+        tout, y = self.func(system, X0=[3.0])
+        expected_y = 1 + 2.0*np.exp(-tout)
+        assert_almost_equal(y, expected_y)
+
+    def test_05(self):
+        # Simple integrator: x'(t) = u(t)
+        # Exact step response is x(t) = t.
+        system = ([1.0],[1.0,0.0])
+        tout, y = self.func(system)
+        expected_y = tout
+        assert_almost_equal(y, expected_y)
+
+    def test_06(self):
+        # Second order system with a repeated root:
+        #     x''(t) + 2*x'(t) + x(t) = u(t)
+        # The exact step response is 1 - (1 + t)*exp(-t).
+        system = ([1.0], [1.0, 2.0, 1.0])
+        tout, y = self.func(system)
+        expected_y = 1 - (1 + tout) * np.exp(-tout)
+        assert_almost_equal(y, expected_y)
+
+    def test_array_like(self):
+        # Test that function can accept sequences, scalars.
+        system = ([1.0], [1.0, 2.0, 1.0])
+        # TODO: add meaningful test where X0 is a list
+        tout, y = self.func(system, T=[5, 6])
+
+
+class TestStep2(_TestStepFuncs):
+    def setup_method(self):
+        self.func = step2
+
+    def test_05(self):
+        # This test is almost the same as the one it overrides in the base
+        # class.  The only difference is the tolerances passed to step2:
+        # the default tolerances are not accurate enough for this test.
+
+        # Simple integrator: x'(t) = u(t)
+        # Exact step response is x(t) = t.
+        system = ([1.0], [1.0,0.0])
+        tout, y = self.func(system, atol=1e-10, rtol=1e-8)
+        expected_y = tout
+        assert_almost_equal(y, expected_y)
+
+
+class TestStep(_TestStepFuncs):
+    def setup_method(self):
+        self.func = step
+
+    def test_complex_input(self):
+        # Test that complex input doesn't raise an error.
+        # `step` doesn't seem to have been designed for complex input, but this
+        # works and may be used, so add regression test.  See gh-2654.
+        step(([], [-1], 1+0j))
+
+
+class TestLti:
+    def test_lti_instantiation(self):
+        # Test that lti can be instantiated with sequences, scalars.
+        # See PR-225.
+
+        # TransferFunction
+        s = lti([1], [-1])
+        assert_(isinstance(s, TransferFunction))
+        assert_(isinstance(s, lti))
+        assert_(not isinstance(s, dlti))
+        assert_(s.dt is None)
+
+        # ZerosPolesGain
+        s = lti(np.array([]), np.array([-1]), 1)
+        assert_(isinstance(s, ZerosPolesGain))
+        assert_(isinstance(s, lti))
+        assert_(not isinstance(s, dlti))
+        assert_(s.dt is None)
+
+        # StateSpace
+        s = lti([], [-1], 1)
+        s = lti([1], [-1], 1, 3)
+        assert_(isinstance(s, StateSpace))
+        assert_(isinstance(s, lti))
+        assert_(not isinstance(s, dlti))
+        assert_(s.dt is None)
+
+
+class TestStateSpace:
+    def test_initialization(self):
+        # Check that all initializations work
+        StateSpace(1, 1, 1, 1)
+        StateSpace([1], [2], [3], [4])
+        StateSpace(np.array([[1, 2], [3, 4]]), np.array([[1], [2]]),
+                   np.array([[1, 0]]), np.array([[0]]))
+
+    def test_conversion(self):
+        # Check the conversion functions
+        s = StateSpace(1, 2, 3, 4)
+        assert_(isinstance(s.to_ss(), StateSpace))
+        assert_(isinstance(s.to_tf(), TransferFunction))
+        assert_(isinstance(s.to_zpk(), ZerosPolesGain))
+
+        # Make sure copies work
+        assert_(StateSpace(s) is not s)
+        assert_(s.to_ss() is not s)
+
+    def test_properties(self):
+        # Test setters/getters for cross class properties.
+        # This implicitly tests to_tf() and to_zpk()
+
+        # Getters
+        s = StateSpace(1, 1, 1, 1)
+        assert_equal(s.poles, [1])
+        assert_equal(s.zeros, [0])
+        assert_(s.dt is None)
+
+    def test_operators(self):
+        # Test +/-/* operators on systems
+
+        class BadType:
+            pass
+
+        s1 = StateSpace(np.array([[-0.5, 0.7], [0.3, -0.8]]),
+                        np.array([[1], [0]]),
+                        np.array([[1, 0]]),
+                        np.array([[0]]),
+                        )
+
+        s2 = StateSpace(np.array([[-0.2, -0.1], [0.4, -0.1]]),
+                        np.array([[1], [0]]),
+                        np.array([[1, 0]]),
+                        np.array([[0]])
+                        )
+
+        s_discrete = s1.to_discrete(0.1)
+        s2_discrete = s2.to_discrete(0.2)
+        s3_discrete = s2.to_discrete(0.1)
+
+        # Impulse response
+        t = np.linspace(0, 1, 100)
+        u = np.zeros_like(t)
+        u[0] = 1
+
+        # Test multiplication
+        for typ in (int, float, complex, np.float32, np.complex128, np.array):
+            assert_allclose(lsim(typ(2) * s1, U=u, T=t)[1],
+                            typ(2) * lsim(s1, U=u, T=t)[1])
+
+            assert_allclose(lsim(s1 * typ(2), U=u, T=t)[1],
+                            lsim(s1, U=u, T=t)[1] * typ(2))
+
+            assert_allclose(lsim(s1 / typ(2), U=u, T=t)[1],
+                            lsim(s1, U=u, T=t)[1] / typ(2))
+
+            with assert_raises(TypeError):
+                typ(2) / s1
+
+        assert_allclose(lsim(s1 * 2, U=u, T=t)[1],
+                        lsim(s1, U=2 * u, T=t)[1])
+
+        assert_allclose(lsim(s1 * s2, U=u, T=t)[1],
+                        lsim(s1, U=lsim(s2, U=u, T=t)[1], T=t)[1],
+                        atol=1e-5)
+
+        with assert_raises(TypeError):
+            s1 / s1
+
+        with assert_raises(TypeError):
+            s1 * s_discrete
+
+        with assert_raises(TypeError):
+            # Check different discretization constants
+            s_discrete * s2_discrete
+
+        with assert_raises(TypeError):
+            s1 * BadType()
+
+        with assert_raises(TypeError):
+            BadType() * s1
+
+        with assert_raises(TypeError):
+            s1 / BadType()
+
+        with assert_raises(TypeError):
+            BadType() / s1
+
+        # Test addition
+        assert_allclose(lsim(s1 + 2, U=u, T=t)[1],
+                        2 * u + lsim(s1, U=u, T=t)[1])
+
+        # Check for dimension mismatch
+        with assert_raises(ValueError):
+            s1 + np.array([1, 2])
+
+        with assert_raises(ValueError):
+            np.array([1, 2]) + s1
+
+        with assert_raises(TypeError):
+            s1 + s_discrete
+
+        with assert_raises(ValueError):
+            s1 / np.array([[1, 2], [3, 4]])
+
+        with assert_raises(TypeError):
+            # Check different discretization constants
+            s_discrete + s2_discrete
+
+        with assert_raises(TypeError):
+            s1 + BadType()
+
+        with assert_raises(TypeError):
+            BadType() + s1
+
+        assert_allclose(lsim(s1 + s2, U=u, T=t)[1],
+                        lsim(s1, U=u, T=t)[1] + lsim(s2, U=u, T=t)[1])
+
+        # Test subtraction
+        assert_allclose(lsim(s1 - 2, U=u, T=t)[1],
+                        -2 * u + lsim(s1, U=u, T=t)[1])
+
+        assert_allclose(lsim(2 - s1, U=u, T=t)[1],
+                        2 * u + lsim(-s1, U=u, T=t)[1])
+
+        assert_allclose(lsim(s1 - s2, U=u, T=t)[1],
+                        lsim(s1, U=u, T=t)[1] - lsim(s2, U=u, T=t)[1])
+
+        with assert_raises(TypeError):
+            s1 - BadType()
+
+        with assert_raises(TypeError):
+            BadType() - s1
+
+        s = s_discrete + s3_discrete
+        assert_(s.dt == 0.1)
+
+        s = s_discrete * s3_discrete
+        assert_(s.dt == 0.1)
+
+        s = 3 * s_discrete
+        assert_(s.dt == 0.1)
+
+        s = -s_discrete
+        assert_(s.dt == 0.1)
+
+
+class TestTransferFunction:
+    def test_initialization(self):
+        # Check that all initializations work
+        TransferFunction(1, 1)
+        TransferFunction([1], [2])
+        TransferFunction(np.array([1]), np.array([2]))
+
+    def test_conversion(self):
+        # Check the conversion functions
+        s = TransferFunction([1, 0], [1, -1])
+        assert_(isinstance(s.to_ss(), StateSpace))
+        assert_(isinstance(s.to_tf(), TransferFunction))
+        assert_(isinstance(s.to_zpk(), ZerosPolesGain))
+
+        # Make sure copies work
+        assert_(TransferFunction(s) is not s)
+        assert_(s.to_tf() is not s)
+
+    def test_properties(self):
+        # Test setters/getters for cross class properties.
+        # This implicitly tests to_ss() and to_zpk()
+
+        # Getters
+        s = TransferFunction([1, 0], [1, -1])
+        assert_equal(s.poles, [1])
+        assert_equal(s.zeros, [0])
+
+
+class TestZerosPolesGain:
+    def test_initialization(self):
+        # Check that all initializations work
+        ZerosPolesGain(1, 1, 1)
+        ZerosPolesGain([1], [2], 1)
+        ZerosPolesGain(np.array([1]), np.array([2]), 1)
+
+    def test_conversion(self):
+        # Check the conversion functions
+        s = ZerosPolesGain(1, 2, 3)
+        assert_(isinstance(s.to_ss(), StateSpace))
+        assert_(isinstance(s.to_tf(), TransferFunction))
+        assert_(isinstance(s.to_zpk(), ZerosPolesGain))
+
+        # Make sure copies work
+        assert_(ZerosPolesGain(s) is not s)
+        assert_(s.to_zpk() is not s)
+
+
+class Test_abcd_normalize:
+    def setup_method(self):
+        self.A = np.array([[1.0, 2.0], [3.0, 4.0]])
+        self.B = np.array([[-1.0], [5.0]])
+        self.C = np.array([[4.0, 5.0]])
+        self.D = np.array([[2.5]])
+
+    def test_no_matrix_fails(self):
+        assert_raises(ValueError, abcd_normalize)
+
+    def test_A_nosquare_fails(self):
+        assert_raises(ValueError, abcd_normalize, [1, -1],
+                      self.B, self.C, self.D)
+
+    def test_AB_mismatch_fails(self):
+        assert_raises(ValueError, abcd_normalize, self.A, [-1, 5],
+                      self.C, self.D)
+
+    def test_AC_mismatch_fails(self):
+        assert_raises(ValueError, abcd_normalize, self.A, self.B,
+                      [[4.0], [5.0]], self.D)
+
+    def test_CD_mismatch_fails(self):
+        assert_raises(ValueError, abcd_normalize, self.A, self.B,
+                      self.C, [2.5, 0])
+
+    def test_BD_mismatch_fails(self):
+        assert_raises(ValueError, abcd_normalize, self.A, [-1, 5],
+                      self.C, self.D)
+
+    def test_normalized_matrices_unchanged(self):
+        A, B, C, D = abcd_normalize(self.A, self.B, self.C, self.D)
+        assert_equal(A, self.A)
+        assert_equal(B, self.B)
+        assert_equal(C, self.C)
+        assert_equal(D, self.D)
+
+    def test_shapes(self):
+        A, B, C, D = abcd_normalize(self.A, self.B, [1, 0], 0)
+        assert_equal(A.shape[0], A.shape[1])
+        assert_equal(A.shape[0], B.shape[0])
+        assert_equal(A.shape[0], C.shape[1])
+        assert_equal(C.shape[0], D.shape[0])
+        assert_equal(B.shape[1], D.shape[1])
+
+    def test_zero_dimension_is_not_none1(self):
+        B_ = np.zeros((2, 0))
+        D_ = np.zeros((0, 0))
+        A, B, C, D = abcd_normalize(A=self.A, B=B_, D=D_)
+        assert_equal(A, self.A)
+        assert_equal(B, B_)
+        assert_equal(D, D_)
+        assert_equal(C.shape[0], D_.shape[0])
+        assert_equal(C.shape[1], self.A.shape[0])
+
+    def test_zero_dimension_is_not_none2(self):
+        B_ = np.zeros((2, 0))
+        C_ = np.zeros((0, 2))
+        A, B, C, D = abcd_normalize(A=self.A, B=B_, C=C_)
+        assert_equal(A, self.A)
+        assert_equal(B, B_)
+        assert_equal(C, C_)
+        assert_equal(D.shape[0], C_.shape[0])
+        assert_equal(D.shape[1], B_.shape[1])
+
+    def test_missing_A(self):
+        A, B, C, D = abcd_normalize(B=self.B, C=self.C, D=self.D)
+        assert_equal(A.shape[0], A.shape[1])
+        assert_equal(A.shape[0], B.shape[0])
+        assert_equal(A.shape, (self.B.shape[0], self.B.shape[0]))
+
+    def test_missing_B(self):
+        A, B, C, D = abcd_normalize(A=self.A, C=self.C, D=self.D)
+        assert_equal(B.shape[0], A.shape[0])
+        assert_equal(B.shape[1], D.shape[1])
+        assert_equal(B.shape, (self.A.shape[0], self.D.shape[1]))
+
+    def test_missing_C(self):
+        A, B, C, D = abcd_normalize(A=self.A, B=self.B, D=self.D)
+        assert_equal(C.shape[0], D.shape[0])
+        assert_equal(C.shape[1], A.shape[0])
+        assert_equal(C.shape, (self.D.shape[0], self.A.shape[0]))
+
+    def test_missing_D(self):
+        A, B, C, D = abcd_normalize(A=self.A, B=self.B, C=self.C)
+        assert_equal(D.shape[0], C.shape[0])
+        assert_equal(D.shape[1], B.shape[1])
+        assert_equal(D.shape, (self.C.shape[0], self.B.shape[1]))
+
+    def test_missing_AB(self):
+        A, B, C, D = abcd_normalize(C=self.C, D=self.D)
+        assert_equal(A.shape[0], A.shape[1])
+        assert_equal(A.shape[0], B.shape[0])
+        assert_equal(B.shape[1], D.shape[1])
+        assert_equal(A.shape, (self.C.shape[1], self.C.shape[1]))
+        assert_equal(B.shape, (self.C.shape[1], self.D.shape[1]))
+
+    def test_missing_AC(self):
+        A, B, C, D = abcd_normalize(B=self.B, D=self.D)
+        assert_equal(A.shape[0], A.shape[1])
+        assert_equal(A.shape[0], B.shape[0])
+        assert_equal(C.shape[0], D.shape[0])
+        assert_equal(C.shape[1], A.shape[0])
+        assert_equal(A.shape, (self.B.shape[0], self.B.shape[0]))
+        assert_equal(C.shape, (self.D.shape[0], self.B.shape[0]))
+
+    def test_missing_AD(self):
+        A, B, C, D = abcd_normalize(B=self.B, C=self.C)
+        assert_equal(A.shape[0], A.shape[1])
+        assert_equal(A.shape[0], B.shape[0])
+        assert_equal(D.shape[0], C.shape[0])
+        assert_equal(D.shape[1], B.shape[1])
+        assert_equal(A.shape, (self.B.shape[0], self.B.shape[0]))
+        assert_equal(D.shape, (self.C.shape[0], self.B.shape[1]))
+
+    def test_missing_BC(self):
+        A, B, C, D = abcd_normalize(A=self.A, D=self.D)
+        assert_equal(B.shape[0], A.shape[0])
+        assert_equal(B.shape[1], D.shape[1])
+        assert_equal(C.shape[0], D.shape[0])
+        assert_equal(C.shape[1], A.shape[0])
+        assert_equal(B.shape, (self.A.shape[0], self.D.shape[1]))
+        assert_equal(C.shape, (self.D.shape[0], self.A.shape[0]))
+
+    def test_missing_ABC_fails(self):
+        assert_raises(ValueError, abcd_normalize, D=self.D)
+
+    def test_missing_BD_fails(self):
+        assert_raises(ValueError, abcd_normalize, A=self.A, C=self.C)
+
+    def test_missing_CD_fails(self):
+        assert_raises(ValueError, abcd_normalize, A=self.A, B=self.B)
+
+
+class Test_bode:
+
+    def test_01(self):
+        # Test bode() magnitude calculation (manual sanity check).
+        # 1st order low-pass filter: H(s) = 1 / (s + 1),
+        # cutoff: 1 rad/s, slope: -20 dB/decade
+        #   H(s=0.1) ~= 0 dB
+        #   H(s=1) ~= -3 dB
+        #   H(s=10) ~= -20 dB
+        #   H(s=100) ~= -40 dB
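+        # These values follow from |H(jw)| = 1/sqrt(1 + w**2): the magnitude
+        # in dB is -10*log10(1 + w**2), e.g. -10*log10(2) ~= -3.01 dB at w=1.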
+        system = lti([1], [1, 1])
+        w = [0.1, 1, 10, 100]
+        w, mag, phase = bode(system, w=w)
+        expected_mag = [0, -3, -20, -40]
+        assert_almost_equal(mag, expected_mag, decimal=1)
+
+    def test_02(self):
+        # Test bode() phase calculation (manual sanity check).
+        # 1st order low-pass filter: H(s) = 1 / (s + 1),
+        #   angle(H(s=0.1)) ~= -5.7 deg
+        #   angle(H(s=1)) ~= -45 deg
+        #   angle(H(s=10)) ~= -84.3 deg
+        system = lti([1], [1, 1])
+        w = [0.1, 1, 10]
+        w, mag, phase = bode(system, w=w)
+        expected_phase = [-5.7, -45, -84.3]
+        assert_almost_equal(phase, expected_phase, decimal=1)
+
+    def test_03(self):
+        # Test bode() magnitude calculation.
+        # 1st order low-pass filter: H(s) = 1 / (s + 1)
+        system = lti([1], [1, 1])
+        w = [0.1, 1, 10, 100]
+        w, mag, phase = bode(system, w=w)
+        jw = w * 1j
+        y = np.polyval(system.num, jw) / np.polyval(system.den, jw)
+        expected_mag = 20.0 * np.log10(abs(y))
+        assert_almost_equal(mag, expected_mag)
+
+    def test_04(self):
+        # Test bode() phase calculation.
+        # 1st order low-pass filter: H(s) = 1 / (s + 1)
+        system = lti([1], [1, 1])
+        w = [0.1, 1, 10, 100]
+        w, mag, phase = bode(system, w=w)
+        jw = w * 1j
+        y = np.polyval(system.num, jw) / np.polyval(system.den, jw)
+        expected_phase = np.arctan2(y.imag, y.real) * 180.0 / np.pi
+        assert_almost_equal(phase, expected_phase)
+
+    def test_05(self):
+        # Test that bode() finds a reasonable frequency range.
+        # 1st order low-pass filter: H(s) = 1 / (s + 1)
+        system = lti([1], [1, 1])
+        n = 10
+        # Expected range is from 0.01 to 10.
+        expected_w = np.logspace(-2, 1, n)
+        w, mag, phase = bode(system, n=n)
+        assert_almost_equal(w, expected_w)
+
+    def test_06(self):
+        # Test that bode() doesn't fail on a system with a pole at 0.
+        # integrator, pole at zero: H(s) = 1 / s
+        system = lti([1], [1, 0])
+        w, mag, phase = bode(system, n=2)
+        assert_equal(w[0], 0.01)  # a failure would produce NaN here
+
+    def test_07(self):
+        # bode() should not fail on a system with pure imaginary poles.
+        # The test passes if bode doesn't raise an exception.
+        system = lti([1], [1, 0, 100])
+        w, mag, phase = bode(system, n=2)
+
+    def test_08(self):
+        # Test that bode() returns a continuous (unwrapped) phase; see gh-2331.
+        system = lti([], [-10, -30, -40, -60, -70], 1)
+        w, mag, phase = system.bode(w=np.logspace(-3, 40, 100))
+        assert_almost_equal(min(phase), -450, decimal=15)
+
+    def test_from_state_space(self):
+        # Ensure that bode works with a system that was created from the
+        # state space representation matrices A, B, C, D.  In this case,
+        # system.num will be a 2-D array with shape (1, n+1), where (n,n)
+        # is the shape of A.
+        # A Butterworth lowpass filter is used, so we know the exact
+        # frequency response.
+        a = np.array([1.0, 2.0, 2.0, 1.0])
+        A = linalg.companion(a).T
+        B = np.array([[0.0], [0.0], [1.0]])
+        C = np.array([[1.0, 0.0, 0.0]])
+        D = np.array([[0.0]])
+        with suppress_warnings() as sup:
+            sup.filter(BadCoefficients)
+            system = lti(A, B, C, D)
+            w, mag, phase = bode(system, n=100)
+
+        expected_magnitude = 20 * np.log10(np.sqrt(1.0 / (1.0 + w**6)))
+        assert_almost_equal(mag, expected_magnitude)
+
+
+class Test_freqresp:
+
+    def test_output_manual(self):
+        # Test freqresp() output calculation (manual sanity check).
+        # 1st order low-pass filter: H(s) = 1 / (s + 1),
+        #   re(H(s=0.1)) ~= 0.99
+        #   re(H(s=1)) ~= 0.5
+        #   re(H(s=10)) ~= 0.0099
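+        #   im(H(jw)) = -w / (1 + w**2), i.e. roughly -0.099, -0.5 and
+        #   -0.099 at these frequencies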
+        system = lti([1], [1, 1])
+        w = [0.1, 1, 10]
+        w, H = freqresp(system, w=w)
+        expected_re = [0.99, 0.5, 0.0099]
+        expected_im = [-0.099, -0.5, -0.099]
+        assert_almost_equal(H.real, expected_re, decimal=1)
+        assert_almost_equal(H.imag, expected_im, decimal=1)
+
+    def test_output(self):
+        # Test freqresp() output calculation.
+        # 1st order low-pass filter: H(s) = 1 / (s + 1)
+        system = lti([1], [1, 1])
+        w = [0.1, 1, 10, 100]
+        w, H = freqresp(system, w=w)
+        s = w * 1j
+        expected = np.polyval(system.num, s) / np.polyval(system.den, s)
+        assert_almost_equal(H.real, expected.real)
+        assert_almost_equal(H.imag, expected.imag)
+
+    def test_freq_range(self):
+        # Test that freqresp() finds a reasonable frequency range.
+        # 1st order low-pass filter: H(s) = 1 / (s + 1)
+        # Expected range is from 0.01 to 10.
+        system = lti([1], [1, 1])
+        n = 10
+        expected_w = np.logspace(-2, 1, n)
+        w, H = freqresp(system, n=n)
+        assert_almost_equal(w, expected_w)
+
+    def test_pole_zero(self):
+        # Test that freqresp() doesn't fail on a system with a pole at 0.
+        # integrator, pole at zero: H(s) = 1 / s
+        system = lti([1], [1, 0])
+        w, H = freqresp(system, n=2)
+        assert_equal(w[0], 0.01)  # a failure would produce NaN here
+
+    def test_from_state_space(self):
+        # Ensure that freqresp works with a system that was created from the
+        # state space representation matrices A, B, C, D.  In this case,
+        # system.num will be a 2-D array with shape (1, n+1), where (n,n) is
+        # the shape of A.
+        # A Butterworth lowpass filter is used, so we know the exact
+        # frequency response.
+        a = np.array([1.0, 2.0, 2.0, 1.0])
+        A = linalg.companion(a).T
+        B = np.array([[0.0],[0.0],[1.0]])
+        C = np.array([[1.0, 0.0, 0.0]])
+        D = np.array([[0.0]])
+        with suppress_warnings() as sup:
+            sup.filter(BadCoefficients)
+            system = lti(A, B, C, D)
+            w, H = freqresp(system, n=100)
+        s = w * 1j
+        expected = (1.0 / (1.0 + 2*s + 2*s**2 + s**3))
+        assert_almost_equal(H.real, expected.real)
+        assert_almost_equal(H.imag, expected.imag)
+
+    def test_from_zpk(self):
+        # 4th order low-pass filter: H(s) = 1 / (s + 1)**4
+        system = lti([],[-1]*4,[1])
+        w = [0.1, 1, 10, 100]
+        w, H = freqresp(system, w=w)
+        s = w * 1j
+        expected = 1 / (s + 1)**4
+        assert_almost_equal(H.real, expected.real)
+        assert_almost_equal(H.imag, expected.imag)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_max_len_seq.py b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_max_len_seq.py
new file mode 100644
index 00000000..c4e79969
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_max_len_seq.py
@@ -0,0 +1,65 @@
+import numpy as np
+from numpy.testing import assert_allclose, assert_array_equal
+from pytest import raises as assert_raises
+
+from numpy.fft import fft, ifft
+
+from scipy.signal import max_len_seq
+
+
+class TestMLS:
+
+    def test_mls_inputs(self):
+        # can't all be zero state
+        assert_raises(ValueError, max_len_seq,
+                      10, state=np.zeros(10))
+        # wrong size state
+        assert_raises(ValueError, max_len_seq, 10,
+                      state=np.ones(3))
+        # wrong length
+        assert_raises(ValueError, max_len_seq, 10, length=-1)
+        assert_array_equal(max_len_seq(10, length=0)[0], [])
+        # unknown taps
+        assert_raises(ValueError, max_len_seq, 64)
+        # bad taps
+        assert_raises(ValueError, max_len_seq, 10, taps=[-1, 1])
+
+    def test_mls_output(self):
+        # define some alternate working taps
+        alt_taps = {2: [1], 3: [2], 4: [3], 5: [4, 3, 2], 6: [5, 4, 1], 7: [4],
+                    8: [7, 5, 3]}
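+        # (each taps list gives the feedback positions of the generating
+        # LFSR; these are assumed to correspond to alternate primitive
+        # polynomials for each register length)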
+        # assume the other bit levels work too; testing higher orders is
+        # too slow...
+        for nbits in range(2, 8):
+            for state in [None, np.round(np.random.rand(nbits))]:
+                for taps in [None, alt_taps[nbits]]:
+                    if state is not None and np.all(state == 0):
+                        state[0] = 1  # they can't all be zero
+                    orig_m = max_len_seq(nbits, state=state,
+                                         taps=taps)[0]
+                    m = 2. * orig_m - 1.  # convert to +/- 1 representation
+                    # First, make sure we got all 1's or -1's
+                    err_msg = "mls had non binary terms"
+                    assert_array_equal(np.abs(m), np.ones_like(m),
+                                       err_msg=err_msg)
+                    # Test via circular cross-correlation, which is just
+                    # multiplication in the frequency domain with one signal
+                    # conjugated
+                    tester = np.real(ifft(fft(m) * np.conj(fft(m))))
+                    out_len = 2**nbits - 1
+                    # impulse amplitude == out_len
+                    err_msg = "mls impulse has incorrect value"
+                    assert_allclose(tester[0], out_len, err_msg=err_msg)
+                    # steady-state is -1
+                    err_msg = "mls steady-state has incorrect value"
+                    assert_allclose(tester[1:], np.full(out_len - 1, -1),
+                                    err_msg=err_msg)
+                    # let's do the split thing using a couple options
+                    for n in (1, 2**(nbits - 1)):
+                        m1, s1 = max_len_seq(nbits, state=state, taps=taps,
+                                             length=n)
+                        m2, s2 = max_len_seq(nbits, state=s1, taps=taps,
+                                             length=1)
+                        m3, s3 = max_len_seq(nbits, state=s2, taps=taps,
+                                             length=out_len - n - 1)
+                        new_m = np.concatenate((m1, m2, m3))
+                        assert_array_equal(orig_m, new_m)
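+
+    def test_mls_balance_sketch(self):
+        # A small supplementary check, assuming the standard MLS property
+        # that a maximal-length sequence of order n has length 2**n - 1 and
+        # contains exactly 2**(n - 1) ones.  This is a sketch of that
+        # property, not a test taken from upstream.
+        for nbits in range(2, 8):
+            seq = max_len_seq(nbits)[0]
+            assert_array_equal(len(seq), 2**nbits - 1)
+            assert_array_equal(seq.sum(), 2**(nbits - 1))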
+
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_peak_finding.py b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_peak_finding.py
new file mode 100644
index 00000000..c8b523f5
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_peak_finding.py
@@ -0,0 +1,887 @@
+import copy
+
+import numpy as np
+from numpy.testing import (
+    assert_,
+    assert_equal,
+    assert_allclose,
+    assert_array_equal
+)
+import pytest
+from pytest import raises, warns
+
+from scipy.signal._peak_finding import (
+    argrelmax,
+    argrelmin,
+    peak_prominences,
+    peak_widths,
+    _unpack_condition_args,
+    find_peaks,
+    find_peaks_cwt,
+    _identify_ridge_lines
+)
+from scipy.signal.windows import gaussian
+from scipy.signal._peak_finding_utils import _local_maxima_1d, PeakPropertyWarning
+
+
+def _gen_gaussians(center_locs, sigmas, total_length):
+    xdata = np.arange(0, total_length).astype(float)
+    out_data = np.zeros(total_length, dtype=float)
+    for ind, sigma in enumerate(sigmas):
+        tmp = (xdata - center_locs[ind]) / sigma
+        out_data += np.exp(-(tmp**2))
+    return out_data
+
+
+def _gen_gaussians_even(sigmas, total_length):
+    num_peaks = len(sigmas)
+    delta = total_length / (num_peaks + 1)
+    center_locs = np.linspace(delta, total_length - delta, num=num_peaks).astype(int)
+    out_data = _gen_gaussians(center_locs, sigmas, total_length)
+    return out_data, center_locs
+
+
+def _gen_ridge_line(start_locs, max_locs, length, distances, gaps):
+    """
+    Generate coordinates for a ridge line.
+
+    Will be a series of coordinates, starting at start_locs (length 2).
+    The maximum distance between any two adjacent columns will be
+    `max(distances)`, and the maximum gap between adjacent rows will be
+    `max(gaps)`.
+
+    `max_locs` should be the size of the intended matrix. The
+    ending coordinates are guaranteed to be less than `max_locs`,
+    although they may not approach `max_locs` at all.
+    """
+
+    def keep_bounds(num, max_val):
+        out = max(num, 0)
+        out = min(out, max_val)
+        return out
+
+    gaps = copy.deepcopy(gaps)
+    distances = copy.deepcopy(distances)
+
+    locs = np.zeros([length, 2], dtype=int)
+    locs[0, :] = start_locs
+    total_length = max_locs[0] - start_locs[0] - sum(gaps)
+    if total_length < length:
+        raise ValueError('Cannot generate ridge line according to constraints')
+    dist_int = length / len(distances) - 1
+    gap_int = length / len(gaps) - 1
+    for ind in range(1, length):
+        nextcol = locs[ind - 1, 1]
+        nextrow = locs[ind - 1, 0] + 1
+        if (ind % dist_int == 0) and (len(distances) > 0):
+            nextcol += ((-1)**ind)*distances.pop()
+        if (ind % gap_int == 0) and (len(gaps) > 0):
+            nextrow += gaps.pop()
+        nextrow = keep_bounds(nextrow, max_locs[0])
+        nextcol = keep_bounds(nextcol, max_locs[1])
+        locs[ind, :] = [nextrow, nextcol]
+
+    return [locs[:, 0], locs[:, 1]]
+
+
+class TestLocalMaxima1d:
+
+    def test_empty(self):
+        """Test with empty signal."""
+        x = np.array([], dtype=np.float64)
+        for array in _local_maxima_1d(x):
+            assert_equal(array, np.array([]))
+            assert_(array.base is None)
+
+    def test_linear(self):
+        """Test with linear signal."""
+        x = np.linspace(0, 100)
+        for array in _local_maxima_1d(x):
+            assert_equal(array, np.array([]))
+            assert_(array.base is None)
+
+    def test_simple(self):
+        """Test with simple signal."""
+        x = np.linspace(-10, 10, 50)
+        x[2::3] += 1
+        expected = np.arange(2, 50, 3)
+        for array in _local_maxima_1d(x):
+            # For plateaus of size 1, the edges are identical to the
+            # midpoints
+            assert_equal(array, expected)
+            assert_(array.base is None)
+
+    def test_flat_maxima(self):
+        """Test if flat maxima are detected correctly."""
+        x = np.array([-1.3, 0, 1, 0, 2, 2, 0, 3, 3, 3, 2.99, 4, 4, 4, 4, -10,
+                      -5, -5, -5, -5, -5, -10])
+        midpoints, left_edges, right_edges = _local_maxima_1d(x)
+        assert_equal(midpoints, np.array([2, 4, 8, 12, 18]))
+        assert_equal(left_edges, np.array([2, 4, 7, 11, 16]))
+        assert_equal(right_edges, np.array([2, 5, 9, 14, 20]))
+
+    @pytest.mark.parametrize('x', [
+        np.array([1., 0, 2]),
+        np.array([3., 3, 0, 4, 4]),
+        np.array([5., 5, 5, 0, 6, 6, 6]),
+    ])
+    def test_signal_edges(self, x):
+        """Test if behavior on signal edges is correct."""
+        for array in _local_maxima_1d(x):
+            assert_equal(array, np.array([]))
+            assert_(array.base is None)
+
+    def test_exceptions(self):
+        """Test input validation and raised exceptions."""
+        with raises(ValueError, match="wrong number of dimensions"):
+            _local_maxima_1d(np.ones((1, 1)))
+        with raises(ValueError, match="expected 'const float64_t'"):
+            _local_maxima_1d(np.ones(1, dtype=int))
+        with raises(TypeError, match="list"):
+            _local_maxima_1d([1., 2.])
+        with raises(TypeError, match="'x' must not be None"):
+            _local_maxima_1d(None)
+
+
+class TestRidgeLines:
+
+    def test_empty(self):
+        test_matr = np.zeros([20, 100])
+        lines = _identify_ridge_lines(test_matr, np.full(20, 2), 1)
+        assert_(len(lines) == 0)
+
+    def test_minimal(self):
+        test_matr = np.zeros([20, 100])
+        test_matr[0, 10] = 1
+        lines = _identify_ridge_lines(test_matr, np.full(20, 2), 1)
+        assert_(len(lines) == 1)
+
+        test_matr = np.zeros([20, 100])
+        test_matr[0:2, 10] = 1
+        lines = _identify_ridge_lines(test_matr, np.full(20, 2), 1)
+        assert_(len(lines) == 1)
+
+    def test_single_pass(self):
+        distances = [0, 1, 2, 5]
+        gaps = [0, 1, 2, 0, 1]
+        test_matr = np.zeros([20, 50]) + 1e-12
+        length = 12
+        line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps)
+        test_matr[line[0], line[1]] = 1
+        max_distances = np.full(20, max(distances))
+        identified_lines = _identify_ridge_lines(test_matr, max_distances, max(gaps) + 1)
+        assert_array_equal(identified_lines, [line])
+
+    def test_single_bigdist(self):
+        distances = [0, 1, 2, 5]
+        gaps = [0, 1, 2, 4]
+        test_matr = np.zeros([20, 50])
+        length = 12
+        line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps)
+        test_matr[line[0], line[1]] = 1
+        max_dist = 3
+        max_distances = np.full(20, max_dist)
+        # This should get 2 lines, since the distance is too large
+        identified_lines = _identify_ridge_lines(test_matr, max_distances, max(gaps) + 1)
+        assert_(len(identified_lines) == 2)
+
+        for iline in identified_lines:
+            adists = np.diff(iline[1])
+            np.testing.assert_array_less(np.abs(adists), max_dist)
+
+            agaps = np.diff(iline[0])
+            np.testing.assert_array_less(np.abs(agaps), max(gaps) + 0.1)
+
+    def test_single_biggap(self):
+        distances = [0, 1, 2, 5]
+        max_gap = 3
+        gaps = [0, 4, 2, 1]
+        test_matr = np.zeros([20, 50])
+        length = 12
+        line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps)
+        test_matr[line[0], line[1]] = 1
+        max_dist = 6
+        max_distances = np.full(20, max_dist)
+        # This should get 2 lines, since the gap is too large
+        identified_lines = _identify_ridge_lines(test_matr, max_distances, max_gap)
+        assert_(len(identified_lines) == 2)
+
+        for iline in identified_lines:
+            adists = np.diff(iline[1])
+            np.testing.assert_array_less(np.abs(adists), max_dist)
+
+            agaps = np.diff(iline[0])
+            np.testing.assert_array_less(np.abs(agaps), max(gaps) + 0.1)
+
+    def test_single_biggaps(self):
+        distances = [0]
+        max_gap = 1
+        gaps = [3, 6]
+        test_matr = np.zeros([50, 50])
+        length = 30
+        line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps)
+        test_matr[line[0], line[1]] = 1
+        max_dist = 1
+        max_distances = np.full(50, max_dist)
+        # This should get 3 lines, since the gaps are too large
+        identified_lines = _identify_ridge_lines(test_matr, max_distances, max_gap)
+        assert_(len(identified_lines) == 3)
+
+        for iline in identified_lines:
+            adists = np.diff(iline[1])
+            np.testing.assert_array_less(np.abs(adists), max_dist)
+
+            agaps = np.diff(iline[0])
+            np.testing.assert_array_less(np.abs(agaps), max(gaps) + 0.1)
+
+
+class TestArgrel:
+
+    def test_empty(self):
+        # Regression test for gh-2832.
+        # When there are no relative extrema, make sure that
+        # the number of empty arrays returned matches the
+        # dimension of the input.
+
+        empty_array = np.array([], dtype=int)
+
+        z1 = np.zeros(5)
+
+        i = argrelmin(z1)
+        assert_equal(len(i), 1)
+        assert_array_equal(i[0], empty_array)
+
+        z2 = np.zeros((3,5))
+
+        row, col = argrelmin(z2, axis=0)
+        assert_array_equal(row, empty_array)
+        assert_array_equal(col, empty_array)
+
+        row, col = argrelmin(z2, axis=1)
+        assert_array_equal(row, empty_array)
+        assert_array_equal(col, empty_array)
+
+    def test_basic(self):
+        # Note: the docstrings for the argrel{min,max,extrema} functions
+        # do not give a guarantee of the order of the indices, so we'll
+        # sort them before testing.
+
+        x = np.array([[1, 2, 2, 3, 2],
+                      [2, 1, 2, 2, 3],
+                      [3, 2, 1, 2, 2],
+                      [2, 3, 2, 1, 2],
+                      [1, 2, 3, 2, 1]])
+
+        row, col = argrelmax(x, axis=0)
+        order = np.argsort(row)
+        assert_equal(row[order], [1, 2, 3])
+        assert_equal(col[order], [4, 0, 1])
+
+        row, col = argrelmax(x, axis=1)
+        order = np.argsort(row)
+        assert_equal(row[order], [0, 3, 4])
+        assert_equal(col[order], [3, 1, 2])
+
+        row, col = argrelmin(x, axis=0)
+        order = np.argsort(row)
+        assert_equal(row[order], [1, 2, 3])
+        assert_equal(col[order], [1, 2, 3])
+
+        row, col = argrelmin(x, axis=1)
+        order = np.argsort(row)
+        assert_equal(row[order], [1, 2, 3])
+        assert_equal(col[order], [1, 2, 3])
+
+    def test_highorder(self):
+        order = 2
+        sigmas = [1.0, 2.0, 10.0, 5.0, 15.0]
+        test_data, act_locs = _gen_gaussians_even(sigmas, 500)
+        test_data[act_locs + order] = test_data[act_locs]*0.99999
+        test_data[act_locs - order] = test_data[act_locs]*0.99999
+        rel_max_locs = argrelmax(test_data, order=order, mode='clip')[0]
+
+        assert_(len(rel_max_locs) == len(act_locs))
+        assert_((rel_max_locs == act_locs).all())
+
+    def test_2d_gaussians(self):
+        sigmas = [1.0, 2.0, 10.0]
+        test_data, act_locs = _gen_gaussians_even(sigmas, 100)
+        rot_factor = 20
+        rot_range = np.arange(0, len(test_data)) - rot_factor
+        test_data_2 = np.vstack([test_data, test_data[rot_range]])
+        rel_max_rows, rel_max_cols = argrelmax(test_data_2, axis=1, order=1)
+
+        for rw in range(0, test_data_2.shape[0]):
+            inds = (rel_max_rows == rw)
+
+            assert_(len(rel_max_cols[inds]) == len(act_locs))
+            assert_((act_locs == (rel_max_cols[inds] - rot_factor*rw)).all())
+
+
+class TestPeakProminences:
+
+    def test_empty(self):
+        """
+        Test if an empty array is returned if no peaks are provided.
+        """
+        out = peak_prominences([1, 2, 3], [])
+        for arr, dtype in zip(out, [np.float64, np.intp, np.intp]):
+            assert_(arr.size == 0)
+            assert_(arr.dtype == dtype)
+
+        out = peak_prominences([], [])
+        for arr, dtype in zip(out, [np.float64, np.intp, np.intp]):
+            assert_(arr.size == 0)
+            assert_(arr.dtype == dtype)
+
+    def test_basic(self):
+        """
+        Test if height of prominences is correctly calculated in signal with
+        rising baseline (peak widths are 1 sample).
+        """
+        # Prepare basic signal
+        x = np.array([-1, 1.2, 1.2, 1, 3.2, 1.3, 2.88, 2.1])
+        peaks = np.array([1, 2, 4, 6])
+        lbases = np.array([0, 0, 0, 5])
+        rbases = np.array([3, 3, 5, 7])
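+        # Prominence is each peak's height above the higher of its two bases.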
+        proms = x[peaks] - np.max([x[lbases], x[rbases]], axis=0)
+        # Test if calculation matches handcrafted result
+        out = peak_prominences(x, peaks)
+        assert_equal(out[0], proms)
+        assert_equal(out[1], lbases)
+        assert_equal(out[2], rbases)
+
+    def test_edge_cases(self):
+        """
+        Test edge cases.
+        """
+        # Peaks have same height, prominence and bases
+        x = [0, 2, 1, 2, 1, 2, 0]
+        peaks = [1, 3, 5]
+        proms, lbases, rbases = peak_prominences(x, peaks)
+        assert_equal(proms, [2, 2, 2])
+        assert_equal(lbases, [0, 0, 0])
+        assert_equal(rbases, [6, 6, 6])
+
+        # Peaks have same height & prominence but different bases
+        x = [0, 1, 0, 1, 0, 1, 0]
+        peaks = np.array([1, 3, 5])
+        proms, lbases, rbases = peak_prominences(x, peaks)
+        assert_equal(proms, [1, 1, 1])
+        assert_equal(lbases, peaks - 1)
+        assert_equal(rbases, peaks + 1)
+
+    def test_non_contiguous(self):
+        """
+        Test with non-C-contiguous input arrays.
+        """
+        x = np.repeat([-9, 9, 9, 0, 3, 1], 2)
+        peaks = np.repeat([1, 2, 4], 2)
+        proms, lbases, rbases = peak_prominences(x[::2], peaks[::2])
+        assert_equal(proms, [9, 9, 2])
+        assert_equal(lbases, [0, 0, 3])
+        assert_equal(rbases, [3, 3, 5])
+
+    def test_wlen(self):
+        """
+        Test if wlen actually shrinks the evaluation range correctly.
+        """
+        x = [0, 1, 2, 3, 1, 0, -1]
+        peak = [3]
+        # Test rounding behavior of wlen
+        assert_equal(peak_prominences(x, peak), [3., 0, 6])
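+        # wlen appears to be rounded up to the next odd integer (e.g. 3.2 -> 5
+        # and 1.1 -> 3); each unit of i below shrinks the evaluated window by
+        # one sample on both sides.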
+        for wlen, i in [(8, 0), (7, 0), (6, 0), (5, 1), (3.2, 1), (3, 2), (1.1, 2)]:
+            assert_equal(peak_prominences(x, peak, wlen), [3. - i, 0 + i, 6 - i])
+
+    def test_exceptions(self):
+        """
+        Verify that exceptions and warnings are raised.
+        """
+        # x with dimension > 1
+        with raises(ValueError, match='1-D array'):
+            peak_prominences([[0, 1, 1, 0]], [1, 2])
+        # peaks with dimension > 1
+        with raises(ValueError, match='1-D array'):
+            peak_prominences([0, 1, 1, 0], [[1, 2]])
+        # x with dimension < 1
+        with raises(ValueError, match='1-D array'):
+            peak_prominences(3, [0,])
+
+        # empty x with supplied
+        with raises(ValueError, match='not a valid index'):
+            peak_prominences([], [0])
+        # invalid indices with non-empty x
+        for p in [-100, -1, 3, 1000]:
+            with raises(ValueError, match='not a valid index'):
+                peak_prominences([1, 0, 2], [p])
+
+        # peaks is not cast-able to np.intp
+        with raises(TypeError, match='cannot safely cast'):
+            peak_prominences([0, 1, 1, 0], [1.1, 2.3])
+
+        # wlen < 3
+        with raises(ValueError, match='wlen'):
+            peak_prominences(np.arange(10), [3, 5], wlen=1)
+
+    def test_warnings(self):
+        """
+        Verify that appropriate warnings are raised.
+        """
+        msg = "some peaks have a prominence of 0"
+        for p in [0, 1, 2]:
+            with warns(PeakPropertyWarning, match=msg):
+                peak_prominences([1, 0, 2], [p,])
+        with warns(PeakPropertyWarning, match=msg):
+            peak_prominences([0, 1, 1, 1, 0], [2], wlen=2)
+
+
+class TestPeakWidths:
+
+    def test_empty(self):
+        """
+        Test if an empty array is returned if no peaks are provided.
+        """
+        widths = peak_widths([], [])[0]
+        assert_(isinstance(widths, np.ndarray))
+        assert_equal(widths.size, 0)
+        widths = peak_widths([1, 2, 3], [])[0]
+        assert_(isinstance(widths, np.ndarray))
+        assert_equal(widths.size, 0)
+        out = peak_widths([], [])
+        for arr in out:
+            assert_(isinstance(arr, np.ndarray))
+            assert_equal(arr.size, 0)
+
+    @pytest.mark.filterwarnings("ignore:some peaks have a width of 0")
+    def test_basic(self):
+        """
+        Test a simple use case with easy-to-verify results at different
+        relative heights.
+        """
+        x = np.array([1, 0, 1, 2, 1, 0, -1])
+        prominence = 2
+        for rel_height, width_true, lip_true, rip_true in [
+            (0., 0., 3., 3.),  # raises warning
+            (0.25, 1., 2.5, 3.5),
+            (0.5, 2., 2., 4.),
+            (0.75, 3., 1.5, 4.5),
+            (1., 4., 1., 5.),
+            (2., 5., 1., 6.),
+            (3., 5., 1., 6.)
+        ]:
+            width_calc, height, lip_calc, rip_calc = peak_widths(
+                x, [3], rel_height)
+            assert_allclose(width_calc, width_true)
+            assert_allclose(height, 2 - rel_height * prominence)
+            assert_allclose(lip_calc, lip_true)
+            assert_allclose(rip_calc, rip_true)
+
+    def test_non_contiguous(self):
+        """
+        Test with non-C-contiguous input arrays.
+        """
+        x = np.repeat([0, 100, 50], 4)
+        peaks = np.repeat([1], 3)
+        result = peak_widths(x[::4], peaks[::3])
+        assert_equal(result, [0.75, 75, 0.75, 1.5])
+
+    def test_exceptions(self):
+        """
+        Verify that argument validation works as intended.
+        """
+        with raises(ValueError, match='1-D array'):
+            # x with dimension > 1
+            peak_widths(np.zeros((3, 4)), np.ones(3))
+        with raises(ValueError, match='1-D array'):
+            # x with dimension < 1
+            peak_widths(3, [0])
+        with raises(ValueError, match='1-D array'):
+            # peaks with dimension > 1
+            peak_widths(np.arange(10), np.ones((3, 2), dtype=np.intp))
+        with raises(ValueError, match='1-D array'):
+            # peaks with dimension < 1
+            peak_widths(np.arange(10), 3)
+        with raises(ValueError, match='not a valid index'):
+            # peak pos exceeds x.size
+            peak_widths(np.arange(10), [8, 11])
+        with raises(ValueError, match='not a valid index'):
+            # empty x with peaks supplied
+            peak_widths([], [1, 2])
+        with raises(TypeError, match='cannot safely cast'):
+            # peak cannot be safely casted to intp
+            peak_widths(np.arange(10), [1.1, 2.3])
+        with raises(ValueError, match='rel_height'):
+            # rel_height is < 0
+            peak_widths([0, 1, 0, 1, 0], [1, 3], rel_height=-1)
+        with raises(TypeError, match='None'):
+            # prominence data contains None
+            peak_widths([1, 2, 1], [1], prominence_data=(None, None, None))
+
+    def test_warnings(self):
+        """
+        Verify that appropriate warnings are raised.
+        """
+        msg = "some peaks have a width of 0"
+        with warns(PeakPropertyWarning, match=msg):
+            # Case: rel_height is 0
+            peak_widths([0, 1, 0], [1], rel_height=0)
+        with warns(PeakPropertyWarning, match=msg):
+            # Case: prominence is 0 and bases are identical
+            peak_widths(
+                [0, 1, 1, 1, 0], [2],
+                prominence_data=(np.array([0.], np.float64),
+                                 np.array([2], np.intp),
+                                 np.array([2], np.intp))
+            )
+
+    def test_mismatching_prominence_data(self):
+        """Test with mismatching peak and / or prominence data."""
+        x = [0, 1, 0]
+        peak = [1]
+        for i, (prominences, left_bases, right_bases) in enumerate([
+            ((1.,), (-1,), (2,)),  # left base not in x
+            ((1.,), (0,), (3,)),  # right base not in x
+            ((1.,), (2,), (0,)),  # swapped bases same as peak
+            ((1., 1.), (0, 0), (2, 2)),  # array shapes don't match peaks
+            ((1., 1.), (0,), (2,)),  # arrays with different shapes
+            ((1.,), (0, 0), (2,)),  # arrays with different shapes
+            ((1.,), (0,), (2, 2))  # arrays with different shapes
+        ]):
+            # Make sure the input matches the output of signal.peak_prominences
+            prominence_data = (np.array(prominences, dtype=np.float64),
+                               np.array(left_bases, dtype=np.intp),
+                               np.array(right_bases, dtype=np.intp))
+            # Test for correct exception
+            if i < 3:
+                match = "prominence data is invalid for peak"
+            else:
+                match = "arrays in `prominence_data` must have the same shape"
+            with raises(ValueError, match=match):
+                peak_widths(x, peak, prominence_data=prominence_data)
+
+    @pytest.mark.filterwarnings("ignore:some peaks have a width of 0")
+    def test_intersection_rules(self):
+        """Test if x == eval_height counts as an intersection."""
+        # Flat peak with two possible intersection points if evaluated at 1
+        x = [0, 1, 2, 1, 3, 3, 3, 1, 2, 1, 0]
+        # relative height is 0 -> width is 0 as well, raises warning
+        assert_allclose(peak_widths(x, peaks=[5], rel_height=0),
+                        [(0.,), (3.,), (5.,), (5.,)])
+        # width_height == x counts as intersection -> nearest 1 is chosen
+        assert_allclose(peak_widths(x, peaks=[5], rel_height=2/3),
+                        [(4.,), (1.,), (3.,), (7.,)])
+
+
+def test_unpack_condition_args():
+    """
+    Verify parsing of condition arguments for `scipy.signal.find_peaks` function.
+    """
+    x = np.arange(10)
+    amin_true = x
+    amax_true = amin_true + 10
+    peaks = amin_true[1::2]
+
+    # Test unpacking with None or interval
+    assert_((None, None) == _unpack_condition_args((None, None), x, peaks))
+    assert_((1, None) == _unpack_condition_args(1, x, peaks))
+    assert_((1, None) == _unpack_condition_args((1, None), x, peaks))
+    assert_((None, 2) == _unpack_condition_args((None, 2), x, peaks))
+    assert_((3., 4.5) == _unpack_condition_args((3., 4.5), x, peaks))
+
+    # Test if borders are correctly reduced with `peaks`
+    amin_calc, amax_calc = _unpack_condition_args((amin_true, amax_true), x, peaks)
+    assert_equal(amin_calc, amin_true[peaks])
+    assert_equal(amax_calc, amax_true[peaks])
+
+    # Test raises if array borders don't match x
+    with raises(ValueError, match="array size of lower"):
+        _unpack_condition_args(amin_true, np.arange(11), peaks)
+    with raises(ValueError, match="array size of upper"):
+        _unpack_condition_args((None, amin_true), np.arange(11), peaks)
+
+
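+# A minimal illustrative sketch, not part of the original test suite, of the
+# interval handling verified above: each `find_peaks` condition accepts a
+# scalar (an open-ended lower border), a 2-tuple of borders, or arrays that
+# match x and are reduced to the detected peaks.
+def _sketch_condition_args():
+    from scipy.signal import find_peaks
+    x = np.array([0., 2., 0., 3., 0., 4., 0.])
+    # Scalar: minimal required height.
+    assert_equal(find_peaks(x, height=2.5)[0], [3, 5])
+    # Tuple: (min, max) interval; None leaves a border open.
+    assert_equal(find_peaks(x, height=(None, 3.5))[0], [1, 3])
+    # Arrays: per-sample borders, evaluated at each peak's position.
+    assert_equal(find_peaks(x, height=np.linspace(0, 5, x.size))[0], [1, 3])
+
+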
+class TestFindPeaks:
+
+    # Keys of optionally returned properties
+    property_keys = {'peak_heights', 'left_thresholds', 'right_thresholds',
+                     'prominences', 'left_bases', 'right_bases', 'widths',
+                     'width_heights', 'left_ips', 'right_ips'}
+
+    def test_constant(self):
+        """
+        Test behavior for signal without local maxima.
+        """
+        open_interval = (None, None)
+        peaks, props = find_peaks(np.ones(10),
+                                  height=open_interval, threshold=open_interval,
+                                  prominence=open_interval, width=open_interval)
+        assert_(peaks.size == 0)
+        for key in self.property_keys:
+            assert_(props[key].size == 0)
+
+    def test_plateau_size(self):
+        """
+        Test plateau size condition for peaks.
+        """
+        # Prepare a signal with peaks whose peak_height == plateau_size
+        plateau_sizes = np.array([1, 2, 3, 4, 8, 20, 111])
+        x = np.zeros(plateau_sizes.size * 2 + 1)
+        x[1::2] = plateau_sizes
+        repeats = np.ones(x.size, dtype=int)
+        repeats[1::2] = x[1::2]
+        x = np.repeat(x, repeats)
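+        # x now contains, for each size s in plateau_sizes, a plateau of s
+        # samples at height s, with single zeros separating the plateaus.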
+
+        # Test full output
+        peaks, props = find_peaks(x, plateau_size=(None, None))
+        assert_equal(peaks, [1, 3, 7, 11, 18, 33, 100])
+        assert_equal(props["plateau_sizes"], plateau_sizes)
+        assert_equal(props["left_edges"], peaks - (plateau_sizes - 1) // 2)
+        assert_equal(props["right_edges"], peaks + plateau_sizes // 2)
+
+        # Test conditions
+        assert_equal(find_peaks(x, plateau_size=4)[0], [11, 18, 33, 100])
+        assert_equal(find_peaks(x, plateau_size=(None, 3.5))[0], [1, 3, 7])
+        assert_equal(find_peaks(x, plateau_size=(5, 50))[0], [18, 33])
+
+    def test_height_condition(self):
+        """
+        Test height condition for peaks.
+        """
+        x = (0., 1/3, 0., 2.5, 0, 4., 0)
+        peaks, props = find_peaks(x, height=(None, None))
+        assert_equal(peaks, np.array([1, 3, 5]))
+        assert_equal(props['peak_heights'], np.array([1/3, 2.5, 4.]))
+        assert_equal(find_peaks(x, height=0.5)[0], np.array([3, 5]))
+        assert_equal(find_peaks(x, height=(None, 3))[0], np.array([1, 3]))
+        assert_equal(find_peaks(x, height=(2, 3))[0], np.array([3]))
+
+    def test_threshold_condition(self):
+        """
+        Test threshold condition for peaks.
+        """
+        x = (0, 2, 1, 4, -1)
+        peaks, props = find_peaks(x, threshold=(None, None))
+        assert_equal(peaks, np.array([1, 3]))
+        assert_equal(props['left_thresholds'], np.array([2, 3]))
+        assert_equal(props['right_thresholds'], np.array([1, 5]))
+        assert_equal(find_peaks(x, threshold=2)[0], np.array([3]))
+        assert_equal(find_peaks(x, threshold=3.5)[0], np.array([]))
+        assert_equal(find_peaks(x, threshold=(None, 5))[0], np.array([1, 3]))
+        assert_equal(find_peaks(x, threshold=(None, 4))[0], np.array([1]))
+        assert_equal(find_peaks(x, threshold=(2, 4))[0], np.array([]))
+
+    def test_distance_condition(self):
+        """
+        Test distance condition for peaks.
+        """
+        # Peaks of different height with constant distance 3
+        peaks_all = np.arange(1, 21, 3)
+        x = np.zeros(21)
+        x[peaks_all] += np.linspace(1, 2, peaks_all.size)
+
+        # Test if peaks with "minimal" distance are still selected (distance = 3)
+        assert_equal(find_peaks(x, distance=3)[0], peaks_all)
+
+        # Select every second peak (distance > 3)
+        peaks_subset = find_peaks(x, distance=3.0001)[0]
+        # Test if peaks_subset is a subset of peaks_all
+        assert_(
+            np.setdiff1d(peaks_subset, peaks_all, assume_unique=True).size == 0
+        )
+        # Test if every second peak was removed
+        assert_equal(np.diff(peaks_subset), 6)
+
+        # Test priority of peak removal
+        x = [-2, 1, -1, 0, -3]
+        peaks_subset = find_peaks(x, distance=10)[0]  # use distance > x size
+        assert_(peaks_subset.size == 1 and peaks_subset[0] == 1)
+
+    def test_prominence_condition(self):
+        """
+        Test prominence condition for peaks.
+        """
+        x = np.linspace(0, 10, 100)
+        peaks_true = np.arange(1, 99, 2)
+        offset = np.linspace(1, 10, peaks_true.size)
+        x[peaks_true] += offset
+        prominences = x[peaks_true] - x[peaks_true + 1]
+        interval = (3, 9)
+        keep = np.nonzero(
+            (interval[0] <= prominences) & (prominences <= interval[1]))
+
+        peaks_calc, properties = find_peaks(x, prominence=interval)
+        assert_equal(peaks_calc, peaks_true[keep])
+        assert_equal(properties['prominences'], prominences[keep])
+        assert_equal(properties['left_bases'], 0)
+        assert_equal(properties['right_bases'], peaks_true[keep] + 1)
+
+    def test_width_condition(self):
+        """
+        Test width condition for peaks.
+        """
+        x = np.array([1, 0, 1, 2, 1, 0, -1, 4, 0])
+        peaks, props = find_peaks(x, width=(None, 2), rel_height=0.75)
+        assert_equal(peaks.size, 1)
+        assert_equal(peaks, 7)
+        assert_allclose(props['widths'], 1.35)
+        assert_allclose(props['width_heights'], 1.)
+        assert_allclose(props['left_ips'], 6.4)
+        assert_allclose(props['right_ips'], 7.75)
+
+    def test_properties(self):
+        """
+        Test returned properties.
+        """
+        open_interval = (None, None)
+        x = [0, 1, 0, 2, 1.5, 0, 3, 0, 5, 9]
+        peaks, props = find_peaks(x,
+                                  height=open_interval, threshold=open_interval,
+                                  prominence=open_interval, width=open_interval)
+        assert_(len(props) == len(self.property_keys))
+        for key in self.property_keys:
+            assert_(peaks.size == props[key].size)
+
+    def test_raises(self):
+        """
+        Test exceptions raised by function.
+        """
+        with raises(ValueError, match="1-D array"):
+            find_peaks(np.array(1))
+        with raises(ValueError, match="1-D array"):
+            find_peaks(np.ones((2, 2)))
+        with raises(ValueError, match="distance"):
+            find_peaks(np.arange(10), distance=-1)
+
+    @pytest.mark.filterwarnings("ignore:some peaks have a prominence of 0",
+                                "ignore:some peaks have a width of 0")
+    def test_wlen_smaller_plateau(self):
+        """
+        Test behavior of prominence and width calculation if the given window
+        length is smaller than a peak's plateau size.
+
+        Regression test for gh-9110.
+        """
+        peaks, props = find_peaks([0, 1, 1, 1, 0], prominence=(None, None),
+                                  width=(None, None), wlen=2)
+        assert_equal(peaks, 2)
+        assert_equal(props["prominences"], 0)
+        assert_equal(props["widths"], 0)
+        assert_equal(props["width_heights"], 1)
+        for key in ("left_bases", "right_bases", "left_ips", "right_ips"):
+            assert_equal(props[key], peaks)
+
+    @pytest.mark.parametrize("kwargs", [
+        {},
+        {"distance": 3.0},
+        {"prominence": (None, None)},
+        {"width": (None, 2)},
+    ])
+    def test_readonly_array(self, kwargs):
+        """
+        Test readonly arrays are accepted.
+        """
+        x = np.linspace(0, 10, 15)
+        x_readonly = x.copy()
+        x_readonly.flags.writeable = False
+
+        peaks, _ = find_peaks(x)
+        peaks_readonly, _ = find_peaks(x_readonly, **kwargs)
+
+        assert_allclose(peaks, peaks_readonly)
+
+
+class TestFindPeaksCwt:
+
+    def test_find_peaks_exact(self):
+        """
+        Generate a series of Gaussians and attempt to find the peak locations.
+        """
+        sigmas = [5.0, 3.0, 10.0, 20.0, 10.0, 50.0]
+        num_points = 500
+        test_data, act_locs = _gen_gaussians_even(sigmas, num_points)
+        widths = np.arange(0.1, max(sigmas))
+        found_locs = find_peaks_cwt(test_data, widths, gap_thresh=2, min_snr=0,
+                                    min_length=None)
+        np.testing.assert_array_equal(found_locs, act_locs,
+                                      "Found maximum locations did not equal "
+                                      "those expected")
+
+    def test_find_peaks_withnoise(self):
+        """
+        Verify that peak locations are (approximately) found for a series of
+        Gaussians with added noise.
+        """
+        sigmas = [5.0, 3.0, 10.0, 20.0, 10.0, 50.0]
+        num_points = 500
+        test_data, act_locs = _gen_gaussians_even(sigmas, num_points)
+        widths = np.arange(0.1, max(sigmas))
+        noise_amp = 0.07
+        np.random.seed(18181911)
+        test_data += (np.random.rand(num_points) - 0.5)*(2*noise_amp)
+        found_locs = find_peaks_cwt(test_data, widths, min_length=15,
+                                    gap_thresh=1, min_snr=noise_amp / 5)
+
+        np.testing.assert_equal(len(found_locs), len(act_locs),
+                                'Different number of peaks found than expected')
+        diffs = np.abs(found_locs - act_locs)
+        max_diffs = np.array(sigmas) / 5
+        np.testing.assert_array_less(diffs, max_diffs,
+                                     'Maximum location differed by more than '
+                                     '%s' % (max_diffs,))
+
+    def test_find_peaks_nopeak(self):
+        """
+        Verify that no peak is found in data that is just noise.
+        """
+        noise_amp = 1.0
+        num_points = 100
+        np.random.seed(181819141)
+        test_data = (np.random.rand(num_points) - 0.5)*(2*noise_amp)
+        widths = np.arange(10, 50)
+        found_locs = find_peaks_cwt(test_data, widths, min_snr=5, noise_perc=30)
+        np.testing.assert_equal(len(found_locs), 0)
+
+    def test_find_peaks_with_non_default_wavelets(self):
+        x = gaussian(200, 2)
+        widths = np.array([1, 2, 3, 4])
+        a = find_peaks_cwt(x, widths, wavelet=gaussian)
+
+        np.testing.assert_equal(np.array([100]), a)
+
+    def test_find_peaks_window_size(self):
+        """
+        Verify that window_size is passed correctly to the private function
+        and affects the result.
+        """
+        sigmas = [2.0, 2.0]
+        num_points = 1000
+        test_data, act_locs = _gen_gaussians_even(sigmas, num_points)
+        widths = np.arange(0.1, max(sigmas), 0.2)
+        noise_amp = 0.05
+        np.random.seed(18181911)
+        test_data += (np.random.rand(num_points) - 0.5)*(2*noise_amp)
+
+        # Possibly contrived negative region to throw off peak finding
+        # when window_size is too large
+        test_data[250:320] -= 1
+
+        found_locs = find_peaks_cwt(test_data, widths, gap_thresh=2, min_snr=3,
+                                    min_length=None, window_size=None)
+        with pytest.raises(AssertionError):
+            assert found_locs.size == act_locs.size
+
+        found_locs = find_peaks_cwt(test_data, widths, gap_thresh=2, min_snr=3,
+                                    min_length=None, window_size=20)
+        assert found_locs.size == act_locs.size
+
+    def test_find_peaks_with_one_width(self):
+        """
+        Verify that the `width` argument in `find_peaks_cwt` can be a float.
+        """
+        xs = np.arange(0, np.pi, 0.05)
+        test_data = np.sin(xs)
+        widths = 1
+        found_locs = find_peaks_cwt(test_data, widths)
+
+        np.testing.assert_equal(found_locs, 32)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_result_type.py b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_result_type.py
new file mode 100644
index 00000000..58fdd458
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_result_type.py
@@ -0,0 +1,52 @@
+# Regression tests on result types of some signal functions
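+#
+# The pattern checked throughout: single-precision float input should yield
+# single-precision output, while integer or mixed-precision input is
+# promoted to float64.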
+
+import numpy as np
+from numpy.testing import assert_
+
+from scipy.signal import (decimate,
+                          lfilter_zi,
+                          lfiltic,
+                          sos2tf,
+                          sosfilt_zi)
+
+
+def test_decimate():
+    ones_f32 = np.ones(32, dtype=np.float32)
+    assert_(decimate(ones_f32, 2).dtype == np.float32)
+
+    ones_i64 = np.ones(32, dtype=np.int64)
+    assert_(decimate(ones_i64, 2).dtype == np.float64)
+
+
+def test_lfilter_zi():
+    b_f32 = np.array([1, 2, 3], dtype=np.float32)
+    a_f32 = np.array([4, 5, 6], dtype=np.float32)
+    assert_(lfilter_zi(b_f32, a_f32).dtype == np.float32)
+
+
+def test_lfiltic():
+    # lfiltic used to return float32 when given a mix of f32 / f64 args
+    b_f32 = np.array([1, 2, 3], dtype=np.float32)
+    a_f32 = np.array([4, 5, 6], dtype=np.float32)
+    x_f32 = np.ones(32, dtype=np.float32)
+
+    b_f64 = b_f32.astype(np.float64)
+    a_f64 = a_f32.astype(np.float64)
+    x_f64 = x_f32.astype(np.float64)
+
+    assert_(lfiltic(b_f64, a_f32, x_f32).dtype == np.float64)
+    assert_(lfiltic(b_f32, a_f64, x_f32).dtype == np.float64)
+    assert_(lfiltic(b_f32, a_f32, x_f64).dtype == np.float64)
+    assert_(lfiltic(b_f32, a_f32, x_f32, x_f64).dtype == np.float64)
+
+
+def test_sos2tf():
+    sos_f32 = np.array([[4, 5, 6, 1, 2, 3]], dtype=np.float32)
+    b, a = sos2tf(sos_f32)
+    assert_(b.dtype == np.float32)
+    assert_(a.dtype == np.float32)
+
+
+def test_sosfilt_zi():
+    sos_f32 = np.array([[4, 5, 6, 1, 2, 3]], dtype=np.float32)
+    assert_(sosfilt_zi(sos_f32).dtype == np.float32)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_savitzky_golay.py b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_savitzky_golay.py
new file mode 100644
index 00000000..fbbf370b
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_savitzky_golay.py
@@ -0,0 +1,358 @@
+import pytest
+import numpy as np
+from numpy.testing import (assert_allclose, assert_equal,
+                           assert_almost_equal, assert_array_equal,
+                           assert_array_almost_equal)
+
+from scipy.ndimage import convolve1d
+
+from scipy.signal import savgol_coeffs, savgol_filter
+from scipy.signal._savitzky_golay import _polyder
+
+
+def check_polyder(p, m, expected):
+    dp = _polyder(p, m)
+    assert_array_equal(dp, expected)
+
+
+def test_polyder():
+    cases = [
+        ([5], 0, [5]),
+        ([5], 1, [0]),
+        ([3, 2, 1], 0, [3, 2, 1]),
+        ([3, 2, 1], 1, [6, 2]),
+        ([3, 2, 1], 2, [6]),
+        ([3, 2, 1], 3, [0]),
+        ([[3, 2, 1], [5, 6, 7]], 0, [[3, 2, 1], [5, 6, 7]]),
+        ([[3, 2, 1], [5, 6, 7]], 1, [[6, 2], [10, 6]]),
+        ([[3, 2, 1], [5, 6, 7]], 2, [[6], [10]]),
+        ([[3, 2, 1], [5, 6, 7]], 3, [[0], [0]]),
+    ]
+    for p, m, expected in cases:
+        check_polyder(np.array(p).T, m, np.array(expected).T)
+
+
+# --------------------------------------------------------------------
+# savgol_coeffs tests
+# --------------------------------------------------------------------
+
+def alt_sg_coeffs(window_length, polyorder, pos):
+    """This is an alternative implementation of the SG coefficients.
+
+    It uses numpy.polyfit and numpy.polyval. The results should be
+    equivalent to those of savgol_coeffs(), but this implementation
+    is slower.
+
+    window_length should be odd.
+
+    """
+    if pos is None:
+        pos = window_length // 2
+    t = np.arange(window_length)
+    unit = (t == pos).astype(int)
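+    # The least-squares projection matrix is symmetric, so fitting a
+    # polynomial to the unit impulse at `pos` and evaluating the fit on the
+    # whole grid recovers the row of filter weights that produces the
+    # smoothed value at `pos`.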
+    h = np.polyval(np.polyfit(t, unit, polyorder), t)
+    return h
+
+
+def test_sg_coeffs_trivial():
+    # Test a trivial case of savgol_coeffs: polyorder = window_length - 1
+    h = savgol_coeffs(1, 0)
+    assert_allclose(h, [1])
+
+    h = savgol_coeffs(3, 2)
+    assert_allclose(h, [0, 1, 0], atol=1e-10)
+
+    h = savgol_coeffs(5, 4)
+    assert_allclose(h, [0, 0, 1, 0, 0], atol=1e-10)
+
+    h = savgol_coeffs(5, 4, pos=1)
+    assert_allclose(h, [0, 0, 0, 1, 0], atol=1e-10)
+
+    h = savgol_coeffs(5, 4, pos=1, use='dot')
+    assert_allclose(h, [0, 1, 0, 0, 0], atol=1e-10)
+
+
+def compare_coeffs_to_alt(window_length, order):
+    # For the given window_length and order, compare the results
+    # of savgol_coeffs and alt_sg_coeffs for pos from 0 to window_length - 1.
+    # Also include pos=None.
+    for pos in [None] + list(range(window_length)):
+        h1 = savgol_coeffs(window_length, order, pos=pos, use='dot')
+        h2 = alt_sg_coeffs(window_length, order, pos=pos)
+        assert_allclose(h1, h2, atol=1e-10,
+                        err_msg=("window_length = %d, order = %d, pos = %s" %
+                                 (window_length, order, pos)))
+
+
+def test_sg_coeffs_compare():
+    # Compare savgol_coeffs() to alt_sg_coeffs().
+    for window_length in range(1, 8, 2):
+        for order in range(window_length):
+            compare_coeffs_to_alt(window_length, order)
+
+
+def test_sg_coeffs_exact():
+    polyorder = 4
+    window_length = 9
+    halflen = window_length // 2
+
+    x = np.linspace(0, 21, 43)
+    delta = x[1] - x[0]
+
+    # The data is a cubic polynomial.  We'll use an order 4
+    # SG filter, so the filtered values should equal the input data
+    # (except within half window_length of the edges).
+    y = 0.5 * x ** 3 - x
+    h = savgol_coeffs(window_length, polyorder)
+    y0 = convolve1d(y, h)
+    assert_allclose(y0[halflen:-halflen], y[halflen:-halflen])
+
+    # Check the same input, but use deriv=1.  dy is the exact result.
+    dy = 1.5 * x ** 2 - 1
+    h = savgol_coeffs(window_length, polyorder, deriv=1, delta=delta)
+    y1 = convolve1d(y, h)
+    assert_allclose(y1[halflen:-halflen], dy[halflen:-halflen])
+
+    # Check the same input, but use deriv=2. d2y is the exact result.
+    d2y = 3.0 * x
+    h = savgol_coeffs(window_length, polyorder, deriv=2, delta=delta)
+    y2 = convolve1d(y, h)
+    assert_allclose(y2[halflen:-halflen], d2y[halflen:-halflen])
+
+
+def test_sg_coeffs_deriv():
+    # The data in `x` is a sampled parabola, so using savgol_coeffs with an
+    # order 2 or higher polynomial should give exact results.
+    i = np.array([-2.0, 0.0, 2.0, 4.0, 6.0])
+    x = i ** 2 / 4
+    dx = i / 2
+    d2x = np.full_like(i, 0.5)
+    for pos in range(x.size):
+        coeffs0 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot')
+        assert_allclose(coeffs0.dot(x), x[pos], atol=1e-10)
+        coeffs1 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot', deriv=1)
+        assert_allclose(coeffs1.dot(x), dx[pos], atol=1e-10)
+        coeffs2 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot', deriv=2)
+        assert_allclose(coeffs2.dot(x), d2x[pos], atol=1e-10)
+
+
+def test_sg_coeffs_deriv_gt_polyorder():
+    """
+    If deriv > polyorder, the coefficients should be all 0.
+    This is a regression test for a bug where, e.g.,
+        savgol_coeffs(5, polyorder=1, deriv=2)
+    raised an error.
+    """
+    coeffs = savgol_coeffs(5, polyorder=1, deriv=2)
+    assert_array_equal(coeffs, np.zeros(5))
+    coeffs = savgol_coeffs(7, polyorder=4, deriv=6)
+    assert_array_equal(coeffs, np.zeros(7))
+
+
+def test_sg_coeffs_large():
+    # Test that for large values of window_length and polyorder the array of
+    # coefficients returned is symmetric. The aim is to ensure that
+    # no potential numeric overflow occurs.
+    coeffs0 = savgol_coeffs(31, 9)
+    assert_array_almost_equal(coeffs0, coeffs0[::-1])
+    coeffs1 = savgol_coeffs(31, 9, deriv=1)
+    assert_array_almost_equal(coeffs1, -coeffs1[::-1])
+
+# --------------------------------------------------------------------
+# savgol_coeffs tests for even window length
+# --------------------------------------------------------------------
+
+
+def test_sg_coeffs_even_window_length():
+    # Simple case - deriv=0, polyorder=0, 1
+    window_lengths = [4, 6, 8, 10, 12, 14, 16]
+    for length in window_lengths:
+        h_p_d = savgol_coeffs(length, 0, 0)
+        assert_allclose(h_p_d, 1/length)
+
+    # Verify with closed forms
+    # deriv=1, polyorder=1, 2
+    def h_p_d_closed_form_1(k, m):
+        return 6*(k - 0.5)/((2*m + 1)*m*(2*m - 1))
+
+    # deriv=2, polyorder=2
+    def h_p_d_closed_form_2(k, m):
+        numer = 15*(-4*m**2 + 1 + 12*(k - 0.5)**2)
+        denom = 4*(2*m + 1)*(m + 1)*m*(m - 1)*(2*m - 1)
+        return numer/denom
+
+    for length in window_lengths:
+        m = length//2
+        expected_output = [h_p_d_closed_form_1(k, m)
+                           for k in range(-m + 1, m + 1)][::-1]
+        actual_output = savgol_coeffs(length, 1, 1)
+        assert_allclose(expected_output, actual_output)
+        actual_output = savgol_coeffs(length, 2, 1)
+        assert_allclose(expected_output, actual_output)
+
+        expected_output = [h_p_d_closed_form_2(k, m)
+                           for k in range(-m + 1, m + 1)][::-1]
+        actual_output = savgol_coeffs(length, 2, 2)
+        assert_allclose(expected_output, actual_output)
+        actual_output = savgol_coeffs(length, 3, 2)
+        assert_allclose(expected_output, actual_output)
+
+# --------------------------------------------------------------------
+# savgol_filter tests
+# --------------------------------------------------------------------
+
+
+def test_sg_filter_trivial():
+    """ Test some trivial edge cases for savgol_filter()."""
+    x = np.array([1.0])
+    y = savgol_filter(x, 1, 0)
+    assert_equal(y, [1.0])
+
+    # Input is a single value. With a window length of 3 and polyorder 1,
+    # the value in y is from the straight-line fit of (-1,0), (0,3) and
+    # (1, 0) at 0. This is just the average of the three values, hence 1.0.
+    x = np.array([3.0])
+    y = savgol_filter(x, 3, 1, mode='constant')
+    assert_almost_equal(y, [1.0], decimal=15)
+
+    x = np.array([3.0])
+    y = savgol_filter(x, 3, 1, mode='nearest')
+    assert_almost_equal(y, [3.0], decimal=15)
+
+    x = np.array([1.0] * 3)
+    y = savgol_filter(x, 3, 1, mode='wrap')
+    assert_almost_equal(y, [1.0, 1.0, 1.0], decimal=15)
+
+
+def test_sg_filter_basic():
+    # Some basic test cases for savgol_filter().
+    x = np.array([1.0, 2.0, 1.0])
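+    # With window 3 and polyorder 1 the smoothed value is the window mean;
+    # mode='constant' pads with 0, so the edge windows [0, 1, 2] and
+    # [2, 1, 0] are fit exactly by a line through 1.0, while the middle
+    # window gives (1 + 2 + 1) / 3 = 4/3.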
+    y = savgol_filter(x, 3, 1, mode='constant')
+    assert_allclose(y, [1.0, 4.0 / 3, 1.0])
+
+    y = savgol_filter(x, 3, 1, mode='mirror')
+    assert_allclose(y, [5.0 / 3, 4.0 / 3, 5.0 / 3])
+
+    y = savgol_filter(x, 3, 1, mode='wrap')
+    assert_allclose(y, [4.0 / 3, 4.0 / 3, 4.0 / 3])
+
+
+def test_sg_filter_2d():
+    x = np.array([[1.0, 2.0, 1.0],
+                  [2.0, 4.0, 2.0]])
+    expected = np.array([[1.0, 4.0 / 3, 1.0],
+                         [2.0, 8.0 / 3, 2.0]])
+    y = savgol_filter(x, 3, 1, mode='constant')
+    assert_allclose(y, expected)
+
+    y = savgol_filter(x.T, 3, 1, mode='constant', axis=0)
+    assert_allclose(y, expected.T)
+
+
+def test_sg_filter_interp_edges():
+    # Another test with low degree polynomial data, for which we can easily
+    # give the exact results. In this test, we use mode='interp', so
+    # savgol_filter should match the exact solution for the entire data set,
+    # including the edges.
+    t = np.linspace(-5, 5, 21)
+    delta = t[1] - t[0]
+    # Polynomial test data.
+    x = np.array([t,
+                  3 * t ** 2,
+                  t ** 3 - t])
+    dx = np.array([np.ones_like(t),
+                   6 * t,
+                   3 * t ** 2 - 1.0])
+    d2x = np.array([np.zeros_like(t),
+                    np.full_like(t, 6),
+                    6 * t])
+
+    window_length = 7
+
+    y = savgol_filter(x, window_length, 3, axis=-1, mode='interp')
+    assert_allclose(y, x, atol=1e-12)
+
+    y1 = savgol_filter(x, window_length, 3, axis=-1, mode='interp',
+                       deriv=1, delta=delta)
+    assert_allclose(y1, dx, atol=1e-12)
+
+    y2 = savgol_filter(x, window_length, 3, axis=-1, mode='interp',
+                       deriv=2, delta=delta)
+    assert_allclose(y2, d2x, atol=1e-12)
+
+    # Transpose everything, and test again with axis=0.
+
+    x = x.T
+    dx = dx.T
+    d2x = d2x.T
+
+    y = savgol_filter(x, window_length, 3, axis=0, mode='interp')
+    assert_allclose(y, x, atol=1e-12)
+
+    y1 = savgol_filter(x, window_length, 3, axis=0, mode='interp',
+                       deriv=1, delta=delta)
+    assert_allclose(y1, dx, atol=1e-12)
+
+    y2 = savgol_filter(x, window_length, 3, axis=0, mode='interp',
+                       deriv=2, delta=delta)
+    assert_allclose(y2, d2x, atol=1e-12)
+
+
+def test_sg_filter_interp_edges_3d():
+    # Test mode='interp' with a 3-D array.
+    t = np.linspace(-5, 5, 21)
+    delta = t[1] - t[0]
+    x1 = np.array([t, -t])
+    x2 = np.array([t ** 2, 3 * t ** 2 + 5])
+    x3 = np.array([t ** 3, 2 * t ** 3 + t ** 2 - 0.5 * t])
+    dx1 = np.array([np.ones_like(t), -np.ones_like(t)])
+    dx2 = np.array([2 * t, 6 * t])
+    dx3 = np.array([3 * t ** 2, 6 * t ** 2 + 2 * t - 0.5])
+
+    # z has shape (3, 2, 21)
+    z = np.array([x1, x2, x3])
+    dz = np.array([dx1, dx2, dx3])
+
+    y = savgol_filter(z, 7, 3, axis=-1, mode='interp', delta=delta)
+    assert_allclose(y, z, atol=1e-10)
+
+    dy = savgol_filter(z, 7, 3, axis=-1, mode='interp', deriv=1, delta=delta)
+    assert_allclose(dy, dz, atol=1e-10)
+
+    # z has shape (3, 21, 2)
+    z = np.array([x1.T, x2.T, x3.T])
+    dz = np.array([dx1.T, dx2.T, dx3.T])
+
+    y = savgol_filter(z, 7, 3, axis=1, mode='interp', delta=delta)
+    assert_allclose(y, z, atol=1e-10)
+
+    dy = savgol_filter(z, 7, 3, axis=1, mode='interp', deriv=1, delta=delta)
+    assert_allclose(dy, dz, atol=1e-10)
+
+    # z has shape (21, 3, 2)
+    z = z.swapaxes(0, 1).copy()
+    dz = dz.swapaxes(0, 1).copy()
+
+    y = savgol_filter(z, 7, 3, axis=0, mode='interp', delta=delta)
+    assert_allclose(y, z, atol=1e-10)
+
+    dy = savgol_filter(z, 7, 3, axis=0, mode='interp', deriv=1, delta=delta)
+    assert_allclose(dy, dz, atol=1e-10)
+
+
+def test_sg_filter_valid_window_length_3d():
+    """Tests that the window_length check is using the correct axis."""
+
+    x = np.ones((10, 20, 30))
+
+    savgol_filter(x, window_length=29, polyorder=3, mode='interp')
+
+    with pytest.raises(ValueError, match='window_length must be less than'):
+        # window_length is more than x.shape[-1].
+        savgol_filter(x, window_length=31, polyorder=3, mode='interp')
+
+    savgol_filter(x, window_length=9, polyorder=3, axis=0, mode='interp')
+
+    with pytest.raises(ValueError, match='window_length must be less than'):
+        # window_length is more than x.shape[0].
+        savgol_filter(x, window_length=11, polyorder=3, axis=0, mode='interp')
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_signaltools.py b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_signaltools.py
new file mode 100644
index 00000000..1a828af8
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_signaltools.py
@@ -0,0 +1,3575 @@
+# -*- coding: utf-8 -*-
+import sys
+
+from concurrent.futures import ThreadPoolExecutor, as_completed
+from decimal import Decimal
+from itertools import product
+from math import gcd
+
+import pytest
+from pytest import raises as assert_raises
+from numpy.testing import (
+    assert_equal,
+    assert_almost_equal, assert_array_equal, assert_array_almost_equal,
+    assert_allclose, assert_, assert_array_less,
+    suppress_warnings)
+from numpy import array, arange
+import numpy as np
+
+from scipy.fft import fft
+from scipy.ndimage import correlate1d
+from scipy.optimize import fmin, linear_sum_assignment
+from scipy import signal
+from scipy.signal import (
+    correlate, correlate2d, correlation_lags, convolve, convolve2d,
+    fftconvolve, oaconvolve, choose_conv_method,
+    hilbert, hilbert2, lfilter, lfilter_zi, filtfilt, butter, zpk2tf, zpk2sos,
+    invres, invresz, vectorstrength, lfiltic, tf2sos, sosfilt, sosfiltfilt,
+    sosfilt_zi, tf2zpk, BadCoefficients, detrend, unique_roots, residue,
+    residuez)
+from scipy.signal.windows import hann
+from scipy.signal._signaltools import (_filtfilt_gust, _compute_factors,
+                                      _group_poles)
+from scipy.signal._upfirdn import _upfirdn_modes
+from scipy._lib import _testutils
+
+
+class _TestConvolve:
+
+    def test_basic(self):
+        a = [3, 4, 5, 6, 5, 4]
+        b = [1, 2, 3]
+        c = convolve(a, b)
+        assert_array_equal(c, array([3, 10, 22, 28, 32, 32, 23, 12]))
+
+    def test_same(self):
+        a = [3, 4, 5]
+        b = [1, 2, 3, 4]
+        c = convolve(a, b, mode="same")
+        assert_array_equal(c, array([10, 22, 34]))
+
+    def test_same_eq(self):
+        a = [3, 4, 5]
+        b = [1, 2, 3]
+        c = convolve(a, b, mode="same")
+        assert_array_equal(c, array([10, 22, 22]))
+
+    def test_complex(self):
+        x = array([1 + 1j, 2 + 1j, 3 + 1j])
+        y = array([1 + 1j, 2 + 1j])
+        z = convolve(x, y)
+        assert_array_equal(z, array([2j, 2 + 6j, 5 + 8j, 5 + 5j]))
+
+    def test_zero_rank(self):
+        a = 1289
+        b = 4567
+        c = convolve(a, b)
+        assert_equal(c, a * b)
+
+    def test_broadcastable(self):
+        a = np.arange(27).reshape(3, 3, 3)
+        b = np.arange(3)
+        for i in range(3):
+            b_shape = [1]*3
+            b_shape[i] = 3
+            x = convolve(a, b.reshape(b_shape), method='direct')
+            y = convolve(a, b.reshape(b_shape), method='fft')
+            assert_allclose(x, y)
+
+    def test_single_element(self):
+        a = array([4967])
+        b = array([3920])
+        c = convolve(a, b)
+        assert_equal(c, a * b)
+
+    def test_2d_arrays(self):
+        a = [[1, 2, 3], [3, 4, 5]]
+        b = [[2, 3, 4], [4, 5, 6]]
+        c = convolve(a, b)
+        d = array([[2, 7, 16, 17, 12],
+                   [10, 30, 62, 58, 38],
+                   [12, 31, 58, 49, 30]])
+        assert_array_equal(c, d)
+
+    def test_input_swapping(self):
+        small = arange(8).reshape(2, 2, 2)
+        big = 1j * arange(27).reshape(3, 3, 3)
+        big += arange(27)[::-1].reshape(3, 3, 3)
+
+        out_array = array(
+            [[[0 + 0j, 26 + 0j, 25 + 1j, 24 + 2j],
+              [52 + 0j, 151 + 5j, 145 + 11j, 93 + 11j],
+              [46 + 6j, 133 + 23j, 127 + 29j, 81 + 23j],
+              [40 + 12j, 98 + 32j, 93 + 37j, 54 + 24j]],
+
+             [[104 + 0j, 247 + 13j, 237 + 23j, 135 + 21j],
+              [282 + 30j, 632 + 96j, 604 + 124j, 330 + 86j],
+              [246 + 66j, 548 + 180j, 520 + 208j, 282 + 134j],
+              [142 + 66j, 307 + 161j, 289 + 179j, 153 + 107j]],
+
+             [[68 + 36j, 157 + 103j, 147 + 113j, 81 + 75j],
+              [174 + 138j, 380 + 348j, 352 + 376j, 186 + 230j],
+              [138 + 174j, 296 + 432j, 268 + 460j, 138 + 278j],
+              [70 + 138j, 145 + 323j, 127 + 341j, 63 + 197j]],
+
+             [[32 + 72j, 68 + 166j, 59 + 175j, 30 + 100j],
+              [68 + 192j, 139 + 433j, 117 + 455j, 57 + 255j],
+              [38 + 222j, 73 + 499j, 51 + 521j, 21 + 291j],
+              [12 + 144j, 20 + 318j, 7 + 331j, 0 + 182j]]])
+
+        assert_array_equal(convolve(small, big, 'full'), out_array)
+        assert_array_equal(convolve(big, small, 'full'), out_array)
+        assert_array_equal(convolve(small, big, 'same'),
+                           out_array[1:3, 1:3, 1:3])
+        assert_array_equal(convolve(big, small, 'same'),
+                           out_array[0:3, 0:3, 0:3])
+        assert_array_equal(convolve(small, big, 'valid'),
+                           out_array[1:3, 1:3, 1:3])
+        assert_array_equal(convolve(big, small, 'valid'),
+                           out_array[1:3, 1:3, 1:3])
+
+    def test_invalid_params(self):
+        a = [3, 4, 5]
+        b = [1, 2, 3]
+        assert_raises(ValueError, convolve, a, b, mode='spam')
+        assert_raises(ValueError, convolve, a, b, mode='eggs', method='fft')
+        assert_raises(ValueError, convolve, a, b, mode='ham', method='direct')
+        assert_raises(ValueError, convolve, a, b, mode='full', method='bacon')
+        assert_raises(ValueError, convolve, a, b, mode='same', method='bacon')
+
+
+class TestConvolve(_TestConvolve):
+
+    def test_valid_mode2(self):
+        # See gh-5897
+        a = [1, 2, 3, 6, 5, 3]
+        b = [2, 3, 4, 5, 3, 4, 2, 2, 1]
+        expected = [70, 78, 73, 65]
+
+        out = convolve(a, b, 'valid')
+        assert_array_equal(out, expected)
+
+        out = convolve(b, a, 'valid')
+        assert_array_equal(out, expected)
+
+        a = [1 + 5j, 2 - 1j, 3 + 0j]
+        b = [2 - 3j, 1 + 0j]
+        expected = [2 - 3j, 8 - 10j]
+
+        out = convolve(a, b, 'valid')
+        assert_array_equal(out, expected)
+
+        out = convolve(b, a, 'valid')
+        assert_array_equal(out, expected)
+
+    def test_same_mode(self):
+        a = [1, 2, 3, 3, 1, 2]
+        b = [1, 4, 3, 4, 5, 6, 7, 4, 3, 2, 1, 1, 3]
+        c = convolve(a, b, 'same')
+        d = array([57, 61, 63, 57, 45, 36])
+        assert_array_equal(c, d)
+
+    def test_invalid_shapes(self):
+        # By "invalid," we mean that no one
+        # array has dimensions that are all at
+        # least as large as the corresponding
+        # dimensions of the other array. This
+        # setup should throw a ValueError.
+        a = np.arange(1, 7).reshape((2, 3))
+        b = np.arange(-6, 0).reshape((3, 2))
+
+        assert_raises(ValueError, convolve, a, b, mode='valid')
+        assert_raises(ValueError, convolve, b, a, mode='valid')
+
+    def test_convolve_method(self, n=100):
+        types = sum([t for _, t in np.sctypes.items()], [])
+        types = {np.dtype(t).name for t in types}
+
+        # These types include 'bool' and all precisions (int8, float32, etc)
+        # The removed types throw errors in correlate or fftconvolve
+        for dtype in ['complex256', 'complex192', 'float128', 'float96',
+                      'str', 'void', 'bytes', 'object', 'unicode', 'string']:
+            if dtype in types:
+                types.remove(dtype)
+
+        args = [(t1, t2, mode) for t1 in types for t2 in types
+                               for mode in ['valid', 'full', 'same']]
+
+        # These are random arrays, which makes this test much stronger than
+        # simply convolving two np.ones arrays.
+        np.random.seed(42)
+        array_types = {'i': np.random.choice([0, 1], size=n),
+                       'f': np.random.randn(n)}
+        array_types['b'] = array_types['u'] = array_types['i']
+        array_types['c'] = array_types['f'] + 0.5j*array_types['f']
+
+        for t1, t2, mode in args:
+            x1 = array_types[np.dtype(t1).kind].astype(t1)
+            x2 = array_types[np.dtype(t2).kind].astype(t2)
+
+            results = {key: convolve(x1, x2, method=key, mode=mode)
+                       for key in ['fft', 'direct']}
+
+            assert_equal(results['fft'].dtype, results['direct'].dtype)
+
+            if 'bool' in t1 and 'bool' in t2:
+                assert_equal(choose_conv_method(x1, x2), 'direct')
+                continue
+
+            # Found by experiment: approximately the smallest (rtol, atol)
+            # values for which the tests pass.
+            if any([t in {'complex64', 'float32'} for t in [t1, t2]]):
+                kwargs = {'rtol': 1.0e-4, 'atol': 1e-6}
+            elif 'float16' in [t1, t2]:
+                # atol is default for np.allclose
+                kwargs = {'rtol': 1e-3, 'atol': 1e-3}
+            else:
+                # defaults for np.allclose (different from assert_allclose)
+                kwargs = {'rtol': 1e-5, 'atol': 1e-8}
+
+            assert_allclose(results['fft'], results['direct'], **kwargs)
+
+    def test_convolve_method_large_input(self):
+        # This is really a test that convolving two large integers should go
+        # to the direct method even when the fft method is requested.
+        for n in [10, 20, 50, 51, 52, 53, 54, 60, 62]:
+            z = np.array([2**n], dtype=np.int64)
+            fft = convolve(z, z, method='fft')
+            direct = convolve(z, z, method='direct')
+
+            # This is the range where integer precision is not yet a problem;
+            # issue #6076 has more detail, and hopefully more tests can be
+            # added once it is resolved.
+            if n < 50:
+                assert_equal(fft, direct)
+                assert_equal(fft, 2**(2*n))
+                assert_equal(direct, 2**(2*n))
+
+    def test_mismatched_dims(self):
+        # Input arrays should have the same number of dimensions
+        assert_raises(ValueError, convolve, [1], 2, method='direct')
+        assert_raises(ValueError, convolve, 1, [2], method='direct')
+        assert_raises(ValueError, convolve, [1], 2, method='fft')
+        assert_raises(ValueError, convolve, 1, [2], method='fft')
+        assert_raises(ValueError, convolve, [1], [[2]])
+        assert_raises(ValueError, convolve, [3], 2)
+
+
+class _TestConvolve2d:
+
+    def test_2d_arrays(self):
+        a = [[1, 2, 3], [3, 4, 5]]
+        b = [[2, 3, 4], [4, 5, 6]]
+        d = array([[2, 7, 16, 17, 12],
+                   [10, 30, 62, 58, 38],
+                   [12, 31, 58, 49, 30]])
+        e = convolve2d(a, b)
+        assert_array_equal(e, d)
+
+    def test_valid_mode(self):
+        e = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]]
+        f = [[1, 2, 3], [3, 4, 5]]
+        h = array([[62, 80, 98, 116, 134]])
+
+        g = convolve2d(e, f, 'valid')
+        assert_array_equal(g, h)
+
+        # See gh-5897
+        g = convolve2d(f, e, 'valid')
+        assert_array_equal(g, h)
+
+    def test_valid_mode_complex(self):
+        e = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]]
+        f = np.array([[1, 2, 3], [3, 4, 5]], dtype=complex) + 1j
+        h = array([[62.+24.j, 80.+30.j, 98.+36.j, 116.+42.j, 134.+48.j]])
+
+        g = convolve2d(e, f, 'valid')
+        assert_array_almost_equal(g, h)
+
+        # See gh-5897
+        g = convolve2d(f, e, 'valid')
+        assert_array_equal(g, h)
+
+    def test_fillvalue(self):
+        a = [[1, 2, 3], [3, 4, 5]]
+        b = [[2, 3, 4], [4, 5, 6]]
+        fillval = 1
+        c = convolve2d(a, b, 'full', 'fill', fillval)
+        d = array([[24, 26, 31, 34, 32],
+                   [28, 40, 62, 64, 52],
+                   [32, 46, 67, 62, 48]])
+        assert_array_equal(c, d)
+
+    def test_fillvalue_errors(self):
+        msg = "could not cast `fillvalue` directly to the output "
+        with np.testing.suppress_warnings() as sup:
+            sup.filter(np.ComplexWarning, "Casting complex values")
+            with assert_raises(ValueError, match=msg):
+                convolve2d([[1]], [[1, 2]], fillvalue=1j)
+
+        msg = "`fillvalue` must be scalar or an array with "
+        with assert_raises(ValueError, match=msg):
+            convolve2d([[1]], [[1, 2]], fillvalue=[1, 2])
+
+    def test_fillvalue_empty(self):
+        # Check that fillvalue being empty raises an error:
+        assert_raises(ValueError, convolve2d, [[1]], [[1, 2]],
+                      fillvalue=[])
+
+    def test_wrap_boundary(self):
+        a = [[1, 2, 3], [3, 4, 5]]
+        b = [[2, 3, 4], [4, 5, 6]]
+        c = convolve2d(a, b, 'full', 'wrap')
+        d = array([[80, 80, 74, 80, 80],
+                   [68, 68, 62, 68, 68],
+                   [80, 80, 74, 80, 80]])
+        assert_array_equal(c, d)
+
+    def test_sym_boundary(self):
+        a = [[1, 2, 3], [3, 4, 5]]
+        b = [[2, 3, 4], [4, 5, 6]]
+        c = convolve2d(a, b, 'full', 'symm')
+        d = array([[34, 30, 44, 62, 66],
+                   [52, 48, 62, 80, 84],
+                   [82, 78, 92, 110, 114]])
+        assert_array_equal(c, d)
+
+    @pytest.mark.parametrize('func', [convolve2d, correlate2d])
+    @pytest.mark.parametrize('boundary, expected',
+                             [('symm', [[37.0, 42.0, 44.0, 45.0]]),
+                              ('wrap', [[43.0, 44.0, 42.0, 39.0]])])
+    def test_same_with_boundary(self, func, boundary, expected):
+        # Test boundary='symm' and boundary='wrap' with a "long" kernel.
+        # The size of the kernel requires that the values in the "image"
+        # be extended more than once to handle the requested boundary method.
+        # This is a regression test for gh-8684 and gh-8814.
+        image = np.array([[2.0, -1.0, 3.0, 4.0]])
+        kernel = np.ones((1, 21))
+        result = func(image, kernel, mode='same', boundary=boundary)
+        # The expected results were calculated "by hand".  Because the
+        # kernel is all ones, the same result is expected for convolve2d
+        # and correlate2d.
+        assert_array_equal(result, expected)
+
+    def test_boundary_extension_same(self):
+        # Regression test for gh-12686.
+        # Use ndimage.convolve with appropriate arguments to create the
+        # expected result.
+        import scipy.ndimage as ndi
+        a = np.arange(1, 10*3+1, dtype=float).reshape(10, 3)
+        b = np.arange(1, 10*10+1, dtype=float).reshape(10, 10)
+        c = convolve2d(a, b, mode='same', boundary='wrap')
+        assert_array_equal(c, ndi.convolve(a, b, mode='wrap', origin=(-1, -1)))
+
+    def test_boundary_extension_full(self):
+        # Regression test for gh-12686.
+        # Use ndimage.convolve with appropriate arguments to create the
+        # expected result.
+        import scipy.ndimage as ndi
+        a = np.arange(1, 3*3+1, dtype=float).reshape(3, 3)
+        b = np.arange(1, 6*6+1, dtype=float).reshape(6, 6)
+        c = convolve2d(a, b, mode='full', boundary='wrap')
+        apad = np.pad(a, ((3, 3), (3, 3)), 'wrap')
+        assert_array_equal(c, ndi.convolve(apad, b, mode='wrap')[:-1, :-1])
+
+    def test_invalid_shapes(self):
+        # By "invalid," we mean that no one
+        # array has dimensions that are all at
+        # least as large as the corresponding
+        # dimensions of the other array. This
+        # setup should throw a ValueError.
+        a = np.arange(1, 7).reshape((2, 3))
+        b = np.arange(-6, 0).reshape((3, 2))
+
+        assert_raises(ValueError, convolve2d, a, b, mode='valid')
+        assert_raises(ValueError, convolve2d, b, a, mode='valid')
+
+
+class TestConvolve2d(_TestConvolve2d):
+
+    def test_same_mode(self):
+        e = [[1, 2, 3], [3, 4, 5]]
+        f = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]]
+        g = convolve2d(e, f, 'same')
+        h = array([[22, 28, 34],
+                   [80, 98, 116]])
+        assert_array_equal(g, h)
+
+    def test_valid_mode2(self):
+        # See gh-5897
+        e = [[1, 2, 3], [3, 4, 5]]
+        f = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]]
+        expected = [[62, 80, 98, 116, 134]]
+
+        out = convolve2d(e, f, 'valid')
+        assert_array_equal(out, expected)
+
+        out = convolve2d(f, e, 'valid')
+        assert_array_equal(out, expected)
+
+        e = [[1 + 1j, 2 - 3j], [3 + 1j, 4 + 0j]]
+        f = [[2 - 1j, 3 + 2j, 4 + 0j], [4 - 0j, 5 + 1j, 6 - 3j]]
+        expected = [[27 - 1j, 46. + 2j]]
+
+        out = convolve2d(e, f, 'valid')
+        assert_array_equal(out, expected)
+
+        # See gh-5897
+        out = convolve2d(f, e, 'valid')
+        assert_array_equal(out, expected)
+
+    def test_consistency_convolve_funcs(self):
+        # Compare np.convolve, signal.convolve, signal.convolve2d
+        a = np.arange(5)
+        b = np.array([3.2, 1.4, 3])
+        for mode in ['full', 'valid', 'same']:
+            assert_almost_equal(np.convolve(a, b, mode=mode),
+                                signal.convolve(a, b, mode=mode))
+            assert_almost_equal(np.squeeze(
+                signal.convolve2d([a], [b], mode=mode)),
+                signal.convolve(a, b, mode=mode))
+
+    def test_invalid_dims(self):
+        assert_raises(ValueError, convolve2d, 3, 4)
+        assert_raises(ValueError, convolve2d, [3], [4])
+        assert_raises(ValueError, convolve2d, [[[3]]], [[[4]]])
+
+    @pytest.mark.slow
+    @pytest.mark.xfail_on_32bit("Can't create large array for test")
+    def test_large_array(self):
+        # Test indexing doesn't overflow an int (gh-10761)
+        n = 2**31 // (1000 * np.int64().itemsize)
+        _testutils.check_free_memory(2 * n * 1001 * np.int64().itemsize / 1e6)
+
+        # Create a chequered pattern of 1s and 0s
+        a = np.zeros(1001 * n, dtype=np.int64)
+        a[::2] = 1
+        a = np.lib.stride_tricks.as_strided(a, shape=(n, 1000), strides=(8008, 8))
+
+        count = signal.convolve2d(a, [[1, 1]])
+        fails = np.where(count > 1)
+        assert fails[0].size == 0
+
+
+class TestFFTConvolve:
+
+    @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]])
+    def test_real(self, axes):
+        a = array([1, 2, 3])
+        expected = array([1, 4, 10, 12, 9.])
+
+        if axes == '':
+            out = fftconvolve(a, a)
+        else:
+            out = fftconvolve(a, a, axes=axes)
+
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('axes', [1, [1], -1, [-1]])
+    def test_real_axes(self, axes):
+        a = array([1, 2, 3])
+        expected = array([1, 4, 10, 12, 9.])
+
+        a = np.tile(a, [2, 1])
+        expected = np.tile(expected, [2, 1])
+
+        out = fftconvolve(a, a, axes=axes)
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]])
+    def test_complex(self, axes):
+        a = array([1 + 1j, 2 + 2j, 3 + 3j])
+        expected = array([0 + 2j, 0 + 8j, 0 + 20j, 0 + 24j, 0 + 18j])
+
+        if axes == '':
+            out = fftconvolve(a, a)
+        else:
+            out = fftconvolve(a, a, axes=axes)
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('axes', [1, [1], -1, [-1]])
+    def test_complex_axes(self, axes):
+        a = array([1 + 1j, 2 + 2j, 3 + 3j])
+        expected = array([0 + 2j, 0 + 8j, 0 + 20j, 0 + 24j, 0 + 18j])
+
+        a = np.tile(a, [2, 1])
+        expected = np.tile(expected, [2, 1])
+
+        out = fftconvolve(a, a, axes=axes)
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('axes', ['',
+                                      None,
+                                      [0, 1],
+                                      [1, 0],
+                                      [0, -1],
+                                      [-1, 0],
+                                      [-2, 1],
+                                      [1, -2],
+                                      [-2, -1],
+                                      [-1, -2]])
+    def test_2d_real_same(self, axes):
+        a = array([[1, 2, 3],
+                   [4, 5, 6]])
+        expected = array([[1, 4, 10, 12, 9],
+                          [8, 26, 56, 54, 36],
+                          [16, 40, 73, 60, 36]])
+
+        if axes == '':
+            out = fftconvolve(a, a)
+        else:
+            out = fftconvolve(a, a, axes=axes)
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('axes', [[1, 2],
+                                      [2, 1],
+                                      [1, -1],
+                                      [-1, 1],
+                                      [-2, 2],
+                                      [2, -2],
+                                      [-2, -1],
+                                      [-1, -2]])
+    def test_2d_real_same_axes(self, axes):
+        a = array([[1, 2, 3],
+                   [4, 5, 6]])
+        expected = array([[1, 4, 10, 12, 9],
+                          [8, 26, 56, 54, 36],
+                          [16, 40, 73, 60, 36]])
+
+        a = np.tile(a, [2, 1, 1])
+        expected = np.tile(expected, [2, 1, 1])
+
+        out = fftconvolve(a, a, axes=axes)
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('axes', ['',
+                                      None,
+                                      [0, 1],
+                                      [1, 0],
+                                      [0, -1],
+                                      [-1, 0],
+                                      [-2, 1],
+                                      [1, -2],
+                                      [-2, -1],
+                                      [-1, -2]])
+    def test_2d_complex_same(self, axes):
+        a = array([[1 + 2j, 3 + 4j, 5 + 6j],
+                   [2 + 1j, 4 + 3j, 6 + 5j]])
+        expected = array([
+            [-3 + 4j, -10 + 20j, -21 + 56j, -18 + 76j, -11 + 60j],
+            [10j, 44j, 118j, 156j, 122j],
+            [3 + 4j, 10 + 20j, 21 + 56j, 18 + 76j, 11 + 60j]
+            ])
+
+        if axes == '':
+            out = fftconvolve(a, a)
+        else:
+            out = fftconvolve(a, a, axes=axes)
+
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('axes', [[1, 2],
+                                      [2, 1],
+                                      [1, -1],
+                                      [-1, 1],
+                                      [-2, 2],
+                                      [2, -2],
+                                      [-2, -1],
+                                      [-1, -2]])
+    def test_2d_complex_same_axes(self, axes):
+        a = array([[1 + 2j, 3 + 4j, 5 + 6j],
+                   [2 + 1j, 4 + 3j, 6 + 5j]])
+        expected = array([
+            [-3 + 4j, -10 + 20j, -21 + 56j, -18 + 76j, -11 + 60j],
+            [10j, 44j, 118j, 156j, 122j],
+            [3 + 4j, 10 + 20j, 21 + 56j, 18 + 76j, 11 + 60j]
+            ])
+
+        a = np.tile(a, [2, 1, 1])
+        expected = np.tile(expected, [2, 1, 1])
+
+        out = fftconvolve(a, a, axes=axes)
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]])
+    def test_real_same_mode(self, axes):
+        a = array([1, 2, 3])
+        b = array([3, 3, 5, 6, 8, 7, 9, 0, 1])
+        expected_1 = array([35., 41., 47.])
+        expected_2 = array([9., 20., 25., 35., 41., 47., 39., 28., 2.])
+
+        if axes == '':
+            out = fftconvolve(a, b, 'same')
+        else:
+            out = fftconvolve(a, b, 'same', axes=axes)
+        assert_array_almost_equal(out, expected_1)
+
+        if axes == '':
+            out = fftconvolve(b, a, 'same')
+        else:
+            out = fftconvolve(b, a, 'same', axes=axes)
+        assert_array_almost_equal(out, expected_2)
+
+    @pytest.mark.parametrize('axes', [1, -1, [1], [-1]])
+    def test_real_same_mode_axes(self, axes):
+        a = array([1, 2, 3])
+        b = array([3, 3, 5, 6, 8, 7, 9, 0, 1])
+        expected_1 = array([35., 41., 47.])
+        expected_2 = array([9., 20., 25., 35., 41., 47., 39., 28., 2.])
+
+        a = np.tile(a, [2, 1])
+        b = np.tile(b, [2, 1])
+        expected_1 = np.tile(expected_1, [2, 1])
+        expected_2 = np.tile(expected_2, [2, 1])
+
+        out = fftconvolve(a, b, 'same', axes=axes)
+        assert_array_almost_equal(out, expected_1)
+
+        out = fftconvolve(b, a, 'same', axes=axes)
+        assert_array_almost_equal(out, expected_2)
+
+    @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]])
+    def test_valid_mode_real(self, axes):
+        # See gh-5897
+        a = array([3, 2, 1])
+        b = array([3, 3, 5, 6, 8, 7, 9, 0, 1])
+        expected = array([24., 31., 41., 43., 49., 25., 12.])
+
+        if axes == '':
+            out = fftconvolve(a, b, 'valid')
+        else:
+            out = fftconvolve(a, b, 'valid', axes=axes)
+        assert_array_almost_equal(out, expected)
+
+        if axes == '':
+            out = fftconvolve(b, a, 'valid')
+        else:
+            out = fftconvolve(b, a, 'valid', axes=axes)
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('axes', [1, [1]])
+    def test_valid_mode_real_axes(self, axes):
+        # See gh-5897
+        a = array([3, 2, 1])
+        b = array([3, 3, 5, 6, 8, 7, 9, 0, 1])
+        expected = array([24., 31., 41., 43., 49., 25., 12.])
+
+        a = np.tile(a, [2, 1])
+        b = np.tile(b, [2, 1])
+        expected = np.tile(expected, [2, 1])
+
+        out = fftconvolve(a, b, 'valid', axes=axes)
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]])
+    def test_valid_mode_complex(self, axes):
+        a = array([3 - 1j, 2 + 7j, 1 + 0j])
+        b = array([3 + 2j, 3 - 3j, 5 + 0j, 6 - 1j, 8 + 0j])
+        expected = array([45. + 12.j, 30. + 23.j, 48 + 32.j])
+
+        if axes == '':
+            out = fftconvolve(a, b, 'valid')
+        else:
+            out = fftconvolve(a, b, 'valid', axes=axes)
+        assert_array_almost_equal(out, expected)
+
+        if axes == '':
+            out = fftconvolve(b, a, 'valid')
+        else:
+            out = fftconvolve(b, a, 'valid', axes=axes)
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('axes', [1, [1], -1, [-1]])
+    def test_valid_mode_complex_axes(self, axes):
+        a = array([3 - 1j, 2 + 7j, 1 + 0j])
+        b = array([3 + 2j, 3 - 3j, 5 + 0j, 6 - 1j, 8 + 0j])
+        expected = array([45. + 12.j, 30. + 23.j, 48 + 32.j])
+
+        a = np.tile(a, [2, 1])
+        b = np.tile(b, [2, 1])
+        expected = np.tile(expected, [2, 1])
+
+        out = fftconvolve(a, b, 'valid', axes=axes)
+        assert_array_almost_equal(out, expected)
+
+        out = fftconvolve(b, a, 'valid', axes=axes)
+        assert_array_almost_equal(out, expected)
+
+    def test_valid_mode_ignore_nonaxes(self):
+        # See gh-5897
+        a = array([3, 2, 1])
+        b = array([3, 3, 5, 6, 8, 7, 9, 0, 1])
+        expected = array([24., 31., 41., 43., 49., 25., 12.])
+
+        a = np.tile(a, [2, 1])
+        b = np.tile(b, [1, 1])
+        expected = np.tile(expected, [2, 1])
+
+        out = fftconvolve(a, b, 'valid', axes=1)
+        assert_array_almost_equal(out, expected)
+
+    def test_empty(self):
+        # Regression test for #1745: crashes with 0-length input.
+        assert_(fftconvolve([], []).size == 0)
+        assert_(fftconvolve([5, 6], []).size == 0)
+        assert_(fftconvolve([], [7]).size == 0)
+
+    def test_zero_rank(self):
+        a = array(4967)
+        b = array(3920)
+        out = fftconvolve(a, b)
+        assert_equal(out, a * b)
+
+    def test_single_element(self):
+        a = array([4967])
+        b = array([3920])
+        out = fftconvolve(a, b)
+        assert_equal(out, a * b)
+
+    @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]])
+    def test_random_data(self, axes):
+        np.random.seed(1234)
+        a = np.random.rand(1233) + 1j * np.random.rand(1233)
+        b = np.random.rand(1321) + 1j * np.random.rand(1321)
+        expected = np.convolve(a, b, 'full')
+
+        if axes == '':
+            out = fftconvolve(a, b, 'full')
+        else:
+            out = fftconvolve(a, b, 'full', axes=axes)
+        assert_(np.allclose(out, expected, rtol=1e-10))
+
+    @pytest.mark.parametrize('axes', [1, [1], -1, [-1]])
+    def test_random_data_axes(self, axes):
+        np.random.seed(1234)
+        a = np.random.rand(1233) + 1j * np.random.rand(1233)
+        b = np.random.rand(1321) + 1j * np.random.rand(1321)
+        expected = np.convolve(a, b, 'full')
+
+        a = np.tile(a, [2, 1])
+        b = np.tile(b, [2, 1])
+        expected = np.tile(expected, [2, 1])
+
+        out = fftconvolve(a, b, 'full', axes=axes)
+        assert_(np.allclose(out, expected, rtol=1e-10))
+
+    @pytest.mark.parametrize('axes', [[1, 4],
+                                      [4, 1],
+                                      [1, -1],
+                                      [-1, 1],
+                                      [-4, 4],
+                                      [4, -4],
+                                      [-4, -1],
+                                      [-1, -4]])
+    def test_random_data_multidim_axes(self, axes):
+        a_shape, b_shape = (123, 22), (132, 11)
+        np.random.seed(1234)
+        a = np.random.rand(*a_shape) + 1j * np.random.rand(*a_shape)
+        b = np.random.rand(*b_shape) + 1j * np.random.rand(*b_shape)
+        expected = convolve2d(a, b, 'full')
+
+        a = a[:, :, None, None, None]
+        b = b[:, :, None, None, None]
+        expected = expected[:, :, None, None, None]
+
+        a = np.moveaxis(a.swapaxes(0, 2), 1, 4)
+        b = np.moveaxis(b.swapaxes(0, 2), 1, 4)
+        expected = np.moveaxis(expected.swapaxes(0, 2), 1, 4)
+
+        # use 1 for dimension 2 in a and 3 in b to test broadcasting
+        a = np.tile(a, [2, 1, 3, 1, 1])
+        b = np.tile(b, [2, 1, 1, 4, 1])
+        expected = np.tile(expected, [2, 1, 3, 4, 1])
+
+        out = fftconvolve(a, b, 'full', axes=axes)
+        assert_allclose(out, expected, rtol=1e-10, atol=1e-10)
+
+    @pytest.mark.slow
+    @pytest.mark.parametrize(
+        'n',
+        list(range(1, 100)) +
+        list(range(1000, 1500)) +
+        np.random.RandomState(1234).randint(1001, 10000, 5).tolist())
+    def test_many_sizes(self, n):
+        a = np.random.rand(n) + 1j * np.random.rand(n)
+        b = np.random.rand(n) + 1j * np.random.rand(n)
+        expected = np.convolve(a, b, 'full')
+
+        out = fftconvolve(a, b, 'full')
+        assert_allclose(out, expected, atol=1e-10)
+
+        out = fftconvolve(a, b, 'full', axes=[0])
+        assert_allclose(out, expected, atol=1e-10)
+
+    def test_fft_nan(self):
+        n = 1000
+        rng = np.random.default_rng(43876432987)
+        sig_nan = rng.standard_normal(n)
+
+        for val in [np.nan, np.inf]:
+            sig_nan[100] = val
+            coeffs = signal.firwin(200, 0.2)
+
+            with pytest.warns(RuntimeWarning, match="Use of fft convolution"):
+                signal.convolve(sig_nan, coeffs, mode='same', method='fft')
+
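+
+# Sentinel substituted for fftconvolve via monkeypatch in the oaconvolve
+# tests below: oaconvolve is expected to take its own overlap-add path, so
+# any fallback to fftconvolve fails the test immediately.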
+def fftconvolve_err(*args, **kwargs):
+    raise RuntimeError('Fell back to fftconvolve')
+
+
+def gen_oa_shapes(sizes):
+    return [(a, b) for a, b in product(sizes, repeat=2)
+            if abs(a - b) > 3]
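+# e.g. gen_oa_shapes([1, 5, 10]) gives [(1, 5), (1, 10), (5, 1), (5, 10),
+# (10, 1), (10, 5)]; pairs whose lengths differ by 3 or less (such as
+# (5, 5)) are dropped so the two inputs always have distinct sizes.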
+
+
+def gen_oa_shapes_2d(sizes):
+    shapes0 = gen_oa_shapes(sizes)
+    shapes1 = gen_oa_shapes(sizes)
+    shapes = [ishapes0+ishapes1 for ishapes0, ishapes1 in
+              zip(shapes0, shapes1)]
+
+    modes = ['full', 'valid', 'same']
+    return [ishapes+(imode,) for ishapes, imode in product(shapes, modes)
+            if imode != 'valid' or
+            (ishapes[0] > ishapes[1] and ishapes[2] > ishapes[3]) or
+            (ishapes[0] < ishapes[1] and ishapes[2] < ishapes[3])]
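+# 'valid' mode requires one input to be at least as large as the other in
+# every dimension, so 'valid' shape tuples are kept only when one array is
+# larger in both dimensions (or smaller in both).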
+
+
+def gen_oa_shapes_eq(sizes):
+    return [(a, b) for a, b in product(sizes, repeat=2)
+            if a >= b]
+
+
+class TestOAConvolve:
+    @pytest.mark.slow()
+    @pytest.mark.parametrize('shape_a_0, shape_b_0',
+                             gen_oa_shapes_eq(list(range(100)) +
+                                              list(range(100, 1000, 23)))
+                             )
+    def test_real_manylens(self, shape_a_0, shape_b_0):
+        a = np.random.rand(shape_a_0)
+        b = np.random.rand(shape_b_0)
+
+        expected = fftconvolve(a, b)
+        out = oaconvolve(a, b)
+
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('shape_a_0, shape_b_0',
+                             gen_oa_shapes([50, 47, 6, 4, 1]))
+    @pytest.mark.parametrize('is_complex', [True, False])
+    @pytest.mark.parametrize('mode', ['full', 'valid', 'same'])
+    def test_1d_noaxes(self, shape_a_0, shape_b_0,
+                       is_complex, mode, monkeypatch):
+        a = np.random.rand(shape_a_0)
+        b = np.random.rand(shape_b_0)
+        if is_complex:
+            a = a + 1j*np.random.rand(shape_a_0)
+            b = b + 1j*np.random.rand(shape_b_0)
+
+        expected = fftconvolve(a, b, mode=mode)
+
+        monkeypatch.setattr(signal._signaltools, 'fftconvolve',
+                            fftconvolve_err)
+        out = oaconvolve(a, b, mode=mode)
+
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('axes', [0, 1])
+    @pytest.mark.parametrize('shape_a_0, shape_b_0',
+                             gen_oa_shapes([50, 47, 6, 4]))
+    @pytest.mark.parametrize('shape_a_extra', [1, 3])
+    @pytest.mark.parametrize('shape_b_extra', [1, 3])
+    @pytest.mark.parametrize('is_complex', [True, False])
+    @pytest.mark.parametrize('mode', ['full', 'valid', 'same'])
+    def test_1d_axes(self, axes, shape_a_0, shape_b_0,
+                     shape_a_extra, shape_b_extra,
+                     is_complex, mode, monkeypatch):
+        ax_a = [shape_a_extra]*2
+        ax_b = [shape_b_extra]*2
+        ax_a[axes] = shape_a_0
+        ax_b[axes] = shape_b_0
+
+        a = np.random.rand(*ax_a)
+        b = np.random.rand(*ax_b)
+        if is_complex:
+            a = a + 1j*np.random.rand(*ax_a)
+            b = b + 1j*np.random.rand(*ax_b)
+
+        expected = fftconvolve(a, b, mode=mode, axes=axes)
+
+        monkeypatch.setattr(signal._signaltools, 'fftconvolve',
+                            fftconvolve_err)
+        out = oaconvolve(a, b, mode=mode, axes=axes)
+
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('shape_a_0, shape_b_0, '
+                             'shape_a_1, shape_b_1, mode',
+                             gen_oa_shapes_2d([50, 47, 6, 4]))
+    @pytest.mark.parametrize('is_complex', [True, False])
+    def test_2d_noaxes(self, shape_a_0, shape_b_0,
+                       shape_a_1, shape_b_1, mode,
+                       is_complex, monkeypatch):
+        a = np.random.rand(shape_a_0, shape_a_1)
+        b = np.random.rand(shape_b_0, shape_b_1)
+        if is_complex:
+            a = a + 1j*np.random.rand(shape_a_0, shape_a_1)
+            b = b + 1j*np.random.rand(shape_b_0, shape_b_1)
+
+        expected = fftconvolve(a, b, mode=mode)
+
+        monkeypatch.setattr(signal._signaltools, 'fftconvolve',
+                            fftconvolve_err)
+        out = oaconvolve(a, b, mode=mode)
+
+        assert_array_almost_equal(out, expected)
+
+    @pytest.mark.parametrize('axes', [[0, 1], [0, 2], [1, 2]])
+    @pytest.mark.parametrize('shape_a_0, shape_b_0, '
+                             'shape_a_1, shape_b_1, mode',
+                             gen_oa_shapes_2d([50, 47, 6, 4]))
+    @pytest.mark.parametrize('shape_a_extra', [1, 3])
+    @pytest.mark.parametrize('shape_b_extra', [1, 3])
+    @pytest.mark.parametrize('is_complex', [True, False])
+    def test_2d_axes(self, axes, shape_a_0, shape_b_0,
+                     shape_a_1, shape_b_1, mode,
+                     shape_a_extra, shape_b_extra,
+                     is_complex, monkeypatch):
+        ax_a = [shape_a_extra]*3
+        ax_b = [shape_b_extra]*3
+        ax_a[axes[0]] = shape_a_0
+        ax_b[axes[0]] = shape_b_0
+        ax_a[axes[1]] = shape_a_1
+        ax_b[axes[1]] = shape_b_1
+
+        a = np.random.rand(*ax_a)
+        b = np.random.rand(*ax_b)
+        if is_complex:
+            a = a + 1j*np.random.rand(*ax_a)
+            b = b + 1j*np.random.rand(*ax_b)
+
+        expected = fftconvolve(a, b, mode=mode, axes=axes)
+
+        monkeypatch.setattr(signal._signaltools, 'fftconvolve',
+                            fftconvolve_err)
+        out = oaconvolve(a, b, mode=mode, axes=axes)
+
+        assert_array_almost_equal(out, expected)
+
+    def test_empty(self):
+        # Regression test for #1745: crashes with 0-length input.
+        assert_(oaconvolve([], []).size == 0)
+        assert_(oaconvolve([5, 6], []).size == 0)
+        assert_(oaconvolve([], [7]).size == 0)
+
+    def test_zero_rank(self):
+        a = array(4967)
+        b = array(3920)
+        out = oaconvolve(a, b)
+        assert_equal(out, a * b)
+
+    def test_single_element(self):
+        a = array([4967])
+        b = array([3920])
+        out = oaconvolve(a, b)
+        assert_equal(out, a * b)
+
+
+class TestAllFreqConvolves:
+
+    @pytest.mark.parametrize('convapproach',
+                             [fftconvolve, oaconvolve])
+    def test_invalid_shapes(self, convapproach):
+        a = np.arange(1, 7).reshape((2, 3))
+        b = np.arange(-6, 0).reshape((3, 2))
+        with assert_raises(ValueError,
+                           match="For 'valid' mode, one must be at least "
+                           "as large as the other in every dimension"):
+            convapproach(a, b, mode='valid')
+
+    @pytest.mark.parametrize('convapproach',
+                             [fftconvolve, oaconvolve])
+    def test_invalid_shapes_axes(self, convapproach):
+        a = np.zeros([5, 6, 2, 1])
+        b = np.zeros([5, 6, 3, 1])
+        with assert_raises(ValueError,
+                           match=r"incompatible shapes for in1 and in2:"
+                           r" \(5L?, 6L?, 2L?, 1L?\) and"
+                           r" \(5L?, 6L?, 3L?, 1L?\)"):
+            convapproach(a, b, axes=[0, 1])
+
+    @pytest.mark.parametrize('a,b',
+                             [([1], 2),
+                              (1, [2]),
+                              ([3], [[2]])])
+    @pytest.mark.parametrize('convapproach',
+                             [fftconvolve, oaconvolve])
+    def test_mismatched_dims(self, a, b, convapproach):
+        with assert_raises(ValueError,
+                           match="in1 and in2 should have the same"
+                           " dimensionality"):
+            convapproach(a, b)
+
+    @pytest.mark.parametrize('convapproach',
+                             [fftconvolve, oaconvolve])
+    def test_invalid_flags(self, convapproach):
+        with assert_raises(ValueError,
+                           match="acceptable mode flags are 'valid',"
+                           " 'same', or 'full'"):
+            convapproach([1], [2], mode='chips')
+
+        with assert_raises(ValueError,
+                           match="when provided, axes cannot be empty"):
+            convapproach([1], [2], axes=[])
+
+        with assert_raises(ValueError, match="axes must be a scalar or "
+                           "iterable of integers"):
+            convapproach([1], [2], axes=[[1, 2], [3, 4]])
+
+        with assert_raises(ValueError, match="axes must be a scalar or "
+                           "iterable of integers"):
+            convapproach([1], [2], axes=[1., 2., 3., 4.])
+
+        with assert_raises(ValueError,
+                           match="axes exceeds dimensionality of input"):
+            convapproach([1], [2], axes=[1])
+
+        with assert_raises(ValueError,
+                           match="axes exceeds dimensionality of input"):
+            convapproach([1], [2], axes=[-2])
+
+        with assert_raises(ValueError,
+                           match="all axes must be unique"):
+            convapproach([1], [2], axes=[0, 0])
+
+    @pytest.mark.parametrize('dtype', [np.longfloat, np.longcomplex])
+    def test_longdtype_input(self, dtype):
+        x = np.random.random((27, 27)).astype(dtype)
+        y = np.random.random((4, 4)).astype(dtype)
+        if np.iscomplexobj(dtype()):
+            x += .1j
+            y -= .1j
+
+        res = fftconvolve(x, y)
+        assert_allclose(res, convolve(x, y, method='direct'))
+        assert res.dtype == dtype
+
+
+class TestMedFilt:
+
+    IN = [[50, 50, 50, 50, 50, 92, 18, 27, 65, 46],
+          [50, 50, 50, 50, 50, 0, 72, 77, 68, 66],
+          [50, 50, 50, 50, 50, 46, 47, 19, 64, 77],
+          [50, 50, 50, 50, 50, 42, 15, 29, 95, 35],
+          [50, 50, 50, 50, 50, 46, 34, 9, 21, 66],
+          [70, 97, 28, 68, 78, 77, 61, 58, 71, 42],
+          [64, 53, 44, 29, 68, 32, 19, 68, 24, 84],
+          [3, 33, 53, 67, 1, 78, 74, 55, 12, 83],
+          [7, 11, 46, 70, 60, 47, 24, 43, 61, 26],
+          [32, 61, 88, 7, 39, 4, 92, 64, 45, 61]]
+
+    OUT = [[0, 50, 50, 50, 42, 15, 15, 18, 27, 0],
+           [0, 50, 50, 50, 50, 42, 19, 21, 29, 0],
+           [50, 50, 50, 50, 50, 47, 34, 34, 46, 35],
+           [50, 50, 50, 50, 50, 50, 42, 47, 64, 42],
+           [50, 50, 50, 50, 50, 50, 46, 55, 64, 35],
+           [33, 50, 50, 50, 50, 47, 46, 43, 55, 26],
+           [32, 50, 50, 50, 50, 47, 46, 45, 55, 26],
+           [7, 46, 50, 50, 47, 46, 46, 43, 45, 21],
+           [0, 32, 33, 39, 32, 32, 43, 43, 43, 0],
+           [0, 7, 11, 7, 4, 4, 19, 19, 24, 0]]
+
+    KERNEL_SIZE = [7, 3]
+
+    def test_basic(self):
+        d = signal.medfilt(self.IN, self.KERNEL_SIZE)
+        e = signal.medfilt2d(np.array(self.IN, float), self.KERNEL_SIZE)
+        assert_array_equal(d, self.OUT)
+        assert_array_equal(d, e)
+
+    @pytest.mark.parametrize('dtype', [np.ubyte, np.byte, np.ushort, np.short,
+                                       np.uint, int, np.longlong, np.ulonglong,
+                                       np.float32, np.float64, np.longdouble])
+    def test_types(self, dtype):
+        # volume input and output types match
+        in_typed = np.array(self.IN, dtype=dtype)
+        assert_equal(signal.medfilt(in_typed).dtype, dtype)
+        assert_equal(signal.medfilt2d(in_typed).dtype, dtype)
+
+    @pytest.mark.parametrize('dtype', [np.bool_, np.cfloat, np.cdouble,
+                                       np.clongdouble, np.float16,])
+    def test_invalid_dtypes(self, dtype):
+        in_typed = np.array(self.IN, dtype=dtype)
+        with pytest.raises(ValueError, match="order_filterND"):
+            signal.medfilt(in_typed)
+
+        with pytest.raises(ValueError, match="order_filterND"):
+            signal.medfilt2d(in_typed)
+
+    def test_none(self):
+        # gh-1651, trac #1124. Ensure this does not segfault.
+        with pytest.warns(UserWarning):
+            assert_raises(TypeError, signal.medfilt, None)
+        # Expand on this test to avoid a regression with possibly contiguous
+        # numpy arrays that have odd strides. The stride value below would
+        # point into the wrong memory if used (but it does not need to be used).
+        dummy = np.arange(10, dtype=np.float64)
+        a = dummy[5:6]
+        a.strides = 16
+        assert_(signal.medfilt(a, 1) == 5.)
+
+    def test_refcounting(self):
+        # Check a refcounting-related crash
+        a = Decimal(123)
+        x = np.array([a, a], dtype=object)
+        if hasattr(sys, 'getrefcount'):
+            n = 2 * sys.getrefcount(a)
+        else:
+            n = 10
+        # Shouldn't segfault:
+        with pytest.warns(UserWarning):
+            for j in range(n):
+                signal.medfilt(x)
+        if hasattr(sys, 'getrefcount'):
+            assert_(sys.getrefcount(a) < n)
+        assert_equal(x, [a, a])
+
+    def test_object(self):
+        in_object = np.array(self.IN, dtype=object)
+        out_object = np.array(self.OUT, dtype=object)
+        assert_array_equal(signal.medfilt(in_object, self.KERNEL_SIZE),
+                           out_object)
+
+    @pytest.mark.parametrize("dtype", [np.ubyte, np.float32, np.float64])
+    def test_medfilt2d_parallel(self, dtype):
+        in_typed = np.array(self.IN, dtype=dtype)
+        expected = np.array(self.OUT, dtype=dtype)
+
+        # This is used to simplify the indexing calculations.
+        assert in_typed.shape == expected.shape
+
+        # We'll do the calculation in four chunks. M1 and N1 are the dimensions
+        # of the first output chunk. We have to extend the input by half the
+        # kernel size to be able to calculate the full output chunk.
+        M1 = expected.shape[0] // 2
+        N1 = expected.shape[1] // 2
+        offM = self.KERNEL_SIZE[0] // 2 + 1
+        offN = self.KERNEL_SIZE[1] // 2 + 1
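+        # With the 10x10 input and KERNEL_SIZE = [7, 3] this gives
+        # M1 = N1 = 5, offM = 7 // 2 + 1 = 4 and offN = 3 // 2 + 1 = 2.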
+
+        def apply(chunk):
+            # in = slice of in_typed to use.
+            # sel = slice of output to crop it to the correct region.
+            # out = slice of output array to store in.
+            M, N = chunk
+            if M == 0:
+                Min = slice(0, M1 + offM)
+                Msel = slice(0, -offM)
+                Mout = slice(0, M1)
+            else:
+                Min = slice(M1 - offM, None)
+                Msel = slice(offM, None)
+                Mout = slice(M1, None)
+            if N == 0:
+                Nin = slice(0, N1 + offN)
+                Nsel = slice(0, -offN)
+                Nout = slice(0, N1)
+            else:
+                Nin = slice(N1 - offN, None)
+                Nsel = slice(offN, None)
+                Nout = slice(N1, None)
+
+            # Do the calculation, but do not write to the output in the threads.
+            chunk_data = in_typed[Min, Nin]
+            med = signal.medfilt2d(chunk_data, self.KERNEL_SIZE)
+            return med[Msel, Nsel], Mout, Nout
+
+        # Give each chunk to a different thread.
+        output = np.zeros_like(expected)
+        with ThreadPoolExecutor(max_workers=4) as pool:
+            chunks = {(0, 0), (0, 1), (1, 0), (1, 1)}
+            futures = {pool.submit(apply, chunk) for chunk in chunks}
+
+            # Store each result in the output as it arrives.
+            for future in as_completed(futures):
+                data, Mslice, Nslice = future.result()
+                output[Mslice, Nslice] = data
+
+        assert_array_equal(output, expected)
+
+
+class TestWiener:
+
+    def test_basic(self):
+        g = array([[5, 6, 4, 3],
+                   [3, 5, 6, 2],
+                   [2, 3, 5, 6],
+                   [1, 6, 9, 7]], 'd')
+        h = array([[2.16374269, 3.2222222222, 2.8888888889, 1.6666666667],
+                   [2.666666667, 4.33333333333, 4.44444444444, 2.8888888888],
+                   [2.222222222, 4.4444444444, 5.4444444444, 4.801066874837],
+                   [1.33333333333, 3.92735042735, 6.0712560386, 5.0404040404]])
+        assert_array_almost_equal(signal.wiener(g), h, decimal=6)
+        assert_array_almost_equal(signal.wiener(g, mysize=3), h, decimal=6)
+
+
+padtype_options = ["mean", "median", "minimum", "maximum", "line"]
+padtype_options += _upfirdn_modes
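+# _upfirdn_modes adds the np.pad-style boundary modes accepted by
+# resample_poly (e.g. 'constant', 'edge', 'wrap'), so the parametrized
+# tests below exercise every supported padtype.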
+
+
+class TestResample:
+    def test_basic(self):
+        # Some basic tests
+
+        # Regression test for issue #3603.
+        # the window length must equal sig.shape[0]
+        sig = np.arange(128)
+        num = 256
+        win = signal.get_window(('kaiser', 8.0), 160)
+        assert_raises(ValueError, signal.resample, sig, num, window=win)
+
+        # Other degenerate conditions
+        assert_raises(ValueError, signal.resample_poly, sig, 'yo', 1)
+        assert_raises(ValueError, signal.resample_poly, sig, 1, 0)
+        assert_raises(ValueError, signal.resample_poly, sig, 2, 1, padtype='')
+        assert_raises(ValueError, signal.resample_poly, sig, 2, 1,
+                      padtype='mean', cval=10)
+
+        # test for issue #6505 - should not modify window.shape when axis ≠ 0
+        sig2 = np.tile(np.arange(160), (2, 1))
+        signal.resample(sig2, num, axis=-1, window=win)
+        assert_(win.shape == (160,))
+
+    @pytest.mark.parametrize('window', (None, 'hamming'))
+    @pytest.mark.parametrize('N', (20, 19))
+    @pytest.mark.parametrize('num', (100, 101, 10, 11))
+    def test_rfft(self, N, num, window):
+        # Make sure the speed-up using rfft gives the same result as the
+        # normal way using fft
+        x = np.linspace(0, 10, N, endpoint=False)
+        y = np.cos(-x**2/6.0)
+        assert_allclose(signal.resample(y, num, window=window),
+                        signal.resample(y + 0j, num, window=window).real)
+
+        y = np.array([np.cos(-x**2/6.0), np.sin(-x**2/6.0)])
+        y_complex = y + 0j
+        assert_allclose(
+            signal.resample(y, num, axis=1, window=window),
+            signal.resample(y_complex, num, axis=1, window=window).real,
+            atol=1e-9)
+
+    def test_input_domain(self):
+        # Test if both input domain modes produce the same results.
+        tsig = np.arange(256) + 0j
+        fsig = fft(tsig)
+        num = 256
+        assert_allclose(
+            signal.resample(fsig, num, domain='freq'),
+            signal.resample(tsig, num, domain='time'),
+            atol=1e-9)
+
+    @pytest.mark.parametrize('nx', (1, 2, 3, 5, 8))
+    @pytest.mark.parametrize('ny', (1, 2, 3, 5, 8))
+    @pytest.mark.parametrize('dtype', ('float', 'complex'))
+    def test_dc(self, nx, ny, dtype):
+        x = np.array([1] * nx, dtype)
+        y = signal.resample(x, ny)
+        assert_allclose(y, [1] * ny)
+
+    @pytest.mark.parametrize('padtype', padtype_options)
+    def test_mutable_window(self, padtype):
+        # Test that a mutable window is not modified
+        impulse = np.zeros(3)
+        window = np.random.RandomState(0).randn(2)
+        window_orig = window.copy()
+        signal.resample_poly(impulse, 5, 1, window=window, padtype=padtype)
+        assert_array_equal(window, window_orig)
+
+    @pytest.mark.parametrize('padtype', padtype_options)
+    def test_output_float32(self, padtype):
+        # Test that float32 inputs yield a float32 output
+        x = np.arange(10, dtype=np.float32)
+        h = np.array([1, 1, 1], dtype=np.float32)
+        y = signal.resample_poly(x, 1, 2, window=h, padtype=padtype)
+        assert y.dtype == np.float32
+
+    @pytest.mark.parametrize('padtype', padtype_options)
+    @pytest.mark.parametrize('dtype', [np.float32, np.float64])
+    def test_output_match_dtype(self, padtype, dtype):
+        # Test that the dtype of x is preserved per issue #14733
+        x = np.arange(10, dtype=dtype)
+        y = signal.resample_poly(x, 1, 2, padtype=padtype)
+        assert y.dtype == x.dtype
+
+    @pytest.mark.parametrize(
+        "method, ext, padtype",
+        [("fft", False, None)]
+        + list(
+            product(
+                ["polyphase"], [False, True], padtype_options,
+            )
+        ),
+    )
+    def test_resample_methods(self, method, ext, padtype):
+        # Test resampling of sinusoids and random noise (1-sec)
+        rate = 100
+        rates_to = [49, 50, 51, 99, 100, 101, 199, 200, 201]
+
+        # Sinusoids, windowed to avoid edge artifacts
+        t = np.arange(rate) / float(rate)
+        freqs = np.array((1., 10., 40.))[:, np.newaxis]
+        x = np.sin(2 * np.pi * freqs * t) * hann(rate)
+
+        for rate_to in rates_to:
+            t_to = np.arange(rate_to) / float(rate_to)
+            y_tos = np.sin(2 * np.pi * freqs * t_to) * hann(rate_to)
+            if method == 'fft':
+                y_resamps = signal.resample(x, rate_to, axis=-1)
+            else:
+                if ext and rate_to != rate:
+                    # Match default window design
+                    g = gcd(rate_to, rate)
+                    up = rate_to // g
+                    down = rate // g
+                    max_rate = max(up, down)
+                    f_c = 1. / max_rate
+                    half_len = 10 * max_rate
+                    window = signal.firwin(2 * half_len + 1, f_c,
+                                           window=('kaiser', 5.0))
+                    polyargs = {'window': window, 'padtype': padtype}
+                else:
+                    polyargs = {'padtype': padtype}
+
+                y_resamps = signal.resample_poly(x, rate_to, rate, axis=-1,
+                                                 **polyargs)
+
+            for y_to, y_resamp, freq in zip(y_tos, y_resamps, freqs):
+                if freq >= 0.5 * rate_to:
+                    y_to.fill(0.)  # mostly low-passed away
+                    if padtype in ['minimum', 'maximum']:
+                        assert_allclose(y_resamp, y_to, atol=3e-1)
+                    else:
+                        assert_allclose(y_resamp, y_to, atol=1e-3)
+                else:
+                    assert_array_equal(y_to.shape, y_resamp.shape)
+                    corr = np.corrcoef(y_to, y_resamp)[0, 1]
+                    assert_(corr > 0.99, msg=(corr, rate, rate_to))
+
+        # Random data
+        rng = np.random.RandomState(0)
+        x = hann(rate) * np.cumsum(rng.randn(rate))  # low-pass, windowed
+        for rate_to in rates_to:
+            # random data
+            t_to = np.arange(rate_to) / float(rate_to)
+            y_to = np.interp(t_to, t, x)
+            if method == 'fft':
+                y_resamp = signal.resample(x, rate_to)
+            else:
+                y_resamp = signal.resample_poly(x, rate_to, rate,
+                                                padtype=padtype)
+            assert_array_equal(y_to.shape, y_resamp.shape)
+            corr = np.corrcoef(y_to, y_resamp)[0, 1]
+            assert_(corr > 0.99, msg=corr)
+
+        # More tests of fft method (Master 0.18.1 fails these)
+        if method == 'fft':
+            x1 = np.array([1.+0.j, 0.+0.j])
+            y1_test = signal.resample(x1, 4)
+            # upsampling a complex array
+            y1_true = np.array([1.+0.j, 0.5+0.j, 0.+0.j, 0.5+0.j])
+            assert_allclose(y1_test, y1_true, atol=1e-12)
+            x2 = np.array([1., 0.5, 0., 0.5])
+            y2_test = signal.resample(x2, 2)  # downsampling a real array
+            y2_true = np.array([1., 0.])
+            assert_allclose(y2_test, y2_true, atol=1e-12)
+
+    def test_poly_vs_filtfilt(self):
+        # Check that up=1.0 gives same answer as filtfilt + slicing
+        random_state = np.random.RandomState(17)
+        try_types = (int, np.float32, np.complex64, float, complex)
+        size = 10000
+        down_factors = [2, 11, 79]
+
+        for dtype in try_types:
+            x = random_state.randn(size).astype(dtype)
+            if dtype in (np.complex64, np.complex128):
+                x += 1j * random_state.randn(size)
+
+            # resample_poly assumes zeros outside of signal, whereas filtfilt
+            # can only constant-pad. Make them equivalent:
+            x[0] = 0
+            x[-1] = 0
+
+            for down in down_factors:
+                h = signal.firwin(31, 1. / down, window='hamming')
+                yf = filtfilt(h, 1.0, x, padtype='constant')[::down]
+
+                # Need to pass convolved version of filter to resample_poly,
+                # since filtfilt does forward and backward, but resample_poly
+                # only goes forward
+                hc = convolve(h, h[::-1])
+                y = signal.resample_poly(x, 1, down, window=hc)
+                assert_allclose(yf, y, atol=1e-7, rtol=1e-7)
+
+    def test_correlate1d(self):
+        for down in [2, 4]:
+            for nx in range(1, 40, down):
+                for nweights in (32, 33):
+                    x = np.random.random((nx,))
+                    weights = np.random.random((nweights,))
+                    y_g = correlate1d(x, weights[::-1], mode='constant')
+                    y_s = signal.resample_poly(
+                        x, up=1, down=down, window=weights)
+                    assert_allclose(y_g[::down], y_s)
+
+
+class TestCSpline1DEval:
+
+    def test_basic(self):
+        y = array([1, 2, 3, 4, 3, 2, 1, 2, 3.0])
+        x = arange(len(y))
+        dx = x[1] - x[0]
+        cj = signal.cspline1d(y)
+
+        x2 = arange(len(y) * 10.0) / 10.0
+        y2 = signal.cspline1d_eval(cj, x2, dx=dx, x0=x[0])
+
+        # make sure interpolated values are on knot points
+        assert_array_almost_equal(y2[::10], y, decimal=5)
+
+    def test_complex(self):
+        #  create some smoothly varying complex signal to interpolate
+        x = np.arange(2)
+        y = np.zeros(x.shape, dtype=np.complex64)
+        T = 10.0
+        f = 1.0 / T
+        y = np.exp(2.0J * np.pi * f * x)
+
+        # get the cspline transform
+        cy = signal.cspline1d(y)
+
+        # determine new test x value and interpolate
+        xnew = np.array([0.5])
+        ynew = signal.cspline1d_eval(cy, xnew)
+
+        assert_equal(ynew.dtype, y.dtype)
+
+
+class TestOrderFilt:
+
+    def test_basic(self):
+        assert_array_equal(signal.order_filter([1, 2, 3], [1, 0, 1], 1),
+                           [2, 3, 2])
+
+
+class _TestLinearFilter:
+
+    def generate(self, shape):
+        x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape)
+        return self.convert_dtype(x)
+
+    def convert_dtype(self, arr):
+        if self.dtype == np.dtype('O'):
+            arr = np.asarray(arr)
+            out = np.empty(arr.shape, self.dtype)
+            it = np.nditer([arr, out], ['refs_ok', 'zerosize_ok'],
+                           [['readonly'], ['writeonly']])
+            for x, y in it:
+                y[...] = self.type(x[()])
+            return out
+        else:
+            return np.array(arr, self.dtype, copy=False)
+
+    def test_rank_1_IIR(self):
+        x = self.generate((6,))
+        b = self.convert_dtype([1, -1])
+        a = self.convert_dtype([0.5, -0.5])
+        y_r = self.convert_dtype([0, 2, 4, 6, 8, 10.])
+        assert_array_almost_equal(lfilter(b, a, x), y_r)
+
+    def test_rank_1_FIR(self):
+        x = self.generate((6,))
+        b = self.convert_dtype([1, 1])
+        a = self.convert_dtype([1])
+        y_r = self.convert_dtype([0, 1, 3, 5, 7, 9.])
+        assert_array_almost_equal(lfilter(b, a, x), y_r)
+
+    def test_rank_1_IIR_init_cond(self):
+        x = self.generate((6,))
+        b = self.convert_dtype([1, 0, -1])
+        a = self.convert_dtype([0.5, -0.5])
+        zi = self.convert_dtype([1, 2])
+        y_r = self.convert_dtype([1, 5, 9, 13, 17, 21])
+        zf_r = self.convert_dtype([13, -10])
+        y, zf = lfilter(b, a, x, zi=zi)
+        assert_array_almost_equal(y, y_r)
+        assert_array_almost_equal(zf, zf_r)
+
+    def test_rank_1_FIR_init_cond(self):
+        x = self.generate((6,))
+        b = self.convert_dtype([1, 1, 1])
+        a = self.convert_dtype([1])
+        zi = self.convert_dtype([1, 1])
+        y_r = self.convert_dtype([1, 2, 3, 6, 9, 12.])
+        zf_r = self.convert_dtype([9, 5])
+        y, zf = lfilter(b, a, x, zi=zi)
+        assert_array_almost_equal(y, y_r)
+        assert_array_almost_equal(zf, zf_r)
+
+    def test_rank_2_IIR_axis_0(self):
+        x = self.generate((4, 3))
+        b = self.convert_dtype([1, -1])
+        a = self.convert_dtype([0.5, 0.5])
+        y_r2_a0 = self.convert_dtype([[0, 2, 4], [6, 4, 2], [0, 2, 4],
+                                      [6, 4, 2]])
+        y = lfilter(b, a, x, axis=0)
+        assert_array_almost_equal(y_r2_a0, y)
+
+    def test_rank_2_IIR_axis_1(self):
+        x = self.generate((4, 3))
+        b = self.convert_dtype([1, -1])
+        a = self.convert_dtype([0.5, 0.5])
+        y_r2_a1 = self.convert_dtype([[0, 2, 0], [6, -4, 6], [12, -10, 12],
+                                      [18, -16, 18]])
+        y = lfilter(b, a, x, axis=1)
+        assert_array_almost_equal(y_r2_a1, y)
+
+    def test_rank_2_IIR_axis_0_init_cond(self):
+        x = self.generate((4, 3))
+        b = self.convert_dtype([1, -1])
+        a = self.convert_dtype([0.5, 0.5])
+        zi = self.convert_dtype(np.ones((4,1)))
+
+        y_r2_a0_1 = self.convert_dtype([[1, 1, 1], [7, -5, 7], [13, -11, 13],
+                                        [19, -17, 19]])
+        zf_r = self.convert_dtype([-5, -17, -29, -41])[:, np.newaxis]
+        y, zf = lfilter(b, a, x, axis=1, zi=zi)
+        assert_array_almost_equal(y_r2_a0_1, y)
+        assert_array_almost_equal(zf, zf_r)
+
+    def test_rank_2_IIR_axis_1_init_cond(self):
+        x = self.generate((4,3))
+        b = self.convert_dtype([1, -1])
+        a = self.convert_dtype([0.5, 0.5])
+        zi = self.convert_dtype(np.ones((1,3)))
+
+        y_r2_a0_0 = self.convert_dtype([[1, 3, 5], [5, 3, 1],
+                                        [1, 3, 5], [5, 3, 1]])
+        zf_r = self.convert_dtype([[-23, -23, -23]])
+        y, zf = lfilter(b, a, x, axis=0, zi=zi)
+        assert_array_almost_equal(y_r2_a0_0, y)
+        assert_array_almost_equal(zf, zf_r)
+
+    def test_rank_3_IIR(self):
+        x = self.generate((4, 3, 2))
+        b = self.convert_dtype([1, -1])
+        a = self.convert_dtype([0.5, 0.5])
+
+        for axis in range(x.ndim):
+            y = lfilter(b, a, x, axis)
+            y_r = np.apply_along_axis(lambda w: lfilter(b, a, w), axis, x)
+            assert_array_almost_equal(y, y_r)
+
+    def test_rank_3_IIR_init_cond(self):
+        x = self.generate((4, 3, 2))
+        b = self.convert_dtype([1, -1])
+        a = self.convert_dtype([0.5, 0.5])
+
+        for axis in range(x.ndim):
+            zi_shape = list(x.shape)
+            zi_shape[axis] = 1
+            zi = self.convert_dtype(np.ones(zi_shape))
+            zi1 = self.convert_dtype([1])
+            y, zf = lfilter(b, a, x, axis, zi)
+            lf0 = lambda w: lfilter(b, a, w, zi=zi1)[0]
+            lf1 = lambda w: lfilter(b, a, w, zi=zi1)[1]
+            y_r = np.apply_along_axis(lf0, axis, x)
+            zf_r = np.apply_along_axis(lf1, axis, x)
+            assert_array_almost_equal(y, y_r)
+            assert_array_almost_equal(zf, zf_r)
+
+    def test_rank_3_FIR(self):
+        x = self.generate((4, 3, 2))
+        b = self.convert_dtype([1, 0, -1])
+        a = self.convert_dtype([1])
+
+        for axis in range(x.ndim):
+            y = lfilter(b, a, x, axis)
+            y_r = np.apply_along_axis(lambda w: lfilter(b, a, w), axis, x)
+            assert_array_almost_equal(y, y_r)
+
+    def test_rank_3_FIR_init_cond(self):
+        x = self.generate((4, 3, 2))
+        b = self.convert_dtype([1, 0, -1])
+        a = self.convert_dtype([1])
+
+        for axis in range(x.ndim):
+            zi_shape = list(x.shape)
+            zi_shape[axis] = 2
+            zi = self.convert_dtype(np.ones(zi_shape))
+            zi1 = self.convert_dtype([1, 1])
+            y, zf = lfilter(b, a, x, axis, zi)
+            lf0 = lambda w: lfilter(b, a, w, zi=zi1)[0]
+            lf1 = lambda w: lfilter(b, a, w, zi=zi1)[1]
+            y_r = np.apply_along_axis(lf0, axis, x)
+            zf_r = np.apply_along_axis(lf1, axis, x)
+            assert_array_almost_equal(y, y_r)
+            assert_array_almost_equal(zf, zf_r)
+
+    def test_zi_pseudobroadcast(self):
+        x = self.generate((4, 5, 20))
+        b,a = signal.butter(8, 0.2, output='ba')
+        b = self.convert_dtype(b)
+        a = self.convert_dtype(a)
+        zi_size = b.shape[0] - 1
+
+        # lfilter requires x.ndim == zi.ndim exactly.  However, zi can have
+        # length 1 dimensions.
+        zi_full = self.convert_dtype(np.ones((4, 5, zi_size)))
+        zi_sing = self.convert_dtype(np.ones((1, 1, zi_size)))
+
+        y_full, zf_full = lfilter(b, a, x, zi=zi_full)
+        y_sing, zf_sing = lfilter(b, a, x, zi=zi_sing)
+
+        assert_array_almost_equal(y_sing, y_full)
+        assert_array_almost_equal(zf_full, zf_sing)
+
+        # lfilter does not prepend ones
+        assert_raises(ValueError, lfilter, b, a, x, -1, np.ones(zi_size))
+
+    def test_scalar_a(self):
+        # a can be a scalar.
+        x = self.generate(6)
+        b = self.convert_dtype([1, 0, -1])
+        a = self.convert_dtype([1])
+        y_r = self.convert_dtype([0, 1, 2, 2, 2, 2])
+
+        y = lfilter(b, a[0], x)
+        assert_array_almost_equal(y, y_r)
+
+    def test_zi_some_singleton_dims(self):
+        # lfilter doesn't really broadcast (no prepending of 1's), but it
+        # does do singleton expansion if x and zi have the same ndim.  This
+        # was broken only if a subset of the axes were singletons (gh-4681).
+        x = self.convert_dtype(np.zeros((3,2,5), 'l'))
+        b = self.convert_dtype(np.ones(5, 'l'))
+        a = self.convert_dtype(np.array([1,0,0]))
+        zi = np.ones((3,1,4), 'l')
+        zi[1,:,:] *= 2
+        zi[2,:,:] *= 3
+        zi = self.convert_dtype(zi)
+
+        zf_expected = self.convert_dtype(np.zeros((3,2,4), 'l'))
+        y_expected = np.zeros((3,2,5), 'l')
+        y_expected[:,:,:4] = [[[1]], [[2]], [[3]]]
+        y_expected = self.convert_dtype(y_expected)
+
+        # IIR
+        y_iir, zf_iir = lfilter(b, a, x, -1, zi)
+        assert_array_almost_equal(y_iir, y_expected)
+        assert_array_almost_equal(zf_iir, zf_expected)
+
+        # FIR
+        y_fir, zf_fir = lfilter(b, a[0], x, -1, zi)
+        assert_array_almost_equal(y_fir, y_expected)
+        assert_array_almost_equal(zf_fir, zf_expected)
+
+    def base_bad_size_zi(self, b, a, x, axis, zi):
+        b = self.convert_dtype(b)
+        a = self.convert_dtype(a)
+        x = self.convert_dtype(x)
+        zi = self.convert_dtype(zi)
+        assert_raises(ValueError, lfilter, b, a, x, axis, zi)
+
+    def test_bad_size_zi(self):
+        # rank 1
+        x1 = np.arange(6)
+        self.base_bad_size_zi([1], [1], x1, -1, [1])
+        self.base_bad_size_zi([1, 1], [1], x1, -1, [0, 1])
+        self.base_bad_size_zi([1, 1], [1], x1, -1, [[0]])
+        self.base_bad_size_zi([1, 1], [1], x1, -1, [0, 1, 2])
+        self.base_bad_size_zi([1, 1, 1], [1], x1, -1, [[0]])
+        self.base_bad_size_zi([1, 1, 1], [1], x1, -1, [0, 1, 2])
+        self.base_bad_size_zi([1], [1, 1], x1, -1, [0, 1])
+        self.base_bad_size_zi([1], [1, 1], x1, -1, [[0]])
+        self.base_bad_size_zi([1], [1, 1], x1, -1, [0, 1, 2])
+        self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0])
+        self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [[0], [1]])
+        self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0, 1, 2])
+        self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0, 1, 2, 3])
+        self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0])
+        self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [[0], [1]])
+        self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0, 1, 2])
+        self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0, 1, 2, 3])
+
+        # rank 2
+        x2 = np.arange(12).reshape((4,3))
+        # for axis=0, zi.shape should be (max(len(a), len(b)) - 1, 3)
+        self.base_bad_size_zi([1], [1], x2, 0, [0])
+
+        # for each of these there are 5 cases tested (in this order):
+        # 1. not deep enough, right # elements
+        # 2. too deep, right # elements
+        # 3. right depth, right # elements, transposed
+        # 4. right depth, too few elements
+        # 5. right depth, too many elements
+
+        self.base_bad_size_zi([1, 1], [1], x2, 0, [0,1,2])
+        self.base_bad_size_zi([1, 1], [1], x2, 0, [[[0,1,2]]])
+        self.base_bad_size_zi([1, 1], [1], x2, 0, [[0], [1], [2]])
+        self.base_bad_size_zi([1, 1], [1], x2, 0, [[0,1]])
+        self.base_bad_size_zi([1, 1], [1], x2, 0, [[0,1,2,3]])
+
+        self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [0,1,2,3,4,5])
+        self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[[0,1,2],[3,4,5]]])
+        self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1],[2,3],[4,5]])
+        self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1],[2,3]])
+        self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1,2,3],[4,5,6,7]])
+
+        self.base_bad_size_zi([1], [1, 1], x2, 0, [0,1,2])
+        self.base_bad_size_zi([1], [1, 1], x2, 0, [[[0,1,2]]])
+        self.base_bad_size_zi([1], [1, 1], x2, 0, [[0], [1], [2]])
+        self.base_bad_size_zi([1], [1, 1], x2, 0, [[0,1]])
+        self.base_bad_size_zi([1], [1, 1], x2, 0, [[0,1,2,3]])
+
+        self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [0,1,2,3,4,5])
+        self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[[0,1,2],[3,4,5]]])
+        self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1],[2,3],[4,5]])
+        self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1],[2,3]])
+        self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1,2,3],[4,5,6,7]])
+
+        self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [0,1,2,3,4,5])
+        self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[[0,1,2],[3,4,5]]])
+        self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1],[2,3],[4,5]])
+        self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1],[2,3]])
+        self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1,2,3],[4,5,6,7]])
+
+        # for axis=1, zi.shape should be (4, max(len(a), len(b)) - 1)
+        self.base_bad_size_zi([1], [1], x2, 1, [0])
+
+        self.base_bad_size_zi([1, 1], [1], x2, 1, [0,1,2,3])
+        self.base_bad_size_zi([1, 1], [1], x2, 1, [[[0],[1],[2],[3]]])
+        self.base_bad_size_zi([1, 1], [1], x2, 1, [[0, 1, 2, 3]])
+        self.base_bad_size_zi([1, 1], [1], x2, 1, [[0],[1],[2]])
+        self.base_bad_size_zi([1, 1], [1], x2, 1, [[0],[1],[2],[3],[4]])
+
+        self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [0,1,2,3,4,5,6,7])
+        self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]])
+        self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1,2,3],[4,5,6,7]])
+        self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1],[2,3],[4,5]])
+        self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]])
+
+        self.base_bad_size_zi([1], [1, 1], x2, 1, [0,1,2,3])
+        self.base_bad_size_zi([1], [1, 1], x2, 1, [[[0],[1],[2],[3]]])
+        self.base_bad_size_zi([1], [1, 1], x2, 1, [[0, 1, 2, 3]])
+        self.base_bad_size_zi([1], [1, 1], x2, 1, [[0],[1],[2]])
+        self.base_bad_size_zi([1], [1, 1], x2, 1, [[0],[1],[2],[3],[4]])
+
+        self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [0,1,2,3,4,5,6,7])
+        self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]])
+        self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1,2,3],[4,5,6,7]])
+        self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1],[2,3],[4,5]])
+        self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]])
+
+        self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [0,1,2,3,4,5,6,7])
+        self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]])
+        self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1,2,3],[4,5,6,7]])
+        self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1],[2,3],[4,5]])
+        self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]])
+
+    def test_empty_zi(self):
+        # Regression test for #880: empty array for zi crashes.
+        x = self.generate((5,))
+        a = self.convert_dtype([1])
+        b = self.convert_dtype([1])
+        zi = self.convert_dtype([])
+        y, zf = lfilter(b, a, x, zi=zi)
+        assert_array_almost_equal(y, x)
+        assert_equal(zf.dtype, self.dtype)
+        assert_equal(zf.size, 0)
+
+    def test_lfiltic_bad_zi(self):
+        # Regression test for #3699: bad initial conditions
+        a = self.convert_dtype([1])
+        b = self.convert_dtype([1])
+        # "y" sets the datatype of zi, so it truncates if int
+        zi = lfiltic(b, a, [1., 0])
+        zi_1 = lfiltic(b, a, [1, 0])
+        zi_2 = lfiltic(b, a, [True, False])
+        assert_array_equal(zi, zi_1)
+        assert_array_equal(zi, zi_2)
+
+    def test_short_x_FIR(self):
+        # regression test for #5116
+        # x shorter than b, with non None zi fails
+        a = self.convert_dtype([1])
+        b = self.convert_dtype([1, 0, -1])
+        zi = self.convert_dtype([2, 7])
+        x = self.convert_dtype([72])
+        ye = self.convert_dtype([74])
+        zfe = self.convert_dtype([7, -72])
+        y, zf = lfilter(b, a, x, zi=zi)
+        assert_array_almost_equal(y, ye)
+        assert_array_almost_equal(zf, zfe)
+
+    def test_short_x_IIR(self):
+        # regression test for #5116
+        # x shorter than b, with non None zi fails
+        a = self.convert_dtype([1, 1])
+        b = self.convert_dtype([1, 0, -1])
+        zi = self.convert_dtype([2, 7])
+        x = self.convert_dtype([72])
+        ye = self.convert_dtype([74])
+        zfe = self.convert_dtype([-67, -72])
+        y, zf = lfilter(b, a, x, zi=zi)
+        assert_array_almost_equal(y, ye)
+        assert_array_almost_equal(zf, zfe)
+
+    def test_do_not_modify_a_b_IIR(self):
+        x = self.generate((6,))
+        b = self.convert_dtype([1, -1])
+        b0 = b.copy()
+        a = self.convert_dtype([0.5, -0.5])
+        a0 = a.copy()
+        y_r = self.convert_dtype([0, 2, 4, 6, 8, 10.])
+        y_f = lfilter(b, a, x)
+        assert_array_almost_equal(y_f, y_r)
+        assert_equal(b, b0)
+        assert_equal(a, a0)
+
+    def test_do_not_modify_a_b_FIR(self):
+        x = self.generate((6,))
+        b = self.convert_dtype([1, 0, 1])
+        b0 = b.copy()
+        a = self.convert_dtype([2])
+        a0 = a.copy()
+        y_r = self.convert_dtype([0, 0.5, 1, 2, 3, 4.])
+        y_f = lfilter(b, a, x)
+        assert_array_almost_equal(y_f, y_r)
+        assert_equal(b, b0)
+        assert_equal(a, a0)
+
+
+class TestLinearFilterFloat32(_TestLinearFilter):
+    dtype = np.dtype('f')
+
+
+class TestLinearFilterFloat64(_TestLinearFilter):
+    dtype = np.dtype('d')
+
+
+class TestLinearFilterFloatExtended(_TestLinearFilter):
+    dtype = np.dtype('g')
+
+
+class TestLinearFilterComplex64(_TestLinearFilter):
+    dtype = np.dtype('F')
+
+
+class TestLinearFilterComplex128(_TestLinearFilter):
+    dtype = np.dtype('D')
+
+
+class TestLinearFilterComplexExtended(_TestLinearFilter):
+    dtype = np.dtype('G')
+
+
+class TestLinearFilterDecimal(_TestLinearFilter):
+    dtype = np.dtype('O')
+
+    def type(self, x):
+        return Decimal(str(x))
+
+
+class TestLinearFilterObject(_TestLinearFilter):
+    dtype = np.dtype('O')
+    type = float
+
+
+def test_lfilter_bad_object():
+    # lfilter: object arrays with non-numeric objects raise TypeError.
+    # Regression test for ticket #1452.
+    assert_raises(TypeError, lfilter, [1.0], [1.0], [1.0, None, 2.0])
+    assert_raises(TypeError, lfilter, [1.0], [None], [1.0, 2.0, 3.0])
+    assert_raises(TypeError, lfilter, [None], [1.0], [1.0, 2.0, 3.0])
+
+
+def test_lfilter_notimplemented_input():
+    # Should not crash, gh-7991
+    assert_raises(NotImplementedError, lfilter, [2,3], [4,5], [1,2,3,4,5])
+
+
+@pytest.mark.parametrize('dt', [np.ubyte, np.byte, np.ushort, np.short,
+                                np.uint, int, np.longlong, np.ulonglong,
+                                np.float32, np.float64, np.longdouble,
+                                Decimal])
+class TestCorrelateReal:
+    def _setup_rank1(self, dt):
+        a = np.linspace(0, 3, 4).astype(dt)
+        b = np.linspace(1, 2, 2).astype(dt)
+
+        y_r = np.array([0, 2, 5, 8, 3]).astype(dt)
+        return a, b, y_r
+
+    def equal_tolerance(self, res_dt):
+        # default value of keyword
+        decimal = 6
+        try:
+            dt_info = np.finfo(res_dt)
+            if hasattr(dt_info, 'resolution'):
+                decimal = int(-0.5*np.log10(dt_info.resolution))
+        except Exception:
+            pass
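+        # e.g. np.finfo(np.float64).resolution is 1e-15, giving
+        # decimal = int(-0.5 * np.log10(1e-15)) = int(7.5) = 7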
+        return decimal
+
+    def equal_tolerance_fft(self, res_dt):
+        # FFT implementations convert longdouble arguments down to
+        # double so don't expect better precision, see gh-9520
+        if res_dt == np.longdouble:
+            return self.equal_tolerance(np.double)
+        else:
+            return self.equal_tolerance(res_dt)
+
+    def test_method(self, dt):
+        if dt == Decimal:
+            method = choose_conv_method([Decimal(4)], [Decimal(3)])
+            assert_equal(method, 'direct')
+        else:
+            a, b, y_r = self._setup_rank3(dt)
+            y_fft = correlate(a, b, method='fft')
+            y_direct = correlate(a, b, method='direct')
+
+            assert_array_almost_equal(y_r, y_fft, decimal=self.equal_tolerance_fft(y_fft.dtype))
+            assert_array_almost_equal(y_r, y_direct, decimal=self.equal_tolerance(y_direct.dtype))
+            assert_equal(y_fft.dtype, dt)
+            assert_equal(y_direct.dtype, dt)
+
+    def test_rank1_valid(self, dt):
+        a, b, y_r = self._setup_rank1(dt)
+        y = correlate(a, b, 'valid')
+        assert_array_almost_equal(y, y_r[1:4])
+        assert_equal(y.dtype, dt)
+
+        # See gh-5897
+        y = correlate(b, a, 'valid')
+        assert_array_almost_equal(y, y_r[1:4][::-1])
+        assert_equal(y.dtype, dt)
+
+    def test_rank1_same(self, dt):
+        a, b, y_r = self._setup_rank1(dt)
+        y = correlate(a, b, 'same')
+        assert_array_almost_equal(y, y_r[:-1])
+        assert_equal(y.dtype, dt)
+
+    def test_rank1_full(self, dt):
+        a, b, y_r = self._setup_rank1(dt)
+        y = correlate(a, b, 'full')
+        assert_array_almost_equal(y, y_r)
+        assert_equal(y.dtype, dt)
+
+    def _setup_rank3(self, dt):
+        a = np.linspace(0, 39, 40).reshape((2, 4, 5), order='F').astype(dt)
+        b = np.linspace(0, 23, 24).reshape((2, 3, 4), order='F').astype(dt)
+
+        y_r = array([[[0., 184., 504., 912., 1360., 888., 472., 160.],
+                      [46., 432., 1062., 1840., 2672., 1698., 864., 266.],
+                      [134., 736., 1662., 2768., 3920., 2418., 1168., 314.],
+                      [260., 952., 1932., 3056., 4208., 2580., 1240., 332.],
+                      [202., 664., 1290., 1984., 2688., 1590., 712., 150.],
+                      [114., 344., 642., 960., 1280., 726., 296., 38.]],
+
+                     [[23., 400., 1035., 1832., 2696., 1737., 904., 293.],
+                      [134., 920., 2166., 3680., 5280., 3306., 1640., 474.],
+                      [325., 1544., 3369., 5512., 7720., 4683., 2192., 535.],
+                      [571., 1964., 3891., 6064., 8272., 4989., 2324., 565.],
+                      [434., 1360., 2586., 3920., 5264., 3054., 1312., 230.],
+                      [241., 700., 1281., 1888., 2496., 1383., 532., 39.]],
+
+                     [[22., 214., 528., 916., 1332., 846., 430., 132.],
+                      [86., 484., 1098., 1832., 2600., 1602., 772., 206.],
+                      [188., 802., 1698., 2732., 3788., 2256., 1018., 218.],
+                      [308., 1006., 1950., 2996., 4052., 2400., 1078., 230.],
+                      [230., 692., 1290., 1928., 2568., 1458., 596., 78.],
+                      [126., 354., 636., 924., 1212., 654., 234., 0.]]],
+                    dtype=dt)
+
+        return a, b, y_r
+
+    def test_rank3_valid(self, dt):
+        a, b, y_r = self._setup_rank3(dt)
+        y = correlate(a, b, "valid")
+        assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5])
+        assert_equal(y.dtype, dt)
+
+        # See gh-5897
+        y = correlate(b, a, "valid")
+        assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5][::-1, ::-1, ::-1])
+        assert_equal(y.dtype, dt)
+
+    def test_rank3_same(self, dt):
+        a, b, y_r = self._setup_rank3(dt)
+        y = correlate(a, b, "same")
+        assert_array_almost_equal(y, y_r[0:-1, 1:-1, 1:-2])
+        assert_equal(y.dtype, dt)
+
+    def test_rank3_all(self, dt):
+        a, b, y_r = self._setup_rank3(dt)
+        y = correlate(a, b)
+        assert_array_almost_equal(y, y_r)
+        assert_equal(y.dtype, dt)
+
+
+class TestCorrelate:
+    # Tests that don't depend on dtype
+
+    def test_invalid_shapes(self):
+        # By "invalid," we mean that no one
+        # array has dimensions that are all at
+        # least as large as the corresponding
+        # dimensions of the other array. This
+        # setup should throw a ValueError.
+        a = np.arange(1, 7).reshape((2, 3))
+        b = np.arange(-6, 0).reshape((3, 2))
+
+        assert_raises(ValueError, correlate, *(a, b), **{'mode': 'valid'})
+        assert_raises(ValueError, correlate, *(b, a), **{'mode': 'valid'})
+
+    def test_invalid_params(self):
+        a = [3, 4, 5]
+        b = [1, 2, 3]
+        assert_raises(ValueError, correlate, a, b, mode='spam')
+        assert_raises(ValueError, correlate, a, b, mode='eggs', method='fft')
+        assert_raises(ValueError, correlate, a, b, mode='ham', method='direct')
+        assert_raises(ValueError, correlate, a, b, mode='full', method='bacon')
+        assert_raises(ValueError, correlate, a, b, mode='same', method='bacon')
+
+    def test_mismatched_dims(self):
+        # Input arrays should have the same number of dimensions
+        assert_raises(ValueError, correlate, [1], 2, method='direct')
+        assert_raises(ValueError, correlate, 1, [2], method='direct')
+        assert_raises(ValueError, correlate, [1], 2, method='fft')
+        assert_raises(ValueError, correlate, 1, [2], method='fft')
+        assert_raises(ValueError, correlate, [1], [[2]])
+        assert_raises(ValueError, correlate, [3], 2)
+
+    def test_numpy_fastpath(self):
+        a = [1, 2, 3]
+        b = [4, 5]
+        assert_allclose(correlate(a, b, mode='same'), [5, 14, 23])
+
+        a = [1, 2, 3]
+        b = [4, 5, 6]
+        assert_allclose(correlate(a, b, mode='same'), [17, 32, 23])
+        assert_allclose(correlate(a, b, mode='full'), [6, 17, 32, 23, 12])
+        assert_allclose(correlate(a, b, mode='valid'), [32])
+
+
+@pytest.mark.parametrize("mode", ["valid", "same", "full"])
+@pytest.mark.parametrize("behind", [True, False])
+@pytest.mark.parametrize("input_size", [100, 101, 1000, 1001, 10000, 10001])
+def test_correlation_lags(mode, behind, input_size):
+    # generate random data
+    rng = np.random.RandomState(0)
+    in1 = rng.standard_normal(input_size)
+    offset = int(input_size/10)
+    # generate offset version of array to correlate with
+    if behind:
+        # y is behind x
+        in2 = np.concatenate([rng.standard_normal(offset), in1])
+        expected = -offset
+    else:
+        # y is ahead of x
+        in2 = in1[offset:]
+        expected = offset
+    # cross correlate, returning lag information
+    correlation = correlate(in1, in2, mode=mode)
+    lags = correlation_lags(in1.size, in2.size, mode=mode)
+    # identify the peak
+    lag_index = np.argmax(correlation)
+    # Check as expected
+    assert_equal(lags[lag_index], expected)
+    # Correlation and lags shape should match
+    assert_equal(lags.shape, correlation.shape)
+
+
+@pytest.mark.parametrize('dt', [np.csingle, np.cdouble, np.clongdouble])
+class TestCorrelateComplex:
+    # The decimal precision to be used for comparing results.
+    # This value will be passed as the 'decimal' keyword argument of
+    # assert_array_almost_equal().
+    # Since correlate may choose to use an FFT method, which converts
+    # longdoubles to doubles internally, don't expect better precision
+    # for longdouble than for double (see gh-9520).
+
+    def decimal(self, dt):
+        if dt == np.clongdouble:
+            dt = np.cdouble
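+        # e.g. np.finfo(np.cdouble).precision is 15, giving 2 * 15 / 3 = 10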
+        return int(2 * np.finfo(dt).precision / 3)
+
+    def _setup_rank1(self, dt, mode):
+        np.random.seed(9)
+        a = np.random.randn(10).astype(dt)
+        a += 1j * np.random.randn(10).astype(dt)
+        b = np.random.randn(8).astype(dt)
+        b += 1j * np.random.randn(8).astype(dt)
+
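+        # correlate conjugates its second argument, so for a = ar + 1j*ai
+        # and b = br + 1j*bi the result decomposes into real correlations:
+        # (ar*br + ai*bi) + 1j*(ai*br - ar*bi), assembled below.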
+        y_r = (correlate(a.real, b.real, mode=mode) +
+               correlate(a.imag, b.imag, mode=mode)).astype(dt)
+        y_r += 1j * (-correlate(a.real, b.imag, mode=mode) +
+                     correlate(a.imag, b.real, mode=mode))
+        return a, b, y_r
+
+    def test_rank1_valid(self, dt):
+        a, b, y_r = self._setup_rank1(dt, 'valid')
+        y = correlate(a, b, 'valid')
+        assert_array_almost_equal(y, y_r, decimal=self.decimal(dt))
+        assert_equal(y.dtype, dt)
+
+        # See gh-5897
+        y = correlate(b, a, 'valid')
+        assert_array_almost_equal(y, y_r[::-1].conj(), decimal=self.decimal(dt))
+        assert_equal(y.dtype, dt)
+
+    def test_rank1_same(self, dt):
+        a, b, y_r = self._setup_rank1(dt, 'same')
+        y = correlate(a, b, 'same')
+        assert_array_almost_equal(y, y_r, decimal=self.decimal(dt))
+        assert_equal(y.dtype, dt)
+
+    def test_rank1_full(self, dt):
+        a, b, y_r = self._setup_rank1(dt, 'full')
+        y = correlate(a, b, 'full')
+        assert_array_almost_equal(y, y_r, decimal=self.decimal(dt))
+        assert_equal(y.dtype, dt)
+
+    def test_swap_full(self, dt):
+        d = np.array([0.+0.j, 1.+1.j, 2.+2.j], dtype=dt)
+        k = np.array([1.+3.j, 2.+4.j, 3.+5.j, 4.+6.j], dtype=dt)
+        y = correlate(d, k)
+        assert_equal(y, [0.+0.j, 10.-2.j, 28.-6.j, 22.-6.j, 16.-6.j, 8.-4.j])
+
+    def test_swap_same(self, dt):
+        d = [0.+0.j, 1.+1.j, 2.+2.j]
+        k = [1.+3.j, 2.+4.j, 3.+5.j, 4.+6.j]
+        y = correlate(d, k, mode="same")
+        assert_equal(y, [10.-2.j, 28.-6.j, 22.-6.j])
+
+    def test_rank3(self, dt):
+        a = np.random.randn(10, 8, 6).astype(dt)
+        a += 1j * np.random.randn(10, 8, 6).astype(dt)
+        b = np.random.randn(8, 6, 4).astype(dt)
+        b += 1j * np.random.randn(8, 6, 4).astype(dt)
+
+        y_r = (correlate(a.real, b.real)
+               + correlate(a.imag, b.imag)).astype(dt)
+        y_r += 1j * (-correlate(a.real, b.imag) + correlate(a.imag, b.real))
+
+        y = correlate(a, b, 'full')
+        assert_array_almost_equal(y, y_r, decimal=self.decimal(dt) - 1)
+        assert_equal(y.dtype, dt)
+
+    def test_rank0(self, dt):
+        a = np.array(np.random.randn()).astype(dt)
+        a += 1j * np.array(np.random.randn()).astype(dt)
+        b = np.array(np.random.randn()).astype(dt)
+        b += 1j * np.array(np.random.randn()).astype(dt)
+
+        y_r = (correlate(a.real, b.real)
+               + correlate(a.imag, b.imag)).astype(dt)
+        y_r += 1j * np.array(-correlate(a.real, b.imag) +
+                             correlate(a.imag, b.real))
+
+        y = correlate(a, b, 'full')
+        assert_array_almost_equal(y, y_r, decimal=self.decimal(dt) - 1)
+        assert_equal(y.dtype, dt)
+
+        assert_equal(correlate([1], [2j]), correlate(1, 2j))
+        assert_equal(correlate([2j], [3j]), correlate(2j, 3j))
+        assert_equal(correlate([3j], [4]), correlate(3j, 4))
+
+
+class TestCorrelate2d:
+
+    def test_consistency_correlate_funcs(self):
+        # Compare np.correlate, signal.correlate, signal.correlate2d
+        a = np.arange(5)
+        b = np.array([3.2, 1.4, 3])
+        for mode in ['full', 'valid', 'same']:
+            assert_almost_equal(np.correlate(a, b, mode=mode),
+                                signal.correlate(a, b, mode=mode))
+            assert_almost_equal(np.squeeze(signal.correlate2d([a], [b],
+                                                              mode=mode)),
+                                signal.correlate(a, b, mode=mode))
+
+            # See gh-5897
+            if mode == 'valid':
+                assert_almost_equal(np.correlate(b, a, mode=mode),
+                                    signal.correlate(b, a, mode=mode))
+                assert_almost_equal(np.squeeze(signal.correlate2d([b], [a],
+                                                                  mode=mode)),
+                                    signal.correlate(b, a, mode=mode))
+
+    def test_invalid_shapes(self):
+        # By "invalid," we mean that no one
+        # array has dimensions that are all at
+        # least as large as the corresponding
+        # dimensions of the other array. This
+        # setup should throw a ValueError.
+        a = np.arange(1, 7).reshape((2, 3))
+        b = np.arange(-6, 0).reshape((3, 2))
+
+        assert_raises(ValueError, signal.correlate2d, a, b, mode='valid')
+        assert_raises(ValueError, signal.correlate2d, b, a, mode='valid')
+
+    def test_complex_input(self):
+        assert_equal(signal.correlate2d([[1]], [[2j]]), -2j)
+        assert_equal(signal.correlate2d([[2j]], [[3j]]), 6)
+        assert_equal(signal.correlate2d([[3j]], [[4]]), 12j)
+
+
+class TestLFilterZI:
+
+    def test_basic(self):
+        a = np.array([1.0, -1.0, 0.5])
+        b = np.array([1.0, 0.0, 2.0])
+        zi_expected = np.array([5.0, -1.0])
+        zi = lfilter_zi(b, a)
+        assert_array_almost_equal(zi, zi_expected)
+
+    def test_scale_invariance(self):
+        # Regression test.  There was a bug in which b was not correctly
+        # rescaled when a[0] was not equal to 1.
+        b = np.array([2, 8, 5])
+        a = np.array([1, 1, 8])
+        zi1 = lfilter_zi(b, a)
+        zi2 = lfilter_zi(2*b, 2*a)
+        assert_allclose(zi2, zi1, rtol=1e-12)
+
+    @pytest.mark.parametrize('dtype', [np.float32, np.float64])
+    def test_types(self, dtype):
+        b = np.zeros((8), dtype=dtype)
+        a = np.array([1], dtype=dtype)
+        assert_equal(np.real(signal.lfilter_zi(b, a)).dtype, dtype)
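+
+
+# A minimal usage sketch (illustration only, the standard scipy idiom):
+# lfilter_zi returns the internal state corresponding to step-response steady
+# state; scaling it by the first input sample suppresses the startup
+# transient:
+#
+#     zi = lfilter_zi(b, a)
+#     y, _ = lfilter(b, a, x, zi=zi * x[0])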
+
+
+class TestFiltFilt:
+    filtfilt_kind = 'tf'
+
+    def filtfilt(self, zpk, x, axis=-1, padtype='odd', padlen=None,
+                 method='pad', irlen=None):
+        if self.filtfilt_kind == 'tf':
+            b, a = zpk2tf(*zpk)
+            return filtfilt(b, a, x, axis, padtype, padlen, method, irlen)
+        elif self.filtfilt_kind == 'sos':
+            sos = zpk2sos(*zpk)
+            return sosfiltfilt(sos, x, axis, padtype, padlen)
+
+    def test_basic(self):
+        zpk = tf2zpk([1, 2, 3], [1, 2, 3])
+        out = self.filtfilt(zpk, np.arange(12))
+        assert_allclose(out, np.arange(12), atol=5.28e-11)
+
+    def test_sine(self):
+        rate = 2000
+        t = np.linspace(0, 1.0, rate + 1)
+        # A signal with low frequency and a high frequency.
+        xlow = np.sin(5 * 2 * np.pi * t)
+        xhigh = np.sin(250 * 2 * np.pi * t)
+        x = xlow + xhigh
+
+        zpk = butter(8, 0.125, output='zpk')
+        # r is the magnitude of the largest pole.
+        r = np.abs(zpk[1]).max()
+        eps = 1e-5
+        # n estimates the number of steps for the
+        # transient to decay by a factor of eps.
+        n = int(np.ceil(np.log(eps) / np.log(r)))
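+        # (The slowest transient decays like r**k, so solving r**n = eps
+        # gives n = log(eps) / log(r).)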
+
+        # High order lowpass filter...
+        y = self.filtfilt(zpk, x, padlen=n)
+        # Result should be just xlow.
+        err = np.abs(y - xlow).max()
+        assert_(err < 1e-4)
+
+        # A 2D case.
+        x2d = np.vstack([xlow, xlow + xhigh])
+        y2d = self.filtfilt(zpk, x2d, padlen=n, axis=1)
+        assert_equal(y2d.shape, x2d.shape)
+        err = np.abs(y2d - xlow).max()
+        assert_(err < 1e-4)
+
+        # Use the previous result to check the use of the axis keyword.
+        # (Regression test for ticket #1620)
+        y2dt = self.filtfilt(zpk, x2d.T, padlen=n, axis=0)
+        assert_equal(y2d, y2dt.T)
+
+    def test_axis(self):
+        # Test the 'axis' keyword on a 3D array.
+        x = np.arange(10.0 * 11.0 * 12.0).reshape(10, 11, 12)
+        zpk = butter(3, 0.125, output='zpk')
+        y0 = self.filtfilt(zpk, x, padlen=0, axis=0)
+        y1 = self.filtfilt(zpk, np.swapaxes(x, 0, 1), padlen=0, axis=1)
+        assert_array_equal(y0, np.swapaxes(y1, 0, 1))
+        y2 = self.filtfilt(zpk, np.swapaxes(x, 0, 2), padlen=0, axis=2)
+        assert_array_equal(y0, np.swapaxes(y2, 0, 2))
+
+    def test_acoeff(self):
+        if self.filtfilt_kind != 'tf':
+            return  # only necessary for TF
+        # test for 'a' coefficient as single number
+        out = signal.filtfilt([.5, .5], 1, np.arange(10))
+        assert_allclose(out, np.arange(10), rtol=1e-14, atol=1e-14)
+
+    def test_gust_simple(self):
+        if self.filtfilt_kind != 'tf':
+            pytest.skip('gust only implemented for TF systems')
+        # The input array has length 2.  The exact solution for this case
+        # was computed "by hand".
+        x = np.array([1.0, 2.0])
+        b = np.array([0.5])
+        a = np.array([1.0, -0.5])
+        y, z1, z2 = _filtfilt_gust(b, a, x)
+        assert_allclose([z1[0], z2[0]],
+                        [0.3*x[0] + 0.2*x[1], 0.2*x[0] + 0.3*x[1]])
+        assert_allclose(y, [z1[0] + 0.25*z2[0] + 0.25*x[0] + 0.125*x[1],
+                            0.25*z1[0] + z2[0] + 0.125*x[0] + 0.25*x[1]])
+
+    def test_gust_scalars(self):
+        if self.filtfilt_kind != 'tf':
+            pytest.skip('gust only implemented for TF systems')
+        # The filter coefficients are both scalars, so the filter simply
+        # multiplies its input by b/a.  When it is used in filtfilt, the
+        # factor is (b/a)**2.
+        x = np.arange(12)
+        b = 3.0
+        a = 2.0
+        y = filtfilt(b, a, x, method="gust")
+        expected = (b/a)**2 * x
+        assert_allclose(y, expected)
+
+
+class TestSOSFiltFilt(TestFiltFilt):
+    filtfilt_kind = 'sos'
+
+    def test_equivalence(self):
+        """Test equivalence between sosfiltfilt and filtfilt"""
+        x = np.random.RandomState(0).randn(1000)
+        for order in range(1, 6):
+            zpk = signal.butter(order, 0.35, output='zpk')
+            b, a = zpk2tf(*zpk)
+            sos = zpk2sos(*zpk)
+            y = filtfilt(b, a, x)
+            y_sos = sosfiltfilt(sos, x)
+            assert_allclose(y, y_sos, atol=1e-12, err_msg='order=%s' % order)
+
+
+def filtfilt_gust_opt(b, a, x):
+    """
+    An alternative implementation of filtfilt with Gustafsson edges.
+
+    This function computes the same result as
+    `scipy.signal._signaltools._filtfilt_gust`, but only 1-d arrays
+    are accepted.  The problem is solved using `fmin` from `scipy.optimize`.
+    `_filtfilt_gust` is significantly faster than this implementation.
+    """
+    def filtfilt_gust_opt_func(ics, b, a, x):
+        """Objective function used in filtfilt_gust_opt."""
+        m = max(len(a), len(b)) - 1
+        z0f = ics[:m]
+        z0b = ics[m:]
+        y_f = lfilter(b, a, x, zi=z0f)[0]
+        y_fb = lfilter(b, a, y_f[::-1], zi=z0b)[0][::-1]
+
+        y_b = lfilter(b, a, x[::-1], zi=z0b)[0][::-1]
+        y_bf = lfilter(b, a, y_b, zi=z0f)[0]
+        value = np.sum((y_fb - y_bf)**2)
+        return value
+
+    m = max(len(a), len(b)) - 1
+    zi = lfilter_zi(b, a)
+    ics = np.concatenate((x[:m].mean()*zi, x[-m:].mean()*zi))
+    result = fmin(filtfilt_gust_opt_func, ics, args=(b, a, x),
+                  xtol=1e-10, ftol=1e-12,
+                  maxfun=10000, maxiter=10000,
+                  full_output=True, disp=False)
+    opt, fopt, niter, funcalls, warnflag = result
+    if warnflag > 0:
+        raise RuntimeError("minimization failed in filtfilt_gust_opt: "
+                           "warnflag=%d" % warnflag)
+    z0f = opt[:m]
+    z0b = opt[m:]
+
+    # Apply the forward-backward filter using the computed initial
+    # conditions.
+    y_b = lfilter(b, a, x[::-1], zi=z0b)[0][::-1]
+    y = lfilter(b, a, y_b, zi=z0f)[0]
+
+    return y, z0f, z0b
+
+
+def check_filtfilt_gust(b, a, shape, axis, irlen=None):
+    # Generate x, the data to be filtered.
+    np.random.seed(123)
+    x = np.random.randn(*shape)
+
+    # Apply filtfilt to x. This is the main calculation to be checked.
+    y = filtfilt(b, a, x, axis=axis, method="gust", irlen=irlen)
+
+    # Also call the private function so we can test the ICs.
+    yg, zg1, zg2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen)
+
+    # filtfilt_gust_opt is an independent implementation that gives the
+    # expected result, but it only handles 1-D arrays, so use some looping
+    # and reshaping shenanigans to create the expected output arrays.
+    xx = np.swapaxes(x, axis, -1)
+    out_shape = xx.shape[:-1]
+    yo = np.empty_like(xx)
+    m = max(len(a), len(b)) - 1
+    zo1 = np.empty(out_shape + (m,))
+    zo2 = np.empty(out_shape + (m,))
+    for indx in product(*[range(d) for d in out_shape]):
+        yo[indx], zo1[indx], zo2[indx] = filtfilt_gust_opt(b, a, xx[indx])
+    yo = np.swapaxes(yo, -1, axis)
+    zo1 = np.swapaxes(zo1, -1, axis)
+    zo2 = np.swapaxes(zo2, -1, axis)
+
+    assert_allclose(y, yo, rtol=1e-8, atol=1e-9)
+    assert_allclose(yg, yo, rtol=1e-8, atol=1e-9)
+    assert_allclose(zg1, zo1, rtol=1e-8, atol=1e-9)
+    assert_allclose(zg2, zo2, rtol=1e-8, atol=1e-9)
+
+
+def test_choose_conv_method():
+    for mode in ['valid', 'same', 'full']:
+        for ndim in [1, 2]:
+            n, k, true_method = 8, 6, 'direct'
+            x = np.random.randn(*((n,) * ndim))
+            h = np.random.randn(*((k,) * ndim))
+
+            method = choose_conv_method(x, h, mode=mode)
+            assert_equal(method, true_method)
+
+            method_try, times = choose_conv_method(x, h, mode=mode, measure=True)
+            assert_(method_try in {'fft', 'direct'})
+            assert_(type(times) is dict)
+            assert_('fft' in times.keys() and 'direct' in times.keys())
+
+        n = 10
+        for not_fft_conv_supp in ["complex256", "complex192"]:
+            if hasattr(np, not_fft_conv_supp):
+                x = np.ones(n, dtype=not_fft_conv_supp)
+                h = x.copy()
+                assert_equal(choose_conv_method(x, h, mode=mode), 'direct')
+
+        x = np.array([2**51], dtype=np.int64)
+        h = x.copy()
+        assert_equal(choose_conv_method(x, h, mode=mode), 'direct')
+
+        x = [Decimal(3), Decimal(2)]
+        h = [Decimal(1), Decimal(4)]
+        assert_equal(choose_conv_method(x, h, mode=mode), 'direct')
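+
+
+# A minimal usage sketch (illustration only): the returned string can be fed
+# straight back into the convolution routines via their `method` argument:
+#
+#     method = choose_conv_method(x, h, mode='full')
+#     y = signal.convolve(x, h, mode='full', method=method)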
+
+
+def test_filtfilt_gust():
+    # Design a filter.
+    z, p, k = signal.ellip(3, 0.01, 120, 0.0875, output='zpk')
+
+    # Find the approximate impulse response length of the filter.
+    eps = 1e-10
+    r = np.max(np.abs(p))
+    approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r)))
+
+    np.random.seed(123)
+
+    b, a = zpk2tf(z, p, k)
+    for irlen in [None, approx_impulse_len]:
+        signal_len = 5 * approx_impulse_len
+
+        # 1-d test case
+        check_filtfilt_gust(b, a, (signal_len,), 0, irlen)
+
+        # 3-d test case; test each axis.
+        for axis in range(3):
+            shape = [2, 2, 2]
+            shape[axis] = signal_len
+            check_filtfilt_gust(b, a, shape, axis, irlen)
+
+    # Test case with length less than 2*approx_impulse_len.
+    # In this case, `filtfilt_gust` should behave the same as if
+    # `irlen=None` was given.
+    length = 2*approx_impulse_len - 50
+    check_filtfilt_gust(b, a, (length,), 0, approx_impulse_len)
+
+
+class TestDecimate:
+    def test_bad_args(self):
+        x = np.arange(12)
+        assert_raises(TypeError, signal.decimate, x, q=0.5, n=1)
+        assert_raises(TypeError, signal.decimate, x, q=2, n=0.5)
+
+    def test_basic_IIR(self):
+        x = np.arange(12)
+        y = signal.decimate(x, 2, n=1, ftype='iir', zero_phase=False).round()
+        assert_array_equal(y, x[::2])
+
+    def test_basic_FIR(self):
+        x = np.arange(12)
+        y = signal.decimate(x, 2, n=1, ftype='fir', zero_phase=False).round()
+        assert_array_equal(y, x[::2])
+
+    def test_shape(self):
+        # Regression test for ticket #1480.
+        z = np.zeros((30, 30))
+        d0 = signal.decimate(z, 2, axis=0, zero_phase=False)
+        assert_equal(d0.shape, (15, 30))
+        d1 = signal.decimate(z, 2, axis=1, zero_phase=False)
+        assert_equal(d1.shape, (30, 15))
+
+    def test_phaseshift_FIR(self):
+        with suppress_warnings() as sup:
+            sup.filter(BadCoefficients, "Badly conditioned filter")
+            self._test_phaseshift(method='fir', zero_phase=False)
+
+    def test_zero_phase_FIR(self):
+        with suppress_warnings() as sup:
+            sup.filter(BadCoefficients, "Badly conditioned filter")
+            self._test_phaseshift(method='fir', zero_phase=True)
+
+    def test_phaseshift_IIR(self):
+        self._test_phaseshift(method='iir', zero_phase=False)
+
+    def test_zero_phase_IIR(self):
+        self._test_phaseshift(method='iir', zero_phase=True)
+
+    def _test_phaseshift(self, method, zero_phase):
+        rate = 120
+        rates_to = [15, 20, 30, 40]  # q = 8, 6, 4, 3
+
+        t_tot = 100  # Need to let the antialiasing filters settle
+        t = np.arange(rate*t_tot+1) / float(rate)
+
+        # Sinusoids at 0.8*nyquist, windowed to avoid edge artifacts
+        freqs = np.array(rates_to) * 0.8 / 2
+        d = (np.exp(1j * 2 * np.pi * freqs[:, np.newaxis] * t)
+             * signal.windows.tukey(t.size, 0.1))
+
+        for rate_to in rates_to:
+            q = rate // rate_to
+            t_to = np.arange(rate_to*t_tot+1) / float(rate_to)
+            d_tos = (np.exp(1j * 2 * np.pi * freqs[:, np.newaxis] * t_to)
+                     * signal.windows.tukey(t_to.size, 0.1))
+
+            # Set up downsampling filters, match v0.17 defaults
+            if method == 'fir':
+                n = 30
+                system = signal.dlti(signal.firwin(n + 1, 1. / q,
+                                                   window='hamming'), 1.)
+            elif method == 'iir':
+                n = 8
+                wc = 0.8*np.pi/q
+                system = signal.dlti(*signal.cheby1(n, 0.05, wc/np.pi))
+
+            # Calculate expected phase response, as unit complex vector
+            if zero_phase is False:
+                _, h_resps = signal.freqz(system.num, system.den,
+                                          freqs/rate*2*np.pi)
+                h_resps /= np.abs(h_resps)
+            else:
+                h_resps = np.ones_like(freqs)
+
+            y_resamps = signal.decimate(d.real, q, n, ftype=system,
+                                        zero_phase=zero_phase)
+
+            # Get phase from complex inner product, like CSD
+            h_resamps = np.sum(d_tos.conj() * y_resamps, axis=-1)
+            h_resamps /= np.abs(h_resamps)
+            subnyq = freqs < 0.5*rate_to
+
+            # Complex vectors should be aligned, only compare below nyquist
+            assert_allclose(np.angle(h_resps.conj()*h_resamps)[subnyq], 0,
+                            atol=1e-3, rtol=1e-3)
+
+    def test_auto_n(self):
+        # Test that our value of n is a reasonable choice (depends on
+        # the downsampling factor)
+        sfreq = 100.
+        n = 1000
+        t = np.arange(n) / sfreq
+        # will alias for decimation factors >= 15
+        x = np.sqrt(2. / n) * np.sin(2 * np.pi * (sfreq / 30.) * t)
+        assert_allclose(np.linalg.norm(x), 1., rtol=1e-3)
+        x_out = signal.decimate(x, 30, ftype='fir')
+        assert_array_less(np.linalg.norm(x_out), 0.01)
+
+    def test_long_float32(self):
+        # regression: gh-15072.  With 32-bit float and either lfilter
+        # or filtfilt, this is numerically unstable
+        x = signal.decimate(np.ones(10_000, dtype=np.float32), 10)
+        assert not any(np.isnan(x))
+
+    def test_float16_upcast(self):
+        # float16 must be upcast to float64
+        x = signal.decimate(np.ones(100, dtype=np.float16), 10)
+        assert x.dtype.type == np.float64
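+
+
+# A rough sketch of the default IIR path tested above (illustration only):
+# decimate(x, q) designs an order-8 Chebyshev type I lowpass with 0.05 dB
+# ripple and cutoff 0.8/q, applies it (with filtfilt when zero_phase=True),
+# then keeps every q-th sample:
+#
+#     b, a = signal.cheby1(8, 0.05, 0.8 / q)
+#     y = signal.filtfilt(b, a, x)[::q]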
+
+
+class TestHilbert:
+
+    def test_bad_args(self):
+        x = np.array([1.0 + 0.0j])
+        assert_raises(ValueError, hilbert, x)
+        x = np.arange(8.0)
+        assert_raises(ValueError, hilbert, x, N=0)
+
+    def test_hilbert_theoretical(self):
+        # test cases by Ariel Rokem
+        decimal = 14
+
+        pi = np.pi
+        t = np.arange(0, 2 * pi, pi / 256)
+        a0 = np.sin(t)
+        a1 = np.cos(t)
+        a2 = np.sin(2 * t)
+        a3 = np.cos(2 * t)
+        a = np.vstack([a0, a1, a2, a3])
+
+        h = hilbert(a)
+        h_abs = np.abs(h)
+        h_angle = np.angle(h)
+        h_real = np.real(h)
+
+        # The real part should be equal to the original signals:
+        assert_almost_equal(h_real, a, decimal)
+        # The absolute value should be one everywhere, for this input:
+        assert_almost_equal(h_abs, np.ones(a.shape), decimal)
+        # For the 'slow' sine - the phase should go from -pi/2 to pi/2 in
+        # the first 256 bins:
+        assert_almost_equal(h_angle[0, :256],
+                            np.arange(-pi / 2, pi / 2, pi / 256),
+                            decimal)
+        # For the 'slow' cosine - the phase should go from 0 to pi in the
+        # same interval:
+        assert_almost_equal(
+            h_angle[1, :256], np.arange(0, pi, pi / 256), decimal)
+        # The 'fast' sine should make this phase transition in half the time:
+        assert_almost_equal(h_angle[2, :128],
+                            np.arange(-pi / 2, pi / 2, pi / 128),
+                            decimal)
+        # Ditto for the 'fast' cosine:
+        assert_almost_equal(
+            h_angle[3, :128], np.arange(0, pi, pi / 128), decimal)
+
+        # The imaginary part of hilbert(cos(t)) is sin(t): the Hilbert
+        # transform of a cosine is the corresponding sine.
+        assert_almost_equal(h[1].imag, a0, decimal)
+
+    def test_hilbert_axisN(self):
+        # tests for axis and N arguments
+        a = np.arange(18).reshape(3, 6)
+        # test axis
+        aa = hilbert(a, axis=-1)
+        assert_equal(hilbert(a.T, axis=0), aa.T)
+        # test 1d
+        assert_almost_equal(hilbert(a[0]), aa[0], 14)
+
+        # test N
+        aan = hilbert(a, N=20, axis=-1)
+        assert_equal(aan.shape, [3, 20])
+        assert_equal(hilbert(a.T, N=20, axis=0).shape, [20, 3])
+        # The next test is a pure regression test: the reference values below
+        # were recorded from a previous run and are not independently derived.
+        a0hilb = np.array([0.000000000000000e+00 - 1.72015830311905j,
+                           1.000000000000000e+00 - 2.047794505137069j,
+                           1.999999999999999e+00 - 2.244055555687583j,
+                           3.000000000000000e+00 - 1.262750302935009j,
+                           4.000000000000000e+00 - 1.066489252384493j,
+                           5.000000000000000e+00 + 2.918022706971047j,
+                           8.881784197001253e-17 + 3.845658908989067j,
+                          -9.444121133484362e-17 + 0.985044202202061j,
+                          -1.776356839400251e-16 + 1.332257797702019j,
+                          -3.996802888650564e-16 + 0.501905089898885j,
+                           1.332267629550188e-16 + 0.668696078880782j,
+                          -1.192678053963799e-16 + 0.235487067862679j,
+                          -1.776356839400251e-16 + 0.286439612812121j,
+                           3.108624468950438e-16 + 0.031676888064907j,
+                           1.332267629550188e-16 - 0.019275656884536j,
+                          -2.360035624836702e-16 - 0.1652588660287j,
+                           0.000000000000000e+00 - 0.332049855010597j,
+                           3.552713678800501e-16 - 0.403810179797771j,
+                           8.881784197001253e-17 - 0.751023775297729j,
+                           9.444121133484362e-17 - 0.79252210110103j])
+        assert_almost_equal(aan[0], a0hilb, 14, 'N regression')
+
+    @pytest.mark.parametrize('dtype', [np.float32, np.float64])
+    def test_hilbert_types(self, dtype):
+        in_typed = np.zeros(8, dtype=dtype)
+        assert_equal(np.real(signal.hilbert(in_typed)).dtype, dtype)
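+
+
+# A minimal usage sketch (illustration only): hilbert returns the analytic
+# signal x + 1j*H{x}, so envelope and instantaneous phase follow directly --
+# exactly the quantities checked in test_hilbert_theoretical:
+#
+#     analytic = hilbert(x)
+#     envelope = np.abs(analytic)
+#     phase = np.unwrap(np.angle(analytic))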
+
+
+class TestHilbert2:
+
+    def test_bad_args(self):
+        # x must be real.
+        x = np.array([[1.0 + 0.0j]])
+        assert_raises(ValueError, hilbert2, x)
+
+        # x must be rank 2.
+        x = np.arange(24).reshape(2, 3, 4)
+        assert_raises(ValueError, hilbert2, x)
+
+        # Bad value for N.
+        x = np.arange(16).reshape(4, 4)
+        assert_raises(ValueError, hilbert2, x, N=0)
+        assert_raises(ValueError, hilbert2, x, N=(2, 0))
+        assert_raises(ValueError, hilbert2, x, N=(2,))
+
+    @pytest.mark.parametrize('dtype', [np.float32, np.float64])
+    def test_hilbert2_types(self, dtype):
+        in_typed = np.zeros((2, 32), dtype=dtype)
+        assert_equal(np.real(signal.hilbert2(in_typed)).dtype, dtype)
+
+
+class TestPartialFractionExpansion:
+    @staticmethod
+    def assert_rp_almost_equal(r, p, r_true, p_true, decimal=7):
+        r_true = np.asarray(r_true)
+        p_true = np.asarray(p_true)
+
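+        # Residues and poles may be returned in any order, so build a matrix
+        # of pairwise distances in (p, r) space and match computed values to
+        # expected ones with an optimal assignment (Hungarian algorithm)
+        # before comparing elementwise.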
+        distance = np.hypot(abs(p[:, None] - p_true),
+                            abs(r[:, None] - r_true))
+
+        rows, cols = linear_sum_assignment(distance)
+        assert_almost_equal(p[rows], p_true[cols], decimal=decimal)
+        assert_almost_equal(r[rows], r_true[cols], decimal=decimal)
+
+    def test_compute_factors(self):
+        factors, poly = _compute_factors([1, 2, 3], [3, 2, 1])
+        assert_equal(len(factors), 3)
+        assert_almost_equal(factors[0], np.poly([2, 2, 3]))
+        assert_almost_equal(factors[1], np.poly([1, 1, 1, 3]))
+        assert_almost_equal(factors[2], np.poly([1, 1, 1, 2, 2]))
+        assert_almost_equal(poly, np.poly([1, 1, 1, 2, 2, 3]))
+
+        factors, poly = _compute_factors([1, 2, 3], [3, 2, 1],
+                                         include_powers=True)
+        assert_equal(len(factors), 6)
+        assert_almost_equal(factors[0], np.poly([1, 1, 2, 2, 3]))
+        assert_almost_equal(factors[1], np.poly([1, 2, 2, 3]))
+        assert_almost_equal(factors[2], np.poly([2, 2, 3]))
+        assert_almost_equal(factors[3], np.poly([1, 1, 1, 2, 3]))
+        assert_almost_equal(factors[4], np.poly([1, 1, 1, 3]))
+        assert_almost_equal(factors[5], np.poly([1, 1, 1, 2, 2]))
+        assert_almost_equal(poly, np.poly([1, 1, 1, 2, 2, 3]))
+
+    def test_group_poles(self):
+        unique, multiplicity = _group_poles(
+            [1.0, 1.001, 1.003, 2.0, 2.003, 3.0], 0.1, 'min')
+        assert_equal(unique, [1.0, 2.0, 3.0])
+        assert_equal(multiplicity, [3, 2, 1])
+
+    def test_residue_general(self):
+        # Tests are taken from issue #4464; note that poles in scipy are
+        # ordered by increasing absolute value, opposite to MATLAB.
+        r, p, k = residue([5, 3, -2, 7], [-4, 0, 8, 3])
+        assert_almost_equal(r, [1.3320, -0.6653, -1.4167], decimal=4)
+        assert_almost_equal(p, [-0.4093, -1.1644, 1.5737], decimal=4)
+        assert_almost_equal(k, [-1.2500], decimal=4)
+
+        r, p, k = residue([-4, 8], [1, 6, 8])
+        assert_almost_equal(r, [8, -12])
+        assert_almost_equal(p, [-2, -4])
+        assert_equal(k.size, 0)
+
+        r, p, k = residue([4, 1], [1, -1, -2])
+        assert_almost_equal(r, [1, 3])
+        assert_almost_equal(p, [-1, 2])
+        assert_equal(k.size, 0)
+
+        r, p, k = residue([4, 3], [2, -3.4, 1.98, -0.406])
+        self.assert_rp_almost_equal(
+            r, p, [-18.125 - 13.125j, -18.125 + 13.125j, 36.25],
+            [0.5 - 0.2j, 0.5 + 0.2j, 0.7])
+        assert_equal(k.size, 0)
+
+        r, p, k = residue([2, 1], [1, 5, 8, 4])
+        self.assert_rp_almost_equal(r, p, [-1, 1, 3], [-1, -2, -2])
+        assert_equal(k.size, 0)
+
+        r, p, k = residue([3, -1.1, 0.88, -2.396, 1.348],
+                          [1, -0.7, -0.14, 0.048])
+        assert_almost_equal(r, [-3, 4, 1])
+        assert_almost_equal(p, [0.2, -0.3, 0.8])
+        assert_almost_equal(k, [3, 1])
+
+        r, p, k = residue([1], [1, 2, -3])
+        assert_almost_equal(r, [0.25, -0.25])
+        assert_almost_equal(p, [1, -3])
+        assert_equal(k.size, 0)
+
+        r, p, k = residue([1, 0, -5], [1, 0, 0, 0, -1])
+        self.assert_rp_almost_equal(r, p,
+                                    [1, 1.5j, -1.5j, -1], [-1, -1j, 1j, 1])
+        assert_equal(k.size, 0)
+
+        r, p, k = residue([3, 8, 6], [1, 3, 3, 1])
+        self.assert_rp_almost_equal(r, p, [1, 2, 3], [-1, -1, -1])
+        assert_equal(k.size, 0)
+
+        r, p, k = residue([3, -1], [1, -3, 2])
+        assert_almost_equal(r, [-2, 5])
+        assert_almost_equal(p, [1, 2])
+        assert_equal(k.size, 0)
+
+        r, p, k = residue([2, 3, -1], [1, -3, 2])
+        assert_almost_equal(r, [-4, 13])
+        assert_almost_equal(p, [1, 2])
+        assert_almost_equal(k, [2])
+
+        r, p, k = residue([7, 2, 3, -1], [1, -3, 2])
+        assert_almost_equal(r, [-11, 69])
+        assert_almost_equal(p, [1, 2])
+        assert_almost_equal(k, [7, 23])
+
+        r, p, k = residue([2, 3, -1], [1, -3, 4, -2])
+        self.assert_rp_almost_equal(r, p, [4, -1 + 3.5j, -1 - 3.5j],
+                                    [1, 1 - 1j, 1 + 1j])
+        assert_almost_equal(k.size, 0)
+
+    def test_residue_leading_zeros(self):
+        # Leading zeros in numerator or denominator must not affect the answer.
+        r0, p0, k0 = residue([5, 3, -2, 7], [-4, 0, 8, 3])
+        r1, p1, k1 = residue([0, 5, 3, -2, 7], [-4, 0, 8, 3])
+        r2, p2, k2 = residue([5, 3, -2, 7], [0, -4, 0, 8, 3])
+        r3, p3, k3 = residue([0, 0, 5, 3, -2, 7], [0, 0, 0, -4, 0, 8, 3])
+        assert_almost_equal(r0, r1)
+        assert_almost_equal(r0, r2)
+        assert_almost_equal(r0, r3)
+        assert_almost_equal(p0, p1)
+        assert_almost_equal(p0, p2)
+        assert_almost_equal(p0, p3)
+        assert_almost_equal(k0, k1)
+        assert_almost_equal(k0, k2)
+        assert_almost_equal(k0, k3)
+
+    def test_residue_degenerate(self):
+        # Several tests for zero numerator and denominator.
+        r, p, k = residue([0, 0], [1, 6, 8])
+        assert_almost_equal(r, [0, 0])
+        assert_almost_equal(p, [-2, -4])
+        assert_equal(k.size, 0)
+
+        r, p, k = residue(0, 1)
+        assert_equal(r.size, 0)
+        assert_equal(p.size, 0)
+        assert_equal(k.size, 0)
+
+        with pytest.raises(ValueError, match="Denominator `a` is zero."):
+            residue(1, 0)
+
+    def test_residuez_general(self):
+        r, p, k = residuez([1, 6, 6, 2], [1, -(2 + 1j), (1 + 2j), -1j])
+        self.assert_rp_almost_equal(r, p, [-2+2.5j, 7.5+7.5j, -4.5-12j],
+                                    [1j, 1, 1])
+        assert_almost_equal(k, [2j])
+
+        r, p, k = residuez([1, 2, 1], [1, -1, 0.3561])
+        self.assert_rp_almost_equal(r, p,
+                                    [-0.9041 - 5.9928j, -0.9041 + 5.9928j],
+                                    [0.5 + 0.3257j, 0.5 - 0.3257j],
+                                    decimal=4)
+        assert_almost_equal(k, [2.8082], decimal=4)
+
+        r, p, k = residuez([1, -1], [1, -5, 6])
+        assert_almost_equal(r, [-1, 2])
+        assert_almost_equal(p, [2, 3])
+        assert_equal(k.size, 0)
+
+        r, p, k = residuez([2, 3, 4], [1, 3, 3, 1])
+        self.assert_rp_almost_equal(r, p, [4, -5, 3], [-1, -1, -1])
+        assert_equal(k.size, 0)
+
+        r, p, k = residuez([1, -10, -4, 4], [2, -2, -4])
+        assert_almost_equal(r, [0.5, -1.5])
+        assert_almost_equal(p, [-1, 2])
+        assert_almost_equal(k, [1.5, -1])
+
+        r, p, k = residuez([18], [18, 3, -4, -1])
+        self.assert_rp_almost_equal(r, p,
+                                    [0.36, 0.24, 0.4], [0.5, -1/3, -1/3])
+        assert_equal(k.size, 0)
+
+        r, p, k = residuez([2, 3], np.polymul([1, -1/2], [1, 1/4]))
+        assert_almost_equal(r, [-10/3, 16/3])
+        assert_almost_equal(p, [-0.25, 0.5])
+        assert_equal(k.size, 0)
+
+        r, p, k = residuez([1, -2, 1], [1, -1])
+        assert_almost_equal(r, [0])
+        assert_almost_equal(p, [1])
+        assert_almost_equal(k, [1, -1])
+
+        r, p, k = residuez(1, [1, -1j])
+        assert_almost_equal(r, [1])
+        assert_almost_equal(p, [1j])
+        assert_equal(k.size, 0)
+
+        r, p, k = residuez(1, [1, -1, 0.25])
+        assert_almost_equal(r, [0, 1])
+        assert_almost_equal(p, [0.5, 0.5])
+        assert_equal(k.size, 0)
+
+        r, p, k = residuez(1, [1, -0.75, .125])
+        assert_almost_equal(r, [-1, 2])
+        assert_almost_equal(p, [0.25, 0.5])
+        assert_equal(k.size, 0)
+
+        r, p, k = residuez([1, 6, 2], [1, -2, 1])
+        assert_almost_equal(r, [-10, 9])
+        assert_almost_equal(p, [1, 1])
+        assert_almost_equal(k, [2])
+
+        r, p, k = residuez([6, 2], [1, -2, 1])
+        assert_almost_equal(r, [-2, 8])
+        assert_almost_equal(p, [1, 1])
+        assert_equal(k.size, 0)
+
+        r, p, k = residuez([1, 6, 6, 2], [1, -2, 1])
+        assert_almost_equal(r, [-24, 15])
+        assert_almost_equal(p, [1, 1])
+        assert_almost_equal(k, [10, 2])
+
+        r, p, k = residuez([1, 0, 1], [1, 0, 0, 0, 0, -1])
+        self.assert_rp_almost_equal(r, p,
+                                    [0.2618 + 0.1902j, 0.2618 - 0.1902j,
+                                     0.4, 0.0382 - 0.1176j, 0.0382 + 0.1176j],
+                                    [-0.8090 + 0.5878j, -0.8090 - 0.5878j,
+                                     1.0, 0.3090 + 0.9511j, 0.3090 - 0.9511j],
+                                    decimal=4)
+        assert_equal(k.size, 0)
+
+    def test_residuez_trailing_zeros(self):
+        # Trailing zeros in numerator or denominator must not affect the
+        # answer.
+        r0, p0, k0 = residuez([5, 3, -2, 7], [-4, 0, 8, 3])
+        r1, p1, k1 = residuez([5, 3, -2, 7, 0], [-4, 0, 8, 3])
+        r2, p2, k2 = residuez([5, 3, -2, 7], [-4, 0, 8, 3, 0])
+        r3, p3, k3 = residuez([5, 3, -2, 7, 0, 0], [-4, 0, 8, 3, 0, 0, 0])
+        assert_almost_equal(r0, r1)
+        assert_almost_equal(r0, r2)
+        assert_almost_equal(r0, r3)
+        assert_almost_equal(p0, p1)
+        assert_almost_equal(p0, p2)
+        assert_almost_equal(p0, p3)
+        assert_almost_equal(k0, k1)
+        assert_almost_equal(k0, k2)
+        assert_almost_equal(k0, k3)
+
+    def test_residuez_degenerate(self):
+        r, p, k = residuez([0, 0], [1, 6, 8])
+        assert_almost_equal(r, [0, 0])
+        assert_almost_equal(p, [-2, -4])
+        assert_equal(k.size, 0)
+
+        r, p, k = residuez(0, 1)
+        assert_equal(r.size, 0)
+        assert_equal(p.size, 0)
+        assert_equal(k.size, 0)
+
+        with pytest.raises(ValueError, match="Denominator `a` is zero."):
+            residuez(1, 0)
+
+        with pytest.raises(ValueError,
+                           match="First coefficient of determinant `a` must "
+                                 "be non-zero."):
+            residuez(1, [0, 1, 2, 3])
+
+    def test_inverse_unique_roots_different_rtypes(self):
+        # This test was inspired by github issue 2496.
+        r = [3 / 10, -1 / 6, -2 / 15]
+        p = [0, -2, -5]
+        k = []
+        b_expected = [0, 1, 3]
+        a_expected = [1, 7, 10, 0]
+
+        # With the default tolerance, the rtype does not matter
+        # for this example.
+        for rtype in ('avg', 'mean', 'min', 'minimum', 'max', 'maximum'):
+            b, a = invres(r, p, k, rtype=rtype)
+            assert_allclose(b, b_expected)
+            assert_allclose(a, a_expected)
+
+            b, a = invresz(r, p, k, rtype=rtype)
+            assert_allclose(b, b_expected)
+            assert_allclose(a, a_expected)
+
+    def test_inverse_repeated_roots_different_rtypes(self):
+        r = [3 / 20, -7 / 36, -1 / 6, 2 / 45]
+        p = [0, -2, -2, -5]
+        k = []
+        b_expected = [0, 0, 1, 3]
+        b_expected_z = [-1/6, -2/3, 11/6, 3]
+        a_expected = [1, 9, 24, 20, 0]
+
+        for rtype in ('avg', 'mean', 'min', 'minimum', 'max', 'maximum'):
+            b, a = invres(r, p, k, rtype=rtype)
+            assert_allclose(b, b_expected, atol=1e-14)
+            assert_allclose(a, a_expected)
+
+            b, a = invresz(r, p, k, rtype=rtype)
+            assert_allclose(b, b_expected_z, atol=1e-14)
+            assert_allclose(a, a_expected)
+
+    def test_inverse_bad_rtype(self):
+        r = [3 / 20, -7 / 36, -1 / 6, 2 / 45]
+        p = [0, -2, -2, -5]
+        k = []
+        with pytest.raises(ValueError, match="`rtype` must be one of"):
+            invres(r, p, k, rtype='median')
+        with pytest.raises(ValueError, match="`rtype` must be one of"):
+            invresz(r, p, k, rtype='median')
+
+    def test_invresz_one_coefficient_bug(self):
+        # Regression test for issue in gh-4646.
+        r = [1]
+        p = [2]
+        k = [0]
+        b, a = invresz(r, p, k)
+        assert_allclose(b, [1.0])
+        assert_allclose(a, [1.0, -2.0])
+
+    def test_invres(self):
+        b, a = invres([1], [1], [])
+        assert_almost_equal(b, [1])
+        assert_almost_equal(a, [1, -1])
+
+        b, a = invres([1 - 1j, 2, 0.5 - 3j], [1, 0.5j, 1 + 1j], [])
+        assert_almost_equal(b, [3.5 - 4j, -8.5 + 0.25j, 3.5 + 3.25j])
+        assert_almost_equal(a, [1, -2 - 1.5j, 0.5 + 2j, 0.5 - 0.5j])
+
+        b, a = invres([0.5, 1], [1 - 1j, 2 + 2j], [1, 2, 3])
+        assert_almost_equal(b, [1, -1 - 1j, 1 - 2j, 0.5 - 3j, 10])
+        assert_almost_equal(a, [1, -3 - 1j, 4])
+
+        b, a = invres([-1, 2, 1j, 3 - 1j, 4, -2],
+                      [-1, 2 - 1j, 2 - 1j, 3, 3, 3], [])
+        assert_almost_equal(b, [4 - 1j, -28 + 16j, 40 - 62j, 100 + 24j,
+                                -292 + 219j, 192 - 268j])
+        assert_almost_equal(a, [1, -12 + 2j, 53 - 20j, -96 + 68j, 27 - 72j,
+                                108 - 54j, -81 + 108j])
+
+        b, a = invres([-1, 1j], [1, 1], [1, 2])
+        assert_almost_equal(b, [1, 0, -4, 3 + 1j])
+        assert_almost_equal(a, [1, -2, 1])
+
+    def test_invresz(self):
+        b, a = invresz([1], [1], [])
+        assert_almost_equal(b, [1])
+        assert_almost_equal(a, [1, -1])
+
+        b, a = invresz([1 - 1j, 2, 0.5 - 3j], [1, 0.5j, 1 + 1j], [])
+        assert_almost_equal(b, [3.5 - 4j, -8.5 + 0.25j, 3.5 + 3.25j])
+        assert_almost_equal(a, [1, -2 - 1.5j, 0.5 + 2j, 0.5 - 0.5j])
+
+        b, a = invresz([0.5, 1], [1 - 1j, 2 + 2j], [1, 2, 3])
+        assert_almost_equal(b, [2.5, -3 - 1j, 1 - 2j, -1 - 3j, 12])
+        assert_almost_equal(a, [1, -3 - 1j, 4])
+
+        b, a = invresz([-1, 2, 1j, 3 - 1j, 4, -2],
+                       [-1, 2 - 1j, 2 - 1j, 3, 3, 3], [])
+        assert_almost_equal(b, [6, -50 + 11j, 100 - 72j, 80 + 58j,
+                                -354 + 228j, 234 - 297j])
+        assert_almost_equal(a, [1, -12 + 2j, 53 - 20j, -96 + 68j, 27 - 72j,
+                                108 - 54j, -81 + 108j])
+
+        b, a = invresz([-1, 1j], [1, 1], [1, 2])
+        assert_almost_equal(b, [1j, 1, -3, 2])
+        assert_almost_equal(a, [1, -2, 1])
+
+    def test_inverse_scalar_arguments(self):
+        b, a = invres(1, 1, 1)
+        assert_almost_equal(b, [1, 0])
+        assert_almost_equal(a, [1, -1])
+
+        b, a = invresz(1, 1, 1)
+        assert_almost_equal(b, [2, -1])
+        assert_almost_equal(a, [1, -1])
+
+
+class TestVectorstrength:
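+    # These tests pin down the definition used by vectorstrength: events are
+    # mapped to phases on the unit circle and averaged,
+    #
+    #     strength = abs(mean(exp(2j*pi*events/period)))
+    #     phase = angle(mean(exp(2j*pi*events/period)))
+    #
+    # so perfectly periodic events give strength 1 and evenly opposed events
+    # cancel to 0.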
+
+    def test_single_1dperiod(self):
+        events = np.array([.5])
+        period = 5.
+        targ_strength = 1.
+        targ_phase = .1
+
+        strength, phase = vectorstrength(events, period)
+
+        assert_equal(strength.ndim, 0)
+        assert_equal(phase.ndim, 0)
+        assert_almost_equal(strength, targ_strength)
+        assert_almost_equal(phase, 2 * np.pi * targ_phase)
+
+    def test_single_2dperiod(self):
+        events = np.array([.5])
+        period = [1, 2, 5.]
+        targ_strength = [1.] * 3
+        targ_phase = np.array([.5, .25, .1])
+
+        strength, phase = vectorstrength(events, period)
+
+        assert_equal(strength.ndim, 1)
+        assert_equal(phase.ndim, 1)
+        assert_array_almost_equal(strength, targ_strength)
+        assert_almost_equal(phase, 2 * np.pi * targ_phase)
+
+    def test_equal_1dperiod(self):
+        events = np.array([.25, .25, .25, .25, .25, .25])
+        period = 2
+        targ_strength = 1.
+        targ_phase = .125
+
+        strength, phase = vectorstrength(events, period)
+
+        assert_equal(strength.ndim, 0)
+        assert_equal(phase.ndim, 0)
+        assert_almost_equal(strength, targ_strength)
+        assert_almost_equal(phase, 2 * np.pi * targ_phase)
+
+    def test_equal_2dperiod(self):
+        events = np.array([.25, .25, .25, .25, .25, .25])
+        period = [1, 2, ]
+        targ_strength = [1.] * 2
+        targ_phase = np.array([.25, .125])
+
+        strength, phase = vectorstrength(events, period)
+
+        assert_equal(strength.ndim, 1)
+        assert_equal(phase.ndim, 1)
+        assert_almost_equal(strength, targ_strength)
+        assert_almost_equal(phase, 2 * np.pi * targ_phase)
+
+    def test_spaced_1dperiod(self):
+        events = np.array([.1, 1.1, 2.1, 4.1, 10.1])
+        period = 1
+        targ_strength = 1.
+        targ_phase = .1
+
+        strength, phase = vectorstrength(events, period)
+
+        assert_equal(strength.ndim, 0)
+        assert_equal(phase.ndim, 0)
+        assert_almost_equal(strength, targ_strength)
+        assert_almost_equal(phase, 2 * np.pi * targ_phase)
+
+    def test_spaced_2dperiod(self):
+        events = np.array([.1, 1.1, 2.1, 4.1, 10.1])
+        period = [1, .5]
+        targ_strength = [1.] * 2
+        targ_phase = np.array([.1, .2])
+
+        strength, phase = vectorstrength(events, period)
+
+        assert_equal(strength.ndim, 1)
+        assert_equal(phase.ndim, 1)
+        assert_almost_equal(strength, targ_strength)
+        assert_almost_equal(phase, 2 * np.pi * targ_phase)
+
+    def test_partial_1dperiod(self):
+        events = np.array([.25, .5, .75])
+        period = 1
+        targ_strength = 1. / 3.
+        targ_phase = .5
+
+        strength, phase = vectorstrength(events, period)
+
+        assert_equal(strength.ndim, 0)
+        assert_equal(phase.ndim, 0)
+        assert_almost_equal(strength, targ_strength)
+        assert_almost_equal(phase, 2 * np.pi * targ_phase)
+
+    def test_partial_2dperiod(self):
+        events = np.array([.25, .5, .75])
+        period = [1., 1., 1., 1.]
+        targ_strength = [1. / 3.] * 4
+        targ_phase = np.array([.5, .5, .5, .5])
+
+        strength, phase = vectorstrength(events, period)
+
+        assert_equal(strength.ndim, 1)
+        assert_equal(phase.ndim, 1)
+        assert_almost_equal(strength, targ_strength)
+        assert_almost_equal(phase, 2 * np.pi * targ_phase)
+
+    def test_opposite_1dperiod(self):
+        events = np.array([0, .25, .5, .75])
+        period = 1.
+        targ_strength = 0
+
+        strength, phase = vectorstrength(events, period)
+
+        assert_equal(strength.ndim, 0)
+        assert_equal(phase.ndim, 0)
+        assert_almost_equal(strength, targ_strength)
+
+    def test_opposite_2dperiod(self):
+        events = np.array([0, .25, .5, .75])
+        period = [1.] * 10
+        targ_strength = [0.] * 10
+
+        strength, phase = vectorstrength(events, period)
+
+        assert_equal(strength.ndim, 1)
+        assert_equal(phase.ndim, 1)
+        assert_almost_equal(strength, targ_strength)
+
+    def test_2d_events_ValueError(self):
+        events = np.array([[1, 2]])
+        period = 1.
+        assert_raises(ValueError, vectorstrength, events, period)
+
+    def test_2d_period_ValueError(self):
+        events = 1.
+        period = np.array([[1]])
+        assert_raises(ValueError, vectorstrength, events, period)
+
+    def test_zero_period_ValueError(self):
+        events = 1.
+        period = 0
+        assert_raises(ValueError, vectorstrength, events, period)
+
+    def test_negative_period_ValueError(self):
+        events = 1.
+        period = -1
+        assert_raises(ValueError, vectorstrength, events, period)
+
+
+def cast_tf2sos(b, a):
+    """Convert TF2SOS, casting to complex128 and back to the original dtype."""
+    # tf2sos does not support all of the dtypes that we want to check, e.g.:
+    #
+    #     TypeError: array type complex256 is unsupported in linalg
+    #
+    # so let's cast, convert, and cast back -- should be fine for the
+    # systems and precisions we are testing.
+    dtype = np.asarray(b).dtype
+    b = np.array(b, np.complex128)
+    a = np.array(a, np.complex128)
+    return tf2sos(b, a).astype(dtype)
+
+
+def assert_allclose_cast(actual, desired, rtol=1e-7, atol=0):
+    """Wrap assert_allclose while casting object arrays."""
+    if actual.dtype.kind == 'O':
+        dtype = np.array(actual.flat[0]).dtype
+        actual, desired = actual.astype(dtype), desired.astype(dtype)
+    assert_allclose(actual, desired, rtol, atol)
+
+
+@pytest.mark.parametrize('func', (sosfilt, lfilter))
+def test_nonnumeric_dtypes(func):
+    x = [Decimal(1), Decimal(2), Decimal(3)]
+    b = [Decimal(1), Decimal(2), Decimal(3)]
+    a = [Decimal(1), Decimal(2), Decimal(3)]
+    x = np.array(x)
+    assert x.dtype.kind == 'O'
+    desired = lfilter(np.array(b, float), np.array(a, float), x.astype(float))
+    if func is sosfilt:
+        actual = sosfilt([b + a], x)
+    else:
+        actual = lfilter(b, a, x)
+    assert all(isinstance(x, Decimal) for x in actual)
+    assert_allclose(actual.astype(float), desired.astype(float))
+    # Degenerate cases
+    if func is lfilter:
+        args = [1., 1.]
+    else:
+        args = [tf2sos(1., 1.)]
+
+    with pytest.raises(ValueError, match='must be at least 1-D'):
+        func(*args, x=1.)
+
+
+@pytest.mark.parametrize('dt', 'fdgFDGO')
+class TestSOSFilt:
+
+    # The test_rank* tests are pulled from _TestLinearFilter
+    def test_rank1(self, dt):
+        x = np.linspace(0, 5, 6).astype(dt)
+        b = np.array([1, -1]).astype(dt)
+        a = np.array([0.5, -0.5]).astype(dt)
+
+        # Test simple IIR
+        y_r = np.array([0, 2, 4, 6, 8, 10.]).astype(dt)
+        sos = cast_tf2sos(b, a)
+        assert sos.dtype.char == dt
+        assert_array_almost_equal(sosfilt(cast_tf2sos(b, a), x), y_r)
+
+        # Test simple FIR
+        b = np.array([1, 1]).astype(dt)
+        # NOTE: This was changed (relative to _TestLinearFilter) to add a
+        # pole at zero:
+        a = np.array([1, 0]).astype(dt)
+        y_r = np.array([0, 1, 3, 5, 7, 9.]).astype(dt)
+        assert_array_almost_equal(sosfilt(cast_tf2sos(b, a), x), y_r)
+
+        b = [1, 1, 0]
+        a = [1, 0, 0]
+        x = np.ones(8)
+        sos = np.concatenate((b, a))
+        sos.shape = (1, 6)
+        y = sosfilt(sos, x)
+        assert_allclose(y, [1, 2, 2, 2, 2, 2, 2, 2])
+
+    def test_rank2(self, dt):
+        shape = (4, 3)
+        x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape)
+        x = x.astype(dt)
+
+        b = np.array([1, -1]).astype(dt)
+        a = np.array([0.5, 0.5]).astype(dt)
+
+        y_r2_a0 = np.array([[0, 2, 4], [6, 4, 2], [0, 2, 4], [6, 4, 2]],
+                           dtype=dt)
+
+        y_r2_a1 = np.array([[0, 2, 0], [6, -4, 6], [12, -10, 12],
+                            [18, -16, 18]], dtype=dt)
+
+        y = sosfilt(cast_tf2sos(b, a), x, axis=0)
+        assert_array_almost_equal(y_r2_a0, y)
+
+        y = sosfilt(cast_tf2sos(b, a), x, axis=1)
+        assert_array_almost_equal(y_r2_a1, y)
+
+    def test_rank3(self, dt):
+        shape = (4, 3, 2)
+        x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape)
+
+        b = np.array([1, -1]).astype(dt)
+        a = np.array([0.5, 0.5]).astype(dt)
+
+        # Test last axis
+        y = sosfilt(cast_tf2sos(b, a), x)
+        for i in range(x.shape[0]):
+            for j in range(x.shape[1]):
+                assert_array_almost_equal(y[i, j], lfilter(b, a, x[i, j]))
+
+    def test_initial_conditions(self, dt):
+        b1, a1 = signal.butter(2, 0.25, 'low')
+        b2, a2 = signal.butter(2, 0.75, 'low')
+        b3, a3 = signal.butter(2, 0.75, 'low')
+        b = np.convolve(np.convolve(b1, b2), b3)
+        a = np.convolve(np.convolve(a1, a2), a3)
+        sos = np.array((np.r_[b1, a1], np.r_[b2, a2], np.r_[b3, a3]))
+
+        x = np.random.rand(50).astype(dt)
+
+        # Stopping filtering and continuing
+        y_true, zi = lfilter(b, a, x[:20], zi=np.zeros(6))
+        y_true = np.r_[y_true, lfilter(b, a, x[20:], zi=zi)[0]]
+        assert_allclose_cast(y_true, lfilter(b, a, x))
+
+        y_sos, zi = sosfilt(sos, x[:20], zi=np.zeros((3, 2)))
+        y_sos = np.r_[y_sos, sosfilt(sos, x[20:], zi=zi)[0]]
+        assert_allclose_cast(y_true, y_sos)
+
+        # Use a step function
+        zi = sosfilt_zi(sos)
+        x = np.ones(8, dt)
+        y, zf = sosfilt(sos, x, zi=zi)
+
+        assert_allclose_cast(y, np.ones(8))
+        assert_allclose_cast(zf, zi)
+
+        # Initial condition shape matching
+        x.shape = (1, 1) + x.shape  # 3D
+        assert_raises(ValueError, sosfilt, sos, x, zi=zi)
+        zi_nd = zi.copy()
+        zi_nd.shape = (zi.shape[0], 1, 1, zi.shape[-1])
+        assert_raises(ValueError, sosfilt, sos, x,
+                      zi=zi_nd[:, :, :, [0, 1, 1]])
+        y, zf = sosfilt(sos, x, zi=zi_nd)
+        assert_allclose_cast(y[0, 0], np.ones(8))
+        assert_allclose_cast(zf[:, 0, 0, :], zi)
+
+    def test_initial_conditions_3d_axis1(self, dt):
+        # Test the use of zi when sosfilt is applied to axis 1 of a 3-d input.
+
+        # Input array is x.
+        x = np.random.RandomState(159).randint(0, 5, size=(2, 15, 3))
+        x = x.astype(dt)
+
+        # Design a filter in ZPK format and convert to SOS
+        zpk = signal.butter(6, 0.35, output='zpk')
+        sos = zpk2sos(*zpk)
+        nsections = sos.shape[0]
+
+        # Filter along this axis.
+        axis = 1
+
+        # Initial conditions, all zeros.
+        shp = list(x.shape)
+        shp[axis] = 2
+        shp = [nsections] + shp
+        z0 = np.zeros(shp)
+
+        # Apply the filter to x.
+        yf, zf = sosfilt(sos, x, axis=axis, zi=z0)
+
+        # Apply the filter to x in two stages.
+        y1, z1 = sosfilt(sos, x[:, :5, :], axis=axis, zi=z0)
+        y2, z2 = sosfilt(sos, x[:, 5:, :], axis=axis, zi=z1)
+
+        # y should equal yf, and z2 should equal zf.
+        y = np.concatenate((y1, y2), axis=axis)
+        assert_allclose_cast(y, yf, rtol=1e-10, atol=1e-13)
+        assert_allclose_cast(z2, zf, rtol=1e-10, atol=1e-13)
+
+        # let's try the "step" initial condition
+        zi = sosfilt_zi(sos)
+        zi.shape = [nsections, 1, 2, 1]
+        zi = zi * x[:, 0:1, :]
+        y = sosfilt(sos, x, axis=axis, zi=zi)[0]
+        # check it against the TF form
+        b, a = zpk2tf(*zpk)
+        zi = lfilter_zi(b, a)
+        zi.shape = [1, zi.size, 1]
+        zi = zi * x[:, 0:1, :]
+        y_tf = lfilter(b, a, x, axis=axis, zi=zi)[0]
+        assert_allclose_cast(y, y_tf, rtol=1e-10, atol=1e-13)
+
+    def test_bad_zi_shape(self, dt):
+        # The shape of zi is checked before using any values in the
+        # arguments, so np.empty is fine for creating the arguments.
+        x = np.empty((3, 15, 3), dt)
+        sos = np.zeros((4, 6))
+        zi = np.empty((4, 3, 3, 2))  # Correct shape is (4, 3, 2, 3)
+        with pytest.raises(ValueError, match='should be all ones'):
+            sosfilt(sos, x, zi=zi, axis=1)
+        sos[:, 3] = 1.
+        with pytest.raises(ValueError, match='Invalid zi shape'):
+            sosfilt(sos, x, zi=zi, axis=1)
+
+    def test_sosfilt_zi(self, dt):
+        sos = signal.butter(6, 0.2, output='sos')
+        zi = sosfilt_zi(sos)
+
+        y, zf = sosfilt(sos, np.ones(40, dt), zi=zi)
+        assert_allclose_cast(zf, zi, rtol=1e-13)
+
+        # Expected steady state value of the step response of this filter:
+        ss = np.prod(sos[:, :3].sum(axis=-1) / sos[:, 3:].sum(axis=-1))
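+        # (The DC gain of one biquad is H(1) = (b0+b1+b2)/(a0+a1+a2); the
+        # cascade gain is the product over sections.)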
+        assert_allclose_cast(y, ss, rtol=1e-13)
+
+        # zi as array-like
+        _, zf = sosfilt(sos, np.ones(40, dt), zi=zi.tolist())
+        assert_allclose_cast(zf, zi, rtol=1e-13)
+
+
+class TestDeconvolve:
+
+    def test_basic(self):
+        # From docstring example
+        original = [0, 1, 0, 0, 1, 1, 0, 0]
+        impulse_response = [2, 1]
+        recorded = [0, 2, 1, 0, 2, 3, 1, 0, 0]
+        recovered, remainder = signal.deconvolve(recorded, impulse_response)
+        assert_allclose(recovered, original)
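+        # deconvolve is polynomial long division: the outputs satisfy
+        # recorded == convolve(impulse_response, recovered) + remainder.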
+
+    def test_n_dimensional_signal(self):
+        recorded = [[0, 0], [0, 0]]
+        impulse_response = [0, 0]
+        with pytest.raises(ValueError, match="signal must be 1-D."):
+            quotient, remainder = signal.deconvolve(recorded, impulse_response)
+
+    def test_n_dimensional_divisor(self):
+        recorded = [0, 0]
+        impulse_response = [[0, 0], [0, 0]]
+        with pytest.raises(ValueError, match="divisor must be 1-D."):
+            quotient, remainder = signal.deconvolve(recorded, impulse_response)
+
+
+class TestDetrend:
+
+    def test_basic(self):
+        detrended = detrend(array([1, 2, 3]))
+        detrended_exact = array([0, 0, 0])
+        assert_array_almost_equal(detrended, detrended_exact)
+
+    def test_copy(self):
+        x = array([1, 1.2, 1.5, 1.6, 2.4])
+        copy_array = detrend(x, overwrite_data=False)
+        inplace = detrend(x, overwrite_data=True)
+        assert_array_almost_equal(copy_array, inplace)
+
+
+class TestUniqueRoots:
+    def test_real_no_repeat(self):
+        p = [-1.0, -0.5, 0.3, 1.2, 10.0]
+        unique, multiplicity = unique_roots(p)
+        assert_almost_equal(unique, p, decimal=15)
+        assert_equal(multiplicity, np.ones(len(p)))
+
+    def test_real_repeat(self):
+        p = [-1.0, -0.95, -0.89, -0.8, 0.5, 1.0, 1.05]
+
+        unique, multiplicity = unique_roots(p, tol=1e-1, rtype='min')
+        assert_almost_equal(unique, [-1.0, -0.89, 0.5, 1.0], decimal=15)
+        assert_equal(multiplicity, [2, 2, 1, 2])
+
+        unique, multiplicity = unique_roots(p, tol=1e-1, rtype='max')
+        assert_almost_equal(unique, [-0.95, -0.8, 0.5, 1.05], decimal=15)
+        assert_equal(multiplicity, [2, 2, 1, 2])
+
+        unique, multiplicity = unique_roots(p, tol=1e-1, rtype='avg')
+        assert_almost_equal(unique, [-0.975, -0.845, 0.5, 1.025], decimal=15)
+        assert_equal(multiplicity, [2, 2, 1, 2])
+
+    def test_complex_no_repeat(self):
+        p = [-1.0, 1.0j, 0.5 + 0.5j, -1.0 - 1.0j, 3.0 + 2.0j]
+        unique, multiplicity = unique_roots(p)
+        assert_almost_equal(unique, p, decimal=15)
+        assert_equal(multiplicity, np.ones(len(p)))
+
+    def test_complex_repeat(self):
+        p = [-1.0, -1.0 + 0.05j, -0.95 + 0.15j, -0.90 + 0.15j, 0.0,
+             0.5 + 0.5j, 0.45 + 0.55j]
+
+        unique, multiplicity = unique_roots(p, tol=1e-1, rtype='min')
+        assert_almost_equal(unique, [-1.0, -0.95 + 0.15j, 0.0, 0.45 + 0.55j],
+                            decimal=15)
+        assert_equal(multiplicity, [2, 2, 1, 2])
+
+        unique, multiplicity = unique_roots(p, tol=1e-1, rtype='max')
+        assert_almost_equal(unique,
+                            [-1.0 + 0.05j, -0.90 + 0.15j, 0.0, 0.5 + 0.5j],
+                            decimal=15)
+        assert_equal(multiplicity, [2, 2, 1, 2])
+
+        unique, multiplicity = unique_roots(p, tol=1e-1, rtype='avg')
+        assert_almost_equal(
+            unique, [-1.0 + 0.025j, -0.925 + 0.15j, 0.0, 0.475 + 0.525j],
+            decimal=15)
+        assert_equal(multiplicity, [2, 2, 1, 2])
+
+    def test_gh_4915(self):
+        p = np.roots(np.convolve(np.ones(5), np.ones(5)))
+        true_roots = [-(-1)**(1/5), (-1)**(4/5), -(-1)**(3/5), (-1)**(2/5)]
+
+        unique, multiplicity = unique_roots(p)
+        unique = np.sort(unique)
+
+        assert_almost_equal(np.sort(unique), true_roots, decimal=7)
+        assert_equal(multiplicity, [2, 2, 2, 2])
+
+    def test_complex_roots_extra(self):
+        unique, multiplicity = unique_roots([1.0, 1.0j, 1.0])
+        assert_almost_equal(unique, [1.0, 1.0j], decimal=15)
+        assert_equal(multiplicity, [2, 1])
+
+        unique, multiplicity = unique_roots([1, 1 + 2e-9, 1e-9 + 1j], tol=0.1)
+        assert_almost_equal(unique, [1.0, 1e-9 + 1.0j], decimal=15)
+        assert_equal(multiplicity, [2, 1])
+
+    def test_single_unique_root(self):
+        p = np.random.rand(100) + 1j * np.random.rand(100)
+        unique, multiplicity = unique_roots(p, 2)
+        assert_almost_equal(unique, [np.min(p)], decimal=15)
+        assert_equal(multiplicity, [100])
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_spectral.py b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_spectral.py
new file mode 100644
index 00000000..465eed58
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_spectral.py
@@ -0,0 +1,1602 @@
+import numpy as np
+from numpy.testing import (assert_, assert_approx_equal,
+                           assert_allclose, assert_array_equal, assert_equal,
+                           assert_array_almost_equal_nulp, suppress_warnings)
+import pytest
+from pytest import raises as assert_raises
+
+from scipy import signal
+from scipy.fft import fftfreq
+from scipy.signal import (periodogram, welch, lombscargle, csd, coherence,
+                          spectrogram, stft, istft, check_COLA, check_NOLA)
+from scipy.signal._spectral_py import _spectral_helper
+
+
+class TestPeriodogram:
+    def test_real_onesided_even(self):
+        x = np.zeros(16)
+        x[0] = 1
+        f, p = periodogram(x)
+        assert_allclose(f, np.linspace(0, 0.5, 9))
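+        # Expected one-sided density for a length-16 unit impulse: the default
+        # detrend='constant' removes the mean, zeroing the DC bin; every other
+        # bin has |X|**2 == 1, scaled by 1/(fs*N) = 1/16 and doubled for the
+        # one-sided spectrum (2/16 = 1/8), except Nyquist which is not doubled.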
+        q = np.ones(9)
+        q[0] = 0
+        q[-1] /= 2.0
+        q /= 8
+        assert_allclose(p, q)
+
+    def test_real_onesided_odd(self):
+        x = np.zeros(15)
+        x[0] = 1
+        f, p = periodogram(x)
+        assert_allclose(f, np.arange(8.0)/15.0)
+        q = np.ones(8)
+        q[0] = 0
+        q *= 2.0/15.0
+        assert_allclose(p, q, atol=1e-15)
+
+    def test_real_twosided(self):
+        x = np.zeros(16)
+        x[0] = 1
+        f, p = periodogram(x, return_onesided=False)
+        assert_allclose(f, fftfreq(16, 1.0))
+        q = np.full(16, 1/16.0)
+        q[0] = 0
+        assert_allclose(p, q)
+
+    def test_real_spectrum(self):
+        x = np.zeros(16)
+        x[0] = 1
+        f, p = periodogram(x, scaling='spectrum')
+        g, q = periodogram(x, scaling='density')
+        assert_allclose(f, np.linspace(0, 0.5, 9))
+        assert_allclose(p, q/16.0)
+
+    def test_integer_even(self):
+        x = np.zeros(16, dtype=int)
+        x[0] = 1
+        f, p = periodogram(x)
+        assert_allclose(f, np.linspace(0, 0.5, 9))
+        q = np.ones(9)
+        q[0] = 0
+        q[-1] /= 2.0
+        q /= 8
+        assert_allclose(p, q)
+
+    def test_integer_odd(self):
+        x = np.zeros(15, dtype=int)
+        x[0] = 1
+        f, p = periodogram(x)
+        assert_allclose(f, np.arange(8.0)/15.0)
+        q = np.ones(8)
+        q[0] = 0
+        q *= 2.0/15.0
+        assert_allclose(p, q, atol=1e-15)
+
+    def test_integer_twosided(self):
+        x = np.zeros(16, dtype=int)
+        x[0] = 1
+        f, p = periodogram(x, return_onesided=False)
+        assert_allclose(f, fftfreq(16, 1.0))
+        q = np.full(16, 1/16.0)
+        q[0] = 0
+        assert_allclose(p, q)
+
+    def test_complex(self):
+        x = np.zeros(16, np.complex128)
+        x[0] = 1.0 + 2.0j
+        f, p = periodogram(x, return_onesided=False)
+        assert_allclose(f, fftfreq(16, 1.0))
+        q = np.full(16, 5.0/16.0)
+        q[0] = 0
+        assert_allclose(p, q)
+
+    def test_unk_scaling(self):
+        assert_raises(ValueError, periodogram, np.zeros(4, np.complex128),
+                      scaling='foo')
+
+    def test_nd_axis_m1(self):
+        x = np.zeros(20, dtype=np.float64)
+        x = x.reshape((2,1,10))
+        x[:,:,0] = 1.0
+        f, p = periodogram(x)
+        assert_array_equal(p.shape, (2, 1, 6))
+        assert_array_almost_equal_nulp(p[0,0,:], p[1,0,:], 60)
+        f0, p0 = periodogram(x[0,0,:])
+        assert_array_almost_equal_nulp(p0[np.newaxis,:], p[1,:], 60)
+
+    def test_nd_axis_0(self):
+        x = np.zeros(20, dtype=np.float64)
+        x = x.reshape((10,2,1))
+        x[0,:,:] = 1.0
+        f, p = periodogram(x, axis=0)
+        assert_array_equal(p.shape, (6,2,1))
+        assert_array_almost_equal_nulp(p[:,0,0], p[:,1,0], 60)
+        f0, p0 = periodogram(x[:,0,0])
+        assert_array_almost_equal_nulp(p0, p[:,1,0])
+
+    def test_window_external(self):
+        x = np.zeros(16)
+        x[0] = 1
+        f, p = periodogram(x, 10, 'hann')
+        win = signal.get_window('hann', 16)
+        fe, pe = periodogram(x, 10, win)
+        assert_array_almost_equal_nulp(p, pe)
+        assert_array_almost_equal_nulp(f, fe)
+        win_err = signal.get_window('hann', 32)
+        assert_raises(ValueError, periodogram, x,
+                      10, win_err)  # win longer than signal
+
+    def test_padded_fft(self):
+        x = np.zeros(16)
+        x[0] = 1
+        f, p = periodogram(x)
+        fp, pp = periodogram(x, nfft=32)
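+        # Zero-padding to nfft=32 interpolates the spectrum: every other bin
+        # of the padded result coincides with the unpadded 16-point bins.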
+        assert_allclose(f, fp[::2])
+        assert_allclose(p, pp[::2])
+        assert_array_equal(pp.shape, (17,))
+
+    def test_empty_input(self):
+        f, p = periodogram([])
+        assert_array_equal(f.shape, (0,))
+        assert_array_equal(p.shape, (0,))
+        for shape in [(0,), (3,0), (0,5,2)]:
+            f, p = periodogram(np.empty(shape))
+            assert_array_equal(f.shape, shape)
+            assert_array_equal(p.shape, shape)
+
+    def test_empty_input_other_axis(self):
+        for shape in [(3,0), (0,5,2)]:
+            f, p = periodogram(np.empty(shape), axis=1)
+            assert_array_equal(f.shape, shape)
+            assert_array_equal(p.shape, shape)
+
+    def test_short_nfft(self):
+        x = np.zeros(18)
+        x[0] = 1
+        f, p = periodogram(x, nfft=16)
+        assert_allclose(f, np.linspace(0, 0.5, 9))
+        q = np.ones(9)
+        q[0] = 0
+        q[-1] /= 2.0
+        q /= 8
+        assert_allclose(p, q)
+
+    def test_nfft_is_xshape(self):
+        x = np.zeros(16)
+        x[0] = 1
+        f, p = periodogram(x, nfft=16)
+        assert_allclose(f, np.linspace(0, 0.5, 9))
+        q = np.ones(9)
+        q[0] = 0
+        q[-1] /= 2.0
+        q /= 8
+        assert_allclose(p, q)
+
+    def test_real_onesided_even_32(self):
+        x = np.zeros(16, 'f')
+        x[0] = 1
+        f, p = periodogram(x)
+        assert_allclose(f, np.linspace(0, 0.5, 9))
+        q = np.ones(9, 'f')
+        q[0] = 0
+        q[-1] /= 2.0
+        q /= 8
+        assert_allclose(p, q)
+        assert_(p.dtype == q.dtype)
+
+    def test_real_onesided_odd_32(self):
+        x = np.zeros(15, 'f')
+        x[0] = 1
+        f, p = periodogram(x)
+        assert_allclose(f, np.arange(8.0)/15.0)
+        q = np.ones(8, 'f')
+        q[0] = 0
+        q *= 2.0/15.0
+        assert_allclose(p, q, atol=1e-7)
+        assert_(p.dtype == q.dtype)
+
+    def test_real_twosided_32(self):
+        x = np.zeros(16, 'f')
+        x[0] = 1
+        f, p = periodogram(x, return_onesided=False)
+        assert_allclose(f, fftfreq(16, 1.0))
+        q = np.full(16, 1/16.0, 'f')
+        q[0] = 0
+        assert_allclose(p, q)
+        assert_(p.dtype == q.dtype)
+
+    def test_complex_32(self):
+        x = np.zeros(16, 'F')
+        x[0] = 1.0 + 2.0j
+        f, p = periodogram(x, return_onesided=False)
+        assert_allclose(f, fftfreq(16, 1.0))
+        q = np.full(16, 5.0/16.0, 'f')
+        q[0] = 0
+        assert_allclose(p, q)
+        assert_(p.dtype == q.dtype)
+
+    def test_shorter_window_error(self):
+        x = np.zeros(16)
+        x[0] = 1
+        win = signal.get_window('hann', 10)
+        expected_msg = ('the size of the window must be the same size '
+                        'of the input on the specified axis')
+        with assert_raises(ValueError, match=expected_msg):
+            periodogram(x, window=win)
+
+
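+# Welch's method splits the signal into overlapping segments, windows each
+# segment, and averages the resulting periodograms, trading frequency
+# resolution for reduced variance.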
+class TestWelch:
+    def test_real_onesided_even(self):
+        x = np.zeros(16)
+        x[0] = 1
+        x[8] = 1
+        f, p = welch(x, nperseg=8)
+        assert_allclose(f, np.linspace(0, 0.5, 5))
+        q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
+                      0.11111111])
+        assert_allclose(p, q, atol=1e-7, rtol=1e-7)
+
+    def test_real_onesided_odd(self):
+        x = np.zeros(16)
+        x[0] = 1
+        x[8] = 1
+        f, p = welch(x, nperseg=9)
+        assert_allclose(f, np.arange(5.0)/9.0)
+        q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113,
+                      0.17072113])
+        assert_allclose(p, q, atol=1e-7, rtol=1e-7)
+
+    def test_real_twosided(self):
+        x = np.zeros(16)
+        x[0] = 1
+        x[8] = 1
+        f, p = welch(x, nperseg=8, return_onesided=False)
+        assert_allclose(f, fftfreq(8, 1.0))
+        q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111,
+                      0.11111111, 0.11111111, 0.11111111, 0.07638889])
+        assert_allclose(p, q, atol=1e-7, rtol=1e-7)
+
+    def test_real_spectrum(self):
+        x = np.zeros(16)
+        x[0] = 1
+        x[8] = 1
+        f, p = welch(x, nperseg=8, scaling='spectrum')
+        assert_allclose(f, np.linspace(0, 0.5, 5))
+        q = np.array([0.015625, 0.02864583, 0.04166667, 0.04166667,
+                      0.02083333])
+        assert_allclose(p, q, atol=1e-7, rtol=1e-7)
+
+    def test_integer_onesided_even(self):
+        x = np.zeros(16, dtype=int)
+        x[0] = 1
+        x[8] = 1
+        f, p = welch(x, nperseg=8)
+        assert_allclose(f, np.linspace(0, 0.5, 5))
+        q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
+                      0.11111111])
+        assert_allclose(p, q, atol=1e-7, rtol=1e-7)
+
+    def test_integer_onesided_odd(self):
+        x = np.zeros(16, dtype=int)
+        x[0] = 1
+        x[8] = 1
+        f, p = welch(x, nperseg=9)
+        assert_allclose(f, np.arange(5.0)/9.0)
+        q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113,
+                      0.17072113])
+        assert_allclose(p, q, atol=1e-7, rtol=1e-7)
+
+    def test_integer_twosided(self):
+        x = np.zeros(16, dtype=int)
+        x[0] = 1
+        x[8] = 1
+        f, p = welch(x, nperseg=8, return_onesided=False)
+        assert_allclose(f, fftfreq(8, 1.0))
+        q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111,
+                      0.11111111, 0.11111111, 0.11111111, 0.07638889])
+        assert_allclose(p, q, atol=1e-7, rtol=1e-7)
+
+    def test_complex(self):
+        x = np.zeros(16, np.complex128)
+        x[0] = 1.0 + 2.0j
+        x[8] = 1.0 + 2.0j
+        f, p = welch(x, nperseg=8, return_onesided=False)
+        assert_allclose(f, fftfreq(8, 1.0))
+        q = np.array([0.41666667, 0.38194444, 0.55555556, 0.55555556,
+                      0.55555556, 0.55555556, 0.55555556, 0.38194444])
+        assert_allclose(p, q, atol=1e-7, rtol=1e-7)
+
+    def test_unk_scaling(self):
+        assert_raises(ValueError, welch, np.zeros(4, np.complex128),
+                      scaling='foo', nperseg=4)
+
+    def test_detrend_linear(self):
+        x = np.arange(10, dtype=np.float64) + 0.04
+        f, p = welch(x, nperseg=10, detrend='linear')
+        assert_allclose(p, np.zeros_like(p), atol=1e-15)
+
+    def test_no_detrending(self):
+        x = np.arange(10, dtype=np.float64) + 0.04
+        f1, p1 = welch(x, nperseg=10, detrend=False)
+        f2, p2 = welch(x, nperseg=10, detrend=lambda x: x)
+        assert_allclose(f1, f2, atol=1e-15)
+        assert_allclose(p1, p2, atol=1e-15)
+
+    def test_detrend_external(self):
+        x = np.arange(10, dtype=np.float64) + 0.04
+        f, p = welch(x, nperseg=10,
+                     detrend=lambda seg: signal.detrend(seg, type='l'))
+        assert_allclose(p, np.zeros_like(p), atol=1e-15)
+
+    def test_detrend_external_nd_m1(self):
+        x = np.arange(40, dtype=np.float64) + 0.04
+        x = x.reshape((2,2,10))
+        f, p = welch(x, nperseg=10,
+                     detrend=lambda seg: signal.detrend(seg, type='l'))
+        assert_allclose(p, np.zeros_like(p), atol=1e-15)
+
+    def test_detrend_external_nd_0(self):
+        x = np.arange(20, dtype=np.float64) + 0.04
+        x = x.reshape((2,1,10))
+        x = np.moveaxis(x, 2, 0)
+        f, p = welch(x, nperseg=10, axis=0,
+                     detrend=lambda seg: signal.detrend(seg, axis=0, type='l'))
+        assert_allclose(p, np.zeros_like(p), atol=1e-15)
+
+    def test_nd_axis_m1(self):
+        x = np.arange(20, dtype=np.float64) + 0.04
+        x = x.reshape((2,1,10))
+        f, p = welch(x, nperseg=10)
+        assert_array_equal(p.shape, (2, 1, 6))
+        assert_allclose(p[0,0,:], p[1,0,:], atol=1e-13, rtol=1e-13)
+        f0, p0 = welch(x[0,0,:], nperseg=10)
+        assert_allclose(p0[np.newaxis,:], p[1,:], atol=1e-13, rtol=1e-13)
+
+    def test_nd_axis_0(self):
+        x = np.arange(20, dtype=np.float64) + 0.04
+        x = x.reshape((10,2,1))
+        f, p = welch(x, nperseg=10, axis=0)
+        assert_array_equal(p.shape, (6,2,1))
+        assert_allclose(p[:,0,0], p[:,1,0], atol=1e-13, rtol=1e-13)
+        f0, p0 = welch(x[:,0,0], nperseg=10)
+        assert_allclose(p0, p[:,1,0], atol=1e-13, rtol=1e-13)
+
+    def test_window_external(self):
+        x = np.zeros(16)
+        x[0] = 1
+        x[8] = 1
+        f, p = welch(x, 10, 'hann', nperseg=8)
+        win = signal.get_window('hann', 8)
+        fe, pe = welch(x, 10, win, nperseg=None)
+        assert_array_almost_equal_nulp(p, pe)
+        assert_array_almost_equal_nulp(f, fe)
+        assert_array_equal(fe.shape, (5,))  # because win length used as nperseg
+        assert_array_equal(pe.shape, (5,))
+        assert_raises(ValueError, welch, x,
+                      10, win, nperseg=4)  # because nperseg != win.shape[-1]
+        win_err = signal.get_window('hann', 32)
+        assert_raises(ValueError, welch, x,
+                      10, win_err, nperseg=None)  # win longer than signal
+
+    def test_empty_input(self):
+        f, p = welch([])
+        assert_array_equal(f.shape, (0,))
+        assert_array_equal(p.shape, (0,))
+        for shape in [(0,), (3,0), (0,5,2)]:
+            f, p = welch(np.empty(shape))
+            assert_array_equal(f.shape, shape)
+            assert_array_equal(p.shape, shape)
+
+    def test_empty_input_other_axis(self):
+        for shape in [(3,0), (0,5,2)]:
+            f, p = welch(np.empty(shape), axis=1)
+            assert_array_equal(f.shape, shape)
+            assert_array_equal(p.shape, shape)
+
+    def test_short_data(self):
+        x = np.zeros(8)
+        x[0] = 1
+        # For a string-like window, an input signal shorter than nperseg
+        # emits a UserWarning and nperseg is reset to x.shape[-1].
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning, "nperseg = 256 is greater than input length  = 8, using nperseg = 8")
+            f, p = welch(x,window='hann')  # default nperseg
+            f1, p1 = welch(x,window='hann', nperseg=256)  # user-specified nperseg
+        f2, p2 = welch(x, nperseg=8)  # valid nperseg, doesn't give warning
+        assert_allclose(f, f2)
+        assert_allclose(p, p2)
+        assert_allclose(f1, f2)
+        assert_allclose(p1, p2)
+
+    def test_window_long_or_nd(self):
+        assert_raises(ValueError, welch, np.zeros(4), 1, np.array([1,1,1,1,1]))
+        assert_raises(ValueError, welch, np.zeros(4), 1,
+                      np.arange(6).reshape((2,3)))
+
+    def test_nondefault_noverlap(self):
+        x = np.zeros(64)
+        x[::8] = 1
+        f, p = welch(x, nperseg=16, noverlap=4)
+        q = np.array([0, 1./12., 1./3., 1./5., 1./3., 1./5., 1./3., 1./5.,
+                      1./6.])
+        assert_allclose(p, q, atol=1e-12)
+
+    def test_bad_noverlap(self):
+        assert_raises(ValueError, welch, np.zeros(4), 1, 'hann', 2, 7)
+
+    def test_nfft_too_short(self):
+        assert_raises(ValueError, welch, np.ones(12), nfft=3, nperseg=4)
+
+    def test_real_onesided_even_32(self):
+        x = np.zeros(16, 'f')
+        x[0] = 1
+        x[8] = 1
+        f, p = welch(x, nperseg=8)
+        assert_allclose(f, np.linspace(0, 0.5, 5))
+        q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
+                      0.11111111], 'f')
+        assert_allclose(p, q, atol=1e-7, rtol=1e-7)
+        assert_(p.dtype == q.dtype)
+
+    def test_real_onesided_odd_32(self):
+        x = np.zeros(16, 'f')
+        x[0] = 1
+        x[8] = 1
+        f, p = welch(x, nperseg=9)
+        assert_allclose(f, np.arange(5.0)/9.0)
+        q = np.array([0.12477458, 0.23430935, 0.17072113, 0.17072116,
+                      0.17072113], 'f')
+        assert_allclose(p, q, atol=1e-7, rtol=1e-7)
+        assert_(p.dtype == q.dtype)
+
+    def test_real_twosided_32(self):
+        x = np.zeros(16, 'f')
+        x[0] = 1
+        x[8] = 1
+        f, p = welch(x, nperseg=8, return_onesided=False)
+        assert_allclose(f, fftfreq(8, 1.0))
+        q = np.array([0.08333333, 0.07638889, 0.11111111,
+                      0.11111111, 0.11111111, 0.11111111, 0.11111111,
+                      0.07638889], 'f')
+        assert_allclose(p, q, atol=1e-7, rtol=1e-7)
+        assert_(p.dtype == q.dtype)
+
+    def test_complex_32(self):
+        x = np.zeros(16, 'F')
+        x[0] = 1.0 + 2.0j
+        x[8] = 1.0 + 2.0j
+        f, p = welch(x, nperseg=8, return_onesided=False)
+        assert_allclose(f, fftfreq(8, 1.0))
+        q = np.array([0.41666666, 0.38194442, 0.55555552, 0.55555552,
+                      0.55555558, 0.55555552, 0.55555552, 0.38194442], 'f')
+        assert_allclose(p, q, atol=1e-7, rtol=1e-7)
+        assert_(p.dtype == q.dtype,
+                'dtype mismatch, %s, %s' % (p.dtype, q.dtype))
+
+    def test_padded_freqs(self):
+        x = np.zeros(12)
+
+        nfft = 24
+        f = fftfreq(nfft, 1.0)[:nfft//2+1]
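+        # fftfreq lists the Nyquist bin as negative; flip its sign to match
+        # the one-sided frequencies returned by welch.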
+        f[-1] *= -1
+        fodd, _ = welch(x, nperseg=5, nfft=nfft)
+        feven, _ = welch(x, nperseg=6, nfft=nfft)
+        assert_allclose(f, fodd)
+        assert_allclose(f, feven)
+
+        nfft = 25
+        f = fftfreq(nfft, 1.0)[:(nfft + 1)//2]
+        fodd, _ = welch(x, nperseg=5, nfft=nfft)
+        feven, _ = welch(x, nperseg=6, nfft=nfft)
+        assert_allclose(f, fodd)
+        assert_allclose(f, feven)
+
+    def test_window_correction(self):
+        A = 20
+        fs = 1e4
+        nperseg = int(fs//10)
+        fsig = 300
+        ii = int(fsig*nperseg//fs)  # Freq index of fsig
+
+        tt = np.arange(fs)/fs
+        x = A*np.sin(2*np.pi*fsig*tt)
+
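+        # 'spectrum' scaling normalizes by the coherent gain sum(win)**2, so
+        # a sinusoid's peak reads A**2/2; 'density' normalizes by
+        # fs*sum(win**2), so integrating the PSD recovers the total power.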
+        for window in ['hann', 'bartlett', ('tukey', 0.1), 'flattop']:
+            _, p_spec = welch(x, fs=fs, nperseg=nperseg, window=window,
+                              scaling='spectrum')
+            freq, p_dens = welch(x, fs=fs, nperseg=nperseg, window=window,
+                                 scaling='density')
+
+            # Check peak height at signal frequency for 'spectrum'
+            assert_allclose(p_spec[ii], A**2/2.0)
+            # Check integrated spectrum RMS for 'density'
+            assert_allclose(np.sqrt(np.trapz(p_dens, freq)), A*np.sqrt(2)/2,
+                            rtol=1e-3)
+
+    def test_axis_rolling(self):
+        np.random.seed(1234)
+
+        x_flat = np.random.randn(1024)
+        _, p_flat = welch(x_flat)
+
+        for a in range(3):
+            newshape = [1,]*3
+            newshape[a] = -1
+            x = x_flat.reshape(newshape)
+
+            _, p_plus = welch(x, axis=a)  # Positive axis index
+            _, p_minus = welch(x, axis=a-x.ndim)  # Negative axis index
+
+            assert_equal(p_flat, p_plus.squeeze(), err_msg=a)
+            assert_equal(p_flat, p_minus.squeeze(), err_msg=a-x.ndim)
+
+    def test_average(self):
+        x = np.zeros(16)
+        x[0] = 1
+        x[8] = 1
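+        # average='median' replaces the mean over segments by a
+        # bias-corrected median, which is robust to outlier segments.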
+        f, p = welch(x, nperseg=8, average='median')
+        assert_allclose(f, np.linspace(0, 0.5, 5))
+        q = np.array([.1, .05, 0., 1.54074396e-33, 0.])
+        assert_allclose(p, q, atol=1e-7, rtol=1e-7)
+
+        assert_raises(ValueError, welch, x, nperseg=8,
+                      average='unrecognised-average')
+
+
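+# csd(x, x) reduces to welch(x), so the Welch expectations are reused below;
+# the cross-spectral cases mainly exercise zero-padding of unequal-length
+# inputs.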
+class TestCSD:
+    def test_pad_shorter_x(self):
+        x = np.zeros(8)
+        y = np.zeros(12)
+
+        f = np.linspace(0, 0.5, 7)
+        c = np.zeros(7,dtype=np.complex128)
+        f1, c1 = csd(x, y, nperseg=12)
+
+        assert_allclose(f, f1)
+        assert_allclose(c, c1)
+
+    def test_pad_shorter_y(self):
+        x = np.zeros(12)
+        y = np.zeros(8)
+
+        f = np.linspace(0, 0.5, 7)
+        c = np.zeros(7,dtype=np.complex128)
+        f1, c1 = csd(x, y, nperseg=12)
+
+        assert_allclose(f, f1)
+        assert_allclose(c, c1)
+
+    def test_real_onesided_even(self):
+        x = np.zeros(16)
+        x[0] = 1
+        x[8] = 1
+        f, p = csd(x, x, nperseg=8)
+        assert_allclose(f, np.linspace(0, 0.5, 5))
+        q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
+                      0.11111111])
+        assert_allclose(p, q, atol=1e-7, rtol=1e-7)
+
+    def test_real_onesided_odd(self):
+        x = np.zeros(16)
+        x[0] = 1
+        x[8] = 1
+        f, p = csd(x, x, nperseg=9)
+        assert_allclose(f, np.arange(5.0)/9.0)
+        q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113,
+                      0.17072113])
+        assert_allclose(p, q, atol=1e-7, rtol=1e-7)
+
+    def test_real_twosided(self):
+        x = np.zeros(16)
+        x[0] = 1
+        x[8] = 1
+        f, p = csd(x, x, nperseg=8, return_onesided=False)
+        assert_allclose(f, fftfreq(8, 1.0))
+        q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111,
+                      0.11111111, 0.11111111, 0.11111111, 0.07638889])
+        assert_allclose(p, q, atol=1e-7, rtol=1e-7)
+
+    def test_real_spectrum(self):
+        x = np.zeros(16)
+        x[0] = 1
+        x[8] = 1
+        f, p = csd(x, x, nperseg=8, scaling='spectrum')
+        assert_allclose(f, np.linspace(0, 0.5, 5))
+        q = np.array([0.015625, 0.02864583, 0.04166667, 0.04166667,
+                      0.02083333])
+        assert_allclose(p, q, atol=1e-7, rtol=1e-7)
+
+    def test_integer_onesided_even(self):
+        x = np.zeros(16, dtype=int)
+        x[0] = 1
+        x[8] = 1
+        f, p = csd(x, x, nperseg=8)
+        assert_allclose(f, np.linspace(0, 0.5, 5))
+        q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
+                      0.11111111])
+        assert_allclose(p, q, atol=1e-7, rtol=1e-7)
+
+    def test_integer_onesided_odd(self):
+        x = np.zeros(16, dtype=int)
+        x[0] = 1
+        x[8] = 1
+        f, p = csd(x, x, nperseg=9)
+        assert_allclose(f, np.arange(5.0)/9.0)
+        q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113,
+                      0.17072113])
+        assert_allclose(p, q, atol=1e-7, rtol=1e-7)
+
+    def test_integer_twosided(self):
+        x = np.zeros(16, dtype=int)
+        x[0] = 1
+        x[8] = 1
+        f, p = csd(x, x, nperseg=8, return_onesided=False)
+        assert_allclose(f, fftfreq(8, 1.0))
+        q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111,
+                      0.11111111, 0.11111111, 0.11111111, 0.07638889])
+        assert_allclose(p, q, atol=1e-7, rtol=1e-7)
+
+    def test_complex(self):
+        x = np.zeros(16, np.complex128)
+        x[0] = 1.0 + 2.0j
+        x[8] = 1.0 + 2.0j
+        f, p = csd(x, x, nperseg=8, return_onesided=False)
+        assert_allclose(f, fftfreq(8, 1.0))
+        q = np.array([0.41666667, 0.38194444, 0.55555556, 0.55555556,
+                      0.55555556, 0.55555556, 0.55555556, 0.38194444])
+        assert_allclose(p, q, atol=1e-7, rtol=1e-7)
+
+    def test_unk_scaling(self):
+        assert_raises(ValueError, csd, np.zeros(4, np.complex128),
+                      np.ones(4, np.complex128), scaling='foo', nperseg=4)
+
+    def test_detrend_linear(self):
+        x = np.arange(10, dtype=np.float64) + 0.04
+        f, p = csd(x, x, nperseg=10, detrend='linear')
+        assert_allclose(p, np.zeros_like(p), atol=1e-15)
+
+    def test_no_detrending(self):
+        x = np.arange(10, dtype=np.float64) + 0.04
+        f1, p1 = csd(x, x, nperseg=10, detrend=False)
+        f2, p2 = csd(x, x, nperseg=10, detrend=lambda x: x)
+        assert_allclose(f1, f2, atol=1e-15)
+        assert_allclose(p1, p2, atol=1e-15)
+
+    def test_detrend_external(self):
+        x = np.arange(10, dtype=np.float64) + 0.04
+        f, p = csd(x, x, nperseg=10,
+                   detrend=lambda seg: signal.detrend(seg, type='l'))
+        assert_allclose(p, np.zeros_like(p), atol=1e-15)
+
+    def test_detrend_external_nd_m1(self):
+        x = np.arange(40, dtype=np.float64) + 0.04
+        x = x.reshape((2,2,10))
+        f, p = csd(x, x, nperseg=10,
+                   detrend=lambda seg: signal.detrend(seg, type='l'))
+        assert_allclose(p, np.zeros_like(p), atol=1e-15)
+
+    def test_detrend_external_nd_0(self):
+        x = np.arange(20, dtype=np.float64) + 0.04
+        x = x.reshape((2,1,10))
+        x = np.moveaxis(x, 2, 0)
+        f, p = csd(x, x, nperseg=10, axis=0,
+                   detrend=lambda seg: signal.detrend(seg, axis=0, type='l'))
+        assert_allclose(p, np.zeros_like(p), atol=1e-15)
+
+    def test_nd_axis_m1(self):
+        x = np.arange(20, dtype=np.float64) + 0.04
+        x = x.reshape((2,1,10))
+        f, p = csd(x, x, nperseg=10)
+        assert_array_equal(p.shape, (2, 1, 6))
+        assert_allclose(p[0,0,:], p[1,0,:], atol=1e-13, rtol=1e-13)
+        f0, p0 = csd(x[0,0,:], x[0,0,:], nperseg=10)
+        assert_allclose(p0[np.newaxis,:], p[1,:], atol=1e-13, rtol=1e-13)
+
+    def test_nd_axis_0(self):
+        x = np.arange(20, dtype=np.float64) + 0.04
+        x = x.reshape((10,2,1))
+        f, p = csd(x, x, nperseg=10, axis=0)
+        assert_array_equal(p.shape, (6,2,1))
+        assert_allclose(p[:,0,0], p[:,1,0], atol=1e-13, rtol=1e-13)
+        f0, p0 = csd(x[:,0,0], x[:,0,0], nperseg=10)
+        assert_allclose(p0, p[:,1,0], atol=1e-13, rtol=1e-13)
+
+    def test_window_external(self):
+        x = np.zeros(16)
+        x[0] = 1
+        x[8] = 1
+        f, p = csd(x, x, 10, 'hann', 8)
+        win = signal.get_window('hann', 8)
+        fe, pe = csd(x, x, 10, win, nperseg=None)
+        assert_array_almost_equal_nulp(p, pe)
+        assert_array_almost_equal_nulp(f, fe)
+        assert_array_equal(fe.shape, (5,))  # because win length used as nperseg
+        assert_array_equal(pe.shape, (5,))
+        assert_raises(ValueError, csd, x, x,
+                      10, win, nperseg=256)  # because nperseg != win.shape[-1]
+        win_err = signal.get_window('hann', 32)
+        assert_raises(ValueError, csd, x, x,
+                      10, win_err, nperseg=None)  # because win longer than signal
+
+    def test_empty_input(self):
+        f, p = csd([],np.zeros(10))
+        assert_array_equal(f.shape, (0,))
+        assert_array_equal(p.shape, (0,))
+
+        f, p = csd(np.zeros(10),[])
+        assert_array_equal(f.shape, (0,))
+        assert_array_equal(p.shape, (0,))
+
+        for shape in [(0,), (3,0), (0,5,2)]:
+            f, p = csd(np.empty(shape), np.empty(shape))
+            assert_array_equal(f.shape, shape)
+            assert_array_equal(p.shape, shape)
+
+        f, p = csd(np.ones(10), np.empty((5,0)))
+        assert_array_equal(f.shape, (5,0))
+        assert_array_equal(p.shape, (5,0))
+
+        f, p = csd(np.empty((5,0)), np.ones(10))
+        assert_array_equal(f.shape, (5,0))
+        assert_array_equal(p.shape, (5,0))
+
+    def test_empty_input_other_axis(self):
+        for shape in [(3,0), (0,5,2)]:
+            f, p = csd(np.empty(shape), np.empty(shape), axis=1)
+            assert_array_equal(f.shape, shape)
+            assert_array_equal(p.shape, shape)
+
+        f, p = csd(np.empty((10,10,3)), np.zeros((10,0,1)), axis=1)
+        assert_array_equal(f.shape, (10,0,3))
+        assert_array_equal(p.shape, (10,0,3))
+
+        f, p = csd(np.empty((10,0,1)), np.zeros((10,10,3)), axis=1)
+        assert_array_equal(f.shape, (10,0,3))
+        assert_array_equal(p.shape, (10,0,3))
+
+    def test_short_data(self):
+        x = np.zeros(8)
+        x[0] = 1
+
+        # For a string-like window, an input signal shorter than nperseg
+        # emits a UserWarning and nperseg is reset to x.shape[-1].
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning, "nperseg = 256 is greater than input length  = 8, using nperseg = 8")
+            f, p = csd(x, x, window='hann')  # default nperseg
+            f1, p1 = csd(x, x, window='hann', nperseg=256)  # user-specified nperseg
+        f2, p2 = csd(x, x, nperseg=8)  # valid nperseg, doesn't give warning
+        assert_allclose(f, f2)
+        assert_allclose(p, p2)
+        assert_allclose(f1, f2)
+        assert_allclose(p1, p2)
+
+    def test_window_long_or_nd(self):
+        assert_raises(ValueError, csd, np.zeros(4), np.ones(4), 1,
+                      np.array([1,1,1,1,1]))
+        assert_raises(ValueError, csd, np.zeros(4), np.ones(4), 1,
+                      np.arange(6).reshape((2,3)))
+
+    def test_nondefault_noverlap(self):
+        x = np.zeros(64)
+        x[::8] = 1
+        f, p = csd(x, x, nperseg=16, noverlap=4)
+        q = np.array([0, 1./12., 1./3., 1./5., 1./3., 1./5., 1./3., 1./5.,
+                      1./6.])
+        assert_allclose(p, q, atol=1e-12)
+
+    def test_bad_noverlap(self):
+        assert_raises(ValueError, csd, np.zeros(4), np.ones(4), 1, 'hann',
+                      2, 7)
+
+    def test_nfft_too_short(self):
+        assert_raises(ValueError, csd, np.ones(12), np.zeros(12), nfft=3,
+                      nperseg=4)
+
+    def test_real_onesided_even_32(self):
+        x = np.zeros(16, 'f')
+        x[0] = 1
+        x[8] = 1
+        f, p = csd(x, x, nperseg=8)
+        assert_allclose(f, np.linspace(0, 0.5, 5))
+        q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
+                      0.11111111], 'f')
+        assert_allclose(p, q, atol=1e-7, rtol=1e-7)
+        assert_(p.dtype == q.dtype)
+
+    def test_real_onesided_odd_32(self):
+        x = np.zeros(16, 'f')
+        x[0] = 1
+        x[8] = 1
+        f, p = csd(x, x, nperseg=9)
+        assert_allclose(f, np.arange(5.0)/9.0)
+        q = np.array([0.12477458, 0.23430935, 0.17072113, 0.17072116,
+                      0.17072113], 'f')
+        assert_allclose(p, q, atol=1e-7, rtol=1e-7)
+        assert_(p.dtype == q.dtype)
+
+    def test_real_twosided_32(self):
+        x = np.zeros(16, 'f')
+        x[0] = 1
+        x[8] = 1
+        f, p = csd(x, x, nperseg=8, return_onesided=False)
+        assert_allclose(f, fftfreq(8, 1.0))
+        q = np.array([0.08333333, 0.07638889, 0.11111111,
+                      0.11111111, 0.11111111, 0.11111111, 0.11111111,
+                      0.07638889], 'f')
+        assert_allclose(p, q, atol=1e-7, rtol=1e-7)
+        assert_(p.dtype == q.dtype)
+
+    def test_complex_32(self):
+        x = np.zeros(16, 'F')
+        x[0] = 1.0 + 2.0j
+        x[8] = 1.0 + 2.0j
+        f, p = csd(x, x, nperseg=8, return_onesided=False)
+        assert_allclose(f, fftfreq(8, 1.0))
+        q = np.array([0.41666666, 0.38194442, 0.55555552, 0.55555552,
+                      0.55555558, 0.55555552, 0.55555552, 0.38194442], 'f')
+        assert_allclose(p, q, atol=1e-7, rtol=1e-7)
+        assert_(p.dtype == q.dtype,
+                'dtype mismatch, %s, %s' % (p.dtype, q.dtype))
+
+    def test_padded_freqs(self):
+        x = np.zeros(12)
+        y = np.ones(12)
+
+        nfft = 24
+        f = fftfreq(nfft, 1.0)[:nfft//2+1]
+        f[-1] *= -1
+        fodd, _ = csd(x, y, nperseg=5, nfft=nfft)
+        feven, _ = csd(x, y, nperseg=6, nfft=nfft)
+        assert_allclose(f, fodd)
+        assert_allclose(f, feven)
+
+        nfft = 25
+        f = fftfreq(nfft, 1.0)[:(nfft + 1)//2]
+        fodd, _ = csd(x, y, nperseg=5, nfft=nfft)
+        feven, _ = csd(x, y, nperseg=6, nfft=nfft)
+        assert_allclose(f, fodd)
+        assert_allclose(f, feven)
+
+    def test_copied_data(self):
+        x = np.random.randn(64)
+        y = x.copy()
+
+        _, p_same = csd(x, x, nperseg=8, average='mean',
+                        return_onesided=False)
+        _, p_copied = csd(x, y, nperseg=8, average='mean',
+                          return_onesided=False)
+        assert_allclose(p_same, p_copied)
+
+        _, p_same = csd(x, x, nperseg=8, average='median',
+                        return_onesided=False)
+        _, p_copied = csd(x, y, nperseg=8, average='median',
+                          return_onesided=False)
+        assert_allclose(p_same, p_copied)
+
+
+class TestCoherence:
+    def test_identical_input(self):
+        x = np.random.randn(20)
+        y = np.copy(x)  # So `y is x` -> False
+
+        f = np.linspace(0, 0.5, 6)
+        C = np.ones(6)
+        f1, C1 = coherence(x, y, nperseg=10)
+
+        assert_allclose(f, f1)
+        assert_allclose(C, C1)
+
+    def test_phase_shifted_input(self):
+        x = np.random.randn(20)
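+        # Coherence is the normalized magnitude-squared cross-spectrum, so a
+        # pure sign flip of one input still yields unit coherence everywhere.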
+        y = -x
+
+        f = np.linspace(0, 0.5, 6)
+        C = np.ones(6)
+        f1, C1 = coherence(x, y, nperseg=10)
+
+        assert_allclose(f, f1)
+        assert_allclose(C, C1)
+
+
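+# With the default mode='psd', each spectrogram column is a PSD-scaled
+# squared STFT magnitude, so averaging columns with matching parameters
+# reproduces the Welch estimate.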
+class TestSpectrogram:
+    def test_average_all_segments(self):
+        x = np.random.randn(1024)
+
+        fs = 1.0
+        window = ('tukey', 0.25)
+        nperseg = 16
+        noverlap = 2
+
+        f, _, P = spectrogram(x, fs, window, nperseg, noverlap)
+        fw, Pw = welch(x, fs, window, nperseg, noverlap)
+        assert_allclose(f, fw)
+        assert_allclose(np.mean(P, axis=-1), Pw)
+
+    def test_window_external(self):
+        x = np.random.randn(1024)
+
+        fs = 1.0
+        window = ('tukey', 0.25)
+        nperseg = 16
+        noverlap = 2
+        f, _, P = spectrogram(x, fs, window, nperseg, noverlap)
+
+        win = signal.get_window(('tukey', 0.25), 16)
+        fe, _, Pe = spectrogram(x, fs, win, nperseg=None, noverlap=2)
+        assert_array_equal(fe.shape, (9,))  # because win length used as nperseg
+        assert_array_equal(Pe.shape, (9,73))
+        assert_raises(ValueError, spectrogram, x,
+                      fs, win, nperseg=8)  # because nperseg != win.shape[-1]
+        win_err = signal.get_window(('tukey', 0.25), 2048)
+        assert_raises(ValueError, spectrogram, x,
+                      fs, win_err, nperseg=None)  # win longer than signal
+
+    def test_short_data(self):
+        x = np.random.randn(1024)
+        fs = 1.0
+
+        # For a string-like window, an input signal shorter than nperseg
+        # emits a UserWarning and nperseg is reset to x.shape[-1].
+        f, _, p = spectrogram(x, fs, window=('tukey',0.25))  # default nperseg
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning,
+                       "nperseg = 1025 is greater than input length  = 1024, using nperseg = 1024")
+            f1, _, p1 = spectrogram(x, fs, window=('tukey',0.25),
+                                    nperseg=1025)  # user-specified nperseg
+        f2, _, p2 = spectrogram(x, fs, nperseg=256)  # to compare w/default
+        f3, _, p3 = spectrogram(x, fs, nperseg=1024)  # compare w/user-spec'd
+        assert_allclose(f, f2)
+        assert_allclose(p, p2)
+        assert_allclose(f1, f3)
+        assert_allclose(p1, p3)
+
+
+class TestLombscargle:
+    def test_frequency(self):
+        """Test if frequency location of peak corresponds to frequency of
+        generated input signal.
+        """
+
+        # Input parameters
+        ampl = 2.
+        w = 1.
+        phi = 0.5 * np.pi
+        nin = 100
+        nout = 1000
+        p = 0.7  # Samples with r >= p are kept, i.e. ~30% of the points
+
+        # Randomly select a subset of the time samples
+        np.random.seed(2353425)
+        r = np.random.rand(nin)
+        t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p]
+
+        # Generate a sine wave for the selected times
+        x = ampl * np.sin(w*t + phi)
+
+        # Define the array of frequencies for which to compute the periodogram
+        f = np.linspace(0.01, 10., nout)
+
+        # Calculate Lomb-Scargle periodogram
+        P = lombscargle(t, x, f)
+
+        # Check if difference between found frequency maximum and input
+        # frequency is less than accuracy
+        delta = f[1] - f[0]
+        assert_(abs(w - f[np.argmax(P)]) < (delta/2.))
+
+    def test_amplitude(self):
+        # Test if height of peak in normalized Lomb-Scargle periodogram
+        # corresponds to amplitude of the generated input signal.
+
+        # Input parameters
+        ampl = 2.
+        w = 1.
+        phi = 0.5 * np.pi
+        nin = 100
+        nout = 1000
+        p = 0.7  # Samples with r >= p are kept, i.e. ~30% of the points
+
+        # Randomly select a subset of the time samples
+        np.random.seed(2353425)
+        r = np.random.rand(nin)
+        t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p]
+
+        # Generate a sine wave for the selected times
+        x = ampl * np.sin(w*t + phi)
+
+        # Define the array of frequencies for which to compute the periodogram
+        f = np.linspace(0.01, 10., nout)
+
+        # Calculate Lomb-Scargle periodogram
+        pgram = lombscargle(t, x, f)
+
+        # Normalize
+        pgram = np.sqrt(4 * pgram / t.shape[0])
+
+        # Check that the peak height of the normalized periodogram matches
+        # the input amplitude
+        assert_approx_equal(np.max(pgram), ampl, significant=2)
+
+    def test_precenter(self):
+        # Test if precenter gives the same result as manually precentering.
+
+        # Input parameters
+        ampl = 2.
+        w = 1.
+        phi = 0.5 * np.pi
+        nin = 100
+        nout = 1000
+        p = 0.7  # Samples with r >= p are kept, i.e. ~30% of the points
+        offset = 0.15  # Offset to be subtracted in pre-centering
+
+        # Randomly select a subset of the time samples
+        np.random.seed(2353425)
+        r = np.random.rand(nin)
+        t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p]
+
+        # Generate a sine wave for the selected times
+        x = ampl * np.sin(w*t + phi) + offset
+
+        # Define the array of frequencies for which to compute the periodogram
+        f = np.linspace(0.01, 10., nout)
+
+        # Calculate Lomb-Scargle periodogram
+        pgram = lombscargle(t, x, f, precenter=True)
+        pgram2 = lombscargle(t, x - x.mean(), f, precenter=False)
+
+        # check if centering worked
+        assert_allclose(pgram, pgram2)
+
+    def test_normalize(self):
+        # Test the normalize option of the Lomb-Scargle periodogram.
+
+        # Input parameters
+        ampl = 2.
+        w = 1.
+        phi = 0.5 * np.pi
+        nin = 100
+        nout = 1000
+        p = 0.7  # Samples with r >= p are kept, i.e. ~30% of the points
+
+        # Randomly select a subset of the time samples
+        np.random.seed(2353425)
+        r = np.random.rand(nin)
+        t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p]
+
+        # Generate a sine wave for the selected times
+        x = ampl * np.sin(w*t + phi)
+
+        # Define the array of frequencies for which to compute the periodogram
+        f = np.linspace(0.01, 10., nout)
+
+        # Calculate Lomb-Scargle periodogram
+        pgram = lombscargle(t, x, f)
+        pgram2 = lombscargle(t, x, f, normalize=True)
+
+        # check if normalization works as expected
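+        # normalize=True divides the raw periodogram by dot(x, x)/2, half
+        # the signal energy, so a pure sinusoid peaks near 1.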
+        assert_allclose(pgram * 2 / np.dot(x, x), pgram2)
+        assert_approx_equal(np.max(pgram2), 1.0, significant=2)
+
+    def test_wrong_shape(self):
+        t = np.linspace(0, 1, 1)
+        x = np.linspace(0, 1, 2)
+        f = np.linspace(0, 1, 3)
+        assert_raises(ValueError, lombscargle, t, x, f)
+
+    def test_zero_division(self):
+        t = np.zeros(1)
+        x = np.zeros(1)
+        f = np.zeros(1)
+        assert_raises(ZeroDivisionError, lombscargle, t, x, f)
+
+    def test_lombscargle_atan_vs_atan2(self):
+        # https://github.com/scipy/scipy/issues/3787
+        # This raised a ZeroDivisionError.
+        t = np.linspace(0, 10, 1000, endpoint=False)
+        x = np.sin(4*t)
+        f = np.linspace(0, 50, 500, endpoint=False) + 0.1
+        lombscargle(t, x, f*2*np.pi)
+
+
+class TestSTFT:
+    def test_input_validation(self):
+
+        def chk_VE(match):
+            """Assert for a ValueError matching regexp `match`.
+
+            This little wrapper allows a more concise code layout.
+            """
+            return pytest.raises(ValueError, match=match)
+
+        # Checks for check_COLA():
+        with chk_VE('nperseg must be a positive integer'):
+            check_COLA('hann', -10, 0)
+        with chk_VE('noverlap must be less than nperseg.'):
+            check_COLA('hann', 10, 20)
+        with chk_VE('window must be 1-D'):
+            check_COLA(np.ones((2, 2)), 10, 0)
+        with chk_VE('window must have length of nperseg'):
+            check_COLA(np.ones(20), 10, 0)
+
+        # Checks for check_NOLA():
+        with chk_VE('nperseg must be a positive integer'):
+            check_NOLA('hann', -10, 0)
+        with chk_VE('noverlap must be less than nperseg'):
+            check_NOLA('hann', 10, 20)
+        with chk_VE('window must be 1-D'):
+            check_NOLA(np.ones((2, 2)), 10, 0)
+        with chk_VE('window must have length of nperseg'):
+            check_NOLA(np.ones(20), 10, 0)
+        with chk_VE('noverlap must be a nonnegative integer'):
+            check_NOLA('hann', 64, -32)
+
+        x = np.zeros(1024)
+        z = stft(x)[2]
+
+        # Checks for stft():
+        with chk_VE('window must be 1-D'):
+            stft(x, window=np.ones((2, 2)))
+        with chk_VE('value specified for nperseg is different ' +
+                    'from length of window'):
+            stft(x, window=np.ones(10), nperseg=256)
+        with chk_VE('nperseg must be a positive integer'):
+            stft(x, nperseg=-256)
+        with chk_VE('noverlap must be less than nperseg.'):
+            stft(x, nperseg=256, noverlap=1024)
+        with chk_VE('nfft must be greater than or equal to nperseg.'):
+            stft(x, nperseg=256, nfft=8)
+
+        # Checks for istft():
+        with chk_VE('Input stft must be at least 2d!'):
+            istft(x)
+        with chk_VE('window must be 1-D'):
+            istft(z, window=np.ones((2, 2)))
+        with chk_VE('window must have length of 256'):
+            istft(z, window=np.ones(10), nperseg=256)
+        with chk_VE('nperseg must be a positive integer'):
+            istft(z, nperseg=-256)
+        with chk_VE('noverlap must be less than nperseg.'):
+            istft(z, nperseg=256, noverlap=1024)
+        with chk_VE('nfft must be greater than or equal to nperseg.'):
+            istft(z, nperseg=256, nfft=8)
+        with pytest.warns(UserWarning, match="NOLA condition failed, " +
+                          "STFT may not be invertible"):
+            istft(z, nperseg=256, noverlap=0, window='hann')
+        with chk_VE('Must specify differing time and frequency axes!'):
+            istft(z, time_axis=0, freq_axis=0)
+
+        # Checks for _spectral_helper():
+        with chk_VE("Unknown value for mode foo, must be one of: " +
+                    r"\{'psd', 'stft'\}"):
+            _spectral_helper(x, x, mode='foo')
+        with chk_VE("x and y must be equal if mode is 'stft'"):
+            _spectral_helper(x[:512], x[512:], mode='stft')
+        with chk_VE("Unknown boundary option 'foo', must be one of: " +
+                    r"\['even', 'odd', 'constant', 'zeros', None\]"):
+            _spectral_helper(x, x, boundary='foo')
+
+        scaling = "not_valid"
+        with chk_VE(fr"Parameter {scaling=} not in \['spectrum', 'psd'\]!"):
+            stft(x, scaling=scaling)
+        with chk_VE(fr"Parameter {scaling=} not in \['spectrum', 'psd'\]!"):
+            istft(z, scaling=scaling)
+
+    def test_check_COLA(self):
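+        # COLA (constant overlap-add): the shifted, overlapped windows must
+        # sum to a constant, making the STFT exactly invertible by plain
+        # overlap-add synthesis.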
+        settings = [
+                    ('boxcar', 10, 0),
+                    ('boxcar', 10, 9),
+                    ('bartlett', 51, 26),
+                    ('hann', 256, 128),
+                    ('hann', 256, 192),
+                    ('blackman', 300, 200),
+                    (('tukey', 0.5), 256, 64),
+                    ('hann', 256, 255),
+                    ]
+
+        for setting in settings:
+            msg = '{0}, {1}, {2}'.format(*setting)
+            assert_equal(True, check_COLA(*setting), err_msg=msg)
+
+    def test_check_NOLA(self):
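+        # NOLA (nonzero overlap-add) is weaker than COLA: the squared-window
+        # overlap sum only needs to stay nonzero, which is enough for the
+        # least-squares inversion used by istft.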
+        settings_pass = [
+                    ('boxcar', 10, 0),
+                    ('boxcar', 10, 9),
+                    ('boxcar', 10, 7),
+                    ('bartlett', 51, 26),
+                    ('bartlett', 51, 10),
+                    ('hann', 256, 128),
+                    ('hann', 256, 192),
+                    ('hann', 256, 37),
+                    ('blackman', 300, 200),
+                    ('blackman', 300, 123),
+                    (('tukey', 0.5), 256, 64),
+                    (('tukey', 0.5), 256, 38),
+                    ('hann', 256, 255),
+                    ('hann', 256, 39),
+                    ]
+        for setting in settings_pass:
+            msg = '{0}, {1}, {2}'.format(*setting)
+            assert_equal(True, check_NOLA(*setting), err_msg=msg)
+
+        w_fail = np.ones(16)
+        w_fail[::2] = 0
+        settings_fail = [
+                    (w_fail, len(w_fail), len(w_fail) // 2),
+                    ('hann', 64, 0),
+        ]
+        for setting in settings_fail:
+            msg = '{0}, {1}, {2}'.format(*setting)
+            assert_equal(False, check_NOLA(*setting), err_msg=msg)
+
+    def test_average_all_segments(self):
+        np.random.seed(1234)
+        x = np.random.randn(1024)
+
+        fs = 1.0
+        window = 'hann'
+        nperseg = 16
+        noverlap = 8
+
+        # Compare twosided, because onesided welch doubles non-DC terms to
+        # account for power at negative frequencies. stft doesn't do this,
+        # because it breaks invertibility.
+        f, _, Z = stft(x, fs, window, nperseg, noverlap, padded=False,
+                       return_onesided=False, boundary=None)
+        fw, Pw = welch(x, fs, window, nperseg, noverlap, return_onesided=False,
+                       scaling='spectrum', detrend=False)
+
+        assert_allclose(f, fw)
+        assert_allclose(np.mean(np.abs(Z)**2, axis=-1), Pw)
+
+    def test_permute_axes(self):
+        np.random.seed(1234)
+        x = np.random.randn(1024)
+
+        fs = 1.0
+        window = 'hann'
+        nperseg = 16
+        noverlap = 8
+
+        f1, t1, Z1 = stft(x, fs, window, nperseg, noverlap)
+        f2, t2, Z2 = stft(x.reshape((-1, 1, 1)), fs, window, nperseg, noverlap,
+                          axis=0)
+
+        t3, x1 = istft(Z1, fs, window, nperseg, noverlap)
+        t4, x2 = istft(Z2.T, fs, window, nperseg, noverlap, time_axis=0,
+                       freq_axis=-1)
+
+        assert_allclose(f1, f2)
+        assert_allclose(t1, t2)
+        assert_allclose(t3, t4)
+        assert_allclose(Z1, Z2[:, 0, 0, :])
+        assert_allclose(x1, x2[:, 0, 0])
+
+    @pytest.mark.parametrize('scaling', ['spectrum', 'psd'])
+    def test_roundtrip_real(self, scaling):
+        np.random.seed(1234)
+
+        settings = [
+                    ('boxcar', 100, 10, 0),           # Test no overlap
+                    ('boxcar', 100, 10, 9),           # Test high overlap
+                    ('bartlett', 101, 51, 26),        # Test odd nperseg
+                    ('hann', 1024, 256, 128),         # Test defaults
+                    (('tukey', 0.5), 1152, 256, 64),  # Test Tukey
+                    ('hann', 1024, 256, 255),         # Test overlapped hann
+                    ]
+
+        for window, N, nperseg, noverlap in settings:
+            t = np.arange(N)
+            x = 10*np.random.randn(t.size)
+
+            _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap,
+                            window=window, detrend=None, padded=False,
+                            scaling=scaling)
+
+            tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap,
+                           window=window, scaling=scaling)
+
+            msg = '{0}, {1}'.format(window, noverlap)
+            assert_allclose(t, tr, err_msg=msg)
+            assert_allclose(x, xr, err_msg=msg)
+
+    def test_roundtrip_not_nola(self):
+        np.random.seed(1234)
+
+        w_fail = np.ones(16)
+        w_fail[::2] = 0
+        settings = [
+                    (w_fail, 256, len(w_fail), len(w_fail) // 2),
+                    ('hann', 256, 64, 0),
+        ]
+
+        for window, N, nperseg, noverlap in settings:
+            msg = '{0}, {1}, {2}, {3}'.format(window, N, nperseg, noverlap)
+            assert not check_NOLA(window, nperseg, noverlap), msg
+
+            t = np.arange(N)
+            x = 10 * np.random.randn(t.size)
+
+            _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap,
+                            window=window, detrend=None, padded=True,
+                            boundary='zeros')
+            with pytest.warns(UserWarning, match='NOLA'):
+                tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap,
+                               window=window, boundary=True)
+
+            assert np.allclose(t, tr[:len(t)]), msg
+            assert not np.allclose(x, xr[:len(x)]), msg
+
+    def test_roundtrip_nola_not_cola(self):
+        np.random.seed(1234)
+
+        settings = [
+                    ('boxcar', 100, 10, 3),           # NOLA True, COLA False
+                    ('bartlett', 101, 51, 37),        # NOLA True, COLA False
+                    ('hann', 1024, 256, 127),         # NOLA True, COLA False
+                    (('tukey', 0.5), 1152, 256, 14),  # NOLA True, COLA False
+                    ('hann', 1024, 256, 5),           # NOLA True, COLA False
+                    ]
+
+        for window, N, nperseg, noverlap in settings:
+            msg = '{0}, {1}, {2}'.format(window, nperseg, noverlap)
+            assert check_NOLA(window, nperseg, noverlap), msg
+            assert not check_COLA(window, nperseg, noverlap), msg
+
+            t = np.arange(N)
+            x = 10 * np.random.randn(t.size)
+
+            _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap,
+                            window=window, detrend=None, padded=True,
+                            boundary='zeros')
+
+            tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap,
+                           window=window, boundary=True)
+
+            msg = '{0}, {1}'.format(window, noverlap)
+            assert_allclose(t, tr[:len(t)], err_msg=msg)
+            assert_allclose(x, xr[:len(x)], err_msg=msg)
+
+    def test_roundtrip_float32(self):
+        np.random.seed(1234)
+
+        settings = [('hann', 1024, 256, 128)]
+
+        for window, N, nperseg, noverlap in settings:
+            t = np.arange(N)
+            x = 10*np.random.randn(t.size)
+            x = x.astype(np.float32)
+
+            _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap,
+                            window=window, detrend=None, padded=False)
+
+            tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap,
+                           window=window)
+
+            msg = '{0}, {1}'.format(window, noverlap)
+            assert_allclose(t, tr, err_msg=msg)
+            assert_allclose(x, xr, err_msg=msg, rtol=1e-4, atol=1e-5)
+            assert_(x.dtype == xr.dtype)
+
+    @pytest.mark.parametrize('scaling', ['spectrum', 'psd'])
+    def test_roundtrip_complex(self, scaling):
+        np.random.seed(1234)
+
+        settings = [
+                    ('boxcar', 100, 10, 0),           # Test no overlap
+                    ('boxcar', 100, 10, 9),           # Test high overlap
+                    ('bartlett', 101, 51, 26),        # Test odd nperseg
+                    ('hann', 1024, 256, 128),         # Test defaults
+                    (('tukey', 0.5), 1152, 256, 64),  # Test Tukey
+                    ('hann', 1024, 256, 255),         # Test overlapped hann
+                    ]
+
+        for window, N, nperseg, noverlap in settings:
+            t = np.arange(N)
+            x = 10*np.random.randn(t.size) + 10j*np.random.randn(t.size)
+
+            _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap,
+                            window=window, detrend=None, padded=False,
+                            return_onesided=False, scaling=scaling)
+
+            tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap,
+                           window=window, input_onesided=False,
+                           scaling=scaling)
+
+            msg = '{0}, {1}, {2}'.format(window, nperseg, noverlap)
+            assert_allclose(t, tr, err_msg=msg)
+            assert_allclose(x, xr, err_msg=msg)
+
+        # Check that asking for onesided switches to twosided
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning,
+                       "Input data is complex, switching to return_onesided=False")
+            _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap,
+                            window=window, detrend=None, padded=False,
+                            return_onesided=True, scaling=scaling)
+
+        tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap,
+                       window=window, input_onesided=False, scaling=scaling)
+
+        msg = '{0}, {1}, {2}'.format(window, nperseg, noverlap)
+        assert_allclose(t, tr, err_msg=msg)
+        assert_allclose(x, xr, err_msg=msg)
+
+    def test_roundtrip_boundary_extension(self):
+        np.random.seed(1234)
+
+        # Test with a boxcar window: since the window is all ones, the
+        # signal can be fully recovered with no boundary extension.
+
+        settings = [
+                    ('boxcar', 100, 10, 0),           # Test no overlap
+                    ('boxcar', 100, 10, 9),           # Test high overlap
+                    ]
+
+        for window, N, nperseg, noverlap in settings:
+            t = np.arange(N)
+            x = 10*np.random.randn(t.size)
+
+            _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap,
+                            window=window, detrend=None, padded=True,
+                            boundary=None)
+
+            _, xr = istft(zz, noverlap=noverlap, window=window, boundary=False)
+
+            for boundary in ['even', 'odd', 'constant', 'zeros']:
+                _, _, zz_ext = stft(x, nperseg=nperseg, noverlap=noverlap,
+                                    window=window, detrend=None, padded=True,
+                                    boundary=boundary)
+
+                _, xr_ext = istft(zz_ext, noverlap=noverlap, window=window,
+                                  boundary=True)
+
+                msg = '{0}, {1}, {2}'.format(window, noverlap, boundary)
+                assert_allclose(x, xr, err_msg=msg)
+                assert_allclose(x, xr_ext, err_msg=msg)
+
+    def test_roundtrip_padded_signal(self):
+        np.random.seed(1234)
+
+        settings = [
+                    ('boxcar', 101, 10, 0),
+                    ('hann', 1000, 256, 128),
+                    ]
+
+        for window, N, nperseg, noverlap in settings:
+            t = np.arange(N)
+            x = 10*np.random.randn(t.size)
+
+            _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap,
+                            window=window, detrend=None, padded=True)
+
+            tr, xr = istft(zz, noverlap=noverlap, window=window)
+
+            msg = '{0}, {1}'.format(window, noverlap)
+            # Account for possible zero-padding at the end
+            assert_allclose(t, tr[:t.size], err_msg=msg)
+            assert_allclose(x, xr[:x.size], err_msg=msg)
+
+    def test_roundtrip_padded_FFT(self):
+        np.random.seed(1234)
+
+        settings = [
+                    ('hann', 1024, 256, 128, 512),
+                    ('hann', 1024, 256, 128, 501),
+                    ('boxcar', 100, 10, 0, 33),
+                    (('tukey', 0.5), 1152, 256, 64, 1024),
+                    ]
+
+        for window, N, nperseg, noverlap, nfft in settings:
+            t = np.arange(N)
+            x = 10*np.random.randn(t.size)
+            xc = x*np.exp(1j*np.pi/4)
+
+            # real signal
+            _, _, z = stft(x, nperseg=nperseg, noverlap=noverlap, nfft=nfft,
+                            window=window, detrend=None, padded=True)
+
+            # complex signal
+            _, _, zc = stft(xc, nperseg=nperseg, noverlap=noverlap, nfft=nfft,
+                            window=window, detrend=None, padded=True,
+                            return_onesided=False)
+
+            tr, xr = istft(z, nperseg=nperseg, noverlap=noverlap, nfft=nfft,
+                           window=window)
+
+            tr, xcr = istft(zc, nperseg=nperseg, noverlap=noverlap, nfft=nfft,
+                            window=window, input_onesided=False)
+
+            msg = '{0}, {1}'.format(window, noverlap)
+            assert_allclose(t, tr, err_msg=msg)
+            assert_allclose(x, xr, err_msg=msg)
+            assert_allclose(xc, xcr, err_msg=msg)
+
+    def test_axis_rolling(self):
+        np.random.seed(1234)
+
+        x_flat = np.random.randn(1024)
+        _, _, z_flat = stft(x_flat)
+
+        for a in range(3):
+            newshape = [1,]*3
+            newshape[a] = -1
+            x = x_flat.reshape(newshape)
+
+            _, _, z_plus = stft(x, axis=a)  # Positive axis index
+            _, _, z_minus = stft(x, axis=a-x.ndim)  # Negative axis index
+
+            assert_equal(z_flat, z_plus.squeeze(), err_msg=a)
+            assert_equal(z_flat, z_minus.squeeze(), err_msg=a-x.ndim)
+
+        # z_flat has shape [n_freq, n_time]
+
+        # Test vs. transpose
+        _, x_transpose_m = istft(z_flat.T, time_axis=-2, freq_axis=-1)
+        _, x_transpose_p = istft(z_flat.T, time_axis=0, freq_axis=1)
+
+        assert_allclose(x_flat, x_transpose_m, err_msg='istft transpose minus')
+        assert_allclose(x_flat, x_transpose_p, err_msg='istft transpose plus')
+
+    def test_roundtrip_scaling(self):
+        """Verify behavior of scaling parameter. """
+        # Create 1024 sample cosine signal with amplitude 2:
+        X = np.zeros(513, dtype=complex)
+        X[256] = 1024
+        x = np.fft.irfft(X)
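+        # A single nonzero bin at k=256 of an inverse rFFT of length 1024
+        # gives x[n] = 2*cos(2*pi*n/4): amplitude 2, hence power 2.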
+        power_x = sum(x**2) / len(x)  # power of signal x is 2
+
+        # Calculate magnitude-scaled STFT:
+        Zs = stft(x, boundary='even', scaling='spectrum')[2]
+
+        # Test round trip:
+        x1 = istft(Zs, boundary=True, scaling='spectrum')[1]
+        assert_allclose(x1, x)
+
+        # For the default Hann-windowed 256-sample segments we expect a peak
+        # at frequency bin 64 (the tone occupies bin 256 of the 1024-sample
+        # signal, i.e. normalized frequency 1/4, and 1/4 of 256 is 64) with a
+        # height of 1 (half the amplitude). A Hann window of a perfectly
+        # centered sine has the magnitude [..., 0, 0, 0.5, 1, 0.5, 0, 0, ...].
+        # Note that in this case the 'even' padding works for the beginning
+        # but not for the end of the STFT.
+        assert_allclose(abs(Zs[63, :-1]), 0.5)
+        assert_allclose(abs(Zs[64, :-1]), 1)
+        assert_allclose(abs(Zs[65, :-1]), 0.5)
+        # All other values should be zero:
+        Zs[63:66, :-1] = 0
+        # Note since 'rtol' does not have influence here, atol needs to be set:
+        assert_allclose(Zs[:, :-1], 0, atol=np.finfo(Zs.dtype).resolution)
+
+        # Calculate two-sided psd-scaled STFT:
+        #  - using 'even' padding since signal is axis symmetric - this ensures
+        #    stationary behavior on the boundaries
+        #  - using the two-sided transform allows determining the spectral
+        #    power by `sum(abs(Zp[:, k])**2) / len(f)` for the k-th time slot.
+        Zp = stft(x, return_onesided=False, boundary='even', scaling='psd')[2]
+
+        # Calculate spectral power of Zp by summing over the frequency axis:
+        psd_Zp = np.sum(Zp.real**2 + Zp.imag**2, axis=0) / Zp.shape[0]
+        # Spectral power of Zp should be equal to the signal's power:
+        assert_allclose(psd_Zp, power_x)
+
+        # Test round trip:
+        x1 = istft(Zp, input_onesided=False, boundary=True, scaling='psd')[1]
+        assert_allclose(x1, x)
+
+        # The power of the one-sided psd-scaled STFT can be determined
+        # analogously (note that the two sides are not of equal shape):
+        Zp0 = stft(x, return_onesided=True, boundary='even', scaling='psd')[2]
+
+        # Since x is real, its Fourier transform is conjugate symmetric, i.e.,
+        # the missing 'second side' can be expressed through the 'first side':
+        Zp1 = np.conj(Zp0[-2:0:-1, :])  # 'second side' is conjugate reversed
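+        # ([-2:0:-1] runs from the bin just below Nyquist back down to bin 1,
+        # skipping DC and Nyquist, which have no mirror image; added note.)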
+        assert_allclose(Zp[:129, :], Zp0)
+        assert_allclose(Zp[129:, :], Zp1)
+
+        # Calculate the spectral power:
+        s2 = (np.sum(Zp0.real ** 2 + Zp0.imag ** 2, axis=0) +
+              np.sum(Zp1.real ** 2 + Zp1.imag ** 2, axis=0))
+        psd_Zp01 = s2 / (Zp0.shape[0] + Zp1.shape[0])
+        assert_allclose(psd_Zp01, power_x)
+
+        # Test round trip:
+        x1 = istft(Zp0, input_onesided=True, boundary=True, scaling='psd')[1]
+        assert_allclose(x1, x)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_upfirdn.py b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_upfirdn.py
new file mode 100644
index 00000000..af23fd41
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_upfirdn.py
@@ -0,0 +1,287 @@
+# Code adapted from "upfirdn" python library with permission:
+#
+# Copyright (c) 2009, Motorola, Inc
+#
+# All Rights Reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# * Neither the name of Motorola nor the names of its contributors may be
+# used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+import numpy as np
+from itertools import product
+
+from numpy.testing import assert_equal, assert_allclose
+from pytest import raises as assert_raises
+import pytest
+
+from scipy.signal import upfirdn, firwin
+from scipy.signal._upfirdn import _output_len, _upfirdn_modes
+from scipy.signal._upfirdn_apply import _pad_test
+
+
+def upfirdn_naive(x, h, up=1, down=1):
+    """Naive upfirdn processing in Python.
+
+    Note: arg order (x, h) differs to facilitate apply_along_axis use.
+    """
+    h = np.asarray(h)
+    out = np.zeros(len(x) * up, x.dtype)
+    out[::up] = x
+    out = np.convolve(h, out)[::down][:_output_len(len(h), len(x), up, down)]
+    return out
+
+
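+# Illustrative worked example (added note, not from the upstream scipy
+# suite): upfirdn_naive zero-stuffs by `up`, FIR-filters with h, then keeps
+# every `down`-th sample. With x = [1, 2, 3], h = [1, 1], up=2, down=1:
+#   zero-stuffed: [1, 0, 2, 0, 3, 0]
+#   convolved:    [1, 1, 2, 2, 3, 3, 0]
+#   trimmed to _output_len(2, 3, 2, 1) == 6 samples: [1, 1, 2, 2, 3, 3]
+def test_upfirdn_naive_worked_example():
+    out = upfirdn_naive(np.array([1., 2., 3.]), [1., 1.], up=2, down=1)
+    assert_allclose(out, [1., 1., 2., 2., 3., 3.])
+
+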
+class UpFIRDnCase:
+    """Test _UpFIRDn object"""
+    def __init__(self, up, down, h, x_dtype):
+        self.up = up
+        self.down = down
+        self.h = np.atleast_1d(h)
+        self.x_dtype = x_dtype
+        self.rng = np.random.RandomState(17)
+
+    def __call__(self):
+        # tiny signal
+        self.scrub(np.ones(1, self.x_dtype))
+        # ones
+        self.scrub(np.ones(10, self.x_dtype))
+        # randn
+        x = self.rng.randn(10).astype(self.x_dtype)
+        if self.x_dtype in (np.complex64, np.complex128):
+            x += 1j * self.rng.randn(10)
+        self.scrub(x)
+        # ramp
+        self.scrub(np.arange(10).astype(self.x_dtype))
+        # 3D, random
+        size = (2, 3, 5)
+        x = self.rng.randn(*size).astype(self.x_dtype)
+        if self.x_dtype in (np.complex64, np.complex128):
+            x += 1j * self.rng.randn(*size)
+        for axis in range(len(size)):
+            self.scrub(x, axis=axis)
+        x = x[:, ::2, 1::3].T
+        for axis in range(len(size)):
+            self.scrub(x, axis=axis)
+
+    def scrub(self, x, axis=-1):
+        yr = np.apply_along_axis(upfirdn_naive, axis, x,
+                                 self.h, self.up, self.down)
+        want_len = _output_len(len(self.h), x.shape[axis], self.up, self.down)
+        assert yr.shape[axis] == want_len
+        y = upfirdn(self.h, x, self.up, self.down, axis=axis)
+        assert y.shape[axis] == want_len
+        assert y.shape == yr.shape
+        dtypes = (self.h.dtype, x.dtype)
+        if all(d == np.complex64 for d in dtypes):
+            assert_equal(y.dtype, np.complex64)
+        elif np.complex64 in dtypes and np.float32 in dtypes:
+            assert_equal(y.dtype, np.complex64)
+        elif all(d == np.float32 for d in dtypes):
+            assert_equal(y.dtype, np.float32)
+        elif np.complex128 in dtypes or np.complex64 in dtypes:
+            assert_equal(y.dtype, np.complex128)
+        else:
+            assert_equal(y.dtype, np.float64)
+        assert_allclose(yr, y)
+
+
+_UPFIRDN_TYPES = (int, np.float32, np.complex64, float, complex)
+
+
+class TestUpfirdn:
+
+    def test_valid_input(self):
+        assert_raises(ValueError, upfirdn, [1], [1], 1, 0)  # up or down < 1
+        assert_raises(ValueError, upfirdn, [], [1], 1, 1)  # len(h) == 0
+        assert_raises(ValueError, upfirdn, [[1]], [1], 1, 1)  # h.ndim != 1
+
+    @pytest.mark.parametrize('len_h', [1, 2, 3, 4, 5])
+    @pytest.mark.parametrize('len_x', [1, 2, 3, 4, 5])
+    def test_singleton(self, len_h, len_x):
+        # gh-9844: lengths producing expected outputs
+        h = np.zeros(len_h)
+        h[len_h // 2] = 1.  # make h a delta
+        x = np.ones(len_x)
+        y = upfirdn(h, x, 1, 1)
+        want = np.pad(x, (len_h // 2, (len_h - 1) // 2), 'constant')
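+        # e.g. len_h=3, len_x=2: h = [0, 1, 0] delays by one sample, so the
+        # output [0, 1, 1, 0] equals x padded by (1, 1) zeros (added note).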
+        assert_allclose(y, want)
+
+    def test_shift_x(self):
+        # gh-9844: shifted x can change values?
+        y = upfirdn([1, 1], [1.], 1, 1)
+        assert_allclose(y, [1, 1])  # was [0, 1] in the issue
+        y = upfirdn([1, 1], [0., 1.], 1, 1)
+        assert_allclose(y, [0, 1, 1])
+
+    # A bunch of lengths/factors chosen because they exposed differences
+    # between the "old way" and new way of computing length, and then
+    # got `expected` from MATLAB
+    @pytest.mark.parametrize('len_h, len_x, up, down, expected', [
+        (2, 2, 5, 2, [1, 0, 0, 0]),
+        (2, 3, 6, 3, [1, 0, 1, 0, 1]),
+        (2, 4, 4, 3, [1, 0, 0, 0, 1]),
+        (3, 2, 6, 2, [1, 0, 0, 1, 0]),
+        (4, 11, 3, 5, [1, 0, 0, 1, 0, 0, 1]),
+    ])
+    def test_length_factors(self, len_h, len_x, up, down, expected):
+        # gh-9844: weird factors
+        h = np.zeros(len_h)
+        h[0] = 1.
+        x = np.ones(len_x)
+        y = upfirdn(h, x, up, down)
+        assert_allclose(y, expected)
+
+    @pytest.mark.parametrize('down, want_len', [  # lengths from MATLAB
+        (2, 5015),
+        (11, 912),
+        (79, 127),
+    ])
+    def test_vs_convolve(self, down, want_len):
+        # Check that up=1 gives the same answer as convolve + slicing
+        random_state = np.random.RandomState(17)
+        try_types = (int, np.float32, np.complex64, float, complex)
+        size = 10000
+
+        for dtype in try_types:
+            x = random_state.randn(size).astype(dtype)
+            if dtype in (np.complex64, np.complex128):
+                x += 1j * random_state.randn(size)
+
+            h = firwin(31, 1. / down, window='hamming')
+            yl = upfirdn_naive(x, h, 1, down)
+            y = upfirdn(h, x, up=1, down=down)
+            assert y.shape == (want_len,)
+            assert yl.shape[0] == y.shape[0]
+            assert_allclose(yl, y, atol=1e-7, rtol=1e-7)
+
+    @pytest.mark.parametrize('x_dtype', _UPFIRDN_TYPES)
+    @pytest.mark.parametrize('h', (1., 1j))
+    @pytest.mark.parametrize('up, down', [(1, 1), (2, 2), (3, 2), (2, 3)])
+    def test_vs_naive_delta(self, x_dtype, h, up, down):
+        UpFIRDnCase(up, down, h, x_dtype)()
+
+    @pytest.mark.parametrize('x_dtype', _UPFIRDN_TYPES)
+    @pytest.mark.parametrize('h_dtype', _UPFIRDN_TYPES)
+    @pytest.mark.parametrize('p_max, q_max',
+                             list(product((10, 100), (10, 100))))
+    def test_vs_naive(self, x_dtype, h_dtype, p_max, q_max):
+        tests = self._random_factors(p_max, q_max, h_dtype, x_dtype)
+        for test in tests:
+            test()
+
+    def _random_factors(self, p_max, q_max, h_dtype, x_dtype):
+        n_rep = 3
+        longest_h = 25
+        random_state = np.random.RandomState(17)
+        tests = []
+
+        for _ in range(n_rep):
+            # Randomize the up/down factors somewhat
+            p_add = q_max if p_max > q_max else 1
+            q_add = p_max if q_max > p_max else 1
+            p = random_state.randint(p_max) + p_add
+            q = random_state.randint(q_max) + q_add
+
+            # Generate random FIR coefficients
+            len_h = random_state.randint(longest_h) + 1
+            h = np.atleast_1d(random_state.randint(len_h))
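+            # (randint returns a scalar here, so h ends up as a single
+            # random tap; added note on what this draws)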
+            h = h.astype(h_dtype)
+            if h_dtype == complex:
+                h += 1j * random_state.randint(len_h)
+
+            tests.append(UpFIRDnCase(p, q, h, x_dtype))
+
+        return tests
+
+    @pytest.mark.parametrize('mode', _upfirdn_modes)
+    def test_extensions(self, mode):
+        """Test vs. manually computed results for modes not in numpy's pad."""
+        x = np.array([1, 2, 3, 1], dtype=float)
+        npre, npost = 6, 6
+        y = _pad_test(x, npre=npre, npost=npost, mode=mode)
+        if mode == 'antisymmetric':
+            y_expected = np.asarray(
+                [3, 1, -1, -3, -2, -1, 1, 2, 3, 1, -1, -3, -2, -1, 1, 2])
+        elif mode == 'antireflect':
+            y_expected = np.asarray(
+                [1, 2, 3, 1, -1, 0, 1, 2, 3, 1, -1, 0, 1, 2, 3, 1])
+        elif mode == 'smooth':
+            y_expected = np.asarray(
+                [-5, -4, -3, -2, -1, 0, 1, 2, 3, 1, -1, -3, -5, -7, -9, -11])
+        elif mode == "line":
+            lin_slope = (x[-1] - x[0]) / (len(x) - 1)
+            left = x[0] + np.arange(-npre, 0, 1) * lin_slope
+            right = x[-1] + np.arange(1, npost + 1) * lin_slope
+            y_expected = np.concatenate((left, x, right))
+        else:
+            y_expected = np.pad(x, (npre, npost), mode=mode)
+        assert_allclose(y, y_expected)
+
+    @pytest.mark.parametrize(
+        'size, h_len, mode, dtype',
+        product(
+            [8],
+            [4, 5, 26],  # include cases with h_len > 2*size
+            _upfirdn_modes,
+            [np.float32, np.float64, np.complex64, np.complex128],
+        )
+    )
+    def test_modes(self, size, h_len, mode, dtype):
+        random_state = np.random.RandomState(5)
+        x = random_state.randn(size).astype(dtype)
+        if dtype in (np.complex64, np.complex128):
+            x += 1j * random_state.randn(size)
+        h = np.arange(1, 1 + h_len, dtype=x.real.dtype)
+
+        y = upfirdn(h, x, up=1, down=1, mode=mode)
+        # expected result: pad the input, filter with zero padding, then crop
+        npad = h_len - 1
+        if mode in ['antisymmetric', 'antireflect', 'smooth', 'line']:
+            # use _pad_test test function for modes not supported by np.pad.
+            xpad = _pad_test(x, npre=npad, npost=npad, mode=mode)
+        else:
+            xpad = np.pad(x, npad, mode=mode)
+        ypad = upfirdn(h, xpad, up=1, down=1, mode='constant')
+        y_expected = ypad[npad:-npad]
+
+        atol = rtol = np.finfo(dtype).eps * 1e2
+        assert_allclose(y, y_expected, atol=atol, rtol=rtol)
+
+
+def test_output_len_long_input():
+    # Regression test for gh-17375.  On Windows, a large enough input
+    # that should have been well within the capabilities of 64 bit integers
+    # would result in a 32 bit overflow because of a bug in Cython 0.29.32.
+    len_h = 1001
+    in_len = 10**8
+    up = 320
+    down = 441
+    out_len = _output_len(len_h, in_len, up, down)
+    # The expected value was computed "by hand" from the formula
+    #   (((in_len - 1) * up + len_h) - 1) // down + 1
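+    # Plugging in these values (illustrative arithmetic, added for clarity):
+    #   ((10**8 - 1) * 320 + 1001 - 1) // 441 + 1
+    #   = 32_000_000_680 // 441 + 1 = 72_562_359 + 1 = 72_562_360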
+    assert out_len == 72562360
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_waveforms.py b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_waveforms.py
new file mode 100644
index 00000000..7f84a804
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_waveforms.py
@@ -0,0 +1,351 @@
+import numpy as np
+from numpy.testing import (assert_almost_equal, assert_equal,
+                           assert_, assert_allclose, assert_array_equal)
+from pytest import raises as assert_raises
+
+import scipy.signal._waveforms as waveforms
+
+
+# These chirp_* functions are the instantaneous frequencies of the signals
+# returned by chirp().
+
+def chirp_linear(t, f0, f1, t1):
+    f = f0 + (f1 - f0) * t / t1
+    return f
+
+
+def chirp_quadratic(t, f0, f1, t1, vertex_zero=True):
+    if vertex_zero:
+        f = f0 + (f1 - f0) * t**2 / t1**2
+    else:
+        f = f1 - (f1 - f0) * (t1 - t)**2 / t1**2
+    return f
+
+
+def chirp_geometric(t, f0, f1, t1):
+    f = f0 * (f1/f0)**(t/t1)
+    return f
+
+
+def chirp_hyperbolic(t, f0, f1, t1):
+    f = f0*f1*t1 / ((f0 - f1)*t + f1*t1)
+    return f
+
+
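+# Hedged endpoint check (added for illustration, not from the upstream
+# suite): each frequency law above passes through f0 at t=0 and f1 at t=t1.
+def test_chirp_frequency_endpoints():
+    f0, f1, t1 = 10.0, 2.0, 3.0
+    for law in (chirp_linear, chirp_quadratic, chirp_geometric,
+                chirp_hyperbolic):
+        assert_allclose(law(0.0, f0, f1, t1), f0)
+        assert_allclose(law(t1, f0, f1, t1), f1)
+
+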
+def compute_frequency(t, theta):
+    """
+    Compute theta'(t)/(2*pi), where theta'(t) is the derivative of theta(t).
+    """
+    # Assume theta and t are 1-D NumPy arrays.
+    # Assume that t is uniformly spaced.
+    dt = t[1] - t[0]
+    f = np.diff(theta)/(2*np.pi) / dt
+    tf = 0.5*(t[1:] + t[:-1])
+    return tf, f
+
+
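+# Illustrative sanity check (added note, not from the upstream suite): for a
+# pure tone with phase theta(t) = 2*pi*f0*t, the finite-difference estimate
+# in compute_frequency recovers f0 at the segment midpoints up to
+# floating-point error.
+def test_compute_frequency_pure_tone():
+    f0 = 3.0
+    t = np.linspace(0, 1, 1001)
+    tf, f = compute_frequency(t, 2 * np.pi * f0 * t)
+    assert_allclose(f, f0)
+
+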
+class TestChirp:
+
+    def test_linear_at_zero(self):
+        w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='linear')
+        assert_almost_equal(w, 1.0)
+
+    def test_linear_freq_01(self):
+        method = 'linear'
+        f0 = 1.0
+        f1 = 2.0
+        t1 = 1.0
+        t = np.linspace(0, t1, 100)
+        phase = waveforms._chirp_phase(t, f0, t1, f1, method)
+        tf, f = compute_frequency(t, phase)
+        abserr = np.max(np.abs(f - chirp_linear(tf, f0, f1, t1)))
+        assert_(abserr < 1e-6)
+
+    def test_linear_freq_02(self):
+        method = 'linear'
+        f0 = 200.0
+        f1 = 100.0
+        t1 = 10.0
+        t = np.linspace(0, t1, 100)
+        phase = waveforms._chirp_phase(t, f0, t1, f1, method)
+        tf, f = compute_frequency(t, phase)
+        abserr = np.max(np.abs(f - chirp_linear(tf, f0, f1, t1)))
+        assert_(abserr < 1e-6)
+
+    def test_quadratic_at_zero(self):
+        w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='quadratic')
+        assert_almost_equal(w, 1.0)
+
+    def test_quadratic_at_zero2(self):
+        w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='quadratic',
+                            vertex_zero=False)
+        assert_almost_equal(w, 1.0)
+
+    def test_quadratic_freq_01(self):
+        method = 'quadratic'
+        f0 = 1.0
+        f1 = 2.0
+        t1 = 1.0
+        t = np.linspace(0, t1, 2000)
+        phase = waveforms._chirp_phase(t, f0, t1, f1, method)
+        tf, f = compute_frequency(t, phase)
+        abserr = np.max(np.abs(f - chirp_quadratic(tf, f0, f1, t1)))
+        assert_(abserr < 1e-6)
+
+    def test_quadratic_freq_02(self):
+        method = 'quadratic'
+        f0 = 20.0
+        f1 = 10.0
+        t1 = 10.0
+        t = np.linspace(0, t1, 2000)
+        phase = waveforms._chirp_phase(t, f0, t1, f1, method)
+        tf, f = compute_frequency(t, phase)
+        abserr = np.max(np.abs(f - chirp_quadratic(tf, f0, f1, t1)))
+        assert_(abserr < 1e-6)
+
+    def test_logarithmic_at_zero(self):
+        w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='logarithmic')
+        assert_almost_equal(w, 1.0)
+
+    def test_logarithmic_freq_01(self):
+        method = 'logarithmic'
+        f0 = 1.0
+        f1 = 2.0
+        t1 = 1.0
+        t = np.linspace(0, t1, 10000)
+        phase = waveforms._chirp_phase(t, f0, t1, f1, method)
+        tf, f = compute_frequency(t, phase)
+        abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1)))
+        assert_(abserr < 1e-6)
+
+    def test_logarithmic_freq_02(self):
+        method = 'logarithmic'
+        f0 = 200.0
+        f1 = 100.0
+        t1 = 10.0
+        t = np.linspace(0, t1, 10000)
+        phase = waveforms._chirp_phase(t, f0, t1, f1, method)
+        tf, f = compute_frequency(t, phase)
+        abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1)))
+        assert_(abserr < 1e-6)
+
+    def test_logarithmic_freq_03(self):
+        method = 'logarithmic'
+        f0 = 100.0
+        f1 = 100.0
+        t1 = 10.0
+        t = np.linspace(0, t1, 10000)
+        phase = waveforms._chirp_phase(t, f0, t1, f1, method)
+        tf, f = compute_frequency(t, phase)
+        abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1)))
+        assert_(abserr < 1e-6)
+
+    def test_hyperbolic_at_zero(self):
+        w = waveforms.chirp(t=0, f0=10.0, f1=1.0, t1=1.0, method='hyperbolic')
+        assert_almost_equal(w, 1.0)
+
+    def test_hyperbolic_freq_01(self):
+        method = 'hyperbolic'
+        t1 = 1.0
+        t = np.linspace(0, t1, 10000)
+        #           f0     f1
+        cases = [[10.0, 1.0],
+                 [1.0, 10.0],
+                 [-10.0, -1.0],
+                 [-1.0, -10.0]]
+        for f0, f1 in cases:
+            phase = waveforms._chirp_phase(t, f0, t1, f1, method)
+            tf, f = compute_frequency(t, phase)
+            expected = chirp_hyperbolic(tf, f0, f1, t1)
+            assert_allclose(f, expected)
+
+    def test_hyperbolic_zero_freq(self):
+        # f0=0 or f1=0 must raise a ValueError.
+        method = 'hyperbolic'
+        t1 = 1.0
+        t = np.linspace(0, t1, 5)
+        assert_raises(ValueError, waveforms.chirp, t, 0, t1, 1, method)
+        assert_raises(ValueError, waveforms.chirp, t, 1, t1, 0, method)
+
+    def test_unknown_method(self):
+        method = "foo"
+        f0 = 10.0
+        f1 = 20.0
+        t1 = 1.0
+        t = np.linspace(0, t1, 10)
+        assert_raises(ValueError, waveforms.chirp, t, f0, t1, f1, method)
+
+    def test_integer_t1(self):
+        f0 = 10.0
+        f1 = 20.0
+        t = np.linspace(-1, 1, 11)
+        t1 = 3.0
+        float_result = waveforms.chirp(t, f0, t1, f1)
+        t1 = 3
+        int_result = waveforms.chirp(t, f0, t1, f1)
+        err_msg = "Integer input 't1=3' gives wrong result"
+        assert_equal(int_result, float_result, err_msg=err_msg)
+
+    def test_integer_f0(self):
+        f1 = 20.0
+        t1 = 3.0
+        t = np.linspace(-1, 1, 11)
+        f0 = 10.0
+        float_result = waveforms.chirp(t, f0, t1, f1)
+        f0 = 10
+        int_result = waveforms.chirp(t, f0, t1, f1)
+        err_msg = "Integer input 'f0=10' gives wrong result"
+        assert_equal(int_result, float_result, err_msg=err_msg)
+
+    def test_integer_f1(self):
+        f0 = 10.0
+        t1 = 3.0
+        t = np.linspace(-1, 1, 11)
+        f1 = 20.0
+        float_result = waveforms.chirp(t, f0, t1, f1)
+        f1 = 20
+        int_result = waveforms.chirp(t, f0, t1, f1)
+        err_msg = "Integer input 'f1=20' gives wrong result"
+        assert_equal(int_result, float_result, err_msg=err_msg)
+
+    def test_integer_all(self):
+        f0 = 10
+        t1 = 3
+        f1 = 20
+        t = np.linspace(-1, 1, 11)
+        float_result = waveforms.chirp(t, float(f0), float(t1), float(f1))
+        int_result = waveforms.chirp(t, f0, t1, f1)
+        err_msg = "Integer input 'f0=10, t1=3, f1=20' gives wrong result"
+        assert_equal(int_result, float_result, err_msg=err_msg)
+
+
+class TestSweepPoly:
+
+    def test_sweep_poly_quad1(self):
+        p = np.poly1d([1.0, 0.0, 1.0])
+        t = np.linspace(0, 3.0, 10000)
+        phase = waveforms._sweep_poly_phase(t, p)
+        tf, f = compute_frequency(t, phase)
+        expected = p(tf)
+        abserr = np.max(np.abs(f - expected))
+        assert_(abserr < 1e-6)
+
+    def test_sweep_poly_const(self):
+        p = np.poly1d(2.0)
+        t = np.linspace(0, 3.0, 10000)
+        phase = waveforms._sweep_poly_phase(t, p)
+        tf, f = compute_frequency(t, phase)
+        expected = p(tf)
+        abserr = np.max(np.abs(f - expected))
+        assert_(abserr < 1e-6)
+
+    def test_sweep_poly_linear(self):
+        p = np.poly1d([-1.0, 10.0])
+        t = np.linspace(0, 3.0, 10000)
+        phase = waveforms._sweep_poly_phase(t, p)
+        tf, f = compute_frequency(t, phase)
+        expected = p(tf)
+        abserr = np.max(np.abs(f - expected))
+        assert_(abserr < 1e-6)
+
+    def test_sweep_poly_quad2(self):
+        p = np.poly1d([1.0, 0.0, -2.0])
+        t = np.linspace(0, 3.0, 10000)
+        phase = waveforms._sweep_poly_phase(t, p)
+        tf, f = compute_frequency(t, phase)
+        expected = p(tf)
+        abserr = np.max(np.abs(f - expected))
+        assert_(abserr < 1e-6)
+
+    def test_sweep_poly_cubic(self):
+        p = np.poly1d([2.0, 1.0, 0.0, -2.0])
+        t = np.linspace(0, 2.0, 10000)
+        phase = waveforms._sweep_poly_phase(t, p)
+        tf, f = compute_frequency(t, phase)
+        expected = p(tf)
+        abserr = np.max(np.abs(f - expected))
+        assert_(abserr < 1e-6)
+
+    def test_sweep_poly_cubic2(self):
+        """Use an array of coefficients instead of a poly1d."""
+        p = np.array([2.0, 1.0, 0.0, -2.0])
+        t = np.linspace(0, 2.0, 10000)
+        phase = waveforms._sweep_poly_phase(t, p)
+        tf, f = compute_frequency(t, phase)
+        expected = np.poly1d(p)(tf)
+        abserr = np.max(np.abs(f - expected))
+        assert_(abserr < 1e-6)
+
+    def test_sweep_poly_cubic3(self):
+        """Use a list of coefficients instead of a poly1d."""
+        p = [2.0, 1.0, 0.0, -2.0]
+        t = np.linspace(0, 2.0, 10000)
+        phase = waveforms._sweep_poly_phase(t, p)
+        tf, f = compute_frequency(t, phase)
+        expected = np.poly1d(p)(tf)
+        abserr = np.max(np.abs(f - expected))
+        assert_(abserr < 1e-6)
+
+
+class TestGaussPulse:
+
+    def test_integer_fc(self):
+        float_result = waveforms.gausspulse('cutoff', fc=1000.0)
+        int_result = waveforms.gausspulse('cutoff', fc=1000)
+        err_msg = "Integer input 'fc=1000' gives wrong result"
+        assert_equal(int_result, float_result, err_msg=err_msg)
+
+    def test_integer_bw(self):
+        float_result = waveforms.gausspulse('cutoff', bw=1.0)
+        int_result = waveforms.gausspulse('cutoff', bw=1)
+        err_msg = "Integer input 'bw=1' gives wrong result"
+        assert_equal(int_result, float_result, err_msg=err_msg)
+
+    def test_integer_bwr(self):
+        float_result = waveforms.gausspulse('cutoff', bwr=-6.0)
+        int_result = waveforms.gausspulse('cutoff', bwr=-6)
+        err_msg = "Integer input 'bwr=-6' gives wrong result"
+        assert_equal(int_result, float_result, err_msg=err_msg)
+
+    def test_integer_tpr(self):
+        float_result = waveforms.gausspulse('cutoff', tpr=-60.0)
+        int_result = waveforms.gausspulse('cutoff', tpr=-60)
+        err_msg = "Integer input 'tpr=-60' gives wrong result"
+        assert_equal(int_result, float_result, err_msg=err_msg)
+
+
+class TestUnitImpulse:
+
+    def test_no_index(self):
+        assert_array_equal(waveforms.unit_impulse(7), [1, 0, 0, 0, 0, 0, 0])
+        assert_array_equal(waveforms.unit_impulse((3, 3)),
+                           [[1, 0, 0], [0, 0, 0], [0, 0, 0]])
+
+    def test_index(self):
+        assert_array_equal(waveforms.unit_impulse(10, 3),
+                           [0, 0, 0, 1, 0, 0, 0, 0, 0, 0])
+        assert_array_equal(waveforms.unit_impulse((3, 3), (1, 1)),
+                           [[0, 0, 0], [0, 1, 0], [0, 0, 0]])
+
+        # Broadcasting: a scalar index is applied along every axis
+        imp = waveforms.unit_impulse((4, 4), 2)
+        assert_array_equal(imp, np.array([[0, 0, 0, 0],
+                                          [0, 0, 0, 0],
+                                          [0, 0, 1, 0],
+                                          [0, 0, 0, 0]]))
+
+    def test_mid(self):
+        assert_array_equal(waveforms.unit_impulse((3, 3), 'mid'),
+                           [[0, 0, 0], [0, 1, 0], [0, 0, 0]])
+        assert_array_equal(waveforms.unit_impulse(9, 'mid'),
+                           [0, 0, 0, 0, 1, 0, 0, 0, 0])
+
+    def test_dtype(self):
+        imp = waveforms.unit_impulse(7)
+        assert_(np.issubdtype(imp.dtype, np.floating))
+
+        imp = waveforms.unit_impulse(5, 3, dtype=int)
+        assert_(np.issubdtype(imp.dtype, np.integer))
+
+        imp = waveforms.unit_impulse((5, 2), (3, 1), dtype=complex)
+        assert_(np.issubdtype(imp.dtype, np.complexfloating))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_wavelets.py b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_wavelets.py
new file mode 100644
index 00000000..6f82d7ea
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_wavelets.py
@@ -0,0 +1,151 @@
+import numpy as np
+from numpy.testing import (assert_equal, assert_array_equal,
+                           assert_array_almost_equal, assert_array_less,
+                           assert_)
+
+import scipy.signal._wavelets as wavelets
+
+
+class TestWavelets:
+    def test_qmf(self):
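+        # The quadrature mirror filter reverses the taps and alternates
+        # their signs starting from +, hence [1, 1] -> [1, -1] (added note).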
+        assert_array_equal(wavelets.qmf([1, 1]), [1, -1])
+
+    def test_daub(self):
+        for i in range(1, 15):
+            assert_equal(len(wavelets.daub(i)), i * 2)
+
+    def test_cascade(self):
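+        # cascade evaluates phi and psi on a dyadic grid with spacing
+        # 2**-J over the scaling function's support, so a length-k filter
+        # yields (k - 1) * 2**J points (added note).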
+        for J in range(1, 7):
+            for i in range(1, 5):
+                lpcoef = wavelets.daub(i)
+                k = len(lpcoef)
+                x, phi, psi = wavelets.cascade(lpcoef, J)
+                assert_(len(x) == len(phi) == len(psi))
+                assert_equal(len(x), (k - 1) * 2 ** J)
+
+    def test_morlet(self):
+        x = wavelets.morlet(50, 4.1, complete=True)
+        y = wavelets.morlet(50, 4.1, complete=False)
+        # Test if complete and incomplete wavelet have same lengths:
+        assert_equal(len(x), len(y))
+        # Test if complete wavelet is less than incomplete wavelet:
+        assert_array_less(x, y)
+
+        x = wavelets.morlet(10, 50, complete=False)
+        y = wavelets.morlet(10, 50, complete=True)
+        # For large widths complete and incomplete wavelets should be
+        # identical within numerical precision:
+        assert_equal(x, y)
+
+        # miscellaneous tests:
+        x = np.array([1.73752399e-09 + 9.84327394e-25j,
+                      6.49471756e-01 + 0.00000000e+00j,
+                      1.73752399e-09 - 9.84327394e-25j])
+        y = wavelets.morlet(3, w=2, complete=True)
+        assert_array_almost_equal(x, y)
+
+        x = np.array([2.00947715e-09 + 9.84327394e-25j,
+                      7.51125544e-01 + 0.00000000e+00j,
+                      2.00947715e-09 - 9.84327394e-25j])
+        y = wavelets.morlet(3, w=2, complete=False)
+        assert_array_almost_equal(x, y, decimal=2)
+
+        x = wavelets.morlet(10000, s=4, complete=True)
+        y = wavelets.morlet(20000, s=8, complete=True)[5000:15000]
+        assert_array_almost_equal(x, y, decimal=2)
+
+        x = wavelets.morlet(10000, s=4, complete=False)
+        assert_array_almost_equal(y, x, decimal=2)
+        y = wavelets.morlet(20000, s=8, complete=False)[5000:15000]
+        assert_array_almost_equal(x, y, decimal=2)
+
+        x = wavelets.morlet(10000, w=3, s=5, complete=True)
+        y = wavelets.morlet(20000, w=3, s=10, complete=True)[5000:15000]
+        assert_array_almost_equal(x, y, decimal=2)
+
+        x = wavelets.morlet(10000, w=3, s=5, complete=False)
+        assert_array_almost_equal(y, x, decimal=2)
+        y = wavelets.morlet(20000, w=3, s=10, complete=False)[5000:15000]
+        assert_array_almost_equal(x, y, decimal=2)
+
+        x = wavelets.morlet(10000, w=7, s=10, complete=True)
+        y = wavelets.morlet(20000, w=7, s=20, complete=True)[5000:15000]
+        assert_array_almost_equal(x, y, decimal=2)
+
+        x = wavelets.morlet(10000, w=7, s=10, complete=False)
+        assert_array_almost_equal(x, y, decimal=2)
+        y = wavelets.morlet(20000, w=7, s=20, complete=False)[5000:15000]
+        assert_array_almost_equal(x, y, decimal=2)
+
+    def test_morlet2(self):
+        w = wavelets.morlet2(1.0, 0.5)
+        expected = (np.pi**(-0.25) * np.sqrt(1/0.5)).astype(complex)
+        assert_array_equal(w, expected)
+
+        lengths = [5, 11, 15, 51, 101]
+        for length in lengths:
+            w = wavelets.morlet2(length, 1.0)
+            assert_(len(w) == length)
+            max_loc = np.argmax(w)
+            assert_(max_loc == (length // 2))
+
+        points = 100
+        w = abs(wavelets.morlet2(points, 2.0))
+        half_vec = np.arange(0, points // 2)
+        assert_array_almost_equal(w[half_vec], w[-(half_vec + 1)])
+
+        x = np.array([5.03701224e-09 + 2.46742437e-24j,
+                      1.88279253e+00 + 0.00000000e+00j,
+                      5.03701224e-09 - 2.46742437e-24j])
+        y = wavelets.morlet2(3, s=1/(2*np.pi), w=2)
+        assert_array_almost_equal(x, y)
+
+    def test_ricker(self):
+        w = wavelets.ricker(1.0, 1)
+        expected = 2 / (np.sqrt(3 * 1.0) * (np.pi ** 0.25))
+        assert_array_equal(w, expected)
+
+        lengths = [5, 11, 15, 51, 101]
+        for length in lengths:
+            w = wavelets.ricker(length, 1.0)
+            assert_(len(w) == length)
+            max_loc = np.argmax(w)
+            assert_(max_loc == (length // 2))
+
+        points = 100
+        w = wavelets.ricker(points, 2.0)
+        half_vec = np.arange(0, points // 2)
+        # Wavelet should be symmetric
+        assert_array_almost_equal(w[half_vec], w[-(half_vec + 1)])
+
+        # Check zeros: the Ricker wavelet is proportional to
+        # (1 - t**2/a**2) * exp(-t**2 / (2*a**2)), so it vanishes at t = +/-a
+        aas = [5, 10, 15, 20, 30]
+        points = 99
+        for a in aas:
+            w = wavelets.ricker(points, a)
+            vec = np.arange(0, points) - (points - 1.0) / 2
+            exp_zero1 = np.argmin(np.abs(vec - a))
+            exp_zero2 = np.argmin(np.abs(vec + a))
+            assert_array_almost_equal(w[exp_zero1], 0)
+            assert_array_almost_equal(w[exp_zero2], 0)
+
+    def test_cwt(self):
+        widths = [1.0]
+        delta_wavelet = lambda s, t: np.array([1])
+        len_data = 100
+        test_data = np.sin(np.pi * np.arange(0, len_data) / 10.0)
+
+        # Test delta function input gives same data as output
+        cwt_dat = wavelets.cwt(test_data, delta_wavelet, widths)
+        assert_(cwt_dat.shape == (len(widths), len_data))
+        assert_array_almost_equal(test_data, cwt_dat.flatten())
+
+        # Check proper shape on output
+        widths = [1, 3, 4, 5, 10]
+        cwt_dat = wavelets.cwt(test_data, wavelets.ricker, widths)
+        assert_(cwt_dat.shape == (len(widths), len_data))
+
+        widths = [len_data * 10]
+        # Note: this wavelet isn't defined quite right, but is fine for this test
+        flat_wavelet = lambda l, w: np.full(w, 1 / w)
+        cwt_dat = wavelets.cwt(test_data, flat_wavelet, widths)
+        assert_array_almost_equal(cwt_dat, np.mean(test_data))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_windows.py b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_windows.py
new file mode 100644
index 00000000..20e40d20
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/tests/test_windows.py
@@ -0,0 +1,852 @@
+import pickle
+
+import numpy as np
+from numpy import array
+from numpy.testing import (assert_array_almost_equal, assert_array_equal,
+                           assert_allclose,
+                           assert_equal, assert_, assert_array_less,
+                           suppress_warnings)
+from pytest import raises as assert_raises
+
+from scipy.fft import fft
+from scipy.signal import windows, get_window, resample, hann as dep_hann
+
+
+window_funcs = [
+    ('boxcar', ()),
+    ('triang', ()),
+    ('parzen', ()),
+    ('bohman', ()),
+    ('blackman', ()),
+    ('nuttall', ()),
+    ('blackmanharris', ()),
+    ('flattop', ()),
+    ('bartlett', ()),
+    ('barthann', ()),
+    ('hamming', ()),
+    ('kaiser', (1,)),
+    ('dpss', (2,)),
+    ('gaussian', (0.5,)),
+    ('general_gaussian', (1.5, 2)),
+    ('chebwin', (1,)),
+    ('cosine', ()),
+    ('hann', ()),
+    ('exponential', ()),
+    ('taylor', ()),
+    ('tukey', (0.5,)),
+    ('lanczos', ()),
+    ]
+
+
+class TestBartHann:
+
+    def test_basic(self):
+        assert_allclose(windows.barthann(6, sym=True),
+                        [0, 0.35857354213752, 0.8794264578624801,
+                         0.8794264578624801, 0.3585735421375199, 0])
+        assert_allclose(windows.barthann(7),
+                        [0, 0.27, 0.73, 1.0, 0.73, 0.27, 0])
+        assert_allclose(windows.barthann(6, False),
+                        [0, 0.27, 0.73, 1.0, 0.73, 0.27])
+
+
+class TestBartlett:
+
+    def test_basic(self):
+        assert_allclose(windows.bartlett(6), [0, 0.4, 0.8, 0.8, 0.4, 0])
+        assert_allclose(windows.bartlett(7), [0, 1/3, 2/3, 1.0, 2/3, 1/3, 0])
+        assert_allclose(windows.bartlett(6, False),
+                        [0, 1/3, 2/3, 1.0, 2/3, 1/3])
+
+
+class TestBlackman:
+
+    def test_basic(self):
+        assert_allclose(windows.blackman(6, sym=False),
+                        [0, 0.13, 0.63, 1.0, 0.63, 0.13], atol=1e-14)
+        assert_allclose(windows.blackman(7, sym=False),
+                        [0, 0.09045342435412804, 0.4591829575459636,
+                         0.9203636180999081, 0.9203636180999081,
+                         0.4591829575459636, 0.09045342435412804], atol=1e-8)
+        assert_allclose(windows.blackman(6),
+                        [0, 0.2007701432625305, 0.8492298567374694,
+                         0.8492298567374694, 0.2007701432625305, 0],
+                        atol=1e-14)
+        assert_allclose(windows.blackman(7, True),
+                        [0, 0.13, 0.63, 1.0, 0.63, 0.13, 0], atol=1e-14)
+
+
+class TestBlackmanHarris:
+
+    def test_basic(self):
+        assert_allclose(windows.blackmanharris(6, False),
+                        [6.0e-05, 0.055645, 0.520575, 1.0, 0.520575, 0.055645])
+        assert_allclose(windows.blackmanharris(7, sym=False),
+                        [6.0e-05, 0.03339172347815117, 0.332833504298565,
+                         0.8893697722232837, 0.8893697722232838,
+                         0.3328335042985652, 0.03339172347815122])
+        assert_allclose(windows.blackmanharris(6),
+                        [6.0e-05, 0.1030114893456638, 0.7938335106543362,
+                         0.7938335106543364, 0.1030114893456638, 6.0e-05])
+        assert_allclose(windows.blackmanharris(7, sym=True),
+                        [6.0e-05, 0.055645, 0.520575, 1.0, 0.520575, 0.055645,
+                         6.0e-05])
+
+
+class TestTaylor:
+
+    def test_normalized(self):
+        """Tests windows of small length that are normalized to 1. See the
+        documentation for the Taylor window for more information on
+        normalization.
+        """
+        assert_allclose(windows.taylor(1, 2, 15), 1.0)
+        assert_allclose(
+            windows.taylor(5, 2, 15),
+            np.array([0.75803341, 0.90757699, 1.0, 0.90757699, 0.75803341])
+        )
+        assert_allclose(
+            windows.taylor(6, 2, 15),
+            np.array([
+                0.7504082, 0.86624416, 0.98208011, 0.98208011, 0.86624416,
+                0.7504082
+            ])
+        )
+
+    def test_non_normalized(self):
+        """Test windows of small length that are not normalized to 1. See
+        the documentation for the Taylor window for more information on
+        normalization.
+        """
+        assert_allclose(
+            windows.taylor(5, 2, 15, norm=False),
+            np.array([
+                0.87508054, 1.04771499, 1.15440894, 1.04771499, 0.87508054
+            ])
+        )
+        assert_allclose(
+            windows.taylor(6, 2, 15, norm=False),
+            np.array([
+                0.86627793, 1.0, 1.13372207, 1.13372207, 1.0, 0.86627793
+            ])
+        )
+
+    def test_correctness(self):
+        """This test ensures the correctness of the implemented Taylor
+        Windowing function. A Taylor Window of 1024 points is created, its FFT
+        is taken, and the Peak Sidelobe Level (PSLL) and 3dB and 18dB bandwidth
+        are found and checked.
+
+        A publication from Sandia National Laboratories was used as reference
+        for the correctness values [1]_.
+
+        References
+        ----------
+        .. [1] Armin Doerry, "Catalog of Window Taper Functions for
+               Sidelobe Control", 2017.
+               https://www.researchgate.net/profile/Armin_Doerry/publication/316281181_Catalog_of_Window_Taper_Functions_for_Sidelobe_Control/links/58f92cb2a6fdccb121c9d54d/Catalog-of-Window-Taper-Functions-for-Sidelobe-Control.pdf
+        """
+        M_win = 1024
+        N_fft = 131072
+        # Set norm=False for correctness as the values obtained from the
+        # scientific publication do not normalize the values. Normalizing
+        # changes the sidelobe level from the desired value.
+        w = windows.taylor(M_win, nbar=4, sll=35, norm=False, sym=False)
+        f = fft(w, N_fft)
+        spec = 20 * np.log10(np.abs(f / np.amax(f)))
+
+        first_zero = np.argmax(np.diff(spec) > 0)
+
+        PSLL = np.amax(spec[first_zero:-first_zero])
+
+        BW_3dB = 2*np.argmax(spec <= -3.0102999566398121) / N_fft * M_win
+        BW_18dB = 2*np.argmax(spec <= -18.061799739838872) / N_fft * M_win
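+        # (-3.0103 dB and -18.0618 dB are 10*log10(2) and 60*log10(2) on the
+        # 20*log10 amplitude scale, i.e. the half-power and 1/64-power
+        # points; added note.)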
+
+        assert_allclose(PSLL, -35.1672, atol=1)
+        assert_allclose(BW_3dB, 1.1822, atol=0.1)
+        assert_allclose(BW_18dB, 2.6112, atol=0.1)
+
+
+class TestBohman:
+
+    def test_basic(self):
+        assert_allclose(windows.bohman(6),
+                        [0, 0.1791238937062839, 0.8343114522576858,
+                         0.8343114522576858, 0.1791238937062838, 0])
+        assert_allclose(windows.bohman(7, sym=True),
+                        [0, 0.1089977810442293, 0.6089977810442293, 1.0,
+                         0.6089977810442295, 0.1089977810442293, 0])
+        assert_allclose(windows.bohman(6, False),
+                        [0, 0.1089977810442293, 0.6089977810442293, 1.0,
+                         0.6089977810442295, 0.1089977810442293])
+
+
+class TestBoxcar:
+
+    def test_basic(self):
+        assert_allclose(windows.boxcar(6), [1, 1, 1, 1, 1, 1])
+        assert_allclose(windows.boxcar(7), [1, 1, 1, 1, 1, 1, 1])
+        assert_allclose(windows.boxcar(6, False), [1, 1, 1, 1, 1, 1])
+
+
+cheb_odd_true = array([0.200938, 0.107729, 0.134941, 0.165348,
+                       0.198891, 0.235450, 0.274846, 0.316836,
+                       0.361119, 0.407338, 0.455079, 0.503883,
+                       0.553248, 0.602637, 0.651489, 0.699227,
+                       0.745266, 0.789028, 0.829947, 0.867485,
+                       0.901138, 0.930448, 0.955010, 0.974482,
+                       0.988591, 0.997138, 1.000000, 0.997138,
+                       0.988591, 0.974482, 0.955010, 0.930448,
+                       0.901138, 0.867485, 0.829947, 0.789028,
+                       0.745266, 0.699227, 0.651489, 0.602637,
+                       0.553248, 0.503883, 0.455079, 0.407338,
+                       0.361119, 0.316836, 0.274846, 0.235450,
+                       0.198891, 0.165348, 0.134941, 0.107729,
+                       0.200938])
+
+cheb_even_true = array([0.203894, 0.107279, 0.133904,
+                        0.163608, 0.196338, 0.231986,
+                        0.270385, 0.311313, 0.354493,
+                        0.399594, 0.446233, 0.493983,
+                        0.542378, 0.590916, 0.639071,
+                        0.686302, 0.732055, 0.775783,
+                        0.816944, 0.855021, 0.889525,
+                        0.920006, 0.946060, 0.967339,
+                        0.983557, 0.994494, 1.000000,
+                        1.000000, 0.994494, 0.983557,
+                        0.967339, 0.946060, 0.920006,
+                        0.889525, 0.855021, 0.816944,
+                        0.775783, 0.732055, 0.686302,
+                        0.639071, 0.590916, 0.542378,
+                        0.493983, 0.446233, 0.399594,
+                        0.354493, 0.311313, 0.270385,
+                        0.231986, 0.196338, 0.163608,
+                        0.133904, 0.107279, 0.203894])
+
+
+class TestChebWin:
+
+    def test_basic(self):
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning, "This window is not suitable")
+            assert_allclose(windows.chebwin(6, 100),
+                            [0.1046401879356917, 0.5075781475823447, 1.0, 1.0,
+                             0.5075781475823447, 0.1046401879356917])
+            assert_allclose(windows.chebwin(7, 100),
+                            [0.05650405062850233, 0.316608530648474,
+                             0.7601208123539079, 1.0, 0.7601208123539079,
+                             0.316608530648474, 0.05650405062850233])
+            assert_allclose(windows.chebwin(6, 10),
+                            [1.0, 0.6071201674458373, 0.6808391469897297,
+                             0.6808391469897297, 0.6071201674458373, 1.0])
+            assert_allclose(windows.chebwin(7, 10),
+                            [1.0, 0.5190521247588651, 0.5864059018130382,
+                             0.6101519801307441, 0.5864059018130382,
+                             0.5190521247588651, 1.0])
+            assert_allclose(windows.chebwin(6, 10, False),
+                            [1.0, 0.5190521247588651, 0.5864059018130382,
+                             0.6101519801307441, 0.5864059018130382,
+                             0.5190521247588651])
+
+    def test_cheb_odd_high_attenuation(self):
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning, "This window is not suitable")
+            cheb_odd = windows.chebwin(53, at=-40)
+        assert_array_almost_equal(cheb_odd, cheb_odd_true, decimal=4)
+
+    def test_cheb_even_high_attenuation(self):
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning, "This window is not suitable")
+            cheb_even = windows.chebwin(54, at=40)
+        assert_array_almost_equal(cheb_even, cheb_even_true, decimal=4)
+
+    def test_cheb_odd_low_attenuation(self):
+        cheb_odd_low_at_true = array([1.000000, 0.519052, 0.586405,
+                                      0.610151, 0.586405, 0.519052,
+                                      1.000000])
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning, "This window is not suitable")
+            cheb_odd = windows.chebwin(7, at=10)
+        assert_array_almost_equal(cheb_odd, cheb_odd_low_at_true, decimal=4)
+
+    def test_cheb_even_low_attenuation(self):
+        cheb_even_low_at_true = array([1.000000, 0.451924, 0.51027,
+                                       0.541338, 0.541338, 0.51027,
+                                       0.451924, 1.000000])
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning, "This window is not suitable")
+            cheb_even = windows.chebwin(8, at=-10)
+        assert_array_almost_equal(cheb_even, cheb_even_low_at_true, decimal=4)
+
+
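+# Keys are (M, center, tau, sym) argument tuples for windows.exponential;
+# None marks combinations expected to raise ValueError, since a custom
+# center is incompatible with a symmetric window (added note).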
+exponential_data = {
+    (4, None, 0.2, False):
+        array([4.53999297624848542e-05,
+               6.73794699908546700e-03, 1.00000000000000000e+00,
+               6.73794699908546700e-03]),
+    (4, None, 0.2, True): array([0.00055308437014783, 0.0820849986238988,
+                                 0.0820849986238988, 0.00055308437014783]),
+    (4, None, 1.0, False): array([0.1353352832366127, 0.36787944117144233, 1.,
+                                  0.36787944117144233]),
+    (4, None, 1.0, True): array([0.22313016014842982, 0.60653065971263342,
+                                 0.60653065971263342, 0.22313016014842982]),
+    (4, 2, 0.2, False):
+        array([4.53999297624848542e-05, 6.73794699908546700e-03,
+               1.00000000000000000e+00, 6.73794699908546700e-03]),
+    (4, 2, 0.2, True): None,
+    (4, 2, 1.0, False): array([0.1353352832366127, 0.36787944117144233, 1.,
+                               0.36787944117144233]),
+    (4, 2, 1.0, True): None,
+    (5, None, 0.2, True):
+        array([4.53999297624848542e-05,
+               6.73794699908546700e-03, 1.00000000000000000e+00,
+               6.73794699908546700e-03, 4.53999297624848542e-05]),
+    (5, None, 1.0, True): array([0.1353352832366127, 0.36787944117144233, 1.,
+                                 0.36787944117144233, 0.1353352832366127]),
+    (5, 2, 0.2, True): None,
+    (5, 2, 1.0, True): None
+}
+
+
+def test_exponential():
+    for k, v in exponential_data.items():
+        if v is None:
+            assert_raises(ValueError, windows.exponential, *k)
+        else:
+            win = windows.exponential(*k)
+            assert_allclose(win, v, rtol=1e-14)
+
+
+class TestFlatTop:
+
+    def test_basic(self):
+        assert_allclose(windows.flattop(6, sym=False),
+                        [-0.000421051, -0.051263156, 0.19821053, 1.0,
+                         0.19821053, -0.051263156])
+        assert_allclose(windows.flattop(7, sym=False),
+                        [-0.000421051, -0.03684078115492348,
+                         0.01070371671615342, 0.7808739149387698,
+                         0.7808739149387698, 0.01070371671615342,
+                         -0.03684078115492348])
+        assert_allclose(windows.flattop(6),
+                        [-0.000421051, -0.0677142520762119, 0.6068721525762117,
+                         0.6068721525762117, -0.0677142520762119,
+                         -0.000421051])
+        assert_allclose(windows.flattop(7, True),
+                        [-0.000421051, -0.051263156, 0.19821053, 1.0,
+                         0.19821053, -0.051263156, -0.000421051])
+
+
+class TestGaussian:
+
+    def test_basic(self):
+        assert_allclose(windows.gaussian(6, 1.0),
+                        [0.04393693362340742, 0.3246524673583497,
+                         0.8824969025845955, 0.8824969025845955,
+                         0.3246524673583497, 0.04393693362340742])
+        assert_allclose(windows.gaussian(7, 1.2),
+                        [0.04393693362340742, 0.2493522087772962,
+                         0.7066482778577162, 1.0, 0.7066482778577162,
+                         0.2493522087772962, 0.04393693362340742])
+        assert_allclose(windows.gaussian(7, 3),
+                        [0.6065306597126334, 0.8007374029168081,
+                         0.9459594689067654, 1.0, 0.9459594689067654,
+                         0.8007374029168081, 0.6065306597126334])
+        assert_allclose(windows.gaussian(6, 3, False),
+                        [0.6065306597126334, 0.8007374029168081,
+                         0.9459594689067654, 1.0, 0.9459594689067654,
+                         0.8007374029168081])
+
+
+class TestGeneralCosine:
+
+    def test_basic(self):
+        assert_allclose(windows.general_cosine(5, [0.5, 0.3, 0.2]),
+                        [0.4, 0.3, 1, 0.3, 0.4])
+        assert_allclose(windows.general_cosine(4, [0.5, 0.3, 0.2], sym=False),
+                        [0.4, 0.3, 1, 0.3])
+
+
+class TestGeneralHamming:
+
+    def test_basic(self):
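+        # The general Hamming family is alpha - (1 - alpha)*cos(2*pi*n/(M-1))
+        # for the symmetric case, so the endpoints equal 2*alpha - 1,
+        # e.g. 0.4 for alpha = 0.7 (added note).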
+        assert_allclose(windows.general_hamming(5, 0.7),
+                        [0.4, 0.7, 1.0, 0.7, 0.4])
+        assert_allclose(windows.general_hamming(5, 0.75, sym=False),
+                        [0.5, 0.6727457514, 0.9522542486,
+                         0.9522542486, 0.6727457514])
+        assert_allclose(windows.general_hamming(6, 0.75, sym=True),
+                        [0.5, 0.6727457514, 0.9522542486,
+                        0.9522542486, 0.6727457514, 0.5])
+
+
+class TestHamming:
+
+    def test_basic(self):
+        assert_allclose(windows.hamming(6, False),
+                        [0.08, 0.31, 0.77, 1.0, 0.77, 0.31])
+        assert_allclose(windows.hamming(7, sym=False),
+                        [0.08, 0.2531946911449826, 0.6423596296199047,
+                         0.9544456792351128, 0.9544456792351128,
+                         0.6423596296199047, 0.2531946911449826])
+        assert_allclose(windows.hamming(6),
+                        [0.08, 0.3978521825875242, 0.9121478174124757,
+                         0.9121478174124757, 0.3978521825875242, 0.08])
+        assert_allclose(windows.hamming(7, sym=True),
+                        [0.08, 0.31, 0.77, 1.0, 0.77, 0.31, 0.08])
+
+
+class TestHann:
+
+    def test_basic(self):
+        assert_allclose(windows.hann(6, sym=False),
+                        [0, 0.25, 0.75, 1.0, 0.75, 0.25])
+        assert_allclose(windows.hann(7, sym=False),
+                        [0, 0.1882550990706332, 0.6112604669781572,
+                         0.9504844339512095, 0.9504844339512095,
+                         0.6112604669781572, 0.1882550990706332])
+        assert_allclose(windows.hann(6, True),
+                        [0, 0.3454915028125263, 0.9045084971874737,
+                         0.9045084971874737, 0.3454915028125263, 0])
+        assert_allclose(windows.hann(7),
+                        [0, 0.25, 0.75, 1.0, 0.75, 0.25, 0])
+
+
+class TestKaiser:
+
+    def test_basic(self):
+        assert_allclose(windows.kaiser(6, 0.5),
+                        [0.9403061933191572, 0.9782962393705389,
+                         0.9975765035372042, 0.9975765035372042,
+                         0.9782962393705389, 0.9403061933191572])
+        assert_allclose(windows.kaiser(7, 0.5),
+                        [0.9403061933191572, 0.9732402256999829,
+                         0.9932754654413773, 1.0, 0.9932754654413773,
+                         0.9732402256999829, 0.9403061933191572])
+        assert_allclose(windows.kaiser(6, 2.7),
+                        [0.2603047507678832, 0.6648106293528054,
+                         0.9582099802511439, 0.9582099802511439,
+                         0.6648106293528054, 0.2603047507678832])
+        assert_allclose(windows.kaiser(7, 2.7),
+                        [0.2603047507678832, 0.5985765418119844,
+                         0.8868495172060835, 1.0, 0.8868495172060835,
+                         0.5985765418119844, 0.2603047507678832])
+        assert_allclose(windows.kaiser(6, 2.7, False),
+                        [0.2603047507678832, 0.5985765418119844,
+                         0.8868495172060835, 1.0, 0.8868495172060835,
+                         0.5985765418119844])
+
+
+class TestKaiserBesselDerived:
+
+    def test_basic(self):
+        M = 100
+        w = windows.kaiser_bessel_derived(M, beta=4.0)
+        w2 = windows.get_window(('kaiser bessel derived', 4.0),
+                                M, fftbins=False)
+        assert_allclose(w, w2)
+
+        # Test for Princen-Bradley condition
+        assert_allclose(w[:M // 2] ** 2 + w[-M // 2:] ** 2, 1.)
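+        # (The Princen-Bradley condition w[n]**2 + w[n + M//2]**2 == 1 is
+        # what makes KBD windows suitable for perfect-reconstruction MDCT
+        # filter banks; added note.)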
+
+        # Test actual values from other implementations
+        # M = 2:  sqrt(2) / 2
+        # M = 4:  0.518562710536, 0.855039598640
+        # M = 6:  0.436168993154, 0.707106781187, 0.899864772847
+        # Ref:https://github.com/scipy/scipy/pull/4747#issuecomment-172849418
+        assert_allclose(windows.kaiser_bessel_derived(2, beta=np.pi / 2)[:1],
+                        np.sqrt(2) / 2)
+
+        assert_allclose(windows.kaiser_bessel_derived(4, beta=np.pi / 2)[:2],
+                        [0.518562710536, 0.855039598640])
+
+        assert_allclose(windows.kaiser_bessel_derived(6, beta=np.pi / 2)[:3],
+                        [0.436168993154, 0.707106781187, 0.899864772847])
+
+    def test_exceptions(self):
+        M = 100
+        # Assert ValueError for odd window length
+        msg = ("Kaiser-Bessel Derived windows are only defined for even "
+               "number of points")
+        with assert_raises(ValueError, match=msg):
+            windows.kaiser_bessel_derived(M + 1, beta=4.)
+
+        # Assert ValueError for non-symmetric setting
+        msg = ("Kaiser-Bessel Derived windows are only defined for "
+               "symmetric shapes")
+        with assert_raises(ValueError, match=msg):
+            windows.kaiser_bessel_derived(M + 1, beta=4., sym=False)
+
+
+class TestNuttall:
+
+    def test_basic(self):
+        assert_allclose(windows.nuttall(6, sym=False),
+                        [0.0003628, 0.0613345, 0.5292298, 1.0, 0.5292298,
+                         0.0613345])
+        assert_allclose(windows.nuttall(7, sym=False),
+                        [0.0003628, 0.03777576895352025, 0.3427276199688195,
+                         0.8918518610776603, 0.8918518610776603,
+                         0.3427276199688196, 0.0377757689535203])
+        assert_allclose(windows.nuttall(6),
+                        [0.0003628, 0.1105152530498718, 0.7982580969501282,
+                         0.7982580969501283, 0.1105152530498719, 0.0003628])
+        assert_allclose(windows.nuttall(7, True),
+                        [0.0003628, 0.0613345, 0.5292298, 1.0, 0.5292298,
+                         0.0613345, 0.0003628])
+
+
+class TestParzen:
+
+    def test_basic(self):
+        assert_allclose(windows.parzen(6),
+                        [0.009259259259259254, 0.25, 0.8611111111111112,
+                         0.8611111111111112, 0.25, 0.009259259259259254])
+        assert_allclose(windows.parzen(7, sym=True),
+                        [0.00583090379008747, 0.1574344023323616,
+                         0.6501457725947521, 1.0, 0.6501457725947521,
+                         0.1574344023323616, 0.00583090379008747])
+        assert_allclose(windows.parzen(6, False),
+                        [0.00583090379008747, 0.1574344023323616,
+                         0.6501457725947521, 1.0, 0.6501457725947521,
+                         0.1574344023323616])
+
+
+class TestTriang:
+
+    def test_basic(self):
+
+        assert_allclose(windows.triang(6, True),
+                        [1/6, 1/2, 5/6, 5/6, 1/2, 1/6])
+        assert_allclose(windows.triang(7),
+                        [1/4, 1/2, 3/4, 1, 3/4, 1/2, 1/4])
+        assert_allclose(windows.triang(6, sym=False),
+                        [1/4, 1/2, 3/4, 1, 3/4, 1/2])
+
+
+tukey_data = {
+    (4, 0.5, True): array([0.0, 1.0, 1.0, 0.0]),
+    (4, 0.9, True): array([0.0, 0.84312081893436686,
+                           0.84312081893436686, 0.0]),
+    (4, 1.0, True): array([0.0, 0.75, 0.75, 0.0]),
+    (4, 0.5, False): array([0.0, 1.0, 1.0, 1.0]),
+    (4, 0.9, False): array([0.0, 0.58682408883346526,
+                            1.0, 0.58682408883346526]),
+    (4, 1.0, False): array([0.0, 0.5, 1.0, 0.5]),
+    (5, 0.0, True): array([1.0, 1.0, 1.0, 1.0, 1.0]),
+    (5, 0.8, True): array([0.0, 0.69134171618254492,
+                           1.0, 0.69134171618254492, 0.0]),
+    (5, 1.0, True): array([0.0, 0.5, 1.0, 0.5, 0.0]),
+
+    (6, 0): [1, 1, 1, 1, 1, 1],
+    (7, 0): [1, 1, 1, 1, 1, 1, 1],
+    (6, .25): [0, 1, 1, 1, 1, 0],
+    (7, .25): [0, 1, 1, 1, 1, 1, 0],
+    (6,): [0, 0.9045084971874737, 1.0, 1.0, 0.9045084971874735, 0],
+    (7,): [0, 0.75, 1.0, 1.0, 1.0, 0.75, 0],
+    (6, .75): [0, 0.5522642316338269, 1.0, 1.0, 0.5522642316338267, 0],
+    (7, .75): [0, 0.4131759111665348, 0.9698463103929542, 1.0,
+               0.9698463103929542, 0.4131759111665347, 0],
+    (6, 1): [0, 0.3454915028125263, 0.9045084971874737, 0.9045084971874737,
+             0.3454915028125263, 0],
+    (7, 1): [0, 0.25, 0.75, 1.0, 0.75, 0.25, 0],
+}
+
+
+class TestTukey:
+
+    def test_basic(self):
+        # Test against hardcoded data
+        for k, v in tukey_data.items():
+            if v is None:
+                assert_raises(ValueError, windows.tukey, *k)
+            else:
+                win = windows.tukey(*k)
+                assert_allclose(win, v, rtol=1e-14)
+
+    def test_extremes(self):
+        # Test extremes of alpha correspond to boxcar and hann
+        tuk0 = windows.tukey(100, 0)
+        box0 = windows.boxcar(100)
+        assert_array_almost_equal(tuk0, box0)
+
+        tuk1 = windows.tukey(100, 1)
+        han1 = windows.hann(100)
+        assert_array_almost_equal(tuk1, han1)
+
+
+dpss_data = {
+    # All values from MATLAB:
+    # * taper[1] of (3, 1.4, 3) sign-flipped
+    # * taper[3] of (5, 1.5, 5) sign-flipped
+    (4, 0.1, 2): ([[0.497943898, 0.502047681, 0.502047681, 0.497943898], [0.670487993, 0.224601537, -0.224601537, -0.670487993]], [0.197961815, 0.002035474]),  # noqa
+    (3, 1.4, 3): ([[0.410233151, 0.814504464, 0.410233151], [0.707106781, 0.0, -0.707106781], [0.575941629, -0.580157287, 0.575941629]], [0.999998093, 0.998067480, 0.801934426]),  # noqa
+    (5, 1.5, 5): ([[0.1745071052, 0.4956749177, 0.669109327, 0.495674917, 0.174507105], [0.4399493348, 0.553574369, 0.0, -0.553574369, -0.439949334], [0.631452756, 0.073280238, -0.437943884, 0.073280238, 0.631452756], [0.553574369, -0.439949334, 0.0, 0.439949334, -0.553574369], [0.266110290, -0.498935248, 0.600414741, -0.498935248, 0.266110290147157]], [0.999728571, 0.983706916, 0.768457889, 0.234159338, 0.013947282907567]),  # noqa: E501
+    (100, 2, 4): ([[0.0030914414, 0.0041266922, 0.005315076, 0.006665149, 0.008184854, 0.0098814158, 0.011761239, 0.013829809, 0.016091597, 0.018549973, 0.02120712, 0.02406396, 0.027120092, 0.030373728, 0.033821651, 0.037459181, 0.041280145, 0.045276872, 0.049440192, 0.053759447, 0.058222524, 0.062815894, 0.067524661, 0.072332638, 0.077222418, 0.082175473, 0.087172252, 0.092192299, 0.097214376, 0.1022166, 0.10717657, 0.11207154, 0.11687856, 0.12157463, 0.12613686, 0.13054266, 0.13476986, 0.13879691, 0.14260302, 0.14616832, 0.14947401, 0.1525025, 0.15523755, 0.15766438, 0.15976981, 0.16154233, 0.16297223, 0.16405162, 0.16477455, 0.16513702, 0.16513702, 0.16477455, 0.16405162, 0.16297223, 0.16154233, 0.15976981, 0.15766438, 0.15523755, 0.1525025, 0.14947401, 0.14616832, 0.14260302, 0.13879691, 0.13476986, 0.13054266, 0.12613686, 0.12157463, 0.11687856, 0.11207154, 0.10717657, 0.1022166, 0.097214376, 0.092192299, 0.087172252, 0.082175473, 0.077222418, 0.072332638, 0.067524661, 0.062815894, 0.058222524, 0.053759447, 0.049440192, 0.045276872, 0.041280145, 0.037459181, 0.033821651, 0.030373728, 0.027120092, 0.02406396, 0.02120712, 0.018549973, 0.016091597, 0.013829809, 0.011761239, 0.0098814158, 0.008184854, 0.006665149, 0.005315076, 0.0041266922, 0.0030914414], [0.018064449, 0.022040342, 0.026325013, 0.030905288, 0.035764398, 0.040881982, 0.046234148, 0.051793558, 0.057529559, 0.063408356, 0.069393216, 0.075444716, 0.081521022, 0.087578202, 0.093570567, 0.099451049, 0.10517159, 0.11068356, 0.11593818, 0.12088699, 0.12548227, 0.12967752, 0.1334279, 0.13669069, 0.13942569, 0.1415957, 0.14316686, 0.14410905, 0.14439626, 0.14400686, 0.14292389, 0.1411353, 0.13863416, 0.13541876, 0.13149274, 0.12686516, 0.12155045, 0.1155684, 0.10894403, 0.10170748, 0.093893752, 0.08554251, 0.076697768, 0.067407559, 0.057723559, 0.04770068, 0.037396627, 0.026871428, 0.016186944, 0.0054063557, -0.0054063557, -0.016186944, -0.026871428, -0.037396627, -0.04770068, -0.057723559, -0.067407559, -0.076697768, -0.08554251, -0.093893752, -0.10170748, -0.10894403, -0.1155684, -0.12155045, -0.12686516, -0.13149274, -0.13541876, -0.13863416, -0.1411353, -0.14292389, -0.14400686, -0.14439626, -0.14410905, -0.14316686, -0.1415957, -0.13942569, -0.13669069, -0.1334279, -0.12967752, -0.12548227, -0.12088699, -0.11593818, -0.11068356, -0.10517159, -0.099451049, -0.093570567, -0.087578202, -0.081521022, -0.075444716, -0.069393216, -0.063408356, -0.057529559, -0.051793558, -0.046234148, -0.040881982, -0.035764398, -0.030905288, -0.026325013, -0.022040342, -0.018064449], [0.064817553, 0.072567801, 0.080292992, 0.087918235, 0.095367076, 0.10256232, 0.10942687, 0.1158846, 0.12186124, 0.12728523, 0.13208858, 0.13620771, 0.13958427, 0.14216587, 0.14390678, 0.14476863, 0.1447209, 0.14374148, 0.14181704, 0.13894336, 0.13512554, 0.13037812, 0.1247251, 0.11819984, 0.11084487, 0.10271159, 0.093859853, 0.084357497, 0.074279719, 0.063708406, 0.052731374, 0.041441525, 0.029935953, 0.018314987, 0.0066811877, -0.0048616765, -0.016209689, -0.027259848, -0.037911124, -0.048065512, -0.05762905, -0.066512804, -0.0746338, -0.081915903, -0.088290621, -0.09369783, -0.098086416, -0.10141482, -0.10365146, -0.10477512, -0.10477512, -0.10365146, -0.10141482, -0.098086416, -0.09369783, -0.088290621, -0.081915903, -0.0746338, -0.066512804, -0.05762905, -0.048065512, -0.037911124, -0.027259848, -0.016209689, -0.0048616765, 0.0066811877, 0.018314987, 0.029935953, 0.041441525, 0.052731374, 0.063708406, 0.074279719, 0.084357497, 0.093859853, 0.10271159, 0.11084487, 0.11819984, 0.1247251, 0.13037812, 0.13512554, 0.13894336, 0.14181704, 0.14374148, 0.1447209, 0.14476863, 0.14390678, 0.14216587, 0.13958427, 0.13620771, 0.13208858, 0.12728523, 0.12186124, 0.1158846, 0.10942687, 0.10256232, 0.095367076, 0.087918235, 0.080292992, 0.072567801, 0.064817553], [0.14985551, 0.15512305, 0.15931467, 0.16236806, 0.16423291, 0.16487165, 0.16426009, 0.1623879, 0.1592589, 0.15489114, 0.14931693, 0.14258255, 0.13474785, 0.1258857, 0.11608124, 0.10543095, 0.094041635, 0.082029213, 0.069517411, 0.056636348, 0.043521028, 0.030309756, 0.017142511, 0.0041592774, -0.0085016282, -0.020705223, -0.032321494, -0.043226982, -0.053306291, -0.062453515, -0.070573544, -0.077583253, -0.083412547, -0.088005244, -0.091319802, -0.093329861, -0.094024602, -0.093408915, -0.091503383, -0.08834406, -0.08398207, -0.078483012, -0.071926192, -0.064403681, -0.056019215, -0.046886954, -0.037130106, -0.026879442, -0.016271713, -0.005448, 0.005448, 0.016271713, 0.026879442, 0.037130106, 0.046886954, 0.056019215, 0.064403681, 0.071926192, 0.078483012, 0.08398207, 0.08834406, 0.091503383, 0.093408915, 0.094024602, 0.093329861, 0.091319802, 0.088005244, 0.083412547, 0.077583253, 0.070573544, 0.062453515, 0.053306291, 0.043226982, 0.032321494, 0.020705223, 0.0085016282, -0.0041592774, -0.017142511, -0.030309756, -0.043521028, -0.056636348, -0.069517411, -0.082029213, -0.094041635, -0.10543095, -0.11608124, -0.1258857, -0.13474785, -0.14258255, -0.14931693, -0.15489114, -0.1592589, -0.1623879, -0.16426009, -0.16487165, -0.16423291, -0.16236806, -0.15931467, -0.15512305, -0.14985551]], [0.999943140, 0.997571533, 0.959465463, 0.721862496]),  # noqa: E501
+}
+
+
+class TestDPSS:
+
+    def test_basic(self):
+        # Test against hardcoded data
+        for k, v in dpss_data.items():
+            win, ratios = windows.dpss(*k, return_ratios=True)
+            assert_allclose(win, v[0], atol=1e-7, err_msg=k)
+            assert_allclose(ratios, v[1], rtol=1e-5, atol=1e-7, err_msg=k)
+
+    def test_unity(self):
+        # Test unity value handling (gh-2221)
+        for M in range(1, 21):
+            # corrected w/approximation (default)
+            win = windows.dpss(M, M / 2.1)
+            expected = M % 2  # one for odd, none for even
+            assert_equal(np.isclose(win, 1.).sum(), expected,
+                         err_msg='%s' % (win,))
+            # corrected w/subsample delay (slower)
+            win_sub = windows.dpss(M, M / 2.1, norm='subsample')
+            if M > 2:
+                # @M=2 the subsample doesn't do anything
+                assert_equal(np.isclose(win_sub, 1.).sum(), expected,
+                             err_msg='%s' % (win_sub,))
+                assert_allclose(win, win_sub, rtol=0.03)  # within 3%
+            # not the same, l2-norm
+            win_2 = windows.dpss(M, M / 2.1, norm=2)
+            expected = 1 if M == 1 else 0
+            assert_equal(np.isclose(win_2, 1.).sum(), expected,
+                         err_msg='%s' % (win_2,))
+
+    def test_extremes(self):
+        # Test extremes of alpha
+        lam = windows.dpss(31, 6, 4, return_ratios=True)[1]
+        assert_array_almost_equal(lam, 1.)
+        lam = windows.dpss(31, 7, 4, return_ratios=True)[1]
+        assert_array_almost_equal(lam, 1.)
+        lam = windows.dpss(31, 8, 4, return_ratios=True)[1]
+        assert_array_almost_equal(lam, 1.)
+
+    def test_degenerate(self):
+        # Test failures
+        assert_raises(ValueError, windows.dpss, 4, 1.5, -1)  # Bad Kmax
+        assert_raises(ValueError, windows.dpss, 4, 1.5, -5)
+        assert_raises(TypeError, windows.dpss, 4, 1.5, 1.1)
+        assert_raises(ValueError, windows.dpss, 3, 1.5, 3)  # NW must be < N/2.
+        assert_raises(ValueError, windows.dpss, 3, -1, 3)  # NW must be pos
+        assert_raises(ValueError, windows.dpss, 3, 0, 3)
+        assert_raises(ValueError, windows.dpss, -1, 1, 3)  # negative M
+
+
+class TestLanczos:
+
+    def test_basic(self):
+        # Analytical results:
+        # sinc(x) = sinc(-x)
+        # sinc(pi) = 0, sinc(0) = 1
+        # Hand computation on WolframAlpha:
+        # sinc(2 pi / 3) = 0.413496672
+        # sinc(pi / 3) = 0.826993343
+        # sinc(3 pi / 5) = 0.504551152
+        # sinc(pi / 5) = 0.935489284
+        assert_allclose(windows.lanczos(6, sym=False),
+                        [0., 0.413496672,
+                         0.826993343, 1., 0.826993343,
+                         0.413496672],
+                        atol=1e-9)
+        assert_allclose(windows.lanczos(6),
+                        [0., 0.504551152,
+                         0.935489284, 0.935489284,
+                         0.504551152, 0.],
+                        atol=1e-9)
+        assert_allclose(windows.lanczos(7, sym=True),
+                        [0., 0.413496672,
+                         0.826993343, 1., 0.826993343,
+                         0.413496672, 0.],
+                        atol=1e-9)
+
+    def test_array_size(self):
+        for n in [0, 10, 11]:
+            assert_equal(len(windows.lanczos(n, sym=False)), n)
+            assert_equal(len(windows.lanczos(n, sym=True)), n)
+
+
+class TestGetWindow:
+
+    def test_boxcar(self):
+        w = windows.get_window('boxcar', 12)
+        assert_array_equal(w, np.ones_like(w))
+
+        # window is a tuple of len 1
+        w = windows.get_window(('boxcar',), 16)
+        assert_array_equal(w, np.ones_like(w))
+
+    def test_cheb_odd(self):
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning, "This window is not suitable")
+            w = windows.get_window(('chebwin', -40), 53, fftbins=False)
+        assert_array_almost_equal(w, cheb_odd_true, decimal=4)
+
+    def test_cheb_even(self):
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning, "This window is not suitable")
+            w = windows.get_window(('chebwin', 40), 54, fftbins=False)
+        assert_array_almost_equal(w, cheb_even_true, decimal=4)
+
+    def test_dpss(self):
+        win1 = windows.get_window(('dpss', 3), 64, fftbins=False)
+        win2 = windows.dpss(64, 3)
+        assert_array_almost_equal(win1, win2, decimal=4)
+
+    def test_kaiser_float(self):
+        win1 = windows.get_window(7.2, 64)
+        win2 = windows.kaiser(64, 7.2, False)
+        assert_allclose(win1, win2)
+
+    def test_invalid_inputs(self):
+        # Window is not a float, tuple, or string
+        assert_raises(ValueError, windows.get_window, set('hann'), 8)
+
+        # Unknown window type error
+        assert_raises(ValueError, windows.get_window, 'broken', 4)
+
+    def test_array_as_window(self):
+        # github issue 3603
+        osfactor = 128
+        sig = np.arange(128)
+
+        win = windows.get_window(('kaiser', 8.0), osfactor // 2)
+        with assert_raises(ValueError, match='must have the same length'):
+            resample(sig, len(sig) * osfactor, window=win)
+
+    def test_general_cosine(self):
+        assert_allclose(get_window(('general_cosine', [0.5, 0.3, 0.2]), 4),
+                        [0.4, 0.3, 1, 0.3])
+        assert_allclose(get_window(('general_cosine', [0.5, 0.3, 0.2]), 4,
+                                   fftbins=False),
+                        [0.4, 0.55, 0.55, 0.4])
+
+    def test_general_hamming(self):
+        assert_allclose(get_window(('general_hamming', 0.7), 5),
+                        [0.4, 0.6072949, 0.9427051, 0.9427051, 0.6072949])
+        assert_allclose(get_window(('general_hamming', 0.7), 5, fftbins=False),
+                        [0.4, 0.7, 1.0, 0.7, 0.4])
+
+    def test_lanczos(self):
+        assert_allclose(get_window('lanczos', 6),
+                        [0., 0.413496672, 0.826993343, 1., 0.826993343,
+                         0.413496672], atol=1e-9)
+        assert_allclose(get_window('lanczos', 6, fftbins=False),
+                        [0., 0.504551152, 0.935489284, 0.935489284,
+                         0.504551152, 0.], atol=1e-9)
+        assert_allclose(get_window('lanczos', 6), get_window('sinc', 6))
+
+
+def test_windowfunc_basics():
+    for window_name, params in window_funcs:
+        window = getattr(windows, window_name)
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning, "This window is not suitable")
+            # Check symmetry for odd and even lengths
+            w1 = window(8, *params, sym=True)
+            w2 = window(7, *params, sym=False)
+            assert_array_almost_equal(w1[:-1], w2)
+
+            w1 = window(9, *params, sym=True)
+            w2 = window(8, *params, sym=False)
+            assert_array_almost_equal(w1[:-1], w2)
+
+            # Check that functions run and output lengths are correct
+            assert_equal(len(window(6, *params, sym=True)), 6)
+            assert_equal(len(window(6, *params, sym=False)), 6)
+            assert_equal(len(window(7, *params, sym=True)), 7)
+            assert_equal(len(window(7, *params, sym=False)), 7)
+
+            # Check invalid lengths
+            assert_raises(ValueError, window, 5.5, *params)
+            assert_raises(ValueError, window, -7, *params)
+
+            # Check degenerate cases
+            assert_array_equal(window(0, *params, sym=True), [])
+            assert_array_equal(window(0, *params, sym=False), [])
+            assert_array_equal(window(1, *params, sym=True), [1])
+            assert_array_equal(window(1, *params, sym=False), [1])
+
+            # Check dtype
+            assert_(window(0, *params, sym=True).dtype == 'float')
+            assert_(window(0, *params, sym=False).dtype == 'float')
+            assert_(window(1, *params, sym=True).dtype == 'float')
+            assert_(window(1, *params, sym=False).dtype == 'float')
+            assert_(window(6, *params, sym=True).dtype == 'float')
+            assert_(window(6, *params, sym=False).dtype == 'float')
+
+            # Check normalization
+            assert_array_less(window(10, *params, sym=True), 1.01)
+            assert_array_less(window(10, *params, sym=False), 1.01)
+            assert_array_less(window(9, *params, sym=True), 1.01)
+            assert_array_less(window(9, *params, sym=False), 1.01)
+
+            # Check that DFT-even spectrum is purely real for odd and even
+            assert_allclose(fft(window(10, *params, sym=False)).imag,
+                            0, atol=1e-14)
+            assert_allclose(fft(window(11, *params, sym=False)).imag,
+                            0, atol=1e-14)
+
+
+def test_needs_params():
+    for winstr in ['kaiser', 'ksr', 'kaiser_bessel_derived', 'kbd',
+                   'gaussian', 'gauss', 'gss',
+                   'general gaussian', 'general_gaussian',
+                   'general gauss', 'general_gauss', 'ggs',
+                   'dss', 'dpss', 'general cosine', 'general_cosine',
+                   'chebwin', 'cheb', 'general hamming', 'general_hamming',
+                   ]:
+        assert_raises(ValueError, get_window, winstr, 7)
+
+
+def test_not_needs_params():
+    for winstr in ['barthann',
+                   'bartlett',
+                   'blackman',
+                   'blackmanharris',
+                   'bohman',
+                   'boxcar',
+                   'cosine',
+                   'flattop',
+                   'hamming',
+                   'nuttall',
+                   'parzen',
+                   'taylor',
+                   'exponential',
+                   'poisson',
+                   'tukey',
+                   'tuk',
+                   'triangle',
+                   'lanczos',
+                   'sinc',
+                   ]:
+        win = get_window(winstr, 7)
+        assert_equal(len(win), 7)
+
+
+def test_deprecation():
+    if dep_hann.__doc__ is not None:  # can be None with `-OO` mode
+        assert_('signal.hann is deprecated' in dep_hann.__doc__)
+        assert_('deprecated' not in windows.hann.__doc__)
+
+
+def test_deprecated_pickleable():
+    dep_hann2 = pickle.loads(pickle.dumps(dep_hann))
+    assert_(dep_hann2 is dep_hann)
+
+
+def test_symmetric():
+    for win in [windows.lanczos]:
+        # Even sampling points
+        w = win(4096)
+        error = np.max(np.abs(w-np.flip(w)))
+        assert_equal(error, 0.0)
+
+        # Odd sampling points
+        w = win(4097)
+        error = np.max(np.abs(w-np.flip(w)))
+        assert_equal(error, 0.0)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/waveforms.py b/__packaged__/coreml/.python_dependencies/scipy/signal/waveforms.py
new file mode 100644
index 00000000..7c561b36
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/waveforms.py
@@ -0,0 +1,29 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.signal` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _waveforms
+
+__all__ = [  # noqa: F822
+    'sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly',
+    'unit_impulse', 'place', 'nan', 'mod', 'extract', 'log', 'exp',
+    'polyval', 'polyint'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.signal.waveforms is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.signal instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.signal` namespace, "
+                  "the `scipy.signal.waveforms` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_waveforms, name)
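+# NOTE (illustrative, not from the upstream SciPy sources): accessing any
+# name listed in __all__ through this shim goes through __getattr__ above
+# and emits a DeprecationWarning, e.g.:
+#
+#     >>> import warnings
+#     >>> from scipy.signal import waveforms
+#     >>> with warnings.catch_warnings(record=True) as caught:
+#     ...     warnings.simplefilter("always")
+#     ...     func = waveforms.sawtooth
+#     >>> issubclass(caught[-1].category, DeprecationWarning)
+#     True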
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/wavelets.py b/__packaged__/coreml/.python_dependencies/scipy/signal/wavelets.py
new file mode 100644
index 00000000..2de6ff4c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/wavelets.py
@@ -0,0 +1,28 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.signal` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _wavelets
+
+__all__ = [  # noqa: F822
+    'daub', 'qmf', 'cascade', 'morlet', 'ricker', 'morlet2', 'cwt',
+    'eig', 'comb', 'convolve'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.signal.wavelets is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.signal instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.signal` namespace, "
+                  "the `scipy.signal.wavelets` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_wavelets, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/windows/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/signal/windows/__init__.py
new file mode 100644
index 00000000..967a7c75
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/windows/__init__.py
@@ -0,0 +1,52 @@
+"""
+Window functions (:mod:`scipy.signal.windows`)
+==============================================
+
+The suite of window functions for filtering and spectral estimation.
+
+.. currentmodule:: scipy.signal.windows
+
+.. autosummary::
+   :toctree: generated/
+
+   get_window              -- Return a window of a given length and type.
+
+   barthann                -- Bartlett-Hann window
+   bartlett                -- Bartlett window
+   blackman                -- Blackman window
+   blackmanharris          -- Minimum 4-term Blackman-Harris window
+   bohman                  -- Bohman window
+   boxcar                  -- Boxcar window
+   chebwin                 -- Dolph-Chebyshev window
+   cosine                  -- Cosine window
+   dpss                    -- Discrete prolate spheroidal sequences
+   exponential             -- Exponential window
+   flattop                 -- Flat top window
+   gaussian                -- Gaussian window
+   general_cosine          -- Generalized Cosine window
+   general_gaussian        -- Generalized Gaussian window
+   general_hamming         -- Generalized Hamming window
+   hamming                 -- Hamming window
+   hann                    -- Hann window
+   kaiser                  -- Kaiser window
+   kaiser_bessel_derived   -- Kaiser-Bessel derived window
+   lanczos                 -- Lanczos window, also known as a sinc window
+   nuttall                 -- Nuttall's minimum 4-term Blackman-Harris window
+   parzen                  -- Parzen window
+   taylor                  -- Taylor window
+   triang                  -- Triangular window
+   tukey                   -- Tukey window
+
+"""
+
+from ._windows import *
+
+# Deprecated namespaces, to be removed in v2.0.0
+from . import windows
+
+__all__ = ['boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall',
+           'blackmanharris', 'flattop', 'bartlett', 'barthann',
+           'hamming', 'kaiser', 'kaiser_bessel_derived', 'gaussian',
+           'general_gaussian', 'general_cosine', 'general_hamming',
+           'chebwin', 'cosine', 'hann', 'exponential', 'tukey', 'taylor',
+           'get_window', 'dpss', 'lanczos']
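+# NOTE (illustrative, not from the upstream SciPy sources): get_window
+# accepts a window name, a (name, *params) tuple for parametrized windows,
+# or a bare float, which is shorthand for a Kaiser window with that beta:
+#
+#     >>> from scipy.signal.windows import get_window
+#     >>> w1 = get_window('hann', 8)            # by name
+#     >>> w2 = get_window(('kaiser', 4.0), 8)   # (name, *params) tuple
+#     >>> w3 = get_window(4.0, 8)               # same as ('kaiser', 4.0)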
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/windows/_windows.py b/__packaged__/coreml/.python_dependencies/scipy/signal/windows/_windows.py
new file mode 100644
index 00000000..c052f47e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/windows/_windows.py
@@ -0,0 +1,2374 @@
+"""The suite of window functions."""
+
+import operator
+import warnings
+
+import numpy as np
+from scipy import linalg, special, fft as sp_fft
+
+__all__ = ['boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall',
+           'blackmanharris', 'flattop', 'bartlett', 'barthann',
+           'hamming', 'kaiser', 'kaiser_bessel_derived', 'gaussian',
+           'general_cosine', 'general_gaussian', 'general_hamming',
+           'chebwin', 'cosine', 'hann', 'exponential', 'tukey', 'taylor',
+           'dpss', 'get_window', 'lanczos']
+
+
+def _len_guards(M):
+    """Handle small or incorrect window lengths"""
+    if int(M) != M or M < 0:
+        raise ValueError('Window length M must be a non-negative integer')
+    return M <= 1
+
+
+def _extend(M, sym):
+    """Extend window by 1 sample if needed for DFT-even symmetry"""
+    if not sym:
+        return M + 1, True
+    else:
+        return M, False
+
+
+def _truncate(w, needed):
+    """Truncate window by 1 sample if needed for DFT-even symmetry"""
+    if needed:
+        return w[:-1]
+    else:
+        return w
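+# NOTE (illustrative, not from the upstream SciPy sources): the three
+# helpers above implement the "DFT-even" trick used by every window below --
+# a periodic (sym=False) window of length M is computed as a symmetric
+# window of length M + 1 with the last sample dropped, e.g.:
+#
+#     >>> import numpy as np
+#     >>> from scipy.signal import windows
+#     >>> np.allclose(windows.hann(8, sym=False),
+#     ...             windows.hann(9, sym=True)[:-1])
+#     True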
+
+
+def general_cosine(M, a, sym=True):
+    r"""
+    Generic weighted sum of cosine terms window
+
+    Parameters
+    ----------
+    M : int
+        Number of points in the output window
+    a : array_like
+        Sequence of weighting coefficients. This uses the convention of being
+        centered on the origin, so these will typically all be positive
+        numbers, not alternating sign.
+    sym : bool, optional
+        When True (default), generates a symmetric window, for use in filter
+        design.
+        When False, generates a periodic window, for use in spectral analysis.
+
+    Returns
+    -------
+    w : ndarray
+        The array of window values.
+
+    References
+    ----------
+    .. [1] A. Nuttall, "Some windows with very good sidelobe behavior," IEEE
+           Transactions on Acoustics, Speech, and Signal Processing, vol. 29,
+           no. 1, pp. 84-91, Feb 1981. :doi:`10.1109/TASSP.1981.1163506`.
+    .. [2] Heinzel G. et al., "Spectrum and spectral density estimation by the
+           Discrete Fourier transform (DFT), including a comprehensive list of
+           window functions and some new flat-top windows", February 15, 2002
+           https://holometer.fnal.gov/GH_FFT.pdf
+
+    Examples
+    --------
+    Heinzel describes a flat-top window named "HFT90D" with formula: [2]_
+
+    .. math::  w_j = 1 - 1.942604 \cos(z) + 1.340318 \cos(2z)
+               - 0.440811 \cos(3z) + 0.043097 \cos(4z)
+
+    where
+
+    .. math::  z = \frac{2 \pi j}{N}, j = 0...N - 1
+
+    Since this uses the convention of starting at the origin, to reproduce the
+    window, we need to convert every other coefficient to a positive number:
+
+    >>> HFT90D = [1, 1.942604, 1.340318, 0.440811, 0.043097]
+
+    The paper states that the highest sidelobe is at -90.2 dB.  Reproduce
+    Figure 42 by plotting the window and its frequency response, and confirm
+    the sidelobe level in red:
+
+    >>> import numpy as np
+    >>> from scipy.signal.windows import general_cosine
+    >>> from scipy.fft import fft, fftshift
+    >>> import matplotlib.pyplot as plt
+
+    >>> window = general_cosine(1000, HFT90D, sym=False)
+    >>> plt.plot(window)
+    >>> plt.title("HFT90D window")
+    >>> plt.ylabel("Amplitude")
+    >>> plt.xlabel("Sample")
+
+    >>> plt.figure()
+    >>> A = fft(window, 10000) / (len(window)/2.0)
+    >>> freq = np.linspace(-0.5, 0.5, len(A))
+    >>> response = np.abs(fftshift(A / abs(A).max()))
+    >>> response = 20 * np.log10(np.maximum(response, 1e-10))
+    >>> plt.plot(freq, response)
+    >>> plt.axis([-50/1000, 50/1000, -140, 0])
+    >>> plt.title("Frequency response of the HFT90D window")
+    >>> plt.ylabel("Normalized magnitude [dB]")
+    >>> plt.xlabel("Normalized frequency [cycles per sample]")
+    >>> plt.axhline(-90.2, color='red')
+    >>> plt.show()
+    """
+    if _len_guards(M):
+        return np.ones(M)
+    M, needs_trunc = _extend(M, sym)
+
+    fac = np.linspace(-np.pi, np.pi, M)
+    w = np.zeros(M)
+    for k in range(len(a)):
+        w += a[k] * np.cos(k * fac)
+
+    return _truncate(w, needs_trunc)
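+# NOTE (illustrative, not from the upstream SciPy sources): the classic
+# cosine-sum windows below are thin wrappers around general_cosine; for
+# instance the Blackman window further down is exactly
+#
+#     >>> import numpy as np
+#     >>> from scipy.signal import windows
+#     >>> np.allclose(windows.blackman(51),
+#     ...             windows.general_cosine(51, [0.42, 0.50, 0.08]))
+#     True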
+
+
+def boxcar(M, sym=True):
+    """Return a boxcar or rectangular window.
+
+    Also known as a rectangular window or Dirichlet window, this is equivalent
+    to no window at all.
+
+    Parameters
+    ----------
+    M : int
+        Number of points in the output window. If zero, an empty array
+        is returned. An exception is thrown when it is negative.
+    sym : bool, optional
+        Whether the window is symmetric. (Has no effect for boxcar.)
+
+    Returns
+    -------
+    w : ndarray
+        The window, with the maximum value normalized to 1.
+
+    Examples
+    --------
+    Plot the window and its frequency response:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> from scipy.fft import fft, fftshift
+    >>> import matplotlib.pyplot as plt
+
+    >>> window = signal.windows.boxcar(51)
+    >>> plt.plot(window)
+    >>> plt.title("Boxcar window")
+    >>> plt.ylabel("Amplitude")
+    >>> plt.xlabel("Sample")
+
+    >>> plt.figure()
+    >>> A = fft(window, 2048) / (len(window)/2.0)
+    >>> freq = np.linspace(-0.5, 0.5, len(A))
+    >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
+    >>> plt.plot(freq, response)
+    >>> plt.axis([-0.5, 0.5, -120, 0])
+    >>> plt.title("Frequency response of the boxcar window")
+    >>> plt.ylabel("Normalized magnitude [dB]")
+    >>> plt.xlabel("Normalized frequency [cycles per sample]")
+
+    """
+    if _len_guards(M):
+        return np.ones(M)
+    M, needs_trunc = _extend(M, sym)
+
+    w = np.ones(M, float)
+
+    return _truncate(w, needs_trunc)
+
+
+def triang(M, sym=True):
+    """Return a triangular window.
+
+    Parameters
+    ----------
+    M : int
+        Number of points in the output window. If zero, an empty array
+        is returned. An exception is thrown when it is negative.
+    sym : bool, optional
+        When True (default), generates a symmetric window, for use in filter
+        design.
+        When False, generates a periodic window, for use in spectral analysis.
+
+    Returns
+    -------
+    w : ndarray
+        The window, with the maximum value normalized to 1 (though the value 1
+        does not appear if `M` is even and `sym` is True).
+
+    See Also
+    --------
+    bartlett : A triangular window that touches zero
+
+    Examples
+    --------
+    Plot the window and its frequency response:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> from scipy.fft import fft, fftshift
+    >>> import matplotlib.pyplot as plt
+
+    >>> window = signal.windows.triang(51)
+    >>> plt.plot(window)
+    >>> plt.title("Triangular window")
+    >>> plt.ylabel("Amplitude")
+    >>> plt.xlabel("Sample")
+
+    >>> plt.figure()
+    >>> A = fft(window, 2048) / (len(window)/2.0)
+    >>> freq = np.linspace(-0.5, 0.5, len(A))
+    >>> response = np.abs(fftshift(A / abs(A).max()))
+    >>> response = 20 * np.log10(np.maximum(response, 1e-10))
+    >>> plt.plot(freq, response)
+    >>> plt.axis([-0.5, 0.5, -120, 0])
+    >>> plt.title("Frequency response of the triangular window")
+    >>> plt.ylabel("Normalized magnitude [dB]")
+    >>> plt.xlabel("Normalized frequency [cycles per sample]")
+
+    """
+    if _len_guards(M):
+        return np.ones(M)
+    M, needs_trunc = _extend(M, sym)
+
+    n = np.arange(1, (M + 1) // 2 + 1)
+    if M % 2 == 0:
+        w = (2 * n - 1.0) / M
+        w = np.r_[w, w[::-1]]
+    else:
+        w = 2 * n / (M + 1.0)
+        w = np.r_[w, w[-2::-1]]
+
+    return _truncate(w, needs_trunc)
+
+
+def parzen(M, sym=True):
+    """Return a Parzen window.
+
+    Parameters
+    ----------
+    M : int
+        Number of points in the output window. If zero, an empty array
+        is returned. An exception is thrown when it is negative.
+    sym : bool, optional
+        When True (default), generates a symmetric window, for use in filter
+        design.
+        When False, generates a periodic window, for use in spectral analysis.
+
+    Returns
+    -------
+    w : ndarray
+        The window, with the maximum value normalized to 1 (though the value 1
+        does not appear if `M` is even and `sym` is True).
+
+    References
+    ----------
+    .. [1] E. Parzen, "Mathematical Considerations in the Estimation of
+           Spectra", Technometrics,  Vol. 3, No. 2 (May, 1961), pp. 167-190
+
+    Examples
+    --------
+    Plot the window and its frequency response:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> from scipy.fft import fft, fftshift
+    >>> import matplotlib.pyplot as plt
+
+    >>> window = signal.windows.parzen(51)
+    >>> plt.plot(window)
+    >>> plt.title("Parzen window")
+    >>> plt.ylabel("Amplitude")
+    >>> plt.xlabel("Sample")
+
+    >>> plt.figure()
+    >>> A = fft(window, 2048) / (len(window)/2.0)
+    >>> freq = np.linspace(-0.5, 0.5, len(A))
+    >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
+    >>> plt.plot(freq, response)
+    >>> plt.axis([-0.5, 0.5, -120, 0])
+    >>> plt.title("Frequency response of the Parzen window")
+    >>> plt.ylabel("Normalized magnitude [dB]")
+    >>> plt.xlabel("Normalized frequency [cycles per sample]")
+
+    """
+    if _len_guards(M):
+        return np.ones(M)
+    M, needs_trunc = _extend(M, sym)
+
+    n = np.arange(-(M - 1) / 2.0, (M - 1) / 2.0 + 0.5, 1.0)
+    na = np.extract(n < -(M - 1) / 4.0, n)
+    nb = np.extract(abs(n) <= (M - 1) / 4.0, n)
+    wa = 2 * (1 - np.abs(na) / (M / 2.0)) ** 3.0
+    wb = (1 - 6 * (np.abs(nb) / (M / 2.0)) ** 2.0 +
+          6 * (np.abs(nb) / (M / 2.0)) ** 3.0)
+    w = np.r_[wa, wb, wa[::-1]]
+
+    return _truncate(w, needs_trunc)
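+# NOTE (illustrative, not from the upstream SciPy sources): the outermost
+# samples come from the cubic wa branch above; for M = 6 the first two
+# points are n = -2.5 and n = -1.5 with M/2 = 3:
+#
+#     >>> import numpy as np
+#     >>> from scipy.signal import windows
+#     >>> np.allclose(windows.parzen(6)[:2],
+#     ...             [2 * (1 - 2.5 / 3) ** 3, 2 * (1 - 1.5 / 3) ** 3])
+#     True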
+
+
+def bohman(M, sym=True):
+    """Return a Bohman window.
+
+    Parameters
+    ----------
+    M : int
+        Number of points in the output window. If zero, an empty array
+        is returned. An exception is thrown when it is negative.
+    sym : bool, optional
+        When True (default), generates a symmetric window, for use in filter
+        design.
+        When False, generates a periodic window, for use in spectral analysis.
+
+    Returns
+    -------
+    w : ndarray
+        The window, with the maximum value normalized to 1 (though the value 1
+        does not appear if `M` is even and `sym` is True).
+
+    Examples
+    --------
+    Plot the window and its frequency response:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> from scipy.fft import fft, fftshift
+    >>> import matplotlib.pyplot as plt
+
+    >>> window = signal.windows.bohman(51)
+    >>> plt.plot(window)
+    >>> plt.title("Bohman window")
+    >>> plt.ylabel("Amplitude")
+    >>> plt.xlabel("Sample")
+
+    >>> plt.figure()
+    >>> A = fft(window, 2048) / (len(window)/2.0)
+    >>> freq = np.linspace(-0.5, 0.5, len(A))
+    >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
+    >>> plt.plot(freq, response)
+    >>> plt.axis([-0.5, 0.5, -120, 0])
+    >>> plt.title("Frequency response of the Bohman window")
+    >>> plt.ylabel("Normalized magnitude [dB]")
+    >>> plt.xlabel("Normalized frequency [cycles per sample]")
+
+    """
+    if _len_guards(M):
+        return np.ones(M)
+    M, needs_trunc = _extend(M, sym)
+
+    fac = np.abs(np.linspace(-1, 1, M)[1:-1])
+    w = (1 - fac) * np.cos(np.pi * fac) + 1.0 / np.pi * np.sin(np.pi * fac)
+    w = np.r_[0, w, 0]
+
+    return _truncate(w, needs_trunc)
+
+
+def blackman(M, sym=True):
+    r"""
+    Return a Blackman window.
+
+    The Blackman window is a taper formed by using the first three terms of
+    a summation of cosines. It was designed to have close to the minimal
+    leakage possible.  It is close to optimal, only slightly worse than a
+    Kaiser window.
+
+    Parameters
+    ----------
+    M : int
+        Number of points in the output window. If zero, an empty array
+        is returned. An exception is thrown when it is negative.
+    sym : bool, optional
+        When True (default), generates a symmetric window, for use in filter
+        design.
+        When False, generates a periodic window, for use in spectral analysis.
+
+    Returns
+    -------
+    w : ndarray
+        The window, with the maximum value normalized to 1 (though the value 1
+        does not appear if `M` is even and `sym` is True).
+
+    Notes
+    -----
+    The Blackman window is defined as
+
+    .. math::  w(n) = 0.42 - 0.5 \cos(2\pi n/M) + 0.08 \cos(4\pi n/M)
+
+    The "exact Blackman" window was designed to null out the third and fourth
+    sidelobes, but has discontinuities at the boundaries, resulting in a
+    6 dB/oct fall-off.  This window is an approximation of the "exact" window,
+    which does not null the sidelobes as well, but is smooth at the edges,
+    improving the fall-off rate to 18 dB/oct. [3]_
+
+    Most references to the Blackman window come from the signal processing
+    literature, where it is used as one of many windowing functions for
+    smoothing values.  It is also known as an apodization (which means
+    "removing the foot", i.e. smoothing discontinuities at the beginning
+    and end of the sampled signal) or tapering function. It is known as a
+    "near optimal" tapering function, almost as good (by some measures)
+    as the Kaiser window.
+
+    References
+    ----------
+    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
+           spectra, Dover Publications, New York.
+    .. [2] Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
+           Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
+    .. [3] Harris, Fredric J. (Jan 1978). "On the use of Windows for Harmonic
+           Analysis with the Discrete Fourier Transform". Proceedings of the
+           IEEE 66 (1): 51-83. :doi:`10.1109/PROC.1978.10837`.
+
+    Examples
+    --------
+    Plot the window and its frequency response:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> from scipy.fft import fft, fftshift
+    >>> import matplotlib.pyplot as plt
+
+    >>> window = signal.windows.blackman(51)
+    >>> plt.plot(window)
+    >>> plt.title("Blackman window")
+    >>> plt.ylabel("Amplitude")
+    >>> plt.xlabel("Sample")
+
+    >>> plt.figure()
+    >>> A = fft(window, 2048) / (len(window)/2.0)
+    >>> freq = np.linspace(-0.5, 0.5, len(A))
+    >>> response = np.abs(fftshift(A / abs(A).max()))
+    >>> response = 20 * np.log10(np.maximum(response, 1e-10))
+    >>> plt.plot(freq, response)
+    >>> plt.axis([-0.5, 0.5, -120, 0])
+    >>> plt.title("Frequency response of the Blackman window")
+    >>> plt.ylabel("Normalized magnitude [dB]")
+    >>> plt.xlabel("Normalized frequency [cycles per sample]")
+
+    """
+    # Docstring adapted from NumPy's blackman function
+    return general_cosine(M, [0.42, 0.50, 0.08], sym)
+
+
+def nuttall(M, sym=True):
+    """Return a minimum 4-term Blackman-Harris window according to Nuttall.
+
+    This variation is called "Nuttall4c" by Heinzel. [2]_
+
+    Parameters
+    ----------
+    M : int
+        Number of points in the output window. If zero, an empty array
+        is returned. An exception is thrown when it is negative.
+    sym : bool, optional
+        When True (default), generates a symmetric window, for use in filter
+        design.
+        When False, generates a periodic window, for use in spectral analysis.
+
+    Returns
+    -------
+    w : ndarray
+        The window, with the maximum value normalized to 1 (though the value 1
+        does not appear if `M` is even and `sym` is True).
+
+    References
+    ----------
+    .. [1] A. Nuttall, "Some windows with very good sidelobe behavior," IEEE
+           Transactions on Acoustics, Speech, and Signal Processing, vol. 29,
+           no. 1, pp. 84-91, Feb 1981. :doi:`10.1109/TASSP.1981.1163506`.
+    .. [2] Heinzel G. et al., "Spectrum and spectral density estimation by the
+           Discrete Fourier transform (DFT), including a comprehensive list of
+           window functions and some new flat-top windows", February 15, 2002
+           https://holometer.fnal.gov/GH_FFT.pdf
+
+    Examples
+    --------
+    Plot the window and its frequency response:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> from scipy.fft import fft, fftshift
+    >>> import matplotlib.pyplot as plt
+
+    >>> window = signal.windows.nuttall(51)
+    >>> plt.plot(window)
+    >>> plt.title("Nuttall window")
+    >>> plt.ylabel("Amplitude")
+    >>> plt.xlabel("Sample")
+
+    >>> plt.figure()
+    >>> A = fft(window, 2048) / (len(window)/2.0)
+    >>> freq = np.linspace(-0.5, 0.5, len(A))
+    >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
+    >>> plt.plot(freq, response)
+    >>> plt.axis([-0.5, 0.5, -120, 0])
+    >>> plt.title("Frequency response of the Nuttall window")
+    >>> plt.ylabel("Normalized magnitude [dB]")
+    >>> plt.xlabel("Normalized frequency [cycles per sample]")
+
+    """
+    return general_cosine(M, [0.3635819, 0.4891775, 0.1365995, 0.0106411], sym)
+
+
+def blackmanharris(M, sym=True):
+    """Return a minimum 4-term Blackman-Harris window.
+
+    Parameters
+    ----------
+    M : int
+        Number of points in the output window. If zero, an empty array
+        is returned. An exception is thrown when it is negative.
+    sym : bool, optional
+        When True (default), generates a symmetric window, for use in filter
+        design.
+        When False, generates a periodic window, for use in spectral analysis.
+
+    Returns
+    -------
+    w : ndarray
+        The window, with the maximum value normalized to 1 (though the value 1
+        does not appear if `M` is even and `sym` is True).
+
+    Examples
+    --------
+    Plot the window and its frequency response:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> from scipy.fft import fft, fftshift
+    >>> import matplotlib.pyplot as plt
+
+    >>> window = signal.windows.blackmanharris(51)
+    >>> plt.plot(window)
+    >>> plt.title("Blackman-Harris window")
+    >>> plt.ylabel("Amplitude")
+    >>> plt.xlabel("Sample")
+
+    >>> plt.figure()
+    >>> A = fft(window, 2048) / (len(window)/2.0)
+    >>> freq = np.linspace(-0.5, 0.5, len(A))
+    >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
+    >>> plt.plot(freq, response)
+    >>> plt.axis([-0.5, 0.5, -120, 0])
+    >>> plt.title("Frequency response of the Blackman-Harris window")
+    >>> plt.ylabel("Normalized magnitude [dB]")
+    >>> plt.xlabel("Normalized frequency [cycles per sample]")
+
+    """
+    return general_cosine(M, [0.35875, 0.48829, 0.14128, 0.01168], sym)
+
+
+def flattop(M, sym=True):
+    """Return a flat top window.
+
+    Parameters
+    ----------
+    M : int
+        Number of points in the output window. If zero, an empty array
+        is returned. An exception is thrown when it is negative.
+    sym : bool, optional
+        When True (default), generates a symmetric window, for use in filter
+        design.
+        When False, generates a periodic window, for use in spectral analysis.
+
+    Returns
+    -------
+    w : ndarray
+        The window, with the maximum value normalized to 1 (though the value 1
+        does not appear if `M` is even and `sym` is True).
+
+    Notes
+    -----
+    Flat top windows are used for taking accurate measurements of signal
+    amplitude in the frequency domain, with minimal scalloping error from the
+    center of a frequency bin to its edges, compared to others.  This is a
+    5th-order cosine window, with the 5 terms optimized to make the main lobe
+    maximally flat. [1]_
+
+    References
+    ----------
+    .. [1] D'Antona, Gabriele, and A. Ferrero, "Digital Signal Processing for
+           Measurement Systems", Springer Media, 2006, p. 70
+           :doi:`10.1007/0-387-28666-7`.
+
+    Examples
+    --------
+    Plot the window and its frequency response:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> from scipy.fft import fft, fftshift
+    >>> import matplotlib.pyplot as plt
+
+    >>> window = signal.windows.flattop(51)
+    >>> plt.plot(window)
+    >>> plt.title("Flat top window")
+    >>> plt.ylabel("Amplitude")
+    >>> plt.xlabel("Sample")
+
+    >>> plt.figure()
+    >>> A = fft(window, 2048) / (len(window)/2.0)
+    >>> freq = np.linspace(-0.5, 0.5, len(A))
+    >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
+    >>> plt.plot(freq, response)
+    >>> plt.axis([-0.5, 0.5, -120, 0])
+    >>> plt.title("Frequency response of the flat top window")
+    >>> plt.ylabel("Normalized magnitude [dB]")
+    >>> plt.xlabel("Normalized frequency [cycles per sample]")
+
+    """
+    a = [0.21557895, 0.41663158, 0.277263158, 0.083578947, 0.006947368]
+    return general_cosine(M, a, sym)
+
+
+def bartlett(M, sym=True):
+    r"""
+    Return a Bartlett window.
+
+    The Bartlett window is very similar to a triangular window, except
+    that the end points are at zero.  It is often used in signal
+    processing for tapering a signal, without generating too much
+    ripple in the frequency domain.
+
+    Parameters
+    ----------
+    M : int
+        Number of points in the output window. If zero, an empty array
+        is returned. An exception is thrown when it is negative.
+    sym : bool, optional
+        When True (default), generates a symmetric window, for use in filter
+        design.
+        When False, generates a periodic window, for use in spectral analysis.
+
+    Returns
+    -------
+    w : ndarray
+        The triangular window, with the first and last samples equal to zero
+        and the maximum value normalized to 1 (though the value 1 does not
+        appear if `M` is even and `sym` is True).
+
+    See Also
+    --------
+    triang : A triangular window that does not touch zero at the ends
+
+    Notes
+    -----
+    The Bartlett window is defined as
+
+    .. math:: w(n) = \frac{2}{M-1} \left(
+              \frac{M-1}{2} - \left|n - \frac{M-1}{2}\right|
+              \right)
+
+    Most references to the Bartlett window come from the signal
+    processing literature, where it is used as one of many windowing
+    functions for smoothing values.  Note that convolution with this
+    window produces linear interpolation.  It is also known as an
+    apodization (which means "removing the foot", i.e. smoothing
+    discontinuities at the beginning and end of the sampled signal) or
+    tapering function. The Fourier transform of the Bartlett window is the
+    product of two sinc functions.
+    Note the excellent discussion in Kanasewich. [2]_
+
+    References
+    ----------
+    .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
+           Biometrika 37, 1-16, 1950.
+    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
+           The University of Alberta Press, 1975, pp. 109-110.
+    .. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
+           Processing", Prentice-Hall, 1999, pp. 468-471.
+    .. [4] Wikipedia, "Window function",
+           https://en.wikipedia.org/wiki/Window_function
+    .. [5] W.H. Press,  B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
+           "Numerical Recipes", Cambridge University Press, 1986, page 429.
+
+    Examples
+    --------
+    Plot the window and its frequency response:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> from scipy.fft import fft, fftshift
+    >>> import matplotlib.pyplot as plt
+
+    >>> window = signal.windows.bartlett(51)
+    >>> plt.plot(window)
+    >>> plt.title("Bartlett window")
+    >>> plt.ylabel("Amplitude")
+    >>> plt.xlabel("Sample")
+
+    >>> plt.figure()
+    >>> A = fft(window, 2048) / (len(window)/2.0)
+    >>> freq = np.linspace(-0.5, 0.5, len(A))
+    >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
+    >>> plt.plot(freq, response)
+    >>> plt.axis([-0.5, 0.5, -120, 0])
+    >>> plt.title("Frequency response of the Bartlett window")
+    >>> plt.ylabel("Normalized magnitude [dB]")
+    >>> plt.xlabel("Normalized frequency [cycles per sample]")
+
+    """
+    # Docstring adapted from NumPy's bartlett function
+    if _len_guards(M):
+        return np.ones(M)
+    M, needs_trunc = _extend(M, sym)
+
+    n = np.arange(0, M)
+    w = np.where(np.less_equal(n, (M - 1) / 2.0),
+                 2.0 * n / (M - 1), 2.0 - 2.0 * n / (M - 1))
+
+    return _truncate(w, needs_trunc)
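+# NOTE (illustrative, not from the upstream SciPy sources): unlike triang,
+# bartlett pins both end points to zero:
+#
+#     >>> import numpy as np
+#     >>> from scipy.signal import windows
+#     >>> np.allclose(windows.bartlett(5), [0, 0.5, 1, 0.5, 0])
+#     True
+#     >>> np.allclose(windows.triang(5), [1/3, 2/3, 1, 2/3, 1/3])
+#     True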
+
+
+def hann(M, sym=True):
+    r"""
+    Return a Hann window.
+
+    The Hann window is a taper formed by using a raised cosine or sine-squared
+    with ends that touch zero.
+
+    Parameters
+    ----------
+    M : int
+        Number of points in the output window. If zero, an empty array
+        is returned. An exception is thrown when it is negative.
+    sym : bool, optional
+        When True (default), generates a symmetric window, for use in filter
+        design.
+        When False, generates a periodic window, for use in spectral analysis.
+
+    Returns
+    -------
+    w : ndarray
+        The window, with the maximum value normalized to 1 (though the value 1
+        does not appear if `M` is even and `sym` is True).
+
+    Notes
+    -----
+    The Hann window is defined as
+
+    .. math::  w(n) = 0.5 - 0.5 \cos\left(\frac{2\pi{n}}{M-1}\right)
+               \qquad 0 \leq n \leq M-1
+
+    The window was named for Julius von Hann, an Austrian meteorologist. It is
+    also known as the Cosine Bell. It is sometimes erroneously referred to as
+    the "Hanning" window, from the use of "hann" as a verb in the original
+    paper and confusion with the very similar Hamming window.
+
+    Most references to the Hann window come from the signal processing
+    literature, where it is used as one of many windowing functions for
+    smoothing values.  It is also known as an apodization (which means
+    "removing the foot", i.e. smoothing discontinuities at the beginning
+    and end of the sampled signal) or tapering function.
+
+    References
+    ----------
+    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
+           spectra, Dover Publications, New York.
+    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
+           The University of Alberta Press, 1975, pp. 106-108.
+    .. [3] Wikipedia, "Window function",
+           https://en.wikipedia.org/wiki/Window_function
+    .. [4] W.H. Press,  B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
+           "Numerical Recipes", Cambridge University Press, 1986, page 425.
+
+    Examples
+    --------
+    Plot the window and its frequency response:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> from scipy.fft import fft, fftshift
+    >>> import matplotlib.pyplot as plt
+
+    >>> window = signal.windows.hann(51)
+    >>> plt.plot(window)
+    >>> plt.title("Hann window")
+    >>> plt.ylabel("Amplitude")
+    >>> plt.xlabel("Sample")
+
+    >>> plt.figure()
+    >>> A = fft(window, 2048) / (len(window)/2.0)
+    >>> freq = np.linspace(-0.5, 0.5, len(A))
+    >>> response = np.abs(fftshift(A / abs(A).max()))
+    >>> response = 20 * np.log10(np.maximum(response, 1e-10))
+    >>> plt.plot(freq, response)
+    >>> plt.axis([-0.5, 0.5, -120, 0])
+    >>> plt.title("Frequency response of the Hann window")
+    >>> plt.ylabel("Normalized magnitude [dB]")
+    >>> plt.xlabel("Normalized frequency [cycles per sample]")
+
+    """
+    # Docstring adapted from NumPy's hanning function
+    return general_hamming(M, 0.5, sym)
+
+
+def tukey(M, alpha=0.5, sym=True):
+    r"""Return a Tukey window, also known as a tapered cosine window.
+
+    Parameters
+    ----------
+    M : int
+        Number of points in the output window. If zero, an empty array
+        is returned. An exception is thrown when it is negative.
+    alpha : float, optional
+        Shape parameter of the Tukey window, representing the fraction of the
+        window inside the cosine tapered region.
+        If zero, the Tukey window is equivalent to a rectangular window.
+        If one, the Tukey window is equivalent to a Hann window.
+    sym : bool, optional
+        When True (default), generates a symmetric window, for use in filter
+        design.
+        When False, generates a periodic window, for use in spectral analysis.
+
+    Returns
+    -------
+    w : ndarray
+        The window, with the maximum value normalized to 1 (though the value 1
+        does not appear if `M` is even and `sym` is True).
+
+    References
+    ----------
+    .. [1] Harris, Fredric J. (Jan 1978). "On the use of Windows for Harmonic
+           Analysis with the Discrete Fourier Transform". Proceedings of the
+           IEEE 66 (1): 51-83. :doi:`10.1109/PROC.1978.10837`
+    .. [2] Wikipedia, "Window function",
+           https://en.wikipedia.org/wiki/Window_function#Tukey_window
+
+    Examples
+    --------
+    Plot the window and its frequency response:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> from scipy.fft import fft, fftshift
+    >>> import matplotlib.pyplot as plt
+
+    >>> window = signal.windows.tukey(51)
+    >>> plt.plot(window)
+    >>> plt.title("Tukey window")
+    >>> plt.ylabel("Amplitude")
+    >>> plt.xlabel("Sample")
+    >>> plt.ylim([0, 1.1])
+
+    >>> plt.figure()
+    >>> A = fft(window, 2048) / (len(window)/2.0)
+    >>> freq = np.linspace(-0.5, 0.5, len(A))
+    >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
+    >>> plt.plot(freq, response)
+    >>> plt.axis([-0.5, 0.5, -120, 0])
+    >>> plt.title("Frequency response of the Tukey window")
+    >>> plt.ylabel("Normalized magnitude [dB]")
+    >>> plt.xlabel("Normalized frequency [cycles per sample]")
+
+    """
+    if _len_guards(M):
+        return np.ones(M)
+
+    if alpha <= 0:
+        return np.ones(M, 'd')
+    elif alpha >= 1.0:
+        return hann(M, sym=sym)
+
+    M, needs_trunc = _extend(M, sym)
+
+    n = np.arange(0, M)
+    width = int(np.floor(alpha*(M-1)/2.0))
+    n1 = n[0:width+1]
+    n2 = n[width+1:M-width-1]
+    n3 = n[M-width-1:]
+
+    w1 = 0.5 * (1 + np.cos(np.pi * (-1 + 2.0*n1/alpha/(M-1))))
+    w2 = np.ones(n2.shape)
+    w3 = 0.5 * (1 + np.cos(np.pi * (-2.0/alpha + 1 + 2.0*n3/alpha/(M-1))))
+
+    w = np.concatenate((w1, w2, w3))
+
+    return _truncate(w, needs_trunc)
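+# NOTE (illustrative, not from the upstream SciPy sources): the alpha
+# extremes handled by the early returns above reduce to familiar windows,
+# as the test suite verifies:
+#
+#     >>> import numpy as np
+#     >>> from scipy.signal import windows
+#     >>> np.allclose(windows.tukey(100, 0), windows.boxcar(100))
+#     True
+#     >>> np.allclose(windows.tukey(100, 1), windows.hann(100))
+#     True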
+
+
+def barthann(M, sym=True):
+    """Return a modified Bartlett-Hann window.
+
+    Parameters
+    ----------
+    M : int
+        Number of points in the output window. If zero, an empty array
+        is returned. An exception is thrown when it is negative.
+    sym : bool, optional
+        When True (default), generates a symmetric window, for use in filter
+        design.
+        When False, generates a periodic window, for use in spectral analysis.
+
+    Returns
+    -------
+    w : ndarray
+        The window, with the maximum value normalized to 1 (though the value 1
+        does not appear if `M` is even and `sym` is True).
+
+    Examples
+    --------
+    Plot the window and its frequency response:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> from scipy.fft import fft, fftshift
+    >>> import matplotlib.pyplot as plt
+
+    >>> window = signal.windows.barthann(51)
+    >>> plt.plot(window)
+    >>> plt.title("Bartlett-Hann window")
+    >>> plt.ylabel("Amplitude")
+    >>> plt.xlabel("Sample")
+
+    >>> plt.figure()
+    >>> A = fft(window, 2048) / (len(window)/2.0)
+    >>> freq = np.linspace(-0.5, 0.5, len(A))
+    >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
+    >>> plt.plot(freq, response)
+    >>> plt.axis([-0.5, 0.5, -120, 0])
+    >>> plt.title("Frequency response of the Bartlett-Hann window")
+    >>> plt.ylabel("Normalized magnitude [dB]")
+    >>> plt.xlabel("Normalized frequency [cycles per sample]")
+
+    """
+    if _len_guards(M):
+        return np.ones(M)
+    M, needs_trunc = _extend(M, sym)
+
+    n = np.arange(0, M)
+    fac = np.abs(n / (M - 1.0) - 0.5)
+    w = 0.62 - 0.48 * fac + 0.38 * np.cos(2 * np.pi * fac)
+
+    return _truncate(w, needs_trunc)
+
+
+def general_hamming(M, alpha, sym=True):
+    r"""Return a generalized Hamming window.
+
+    The generalized Hamming window is constructed by multiplying a rectangular
+    window by one period of a cosine function [1]_.
+
+    Parameters
+    ----------
+    M : int
+        Number of points in the output window. If zero, an empty array
+        is returned. An exception is thrown when it is negative.
+    alpha : float
+        The window coefficient, :math:`\alpha`
+    sym : bool, optional
+        When True (default), generates a symmetric window, for use in filter
+        design.
+        When False, generates a periodic window, for use in spectral analysis.
+
+    Returns
+    -------
+    w : ndarray
+        The window, with the maximum value normalized to 1 (though the value 1
+        does not appear if `M` is even and `sym` is True).
+
+    See Also
+    --------
+    hamming, hann
+
+    Notes
+    -----
+    The generalized Hamming window is defined as
+
+    .. math:: w(n) = \alpha - \left(1 - \alpha\right) \cos\left(\frac{2\pi{n}}{M-1}\right)
+              \qquad 0 \leq n \leq M-1
+
+    Both the common Hamming window and Hann window are special cases of the
+    generalized Hamming window with :math:`\alpha` = 0.54 and :math:`\alpha` =
+    0.5, respectively [2]_.
+
+    References
+    ----------
+    .. [1] DSPRelated, "Generalized Hamming Window Family",
+           https://www.dsprelated.com/freebooks/sasp/Generalized_Hamming_Window_Family.html
+    .. [2] Wikipedia, "Window function",
+           https://en.wikipedia.org/wiki/Window_function
+    .. [3] Riccardo Piantanida ESA, "Sentinel-1 Level 1 Detailed Algorithm
+           Definition",
+           https://sentinel.esa.int/documents/247904/1877131/Sentinel-1-Level-1-Detailed-Algorithm-Definition
+    .. [4] Matthieu Bourbigot ESA, "Sentinel-1 Product Definition",
+           https://sentinel.esa.int/documents/247904/1877131/Sentinel-1-Product-Definition
+
+    Examples
+    --------
+    The Sentinel-1A/B Instrument Processing Facility uses generalized Hamming
+    windows in the processing of spaceborne Synthetic Aperture Radar (SAR)
+    data [3]_. The facility uses various values for the :math:`\alpha`
+    parameter based on the operating mode of the SAR instrument. Some common
+    :math:`\alpha` values include 0.75, 0.7 and 0.52 [4]_. As an example, we
+    plot these different windows.
+
+    >>> import numpy as np
+    >>> from scipy.signal.windows import general_hamming
+    >>> from scipy.fft import fft, fftshift
+    >>> import matplotlib.pyplot as plt
+
+    >>> fig1, spatial_plot = plt.subplots()
+    >>> spatial_plot.set_title("Generalized Hamming Windows")
+    >>> spatial_plot.set_ylabel("Amplitude")
+    >>> spatial_plot.set_xlabel("Sample")
+
+    >>> fig2, freq_plot = plt.subplots()
+    >>> freq_plot.set_title("Frequency Responses")
+    >>> freq_plot.set_ylabel("Normalized magnitude [dB]")
+    >>> freq_plot.set_xlabel("Normalized frequency [cycles per sample]")
+
+    >>> for alpha in [0.75, 0.7, 0.52]:
+    ...     window = general_hamming(41, alpha)
+    ...     spatial_plot.plot(window, label="{:.2f}".format(alpha))
+    ...     A = fft(window, 2048) / (len(window)/2.0)
+    ...     freq = np.linspace(-0.5, 0.5, len(A))
+    ...     response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
+    ...     freq_plot.plot(freq, response, label="{:.2f}".format(alpha))
+    >>> freq_plot.legend(loc="upper right")
+    >>> spatial_plot.legend(loc="upper right")
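+
+    A quick numerical check of the Hamming and Hann special cases noted
+    above:
+
+    >>> from scipy.signal.windows import hamming, hann
+    >>> np.allclose(general_hamming(41, 0.54), hamming(41))
+    True
+    >>> np.allclose(general_hamming(41, 0.5), hann(41))
+    True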
+
+    """
+    return general_cosine(M, [alpha, 1. - alpha], sym)
+
+
+def hamming(M, sym=True):
+    r"""Return a Hamming window.
+
+    The Hamming window is a taper formed by using a raised cosine with
+    non-zero endpoints, optimized to minimize the nearest side lobe.
+
+    Parameters
+    ----------
+    M : int
+        Number of points in the output window. If zero, an empty array
+        is returned. An exception is thrown when it is negative.
+    sym : bool, optional
+        When True (default), generates a symmetric window, for use in filter
+        design.
+        When False, generates a periodic window, for use in spectral analysis.
+
+    Returns
+    -------
+    w : ndarray
+        The window, with the maximum value normalized to 1 (though the value 1
+        does not appear if `M` is even and `sym` is True).
+
+    Notes
+    -----
+    The Hamming window is defined as
+
+    .. math::  w(n) = 0.54 - 0.46 \cos\left(\frac{2\pi{n}}{M-1}\right)
+               \qquad 0 \leq n \leq M-1
+
+    The Hamming window was named for R. W. Hamming, an associate of
+    J. W. Tukey, and is described in Blackman and Tukey [1]_. It was
+    recommended for smoothing the truncated autocovariance function in the
+    time domain.
+    Most references to the Hamming window come from the signal processing
+    literature, where it is used as one of many windowing functions for
+    smoothing values.  It is also known as an apodization (which means
+    "removing the foot", i.e. smoothing discontinuities at the beginning
+    and end of the sampled signal) or tapering function.
+
+    References
+    ----------
+    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
+           spectra, Dover Publications, New York.
+    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
+           University of Alberta Press, 1975, pp. 109-110.
+    .. [3] Wikipedia, "Window function",
+           https://en.wikipedia.org/wiki/Window_function
+    .. [4] W.H. Press,  B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
+           "Numerical Recipes", Cambridge University Press, 1986, page 425.
+
+    Examples
+    --------
+    Plot the window and its frequency response:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> from scipy.fft import fft, fftshift
+    >>> import matplotlib.pyplot as plt
+
+    >>> window = signal.windows.hamming(51)
+    >>> plt.plot(window)
+    >>> plt.title("Hamming window")
+    >>> plt.ylabel("Amplitude")
+    >>> plt.xlabel("Sample")
+
+    >>> plt.figure()
+    >>> A = fft(window, 2048) / (len(window)/2.0)
+    >>> freq = np.linspace(-0.5, 0.5, len(A))
+    >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
+    >>> plt.plot(freq, response)
+    >>> plt.axis([-0.5, 0.5, -120, 0])
+    >>> plt.title("Frequency response of the Hamming window")
+    >>> plt.ylabel("Normalized magnitude [dB]")
+    >>> plt.xlabel("Normalized frequency [cycles per sample]")
+
+    """
+    # Docstring adapted from NumPy's hamming function
+    return general_hamming(M, 0.54, sym)
+
+
+def kaiser(M, beta, sym=True):
+    r"""Return a Kaiser window.
+
+    The Kaiser window is a taper formed by using a Bessel function.
+
+    Parameters
+    ----------
+    M : int
+        Number of points in the output window. If zero, an empty array
+        is returned. An exception is thrown when it is negative.
+    beta : float
+        Shape parameter, determines trade-off between main-lobe width and
+        side lobe level. As beta gets large, the window narrows.
+    sym : bool, optional
+        When True (default), generates a symmetric window, for use in filter
+        design.
+        When False, generates a periodic window, for use in spectral analysis.
+
+    Returns
+    -------
+    w : ndarray
+        The window, with the maximum value normalized to 1 (though the value 1
+        does not appear if `M` is even and `sym` is True).
+
+    Notes
+    -----
+    The Kaiser window is defined as
+
+    .. math::  w(n) = I_0\left( \beta \sqrt{1-\frac{4n^2}{(M-1)^2}}
+               \right)/I_0(\beta)
+
+    with
+
+    .. math:: \quad -\frac{M-1}{2} \leq n \leq \frac{M-1}{2},
+
+    where :math:`I_0` is the modified zeroth-order Bessel function.
+
+    The Kaiser window was named for Jim Kaiser, who discovered a simple
+    approximation to the DPSS window based on Bessel functions.
+    The Kaiser window is a very good approximation to the Digital Prolate
+    Spheroidal Sequence, or Slepian window, which is the window that
+    maximizes the energy in the main lobe relative to the total energy.
+
+    The Kaiser can approximate other windows by varying the beta parameter.
+    (Some literature uses alpha = beta/pi.) [4]_
+
+    ====  =======================
+    beta  Window shape
+    ====  =======================
+    0     Rectangular
+    5     Similar to a Hamming
+    6     Similar to a Hann
+    8.6   Similar to a Blackman
+    ====  =======================
+
+    A beta value of 14 is probably a good starting point. Note that as beta
+    gets large, the window narrows, and so the number of samples needs to be
+    large enough to sample the increasingly narrow spike, otherwise NaNs will
+    be returned.
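+
+    For example, ``beta = 0`` reproduces the rectangular window exactly,
+    since :math:`I_0(0) = 1`:
+
+    >>> import numpy as np
+    >>> from scipy.signal import windows
+    >>> np.allclose(windows.kaiser(11, beta=0), windows.boxcar(11))
+    True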
+
+    Most references to the Kaiser window come from the signal processing
+    literature, where it is used as one of many windowing functions for
+    smoothing values.  It is also known as an apodization (which means
+    "removing the foot", i.e. smoothing discontinuities at the beginning
+    and end of the sampled signal) or tapering function.
+
+    References
+    ----------
+    .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
+           digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
+           John Wiley and Sons, New York, (1966).
+    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
+           University of Alberta Press, 1975, pp. 177-178.
+    .. [3] Wikipedia, "Window function",
+           https://en.wikipedia.org/wiki/Window_function
+    .. [4] F. J. Harris, "On the use of windows for harmonic analysis with the
+           discrete Fourier transform," Proceedings of the IEEE, vol. 66,
+           no. 1, pp. 51-83, Jan. 1978. :doi:`10.1109/PROC.1978.10837`.
+
+    Examples
+    --------
+    Plot the window and its frequency response:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> from scipy.fft import fft, fftshift
+    >>> import matplotlib.pyplot as plt
+
+    >>> window = signal.windows.kaiser(51, beta=14)
+    >>> plt.plot(window)
+    >>> plt.title(r"Kaiser window ($\beta$=14)")
+    >>> plt.ylabel("Amplitude")
+    >>> plt.xlabel("Sample")
+
+    >>> plt.figure()
+    >>> A = fft(window, 2048) / (len(window)/2.0)
+    >>> freq = np.linspace(-0.5, 0.5, len(A))
+    >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
+    >>> plt.plot(freq, response)
+    >>> plt.axis([-0.5, 0.5, -120, 0])
+    >>> plt.title(r"Frequency response of the Kaiser window ($\beta$=14)")
+    >>> plt.ylabel("Normalized magnitude [dB]")
+    >>> plt.xlabel("Normalized frequency [cycles per sample]")
+
+    """
+    # Docstring adapted from NumPy's kaiser function
+    if _len_guards(M):
+        return np.ones(M)
+    M, needs_trunc = _extend(M, sym)
+
+    n = np.arange(0, M)
+    alpha = (M - 1) / 2.0
+    w = (special.i0(beta * np.sqrt(1 - ((n - alpha) / alpha) ** 2.0)) /
+         special.i0(beta))
+
+    return _truncate(w, needs_trunc)
+
+
+def kaiser_bessel_derived(M, beta, *, sym=True):
+    """Return a Kaiser-Bessel derived window.
+
+    Parameters
+    ----------
+    M : int
+        Number of points in the output window. If zero, an empty array
+        is returned. An exception is thrown when it is negative.
+        Note that this window is only defined for an even
+        number of points.
+    beta : float
+        Kaiser window shape parameter.
+    sym : bool, optional
+        This parameter only exists to comply with the interface offered by
+        the other window functions and to be callable by `get_window`.
+        When True (default), generates a symmetric window, for use in filter
+        design.
+
+    Returns
+    -------
+    w : ndarray
+        The window, normalized to fulfill the Princen-Bradley condition.
+
+    See Also
+    --------
+    kaiser
+
+    Notes
+    -----
+    It is designed to be suitable for use with the modified discrete cosine
+    transform (MDCT) and is mainly used in audio signal processing and
+    audio coding.
+
+    .. versionadded:: 1.9.0
+
+    References
+    ----------
+    .. [1] Bosi, Marina, and Richard E. Goldberg. Introduction to Digital
+           Audio Coding and Standards. Dordrecht: Kluwer, 2003.
+    .. [2] Wikipedia, "Kaiser window",
+           https://en.wikipedia.org/wiki/Kaiser_window
+
+    Examples
+    --------
+    Plot the Kaiser-Bessel derived window based on the Wikipedia
+    reference [2]_:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots()
+    >>> N = 50
+    >>> for alpha in [0.64, 2.55, 7.64, 31.83]:
+    ...     ax.plot(signal.windows.kaiser_bessel_derived(2*N, np.pi*alpha),
+    ...             label=f"{alpha=}")
+    >>> ax.grid(True)
+    >>> ax.set_title("Kaiser-Bessel derived window")
+    >>> ax.set_ylabel("Amplitude")
+    >>> ax.set_xlabel("Sample")
+    >>> ax.set_xticks([0, N, 2*N-1])
+    >>> ax.set_xticklabels(["0", "N", "2N-1"])  # doctest: +SKIP
+    >>> ax.set_yticks([0.0, 0.2, 0.4, 0.6, 0.707, 0.8, 1.0])
+    >>> fig.legend(loc="center")
+    >>> fig.tight_layout()
+    >>> fig.show()
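+
+    As a numerical check of the Princen-Bradley condition, the squared
+    first and second halves of the window sum to one:
+
+    >>> w = signal.windows.kaiser_bessel_derived(8, beta=4.0)
+    >>> np.allclose(w[:4]**2 + w[4:]**2, 1.0)
+    True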
+    """
+    if not sym:
+        raise ValueError(
+            "Kaiser-Bessel Derived windows are only defined for symmetric "
+            "shapes"
+        )
+    elif M < 1:
+        return np.array([])
+    elif M % 2:
+        raise ValueError(
+            "Kaiser-Bessel Derived windows are only defined for even number "
+            "of points"
+        )
+
+    kaiser_window = kaiser(M // 2 + 1, beta)
+    csum = np.cumsum(kaiser_window)
+    half_window = np.sqrt(csum[:-1] / csum[-1])
+    w = np.concatenate((half_window, half_window[::-1]), axis=0)
+    return w
+
+
+def gaussian(M, std, sym=True):
+    r"""Return a Gaussian window.
+
+    Parameters
+    ----------
+    M : int
+        Number of points in the output window. If zero, an empty array
+        is returned. An exception is thrown when it is negative.
+    std : float
+        The standard deviation, sigma.
+    sym : bool, optional
+        When True (default), generates a symmetric window, for use in filter
+        design.
+        When False, generates a periodic window, for use in spectral analysis.
+
+    Returns
+    -------
+    w : ndarray
+        The window, with the maximum value normalized to 1 (though the value 1
+        does not appear if `M` is even and `sym` is True).
+
+    Notes
+    -----
+    The Gaussian window is defined as
+
+    .. math::  w(n) = e^{ -\frac{1}{2}\left(\frac{n}{\sigma}\right)^2 }
+
+    Examples
+    --------
+    Plot the window and its frequency response:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> from scipy.fft import fft, fftshift
+    >>> import matplotlib.pyplot as plt
+
+    >>> window = signal.windows.gaussian(51, std=7)
+    >>> plt.plot(window)
+    >>> plt.title(r"Gaussian window ($\sigma$=7)")
+    >>> plt.ylabel("Amplitude")
+    >>> plt.xlabel("Sample")
+
+    >>> plt.figure()
+    >>> A = fft(window, 2048) / (len(window)/2.0)
+    >>> freq = np.linspace(-0.5, 0.5, len(A))
+    >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
+    >>> plt.plot(freq, response)
+    >>> plt.axis([-0.5, 0.5, -120, 0])
+    >>> plt.title(r"Frequency response of the Gaussian window ($\sigma$=7)")
+    >>> plt.ylabel("Normalized magnitude [dB]")
+    >>> plt.xlabel("Normalized frequency [cycles per sample]")
+
+    """
+    if _len_guards(M):
+        return np.ones(M)
+    M, needs_trunc = _extend(M, sym)
+
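+    # Sample positions centered on the window, n = -(M-1)/2 ... (M-1)/2,
+    # so that w = exp(-n**2 / (2*std**2)) peaks at the middle.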
+    n = np.arange(0, M) - (M - 1.0) / 2.0
+    sig2 = 2 * std * std
+    w = np.exp(-n ** 2 / sig2)
+
+    return _truncate(w, needs_trunc)
+
+
+def general_gaussian(M, p, sig, sym=True):
+    r"""Return a window with a generalized Gaussian shape.
+
+    Parameters
+    ----------
+    M : int
+        Number of points in the output window. If zero, an empty array
+        is returned. An exception is thrown when it is negative.
+    p : float
+        Shape parameter.  p = 1 is identical to `gaussian`, p = 0.5 is
+        the same shape as the Laplace distribution.
+    sig : float
+        The standard deviation, sigma.
+    sym : bool, optional
+        When True (default), generates a symmetric window, for use in filter
+        design.
+        When False, generates a periodic window, for use in spectral analysis.
+
+    Returns
+    -------
+    w : ndarray
+        The window, with the maximum value normalized to 1 (though the value 1
+        does not appear if `M` is even and `sym` is True).
+
+    Notes
+    -----
+    The generalized Gaussian window is defined as
+
+    .. math::  w(n) = e^{ -\frac{1}{2}\left|\frac{n}{\sigma}\right|^{2p} }
+
+    the half-power point is at
+
+    .. math::  (2 \log(2))^{1/(2 p)} \sigma
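+
+    For example, ``p = 1`` recovers the plain Gaussian window:
+
+    >>> import numpy as np
+    >>> from scipy.signal import windows
+    >>> np.allclose(windows.general_gaussian(51, p=1, sig=7),
+    ...             windows.gaussian(51, std=7))
+    True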
+
+    Examples
+    --------
+    Plot the window and its frequency response:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> from scipy.fft import fft, fftshift
+    >>> import matplotlib.pyplot as plt
+
+    >>> window = signal.windows.general_gaussian(51, p=1.5, sig=7)
+    >>> plt.plot(window)
+    >>> plt.title(r"Generalized Gaussian window (p=1.5, $\sigma$=7)")
+    >>> plt.ylabel("Amplitude")
+    >>> plt.xlabel("Sample")
+
+    >>> plt.figure()
+    >>> A = fft(window, 2048) / (len(window)/2.0)
+    >>> freq = np.linspace(-0.5, 0.5, len(A))
+    >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
+    >>> plt.plot(freq, response)
+    >>> plt.axis([-0.5, 0.5, -120, 0])
+    >>> plt.title(r"Freq. resp. of the gen. Gaussian "
+    ...           r"window (p=1.5, $\sigma$=7)")
+    >>> plt.ylabel("Normalized magnitude [dB]")
+    >>> plt.xlabel("Normalized frequency [cycles per sample]")
+
+    """
+    if _len_guards(M):
+        return np.ones(M)
+    M, needs_trunc = _extend(M, sym)
+
+    n = np.arange(0, M) - (M - 1.0) / 2.0
+    w = np.exp(-0.5 * np.abs(n / sig) ** (2 * p))
+
+    return _truncate(w, needs_trunc)
+
+
+# `chebwin` contributed by Kumar Appaiah.
+def chebwin(M, at, sym=True):
+    r"""Return a Dolph-Chebyshev window.
+
+    Parameters
+    ----------
+    M : int
+        Number of points in the output window. If zero, an empty array
+        is returned. An exception is thrown when it is negative.
+    at : float
+        Attenuation (in dB).
+    sym : bool, optional
+        When True (default), generates a symmetric window, for use in filter
+        design.
+        When False, generates a periodic window, for use in spectral analysis.
+
+    Returns
+    -------
+    w : ndarray
+        The window, with the maximum value always normalized to 1.
+
+    Notes
+    -----
+    This window optimizes for the narrowest main lobe width for a given order
+    `M` and sidelobe equiripple attenuation `at`, using Chebyshev
+    polynomials.  It was originally developed by Dolph to optimize the
+    directionality of radio antenna arrays.
+
+    Unlike most windows, the Dolph-Chebyshev is defined in terms of its
+    frequency response:
+
+    .. math:: W(k) = \frac
+              {\cos\{M \cos^{-1}[\beta \cos(\frac{\pi k}{M})]\}}
+              {\cosh[M \cosh^{-1}(\beta)]}
+
+    where
+
+    .. math:: \beta = \cosh \left [\frac{1}{M}
+              \cosh^{-1}(10^\frac{A}{20}) \right ]
+
+    and 0 <= abs(k) <= M-1. A is the attenuation in decibels (`at`).
+
+    The time domain window is then generated using the IFFT, so
+    power-of-two values of `M` are the fastest to generate, and prime values
+    of `M` are the slowest.
+
+    The equiripple condition in the frequency domain creates impulses in the
+    time domain, which appear at the ends of the window.
+
+    References
+    ----------
+    .. [1] C. Dolph, "A current distribution for broadside arrays which
+           optimizes the relationship between beam width and side-lobe level",
+           Proceedings of the IEEE, Vol. 34, Issue 6
+    .. [2] Peter Lynch, "The Dolph-Chebyshev Window: A Simple Optimal Filter",
+           American Meteorological Society (April 1997)
+           http://mathsci.ucd.ie/~plynch/Publications/Dolph.pdf
+    .. [3] F. J. Harris, "On the use of windows for harmonic analysis with the
+           discrete Fourier transforms", Proceedings of the IEEE, Vol. 66,
+           No. 1, January 1978
+
+    Examples
+    --------
+    Plot the window and its frequency response:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> from scipy.fft import fft, fftshift
+    >>> import matplotlib.pyplot as plt
+
+    >>> window = signal.windows.chebwin(51, at=100)
+    >>> plt.plot(window)
+    >>> plt.title("Dolph-Chebyshev window (100 dB)")
+    >>> plt.ylabel("Amplitude")
+    >>> plt.xlabel("Sample")
+
+    >>> plt.figure()
+    >>> A = fft(window, 2048) / (len(window)/2.0)
+    >>> freq = np.linspace(-0.5, 0.5, len(A))
+    >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
+    >>> plt.plot(freq, response)
+    >>> plt.axis([-0.5, 0.5, -120, 0])
+    >>> plt.title("Frequency response of the Dolph-Chebyshev window (100 dB)")
+    >>> plt.ylabel("Normalized magnitude [dB]")
+    >>> plt.xlabel("Normalized frequency [cycles per sample]")
+
+    """
+    if np.abs(at) < 45:
+        warnings.warn("This window is not suitable for spectral analysis "
+                      "for attenuation values lower than about 45dB because "
+                      "the equivalent noise bandwidth of a Chebyshev window "
+                      "does not grow monotonically with increasing sidelobe "
+                      "attenuation when the attenuation is smaller than "
+                      "about 45 dB.")
+    if _len_guards(M):
+        return np.ones(M)
+    M, needs_trunc = _extend(M, sym)
+
+    # compute the parameter beta
+    order = M - 1.0
+    beta = np.cosh(1.0 / order * np.arccosh(10 ** (np.abs(at) / 20.)))
+    k = np.r_[0:M] * 1.0
+    x = beta * np.cos(np.pi * k / M)
+    # Find the window's DFT coefficients
+    # Use analytic definition of Chebyshev polynomial instead of expansion
+    # from scipy.special. Using the expansion in scipy.special leads to errors.
+    p = np.zeros(x.shape)
+    p[x > 1] = np.cosh(order * np.arccosh(x[x > 1]))
+    p[x < -1] = (2 * (M % 2) - 1) * np.cosh(order * np.arccosh(-x[x < -1]))
+    p[np.abs(x) <= 1] = np.cos(order * np.arccos(x[np.abs(x) <= 1]))
+
+    # Appropriate IDFT and filling up
+    # depending on even/odd M
+    if M % 2:
+        w = np.real(sp_fft.fft(p))
+        n = (M + 1) // 2
+        w = w[:n]
+        w = np.concatenate((w[n - 1:0:-1], w))
+    else:
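+        # For even M, apply a half-sample phase ramp before the transform so
+        # the real part yields a window symmetric about the array center.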
+        p = p * np.exp(1.j * np.pi / M * np.r_[0:M])
+        w = np.real(sp_fft.fft(p))
+        n = M // 2 + 1
+        w = np.concatenate((w[n - 1:0:-1], w[1:n]))
+    w = w / max(w)
+
+    return _truncate(w, needs_trunc)
+
+
+def cosine(M, sym=True):
+    """Return a window with a simple cosine shape.
+
+    Parameters
+    ----------
+    M : int
+        Number of points in the output window. If zero, an empty array
+        is returned. An exception is thrown when it is negative.
+    sym : bool, optional
+        When True (default), generates a symmetric window, for use in filter
+        design.
+        When False, generates a periodic window, for use in spectral analysis.
+
+    Returns
+    -------
+    w : ndarray
+        The window, with the maximum value normalized to 1 (though the value 1
+        does not appear if `M` is even and `sym` is True).
+
+    Notes
+    -----
+
+    .. versionadded:: 0.13.0
+
+    Examples
+    --------
+    Plot the window and its frequency response:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> from scipy.fft import fft, fftshift
+    >>> import matplotlib.pyplot as plt
+
+    >>> window = signal.windows.cosine(51)
+    >>> plt.plot(window)
+    >>> plt.title("Cosine window")
+    >>> plt.ylabel("Amplitude")
+    >>> plt.xlabel("Sample")
+
+    >>> plt.figure()
+    >>> A = fft(window, 2048) / (len(window)/2.0)
+    >>> freq = np.linspace(-0.5, 0.5, len(A))
+    >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
+    >>> plt.plot(freq, response)
+    >>> plt.axis([-0.5, 0.5, -120, 0])
+    >>> plt.title("Frequency response of the cosine window")
+    >>> plt.ylabel("Normalized magnitude [dB]")
+    >>> plt.xlabel("Normalized frequency [cycles per sample]")
+    >>> plt.show()
+
+    """
+    if _len_guards(M):
+        return np.ones(M)
+    M, needs_trunc = _extend(M, sym)
+
+    w = np.sin(np.pi / M * (np.arange(0, M) + .5))
+
+    return _truncate(w, needs_trunc)
+
+
+def exponential(M, center=None, tau=1., sym=True):
+    r"""Return an exponential (or Poisson) window.
+
+    Parameters
+    ----------
+    M : int
+        Number of points in the output window. If zero, an empty array
+        is returned. An exception is thrown when it is negative.
+    center : float, optional
+        Parameter defining the center location of the window function.
+        The default value if not given is ``center = (M-1) / 2``.  This
+        parameter must take its default value for symmetric windows.
+    tau : float, optional
+        Parameter defining the decay.  For ``center = 0`` use
+        ``tau = -(M-1) / ln(x)`` if ``x`` is the fraction of the window
+        remaining at the end.
+    sym : bool, optional
+        When True (default), generates a symmetric window, for use in filter
+        design.
+        When False, generates a periodic window, for use in spectral analysis.
+
+    Returns
+    -------
+    w : ndarray
+        The window, with the maximum value normalized to 1 (though the value 1
+        does not appear if `M` is even and `sym` is True).
+
+    Notes
+    -----
+    The Exponential window is defined as
+
+    .. math::  w(n) = e^{-\left|n - \mathrm{center}\right| / \tau}
+
+    References
+    ----------
+    .. [1] S. Gade and H. Herlufsen, "Windows to FFT analysis (Part I)",
+           Technical Review 3, Bruel & Kjaer, 1987.
+
+    Examples
+    --------
+    Plot the symmetric window and its frequency response:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> from scipy.fft import fft, fftshift
+    >>> import matplotlib.pyplot as plt
+
+    >>> M = 51
+    >>> tau = 3.0
+    >>> window = signal.windows.exponential(M, tau=tau)
+    >>> plt.plot(window)
+    >>> plt.title("Exponential Window (tau=3.0)")
+    >>> plt.ylabel("Amplitude")
+    >>> plt.xlabel("Sample")
+
+    >>> plt.figure()
+    >>> A = fft(window, 2048) / (len(window)/2.0)
+    >>> freq = np.linspace(-0.5, 0.5, len(A))
+    >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
+    >>> plt.plot(freq, response)
+    >>> plt.axis([-0.5, 0.5, -35, 0])
+    >>> plt.title("Frequency response of the Exponential window (tau=3.0)")
+    >>> plt.ylabel("Normalized magnitude [dB]")
+    >>> plt.xlabel("Normalized frequency [cycles per sample]")
+
+    This function can also generate non-symmetric windows:
+
+    >>> tau2 = -(M-1) / np.log(0.01)
+    >>> window2 = signal.windows.exponential(M, 0, tau2, False)
+    >>> plt.figure()
+    >>> plt.plot(window2)
+    >>> plt.ylabel("Amplitude")
+    >>> plt.xlabel("Sample")
+    """
+    if sym and center is not None:
+        raise ValueError("If sym==True, center must be None.")
+    if _len_guards(M):
+        return np.ones(M)
+    M, needs_trunc = _extend(M, sym)
+
+    if center is None:
+        center = (M-1) / 2
+
+    n = np.arange(0, M)
+    w = np.exp(-np.abs(n-center) / tau)
+
+    return _truncate(w, needs_trunc)
+
+
+def taylor(M, nbar=4, sll=30, norm=True, sym=True):
+    """
+    Return a Taylor window.
+
+    The Taylor window taper function approximates the Dolph-Chebyshev window's
+    constant sidelobe level for a parameterized number of near-in sidelobes,
+    but then allows the sidelobes to taper off beyond that region [2]_.
+
+    The SAR (synthetic aperture radar) community commonly uses Taylor
+    weighting for image formation processing because it provides strong,
+    selectable sidelobe suppression with minimum broadening of the
+    mainlobe [1]_.
+
+    Parameters
+    ----------
+    M : int
+        Number of points in the output window. If zero, an empty array
+        is returned. An exception is thrown when it is negative.
+    nbar : int, optional
+        Number of nearly constant level sidelobes adjacent to the mainlobe.
+    sll : float, optional
+        Desired suppression of sidelobe level in decibels (dB) relative to the
+        DC gain of the mainlobe. This should be a positive number.
+    norm : bool, optional
+        When True (default), divides the window by the largest (middle) value
+        for odd-length windows or the value that would occur between the two
+        repeated middle values for even-length windows such that all values
+        are less than or equal to 1. When False the DC gain will remain at 1
+        (0 dB) and the sidelobes will be `sll` dB down.
+    sym : bool, optional
+        When True (default), generates a symmetric window, for use in filter
+        design.
+        When False, generates a periodic window, for use in spectral analysis.
+
+    Returns
+    -------
+    out : array
+        The window. When `norm` is True (default), the maximum value is
+        normalized to 1 (though the value 1 does not appear if `M` is
+        even and `sym` is True).
+
+    See Also
+    --------
+    chebwin, kaiser, bartlett, blackman, hamming, hann
+
+    References
+    ----------
+    .. [1] W. Carrara, R. Goodman, and R. Majewski, "Spotlight Synthetic
+           Aperture Radar: Signal Processing Algorithms" Pages 512-513,
+           July 1995.
+    .. [2] Armin Doerry, "Catalog of Window Taper Functions for
+           Sidelobe Control", 2017.
+           https://www.researchgate.net/profile/Armin_Doerry/publication/316281181_Catalog_of_Window_Taper_Functions_for_Sidelobe_Control/links/58f92cb2a6fdccb121c9d54d/Catalog-of-Window-Taper-Functions-for-Sidelobe-Control.pdf
+
+    Examples
+    --------
+    Plot the window and its frequency response:
+
+    >>> import numpy as np
+    >>> from scipy import signal
+    >>> from scipy.fft import fft, fftshift
+    >>> import matplotlib.pyplot as plt
+
+    >>> window = signal.windows.taylor(51, nbar=20, sll=100, norm=False)
+    >>> plt.plot(window)
+    >>> plt.title("Taylor window (100 dB)")
+    >>> plt.ylabel("Amplitude")
+    >>> plt.xlabel("Sample")
+
+    >>> plt.figure()
+    >>> A = fft(window, 2048) / (len(window)/2.0)
+    >>> freq = np.linspace(-0.5, 0.5, len(A))
+    >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
+    >>> plt.plot(freq, response)
+    >>> plt.axis([-0.5, 0.5, -120, 0])
+    >>> plt.title("Frequency response of the Taylor window (100 dB)")
+    >>> plt.ylabel("Normalized magnitude [dB]")
+    >>> plt.xlabel("Normalized frequency [cycles per sample]")
+
+    """  # noqa: E501
+    if _len_guards(M):
+        return np.ones(M)
+    M, needs_trunc = _extend(M, sym)
+
+    # Original text uses a negative sidelobe level parameter and then negates
+    # it in the calculation of B. To keep consistent with other methods we
+    # assume the sidelobe level parameter to be positive.
+    B = 10**(sll / 20)
+    A = np.arccosh(B) / np.pi
+    s2 = nbar**2 / (A**2 + (nbar - 0.5)**2)
+    ma = np.arange(1, nbar)
+
+    Fm = np.empty(nbar-1)
+    signs = np.empty_like(ma)
+    signs[::2] = 1
+    signs[1::2] = -1
+    m2 = ma*ma
+    for mi, m in enumerate(ma):
+        numer = signs[mi] * np.prod(1 - m2[mi]/s2/(A**2 + (ma - 0.5)**2))
+        denom = 2 * np.prod(1 - m2[mi]/m2[:mi]) * np.prod(1 - m2[mi]/m2[mi+1:])
+        Fm[mi] = numer / denom
+
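+    # Evaluate the window as a cosine series with the nbar-1 coefficients:
+    # w(n) = 1 + 2*sum_{m=1}^{nbar-1} Fm[m-1]*cos(2*pi*m*(n - M/2 + 0.5)/M)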
+    def W(n):
+        return 1 + 2*np.dot(Fm, np.cos(
+            2*np.pi*ma[:, np.newaxis]*(n-M/2.+0.5)/M))
+
+    w = W(np.arange(M))
+
+    # normalize (Note that this is not described in the original text [1])
+    if norm:
+        scale = 1.0 / W((M - 1) / 2)
+        w *= scale
+
+    return _truncate(w, needs_trunc)
+
+
+def dpss(M, NW, Kmax=None, sym=True, norm=None, return_ratios=False):
+    """
+    Compute the Discrete Prolate Spheroidal Sequences (DPSS).
+
+    DPSS (or Slepian sequences) are often used in multitaper power spectral
+    density estimation (see [1]_). The first window in the sequence can be
+    used to maximize the energy concentration in the main lobe, and is also
+    called the Slepian window.
+
+    Parameters
+    ----------
+    M : int
+        Window length.
+    NW : float
+        Standardized half bandwidth corresponding to ``2*NW = BW/f0 = BW*M*dt``
+        where ``dt`` is taken as 1.
+    Kmax : int | None, optional
+        Number of DPSS windows to return (orders ``0`` through ``Kmax-1``).
+        If None (default), return only a single window of shape ``(M,)``
+        instead of an array of windows of shape ``(Kmax, M)``.
+    sym : bool, optional
+        When True (default), generates a symmetric window, for use in filter
+        design.
+        When False, generates a periodic window, for use in spectral analysis.
+    norm : {2, 'approximate', 'subsample'} | None, optional
+        If 'approximate' or 'subsample', then the windows are normalized by the
+        maximum, and a correction scale-factor for even-length windows
+        is applied either using ``M**2/(M**2+NW)`` ("approximate") or
+        an FFT-based subsample shift ("subsample"); see Notes for details.
+        If None, then "approximate" is used when ``Kmax=None`` and 2 otherwise
+        (which uses the l2 norm).
+    return_ratios : bool, optional
+        If True, also return the concentration ratios in addition to the
+        windows.
+
+    Returns
+    -------
+    v : ndarray, shape (Kmax, M) or (M,)
+        The DPSS windows. Will be 1D if `Kmax` is None.
+    r : ndarray, shape (Kmax,) or float, optional
+        The concentration ratios for the windows. Only returned if
+        `return_ratios` evaluates to True. Will be 0D if `Kmax` is None.
+
+    Notes
+    -----
+    This computation uses the tridiagonal eigenvector formulation given
+    in [2]_.
+
+    In window-generation mode (``Kmax=None``), simply normalizing by the
+    l-infinity norm would create a window with two unity values for even `M`,
+    which leads to slight normalization differences between even and odd
+    lengths. The approximate correction of ``M**2/float(M**2+NW)`` for even
+    sample numbers is used to counteract this effect (see Examples below).
+
+    For very long signals (e.g., 1e6 elements), it can be useful to compute
+    windows orders of magnitude shorter and use interpolation (e.g.,
+    `scipy.interpolate.interp1d`) to obtain tapers of length `M`,
+    but this in general will not preserve orthogonality between the tapers.
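+
+    A minimal sketch of this shortcut (the interpolated tapers are only
+    approximately orthogonal, as noted above):
+
+    >>> import numpy as np
+    >>> from scipy.interpolate import interp1d
+    >>> from scipy.signal import windows
+    >>> short = windows.dpss(512, NW=2.5)
+    >>> f = interp1d(np.linspace(0, 1, 512), short)
+    >>> long_taper = f(np.linspace(0, 1, 100000))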
+
+    .. versionadded:: 1.1
+
+    References
+    ----------
+    .. [1] Percival DB, Walden AT. Spectral Analysis for Physical Applications:
+       Multitaper and Conventional Univariate Techniques.
+       Cambridge University Press; 1993.
+    .. [2] Slepian, D. Prolate spheroidal wave functions, Fourier analysis, and
+       uncertainty V: The discrete case. Bell System Technical Journal,
+       Volume 57 (1978), 1371-1430.
+    .. [3] Kaiser, JF, Schafer RW. On the Use of the I0-Sinh Window for
+       Spectrum Analysis. IEEE Transactions on Acoustics, Speech and
+       Signal Processing. ASSP-28 (1): 105-107; 1980.
+
+    Examples
+    --------
+    We can compare the window to `kaiser`, which was invented as an alternative
+    that was easier to calculate [3]_ (example adapted from
+    `here `_):
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.signal import windows, freqz
+    >>> M = 51
+    >>> fig, axes = plt.subplots(3, 2, figsize=(5, 7))
+    >>> for ai, alpha in enumerate((1, 3, 5)):
+    ...     win_dpss = windows.dpss(M, alpha)
+    ...     beta = alpha*np.pi
+    ...     win_kaiser = windows.kaiser(M, beta)
+    ...     for win, c in ((win_dpss, 'k'), (win_kaiser, 'r')):
+    ...         win /= win.sum()
+    ...         axes[ai, 0].plot(win, color=c, lw=1.)
+    ...         axes[ai, 0].set(xlim=[0, M-1], title=r'$\\alpha$ = %s' % alpha,
+    ...                         ylabel='Amplitude')
+    ...         w, h = freqz(win)
+    ...         axes[ai, 1].plot(w, 20 * np.log10(np.abs(h)), color=c, lw=1.)
+    ...         axes[ai, 1].set(xlim=[0, np.pi],
+    ...                         title=r'$\\beta$ = %0.2f' % beta,
+    ...                         ylabel='Magnitude (dB)')
+    >>> for ax in axes.ravel():
+    ...     ax.grid(True)
+    >>> axes[2, 1].legend(['DPSS', 'Kaiser'])
+    >>> fig.tight_layout()
+    >>> plt.show()
+
+    And here are examples of the first four windows, along with their
+    concentration ratios:
+
+    >>> M = 512
+    >>> NW = 2.5
+    >>> win, eigvals = windows.dpss(M, NW, 4, return_ratios=True)
+    >>> fig, ax = plt.subplots(1)
+    >>> ax.plot(win.T, linewidth=1.)
+    >>> ax.set(xlim=[0, M-1], ylim=[-0.1, 0.1], xlabel='Samples',
+    ...        title='DPSS, M=%d, NW=%0.1f' % (M, NW))
+    >>> ax.legend(['win[%d] (%0.4f)' % (ii, ratio)
+    ...            for ii, ratio in enumerate(eigvals)])
+    >>> fig.tight_layout()
+    >>> plt.show()
+
+    Using a standard :math:`l_{\\infty}` norm would produce two unity values
+    for even `M`, but only one unity value for odd `M`. This produces uneven
+    window power that can be counteracted by the approximate correction
+    ``M**2/float(M**2+NW)``, which can be selected by using
+    ``norm='approximate'`` (which is the same as ``norm=None`` when
+    ``Kmax=None``, as is the case here). Alternatively, the slower
+    ``norm='subsample'`` can be used, which uses subsample shifting in the
+    frequency domain (FFT) to compute the correction:
+
+    >>> Ms = np.arange(1, 41)
+    >>> factors = (50, 20, 10, 5, 2.0001)
+    >>> energy = np.empty((3, len(Ms), len(factors)))
+    >>> for mi, M in enumerate(Ms):
+    ...     for fi, factor in enumerate(factors):
+    ...         NW = M / float(factor)
+    ...         # Corrected using empirical approximation (default)
+    ...         win = windows.dpss(M, NW)
+    ...         energy[0, mi, fi] = np.sum(win ** 2) / np.sqrt(M)
+    ...         # Corrected using subsample shifting
+    ...         win = windows.dpss(M, NW, norm='subsample')
+    ...         energy[1, mi, fi] = np.sum(win ** 2) / np.sqrt(M)
+    ...         # Uncorrected (using l-infinity norm)
+    ...         win /= win.max()
+    ...         energy[2, mi, fi] = np.sum(win ** 2) / np.sqrt(M)
+    >>> fig, ax = plt.subplots(1)
+    >>> hs = ax.plot(Ms, energy[2], '-o', markersize=4,
+    ...              markeredgecolor='none')
+    >>> leg = [hs[-1]]
+    >>> for hi, hh in enumerate(hs):
+    ...     h1 = ax.plot(Ms, energy[0, :, hi], '-o', markersize=4,
+    ...                  color=hh.get_color(), markeredgecolor='none',
+    ...                  alpha=0.66)
+    ...     h2 = ax.plot(Ms, energy[1, :, hi], '-o', markersize=4,
+    ...                  color=hh.get_color(), markeredgecolor='none',
+    ...                  alpha=0.33)
+    ...     if hi == len(hs) - 1:
+    ...         leg.insert(0, h1[0])
+    ...         leg.insert(0, h2[0])
+    >>> ax.set(xlabel='M (samples)', ylabel=r'Power / $\\sqrt{M}$')
+    >>> ax.legend(leg, ['Uncorrected', r'Corrected: $\\frac{M^2}{M^2+NW}$',
+    ...                 'Corrected (subsample)'])
+    >>> fig.tight_layout()
+
+    """  # noqa: E501
+    if _len_guards(M):
+        return np.ones(M)
+    if norm is None:
+        norm = 'approximate' if Kmax is None else 2
+    known_norms = (2, 'approximate', 'subsample')
+    if norm not in known_norms:
+        raise ValueError('norm must be one of %s, got %s'
+                         % (known_norms, norm))
+    if Kmax is None:
+        singleton = True
+        Kmax = 1
+    else:
+        singleton = False
+    Kmax = operator.index(Kmax)
+    if not 0 < Kmax <= M:
+        raise ValueError('Kmax must be greater than 0 and less than or '
+                         'equal to M')
+    if NW >= M/2.:
+        raise ValueError('NW must be less than M/2.')
+    if NW <= 0:
+        raise ValueError('NW must be positive')
+    M, needs_trunc = _extend(M, sym)
+    W = float(NW) / M
+    nidx = np.arange(M)
+
+    # Here we want to set up an optimization problem to find a sequence
+    # whose energy is maximally concentrated within band [-W,W].
+    # Thus, the measure lambda(T,W) is the ratio between the energy within
+    # that band, and the total energy. This leads to the eigen-system
+    # (A - (l1)I)v = 0, where the eigenvector corresponding to the largest
+    # eigenvalue is the sequence with maximally concentrated energy. The
+    # collection of eigenvectors of this system are called Slepian
+    # sequences, or discrete prolate spheroidal sequences (DPSS). Only the
+    # first K, K = 2NW/dt orders of DPSS will exhibit good spectral
+    # concentration
+    # [see https://en.wikipedia.org/wiki/Spectral_concentration_problem]
+
+    # Here we set up an alternative symmetric tri-diagonal eigenvalue
+    # problem such that
+    # (B - (l2)I)v = 0, and v are our DPSS (but eigenvalues l2 != l1)
+    # the main diagonal = ([M-1-2*t]/2)**2 cos(2PIW), t=[0,1,2,...,M-1]
+    # and the first off-diagonal = t(M-t)/2, t=[1,2,...,M-1]
+    # [see Percival and Walden, 1993]
+    d = ((M - 1 - 2 * nidx) / 2.) ** 2 * np.cos(2 * np.pi * W)
+    e = nidx[1:] * (M - nidx[1:]) / 2.
+
+    # only calculate the highest Kmax eigenvalues
+    w, windows = linalg.eigh_tridiagonal(
+        d, e, select='i', select_range=(M - Kmax, M - 1))
+    w = w[::-1]
+    windows = windows[:, ::-1].T
+
+    # By convention (Percival and Walden, 1993 pg 379)
+    # * symmetric tapers (k=0,2,4,...) should have a positive average.
+    fix_even = (windows[::2].sum(axis=1) < 0)
+    for i, f in enumerate(fix_even):
+        if f:
+            windows[2 * i] *= -1
+    # * antisymmetric tapers should begin with a positive lobe
+    #   (this depends on the definition of "lobe", here we'll take the first
+    #   point above the numerical noise, which should be good enough for
+    #   sufficiently smooth functions, and more robust than relying on an
+    #   algorithm that uses max(abs(w)), which is susceptible to numerical
+    #   noise problems)
+    thresh = max(1e-7, 1. / M)
+    for i, w in enumerate(windows[1::2]):
+        if w[w * w > thresh][0] < 0:
+            windows[2 * i + 1] *= -1
+
+    # Now find the eigenvalues of the original spectral concentration problem
+    # Use the autocorr sequence technique from Percival and Walden, 1993 pg 390
+    if return_ratios:
+        dpss_rxx = _fftautocorr(windows)
+        r = 4 * W * np.sinc(2 * W * nidx)
+        r[0] = 2 * W
+        ratios = np.dot(dpss_rxx, r)
+        if singleton:
+            ratios = ratios[0]
+    # Deal with sym and Kmax=None
+    if norm != 2:
+        windows /= windows.max()
+        if M % 2 == 0:
+            if norm == 'approximate':
+                correction = M**2 / float(M**2 + NW)
+            else:
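+                # 'subsample': evaluate the window at its true (subsample)
+                # center via a frequency-domain phase shift and rescale so
+                # this peak value equals one.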
+                s = sp_fft.rfft(windows[0])
+                shift = -(1 - 1./M) * np.arange(1, M//2 + 1)
+                s[1:] *= 2 * np.exp(-1j * np.pi * shift)
+                correction = M / s.real.sum()
+            windows *= correction
+    # else we're already l2 normed, so do nothing
+    if needs_trunc:
+        windows = windows[:, :-1]
+    if singleton:
+        windows = windows[0]
+    return (windows, ratios) if return_ratios else windows
+
+
+def lanczos(M, *, sym=True):
+    r"""Return a Lanczos window also known as a sinc window.
+
+    Parameters
+    ----------
+    M : int
+        Number of points in the output window. If zero, an empty array
+        is returned. An exception is thrown when it is negative.
+    sym : bool, optional
+        When True (default), generates a symmetric window, for use in filter
+        design.
+        When False, generates a periodic window, for use in spectral analysis.
+
+    Returns
+    -------
+    w : ndarray
+        The window, with the maximum value normalized to 1 (though the value 1
+        does not appear if `M` is even and `sym` is True).
+
+    Notes
+    -----
+    The Lanczos window is defined as
+
+    .. math::  w(n) = \mathrm{sinc}\left( \frac{2n}{M - 1} - 1 \right)
+
+    where
+
+    .. math::  \mathrm{sinc}(x) = \frac{\sin(\pi x)}{\pi x}
+
+    The Lanczos window has reduced Gibbs oscillations and is widely used for
+    filtering climate time series, with good properties in both the physical
+    and spectral domains.
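+
+    As a quick check of the definition, the center sample of an odd-length
+    symmetric window equals :math:`\mathrm{sinc}(0) = 1`:
+
+    >>> from scipy.signal.windows import lanczos
+    >>> float(lanczos(51)[25])
+    1.0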
+
+    .. versionadded:: 1.10
+
+    References
+    ----------
+    .. [1] Lanczos, C., and Teichmann, T. (1957). Applied analysis.
+           Physics Today, 10, 44.
+    .. [2] Duchon C. E. (1979) Lanczos Filtering in One and Two Dimensions.
+           Journal of Applied Meteorology, Vol 18, pp 1016-1022.
+    .. [3] Thomson, R. E. and Emery, W. J. (2014) Data Analysis Methods in
+           Physical Oceanography (Third Edition), Elsevier, pp 593-637.
+    .. [4] Wikipedia, "Window function",
+           http://en.wikipedia.org/wiki/Window_function
+
+    Examples
+    --------
+    Plot the window
+
+    >>> import numpy as np
+    >>> from scipy.signal.windows import lanczos
+    >>> from scipy.fft import fft, fftshift
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots(1)
+    >>> window = lanczos(51)
+    >>> ax.plot(window)
+    >>> ax.set_title("Lanczos window")
+    >>> ax.set_ylabel("Amplitude")
+    >>> ax.set_xlabel("Sample")
+    >>> fig.tight_layout()
+    >>> plt.show()
+
+    and its frequency response:
+
+    >>> fig, ax = plt.subplots(1)
+    >>> A = fft(window, 2048) / (len(window)/2.0)
+    >>> freq = np.linspace(-0.5, 0.5, len(A))
+    >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
+    >>> ax.plot(freq, response)
+    >>> ax.set_xlim(-0.5, 0.5)
+    >>> ax.set_ylim(-120, 0)
+    >>> ax.set_title("Frequency response of the lanczos window")
+    >>> ax.set_ylabel("Normalized magnitude [dB]")
+    >>> ax.set_xlabel("Normalized frequency [cycles per sample]")
+    >>> fig.tight_layout()
+    >>> plt.show()
+    """
+    if _len_guards(M):
+        return np.ones(M)
+    M, needs_trunc = _extend(M, sym)
+
+    # To make sure that the window is symmetric, we concatenate the right hand
+    # half of the window and the flipped one which is the left hand half of
+    # the window.
+    def _calc_right_side_lanczos(n, m):
+        return np.sinc(2. * np.arange(n, m) / (m - 1) - 1.0)
+
+    if M % 2 == 0:
+        wh = _calc_right_side_lanczos(M/2, M)
+        w = np.r_[np.flip(wh), wh]
+    else:
+        wh = _calc_right_side_lanczos((M+1)/2, M)
+        w = np.r_[np.flip(wh), 1.0, wh]
+
+    return _truncate(w, needs_trunc)
+
+
+def _fftautocorr(x):
+    """Compute the autocorrelation of a real array and crop the result."""
+    N = x.shape[-1]
+    use_N = sp_fft.next_fast_len(2*N-1)
+    x_fft = sp_fft.rfft(x, use_N, axis=-1)
+    cxy = sp_fft.irfft(x_fft * x_fft.conj(), n=use_N)[:, :N]
+    # Or equivalently (but in most cases slower):
+    # cxy = np.array([np.convolve(xx, yy[::-1], mode='full')
+    #                 for xx, yy in zip(x, x)])[:, N-1:2*N-1]
+    return cxy
+
+
+_win_equiv_raw = {
+    ('barthann', 'brthan', 'bth'): (barthann, False),
+    ('bartlett', 'bart', 'brt'): (bartlett, False),
+    ('blackman', 'black', 'blk'): (blackman, False),
+    ('blackmanharris', 'blackharr', 'bkh'): (blackmanharris, False),
+    ('bohman', 'bman', 'bmn'): (bohman, False),
+    ('boxcar', 'box', 'ones',
+        'rect', 'rectangular'): (boxcar, False),
+    ('chebwin', 'cheb'): (chebwin, True),
+    ('cosine', 'halfcosine'): (cosine, False),
+    ('dpss',): (dpss, True),
+    ('exponential', 'poisson'): (exponential, False),
+    ('flattop', 'flat', 'flt'): (flattop, False),
+    ('gaussian', 'gauss', 'gss'): (gaussian, True),
+    ('general cosine', 'general_cosine'): (general_cosine, True),
+    ('general gaussian', 'general_gaussian',
+        'general gauss', 'general_gauss', 'ggs'): (general_gaussian, True),
+    ('general hamming', 'general_hamming'): (general_hamming, True),
+    ('hamming', 'hamm', 'ham'): (hamming, False),
+    ('hann', 'han'): (hann, False),
+    ('kaiser', 'ksr'): (kaiser, True),
+    ('kaiser bessel derived', 'kbd'): (kaiser_bessel_derived, True),
+    ('lanczos', 'sinc'): (lanczos, False),
+    ('nuttall', 'nutl', 'nut'): (nuttall, False),
+    ('parzen', 'parz', 'par'): (parzen, False),
+    ('taylor', 'taylorwin'): (taylor, False),
+    ('triangle', 'triang', 'tri'): (triang, False),
+    ('tukey', 'tuk'): (tukey, False),
+}
+
+# Fill dict with all valid window name strings
+_win_equiv = {}
+for k, v in _win_equiv_raw.items():
+    for key in k:
+        _win_equiv[key] = v[0]
+
+# Keep track of which windows need additional parameters
+_needs_param = set()
+for k, v in _win_equiv_raw.items():
+    if v[1]:
+        _needs_param.update(k)
+
+
+def get_window(window, Nx, fftbins=True):
+    """
+    Return a window of a given length and type.
+
+    Parameters
+    ----------
+    window : string, float, or tuple
+        The type of window to create. See below for more details.
+    Nx : int
+        The number of samples in the window.
+    fftbins : bool, optional
+        If True (default), create a "periodic" window, ready to use with
+        `ifftshift` and be multiplied by the result of an FFT (see also
+        :func:`~scipy.fft.fftfreq`).
+        If False, create a "symmetric" window, for use in filter design.
+
+    Returns
+    -------
+    get_window : ndarray
+        Returns a window of length `Nx` and type `window`
+
+    Notes
+    -----
+    Window types:
+
+    - `~scipy.signal.windows.boxcar`
+    - `~scipy.signal.windows.triang`
+    - `~scipy.signal.windows.blackman`
+    - `~scipy.signal.windows.hamming`
+    - `~scipy.signal.windows.hann`
+    - `~scipy.signal.windows.bartlett`
+    - `~scipy.signal.windows.flattop`
+    - `~scipy.signal.windows.parzen`
+    - `~scipy.signal.windows.bohman`
+    - `~scipy.signal.windows.blackmanharris`
+    - `~scipy.signal.windows.nuttall`
+    - `~scipy.signal.windows.barthann`
+    - `~scipy.signal.windows.cosine`
+    - `~scipy.signal.windows.exponential`
+    - `~scipy.signal.windows.tukey`
+    - `~scipy.signal.windows.taylor`
+    - `~scipy.signal.windows.lanczos`
+    - `~scipy.signal.windows.kaiser` (needs beta)
+    - `~scipy.signal.windows.kaiser_bessel_derived` (needs beta)
+    - `~scipy.signal.windows.gaussian` (needs standard deviation)
+    - `~scipy.signal.windows.general_cosine` (needs weighting coefficients)
+    - `~scipy.signal.windows.general_gaussian` (needs power, width)
+    - `~scipy.signal.windows.general_hamming` (needs window coefficient)
+    - `~scipy.signal.windows.dpss` (needs normalized half-bandwidth)
+    - `~scipy.signal.windows.chebwin` (needs attenuation)
+
+
+    If the window requires no parameters, then `window` can be a string.
+
+    If the window requires parameters, then `window` must be a tuple
+    with the first argument the string name of the window, and the next
+    arguments the needed parameters.
+
+    If `window` is a floating point number, it is interpreted as the beta
+    parameter of the `~scipy.signal.windows.kaiser` window.
+
+    Each of the window types listed above is also the name of
+    a function that can be called directly to create a window of
+    that type.
+
+    Examples
+    --------
+    >>> from scipy import signal
+    >>> signal.get_window('triang', 7)
+    array([ 0.125,  0.375,  0.625,  0.875,  0.875,  0.625,  0.375])
+    >>> signal.get_window(('kaiser', 4.0), 9)
+    array([ 0.08848053,  0.29425961,  0.56437221,  0.82160913,  0.97885093,
+            0.97885093,  0.82160913,  0.56437221,  0.29425961])
+    >>> signal.get_window(('exponential', None, 1.), 9)
+    array([ 0.011109  ,  0.03019738,  0.082085  ,  0.22313016,  0.60653066,
+            0.60653066,  0.22313016,  0.082085  ,  0.03019738])
+    >>> signal.get_window(4.0, 9)
+    array([ 0.08848053,  0.29425961,  0.56437221,  0.82160913,  0.97885093,
+            0.97885093,  0.82160913,  0.56437221,  0.29425961])
+
+    """
+    sym = not fftbins
+    try:
+        beta = float(window)
+    except (TypeError, ValueError) as e:
+        args = ()
+        if isinstance(window, tuple):
+            winstr = window[0]
+            if len(window) > 1:
+                args = window[1:]
+        elif isinstance(window, str):
+            if window in _needs_param:
+                raise ValueError("The '" + window + "' window needs one or "
+                                 "more parameters -- pass a tuple.") from e
+            else:
+                winstr = window
+        else:
+            raise ValueError("%s as window type is not supported." %
+                             str(type(window))) from e
+
+        try:
+            winfunc = _win_equiv[winstr]
+        except KeyError as e:
+            raise ValueError("Unknown window type.") from e
+
+        if winfunc is dpss:
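+            # `dpss` takes extra positional parameters (NW) from the tuple;
+            # append Kmax=None so a single 1-D window is returned.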
+            params = (Nx,) + args + (None,)
+        else:
+            params = (Nx,) + args
+    else:
+        winfunc = kaiser
+        params = (Nx, beta)
+
+    return winfunc(*params, sym=sym)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/signal/windows/windows.py b/__packaged__/coreml/.python_dependencies/scipy/signal/windows/windows.py
new file mode 100644
index 00000000..65871470
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/signal/windows/windows.py
@@ -0,0 +1,32 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.signal.windows` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _windows
+
+__all__ = [  # noqa: F822
+    'boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall',
+    'blackmanharris', 'flattop', 'bartlett', 'barthann',
+    'hamming', 'kaiser', 'gaussian', 'general_cosine',
+    'general_gaussian', 'general_hamming', 'chebwin', 'cosine',
+    'hann', 'exponential', 'tukey', 'taylor', 'dpss', 'get_window',
+    'linalg', 'sp_fft', 'k', 'v', 'key'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.signal.windows.windows is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.signal.windows instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.signal.windows` namespace, "
+                  "the `scipy.signal.windows.windows` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_windows, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/__init__.py
new file mode 100644
index 00000000..aab1b88e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/__init__.py
@@ -0,0 +1,298 @@
+"""
+=====================================
+Sparse matrices (:mod:`scipy.sparse`)
+=====================================
+
+.. currentmodule:: scipy.sparse
+
+SciPy 2-D sparse array package for numeric data.
+
+.. note::
+
+   This package is switching to an array interface, compatible with
+   NumPy arrays, from the older matrix interface.  We recommend that
+   you use the array objects (`bsr_array`, `coo_array`, etc.) for
+   all new work.
+
+   When using the array interface, please note that:
+
+   - ``x * y`` no longer performs matrix multiplication, but
+     element-wise multiplication (just like with NumPy arrays).  To
+     make code work with both arrays and matrices, use ``x @ y`` for
+     matrix multiplication.
+   - Operations such as `sum`, which used to produce dense matrices, now
+     produce arrays, whose multiplication behavior differs similarly.
+   - Sparse arrays currently must be two-dimensional.  This also means
+     that all *slicing* operations on these objects must produce
+     two-dimensional results, or they will result in an error. This
+     will be addressed in a future version.
+
+   The construction utilities (`eye`, `kron`, `random`, `diags`, etc.)
+   have not yet been ported, but their results can be wrapped into arrays::
+
+     A = csr_array(eye(3))
+
+Contents
+========
+
+Sparse array classes
+--------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   bsr_array - Block Sparse Row array
+   coo_array - A sparse array in COOrdinate format
+   csc_array - Compressed Sparse Column array
+   csr_array - Compressed Sparse Row array
+   dia_array - Sparse array with DIAgonal storage
+   dok_array - Dictionary Of Keys based sparse array
+   lil_array - Row-based list of lists sparse array
+
+Sparse matrix classes
+---------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   bsr_matrix - Block Sparse Row matrix
+   coo_matrix - A sparse matrix in COOrdinate format
+   csc_matrix - Compressed Sparse Column matrix
+   csr_matrix - Compressed Sparse Row matrix
+   dia_matrix - Sparse matrix with DIAgonal storage
+   dok_matrix - Dictionary Of Keys based sparse matrix
+   lil_matrix - Row-based list of lists sparse matrix
+   spmatrix - Sparse matrix base class
+
+Functions
+---------
+
+Building sparse matrices:
+
+.. autosummary::
+   :toctree: generated/
+
+   eye - Sparse MxN matrix whose k-th diagonal is all ones
+   identity - Identity matrix in sparse format
+   kron - kronecker product of two sparse matrices
+   kronsum - kronecker sum of sparse matrices
+   diags - Return a sparse matrix from diagonals
+   spdiags - Return a sparse matrix from diagonals
+   block_diag - Build a block diagonal sparse matrix
+   tril - Lower triangular portion of a matrix in sparse format
+   triu - Upper triangular portion of a matrix in sparse format
+   bmat - Build a sparse matrix from sparse sub-blocks
+   hstack - Stack sparse matrices horizontally (column wise)
+   vstack - Stack sparse matrices vertically (row wise)
+   rand - Random values in a given shape
+   random - Random values in a given shape
+
+Save and load sparse matrices:
+
+.. autosummary::
+   :toctree: generated/
+
+   save_npz - Save a sparse matrix to a file using ``.npz`` format.
+   load_npz - Load a sparse matrix from a file using ``.npz`` format.
+
+Sparse matrix tools:
+
+.. autosummary::
+   :toctree: generated/
+
+   find
+
+Identifying sparse matrices:
+
+.. autosummary::
+   :toctree: generated/
+
+   issparse
+   isspmatrix
+   isspmatrix_csc
+   isspmatrix_csr
+   isspmatrix_bsr
+   isspmatrix_lil
+   isspmatrix_dok
+   isspmatrix_coo
+   isspmatrix_dia
+
+Submodules
+----------
+
+.. autosummary::
+
+   csgraph - Compressed sparse graph routines
+   linalg - sparse linear algebra routines
+
+Exceptions
+----------
+
+.. autosummary::
+   :toctree: generated/
+
+   SparseEfficiencyWarning
+   SparseWarning
+
+
+Usage information
+=================
+
+There are seven available sparse matrix types:
+
+    1. csc_matrix: Compressed Sparse Column format
+    2. csr_matrix: Compressed Sparse Row format
+    3. bsr_matrix: Block Sparse Row format
+    4. lil_matrix: List of Lists format
+    5. dok_matrix: Dictionary of Keys format
+    6. coo_matrix: COOrdinate format (aka IJV, triplet format)
+    7. dia_matrix: DIAgonal format
+
+To construct a matrix efficiently, use either dok_matrix or lil_matrix.
+The lil_matrix class supports basic slicing and fancy indexing with a
+similar syntax to NumPy arrays. As illustrated below, the COO format
+may also be used to efficiently construct matrices. Despite their
+similarity to NumPy arrays, applying NumPy functions directly to these
+matrices is **strongly discouraged**, because NumPy may not convert them
+properly for computation, leading to unexpected (and incorrect) results. If
+you do want to apply a NumPy function to these matrices, first check whether
+SciPy has its own implementation for the given sparse matrix class, or
+**convert the sparse matrix to a NumPy array** (e.g., using the `toarray()`
+method of the class) before applying the function.
+
+To perform manipulations such as multiplication or inversion, first
+convert the matrix to either CSC or CSR format. The lil_matrix format is
+row-based, so conversion to CSR is efficient, whereas conversion to CSC
+is less so.
+
+All conversions among the CSR, CSC, and COO formats are efficient,
+linear-time operations.
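+
+For example, a short round trip through the three formats (a sketch; the
+entries are preserved at each step):
+
+>>> from scipy.sparse import coo_matrix
+>>> C = coo_matrix(([1, 2], ([0, 1], [1, 0])), shape=(2, 2))
+>>> C.tocsr().tocsc().tocoo().toarray()
+array([[0, 1],
+       [2, 0]])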
+
+Matrix vector product
+---------------------
+To do a vector product between a sparse matrix and a vector, simply use
+the matrix `dot` method, as described in its docstring:
+
+>>> import numpy as np
+>>> from scipy.sparse import csr_matrix
+>>> A = csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
+>>> v = np.array([1, 0, -1])
+>>> A.dot(v)
+array([ 1, -3, -1], dtype=int64)
+
+.. warning:: As of NumPy 1.7, `np.dot` is not aware of sparse matrices;
+  therefore, using it will result in unexpected results or errors.
+  The corresponding dense array should be obtained first instead:
+
+  >>> np.dot(A.toarray(), v)
+  array([ 1, -3, -1], dtype=int64)
+
+  but then all the performance advantages would be lost.
+
+The CSR format is especially suitable for fast matrix-vector products.
+
+Example 1
+---------
+Construct a 1000x1000 lil_matrix and add some values to it:
+
+>>> from scipy.sparse import lil_matrix
+>>> from scipy.sparse.linalg import spsolve
+>>> from numpy.linalg import solve, norm
+>>> from numpy.random import rand
+
+>>> A = lil_matrix((1000, 1000))
+>>> A[0, :100] = rand(100)
+>>> A[1, 100:200] = A[0, :100]
+>>> A.setdiag(rand(1000))
+
+Now convert it to CSR format and solve A x = b for x:
+
+>>> A = A.tocsr()
+>>> b = rand(1000)
+>>> x = spsolve(A, b)
+
+Convert it to a dense matrix and solve, and check that the result
+is the same:
+
+>>> x_ = solve(A.toarray(), b)
+
+Now we can compute the norm of the error with:
+
+>>> err = norm(x-x_)
+>>> err < 1e-10
+True
+
+It should be small :)
+
+
+Example 2
+---------
+
+Construct a matrix in COO format:
+
+>>> from scipy import sparse
+>>> from numpy import array
+>>> I = array([0,3,1,0])
+>>> J = array([0,3,1,2])
+>>> V = array([4,5,7,9])
+>>> A = sparse.coo_matrix((V,(I,J)),shape=(4,4))
+
+Notice that the indices do not need to be sorted.
+
+Duplicate (i,j) entries are summed when converting to CSR or CSC.
+
+>>> I = array([0,0,1,3,1,0,0])
+>>> J = array([0,2,1,3,1,0,0])
+>>> V = array([1,1,1,1,1,1,1])
+>>> B = sparse.coo_matrix((V,(I,J)),shape=(4,4)).tocsr()
+
+This is useful for constructing finite-element stiffness and mass matrices.
+
+Further details
+---------------
+
+CSR column indices are not necessarily sorted. Likewise for CSC row
+indices. Use the .sorted_indices() and .sort_indices() methods when
+sorted indices are required (e.g., when passing data to other libraries).
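+
+A minimal sketch of the two methods:
+
+>>> from scipy.sparse import csr_matrix
+>>> M = csr_matrix([[0, 1, 2]])
+>>> M.sort_indices()        # sorts the indices of each row, in place
+>>> S = M.sorted_indices()  # returns a copy with sorted indices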
+
+"""
+
+# Original code by Travis Oliphant.
+# Modified and extended by Ed Schofield, Robert Cimrman,
+# Nathan Bell, and Jake Vanderplas.
+
+import warnings as _warnings
+
+from ._base import *
+from ._csr import *
+from ._csc import *
+from ._lil import *
+from ._dok import *
+from ._coo import *
+from ._dia import *
+from ._bsr import *
+from ._construct import *
+from ._extract import *
+from ._matrix_io import *
+
+from ._arrays import (
+    csr_array, csc_array, lil_array, dok_array, coo_array, dia_array, bsr_array
+)
+
+# For backward compatibility with v0.19.
+from . import csgraph
+
+# Deprecated namespaces, to be removed in v2.0.0
+from . import (
+    base, bsr, compressed, construct, coo, csc, csr, data, dia, dok, extract,
+    lil, sparsetools, sputils
+)
+
+__all__ = [s for s in dir() if not s.startswith('_')]
+
+# Filter PendingDeprecationWarning for np.matrix introduced with numpy 1.15
+_warnings.filterwarnings('ignore', message='the matrix subclass is not the recommended way')
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/_arrays.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/_arrays.py
new file mode 100644
index 00000000..5234339b
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/_arrays.py
@@ -0,0 +1,98 @@
+from ._bsr import bsr_matrix
+from ._coo import coo_matrix
+from ._csc import csc_matrix
+from ._csr import csr_matrix
+from ._dia import dia_matrix
+from ._dok import dok_matrix
+from ._lil import lil_matrix
+
+
+class _sparray:
+    """This class provides a base class for all sparse arrays.
+
+    It cannot be instantiated.  Most of the work is provided by subclasses.
+    """
+    _is_array = True
+
+    @property
+    def _bsr_container(self):
+        return bsr_array
+
+    @property
+    def _coo_container(self):
+        return coo_array
+
+    @property
+    def _csc_container(self):
+        return csc_array
+
+    @property
+    def _csr_container(self):
+        return csr_array
+
+    @property
+    def _dia_container(self):
+        return dia_array
+
+    @property
+    def _dok_container(self):
+        return dok_array
+
+    @property
+    def _lil_container(self):
+        return lil_array
+
+    # Restore elementwise multiplication
+    def __mul__(self, *args, **kwargs):
+        return self.multiply(*args, **kwargs)
+
+    def __rmul__(self, *args, **kwargs):
+        return self.multiply(*args, **kwargs)
+
+    # Restore elementwise power
+    def __pow__(self, *args, **kwargs):
+        return self.power(*args, **kwargs)
+
+
+def _matrix_doc_to_array(docstr):
+    # For optimized builds with stripped docstrings
+    if docstr is None:
+        return None
+    return docstr.replace('matrix', 'array').replace('matrices', 'arrays')
+
+
+class bsr_array(_sparray, bsr_matrix):
+    pass
+
+
+class coo_array(_sparray, coo_matrix):
+    pass
+
+
+class csc_array(_sparray, csc_matrix):
+    pass
+
+
+class csr_array(_sparray, csr_matrix):
+    pass
+
+
+class dia_array(_sparray, dia_matrix):
+    pass
+
+
+class dok_array(_sparray, dok_matrix):
+    pass
+
+
+class lil_array(_sparray, lil_matrix):
+    pass
+
+
+bsr_array.__doc__ = _matrix_doc_to_array(bsr_matrix.__doc__)
+coo_array.__doc__ = _matrix_doc_to_array(coo_matrix.__doc__)
+csc_array.__doc__ = _matrix_doc_to_array(csc_matrix.__doc__)
+csr_array.__doc__ = _matrix_doc_to_array(csr_matrix.__doc__)
+dia_array.__doc__ = _matrix_doc_to_array(dia_matrix.__doc__)
+dok_array.__doc__ = _matrix_doc_to_array(dok_matrix.__doc__)
+lil_array.__doc__ = _matrix_doc_to_array(lil_matrix.__doc__)
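+
+
+# A minimal usage sketch (illustrative, not part of the class definitions
+# above): the operator overrides make `**` element-wise for the array
+# classes, while the matrix classes interpret it as matrix power.
+#
+#   >>> from scipy.sparse import csr_array, csr_matrix
+#   >>> (csr_array([[1, 2], [0, 3]]) ** 2).toarray()   # element-wise power
+#   >>> (csr_matrix([[1, 2], [0, 3]]) ** 2).toarray()  # matrix power (A @ A)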
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/_base.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/_base.py
new file mode 100644
index 00000000..74215fb5
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/_base.py
@@ -0,0 +1,1331 @@
+"""Base class for sparse matrices"""
+from warnings import warn
+
+import numpy as np
+
+from ._sputils import (asmatrix, check_reshape_kwargs, check_shape,
+                       get_sum_dtype, isdense, isintlike, isscalarlike,
+                       matrix, validateaxis)
+
+__all__ = ['spmatrix', 'isspmatrix', 'issparse',
+           'SparseWarning', 'SparseEfficiencyWarning']
+
+
+class SparseWarning(Warning):
+    pass
+
+
+class SparseFormatWarning(SparseWarning):
+    pass
+
+
+class SparseEfficiencyWarning(SparseWarning):
+    pass
+
+
+# The formats that we might potentially understand.
+_formats = {'csc': [0, "Compressed Sparse Column"],
+            'csr': [1, "Compressed Sparse Row"],
+            'dok': [2, "Dictionary Of Keys"],
+            'lil': [3, "List of Lists"],
+            'dod': [4, "Dictionary of Dictionaries"],
+            'sss': [5, "Symmetric Sparse Skyline"],
+            'coo': [6, "COOrdinate"],
+            'lba': [7, "Linpack BAnded"],
+            'egd': [8, "Ellpack-itpack Generalized Diagonal"],
+            'dia': [9, "DIAgonal"],
+            'bsr': [10, "Block Sparse Row"],
+            'msr': [11, "Modified compressed Sparse Row"],
+            'bsc': [12, "Block Sparse Column"],
+            'msc': [13, "Modified compressed Sparse Column"],
+            'ssk': [14, "Symmetric SKyline"],
+            'nsk': [15, "Nonsymmetric SKyline"],
+            'jad': [16, "JAgged Diagonal"],
+            'uss': [17, "Unsymmetric Sparse Skyline"],
+            'vbr': [18, "Variable Block Row"],
+            'und': [19, "Undefined"]
+            }
+
+
+# These univariate ufuncs preserve zeros.
+_ufuncs_with_fixed_point_at_zero = frozenset([
+        np.sin, np.tan, np.arcsin, np.arctan, np.sinh, np.tanh, np.arcsinh,
+        np.arctanh, np.rint, np.sign, np.expm1, np.log1p, np.deg2rad,
+        np.rad2deg, np.floor, np.ceil, np.trunc, np.sqrt])
+
+
+MAXPRINT = 50
+
+
+class spmatrix:
+    """ This class provides a base class for all sparse matrices.  It
+    cannot be instantiated.  Most of the work is provided by subclasses.
+    """
+
+    __array_priority__ = 10.1
+    ndim = 2
+
+    @property
+    def _bsr_container(self):
+        from ._bsr import bsr_matrix
+        return bsr_matrix
+
+    @property
+    def _coo_container(self):
+        from ._coo import coo_matrix
+        return coo_matrix
+
+    @property
+    def _csc_container(self):
+        from ._csc import csc_matrix
+        return csc_matrix
+
+    @property
+    def _csr_container(self):
+        from ._csr import csr_matrix
+        return csr_matrix
+
+    @property
+    def _dia_container(self):
+        from ._dia import dia_matrix
+        return dia_matrix
+
+    @property
+    def _dok_container(self):
+        from ._dok import dok_matrix
+        return dok_matrix
+
+    @property
+    def _lil_container(self):
+        from ._lil import lil_matrix
+        return lil_matrix
+
+    _is_array = False
+
+    def __init__(self, maxprint=MAXPRINT):
+        self._shape = None
+        if self.__class__.__name__ == 'spmatrix':
+            raise ValueError("This class is not intended"
+                             " to be instantiated directly.")
+        self.maxprint = maxprint
+
+    def set_shape(self, shape):
+        """See `reshape`."""
+        # Make sure copy is False since this is in place
+        # Make sure format is unchanged because we are doing a __dict__ swap
+        new_matrix = self.reshape(shape, copy=False).asformat(self.format)
+        self.__dict__ = new_matrix.__dict__
+
+    def get_shape(self):
+        """Get shape of a matrix."""
+        return self._shape
+
+    shape = property(fget=get_shape, fset=set_shape)
+
+    def reshape(self, *args, **kwargs):
+        """reshape(self, shape, order='C', copy=False)
+
+        Gives a new shape to a sparse matrix without changing its data.
+
+        Parameters
+        ----------
+        shape : length-2 tuple of ints
+            The new shape should be compatible with the original shape.
+        order : {'C', 'F'}, optional
+            Read the elements using this index order. 'C' means to read and
+            write the elements using C-like index order; e.g., read entire first
+            row, then second row, etc. 'F' means to read and write the elements
+            using Fortran-like index order; e.g., read entire first column, then
+            second column, etc.
+        copy : bool, optional
+            Indicates whether or not attributes of self should be copied
+            whenever possible. The degree to which attributes are copied varies
+            depending on the type of sparse matrix being used.
+
+        Returns
+        -------
+        reshaped_matrix : sparse matrix
+            A sparse matrix with the given `shape`, not necessarily of the same
+            format as the current object.
+
+        See Also
+        --------
+        numpy.matrix.reshape : NumPy's implementation of 'reshape' for
+                               matrices
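+
+        Examples
+        --------
+        A small sketch (the returned format may differ from the input's):
+
+        >>> from scipy.sparse import csr_matrix
+        >>> A = csr_matrix([[1, 0, 2, 0]])
+        >>> A.reshape((2, 2)).toarray()
+        array([[1, 0],
+               [2, 0]])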
+        """
+        # If the shape already matches, don't bother doing an actual reshape
+        # Otherwise, the default is to convert to COO and use its reshape
+        shape = check_shape(args, self.shape)
+        order, copy = check_reshape_kwargs(kwargs)
+        if shape == self.shape:
+            if copy:
+                return self.copy()
+            else:
+                return self
+
+        return self.tocoo(copy=copy).reshape(shape, order=order, copy=False)
+
+    def resize(self, shape):
+        """Resize the matrix in-place to dimensions given by ``shape``
+
+        Any elements that lie within the new shape will remain at the same
+        indices, while non-zero elements lying outside the new shape are
+        removed.
+
+        Parameters
+        ----------
+        shape : (int, int)
+            number of rows and columns in the new matrix
+
+        Notes
+        -----
+        The semantics are not identical to `numpy.ndarray.resize` or
+        `numpy.resize`. Here, the same data will be maintained at each index
+        before and after reshape, if that index is within the new bounds. In
+        numpy, resizing maintains contiguity of the array, moving elements
+        around in the logical matrix but not within a flattened representation.
+
+        We give no guarantees about whether the underlying data attributes
+        (arrays, etc.) will be modified in place or replaced with new objects.
+        """
+        # As an inplace operation, this requires implementation in each format.
+        raise NotImplementedError(
+            '{}.resize is not implemented'.format(type(self).__name__))
+
+    def astype(self, dtype, casting='unsafe', copy=True):
+        """Cast the matrix elements to a specified type.
+
+        Parameters
+        ----------
+        dtype : string or numpy dtype
+            Typecode or data-type to which to cast the data.
+        casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+            Controls what kind of data casting may occur.
+            Defaults to 'unsafe' for backwards compatibility.
+            'no' means the data types should not be cast at all.
+            'equiv' means only byte-order changes are allowed.
+            'safe' means only casts which can preserve values are allowed.
+            'same_kind' means only safe casts or casts within a kind,
+            like float64 to float32, are allowed.
+            'unsafe' means any data conversions may be done.
+        copy : bool, optional
+            If `copy` is `False`, the result might share some memory with this
+            matrix. If `copy` is `True`, it is guaranteed that the result and
+            this matrix do not share any memory.
+        """
+
+        dtype = np.dtype(dtype)
+        if self.dtype != dtype:
+            return self.tocsr().astype(
+                dtype, casting=casting, copy=copy).asformat(self.format)
+        elif copy:
+            return self.copy()
+        else:
+            return self
+
+    @classmethod
+    def _ascontainer(cls, X, **kwargs):
+        if cls._is_array:
+            return np.asarray(X, **kwargs)
+        else:
+            return asmatrix(X, **kwargs)
+
+    @classmethod
+    def _container(cls, X, **kwargs):
+        if cls._is_array:
+            return np.array(X, **kwargs)
+        else:
+            return matrix(X, **kwargs)
+
+    def asfptype(self):
+        """Upcast matrix to a floating point format (if necessary)"""
+
+        fp_types = ['f', 'd', 'F', 'D']
+
+        if self.dtype.char in fp_types:
+            return self
+        else:
+            for fp_type in fp_types:
+                if self.dtype <= np.dtype(fp_type):
+                    return self.astype(fp_type)
+
+            raise TypeError('cannot upcast [%s] to a floating '
+                            'point format' % self.dtype.name)
+
+    def __iter__(self):
+        for r in range(self.shape[0]):
+            yield self[r, :]
+
+    def getmaxprint(self):
+        """Maximum number of elements to display when printed."""
+        return self.maxprint
+
+    def count_nonzero(self):
+        """Number of non-zero entries, equivalent to
+
+        np.count_nonzero(a.toarray())
+
+        Unlike getnnz() and the nnz property, which return the number of stored
+        entries (the length of the data attribute), this method counts the
+        actual number of non-zero entries in data.
+        """
+        raise NotImplementedError("count_nonzero not implemented for %s." %
+                                  self.__class__.__name__)
+
+    def getnnz(self, axis=None):
+        """Number of stored values, including explicit zeros.
+
+        Parameters
+        ----------
+        axis : None, 0, or 1
+            Select between the number of values across the whole matrix, in
+            each column, or in each row.
+
+        See also
+        --------
+        count_nonzero : Number of non-zero entries
+        """
+        raise NotImplementedError("getnnz not implemented for %s." %
+                                  self.__class__.__name__)
+
+    @property
+    def nnz(self):
+        """Number of stored values, including explicit zeros.
+
+        See also
+        --------
+        count_nonzero : Number of non-zero entries
+        """
+        return self.getnnz()
+
+    def getformat(self):
+        """Format of a matrix representation as a string."""
+        return getattr(self, 'format', 'und')
+
+    def __repr__(self):
+        _, format_name = _formats[self.getformat()]
+        sparse_cls = 'array' if self._is_array else 'matrix'
+        return f"<%dx%d sparse {sparse_cls} of type '%s'\n" \
+               "\twith %d stored elements in %s format>" % \
+               (self.shape + (self.dtype.type, self.nnz, format_name))
+
+    def __str__(self):
+        maxprint = self.getmaxprint()
+
+        A = self.tocoo()
+
+        # helper function, outputs "(i,j)  v"
+        def tostr(row, col, data):
+            triples = zip(list(zip(row, col)), data)
+            return '\n'.join([('  %s\t%s' % t) for t in triples])
+
+        if self.nnz > maxprint:
+            half = maxprint // 2
+            out = tostr(A.row[:half], A.col[:half], A.data[:half])
+            out += "\n  :\t:\n"
+            half = maxprint - maxprint//2
+            out += tostr(A.row[-half:], A.col[-half:], A.data[-half:])
+        else:
+            out = tostr(A.row, A.col, A.data)
+
+        return out
+
+    def __bool__(self):  # Simple -- other ideas?
+        if self.shape == (1, 1):
+            return self.nnz != 0
+        else:
+            raise ValueError("The truth value of an array with more than one "
+                             "element is ambiguous. Use a.any() or a.all().")
+    __nonzero__ = __bool__
+
+    # What should len(sparse) return? For consistency with dense matrices,
+    # perhaps it should be the number of rows?  But for some uses the number of
+    # non-zeros is more important.  For now, raise an exception!
+    def __len__(self):
+        raise TypeError("sparse matrix length is ambiguous; use getnnz()"
+                        " or shape[0]")
+
+    def asformat(self, format, copy=False):
+        """Return this matrix in the passed format.
+
+        Parameters
+        ----------
+        format : {str, None}
+            The desired matrix format ("csr", "csc", "lil", "dok", "array", ...)
+            or None for no conversion.
+        copy : bool, optional
+            If True, the result is guaranteed to not share data with self.
+
+        Returns
+        -------
+        A : This matrix in the passed format.
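+
+        Examples
+        --------
+        A small sketch:
+
+        >>> from scipy.sparse import csr_matrix
+        >>> A = csr_matrix([[1, 0], [0, 2]])
+        >>> A.asformat('coo').format
+        'coo'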
+        """
+        if format is None or format == self.format:
+            if copy:
+                return self.copy()
+            else:
+                return self
+        else:
+            try:
+                convert_method = getattr(self, 'to' + format)
+            except AttributeError as e:
+                raise ValueError('Format {} is unknown.'.format(format)) from e
+
+            # Forward the copy kwarg, if it's accepted.
+            try:
+                return convert_method(copy=copy)
+            except TypeError:
+                return convert_method()
+
+    ###################################################################
+    #  NOTE: All arithmetic operations use csr_matrix by default.
+    # Therefore a new sparse matrix format just needs to define a
+    # .tocsr() method to provide arithmetic support. Any of these
+    # methods can be overridden for efficiency.
+    ####################################################################
+
+    def multiply(self, other):
+        """Point-wise multiplication by another matrix
+        """
+        return self.tocsr().multiply(other)
+
+    def maximum(self, other):
+        """Element-wise maximum between this and another matrix."""
+        return self.tocsr().maximum(other)
+
+    def minimum(self, other):
+        """Element-wise minimum between this and another matrix."""
+        return self.tocsr().minimum(other)
+
+    def dot(self, other):
+        """Ordinary dot product
+
+        Examples
+        --------
+        >>> import numpy as np
+        >>> from scipy.sparse import csr_matrix
+        >>> A = csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
+        >>> v = np.array([1, 0, -1])
+        >>> A.dot(v)
+        array([ 1, -3, -1], dtype=int64)
+
+        """
+        if np.isscalar(other):
+            return self * other
+        else:
+            return self @ other
+
+    def power(self, n, dtype=None):
+        """Element-wise power."""
+        return self.tocsr().power(n, dtype=dtype)
+
+    def __eq__(self, other):
+        return self.tocsr().__eq__(other)
+
+    def __ne__(self, other):
+        return self.tocsr().__ne__(other)
+
+    def __lt__(self, other):
+        return self.tocsr().__lt__(other)
+
+    def __gt__(self, other):
+        return self.tocsr().__gt__(other)
+
+    def __le__(self, other):
+        return self.tocsr().__le__(other)
+
+    def __ge__(self, other):
+        return self.tocsr().__ge__(other)
+
+    def __abs__(self):
+        return abs(self.tocsr())
+
+    def __round__(self, ndigits=0):
+        return round(self.tocsr(), ndigits=ndigits)
+
+    def _add_sparse(self, other):
+        return self.tocsr()._add_sparse(other)
+
+    def _add_dense(self, other):
+        return self.tocoo()._add_dense(other)
+
+    def _sub_sparse(self, other):
+        return self.tocsr()._sub_sparse(other)
+
+    def _sub_dense(self, other):
+        return self.todense() - other
+
+    def _rsub_dense(self, other):
+        # note: this can't be replaced by other + (-self) for unsigned types
+        return other - self.todense()
+
+    def __add__(self, other):  # self + other
+        if isscalarlike(other):
+            if other == 0:
+                return self.copy()
+            # Now we would add this scalar to every element.
+            raise NotImplementedError('adding a nonzero scalar to a '
+                                      'sparse matrix is not supported')
+        elif isspmatrix(other):
+            if other.shape != self.shape:
+                raise ValueError("inconsistent shapes")
+            return self._add_sparse(other)
+        elif isdense(other):
+            other = np.broadcast_to(other, self.shape)
+            return self._add_dense(other)
+        else:
+            return NotImplemented
+
+    def __radd__(self,other):  # other + self
+        return self.__add__(other)
+
+    def __sub__(self, other):  # self - other
+        if isscalarlike(other):
+            if other == 0:
+                return self.copy()
+            raise NotImplementedError('subtracting a nonzero scalar from a '
+                                      'sparse matrix is not supported')
+        elif isspmatrix(other):
+            if other.shape != self.shape:
+                raise ValueError("inconsistent shapes")
+            return self._sub_sparse(other)
+        elif isdense(other):
+            other = np.broadcast_to(other, self.shape)
+            return self._sub_dense(other)
+        else:
+            return NotImplemented
+
+    def __rsub__(self,other):  # other - self
+        if isscalarlike(other):
+            if other == 0:
+                return -self.copy()
+            raise NotImplementedError('subtracting a sparse matrix from a '
+                                      'nonzero scalar is not supported')
+        elif isdense(other):
+            other = np.broadcast_to(other, self.shape)
+            return self._rsub_dense(other)
+        else:
+            return NotImplemented
+
+    def _mul_dispatch(self, other):
+        """`np.matrix`-compatible mul, i.e. `dot` or `NotImplemented`
+
+        interpret other and call one of the following
+        self._mul_scalar()
+        self._mul_vector()
+        self._mul_multivector()
+        self._mul_sparse_matrix()
+        """
+        # This method has to be different from `__mul__` because it is also
+        # called by sparse array classes via matmul, while their mul is
+        # elementwise.
+
+        M, N = self.shape
+
+        if other.__class__ is np.ndarray:
+            # Fast path for the most common case
+            if other.shape == (N,):
+                return self._mul_vector(other)
+            elif other.shape == (N, 1):
+                return self._mul_vector(other.ravel()).reshape(M, 1)
+            elif other.ndim == 2 and other.shape[0] == N:
+                return self._mul_multivector(other)
+
+        if isscalarlike(other):
+            # scalar value
+            return self._mul_scalar(other)
+
+        if issparse(other):
+            if self.shape[1] != other.shape[0]:
+                raise ValueError('dimension mismatch')
+            return self._mul_sparse_matrix(other)
+
+        # If it's a list or other array-like, treat it like a matrix
+        other_a = np.asanyarray(other)
+
+        if other_a.ndim == 0 and other_a.dtype == np.object_:
+            # Not interpretable as an array; return NotImplemented so that
+            # other's __rmul__ can kick in if that's implemented.
+            return NotImplemented
+
+        try:
+            other.shape
+        except AttributeError:
+            other = other_a
+
+        if other.ndim == 1 or other.ndim == 2 and other.shape[1] == 1:
+            # dense row or column vector
+            if other.shape != (N,) and other.shape != (N, 1):
+                raise ValueError('dimension mismatch')
+
+            result = self._mul_vector(np.ravel(other))
+
+            if isinstance(other, np.matrix):
+                result = self._ascontainer(result)
+
+            if other.ndim == 2 and other.shape[1] == 1:
+                # If 'other' was an (nx1) column vector, reshape the result
+                result = result.reshape(-1, 1)
+
+            return result
+
+        elif other.ndim == 2:
+            ##
+            # dense 2D array or matrix ("multivector")
+
+            if other.shape[0] != self.shape[1]:
+                raise ValueError('dimension mismatch')
+
+            result = self._mul_multivector(np.asarray(other))
+
+            if isinstance(other, np.matrix):
+                result = self._ascontainer(result)
+
+            return result
+
+        else:
+            raise ValueError('could not interpret dimensions')
+
+    def __mul__(self, other):
+        return self._mul_dispatch(other)
+
+    # by default, use CSR for __mul__ handlers
+    def _mul_scalar(self, other):
+        return self.tocsr()._mul_scalar(other)
+
+    def _mul_vector(self, other):
+        return self.tocsr()._mul_vector(other)
+
+    def _mul_multivector(self, other):
+        return self.tocsr()._mul_multivector(other)
+
+    def _mul_sparse_matrix(self, other):
+        return self.tocsr()._mul_sparse_matrix(other)
+
+    def _rmul_dispatch(self, other):
+        if isscalarlike(other):
+            return self._mul_scalar(other)
+        else:
+            # Don't use asarray unless we have to
+            try:
+                tr = other.transpose()
+            except AttributeError:
+                tr = np.asarray(other).transpose()
+            ret = self.transpose()._mul_dispatch(tr)
+            if ret is NotImplemented:
+                return NotImplemented
+            return ret.transpose()
+
+    def __rmul__(self, other):  # other * self
+        return self._rmul_dispatch(other)
+
+    #######################
+    # matmul (@) operator #
+    #######################
+
+    def __matmul__(self, other):
+        if isscalarlike(other):
+            raise ValueError("Scalar operands are not allowed, "
+                             "use '*' instead")
+        return self._mul_dispatch(other)
+
+    def __rmatmul__(self, other):
+        if isscalarlike(other):
+            raise ValueError("Scalar operands are not allowed, "
+                             "use '*' instead")
+        return self._rmul_dispatch(other)
+
+    ####################
+    # Other Arithmetic #
+    ####################
+
+    def _divide(self, other, true_divide=False, rdivide=False):
+        if isscalarlike(other):
+            if rdivide:
+                if true_divide:
+                    return np.true_divide(other, self.todense())
+                else:
+                    return np.divide(other, self.todense())
+
+            if true_divide and np.can_cast(self.dtype, np.float_):
+                return self.astype(np.float_)._mul_scalar(1./other)
+            else:
+                r = self._mul_scalar(1./other)
+
+                scalar_dtype = np.asarray(other).dtype
+                if (np.issubdtype(self.dtype, np.integer) and
+                        np.issubdtype(scalar_dtype, np.integer)):
+                    return r.astype(self.dtype)
+                else:
+                    return r
+
+        elif isdense(other):
+            if not rdivide:
+                if true_divide:
+                    return np.true_divide(self.todense(), other)
+                else:
+                    return np.divide(self.todense(), other)
+            else:
+                if true_divide:
+                    return np.true_divide(other, self.todense())
+                else:
+                    return np.divide(other, self.todense())
+        elif isspmatrix(other):
+            if rdivide:
+                return other._divide(self, true_divide, rdivide=False)
+
+            self_csr = self.tocsr()
+            if true_divide and np.can_cast(self.dtype, np.float_):
+                return self_csr.astype(np.float_)._divide_sparse(other)
+            else:
+                return self_csr._divide_sparse(other)
+        else:
+            return NotImplemented
+
+    def __truediv__(self, other):
+        return self._divide(other, true_divide=True)
+
+    def __div__(self, other):
+        # Always do true division
+        return self._divide(other, true_divide=True)
+
+    def __rtruediv__(self, other):
+        # Implementing this as the inverse would be too magical -- bail out
+        return NotImplemented
+
+    def __rdiv__(self, other):
+        # Implementing this as the inverse would be too magical -- bail out
+        return NotImplemented
+
+    def __neg__(self):
+        return -self.tocsr()
+
+    def __iadd__(self, other):
+        return NotImplemented
+
+    def __isub__(self, other):
+        return NotImplemented
+
+    def __imul__(self, other):
+        return NotImplemented
+
+    def __idiv__(self, other):
+        return self.__itruediv__(other)
+
+    def __itruediv__(self, other):
+        return NotImplemented
+
+    def __pow__(self, other):
+        M, N = self.shape[0], self.shape[1]
+        if M != N:
+            raise TypeError('matrix is not square')
+
+        if isintlike(other):
+            other = int(other)
+            if other < 0:
+                raise ValueError('exponent must be >= 0')
+
+            if other == 0:
+                from ._construct import eye
+                E = eye(M, dtype=self.dtype)
+                if self._is_array:
+                    from ._arrays import dia_array
+                    E = dia_array(E)
+                return E
+
+            elif other == 1:
+                return self.copy()
+            else:
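+                # Exponentiation by squaring: compute A**(n//2) once,
+                # square it, and multiply in one extra factor of A when
+                # the exponent is odd.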
+                tmp = self.__pow__(other//2)
+                if (other % 2):
+                    return self @ tmp @ tmp
+                else:
+                    return tmp @ tmp
+        elif isscalarlike(other):
+            raise ValueError('exponent must be an integer')
+        else:
+            return NotImplemented
+
+    def __getattr__(self, attr):
+        if attr == 'A':
+            if self._is_array:
+                warn(np.VisibleDeprecationWarning(
+                    "Please use `.todense()` instead"
+                ))
+            return self.toarray()
+        elif attr == 'T':
+            return self.transpose()
+        elif attr == 'H':
+            if self._is_array:
+                warn(np.VisibleDeprecationWarning(
+                    "Please use `.conj().T` instead"
+                ))
+            return self.getH()
+        elif attr == 'real':
+            return self._real()
+        elif attr == 'imag':
+            return self._imag()
+        elif attr == 'size':
+            return self.getnnz()
+        else:
+            raise AttributeError(attr + " not found")
+
+    def transpose(self, axes=None, copy=False):
+        """
+        Reverses the dimensions of the sparse matrix.
+
+        Parameters
+        ----------
+        axes : None, optional
+            This argument is in the signature *solely* for NumPy
+            compatibility reasons. Do not pass in anything except
+            for the default value.
+        copy : bool, optional
+            Indicates whether or not attributes of `self` should be
+            copied whenever possible. The degree to which attributes
+            are copied varies depending on the type of sparse matrix
+            being used.
+
+        Returns
+        -------
+        p : `self` with the dimensions reversed.
+
+        See Also
+        --------
+        numpy.matrix.transpose : NumPy's implementation of 'transpose'
+                                 for matrices
+        """
+        return self.tocsr(copy=copy).transpose(axes=axes, copy=False)
+
+    def conj(self, copy=True):
+        """Element-wise complex conjugation.
+
+        If the matrix is of non-complex data type and `copy` is False,
+        this method does nothing and the data is not copied.
+
+        Parameters
+        ----------
+        copy : bool, optional
+            If True, the result is guaranteed to not share data with self.
+
+        Returns
+        -------
+        A : The element-wise complex conjugate.
+
+        """
+        if np.issubdtype(self.dtype, np.complexfloating):
+            return self.tocsr(copy=copy).conj(copy=False)
+        elif copy:
+            return self.copy()
+        else:
+            return self
+
+    def conjugate(self, copy=True):
+        return self.conj(copy=copy)
+
+    conjugate.__doc__ = conj.__doc__
+
+    # Renamed conjtranspose() -> getH() for compatibility with dense matrices
+    def getH(self):
+        """Return the Hermitian transpose of this matrix.
+
+        See Also
+        --------
+        numpy.matrix.getH : NumPy's implementation of `getH` for matrices
+        """
+        return self.transpose().conj()
+
+    def _real(self):
+        return self.tocsr()._real()
+
+    def _imag(self):
+        return self.tocsr()._imag()
+
+    def nonzero(self):
+        """nonzero indices
+
+        Returns a tuple of arrays (row,col) containing the indices
+        of the non-zero elements of the matrix.
+
+        Examples
+        --------
+        >>> from scipy.sparse import csr_matrix
+        >>> A = csr_matrix([[1,2,0],[0,0,3],[4,0,5]])
+        >>> A.nonzero()
+        (array([0, 0, 1, 2, 2]), array([0, 1, 2, 0, 2]))
+
+        """
+
+        # convert to COOrdinate format
+        A = self.tocoo()
+        nz_mask = A.data != 0
+        return (A.row[nz_mask], A.col[nz_mask])
+
+    def getcol(self, j):
+        """Returns a copy of column j of the matrix, as an (m x 1) sparse
+        matrix (column vector).
+        """
+        # Spmatrix subclasses should override this method for efficiency.
+        # Post-multiply by a (n x 1) column vector 'a' containing all zeros
+        # except for a_j = 1
+        n = self.shape[1]
+        if j < 0:
+            j += n
+        if j < 0 or j >= n:
+            raise IndexError("index out of bounds")
+        col_selector = self._csc_container(([1], [[j], [0]]),
+                                           shape=(n, 1), dtype=self.dtype)
+        return self @ col_selector
+
+    def getrow(self, i):
+        """Returns a copy of row i of the matrix, as a (1 x n) sparse
+        matrix (row vector).
+        """
+        # Spmatrix subclasses should override this method for efficiency.
+        # Pre-multiply by a (1 x m) row vector 'a' containing all zeros
+        # except for a_i = 1
+        m = self.shape[0]
+        if i < 0:
+            i += m
+        if i < 0 or i >= m:
+            raise IndexError("index out of bounds")
+        row_selector = self._csr_container(([1], [[0], [i]]),
+                                           shape=(1, m), dtype=self.dtype)
+        return row_selector @ self
+
+    # The following dunder methods cannot be implemented.
+    #
+    # def __array__(self):
+    #     # Sparse matrices rely on NumPy wrapping them in object arrays under
+    #     # the hood to make unary ufuncs work on them. So we cannot raise
+    #     # TypeError here - which would be handy to not give users object
+    #     # arrays they probably don't want (they're looking for `.toarray()`).
+    #     #
+    #     # Conversion with `toarray()` would also break things because of the
+    #     # behavior discussed above, plus we want to avoid densification by
+    #     # accident because that can too easily blow up memory.
+    #
+    # def __array_ufunc__(self):
+    #     # We cannot implement __array_ufunc__ due to mismatching semantics.
+    #     # See gh-7707 and gh-7349 for details.
+    #
+    # def __array_function__(self):
+    #     # We cannot implement __array_function__ due to mismatching semantics.
+    #     # See gh-10362 for details.
+
+    def todense(self, order=None, out=None):
+        """
+        Return a dense matrix representation of this matrix.
+
+        Parameters
+        ----------
+        order : {'C', 'F'}, optional
+            Whether to store multi-dimensional data in C (row-major)
+            or Fortran (column-major) order in memory. The default
+            is 'None', which provides no ordering guarantees.
+            Cannot be specified in conjunction with the `out`
+            argument.
+
+        out : ndarray, 2-D, optional
+            If specified, uses this array (or `numpy.matrix`) as the
+            output buffer instead of allocating a new array to
+            return. The provided array must have the same shape and
+            dtype as the sparse matrix on which you are calling the
+            method.
+
+        Returns
+        -------
+        arr : numpy.matrix, 2-D
+            A NumPy matrix object with the same shape and containing
+            the same data represented by the sparse matrix, with the
+            requested memory order. If `out` was passed and was an
+            array (rather than a `numpy.matrix`), it will be filled
+            with the appropriate values and returned wrapped in a
+            `numpy.matrix` object that shares the same memory.
+        """
+        return self._ascontainer(self.toarray(order=order, out=out))
+
+    def toarray(self, order=None, out=None):
+        """
+        Return a dense ndarray representation of this matrix.
+
+        Parameters
+        ----------
+        order : {'C', 'F'}, optional
+            Whether to store multidimensional data in C (row-major)
+            or Fortran (column-major) order in memory. The default
+            is 'None', which provides no ordering guarantees.
+            Cannot be specified in conjunction with the `out`
+            argument.
+
+        out : ndarray, 2-D, optional
+            If specified, uses this array as the output buffer
+            instead of allocating a new array to return. The provided
+            array must have the same shape and dtype as the sparse
+            matrix on which you are calling the method. For most
+            sparse types, `out` is required to be memory contiguous
+            (either C or Fortran ordered).
+
+        Returns
+        -------
+        arr : ndarray, 2-D
+            An array with the same shape and containing the same
+            data represented by the sparse matrix, with the requested
+            memory order. If `out` was passed, the same object is
+            returned after being modified in-place to contain the
+            appropriate values.
+        """
+        return self.tocoo(copy=False).toarray(order=order, out=out)
+
+    # Any sparse matrix format deriving from spmatrix must define one of
+    # tocsr or tocoo. The other conversion methods may be implemented for
+    # efficiency, but are not required.
+    def tocsr(self, copy=False):
+        """Convert this matrix to Compressed Sparse Row format.
+
+        With copy=False, the data/indices may be shared between this matrix and
+        the resultant csr_matrix.
+        """
+        return self.tocoo(copy=copy).tocsr(copy=False)
+
+    def todok(self, copy=False):
+        """Convert this matrix to Dictionary Of Keys format.
+
+        With copy=False, the data/indices may be shared between this matrix and
+        the resultant dok_matrix.
+        """
+        return self.tocoo(copy=copy).todok(copy=False)
+
+    def tocoo(self, copy=False):
+        """Convert this matrix to COOrdinate format.
+
+        With copy=False, the data/indices may be shared between this matrix and
+        the resultant coo_matrix.
+        """
+        return self.tocsr(copy=False).tocoo(copy=copy)
+
+    def tolil(self, copy=False):
+        """Convert this matrix to List of Lists format.
+
+        With copy=False, the data/indices may be shared between this matrix and
+        the resultant lil_matrix.
+        """
+        return self.tocsr(copy=False).tolil(copy=copy)
+
+    def todia(self, copy=False):
+        """Convert this matrix to sparse DIAgonal format.
+
+        With copy=False, the data/indices may be shared between this matrix and
+        the resultant dia_matrix.
+        """
+        return self.tocoo(copy=copy).todia(copy=False)
+
+    def tobsr(self, blocksize=None, copy=False):
+        """Convert this matrix to Block Sparse Row format.
+
+        With copy=False, the data/indices may be shared between this matrix and
+        the resultant bsr_matrix.
+
+        When blocksize=(R, C) is provided, it will be used for construction of
+        the bsr_matrix.
+        """
+        return self.tocsr(copy=False).tobsr(blocksize=blocksize, copy=copy)
+
+    def tocsc(self, copy=False):
+        """Convert this matrix to Compressed Sparse Column format.
+
+        With copy=False, the data/indices may be shared between this matrix and
+        the resultant csc_matrix.
+        """
+        return self.tocsr(copy=copy).tocsc(copy=False)
+
+    def copy(self):
+        """Returns a copy of this matrix.
+
+        No data/indices will be shared between the returned value and current
+        matrix.
+        """
+        return self.__class__(self, copy=True)
+
+    def sum(self, axis=None, dtype=None, out=None):
+        """
+        Sum the matrix elements over a given axis.
+
+        Parameters
+        ----------
+        axis : {-2, -1, 0, 1, None} optional
+            Axis along which the sum is computed. The default is to
+            compute the sum of all the matrix elements, returning a scalar
+            (i.e., `axis` = `None`).
+        dtype : dtype, optional
+            The type of the returned matrix and of the accumulator in which
+            the elements are summed.  The dtype of `a` is used by default
+            unless `a` has an integer dtype of less precision than the default
+            platform integer.  In that case, if `a` is signed then the platform
+            integer is used while if `a` is unsigned then an unsigned integer
+            of the same precision as the platform integer is used.
+
+            .. versionadded:: 0.18.0
+
+        out : np.matrix, optional
+            Alternative output matrix in which to place the result. It must
+            have the same shape as the expected output, but the type of the
+            output values will be cast if necessary.
+
+            .. versionadded:: 0.18.0
+
+        Returns
+        -------
+        sum_along_axis : np.matrix
+            A matrix with the same shape as `self`, with the specified
+            axis removed.
+
+        See Also
+        --------
+        numpy.matrix.sum : NumPy's implementation of 'sum' for matrices
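+
+        Examples
+        --------
+        A small sketch:
+
+        >>> from scipy.sparse import csr_matrix
+        >>> A = csr_matrix([[1, 2], [3, 4]])
+        >>> A.sum()
+        10
+        >>> A.sum(axis=0)
+        matrix([[4, 6]])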
+
+        """
+        validateaxis(axis)
+
+        # We use multiplication by a matrix of ones to achieve this.
+        # For some sparse matrix formats more efficient methods are
+        # possible -- these should override this function.
+        m, n = self.shape
+
+        # Mimic numpy's casting.
+        res_dtype = get_sum_dtype(self.dtype)
+
+        if axis is None:
+            # sum over rows and columns
+            return (
+                self @ self._ascontainer(np.ones((n, 1), dtype=res_dtype))
+            ).sum(dtype=dtype, out=out)
+
+        if axis < 0:
+            axis += 2
+
+        # axis = 0 or 1 now
+        if axis == 0:
+            # sum over columns
+            ret = self._ascontainer(
+                np.ones((1, m), dtype=res_dtype)
+            ) @ self
+        else:
+            # sum over rows
+            ret = self @ self._ascontainer(
+                np.ones((n, 1), dtype=res_dtype)
+            )
+
+        if out is not None and out.shape != ret.shape:
+            raise ValueError("dimensions do not match")
+
+        return ret.sum(axis=axis, dtype=dtype, out=out)
+
+    def mean(self, axis=None, dtype=None, out=None):
+        """
+        Compute the arithmetic mean along the specified axis.
+
+        Returns the average of the matrix elements. The average is taken
+        over all elements in the matrix by default, otherwise over the
+        specified axis. `float64` intermediate and return values are used
+        for integer inputs.
+
+        Parameters
+        ----------
+        axis : {-2, -1, 0, 1, None} optional
+            Axis along which the mean is computed. The default is to compute
+            the mean of all elements in the matrix (i.e., `axis` = `None`).
+        dtype : data-type, optional
+            Type to use in computing the mean. For integer inputs, the default
+            is `float64`; for floating point inputs, it is the same as the
+            input dtype.
+
+            .. versionadded:: 0.18.0
+
+        out : np.matrix, optional
+            Alternative output matrix in which to place the result. It must
+            have the same shape as the expected output, but the type of the
+            output values will be cast if necessary.
+
+            .. versionadded:: 0.18.0
+
+        Returns
+        -------
+        m : np.matrix
+
+        See Also
+        --------
+        numpy.matrix.mean : NumPy's implementation of 'mean' for matrices
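+
+        Examples
+        --------
+        A small sketch:
+
+        >>> from scipy.sparse import csr_matrix
+        >>> A = csr_matrix([[1, 2], [3, 4]])
+        >>> A.mean()
+        2.5
+        >>> A.mean(axis=1)
+        matrix([[1.5],
+                [3.5]])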
+
+        """
+        def _is_integral(dtype):
+            return (np.issubdtype(dtype, np.integer) or
+                    np.issubdtype(dtype, np.bool_))
+
+        validateaxis(axis)
+
+        res_dtype = self.dtype.type
+        integral = _is_integral(self.dtype)
+
+        # output dtype
+        if dtype is None:
+            if integral:
+                res_dtype = np.float64
+        else:
+            res_dtype = np.dtype(dtype).type
+
+        # intermediate dtype for summation
+        inter_dtype = np.float64 if integral else res_dtype
+        inter_self = self.astype(inter_dtype)
+
+        if axis is None:
+            return (inter_self / np.array(
+                self.shape[0] * self.shape[1]))\
+                .sum(dtype=res_dtype, out=out)
+
+        if axis < 0:
+            axis += 2
+
+        # axis = 0 or 1 now
+        if axis == 0:
+            return (inter_self * (1.0 / self.shape[0])).sum(
+                axis=0, dtype=res_dtype, out=out)
+        else:
+            return (inter_self * (1.0 / self.shape[1])).sum(
+                axis=1, dtype=res_dtype, out=out)
+
+    def diagonal(self, k=0):
+        """Returns the kth diagonal of the matrix.
+
+        Parameters
+        ----------
+        k : int, optional
+            Which diagonal to get, corresponding to elements a[i, i+k].
+            Default: 0 (the main diagonal).
+
+            .. versionadded:: 1.0
+
+        See also
+        --------
+        numpy.diagonal : Equivalent numpy function.
+
+        Examples
+        --------
+        >>> from scipy.sparse import csr_matrix
+        >>> A = csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
+        >>> A.diagonal()
+        array([1, 0, 5])
+        >>> A.diagonal(k=1)
+        array([2, 3])
+        """
+        return self.tocsr().diagonal(k=k)
+
+    def trace(self, offset=0):
+        """Returns the sum along diagonals of the sparse matrix.
+
+        Parameters
+        ----------
+        offset : int, optional
+            Which diagonal to get, corresponding to elements a[i, i+offset].
+            Default: 0 (the main diagonal).
+
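+        Examples
+        --------
+        A small sketch:
+
+        >>> from scipy.sparse import csr_matrix
+        >>> A = csr_matrix([[1, 2], [3, 4]])
+        >>> A.trace()
+        5
+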
+        """
+        return self.diagonal(k=offset).sum()
+
+    def setdiag(self, values, k=0):
+        """
+        Set diagonal or off-diagonal elements of the array.
+
+        Parameters
+        ----------
+        values : array_like
+            New values of the diagonal elements.
+
+            Values may have any length. If the diagonal is longer than values,
+            then the remaining diagonal entries will not be set. If values are
+            longer than the diagonal, then the remaining values are ignored.
+
+            If a scalar value is given, all of the diagonal is set to it.
+
+        k : int, optional
+            Which off-diagonal to set, corresponding to elements a[i,i+k].
+            Default: 0 (the main diagonal).
+
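+        Examples
+        --------
+        A small sketch (LIL supports efficient element assignment):
+
+        >>> from scipy.sparse import lil_matrix
+        >>> A = lil_matrix((3, 3))
+        >>> A.setdiag([1, 2, 3])
+        >>> A.toarray()
+        array([[1., 0., 0.],
+               [0., 2., 0.],
+               [0., 0., 3.]])
+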
+        """
+        M, N = self.shape
+        if (k > 0 and k >= N) or (k < 0 and -k >= M):
+            raise ValueError("k exceeds matrix dimensions")
+        self._setdiag(np.asarray(values), k)
+
+    def _setdiag(self, values, k):
+        M, N = self.shape
+        if k < 0:
+            if values.ndim == 0:
+                # broadcast
+                max_index = min(M+k, N)
+                for i in range(max_index):
+                    self[i - k, i] = values
+            else:
+                max_index = min(M+k, N, len(values))
+                if max_index <= 0:
+                    return
+                for i, v in enumerate(values[:max_index]):
+                    self[i - k, i] = v
+        else:
+            if values.ndim == 0:
+                # broadcast
+                max_index = min(M, N-k)
+                for i in range(max_index):
+                    self[i, i + k] = values
+            else:
+                max_index = min(M, N-k, len(values))
+                if max_index <= 0:
+                    return
+                for i, v in enumerate(values[:max_index]):
+                    self[i, i + k] = v
+
+    def _process_toarray_args(self, order, out):
+        if out is not None:
+            if order is not None:
+                raise ValueError('order cannot be specified if out '
+                                 'is not None')
+            if out.shape != self.shape or out.dtype != self.dtype:
+                raise ValueError('out array must be same dtype and shape as '
+                                 'sparse matrix')
+            out[...] = 0.
+            return out
+        else:
+            return np.zeros(self.shape, dtype=self.dtype, order=order)
+
+
+def isspmatrix(x):
+    """Is x of a sparse matrix type?
+
+    Parameters
+    ----------
+    x
+        object to check for being a sparse matrix
+
+    Returns
+    -------
+    bool
+        True if x is a sparse matrix, False otherwise
+
+    Notes
+    -----
+    issparse and isspmatrix are aliases for the same function.
+
+    Examples
+    --------
+    >>> from scipy.sparse import csr_matrix, isspmatrix
+    >>> isspmatrix(csr_matrix([[5]]))
+    True
+
+    >>> from scipy.sparse import isspmatrix
+    >>> isspmatrix(5)
+    False
+    """
+    return isinstance(x, spmatrix)
+
+
+issparse = isspmatrix
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/_bsr.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/_bsr.py
new file mode 100644
index 00000000..8f9838bc
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/_bsr.py
@@ -0,0 +1,721 @@
+"""Compressed Block Sparse Row matrix format"""
+
+__docformat__ = "restructuredtext en"
+
+__all__ = ['bsr_matrix', 'isspmatrix_bsr']
+
+from warnings import warn
+
+import numpy as np
+
+from ._data import _data_matrix, _minmax_mixin
+from ._compressed import _cs_matrix
+from ._base import isspmatrix, _formats, spmatrix
+from ._sputils import (isshape, getdtype, getdata, to_native, upcast,
+                       get_index_dtype, check_shape)
+from . import _sparsetools
+from ._sparsetools import (bsr_matvec, bsr_matvecs, csr_matmat_maxnnz,
+                           bsr_matmat, bsr_transpose, bsr_sort_indices,
+                           bsr_tocsr)
+
+
+class bsr_matrix(_cs_matrix, _minmax_mixin):
+    """Block Sparse Row matrix
+
+    This can be instantiated in several ways:
+        bsr_matrix(D, [blocksize=(R,C)])
+            where D is a dense matrix or 2-D ndarray.
+
+        bsr_matrix(S, [blocksize=(R,C)])
+            with another sparse matrix S (equivalent to S.tobsr())
+
+        bsr_matrix((M, N), [blocksize=(R,C), dtype])
+            to construct an empty matrix with shape (M, N)
+            dtype is optional, defaulting to dtype='d'.
+
+        bsr_matrix((data, ij), [blocksize=(R,C), shape=(M, N)])
+            where ``data`` and ``ij`` satisfy ``a[ij[0, k], ij[1, k]] = data[k]``
+
+        bsr_matrix((data, indices, indptr), [shape=(M, N)])
+            is the standard BSR representation where the block column
+            indices for row i are stored in ``indices[indptr[i]:indptr[i+1]]``
+            and their corresponding block values are stored in
+            ``data[indptr[i]:indptr[i+1]]``. If the shape parameter is not
+            supplied, the matrix dimensions are inferred from the index arrays.
+
+    Attributes
+    ----------
+    dtype : dtype
+        Data type of the matrix
+    shape : 2-tuple
+        Shape of the matrix
+    ndim : int
+        Number of dimensions (this is always 2)
+    nnz
+        Number of stored values, including explicit zeros
+    data
+        Data array of the matrix
+    indices
+        BSR format index array
+    indptr
+        BSR format index pointer array
+    blocksize
+        Block size of the matrix
+    has_sorted_indices
+        Whether indices are sorted
+
+    Notes
+    -----
+    Sparse matrices can be used in arithmetic operations: they support
+    addition, subtraction, multiplication, division, and matrix power.
+
+    **Summary of BSR format**
+
+    The Block Compressed Row (BSR) format is very similar to the Compressed
+    Sparse Row (CSR) format. BSR is appropriate for sparse matrices with dense
+    submatrices, such as in the last example below. Block matrices often arise in
+    vector-valued finite element discretizations. In such cases, BSR is
+    considerably more efficient than CSR and CSC for many sparse arithmetic
+    operations.
+
+    **Blocksize**
+
+    The blocksize (R,C) must evenly divide the shape of the matrix (M,N).
+    That is, R and C must satisfy ``M % R == 0`` and ``N % C == 0``.
+
+    If no blocksize is specified, a simple heuristic is applied to determine
+    an appropriate blocksize.
+
+    Examples
+    --------
+    >>> from scipy.sparse import bsr_matrix
+    >>> import numpy as np
+    >>> bsr_matrix((3, 4), dtype=np.int8).toarray()
+    array([[0, 0, 0, 0],
+           [0, 0, 0, 0],
+           [0, 0, 0, 0]], dtype=int8)
+
+    >>> row = np.array([0, 0, 1, 2, 2, 2])
+    >>> col = np.array([0, 2, 2, 0, 1, 2])
+    >>> data = np.array([1, 2, 3, 4, 5, 6])
+    >>> bsr_matrix((data, (row, col)), shape=(3, 3)).toarray()
+    array([[1, 0, 2],
+           [0, 0, 3],
+           [4, 5, 6]])
+
+    >>> indptr = np.array([0, 2, 3, 6])
+    >>> indices = np.array([0, 2, 2, 0, 1, 2])
+    >>> data = np.array([1, 2, 3, 4, 5, 6]).repeat(4).reshape(6, 2, 2)
+    >>> bsr_matrix((data, indices, indptr), shape=(6, 6)).toarray()
+    array([[1, 1, 0, 0, 2, 2],
+           [1, 1, 0, 0, 2, 2],
+           [0, 0, 0, 0, 3, 3],
+           [0, 0, 0, 0, 3, 3],
+           [4, 4, 5, 5, 6, 6],
+           [4, 4, 5, 5, 6, 6]])
+
+    """
+    format = 'bsr'
+
+    def __init__(self, arg1, shape=None, dtype=None, copy=False, blocksize=None):
+        _data_matrix.__init__(self)
+
+        if isspmatrix(arg1):
+            if isspmatrix_bsr(arg1) and copy:
+                arg1 = arg1.copy()
+            else:
+                arg1 = arg1.tobsr(blocksize=blocksize)
+            self._set_self(arg1)
+
+        elif isinstance(arg1,tuple):
+            if isshape(arg1):
+                # it's a tuple of matrix dimensions (M,N)
+                self._shape = check_shape(arg1)
+                M,N = self.shape
+                # process blocksize
+                if blocksize is None:
+                    blocksize = (1,1)
+                else:
+                    if not isshape(blocksize):
+                        raise ValueError('invalid blocksize=%s' % (blocksize,))
+                    blocksize = tuple(blocksize)
+                self.data = np.zeros((0,) + blocksize, getdtype(dtype, default=float))
+
+                R,C = blocksize
+                if (M % R) != 0 or (N % C) != 0:
+                    raise ValueError('shape must be multiple of blocksize')
+
+                # Select index dtype large enough to pass array and
+                # scalar parameters to sparsetools
+                idx_dtype = get_index_dtype(maxval=max(M//R, N//C, R, C))
+                self.indices = np.zeros(0, dtype=idx_dtype)
+                self.indptr = np.zeros(M//R + 1, dtype=idx_dtype)
+
+            elif len(arg1) == 2:
+                # (data,(row,col)) format
+                self._set_self(
+                    self._coo_container(arg1, dtype=dtype, shape=shape).tobsr(
+                        blocksize=blocksize
+                    )
+                )
+
+            elif len(arg1) == 3:
+                # (data,indices,indptr) format
+                (data, indices, indptr) = arg1
+
+                # Select index dtype large enough to pass array and
+                # scalar parameters to sparsetools
+                maxval = 1
+                if shape is not None:
+                    maxval = max(shape)
+                if blocksize is not None:
+                    maxval = max(maxval, max(blocksize))
+                idx_dtype = get_index_dtype((indices, indptr), maxval=maxval,
+                                            check_contents=True)
+                self.indices = np.array(indices, copy=copy, dtype=idx_dtype)
+                self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype)
+                self.data = getdata(data, copy=copy, dtype=dtype)
+                if self.data.ndim != 3:
+                    raise ValueError(
+                        'BSR data must be 3-dimensional, got shape=%s' % (
+                            self.data.shape,))
+                if blocksize is not None:
+                    if not isshape(blocksize):
+                        raise ValueError('invalid blocksize=%s' % (blocksize,))
+                    if tuple(blocksize) != self.data.shape[1:]:
+                        raise ValueError('mismatching blocksize=%s vs %s' % (
+                            blocksize, self.data.shape[1:]))
+            else:
+                raise ValueError('unrecognized bsr_matrix constructor usage')
+        else:
+            # must be dense
+            try:
+                arg1 = np.asarray(arg1)
+            except Exception as e:
+                raise ValueError("unrecognized form for"
+                        " %s_matrix constructor" % self.format) from e
+            arg1 = self._coo_container(
+                arg1, dtype=dtype
+            ).tobsr(blocksize=blocksize)
+            self._set_self(arg1)
+
+        if shape is not None:
+            self._shape = check_shape(shape)
+        else:
+            if self.shape is None:
+                # shape not already set, try to infer dimensions
+                try:
+                    M = len(self.indptr) - 1
+                    N = self.indices.max() + 1
+                except Exception as e:
+                    raise ValueError('unable to infer matrix dimensions') from e
+                else:
+                    R,C = self.blocksize
+                    self._shape = check_shape((M*R,N*C))
+
+        if self.shape is None:
+            if shape is None:
+                # TODO infer shape here
+                raise ValueError('need to infer shape')
+            else:
+                self._shape = check_shape(shape)
+
+        if dtype is not None:
+            self.data = self.data.astype(dtype, copy=False)
+
+        self.check_format(full_check=False)
+
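+    # Constructor sketch (illustrative): the (M, N) + blocksize branch above
+    # allocates empty arrays, and a blocksize that does not evenly divide the
+    # shape is rejected early:
+    #
+    #   >>> from scipy.sparse import bsr_matrix
+    #   >>> bsr_matrix((6, 6), blocksize=(2, 3)).nnz
+    #   0
+    #   >>> bsr_matrix((6, 6), blocksize=(4, 4))
+    #   Traceback (most recent call last):
+    #       ...
+    #   ValueError: shape must be multiple of blocksize
+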
+    def check_format(self, full_check=True):
+        """check whether the matrix format is valid
+
+            *Parameters*:
+                full_check:
+                    True  - rigorous check, O(N) operations : default
+                    False - basic check, O(1) operations
+
+        """
+        M,N = self.shape
+        R,C = self.blocksize
+
+        # index arrays should have integer data types
+        if self.indptr.dtype.kind != 'i':
+            warn("indptr array has non-integer dtype (%s)"
+                    % self.indptr.dtype.name)
+        if self.indices.dtype.kind != 'i':
+            warn("indices array has non-integer dtype (%s)"
+                    % self.indices.dtype.name)
+
+        idx_dtype = get_index_dtype((self.indices, self.indptr))
+        self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
+        self.indices = np.asarray(self.indices, dtype=idx_dtype)
+        self.data = to_native(self.data)
+
+        # check array shapes
+        if self.indices.ndim != 1 or self.indptr.ndim != 1:
+            raise ValueError("indices, and indptr should be 1-D")
+        if self.data.ndim != 3:
+            raise ValueError("data should be 3-D")
+
+        # check index pointer
+        if (len(self.indptr) != M//R + 1):
+            raise ValueError("index pointer size (%d) should be (%d)" %
+                                (len(self.indptr), M//R + 1))
+        if (self.indptr[0] != 0):
+            raise ValueError("index pointer should start with 0")
+
+        # check index and data arrays
+        if (len(self.indices) != len(self.data)):
+            raise ValueError("indices and data should have the same size")
+        if (self.indptr[-1] > len(self.indices)):
+            raise ValueError("Last value of index pointer should be less than "
+                                "the size of index and data arrays")
+
+        self.prune()
+
+        if full_check:
+            # check format validity (more expensive)
+            if self.nnz > 0:
+                if self.indices.max() >= N//C:
+                    raise ValueError("column index values must be < %d (now max %d)" % (N//C, self.indices.max()))
+                if self.indices.min() < 0:
+                    raise ValueError("column index values must be >= 0")
+                if np.diff(self.indptr).min() < 0:
+                    raise ValueError("index pointer values must form a "
+                                        "non-decreasing sequence")
+
+        # if not self.has_sorted_indices():
+        #    warn('Indices were not in sorted order. Sorting indices.')
+        #    self.sort_indices(check_first=False)
+
+    def _get_blocksize(self):
+        return self.data.shape[1:]
+    blocksize = property(fget=_get_blocksize)
+
+    def getnnz(self, axis=None):
+        if axis is not None:
+            raise NotImplementedError("getnnz over an axis is not implemented "
+                                      "for BSR format")
+        R,C = self.blocksize
+        return int(self.indptr[-1] * R * C)
+
+    getnnz.__doc__ = spmatrix.getnnz.__doc__
+
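+    # Worked example (illustrative): ``nnz`` counts every value of every
+    # stored block, so zeros inside a stored block are included:
+    #
+    #   >>> import numpy as np
+    #   >>> from scipy.sparse import bsr_matrix
+    #   >>> A = bsr_matrix(np.array([[1, 0], [0, 0]]), blocksize=(2, 2))
+    #   >>> A.nnz        # one 2x2 block is stored -> 4 values
+    #   4
+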
+    def __repr__(self):
+        format = _formats[self.getformat()][1]
+        return ("<%dx%d sparse matrix of type '%s'\n"
+                "\twith %d stored elements (blocksize = %dx%d) in %s format>" %
+                (self.shape + (self.dtype.type, self.nnz) + self.blocksize +
+                 (format,)))
+
+    def diagonal(self, k=0):
+        rows, cols = self.shape
+        if k <= -rows or k >= cols:
+            return np.empty(0, dtype=self.data.dtype)
+        R, C = self.blocksize
+        y = np.zeros(min(rows + min(k, 0), cols - max(k, 0)),
+                     dtype=upcast(self.dtype))
+        _sparsetools.bsr_diagonal(k, rows // R, cols // C, R, C,
+                                  self.indptr, self.indices,
+                                  np.ravel(self.data), y)
+        return y
+
+    diagonal.__doc__ = spmatrix.diagonal.__doc__
+
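+    # Offset convention sketch (illustrative): k > 0 reads above the main
+    # diagonal, k < 0 below, matching numpy.diagonal:
+    #
+    #   >>> A = bsr_matrix(np.arange(1, 10).reshape(3, 3), blocksize=(1, 1))
+    #   >>> A.diagonal(1)
+    #   array([2, 6])
+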
+    ##########################
+    # NotImplemented methods #
+    ##########################
+
+    def __getitem__(self,key):
+        raise NotImplementedError
+
+    def __setitem__(self,key,val):
+        raise NotImplementedError
+
+    ######################
+    # Arithmetic methods #
+    ######################
+
+    def _add_dense(self, other):
+        return self.tocoo(copy=False)._add_dense(other)
+
+    def _mul_vector(self, other):
+        M,N = self.shape
+        R,C = self.blocksize
+
+        result = np.zeros(self.shape[0], dtype=upcast(self.dtype, other.dtype))
+
+        bsr_matvec(M//R, N//C, R, C,
+            self.indptr, self.indices, self.data.ravel(),
+            other, result)
+
+        return result
+
+    def _mul_multivector(self,other):
+        R,C = self.blocksize
+        M,N = self.shape
+        n_vecs = other.shape[1]  # number of column vectors
+
+        result = np.zeros((M,n_vecs), dtype=upcast(self.dtype,other.dtype))
+
+        bsr_matvecs(M//R, N//C, n_vecs, R, C,
+                self.indptr, self.indices, self.data.ravel(),
+                other.ravel(), result.ravel())
+
+        return result
+
+    def _mul_sparse_matrix(self, other):
+        M, K1 = self.shape
+        K2, N = other.shape
+
+        R,n = self.blocksize
+
+        # convert to this format
+        if isspmatrix_bsr(other):
+            C = other.blocksize[1]
+        else:
+            C = 1
+
+        from ._csr import isspmatrix_csr
+
+        if isspmatrix_csr(other) and n == 1:
+            other = other.tobsr(blocksize=(n,C), copy=False)  # lightweight conversion
+        else:
+            other = other.tobsr(blocksize=(n,C))
+
+        idx_dtype = get_index_dtype((self.indptr, self.indices,
+                                     other.indptr, other.indices))
+
+        bnnz = csr_matmat_maxnnz(M//R, N//C,
+                                 self.indptr.astype(idx_dtype),
+                                 self.indices.astype(idx_dtype),
+                                 other.indptr.astype(idx_dtype),
+                                 other.indices.astype(idx_dtype))
+
+        idx_dtype = get_index_dtype((self.indptr, self.indices,
+                                     other.indptr, other.indices),
+                                    maxval=bnnz)
+        indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
+        indices = np.empty(bnnz, dtype=idx_dtype)
+        data = np.empty(R*C*bnnz, dtype=upcast(self.dtype,other.dtype))
+
+        bsr_matmat(bnnz, M//R, N//C, R, C, n,
+                   self.indptr.astype(idx_dtype),
+                   self.indices.astype(idx_dtype),
+                   np.ravel(self.data),
+                   other.indptr.astype(idx_dtype),
+                   other.indices.astype(idx_dtype),
+                   np.ravel(other.data),
+                   indptr,
+                   indices,
+                   data)
+
+        data = data.reshape(-1,R,C)
+
+        # TODO eliminate zeros
+
+        return self._bsr_container(
+            (data, indices, indptr), shape=(M, N), blocksize=(R, C)
+        )
+
+    ######################
+    # Conversion methods #
+    ######################
+
+    def tobsr(self, blocksize=None, copy=False):
+        """Convert this matrix into Block Sparse Row Format.
+
+        With copy=False, the data/indices may be shared between this
+        matrix and the resultant bsr_matrix.
+
+        If blocksize=(R, C) is provided, it will be used to determine the
+        block size of the resulting bsr_matrix.
+        """
+        if blocksize not in [None, self.blocksize]:
+            return self.tocsr().tobsr(blocksize=blocksize)
+        if copy:
+            return self.copy()
+        else:
+            return self
+
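+    # Conversion sketch (illustrative): requesting a different blocksize
+    # round-trips through CSR; otherwise the matrix itself (or a copy) comes
+    # back:
+    #
+    #   >>> A = bsr_matrix(np.eye(4), blocksize=(2, 2))
+    #   >>> A.tobsr() is A
+    #   True
+    #   >>> A.tobsr(blocksize=(4, 4)).blocksize
+    #   (4, 4)
+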
+    def tocsr(self, copy=False):
+        M, N = self.shape
+        R, C = self.blocksize
+        nnz = self.nnz
+        idx_dtype = get_index_dtype((self.indptr, self.indices),
+                                    maxval=max(nnz, N))
+        indptr = np.empty(M + 1, dtype=idx_dtype)
+        indices = np.empty(nnz, dtype=idx_dtype)
+        data = np.empty(nnz, dtype=upcast(self.dtype))
+
+        bsr_tocsr(M // R,  # n_brow
+                  N // C,  # n_bcol
+                  R, C,
+                  self.indptr.astype(idx_dtype, copy=False),
+                  self.indices.astype(idx_dtype, copy=False),
+                  self.data,
+                  indptr,
+                  indices,
+                  data)
+        return self._csr_container((data, indices, indptr), shape=self.shape)
+
+    tocsr.__doc__ = spmatrix.tocsr.__doc__
+
+    def tocsc(self, copy=False):
+        return self.tocsr(copy=False).tocsc(copy=copy)
+
+    tocsc.__doc__ = spmatrix.tocsc.__doc__
+
+    def tocoo(self, copy=True):
+        """Convert this matrix to COOrdinate format.
+
+        When copy=False the data array will be shared between
+        this matrix and the resultant coo_matrix.
+        """
+
+        M,N = self.shape
+        R,C = self.blocksize
+
+        indptr_diff = np.diff(self.indptr)
+        if indptr_diff.dtype.itemsize > np.dtype(np.intp).itemsize:
+            # Check for potential overflow
+            indptr_diff_limited = indptr_diff.astype(np.intp)
+            if np.any(indptr_diff_limited != indptr_diff):
+                raise ValueError("Matrix too big to convert")
+            indptr_diff = indptr_diff_limited
+
+        row = (R * np.arange(M//R)).repeat(indptr_diff)
+        row = row.repeat(R*C).reshape(-1,R,C)
+        row += np.tile(np.arange(R).reshape(-1,1), (1,C))
+        row = row.reshape(-1)
+
+        col = (C * self.indices).repeat(R*C).reshape(-1,R,C)
+        col += np.tile(np.arange(C), (R,1))
+        col = col.reshape(-1)
+
+        data = self.data.reshape(-1)
+
+        if copy:
+            data = data.copy()
+
+        return self._coo_container(
+            (data, (row, col)), shape=self.shape
+        )
+
+    def toarray(self, order=None, out=None):
+        return self.tocoo(copy=False).toarray(order=order, out=out)
+
+    toarray.__doc__ = spmatrix.toarray.__doc__
+
+    def transpose(self, axes=None, copy=False):
+        if axes is not None:
+            raise ValueError(("Sparse matrices do not support "
+                              "an 'axes' parameter because swapping "
+                              "dimensions is the only logical permutation."))
+
+        R, C = self.blocksize
+        M, N = self.shape
+        NBLK = self.nnz//(R*C)
+
+        if self.nnz == 0:
+            return self._bsr_container((N, M), blocksize=(C, R),
+                                       dtype=self.dtype, copy=copy)
+
+        indptr = np.empty(N//C + 1, dtype=self.indptr.dtype)
+        indices = np.empty(NBLK, dtype=self.indices.dtype)
+        data = np.empty((NBLK, C, R), dtype=self.data.dtype)
+
+        bsr_transpose(M//R, N//C, R, C,
+                      self.indptr, self.indices, self.data.ravel(),
+                      indptr, indices, data.ravel())
+
+        return self._bsr_container((data, indices, indptr),
+                                   shape=(N, M), copy=copy)
+
+    transpose.__doc__ = spmatrix.transpose.__doc__
+
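+    # Transpose sketch (illustrative): stored blocks of shape (R, C) become
+    # blocks of shape (C, R) in the result:
+    #
+    #   >>> A = bsr_matrix(np.ones((2, 4)), blocksize=(2, 4))
+    #   >>> A.T.blocksize
+    #   (4, 2)
+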
+    ##############################################################
+    # methods that examine or modify the internal data structure #
+    ##############################################################
+
+    def eliminate_zeros(self):
+        """Remove zero elements in-place."""
+
+        if not self.nnz:
+            return  # nothing to do
+
+        R,C = self.blocksize
+        M,N = self.shape
+
+        mask = (self.data != 0).reshape(-1,R*C).sum(axis=1)  # nonzero blocks
+
+        nonzero_blocks = mask.nonzero()[0]
+
+        self.data[:len(nonzero_blocks)] = self.data[nonzero_blocks]
+
+        # modifies self.indptr and self.indices *in place*
+        _sparsetools.csr_eliminate_zeros(M//R, N//C, self.indptr,
+                                         self.indices, mask)
+        self.prune()
+
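+    # In-place cleanup sketch (illustrative): zeroing a stored block and then
+    # calling eliminate_zeros() drops the block itself:
+    #
+    #   >>> A = bsr_matrix(np.eye(2), blocksize=(2, 2))
+    #   >>> A.data[:] = 0
+    #   >>> A.eliminate_zeros()
+    #   >>> A.nnz
+    #   0
+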
+    def sum_duplicates(self):
+        """Eliminate duplicate matrix entries by adding them together
+
+        The is an *in place* operation
+        """
+        if self.has_canonical_format:
+            return
+        self.sort_indices()
+        R, C = self.blocksize
+        M, N = self.shape
+
+        # port of _sparsetools.csr_sum_duplicates
+        n_row = M // R
+        nnz = 0
+        row_end = 0
+        for i in range(n_row):
+            jj = row_end
+            row_end = self.indptr[i+1]
+            while jj < row_end:
+                j = self.indices[jj]
+                x = self.data[jj]
+                jj += 1
+                while jj < row_end and self.indices[jj] == j:
+                    x += self.data[jj]
+                    jj += 1
+                self.indices[nnz] = j
+                self.data[nnz] = x
+                nnz += 1
+            self.indptr[i+1] = nnz
+
+        self.prune()  # nnz may have changed
+        self.has_canonical_format = True
+
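+    # Duplicate-merging sketch (illustrative): two stored blocks with the
+    # same block column index are summed into one:
+    #
+    #   >>> data = np.ones((2, 1, 1))
+    #   >>> A = bsr_matrix((data, [0, 0], [0, 2]), shape=(1, 1))
+    #   >>> A.sum_duplicates()
+    #   >>> A.toarray()
+    #   array([[2.]])
+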
+    def sort_indices(self):
+        """Sort the indices of this matrix *in place*
+        """
+        if self.has_sorted_indices:
+            return
+
+        R,C = self.blocksize
+        M,N = self.shape
+
+        bsr_sort_indices(M//R, N//C, R, C, self.indptr, self.indices, self.data.ravel())
+
+        self.has_sorted_indices = True
+
+    def prune(self):
+        """ Remove empty space after all non-zero elements.
+        """
+
+        R,C = self.blocksize
+        M,N = self.shape
+
+        if len(self.indptr) != M//R + 1:
+            raise ValueError("index pointer has invalid length")
+
+        bnnz = self.indptr[-1]
+
+        if len(self.indices) < bnnz:
+            raise ValueError("indices array has too few elements")
+        if len(self.data) < bnnz:
+            raise ValueError("data array has too few elements")
+
+        self.data = self.data[:bnnz]
+        self.indices = self.indices[:bnnz]
+
+    # utility functions
+    def _binopt(self, other, op, in_shape=None, out_shape=None):
+        """Apply the binary operation fn to two sparse matrices."""
+
+        # Ideally we'd take the GCDs of the blocksize dimensions
+        # and explode self and other to match.
+        other = self.__class__(other, blocksize=self.blocksize)
+
+        # e.g. bsr_plus_bsr, etc.
+        fn = getattr(_sparsetools, self.format + op + self.format)
+
+        R,C = self.blocksize
+
+        max_bnnz = len(self.data) + len(other.data)
+        idx_dtype = get_index_dtype((self.indptr, self.indices,
+                                     other.indptr, other.indices),
+                                    maxval=max_bnnz)
+        indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
+        indices = np.empty(max_bnnz, dtype=idx_dtype)
+
+        bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']
+        if op in bool_ops:
+            data = np.empty(R*C*max_bnnz, dtype=np.bool_)
+        else:
+            data = np.empty(R*C*max_bnnz, dtype=upcast(self.dtype,other.dtype))
+
+        fn(self.shape[0]//R, self.shape[1]//C, R, C,
+           self.indptr.astype(idx_dtype),
+           self.indices.astype(idx_dtype),
+           self.data,
+           other.indptr.astype(idx_dtype),
+           other.indices.astype(idx_dtype),
+           np.ravel(other.data),
+           indptr,
+           indices,
+           data)
+
+        actual_bnnz = indptr[-1]
+        indices = indices[:actual_bnnz]
+        data = data[:R*C*actual_bnnz]
+
+        if actual_bnnz < max_bnnz/2:
+            indices = indices.copy()
+            data = data.copy()
+
+        data = data.reshape(-1,R,C)
+
+        return self.__class__((data, indices, indptr), shape=self.shape)
+
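+    # Dispatch sketch (illustrative): sparse-sparse arithmetic such as
+    # ``A + B`` resolves to ``_binopt(other, '_plus_')``, which first coerces
+    # ``other`` to A's blocksize:
+    #
+    #   >>> A = bsr_matrix(np.eye(2), blocksize=(2, 2))
+    #   >>> B = bsr_matrix(np.eye(2), blocksize=(1, 1))
+    #   >>> (A + B).blocksize
+    #   (2, 2)
+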
+    # needed by _data_matrix
+    def _with_data(self, data, copy=True):
+        """Returns a matrix with the same sparsity structure as self,
+        but with different data. By default the structure arrays
+        (i.e. .indptr and .indices) are copied.
+        """
+        if copy:
+            return self.__class__(
+                (data, self.indices.copy(), self.indptr.copy()),
+                shape=self.shape, dtype=data.dtype)
+        else:
+            return self.__class__((data, self.indices, self.indptr),
+                                  shape=self.shape, dtype=data.dtype)
+
+#    # these functions are used by the parent class
+#    # to remove redundancy between bsc_matrix and bsr_matrix
+#    def _swap(self,x):
+#        """swap the members of x if this is a column-oriented matrix
+#        """
+#        return (x[0],x[1])
+
+
+def isspmatrix_bsr(x):
+    """Is x of a bsr_matrix type?
+
+    Parameters
+    ----------
+    x
+        object to check for being a bsr matrix
+
+    Returns
+    -------
+    bool
+        True if x is a bsr matrix, False otherwise
+
+    Examples
+    --------
+    >>> from scipy.sparse import bsr_matrix, isspmatrix_bsr
+    >>> isspmatrix_bsr(bsr_matrix([[5]]))
+    True
+
+    >>> from scipy.sparse import bsr_matrix, csr_matrix, isspmatrix_bsr
+    >>> isspmatrix_bsr(csr_matrix([[5]]))
+    False
+    """
+    from ._arrays import bsr_array
+    return isinstance(x, (bsr_matrix, bsr_array))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/_compressed.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/_compressed.py
new file mode 100644
index 00000000..a0d2532a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/_compressed.py
@@ -0,0 +1,1318 @@
+"""Base class for sparse matrix formats using compressed storage."""
+__all__ = []
+
+from warnings import warn
+import operator
+
+import numpy as np
+from scipy._lib._util import _prune_array
+
+from ._base import spmatrix, isspmatrix, SparseEfficiencyWarning
+from ._data import _data_matrix, _minmax_mixin
+from . import _sparsetools
+from ._sparsetools import (get_csr_submatrix, csr_sample_offsets, csr_todense,
+                           csr_sample_values, csr_row_index, csr_row_slice,
+                           csr_column_index1, csr_column_index2)
+from ._index import IndexMixin
+from ._sputils import (upcast, upcast_char, to_native, isdense, isshape,
+                       getdtype, isscalarlike, isintlike, get_index_dtype,
+                       downcast_intp_index, get_sum_dtype, check_shape,
+                       is_pydata_spmatrix)
+
+
+class _cs_matrix(_data_matrix, _minmax_mixin, IndexMixin):
+    """base matrix class for compressed row- and column-oriented matrices"""
+
+    def __init__(self, arg1, shape=None, dtype=None, copy=False):
+        _data_matrix.__init__(self)
+
+        if isspmatrix(arg1):
+            if arg1.format == self.format and copy:
+                arg1 = arg1.copy()
+            else:
+                arg1 = arg1.asformat(self.format)
+            self._set_self(arg1)
+
+        elif isinstance(arg1, tuple):
+            if isshape(arg1):
+                # It's a tuple of matrix dimensions (M, N)
+                # create empty matrix
+                self._shape = check_shape(arg1)
+                M, N = self.shape
+                # Select index dtype large enough to pass array and
+                # scalar parameters to sparsetools
+                idx_dtype = get_index_dtype(maxval=max(M, N))
+                self.data = np.zeros(0, getdtype(dtype, default=float))
+                self.indices = np.zeros(0, idx_dtype)
+                self.indptr = np.zeros(self._swap((M, N))[0] + 1,
+                                       dtype=idx_dtype)
+            else:
+                if len(arg1) == 2:
+                    # (data, ij) format
+                    other = self.__class__(
+                        self._coo_container(arg1, shape=shape, dtype=dtype)
+                    )
+                    self._set_self(other)
+                elif len(arg1) == 3:
+                    # (data, indices, indptr) format
+                    (data, indices, indptr) = arg1
+
+                    # Select index dtype large enough to pass array and
+                    # scalar parameters to sparsetools
+                    maxval = None
+                    if shape is not None:
+                        maxval = max(shape)
+                    idx_dtype = get_index_dtype((indices, indptr),
+                                                maxval=maxval,
+                                                check_contents=True)
+
+                    self.indices = np.array(indices, copy=copy,
+                                            dtype=idx_dtype)
+                    self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype)
+                    self.data = np.array(data, copy=copy, dtype=dtype)
+                else:
+                    raise ValueError("unrecognized {}_matrix "
+                                     "constructor usage".format(self.format))
+
+        else:
+            # must be dense
+            try:
+                arg1 = np.asarray(arg1)
+            except Exception as e:
+                raise ValueError("unrecognized {}_matrix constructor usage"
+                                 "".format(self.format)) from e
+            self._set_self(self.__class__(
+                self._coo_container(arg1, dtype=dtype)
+            ))
+
+        # Read matrix dimensions given, if any
+        if shape is not None:
+            self._shape = check_shape(shape)
+        else:
+            if self.shape is None:
+                # shape not already set, try to infer dimensions
+                try:
+                    major_dim = len(self.indptr) - 1
+                    minor_dim = self.indices.max() + 1
+                except Exception as e:
+                    raise ValueError('unable to infer matrix dimensions') from e
+                else:
+                    self._shape = check_shape(self._swap((major_dim,
+                                                          minor_dim)))
+
+        if dtype is not None:
+            self.data = self.data.astype(dtype, copy=False)
+
+        self.check_format(full_check=False)
+
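+    # Construction sketch (illustrative), using the CSR subclass to exercise
+    # the (data, indices, indptr) branch above:
+    #
+    #   >>> from scipy.sparse import csr_matrix
+    #   >>> import numpy as np
+    #   >>> A = csr_matrix(([1., 2.], [0, 2], [0, 1, 2]), shape=(2, 3))
+    #   >>> A.toarray()
+    #   array([[1., 0., 0.],
+    #          [0., 0., 2.]])
+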
+    def getnnz(self, axis=None):
+        if axis is None:
+            return int(self.indptr[-1])
+        else:
+            if axis < 0:
+                axis += 2
+            axis, _ = self._swap((axis, 1 - axis))
+            _, N = self._swap(self.shape)
+            if axis == 0:
+                return np.bincount(downcast_intp_index(self.indices),
+                                   minlength=N)
+            elif axis == 1:
+                return np.diff(self.indptr)
+            raise ValueError('axis out of bounds')
+
+    getnnz.__doc__ = spmatrix.getnnz.__doc__
+
+    def _set_self(self, other, copy=False):
+        """take the member variables of other and assign them to self"""
+
+        if copy:
+            other = other.copy()
+
+        self.data = other.data
+        self.indices = other.indices
+        self.indptr = other.indptr
+        self._shape = check_shape(other.shape)
+
+    def check_format(self, full_check=True):
+        """check whether the matrix format is valid
+
+        Parameters
+        ----------
+        full_check : bool, optional
+            If `True`, rigorous check, O(N) operations. Otherwise
+            basic check, O(1) operations (default True).
+        """
+        # use _swap to determine proper bounds
+        major_name, minor_name = self._swap(('row', 'column'))
+        major_dim, minor_dim = self._swap(self.shape)
+
+        # index arrays should have integer data types
+        if self.indptr.dtype.kind != 'i':
+            warn("indptr array has non-integer dtype ({})"
+                 "".format(self.indptr.dtype.name), stacklevel=3)
+        if self.indices.dtype.kind != 'i':
+            warn("indices array has non-integer dtype ({})"
+                 "".format(self.indices.dtype.name), stacklevel=3)
+
+        idx_dtype = get_index_dtype((self.indptr, self.indices))
+        self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
+        self.indices = np.asarray(self.indices, dtype=idx_dtype)
+        self.data = to_native(self.data)
+
+        # check array shapes
+        for x in [self.data.ndim, self.indices.ndim, self.indptr.ndim]:
+            if x != 1:
+                raise ValueError('data, indices, and indptr should be 1-D')
+
+        # check index pointer
+        if (len(self.indptr) != major_dim + 1):
+            raise ValueError("index pointer size ({}) should be ({})"
+                             "".format(len(self.indptr), major_dim + 1))
+        if (self.indptr[0] != 0):
+            raise ValueError("index pointer should start with 0")
+
+        # check index and data arrays
+        if (len(self.indices) != len(self.data)):
+            raise ValueError("indices and data should have the same size")
+        if (self.indptr[-1] > len(self.indices)):
+            raise ValueError("Last value of index pointer should be less than "
+                             "the size of index and data arrays")
+
+        self.prune()
+
+        if full_check:
+            # check format validity (more expensive)
+            if self.nnz > 0:
+                if self.indices.max() >= minor_dim:
+                    raise ValueError("{} index values must be < {}"
+                                     "".format(minor_name, minor_dim))
+                if self.indices.min() < 0:
+                    raise ValueError("{} index values must be >= 0"
+                                     "".format(minor_name))
+                if np.diff(self.indptr).min() < 0:
+                    raise ValueError("index pointer values must form a "
+                                     "non-decreasing sequence")
+
+        # if not self.has_sorted_indices():
+        #    warn('Indices were not in sorted order.  Sorting indices.')
+        #    self.sort_indices()
+        #    assert(self.has_sorted_indices())
+        # TODO check for duplicates?
+
+    #######################
+    # Boolean comparisons #
+    #######################
+
+    def _scalar_binopt(self, other, op):
+        """Scalar version of self._binopt, for cases in which no new nonzeros
+        are added. Produces a new spmatrix in canonical form.
+        """
+        self.sum_duplicates()
+        res = self._with_data(op(self.data, other), copy=True)
+        res.eliminate_zeros()
+        return res
+
+    def __eq__(self, other):
+        # Scalar other.
+        if isscalarlike(other):
+            if np.isnan(other):
+                return self.__class__(self.shape, dtype=np.bool_)
+
+            if other == 0:
+                warn("Comparing a sparse matrix with 0 using == is inefficient"
+                     ", try using != instead.", SparseEfficiencyWarning,
+                     stacklevel=3)
+                all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
+                inv = self._scalar_binopt(other, operator.ne)
+                return all_true - inv
+            else:
+                return self._scalar_binopt(other, operator.eq)
+        # Dense other.
+        elif isdense(other):
+            return self.todense() == other
+        # Pydata sparse other.
+        elif is_pydata_spmatrix(other):
+            return NotImplemented
+        # Sparse other.
+        elif isspmatrix(other):
+            warn("Comparing sparse matrices using == is inefficient, try using"
+                 " != instead.", SparseEfficiencyWarning, stacklevel=3)
+            # TODO sparse broadcasting
+            if self.shape != other.shape:
+                return False
+            elif self.format != other.format:
+                other = other.asformat(self.format)
+            res = self._binopt(other, '_ne_')
+            all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
+            return all_true - res
+        else:
+            return False
+
+    def __ne__(self, other):
+        # Scalar other.
+        if isscalarlike(other):
+            if np.isnan(other):
+                warn("Comparing a sparse matrix with nan using != is"
+                     " inefficient", SparseEfficiencyWarning, stacklevel=3)
+                all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
+                return all_true
+            elif other != 0:
+                warn("Comparing a sparse matrix with a nonzero scalar using !="
+                     " is inefficient, try using == instead.",
+                     SparseEfficiencyWarning, stacklevel=3)
+                all_true = self.__class__(np.ones(self.shape), dtype=np.bool_)
+                inv = self._scalar_binopt(other, operator.eq)
+                return all_true - inv
+            else:
+                return self._scalar_binopt(other, operator.ne)
+        # Dense other.
+        elif isdense(other):
+            return self.todense() != other
+        # Pydata sparse other.
+        elif is_pydata_spmatrix(other):
+            return NotImplemented
+        # Sparse other.
+        elif isspmatrix(other):
+            # TODO sparse broadcasting
+            if self.shape != other.shape:
+                return True
+            elif self.format != other.format:
+                other = other.asformat(self.format)
+            return self._binopt(other, '_ne_')
+        else:
+            return True
+
+    def _inequality(self, other, op, op_name, bad_scalar_msg):
+        # Scalar other.
+        if isscalarlike(other):
+            if other == 0 and op_name in ('_le_', '_ge_'):
+                raise NotImplementedError(">= and <= don't work with 0.")
+            elif op(0, other):
+                warn(bad_scalar_msg, SparseEfficiencyWarning)
+                other_arr = np.empty(self.shape, dtype=np.result_type(other))
+                other_arr.fill(other)
+                other_arr = self.__class__(other_arr)
+                return self._binopt(other_arr, op_name)
+            else:
+                return self._scalar_binopt(other, op)
+        # Dense other.
+        elif isdense(other):
+            return op(self.todense(), other)
+        # Sparse other.
+        elif isspmatrix(other):
+            # TODO sparse broadcasting
+            if self.shape != other.shape:
+                raise ValueError("inconsistent shapes")
+            elif self.format != other.format:
+                other = other.asformat(self.format)
+            if op_name not in ('_ge_', '_le_'):
+                return self._binopt(other, op_name)
+
+            warn("Comparing sparse matrices using >= and <= is inefficient, "
+                 "using <, >, or !=, instead.", SparseEfficiencyWarning)
+            all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
+            res = self._binopt(other, '_gt_' if op_name == '_le_' else '_lt_')
+            return all_true - res
+        else:
+            raise ValueError("Operands could not be compared.")
+
+    def __lt__(self, other):
+        return self._inequality(other, operator.lt, '_lt_',
+                                "Comparing a sparse matrix with a scalar "
+                                "greater than zero using < is inefficient, "
+                                "try using >= instead.")
+
+    def __gt__(self, other):
+        return self._inequality(other, operator.gt, '_gt_',
+                                "Comparing a sparse matrix with a scalar "
+                                "less than zero using > is inefficient, "
+                                "try using <= instead.")
+
+    def __le__(self, other):
+        return self._inequality(other, operator.le, '_le_',
+                                "Comparing a sparse matrix with a scalar "
+                                "greater than zero using <= is inefficient, "
+                                "try using > instead.")
+
+    def __ge__(self, other):
+        return self._inequality(other, operator.ge, '_ge_',
+                                "Comparing a sparse matrix with a scalar "
+                                "less than zero using >= is inefficient, "
+                                "try using < instead.")
+
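+    # Comparison sketch (illustrative): comparing against a dense operand
+    # returns a dense result, while sparse-sparse comparisons stay sparse:
+    #
+    #   >>> A = csr_matrix(np.eye(2))
+    #   >>> (A == np.eye(2)).all()
+    #   True
+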
+    #################################
+    # Arithmetic operator overrides #
+    #################################
+
+    def _add_dense(self, other):
+        if other.shape != self.shape:
+            raise ValueError('Incompatible shapes ({} and {})'
+                             .format(self.shape, other.shape))
+        dtype = upcast_char(self.dtype.char, other.dtype.char)
+        order = self._swap('CF')[0]
+        result = np.array(other, dtype=dtype, order=order, copy=True)
+        M, N = self._swap(self.shape)
+        y = result if result.flags.c_contiguous else result.T
+        csr_todense(M, N, self.indptr, self.indices, self.data, y)
+        return self._container(result, copy=False)
+
+    def _add_sparse(self, other):
+        return self._binopt(other, '_plus_')
+
+    def _sub_sparse(self, other):
+        return self._binopt(other, '_minus_')
+
+    def multiply(self, other):
+        """Point-wise multiplication by another matrix, vector, or
+        scalar.
+        """
+        # Scalar multiplication.
+        if isscalarlike(other):
+            return self._mul_scalar(other)
+        # Sparse matrix or vector.
+        if isspmatrix(other):
+            if self.shape == other.shape:
+                other = self.__class__(other)
+                return self._binopt(other, '_elmul_')
+            # Single element.
+            elif other.shape == (1, 1):
+                return self._mul_scalar(other.toarray()[0, 0])
+            elif self.shape == (1, 1):
+                return other._mul_scalar(self.toarray()[0, 0])
+            # A row times a column.
+            elif self.shape[1] == 1 and other.shape[0] == 1:
+                return self._mul_sparse_matrix(other.tocsc())
+            elif self.shape[0] == 1 and other.shape[1] == 1:
+                return other._mul_sparse_matrix(self.tocsc())
+            # Row vector times matrix. other is a row.
+            elif other.shape[0] == 1 and self.shape[1] == other.shape[1]:
+                other = self._dia_container(
+                    (other.toarray().ravel(), [0]),
+                    shape=(other.shape[1], other.shape[1])
+                )
+                return self._mul_sparse_matrix(other)
+            # self is a row.
+            elif self.shape[0] == 1 and self.shape[1] == other.shape[1]:
+                copy = self._dia_container(
+                    (self.toarray().ravel(), [0]),
+                    shape=(self.shape[1], self.shape[1])
+                )
+                return other._mul_sparse_matrix(copy)
+            # Column vector times matrix. other is a column.
+            elif other.shape[1] == 1 and self.shape[0] == other.shape[0]:
+                other = self._dia_container(
+                    (other.toarray().ravel(), [0]),
+                    shape=(other.shape[0], other.shape[0])
+                )
+                return other._mul_sparse_matrix(self)
+            # self is a column.
+            elif self.shape[1] == 1 and self.shape[0] == other.shape[0]:
+                copy = self._dia_container(
+                    (self.toarray().ravel(), [0]),
+                    shape=(self.shape[0], self.shape[0])
+                )
+                return copy._mul_sparse_matrix(other)
+            else:
+                raise ValueError("inconsistent shapes")
+
+        # Assume other is a dense matrix/array, which produces a single-item
+        # object array if other isn't convertible to ndarray.
+        other = np.atleast_2d(other)
+
+        if other.ndim != 2:
+            return np.multiply(self.toarray(), other)
+        # Single element / wrapped object.
+        if other.size == 1:
+            return self._mul_scalar(other.flat[0])
+        # Fast case for trivial sparse matrix.
+        elif self.shape == (1, 1):
+            return np.multiply(self.toarray()[0, 0], other)
+
+        ret = self.tocoo()
+        # Matching shapes.
+        if self.shape == other.shape:
+            data = np.multiply(ret.data, other[ret.row, ret.col])
+        # Sparse row vector times...
+        elif self.shape[0] == 1:
+            if other.shape[1] == 1:  # Dense column vector.
+                data = np.multiply(ret.data, other)
+            elif other.shape[1] == self.shape[1]:  # Dense matrix.
+                data = np.multiply(ret.data, other[:, ret.col])
+            else:
+                raise ValueError("inconsistent shapes")
+            row = np.repeat(np.arange(other.shape[0]), len(ret.row))
+            col = np.tile(ret.col, other.shape[0])
+            return self._coo_container(
+                (data.view(np.ndarray).ravel(), (row, col)),
+                shape=(other.shape[0], self.shape[1]),
+                copy=False
+            )
+        # Sparse column vector times...
+        elif self.shape[1] == 1:
+            if other.shape[0] == 1:  # Dense row vector.
+                data = np.multiply(ret.data[:, None], other)
+            elif other.shape[0] == self.shape[0]:  # Dense matrix.
+                data = np.multiply(ret.data[:, None], other[ret.row])
+            else:
+                raise ValueError("inconsistent shapes")
+            row = np.repeat(ret.row, other.shape[1])
+            col = np.tile(np.arange(other.shape[1]), len(ret.col))
+            return self._coo_container(
+                (data.view(np.ndarray).ravel(), (row, col)),
+                shape=(self.shape[0], other.shape[1]),
+                copy=False
+            )
+        # Sparse matrix times dense row vector.
+        elif other.shape[0] == 1 and self.shape[1] == other.shape[1]:
+            data = np.multiply(ret.data, other[:, ret.col].ravel())
+        # Sparse matrix times dense column vector.
+        elif other.shape[1] == 1 and self.shape[0] == other.shape[0]:
+            data = np.multiply(ret.data, other[ret.row].ravel())
+        else:
+            raise ValueError("inconsistent shapes")
+        ret.data = data.view(np.ndarray).ravel()
+        return ret
+
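+    # Broadcasting sketch (illustrative): a dense row vector scales the
+    # matrix column-wise, mirroring NumPy broadcasting:
+    #
+    #   >>> A = csr_matrix(np.array([[1, 2], [3, 4]]))
+    #   >>> A.multiply(np.array([[10, 100]])).toarray()
+    #   array([[ 10, 200],
+    #          [ 30, 400]])
+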
+    ###########################
+    # Multiplication handlers #
+    ###########################
+
+    def _mul_vector(self, other):
+        M, N = self.shape
+
+        # output array
+        result = np.zeros(M, dtype=upcast_char(self.dtype.char,
+                                               other.dtype.char))
+
+        # csr_matvec or csc_matvec
+        fn = getattr(_sparsetools, self.format + '_matvec')
+        fn(M, N, self.indptr, self.indices, self.data, other, result)
+
+        return result
+
+    def _mul_multivector(self, other):
+        M, N = self.shape
+        n_vecs = other.shape[1]  # number of column vectors
+
+        result = np.zeros((M, n_vecs),
+                          dtype=upcast_char(self.dtype.char, other.dtype.char))
+
+        # csr_matvecs or csc_matvecs
+        fn = getattr(_sparsetools, self.format + '_matvecs')
+        fn(M, N, n_vecs, self.indptr, self.indices, self.data,
+           other.ravel(), result.ravel())
+
+        return result
+
+    def _mul_sparse_matrix(self, other):
+        M, K1 = self.shape
+        K2, N = other.shape
+
+        major_axis = self._swap((M, N))[0]
+        other = self.__class__(other)  # convert to this format
+
+        idx_dtype = get_index_dtype((self.indptr, self.indices,
+                                     other.indptr, other.indices))
+
+        fn = getattr(_sparsetools, self.format + '_matmat_maxnnz')
+        nnz = fn(M, N,
+                 np.asarray(self.indptr, dtype=idx_dtype),
+                 np.asarray(self.indices, dtype=idx_dtype),
+                 np.asarray(other.indptr, dtype=idx_dtype),
+                 np.asarray(other.indices, dtype=idx_dtype))
+
+        idx_dtype = get_index_dtype((self.indptr, self.indices,
+                                     other.indptr, other.indices),
+                                    maxval=nnz)
+
+        indptr = np.empty(major_axis + 1, dtype=idx_dtype)
+        indices = np.empty(nnz, dtype=idx_dtype)
+        data = np.empty(nnz, dtype=upcast(self.dtype, other.dtype))
+
+        fn = getattr(_sparsetools, self.format + '_matmat')
+        fn(M, N, np.asarray(self.indptr, dtype=idx_dtype),
+           np.asarray(self.indices, dtype=idx_dtype),
+           self.data,
+           np.asarray(other.indptr, dtype=idx_dtype),
+           np.asarray(other.indices, dtype=idx_dtype),
+           other.data,
+           indptr, indices, data)
+
+        return self.__class__((data, indices, indptr), shape=(M, N))
+
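+    # Matmul sketch (illustrative): the two-pass pattern above first sizes
+    # the output (``*_matmat_maxnnz``), then fills it (``*_matmat``):
+    #
+    #   >>> A = csr_matrix(np.eye(2))
+    #   >>> (A @ A).nnz
+    #   2
+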
+    def diagonal(self, k=0):
+        rows, cols = self.shape
+        if k <= -rows or k >= cols:
+            return np.empty(0, dtype=self.data.dtype)
+        fn = getattr(_sparsetools, self.format + "_diagonal")
+        y = np.empty(min(rows + min(k, 0), cols - max(k, 0)),
+                     dtype=upcast(self.dtype))
+        fn(k, self.shape[0], self.shape[1], self.indptr, self.indices,
+           self.data, y)
+        return y
+
+    diagonal.__doc__ = spmatrix.diagonal.__doc__
+
+    #####################
+    # Other binary ops  #
+    #####################
+
+    def _maximum_minimum(self, other, npop, op_name, dense_check):
+        if isscalarlike(other):
+            if dense_check(other):
+                warn("Taking maximum (minimum) with > 0 (< 0) number results"
+                     " to a dense matrix.", SparseEfficiencyWarning,
+                     stacklevel=3)
+                other_arr = np.empty(self.shape, dtype=np.asarray(other).dtype)
+                other_arr.fill(other)
+                other_arr = self.__class__(other_arr)
+                return self._binopt(other_arr, op_name)
+            else:
+                self.sum_duplicates()
+                new_data = npop(self.data, np.asarray(other))
+                mat = self.__class__((new_data, self.indices, self.indptr),
+                                     dtype=new_data.dtype, shape=self.shape)
+                return mat
+        elif isdense(other):
+            return npop(self.todense(), other)
+        elif isspmatrix(other):
+            return self._binopt(other, op_name)
+        else:
+            raise ValueError("Operands not compatible.")
+
+    def maximum(self, other):
+        return self._maximum_minimum(other, np.maximum,
+                                     '_maximum_', lambda x: np.asarray(x) > 0)
+
+    maximum.__doc__ = spmatrix.maximum.__doc__
+
+    def minimum(self, other):
+        return self._maximum_minimum(other, np.minimum,
+                                     '_minimum_', lambda x: np.asarray(x) < 0)
+
+    minimum.__doc__ = spmatrix.minimum.__doc__
+
+    #####################
+    # Reduce operations #
+    #####################
+
+    def sum(self, axis=None, dtype=None, out=None):
+        """Sum the matrix over the given axis.  If the axis is None, sum
+        over both rows and columns, returning a scalar.
+        """
+        # The spmatrix base class already does axis=0 and axis=1 efficiently
+        # so we only do the case axis=None here
+        if (not hasattr(self, 'blocksize') and
+                axis in self._swap(((1, -1), (0, 2)))[0]):
+            # faster than multiplication for large minor axis in CSC/CSR
+            res_dtype = get_sum_dtype(self.dtype)
+            ret = np.zeros(len(self.indptr) - 1, dtype=res_dtype)
+
+            major_index, value = self._minor_reduce(np.add)
+            ret[major_index] = value
+            ret = self._ascontainer(ret)
+            if axis % 2 == 1:
+                ret = ret.T
+
+            if out is not None and out.shape != ret.shape:
+                raise ValueError('dimensions do not match')
+
+            return ret.sum(axis=(), dtype=dtype, out=out)
+        # spmatrix will handle the remaining situations when axis
+        # is in {None, -1, 0, 1}
+        else:
+            return spmatrix.sum(self, axis=axis, dtype=dtype, out=out)
+
+    sum.__doc__ = spmatrix.sum.__doc__
+
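+    # Axis-sum sketch (illustrative): summing a CSR matrix over its minor
+    # axis takes the fast reduceat path above:
+    #
+    #   >>> A = csr_matrix(np.array([[1, 2], [0, 3]]))
+    #   >>> A.sum(axis=1)
+    #   matrix([[3],
+    #           [3]])
+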
+    def _minor_reduce(self, ufunc, data=None):
+        """Reduce nonzeros with a ufunc over the minor axis when non-empty
+
+        Can be applied to a function of self.data by supplying data parameter.
+
+        Warning: this does not call sum_duplicates()
+
+        Returns
+        -------
+        major_index : array of ints
+            Major indices where nonzero
+
+        value : array of self.dtype
+            Reduce result for nonzeros in each major_index
+        """
+        if data is None:
+            data = self.data
+        major_index = np.flatnonzero(np.diff(self.indptr))
+        value = ufunc.reduceat(data,
+                               downcast_intp_index(self.indptr[major_index]))
+        return major_index, value
+
+    #######################
+    # Getting and Setting #
+    #######################
+
+    def _get_intXint(self, row, col):
+        M, N = self._swap(self.shape)
+        major, minor = self._swap((row, col))
+        indptr, indices, data = get_csr_submatrix(
+            M, N, self.indptr, self.indices, self.data,
+            major, major + 1, minor, minor + 1)
+        return data.sum(dtype=self.dtype)
+
+    def _get_sliceXslice(self, row, col):
+        major, minor = self._swap((row, col))
+        if major.step in (1, None) and minor.step in (1, None):
+            return self._get_submatrix(major, minor, copy=True)
+        return self._major_slice(major)._minor_slice(minor)
+
+    def _get_arrayXarray(self, row, col):
+        # inner indexing
+        idx_dtype = self.indices.dtype
+        M, N = self._swap(self.shape)
+        major, minor = self._swap((row, col))
+        major = np.asarray(major, dtype=idx_dtype)
+        minor = np.asarray(minor, dtype=idx_dtype)
+
+        val = np.empty(major.size, dtype=self.dtype)
+        csr_sample_values(M, N, self.indptr, self.indices, self.data,
+                          major.size, major.ravel(), minor.ravel(), val)
+        if major.ndim == 1:
+            return self._ascontainer(val)
+        return self.__class__(val.reshape(major.shape))
+
+    def _get_columnXarray(self, row, col):
+        # outer indexing
+        major, minor = self._swap((row, col))
+        return self._major_index_fancy(major)._minor_index_fancy(minor)
+
+    def _major_index_fancy(self, idx):
+        """Index along the major axis where idx is an array of ints.
+        """
+        idx_dtype = self.indices.dtype
+        indices = np.asarray(idx, dtype=idx_dtype).ravel()
+
+        _, N = self._swap(self.shape)
+        M = len(indices)
+        new_shape = self._swap((M, N))
+        if M == 0:
+            return self.__class__(new_shape, dtype=self.dtype)
+
+        row_nnz = self.indptr[indices + 1] - self.indptr[indices]
+        idx_dtype = self.indices.dtype
+        res_indptr = np.zeros(M+1, dtype=idx_dtype)
+        np.cumsum(row_nnz, out=res_indptr[1:])
+
+        nnz = res_indptr[-1]
+        res_indices = np.empty(nnz, dtype=idx_dtype)
+        res_data = np.empty(nnz, dtype=self.dtype)
+        csr_row_index(M, indices, self.indptr, self.indices, self.data,
+                      res_indices, res_data)
+
+        return self.__class__((res_data, res_indices, res_indptr),
+                              shape=new_shape, copy=False)
+
+    def _major_slice(self, idx, copy=False):
+        """Index along the major axis where idx is a slice object.
+        """
+        if idx == slice(None):
+            return self.copy() if copy else self
+
+        M, N = self._swap(self.shape)
+        start, stop, step = idx.indices(M)
+        M = len(range(start, stop, step))
+        new_shape = self._swap((M, N))
+        if M == 0:
+            return self.__class__(new_shape, dtype=self.dtype)
+
+        # Work out what slices are needed for `row_nnz`
+        # start,stop can be -1, only if step is negative
+        start0, stop0 = start, stop
+        if stop == -1 and start >= 0:
+            stop0 = None
+        start1, stop1 = start + 1, stop + 1
+
+        row_nnz = self.indptr[start1:stop1:step] - \
+            self.indptr[start0:stop0:step]
+        idx_dtype = self.indices.dtype
+        res_indptr = np.zeros(M+1, dtype=idx_dtype)
+        np.cumsum(row_nnz, out=res_indptr[1:])
+
+        if step == 1:
+            all_idx = slice(self.indptr[start], self.indptr[stop])
+            res_indices = np.array(self.indices[all_idx], copy=copy)
+            res_data = np.array(self.data[all_idx], copy=copy)
+        else:
+            nnz = res_indptr[-1]
+            res_indices = np.empty(nnz, dtype=idx_dtype)
+            res_data = np.empty(nnz, dtype=self.dtype)
+            csr_row_slice(start, stop, step, self.indptr, self.indices,
+                          self.data, res_indices, res_data)
+
+        return self.__class__((res_data, res_indices, res_indptr),
+                              shape=new_shape, copy=False)
+
+    def _minor_index_fancy(self, idx):
+        """Index along the minor axis where idx is an array of ints.
+        """
+        idx_dtype = self.indices.dtype
+        idx = np.asarray(idx, dtype=idx_dtype).ravel()
+
+        M, N = self._swap(self.shape)
+        k = len(idx)
+        new_shape = self._swap((M, k))
+        if k == 0:
+            return self.__class__(new_shape, dtype=self.dtype)
+
+        # pass 1: count idx entries and compute new indptr
+        col_offsets = np.zeros(N, dtype=idx_dtype)
+        res_indptr = np.empty_like(self.indptr)
+        csr_column_index1(k, idx, M, N, self.indptr, self.indices,
+                          col_offsets, res_indptr)
+
+        # pass 2: copy indices/data for selected idxs
+        col_order = np.argsort(idx).astype(idx_dtype, copy=False)
+        nnz = res_indptr[-1]
+        res_indices = np.empty(nnz, dtype=idx_dtype)
+        res_data = np.empty(nnz, dtype=self.dtype)
+        csr_column_index2(col_order, col_offsets, len(self.indices),
+                          self.indices, self.data, res_indices, res_data)
+        return self.__class__((res_data, res_indices, res_indptr),
+                              shape=new_shape, copy=False)
+
+    def _minor_slice(self, idx, copy=False):
+        """Index along the minor axis where idx is a slice object.
+        """
+        if idx == slice(None):
+            return self.copy() if copy else self
+
+        M, N = self._swap(self.shape)
+        start, stop, step = idx.indices(N)
+        N = len(range(start, stop, step))
+        if N == 0:
+            return self.__class__(self._swap((M, N)), dtype=self.dtype)
+        if step == 1:
+            return self._get_submatrix(minor=idx, copy=copy)
+        # TODO: don't fall back to fancy indexing here
+        return self._minor_index_fancy(np.arange(start, stop, step))
+
+    def _get_submatrix(self, major=None, minor=None, copy=False):
+        """Return a submatrix of this matrix.
+
+        major, minor: None, int, or slice with step 1
+        """
+        M, N = self._swap(self.shape)
+        i0, i1 = _process_slice(major, M)
+        j0, j1 = _process_slice(minor, N)
+
+        if i0 == 0 and j0 == 0 and i1 == M and j1 == N:
+            return self.copy() if copy else self
+
+        indptr, indices, data = get_csr_submatrix(
+            M, N, self.indptr, self.indices, self.data, i0, i1, j0, j1)
+
+        shape = self._swap((i1 - i0, j1 - j0))
+        return self.__class__((data, indices, indptr), shape=shape,
+                              dtype=self.dtype, copy=False)
+
+    def _set_intXint(self, row, col, x):
+        i, j = self._swap((row, col))
+        self._set_many(i, j, x)
+
+    def _set_arrayXarray(self, row, col, x):
+        i, j = self._swap((row, col))
+        self._set_many(i, j, x)
+
+    def _set_arrayXarray_sparse(self, row, col, x):
+        # clear entries that will be overwritten
+        self._zero_many(*self._swap((row, col)))
+
+        M, N = row.shape  # matches col.shape
+        broadcast_row = M != 1 and x.shape[0] == 1
+        broadcast_col = N != 1 and x.shape[1] == 1
+        r, c = x.row, x.col
+
+        x = np.asarray(x.data, dtype=self.dtype)
+        if x.size == 0:
+            return
+
+        if broadcast_row:
+            r = np.repeat(np.arange(M), len(r))
+            c = np.tile(c, M)
+            x = np.tile(x, M)
+        if broadcast_col:
+            r = np.repeat(r, N)
+            c = np.tile(np.arange(N), len(c))
+            x = np.repeat(x, N)
+        # only assign entries in the new sparsity structure
+        i, j = self._swap((row[r, c], col[r, c]))
+        self._set_many(i, j, x)
+
+    def _setdiag(self, values, k):
+        if 0 in self.shape:
+            return
+
+        M, N = self.shape
+        broadcast = (values.ndim == 0)
+
+        if k < 0:
+            if broadcast:
+                max_index = min(M + k, N)
+            else:
+                max_index = min(M + k, N, len(values))
+            i = np.arange(max_index, dtype=self.indices.dtype)
+            j = np.arange(max_index, dtype=self.indices.dtype)
+            i -= k
+
+        else:
+            if broadcast:
+                max_index = min(M, N - k)
+            else:
+                max_index = min(M, N - k, len(values))
+            i = np.arange(max_index, dtype=self.indices.dtype)
+            j = np.arange(max_index, dtype=self.indices.dtype)
+            j += k
+
+        if not broadcast:
+            values = values[:len(i)]
+
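+        # e.g. M = N = 4 with broadcast k = -1: max_index = 3, so the
+        # assignment below targets (1, 0), (2, 1), (3, 2) -- the first
+        # subdiagonal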
+        self[i, j] = values
+
+    def _prepare_indices(self, i, j):
+        M, N = self._swap(self.shape)
+
+        def check_bounds(indices, bound):
+            idx = indices.max()
+            if idx >= bound:
+                raise IndexError('index (%d) out of range (>= %d)' %
+                                 (idx, bound))
+            idx = indices.min()
+            if idx < -bound:
+                raise IndexError('index (%d) out of range (< -%d)' %
+                                 (idx, bound))
+
+        i = np.array(i, dtype=self.indices.dtype, copy=False, ndmin=1).ravel()
+        j = np.array(j, dtype=self.indices.dtype, copy=False, ndmin=1).ravel()
+        check_bounds(i, M)
+        check_bounds(j, N)
+        return i, j, M, N
+
+    def _set_many(self, i, j, x):
+        """Sets value at each (i, j) to x
+
+        Here (i,j) index major and minor respectively, and must not contain
+        duplicate entries.
+        """
+        i, j, M, N = self._prepare_indices(i, j)
+        x = np.array(x, dtype=self.dtype, copy=False, ndmin=1).ravel()
+
+        n_samples = x.size
+        offsets = np.empty(n_samples, dtype=self.indices.dtype)
+        ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
+                                 i, j, offsets)
+        if ret == 1:
+            # rinse and repeat
+            self.sum_duplicates()
+            csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
+                               i, j, offsets)
+
+        if -1 not in offsets:
+            # only affects existing non-zero cells
+            self.data[offsets] = x
+            return
+
+        else:
+            warn("Changing the sparsity structure of a {}_matrix is expensive."
+                 " lil_matrix is more efficient.".format(self.format),
+                 SparseEfficiencyWarning, stacklevel=3)
+            # replace where possible
+            mask = offsets > -1
+            self.data[offsets[mask]] = x[mask]
+            # only insertions remain
+            mask = ~mask
+            i = i[mask]
+            i[i < 0] += M
+            j = j[mask]
+            j[j < 0] += N
+            self._insert_many(i, j, x[mask])
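+            # note: any assignment that introduces a new nonzero takes this
+            # slower path, e.g. ``A[0, 0] = 1.0`` on a CSR matrix whose
+            # pattern does not yet contain (0, 0)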
+
+    def _zero_many(self, i, j):
+        """Sets value at each (i, j) to zero, preserving sparsity structure.
+
+        Here (i,j) index major and minor respectively.
+        """
+        i, j, M, N = self._prepare_indices(i, j)
+
+        n_samples = len(i)
+        offsets = np.empty(n_samples, dtype=self.indices.dtype)
+        ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
+                                 i, j, offsets)
+        if ret == 1:
+            # rinse and repeat
+            self.sum_duplicates()
+            csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
+                               i, j, offsets)
+
+        # only assign zeros to the existing sparsity structure
+        self.data[offsets[offsets > -1]] = 0
+
+    def _insert_many(self, i, j, x):
+        """Inserts new nonzero at each (i, j) with value x
+
+        Here (i,j) index major and minor respectively.
+        i, j and x must be non-empty, 1d arrays.
+        Inserts each major group (e.g. all entries per row) at a time.
+        Maintains has_sorted_indices property.
+        Modifies i, j, x in place.
+        """
+        order = np.argsort(i, kind='mergesort')  # stable for duplicates
+        i = i.take(order, mode='clip')
+        j = j.take(order, mode='clip')
+        x = x.take(order, mode='clip')
+
+        do_sort = self.has_sorted_indices
+
+        # Update index data type
+        idx_dtype = get_index_dtype((self.indices, self.indptr),
+                                    maxval=(self.indptr[-1] + x.size))
+        self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
+        self.indices = np.asarray(self.indices, dtype=idx_dtype)
+        i = np.asarray(i, dtype=idx_dtype)
+        j = np.asarray(j, dtype=idx_dtype)
+
+        # Collate old and new in chunks by major index
+        indices_parts = []
+        data_parts = []
+        ui, ui_indptr = np.unique(i, return_index=True)
+        ui_indptr = np.append(ui_indptr, len(j))
+        new_nnzs = np.diff(ui_indptr)
+        prev = 0
+        for c, (ii, js, je) in enumerate(zip(ui, ui_indptr, ui_indptr[1:])):
+            # old entries
+            start = self.indptr[prev]
+            stop = self.indptr[ii]
+            indices_parts.append(self.indices[start:stop])
+            data_parts.append(self.data[start:stop])
+
+            # handle duplicate j: keep last setting
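+            # e.g. j[js:je] = [2, 0, 2] with x = [10, 20, 30] keeps
+            # (j=0, x=20) and (j=2, x=30)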
+            uj, uj_indptr = np.unique(j[js:je][::-1], return_index=True)
+            if len(uj) == je - js:
+                indices_parts.append(j[js:je])
+                data_parts.append(x[js:je])
+            else:
+                indices_parts.append(j[js:je][::-1][uj_indptr])
+                data_parts.append(x[js:je][::-1][uj_indptr])
+                new_nnzs[c] = len(uj)
+
+            prev = ii
+
+        # remaining old entries
+        start = self.indptr[ii]
+        indices_parts.append(self.indices[start:])
+        data_parts.append(self.data[start:])
+
+        # update attributes
+        self.indices = np.concatenate(indices_parts)
+        self.data = np.concatenate(data_parts)
+        nnzs = np.empty(self.indptr.shape, dtype=idx_dtype)
+        nnzs[0] = idx_dtype(0)
+        indptr_diff = np.diff(self.indptr)
+        indptr_diff[ui] += new_nnzs
+        nnzs[1:] = indptr_diff
+        self.indptr = np.cumsum(nnzs, out=nnzs)
+
+        if do_sort:
+            # TODO: only sort where necessary
+            self.has_sorted_indices = False
+            self.sort_indices()
+
+        self.check_format(full_check=False)
+
+    ######################
+    # Conversion methods #
+    ######################
+
+    def tocoo(self, copy=True):
+        major_dim, minor_dim = self._swap(self.shape)
+        minor_indices = self.indices
+        major_indices = np.empty(len(minor_indices), dtype=self.indices.dtype)
+        _sparsetools.expandptr(major_dim, self.indptr, major_indices)
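+        # e.g. indptr = [0, 2, 3] expands to major_indices = [0, 0, 1]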
+        row, col = self._swap((major_indices, minor_indices))
+
+        return self._coo_container(
+            (self.data, (row, col)), self.shape, copy=copy,
+            dtype=self.dtype
+        )
+
+    tocoo.__doc__ = spmatrix.tocoo.__doc__
+
+    def toarray(self, order=None, out=None):
+        if out is None and order is None:
+            order = self._swap('cf')[0]
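+            # 'c' (row-major) for CSR, 'f' (column-major) for CSC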
+        out = self._process_toarray_args(order, out)
+        if not (out.flags.c_contiguous or out.flags.f_contiguous):
+            raise ValueError('Output array must be C or F contiguous')
+        # align ideal order with output array order
+        if out.flags.c_contiguous:
+            x = self.tocsr()
+            y = out
+        else:
+            x = self.tocsc()
+            y = out.T
+        M, N = x._swap(x.shape)
+        csr_todense(M, N, x.indptr, x.indices, x.data, y)
+        return out
+
+    toarray.__doc__ = spmatrix.toarray.__doc__
+
+    ##############################################################
+    # methods that examine or modify the internal data structure #
+    ##############################################################
+
+    def eliminate_zeros(self):
+        """Remove zero entries from the matrix
+
+        This is an *in place* operation.
+        """
+        M, N = self._swap(self.shape)
+        _sparsetools.csr_eliminate_zeros(M, N, self.indptr, self.indices,
+                                         self.data)
+        self.prune()  # nnz may have changed
+
+    def __get_has_canonical_format(self):
+        """Determine whether the matrix has sorted indices and no duplicates
+
+        Returns
+            - True: if the above applies
+            - False: otherwise
+
+        has_canonical_format implies has_sorted_indices: if the latter flag
+        is False, the former must also be False; and whenever the former is
+        found to be True, the latter flag is set too.
+        """
+
+        # first check to see if result was cached
+        if not getattr(self, '_has_sorted_indices', True):
+            # not sorted => not canonical
+            self._has_canonical_format = False
+        elif not hasattr(self, '_has_canonical_format'):
+            self.has_canonical_format = bool(
+                _sparsetools.csr_has_canonical_format(
+                    len(self.indptr) - 1, self.indptr, self.indices))
+        return self._has_canonical_format
+
+    def __set_has_canonical_format(self, val):
+        self._has_canonical_format = bool(val)
+        if val:
+            self.has_sorted_indices = True
+
+    has_canonical_format = property(fget=__get_has_canonical_format,
+                                    fset=__set_has_canonical_format)
+
+    def sum_duplicates(self):
+        """Eliminate duplicate matrix entries by adding them together
+
+        This is an *in place* operation.
+        """
+        if self.has_canonical_format:
+            return
+        self.sort_indices()
+
+        M, N = self._swap(self.shape)
+        _sparsetools.csr_sum_duplicates(M, N, self.indptr, self.indices,
+                                        self.data)
+
+        self.prune()  # nnz may have changed
+        self.has_canonical_format = True
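+        # e.g. two stored entries with value 1 at the same (i, j) position
+        # collapse into a single stored entry with value 2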
+
+    def __get_sorted(self):
+        """Determine whether the matrix has sorted indices
+
+        Returns
+            - True: if the indices of the matrix are in sorted order
+            - False: otherwise
+
+        """
+
+        # first check to see if result was cached
+        if not hasattr(self, '_has_sorted_indices'):
+            self._has_sorted_indices = bool(
+                _sparsetools.csr_has_sorted_indices(
+                    len(self.indptr) - 1, self.indptr, self.indices))
+        return self._has_sorted_indices
+
+    def __set_sorted(self, val):
+        self._has_sorted_indices = bool(val)
+
+    has_sorted_indices = property(fget=__get_sorted, fset=__set_sorted)
+
+    def sorted_indices(self):
+        """Return a copy of this matrix with sorted indices
+        """
+        A = self.copy()
+        A.sort_indices()
+        return A
+
+        # an alternative that has linear complexity is the following
+        # although the previous option is typically faster
+        # return self.toother().toother()
+
+    def sort_indices(self):
+        """Sort the indices of this matrix *in place*
+        """
+
+        if not self.has_sorted_indices:
+            _sparsetools.csr_sort_indices(len(self.indptr) - 1, self.indptr,
+                                          self.indices, self.data)
+            self.has_sorted_indices = True
+
+    def prune(self):
+        """Remove empty space after all non-zero elements.
+        """
+        major_dim = self._swap(self.shape)[0]
+
+        if len(self.indptr) != major_dim + 1:
+            raise ValueError('index pointer has invalid length')
+        if len(self.indices) < self.nnz:
+            raise ValueError('indices array has fewer than nnz elements')
+        if len(self.data) < self.nnz:
+            raise ValueError('data array has fewer than nnz elements')
+
+        self.indices = _prune_array(self.indices[:self.nnz])
+        self.data = _prune_array(self.data[:self.nnz])
+
+    def resize(self, *shape):
+        shape = check_shape(shape)
+        if hasattr(self, 'blocksize'):
+            bm, bn = self.blocksize
+            new_M, rm = divmod(shape[0], bm)
+            new_N, rn = divmod(shape[1], bn)
+            if rm or rn:
+                raise ValueError("shape must be divisible into %s blocks. "
+                                 "Got %s" % (self.blocksize, shape))
+            M, N = self.shape[0] // bm, self.shape[1] // bn
+        else:
+            new_M, new_N = self._swap(shape)
+            M, N = self._swap(self.shape)
+
+        if new_M < M:
+            self.indices = self.indices[:self.indptr[new_M]]
+            self.data = self.data[:self.indptr[new_M]]
+            self.indptr = self.indptr[:new_M + 1]
+        elif new_M > M:
+            self.indptr = np.resize(self.indptr, new_M + 1)
+            self.indptr[M + 1:].fill(self.indptr[M])
+
+        if new_N < N:
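+            # drop entries whose minor index no longer fits, then rebuild
+            # the index pointer from the per-row survivor counts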
+            mask = self.indices < new_N
+            if not np.all(mask):
+                self.indices = self.indices[mask]
+                self.data = self.data[mask]
+                major_index, val = self._minor_reduce(np.add, mask)
+                self.indptr.fill(0)
+                self.indptr[1:][major_index] = val
+                np.cumsum(self.indptr, out=self.indptr)
+
+        self._shape = shape
+
+    resize.__doc__ = spmatrix.resize.__doc__
+
+    ###################
+    # utility methods #
+    ###################
+
+    # needed by _data_matrix
+    def _with_data(self, data, copy=True):
+        """Returns a matrix with the same sparsity structure as self,
+        but with different data.  By default the structure arrays
+        (i.e. .indptr and .indices) are copied.
+        """
+        if copy:
+            return self.__class__((data, self.indices.copy(),
+                                   self.indptr.copy()),
+                                  shape=self.shape,
+                                  dtype=data.dtype)
+        else:
+            return self.__class__((data, self.indices, self.indptr),
+                                  shape=self.shape, dtype=data.dtype)
+
+    def _binopt(self, other, op):
+        """apply the binary operation fn to two sparse matrices."""
+        other = self.__class__(other)
+
+        # e.g. csr_plus_csr, csr_minus_csr, etc.
+        fn = getattr(_sparsetools, self.format + op + self.format)
+
+        maxnnz = self.nnz + other.nnz
+        idx_dtype = get_index_dtype((self.indptr, self.indices,
+                                     other.indptr, other.indices),
+                                    maxval=maxnnz)
+        indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
+        indices = np.empty(maxnnz, dtype=idx_dtype)
+
+        bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']
+        if op in bool_ops:
+            data = np.empty(maxnnz, dtype=np.bool_)
+        else:
+            data = np.empty(maxnnz, dtype=upcast(self.dtype, other.dtype))
+
+        fn(self.shape[0], self.shape[1],
+           np.asarray(self.indptr, dtype=idx_dtype),
+           np.asarray(self.indices, dtype=idx_dtype),
+           self.data,
+           np.asarray(other.indptr, dtype=idx_dtype),
+           np.asarray(other.indices, dtype=idx_dtype),
+           other.data,
+           indptr, indices, data)
+
+        A = self.__class__((data, indices, indptr), shape=self.shape)
+        A.prune()
+
+        return A
+
+    def _divide_sparse(self, other):
+        """
+        Divide this matrix by a second sparse matrix.
+        """
+        if other.shape != self.shape:
+            raise ValueError('inconsistent shapes')
+
+        r = self._binopt(other, '_eldiv_')
+
+        if np.issubdtype(r.dtype, np.inexact):
+            # Eldiv leaves entries outside the combined sparsity
+            # pattern empty, so they must be filled manually.
+            # Everything outside of other's sparsity is NaN, and everything
+            # inside it is either zero or defined by eldiv.
+            out = np.empty(self.shape, dtype=self.dtype)
+            out.fill(np.nan)
+            row, col = other.nonzero()
+            out[row, col] = 0
+            r = r.tocoo()
+            out[r.row, r.col] = r.data
+            out = self._container(out)
+        else:
+            # for integer types, nan maps to 0, so r is already correct
+            out = r
+
+        return out
+
+
+def _process_slice(sl, num):
+    if sl is None:
+        i0, i1 = 0, num
+    elif isinstance(sl, slice):
+        i0, i1, stride = sl.indices(num)
+        if stride != 1:
+            raise ValueError('slicing with step != 1 not supported')
+        i0 = min(i0, i1)  # give an empty slice when i0 > i1
+    elif isintlike(sl):
+        if sl < 0:
+            sl += num
+        i0, i1 = sl, sl + 1
+        if i0 < 0 or i1 > num:
+            raise IndexError('index out of bounds: 0 <= %d < %d <= %d' %
+                             (i0, i1, num))
+    else:
+        raise TypeError('expected slice or scalar')
+
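+    # e.g. _process_slice(slice(1, None), 5) -> (1, 5),
+    #      _process_slice(2, 5) -> (2, 3), _process_slice(None, 5) -> (0, 5)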
+    return i0, i1
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/_construct.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/_construct.py
new file mode 100644
index 00000000..87d46315
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/_construct.py
@@ -0,0 +1,947 @@
+"""Functions to construct sparse matrices
+"""
+
+__docformat__ = "restructuredtext en"
+
+__all__ = ['spdiags', 'eye', 'identity', 'kron', 'kronsum',
+           'hstack', 'vstack', 'bmat', 'rand', 'random', 'diags', 'block_diag']
+
+import numbers
+from functools import partial
+import numpy as np
+
+from scipy._lib._util import check_random_state, rng_integers
+from ._sputils import upcast, get_index_dtype, isscalarlike
+
+from ._sparsetools import csr_hstack
+from ._csr import csr_matrix
+from ._csc import csc_matrix
+from ._bsr import bsr_matrix
+from ._coo import coo_matrix
+from ._dia import dia_matrix
+
+from ._base import issparse
+
+
+def spdiags(data, diags, m=None, n=None, format=None):
+    """
+    Return a sparse matrix from diagonals.
+
+    Parameters
+    ----------
+    data : array_like
+        Matrix diagonals stored row-wise
+    diags : sequence of int or an int
+        Diagonals to set:
+
+        * k = 0  the main diagonal
+        * k > 0  the kth upper diagonal
+        * k < 0  the kth lower diagonal
+    m, n : int, tuple, optional
+        Shape of the result. If `n` is None and `m` is a tuple, the
+        shape is that tuple. If both are omitted, the matrix is square
+        with shape ``(len(data[0]), len(data[0]))``.
+    format : str, optional
+        Format of the result. By default (format=None) an appropriate sparse
+        matrix format is returned. This choice is subject to change.
+
+    See Also
+    --------
+    diags : more convenient form of this function
+    dia_matrix : the sparse DIAgonal format.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.sparse import spdiags
+    >>> data = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
+    >>> diags = np.array([0, -1, 2])
+    >>> spdiags(data, diags, 4, 4).toarray()
+    array([[1, 0, 3, 0],
+           [1, 2, 0, 4],
+           [0, 2, 3, 0],
+           [0, 0, 3, 4]])
+
+    """
+    if m is None and n is None:
+        m = n = len(data[0])
+    elif n is None:
+        m, n = m
+    return dia_matrix((data, diags), shape=(m, n)).asformat(format)
+
+
+def diags(diagonals, offsets=0, shape=None, format=None, dtype=None):
+    """
+    Construct a sparse matrix from diagonals.
+
+    Parameters
+    ----------
+    diagonals : sequence of array_like
+        Sequence of arrays containing the matrix diagonals,
+        corresponding to `offsets`.
+    offsets : sequence of int or an int, optional
+        Diagonals to set:
+          - k = 0  the main diagonal (default)
+          - k > 0  the kth upper diagonal
+          - k < 0  the kth lower diagonal
+    shape : tuple of int, optional
+        Shape of the result. If omitted, a square matrix large enough
+        to contain the diagonals is returned.
+    format : {"dia", "csr", "csc", "lil", ...}, optional
+        Matrix format of the result. By default (format=None) an
+        appropriate sparse matrix format is returned. This choice is
+        subject to change.
+    dtype : dtype, optional
+        Data type of the matrix.
+
+    See Also
+    --------
+    spdiags : construct matrix from diagonals
+
+    Notes
+    -----
+    This function differs from `spdiags` in the way it handles
+    off-diagonals.
+
+    The result from `diags` is the sparse equivalent of::
+
+        np.diag(diagonals[0], offsets[0])
+        + ...
+        + np.diag(diagonals[k], offsets[k])
+
+    Repeated diagonal offsets are disallowed.
+
+    .. versionadded:: 0.11
+
+    Examples
+    --------
+    >>> from scipy.sparse import diags
+    >>> diagonals = [[1, 2, 3, 4], [1, 2, 3], [1, 2]]
+    >>> diags(diagonals, [0, -1, 2]).toarray()
+    array([[1, 0, 1, 0],
+           [1, 2, 0, 2],
+           [0, 2, 3, 0],
+           [0, 0, 3, 4]])
+
+    Broadcasting of scalars is supported (but shape needs to be
+    specified):
+
+    >>> diags([1, -2, 1], [-1, 0, 1], shape=(4, 4)).toarray()
+    array([[-2.,  1.,  0.,  0.],
+           [ 1., -2.,  1.,  0.],
+           [ 0.,  1., -2.,  1.],
+           [ 0.,  0.,  1., -2.]])
+
+
+    If only one diagonal is wanted (as in `numpy.diag`), the following
+    works as well:
+
+    >>> diags([1, 2, 3], 1).toarray()
+    array([[ 0.,  1.,  0.,  0.],
+           [ 0.,  0.,  2.,  0.],
+           [ 0.,  0.,  0.,  3.],
+           [ 0.,  0.,  0.,  0.]])
+    """
+    # if offsets is not a sequence, assume that there's only one diagonal
+    if isscalarlike(offsets):
+        # now check that there's actually only one diagonal
+        if len(diagonals) == 0 or isscalarlike(diagonals[0]):
+            diagonals = [np.atleast_1d(diagonals)]
+        else:
+            raise ValueError("Different number of diagonals and offsets.")
+    else:
+        diagonals = list(map(np.atleast_1d, diagonals))
+
+    offsets = np.atleast_1d(offsets)
+
+    # Basic check
+    if len(diagonals) != len(offsets):
+        raise ValueError("Different number of diagonals and offsets.")
+
+    # Determine shape, if omitted
+    if shape is None:
+        m = len(diagonals[0]) + abs(int(offsets[0]))
+        shape = (m, m)
+
+    # Determine data type, if omitted
+    if dtype is None:
+        dtype = np.common_type(*diagonals)
+
+    # Construct data array
+    m, n = shape
+
+    M = max([min(m + offset, n - offset) + max(0, offset)
+             for offset in offsets])
+    M = max(0, M)
+    data_arr = np.zeros((len(offsets), M), dtype=dtype)
+
+    K = min(m, n)
+
+    for j, diagonal in enumerate(diagonals):
+        offset = offsets[j]
+        k = max(0, offset)
+        length = min(m + offset, n - offset, K)
+        if length < 0:
+            raise ValueError("Offset %d (index %d) out of bounds" % (offset, j))
+        try:
+            data_arr[j, k:k+length] = diagonal[...,:length]
+        except ValueError as e:
+            if len(diagonal) != length and len(diagonal) != 1:
+                raise ValueError(
+                    "Diagonal length (index %d: %d at offset %d) does not "
+                    "agree with matrix size (%d, %d)." % (
+                    j, len(diagonal), offset, m, n)) from e
+            raise
+
+    return dia_matrix((data_arr, offsets), shape=(m, n)).asformat(format)
+
+
+def identity(n, dtype='d', format=None):
+    """Identity matrix in sparse format
+
+    Returns an identity matrix with shape (n,n) using a given
+    sparse format and dtype.
+
+    Parameters
+    ----------
+    n : int
+        Dimension of the identity matrix; the result has shape ``(n, n)``.
+    dtype : dtype, optional
+        Data type of the matrix
+    format : str, optional
+        Sparse format of the result, e.g., format="csr", etc.
+
+    Examples
+    --------
+    >>> from scipy.sparse import identity
+    >>> identity(3).toarray()
+    array([[ 1.,  0.,  0.],
+           [ 0.,  1.,  0.],
+           [ 0.,  0.,  1.]])
+    >>> identity(3, dtype='int8', format='dia')
+    <3x3 sparse matrix of type '<class 'numpy.int8'>'
+            with 3 stored elements (1 diagonals) in DIAgonal format>
+
+    """
+    return eye(n, n, dtype=dtype, format=format)
+
+
+def eye(m, n=None, k=0, dtype=float, format=None):
+    """Sparse matrix with ones on diagonal
+
+    Returns a sparse (m x n) matrix where the kth diagonal
+    is all ones and everything else is zeros.
+
+    Parameters
+    ----------
+    m : int
+        Number of rows in the matrix.
+    n : int, optional
+        Number of columns. Default: `m`.
+    k : int, optional
+        Diagonal to place ones on. Default: 0 (main diagonal).
+    dtype : dtype, optional
+        Data type of the matrix.
+    format : str, optional
+        Sparse format of the result, e.g., format="csr", etc.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import sparse
+    >>> sparse.eye(3).toarray()
+    array([[ 1.,  0.,  0.],
+           [ 0.,  1.,  0.],
+           [ 0.,  0.,  1.]])
+    >>> sparse.eye(3, dtype=np.int8)
+    <3x3 sparse matrix of type '<class 'numpy.int8'>'
+        with 3 stored elements (1 diagonals) in DIAgonal format>
+
+    """
+    if n is None:
+        n = m
+    m,n = int(m),int(n)
+
+    if m == n and k == 0:
+        # fast branch for special formats
+        if format in ['csr', 'csc']:
+            idx_dtype = get_index_dtype(maxval=n)
+            indptr = np.arange(n+1, dtype=idx_dtype)
+            indices = np.arange(n, dtype=idx_dtype)
+            data = np.ones(n, dtype=dtype)
+            cls = {'csr': csr_matrix, 'csc': csc_matrix}[format]
+            return cls((data,indices,indptr),(n,n))
+        elif format == 'coo':
+            idx_dtype = get_index_dtype(maxval=n)
+            row = np.arange(n, dtype=idx_dtype)
+            col = np.arange(n, dtype=idx_dtype)
+            data = np.ones(n, dtype=dtype)
+            return coo_matrix((data, (row, col)), (n, n))
+
+    diags = np.ones((1, max(0, min(m + k, n))), dtype=dtype)
+    return spdiags(diags, k, m, n).asformat(format)
+
+
+def kron(A, B, format=None):
+    """kronecker product of sparse matrices A and B
+
+    Parameters
+    ----------
+    A : sparse or dense matrix
+        first matrix of the product
+    B : sparse or dense matrix
+        second matrix of the product
+    format : str, optional
+        format of the result (e.g. "csr")
+
+    Returns
+    -------
+    kronecker product in a sparse matrix format
+
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import sparse
+    >>> A = sparse.csr_matrix(np.array([[0, 2], [5, 0]]))
+    >>> B = sparse.csr_matrix(np.array([[1, 2], [3, 4]]))
+    >>> sparse.kron(A, B).toarray()
+    array([[ 0,  0,  2,  4],
+           [ 0,  0,  6,  8],
+           [ 5, 10,  0,  0],
+           [15, 20,  0,  0]])
+
+    >>> sparse.kron(A, [[1, 2], [3, 4]]).toarray()
+    array([[ 0,  0,  2,  4],
+           [ 0,  0,  6,  8],
+           [ 5, 10,  0,  0],
+           [15, 20,  0,  0]])
+
+    """
+    B = coo_matrix(B)
+
+    if (format is None or format == "bsr") and 2*B.nnz >= B.shape[0] * B.shape[1]:
+        # B is fairly dense, use BSR
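+        # (2*B.nnz >= B.shape[0]*B.shape[1], i.e. at least 50% dense)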
+        A = csr_matrix(A,copy=True)
+        output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])
+
+        if A.nnz == 0 or B.nnz == 0:
+            # kronecker product is the zero matrix
+            return coo_matrix(output_shape).asformat(format)
+
+        B = B.toarray()
+        data = A.data.repeat(B.size).reshape(-1,B.shape[0],B.shape[1])
+        data = data * B
+
+        return bsr_matrix((data,A.indices,A.indptr), shape=output_shape)
+    else:
+        # use COO
+        A = coo_matrix(A)
+        output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])
+
+        if A.nnz == 0 or B.nnz == 0:
+            # kronecker product is the zero matrix
+            return coo_matrix(output_shape).asformat(format)
+
+        # expand entries of a into blocks
+        row = A.row.repeat(B.nnz)
+        col = A.col.repeat(B.nnz)
+        data = A.data.repeat(B.nnz)
+
+        if max(A.shape[0]*B.shape[0], A.shape[1]*B.shape[1]) > np.iinfo('int32').max:
+            row = row.astype(np.int64)
+            col = col.astype(np.int64)
+
+        row *= B.shape[0]
+        col *= B.shape[1]
+
+        # increment block indices
+        row,col = row.reshape(-1,B.nnz),col.reshape(-1,B.nnz)
+        row += B.row
+        col += B.col
+        row,col = row.reshape(-1),col.reshape(-1)
+
+        # compute block entries
+        data = data.reshape(-1,B.nnz) * B.data
+        data = data.reshape(-1)
+
+        return coo_matrix((data,(row,col)), shape=output_shape).asformat(format)
+
+
+def kronsum(A, B, format=None):
+    """kronecker sum of sparse matrices A and B
+
+    Kronecker sum of two sparse matrices is a sum of two Kronecker
+    products kron(I_n,A) + kron(B,I_m) where A has shape (m,m)
+    and B has shape (n,n) and I_m and I_n are identity matrices
+    of shape (m,m) and (n,n), respectively.
+
+    Parameters
+    ----------
+    A : sparse or dense matrix
+        square matrix
+    B : sparse or dense matrix
+        square matrix
+    format : str, optional
+        format of the result (e.g. "csr")
+
+    Returns
+    -------
+    kronecker sum in a sparse matrix format
+
+    Examples
+    --------
+
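+    A small worked example, following the definition above:
+
+    >>> from scipy import sparse
+    >>> A = sparse.csr_matrix([[0, 2], [5, 0]])
+    >>> B = sparse.csr_matrix([[1, 2], [3, 4]])
+    >>> sparse.kronsum(A, B).toarray()
+    array([[1, 2, 2, 0],
+           [5, 1, 0, 2],
+           [3, 0, 4, 2],
+           [0, 3, 5, 4]])
+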
+    """
+    A = coo_matrix(A)
+    B = coo_matrix(B)
+
+    if A.shape[0] != A.shape[1]:
+        raise ValueError('A is not square')
+
+    if B.shape[0] != B.shape[1]:
+        raise ValueError('B is not square')
+
+    dtype = upcast(A.dtype, B.dtype)
+
+    L = kron(eye(B.shape[0],dtype=dtype), A, format=format)
+    R = kron(B, eye(A.shape[0],dtype=dtype), format=format)
+
+    return (L+R).asformat(format)  # since L + R is not always same format
+
+
+def _compressed_sparse_stack(blocks, axis):
+    """
+    Stacking fast path for CSR/CSC matrices
+    (i) vstack for CSR, (ii) hstack for CSC.
+    """
+    other_axis = 1 if axis == 0 else 0
+    data = np.concatenate([b.data for b in blocks])
+    constant_dim = blocks[0].shape[other_axis]
+    idx_dtype = get_index_dtype(arrays=[b.indptr for b in blocks],
+                                maxval=max(data.size, constant_dim))
+    indices = np.empty(data.size, dtype=idx_dtype)
+    indptr = np.empty(sum(b.shape[axis] for b in blocks) + 1, dtype=idx_dtype)
+    last_indptr = idx_dtype(0)
+    sum_dim = 0
+    sum_indices = 0
+    for b in blocks:
+        if b.shape[other_axis] != constant_dim:
+            raise ValueError(f'incompatible dimensions for axis {other_axis}')
+        indices[sum_indices:sum_indices+b.indices.size] = b.indices
+        sum_indices += b.indices.size
+        idxs = slice(sum_dim, sum_dim + b.shape[axis])
+        indptr[idxs] = b.indptr[:-1]
+        indptr[idxs] += last_indptr
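+        # shift this block's row pointers by the nnz accumulated so far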
+        sum_dim += b.shape[axis]
+        last_indptr += b.indptr[-1]
+    indptr[-1] = last_indptr
+    if axis == 0:
+        return csr_matrix((data, indices, indptr),
+                          shape=(sum_dim, constant_dim))
+    else:
+        return csc_matrix((data, indices, indptr),
+                          shape=(constant_dim, sum_dim))
+
+
+def _stack_along_minor_axis(blocks, axis):
+    """
+    Stacking fast path for CSR/CSC matrices along the minor axis
+    (i) hstack for CSR, (ii) vstack for CSC.
+    """
+    n_blocks = len(blocks)
+    if n_blocks == 0:
+        raise ValueError('Missing block matrices')
+
+    if n_blocks == 1:
+        return blocks[0]
+
+    # check for incompatible dimensions
+    other_axis = 1 if axis == 0 else 0
+    other_axis_dims = set(b.shape[other_axis] for b in blocks)
+    if len(other_axis_dims) > 1:
+        raise ValueError(f'Mismatching dimensions along axis {other_axis}: '
+                         f'{other_axis_dims}')
+    constant_dim, = other_axis_dims
+
+    # Do the stacking
+    indptr_list = [b.indptr for b in blocks]
+    data_cat = np.concatenate([b.data for b in blocks])
+
+    # Need to check whether any indices/indptr would be too large post-
+    # concatenation for np.int32:
+    # - The max value of indices is the output array's stacking-axis length - 1
+    # - The max value in indptr is the number of non-zero entries. This is
+    #   exceedingly unlikely to require int64, but is checked out of an
+    #   abundance of caution.
+    sum_dim = sum(b.shape[axis] for b in blocks)
+    nnz = sum(len(b.indices) for b in blocks)
+    idx_dtype = get_index_dtype(maxval=max(sum_dim - 1, nnz))
+    stack_dim_cat = np.array([b.shape[axis] for b in blocks], dtype=idx_dtype)
+    if data_cat.size > 0:
+        indptr_cat = np.concatenate(indptr_list).astype(idx_dtype)
+        indices_cat = (np.concatenate([b.indices for b in blocks])
+                       .astype(idx_dtype))
+        indptr = np.empty(constant_dim + 1, dtype=idx_dtype)
+        indices = np.empty_like(indices_cat)
+        data = np.empty_like(data_cat)
+        csr_hstack(n_blocks, constant_dim, stack_dim_cat,
+                   indptr_cat, indices_cat, data_cat,
+                   indptr, indices, data)
+    else:
+        indptr = np.zeros(constant_dim + 1, dtype=idx_dtype)
+        indices = np.empty(0, dtype=idx_dtype)
+        data = np.empty(0, dtype=data_cat.dtype)
+
+    if axis == 0:
+        return csc_matrix((data, indices, indptr),
+                          shape=(sum_dim, constant_dim))
+    else:
+        return csr_matrix((data, indices, indptr),
+                          shape=(constant_dim, sum_dim))
+
+
+def hstack(blocks, format=None, dtype=None):
+    """
+    Stack sparse matrices horizontally (column wise)
+
+    Parameters
+    ----------
+    blocks
+        sequence of sparse matrices with compatible shapes
+    format : str
+        sparse format of the result (e.g., "csr")
+        by default an appropriate sparse matrix format is returned.
+        This choice is subject to change.
+    dtype : dtype, optional
+        The data-type of the output matrix. If not given, the dtype is
+        determined from that of `blocks`.
+
+    See Also
+    --------
+    vstack : stack sparse matrices vertically (row wise)
+
+    Examples
+    --------
+    >>> from scipy.sparse import coo_matrix, hstack
+    >>> A = coo_matrix([[1, 2], [3, 4]])
+    >>> B = coo_matrix([[5], [6]])
+    >>> hstack([A,B]).toarray()
+    array([[1, 2, 5],
+           [3, 4, 6]])
+
+    """
+    return bmat([blocks], format=format, dtype=dtype)
+
+
+def vstack(blocks, format=None, dtype=None):
+    """
+    Stack sparse matrices vertically (row wise)
+
+    Parameters
+    ----------
+    blocks
+        sequence of sparse matrices with compatible shapes
+    format : str, optional
+        sparse format of the result (e.g., "csr")
+        by default an appropriate sparse matrix format is returned.
+        This choice is subject to change.
+    dtype : dtype, optional
+        The data-type of the output matrix. If not given, the dtype is
+        determined from that of `blocks`.
+
+    See Also
+    --------
+    hstack : stack sparse matrices horizontally (column wise)
+
+    Examples
+    --------
+    >>> from scipy.sparse import coo_matrix, vstack
+    >>> A = coo_matrix([[1, 2], [3, 4]])
+    >>> B = coo_matrix([[5, 6]])
+    >>> vstack([A, B]).toarray()
+    array([[1, 2],
+           [3, 4],
+           [5, 6]])
+
+    """
+    return bmat([[b] for b in blocks], format=format, dtype=dtype)
+
+
+def bmat(blocks, format=None, dtype=None):
+    """
+    Build a sparse matrix from sparse sub-blocks
+
+    Parameters
+    ----------
+    blocks : array_like
+        Grid of sparse matrices with compatible shapes.
+        An entry of None implies an all-zero matrix.
+    format : {'bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil'}, optional
+        The sparse format of the result (e.g. "csr"). By default an
+        appropriate sparse matrix format is returned.
+        This choice is subject to change.
+    dtype : dtype, optional
+        The data-type of the output matrix. If not given, the dtype is
+        determined from that of `blocks`.
+
+    Returns
+    -------
+    bmat : sparse matrix
+
+    See Also
+    --------
+    block_diag, diags
+
+    Examples
+    --------
+    >>> from scipy.sparse import coo_matrix, bmat
+    >>> A = coo_matrix([[1, 2], [3, 4]])
+    >>> B = coo_matrix([[5], [6]])
+    >>> C = coo_matrix([[7]])
+    >>> bmat([[A, B], [None, C]]).toarray()
+    array([[1, 2, 5],
+           [3, 4, 6],
+           [0, 0, 7]])
+
+    >>> bmat([[A, None], [None, C]]).toarray()
+    array([[1, 2, 0],
+           [3, 4, 0],
+           [0, 0, 7]])
+
+    """
+
+    blocks = np.asarray(blocks, dtype='object')
+
+    if blocks.ndim != 2:
+        raise ValueError('blocks must be 2-D')
+
+    M,N = blocks.shape
+
+    # check for fast path cases
+    if (format in (None, 'csr') and all(isinstance(b, csr_matrix)
+                                        for b in blocks.flat)):
+        if N > 1:
+            # stack along columns (axis 1):
+            blocks = [[_stack_along_minor_axis(blocks[b, :], 1)]
+                      for b in range(M)]   # must have shape: (M, 1)
+            blocks = np.asarray(blocks, dtype='object')
+
+        # stack along rows (axis 0):
+        A = _compressed_sparse_stack(blocks[:, 0], 0)
+        if dtype is not None:
+            A = A.astype(dtype)
+        return A
+    elif (format in (None, 'csc') and all(isinstance(b, csc_matrix)
+                                          for b in blocks.flat)):
+        if M > 1:
+            # stack along rows (axis 0):
+            blocks = [[_stack_along_minor_axis(blocks[:, b], 0)
+                       for b in range(N)]]   # must have shape: (1, N)
+            blocks = np.asarray(blocks, dtype='object')
+
+        # stack along columns (axis 1):
+        A = _compressed_sparse_stack(blocks[0, :], 1)
+        if dtype is not None:
+            A = A.astype(dtype)
+        return A
+
+    block_mask = np.zeros(blocks.shape, dtype=bool)
+    brow_lengths = np.zeros(M, dtype=np.int64)
+    bcol_lengths = np.zeros(N, dtype=np.int64)
+
+    # convert everything to COO format
+    for i in range(M):
+        for j in range(N):
+            if blocks[i,j] is not None:
+                A = coo_matrix(blocks[i,j])
+                blocks[i,j] = A
+                block_mask[i,j] = True
+
+                if brow_lengths[i] == 0:
+                    brow_lengths[i] = A.shape[0]
+                elif brow_lengths[i] != A.shape[0]:
+                    msg = (f'blocks[{i},:] has incompatible row dimensions. '
+                           f'Got blocks[{i},{j}].shape[0] == {A.shape[0]}, '
+                           f'expected {brow_lengths[i]}.')
+                    raise ValueError(msg)
+
+                if bcol_lengths[j] == 0:
+                    bcol_lengths[j] = A.shape[1]
+                elif bcol_lengths[j] != A.shape[1]:
+                    msg = (f'blocks[:,{j}] has incompatible column '
+                           f'dimensions. '
+                           f'Got blocks[{i},{j}].shape[1] == {A.shape[1]}, '
+                           f'expected {bcol_lengths[j]}.')
+                    raise ValueError(msg)
+
+    nnz = sum(block.nnz for block in blocks[block_mask])
+    if dtype is None:
+        all_dtypes = [blk.dtype for blk in blocks[block_mask]]
+        dtype = upcast(*all_dtypes) if all_dtypes else None
+
+    row_offsets = np.append(0, np.cumsum(brow_lengths))
+    col_offsets = np.append(0, np.cumsum(bcol_lengths))
+
+    shape = (row_offsets[-1], col_offsets[-1])
+
+    data = np.empty(nnz, dtype=dtype)
+    idx_dtype = get_index_dtype(maxval=max(shape))
+    row = np.empty(nnz, dtype=idx_dtype)
+    col = np.empty(nnz, dtype=idx_dtype)
+
+    nnz = 0
+    ii, jj = np.nonzero(block_mask)
+    for i, j in zip(ii, jj):
+        B = blocks[i, j]
+        idx = slice(nnz, nnz + B.nnz)
+        data[idx] = B.data
+        np.add(B.row, row_offsets[i], out=row[idx], dtype=idx_dtype)
+        np.add(B.col, col_offsets[j], out=col[idx], dtype=idx_dtype)
+        nnz += B.nnz
+
+    return coo_matrix((data, (row, col)), shape=shape).asformat(format)
+
+
+def block_diag(mats, format=None, dtype=None):
+    """
+    Build a block diagonal sparse matrix from provided matrices.
+
+    Parameters
+    ----------
+    mats : sequence of matrices
+        Input matrices.
+    format : str, optional
+        The sparse format of the result (e.g., "csr"). If not given, the matrix
+        is returned in "coo" format.
+    dtype : dtype specifier, optional
+        The data-type of the output matrix. If not given, the dtype is
+        determined from that of `blocks`.
+
+    Returns
+    -------
+    res : sparse matrix
+
+    Notes
+    -----
+
+    .. versionadded:: 0.11.0
+
+    See Also
+    --------
+    bmat, diags
+
+    Examples
+    --------
+    >>> from scipy.sparse import coo_matrix, block_diag
+    >>> A = coo_matrix([[1, 2], [3, 4]])
+    >>> B = coo_matrix([[5], [6]])
+    >>> C = coo_matrix([[7]])
+    >>> block_diag((A, B, C)).toarray()
+    array([[1, 2, 0, 0],
+           [3, 4, 0, 0],
+           [0, 0, 5, 0],
+           [0, 0, 6, 0],
+           [0, 0, 0, 7]])
+
+    """
+    row = []
+    col = []
+    data = []
+    r_idx = 0
+    c_idx = 0
+    for a in mats:
+        if isinstance(a, (list, numbers.Number)):
+            a = coo_matrix(a)
+        nrows, ncols = a.shape
+        if issparse(a):
+            a = a.tocoo()
+            row.append(a.row + r_idx)
+            col.append(a.col + c_idx)
+            data.append(a.data)
+        else:
+            a_row, a_col = np.divmod(np.arange(nrows*ncols), ncols)
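+            # row-major unravel: a_row = idx // ncols, a_col = idx % ncols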
+            row.append(a_row + r_idx)
+            col.append(a_col + c_idx)
+            data.append(a.ravel())
+        r_idx += nrows
+        c_idx += ncols
+    row = np.concatenate(row)
+    col = np.concatenate(col)
+    data = np.concatenate(data)
+    return coo_matrix((data, (row, col)),
+                      shape=(r_idx, c_idx),
+                      dtype=dtype).asformat(format)
+
+
+def random(m, n, density=0.01, format='coo', dtype=None,
+           random_state=None, data_rvs=None):
+    """Generate a sparse matrix of the given shape and density with randomly
+    distributed values.
+
+    Parameters
+    ----------
+    m, n : int
+        shape of the matrix
+    density : real, optional
+        density of the generated matrix: density equal to one means a full
+        matrix, density of 0 means a matrix with no non-zero items.
+    format : str, optional
+        sparse matrix format.
+    dtype : dtype, optional
+        type of the returned matrix values.
+    random_state : {None, int, `numpy.random.Generator`,
+                    `numpy.random.RandomState`}, optional
+
+        If `random_state` is None (or `np.random`), the
+        `numpy.random.RandomState` singleton is used.
+        If `random_state` is an int, a new ``RandomState`` instance is used,
+        seeded with `random_state`.
+        If `random_state` is already a ``Generator`` or ``RandomState``
+        instance then that instance is used.
+        This random state will be used
+        for sampling the sparsity structure, but not necessarily for sampling
+        the values of the structurally nonzero entries of the matrix.
+    data_rvs : callable, optional
+        Samples a requested number of random values.
+        This function should take a single argument specifying the length
+        of the ndarray that it will return. The structurally nonzero entries
+        of the sparse random matrix will be taken from the array sampled
+        by this function. By default, uniform [0, 1) random values will be
+        sampled using the same random state as is used for sampling
+        the sparsity structure.
+
+    Returns
+    -------
+    res : sparse matrix
+
+    Notes
+    -----
+    Only float types are supported for now.
+
+    Examples
+    --------
+    >>> from scipy.sparse import random
+    >>> from scipy import stats
+    >>> from numpy.random import default_rng
+    >>> rng = default_rng()
+    >>> rvs = stats.poisson(25, loc=10).rvs
+    >>> S = random(3, 4, density=0.25, random_state=rng, data_rvs=rvs)
+    >>> S.A
+    array([[ 36.,   0.,  33.,   0.],   # random
+           [  0.,   0.,   0.,   0.],
+           [  0.,   0.,  36.,   0.]])
+
+    >>> from scipy.sparse import random
+    >>> from scipy.stats import rv_continuous
+    >>> class CustomDistribution(rv_continuous):
+    ...     def _rvs(self,  size=None, random_state=None):
+    ...         return random_state.standard_normal(size)
+    >>> X = CustomDistribution(seed=rng)
+    >>> Y = X()  # get a frozen version of the distribution
+    >>> S = random(3, 4, density=0.25, random_state=rng, data_rvs=Y.rvs)
+    >>> S.A
+    array([[ 0.        ,  0.        ,  0.        ,  0.        ],   # random
+           [ 0.13569738,  1.9467163 , -0.81205367,  0.        ],
+           [ 0.        ,  0.        ,  0.        ,  0.        ]])
+
+    """
+    if density < 0 or density > 1:
+        raise ValueError("density expected to be 0 <= density <= 1")
+    dtype = np.dtype(dtype)
+
+    mn = m * n
+
+    tp = np.intc
+    if mn > np.iinfo(tp).max:
+        tp = np.int64
+
+    if mn > np.iinfo(tp).max:
+        msg = """\
+Trying to generate a random sparse matrix such that the product of dimensions
+is greater than %d - this is not supported on this machine
+"""
+        raise ValueError(msg % np.iinfo(tp).max)
+
+    # Number of non zero values
+    k = int(round(density * m * n))
+
+    random_state = check_random_state(random_state)
+
+    if data_rvs is None:
+        if np.issubdtype(dtype, np.integer):
+            def data_rvs(n):
+                return rng_integers(random_state,
+                                    np.iinfo(dtype).min,
+                                    np.iinfo(dtype).max,
+                                    n,
+                                    dtype=dtype)
+        elif np.issubdtype(dtype, np.complexfloating):
+            def data_rvs(n):
+                return (random_state.uniform(size=n) +
+                        random_state.uniform(size=n) * 1j)
+        else:
+            data_rvs = partial(random_state.uniform, 0., 1.)
+
+    ind = random_state.choice(mn, size=k, replace=False)
+
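+    # column-major unravel of the flat sample indices: ind = i + j*m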
+    j = np.floor(ind * 1. / m).astype(tp, copy=False)
+    i = (ind - j * m).astype(tp, copy=False)
+    vals = data_rvs(k).astype(dtype, copy=False)
+    return coo_matrix((vals, (i, j)), shape=(m, n)).asformat(format,
+                                                             copy=False)
+
+
+def rand(m, n, density=0.01, format="coo", dtype=None, random_state=None):
+    """Generate a sparse matrix of the given shape and density with uniformly
+    distributed values.
+
+    Parameters
+    ----------
+    m, n : int
+        shape of the matrix
+    density : real, optional
+        density of the generated matrix: density equal to one means a full
+        matrix, density of 0 means a matrix with no non-zero items.
+    format : str, optional
+        sparse matrix format.
+    dtype : dtype, optional
+        type of the returned matrix values.
+    random_state : {None, int, `numpy.random.Generator`,
+                    `numpy.random.RandomState`}, optional
+
+        If `random_state` is None (or `np.random`), the
+        `numpy.random.RandomState` singleton is used.
+        If `random_state` is an int, a new ``RandomState`` instance is used,
+        seeded with `random_state`.
+        If `random_state` is already a ``Generator`` or ``RandomState``
+        instance then that instance is used.
+
+    Returns
+    -------
+    res : sparse matrix
+
+    Notes
+    -----
+    Only float types are supported for now.
+
+    See Also
+    --------
+    scipy.sparse.random : Similar function that allows a user-specified random
+        data source.
+
+    Examples
+    --------
+    >>> from scipy.sparse import rand
+    >>> matrix = rand(3, 4, density=0.25, format="csr", random_state=42)
+    >>> matrix
+    <3x4 sparse matrix of type '<class 'numpy.float64'>'
+       with 3 stored elements in Compressed Sparse Row format>
+    >>> matrix.toarray()
+    array([[0.05641158, 0.        , 0.        , 0.65088847],
+           [0.        , 0.        , 0.        , 0.14286682],
+           [0.        , 0.        , 0.        , 0.        ]])
+
+    """
+    return random(m, n, density, format, dtype, random_state)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/_coo.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/_coo.py
new file mode 100644
index 00000000..14f5e681
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/_coo.py
@@ -0,0 +1,614 @@
+""" A sparse matrix in COOrdinate or 'triplet' format"""
+
+__docformat__ = "restructuredtext en"
+
+__all__ = ['coo_matrix', 'isspmatrix_coo']
+
+from warnings import warn
+
+import numpy as np
+
+
+from ._sparsetools import coo_tocsr, coo_todense, coo_matvec
+from ._base import isspmatrix, SparseEfficiencyWarning, spmatrix
+from ._data import _data_matrix, _minmax_mixin
+from ._sputils import (upcast, upcast_char, to_native, isshape, getdtype,
+                       getdata, get_index_dtype, downcast_intp_index,
+                       check_shape, check_reshape_kwargs)
+
+import operator
+
+
+class coo_matrix(_data_matrix, _minmax_mixin):
+    """
+    A sparse matrix in COOrdinate format.
+
+    Also known as the 'ijv' or 'triplet' format.
+
+    This can be instantiated in several ways:
+        coo_matrix(D)
+            with a dense matrix D
+
+        coo_matrix(S)
+            with another sparse matrix S (equivalent to S.tocoo())
+
+        coo_matrix((M, N), [dtype])
+            to construct an empty matrix with shape (M, N)
+            dtype is optional, defaulting to dtype='d'.
+
+        coo_matrix((data, (i, j)), [shape=(M, N)])
+            to construct from three arrays:
+                1. data[:]   the entries of the matrix, in any order
+                2. i[:]      the row indices of the matrix entries
+                3. j[:]      the column indices of the matrix entries
+
+            Where ``A[i[k], j[k]] = data[k]``.  When shape is not
+            specified, it is inferred from the index arrays
+
+    Attributes
+    ----------
+    dtype : dtype
+        Data type of the matrix
+    shape : 2-tuple
+        Shape of the matrix
+    ndim : int
+        Number of dimensions (this is always 2)
+    nnz
+        Number of stored values, including explicit zeros
+    data
+        COO format data array of the matrix
+    row
+        COO format row index array of the matrix
+    col
+        COO format column index array of the matrix
+
+    Notes
+    -----
+
+    Sparse matrices can be used in arithmetic operations: they support
+    addition, subtraction, multiplication, division, and matrix power.
+
+    Advantages of the COO format
+        - facilitates fast conversion among sparse formats
+        - permits duplicate entries (see example)
+        - very fast conversion to and from CSR/CSC formats
+
+    Disadvantages of the COO format
+        - does not directly support:
+            + arithmetic operations
+            + slicing
+
+    Intended Usage
+        - COO is a fast format for constructing sparse matrices
+        - Once a matrix has been constructed, convert to CSR or
+          CSC format for fast arithmetic and matrix vector operations
+        - By default when converting to CSR or CSC format, duplicate (i,j)
+          entries will be summed together.  This facilitates efficient
+          construction of finite element matrices and the like. (see example)
+
+    Examples
+    --------
+
+    >>> # Constructing an empty matrix
+    >>> import numpy as np
+    >>> from scipy.sparse import coo_matrix
+    >>> coo_matrix((3, 4), dtype=np.int8).toarray()
+    array([[0, 0, 0, 0],
+           [0, 0, 0, 0],
+           [0, 0, 0, 0]], dtype=int8)
+
+    >>> # Constructing a matrix using ijv format
+    >>> row  = np.array([0, 3, 1, 0])
+    >>> col  = np.array([0, 3, 1, 2])
+    >>> data = np.array([4, 5, 7, 9])
+    >>> coo_matrix((data, (row, col)), shape=(4, 4)).toarray()
+    array([[4, 0, 9, 0],
+           [0, 7, 0, 0],
+           [0, 0, 0, 0],
+           [0, 0, 0, 5]])
+
+    >>> # Constructing a matrix with duplicate indices
+    >>> row  = np.array([0, 0, 1, 3, 1, 0, 0])
+    >>> col  = np.array([0, 2, 1, 3, 1, 0, 0])
+    >>> data = np.array([1, 1, 1, 1, 1, 1, 1])
+    >>> coo = coo_matrix((data, (row, col)), shape=(4, 4))
+    >>> # Duplicate indices are maintained until implicitly or explicitly summed
+    >>> np.max(coo.data)
+    1
+    >>> coo.toarray()
+    array([[3, 0, 1, 0],
+           [0, 2, 0, 0],
+           [0, 0, 0, 0],
+           [0, 0, 0, 1]])
+
+    """
+    format = 'coo'
+
+    def __init__(self, arg1, shape=None, dtype=None, copy=False):
+        _data_matrix.__init__(self)
+
+        if isinstance(arg1, tuple):
+            if isshape(arg1):
+                M, N = arg1
+                self._shape = check_shape((M, N))
+                idx_dtype = get_index_dtype(maxval=max(M, N))
+                data_dtype = getdtype(dtype, default=float)
+                self.row = np.array([], dtype=idx_dtype)
+                self.col = np.array([], dtype=idx_dtype)
+                self.data = np.array([], dtype=data_dtype)
+                self.has_canonical_format = True
+            else:
+                try:
+                    obj, (row, col) = arg1
+                except (TypeError, ValueError) as e:
+                    raise TypeError('invalid input format') from e
+
+                if shape is None:
+                    if len(row) == 0 or len(col) == 0:
+                        raise ValueError('cannot infer dimensions from zero '
+                                         'sized index arrays')
+                    M = operator.index(np.max(row)) + 1
+                    N = operator.index(np.max(col)) + 1
+                    self._shape = check_shape((M, N))
+                else:
+                    # Use 2 steps to ensure shape has length 2.
+                    M, N = shape
+                    self._shape = check_shape((M, N))
+
+                idx_dtype = get_index_dtype(maxval=max(self.shape))
+                self.row = np.array(row, copy=copy, dtype=idx_dtype)
+                self.col = np.array(col, copy=copy, dtype=idx_dtype)
+                self.data = getdata(obj, copy=copy, dtype=dtype)
+                self.has_canonical_format = False
+        else:
+            if isspmatrix(arg1):
+                if isspmatrix_coo(arg1) and copy:
+                    self.row = arg1.row.copy()
+                    self.col = arg1.col.copy()
+                    self.data = arg1.data.copy()
+                    self._shape = check_shape(arg1.shape)
+                else:
+                    coo = arg1.tocoo()
+                    self.row = coo.row
+                    self.col = coo.col
+                    self.data = coo.data
+                    self._shape = check_shape(coo.shape)
+                self.has_canonical_format = False
+            else:
+                #dense argument
+                M = np.atleast_2d(np.asarray(arg1))
+
+                if M.ndim != 2:
+                    raise TypeError('expected dimension <= 2 array or matrix')
+
+                self._shape = check_shape(M.shape)
+                if shape is not None:
+                    if check_shape(shape) != self._shape:
+                        raise ValueError('inconsistent shapes: %s != %s' %
+                                         (shape, self._shape))
+
+                self.row, self.col = M.nonzero()
+                self.data = M[self.row, self.col]
+                self.has_canonical_format = True
+
+        if dtype is not None:
+            self.data = self.data.astype(dtype, copy=False)
+
+        self._check()
+
+    def reshape(self, *args, **kwargs):
+        shape = check_shape(args, self.shape)
+        order, copy = check_reshape_kwargs(kwargs)
+
+        # Return early if reshape is not required
+        if shape == self.shape:
+            if copy:
+                return self.copy()
+            else:
+                return self
+
+        nrows, ncols = self.shape
+
+        if order == 'C':
+            # Upcast to avoid overflows: the coo_matrix constructor
+            # below will downcast the results to a smaller dtype, if
+            # possible.
+            dtype = get_index_dtype(maxval=(ncols * max(0, nrows - 1) + max(0, ncols - 1)))
+
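+            # C-order flat index of entry (r, c) is r * ncols + c; divmod
+            # by the new column count recovers the reshaped coordinates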
+            flat_indices = np.multiply(ncols, self.row, dtype=dtype) + self.col
+            new_row, new_col = divmod(flat_indices, shape[1])
+        elif order == 'F':
+            dtype = get_index_dtype(maxval=(nrows * max(0, ncols - 1) + max(0, nrows - 1)))
+
+            flat_indices = np.multiply(nrows, self.col, dtype=dtype) + self.row
+            new_col, new_row = divmod(flat_indices, shape[0])
+        else:
+            raise ValueError("'order' must be 'C' or 'F'")
+
+        # Handle copy here rather than passing on to the constructor so that no
+        # copy will be made of new_row and new_col regardless
+        if copy:
+            new_data = self.data.copy()
+        else:
+            new_data = self.data
+
+        return self.__class__((new_data, (new_row, new_col)),
+                              shape=shape, copy=False)
+
+    reshape.__doc__ = spmatrix.reshape.__doc__
+
+    def getnnz(self, axis=None):
+        if axis is None:
+            nnz = len(self.data)
+            if nnz != len(self.row) or nnz != len(self.col):
+                raise ValueError('row, column, and data arrays must all be the '
+                                 'same length')
+
+            if self.data.ndim != 1 or self.row.ndim != 1 or \
+                    self.col.ndim != 1:
+                raise ValueError('row, column, and data arrays must be 1-D')
+
+            return int(nnz)
+
+        if axis < 0:
+            axis += 2
+        if axis == 0:
+            return np.bincount(downcast_intp_index(self.col),
+                               minlength=self.shape[1])
+        elif axis == 1:
+            return np.bincount(downcast_intp_index(self.row),
+                               minlength=self.shape[0])
+        else:
+            raise ValueError('axis out of bounds')
+
+    getnnz.__doc__ = spmatrix.getnnz.__doc__
+
+    def _check(self):
+        """ Checks data structure for consistency """
+
+        # index arrays should have integer data types
+        if self.row.dtype.kind != 'i':
+            warn("row index array has non-integer dtype (%s)  "
+                    % self.row.dtype.name)
+        if self.col.dtype.kind != 'i':
+            warn("col index array has non-integer dtype (%s) "
+                    % self.col.dtype.name)
+
+        idx_dtype = get_index_dtype(maxval=max(self.shape))
+        self.row = np.asarray(self.row, dtype=idx_dtype)
+        self.col = np.asarray(self.col, dtype=idx_dtype)
+        self.data = to_native(self.data)
+
+        if self.nnz > 0:
+            if self.row.max() >= self.shape[0]:
+                raise ValueError('row index exceeds matrix dimensions')
+            if self.col.max() >= self.shape[1]:
+                raise ValueError('column index exceeds matrix dimensions')
+            if self.row.min() < 0:
+                raise ValueError('negative row index found')
+            if self.col.min() < 0:
+                raise ValueError('negative column index found')
+
+    def transpose(self, axes=None, copy=False):
+        if axes is not None:
+            raise ValueError(("Sparse matrices do not support "
+                              "an 'axes' parameter because swapping "
+                              "dimensions is the only logical permutation."))
+
+        M, N = self.shape
+        return self.__class__((self.data, (self.col, self.row)),
+                              shape=(N, M), copy=copy)
+
+    transpose.__doc__ = spmatrix.transpose.__doc__
+
+    def resize(self, *shape):
+        shape = check_shape(shape)
+        new_M, new_N = shape
+        M, N = self.shape
+
+        if new_M < M or new_N < N:
+            mask = np.logical_and(self.row < new_M, self.col < new_N)
+            if not mask.all():
+                self.row = self.row[mask]
+                self.col = self.col[mask]
+                self.data = self.data[mask]
+
+        self._shape = shape
+
+    resize.__doc__ = spmatrix.resize.__doc__
+
+    def toarray(self, order=None, out=None):
+        """See the docstring for `spmatrix.toarray`."""
+        B = self._process_toarray_args(order, out)
+        fortran = int(B.flags.f_contiguous)
+        if not fortran and not B.flags.c_contiguous:
+            raise ValueError("Output array must be C or F contiguous")
+        M,N = self.shape
+        coo_todense(M, N, self.nnz, self.row, self.col, self.data,
+                    B.ravel('A'), fortran)
+        return B
+
+    def tocsc(self, copy=False):
+        """Convert this matrix to Compressed Sparse Column format
+
+        Duplicate entries will be summed together.
+
+        Examples
+        --------
+        >>> from numpy import array
+        >>> from scipy.sparse import coo_matrix
+        >>> row  = array([0, 0, 1, 3, 1, 0, 0])
+        >>> col  = array([0, 2, 1, 3, 1, 0, 0])
+        >>> data = array([1, 1, 1, 1, 1, 1, 1])
+        >>> A = coo_matrix((data, (row, col)), shape=(4, 4)).tocsc()
+        >>> A.toarray()
+        array([[3, 0, 1, 0],
+               [0, 2, 0, 0],
+               [0, 0, 0, 0],
+               [0, 0, 0, 1]])
+
+        """
+        if self.nnz == 0:
+            return self._csc_container(self.shape, dtype=self.dtype)
+        else:
+            M,N = self.shape
+            idx_dtype = get_index_dtype((self.col, self.row),
+                                        maxval=max(self.nnz, M))
+            row = self.row.astype(idx_dtype, copy=False)
+            col = self.col.astype(idx_dtype, copy=False)
+
+            indptr = np.empty(N + 1, dtype=idx_dtype)
+            indices = np.empty_like(row, dtype=idx_dtype)
+            data = np.empty_like(self.data, dtype=upcast(self.dtype))
+
+            coo_tocsr(N, M, self.nnz, col, row, self.data,
+                      indptr, indices, data)
+
+            x = self._csc_container((data, indices, indptr), shape=self.shape)
+            if not self.has_canonical_format:
+                x.sum_duplicates()
+            return x
+
+    def tocsr(self, copy=False):
+        """Convert this matrix to Compressed Sparse Row format
+
+        Duplicate entries will be summed together.
+
+        Examples
+        --------
+        >>> from numpy import array
+        >>> from scipy.sparse import coo_matrix
+        >>> row  = array([0, 0, 1, 3, 1, 0, 0])
+        >>> col  = array([0, 2, 1, 3, 1, 0, 0])
+        >>> data = array([1, 1, 1, 1, 1, 1, 1])
+        >>> A = coo_matrix((data, (row, col)), shape=(4, 4)).tocsr()
+        >>> A.toarray()
+        array([[3, 0, 1, 0],
+               [0, 2, 0, 0],
+               [0, 0, 0, 0],
+               [0, 0, 0, 1]])
+
+        """
+        if self.nnz == 0:
+            return self._csr_container(self.shape, dtype=self.dtype)
+        else:
+            M,N = self.shape
+            idx_dtype = get_index_dtype((self.row, self.col),
+                                        maxval=max(self.nnz, N))
+            row = self.row.astype(idx_dtype, copy=False)
+            col = self.col.astype(idx_dtype, copy=False)
+
+            indptr = np.empty(M + 1, dtype=idx_dtype)
+            indices = np.empty_like(col, dtype=idx_dtype)
+            data = np.empty_like(self.data, dtype=upcast(self.dtype))
+
+            coo_tocsr(M, N, self.nnz, row, col, self.data,
+                      indptr, indices, data)
+
+            x = self._csr_container((data, indices, indptr), shape=self.shape)
+            if not self.has_canonical_format:
+                x.sum_duplicates()
+            return x
+
+    def tocoo(self, copy=False):
+        if copy:
+            return self.copy()
+        else:
+            return self
+
+    tocoo.__doc__ = spmatrix.tocoo.__doc__
+
+    def todia(self, copy=False):
+        self.sum_duplicates()
+        ks = self.col - self.row  # the diagonal for each nonzero
+        diags, diag_idx = np.unique(ks, return_inverse=True)
+
+        if len(diags) > 100:
+            # probably undesired, should todia() have a maxdiags parameter?
+            warn("Constructing a DIA matrix with %d diagonals "
+                 "is inefficient" % len(diags), SparseEfficiencyWarning)
+
+        #initialize and fill in data array
+        if self.data.size == 0:
+            data = np.zeros((0, 0), dtype=self.dtype)
+        else:
+            data = np.zeros((len(diags), self.col.max()+1), dtype=self.dtype)
+            data[diag_idx, self.col] = self.data
+
+        return self._dia_container((data, diags), shape=self.shape)
+
+    todia.__doc__ = spmatrix.todia.__doc__
+
+    def todok(self, copy=False):
+        self.sum_duplicates()
+        dok = self._dok_container((self.shape), dtype=self.dtype)
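+        # each (row, col) key maps to its entry; duplicates were just summed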
+        dok._update(zip(zip(self.row,self.col),self.data))
+
+        return dok
+
+    todok.__doc__ = spmatrix.todok.__doc__
+
+    def diagonal(self, k=0):
+        rows, cols = self.shape
+        if k <= -rows or k >= cols:
+            return np.empty(0, dtype=self.data.dtype)
+        diag = np.zeros(min(rows + min(k, 0), cols - max(k, 0)),
+                        dtype=self.dtype)
+        diag_mask = (self.row + k) == self.col
+
+        if self.has_canonical_format:
+            row = self.row[diag_mask]
+            data = self.data[diag_mask]
+        else:
+            row, _, data = self._sum_duplicates(self.row[diag_mask],
+                                                self.col[diag_mask],
+                                                self.data[diag_mask])
+        diag[row + min(k, 0)] = data
+
+        return diag
+
+    diagonal.__doc__ = _data_matrix.diagonal.__doc__
+
+    def _setdiag(self, values, k):
+        M, N = self.shape
+        if values.ndim and not len(values):
+            return
+        idx_dtype = self.row.dtype
+
+        # Determine which triples to keep and where to put the new ones.
+        full_keep = self.col - self.row != k
+        if k < 0:
+            max_index = min(M+k, N)
+            if values.ndim:
+                max_index = min(max_index, len(values))
+            keep = np.logical_or(full_keep, self.col >= max_index)
+            new_row = np.arange(-k, -k + max_index, dtype=idx_dtype)
+            new_col = np.arange(max_index, dtype=idx_dtype)
+        else:
+            max_index = min(M, N-k)
+            if values.ndim:
+                max_index = min(max_index, len(values))
+            keep = np.logical_or(full_keep, self.row >= max_index)
+            new_row = np.arange(max_index, dtype=idx_dtype)
+            new_col = np.arange(k, k + max_index, dtype=idx_dtype)
+
+        # Define the array of data consisting of the entries to be added.
+        if values.ndim:
+            new_data = values[:max_index]
+        else:
+            new_data = np.empty(max_index, dtype=self.dtype)
+            new_data[:] = values
+
+        # Update the internal structure.
+        self.row = np.concatenate((self.row[keep], new_row))
+        self.col = np.concatenate((self.col[keep], new_col))
+        self.data = np.concatenate((self.data[keep], new_data))
+        self.has_canonical_format = False
+
+    # needed by _data_matrix
+    def _with_data(self,data,copy=True):
+        """Returns a matrix with the same sparsity structure as self,
+        but with different data.  By default the index arrays
+        (i.e. .row and .col) are copied.
+        """
+        if copy:
+            return self.__class__((data, (self.row.copy(), self.col.copy())),
+                                   shape=self.shape, dtype=data.dtype)
+        else:
+            return self.__class__((data, (self.row, self.col)),
+                                   shape=self.shape, dtype=data.dtype)
+
+    def sum_duplicates(self):
+        """Eliminate duplicate matrix entries by adding them together
+
+        This is an *in place* operation
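+
+        Examples
+        --------
+        A minimal illustration (values arbitrary):
+
+        >>> from scipy.sparse import coo_matrix
+        >>> m = coo_matrix(([1, 2], ([0, 0], [0, 0])))
+        >>> m.sum_duplicates()
+        >>> m.toarray()
+        array([[3]])
+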
+        """
+        if self.has_canonical_format:
+            return
+        summed = self._sum_duplicates(self.row, self.col, self.data)
+        self.row, self.col, self.data = summed
+        self.has_canonical_format = True
+
+    def _sum_duplicates(self, row, col, data):
+        # Assumes (data, row, col) not in canonical format.
+        if len(data) == 0:
+            return row, col, data
+        order = np.lexsort((row, col))
+        row = row[order]
+        col = col[order]
+        data = data[order]
+        unique_mask = ((row[1:] != row[:-1]) |
+                       (col[1:] != col[:-1]))
+        unique_mask = np.append(True, unique_mask)
+        row = row[unique_mask]
+        col = col[unique_mask]
+        unique_inds, = np.nonzero(unique_mask)
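+        # reduceat sums each run of duplicates: segment k spans positions
+        # unique_inds[k] .. unique_inds[k+1]-1 of the sorted arrays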
+        data = np.add.reduceat(data, unique_inds, dtype=self.dtype)
+        return row, col, data
+
+    def eliminate_zeros(self):
+        """Remove zero entries from the matrix
+
+        This is an *in place* operation
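+
+        Examples
+        --------
+        A minimal illustration with one explicitly stored zero:
+
+        >>> from scipy.sparse import coo_matrix
+        >>> m = coo_matrix(([0, 1], ([0, 1], [0, 1])))
+        >>> m.eliminate_zeros()
+        >>> m.nnz
+        1
+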
+        """
+        mask = self.data != 0
+        self.data = self.data[mask]
+        self.row = self.row[mask]
+        self.col = self.col[mask]
+
+    #######################
+    # Arithmetic handlers #
+    #######################
+
+    def _add_dense(self, other):
+        if other.shape != self.shape:
+            raise ValueError('Incompatible shapes ({} and {})'
+                             .format(self.shape, other.shape))
+        dtype = upcast_char(self.dtype.char, other.dtype.char)
+        result = np.array(other, dtype=dtype, copy=True)
+        fortran = int(result.flags.f_contiguous)
+        M, N = self.shape
+        coo_todense(M, N, self.nnz, self.row, self.col, self.data,
+                    result.ravel('A'), fortran)
+        return self._container(result, copy=False)
+
+    def _mul_vector(self, other):
+        #output array
+        result = np.zeros(self.shape[0], dtype=upcast_char(self.dtype.char,
+                                                            other.dtype.char))
+        coo_matvec(self.nnz, self.row, self.col, self.data, other, result)
+        return result
+
+    def _mul_multivector(self, other):
+        result = np.zeros((other.shape[1], self.shape[0]),
+                          dtype=upcast_char(self.dtype.char, other.dtype.char))
+        for i, col in enumerate(other.T):
+            coo_matvec(self.nnz, self.row, self.col, self.data, col, result[i])
+        return result.T.view(type=type(other))
+
+
+def isspmatrix_coo(x):
+    """Is x of coo_matrix type?
+
+    Parameters
+    ----------
+    x
+        object to check for being a coo matrix
+
+    Returns
+    -------
+    bool
+        True if x is a coo matrix, False otherwise
+
+    Examples
+    --------
+    >>> from scipy.sparse import coo_matrix, isspmatrix_coo
+    >>> isspmatrix_coo(coo_matrix([[5]]))
+    True
+
+    >>> from scipy.sparse import coo_matrix, csr_matrix, isspmatrix_coo
+    >>> isspmatrix_coo(csr_matrix([[5]]))
+    False
+    """
+    from ._arrays import coo_array
+    return isinstance(x, coo_matrix) or isinstance(x, coo_array)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/_csc.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/_csc.py
new file mode 100644
index 00000000..a86b2202
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/_csc.py
@@ -0,0 +1,260 @@
+"""Compressed Sparse Column matrix format"""
+__docformat__ = "restructuredtext en"
+
+__all__ = ['csc_matrix', 'isspmatrix_csc']
+
+
+import numpy as np
+
+from ._base import spmatrix
+from ._sparsetools import csc_tocsr, expandptr
+from ._sputils import upcast, get_index_dtype
+
+from ._compressed import _cs_matrix
+
+
+class csc_matrix(_cs_matrix):
+    """
+    Compressed Sparse Column matrix
+
+    This can be instantiated in several ways:
+
+        csc_matrix(D)
+            with a dense matrix or rank-2 ndarray D
+
+        csc_matrix(S)
+            with another sparse matrix S (equivalent to S.tocsc())
+
+        csc_matrix((M, N), [dtype])
+            to construct an empty matrix with shape (M, N)
+            dtype is optional, defaulting to dtype='d'.
+
+        csc_matrix((data, (row_ind, col_ind)), [shape=(M, N)])
+            where ``data``, ``row_ind`` and ``col_ind`` satisfy the
+            relationship ``a[row_ind[k], col_ind[k]] = data[k]``.
+
+        csc_matrix((data, indices, indptr), [shape=(M, N)])
+            is the standard CSC representation where the row indices for
+            column i are stored in ``indices[indptr[i]:indptr[i+1]]``
+            and their corresponding values are stored in
+            ``data[indptr[i]:indptr[i+1]]``.  If the shape parameter is
+            not supplied, the matrix dimensions are inferred from
+            the index arrays.
+
+    Attributes
+    ----------
+    dtype : dtype
+        Data type of the matrix
+    shape : 2-tuple
+        Shape of the matrix
+    ndim : int
+        Number of dimensions (this is always 2)
+    nnz
+        Number of stored values, including explicit zeros
+    data
+        Data array of the matrix
+    indices
+        CSC format index array
+    indptr
+        CSC format index pointer array
+    has_sorted_indices
+        Whether indices are sorted
+
+    Notes
+    -----
+
+    Sparse matrices can be used in arithmetic operations: they support
+    addition, subtraction, multiplication, division, and matrix power.
+
+    Advantages of the CSC format
+        - efficient arithmetic operations CSC + CSC, CSC * CSC, etc.
+        - efficient column slicing
+        - fast matrix vector products (CSR, BSR may be faster)
+
+    Disadvantages of the CSC format
+        - slow row slicing operations (consider CSR)
+        - changes to the sparsity structure are expensive (consider LIL or DOK)
+
+    Examples
+    --------
+
+    >>> import numpy as np
+    >>> from scipy.sparse import csc_matrix
+    >>> csc_matrix((3, 4), dtype=np.int8).toarray()
+    array([[0, 0, 0, 0],
+           [0, 0, 0, 0],
+           [0, 0, 0, 0]], dtype=int8)
+
+    >>> row = np.array([0, 2, 2, 0, 1, 2])
+    >>> col = np.array([0, 0, 1, 2, 2, 2])
+    >>> data = np.array([1, 2, 3, 4, 5, 6])
+    >>> csc_matrix((data, (row, col)), shape=(3, 3)).toarray()
+    array([[1, 0, 4],
+           [0, 0, 5],
+           [2, 3, 6]])
+
+    >>> indptr = np.array([0, 2, 3, 6])
+    >>> indices = np.array([0, 2, 2, 0, 1, 2])
+    >>> data = np.array([1, 2, 3, 4, 5, 6])
+    >>> csc_matrix((data, indices, indptr), shape=(3, 3)).toarray()
+    array([[1, 0, 4],
+           [0, 0, 5],
+           [2, 3, 6]])
+
+    """
+    format = 'csc'
+
+    def transpose(self, axes=None, copy=False):
+        if axes is not None:
+            raise ValueError(("Sparse matrices do not support "
+                              "an 'axes' parameter because swapping "
+                              "dimensions is the only logical permutation."))
+
+        M, N = self.shape
+
+        return self._csr_container((self.data, self.indices,
+                                    self.indptr), (N, M), copy=copy)
+
+    transpose.__doc__ = spmatrix.transpose.__doc__
+
+    def __iter__(self):
+        yield from self.tocsr()
+
+    def tocsc(self, copy=False):
+        if copy:
+            return self.copy()
+        else:
+            return self
+
+    tocsc.__doc__ = spmatrix.tocsc.__doc__
+
+    def tocsr(self, copy=False):
+        M,N = self.shape
+        idx_dtype = get_index_dtype((self.indptr, self.indices),
+                                    maxval=max(self.nnz, N))
+        indptr = np.empty(M + 1, dtype=idx_dtype)
+        indices = np.empty(self.nnz, dtype=idx_dtype)
+        data = np.empty(self.nnz, dtype=upcast(self.dtype))
+
+        csc_tocsr(M, N,
+                  self.indptr.astype(idx_dtype),
+                  self.indices.astype(idx_dtype),
+                  self.data,
+                  indptr,
+                  indices,
+                  data)
+
+        A = self._csr_container(
+            (data, indices, indptr),
+            shape=self.shape, copy=False
+        )
+        A.has_sorted_indices = True
+        return A
+
+    tocsr.__doc__ = spmatrix.tocsr.__doc__
+
+    def nonzero(self):
+        # CSC can't use _cs_matrix's .nonzero method because it
+        # returns the indices sorted for self transposed.
+
+        # Get row and col indices, from _cs_matrix.tocoo
+        major_dim, minor_dim = self._swap(self.shape)
+        minor_indices = self.indices
+        major_indices = np.empty(len(minor_indices), dtype=self.indices.dtype)
+        expandptr(major_dim, self.indptr, major_indices)
+        row, col = self._swap((major_indices, minor_indices))
+
+        # Remove explicit zeros
+        nz_mask = self.data != 0
+        row = row[nz_mask]
+        col = col[nz_mask]
+
+        # Sort them to be in C-style order
+        ind = np.argsort(row, kind='mergesort')
+        row = row[ind]
+        col = col[ind]
+
+        return row, col
+
+    nonzero.__doc__ = _cs_matrix.nonzero.__doc__
+
+    def getrow(self, i):
+        """Returns a copy of row i of the matrix, as a (1 x n)
+        CSR matrix (row vector).
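+
+        Examples
+        --------
+        A small illustrative case:
+
+        >>> from scipy.sparse import csc_matrix
+        >>> A = csc_matrix([[1, 2], [3, 4]])
+        >>> A.getrow(0).toarray()
+        array([[1, 2]])
+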
+        """
+        M, N = self.shape
+        i = int(i)
+        if i < 0:
+            i += M
+        if i < 0 or i >= M:
+            raise IndexError('index (%d) out of range' % i)
+        return self._get_submatrix(minor=i).tocsr()
+
+    def getcol(self, i):
+        """Returns a copy of column i of the matrix, as a (m x 1)
+        CSC matrix (column vector).
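+
+        Examples
+        --------
+        A small illustrative case:
+
+        >>> from scipy.sparse import csc_matrix
+        >>> A = csc_matrix([[1, 2], [3, 4]])
+        >>> A.getcol(1).toarray()
+        array([[2],
+               [4]])
+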
+        """
+        M, N = self.shape
+        i = int(i)
+        if i < 0:
+            i += N
+        if i < 0 or i >= N:
+            raise IndexError('index (%d) out of range' % i)
+        return self._get_submatrix(major=i, copy=True)
+
+    def _get_intXarray(self, row, col):
+        return self._major_index_fancy(col)._get_submatrix(minor=row)
+
+    def _get_intXslice(self, row, col):
+        if col.step in (1, None):
+            return self._get_submatrix(major=col, minor=row, copy=True)
+        return self._major_slice(col)._get_submatrix(minor=row)
+
+    def _get_sliceXint(self, row, col):
+        if row.step in (1, None):
+            return self._get_submatrix(major=col, minor=row, copy=True)
+        return self._get_submatrix(major=col)._minor_slice(row)
+
+    def _get_sliceXarray(self, row, col):
+        return self._major_index_fancy(col)._minor_slice(row)
+
+    def _get_arrayXint(self, row, col):
+        return self._get_submatrix(major=col)._minor_index_fancy(row)
+
+    def _get_arrayXslice(self, row, col):
+        return self._major_slice(col)._minor_index_fancy(row)
+
+    # these functions are used by the parent class (_cs_matrix)
+    # to remove redundancy between csc_matrix and csr_matrix
+    def _swap(self, x):
+        """swap the members of x if this is a column-oriented matrix
+        """
+        return x[1], x[0]
+
+
+def isspmatrix_csc(x):
+    """Is x of csc_matrix type?
+
+    Parameters
+    ----------
+    x
+        object to check for being a csc matrix
+
+    Returns
+    -------
+    bool
+        True if x is a csc matrix, False otherwise
+
+    Examples
+    --------
+    >>> from scipy.sparse import csc_matrix, isspmatrix_csc
+    >>> isspmatrix_csc(csc_matrix([[5]]))
+    True
+
+    >>> from scipy.sparse import csc_matrix, csr_matrix, isspmatrix_csc
+    >>> isspmatrix_csc(csr_matrix([[5]]))
+    False
+    """
+    from ._arrays import csc_array
+    return isinstance(x, csc_matrix) or isinstance(x, csc_array)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/_csr.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/_csr.py
new file mode 100644
index 00000000..7581ddf9
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/_csr.py
@@ -0,0 +1,357 @@
+"""Compressed Sparse Row matrix format"""
+
+__docformat__ = "restructuredtext en"
+
+__all__ = ['csr_matrix', 'isspmatrix_csr']
+
+import numpy as np
+
+from ._base import spmatrix
+from ._sparsetools import (csr_tocsc, csr_tobsr, csr_count_blocks,
+                           get_csr_submatrix)
+from ._sputils import upcast, get_index_dtype
+
+from ._compressed import _cs_matrix
+
+
+class csr_matrix(_cs_matrix):
+    """
+    Compressed Sparse Row matrix
+
+    This can be instantiated in several ways:
+        csr_matrix(D)
+            with a dense matrix or rank-2 ndarray D
+
+        csr_matrix(S)
+            with another sparse matrix S (equivalent to S.tocsr())
+
+        csr_matrix((M, N), [dtype])
+            to construct an empty matrix with shape (M, N)
+            dtype is optional, defaulting to dtype='d'.
+
+        csr_matrix((data, (row_ind, col_ind)), [shape=(M, N)])
+            where ``data``, ``row_ind`` and ``col_ind`` satisfy the
+            relationship ``a[row_ind[k], col_ind[k]] = data[k]``.
+
+        csr_matrix((data, indices, indptr), [shape=(M, N)])
+            is the standard CSR representation where the column indices for
+            row i are stored in ``indices[indptr[i]:indptr[i+1]]`` and their
+            corresponding values are stored in ``data[indptr[i]:indptr[i+1]]``.
+            If the shape parameter is not supplied, the matrix dimensions
+            are inferred from the index arrays.
+
+    Attributes
+    ----------
+    dtype : dtype
+        Data type of the matrix
+    shape : 2-tuple
+        Shape of the matrix
+    ndim : int
+        Number of dimensions (this is always 2)
+    nnz
+        Number of stored values, including explicit zeros
+    data
+        CSR format data array of the matrix
+    indices
+        CSR format index array of the matrix
+    indptr
+        CSR format index pointer array of the matrix
+    has_sorted_indices
+        Whether indices are sorted
+
+    Notes
+    -----
+
+    Sparse matrices can be used in arithmetic operations: they support
+    addition, subtraction, multiplication, division, and matrix power.
+
+    Advantages of the CSR format
+      - efficient arithmetic operations CSR + CSR, CSR * CSR, etc.
+      - efficient row slicing
+      - fast matrix vector products
+
+    Disadvantages of the CSR format
+      - slow column slicing operations (consider CSC)
+      - changes to the sparsity structure are expensive (consider LIL or DOK)
+
+    Examples
+    --------
+
+    >>> import numpy as np
+    >>> from scipy.sparse import csr_matrix
+    >>> csr_matrix((3, 4), dtype=np.int8).toarray()
+    array([[0, 0, 0, 0],
+           [0, 0, 0, 0],
+           [0, 0, 0, 0]], dtype=int8)
+
+    >>> row = np.array([0, 0, 1, 2, 2, 2])
+    >>> col = np.array([0, 2, 2, 0, 1, 2])
+    >>> data = np.array([1, 2, 3, 4, 5, 6])
+    >>> csr_matrix((data, (row, col)), shape=(3, 3)).toarray()
+    array([[1, 0, 2],
+           [0, 0, 3],
+           [4, 5, 6]])
+
+    >>> indptr = np.array([0, 2, 3, 6])
+    >>> indices = np.array([0, 2, 2, 0, 1, 2])
+    >>> data = np.array([1, 2, 3, 4, 5, 6])
+    >>> csr_matrix((data, indices, indptr), shape=(3, 3)).toarray()
+    array([[1, 0, 2],
+           [0, 0, 3],
+           [4, 5, 6]])
+
+    Duplicate entries are summed together:
+
+    >>> row = np.array([0, 1, 2, 0])
+    >>> col = np.array([0, 1, 1, 0])
+    >>> data = np.array([1, 2, 4, 8])
+    >>> csr_matrix((data, (row, col)), shape=(3, 3)).toarray()
+    array([[9, 0, 0],
+           [0, 2, 0],
+           [0, 4, 0]])
+
+    As an example of how to construct a CSR matrix incrementally,
+    the following snippet builds a term-document matrix from texts:
+
+    >>> docs = [["hello", "world", "hello"], ["goodbye", "cruel", "world"]]
+    >>> indptr = [0]
+    >>> indices = []
+    >>> data = []
+    >>> vocabulary = {}
+    >>> for d in docs:
+    ...     for term in d:
+    ...         index = vocabulary.setdefault(term, len(vocabulary))
+    ...         indices.append(index)
+    ...         data.append(1)
+    ...     indptr.append(len(indices))
+    ...
+    >>> csr_matrix((data, indices, indptr), dtype=int).toarray()
+    array([[2, 1, 0, 0],
+           [0, 1, 1, 1]])
+
+    """
+    format = 'csr'
+
+    def transpose(self, axes=None, copy=False):
+        if axes is not None:
+            raise ValueError(("Sparse matrices do not support "
+                              "an 'axes' parameter because swapping "
+                              "dimensions is the only logical permutation."))
+
+        M, N = self.shape
+        return self._csc_container((self.data, self.indices,
+                                    self.indptr), shape=(N, M), copy=copy)
+
+    transpose.__doc__ = spmatrix.transpose.__doc__
+
+    def tolil(self, copy=False):
+        lil = self._lil_container(self.shape, dtype=self.dtype)
+
+        self.sum_duplicates()
+        ptr,ind,dat = self.indptr,self.indices,self.data
+        rows, data = lil.rows, lil.data
+
+        for n in range(self.shape[0]):
+            start = ptr[n]
+            end = ptr[n+1]
+            rows[n] = ind[start:end].tolist()
+            data[n] = dat[start:end].tolist()
+
+        return lil
+
+    tolil.__doc__ = spmatrix.tolil.__doc__
+
+    def tocsr(self, copy=False):
+        if copy:
+            return self.copy()
+        else:
+            return self
+
+    tocsr.__doc__ = spmatrix.tocsr.__doc__
+
+    def tocsc(self, copy=False):
+        idx_dtype = get_index_dtype((self.indptr, self.indices),
+                                    maxval=max(self.nnz, self.shape[0]))
+        indptr = np.empty(self.shape[1] + 1, dtype=idx_dtype)
+        indices = np.empty(self.nnz, dtype=idx_dtype)
+        data = np.empty(self.nnz, dtype=upcast(self.dtype))
+
+        csr_tocsc(self.shape[0], self.shape[1],
+                  self.indptr.astype(idx_dtype),
+                  self.indices.astype(idx_dtype),
+                  self.data,
+                  indptr,
+                  indices,
+                  data)
+
+        A = self._csc_container((data, indices, indptr), shape=self.shape)
+        A.has_sorted_indices = True
+        return A
+
+    tocsc.__doc__ = spmatrix.tocsc.__doc__
+
+    def tobsr(self, blocksize=None, copy=True):
+        if blocksize is None:
+            from ._spfuncs import estimate_blocksize
+            return self.tobsr(blocksize=estimate_blocksize(self))
+
+        elif blocksize == (1,1):
+            arg1 = (self.data.reshape(-1,1,1),self.indices,self.indptr)
+            return self._bsr_container(arg1, shape=self.shape, copy=copy)
+
+        else:
+            R,C = blocksize
+            M,N = self.shape
+
+            if R < 1 or C < 1 or M % R != 0 or N % C != 0:
+                raise ValueError('invalid blocksize %s' % blocksize)
+
+            blks = csr_count_blocks(M,N,R,C,self.indptr,self.indices)
+
+            idx_dtype = get_index_dtype((self.indptr, self.indices),
+                                        maxval=max(N//C, blks))
+            indptr = np.empty(M//R+1, dtype=idx_dtype)
+            indices = np.empty(blks, dtype=idx_dtype)
+            data = np.zeros((blks,R,C), dtype=self.dtype)
+
+            csr_tobsr(M, N, R, C,
+                      self.indptr.astype(idx_dtype),
+                      self.indices.astype(idx_dtype),
+                      self.data,
+                      indptr, indices, data.ravel())
+
+            return self._bsr_container(
+                (data, indices, indptr), shape=self.shape
+            )
+
+    tobsr.__doc__ = spmatrix.tobsr.__doc__
+
+    # these functions are used by the parent class (_cs_matrix)
+    # to remove redundancy between csc_matrix and csr_matrix
+    def _swap(self, x):
+        """swap the members of x if this is a column-oriented matrix
+        """
+        return x
+
+    def __iter__(self):
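+        # Yield each row as its own (1 x N) matrix built from that row's
+        # slice of the indices/data arrays.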
+        indptr = np.zeros(2, dtype=self.indptr.dtype)
+        shape = (1, self.shape[1])
+        i0 = 0
+        for i1 in self.indptr[1:]:
+            indptr[1] = i1 - i0
+            indices = self.indices[i0:i1]
+            data = self.data[i0:i1]
+            yield self.__class__(
+                (data, indices, indptr), shape=shape, copy=True
+            )
+            i0 = i1
+
+    def getrow(self, i):
+        """Returns a copy of row i of the matrix, as a (1 x n)
+        CSR matrix (row vector).
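+
+        Examples
+        --------
+        A small illustrative case:
+
+        >>> from scipy.sparse import csr_matrix
+        >>> A = csr_matrix([[1, 0], [0, 2]])
+        >>> A.getrow(1).toarray()
+        array([[0, 2]])
+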
+        """
+        M, N = self.shape
+        i = int(i)
+        if i < 0:
+            i += M
+        if i < 0 or i >= M:
+            raise IndexError('index (%d) out of range' % i)
+        indptr, indices, data = get_csr_submatrix(
+            M, N, self.indptr, self.indices, self.data, i, i + 1, 0, N)
+        return self.__class__((data, indices, indptr), shape=(1, N),
+                              dtype=self.dtype, copy=False)
+
+    def getcol(self, i):
+        """Returns a copy of column i of the matrix, as a (m x 1)
+        CSR matrix (column vector).
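+
+        Examples
+        --------
+        A small illustrative case:
+
+        >>> from scipy.sparse import csr_matrix
+        >>> A = csr_matrix([[1, 0], [0, 2]])
+        >>> A.getcol(0).toarray()
+        array([[1],
+               [0]])
+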
+        """
+        M, N = self.shape
+        i = int(i)
+        if i < 0:
+            i += N
+        if i < 0 or i >= N:
+            raise IndexError('index (%d) out of range' % i)
+        indptr, indices, data = get_csr_submatrix(
+            M, N, self.indptr, self.indices, self.data, 0, M, i, i + 1)
+        return self.__class__((data, indices, indptr), shape=(M, 1),
+                              dtype=self.dtype, copy=False)
+
+    def _get_intXarray(self, row, col):
+        return self.getrow(row)._minor_index_fancy(col)
+
+    def _get_intXslice(self, row, col):
+        if col.step in (1, None):
+            return self._get_submatrix(row, col, copy=True)
+        # TODO: uncomment this once it's faster:
+        # return self.getrow(row)._minor_slice(col)
+
+        M, N = self.shape
+        start, stop, stride = col.indices(N)
+
+        ii, jj = self.indptr[row:row+2]
+        row_indices = self.indices[ii:jj]
+        row_data = self.data[ii:jj]
+
+        if stride > 0:
+            ind = (row_indices >= start) & (row_indices < stop)
+        else:
+            ind = (row_indices <= start) & (row_indices > stop)
+
+        if abs(stride) > 1:
+            ind &= (row_indices - start) % stride == 0
+
+        row_indices = (row_indices[ind] - start) // stride
+        row_data = row_data[ind]
+        row_indptr = np.array([0, len(row_indices)])
+
+        if stride < 0:
+            row_data = row_data[::-1]
+            row_indices = abs(row_indices[::-1])
+
+        shape = (1, max(0, int(np.ceil(float(stop - start) / stride))))
+        return self.__class__((row_data, row_indices, row_indptr), shape=shape,
+                              dtype=self.dtype, copy=False)
+
+    def _get_sliceXint(self, row, col):
+        if row.step in (1, None):
+            return self._get_submatrix(row, col, copy=True)
+        return self._major_slice(row)._get_submatrix(minor=col)
+
+    def _get_sliceXarray(self, row, col):
+        return self._major_slice(row)._minor_index_fancy(col)
+
+    def _get_arrayXint(self, row, col):
+        return self._major_index_fancy(row)._get_submatrix(minor=col)
+
+    def _get_arrayXslice(self, row, col):
+        if col.step not in (1, None):
+            col = np.arange(*col.indices(self.shape[1]))
+            return self._get_arrayXarray(row, col)
+        return self._major_index_fancy(row)._get_submatrix(minor=col)
+
+
+def isspmatrix_csr(x):
+    """Is x of csr_matrix type?
+
+    Parameters
+    ----------
+    x
+        object to check for being a csr matrix
+
+    Returns
+    -------
+    bool
+        True if x is a csr matrix, False otherwise
+
+    Examples
+    --------
+    >>> from scipy.sparse import csr_matrix, isspmatrix_csr
+    >>> isspmatrix_csr(csr_matrix([[5]]))
+    True
+
+    >>> from scipy.sparse import csc_matrix, csr_matrix, isspmatrix_csr
+    >>> isspmatrix_csr(csc_matrix([[5]]))
+    False
+    """
+    from ._arrays import csr_array
+    return isinstance(x, csr_matrix) or isinstance(x, csr_array)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/_data.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/_data.py
new file mode 100644
index 00000000..2efa6b96
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/_data.py
@@ -0,0 +1,402 @@
+"""Base class for sparse matrice with a .data attribute
+
+    subclasses must provide a _with_data() method that
+    creates a new matrix with the same sparsity pattern
+    as self but with a different data array
+
+"""
+
+import numpy as np
+
+from ._base import spmatrix, _ufuncs_with_fixed_point_at_zero
+from ._sputils import isscalarlike, validateaxis, matrix
+
+__all__ = []
+
+
+# TODO implement all relevant operations
+# use .data.__methods__() instead of /=, *=, etc.
+class _data_matrix(spmatrix):
+    def __init__(self):
+        spmatrix.__init__(self)
+
+    def _get_dtype(self):
+        return self.data.dtype
+
+    def _set_dtype(self, newtype):
+        self.data.dtype = newtype
+    dtype = property(fget=_get_dtype, fset=_set_dtype)
+
+    def _deduped_data(self):
+        if hasattr(self, 'sum_duplicates'):
+            self.sum_duplicates()
+        return self.data
+
+    def __abs__(self):
+        return self._with_data(abs(self._deduped_data()))
+
+    def __round__(self, ndigits=0):
+        return self._with_data(np.around(self._deduped_data(), decimals=ndigits))
+
+    def _real(self):
+        return self._with_data(self.data.real)
+
+    def _imag(self):
+        return self._with_data(self.data.imag)
+
+    def __neg__(self):
+        if self.dtype.kind == 'b':
+            raise NotImplementedError('negating a sparse boolean '
+                                      'matrix is not supported')
+        return self._with_data(-self.data)
+
+    def __imul__(self, other):  # self *= other
+        if isscalarlike(other):
+            self.data *= other
+            return self
+        else:
+            return NotImplemented
+
+    def __itruediv__(self, other):  # self /= other
+        if isscalarlike(other):
+            recip = 1.0 / other
+            self.data *= recip
+            return self
+        else:
+            return NotImplemented
+
+    def astype(self, dtype, casting='unsafe', copy=True):
+        dtype = np.dtype(dtype)
+        if self.dtype != dtype:
+            return self._with_data(
+                self._deduped_data().astype(dtype, casting=casting, copy=copy),
+                copy=copy)
+        elif copy:
+            return self.copy()
+        else:
+            return self
+
+    astype.__doc__ = spmatrix.astype.__doc__
+
+    def conj(self, copy=True):
+        if np.issubdtype(self.dtype, np.complexfloating):
+            return self._with_data(self.data.conj(), copy=copy)
+        elif copy:
+            return self.copy()
+        else:
+            return self
+
+    conj.__doc__ = spmatrix.conj.__doc__
+
+    def copy(self):
+        return self._with_data(self.data.copy(), copy=True)
+
+    copy.__doc__ = spmatrix.copy.__doc__
+
+    def count_nonzero(self):
+        return np.count_nonzero(self._deduped_data())
+
+    count_nonzero.__doc__ = spmatrix.count_nonzero.__doc__
+
+    def power(self, n, dtype=None):
+        """
+        This function performs element-wise power.
+
+        Parameters
+        ----------
+        n : scalar
+            The exponent applied element-wise to the stored values.
+        dtype : dtype, optional
+            Data type of the result. If not specified, the current dtype
+            is preserved.
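+
+        Examples
+        --------
+        A minimal sketch (matrix values chosen arbitrarily):
+
+        >>> from scipy.sparse import csr_matrix
+        >>> A = csr_matrix([[2, 0], [0, 3]])
+        >>> A.power(2).toarray()
+        array([[4, 0],
+               [0, 9]])
+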
+        """
+        if not isscalarlike(n):
+            raise NotImplementedError("input is not scalar")
+
+        data = self._deduped_data()
+        if dtype is not None:
+            data = data.astype(dtype)
+        return self._with_data(data ** n)
+
+    ###########################
+    # Multiplication handlers #
+    ###########################
+
+    def _mul_scalar(self, other):
+        return self._with_data(self.data * other)
+
+
+# Add the numpy unary ufuncs for which func(0) = 0 to _data_matrix.
+for npfunc in _ufuncs_with_fixed_point_at_zero:
+    name = npfunc.__name__
+
+    def _create_method(op):
+        def method(self):
+            result = op(self._deduped_data())
+            return self._with_data(result, copy=True)
+
+        method.__doc__ = ("Element-wise %s.\n\n"
+                          "See `numpy.%s` for more information." % (name, name))
+        method.__name__ = name
+
+        return method
+
+    setattr(_data_matrix, name, _create_method(npfunc))
+
+
+def _find_missing_index(ind, n):
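+    """Return the first value of range(n) missing from the sorted array
+    ``ind``, or -1 if ``ind`` already covers all of range(n)."""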
+    for k, a in enumerate(ind):
+        if k != a:
+            return k
+
+    k += 1
+    if k < n:
+        return k
+    else:
+        return -1
+
+
+class _minmax_mixin:
+    """Mixin for min and max methods.
+
+    These are not implemented for dia_matrix, hence the separate class.
+    """
+
+    def _min_or_max_axis(self, axis, min_or_max):
+        N = self.shape[axis]
+        if N == 0:
+            raise ValueError("zero-size array to reduction operation")
+        M = self.shape[1 - axis]
+
+        mat = self.tocsc() if axis == 0 else self.tocsr()
+        mat.sum_duplicates()
+
+        major_index, value = mat._minor_reduce(min_or_max)
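+        # lines with fewer than N stored entries contain implicit zeros,
+        # so fold a zero into their reduced value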
+        not_full = np.diff(mat.indptr)[major_index] < N
+        value[not_full] = min_or_max(value[not_full], 0)
+
+        mask = value != 0
+        major_index = np.compress(mask, major_index)
+        value = np.compress(mask, value)
+
+        if axis == 0:
+            return self._coo_container(
+                (value, (np.zeros(len(value)), major_index)),
+                dtype=self.dtype, shape=(1, M)
+            )
+        else:
+            return self._coo_container(
+                (value, (major_index, np.zeros(len(value)))),
+                dtype=self.dtype, shape=(M, 1)
+            )
+
+    def _min_or_max(self, axis, out, min_or_max):
+        if out is not None:
+            raise ValueError(("Sparse matrices do not support "
+                              "an 'out' parameter."))
+
+        validateaxis(axis)
+
+        if axis is None:
+            if 0 in self.shape:
+                raise ValueError("zero-size array to reduction operation")
+
+            zero = self.dtype.type(0)
+            if self.nnz == 0:
+                return zero
+            m = min_or_max.reduce(self._deduped_data().ravel())
+            if self.nnz != np.prod(self.shape):
+                m = min_or_max(zero, m)
+            return m
+
+        if axis < 0:
+            axis += 2
+
+        if (axis == 0) or (axis == 1):
+            return self._min_or_max_axis(axis, min_or_max)
+        else:
+            raise ValueError("axis out of range")
+
+    def _arg_min_or_max_axis(self, axis, op, compare):
+        if self.shape[axis] == 0:
+            raise ValueError("Can't apply the operation along a zero-sized "
+                             "dimension.")
+
+        if axis < 0:
+            axis += 2
+
+        zero = self.dtype.type(0)
+
+        mat = self.tocsc() if axis == 0 else self.tocsr()
+        mat.sum_duplicates()
+
+        ret_size, line_size = mat._swap(mat.shape)
+        ret = np.zeros(ret_size, dtype=int)
+
+        nz_lines, = np.nonzero(np.diff(mat.indptr))
+        for i in nz_lines:
+            p, q = mat.indptr[i:i + 2]
+            data = mat.data[p:q]
+            indices = mat.indices[p:q]
+            am = op(data)
+            m = data[am]
+            if compare(m, zero) or q - p == line_size:
+                ret[i] = indices[am]
+            else:
+                zero_ind = _find_missing_index(indices, line_size)
+                if m == zero:
+                    ret[i] = min(am, zero_ind)
+                else:
+                    ret[i] = zero_ind
+
+        if axis == 1:
+            ret = ret.reshape(-1, 1)
+
+        return matrix(ret)
+
+    def _arg_min_or_max(self, axis, out, op, compare):
+        if out is not None:
+            raise ValueError("Sparse matrices do not support "
+                             "an 'out' parameter.")
+
+        validateaxis(axis)
+
+        if axis is None:
+            if 0 in self.shape:
+                raise ValueError("Can't apply the operation to "
+                                 "an empty matrix.")
+
+            if self.nnz == 0:
+                return 0
+            else:
+                zero = self.dtype.type(0)
+                mat = self.tocoo()
+                mat.sum_duplicates()
+                am = op(mat.data)
+                m = mat.data[am]
+
+                if compare(m, zero):
+                    # cast to Python int to avoid overflow
+                    # and RuntimeError
+                    return int(mat.row[am])*mat.shape[1] + int(mat.col[am])
+                else:
+                    size = np.prod(mat.shape)
+                    if size == mat.nnz:
+                        return am
+                    else:
+                        ind = mat.row * mat.shape[1] + mat.col
+                        zero_ind = _find_missing_index(ind, size)
+                        if m == zero:
+                            return min(zero_ind, am)
+                        else:
+                            return zero_ind
+
+        return self._arg_min_or_max_axis(axis, op, compare)
+
+    def max(self, axis=None, out=None):
+        """
+        Return the maximum of the matrix or maximum along an axis.
+        This takes all elements into account, not just the non-zero ones.
+
+        Parameters
+        ----------
+        axis : {-2, -1, 0, 1, None}, optional
+            Axis along which the maximum is computed. The default is to
+            compute the maximum over all the matrix elements, returning
+            a scalar (i.e., `axis` = `None`).
+
+        out : None, optional
+            This argument is in the signature *solely* for NumPy
+            compatibility reasons. Do not pass in anything except
+            for the default value, as this argument is not used.
+
+        Returns
+        -------
+        amax : coo_matrix or scalar
+            Maximum of `a`. If `axis` is None, the result is a scalar value.
+            If `axis` is given, the result is a sparse.coo_matrix of dimension
+            ``a.ndim - 1``.
+
+        See Also
+        --------
+        min : The minimum value of a sparse matrix along a given axis.
+        numpy.matrix.max : NumPy's implementation of 'max' for matrices
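+
+        Examples
+        --------
+        An illustrative case where an implicit zero is not the maximum:
+
+        >>> from scipy.sparse import coo_matrix
+        >>> A = coo_matrix([[1, 2, 0], [0, 0, 3]])
+        >>> A.max()
+        3
+        >>> A.max(axis=1).toarray()
+        array([[2],
+               [3]])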
+
+        """
+        return self._min_or_max(axis, out, np.maximum)
+
+    def min(self, axis=None, out=None):
+        """
+        Return the minimum of the matrix or minimum along an axis.
+        This takes all elements into account, not just the non-zero ones.
+
+        Parameters
+        ----------
+        axis : {-2, -1, 0, 1, None}, optional
+            Axis along which the minimum is computed. The default is to
+            compute the minimum over all the matrix elements, returning
+            a scalar (i.e., `axis` = `None`).
+
+        out : None, optional
+            This argument is in the signature *solely* for NumPy
+            compatibility reasons. Do not pass in anything except for
+            the default value, as this argument is not used.
+
+        Returns
+        -------
+        amin : coo_matrix or scalar
+            Minimum of `a`. If `axis` is None, the result is a scalar value.
+            If `axis` is given, the result is a sparse.coo_matrix of dimension
+            ``a.ndim - 1``.
+
+        See Also
+        --------
+        max : The maximum value of a sparse matrix along a given axis.
+        numpy.matrix.min : NumPy's implementation of 'min' for matrices
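+
+        Examples
+        --------
+        An illustrative case where the implicit zeros are the minimum:
+
+        >>> from scipy.sparse import coo_matrix
+        >>> A = coo_matrix([[1, 2, 0], [0, 0, 3]])
+        >>> A.min()
+        0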
+
+        """
+        return self._min_or_max(axis, out, np.minimum)
+
+    def argmax(self, axis=None, out=None):
+        """Return indices of maximum elements along an axis.
+
+        Implicit zero elements are also taken into account. If there are
+        several maximum values, the index of the first occurrence is returned.
+
+        Parameters
+        ----------
+        axis : {-2, -1, 0, 1, None}, optional
+            Axis along which the argmax is computed. If None (default), index
+            of the maximum element in the flattened data is returned.
+        out : None, optional
+            This argument is in the signature *solely* for NumPy
+            compatibility reasons. Do not pass in anything except for
+            the default value, as this argument is not used.
+
+        Returns
+        -------
+        ind : numpy.matrix or int
+            Indices of maximum elements. If matrix, its size along `axis` is 1.
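+
+        Examples
+        --------
+        A small illustrative case:
+
+        >>> from scipy.sparse import csr_matrix
+        >>> A = csr_matrix([[0, 5], [3, 0]])
+        >>> A.argmax()
+        1
+        >>> A.argmax(axis=0)
+        matrix([[1, 0]])
+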
+        """
+        return self._arg_min_or_max(axis, out, np.argmax, np.greater)
+
+    def argmin(self, axis=None, out=None):
+        """Return indices of minimum elements along an axis.
+
+        Implicit zero elements are also taken into account. If there are
+        several minimum values, the index of the first occurrence is returned.
+
+        Parameters
+        ----------
+        axis : {-2, -1, 0, 1, None}, optional
+            Axis along which the argmin is computed. If None (default), index
+            of the minimum element in the flatten data is returned.
+        out : None, optional
+            This argument is in the signature *solely* for NumPy
+            compatibility reasons. Do not pass in anything except for
+            the default value, as this argument is not used.
+
+        Returns
+        -------
+        ind : numpy.matrix or int
+            Indices of minimum elements. If matrix, its size along `axis` is 1.
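+
+        Examples
+        --------
+        A small illustrative case (the zero at (0, 0) is the minimum):
+
+        >>> from scipy.sparse import csr_matrix
+        >>> A = csr_matrix([[0, 5], [3, 0]])
+        >>> A.argmin()
+        0
+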
+        """
+        return self._arg_min_or_max(axis, out, np.argmin, np.less)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/_dia.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/_dia.py
new file mode 100644
index 00000000..54d9a35a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/_dia.py
@@ -0,0 +1,470 @@
+"""Sparse DIAgonal format"""
+
+__docformat__ = "restructuredtext en"
+
+__all__ = ['dia_matrix', 'isspmatrix_dia']
+
+import numpy as np
+
+from ._base import isspmatrix, _formats, spmatrix
+from ._data import _data_matrix
+from ._sputils import (isshape, upcast_char, getdtype, get_index_dtype,
+                       get_sum_dtype, validateaxis, check_shape)
+from ._sparsetools import dia_matvec
+
+
+class dia_matrix(_data_matrix):
+    """Sparse matrix with DIAgonal storage
+
+    This can be instantiated in several ways:
+        dia_matrix(D)
+            with a dense matrix
+
+        dia_matrix(S)
+            with another sparse matrix S (equivalent to S.todia())
+
+        dia_matrix((M, N), [dtype])
+            to construct an empty matrix with shape (M, N),
+            dtype is optional, defaulting to dtype='d'.
+
+        dia_matrix((data, offsets), shape=(M, N))
+            where the ``data[k,:]`` stores the diagonal entries for
+            diagonal ``offsets[k]`` (See example below)
+
+    Attributes
+    ----------
+    dtype : dtype
+        Data type of the matrix
+    shape : 2-tuple
+        Shape of the matrix
+    ndim : int
+        Number of dimensions (this is always 2)
+    nnz
+        Number of stored values, including explicit zeros
+    data
+        DIA format data array of the matrix
+    offsets
+        DIA format offset array of the matrix
+
+    Notes
+    -----
+
+    Sparse matrices can be used in arithmetic operations: they support
+    addition, subtraction, multiplication, division, and matrix power.
+
+    Examples
+    --------
+
+    >>> import numpy as np
+    >>> from scipy.sparse import dia_matrix
+    >>> dia_matrix((3, 4), dtype=np.int8).toarray()
+    array([[0, 0, 0, 0],
+           [0, 0, 0, 0],
+           [0, 0, 0, 0]], dtype=int8)
+
+    >>> data = np.array([[1, 2, 3, 4]]).repeat(3, axis=0)
+    >>> offsets = np.array([0, -1, 2])
+    >>> dia_matrix((data, offsets), shape=(4, 4)).toarray()
+    array([[1, 0, 3, 0],
+           [1, 2, 0, 4],
+           [0, 2, 3, 0],
+           [0, 0, 3, 4]])
+
+    >>> from scipy.sparse import dia_matrix
+    >>> n = 10
+    >>> ex = np.ones(n)
+    >>> data = np.array([ex, 2 * ex, ex])
+    >>> offsets = np.array([-1, 0, 1])
+    >>> dia_matrix((data, offsets), shape=(n, n)).toarray()
+    array([[2., 1., 0., ..., 0., 0., 0.],
+           [1., 2., 1., ..., 0., 0., 0.],
+           [0., 1., 2., ..., 0., 0., 0.],
+           ...,
+           [0., 0., 0., ..., 2., 1., 0.],
+           [0., 0., 0., ..., 1., 2., 1.],
+           [0., 0., 0., ..., 0., 1., 2.]])
+    """
+    format = 'dia'
+
+    def __init__(self, arg1, shape=None, dtype=None, copy=False):
+        _data_matrix.__init__(self)
+
+        if isspmatrix_dia(arg1):
+            if copy:
+                arg1 = arg1.copy()
+            self.data = arg1.data
+            self.offsets = arg1.offsets
+            self._shape = check_shape(arg1.shape)
+        elif isspmatrix(arg1):
+            if isspmatrix_dia(arg1) and copy:
+                A = arg1.copy()
+            else:
+                A = arg1.todia()
+            self.data = A.data
+            self.offsets = A.offsets
+            self._shape = check_shape(A.shape)
+        elif isinstance(arg1, tuple):
+            if isshape(arg1):
+                # It's a tuple of matrix dimensions (M, N)
+                # create empty matrix
+                self._shape = check_shape(arg1)
+                self.data = np.zeros((0,0), getdtype(dtype, default=float))
+                idx_dtype = get_index_dtype(maxval=max(self.shape))
+                self.offsets = np.zeros((0), dtype=idx_dtype)
+            else:
+                try:
+                    # Try interpreting it as (data, offsets)
+                    data, offsets = arg1
+                except Exception as e:
+                    raise ValueError('unrecognized form for dia_matrix constructor') from e
+                else:
+                    if shape is None:
+                        raise ValueError('expected a shape argument')
+                    self.data = np.atleast_2d(np.array(arg1[0], dtype=dtype, copy=copy))
+                    self.offsets = np.atleast_1d(np.array(arg1[1],
+                                                          dtype=get_index_dtype(maxval=max(shape)),
+                                                          copy=copy))
+                    self._shape = check_shape(shape)
+        else:
+            #must be dense, convert to COO first, then to DIA
+            try:
+                arg1 = np.asarray(arg1)
+            except Exception as e:
+                raise ValueError("unrecognized form for"
+                        " %s_matrix constructor" % self.format) from e
+            A = self._coo_container(arg1, dtype=dtype, shape=shape).todia()
+            self.data = A.data
+            self.offsets = A.offsets
+            self._shape = check_shape(A.shape)
+
+        if dtype is not None:
+            self.data = self.data.astype(dtype)
+
+        #check format
+        if self.offsets.ndim != 1:
+            raise ValueError('offsets array must have rank 1')
+
+        if self.data.ndim != 2:
+            raise ValueError('data array must have rank 2')
+
+        if self.data.shape[0] != len(self.offsets):
+            raise ValueError('number of diagonals (%d) '
+                    'does not match the number of offsets (%d)'
+                    % (self.data.shape[0], len(self.offsets)))
+
+        if len(np.unique(self.offsets)) != len(self.offsets):
+            raise ValueError('offset array contains duplicate values')
+
+    def __repr__(self):
+        format = _formats[self.getformat()][1]
+        return "<%dx%d sparse matrix of type '%s'\n" \
+               "\twith %d stored elements (%d diagonals) in %s format>" % \
+               (self.shape + (self.dtype.type, self.nnz, self.data.shape[0],
+                              format))
+
+    def _data_mask(self):
+        """Returns a mask of the same shape as self.data, where
+        mask[i,j] is True when data[i,j] corresponds to a stored element."""
+        num_rows, num_cols = self.shape
+        offset_inds = np.arange(self.data.shape[1])
+        row = offset_inds - self.offsets[:,None]
+        mask = (row >= 0)
+        mask &= (row < num_rows)
+        mask &= (offset_inds < num_cols)
+        return mask
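+
+    # Editorial sketch: for shape (3, 3) with offsets [0, 1] and data of
+    # shape (2, 3), the slot data[1, 0] falls outside the matrix, so:
+    #
+    #     >>> m = dia_matrix((np.ones((2, 3)), [0, 1]), shape=(3, 3))
+    #     >>> m._data_mask()
+    #     array([[ True,  True,  True],
+    #            [False,  True,  True]])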
+
+    def count_nonzero(self):
+        mask = self._data_mask()
+        return np.count_nonzero(self.data[mask])
+
+    def getnnz(self, axis=None):
+        if axis is not None:
+            raise NotImplementedError("getnnz over an axis is not implemented "
+                                      "for DIA format")
+        M,N = self.shape
+        nnz = 0
+        for k in self.offsets:
+            if k > 0:
+                nnz += min(M,N-k)
+            else:
+                nnz += min(M+k,N)
+        return int(nnz)
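+
+    # For example (editorial sketch): with M, N = 3, 4, offset k = 2
+    # contributes min(3, 4 - 2) = 2 stored entries and k = -1 contributes
+    # min(3 - 1, 4) = 2.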
+
+    getnnz.__doc__ = spmatrix.getnnz.__doc__
+    count_nonzero.__doc__ = spmatrix.count_nonzero.__doc__
+
+    def sum(self, axis=None, dtype=None, out=None):
+        validateaxis(axis)
+
+        if axis is not None and axis < 0:
+            axis += 2
+
+        res_dtype = get_sum_dtype(self.dtype)
+        num_rows, num_cols = self.shape
+        ret = None
+
+        if axis == 0:
+            mask = self._data_mask()
+            x = (self.data * mask).sum(axis=0)
+            if x.shape[0] == num_cols:
+                res = x
+            else:
+                res = np.zeros(num_cols, dtype=x.dtype)
+                res[:x.shape[0]] = x
+            ret = self._ascontainer(res, dtype=res_dtype)
+
+        else:
+            row_sums = np.zeros((num_rows, 1), dtype=res_dtype)
+            one = np.ones(num_cols, dtype=res_dtype)
+            dia_matvec(num_rows, num_cols, len(self.offsets),
+                       self.data.shape[1], self.offsets, self.data, one, row_sums)
+
+            row_sums = self._ascontainer(row_sums)
+
+            if axis is None:
+                return row_sums.sum(dtype=dtype, out=out)
+
+            ret = self._ascontainer(row_sums.sum(axis=axis))
+
+        if out is not None and out.shape != ret.shape:
+            raise ValueError("dimensions do not match")
+
+        return ret.sum(axis=(), dtype=dtype, out=out)
+
+    sum.__doc__ = spmatrix.sum.__doc__
+
+    def _add_sparse(self, other):
+
+        # Check if other is also of type dia_matrix
+        if not isinstance(other, type(self)):
+            # If other is not of type dia_matrix, default to
+            # converting to csr_matrix, as is done in the _add_sparse
+            # method of parent class spmatrix
+            return self.tocsr()._add_sparse(other)
+
+        # The task is to compute m = self + other
+        # Start by making a copy of self, of the datatype
+        # that should result from adding self and other
+        dtype = np.promote_types(self.dtype, other.dtype)
+        m = self.astype(dtype, copy=True)
+
+        # Then, add all the stored diagonals of other.
+        for d in other.offsets:
+            # Check if the diagonal has already been added.
+            if d in m.offsets:
+                # If the diagonal is already there, we need to take
+                # the sum of the existing and the new
+                m.setdiag(m.diagonal(d) + other.diagonal(d), d)
+            else:
+                m.setdiag(other.diagonal(d), d)
+        return m
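+
+    # Editorial sketch: adding two DIA matrices merges their offset sets,
+    # e.g. (assuming 3x3 operands)
+    #
+    #     >>> a = dia_matrix((np.ones((1, 3)), [0]), shape=(3, 3))
+    #     >>> b = dia_matrix((np.ones((1, 3)), [1]), shape=(3, 3))
+    #     >>> sorted((a + b).offsets)
+    #     [0, 1]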
+
+    def _mul_vector(self, other):
+        x = other
+
+        y = np.zeros(self.shape[0], dtype=upcast_char(self.dtype.char,
+                                                       x.dtype.char))
+
+        L = self.data.shape[1]
+
+        M,N = self.shape
+
+        dia_matvec(M,N, len(self.offsets), L, self.offsets, self.data, x.ravel(), y.ravel())
+
+        return y
+
+    def _mul_multimatrix(self, other):
+        return np.hstack([self._mul_vector(col).reshape(-1,1) for col in other.T])
+
+    def _setdiag(self, values, k=0):
+        M, N = self.shape
+
+        if values.ndim == 0:
+            # broadcast
+            values_n = np.inf
+        else:
+            values_n = len(values)
+
+        if k < 0:
+            n = min(M + k, N, values_n)
+            min_index = 0
+            max_index = n
+        else:
+            n = min(M, N - k, values_n)
+            min_index = k
+            max_index = k + n
+
+        if values.ndim != 0:
+            # allow also longer sequences
+            values = values[:n]
+
+        data_rows, data_cols = self.data.shape
+        if k in self.offsets:
+            if max_index > data_cols:
+                data = np.zeros((data_rows, max_index), dtype=self.data.dtype)
+                data[:, :data_cols] = self.data
+                self.data = data
+            self.data[self.offsets == k, min_index:max_index] = values
+        else:
+            self.offsets = np.append(self.offsets, self.offsets.dtype.type(k))
+            m = max(max_index, data_cols)
+            data = np.zeros((data_rows + 1, m), dtype=self.data.dtype)
+            data[:-1, :data_cols] = self.data
+            data[-1, min_index:max_index] = values
+            self.data = data
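+
+    # Editorial sketch: assigning to a diagonal that is not yet stored
+    # appends a row to `data` and an entry to `offsets`:
+    #
+    #     >>> m = dia_matrix((np.ones((1, 2)), [0]), shape=(2, 2))
+    #     >>> m.setdiag([5], k=1)
+    #     >>> list(m.offsets)
+    #     [0, 1]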
+
+    def todia(self, copy=False):
+        if copy:
+            return self.copy()
+        else:
+            return self
+
+    todia.__doc__ = spmatrix.todia.__doc__
+
+    def transpose(self, axes=None, copy=False):
+        if axes is not None:
+            raise ValueError(("Sparse matrices do not support "
+                              "an 'axes' parameter because swapping "
+                              "dimensions is the only logical permutation."))
+
+        num_rows, num_cols = self.shape
+        max_dim = max(self.shape)
+
+        # flip diagonal offsets
+        offsets = -self.offsets
+
+        # re-align the data matrix
+        r = np.arange(len(offsets), dtype=np.intc)[:, None]
+        c = np.arange(num_rows, dtype=np.intc) - (offsets % max_dim)[:, None]
+        pad_amount = max(0, max_dim-self.data.shape[1])
+        data = np.hstack((self.data, np.zeros((self.data.shape[0], pad_amount),
+                                              dtype=self.data.dtype)))
+        data = data[r, c]
+        return self._dia_container((data, offsets), shape=(
+            num_cols, num_rows), copy=copy)
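+
+    # Editorial sketch: transposition negates the offsets, so a
+    # superdiagonal becomes a subdiagonal:
+    #
+    #     >>> m = dia_matrix((np.ones((1, 3)), [1]), shape=(3, 3))
+    #     >>> list(m.T.offsets)
+    #     [-1]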
+
+    transpose.__doc__ = spmatrix.transpose.__doc__
+
+    def diagonal(self, k=0):
+        rows, cols = self.shape
+        if k <= -rows or k >= cols:
+            return np.empty(0, dtype=self.data.dtype)
+        idx, = np.nonzero(self.offsets == k)
+        first_col = max(0, k)
+        last_col = min(rows + k, cols)
+        result_size = last_col - first_col
+        if idx.size == 0:
+            return np.zeros(result_size, dtype=self.data.dtype)
+        result = self.data[idx[0], first_col:last_col]
+        padding = result_size - len(result)
+        if padding > 0:
+            result = np.pad(result, (0, padding), mode='constant')
+        return result
+
+    diagonal.__doc__ = spmatrix.diagonal.__doc__
+
+    def tocsc(self, copy=False):
+        if self.nnz == 0:
+            return self._csc_container(self.shape, dtype=self.dtype)
+
+        num_rows, num_cols = self.shape
+        num_offsets, offset_len = self.data.shape
+        offset_inds = np.arange(offset_len)
+
+        row = offset_inds - self.offsets[:,None]
+        mask = (row >= 0)
+        mask &= (row < num_rows)
+        mask &= (offset_inds < num_cols)
+        mask &= (self.data != 0)
+
+        idx_dtype = get_index_dtype(maxval=max(self.shape))
+        indptr = np.zeros(num_cols + 1, dtype=idx_dtype)
+        indptr[1:offset_len+1] = np.cumsum(mask.sum(axis=0)[:num_cols])
+        if offset_len < num_cols:
+            indptr[offset_len+1:] = indptr[offset_len]
+        indices = row.T[mask.T].astype(idx_dtype, copy=False)
+        data = self.data.T[mask.T]
+        return self._csc_container((data, indices, indptr), shape=self.shape,
+                                   dtype=self.dtype)
+
+    tocsc.__doc__ = spmatrix.tocsc.__doc__
+
+    def tocoo(self, copy=False):
+        num_rows, num_cols = self.shape
+        num_offsets, offset_len = self.data.shape
+        offset_inds = np.arange(offset_len)
+
+        row = offset_inds - self.offsets[:,None]
+        mask = (row >= 0)
+        mask &= (row < num_rows)
+        mask &= (offset_inds < num_cols)
+        mask &= (self.data != 0)
+        row = row[mask]
+        col = np.tile(offset_inds, num_offsets)[mask.ravel()]
+        data = self.data[mask]
+
+        A = self._coo_container(
+            (data, (row, col)), shape=self.shape, dtype=self.dtype
+        )
+        A.has_canonical_format = True
+        return A
+
+    tocoo.__doc__ = spmatrix.tocoo.__doc__
+
+    # needed by _data_matrix
+    def _with_data(self, data, copy=True):
+        """Returns a matrix with the same sparsity structure as self,
+        but with different data.  By default the structure arrays are copied.
+        """
+        if copy:
+            return self._dia_container(
+                (data, self.offsets.copy()), shape=self.shape
+            )
+        else:
+            return self._dia_container(
+                (data, self.offsets), shape=self.shape
+            )
+
+    def resize(self, *shape):
+        shape = check_shape(shape)
+        M, N = shape
+        # we do not need to handle the case of expanding N
+        self.data = self.data[:, :N]
+
+        if (M > self.shape[0] and
+                np.any(self.offsets + self.shape[0] < self.data.shape[1])):
+            # explicitly clear values that were previously hidden
+            mask = (self.offsets[:, None] + self.shape[0] <=
+                    np.arange(self.data.shape[1]))
+            self.data[mask] = 0
+
+        self._shape = shape
+
+    resize.__doc__ = spmatrix.resize.__doc__
+
+
+def isspmatrix_dia(x):
+    """Is x of dia_matrix type?
+
+    Parameters
+    ----------
+    x
+        object to check for being a dia matrix
+
+    Returns
+    -------
+    bool
+        True if x is a dia matrix, False otherwise
+
+    Examples
+    --------
+    >>> from scipy.sparse import dia_matrix, isspmatrix_dia
+    >>> isspmatrix_dia(dia_matrix([[5]]))
+    True
+
+    >>> from scipy.sparse import dia_matrix, csr_matrix, isspmatrix_dia
+    >>> isspmatrix_dia(csr_matrix([[5]]))
+    False
+    """
+    from ._arrays import dia_array
+    return isinstance(x, dia_matrix) or isinstance(x, dia_array)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/_dok.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/_dok.py
new file mode 100644
index 00000000..42d79fc2
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/_dok.py
@@ -0,0 +1,456 @@
+"""Dictionary Of Keys based matrix"""
+
+__docformat__ = "restructuredtext en"
+
+__all__ = ['dok_matrix', 'isspmatrix_dok']
+
+import itertools
+import numpy as np
+
+from ._base import spmatrix, isspmatrix
+from ._index import IndexMixin
+from ._sputils import (isdense, getdtype, isshape, isintlike, isscalarlike,
+                       upcast, upcast_scalar, get_index_dtype, check_shape)
+
+try:
+    from operator import isSequenceType as _is_sequence
+except ImportError:
+    def _is_sequence(x):
+        return (hasattr(x, '__len__') or hasattr(x, '__next__')
+                or hasattr(x, 'next'))
+
+
+class dok_matrix(spmatrix, IndexMixin, dict):
+    """
+    Dictionary Of Keys based sparse matrix.
+
+    This is an efficient structure for constructing sparse
+    matrices incrementally.
+
+    This can be instantiated in several ways:
+        dok_matrix(D)
+            with a dense matrix, D
+
+        dok_matrix(S)
+            with a sparse matrix, S
+
+        dok_matrix((M,N), [dtype])
+            create the matrix with initial shape (M,N)
+            dtype is optional, defaulting to dtype='d'
+
+    Attributes
+    ----------
+    dtype : dtype
+        Data type of the matrix
+    shape : 2-tuple
+        Shape of the matrix
+    ndim : int
+        Number of dimensions (this is always 2)
+    nnz
+        Number of nonzero elements
+
+    Notes
+    -----
+
+    Sparse matrices can be used in arithmetic operations: they support
+    addition, subtraction, multiplication, division, and matrix power.
+
+    Allows for efficient O(1) access to individual elements.
+    Duplicates are not allowed.
+    Can be efficiently converted to a coo_matrix once constructed.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.sparse import dok_matrix
+    >>> S = dok_matrix((5, 5), dtype=np.float32)
+    >>> for i in range(5):
+    ...     for j in range(5):
+    ...         S[i, j] = i + j    # Update element
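+
+    The (0, 0) entry above was assigned the value 0, which DOK does not
+    store (an editorial note), so only 24 entries remain:
+
+    >>> S.nnz
+    24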
+
+    """
+    format = 'dok'
+
+    def __init__(self, arg1, shape=None, dtype=None, copy=False):
+        dict.__init__(self)
+        spmatrix.__init__(self)
+
+        self.dtype = getdtype(dtype, default=float)
+        if isinstance(arg1, tuple) and isshape(arg1):  # (M,N)
+            M, N = arg1
+            self._shape = check_shape((M, N))
+        elif isspmatrix(arg1):  # Sparse ctor
+            if isspmatrix_dok(arg1) and copy:
+                arg1 = arg1.copy()
+            else:
+                arg1 = arg1.todok()
+
+            if dtype is not None:
+                arg1 = arg1.astype(dtype, copy=False)
+
+            dict.update(self, arg1)
+            self._shape = check_shape(arg1.shape)
+            self.dtype = arg1.dtype
+        else:  # Dense ctor
+            try:
+                arg1 = np.asarray(arg1)
+            except Exception as e:
+                raise TypeError('Invalid input format.') from e
+
+            if len(arg1.shape) != 2:
+                raise TypeError('Expected rank <=2 dense array or matrix.')
+
+            d = self._coo_container(arg1, dtype=dtype).todok()
+            dict.update(self, d)
+            self._shape = check_shape(arg1.shape)
+            self.dtype = d.dtype
+
+    def update(self, val):
+        # Prevent direct usage of update
+        raise NotImplementedError("Direct modification to dok_matrix element "
+                                  "is not allowed.")
+
+    def _update(self, data):
+        """An update method for dict data defined for direct access to
+        `dok_matrix` data. Main purpose is to be used for effcient conversion
+        from other spmatrix classes. Has no checking if `data` is valid."""
+        return dict.update(self, data)
+
+    def set_shape(self, shape):
+        new_matrix = self.reshape(shape, copy=False).asformat(self.format)
+        self.__dict__ = new_matrix.__dict__
+        dict.clear(self)
+        dict.update(self, new_matrix)
+
+    shape = property(fget=spmatrix.get_shape, fset=set_shape)
+
+    def getnnz(self, axis=None):
+        if axis is not None:
+            raise NotImplementedError("getnnz over an axis is not implemented "
+                                      "for DOK format.")
+        return dict.__len__(self)
+
+    def count_nonzero(self):
+        return sum(x != 0 for x in self.values())
+
+    getnnz.__doc__ = spmatrix.getnnz.__doc__
+    count_nonzero.__doc__ = spmatrix.count_nonzero.__doc__
+
+    def __len__(self):
+        return dict.__len__(self)
+
+    def get(self, key, default=0.):
+        """This overrides the dict.get method, providing type checking
+        but otherwise equivalent functionality.
+        """
+        try:
+            i, j = key
+            assert isintlike(i) and isintlike(j)
+        except (AssertionError, TypeError, ValueError) as e:
+            raise IndexError('Index must be a pair of integers.') from e
+        if (i < 0 or i >= self.shape[0] or j < 0 or j >= self.shape[1]):
+            raise IndexError('Index out of bounds.')
+        return dict.get(self, key, default)
+
+    def _get_intXint(self, row, col):
+        return dict.get(self, (row, col), self.dtype.type(0))
+
+    def _get_intXslice(self, row, col):
+        return self._get_sliceXslice(slice(row, row+1), col)
+
+    def _get_sliceXint(self, row, col):
+        return self._get_sliceXslice(row, slice(col, col+1))
+
+    def _get_sliceXslice(self, row, col):
+        row_start, row_stop, row_step = row.indices(self.shape[0])
+        col_start, col_stop, col_step = col.indices(self.shape[1])
+        row_range = range(row_start, row_stop, row_step)
+        col_range = range(col_start, col_stop, col_step)
+        shape = (len(row_range), len(col_range))
+        # Switch paths only when advantageous
+        # (count the iterations in the loops, adjust for complexity)
+        if len(self) >= 2 * shape[0] * shape[1]:
+            # O(nr*nc) path: loop over the full (row, col) grid of the slice
+            return self._get_columnXarray(row_range, col_range)
+        # O(nnz) path: loop over entries of self
+        newdok = self._dok_container(shape, dtype=self.dtype)
+        for key in self.keys():
+            i, ri = divmod(int(key[0]) - row_start, row_step)
+            if ri != 0 or i < 0 or i >= shape[0]:
+                continue
+            j, rj = divmod(int(key[1]) - col_start, col_step)
+            if rj != 0 or j < 0 or j >= shape[1]:
+                continue
+            x = dict.__getitem__(self, key)
+            dict.__setitem__(newdok, (i, j), x)
+        return newdok
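+
+    # Editorial note: the dense walk above costs O(nr*nc) dict lookups, so
+    # it is chosen only when the matrix holds at least 2*nr*nc entries;
+    # otherwise scanning the stored keys costs O(nnz) and wins.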
+
+    def _get_intXarray(self, row, col):
+        col = col.squeeze()
+        return self._get_columnXarray([row], col)
+
+    def _get_arrayXint(self, row, col):
+        row = row.squeeze()
+        return self._get_columnXarray(row, [col])
+
+    def _get_sliceXarray(self, row, col):
+        row = list(range(*row.indices(self.shape[0])))
+        return self._get_columnXarray(row, col)
+
+    def _get_arrayXslice(self, row, col):
+        col = list(range(*col.indices(self.shape[1])))
+        return self._get_columnXarray(row, col)
+
+    def _get_columnXarray(self, row, col):
+        # outer indexing
+        newdok = self._dok_container((len(row), len(col)), dtype=self.dtype)
+
+        for i, r in enumerate(row):
+            for j, c in enumerate(col):
+                v = dict.get(self, (r, c), 0)
+                if v:
+                    dict.__setitem__(newdok, (i, j), v)
+        return newdok
+
+    def _get_arrayXarray(self, row, col):
+        # inner indexing
+        i, j = map(np.atleast_2d, np.broadcast_arrays(row, col))
+        newdok = self._dok_container(i.shape, dtype=self.dtype)
+
+        for key in itertools.product(range(i.shape[0]), range(i.shape[1])):
+            v = dict.get(self, (i[key], j[key]), 0)
+            if v:
+                dict.__setitem__(newdok, key, v)
+        return newdok
+
+    def _set_intXint(self, row, col, x):
+        key = (row, col)
+        if x:
+            dict.__setitem__(self, key, x)
+        elif dict.__contains__(self, key):
+            del self[key]
+
+    def _set_arrayXarray(self, row, col, x):
+        row = list(map(int, row.ravel()))
+        col = list(map(int, col.ravel()))
+        x = x.ravel()
+        dict.update(self, zip(zip(row, col), x))
+
+        for i in np.nonzero(x == 0)[0]:
+            key = (row[i], col[i])
+            if dict.__getitem__(self, key) == 0:
+                # may have been superseded by later update
+                del self[key]
+
+    def __add__(self, other):
+        if isscalarlike(other):
+            res_dtype = upcast_scalar(self.dtype, other)
+            new = self._dok_container(self.shape, dtype=res_dtype)
+            # Add this scalar to every element.
+            M, N = self.shape
+            for key in itertools.product(range(M), range(N)):
+                aij = dict.get(self, key, 0) + other
+                if aij:
+                    new[key] = aij
+            # new.dtype.char = self.dtype.char
+        elif isspmatrix_dok(other):
+            if other.shape != self.shape:
+                raise ValueError("Matrix dimensions are not equal.")
+            # We could alternatively set the dimensions to the largest of
+            # the two matrices to be summed.  Would this be a good idea?
+            res_dtype = upcast(self.dtype, other.dtype)
+            new = self._dok_container(self.shape, dtype=res_dtype)
+            dict.update(new, self)
+            with np.errstate(over='ignore'):
+                dict.update(new,
+                           ((k, new[k] + other[k]) for k in other.keys()))
+        elif isspmatrix(other):
+            csc = self.tocsc()
+            new = csc + other
+        elif isdense(other):
+            new = self.todense() + other
+        else:
+            return NotImplemented
+        return new
+
+    def __radd__(self, other):
+        if isscalarlike(other):
+            new = self._dok_container(self.shape, dtype=self.dtype)
+            M, N = self.shape
+            for key in itertools.product(range(M), range(N)):
+                aij = dict.get(self, key, 0) + other
+                if aij:
+                    new[key] = aij
+        elif isspmatrix_dok(other):
+            if other.shape != self.shape:
+                raise ValueError("Matrix dimensions are not equal.")
+            new = self._dok_container(self.shape, dtype=self.dtype)
+            dict.update(new, self)
+            dict.update(new,
+                       ((k, self[k] + other[k]) for k in other.keys()))
+        elif isspmatrix(other):
+            csc = self.tocsc()
+            new = csc + other
+        elif isdense(other):
+            new = other + self.todense()
+        else:
+            return NotImplemented
+        return new
+
+    def __neg__(self):
+        if self.dtype.kind == 'b':
+            raise NotImplementedError('Negating a sparse boolean matrix is not'
+                                      ' supported.')
+        new = self._dok_container(self.shape, dtype=self.dtype)
+        dict.update(new, ((k, -self[k]) for k in self.keys()))
+        return new
+
+    def _mul_scalar(self, other):
+        res_dtype = upcast_scalar(self.dtype, other)
+        # Multiply this scalar by every element.
+        new = self._dok_container(self.shape, dtype=res_dtype)
+        dict.update(new, ((k, v * other) for k, v in self.items()))
+        return new
+
+    def _mul_vector(self, other):
+        # matrix * vector
+        result = np.zeros(self.shape[0], dtype=upcast(self.dtype, other.dtype))
+        for (i, j), v in self.items():
+            result[i] += v * other[j]
+        return result
+
+    def _mul_multivector(self, other):
+        # matrix * multivector
+        result_shape = (self.shape[0], other.shape[1])
+        result_dtype = upcast(self.dtype, other.dtype)
+        result = np.zeros(result_shape, dtype=result_dtype)
+        for (i, j), v in self.items():
+            result[i,:] += v * other[j,:]
+        return result
+
+    def __imul__(self, other):
+        if isscalarlike(other):
+            dict.update(self, ((k, v * other) for k, v in self.items()))
+            return self
+        return NotImplemented
+
+    def __truediv__(self, other):
+        if isscalarlike(other):
+            res_dtype = upcast_scalar(self.dtype, other)
+            new = self._dok_container(self.shape, dtype=res_dtype)
+            dict.update(new, ((k, v / other) for k, v in self.items()))
+            return new
+        return self.tocsr() / other
+
+    def __itruediv__(self, other):
+        if isscalarlike(other):
+            dict.update(self, ((k, v / other) for k, v in self.items()))
+            return self
+        return NotImplemented
+
+    def __reduce__(self):
+        # This approach is necessary because __setstate__ is called after
+        # __setitem__ upon unpickling; since __init__ is not called, there
+        # is no shape attribute, so the matrix could not be unpickled otherwise.
+        return dict.__reduce__(self)
+
+    # What should len(sparse) return? For consistency with dense matrices,
+    # perhaps it should be the number of rows?  For now it returns the number
+    # of non-zeros.
+
+    def transpose(self, axes=None, copy=False):
+        if axes is not None:
+            raise ValueError("Sparse matrices do not support "
+                             "an 'axes' parameter because swapping "
+                             "dimensions is the only logical permutation.")
+
+        M, N = self.shape
+        new = self._dok_container((N, M), dtype=self.dtype, copy=copy)
+        dict.update(new, (((right, left), val)
+                          for (left, right), val in self.items()))
+        return new
+
+    transpose.__doc__ = spmatrix.transpose.__doc__
+
+    def conjtransp(self):
+        """Return the conjugate transpose."""
+        M, N = self.shape
+        new = self._dok_container((N, M), dtype=self.dtype)
+        dict.update(new, (((right, left), np.conj(val))
+                          for (left, right), val in self.items()))
+        return new
+
+    def copy(self):
+        new = self._dok_container(self.shape, dtype=self.dtype)
+        dict.update(new, self)
+        return new
+
+    copy.__doc__ = spmatrix.copy.__doc__
+
+    def tocoo(self, copy=False):
+        if self.nnz == 0:
+            return self._coo_container(self.shape, dtype=self.dtype)
+
+        idx_dtype = get_index_dtype(maxval=max(self.shape))
+        data = np.fromiter(self.values(), dtype=self.dtype, count=self.nnz)
+        row = np.fromiter((i for i, _ in self.keys()), dtype=idx_dtype, count=self.nnz)
+        col = np.fromiter((j for _, j in self.keys()), dtype=idx_dtype, count=self.nnz)
+        A = self._coo_container(
+            (data, (row, col)), shape=self.shape, dtype=self.dtype
+        )
+        A.has_canonical_format = True
+        return A
+
+    tocoo.__doc__ = spmatrix.tocoo.__doc__
+
+    def todok(self, copy=False):
+        if copy:
+            return self.copy()
+        return self
+
+    todok.__doc__ = spmatrix.todok.__doc__
+
+    def tocsc(self, copy=False):
+        return self.tocoo(copy=False).tocsc(copy=copy)
+
+    tocsc.__doc__ = spmatrix.tocsc.__doc__
+
+    def resize(self, *shape):
+        shape = check_shape(shape)
+        newM, newN = shape
+        M, N = self.shape
+        if newM < M or newN < N:
+            # Remove all elements outside new dimensions
+            for (i, j) in list(self.keys()):
+                if i >= newM or j >= newN:
+                    del self[i, j]
+        self._shape = shape
+
+    resize.__doc__ = spmatrix.resize.__doc__
+
+
+def isspmatrix_dok(x):
+    """Is x of dok_matrix type?
+
+    Parameters
+    ----------
+    x
+        object to check for being a dok matrix
+
+    Returns
+    -------
+    bool
+        True if x is a dok matrix, False otherwise
+
+    Examples
+    --------
+    >>> from scipy.sparse import dok_matrix, isspmatrix_dok
+    >>> isspmatrix_dok(dok_matrix([[5]]))
+    True
+
+    >>> from scipy.sparse import dok_matrix, csr_matrix, isspmatrix_dok
+    >>> isspmatrix_dok(csr_matrix([[5]]))
+    False
+    """
+    from ._arrays import dok_array
+    return isinstance(x, dok_matrix) or isinstance(x, dok_array)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/_extract.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/_extract.py
new file mode 100644
index 00000000..bf8c9157
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/_extract.py
@@ -0,0 +1,169 @@
+"""Functions to extract parts of sparse matrices
+"""
+
+__docformat__ = "restructuredtext en"
+
+__all__ = ['find', 'tril', 'triu']
+
+
+from ._coo import coo_matrix
+
+
+def find(A):
+    """Return the indices and values of the nonzero elements of a matrix
+
+    Parameters
+    ----------
+    A : dense or sparse matrix
+        Matrix whose nonzero elements are desired.
+
+    Returns
+    -------
+    (I,J,V) : tuple of arrays
+        I,J, and V contain the row indices, column indices, and values
+        of the nonzero matrix entries.
+
+
+    Examples
+    --------
+    >>> from scipy.sparse import csr_matrix, find
+    >>> A = csr_matrix([[7.0, 8.0, 0],[0, 0, 9.0]])
+    >>> find(A)
+    (array([0, 0, 1], dtype=int32), array([0, 1, 2], dtype=int32), array([ 7.,  8.,  9.]))
+
+    """
+
+    A = coo_matrix(A, copy=True)
+    A.sum_duplicates()
+    # remove explicit zeros
+    nz_mask = A.data != 0
+    return A.row[nz_mask], A.col[nz_mask], A.data[nz_mask]
+
+
+def tril(A, k=0, format=None):
+    """Return the lower triangular portion of a matrix in sparse format
+
+    Returns the elements on or below the k-th diagonal of the matrix A.
+        - k = 0 corresponds to the main diagonal
+        - k > 0 is above the main diagonal
+        - k < 0 is below the main diagonal
+
+    Parameters
+    ----------
+    A : dense or sparse matrix
+        Matrix whose lower triangular portion is desired.
+    k : int, optional
+        The top-most diagonal of the lower triangle.
+    format : str, optional
+        Sparse format of the result, e.g. format="csr", etc.
+
+    Returns
+    -------
+    L : sparse matrix
+        Lower triangular portion of A in sparse format.
+
+    See Also
+    --------
+    triu : upper triangle in sparse format
+
+    Examples
+    --------
+    >>> from scipy.sparse import csr_matrix, tril
+    >>> A = csr_matrix([[1, 2, 0, 0, 3], [4, 5, 0, 6, 7], [0, 0, 8, 9, 0]],
+    ...                dtype='int32')
+    >>> A.toarray()
+    array([[1, 2, 0, 0, 3],
+           [4, 5, 0, 6, 7],
+           [0, 0, 8, 9, 0]])
+    >>> tril(A).toarray()
+    array([[1, 0, 0, 0, 0],
+           [4, 5, 0, 0, 0],
+           [0, 0, 8, 0, 0]])
+    >>> tril(A).nnz
+    4
+    >>> tril(A, k=1).toarray()
+    array([[1, 2, 0, 0, 0],
+           [4, 5, 0, 0, 0],
+           [0, 0, 8, 9, 0]])
+    >>> tril(A, k=-1).toarray()
+    array([[0, 0, 0, 0, 0],
+           [4, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0]])
+    >>> tril(A, format='csc')
+    <3x5 sparse matrix of type ''
+            with 4 stored elements in Compressed Sparse Column format>
+
+    """
+
+    # convert to COOrdinate format where things are easy
+    A = coo_matrix(A, copy=False)
+    mask = A.row + k >= A.col
+    return _masked_coo(A, mask).asformat(format)
+
+
+def triu(A, k=0, format=None):
+    """Return the upper triangular portion of a matrix in sparse format
+
+    Returns the elements on or above the k-th diagonal of the matrix A.
+        - k = 0 corresponds to the main diagonal
+        - k > 0 is above the main diagonal
+        - k < 0 is below the main diagonal
+
+    Parameters
+    ----------
+    A : dense or sparse matrix
+        Matrix whose upper triangular portion is desired.
+    k : int, optional
+        The bottom-most diagonal of the upper triangle.
+    format : str, optional
+        Sparse format of the result, e.g. format="csr", etc.
+
+    Returns
+    -------
+    U : sparse matrix
+        Upper triangular portion of A in sparse format.
+
+    See Also
+    --------
+    tril : lower triangle in sparse format
+
+    Examples
+    --------
+    >>> from scipy.sparse import csr_matrix, triu
+    >>> A = csr_matrix([[1, 2, 0, 0, 3], [4, 5, 0, 6, 7], [0, 0, 8, 9, 0]],
+    ...                dtype='int32')
+    >>> A.toarray()
+    array([[1, 2, 0, 0, 3],
+           [4, 5, 0, 6, 7],
+           [0, 0, 8, 9, 0]])
+    >>> triu(A).toarray()
+    array([[1, 2, 0, 0, 3],
+           [0, 5, 0, 6, 7],
+           [0, 0, 8, 9, 0]])
+    >>> triu(A).nnz
+    8
+    >>> triu(A, k=1).toarray()
+    array([[0, 2, 0, 0, 3],
+           [0, 0, 0, 6, 7],
+           [0, 0, 0, 9, 0]])
+    >>> triu(A, k=-1).toarray()
+    array([[1, 2, 0, 0, 3],
+           [4, 5, 0, 6, 7],
+           [0, 0, 8, 9, 0]])
+    >>> triu(A, format='csc')
+    <3x5 sparse matrix of type ''
+            with 8 stored elements in Compressed Sparse Column format>
+
+    """
+
+    # convert to COOrdinate format where things are easy
+    A = coo_matrix(A, copy=False)
+    mask = A.row + k <= A.col
+    return _masked_coo(A, mask).asformat(format)
+
+
+def _masked_coo(A, mask):
+    row = A.row[mask]
+    col = A.col[mask]
+    data = A.data[mask]
+    return coo_matrix((data, (row, col)), shape=A.shape, dtype=A.dtype)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/_index.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/_index.py
new file mode 100644
index 00000000..db7bab9f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/_index.py
@@ -0,0 +1,389 @@
+"""Indexing mixin for sparse matrix classes.
+"""
+import numpy as np
+from ._sputils import isintlike
+
+try:
+    INT_TYPES = (int, long, np.integer)
+except NameError:
+    # long is not defined in Python3
+    INT_TYPES = (int, np.integer)
+
+
+def _broadcast_arrays(a, b):
+    """
+    Same as np.broadcast_arrays(a, b) but old writeability rules.
+
+    NumPy >= 1.17.0 transitions broadcast_arrays to return
+    read-only arrays. Set writeability explicitly to avoid warnings.
+    Retain the old writeability rules, as our Cython code assumes
+    the old behavior.
+    """
+    x, y = np.broadcast_arrays(a, b)
+    x.flags.writeable = a.flags.writeable
+    y.flags.writeable = b.flags.writeable
+    return x, y
+
+
+class IndexMixin:
+    """
+    This class provides common dispatching and validation logic for indexing.
+    """
+    def _raise_on_1d_array_slice(self):
+        """We do not currently support 1D sparse arrays.
+
+        This function is called each time that a 1D array would
+        result, raising an error instead.
+
+        Once 1D sparse arrays are implemented, it should be removed.
+        """
+        if self._is_array:
+            raise NotImplementedError(
+                'We have not yet implemented 1D sparse slices; '
+                'please index using explicit indices, e.g. `x[:, [0]]`'
+            )
+
+    def __getitem__(self, key):
+        row, col = self._validate_indices(key)
+
+        # Dispatch to specialized methods.
+        if isinstance(row, INT_TYPES):
+            if isinstance(col, INT_TYPES):
+                return self._get_intXint(row, col)
+            elif isinstance(col, slice):
+                self._raise_on_1d_array_slice()
+                return self._get_intXslice(row, col)
+            elif col.ndim == 1:
+                self._raise_on_1d_array_slice()
+                return self._get_intXarray(row, col)
+            elif col.ndim == 2:
+                return self._get_intXarray(row, col)
+            raise IndexError('index results in >2 dimensions')
+        elif isinstance(row, slice):
+            if isinstance(col, INT_TYPES):
+                self._raise_on_1d_array_slice()
+                return self._get_sliceXint(row, col)
+            elif isinstance(col, slice):
+                if row == slice(None) and row == col:
+                    return self.copy()
+                return self._get_sliceXslice(row, col)
+            elif col.ndim == 1:
+                return self._get_sliceXarray(row, col)
+            raise IndexError('index results in >2 dimensions')
+        elif row.ndim == 1:
+            if isinstance(col, INT_TYPES):
+                self._raise_on_1d_array_slice()
+                return self._get_arrayXint(row, col)
+            elif isinstance(col, slice):
+                return self._get_arrayXslice(row, col)
+        else:  # row.ndim == 2
+            if isinstance(col, INT_TYPES):
+                return self._get_arrayXint(row, col)
+            elif isinstance(col, slice):
+                raise IndexError('index results in >2 dimensions')
+            elif row.shape[1] == 1 and (col.ndim == 1 or col.shape[0] == 1):
+                # special case for outer indexing
+                return self._get_columnXarray(row[:,0], col.ravel())
+
+        # The only remaining case is inner (fancy) indexing
+        row, col = _broadcast_arrays(row, col)
+        if row.shape != col.shape:
+            raise IndexError('number of row and column indices differ')
+        if row.size == 0:
+            return self.__class__(np.atleast_2d(row).shape, dtype=self.dtype)
+        return self._get_arrayXarray(row, col)
+
+    def __setitem__(self, key, x):
+        row, col = self._validate_indices(key)
+
+        if isinstance(row, INT_TYPES) and isinstance(col, INT_TYPES):
+            x = np.asarray(x, dtype=self.dtype)
+            if x.size != 1:
+                raise ValueError('Trying to assign a sequence to an item')
+            self._set_intXint(row, col, x.flat[0])
+            return
+
+        if isinstance(row, slice):
+            row = np.arange(*row.indices(self.shape[0]))[:, None]
+        else:
+            row = np.atleast_1d(row)
+
+        if isinstance(col, slice):
+            col = np.arange(*col.indices(self.shape[1]))[None, :]
+            if row.ndim == 1:
+                row = row[:, None]
+        else:
+            col = np.atleast_1d(col)
+
+        i, j = _broadcast_arrays(row, col)
+        if i.shape != j.shape:
+            raise IndexError('number of row and column indices differ')
+
+        from ._base import isspmatrix
+        if isspmatrix(x):
+            if i.ndim == 1:
+                # Inner indexing, so treat them like row vectors.
+                i = i[None]
+                j = j[None]
+            broadcast_row = x.shape[0] == 1 and i.shape[0] != 1
+            broadcast_col = x.shape[1] == 1 and i.shape[1] != 1
+            if not ((broadcast_row or x.shape[0] == i.shape[0]) and
+                    (broadcast_col or x.shape[1] == i.shape[1])):
+                raise ValueError('shape mismatch in assignment')
+            if x.shape[0] == 0 or x.shape[1] == 0:
+                return
+            x = x.tocoo(copy=True)
+            x.sum_duplicates()
+            self._set_arrayXarray_sparse(i, j, x)
+        else:
+            # Make x and i into the same shape
+            x = np.asarray(x, dtype=self.dtype)
+            if x.squeeze().shape != i.squeeze().shape:
+                x = np.broadcast_to(x, i.shape)
+            if x.size == 0:
+                return
+            x = x.reshape(i.shape)
+            self._set_arrayXarray(i, j, x)
+
+    def _validate_indices(self, key):
+        M, N = self.shape
+        row, col = _unpack_index(key)
+
+        if isintlike(row):
+            row = int(row)
+            if row < -M or row >= M:
+                raise IndexError('row index (%d) out of range' % row)
+            if row < 0:
+                row += M
+        elif not isinstance(row, slice):
+            row = self._asindices(row, M)
+
+        if isintlike(col):
+            col = int(col)
+            if col < -N or col >= N:
+                raise IndexError('column index (%d) out of range' % col)
+            if col < 0:
+                col += N
+        elif not isinstance(col, slice):
+            col = self._asindices(col, N)
+
+        return row, col
+
+    def _asindices(self, idx, length):
+        """Convert `idx` to a valid index for an axis with a given length.
+
+        Subclasses that need special validation can override this method.
+        """
+        try:
+            x = np.asarray(idx)
+        except (ValueError, TypeError, MemoryError) as e:
+            raise IndexError('invalid index') from e
+
+        if x.ndim not in (1, 2):
+            raise IndexError('Index dimension must be 1 or 2')
+
+        if x.size == 0:
+            return x
+
+        # Check bounds
+        max_indx = x.max()
+        if max_indx >= length:
+            raise IndexError('index (%d) out of range' % max_indx)
+
+        min_indx = x.min()
+        if min_indx < 0:
+            if min_indx < -length:
+                raise IndexError('index (%d) out of range' % min_indx)
+            if x is idx or not x.flags.owndata:
+                x = x.copy()
+            x[x < 0] += length
+        return x
+
+    def getrow(self, i):
+        """Return a copy of row i of the matrix, as a (1 x n) row vector.
+        """
+        M, N = self.shape
+        i = int(i)
+        if i < -M or i >= M:
+            raise IndexError('index (%d) out of range' % i)
+        if i < 0:
+            i += M
+        return self._get_intXslice(i, slice(None))
+
+    def getcol(self, i):
+        """Return a copy of column i of the matrix, as a (m x 1) column vector.
+        """
+        M, N = self.shape
+        i = int(i)
+        if i < -N or i >= N:
+            raise IndexError('index (%d) out of range' % i)
+        if i < 0:
+            i += N
+        return self._get_sliceXint(slice(None), i)
+
+    def _get_intXint(self, row, col):
+        raise NotImplementedError()
+
+    def _get_intXarray(self, row, col):
+        raise NotImplementedError()
+
+    def _get_intXslice(self, row, col):
+        raise NotImplementedError()
+
+    def _get_sliceXint(self, row, col):
+        raise NotImplementedError()
+
+    def _get_sliceXslice(self, row, col):
+        raise NotImplementedError()
+
+    def _get_sliceXarray(self, row, col):
+        raise NotImplementedError()
+
+    def _get_arrayXint(self, row, col):
+        raise NotImplementedError()
+
+    def _get_arrayXslice(self, row, col):
+        raise NotImplementedError()
+
+    def _get_columnXarray(self, row, col):
+        raise NotImplementedError()
+
+    def _get_arrayXarray(self, row, col):
+        raise NotImplementedError()
+
+    def _set_intXint(self, row, col, x):
+        raise NotImplementedError()
+
+    def _set_arrayXarray(self, row, col, x):
+        raise NotImplementedError()
+
+    def _set_arrayXarray_sparse(self, row, col, x):
+        # Fall back to densifying x
+        x = np.asarray(x.toarray(), dtype=self.dtype)
+        x, _ = _broadcast_arrays(x, row)
+        self._set_arrayXarray(row, col, x)
+
+
+def _unpack_index(index):
+    """ Parse index. Always return a tuple of the form (row, col).
+    Valid type for row/col is integer, slice, or array of integers.
+    """
+    # First, check if indexing with single boolean matrix.
+    from ._base import spmatrix, isspmatrix
+    if (isinstance(index, (spmatrix, np.ndarray)) and
+            index.ndim == 2 and index.dtype.kind == 'b'):
+        return index.nonzero()
+
+    # Parse any ellipses.
+    index = _check_ellipsis(index)
+
+    # Next, parse the tuple or object
+    if isinstance(index, tuple):
+        if len(index) == 2:
+            row, col = index
+        elif len(index) == 1:
+            row, col = index[0], slice(None)
+        else:
+            raise IndexError('invalid number of indices')
+    else:
+        idx = _compatible_boolean_index(index)
+        if idx is None:
+            row, col = index, slice(None)
+        elif idx.ndim < 2:
+            return _boolean_index_to_array(idx), slice(None)
+        elif idx.ndim == 2:
+            return idx.nonzero()
+        else:
+            raise IndexError('invalid index shape')
+    # Next, check for validity and transform the index as needed.
+    if isspmatrix(row) or isspmatrix(col):
+        # Supporting sparse boolean indexing with both row and col does
+        # not work because spmatrix.ndim is always 2.
+        raise IndexError(
+            'Indexing with sparse matrices is not supported '
+            'except boolean indexing where matrix and index '
+            'are equal shapes.')
+    bool_row = _compatible_boolean_index(row)
+    bool_col = _compatible_boolean_index(col)
+    if bool_row is not None:
+        row = _boolean_index_to_array(bool_row)
+    if bool_col is not None:
+        col = _boolean_index_to_array(bool_col)
+    return row, col
+
+
+def _check_ellipsis(index):
+    """Process indices with Ellipsis. Returns modified index."""
+    if index is Ellipsis:
+        return (slice(None), slice(None))
+
+    if not isinstance(index, tuple):
+        return index
+
+    # TODO: Deprecate this multiple-ellipsis handling,
+    #       as numpy no longer supports it.
+
+    # Find first ellipsis.
+    for j, v in enumerate(index):
+        if v is Ellipsis:
+            first_ellipsis = j
+            break
+    else:
+        return index
+
+    # Try to expand it using shortcuts for common cases
+    if len(index) == 1:
+        return (slice(None), slice(None))
+    if len(index) == 2:
+        if first_ellipsis == 0:
+            if index[1] is Ellipsis:
+                return (slice(None), slice(None))
+            return (slice(None), index[1])
+        return (index[0], slice(None))
+
+    # Expand it using a general-purpose algorithm
+    tail = []
+    for v in index[first_ellipsis+1:]:
+        if v is not Ellipsis:
+            tail.append(v)
+    nd = first_ellipsis + len(tail)
+    nslice = max(0, 2 - nd)
+    return index[:first_ellipsis] + (slice(None),)*nslice + tuple(tail)
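+
+
+# Editorial sketch of the expansion performed by _check_ellipsis:
+#     (Ellipsis,)   -> (slice(None), slice(None))
+#     (Ellipsis, 0) -> (slice(None), 0)
+#     (1, Ellipsis) -> (1, slice(None))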
+
+
+def _maybe_bool_ndarray(idx):
+    """Returns a compatible array if elements are boolean.
+    """
+    idx = np.asanyarray(idx)
+    if idx.dtype.kind == 'b':
+        return idx
+    return None
+
+
+def _first_element_bool(idx, max_dim=2):
+    """Returns True if first element of the incompatible
+    array type is boolean.
+    """
+    if max_dim < 1:
+        return None
+    try:
+        first = next(iter(idx), None)
+    except TypeError:
+        return None
+    if isinstance(first, bool):
+        return True
+    return _first_element_bool(first, max_dim-1)
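+
+
+# Editorial sketch: _first_element_bool([[True, False]]) peeks one level
+# deep and returns True; _first_element_bool([1, 2]) returns None, so the
+# index is not treated as boolean.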
+
+
+def _compatible_boolean_index(idx):
+    """Returns a boolean index array that can be converted to
+    integer array. Returns None if no such array exists.
+    """
+    # Presence of attribute `ndim` indicates a compatible array type.
+    if hasattr(idx, 'ndim') or _first_element_bool(idx):
+        return _maybe_bool_ndarray(idx)
+    return None
+
+
+def _boolean_index_to_array(idx):
+    if idx.ndim > 1:
+        raise IndexError('invalid index shape')
+    return np.where(idx)[0]
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/_lil.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/_lil.py
new file mode 100644
index 00000000..2e6fa0b7
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/_lil.py
@@ -0,0 +1,547 @@
+"""List of Lists sparse matrix class
+"""
+
+__docformat__ = "restructuredtext en"
+
+__all__ = ['lil_matrix', 'isspmatrix_lil']
+
+from bisect import bisect_left
+
+import numpy as np
+
+from ._base import spmatrix, isspmatrix
+from ._index import IndexMixin, INT_TYPES, _broadcast_arrays
+from ._sputils import (getdtype, isshape, isscalarlike, upcast_scalar,
+                       get_index_dtype, check_shape, check_reshape_kwargs)
+from . import _csparsetools
+
+
+class lil_matrix(spmatrix, IndexMixin):
+    """Row-based LIst of Lists sparse matrix
+
+    This is a structure for constructing sparse matrices incrementally.
+    Note that inserting a single item can take linear time in the worst case;
+    to construct a matrix efficiently, make sure the items are pre-sorted by
+    index, per row.
+
+    This can be instantiated in several ways:
+        lil_matrix(D)
+            with a dense matrix or rank-2 ndarray D
+
+        lil_matrix(S)
+            with another sparse matrix S (equivalent to S.tolil())
+
+        lil_matrix((M, N), [dtype])
+            to construct an empty matrix with shape (M, N)
+            dtype is optional, defaulting to dtype='d'.
+
+    Attributes
+    ----------
+    dtype : dtype
+        Data type of the matrix
+    shape : 2-tuple
+        Shape of the matrix
+    ndim : int
+        Number of dimensions (this is always 2)
+    nnz
+        Number of stored values, including explicit zeros
+    data
+        LIL format data array of the matrix
+    rows
+        LIL format row index array of the matrix
+
+    Notes
+    -----
+    Sparse matrices can be used in arithmetic operations: they support
+    addition, subtraction, multiplication, division, and matrix power.
+
+    Advantages of the LIL format
+        - supports flexible slicing
+        - changes to the matrix sparsity structure are efficient
+
+    Disadvantages of the LIL format
+        - arithmetic operations LIL + LIL are slow (consider CSR or CSC)
+        - slow column slicing (consider CSC)
+        - slow matrix vector products (consider CSR or CSC)
+
+    Intended Usage
+        - LIL is a convenient format for constructing sparse matrices
+        - once a matrix has been constructed, convert to CSR or
+          CSC format for fast arithmetic and matrix vector operations
+        - consider using the COO format when constructing large matrices
+
+    Data Structure
+        - An array (``self.rows``) of rows, each of which is a sorted
+          list of column indices of non-zero elements.
+        - The corresponding nonzero values are stored in similar
+          fashion in ``self.data``.
+
+
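+    Examples
+    --------
+    A minimal sketch (editorial) of the intended usage: build the matrix
+    incrementally, then convert for fast arithmetic.
+
+    >>> from scipy.sparse import lil_matrix
+    >>> A = lil_matrix((3, 3))
+    >>> A[0, 1] = 1.0
+    >>> A[2, 0] = 2.0
+    >>> A.toarray()
+    array([[0., 1., 0.],
+           [0., 0., 0.],
+           [2., 0., 0.]])
+    >>> B = A.tocsr()   # convert to CSR for fast products
+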
+    """
+    format = 'lil'
+
+    def __init__(self, arg1, shape=None, dtype=None, copy=False):
+        spmatrix.__init__(self)
+        self.dtype = getdtype(dtype, arg1, default=float)
+
+        # First get the shape
+        if isspmatrix(arg1):
+            if isspmatrix_lil(arg1) and copy:
+                A = arg1.copy()
+            else:
+                A = arg1.tolil()
+
+            if dtype is not None:
+                A = A.astype(dtype, copy=False)
+
+            self._shape = check_shape(A.shape)
+            self.dtype = A.dtype
+            self.rows = A.rows
+            self.data = A.data
+        elif isinstance(arg1,tuple):
+            if isshape(arg1):
+                if shape is not None:
+                    raise ValueError('invalid use of shape parameter')
+                M, N = arg1
+                self._shape = check_shape((M, N))
+                self.rows = np.empty((M,), dtype=object)
+                self.data = np.empty((M,), dtype=object)
+                for i in range(M):
+                    self.rows[i] = []
+                    self.data[i] = []
+            else:
+                raise TypeError('unrecognized lil_matrix constructor usage')
+        else:
+            # assume A is dense
+            try:
+                A = self._ascontainer(arg1)
+            except TypeError as e:
+                raise TypeError('unsupported matrix type') from e
+            else:
+                A = self._csr_container(A, dtype=dtype).tolil()
+
+                self._shape = check_shape(A.shape)
+                self.dtype = A.dtype
+                self.rows = A.rows
+                self.data = A.data
+
+    def __iadd__(self,other):
+        self[:,:] = self + other
+        return self
+
+    def __isub__(self,other):
+        self[:,:] = self - other
+        return self
+
+    def __imul__(self,other):
+        if isscalarlike(other):
+            self[:,:] = self * other
+            return self
+        else:
+            return NotImplemented
+
+    def __itruediv__(self,other):
+        if isscalarlike(other):
+            self[:,:] = self / other
+            return self
+        else:
+            return NotImplemented
+
+    # Whenever the dimensions change, empty lists should be created for each
+    # row
+
+    def getnnz(self, axis=None):
+        if axis is None:
+            return sum([len(rowvals) for rowvals in self.data])
+        if axis < 0:
+            axis += 2
+        if axis == 0:
+            out = np.zeros(self.shape[1], dtype=np.intp)
+            for row in self.rows:
+                out[row] += 1
+            return out
+        elif axis == 1:
+            return np.array([len(rowvals) for rowvals in self.data], dtype=np.intp)
+        else:
+            raise ValueError('axis out of bounds')
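+
+    # Editorial sketch: if rows == [[0, 2], [1]] are the stored column
+    # indices, getnnz(axis=1) gives array([2, 1]) and getnnz() gives 3.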
+
+    def count_nonzero(self):
+        return sum(np.count_nonzero(rowvals) for rowvals in self.data)
+
+    getnnz.__doc__ = spmatrix.getnnz.__doc__
+    count_nonzero.__doc__ = spmatrix.count_nonzero.__doc__
+
+    def __str__(self):
+        val = ''
+        for i, row in enumerate(self.rows):
+            for pos, j in enumerate(row):
+                val += "  %s\t%s\n" % (str((i, j)), str(self.data[i][pos]))
+        return val[:-1]
+
+    def getrowview(self, i):
+        """Returns a view of the 'i'th row (without copying).
+        """
+        new = self._lil_container((1, self.shape[1]), dtype=self.dtype)
+        new.rows[0] = self.rows[i]
+        new.data[0] = self.data[i]
+        return new
+
+    def getrow(self, i):
+        """Returns a copy of the 'i'th row.
+        """
+        M, N = self.shape
+        if i < 0:
+            i += M
+        if i < 0 or i >= M:
+            raise IndexError('row index out of bounds')
+        new = self._lil_container((1, N), dtype=self.dtype)
+        new.rows[0] = self.rows[i][:]
+        new.data[0] = self.data[i][:]
+        return new
+
+    def __getitem__(self, key):
+        # Fast path for simple (int, int) indexing.
+        if (isinstance(key, tuple) and len(key) == 2 and
+                isinstance(key[0], INT_TYPES) and
+                isinstance(key[1], INT_TYPES)):
+            # lil_get1 handles validation for us.
+            return self._get_intXint(*key)
+        # Everything else takes the normal path.
+        return IndexMixin.__getitem__(self, key)
+
+    def _asindices(self, idx, N):
+        # LIL routines handle bounds-checking for us, so don't do it here.
+        try:
+            x = np.asarray(idx)
+        except (ValueError, TypeError, MemoryError) as e:
+            raise IndexError('invalid index') from e
+        if x.ndim not in (1, 2):
+            raise IndexError('Index dimension must be <= 2')
+        return x
+
+    def _get_intXint(self, row, col):
+        v = _csparsetools.lil_get1(self.shape[0], self.shape[1], self.rows,
+                                   self.data, row, col)
+        return self.dtype.type(v)
+
+    def _get_sliceXint(self, row, col):
+        row = range(*row.indices(self.shape[0]))
+        return self._get_row_ranges(row, slice(col, col+1))
+
+    def _get_arrayXint(self, row, col):
+        row = row.squeeze()
+        return self._get_row_ranges(row, slice(col, col+1))
+
+    def _get_intXslice(self, row, col):
+        return self._get_row_ranges((row,), col)
+
+    def _get_sliceXslice(self, row, col):
+        row = range(*row.indices(self.shape[0]))
+        return self._get_row_ranges(row, col)
+
+    def _get_arrayXslice(self, row, col):
+        return self._get_row_ranges(row, col)
+
+    def _get_intXarray(self, row, col):
+        row = np.array(row, dtype=col.dtype, ndmin=1)
+        return self._get_columnXarray(row, col)
+
+    def _get_sliceXarray(self, row, col):
+        row = np.arange(*row.indices(self.shape[0]))
+        return self._get_columnXarray(row, col)
+
+    def _get_columnXarray(self, row, col):
+        # outer indexing
+        row, col = _broadcast_arrays(row[:,None], col)
+        return self._get_arrayXarray(row, col)
+
+    def _get_arrayXarray(self, row, col):
+        # inner indexing
+        i, j = map(np.atleast_2d, _prepare_index_for_memoryview(row, col))
+        new = self._lil_container(i.shape, dtype=self.dtype)
+        _csparsetools.lil_fancy_get(self.shape[0], self.shape[1],
+                                    self.rows, self.data,
+                                    new.rows, new.data,
+                                    i, j)
+        return new
+
+    def _get_row_ranges(self, rows, col_slice):
+        """
+        Fast path for indexing in the case where the column index is a slice.
+
+        This is faster than brute-force indexing because it skips
+        zeros more efficiently, accessing the elements column-wise in
+        order.
+
+        Parameters
+        ----------
+        rows : sequence or range
+            Rows indexed. If range, must be within valid bounds.
+        col_slice : slice
+            Columns indexed
+
+        """
+        j_start, j_stop, j_stride = col_slice.indices(self.shape[1])
+        col_range = range(j_start, j_stop, j_stride)
+        nj = len(col_range)
+        new = self._lil_container((len(rows), nj), dtype=self.dtype)
+
+        _csparsetools.lil_get_row_ranges(self.shape[0], self.shape[1],
+                                         self.rows, self.data,
+                                         new.rows, new.data,
+                                         rows,
+                                         j_start, j_stop, j_stride, nj)
+
+        return new
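+
+    # Illustrative note (not part of the original source): this fast path
+    # is what backs column-slice indexing on a lil_matrix, e.g.:
+    #
+    #     >>> A = lil_matrix(np.arange(12).reshape(3, 4))
+    #     >>> A[0:2, ::2].toarray()
+    #     array([[0, 2],
+    #            [4, 6]])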
+
+    def _set_intXint(self, row, col, x):
+        _csparsetools.lil_insert(self.shape[0], self.shape[1], self.rows,
+                                 self.data, row, col, x)
+
+    def _set_arrayXarray(self, row, col, x):
+        i, j, x = map(np.atleast_2d, _prepare_index_for_memoryview(row, col, x))
+        _csparsetools.lil_fancy_set(self.shape[0], self.shape[1],
+                                    self.rows, self.data,
+                                    i, j, x)
+
+    def _set_arrayXarray_sparse(self, row, col, x):
+        # Special case: full matrix assignment
+        if (x.shape == self.shape and
+                isinstance(row, slice) and row == slice(None) and
+                isinstance(col, slice) and col == slice(None)):
+            x = self._lil_container(x, dtype=self.dtype)
+            self.rows = x.rows
+            self.data = x.data
+            return
+        # Fall back to densifying x
+        x = np.asarray(x.toarray(), dtype=self.dtype)
+        x, _ = _broadcast_arrays(x, row)
+        self._set_arrayXarray(row, col, x)
+
+    def __setitem__(self, key, x):
+        # Fast path for simple (int, int) indexing.
+        if (isinstance(key, tuple) and len(key) == 2 and
+                isinstance(key[0], INT_TYPES) and
+                isinstance(key[1], INT_TYPES)):
+            x = self.dtype.type(x)
+            if x.size > 1:
+                raise ValueError("Trying to assign a sequence to an item")
+            return self._set_intXint(key[0], key[1], x)
+        # Everything else takes the normal path.
+        IndexMixin.__setitem__(self, key, x)
+
+    def _mul_scalar(self, other):
+        if other == 0:
+            # Multiply by zero: return the zero matrix
+            new = self._lil_container(self.shape, dtype=self.dtype)
+        else:
+            res_dtype = upcast_scalar(self.dtype, other)
+
+            new = self.copy()
+            new = new.astype(res_dtype)
+            # Multiply this scalar by every element.
+            for j, rowvals in enumerate(new.data):
+                new.data[j] = [val*other for val in rowvals]
+        return new
+
+    def __truediv__(self, other):           # self / other
+        if isscalarlike(other):
+            new = self.copy()
+            # Divide every element by this scalar
+            for j, rowvals in enumerate(new.data):
+                new.data[j] = [val/other for val in rowvals]
+            return new
+        else:
+            return self.tocsr() / other
+
+    def copy(self):
+        M, N = self.shape
+        new = self._lil_container(self.shape, dtype=self.dtype)
+        # This is ~14x faster than calling deepcopy() on rows and data.
+        _csparsetools.lil_get_row_ranges(M, N, self.rows, self.data,
+                                         new.rows, new.data, range(M),
+                                         0, N, 1, N)
+        return new
+
+    copy.__doc__ = spmatrix.copy.__doc__
+
+    def reshape(self, *args, **kwargs):
+        shape = check_shape(args, self.shape)
+        order, copy = check_reshape_kwargs(kwargs)
+
+        # Return early if reshape is not required
+        if shape == self.shape:
+            if copy:
+                return self.copy()
+            else:
+                return self
+
+        new = self._lil_container(shape, dtype=self.dtype)
+
+        if order == 'C':
+            ncols = self.shape[1]
+            for i, row in enumerate(self.rows):
+                for col, j in enumerate(row):
+                    new_r, new_c = np.unravel_index(i * ncols + j, shape)
+                    new[new_r, new_c] = self[i, j]
+        elif order == 'F':
+            nrows = self.shape[0]
+            for i, row in enumerate(self.rows):
+                for col, j in enumerate(row):
+                    new_r, new_c = np.unravel_index(i + j * nrows, shape, order)
+                    new[new_r, new_c] = self[i, j]
+        else:
+            raise ValueError("'order' must be 'C' or 'F'")
+
+        return new
+
+    reshape.__doc__ = spmatrix.reshape.__doc__
+
+    def resize(self, *shape):
+        shape = check_shape(shape)
+        new_M, new_N = shape
+        M, N = self.shape
+
+        if new_M < M:
+            self.rows = self.rows[:new_M]
+            self.data = self.data[:new_M]
+        elif new_M > M:
+            self.rows = np.resize(self.rows, new_M)
+            self.data = np.resize(self.data, new_M)
+            for i in range(M, new_M):
+                self.rows[i] = []
+                self.data[i] = []
+
+        if new_N < N:
+            for row, data in zip(self.rows, self.data):
+                trunc = bisect_left(row, new_N)
+                del row[trunc:]
+                del data[trunc:]
+
+        self._shape = shape
+
+    resize.__doc__ = spmatrix.resize.__doc__
+
+    def toarray(self, order=None, out=None):
+        d = self._process_toarray_args(order, out)
+        for i, row in enumerate(self.rows):
+            for pos, j in enumerate(row):
+                d[i, j] = self.data[i][pos]
+        return d
+
+    toarray.__doc__ = spmatrix.toarray.__doc__
+
+    def transpose(self, axes=None, copy=False):
+        return self.tocsr(copy=copy).transpose(axes=axes, copy=False).tolil(copy=False)
+
+    transpose.__doc__ = spmatrix.transpose.__doc__
+
+    def tolil(self, copy=False):
+        if copy:
+            return self.copy()
+        else:
+            return self
+
+    tolil.__doc__ = spmatrix.tolil.__doc__
+
+    def tocsr(self, copy=False):
+        M, N = self.shape
+        if M == 0 or N == 0:
+            return self._csr_container((M, N), dtype=self.dtype)
+
+        # construct indptr array
+        if M*N <= np.iinfo(np.int32).max:
+            # fast path: it is known that 64-bit indexing will not be needed.
+            idx_dtype = np.int32
+            indptr = np.empty(M + 1, dtype=idx_dtype)
+            indptr[0] = 0
+            _csparsetools.lil_get_lengths(self.rows, indptr[1:])
+            np.cumsum(indptr, out=indptr)
+            nnz = indptr[-1]
+        else:
+            idx_dtype = get_index_dtype(maxval=N)
+            lengths = np.empty(M, dtype=idx_dtype)
+            _csparsetools.lil_get_lengths(self.rows, lengths)
+            nnz = lengths.sum(dtype=np.int64)
+            idx_dtype = get_index_dtype(maxval=max(N, nnz))
+            indptr = np.empty(M + 1, dtype=idx_dtype)
+            indptr[0] = 0
+            np.cumsum(lengths, dtype=idx_dtype, out=indptr[1:])
+
+        indices = np.empty(nnz, dtype=idx_dtype)
+        data = np.empty(nnz, dtype=self.dtype)
+        _csparsetools.lil_flatten_to_array(self.rows, indices)
+        _csparsetools.lil_flatten_to_array(self.data, data)
+
+        # init csr matrix
+        return self._csr_container((data, indices, indptr), shape=self.shape)
+
+    tocsr.__doc__ = spmatrix.tocsr.__doc__
+
+
+def _prepare_index_for_memoryview(i, j, x=None):
+    """
+    Convert index and data arrays to form suitable for passing to the
+    Cython fancy getset routines.
+
+    The conversions are necessary to (i) ensure the integer index
+    arrays are of an accepted type, and (ii) ensure the arrays are
+    writable, so that Cython memoryview support doesn't choke on them.
+
+    Parameters
+    ----------
+    i, j
+        Index arrays
+    x : optional
+        Data arrays
+
+    Returns
+    -------
+    i, j, x
+        Re-formatted arrays (x is omitted, if input was None)
+
+    """
+    if i.dtype > j.dtype:
+        j = j.astype(i.dtype)
+    elif i.dtype < j.dtype:
+        i = i.astype(j.dtype)
+
+    if not i.flags.writeable or i.dtype not in (np.int32, np.int64):
+        i = i.astype(np.intp)
+    if not j.flags.writeable or j.dtype not in (np.int32, np.int64):
+        j = j.astype(np.intp)
+
+    if x is not None:
+        if not x.flags.writeable:
+            x = x.copy()
+        return i, j, x
+    else:
+        return i, j
+
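+# Illustrative sketch (not part of the original source): mixed-width index
+# arrays come out of _prepare_index_for_memoryview with a common dtype that
+# the Cython routines accept, e.g.:
+#
+#     >>> i = np.array([0, 1], dtype=np.int32)
+#     >>> j = np.array([2, 0], dtype=np.int64)
+#     >>> i2, j2 = _prepare_index_for_memoryview(i, j)
+#     >>> i2.dtype == j2.dtype
+#     True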
+
+def isspmatrix_lil(x):
+    """Is x of lil_matrix type?
+
+    Parameters
+    ----------
+    x
+        object to check for being a lil matrix
+
+    Returns
+    -------
+    bool
+        True if x is a lil matrix, False otherwise
+
+    Examples
+    --------
+    >>> from scipy.sparse import lil_matrix, isspmatrix_lil
+    >>> isspmatrix_lil(lil_matrix([[5]]))
+    True
+
+    >>> from scipy.sparse import lil_matrix, csr_matrix, isspmatrix_lil
+    >>> isspmatrix_lil(csr_matrix([[5]]))
+    False
+    """
+    from ._arrays import lil_array
+    return isinstance(x, lil_matrix) or isinstance(x, lil_array)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/_matrix_io.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/_matrix_io.py
new file mode 100644
index 00000000..51711c66
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/_matrix_io.py
@@ -0,0 +1,151 @@
+import numpy as np
+import scipy.sparse
+
+__all__ = ['save_npz', 'load_npz']
+
+
+# Make loading safe vs. malicious input
+PICKLE_KWARGS = dict(allow_pickle=False)
+
+
+def save_npz(file, matrix, compressed=True):
+    """ Save a sparse matrix to a file using ``.npz`` format.
+
+    Parameters
+    ----------
+    file : str or file-like object
+        Either the file name (string) or an open file (file-like object)
+        where the data will be saved. If file is a string, the ``.npz``
+        extension will be appended to the file name if it is not already
+        there.
+    matrix : spmatrix (format: ``csc``, ``csr``, ``bsr``, ``dia`` or ``coo``)
+        The sparse matrix to save.
+    compressed : bool, optional
+        Allow compressing the file. Default: True
+
+    See Also
+    --------
+    scipy.sparse.load_npz : Load a sparse matrix from a file using ``.npz`` format.
+    numpy.savez : Save several arrays into a ``.npz`` archive.
+    numpy.savez_compressed : Save several arrays into a compressed ``.npz`` archive.
+
+    Examples
+    --------
+    Store sparse matrix to disk, and load it again:
+
+    >>> import numpy as np
+    >>> import scipy.sparse
+    >>> sparse_matrix = scipy.sparse.csc_matrix(np.array([[0, 0, 3], [4, 0, 0]]))
+    >>> sparse_matrix
+    <2x3 sparse matrix of type '<class 'numpy.int64'>'
+       with 2 stored elements in Compressed Sparse Column format>
+    >>> sparse_matrix.toarray()
+    array([[0, 0, 3],
+           [4, 0, 0]], dtype=int64)
+
+    >>> scipy.sparse.save_npz('/tmp/sparse_matrix.npz', sparse_matrix)
+    >>> sparse_matrix = scipy.sparse.load_npz('/tmp/sparse_matrix.npz')
+
+    >>> sparse_matrix
+    <2x3 sparse matrix of type '<class 'numpy.int64'>'
+       with 2 stored elements in Compressed Sparse Column format>
+    >>> sparse_matrix.toarray()
+    array([[0, 0, 3],
+           [4, 0, 0]], dtype=int64)
+    """
+    arrays_dict = {}
+    if matrix.format in ('csc', 'csr', 'bsr'):
+        arrays_dict.update(indices=matrix.indices, indptr=matrix.indptr)
+    elif matrix.format == 'dia':
+        arrays_dict.update(offsets=matrix.offsets)
+    elif matrix.format == 'coo':
+        arrays_dict.update(row=matrix.row, col=matrix.col)
+    else:
+        raise NotImplementedError('Save is not implemented for sparse matrix of format {}.'.format(matrix.format))
+    arrays_dict.update(
+        format=matrix.format.encode('ascii'),
+        shape=matrix.shape,
+        data=matrix.data
+    )
+    if compressed:
+        np.savez_compressed(file, **arrays_dict)
+    else:
+        np.savez(file, **arrays_dict)
+
+
+def load_npz(file):
+    """ Load a sparse matrix from a file using ``.npz`` format.
+
+    Parameters
+    ----------
+    file : str or file-like object
+        Either the file name (string) or an open file (file-like object)
+        where the data will be loaded.
+
+    Returns
+    -------
+    result : csc_matrix, csr_matrix, bsr_matrix, dia_matrix or coo_matrix
+        A sparse matrix containing the loaded data.
+
+    Raises
+    ------
+    OSError
+        If the input file does not exist or cannot be read.
+
+    See Also
+    --------
+    scipy.sparse.save_npz : Save a sparse matrix to a file using ``.npz`` format.
+    numpy.load : Load several arrays from a ``.npz`` archive.
+
+    Examples
+    --------
+    Store sparse matrix to disk, and load it again:
+
+    >>> import numpy as np
+    >>> import scipy.sparse
+    >>> sparse_matrix = scipy.sparse.csc_matrix(np.array([[0, 0, 3], [4, 0, 0]]))
+    >>> sparse_matrix
+    <2x3 sparse matrix of type '<class 'numpy.int64'>'
+       with 2 stored elements in Compressed Sparse Column format>
+    >>> sparse_matrix.toarray()
+    array([[0, 0, 3],
+           [4, 0, 0]], dtype=int64)
+
+    >>> scipy.sparse.save_npz('/tmp/sparse_matrix.npz', sparse_matrix)
+    >>> sparse_matrix = scipy.sparse.load_npz('/tmp/sparse_matrix.npz')
+
+    >>> sparse_matrix
+    <2x3 sparse matrix of type '<class 'numpy.int64'>'
+       with 2 stored elements in Compressed Sparse Column format>
+    >>> sparse_matrix.toarray()
+    array([[0, 0, 3],
+           [4, 0, 0]], dtype=int64)
+    """
+
+    with np.load(file, **PICKLE_KWARGS) as loaded:
+        try:
+            matrix_format = loaded['format']
+        except KeyError as e:
+            raise ValueError('The file {} does not contain a sparse matrix.'.format(file)) from e
+
+        matrix_format = matrix_format.item()
+
+        if not isinstance(matrix_format, str):
+            # Play safe with Python 2 vs 3 backward compatibility;
+            # files saved with SciPy < 1.0.0 may contain unicode or bytes.
+            matrix_format = matrix_format.decode('ascii')
+
+        try:
+            cls = getattr(scipy.sparse, '{}_matrix'.format(matrix_format))
+        except AttributeError as e:
+            raise ValueError('Unknown matrix format "{}"'.format(matrix_format)) from e
+
+        if matrix_format in ('csc', 'csr', 'bsr'):
+            return cls((loaded['data'], loaded['indices'], loaded['indptr']), shape=loaded['shape'])
+        elif matrix_format == 'dia':
+            return cls((loaded['data'], loaded['offsets']), shape=loaded['shape'])
+        elif matrix_format == 'coo':
+            return cls((loaded['data'], (loaded['row'], loaded['col'])), shape=loaded['shape'])
+        else:
+            raise NotImplementedError('Load is not implemented for '
+                                      'sparse matrix of format {}.'.format(matrix_format))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/_spfuncs.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/_spfuncs.py
new file mode 100644
index 00000000..5e8e58c0
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/_spfuncs.py
@@ -0,0 +1,76 @@
+""" Functions that operate on sparse matrices
+"""
+
+__all__ = ['count_blocks','estimate_blocksize']
+
+from ._csr import isspmatrix_csr, csr_matrix
+from ._csc import isspmatrix_csc
+from ._sparsetools import csr_count_blocks
+
+
+def estimate_blocksize(A,efficiency=0.7):
+    """Attempt to determine the blocksize of a sparse matrix
+
+    Returns a blocksize=(r,c) such that
+        - A.nnz / A.tobsr( (r,c) ).nnz > efficiency
+    """
+    if not (isspmatrix_csr(A) or isspmatrix_csc(A)):
+        A = csr_matrix(A)
+
+    if A.nnz == 0:
+        return (1,1)
+
+    if not 0 < efficiency < 1.0:
+        raise ValueError('efficiency must satisfy 0.0 < efficiency < 1.0')
+
+    high_efficiency = (1.0 + efficiency) / 2.0
+    nnz = float(A.nnz)
+    M,N = A.shape
+
+    if M % 2 == 0 and N % 2 == 0:
+        e22 = nnz / (4 * count_blocks(A,(2,2)))
+    else:
+        e22 = 0.0
+
+    if M % 3 == 0 and N % 3 == 0:
+        e33 = nnz / (9 * count_blocks(A,(3,3)))
+    else:
+        e33 = 0.0
+
+    if e22 > high_efficiency and e33 > high_efficiency:
+        e66 = nnz / (36 * count_blocks(A,(6,6)))
+        if e66 > efficiency:
+            return (6,6)
+        else:
+            return (3,3)
+    else:
+        if M % 4 == 0 and N % 4 == 0:
+            e44 = nnz / (16 * count_blocks(A,(4,4)))
+        else:
+            e44 = 0.0
+
+        if e44 > efficiency:
+            return (4,4)
+        elif e33 > efficiency:
+            return (3,3)
+        elif e22 > efficiency:
+            return (2,2)
+        else:
+            return (1,1)
+
+
+def count_blocks(A,blocksize):
+    """For a given blocksize=(r,c) count the number of occupied
+    blocks in a sparse matrix A
+    """
+    r,c = blocksize
+    if r < 1 or c < 1:
+        raise ValueError('r and c must be positive')
+
+    if isspmatrix_csr(A):
+        M,N = A.shape
+        return csr_count_blocks(M,N,r,c,A.indptr,A.indices)
+    elif isspmatrix_csc(A):
+        return count_blocks(A.T,(c,r))
+    else:
+        return count_blocks(csr_matrix(A),blocksize)
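+
+
+# Minimal usage sketch (illustrative only, not part of the original module).
+# Build a small block-structured CSR matrix and inspect its blocking:
+#
+#     >>> import numpy as np
+#     >>> A = csr_matrix(np.kron(np.eye(3), np.ones((2, 2))))
+#     >>> count_blocks(A, (2, 2))  # one dense 2x2 block per diagonal entry
+#     3
+#     >>> estimate_blocksize(A)
+#     (2, 2)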
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/_sputils.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/_sputils.py
new file mode 100644
index 00000000..f492b926
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/_sputils.py
@@ -0,0 +1,413 @@
+""" Utility functions for sparse matrix module
+"""
+
+import sys
+import operator
+import numpy as np
+from scipy._lib._util import prod
+import scipy.sparse as sp
+
+
+__all__ = ['upcast', 'getdtype', 'getdata', 'isscalarlike', 'isintlike',
+           'isshape', 'issequence', 'isdense', 'ismatrix', 'get_sum_dtype']
+
+supported_dtypes = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc,
+                    np.uintc, np.int_, np.uint, np.longlong, np.ulonglong,
+                    np.single, np.double,
+                    np.longdouble, np.csingle, np.cdouble, np.clongdouble]
+
+_upcast_memo = {}
+
+
+def upcast(*args):
+    """Returns the nearest supported sparse dtype for the
+    combination of one or more types.
+
+    upcast(t0, t1, ..., tn) -> T  where T is a supported dtype
+
+    Examples
+    --------
+
+    >>> upcast('int32')
+    <class 'numpy.int32'>
+    >>> upcast('bool')
+    <class 'numpy.bool_'>
+    >>> upcast('int32','float32')
+    <class 'numpy.float64'>
+    >>> upcast('bool',complex,float)
+    <class 'numpy.complex128'>
+
+    """
+
+    t = _upcast_memo.get(hash(args))
+    if t is not None:
+        return t
+
+    upcast = np.result_type(*args)
+
+    for t in supported_dtypes:
+        if np.can_cast(upcast, t):
+            _upcast_memo[hash(args)] = t
+            return t
+
+    raise TypeError('no supported conversion for types: %r' % (args,))
+
+
+def upcast_char(*args):
+    """Same as `upcast` but taking dtype.char as input (faster)."""
+    t = _upcast_memo.get(args)
+    if t is not None:
+        return t
+    t = upcast(*map(np.dtype, args))
+    _upcast_memo[args] = t
+    return t
+
+
+def upcast_scalar(dtype, scalar):
+    """Determine data type for binary operation between an array of
+    type `dtype` and a scalar.
+    """
+    return (np.array([0], dtype=dtype) * scalar).dtype
+
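+# e.g. (illustrative, not part of the original source):
+#
+#     >>> upcast_scalar(np.dtype(np.int8), 1.5)
+#     dtype('float64')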
+
+def downcast_intp_index(arr):
+    """
+    Down-cast index array to np.intp dtype if it is of a larger dtype.
+
+    Raise an error if the array contains a value that is too large for
+    intp.
+    """
+    if arr.dtype.itemsize > np.dtype(np.intp).itemsize:
+        if arr.size == 0:
+            return arr.astype(np.intp)
+        maxval = arr.max()
+        minval = arr.min()
+        if maxval > np.iinfo(np.intp).max or minval < np.iinfo(np.intp).min:
+            raise ValueError("Cannot deal with arrays with indices larger "
+                             "than the machine maximum address size "
+                             "(e.g. 64-bit indices on 32-bit machine).")
+        return arr.astype(np.intp)
+    return arr
+
+
+def to_native(A):
+    """
+    Ensure that the data type of the NumPy array `A` has native byte order.
+
+    `A` must be a NumPy array.  If the data type of `A` does not have native
+    byte order, a copy of `A` with a native byte order is returned. Otherwise
+    `A` is returned.
+    """
+    dt = A.dtype
+    if dt.isnative:
+        # Don't call `asarray()` if A is already native, to avoid unnecessarily
+        # creating a view of the input array.
+        return A
+    return np.asarray(A, dtype=dt.newbyteorder('native'))
+
+
+def getdtype(dtype, a=None, default=None):
+    """Function used to simplify argument processing. If 'dtype' is not
+    specified (is None), returns a.dtype; otherwise returns a np.dtype
+    object created from the specified dtype argument. If 'dtype' and 'a'
+    are both None, construct a data type out of the 'default' parameter.
+    Object dtypes are rejected, since sparse matrices do not support them.
+    """
+    # TODO is this really what we want?
+    if dtype is None:
+        try:
+            newdtype = a.dtype
+        except AttributeError as e:
+            if default is not None:
+                newdtype = np.dtype(default)
+            else:
+                raise TypeError("could not interpret data type") from e
+    else:
+        newdtype = np.dtype(dtype)
+        if newdtype == np.object_:
+            raise ValueError(
+                "object dtype is not supported by sparse matrices"
+            )
+
+    return newdtype
+
+
+def getdata(obj, dtype=None, copy=False):
+    """
+    This is a wrapper of `np.array(obj, dtype=dtype, copy=copy)`
+    that will generate a warning if the result is an object array.
+    """
+    data = np.array(obj, dtype=dtype, copy=copy)
+    # Defer to getdtype for checking that the dtype is OK.
+    # This is called for the validation only; we don't need the return value.
+    getdtype(data.dtype)
+    return data
+
+
+def get_index_dtype(arrays=(), maxval=None, check_contents=False):
+    """
+    Based on the input (integer) arrays, determine a suitable index data
+    type that can hold the data in the arrays.
+
+    Parameters
+    ----------
+    arrays : tuple of array_like
+        Input arrays whose types/contents to check
+    maxval : float, optional
+        Maximum value needed
+    check_contents : bool, optional
+        Whether to check the values in the arrays and not just their types.
+        Default: False (check only the types)
+
+    Returns
+    -------
+    dtype : dtype
+        Suitable index data type (int32 or int64)
+
+    """
+
+    int32min = np.int32(np.iinfo(np.int32).min)
+    int32max = np.int32(np.iinfo(np.int32).max)
+
+    # not using intc directly due to misinteractions with pythran
+    dtype = np.int32 if np.intc().itemsize == 4 else np.int64
+    if maxval is not None:
+        maxval = np.int64(maxval)
+        if maxval > int32max:
+            dtype = np.int64
+
+    if isinstance(arrays, np.ndarray):
+        arrays = (arrays,)
+
+    for arr in arrays:
+        arr = np.asarray(arr)
+        if not np.can_cast(arr.dtype, np.int32):
+            if check_contents:
+                if arr.size == 0:
+                    # a bigger type not needed
+                    continue
+                elif np.issubdtype(arr.dtype, np.integer):
+                    maxval = arr.max()
+                    minval = arr.min()
+                    if minval >= int32min and maxval <= int32max:
+                        # a bigger type not needed
+                        continue
+
+            dtype = np.int64
+            break
+
+    return dtype
+
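+# Illustrative sketch (not part of the original source; assumes a platform
+# where np.intc is 4 bytes, so the default index dtype is int32):
+#
+#     >>> get_index_dtype(maxval=10) is np.int32
+#     True
+#     >>> get_index_dtype(maxval=2**40) is np.int64
+#     True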
+
+def get_sum_dtype(dtype):
+    """Mimic numpy's casting for np.sum"""
+    if dtype.kind == 'u' and np.can_cast(dtype, np.uint):
+        return np.uint
+    if np.can_cast(dtype, np.int_):
+        return np.int_
+    return dtype
+
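+# e.g. (illustrative, not part of the original source): a uint8 matrix sums
+# in the platform uint, mirroring np.sum's accumulator choice:
+#
+#     >>> get_sum_dtype(np.dtype(np.uint8)) is np.uint
+#     True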
+
+def isscalarlike(x):
+    """Is x either a scalar, an array scalar, or a 0-dim array?"""
+    return np.isscalar(x) or (isdense(x) and x.ndim == 0)
+
+
+def isintlike(x):
+    """Is x appropriate as an index into a sparse matrix? Returns True
+    if it can be cast safely to a machine int.
+    """
+    # Fast-path check to eliminate non-scalar values. operator.index would
+    # catch this case too, but the exception catching is slow.
+    if np.ndim(x) != 0:
+        return False
+    try:
+        operator.index(x)
+    except (TypeError, ValueError):
+        try:
+            loose_int = bool(int(x) == x)
+        except (TypeError, ValueError):
+            return False
+        if loose_int:
+            msg = "Inexact indices into sparse matrices are not allowed"
+            raise ValueError(msg)
+        return loose_int
+    return True
+
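+# Illustrative behavior sketch (not part of the original source):
+#
+#     >>> isintlike(3), isintlike(np.int64(3)), isintlike(3.5)
+#     (True, True, False)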
+
+def isshape(x, nonneg=False):
+    """Is x a valid 2-tuple of dimensions?
+
+    If nonneg, also checks that the dimensions are non-negative.
+    """
+    try:
+        # Assume it's a tuple of matrix dimensions (M, N)
+        (M, N) = x
+    except Exception:
+        return False
+    else:
+        if isintlike(M) and isintlike(N):
+            if np.ndim(M) == 0 and np.ndim(N) == 0:
+                if not nonneg or (M >= 0 and N >= 0):
+                    return True
+        return False
+
+
+def issequence(t):
+    return ((isinstance(t, (list, tuple)) and
+            (len(t) == 0 or np.isscalar(t[0]))) or
+            (isinstance(t, np.ndarray) and (t.ndim == 1)))
+
+
+def ismatrix(t):
+    return ((isinstance(t, (list, tuple)) and
+             len(t) > 0 and issequence(t[0])) or
+            (isinstance(t, np.ndarray) and t.ndim == 2))
+
+
+def isdense(x):
+    return isinstance(x, np.ndarray)
+
+
+def validateaxis(axis):
+    if axis is not None:
+        axis_type = type(axis)
+
+        # In NumPy, you can pass in tuples for 'axis', but they are
+        # not very useful for sparse matrices given their limited
+        # dimensions, so let's make it explicit that they are not
+        # allowed to be passed in
+        if axis_type == tuple:
+            raise TypeError(("Tuples are not accepted for the 'axis' "
+                             "parameter. Please pass in one of the "
+                             "following: {-2, -1, 0, 1, None}."))
+
+        # If not a tuple, check that the provided axis is actually
+        # an integer and raise a TypeError similar to NumPy's
+        if not np.issubdtype(np.dtype(axis_type), np.integer):
+            raise TypeError("axis must be an integer, not {name}"
+                            .format(name=axis_type.__name__))
+
+        if not (-2 <= axis <= 1):
+            raise ValueError("axis out of range")
+
+
+def check_shape(args, current_shape=None):
+    """Imitate numpy.matrix handling of shape arguments"""
+    if len(args) == 0:
+        raise TypeError("function missing 1 required positional argument: "
+                        "'shape'")
+    elif len(args) == 1:
+        try:
+            shape_iter = iter(args[0])
+        except TypeError:
+            new_shape = (operator.index(args[0]), )
+        else:
+            new_shape = tuple(operator.index(arg) for arg in shape_iter)
+    else:
+        new_shape = tuple(operator.index(arg) for arg in args)
+
+    if current_shape is None:
+        if len(new_shape) != 2:
+            raise ValueError('shape must be a 2-tuple of positive integers')
+        elif any(d < 0 for d in new_shape):
+            raise ValueError("'shape' elements cannot be negative")
+
+    else:
+        # Check the current size only if needed
+        current_size = prod(current_shape)
+
+        # Check for negatives
+        negative_indexes = [i for i, x in enumerate(new_shape) if x < 0]
+        if len(negative_indexes) == 0:
+            new_size = prod(new_shape)
+            if new_size != current_size:
+                raise ValueError('cannot reshape array of size {} into shape {}'
+                                 .format(current_size, new_shape))
+        elif len(negative_indexes) == 1:
+            skip = negative_indexes[0]
+            specified = prod(new_shape[0:skip] + new_shape[skip+1:])
+            unspecified, remainder = divmod(current_size, specified)
+            if remainder != 0:
+                err_shape = tuple('newshape' if x < 0 else x for x in new_shape)
+                raise ValueError('cannot reshape array of size {} into shape {}'
+                                 ''.format(current_size, err_shape))
+            new_shape = new_shape[0:skip] + (unspecified,) + new_shape[skip+1:]
+        else:
+            raise ValueError('can only specify one unknown dimension')
+
+    if len(new_shape) != 2:
+        raise ValueError('matrix shape must be two-dimensional')
+
+    return new_shape
+
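+# Minimal sketch (illustrative, not part of the original source): one
+# dimension may be given as -1 and is inferred from the current size.
+#
+#     >>> check_shape(((2, 6),))
+#     (2, 6)
+#     >>> check_shape((-1, 3), current_shape=(2, 6))
+#     (4, 3)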
+
+def check_reshape_kwargs(kwargs):
+    """Unpack keyword arguments for reshape function.
+
+    This is useful because keyword arguments after star arguments are not
+    allowed in Python 2, but star keyword arguments are. This function unpacks
+    'order' and 'copy' from the star keyword arguments (with defaults) and
+    throws an error for any remaining.
+    """
+
+    order = kwargs.pop('order', 'C')
+    copy = kwargs.pop('copy', False)
+    if kwargs:  # Some unused kwargs remain
+        raise TypeError('reshape() got unexpected keyword arguments: {}'
+                        .format(', '.join(kwargs.keys())))
+    return order, copy
+
+
+def is_pydata_spmatrix(m):
+    """
+    Check whether object is pydata/sparse matrix, avoiding importing the module.
+    """
+    base_cls = getattr(sys.modules.get('sparse'), 'SparseArray', None)
+    return base_cls is not None and isinstance(m, base_cls)
+
+
+###############################################################################
+# Wrappers for NumPy types that are deprecated
+
+# Numpy versions of these functions raise deprecation warnings, the
+# ones below do not.
+
+def matrix(*args, **kwargs):
+    return np.array(*args, **kwargs).view(np.matrix)
+
+
+def asmatrix(data, dtype=None):
+    if isinstance(data, np.matrix) and (dtype is None or data.dtype == dtype):
+        return data
+    return np.asarray(data, dtype=dtype).view(np.matrix)
+
+###############################################################################
+
+
+def _todata(s: 'sp.spmatrix') -> np.ndarray:
+    """Access nonzero values, possibly after summing duplicates.
+
+    Parameters
+    ----------
+    s : sparse matrix
+        Input sparse matrix.
+
+    Returns
+    -------
+    data: ndarray
+      Nonzero values of the array, with shape (s.nnz,)
+
+    """
+    if isinstance(s, sp._data._data_matrix):
+        return s._deduped_data()
+
+    if isinstance(s, sp.dok_matrix):
+        return np.fromiter(s.values(), dtype=s.dtype, count=s.nnz)
+
+    if isinstance(s, sp.lil_matrix):
+        data = np.empty(s.nnz, dtype=s.dtype)
+        sp._csparsetools.lil_flatten_to_array(s.data, data)
+        return data
+
+    return s.tocoo()._deduped_data()
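+
+
+# Minimal usage sketch (illustrative, not part of the original module):
+# duplicate COO entries are summed before the values are returned.
+#
+#     >>> A = sp.coo_matrix(([1, 2, 2], ([0, 1, 1], [0, 1, 1])), shape=(2, 2))
+#     >>> _todata(A)
+#     array([1, 4])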
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/base.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/base.py
new file mode 100644
index 00000000..d4a1ae43
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/base.py
@@ -0,0 +1,42 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.sparse` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _base
+
+
+__all__ = [  # noqa: F822
+    'MAXPRINT',
+    'SparseEfficiencyWarning',
+    'SparseFormatWarning',
+    'SparseWarning',
+    'asmatrix',
+    'check_reshape_kwargs',
+    'check_shape',
+    'get_sum_dtype',
+    'isdense',
+    'isintlike',
+    'isscalarlike',
+    'issparse',
+    'isspmatrix',
+    'spmatrix',
+    'validateaxis',
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.sparse.base is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.sparse instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, "
+                  "the `scipy.sparse.base` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_base, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/bsr.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/bsr.py
new file mode 100644
index 00000000..b7666e20
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/bsr.py
@@ -0,0 +1,46 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.sparse` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _bsr
+
+
+__all__ = [  # noqa: F822
+    'bsr_matmat',
+    'bsr_matrix',
+    'bsr_matvec',
+    'bsr_matvecs',
+    'bsr_sort_indices',
+    'bsr_tocsr',
+    'bsr_transpose',
+    'check_shape',
+    'csr_matmat_maxnnz',
+    'get_index_dtype',
+    'getdata',
+    'getdtype',
+    'isshape',
+    'isspmatrix',
+    'isspmatrix_bsr',
+    'spmatrix',
+    'to_native',
+    'upcast',
+    'warn',
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.sparse.bsr is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.sparse instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, "
+                  "the `scipy.sparse.bsr` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_bsr, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/compressed.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/compressed.py
new file mode 100644
index 00000000..a40d2edb
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/compressed.py
@@ -0,0 +1,54 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.sparse` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _compressed
+
+
+__all__ = [  # noqa: F822
+    'IndexMixin',
+    'SparseEfficiencyWarning',
+    'check_shape',
+    'csr_column_index1',
+    'csr_column_index2',
+    'csr_row_index',
+    'csr_row_slice',
+    'csr_sample_offsets',
+    'csr_sample_values',
+    'csr_todense',
+    'downcast_intp_index',
+    'get_csr_submatrix',
+    'get_index_dtype',
+    'get_sum_dtype',
+    'getdtype',
+    'is_pydata_spmatrix',
+    'isdense',
+    'isintlike',
+    'isscalarlike',
+    'isshape',
+    'isspmatrix',
+    'operator',
+    'spmatrix',
+    'to_native',
+    'upcast',
+    'upcast_char',
+    'warn',
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.sparse.compressed is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.sparse instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, "
+                  "the `scipy.sparse.compressed` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_compressed, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/construct.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/construct.py
new file mode 100644
index 00000000..647d11fa
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/construct.py
@@ -0,0 +1,53 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.sparse` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _construct
+
+
+__all__ = [  # noqa: F822
+    'block_diag',
+    'bmat',
+    'bsr_matrix',
+    'check_random_state',
+    'coo_matrix',
+    'csc_matrix',
+    'csr_hstack',
+    'csr_matrix',
+    'dia_matrix',
+    'diags',
+    'eye',
+    'get_index_dtype',
+    'hstack',
+    'identity',
+    'isscalarlike',
+    'issparse',
+    'kron',
+    'kronsum',
+    'numbers',
+    'partial',
+    'rand',
+    'random',
+    'rng_integers',
+    'spdiags',
+    'upcast',
+    'vstack',
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.sparse.construct is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.sparse instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, "
+                  "the `scipy.sparse.construct` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_construct, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/coo.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/coo.py
new file mode 100644
index 00000000..92f6e83d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/coo.py
@@ -0,0 +1,47 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.sparse` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _coo
+
+
+__all__ = [  # noqa: F822
+    'SparseEfficiencyWarning',
+    'check_reshape_kwargs',
+    'check_shape',
+    'coo_matrix',
+    'coo_matvec',
+    'coo_tocsr',
+    'coo_todense',
+    'downcast_intp_index',
+    'get_index_dtype',
+    'getdata',
+    'getdtype',
+    'isshape',
+    'isspmatrix',
+    'isspmatrix_coo',
+    'operator',
+    'spmatrix',
+    'to_native',
+    'upcast',
+    'upcast_char',
+    'warn',
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.sparse.coo is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.sparse instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, "
+                  "the `scipy.sparse.coo` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_coo, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/csc.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/csc.py
new file mode 100644
index 00000000..1a941a64
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/csc.py
@@ -0,0 +1,34 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.sparse` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _csc
+
+
+__all__ = [  # noqa: F822
+    'csc_matrix',
+    'csc_tocsr',
+    'expandptr',
+    'get_index_dtype',
+    'isspmatrix_csc',
+    'spmatrix',
+    'upcast',
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.sparse.csc is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.sparse instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, "
+                  "the `scipy.sparse.csc` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_csc, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/__init__.py
new file mode 100644
index 00000000..3497e393
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/__init__.py
@@ -0,0 +1,208 @@
+r"""
+Compressed sparse graph routines (:mod:`scipy.sparse.csgraph`)
+==============================================================
+
+.. currentmodule:: scipy.sparse.csgraph
+
+Fast graph algorithms based on sparse matrix representations.
+
+Contents
+--------
+
+.. autosummary::
+   :toctree: generated/
+
+   connected_components -- determine connected components of a graph
+   laplacian -- compute the laplacian of a graph
+   shortest_path -- compute the shortest path between points on a positive graph
+   dijkstra -- use Dijkstra's algorithm for shortest path
+   floyd_warshall -- use the Floyd-Warshall algorithm for shortest path
+   bellman_ford -- use the Bellman-Ford algorithm for shortest path
+   johnson -- use Johnson's algorithm for shortest path
+   breadth_first_order -- compute a breadth-first order of nodes
+   depth_first_order -- compute a depth-first order of nodes
+   breadth_first_tree -- construct the breadth-first tree from a given node
+   depth_first_tree -- construct a depth-first tree from a given node
+   minimum_spanning_tree -- construct the minimum spanning tree of a graph
+   reverse_cuthill_mckee -- compute permutation for reverse Cuthill-McKee ordering
+   maximum_flow -- solve the maximum flow problem for a graph
+   maximum_bipartite_matching -- compute a maximum matching of a bipartite graph
+   min_weight_full_bipartite_matching -- compute a minimum weight full matching of a bipartite graph
+   structural_rank -- compute the structural rank of a graph
+   NegativeCycleError
+
+.. autosummary::
+   :toctree: generated/
+
+   construct_dist_matrix
+   csgraph_from_dense
+   csgraph_from_masked
+   csgraph_masked_from_dense
+   csgraph_to_dense
+   csgraph_to_masked
+   reconstruct_path
+
+Graph Representations
+---------------------
+This module uses graphs which are stored in a matrix format. A
+graph with N nodes can be represented by an (N x N) adjacency matrix G.
+If there is a connection from node i to node j, then G[i, j] = w, where
+w is the weight of the connection. For nodes i and j which are
+not connected, the value depends on the representation:
+
+- for dense array representations, non-edges are represented by
+  G[i, j] = 0, infinity, or NaN.
+
+- for dense masked representations (of type np.ma.MaskedArray), non-edges
+  are represented by masked values. This can be useful when graphs with
+  zero-weight edges are desired.
+
+- for sparse array representations, non-edges are represented by
+  non-entries in the matrix. This sort of sparse representation also
+  allows for edges with zero weights.
+
+As a concrete example, imagine that you would like to represent the following
+undirected graph::
+
+              G
+
+             (0)
+            /   \
+           1     2
+          /       \
+        (2)       (1)
+
+This graph has three nodes, where node 0 and 1 are connected by an edge of
+weight 2, and nodes 0 and 2 are connected by an edge of weight 1.
+We can construct the dense, masked, and sparse representations as follows,
+keeping in mind that an undirected graph is represented by a symmetric matrix::
+
+    >>> import numpy as np
+    >>> G_dense = np.array([[0, 2, 1],
+    ...                     [2, 0, 0],
+    ...                     [1, 0, 0]])
+    >>> G_masked = np.ma.masked_values(G_dense, 0)
+    >>> from scipy.sparse import csr_matrix
+    >>> G_sparse = csr_matrix(G_dense)
+
+This becomes more difficult when zero edges are significant. For example,
+consider the situation when we slightly modify the above graph::
+
+             G2
+
+             (0)
+            /   \
+           0     2
+          /       \
+        (2)       (1)
+
+This is identical to the previous graph, except nodes 0 and 2 are connected
+by an edge of zero weight. In this case, the dense representation above
+leads to ambiguities: how can non-edges be represented if zero is a meaningful
+value? In this case, either a masked or sparse representation must be used
+to eliminate the ambiguity::
+
+    >>> import numpy as np
+    >>> G2_data = np.array([[np.inf, 2,      0     ],
+    ...                     [2,      np.inf, np.inf],
+    ...                     [0,      np.inf, np.inf]])
+    >>> G2_masked = np.ma.masked_invalid(G2_data)
+    >>> from scipy.sparse.csgraph import csgraph_from_dense
+    >>> # G2_sparse = csr_matrix(G2_data) would give the wrong result
+    >>> G2_sparse = csgraph_from_dense(G2_data, null_value=np.inf)
+    >>> G2_sparse.data
+    array([ 2.,  0.,  2.,  0.])
+
+Here we have used a utility routine from the csgraph submodule in order to
+convert the dense representation to a sparse representation which can be
+understood by the algorithms in the submodule. By viewing the data array, we
+can see that the zero values are explicitly encoded in the graph.
+
+Directed vs. undirected
+^^^^^^^^^^^^^^^^^^^^^^^
+Matrices may represent either directed or undirected graphs. This is
+specified throughout the csgraph module by a boolean keyword. Graphs are
+assumed to be directed by default. In a directed graph, traversal from node
+i to node j can be accomplished over the edge G[i, j], but not the edge
+G[j, i].  Consider the following dense graph::
+
+    >>> import numpy as np
+    >>> G_dense = np.array([[0, 1, 0],
+    ...                     [2, 0, 3],
+    ...                     [0, 4, 0]])
+
+When ``directed=True`` we get the graph::
+
+      ---1--> ---3-->
+    (0)     (1)     (2)
+      <--2--- <--4---
+
+In a non-directed graph, traversal from node i to node j can be
+accomplished over either G[i, j] or G[j, i].  If both edges are not null,
+and the two have unequal weights, then the smaller of the two is used.
+
+So for the same graph, when ``directed=False`` we get the graph::
+
+    (0)--1--(1)--3--(2)
+
+Note that a symmetric matrix will represent an undirected graph, regardless
+of whether the 'directed' keyword is set to True or False. In this case,
+using ``directed=True`` generally leads to more efficient computation.
+
+The routines in this module accept as input either scipy.sparse representations
+(csr, csc, or lil format), masked representations, or dense representations
+with non-edges indicated by zeros, infinities, and NaN entries.
+"""
+
+__docformat__ = "restructuredtext en"
+
+__all__ = ['connected_components',
+           'laplacian',
+           'shortest_path',
+           'floyd_warshall',
+           'dijkstra',
+           'bellman_ford',
+           'johnson',
+           'breadth_first_order',
+           'depth_first_order',
+           'breadth_first_tree',
+           'depth_first_tree',
+           'minimum_spanning_tree',
+           'reverse_cuthill_mckee',
+           'maximum_flow',
+           'maximum_bipartite_matching',
+           'min_weight_full_bipartite_matching',
+           'structural_rank',
+           'construct_dist_matrix',
+           'reconstruct_path',
+           'csgraph_masked_from_dense',
+           'csgraph_from_dense',
+           'csgraph_from_masked',
+           'csgraph_to_dense',
+           'csgraph_to_masked',
+           'NegativeCycleError']
+
+from ._laplacian import laplacian
+from ._shortest_path import (
+    shortest_path, floyd_warshall, dijkstra, bellman_ford, johnson,
+    NegativeCycleError
+)
+from ._traversal import (
+    breadth_first_order, depth_first_order, breadth_first_tree,
+    depth_first_tree, connected_components
+)
+from ._min_spanning_tree import minimum_spanning_tree
+from ._flow import maximum_flow
+from ._matching import (
+    maximum_bipartite_matching, min_weight_full_bipartite_matching
+)
+from ._reordering import reverse_cuthill_mckee, structural_rank
+from ._tools import (
+    construct_dist_matrix, reconstruct_path, csgraph_from_dense,
+    csgraph_to_dense, csgraph_masked_from_dense, csgraph_from_masked,
+    csgraph_to_masked
+)
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/_laplacian.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/_laplacian.py
new file mode 100644
index 00000000..4a47ab66
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/_laplacian.py
@@ -0,0 +1,555 @@
+"""
+Laplacian of a compressed-sparse graph
+"""
+
+import numpy as np
+from scipy.sparse import isspmatrix
+from scipy.sparse.linalg import LinearOperator
+
+
+###############################################################################
+# Graph laplacian
+def laplacian(
+    csgraph,
+    normed=False,
+    return_diag=False,
+    use_out_degree=False,
+    *,
+    copy=True,
+    form="array",
+    dtype=None,
+    symmetrized=False,
+):
+    """
+    Return the Laplacian of a directed graph.
+
+    Parameters
+    ----------
+    csgraph : array_like or sparse matrix, 2 dimensions
+        compressed-sparse graph, with shape (N, N).
+    normed : bool, optional
+        If True, then compute symmetrically normalized Laplacian.
+        Default: False.
+    return_diag : bool, optional
+        If True, then also return an array related to vertex degrees.
+        Default: False.
+    use_out_degree : bool, optional
+        If True, then use out-degree instead of in-degree.
+        This distinction matters only if the graph is asymmetric.
+        Default: False.
+    copy : bool, optional
+        If False, then change `csgraph` in place if possible,
+        avoiding doubling the memory use.
+        Default: True, for backward compatibility.
+    form : 'array', or 'function', or 'lo'
+        Determines the format of the output Laplacian:
+
+        * 'array' is a numpy array;
+        * 'function' is a callable that evaluates the Laplacian-vector
+          or Laplacian-matrix product;
+        * 'lo' results in the format of a `LinearOperator`.
+
+        Choosing 'function' or 'lo' always avoids doubling
+        the memory use, ignoring the `copy` value.
+        Default: 'array', for backward compatibility.
+    dtype : None or one of numeric numpy dtypes, optional
+        The dtype of the output. If ``dtype=None``, the dtype of the
+        output matches the dtype of the input csgraph, except for
+        the case ``normed=True`` and integer-like csgraph, where
+        the output dtype is 'float' allowing accurate normalization,
+        but dramatically increasing the memory use.
+        Default: None, for backward compatibility.
+    symmetrized : bool, optional
+        If True, then the output Laplacian is symmetric/Hermitian.
+        The symmetrization is done by ``csgraph + csgraph.T.conj``
+        without dividing by 2 to preserve integer dtypes if possible
+        prior to the construction of the Laplacian.
+        The symmetrization will increase the memory footprint of
+        sparse matrices unless the sparsity pattern is symmetric or
+        `form` is 'function' or 'lo'.
+        Default: False, for backward compatibility.
+
+    Returns
+    -------
+    lap : ndarray, or sparse matrix, or `LinearOperator`
+        The N x N Laplacian of csgraph. It will be a NumPy array (dense)
+        if the input was dense, or a sparse matrix otherwise, or
+        the format of a function or `LinearOperator` if
+        `form` equals 'function' or 'lo', respectively.
+    diag : ndarray, optional
+        The length-N main diagonal of the Laplacian matrix.
+        For the normalized Laplacian, this is the array of square roots
+        of vertex degrees or 1 if the degree is zero.
+
+    Notes
+    -----
+    The Laplacian matrix of a graph is sometimes referred to as the
+    "Kirchhoff matrix" or just the "Laplacian", and is useful in many
+    parts of spectral graph theory.
+    In particular, the eigen-decomposition of the Laplacian can give
+    insight into many properties of the graph, e.g., it is commonly
+    used for spectral data embedding and clustering.
+
+    The constructed Laplacian doubles the memory use if ``copy=True`` and
+    ``form="array"``, which is the default.
+    Choosing ``copy=False`` takes effect only when ``form="array"`` and
+    the matrix is either sparse in the ``coo`` format or a dense array;
+    integer input with ``normed=True`` still forces a float copy.
+
+    Sparse input is reformatted into ``coo`` if ``form="array"``,
+    which is the default.
+
+    If the input adjacency matrix is not symmetric, the Laplacian is
+    also non-symmetric unless ``symmetrized=True`` is used.
+
+    Diagonal entries of the input adjacency matrix are ignored and
+    replaced with zeros for the purpose of normalization where ``normed=True``.
+    The normalization uses the inverse square roots of the row-sums of the
+    input adjacency matrix, and thus may fail if the row-sums contain
+    negative values or complex values with a non-zero imaginary part.
+
+    The normalization is symmetric, making the normalized Laplacian also
+    symmetric if the input csgraph was symmetric.
+
+    References
+    ----------
+    .. [1] Laplacian matrix. https://en.wikipedia.org/wiki/Laplacian_matrix
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.sparse import csgraph
+
+    Our first illustration is the symmetric graph
+
+    >>> G = np.arange(4) * np.arange(4)[:, np.newaxis]
+    >>> G
+    array([[0, 0, 0, 0],
+           [0, 1, 2, 3],
+           [0, 2, 4, 6],
+           [0, 3, 6, 9]])
+
+    and its symmetric Laplacian matrix
+
+    >>> csgraph.laplacian(G)
+    array([[ 0,  0,  0,  0],
+           [ 0,  5, -2, -3],
+           [ 0, -2,  8, -6],
+           [ 0, -3, -6,  9]])
+
+    The non-symmetric graph
+
+    >>> G = np.arange(9).reshape(3, 3)
+    >>> G
+    array([[0, 1, 2],
+           [3, 4, 5],
+           [6, 7, 8]])
+
+    has different row- and column sums, resulting in two varieties
+    of the Laplacian matrix, using an in-degree, which is the default
+
+    >>> L_in_degree = csgraph.laplacian(G)
+    >>> L_in_degree
+    array([[ 9, -1, -2],
+           [-3,  8, -5],
+           [-6, -7,  7]])
+
+    or alternatively an out-degree
+
+    >>> L_out_degree = csgraph.laplacian(G, use_out_degree=True)
+    >>> L_out_degree
+    array([[ 3, -1, -2],
+           [-3,  8, -5],
+           [-6, -7, 13]])
+
+    Constructing a symmetric Laplacian matrix, one can add the two as
+
+    >>> L_in_degree + L_out_degree.T
+    array([[ 12,  -4,  -8],
+            [ -4,  16, -12],
+            [ -8, -12,  20]])
+
+    or use the ``symmetrized=True`` option
+
+    >>> csgraph.laplacian(G, symmetrized=True)
+    array([[ 12,  -4,  -8],
+           [ -4,  16, -12],
+           [ -8, -12,  20]])
+
+    that is equivalent to symmetrizing the original graph
+
+    >>> csgraph.laplacian(G + G.T)
+    array([[ 12,  -4,  -8],
+           [ -4,  16, -12],
+           [ -8, -12,  20]])
+
+    The goal of normalization is to make all the non-zero diagonal entries
+    of the Laplacian matrix equal to one, scaling the off-diagonal
+    entries correspondingly. The normalization can be done manually, e.g.,
+
+    >>> G = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]])
+    >>> L, d = csgraph.laplacian(G, return_diag=True)
+    >>> L
+    array([[ 2, -1, -1],
+           [-1,  2, -1],
+           [-1, -1,  2]])
+    >>> d
+    array([2, 2, 2])
+    >>> scaling = np.sqrt(d)
+    >>> scaling
+    array([1.41421356, 1.41421356, 1.41421356])
+    >>> (1/scaling)*L*(1/scaling)
+    array([[ 1. , -0.5, -0.5],
+           [-0.5,  1. , -0.5],
+           [-0.5, -0.5,  1. ]])
+
+    Or using ``normed=True`` option
+
+    >>> L, d = csgraph.laplacian(G, return_diag=True, normed=True)
+    >>> L
+    array([[ 1. , -0.5, -0.5],
+           [-0.5,  1. , -0.5],
+           [-0.5, -0.5,  1. ]])
+
+    which now instead of the diagonal returns the scaling coefficients
+
+    >>> d
+    array([1.41421356, 1.41421356, 1.41421356])
+
+    Zero scaling coefficients are substituted with 1s, where scaling
+    has thus no effect, e.g.,
+
+    >>> G = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0]])
+    >>> G
+    array([[0, 0, 0],
+           [0, 0, 1],
+           [0, 1, 0]])
+    >>> L, d = csgraph.laplacian(G, return_diag=True, normed=True)
+    >>> L
+    array([[ 0., -0., -0.],
+           [-0.,  1., -1.],
+           [-0., -1.,  1.]])
+    >>> d
+    array([1., 1., 1.])
+
+    Only the symmetric normalization is implemented, resulting
+    in a symmetric Laplacian matrix if and only if the input graph is
+    symmetric and has all non-negative degrees, as in the examples above.
+
+    The output Laplacian matrix is by default a dense array or a sparse
+    matrix, with its shape, format, and dtype inferred from the input
+    graph matrix:
+
+    >>> G = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]]).astype(np.float32)
+    >>> G
+    array([[0., 1., 1.],
+           [1., 0., 1.],
+           [1., 1., 0.]], dtype=float32)
+    >>> csgraph.laplacian(G)
+    array([[ 2., -1., -1.],
+           [-1.,  2., -1.],
+           [-1., -1.,  2.]], dtype=float32)
+
+    but can alternatively be generated matrix-free as a LinearOperator:
+
+    >>> L = csgraph.laplacian(G, form="lo")
+    >>> L
+    <3x3 _CustomLinearOperator with dtype=float32>
+    >>> L(np.eye(3))
+    array([[ 2., -1., -1.],
+           [-1.,  2., -1.],
+           [-1., -1.,  2.]])
+
+    or as a lambda-function:
+
+    >>> L = csgraph.laplacian(G, form="function")
+    >>> L
+    <function _laplace.<locals>.<lambda> at 0x0000012AE6F5A598>
+    >>> L(np.eye(3))
+    array([[ 2., -1., -1.],
+           [-1.,  2., -1.],
+           [-1., -1.,  2.]])
+
+    The Laplacian matrix is used for
+    spectral data clustering and embedding
+    as well as for spectral graph partitioning.
+    Our final example illustrates the latter
+    for a noisy directed linear graph.
+
+    >>> from scipy.sparse import diags, random
+    >>> from scipy.sparse.linalg import lobpcg
+
+    Create a directed linear graph with ``N=35`` vertices
+    using a sparse adjacency matrix ``G``:
+
+    >>> N = 35
+    >>> G = diags(np.ones(N-1), 1, format="csr")
+
+    Fix a random number generator ``rng`` and add random sparse noise
+    to the graph ``G``:
+
+    >>> rng = np.random.default_rng()
+    >>> G += 1e-2 * random(N, N, density=0.1, random_state=rng)
+
+    Set initial approximations for eigenvectors:
+
+    >>> X = rng.random((N, 2))
+
+    The constant vector of ones is always a trivial eigenvector
+    of the non-normalized Laplacian and is filtered out here:
+
+    >>> Y = np.ones((N, 1))
+
+    Alternating (1) the sign of the graph weights allows determining
+    labels for spectral max- and min-cuts in a single loop.
+    Since the graph is directed, the option ``symmetrized=True``
+    must be used in the construction of the Laplacian.
+    The option ``normed=True`` cannot be used in (2) because the weights
+    turn negative here and the symmetric normalization takes square roots.
+    The option ``form="lo"`` in (2) is matrix-free, i.e., guarantees
+    a fixed memory footprint and read-only access to the graph.
+    Calling the eigenvalue solver ``lobpcg`` (3) computes the Fiedler vector
+    that determines the labels as the signs of its components in (5).
+    Since the sign in an eigenvector is not deterministic and can flip,
+    we fix the sign of the first component to be always +1 in (4).
+
+    >>> for cut in ["max", "min"]:
+    ...     G = -G  # 1.
+    ...     L = csgraph.laplacian(G, symmetrized=True, form="lo")  # 2.
+    ...     _, eves = lobpcg(L, X, Y=Y, largest=False, tol=1e-3)  # 3.
+    ...     eves *= np.sign(eves[0, 0])  # 4.
+    ...     print(cut + "-cut labels:\\n", 1 * (eves[:, 0]>0))  # 5.
+    max-cut labels:
+    [1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1]
+    min-cut labels:
+    [1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
+
+    As anticipated for a (slightly noisy) linear graph,
+    the max-cut strips all the edges of the graph, coloring all
+    odd vertices one color and all even vertices another,
+    while the balanced min-cut partitions the graph
+    in the middle by deleting a single edge.
+    Both determined partitions are optimal.
+    """
+    if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
+        raise ValueError('csgraph must be a square matrix or array')
+
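+    # Symmetric normalization divides by square roots of the degrees, so
+    # integer inputs are promoted to float64 up front.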
+    if normed and (
+        np.issubdtype(csgraph.dtype, np.signedinteger)
+        or np.issubdtype(csgraph.dtype, np.uint)
+    ):
+        csgraph = csgraph.astype(np.float64)
+
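+    # Dispatch on the input type; the `_flo` variants return matrix-free
+    # forms (a callable or a LinearOperator) instead of explicit arrays.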
+    if form == "array":
+        create_lap = (
+            _laplacian_sparse if isspmatrix(csgraph) else _laplacian_dense
+        )
+    else:
+        create_lap = (
+            _laplacian_sparse_flo
+            if isspmatrix(csgraph)
+            else _laplacian_dense_flo
+        )
+
+    degree_axis = 1 if use_out_degree else 0
+
+    lap, d = create_lap(
+        csgraph,
+        normed=normed,
+        axis=degree_axis,
+        copy=copy,
+        form=form,
+        dtype=dtype,
+        symmetrized=symmetrized,
+    )
+    if return_diag:
+        return lap, d
+    return lap
+
+
+def _setdiag_dense(m, d):
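+    # Flat indexing with stride len(d) + 1 walks the main diagonal of the
+    # square array m, assigning d in place.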
+    step = len(d) + 1
+    m.flat[::step] = d
+
+
+def _laplace(m, d):
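+    # Matvec of the unnormalized Laplacian L = D - A: v -> d * v - m @ v.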
+    return lambda v: v * d[:, np.newaxis] - m @ v
+
+
+def _laplace_normed(m, d, nd):
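+    # Normalized Laplacian matvec: scale input and output by nd, the
+    # inverse square roots of the degrees, around the plain Laplacian.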
+    laplace = _laplace(m, d)
+    return lambda v: nd[:, np.newaxis] * laplace(v * nd[:, np.newaxis])
+
+
+def _laplace_sym(m, d):
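+    # Symmetrized Laplacian matvec: v -> d * v - A @ v - A^H @ v; the A^H
+    # product is formed via conjugate transposes, avoiding building A + A^H.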
+    return (
+        lambda v: v * d[:, np.newaxis]
+        - m @ v
+        - np.transpose(np.conjugate(np.transpose(np.conjugate(v)) @ m))
+    )
+
+
+def _laplace_normed_sym(m, d, nd):
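+    # Two-sided nd scaling, as in _laplace_normed, over the symmetrized form.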
+    laplace_sym = _laplace_sym(m, d)
+    return lambda v: nd[:, np.newaxis] * laplace_sym(v * nd[:, np.newaxis])
+
+
+def _linearoperator(mv, shape, dtype):
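+    # The closures above accept 2-D blocks, so one callable serves as both
+    # matvec and matmat.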
+    return LinearOperator(matvec=mv, matmat=mv, shape=shape, dtype=dtype)
+
+
+def _laplacian_sparse_flo(graph, normed, axis, copy, form, dtype, symmetrized):
+    # The keyword argument `copy` is unused and has no effect here.
+    del copy
+
+    if dtype is None:
+        dtype = graph.dtype
+
+    graph_sum = graph.sum(axis=axis).getA1()
+    graph_diagonal = graph.diagonal()
+    diag = graph_sum - graph_diagonal
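+    # Symmetrizing combines in- and out-sums and discounts the self-loop
+    # weight once per direction.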
+    if symmetrized:
+        graph_sum += graph.sum(axis=1 - axis).getA1()
+        diag = graph_sum - graph_diagonal - graph_diagonal
+
+    if normed:
+        isolated_node_mask = diag == 0
+        w = np.where(isolated_node_mask, 1, np.sqrt(diag))
+        if symmetrized:
+            md = _laplace_normed_sym(graph, graph_sum, 1.0 / w)
+        else:
+            md = _laplace_normed(graph, graph_sum, 1.0 / w)
+        if form == "function":
+            return md, w.astype(dtype, copy=False)
+        elif form == "lo":
+            m = _linearoperator(md, shape=graph.shape, dtype=dtype)
+            return m, w.astype(dtype, copy=False)
+        else:
+            raise ValueError(f"Invalid form: {form!r}")
+    else:
+        if symmetrized:
+            md = _laplace_sym(graph, graph_sum)
+        else:
+            md = _laplace(graph, graph_sum)
+        if form == "function":
+            return md, diag.astype(dtype, copy=False)
+        elif form == "lo":
+            m = _linearoperator(md, shape=graph.shape, dtype=dtype)
+            return m, diag.astype(dtype, copy=False)
+        else:
+            raise ValueError(f"Invalid form: {form!r}")
+
+
+def _laplacian_sparse(graph, normed, axis, copy, form, dtype, symmetrized):
+    # The keyword argument `form` is unused and has no effect here.
+    del form
+
+    if dtype is None:
+        dtype = graph.dtype
+
+    needs_copy = False
+    if graph.format in ('lil', 'dok'):
+        m = graph.tocoo()
+    else:
+        m = graph
+        if copy:
+            needs_copy = True
+
+    if symmetrized:
+        m += m.T.conj()
+
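+    # Degree vector: row- or column-sums with any self-loop weight removed.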
+    w = m.sum(axis=axis).getA1() - m.diagonal()
+    if normed:
+        m = m.tocoo(copy=needs_copy)
+        isolated_node_mask = (w == 0)
+        w = np.where(isolated_node_mask, 1, np.sqrt(w))
+        m.data /= w[m.row]
+        m.data /= w[m.col]
+        m.data *= -1
+        m.setdiag(1 - isolated_node_mask)
+    else:
+        if m.format == 'dia':
+            m = m.copy()
+        else:
+            m = m.tocoo(copy=needs_copy)
+        m.data *= -1
+        m.setdiag(w)
+
+    return m.astype(dtype, copy=False), w.astype(dtype)
+
+
+def _laplacian_dense_flo(graph, normed, axis, copy, form, dtype, symmetrized):
+
+    if copy:
+        m = np.array(graph)
+    else:
+        m = np.asarray(graph)
+
+    if dtype is None:
+        dtype = m.dtype
+
+    graph_sum = m.sum(axis=axis)
+    graph_diagonal = m.diagonal()
+    diag = graph_sum - graph_diagonal
+    if symmetrized:
+        graph_sum += m.sum(axis=1 - axis)
+        diag = graph_sum - graph_diagonal - graph_diagonal
+
+    if normed:
+        isolated_node_mask = diag == 0
+        w = np.where(isolated_node_mask, 1, np.sqrt(diag))
+        if symmetrized:
+            md = _laplace_normed_sym(m, graph_sum, 1.0 / w)
+        else:
+            md = _laplace_normed(m, graph_sum, 1.0 / w)
+        if form == "function":
+            return md, w.astype(dtype, copy=False)
+        elif form == "lo":
+            m = _linearoperator(md, shape=graph.shape, dtype=dtype)
+            return m, w.astype(dtype, copy=False)
+        else:
+            raise ValueError(f"Invalid form: {form!r}")
+    else:
+        if symmetrized:
+            md = _laplace_sym(m, graph_sum)
+        else:
+            md = _laplace(m, graph_sum)
+        if form == "function":
+            return md, diag.astype(dtype, copy=False)
+        elif form == "lo":
+            m = _linearoperator(md, shape=graph.shape, dtype=dtype)
+            return m, diag.astype(dtype, copy=False)
+        else:
+            raise ValueError(f"Invalid form: {form!r}")
+
+
+def _laplacian_dense(graph, normed, axis, copy, form, dtype, symmetrized):
+
+    if form != "array":
+        raise ValueError(f'Invalid form: {form!r}; must be "array"')
+
+    if dtype is None:
+        dtype = graph.dtype
+
+    if copy:
+        m = np.array(graph)
+    else:
+        m = np.asarray(graph)
+
+    if symmetrized:
+        m += m.T.conj()
+    np.fill_diagonal(m, 0)
+    w = m.sum(axis=axis)
+    if normed:
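+        # Isolated vertices get a unit scaling factor so the divisions
+        # below leave their rows and columns unchanged.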
+        isolated_node_mask = (w == 0)
+        w = np.where(isolated_node_mask, 1, np.sqrt(w))
+        m /= w
+        m /= w[:, np.newaxis]
+        m *= -1
+        _setdiag_dense(m, 1 - isolated_node_mask)
+    else:
+        m *= -1
+        _setdiag_dense(m, w)
+
+    return m.astype(dtype, copy=False), w.astype(dtype, copy=False)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/_validation.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/_validation.py
new file mode 100644
index 00000000..d93060c9
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/_validation.py
@@ -0,0 +1,56 @@
+import numpy as np
+from scipy.sparse import csr_matrix, isspmatrix, isspmatrix_csc
+from ._tools import csgraph_to_dense, csgraph_from_dense,\
+    csgraph_masked_from_dense, csgraph_from_masked
+
+DTYPE = np.float64
+
+
+def validate_graph(csgraph, directed, dtype=DTYPE,
+                   csr_output=True, dense_output=True,
+                   copy_if_dense=False, copy_if_sparse=False,
+                   null_value_in=0, null_value_out=np.inf,
+                   infinity_null=True, nan_null=True):
+    """Routine for validation and conversion of csgraph inputs"""
+    if not (csr_output or dense_output):
+        raise ValueError("Internal: dense or csr output must be true")
+
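+    # Below, any accepted input (dense array, masked array, or sparse
+    # matrix) is converted to either a CSR matrix or a dense array in the
+    # module-wide DTYPE, mapping null (absent) edges between the input and
+    # output null-value conventions. Note that the conversions use DTYPE
+    # rather than the dtype argument.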
+    # if undirected and csc storage, then transposing in-place
+    # is quicker than later converting to csr.
+    if (not directed) and isspmatrix_csc(csgraph):
+        csgraph = csgraph.T
+
+    if isspmatrix(csgraph):
+        if csr_output:
+            csgraph = csr_matrix(csgraph, dtype=DTYPE, copy=copy_if_sparse)
+        else:
+            csgraph = csgraph_to_dense(csgraph, null_value=null_value_out)
+    elif np.ma.isMaskedArray(csgraph):
+        if dense_output:
+            mask = csgraph.mask
+            csgraph = np.array(csgraph.data, dtype=DTYPE, copy=copy_if_dense)
+            csgraph[mask] = null_value_out
+        else:
+            csgraph = csgraph_from_masked(csgraph)
+    else:
+        if dense_output:
+            csgraph = csgraph_masked_from_dense(csgraph,
+                                                copy=copy_if_dense,
+                                                null_value=null_value_in,
+                                                nan_null=nan_null,
+                                                infinity_null=infinity_null)
+            mask = csgraph.mask
+            csgraph = np.asarray(csgraph.data, dtype=DTYPE)
+            csgraph[mask] = null_value_out
+        else:
+            csgraph = csgraph_from_dense(csgraph, null_value=null_value_in,
+                                         infinity_null=infinity_null,
+                                         nan_null=nan_null)
+
+    if csgraph.ndim != 2:
+        raise ValueError("compressed-sparse graph must be 2-D")
+
+    if csgraph.shape[0] != csgraph.shape[1]:
+        raise ValueError("compressed-sparse graph must be shape (N, N)")
+
+    return csgraph
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/setup.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/setup.py
new file mode 100644
index 00000000..32919a72
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/setup.py
@@ -0,0 +1,38 @@
+
+def configuration(parent_package='', top_path=None):
+    import numpy
+    from numpy.distutils.misc_util import Configuration
+
+    config = Configuration('csgraph', parent_package, top_path)
+
+    config.add_data_dir('tests')
+
+    config.add_extension('_shortest_path',
+         sources=['_shortest_path.c'],
+         include_dirs=[numpy.get_include()])
+
+    config.add_extension('_traversal',
+         sources=['_traversal.c'],
+         include_dirs=[numpy.get_include()])
+
+    config.add_extension('_min_spanning_tree',
+         sources=['_min_spanning_tree.c'],
+         include_dirs=[numpy.get_include()])
+
+    config.add_extension('_matching',
+         sources=['_matching.c'],
+         include_dirs=[numpy.get_include()])
+
+    config.add_extension('_flow',
+         sources=['_flow.c'],
+         include_dirs=[numpy.get_include()])
+
+    config.add_extension('_reordering',
+         sources=['_reordering.c'],
+         include_dirs=[numpy.get_include()])
+
+    config.add_extension('_tools',
+         sources=['_tools.c'],
+         include_dirs=[numpy.get_include()])
+
+    return config
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/test_connected_components.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/test_connected_components.py
new file mode 100644
index 00000000..681f29fd
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/test_connected_components.py
@@ -0,0 +1,99 @@
+import numpy as np
+from numpy.testing import assert_equal, assert_array_almost_equal
+from scipy.sparse import csgraph
+
+
+def test_weak_connections():
+    Xde = np.array([[0, 1, 0],
+                    [0, 0, 0],
+                    [0, 0, 0]])
+
+    Xsp = csgraph.csgraph_from_dense(Xde, null_value=0)
+
+    for X in Xsp, Xde:
+        n_components, labels =\
+            csgraph.connected_components(X, directed=True,
+                                         connection='weak')
+
+        assert_equal(n_components, 2)
+        assert_array_almost_equal(labels, [0, 0, 1])
+
+
+def test_strong_connections():
+    X1de = np.array([[0, 1, 0],
+                     [0, 0, 0],
+                     [0, 0, 0]])
+    X2de = X1de + X1de.T
+
+    X1sp = csgraph.csgraph_from_dense(X1de, null_value=0)
+    X2sp = csgraph.csgraph_from_dense(X2de, null_value=0)
+
+    for X in X1sp, X1de:
+        n_components, labels =\
+            csgraph.connected_components(X, directed=True,
+                                         connection='strong')
+
+        assert_equal(n_components, 3)
+        labels.sort()
+        assert_array_almost_equal(labels, [0, 1, 2])
+
+    for X in X2sp, X2de:
+        n_components, labels =\
+            csgraph.connected_components(X, directed=True,
+                                         connection='strong')
+
+        assert_equal(n_components, 2)
+        labels.sort()
+        assert_array_almost_equal(labels, [0, 0, 1])
+
+
+def test_strong_connections2():
+    X = np.array([[0, 0, 0, 0, 0, 0],
+                  [1, 0, 1, 0, 0, 0],
+                  [0, 0, 0, 1, 0, 0],
+                  [0, 0, 1, 0, 1, 0],
+                  [0, 0, 0, 0, 0, 0],
+                  [0, 0, 0, 0, 1, 0]])
+    n_components, labels =\
+        csgraph.connected_components(X, directed=True,
+                                     connection='strong')
+    assert_equal(n_components, 5)
+    labels.sort()
+    assert_array_almost_equal(labels, [0, 1, 2, 2, 3, 4])
+
+
+def test_weak_connections2():
+    X = np.array([[0, 0, 0, 0, 0, 0],
+                  [1, 0, 0, 0, 0, 0],
+                  [0, 0, 0, 1, 0, 0],
+                  [0, 0, 1, 0, 1, 0],
+                  [0, 0, 0, 0, 0, 0],
+                  [0, 0, 0, 0, 1, 0]])
+    n_components, labels =\
+        csgraph.connected_components(X, directed=True,
+                                     connection='weak')
+    assert_equal(n_components, 2)
+    labels.sort()
+    assert_array_almost_equal(labels, [0, 0, 1, 1, 1, 1])
+
+
+def test_ticket1876():
+    # Regression test: this failed in the original implementation
+    # There should be two strongly-connected components; previously gave one
+    g = np.array([[0, 1, 1, 0],
+                  [1, 0, 0, 1],
+                  [0, 0, 0, 1],
+                  [0, 0, 1, 0]])
+    n_components, labels = csgraph.connected_components(g, connection='strong')
+
+    assert_equal(n_components, 2)
+    assert_equal(labels[0], labels[1])
+    assert_equal(labels[2], labels[3])
+
+
+def test_fully_connected_graph():
+    # Fully connected dense matrices raised an exception.
+    # https://github.com/scipy/scipy/issues/3818
+    g = np.ones((4, 4))
+    n_components, labels = csgraph.connected_components(g)
+    assert_equal(n_components, 1)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/test_conversions.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/test_conversions.py
new file mode 100644
index 00000000..080b4966
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/test_conversions.py
@@ -0,0 +1,61 @@
+import numpy as np
+from numpy.testing import assert_array_almost_equal
+from scipy.sparse import csr_matrix
+from scipy.sparse.csgraph import csgraph_from_dense, csgraph_to_dense
+
+
+def test_csgraph_from_dense():
+    np.random.seed(1234)
+    G = np.random.random((10, 10))
+    some_nulls = (G < 0.4)
+    all_nulls = (G < 0.8)
+
+    for null_value in [0, np.nan, np.inf]:
+        G[all_nulls] = null_value
+        with np.errstate(invalid="ignore"):
+            G_csr = csgraph_from_dense(G, null_value=0)
+
+        G[all_nulls] = 0
+        assert_array_almost_equal(G, G_csr.toarray())
+
+    for null_value in [np.nan, np.inf]:
+        G[all_nulls] = 0
+        G[some_nulls] = null_value
+        with np.errstate(invalid="ignore"):
+            G_csr = csgraph_from_dense(G, null_value=0)
+
+        G[all_nulls] = 0
+        assert_array_almost_equal(G, G_csr.toarray())
+
+
+def test_csgraph_to_dense():
+    np.random.seed(1234)
+    G = np.random.random((10, 10))
+    nulls = (G < 0.8)
+    G[nulls] = np.inf
+
+    G_csr = csgraph_from_dense(G)
+
+    for null_value in [0, 10, -np.inf, np.inf]:
+        G[nulls] = null_value
+        assert_array_almost_equal(G, csgraph_to_dense(G_csr, null_value))
+
+
+def test_multiple_edges():
+    # create a random square matrix with an even number of elements
+    np.random.seed(1234)
+    X = np.random.random((10, 10))
+    Xcsr = csr_matrix(X)
+
+    # now double-up every other column
+    Xcsr.indices[::2] = Xcsr.indices[1::2]
+
+    # normal sparse toarray() will sum the duplicated edges
+    Xdense = Xcsr.toarray()
+    assert_array_almost_equal(Xdense[:, 1::2],
+                              X[:, ::2] + X[:, 1::2])
+
+    # csgraph_to_dense chooses the minimum of each duplicated edge
+    Xdense = csgraph_to_dense(Xcsr)
+    assert_array_almost_equal(Xdense[:, 1::2],
+                              np.minimum(X[:, ::2], X[:, 1::2]))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/test_flow.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/test_flow.py
new file mode 100644
index 00000000..2afb4153
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/test_flow.py
@@ -0,0 +1,208 @@
+import numpy as np
+from numpy.testing import assert_array_equal
+import pytest
+
+from scipy.sparse import csr_matrix, csc_matrix
+from scipy.sparse.csgraph import maximum_flow
+from scipy.sparse.csgraph._flow import (
+    _add_reverse_edges, _make_edge_pointers, _make_tails
+)
+
+methods = ['edmonds_karp', 'dinic']
+
+def test_raises_on_dense_input():
+    graph = np.array([[0, 1], [0, 0]])
+    with pytest.raises(TypeError):
+        maximum_flow(graph, 0, 1)
+    with pytest.raises(TypeError):
+        maximum_flow(graph, 0, 1, method='edmonds_karp')
+
+
+def test_raises_on_csc_input():
+    graph = csc_matrix([[0, 1], [0, 0]])
+    with pytest.raises(TypeError):
+        maximum_flow(graph, 0, 1)
+    with pytest.raises(TypeError):
+        maximum_flow(graph, 0, 1, method='edmonds_karp')
+
+
+def test_raises_on_floating_point_input():
+    graph = csr_matrix([[0, 1.5], [0, 0]], dtype=np.float64)
+    with pytest.raises(ValueError):
+        maximum_flow(graph, 0, 1)
+    with pytest.raises(ValueError):
+        maximum_flow(graph, 0, 1, method='edmonds_karp')
+
+
+def test_raises_on_non_square_input():
+    graph = csr_matrix([[0, 1, 2], [2, 1, 0]])
+    with pytest.raises(ValueError):
+        maximum_flow(graph, 0, 1)
+
+
+def test_raises_when_source_is_sink():
+    graph = csr_matrix([[0, 1], [0, 0]])
+    with pytest.raises(ValueError):
+        maximum_flow(graph, 0, 0)
+    with pytest.raises(ValueError):
+        maximum_flow(graph, 0, 0, method='edmonds_karp')
+
+
+@pytest.mark.parametrize('method', methods)
+@pytest.mark.parametrize('source', [-1, 2, 3])
+def test_raises_when_source_is_out_of_bounds(source, method):
+    with pytest.raises(ValueError):
+        graph = csr_matrix([[0, 1], [0, 0]])
+        maximum_flow(graph, source, 1, method=method)
+
+
+@pytest.mark.parametrize('method', methods)
+@pytest.mark.parametrize('sink', [-1, 2, 3])
+def test_raises_when_sink_is_out_of_bounds(sink, method):
+    with pytest.raises(ValueError):
+        graph = csr_matrix([[0, 1], [0, 0]])
+        maximum_flow(graph, 0, sink, method=method)
+
+
+@pytest.mark.parametrize('method', methods)
+def test_simple_graph(method):
+    # This graph looks as follows:
+    #     (0) --5--> (1)
+    graph = csr_matrix([[0, 5], [0, 0]])
+    res = maximum_flow(graph, 0, 1, method=method)
+    assert res.flow_value == 5
+    expected_flow = np.array([[0, 5], [-5, 0]])
+    assert_array_equal(res.flow.toarray(), expected_flow)
+
+
+@pytest.mark.parametrize('method', methods)
+def test_bottle_neck_graph(method):
+    # This graph cannot use the full capacity between 0 and 1:
+    #     (0) --5--> (1) --3--> (2)
+    graph = csr_matrix([[0, 5, 0], [0, 0, 3], [0, 0, 0]])
+    res = maximum_flow(graph, 0, 2, method=method)
+    assert res.flow_value == 3
+    expected_flow = np.array([[0, 3, 0], [-3, 0, 3], [0, -3, 0]])
+    assert_array_equal(res.flow.toarray(), expected_flow)
+
+
+@pytest.mark.parametrize('method', methods)
+def test_backwards_flow(method):
+    # This example causes backwards flow between vertices 3 and 4,
+    # and so this test ensures that we handle that accordingly. See
+    #     https://stackoverflow.com/q/38843963/5085211
+    # for more information.
+    graph = csr_matrix([[0, 10, 0, 0, 10, 0, 0, 0],
+                        [0, 0, 10, 0, 0, 0, 0, 0],
+                        [0, 0, 0, 10, 0, 0, 0, 0],
+                        [0, 0, 0, 0, 0, 0, 0, 10],
+                        [0, 0, 0, 10, 0, 10, 0, 0],
+                        [0, 0, 0, 0, 0, 0, 10, 0],
+                        [0, 0, 0, 0, 0, 0, 0, 10],
+                        [0, 0, 0, 0, 0, 0, 0, 0]])
+    res = maximum_flow(graph, 0, 7, method=method)
+    assert res.flow_value == 20
+    expected_flow = np.array([[0, 10, 0, 0, 10, 0, 0, 0],
+                              [-10, 0, 10, 0, 0, 0, 0, 0],
+                              [0, -10, 0, 10, 0, 0, 0, 0],
+                              [0, 0, -10, 0, 0, 0, 0, 10],
+                              [-10, 0, 0, 0, 0, 10, 0, 0],
+                              [0, 0, 0, 0, -10, 0, 10, 0],
+                              [0, 0, 0, 0, 0, -10, 0, 10],
+                              [0, 0, 0, -10, 0, 0, -10, 0]])
+    assert_array_equal(res.flow.toarray(), expected_flow)
+
+
+@pytest.mark.parametrize('method', methods)
+def test_example_from_clrs_chapter_26_1(method):
+    # See page 659 in CLRS second edition, but note that the maximum flow
+    # we find is slightly different than the one in CLRS; we push a flow of
+    # 12 to v_1 instead of v_2.
+    graph = csr_matrix([[0, 16, 13, 0, 0, 0],
+                        [0, 0, 10, 12, 0, 0],
+                        [0, 4, 0, 0, 14, 0],
+                        [0, 0, 9, 0, 0, 20],
+                        [0, 0, 0, 7, 0, 4],
+                        [0, 0, 0, 0, 0, 0]])
+    res = maximum_flow(graph, 0, 5, method=method)
+    assert res.flow_value == 23
+    expected_flow = np.array([[0, 12, 11, 0, 0, 0],
+                              [-12, 0, 0, 12, 0, 0],
+                              [-11, 0, 0, 0, 11, 0],
+                              [0, -12, 0, 0, -7, 19],
+                              [0, 0, -11, 7, 0, 4],
+                              [0, 0, 0, -19, -4, 0]])
+    assert_array_equal(res.flow.toarray(), expected_flow)
+
+
+@pytest.mark.parametrize('method', methods)
+def test_disconnected_graph(method):
+    # This tests the following disconnected graph:
+    #     (0) --5--> (1)    (2) --3--> (3)
+    graph = csr_matrix([[0, 5, 0, 0],
+                        [0, 0, 0, 0],
+                        [0, 0, 9, 3],
+                        [0, 0, 0, 0]])
+    res = maximum_flow(graph, 0, 3, method=method)
+    assert res.flow_value == 0
+    expected_flow = np.zeros((4, 4), dtype=np.int32)
+    assert_array_equal(res.flow.toarray(), expected_flow)
+
+
+@pytest.mark.parametrize('method', methods)
+def test_add_reverse_edges_large_graph(method):
+    # Regression test for https://github.com/scipy/scipy/issues/14385
+    n = 100_000
+    indices = np.arange(1, n)
+    indptr = np.array(list(range(n)) + [n - 1])
+    data = np.ones(n - 1, dtype=np.int32)
+    graph = csr_matrix((data, indices, indptr), shape=(n, n))
+    res = maximum_flow(graph, 0, n - 1, method=method)
+    assert res.flow_value == 1
+    expected_flow = graph - graph.transpose()
+    assert_array_equal(res.flow.data, expected_flow.data)
+    assert_array_equal(res.flow.indices, expected_flow.indices)
+    assert_array_equal(res.flow.indptr, expected_flow.indptr)
+
+
+def test_residual_raises_deprecation_warning():
+    graph = csr_matrix([[0, 5, 0], [0, 0, 3], [0, 0, 0]])
+    res = maximum_flow(graph, 0, 2)
+    with pytest.deprecated_call():
+        res.residual
+
+
+@pytest.mark.parametrize("a,b_data_expected", [
+    ([[]], []),
+    ([[0], [0]], []),
+    ([[1, 0, 2], [0, 0, 0], [0, 3, 0]], [1, 2, 0, 0, 3]),
+    ([[9, 8, 7], [4, 5, 6], [0, 0, 0]], [9, 8, 7, 4, 5, 6, 0, 0])])
+def test_add_reverse_edges(a, b_data_expected):
+    """Test that the reversal of the edges of the input graph works
+    as expected.
+    """
+    a = csr_matrix(a, dtype=np.int32, shape=(len(a), len(a)))
+    b = _add_reverse_edges(a)
+    assert_array_equal(b.data, b_data_expected)
+
+
+@pytest.mark.parametrize("a,expected", [
+    ([[]], []),
+    ([[0]], []),
+    ([[1]], [0]),
+    ([[0, 1], [10, 0]], [1, 0]),
+    ([[1, 0, 2], [0, 0, 3], [4, 5, 0]], [0, 3, 4, 1, 2])
+])
+def test_make_edge_pointers(a, expected):
+    a = csr_matrix(a, dtype=np.int32)
+    rev_edge_ptr = _make_edge_pointers(a)
+    assert_array_equal(rev_edge_ptr, expected)
+
+
+@pytest.mark.parametrize("a,expected", [
+    ([[]], []),
+    ([[0]], []),
+    ([[1]], [0]),
+    ([[0, 1], [10, 0]], [0, 1]),
+    ([[1, 0, 2], [0, 0, 3], [4, 5, 0]], [0, 0, 1, 2, 2])
+])
+def test_make_tails(a, expected):
+    a = csr_matrix(a, dtype=np.int32)
+    tails = _make_tails(a)
+    assert_array_equal(tails, expected)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/test_graph_laplacian.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/test_graph_laplacian.py
new file mode 100644
index 00000000..e5648ba7
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/test_graph_laplacian.py
@@ -0,0 +1,358 @@
+import pytest
+import numpy as np
+from numpy.testing import assert_allclose
+from pytest import raises as assert_raises
+from scipy import sparse
+
+from scipy.sparse import csgraph
+
+
+def check_int_type(mat):
+    return np.issubdtype(mat.dtype, np.signedinteger) or np.issubdtype(
+        mat.dtype, np.uint
+    )
+
+
+def test_laplacian_value_error():
+    for t in int, float, complex:
+        for m in ([1, 1],
+                  [[[1]]],
+                  [[1, 2, 3], [4, 5, 6]],
+                  [[1, 2], [3, 4], [5, 5]]):
+            A = np.array(m, dtype=t)
+            assert_raises(ValueError, csgraph.laplacian, A)
+
+
+def _explicit_laplacian(x, normed=False):
+    if sparse.issparse(x):
+        x = x.toarray()
+    x = np.asarray(x)
+    y = -1.0 * x
+    for j in range(y.shape[0]):
+        y[j,j] = x[j,j+1:].sum() + x[j,:j].sum()
+    if normed:
+        d = np.diag(y).copy()
+        d[d == 0] = 1.0
+        y /= d[:,None]**.5
+        y /= d[None,:]**.5
+    return y
+
+
+def _check_symmetric_graph_laplacian(mat, normed, copy=True):
+    if not hasattr(mat, 'shape'):
+        mat = eval(mat, dict(np=np, sparse=sparse))
+
+    if sparse.issparse(mat):
+        sp_mat = mat
+        mat = sp_mat.toarray()
+    else:
+        sp_mat = sparse.csr_matrix(mat)
+
+    mat_copy = np.copy(mat)
+    sp_mat_copy = sparse.csr_matrix(sp_mat, copy=True)
+
+    n_nodes = mat.shape[0]
+    explicit_laplacian = _explicit_laplacian(mat, normed=normed)
+    laplacian = csgraph.laplacian(mat, normed=normed, copy=copy)
+    sp_laplacian = csgraph.laplacian(sp_mat, normed=normed,
+                                     copy=copy)
+
+    if copy:
+        assert_allclose(mat, mat_copy)
+        _assert_allclose_sparse(sp_mat, sp_mat_copy)
+    else:
+        if not (normed and check_int_type(mat)):
+            assert_allclose(laplacian, mat)
+            if sp_mat.format == 'coo':
+                _assert_allclose_sparse(sp_laplacian, sp_mat)
+
+    assert_allclose(laplacian, sp_laplacian.toarray())
+
+    for tested in [laplacian, sp_laplacian.toarray()]:
+        if not normed:
+            assert_allclose(tested.sum(axis=0), np.zeros(n_nodes))
+        assert_allclose(tested.T, tested)
+        assert_allclose(tested, explicit_laplacian)
+
+
+def test_symmetric_graph_laplacian():
+    symmetric_mats = (
+        'np.arange(10) * np.arange(10)[:, np.newaxis]',
+        'np.ones((7, 7))',
+        'np.eye(19)',
+        'sparse.diags([1, 1], [-1, 1], shape=(4, 4))',
+        'sparse.diags([1, 1], [-1, 1], shape=(4, 4)).toarray()',
+        'sparse.diags([1, 1], [-1, 1], shape=(4, 4)).todense()',
+        'np.vander(np.arange(4)) + np.vander(np.arange(4)).T'
+    )
+    for mat in symmetric_mats:
+        for normed in True, False:
+            for copy in True, False:
+                _check_symmetric_graph_laplacian(mat, normed, copy)
+
+
+def _assert_allclose_sparse(a, b, **kwargs):
+    # helper function that can deal with sparse matrices
+    if sparse.issparse(a):
+        a = a.toarray()
+    if sparse.issparse(b):
+        b = b.toarray()
+    assert_allclose(a, b, **kwargs)
+
+
+def _check_laplacian_dtype_none(
+    A, desired_L, desired_d, normed, use_out_degree, copy, dtype, arr_type
+):
+    mat = arr_type(A, dtype=dtype)
+    L, d = csgraph.laplacian(
+        mat,
+        normed=normed,
+        return_diag=True,
+        use_out_degree=use_out_degree,
+        copy=copy,
+        dtype=None,
+    )
+    if normed and check_int_type(mat):
+        assert L.dtype == np.float64
+        assert d.dtype == np.float64
+        _assert_allclose_sparse(L, desired_L, atol=1e-12)
+        _assert_allclose_sparse(d, desired_d, atol=1e-12)
+    else:
+        assert L.dtype == dtype
+        assert d.dtype == dtype
+        desired_L = np.asarray(desired_L).astype(dtype)
+        desired_d = np.asarray(desired_d).astype(dtype)
+        _assert_allclose_sparse(L, desired_L, atol=1e-12)
+        _assert_allclose_sparse(d, desired_d, atol=1e-12)
+
+    if not copy:
+        if not (normed and check_int_type(mat)):
+            if type(mat) is np.ndarray:
+                assert_allclose(L, mat)
+            elif mat.format == "coo":
+                _assert_allclose_sparse(L, mat)
+
+
+def _check_laplacian_dtype(
+    A, desired_L, desired_d, normed, use_out_degree, copy, dtype, arr_type
+):
+    mat = arr_type(A, dtype=dtype)
+    L, d = csgraph.laplacian(
+        mat,
+        normed=normed,
+        return_diag=True,
+        use_out_degree=use_out_degree,
+        copy=copy,
+        dtype=dtype,
+    )
+    assert L.dtype == dtype
+    assert d.dtype == dtype
+    desired_L = np.asarray(desired_L).astype(dtype)
+    desired_d = np.asarray(desired_d).astype(dtype)
+    _assert_allclose_sparse(L, desired_L, atol=1e-12)
+    _assert_allclose_sparse(d, desired_d, atol=1e-12)
+
+    if not copy:
+        if not (normed and check_int_type(mat)):
+            if type(mat) is np.ndarray:
+                assert_allclose(L, mat)
+            elif mat.format == 'coo':
+                _assert_allclose_sparse(L, mat)
+
+
+INT_DTYPES = {np.intc, np.int_, np.longlong}
+REAL_DTYPES = {np.single, np.double, np.longdouble}
+COMPLEX_DTYPES = {np.csingle, np.cdouble, np.clongdouble}
+# use sorted tuple to ensure fixed order of tests
+DTYPES = tuple(sorted(INT_DTYPES | REAL_DTYPES | COMPLEX_DTYPES, key=str))
+
+
+@pytest.mark.parametrize("dtype", DTYPES)
+@pytest.mark.parametrize("arr_type", [np.array,
+                                      sparse.csr_matrix,
+                                      sparse.coo_matrix])
+@pytest.mark.parametrize("copy", [True, False])
+@pytest.mark.parametrize("normed", [True, False])
+@pytest.mark.parametrize("use_out_degree", [True, False])
+def test_asymmetric_laplacian(use_out_degree, normed,
+                              copy, dtype, arr_type):
+    # adjacency matrix
+    A = [[0, 1, 0],
+         [4, 2, 0],
+         [0, 0, 0]]
+    A = arr_type(np.array(A), dtype=dtype)
+    A_copy = A.copy()
+
+    if not normed and use_out_degree:
+        # Laplacian matrix using out-degree
+        L = [[1, -1, 0],
+             [-4, 4, 0],
+             [0, 0, 0]]
+        d = [1, 4, 0]
+
+    if normed and use_out_degree:
+        # normalized Laplacian matrix using out-degree
+        L = [[1, -0.5, 0],
+             [-2, 1, 0],
+             [0, 0, 0]]
+        d = [1, 2, 1]
+
+    if not normed and not use_out_degree:
+        # Laplacian matrix using in-degree
+        L = [[4, -1, 0],
+             [-4, 1, 0],
+             [0, 0, 0]]
+        d = [4, 1, 0]
+
+    if normed and not use_out_degree:
+        # normalized Laplacian matrix using in-degree
+        L = [[1, -0.5, 0],
+             [-2, 1, 0],
+             [0, 0, 0]]
+        d = [2, 1, 1]
+
+    _check_laplacian_dtype_none(
+        A,
+        L,
+        d,
+        normed=normed,
+        use_out_degree=use_out_degree,
+        copy=copy,
+        dtype=dtype,
+        arr_type=arr_type,
+    )
+
+    _check_laplacian_dtype(
+        A_copy,
+        L,
+        d,
+        normed=normed,
+        use_out_degree=use_out_degree,
+        copy=copy,
+        dtype=dtype,
+        arr_type=arr_type,
+    )
+
+
+@pytest.mark.parametrize("fmt", ['csr', 'csc', 'coo', 'lil',
+                                 'dok', 'dia', 'bsr'])
+@pytest.mark.parametrize("normed", [True, False])
+@pytest.mark.parametrize("copy", [True, False])
+def test_sparse_formats(fmt, normed, copy):
+    mat = sparse.diags([1, 1], [-1, 1], shape=(4, 4), format=fmt)
+    _check_symmetric_graph_laplacian(mat, normed, copy)
+
+
+@pytest.mark.parametrize(
+    "arr_type", [np.asarray, sparse.csr_matrix, sparse.coo_matrix]
+)
+@pytest.mark.parametrize("form", ["array", "function", "lo"])
+def test_laplacian_symmetrized(arr_type, form):
+    # adjacency matrix
+    n = 3
+    mat = arr_type(np.arange(n * n).reshape(n, n))
+    L_in, d_in = csgraph.laplacian(
+        mat,
+        return_diag=True,
+        form=form,
+    )
+    L_out, d_out = csgraph.laplacian(
+        mat,
+        return_diag=True,
+        use_out_degree=True,
+        form=form,
+    )
+    Ls, ds = csgraph.laplacian(
+        mat,
+        return_diag=True,
+        symmetrized=True,
+        form=form,
+    )
+    Ls_normed, ds_normed = csgraph.laplacian(
+        mat,
+        return_diag=True,
+        symmetrized=True,
+        normed=True,
+        form=form,
+    )
+    mat += mat.T
+    Lss, dss = csgraph.laplacian(mat, return_diag=True, form=form)
+    Lss_normed, dss_normed = csgraph.laplacian(
+        mat,
+        return_diag=True,
+        normed=True,
+        form=form,
+    )
+
+    assert_allclose(ds, d_in + d_out)
+    assert_allclose(ds, dss)
+    assert_allclose(ds_normed, dss_normed)
+
+    d = {}
+    for L in ["L_in", "L_out", "Ls", "Ls_normed", "Lss", "Lss_normed"]:
+        if form == "array":
+            d[L] = eval(L)
+        else:
+            d[L] = eval(L)(np.eye(n, dtype=mat.dtype))
+
+    _assert_allclose_sparse(d["Ls"], d["L_in"] + d["L_out"].T)
+    _assert_allclose_sparse(d["Ls"], d["Lss"])
+    _assert_allclose_sparse(d["Ls_normed"], d["Lss_normed"])
+
+
+@pytest.mark.parametrize(
+    "arr_type", [np.asarray, sparse.csr_matrix, sparse.coo_matrix]
+)
+@pytest.mark.parametrize("dtype", DTYPES)
+@pytest.mark.parametrize("normed", [True, False])
+@pytest.mark.parametrize("symmetrized", [True, False])
+@pytest.mark.parametrize("use_out_degree", [True, False])
+@pytest.mark.parametrize("form", ["function", "lo"])
+def test_format(dtype, arr_type, normed, symmetrized, use_out_degree, form):
+    n = 3
+    mat = [[0, 1, 0], [4, 2, 0], [0, 0, 0]]
+    mat = arr_type(np.array(mat), dtype=dtype)
+    Lo, do = csgraph.laplacian(
+        mat,
+        return_diag=True,
+        normed=normed,
+        symmetrized=symmetrized,
+        use_out_degree=use_out_degree,
+        dtype=dtype,
+    )
+    La, da = csgraph.laplacian(
+        mat,
+        return_diag=True,
+        normed=normed,
+        symmetrized=symmetrized,
+        use_out_degree=use_out_degree,
+        dtype=dtype,
+        form="array",
+    )
+    assert_allclose(do, da)
+    _assert_allclose_sparse(Lo, La)
+
+    L, d = csgraph.laplacian(
+        mat,
+        return_diag=True,
+        normed=normed,
+        symmetrized=symmetrized,
+        use_out_degree=use_out_degree,
+        dtype=dtype,
+        form=form,
+    )
+    assert_allclose(d, do)
+    assert d.dtype == dtype
+    Lm = L(np.eye(n, dtype=mat.dtype)).astype(dtype)
+    _assert_allclose_sparse(Lm, Lo, rtol=2e-7, atol=2e-7)
+    x = np.arange(6).reshape(3, 2)
+    # The normalized array form Lo is cast back to the integer dtype, while
+    # the matrix-free L() is not, so compare only for non-integer cases.
+    if not (normed and dtype in INT_DTYPES):
+        assert_allclose(L(x), Lo @ x)
+
+
+def test_format_error_message():
+    with pytest.raises(ValueError, match="Invalid form: 'toto'"):
+        _ = csgraph.laplacian(np.eye(1), form='toto')
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/test_matching.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/test_matching.py
new file mode 100644
index 00000000..387aa5e0
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/test_matching.py
@@ -0,0 +1,239 @@
+from itertools import product
+
+import numpy as np
+from numpy.testing import assert_array_equal, assert_equal
+import pytest
+
+from scipy.sparse import csr_matrix, coo_matrix, diags
+from scipy.sparse.csgraph import (
+    maximum_bipartite_matching, min_weight_full_bipartite_matching
+)
+
+
+def test_maximum_bipartite_matching_raises_on_dense_input():
+    with pytest.raises(TypeError):
+        graph = np.array([[0, 1], [0, 0]])
+        maximum_bipartite_matching(graph)
+
+
+def test_maximum_bipartite_matching_empty_graph():
+    graph = csr_matrix((0, 0))
+    x = maximum_bipartite_matching(graph, perm_type='row')
+    y = maximum_bipartite_matching(graph, perm_type='column')
+    expected_matching = np.array([])
+    assert_array_equal(expected_matching, x)
+    assert_array_equal(expected_matching, y)
+
+
+def test_maximum_bipartite_matching_empty_left_partition():
+    graph = csr_matrix((2, 0))
+    x = maximum_bipartite_matching(graph, perm_type='row')
+    y = maximum_bipartite_matching(graph, perm_type='column')
+    assert_array_equal(np.array([]), x)
+    assert_array_equal(np.array([-1, -1]), y)
+
+
+def test_maximum_bipartite_matching_empty_right_partition():
+    graph = csr_matrix((0, 3))
+    x = maximum_bipartite_matching(graph, perm_type='row')
+    y = maximum_bipartite_matching(graph, perm_type='column')
+    assert_array_equal(np.array([-1, -1, -1]), x)
+    assert_array_equal(np.array([]), y)
+
+
+def test_maximum_bipartite_matching_graph_with_no_edges():
+    graph = csr_matrix((2, 2))
+    x = maximum_bipartite_matching(graph, perm_type='row')
+    y = maximum_bipartite_matching(graph, perm_type='column')
+    assert_array_equal(np.array([-1, -1]), x)
+    assert_array_equal(np.array([-1, -1]), y)
+
+
+def test_maximum_bipartite_matching_graph_that_causes_augmentation():
+    # In this graph, column 1 is initially assigned to row 1, but it should be
+    # reassigned to make room for row 2.
+    graph = csr_matrix([[1, 1], [1, 0]])
+    x = maximum_bipartite_matching(graph, perm_type='column')
+    y = maximum_bipartite_matching(graph, perm_type='row')
+    expected_matching = np.array([1, 0])
+    assert_array_equal(expected_matching, x)
+    assert_array_equal(expected_matching, y)
+
+
+def test_maximum_bipartite_matching_graph_with_more_rows_than_columns():
+    graph = csr_matrix([[1, 1], [1, 0], [0, 1]])
+    x = maximum_bipartite_matching(graph, perm_type='column')
+    y = maximum_bipartite_matching(graph, perm_type='row')
+    assert_array_equal(np.array([0, -1, 1]), x)
+    assert_array_equal(np.array([0, 2]), y)
+
+
+def test_maximum_bipartite_matching_graph_with_more_columns_than_rows():
+    graph = csr_matrix([[1, 1, 0], [0, 0, 1]])
+    x = maximum_bipartite_matching(graph, perm_type='column')
+    y = maximum_bipartite_matching(graph, perm_type='row')
+    assert_array_equal(np.array([0, 2]), x)
+    assert_array_equal(np.array([0, -1, 1]), y)
+
+
+def test_maximum_bipartite_matching_explicit_zeros_count_as_edges():
+    data = [0, 0]
+    indices = [1, 0]
+    indptr = [0, 1, 2]
+    graph = csr_matrix((data, indices, indptr), shape=(2, 2))
+    x = maximum_bipartite_matching(graph, perm_type='row')
+    y = maximum_bipartite_matching(graph, perm_type='column')
+    expected_matching = np.array([1, 0])
+    assert_array_equal(expected_matching, x)
+    assert_array_equal(expected_matching, y)
+
+
+def test_maximum_bipartite_matching_feasibility_of_result():
+    # This is a regression test for GitHub issue #11458
+    data = np.ones(50, dtype=int)
+    indices = [11, 12, 19, 22, 23, 5, 22, 3, 8, 10, 5, 6, 11, 12, 13, 5, 13,
+               14, 20, 22, 3, 15, 3, 13, 14, 11, 12, 19, 22, 23, 5, 22, 3, 8,
+               10, 5, 6, 11, 12, 13, 5, 13, 14, 20, 22, 3, 15, 3, 13, 14]
+    indptr = [0, 5, 7, 10, 10, 15, 20, 22, 22, 23, 25, 30, 32, 35, 35, 40, 45,
+              47, 47, 48, 50]
+    graph = csr_matrix((data, indices, indptr), shape=(20, 25))
+    x = maximum_bipartite_matching(graph, perm_type='row')
+    y = maximum_bipartite_matching(graph, perm_type='column')
+    assert (x != -1).sum() == 13
+    assert (y != -1).sum() == 13
+    # Ensure that each element of the matching is in fact an edge in the graph.
+    for u, v in zip(range(graph.shape[0]), y):
+        if v != -1:
+            assert graph[u, v]
+    for u, v in zip(x, range(graph.shape[1])):
+        if u != -1:
+            assert graph[u, v]
+
+
+def test_matching_large_random_graph_with_one_edge_incident_to_each_vertex():
+    np.random.seed(42)
+    A = diags(np.ones(25), offsets=0, format='csr')
+    rand_perm = np.random.permutation(25)
+    rand_perm2 = np.random.permutation(25)
+
+    Rrow = np.arange(25)
+    Rcol = rand_perm
+    Rdata = np.ones(25, dtype=int)
+    Rmat = coo_matrix((Rdata, (Rrow, Rcol))).tocsr()
+
+    Crow = rand_perm2
+    Ccol = np.arange(25)
+    Cdata = np.ones(25, dtype=int)
+    Cmat = coo_matrix((Cdata, (Crow, Ccol))).tocsr()
+    # Randomly permute identity matrix
+    B = Rmat * A * Cmat
+
+    # Row permute
+    perm = maximum_bipartite_matching(B, perm_type='row')
+    Rrow = np.arange(25)
+    Rcol = perm
+    Rdata = np.ones(25, dtype=int)
+    Rmat = coo_matrix((Rdata, (Rrow, Rcol))).tocsr()
+    C1 = Rmat * B
+
+    # Column permute
+    perm2 = maximum_bipartite_matching(B, perm_type='column')
+    Crow = perm2
+    Ccol = np.arange(25)
+    Cdata = np.ones(25, dtype=int)
+    Cmat = coo_matrix((Cdata, (Crow, Ccol))).tocsr()
+    C2 = B * Cmat
+
+    # Should get identity matrix back
+    assert_equal(any(C1.diagonal() == 0), False)
+    assert_equal(any(C2.diagonal() == 0), False)
+
+
+@pytest.mark.parametrize('num_rows,num_cols', [(0, 0), (2, 0), (0, 3)])
+def test_min_weight_full_matching_trivial_graph(num_rows, num_cols):
+    biadjacency_matrix = csr_matrix((num_cols, num_rows))
+    row_ind, col_ind = min_weight_full_bipartite_matching(biadjacency_matrix)
+    assert len(row_ind) == 0
+    assert len(col_ind) == 0
+
+
+@pytest.mark.parametrize('biadjacency_matrix',
+                         [
+                            [[1, 1, 1], [1, 0, 0], [1, 0, 0]],
+                            [[1, 1, 1], [0, 0, 1], [0, 0, 1]],
+                            [[1, 0, 0], [2, 0, 0]],
+                            [[0, 1, 0], [0, 2, 0]],
+                            [[1, 0], [2, 0], [5, 0]]
+                         ])
+def test_min_weight_full_matching_infeasible_problems(biadjacency_matrix):
+    with pytest.raises(ValueError):
+        min_weight_full_bipartite_matching(csr_matrix(biadjacency_matrix))
+
+
+def test_explicit_zero_causes_warning():
+    with pytest.warns(UserWarning):
+        biadjacency_matrix = csr_matrix(((2, 0, 3), (0, 1, 1), (0, 2, 3)))
+        min_weight_full_bipartite_matching(biadjacency_matrix)
+
+
+# General test for linear sum assignment solvers to make it possible to rely
+# on the same tests for scipy.optimize.linear_sum_assignment.
+def linear_sum_assignment_assertions(
+    solver, array_type, sign, test_case
+):
+    cost_matrix, expected_cost = test_case
+    maximize = sign == -1
+    cost_matrix = sign * array_type(cost_matrix)
+    expected_cost = sign * np.array(expected_cost)
+
+    row_ind, col_ind = solver(cost_matrix, maximize=maximize)
+    assert_array_equal(row_ind, np.sort(row_ind))
+    assert_array_equal(expected_cost,
+                       np.array(cost_matrix[row_ind, col_ind]).flatten())
+
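+    # The optimal assignment is invariant under transposing the cost matrix,
+    # so re-run the solver on the transpose and compare the sorted costs.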
+    cost_matrix = cost_matrix.T
+    row_ind, col_ind = solver(cost_matrix, maximize=maximize)
+    assert_array_equal(row_ind, np.sort(row_ind))
+    assert_array_equal(np.sort(expected_cost),
+                       np.sort(np.array(
+                           cost_matrix[row_ind, col_ind])).flatten())
+
+
+linear_sum_assignment_test_cases = product(
+    [-1, 1],
+    [
+        # Square
+        ([[400, 150, 400],
+          [400, 450, 600],
+          [300, 225, 300]],
+         [150, 400, 300]),
+
+        # Rectangular variant
+        ([[400, 150, 400, 1],
+          [400, 450, 600, 2],
+          [300, 225, 300, 3]],
+         [150, 2, 300]),
+
+        ([[10, 10, 8],
+          [9, 8, 1],
+          [9, 7, 4]],
+         [10, 1, 7]),
+
+        # Rectangular variant
+        ([[10, 10, 8, 11],
+          [9, 8, 1, 1],
+          [9, 7, 4, 10]],
+         [10, 1, 4]),
+
+        # Square
+        ([[10, float("inf"), float("inf")],
+          [float("inf"), float("inf"), 1],
+          [float("inf"), 7, float("inf")]],
+         [10, 1, 7])
+    ])
+
+
+@pytest.mark.parametrize('sign,test_case', linear_sum_assignment_test_cases)
+def test_min_weight_full_matching_small_inputs(sign, test_case):
+    linear_sum_assignment_assertions(
+        min_weight_full_bipartite_matching, csr_matrix, sign, test_case)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/test_reordering.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/test_reordering.py
new file mode 100644
index 00000000..cb4c002f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/test_reordering.py
@@ -0,0 +1,70 @@
+import numpy as np
+from numpy.testing import assert_equal
+from scipy.sparse.csgraph import reverse_cuthill_mckee, structural_rank
+from scipy.sparse import csc_matrix, csr_matrix, coo_matrix
+
+
+def test_graph_reverse_cuthill_mckee():
+    A = np.array([[1, 0, 0, 0, 1, 0, 0, 0],
+                  [0, 1, 1, 0, 0, 1, 0, 1],
+                  [0, 1, 1, 0, 1, 0, 0, 0],
+                  [0, 0, 0, 1, 0, 0, 1, 0],
+                  [1, 0, 1, 0, 1, 0, 0, 0],
+                  [0, 1, 0, 0, 0, 1, 0, 1],
+                  [0, 0, 0, 1, 0, 0, 1, 0],
+                  [0, 1, 0, 0, 0, 1, 0, 1]], dtype=int)
+
+    graph = csr_matrix(A)
+    perm = reverse_cuthill_mckee(graph)
+    correct_perm = np.array([6, 3, 7, 5, 1, 2, 4, 0])
+    assert_equal(perm, correct_perm)
+
+    # Test int64 indices input
+    graph.indices = graph.indices.astype('int64')
+    graph.indptr = graph.indptr.astype('int64')
+    perm = reverse_cuthill_mckee(graph, True)
+    assert_equal(perm, correct_perm)
+
+
+def test_graph_reverse_cuthill_mckee_ordering():
+    data = np.ones(63, dtype=int)
+    rows = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2,
+                     2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5,
+                     6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9,
+                     9, 10, 10, 10, 10, 10, 11, 11, 11, 11,
+                     12, 12, 12, 13, 13, 13, 13, 14, 14, 14,
+                     14, 15, 15, 15, 15, 15])
+    cols = np.array([0, 2, 5, 8, 10, 1, 3, 9, 11, 0, 2,
+                     7, 10, 1, 3, 11, 4, 6, 12, 14, 0, 7, 13,
+                     15, 4, 6, 14, 2, 5, 7, 15, 0, 8, 10, 13,
+                     1, 9, 11, 0, 2, 8, 10, 15, 1, 3, 9, 11,
+                     4, 12, 14, 5, 8, 13, 15, 4, 6, 12, 14,
+                     5, 7, 10, 13, 15])
+    graph = coo_matrix((data, (rows, cols))).tocsr()
+    perm = reverse_cuthill_mckee(graph)
+    correct_perm = np.array([12, 14, 4, 6, 10, 8, 2, 15,
+                             0, 13, 7, 5, 9, 11, 1, 3])
+    assert_equal(perm, correct_perm)
+
+
+def test_graph_structural_rank():
+    # Test square matrix #1
+    A = csc_matrix([[1, 1, 0],
+                    [1, 0, 1],
+                    [0, 1, 0]])
+    assert_equal(structural_rank(A), 3)
+
+    # Test square matrix #2
+    rows = np.array([0,0,0,0,0,1,1,2,2,3,3,3,3,3,3,4,4,5,5,6,6,7,7])
+    cols = np.array([0,1,2,3,4,2,5,2,6,0,1,3,5,6,7,4,5,5,6,2,6,2,4])
+    data = np.ones_like(rows)
+    B = coo_matrix((data, (rows, cols)), shape=(8, 8))
+    assert_equal(structural_rank(B), 6)
+
+    # Test non-square matrix
+    C = csc_matrix([[1, 0, 2, 0],
+                    [2, 0, 4, 0]])
+    assert_equal(structural_rank(C), 2)
+
+    # Test tall matrix
+    assert_equal(structural_rank(C.T), 2)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/test_shortest_path.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/test_shortest_path.py
new file mode 100644
index 00000000..f745e0fb
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/test_shortest_path.py
@@ -0,0 +1,395 @@
+from io import StringIO
+import warnings
+import numpy as np
+from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose
+from pytest import raises as assert_raises
+from scipy.sparse.csgraph import (shortest_path, dijkstra, johnson,
+                                  bellman_ford, construct_dist_matrix,
+                                  NegativeCycleError)
+import scipy.sparse
+from scipy.io import mmread
+import pytest
+
+directed_G = np.array([[0, 3, 3, 0, 0],
+                       [0, 0, 0, 2, 4],
+                       [0, 0, 0, 0, 0],
+                       [1, 0, 0, 0, 0],
+                       [2, 0, 0, 2, 0]], dtype=float)
+
+undirected_G = np.array([[0, 3, 3, 1, 2],
+                         [3, 0, 0, 2, 4],
+                         [3, 0, 0, 0, 0],
+                         [1, 2, 0, 0, 2],
+                         [2, 4, 0, 2, 0]], dtype=float)
+
+unweighted_G = (directed_G > 0).astype(float)
+
+directed_SP = [[0, 3, 3, 5, 7],
+               [3, 0, 6, 2, 4],
+               [np.inf, np.inf, 0, np.inf, np.inf],
+               [1, 4, 4, 0, 8],
+               [2, 5, 5, 2, 0]]
+
+directed_sparse_zero_G = scipy.sparse.csr_matrix(
+    ([0, 1, 2, 3, 1], ([0, 1, 2, 3, 4], [1, 2, 0, 4, 3])), shape=(5, 5))
+
+directed_sparse_zero_SP = [[0, 0, 1, np.inf, np.inf],
+                           [3, 0, 1, np.inf, np.inf],
+                           [2, 2, 0, np.inf, np.inf],
+                           [np.inf, np.inf, np.inf, 0, 3],
+                           [np.inf, np.inf, np.inf, 1, 0]]
+
+undirected_sparse_zero_G = scipy.sparse.csr_matrix(
+    ([0, 0, 1, 1, 2, 2, 1, 1],
+     ([0, 1, 1, 2, 2, 0, 3, 4], [1, 0, 2, 1, 0, 2, 4, 3])),
+    shape=(5, 5))
+
+undirected_sparse_zero_SP = [[0, 0, 1, np.inf, np.inf],
+                             [0, 0, 1, np.inf, np.inf],
+                             [1, 1, 0, np.inf, np.inf],
+                             [np.inf, np.inf, np.inf, 0, 1],
+                             [np.inf, np.inf, np.inf, 1, 0]]
+
+directed_pred = np.array([[-9999, 0, 0, 1, 1],
+                          [3, -9999, 0, 1, 1],
+                          [-9999, -9999, -9999, -9999, -9999],
+                          [3, 0, 0, -9999, 1],
+                          [4, 0, 0, 4, -9999]], dtype=float)
+
+undirected_SP = np.array([[0, 3, 3, 1, 2],
+                          [3, 0, 6, 2, 4],
+                          [3, 6, 0, 4, 5],
+                          [1, 2, 4, 0, 2],
+                          [2, 4, 5, 2, 0]], dtype=float)
+
+undirected_SP_limit_2 = np.array([[0, np.inf, np.inf, 1, 2],
+                                  [np.inf, 0, np.inf, 2, np.inf],
+                                  [np.inf, np.inf, 0, np.inf, np.inf],
+                                  [1, 2, np.inf, 0, 2],
+                                  [2, np.inf, np.inf, 2, 0]], dtype=float)
+
+undirected_SP_limit_0 = np.ones((5, 5), dtype=float) - np.eye(5)
+undirected_SP_limit_0[undirected_SP_limit_0 > 0] = np.inf
+
+undirected_pred = np.array([[-9999, 0, 0, 0, 0],
+                            [1, -9999, 0, 1, 1],
+                            [2, 0, -9999, 0, 0],
+                            [3, 3, 0, -9999, 3],
+                            [4, 4, 0, 4, -9999]], dtype=float)
+
+directed_negative_weighted_G = np.array([[0, 0, 0],
+                                         [-1, 0, 0],
+                                         [0, -1, 0]], dtype=float)
+
+directed_negative_weighted_SP = np.array([[0, np.inf, np.inf],
+                                          [-1, 0, np.inf],
+                                          [-2, -1, 0]], dtype=float)
+
+methods = ['auto', 'FW', 'D', 'BF', 'J']
+
+
+def test_dijkstra_limit():
+    limits = [0, 2, np.inf]
+    results = [undirected_SP_limit_0,
+               undirected_SP_limit_2,
+               undirected_SP]
+
+    def check(limit, result):
+        SP = dijkstra(undirected_G, directed=False, limit=limit)
+        assert_array_almost_equal(SP, result)
+
+    for limit, result in zip(limits, results):
+        check(limit, result)
+
+
+def test_directed():
+    def check(method):
+        SP = shortest_path(directed_G, method=method, directed=True,
+                           overwrite=False)
+        assert_array_almost_equal(SP, directed_SP)
+
+    for method in methods:
+        check(method)
+
+
+def test_undirected():
+    def check(method, directed_in):
+        if directed_in:
+            SP1 = shortest_path(directed_G, method=method, directed=False,
+                                overwrite=False)
+            assert_array_almost_equal(SP1, undirected_SP)
+        else:
+            SP2 = shortest_path(undirected_G, method=method, directed=True,
+                                overwrite=False)
+            assert_array_almost_equal(SP2, undirected_SP)
+
+    for method in methods:
+        for directed_in in (True, False):
+            check(method, directed_in)
+
+
+def test_directed_sparse_zero():
+    # test directed sparse graph with zero-weight edge and two connected components
+    def check(method):
+        SP = shortest_path(directed_sparse_zero_G, method=method, directed=True,
+                           overwrite=False)
+        assert_array_almost_equal(SP, directed_sparse_zero_SP)
+
+    for method in methods:
+        check(method)
+
+
+def test_undirected_sparse_zero():
+    def check(method, directed_in):
+        if directed_in:
+            SP1 = shortest_path(directed_sparse_zero_G, method=method, directed=False,
+                                overwrite=False)
+            assert_array_almost_equal(SP1, undirected_sparse_zero_SP)
+        else:
+            SP2 = shortest_path(undirected_sparse_zero_G, method=method, directed=True,
+                                overwrite=False)
+            assert_array_almost_equal(SP2, undirected_sparse_zero_SP)
+
+    for method in methods:
+        for directed_in in (True, False):
+            check(method, directed_in)
+
+
+@pytest.mark.parametrize('directed, SP_ans',
+                         ((True, directed_SP),
+                          (False, undirected_SP)))
+@pytest.mark.parametrize('indices', ([0, 2, 4], [0, 4], [3, 4], [0, 0]))
+def test_dijkstra_indices_min_only(directed, SP_ans, indices):
+    SP_ans = np.array(SP_ans)
+    indices = np.array(indices, dtype=np.int64)
+    min_ind_ans = indices[np.argmin(SP_ans[indices, :], axis=0)]
+    min_d_ans = np.zeros(SP_ans.shape[0], SP_ans.dtype)
+    for k in range(SP_ans.shape[0]):
+        min_d_ans[k] = SP_ans[min_ind_ans[k], k]
+    min_ind_ans[np.isinf(min_d_ans)] = -9999
+
+    SP, pred, sources = dijkstra(directed_G,
+                                 directed=directed,
+                                 indices=indices,
+                                 min_only=True,
+                                 return_predecessors=True)
+    assert_array_almost_equal(SP, min_d_ans)
+    assert_array_equal(min_ind_ans, sources)
+    SP = dijkstra(directed_G,
+                  directed=directed,
+                  indices=indices,
+                  min_only=True,
+                  return_predecessors=False)
+    assert_array_almost_equal(SP, min_d_ans)
+
+
+@pytest.mark.parametrize('n', (10, 100, 1000))
+def test_dijkstra_min_only_random(n):
+    np.random.seed(1234)
+    data = scipy.sparse.rand(n, n, density=0.5, format='lil',
+                             random_state=42, dtype=np.float64)
+    data.setdiag(np.zeros(n, dtype=np.bool_))
+    # choose some random vertices
+    v = np.arange(n)
+    np.random.shuffle(v)
+    indices = v[:int(n*.1)]
+    ds, pred, sources = dijkstra(data,
+                                 directed=True,
+                                 indices=indices,
+                                 min_only=True,
+                                 return_predecessors=True)
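+    # Every reachable node's predecessor chain must stay within the tree of
+    # its recorded source and terminate there.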
+    for k in range(n):
+        p = pred[k]
+        s = sources[k]
+        while p != -9999:
+            assert sources[p] == s
+            p = pred[p]
+
+
+def test_dijkstra_random():
+    # reproduces the hang observed in gh-17782
+    n = 10
+    indices = [0, 4, 4, 5, 7, 9, 0, 6, 2, 3, 7, 9, 1, 2, 9, 2, 5, 6]
+    indptr = [0, 0, 2, 5, 6, 7, 8, 12, 15, 18, 18]
+    data = [0.33629, 0.40458, 0.47493, 0.42757, 0.11497, 0.91653, 0.69084,
+            0.64979, 0.62555, 0.743, 0.01724, 0.99945, 0.31095, 0.15557,
+            0.02439, 0.65814, 0.23478, 0.24072]
+    graph = scipy.sparse.csr_matrix((data, indices, indptr), shape=(n, n))
+    dijkstra(graph, directed=True, return_predecessors=True)
+
+
+def test_gh_17782_segfault():
+    text = """%%MatrixMarket matrix coordinate real general
+                84 84 22
+                2 1 4.699999809265137e+00
+                6 14 1.199999973177910e-01
+                9 6 1.199999973177910e-01
+                10 16 2.012000083923340e+01
+                11 10 1.422000026702881e+01
+                12 1 9.645999908447266e+01
+                13 18 2.012000083923340e+01
+                14 13 4.679999828338623e+00
+                15 11 1.199999973177910e-01
+                16 12 1.199999973177910e-01
+                18 15 1.199999973177910e-01
+                32 2 2.299999952316284e+00
+                33 20 6.000000000000000e+00
+                33 32 5.000000000000000e+00
+                36 9 3.720000028610229e+00
+                36 37 3.720000028610229e+00
+                36 38 3.720000028610229e+00
+                37 44 8.159999847412109e+00
+                38 32 7.903999328613281e+01
+                43 20 2.400000000000000e+01
+                43 33 4.000000000000000e+00
+                44 43 6.028000259399414e+01
+    """
+    data = mmread(StringIO(text))
+    dijkstra(data, directed=True, return_predecessors=True)
+
+
+def test_shortest_path_indices():
+    indices = np.arange(4)
+
+    def check(func, indshape):
+        outshape = indshape + (5,)
+        SP = func(directed_G, directed=False,
+                  indices=indices.reshape(indshape))
+        assert_array_almost_equal(SP, undirected_SP[indices].reshape(outshape))
+
+    for indshape in [(4,), (4, 1), (2, 2)]:
+        for func in (dijkstra, bellman_ford, johnson, shortest_path):
+            check(func, indshape)
+
+    assert_raises(ValueError, shortest_path, directed_G, method='FW',
+                  indices=indices)
+
+
+def test_predecessors():
+    SP_res = {True: directed_SP,
+              False: undirected_SP}
+    pred_res = {True: directed_pred,
+                False: undirected_pred}
+
+    def check(method, directed):
+        SP, pred = shortest_path(directed_G, method, directed=directed,
+                                 overwrite=False,
+                                 return_predecessors=True)
+        assert_array_almost_equal(SP, SP_res[directed])
+        assert_array_almost_equal(pred, pred_res[directed])
+
+    for method in methods:
+        for directed in (True, False):
+            check(method, directed)
+
+
+def test_construct_shortest_path():
+    def check(method, directed):
+        SP1, pred = shortest_path(directed_G,
+                                  method=method,
+                                  directed=directed,
+                                  overwrite=False,
+                                  return_predecessors=True)
+        SP2 = construct_dist_matrix(directed_G, pred, directed=directed)
+        assert_array_almost_equal(SP1, SP2)
+
+    for method in methods:
+        for directed in (True, False):
+            check(method, directed)
+
+
+def test_unweighted_path():
+    def check(method, directed):
+        SP1 = shortest_path(directed_G,
+                            directed=directed,
+                            overwrite=False,
+                            unweighted=True)
+        SP2 = shortest_path(unweighted_G,
+                            directed=directed,
+                            overwrite=False,
+                            unweighted=False)
+        assert_array_almost_equal(SP1, SP2)
+
+    for method in methods:
+        for directed in (True, False):
+            check(method, directed)
+
+
+def test_negative_cycles():
+    # create a small graph with a negative cycle
+    graph = np.ones([5, 5])
+    graph.flat[::6] = 0
+    graph[1, 2] = -2
+
+    def check(method, directed):
+        assert_raises(NegativeCycleError, shortest_path, graph, method,
+                      directed)
+
+    for method in ['FW', 'J', 'BF']:
+        for directed in (True, False):
+            check(method, directed)
+
+
+@pytest.mark.parametrize("method", ['FW', 'J', 'BF'])
+def test_negative_weights(method):
+    SP = shortest_path(directed_negative_weighted_G, method, directed=True)
+    assert_allclose(SP, directed_negative_weighted_SP, atol=1e-10)
+
+
+def test_masked_input():
+    # Zeros are masked out, so the masked array carries the same edge
+    # structure as the dense representation above.
+    G_masked = np.ma.masked_equal(directed_G, 0)
+
+    def check(method):
+        SP = shortest_path(G_masked, method=method, directed=True,
+                           overwrite=False)
+        assert_array_almost_equal(SP, directed_SP)
+
+    for method in methods:
+        check(method)
+
+
+def test_overwrite():
+    G = np.array([[0, 3, 3, 1, 2],
+                  [3, 0, 0, 2, 4],
+                  [3, 0, 0, 0, 0],
+                  [1, 2, 0, 0, 2],
+                  [2, 4, 0, 2, 0]], dtype=float)
+    foo = G.copy()
+    shortest_path(foo, overwrite=False)
+    assert_array_equal(foo, G)
+
+
+@pytest.mark.parametrize('method', methods)
+def test_buffer(method):
+    # Smoke test that sparse matrices with read-only buffers (e.g., those from
+    # joblib workers) do not cause::
+    #
+    #     ValueError: buffer source array is read-only
+    #
+    G = scipy.sparse.csr_matrix([[1.]])
+    G.data.flags['WRITEABLE'] = False
+    shortest_path(G, method=method)
+
+
+def test_NaN_warnings():
+    with warnings.catch_warnings(record=True) as record:
+        shortest_path(np.array([[0, 1], [np.nan, 0]]))
+    for r in record:
+        assert r.category is not RuntimeWarning
+
+
+def test_sparse_matrices():
+    # Test that lil, csr and csc sparse matrix inputs do not cause an error
+    G_dense = np.array([[0, 3, 0, 0, 0],
+                        [0, 0, -1, 0, 0],
+                        [0, 0, 0, 2, 0],
+                        [0, 0, 0, 0, 4],
+                        [0, 0, 0, 0, 0]], dtype=float)
+    SP = shortest_path(G_dense)
+    G_csr = scipy.sparse.csr_matrix(G_dense)
+    G_csc = scipy.sparse.csc_matrix(G_dense)
+    G_lil = scipy.sparse.lil_matrix(G_dense)
+    assert_array_almost_equal(SP, shortest_path(G_csr))
+    assert_array_almost_equal(SP, shortest_path(G_csc))
+    assert_array_almost_equal(SP, shortest_path(G_lil))
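+
+
+def test_predecessor_path_reconstruction():
+    # Illustrative companion sketch: walk the predecessor matrix returned by
+    # shortest_path back from a target node and check that the edge weights
+    # along the recovered path sum to the reported distance.  Uses the
+    # convention from the tests above that -9999 marks "no predecessor".
+    SP, pred = shortest_path(directed_G, directed=True,
+                             return_predecessors=True)
+    source, target = 0, 3
+    path = [target]
+    while pred[source, path[-1]] != -9999:
+        path.append(pred[source, path[-1]])
+    path.reverse()
+    assert path[0] == source
+    total = sum(directed_G[u, v] for u, v in zip(path, path[1:]))
+    assert_allclose(total, SP[source, target])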
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/test_spanning_tree.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/test_spanning_tree.py
new file mode 100644
index 00000000..2db338d8
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/test_spanning_tree.py
@@ -0,0 +1,65 @@
+"""Test the minimum spanning tree function"""
+import numpy as np
+from numpy.testing import assert_
+import numpy.testing as npt
+from scipy.sparse import csr_matrix
+from scipy.sparse.csgraph import minimum_spanning_tree
+
+
+def test_minimum_spanning_tree():
+
+    # Create a graph with two connected components.
+    graph = [[0,1,0,0,0],
+             [1,0,0,0,0],
+             [0,0,0,8,5],
+             [0,0,8,0,1],
+             [0,0,5,1,0]]
+    graph = np.asarray(graph)
+
+    # Create the expected spanning tree.
+    expected = [[0,1,0,0,0],
+                [0,0,0,0,0],
+                [0,0,0,0,5],
+                [0,0,0,0,1],
+                [0,0,0,0,0]]
+    expected = np.asarray(expected)
+
+    # Ensure minimum spanning tree code gives this expected output.
+    csgraph = csr_matrix(graph)
+    mintree = minimum_spanning_tree(csgraph)
+    npt.assert_array_equal(mintree.toarray(), expected,
+        'Incorrect spanning tree found.')
+
+    # Ensure that the original graph was not modified.
+    npt.assert_array_equal(csgraph.toarray(), graph,
+        'Original graph was modified.')
+
+    # Now let the algorithm modify the csgraph in place.
+    mintree = minimum_spanning_tree(csgraph, overwrite=True)
+    npt.assert_array_equal(mintree.toarray(), expected,
+        'Graph was not properly modified to contain MST.')
+
+    np.random.seed(1234)
+    for N in (5, 10, 15, 20):
+
+        # Create a random graph.
+        graph = 3 + np.random.random((N, N))
+        csgraph = csr_matrix(graph)
+
+        # The spanning tree has at most N - 1 edges.
+        mintree = minimum_spanning_tree(csgraph)
+        assert_(mintree.nnz < N)
+
+        # Set the sub diagonal to 1 to create a known spanning tree.
+        idx = np.arange(N-1)
+        graph[idx,idx+1] = 1
+        csgraph = csr_matrix(graph)
+        mintree = minimum_spanning_tree(csgraph)
+
+        # We expect to see this pattern in the spanning tree and otherwise
+        # have this zero.
+        expected = np.zeros((N, N))
+        expected[idx, idx+1] = 1
+
+        npt.assert_array_equal(mintree.toarray(), expected,
+            'Incorrect spanning tree found.')
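+
+
+def test_minimum_spanning_tree_total_weight():
+    # Minimal companion sketch: for the two-component graph used above, the
+    # total MST weight is the sum of the surviving edges (1 in the first
+    # component, 5 + 1 in the second).
+    graph = csr_matrix([[0, 1, 0, 0, 0],
+                        [1, 0, 0, 0, 0],
+                        [0, 0, 0, 8, 5],
+                        [0, 0, 8, 0, 1],
+                        [0, 0, 5, 1, 0]])
+    mintree = minimum_spanning_tree(graph)
+    npt.assert_allclose(mintree.sum(), 7)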
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/test_traversal.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/test_traversal.py
new file mode 100644
index 00000000..026fbe27
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/csgraph/tests/test_traversal.py
@@ -0,0 +1,68 @@
+import numpy as np
+from numpy.testing import assert_array_almost_equal
+from scipy.sparse.csgraph import (breadth_first_tree, depth_first_tree,
+    csgraph_to_dense, csgraph_from_dense)
+
+
+def test_graph_breadth_first():
+    csgraph = np.array([[0, 1, 2, 0, 0],
+                        [1, 0, 0, 0, 3],
+                        [2, 0, 0, 7, 0],
+                        [0, 0, 7, 0, 1],
+                        [0, 3, 0, 1, 0]])
+    csgraph = csgraph_from_dense(csgraph, null_value=0)
+
+    bfirst = np.array([[0, 1, 2, 0, 0],
+                       [0, 0, 0, 0, 3],
+                       [0, 0, 0, 7, 0],
+                       [0, 0, 0, 0, 0],
+                       [0, 0, 0, 0, 0]])
+
+    for directed in [True, False]:
+        bfirst_test = breadth_first_tree(csgraph, 0, directed)
+        assert_array_almost_equal(csgraph_to_dense(bfirst_test),
+                                  bfirst)
+
+
+def test_graph_depth_first():
+    csgraph = np.array([[0, 1, 2, 0, 0],
+                        [1, 0, 0, 0, 3],
+                        [2, 0, 0, 7, 0],
+                        [0, 0, 7, 0, 1],
+                        [0, 3, 0, 1, 0]])
+    csgraph = csgraph_from_dense(csgraph, null_value=0)
+
+    dfirst = np.array([[0, 1, 0, 0, 0],
+                       [0, 0, 0, 0, 3],
+                       [0, 0, 0, 0, 0],
+                       [0, 0, 7, 0, 0],
+                       [0, 0, 0, 1, 0]])
+
+    for directed in [True, False]:
+        dfirst_test = depth_first_tree(csgraph, 0, directed)
+        assert_array_almost_equal(csgraph_to_dense(dfirst_test),
+                                  dfirst)
+
+
+def test_graph_breadth_first_trivial_graph():
+    csgraph = np.array([[0]])
+    csgraph = csgraph_from_dense(csgraph, null_value=0)
+
+    bfirst = np.array([[0]])
+
+    for directed in [True, False]:
+        bfirst_test = breadth_first_tree(csgraph, 0, directed)
+        assert_array_almost_equal(csgraph_to_dense(bfirst_test),
+                                  bfirst)
+
+
+def test_graph_depth_first_trivial_graph():
+    csgraph = np.array([[0]])
+    csgraph = csgraph_from_dense(csgraph, null_value=0)
+
+    dfirst = np.array([[0]])
+
+    for directed in [True, False]:
+        dfirst_test = depth_first_tree(csgraph, 0, directed)
+        assert_array_almost_equal(csgraph_to_dense(dfirst_test),
+                                  dfirst)
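+
+
+def test_breadth_first_tree_edge_count():
+    # Companion sketch: a breadth-first tree rooted anywhere in a connected
+    # graph of N nodes spans all N nodes, so it has exactly N - 1 edges.
+    csgraph = np.array([[0, 1, 2, 0, 0],
+                        [1, 0, 0, 0, 3],
+                        [2, 0, 0, 7, 0],
+                        [0, 0, 7, 0, 1],
+                        [0, 3, 0, 1, 0]])
+    csgraph = csgraph_from_dense(csgraph, null_value=0)
+    tree = breadth_first_tree(csgraph, 0, directed=False)
+    assert tree.nnz == csgraph.shape[0] - 1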
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/csr.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/csr.py
new file mode 100644
index 00000000..8c6dc63d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/csr.py
@@ -0,0 +1,36 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.sparse` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _csr
+
+
+__all__ = [  # noqa: F822
+    'csr_count_blocks',
+    'csr_matrix',
+    'csr_tobsr',
+    'csr_tocsc',
+    'get_csr_submatrix',
+    'get_index_dtype',
+    'isspmatrix_csr',
+    'spmatrix',
+    'upcast',
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.sparse.csr is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.sparse instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, "
+                  "the `scipy.sparse.csr` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_csr, name)
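+
+
+# A minimal sketch of the behavior the shim above provides (the exact
+# warning text is defined in __getattr__):
+#
+#     >>> import warnings
+#     >>> from scipy.sparse import csr
+#     >>> with warnings.catch_warnings(record=True) as w:
+#     ...     warnings.simplefilter("always")
+#     ...     _ = csr.csr_matrix
+#     >>> issubclass(w[-1].category, DeprecationWarning)
+#     True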
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/data.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/data.py
new file mode 100644
index 00000000..f720816e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/data.py
@@ -0,0 +1,33 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.sparse` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _data
+
+
+__all__ = [  # noqa: F822
+    'isscalarlike',
+    'matrix',
+    'name',
+    'npfunc',
+    'spmatrix',
+    'validateaxis',
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.sparse.data is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.sparse instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, "
+                  "the `scipy.sparse.data` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_data, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/dia.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/dia.py
new file mode 100644
index 00000000..fc129c98
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/dia.py
@@ -0,0 +1,39 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.sparse` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _dia
+
+
+__all__ = [  # noqa: F822
+    'check_shape',
+    'dia_matrix',
+    'dia_matvec',
+    'get_index_dtype',
+    'get_sum_dtype',
+    'getdtype',
+    'isshape',
+    'isspmatrix',
+    'isspmatrix_dia',
+    'spmatrix',
+    'upcast_char',
+    'validateaxis',
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.sparse.dia is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.sparse instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, "
+                  "the `scipy.sparse.dia` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_dia, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/dok.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/dok.py
new file mode 100644
index 00000000..d4a60ebe
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/dok.py
@@ -0,0 +1,42 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.sparse` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _dok
+
+
+__all__ = [  # noqa: F822
+    'IndexMixin',
+    'check_shape',
+    'dok_matrix',
+    'get_index_dtype',
+    'getdtype',
+    'isdense',
+    'isintlike',
+    'isscalarlike',
+    'isshape',
+    'isspmatrix',
+    'isspmatrix_dok',
+    'itertools',
+    'spmatrix',
+    'upcast',
+    'upcast_scalar',
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.sparse.dok is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.sparse instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, "
+                  "the `scipy.sparse.dok` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_dok, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/extract.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/extract.py
new file mode 100644
index 00000000..ebe920e7
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/extract.py
@@ -0,0 +1,31 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.sparse` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _extract
+
+
+__all__ = [  # noqa: F822
+    'coo_matrix',
+    'find',
+    'tril',
+    'triu',
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.sparse.extract is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.sparse instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, "
+                  "the `scipy.sparse.extract` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_extract, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/lil.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/lil.py
new file mode 100644
index 00000000..e354393c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/lil.py
@@ -0,0 +1,41 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.sparse` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _lil
+
+
+__all__ = [  # noqa: F822
+    'INT_TYPES',
+    'IndexMixin',
+    'bisect_left',
+    'check_reshape_kwargs',
+    'check_shape',
+    'get_index_dtype',
+    'getdtype',
+    'isscalarlike',
+    'isshape',
+    'isspmatrix',
+    'isspmatrix_lil',
+    'lil_matrix',
+    'spmatrix',
+    'upcast_scalar',
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.sparse.lil is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.sparse instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, "
+                  "the `scipy.sparse.lil` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_lil, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/__init__.py
new file mode 100644
index 00000000..23337272
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/__init__.py
@@ -0,0 +1,136 @@
+"""
+Sparse linear algebra (:mod:`scipy.sparse.linalg`)
+==================================================
+
+.. currentmodule:: scipy.sparse.linalg
+
+Abstract linear operators
+-------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   LinearOperator -- abstract representation of a linear operator
+   aslinearoperator -- convert an object to an abstract linear operator
+
+Matrix Operations
+-----------------
+
+.. autosummary::
+   :toctree: generated/
+
+   inv -- compute the sparse matrix inverse
+   expm -- compute the sparse matrix exponential
+   expm_multiply -- compute the product of a matrix exponential and a matrix
+
+Matrix norms
+------------
+
+.. autosummary::
+   :toctree: generated/
+
+   norm -- Norm of a sparse matrix
+   onenormest -- Estimate the 1-norm of a sparse matrix
+
+Solving linear problems
+-----------------------
+
+Direct methods for linear equation systems:
+
+.. autosummary::
+   :toctree: generated/
+
+   spsolve -- Solve the sparse linear system Ax=b
+   spsolve_triangular -- Solve the sparse linear system Ax=b for a triangular matrix
+   factorized -- Pre-factorize matrix to a function solving a linear system
+   MatrixRankWarning -- Warning on exactly singular matrices
+   use_solver -- Select direct solver to use
+
+Iterative methods for linear equation systems:
+
+.. autosummary::
+   :toctree: generated/
+
+   bicg -- Use BIConjugate Gradient iteration to solve A x = b
+   bicgstab -- Use BIConjugate Gradient STABilized iteration to solve A x = b
+   cg -- Use Conjugate Gradient iteration to solve A x = b
+   cgs -- Use Conjugate Gradient Squared iteration to solve A x = b
+   gmres -- Use Generalized Minimal RESidual iteration to solve A x = b
+   lgmres -- Solve a matrix equation using the LGMRES algorithm
+   minres -- Use MINimum RESidual iteration to solve Ax = b
+   qmr -- Use Quasi-Minimal Residual iteration to solve A x = b
+   gcrotmk -- Solve a matrix equation using the GCROT(m,k) algorithm
+   tfqmr -- Use Transpose-Free Quasi-Minimal Residual iteration to solve A x = b
+
+Iterative methods for least-squares problems:
+
+.. autosummary::
+   :toctree: generated/
+
+   lsqr -- Find the least-squares solution to a sparse linear equation system
+   lsmr -- Find the least-squares solution to a sparse linear equation system
+
+Matrix factorizations
+---------------------
+
+Eigenvalue problems:
+
+.. autosummary::
+   :toctree: generated/
+
+   eigs -- Find k eigenvalues and eigenvectors of the square matrix A
+   eigsh -- Find k eigenvalues and eigenvectors of a symmetric matrix
+   lobpcg -- Solve symmetric partial eigenproblems with optional preconditioning
+
+Singular value problems:
+
+.. autosummary::
+   :toctree: generated/
+
+   svds -- Compute k singular values/vectors for a sparse matrix
+
+The `svds` function supports the following solvers:
+
+.. toctree::
+
+    sparse.linalg.svds-arpack
+    sparse.linalg.svds-lobpcg
+    sparse.linalg.svds-propack
+
+Complete or incomplete LU factorizations:
+
+.. autosummary::
+   :toctree: generated/
+
+   splu -- Compute an LU decomposition for a sparse matrix
+   spilu -- Compute an incomplete LU decomposition for a sparse matrix
+   SuperLU -- Object representing an LU factorization
+
+Exceptions
+----------
+
+.. autosummary::
+   :toctree: generated/
+
+   ArpackNoConvergence
+   ArpackError
+
+"""
+
+from ._isolve import *
+from ._dsolve import *
+from ._interface import *
+from ._eigen import *
+from ._matfuncs import *
+from ._onenormest import *
+from ._norm import *
+from ._expm_multiply import *
+
+# Deprecated namespaces, to be removed in v2.0.0
+from . import isolve, dsolve, interface, eigen, matfuncs
+
+__all__ = [s for s in dir() if not s.startswith('_')]
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
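+
+# A minimal usage sketch for the iterative solvers listed above; ``cg``
+# applies here because the tridiagonal matrix is symmetric positive definite:
+#
+#     >>> import numpy as np
+#     >>> from scipy.sparse import diags
+#     >>> from scipy.sparse.linalg import cg
+#     >>> A = diags([-1, 2, -1], [-1, 0, 1], shape=(5, 5), format='csr')
+#     >>> b = np.ones(5)
+#     >>> x, info = cg(A, b)
+#     >>> info  # 0 means the iteration converged
+#     0
+#     >>> bool(np.allclose(A @ x, b))
+#     True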
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_dsolve/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_dsolve/__init__.py
new file mode 100644
index 00000000..daeef045
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_dsolve/__init__.py
@@ -0,0 +1,71 @@
+"""
+Linear Solvers
+==============
+
+The default solver is SuperLU (included in the scipy distribution),
+which can solve real or complex linear systems in both single and
+double precision.  It is automatically replaced by UMFPACK, if
+available.  Note that UMFPACK works in double precision only, so
+switch it off by::
+
+    >>> use_solver(useUmfpack=False)
+
+to solve in single precision. See also the use_solver documentation.
+
+Example session::
+
+    >>> from scipy.sparse import csc_matrix, spdiags
+    >>> from numpy import array
+    >>> from scipy.sparse.linalg import spsolve, use_solver
+    >>>
+    >>> print("Inverting a sparse linear system:")
+    >>> print("The sparse matrix (constructed from diagonals):")
+    >>> a = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5)
+    >>> b = array([1, 2, 3, 4, 5])
+    >>> print("Solve: single precision complex:")
+    >>> use_solver( useUmfpack = False )
+    >>> a = a.astype('F')
+    >>> x = spsolve(a, b)
+    >>> print(x)
+    >>> print("Error: ", a@x-b)
+    >>>
+    >>> print("Solve: double precision complex:")
+    >>> use_solver( useUmfpack = True )
+    >>> a = a.astype('D')
+    >>> x = spsolve(a, b)
+    >>> print(x)
+    >>> print("Error: ", a@x-b)
+    >>>
+    >>> print("Solve: double precision:")
+    >>> a = a.astype('d')
+    >>> x = spsolve(a, b)
+    >>> print(x)
+    >>> print("Error: ", a@x-b)
+    >>>
+    >>> print("Solve: single precision:")
+    >>> use_solver( useUmfpack = False )
+    >>> a = a.astype('f')
+    >>> x = spsolve(a, b.astype('f'))
+    >>> print(x)
+    >>> print("Error: ", a@x-b)
+
+"""
+
+#import umfpack
+#__doc__ = '\n\n'.join( (__doc__,  umfpack.__doc__) )
+#del umfpack
+
+from .linsolve import *
+from ._superlu import SuperLU
+from . import _add_newdocs
+from . import linsolve
+
+__all__ = [
+    'MatrixRankWarning', 'SuperLU', 'factorized',
+    'spilu', 'splu', 'spsolve',
+    'spsolve_triangular', 'use_solver'
+]
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_dsolve/_add_newdocs.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_dsolve/_add_newdocs.py
new file mode 100644
index 00000000..c7ecd3df
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_dsolve/_add_newdocs.py
@@ -0,0 +1,152 @@
+from numpy.lib import add_newdoc
+
+add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU',
+    """
+    LU factorization of a sparse matrix.
+
+    Factorization is represented as::
+
+        Pr @ A @ Pc = L @ U
+
+    To construct these `SuperLU` objects, call the `splu` and `spilu`
+    functions.
+
+    Attributes
+    ----------
+    shape
+    nnz
+    perm_c
+    perm_r
+    L
+    U
+
+    Methods
+    -------
+    solve
+
+    Notes
+    -----
+
+    .. versionadded:: 0.14.0
+
+    Examples
+    --------
+    The LU decomposition can be used to solve matrix equations. Consider:
+
+    >>> import numpy as np
+    >>> from scipy.sparse import csc_matrix, linalg as sla
+    >>> A = csc_matrix([[1,2,0,4],[1,0,0,1],[1,0,2,1],[2,2,1,0.]])
+
+    This can be solved for a given right-hand side:
+
+    >>> lu = sla.splu(A)
+    >>> b = np.array([1, 2, 3, 4])
+    >>> x = lu.solve(b)
+    >>> A.dot(x)
+    array([ 1.,  2.,  3.,  4.])
+
+    The ``lu`` object also contains an explicit representation of the
+    decomposition. The permutations are represented as mappings of
+    indices:
+
+    >>> lu.perm_r
+    array([0, 2, 1, 3], dtype=int32)
+    >>> lu.perm_c
+    array([2, 0, 1, 3], dtype=int32)
+
+    The L and U factors are sparse matrices in CSC format:
+
+    >>> lu.L.A
+    array([[ 1. ,  0. ,  0. ,  0. ],
+           [ 0. ,  1. ,  0. ,  0. ],
+           [ 0. ,  0. ,  1. ,  0. ],
+           [ 1. ,  0.5,  0.5,  1. ]])
+    >>> lu.U.A
+    array([[ 2.,  0.,  1.,  4.],
+           [ 0.,  2.,  1.,  1.],
+           [ 0.,  0.,  1.,  1.],
+           [ 0.,  0.,  0., -5.]])
+
+    The permutation matrices can be constructed:
+
+    >>> Pr = csc_matrix((np.ones(4), (lu.perm_r, np.arange(4))))
+    >>> Pc = csc_matrix((np.ones(4), (np.arange(4), lu.perm_c)))
+
+    We can reassemble the original matrix:
+
+    >>> (Pr.T @ (lu.L @ lu.U) @ Pc.T).A
+    array([[ 1.,  2.,  0.,  4.],
+           [ 1.,  0.,  0.,  1.],
+           [ 1.,  0.,  2.,  1.],
+           [ 2.,  2.,  1.,  0.]])
+    """)
+
+add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('solve',
+    """
+    solve(rhs[, trans])
+
+    Solves a linear system of equations with one or several right-hand sides.
+
+    Parameters
+    ----------
+    rhs : ndarray, shape (n,) or (n, k)
+        Right hand side(s) of equation
+    trans : {'N', 'T', 'H'}, optional
+        Type of system to solve::
+
+            'N':   A   @ x == rhs  (default)
+            'T':   A^T @ x == rhs
+            'H':   A^H @ x == rhs
+
+        i.e., normal, transposed, and Hermitian conjugate.
+
+    Returns
+    -------
+    x : ndarray, shape ``rhs.shape``
+        Solution vector(s)
+    """))
+
+add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('L',
+    """
+    Lower triangular factor with unit diagonal as a
+    `scipy.sparse.csc_matrix`.
+
+    .. versionadded:: 0.14.0
+    """))
+
+add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('U',
+    """
+    Upper triangular factor as a `scipy.sparse.csc_matrix`.
+
+    .. versionadded:: 0.14.0
+    """))
+
+add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('shape',
+    """
+    Shape of the original matrix as a tuple of ints.
+    """))
+
+add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('nnz',
+    """
+    Number of nonzero elements in the matrix.
+    """))
+
+add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('perm_c',
+    """
+    Permutation Pc represented as an array of indices.
+
+    The column permutation matrix can be reconstructed via:
+
+    >>> Pc = np.zeros((n, n))
+    >>> Pc[np.arange(n), perm_c] = 1
+    """))
+
+add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('perm_r',
+    """
+    Permutation Pr represented as an array of indices.
+
+    The row permutation matrix can be reconstructed via:
+
+    >>> Pr = np.zeros((n, n))
+    >>> Pr[perm_r, np.arange(n)] = 1
+    """))
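+
+# A small self-check sketch tying the ``perm_r``/``perm_c`` recipes above to
+# the factorization identity ``Pr @ A @ Pc = L @ U`` (assumes ``import numpy
+# as np`` and ``lu = splu(A)`` for some n x n csc_matrix ``A``):
+#
+#     >>> Pr = np.zeros((n, n)); Pr[lu.perm_r, np.arange(n)] = 1
+#     >>> Pc = np.zeros((n, n)); Pc[np.arange(n), lu.perm_c] = 1
+#     >>> np.allclose(Pr @ A.toarray() @ Pc, (lu.L @ lu.U).toarray())
+#     True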
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_dsolve/linsolve.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_dsolve/linsolve.py
new file mode 100644
index 00000000..6be4aff7
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_dsolve/linsolve.py
@@ -0,0 +1,715 @@
+from warnings import warn
+
+import numpy as np
+from numpy import asarray
+from scipy.sparse import (isspmatrix_csc, isspmatrix_csr, isspmatrix,
+                          SparseEfficiencyWarning, csc_matrix, csr_matrix)
+from scipy.sparse._sputils import is_pydata_spmatrix
+from scipy.linalg import LinAlgError
+import copy
+
+from . import _superlu
+
+noScikit = False
+try:
+    import scikits.umfpack as umfpack
+except ImportError:
+    noScikit = True
+
+useUmfpack = not noScikit
+
+__all__ = ['use_solver', 'spsolve', 'splu', 'spilu', 'factorized',
+           'MatrixRankWarning', 'spsolve_triangular']
+
+
+class MatrixRankWarning(UserWarning):
+    pass
+
+
+def use_solver(**kwargs):
+    """
+    Select default sparse direct solver to be used.
+
+    Parameters
+    ----------
+    useUmfpack : bool, optional
+        Use UMFPACK [1]_, [2]_, [3]_, [4]_ over SuperLU. Has effect only
+        if ``scikits.umfpack`` is installed. Default: True
+    assumeSortedIndices : bool, optional
+        Allow UMFPACK to skip the step of sorting indices for a CSR/CSC matrix.
+        Has effect only if useUmfpack is True and ``scikits.umfpack`` is
+        installed. Default: False
+
+    Notes
+    -----
+    The default sparse solver is UMFPACK when available
+    (``scikits.umfpack`` is installed). This can be changed by passing
+    ``useUmfpack=False``, which then causes the always-available
+    SuperLU-based solver to be used.
+
+    UMFPACK requires a CSR/CSC matrix to have sorted column/row indices. If
+    sure that the matrix fulfills this, pass ``assumeSortedIndices=True``
+    to gain some speed.
+
+    References
+    ----------
+    .. [1] T. A. Davis, Algorithm 832:  UMFPACK - an unsymmetric-pattern
+           multifrontal method with a column pre-ordering strategy, ACM
+           Trans. on Mathematical Software, 30(2), 2004, pp. 196--199.
+           https://dl.acm.org/doi/abs/10.1145/992200.992206
+
+    .. [2] T. A. Davis, A column pre-ordering strategy for the
+           unsymmetric-pattern multifrontal method, ACM Trans.
+           on Mathematical Software, 30(2), 2004, pp. 165--195.
+           https://dl.acm.org/doi/abs/10.1145/992200.992205
+
+    .. [3] T. A. Davis and I. S. Duff, A combined unifrontal/multifrontal
+           method for unsymmetric sparse matrices, ACM Trans. on
+           Mathematical Software, 25(1), 1999, pp. 1--19.
+           https://doi.org/10.1145/305658.287640
+
+    .. [4] T. A. Davis and I. S. Duff, An unsymmetric-pattern multifrontal
+           method for sparse LU factorization, SIAM J. Matrix Analysis and
+           Computations, 18(1), 1997, pp. 140--158.
+           https://doi.org/10.1137/S0895479894246905
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.sparse.linalg import use_solver, spsolve
+    >>> from scipy.sparse import csc_matrix
+    >>> R = np.random.randn(5, 5)
+    >>> A = csc_matrix(R)
+    >>> b = np.random.randn(5)
+    >>> use_solver(useUmfpack=False) # enforce superLU over UMFPACK
+    >>> x = spsolve(A, b)
+    >>> np.allclose(A.dot(x), b)
+    True
+    >>> use_solver(useUmfpack=True) # reset umfPack usage to default
+    """
+    if 'useUmfpack' in kwargs:
+        globals()['useUmfpack'] = kwargs['useUmfpack']
+    if useUmfpack and 'assumeSortedIndices' in kwargs:
+        umfpack.configure(assumeSortedIndices=kwargs['assumeSortedIndices'])
+
+def _get_umf_family(A):
+    """Get umfpack family string given the sparse matrix dtype."""
+    _families = {
+        (np.float64, np.int32): 'di',
+        (np.complex128, np.int32): 'zi',
+        (np.float64, np.int64): 'dl',
+        (np.complex128, np.int64): 'zl'
+    }
+
+    f_type = np.sctypeDict[A.dtype.name]
+    i_type = np.sctypeDict[A.indices.dtype.name]
+
+    try:
+        family = _families[(f_type, i_type)]
+
+    except KeyError as e:
+        msg = 'only float64 or complex128 matrices with int32 or int64' \
+            ' indices are supported! (got: matrix: %s, indices: %s)' \
+            % (f_type, i_type)
+        raise ValueError(msg) from e
+
+    # See gh-8278. Considered converting only if
+    # A.shape[0]*A.shape[1] > np.iinfo(np.int32).max,
+    # but that didn't always fix the issue.
+    family = family[0] + "l"
+    A_new = copy.copy(A)
+    A_new.indptr = np.array(A.indptr, copy=False, dtype=np.int64)
+    A_new.indices = np.array(A.indices, copy=False, dtype=np.int64)
+
+    return family, A_new
+
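+# Note: a float64 matrix with int32 indices maps to family 'di' above, which
+# the gh-8278 workaround then widens to 'dl' (int64 indices).
+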
+def spsolve(A, b, permc_spec=None, use_umfpack=True):
+    """Solve the sparse linear system Ax=b, where b may be a vector or a matrix.
+
+    Parameters
+    ----------
+    A : ndarray or sparse matrix
+        The square matrix A will be converted into CSC or CSR form
+    b : ndarray or sparse matrix
+        The matrix or vector representing the right hand side of the equation.
+        If a vector, b.shape must be (n,) or (n, 1).
+    permc_spec : str, optional
+        How to permute the columns of the matrix for sparsity preservation.
+        (default: 'COLAMD')
+
+        - ``NATURAL``: natural ordering.
+        - ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
+        - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
+        - ``COLAMD``: approximate minimum degree column ordering [1]_, [2]_.
+
+    use_umfpack : bool, optional
+        if True (default) then use UMFPACK for the solution [3]_, [4]_, [5]_,
+        [6]_ . This is only referenced if b is a vector and
+        ``scikits.umfpack`` is installed.
+
+    Returns
+    -------
+    x : ndarray or sparse matrix
+        the solution of the sparse linear equation.
+        If b is a vector, then x is a vector of size A.shape[1]
+        If b is a matrix, then x is a matrix of size (A.shape[1], b.shape[1])
+
+    Notes
+    -----
+    For solving the matrix expression AX = B, this solver assumes the resulting
+    matrix X is sparse, as is often the case for very sparse inputs.  If the
+    resulting X is dense, the construction of this sparse result will be
+    relatively expensive.  In that case, consider converting A to a dense
+    matrix and using scipy.linalg.solve or its variants.
+
+    References
+    ----------
+    .. [1] T. A. Davis, J. R. Gilbert, S. Larimore, E. Ng, Algorithm 836:
+           COLAMD, an approximate column minimum degree ordering algorithm,
+           ACM Trans. on Mathematical Software, 30(3), 2004, pp. 377--380.
+           :doi:`10.1145/1024074.1024080`
+
+    .. [2] T. A. Davis, J. R. Gilbert, S. Larimore, E. Ng, A column approximate
+           minimum degree ordering algorithm, ACM Trans. on Mathematical
+           Software, 30(3), 2004, pp. 353--376. :doi:`10.1145/1024074.1024079`
+
+    .. [3] T. A. Davis, Algorithm 832:  UMFPACK - an unsymmetric-pattern
+           multifrontal method with a column pre-ordering strategy, ACM
+           Trans. on Mathematical Software, 30(2), 2004, pp. 196--199.
+           https://dl.acm.org/doi/abs/10.1145/992200.992206
+
+    .. [4] T. A. Davis, A column pre-ordering strategy for the
+           unsymmetric-pattern multifrontal method, ACM Trans.
+           on Mathematical Software, 30(2), 2004, pp. 165--195.
+           https://dl.acm.org/doi/abs/10.1145/992200.992205
+
+    .. [5] T. A. Davis and I. S. Duff, A combined unifrontal/multifrontal
+           method for unsymmetric sparse matrices, ACM Trans. on
+           Mathematical Software, 25(1), 1999, pp. 1--19.
+           https://doi.org/10.1145/305658.287640
+
+    .. [6] T. A. Davis and I. S. Duff, An unsymmetric-pattern multifrontal
+           method for sparse LU factorization, SIAM J. Matrix Analysis and
+           Computations, 18(1), 1997, pp. 140--158.
+           https://doi.org/10.1137/S0895479894246905
+
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.sparse import csc_matrix
+    >>> from scipy.sparse.linalg import spsolve
+    >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
+    >>> B = csc_matrix([[2, 0], [-1, 0], [2, 0]], dtype=float)
+    >>> x = spsolve(A, B)
+    >>> np.allclose(A.dot(x).toarray(), B.toarray())
+    True
+    """
+
+    if is_pydata_spmatrix(A):
+        A = A.to_scipy_sparse().tocsc()
+
+    if not (isspmatrix_csc(A) or isspmatrix_csr(A)):
+        A = csc_matrix(A)
+        warn('spsolve requires A be CSC or CSR matrix format',
+                SparseEfficiencyWarning)
+
+    # b is a vector only if b has shape (n,) or (n, 1)
+    b_is_sparse = isspmatrix(b) or is_pydata_spmatrix(b)
+    if not b_is_sparse:
+        b = asarray(b)
+    b_is_vector = ((b.ndim == 1) or (b.ndim == 2 and b.shape[1] == 1))
+
+    # sum duplicates for non-canonical format
+    A.sum_duplicates()
+    A = A.asfptype()  # upcast to a floating point format
+    result_dtype = np.promote_types(A.dtype, b.dtype)
+    if A.dtype != result_dtype:
+        A = A.astype(result_dtype)
+    if b.dtype != result_dtype:
+        b = b.astype(result_dtype)
+
+    # validate input shapes
+    M, N = A.shape
+    if (M != N):
+        raise ValueError("matrix must be square (has shape %s)" % ((M, N),))
+
+    if M != b.shape[0]:
+        raise ValueError("matrix - rhs dimension mismatch (%s - %s)"
+                         % (A.shape, b.shape[0]))
+
+    use_umfpack = use_umfpack and useUmfpack
+
+    if b_is_vector and use_umfpack:
+        if b_is_sparse:
+            b_vec = b.toarray()
+        else:
+            b_vec = b
+        b_vec = asarray(b_vec, dtype=A.dtype).ravel()
+
+        if noScikit:
+            raise RuntimeError('Scikits.umfpack not installed.')
+
+        if A.dtype.char not in 'dD':
+            raise ValueError("convert matrix data to double, please, using"
+                  " .astype(), or set linsolve.useUmfpack = False")
+
+        umf_family, A = _get_umf_family(A)
+        umf = umfpack.UmfpackContext(umf_family)
+        x = umf.linsolve(umfpack.UMFPACK_A, A, b_vec,
+                         autoTranspose=True)
+    else:
+        if b_is_vector and b_is_sparse:
+            b = b.toarray()
+            b_is_sparse = False
+
+        if not b_is_sparse:
+            if isspmatrix_csc(A):
+                flag = 1  # CSC format
+            else:
+                flag = 0  # CSR format
+
+            options = dict(ColPerm=permc_spec)
+            x, info = _superlu.gssv(N, A.nnz, A.data, A.indices, A.indptr,
+                                    b, flag, options=options)
+            if info != 0:
+                warn("Matrix is exactly singular", MatrixRankWarning)
+                x.fill(np.nan)
+            if b_is_vector:
+                x = x.ravel()
+        else:
+            # b is sparse
+            Afactsolve = factorized(A)
+
+            if not (isspmatrix_csc(b) or is_pydata_spmatrix(b)):
+                warn('spsolve is more efficient when sparse b '
+                     'is in the CSC matrix format', SparseEfficiencyWarning)
+                b = csc_matrix(b)
+
+            # Create a sparse output matrix by repeatedly applying
+            # the sparse factorization to solve columns of b.
+            data_segs = []
+            row_segs = []
+            col_segs = []
+            for j in range(b.shape[1]):
+                # TODO: replace this with
+                # bj = b[:, j].toarray().ravel()
+                # once 1D sparse arrays are supported.
+                # That is a slightly faster code path.
+                bj = b[:, [j]].toarray().ravel()
+                xj = Afactsolve(bj)
+                w = np.flatnonzero(xj)
+                segment_length = w.shape[0]
+                row_segs.append(w)
+                col_segs.append(np.full(segment_length, j, dtype=int))
+                data_segs.append(np.asarray(xj[w], dtype=A.dtype))
+            sparse_data = np.concatenate(data_segs)
+            sparse_row = np.concatenate(row_segs)
+            sparse_col = np.concatenate(col_segs)
+            x = A.__class__((sparse_data, (sparse_row, sparse_col)),
+                           shape=b.shape, dtype=A.dtype)
+
+            if is_pydata_spmatrix(b):
+                x = b.__class__(x)
+
+    return x
+
+
+def splu(A, permc_spec=None, diag_pivot_thresh=None,
+         relax=None, panel_size=None, options=dict()):
+    """
+    Compute the LU decomposition of a sparse, square matrix.
+
+    Parameters
+    ----------
+    A : sparse matrix
+        Sparse matrix to factorize. Most efficient when provided in CSC
+        format. Other formats will be converted to CSC before factorization.
+    permc_spec : str, optional
+        How to permute the columns of the matrix for sparsity preservation.
+        (default: 'COLAMD')
+
+        - ``NATURAL``: natural ordering.
+        - ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
+        - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
+        - ``COLAMD``: approximate minimum degree column ordering
+
+    diag_pivot_thresh : float, optional
+        Threshold used for a diagonal entry to be an acceptable pivot.
+        See SuperLU user's guide for details [1]_
+    relax : int, optional
+        Expert option for customizing the degree of relaxing supernodes.
+        See SuperLU user's guide for details [1]_
+    panel_size : int, optional
+        Expert option for customizing the panel size.
+        See SuperLU user's guide for details [1]_
+    options : dict, optional
+        Dictionary containing additional expert options to SuperLU.
+        See SuperLU user guide [1]_ (section 2.4 on the 'Options' argument)
+        for more details. For example, you can specify
+        ``options=dict(Equil=False, IterRefine='SINGLE'))``
+        to turn equilibration off and perform a single iterative refinement.
+
+    Returns
+    -------
+    invA : scipy.sparse.linalg.SuperLU
+        Object with a ``solve`` method.
+
+    See also
+    --------
+    spilu : incomplete LU decomposition
+
+    Notes
+    -----
+    This function uses the SuperLU library.
+
+    References
+    ----------
+    .. [1] SuperLU https://portal.nersc.gov/project/sparse/superlu/
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.sparse import csc_matrix
+    >>> from scipy.sparse.linalg import splu
+    >>> A = csc_matrix([[1., 0., 0.], [5., 0., 2.], [0., -1., 0.]], dtype=float)
+    >>> B = splu(A)
+    >>> x = np.array([1., 2., 3.], dtype=float)
+    >>> B.solve(x)
+    array([ 1. , -3. , -1.5])
+    >>> A.dot(B.solve(x))
+    array([ 1.,  2.,  3.])
+    >>> B.solve(A.dot(x))
+    array([ 1.,  2.,  3.])
+    """
+
+    if is_pydata_spmatrix(A):
+        csc_construct_func = lambda *a, cls=type(A): cls(csc_matrix(*a))
+        A = A.to_scipy_sparse().tocsc()
+    else:
+        csc_construct_func = csc_matrix
+
+    if not isspmatrix_csc(A):
+        A = csc_matrix(A)
+        warn('splu converted its input to CSC format', SparseEfficiencyWarning)
+
+    # sum duplicates for non-canonical format
+    A.sum_duplicates()
+    A = A.asfptype()  # upcast to a floating point format
+
+    M, N = A.shape
+    if (M != N):
+        raise ValueError("can only factor square matrices")
+
+    _options = dict(DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec,
+                    PanelSize=panel_size, Relax=relax)
+    if options is not None:
+        _options.update(options)
+
+    # Ensure that no column permutations are applied
+    if (_options["ColPerm"] == "NATURAL"):
+        _options["SymmetricMode"] = True
+
+    return _superlu.gstrf(N, A.nnz, A.data, A.indices, A.indptr,
+                          csc_construct_func=csc_construct_func,
+                          ilu=False, options=_options)
+
+
+def spilu(A, drop_tol=None, fill_factor=None, drop_rule=None, permc_spec=None,
+          diag_pivot_thresh=None, relax=None, panel_size=None, options=None):
+    """
+    Compute an incomplete LU decomposition for a sparse, square matrix.
+
+    The resulting object is an approximation to the inverse of `A`.
+
+    Parameters
+    ----------
+    A : (N, N) array_like
+        Sparse matrix to factorize. Most efficient when provided in CSC format.
+        Other formats will be converted to CSC before factorization.
+    drop_tol : float, optional
+        Drop tolerance (0 <= tol <= 1) for an incomplete LU decomposition.
+        (default: 1e-4)
+    fill_factor : float, optional
+        Specifies the fill ratio upper bound (>= 1.0) for ILU. (default: 10)
+    drop_rule : str, optional
+        Comma-separated string of drop rules to use.
+        Available rules: ``basic``, ``prows``, ``column``, ``area``,
+        ``secondary``, ``dynamic``, ``interp``. (Default: ``basic,area``)
+
+        See SuperLU documentation for details.
+
+    Remaining options
+        Same as for `splu`
+
+    Returns
+    -------
+    invA_approx : scipy.sparse.linalg.SuperLU
+        Object with a ``solve`` method.
+
+    See also
+    --------
+    splu : complete LU decomposition
+
+    Notes
+    -----
+    To get a better approximation of the inverse, you may need to
+    increase `fill_factor` AND decrease `drop_tol`.
+
+    This function uses the SuperLU library.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.sparse import csc_matrix
+    >>> from scipy.sparse.linalg import spilu
+    >>> A = csc_matrix([[1., 0., 0.], [5., 0., 2.], [0., -1., 0.]], dtype=float)
+    >>> B = spilu(A)
+    >>> x = np.array([1., 2., 3.], dtype=float)
+    >>> B.solve(x)
+    array([ 1. , -3. , -1.5])
+    >>> A.dot(B.solve(x))
+    array([ 1.,  2.,  3.])
+    >>> B.solve(A.dot(x))
+    array([ 1.,  2.,  3.])
+    """
+
+    if is_pydata_spmatrix(A):
+        csc_construct_func = lambda *a, cls=type(A): cls(csc_matrix(*a))
+        A = A.to_scipy_sparse().tocsc()
+    else:
+        csc_construct_func = csc_matrix
+
+    if not isspmatrix_csc(A):
+        A = csc_matrix(A)
+        warn('spilu converted its input to CSC format',
+             SparseEfficiencyWarning)
+
+    # sum duplicates for non-canonical format
+    A.sum_duplicates()
+    A = A.asfptype()  # upcast to a floating point format
+
+    M, N = A.shape
+    if (M != N):
+        raise ValueError("can only factor square matrices")
+
+    _options = dict(ILU_DropRule=drop_rule, ILU_DropTol=drop_tol,
+                    ILU_FillFactor=fill_factor,
+                    DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec,
+                    PanelSize=panel_size, Relax=relax)
+    if options is not None:
+        _options.update(options)
+
+    # Ensure that no column permutations are applied
+    if (_options["ColPerm"] == "NATURAL"):
+        _options["SymmetricMode"] = True
+
+    return _superlu.gstrf(N, A.nnz, A.data, A.indices, A.indptr,
+                          csc_construct_func=csc_construct_func,
+                          ilu=True, options=_options)
+
+
+def factorized(A):
+    """
+    Return a function for solving a sparse linear system, with A pre-factorized.
+
+    Parameters
+    ----------
+    A : (N, N) array_like
+        Input. A in CSC format is most efficient. A CSR format matrix will
+        be converted to CSC before factorization.
+
+    Returns
+    -------
+    solve : callable
+        To solve the linear system of equations given in `A`, the `solve`
+        callable should be passed an ndarray of shape (N,).
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.sparse.linalg import factorized
+    >>> A = np.array([[ 3. ,  2. , -1. ],
+    ...               [ 2. , -2. ,  4. ],
+    ...               [-1. ,  0.5, -1. ]])
+    >>> solve = factorized(A) # Makes LU decomposition.
+    >>> rhs1 = np.array([1, -2, 0])
+    >>> solve(rhs1) # Uses the LU factors.
+    array([ 1., -2., -2.])
+
+    """
+    if is_pydata_spmatrix(A):
+        A = A.to_scipy_sparse().tocsc()
+
+    if useUmfpack:
+        if noScikit:
+            raise RuntimeError('Scikits.umfpack not installed.')
+
+        if not isspmatrix_csc(A):
+            A = csc_matrix(A)
+            warn('splu converted its input to CSC format',
+                 SparseEfficiencyWarning)
+
+        A = A.asfptype()  # upcast to a floating point format
+
+        if A.dtype.char not in 'dD':
+            raise ValueError("convert matrix data to double, please, using"
+                  " .astype(), or set linsolve.useUmfpack = False")
+
+        umf_family, A = _get_umf_family(A)
+        umf = umfpack.UmfpackContext(umf_family)
+
+        # Make LU decomposition.
+        umf.numeric(A)
+
+        def solve(b):
+            with np.errstate(divide="ignore", invalid="ignore"):
+                # Ignoring warnings with numpy >= 1.23.0, see gh-16523
+                result = umf.solve(umfpack.UMFPACK_A, A, b, autoTranspose=True)
+
+            return result
+
+        return solve
+    else:
+        return splu(A).solve
+
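+
+# Reusing one factorization across many right-hand sides is the point of
+# ``factorized``; a sketch of the pattern (``A``, ``rhs1``, ``rhs2`` are
+# placeholders):
+#
+#     >>> solve = factorized(A)              # factor once
+#     >>> x1, x2 = solve(rhs1), solve(rhs2)  # solve repeatedly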
+
+def spsolve_triangular(A, b, lower=True, overwrite_A=False, overwrite_b=False,
+                       unit_diagonal=False):
+    """
+    Solve the equation ``A x = b`` for `x`, assuming A is a triangular matrix.
+
+    Parameters
+    ----------
+    A : (M, M) sparse matrix
+        A sparse square triangular matrix. Should be in CSR format.
+    b : (M,) or (M, N) array_like
+        Right-hand side matrix in ``A x = b``
+    lower : bool, optional
+        Whether `A` is a lower or upper triangular matrix.
+        Default is lower triangular matrix.
+    overwrite_A : bool, optional
+        Allow changing `A`. The indices of `A` are going to be sorted and zero
+        entries are going to be removed.
+        Enabling gives a performance gain. Default is False.
+    overwrite_b : bool, optional
+        Allow overwriting data in `b`.
+        Enabling gives a performance gain. Default is False.
+        If `overwrite_b` is True, it should be ensured that
+        `b` has an appropriate dtype to be able to store the result.
+    unit_diagonal : bool, optional
+        If True, diagonal elements of `A` are assumed to be 1 and will not be
+        referenced.
+
+        .. versionadded:: 1.4.0
+
+    Returns
+    -------
+    x : (M,) or (M, N) ndarray
+        Solution to the system ``A x = b``. Shape of return matches shape
+        of `b`.
+
+    Raises
+    ------
+    LinAlgError
+        If `A` is singular or not triangular.
+    ValueError
+        If shape of `A` or shape of `b` do not match the requirements.
+
+    Notes
+    -----
+    .. versionadded:: 0.19.0
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.sparse import csr_matrix
+    >>> from scipy.sparse.linalg import spsolve_triangular
+    >>> A = csr_matrix([[3, 0, 0], [1, -1, 0], [2, 0, 1]], dtype=float)
+    >>> B = np.array([[2, 0], [-1, 0], [2, 0]], dtype=float)
+    >>> x = spsolve_triangular(A, B)
+    >>> np.allclose(A.dot(x), B)
+    True
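+
+    An analogous, illustrative solve with an upper triangular system:
+
+    >>> A_up = csr_matrix([[3, 1, 2], [0, -1, 0], [0, 0, 1]], dtype=float)
+    >>> x_up = spsolve_triangular(A_up, B, lower=False)
+    >>> np.allclose(A_up.dot(x_up), B)
+    True
+
+    With ``unit_diagonal=True``, the stored diagonal of `A` is ignored and
+    taken to be 1 (illustrative):
+
+    >>> x_unit = spsolve_triangular(A, B, unit_diagonal=True)
+    >>> A_unit = A.copy()
+    >>> A_unit.setdiag(1)
+    >>> np.allclose(A_unit.dot(x_unit), B)
+    True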
+    """
+
+    if is_pydata_spmatrix(A):
+        A = A.to_scipy_sparse().tocsr()
+
+    # Check the input for correct type and format.
+    if not isspmatrix_csr(A):
+        warn('CSR matrix format is required. Converting to CSR matrix.',
+             SparseEfficiencyWarning)
+        A = csr_matrix(A)
+    elif not overwrite_A:
+        A = A.copy()
+
+    if A.shape[0] != A.shape[1]:
+        raise ValueError(
+            'A must be a square matrix but its shape is {}.'.format(A.shape))
+
+    # sum duplicates for non-canonical format
+    A.sum_duplicates()
+
+    b = np.asanyarray(b)
+
+    if b.ndim not in [1, 2]:
+        raise ValueError(
+            'b must have 1 or 2 dims but its shape is {}.'.format(b.shape))
+    if A.shape[0] != b.shape[0]:
+        raise ValueError(
+            'The size of the dimensions of A must be equal to '
+            'the size of the first dimension of b but the shape of A is '
+            '{} and the shape of b is {}.'.format(A.shape, b.shape))
+
+    # Init x as (a copy of) b.
+    x_dtype = np.result_type(A.data, b, np.float64)
+    if overwrite_b:
+        if np.can_cast(b.dtype, x_dtype, casting='same_kind'):
+            x = b
+        else:
+            raise ValueError(
+                'Cannot overwrite b (dtype {}) with result '
+                'of type {}.'.format(b.dtype, x_dtype))
+    else:
+        x = b.astype(x_dtype, copy=True)
+
+    # Choose forward or backward order.
+    if lower:
+        row_indices = range(len(b))
+    else:
+        row_indices = range(len(b) - 1, -1, -1)
+
+    # Fill x iteratively.
+    for i in row_indices:
+
+        # Get indices for i-th row.
+        indptr_start = A.indptr[i]
+        indptr_stop = A.indptr[i + 1]
+
+        if lower:
+            A_diagonal_index_row_i = indptr_stop - 1
+            A_off_diagonal_indices_row_i = slice(indptr_start, indptr_stop - 1)
+        else:
+            A_diagonal_index_row_i = indptr_start
+            A_off_diagonal_indices_row_i = slice(indptr_start + 1, indptr_stop)
+
+        # Check regularity and triangularity of A.
+        if not unit_diagonal and (indptr_stop <= indptr_start
+                                  or A.indices[A_diagonal_index_row_i] < i):
+            raise LinAlgError(
+                'A is singular: diagonal {} is zero.'.format(i))
+        if not unit_diagonal and A.indices[A_diagonal_index_row_i] > i:
+            raise LinAlgError(
+                'A is not triangular: A[{}, {}] is nonzero.'
+                ''.format(i, A.indices[A_diagonal_index_row_i]))
+
+        # Incorporate off-diagonal entries.
+        A_column_indices_in_row_i = A.indices[A_off_diagonal_indices_row_i]
+        A_values_in_row_i = A.data[A_off_diagonal_indices_row_i]
+        x[i] -= np.dot(x[A_column_indices_in_row_i].T, A_values_in_row_i)
+
+        # Compute i-th entry of x.
+        if not unit_diagonal:
+            x[i] /= A.data[A_diagonal_index_row_i]
+
+    return x
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_dsolve/tests/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_dsolve/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_dsolve/tests/test_linsolve.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_dsolve/tests/test_linsolve.py
new file mode 100644
index 00000000..5f659fab
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_dsolve/tests/test_linsolve.py
@@ -0,0 +1,799 @@
+import sys
+import threading
+
+import numpy as np
+from numpy import array, finfo, arange, eye, all, unique, ones, dot
+import numpy.random as random
+from numpy.testing import (
+        assert_array_almost_equal, assert_almost_equal,
+        assert_equal, assert_array_equal, assert_, assert_allclose,
+        assert_warns, suppress_warnings)
+import pytest
+from pytest import raises as assert_raises
+
+import scipy.linalg
+from scipy.linalg import norm, inv
+from scipy.sparse import (spdiags, SparseEfficiencyWarning, csc_matrix,
+        csr_matrix, identity, isspmatrix, dok_matrix, lil_matrix, bsr_matrix)
+from scipy.sparse.linalg import SuperLU
+from scipy.sparse.linalg._dsolve import (spsolve, use_solver, splu, spilu,
+        MatrixRankWarning, _superlu, spsolve_triangular, factorized)
+import scipy.sparse
+
+from scipy._lib._testutils import check_free_memory
+
+
+sup_sparse_efficiency = suppress_warnings()
+sup_sparse_efficiency.filter(SparseEfficiencyWarning)
+
+# scikits.umfpack is not a SciPy dependency but it is optionally used in
+# dsolve, so check whether it's available
+try:
+    import scikits.umfpack as umfpack
+    has_umfpack = True
+except ImportError:
+    has_umfpack = False
+
+def toarray(a):
+    if isspmatrix(a):
+        return a.toarray()
+    else:
+        return a
+
+
+def setup_bug_8278():
+    N = 2 ** 6
+    h = 1/N
+    Ah1D = scipy.sparse.diags([-1, 2, -1], [-1, 0, 1],
+                              shape=(N-1, N-1))/(h**2)
+    eyeN = scipy.sparse.eye(N - 1)
+    A = (scipy.sparse.kron(eyeN, scipy.sparse.kron(eyeN, Ah1D))
+         + scipy.sparse.kron(eyeN, scipy.sparse.kron(Ah1D, eyeN))
+         + scipy.sparse.kron(Ah1D, scipy.sparse.kron(eyeN, eyeN)))
+    b = np.random.rand((N-1)**3)
+    return A, b
+
+
+class TestFactorized:
+    def setup_method(self):
+        n = 5
+        d = arange(n) + 1
+        self.n = n
+        self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n).tocsc()
+        random.seed(1234)
+
+    def _check_singular(self):
+        A = csc_matrix((5,5), dtype='d')
+        b = ones(5)
+        assert_array_almost_equal(0. * b, factorized(A)(b))
+
+    def _check_non_singular(self):
+        # A dense random matrix is almost surely nonsingular
+        n = 5
+        a = csc_matrix(random.rand(n, n))
+        b = ones(n)
+
+        expected = splu(a).solve(b)
+        assert_array_almost_equal(factorized(a)(b), expected)
+
+    def test_singular_without_umfpack(self):
+        use_solver(useUmfpack=False)
+        with assert_raises(RuntimeError, match="Factor is exactly singular"):
+            self._check_singular()
+
+    @pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
+    def test_singular_with_umfpack(self):
+        use_solver(useUmfpack=True)
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "divide by zero encountered in double_scalars")
+            assert_warns(umfpack.UmfpackWarning, self._check_singular)
+
+    def test_non_singular_without_umfpack(self):
+        use_solver(useUmfpack=False)
+        self._check_non_singular()
+
+    @pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
+    def test_non_singular_with_umfpack(self):
+        use_solver(useUmfpack=True)
+        self._check_non_singular()
+
+    def test_cannot_factorize_nonsquare_matrix_without_umfpack(self):
+        use_solver(useUmfpack=False)
+        msg = "can only factor square matrices"
+        with assert_raises(ValueError, match=msg):
+            factorized(self.A[:, :4])
+
+    @pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
+    def test_factorizes_nonsquare_matrix_with_umfpack(self):
+        use_solver(useUmfpack=True)
+        # does not raise
+        factorized(self.A[:,:4])
+
+    def test_call_with_incorrectly_sized_matrix_without_umfpack(self):
+        use_solver(useUmfpack=False)
+        solve = factorized(self.A)
+        b = random.rand(4)
+        B = random.rand(4, 3)
+        BB = random.rand(self.n, 3, 9)
+
+        with assert_raises(ValueError, match="is of incompatible size"):
+            solve(b)
+        with assert_raises(ValueError, match="is of incompatible size"):
+            solve(B)
+        with assert_raises(ValueError,
+                           match="object too deep for desired array"):
+            solve(BB)
+
+    @pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
+    def test_call_with_incorrectly_sized_matrix_with_umfpack(self):
+        use_solver(useUmfpack=True)
+        solve = factorized(self.A)
+        b = random.rand(4)
+        B = random.rand(4, 3)
+        BB = random.rand(self.n, 3, 9)
+
+        # does not raise
+        solve(b)
+        msg = "object too deep for desired array"
+        with assert_raises(ValueError, match=msg):
+            solve(B)
+        with assert_raises(ValueError, match=msg):
+            solve(BB)
+
+    def test_call_with_cast_to_complex_without_umfpack(self):
+        use_solver(useUmfpack=False)
+        solve = factorized(self.A)
+        b = random.rand(4)
+        for t in [np.complex64, np.complex128]:
+            with assert_raises(TypeError, match="Cannot cast array data"):
+                solve(b.astype(t))
+
+    @pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
+    def test_call_with_cast_to_complex_with_umfpack(self):
+        use_solver(useUmfpack=True)
+        solve = factorized(self.A)
+        b = random.rand(4)
+        for t in [np.complex64, np.complex128]:
+            assert_warns(np.ComplexWarning, solve, b.astype(t))
+
+    @pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
+    def test_assume_sorted_indices_flag(self):
+        # a sparse matrix with unsorted indices
+        unsorted_inds = np.array([2, 0, 1, 0])
+        data = np.array([10, 16, 5, 0.4])
+        indptr = np.array([0, 1, 2, 4])
+        A = csc_matrix((data, unsorted_inds, indptr), (3, 3))
+        b = ones(3)
+
+        # should raise when incorrectly assuming indices are sorted
+        use_solver(useUmfpack=True, assumeSortedIndices=True)
+        with assert_raises(RuntimeError,
+                           match="UMFPACK_ERROR_invalid_matrix"):
+            factorized(A)
+
+        # should sort indices and succeed when not assuming indices are sorted
+        use_solver(useUmfpack=True, assumeSortedIndices=False)
+        expected = splu(A.copy()).solve(b)
+
+        assert_equal(A.has_sorted_indices, 0)
+        assert_array_almost_equal(factorized(A)(b), expected)
+
+    @pytest.mark.slow
+    @pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
+    def test_bug_8278(self):
+        check_free_memory(8000)
+        use_solver(useUmfpack=True)
+        A, b = setup_bug_8278()
+        A = A.tocsc()
+        f = factorized(A)
+        x = f(b)
+        assert_array_almost_equal(A @ x, b)
+
+
+class TestLinsolve:
+    def setup_method(self):
+        use_solver(useUmfpack=False)
+
+    def test_singular(self):
+        A = csc_matrix((5,5), dtype='d')
+        b = array([1, 2, 3, 4, 5],dtype='d')
+        with suppress_warnings() as sup:
+            sup.filter(MatrixRankWarning, "Matrix is exactly singular")
+            x = spsolve(A, b)
+        assert_(not np.isfinite(x).any())
+
+    def test_singular_gh_3312(self):
+        # "Bad" test case that leads SuperLU to call LAPACK with invalid
+        # arguments. Check that it fails moderately gracefully.
+        ij = np.array([(17, 0), (17, 6), (17, 12), (10, 13)], dtype=np.int32)
+        v = np.array([0.284213, 0.94933781, 0.15767017, 0.38797296])
+        A = csc_matrix((v, ij.T), shape=(20, 20))
+        b = np.arange(20)
+
+        try:
+            # should either raise a runtime error or return value
+            # appropriate for singular input (which yields the warning)
+            with suppress_warnings() as sup:
+                sup.filter(MatrixRankWarning, "Matrix is exactly singular")
+                x = spsolve(A, b)
+            assert not np.isfinite(x).any()
+        except RuntimeError:
+            pass
+
+    def test_twodiags(self):
+        A = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5)
+        b = array([1, 2, 3, 4, 5])
+
+        # condition number of A
+        cond_A = norm(A.toarray(), 2) * norm(inv(A.toarray()), 2)
+
+        for t in ['f','d','F','D']:
+            eps = finfo(t).eps  # floating point epsilon
+            b = b.astype(t)
+
+            for format in ['csc','csr']:
+                Asp = A.astype(t).asformat(format)
+
+                x = spsolve(Asp,b)
+
+                assert_(norm(b - Asp@x) < 10 * cond_A * eps)
+
+    def test_bvector_smoketest(self):
+        Adense = array([[0., 1., 1.],
+                        [1., 0., 1.],
+                        [0., 0., 1.]])
+        As = csc_matrix(Adense)
+        random.seed(1234)
+        x = random.randn(3)
+        b = As@x
+        x2 = spsolve(As, b)
+
+        assert_array_almost_equal(x, x2)
+
+    def test_bmatrix_smoketest(self):
+        Adense = array([[0., 1., 1.],
+                        [1., 0., 1.],
+                        [0., 0., 1.]])
+        As = csc_matrix(Adense)
+        random.seed(1234)
+        x = random.randn(3, 4)
+        Bdense = As.dot(x)
+        Bs = csc_matrix(Bdense)
+        x2 = spsolve(As, Bs)
+        assert_array_almost_equal(x, x2.toarray())
+
+    @sup_sparse_efficiency
+    def test_non_square(self):
+        # A is not square.
+        A = ones((3, 4))
+        b = ones((4, 1))
+        assert_raises(ValueError, spsolve, A, b)
+        # A2 and b2 have incompatible shapes.
+        A2 = csc_matrix(eye(3))
+        b2 = array([1.0, 2.0])
+        assert_raises(ValueError, spsolve, A2, b2)
+
+    @sup_sparse_efficiency
+    def test_example_comparison(self):
+        row = array([0,0,1,2,2,2])
+        col = array([0,2,2,0,1,2])
+        data = array([1,2,3,-4,5,6])
+        sM = csr_matrix((data,(row,col)), shape=(3,3), dtype=float)
+        M = sM.toarray()
+
+        row = array([0,0,1,1,0,0])
+        col = array([0,2,1,1,0,0])
+        data = array([1,1,1,1,1,1])
+        sN = csr_matrix((data, (row,col)), shape=(3,3), dtype=float)
+        N = sN.toarray()
+
+        sX = spsolve(sM, sN)
+        X = scipy.linalg.solve(M, N)
+
+        assert_array_almost_equal(X, sX.toarray())
+
+    @sup_sparse_efficiency
+    @pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
+    def test_shape_compatibility(self):
+        use_solver(useUmfpack=True)
+        A = csc_matrix([[1., 0], [0, 2]])
+        bs = [
+            [1, 6],
+            array([1, 6]),
+            [[1], [6]],
+            array([[1], [6]]),
+            csc_matrix([[1], [6]]),
+            csr_matrix([[1], [6]]),
+            dok_matrix([[1], [6]]),
+            bsr_matrix([[1], [6]]),
+            array([[1., 2., 3.], [6., 8., 10.]]),
+            csc_matrix([[1., 2., 3.], [6., 8., 10.]]),
+            csr_matrix([[1., 2., 3.], [6., 8., 10.]]),
+            dok_matrix([[1., 2., 3.], [6., 8., 10.]]),
+            bsr_matrix([[1., 2., 3.], [6., 8., 10.]]),
+            ]
+
+        for b in bs:
+            x = np.linalg.solve(A.toarray(), toarray(b))
+            for spmattype in [csc_matrix, csr_matrix, dok_matrix, lil_matrix]:
+                x1 = spsolve(spmattype(A), b, use_umfpack=True)
+                x2 = spsolve(spmattype(A), b, use_umfpack=False)
+
+                # check solution
+                if x.ndim == 2 and x.shape[1] == 1:
+                    # spsolve also interprets these as "vectors"
+                    x = x.ravel()
+
+                assert_array_almost_equal(toarray(x1), x, err_msg=repr((b, spmattype, 1)))
+                assert_array_almost_equal(toarray(x2), x, err_msg=repr((b, spmattype, 2)))
+
+                # dense vs. sparse output  ("vectors" are always dense)
+                if isspmatrix(b) and x.ndim > 1:
+                    assert_(isspmatrix(x1), repr((b, spmattype, 1)))
+                    assert_(isspmatrix(x2), repr((b, spmattype, 2)))
+                else:
+                    assert_(isinstance(x1, np.ndarray), repr((b, spmattype, 1)))
+                    assert_(isinstance(x2, np.ndarray), repr((b, spmattype, 2)))
+
+                # check output shape
+                if x.ndim == 1:
+                    # "vector"
+                    assert_equal(x1.shape, (A.shape[1],))
+                    assert_equal(x2.shape, (A.shape[1],))
+                else:
+                    # "matrix"
+                    assert_equal(x1.shape, x.shape)
+                    assert_equal(x2.shape, x.shape)
+
+        A = csc_matrix((3, 3))
+        b = csc_matrix((1, 3))
+        assert_raises(ValueError, spsolve, A, b)
+
+    @sup_sparse_efficiency
+    def test_ndarray_support(self):
+        A = array([[1., 2.], [2., 0.]])
+        x = array([[1., 1.], [0.5, -0.5]])
+        b = array([[2., 0.], [2., 2.]])
+
+        assert_array_almost_equal(x, spsolve(A, b))
+
+    def test_gssv_badinput(self):
+        N = 10
+        d = arange(N) + 1.0
+        A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), N, N)
+
+        for spmatrix in (csc_matrix, csr_matrix):
+            A = spmatrix(A)
+            b = np.arange(N)
+
+            def not_c_contig(x):
+                return x.repeat(2)[::2]
+
+            def not_1dim(x):
+                return x[:,None]
+
+            def bad_type(x):
+                return x.astype(bool)
+
+            def too_short(x):
+                return x[:-1]
+
+            badops = [not_c_contig, not_1dim, bad_type, too_short]
+
+            for badop in badops:
+                msg = "%r %r" % (spmatrix, badop)
+                # Not C-contiguous
+                assert_raises((ValueError, TypeError), _superlu.gssv,
+                              N, A.nnz, badop(A.data), A.indices, A.indptr,
+                              b, int(spmatrix == csc_matrix), err_msg=msg)
+                assert_raises((ValueError, TypeError), _superlu.gssv,
+                              N, A.nnz, A.data, badop(A.indices), A.indptr,
+                              b, int(spmatrix == csc_matrix), err_msg=msg)
+                assert_raises((ValueError, TypeError), _superlu.gssv,
+                              N, A.nnz, A.data, A.indices, badop(A.indptr),
+                              b, int(spmatrix == csc_matrix), err_msg=msg)
+
+    def test_sparsity_preservation(self):
+        ident = csc_matrix([
+            [1, 0, 0],
+            [0, 1, 0],
+            [0, 0, 1]])
+        b = csc_matrix([
+            [0, 1],
+            [1, 0],
+            [0, 0]])
+        x = spsolve(ident, b)
+        assert_equal(ident.nnz, 3)
+        assert_equal(b.nnz, 2)
+        assert_equal(x.nnz, 2)
+        assert_allclose(x.A, b.A, atol=1e-12, rtol=1e-12)
+
+    def test_dtype_cast(self):
+        A_real = scipy.sparse.csr_matrix([[1, 2, 0],
+                                          [0, 0, 3],
+                                          [4, 0, 5]])
+        A_complex = scipy.sparse.csr_matrix([[1, 2, 0],
+                                             [0, 0, 3],
+                                             [4, 0, 5 + 1j]])
+        b_real = np.array([1,1,1])
+        b_complex = np.array([1,1,1]) + 1j*np.array([1,1,1])
+        x = spsolve(A_real, b_real)
+        assert_(np.issubdtype(x.dtype, np.floating))
+        x = spsolve(A_real, b_complex)
+        assert_(np.issubdtype(x.dtype, np.complexfloating))
+        x = spsolve(A_complex, b_real)
+        assert_(np.issubdtype(x.dtype, np.complexfloating))
+        x = spsolve(A_complex, b_complex)
+        assert_(np.issubdtype(x.dtype, np.complexfloating))
+
+    @pytest.mark.slow
+    @pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
+    def test_bug_8278(self):
+        check_free_memory(8000)
+        use_solver(useUmfpack=True)
+        A, b = setup_bug_8278()
+        x = spsolve(A, b)
+        assert_array_almost_equal(A @ x, b)
+
+
+class TestSplu:
+    def setup_method(self):
+        use_solver(useUmfpack=False)
+        n = 40
+        d = arange(n) + 1
+        self.n = n
+        self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n)
+        random.seed(1234)
+
+    def _smoketest(self, spxlu, check, dtype):
+        if np.issubdtype(dtype, np.complexfloating):
+            A = self.A + 1j*self.A.T
+        else:
+            A = self.A
+
+        A = A.astype(dtype)
+        lu = spxlu(A)
+
+        rng = random.RandomState(1234)
+
+        # Input shapes
+        for k in [None, 1, 2, self.n, self.n+2]:
+            msg = "k=%r" % (k,)
+
+            if k is None:
+                b = rng.rand(self.n)
+            else:
+                b = rng.rand(self.n, k)
+
+            if np.issubdtype(dtype, np.complexfloating):
+                b = b + 1j*rng.rand(*b.shape)
+            b = b.astype(dtype)
+
+            x = lu.solve(b)
+            check(A, b, x, msg)
+
+            x = lu.solve(b, 'T')
+            check(A.T, b, x, msg)
+
+            x = lu.solve(b, 'H')
+            check(A.T.conj(), b, x, msg)
+
+    @sup_sparse_efficiency
+    def test_splu_smoketest(self):
+        self._internal_test_splu_smoketest()
+
+    def _internal_test_splu_smoketest(self):
+        # Check that splu works at all
+        def check(A, b, x, msg=""):
+            eps = np.finfo(A.dtype).eps
+            r = A @ x
+            assert_(abs(r - b).max() < 1e3*eps, msg)
+
+        self._smoketest(splu, check, np.float32)
+        self._smoketest(splu, check, np.float64)
+        self._smoketest(splu, check, np.complex64)
+        self._smoketest(splu, check, np.complex128)
+
+    @sup_sparse_efficiency
+    def test_spilu_smoketest(self):
+        self._internal_test_spilu_smoketest()
+
+    def _internal_test_spilu_smoketest(self):
+        errors = []
+
+        def check(A, b, x, msg=""):
+            r = A @ x
+            err = abs(r - b).max()
+            assert_(err < 1e-2, msg)
+            if b.dtype in (np.float64, np.complex128):
+                errors.append(err)
+
+        self._smoketest(spilu, check, np.float32)
+        self._smoketest(spilu, check, np.float64)
+        self._smoketest(spilu, check, np.complex64)
+        self._smoketest(spilu, check, np.complex128)
+
+        assert_(max(errors) > 1e-5)
+
+    @sup_sparse_efficiency
+    def test_spilu_drop_rule(self):
+        # Test passing in the drop_rule argument to spilu.
+        A = identity(2)
+
+        rules = [
+            b'basic,area'.decode('ascii'),  # unicode
+            b'basic,area',  # ascii
+            [b'basic', b'area'.decode('ascii')]
+        ]
+        for rule in rules:
+            # Argument should be accepted
+            assert_(isinstance(spilu(A, drop_rule=rule), SuperLU))
+
+    def test_splu_nnz0(self):
+        A = csc_matrix((5,5), dtype='d')
+        assert_raises(RuntimeError, splu, A)
+
+    def test_spilu_nnz0(self):
+        A = csc_matrix((5,5), dtype='d')
+        assert_raises(RuntimeError, spilu, A)
+
+    def test_splu_basic(self):
+        # Test basic splu functionality.
+        n = 30
+        rng = random.RandomState(12)
+        a = rng.rand(n, n)
+        a[a < 0.95] = 0
+        # First test with a singular matrix
+        a[:, 0] = 0
+        a_ = csc_matrix(a)
+        # Matrix is exactly singular
+        assert_raises(RuntimeError, splu, a_)
+
+        # Make `a` diagonally dominant, to make sure it is not singular
+        a += 4*eye(n)
+        a_ = csc_matrix(a)
+        lu = splu(a_)
+        b = ones(n)
+        x = lu.solve(b)
+        assert_almost_equal(dot(a, x), b)
+
+    def test_splu_perm(self):
+        # Test the permutation vectors exposed by splu.
+        n = 30
+        a = random.random((n, n))
+        a[a < 0.95] = 0
+        # Make `a` diagonally dominant, to make sure it is not singular
+        a += 4*eye(n)
+        a_ = csc_matrix(a)
+        lu = splu(a_)
+        # Check that the permutation indices do belong to [0, n-1].
+        for perm in (lu.perm_r, lu.perm_c):
+            assert_(all(perm > -1))
+            assert_(all(perm < n))
+            assert_equal(len(unique(perm)), len(perm))
+
+        # Now make a symmetric, and test that the two permutation vectors are
+        # the same
+        # Note: a += a.T relies on undefined behavior.
+        a = a + a.T
+        a_ = csc_matrix(a)
+        lu = splu(a_)
+        assert_array_equal(lu.perm_r, lu.perm_c)
+
+    @pytest.mark.parametrize("splu_fun, rtol", [(splu, 1e-7), (spilu, 1e-1)])
+    def test_natural_permc(self, splu_fun, rtol):
+        # Test that the "NATURAL" permc_spec does not permute the matrix
+        np.random.seed(42)
+        n = 500
+        p = 0.01
+        A = scipy.sparse.random(n, n, p)
+        x = np.random.rand(n)
+        # Make A diagonally dominant to make sure it is not singular
+        A += (n+1)*scipy.sparse.identity(n)
+        A_ = csc_matrix(A)
+        b = A_ @ x
+
+        # without permc_spec, permutation is not identity
+        lu = splu_fun(A_)
+        assert_(np.any(lu.perm_c != np.arange(n)))
+
+        # with permc_spec="NATURAL", permutation is identity
+        lu = splu_fun(A_, permc_spec="NATURAL")
+        assert_array_equal(lu.perm_c, np.arange(n))
+
+        # Also, lu decomposition is valid
+        x2 = lu.solve(b)
+        assert_allclose(x, x2, rtol=rtol)
+
+    @pytest.mark.skipif(not hasattr(sys, 'getrefcount'), reason="no sys.getrefcount")
+    def test_lu_refcount(self):
+        # Test that we are keeping track of the reference count with splu.
+        n = 30
+        a = random.random((n, n))
+        a[a < 0.95] = 0
+        # Make `a` diagonally dominant, to make sure it is not singular
+        a += 4*eye(n)
+        a_ = csc_matrix(a)
+        lu = splu(a_)
+
+        # And now test that we don't have a refcount bug
+        rc = sys.getrefcount(lu)
+        for attr in ('perm_r', 'perm_c'):
+            perm = getattr(lu, attr)
+            assert_equal(sys.getrefcount(lu), rc + 1)
+            del perm
+            assert_equal(sys.getrefcount(lu), rc)
+
+    def test_bad_inputs(self):
+        A = self.A.tocsc()
+
+        assert_raises(ValueError, splu, A[:,:4])
+        assert_raises(ValueError, spilu, A[:,:4])
+
+        for lu in [splu(A), spilu(A)]:
+            b = random.rand(42)
+            B = random.rand(42, 3)
+            BB = random.rand(self.n, 3, 9)
+            assert_raises(ValueError, lu.solve, b)
+            assert_raises(ValueError, lu.solve, B)
+            assert_raises(ValueError, lu.solve, BB)
+            assert_raises(TypeError, lu.solve,
+                          b.astype(np.complex64))
+            assert_raises(TypeError, lu.solve,
+                          b.astype(np.complex128))
+
+    @sup_sparse_efficiency
+    def test_superlu_dlamch_i386_nan(self):
+        # SuperLU 4.3 calls some functions returning floats without
+        # declaring them. On i386@linux call convention, this fails to
+        # clear floating point registers after call. As a result, NaN
+        # can appear in the next floating point operation made.
+        #
+        # Here's a test case that triggered the issue.
+        n = 8
+        d = np.arange(n) + 1
+        A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n)
+        A = A.astype(np.float32)
+        spilu(A)
+        A = A + 1j*A
+        B = A.A
+        assert_(not np.isnan(B).any())
+
+    @sup_sparse_efficiency
+    def test_lu_attr(self):
+
+        def check(dtype, complex_2=False):
+            A = self.A.astype(dtype)
+
+            if complex_2:
+                A = A + 1j*A.T
+
+            n = A.shape[0]
+            lu = splu(A)
+
+            # Check that the decomposition is as advertised
+
+            Pc = np.zeros((n, n))
+            Pc[np.arange(n), lu.perm_c] = 1
+
+            Pr = np.zeros((n, n))
+            Pr[lu.perm_r, np.arange(n)] = 1
+
+            Ad = A.toarray()
+            lhs = Pr.dot(Ad).dot(Pc)
+            rhs = (lu.L @ lu.U).toarray()
+
+            eps = np.finfo(dtype).eps
+
+            assert_allclose(lhs, rhs, atol=100*eps)
+
+        check(np.float32)
+        check(np.float64)
+        check(np.complex64)
+        check(np.complex128)
+        check(np.complex64, True)
+        check(np.complex128, True)
+
+    @pytest.mark.slow
+    @sup_sparse_efficiency
+    def test_threads_parallel(self):
+        oks = []
+
+        def worker():
+            try:
+                self.test_splu_basic()
+                self._internal_test_splu_smoketest()
+                self._internal_test_spilu_smoketest()
+                oks.append(True)
+            except Exception:
+                pass
+
+        threads = [threading.Thread(target=worker)
+                   for k in range(20)]
+        for t in threads:
+            t.start()
+        for t in threads:
+            t.join()
+
+        assert_equal(len(oks), 20)
+
+
+class TestSpsolveTriangular:
+    def setup_method(self):
+        use_solver(useUmfpack=False)
+
+    def test_zero_diagonal(self):
+        n = 5
+        rng = np.random.default_rng(43876432987)
+        A = rng.standard_normal((n, n))
+        b = np.arange(n)
+        A = scipy.sparse.tril(A, k=0, format='csr')
+
+        x = spsolve_triangular(A, b, unit_diagonal=True, lower=True)
+
+        A.setdiag(1)
+        assert_allclose(A.dot(x), b)
+
+        # Regression test from gh-15199
+        A = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0]], dtype=np.float64)
+        b = np.array([1., 2., 3.])
+        with suppress_warnings() as sup:
+            sup.filter(SparseEfficiencyWarning, "CSR matrix format is")
+            spsolve_triangular(A, b, unit_diagonal=True)
+
+    def test_singular(self):
+        n = 5
+        A = csr_matrix((n, n))
+        b = np.arange(n)
+        for lower in (True, False):
+            assert_raises(scipy.linalg.LinAlgError, spsolve_triangular, A, b, lower=lower)
+
+    @sup_sparse_efficiency
+    def test_bad_shape(self):
+        # A is not square.
+        A = np.zeros((3, 4))
+        b = ones((4, 1))
+        assert_raises(ValueError, spsolve_triangular, A, b)
+        # A2 and b2 have incompatible shapes.
+        A2 = csr_matrix(eye(3))
+        b2 = array([1.0, 2.0])
+        assert_raises(ValueError, spsolve_triangular, A2, b2)
+
+    @sup_sparse_efficiency
+    def test_input_types(self):
+        A = array([[1., 0.], [1., 2.]])
+        b = array([[2., 0.], [2., 2.]])
+        for matrix_type in (array, csc_matrix, csr_matrix):
+            x = spsolve_triangular(matrix_type(A), b, lower=True)
+            assert_array_almost_equal(A.dot(x), b)
+
+    @pytest.mark.slow
+    @pytest.mark.timeout(120)  # prerelease_deps_coverage_64bit_blas job
+    @sup_sparse_efficiency
+    def test_random(self):
+        def random_triangle_matrix(n, lower=True):
+            A = scipy.sparse.random(n, n, density=0.1, format='coo')
+            if lower:
+                A = scipy.sparse.tril(A)
+            else:
+                A = scipy.sparse.triu(A)
+            A = A.tocsr(copy=False)
+            for i in range(n):
+                A[i, i] = np.random.rand() + 1
+            return A
+
+        np.random.seed(1234)
+        for lower in (True, False):
+            for n in (10, 10**2, 10**3):
+                A = random_triangle_matrix(n, lower=lower)
+                for m in (1, 10):
+                    for b in (np.random.rand(n, m),
+                              np.random.randint(-9, 9, (n, m)),
+                              np.random.randint(-9, 9, (n, m)) +
+                              np.random.randint(-9, 9, (n, m)) * 1j):
+                        x = spsolve_triangular(A, b, lower=lower)
+                        assert_array_almost_equal(A.dot(x), b)
+                        x = spsolve_triangular(A, b, lower=lower,
+                                               unit_diagonal=True)
+                        A.setdiag(1)
+                        assert_array_almost_equal(A.dot(x), b)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/__init__.py
new file mode 100644
index 00000000..25278d34
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/__init__.py
@@ -0,0 +1,22 @@
+"""
+Sparse Eigenvalue Solvers
+-------------------------
+
+The submodules of sparse.linalg._eigen:
+    1. arpack: ARPACK solvers for eigenvalue problems (eigs, eigsh)
+    2. lobpcg: Locally Optimal Block Preconditioned Conjugate Gradient Method
+
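+A brief, illustrative use of one of the solvers exported here (``eigsh``
+from the arpack submodule):
+
+>>> import numpy as np
+>>> from scipy.sparse import diags
+>>> from scipy.sparse.linalg import eigsh
+>>> A = diags([1., 2., 3., 4., 5.]).tocsc()
+>>> w, v = eigsh(A, k=2, which='LA')  # the two largest eigenvalues
+>>> np.allclose(np.sort(w), [4., 5.])
+True
+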
+"""
+from .arpack import *
+from .lobpcg import *
+from ._svds import svds
+
+from . import arpack
+
+__all__ = [
+    'ArpackError', 'ArpackNoConvergence',
+    'eigs', 'eigsh', 'lobpcg', 'svds'
+]
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/_svds.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/_svds.py
new file mode 100644
index 00000000..b33955dd
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/_svds.py
@@ -0,0 +1,563 @@
+import os
+import numpy as np
+
+from .arpack import _arpack  # type: ignore[attr-defined]
+from . import eigsh
+
+from scipy._lib._util import check_random_state
+from scipy.sparse.linalg._interface import LinearOperator, aslinearoperator
+from scipy.sparse.linalg._eigen.lobpcg import lobpcg  # type: ignore[no-redef]
+if os.environ.get("SCIPY_USE_PROPACK"):
+    from scipy.sparse.linalg._svdp import _svdp
+    HAS_PROPACK = True
+else:
+    HAS_PROPACK = False
+from scipy.linalg import svd
+
+arpack_int = _arpack.timing.nbx.dtype
+__all__ = ['svds']
+
+
+def _herm(x):
+    return x.T.conj()
+
+
+def _iv(A, k, ncv, tol, which, v0, maxiter,
+        return_singular, solver, random_state):
+
+    # input validation/standardization for `solver`
+    # out of order because it's needed for other parameters
+    solver = str(solver).lower()
+    solvers = {"arpack", "lobpcg", "propack"}
+    if solver not in solvers:
+        raise ValueError(f"solver must be one of {solvers}.")
+
+    # input validation/standardization for `A`
+    A = aslinearoperator(A)  # this takes care of some input validation
+    if not (np.issubdtype(A.dtype, np.complexfloating)
+            or np.issubdtype(A.dtype, np.floating)):
+        message = "`A` must be of floating or complex floating data type."
+        raise ValueError(message)
+    if np.prod(A.shape) == 0:
+        message = "`A` must not be empty."
+        raise ValueError(message)
+
+    # input validation/standardization for `k`
+    kmax = min(A.shape) if solver == 'propack' else min(A.shape) - 1
+    if int(k) != k or not (0 < k <= kmax):
+        message = "`k` must be an integer satisfying `0 < k < min(A.shape)`."
+        raise ValueError(message)
+    k = int(k)
+
+    # input validation/standardization for `ncv`
+    if solver == "arpack" and ncv is not None:
+        if int(ncv) != ncv or not (k < ncv < min(A.shape)):
+            message = ("`ncv` must be an integer satisfying "
+                       "`k < ncv < min(A.shape)`.")
+            raise ValueError(message)
+        ncv = int(ncv)
+
+    # input validation/standardization for `tol`
+    if tol < 0 or not np.isfinite(tol):
+        message = "`tol` must be a non-negative floating point value."
+        raise ValueError(message)
+    tol = float(tol)
+
+    # input validation/standardization for `which`
+    which = str(which).upper()
+    whichs = {'LM', 'SM'}
+    if which not in whichs:
+        raise ValueError(f"`which` must be in {whichs}.")
+
+    # input validation/standardization for `v0`
+    if v0 is not None:
+        v0 = np.atleast_1d(v0)
+        if not (np.issubdtype(v0.dtype, np.complexfloating)
+                or np.issubdtype(v0.dtype, np.floating)):
+            message = ("`v0` must be of floating or complex floating "
+                       "data type.")
+            raise ValueError(message)
+
+        shape = (A.shape[0],) if solver == 'propack' else (min(A.shape),)
+        if v0.shape != shape:
+            message = f"`v0` must have shape {shape}."
+            raise ValueError(message)
+
+    # input validation/standardization for `maxiter`
+    if maxiter is not None and (int(maxiter) != maxiter or maxiter <= 0):
+        message = "`maxiter` must be a positive integer."
+        raise ValueError(message)
+    maxiter = int(maxiter) if maxiter is not None else maxiter
+
+    # input validation/standardization for `return_singular_vectors`
+    # not going to be flexible with this; too complicated for little gain
+    rs_options = {True, False, "vh", "u"}
+    if return_singular not in rs_options:
+        raise ValueError(f"`return_singular_vectors` must be in {rs_options}.")
+
+    random_state = check_random_state(random_state)
+
+    return (A, k, ncv, tol, which, v0, maxiter,
+            return_singular, solver, random_state)
+
+
+def svds(A, k=6, ncv=None, tol=0, which='LM', v0=None,
+         maxiter=None, return_singular_vectors=True,
+         solver='arpack', random_state=None, options=None):
+    """
+    Partial singular value decomposition of a sparse matrix.
+
+    Compute the largest or smallest `k` singular values and corresponding
+    singular vectors of a sparse matrix `A`. The order in which the singular
+    values are returned is not guaranteed.
+
+    In the descriptions below, let ``M, N = A.shape``.
+
+    Parameters
+    ----------
+    A : ndarray, sparse matrix, or LinearOperator
+        Matrix to decompose of a floating point numeric dtype.
+    k : int, default: 6
+        Number of singular values and singular vectors to compute.
+        Must satisfy ``1 <= k <= kmax``, where ``kmax=min(M, N)`` for
+        ``solver='propack'`` and ``kmax=min(M, N) - 1`` otherwise.
+    ncv : int, optional
+        When ``solver='arpack'``, this is the number of Lanczos vectors
+        generated. See :ref:`'arpack' <sparse.linalg.svds-arpack>` for details.
+        When ``solver='lobpcg'`` or ``solver='propack'``, this parameter is
+        ignored.
+    tol : float, optional
+        Tolerance for singular values. Zero (default) means machine precision.
+    which : {'LM', 'SM'}
+        Which `k` singular values to find: either the largest magnitude ('LM')
+        or smallest magnitude ('SM') singular values.
+    v0 : ndarray, optional
+        The starting vector for iteration; see method-specific
+        documentation (:ref:`'arpack' <sparse.linalg.svds-arpack>`,
+        :ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`, or
+        :ref:`'propack' <sparse.linalg.svds-propack>`) for details.
+    maxiter : int, optional
+        Maximum number of iterations; see method-specific
+        documentation (:ref:`'arpack' <sparse.linalg.svds-arpack>`,
+        :ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`, or
+        :ref:`'propack' <sparse.linalg.svds-propack>`) for details.
+    return_singular_vectors : {True, False, "u", "vh"}
+        Singular values are always computed and returned; this parameter
+        controls the computation and return of singular vectors.
+
+        - ``True``: return singular vectors.
+        - ``False``: do not return singular vectors.
+        - ``"u"``: if ``M <= N``, compute only the left singular vectors and
+          return ``None`` for the right singular vectors. Otherwise, compute
+          all singular vectors.
+        - ``"vh"``: if ``M > N``, compute only the right singular vectors and
+          return ``None`` for the left singular vectors. Otherwise, compute
+          all singular vectors.
+
+        If ``solver='propack'``, the option is respected regardless of the
+        matrix shape.
+
+    solver : {'arpack', 'propack', 'lobpcg'}, optional
+        The solver used.
+        :ref:`'arpack' <sparse.linalg.svds-arpack>`,
+        :ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`, and
+        :ref:`'propack' <sparse.linalg.svds-propack>` are supported.
+        Default: `'arpack'`.
+    random_state : {None, int, `numpy.random.Generator`,
+                    `numpy.random.RandomState`}, optional
+
+        Pseudorandom number generator state used to generate the random
+        starting vector(s) when `v0` is not specified.
+
+        If `random_state` is ``None`` (or `np.random`), the
+        `numpy.random.RandomState` singleton is used.
+        If `random_state` is an int, a new ``RandomState`` instance is used,
+        seeded with `random_state`.
+        If `random_state` is already a ``Generator`` or ``RandomState``
+        instance then that instance is used.
+    options : dict, optional
+        A dictionary of solver-specific options. No solver-specific options
+        are currently supported; this parameter is reserved for future use.
+
+    Returns
+    -------
+    u : ndarray, shape=(M, k)
+        Unitary matrix having left singular vectors as columns.
+    s : ndarray, shape=(k,)
+        The singular values.
+    vh : ndarray, shape=(k, N)
+        Unitary matrix having right singular vectors as rows.
+
+    Notes
+    -----
+    This is a naive implementation using ARPACK or LOBPCG as an eigensolver
+    on the normal matrix ``A.conj().T @ A`` or ``A @ A.conj().T``, whichever
+    is smaller, followed by the Rayleigh-Ritz method as postprocessing; see
+    Using the normal matrix, in Rayleigh-Ritz method, (2022, Nov. 19),
+    Wikipedia, https://w.wiki/4zms.
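+
+    A minimal sketch of this normal-matrix approach (illustrative only:
+    it uses a small dense matrix and NumPy's dense ``eigh`` in place of
+    ARPACK/LOBPCG, whereas the actual implementation is matrix-free):
+
+    >>> import numpy as np
+    >>> rng = np.random.default_rng(0)
+    >>> B = rng.standard_normal((6, 4))
+    >>> w, V = np.linalg.eigh(B.T @ B)  # eigenpairs of the normal matrix
+    >>> np.allclose(np.sqrt(w[-2:]),    # two largest singular values of B
+    ...             np.sort(np.linalg.svd(B, compute_uv=False))[-2:])
+    True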
+
+    Alternatively, the PROPACK solver can be called.
+
+    The supported numeric dtypes of the input matrix ``A`` may be limited.
+    Only ``solver="lobpcg"`` supports all floating point dtypes,
+    real ('np.single', 'np.double', 'np.longdouble') and
+    complex ('np.csingle', 'np.cdouble', 'np.clongdouble');
+    ``solver="arpack"`` supports only
+    'np.single', 'np.double', and 'np.cdouble'.
+
+    Examples
+    --------
+    Construct a matrix ``A`` from singular values and vectors.
+
+    >>> import numpy as np
+    >>> from scipy.stats import ortho_group
+    >>> from scipy.sparse.linalg import svds
+    >>> from scipy.sparse import csr_matrix
+    >>> rng = np.random.default_rng()
+
+    Construct a dense matrix ``A`` from singular values and vectors.
+
+    >>> orthogonal = ortho_group.rvs(10, random_state=rng)
+    >>> s = [1e-3, 1, 2, 3, 4]  # non-zero singular values
+    >>> u = orthogonal[:, :5]         # left singular vectors
+    >>> vT = orthogonal[:, 5:].T      # right singular vectors
+    >>> A = u @ np.diag(s) @ vT
+
+    With only four singular values/vectors, the SVD approximates the original
+    matrix.
+
+    >>> u4, s4, vT4 = svds(A, k=4)
+    >>> A4 = u4 @ np.diag(s4) @ vT4
+    >>> np.allclose(A4, A, atol=1e-3)
+    True
+
+    With all five non-zero singular values/vectors, we can reproduce
+    the original matrix more accurately.
+
+    >>> u5, s5, vT5 = svds(A, k=5)
+    >>> A5 = u5 @ np.diag(s5) @ vT5
+    >>> np.allclose(A5, A)
+    True
+
+    The singular values match the expected singular values.
+
+    >>> np.allclose(s5, s)
+    True
+
+    Since the singular values are not close to each other in this example,
+    every singular vector matches as expected up to a difference in sign.
+
+    >>> (np.allclose(np.abs(u5), np.abs(u)) and
+    ...  np.allclose(np.abs(vT5), np.abs(vT)))
+    True
+
+    The singular vectors are also orthogonal.
+
+    >>> (np.allclose(u5.T @ u5, np.eye(5)) and
+    ...  np.allclose(vT5 @ vT5.T, np.eye(5)))
+    True
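+
+    Singular values alone can be requested; as an illustrative check, they
+    match the values computed above up to ordering:
+
+    >>> s_only = svds(A, k=5, return_singular_vectors=False)
+    >>> np.allclose(np.sort(s_only), np.sort(s5))
+    True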
+
+    If there are (nearly) multiple singular values, the corresponding
+    individual singular vectors may be unstable, but the whole invariant
+    subspace containing all such singular vectors is computed accurately,
+    as can be measured by angles between subspaces via 'subspace_angles'.
+
+    >>> from scipy.linalg import subspace_angles as s_a
+    >>> rng = np.random.default_rng()
+    >>> s = [1, 1 + 1e-6]  # non-zero singular values
+    >>> u, _ = np.linalg.qr(rng.standard_normal((99, 2)))
+    >>> v, _ = np.linalg.qr(rng.standard_normal((99, 2)))
+    >>> vT = v.T
+    >>> A = u @ np.diag(s) @ vT
+    >>> A = A.astype(np.float32)
+    >>> u2, s2, vT2 = svds(A, k=2)
+    >>> np.allclose(s2, s)
+    True
+
+    The angles between the individual exact and computed singular vectors
+    are not so small.
+
+    >>> s_a(u2[:, :1], u[:, :1]) + s_a(u2[:, 1:], u[:, 1:]) > 1e-3
+    True
+
+    >>> (s_a(vT2[:1, :].T, vT[:1, :].T) +
+    ...  s_a(vT2[1:, :].T, vT[1:, :].T)) > 1e-3
+    True
+
+    As opposed to the angles between the 2-dimensional invariant subspaces
+    that these vectors span, which are small for the left singular vectors
+
+    >>> s_a(u2, u).sum() < 1e-6
+    True
+
+    as well as for the right singular vectors.
+
+    >>> s_a(vT2.T, vT.T).sum() < 1e-6
+    True
+
+    The next example follows that of 'sklearn.decomposition.TruncatedSVD'.
+
+    >>> rng = np.random.RandomState(0)
+    >>> X_dense = rng.random(size=(100, 100))
+    >>> X_dense[:, 2 * np.arange(50)] = 0
+    >>> X = csr_matrix(X_dense)
+    >>> _, singular_values, _ = svds(X, k=5)
+    >>> print(singular_values)
+    [ 4.3293...  4.4491...  4.5420...  4.5987... 35.2410...]
+
+    The function can be called without ever explicitly constructing
+    the transpose of the input matrix.
+
+    >>> from scipy.linalg import svd
+    >>> from scipy.sparse import rand
+    >>> from scipy.sparse.linalg import aslinearoperator
+    >>> rng = np.random.RandomState(0)
+    >>> G = rand(8, 9, density=0.5, random_state=rng)
+    >>> Glo = aslinearoperator(G)
+    >>> _, singular_values_svds, _ = svds(Glo, k=5)
+    >>> _, singular_values_svd, _ = svd(G.toarray())
+    >>> np.allclose(singular_values_svds, singular_values_svd[-4::-1])
+    True
+
+    The most memory-efficient scenario is where neither
+    the original matrix nor its transpose is explicitly constructed.
+    Our example computes the smallest singular values and vectors
+    of a 'LinearOperator' constructed from the numpy function 'np.diff',
+    applied column-wise to be consistent with 'LinearOperator' operating
+    on columns.
+
+    >>> from scipy.sparse.linalg import LinearOperator, aslinearoperator
+    >>> diff0 = lambda a: np.diff(a, axis=0)
+
+    Let us create the matrix from 'diff0' to be used for validation only.
+
+    >>> n = 5  # The dimension of the space.
+    >>> M_from_diff0 = diff0(np.eye(n))
+    >>> print(M_from_diff0.astype(int))
+    [[-1  1  0  0  0]
+     [ 0 -1  1  0  0]
+     [ 0  0 -1  1  0]
+     [ 0  0  0 -1  1]]
+
+    The matrix 'M_from_diff0' is bi-diagonal and could alternatively be
+    created directly by
+
+    >>> M = - np.eye(n - 1, n, dtype=int)
+    >>> np.fill_diagonal(M[:,1:], 1)
+    >>> np.allclose(M, M_from_diff0)
+    True
+
+    Its transpose
+
+    >>> print(M.T)
+    [[-1  0  0  0]
+     [ 1 -1  0  0]
+     [ 0  1 -1  0]
+     [ 0  0  1 -1]
+     [ 0  0  0  1]]
+
+    can be viewed as the incidence matrix; see
+    Incidence matrix, (2022, Nov. 19), Wikipedia, https://w.wiki/5YXU,
+    of a linear graph with 5 vertices and 4 edges. The 5x5 normal matrix
+    'M.T @ M' thus is
+
+    >>> print(M.T @ M)
+    [[ 1 -1  0  0  0]
+     [-1  2 -1  0  0]
+     [ 0 -1  2 -1  0]
+     [ 0  0 -1  2 -1]
+     [ 0  0  0 -1  1]]
+
+    the graph Laplacian, while the smaller 4x4 normal matrix 'M @ M.T'
+    actually used in 'svds'
+
+    >>> print(M @ M.T)
+    [[ 2 -1  0  0]
+     [-1  2 -1  0]
+     [ 0 -1  2 -1]
+     [ 0  0 -1  2]]
+
+    is the so-called edge-based Laplacian; see
+    Symmetric Laplacian via the incidence matrix, in Laplacian matrix,
+    (2022, Nov. 19), Wikipedia, https://w.wiki/5YXW.
+
+    The 'LinearOperator' setup needs the options 'rmatvec' and 'rmatmat',
+    which implement multiplication by the matrix transpose 'M.T', but we
+    want to be matrix-free to save memory, so, knowing what 'M.T' looks
+    like, we manually construct the following function to be used in
+    'rmatmat=diff0t'.
+
+    >>> def diff0t(a):
+    ...     if a.ndim == 1:
+    ...         a = a[:,np.newaxis]  # Turn 1D into 2D array
+    ...     d = np.zeros((a.shape[0] + 1, a.shape[1]), dtype=a.dtype)
+    ...     d[0, :] = - a[0, :]
+    ...     d[1:-1, :] = a[0:-1, :] - a[1:, :]
+    ...     d[-1, :] = a[-1, :]
+    ...     return d
+
+    We check that our function 'diff0t' for the matrix transpose is valid.
+
+    >>> np.allclose(M.T, diff0t(np.eye(n-1)))
+    True
+
+    Now we set up our matrix-free 'LinearOperator' called 'diff0_func_aslo'
+    and, for validation, the matrix-based 'diff0_matrix_aslo'.
+
+    >>> def diff0_func_aslo_def(n):
+    ...     return LinearOperator(matvec=diff0,
+    ...                           matmat=diff0,
+    ...                           rmatvec=diff0t,
+    ...                           rmatmat=diff0t,
+    ...                           shape=(n - 1, n))
+    >>> diff0_func_aslo = diff0_func_aslo_def(n)
+    >>> diff0_matrix_aslo = aslinearoperator(M_from_diff0)
+
+    And validate both the matrix and its transpose in 'LinearOperator'.
+
+    >>> np.allclose(diff0_func_aslo(np.eye(n)),
+    ...             diff0_matrix_aslo(np.eye(n)))
+    True
+    >>> np.allclose(diff0_func_aslo.T(np.eye(n-1)),
+    ...             diff0_matrix_aslo.T(np.eye(n-1)))
+    True
+
+    Having the 'LinearOperator' setup validated, we run the solver.
+
+    >>> n = 100
+    >>> diff0_func_aslo = diff0_func_aslo_def(n)
+    >>> u, s, vT = svds(diff0_func_aslo, k=3, which='SM')
+
+    The singular values squared and the singular vectors are known
+    explicitly; see
+    Pure Dirichlet boundary conditions, in
+    Eigenvalues and eigenvectors of the second derivative,
+    (2022, Nov. 19), Wikipedia, https://w.wiki/5YX6,
+    since 'diff' corresponds to the first
+    derivative, and its smaller (n-1) x (n-1) normal matrix
+    'M @ M.T' represents the discrete second derivative with Dirichlet
+    boundary conditions. We use these analytic expressions for validation.
+
+    >>> se = 2. * np.sin(np.pi * np.arange(1, 4) / (2. * n))
+    >>> ue = np.sqrt(2 / n) * np.sin(np.pi * np.outer(np.arange(1, n),
+    ...                              np.arange(1, 4)) / n)
+    >>> np.allclose(s, se, atol=1e-3)
+    True
+    >>> print(np.allclose(np.abs(u), np.abs(ue), atol=1e-6))
+    True
+    """
+    args = _iv(A, k, ncv, tol, which, v0, maxiter, return_singular_vectors,
+               solver, random_state)
+    (A, k, ncv, tol, which, v0, maxiter,
+     return_singular_vectors, solver, random_state) = args
+
+    largest = (which == 'LM')
+    n, m = A.shape
+
+    if n >= m:
+        X_dot = A.matvec
+        X_matmat = A.matmat
+        XH_dot = A.rmatvec
+        XH_mat = A.rmatmat
+        transpose = False
+    else:
+        X_dot = A.rmatvec
+        X_matmat = A.rmatmat
+        XH_dot = A.matvec
+        XH_mat = A.matmat
+        transpose = True
+
+        dtype = getattr(A, 'dtype', None)
+        if dtype is None:
+            dtype = A.dot(np.zeros([m, 1])).dtype
+
+    def matvec_XH_X(x):
+        return XH_dot(X_dot(x))
+
+    def matmat_XH_X(x):
+        return XH_mat(X_matmat(x))
+
+    XH_X = LinearOperator(matvec=matvec_XH_X, dtype=A.dtype,
+                          matmat=matmat_XH_X,
+                          shape=(min(A.shape), min(A.shape)))
+
+    # Get a low rank approximation of the implicitly defined gramian matrix.
+    # This is not a stable way to approach the problem.
+    if solver == 'lobpcg':
+
+        if k == 1 and v0 is not None:
+            X = np.reshape(v0, (-1, 1))
+        else:
+            X = random_state.standard_normal(size=(min(A.shape), k))
+
+        _, eigvec = lobpcg(XH_X, X, tol=tol ** 2, maxiter=maxiter,
+                           largest=largest)
+        # lobpcg does not guarantee exactly orthonormal eigenvectors
+        # until after gh-16320 is merged
+        eigvec, _ = np.linalg.qr(eigvec)
+
+    elif solver == 'propack':
+        if not HAS_PROPACK:
+            raise ValueError("`solver='propack'` is opt-in due "
+                             "to potential issues on Windows, "
+                             "it can be enabled by setting the "
+                             "`SCIPY_USE_PROPACK` environment "
+                             "variable before importing scipy")
+        jobu = return_singular_vectors in {True, 'u'}
+        jobv = return_singular_vectors in {True, 'vh'}
+        irl_mode = (which == 'SM')
+        res = _svdp(A, k=k, tol=tol**2, which=which, maxiter=None,
+                    compute_u=jobu, compute_v=jobv, irl_mode=irl_mode,
+                    kmax=maxiter, v0=v0, random_state=random_state)
+
+        u, s, vh, _ = res  # but we'll ignore bnd, the last output
+
+        # PROPACK order appears to be largest first. `svds` output order is not
+        # guaranteed, according to documentation, but for ARPACK and LOBPCG
+        # they actually are ordered smallest to largest, so reverse for
+        # consistency.
+        s = s[::-1]
+        u = u[:, ::-1]
+        vh = vh[::-1]
+
+        u = u if jobu else None
+        vh = vh if jobv else None
+
+        if return_singular_vectors:
+            return u, s, vh
+        else:
+            return s
+
+    elif solver == 'arpack' or solver is None:
+        if v0 is None:
+            v0 = random_state.standard_normal(size=(min(A.shape),))
+        _, eigvec = eigsh(XH_X, k=k, tol=tol ** 2, maxiter=maxiter,
+                          ncv=ncv, which=which, v0=v0)
+        # arpack does not guarantee exactly orthonormal eigenvectors
+        # for clustered eigenvalues, especially in complex arithmetic
+        eigvec, _ = np.linalg.qr(eigvec)
+
+    # the eigenvectors eigvec must be orthonormal here; see gh-16712
+    Av = X_matmat(eigvec)
+    if not return_singular_vectors:
+        s = svd(Av, compute_uv=False, overwrite_a=True)
+        return s[::-1]
+
+    # compute the left singular vectors of X and update the right ones
+    # accordingly
+    u, s, vh = svd(Av, full_matrices=False, overwrite_a=True)
+    u = u[:, ::-1]
+    s = s[::-1]
+    vh = vh[::-1]
+
+    jobu = return_singular_vectors in {True, 'u'}
+    jobv = return_singular_vectors in {True, 'vh'}
+
+    if transpose:
+        u_tmp = eigvec @ _herm(vh) if jobu else None
+        vh = _herm(u) if jobv else None
+        u = u_tmp
+    else:
+        if not jobu:
+            u = None
+        vh = vh @ _herm(eigvec) if jobv else None
+
+    return u, s, vh
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/_svds_doc.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/_svds_doc.py
new file mode 100644
index 00000000..3c102502
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/_svds_doc.py
@@ -0,0 +1,398 @@
+
+def _svds_arpack_doc(A, k=6, ncv=None, tol=0, which='LM', v0=None,
+                     maxiter=None, return_singular_vectors=True,
+                     solver='arpack', random_state=None):
+    """
+    Partial singular value decomposition of a sparse matrix using ARPACK.
+
+    Compute the largest or smallest `k` singular values and corresponding
+    singular vectors of a sparse matrix `A`. The order in which the singular
+    values are returned is not guaranteed.
+
+    In the descriptions below, let ``M, N = A.shape``.
+
+    Parameters
+    ----------
+    A : sparse matrix or LinearOperator
+        Matrix to decompose.
+    k : int, optional
+        Number of singular values and singular vectors to compute.
+        Must satisfy ``1 <= k <= min(M, N) - 1``.
+        Default is 6.
+    ncv : int, optional
+        The number of Lanczos vectors generated.
+        The default is ``min(n, max(2*k + 1, 20))``.
+        If specified, must satisfy ``k + 1 < ncv < min(M, N)``; ``ncv > 2*k``
+        is recommended.
+    tol : float, optional
+        Tolerance for singular values. Zero (default) means machine precision.
+    which : {'LM', 'SM'}
+        Which `k` singular values to find: either the largest magnitude ('LM')
+        or smallest magnitude ('SM') singular values.
+    v0 : ndarray, optional
+        The starting vector for iteration:
+        an (approximate) left singular vector if ``N > M`` and a right singular
+        vector otherwise. Must be of length ``min(M, N)``.
+        Default: random
+    maxiter : int, optional
+        Maximum number of Arnoldi update iterations allowed;
+        default is ``min(M, N) * 10``.
+    return_singular_vectors : {True, False, "u", "vh"}
+        Singular values are always computed and returned; this parameter
+        controls the computation and return of singular vectors.
+
+        - ``True``: return singular vectors.
+        - ``False``: do not return singular vectors.
+        - ``"u"``: if ``M <= N``, compute only the left singular vectors and
+          return ``None`` for the right singular vectors. Otherwise, compute
+          all singular vectors.
+        - ``"vh"``: if ``M > N``, compute only the right singular vectors and
+          return ``None`` for the left singular vectors. Otherwise, compute
+          all singular vectors.
+
+    solver :  {'arpack', 'propack', 'lobpcg'}, optional
+            This is the solver-specific documentation for ``solver='arpack'``.
+            :ref:`'lobpcg' <sparse.linalg.svds-lobpcg>` and
+            :ref:`'propack' <sparse.linalg.svds-propack>`
+            are also supported.
+    random_state : {None, int, `numpy.random.Generator`,
+                    `numpy.random.RandomState`}, optional
+
+        Pseudorandom number generator state used to generate resamples.
+
+        If `random_state` is ``None`` (or `np.random`), the
+        `numpy.random.RandomState` singleton is used.
+        If `random_state` is an int, a new ``RandomState`` instance is used,
+        seeded with `random_state`.
+        If `random_state` is already a ``Generator`` or ``RandomState``
+        instance then that instance is used.
+    options : dict, optional
+        A dictionary of solver-specific options. No solver-specific options
+        are currently supported; this parameter is reserved for future use.
+
+    Returns
+    -------
+    u : ndarray, shape=(M, k)
+        Unitary matrix having left singular vectors as columns.
+    s : ndarray, shape=(k,)
+        The singular values.
+    vh : ndarray, shape=(k, N)
+        Unitary matrix having right singular vectors as rows.
+
+    Notes
+    -----
+    This is a naive implementation using ARPACK as an eigensolver
+    on ``A.conj().T @ A`` or ``A @ A.conj().T``, depending on which one is more
+    efficient.
+
+    Examples
+    --------
+    Construct a matrix ``A`` from singular values and vectors.
+
+    >>> import numpy as np
+    >>> from scipy.stats import ortho_group
+    >>> from scipy.sparse import csc_matrix, diags
+    >>> from scipy.sparse.linalg import svds
+    >>> rng = np.random.default_rng()
+    >>> orthogonal = csc_matrix(ortho_group.rvs(10, random_state=rng))
+    >>> s = [0.0001, 0.001, 3, 4, 5]  # singular values
+    >>> u = orthogonal[:, :5]         # left singular vectors
+    >>> vT = orthogonal[:, 5:].T      # right singular vectors
+    >>> A = u @ diags(s) @ vT
+
+    With only three singular values/vectors, the SVD approximates the original
+    matrix.
+
+    >>> u2, s2, vT2 = svds(A, k=3, solver='arpack')
+    >>> A2 = u2 @ np.diag(s2) @ vT2
+    >>> np.allclose(A2, A.toarray(), atol=1e-3)
+    True
+
+    With all five singular values/vectors, we can reproduce the original
+    matrix.
+
+    >>> u3, s3, vT3 = svds(A, k=5, solver='arpack')
+    >>> A3 = u3 @ np.diag(s3) @ vT3
+    >>> np.allclose(A3, A.toarray())
+    True
+
+    The singular values match the expected singular values, and the singular
+    vectors are as expected up to a difference in sign.
+
+    >>> (np.allclose(s3, s) and
+    ...  np.allclose(np.abs(u3), np.abs(u.toarray())) and
+    ...  np.allclose(np.abs(vT3), np.abs(vT.toarray())))
+    True
+
+    The singular vectors are also orthogonal.
+
+    >>> (np.allclose(u3.T @ u3, np.eye(5)) and
+    ...  np.allclose(vT3 @ vT3.T, np.eye(5)))
+    True
+    """
+    pass
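+
+# A minimal usage sketch (not part of the upstream file) for the
+# ``return_singular_vectors`` modes documented above; the matrix built with
+# ``scipy.sparse.random`` and its shape are illustrative assumptions.
+if __name__ == "__main__":
+    from scipy.sparse import random as sparse_random
+    from scipy.sparse.linalg import svds
+
+    A = sparse_random(20, 10, density=0.5, random_state=0)
+    s_only = svds(A, k=3, return_singular_vectors=False)  # shape (3,)
+    u, s, vh = svds(A, k=3)  # shapes (20, 3), (3,), (3, 10)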
+
+
+def _svds_lobpcg_doc(A, k=6, ncv=None, tol=0, which='LM', v0=None,
+                     maxiter=None, return_singular_vectors=True,
+                     solver='lobpcg', random_state=None):
+    """
+    Partial singular value decomposition of a sparse matrix using LOBPCG.
+
+    Compute the largest or smallest `k` singular values and corresponding
+    singular vectors of a sparse matrix `A`. The order in which the singular
+    values are returned is not guaranteed.
+
+    In the descriptions below, let ``M, N = A.shape``.
+
+    Parameters
+    ----------
+    A : sparse matrix or LinearOperator
+        Matrix to decompose.
+    k : int, default: 6
+        Number of singular values and singular vectors to compute.
+        Must satisfy ``1 <= k <= min(M, N) - 1``.
+    ncv : int, optional
+        Ignored.
+    tol : float, optional
+        Tolerance for singular values. Zero (default) means machine precision.
+    which : {'LM', 'SM'}
+        Which `k` singular values to find: either the largest magnitude ('LM')
+        or smallest magnitude ('SM') singular values.
+    v0 : ndarray, optional
+        If `k` is 1, the starting vector for iteration:
+        an (approximate) left singular vector if ``N > M`` and a right singular
+        vector otherwise. Must be of length ``min(M, N)``.
+        Ignored otherwise.
+        Default: random
+    maxiter : int, default: 20
+        Maximum number of iterations.
+    return_singular_vectors : {True, False, "u", "vh"}
+        Singular values are always computed and returned; this parameter
+        controls the computation and return of singular vectors.
+
+        - ``True``: return singular vectors.
+        - ``False``: do not return singular vectors.
+        - ``"u"``: if ``M <= N``, compute only the left singular vectors and
+          return ``None`` for the right singular vectors. Otherwise, compute
+          all singular vectors.
+        - ``"vh"``: if ``M > N``, compute only the right singular vectors and
+          return ``None`` for the left singular vectors. Otherwise, compute
+          all singular vectors.
+
+    solver :  {'arpack', 'propack', 'lobpcg'}, optional
+            This is the solver-specific documentation for ``solver='lobpcg'``.
+            :ref:`'arpack' <sparse.linalg.svds-arpack>` and
+            :ref:`'propack' <sparse.linalg.svds-propack>`
+            are also supported.
+    random_state : {None, int, `numpy.random.Generator`,
+                    `numpy.random.RandomState`}, optional
+
+        Pseudorandom number generator state used to generate resamples.
+
+        If `random_state` is ``None`` (or `np.random`), the
+        `numpy.random.RandomState` singleton is used.
+        If `random_state` is an int, a new ``RandomState`` instance is used,
+        seeded with `random_state`.
+        If `random_state` is already a ``Generator`` or ``RandomState``
+        instance then that instance is used.
+    options : dict, optional
+        A dictionary of solver-specific options. No solver-specific options
+        are currently supported; this parameter is reserved for future use.
+
+    Returns
+    -------
+    u : ndarray, shape=(M, k)
+        Unitary matrix having left singular vectors as columns.
+    s : ndarray, shape=(k,)
+        The singular values.
+    vh : ndarray, shape=(k, N)
+        Unitary matrix having right singular vectors as rows.
+
+    Notes
+    -----
+    This is a naive implementation using LOBPCG as an eigensolver
+    on ``A.conj().T @ A`` or ``A @ A.conj().T``, depending on which one is more
+    efficient.
+
+    Examples
+    --------
+    Construct a matrix ``A`` from singular values and vectors.
+
+    >>> import numpy as np
+    >>> from scipy.stats import ortho_group
+    >>> from scipy.sparse import csc_matrix, diags
+    >>> from scipy.sparse.linalg import svds
+    >>> rng = np.random.default_rng()
+    >>> orthogonal = csc_matrix(ortho_group.rvs(10, random_state=rng))
+    >>> s = [0.0001, 0.001, 3, 4, 5]  # singular values
+    >>> u = orthogonal[:, :5]         # left singular vectors
+    >>> vT = orthogonal[:, 5:].T      # right singular vectors
+    >>> A = u @ diags(s) @ vT
+
+    With only three singular values/vectors, the SVD approximates the original
+    matrix.
+
+    >>> u2, s2, vT2 = svds(A, k=3, solver='lobpcg')
+    >>> A2 = u2 @ np.diag(s2) @ vT2
+    >>> np.allclose(A2, A.toarray(), atol=1e-3)
+    True
+
+    With all five singular values/vectors, we can reproduce the original
+    matrix.
+
+    >>> u3, s3, vT3 = svds(A, k=5, solver='lobpcg')
+    >>> A3 = u3 @ np.diag(s3) @ vT3
+    >>> np.allclose(A3, A.toarray())
+    True
+
+    The singular values match the expected singular values, and the singular
+    vectors are as expected up to a difference in sign.
+
+    >>> (np.allclose(s3, s) and
+    ...  np.allclose(np.abs(u3), np.abs(u.toarray())) and
+    ...  np.allclose(np.abs(vT3), np.abs(vT.toarray())))
+    True
+
+    The singular vectors are also orthogonal.
+
+    >>> (np.allclose(u3.T @ u3, np.eye(5)) and
+    ...  np.allclose(vT3 @ vT3.T, np.eye(5)))
+    True
+
+    """
+    pass
+
+
+def _svds_propack_doc(A, k=6, ncv=None, tol=0, which='LM', v0=None,
+                      maxiter=None, return_singular_vectors=True,
+                      solver='propack', random_state=None):
+    """
+    Partial singular value decomposition of a sparse matrix using PROPACK.
+
+    Compute the largest or smallest `k` singular values and corresponding
+    singular vectors of a sparse matrix `A`. The order in which the singular
+    values are returned is not guaranteed.
+
+    In the descriptions below, let ``M, N = A.shape``.
+
+    Parameters
+    ----------
+    A : sparse matrix or LinearOperator
+        Matrix to decompose. If `A` is a ``LinearOperator``
+        object, it must define both ``matvec`` and ``rmatvec`` methods.
+    k : int, default: 6
+        Number of singular values and singular vectors to compute.
+        Must satisfy ``1 <= k <= min(M, N)``.
+    ncv : int, optional
+        Ignored.
+    tol : float, optional
+        The desired relative accuracy for computed singular values.
+        Zero (default) means machine precision.
+    which : {'LM', 'SM'}
+        Which `k` singular values to find: either the largest magnitude ('LM')
+        or smallest magnitude ('SM') singular values. Note that choosing
+        ``which='SM'`` will force the ``irl`` option to be set to ``True``.
+    v0 : ndarray, optional
+        Starting vector for iterations: must be of length ``A.shape[0]``.
+        If not specified, PROPACK will generate a starting vector.
+    maxiter : int, optional
+        Maximum number of iterations / maximal dimension of the Krylov
+        subspace. Default is ``10 * k``.
+    return_singular_vectors : {True, False, "u", "vh"}
+        Singular values are always computed and returned; this parameter
+        controls the computation and return of singular vectors.
+
+        - ``True``: return singular vectors.
+        - ``False``: do not return singular vectors.
+        - ``"u"``: compute only the left singular vectors; return ``None`` for
+          the right singular vectors.
+        - ``"vh"``: compute only the right singular vectors; return ``None``
+          for the left singular vectors.
+
+    solver :  {'arpack', 'propack', 'lobpcg'}, optional
+            This is the solver-specific documentation for ``solver='propack'``.
+            :ref:`'arpack' <sparse.linalg.svds-arpack>` and
+            :ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`
+            are also supported.
+    random_state : {None, int, `numpy.random.Generator`,
+                    `numpy.random.RandomState`}, optional
+
+        Pseudorandom number generator state used to generate resamples.
+
+        If `random_state` is ``None`` (or `np.random`), the
+        `numpy.random.RandomState` singleton is used.
+        If `random_state` is an int, a new ``RandomState`` instance is used,
+        seeded with `random_state`.
+        If `random_state` is already a ``Generator`` or ``RandomState``
+        instance then that instance is used.
+    options : dict, optional
+        A dictionary of solver-specific options. No solver-specific options
+        are currently supported; this parameter is reserved for future use.
+
+    Returns
+    -------
+    u : ndarray, shape=(M, k)
+        Unitary matrix having left singular vectors as columns.
+    s : ndarray, shape=(k,)
+        The singular values.
+    vh : ndarray, shape=(k, N)
+        Unitary matrix having right singular vectors as rows.
+
+    Notes
+    -----
+    This is an interface to the Fortran library PROPACK [1]_.
+    The current default is to run with IRL mode disabled unless seeking the
+    smallest singular values/vectors (``which='SM'``).
+
+    References
+    ----------
+
+    .. [1] Larsen, Rasmus Munk. "PROPACK-Software for large and sparse SVD
+       calculations." Available online. URL
+       http://sun.stanford.edu/~rmunk/PROPACK (2004): 2008-2009.
+
+    Examples
+    --------
+    Construct a matrix ``A`` from singular values and vectors.
+
+    >>> import numpy as np
+    >>> from scipy.stats import ortho_group
+    >>> from scipy.sparse import csc_matrix, diags
+    >>> from scipy.sparse.linalg import svds
+    >>> rng = np.random.default_rng()
+    >>> orthogonal = csc_matrix(ortho_group.rvs(10, random_state=rng))
+    >>> s = [0.0001, 0.001, 3, 4, 5]  # singular values
+    >>> u = orthogonal[:, :5]         # left singular vectors
+    >>> vT = orthogonal[:, 5:].T      # right singular vectors
+    >>> A = u @ diags(s) @ vT
+
+    With only three singular values/vectors, the SVD approximates the original
+    matrix.
+
+    >>> u2, s2, vT2 = svds(A, k=3, solver='propack')
+    >>> A2 = u2 @ np.diag(s2) @ vT2
+    >>> np.allclose(A2, A.toarray(), atol=1e-3)
+    True
+
+    With all five singular values/vectors, we can reproduce the original
+    matrix.
+
+    >>> u3, s3, vT3 = svds(A, k=5, solver='propack')
+    >>> A3 = u3 @ np.diag(s3) @ vT3
+    >>> np.allclose(A3, A.toarray())
+    True
+
+    The singular values match the expected singular values, and the singular
+    vectors are as expected up to a difference in sign.
+
+    >>> (np.allclose(s3, s) and
+    ...  np.allclose(np.abs(u3), np.abs(u.toarray())) and
+    ...  np.allclose(np.abs(vT3), np.abs(vT.toarray())))
+    True
+
+    The singular vectors are also orthogonal.
+
+    >>> (np.allclose(u3.T @ u3, np.eye(5)) and
+    ...  np.allclose(vT3 @ vT3.T, np.eye(5)))
+    True
+
+    """
+    pass
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/arpack/COPYING b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/arpack/COPYING
new file mode 100644
index 00000000..e87667e1
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/arpack/COPYING
@@ -0,0 +1,45 @@
+
+BSD Software License
+
+Pertains to ARPACK and P_ARPACK
+
+Copyright (c) 1996-2008 Rice University.
+Developed by D.C. Sorensen, R.B. Lehoucq, C. Yang, and K. Maschhoff.
+All rights reserved.
+
+Arpack has been renamed to arpack-ng.
+
+Copyright (c) 2001-2011 - Scilab Enterprises
+Updated by Allan Cornet, Sylvestre Ledru.
+
+Copyright (c) 2010 - Jordi Gutiérrez Hermoso (Octave patch)
+
+Copyright (c) 2007 - Sébastien Fabbro (gentoo patch)
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+- Redistributions of source code must retain the above copyright
+  notice, this list of conditions and the following disclaimer.
+
+- Redistributions in binary form must reproduce the above copyright
+  notice, this list of conditions and the following disclaimer listed
+  in this license in the documentation and/or other materials
+  provided with the distribution.
+
+- Neither the name of the copyright holders nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/arpack/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/arpack/__init__.py
new file mode 100644
index 00000000..679b9448
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/arpack/__init__.py
@@ -0,0 +1,20 @@
+"""
+Eigenvalue solver using iterative methods.
+
+Find k eigenvectors and eigenvalues of a matrix A using the
+Arnoldi/Lanczos iterative methods from ARPACK [1]_,[2]_.
+
+These methods are most useful for large sparse matrices.
+
+  - eigs(A,k)
+  - eigsh(A,k)
+
+References
+----------
+.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
+.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang,  ARPACK USERS GUIDE:
+   Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
+   Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
+
+"""
+from .arpack import *
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/arpack/arpack.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/arpack/arpack.py
new file mode 100644
index 00000000..76a0542c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/arpack/arpack.py
@@ -0,0 +1,1699 @@
+"""
+Find a few eigenvectors and eigenvalues of a matrix.
+
+
+Uses ARPACK: http://www.caam.rice.edu/software/ARPACK/
+
+"""
+# Wrapper implementation notes
+#
+# ARPACK Entry Points
+# -------------------
+# The entry points to ARPACK are
+# - (s,d)seupd : single and double precision symmetric matrix
+# - (s,d,c,z)neupd: single,double,complex,double complex general matrix
+# This wrapper puts the *neupd (general matrix) interfaces in eigs()
+# and the *seupd (symmetric matrix) in eigsh().
+# There is no specialized interface for complex Hermitian matrices.
+# To find eigenvalues of a complex Hermitian matrix you
+# may use eigsh(), but eigsh() will simply call eigs()
+# and return the real part of the eigenvalues thus obtained.
+
+# Number of eigenvalues returned and complex eigenvalues
+# ------------------------------------------------------
+# The ARPACK nonsymmetric real and double interfaces (s,d)naupd return
+# eigenvalues and eigenvectors in real (float,double) arrays.
+# Since the eigenvalues and eigenvectors are, in general, complex
+# ARPACK puts the real and imaginary parts in consecutive entries
+# in real-valued arrays.   This wrapper puts the real entries
+# into complex data types and attempts to return the requested eigenvalues
+# and eigenvectors.
+
+
+# Solver modes
+# ------------
+# ARPACK can handle shifted and shift-invert computations
+# for eigenvalues by providing a shift (sigma) and a solver.
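+#
+# For example (illustrative): eigs(A, k=6, sigma=2.0) finds the six
+# eigenvalues closest to 2.0 by factorizing [A - 2.0*I] and iterating
+# with its inverse.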
+
+__docformat__ = "restructuredtext en"
+
+__all__ = ['eigs', 'eigsh', 'ArpackError', 'ArpackNoConvergence']
+
+from . import _arpack
+arpack_int = _arpack.timing.nbx.dtype
+
+import numpy as np
+import warnings
+from scipy.sparse.linalg._interface import aslinearoperator, LinearOperator
+from scipy.sparse import eye, issparse, isspmatrix, isspmatrix_csr
+from scipy.linalg import eig, eigh, lu_factor, lu_solve
+from scipy.sparse._sputils import isdense, is_pydata_spmatrix
+from scipy.sparse.linalg import gmres, splu
+from scipy._lib._util import _aligned_zeros
+from scipy._lib._threadsafety import ReentrancyLock
+
+
+_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'}
+_ndigits = {'f': 5, 'd': 12, 'F': 5, 'D': 12}
+
+DNAUPD_ERRORS = {
+    0: "Normal exit.",
+    1: "Maximum number of iterations taken. "
+       "All possible eigenvalues of OP has been found. IPARAM(5) "
+       "returns the number of wanted converged Ritz values.",
+    2: "No longer an informational error. Deprecated starting "
+       "with release 2 of ARPACK.",
+    3: "No shifts could be applied during a cycle of the "
+       "Implicitly restarted Arnoldi iteration. One possibility "
+       "is to increase the size of NCV relative to NEV. ",
+    -1: "N must be positive.",
+    -2: "NEV must be positive.",
+    -3: "NCV-NEV >= 2 and less than or equal to N.",
+    -4: "The maximum number of Arnoldi update iterations allowed "
+        "must be greater than zero.",
+    -5: " WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
+    -6: "BMAT must be one of 'I' or 'G'.",
+    -7: "Length of private work array WORKL is not sufficient.",
+    -8: "Error return from LAPACK eigenvalue calculation;",
+    -9: "Starting vector is zero.",
+    -10: "IPARAM(7) must be 1,2,3,4.",
+    -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
+    -12: "IPARAM(1) must be equal to 0 or 1.",
+    -13: "NEV and WHICH = 'BE' are incompatible.",
+    -9999: "Could not build an Arnoldi factorization. "
+           "IPARAM(5) returns the size of the current Arnoldi "
+           "factorization. The user is advised to check that "
+           "enough workspace and array storage has been allocated."
+}
+
+SNAUPD_ERRORS = DNAUPD_ERRORS
+
+ZNAUPD_ERRORS = DNAUPD_ERRORS.copy()
+ZNAUPD_ERRORS[-10] = "IPARAM(7) must be 1,2,3."
+
+CNAUPD_ERRORS = ZNAUPD_ERRORS
+
+DSAUPD_ERRORS = {
+    0: "Normal exit.",
+    1: "Maximum number of iterations taken. "
+       "All possible eigenvalues of OP has been found.",
+    2: "No longer an informational error. Deprecated starting with "
+       "release 2 of ARPACK.",
+    3: "No shifts could be applied during a cycle of the Implicitly "
+       "restarted Arnoldi iteration. One possibility is to increase "
+       "the size of NCV relative to NEV. ",
+    -1: "N must be positive.",
+    -2: "NEV must be positive.",
+    -3: "NCV must be greater than NEV and less than or equal to N.",
+    -4: "The maximum number of Arnoldi update iterations allowed "
+        "must be greater than zero.",
+    -5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
+    -6: "BMAT must be one of 'I' or 'G'.",
+    -7: "Length of private work array WORKL is not sufficient.",
+    -8: "Error return from trid. eigenvalue calculation; "
+        "Informational error from LAPACK routine dsteqr .",
+    -9: "Starting vector is zero.",
+    -10: "IPARAM(7) must be 1,2,3,4,5.",
+    -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
+    -12: "IPARAM(1) must be equal to 0 or 1.",
+    -13: "NEV and WHICH = 'BE' are incompatible. ",
+    -9999: "Could not build an Arnoldi factorization. "
+           "IPARAM(5) returns the size of the current Arnoldi "
+           "factorization. The user is advised to check that "
+           "enough workspace and array storage has been allocated.",
+}
+
+SSAUPD_ERRORS = DSAUPD_ERRORS
+
+DNEUPD_ERRORS = {
+    0: "Normal exit.",
+    1: "The Schur form computed by LAPACK routine dlahqr "
+       "could not be reordered by LAPACK routine dtrsen. "
+       "Re-enter subroutine dneupd  with IPARAM(5)NCV and "
+       "increase the size of the arrays DR and DI to have "
+       "dimension at least dimension NCV and allocate at least NCV "
+       "columns for Z. NOTE: Not necessary if Z and V share "
+       "the same space. Please notify the authors if this error"
+       "occurs.",
+    -1: "N must be positive.",
+    -2: "NEV must be positive.",
+    -3: "NCV-NEV >= 2 and less than or equal to N.",
+    -5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
+    -6: "BMAT must be one of 'I' or 'G'.",
+    -7: "Length of private work WORKL array is not sufficient.",
+    -8: "Error return from calculation of a real Schur form. "
+        "Informational error from LAPACK routine dlahqr .",
+    -9: "Error return from calculation of eigenvectors. "
+        "Informational error from LAPACK routine dtrevc.",
+    -10: "IPARAM(7) must be 1,2,3,4.",
+    -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
+    -12: "HOWMNY = 'S' not yet implemented",
+    -13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
+    -14: "DNAUPD  did not find any eigenvalues to sufficient "
+         "accuracy.",
+    -15: "DNEUPD got a different count of the number of converged "
+         "Ritz values than DNAUPD got.  This indicates the user "
+         "probably made an error in passing data from DNAUPD to "
+         "DNEUPD or that the data was modified before entering "
+         "DNEUPD",
+}
+
+SNEUPD_ERRORS = DNEUPD_ERRORS.copy()
+SNEUPD_ERRORS[1] = ("The Schur form computed by LAPACK routine slahqr "
+                    "could not be reordered by LAPACK routine strsen . "
+                    "Re-enter subroutine dneupd  with IPARAM(5)=NCV and "
+                    "increase the size of the arrays DR and DI to have "
+                    "dimension at least dimension NCV and allocate at least "
+                    "NCV columns for Z. NOTE: Not necessary if Z and V share "
+                    "the same space. Please notify the authors if this error "
+                    "occurs.")
+SNEUPD_ERRORS[-14] = ("SNAUPD did not find any eigenvalues to sufficient "
+                      "accuracy.")
+SNEUPD_ERRORS[-15] = ("SNEUPD got a different count of the number of "
+                      "converged Ritz values than SNAUPD got.  This indicates "
+                      "the user probably made an error in passing data from "
+                      "SNAUPD to SNEUPD or that the data was modified before "
+                      "entering SNEUPD")
+
+ZNEUPD_ERRORS = {0: "Normal exit.",
+                 1: "The Schur form computed by LAPACK routine csheqr "
+                    "could not be reordered by LAPACK routine ztrsen. "
+                    "Re-enter subroutine zneupd with IPARAM(5)=NCV and "
+                    "increase the size of the array D to have "
+                    "dimension at least dimension NCV and allocate at least "
+                    "NCV columns for Z. NOTE: Not necessary if Z and V share "
+                    "the same space. Please notify the authors if this error "
+                    "occurs.",
+                 -1: "N must be positive.",
+                 -2: "NEV must be positive.",
+                 -3: "NCV-NEV >= 1 and less than or equal to N.",
+                 -5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
+                 -6: "BMAT must be one of 'I' or 'G'.",
+                 -7: "Length of private work WORKL array is not sufficient.",
+                 -8: "Error return from LAPACK eigenvalue calculation. "
+                     "This should never happened.",
+                 -9: "Error return from calculation of eigenvectors. "
+                     "Informational error from LAPACK routine ztrevc.",
+                 -10: "IPARAM(7) must be 1,2,3",
+                 -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
+                 -12: "HOWMNY = 'S' not yet implemented",
+                 -13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
+                 -14: "ZNAUPD did not find any eigenvalues to sufficient "
+                      "accuracy.",
+                 -15: "ZNEUPD got a different count of the number of "
+                      "converged Ritz values than ZNAUPD got.  This "
+                      "indicates the user probably made an error in passing "
+                      "data from ZNAUPD to ZNEUPD or that the data was "
+                      "modified before entering ZNEUPD"
+                 }
+
+CNEUPD_ERRORS = ZNEUPD_ERRORS.copy()
+CNEUPD_ERRORS[-14] = ("CNAUPD did not find any eigenvalues to sufficient "
+                      "accuracy.")
+CNEUPD_ERRORS[-15] = ("CNEUPD got a different count of the number of "
+                      "converged Ritz values than CNAUPD got.  This indicates "
+                      "the user probably made an error in passing data from "
+                      "CNAUPD to CNEUPD or that the data was modified before "
+                      "entering CNEUPD")
+
+DSEUPD_ERRORS = {
+    0: "Normal exit.",
+    -1: "N must be positive.",
+    -2: "NEV must be positive.",
+    -3: "NCV must be greater than NEV and less than or equal to N.",
+    -5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
+    -6: "BMAT must be one of 'I' or 'G'.",
+    -7: "Length of private work WORKL array is not sufficient.",
+    -8: ("Error return from trid. eigenvalue calculation; "
+         "Information error from LAPACK routine dsteqr."),
+    -9: "Starting vector is zero.",
+    -10: "IPARAM(7) must be 1,2,3,4,5.",
+    -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
+    -12: "NEV and WHICH = 'BE' are incompatible.",
+    -14: "DSAUPD  did not find any eigenvalues to sufficient accuracy.",
+    -15: "HOWMNY must be one of 'A' or 'S' if RVEC = .true.",
+    -16: "HOWMNY = 'S' not yet implemented",
+    -17: ("DSEUPD  got a different count of the number of converged "
+          "Ritz values than DSAUPD  got.  This indicates the user "
+          "probably made an error in passing data from DSAUPD  to "
+          "DSEUPD  or that the data was modified before entering  "
+          "DSEUPD.")
+}
+
+SSEUPD_ERRORS = DSEUPD_ERRORS.copy()
+SSEUPD_ERRORS[-14] = ("SSAUPD  did not find any eigenvalues "
+                      "to sufficient accuracy.")
+SSEUPD_ERRORS[-17] = ("SSEUPD  got a different count of the number of "
+                      "converged "
+                      "Ritz values than SSAUPD  got.  This indicates the user "
+                      "probably made an error in passing data from SSAUPD  to "
+                      "SSEUPD  or that the data was modified before entering  "
+                      "SSEUPD.")
+
+_SAUPD_ERRORS = {'d': DSAUPD_ERRORS,
+                 's': SSAUPD_ERRORS}
+_NAUPD_ERRORS = {'d': DNAUPD_ERRORS,
+                 's': SNAUPD_ERRORS,
+                 'z': ZNAUPD_ERRORS,
+                 'c': CNAUPD_ERRORS}
+_SEUPD_ERRORS = {'d': DSEUPD_ERRORS,
+                 's': SSEUPD_ERRORS}
+_NEUPD_ERRORS = {'d': DNEUPD_ERRORS,
+                 's': SNEUPD_ERRORS,
+                 'z': ZNEUPD_ERRORS,
+                 'c': CNEUPD_ERRORS}
+
+# accepted values of parameter WHICH in _SEUPD
+_SEUPD_WHICH = ['LM', 'SM', 'LA', 'SA', 'BE']
+
+# accepted values of parameter WHICH in _NAUPD
+_NEUPD_WHICH = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
+
+
+class ArpackError(RuntimeError):
+    """
+    ARPACK error
+    """
+
+    def __init__(self, info, infodict=_NAUPD_ERRORS):
+        msg = infodict.get(info, "Unknown error")
+        RuntimeError.__init__(self, "ARPACK error %d: %s" % (info, msg))
+
+
+class ArpackNoConvergence(ArpackError):
+    """
+    ARPACK iteration did not converge
+
+    Attributes
+    ----------
+    eigenvalues : ndarray
+        Partial result. Converged eigenvalues.
+    eigenvectors : ndarray
+        Partial result. Converged eigenvectors.
+
+    """
+
+    def __init__(self, msg, eigenvalues, eigenvectors):
+        ArpackError.__init__(self, -1, {-1: msg})
+        self.eigenvalues = eigenvalues
+        self.eigenvectors = eigenvectors
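+
+# Illustrative only (not part of the upstream file): the partial results can
+# be recovered from the exception, assuming some operator ``A`` for which
+# ARPACK stops early, e.g. because ``maxiter`` is tiny:
+#
+#     try:
+#         w, v = eigs(A, k=2, maxiter=5)
+#     except ArpackNoConvergence as exc:
+#         w, v = exc.eigenvalues, exc.eigenvectors  # converged subset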
+
+
+def choose_ncv(k):
+    """
+    Choose the number of Lanczos vectors based on the target number
+    of singular/eigen values and vectors to compute, k.
+    """
+    return max(2 * k + 1, 20)
+
+
+class _ArpackParams:
+    def __init__(self, n, k, tp, mode=1, sigma=None,
+                 ncv=None, v0=None, maxiter=None, which="LM", tol=0):
+        if k <= 0:
+            raise ValueError("k must be positive, k=%d" % k)
+
+        if maxiter is None:
+            maxiter = n * 10
+        if maxiter <= 0:
+            raise ValueError("maxiter must be positive, maxiter=%d" % maxiter)
+
+        if tp not in 'fdFD':
+            raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'")
+
+        if v0 is not None:
+            # ARPACK overwrites its initial resid, so make a copy
+            self.resid = np.array(v0, copy=True)
+            info = 1
+        else:
+            # ARPACK will use a random initial vector.
+            self.resid = np.zeros(n, tp)
+            info = 0
+
+        if sigma is None:
+            #sigma not used
+            self.sigma = 0
+        else:
+            self.sigma = sigma
+
+        if ncv is None:
+            ncv = choose_ncv(k)
+        ncv = min(ncv, n)
+
+        self.v = np.zeros((n, ncv), tp)  # holds Ritz vectors
+        self.iparam = np.zeros(11, arpack_int)
+
+        # set solver mode and parameters
+        ishfts = 1
+        self.mode = mode
+        self.iparam[0] = ishfts
+        self.iparam[2] = maxiter
+        self.iparam[3] = 1
+        self.iparam[6] = mode
+
+        self.n = n
+        self.tol = tol
+        self.k = k
+        self.maxiter = maxiter
+        self.ncv = ncv
+        self.which = which
+        self.tp = tp
+        self.info = info
+
+        self.converged = False
+        self.ido = 0
+
+    def _raise_no_convergence(self):
+        msg = "No convergence (%d iterations, %d/%d eigenvectors converged)"
+        k_ok = self.iparam[4]
+        num_iter = self.iparam[2]
+        try:
+            ev, vec = self.extract(True)
+        except ArpackError as err:
+            msg = "%s [%s]" % (msg, err)
+            ev = np.zeros((0,))
+            vec = np.zeros((self.n, 0))
+            k_ok = 0
+        raise ArpackNoConvergence(msg % (num_iter, k_ok, self.k), ev, vec)
+
+
+class _SymmetricArpackParams(_ArpackParams):
+    def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
+                 Minv_matvec=None, sigma=None,
+                 ncv=None, v0=None, maxiter=None, which="LM", tol=0):
+        # The following modes are supported:
+        #  mode = 1:
+        #    Solve the standard eigenvalue problem:
+        #      A*x = lambda*x :
+        #       A - symmetric
+        #    Arguments should be
+        #       matvec      = left multiplication by A
+        #       M_matvec    = None [not used]
+        #       Minv_matvec = None [not used]
+        #
+        #  mode = 2:
+        #    Solve the general eigenvalue problem:
+        #      A*x = lambda*M*x
+        #       A - symmetric
+        #       M - symmetric positive definite
+        #    Arguments should be
+        #       matvec      = left multiplication by A
+        #       M_matvec    = left multiplication by M
+        #       Minv_matvec = left multiplication by M^-1
+        #
+        #  mode = 3:
+        #    Solve the general eigenvalue problem in shift-invert mode:
+        #      A*x = lambda*M*x
+        #       A - symmetric
+        #       M - symmetric positive semi-definite
+        #    Arguments should be
+        #       matvec      = None [not used]
+        #       M_matvec    = left multiplication by M
+        #                     or None, if M is the identity
+        #       Minv_matvec = left multiplication by [A-sigma*M]^-1
+        #
+        #  mode = 4:
+        #    Solve the general eigenvalue problem in Buckling mode:
+        #      A*x = lambda*AG*x
+        #       A  - symmetric positive semi-definite
+        #       AG - symmetric indefinite
+        #    Arguments should be
+        #       matvec      = left multiplication by A
+        #       M_matvec    = None [not used]
+        #       Minv_matvec = left multiplication by [A-sigma*AG]^-1
+        #
+        #  mode = 5:
+        #    Solve the general eigenvalue problem in Cayley-transformed mode:
+        #      A*x = lambda*M*x
+        #       A - symmetric
+        #       M - symmetric positive semi-definite
+        #    Arguments should be
+        #       matvec      = left multiplication by A
+        #       M_matvec    = left multiplication by M
+        #                     or None, if M is the identity
+        #       Minv_matvec = left multiplication by [A-sigma*M]^-1
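+        #
+        # For example (illustrative): eigsh(A, k, sigma=0.5) with M=None is
+        # driven in mode 3, with Minv_matvec applying [A - 0.5*I]^-1 via a
+        # (sparse) LU solve for explicit matrices.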
+        if mode == 1:
+            if matvec is None:
+                raise ValueError("matvec must be specified for mode=1")
+            if M_matvec is not None:
+                raise ValueError("M_matvec cannot be specified for mode=1")
+            if Minv_matvec is not None:
+                raise ValueError("Minv_matvec cannot be specified for mode=1")
+
+            self.OP = matvec
+            self.B = lambda x: x
+            self.bmat = 'I'
+        elif mode == 2:
+            if matvec is None:
+                raise ValueError("matvec must be specified for mode=2")
+            if M_matvec is None:
+                raise ValueError("M_matvec must be specified for mode=2")
+            if Minv_matvec is None:
+                raise ValueError("Minv_matvec must be specified for mode=2")
+
+            self.OP = lambda x: Minv_matvec(matvec(x))
+            self.OPa = Minv_matvec
+            self.OPb = matvec
+            self.B = M_matvec
+            self.bmat = 'G'
+        elif mode == 3:
+            if matvec is not None:
+                raise ValueError("matvec must not be specified for mode=3")
+            if Minv_matvec is None:
+                raise ValueError("Minv_matvec must be specified for mode=3")
+
+            if M_matvec is None:
+                self.OP = Minv_matvec
+                self.OPa = Minv_matvec
+                self.B = lambda x: x
+                self.bmat = 'I'
+            else:
+                self.OP = lambda x: Minv_matvec(M_matvec(x))
+                self.OPa = Minv_matvec
+                self.B = M_matvec
+                self.bmat = 'G'
+        elif mode == 4:
+            if matvec is None:
+                raise ValueError("matvec must be specified for mode=4")
+            if M_matvec is not None:
+                raise ValueError("M_matvec must not be specified for mode=4")
+            if Minv_matvec is None:
+                raise ValueError("Minv_matvec must be specified for mode=4")
+            self.OPa = Minv_matvec
+            self.OP = lambda x: self.OPa(matvec(x))
+            self.B = matvec
+            self.bmat = 'G'
+        elif mode == 5:
+            if matvec is None:
+                raise ValueError("matvec must be specified for mode=5")
+            if Minv_matvec is None:
+                raise ValueError("Minv_matvec must be specified for mode=5")
+
+            self.OPa = Minv_matvec
+            self.A_matvec = matvec
+
+            if M_matvec is None:
+                self.OP = lambda x: Minv_matvec(matvec(x) + sigma * x)
+                self.B = lambda x: x
+                self.bmat = 'I'
+            else:
+                self.OP = lambda x: Minv_matvec(matvec(x)
+                                                + sigma * M_matvec(x))
+                self.B = M_matvec
+                self.bmat = 'G'
+        else:
+            raise ValueError("mode=%i not implemented" % mode)
+
+        if which not in _SEUPD_WHICH:
+            raise ValueError("which must be one of %s"
+                             % ' '.join(_SEUPD_WHICH))
+        if k >= n:
+            raise ValueError("k must be less than ndim(A), k=%d" % k)
+
+        _ArpackParams.__init__(self, n, k, tp, mode, sigma,
+                               ncv, v0, maxiter, which, tol)
+
+        if self.ncv > n or self.ncv <= k:
+            raise ValueError("ncv must be k<ncv<=n, ncv=%s" % self.ncv)
+
+        if k >= n - 1:
+            raise ValueError("k must be less than ndim(A)-1, k=%d" % k)
+
+        _ArpackParams.__init__(self, n, k, tp, mode, sigma,
+                               ncv, v0, maxiter, which, tol)
+
+        if self.ncv > n or self.ncv <= k + 1:
+            raise ValueError("ncv must be k+1<ncv<=n, ncv=%s" % self.ncv)
+
+                            # last eigenvalue is complex: the imaginary part
+                            # of the eigenvector has not been returned;
+                            # this can only happen if nreturned > k, so we
+                            # throw out this case.
+                            nreturned -= 1
+                    i += 1
+
+            else:
+                # real matrix, mode 3 or 4, imag(sigma) is nonzero:
+                # see remark 3 in neupd.f
+                # Build complex eigenvalues from real and imaginary parts
+                i = 0
+                while i <= k:
+                    if abs(d[i].imag) == 0:
+                        d[i] = np.dot(zr[:, i], self.matvec(zr[:, i]))
+                    else:
+                        if i < k:
+                            z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
+                            z[:, i + 1] = z[:, i].conjugate()
+                            d[i] = ((np.dot(zr[:, i],
+                                            self.matvec(zr[:, i]))
+                                     + np.dot(zr[:, i + 1],
+                                              self.matvec(zr[:, i + 1])))
+                                    + 1j * (np.dot(zr[:, i],
+                                                   self.matvec(zr[:, i + 1]))
+                                            - np.dot(zr[:, i + 1],
+                                                     self.matvec(zr[:, i]))))
+                            d[i + 1] = d[i].conj()
+                            i += 1
+                        else:
+                            # last eigenvalue is complex: the imaginary part
+                            # of the eigenvector has not been returned;
+                            # this can only happen if nreturned > k, so we
+                            # throw out this case.
+                            nreturned -= 1
+                    i += 1
+
+            # Now we have k+1 possible eigenvalues and eigenvectors
+            # Return the ones specified by the keyword "which"
+
+            if nreturned <= k:
+                # we got at most as many eigenvalues as we wanted
+                d = d[:nreturned]
+                z = z[:, :nreturned]
+            else:
+                # we got one extra eigenvalue (likely a cc pair, but which?)
+                if self.mode in (1, 2):
+                    rd = d
+                elif self.mode in (3, 4):
+                    rd = 1 / (d - self.sigma)
+
+                if self.which in ['LR', 'SR']:
+                    ind = np.argsort(rd.real)
+                elif self.which in ['LI', 'SI']:
+                    # for LI,SI ARPACK returns largest,smallest
+                    # abs(imaginary) (complex pairs come together)
+                    ind = np.argsort(abs(rd.imag))
+                else:
+                    ind = np.argsort(abs(rd))
+
+                if self.which in ['LR', 'LM', 'LI']:
+                    ind = ind[-k:][::-1]
+                elif self.which in ['SR', 'SM', 'SI']:
+                    ind = ind[:k]
+
+                d = d[ind]
+                z = z[:, ind]
+        else:
+            # complex is so much simpler...
+            d, z, ierr = self._arpack_extract(
+                return_eigenvectors, howmny, sselect, self.sigma, workev,
+                self.bmat, self.which, k, self.tol, self.resid,
+                self.v, self.iparam, self.ipntr,
+                self.workd, self.workl, self.rwork, ierr)
+
+            if ierr != 0:
+                raise ArpackError(ierr, infodict=self.extract_infodict)
+
+            k_ok = self.iparam[4]
+            d = d[:k_ok]
+            z = z[:, :k_ok]
+
+        if return_eigenvectors:
+            return d, z
+        else:
+            return d
+
+
+def _aslinearoperator_with_dtype(m):
+    m = aslinearoperator(m)
+    if not hasattr(m, 'dtype'):
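+        # probe with a zero vector so the wrapped operator reveals the
+        # dtype of its matvec results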
+        x = np.zeros(m.shape[1])
+        m.dtype = (m * x).dtype
+    return m
+
+
+class SpLuInv(LinearOperator):
+    """
+    SpLuInv:
+       helper class to repeatedly solve M*x=b
+       using a sparse LU-decomposition of M
+    """
+
+    def __init__(self, M):
+        self.M_lu = splu(M)
+        self.shape = M.shape
+        self.dtype = M.dtype
+        self.isreal = not np.issubdtype(self.dtype, np.complexfloating)
+
+    def _matvec(self, x):
+        # careful here: splu.solve will throw away imaginary
+        # part of x if M is real
+        x = np.asarray(x)
+        if self.isreal and np.issubdtype(x.dtype, np.complexfloating):
+            return (self.M_lu.solve(np.real(x).astype(self.dtype))
+                    + 1j * self.M_lu.solve(np.imag(x).astype(self.dtype)))
+        else:
+            return self.M_lu.solve(x.astype(self.dtype))
+
+
+class LuInv(LinearOperator):
+    """
+    LuInv:
+       helper class to repeatedly solve M*x=b
+       using an LU-decomposition of M
+    """
+
+    def __init__(self, M):
+        self.M_lu = lu_factor(M)
+        self.shape = M.shape
+        self.dtype = M.dtype
+
+    def _matvec(self, x):
+        return lu_solve(self.M_lu, x)
+
+
+def gmres_loose(A, b, tol):
+    """
+    gmres with looser termination condition.
+    """
+    b = np.asarray(b)
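+    # floor the requested tolerance well above machine epsilon so that
+    # gmres can actually reach its termination criterion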
+    min_tol = 1000 * np.sqrt(b.size) * np.finfo(b.dtype).eps
+    return gmres(A, b, tol=max(tol, min_tol), atol=0)
+
+
+class IterInv(LinearOperator):
+    """
+    IterInv:
+       helper class to repeatedly solve M*x=b
+       using an iterative method.
+    """
+
+    def __init__(self, M, ifunc=gmres_loose, tol=0):
+        self.M = M
+        if hasattr(M, 'dtype'):
+            self.dtype = M.dtype
+        else:
+            x = np.zeros(M.shape[1])
+            self.dtype = (M * x).dtype
+        self.shape = M.shape
+
+        if tol <= 0:
+            # when tol=0, ARPACK uses machine tolerance as calculated
+            # by LAPACK's _LAMCH function.  We should match this
+            tol = 2 * np.finfo(self.dtype).eps
+        self.ifunc = ifunc
+        self.tol = tol
+
+    def _matvec(self, x):
+        b, info = self.ifunc(self.M, x, tol=self.tol)
+        if info != 0:
+            raise ValueError("Error in inverting M: function "
+                             "%s did not converge (info = %i)."
+                             % (self.ifunc.__name__, info))
+        return b
+
+
+class IterOpInv(LinearOperator):
+    """
+    IterOpInv:
+       helper class to repeatedly solve [A-sigma*M]*x = b
+       using an iterative method
+    """
+
+    def __init__(self, A, M, sigma, ifunc=gmres_loose, tol=0):
+        self.A = A
+        self.M = M
+        self.sigma = sigma
+
+        def mult_func(x):
+            return A.matvec(x) - sigma * M.matvec(x)
+
+        def mult_func_M_None(x):
+            return A.matvec(x) - sigma * x
+
+        x = np.zeros(A.shape[1])
+        if M is None:
+            dtype = mult_func_M_None(x).dtype
+            self.OP = LinearOperator(self.A.shape,
+                                     mult_func_M_None,
+                                     dtype=dtype)
+        else:
+            dtype = mult_func(x).dtype
+            self.OP = LinearOperator(self.A.shape,
+                                     mult_func,
+                                     dtype=dtype)
+        self.shape = A.shape
+
+        if tol <= 0:
+            # when tol=0, ARPACK uses machine tolerance as calculated
+            # by LAPACK's _LAMCH function.  We should match this
+            tol = 2 * np.finfo(self.OP.dtype).eps
+        self.ifunc = ifunc
+        self.tol = tol
+
+    def _matvec(self, x):
+        b, info = self.ifunc(self.OP, x, tol=self.tol)
+        if info != 0:
+            raise ValueError("Error in inverting [A-sigma*M]: function "
+                             "%s did not converge (info = %i)."
+                             % (self.ifunc.__name__, info))
+        return b
+
+    @property
+    def dtype(self):
+        return self.OP.dtype
+
+
+def _fast_spmatrix_to_csc(A, hermitian=False):
+    """Convert sparse matrix to CSC (by transposing, if possible)"""
+    if (isspmatrix_csr(A) and hermitian
+            and not np.issubdtype(A.dtype, np.complexfloating)):
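+        # a real symmetric CSR matrix equals its transpose, and CSR.T is a
+        # CSC view over the same data, so this avoids a conversion copy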
+        return A.T
+    elif is_pydata_spmatrix(A):
+        # No need to convert
+        return A
+    else:
+        return A.tocsc()
+
+
+def get_inv_matvec(M, hermitian=False, tol=0):
+    if isdense(M):
+        return LuInv(M).matvec
+    elif isspmatrix(M) or is_pydata_spmatrix(M):
+        M = _fast_spmatrix_to_csc(M, hermitian=hermitian)
+        return SpLuInv(M).matvec
+    else:
+        return IterInv(M, tol=tol).matvec
+
+
+def get_OPinv_matvec(A, M, sigma, hermitian=False, tol=0):
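+    # with sigma == 0, [A - sigma*M]^-1 reduces to a plain inverse of A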
+    if sigma == 0:
+        return get_inv_matvec(A, hermitian=hermitian, tol=tol)
+
+    if M is None:
+        #M is the identity matrix
+        if isdense(A):
+            if (np.issubdtype(A.dtype, np.complexfloating)
+                    or np.imag(sigma) == 0):
+                A = np.copy(A)
+            else:
+                A = A + 0j
+            A.flat[::A.shape[1] + 1] -= sigma
+            return LuInv(A).matvec
+        elif isspmatrix(A) or is_pydata_spmatrix(A):
+            A = A - sigma * eye(A.shape[0])
+            A = _fast_spmatrix_to_csc(A, hermitian=hermitian)
+            return SpLuInv(A).matvec
+        else:
+            return IterOpInv(_aslinearoperator_with_dtype(A),
+                             M, sigma, tol=tol).matvec
+    else:
+        if ((not isdense(A) and not isspmatrix(A) and not is_pydata_spmatrix(A)) or
+                (not isdense(M) and not isspmatrix(M) and not is_pydata_spmatrix(M))):
+            return IterOpInv(_aslinearoperator_with_dtype(A),
+                             _aslinearoperator_with_dtype(M),
+                             sigma, tol=tol).matvec
+        elif isdense(A) or isdense(M):
+            return LuInv(A - sigma * M).matvec
+        else:
+            OP = A - sigma * M
+            OP = _fast_spmatrix_to_csc(OP, hermitian=hermitian)
+            return SpLuInv(OP).matvec
+
+
+# ARPACK is not threadsafe or reentrant (SAVE variables), so we need a
+# lock and a re-entering check.
+_ARPACK_LOCK = ReentrancyLock("Nested calls to eigs/eigsh not allowed: "
+                              "ARPACK is not re-entrant")
+
+
+def eigs(A, k=6, M=None, sigma=None, which='LM', v0=None,
+         ncv=None, maxiter=None, tol=0, return_eigenvectors=True,
+         Minv=None, OPinv=None, OPpart=None):
+    """
+    Find k eigenvalues and eigenvectors of the square matrix A.
+
+    Solves ``A @ x[i] = w[i] * x[i]``, the standard eigenvalue problem
+    for w[i] eigenvalues with corresponding eigenvectors x[i].
+
+    If M is specified, solves ``A @ x[i] = w[i] * M @ x[i]``, the
+    generalized eigenvalue problem for w[i] eigenvalues
+    with corresponding eigenvectors x[i].
+
+    Parameters
+    ----------
+    A : ndarray, sparse matrix or LinearOperator
+        An array, sparse matrix, or LinearOperator representing
+        the operation ``A @ x``, where A is a real or complex square matrix.
+    k : int, optional
+        The number of eigenvalues and eigenvectors desired.
+        `k` must be smaller than N-1. It is not possible to compute all
+        eigenvectors of a matrix.
+    M : ndarray, sparse matrix or LinearOperator, optional
+        An array, sparse matrix, or LinearOperator representing
+        the operation ``M @ x`` for the generalized eigenvalue problem
+
+            A @ x = w * M @ x.
+
+        M must represent a real symmetric matrix if A is real, and must
+        represent a complex Hermitian matrix if A is complex. For best
+        results, the data type of M should be the same as that of A.
+        Additionally:
+
+            If `sigma` is None, M is positive definite
+
+            If sigma is specified, M is positive semi-definite
+
+        If sigma is None, eigs requires an operator to compute the solution
+        of the linear equation ``M @ x = b``.  This is done internally via a
+        (sparse) LU decomposition for an explicit matrix M, or via an
+        iterative solver for a general linear operator.  Alternatively,
+        the user can supply the matrix or operator Minv, which gives
+        ``x = Minv @ b = M^-1 @ b``.
+    sigma : real or complex, optional
+        Find eigenvalues near sigma using shift-invert mode.  This requires
+        an operator to compute the solution of the linear system
+        ``[A - sigma * M] @ x = b``, where M is the identity matrix if
+        unspecified. This is computed internally via a (sparse) LU
+        decomposition for explicit matrices A & M, or via an iterative
+        solver if either A or M is a general linear operator.
+        Alternatively, the user can supply the matrix or operator OPinv,
+        which gives ``x = OPinv @ b = [A - sigma * M]^-1 @ b``.
+        For a real matrix A, shift-invert can either be done in imaginary
+        mode or real mode, specified by the parameter OPpart ('r' or 'i').
+        Note that when sigma is specified, the keyword 'which' (below)
+        refers to the shifted eigenvalues ``w'[i]`` where:
+
+            If A is real and OPpart == 'r' (default),
+              ``w'[i] = 1/2 * [1/(w[i]-sigma) + 1/(w[i]-conj(sigma))]``.
+
+            If A is real and OPpart == 'i',
+              ``w'[i] = 1/2i * [1/(w[i]-sigma) - 1/(w[i]-conj(sigma))]``.
+
+            If A is complex, ``w'[i] = 1/(w[i]-sigma)``.
+
+    v0 : ndarray, optional
+        Starting vector for iteration.
+        Default: random
+    ncv : int, optional
+        The number of Lanczos vectors generated.
+        `ncv` must be greater than `k`; it is recommended that ``ncv > 2*k``.
+        Default: ``min(n, max(2*k + 1, 20))``
+    which : str, ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI'], optional
+        Which `k` eigenvectors and eigenvalues to find:
+
+            'LM' : largest magnitude
+
+            'SM' : smallest magnitude
+
+            'LR' : largest real part
+
+            'SR' : smallest real part
+
+            'LI' : largest imaginary part
+
+            'SI' : smallest imaginary part
+
+        When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
+        (see discussion in 'sigma', above).  ARPACK is generally better
+        at finding large values than small values.  If small eigenvalues are
+        desired, consider using shift-invert mode for better performance.
+    maxiter : int, optional
+        Maximum number of Arnoldi update iterations allowed.
+        Default: ``n*10``
+    tol : float, optional
+        Relative accuracy for eigenvalues (stopping criterion).
+        The default value of 0 implies machine precision.
+    return_eigenvectors : bool, optional
+        Return eigenvectors (True) in addition to eigenvalues.
+    Minv : ndarray, sparse matrix or LinearOperator, optional
+        See notes in M, above.
+    OPinv : ndarray, sparse matrix or LinearOperator, optional
+        See notes in sigma, above.
+    OPpart : {'r' or 'i'}, optional
+        See notes in sigma, above.
+
+    Returns
+    -------
+    w : ndarray
+        Array of k eigenvalues.
+    v : ndarray
+        An array of `k` eigenvectors.
+        ``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i].
+
+    Raises
+    ------
+    ArpackNoConvergence
+        When the requested convergence is not obtained.
+        The currently converged eigenvalues and eigenvectors can be found
+        as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
+        object.
+
+    See Also
+    --------
+    eigsh : eigenvalues and eigenvectors for symmetric matrix A
+    svds : singular value decomposition for a matrix A
+
+    Notes
+    -----
+    This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD,
+    ZNEUPD, functions which use the Implicitly Restarted Arnoldi Method to
+    find the eigenvalues and eigenvectors [2]_.
+
+    References
+    ----------
+    .. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
+    .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang,  ARPACK USERS GUIDE:
+       Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
+       Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
+
+    Examples
+    --------
+    Find 6 eigenvectors of the identity matrix:
+
+    >>> import numpy as np
+    >>> from scipy.sparse.linalg import eigs
+    >>> id = np.eye(13)
+    >>> vals, vecs = eigs(id, k=6)
+    >>> vals
+    array([ 1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j])
+    >>> vecs.shape
+    (13, 6)
+
+    """
+    if A.shape[0] != A.shape[1]:
+        raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
+    if M is not None:
+        if M.shape != A.shape:
+            raise ValueError('wrong M dimensions %s, should be %s'
+                             % (M.shape, A.shape))
+        if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
+            warnings.warn('M does not have the same type precision as A. '
+                          'This may adversely affect ARPACK convergence')
+
+    n = A.shape[0]
+
+    if k <= 0:
+        raise ValueError("k=%d must be greater than 0." % k)
+
+    if k >= n - 1:
+        warnings.warn("k >= N - 1 for N * N square matrix. "
+                      "Attempting to use scipy.linalg.eig instead.",
+                      RuntimeWarning)
+
+        if issparse(A):
+            raise TypeError("Cannot use scipy.linalg.eig for sparse A with "
+                            "k >= N - 1. Use scipy.linalg.eig(A.toarray()) or"
+                            " reduce k.")
+        if isinstance(A, LinearOperator):
+            raise TypeError("Cannot use scipy.linalg.eig for LinearOperator "
+                            "A with k >= N - 1.")
+        if isinstance(M, LinearOperator):
+            raise TypeError("Cannot use scipy.linalg.eig for LinearOperator "
+                            "M with k >= N - 1.")
+
+        return eig(A, b=M, right=return_eigenvectors)
+
+    if sigma is None:
+        matvec = _aslinearoperator_with_dtype(A).matvec
+
+        if OPinv is not None:
+            raise ValueError("OPinv should not be specified "
+                             "with sigma = None.")
+        if OPpart is not None:
+            raise ValueError("OPpart should not be specified "
+                             "with sigma = None.")
+
+        if M is None:
+            # standard eigenvalue problem
+            mode = 1
+            M_matvec = None
+            Minv_matvec = None
+            if Minv is not None:
+                raise ValueError("Minv should not be "
+                                 "specified with M = None.")
+        else:
+            # general eigenvalue problem
+            mode = 2
+            if Minv is None:
+                Minv_matvec = get_inv_matvec(M, hermitian=True, tol=tol)
+            else:
+                Minv = _aslinearoperator_with_dtype(Minv)
+                Minv_matvec = Minv.matvec
+            M_matvec = _aslinearoperator_with_dtype(M).matvec
+    else:
+        # sigma is not None: shift-invert mode
+        if np.issubdtype(A.dtype, np.complexfloating):
+            if OPpart is not None:
+                raise ValueError("OPpart should not be specified "
+                                 "with complex A")
+            mode = 3
+        elif OPpart is None or OPpart.lower() == 'r':
+            mode = 3
+        elif OPpart.lower() == 'i':
+            if np.imag(sigma) == 0:
+                raise ValueError("OPpart cannot be 'i' if sigma is real")
+            mode = 4
+        else:
+            raise ValueError("OPpart must be one of ('r','i')")
+
+        matvec = _aslinearoperator_with_dtype(A).matvec
+        if Minv is not None:
+            raise ValueError("Minv should not be specified when sigma is")
+        if OPinv is None:
+            Minv_matvec = get_OPinv_matvec(A, M, sigma,
+                                           hermitian=False, tol=tol)
+        else:
+            OPinv = _aslinearoperator_with_dtype(OPinv)
+            Minv_matvec = OPinv.matvec
+        if M is None:
+            M_matvec = None
+        else:
+            M_matvec = _aslinearoperator_with_dtype(M).matvec
+
+    params = _UnsymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
+                                      M_matvec, Minv_matvec, sigma,
+                                      ncv, v0, maxiter, which, tol)
+
+    with _ARPACK_LOCK:
+        while not params.converged:
+            params.iterate()
+
+        return params.extract(return_eigenvectors)
+
+
+def eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None,
+          ncv=None, maxiter=None, tol=0, return_eigenvectors=True,
+          Minv=None, OPinv=None, mode='normal'):
+    """
+    Find k eigenvalues and eigenvectors of the real symmetric square matrix
+    or complex Hermitian matrix A.
+
+    Solves ``A @ x[i] = w[i] * x[i]``, the standard eigenvalue problem for
+    w[i] eigenvalues with corresponding eigenvectors x[i].
+
+    If M is specified, solves ``A @ x[i] = w[i] * M @ x[i]``, the
+    generalized eigenvalue problem for w[i] eigenvalues
+    with corresponding eigenvectors x[i].
+
+    Note that there is no specialized routine for the case when A is a complex
+    Hermitian matrix. In this case, ``eigsh()`` will call ``eigs()`` and return the
+    real parts of the eigenvalues thus obtained.
+
+    Parameters
+    ----------
+    A : ndarray, sparse matrix or LinearOperator
+        A square operator representing the operation ``A @ x``, where ``A`` is
+        real symmetric or complex Hermitian. For buckling mode (see below)
+        ``A`` must additionally be positive-definite.
+    k : int, optional
+        The number of eigenvalues and eigenvectors desired.
+        `k` must be smaller than N. It is not possible to compute all
+        eigenvectors of a matrix.
+
+    Returns
+    -------
+    w : array
+        Array of k eigenvalues.
+    v : array
+        An array representing the `k` eigenvectors.  The column ``v[:, i]`` is
+        the eigenvector corresponding to the eigenvalue ``w[i]``.
+
+    Other Parameters
+    ----------------
+    M : An N x N matrix, array, sparse matrix, or linear operator representing
+        the operation ``M @ x`` for the generalized eigenvalue problem
+
+            A @ x = w * M @ x.
+
+        M must represent a real symmetric matrix if A is real, and must
+        represent a complex Hermitian matrix if A is complex. For best
+        results, the data type of M should be the same as that of A.
+        Additionally:
+
+            If sigma is None, M is symmetric positive definite.
+
+            If sigma is specified, M is symmetric positive semi-definite.
+
+            In buckling mode, M is symmetric indefinite.
+
+        If sigma is None, eigsh requires an operator to compute the solution
+        of the linear equation ``M @ x = b``. This is done internally via a
+        (sparse) LU decomposition for an explicit matrix M, or via an
+        iterative solver for a general linear operator.  Alternatively,
+        the user can supply the matrix or operator Minv, which gives
+        ``x = Minv @ b = M^-1 @ b``.
+    sigma : real
+        Find eigenvalues near sigma using shift-invert mode.  This requires
+        an operator to compute the solution of the linear system
+        ``[A - sigma * M] x = b``, where M is the identity matrix if
+        unspecified.  This is computed internally via a (sparse) LU
+        decomposition for explicit matrices A & M, or via an iterative
+        solver if either A or M is a general linear operator.
+        Alternatively, the user can supply the matrix or operator OPinv,
+        which gives ``x = OPinv @ b = [A - sigma * M]^-1 @ b``.
+        Note that when sigma is specified, the keyword 'which' refers to
+        the shifted eigenvalues ``w'[i]`` where:
+
+            if mode == 'normal', ``w'[i] = 1 / (w[i] - sigma)``.
+
+            if mode == 'cayley', ``w'[i] = (w[i] + sigma) / (w[i] - sigma)``.
+
+            if mode == 'buckling', ``w'[i] = w[i] / (w[i] - sigma)``.
+
+        (see further discussion in 'mode' below)
+    v0 : ndarray, optional
+        Starting vector for iteration.
+        Default: random
+    ncv : int, optional
+        The number of Lanczos vectors generated. ``ncv`` must be greater
+        than ``k`` and smaller than ``n``; it is recommended that
+        ``ncv > 2*k``.
+        Default: ``min(n, max(2*k + 1, 20))``
+    which : str ['LM' | 'SM' | 'LA' | 'SA' | 'BE']
+        If A is a complex Hermitian matrix, 'BE' is invalid.
+        Which `k` eigenvectors and eigenvalues to find:
+
+            'LM' : Largest (in magnitude) eigenvalues.
+
+            'SM' : Smallest (in magnitude) eigenvalues.
+
+            'LA' : Largest (algebraic) eigenvalues.
+
+            'SA' : Smallest (algebraic) eigenvalues.
+
+            'BE' : Half (k/2) from each end of the spectrum.
+
+        When k is odd, return one more (k/2+1) from the high end.
+        When sigma != None, 'which' refers to the shifted eigenvalues ``w'[i]``
+        (see discussion in 'sigma', above).  ARPACK is generally better
+        at finding large values than small values.  If small eigenvalues are
+        desired, consider using shift-invert mode for better performance.
+    maxiter : int, optional
+        Maximum number of Arnoldi update iterations allowed.
+        Default: ``n*10``
+    tol : float
+        Relative accuracy for eigenvalues (stopping criterion).
+        The default value of 0 implies machine precision.
+    Minv : N x N matrix, array, sparse matrix, or LinearOperator
+        See notes in M, above.
+    OPinv : N x N matrix, array, sparse matrix, or LinearOperator
+        See notes in sigma, above.
+    return_eigenvectors : bool
+        Return eigenvectors (True) in addition to eigenvalues.
+        This value determines the order in which eigenvalues are sorted.
+        The sort order is also dependent on the `which` variable.
+
+            For which = 'LM' or 'SA':
+                If `return_eigenvectors` is True, eigenvalues are sorted by
+                algebraic value.
+
+                If `return_eigenvectors` is False, eigenvalues are sorted by
+                absolute value.
+
+            For which = 'BE' or 'LA':
+                eigenvalues are always sorted by algebraic value.
+
+            For which = 'SM':
+                If `return_eigenvectors` is True, eigenvalues are sorted by
+                algebraic value.
+
+                If `return_eigenvectors` is False, eigenvalues are sorted by
+                decreasing absolute value.
+
+    mode : string ['normal' | 'buckling' | 'cayley']
+        Specify strategy to use for shift-invert mode.  This argument applies
+        only for real-valued A and sigma != None.  For shift-invert mode,
+        ARPACK internally solves the eigenvalue problem
+        ``OP @ x'[i] = w'[i] * B @ x'[i]``
+        and transforms the resulting Ritz vectors x'[i] and Ritz values w'[i]
+        into the desired eigenvectors and eigenvalues of the problem
+        ``A @ x[i] = w[i] * M @ x[i]``.
+        The modes are as follows:
+
+            'normal' :
+                OP = [A - sigma * M]^-1 @ M,
+                B = M,
+                w'[i] = 1 / (w[i] - sigma)
+
+            'buckling' :
+                OP = [A - sigma * M]^-1 @ A,
+                B = A,
+                w'[i] = w[i] / (w[i] - sigma)
+
+            'cayley' :
+                OP = [A - sigma * M]^-1 @ [A + sigma * M],
+                B = M,
+                w'[i] = (w[i] + sigma) / (w[i] - sigma)
+
+        The choice of mode will affect which eigenvalues are selected by
+        the keyword 'which', and can also impact the stability of
+        convergence (see [2] for a discussion).
+
+    Raises
+    ------
+    ArpackNoConvergence
+        When the requested convergence is not obtained.
+
+        The currently converged eigenvalues and eigenvectors can be found
+        as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
+        object.
+
+    See Also
+    --------
+    eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A
+    svds : singular value decomposition for a matrix A
+
+    Notes
+    -----
+    This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD
+    functions which use the Implicitly Restarted Lanczos Method to
+    find the eigenvalues and eigenvectors [2]_.
+
+    References
+    ----------
+    .. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
+    .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang,  ARPACK USERS GUIDE:
+       Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
+       Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.sparse.linalg import eigsh
+    >>> identity = np.eye(13)
+    >>> eigenvalues, eigenvectors = eigsh(identity, k=6)
+    >>> eigenvalues
+    array([1., 1., 1., 1., 1., 1.])
+    >>> eigenvectors.shape
+    (13, 6)
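+
+    A small shift-invert sketch on the same matrix (the shift ``sigma=0.5``
+    and the Cayley strategy are illustrative choices, not defaults). Every
+    eigenvalue of the identity equals 1, so both runs should recover
+    values close to 1:
+
+    >>> w_si, _ = eigsh(identity, k=2, sigma=0.5)
+    >>> np.allclose(w_si, 1.0)
+    True
+    >>> w_cay, _ = eigsh(identity, k=2, sigma=0.5, mode='cayley')
+    >>> np.allclose(w_cay, 1.0)
+    True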
+
+    """
+    # complex Hermitian matrices should be solved with eigs
+    if np.issubdtype(A.dtype, np.complexfloating):
+        if mode != 'normal':
+            raise ValueError("mode=%s cannot be used with "
+                             "complex matrix A" % mode)
+        if which == 'BE':
+            raise ValueError("which='BE' cannot be used with complex matrix A")
+        elif which == 'LA':
+            which = 'LR'
+        elif which == 'SA':
+            which = 'SR'
+        ret = eigs(A, k, M=M, sigma=sigma, which=which, v0=v0,
+                   ncv=ncv, maxiter=maxiter, tol=tol,
+                   return_eigenvectors=return_eigenvectors, Minv=Minv,
+                   OPinv=OPinv)
+
+        if return_eigenvectors:
+            return ret[0].real, ret[1]
+        else:
+            return ret.real
+
+    if A.shape[0] != A.shape[1]:
+        raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
+    if M is not None:
+        if M.shape != A.shape:
+            raise ValueError('wrong M dimensions %s, should be %s'
+                             % (M.shape, A.shape))
+        if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
+            warnings.warn('M does not have the same type precision as A. '
+                          'This may adversely affect ARPACK convergence')
+
+    n = A.shape[0]
+
+    if k <= 0:
+        raise ValueError("k must be greater than 0.")
+
+    if k >= n:
+        warnings.warn("k >= N for N * N square matrix. "
+                      "Attempting to use scipy.linalg.eigh instead.",
+                      RuntimeWarning)
+
+        if issparse(A):
+            raise TypeError("Cannot use scipy.linalg.eigh for sparse A with "
+                            "k >= N. Use scipy.linalg.eigh(A.toarray()) or"
+                            " reduce k.")
+        if isinstance(A, LinearOperator):
+            raise TypeError("Cannot use scipy.linalg.eigh for LinearOperator "
+                            "A with k >= N.")
+        if isinstance(M, LinearOperator):
+            raise TypeError("Cannot use scipy.linalg.eigh for LinearOperator "
+                            "M with k >= N.")
+
+        return eigh(A, b=M, eigvals_only=not return_eigenvectors)
+
+    if sigma is None:
+        A = _aslinearoperator_with_dtype(A)
+        matvec = A.matvec
+
+        if OPinv is not None:
+            raise ValueError("OPinv should not be specified "
+                             "with sigma = None.")
+        if M is None:
+            # standard eigenvalue problem
+            mode = 1
+            M_matvec = None
+            Minv_matvec = None
+            if Minv is not None:
+                raise ValueError("Minv should not be "
+                                 "specified with M = None.")
+        else:
+            # general eigenvalue problem
+            mode = 2
+            if Minv is None:
+                Minv_matvec = get_inv_matvec(M, hermitian=True, tol=tol)
+            else:
+                Minv = _aslinearoperator_with_dtype(Minv)
+                Minv_matvec = Minv.matvec
+            M_matvec = _aslinearoperator_with_dtype(M).matvec
+    else:
+        # sigma is not None: shift-invert mode
+        if Minv is not None:
+            raise ValueError("Minv should not be specified when sigma is")
+
+        # normal mode
+        if mode == 'normal':
+            mode = 3
+            matvec = None
+            if OPinv is None:
+                Minv_matvec = get_OPinv_matvec(A, M, sigma,
+                                               hermitian=True, tol=tol)
+            else:
+                OPinv = _aslinearoperator_with_dtype(OPinv)
+                Minv_matvec = OPinv.matvec
+            if M is None:
+                M_matvec = None
+            else:
+                M = _aslinearoperator_with_dtype(M)
+                M_matvec = M.matvec
+
+        # buckling mode
+        elif mode == 'buckling':
+            mode = 4
+            if OPinv is None:
+                Minv_matvec = get_OPinv_matvec(A, M, sigma,
+                                               hermitian=True, tol=tol)
+            else:
+                Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
+            matvec = _aslinearoperator_with_dtype(A).matvec
+            M_matvec = None
+
+        # cayley-transform mode
+        elif mode == 'cayley':
+            mode = 5
+            matvec = _aslinearoperator_with_dtype(A).matvec
+            if OPinv is None:
+                Minv_matvec = get_OPinv_matvec(A, M, sigma,
+                                               hermitian=True, tol=tol)
+            else:
+                Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
+            if M is None:
+                M_matvec = None
+            else:
+                M_matvec = _aslinearoperator_with_dtype(M).matvec
+
+        # unrecognized mode
+        else:
+            raise ValueError("unrecognized mode '%s'" % mode)
+
+    params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
+                                    M_matvec, Minv_matvec, sigma,
+                                    ncv, v0, maxiter, which, tol)
+
+    with _ARPACK_LOCK:
+        while not params.converged:
+            params.iterate()
+
+        return params.extract(return_eigenvectors)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/arpack/tests/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/arpack/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/arpack/tests/test_arpack.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/arpack/tests/test_arpack.py
new file mode 100644
index 00000000..0ec99b25
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/arpack/tests/test_arpack.py
@@ -0,0 +1,725 @@
+__usage__ = """
+To run tests locally:
+  python tests/test_arpack.py [-l] [-v]
+
+"""
+
+import threading
+import itertools
+
+import numpy as np
+
+from numpy.testing import assert_allclose, assert_equal, suppress_warnings
+from pytest import raises as assert_raises
+import pytest
+
+from numpy import dot, conj, random
+from scipy.linalg import eig, eigh
+from scipy.sparse import csc_matrix, csr_matrix, diags, rand
+from scipy.sparse.linalg import LinearOperator, aslinearoperator
+from scipy.sparse.linalg._eigen.arpack import (eigs, eigsh, arpack,
+                                              ArpackNoConvergence)
+
+
+from scipy._lib._gcutils import assert_deallocated, IS_PYPY
+
+
+# precision for tests
+_ndigits = {'f': 3, 'd': 11, 'F': 3, 'D': 11}
+
+
+def _get_test_tolerance(type_char, mattype=None, D_type=None, which=None):
+    """
+    Return tolerance values suitable for a given test:
+
+    Parameters
+    ----------
+    type_char : {'f', 'd', 'F', 'D'}
+        Data type in ARPACK eigenvalue problem
+    mattype : {csr_matrix, aslinearoperator, asarray}, optional
+        Linear operator type
+    D_type : DictWithRepr, optional
+        Test-case dictionary; some generalized Hermitian cases get looser
+        tolerances
+    which : str, optional
+        The ARPACK ``which`` parameter used by the test
+
+    Returns
+    -------
+    tol
+        Tolerance to pass to the ARPACK routine
+    rtol
+        Relative tolerance for outputs
+    atol
+        Absolute tolerance for outputs
+
+    """
+
+    rtol = {'f': 3000 * np.finfo(np.float32).eps,
+            'F': 3000 * np.finfo(np.float32).eps,
+            'd': 2000 * np.finfo(np.float64).eps,
+            'D': 2000 * np.finfo(np.float64).eps}[type_char]
+    atol = rtol
+    tol = 0
+
+    if mattype is aslinearoperator and type_char in ('f', 'F'):
+        # iterative methods in single precision: worse errors
+        # also: bump ARPACK tolerance so that the iterative method converges
+        tol = 30 * np.finfo(np.float32).eps
+        rtol *= 5
+
+    if mattype is csr_matrix and type_char in ('f', 'F'):
+        # sparse in single precision: worse errors
+        rtol *= 5
+
+    if (
+        which in ('LM', 'SM', 'LA')
+        and D_type.name == "gen-hermitian-Mc"
+    ):
+        if type_char == 'F':
+            # missing case 1, 2, and more, from PR 14798
+            rtol *= 5
+
+        if type_char == 'D':
+            # missing more cases, from PR 14798
+            rtol *= 7
+
+    return tol, rtol, atol
+
+
+def generate_matrix(N, complex_=False, hermitian=False,
+                    pos_definite=False, sparse=False):
+    M = np.random.random((N, N))
+    if complex_:
+        M = M + 1j * np.random.random((N, N))
+
+    if hermitian:
+        if pos_definite:
+            if sparse:
+                i = np.arange(N)
+                j = np.random.randint(N, size=N-2)
+                i, j = np.meshgrid(i, j)
+                M[i, j] = 0
+            M = np.dot(M.conj(), M.T)
+        else:
+            M = np.dot(M.conj(), M.T)
+            if sparse:
+                i = np.random.randint(N, size=N * N // 4)
+                j = np.random.randint(N, size=N * N // 4)
+                ind = np.nonzero(i == j)
+                j[ind] = (j[ind] + 1) % N
+                M[i, j] = 0
+                M[j, i] = 0
+    else:
+        if sparse:
+            i = np.random.randint(N, size=N * N // 2)
+            j = np.random.randint(N, size=N * N // 2)
+            M[i, j] = 0
+    return M
+
+
+def generate_matrix_symmetric(N, pos_definite=False, sparse=False):
+    M = np.random.random((N, N))
+
+    M = 0.5 * (M + M.T)  # Make M symmetric
+
+    if pos_definite:
+        Id = N * np.eye(N)
+        if sparse:
+            M = csr_matrix(M)
+        M += Id
+    else:
+        if sparse:
+            M = csr_matrix(M)
+
+    return M
+
+
+def _aslinearoperator_with_dtype(m):
+    m = aslinearoperator(m)
+    if not hasattr(m, 'dtype'):
+        x = np.zeros(m.shape[1])
+        m.dtype = (m * x).dtype
+    return m
+
+
+def assert_allclose_cc(actual, desired, **kw):
+    """Almost equal or complex conjugates almost equal"""
+    try:
+        assert_allclose(actual, desired, **kw)
+    except AssertionError:
+        assert_allclose(actual, conj(desired), **kw)
+
+
+def argsort_which(eigenvalues, typ, k, which,
+                  sigma=None, OPpart=None, mode=None):
+    """Return sorted indices of eigenvalues using the "which" keyword
+    from eigs and eigsh"""
+    if sigma is None:
+        reval = np.round(eigenvalues, decimals=_ndigits[typ])
+    else:
+        if mode is None or mode == 'normal':
+            if OPpart is None:
+                reval = 1. / (eigenvalues - sigma)
+            elif OPpart == 'r':
+                reval = 0.5 * (1. / (eigenvalues - sigma)
+                               + 1. / (eigenvalues - np.conj(sigma)))
+            elif OPpart == 'i':
+                reval = -0.5j * (1. / (eigenvalues - sigma)
+                                 - 1. / (eigenvalues - np.conj(sigma)))
+        elif mode == 'cayley':
+            reval = (eigenvalues + sigma) / (eigenvalues - sigma)
+        elif mode == 'buckling':
+            reval = eigenvalues / (eigenvalues - sigma)
+        else:
+            raise ValueError("mode='%s' not recognized" % mode)
+
+        reval = np.round(reval, decimals=_ndigits[typ])
+
+    if which in ['LM', 'SM']:
+        ind = np.argsort(abs(reval))
+    elif which in ['LR', 'SR', 'LA', 'SA', 'BE']:
+        ind = np.argsort(np.real(reval))
+    elif which in ['LI', 'SI']:
+        # for LI,SI ARPACK returns largest,smallest abs(imaginary) why?
+        if typ.islower():
+            ind = np.argsort(abs(np.imag(reval)))
+        else:
+            ind = np.argsort(np.imag(reval))
+    else:
+        raise ValueError("which='%s' is unrecognized" % which)
+
+    if which in ['LM', 'LA', 'LR', 'LI']:
+        return ind[-k:]
+    elif which in ['SM', 'SA', 'SR', 'SI']:
+        return ind[:k]
+    elif which == 'BE':
+        return np.concatenate((ind[:k//2], ind[k//2-k:]))
+
+
+def eval_evec(symmetric, d, typ, k, which, v0=None, sigma=None,
+              mattype=np.asarray, OPpart=None, mode='normal'):
+    general = ('bmat' in d)
+
+    if symmetric:
+        eigs_func = eigsh
+    else:
+        eigs_func = eigs
+
+    if general:
+        err = ("error for %s:general, typ=%s, which=%s, sigma=%s, "
+               "mattype=%s, OPpart=%s, mode=%s" % (eigs_func.__name__,
+                                                   typ, which, sigma,
+                                                   mattype.__name__,
+                                                   OPpart, mode))
+    else:
+        err = ("error for %s:standard, typ=%s, which=%s, sigma=%s, "
+               "mattype=%s, OPpart=%s, mode=%s" % (eigs_func.__name__,
+                                                   typ, which, sigma,
+                                                   mattype.__name__,
+                                                   OPpart, mode))
+
+    a = d['mat'].astype(typ)
+    ac = mattype(a)
+
+    if general:
+        b = d['bmat'].astype(typ)
+        bc = mattype(b)
+
+    # get exact eigenvalues
+    exact_eval = d['eval'].astype(typ.upper())
+    ind = argsort_which(exact_eval, typ, k, which,
+                        sigma, OPpart, mode)
+    exact_eval = exact_eval[ind]
+
+    # compute arpack eigenvalues
+    kwargs = dict(which=which, v0=v0, sigma=sigma)
+    if eigs_func is eigsh:
+        kwargs['mode'] = mode
+    else:
+        kwargs['OPpart'] = OPpart
+
+    # compute suitable tolerances
+    kwargs['tol'], rtol, atol = _get_test_tolerance(typ, mattype, d, which)
+    # on rare occasions, ARPACK routines return results that are proper
+    # eigenvalues and -vectors, but not necessarily the ones requested in
+    # the parameter which. This is inherent to the Krylov methods, and
+    # should not be treated as a failure. If such a rare situation
+    # occurs, the calculation is tried again (but at most a few times).
+    ntries = 0
+    while ntries < 5:
+        # solve
+        if general:
+            try:
+                eigenvalues, evec = eigs_func(ac, k, bc, **kwargs)
+            except ArpackNoConvergence:
+                kwargs['maxiter'] = 20*a.shape[0]
+                eigenvalues, evec = eigs_func(ac, k, bc, **kwargs)
+        else:
+            try:
+                eigenvalues, evec = eigs_func(ac, k, **kwargs)
+            except ArpackNoConvergence:
+                kwargs['maxiter'] = 20*a.shape[0]
+                eigenvalues, evec = eigs_func(ac, k, **kwargs)
+
+        ind = argsort_which(eigenvalues, typ, k, which,
+                            sigma, OPpart, mode)
+        eigenvalues = eigenvalues[ind]
+        evec = evec[:, ind]
+
+        try:
+            # check eigenvalues
+            assert_allclose_cc(eigenvalues, exact_eval, rtol=rtol, atol=atol,
+                               err_msg=err)
+            check_evecs = True
+        except AssertionError:
+            check_evecs = False
+            ntries += 1
+
+        if check_evecs:
+            # check eigenvectors
+            LHS = np.dot(a, evec)
+            if general:
+                RHS = eigenvalues * np.dot(b, evec)
+            else:
+                RHS = eigenvalues * evec
+
+            assert_allclose(LHS, RHS, rtol=rtol, atol=atol, err_msg=err)
+            break
+
+    # check eigenvalues
+    assert_allclose_cc(eigenvalues, exact_eval, rtol=rtol, atol=atol, err_msg=err)
+
+
+class DictWithRepr(dict):
+    def __init__(self, name):
+        self.name = name
+
+    def __repr__(self):
+        return "<%s>" % self.name
+
+
+class SymmetricParams:
+    def __init__(self):
+        self.eigs = eigsh
+        self.which = ['LM', 'SM', 'LA', 'SA', 'BE']
+        self.mattypes = [csr_matrix, aslinearoperator, np.asarray]
+        self.sigmas_modes = {None: ['normal'],
+                             0.5: ['normal', 'buckling', 'cayley']}
+
+        # generate matrices
+        # these should all be float32 so that the eigenvalues
+        # are the same in float32 and float64
+        N = 6
+        np.random.seed(2300)
+        Ar = generate_matrix(N, hermitian=True,
+                             pos_definite=True).astype('f').astype('d')
+        M = generate_matrix(N, hermitian=True,
+                            pos_definite=True).astype('f').astype('d')
+        Ac = generate_matrix(N, hermitian=True, pos_definite=True,
+                             complex_=True).astype('F').astype('D')
+        Mc = generate_matrix(N, hermitian=True, pos_definite=True,
+                             complex_=True).astype('F').astype('D')
+        v0 = np.random.random(N)
+
+        # standard symmetric problem
+        SS = DictWithRepr("std-symmetric")
+        SS['mat'] = Ar
+        SS['v0'] = v0
+        SS['eval'] = eigh(SS['mat'], eigvals_only=True)
+
+        # general symmetric problem
+        GS = DictWithRepr("gen-symmetric")
+        GS['mat'] = Ar
+        GS['bmat'] = M
+        GS['v0'] = v0
+        GS['eval'] = eigh(GS['mat'], GS['bmat'], eigvals_only=True)
+
+        # standard hermitian problem
+        SH = DictWithRepr("std-hermitian")
+        SH['mat'] = Ac
+        SH['v0'] = v0
+        SH['eval'] = eigh(SH['mat'], eigvals_only=True)
+
+        # general hermitian problem
+        GH = DictWithRepr("gen-hermitian")
+        GH['mat'] = Ac
+        GH['bmat'] = M
+        GH['v0'] = v0
+        GH['eval'] = eigh(GH['mat'], GH['bmat'], eigvals_only=True)
+
+        # general hermitian problem with hermitian M
+        GHc = DictWithRepr("gen-hermitian-Mc")
+        GHc['mat'] = Ac
+        GHc['bmat'] = Mc
+        GHc['v0'] = v0
+        GHc['eval'] = eigh(GHc['mat'], GHc['bmat'], eigvals_only=True)
+
+        self.real_test_cases = [SS, GS]
+        self.complex_test_cases = [SH, GH, GHc]
+
+
+class NonSymmetricParams:
+    def __init__(self):
+        self.eigs = eigs
+        self.which = ['LM', 'LR', 'LI']  # , 'SM', 'LR', 'SR', 'LI', 'SI']
+        self.mattypes = [csr_matrix, aslinearoperator, np.asarray]
+        self.sigmas_OPparts = {None: [None],
+                               0.1: ['r'],
+                               0.1 + 0.1j: ['r', 'i']}
+
+        # generate matrices
+        # these should all be float32 so that the eigenvalues
+        # are the same in float32 and float64
+        N = 6
+        np.random.seed(2300)
+        Ar = generate_matrix(N).astype('f').astype('d')
+        M = generate_matrix(N, hermitian=True,
+                            pos_definite=True).astype('f').astype('d')
+        Ac = generate_matrix(N, complex_=True).astype('F').astype('D')
+        v0 = np.random.random(N)
+
+        # standard real nonsymmetric problem
+        SNR = DictWithRepr("std-real-nonsym")
+        SNR['mat'] = Ar
+        SNR['v0'] = v0
+        SNR['eval'] = eig(SNR['mat'], left=False, right=False)
+
+        # general real nonsymmetric problem
+        GNR = DictWithRepr("gen-real-nonsym")
+        GNR['mat'] = Ar
+        GNR['bmat'] = M
+        GNR['v0'] = v0
+        GNR['eval'] = eig(GNR['mat'], GNR['bmat'], left=False, right=False)
+
+        # standard complex nonsymmetric problem
+        SNC = DictWithRepr("std-cmplx-nonsym")
+        SNC['mat'] = Ac
+        SNC['v0'] = v0
+        SNC['eval'] = eig(SNC['mat'], left=False, right=False)
+
+        # general complex nonsymmetric problem
+        GNC = DictWithRepr("gen-cmplx-nonsym")
+        GNC['mat'] = Ac
+        GNC['bmat'] = M
+        GNC['v0'] = v0
+        GNC['eval'] = eig(GNC['mat'], GNC['bmat'], left=False, right=False)
+
+        self.real_test_cases = [SNR, GNR]
+        self.complex_test_cases = [SNC, GNC]
+
+
+def test_symmetric_modes():
+    params = SymmetricParams()
+    k = 2
+    symmetric = True
+    for D in params.real_test_cases:
+        for typ in 'fd':
+            for which in params.which:
+                for mattype in params.mattypes:
+                    for (sigma, modes) in params.sigmas_modes.items():
+                        for mode in modes:
+                            eval_evec(symmetric, D, typ, k, which,
+                                      None, sigma, mattype, None, mode)
+
+
+def test_hermitian_modes():
+    params = SymmetricParams()
+    k = 2
+    symmetric = True
+    for D in params.complex_test_cases:
+        for typ in 'FD':
+            for which in params.which:
+                if which == 'BE':
+                    continue  # BE invalid for complex
+                for mattype in params.mattypes:
+                    for sigma in params.sigmas_modes:
+                        eval_evec(symmetric, D, typ, k, which,
+                                  None, sigma, mattype)
+
+
+def test_symmetric_starting_vector():
+    params = SymmetricParams()
+    symmetric = True
+    for k in [1, 2, 3, 4, 5]:
+        for D in params.real_test_cases:
+            for typ in 'fd':
+                v0 = random.rand(len(D['v0'])).astype(typ)
+                eval_evec(symmetric, D, typ, k, 'LM', v0)
+
+
+def test_symmetric_no_convergence():
+    np.random.seed(1234)
+    m = generate_matrix(30, hermitian=True, pos_definite=True)
+    tol, rtol, atol = _get_test_tolerance('d')
+    try:
+        w, v = eigsh(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol, ncv=9)
+        raise AssertionError("Spurious no-error exit")
+    except ArpackNoConvergence as err:
+        k = len(err.eigenvalues)
+        if k <= 0:
+            raise AssertionError("Spurious no-eigenvalues-found case") from err
+        w, v = err.eigenvalues, err.eigenvectors
+        assert_allclose(dot(m, v), w * v, rtol=rtol, atol=atol)
+
+
+def test_real_nonsymmetric_modes():
+    params = NonSymmetricParams()
+    k = 2
+    symmetric = False
+    for D in params.real_test_cases:
+        for typ in 'fd':
+            for which in params.which:
+                for mattype in params.mattypes:
+                    for sigma, OPparts in params.sigmas_OPparts.items():
+                        for OPpart in OPparts:
+                            eval_evec(symmetric, D, typ, k, which,
+                                      None, sigma, mattype, OPpart)
+
+
+def test_complex_nonsymmetric_modes():
+    params = NonSymmetricParams()
+    k = 2
+    symmetric = False
+    for D in params.complex_test_cases:
+        for typ in 'DF':
+            for which in params.which:
+                for mattype in params.mattypes:
+                    for sigma in params.sigmas_OPparts:
+                        eval_evec(symmetric, D, typ, k, which,
+                                  None, sigma, mattype)
+
+
+def test_standard_nonsymmetric_starting_vector():
+    params = NonSymmetricParams()
+    sigma = None
+    symmetric = False
+    for k in [1, 2, 3, 4]:
+        for d in params.complex_test_cases:
+            for typ in 'FD':
+                A = d['mat']
+                n = A.shape[0]
+                v0 = random.rand(n).astype(typ)
+                eval_evec(symmetric, d, typ, k, "LM", v0, sigma)
+
+
+def test_general_nonsymmetric_starting_vector():
+    params = NonSymmetricParams()
+    sigma = None
+    symmetric = False
+    for k in [1, 2, 3, 4]:
+        for d in params.complex_test_cases:
+            for typ in 'FD':
+                A = d['mat']
+                n = A.shape[0]
+                v0 = random.rand(n).astype(typ)
+                eval_evec(symmetric, d, typ, k, "LM", v0, sigma)
+
+
+def test_standard_nonsymmetric_no_convergence():
+    np.random.seed(1234)
+    m = generate_matrix(30, complex_=True)
+    tol, rtol, atol = _get_test_tolerance('d')
+    try:
+        w, v = eigs(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol)
+        raise AssertionError("Spurious no-error exit")
+    except ArpackNoConvergence as err:
+        k = len(err.eigenvalues)
+        if k <= 0:
+            raise AssertionError("Spurious no-eigenvalues-found case") from err
+        w, v = err.eigenvalues, err.eigenvectors
+        for ww, vv in zip(w, v.T):
+            assert_allclose(dot(m, vv), ww * vv, rtol=rtol, atol=atol)
+
+
+def test_eigen_bad_shapes():
+    # A is not square.
+    A = csc_matrix(np.zeros((2, 3)))
+    assert_raises(ValueError, eigs, A)
+
+
+def test_eigen_bad_kwargs():
+    # Test eigen on wrong keyword argument
+    A = csc_matrix(np.zeros((8, 8)))
+    assert_raises(ValueError, eigs, A, which='XX')
+
+
+def test_ticket_1459_arpack_crash():
+    for dtype in [np.float32, np.float64]:
+        # This test does not seem to catch the issue for float32,
+        # but we made the same fix there, just to be sure
+
+        N = 6
+        k = 2
+
+        np.random.seed(2301)
+        A = np.random.random((N, N)).astype(dtype)
+        v0 = np.array([-0.71063568258907849895, -0.83185111795729227424,
+                       -0.34365925382227402451, 0.46122533684552280420,
+                       -0.58001341115969040629, -0.78844877570084292984e-01],
+                      dtype=dtype)
+
+        # Should not crash:
+        evals, evecs = eigs(A, k, v0=v0)
+
+
+@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
+def test_linearoperator_deallocation():
+    # Check that the linear operators used by the Arpack wrappers are
+    # deallocatable by reference counting -- they are big objects, so
+    # Python's cyclic GC may not collect them fast enough before
+    # running out of memory if eigs/eigsh are called in a tight loop.
+
+    M_d = np.eye(10)
+    M_s = csc_matrix(M_d)
+    M_o = aslinearoperator(M_d)
+
+    with assert_deallocated(lambda: arpack.SpLuInv(M_s)):
+        pass
+    with assert_deallocated(lambda: arpack.LuInv(M_d)):
+        pass
+    with assert_deallocated(lambda: arpack.IterInv(M_s)):
+        pass
+    with assert_deallocated(lambda: arpack.IterOpInv(M_o, None, 0.3)):
+        pass
+    with assert_deallocated(lambda: arpack.IterOpInv(M_o, M_o, 0.3)):
+        pass
+
+
+def test_parallel_threads():
+    results = []
+    v0 = np.random.rand(50)
+
+    def worker():
+        x = diags([1, -2, 1], [-1, 0, 1], shape=(50, 50))
+        w, v = eigs(x, k=3, v0=v0)
+        results.append(w)
+
+        w, v = eigsh(x, k=3, v0=v0)
+        results.append(w)
+
+    threads = [threading.Thread(target=worker) for k in range(10)]
+    for t in threads:
+        t.start()
+    for t in threads:
+        t.join()
+
+    worker()
+
+    for r in results:
+        assert_allclose(r, results[-1])
+
+
+def test_reentering():
+    # Just some linear operator that calls eigs recursively
+    def A_matvec(x):
+        x = diags([1, -2, 1], [-1, 0, 1], shape=(50, 50))
+        w, v = eigs(x, k=1)
+        return v / w[0]
+    A = LinearOperator(matvec=A_matvec, dtype=float, shape=(50, 50))
+
+    # The Fortran code is not reentrant, so this fails (gracefully, not crashing)
+    assert_raises(RuntimeError, eigs, A, k=1)
+    assert_raises(RuntimeError, eigsh, A, k=1)
+
+
+def test_regression_arpackng_1315():
+    # Check that issue arpack-ng/#1315 is not present.
+    # Adapted from arpack-ng/TESTS/bug_1315_single.c
+    # If this fails, then the installed ARPACK library is faulty.
+
+    for dtype in [np.float32, np.float64]:
+        np.random.seed(1234)
+
+        w0 = np.arange(1, 1000+1).astype(dtype)
+        A = diags([w0], [0], shape=(1000, 1000))
+
+        v0 = np.random.rand(1000).astype(dtype)
+        w, v = eigs(A, k=9, ncv=2*9+1, which="LM", v0=v0)
+
+        assert_allclose(np.sort(w), np.sort(w0[-9:]),
+                        rtol=1e-4)
+
+
+def test_eigs_for_k_greater():
+    # Test eigs() for k beyond limits.
+    A_sparse = diags([1, -2, 1], [-1, 0, 1], shape=(4, 4))  # sparse
+    A = generate_matrix(4, sparse=False)
+    M_dense = np.random.random((4, 4))
+    M_sparse = generate_matrix(4, sparse=True)
+    M_linop = aslinearoperator(M_dense)
+    eig_tuple1 = eig(A, b=M_dense)
+    eig_tuple2 = eig(A, b=M_sparse)
+
+    with suppress_warnings() as sup:
+        sup.filter(RuntimeWarning)
+
+        assert_equal(eigs(A, M=M_dense, k=3), eig_tuple1)
+        assert_equal(eigs(A, M=M_dense, k=4), eig_tuple1)
+        assert_equal(eigs(A, M=M_dense, k=5), eig_tuple1)
+        assert_equal(eigs(A, M=M_sparse, k=5), eig_tuple2)
+
+        # M as LinearOperator
+        assert_raises(TypeError, eigs, A, M=M_linop, k=3)
+
+        # Test 'A' for different types
+        assert_raises(TypeError, eigs, aslinearoperator(A), k=3)
+        assert_raises(TypeError, eigs, A_sparse, k=3)
+
+
+def test_eigsh_for_k_greater():
+    # Test eigsh() for k beyond limits.
+    A_sparse = diags([1, -2, 1], [-1, 0, 1], shape=(4, 4))  # sparse
+    A = generate_matrix(4, sparse=False)
+    M_dense = generate_matrix_symmetric(4, pos_definite=True)
+    M_sparse = generate_matrix_symmetric(4, pos_definite=True, sparse=True)
+    M_linop = aslinearoperator(M_dense)
+    eig_tuple1 = eigh(A, b=M_dense)
+    eig_tuple2 = eigh(A, b=M_sparse)
+
+    with suppress_warnings() as sup:
+        sup.filter(RuntimeWarning)
+
+        assert_equal(eigsh(A, M=M_dense, k=4), eig_tuple1)
+        assert_equal(eigsh(A, M=M_dense, k=5), eig_tuple1)
+        assert_equal(eigsh(A, M=M_sparse, k=5), eig_tuple2)
+
+        # M as LinearOperator
+        assert_raises(TypeError, eigsh, A, M=M_linop, k=4)
+
+        # Test 'A' for different types
+        assert_raises(TypeError, eigsh, aslinearoperator(A), k=4)
+        assert_raises(TypeError, eigsh, A_sparse, M=M_dense, k=4)
+
+
+def test_real_eigs_real_k_subset():
+    np.random.seed(1)
+
+    n = 10
+    A = rand(n, n, density=0.5)
+    A.data *= 2
+    A.data -= 1
+
+    v0 = np.ones(n)
+
+    whichs = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
+    dtypes = [np.float32, np.float64]
+
+    for which, sigma, dtype in itertools.product(whichs, [None, 0, 5], dtypes):
+        prev_w = np.array([], dtype=dtype)
+        eps = np.finfo(dtype).eps
+        for k in range(1, 9):
+            w, z = eigs(A.astype(dtype), k=k, which=which, sigma=sigma,
+                        v0=v0.astype(dtype), tol=0)
+            assert_allclose(np.linalg.norm(A.dot(z) - z * w), 0, atol=np.sqrt(eps))
+
+            # Check that the set of eigenvalues for `k` is a subset of that for `k+1`
+            dist = abs(prev_w[:,None] - w).min(axis=1)
+            assert_allclose(dist, 0, atol=np.sqrt(eps))
+
+            prev_w = w
+
+            # Check sort order
+            if sigma is None:
+                d = w
+            else:
+                d = 1 / (w - sigma)
+
+            if which == 'LM':
+                # ARPACK is systematic for 'LM', but sort order
+                # appears not well defined for other modes
+                assert np.all(np.diff(abs(d)) <= 1e-6)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/lobpcg/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/lobpcg/__init__.py
new file mode 100644
index 00000000..6ab53303
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/lobpcg/__init__.py
@@ -0,0 +1,16 @@
+"""
+Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG)
+
+LOBPCG is a preconditioned eigensolver for large symmetric positive definite
+(SPD) generalized eigenproblems.
+
+Call the function lobpcg - see help for lobpcg.lobpcg.
+
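+A minimal usage sketch (the diagonal test matrix, seed, and ``maxiter``
+below are illustrative choices, not defaults):
+
+>>> import numpy as np
+>>> from scipy.sparse.linalg import lobpcg
+>>> A = np.diag(np.arange(1.0, 101.0))
+>>> X = np.random.default_rng(0).normal(size=(100, 3))
+>>> w, v = lobpcg(A, X, largest=False, maxiter=100)
+>>> np.round(w)
+array([1., 2., 3.])
+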
+"""
+from .lobpcg import *
+
+__all__ = [s for s in dir() if not s.startswith('_')]
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/lobpcg/lobpcg.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/lobpcg/lobpcg.py
new file mode 100644
index 00000000..d6d54698
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/lobpcg/lobpcg.py
@@ -0,0 +1,982 @@
+"""
+Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG).
+
+References
+----------
+.. [1] A. V. Knyazev (2001),
+       Toward the Optimal Preconditioned Eigensolver: Locally Optimal
+       Block Preconditioned Conjugate Gradient Method.
+       SIAM Journal on Scientific Computing 23, no. 2,
+       pp. 517-541. :doi:`10.1137/S1064827500366124`
+
+.. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov (2007),
+       Block Locally Optimal Preconditioned Eigenvalue Xolvers (BLOPEX)
+       in hypre and PETSc.  :arxiv:`0705.2626`
+
+.. [3] A. V. Knyazev's C and MATLAB implementations:
+       https://github.com/lobpcg/blopex
+"""
+
+import warnings
+import numpy as np
+from scipy.linalg import (inv, eigh, cho_factor, cho_solve,
+                          cholesky, LinAlgError)
+from scipy.sparse.linalg import LinearOperator
+from scipy.sparse import isspmatrix
+from numpy import block as bmat
+
+__all__ = ["lobpcg"]
+
+
+def _report_nonhermitian(M, name):
+    """
+    Report if `M` is not a Hermitian matrix given its type.
+    """
+    from scipy.linalg import norm
+
+    md = M - M.T.conj()
+    nmd = norm(md, 1)
+    tol = 10 * np.finfo(M.dtype).eps
+    tol = max(tol, tol * norm(M, 1))
+    if nmd > tol:
+        warnings.warn(
+              f"Matrix {name} of the type {M.dtype} is not Hermitian: "
+              f"condition: {nmd} < {tol} fails.",
+              UserWarning, stacklevel=4
+         )
+
+def _as2d(ar):
+    """
+    If the input array is 2D, return it; if it is 1D, append a dimension,
+    making it a column vector.
+    """
+    if ar.ndim == 2:
+        return ar
+    else:  # Assume 1-D.
+        aux = np.array(ar, copy=False)
+        aux.shape = (ar.shape[0], 1)
+        return aux
+
+
+def _makeMatMat(m):
+    """Normalize ``m`` (matrix, callable, or None) to a mat-mat callable."""
+    if m is None:
+        return None
+    elif callable(m):
+        return lambda v: m(v)
+    else:
+        return lambda v: m @ v
+
+
+def _applyConstraints(blockVectorV, factYBY, blockVectorBY, blockVectorY):
+    """Changes blockVectorV in place."""
+    YBV = np.dot(blockVectorBY.T.conj(), blockVectorV)
+    tmp = cho_solve(factYBY, YBV)
+    blockVectorV -= np.dot(blockVectorY, tmp)
+
+
+def _b_orthonormalize(B, blockVectorV, blockVectorBV=None,
+                      verbosityLevel=0):
+    """in-place B-orthonormalize the given block vector using Cholesky."""
+    normalization = blockVectorV.max(axis=0) + np.finfo(blockVectorV.dtype).eps
+    blockVectorV = blockVectorV / normalization
+    if blockVectorBV is None:
+        if B is not None:
+            try:
+                blockVectorBV = B(blockVectorV)
+            except Exception as e:
+                if verbosityLevel:
+                    warnings.warn(
+                        f"Secondary MatMul call failed with error\n"
+                        f"{e}\n",
+                        UserWarning, stacklevel=3
+                    )
+                # Return regardless of verbosity; callers check for None.
+                return None, None, None, normalization
+            if blockVectorBV.shape != blockVectorV.shape:
+                raise ValueError(
+                    f"The shape {blockVectorV.shape} "
+                    f"of the orthogonalized matrix not preserved\n"
+                    f"and changed to {blockVectorBV.shape} "
+                    f"after multiplying by the secondary matrix.\n"
+                )
+        else:
+            blockVectorBV = blockVectorV  # Shared data!!!
+    else:
+        blockVectorBV = blockVectorBV / normalization
+    VBV = blockVectorV.T.conj() @ blockVectorBV
+    try:
+        # VBV is a Cholesky factor from now on...
+        VBV = cholesky(VBV, overwrite_a=True)
+        VBV = inv(VBV, overwrite_a=True)
+        blockVectorV = blockVectorV @ VBV
+        # blockVectorV = (cho_solve((VBV.T, True), blockVectorV.T)).T
+        if B is not None:
+            blockVectorBV = blockVectorBV @ VBV
+            # blockVectorBV = (cho_solve((VBV.T, True), blockVectorBV.T)).T
+        return blockVectorV, blockVectorBV, VBV, normalization
+    except LinAlgError:
+        if verbosityLevel:
+            warnings.warn(
+                "Cholesky has failed.",
+                UserWarning, stacklevel=3
+            )
+        return None, None, None, normalization
+
+
+def _get_indx(_lambda, num, largest):
+    """Get `num` indices into `_lambda` depending on `largest` option."""
+    ii = np.argsort(_lambda)
+    if largest:
+        ii = ii[:-num - 1:-1]
+    else:
+        ii = ii[:num]
+
+    return ii
+
+
+def _handle_gramA_gramB_verbosity(gramA, gramB, verbosityLevel):
+    if verbosityLevel:
+        _report_nonhermitian(gramA, "gramA")
+        _report_nonhermitian(gramB, "gramB")
+
+
+def lobpcg(
+    A,
+    X,
+    B=None,
+    M=None,
+    Y=None,
+    tol=None,
+    maxiter=None,
+    largest=True,
+    verbosityLevel=0,
+    retLambdaHistory=False,
+    retResidualNormsHistory=False,
+    restartControl=20,
+):
+    """Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG).
+
+    LOBPCG is a preconditioned eigensolver for large symmetric positive
+    definite (SPD) generalized eigenproblems.
+
+    Parameters
+    ----------
+    A : {sparse matrix, dense matrix, LinearOperator, callable object}
+        The symmetric linear operator of the problem, usually a
+        sparse matrix.  Often called the "stiffness matrix".
+    X : ndarray, float32 or float64
+        Initial approximation to the ``k`` eigenvectors (non-sparse). If `A`
+        has ``shape=(n,n)`` then `X` should have shape ``shape=(n,k)``.
+    B : {dense matrix, sparse matrix, LinearOperator, callable object}
+        Optional.
+        The right hand side operator in a generalized eigenproblem.
+        By default, ``B = Identity``.  Often called the "mass matrix".
+    M : {dense matrix, sparse matrix, LinearOperator, callable object}
+        Optional.
+        Preconditioner to `A`; by default ``M = Identity``.
+        `M` should approximate the inverse of `A`.
+    Y : ndarray, float32 or float64, optional.
+        An n-by-sizeY matrix of constraints (non-sparse), sizeY < n.
+        The iterations will be performed in the B-orthogonal complement
+        of the column-space of Y. Y must be full rank.
+    tol : scalar, optional.
+        Solver tolerance (stopping criterion).
+        The default is ``tol=n*sqrt(eps)``.
+    maxiter : int, optional.
+        Maximum number of iterations.  The default is ``maxiter=20``.
+    largest : bool, optional.
+        When True, solve for the largest eigenvalues, otherwise the smallest.
+    verbosityLevel : int, optional
+        Controls solver output.  The default is ``verbosityLevel=0``.
+    retLambdaHistory : bool, optional.
+        Whether to return eigenvalue history.  Default is False.
+    retResidualNormsHistory : bool, optional.
+        Whether to return history of residual norms.  Default is False.
+    restartControl : int, optional.
+        Iterations restart if the residuals jump up 2**restartControl times
+        compared to the smallest ones recorded in retResidualNormsHistory.
+        The default is ``restartControl=20``, making the restarts rare for
+        backward compatibility.
+
+    Returns
+    -------
+    w : ndarray
+        Array of ``k`` eigenvalues.
+    v : ndarray
+        An array of ``k`` eigenvectors.  `v` has the same shape as `X`.
+    lambdas : ndarray, optional
+        The eigenvalue history, if `retLambdaHistory` is True.
+    rnorms : ndarray, optional
+        The history of residual norms, if `retResidualNormsHistory` is True.
+
+    Notes
+    -----
+    The iterative loop runs at most ``maxiter`` iterations (20 if
+    ``maxiter=None``) and finishes earlier if the tolerance is met.
+    Breaking backward compatibility with the previous version, lobpcg
+    now returns the block of iterative vectors with the best accuracy rather
+    than the last one iterated, as a cure for possible divergence.
+
+    The size of the iteration history output equals the number of the best
+    (limited by ``maxiter``) iterations plus 3 (initial, final, and
+    postprocessing).
+
+    If both ``retLambdaHistory`` and ``retResidualNormsHistory`` are True,
+    the return tuple has the following format
+    ``(lambda, V, lambda history, residual norms history)``.
+
+    In the following ``n`` denotes the matrix size and ``k`` the number
+    of required eigenvalues (smallest or largest).
+
+    The LOBPCG code internally solves eigenproblems of the size ``3k`` on every
+    iteration by calling the dense eigensolver `eigh`, so if ``k`` is not
+    small enough compared to ``n``, it makes no sense to call the LOBPCG code.
+    Moreover, if one calls the LOBPCG algorithm for ``5k > n``, it would likely
+    break internally, so the code calls the standard function `eigh` instead.
+    It is not that ``n`` should be large for the LOBPCG to work, but rather
+    the ratio ``n / k`` should be large. If you call LOBPCG with ``k=1``
+    and ``n=10``, it works even though ``n`` is small. The method is
+    intended for extremely large ``n / k``.
+
+    The convergence speed depends basically on two factors:
+
+    1. Relative separation of the eigenvalues sought from the rest
+       of the eigenvalues. One can vary ``k`` to improve the absolute
+       separation and use proper preconditioning to shrink the spectral spread.
+       For example, a rod vibration test problem (under tests
+       directory) is ill-conditioned for large ``n``, so convergence will be
+       slow, unless efficient preconditioning is used. For this specific
+       problem, a good simple preconditioner function would be a linear solve
+       for `A`, which is easy to code since `A` is tridiagonal.
+
+    2. Quality of the initial approximations `X` to the eigenvectors sought.
+       Vectors randomly distributed around the origin work well if no better
+       choice is known.
+
+    References
+    ----------
+    .. [1] A. V. Knyazev (2001),
+           Toward the Optimal Preconditioned Eigensolver: Locally Optimal
+           Block Preconditioned Conjugate Gradient Method.
+           SIAM Journal on Scientific Computing 23, no. 2,
+           pp. 517-541. :doi:`10.1137/S1064827500366124`
+
+    .. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov
+           (2007), Block Locally Optimal Preconditioned Eigenvalue Xolvers
+           (BLOPEX) in hypre and PETSc. :arxiv:`0705.2626`
+
+    .. [3] A. V. Knyazev's C and MATLAB implementations:
+           https://github.com/lobpcg/blopex
+
+    Examples
+    --------
+    Solve ``A x = lambda x`` with constraints and preconditioning.
+
+    >>> import numpy as np
+    >>> from scipy.sparse import spdiags, issparse
+    >>> from scipy.sparse.linalg import lobpcg, LinearOperator
+
+    The square matrix size:
+
+    >>> n = 100
+    >>> vals = np.arange(1, n + 1)
+
+    The first mandatory input parameter, in this test
+    a sparse 2D array representing the square matrix
+    of the eigenvalue problem to solve:
+
+    >>> A = spdiags(vals, 0, n, n)
+    >>> A.toarray()
+    array([[  1,   0,   0, ...,   0,   0,   0],
+           [  0,   2,   0, ...,   0,   0,   0],
+           [  0,   0,   3, ...,   0,   0,   0],
+           ...,
+           [  0,   0,   0, ...,  98,   0,   0],
+           [  0,   0,   0, ...,   0,  99,   0],
+           [  0,   0,   0, ...,   0,   0, 100]])
+
+    Initial guess for eigenvectors, should have linearly independent
+    columns. This is the second mandatory input parameter, a 2D array with
+    the column dimension determining the number of requested eigenvalues.
+    If no initial approximations are available, randomly oriented vectors
+    commonly work best, e.g., with components normally distributed
+    around zero or uniformly distributed on the interval [-1, 1].
+
+    >>> rng = np.random.default_rng()
+    >>> X = rng.normal(size=(n, 3))
+
+    Constraints, an optional input parameter, are given as a 2D array of
+    column vectors that the eigenvectors must be orthogonal to:
+
+    >>> Y = np.eye(n, 3)
+
+    The preconditioner is the inverse of `A` in this example:
+
+    >>> invA = spdiags([1./vals], 0, n, n)
+
+    The preconditioner must be defined by a function:
+
+    >>> def precond(x):
+    ...     return invA @ x
+
+    The argument ``x`` of the preconditioner function is a matrix inside
+    `lobpcg`, hence the use of the matrix-matrix product ``@``.
+
+    The preconditioner function is passed to lobpcg as a `LinearOperator`:
+
+    >>> M = LinearOperator(matvec=precond, matmat=precond,
+    ...                    shape=(n, n), dtype=np.float64)
+
+    Let us now solve the eigenvalue problem for the matrix A:
+
+    >>> eigenvalues, _ = lobpcg(A, X, Y=Y, M=M, largest=False)
+    >>> eigenvalues
+    array([4., 5., 6.])
+
+    Note that the vectors passed in Y are the eigenvectors corresponding to
+    the 3 smallest eigenvalues. The eigenvectors returned are orthogonal to
+    those.
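+
+    A follow-up sketch requesting the iteration histories as well; as the
+    Notes describe, the return tuple then has four elements (the values are
+    rounded below since the histories depend on the random start):
+
+    >>> w, v, lh, rh = lobpcg(A, X, Y=Y, M=M, largest=False,
+    ...                       retLambdaHistory=True,
+    ...                       retResidualNormsHistory=True)
+    >>> np.round(w)
+    array([4., 5., 6.])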
+    """
+    blockVectorX = X
+    bestblockVectorX = blockVectorX
+    blockVectorY = Y
+    residualTolerance = tol
+    if maxiter is None:
+        maxiter = 20
+
+    bestIterationNumber = maxiter
+
+    sizeY = 0
+    if blockVectorY is not None:
+        if len(blockVectorY.shape) != 2:
+            warnings.warn(
+                f"Expected rank-2 array for argument Y, instead got "
+                f"{len(blockVectorY.shape)}, "
+                f"so ignore it and use no constraints.",
+                UserWarning, stacklevel=2
+            )
+            blockVectorY = None
+        else:
+            sizeY = blockVectorY.shape[1]
+
+    # Block size.
+    if blockVectorX is None:
+        raise ValueError("The mandatory initial matrix X cannot be None")
+    if len(blockVectorX.shape) != 2:
+        raise ValueError("expected rank-2 array for argument X")
+
+    n, sizeX = blockVectorX.shape
+
+    # Data type of iterates, determined by X, must be inexact
+    if not np.issubdtype(blockVectorX.dtype, np.inexact):
+        warnings.warn(
+            f"Data type for argument X is {blockVectorX.dtype}, "
+            f"which is not inexact, so casted to np.float32.",
+            UserWarning, stacklevel=2
+        )
+        blockVectorX = np.asarray(blockVectorX, dtype=np.float32)
+
+    if retLambdaHistory:
+        lambdaHistory = np.zeros((maxiter + 3, sizeX),
+                                 dtype=blockVectorX.dtype)
+    if retResidualNormsHistory:
+        residualNormsHistory = np.zeros((maxiter + 3, sizeX),
+                                        dtype=blockVectorX.dtype)
+
+    if verbosityLevel:
+        aux = "Solving "
+        if B is None:
+            aux += "standard"
+        else:
+            aux += "generalized"
+        aux += " eigenvalue problem with"
+        if M is None:
+            aux += "out"
+        aux += " preconditioning\n\n"
+        aux += "matrix size %d\n" % n
+        aux += "block size %d\n\n" % sizeX
+        if blockVectorY is None:
+            aux += "No constraints\n\n"
+        else:
+            if sizeY > 1:
+                aux += "%d constraints\n\n" % sizeY
+            else:
+                aux += "%d constraint\n\n" % sizeY
+        print(aux)
+
+    if (n - sizeY) < (5 * sizeX):
+        warnings.warn(
+            f"The problem size {n} minus the constraints size {sizeY} "
+            f"is too small relative to the block size {sizeX}. "
+            f"Using a dense eigensolver instead of LOBPCG iterations."
+            f"No output of the history of the iterations.",
+            UserWarning, stacklevel=2
+        )
+
+        sizeX = min(sizeX, n)
+
+        if blockVectorY is not None:
+            raise NotImplementedError(
+                "The dense eigensolver does not support constraints."
+            )
+
+        # Define the closed range of indices of eigenvalues to return.
+        if largest:
+            eigvals = (n - sizeX, n - 1)
+        else:
+            eigvals = (0, sizeX - 1)
+
+        try:
+            if isinstance(A, LinearOperator):
+                A = A(np.eye(n, dtype=int))
+            elif callable(A):
+                A = A(np.eye(n, dtype=int))
+                if A.shape != (n, n):
+                    raise ValueError(
+                        f"The shape {A.shape} of the primary matrix\n"
+                        f"defined by a callable object is wrong.\n"
+                    )
+            elif isspmatrix(A):
+                A = A.toarray()
+            else:
+                A = np.asarray(A)
+        except Exception as e:
+            raise Exception(
+                f"Primary MatMul call failed with error\n"
+                f"{e}\n")
+
+        if B is not None:
+            try:
+                if isinstance(B, LinearOperator):
+                    B = B(np.eye(n, dtype=int))
+                elif callable(B):
+                    B = B(np.eye(n, dtype=int))
+                    if B.shape != (n, n):
+                        raise ValueError(
+                            f"The shape {B.shape} of the secondary matrix\n"
+                            f"defined by a callable object is wrong.\n"
+                        )
+                elif isspmatrix(B):
+                    B = B.toarray()
+                else:
+                    B = np.asarray(B)
+            except Exception as e:
+                raise Exception(
+                    f"Secondary MatMul call failed with error\n"
+                    f"{e}\n")
+
+        try:
+            vals, vecs = eigh(A,
+                              B,
+                              subset_by_index=eigvals,
+                              check_finite=False)
+            if largest:
+                # Reverse order to be compatible with eigs() in 'LM' mode.
+                vals = vals[::-1]
+                vecs = vecs[:, ::-1]
+
+            return vals, vecs
+        except Exception as e:
+            raise Exception(
+                f"Dense eigensolver failed with error\n"
+                f"{e}\n"
+            )
+
+    if (residualTolerance is None) or (residualTolerance <= 0.0):
+        residualTolerance = np.sqrt(np.finfo(blockVectorX.dtype).eps) * n
+
+    A = _makeMatMat(A)
+    B = _makeMatMat(B)
+    M = _makeMatMat(M)
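+    # _makeMatMat wraps each of A, B, and M into a callable applying the
+    # operator to a whole block of vectors at once (passing None through
+    # unchanged), so the code below can uniformly write A(V), B(V), M(V).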
+
+    # Apply constraints to X.
+    if blockVectorY is not None:
+
+        if B is not None:
+            blockVectorBY = B(blockVectorY)
+            if blockVectorBY.shape != blockVectorY.shape:
+                raise ValueError(
+                    f"The shape {blockVectorY.shape} "
+                    f"of the constraint not preserved\n"
+                    f"and changed to {blockVectorBY.shape} "
+                    f"after multiplying by the secondary matrix.\n"
+                )
+        else:
+            blockVectorBY = blockVectorY
+
+        # gramYBY is a dense array.
+        gramYBY = np.dot(blockVectorY.T.conj(), blockVectorBY)
+        try:
+            # gramYBY is a Cholesky factor from now on...
+            gramYBY = cho_factor(gramYBY)
+        except LinAlgError as e:
+            raise ValueError("Linearly dependent constraints") from e
+
+        _applyConstraints(blockVectorX, gramYBY, blockVectorBY, blockVectorY)
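+    # The call above performs, in effect, the projection
+    #     X <- X - Y @ inv(Y^H B Y) @ (B Y)^H X,
+    # making the block X B-orthogonal to the constraint block Y.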
+
+    ##
+    # B-orthonormalize X.
+    blockVectorX, blockVectorBX, _, _ = _b_orthonormalize(
+        B, blockVectorX, verbosityLevel=verbosityLevel)
+    if blockVectorX is None:
+        raise ValueError("Linearly dependent initial approximations")
+
+    ##
+    # Compute the initial Ritz vectors: solve the eigenproblem.
+    blockVectorAX = A(blockVectorX)
+    if blockVectorAX.shape != blockVectorX.shape:
+        raise ValueError(
+            f"The shape {blockVectorX.shape} "
+            f"of the initial approximations not preserved\n"
+            f"and changed to {blockVectorAX.shape} "
+            f"after multiplying by the primary matrix.\n"
+        )
+
+    gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX)
+
+    _lambda, eigBlockVector = eigh(gramXAX, check_finite=False)
+    ii = _get_indx(_lambda, sizeX, largest)
+    _lambda = _lambda[ii]
+    if retLambdaHistory:
+        lambdaHistory[0, :] = _lambda
+
+    eigBlockVector = np.asarray(eigBlockVector[:, ii])
+    blockVectorX = np.dot(blockVectorX, eigBlockVector)
+    blockVectorAX = np.dot(blockVectorAX, eigBlockVector)
+    if B is not None:
+        blockVectorBX = np.dot(blockVectorBX, eigBlockVector)
+
+    ##
+    # Active index set.
+    activeMask = np.ones((sizeX,), dtype=bool)
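+    # "Soft locking": components whose residual norms drop below the
+    # tolerance are removed from this active set as the iterations
+    # proceed and are no longer refined, shrinking the working block.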
+
+    ##
+    # Main iteration loop.
+
+    blockVectorP = None  # set during iteration
+    blockVectorAP = None
+    blockVectorBP = None
+
+    smallestResidualNorm = np.abs(np.finfo(blockVectorX.dtype).max)
+
+    iterationNumber = -1
+    restart = True
+    forcedRestart = False
+    explicitGramFlag = False
+    while iterationNumber < maxiter:
+        iterationNumber += 1
+
+        if B is not None:
+            aux = blockVectorBX * _lambda[np.newaxis, :]
+        else:
+            aux = blockVectorX * _lambda[np.newaxis, :]
+
+        blockVectorR = blockVectorAX - aux
+
+        aux = np.sum(blockVectorR.conj() * blockVectorR, 0)
+        residualNorms = np.sqrt(np.abs(aux))
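+        # residualNorms now holds the columnwise norms
+        # ||A x_j - lambda_j B x_j||_2; np.abs guards against tiny
+        # negative or complex round-off in aux.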
+        if retResidualNormsHistory:
+            residualNormsHistory[iterationNumber, :] = residualNorms
+        residualNorm = np.sum(np.abs(residualNorms)) / sizeX
+
+        if residualNorm < smallestResidualNorm:
+            smallestResidualNorm = residualNorm
+            bestIterationNumber = iterationNumber
+            bestblockVectorX = blockVectorX
+        elif residualNorm > 2**restartControl * smallestResidualNorm:
+            forcedRestart = True
+            blockVectorAX = A(blockVectorX)
+            if blockVectorAX.shape != blockVectorX.shape:
+                raise ValueError(
+                    f"The shape {blockVectorX.shape} "
+                    f"of the restarted iterate not preserved\n"
+                    f"and changed to {blockVectorAX.shape} "
+                    f"after multiplying by the primary matrix.\n"
+                )
+            if B is not None:
+                blockVectorBX = B(blockVectorX)
+                if blockVectorBX.shape != blockVectorX.shape:
+                    raise ValueError(
+                        f"The shape {blockVectorX.shape} "
+                        f"of the restarted iterate not preserved\n"
+                        f"and changed to {blockVectorBX.shape} "
+                        f"after multiplying by the secondary matrix.\n"
+                    )
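+        # Residual growth beyond 2**restartControl times the best value
+        # seen so far forces a restart: A X (and B X) are recomputed
+        # afresh above, and the P directions are dropped from the next
+        # Rayleigh-Ritz basis.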
+
+        ii = residualNorms > residualTolerance
+        activeMask = activeMask & ii
+        currentBlockSize = activeMask.sum()
+
+        if verbosityLevel:
+            print(f"iteration {iterationNumber}")
+            print(f"current block size: {currentBlockSize}")
+            print(f"eigenvalue(s):\n{_lambda}")
+            print(f"residual norm(s):\n{residualNorms}")
+
+        if currentBlockSize == 0:
+            break
+
+        activeBlockVectorR = _as2d(blockVectorR[:, activeMask])
+
+        if iterationNumber > 0:
+            activeBlockVectorP = _as2d(blockVectorP[:, activeMask])
+            activeBlockVectorAP = _as2d(blockVectorAP[:, activeMask])
+            if B is not None:
+                activeBlockVectorBP = _as2d(blockVectorBP[:, activeMask])
+
+        if M is not None:
+            # Apply preconditioner T to the active residuals.
+            activeBlockVectorR = M(activeBlockVectorR)
+
+        ##
+        # Apply constraints to the preconditioned residuals.
+        if blockVectorY is not None:
+            _applyConstraints(activeBlockVectorR,
+                              gramYBY,
+                              blockVectorBY,
+                              blockVectorY)
+
+        ##
+        # B-orthogonalize the preconditioned residuals to X.
+        if B is not None:
+            activeBlockVectorR = activeBlockVectorR - (
+                blockVectorX @
+                (blockVectorBX.T.conj() @ activeBlockVectorR)
+            )
+        else:
+            activeBlockVectorR = activeBlockVectorR - (
+                blockVectorX @
+                (blockVectorX.T.conj() @ activeBlockVectorR)
+            )
+
+        ##
+        # B-orthonormalize the preconditioned residuals.
+        aux = _b_orthonormalize(
+            B, activeBlockVectorR, verbosityLevel=verbosityLevel)
+        activeBlockVectorR, activeBlockVectorBR, _, _ = aux
+
+        if activeBlockVectorR is None:
+            warnings.warn(
+                f"Failed at iteration {iterationNumber} with accuracies "
+                f"{residualNorms}\n not reaching the requested "
+                f"tolerance {residualTolerance}.",
+                UserWarning, stacklevel=2
+            )
+            break
+        activeBlockVectorAR = A(activeBlockVectorR)
+
+        if iterationNumber > 0:
+            if B is not None:
+                aux = _b_orthonormalize(
+                    B, activeBlockVectorP, activeBlockVectorBP,
+                    verbosityLevel=verbosityLevel
+                )
+                activeBlockVectorP, activeBlockVectorBP, invR, normal = aux
+            else:
+                aux = _b_orthonormalize(B, activeBlockVectorP,
+                                        verbosityLevel=verbosityLevel)
+                activeBlockVectorP, _, invR, normal = aux
+            # Function _b_orthonormalize returns None if Cholesky fails
+            if activeBlockVectorP is not None:
+                activeBlockVectorAP = activeBlockVectorAP / normal
+                activeBlockVectorAP = np.dot(activeBlockVectorAP, invR)
+                restart = forcedRestart
+            else:
+                restart = True
+
+        ##
+        # Perform the Rayleigh Ritz Procedure:
+        # Compute symmetric Gram matrices:
+
+        if activeBlockVectorAR.dtype == "float32":
+            myeps = 1
+        else:
+            myeps = np.sqrt(np.finfo(activeBlockVectorR.dtype).eps)
+
+        if residualNorms.max() > myeps and not explicitGramFlag:
+            explicitGramFlag = False
+        else:
+            # Once explicitGramFlag, forever explicitGramFlag.
+            explicitGramFlag = True
+
+        # Shared memory assignments to simplify the code
+        if B is None:
+            blockVectorBX = blockVectorX
+            activeBlockVectorBR = activeBlockVectorR
+            if not restart:
+                activeBlockVectorBP = activeBlockVectorP
+
+        # Common submatrices:
+        gramXAR = np.dot(blockVectorX.T.conj(), activeBlockVectorAR)
+        gramRAR = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAR)
+
+        gramDtype = activeBlockVectorAR.dtype
+        if explicitGramFlag:
+            gramRAR = (gramRAR + gramRAR.T.conj()) / 2
+            gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX)
+            gramXAX = (gramXAX + gramXAX.T.conj()) / 2
+            gramXBX = np.dot(blockVectorX.T.conj(), blockVectorBX)
+            gramRBR = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorBR)
+            gramXBR = np.dot(blockVectorX.T.conj(), activeBlockVectorBR)
+        else:
+            gramXAX = np.diag(_lambda).astype(gramDtype)
+            gramXBX = np.eye(sizeX, dtype=gramDtype)
+            gramRBR = np.eye(currentBlockSize, dtype=gramDtype)
+            gramXBR = np.zeros((sizeX, currentBlockSize), dtype=gramDtype)
+
+        if not restart:
+            gramXAP = np.dot(blockVectorX.T.conj(), activeBlockVectorAP)
+            gramRAP = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAP)
+            gramPAP = np.dot(activeBlockVectorP.T.conj(), activeBlockVectorAP)
+            gramXBP = np.dot(blockVectorX.T.conj(), activeBlockVectorBP)
+            gramRBP = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorBP)
+            if explicitGramFlag:
+                gramPAP = (gramPAP + gramPAP.T.conj()) / 2
+                gramPBP = np.dot(activeBlockVectorP.T.conj(),
+                                 activeBlockVectorBP)
+            else:
+                gramPBP = np.eye(currentBlockSize, dtype=gramDtype)
+
+            gramA = bmat(
+                [
+                    [gramXAX, gramXAR, gramXAP],
+                    [gramXAR.T.conj(), gramRAR, gramRAP],
+                    [gramXAP.T.conj(), gramRAP.T.conj(), gramPAP],
+                ]
+            )
+            gramB = bmat(
+                [
+                    [gramXBX, gramXBR, gramXBP],
+                    [gramXBR.T.conj(), gramRBR, gramRBP],
+                    [gramXBP.T.conj(), gramRBP.T.conj(), gramPBP],
+                ]
+            )
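+            # Rayleigh-Ritz on the trial subspace spanned by [X, R, P]:
+            # solve the generalized eigenproblem gramA y = lambda gramB y
+            # of size sizeX + 2 * currentBlockSize.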
+
+            _handle_gramA_gramB_verbosity(gramA, gramB, verbosityLevel)
+
+            try:
+                _lambda, eigBlockVector = eigh(gramA,
+                                               gramB,
+                                               check_finite=False)
+            except LinAlgError as e:
+                # raise ValueError("eigh failed in lobpcg iterations") from e
+                if verbosityLevel:
+                    warnings.warn(
+                        f"eigh failed at iteration {iterationNumber} \n"
+                        f"with error {e} causing a restart.\n",
+                        UserWarning, stacklevel=2
+                    )
+                # try again after dropping the direction vectors P from RR
+                restart = True
+
+        if restart:
+            gramA = bmat([[gramXAX, gramXAR], [gramXAR.T.conj(), gramRAR]])
+            gramB = bmat([[gramXBX, gramXBR], [gramXBR.T.conj(), gramRBR]])
+
+            _handle_gramA_gramB_verbosity(gramA, gramB, verbosityLevel)
+
+            try:
+                _lambda, eigBlockVector = eigh(gramA,
+                                               gramB,
+                                               check_finite=False)
+            except LinAlgError as e:
+                # raise ValueError("eigh failed in lobpcg iterations") from e
+                warnings.warn(
+                    f"eigh failed at iteration {iterationNumber} with error\n"
+                    f"{e}\n",
+                    UserWarning, stacklevel=2
+                )
+                break
+
+        ii = _get_indx(_lambda, sizeX, largest)
+        _lambda = _lambda[ii]
+        eigBlockVector = eigBlockVector[:, ii]
+        if retLambdaHistory:
+            lambdaHistory[iterationNumber + 1, :] = _lambda
+
+        # Compute Ritz vectors.
+        if B is not None:
+            if not restart:
+                eigBlockVectorX = eigBlockVector[:sizeX]
+                eigBlockVectorR = eigBlockVector[sizeX:
+                                                 sizeX + currentBlockSize]
+                eigBlockVectorP = eigBlockVector[sizeX + currentBlockSize:]
+
+                pp = np.dot(activeBlockVectorR, eigBlockVectorR)
+                pp += np.dot(activeBlockVectorP, eigBlockVectorP)
+
+                app = np.dot(activeBlockVectorAR, eigBlockVectorR)
+                app += np.dot(activeBlockVectorAP, eigBlockVectorP)
+
+                bpp = np.dot(activeBlockVectorBR, eigBlockVectorR)
+                bpp += np.dot(activeBlockVectorBP, eigBlockVectorP)
+            else:
+                eigBlockVectorX = eigBlockVector[:sizeX]
+                eigBlockVectorR = eigBlockVector[sizeX:]
+
+                pp = np.dot(activeBlockVectorR, eigBlockVectorR)
+                app = np.dot(activeBlockVectorAR, eigBlockVectorR)
+                bpp = np.dot(activeBlockVectorBR, eigBlockVectorR)
+
+            blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp
+            blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app
+            blockVectorBX = np.dot(blockVectorBX, eigBlockVectorX) + bpp
+
+            blockVectorP, blockVectorAP, blockVectorBP = pp, app, bpp
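+            # The new direction block P keeps only the R and P
+            # contributions (no X component), yielding the three-term
+            # LOBPCG recurrence over span([X, R, P]).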
+
+        else:
+            if not restart:
+                eigBlockVectorX = eigBlockVector[:sizeX]
+                eigBlockVectorR = eigBlockVector[sizeX:
+                                                 sizeX + currentBlockSize]
+                eigBlockVectorP = eigBlockVector[sizeX + currentBlockSize:]
+
+                pp = np.dot(activeBlockVectorR, eigBlockVectorR)
+                pp += np.dot(activeBlockVectorP, eigBlockVectorP)
+
+                app = np.dot(activeBlockVectorAR, eigBlockVectorR)
+                app += np.dot(activeBlockVectorAP, eigBlockVectorP)
+            else:
+                eigBlockVectorX = eigBlockVector[:sizeX]
+                eigBlockVectorR = eigBlockVector[sizeX:]
+
+                pp = np.dot(activeBlockVectorR, eigBlockVectorR)
+                app = np.dot(activeBlockVectorAR, eigBlockVectorR)
+
+            blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp
+            blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app
+
+            blockVectorP, blockVectorAP = pp, app
+
+    if B is not None:
+        aux = blockVectorBX * _lambda[np.newaxis, :]
+    else:
+        aux = blockVectorX * _lambda[np.newaxis, :]
+
+    blockVectorR = blockVectorAX - aux
+
+    aux = np.sum(blockVectorR.conj() * blockVectorR, 0)
+    residualNorms = np.sqrt(np.abs(aux))
+    # Use old lambda in case of early loop exit.
+    if retLambdaHistory:
+        lambdaHistory[iterationNumber + 1, :] = _lambda
+    if retResidualNormsHistory:
+        residualNormsHistory[iterationNumber + 1, :] = residualNorms
+    residualNorm = np.sum(np.abs(residualNorms)) / sizeX
+    if residualNorm < smallestResidualNorm:
+        smallestResidualNorm = residualNorm
+        bestIterationNumber = iterationNumber + 1
+        bestblockVectorX = blockVectorX
+
+    if np.max(np.abs(residualNorms)) > residualTolerance:
+        warnings.warn(
+            f"Exited at iteration {iterationNumber} with accuracies \n"
+            f"{residualNorms}\n"
+            f"not reaching the requested tolerance {residualTolerance}.\n"
+            f"Use iteration {bestIterationNumber} instead with accuracy \n"
+            f"{smallestResidualNorm}.\n",
+            UserWarning, stacklevel=2
+        )
+
+    if verbosityLevel:
+        print(f"Final iterative eigenvalue(s):\n{_lambda}")
+        print(f"Final iterative residual norm(s):\n{residualNorms}")
+
+    blockVectorX = bestblockVectorX
+    # Making eigenvectors "exactly" satisfy the blockVectorY constraints
+    if blockVectorY is not None:
+        _applyConstraints(blockVectorX,
+                          gramYBY,
+                          blockVectorBY,
+                          blockVectorY)
+
+    # Making eigenvectors "exactly" orthonormalized by final "exact" RR
+    blockVectorAX = A(blockVectorX)
+    if blockVectorAX.shape != blockVectorX.shape:
+        raise ValueError(
+            f"The shape {blockVectorX.shape} "
+            f"of the postprocessing iterate not preserved\n"
+            f"and changed to {blockVectorAX.shape} "
+            f"after multiplying by the primary matrix.\n"
+        )
+    gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX)
+
+    blockVectorBX = blockVectorX
+    if B is not None:
+        blockVectorBX = B(blockVectorX)
+        if blockVectorBX.shape != blockVectorX.shape:
+            raise ValueError(
+                f"The shape {blockVectorX.shape} "
+                f"of the postprocessing iterate not preserved\n"
+                f"and changed to {blockVectorBX.shape} "
+                f"after multiplying by the secondary matrix.\n"
+            )
+
+    gramXBX = np.dot(blockVectorX.T.conj(), blockVectorBX)
+    _handle_gramA_gramB_verbosity(gramXAX, gramXBX, verbosityLevel)
+    gramXAX = (gramXAX + gramXAX.T.conj()) / 2
+    gramXBX = (gramXBX + gramXBX.T.conj()) / 2
+    try:
+        _lambda, eigBlockVector = eigh(gramXAX,
+                                       gramXBX,
+                                       check_finite=False)
+    except LinAlgError as e:
+        raise ValueError("eigh has failed in lobpcg postprocessing") from e
+
+    ii = _get_indx(_lambda, sizeX, largest)
+    _lambda = _lambda[ii]
+    eigBlockVector = np.asarray(eigBlockVector[:, ii])
+
+    blockVectorX = np.dot(blockVectorX, eigBlockVector)
+    blockVectorAX = np.dot(blockVectorAX, eigBlockVector)
+
+    if B is not None:
+        blockVectorBX = np.dot(blockVectorBX, eigBlockVector)
+        aux = blockVectorBX * _lambda[np.newaxis, :]
+    else:
+        aux = blockVectorX * _lambda[np.newaxis, :]
+
+    blockVectorR = blockVectorAX - aux
+
+    aux = np.sum(blockVectorR.conj() * blockVectorR, 0)
+    residualNorms = np.sqrt(np.abs(aux))
+
+    if retLambdaHistory:
+        lambdaHistory[bestIterationNumber + 1, :] = _lambda
+    if retResidualNormsHistory:
+        residualNormsHistory[bestIterationNumber + 1, :] = residualNorms
+
+    if retLambdaHistory:
+        lambdaHistory = lambdaHistory[
+            : bestIterationNumber + 2, :]
+    if retResidualNormsHistory:
+        residualNormsHistory = residualNormsHistory[
+            : bestIterationNumber + 2, :]
+
+    if np.max(np.abs(residualNorms)) > residualTolerance:
+        warnings.warn(
+            f"Exited postprocessing with accuracies \n"
+            f"{residualNorms}\n"
+            f"not reaching the requested tolerance {residualTolerance}.",
+            UserWarning, stacklevel=2
+        )
+
+    if verbosityLevel:
+        print(f"Final postprocessing eigenvalue(s):\n{_lambda}")
+        print(f"Final residual norm(s):\n{residualNorms}")
+
+    if retLambdaHistory:
+        lambdaHistory = np.vsplit(lambdaHistory, np.shape(lambdaHistory)[0])
+        lambdaHistory = [np.squeeze(i) for i in lambdaHistory]
+    if retResidualNormsHistory:
+        residualNormsHistory = np.vsplit(residualNormsHistory,
+                                         np.shape(residualNormsHistory)[0])
+        residualNormsHistory = [np.squeeze(i) for i in residualNormsHistory]
+
+    if retLambdaHistory:
+        if retResidualNormsHistory:
+            return _lambda, blockVectorX, lambdaHistory, residualNormsHistory
+        else:
+            return _lambda, blockVectorX, lambdaHistory
+    else:
+        if retResidualNormsHistory:
+            return _lambda, blockVectorX, residualNormsHistory
+        else:
+            return _lambda, blockVectorX
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/lobpcg/tests/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/lobpcg/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/lobpcg/tests/test_lobpcg.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/lobpcg/tests/test_lobpcg.py
new file mode 100644
index 00000000..1f5f9492
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/lobpcg/tests/test_lobpcg.py
@@ -0,0 +1,534 @@
+""" Test functions for the sparse.linalg._eigen.lobpcg module
+"""
+
+import itertools
+import platform
+import sys
+import pytest
+import numpy as np
+from numpy import ones, r_, diag
+from numpy.testing import (assert_almost_equal, assert_equal,
+                           assert_allclose, assert_array_less)
+
+from scipy.linalg import eig, eigh, toeplitz, orth
+from scipy.sparse import spdiags, diags, eye, csr_matrix
+from scipy.sparse.linalg import eigs, LinearOperator
+from scipy.sparse.linalg._eigen.lobpcg import lobpcg
+
+_IS_32BIT = (sys.maxsize < 2**32)
+
+
+def ElasticRod(n):
+    """Build the matrices for the generalized eigenvalue problem of the
+    fixed-free elastic rod vibration model.
+    """
+    L = 1.0
+    le = L/n
+    rho = 7.85e3
+    S = 1.e-4
+    E = 2.1e11
+    mass = rho*S*le/6.
+    k = E*S/le
+    A = k*(diag(r_[2.*ones(n-1), 1])-diag(ones(n-1), 1)-diag(ones(n-1), -1))
+    B = mass*(diag(r_[4.*ones(n-1), 2])+diag(ones(n-1), 1)+diag(ones(n-1), -1))
+    return A, B
+
+
+def MikotaPair(n):
+    """Build a pair of full diagonal matrices for the generalized eigenvalue
+    problem. The Mikota pair acts as a nice test since the eigenvalues are the
+    squares of the integers n, n=1,2,...
+    """
+    x = np.arange(1, n+1)
+    B = diag(1./x)
+    y = np.arange(n-1, 0, -1)
+    z = np.arange(2*n-1, 0, -2)
+    A = diag(z)-diag(y, -1)-diag(y, 1)
+    return A, B
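+
+# An illustrative sketch (not run by the test suite): the Mikota
+# eigenvalues can be confirmed directly with a dense solve, e.g.,
+#     A, B = MikotaPair(4)
+#     eigh(A, B, eigvals_only=True)  # approx. [1., 4., 9., 16.]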
+
+
+def compare_solutions(A, B, m):
+    """Check eig vs. lobpcg consistency.
+    """
+    n = A.shape[0]
+    rnd = np.random.RandomState(0)
+    V = rnd.random((n, m))
+    X = orth(V)
+    eigvals, _ = lobpcg(A, X, B=B, tol=1e-2, maxiter=50, largest=False)
+    eigvals.sort()
+    w, _ = eig(A, b=B)
+    w.sort()
+    assert_almost_equal(w[:int(m/2)], eigvals[:int(m/2)], decimal=2)
+
+
+def test_Small():
+    A, B = ElasticRod(10)
+    with pytest.warns(UserWarning, match="The problem size"):
+        compare_solutions(A, B, 10)
+    A, B = MikotaPair(10)
+    with pytest.warns(UserWarning, match="The problem size"):
+        compare_solutions(A, B, 10)
+
+
+def test_ElasticRod():
+    A, B = ElasticRod(20)
+    with pytest.warns(UserWarning, match="Exited at iteration"):
+        compare_solutions(A, B, 2)
+
+
+def test_MikotaPair():
+    A, B = MikotaPair(20)
+    compare_solutions(A, B, 2)
+
+
+@pytest.mark.filterwarnings("ignore:Exited at iteration 0")
+@pytest.mark.filterwarnings("ignore:Exited postprocessing")
+def test_nonhermitian_warning(capsys):
+    """Check the warning of a Ritz matrix being not Hermitian
+    by feeding a non-Hermitian input matrix.
+    Also check stdout since verbosityLevel=1 and lack of stderr.
+    """
+    n = 10
+    X = np.arange(n * 2).reshape(n, 2).astype(np.float32)
+    A = np.arange(n * n).reshape(n, n).astype(np.float32)
+    with pytest.warns(UserWarning, match="Matrix gramA"):
+        _, _ = lobpcg(A, X, verbosityLevel=1, maxiter=0)
+    out, err = capsys.readouterr()  # Capture output
+    assert out.startswith("Solving standard eigenvalue")  # Test stdout
+    assert err == ''  # Test empty stderr
+    # Make the matrix symmetric and the UserWarning disappears.
+    A += A.T
+    _, _ = lobpcg(A, X, verbosityLevel=1, maxiter=0)
+    out, err = capsys.readouterr()  # Capture output
+    assert out.startswith("Solving standard eigenvalue")  # Test stdout
+    assert err == ''  # Test empty stderr
+
+
+def test_regression():
+    """Check the eigenvalue of the identity matrix is one.
+    """
+    # https://mail.python.org/pipermail/scipy-user/2010-October/026944.html
+    n = 10
+    X = np.ones((n, 1))
+    A = np.identity(n)
+    w, _ = lobpcg(A, X)
+    assert_allclose(w, [1])
+
+
+@pytest.mark.filterwarnings("ignore:The problem size")
+@pytest.mark.parametrize('n, m, m_excluded', [(100, 4, 3), (4, 2, 0)])
+def test_diagonal(n, m, m_excluded):
+    """Test ``m - m_excluded`` eigenvalues and eigenvectors of
+    diagonal matrices of size ``n``, varying the matrix format:
+    dense array, sparse matrix, and ``LinearOperator`` for both
+    matrices in the generalized eigenvalue problem ``Av = cBv``
+    and for the preconditioner.
+    """
+    rnd = np.random.RandomState(0)
+
+    # Define the generalized eigenvalue problem Av = cBv
+    # where (c, v) is a generalized eigenpair,
+    # A is the diagonal matrix whose entries are 1,...n,
+    # B is the identity matrix.
+    vals = np.arange(1, n+1, dtype=float)
+    A_s = diags([vals], [0], (n, n))
+    A_a = A_s.toarray()
+
+    def A_f(x):
+        return A_s @ x
+
+    A_lo = LinearOperator(matvec=A_f,
+                          matmat=A_f,
+                          shape=(n, n), dtype=float)
+
+    B_a = eye(n)
+    B_s = csr_matrix(B_a)
+
+    def B_f(x):
+        return B_a @ x
+
+    B_lo = LinearOperator(matvec=B_f,
+                          matmat=B_f,
+                          shape=(n, n), dtype=float)
+
+    # Let the preconditioner M be the inverse of A.
+    M_s = diags([1./vals], [0], (n, n))
+    M_a = M_s.toarray()
+
+    def M_f(x):
+        return M_s @ x
+
+    M_lo = LinearOperator(matvec=M_f,
+                          matmat=M_f,
+                          shape=(n, n), dtype=float)
+
+    # Pick random initial vectors.
+    X = rnd.normal(size=(n, m))
+
+    # Require that the returned eigenvectors be in the orthogonal complement
+    # of the first few standard basis vectors.
+    if m_excluded > 0:
+        Y = np.eye(n, m_excluded)
+    else:
+        Y = None
+
+    for A in [A_a, A_s, A_lo]:
+        for B in [B_a, B_s, B_lo]:
+            for M in [M_a, M_s, M_lo]:
+                eigvals, vecs = lobpcg(A, X, B, M=M, Y=Y,
+                                       maxiter=40, largest=False)
+
+                assert_allclose(eigvals, np.arange(1+m_excluded,
+                                                   1+m_excluded+m))
+                _check_eigen(A, eigvals, vecs, rtol=1e-3, atol=1e-3)
+
+
+def _check_eigen(M, w, V, rtol=1e-8, atol=1e-14):
+    """Check if the eigenvalue residual is small.
+    """
+    mult_wV = np.multiply(w, V)
+    dot_MV = M.dot(V)
+    assert_allclose(mult_wV, dot_MV, rtol=rtol, atol=atol)
+
+
+def _check_fiedler(n, p):
+    """Check the Fiedler vector computation.
+    """
+    # This is not necessarily the recommended way to find the Fiedler vector.
+    col = np.zeros(n)
+    col[1] = 1
+    A = toeplitz(col)
+    D = np.diag(A.sum(axis=1))
+    L = D - A
+    # Compute the full eigendecomposition using tricks, e.g.
+    # http://www.cs.yale.edu/homes/spielman/561/2009/lect02-09.pdf
+    tmp = np.pi * np.arange(n) / n
+    analytic_w = 2 * (1 - np.cos(tmp))
+    analytic_V = np.cos(np.outer(np.arange(n) + 1/2, tmp))
+    _check_eigen(L, analytic_w, analytic_V)
+    # Compute the full eigendecomposition using eigh.
+    eigh_w, eigh_V = eigh(L)
+    _check_eigen(L, eigh_w, eigh_V)
+    # Check that the first eigenvalue is near zero and that the rest agree.
+    assert_array_less(np.abs([eigh_w[0], analytic_w[0]]), 1e-14)
+    assert_allclose(eigh_w[1:], analytic_w[1:])
+
+    # Check small lobpcg eigenvalues.
+    X = analytic_V[:, :p]
+    lobpcg_w, lobpcg_V = lobpcg(L, X, largest=False)
+    assert_equal(lobpcg_w.shape, (p,))
+    assert_equal(lobpcg_V.shape, (n, p))
+    _check_eigen(L, lobpcg_w, lobpcg_V)
+    assert_array_less(np.abs(np.min(lobpcg_w)), 1e-14)
+    assert_allclose(np.sort(lobpcg_w)[1:], analytic_w[1:p])
+
+    # Check large lobpcg eigenvalues.
+    X = analytic_V[:, -p:]
+    lobpcg_w, lobpcg_V = lobpcg(L, X, largest=True)
+    assert_equal(lobpcg_w.shape, (p,))
+    assert_equal(lobpcg_V.shape, (n, p))
+    _check_eigen(L, lobpcg_w, lobpcg_V)
+    assert_allclose(np.sort(lobpcg_w), analytic_w[-p:])
+
+    # Look for the Fiedler vector using good but not exactly correct guesses.
+    fiedler_guess = np.concatenate((np.ones(n//2), -np.ones(n-n//2)))
+    X = np.vstack((np.ones(n), fiedler_guess)).T
+    lobpcg_w, _ = lobpcg(L, X, largest=False)
+    # Mathematically, the smaller eigenvalue should be zero
+    # and the larger should be the algebraic connectivity.
+    lobpcg_w = np.sort(lobpcg_w)
+    assert_allclose(lobpcg_w, analytic_w[:2], atol=1e-14)
+
+
+def test_fiedler_small_8():
+    """Check the dense workaround path for small matrices.
+    """
+    # This triggers the dense path because 8 < 2*5.
+    with pytest.warns(UserWarning, match="The problem size"):
+        _check_fiedler(8, 2)
+
+
+def test_fiedler_large_12():
+    """Check the dense workaround path avoided for non-small matrices.
+    """
+    # This does not trigger the dense path, because 2*5 <= 12.
+    _check_fiedler(12, 2)
+
+
+@pytest.mark.skipif(platform.machine() == 'aarch64',
+                    reason="issue #15935")
+def test_failure_to_run_iterations():
+    """Check that the code exists gracefully without breaking. Issue #10974.
+    """
+    rnd = np.random.RandomState(0)
+    X = rnd.standard_normal((100, 10))
+    A = X @ X.T
+    Q = rnd.standard_normal((X.shape[0], 4))
+    with pytest.warns(UserWarning, match="Failed at iteration"):
+        eigenvalues, _ = lobpcg(A, Q, maxiter=40, tol=1e-12)
+    assert np.max(eigenvalues) > 0
+
+
+def test_failure_to_run_iterations_nonsymmetric():
+    """Check that the code exists gracefully without breaking
+    if the matrix in not symmetric.
+    """
+    A = np.zeros((10, 10))
+    A[0, 1] = 1
+    Q = np.ones((10, 1))
+    with pytest.warns(UserWarning, match="Exited at iteration 2"):
+        eigenvalues, _ = lobpcg(A, Q, maxiter=20)
+    assert np.max(eigenvalues) > 0
+
+
+@pytest.mark.filterwarnings("ignore:The problem size")
+def test_hermitian():
+    """Check complex-value Hermitian cases.
+    """
+    rnd = np.random.RandomState(0)
+
+    sizes = [3, 10, 50]
+    ks = [1, 3, 10, 50]
+    gens = [True, False]
+
+    for s, k, gen in itertools.product(sizes, ks, gens):
+        if k > s:
+            continue
+
+        H = rnd.random((s, s)) + 1.j * rnd.random((s, s))
+        H = 10 * np.eye(s) + H + H.T.conj()
+
+        X = rnd.standard_normal((s, k))
+        X = X + 1.j * rnd.standard_normal((s, k))
+
+        if not gen:
+            B = np.eye(s)
+            w, v = lobpcg(H, X, maxiter=5000)
+            w0, _ = eigh(H)
+        else:
+            B = rnd.random((s, s)) + 1.j * rnd.random((s, s))
+            B = 10 * np.eye(s) + B.dot(B.T.conj())
+            w, v = lobpcg(H, X, B, maxiter=5000, largest=False)
+            w0, _ = eigh(H, B)
+
+        for wx, vx in zip(w, v.T):
+            # Check eigenvector
+            assert_allclose(np.linalg.norm(H.dot(vx) - B.dot(vx) * wx)
+                            / np.linalg.norm(H.dot(vx)),
+                            0, atol=5e-4, rtol=0)
+
+            # Compare eigenvalues
+            j = np.argmin(abs(w0 - wx))
+            assert_allclose(wx, w0[j], rtol=1e-4)
+
+
+# The n=5 case tests the alternative small matrix code path that uses eigh().
+@pytest.mark.filterwarnings("ignore:The problem size")
+@pytest.mark.parametrize('n, atol', [(20, 1e-3), (5, 1e-8)])
+def test_eigs_consistency(n, atol):
+    """Check eigs vs. lobpcg consistency.
+    """
+    vals = np.arange(1, n+1, dtype=np.float64)
+    A = spdiags(vals, 0, n, n)
+    rnd = np.random.RandomState(0)
+    X = rnd.random((n, 2))
+    lvals, lvecs = lobpcg(A, X, largest=True, maxiter=100)
+    vals, _ = eigs(A, k=2)
+
+    _check_eigen(A, lvals, lvecs, atol=atol, rtol=0)
+    assert_allclose(np.sort(vals), np.sort(lvals), atol=1e-14)
+
+
+def test_verbosity():
+    """Check that nonzero verbosity level code runs.
+    """
+    rnd = np.random.RandomState(0)
+    X = rnd.standard_normal((10, 10))
+    A = X @ X.T
+    Q = rnd.standard_normal((X.shape[0], 1))
+    with pytest.warns(UserWarning, match="Exited at iteration"):
+        _, _ = lobpcg(A, Q, maxiter=3, verbosityLevel=9)
+
+
+@pytest.mark.xfail(_IS_32BIT and sys.platform == 'win32',
+                   reason="tolerance violation on windows")
+@pytest.mark.xfail(platform.machine() == 'ppc64le',
+                   reason="fails on ppc64le")
+@pytest.mark.filterwarnings("ignore:Exited postprocessing")
+def test_tolerance_float32():
+    """Check lobpcg for attainable tolerance in float32.
+    """
+    rnd = np.random.RandomState(0)
+    n = 50
+    m = 3
+    vals = -np.arange(1, n + 1)
+    A = diags([vals], [0], (n, n))
+    A = A.astype(np.float32)
+    X = rnd.standard_normal((n, m))
+    X = X.astype(np.float32)
+    eigvals, _ = lobpcg(A, X, tol=1.25e-5, maxiter=50, verbosityLevel=0)
+    assert_allclose(eigvals, -np.arange(1, 1 + m), atol=2e-5, rtol=1e-5)
+
+
+def test_random_initial_float32():
+    """Check lobpcg in float32 for specific initial.
+    """
+    rnd = np.random.RandomState(0)
+    n = 50
+    m = 4
+    vals = -np.arange(1, n + 1)
+    A = diags([vals], [0], (n, n))
+    A = A.astype(np.float32)
+    X = rnd.random((n, m))
+    X = X.astype(np.float32)
+    eigvals, _ = lobpcg(A, X, tol=1e-3, maxiter=50, verbosityLevel=1)
+    assert_allclose(eigvals, -np.arange(1, 1 + m), atol=1e-2)
+
+
+def test_maxit():
+    """Check lobpcg if maxit=maxiter runs maxiter iterations and
+    if maxit=None runs 20 iterations (the default)
+    by checking the size of the iteration history output, which should
+    be the number of iterations plus 3 (initial, final, and postprocessing)
+    typically when maxiter is small and the choice of the best is passive.
+    """
+    rnd = np.random.RandomState(0)
+    n = 50
+    m = 4
+    vals = -np.arange(1, n + 1)
+    A = diags([vals], [0], (n, n))
+    A = A.astype(np.float32)
+    X = rnd.standard_normal((n, m))
+    X = X.astype(np.float64)
+    for maxiter in range(1, 4):
+        with pytest.warns(UserWarning, match="Exited at iteration"):
+            _, _, l_h, r_h = lobpcg(A, X, tol=1e-8, maxiter=maxiter,
+                                    retLambdaHistory=True,
+                                    retResidualNormsHistory=True)
+        assert_allclose(np.shape(l_h)[0], maxiter+3)
+        assert_allclose(np.shape(r_h)[0], maxiter+3)
+    with pytest.warns(UserWarning, match="Exited at iteration"):
+        l, _, l_h, r_h = lobpcg(A, X, tol=1e-8,
+                                retLambdaHistory=True,
+                                retResidualNormsHistory=True)
+    assert_allclose(np.shape(l_h)[0], 20+3)
+    assert_allclose(np.shape(r_h)[0], 20+3)
+    # Check that eigenvalue output is the last one in history
+    assert_allclose(l, l_h[-1])
+    # Make sure that both history outputs are lists
+    assert isinstance(l_h, list)
+    assert isinstance(r_h, list)
+    # Make sure that both history lists are arrays-like
+    assert_allclose(np.shape(l_h), np.shape(np.asarray(l_h)))
+    assert_allclose(np.shape(r_h), np.shape(np.asarray(r_h)))
+
+
+@pytest.mark.slow
+@pytest.mark.parametrize("n", [20])
+@pytest.mark.parametrize("m", [1, 3])
+@pytest.mark.filterwarnings("ignore:Exited at iteration")
+@pytest.mark.filterwarnings("ignore:Exited postprocessing")
+def test_diagonal_data_types(n, m):
+    """Check lobpcg for diagonal matrices for all matrix types.
+    """
+    rnd = np.random.RandomState(0)
+    # Define the generalized eigenvalue problem Av = cBv
+    # where (c, v) is a generalized eigenpair,
+    # and where we choose A  and B to be diagonal.
+    vals = np.arange(1, n + 1)
+
+    # list_sparse_format = ['bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil']
+    list_sparse_format = ['coo']
+    sparse_formats = len(list_sparse_format)
+    for s_f_i, s_f in enumerate(list_sparse_format):
+
+        As64 = diags([vals * vals], [0], (n, n), format=s_f)
+        As32 = As64.astype(np.float32)
+        Af64 = As64.toarray()
+        Af32 = Af64.astype(np.float32)
+
+        def As32f(x):
+            return As32 @ x
+        As32LO = LinearOperator(matvec=As32f,
+                                matmat=As32f,
+                                shape=(n, n),
+                                dtype=As32.dtype)
+
+        listA = [Af64, As64, Af32, As32, As32f, As32LO, lambda v: As32 @ v]
+
+        Bs64 = diags([vals], [0], (n, n), format=s_f)
+        Bf64 = Bs64.toarray()
+        Bs32 = Bs64.astype(np.float32)
+
+        def Bs32f(x):
+            return Bs32 @ x
+        Bs32LO = LinearOperator(matvec=Bs32f,
+                                matmat=Bs32f,
+                                shape=(n, n),
+                                dtype=Bs32.dtype)
+        listB = [Bf64, Bs64, Bs32, Bs32f, Bs32LO, lambda v: Bs32 @ v]
+
+        # Define the preconditioner function as LinearOperator.
+        Ms64 = diags([1./vals], [0], (n, n), format=s_f)
+
+        def Ms64precond(x):
+            return Ms64 @ x
+        Ms64precondLO = LinearOperator(matvec=Ms64precond,
+                                       matmat=Ms64precond,
+                                       shape=(n, n),
+                                       dtype=Ms64.dtype)
+        Mf64 = Ms64.toarray()
+
+        def Mf64precond(x):
+            return Mf64 @ x
+        Mf64precondLO = LinearOperator(matvec=Mf64precond,
+                                       matmat=Mf64precond,
+                                       shape=(n, n),
+                                       dtype=Mf64.dtype)
+        Ms32 = Ms64.astype(np.float32)
+
+        def Ms32precond(x):
+            return Ms32 @ x
+        Ms32precondLO = LinearOperator(matvec=Ms32precond,
+                                       matmat=Ms32precond,
+                                       shape=(n, n),
+                                       dtype=Ms32.dtype)
+        Mf32 = Ms32.toarray()
+
+        def Mf32precond(x):
+            return Mf32 @ x
+        Mf32precondLO = LinearOperator(matvec=Mf32precond,
+                                       matmat=Mf32precond,
+                                       shape=(n, n),
+                                       dtype=Mf32.dtype)
+        listM = [None, Ms64, Ms64precondLO, Mf64precondLO, Ms64precond,
+                 Ms32, Ms32precondLO, Mf32precondLO, Ms32precond]
+
+        # Set up the matrix of initial approximations to the eigenvectors
+        # (cannot be a sparse array).
+        Xf64 = rnd.random((n, m))
+        Xf32 = Xf64.astype(np.float32)
+        listX = [Xf64, Xf32]
+
+        # Require that the returned eigenvectors be in the orthogonal complement
+        # of the first few standard basis vectors (cannot be a sparse array).
+        m_excluded = 3
+        Yf64 = np.eye(n, m_excluded, dtype=float)
+        Yf32 = np.eye(n, m_excluded, dtype=np.float32)
+        listY = [Yf64, Yf32]
+
+        tests = list(itertools.product(listA, listB, listM, listX, listY))
+        # This is one of the slower tests because there are >1,000 configs
+        # to test here. Instead of checking the product of all input and
+        # output types for every sparse format, test each configuration
+        # for the first sparse format, and then a slice of the
+        # configurations for one additional sparse format. This takes
+        # roughly 2/7, about 30%, as long as testing all configurations
+        # for all sparse formats.
+        if s_f_i > 0:
+            tests = tests[s_f_i - 1::sparse_formats-1]
+
+        for A, B, M, X, Y in tests:
+            eigvals, _ = lobpcg(A, X, B=B, M=M, Y=Y, tol=1e-4,
+                                maxiter=100, largest=False)
+            assert_allclose(eigvals,
+                            np.arange(1 + m_excluded, 1 + m_excluded + m),
+                            atol=1e-5)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/tests/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/tests/test_svds.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/tests/test_svds.py
new file mode 100644
index 00000000..94816a3b
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_eigen/tests/test_svds.py
@@ -0,0 +1,907 @@
+import os
+import re
+import copy
+import numpy as np
+
+from numpy.testing import assert_allclose, assert_equal, assert_array_equal
+import pytest
+
+from scipy.linalg import svd, null_space
+from scipy.sparse import csc_matrix, isspmatrix, spdiags, random
+from scipy.sparse.linalg import LinearOperator, aslinearoperator
+if os.environ.get("SCIPY_USE_PROPACK"):
+    has_propack = True
+else:
+    has_propack = False
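+# PROPACK support is opt-in: these tests exercise it only when the
+# SCIPY_USE_PROPACK environment variable is set at import time.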
+from scipy.sparse.linalg import svds
+from scipy.sparse.linalg._eigen.arpack import ArpackNoConvergence
+
+
+# --- Helper Functions / Classes ---
+
+
+def sorted_svd(m, k, which='LM'):
+    # Compute svd of a dense matrix m, and return singular vectors/values
+    # sorted.
+    if isspmatrix(m):
+        m = m.toarray()
+    u, s, vh = svd(m)
+    if which == 'LM':
+        ii = np.argsort(s)[-k:]
+    elif which == 'SM':
+        ii = np.argsort(s)[:k]
+    else:
+        raise ValueError("unknown which=%r" % (which,))
+
+    return u[:, ii], s[ii], vh[ii]
+
+
+def svd_estimate(u, s, vh):
+    return np.dot(u, np.dot(np.diag(s), vh))
+
+
+def _check_svds(A, k, u, s, vh, which="LM", check_usvh_A=False,
+                check_svd=True, atol=1e-10, rtol=1e-7):
+    n, m = A.shape
+
+    # Check shapes.
+    assert_equal(u.shape, (n, k))
+    assert_equal(s.shape, (k,))
+    assert_equal(vh.shape, (k, m))
+
+    # Check that the original matrix can be reconstituted.
+    A_rebuilt = (u*s).dot(vh)
+    assert_equal(A_rebuilt.shape, A.shape)
+    if check_usvh_A:
+        assert_allclose(A_rebuilt, A, atol=atol, rtol=rtol)
+
+    # Check that u is a semi-orthogonal matrix.
+    uh_u = np.dot(u.T.conj(), u)
+    assert_equal(uh_u.shape, (k, k))
+    assert_allclose(uh_u, np.identity(k), atol=atol, rtol=rtol)
+
+    # Check that vh is a semi-orthogonal matrix.
+    vh_v = np.dot(vh, vh.T.conj())
+    assert_equal(vh_v.shape, (k, k))
+    assert_allclose(vh_v, np.identity(k), atol=atol, rtol=rtol)
+
+    # Check that scipy.sparse.linalg.svds ~ scipy.linalg.svd
+    if check_svd:
+        u2, s2, vh2 = sorted_svd(A, k, which)
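+        # Singular vectors are unique only up to sign (or a unit complex
+        # factor), which leaves elementwise magnitudes invariant, hence
+        # the np.abs comparisons below.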
+        assert_allclose(np.abs(u), np.abs(u2), atol=atol, rtol=rtol)
+        assert_allclose(s, s2, atol=atol, rtol=rtol)
+        assert_allclose(np.abs(vh), np.abs(vh2), atol=atol, rtol=rtol)
+
+
+def _check_svds_n(A, k, u, s, vh, which="LM", check_res=True,
+                  check_svd=True, atol=1e-10, rtol=1e-7):
+    n, m = A.shape
+
+    # Check shapes.
+    assert_equal(u.shape, (n, k))
+    assert_equal(s.shape, (k,))
+    assert_equal(vh.shape, (k, m))
+
+    # Check that u is a semi-orthogonal matrix.
+    uh_u = np.dot(u.T.conj(), u)
+    assert_equal(uh_u.shape, (k, k))
+    error = np.sum(np.abs(uh_u - np.identity(k))) / (k * k)
+    assert_allclose(error, 0.0, atol=atol, rtol=rtol)
+
+    # Check that vh is a semi-orthogonal matrix.
+    vh_v = np.dot(vh, vh.T.conj())
+    assert_equal(vh_v.shape, (k, k))
+    error = np.sum(np.abs(vh_v - np.identity(k))) / (k * k)
+    assert_allclose(error, 0.0, atol=atol, rtol=rtol)
+
+    # Check residuals
+    if check_res:
+        ru = A.T.conj() @ u - vh.T.conj() * s
+        rus = np.sum(np.abs(ru)) / (n * k)
+        rvh = A @ vh.T.conj() - u * s
+        rvhs = np.sum(np.abs(rvh)) / (m * k)
+        assert_allclose(rus, 0.0, atol=atol, rtol=rtol)
+        assert_allclose(rvhs, 0.0, atol=atol, rtol=rtol)
+
+    # Check that scipy.sparse.linalg.svds ~ scipy.linalg.svd
+    if check_svd:
+        u2, s2, vh2 = sorted_svd(A, k, which)
+        assert_allclose(s, s2, atol=atol, rtol=rtol)
+        A_rebuilt_svd = (u2*s2).dot(vh2)
+        A_rebuilt = (u*s).dot(vh)
+        assert_equal(A_rebuilt.shape, A.shape)
+        error = np.sum(np.abs(A_rebuilt_svd - A_rebuilt)) / (k * k)
+        assert_allclose(error, 0.0, atol=atol, rtol=rtol)
+
+
+class CheckingLinearOperator(LinearOperator):
+    def __init__(self, A):
+        self.A = A
+        self.dtype = A.dtype
+        self.shape = A.shape
+
+    def _matvec(self, x):
+        assert_equal(max(x.shape), np.size(x))
+        return self.A.dot(x)
+
+    def _rmatvec(self, x):
+        assert_equal(max(x.shape), np.size(x))
+        return self.A.T.conjugate().dot(x)
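+
+# CheckingLinearOperator asserts that every vector handed to matvec and
+# rmatvec is effectively one-dimensional (max(x.shape) == x.size), i.e.,
+# a flat vector or a single column, guarding against unexpected blocks.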
+
+
+# --- Test Input Validation ---
+# Tests input validation on parameters `k` and `which`.
+# Needs better input validation checks for all other parameters.
+
+class SVDSCommonTests:
+
+    solver = None
+
+    # some of these IV tests could run only once, say with solver=None
+
+    _A_empty_msg = "`A` must not be empty."
+    _A_dtype_msg = "`A` must be of floating or complex floating data type"
+    _A_type_msg = "type not understood"
+    _A_ndim_msg = "array must have ndim <= 2"
+    _A_validation_inputs = [
+        (np.asarray([[]]), ValueError, _A_empty_msg),
+        (np.asarray([[1, 2], [3, 4]]), ValueError, _A_dtype_msg),
+        ("hi", TypeError, _A_type_msg),
+        (np.asarray([[[1., 2.], [3., 4.]]]), ValueError, _A_ndim_msg)]
+
+    @pytest.mark.parametrize("args", _A_validation_inputs)
+    def test_svds_input_validation_A(self, args):
+        A, error_type, message = args
+        with pytest.raises(error_type, match=message):
+            svds(A, k=1, solver=self.solver)
+
+    @pytest.mark.parametrize("k", [-1, 0, 3, 4, 5, 1.5, "1"])
+    def test_svds_input_validation_k_1(self, k):
+        rng = np.random.default_rng(0)
+        A = rng.random((4, 3))
+
+        # propack can do complete SVD
+        if self.solver == 'propack' and k == 3:
+            if not has_propack:
+                pytest.skip("PROPACK not enabled")
+            res = svds(A, k=k, solver=self.solver)
+            _check_svds(A, k, *res, check_usvh_A=True, check_svd=True)
+            return
+
+        message = ("`k` must be an integer satisfying")
+        with pytest.raises(ValueError, match=message):
+            svds(A, k=k, solver=self.solver)
+
+    def test_svds_input_validation_k_2(self):
+        # I think the stack trace is reasonable when `k` can't be converted
+        # to an int.
+        message = "int() argument must be a"
+        with pytest.raises(TypeError, match=re.escape(message)):
+            svds(np.eye(10), k=[], solver=self.solver)
+
+        message = "invalid literal for int()"
+        with pytest.raises(ValueError, match=message):
+            svds(np.eye(10), k="hi", solver=self.solver)
+
+    @pytest.mark.parametrize("tol", (-1, np.inf, np.nan))
+    def test_svds_input_validation_tol_1(self, tol):
+        message = "`tol` must be a non-negative floating point value."
+        with pytest.raises(ValueError, match=message):
+            svds(np.eye(10), tol=tol, solver=self.solver)
+
+    @pytest.mark.parametrize("tol", ([], 'hi'))
+    def test_svds_input_validation_tol_2(self, tol):
+        # I think the stack trace is reasonable here
+        message = "'<' not supported between instances"
+        with pytest.raises(TypeError, match=message):
+            svds(np.eye(10), tol=tol, solver=self.solver)
+
+    @pytest.mark.parametrize("which", ('LA', 'SA', 'ekki', 0))
+    def test_svds_input_validation_which(self, which):
+        # Regression test for a github issue.
+        # https://github.com/scipy/scipy/issues/4590
+        # Function was not checking for eigenvalue type and unintended
+        # values could be returned.
+        with pytest.raises(ValueError, match="`which` must be in"):
+            svds(np.eye(10), which=which, solver=self.solver)
+
+    @pytest.mark.parametrize("transpose", (True, False))
+    @pytest.mark.parametrize("n", range(4, 9))
+    def test_svds_input_validation_v0_1(self, transpose, n):
+        rng = np.random.default_rng(0)
+        A = rng.random((5, 7))
+        v0 = rng.random(n)
+        if transpose:
+            A = A.T
+        k = 2
+        message = "`v0` must have shape"
+
+        required_length = (A.shape[0] if self.solver == 'propack'
+                           else min(A.shape))
+        if n != required_length:
+            with pytest.raises(ValueError, match=message):
+                svds(A, k=k, v0=v0, solver=self.solver)
+
+    def test_svds_input_validation_v0_2(self):
+        A = np.ones((10, 10))
+        v0 = np.ones((1, 10))
+        message = "`v0` must have shape"
+        with pytest.raises(ValueError, match=message):
+            svds(A, k=1, v0=v0, solver=self.solver)
+
+    @pytest.mark.parametrize("v0", ("hi", 1, np.ones(10, dtype=int)))
+    def test_svds_input_validation_v0_3(self, v0):
+        A = np.ones((10, 10))
+        message = "`v0` must be of floating or complex floating data type."
+        with pytest.raises(ValueError, match=message):
+            svds(A, k=1, v0=v0, solver=self.solver)
+
+    @pytest.mark.parametrize("maxiter", (-1, 0, 5.5))
+    def test_svds_input_validation_maxiter_1(self, maxiter):
+        message = ("`maxiter` must be a positive integer.")
+        with pytest.raises(ValueError, match=message):
+            svds(np.eye(10), maxiter=maxiter, solver=self.solver)
+
+    def test_svds_input_validation_maxiter_2(self):
+        # I think the stack trace is reasonable when `k` can't be converted
+        # to an int.
+        message = "int() argument must be a"
+        with pytest.raises(TypeError, match=re.escape(message)):
+            svds(np.eye(10), maxiter=[], solver=self.solver)
+
+        message = "invalid literal for int()"
+        with pytest.raises(ValueError, match=message):
+            svds(np.eye(10), maxiter="hi", solver=self.solver)
+
+    @pytest.mark.parametrize("rsv", ('ekki', 10))
+    def test_svds_input_validation_return_singular_vectors(self, rsv):
+        message = "`return_singular_vectors` must be in"
+        with pytest.raises(ValueError, match=message):
+            svds(np.eye(10), return_singular_vectors=rsv, solver=self.solver)
+
+    # --- Test Parameters ---
+
+    @pytest.mark.parametrize("k", [3, 5])
+    @pytest.mark.parametrize("which", ["LM", "SM"])
+    def test_svds_parameter_k_which(self, k, which):
+        if self.solver == 'propack':
+            if not has_propack:
+                pytest.skip("PROPACK not available")
+        # check that the `k` parameter sets the number of eigenvalues/
+        # eigenvectors returned.
+        # Also check that the `which` parameter sets whether the largest or
+        # smallest eigenvalues are returned
+        rng = np.random.default_rng(0)
+        A = rng.random((10, 10))
+        if self.solver == 'lobpcg':
+            with pytest.warns(UserWarning, match="The problem size"):
+                res = svds(A, k=k, which=which, solver=self.solver,
+                           random_state=0)
+        else:
+            res = svds(A, k=k, which=which, solver=self.solver,
+                       random_state=0)
+        _check_svds(A, k, *res, which=which, atol=8e-10)
+
+    # loop instead of parametrize for simplicity
+    def test_svds_parameter_tol(self):
+        if self.solver == 'propack':
+            if not has_propack:
+                pytest.skip("PROPACK not available")
+        return  # TODO: needs work, disabling for now
+        # check the effect of the `tol` parameter on solver accuracy by
+        # solving the same problem with varying `tol` and comparing the
+        # singular values against the ground truth computed by dense svd
+        n = 100  # matrix size
+        k = 3    # number of eigenvalues to check
+
+        # generate a random, sparse-ish matrix
+        # effect isn't apparent for matrices that are too small
+        rng = np.random.default_rng(0)
+        A = rng.random((n, n))
+        A[A > .1] = 0
+        A = A @ A.T
+
+        _, s, _ = svd(A)  # calculate ground truth
+
+        # calculate the error as a function of `tol`
+        A = csc_matrix(A)
+
+        def err(tol):
+            if self.solver == 'lobpcg' and tol == 1e-4:
+                with pytest.warns(UserWarning, match="Exited at iteration"):
+                    _, s2, _ = svds(A, k=k, v0=np.ones(n),
+                                    solver=self.solver, tol=tol)
+            else:
+                _, s2, _ = svds(A, k=k, v0=np.ones(n),
+                                solver=self.solver, tol=tol)
+            return np.linalg.norm((s2 - s[k-1::-1])/s[k-1::-1])
+
+        tols = [1e-4, 1e-2, 1e0]  # tolerance levels to check
+        # for 'arpack' and 'propack', the accuracy changes in discrete steps
+        accuracies = {'propack': [1e-12, 1e-6, 1e-4],
+                      'arpack': [2e-15, 1e-10, 1e-10],
+                      'lobpcg': [1e-11, 1e-3, 10]}
+
+        for tol, accuracy in zip(tols, accuracies[self.solver]):
+            error = err(tol)
+            assert error < accuracy
+            assert error > accuracy/10
+
+    def test_svd_v0(self):
+        if self.solver == 'propack':
+            if not has_propack:
+                pytest.skip("PROPACK not available")
+        # check that the `v0` parameter affects the solution
+        n = 100
+        k = 1
+        # If k != 1, LOBPCG needs more initial vectors, which are generated
+        # with random_state, so it does not pass with k >= 2.
+        # For some other values of `n`, the AssertionErrors are not raised
+        # with different v0s, which is reasonable.
+
+        rng = np.random.default_rng(0)
+        A = rng.random((n, n))
+
+        # with the same v0, solutions are the same, and they are accurate
+        # v0 takes precedence over random_state
+        v0a = rng.random(n)
+        res1a = svds(A, k, v0=v0a, solver=self.solver, random_state=0)
+        res2a = svds(A, k, v0=v0a, solver=self.solver, random_state=1)
+        assert_equal(res1a, res2a)
+        _check_svds(A, k, *res1a)
+
+        # with the same v0, solutions are the same, and they are accurate
+        v0b = rng.random(n)
+        res1b = svds(A, k, v0=v0b, solver=self.solver, random_state=2)
+        res2b = svds(A, k, v0=v0b, solver=self.solver, random_state=3)
+        assert_equal(res1b, res2b)
+        _check_svds(A, k, *res1b)
+
+        # with different v0, solutions can be numerically different
+        message = "Arrays are not equal"
+        with pytest.raises(AssertionError, match=message):
+            assert_equal(res1a, res1b)
+
+    def test_svd_random_state(self):
+        if self.solver == 'propack':
+            if not has_propack:
+                pytest.skip("PROPACK not available")
+        # check that the `random_state` parameter affects the solution
+        # Admittedly, `n` and `k` are chosen so that all solvers pass all
+        # these checks. That's a tall order, since LOBPCG struggles to
+        # achieve the desired accuracy and ARPACK often returns the same
+        # singular values/vectors for different v0.
+        n = 100
+        k = 1
+
+        rng = np.random.default_rng(0)
+        A = rng.random((n, n))
+
+        # with the same random_state, solutions are the same and accurate
+        res1a = svds(A, k, solver=self.solver, random_state=0)
+        res2a = svds(A, k, solver=self.solver, random_state=0)
+        assert_equal(res1a, res2a)
+        _check_svds(A, k, *res1a)
+
+        # with the same random_state, solutions are the same and accurate
+        res1b = svds(A, k, solver=self.solver, random_state=1)
+        res2b = svds(A, k, solver=self.solver, random_state=1)
+        assert_equal(res1b, res2b)
+        _check_svds(A, k, *res1b)
+
+        # with different random_state, solutions can be numerically different
+        message = "Arrays are not equal"
+        with pytest.raises(AssertionError, match=message):
+            assert_equal(res1a, res1b)
+
+    @pytest.mark.parametrize("random_state", (0, 1,
+                                              np.random.RandomState(0),
+                                              np.random.default_rng(0)))
+    def test_svd_random_state_2(self, random_state):
+        if self.solver == 'propack':
+            if not has_propack:
+                pytest.skip("PROPACK not available")
+
+        n = 100
+        k = 1
+
+        rng = np.random.default_rng(0)
+        A = rng.random((n, n))
+
+        random_state_2 = copy.deepcopy(random_state)
+
+        # with the same random_state, solutions are the same and accurate
+        res1a = svds(A, k, solver=self.solver, random_state=random_state)
+        res2a = svds(A, k, solver=self.solver, random_state=random_state_2)
+        assert_equal(res1a, res2a)
+        _check_svds(A, k, *res1a)
+
+    @pytest.mark.parametrize("random_state", (None,
+                                              np.random.RandomState(0),
+                                              np.random.default_rng(0)))
+    def test_svd_random_state_3(self, random_state):
+        if self.solver == 'propack':
+            if not has_propack:
+                pytest.skip("PROPACK not available")
+
+        n = 100
+        k = 5
+
+        rng = np.random.default_rng(0)
+        A = rng.random((n, n))
+
+        # a random_state in a different state produces accurate, but not
+        # necessarily identical, results
+        res1a = svds(A, k, solver=self.solver, random_state=random_state)
+        res2a = svds(A, k, solver=self.solver, random_state=random_state)
+        _check_svds(A, k, *res1a, atol=2e-10, rtol=1e-6)
+        _check_svds(A, k, *res2a, atol=2e-10, rtol=1e-6)
+
+        message = "Arrays are not equal"
+        with pytest.raises(AssertionError, match=message):
+            assert_equal(res1a, res2a)
+
+    @pytest.mark.filterwarnings("ignore:Exited postprocessing")
+    def test_svd_maxiter(self):
+        # check that maxiter works as expected: should not return accurate
+        # solution after 1 iteration, but should with default `maxiter`
+        if self.solver == 'propack':
+            if not has_propack:
+                pytest.skip("PROPACK not available")
+        A = np.diag(np.arange(9)).astype(np.float64)
+        k = 1
+        u, s, vh = sorted_svd(A, k)
+
+        if self.solver == 'arpack':
+            message = "ARPACK error -1: No convergence"
+            with pytest.raises(ArpackNoConvergence, match=message):
+                svds(A, k, ncv=3, maxiter=1, solver=self.solver)
+        elif self.solver == 'lobpcg':
+            with pytest.warns(UserWarning, match="Exited at iteration"):
+                svds(A, k, maxiter=1, solver=self.solver)
+        elif self.solver == 'propack':
+            message = "k=1 singular triplets did not converge within"
+            with pytest.raises(np.linalg.LinAlgError, match=message):
+                svds(A, k, maxiter=1, solver=self.solver)
+
+        ud, sd, vhd = svds(A, k, solver=self.solver)  # default maxiter
+        _check_svds(A, k, ud, sd, vhd, atol=1e-8)
+        assert_allclose(np.abs(ud), np.abs(u), atol=1e-8)
+        assert_allclose(np.abs(vhd), np.abs(vh), atol=1e-8)
+        assert_allclose(np.abs(sd), np.abs(s), atol=1e-9)
+
+    @pytest.mark.parametrize("rsv", (True, False, 'u', 'vh'))
+    @pytest.mark.parametrize("shape", ((5, 7), (6, 6), (7, 5)))
+    def test_svd_return_singular_vectors(self, rsv, shape):
+        # check that the return_singular_vectors parameter works as expected
+        if self.solver == 'propack':
+            if not has_propack:
+                pytest.skip("PROPACK not available")
+
+        rng = np.random.default_rng(0)
+        A = rng.random(shape)
+        k = 2
+        M, N = shape
+        u, s, vh = sorted_svd(A, k)
+
+        respect_u = True if self.solver == 'propack' else M <= N
+        respect_vh = True if self.solver == 'propack' else M > N
+
+        if self.solver == 'lobpcg':
+            with pytest.warns(UserWarning, match="The problem size"):
+                if rsv is False:
+                    s2 = svds(A, k, return_singular_vectors=rsv,
+                              solver=self.solver, random_state=rng)
+                    assert_allclose(s2, s)
+                elif rsv == 'u' and respect_u:
+                    u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
+                                       solver=self.solver, random_state=rng)
+                    assert_allclose(np.abs(u2), np.abs(u))
+                    assert_allclose(s2, s)
+                    assert vh2 is None
+                elif rsv == 'vh' and respect_vh:
+                    u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
+                                       solver=self.solver, random_state=rng)
+                    assert u2 is None
+                    assert_allclose(s2, s)
+                    assert_allclose(np.abs(vh2), np.abs(vh))
+                else:
+                    u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
+                                       solver=self.solver, random_state=rng)
+                    if u2 is not None:
+                        assert_allclose(np.abs(u2), np.abs(u))
+                    assert_allclose(s2, s)
+                    if vh2 is not None:
+                        assert_allclose(np.abs(vh2), np.abs(vh))
+        else:
+            if rsv is False:
+                s2 = svds(A, k, return_singular_vectors=rsv,
+                          solver=self.solver, random_state=rng)
+                assert_allclose(s2, s)
+            elif rsv == 'u' and respect_u:
+                u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
+                                   solver=self.solver, random_state=rng)
+                assert_allclose(np.abs(u2), np.abs(u))
+                assert_allclose(s2, s)
+                assert vh2 is None
+            elif rsv == 'vh' and respect_vh:
+                u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
+                                   solver=self.solver, random_state=rng)
+                assert u2 is None
+                assert_allclose(s2, s)
+                assert_allclose(np.abs(vh2), np.abs(vh))
+            else:
+                u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
+                                   solver=self.solver, random_state=rng)
+                if u2 is not None:
+                    assert_allclose(np.abs(u2), np.abs(u))
+                assert_allclose(s2, s)
+                if vh2 is not None:
+                    assert_allclose(np.abs(vh2), np.abs(vh))
+
+    # --- Test Basic Functionality ---
+    # Tests the accuracy of each solver for real and complex matrices provided
+    # as list, dense array, sparse matrix, and LinearOperator.
+
+    A1 = [[1, 2, 3], [3, 4, 3], [1 + 1j, 0, 2], [0, 0, 1]]
+    A2 = [[1, 2, 3, 8 + 5j], [3 - 2j, 4, 3, 5], [1, 0, 2, 3], [0, 0, 1, 0]]
+
+    @pytest.mark.filterwarnings("ignore:k >= N - 1",
+                                reason="needed to demonstrate #16725")
+    @pytest.mark.parametrize('A', (A1, A2))
+    @pytest.mark.parametrize('k', range(1, 5))
+    # PROPACK fails a lot if `which` is parametrized over ("SM", "LM"),
+    # so `which` is left at its default value here.
+    @pytest.mark.parametrize('real', (True, False))
+    @pytest.mark.parametrize('transpose', (False, True))
+    # In gh-14299, it was suggested that `svds` should _not_ work with lists
+    @pytest.mark.parametrize('lo_type', (np.asarray, csc_matrix,
+                                         aslinearoperator))
+    def test_svd_simple(self, A, k, real, transpose, lo_type):
+
+        if self.solver == 'propack':
+            if not has_propack:
+                pytest.skip("PROPACK not available")
+
+        A = np.asarray(A)
+        A = np.real(A) if real else A
+        A = A.T if transpose else A
+        A2 = lo_type(A)
+
+        # could check for the appropriate errors, but that is tested above
+        if k > min(A.shape):
+            pytest.skip("`k` cannot be greater than `min(A.shape)`")
+        if self.solver != 'propack' and k >= min(A.shape):
+            pytest.skip("Only PROPACK supports complete SVD")
+        if self.solver == 'arpack' and not real and k == min(A.shape) - 1:
+            pytest.skip("#16725")
+
+        if self.solver == 'propack' and (np.intp(0).itemsize < 8 and not real):
+            pytest.skip('PROPACK complex-valued SVD methods not available '
+                        'for 32-bit builds')
+
+        if self.solver == 'lobpcg':
+            with pytest.warns(UserWarning, match="The problem size"):
+                u, s, vh = svds(A2, k, solver=self.solver)
+        else:
+            u, s, vh = svds(A2, k, solver=self.solver)
+        _check_svds(A, k, u, s, vh, atol=3e-10)
+
+    def test_svd_linop(self):
+        solver = self.solver
+        if self.solver == 'propack':
+            if not has_propack:
+                pytest.skip("PROPACK not available")
+
+        nmks = [(6, 7, 3),
+                (9, 5, 4),
+                (10, 8, 5)]
+
+        def reorder(args):
+            U, s, VH = args
+            j = np.argsort(s)
+            return U[:, j], s[j], VH[j, :]
+
+        for n, m, k in nmks:
+            # Test svds on a LinearOperator.
+            A = np.random.RandomState(52).randn(n, m)
+            L = CheckingLinearOperator(A)
+
+            if solver == 'propack':
+                v0 = np.ones(n)
+            else:
+                v0 = np.ones(min(A.shape))
+            if solver == 'lobpcg':
+                with pytest.warns(UserWarning, match="The problem size"):
+                    U1, s1, VH1 = reorder(svds(A, k, v0=v0, solver=solver))
+                    U2, s2, VH2 = reorder(svds(L, k, v0=v0, solver=solver))
+            else:
+                U1, s1, VH1 = reorder(svds(A, k, v0=v0, solver=solver))
+                U2, s2, VH2 = reorder(svds(L, k, v0=v0, solver=solver))
+
+            assert_allclose(np.abs(U1), np.abs(U2))
+            assert_allclose(s1, s2)
+            assert_allclose(np.abs(VH1), np.abs(VH2))
+            assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)),
+                            np.dot(U2, np.dot(np.diag(s2), VH2)))
+
+            # Try again with which="SM".
+            A = np.random.RandomState(1909).randn(n, m)
+            L = CheckingLinearOperator(A)
+
+            # TODO: arpack crashes when v0=v0, which="SM"
+            kwargs = {'v0': v0} if solver not in {None, 'arpack'} else {}
+            if self.solver == 'lobpcg':
+                with pytest.warns(UserWarning, match="The problem size"):
+                    U1, s1, VH1 = reorder(svds(A, k, which="SM", solver=solver,
+                                               **kwargs))
+                    U2, s2, VH2 = reorder(svds(L, k, which="SM", solver=solver,
+                                               **kwargs))
+            else:
+                U1, s1, VH1 = reorder(svds(A, k, which="SM", solver=solver,
+                                           **kwargs))
+                U2, s2, VH2 = reorder(svds(L, k, which="SM", solver=solver,
+                                           **kwargs))
+
+            assert_allclose(np.abs(U1), np.abs(U2))
+            assert_allclose(s1 + 1, s2 + 1)
+            assert_allclose(np.abs(VH1), np.abs(VH2))
+            assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)),
+                            np.dot(U2, np.dot(np.diag(s2), VH2)))
+
+            if k < min(n, m) - 1:
+                # Complex input and explicit which="LM".
+                for (dt, eps) in [(complex, 1e-7), (np.complex64, 1e-3)]:
+                    if self.solver == 'propack' and np.intp(0).itemsize < 8:
+                        pytest.skip('PROPACK complex-valued SVD methods '
+                                    'not available for 32-bit builds')
+                    rng = np.random.RandomState(1648)
+                    A = (rng.randn(n, m) + 1j * rng.randn(n, m)).astype(dt)
+                    L = CheckingLinearOperator(A)
+
+                    if self.solver == 'lobpcg':
+                        with pytest.warns(UserWarning,
+                                          match="The problem size"):
+                            U1, s1, VH1 = reorder(svds(A, k, which="LM",
+                                                       solver=solver))
+                            U2, s2, VH2 = reorder(svds(L, k, which="LM",
+                                                       solver=solver))
+                    else:
+                        U1, s1, VH1 = reorder(svds(A, k, which="LM",
+                                                   solver=solver))
+                        U2, s2, VH2 = reorder(svds(L, k, which="LM",
+                                                   solver=solver))
+
+                    assert_allclose(np.abs(U1), np.abs(U2), rtol=eps)
+                    assert_allclose(s1, s2, rtol=eps)
+                    assert_allclose(np.abs(VH1), np.abs(VH2), rtol=eps)
+                    assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)),
+                                    np.dot(U2, np.dot(np.diag(s2), VH2)),
+                                    rtol=eps)
+
+    SHAPES = ((100, 100), (100, 101), (101, 100))
+
+    @pytest.mark.filterwarnings("ignore:Exited at iteration")
+    @pytest.mark.filterwarnings("ignore:Exited postprocessing")
+    @pytest.mark.parametrize("shape", SHAPES)
+    # ARPACK supports only dtype float, complex, or np.float32
+    @pytest.mark.parametrize("dtype", (float, complex, np.float32))
+    def test_small_sigma_sparse(self, shape, dtype):
+        # https://github.com/scipy/scipy/pull/11829
+        solver = self.solver
+        # TODO: PROPACK fails the orthogonality check on singular vectors
+        # if dtype == complex and self.solver == 'propack':
+        #    pytest.skip("PROPACK unsupported for complex dtype")
+        if solver == 'propack':
+            pytest.skip("PROPACK failures unrelated to PR")
+        rng = np.random.default_rng(0)
+        k = 5
+        (m, n) = shape
+        S = random(m, n, density=0.1, random_state=rng)
+        if dtype == complex:
+            S = S + 1j * random(m, n, density=0.1, random_state=rng)
+        e = np.ones(m)
+        e[0:5] *= 1e1 ** np.arange(-5, 0, 1)
+        S = spdiags(e, 0, m, m) @ S
+        S = S.astype(dtype)
+        u, s, vh = svds(S, k, which='SM', solver=solver, maxiter=1000)
+        c_svd = False  # partial SVD can be different from full SVD
+        _check_svds_n(S, k, u, s, vh, which="SM", check_svd=c_svd, atol=1e-1)
+
+    # --- Test Edge Cases ---
+    # Checks a few edge cases.
+
+    @pytest.mark.parametrize("shape", ((6, 5), (5, 5), (5, 6)))
+    @pytest.mark.parametrize("dtype", (float, complex))
+    def test_svd_LM_ones_matrix(self, shape, dtype):
+        # Check that svds can deal with matrix_rank less than k in LM mode.
+        k = 3
+        n, m = shape
+        A = np.ones((n, m), dtype=dtype)
+
+        if self.solver == 'lobpcg':
+            with pytest.warns(UserWarning, match="The problem size"):
+                U, s, VH = svds(A, k, solver=self.solver)
+        else:
+            U, s, VH = svds(A, k, solver=self.solver)
+
+        _check_svds(A, k, U, s, VH, check_usvh_A=True, check_svd=False)
+
+        # Check that the largest singular value is near sqrt(n*m)
+        # and the other singular values have been forced to zero.
+        assert_allclose(np.max(s), np.sqrt(n*m))
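+        # shift the remaining (near-zero) values by 1 so that the relative
+        # tolerance of `assert_allclose` is meaningful for them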
+        s = np.array(sorted(s)[:-1]) + 1
+        z = np.ones_like(s)
+        assert_allclose(s, z)
+
+    @pytest.mark.filterwarnings("ignore:k >= N - 1",
+                                reason="needed to demonstrate #16725")
+    @pytest.mark.parametrize("shape", ((3, 4), (4, 4), (4, 3), (4, 2)))
+    @pytest.mark.parametrize("dtype", (float, complex))
+    def test_zero_matrix(self, shape, dtype):
+        # Check that svds can deal with matrices containing only zeros;
+        # see https://github.com/scipy/scipy/issues/3452/
+        # shape = (4, 2) is included because it is the particular case
+        # reported in the issue
+        k = 1
+        n, m = shape
+        A = np.zeros((n, m), dtype=dtype)
+
+        if (self.solver == 'arpack' and dtype is complex
+                and k == min(A.shape) - 1):
+            pytest.skip("#16725")
+
+        if self.solver == 'propack':
+            pytest.skip("PROPACK failures unrelated to PR #16712")
+
+        if self.solver == 'lobpcg':
+            with pytest.warns(UserWarning, match="The problem size"):
+                U, s, VH = svds(A, k, solver=self.solver)
+        else:
+            U, s, VH = svds(A, k, solver=self.solver)
+
+        # Check some generic properties of svd.
+        _check_svds(A, k, U, s, VH, check_usvh_A=True, check_svd=False)
+
+        # Check that the singular values are zero.
+        assert_array_equal(s, 0)
+
+    @pytest.mark.parametrize("shape", ((20, 20), (20, 21), (21, 20)))
+    # ARPACK supports only dtype float, complex, or np.float32
+    @pytest.mark.parametrize("dtype", (float, complex, np.float32))
+    def test_small_sigma(self, shape, dtype):
+        if self.solver == 'propack' and not has_propack:
+            pytest.skip("PROPACK not enabled")
+        # https://github.com/scipy/scipy/pull/11829
+        if dtype == complex and self.solver == 'propack':
+            pytest.skip("PROPACK unsupported for complex dtype")
+        rng = np.random.default_rng(179847540)
+        A = rng.random(shape).astype(dtype)
+        u, _, vh = svd(A, full_matrices=False)
+        if dtype == np.float32:
+            e = 10.0
+        else:
+            e = 100.0
+        t = e**(-np.arange(len(vh))).astype(dtype)
+        A = (u*t).dot(vh)
+        k = 4
+        u, s, vh = svds(A, k, solver=self.solver, maxiter=100)
+        t = np.sum(s > 0)
+        assert_equal(t, k)
+        # LOBPCG needs larger atol and rtol to pass
+        _check_svds_n(A, k, u, s, vh, atol=1e-3, rtol=1e0, check_svd=False)
+
+    # ARPACK supports only dtype float, complex, or np.float32
+    @pytest.mark.filterwarnings("ignore:The problem size")
+    @pytest.mark.parametrize("dtype", (float, complex, np.float32))
+    def test_small_sigma2(self, dtype):
+        if self.solver == 'propack':
+            if not has_propack:
+                pytest.skip("PROPACK not enabled")
+            elif dtype == np.float32:
+                pytest.skip("Test failures in CI, see gh-17004")
+            elif dtype == complex:
+                # https://github.com/scipy/scipy/issues/11406
+                pytest.skip("PROPACK unsupported for complex dtype")
+
+        rng = np.random.default_rng(179847540)
+        # create a 10x10 singular matrix with a 4-dim null space
+        dim = 4
+        size = 10
+        x = rng.random((size, size-dim))
+        y = x[:, :dim] * rng.random(dim)
+        mat = np.hstack((x, y))
+        mat = mat.astype(dtype)
+
+        nz = null_space(mat)
+        assert_equal(nz.shape[1], dim)
+
+        # Tolerances atol and rtol adjusted to pass np.float32
+        # Use non-sparse svd
+        u, s, vh = svd(mat)
+        # Singular values are 0:
+        assert_allclose(s[-dim:], 0, atol=1e-6, rtol=1e0)
+        # Smallest right singular vectors in null space:
+        assert_allclose(mat @ vh[-dim:, :].T, 0, atol=1e-6, rtol=1e0)
+
+        # Smallest singular values should be 0
+        sp_mat = csc_matrix(mat)
+        su, ss, svh = svds(sp_mat, k=dim, which='SM', solver=self.solver)
+        # Smallest dim singular values are 0:
+        assert_allclose(ss, 0, atol=1e-5, rtol=1e0)
+        # Smallest singular vectors via svds in null space:
+        n, m = mat.shape
+        if n < m:  # otherwise the assert fails with some libraries; unclear why
+            assert_allclose(sp_mat.transpose() @ su, 0, atol=1e-5, rtol=1e0)
+        assert_allclose(sp_mat @ svh.T, 0, atol=1e-5, rtol=1e0)
+
+# --- Perform tests with each solver ---
+
+
+class Test_SVDS_once:
+    @pytest.mark.parametrize("solver", ['ekki', object])
+    def test_svds_input_validation_solver(self, solver):
+        message = "solver must be one of"
+        with pytest.raises(ValueError, match=message):
+            svds(np.ones((3, 4)), k=2, solver=solver)
+
+
+class Test_SVDS_ARPACK(SVDSCommonTests):
+
+    def setup_method(self):
+        self.solver = 'arpack'
+
+    @pytest.mark.parametrize("ncv", list(range(-1, 8)) + [4.5, "5"])
+    def test_svds_input_validation_ncv_1(self, ncv):
+        rng = np.random.default_rng(0)
+        A = rng.random((6, 7))
+        k = 3
+        if ncv in {4, 5}:
+            u, s, vh = svds(A, k=k, ncv=ncv, solver=self.solver)
+            # partial decomposition, so don't check that u@diag(s)@vh=A;
+            # do check that scipy.sparse.linalg.svds ~ scipy.linalg.svd
+            _check_svds(A, k, u, s, vh)
+        else:
+            message = ("`ncv` must be an integer satisfying")
+            with pytest.raises(ValueError, match=message):
+                svds(A, k=k, ncv=ncv, solver=self.solver)
+
+    def test_svds_input_validation_ncv_2(self):
+        # I think the stack trace is reasonable when `ncv` can't be converted
+        # to an int.
+        message = "int() argument must be a"
+        with pytest.raises(TypeError, match=re.escape(message)):
+            svds(np.eye(10), ncv=[], solver=self.solver)
+
+        message = "invalid literal for int()"
+        with pytest.raises(ValueError, match=message):
+            svds(np.eye(10), ncv="hi", solver=self.solver)
+
+    # I can't see a robust relationship between `ncv` and relevant outputs
+    # (e.g. accuracy, time), so no test of the parameter.
+
+
+class Test_SVDS_LOBPCG(SVDSCommonTests):
+
+    def setup_method(self):
+        self.solver = 'lobpcg'
+
+    def test_svd_random_state_3(self):
+        pytest.xfail("LOBPCG is having trouble with accuracy.")
+
+
+class Test_SVDS_PROPACK(SVDSCommonTests):
+
+    def setup_method(self):
+        self.solver = 'propack'
+
+    def test_svd_LM_ones_matrix(self):
+        message = ("PROPACK does not return orthonormal singular vectors "
+                   "associated with zero singular values.")
+        # There are some other issues with this matrix of all ones, e.g.
+        # `which='sm'` and `k=1` returns the largest singular value
+        pytest.xfail(message)
+
+    def test_svd_LM_zeros_matrix(self):
+        message = ("PROPACK does not return orthonormal singular vectors "
+                   "associated with zero singular values.")
+        pytest.xfail(message)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_expm_multiply.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_expm_multiply.py
new file mode 100644
index 00000000..01bea3c4
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_expm_multiply.py
@@ -0,0 +1,807 @@
+"""Compute the action of the matrix exponential."""
+from warnings import warn
+
+import numpy as np
+
+import scipy.linalg
+import scipy.sparse.linalg
+from scipy.linalg._decomp_qr import qr
+from scipy.sparse._sputils import is_pydata_spmatrix
+from scipy.sparse.linalg import aslinearoperator
+from scipy.sparse.linalg._interface import IdentityOperator
+from scipy.sparse.linalg._onenormest import onenormest
+
+__all__ = ['expm_multiply']
+
+
+def _exact_inf_norm(A):
+    # A compatibility function which should eventually disappear.
+    if scipy.sparse.isspmatrix(A):
+        return max(abs(A).sum(axis=1).flat)
+    elif is_pydata_spmatrix(A):
+        return max(abs(A).sum(axis=1))
+    else:
+        return np.linalg.norm(A, np.inf)
+
+
+def _exact_1_norm(A):
+    # A compatibility function which should eventually disappear.
+    if scipy.sparse.isspmatrix(A):
+        return max(abs(A).sum(axis=0).flat)
+    elif is_pydata_spmatrix(A):
+        return max(abs(A).sum(axis=0))
+    else:
+        return np.linalg.norm(A, 1)
+
+
+def _trace(A):
+    # A compatibility function which should eventually disappear.
+    if is_pydata_spmatrix(A):
+        return A.to_scipy_sparse().trace()
+    else:
+        return A.trace()
+
+
+def traceest(A, m3, seed=None):
+    """Estimate `np.trace(A)` using `3*m3` matrix-vector products.
+
+    The result is not deterministic.
+
+    Parameters
+    ----------
+    A : LinearOperator
+        Linear operator whose trace will be estimated. Has to be square.
+    m3 : int
+        Number of matrix-vector products divided by 3 used to estimate the
+        trace.
+    seed : optional
+        Seed for `numpy.random.default_rng`.
+        Can be provided to obtain deterministic results.
+
+    Returns
+    -------
+    trace : LinearOperator.dtype
+        Estimate of the trace
+
+    Notes
+    -----
+    This is the Hutch++ algorithm given in [1]_.
+
+    References
+    ----------
+    .. [1] Meyer, Raphael A., Cameron Musco, Christopher Musco, and David P.
+       Woodruff. "Hutch++: Optimal Stochastic Trace Estimation." In Symposium
+       on Simplicity in Algorithms (SOSA), pp. 142-155. Society for Industrial
+       and Applied Mathematics, 2021
+       https://doi.org/10.1137/1.9781611976496.16
+
+    """
+    rng = np.random.default_rng(seed)
+    if len(A.shape) != 2 or A.shape[-1] != A.shape[-2]:
+        raise ValueError("Expected A to be like a square matrix.")
+    n = A.shape[-1]
+    S = rng.choice([-1.0, +1.0], [n, m3])
+    Q, _ = qr(A.matmat(S), overwrite_a=True, mode='economic')
+    trQAQ = np.trace(Q.conj().T @ A.matmat(Q))
+    G = rng.choice([-1, +1], [n, m3])
+    right = G - Q@(Q.conj().T @ G)
+    trGAG = np.trace(right.conj().T @ A.matmat(right))
+    return trQAQ + trGAG/m3
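+
+# A minimal usage sketch for `traceest` (editorial; `rng`, `M` and `op` are
+# hypothetical names):
+#
+#     from scipy.sparse.linalg import aslinearoperator
+#     rng = np.random.default_rng(0)
+#     M = rng.random((100, 100))
+#     op = aslinearoperator(M)
+#     est = traceest(op, m3=10, seed=0)  # costs 3*10 = 30 matvec products
+#     # `est` should approximate np.trace(M)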
+
+
+def _ident_like(A):
+    # A compatibility function which should eventually disappear.
+    if scipy.sparse.isspmatrix(A):
+        return scipy.sparse._construct.eye(A.shape[0], A.shape[1],
+                dtype=A.dtype, format=A.format)
+    elif is_pydata_spmatrix(A):
+        import sparse
+        return sparse.eye(A.shape[0], A.shape[1], dtype=A.dtype)
+    elif isinstance(A, scipy.sparse.linalg.LinearOperator):
+        return IdentityOperator(A.shape, dtype=A.dtype)
+    else:
+        return np.eye(A.shape[0], A.shape[1], dtype=A.dtype)
+
+
+def expm_multiply(A, B, start=None, stop=None, num=None,
+                  endpoint=None, traceA=None):
+    """
+    Compute the action of the matrix exponential of A on B.
+
+    Parameters
+    ----------
+    A : transposable linear operator
+        The operator whose exponential is of interest.
+    B : ndarray
+        The matrix or vector to be multiplied by the matrix exponential of A.
+    start : scalar, optional
+        The starting time point of the sequence.
+    stop : scalar, optional
+        The end time point of the sequence, unless `endpoint` is set to False.
+        In that case, the sequence consists of all but the last of ``num + 1``
+        evenly spaced time points, so that `stop` is excluded.
+        Note that the step size changes when `endpoint` is False.
+    num : int, optional
+        Number of time points to use.
+    endpoint : bool, optional
+        If True, `stop` is the last time point.  Otherwise, it is not included.
+    traceA : scalar, optional
+        Trace of `A`. If not given, the trace is estimated for linear operators,
+        or calculated exactly for sparse matrices. It is used to precondition
+        `A`, thus an approximate trace is acceptable.
+        For linear operators, `traceA` should be provided to ensure performance
+        as the estimation is not guaranteed to be reliable for all cases.
+
+        .. versionadded:: 1.9.0
+
+    Returns
+    -------
+    expm_A_B : ndarray
+         The result of the action :math:`e^{t_k A} B`.
+
+    Warns
+    -----
+    UserWarning
+        If `A` is a linear operator and ``traceA=None`` (default).
+
+    Notes
+    -----
+    The optional arguments defining the sequence of evenly spaced time points
+    are compatible with the arguments of `numpy.linspace`.
+
+    The output ndarray shape is somewhat complicated so I explain it here.
+    The ndim of the output could be either 1, 2, or 3.
+    It would be 1 if you are computing the expm action on a single vector
+    at a single time point.
+    It would be 2 if you are computing the expm action on a vector
+    at multiple time points, or if you are computing the expm action
+    on a matrix at a single time point.
+    It would be 3 if you want the action on a matrix with multiple
+    columns at multiple time points.
+    If multiple time points are requested, expm_A_B[0] will always
+    be the action of the expm at the first time point,
+    regardless of whether the action is on a vector or a matrix.
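+
+    For example, with ``num=5`` time points, the result for a ``B`` of shape
+    ``(n,)`` has shape ``(5, n)``; for a ``B`` of shape ``(n, 2)``, it has
+    shape ``(5, n, 2)``.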
+
+    References
+    ----------
+    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2011)
+           "Computing the Action of the Matrix Exponential,
+           with an Application to Exponential Integrators."
+           SIAM Journal on Scientific Computing,
+           33 (2). pp. 488-511. ISSN 1064-8275
+           http://eprints.ma.man.ac.uk/1591/
+
+    .. [2] Nicholas J. Higham and Awad H. Al-Mohy (2010)
+           "Computing Matrix Functions."
+           Acta Numerica,
+           19. 159-208. ISSN 0962-4929
+           http://eprints.ma.man.ac.uk/1451/
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.sparse import csc_matrix
+    >>> from scipy.sparse.linalg import expm, expm_multiply
+    >>> A = csc_matrix([[1, 0], [0, 1]])
+    >>> A.toarray()
+    array([[1, 0],
+           [0, 1]], dtype=int64)
+    >>> B = np.array([np.exp(-1.), np.exp(-2.)])
+    >>> B
+    array([ 0.36787944,  0.13533528])
+    >>> expm_multiply(A, B, start=1, stop=2, num=3, endpoint=True)
+    array([[ 1.        ,  0.36787944],
+           [ 1.64872127,  0.60653066],
+           [ 2.71828183,  1.        ]])
+    >>> expm(A).dot(B)                  # Verify 1st timestep
+    array([ 1.        ,  0.36787944])
+    >>> expm(1.5*A).dot(B)              # Verify 2nd timestep
+    array([ 1.64872127,  0.60653066])
+    >>> expm(2*A).dot(B)                # Verify 3rd timestep
+    array([ 2.71828183,  1.        ])
+    """
+    if all(arg is None for arg in (start, stop, num, endpoint)):
+        X = _expm_multiply_simple(A, B, traceA=traceA)
+    else:
+        X, status = _expm_multiply_interval(A, B, start, stop, num,
+                                            endpoint, traceA=traceA)
+    return X
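+
+# Hedged usage sketch (editorial): when `A` is only available as a
+# LinearOperator, supplying `traceA` avoids the stochastic trace estimate and
+# its UserWarning.  `A_sparse`, `b` and `known_trace` are hypothetical names.
+#
+#     op = aslinearoperator(A_sparse)
+#     X = expm_multiply(op, b, traceA=known_trace)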
+
+
+def _expm_multiply_simple(A, B, t=1.0, traceA=None, balance=False):
+    """
+    Compute the action of the matrix exponential at a single time point.
+
+    Parameters
+    ----------
+    A : transposable linear operator
+        The operator whose exponential is of interest.
+    B : ndarray
+        The matrix to be multiplied by the matrix exponential of A.
+    t : float
+        A time point.
+    traceA : scalar, optional
+        Trace of `A`. If not given, the trace is estimated for linear
+        operators, or calculated exactly for sparse matrices. It is used to
+        precondition `A`, so an approximate trace is acceptable.
+    balance : bool
+        Indicates whether or not to apply balancing.
+
+    Returns
+    -------
+    F : ndarray
+        :math:`e^{t A} B`
+
+    Notes
+    -----
+    This is algorithm (3.2) in Al-Mohy and Higham (2011).
+
+    """
+    if balance:
+        raise NotImplementedError
+    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
+        raise ValueError('expected A to be like a square matrix')
+    if A.shape[1] != B.shape[0]:
+        raise ValueError('shapes of matrices A {} and B {} are incompatible'
+                         .format(A.shape, B.shape))
+    ident = _ident_like(A)
+    is_linear_operator = isinstance(A, scipy.sparse.linalg.LinearOperator)
+    n = A.shape[0]
+    if len(B.shape) == 1:
+        n0 = 1
+    elif len(B.shape) == 2:
+        n0 = B.shape[1]
+    else:
+        raise ValueError('expected B to be like a matrix or a vector')
+    u_d = 2**-53
+    tol = u_d
+    if traceA is None:
+        if is_linear_operator:
+            warn("Trace of LinearOperator not available, it will be estimated."
+                 " Provide `traceA` to ensure performance.", stacklevel=3)
+        # m3=1 is a somewhat arbitrary choice; a more accurate trace (larger
+        # m3) might speed up the exponential calculation, but trace estimation
+        # is more costly
+        traceA = traceest(A, m3=1) if is_linear_operator else _trace(A)
+    mu = traceA / float(n)
+    A = A - mu * ident
+    A_1_norm = onenormest(A) if is_linear_operator else _exact_1_norm(A)
+    if t*A_1_norm == 0:
+        m_star, s = 0, 1
+    else:
+        ell = 2
+        norm_info = LazyOperatorNormInfo(t*A, A_1_norm=t*A_1_norm, ell=ell)
+        m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)
+    return _expm_multiply_simple_core(A, B, t, mu, m_star, s, tol, balance)
+
+
+def _expm_multiply_simple_core(A, B, t, mu, m_star, s, tol=None, balance=False):
+    """
+    A helper function.
+    """
+    if balance:
+        raise NotImplementedError
+    if tol is None:
+        u_d = 2 ** -53
+        tol = u_d
+    F = B
+    eta = np.exp(t*mu / float(s))
+    for i in range(s):
+        c1 = _exact_inf_norm(B)
+        for j in range(m_star):
+            coeff = t / float(s*(j+1))
+            B = coeff * A.dot(B)
+            c2 = _exact_inf_norm(B)
+            F = F + B
+            if c1 + c2 <= tol * _exact_inf_norm(F):
+                break
+            c1 = c2
+        F = eta * F
+        B = F
+    return F
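+
+# Editorial note on _expm_multiply_simple_core: each pass of the outer loop
+# applies a truncated Taylor series, so after s passes
+#     F ~= (exp(t*mu/s) * sum_{j=0}^{m_star} (t/s)^j A^j / j!)^s @ B
+# where the exp(t*mu/s) factor undoes the shift A -> A - mu*I made by the
+# caller, and the inner loop exits early once new terms stop contributing
+# at tolerance `tol`.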
+
+
+# This table helps to compute bounds.
+# They seem to have been difficult to calculate, involving symbolic
+# manipulation of equations, followed by numerical root finding.
+_theta = {
+        # The first 30 values are from table A.3 of Computing Matrix Functions.
+        1: 2.29e-16,
+        2: 2.58e-8,
+        3: 1.39e-5,
+        4: 3.40e-4,
+        5: 2.40e-3,
+        6: 9.07e-3,
+        7: 2.38e-2,
+        8: 5.00e-2,
+        9: 8.96e-2,
+        10: 1.44e-1,
+        # 11
+        11: 2.14e-1,
+        12: 3.00e-1,
+        13: 4.00e-1,
+        14: 5.14e-1,
+        15: 6.41e-1,
+        16: 7.81e-1,
+        17: 9.31e-1,
+        18: 1.09,
+        19: 1.26,
+        20: 1.44,
+        # 21
+        21: 1.62,
+        22: 1.82,
+        23: 2.01,
+        24: 2.22,
+        25: 2.43,
+        26: 2.64,
+        27: 2.86,
+        28: 3.08,
+        29: 3.31,
+        30: 3.54,
+        # The rest are from table 3.1 of
+        # Computing the Action of the Matrix Exponential.
+        35: 4.7,
+        40: 6.0,
+        45: 7.2,
+        50: 8.5,
+        55: 9.9,
+        }
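+
+# Editorial gloss: _theta[m] is (roughly) the largest scaled 1-norm for which
+# an order-m truncated Taylor series attains double-precision accuracy, so the
+# scaling parameter is chosen as s = ceil(||t*A||_1 / _theta[m]) in
+# _fragment_3_1 below.  For example, ||t*A||_1 == 4.0 with m == 55 gives s == 1.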
+
+
+def _onenormest_matrix_power(A, p,
+        t=2, itmax=5, compute_v=False, compute_w=False):
+    """
+    Efficiently estimate the 1-norm of A^p.
+
+    Parameters
+    ----------
+    A : ndarray
+        Matrix whose 1-norm of a power is to be computed.
+    p : int
+        Non-negative integer power.
+    t : int, optional
+        A positive parameter controlling the tradeoff between
+        accuracy versus time and memory usage.
+        Larger values take longer and use more memory
+        but give more accurate output.
+    itmax : int, optional
+        Use at most this many iterations.
+    compute_v : bool, optional
+        Request a norm-maximizing linear operator input vector if True.
+    compute_w : bool, optional
+        Request a norm-maximizing linear operator output vector if True.
+
+    Returns
+    -------
+    est : float
+        An underestimate of the 1-norm of the sparse matrix.
+    v : ndarray, optional
+        The vector such that ||Av||_1 == est*||v||_1.
+        It can be thought of as an input to the linear operator
+        that gives an output with particularly large norm.
+    w : ndarray, optional
+        The vector Av which has relatively large 1-norm.
+        It can be thought of as an output of the linear operator
+        that is relatively large in norm compared to the input.
+
+    """
+    # XXX Eventually turn this into an API function in the _onenormest module
+    # XXX and remove its underscore,
+    # XXX but wait until expm_multiply goes into scipy.
+    from scipy.sparse.linalg._onenormest import onenormest
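+    # Editorial note: `t`, `itmax`, `compute_v` and `compute_w` are accepted
+    # for interface compatibility but are currently unused; and because
+    # `aslinearoperator(A) ** p` is a lazy operator power, each matvec of the
+    # estimate applies A p times, so A**p is never formed explicitly.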
+    return onenormest(aslinearoperator(A) ** p)
+
+
+class LazyOperatorNormInfo:
+    """
+    Information about an operator is lazily computed.
+
+    The information includes the exact 1-norm of the operator,
+    in addition to estimates of 1-norms of powers of the operator.
+    This uses the notation of Computing the Action (2011).
+    This class is specialized enough to probably not be of general interest
+    outside of this module.
+
+    """
+
+    def __init__(self, A, A_1_norm=None, ell=2, scale=1):
+        """
+        Provide the operator and some norm-related information.
+
+        Parameters
+        ----------
+        A : linear operator
+            The operator of interest.
+        A_1_norm : float, optional
+            The exact 1-norm of A.
+        ell : int, optional
+            A technical parameter controlling norm estimation quality.
+        scale : int, optional
+            If specified, return the norms of scale*A instead of A.
+
+        """
+        self._A = A
+        self._A_1_norm = A_1_norm
+        self._ell = ell
+        self._d = {}
+        self._scale = scale
+
+    def set_scale(self, scale):
+        """
+        Set the scale parameter.
+        """
+        self._scale = scale
+
+    def onenorm(self):
+        """
+        Compute the exact 1-norm.
+        """
+        if self._A_1_norm is None:
+            self._A_1_norm = _exact_1_norm(self._A)
+        return self._scale*self._A_1_norm
+
+    def d(self, p):
+        """
+        Lazily estimate d_p(A) ~= || A^p ||^(1/p) where ||.|| is the 1-norm.
+        """
+        if p not in self._d:
+            est = _onenormest_matrix_power(self._A, p, self._ell)
+            self._d[p] = est ** (1.0 / p)
+        return self._scale*self._d[p]
+
+    def alpha(self, p):
+        """
+        Lazily compute max(d(p), d(p+1)).
+        """
+        return max(self.d(p), self.d(p+1))
+
+
+def _compute_cost_div_m(m, p, norm_info):
+    """
+    A helper function for computing bounds.
+
+    This is equation (3.10).
+    It measures cost in terms of the number of required matrix products.
+
+    Parameters
+    ----------
+    m : int
+        A valid key of _theta.
+    p : int
+        A matrix power.
+    norm_info : LazyOperatorNormInfo
+        Information about 1-norms of related operators.
+
+    Returns
+    -------
+    cost_div_m : int
+        Required number of matrix products divided by m.
+
+    """
+    return int(np.ceil(norm_info.alpha(p) / _theta[m]))
+
+
+def _compute_p_max(m_max):
+    """
+    Compute the largest positive integer p such that p*(p-1) <= m_max + 1.
+
+    Do this in a slightly dumb way, but safe and not too slow.
+
+    Parameters
+    ----------
+    m_max : int
+        A count related to bounds.
+
+    """
+    sqrt_m_max = np.sqrt(m_max)
+    p_low = int(np.floor(sqrt_m_max))
+    p_high = int(np.ceil(sqrt_m_max + 1))
+    return max(p for p in range(p_low, p_high+1) if p*(p-1) <= m_max + 1)
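+
+# Worked example (editorial): for the default m_max = 55, sqrt(55) ~= 7.4, so
+# the candidates are p in {7, 8, 9}; 8*7 = 56 <= 56 while 9*8 = 72 > 56, hence
+# _compute_p_max(55) == 8.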
+
+
+def _fragment_3_1(norm_info, n0, tol, m_max=55, ell=2):
+    """
+    A helper function for the _expm_multiply_* functions.
+
+    Parameters
+    ----------
+    norm_info : LazyOperatorNormInfo
+        Information about norms of certain linear operators of interest.
+    n0 : int
+        Number of columns in the _expm_multiply_* B matrix.
+    tol : float
+        Expected to be
+        :math:`2^{-24}` for single precision or
+        :math:`2^{-53}` for double precision.
+    m_max : int
+        A value related to a bound.
+    ell : int
+        The number of columns used in the 1-norm approximation.
+        This is usually taken to be small, maybe between 1 and 5.
+
+    Returns
+    -------
+    best_m : int
+        Related to bounds for error control.
+    best_s : int
+        Amount of scaling.
+
+    Notes
+    -----
+    This is code fragment (3.1) in Al-Mohy and Higham (2011).
+    The discussion of default values for m_max and ell
+    is given between the definitions of equation (3.11)
+    and the definition of equation (3.12).
+
+    """
+    if ell < 1:
+        raise ValueError('expected ell to be a positive integer')
+    best_m = None
+    best_s = None
+    if _condition_3_13(norm_info.onenorm(), n0, m_max, ell):
+        for m, theta in _theta.items():
+            s = int(np.ceil(norm_info.onenorm() / theta))
+            if best_m is None or m * s < best_m * best_s:
+                best_m = m
+                best_s = s
+    else:
+        # Equation (3.11).
+        for p in range(2, _compute_p_max(m_max) + 1):
+            for m in range(p*(p-1)-1, m_max+1):
+                if m in _theta:
+                    s = _compute_cost_div_m(m, p, norm_info)
+                    if best_m is None or m * s < best_m * best_s:
+                        best_m = m
+                        best_s = s
+        best_s = max(best_s, 1)
+    return best_m, best_s
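+
+# Editorial example: when condition (3.13) holds and norm_info.onenorm() is
+# 1.0, the cost of each m is m * ceil(1.0 / _theta[m]).  The smallest m with
+# _theta[m] >= 1.0 is m == 18 (_theta[18] == 1.09), giving s == 1 and cost 18,
+# which beats every alternative, so best_m, best_s == 18, 1.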
+
+
+def _condition_3_13(A_1_norm, n0, m_max, ell):
+    """
+    A helper function for the _expm_multiply_* functions.
+
+    Parameters
+    ----------
+    A_1_norm : float
+        The precomputed 1-norm of A.
+    n0 : int
+        Number of columns in the _expm_multiply_* B matrix.
+    m_max : int
+        A value related to a bound.
+    ell : int
+        The number of columns used in the 1-norm approximation.
+        This is usually taken to be small, maybe between 1 and 5.
+
+    Returns
+    -------
+    value : bool
+        Indicates whether or not the condition has been met.
+
+    Notes
+    -----
+    This is condition (3.13) in Al-Mohy and Higham (2011).
+
+    """
+
+    # This is the rhs of equation (3.12).
+    p_max = _compute_p_max(m_max)
+    a = 2 * ell * p_max * (p_max + 3)
+
+    # Evaluate the condition (3.13).
+    b = _theta[m_max] / float(n0 * m_max)
+    return A_1_norm <= a * b
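+
+# Editorial example: with the defaults m_max = 55 and ell = 2 and a single
+# column (n0 == 1), p_max == 8, so a == 2*2*8*11 == 352 and
+# b == 9.9 / 55 == 0.18; the condition then reads ||A||_1 <= 63.36.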
+
+
+def _expm_multiply_interval(A, B, start=None, stop=None, num=None,
+                            endpoint=None, traceA=None, balance=False,
+                            status_only=False):
+    """
+    Compute the action of the matrix exponential at multiple time points.
+
+    Parameters
+    ----------
+    A : transposable linear operator
+        The operator whose exponential is of interest.
+    B : ndarray
+        The matrix to be multiplied by the matrix exponential of A.
+    start : scalar, optional
+        The starting time point of the sequence.
+    stop : scalar, optional
+        The end time point of the sequence, unless `endpoint` is set to False.
+        In that case, the sequence consists of all but the last of ``num + 1``
+        evenly spaced time points, so that `stop` is excluded.
+        Note that the step size changes when `endpoint` is False.
+    num : int, optional
+        Number of time points to use.
+    traceA : scalar, optional
+        Trace of `A`. If not given, the trace is estimated for linear
+        operators, or calculated exactly for sparse matrices. It is used to
+        precondition `A`, so an approximate trace is acceptable.
+    endpoint : bool, optional
+        If True, `stop` is the last time point. Otherwise, it is not included.
+    balance : bool
+        Indicates whether or not to apply balancing.
+    status_only : bool
+        A flag that is set to True for some debugging and testing operations.
+
+    Returns
+    -------
+    F : ndarray
+        :math:`e^{t_k A} B`
+    status : int
+        An integer status for testing and debugging.
+
+    Notes
+    -----
+    This is algorithm (5.2) in Al-Mohy and Higham (2011).
+
+    There seems to be a typo, where line 15 of the algorithm should be
+    moved to line 6.5 (between lines 6 and 7).
+
+    """
+    if balance:
+        raise NotImplementedError
+    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
+        raise ValueError('expected A to be like a square matrix')
+    if A.shape[1] != B.shape[0]:
+        raise ValueError('shapes of matrices A {} and B {} are incompatible'
+                         .format(A.shape, B.shape))
+    ident = _ident_like(A)
+    is_linear_operator = isinstance(A, scipy.sparse.linalg.LinearOperator)
+    n = A.shape[0]
+    if len(B.shape) == 1:
+        n0 = 1
+    elif len(B.shape) == 2:
+        n0 = B.shape[1]
+    else:
+        raise ValueError('expected B to be like a matrix or a vector')
+    u_d = 2**-53
+    tol = u_d
+    if traceA is None:
+        if is_linear_operator:
+            warn("Trace of LinearOperator not available, it will be estimated."
+                 " Provide `traceA` to ensure performance.", stacklevel=3)
+        # m3=5 is a somewhat arbitrary choice; a more accurate trace (larger
+        # m3) might speed up the exponential calculation, but trace estimation
+        # is also costly; an educated guess would need to consider the number
+        # of time points
+        traceA = traceest(A, m3=5) if is_linear_operator else _trace(A)
+    mu = traceA / float(n)
+
+    # Get the linspace samples, attempting to preserve the linspace defaults.
+    linspace_kwargs = {'retstep': True}
+    if num is not None:
+        linspace_kwargs['num'] = num
+    if endpoint is not None:
+        linspace_kwargs['endpoint'] = endpoint
+    samples, step = np.linspace(start, stop, **linspace_kwargs)
+
+    # Convert the linspace output to the notation used by the publication.
+    nsamples = len(samples)
+    if nsamples < 2:
+        raise ValueError('at least two time points are required')
+    q = nsamples - 1
+    h = step
+    t_0 = samples[0]
+    t_q = samples[q]
+
+    # Define the output ndarray.
+    # Use an ndim=3 shape, such that the last two indices
+    # are the ones that may be involved in level 3 BLAS operations.
+    X_shape = (nsamples,) + B.shape
+    X = np.empty(X_shape, dtype=np.result_type(A.dtype, B.dtype, float))
+    t = t_q - t_0
+    A = A - mu * ident
+    A_1_norm = onenormest(A) if is_linear_operator else _exact_1_norm(A)
+    ell = 2
+    norm_info = LazyOperatorNormInfo(t*A, A_1_norm=t*A_1_norm, ell=ell)
+    if t*A_1_norm == 0:
+        m_star, s = 0, 1
+    else:
+        m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)
+
+    # Compute the expm action up to the initial time point.
+    X[0] = _expm_multiply_simple_core(A, B, t_0, mu, m_star, s)
+
+    # Compute the expm action at the rest of the time points.
+    if q <= s:
+        if status_only:
+            return 0
+        else:
+            return _expm_multiply_interval_core_0(A, X,
+                    h, mu, q, norm_info, tol, ell, n0)
+    elif not (q % s):
+        if status_only:
+            return 1
+        else:
+            return _expm_multiply_interval_core_1(A, X,
+                    h, mu, m_star, s, q, tol)
+    else:  # the remaining case, q % s > 0
+        if status_only:
+            return 2
+        else:
+            return _expm_multiply_interval_core_2(A, X,
+                    h, mu, m_star, s, q, tol)
+
+
+def _expm_multiply_interval_core_0(A, X, h, mu, q, norm_info, tol, ell, n0):
+    """
+    A helper function, for the case q <= s.
+    """
+
+    # Compute the new values of m_star and s which should be applied
+    # over intervals of size t/q
+    if norm_info.onenorm() == 0:
+        m_star, s = 0, 1
+    else:
+        norm_info.set_scale(1./q)
+        m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)
+        norm_info.set_scale(1)
+
+    for k in range(q):
+        X[k+1] = _expm_multiply_simple_core(A, X[k], h, mu, m_star, s)
+    return X, 0
+
+
+def _expm_multiply_interval_core_1(A, X, h, mu, m_star, s, q, tol):
+    """
+    A helper function, for the case q > s and q % s == 0.
+    """
+    d = q // s
+    input_shape = X.shape[1:]
+    K_shape = (m_star + 1, ) + input_shape
+    K = np.empty(K_shape, dtype=X.dtype)
+    for i in range(s):
+        Z = X[i*d]
+        K[0] = Z
+        high_p = 0
+        for k in range(1, d+1):
+            F = K[0]
+            c1 = _exact_inf_norm(F)
+            for p in range(1, m_star+1):
+                if p > high_p:
+                    K[p] = h * A.dot(K[p-1]) / float(p)
+                    high_p = p  # K[p] depends only on K[p-1], not on k
+                coeff = float(pow(k, p))
+                F = F + coeff * K[p]
+                inf_norm_K_p_1 = _exact_inf_norm(K[p])
+                c2 = coeff * inf_norm_K_p_1
+                if c1 + c2 <= tol * _exact_inf_norm(F):
+                    break
+                c1 = c2
+            X[k + i*d] = np.exp(k*h*mu) * F
+    return X, 1
+
+
+def _expm_multiply_interval_core_2(A, X, h, mu, m_star, s, q, tol):
+    """
+    A helper function, for the case q > s and q % s > 0.
+    """
+    d = q // s
+    j = q // d
+    r = q - d * j
+    input_shape = X.shape[1:]
+    K_shape = (m_star + 1, ) + input_shape
+    K = np.empty(K_shape, dtype=X.dtype)
+    for i in range(j + 1):
+        Z = X[i*d]
+        K[0] = Z
+        high_p = 0
+        if i < j:
+            effective_d = d
+        else:
+            effective_d = r
+        for k in range(1, effective_d+1):
+            F = K[0]
+            c1 = _exact_inf_norm(F)
+            for p in range(1, m_star+1):
+                if p == high_p + 1:
+                    K[p] = h * A.dot(K[p-1]) / float(p)
+                    high_p = p
+                coeff = float(pow(k, p))
+                F = F + coeff * K[p]
+                inf_norm_K_p_1 = _exact_inf_norm(K[p])
+                c2 = coeff * inf_norm_K_p_1
+                if c1 + c2 <= tol * _exact_inf_norm(F):
+                    break
+                c1 = c2
+            X[k + i*d] = np.exp(k*h*mu) * F
+    return X, 2
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_interface.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_interface.py
new file mode 100644
index 00000000..cf1e6ae0
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_interface.py
@@ -0,0 +1,829 @@
+"""Abstract linear algebra library.
+
+This module defines a class hierarchy that implements a kind of "lazy"
+matrix representation, called the ``LinearOperator``. It can be used to do
+linear algebra with extremely large sparse or structured matrices, without
+representing those explicitly in memory. Such matrices can be added,
+multiplied, transposed, etc.
+
+As a motivating example, suppose you have a matrix where almost all of
+the elements have the value one. The standard sparse matrix representation
+skips the storage of zeros, but not ones. By contrast, a LinearOperator is
+able to represent such matrices efficiently. First, we need a compact way to
+represent an all-ones matrix::
+
+    >>> import numpy as np
+    >>> from scipy.sparse.linalg import LinearOperator, aslinearoperator
+    >>> class Ones(LinearOperator):
+    ...     def __init__(self, shape):
+    ...         super().__init__(dtype=None, shape=shape)
+    ...     def _matvec(self, x):
+    ...         return np.repeat(x.sum(), self.shape[0])
+
+Instances of this class emulate ``np.ones(shape)``, but using a constant
+amount of storage, independent of ``shape``. The ``_matvec`` method specifies
+how this linear operator multiplies with (operates on) a vector. We can now
+add this operator to a sparse matrix that stores only offsets from one::
+
+    >>> from scipy.sparse import csr_matrix
+    >>> offsets = csr_matrix([[1, 0, 2], [0, -1, 0], [0, 0, 3]])
+    >>> A = aslinearoperator(offsets) + Ones(offsets.shape)
+    >>> A.dot([1, 2, 3])
+    array([13,  4, 15])
+
+The result is the same as that given by its dense, explicitly-stored
+counterpart::
+
+    >>> (np.ones(A.shape, A.dtype) + offsets.toarray()).dot([1, 2, 3])
+    array([13,  4, 15])
+
+Several algorithms in the ``scipy.sparse`` library are able to operate on
+``LinearOperator`` instances.
+"""
+
+import warnings
+
+import numpy as np
+
+from scipy.sparse import isspmatrix
+from scipy.sparse._sputils import isshape, isintlike, asmatrix, is_pydata_spmatrix
+
+__all__ = ['LinearOperator', 'aslinearoperator']
+
+
+class LinearOperator:
+    """Common interface for performing matrix vector products
+
+    Many iterative methods (e.g. cg, gmres) do not need to know the
+    individual entries of a matrix to solve a linear system A*x=b.
+    Such solvers only require the computation of matrix vector
+    products, A*v where v is a dense vector.  This class serves as
+    an abstract interface between iterative solvers and matrix-like
+    objects.
+
+    To construct a concrete LinearOperator, either pass appropriate
+    callables to the constructor of this class, or subclass it.
+
+    A subclass must implement either one of the methods ``_matvec``
+    and ``_matmat``, and the attributes/properties ``shape`` (pair of
+    integers) and ``dtype`` (may be None). It may call the ``__init__``
+    on this class to have these attributes validated. Implementing
+    ``_matvec`` automatically implements ``_matmat`` (using a naive
+    algorithm) and vice-versa.
+
+    Optionally, a subclass may implement ``_rmatvec`` or ``_adjoint``
+    to implement the Hermitian adjoint (conjugate transpose). As with
+    ``_matvec`` and ``_matmat``, implementing either ``_rmatvec`` or
+    ``_adjoint`` implements the other automatically. Implementing
+    ``_adjoint`` is preferable; ``_rmatvec`` is mostly there for
+    backwards compatibility.
+
+    Parameters
+    ----------
+    shape : tuple
+        Matrix dimensions (M, N).
+    matvec : callable f(v)
+        Returns A * v.
+    rmatvec : callable f(v)
+        Returns A^H * v, where A^H is the conjugate transpose of A.
+    matmat : callable f(V)
+        Returns A * V, where V is a dense matrix with dimensions (N, K).
+    dtype : dtype
+        Data type of the matrix.
+    rmatmat : callable f(V)
+        Returns A^H * V, where V is a dense matrix with dimensions (M, K).
+
+    Attributes
+    ----------
+    args : tuple
+        For linear operators describing products etc. of other linear
+        operators, the operands of the binary operation.
+    ndim : int
+        Number of dimensions (this is always 2)
+
+    See Also
+    --------
+    aslinearoperator : Construct LinearOperators
+
+    Notes
+    -----
+    The user-defined matvec() function must properly handle the case
+    where v has shape (N,) as well as the (N,1) case.  The shape of
+    the return type is handled internally by LinearOperator.
+
+    LinearOperator instances can also be multiplied, added with each
+    other and exponentiated, all lazily: the result of these operations
+    is always a new, composite LinearOperator, that defers linear
+    operations to the original operators and combines the results.
+
+    More details regarding how to subclass a LinearOperator and several
+    examples of concrete LinearOperator instances can be found in the
+external project `PyLops <https://pylops.readthedocs.io>`_.
+
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.sparse.linalg import LinearOperator
+    >>> def mv(v):
+    ...     return np.array([2*v[0], 3*v[1]])
+    ...
+    >>> A = LinearOperator((2,2), matvec=mv)
+    >>> A
+    <2x2 _CustomLinearOperator with dtype=float64>
+    >>> A.matvec(np.ones(2))
+    array([ 2.,  3.])
+    >>> A * np.ones(2)
+    array([ 2.,  3.])
+
+    """
+
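+    # A minimal subclass sketch (illustrative only, not part of this module):
+    # implementing ``_matvec`` alone suffices, since the default ``_matmat``
+    # falls back on it column by column, and ``_adjoint`` then provides
+    # ``rmatvec``/``rmatmat`` for free.
+    #
+    #     class Diagonal(LinearOperator):
+    #         def __init__(self, diag):
+    #             self.diag = np.asarray(diag)
+    #             super().__init__(dtype=self.diag.dtype,
+    #                              shape=(self.diag.size, self.diag.size))
+    #         def _matvec(self, x):
+    #             return self.diag * x.ravel()
+    #         def _adjoint(self):
+    #             return Diagonal(np.conj(self.diag))
+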
+    ndim = 2
+
+    def __new__(cls, *args, **kwargs):
+        if cls is LinearOperator:
+            # Operate as _CustomLinearOperator factory.
+            return super(LinearOperator, cls).__new__(_CustomLinearOperator)
+        else:
+            obj = super(LinearOperator, cls).__new__(cls)
+
+            if (type(obj)._matvec == LinearOperator._matvec
+                    and type(obj)._matmat == LinearOperator._matmat):
+                warnings.warn("LinearOperator subclass should implement"
+                              " at least one of _matvec and _matmat.",
+                              category=RuntimeWarning, stacklevel=2)
+
+            return obj
+
+    def __init__(self, dtype, shape):
+        """Initialize this LinearOperator.
+
+        To be called by subclasses. ``dtype`` may be None; ``shape`` should
+        be convertible to a length-2 tuple.
+        """
+        if dtype is not None:
+            dtype = np.dtype(dtype)
+
+        shape = tuple(shape)
+        if not isshape(shape):
+            raise ValueError("invalid shape %r (must be 2-d)" % (shape,))
+
+        self.dtype = dtype
+        self.shape = shape
+
+    def _init_dtype(self):
+        """Called from subclasses at the end of the __init__ routine.
+        """
+        if self.dtype is None:
+            v = np.zeros(self.shape[-1])
+            self.dtype = np.asarray(self.matvec(v)).dtype
+
+    def _matmat(self, X):
+        """Default matrix-matrix multiplication handler.
+
+        Falls back on the user-defined _matvec method, so defining that will
+        define matrix multiplication (though in a very suboptimal way).
+        """
+
+        return np.hstack([self.matvec(col.reshape(-1,1)) for col in X.T])
+
+    def _matvec(self, x):
+        """Default matrix-vector multiplication handler.
+
+        If self is a linear operator of shape (M, N), then this method will
+        be called on a shape (N,) or (N, 1) ndarray, and should return a
+        shape (M,) or (M, 1) ndarray.
+
+        This default implementation falls back on _matmat, so defining that
+        will define matrix-vector multiplication as well.
+        """
+        return self.matmat(x.reshape(-1, 1))
+
+    def matvec(self, x):
+        """Matrix-vector multiplication.
+
+        Performs the operation y=A*x where A is an MxN linear
+        operator and x is a column vector or 1-d array.
+
+        Parameters
+        ----------
+        x : {matrix, ndarray}
+            An array with shape (N,) or (N,1).
+
+        Returns
+        -------
+        y : {matrix, ndarray}
+            A matrix or ndarray with shape (M,) or (M,1) depending
+            on the type and shape of the x argument.
+
+        Notes
+        -----
+        This matvec wraps the user-specified matvec routine or overridden
+        _matvec method to ensure that y has the correct shape and type.
+
+        """
+
+        x = np.asanyarray(x)
+
+        M,N = self.shape
+
+        if x.shape != (N,) and x.shape != (N,1):
+            raise ValueError('dimension mismatch')
+
+        y = self._matvec(x)
+
+        if isinstance(x, np.matrix):
+            y = asmatrix(y)
+        else:
+            y = np.asarray(y)
+
+        if x.ndim == 1:
+            y = y.reshape(M)
+        elif x.ndim == 2:
+            y = y.reshape(M,1)
+        else:
+            raise ValueError('invalid shape returned by user-defined matvec()')
+
+        return y
+
+    def rmatvec(self, x):
+        """Adjoint matrix-vector multiplication.
+
+        Performs the operation y = A^H * x where A is an MxN linear
+        operator and x is a column vector or 1-d array.
+
+        Parameters
+        ----------
+        x : {matrix, ndarray}
+            An array with shape (M,) or (M,1).
+
+        Returns
+        -------
+        y : {matrix, ndarray}
+            A matrix or ndarray with shape (N,) or (N,1) depending
+            on the type and shape of the x argument.
+
+        Notes
+        -----
+        This rmatvec wraps the user-specified rmatvec routine or overridden
+        _rmatvec method to ensure that y has the correct shape and type.
+
+        """
+
+        x = np.asanyarray(x)
+
+        M,N = self.shape
+
+        if x.shape != (M,) and x.shape != (M,1):
+            raise ValueError('dimension mismatch')
+
+        y = self._rmatvec(x)
+
+        if isinstance(x, np.matrix):
+            y = asmatrix(y)
+        else:
+            y = np.asarray(y)
+
+        if x.ndim == 1:
+            y = y.reshape(N)
+        elif x.ndim == 2:
+            y = y.reshape(N,1)
+        else:
+            raise ValueError('invalid shape returned by user-defined rmatvec()')
+
+        return y
+
+    def _rmatvec(self, x):
+        """Default implementation of _rmatvec; defers to adjoint."""
+        if type(self)._adjoint == LinearOperator._adjoint:
+            # _adjoint not overridden, prevent infinite recursion
+            raise NotImplementedError
+        else:
+            return self.H.matvec(x)
+
+    def matmat(self, X):
+        """Matrix-matrix multiplication.
+
+        Performs the operation y=A*X where A is an MxN linear
+        operator and X is a dense N*K matrix or ndarray.
+
+        Parameters
+        ----------
+        X : {matrix, ndarray}
+            An array with shape (N,K).
+
+        Returns
+        -------
+        Y : {matrix, ndarray}
+            A matrix or ndarray with shape (M,K) depending on
+            the type of the X argument.
+
+        Notes
+        -----
+        This matmat wraps any user-specified matmat routine or overridden
+        _matmat method to ensure that y has the correct type.
+
+        """
+
+        X = np.asanyarray(X)
+
+        if X.ndim != 2:
+            raise ValueError('expected 2-d ndarray or matrix, not %d-d'
+                             % X.ndim)
+
+        if X.shape[0] != self.shape[1]:
+            raise ValueError('dimension mismatch: %r, %r'
+                             % (self.shape, X.shape))
+
+        Y = self._matmat(X)
+
+        if isinstance(Y, np.matrix):
+            Y = asmatrix(Y)
+
+        return Y
+
+    def rmatmat(self, X):
+        """Adjoint matrix-matrix multiplication.
+
+        Performs the operation Y = A^H * X where A is an MxN linear
+        operator and X is a dense matrix or 2-d array with shape (M, K).
+        The default implementation defers to the adjoint.
+
+        Parameters
+        ----------
+        X : {matrix, ndarray}
+            A matrix or 2D array.
+
+        Returns
+        -------
+        Y : {matrix, ndarray}
+            A matrix or 2D array depending on the type of the input.
+
+        Notes
+        -----
+        This rmatmat wraps the user-specified rmatmat routine.
+
+        """
+
+        X = np.asanyarray(X)
+
+        if X.ndim != 2:
+            raise ValueError('expected 2-d ndarray or matrix, not %d-d'
+                             % X.ndim)
+
+        if X.shape[0] != self.shape[0]:
+            raise ValueError('dimension mismatch: %r, %r'
+                             % (self.shape, X.shape))
+
+        Y = self._rmatmat(X)
+        if isinstance(Y, np.matrix):
+            Y = asmatrix(Y)
+        return Y
+
+    def _rmatmat(self, X):
+        """Default implementation of _rmatmat defers to rmatvec or adjoint."""
+        if type(self)._adjoint == LinearOperator._adjoint:
+            return np.hstack([self.rmatvec(col.reshape(-1, 1)) for col in X.T])
+        else:
+            return self.H.matmat(X)
+
+    def __call__(self, x):
+        return self*x
+
+    def __mul__(self, x):
+        return self.dot(x)
+
+    def dot(self, x):
+        """Matrix-matrix or matrix-vector multiplication.
+
+        Parameters
+        ----------
+        x : array_like
+            1-d or 2-d array, representing a vector or matrix.
+
+        Returns
+        -------
+        Ax : array
+            1-d or 2-d array (depending on the shape of x) that represents
+            the result of applying this linear operator on x.
+
+        """
+        if isinstance(x, LinearOperator):
+            return _ProductLinearOperator(self, x)
+        elif np.isscalar(x):
+            return _ScaledLinearOperator(self, x)
+        else:
+            x = np.asarray(x)
+
+            if x.ndim == 1 or x.ndim == 2 and x.shape[1] == 1:
+                return self.matvec(x)
+            elif x.ndim == 2:
+                return self.matmat(x)
+            else:
+                raise ValueError('expected 1-d or 2-d array or matrix, got %r'
+                                 % x)
+
+    def __matmul__(self, other):
+        if np.isscalar(other):
+            raise ValueError("Scalar operands are not allowed, "
+                             "use '*' instead")
+        return self.__mul__(other)
+
+    def __rmatmul__(self, other):
+        if np.isscalar(other):
+            raise ValueError("Scalar operands are not allowed, "
+                             "use '*' instead")
+        return self.__rmul__(other)
+
+    def __rmul__(self, x):
+        if np.isscalar(x):
+            return _ScaledLinearOperator(self, x)
+        else:
+            return NotImplemented
+
+    def __pow__(self, p):
+        if np.isscalar(p):
+            return _PowerLinearOperator(self, p)
+        else:
+            return NotImplemented
+
+    def __add__(self, x):
+        if isinstance(x, LinearOperator):
+            return _SumLinearOperator(self, x)
+        else:
+            return NotImplemented
+
+    def __neg__(self):
+        return _ScaledLinearOperator(self, -1)
+
+    def __sub__(self, x):
+        return self.__add__(-x)
+
+    def __repr__(self):
+        M,N = self.shape
+        if self.dtype is None:
+            dt = 'unspecified dtype'
+        else:
+            dt = 'dtype=' + str(self.dtype)
+
+        return '<%dx%d %s with %s>' % (M, N, self.__class__.__name__, dt)
+
+    def adjoint(self):
+        """Hermitian adjoint.
+
+        Returns the Hermitian adjoint of self, aka the Hermitian
+        conjugate or Hermitian transpose. For a complex matrix, the
+        Hermitian adjoint is equal to the conjugate transpose.
+
+        Can be abbreviated self.H instead of self.adjoint().
+
+        Returns
+        -------
+        A_H : LinearOperator
+            Hermitian adjoint of self.
+        """
+        return self._adjoint()
+
+    H = property(adjoint)
+
+    def transpose(self):
+        """Transpose this linear operator.
+
+        Returns a LinearOperator that represents the transpose of this one.
+        Can be abbreviated self.T instead of self.transpose().
+        """
+        return self._transpose()
+
+    T = property(transpose)
+
+    def _adjoint(self):
+        """Default implementation of _adjoint; defers to rmatvec."""
+        return _AdjointLinearOperator(self)
+
+    def _transpose(self):
+        """ Default implementation of _transpose; defers to rmatvec + conj"""
+        return _TransposedLinearOperator(self)
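+    # Relationship between the two defaults above (a sketch, equality up to
+    # floating point): transposition is conjugation of the adjoint,
+    #
+    #     A.T.matvec(x) == np.conj(A.H.matvec(np.conj(x)))
+    #
+    # so for a real-valued operator, A.T and A.H act identically.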
+
+
+class _CustomLinearOperator(LinearOperator):
+    """Linear operator defined in terms of user-specified operations."""
+
+    def __init__(self, shape, matvec, rmatvec=None, matmat=None,
+                 dtype=None, rmatmat=None):
+        super().__init__(dtype, shape)
+
+        self.args = ()
+
+        self.__matvec_impl = matvec
+        self.__rmatvec_impl = rmatvec
+        self.__rmatmat_impl = rmatmat
+        self.__matmat_impl = matmat
+
+        self._init_dtype()
+
+    def _matmat(self, X):
+        if self.__matmat_impl is not None:
+            return self.__matmat_impl(X)
+        else:
+            return super()._matmat(X)
+
+    def _matvec(self, x):
+        return self.__matvec_impl(x)
+
+    def _rmatvec(self, x):
+        func = self.__rmatvec_impl
+        if func is None:
+            raise NotImplementedError("rmatvec is not defined")
+        return self.__rmatvec_impl(x)
+
+    def _rmatmat(self, X):
+        if self.__rmatmat_impl is not None:
+            return self.__rmatmat_impl(X)
+        else:
+            return super()._rmatmat(X)
+
+    def _adjoint(self):
+        return _CustomLinearOperator(shape=(self.shape[1], self.shape[0]),
+                                     matvec=self.__rmatvec_impl,
+                                     rmatvec=self.__matvec_impl,
+                                     matmat=self.__rmatmat_impl,
+                                     rmatmat=self.__matmat_impl,
+                                     dtype=self.dtype)
+
+
+class _AdjointLinearOperator(LinearOperator):
+    """Adjoint of arbitrary Linear Operator"""
+
+    def __init__(self, A):
+        shape = (A.shape[1], A.shape[0])
+        super().__init__(dtype=A.dtype, shape=shape)
+        self.A = A
+        self.args = (A,)
+
+    def _matvec(self, x):
+        return self.A._rmatvec(x)
+
+    def _rmatvec(self, x):
+        return self.A._matvec(x)
+
+    def _matmat(self, x):
+        return self.A._rmatmat(x)
+
+    def _rmatmat(self, x):
+        return self.A._matmat(x)
+
+class _TransposedLinearOperator(LinearOperator):
+    """Transposition of arbitrary Linear Operator"""
+
+    def __init__(self, A):
+        shape = (A.shape[1], A.shape[0])
+        super().__init__(dtype=A.dtype, shape=shape)
+        self.A = A
+        self.args = (A,)
+
+    def _matvec(self, x):
+        # NB. np.conj works also on sparse matrices
+        return np.conj(self.A._rmatvec(np.conj(x)))
+
+    def _rmatvec(self, x):
+        return np.conj(self.A._matvec(np.conj(x)))
+
+    def _matmat(self, x):
+        # NB. np.conj works also on sparse matrices
+        return np.conj(self.A._rmatmat(np.conj(x)))
+
+    def _rmatmat(self, x):
+        return np.conj(self.A._matmat(np.conj(x)))
+
+def _get_dtype(operators, dtypes=None):
+    if dtypes is None:
+        dtypes = []
+    for obj in operators:
+        if obj is not None and hasattr(obj, 'dtype'):
+            dtypes.append(obj.dtype)
+    return np.result_type(*dtypes)
+
+
+class _SumLinearOperator(LinearOperator):
+    def __init__(self, A, B):
+        if not isinstance(A, LinearOperator) or \
+                not isinstance(B, LinearOperator):
+            raise ValueError('both operands have to be a LinearOperator')
+        if A.shape != B.shape:
+            raise ValueError('cannot add %r and %r: shape mismatch'
+                             % (A, B))
+        self.args = (A, B)
+        super().__init__(_get_dtype([A, B]), A.shape)
+
+    def _matvec(self, x):
+        return self.args[0].matvec(x) + self.args[1].matvec(x)
+
+    def _rmatvec(self, x):
+        return self.args[0].rmatvec(x) + self.args[1].rmatvec(x)
+
+    def _rmatmat(self, x):
+        return self.args[0].rmatmat(x) + self.args[1].rmatmat(x)
+
+    def _matmat(self, x):
+        return self.args[0].matmat(x) + self.args[1].matmat(x)
+
+    def _adjoint(self):
+        A, B = self.args
+        return A.H + B.H
+
+
+class _ProductLinearOperator(LinearOperator):
+    def __init__(self, A, B):
+        if not isinstance(A, LinearOperator) or \
+                not isinstance(B, LinearOperator):
+            raise ValueError('both operands have to be a LinearOperator')
+        if A.shape[1] != B.shape[0]:
+            raise ValueError('cannot multiply %r and %r: shape mismatch'
+                             % (A, B))
+        super().__init__(_get_dtype([A, B]),
+                         (A.shape[0], B.shape[1]))
+        self.args = (A, B)
+
+    def _matvec(self, x):
+        return self.args[0].matvec(self.args[1].matvec(x))
+
+    def _rmatvec(self, x):
+        return self.args[1].rmatvec(self.args[0].rmatvec(x))
+
+    def _rmatmat(self, x):
+        return self.args[1].rmatmat(self.args[0].rmatmat(x))
+
+    def _matmat(self, x):
+        return self.args[0].matmat(self.args[1].matmat(x))
+
+    def _adjoint(self):
+        A, B = self.args
+        return B.H * A.H
+
+
+class _ScaledLinearOperator(LinearOperator):
+    def __init__(self, A, alpha):
+        if not isinstance(A, LinearOperator):
+            raise ValueError('LinearOperator expected as A')
+        if not np.isscalar(alpha):
+            raise ValueError('scalar expected as alpha')
+        dtype = _get_dtype([A], [type(alpha)])
+        super().__init__(dtype, A.shape)
+        self.args = (A, alpha)
+
+    def _matvec(self, x):
+        return self.args[1] * self.args[0].matvec(x)
+
+    def _rmatvec(self, x):
+        return np.conj(self.args[1]) * self.args[0].rmatvec(x)
+
+    def _rmatmat(self, x):
+        return np.conj(self.args[1]) * self.args[0].rmatmat(x)
+
+    def _matmat(self, x):
+        return self.args[1] * self.args[0].matmat(x)
+
+    def _adjoint(self):
+        A, alpha = self.args
+        return A.H * np.conj(alpha)
+
+
+class _PowerLinearOperator(LinearOperator):
+    def __init__(self, A, p):
+        if not isinstance(A, LinearOperator):
+            raise ValueError('LinearOperator expected as A')
+        if A.shape[0] != A.shape[1]:
+            raise ValueError('square LinearOperator expected, got %r' % A)
+        if not isintlike(p) or p < 0:
+            raise ValueError('non-negative integer expected as p')
+
+        super().__init__(_get_dtype([A]), A.shape)
+        self.args = (A, p)
+
+    def _power(self, fun, x):
+        res = np.array(x, copy=True)
+        for i in range(self.args[1]):
+            res = fun(res)
+        return res
+
+    def _matvec(self, x):
+        return self._power(self.args[0].matvec, x)
+
+    def _rmatvec(self, x):
+        return self._power(self.args[0].rmatvec, x)
+
+    def _rmatmat(self, x):
+        return self._power(self.args[0].rmatmat, x)
+
+    def _matmat(self, x):
+        return self._power(self.args[0].matmat, x)
+
+    def _adjoint(self):
+        A, p = self.args
+        return A.H ** p
+
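+# The composition classes above make operator arithmetic lazy; a sketch
+# (illustrative only, any square operator A would do):
+#
+#     A = aslinearoperator(np.eye(3))
+#     B = (2 * A + A.H) ** 2     # _PowerLinearOperator over _SumLinearOperator
+#     B.matvec(v)                # evaluated by repeated matvec calls on A;
+#                                # the composite matrix is never materialized
+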
+
+class MatrixLinearOperator(LinearOperator):
+    def __init__(self, A):
+        super().__init__(A.dtype, A.shape)
+        self.A = A
+        self.__adj = None
+        self.args = (A,)
+
+    def _matmat(self, X):
+        return self.A.dot(X)
+
+    def _adjoint(self):
+        if self.__adj is None:
+            self.__adj = _AdjointMatrixOperator(self)
+        return self.__adj
+
+class _AdjointMatrixOperator(MatrixLinearOperator):
+    def __init__(self, adjoint):
+        self.A = adjoint.A.T.conj()
+        self.__adjoint = adjoint
+        self.args = (adjoint,)
+        self.shape = adjoint.shape[1], adjoint.shape[0]
+
+    @property
+    def dtype(self):
+        return self.__adjoint.dtype
+
+    def _adjoint(self):
+        return self.__adjoint
+
+
+class IdentityOperator(LinearOperator):
+    def __init__(self, shape, dtype=None):
+        super().__init__(dtype, shape)
+
+    def _matvec(self, x):
+        return x
+
+    def _rmatvec(self, x):
+        return x
+
+    def _rmatmat(self, x):
+        return x
+
+    def _matmat(self, x):
+        return x
+
+    def _adjoint(self):
+        return self
+
+
+def aslinearoperator(A):
+    """Return A as a LinearOperator.
+
+    'A' may be any of the following types:
+     - ndarray
+     - matrix
+     - sparse matrix (e.g. csr_matrix, lil_matrix, etc.)
+     - LinearOperator
+     - An object with .shape and .matvec attributes
+
+    See the LinearOperator documentation for additional information.
+
+    Notes
+    -----
+    If 'A' has no .dtype attribute, the data type is determined by calling
+    :func:`LinearOperator.matvec()` - set the .dtype attribute to prevent this
+    call upon the linear operator creation.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.sparse.linalg import aslinearoperator
+    >>> M = np.array([[1,2,3],[4,5,6]], dtype=np.int32)
+    >>> aslinearoperator(M)
+    <2x3 MatrixLinearOperator with dtype=int32>
+    """
+    if isinstance(A, LinearOperator):
+        return A
+
+    elif isinstance(A, np.ndarray) or isinstance(A, np.matrix):
+        if A.ndim > 2:
+            raise ValueError('array must have ndim <= 2')
+        A = np.atleast_2d(np.asarray(A))
+        return MatrixLinearOperator(A)
+
+    elif isspmatrix(A) or is_pydata_spmatrix(A):
+        return MatrixLinearOperator(A)
+
+    else:
+        if hasattr(A, 'shape') and hasattr(A, 'matvec'):
+            rmatvec = None
+            rmatmat = None
+            dtype = None
+
+            if hasattr(A, 'rmatvec'):
+                rmatvec = A.rmatvec
+            if hasattr(A, 'rmatmat'):
+                rmatmat = A.rmatmat
+            if hasattr(A, 'dtype'):
+                dtype = A.dtype
+            return LinearOperator(A.shape, A.matvec, rmatvec=rmatvec,
+                                  rmatmat=rmatmat, dtype=dtype)
+
+        else:
+            raise TypeError('type not understood')
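+
+
+# Usage sketch for the duck-typing branch above (``Shift`` is a hypothetical
+# class, shown for illustration only): any object exposing ``shape`` and
+# ``matvec`` is accepted, with ``rmatvec``/``rmatmat``/``dtype`` picked up
+# when present.
+#
+#     class Shift:
+#         shape = (3, 3)
+#         dtype = np.dtype(float)
+#         def matvec(self, x):
+#             return np.roll(x, 1)
+#
+#     op = aslinearoperator(Shift())  # -> <3x3 _CustomLinearOperator ...>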
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/__init__.py
new file mode 100644
index 00000000..3b572745
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/__init__.py
@@ -0,0 +1,20 @@
+"Iterative Solvers for Sparse Linear Systems"
+
+#from info import __doc__
+from .iterative import *
+from .minres import minres
+from .lgmres import lgmres
+from .lsqr import lsqr
+from .lsmr import lsmr
+from ._gcrotmk import gcrotmk
+from .tfqmr import tfqmr
+
+__all__ = [
+    'bicg', 'bicgstab', 'cg', 'cgs', 'gcrotmk', 'gmres',
+    'lgmres', 'lsmr', 'lsqr',
+    'minres', 'qmr', 'tfqmr'
+]
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/_gcrotmk.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/_gcrotmk.py
new file mode 100644
index 00000000..f24d5a44
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/_gcrotmk.py
@@ -0,0 +1,507 @@
+# Copyright (C) 2015, Pauli Virtanen 
+# Distributed under the same license as SciPy.
+
+import warnings
+import numpy as np
+from numpy.linalg import LinAlgError
+from scipy.linalg import (get_blas_funcs, qr, solve, svd, qr_insert, lstsq)
+from scipy.sparse.linalg._isolve.utils import make_system
+
+
+__all__ = ['gcrotmk']
+
+
+def _fgmres(matvec, v0, m, atol, lpsolve=None, rpsolve=None, cs=(), outer_v=(),
+            prepend_outer_v=False):
+    """
+    FGMRES Arnoldi process, with optional projection or augmentation
+
+    Parameters
+    ----------
+    matvec : callable
+        Operation A*x
+    v0 : ndarray
+        Initial vector, normalized to nrm2(v0) == 1
+    m : int
+        Number of GMRES rounds
+    atol : float
+        Absolute tolerance for early exit
+    lpsolve : callable
+        Left preconditioner L
+    rpsolve : callable
+        Right preconditioner R
+    cs : list of (ndarray, ndarray)
+        Columns of matrices C and U in GCROT
+    outer_v : list of ndarrays
+        Augmentation vectors in LGMRES
+    prepend_outer_v : bool, optional
+        Whether augmentation vectors come before or after
+        Krylov iterates
+
+    Raises
+    ------
+    LinAlgError
+        If NaNs are encountered.
+
+    Returns
+    -------
+    Q, R : ndarray
+        QR decomposition of the upper Hessenberg H=QR
+    B : ndarray
+        Projections corresponding to matrix C
+    vs : list of ndarray
+        Columns of matrix V
+    zs : list of ndarray
+        Columns of matrix Z
+    y : ndarray
+        Solution to ||H y - e_1||_2 = min!
+    res : float
+        The final (preconditioned) residual norm
+
+    """
+
+    if lpsolve is None:
+        lpsolve = lambda x: x
+    if rpsolve is None:
+        rpsolve = lambda x: x
+
+    axpy, dot, scal, nrm2 = get_blas_funcs(['axpy', 'dot', 'scal', 'nrm2'], (v0,))
+
+    vs = [v0]
+    zs = []
+    y = None
+    res = np.nan
+
+    m = m + len(outer_v)
+
+    # Orthogonal projection coefficients
+    B = np.zeros((len(cs), m), dtype=v0.dtype)
+
+    # H is stored in QR factorized form
+    Q = np.ones((1, 1), dtype=v0.dtype)
+    R = np.zeros((1, 0), dtype=v0.dtype)
+
+    eps = np.finfo(v0.dtype).eps
+
+    breakdown = False
+
+    # FGMRES Arnoldi process
+    for j in range(m):
+        # L A Z = C B + V H
+
+        if prepend_outer_v and j < len(outer_v):
+            z, w = outer_v[j]
+        elif prepend_outer_v and j == len(outer_v):
+            z = rpsolve(v0)
+            w = None
+        elif not prepend_outer_v and j >= m - len(outer_v):
+            z, w = outer_v[j - (m - len(outer_v))]
+        else:
+            z = rpsolve(vs[-1])
+            w = None
+
+        if w is None:
+            w = lpsolve(matvec(z))
+        else:
+            # w is clobbered below
+            w = w.copy()
+
+        w_norm = nrm2(w)
+
+        # GCROT projection: L A -> (1 - C C^H) L A
+        # i.e. orthogonalize against C
+        for i, c in enumerate(cs):
+            alpha = dot(c, w)
+            B[i,j] = alpha
+            w = axpy(c, w, c.shape[0], -alpha)  # w -= alpha*c
+
+        # Orthogonalize against V
+        hcur = np.zeros(j+2, dtype=Q.dtype)
+        for i, v in enumerate(vs):
+            alpha = dot(v, w)
+            hcur[i] = alpha
+            w = axpy(v, w, v.shape[0], -alpha)  # w -= alpha*v
+        hcur[i+1] = nrm2(w)
+
+        with np.errstate(over='ignore', divide='ignore'):
+            # Careful with denormals
+            alpha = 1/hcur[-1]
+
+        if np.isfinite(alpha):
+            w = scal(alpha, w)
+
+        if not (hcur[-1] > eps * w_norm):
+            # w essentially in the span of previous vectors,
+            # or we have nans. Bail out after updating the QR
+            # solution.
+            breakdown = True
+
+        vs.append(w)
+        zs.append(z)
+
+        # Arnoldi LSQ problem
+
+        # Add new column to H=Q@R, padding other columns with zeros
+        Q2 = np.zeros((j+2, j+2), dtype=Q.dtype, order='F')
+        Q2[:j+1,:j+1] = Q
+        Q2[j+1,j+1] = 1
+
+        R2 = np.zeros((j+2, j), dtype=R.dtype, order='F')
+        R2[:j+1,:] = R
+
+        Q, R = qr_insert(Q2, R2, hcur, j, which='col',
+                         overwrite_qru=True, check_finite=False)
+
+        # Transformed least squares problem
+        # || Q R y - inner_res_0 * e_1 ||_2 = min!
+        # Since R = [R'; 0], solution is y = inner_res_0 (R')^{-1} (Q^H)[:j,0]
+
+        # Residual is immediately known
+        res = abs(Q[0,-1])
+
+        # Check for termination
+        if res < atol or breakdown:
+            break
+
+    if not np.isfinite(R[j,j]):
+        # nans encountered, bail out
+        raise LinAlgError()
+
+    # -- Get the LSQ problem solution
+
+    # The problem is triangular, but the condition number may be
+    # bad (or in case of breakdown the last diagonal entry may be
+    # zero), so use lstsq instead of trtrs.
+    y, _, _, _ = lstsq(R[:j+1,:j+1], Q[0,:j+1].conj())
+
+    B = B[:,:j+1]
+
+    return Q, R, B, vs, zs, y, res
+
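+# Sketch of the invariant returned by _fgmres (inferred from the comments in
+# the loop above, not a formal contract): with V = [vs], Z = [zs] and C the
+# columns in ``cs``,
+#
+#     L A Z = C B + V H,     H = Q @ R
+#
+# and ``y`` minimizes ||e_1 - H y||_2; gcrotmk() below rescales y by beta and
+# assembles its update vectors from these pieces.
+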
+
+def gcrotmk(A, b, x0=None, tol=1e-5, maxiter=1000, M=None, callback=None,
+            m=20, k=None, CU=None, discard_C=False, truncate='oldest',
+            atol=None):
+    """
+    Solve a matrix equation using flexible GCROT(m,k) algorithm.
+
+    Parameters
+    ----------
+    A : {sparse matrix, ndarray, LinearOperator}
+        The real or complex N-by-N matrix of the linear system.
+        Alternatively, ``A`` can be a linear operator which can
+        produce ``Ax`` using, e.g.,
+        ``scipy.sparse.linalg.LinearOperator``.
+    b : ndarray
+        Right hand side of the linear system. Has shape (N,) or (N,1).
+    x0 : ndarray
+        Starting guess for the solution.
+    tol, atol : float, optional
+        Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.
+        The default for ``atol`` is `tol`.
+
+        .. warning::
+
+           The default value for `atol` will be changed in a future release.
+           For future compatibility, specify `atol` explicitly.
+    maxiter : int, optional
+        Maximum number of iterations.  Iteration will stop after maxiter
+        steps even if the specified tolerance has not been achieved.
+    M : {sparse matrix, ndarray, LinearOperator}, optional
+        Preconditioner for A.  The preconditioner should approximate the
+        inverse of A. gcrotmk is a 'flexible' algorithm and the preconditioner
+        can vary from iteration to iteration. Effective preconditioning
+        dramatically improves the rate of convergence, which implies that
+        fewer iterations are needed to reach a given error tolerance.
+    callback : function, optional
+        User-supplied function to call after each iteration.  It is called
+        as callback(xk), where xk is the current solution vector.
+    m : int, optional
+        Number of inner FGMRES iterations per each outer iteration.
+        Default: 20
+    k : int, optional
+        Number of vectors to carry between inner FGMRES iterations.
+        According to [2]_, good values are around m.
+        Default: m
+    CU : list of tuples, optional
+        List of tuples ``(c, u)`` which contain the columns of the matrices
+        C and U in the GCROT(m,k) algorithm. For details, see [2]_.
+        The list given and vectors contained in it are modified in-place.
+        If not given, start from empty matrices. The ``c`` elements in the
+        tuples can be ``None``, in which case the vectors are recomputed
+        via ``c = A u`` on start and orthogonalized as described in [3]_.
+    discard_C : bool, optional
+        Discard the C-vectors at the end. Useful if recycling Krylov subspaces
+        for different linear systems.
+    truncate : {'oldest', 'smallest'}, optional
+        Truncation scheme to use: drop either the oldest vectors, or the
+        vectors with the smallest singular values using the scheme
+        discussed in [1]_ and [2]_.
+        See [2]_ for detailed comparison.
+        Default: 'oldest'
+
+    Returns
+    -------
+    x : ndarray
+        The solution found.
+    info : int
+        Provides convergence information:
+
+        * 0  : successful exit
+        * >0 : convergence to tolerance not achieved, number of iterations
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.sparse import csc_matrix
+    >>> from scipy.sparse.linalg import gcrotmk
+    >>> R = np.random.randn(5, 5)
+    >>> A = csc_matrix(R)
+    >>> b = np.random.randn(5)
+    >>> x, exit_code = gcrotmk(A, b)
+    >>> print(exit_code)
+    0
+    >>> np.allclose(A.dot(x), b)
+    True
+
+    References
+    ----------
+    .. [1] E. de Sturler, ''Truncation strategies for optimal Krylov subspace
+           methods'', SIAM J. Numer. Anal. 36, 864 (1999).
+    .. [2] J.E. Hicken and D.W. Zingg, ''A simplified and flexible variant
+           of GCROT for solving nonsymmetric linear systems'',
+           SIAM J. Sci. Comput. 32, 172 (2010).
+    .. [3] M.L. Parks, E. de Sturler, G. Mackey, D.D. Johnson, S. Maiti,
+           ''Recycling Krylov subspaces for sequences of linear systems'',
+           SIAM J. Sci. Comput. 28, 1651 (2006).
+
+    """
+    A,M,x,b,postprocess = make_system(A,M,x0,b)
+
+    if not np.isfinite(b).all():
+        raise ValueError("RHS must contain only finite numbers")
+
+    if truncate not in ('oldest', 'smallest'):
+        raise ValueError("Invalid value for 'truncate': %r" % (truncate,))
+
+    if atol is None:
+        warnings.warn("scipy.sparse.linalg.gcrotmk called without specifying `atol`. "
+                      "The default value will change in the future. To preserve "
+                      "current behavior, set ``atol=tol``.",
+                      category=DeprecationWarning, stacklevel=2)
+        atol = tol
+
+    matvec = A.matvec
+    psolve = M.matvec
+
+    if CU is None:
+        CU = []
+
+    if k is None:
+        k = m
+
+    axpy, dot, scal = None, None, None
+
+    if x0 is None:
+        r = b.copy()
+    else:
+        r = b - matvec(x)
+
+    axpy, dot, scal, nrm2 = get_blas_funcs(['axpy', 'dot', 'scal', 'nrm2'], (x, r))
+
+    b_norm = nrm2(b)
+    if b_norm == 0:
+        x = b
+        return (postprocess(x), 0)
+
+    if discard_C:
+        CU[:] = [(None, u) for c, u in CU]
+
+    # Reorthogonalize old vectors
+    if CU:
+        # Sort already existing vectors to the front
+        CU.sort(key=lambda cu: cu[0] is not None)
+
+        # Fill-in missing ones
+        C = np.empty((A.shape[0], len(CU)), dtype=r.dtype, order='F')
+        us = []
+        j = 0
+        while CU:
+            # More memory-efficient: throw away old vectors as we go
+            c, u = CU.pop(0)
+            if c is None:
+                c = matvec(u)
+            C[:,j] = c
+            j += 1
+            us.append(u)
+
+        # Orthogonalize
+        Q, R, P = qr(C, overwrite_a=True, mode='economic', pivoting=True)
+        del C
+
+        # C := Q
+        cs = list(Q.T)
+
+        # U := U P R^-1,  back-substitution
+        new_us = []
+        for j in range(len(cs)):
+            u = us[P[j]]
+            for i in range(j):
+                u = axpy(us[P[i]], u, u.shape[0], -R[i,j])
+            if abs(R[j,j]) < 1e-12 * abs(R[0,0]):
+                # discard rest of the vectors
+                break
+            u = scal(1.0/R[j,j], u)
+            new_us.append(u)
+
+        # Form the new CU lists
+        CU[:] = list(zip(cs, new_us))[::-1]
+
+    if CU:
+        axpy, dot = get_blas_funcs(['axpy', 'dot'], (r,))
+
+        # Solve first the projection operation with respect to the CU
+        # vectors. This corresponds to modifying the initial guess to
+        # be
+        #
+        #     x' = x + U y
+        #     y = argmin_y || b - A (x + U y) ||^2
+        #
+        # The solution is y = C^H (b - A x)
+        for c, u in CU:
+            yc = dot(c, r)
+            x = axpy(u, x, x.shape[0], yc)
+            r = axpy(c, r, r.shape[0], -yc)
+
+    # GCROT main iteration
+    for j_outer in range(maxiter):
+        # -- callback
+        if callback is not None:
+            callback(x)
+
+        beta = nrm2(r)
+
+        # -- check stopping condition
+        beta_tol = max(atol, tol * b_norm)
+
+        if beta <= beta_tol and (j_outer > 0 or CU):
+            # recompute residual to avoid rounding error
+            r = b - matvec(x)
+            beta = nrm2(r)
+
+        if beta <= beta_tol:
+            j_outer = -1
+            break
+
+        ml = m + max(k - len(CU), 0)
+
+        cs = [c for c, u in CU]
+
+        try:
+            Q, R, B, vs, zs, y, pres = _fgmres(matvec,
+                                               r/beta,
+                                               ml,
+                                               rpsolve=psolve,
+                                               atol=max(atol, tol*b_norm)/beta,
+                                               cs=cs)
+            y *= beta
+        except LinAlgError:
+            # Floating point over/underflow, non-finite result from
+            # matmul etc. -- report failure.
+            break
+
+        #
+        # At this point,
+        #
+        #     [A U, A Z] = [C, V] G;   G =  [ I  B ]
+        #                                   [ 0  H ]
+        #
+        # where [C, V] has orthonormal columns, and r = beta v_0. Moreover,
+        #
+        #     || b - A (x + Z y + U q) ||_2 = || r - C B y - V H y - C q ||_2 = min!
+        #
+        # from which y = argmin_y || beta e_1 - H y ||_2, and q = -B y
+        #
+
+        #
+        # GCROT(m,k) update
+        #
+
+        # Define new outer vectors
+
+        # ux := (Z - U B) y
+        ux = zs[0]*y[0]
+        for z, yc in zip(zs[1:], y[1:]):
+            ux = axpy(z, ux, ux.shape[0], yc)  # ux += z*yc
+        by = B.dot(y)
+        for cu, byc in zip(CU, by):
+            c, u = cu
+            ux = axpy(u, ux, ux.shape[0], -byc)  # ux -= u*byc
+
+        # cx := V H y
+        hy = Q.dot(R.dot(y))
+        cx = vs[0] * hy[0]
+        for v, hyc in zip(vs[1:], hy[1:]):
+            cx = axpy(v, cx, cx.shape[0], hyc)  # cx += v*hyc
+
+        # Normalize cx, maintaining cx = A ux
+        # This new cx is orthogonal to the previous C, by construction
+        try:
+            alpha = 1/nrm2(cx)
+            if not np.isfinite(alpha):
+                raise FloatingPointError()
+        except (FloatingPointError, ZeroDivisionError):
+            # Cannot update, so skip it
+            continue
+
+        cx = scal(alpha, cx)
+        ux = scal(alpha, ux)
+
+        # Update residual and solution
+        gamma = dot(cx, r)
+        r = axpy(cx, r, r.shape[0], -gamma)  # r -= gamma*cx
+        x = axpy(ux, x, x.shape[0], gamma)  # x += gamma*ux
+
+        # Truncate CU
+        if truncate == 'oldest':
+            while len(CU) >= k and CU:
+                del CU[0]
+        elif truncate == 'smallest':
+            if len(CU) >= k and CU:
+                # cf. [1,2]
+                D = solve(R[:-1,:].T, B.T).T
+                W, sigma, V = svd(D)
+
+                # C := C W[:,:k-1],  U := U W[:,:k-1]
+                new_CU = []
+                for j, w in enumerate(W[:,:k-1].T):
+                    c, u = CU[0]
+                    c = c * w[0]
+                    u = u * w[0]
+                    for cup, wp in zip(CU[1:], w[1:]):
+                        cp, up = cup
+                        c = axpy(cp, c, c.shape[0], wp)
+                        u = axpy(up, u, u.shape[0], wp)
+
+                    # Reorthogonalize at the same time; not necessary
+                    # in exact arithmetic, but floating point error
+                    # tends to accumulate here
+                    for cp, up in new_CU:
+                        alpha = dot(cp, c)
+                        c = axpy(cp, c, c.shape[0], -alpha)
+                        u = axpy(up, u, u.shape[0], -alpha)
+                    alpha = nrm2(c)
+                    c = scal(1.0/alpha, c)
+                    u = scal(1.0/alpha, u)
+
+                    new_CU.append((c, u))
+                CU[:] = new_CU
+
+        # Add new vector to CU
+        CU.append((cx, ux))
+
+    # Include the solution vector to the span
+    CU.append((None, x.copy()))
+    if discard_C:
+        CU[:] = [(None, uz) for cz, uz in CU]
+
+    return postprocess(x), j_outer + 1
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/iterative.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/iterative.py
new file mode 100644
index 00000000..8ab6bbd8
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/iterative.py
@@ -0,0 +1,881 @@
+"""Iterative methods for solving linear systems"""
+
+__all__ = ['bicg','bicgstab','cg','cgs','gmres','qmr']
+
+import warnings
+from textwrap import dedent
+import numpy as np
+
+from . import _iterative
+
+from scipy.sparse.linalg._interface import LinearOperator
+from .utils import make_system
+from scipy._lib._util import _aligned_zeros
+from scipy._lib._threadsafety import non_reentrant
+
+_type_conv = {'f':'s', 'd':'d', 'F':'c', 'D':'z'}
+
+
+# Part of the docstring common to all iterative solvers
+common_doc1 = \
+"""
+Parameters
+----------
+A : {sparse matrix, ndarray, LinearOperator}"""
+
+common_doc2 = \
+"""b : ndarray
+    Right hand side of the linear system. Has shape (N,) or (N,1).
+
+Returns
+-------
+x : ndarray
+    The converged solution.
+info : integer
+    Provides convergence information:
+        0  : successful exit
+        >0 : convergence to tolerance not achieved, number of iterations
+        <0 : illegal input or breakdown
+
+Other Parameters
+----------------
+x0 : ndarray
+    Starting guess for the solution.
+tol, atol : float, optional
+    Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.
+    The default for ``atol`` is ``'legacy'``, which emulates
+    a different legacy behavior.
+
+    .. warning::
+
+       The default value for `atol` will be changed in a future release.
+       For future compatibility, specify `atol` explicitly.
+maxiter : integer
+    Maximum number of iterations.  Iteration will stop after maxiter
+    steps even if the specified tolerance has not been achieved.
+M : {sparse matrix, ndarray, LinearOperator}
+    Preconditioner for A.  The preconditioner should approximate the
+    inverse of A.  Effective preconditioning dramatically improves the
+    rate of convergence, which implies that fewer iterations are needed
+    to reach a given error tolerance.
+callback : function
+    User-supplied function to call after each iteration.  It is called
+    as callback(xk), where xk is the current solution vector.
+"""
+
+
+def _stoptest(residual, atol):
+    """
+    Successful termination condition for the solvers.
+    """
+    resid = np.linalg.norm(residual)
+    if resid <= atol:
+        return resid, 1
+    else:
+        return resid, 0
+
+
+def _get_atol(tol, atol, bnrm2, get_residual, routine_name):
+    """
+    Parse arguments for absolute tolerance in termination condition.
+
+    Parameters
+    ----------
+    tol, atol : object
+        The arguments passed into the solver routine by user.
+    bnrm2 : float
+        2-norm of the rhs vector.
+    get_residual : callable
+        Callable ``get_residual()`` that returns the initial value of
+        the residual.
+    routine_name : str
+        Name of the routine.
+    """
+
+    if atol is None:
+        warnings.warn("scipy.sparse.linalg.{name} called without specifying `atol`. "
+                      "The default value will be changed in a future release. "
+                      "For compatibility, specify a value for `atol` explicitly, e.g., "
+                      "``{name}(..., atol=0)``, or to retain the old behavior "
+                      "``{name}(..., atol='legacy')``".format(name=routine_name),
+                      category=DeprecationWarning, stacklevel=4)
+        atol = 'legacy'
+
+    tol = float(tol)
+
+    if atol == 'legacy':
+        # emulate old legacy behavior
+        resid = get_residual()
+        if resid <= tol:
+            return 'exit'
+        if bnrm2 == 0:
+            return tol
+        else:
+            return tol * float(bnrm2)
+    else:
+        return max(float(atol), tol * float(bnrm2))
+
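+# Worked example of the rule above (illustrative numbers): with tol=1e-5,
+# atol=1e-8 and ||b|| = 100, termination requires
+#
+#     norm(residual) <= max(1e-8, 1e-5 * 100) == 1e-3
+#
+# i.e. the relative term dominates, while atol='legacy' reproduces the
+# historical purely relative test against tol * ||b||.
+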
+
+def set_docstring(header, Ainfo, footer='', atol_default='0'):
+    def combine(fn):
+        fn.__doc__ = '\n'.join((header, common_doc1,
+                                '    ' + Ainfo.replace('\n', '\n    '),
+                                common_doc2, dedent(footer)))
+        return fn
+    return combine
+
+
+@set_docstring('Use BIConjugate Gradient iteration to solve ``Ax = b``.',
+               'The real or complex N-by-N matrix of the linear system.\n'
+               'Alternatively, ``A`` can be a linear operator which can\n'
+               'produce ``Ax`` and ``A^T x`` using, e.g.,\n'
+               '``scipy.sparse.linalg.LinearOperator``.',
+               footer="""\
+               Examples
+               --------
+               >>> import numpy as np
+               >>> from scipy.sparse import csc_matrix
+               >>> from scipy.sparse.linalg import bicg
+               >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
+               >>> b = np.array([2, 4, -1], dtype=float)
+               >>> x, exitCode = bicg(A, b)
+               >>> print(exitCode)            # 0 indicates successful convergence
+               0
+               >>> np.allclose(A.dot(x), b)
+               True
+
+               """
+               )
+@non_reentrant()
+def bicg(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None, atol=None):
+    A,M,x,b,postprocess = make_system(A, M, x0, b)
+
+    n = len(b)
+    if maxiter is None:
+        maxiter = n*10
+
+    matvec, rmatvec = A.matvec, A.rmatvec
+    psolve, rpsolve = M.matvec, M.rmatvec
+    ltr = _type_conv[x.dtype.char]
+    revcom = getattr(_iterative, ltr + 'bicgrevcom')
+
+    get_residual = lambda: np.linalg.norm(matvec(x) - b)
+    atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'bicg')
+    if atol == 'exit':
+        return postprocess(x), 0
+
+    resid = atol
+    ndx1 = 1
+    ndx2 = -1
+    # Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
+    work = _aligned_zeros(6*n,dtype=x.dtype)
+    ijob = 1
+    info = 0
+    ftflag = True
+    iter_ = maxiter
+    while True:
+        olditer = iter_
+        x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
+           revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
+        if callback is not None and iter_ > olditer:
+            callback(x)
+        slice1 = slice(ndx1-1, ndx1-1+n)
+        slice2 = slice(ndx2-1, ndx2-1+n)
+        if (ijob == -1):
+            if callback is not None:
+                callback(x)
+            break
+        elif (ijob == 1):
+            work[slice2] *= sclr2
+            work[slice2] += sclr1*matvec(work[slice1])
+        elif (ijob == 2):
+            work[slice2] *= sclr2
+            work[slice2] += sclr1*rmatvec(work[slice1])
+        elif (ijob == 3):
+            work[slice1] = psolve(work[slice2])
+        elif (ijob == 4):
+            work[slice1] = rpsolve(work[slice2])
+        elif (ijob == 5):
+            work[slice2] *= sclr2
+            work[slice2] += sclr1*matvec(x)
+        elif (ijob == 6):
+            if ftflag:
+                info = -1
+                ftflag = False
+            resid, info = _stoptest(work[slice1], atol)
+        ijob = 2
+
+    if info > 0 and iter_ == maxiter and not (resid <= atol):
+        # info isn't set appropriately otherwise
+        info = iter_
+
+    return postprocess(x), info
+
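+# The loop in bicg above is a reverse-communication driver: the Fortran
+# *revcom routine hands control back whenever it needs an operation it cannot
+# perform itself, encoded in ``ijob`` (convention inferred from the dispatch
+# above):
+#
+#   -1 : iteration finished
+#    1 : work[slice2] = sclr2*work[slice2] + sclr1 * (A @ work[slice1])
+#    2 : work[slice2] = sclr2*work[slice2] + sclr1 * (A^H @ work[slice1])
+#    3 : work[slice1] = M^{-1} @ work[slice2]   (preconditioner solve)
+#    4 : work[slice1] = M^{-H} @ work[slice2]
+#    5 : work[slice2] = sclr2*work[slice2] + sclr1 * (A @ x)
+#    6 : stopping test on work[slice1]
+#
+# bicgstab/cg/cgs below follow the same pattern with solver-specific codes.
+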
+
+@set_docstring('Use BIConjugate Gradient STABilized iteration to solve '
+               '``Ax = b``.',
+               'The real or complex N-by-N matrix of the linear system.\n'
+               'Alternatively, ``A`` can be a linear operator which can\n'
+               'produce ``Ax`` using, e.g.,\n'
+               '``scipy.sparse.linalg.LinearOperator``.',
+               footer="""\
+               Examples
+               --------
+               >>> import numpy as np
+               >>> from scipy.sparse import csc_matrix
+               >>> from scipy.sparse.linalg import bicgstab
+               >>> R = np.array([[4, 2, 0, 1],
+               ...               [3, 0, 0, 2],
+               ...               [0, 1, 1, 1],
+               ...               [0, 2, 1, 0]])
+               >>> A = csc_matrix(R)
+               >>> b = np.array([-1, -0.5, -1, 2])
+               >>> x, exit_code = bicgstab(A, b)
+               >>> print(exit_code)  # 0 indicates successful convergence
+               0
+               >>> np.allclose(A.dot(x), b)
+               True
+               """)
+@non_reentrant()
+def bicgstab(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None, atol=None):
+    A, M, x, b, postprocess = make_system(A, M, x0, b)
+
+    n = len(b)
+    if maxiter is None:
+        maxiter = n*10
+
+    matvec = A.matvec
+    psolve = M.matvec
+    ltr = _type_conv[x.dtype.char]
+    revcom = getattr(_iterative, ltr + 'bicgstabrevcom')
+
+    get_residual = lambda: np.linalg.norm(matvec(x) - b)
+    atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'bicgstab')
+    if atol == 'exit':
+        return postprocess(x), 0
+
+    resid = atol
+    ndx1 = 1
+    ndx2 = -1
+    # Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
+    work = _aligned_zeros(7*n,dtype=x.dtype)
+    ijob = 1
+    info = 0
+    ftflag = True
+    iter_ = maxiter
+    while True:
+        olditer = iter_
+        x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
+           revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
+        if callback is not None and iter_ > olditer:
+            callback(x)
+        slice1 = slice(ndx1-1, ndx1-1+n)
+        slice2 = slice(ndx2-1, ndx2-1+n)
+        if (ijob == -1):
+            if callback is not None:
+                callback(x)
+            break
+        elif (ijob == 1):
+            work[slice2] *= sclr2
+            work[slice2] += sclr1*matvec(work[slice1])
+        elif (ijob == 2):
+            work[slice1] = psolve(work[slice2])
+        elif (ijob == 3):
+            work[slice2] *= sclr2
+            work[slice2] += sclr1*matvec(x)
+        elif (ijob == 4):
+            if ftflag:
+                info = -1
+                ftflag = False
+            resid, info = _stoptest(work[slice1], atol)
+        ijob = 2
+
+    if info > 0 and iter_ == maxiter and not (resid <= atol):
+        # info isn't set appropriately otherwise
+        info = iter_
+
+    return postprocess(x), info
+
+
+@set_docstring('Use Conjugate Gradient iteration to solve ``Ax = b``.',
+               'The real or complex N-by-N matrix of the linear system.\n'
+               '``A`` must represent a hermitian, positive definite matrix.\n'
+               'Alternatively, ``A`` can be a linear operator which can\n'
+               'produce ``Ax`` using, e.g.,\n'
+               '``scipy.sparse.linalg.LinearOperator``.',
+               footer="""\
+               Examples
+               --------
+               >>> import numpy as np
+               >>> from scipy.sparse import csc_matrix
+               >>> from scipy.sparse.linalg import cg
+               >>> P = np.array([[4, 0, 1, 0],
+               ...               [0, 5, 0, 0],
+               ...               [1, 0, 3, 2],
+               ...               [0, 0, 2, 4]])
+               >>> A = csc_matrix(P)
+               >>> b = np.array([-1, -0.5, -1, 2])
+               >>> x, exit_code = cg(A, b)
+               >>> print(exit_code)    # 0 indicates successful convergence
+               0
+               >>> np.allclose(A.dot(x), b)
+               True
+
+               """)
+@non_reentrant()
+def cg(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None, atol=None):
+    A, M, x, b, postprocess = make_system(A, M, x0, b)
+
+    n = len(b)
+    if maxiter is None:
+        maxiter = n*10
+
+    matvec = A.matvec
+    psolve = M.matvec
+    ltr = _type_conv[x.dtype.char]
+    revcom = getattr(_iterative, ltr + 'cgrevcom')
+
+    get_residual = lambda: np.linalg.norm(matvec(x) - b)
+    atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'cg')
+    if atol == 'exit':
+        return postprocess(x), 0
+
+    resid = atol
+    ndx1 = 1
+    ndx2 = -1
+    # Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
+    work = _aligned_zeros(4*n,dtype=x.dtype)
+    ijob = 1
+    info = 0
+    ftflag = True
+    iter_ = maxiter
+    while True:
+        olditer = iter_
+        x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
+           revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
+        if callback is not None and iter_ > olditer:
+            callback(x)
+        slice1 = slice(ndx1-1, ndx1-1+n)
+        slice2 = slice(ndx2-1, ndx2-1+n)
+        if (ijob == -1):
+            if callback is not None:
+                callback(x)
+            break
+        elif (ijob == 1):
+            work[slice2] *= sclr2
+            work[slice2] += sclr1*matvec(work[slice1])
+        elif (ijob == 2):
+            work[slice1] = psolve(work[slice2])
+        elif (ijob == 3):
+            work[slice2] *= sclr2
+            work[slice2] += sclr1*matvec(x)
+        elif (ijob == 4):
+            if ftflag:
+                info = -1
+                ftflag = False
+            resid, info = _stoptest(work[slice1], atol)
+            if info == 1 and iter_ > 1:
+                # recompute residual and recheck, to avoid
+                # accumulating rounding error
+                work[slice1] = b - matvec(x)
+                resid, info = _stoptest(work[slice1], atol)
+        ijob = 2
+
+    if info > 0 and iter_ == maxiter and not (resid <= atol):
+        # info isn't set appropriately otherwise
+        info = iter_
+
+    return postprocess(x), info
+
+
+@set_docstring('Use Conjugate Gradient Squared iteration to solve ``Ax = b``.',
+               'The real-valued N-by-N matrix of the linear system.\n'
+               'Alternatively, ``A`` can be a linear operator which can\n'
+               'produce ``Ax`` using, e.g.,\n'
+               '``scipy.sparse.linalg.LinearOperator``.',
+               footer="""\
+               Examples
+               --------
+               >>> import numpy as np
+               >>> from scipy.sparse import csc_matrix
+               >>> from scipy.sparse.linalg import cgs
+               >>> R = np.array([[4, 2, 0, 1],
+               ...               [3, 0, 0, 2],
+               ...               [0, 1, 1, 1],
+               ...               [0, 2, 1, 0]])
+               >>> A = csc_matrix(R)
+               >>> b = np.array([-1, -0.5, -1, 2])
+               >>> x, exit_code = cgs(A, b)
+               >>> print(exit_code)  # 0 indicates successful convergence
+               0
+               >>> np.allclose(A.dot(x), b)
+               True
+               """
+               )
+@non_reentrant()
+def cgs(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None, atol=None):
+    A, M, x, b, postprocess = make_system(A, M, x0, b)
+
+    n = len(b)
+    if maxiter is None:
+        maxiter = n*10
+
+    matvec = A.matvec
+    psolve = M.matvec
+    ltr = _type_conv[x.dtype.char]
+    revcom = getattr(_iterative, ltr + 'cgsrevcom')
+
+    get_residual = lambda: np.linalg.norm(matvec(x) - b)
+    atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'cgs')
+    if atol == 'exit':
+        return postprocess(x), 0
+
+    resid = atol
+    ndx1 = 1
+    ndx2 = -1
+    # Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
+    work = _aligned_zeros(7*n,dtype=x.dtype)
+    ijob = 1
+    info = 0
+    ftflag = True
+    iter_ = maxiter
+    while True:
+        olditer = iter_
+        x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
+           revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
+        if callback is not None and iter_ > olditer:
+            callback(x)
+        slice1 = slice(ndx1-1, ndx1-1+n)
+        slice2 = slice(ndx2-1, ndx2-1+n)
+        if (ijob == -1):
+            if callback is not None:
+                callback(x)
+            break
+        elif (ijob == 1):
+            work[slice2] *= sclr2
+            work[slice2] += sclr1*matvec(work[slice1])
+        elif (ijob == 2):
+            work[slice1] = psolve(work[slice2])
+        elif (ijob == 3):
+            work[slice2] *= sclr2
+            work[slice2] += sclr1*matvec(x)
+        elif (ijob == 4):
+            if ftflag:
+                info = -1
+                ftflag = False
+            resid, info = _stoptest(work[slice1], atol)
+            if info == 1 and iter_ > 1:
+                # recompute residual and recheck, to avoid
+                # accumulating rounding error
+                work[slice1] = b - matvec(x)
+                resid, info = _stoptest(work[slice1], atol)
+        ijob = 2
+
+    if info == -10:
+        # termination due to breakdown: check for convergence
+        resid, ok = _stoptest(b - matvec(x), atol)
+        if ok:
+            info = 0
+
+    if info > 0 and iter_ == maxiter and not (resid <= atol):
+        # info isn't set appropriately otherwise
+        info = iter_
+
+    return postprocess(x), info
+
+
+@non_reentrant()
+def gmres(A, b, x0=None, tol=1e-5, restart=None, maxiter=None, M=None, callback=None,
+          restrt=None, atol=None, callback_type=None):
+    """
+    Use Generalized Minimal RESidual iteration to solve ``Ax = b``.
+
+    Parameters
+    ----------
+    A : {sparse matrix, ndarray, LinearOperator}
+        The real or complex N-by-N matrix of the linear system.
+        Alternatively, ``A`` can be a linear operator which can
+        produce ``Ax`` using, e.g.,
+        ``scipy.sparse.linalg.LinearOperator``.
+    b : ndarray
+        Right hand side of the linear system. Has shape (N,) or (N,1).
+
+    Returns
+    -------
+    x : ndarray
+        The converged solution.
+    info : int
+        Provides convergence information:
+          * 0  : successful exit
+          * >0 : convergence to tolerance not achieved, number of iterations
+          * <0 : illegal input or breakdown
+
+    Other parameters
+    ----------------
+    x0 : ndarray
+        Starting guess for the solution (a vector of zeros by default).
+    tol, atol : float, optional
+        Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.
+        The default for ``atol`` is ``'legacy'``, which emulates
+        a different legacy behavior.
+
+        .. warning::
+
+           The default value for `atol` will be changed in a future release.
+           For future compatibility, specify `atol` explicitly.
+    restart : int, optional
+        Number of iterations between restarts. Larger values increase
+        iteration cost, but may be necessary for convergence.
+        Default is 20.
+    maxiter : int, optional
+        Maximum number of iterations (restart cycles).  Iteration will stop
+        after maxiter steps even if the specified tolerance has not been
+        achieved.
+    M : {sparse matrix, ndarray, LinearOperator}
+        Inverse of the preconditioner of A.  M should approximate the
+        inverse of A and be easy to solve for (see Notes).  Effective
+        preconditioning dramatically improves the rate of convergence,
+        which implies that fewer iterations are needed to reach a given
+        error tolerance.  By default, no preconditioner is used.
+    callback : function
+        User-supplied function to call after each iteration.  It is called
+        as `callback(args)`, where `args` are selected by `callback_type`.
+    callback_type : {'x', 'pr_norm', 'legacy'}, optional
+        Callback function argument requested:
+          - ``x``: current iterate (ndarray), called on every restart
+          - ``pr_norm``: relative (preconditioned) residual norm (float),
+            called on every inner iteration
+          - ``legacy`` (default): same as ``pr_norm``, but also changes the
+            meaning of 'maxiter' to count inner iterations instead of restart
+            cycles.
+    restrt : int, optional, deprecated
+
+        .. deprecated:: 0.11.0
+           `gmres` keyword argument `restrt` is deprecated in favour of
+           `restart` and will be removed in SciPy 1.12.0.
+
+    See Also
+    --------
+    LinearOperator
+
+    Notes
+    -----
+    A preconditioner, P, is chosen such that P is close to A but easy to solve
+    for. The preconditioner parameter required by this routine is
+    ``M = P^-1``. The inverse should preferably not be calculated
+    explicitly.  Rather, use the following template to produce M::
+
+      # Construct a linear operator that computes P^-1 @ x.
+      import scipy.sparse.linalg as spla
+      M_x = lambda x: spla.spsolve(P, x)
+      M = spla.LinearOperator((n, n), M_x)
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.sparse import csc_matrix
+    >>> from scipy.sparse.linalg import gmres
+    >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
+    >>> b = np.array([2, 4, -1], dtype=float)
+    >>> x, exitCode = gmres(A, b)
+    >>> print(exitCode)            # 0 indicates successful convergence
+    0
+    >>> np.allclose(A.dot(x), b)
+    True
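+
+    A sketch of convergence monitoring: with ``callback_type='pr_norm'``,
+    the callback receives the relative (preconditioned) residual norm on
+    every inner iteration.
+
+    >>> residuals = []
+    >>> x, exitCode = gmres(A, b, callback=residuals.append,
+    ...                     callback_type='pr_norm')
+    >>> print(exitCode)
+    0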
+    """
+
+    # Change 'restrt' keyword to 'restart'
+    if restrt is None:
+        restrt = restart
+    elif restart is not None:
+        raise ValueError("Cannot specify both restart and restrt keywords. "
+                         "Preferably use 'restart' only.")
+    else:
+        msg = ("'gmres' keyword argument 'restrt' is deprecated infavour of "
+               "'restart' and will be removed in SciPy 1.12.0.")
+        warnings.warn(msg, DeprecationWarning, stacklevel=2)
+
+    if callback is not None and callback_type is None:
+        # Warn about 'callback_type' semantic changes.
+        # Probably should be removed only in far future, Scipy 2.0 or so.
+        warnings.warn("scipy.sparse.linalg.gmres called without specifying `callback_type`. "
+                      "The default value will be changed in a future release. "
+                      "For compatibility, specify a value for `callback_type` explicitly, e.g., "
+                      "``{name}(..., callback_type='pr_norm')``, or to retain the old behavior "
+                      "``{name}(..., callback_type='legacy')``",
+                      category=DeprecationWarning, stacklevel=3)
+
+    if callback_type is None:
+        callback_type = 'legacy'
+
+    if callback_type not in ('x', 'pr_norm', 'legacy'):
+        raise ValueError("Unknown callback_type: {!r}".format(callback_type))
+
+    if callback is None:
+        callback_type = 'none'
+
+    A, M, x, b, postprocess = make_system(A, M, x0, b)
+
+    n = len(b)
+    if maxiter is None:
+        maxiter = n*10
+
+    if restrt is None:
+        restrt = 20
+    restrt = min(restrt, n)
+
+    matvec = A.matvec
+    psolve = M.matvec
+    ltr = _type_conv[x.dtype.char]
+    revcom = getattr(_iterative, ltr + 'gmresrevcom')
+
+    bnrm2 = np.linalg.norm(b)
+    Mb_nrm2 = np.linalg.norm(psolve(b))
+    get_residual = lambda: np.linalg.norm(matvec(x) - b)
+    atol = _get_atol(tol, atol, bnrm2, get_residual, 'gmres')
+    if atol == 'exit':
+        return postprocess(x), 0
+
+    if bnrm2 == 0:
+        return postprocess(b), 0
+
+    # Tolerance passed to GMRESREVCOM applies to the inner iteration
+    # and deals with the left-preconditioned residual.
+    ptol_max_factor = 1.0
+    ptol = Mb_nrm2 * min(ptol_max_factor, atol / bnrm2)
+    resid = np.nan
+    presid = np.nan
+    ndx1 = 1
+    ndx2 = -1
+    # Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
+    work = _aligned_zeros((6+restrt)*n, dtype=x.dtype)
+    work2 = _aligned_zeros((restrt+1)*(2*restrt+2), dtype=x.dtype)
+    ijob = 1
+    info = 0
+    ftflag = True
+    iter_ = maxiter
+    old_ijob = ijob
+    first_pass = True
+    resid_ready = False
+    iter_num = 1
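+    # Reverse-communication loop: ijob == 1 requests A@x for the
+    # residual, 2 a preconditioner solve, 3 A applied to a work vector,
+    # and 4 the stopping test; ijob == -1 signals termination.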
+    while True:
+        olditer = iter_
+        x, iter_, presid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
+           revcom(b, x, restrt, work, work2, iter_, presid, info, ndx1, ndx2, ijob, ptol)
+        if callback_type == 'x' and iter_ != olditer:
+            callback(x)
+        slice1 = slice(ndx1-1, ndx1-1+n)
+        slice2 = slice(ndx2-1, ndx2-1+n)
+        if (ijob == -1):  # gmres success, update last residual
+            if callback_type in ('pr_norm', 'legacy'):
+                if resid_ready:
+                    callback(presid / bnrm2)
+            elif callback_type == 'x':
+                callback(x)
+            break
+        elif (ijob == 1):
+            work[slice2] *= sclr2
+            work[slice2] += sclr1*matvec(x)
+        elif (ijob == 2):
+            work[slice1] = psolve(work[slice2])
+            if not first_pass and old_ijob == 3:
+                resid_ready = True
+
+            first_pass = False
+        elif (ijob == 3):
+            work[slice2] *= sclr2
+            work[slice2] += sclr1*matvec(work[slice1])
+            if resid_ready:
+                if callback_type in ('pr_norm', 'legacy'):
+                    callback(presid / bnrm2)
+                resid_ready = False
+                iter_num = iter_num+1
+
+        elif (ijob == 4):
+            if ftflag:
+                info = -1
+                ftflag = False
+            resid, info = _stoptest(work[slice1], atol)
+
+            # Inner loop tolerance control
+            if info or presid > ptol:
+                ptol_max_factor = min(1.0, 1.5 * ptol_max_factor)
+            else:
+                # Inner loop tolerance OK, but outer loop not.
+                ptol_max_factor = max(1e-16, 0.25 * ptol_max_factor)
+
+            if resid != 0:
+                ptol = presid * min(ptol_max_factor, atol / resid)
+            else:
+                ptol = presid * ptol_max_factor
+
+        old_ijob = ijob
+        ijob = 2
+
+        if callback_type == 'legacy':
+            # Legacy behavior
+            if iter_num > maxiter:
+                info = maxiter
+                break
+
+    if info >= 0 and not (resid <= atol):
+        # info isn't set appropriately otherwise
+        info = maxiter
+
+    return postprocess(x), info
+
+
+@non_reentrant()
+def qmr(A, b, x0=None, tol=1e-5, maxiter=None, M1=None, M2=None, callback=None,
+        atol=None):
+    """Use Quasi-Minimal Residual iteration to solve ``Ax = b``.
+
+    Parameters
+    ----------
+    A : {sparse matrix, ndarray, LinearOperator}
+        The real-valued N-by-N matrix of the linear system.
+        Alternatively, ``A`` can be a linear operator which can
+        produce ``Ax`` and ``A^T x`` using, e.g.,
+        ``scipy.sparse.linalg.LinearOperator``.
+    b : ndarray
+        Right hand side of the linear system. Has shape (N,) or (N,1).
+
+    Returns
+    -------
+    x : ndarray
+        The converged solution.
+    info : integer
+        Provides convergence information:
+            0  : successful exit
+            >0 : convergence to tolerance not achieved, number of iterations
+            <0 : illegal input or breakdown
+
+    Other Parameters
+    ----------------
+    x0 : ndarray
+        Starting guess for the solution.
+    tol, atol : float, optional
+        Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.
+        The default for ``atol`` is ``'legacy'``, which emulates
+        a different legacy behavior.
+
+        .. warning::
+
+           The default value for `atol` will be changed in a future release.
+           For future compatibility, specify `atol` explicitly.
+    maxiter : integer
+        Maximum number of iterations.  Iteration will stop after maxiter
+        steps even if the specified tolerance has not been achieved.
+    M1 : {sparse matrix, ndarray, LinearOperator}
+        Left preconditioner for A.
+    M2 : {sparse matrix, ndarray, LinearOperator}
+        Right preconditioner for A. Used together with the left
+        preconditioner M1.  The matrix M1@A@M2 should be better
+        conditioned than A alone.
+    callback : function
+        User-supplied function to call after each iteration.  It is called
+        as callback(xk), where xk is the current solution vector.
+
+    See Also
+    --------
+    LinearOperator
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.sparse import csc_matrix
+    >>> from scipy.sparse.linalg import qmr
+    >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
+    >>> b = np.array([2, 4, -1], dtype=float)
+    >>> x, exitCode = qmr(A, b)
+    >>> print(exitCode)            # 0 indicates successful convergence
+    0
+    >>> np.allclose(A.dot(x), b)
+    True
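+
+    A sketch with a linear operator: `qmr` needs both ``matvec`` and
+    ``rmatvec``, since the algorithm uses products with both A and its
+    transpose.
+
+    >>> from scipy.sparse.linalg import LinearOperator
+    >>> Aop = LinearOperator(A.shape, matvec=A.dot,
+    ...                      rmatvec=lambda v: A.T.dot(v))
+    >>> x, exitCode = qmr(Aop, b)
+    >>> print(exitCode)
+    0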
+    """
+    A_ = A
+    A, M, x, b, postprocess = make_system(A, None, x0, b)
+
+    if M1 is None and M2 is None:
+        if hasattr(A_,'psolve'):
+            def left_psolve(b):
+                return A_.psolve(b,'left')
+
+            def right_psolve(b):
+                return A_.psolve(b,'right')
+
+            def left_rpsolve(b):
+                return A_.rpsolve(b,'left')
+
+            def right_rpsolve(b):
+                return A_.rpsolve(b,'right')
+            M1 = LinearOperator(A.shape, matvec=left_psolve, rmatvec=left_rpsolve)
+            M2 = LinearOperator(A.shape, matvec=right_psolve, rmatvec=right_rpsolve)
+        else:
+            def id(b):
+                return b
+            M1 = LinearOperator(A.shape, matvec=id, rmatvec=id)
+            M2 = LinearOperator(A.shape, matvec=id, rmatvec=id)
+
+    n = len(b)
+    if maxiter is None:
+        maxiter = n*10
+
+    ltr = _type_conv[x.dtype.char]
+    revcom = getattr(_iterative, ltr + 'qmrrevcom')
+
+    get_residual = lambda: np.linalg.norm(A.matvec(x) - b)
+    atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'qmr')
+    if atol == 'exit':
+        return postprocess(x), 0
+
+    resid = atol
+    ndx1 = 1
+    ndx2 = -1
+    # Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
+    work = _aligned_zeros(11*n, x.dtype)
+    ijob = 1
+    info = 0
+    ftflag = True
+    iter_ = maxiter
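+    # Reverse-communication loop: ijob codes 1-2 request products with A
+    # and A^T, 3-6 applications of M1, M2 and their adjoints, 7 the
+    # residual matvec, and 8 the stopping test; ijob == -1 signals
+    # termination.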
+    while True:
+        olditer = iter_
+        x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
+           revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
+        if callback is not None and iter_ > olditer:
+            callback(x)
+        slice1 = slice(ndx1-1, ndx1-1+n)
+        slice2 = slice(ndx2-1, ndx2-1+n)
+        if (ijob == -1):
+            if callback is not None:
+                callback(x)
+            break
+        elif (ijob == 1):
+            work[slice2] *= sclr2
+            work[slice2] += sclr1*A.matvec(work[slice1])
+        elif (ijob == 2):
+            work[slice2] *= sclr2
+            work[slice2] += sclr1*A.rmatvec(work[slice1])
+        elif (ijob == 3):
+            work[slice1] = M1.matvec(work[slice2])
+        elif (ijob == 4):
+            work[slice1] = M2.matvec(work[slice2])
+        elif (ijob == 5):
+            work[slice1] = M1.rmatvec(work[slice2])
+        elif (ijob == 6):
+            work[slice1] = M2.rmatvec(work[slice2])
+        elif (ijob == 7):
+            work[slice2] *= sclr2
+            work[slice2] += sclr1*A.matvec(x)
+        elif (ijob == 8):
+            if ftflag:
+                info = -1
+                ftflag = False
+            resid, info = _stoptest(work[slice1], atol)
+        ijob = 2
+
+    if info > 0 and iter_ == maxiter and not (resid <= atol):
+        # info isn't set appropriately otherwise
+        info = iter_
+
+    return postprocess(x), info
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/lgmres.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/lgmres.py
new file mode 100644
index 00000000..54e8f147
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/lgmres.py
@@ -0,0 +1,237 @@
+# Copyright (C) 2009, Pauli Virtanen 
+# Distributed under the same license as SciPy.
+
+import warnings
+import numpy as np
+from numpy.linalg import LinAlgError
+from scipy.linalg import get_blas_funcs
+from .utils import make_system
+
+from ._gcrotmk import _fgmres
+
+__all__ = ['lgmres']
+
+
+def lgmres(A, b, x0=None, tol=1e-5, maxiter=1000, M=None, callback=None,
+           inner_m=30, outer_k=3, outer_v=None, store_outer_Av=True,
+           prepend_outer_v=False, atol=None):
+    """
+    Solve a matrix equation using the LGMRES algorithm.
+
+    The LGMRES algorithm [1]_ [2]_ is designed to avoid some problems
+    in the convergence of restarted GMRES, and often converges in fewer
+    iterations.
+
+    Parameters
+    ----------
+    A : {sparse matrix, ndarray, LinearOperator}
+        The real or complex N-by-N matrix of the linear system.
+        Alternatively, ``A`` can be a linear operator which can
+        produce ``Ax`` using, e.g.,
+        ``scipy.sparse.linalg.LinearOperator``.
+    b : ndarray
+        Right hand side of the linear system. Has shape (N,) or (N,1).
+    x0 : ndarray
+        Starting guess for the solution.
+    tol, atol : float, optional
+        Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.
+        The default for ``atol`` is `tol`.
+
+        .. warning::
+
+           The default value for `atol` will be changed in a future release.
+           For future compatibility, specify `atol` explicitly.
+    maxiter : int, optional
+        Maximum number of iterations.  Iteration will stop after maxiter
+        steps even if the specified tolerance has not been achieved.
+    M : {sparse matrix, ndarray, LinearOperator}, optional
+        Preconditioner for A.  The preconditioner should approximate the
+        inverse of A.  Effective preconditioning dramatically improves the
+        rate of convergence, which implies that fewer iterations are needed
+        to reach a given error tolerance.
+    callback : function, optional
+        User-supplied function to call after each iteration.  It is called
+        as callback(xk), where xk is the current solution vector.
+    inner_m : int, optional
+        Number of inner GMRES iterations per each outer iteration.
+    outer_k : int, optional
+        Number of vectors to carry between inner GMRES iterations.
+        According to [1]_, good values are in the range of 1...3.
+        However, note that if you want to use the additional vectors to
+        accelerate solving multiple similar problems, larger values may
+        be beneficial.
+    outer_v : list of tuples, optional
+        List containing tuples ``(v, Av)`` of vectors and corresponding
+        matrix-vector products, used to augment the Krylov subspace, and
+        carried between inner GMRES iterations. The element ``Av`` can
+        be `None` if the matrix-vector product should be re-evaluated.
+        This parameter is modified in-place by `lgmres`, and can be used
+        to pass "guess" vectors in and out of the algorithm when solving
+        similar problems.
+    store_outer_Av : bool, optional
+        Whether LGMRES should also store A@v in addition to the vectors `v`
+        in the `outer_v` list. Default is True.
+    prepend_outer_v : bool, optional
+        Whether to put outer_v augmentation vectors before Krylov iterates.
+        In standard LGMRES, prepend_outer_v=False.
+
+    Returns
+    -------
+    x : ndarray
+        The converged solution.
+    info : int
+        Provides convergence information:
+
+            - 0  : successful exit
+            - >0 : convergence to tolerance not achieved, number of iterations
+            - <0 : illegal input or breakdown
+
+    Notes
+    -----
+    The LGMRES algorithm [1]_ [2]_ is designed to avoid the
+    slowing of convergence in restarted GMRES, due to alternating
+    residual vectors. It typically outperforms GMRES(m) of comparable
+    memory requirements, or at least is not much worse.
+
+    Another advantage of this algorithm is that you can supply it with
+    'guess' vectors in the `outer_v` argument that augment the Krylov
+    subspace. If the solution lies close to the span of these vectors,
+    the algorithm converges faster. This can be useful if several very
+    similar matrices need to be inverted one after another, such as in
+    Newton-Krylov iteration where the Jacobian matrix often changes
+    little in the nonlinear steps.
+
+    References
+    ----------
+    .. [1] A.H. Baker and E.R. Jessup and T. Manteuffel, "A Technique for
+             Accelerating the Convergence of Restarted GMRES", SIAM J. Matrix
+             Anal. Appl. 26, 962 (2005).
+    .. [2] A.H. Baker, "On Improving the Performance of the Linear Solver
+             restarted GMRES", PhD thesis, University of Colorado (2003).
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.sparse import csc_matrix
+    >>> from scipy.sparse.linalg import lgmres
+    >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
+    >>> b = np.array([2, 4, -1], dtype=float)
+    >>> x, exitCode = lgmres(A, b)
+    >>> print(exitCode)            # 0 indicates successful convergence
+    0
+    >>> np.allclose(A.dot(x), b)
+    True
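+
+    A sketch of carrying augmentation vectors between solves of nearby
+    systems: `outer_v` is filled in-place by the first call and can be
+    passed to the next one.
+
+    >>> outer_v = []
+    >>> x, exitCode = lgmres(A, b, outer_v=outer_v, atol=1e-5)
+    >>> print(exitCode)
+    0
+    >>> len(outer_v) > 0
+    True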
+    """
+    A, M, x, b, postprocess = make_system(A, M, x0, b)
+
+    if not np.isfinite(b).all():
+        raise ValueError("RHS must contain only finite numbers")
+
+    if atol is None:
+        warnings.warn("scipy.sparse.linalg.lgmres called without specifying `atol`. "
+                      "The default value will change in the future. To preserve "
+                      "current behavior, set ``atol=tol``.",
+                      category=DeprecationWarning, stacklevel=2)
+        atol = tol
+
+    matvec = A.matvec
+    psolve = M.matvec
+
+    if outer_v is None:
+        outer_v = []
+
+    axpy, dot, scal = None, None, None
+    nrm2 = get_blas_funcs('nrm2', [b])
+
+    b_norm = nrm2(b)
+    if b_norm == 0:
+        x = b
+        return (postprocess(x), 0)
+
+    ptol_max_factor = 1.0
+
+    for k_outer in range(maxiter):
+        r_outer = matvec(x) - b
+
+        # -- callback
+        if callback is not None:
+            callback(x)
+
+        # -- determine input type routines
+        if axpy is None:
+            if np.iscomplexobj(r_outer) and not np.iscomplexobj(x):
+                x = x.astype(r_outer.dtype)
+            axpy, dot, scal, nrm2 = get_blas_funcs(['axpy', 'dot', 'scal', 'nrm2'],
+                                                   (x, r_outer))
+
+        # -- check stopping condition
+        r_norm = nrm2(r_outer)
+        if r_norm <= max(atol, tol * b_norm):
+            break
+
+        # -- inner LGMRES iteration
+        v0 = -psolve(r_outer)
+        inner_res_0 = nrm2(v0)
+
+        if inner_res_0 == 0:
+            rnorm = nrm2(r_outer)
+            raise RuntimeError("Preconditioner returned a zero vector; "
+                               "|v| ~ %.1g, |M v| = 0" % rnorm)
+
+        v0 = scal(1.0/inner_res_0, v0)
+
+        ptol = min(ptol_max_factor, max(atol, tol*b_norm)/r_norm)
+
+        try:
+            Q, R, B, vs, zs, y, pres = _fgmres(matvec,
+                                               v0,
+                                               inner_m,
+                                               lpsolve=psolve,
+                                               atol=ptol,
+                                               outer_v=outer_v,
+                                               prepend_outer_v=prepend_outer_v)
+            y *= inner_res_0
+            if not np.isfinite(y).all():
+                # Overflow etc. in computation. There's no way to
+                # recover from this, so we have to bail out.
+                raise LinAlgError()
+        except LinAlgError:
+            # Floating point over/underflow, non-finite result from
+            # matmul etc. -- report failure.
+            return postprocess(x), k_outer + 1
+
+        # Inner loop tolerance control
+        if pres > ptol:
+            ptol_max_factor = min(1.0, 1.5 * ptol_max_factor)
+        else:
+            ptol_max_factor = max(1e-16, 0.25 * ptol_max_factor)
+
+        # -- GMRES terminated: eval solution
+        dx = zs[0]*y[0]
+        for w, yc in zip(zs[1:], y[1:]):
+            dx = axpy(w, dx, dx.shape[0], yc)  # dx += w*yc
+
+        # -- Store LGMRES augmentation vectors
+        nx = nrm2(dx)
+        if nx > 0:
+            if store_outer_Av:
+                q = Q.dot(R.dot(y))
+                ax = vs[0]*q[0]
+                for v, qc in zip(vs[1:], q[1:]):
+                    ax = axpy(v, ax, ax.shape[0], qc)
+                outer_v.append((dx/nx, ax/nx))
+            else:
+                outer_v.append((dx/nx, None))
+
+        # -- Retain only a finite number of augmentation vectors
+        while len(outer_v) > outer_k:
+            del outer_v[0]
+
+        # -- Apply step
+        x += dx
+    else:
+        # didn't converge ...
+        return postprocess(x), maxiter
+
+    return postprocess(x), 0
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/lsmr.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/lsmr.py
new file mode 100644
index 00000000..d1a61081
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/lsmr.py
@@ -0,0 +1,486 @@
+"""
+Copyright (C) 2010 David Fong and Michael Saunders
+
+LSMR uses an iterative method.
+
+07 Jun 2010: Documentation updated
+03 Jun 2010: First release version in Python
+
+David Chin-lung Fong            clfong@stanford.edu
+Institute for Computational and Mathematical Engineering
+Stanford University
+
+Michael Saunders                saunders@stanford.edu
+Systems Optimization Laboratory
+Dept of MS&E, Stanford University.
+
+"""
+
+__all__ = ['lsmr']
+
+from numpy import zeros, infty, atleast_1d, result_type
+from numpy.linalg import norm
+from math import sqrt
+from scipy.sparse.linalg._interface import aslinearoperator
+
+from scipy.sparse.linalg._isolve.lsqr import _sym_ortho
+
+
+def lsmr(A, b, damp=0.0, atol=1e-6, btol=1e-6, conlim=1e8,
+         maxiter=None, show=False, x0=None):
+    """Iterative solver for least-squares problems.
+
+    lsmr solves the system of linear equations ``Ax = b``. If the system
+    is inconsistent, it solves the least-squares problem ``min ||b - Ax||_2``.
+    ``A`` is a rectangular matrix of dimension m-by-n, where all cases are
+    allowed: m = n, m > n, or m < n. ``b`` is a vector of length m.
+    The matrix A may be dense or sparse (usually sparse).
+
+    Parameters
+    ----------
+    A : {sparse matrix, ndarray, LinearOperator}
+        Matrix A in the linear system.
+        Alternatively, ``A`` can be a linear operator which can
+        produce ``Ax`` and ``A^H x`` using, e.g.,
+        ``scipy.sparse.linalg.LinearOperator``.
+    b : array_like, shape (m,)
+        Vector ``b`` in the linear system.
+    damp : float
+        Damping factor for regularized least-squares. `lsmr` solves
+        the regularized least-squares problem::
+
+         min ||(b) - (  A   )x||
+             ||(0)   (damp*I) ||_2
+
+        where damp is a scalar.  If damp is None or 0, the system
+        is solved without regularization. Default is 0.
+    atol, btol : float, optional
+        Stopping tolerances. `lsmr` continues iterations until a
+        certain backward error estimate is smaller than some quantity
+        depending on atol and btol.  Let ``r = b - Ax`` be the
+        residual vector for the current approximate solution ``x``.
+        If ``Ax = b`` seems to be consistent, `lsmr` terminates
+        when ``norm(r) <= atol * norm(A) * norm(x) + btol * norm(b)``.
+        Otherwise, `lsmr` terminates when ``norm(A^H r) <=
+        atol * norm(A) * norm(r)``.  If both tolerances are 1.0e-6 (default),
+        the final ``norm(r)`` should be accurate to about 6
+        digits. (The final ``x`` will usually have fewer correct digits,
+        depending on ``cond(A)`` and the size of ``damp``.)  If `atol`
+        or `btol` is None, a default value of 1.0e-6 will be used.
+        Ideally, they should be estimates of the relative error in the
+        entries of ``A`` and ``b`` respectively.  For example, if the entries
+        of ``A`` have 7 correct digits, set ``atol = 1e-7``. This prevents
+        the algorithm from doing unnecessary work beyond the
+        uncertainty of the input data.
+    conlim : float, optional
+        `lsmr` terminates if an estimate of ``cond(A)`` exceeds
+        `conlim`.  For compatible systems ``Ax = b``, conlim could be
+        as large as 1.0e+12 (say).  For least-squares problems,
+        `conlim` should be less than 1.0e+8. If `conlim` is None, the
+        default value is 1e+8.  Maximum precision can be obtained by
+        setting ``atol = btol = conlim = 0``, but the number of
+        iterations may then be excessive. Default is 1e8.
+    maxiter : int, optional
+        `lsmr` terminates if the number of iterations reaches
+        `maxiter`.  The default is ``maxiter = min(m, n)``.  For
+        ill-conditioned systems, a larger value of `maxiter` may be
+        needed.
+    show : bool, optional
+        Print iteration logs if ``show=True``. Default is False.
+    x0 : array_like, shape (n,), optional
+        Initial guess of ``x``; if None, zeros are used. Default is None.
+
+        .. versionadded:: 1.0.0
+
+    Returns
+    -------
+    x : ndarray of float
+        Least-square solution returned.
+    istop : int
+        istop gives the reason for stopping::
+
+          istop   = 0 means x=0 is a solution.  If x0 was given, then x=x0 is a
+                      solution.
+                  = 1 means x is an approximate solution to A@x = b,
+                      according to atol and btol.
+                  = 2 means x approximately solves the least-squares problem
+                      according to atol.
+                  = 3 means COND(A) seems to be greater than CONLIM.
+                  = 4 is the same as 1 with atol = btol = eps (machine
+                      precision)
+                  = 5 is the same as 2 with atol = eps.
+                  = 6 is the same as 3 with CONLIM = 1/eps.
+                  = 7 means ITN reached maxiter before the other stopping
+                      conditions were satisfied.
+
+    itn : int
+        Number of iterations used.
+    normr : float
+        ``norm(b-Ax)``
+    normar : float
+        ``norm(A^H (b - Ax))``
+    norma : float
+        ``norm(A)``
+    conda : float
+        Condition number of A.
+    normx : float
+        ``norm(x)``
+
+    Notes
+    -----
+
+    .. versionadded:: 0.11.0
+
+    References
+    ----------
+    .. [1] D. C.-L. Fong and M. A. Saunders,
+           "LSMR: An iterative algorithm for sparse least-squares problems",
+           SIAM J. Sci. Comput., vol. 33, pp. 2950-2971, 2011.
+           :arxiv:`1006.0758`
+    .. [2] LSMR Software, https://web.stanford.edu/group/SOL/software/lsmr/
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.sparse import csc_matrix
+    >>> from scipy.sparse.linalg import lsmr
+    >>> A = csc_matrix([[1., 0.], [1., 1.], [0., 1.]], dtype=float)
+
+    The first example has the trivial solution ``[0, 0]``
+
+    >>> b = np.array([0., 0., 0.], dtype=float)
+    >>> x, istop, itn, normr = lsmr(A, b)[:4]
+    >>> istop
+    0
+    >>> x
+    array([0., 0.])
+
+    The returned stopping code `istop=0` indicates that a vector of zeros was
+    found as a solution. The returned solution `x` indeed contains
+    ``[0., 0.]``. The next example has a non-trivial solution:
+
+    >>> b = np.array([1., 0., -1.], dtype=float)
+    >>> x, istop, itn, normr = lsmr(A, b)[:4]
+    >>> istop
+    1
+    >>> x
+    array([ 1., -1.])
+    >>> itn
+    1
+    >>> normr
+    4.440892098500627e-16
+
+    As indicated by `istop=1`, `lsmr` found a solution obeying the tolerance
+    limits. The given solution ``[1., -1.]`` obviously solves the equation. The
+    remaining return values include information about the number of iterations
+    (`itn=1`) and the norm of the remaining difference between the left- and
+    right-hand sides of the solved equation.
+
+    The final example demonstrates the behavior in the case where there is no
+    solution for the equation:
+
+    >>> b = np.array([1., 0.01, -1.], dtype=float)
+    >>> x, istop, itn, normr = lsmr(A, b)[:4]
+    >>> istop
+    2
+    >>> x
+    array([ 1.00333333, -0.99666667])
+    >>> A.dot(x)-b
+    array([ 0.00333333, -0.00333333,  0.00333333])
+    >>> normr
+    0.005773502691896255
+
+    `istop=2` indicates that the system is inconsistent and thus `x` is an
+    approximate solution to the corresponding least-squares problem. `normr`
+    contains the norm of the minimal residual that was found.
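+
+    As a sketch of the `damp` parameter, regularization pulls the solution
+    toward zero, so the damped solution has a smaller norm:
+
+    >>> x_damp = lsmr(A, b, damp=10.0)[0]
+    >>> np.linalg.norm(x_damp) < np.linalg.norm(x)
+    True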
+    """
+
+    A = aslinearoperator(A)
+    b = atleast_1d(b)
+    if b.ndim > 1:
+        b = b.squeeze()
+
+    msg = ('The exact solution is x = 0, or x = x0, if x0 was given  ',
+           'Ax - b is small enough, given atol, btol                  ',
+           'The least-squares solution is good enough, given atol     ',
+           'The estimate of cond(Abar) has exceeded conlim            ',
+           'Ax - b is small enough for this machine                   ',
+           'The least-squares solution is good enough for this machine',
+           'Cond(Abar) seems to be too large for this machine         ',
+           'The iteration limit has been reached                      ')
+
+    hdg1 = '   itn      x(1)       norm r    norm Ar'
+    hdg2 = ' compatible   LS      norm A   cond A'
+    pfreq = 20   # print frequency (for repeating the heading)
+    pcount = 0   # print counter
+
+    m, n = A.shape
+
+    # stores the num of singular values
+    minDim = min([m, n])
+
+    if maxiter is None:
+        maxiter = minDim
+
+    if x0 is None:
+        dtype = result_type(A, b, float)
+    else:
+        dtype = result_type(A, b, x0, float)
+
+    if show:
+        print(' ')
+        print('LSMR            Least-squares solution of  Ax = b\n')
+        print(f'The matrix A has {m} rows and {n} columns')
+        print('damp = %20.14e\n' % (damp))
+        print('atol = %8.2e                 conlim = %8.2e\n' % (atol, conlim))
+        print('btol = %8.2e             maxiter = %8g\n' % (btol, maxiter))
+
+    u = b
+    normb = norm(b)
+    if x0 is None:
+        x = zeros(n, dtype)
+        beta = normb.copy()
+    else:
+        x = atleast_1d(x0.copy())
+        u = u - A.matvec(x)
+        beta = norm(u)
+
+    if beta > 0:
+        u = (1 / beta) * u
+        v = A.rmatvec(u)
+        alpha = norm(v)
+    else:
+        v = zeros(n, dtype)
+        alpha = 0
+
+    if alpha > 0:
+        v = (1 / alpha) * v
+
+    # Initialize variables for 1st iteration.
+
+    itn = 0
+    zetabar = alpha * beta
+    alphabar = alpha
+    rho = 1
+    rhobar = 1
+    cbar = 1
+    sbar = 0
+
+    h = v.copy()
+    hbar = zeros(n, dtype)
+
+    # Initialize variables for estimation of ||r||.
+
+    betadd = beta
+    betad = 0
+    rhodold = 1
+    tautildeold = 0
+    thetatilde = 0
+    zeta = 0
+    d = 0
+
+    # Initialize variables for estimation of ||A|| and cond(A)
+
+    normA2 = alpha * alpha
+    maxrbar = 0
+    minrbar = 1e+100
+    normA = sqrt(normA2)
+    condA = 1
+    normx = 0
+
+    # Items for use in stopping rules, normb set earlier
+    istop = 0
+    ctol = 0
+    if conlim > 0:
+        ctol = 1 / conlim
+    normr = beta
+
+    # Reverse the order here from the original matlab code because
+    # there was an error on return when arnorm==0
+    normar = alpha * beta
+    if normar == 0:
+        if show:
+            print(msg[0])
+        return x, istop, itn, normr, normar, normA, condA, normx
+
+    if normb == 0:
+        x[()] = 0
+        return x, istop, itn, normr, normar, normA, condA, normx
+
+    if show:
+        print(' ')
+        print(hdg1, hdg2)
+        test1 = 1
+        test2 = alpha / beta
+        str1 = '%6g %12.5e' % (itn, x[0])
+        str2 = ' %10.3e %10.3e' % (normr, normar)
+        str3 = '  %8.1e %8.1e' % (test1, test2)
+        print(''.join([str1, str2, str3]))
+
+    # Main iteration loop.
+    while itn < maxiter:
+        itn = itn + 1
+
+        # Perform the next step of the bidiagonalization to obtain the
+        # next  beta, u, alpha, v.  These satisfy the relations
+        #         beta*u  =  A@v   -  alpha*u,
+        #        alpha*v  =  A'@u  -  beta*v.
+
+        u *= -alpha
+        u += A.matvec(v)
+        beta = norm(u)
+
+        if beta > 0:
+            u *= (1 / beta)
+            v *= -beta
+            v += A.rmatvec(u)
+            alpha = norm(v)
+            if alpha > 0:
+                v *= (1 / alpha)
+
+        # At this point, beta = beta_{k+1}, alpha = alpha_{k+1}.
+
+        # Construct rotation Qhat_{k,2k+1}.
+
+        chat, shat, alphahat = _sym_ortho(alphabar, damp)
+
+        # Use a plane rotation (Q_i) to turn B_i to R_i
+
+        rhoold = rho
+        c, s, rho = _sym_ortho(alphahat, beta)
+        thetanew = s*alpha
+        alphabar = c*alpha
+
+        # Use a plane rotation (Qbar_i) to turn R_i^T to R_i^bar
+
+        rhobarold = rhobar
+        zetaold = zeta
+        thetabar = sbar * rho
+        rhotemp = cbar * rho
+        cbar, sbar, rhobar = _sym_ortho(cbar * rho, thetanew)
+        zeta = cbar * zetabar
+        zetabar = - sbar * zetabar
+
+        # Update h, h_hat, x.
+
+        hbar *= - (thetabar * rho / (rhoold * rhobarold))
+        hbar += h
+        x += (zeta / (rho * rhobar)) * hbar
+        h *= - (thetanew / rho)
+        h += v
+
+        # Estimate of ||r||.
+
+        # Apply rotation Qhat_{k,2k+1}.
+        betaacute = chat * betadd
+        betacheck = -shat * betadd
+
+        # Apply rotation Q_{k,k+1}.
+        betahat = c * betaacute
+        betadd = -s * betaacute
+
+        # Apply rotation Qtilde_{k-1}.
+        # betad = betad_{k-1} here.
+
+        thetatildeold = thetatilde
+        ctildeold, stildeold, rhotildeold = _sym_ortho(rhodold, thetabar)
+        thetatilde = stildeold * rhobar
+        rhodold = ctildeold * rhobar
+        betad = - stildeold * betad + ctildeold * betahat
+
+        # betad   = betad_k here.
+        # rhodold = rhod_k  here.
+
+        tautildeold = (zetaold - thetatildeold * tautildeold) / rhotildeold
+        taud = (zeta - thetatilde * tautildeold) / rhodold
+        d = d + betacheck * betacheck
+        normr = sqrt(d + (betad - taud)**2 + betadd * betadd)
+
+        # Estimate ||A||.
+        normA2 = normA2 + beta * beta
+        normA = sqrt(normA2)
+        normA2 = normA2 + alpha * alpha
+
+        # Estimate cond(A).
+        maxrbar = max(maxrbar, rhobarold)
+        if itn > 1:
+            minrbar = min(minrbar, rhobarold)
+        condA = max(maxrbar, rhotemp) / min(minrbar, rhotemp)
+
+        # Test for convergence.
+
+        # Compute norms for convergence testing.
+        normar = abs(zetabar)
+        normx = norm(x)
+
+        # Now use these norms to estimate certain other quantities,
+        # some of which will be small near a solution.
+
+        test1 = normr / normb
+        if (normA * normr) != 0:
+            test2 = normar / (normA * normr)
+        else:
+            test2 = infty
+        test3 = 1 / condA
+        t1 = test1 / (1 + normA * normx / normb)
+        rtol = btol + atol * normA * normx / normb
+
+        # The following tests guard against extremely small values of
+        # atol, btol or ctol.  (The user may have set any or all of
+        # the parameters atol, btol, conlim  to 0.)
+        # The effect is equivalent to the normal tests using
+        # atol = eps,  btol = eps,  conlim = 1/eps.
+
+        if itn >= maxiter:
+            istop = 7
+        if 1 + test3 <= 1:
+            istop = 6
+        if 1 + test2 <= 1:
+            istop = 5
+        if 1 + t1 <= 1:
+            istop = 4
+
+        # Allow for tolerances set by the user.
+
+        if test3 <= ctol:
+            istop = 3
+        if test2 <= atol:
+            istop = 2
+        if test1 <= rtol:
+            istop = 1
+
+        # See if it is time to print something.
+
+        if show:
+            if (n <= 40) or (itn <= 10) or (itn >= maxiter - 10) or \
+               (itn % 10 == 0) or (test3 <= 1.1 * ctol) or \
+               (test2 <= 1.1 * atol) or (test1 <= 1.1 * rtol) or \
+               (istop != 0):
+
+                if pcount >= pfreq:
+                    pcount = 0
+                    print(' ')
+                    print(hdg1, hdg2)
+                pcount = pcount + 1
+                str1 = '%6g %12.5e' % (itn, x[0])
+                str2 = ' %10.3e %10.3e' % (normr, normar)
+                str3 = '  %8.1e %8.1e' % (test1, test2)
+                str4 = ' %8.1e %8.1e' % (normA, condA)
+                print(''.join([str1, str2, str3, str4]))
+
+        if istop > 0:
+            break
+
+    # Print the stopping condition.
+
+    if show:
+        print(' ')
+        print('LSMR finished')
+        print(msg[istop])
+        print('istop =%8g    normr =%8.1e' % (istop, normr))
+        print('    normA =%8.1e    normAr =%8.1e' % (normA, normar))
+        print('itn   =%8g    condA =%8.1e' % (itn, condA))
+        print('    normx =%8.1e' % (normx))
+        print(str1, str2)
+        print(str3, str4)
+
+    return x, istop, itn, normr, normar, normA, condA, normx
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/lsqr.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/lsqr.py
new file mode 100644
index 00000000..2cd6ecbc
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/lsqr.py
@@ -0,0 +1,587 @@
+"""Sparse Equations and Least Squares.
+
+The original Fortran code was written by C. C. Paige and M. A. Saunders as
+described in
+
+C. C. Paige and M. A. Saunders, LSQR: An algorithm for sparse linear
+equations and sparse least squares, TOMS 8(1), 43--71 (1982).
+
+C. C. Paige and M. A. Saunders, Algorithm 583; LSQR: Sparse linear
+equations and least-squares problems, TOMS 8(2), 195--209 (1982).
+
+It is licensed under the following BSD license:
+
+Copyright (c) 2006, Systems Optimization Laboratory
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above
+      copyright notice, this list of conditions and the following
+      disclaimer in the documentation and/or other materials provided
+      with the distribution.
+
+    * Neither the name of Stanford University nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+The Fortran code was translated to Python for use in CVXOPT by Jeffery
+Kline with contributions by Mridul Aanjaneya and Bob Myhill.
+
+Adapted for SciPy by Stefan van der Walt.
+
+"""
+
+__all__ = ['lsqr']
+
+import numpy as np
+from math import sqrt
+from scipy.sparse.linalg._interface import aslinearoperator
+
+eps = np.finfo(np.float64).eps
+
+
+def _sym_ortho(a, b):
+    """
+    Stable implementation of Givens rotation.
+
+    Notes
+    -----
+    The routine 'SymOrtho' was added for numerical stability. This is
+    recommended by S.-C. Choi in [1]_.  It removes the unpleasant potential of
+    ``1/eps`` in some important places (see, for example text following
+    "Compute the next plane rotation Qk" in minres.py).
+
+    References
+    ----------
+    .. [1] S.-C. Choi, "Iterative Methods for Singular Linear Equations
+           and Least-Squares Problems", Dissertation,
+           http://www.stanford.edu/group/SOL/dissertations/sou-cheng-choi-thesis.pdf
+
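+    Examples
+    --------
+    A sketch of the defining property: the returned ``(c, s, r)`` satisfy
+    ``c*a + s*b == r`` and ``c*b - s*a == 0``.
+
+    >>> import numpy as np
+    >>> c, s, r = _sym_ortho(3.0, 4.0)
+    >>> np.allclose([c*3.0 + s*4.0, c*4.0 - s*3.0], [r, 0.0])
+    True
+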
+    """
+    if b == 0:
+        return np.sign(a), 0, abs(a)
+    elif a == 0:
+        return 0, np.sign(b), abs(b)
+    elif abs(b) > abs(a):
+        tau = a / b
+        s = np.sign(b) / sqrt(1 + tau * tau)
+        c = s * tau
+        r = b / s
+    else:
+        tau = b / a
+        c = np.sign(a) / sqrt(1+tau*tau)
+        s = c * tau
+        r = a / c
+    return c, s, r
+
+
+def lsqr(A, b, damp=0.0, atol=1e-6, btol=1e-6, conlim=1e8,
+         iter_lim=None, show=False, calc_var=False, x0=None):
+    """Find the least-squares solution to a large, sparse, linear system
+    of equations.
+
+    The function solves ``Ax = b``  or  ``min ||Ax - b||^2`` or
+    ``min ||Ax - b||^2 + d^2 ||x - x0||^2``.
+
+    The matrix A may be square or rectangular (over-determined or
+    under-determined), and may have any rank.
+
+    ::
+
+      1. Unsymmetric equations --    solve  Ax = b
+
+      2. Linear least squares  --    solve  Ax = b
+                                     in the least-squares sense
+
+      3. Damped least squares  --    solve  (   A    )*x = (    b    )
+                                            ( damp*I )     ( damp*x0 )
+                                     in the least-squares sense
+
+    Parameters
+    ----------
+    A : {sparse matrix, ndarray, LinearOperator}
+        Representation of an m-by-n matrix.
+        Alternatively, ``A`` can be a linear operator which can
+        produce ``Ax`` and ``A^T x`` using, e.g.,
+        ``scipy.sparse.linalg.LinearOperator``.
+    b : array_like, shape (m,)
+        Right-hand side vector ``b``.
+    damp : float
+        Damping coefficient. Default is 0.
+    atol, btol : float, optional
+        Stopping tolerances. `lsqr` continues iterations until a
+        certain backward error estimate is smaller than some quantity
+        depending on atol and btol.  Let ``r = b - Ax`` be the
+        residual vector for the current approximate solution ``x``.
+        If ``Ax = b`` seems to be consistent, `lsqr` terminates
+        when ``norm(r) <= atol * norm(A) * norm(x) + btol * norm(b)``.
+        Otherwise, `lsqr` terminates when ``norm(A^H r) <=
+        atol * norm(A) * norm(r)``.  If both tolerances are 1.0e-6 (default),
+        the final ``norm(r)`` should be accurate to about 6
+        digits. (The final ``x`` will usually have fewer correct digits,
+        depending on ``cond(A)`` and the size of ``damp``.)  If `atol`
+        or `btol` is None, a default value of 1.0e-6 will be used.
+        Ideally, they should be estimates of the relative error in the
+        entries of ``A`` and ``b`` respectively.  For example, if the entries
+        of ``A`` have 7 correct digits, set ``atol = 1e-7``. This prevents
+        the algorithm from doing unnecessary work beyond the
+        uncertainty of the input data.
+    conlim : float, optional
+        Another stopping tolerance.  lsqr terminates if an estimate of
+        ``cond(A)`` exceeds `conlim`.  For compatible systems ``Ax =
+        b``, `conlim` could be as large as 1.0e+12 (say).  For
+        least-squares problems, conlim should be less than 1.0e+8.
+        Maximum precision can be obtained by setting ``atol = btol =
+        conlim = zero``, but the number of iterations may then be
+        excessive. Default is 1e8.
+    iter_lim : int, optional
+        Explicit limitation on number of iterations (for safety).
+    show : bool, optional
+        Display an iteration log. Default is False.
+    calc_var : bool, optional
+        Whether to estimate diagonals of ``(A'A + damp^2*I)^{-1}``.
+    x0 : array_like, shape (n,), optional
+        Initial guess of x; if None, zeros are used. Default is None.
+
+        .. versionadded:: 1.0.0
+
+    Returns
+    -------
+    x : ndarray of float
+        The final solution.
+    istop : int
+        Gives the reason for termination.
+        1 means x is an approximate solution to Ax = b.
+        2 means x approximately solves the least-squares problem.
+    itn : int
+        Iteration number upon termination.
+    r1norm : float
+        ``norm(r)``, where ``r = b - Ax``.
+    r2norm : float
+        ``sqrt( norm(r)^2  +  damp^2 * norm(x - x0)^2 )``.  Equal to `r1norm`
+        if ``damp == 0``.
+    anorm : float
+        Estimate of Frobenius norm of ``Abar = [[A]; [damp*I]]``.
+    acond : float
+        Estimate of ``cond(Abar)``.
+    arnorm : float
+        Estimate of ``norm(A'@r - damp^2*(x - x0))``.
+    xnorm : float
+        ``norm(x)``
+    var : ndarray of float
+        If ``calc_var`` is True, estimates all diagonals of
+        ``(A'A)^{-1}`` (if ``damp == 0``) or more generally ``(A'A +
+        damp^2*I)^{-1}``.  This is well defined if A has full column
+        rank or ``damp > 0``.  (Not sure what var means if ``rank(A)
+        < n`` and ``damp = 0.``)
+
+    Notes
+    -----
+    LSQR uses an iterative method to approximate the solution.  The
+    number of iterations required to reach a certain accuracy depends
+    strongly on the scaling of the problem.  Poor scaling of the rows
+    or columns of A should therefore be avoided where possible.
+
+    For example, in problem 1 the solution is unaltered by
+    row-scaling.  If a row of A is very small or large compared to
+    the other rows of A, the corresponding row of ( A  b ) should be
+    scaled up or down.
+
+    In problems 1 and 2, the solution x is easily recovered
+    following column-scaling.  Unless better information is known,
+    the nonzero columns of A should be scaled so that they all have
+    the same Euclidean norm (e.g., 1.0).
+
+    In problem 3, there is no freedom to re-scale if damp is
+    nonzero.  However, the value of damp should be assigned only
+    after attention has been paid to the scaling of A.
+
+    The parameter damp is intended to help regularize
+    ill-conditioned systems, by preventing the true solution from
+    being very large.  Another aid to regularization is provided by
+    the parameter acond, which may be used to terminate iterations
+    before the computed solution becomes very large.
+
+    If some initial estimate ``x0`` is known and if ``damp == 0``,
+    one could proceed as follows:
+
+      1. Compute a residual vector ``r0 = b - A@x0``.
+      2. Use LSQR to solve the system  ``A@dx = r0``.
+      3. Add the correction dx to obtain a final solution ``x = x0 + dx``.
+
+    This requires that ``x0`` be available before and after the call
+    to LSQR.  To judge the benefits, suppose LSQR takes k1 iterations
+    to solve A@x = b and k2 iterations to solve A@dx = r0.
+    If x0 is "good", norm(r0) will be smaller than norm(b).
+    If the same stopping tolerances atol and btol are used for each
+    system, k1 and k2 will be similar, but the final solution x0 + dx
+    should be more accurate.  The only way to reduce the total work
+    is to use a larger stopping tolerance for the second system.
+    If some value btol is suitable for A@x = b, the larger value
+    btol*norm(b)/norm(r0)  should be suitable for A@dx = r0.
+
+    Preconditioning is another way to reduce the number of iterations.
+    If it is possible to solve a related system ``M@x = b``
+    efficiently, where M approximates A in some helpful way (e.g. M -
+    A has low rank or its elements are small relative to those of A),
+    LSQR may converge more rapidly on the system ``A@M(inverse)@z =
+    b``, after which x can be recovered by solving M@x = z.
+
+    If A is symmetric, LSQR should not be used!
+
+    Alternatives are the symmetric conjugate-gradient method (cg)
+    and/or SYMMLQ.  SYMMLQ is an implementation of symmetric cg that
+    applies to any symmetric A and will converge more rapidly than
+    LSQR.  If A is positive definite, there are other implementations
+    of symmetric cg that require slightly less work per iteration than
+    SYMMLQ (but will take the same number of iterations).
+
+    References
+    ----------
+    .. [1] C. C. Paige and M. A. Saunders (1982a).
+           "LSQR: An algorithm for sparse linear equations and
+           sparse least squares", ACM TOMS 8(1), 43-71.
+    .. [2] C. C. Paige and M. A. Saunders (1982b).
+           "Algorithm 583.  LSQR: Sparse linear equations and least
+           squares problems", ACM TOMS 8(2), 195-209.
+    .. [3] M. A. Saunders (1995).  "Solution of sparse rectangular
+           systems using LSQR and CRAIG", BIT 35, 588-604.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.sparse import csc_matrix
+    >>> from scipy.sparse.linalg import lsqr
+    >>> A = csc_matrix([[1., 0.], [1., 1.], [0., 1.]], dtype=float)
+
+    The first example has the trivial solution ``[0, 0]``
+
+    >>> b = np.array([0., 0., 0.], dtype=float)
+    >>> x, istop, itn, normr = lsqr(A, b)[:4]
+    >>> istop
+    0
+    >>> x
+    array([ 0.,  0.])
+
+    The returned stopping code `istop=0` indicates that a vector of zeros was
+    found as a solution. The returned solution `x` indeed contains
+    ``[0., 0.]``. The next example has a non-trivial solution:
+
+    >>> b = np.array([1., 0., -1.], dtype=float)
+    >>> x, istop, itn, r1norm = lsqr(A, b)[:4]
+    >>> istop
+    1
+    >>> x
+    array([ 1., -1.])
+    >>> itn
+    1
+    >>> r1norm
+    4.440892098500627e-16
+
+    As indicated by `istop=1`, `lsqr` found a solution obeying the tolerance
+    limits. The given solution ``[1., -1.]`` obviously solves the equation. The
+    remaining return values include information about the number of iterations
+    (`itn=1`) and the norm of the remaining difference between the left- and
+    right-hand sides of the solved equation.
+
+    The final example demonstrates the behavior in the case where there is no
+    solution for the equation:
+
+    >>> b = np.array([1., 0.01, -1.], dtype=float)
+    >>> x, istop, itn, r1norm = lsqr(A, b)[:4]
+    >>> istop
+    2
+    >>> x
+    array([ 1.00333333, -0.99666667])
+    >>> A.dot(x)-b
+    array([ 0.00333333, -0.00333333,  0.00333333])
+    >>> r1norm
+    0.005773502691896255
+
+    `istop=2` indicates that the system is inconsistent and thus `x` is an
+    approximate solution to the corresponding least-squares problem. `r1norm`
+    contains the norm of the minimal residual that was found.
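+
+    As a sketch of the warm-start recipe from the Notes, one can solve for
+    a correction ``dx`` against the residual of an initial guess ``x0``:
+
+    >>> x0 = np.array([1., -1.])
+    >>> r0 = b - A.dot(x0)
+    >>> dx = lsqr(A, r0)[0]
+    >>> np.allclose(x0 + dx, x)
+    True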
+    """
+    A = aslinearoperator(A)
+    b = np.atleast_1d(b)
+    if b.ndim > 1:
+        b = b.squeeze()
+
+    m, n = A.shape
+    if iter_lim is None:
+        iter_lim = 2 * n
+    var = np.zeros(n)
+
+    msg = ('The exact solution is  x = 0                              ',
+           'Ax - b is small enough, given atol, btol                  ',
+           'The least-squares solution is good enough, given atol     ',
+           'The estimate of cond(Abar) has exceeded conlim            ',
+           'Ax - b is small enough for this machine                   ',
+           'The least-squares solution is good enough for this machine',
+           'Cond(Abar) seems to be too large for this machine         ',
+           'The iteration limit has been reached                      ')
+
+    if show:
+        print(' ')
+        print('LSQR            Least-squares solution of  Ax = b')
+        str1 = f'The matrix A has {m} rows and {n} columns'
+        str2 = 'damp = %20.14e   calc_var = %8g' % (damp, calc_var)
+        str3 = 'atol = %8.2e                 conlim = %8.2e' % (atol, conlim)
+        str4 = 'btol = %8.2e               iter_lim = %8g' % (btol, iter_lim)
+        print(str1)
+        print(str2)
+        print(str3)
+        print(str4)
+
+    itn = 0
+    istop = 0
+    ctol = 0
+    if conlim > 0:
+        ctol = 1/conlim
+    anorm = 0
+    acond = 0
+    dampsq = damp**2
+    ddnorm = 0
+    res2 = 0
+    xnorm = 0
+    xxnorm = 0
+    z = 0
+    cs2 = -1
+    sn2 = 0
+
+    # Set up the first vectors u and v for the bidiagonalization.
+    # These satisfy  beta*u = b - A@x,  alfa*v = A'@u.
+    u = b
+    bnorm = np.linalg.norm(b)
+
+    if x0 is None:
+        x = np.zeros(n)
+        beta = bnorm.copy()
+    else:
+        x = np.asarray(x0)
+        u = u - A.matvec(x)
+        beta = np.linalg.norm(u)
+
+    if beta > 0:
+        u = (1/beta) * u
+        v = A.rmatvec(u)
+        alfa = np.linalg.norm(v)
+    else:
+        v = x.copy()
+        alfa = 0
+
+    if alfa > 0:
+        v = (1/alfa) * v
+    w = v.copy()
+
+    rhobar = alfa
+    phibar = beta
+    rnorm = beta
+    r1norm = rnorm
+    r2norm = rnorm
+
+    # Reverse the order here from the original MATLAB code because
+    # there was an error on return when arnorm == 0
+    arnorm = alfa * beta
+    if arnorm == 0:
+        if show:
+            print(msg[0])
+        return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var
+
+    head1 = '   Itn      x[0]       r1norm     r2norm '
+    head2 = ' Compatible    LS      Norm A   Cond A'
+
+    if show:
+        print(' ')
+        print(head1, head2)
+        test1 = 1
+        test2 = alfa / beta
+        str1 = '%6g %12.5e' % (itn, x[0])
+        str2 = ' %10.3e %10.3e' % (r1norm, r2norm)
+        str3 = '  %8.1e %8.1e' % (test1, test2)
+        print(str1, str2, str3)
+
+    # Main iteration loop.
+    while itn < iter_lim:
+        itn = itn + 1
+        # Perform the next step of the bidiagonalization to obtain the
+        # next  beta, u, alfa, v. These satisfy the relations
+        #     beta*u  =  A@v   -  alfa*u,
+        #     alfa*v  =  A'@u  -  beta*v.
+        u = A.matvec(v) - alfa * u
+        beta = np.linalg.norm(u)
+
+        if beta > 0:
+            u = (1/beta) * u
+            anorm = sqrt(anorm**2 + alfa**2 + beta**2 + dampsq)
+            v = A.rmatvec(u) - beta * v
+            alfa = np.linalg.norm(v)
+            if alfa > 0:
+                v = (1 / alfa) * v
+
+        # Use a plane rotation to eliminate the damping parameter.
+        # This alters the diagonal (rhobar) of the lower-bidiagonal matrix.
+        if damp > 0:
+            rhobar1 = sqrt(rhobar**2 + dampsq)
+            cs1 = rhobar / rhobar1
+            sn1 = damp / rhobar1
+            psi = sn1 * phibar
+            phibar = cs1 * phibar
+        else:
+            # cs1 = 1 and sn1 = 0
+            rhobar1 = rhobar
+            psi = 0.
+
+        # Use a plane rotation to eliminate the subdiagonal element (beta)
+        # of the lower-bidiagonal matrix, giving an upper-bidiagonal matrix.
+        cs, sn, rho = _sym_ortho(rhobar1, beta)
+
+        theta = sn * alfa
+        rhobar = -cs * alfa
+        phi = cs * phibar
+        phibar = sn * phibar
+        tau = sn * phi
+
+        # Update x and w.
+        t1 = phi / rho
+        t2 = -theta / rho
+        dk = (1 / rho) * w
+
+        x = x + t1 * w
+        w = v + t2 * w
+        ddnorm = ddnorm + np.linalg.norm(dk)**2
+
+        if calc_var:
+            var = var + dk**2
+
+        # Use a plane rotation on the right to eliminate the
+        # super-diagonal element (theta) of the upper-bidiagonal matrix.
+        # Then use the result to estimate norm(x).
+        delta = sn2 * rho
+        gambar = -cs2 * rho
+        rhs = phi - delta * z
+        zbar = rhs / gambar
+        xnorm = sqrt(xxnorm + zbar**2)
+        gamma = sqrt(gambar**2 + theta**2)
+        cs2 = gambar / gamma
+        sn2 = theta / gamma
+        z = rhs / gamma
+        xxnorm = xxnorm + z**2
+
+        # Test for convergence.
+        # First, estimate the condition of the matrix  Abar,
+        # and the norms of  rbar  and  Abar'rbar.
+        acond = anorm * sqrt(ddnorm)
+        res1 = phibar**2
+        res2 = res2 + psi**2
+        rnorm = sqrt(res1 + res2)
+        arnorm = alfa * abs(tau)
+
+        # Distinguish between
+        #    r1norm = ||b - Ax|| and
+        #    r2norm = rnorm in current code
+        #           = sqrt(r1norm^2 + damp^2*||x - x0||^2).
+        #    Estimate r1norm from
+        #    r1norm = sqrt(r2norm^2 - damp^2*||x - x0||^2).
+        # Although there is cancellation, it might be accurate enough.
+        if damp > 0:
+            r1sq = rnorm**2 - dampsq * xxnorm
+            r1norm = sqrt(abs(r1sq))
+            if r1sq < 0:
+                r1norm = -r1norm
+        else:
+            r1norm = rnorm
+        r2norm = rnorm
+
+        # Now use these norms to estimate certain other quantities,
+        # some of which will be small near a solution.
+        test1 = rnorm / bnorm
+        test2 = arnorm / (anorm * rnorm + eps)
+        test3 = 1 / (acond + eps)
+        t1 = test1 / (1 + anorm * xnorm / bnorm)
+        rtol = btol + atol * anorm * xnorm / bnorm
+
+        # The following tests guard against extremely small values of
+        # atol, btol  or  ctol.  (The user may have set any or all of
+        # the parameters  atol, btol, conlim  to 0.)
+        # The effect is equivalent to the normal tests using
+        # atol = eps,  btol = eps,  conlim = 1/eps.
+        if itn >= iter_lim:
+            istop = 7
+        if 1 + test3 <= 1:
+            istop = 6
+        if 1 + test2 <= 1:
+            istop = 5
+        if 1 + t1 <= 1:
+            istop = 4
+
+        # Allow for tolerances set by the user.
+        if test3 <= ctol:
+            istop = 3
+        if test2 <= atol:
+            istop = 2
+        if test1 <= rtol:
+            istop = 1
+
+        if show:
+            # See if it is time to print something.
+            prnt = False
+            if n <= 40:
+                prnt = True
+            if itn <= 10:
+                prnt = True
+            if itn >= iter_lim-10:
+                prnt = True
+            # if itn%10 == 0: prnt = True
+            if test3 <= 2*ctol:
+                prnt = True
+            if test2 <= 10*atol:
+                prnt = True
+            if test1 <= 10*rtol:
+                prnt = True
+            if istop != 0:
+                prnt = True
+
+            if prnt:
+                str1 = '%6g %12.5e' % (itn, x[0])
+                str2 = ' %10.3e %10.3e' % (r1norm, r2norm)
+                str3 = '  %8.1e %8.1e' % (test1, test2)
+                str4 = ' %8.1e %8.1e' % (anorm, acond)
+                print(str1, str2, str3, str4)
+
+        if istop != 0:
+            break
+
+    # End of iteration loop.
+    # Print the stopping condition.
+    if show:
+        print(' ')
+        print('LSQR finished')
+        print(msg[istop])
+        print(' ')
+        str1 = 'istop =%8g   r1norm =%8.1e' % (istop, r1norm)
+        str2 = 'anorm =%8.1e   arnorm =%8.1e' % (anorm, arnorm)
+        str3 = 'itn   =%8g   r2norm =%8.1e' % (itn, r2norm)
+        str4 = 'acond =%8.1e   xnorm  =%8.1e' % (acond, xnorm)
+        print(str1 + '   ' + str2)
+        print(str3 + '   ' + str4)
+        print(' ')
+
+    return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var
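+
+
+if __name__ == '__main__':
+    # Illustrative smoke-test sketch added for this write-up, mirroring the
+    # demo at the bottom of minres.py below; it is a sketch, not part of the
+    # published scipy module.
+    from scipy.sparse import spdiags
+
+    n = 10
+    A = spdiags([np.arange(1, n + 1, dtype=float)], [0], n, n, format='csr')
+    b = A @ np.ones(n)
+    x, istop, itn, r1norm = lsqr(A, b, atol=1e-12, btol=1e-12)[:4]
+    print('istop =', istop, ' itn =', itn, ' r1norm =', r1norm)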
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/minres.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/minres.py
new file mode 100644
index 00000000..7f1bfd00
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/minres.py
@@ -0,0 +1,392 @@
+from numpy import inner, zeros, inf, finfo
+from numpy.linalg import norm
+from math import sqrt
+
+from .utils import make_system
+
+__all__ = ['minres']
+
+
+def minres(A, b, x0=None, shift=0.0, tol=1e-5, maxiter=None,
+           M=None, callback=None, show=False, check=False):
+    """
+    Use MINimum RESidual iteration to solve Ax=b
+
+    MINRES minimizes norm(Ax - b) for a real symmetric matrix A.  Unlike
+    the Conjugate Gradient method, A can be indefinite or singular.
+
+    If shift != 0 then the method solves (A - shift*I)x = b
+
+    Parameters
+    ----------
+    A : {sparse matrix, ndarray, LinearOperator}
+        The real symmetric N-by-N matrix of the linear system.
+        Alternatively, ``A`` can be a linear operator which can
+        produce ``Ax`` using, e.g.,
+        ``scipy.sparse.linalg.LinearOperator``.
+    b : ndarray
+        Right hand side of the linear system. Has shape (N,) or (N,1).
+
+    Returns
+    -------
+    x : ndarray
+        The converged solution.
+    info : integer
+        Provides convergence information:
+            0  : successful exit
+            >0 : convergence to tolerance not achieved, number of iterations
+            <0 : illegal input or breakdown
+
+    Other Parameters
+    ----------------
+    x0 : ndarray
+        Starting guess for the solution.
+    shift : float
+        Value to apply to the system ``(A - shift * I)x = b``. Default is 0.
+    tol : float
+        Tolerance to achieve. The algorithm terminates when the relative
+        residual is below `tol`.
+    maxiter : integer
+        Maximum number of iterations.  Iteration will stop after maxiter
+        steps even if the specified tolerance has not been achieved.
+    M : {sparse matrix, ndarray, LinearOperator}
+        Preconditioner for A.  The preconditioner should approximate the
+        inverse of A.  Effective preconditioning dramatically improves the
+        rate of convergence, which implies that fewer iterations are needed
+        to reach a given error tolerance.
+    callback : function
+        User-supplied function to call after each iteration.  It is called
+        as callback(xk), where xk is the current solution vector.
+    show : bool
+        If ``True``, print out a summary and metrics related to the solution
+        during iterations. Default is ``False``.
+    check : bool
+        If ``True``, run additional input validation to check that `A` and
+        `M` (if specified) are symmetric. Default is ``False``.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.sparse import csc_matrix
+    >>> from scipy.sparse.linalg import minres
+    >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
+    >>> A = A + A.T
+    >>> b = np.array([2, 4, -1], dtype=float)
+    >>> x, exitCode = minres(A, b)
+    >>> print(exitCode)            # 0 indicates successful convergence
+    0
+    >>> np.allclose(A.dot(x), b)
+    True
+
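+    The ``shift`` keyword solves the shifted system ``(A - shift*I)x = b``
+    instead; a minimal, illustrative call whose output is not checked here:
+
+    >>> xs, exitCode = minres(A, b, shift=0.5)
+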
+    References
+    ----------
+    Solution of sparse indefinite systems of linear equations,
+        C. C. Paige and M. A. Saunders (1975),
+        SIAM J. Numer. Anal. 12(4), pp. 617-629.
+        https://web.stanford.edu/group/SOL/software/minres/
+
+    This file is a translation of the following MATLAB implementation:
+        https://web.stanford.edu/group/SOL/software/minres/minres-matlab.zip
+
+    """
+    A, M, x, b, postprocess = make_system(A, M, x0, b)
+
+    matvec = A.matvec
+    psolve = M.matvec
+
+    first = 'Enter minres.   '
+    last = 'Exit  minres.   '
+
+    n = A.shape[0]
+
+    if maxiter is None:
+        maxiter = 5 * n
+
+    msg = [' beta2 = 0.  If M = I, b and x are eigenvectors    ',   # -1
+           ' beta1 = 0.  The exact solution is x0              ',   # 0
+           ' A solution to Ax = b was found, given rtol        ',   # 1
+           ' A least-squares solution was found, given rtol    ',   # 2
+           ' Reasonable accuracy achieved, given eps           ',   # 3
+           ' x has converged to an eigenvector                 ',   # 4
+           ' acond has exceeded 0.1/eps                        ',   # 5
+           ' The iteration limit was reached                   ',   # 6
+           ' A  does not define a symmetric matrix             ',   # 7
+           ' M  does not define a symmetric matrix             ',   # 8
+           ' M  does not define a pos-def preconditioner       ']   # 9
+
+    if show:
+        print(first + 'Solution of symmetric Ax = b')
+        print(first + 'n      =  %3g     shift  =  %23.14e' % (n,shift))
+        print(first + 'itnlim =  %3g     rtol   =  %11.2e' % (maxiter,tol))
+        print()
+
+    istop = 0
+    itn = 0
+    Anorm = 0
+    Acond = 0
+    rnorm = 0
+    ynorm = 0
+
+    xtype = x.dtype
+
+    eps = finfo(xtype).eps
+
+    # Set up y and v for the first Lanczos vector v1.
+    # y  =  beta1 P' v1,  where  P = C**(-1).
+    # v is really P' v1.
+
+    if x0 is None:
+        r1 = b.copy()
+    else:
+        r1 = b - A@x
+    y = psolve(r1)
+
+    beta1 = inner(r1, y)
+
+    if beta1 < 0:
+        raise ValueError('indefinite preconditioner')
+    elif beta1 == 0:
+        return (postprocess(x), 0)
+
+    bnorm = norm(b)
+    if bnorm == 0:
+        x = b
+        return (postprocess(x), 0)
+
+    beta1 = sqrt(beta1)
+
+    if check:
+        # are these too strict?
+
+        # see if A is symmetric
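+        # For a symmetric A, <A@y, A@y> equals <y, A@(A@y)>, so the
+        # quantities s and t computed below should agree to within the
+        # roundoff allowance epsa.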
+        w = matvec(y)
+        r2 = matvec(w)
+        s = inner(w,w)
+        t = inner(y,r2)
+        z = abs(s - t)
+        epsa = (s + eps) * eps**(1.0/3.0)
+        if z > epsa:
+            raise ValueError('non-symmetric matrix')
+
+        # see if M is symmetric
+        r2 = psolve(y)
+        s = inner(y,y)
+        t = inner(r1,r2)
+        z = abs(s - t)
+        epsa = (s + eps) * eps**(1.0/3.0)
+        if z > epsa:
+            raise ValueError('non-symmetric preconditioner')
+
+    # Initialize other quantities
+    oldb = 0
+    beta = beta1
+    dbar = 0
+    epsln = 0
+    qrnorm = beta1
+    phibar = beta1
+    rhs1 = beta1
+    rhs2 = 0
+    tnorm2 = 0
+    gmax = 0
+    gmin = finfo(xtype).max
+    cs = -1
+    sn = 0
+    w = zeros(n, dtype=xtype)
+    w2 = zeros(n, dtype=xtype)
+    r2 = r1
+
+    if show:
+        print()
+        print()
+        print('   Itn     x(1)     Compatible    LS       norm(A)  cond(A) gbar/|A|')
+
+    while itn < maxiter:
+        itn += 1
+
+        s = 1.0/beta
+        v = s*y
+
+        y = matvec(v)
+        y = y - shift * v
+
+        if itn >= 2:
+            y = y - (beta/oldb)*r1
+
+        alfa = inner(v,y)
+        y = y - (alfa/beta)*r2
+        r1 = r2
+        r2 = y
+        y = psolve(r2)
+        oldb = beta
+        beta = inner(r2,y)
+        if beta < 0:
+            raise ValueError('non-symmetric matrix')
+        beta = sqrt(beta)
+        tnorm2 += alfa**2 + oldb**2 + beta**2
+
+        if itn == 1:
+            if beta/beta1 <= 10*eps:
+                istop = -1  # Terminate later
+
+        # Apply previous rotation Qk-1 to get
+        #   [deltak epslnk+1] = [cs  sn][dbark    0   ]
+        #   [gbar k dbar k+1]   [sn -cs][alfak betak+1].
+
+        oldeps = epsln
+        delta = cs * dbar + sn * alfa   # delta1 = 0         deltak
+        gbar = sn * dbar - cs * alfa   # gbar 1 = alfa1     gbar k
+        epsln = sn * beta     # epsln2 = 0         epslnk+1
+        dbar = - cs * beta   # dbar 2 = beta2     dbar k+1
+        root = norm([gbar, dbar])
+        Arnorm = phibar * root
+
+        # Compute the next plane rotation Qk
+
+        gamma = norm([gbar, beta])       # gammak
+        gamma = max(gamma, eps)
+        cs = gbar / gamma             # ck
+        sn = beta / gamma             # sk
+        phi = cs * phibar              # phik
+        phibar = sn * phibar              # phibark+1
+
+        # Update  x.
+
+        denom = 1.0/gamma
+        w1 = w2
+        w2 = w
+        w = (v - oldeps*w1 - delta*w2) * denom
+        x = x + phi*w
+
+        # Go round again.
+
+        gmax = max(gmax, gamma)
+        gmin = min(gmin, gamma)
+        z = rhs1 / gamma
+        rhs1 = rhs2 - delta*z
+        rhs2 = - epsln*z
+
+        # Estimate various norms and test for convergence.
+
+        Anorm = sqrt(tnorm2)
+        ynorm = norm(x)
+        epsa = Anorm * eps
+        epsx = Anorm * ynorm * eps
+        epsr = Anorm * ynorm * tol
+        diag = gbar
+
+        if diag == 0:
+            diag = epsa
+
+        qrnorm = phibar
+        rnorm = qrnorm
+        if ynorm == 0 or Anorm == 0:
+            test1 = inf
+        else:
+            test1 = rnorm / (Anorm*ynorm)    # ||r||  / (||A|| ||x||)
+        if Anorm == 0:
+            test2 = inf
+        else:
+            test2 = root / Anorm            # ||Ar|| / (||A|| ||r||)
+
+        # Estimate  cond(A).
+        # In this version we look at the diagonals of  R  in the
+        # factorization of the lower Hessenberg matrix,  Q @ H = R,
+        # where H is the tridiagonal matrix from Lanczos with one
+        # extra row, beta(k+1) e_k^T.
+
+        Acond = gmax/gmin
+
+        # See if any of the stopping criteria are satisfied.
+        # In rare cases, istop is already -1 from above (Abar = const*I).
+
+        if istop == 0:
+            t1 = 1 + test1      # These tests work if tol < eps
+            t2 = 1 + test2
+            if t2 <= 1:
+                istop = 2
+            if t1 <= 1:
+                istop = 1
+
+            if itn >= maxiter:
+                istop = 6
+            if Acond >= 0.1/eps:
+                istop = 4
+            if epsx >= beta1:
+                istop = 3
+            # if rnorm <= epsx   : istop = 2
+            # if rnorm <= epsr   : istop = 1
+            if test2 <= tol:
+                istop = 2
+            if test1 <= tol:
+                istop = 1
+
+        # See if it is time to print something.
+
+        prnt = False
+        if n <= 40:
+            prnt = True
+        if itn <= 10:
+            prnt = True
+        if itn >= maxiter-10:
+            prnt = True
+        if itn % 10 == 0:
+            prnt = True
+        if qrnorm <= 10*epsx:
+            prnt = True
+        if qrnorm <= 10*epsr:
+            prnt = True
+        if Acond <= 1e-2/eps:
+            prnt = True
+        if istop != 0:
+            prnt = True
+
+        if show and prnt:
+            str1 = '%6g %12.5e %10.3e' % (itn, x[0], test1)
+            str2 = ' %10.3e' % (test2,)
+            str3 = ' %8.1e %8.1e %8.1e' % (Anorm, Acond, gbar/Anorm)
+
+            print(str1 + str2 + str3)
+
+            if itn % 10 == 0:
+                print()
+
+        if callback is not None:
+            callback(x)
+
+        if istop != 0:
+            break  # TODO check this
+
+    if show:
+        print()
+        print(last + ' istop   =  %3g               itn   =%5g' % (istop,itn))
+        print(last + ' Anorm   =  %12.4e      Acond =  %12.4e' % (Anorm,Acond))
+        print(last + ' rnorm   =  %12.4e      ynorm =  %12.4e' % (rnorm,ynorm))
+        print(last + ' Arnorm  =  %12.4e' % (Arnorm,))
+        print(last + msg[istop+1])
+
+    if istop == 6:
+        info = maxiter
+    else:
+        info = 0
+
+    return (postprocess(x),info)
+
+
+if __name__ == '__main__':
+    from numpy import arange
+    from scipy.sparse import spdiags
+
+    n = 10
+
+    residuals = []
+
+    def cb(x):
+        residuals.append(norm(b - A@x))
+
+    # A = poisson((10,),format='csr')
+    A = spdiags([arange(1,n+1,dtype=float)], [0], n, n, format='csr')
+    M = spdiags([1.0/arange(1,n+1,dtype=float)], [0], n, n, format='csr')
+    A.psolve = M.matvec
+    b = zeros(A.shape[0])
+    x, info = minres(A, b, tol=1e-12, maxiter=None, callback=cb)
+    # x = cg(A,b,x0=b,tol=1e-12,maxiter=None,callback=cb)[0]
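+
+    # Illustrative addition (a sketch, not part of the original script):
+    # solve against a right-hand side with a known solution and report the
+    # relative residual.
+    x_true = arange(1, n + 1, dtype=float)
+    b = A @ x_true
+    x, info = minres(A, b, tol=1e-12)
+    print('minres info =', info, ' rel. residual =', norm(b - A @ x) / norm(b))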
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/tests/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/tests/test_gcrotmk.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/tests/test_gcrotmk.py
new file mode 100644
index 00000000..62b1f9d9
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/tests/test_gcrotmk.py
@@ -0,0 +1,165 @@
+#!/usr/bin/env python
+"""Tests for the linalg._isolve.gcrotmk module
+"""
+
+from numpy.testing import (assert_, assert_allclose, assert_equal,
+                           suppress_warnings)
+
+import numpy as np
+from numpy import zeros, array, allclose
+from scipy.linalg import norm
+from scipy.sparse import csr_matrix, eye, rand
+
+from scipy.sparse.linalg._interface import LinearOperator
+from scipy.sparse.linalg import splu
+from scipy.sparse.linalg._isolve import gcrotmk, gmres
+
+
+Am = csr_matrix(array([[-2,1,0,0,0,9],
+                       [1,-2,1,0,5,0],
+                       [0,1,-2,1,0,0],
+                       [0,0,1,-2,1,0],
+                       [0,3,0,1,-2,1],
+                       [1,0,0,0,1,-2]]))
+b = array([1,2,3,4,5,6])
+count = [0]
+
+
+def matvec(v):
+    count[0] += 1
+    return Am@v
+
+
+A = LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype)
+
+
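+# Helper: run gcrotmk from a zero start vector on the fixed system above and
+# return the solution together with the number of matvec calls recorded by
+# the counting LinearOperator.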
+def do_solve(**kw):
+    count[0] = 0
+    with suppress_warnings() as sup:
+        sup.filter(DeprecationWarning, ".*called without specifying.*")
+        x0, flag = gcrotmk(A, b, x0=zeros(A.shape[0]), tol=1e-14, **kw)
+    count_0 = count[0]
+    assert_(allclose(A@x0, b, rtol=1e-12, atol=1e-12), norm(A@x0-b))
+    return x0, count_0
+
+
+class TestGCROTMK:
+    def test_preconditioner(self):
+        # Check that preconditioning works
+        pc = splu(Am.tocsc())
+        M = LinearOperator(matvec=pc.solve, shape=A.shape, dtype=A.dtype)
+
+        x0, count_0 = do_solve()
+        x1, count_1 = do_solve(M=M)
+
+        assert_equal(count_1, 3)
+        assert_(count_1 < count_0/2)
+        assert_(allclose(x1, x0, rtol=1e-14))
+
+    def test_arnoldi(self):
+        np.random.seed(1)
+
+        A = eye(2000) + rand(2000, 2000, density=5e-4)
+        b = np.random.rand(2000)
+
+        # The inner arnoldi should be equivalent to gmres
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, ".*called without specifying.*")
+            x0, flag0 = gcrotmk(A, b, x0=zeros(A.shape[0]), m=15, k=0, maxiter=1)
+            x1, flag1 = gmres(A, b, x0=zeros(A.shape[0]), restart=15, maxiter=1)
+
+        assert_equal(flag0, 1)
+        assert_equal(flag1, 1)
+        assert np.linalg.norm(A.dot(x0) - b) > 1e-3
+
+        assert_allclose(x0, x1)
+
+    def test_cornercase(self):
+        np.random.seed(1234)
+
+        # Rounding error may prevent convergence with tol=0 --- ensure
+        # that the return values in this case are correct, and no
+        # exceptions are raised
+
+        for n in [3, 5, 10, 100]:
+            A = 2*eye(n)
+
+            with suppress_warnings() as sup:
+                sup.filter(DeprecationWarning, ".*called without specifying.*")
+                b = np.ones(n)
+                x, info = gcrotmk(A, b, maxiter=10)
+                assert_equal(info, 0)
+                assert_allclose(A.dot(x) - b, 0, atol=1e-14)
+
+                x, info = gcrotmk(A, b, tol=0, maxiter=10)
+                if info == 0:
+                    assert_allclose(A.dot(x) - b, 0, atol=1e-14)
+
+                b = np.random.rand(n)
+                x, info = gcrotmk(A, b, maxiter=10)
+                assert_equal(info, 0)
+                assert_allclose(A.dot(x) - b, 0, atol=1e-14)
+
+                x, info = gcrotmk(A, b, tol=0, maxiter=10)
+                if info == 0:
+                    assert_allclose(A.dot(x) - b, 0, atol=1e-14)
+
+    def test_nans(self):
+        A = eye(3, format='lil')
+        A[1,1] = np.nan
+        b = np.ones(3)
+
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, ".*called without specifying.*")
+            x, info = gcrotmk(A, b, tol=0, maxiter=10)
+            assert_equal(info, 1)
+
+    def test_truncate(self):
+        np.random.seed(1234)
+        A = np.random.rand(30, 30) + np.eye(30)
+        b = np.random.rand(30)
+
+        for truncate in ['oldest', 'smallest']:
+            with suppress_warnings() as sup:
+                sup.filter(DeprecationWarning, ".*called without specifying.*")
+                x, info = gcrotmk(A, b, m=10, k=10, truncate=truncate, tol=1e-4,
+                                  maxiter=200)
+            assert_equal(info, 0)
+            assert_allclose(A.dot(x) - b, 0, atol=1e-3)
+
+    def test_CU(self):
+        for discard_C in (True, False):
+            # Check that C,U behave as expected
+            CU = []
+            x0, count_0 = do_solve(CU=CU, discard_C=discard_C)
+            assert_(len(CU) > 0)
+            assert_(len(CU) <= 6)
+
+            if discard_C:
+                for c, u in CU:
+                    assert_(c is None)
+
+            # should converge immediately
+            x1, count_1 = do_solve(CU=CU, discard_C=discard_C)
+            if discard_C:
+                assert_equal(count_1, 2 + len(CU))
+            else:
+                assert_equal(count_1, 3)
+            assert_(count_1 <= count_0/2)
+            assert_allclose(x1, x0, atol=1e-14)
+
+    def test_denormals(self):
+        # Check that no warnings are emitted if the matrix contains
+        # numbers for which 1/x has no float representation, and that
+        # the solver behaves properly.
+        A = np.array([[1, 2], [3, 4]], dtype=float)
+        A *= 100 * np.nextafter(0, 1)
+
+        b = np.array([1, 1])
+
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, ".*called without specifying.*")
+            xp, info = gcrotmk(A, b)
+
+        if info == 0:
+            assert_allclose(A.dot(xp), b)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/tests/test_iterative.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/tests/test_iterative.py
new file mode 100644
index 00000000..48fc16b7
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/tests/test_iterative.py
@@ -0,0 +1,794 @@
+""" Test functions for the sparse.linalg._isolve module
+"""
+
+import itertools
+import platform
+import sys
+import numpy as np
+
+from numpy.testing import (assert_equal, assert_array_equal,
+     assert_, assert_allclose, suppress_warnings)
+import pytest
+from pytest import raises as assert_raises
+
+from numpy import zeros, arange, array, ones, eye, iscomplexobj
+from scipy.linalg import norm
+from scipy.sparse import spdiags, csr_matrix, SparseEfficiencyWarning, kronsum
+
+from scipy.sparse.linalg import LinearOperator, aslinearoperator
+from scipy.sparse.linalg._isolve import cg, cgs, bicg, bicgstab, gmres, qmr, minres, lgmres, gcrotmk, tfqmr
+
+# TODO check that method preserve shape and type
+# TODO test both preconditioner methods
+
+
+class Case:
+    def __init__(self, name, A, b=None, skip=None, nonconvergence=None):
+        self.name = name
+        self.A = A
+        if b is None:
+            self.b = arange(A.shape[0], dtype=float)
+        else:
+            self.b = b
+        if skip is None:
+            self.skip = []
+        else:
+            self.skip = skip
+        if nonconvergence is None:
+            self.nonconvergence = []
+        else:
+            self.nonconvergence = nonconvergence
+
+    def __repr__(self):
+        return "<%s>" % self.name
+
+
+class IterativeParams:
+    def __init__(self):
+        # all solvers, plus the subsets restricted to symmetric,
+        # positive-definite, or real-valued problems
+        solvers = [cg, cgs, bicg, bicgstab, gmres, qmr, minres, lgmres, gcrotmk, tfqmr]
+        sym_solvers = [minres, cg]
+        posdef_solvers = [cg]
+        real_solvers = [minres]
+
+        self.solvers = solvers
+
+        # test cases (Case instances wrapping A, b and solver restrictions)
+        self.cases = []
+
+        # Symmetric and Positive Definite
+        N = 40
+        data = ones((3,N))
+        data[0,:] = 2
+        data[1,:] = -1
+        data[2,:] = -1
+        Poisson1D = spdiags(data, [0,-1,1], N, N, format='csr')
+        self.Poisson1D = Case("poisson1d", Poisson1D)
+        self.cases.append(Case("poisson1d", Poisson1D))
+        # note: minres fails for single precision
+        self.cases.append(Case("poisson1d", Poisson1D.astype('f'),
+                               skip=[minres]))
+
+        # Symmetric and Negative Definite
+        self.cases.append(Case("neg-poisson1d", -Poisson1D,
+                               skip=posdef_solvers))
+        # note: minres fails for single precision
+        self.cases.append(Case("neg-poisson1d", (-Poisson1D).astype('f'),
+                               skip=posdef_solvers + [minres]))
+
+        # 2-dimensional Poisson equations
+        Poisson2D = kronsum(Poisson1D, Poisson1D)
+        self.Poisson2D = Case("poisson2d", Poisson2D)
+        # note: minres fails for the 2-D Poisson problem; this will be
+        # fixed in a future PR
+        self.cases.append(Case("poisson2d", Poisson2D, skip=[minres]))
+        # note: minres fails for single precision
+        self.cases.append(Case("poisson2d", Poisson2D.astype('f'),
+                               skip=[minres]))
+
+        # Symmetric and Indefinite
+        data = array([[6, -5, 2, 7, -1, 10, 4, -3, -8, 9]],dtype='d')
+        RandDiag = spdiags(data, [0], 10, 10, format='csr')
+        self.cases.append(Case("rand-diag", RandDiag, skip=posdef_solvers))
+        self.cases.append(Case("rand-diag", RandDiag.astype('f'),
+                               skip=posdef_solvers))
+
+        # Random real-valued
+        np.random.seed(1234)
+        data = np.random.rand(4, 4)
+        self.cases.append(Case("rand", data, skip=posdef_solvers+sym_solvers))
+        self.cases.append(Case("rand", data.astype('f'),
+                               skip=posdef_solvers+sym_solvers))
+
+        # Random symmetric real-valued
+        np.random.seed(1234)
+        data = np.random.rand(4, 4)
+        data = data + data.T
+        self.cases.append(Case("rand-sym", data, skip=posdef_solvers))
+        self.cases.append(Case("rand-sym", data.astype('f'),
+                               skip=posdef_solvers))
+
+        # Random pos-def symmetric real
+        np.random.seed(1234)
+        data = np.random.rand(9, 9)
+        data = np.dot(data.conj(), data.T)
+        self.cases.append(Case("rand-sym-pd", data))
+        # note: minres fails for single precision
+        self.cases.append(Case("rand-sym-pd", data.astype('f'),
+                               skip=[minres]))
+
+        # Random complex-valued
+        np.random.seed(1234)
+        data = np.random.rand(4, 4) + 1j*np.random.rand(4, 4)
+        self.cases.append(Case("rand-cmplx", data,
+                               skip=posdef_solvers+sym_solvers+real_solvers))
+        self.cases.append(Case("rand-cmplx", data.astype('F'),
+                               skip=posdef_solvers+sym_solvers+real_solvers))
+
+        # Random hermitian complex-valued
+        np.random.seed(1234)
+        data = np.random.rand(4, 4) + 1j*np.random.rand(4, 4)
+        data = data + data.T.conj()
+        self.cases.append(Case("rand-cmplx-herm", data,
+                               skip=posdef_solvers+real_solvers))
+        self.cases.append(Case("rand-cmplx-herm", data.astype('F'),
+                               skip=posdef_solvers+real_solvers))
+
+        # Random pos-def hermitian complex-valued
+        np.random.seed(1234)
+        data = np.random.rand(9, 9) + 1j*np.random.rand(9, 9)
+        data = np.dot(data.conj(), data.T)
+        self.cases.append(Case("rand-cmplx-sym-pd", data, skip=real_solvers))
+        self.cases.append(Case("rand-cmplx-sym-pd", data.astype('F'),
+                               skip=real_solvers))
+
+        # Non-symmetric and Positive Definite
+        #
+        # cgs, qmr, bicg and tfqmr fail to converge on this one
+        #   -- algorithmic limitation apparently
+        data = ones((2,10))
+        data[0,:] = 2
+        data[1,:] = -1
+        A = spdiags(data, [0,-1], 10, 10, format='csr')
+        self.cases.append(Case("nonsymposdef", A,
+                               skip=sym_solvers+[cgs, qmr, bicg, tfqmr]))
+        self.cases.append(Case("nonsymposdef", A.astype('F'),
+                               skip=sym_solvers+[cgs, qmr, bicg, tfqmr]))
+
+        # Symmetric, non-pd, hitting cgs/bicg/bicgstab/qmr/tfqmr breakdown
+        A = np.array([[0, 0, 0, 0, 0, 1, -1, -0, -0, -0, -0],
+                      [0, 0, 0, 0, 0, 2, -0, -1, -0, -0, -0],
+                      [0, 0, 0, 0, 0, 2, -0, -0, -1, -0, -0],
+                      [0, 0, 0, 0, 0, 2, -0, -0, -0, -1, -0],
+                      [0, 0, 0, 0, 0, 1, -0, -0, -0, -0, -1],
+                      [1, 2, 2, 2, 1, 0, -0, -0, -0, -0, -0],
+                      [-1, 0, 0, 0, 0, 0, -1, -0, -0, -0, -0],
+                      [0, -1, 0, 0, 0, 0, -0, -1, -0, -0, -0],
+                      [0, 0, -1, 0, 0, 0, -0, -0, -1, -0, -0],
+                      [0, 0, 0, -1, 0, 0, -0, -0, -0, -1, -0],
+                      [0, 0, 0, 0, -1, 0, -0, -0, -0, -0, -1]], dtype=float)
+        b = np.array([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], dtype=float)
+        assert (A == A.T).all()
+        self.cases.append(Case("sym-nonpd", A, b,
+                               skip=posdef_solvers,
+                               nonconvergence=[cgs,bicg,bicgstab,qmr,tfqmr]))
+
+
+params = IterativeParams()
+
+
+def check_maxiter(solver, case):
+    A = case.A
+    tol = 1e-12
+
+    b = case.b
+    x0 = 0*b
+
+    residuals = []
+
+    def callback(x):
+        residuals.append(norm(b - case.A*x))
+
+    x, info = solver(A, b, x0=x0, tol=tol, maxiter=1, callback=callback)
+
+    assert_equal(len(residuals), 1)
+    assert_equal(info, 1)
+
+
+def test_maxiter():
+    for case in params.cases:
+        for solver in params.solvers:
+            if solver in case.skip + case.nonconvergence:
+                continue
+            with suppress_warnings() as sup:
+                sup.filter(DeprecationWarning, ".*called without specifying.*")
+                check_maxiter(solver, case)
+
+
+def assert_normclose(a, b, tol=1e-8):
+    residual = norm(a - b)
+    tolerance = tol * norm(b)
+    msg = f"residual ({residual}) not smaller than tolerance ({tolerance})"
+    assert_(residual < tolerance, msg=msg)
+
+
+def check_convergence(solver, case):
+    A = case.A
+
+    if A.dtype.char in "dD":
+        tol = 1e-8
+    else:
+        tol = 1e-2
+
+    b = case.b
+    x0 = 0*b
+
+    x, info = solver(A, b, x0=x0, tol=tol)
+
+    assert_array_equal(x0, 0*b)  # ensure that x0 is not overwritten
+    if solver not in case.nonconvergence:
+        assert_equal(info,0)
+        assert_normclose(A.dot(x), b, tol=tol)
+    else:
+        assert_(info != 0)
+        assert_(np.linalg.norm(A.dot(x) - b) <= np.linalg.norm(b))
+
+
+def test_convergence():
+    for solver in params.solvers:
+        for case in params.cases:
+            if solver in case.skip:
+                continue
+            with suppress_warnings() as sup:
+                sup.filter(DeprecationWarning, ".*called without specifying.*")
+                check_convergence(solver, case)
+
+
+def check_precond_dummy(solver, case):
+    tol = 1e-8
+
+    def identity(b,which=None):
+        """trivial preconditioner"""
+        return b
+
+    A = case.A
+
+    M,N = A.shape
+    # Ensure the diagonal elements of A are non-zero before calculating
+    # 1.0/A.diagonal()
+    diagOfA = A.diagonal()
+    if np.count_nonzero(diagOfA) == len(diagOfA):
+        spdiags([1.0/diagOfA], [0], M, N)
+
+    b = case.b
+    x0 = 0*b
+
+    precond = LinearOperator(A.shape, identity, rmatvec=identity)
+
+    if solver is qmr:
+        x, info = solver(A, b, M1=precond, M2=precond, x0=x0, tol=tol)
+    else:
+        x, info = solver(A, b, M=precond, x0=x0, tol=tol)
+    assert_equal(info,0)
+    assert_normclose(A.dot(x), b, tol)
+
+    A = aslinearoperator(A)
+    A.psolve = identity
+    A.rpsolve = identity
+
+    x, info = solver(A, b, x0=x0, tol=tol)
+    assert_equal(info,0)
+    assert_normclose(A@x, b, tol=tol)
+
+
+def test_precond_dummy():
+    for case in params.cases:
+        for solver in params.solvers:
+            if solver in case.skip + case.nonconvergence:
+                continue
+            with suppress_warnings() as sup:
+                sup.filter(DeprecationWarning, ".*called without specifying.*")
+                check_precond_dummy(solver, case)
+
+
+def check_precond_inverse(solver, case):
+    tol = 1e-8
+
+    def inverse(b,which=None):
+        """inverse preconditioner"""
+        A = case.A
+        if not isinstance(A, np.ndarray):
+            A = A.toarray()
+        return np.linalg.solve(A, b)
+
+    def rinverse(b,which=None):
+        """inverse preconditioner"""
+        A = case.A
+        if not isinstance(A, np.ndarray):
+            A = A.toarray()
+        return np.linalg.solve(A.T, b)
+
+    matvec_count = [0]
+
+    def matvec(b):
+        matvec_count[0] += 1
+        return case.A.dot(b)
+
+    def rmatvec(b):
+        matvec_count[0] += 1
+        return case.A.T.dot(b)
+
+    b = case.b
+    x0 = 0*b
+
+    A = LinearOperator(case.A.shape, matvec, rmatvec=rmatvec)
+    precond = LinearOperator(case.A.shape, inverse, rmatvec=rinverse)
+
+    # Solve with preconditioner
+    matvec_count = [0]
+    x, info = solver(A, b, M=precond, x0=x0, tol=tol)
+
+    assert_equal(info, 0)
+    assert_normclose(case.A.dot(x), b, tol)
+
+    # Solution should be nearly instant
+    assert_(matvec_count[0] <= 3, repr(matvec_count))
+
+
+@pytest.mark.parametrize("case", [params.Poisson1D, params.Poisson2D])
+def test_precond_inverse(case):
+    for solver in params.solvers:
+        if solver in case.skip:
+            continue
+        if solver is qmr:
+            continue
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, ".*called without specifying.*")
+            check_precond_inverse(solver, case)
+
+
+def test_reentrancy():
+    non_reentrant = [cg, cgs, bicg, bicgstab, gmres, qmr]
+    reentrant = [lgmres, minres, gcrotmk, tfqmr]
+    for solver in reentrant + non_reentrant:
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, ".*called without specifying.*")
+            _check_reentrancy(solver, solver in reentrant)
+
+
+def _check_reentrancy(solver, is_reentrant):
+    def matvec(x):
+        A = np.array([[1.0, 0, 0], [0, 2.0, 0], [0, 0, 3.0]])
+        y, info = solver(A, x)
+        assert_equal(info, 0)
+        return y
+    b = np.array([1, 1./2, 1./3])
+    op = LinearOperator((3, 3), matvec=matvec, rmatvec=matvec,
+                        dtype=b.dtype)
+
+    if not is_reentrant:
+        assert_raises(RuntimeError, solver, op, b)
+    else:
+        y, info = solver(op, b)
+        assert_equal(info, 0)
+        assert_allclose(y, [1, 1, 1])
+
+
+@pytest.mark.parametrize("solver", [cg, cgs, bicg, bicgstab, gmres, qmr, lgmres, gcrotmk])
+def test_atol(solver):
+    # TODO: minres. It didn't historically use absolute tolerances, so
+    # fixing it is less urgent.
+
+    np.random.seed(1234)
+    A = np.random.rand(10, 10)
+    A = A.dot(A.T) + 10 * np.eye(10)
+    b = 1e3 * np.random.rand(10)
+    b_norm = np.linalg.norm(b)
+
+    tols = np.r_[0, np.logspace(np.log10(1e-10), np.log10(1e2), 7), np.inf]
+
+    # Check effect of badly scaled preconditioners
+    M0 = np.random.randn(10, 10)
+    M0 = M0.dot(M0.T)
+    Ms = [None, 1e-6 * M0, 1e6 * M0]
+
+    for M, tol, atol in itertools.product(Ms, tols, tols):
+        if tol == 0 and atol == 0:
+            continue
+
+        if solver is qmr:
+            if M is not None:
+                M = aslinearoperator(M)
+                M2 = aslinearoperator(np.eye(10))
+            else:
+                M2 = None
+            x, info = solver(A, b, M1=M, M2=M2, tol=tol, atol=atol)
+        else:
+            x, info = solver(A, b, M=M, tol=tol, atol=atol)
+        assert_equal(info, 0)
+
+        residual = A.dot(x) - b
+        err = np.linalg.norm(residual)
+        atol2 = tol * b_norm
+        # Added 1.00025 fudge factor because of `err` exceeding `atol` just
+        # very slightly on s390x (see gh-17839)
+        assert_(err <= 1.00025 * max(atol, atol2))
+
+
+@pytest.mark.parametrize("solver", [cg, cgs, bicg, bicgstab, gmres, qmr, minres, lgmres, gcrotmk, tfqmr])
+def test_zero_rhs(solver):
+    np.random.seed(1234)
+    A = np.random.rand(10, 10)
+    A = A.dot(A.T) + 10 * np.eye(10)
+
+    b = np.zeros(10)
+    tols = np.r_[np.logspace(np.log10(1e-10), np.log10(1e2), 7)]
+
+    for tol in tols:
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, ".*called without specifying.*")
+
+            x, info = solver(A, b, tol=tol)
+            assert_equal(info, 0)
+            assert_allclose(x, 0, atol=1e-15)
+
+            x, info = solver(A, b, tol=tol, x0=ones(10))
+            assert_equal(info, 0)
+            assert_allclose(x, 0, atol=tol)
+
+            if solver is not minres:
+                x, info = solver(A, b, tol=tol, atol=0, x0=ones(10))
+                if info == 0:
+                    assert_allclose(x, 0)
+
+                x, info = solver(A, b, tol=tol, atol=tol)
+                assert_equal(info, 0)
+                assert_allclose(x, 0, atol=1e-300)
+
+                x, info = solver(A, b, tol=tol, atol=0)
+                assert_equal(info, 0)
+                assert_allclose(x, 0, atol=1e-300)
+
+
+@pytest.mark.parametrize("solver", [
+    pytest.param(gmres, marks=pytest.mark.xfail(platform.machine() == 'aarch64'
+                                                and sys.version_info[1] == 9,
+                                                reason="gh-13019")),
+    qmr,
+    pytest.param(lgmres, marks=pytest.mark.xfail(
+        platform.machine() not in ['x86_64', 'x86', 'aarch64', 'arm64'],
+        reason="fails on at least ppc64le, ppc64 and riscv64, see gh-17839")
+    ),
+    pytest.param(cgs, marks=pytest.mark.xfail),
+    pytest.param(bicg, marks=pytest.mark.xfail),
+    pytest.param(bicgstab, marks=pytest.mark.xfail),
+    pytest.param(gcrotmk, marks=pytest.mark.xfail),
+    pytest.param(tfqmr, marks=pytest.mark.xfail)])
+def test_maxiter_worsening(solver):
+    # Check error does not grow (boundlessly) with increasing maxiter.
+    # This can occur due to the solvers hitting close to breakdown,
+    # which they should detect and halt as necessary.
+    # cf. gh-9100
+
+    # Singular matrix, rhs numerically not in range
+    A = np.array([[-0.1112795288033378, 0, 0, 0.16127952880333685],
+                  [0, -0.13627952880333782+6.283185307179586j, 0, 0],
+                  [0, 0, -0.13627952880333782-6.283185307179586j, 0],
+                  [0.1112795288033368, 0j, 0j, -0.16127952880333785]])
+    v = np.ones(4)
+    best_error = np.inf
+    tol = 7 if platform.machine() == 'aarch64' else 5
+
+    for maxiter in range(1, 20):
+        x, info = solver(A, v, maxiter=maxiter, tol=1e-8, atol=0)
+
+        if info == 0:
+            assert_(np.linalg.norm(A.dot(x) - v) <= 1e-8*np.linalg.norm(v))
+
+        error = np.linalg.norm(A.dot(x) - v)
+        best_error = min(best_error, error)
+
+        # Check with slack
+        assert_(error <= tol*best_error)
+
+
+@pytest.mark.parametrize("solver", [cg, cgs, bicg, bicgstab, gmres, qmr, minres, lgmres, gcrotmk, tfqmr])
+def test_x0_working(solver):
+    # Easy problem
+    np.random.seed(1)
+    n = 10
+    A = np.random.rand(n, n)
+    A = A.dot(A.T)
+    b = np.random.rand(n)
+    x0 = np.random.rand(n)
+
+    if solver is minres:
+        kw = dict(tol=1e-6)
+    else:
+        kw = dict(atol=0, tol=1e-6)
+
+    x, info = solver(A, b, **kw)
+    assert_equal(info, 0)
+    assert_(np.linalg.norm(A.dot(x) - b) <= 1e-6*np.linalg.norm(b))
+
+    x, info = solver(A, b, x0=x0, **kw)
+    assert_equal(info, 0)
+    assert_(np.linalg.norm(A.dot(x) - b) <= 1e-6*np.linalg.norm(b))
+
+
+@pytest.mark.parametrize('solver', [cg, cgs, bicg, bicgstab, gmres, qmr,
+                                    minres, lgmres, gcrotmk])
+def test_x0_equals_Mb(solver):
+    for case in params.cases:
+        if solver in case.skip:
+            continue
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, ".*called without specifying.*")
+            A = case.A
+            b = case.b
+            x0 = 'Mb'
+            tol = 1e-8
+            x, info = solver(A, b, x0=x0, tol=tol)
+
+            assert_array_equal(x0, 'Mb')  # ensure that x0 is not overwritten
+            assert_equal(info, 0)
+            assert_normclose(A.dot(x), b, tol=tol)
+
+
+@pytest.mark.parametrize(('solver', 'solverstring'), [(tfqmr, 'TFQMR')])
+def test_show(solver, solverstring, capsys):
+    def cb(x):
+        count[0] += 1
+
+    for i in [0, 20]:
+        case = params.cases[i]
+        A = case.A
+        b = case.b
+        count = [0]
+        x, info = solver(A, b, callback=cb, show=True)
+        out, err = capsys.readouterr()
+        if i == 20:  # Asymmetric and Positive Definite
+            assert_equal(out, f"{solverstring}: Linear solve not converged "
+                              f"due to reach MAXIT iterations {count[0]}\n")
+        else:  # 1-D Poisson equations
+            assert_equal(out, f"{solverstring}: Linear solve converged due to "
+                              f"reach TOL iterations {count[0]}\n")
+        assert_equal(err, '')
+
+
+#------------------------------------------------------------------------------
+
+class TestQMR:
+    def test_leftright_precond(self):
+        """Check that QMR works with left and right preconditioners"""
+
+        from scipy.sparse.linalg._dsolve import splu
+        from scipy.sparse.linalg._interface import LinearOperator
+
+        n = 100
+
+        dat = ones(n)
+        A = spdiags([-2*dat, 4*dat, -dat], [-1,0,1],n,n)
+        b = arange(n,dtype='d')
+
+        L = spdiags([-dat/2, dat], [-1,0], n, n)
+        U = spdiags([4*dat, -dat], [0,1], n, n)
+
+        with suppress_warnings() as sup:
+            sup.filter(SparseEfficiencyWarning,
+                       "splu converted its input to CSC format")
+            L_solver = splu(L)
+            U_solver = splu(U)
+
+        def L_solve(b):
+            return L_solver.solve(b)
+
+        def U_solve(b):
+            return U_solver.solve(b)
+
+        def LT_solve(b):
+            return L_solver.solve(b,'T')
+
+        def UT_solve(b):
+            return U_solver.solve(b,'T')
+
+        M1 = LinearOperator((n,n), matvec=L_solve, rmatvec=LT_solve)
+        M2 = LinearOperator((n,n), matvec=U_solve, rmatvec=UT_solve)
+
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, ".*called without specifying.*")
+            x,info = qmr(A, b, tol=1e-8, maxiter=15, M1=M1, M2=M2)
+
+        assert_equal(info,0)
+        assert_normclose(A@x, b, tol=1e-8)
+
+
+class TestGMRES:
+    def test_basic(self):
+        A = np.vander(np.arange(10) + 1)[:, ::-1]
+        b = np.zeros(10)
+        b[0] = 1
+
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, ".*called without specifying.*")
+            x_gm, err = gmres(A, b, restart=5, maxiter=1)
+
+        assert_allclose(x_gm[0], 0.359, rtol=1e-2)
+
+    def test_callback(self):
+
+        def store_residual(r, rvec):
+            rvec[rvec.nonzero()[0].max()+1] = r
+
+        # Define A, b
+        A = csr_matrix(array([[-2,1,0,0,0,0],[1,-2,1,0,0,0],[0,1,-2,1,0,0],[0,0,1,-2,1,0],[0,0,0,1,-2,1],[0,0,0,0,1,-2]]))
+        b = ones((A.shape[0],))
+        maxiter = 1
+        rvec = zeros(maxiter+1)
+        rvec[0] = 1.0
+        callback = lambda r:store_residual(r, rvec)
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, ".*called without specifying.*")
+            x,flag = gmres(A, b, x0=zeros(A.shape[0]), tol=1e-16, maxiter=maxiter, callback=callback)
+
+        # Expected output from SciPy 1.0.0
+        assert_allclose(rvec, array([1.0, 0.81649658092772603]), rtol=1e-10)
+
+        # Test preconditioned callback
+        M = 1e-3 * np.eye(A.shape[0])
+        rvec = zeros(maxiter+1)
+        rvec[0] = 1.0
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, ".*called without specifying.*")
+            x, flag = gmres(A, b, M=M, tol=1e-16, maxiter=maxiter, callback=callback)
+
+        # Expected output from SciPy 1.0.0 (callback has preconditioned residual!)
+        assert_allclose(rvec, array([1.0, 1e-3 * 0.81649658092772603]), rtol=1e-10)
+
+    def test_abi(self):
+        # Check we don't segfault on gmres with complex argument
+        A = eye(2)
+        b = ones(2)
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, ".*called without specifying.*")
+            r_x, r_info = gmres(A, b)
+            r_x = r_x.astype(complex)
+
+            x, info = gmres(A.astype(complex), b.astype(complex))
+
+        assert_(iscomplexobj(x))
+        assert_allclose(r_x, x)
+        assert_(r_info == info)
+
+    def test_atol_legacy(self):
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, ".*called without specifying.*")
+
+            # Check the strange legacy behavior: the tolerance is interpreted
+            # as atol, but only for the initial residual
+            A = eye(2)
+            b = 1e-6 * ones(2)
+            x, info = gmres(A, b, tol=1e-5)
+            assert_array_equal(x, np.zeros(2))
+
+            A = eye(2)
+            b = ones(2)
+            x, info = gmres(A, b, tol=1e-5)
+            assert_(np.linalg.norm(A.dot(x) - b) <= 1e-5*np.linalg.norm(b))
+            assert_allclose(x, b, atol=0, rtol=1e-8)
+
+            rndm = np.random.RandomState(12345)
+            A = rndm.rand(30, 30)
+            b = 1e-6 * ones(30)
+            x, info = gmres(A, b, tol=1e-7, restart=20)
+            assert_(np.linalg.norm(A.dot(x) - b) > 1e-7)
+
+        A = eye(2)
+        b = 1e-10 * ones(2)
+        x, info = gmres(A, b, tol=1e-8, atol=0)
+        assert_(np.linalg.norm(A.dot(x) - b) <= 1e-8*np.linalg.norm(b))
+
+    def test_defective_precond_breakdown(self):
+        # Breakdown due to defective preconditioner
+        M = np.eye(3)
+        M[2,2] = 0
+
+        b = np.array([0, 1, 1])
+        x = np.array([1, 0, 0])
+        A = np.diag([2, 3, 4])
+
+        x, info = gmres(A, b, x0=x, M=M, tol=1e-15, atol=0)
+
+        # Should not return nans, nor terminate with false success
+        assert_(not np.isnan(x).any())
+        if info == 0:
+            assert_(np.linalg.norm(A.dot(x) - b) <= 1e-15*np.linalg.norm(b))
+
+        # The solution should be OK outside null space of M
+        assert_allclose(M.dot(A.dot(x)), M.dot(b))
+
+    def test_defective_matrix_breakdown(self):
+        # Breakdown due to defective matrix
+        A = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 0]])
+        b = np.array([1, 0, 1])
+        x, info = gmres(A, b, tol=1e-8, atol=0)
+
+        # Should not return nans, nor terminate with false success
+        assert_(not np.isnan(x).any())
+        if info == 0:
+            assert_(np.linalg.norm(A.dot(x) - b) <= 1e-8*np.linalg.norm(b))
+
+        # The solution should be OK outside null space of A
+        assert_allclose(A.dot(A.dot(x)), A.dot(b))
+
+    def test_callback_type(self):
+        # The legacy callback type changes meaning of 'maxiter'
+        np.random.seed(1)
+        A = np.random.rand(20, 20)
+        b = np.random.rand(20)
+
+        cb_count = [0]
+
+        def pr_norm_cb(r):
+            cb_count[0] += 1
+            assert_(isinstance(r, float))
+
+        def x_cb(x):
+            cb_count[0] += 1
+            assert_(isinstance(x, np.ndarray))
+
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, ".*called without specifying.*")
+            # 2 iterations is not enough to solve the problem
+            cb_count = [0]
+            x, info = gmres(A, b, tol=1e-6, atol=0, callback=pr_norm_cb, maxiter=2, restart=50)
+            assert info == 2
+            assert cb_count[0] == 2
+
+        # With `callback_type` specified, no warning should be raised
+        cb_count = [0]
+        x, info = gmres(A, b, tol=1e-6, atol=0, callback=pr_norm_cb, maxiter=2, restart=50,
+                        callback_type='legacy')
+        assert info == 2
+        assert cb_count[0] == 2
+
+        # 2 restart cycles is enough to solve the problem
+        cb_count = [0]
+        x, info = gmres(A, b, tol=1e-6, atol=0, callback=pr_norm_cb, maxiter=2, restart=50,
+                        callback_type='pr_norm')
+        assert info == 0
+        assert cb_count[0] > 2
+
+        # 2 restart cycles is enough to solve the problem
+        cb_count = [0]
+        x, info = gmres(A, b, tol=1e-6, atol=0, callback=x_cb, maxiter=2, restart=50,
+                        callback_type='x')
+        assert info == 0
+        assert cb_count[0] == 2
+
+    def test_callback_x_monotonic(self):
+        # Check that callback_type='x' gives monotonic norm decrease
+        np.random.seed(1)
+        A = np.random.rand(20, 20) + np.eye(20)
+        b = np.random.rand(20)
+
+        prev_r = [np.inf]
+        count = [0]
+
+        def x_cb(x):
+            r = np.linalg.norm(A.dot(x) - b)
+            assert r <= prev_r[0]
+            prev_r[0] = r
+            count[0] += 1
+
+        x, info = gmres(A, b, tol=1e-6, atol=0, callback=x_cb, maxiter=20, restart=10,
+                        callback_type='x')
+        assert info == 20
+        assert count[0] == 21
+        x_cb(x)
+
+    def test_restrt_dep(self):
+        with pytest.warns(
+            DeprecationWarning,
+            match="'gmres' keyword argument 'restrt'"
+        ):
+            gmres(np.array([1]), np.array([1]), restrt=10)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/tests/test_lgmres.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/tests/test_lgmres.py
new file mode 100644
index 00000000..9a304e4c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/tests/test_lgmres.py
@@ -0,0 +1,211 @@
+"""Tests for the linalg._isolve.lgmres module
+"""
+
+from numpy.testing import (assert_, assert_allclose, assert_equal,
+                           suppress_warnings)
+
+import pytest
+from platform import python_implementation
+
+import numpy as np
+from numpy import zeros, array, allclose
+from scipy.linalg import norm
+from scipy.sparse import csr_matrix, eye, rand
+
+from scipy.sparse.linalg._interface import LinearOperator
+from scipy.sparse.linalg import splu
+from scipy.sparse.linalg._isolve import lgmres, gmres
+
+
+Am = csr_matrix(array([[-2, 1, 0, 0, 0, 9],
+                       [1, -2, 1, 0, 5, 0],
+                       [0, 1, -2, 1, 0, 0],
+                       [0, 0, 1, -2, 1, 0],
+                       [0, 3, 0, 1, -2, 1],
+                       [1, 0, 0, 0, 1, -2]]))
+b = array([1, 2, 3, 4, 5, 6])
+count = [0]
+
+
+def matvec(v):
+    count[0] += 1
+    return Am@v
+
+
+A = LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype)
+
+
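+# Helper: run lgmres from a zero start vector on the fixed system above and
+# return the solution together with the number of matvec calls recorded by
+# the counting LinearOperator.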
+def do_solve(**kw):
+    count[0] = 0
+    with suppress_warnings() as sup:
+        sup.filter(DeprecationWarning, ".*called without specifying.*")
+        x0, flag = lgmres(A, b, x0=zeros(A.shape[0]),
+                          inner_m=6, tol=1e-14, **kw)
+    count_0 = count[0]
+    assert_(allclose(A@x0, b, rtol=1e-12, atol=1e-12), norm(A@x0-b))
+    return x0, count_0
+
+
+class TestLGMRES:
+    def test_preconditioner(self):
+        # Check that preconditioning works
+        pc = splu(Am.tocsc())
+        M = LinearOperator(matvec=pc.solve, shape=A.shape, dtype=A.dtype)
+
+        x0, count_0 = do_solve()
+        x1, count_1 = do_solve(M=M)
+
+        assert_(count_1 == 3)
+        assert_(count_1 < count_0/2)
+        assert_(allclose(x1, x0, rtol=1e-14))
+
+    def test_outer_v(self):
+        # Check that the augmentation vectors behave as expected
+
+        outer_v = []
+        x0, count_0 = do_solve(outer_k=6, outer_v=outer_v)
+        assert_(len(outer_v) > 0)
+        assert_(len(outer_v) <= 6)
+
+        x1, count_1 = do_solve(outer_k=6, outer_v=outer_v,
+                               prepend_outer_v=True)
+        assert_(count_1 == 2, count_1)
+        assert_(count_1 < count_0/2)
+        assert_(allclose(x1, x0, rtol=1e-14))
+
+        # ---
+
+        outer_v = []
+        x0, count_0 = do_solve(outer_k=6, outer_v=outer_v,
+                               store_outer_Av=False)
+        assert_(array([v[1] is None for v in outer_v]).all())
+        assert_(len(outer_v) > 0)
+        assert_(len(outer_v) <= 6)
+
+        x1, count_1 = do_solve(outer_k=6, outer_v=outer_v,
+                               prepend_outer_v=True)
+        assert_(count_1 == 3, count_1)
+        assert_(count_1 < count_0/2)
+        assert_(allclose(x1, x0, rtol=1e-14))
+
+    @pytest.mark.skipif(python_implementation() == 'PyPy',
+                        reason="Fails on PyPy CI runs. See #9507")
+    def test_arnoldi(self):
+        np.random.seed(1234)
+
+        A = eye(2000) + rand(2000, 2000, density=5e-4)
+        b = np.random.rand(2000)
+
+        # The inner Arnoldi iteration should be equivalent to GMRES
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, ".*called without specifying.*")
+            x0, flag0 = lgmres(A, b, x0=zeros(A.shape[0]),
+                               inner_m=15, maxiter=1)
+            x1, flag1 = gmres(A, b, x0=zeros(A.shape[0]),
+                              restart=15, maxiter=1)
+
+        assert_equal(flag0, 1)
+        assert_equal(flag1, 1)
+        resid_norm = np.linalg.norm(A.dot(x0) - b)
+        assert_(resid_norm > 1e-4)
+        assert_allclose(x0, x1)
+
+    def test_cornercase(self):
+        np.random.seed(1234)
+
+        # Rounding error may prevent convergence with tol=0 --- ensure
+        # that the return values in this case are correct, and no
+        # exceptions are raised
+
+        for n in [3, 5, 10, 100]:
+            A = 2*eye(n)
+
+            with suppress_warnings() as sup:
+                sup.filter(DeprecationWarning, ".*called without specifying.*")
+
+                b = np.ones(n)
+                x, info = lgmres(A, b, maxiter=10)
+                assert_equal(info, 0)
+                assert_allclose(A.dot(x) - b, 0, atol=1e-14)
+
+                x, info = lgmres(A, b, tol=0, maxiter=10)
+                if info == 0:
+                    assert_allclose(A.dot(x) - b, 0, atol=1e-14)
+
+                b = np.random.rand(n)
+                x, info = lgmres(A, b, maxiter=10)
+                assert_equal(info, 0)
+                assert_allclose(A.dot(x) - b, 0, atol=1e-14)
+
+                x, info = lgmres(A, b, tol=0, maxiter=10)
+                if info == 0:
+                    assert_allclose(A.dot(x) - b, 0, atol=1e-14)
+
+    def test_nans(self):
+        A = eye(3, format='lil')
+        A[1, 1] = np.nan
+        b = np.ones(3)
+
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, ".*called without specifying.*")
+            x, info = lgmres(A, b, tol=0, maxiter=10)
+            assert_equal(info, 1)
+
+    def test_breakdown_with_outer_v(self):
+        A = np.array([[1, 2], [3, 4]], dtype=float)
+        b = np.array([1, 2])
+
+        x = np.linalg.solve(A, b)
+        v0 = np.array([1, 0])
+
+        # The inner iteration should converge to the correct solution,
+        # since it's in the outer vector list
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, ".*called without specifying.*")
+            xp, info = lgmres(A, b, outer_v=[(v0, None), (x, None)], maxiter=1)
+
+        assert_allclose(xp, x, atol=1e-12)
+
+    def test_breakdown_underdetermined(self):
+        # Should find LSQ solution in the Krylov span in one inner
+        # iteration, despite solver breakdown from nilpotent A.
+        A = np.array([[0, 1, 1, 1],
+                      [0, 0, 1, 1],
+                      [0, 0, 0, 1],
+                      [0, 0, 0, 0]], dtype=float)
+
+        bs = [
+            np.array([1, 1, 1, 1]),
+            np.array([1, 1, 1, 0]),
+            np.array([1, 1, 0, 0]),
+            np.array([1, 0, 0, 0]),
+        ]
+
+        for b in bs:
+            with suppress_warnings() as sup:
+                sup.filter(DeprecationWarning, ".*called without specifying.*")
+                xp, info = lgmres(A, b, maxiter=1)
+            resp = np.linalg.norm(A.dot(xp) - b)
+
+            K = np.c_[b, A.dot(b), A.dot(A.dot(b)), A.dot(A.dot(A.dot(b)))]
+            y, _, _, _ = np.linalg.lstsq(A.dot(K), b, rcond=-1)
+            x = K.dot(y)
+            res = np.linalg.norm(A.dot(x) - b)
+
+            assert_allclose(resp, res, err_msg=repr(b))
+
+    def test_denormals(self):
+        # Check that no warnings are emitted if the matrix contains
+        # numbers for which 1/x has no float representation, and that
+        # the solver behaves properly.
+        A = np.array([[1, 2], [3, 4]], dtype=float)
+        A *= 100 * np.nextafter(0, 1)
+
+        b = np.array([1, 1])
+
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, ".*called without specifying.*")
+            xp, info = lgmres(A, b)
+
+        if info == 0:
+            assert_allclose(A.dot(xp), b)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/tests/test_lsmr.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/tests/test_lsmr.py
new file mode 100644
index 00000000..c4797bb2
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/tests/test_lsmr.py
@@ -0,0 +1,228 @@
+"""
+Copyright (C) 2010 David Fong and Michael Saunders
+Distributed under the same license as SciPy
+
+Testing Code for LSMR.
+
+03 Jun 2010: First version release with lsmr.py
+
+David Chin-lung Fong            clfong@stanford.edu
+Institute for Computational and Mathematical Engineering
+Stanford University
+
+Michael Saunders                saunders@stanford.edu
+Systems Optimization Laboratory
+Dept of MS&E, Stanford University.
+
+"""
+
+from numpy import array, arange, eye, zeros, ones, sqrt, transpose, hstack
+from numpy.linalg import norm
+from numpy.testing import assert_allclose
+import pytest
+from scipy.sparse import coo_matrix
+from scipy.sparse.linalg._interface import aslinearoperator
+from scipy.sparse.linalg import lsmr
+from .test_lsqr import G, b
+
+
+class TestLSMR:
+    def setup_method(self):
+        self.n = 10
+        self.m = 10
+
+    def assertCompatibleSystem(self, A, xtrue):
+        Afun = aslinearoperator(A)
+        b = Afun.matvec(xtrue)
+        x = lsmr(A, b)[0]
+        assert norm(x - xtrue) == pytest.approx(0, abs=1e-5)
+
+    def testIdentityACase1(self):
+        A = eye(self.n)
+        xtrue = zeros((self.n, 1))
+        self.assertCompatibleSystem(A, xtrue)
+
+    def testIdentityACase2(self):
+        A = eye(self.n)
+        xtrue = ones((self.n, 1))
+        self.assertCompatibleSystem(A, xtrue)
+
+    def testIdentityACase3(self):
+        A = eye(self.n)
+        xtrue = transpose(arange(self.n, 0, -1))
+        self.assertCompatibleSystem(A, xtrue)
+
+    def testBidiagonalA(self):
+        A = lowerBidiagonalMatrix(20, self.n)
+        xtrue = transpose(arange(self.n, 0, -1))
+        self.assertCompatibleSystem(A, xtrue)
+
+    def testScalarB(self):
+        A = array([[1.0, 2.0]])
+        b = 3.0
+        x = lsmr(A, b)[0]
+        assert norm(A.dot(x) - b) == pytest.approx(0)
+
+    def testComplexX(self):
+        A = eye(self.n)
+        xtrue = transpose(arange(self.n, 0, -1) * (1 + 1j))
+        self.assertCompatibleSystem(A, xtrue)
+
+    def testComplexX0(self):
+        A = 4 * eye(self.n) + ones((self.n, self.n))
+        xtrue = transpose(arange(self.n, 0, -1))
+        b = aslinearoperator(A).matvec(xtrue)
+        x0 = zeros(self.n, dtype=complex)
+        x = lsmr(A, b, x0=x0)[0]
+        assert norm(x - xtrue) == pytest.approx(0, abs=1e-5)
+
+    def testComplexA(self):
+        A = 4 * eye(self.n) + 1j * ones((self.n, self.n))
+        xtrue = transpose(arange(self.n, 0, -1).astype(complex))
+        self.assertCompatibleSystem(A, xtrue)
+
+    def testComplexB(self):
+        A = 4 * eye(self.n) + ones((self.n, self.n))
+        xtrue = transpose(arange(self.n, 0, -1) * (1 + 1j))
+        b = aslinearoperator(A).matvec(xtrue)
+        x = lsmr(A, b)[0]
+        assert norm(x - xtrue) == pytest.approx(0, abs=1e-5)
+
+    def testColumnB(self):
+        A = eye(self.n)
+        b = ones((self.n, 1))
+        x = lsmr(A, b)[0]
+        assert norm(A.dot(x) - b.ravel()) == pytest.approx(0)
+
+    def testInitialization(self):
+        # Test that the default setting is not modified
+        x_ref, _, itn_ref, normr_ref, *_ = lsmr(G, b)
+        assert_allclose(norm(b - G@x_ref), normr_ref, atol=1e-6)
+
+        # Test that passing zeros yields a similar result
+        x0 = zeros(b.shape)
+        x = lsmr(G, b, x0=x0)[0]
+        assert_allclose(x, x_ref)
+
+        # Test warm-start with single iteration
+        x0 = lsmr(G, b, maxiter=1)[0]
+
+        x, _, itn, normr, *_ = lsmr(G, b, x0=x0)
+        assert_allclose(norm(b - G@x), normr, atol=1e-6)
+
+        # NOTE(gh-12139): This doesn't always converge to the same value as
+        # ref because the error estimates differ slightly when calculated
+        # from zeros vs. x0; as a result, compare only norm and itn (not x).
+
+        # x generally converges 1 iteration faster because it started at x0.
+        # itn == itn_ref means that lsmr(x0) took an extra iteration; see
+        # above. -1 is technically possible but rare (1 in 100000), so it's
+        # more likely to be an error elsewhere.
+        assert itn - itn_ref in (0, 1)
+
+        # If an extra iteration is performed normr may be 0, while normr_ref
+        # may be much larger.
+        assert normr < normr_ref * (1 + 1e-6)
+
+
+class TestLSMRReturns:
+    def setup_method(self):
+        self.n = 10
+        self.A = lowerBidiagonalMatrix(20, self.n)
+        self.xtrue = transpose(arange(self.n, 0, -1))
+        self.Afun = aslinearoperator(self.A)
+        self.b = self.Afun.matvec(self.xtrue)
+        self.x0 = ones(self.n)
+        self.x00 = self.x0.copy()
+        self.returnValues = lsmr(self.A, self.b)
+        self.returnValuesX0 = lsmr(self.A, self.b, x0=self.x0)
+
+    def test_unchanged_x0(self):
+        x, istop, itn, normr, normar, normA, condA, normx = self.returnValuesX0
+        assert_allclose(self.x00, self.x0)
+
+    def testNormr(self):
+        x, istop, itn, normr, normar, normA, condA, normx = self.returnValues
+        assert norm(self.b - self.Afun.matvec(x)) == pytest.approx(normr)
+
+    def testNormar(self):
+        x, istop, itn, normr, normar, normA, condA, normx = self.returnValues
+        assert (norm(self.Afun.rmatvec(self.b - self.Afun.matvec(x)))
+                == pytest.approx(normar))
+
+    def testNormx(self):
+        x, istop, itn, normr, normar, normA, condA, normx = self.returnValues
+        assert norm(x) == pytest.approx(normx)
+
+
+def lowerBidiagonalMatrix(m, n):
+    # This is a simple example for testing LSMR.
+    # It uses the leading m*n submatrix from
+    # A = [ 1
+    #       1 2
+    #         2 3
+    #           3 4
+    #             ...
+    #               n ]
+    # suitably padded by zeros.
+    #
+    # 04 Jun 2010: First version for distribution with lsmr.py
+    if m <= n:
+        row = hstack((arange(m, dtype=int),
+                      arange(1, m, dtype=int)))
+        col = hstack((arange(m, dtype=int),
+                      arange(m-1, dtype=int)))
+        data = hstack((arange(1, m+1, dtype=float),
+                       arange(1, m, dtype=float)))
+        return coo_matrix((data, (row, col)), shape=(m, n))
+    else:
+        row = hstack((arange(n, dtype=int),
+                      arange(1, n+1, dtype=int)))
+        col = hstack((arange(n, dtype=int),
+                      arange(n, dtype=int)))
+        data = hstack((arange(1, n+1, dtype=float),
+                       arange(1, n+1, dtype=float)))
+        return coo_matrix((data, (row, col)), shape=(m, n))
+
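+# For reference, the dense form of a small instance, hand-computed from the
+# construction above: lowerBidiagonalMatrix(4, 3).toarray() is
+#     [[1., 0., 0.],
+#      [1., 2., 0.],
+#      [0., 2., 3.],
+#      [0., 0., 3.]]
+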
+
+def lsmrtest(m, n, damp):
+    """Verbose testing of lsmr"""
+
+    A = lowerBidiagonalMatrix(m, n)
+    xtrue = arange(n, 0, -1, dtype=float)
+    Afun = aslinearoperator(A)
+
+    b = Afun.matvec(xtrue)
+
+    atol = 1.0e-7
+    btol = 1.0e-7
+    conlim = 1.0e+10
+    itnlim = 10*n
+    show = 1
+
+    x, istop, itn, normr, normar, norma, conda, normx \
+      = lsmr(A, b, damp, atol, btol, conlim, itnlim, show)
+
+    j1 = min(n, 5)
+    j2 = max(n - 4, 1)
+    print(' ')
+    print('First elements of x:')
+    print(''.join('%10.4f' % xi for xi in x[0:j1]))
+    print(' ')
+    print('Last  elements of x:')
+    print(''.join('%10.4f' % xi for xi in x[j2-1:]))
+
+    r = b - Afun.matvec(x)
+    r2 = sqrt(norm(r)**2 + (damp*norm(x))**2)
+    print(' ')
+    print('normr (est.)  %17.10e' % normr)
+    print('normr (true)  %17.10e' % r2)
+    print(' ')
+
+
+if __name__ == "__main__":
+    lsmrtest(20, 10, 0)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/tests/test_lsqr.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/tests/test_lsqr.py
new file mode 100644
index 00000000..bb2ca702
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/tests/test_lsqr.py
@@ -0,0 +1,153 @@
+import numpy as np
+from numpy.testing import assert_allclose, assert_array_equal, assert_equal
+import pytest
+import scipy.sparse
+import scipy.sparse.linalg
+from scipy.sparse.linalg import lsqr
+from time import time
+
+# Set up a test problem
+n = 35
+G = np.eye(n)
+normal = np.random.normal
+norm = np.linalg.norm
+
+for jj in range(5):
+    gg = normal(size=n)
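+    # Note: gg is 1-D, so gg.T is gg and the products below are elementwise.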
+    hh = gg * gg.T
+    G += (hh + hh.T) * 0.5
+    G += normal(size=n) * normal(size=n)
+
+b = normal(size=n)
+
+# tolerance for atol/btol keywords of lsqr()
+tol = 2e-10
+# tolerances for testing the results of the lsqr() call with assert_allclose
+# These tolerances are a bit fragile - see discussion in gh-15301.
+atol_test = 4e-10
+rtol_test = 2e-8
+show = False
+maxit = None
+
+
+def test_lsqr_basic():
+    b_copy = b.copy()
+    xo, *_ = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit)
+    assert_array_equal(b_copy, b)
+
+    svx = np.linalg.solve(G, b)
+    assert_allclose(xo, svx, atol=atol_test, rtol=rtol_test)
+
+    # Now the same but with damp > 0.
+    # This is equivalent to solving the extended system:
+    # ( G      ) @ x = ( b )
+    # ( damp*I )       ( 0 )
+    damp = 1.5
+    xo, *_ = lsqr(
+        G, b, damp=damp, show=show, atol=tol, btol=tol, iter_lim=maxit)
+
+    Gext = np.r_[G, damp * np.eye(G.shape[1])]
+    bext = np.r_[b, np.zeros(G.shape[1])]
+    svx, *_ = np.linalg.lstsq(Gext, bext, rcond=None)
+    assert_allclose(xo, svx, atol=atol_test, rtol=rtol_test)
+
+
+def test_gh_2466():
+    row = np.array([0, 0])
+    col = np.array([0, 1])
+    val = np.array([1, -1])
+    A = scipy.sparse.coo_matrix((val, (row, col)), shape=(1, 2))
+    b = np.asarray([4])
+    lsqr(A, b)
+
+
+def test_well_conditioned_problems():
+    # Test that the sparse lsqr solver returns the right solution
+    # on various problems with different random seeds.
+    # This is a non-regression test for a potential ZeroDivisionError
+    # raised when computing the `test2` & `test3` convergence conditions.
+    n = 10
+    A_sparse = scipy.sparse.eye(n, n)
+    A_dense = A_sparse.toarray()
+
+    with np.errstate(invalid='raise'):
+        for seed in range(30):
+            rng = np.random.RandomState(seed + 10)
+            beta = rng.rand(n)
+            beta[beta == 0] = 0.00001  # ensure that no beta is exactly zero
+            b = A_sparse @ beta[:, np.newaxis]
+            output = lsqr(A_sparse, b, show=show)
+
+            # Check that the termination condition corresponds to an approximate
+            # solution to Ax = b
+            assert_equal(output[1], 1)
+            solution = output[0]
+
+            # Check that we recover the ground truth solution
+            assert_allclose(solution, beta)
+
+            # Sanity check: compare to the dense array solver
+            reference_solution = np.linalg.solve(A_dense, b).ravel()
+            assert_allclose(solution, reference_solution)
+
+
+def test_b_shapes():
+    # Test b being a scalar.
+    A = np.array([[1.0, 2.0]])
+    b = 3.0
+    x = lsqr(A, b)[0]
+    assert norm(A.dot(x) - b) == pytest.approx(0)
+
+    # Test b being a column vector.
+    A = np.eye(10)
+    b = np.ones((10, 1))
+    x = lsqr(A, b)[0]
+    assert norm(A.dot(x) - b.ravel()) == pytest.approx(0)
+
+
+def test_initialization():
+    # Test the default setting is the same as zeros
+    b_copy = b.copy()
+    x_ref = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit)
+    x0 = np.zeros(x_ref[0].shape)
+    x = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit, x0=x0)
+    assert_array_equal(b_copy, b)
+    assert_allclose(x_ref[0], x[0])
+
+    # Test warm-start with single iteration
+    x0 = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=1)[0]
+    x = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit, x0=x0)
+    assert_allclose(x_ref[0], x[0])
+    assert_array_equal(b_copy, b)
+
+
+if __name__ == "__main__":
+    svx = np.linalg.solve(G, b)
+
+    tic = time()
+    X = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit)
+    xo = X[0]
+    phio = X[3]
+    psio = X[7]
+    k = X[2]
+    chio = X[8]
+    mg = np.amax(np.abs(G - G.T))
+    if mg > 1e-14:
+        sym = 'No'
+    else:
+        sym = 'Yes'
+
+    print('LSQR')
+    print("Is linear operator symmetric? " + sym)
+    print("n: %3g  iterations:   %3g" % (n, k))
+    print("Norms computed in %.2fs by LSQR" % (time() - tic))
+    print(" ||x||  %9.4e  ||r|| %9.4e  ||Ar||  %9.4e " % (chio, phio, psio))
+    print("Residual norms computed directly:")
+    print(" ||x||  %9.4e  ||r|| %9.4e  ||Ar||  %9.4e" % (norm(xo),
+                                                         norm(G.dot(xo) - b),
+                                                         norm(G.T.dot(G.dot(xo) - b))))
+    print("Direct solution norms:")
+    print(" ||x||  %9.4e  ||r|| %9.4e " % (norm(svx), norm(G.dot(svx) - b)))
+    print("")
+    print(" || x_{direct} - x_{LSQR}|| %9.4e " % norm(svx-xo))
+    print("")
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/tests/test_minres.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/tests/test_minres.py
new file mode 100644
index 00000000..a1993e4c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/tests/test_minres.py
@@ -0,0 +1,97 @@
+import numpy as np
+from numpy.testing import assert_equal, assert_allclose, assert_
+from scipy.sparse.linalg._isolve import minres
+
+from pytest import raises as assert_raises
+from .test_iterative import assert_normclose
+
+
+def get_sample_problem():
+    # A random 10 x 10 symmetric matrix
+    np.random.seed(1234)
+    matrix = np.random.rand(10, 10)
+    matrix = matrix + matrix.T
+    # A random vector of length 10
+    vector = np.random.rand(10)
+    return matrix, vector
+
+
+def test_singular():
+    A, b = get_sample_problem()
+    A[0, :] = 0
+    b[0] = 0
+    xp, info = minres(A, b)
+    assert_equal(info, 0)
+    assert_normclose(A.dot(xp), b, tol=1e-5)
+
+
+def test_x0_is_used_by():
+    A, b = get_sample_problem()
+    # Random x0 to feed minres
+    np.random.seed(12345)
+    x0 = np.random.rand(10)
+    trace = []
+
+    def trace_iterates(xk):
+        trace.append(xk)
+    minres(A, b, x0=x0, callback=trace_iterates)
+    trace_with_x0 = trace
+
+    trace = []
+    minres(A, b, callback=trace_iterates)
+    assert_(not np.array_equal(trace_with_x0[0], trace[0]))
+
+
+def test_shift():
+    A, b = get_sample_problem()
+    shift = 0.5
+    shifted_A = A - shift * np.eye(10)
+    x1, info1 = minres(A, b, shift=shift)
+    x2, info2 = minres(shifted_A, b)
+    assert_equal(info1, 0)
+    assert_allclose(x1, x2, rtol=1e-5)
+
+
+def test_asymmetric_fail():
+    """Asymmetric matrix should raise `ValueError` when check=True"""
+    A, b = get_sample_problem()
+    A[1, 2] = 1
+    A[2, 1] = 2
+    with assert_raises(ValueError):
+        xp, info = minres(A, b, check=True)
+
+
+def test_minres_non_default_x0():
+    np.random.seed(1234)
+    tol = 10**(-6)
+    a = np.random.randn(5, 5)
+    a = np.dot(a, a.T)
+    b = np.random.randn(5)
+    c = np.random.randn(5)
+    x = minres(a, b, x0=c, tol=tol)[0]
+    assert_normclose(a.dot(x), b, tol=tol)
+
+
+def test_minres_precond_non_default_x0():
+    np.random.seed(12345)
+    tol = 10**(-6)
+    a = np.random.randn(5, 5)
+    a = np.dot(a, a.T)
+    b = np.random.randn(5)
+    c = np.random.randn(5)
+    m = np.random.randn(5, 5)
+    m = np.dot(m, m.T)
+    x = minres(a, b, M=m, x0=c, tol=tol)[0]
+    assert_normclose(a.dot(x), b, tol=tol)
+
+
+def test_minres_precond_exact_x0():
+    np.random.seed(1234)
+    tol = 10**(-6)
+    a = np.eye(10)
+    b = np.ones(10)
+    c = np.ones(10)
+    m = np.random.randn(10, 10)
+    m = np.dot(m, m.T)
+    x = minres(a, b, M=m, x0=c, tol=tol)[0]
+    assert_normclose(a.dot(x), b, tol=tol)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/tests/test_utils.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/tests/test_utils.py
new file mode 100644
index 00000000..c6ce5569
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/tests/test_utils.py
@@ -0,0 +1,8 @@
+import numpy as np
+from pytest import raises as assert_raises
+
+import scipy.sparse.linalg._isolve.utils as utils
+
+
+def test_make_system_bad_shape():
+    assert_raises(ValueError, utils.make_system,
+                  np.zeros((5, 3)), None, np.zeros(4), np.zeros(4))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/tfqmr.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/tfqmr.py
new file mode 100644
index 00000000..31d44fcc
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/tfqmr.py
@@ -0,0 +1,184 @@
+import numpy as np
+from .utils import make_system
+
+
+__all__ = ['tfqmr']
+
+
+def tfqmr(A, b, x0=None, tol=1e-5, maxiter=None, M=None,
+          callback=None, atol=None, show=False):
+    """
+    Use Transpose-Free Quasi-Minimal Residual iteration to solve ``Ax = b``.
+
+    Parameters
+    ----------
+    A : {sparse matrix, ndarray, LinearOperator}
+        The real or complex N-by-N matrix of the linear system.
+        Alternatively, `A` can be a linear operator which can
+        produce ``Ax`` using, e.g.,
+        `scipy.sparse.linalg.LinearOperator`.
+    b : {ndarray}
+        Right hand side of the linear system. Has shape (N,) or (N,1).
+    x0 : {ndarray}
+        Starting guess for the solution.
+    tol, atol : float, optional
+        Tolerances for convergence, ``norm(residual) <= max(tol*norm(b-Ax0), atol)``.
+        The default for `tol` is 1.0e-5.
+        The default for `atol` is ``tol * norm(b-Ax0)``.
+
+        .. warning::
+
+           The default value for `atol` will be changed in a future release.
+           For future compatibility, specify `atol` explicitly.
+    maxiter : int, optional
+        Maximum number of iterations.  Iteration will stop after maxiter
+        steps even if the specified tolerance has not been achieved.
+        Default is ``min(10000, ndofs * 10)``, where ``ndofs = A.shape[0]``.
+    M : {sparse matrix, ndarray, LinearOperator}
+        Inverse of the preconditioner of A.  M should approximate the
+        inverse of A and be easy to solve for (see Notes).  Effective
+        preconditioning dramatically improves the rate of convergence,
+        which implies that fewer iterations are needed to reach a given
+        error tolerance.  By default, no preconditioner is used.
+    callback : function, optional
+        User-supplied function to call after each iteration.  It is called
+        as `callback(xk)`, where `xk` is the current solution vector.
+    show : bool, optional
+        If ``True``, print a convergence message when the solve finishes;
+        if ``False`` (default), print nothing.
+
+    Returns
+    -------
+    x : ndarray
+        The converged solution.
+    info : int
+        Provides convergence information:
+
+            - 0  : successful exit
+            - >0 : convergence to tolerance not achieved, number of iterations
+            - <0 : illegal input or breakdown
+
+    Notes
+    -----
+    The Transpose-Free QMR algorithm is derived from the CGS algorithm.
+    However, unlike CGS, the convergence curves of the TFQMR method are
+    smoothed by computing a quasi-minimization of the residual norm. The
+    implementation supports a left preconditioner, and the "residual norm"
+    used in the convergence criterion is actually an upper bound on the
+    true residual norm ``||b - Axk||``.
+
+    References
+    ----------
+    .. [1] R. W. Freund, A Transpose-Free Quasi-Minimal Residual Algorithm for
+           Non-Hermitian Linear Systems, SIAM J. Sci. Comput., 14(2), 470-482,
+           1993.
+    .. [2] Y. Saad, Iterative Methods for Sparse Linear Systems, 2nd edition,
+           SIAM, Philadelphia, 2003.
+    .. [3] C. T. Kelley, Iterative Methods for Linear and Nonlinear Equations,
+           number 16 in Frontiers in Applied Mathematics, SIAM, Philadelphia,
+           1995.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.sparse import csc_matrix
+    >>> from scipy.sparse.linalg import tfqmr
+    >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
+    >>> b = np.array([2, 4, -1], dtype=float)
+    >>> x, exitCode = tfqmr(A, b)
+    >>> print(exitCode)            # 0 indicates successful convergence
+    0
+    >>> np.allclose(A.dot(x), b)
+    True
+    """
+
+    # Check data type
+    dtype = A.dtype
+    if np.issubdtype(dtype, np.int64):
+        dtype = float
+        A = A.astype(dtype)
+    if np.issubdtype(b.dtype, np.int64):
+        b = b.astype(dtype)
+
+    A, M, x, b, postprocess = make_system(A, M, x0, b)
+
+    # Check if the R.H.S is a zero vector
+    if np.linalg.norm(b) == 0.:
+        x = b.copy()
+        return (postprocess(x), 0)
+
+    ndofs = A.shape[0]
+    if maxiter is None:
+        maxiter = min(10000, ndofs * 10)
+
+    if x0 is None:
+        r = b.copy()
+    else:
+        r = b - A.matvec(x)
+    u = r
+    w = r.copy()
+    # Take rstar as b - Ax0, that is rstar := r = b - Ax0 mathematically
+    rstar = r
+    v = M.matvec(A.matvec(r))
+    uhat = v
+    d = theta = eta = 0.
+    rho = np.inner(rstar.conjugate(), r)
+    rhoLast = rho
+    r0norm = np.sqrt(rho)
+    tau = r0norm
+    if r0norm == 0:
+        return (postprocess(x), 0)
+
+    if atol is None:
+        atol = tol * r0norm
+    else:
+        atol = max(atol, tol * r0norm)
+
+    for iteration in range(maxiter):
+        even = iteration % 2 == 0
+        if even:
+            vtrstar = np.inner(rstar.conjugate(), v)
+            # Check breakdown
+            if vtrstar == 0.:
+                return (postprocess(x), -1)
+            alpha = rho / vtrstar
+            uNext = u - alpha * v  # [1]-(5.6)
+        w -= alpha * uhat  # [1]-(5.8)
+        d = u + (theta**2 / alpha) * eta * d  # [1]-(5.5)
+        # [1]-(5.2)
+        theta = np.linalg.norm(w) / tau
+        c = np.sqrt(1. / (1 + theta**2))
+        tau *= theta * c
+        # Calculate step and direction [1]-(5.4)
+        eta = (c**2) * alpha
+        z = M.matvec(d)
+        x += eta * z
+
+        if callback is not None:
+            callback(x)
+
+        # Convergence criterion
+        if tau * np.sqrt(iteration + 1) < atol:
+            if show:
+                print("TFQMR: Linear solve converged due to reaching TOL in "
+                      "{} iterations".format(iteration + 1))
+            return (postprocess(x), 0)
+
+        if not even:
+            # [1]-(5.7)
+            rho = np.inner(rstar.conjugate(), w)
+            beta = rho / rhoLast
+            u = w + beta * u
+            v = beta * uhat + (beta**2) * v
+            uhat = M.matvec(A.matvec(u))
+            v += uhat
+        else:
+            uhat = M.matvec(A.matvec(uNext))
+            u = uNext
+            rhoLast = rho
+
+    if show:
+        print("TFQMR: Linear solve did not converge: reached MAXIT "
+              "({} iterations)".format(iteration + 1))
+    return (postprocess(x), maxiter)
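+
+
+# Illustrative usage sketch (hypothetical helper, never called by the
+# library): solve a small SPD tridiagonal system with a Jacobi (diagonal)
+# preconditioner and a callback that records copies of the iterates.
+def _tfqmr_usage_sketch():  # pragma: no cover
+    from scipy.sparse import diags
+    from scipy.sparse.linalg._interface import LinearOperator
+
+    n = 10
+    A = diags([[-1.0] * (n - 1), [2.0] * n, [-1.0] * (n - 1)],
+              offsets=[-1, 0, 1]).tocsr()
+    b = np.ones(n)
+    # Jacobi preconditioner: apply the inverse of diag(A).
+    M = LinearOperator((n, n), matvec=lambda v: v / A.diagonal())
+    iterates = []
+    x, info = tfqmr(A, b, M=M, tol=1e-8, atol=0.0,
+                    callback=lambda xk: iterates.append(xk.copy()))
+    assert info == 0
+    assert np.allclose(A @ x, b)
+    assert len(iterates) > 0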
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/utils.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/utils.py
new file mode 100644
index 00000000..80f37fc1
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_isolve/utils.py
@@ -0,0 +1,127 @@
+__docformat__ = "restructuredtext en"
+
+__all__ = []
+
+
+from numpy import asanyarray, asarray, array, zeros
+
+from scipy.sparse.linalg._interface import (aslinearoperator, LinearOperator,
+                                            IdentityOperator)
+
+_coerce_rules = {('f','f'):'f', ('f','d'):'d', ('f','F'):'F',
+                 ('f','D'):'D', ('d','f'):'d', ('d','d'):'d',
+                 ('d','F'):'D', ('d','D'):'D', ('F','f'):'F',
+                 ('F','d'):'D', ('F','F'):'F', ('F','D'):'D',
+                 ('D','f'):'D', ('D','d'):'D', ('D','F'):'D',
+                 ('D','D'):'D'}
+
+
+def coerce(x,y):
+    if x not in 'fdFD':
+        x = 'd'
+    if y not in 'fdFD':
+        y = 'd'
+    return _coerce_rules[x,y]
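+
+# Hand-checked examples against the table above: coerce('f', 'D') == 'D';
+# characters outside 'fdFD' are first upcast to double, so
+# coerce('i', 'F') == coerce('d', 'F') == 'D'.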
+
+
+def _identity(x):
+    return x
+
+
+def make_system(A, M, x0, b):
+    """Make a linear system Ax=b
+
+    Parameters
+    ----------
+    A : LinearOperator
+        sparse or dense matrix (or any valid input to aslinearoperator)
+    M : {LinearOperator, None}
+        preconditioner
+        sparse or dense matrix (or any valid input to aslinearoperator)
+    x0 : {array_like, str, None}
+        initial guess to iterative method.
+        ``x0 = 'Mb'`` means using the nonzero initial guess ``M @ b``.
+        Default is `None`, which means using the zero initial guess.
+    b : array_like
+        right hand side
+
+    Returns
+    -------
+    (A, M, x, b, postprocess)
+        A : LinearOperator
+            matrix of the linear system
+        M : LinearOperator
+            preconditioner
+        x : rank 1 ndarray
+            initial guess
+        b : rank 1 ndarray
+            right hand side
+        postprocess : function
+            converts the solution vector to the appropriate
+            type and dimensions (e.g. (N,1) matrix)
+
+    """
+    A_ = A
+    A = aslinearoperator(A)
+
+    if A.shape[0] != A.shape[1]:
+        raise ValueError(f'expected square matrix, but got shape={A.shape}')
+
+    N = A.shape[0]
+
+    b = asanyarray(b)
+
+    if not (b.shape == (N,1) or b.shape == (N,)):
+        raise ValueError(f'shapes of A {A.shape} and b {b.shape} are '
+                         'incompatible')
+
+    if b.dtype.char not in 'fdFD':
+        b = b.astype('d')  # upcast non-FP types to double
+
+    def postprocess(x):
+        return x
+
+    if hasattr(A,'dtype'):
+        xtype = A.dtype.char
+    else:
+        xtype = A.matvec(b).dtype.char
+    xtype = coerce(xtype, b.dtype.char)
+
+    b = asarray(b,dtype=xtype)  # make b the same type as x
+    b = b.ravel()
+
+    # process preconditioner
+    if M is None:
+        if hasattr(A_, 'psolve'):
+            psolve = A_.psolve
+        else:
+            psolve = _identity
+        if hasattr(A_, 'rpsolve'):
+            rpsolve = A_.rpsolve
+        else:
+            rpsolve = _identity
+        if psolve is _identity and rpsolve is _identity:
+            M = IdentityOperator(shape=A.shape, dtype=A.dtype)
+        else:
+            M = LinearOperator(A.shape, matvec=psolve, rmatvec=rpsolve,
+                               dtype=A.dtype)
+    else:
+        M = aslinearoperator(M)
+        if A.shape != M.shape:
+            raise ValueError('matrix and preconditioner have different shapes')
+
+    # set initial guess
+    if x0 is None:
+        x = zeros(N, dtype=xtype)
+    elif isinstance(x0, str):
+        if x0 == 'Mb':  # use nonzero initial guess ``M @ b``
+            x = M.matvec(b.copy())
+        else:
+            raise ValueError("unsupported initial guess string; "
+                             "only x0='Mb' is recognized")
+    else:
+        x = array(x0, dtype=xtype)
+        if not (x.shape == (N, 1) or x.shape == (N,)):
+            raise ValueError(f'shapes of A {A.shape} and '
+                             f'x0 {x.shape} are incompatible')
+        x = x.ravel()
+
+    return A, M, x, b, postprocess
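+
+
+# Illustrative usage sketch (hypothetical helper, never called by the
+# library): how a solver typically consumes make_system, including the
+# x0='Mb' nonzero-initial-guess convention documented above.
+def _make_system_usage_sketch():  # pragma: no cover
+    import numpy as np
+    from scipy.sparse import eye
+
+    A0 = 2.0 * eye(4, format='csr')
+    b0 = np.arange(4.0)
+    A, M, x, b, postprocess = make_system(A0, None, 'Mb', b0)
+    # M defaults to the identity, so the initial guess x is M @ b == b.
+    assert x.shape == (4,) and np.allclose(x, b)
+    sol = postprocess(b / 2.0)  # exact solution of A0 @ sol = b0
+    assert np.allclose(A.matvec(sol), b)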
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_matfuncs.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_matfuncs.py
new file mode 100644
index 00000000..1f99cf7f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_matfuncs.py
@@ -0,0 +1,863 @@
+"""
+Sparse matrix functions
+"""
+
+#
+# Authors: Travis Oliphant, March 2002
+#          Anthony Scopatz, August 2012 (Sparse Updates)
+#          Jake Vanderplas, August 2012 (Sparse Updates)
+#
+
+__all__ = ['expm', 'inv']
+
+import numpy as np
+from scipy.linalg._basic import solve, solve_triangular
+
+from scipy.sparse._base import isspmatrix
+from scipy.sparse.linalg import spsolve
+from scipy.sparse._sputils import is_pydata_spmatrix
+
+import scipy.sparse
+import scipy.sparse.linalg
+from scipy.sparse.linalg._interface import LinearOperator
+
+from ._expm_multiply import _ident_like, _exact_1_norm as _onenorm
+
+
+UPPER_TRIANGULAR = 'upper_triangular'
+
+
+def inv(A):
+    """
+    Compute the inverse of a sparse matrix
+
+    Parameters
+    ----------
+    A : (M, M) sparse matrix
+        square matrix to be inverted
+
+    Returns
+    -------
+    Ainv : (M, M) sparse matrix
+        inverse of `A`
+
+    Notes
+    -----
+    This computes the sparse inverse of `A`. If the inverse of `A` is expected
+    to be non-sparse, it will likely be faster to convert `A` to dense and use
+    `scipy.linalg.inv`.
+
+    Examples
+    --------
+    >>> from scipy.sparse import csc_matrix
+    >>> from scipy.sparse.linalg import inv
+    >>> A = csc_matrix([[1., 0.], [1., 2.]])
+    >>> Ainv = inv(A)
+    >>> Ainv
+    <2x2 sparse matrix of type '<class 'numpy.float64'>'
+        with 3 stored elements in Compressed Sparse Column format>
+    >>> A.dot(Ainv)
+    <2x2 sparse matrix of type '<class 'numpy.float64'>'
+        with 2 stored elements in Compressed Sparse Column format>
+    >>> A.dot(Ainv).toarray()
+    array([[ 1.,  0.],
+           [ 0.,  1.]])
+
+    .. versionadded:: 0.12.0
+
+    """
+    # Check input
+    if not (scipy.sparse.isspmatrix(A) or is_pydata_spmatrix(A)):
+        raise TypeError('Input must be a sparse matrix')
+
+    # Use sparse direct solver to solve "AX = I" accurately
+    I = _ident_like(A)
+    Ainv = spsolve(A, I)
+    return Ainv
+
+
+def _onenorm_matrix_power_nnm(A, p):
+    """
+    Compute the 1-norm of a non-negative integer power of a non-negative matrix.
+
+    Parameters
+    ----------
+    A : a square ndarray or matrix or sparse matrix
+        Input matrix with non-negative entries.
+    p : non-negative integer
+        The power to which the matrix is to be raised.
+
+    Returns
+    -------
+    out : float
+        The 1-norm of the matrix power p of A.
+
+    """
+    # Check input
+    if int(p) != p or p < 0:
+        raise ValueError('expected non-negative integer p')
+    p = int(p)
+    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
+        raise ValueError('expected A to be like a square matrix')
+
+    # Explicitly make a column vector so that this works when A is a
+    # numpy matrix (in addition to ndarray and sparse matrix).
+    v = np.ones((A.shape[0], 1), dtype=float)
+    M = A.T
+    for i in range(p):
+        v = M.dot(v)
+    return np.max(v)
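+
+# The loop above relies on the fact that, for an entrywise non-negative
+# matrix, the 1-norm is the largest column sum, and repeatedly applying A.T
+# to the ones vector yields exactly the column sums of A**p -- so p
+# matrix-vector products suffice and A**p is never formed. Hand-checked
+# example: A = [[1, 2], [3, 4]], p = 2 gives A**2 = [[7, 10], [15, 22]],
+# whose largest column sum is 10 + 22 = 32, and
+# _onenorm_matrix_power_nnm(A, 2) returns 32.0.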
+
+
+def _is_upper_triangular(A):
+    # This function could possibly be of wider interest.
+    if isspmatrix(A):
+        lower_part = scipy.sparse.tril(A, -1)
+        # Check structural upper triangularity,
+        # then coincidental upper triangularity if needed.
+        return lower_part.nnz == 0 or lower_part.count_nonzero() == 0
+    elif is_pydata_spmatrix(A):
+        import sparse
+        lower_part = sparse.tril(A, -1)
+        return lower_part.nnz == 0
+    else:
+        return not np.tril(A, -1).any()
+
+
+def _smart_matrix_product(A, B, alpha=None, structure=None):
+    """
+    A matrix product that knows about sparse and structured matrices.
+
+    Parameters
+    ----------
+    A : 2d ndarray
+        First matrix.
+    B : 2d ndarray
+        Second matrix.
+    alpha : float
+        The matrix product will be scaled by this constant.
+    structure : str, optional
+        A string describing the structure of both matrices `A` and `B`.
+        Only `upper_triangular` is currently supported.
+
+    Returns
+    -------
+    M : 2d ndarray
+        Matrix product of A and B.
+
+    """
+    if len(A.shape) != 2:
+        raise ValueError('expected A to be a rectangular matrix')
+    if len(B.shape) != 2:
+        raise ValueError('expected B to be a rectangular matrix')
+    f = None
+    if structure == UPPER_TRIANGULAR:
+        if (not isspmatrix(A) and not isspmatrix(B)
+                and not is_pydata_spmatrix(A) and not is_pydata_spmatrix(B)):
+            f, = scipy.linalg.get_blas_funcs(('trmm',), (A, B))
+    if f is not None:
+        if alpha is None:
+            alpha = 1.
+        out = f(alpha, A, B)
+    else:
+        if alpha is None:
+            out = A.dot(B)
+        else:
+            out = alpha * A.dot(B)
+    return out
+
+
+class MatrixPowerOperator(LinearOperator):
+
+    def __init__(self, A, p, structure=None):
+        if A.ndim != 2 or A.shape[0] != A.shape[1]:
+            raise ValueError('expected A to be like a square matrix')
+        if p < 0:
+            raise ValueError('expected p to be a non-negative integer')
+        self._A = A
+        self._p = p
+        self._structure = structure
+        self.dtype = A.dtype
+        self.ndim = A.ndim
+        self.shape = A.shape
+
+    def _matvec(self, x):
+        for i in range(self._p):
+            x = self._A.dot(x)
+        return x
+
+    def _rmatvec(self, x):
+        A_T = self._A.T
+        x = x.ravel()
+        for i in range(self._p):
+            x = A_T.dot(x)
+        return x
+
+    def _matmat(self, X):
+        for i in range(self._p):
+            X = _smart_matrix_product(self._A, X, structure=self._structure)
+        return X
+
+    @property
+    def T(self):
+        return MatrixPowerOperator(self._A.T, self._p)
+
+
+class ProductOperator(LinearOperator):
+    """
+    For now, this is limited to products of multiple square matrices.
+    """
+
+    def __init__(self, *args, **kwargs):
+        self._structure = kwargs.get('structure', None)
+        for A in args:
+            if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
+                raise ValueError(
+                        'For now, the ProductOperator implementation is '
+                        'limited to the product of multiple square matrices.')
+        if args:
+            n = args[0].shape[0]
+            for A in args:
+                for d in A.shape:
+                    if d != n:
+                        raise ValueError(
+                                'The square matrices of the ProductOperator '
+                                'must all have the same shape.')
+            self.shape = (n, n)
+            self.ndim = len(self.shape)
+        self.dtype = np.result_type(*[x.dtype for x in args])
+        self._operator_sequence = args
+
+    def _matvec(self, x):
+        for A in reversed(self._operator_sequence):
+            x = A.dot(x)
+        return x
+
+    def _rmatvec(self, x):
+        x = x.ravel()
+        for A in self._operator_sequence:
+            x = A.T.dot(x)
+        return x
+
+    def _matmat(self, X):
+        for A in reversed(self._operator_sequence):
+            X = _smart_matrix_product(A, X, structure=self._structure)
+        return X
+
+    @property
+    def T(self):
+        T_args = [A.T for A in reversed(self._operator_sequence)]
+        return ProductOperator(*T_args)
+
+
+def _onenormest_matrix_power(A, p,
+        t=2, itmax=5, compute_v=False, compute_w=False, structure=None):
+    """
+    Efficiently estimate the 1-norm of A^p.
+
+    Parameters
+    ----------
+    A : ndarray
+        Matrix whose 1-norm of a power is to be computed.
+    p : int
+        Non-negative integer power.
+    t : int, optional
+        A positive parameter controlling the tradeoff between
+        accuracy versus time and memory usage.
+        Larger values take longer and use more memory
+        but give more accurate output.
+    itmax : int, optional
+        Use at most this many iterations.
+    compute_v : bool, optional
+        Request a norm-maximizing linear operator input vector if True.
+    compute_w : bool, optional
+        Request a norm-maximizing linear operator output vector if True.
+
+    Returns
+    -------
+    est : float
+        An underestimate of the 1-norm of the sparse matrix.
+    v : ndarray, optional
+        The vector such that ||Av||_1 == est*||v||_1.
+        It can be thought of as an input to the linear operator
+        that gives an output with particularly large norm.
+    w : ndarray, optional
+        The vector Av which has relatively large 1-norm.
+        It can be thought of as an output of the linear operator
+        that is relatively large in norm compared to the input.
+
+    """
+    return scipy.sparse.linalg.onenormest(
+            MatrixPowerOperator(A, p, structure=structure))
+
+
+def _onenormest_product(operator_seq,
+        t=2, itmax=5, compute_v=False, compute_w=False, structure=None):
+    """
+    Efficiently estimate the 1-norm of the matrix product of the args.
+
+    Parameters
+    ----------
+    operator_seq : linear operator sequence
+        Matrices whose 1-norm of product is to be computed.
+    t : int, optional
+        A positive parameter controlling the tradeoff between
+        accuracy versus time and memory usage.
+        Larger values take longer and use more memory
+        but give more accurate output.
+    itmax : int, optional
+        Use at most this many iterations.
+    compute_v : bool, optional
+        Request a norm-maximizing linear operator input vector if True.
+    compute_w : bool, optional
+        Request a norm-maximizing linear operator output vector if True.
+    structure : str, optional
+        A string describing the structure of all operators.
+        Only `upper_triangular` is currently supported.
+
+    Returns
+    -------
+    est : float
+        An underestimate of the 1-norm of the sparse matrix.
+    v : ndarray, optional
+        The vector such that ||Av||_1 == est*||v||_1.
+        It can be thought of as an input to the linear operator
+        that gives an output with particularly large norm.
+    w : ndarray, optional
+        The vector Av which has relatively large 1-norm.
+        It can be thought of as an output of the linear operator
+        that is relatively large in norm compared to the input.
+
+    """
+    return scipy.sparse.linalg.onenormest(
+            ProductOperator(*operator_seq, structure=structure))
+
+
+class _ExpmPadeHelper:
+    """
+    Help lazily evaluate a matrix exponential.
+
+    The idea is to not do more work than we need for high expm precision,
+    so we lazily compute matrix powers and store or precompute
+    other properties of the matrix.
+
+    """
+
+    def __init__(self, A, structure=None, use_exact_onenorm=False):
+        """
+        Initialize the object.
+
+        Parameters
+        ----------
+        A : a dense or sparse square numpy matrix or ndarray
+            The matrix to be exponentiated.
+        structure : str, optional
+            A string describing the structure of matrix `A`.
+            Only `upper_triangular` is currently supported.
+        use_exact_onenorm : bool, optional
+            If True then only the exact one-norm of matrix powers and products
+            will be used. Otherwise, the one-norm of powers and products
+            may initially be estimated.
+        """
+        self.A = A
+        self._A2 = None
+        self._A4 = None
+        self._A6 = None
+        self._A8 = None
+        self._A10 = None
+        self._d4_exact = None
+        self._d6_exact = None
+        self._d8_exact = None
+        self._d10_exact = None
+        self._d4_approx = None
+        self._d6_approx = None
+        self._d8_approx = None
+        self._d10_approx = None
+        self.ident = _ident_like(A)
+        self.structure = structure
+        self.use_exact_onenorm = use_exact_onenorm
+
+    @property
+    def A2(self):
+        if self._A2 is None:
+            self._A2 = _smart_matrix_product(
+                    self.A, self.A, structure=self.structure)
+        return self._A2
+
+    @property
+    def A4(self):
+        if self._A4 is None:
+            self._A4 = _smart_matrix_product(
+                    self.A2, self.A2, structure=self.structure)
+        return self._A4
+
+    @property
+    def A6(self):
+        if self._A6 is None:
+            self._A6 = _smart_matrix_product(
+                    self.A4, self.A2, structure=self.structure)
+        return self._A6
+
+    @property
+    def A8(self):
+        if self._A8 is None:
+            self._A8 = _smart_matrix_product(
+                    self.A6, self.A2, structure=self.structure)
+        return self._A8
+
+    @property
+    def A10(self):
+        if self._A10 is None:
+            self._A10 = _smart_matrix_product(
+                    self.A4, self.A6, structure=self.structure)
+        return self._A10
+
+    @property
+    def d4_tight(self):
+        if self._d4_exact is None:
+            self._d4_exact = _onenorm(self.A4)**(1/4.)
+        return self._d4_exact
+
+    @property
+    def d6_tight(self):
+        if self._d6_exact is None:
+            self._d6_exact = _onenorm(self.A6)**(1/6.)
+        return self._d6_exact
+
+    @property
+    def d8_tight(self):
+        if self._d8_exact is None:
+            self._d8_exact = _onenorm(self.A8)**(1/8.)
+        return self._d8_exact
+
+    @property
+    def d10_tight(self):
+        if self._d10_exact is None:
+            self._d10_exact = _onenorm(self.A10)**(1/10.)
+        return self._d10_exact
+
+    @property
+    def d4_loose(self):
+        if self.use_exact_onenorm:
+            return self.d4_tight
+        if self._d4_exact is not None:
+            return self._d4_exact
+        else:
+            if self._d4_approx is None:
+                self._d4_approx = _onenormest_matrix_power(self.A2, 2,
+                        structure=self.structure)**(1/4.)
+            return self._d4_approx
+
+    @property
+    def d6_loose(self):
+        if self.use_exact_onenorm:
+            return self.d6_tight
+        if self._d6_exact is not None:
+            return self._d6_exact
+        else:
+            if self._d6_approx is None:
+                self._d6_approx = _onenormest_matrix_power(self.A2, 3,
+                        structure=self.structure)**(1/6.)
+            return self._d6_approx
+
+    @property
+    def d8_loose(self):
+        if self.use_exact_onenorm:
+            return self.d8_tight
+        if self._d8_exact is not None:
+            return self._d8_exact
+        else:
+            if self._d8_approx is None:
+                self._d8_approx = _onenormest_matrix_power(self.A4, 2,
+                        structure=self.structure)**(1/8.)
+            return self._d8_approx
+
+    @property
+    def d10_loose(self):
+        if self.use_exact_onenorm:
+            return self.d10_tight
+        if self._d10_exact is not None:
+            return self._d10_exact
+        else:
+            if self._d10_approx is None:
+                self._d10_approx = _onenormest_product((self.A4, self.A6),
+                        structure=self.structure)**(1/10.)
+            return self._d10_approx
+
+    def pade3(self):
+        b = (120., 60., 12., 1.)
+        U = _smart_matrix_product(self.A,
+                b[3]*self.A2 + b[1]*self.ident,
+                structure=self.structure)
+        V = b[2]*self.A2 + b[0]*self.ident
+        return U, V
+
+    def pade5(self):
+        b = (30240., 15120., 3360., 420., 30., 1.)
+        U = _smart_matrix_product(self.A,
+                b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident,
+                structure=self.structure)
+        V = b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident
+        return U, V
+
+    def pade7(self):
+        b = (17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.)
+        U = _smart_matrix_product(self.A,
+                b[7]*self.A6 + b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident,
+                structure=self.structure)
+        V = b[6]*self.A6 + b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident
+        return U, V
+
+    def pade9(self):
+        b = (17643225600., 8821612800., 2075673600., 302702400., 30270240.,
+                2162160., 110880., 3960., 90., 1.)
+        U = _smart_matrix_product(self.A,
+                (b[9]*self.A8 + b[7]*self.A6 + b[5]*self.A4 +
+                    b[3]*self.A2 + b[1]*self.ident),
+                structure=self.structure)
+        V = (b[8]*self.A8 + b[6]*self.A6 + b[4]*self.A4 +
+                b[2]*self.A2 + b[0]*self.ident)
+        return U, V
+
+    def pade13_scaled(self, s):
+        b = (64764752532480000., 32382376266240000., 7771770303897600.,
+                1187353796428800., 129060195264000., 10559470521600.,
+                670442572800., 33522128640., 1323241920., 40840800., 960960.,
+                16380., 182., 1.)
+        B = self.A * 2**-s
+        B2 = self.A2 * 2**(-2*s)
+        B4 = self.A4 * 2**(-4*s)
+        B6 = self.A6 * 2**(-6*s)
+        U2 = _smart_matrix_product(B6,
+                b[13]*B6 + b[11]*B4 + b[9]*B2,
+                structure=self.structure)
+        U = _smart_matrix_product(B,
+                (U2 + b[7]*B6 + b[5]*B4 +
+                    b[3]*B2 + b[1]*self.ident),
+                structure=self.structure)
+        V2 = _smart_matrix_product(B6,
+                b[12]*B6 + b[10]*B4 + b[8]*B2,
+                structure=self.structure)
+        V = V2 + b[6]*B6 + b[4]*B4 + b[2]*B2 + b[0]*self.ident
+        return U, V
+
+
+def expm(A):
+    """
+    Compute the matrix exponential using Pade approximation.
+
+    Parameters
+    ----------
+    A : (M,M) array_like or sparse matrix
+        2D Array or Matrix (sparse or dense) to be exponentiated
+
+    Returns
+    -------
+    expA : (M,M) ndarray
+        Matrix exponential of `A`
+
+    Notes
+    -----
+    This is algorithm (6.1), which is a simplification of algorithm (5.1).
+
+    .. versionadded:: 0.12.0
+
+    References
+    ----------
+    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2009)
+           "A New Scaling and Squaring Algorithm for the Matrix Exponential."
+           SIAM Journal on Matrix Analysis and Applications.
+           31 (3). pp. 970-989. ISSN 1095-7162
+
+    Examples
+    --------
+    >>> from scipy.sparse import csc_matrix
+    >>> from scipy.sparse.linalg import expm
+    >>> A = csc_matrix([[1, 0, 0], [0, 2, 0], [0, 0, 3]])
+    >>> A.toarray()
+    array([[1, 0, 0],
+           [0, 2, 0],
+           [0, 0, 3]], dtype=int64)
+    >>> Aexp = expm(A)
+    >>> Aexp
+    <3x3 sparse matrix of type '<class 'numpy.float64'>'
+        with 3 stored elements in Compressed Sparse Column format>
+    >>> Aexp.toarray()
+    array([[  2.71828183,   0.        ,   0.        ],
+           [  0.        ,   7.3890561 ,   0.        ],
+           [  0.        ,   0.        ,  20.08553692]])
+    """
+    return _expm(A, use_exact_onenorm='auto')
+
+
+def _expm(A, use_exact_onenorm):
+    # Core of expm, separated to allow testing exact and approximate
+    # algorithms.
+
+    # Avoid indiscriminate asarray() to allow sparse or other strange arrays.
+    if isinstance(A, (list, tuple, np.matrix)):
+        A = np.asarray(A)
+    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
+        raise ValueError('expected a square matrix')
+
+    # Gracefully handle size-0 input,
+    # taking care to preserve the sparse class if applicable.
+    if A.shape == (0, 0):
+        out = np.zeros([0, 0], dtype=A.dtype)
+        if isspmatrix(A) or is_pydata_spmatrix(A):
+            return A.__class__(out)
+        return out
+
+    # Trivial case
+    if A.shape == (1, 1):
+        out = [[np.exp(A[0, 0])]]
+
+        # Avoid indiscriminate casting to ndarray to
+        # allow for sparse or other strange arrays
+        if isspmatrix(A) or is_pydata_spmatrix(A):
+            return A.__class__(out)
+
+        return np.array(out)
+
+    # Ensure input is of float type, to avoid integer overflows etc.
+    if ((isinstance(A, np.ndarray) or isspmatrix(A) or is_pydata_spmatrix(A))
+            and not np.issubdtype(A.dtype, np.inexact)):
+        A = A.astype(float)
+
+    # Detect upper triangularity.
+    structure = UPPER_TRIANGULAR if _is_upper_triangular(A) else None
+
+    if use_exact_onenorm == "auto":
+        # Hardcode a matrix order threshold for exact vs. estimated one-norms.
+        use_exact_onenorm = A.shape[0] < 200
+
+    # Track functions of A to help compute the matrix exponential.
+    h = _ExpmPadeHelper(
+            A, structure=structure, use_exact_onenorm=use_exact_onenorm)
+
+    # Try Pade order 3.
+    eta_1 = max(h.d4_loose, h.d6_loose)
+    if eta_1 < 1.495585217958292e-002 and _ell(h.A, 3) == 0:
+        U, V = h.pade3()
+        return _solve_P_Q(U, V, structure=structure)
+
+    # Try Pade order 5.
+    eta_2 = max(h.d4_tight, h.d6_loose)
+    if eta_2 < 2.539398330063230e-001 and _ell(h.A, 5) == 0:
+        U, V = h.pade5()
+        return _solve_P_Q(U, V, structure=structure)
+
+    # Try Pade orders 7 and 9.
+    eta_3 = max(h.d6_tight, h.d8_loose)
+    if eta_3 < 9.504178996162932e-001 and _ell(h.A, 7) == 0:
+        U, V = h.pade7()
+        return _solve_P_Q(U, V, structure=structure)
+    if eta_3 < 2.097847961257068e+000 and _ell(h.A, 9) == 0:
+        U, V = h.pade9()
+        return _solve_P_Q(U, V, structure=structure)
+
+    # Use Pade order 13.
+    eta_4 = max(h.d8_loose, h.d10_loose)
+    eta_5 = min(eta_3, eta_4)
+    theta_13 = 4.25
+
+    # Choose smallest s>=0 such that 2**(-s) eta_5 <= theta_13
+    if eta_5 == 0:
+        # Nilpotent special case
+        s = 0
+    else:
+        s = max(int(np.ceil(np.log2(eta_5 / theta_13))), 0)
+    s = s + _ell(2**-s * h.A, 13)
+    U, V = h.pade13_scaled(s)
+    X = _solve_P_Q(U, V, structure=structure)
+    if structure == UPPER_TRIANGULAR:
+        # Invoke Code Fragment 2.1.
+        X = _fragment_2_1(X, h.A, s)
+    else:
+        # X = r_13(A)^(2^s) by repeated squaring.
+        for i in range(s):
+            X = X.dot(X)
+    return X
+
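+
+# Illustrative check sketch (hypothetical helper, never called by the
+# library): on an upper-triangular input, the triangular code path above
+# (structure detection, solve_triangular, and _fragment_2_1 when scaling
+# kicks in) should agree with the dense reference scipy.linalg.expm; which
+# Pade order gets selected depends on the norms, so only agreement is
+# checked.
+def _expm_triangular_check_sketch():  # pragma: no cover
+    import scipy.linalg
+    rng = np.random.RandomState(0)
+    T = np.triu(5.0 * rng.rand(6, 6))
+    assert np.allclose(_expm(T, use_exact_onenorm=True),
+                       scipy.linalg.expm(T))
+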
+
+def _solve_P_Q(U, V, structure=None):
+    """
+    A helper function for expm_2009.
+
+    Parameters
+    ----------
+    U : ndarray
+        Pade numerator.
+    V : ndarray
+        Pade denominator.
+    structure : str, optional
+        A string describing the structure of both matrices `U` and `V`.
+        Only `upper_triangular` is currently supported.
+
+    Notes
+    -----
+    The `structure` argument is inspired by similar args
+    for theano and cvxopt functions.
+
+    """
+    P = U + V
+    Q = -U + V
+    if isspmatrix(U) or is_pydata_spmatrix(U):
+        return spsolve(Q, P)
+    elif structure is None:
+        return solve(Q, P)
+    elif structure == UPPER_TRIANGULAR:
+        return solve_triangular(Q, P)
+    else:
+        raise ValueError('unsupported matrix structure: ' + str(structure))
+
+
+def _exp_sinch(a, x):
+    """
+    Stably evaluate exp(a)*sinh(x)/x
+
+    Notes
+    -----
+    The strategy of falling back to a sixth-order Taylor expansion
+    was suggested by the Spallation Neutron Source docs,
+    which were found on the internet by a web search:
+    http://www.ornl.gov/~t6p/resources/xal/javadoc/gov/sns/tools/math/ElementaryFunction.html
+    The details of the cutoff point and the Horner-like evaluation
+    were picked without reference to anything in particular.
+
+    Note that sinch is not currently implemented in scipy.special,
+    whereas the "engineer's" definition of sinc is implemented.
+    The implementation of sinc involves a scaling factor of pi
+    that distinguishes it from the "mathematician's" version of sinc.
+
+    """
+
+    # If x is small then use sixth order Taylor expansion.
+    # How small is small? I am using the point where the relative error
+    # of the approximation is less than 1e-14.
+    # If x is large then directly evaluate sinh(x) / x.
+    if abs(x) < 0.0135:
+        x2 = x*x
+        return np.exp(a) * (1 + (x2/6.)*(1 + (x2/20.)*(1 + (x2/42.))))
+    else:
+        return (np.exp(a + x) - np.exp(a - x)) / (2*x)
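+
+# Hand-checked spot check: near the cutoff the two branches agree; e.g. for
+# a = 0 and x = 0.0135, both sinh(x)/x and the Taylor branch evaluate to
+# 1.000030375..., matching to roughly 1e-14.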
+
+
+def _eq_10_42(lam_1, lam_2, t_12):
+    """
+    Equation (10.42) of Functions of Matrices: Theory and Computation.
+
+    Notes
+    -----
+    This is a helper function for _fragment_2_1 of expm_2009.
+    Equation (10.42) is on page 251 in the section on Schur algorithms.
+    In particular, section 10.4.3 explains the Schur-Parlett algorithm.
+    expm([[lam_1, t_12], [0, lam_2]])
+    =
+    [[exp(lam_1), t_12*exp((lam_1 + lam_2)/2)*sinch((lam_1 - lam_2)/2)],
+     [0, exp(lam_2)]]
+    """
+
+    # The plain formula t_12 * (exp(lam_2) - exp(lam_1)) / (lam_2 - lam_1)
+    # apparently suffers from cancellation, according to Higham's textbook.
+    # A nice implementation of sinch, defined as sinh(x)/x,
+    # will apparently work around the cancellation.
+    a = 0.5 * (lam_1 + lam_2)
+    b = 0.5 * (lam_1 - lam_2)
+    return t_12 * _exp_sinch(a, b)
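+
+# For lam_1 != lam_2,
+#     exp((lam_1 + lam_2)/2) * sinch((lam_1 - lam_2)/2)
+#         == (exp(lam_1) - exp(lam_2)) / (lam_1 - lam_2),
+# so _eq_10_42 reproduces the plain formula; when lam_1 == lam_2 it
+# degenerates smoothly to t_12 * exp(lam_1), with no cancellation.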
+
+
+def _fragment_2_1(X, T, s):
+    """
+    A helper function for expm_2009.
+
+    Notes
+    -----
+    The argument X is modified in-place, but this modification is not the same
+    as the returned value of the function.
+    This function also takes pains to do things in ways that are compatible
+    with sparse matrices, for example by avoiding fancy indexing
+    and by using methods of the matrices whenever possible instead of
+    using functions of the numpy or scipy libraries themselves.
+
+    """
+    # Form X = r_m(2^-s T)
+    n = X.shape[0]
+    diag_T = np.ravel(T.diagonal().copy())
+
+    # Replace diag(X) by exp(2^-s diag(T)).
+    scale = 2 ** -s
+    exp_diag = np.exp(scale * diag_T)
+    for k in range(n):
+        X[k, k] = exp_diag[k]
+
+    for i in range(s-1, -1, -1):
+        X = X.dot(X)
+
+        # Replace diag(X) by exp(2^-i diag(T)).
+        scale = 2 ** -i
+        exp_diag = np.exp(scale * diag_T)
+        for k in range(n):
+            X[k, k] = exp_diag[k]
+
+        # Replace (first) superdiagonal of X by explicit formula
+        # for superdiagonal of exp(2^-i T) from Eq (10.42) of
+        # the author's 2008 textbook
+        # Functions of Matrices: Theory and Computation.
+        for k in range(n-1):
+            lam_1 = scale * diag_T[k]
+            lam_2 = scale * diag_T[k+1]
+            t_12 = scale * T[k, k+1]
+            value = _eq_10_42(lam_1, lam_2, t_12)
+            X[k, k+1] = value
+
+    # Return the updated X matrix.
+    return X
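+
+# Design note: re-setting the diagonal and first superdiagonal from exact
+# scalar formulas after every squaring step keeps those entries accurate,
+# where naive repeated squaring of the triangular factor would let rounding
+# errors accumulate (Code Fragment 2.1 of the 2009 expm paper).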
+
+
+def _ell(A, m):
+    """
+    A helper function for expm_2009.
+
+    Parameters
+    ----------
+    A : linear operator
+        A linear operator whose matrix-power norms we care about.
+    m : int
+        The power of the linear operator.
+
+    Returns
+    -------
+    value : int
+        A non-negative integer correction to the scaling parameter s,
+        derived from the one-norm bound of the 2009 expm paper.
+
+    """
+    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
+        raise ValueError('expected A to be like a square matrix')
+
+    # The c_i are explained in (2.2) and (2.6) of the 2005 expm paper.
+    # They are coefficients of terms of a generating function series expansion.
+    c_i = {3: 100800.,
+           5: 10059033600.,
+           7: 4487938430976000.,
+           9: 5914384781877411840000.,
+           13: 113250775606021113483283660800000000.
+           }
+    abs_c_recip = c_i[m]
+
+    # This is explained after Eq. (1.2) of the 2009 expm paper.
+    # It is the "unit roundoff" of IEEE double precision arithmetic.
+    u = 2**-53
+
+    # Compute the one-norm of matrix power 2*m + 1 of abs(A).
+    A_abs_onenorm = _onenorm_matrix_power_nnm(abs(A), 2*m + 1)
+
+    # Treat zero norm as a special case.
+    if not A_abs_onenorm:
+        return 0
+
+    alpha = A_abs_onenorm / (_onenorm(A) * abs_c_recip)
+    log2_alpha_div_u = np.log2(alpha/u)
+    value = int(np.ceil(log2_alpha_div_u / (2 * m)))
+    return max(value, 0)
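+
+# In the order-13 branch of the main routine, s is incremented by
+# _ell(2**-s * A, 13) so that the truncation-error bound stays on the order
+# of the unit roundoff u; for well-scaled matrices the returned correction
+# is typically 0.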
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_norm.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_norm.py
new file mode 100644
index 00000000..63534e0d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_norm.py
@@ -0,0 +1,193 @@
+"""Sparse matrix norms.
+
+"""
+import numpy as np
+from scipy.sparse import issparse
+from scipy.sparse.linalg import svds
+import scipy.sparse as sp
+
+from numpy import Inf, sqrt, abs
+
+__all__ = ['norm']
+
+
+def _sparse_frobenius_norm(x):
+    data = sp._sputils._todata(x)
+    return np.linalg.norm(data)
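+
+# The Frobenius norm depends only on the stored entries, so applying
+# np.linalg.norm to the flat data array is exact: absent entries are zero
+# and contribute nothing to sqrt(sum(abs(a_ij)**2)).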
+
+
+def norm(x, ord=None, axis=None):
+    """
+    Norm of a sparse matrix
+
+    This function is able to return one of seven different matrix norms,
+    depending on the value of the ``ord`` parameter.
+
+    Parameters
+    ----------
+    x : a sparse matrix
+        Input sparse matrix.
+    ord : {non-zero int, inf, -inf, 'fro'}, optional
+        Order of the norm (see table under ``Notes``). inf means numpy's
+        `inf` object.
+    axis : {int, 2-tuple of ints, None}, optional
+        If `axis` is an integer, it specifies the axis of `x` along which to
+        compute the vector norms.  If `axis` is a 2-tuple, it specifies the
+        axes that hold 2-D matrices, and the matrix norms of these matrices
+        are computed.  If `axis` is None then either a vector norm (when `x`
+        is 1-D) or a matrix norm (when `x` is 2-D) is returned.
+
+    Returns
+    -------
+    n : float or ndarray
+
+    Notes
+    -----
+    Some norm orders are not implemented because the associated functions,
+    such as _multi_svd_norm, are not yet available for sparse matrices.
+
+    This docstring is adapted from numpy.linalg.norm:
+    https://github.com/numpy/numpy/blob/main/numpy/linalg/linalg.py
+
+    The following norms can be calculated:
+
+    =====  ============================
+    ord    norm for sparse matrices
+    =====  ============================
+    None   Frobenius norm
+    'fro'  Frobenius norm
+    inf    max(sum(abs(x), axis=1))
+    -inf   min(sum(abs(x), axis=1))
+    0      sum(x != 0)
+    1      max(sum(abs(x), axis=0))
+    -1     min(sum(abs(x), axis=0))
+    2      Spectral norm (the largest singular value)
+    -2     Not implemented
+    other  Not implemented
+    =====  ============================
+
+    The Frobenius norm is given by [1]_:
+
+        :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
+
+    References
+    ----------
+    .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
+        Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
+
+    Examples
+    --------
+    >>> from scipy.sparse import *
+    >>> import numpy as np
+    >>> from scipy.sparse.linalg import norm
+    >>> a = np.arange(9) - 4
+    >>> a
+    array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
+    >>> b = a.reshape((3, 3))
+    >>> b
+    array([[-4, -3, -2],
+           [-1, 0, 1],
+           [ 2, 3, 4]])
+
+    >>> b = csr_matrix(b)
+    >>> norm(b)
+    7.745966692414834
+    >>> norm(b, 'fro')
+    7.745966692414834
+    >>> norm(b, np.inf)
+    9
+    >>> norm(b, -np.inf)
+    2
+    >>> norm(b, 1)
+    7
+    >>> norm(b, -1)
+    6
+
+    The matrix 2-norm or the spectral norm is the largest singular
+    value, computed approximately and with limitations.
+
+    >>> b = diags([-1, 1], [0, 1], shape=(9, 10))
+    >>> norm(b, 2)
+    1.9753...
+    """
+    if not issparse(x):
+        raise TypeError("input is not sparse. use numpy.linalg.norm")
+
+    # Check the default case first and handle it immediately.
+    if axis is None and ord in (None, 'fro', 'f'):
+        return _sparse_frobenius_norm(x)
+
+    # Some norms require functions that are not implemented for all types.
+    x = x.tocsr()
+
+    if axis is None:
+        axis = (0, 1)
+    elif not isinstance(axis, tuple):
+        msg = "'axis' must be None, an integer or a tuple of integers"
+        try:
+            int_axis = int(axis)
+        except TypeError as e:
+            raise TypeError(msg) from e
+        if axis != int_axis:
+            raise TypeError(msg)
+        axis = (int_axis,)
+
+    nd = 2
+    if len(axis) == 2:
+        row_axis, col_axis = axis
+        if not (-nd <= row_axis < nd and -nd <= col_axis < nd):
+            raise ValueError('Invalid axis %r for an array with shape %r' %
+                             (axis, x.shape))
+        if row_axis % nd == col_axis % nd:
+            raise ValueError('Duplicate axes given.')
+        if ord == 2:
+            # Only solver="lobpcg" supports all numpy dtypes
+            _, s, _ = svds(x, k=1, solver="lobpcg")
+            return s[0]
+        elif ord == -2:
+            raise NotImplementedError
+            #return _multi_svd_norm(x, row_axis, col_axis, amin)
+        elif ord == 1:
+            return abs(x).sum(axis=row_axis).max(axis=col_axis)[0,0]
+        elif ord == Inf:
+            return abs(x).sum(axis=col_axis).max(axis=row_axis)[0,0]
+        elif ord == -1:
+            return abs(x).sum(axis=row_axis).min(axis=col_axis)[0,0]
+        elif ord == -Inf:
+            return abs(x).sum(axis=col_axis).min(axis=row_axis)[0,0]
+        elif ord in (None, 'f', 'fro'):
+            # The axis order does not matter for this norm.
+            return _sparse_frobenius_norm(x)
+        else:
+            raise ValueError("Invalid norm order for matrices.")
+    elif len(axis) == 1:
+        a, = axis
+        if not (-nd <= a < nd):
+            raise ValueError('Invalid axis %r for an array with shape %r' %
+                             (axis, x.shape))
+        if ord == Inf:
+            M = abs(x).max(axis=a)
+        elif ord == -Inf:
+            M = abs(x).min(axis=a)
+        elif ord == 0:
+            # Zero norm
+            M = (x != 0).sum(axis=a)
+        elif ord == 1:
+            # special case for speedup
+            M = abs(x).sum(axis=a)
+        elif ord in (2, None):
+            M = sqrt(abs(x).power(2).sum(axis=a))
+        else:
+            try:
+                ord + 1
+            except TypeError as e:
+                raise ValueError('Invalid norm order for vectors.') from e
+            M = np.power(abs(x).power(ord).sum(axis=a), 1 / ord)
+        if hasattr(M, 'toarray'):
+            return M.toarray().ravel()
+        elif hasattr(M, 'A'):
+            return M.A.ravel()
+        else:
+            return M.ravel()
+    else:
+        raise ValueError("Improper number of dimensions to norm.")
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_onenormest.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_onenormest.py
new file mode 100644
index 00000000..59c29270
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_onenormest.py
@@ -0,0 +1,467 @@
+"""Sparse block 1-norm estimator.
+"""
+
+import numpy as np
+from scipy.sparse.linalg import aslinearoperator
+
+
+__all__ = ['onenormest']
+
+
+def onenormest(A, t=2, itmax=5, compute_v=False, compute_w=False):
+    """
+    Compute a lower bound of the 1-norm of a sparse matrix.
+
+    Parameters
+    ----------
+    A : ndarray or other linear operator
+        A linear operator that can be transposed and that can
+        produce matrix products.
+    t : int, optional
+        A positive parameter controlling the tradeoff between
+        accuracy versus time and memory usage.
+        Larger values take longer and use more memory
+        but give more accurate output.
+    itmax : int, optional
+        Use at most this many iterations.
+    compute_v : bool, optional
+        Request a norm-maximizing linear operator input vector if True.
+    compute_w : bool, optional
+        Request a norm-maximizing linear operator output vector if True.
+
+    Returns
+    -------
+    est : float
+        An underestimate of the 1-norm of the sparse matrix.
+    v : ndarray, optional
+        The vector such that ||Av||_1 == est*||v||_1.
+        It can be thought of as an input to the linear operator
+        that gives an output with particularly large norm.
+    w : ndarray, optional
+        The vector Av which has relatively large 1-norm.
+        It can be thought of as an output of the linear operator
+        that is relatively large in norm compared to the input.
+
+    Notes
+    -----
+    This is algorithm 2.4 of [1].
+
+    In [2] it is described as follows.
+    "This algorithm typically requires the evaluation of
+    about 4t matrix-vector products and almost invariably
+    produces a norm estimate (which is, in fact, a lower
+    bound on the norm) correct to within a factor 3."
+
+    .. versionadded:: 0.13.0
+
+    References
+    ----------
+    .. [1] Nicholas J. Higham and Francoise Tisseur (2000),
+           "A Block Algorithm for Matrix 1-Norm Estimation,
+           with an Application to 1-Norm Pseudospectra."
+           SIAM J. Matrix Anal. Appl. Vol. 21, No. 4, pp. 1185-1201.
+
+    .. [2] Awad H. Al-Mohy and Nicholas J. Higham (2009),
+           "A new scaling and squaring algorithm for the matrix exponential."
+           SIAM J. Matrix Anal. Appl. Vol. 31, No. 3, pp. 970-989.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.sparse import csc_matrix
+    >>> from scipy.sparse.linalg import onenormest
+    >>> A = csc_matrix([[1., 0., 0.], [5., 8., 2.], [0., -1., 0.]], dtype=float)
+    >>> A.toarray()
+    array([[ 1.,  0.,  0.],
+           [ 5.,  8.,  2.],
+           [ 0., -1.,  0.]])
+    >>> onenormest(A)
+    9.0
+    >>> np.linalg.norm(A.toarray(), ord=1)
+    9.0
+    """
+
+    # Check the input.
+    A = aslinearoperator(A)
+    if A.shape[0] != A.shape[1]:
+        raise ValueError('expected the operator to act like a square matrix')
+
+    # If the operator size is small compared to t,
+    # then it is easier to compute the exact norm.
+    # Otherwise estimate the norm.
+    n = A.shape[1]
+    if t >= n:
+        A_explicit = np.asarray(aslinearoperator(A).matmat(np.identity(n)))
+        if A_explicit.shape != (n, n):
+            raise Exception('internal error: '
+                            'unexpected shape ' + str(A_explicit.shape))
+        col_abs_sums = abs(A_explicit).sum(axis=0)
+        if col_abs_sums.shape != (n, ):
+            raise Exception('internal error: '
+                            'unexpected shape ' + str(col_abs_sums.shape))
+        argmax_j = np.argmax(col_abs_sums)
+        v = elementary_vector(n, argmax_j)
+        w = A_explicit[:, argmax_j]
+        est = col_abs_sums[argmax_j]
+    else:
+        est, v, w, nmults, nresamples = _onenormest_core(A, A.H, t, itmax)
+
+    # Report the norm estimate along with some certificates of the estimate.
+    if compute_v or compute_w:
+        result = (est,)
+        if compute_v:
+            result += (v,)
+        if compute_w:
+            result += (w,)
+        return result
+    else:
+        return est
+
+
+def _blocked_elementwise(func):
+    """
+    Decorator for an elementwise function, to apply it blockwise along
+    first dimension, to avoid excessive memory usage in temporaries.
+    """
+    block_size = 2**20
+
+    def wrapper(x):
+        if x.shape[0] < block_size:
+            return func(x)
+        else:
+            y0 = func(x[:block_size])
+            y = np.zeros((x.shape[0],) + y0.shape[1:], dtype=y0.dtype)
+            y[:block_size] = y0
+            del y0
+            for j in range(block_size, x.shape[0], block_size):
+                y[j:j+block_size] = func(x[j:j+block_size])
+            return y
+    return wrapper
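+
+# Blocking caps the size of the temporaries created inside `func` at
+# block_size rows per call, so e.g. sign_round_up below never materializes
+# an intermediate larger than 2**20 rows of its argument.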
+
+
+@_blocked_elementwise
+def sign_round_up(X):
+    """
+    This should do the right thing for both real and complex matrices.
+
+    From Higham and Tisseur:
+    "Everything in this section remains valid for complex matrices
+    provided that sign(A) is redefined as the matrix (aij / |aij|)
+    (and sign(0) = 1) and transposes are replaced by conjugate transposes."
+
+    """
+    Y = X.copy()
+    Y[Y == 0] = 1
+    Y /= np.abs(Y)
+    return Y
+
+
+@_blocked_elementwise
+def _max_abs_axis1(X):
+    return np.max(np.abs(X), axis=1)
+
+
+def _sum_abs_axis0(X):
+    block_size = 2**20
+    r = None
+    for j in range(0, X.shape[0], block_size):
+        y = np.sum(np.abs(X[j:j+block_size]), axis=0)
+        if r is None:
+            r = y
+        else:
+            r += y
+    return r
+
+
+def elementary_vector(n, i):
+    v = np.zeros(n, dtype=float)
+    v[i] = 1
+    return v
+
+
+def vectors_are_parallel(v, w):
+    # Columns are considered parallel when they are equal.
+    # Entries are required to be in {-1, 1}, which guarantees that the
+    # magnitudes of the vectors are identical, so np.dot(v, w) == n holds
+    # exactly when v == w elementwise.
+    if v.ndim != 1 or v.shape != w.shape:
+        raise ValueError('expected conformant vectors with entries in {-1,1}')
+    n = v.shape[0]
+    return np.dot(v, w) == n
+
+
+def every_col_of_X_is_parallel_to_a_col_of_Y(X, Y):
+    for v in X.T:
+        if not any(vectors_are_parallel(v, w) for w in Y.T):
+            return False
+    return True
+
+
+def column_needs_resampling(i, X, Y=None):
+    # column i of X needs resampling if either
+    # it is parallel to a previous column of X or
+    # it is parallel to a column of Y
+    n, t = X.shape
+    v = X[:, i]
+    if any(vectors_are_parallel(v, X[:, j]) for j in range(i)):
+        return True
+    if Y is not None:
+        if any(vectors_are_parallel(v, w) for w in Y.T):
+            return True
+    return False
+
+
+def resample_column(i, X):
+    X[:, i] = np.random.randint(0, 2, size=X.shape[0])*2 - 1
+
+
+def less_than_or_close(a, b):
+    return np.allclose(a, b) or (a < b)
+
+
+def _algorithm_2_2(A, AT, t):
+    """
+    This is Algorithm 2.2.
+
+    Parameters
+    ----------
+    A : ndarray or other linear operator
+        A linear operator that can produce matrix products.
+    AT : ndarray or other linear operator
+        The transpose of A.
+    t : int, optional
+        A positive parameter controlling the tradeoff between
+        accuracy versus time and memory usage.
+
+    Returns
+    -------
+    g : sequence
+        A non-negative decreasing vector
+        such that g[j] is a lower bound for the 1-norm
+        of the column of A of jth largest 1-norm.
+        The first entry of this vector is therefore a lower bound
+        on the 1-norm of the linear operator A.
+        This sequence has length t.
+    ind : sequence
+        The ith entry of ind is the index of the column of A whose 1-norm
+        is given by g[i].
+        This sequence of indices has length t, and its entries are
+        chosen from range(n), possibly with repetition,
+        where n is the order of the operator A.
+
+    Notes
+    -----
+    This algorithm is mainly for testing.
+    It uses the 'ind' array in a way that is similar to
+    its usage in algorithm 2.4. This algorithm 2.2 may be easier to test,
+    so it gives a chance of uncovering bugs related to indexing
+    which could have propagated less noticeably to algorithm 2.4.
+
+    """
+    A_linear_operator = aslinearoperator(A)
+    AT_linear_operator = aslinearoperator(AT)
+    n = A_linear_operator.shape[0]
+
+    # Initialize the X block with columns of unit 1-norm.
+    X = np.ones((n, t))
+    if t > 1:
+        X[:, 1:] = np.random.randint(0, 2, size=(n, t-1))*2 - 1
+    X /= float(n)
+
+    # Iteratively improve the lower bounds.
+    # Track extra things, to assert invariants for debugging.
+    g_prev = None
+    h_prev = None
+    k = 1
+    ind = range(t)
+    while True:
+        Y = np.asarray(A_linear_operator.matmat(X))
+        g = _sum_abs_axis0(Y)
+        best_j = np.argmax(g)
+        g.sort()
+        g = g[::-1]
+        S = sign_round_up(Y)
+        Z = np.asarray(AT_linear_operator.matmat(S))
+        h = _max_abs_axis1(Z)
+
+        # If this algorithm runs for fewer than two iterations,
+        # then its return values do not have the properties indicated
+        # in the description of the algorithm.
+        # In particular, the entries of g are not 1-norms of any
+        # column of A until the second iteration.
+        # Therefore we will require the algorithm to run for at least
+        # two iterations, even though this requirement is not stated
+        # in the description of the algorithm.
+        if k >= 2:
+            if less_than_or_close(max(h), np.dot(Z[:, best_j], X[:, best_j])):
+                break
+        ind = np.argsort(h)[::-1][:t]
+        h = h[ind]
+        for j in range(t):
+            X[:, j] = elementary_vector(n, ind[j])
+
+        # Check invariant (2.2).
+        if k >= 2:
+            if not less_than_or_close(g_prev[0], h_prev[0]):
+                raise Exception('invariant (2.2) is violated')
+            if not less_than_or_close(h_prev[0], g[0]):
+                raise Exception('invariant (2.2) is violated')
+
+        # Check invariant (2.3).
+        if k >= 3:
+            for j in range(t):
+                if not less_than_or_close(g[j], g_prev[j]):
+                    raise Exception('invariant (2.3) is violated')
+
+        # Update for the next iteration.
+        g_prev = g
+        h_prev = h
+        k += 1
+
+    # Return the lower bounds and the corresponding column indices.
+    return g, ind
+
+
+def _onenormest_core(A, AT, t, itmax):
+    """
+    Compute a lower bound of the 1-norm of a sparse matrix.
+
+    Parameters
+    ----------
+    A : ndarray or other linear operator
+        A linear operator that can produce matrix products.
+    AT : ndarray or other linear operator
+        The transpose of A.
+    t : int, optional
+        A positive parameter controlling the tradeoff between
+        accuracy versus time and memory usage.
+    itmax : int, optional
+        Use at most this many iterations.
+
+    Returns
+    -------
+    est : float
+        An underestimate of the 1-norm of the sparse matrix.
+    v : ndarray, optional
+        The vector such that ||Av||_1 == est*||v||_1.
+        It can be thought of as an input to the linear operator
+        that gives an output with particularly large norm.
+    w : ndarray, optional
+        The vector Av which has relatively large 1-norm.
+        It can be thought of as an output of the linear operator
+        that is relatively large in norm compared to the input.
+    nmults : int, optional
+        The number of matrix products that were computed.
+    nresamples : int, optional
+        The number of times a parallel column was observed,
+        necessitating a re-randomization of the column.
+
+    Notes
+    -----
+    This is algorithm 2.4.
+
+    """
+    # This function is a more or less direct translation
+    # of Algorithm 2.4 from the Higham and Tisseur (2000) paper.
+    A_linear_operator = aslinearoperator(A)
+    AT_linear_operator = aslinearoperator(AT)
+    if itmax < 2:
+        raise ValueError('at least two iterations are required')
+    if t < 1:
+        raise ValueError('at least one column is required')
+    n = A.shape[0]
+    if t >= n:
+        raise ValueError('t should be smaller than the order of A')
+    # Track the number of big*small matrix multiplications
+    # and the number of resamplings.
+    nmults = 0
+    nresamples = 0
+    # "We now explain our choice of starting matrix.  We take the first
+    # column of X to be the vector of 1s [...] This has the advantage that
+    # for a matrix with nonnegative elements the algorithm converges
+    # with an exact estimate on the second iteration, and such matrices
+    # arise in applications [...]"
+    X = np.ones((n, t), dtype=float)
+    # "The remaining columns are chosen as rand{-1,1},
+    # with a check for and correction of parallel columns,
+    # exactly as for S in the body of the algorithm."
+    if t > 1:
+        for i in range(1, t):
+            # These are technically initial samples, not resamples,
+            # so the resampling count is not incremented.
+            resample_column(i, X)
+        for i in range(t):
+            while column_needs_resampling(i, X):
+                resample_column(i, X)
+                nresamples += 1
+    # "Choose starting matrix X with columns of unit 1-norm."
+    X /= float(n)
+    # "indices of used unit vectors e_j"
+    ind_hist = np.zeros(0, dtype=np.intp)
+    est_old = 0
+    S = np.zeros((n, t), dtype=float)
+    k = 1
+    ind = None
+    while True:
+        Y = np.asarray(A_linear_operator.matmat(X))
+        nmults += 1
+        mags = _sum_abs_axis0(Y)
+        est = np.max(mags)
+        best_j = np.argmax(mags)
+        if est > est_old or k == 2:
+            if k >= 2:
+                ind_best = ind[best_j]
+            w = Y[:, best_j]
+        # (1)
+        if k >= 2 and est <= est_old:
+            est = est_old
+            break
+        est_old = est
+        S_old = S
+        if k > itmax:
+            break
+        S = sign_round_up(Y)
+        del Y
+        # (2)
+        if every_col_of_X_is_parallel_to_a_col_of_Y(S, S_old):
+            break
+        if t > 1:
+            # "Ensure that no column of S is parallel to another column of S
+            # or to a column of S_old by replacing columns of S by rand{-1,1}."
+            for i in range(t):
+                while column_needs_resampling(i, S, S_old):
+                    resample_column(i, S)
+                    nresamples += 1
+        del S_old
+        # (3)
+        Z = np.asarray(AT_linear_operator.matmat(S))
+        nmults += 1
+        h = _max_abs_axis1(Z)
+        del Z
+        # (4)
+        if k >= 2 and max(h) == h[ind_best]:
+            break
+        # "Sort h so that h_first >= ... >= h_last
+        # and re-order ind correspondingly."
+        #
+        # Later on, we will need at most t+len(ind_hist) largest
+        # entries, so drop the rest
+        ind = np.argsort(h)[::-1][:t+len(ind_hist)].copy()
+        del h
+        if t > 1:
+            # (5)
+            # Break if the most promising t vectors have been visited already.
+            if np.in1d(ind[:t], ind_hist).all():
+                break
+            # Put the most promising unvisited vectors at the front of the list
+            # and put the visited vectors at the end of the list.
+            # Preserve the order of the indices induced by the ordering of h.
+            seen = np.in1d(ind, ind_hist)
+            ind = np.concatenate((ind[~seen], ind[seen]))
+        for j in range(t):
+            X[:, j] = elementary_vector(n, ind[j])
+
+        new_ind = ind[:t][~np.in1d(ind[:t], ind_hist)]
+        ind_hist = np.concatenate((ind_hist, new_ind))
+        k += 1
+    v = elementary_vector(n, ind_best)
+    return est, v, w, nmults, nresamples
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_svdp.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_svdp.py
new file mode 100644
index 00000000..e28eca78
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/_svdp.py
@@ -0,0 +1,321 @@
+"""
+Python wrapper for PROPACK
+--------------------------
+
+PROPACK is a collection of Fortran routines for iterative computation
+of partial SVDs of large matrices or linear operators.
+
+Based on BSD licensed pypropack project:
+  http://github.com/jakevdp/pypropack
+  Author: Jake Vanderplas 
+
+PROPACK source is BSD licensed, and available at
+  http://soi.stanford.edu/~rmunk/PROPACK/
+"""
+
+__all__ = ['_svdp']
+
+import numpy as np
+
+from scipy._lib._util import check_random_state
+from scipy.sparse.linalg import aslinearoperator
+from scipy.linalg import LinAlgError
+
+from ._propack import _spropack  # type: ignore
+from ._propack import _dpropack
+from ._propack import _cpropack
+from ._propack import _zpropack
+
+
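+# Map numpy dtype characters to PROPACK routines of matching precision:
+# 'f' (float32), 'd' (float64), 'F' (complex64), 'D' (complex128).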
+_lansvd_dict = {
+    'f': _spropack.slansvd,
+    'd': _dpropack.dlansvd,
+    'F': _cpropack.clansvd,
+    'D': _zpropack.zlansvd,
+}
+
+
+_lansvd_irl_dict = {
+    'f': _spropack.slansvd_irl,
+    'd': _dpropack.dlansvd_irl,
+    'F': _cpropack.clansvd_irl,
+    'D': _zpropack.zlansvd_irl,
+}
+
+_which_converter = {
+    'LM': 'L',
+    'SM': 'S',
+}
+
+
+class _AProd:
+    """
+    Wrapper class for linear operator
+
+    The call signature of the __call__ method matches the callback of
+    the PROPACK routines.
+    """
+    def __init__(self, A):
+        try:
+            self.A = aslinearoperator(A)
+        except TypeError:
+            self.A = aslinearoperator(np.asarray(A))
+
+    def __call__(self, transa, m, n, x, y, sparm, iparm):
+        if transa == 'n':
+            y[:] = self.A.matvec(x)
+        else:
+            y[:] = self.A.rmatvec(x)
+
+    @property
+    def shape(self):
+        return self.A.shape
+
+    @property
+    def dtype(self):
+        try:
+            return self.A.dtype
+        except AttributeError:
+            return self.A.matvec(np.zeros(self.A.shape[1])).dtype
+
+
+def _svdp(A, k, which='LM', irl_mode=True, kmax=None,
+          compute_u=True, compute_v=True, v0=None, full_output=False, tol=0,
+          delta=None, eta=None, anorm=0, cgs=False, elr=True,
+          min_relgap=0.002, shifts=None, maxiter=None, random_state=None):
+    """
+    Compute the singular value decomposition of a linear operator using PROPACK
+
+    Parameters
+    ----------
+    A : array_like, sparse matrix, or LinearOperator
+        Operator for which SVD will be computed.  If `A` is a LinearOperator
+        object, it must define both ``matvec`` and ``rmatvec`` methods.
+    k : int
+        Number of singular values/vectors to compute
+    which : {"LM", "SM"}
+        Which singular triplets to compute:
+        - 'LM': compute triplets corresponding to the `k` largest singular
+                values
+        - 'SM': compute triplets corresponding to the `k` smallest singular
+                values
+        `which='SM'` requires `irl_mode=True`.  Computes largest singular
+        values by default.
+    irl_mode : bool, optional
+        If `True`, then compute SVD using IRL (implicitly restarted Lanczos)
+        mode.  Default is `True`.
+    kmax : int, optional
+        Maximal number of iterations / maximal dimension of the Krylov
+        subspace. Default is ``10 * k``.
+    compute_u : bool, optional
+        If `True` (default) then compute left singular vectors, `u`.
+    compute_v : bool, optional
+        If `True` (default) then compute right singular vectors, `v`.
+    tol : float, optional
+        The desired relative accuracy for computed singular values.
+        If not specified, it will be set based on machine precision.
+    v0 : array_like, optional
+        Starting vector for iterations: must be of length ``A.shape[0]``.
+        If not specified, PROPACK will generate a starting vector.
+    full_output : bool, optional
+        If `True`, then return sigma_bound.  Default is `False`.
+    delta : float, optional
+        Level of orthogonality to maintain between Lanczos vectors.
+        Default is set based on machine precision.
+    eta : float, optional
+        Orthogonality cutoff.  During reorthogonalization, vectors with
+        component larger than `eta` along the Lanczos vector will be purged.
+        Default is set based on machine precision.
+    anorm : float, optional
+        Estimate of ``||A||``.  Default is `0`.
+    cgs : bool, optional
+        If `True`, reorthogonalization is done using classical Gram-Schmidt.
+        If `False` (default), it is done using modified Gram-Schmidt.
+    elr : bool, optional
+        If `True` (default), then extended local orthogonality is enforced
+        when obtaining singular vectors.
+    min_relgap : float, optional
+        The smallest relative gap allowed between any shift in IRL mode.
+        Default is `0.002`.  Accessed only if ``irl_mode=True``.
+    shifts : int, optional
+        Number of shifts per restart in IRL mode.  Default is determined
+        to satisfy ``k <= min(kmax-shifts, m, n)``.  Must be
+        >= 0, but choosing 0 might lead to performance degradation.
+        Accessed only if ``irl_mode=True``.
+    maxiter : int, optional
+        Maximum number of restarts in IRL mode.  Default is `1000`.
+        Accessed only if ``irl_mode=True``.
+    random_state : {None, int, `numpy.random.Generator`,
+                    `numpy.random.RandomState`}, optional
+
+        Pseudorandom number generator state used to generate resamples.
+
+        If `random_state` is ``None`` (or `np.random`), the
+        `numpy.random.RandomState` singleton is used.
+        If `random_state` is an int, a new ``RandomState`` instance is used,
+        seeded with `random_state`.
+        If `random_state` is already a ``Generator`` or ``RandomState``
+        instance then that instance is used.
+
+    Returns
+    -------
+    u : ndarray
+        The `k` largest (``which="LM"``) or smallest (``which="SM"``) left
+        singular vectors, ``shape == (A.shape[0], k)``, returned only if
+        ``compute_u=True``.
+    sigma : ndarray
+        The top `k` singular values, ``shape == (k,)``
+    vt : ndarray
+        The `k` largest (``which="LM"``) or smallest (``which="SM"``) right
+        singular vectors, ``shape == (k, A.shape[1])``, returned only if
+        ``compute_v=True``.
+    sigma_bound : ndarray
+        the error bounds on the singular values sigma, returned only if
+        ``full_output=True``.
+
+    """
+    # 32-bit complex PROPACK functions have Fortran LAPACK ABI
+    # incompatibility issues
+    if np.iscomplexobj(A) and (np.intp(0).itemsize < 8):
+        raise TypeError('PROPACK complex-valued SVD methods not available '
+                        'for 32-bit builds')
+
+    random_state = check_random_state(random_state)
+
+    which = which.upper()
+    if which not in {'LM', 'SM'}:
+        raise ValueError("`which` must be either 'LM' or 'SM'")
+    if not irl_mode and which == 'SM':
+        raise ValueError("`which`='SM' requires irl_mode=True")
+
+    aprod = _AProd(A)
+    typ = aprod.dtype.char
+
+    try:
+        lansvd_irl = _lansvd_irl_dict[typ]
+        lansvd = _lansvd_dict[typ]
+    except KeyError:
+        # work with non-supported types using native system precision
+        if np.iscomplexobj(np.empty(0, dtype=typ)):
+            typ = np.dtype(complex).char
+        else:
+            typ = np.dtype(float).char
+        lansvd_irl = _lansvd_irl_dict[typ]
+        lansvd = _lansvd_dict[typ]
+
+    m, n = aprod.shape
+    if (k < 1) or (k > min(m, n)):
+        raise ValueError("k must be positive and not greater than m or n")
+
+    if kmax is None:
+        kmax = 10*k
+    if maxiter is None:
+        maxiter = 1000
+
+    # guard against unnecessarily large kmax
+    kmax = min(m + 1, n + 1, kmax)
+    if kmax < k:
+        raise ValueError(
+            "kmax must be greater than or equal to k, "
+            f"but kmax ({kmax}) < k ({k})")
+
+    # convert python args to fortran args
+    jobu = 'y' if compute_u else 'n'
+    jobv = 'y' if compute_v else 'n'
+
+    # these will be the output arrays
+    u = np.zeros((m, kmax + 1), order='F', dtype=typ)
+    v = np.zeros((n, kmax), order='F', dtype=typ)
+
+    # Specify the starting vector.  If v0 is all zero, PROPACK will generate
+    # a random starting vector: the random seed cannot be controlled in that
+    # case, so we'll instead use numpy to generate a random vector.
+    if v0 is None:
+        u[:, 0] = random_state.uniform(size=m)
+        if np.iscomplexobj(np.empty(0, dtype=typ)):  # complex type
+            u[:, 0] += 1j * random_state.uniform(size=m)
+    else:
+        try:
+            u[:, 0] = v0
+        except ValueError:
+            raise ValueError(f"v0 must be of length {m}")
+
+    # process options for the fit
+    if delta is None:
+        delta = np.sqrt(np.finfo(typ).eps)
+    if eta is None:
+        eta = np.finfo(typ).eps ** 0.75
+
+    if irl_mode:
+        doption = np.array((delta, eta, anorm, min_relgap), dtype=typ.lower())
+
+        # validate or find default shifts
+        if shifts is None:
+            shifts = kmax - k
+        if k > min(kmax - shifts, m, n):
+            raise ValueError('shifts must satisfy '
+                             'k <= min(kmax-shifts, m, n)!')
+        elif shifts < 0:
+            raise ValueError('shifts must be >= 0!')
+
+    else:
+        doption = np.array((delta, eta, anorm), dtype=typ.lower())
+
+    ioption = np.array((int(bool(cgs)), int(bool(elr))), dtype='i')
+
+    # If computing `u` or `v` (left and right singular vectors,
+    # respectively), `blocksize` controls how large a fraction of the
+    # work is done via fast BLAS level 3 operations.  A larger blocksize
+    # may lead to faster computation at the expense of greater memory
+    # consumption.  `blocksize` must be ``>= 1``.  We choose a blocksize
+    # of 16; the docs don't specify a value, but it is almost surely
+    # meant to be a power of 2.
+    blocksize = 16
+
+    # Determine lwork & liwork:
+    # the required lengths are specified in the PROPACK documentation
+    if compute_u or compute_v:
+        lwork = m + n + 9*kmax + 5*kmax*kmax + 4 + max(
+            3*kmax*kmax + 4*kmax + 4,
+            blocksize*max(m, n))
+        liwork = 8*kmax
+    else:
+        lwork = m + n + 9*kmax + 2*kmax*kmax + 4 + max(m + n, 4*kmax + 4)
+        liwork = 2*kmax + 1
+    work = np.empty(lwork, dtype=typ.lower())
+    iwork = np.empty(liwork, dtype=np.int32)
+
+    # dummy arguments: these are passed to aprod, and not used in this wrapper
+    dparm = np.empty(1, dtype=typ.lower())
+    iparm = np.empty(1, dtype=np.int32)
+
+    if typ.isupper():
+        # PROPACK documentation is unclear on the required length of zwork.
+        # Use the same length that Julia's wrapper uses; see
+        # https://github.com/JuliaSmoothOptimizers/PROPACK.jl/
+        zwork = np.empty(m + n + 32*m, dtype=typ)
+        works = work, zwork, iwork
+    else:
+        works = work, iwork
+
+    if irl_mode:
+        u, sigma, bnd, v, info = lansvd_irl(_which_converter[which], jobu,
+                                            jobv, m, n, shifts, k, maxiter,
+                                            aprod, u, v, tol, *works, doption,
+                                            ioption, dparm, iparm)
+    else:
+        u, sigma, bnd, v, info = lansvd(jobu, jobv, m, n, k, aprod, u, v, tol,
+                                        *works, doption, ioption, dparm, iparm)
+
+    if info > 0:
+        raise LinAlgError(
+            f"An invariant subspace of dimension {info} was found.")
+    elif info < 0:
+        raise LinAlgError(
+            f"k={k} singular triplets did not converge within "
+            f"kmax={kmax} iterations")
+
+    # info == 0: The K largest (or smallest) singular triplets were computed
+    # successfully!
+
+    return u[:, :k], sigma, v[:, :k].conj().T, bnd
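+
+# Illustrative call (a sketch; `A` may be a 2-D array, sparse matrix, or
+# LinearOperator):
+#     u, sigma, vt, bnd = _svdp(A, k=3, random_state=0)
+# computes the 3 largest singular triplets of A, plus error bounds `bnd`
+# on the singular values.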
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/dsolve.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/dsolve.py
new file mode 100644
index 00000000..92248421
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/dsolve.py
@@ -0,0 +1,38 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.sparse.linalg` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _dsolve
+
+
+__all__ = [  # noqa: F822
+    'MatrixRankWarning', 'SuperLU', 'factorized',
+    'spilu', 'splu', 'spsolve',
+    'spsolve_triangular', 'use_solver', 'linsolve', 'test'
+]
+
+dsolve_modules = ['linsolve']
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__ and name not in dsolve_modules:
+        raise AttributeError(
+            "scipy.sparse.linalg.dsolve is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.sparse.linalg instead.")
+
+    if name in dsolve_modules:
+        msg = (f'The module `scipy.sparse.linalg.dsolve.{name}` is '
+               'deprecated. All public names must be imported directly from '
+               'the `scipy.sparse.linalg` namespace.')
+    else:
+        msg = (f"Please use `{name}` from the `scipy.sparse.linalg` namespace,"
+               " the `scipy.sparse.linalg.eigen` namespace is deprecated.")
+
+    warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_dsolve, name)
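+
+# For example, `from scipy.sparse.linalg.dsolve import spsolve` still works
+# but emits a DeprecationWarning pointing at `scipy.sparse.linalg.spsolve`.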
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/eigen.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/eigen.py
new file mode 100644
index 00000000..e89cfac7
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/eigen.py
@@ -0,0 +1,37 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.sparse.linalg` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _eigen
+
+
+__all__ = [  # noqa: F822
+    'ArpackError', 'ArpackNoConvergence',
+    'eigs', 'eigsh', 'lobpcg', 'svds', 'arpack', 'test'
+]
+
+eigen_modules = ['arpack']
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__ and name not in eigen_modules:
+        raise AttributeError(
+            "scipy.sparse.linalg.eigen is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.sparse.linalg instead.")
+
+    if name in eigen_modules:
+        msg = (f'The module `scipy.sparse.linalg.eigen.{name}` is '
+               'deprecated. All public names must be imported directly from '
+               'the `scipy.sparse.linalg` namespace.')
+    else:
+        msg = (f"Please use `{name}` from the `scipy.sparse.linalg` namespace,"
+               " the `scipy.sparse.linalg.eigen` namespace is deprecated.")
+
+    warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_eigen, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/interface.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/interface.py
new file mode 100644
index 00000000..6e1329d5
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/interface.py
@@ -0,0 +1,30 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.sparse.linalg` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _interface
+
+
+__all__ = [  # noqa: F822
+    'LinearOperator', 'aslinearoperator',
+    'isspmatrix', 'isshape', 'isintlike', 'asmatrix',
+    'is_pydata_spmatrix', 'MatrixLinearOperator', 'IdentityOperator'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.sparse.linalg.interface is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.sparse.linalg instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.sparse.linalg` namespace, "
+                  "the `scipy.sparse.linalg.interface` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_interface, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/isolve.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/isolve.py
new file mode 100644
index 00000000..f113cd2d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/isolve.py
@@ -0,0 +1,30 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.sparse.linalg` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _isolve
+
+
+__all__ = [  # noqa: F822
+    'bicg', 'bicgstab', 'cg', 'cgs', 'gcrotmk', 'gmres',
+    'lgmres', 'lsmr', 'lsqr',
+    'minres', 'qmr', 'tfqmr', 'utils', 'iterative', 'test'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.sparse.linalg.isolve is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.sparse.linalg instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.sparse.linalg` namespace, "
+                  "the `scipy.sparse.linalg.isolve` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_isolve, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/matfuncs.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/matfuncs.py
new file mode 100644
index 00000000..71d848c5
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/matfuncs.py
@@ -0,0 +1,30 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.sparse.linalg` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _matfuncs
+
+
+__all__ = [  # noqa: F822
+    'expm', 'inv', 'solve', 'solve_triangular',
+    'isspmatrix', 'spsolve', 'is_pydata_spmatrix', 'LinearOperator',
+    'UPPER_TRIANGULAR', 'MatrixPowerOperator', 'ProductOperator'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.sparse.linalg.matfuncs is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.sparse.linalg instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.sparse.linalg` namespace, "
+                  "the `scipy.sparse.linalg.matfuncs` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_matfuncs, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/tests/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/tests/propack_test_data.npz b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/tests/propack_test_data.npz
new file mode 100644
index 00000000..f0da2580
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/tests/propack_test_data.npz differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/tests/test_expm_multiply.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/tests/test_expm_multiply.py
new file mode 100644
index 00000000..ad42d9ea
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/tests/test_expm_multiply.py
@@ -0,0 +1,345 @@
+"""Test functions for the sparse.linalg._expm_multiply module."""
+from functools import partial
+from itertools import product
+
+import numpy as np
+import pytest
+from numpy.testing import (assert_allclose, assert_, assert_equal,
+                           suppress_warnings)
+from scipy.sparse import SparseEfficiencyWarning
+from scipy.sparse.linalg import aslinearoperator
+import scipy.linalg
+from scipy.sparse.linalg import expm as sp_expm
+from scipy.sparse.linalg._expm_multiply import (_theta, _compute_p_max,
+        _onenormest_matrix_power, expm_multiply, _expm_multiply_simple,
+        _expm_multiply_interval)
+
+
+IMPRECISE = {np.single, np.csingle}
+REAL_DTYPES = {np.intc, np.int_, np.longlong,
+               np.single, np.double, np.longdouble}
+COMPLEX_DTYPES = {np.csingle, np.cdouble, np.clongdouble}
+# use sorted tuple to ensure fixed order of tests
+DTYPES = tuple(sorted(REAL_DTYPES ^ COMPLEX_DTYPES, key=str))
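+# (`^` is symmetric difference; REAL_DTYPES and COMPLEX_DTYPES are disjoint,
+# so this equals their union.)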
+
+
+def estimated(func):
+    """If trace is estimated, it should warn.
+
+    We warn that estimation of trace might impact performance.
+    All results have to be correct nevertheless!
+
+    """
+    def wrapped(*args, **kwds):
+        with pytest.warns(UserWarning,
+                          match="Trace of LinearOperator not available"):
+            return func(*args, **kwds)
+    return wrapped
+
+
+def less_than_or_close(a, b):
+    return np.allclose(a, b) or (a < b)
+
+
+class TestExpmActionSimple:
+    """
+    These tests do not consider the case of multiple time steps in one call.
+    """
+
+    def test_theta_monotonicity(self):
+        pairs = sorted(_theta.items())
+        for (m_a, theta_a), (m_b, theta_b) in zip(pairs[:-1], pairs[1:]):
+            assert_(theta_a < theta_b)
+
+    def test_p_max_default(self):
+        m_max = 55
+        expected_p_max = 8
+        observed_p_max = _compute_p_max(m_max)
+        assert_equal(observed_p_max, expected_p_max)
+
+    def test_p_max_range(self):
+        for m_max in range(1, 55+1):
+            p_max = _compute_p_max(m_max)
+            assert_(p_max*(p_max - 1) <= m_max + 1)
+            p_too_big = p_max + 1
+            assert_(p_too_big*(p_too_big - 1) > m_max + 1)
+
+    def test_onenormest_matrix_power(self):
+        np.random.seed(1234)
+        n = 40
+        nsamples = 10
+        for i in range(nsamples):
+            A = scipy.linalg.inv(np.random.randn(n, n))
+            for p in range(4):
+                if not p:
+                    M = np.identity(n)
+                else:
+                    M = np.dot(M, A)
+                estimated = _onenormest_matrix_power(A, p)
+                exact = np.linalg.norm(M, 1)
+                assert_(less_than_or_close(estimated, exact))
+                assert_(less_than_or_close(exact, 3*estimated))
+
+    def test_expm_multiply(self):
+        np.random.seed(1234)
+        n = 40
+        k = 3
+        nsamples = 10
+        for i in range(nsamples):
+            A = scipy.linalg.inv(np.random.randn(n, n))
+            B = np.random.randn(n, k)
+            observed = expm_multiply(A, B)
+            expected = np.dot(sp_expm(A), B)
+            assert_allclose(observed, expected)
+            observed = estimated(expm_multiply)(aslinearoperator(A), B)
+            assert_allclose(observed, expected)
+            traceA = np.trace(A)
+            observed = expm_multiply(aslinearoperator(A), B, traceA=traceA)
+            assert_allclose(observed, expected)
+
+    def test_matrix_vector_multiply(self):
+        np.random.seed(1234)
+        n = 40
+        nsamples = 10
+        for i in range(nsamples):
+            A = scipy.linalg.inv(np.random.randn(n, n))
+            v = np.random.randn(n)
+            observed = expm_multiply(A, v)
+            expected = np.dot(sp_expm(A), v)
+            assert_allclose(observed, expected)
+            observed = estimated(expm_multiply)(aslinearoperator(A), v)
+            assert_allclose(observed, expected)
+
+    def test_scaled_expm_multiply(self):
+        np.random.seed(1234)
+        n = 40
+        k = 3
+        nsamples = 10
+        for i, t in product(range(nsamples), [0.2, 1.0, 1.5]):
+            with np.errstate(invalid='ignore'):
+                A = scipy.linalg.inv(np.random.randn(n, n))
+                B = np.random.randn(n, k)
+                observed = _expm_multiply_simple(A, B, t=t)
+                expected = np.dot(sp_expm(t*A), B)
+                assert_allclose(observed, expected)
+                observed = estimated(_expm_multiply_simple)(
+                    aslinearoperator(A), B, t=t
+                )
+                assert_allclose(observed, expected)
+
+    def test_scaled_expm_multiply_single_timepoint(self):
+        np.random.seed(1234)
+        t = 0.1
+        n = 5
+        k = 2
+        A = np.random.randn(n, n)
+        B = np.random.randn(n, k)
+        observed = _expm_multiply_simple(A, B, t=t)
+        expected = sp_expm(t*A).dot(B)
+        assert_allclose(observed, expected)
+        observed = estimated(_expm_multiply_simple)(
+            aslinearoperator(A), B, t=t
+        )
+        assert_allclose(observed, expected)
+
+    def test_sparse_expm_multiply(self):
+        np.random.seed(1234)
+        n = 40
+        k = 3
+        nsamples = 10
+        for i in range(nsamples):
+            A = scipy.sparse.rand(n, n, density=0.05)
+            B = np.random.randn(n, k)
+            observed = expm_multiply(A, B)
+            with suppress_warnings() as sup:
+                sup.filter(SparseEfficiencyWarning,
+                           "splu converted its input to CSC format")
+                sup.filter(SparseEfficiencyWarning,
+                           "spsolve is more efficient when sparse b is in the"
+                           " CSC matrix format")
+                expected = sp_expm(A).dot(B)
+            assert_allclose(observed, expected)
+            observed = estimated(expm_multiply)(aslinearoperator(A), B)
+            assert_allclose(observed, expected)
+
+    def test_complex(self):
+        A = np.array([
+            [1j, 1j],
+            [0, 1j]], dtype=complex)
+        B = np.array([1j, 1j])
+        observed = expm_multiply(A, B)
+        expected = np.array([
+            1j * np.exp(1j) + 1j * (1j*np.cos(1) - np.sin(1)),
+            1j * np.exp(1j)], dtype=complex)
+        assert_allclose(observed, expected)
+        observed = estimated(expm_multiply)(aslinearoperator(A), B)
+        assert_allclose(observed, expected)
+
+
+class TestExpmActionInterval:
+
+    def test_sparse_expm_multiply_interval(self):
+        np.random.seed(1234)
+        start = 0.1
+        stop = 3.2
+        n = 40
+        k = 3
+        endpoint = True
+        for num in (14, 13, 2):
+            A = scipy.sparse.rand(n, n, density=0.05)
+            B = np.random.randn(n, k)
+            v = np.random.randn(n)
+            for target in (B, v):
+                X = expm_multiply(A, target, start=start, stop=stop,
+                                  num=num, endpoint=endpoint)
+                samples = np.linspace(start=start, stop=stop,
+                                      num=num, endpoint=endpoint)
+                with suppress_warnings() as sup:
+                    sup.filter(SparseEfficiencyWarning,
+                               "splu converted its input to CSC format")
+                    sup.filter(SparseEfficiencyWarning,
+                               "spsolve is more efficient when sparse b is in"
+                               " the CSC matrix format")
+                    for solution, t in zip(X, samples):
+                        assert_allclose(solution, sp_expm(t*A).dot(target))
+
+    def test_expm_multiply_interval_vector(self):
+        np.random.seed(1234)
+        interval = {'start': 0.1, 'stop': 3.2, 'endpoint': True}
+        for num, n in product([14, 13, 2], [1, 2, 5, 20, 40]):
+            A = scipy.linalg.inv(np.random.randn(n, n))
+            v = np.random.randn(n)
+            samples = np.linspace(num=num, **interval)
+            X = expm_multiply(A, v, num=num, **interval)
+            for solution, t in zip(X, samples):
+                assert_allclose(solution, sp_expm(t*A).dot(v))
+            # test for linear operator with unknown trace -> estimate trace
+            Xguess = estimated(expm_multiply)(aslinearoperator(A), v,
+                                              num=num, **interval)
+            # test for linear operator with given trace
+            Xgiven = expm_multiply(aslinearoperator(A), v, num=num, **interval,
+                                   traceA=np.trace(A))
+            # test robustness for linear operator with wrong trace
+            Xwrong = expm_multiply(aslinearoperator(A), v, num=num, **interval,
+                                   traceA=np.trace(A)*5)
+            for sol_guess, sol_given, sol_wrong, t in zip(Xguess, Xgiven,
+                                                          Xwrong, samples):
+                correct = sp_expm(t*A).dot(v)
+                assert_allclose(sol_guess, correct)
+                assert_allclose(sol_given, correct)
+                assert_allclose(sol_wrong, correct)
+
+    def test_expm_multiply_interval_matrix(self):
+        np.random.seed(1234)
+        interval = {'start': 0.1, 'stop': 3.2, 'endpoint': True}
+        for num, n, k in product([14, 13, 2], [1, 2, 5, 20, 40], [1, 2]):
+            A = scipy.linalg.inv(np.random.randn(n, n))
+            B = np.random.randn(n, k)
+            samples = np.linspace(num=num, **interval)
+            X = expm_multiply(A, B, num=num, **interval)
+            for solution, t in zip(X, samples):
+                assert_allclose(solution, sp_expm(t*A).dot(B))
+            X = estimated(expm_multiply)(aslinearoperator(A), B, num=num,
+                                         **interval)
+            for solution, t in zip(X, samples):
+                assert_allclose(solution, sp_expm(t*A).dot(B))
+
+    def test_sparse_expm_multiply_interval_dtypes(self):
+        # Test A & B int
+        A = scipy.sparse.diags(np.arange(5),format='csr', dtype=int)
+        B = np.ones(5, dtype=int)
+        Aexpm = scipy.sparse.diags(np.exp(np.arange(5)),format='csr')
+        assert_allclose(expm_multiply(A,B,0,1)[-1], Aexpm.dot(B))
+
+        # Test A complex, B int
+        A = scipy.sparse.diags(-1j*np.arange(5),format='csr', dtype=complex)
+        B = np.ones(5, dtype=int)
+        Aexpm = scipy.sparse.diags(np.exp(-1j*np.arange(5)),format='csr')
+        assert_allclose(expm_multiply(A,B,0,1)[-1], Aexpm.dot(B))
+
+        # Test A int, B complex
+        A = scipy.sparse.diags(np.arange(5),format='csr', dtype=int)
+        B = np.full(5, 1j, dtype=complex)
+        Aexpm = scipy.sparse.diags(np.exp(np.arange(5)),format='csr')
+        assert_allclose(expm_multiply(A,B,0,1)[-1], Aexpm.dot(B))
+
+    def test_expm_multiply_interval_status_0(self):
+        self._help_test_specific_expm_interval_status(0)
+
+    def test_expm_multiply_interval_status_1(self):
+        self._help_test_specific_expm_interval_status(1)
+
+    def test_expm_multiply_interval_status_2(self):
+        self._help_test_specific_expm_interval_status(2)
+
+    def _help_test_specific_expm_interval_status(self, target_status):
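+        # Strategy: repeatedly draw random problems until the routine's
+        # status_only flag reports the requested status code (identifying
+        # which internal code path of _expm_multiply_interval was taken),
+        # then verify that path's output against a dense expm reference.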
+        np.random.seed(1234)
+        start = 0.1
+        stop = 3.2
+        endpoint = True
+        n = 5
+        k = 2
+        nrepeats = 10
+        nsuccesses = 0
+        for num in [14, 13, 2] * nrepeats:
+            A = np.random.randn(n, n)
+            B = np.random.randn(n, k)
+            status = _expm_multiply_interval(A, B,
+                    start=start, stop=stop, num=num, endpoint=endpoint,
+                    status_only=True)
+            if status == target_status:
+                X, status = _expm_multiply_interval(A, B,
+                        start=start, stop=stop, num=num, endpoint=endpoint,
+                        status_only=False)
+                assert_equal(X.shape, (num, n, k))
+                samples = np.linspace(start=start, stop=stop,
+                        num=num, endpoint=endpoint)
+                for solution, t in zip(X, samples):
+                    assert_allclose(solution, sp_expm(t*A).dot(B))
+                nsuccesses += 1
+        if not nsuccesses:
+            msg = f'failed to find a status-{target_status} interval'
+            raise Exception(msg)
+
+
+@pytest.mark.parametrize("dtype_a", DTYPES)
+@pytest.mark.parametrize("dtype_b", DTYPES)
+@pytest.mark.parametrize("b_is_matrix", [False, True])
+def test_expm_multiply_dtype(dtype_a, dtype_b, b_is_matrix):
+    """Make sure `expm_multiply` handles all numerical dtypes correctly."""
+    assert_allclose_ = (partial(assert_allclose, rtol=1.2e-3, atol=1e-5)
+                        if {dtype_a, dtype_b} & IMPRECISE else assert_allclose)
+    rng = np.random.default_rng(1234)
+    # test data
+    n = 7
+    b_shape = (n, 3) if b_is_matrix else (n, )
+    if dtype_a in REAL_DTYPES:
+        A = scipy.linalg.inv(rng.random([n, n])).astype(dtype_a)
+    else:
+        A = scipy.linalg.inv(
+            rng.random([n, n]) + 1j*rng.random([n, n])
+        ).astype(dtype_a)
+    if dtype_b in REAL_DTYPES:
+        B = (2*rng.random(b_shape)).astype(dtype_b)
+    else:
+        B = (rng.random(b_shape) + 1j*rng.random(b_shape)).astype(dtype_b)
+
+    # single application
+    sol_mat = expm_multiply(A, B)
+    sol_op = estimated(expm_multiply)(aslinearoperator(A), B)
+    direct_sol = np.dot(sp_expm(A), B)
+    assert_allclose_(sol_mat, direct_sol)
+    assert_allclose_(sol_op, direct_sol)
+    sol_op = expm_multiply(aslinearoperator(A), B, traceA=np.trace(A))
+    assert_allclose_(sol_op, direct_sol)
+
+    # for time points
+    interval = {'start': 0.1, 'stop': 3.2, 'num': 13, 'endpoint': True}
+    samples = np.linspace(**interval)
+    X_mat = expm_multiply(A, B, **interval)
+    X_op = estimated(expm_multiply)(aslinearoperator(A), B, **interval)
+    for sol_mat, sol_op, t in zip(X_mat, X_op, samples):
+        direct_sol = sp_expm(t*A).dot(B)
+        assert_allclose_(sol_mat, direct_sol)
+        assert_allclose_(sol_op, direct_sol)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/tests/test_interface.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/tests/test_interface.py
new file mode 100644
index 00000000..c4ad4c46
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/tests/test_interface.py
@@ -0,0 +1,449 @@
+"""Test functions for the sparse.linalg._interface module
+"""
+
+from functools import partial
+from itertools import product
+import operator
+from pytest import raises as assert_raises, warns
+from numpy.testing import assert_, assert_equal
+
+import numpy as np
+import scipy.sparse as sparse
+
+import scipy.sparse.linalg._interface as interface
+from scipy.sparse._sputils import matrix
+
+
+class TestLinearOperator:
+    def setup_method(self):
+        self.A = np.array([[1,2,3],
+                           [4,5,6]])
+        self.B = np.array([[1,2],
+                           [3,4],
+                           [5,6]])
+        self.C = np.array([[1,2],
+                           [3,4]])
+
+    def test_matvec(self):
+        def get_matvecs(A):
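+            # Two equivalent definitions of the same operator: the first
+            # supplies only matvec/rmatvec, so matmat/rmatmat fall back to
+            # the default column-by-column implementations; the second
+            # supplies all four explicitly.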
+            return [{
+                        'shape': A.shape,
+                        'matvec': lambda x: np.dot(A, x).reshape(A.shape[0]),
+                        'rmatvec': lambda x: np.dot(A.T.conj(),
+                                                    x).reshape(A.shape[1])
+                    },
+                    {
+                        'shape': A.shape,
+                        'matvec': lambda x: np.dot(A, x),
+                        'rmatvec': lambda x: np.dot(A.T.conj(), x),
+                        'rmatmat': lambda x: np.dot(A.T.conj(), x),
+                        'matmat': lambda x: np.dot(A, x)
+                    }]
+
+        for matvecs in get_matvecs(self.A):
+            A = interface.LinearOperator(**matvecs)
+
+            assert_(A.args == ())
+
+            assert_equal(A.matvec(np.array([1,2,3])), [14,32])
+            assert_equal(A.matvec(np.array([[1],[2],[3]])), [[14],[32]])
+            assert_equal(A * np.array([1,2,3]), [14,32])
+            assert_equal(A * np.array([[1],[2],[3]]), [[14],[32]])
+            assert_equal(A.dot(np.array([1,2,3])), [14,32])
+            assert_equal(A.dot(np.array([[1],[2],[3]])), [[14],[32]])
+
+            assert_equal(A.matvec(matrix([[1],[2],[3]])), [[14],[32]])
+            assert_equal(A * matrix([[1],[2],[3]]), [[14],[32]])
+            assert_equal(A.dot(matrix([[1],[2],[3]])), [[14],[32]])
+
+            assert_equal((2*A)*[1,1,1], [12,30])
+            assert_equal((2 * A).rmatvec([1, 1]), [10, 14, 18])
+            assert_equal((2*A).H.matvec([1,1]), [10, 14, 18])
+            assert_equal((2*A)*[[1],[1],[1]], [[12],[30]])
+            assert_equal((2 * A).matmat([[1], [1], [1]]), [[12], [30]])
+            assert_equal((A*2)*[1,1,1], [12,30])
+            assert_equal((A*2)*[[1],[1],[1]], [[12],[30]])
+            assert_equal((2j*A)*[1,1,1], [12j,30j])
+            assert_equal((A+A)*[1,1,1], [12, 30])
+            assert_equal((A + A).rmatvec([1, 1]), [10, 14, 18])
+            assert_equal((A+A).H.matvec([1,1]), [10, 14, 18])
+            assert_equal((A+A)*[[1],[1],[1]], [[12], [30]])
+            assert_equal((A+A).matmat([[1],[1],[1]]), [[12], [30]])
+            assert_equal((-A)*[1,1,1], [-6,-15])
+            assert_equal((-A)*[[1],[1],[1]], [[-6],[-15]])
+            assert_equal((A-A)*[1,1,1], [0,0])
+            assert_equal((A - A) * [[1], [1], [1]], [[0], [0]])
+
+            X = np.array([[1, 2], [3, 4]])
+            # A_asarray = np.array([[1, 2, 3], [4, 5, 6]])
+            assert_equal((2 * A).rmatmat(X), np.dot((2 * self.A).T, X))
+            assert_equal((A * 2).rmatmat(X), np.dot((self.A * 2).T, X))
+            assert_equal((2j * A).rmatmat(X),
+                         np.dot((2j * self.A).T.conj(), X))
+            assert_equal((A * 2j).rmatmat(X),
+                         np.dot((self.A * 2j).T.conj(), X))
+            assert_equal((A + A).rmatmat(X),
+                         np.dot((self.A + self.A).T, X))
+            assert_equal((A + 2j * A).rmatmat(X),
+                         np.dot((self.A + 2j * self.A).T.conj(), X))
+            assert_equal((-A).rmatmat(X), np.dot((-self.A).T, X))
+            assert_equal((A - A).rmatmat(X),
+                         np.dot((self.A - self.A).T, X))
+            assert_equal((2j * A).rmatmat(2j * X),
+                         np.dot((2j * self.A).T.conj(), 2j * X))
+
+            z = A+A
+            assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] is A)
+            z = 2*A
+            assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] == 2)
+
+            assert_(isinstance(A.matvec([1, 2, 3]), np.ndarray))
+            assert_(isinstance(A.matvec(np.array([[1],[2],[3]])), np.ndarray))
+            assert_(isinstance(A * np.array([1,2,3]), np.ndarray))
+            assert_(isinstance(A * np.array([[1],[2],[3]]), np.ndarray))
+            assert_(isinstance(A.dot(np.array([1,2,3])), np.ndarray))
+            assert_(isinstance(A.dot(np.array([[1],[2],[3]])), np.ndarray))
+
+            assert_(isinstance(A.matvec(matrix([[1],[2],[3]])), np.ndarray))
+            assert_(isinstance(A * matrix([[1],[2],[3]]), np.ndarray))
+            assert_(isinstance(A.dot(matrix([[1],[2],[3]])), np.ndarray))
+
+            assert_(isinstance(2*A, interface._ScaledLinearOperator))
+            assert_(isinstance(2j*A, interface._ScaledLinearOperator))
+            assert_(isinstance(A+A, interface._SumLinearOperator))
+            assert_(isinstance(-A, interface._ScaledLinearOperator))
+            assert_(isinstance(A-A, interface._SumLinearOperator))
+
+            assert_((2j*A).dtype == np.complex_)
+
+            assert_raises(ValueError, A.matvec, np.array([1,2]))
+            assert_raises(ValueError, A.matvec, np.array([1,2,3,4]))
+            assert_raises(ValueError, A.matvec, np.array([[1],[2]]))
+            assert_raises(ValueError, A.matvec, np.array([[1],[2],[3],[4]]))
+
+            assert_raises(ValueError, lambda: A*A)
+            assert_raises(ValueError, lambda: A**2)
+
+        for matvecsA, matvecsB in product(get_matvecs(self.A),
+                                          get_matvecs(self.B)):
+            A = interface.LinearOperator(**matvecsA)
+            B = interface.LinearOperator(**matvecsB)
+            # AtimesB = np.array([[22, 28], [49, 64]])
+            AtimesB = self.A.dot(self.B)
+            X = np.array([[1, 2], [3, 4]])
+
+            assert_equal((A * B).rmatmat(X), np.dot((AtimesB).T, X))
+            assert_equal((2j * A * B).rmatmat(X),
+                         np.dot((2j * AtimesB).T.conj(), X))
+
+            assert_equal((A*B)*[1,1], [50,113])
+            assert_equal((A*B)*[[1],[1]], [[50],[113]])
+            assert_equal((A*B).matmat([[1],[1]]), [[50],[113]])
+
+            assert_equal((A * B).rmatvec([1, 1]), [71, 92])
+            assert_equal((A * B).H.matvec([1, 1]), [71, 92])
+
+            assert_(isinstance(A*B, interface._ProductLinearOperator))
+
+            assert_raises(ValueError, lambda: A+B)
+            assert_raises(ValueError, lambda: A**2)
+
+            z = A*B
+            assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] is B)
+
+        for matvecsC in get_matvecs(self.C):
+            C = interface.LinearOperator(**matvecsC)
+            X = np.array([[1, 2], [3, 4]])
+
+            assert_equal(C.rmatmat(X), np.dot((self.C).T, X))
+            assert_equal((C**2).rmatmat(X),
+                         np.dot((np.dot(self.C, self.C)).T, X))
+
+            assert_equal((C**2)*[1,1], [17,37])
+            assert_equal((C**2).rmatvec([1, 1]), [22, 32])
+            assert_equal((C**2).H.matvec([1, 1]), [22, 32])
+            assert_equal((C**2).matmat([[1],[1]]), [[17],[37]])
+
+            assert_(isinstance(C**2, interface._PowerLinearOperator))
+
+    def test_matmul(self):
+        D = {'shape': self.A.shape,
+             'matvec': lambda x: np.dot(self.A, x).reshape(self.A.shape[0]),
+             'rmatvec': lambda x: np.dot(self.A.T.conj(),
+                                         x).reshape(self.A.shape[1]),
+             'rmatmat': lambda x: np.dot(self.A.T.conj(), x),
+             'matmat': lambda x: np.dot(self.A, x)}
+        A = interface.LinearOperator(**D)
+        B = np.array([[1, 2, 3],
+                      [4, 5, 6],
+                      [7, 8, 9]])
+        b = B[0]
+
+        assert_equal(operator.matmul(A, b), A * b)
+        assert_equal(operator.matmul(A, B), A * B)
+        assert_raises(ValueError, operator.matmul, A, 2)
+        assert_raises(ValueError, operator.matmul, 2, A)
+
+
+class TestAsLinearOperator:
+    def setup_method(self):
+        self.cases = []
+
+        def make_cases(original, dtype):
+            cases = []
+
+            cases.append((matrix(original, dtype=dtype), original))
+            cases.append((np.array(original, dtype=dtype), original))
+            cases.append((sparse.csr_matrix(original, dtype=dtype), original))
+
+            # Test default implementations of _adjoint and _rmatvec, which
+            # refer to each other.
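+            # HasRmatvec overrides only _rmatvec, so the adjoint is derived
+            # from it; HasAdjoint overrides only _adjoint, so rmatvec is
+            # derived from the adjoint's matvec. Both routes must agree.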
+            def mv(x, dtype):
+                y = original.dot(x)
+                if len(x.shape) == 2:
+                    y = y.reshape(-1, 1)
+                return y
+
+            def rmv(x, dtype):
+                return original.T.conj().dot(x)
+
+            class BaseMatlike(interface.LinearOperator):
+                args = ()
+
+                def __init__(self, dtype):
+                    self.dtype = np.dtype(dtype)
+                    self.shape = original.shape
+
+                def _matvec(self, x):
+                    return mv(x, self.dtype)
+
+            class HasRmatvec(BaseMatlike):
+                args = ()
+
+                def _rmatvec(self,x):
+                    return rmv(x, self.dtype)
+
+            class HasAdjoint(BaseMatlike):
+                args = ()
+
+                def _adjoint(self):
+                    shape = self.shape[1], self.shape[0]
+                    matvec = partial(rmv, dtype=self.dtype)
+                    rmatvec = partial(mv, dtype=self.dtype)
+                    return interface.LinearOperator(matvec=matvec,
+                                                    rmatvec=rmatvec,
+                                                    dtype=self.dtype,
+                                                    shape=shape)
+
+            class HasRmatmat(HasRmatvec):
+                def _matmat(self, x):
+                    return original.dot(x)
+
+                def _rmatmat(self, x):
+                    return original.T.conj().dot(x)
+
+            cases.append((HasRmatvec(dtype), original))
+            cases.append((HasAdjoint(dtype), original))
+            cases.append((HasRmatmat(dtype), original))
+            return cases
+
+        original = np.array([[1,2,3], [4,5,6]])
+        self.cases += make_cases(original, np.int32)
+        self.cases += make_cases(original, np.float32)
+        self.cases += make_cases(original, np.float64)
+        self.cases += [(interface.aslinearoperator(M).T, A.T)
+                       for M, A in make_cases(original.T, np.float64)]
+        self.cases += [(interface.aslinearoperator(M).H, A.T.conj())
+                       for M, A in make_cases(original.T, np.float64)]
+
+        original = np.array([[1, 2j, 3j], [4j, 5j, 6]])
+        self.cases += make_cases(original, np.complex_)
+        self.cases += [(interface.aslinearoperator(M).T, A.T)
+                       for M, A in make_cases(original.T, np.complex_)]
+        self.cases += [(interface.aslinearoperator(M).H, A.T.conj())
+                       for M, A in make_cases(original.T, np.complex_)]
+
+    def test_basic(self):
+
+        for M, A_array in self.cases:
+            A = interface.aslinearoperator(M)
+            M,N = A.shape
+
+            xs = [np.array([1, 2, 3]),
+                  np.array([[1], [2], [3]])]
+            ys = [np.array([1, 2]), np.array([[1], [2]])]
+
+            if A.dtype == np.complex_:
+                xs += [np.array([1, 2j, 3j]),
+                       np.array([[1], [2j], [3j]])]
+                ys += [np.array([1, 2j]), np.array([[1], [2j]])]
+
+            x2 = np.array([[1, 4], [2, 5], [3, 6]])
+
+            for x in xs:
+                assert_equal(A.matvec(x), A_array.dot(x))
+                assert_equal(A * x, A_array.dot(x))
+
+            assert_equal(A.matmat(x2), A_array.dot(x2))
+            assert_equal(A * x2, A_array.dot(x2))
+
+            for y in ys:
+                assert_equal(A.rmatvec(y), A_array.T.conj().dot(y))
+                assert_equal(A.T.matvec(y), A_array.T.dot(y))
+                assert_equal(A.H.matvec(y), A_array.T.conj().dot(y))
+
+            for y in ys:
+                if y.ndim < 2:
+                    continue
+                assert_equal(A.rmatmat(y), A_array.T.conj().dot(y))
+                assert_equal(A.T.matmat(y), A_array.T.dot(y))
+                assert_equal(A.H.matmat(y), A_array.T.conj().dot(y))
+
+            if hasattr(M,'dtype'):
+                assert_equal(A.dtype, M.dtype)
+
+            assert_(hasattr(A, 'args'))
+
+    def test_dot(self):
+
+        for M, A_array in self.cases:
+            A = interface.aslinearoperator(M)
+            M,N = A.shape
+
+            x0 = np.array([1, 2, 3])
+            x1 = np.array([[1], [2], [3]])
+            x2 = np.array([[1, 4], [2, 5], [3, 6]])
+
+            assert_equal(A.dot(x0), A_array.dot(x0))
+            assert_equal(A.dot(x1), A_array.dot(x1))
+            assert_equal(A.dot(x2), A_array.dot(x2))
+
+
+def test_repr():
+    A = interface.LinearOperator(shape=(1, 1), matvec=lambda x: 1)
+    repr_A = repr(A)
+    assert_('unspecified dtype' not in repr_A, repr_A)
+
+
+def test_identity():
+    ident = interface.IdentityOperator((3, 3))
+    assert_equal(ident * [1, 2, 3], [1, 2, 3])
+    assert_equal(ident.dot(np.arange(9).reshape(3, 3)).ravel(), np.arange(9))
+
+    assert_raises(ValueError, ident.matvec, [1, 2, 3, 4])
+
+
+def test_attributes():
+    A = interface.aslinearoperator(np.arange(16).reshape(4, 4))
+
+    def always_four_ones(x):
+        x = np.asarray(x)
+        assert_(x.shape == (3,) or x.shape == (3, 1))
+        return np.ones(4)
+
+    B = interface.LinearOperator(shape=(4, 3), matvec=always_four_ones)
+
+    for op in [A, B, A * B, A.H, A + A, B + B, A**4]:
+        assert_(hasattr(op, "dtype"))
+        assert_(hasattr(op, "shape"))
+        assert_(hasattr(op, "_matvec"))
+
+def matvec(x):
+    """ Needed for test_pickle as local functions are not pickleable """
+    return np.zeros(3)
+
+def test_pickle():
+    import pickle
+
+    for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
+        A = interface.LinearOperator((3, 3), matvec)
+        s = pickle.dumps(A, protocol=protocol)
+        B = pickle.loads(s)
+
+        for k in A.__dict__:
+            assert_equal(getattr(A, k), getattr(B, k))
+
+def test_inheritance():
+    class Empty(interface.LinearOperator):
+        pass
+
+    with warns(RuntimeWarning, match="should implement at least"):
+        assert_raises(TypeError, Empty)
+
+    class Identity(interface.LinearOperator):
+        def __init__(self, n):
+            super().__init__(dtype=None, shape=(n, n))
+
+        def _matvec(self, x):
+            return x
+
+    id3 = Identity(3)
+    assert_equal(id3.matvec([1, 2, 3]), [1, 2, 3])
+    assert_raises(NotImplementedError, id3.rmatvec, [4, 5, 6])
+
+    class MatmatOnly(interface.LinearOperator):
+        def __init__(self, A):
+            super().__init__(A.dtype, A.shape)
+            self.A = A
+
+        def _matmat(self, x):
+            return self.A.dot(x)
+
+    mm = MatmatOnly(np.random.randn(5, 3))
+    assert_equal(mm.matvec(np.random.randn(3)).shape, (5,))
+
+def test_dtypes_of_operator_sum():
+    # gh-6078
+
+    mat_complex = np.random.rand(2,2) + 1j * np.random.rand(2,2)
+    mat_real = np.random.rand(2,2)
+
+    complex_operator = interface.aslinearoperator(mat_complex)
+    real_operator = interface.aslinearoperator(mat_real)
+
+    sum_complex = complex_operator + complex_operator
+    sum_real = real_operator + real_operator
+
+    assert_equal(sum_real.dtype, np.float64)
+    assert_equal(sum_complex.dtype, np.complex128)
+
+def test_no_double_init():
+    call_count = [0]
+
+    def matvec(v):
+        call_count[0] += 1
+        return v
+
+    # It should call matvec exactly once (in order to determine the
+    # operator dtype)
+    interface.LinearOperator((2, 2), matvec=matvec)
+    assert_equal(call_count[0], 1)
+
+def test_adjoint_conjugate():
+    X = np.array([[1j]])
+    A = interface.aslinearoperator(X)
+
+    B = 1j * A
+    Y = 1j * X
+
+    v = np.array([1])
+
+    assert_equal(B.dot(v), Y.dot(v))
+    assert_equal(B.H.dot(v), Y.T.conj().dot(v))
+
+def test_ndim():
+    X = np.array([[1]])
+    A = interface.aslinearoperator(X)
+    assert_equal(A.ndim, 2)
+
+def test_transpose_noconjugate():
+    X = np.array([[1j]])
+    A = interface.aslinearoperator(X)
+
+    B = 1j * A
+    Y = 1j * X
+
+    v = np.array([1])
+
+    assert_equal(B.dot(v), Y.dot(v))
+    assert_equal(B.T.dot(v), Y.T.dot(v))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/tests/test_matfuncs.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/tests/test_matfuncs.py
new file mode 100644
index 00000000..8f984553
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/tests/test_matfuncs.py
@@ -0,0 +1,581 @@
+#
+# Created by: Pearu Peterson, March 2002
+#
+""" Test functions for scipy.linalg._matfuncs module
+
+"""
+import math
+
+import numpy as np
+from numpy import array, eye, exp, random
+from numpy.linalg import matrix_power
+from numpy.testing import (
+        assert_allclose, assert_, assert_array_almost_equal, assert_equal,
+        assert_array_almost_equal_nulp, suppress_warnings)
+
+from scipy.sparse import csc_matrix, SparseEfficiencyWarning
+from scipy.sparse._construct import eye as speye
+from scipy.sparse.linalg._matfuncs import (expm, _expm,
+        ProductOperator, MatrixPowerOperator,
+        _onenorm_matrix_power_nnm)
+from scipy.sparse._sputils import matrix
+from scipy.linalg import logm
+from scipy.special import factorial, binom
+import scipy.sparse
+import scipy.sparse.linalg
+
+
+def _burkardt_13_power(n, p):
+    """
+    A helper function for testing matrix functions.
+
+    Parameters
+    ----------
+    n : integer greater than 1
+        Order of the square matrix to be returned.
+    p : non-negative integer
+        Power of the matrix.
+
+    Returns
+    -------
+    out : ndarray representing a square matrix
+        A Forsythe matrix of order n, raised to the power p.
+
+    """
+    # Input validation.
+    if n != int(n) or n < 2:
+        raise ValueError('n must be an integer greater than 1')
+    n = int(n)
+    if p != int(p) or p < 0:
+        raise ValueError('p must be a non-negative integer')
+    p = int(p)
+
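+    # The Forsythe matrix F has ones on the superdiagonal and 10**-n in the
+    # lower-left corner, so F**n == 10**-n * I. Writing p = a*n + b, F**p
+    # therefore has 10**(-n*a) on diagonal b and 10**(-n*(a+1)) on diagonal
+    # b-n, which is exactly what the two np.diag terms below build.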
+    # Construct the matrix explicitly.
+    a, b = divmod(p, n)
+    large = np.power(10.0, -n*a)
+    small = large * np.power(10.0, -n)
+    return np.diag([large]*(n-b), b) + np.diag([small]*b, b-n)
+
+
+def test_onenorm_matrix_power_nnm():
+    np.random.seed(1234)
+    for n in range(1, 5):
+        for p in range(5):
+            M = np.random.random((n, n))
+            Mp = np.linalg.matrix_power(M, p)
+            observed = _onenorm_matrix_power_nnm(M, p)
+            expected = np.linalg.norm(Mp, 1)
+            assert_allclose(observed, expected)
+
+
+class TestExpM:
+    def test_zero_ndarray(self):
+        a = array([[0.,0],[0,0]])
+        assert_array_almost_equal(expm(a),[[1,0],[0,1]])
+
+    def test_zero_sparse(self):
+        a = csc_matrix([[0.,0],[0,0]])
+        assert_array_almost_equal(expm(a).toarray(),[[1,0],[0,1]])
+
+    def test_zero_matrix(self):
+        a = matrix([[0.,0],[0,0]])
+        assert_array_almost_equal(expm(a),[[1,0],[0,1]])
+
+    def test_misc_types(self):
+        A = expm(np.array([[1]]))
+        assert_allclose(expm(((1,),)), A)
+        assert_allclose(expm([[1]]), A)
+        assert_allclose(expm(matrix([[1]])), A)
+        assert_allclose(expm(np.array([[1]])), A)
+        assert_allclose(expm(csc_matrix([[1]])).A, A)
+        B = expm(np.array([[1j]]))
+        assert_allclose(expm(((1j,),)), B)
+        assert_allclose(expm([[1j]]), B)
+        assert_allclose(expm(matrix([[1j]])), B)
+        assert_allclose(expm(csc_matrix([[1j]])).A, B)
+
+    def test_bidiagonal_sparse(self):
+        A = csc_matrix([
+            [1, 3, 0],
+            [0, 1, 5],
+            [0, 0, 2]], dtype=float)
+        e1 = math.exp(1)
+        e2 = math.exp(2)
+        expected = np.array([
+            [e1, 3*e1, 15*(e2 - 2*e1)],
+            [0, e1, 5*(e2 - e1)],
+            [0, 0, e2]], dtype=float)
+        observed = expm(A).toarray()
+        assert_array_almost_equal(observed, expected)
+
+    def test_padecases_dtype_float(self):
+        for dtype in [np.float32, np.float64]:
+            for scale in [1e-2, 1e-1, 5e-1, 1, 10]:
+                A = scale * eye(3, dtype=dtype)
+                observed = expm(A)
+                expected = exp(scale, dtype=dtype) * eye(3, dtype=dtype)
+                assert_array_almost_equal_nulp(observed, expected, nulp=100)
+
+    def test_padecases_dtype_complex(self):
+        for dtype in [np.complex64, np.complex128]:
+            for scale in [1e-2, 1e-1, 5e-1, 1, 10]:
+                A = scale * eye(3, dtype=dtype)
+                observed = expm(A)
+                expected = exp(scale, dtype=dtype) * eye(3, dtype=dtype)
+                assert_array_almost_equal_nulp(observed, expected, nulp=100)
+
+    def test_padecases_dtype_sparse_float(self):
+        # float32 and complex64 lead to errors in spsolve/UMFpack
+        dtype = np.float64
+        for scale in [1e-2, 1e-1, 5e-1, 1, 10]:
+            a = scale * speye(3, 3, dtype=dtype, format='csc')
+            e = exp(scale, dtype=dtype) * eye(3, dtype=dtype)
+            with suppress_warnings() as sup:
+                sup.filter(SparseEfficiencyWarning,
+                           "Changing the sparsity structure of a csc_matrix is expensive.")
+                exact_onenorm = _expm(a, use_exact_onenorm=True).toarray()
+                inexact_onenorm = _expm(a, use_exact_onenorm=False).toarray()
+            assert_array_almost_equal_nulp(exact_onenorm, e, nulp=100)
+            assert_array_almost_equal_nulp(inexact_onenorm, e, nulp=100)
+
+    def test_padecases_dtype_sparse_complex(self):
+        # float32 and complex64 lead to errors in spsolve/UMFpack
+        dtype = np.complex128
+        for scale in [1e-2, 1e-1, 5e-1, 1, 10]:
+            a = scale * speye(3, 3, dtype=dtype, format='csc')
+            e = exp(scale) * eye(3, dtype=dtype)
+            with suppress_warnings() as sup:
+                sup.filter(SparseEfficiencyWarning,
+                           "Changing the sparsity structure of a csc_matrix is expensive.")
+                assert_array_almost_equal_nulp(expm(a).toarray(), e, nulp=100)
+
+    def test_logm_consistency(self):
+        random.seed(1234)
+        for dtype in [np.float64, np.complex128]:
+            for n in range(1, 10):
+                for scale in [1e-4, 1e-3, 1e-2, 1e-1, 1, 1e1, 1e2]:
+                    # make logm(A) be of a given scale
+                    A = (eye(n) + random.rand(n, n) * scale).astype(dtype)
+                    if np.iscomplexobj(A):
+                        A = A + 1j * random.rand(n, n) * scale
+                    assert_array_almost_equal(expm(logm(A)), A)
+
+    def test_integer_matrix(self):
+        Q = np.array([
+            [-3, 1, 1, 1],
+            [1, -3, 1, 1],
+            [1, 1, -3, 1],
+            [1, 1, 1, -3]])
+        assert_allclose(expm(Q), expm(1.0 * Q))
+
+    def test_integer_matrix_2(self):
+        # Check for integer overflows
+        Q = np.array([[-500, 500, 0, 0],
+                      [0, -550, 360, 190],
+                      [0, 630, -630, 0],
+                      [0, 0, 0, 0]], dtype=np.int16)
+        assert_allclose(expm(Q), expm(1.0 * Q))
+
+        Q = csc_matrix(Q)
+        assert_allclose(expm(Q).A, expm(1.0 * Q).A)
+
+    def test_triangularity_perturbation(self):
+        # Experiment (1) of
+        # Awad H. Al-Mohy and Nicholas J. Higham (2012)
+        # Improved Inverse Scaling and Squaring Algorithms
+        # for the Matrix Logarithm.
+        A = np.array([
+            [3.2346e-1, 3e4, 3e4, 3e4],
+            [0, 3.0089e-1, 3e4, 3e4],
+            [0, 0, 3.221e-1, 3e4],
+            [0, 0, 0, 3.0744e-1]],
+            dtype=float)
+        A_logm = np.array([
+            [-1.12867982029050462e+00, 9.61418377142025565e+04,
+             -4.52485573953179264e+09, 2.92496941103871812e+14],
+            [0.00000000000000000e+00, -1.20101052953082288e+00,
+             9.63469687211303099e+04, -4.68104828911105442e+09],
+            [0.00000000000000000e+00, 0.00000000000000000e+00,
+             -1.13289322264498393e+00, 9.53249183094775653e+04],
+            [0.00000000000000000e+00, 0.00000000000000000e+00,
+             0.00000000000000000e+00, -1.17947533272554850e+00]],
+            dtype=float)
+        assert_allclose(expm(A_logm), A, rtol=1e-4)
+
+        # Perturb the upper triangular matrix by tiny amounts,
+        # so that it becomes technically not upper triangular.
+        random.seed(1234)
+        tiny = 1e-17
+        A_logm_perturbed = A_logm.copy()
+        A_logm_perturbed[1, 0] = tiny
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "Ill-conditioned.*")
+            A_expm_logm_perturbed = expm(A_logm_perturbed)
+        rtol = 1e-4
+        atol = 100 * tiny
+        assert_(not np.allclose(A_expm_logm_perturbed, A, rtol=rtol, atol=atol))
+
+    def test_burkardt_1(self):
+        # This matrix is diagonal.
+        # The calculation of the matrix exponential is simple.
+        #
+        # This is the first of a series of matrix exponential tests
+        # collected by John Burkardt from the following sources.
+        #
+        # Alan Laub,
+        # Review of "Linear System Theory" by Joao Hespanha,
+        # SIAM Review,
+        # Volume 52, Number 4, December 2010, pages 779--781.
+        #
+        # Cleve Moler and Charles Van Loan,
+        # Nineteen Dubious Ways to Compute the Exponential of a Matrix,
+        # Twenty-Five Years Later,
+        # SIAM Review,
+        # Volume 45, Number 1, March 2003, pages 3--49.
+        #
+        # Cleve Moler,
+        # Cleve's Corner: A Balancing Act for the Matrix Exponential,
+        # 23 July 2012.
+        #
+        # Robert Ward,
+        # Numerical computation of the matrix exponential
+        # with accuracy estimate,
+        # SIAM Journal on Numerical Analysis,
+        # Volume 14, Number 4, September 1977, pages 600--610.
+        exp1 = np.exp(1)
+        exp2 = np.exp(2)
+        A = np.array([
+            [1, 0],
+            [0, 2],
+            ], dtype=float)
+        desired = np.array([
+            [exp1, 0],
+            [0, exp2],
+            ], dtype=float)
+        actual = expm(A)
+        assert_allclose(actual, desired)
+
+    def test_burkardt_2(self):
+        # This matrix is symmetric.
+        # The calculation of the matrix exponential is straightforward.
+        A = np.array([
+            [1, 3],
+            [3, 2],
+            ], dtype=float)
+        desired = np.array([
+            [39.322809708033859, 46.166301438885753],
+            [46.166301438885768, 54.711576854329110],
+            ], dtype=float)
+        actual = expm(A)
+        assert_allclose(actual, desired)
+
+    def test_burkardt_3(self):
+        # This example is due to Laub.
+        # This matrix is ill-suited for the Taylor series approach.
+        # As powers of A are computed, the entries blow up too quickly.
+        exp1 = np.exp(1)
+        exp39 = np.exp(39)
+        A = np.array([
+            [0, 1],
+            [-39, -40],
+            ], dtype=float)
+        desired = np.array([
+            [
+                39/(38*exp1) - 1/(38*exp39),
+                -np.expm1(-38) / (38*exp1)],
+            [
+                39*np.expm1(-38) / (38*exp1),
+                -1/(38*exp1) + 39/(38*exp39)],
+            ], dtype=float)
+        actual = expm(A)
+        assert_allclose(actual, desired)
+
+    def test_burkardt_4(self):
+        # This example is due to Moler and Van Loan.
+        # The example will cause problems for the series summation approach,
+        # as well as for diagonal Pade approximations.
+        A = np.array([
+            [-49, 24],
+            [-64, 31],
+            ], dtype=float)
+        U = np.array([[3, 1], [4, 2]], dtype=float)
+        V = np.array([[1, -1/2], [-2, 3/2]], dtype=float)
+        w = np.array([-17, -1], dtype=float)
+        desired = np.dot(U * np.exp(w), V)
+        actual = expm(A)
+        assert_allclose(actual, desired)
+
+    def test_burkardt_5(self):
+        # This example is due to Moler and Van Loan.
+        # This matrix is strictly upper triangular.
+        # All powers of A are zero beyond some (low) limit.
+        # This example will cause problems for Pade approximations.
+        A = np.array([
+            [0, 6, 0, 0],
+            [0, 0, 6, 0],
+            [0, 0, 0, 6],
+            [0, 0, 0, 0],
+            ], dtype=float)
+        desired = np.array([
+            [1, 6, 18, 36],
+            [0, 1, 6, 18],
+            [0, 0, 1, 6],
+            [0, 0, 0, 1],
+            ], dtype=float)
+        actual = expm(A)
+        assert_allclose(actual, desired)
+
+    def test_burkardt_6(self):
+        # This example is due to Moler and Van Loan.
+        # This matrix does not have a complete set of eigenvectors.
+        # That means the eigenvector approach will fail.
+        exp1 = np.exp(1)
+        A = np.array([
+            [1, 1],
+            [0, 1],
+            ], dtype=float)
+        desired = np.array([
+            [exp1, exp1],
+            [0, exp1],
+            ], dtype=float)
+        actual = expm(A)
+        assert_allclose(actual, desired)
+
+    def test_burkardt_7(self):
+        # This example is due to Moler and Van Loan.
+        # This matrix is very close to example 5.
+        # Mathematically, it has a complete set of eigenvectors.
+        # Numerically, however, the calculation will be suspect.
+        exp1 = np.exp(1)
+        eps = np.spacing(1)
+        A = np.array([
+            [1 + eps, 1],
+            [0, 1 - eps],
+            ], dtype=float)
+        desired = np.array([
+            [exp1, exp1],
+            [0, exp1],
+            ], dtype=float)
+        actual = expm(A)
+        assert_allclose(actual, desired)
+
+    def test_burkardt_8(self):
+        # This matrix was an example in Wikipedia.
+        exp4 = np.exp(4)
+        exp16 = np.exp(16)
+        A = np.array([
+            [21, 17, 6],
+            [-5, -1, -6],
+            [4, 4, 16],
+            ], dtype=float)
+        desired = np.array([
+            [13*exp16 - exp4, 13*exp16 - 5*exp4, 2*exp16 - 2*exp4],
+            [-9*exp16 + exp4, -9*exp16 + 5*exp4, -2*exp16 + 2*exp4],
+            [16*exp16, 16*exp16, 4*exp16],
+            ], dtype=float) * 0.25
+        actual = expm(A)
+        assert_allclose(actual, desired)
+
+    def test_burkardt_9(self):
+        # This matrix is due to the NAG Library.
+        # It is an example for function F01ECF.
+        A = np.array([
+            [1, 2, 2, 2],
+            [3, 1, 1, 2],
+            [3, 2, 1, 2],
+            [3, 3, 3, 1],
+            ], dtype=float)
+        desired = np.array([
+            [740.7038, 610.8500, 542.2743, 549.1753],
+            [731.2510, 603.5524, 535.0884, 542.2743],
+            [823.7630, 679.4257, 603.5524, 610.8500],
+            [998.4355, 823.7630, 731.2510, 740.7038],
+            ], dtype=float)
+        actual = expm(A)
+        assert_allclose(actual, desired)
+
+    def test_burkardt_10(self):
+        # This is Ward's example #1.
+        # It is defective and nonderogatory.
+        A = np.array([
+            [4, 2, 0],
+            [1, 4, 1],
+            [1, 1, 4],
+            ], dtype=float)
+        assert_allclose(sorted(scipy.linalg.eigvals(A)), (3, 3, 6))
+        desired = np.array([
+            [147.8666224463699, 183.7651386463682, 71.79703239999647],
+            [127.7810855231823, 183.7651386463682, 91.88256932318415],
+            [127.7810855231824, 163.6796017231806, 111.9681062463718],
+            ], dtype=float)
+        actual = expm(A)
+        assert_allclose(actual, desired)
+
+    def test_burkardt_11(self):
+        # This is Ward's example #2.
+        # It is a symmetric matrix.
+        A = np.array([
+            [29.87942128909879, 0.7815750847907159, -2.289519314033932],
+            [0.7815750847907159, 25.72656945571064, 8.680737820540137],
+            [-2.289519314033932, 8.680737820540137, 34.39400925519054],
+            ], dtype=float)
+        assert_allclose(scipy.linalg.eigvalsh(A), (20, 30, 40))
+        desired = np.array([
+             [
+                 5.496313853692378E+15,
+                 -1.823188097200898E+16,
+                 -3.047577080858001E+16],
+             [
+                -1.823188097200899E+16,
+                6.060522870222108E+16,
+                1.012918429302482E+17],
+             [
+                -3.047577080858001E+16,
+                1.012918429302482E+17,
+                1.692944112408493E+17],
+            ], dtype=float)
+        actual = expm(A)
+        assert_allclose(actual, desired)
+
+    def test_burkardt_12(self):
+        # This is Ward's example #3.
+        # Ward's algorithm has difficulty estimating the accuracy
+        # of its results.
+        A = np.array([
+            [-131, 19, 18],
+            [-390, 56, 54],
+            [-387, 57, 52],
+            ], dtype=float)
+        assert_allclose(sorted(scipy.linalg.eigvals(A)), (-20, -2, -1))
+        desired = np.array([
+            [-1.509644158793135, 0.3678794391096522, 0.1353352811751005],
+            [-5.632570799891469, 1.471517758499875, 0.4060058435250609],
+            [-4.934938326088363, 1.103638317328798, 0.5413411267617766],
+            ], dtype=float)
+        actual = expm(A)
+        assert_allclose(actual, desired)
+
+    def test_burkardt_13(self):
+        # This is Ward's example #4.
+        # This is a version of the Forsythe matrix.
+        # The eigenvector problem is badly conditioned.
+        # Ward's algorithm has difficulty estimating the accuracy
+        # of its results for this problem.
+        #
+        # Check the construction of one instance of this family of matrices.
+        A4_actual = _burkardt_13_power(4, 1)
+        A4_desired = [[0, 1, 0, 0],
+                      [0, 0, 1, 0],
+                      [0, 0, 0, 1],
+                      [1e-4, 0, 0, 0]]
+        assert_allclose(A4_actual, A4_desired)
+        # Check the expm for a few instances.
+        for n in (2, 3, 4, 10):
+            # Approximate expm using Taylor series.
+            # This works well for this matrix family
+            # because each matrix in the summation,
+            # even before dividing by the factorial,
+            # is entrywise positive with max entry 10**(-floor(p/n)*n).
+            k = max(1, int(np.ceil(16/n)))
+            desired = np.zeros((n, n), dtype=float)
+            for p in range(n*k):
+                Ap = _burkardt_13_power(n, p)
+                assert_equal(np.min(Ap), 0)
+                assert_allclose(np.max(Ap), np.power(10, -np.floor(p/n)*n))
+                desired += Ap / factorial(p)
+            actual = expm(_burkardt_13_power(n, 1))
+            assert_allclose(actual, desired)
+
+    def test_burkardt_14(self):
+        # This is Moler's example.
+        # This badly scaled matrix caused problems for MATLAB's expm().
+        A = np.array([
+            [0, 1e-8, 0],
+            [-(2e10 + 4e8/6.), -3, 2e10],
+            [200./3., 0, -200./3.],
+            ], dtype=float)
+        desired = np.array([
+            [0.446849468283175, 1.54044157383952e-09, 0.462811453558774],
+            [-5743067.77947947, -0.0152830038686819, -4526542.71278401],
+            [0.447722977849494, 1.54270484519591e-09, 0.463480648837651],
+            ], dtype=float)
+        actual = expm(A)
+        assert_allclose(actual, desired)
+
+    def test_pascal(self):
+        # Test pascal triangle.
+        # Nilpotent exponential, used to trigger a failure (gh-8029)
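+        # For A = scale * diag([1..n], -1) (nilpotent), expm(A) has the
+        # closed form binom(i, j) * scale**(i - j): a rescaled lower
+        # triangular Pascal matrix. The sc outer ratio below supplies the
+        # scale**(i - j) factor.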
+
+        for scale in [1.0, 1e-3, 1e-6]:
+            for n in range(0, 80, 3):
+                sc = scale ** np.arange(n, -1, -1)
+                if np.any(sc < 1e-300):
+                    break
+
+                A = np.diag(np.arange(1, n + 1), -1) * scale
+                B = expm(A)
+
+                got = B
+                expected = binom(np.arange(n + 1)[:,None],
+                                 np.arange(n + 1)[None,:]) * sc[None,:] / sc[:,None]
+                atol = 1e-13 * abs(expected).max()
+                assert_allclose(got, expected, atol=atol)
+
+    def test_matrix_input(self):
+        # Large np.matrix inputs should work, gh-5546
+        A = np.zeros((200, 200))
+        A[-1,0] = 1
+        B0 = expm(A)
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, "the matrix subclass.*")
+            sup.filter(PendingDeprecationWarning, "the matrix subclass.*")
+            B = expm(np.matrix(A))
+        assert_allclose(B, B0)
+
+    def test_exp_sinch_overflow(self):
+        # Check overflow in intermediate steps is fixed (gh-11839)
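+        # The reference value relies on the semigroup property
+        # expm(-2**11 * L) == expm(-L) ** (2**11), evaluated by 11 repeated
+        # squarings so that no large exponent is formed on the reference
+        # side.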
+        L = np.array([[1.0, -0.5, -0.5, 0.0, 0.0, 0.0, 0.0],
+                      [0.0, 1.0, 0.0, -0.5, -0.5, 0.0, 0.0],
+                      [0.0, 0.0, 1.0, 0.0, 0.0, -0.5, -0.5],
+                      [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+                      [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+                      [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+                      [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
+
+        E0 = expm(-L)
+        E1 = expm(-2**11 * L)
+        E2 = E0
+        for j in range(11):
+            E2 = E2 @ E2
+
+        assert_allclose(E1, E2)
+
+
+class TestOperators:
+
+    def test_product_operator(self):
+        random.seed(1234)
+        n = 5
+        k = 2
+        nsamples = 10
+        for i in range(nsamples):
+            A = np.random.randn(n, n)
+            B = np.random.randn(n, n)
+            C = np.random.randn(n, n)
+            D = np.random.randn(n, k)
+            op = ProductOperator(A, B, C)
+            assert_allclose(op.matmat(D), A.dot(B).dot(C).dot(D))
+            assert_allclose(op.T.matmat(D), (A.dot(B).dot(C)).T.dot(D))
+
+    def test_matrix_power_operator(self):
+        random.seed(1234)
+        n = 5
+        k = 2
+        p = 3
+        nsamples = 10
+        for i in range(nsamples):
+            A = np.random.randn(n, n)
+            B = np.random.randn(n, k)
+            op = MatrixPowerOperator(A, p)
+            assert_allclose(op.matmat(B), matrix_power(A, p).dot(B))
+            assert_allclose(op.T.matmat(B), matrix_power(A, p).T.dot(B))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/tests/test_norm.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/tests/test_norm.py
new file mode 100644
index 00000000..96c2f65d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/tests/test_norm.py
@@ -0,0 +1,141 @@
+"""Test functions for the sparse.linalg.norm module
+"""
+
+import pytest
+import numpy as np
+from numpy.linalg import norm as npnorm
+from numpy.testing import assert_allclose, assert_equal
+from pytest import raises as assert_raises
+
+import scipy.sparse
+from scipy.sparse.linalg import norm as spnorm
+
+
+# https://github.com/scipy/scipy/issues/16031
+def test_sparray_norm():
+    row = np.array([0, 0, 1, 1])
+    col = np.array([0, 1, 2, 3])
+    data = np.array([4, 5, 7, 9])
+    test_arr = scipy.sparse.coo_array((data, (row, col)), shape=(2, 4))
+    test_mat = scipy.sparse.coo_matrix((data, (row, col)), shape=(2, 4))
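+    # With a single axis, ord=1 reduces to a vector 1-norm along that axis:
+    # absolute column sums [4, 5, 7, 9] for axis=0 and absolute row sums
+    # [9, 16] for axis=1. Both the array and matrix containers are checked,
+    # since the linked gh-16031 concerned the sparse-array container.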
+    assert_equal(spnorm(test_arr, ord=1, axis=0), np.array([4, 5, 7, 9]))
+    assert_equal(spnorm(test_mat, ord=1, axis=0), np.array([4, 5, 7, 9]))
+    assert_equal(spnorm(test_arr, ord=1, axis=1), np.array([9, 16]))
+    assert_equal(spnorm(test_mat, ord=1, axis=1), np.array([9, 16]))
+
+
+class TestNorm:
+    def setup_method(self):
+        a = np.arange(9) - 4
+        b = a.reshape((3, 3))
+        self.b = scipy.sparse.csr_matrix(b)
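+        # b is [[-4, -3, -2], [-1, 0, 1], [2, 3, 4]]: absolute row sums
+        # (9, 2, 9), absolute column sums (7, 6, 7), and Frobenius norm
+        # sqrt(sum(k**2 for k in range(-4, 5))) = sqrt(60), the constants
+        # asserted in the tests below.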
+
+    def test_matrix_norm(self):
+
+        # Frobenius norm is the default
+        assert_allclose(spnorm(self.b), 7.745966692414834)
+        assert_allclose(spnorm(self.b, 'fro'), 7.745966692414834)
+
+        assert_allclose(spnorm(self.b, np.inf), 9)
+        assert_allclose(spnorm(self.b, -np.inf), 2)
+        assert_allclose(spnorm(self.b, 1), 7)
+        assert_allclose(spnorm(self.b, -1), 6)
+        # Only floating or complex floating dtype supported by svds.
+        with pytest.warns(UserWarning, match="The problem size"):
+            assert_allclose(spnorm(self.b.astype(np.float64), 2),
+                            7.348469228349534)
+
+        # _multi_svd_norm is not implemented for sparse matrices
+        assert_raises(NotImplementedError, spnorm, self.b, -2)
+
+    def test_matrix_norm_axis(self):
+        for m, axis in ((self.b, None), (self.b, (0, 1)), (self.b.T, (1, 0))):
+            assert_allclose(spnorm(m, axis=axis), 7.745966692414834)
+            assert_allclose(spnorm(m, 'fro', axis=axis), 7.745966692414834)
+            assert_allclose(spnorm(m, np.inf, axis=axis), 9)
+            assert_allclose(spnorm(m, -np.inf, axis=axis), 2)
+            assert_allclose(spnorm(m, 1, axis=axis), 7)
+            assert_allclose(spnorm(m, -1, axis=axis), 6)
+
+    def test_vector_norm(self):
+        v = [4.5825756949558398, 4.2426406871192848, 4.5825756949558398]
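+        # v holds the column-wise 2-norms of b:
+        # sqrt(21), sqrt(18), sqrt(21).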
+        for m, a in (self.b, 0), (self.b.T, 1):
+            for axis in a, (a, ), a-2, (a-2, ):
+                assert_allclose(spnorm(m, 1, axis=axis), [7, 6, 7])
+                assert_allclose(spnorm(m, np.inf, axis=axis), [4, 3, 4])
+                assert_allclose(spnorm(m, axis=axis), v)
+                assert_allclose(spnorm(m, ord=2, axis=axis), v)
+                assert_allclose(spnorm(m, ord=None, axis=axis), v)
+
+    def test_norm_exceptions(self):
+        m = self.b
+        assert_raises(TypeError, spnorm, m, None, 1.5)
+        assert_raises(TypeError, spnorm, m, None, [2])
+        assert_raises(ValueError, spnorm, m, None, ())
+        assert_raises(ValueError, spnorm, m, None, (0, 1, 2))
+        assert_raises(ValueError, spnorm, m, None, (0, 0))
+        assert_raises(ValueError, spnorm, m, None, (0, 2))
+        assert_raises(ValueError, spnorm, m, None, (-3, 0))
+        assert_raises(ValueError, spnorm, m, None, 2)
+        assert_raises(ValueError, spnorm, m, None, -3)
+        assert_raises(ValueError, spnorm, m, 'plate_of_shrimp', 0)
+        assert_raises(ValueError, spnorm, m, 'plate_of_shrimp', (0, 1))
+
+
+class TestVsNumpyNorm:
+    _sparse_types = (
+            scipy.sparse.bsr_matrix,
+            scipy.sparse.coo_matrix,
+            scipy.sparse.csc_matrix,
+            scipy.sparse.csr_matrix,
+            scipy.sparse.dia_matrix,
+            scipy.sparse.dok_matrix,
+            scipy.sparse.lil_matrix,
+            )
+    _test_matrices = (
+            (np.arange(9) - 4).reshape((3, 3)),
+            [
+                [1, 2, 3],
+                [-1, 1, 4]],
+            [
+                [1, 0, 3],
+                [-1, 1, 4j]],
+            )
+
+    def test_sparse_matrix_norms(self):
+        for sparse_type in self._sparse_types:
+            for M in self._test_matrices:
+                S = sparse_type(M)
+                assert_allclose(spnorm(S), npnorm(M))
+                assert_allclose(spnorm(S, 'fro'), npnorm(M, 'fro'))
+                assert_allclose(spnorm(S, np.inf), npnorm(M, np.inf))
+                assert_allclose(spnorm(S, -np.inf), npnorm(M, -np.inf))
+                assert_allclose(spnorm(S, 1), npnorm(M, 1))
+                assert_allclose(spnorm(S, -1), npnorm(M, -1))
+
+    def test_sparse_matrix_norms_with_axis(self):
+        for sparse_type in self._sparse_types:
+            for M in self._test_matrices:
+                S = sparse_type(M)
+                for axis in None, (0, 1), (1, 0):
+                    assert_allclose(spnorm(S, axis=axis), npnorm(M, axis=axis))
+                    for ord in 'fro', np.inf, -np.inf, 1, -1:
+                        assert_allclose(spnorm(S, ord, axis=axis),
+                                        npnorm(M, ord, axis=axis))
+                # Some numpy matrix norms are allergic to negative axes.
+                for axis in (-2, -1), (-1, -2), (1, -2):
+                    assert_allclose(spnorm(S, axis=axis), npnorm(M, axis=axis))
+                    assert_allclose(spnorm(S, 'f', axis=axis),
+                                    npnorm(M, 'f', axis=axis))
+                    assert_allclose(spnorm(S, 'fro', axis=axis),
+                                    npnorm(M, 'fro', axis=axis))
+
+    def test_sparse_vector_norms(self):
+        for sparse_type in self._sparse_types:
+            for M in self._test_matrices:
+                S = sparse_type(M)
+                for axis in (0, 1, -1, -2, (0, ), (1, ), (-1, ), (-2, )):
+                    assert_allclose(spnorm(S, axis=axis), npnorm(M, axis=axis))
+                    for ord in None, 2, np.inf, -np.inf, 1, 0.5, 0.42:
+                        assert_allclose(spnorm(S, ord, axis=axis),
+                                        npnorm(M, ord, axis=axis))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/tests/test_onenormest.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/tests/test_onenormest.py
new file mode 100644
index 00000000..655dba3d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/tests/test_onenormest.py
@@ -0,0 +1,252 @@
+"""Test functions for the sparse.linalg._onenormest module
+"""
+
+import numpy as np
+from numpy.testing import assert_allclose, assert_equal, assert_
+import pytest
+import scipy.linalg
+import scipy.sparse.linalg
+from scipy.sparse.linalg._onenormest import _onenormest_core, _algorithm_2_2
+
+
+class MatrixProductOperator(scipy.sparse.linalg.LinearOperator):
+    """
+    This is purely for onenormest testing.
+    """
+
+    def __init__(self, A, B):
+        if A.ndim != 2 or B.ndim != 2:
+            raise ValueError('expected ndarrays representing matrices')
+        if A.shape[1] != B.shape[0]:
+            raise ValueError('incompatible shapes')
+        self.A = A
+        self.B = B
+        self.ndim = 2
+        self.shape = (A.shape[0], B.shape[1])
+
+    def _matvec(self, x):
+        return np.dot(self.A, np.dot(self.B, x))
+
+    def _rmatvec(self, x):
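+        # For the real matrices used in these tests, x @ A @ B equals
+        # (A @ B).T @ x, i.e. the required rmatvec up to the complex
+        # conjugation that is not exercised here.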
+        return np.dot(np.dot(x, self.A), self.B)
+
+    def _matmat(self, X):
+        return np.dot(self.A, np.dot(self.B, X))
+
+    @property
+    def T(self):
+        return MatrixProductOperator(self.B.T, self.A.T)
+
+
+class TestOnenormest:
+
+    @pytest.mark.xslow
+    def test_onenormest_table_3_t_2(self):
+        # This will take multiple seconds if your computer is slow like mine.
+        # It is stochastic, so the tolerance could be too strict.
+        np.random.seed(1234)
+        t = 2
+        n = 100
+        itmax = 5
+        nsamples = 5000
+        observed = []
+        expected = []
+        nmult_list = []
+        nresample_list = []
+        for i in range(nsamples):
+            A = scipy.linalg.inv(np.random.randn(n, n))
+            est, v, w, nmults, nresamples = _onenormest_core(A, A.T, t, itmax)
+            observed.append(est)
+            expected.append(scipy.linalg.norm(A, 1))
+            nmult_list.append(nmults)
+            nresample_list.append(nresamples)
+        observed = np.array(observed, dtype=float)
+        expected = np.array(expected, dtype=float)
+        relative_errors = np.abs(observed - expected) / expected
+
+        # check the mean underestimation ratio
+        underestimation_ratio = observed / expected
+        assert_(0.99 < np.mean(underestimation_ratio) < 1.0)
+
+        # check the max and mean required column resamples
+        assert_equal(np.max(nresample_list), 2)
+        assert_(0.05 < np.mean(nresample_list) < 0.2)
+
+        # check the proportion of norms computed exactly correctly
+        nexact = np.count_nonzero(relative_errors < 1e-14)
+        proportion_exact = nexact / float(nsamples)
+        assert_(0.9 < proportion_exact < 0.95)
+
+        # check the average number of matrix*vector multiplications
+        assert_(3.5 < np.mean(nmult_list) < 4.5)
+
+    @pytest.mark.xslow
+    def test_onenormest_table_4_t_7(self):
+        # This will take multiple seconds if your computer is slow like mine.
+        # It is stochastic, so the tolerance could be too strict.
+        np.random.seed(1234)
+        t = 7
+        n = 100
+        itmax = 5
+        nsamples = 5000
+        observed = []
+        expected = []
+        nmult_list = []
+        nresample_list = []
+        for i in range(nsamples):
+            A = np.random.randint(-1, 2, size=(n, n))
+            est, v, w, nmults, nresamples = _onenormest_core(A, A.T, t, itmax)
+            observed.append(est)
+            expected.append(scipy.linalg.norm(A, 1))
+            nmult_list.append(nmults)
+            nresample_list.append(nresamples)
+        observed = np.array(observed, dtype=float)
+        expected = np.array(expected, dtype=float)
+        relative_errors = np.abs(observed - expected) / expected
+
+        # check the mean underestimation ratio
+        underestimation_ratio = observed / expected
+        assert_(0.90 < np.mean(underestimation_ratio) < 0.99)
+
+        # check the required column resamples
+        assert_equal(np.max(nresample_list), 0)
+
+        # check the proportion of norms computed exactly correctly
+        nexact = np.count_nonzero(relative_errors < 1e-14)
+        proportion_exact = nexact / float(nsamples)
+        assert_(0.15 < proportion_exact < 0.25)
+
+        # check the average number of matrix*vector multiplications
+        assert_(3.5 < np.mean(nmult_list) < 4.5)
+
+    def test_onenormest_table_5_t_1(self):
+        # "note that there is no randomness and hence only one estimate for t=1"
+        t = 1
+        n = 100
+        itmax = 5
+        alpha = 1 - 1e-6
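+        # (I + alpha*N)**-1, with N the superdiagonal shift, is the upper
+        # triangular Toeplitz matrix with (-alpha)**k on the k-th
+        # superdiagonal (a finite Neumann series), which the explicit
+        # toeplitz construction below cross-checks.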
+        A = -scipy.linalg.inv(np.identity(n) + alpha*np.eye(n, k=1))
+        first_col = np.array([1] + [0]*(n-1))
+        first_row = np.array([(-alpha)**i for i in range(n)])
+        B = -scipy.linalg.toeplitz(first_col, first_row)
+        assert_allclose(A, B)
+        est, v, w, nmults, nresamples = _onenormest_core(B, B.T, t, itmax)
+        exact_value = scipy.linalg.norm(B, 1)
+        underest_ratio = est / exact_value
+        assert_allclose(underest_ratio, 0.05, rtol=1e-4)
+        assert_equal(nmults, 11)
+        assert_equal(nresamples, 0)
+        # check the non-underscored version of onenormest
+        est_plain = scipy.sparse.linalg.onenormest(B, t=t, itmax=itmax)
+        assert_allclose(est, est_plain)
+
+    @pytest.mark.xslow
+    def test_onenormest_table_6_t_1(self):
+        # TODO: this test seems to give estimates that match the table,
+        # even though no attempt has been made to deal with
+        # complex numbers in the one-norm estimation.
+        # This will take multiple seconds if your computer is slow like mine.
+        # It is stochastic, so the tolerance could be too strict.
+        np.random.seed(1234)
+        t = 1
+        n = 100
+        itmax = 5
+        nsamples = 5000
+        observed = []
+        expected = []
+        nmult_list = []
+        nresample_list = []
+        for i in range(nsamples):
+            A_inv = np.random.rand(n, n) + 1j * np.random.rand(n, n)
+            A = scipy.linalg.inv(A_inv)
+            est, v, w, nmults, nresamples = _onenormest_core(A, A.T, t, itmax)
+            observed.append(est)
+            expected.append(scipy.linalg.norm(A, 1))
+            nmult_list.append(nmults)
+            nresample_list.append(nresamples)
+        observed = np.array(observed, dtype=float)
+        expected = np.array(expected, dtype=float)
+        relative_errors = np.abs(observed - expected) / expected
+
+        # check the mean underestimation ratio
+        underestimation_ratio = observed / expected
+        underestimation_ratio_mean = np.mean(underestimation_ratio)
+        assert_(0.90 < underestimation_ratio_mean < 0.99)
+
+        # check the required column resamples
+        max_nresamples = np.max(nresample_list)
+        assert_equal(max_nresamples, 0)
+
+        # check the proportion of norms computed exactly correctly
+        nexact = np.count_nonzero(relative_errors < 1e-14)
+        proportion_exact = nexact / float(nsamples)
+        assert_(0.7 < proportion_exact < 0.8)
+
+        # check the average number of matrix*vector multiplications
+        mean_nmult = np.mean(nmult_list)
+        assert_(4 < mean_nmult < 5)
+
+    def _help_product_norm_slow(self, A, B):
+        # for profiling
+        C = np.dot(A, B)
+        return scipy.linalg.norm(C, 1)
+
+    def _help_product_norm_fast(self, A, B):
+        # for profiling
+        t = 2
+        itmax = 5
+        D = MatrixProductOperator(A, B)
+        est, v, w, nmults, nresamples = _onenormest_core(D, D.T, t, itmax)
+        return est
+
+    @pytest.mark.slow
+    def test_onenormest_linear_operator(self):
+        # Define a matrix through its product A B.
+        # Depending on the shapes of A and B,
+        # it could be easy to multiply this product by a small matrix,
+        # but it could be annoying to look at all of
+        # the entries of the product explicitly.
+        np.random.seed(1234)
+        n = 6000
+        k = 3
+        A = np.random.randn(n, k)
+        B = np.random.randn(k, n)
+        fast_estimate = self._help_product_norm_fast(A, B)
+        exact_value = self._help_product_norm_slow(A, B)
+        assert_(fast_estimate <= exact_value <= 3*fast_estimate,
+                'fast: %g\nexact: %g' % (fast_estimate, exact_value))
+
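+    def _demo_onenormest_via_operator_product(self):
+        # Illustrative sketch, not part of the original suite: the same
+        # norm-of-a-product estimate is reachable through the public API
+        # by composing LinearOperators, so the n-by-n product is never
+        # materialized. Assumes aslinearoperator/onenormest as documented.
+        np.random.seed(1234)
+        A = np.random.randn(600, 3)
+        B = np.random.randn(3, 600)
+        op = (scipy.sparse.linalg.aslinearoperator(A)
+              * scipy.sparse.linalg.aslinearoperator(B))
+        est = scipy.sparse.linalg.onenormest(op)
+        # the estimate is attained by an explicit vector, hence a lower bound
+        assert_(est <= scipy.linalg.norm(A.dot(B), 1))
+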
+    def test_returns(self):
+        np.random.seed(1234)
+        A = scipy.sparse.rand(50, 50, 0.1)
+
+        s0 = scipy.linalg.norm(A.toarray(), 1)
+        s1, v = scipy.sparse.linalg.onenormest(A, compute_v=True)
+        s2, w = scipy.sparse.linalg.onenormest(A, compute_w=True)
+        s3, v2, w2 = scipy.sparse.linalg.onenormest(A, compute_w=True, compute_v=True)
+
+        assert_allclose(s1, s0, rtol=1e-9)
+        assert_allclose(np.linalg.norm(A.dot(v), 1), s0*np.linalg.norm(v, 1), rtol=1e-9)
+        assert_allclose(A.dot(v), w, rtol=1e-9)
+
+
+class TestAlgorithm_2_2:
+
+    def test_randn_inv(self):
+        np.random.seed(1234)
+        n = 20
+        nsamples = 100
+        for i in range(nsamples):
+
+            # Choose integer t uniformly between 1 and 3 inclusive.
+            t = np.random.randint(1, 4)
+
+            # Choose n uniformly between 10 and 40 inclusive.
+            n = np.random.randint(10, 41)
+
+            # Sample the inverse of a matrix with random normal entries.
+            A = scipy.linalg.inv(np.random.randn(n, n))
+
+            # Compute the 1-norm bounds.
+            g, ind = _algorithm_2_2(A, A.T, t)
+
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/tests/test_propack.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/tests/test_propack.py
new file mode 100644
index 00000000..f41c8235
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/tests/test_propack.py
@@ -0,0 +1,187 @@
+import os
+import pytest
+import sys
+
+import numpy as np
+from numpy.testing import assert_allclose
+from pytest import raises as assert_raises
+from scipy.sparse.linalg._svdp import _svdp
+from scipy.sparse import csr_matrix, csc_matrix
+
+
+# dtype_flavour to tolerance
+TOLS = {
+    np.float32: 1e-4,
+    np.float64: 1e-8,
+    np.complex64: 1e-4,
+    np.complex128: 1e-8,
+}
+
+
+def is_complex_type(dtype):
+    return np.dtype(dtype).kind == "c"
+
+
+def is_32bit():
+    return sys.maxsize <= 2**32  # (usually 2**31-1 on 32-bit)
+
+
+def is_windows():
+    return 'win32' in sys.platform
+
+
+_dtypes = []
+for dtype_flavour in TOLS.keys():
+    marks = []
+    if is_complex_type(dtype_flavour):
+        if is_32bit():
+            # PROPACK has issues w/ complex on 32-bit; see gh-14433
+            marks = [pytest.mark.skip]
+        elif is_windows() and np.dtype(dtype_flavour).itemsize == 16:
+            # windows crashes for complex128 (so don't xfail); see gh-15108
+            marks = [pytest.mark.skip]
+        else:
+            marks = [pytest.mark.slow]  # type: ignore[list-item]
+    _dtypes.append(pytest.param(dtype_flavour, marks=marks,
+                                id=dtype_flavour.__name__))
+_dtypes = tuple(_dtypes)  # type: ignore[assignment]
+
+
+def generate_matrix(constructor, n, m, f,
+                    dtype=float, rseed=0, **kwargs):
+    """Generate a random sparse matrix"""
+    rng = np.random.RandomState(rseed)
+    if is_complex_type(dtype):
+        M = (- 5 + 10 * rng.rand(n, m)
+             - 5j + 10j * rng.rand(n, m)).astype(dtype)
+    else:
+        M = (-5 + 10 * rng.rand(n, m)).astype(dtype)
+    M[M.real > 10 * f - 5] = 0
+    return constructor(M, **kwargs)
+
+
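+def _demo_generate_matrix_density():
+    # Illustrative sketch, not part of the original file: real parts are
+    # uniform on [-5, 5), and entries with real part above 10*f - 5 are
+    # zeroed, so f controls the expected density of the result.
+    M = generate_matrix(np.asarray, 200, 200, f=0.8)
+    density = np.count_nonzero(M) / M.size
+    assert 0.7 < density < 0.9
+
+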
+def assert_orthogonal(u1, u2, rtol, atol):
+    """Check that the first k rows of u1 and u2 are orthogonal"""
+    A = abs(np.dot(u1.conj().T, u2))
+    assert_allclose(A, np.eye(u1.shape[1], u2.shape[1]), rtol=rtol, atol=atol)
+
+
+def check_svdp(n, m, constructor, dtype, k, irl_mode, which, f=0.8):
+    tol = TOLS[dtype]
+
+    M = generate_matrix(np.asarray, n, m, f, dtype)
+    Msp = constructor(M)
+
+    u1, sigma1, vt1 = np.linalg.svd(M, full_matrices=False)
+    u2, sigma2, vt2, _ = _svdp(Msp, k=k, which=which, irl_mode=irl_mode,
+                               tol=tol)
+
+    # if the smallest singular values were requested ('SM'), rotate the
+    # reference SVD so its last k triplets align with PROPACK's output
+    if which.upper() == 'SM':
+        u1 = np.roll(u1, k, 1)
+        vt1 = np.roll(vt1, k, 0)
+        sigma1 = np.roll(sigma1, k)
+
+    # check that singular values agree
+    assert_allclose(sigma1[:k], sigma2, rtol=tol, atol=tol)
+
+    # check that singular vectors are orthogonal
+    assert_orthogonal(u1, u2, rtol=tol, atol=tol)
+    assert_orthogonal(vt1.T, vt2.T, rtol=tol, atol=tol)
+
+
+@pytest.mark.parametrize('ctor', (np.array, csr_matrix, csc_matrix))
+@pytest.mark.parametrize('dtype', _dtypes)
+@pytest.mark.parametrize('irl', (True, False))
+@pytest.mark.parametrize('which', ('LM', 'SM'))
+def test_svdp(ctor, dtype, irl, which):
+    np.random.seed(0)
+    n, m, k = 10, 20, 3
+    if which == 'SM' and not irl:
+        message = "`which`='SM' requires irl_mode=True"
+        with assert_raises(ValueError, match=message):
+            check_svdp(n, m, ctor, dtype, k, irl, which)
+    else:
+        if is_32bit() and is_complex_type(dtype):
+            message = 'PROPACK complex-valued SVD methods not available '
+            with assert_raises(TypeError, match=message):
+                check_svdp(n, m, ctor, dtype, k, irl, which)
+        else:
+            check_svdp(n, m, ctor, dtype, k, irl, which)
+
+
+@pytest.mark.parametrize('dtype', _dtypes)
+@pytest.mark.parametrize('irl', (False, True))
+@pytest.mark.timeout(120)  # True, complex64 > 60 s: prerel deps cov 64bit blas
+def test_examples(dtype, irl):
+    # Note: atol for complex64 bumped from 1e-4 to 1e-3 due to test failures
+    # with BLIS, Netlib, and MKL+AVX512 - see
+    # https://github.com/conda-forge/scipy-feedstock/pull/198#issuecomment-999180432
+    atol = {
+        np.float32: 1.3e-4,
+        np.float64: 1e-9,
+        np.complex64: 1e-3,
+        np.complex128: 1e-9,
+    }[dtype]
+
+    path_prefix = os.path.dirname(__file__)
+    # Test matrices from `illc1850.coord` and `mhd1280b.cua` distributed with
+    # PROPACK 2.1: http://sun.stanford.edu/~rmunk/PROPACK/
+    relative_path = "propack_test_data.npz"
+    filename = os.path.join(path_prefix, relative_path)
+    data = np.load(filename, allow_pickle=True)
+
+    if is_complex_type(dtype):
+        A = data['A_complex'].item().astype(dtype)
+    else:
+        A = data['A_real'].item().astype(dtype)
+
+    k = 200
+    u, s, vh, _ = _svdp(A, k, irl_mode=irl, random_state=0)
+
+    # complex example matrix has many repeated singular values, so check only
+    # beginning non-repeated singular vectors to avoid permutations
+    sv_check = 27 if is_complex_type(dtype) else k
+    u = u[:, :sv_check]
+    vh = vh[:sv_check, :]
+    s = s[:sv_check]
+
+    # Check orthogonality of singular vectors
+    assert_allclose(np.eye(u.shape[1]), u.conj().T @ u, atol=atol)
+    assert_allclose(np.eye(vh.shape[0]), vh @ vh.conj().T, atol=atol)
+
+    # Ensure the norm of the difference between the np.linalg.svd and
+    # PROPACK reconstructed matrices is small
+    u3, s3, vh3 = np.linalg.svd(A.todense())
+    u3 = u3[:, :sv_check]
+    s3 = s3[:sv_check]
+    vh3 = vh3[:sv_check, :]
+    A3 = u3 @ np.diag(s3) @ vh3
+    recon = u @ np.diag(s) @ vh
+    assert_allclose(np.linalg.norm(A3 - recon), 0, atol=atol)
+
+
+@pytest.mark.parametrize('shifts', (None, -10, 0, 1, 10, 70))
+@pytest.mark.parametrize('dtype', _dtypes[:2])
+def test_shifts(shifts, dtype):
+    np.random.seed(0)
+    n, k = 70, 10
+    A = np.random.random((n, n))
+    if shifts is not None and ((shifts < 0) or (k > min(n-1-shifts, n))):
+        with pytest.raises(ValueError):
+            _svdp(A, k, shifts=shifts, kmax=5*k, irl_mode=True)
+    else:
+        _svdp(A, k, shifts=shifts, kmax=5*k, irl_mode=True)
+
+
+@pytest.mark.slow
+@pytest.mark.xfail()
+def test_shifts_accuracy():
+    np.random.seed(0)
+    n, k = 70, 10
+    A = np.random.random((n, n)).astype(np.double)
+    u1, s1, vt1, _ = _svdp(A, k, shifts=None, which='SM', irl_mode=True)
+    u2, s2, vt2, _ = _svdp(A, k, shifts=32, which='SM', irl_mode=True)
+    # shifts <= 32 doesn't agree with shifts > 32
+    # Does agree when which='LM' instead of 'SM'
+    assert_allclose(s1, s2)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/tests/test_pydata_sparse.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/tests/test_pydata_sparse.py
new file mode 100644
index 00000000..9d45ff51
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/linalg/tests/test_pydata_sparse.py
@@ -0,0 +1,241 @@
+import pytest
+
+import numpy as np
+import scipy.sparse as sp
+import scipy.sparse.linalg as splin
+
+from numpy.testing import assert_allclose, assert_equal
+
+try:
+    import sparse
+except Exception:
+    sparse = None
+
+pytestmark = pytest.mark.skipif(sparse is None,
+                                reason="pydata/sparse not installed")
+
+
+msg = "pydata/sparse (0.8) does not implement necessary operations"
+
+
+sparse_params = (pytest.param("COO"),
+                 pytest.param("DOK", marks=[pytest.mark.xfail(reason=msg)]))
+
+scipy_sparse_classes = [
+    sp.bsr_matrix,
+    sp.csr_matrix,
+    sp.coo_matrix,
+    sp.csc_matrix,
+    sp.dia_matrix,
+    sp.dok_matrix
+]
+
+
+@pytest.fixture(params=sparse_params)
+def sparse_cls(request):
+    return getattr(sparse, request.param)
+
+
+@pytest.fixture(params=scipy_sparse_classes)
+def sp_sparse_cls(request):
+    return request.param
+
+
+@pytest.fixture
+def same_matrix(sparse_cls, sp_sparse_cls):
+    np.random.seed(1234)
+    A_dense = np.random.rand(9, 9)
+    return sp_sparse_cls(A_dense), sparse_cls(A_dense)
+
+
+@pytest.fixture
+def matrices(sparse_cls):
+    np.random.seed(1234)
+    A_dense = np.random.rand(9, 9)
+    A_dense = A_dense @ A_dense.T
+    A_sparse = sparse_cls(A_dense)
+    b = np.random.rand(9)
+    return A_dense, A_sparse, b
+
+
+def test_isolve_gmres(matrices):
+    # Several of the iterative solvers use the same
+    # isolve.utils.make_system wrapper code, so test just one of them.
+    A_dense, A_sparse, b = matrices
+    x, info = splin.gmres(A_sparse, b, atol=1e-15)
+    assert info == 0
+    assert isinstance(x, np.ndarray)
+    assert_allclose(A_sparse @ x, b)
+
+
+def test_lsmr(matrices):
+    A_dense, A_sparse, b = matrices
+    res0 = splin.lsmr(A_dense, b)
+    res = splin.lsmr(A_sparse, b)
+    assert_allclose(res[0], res0[0], atol=1.8e-5)
+
+
+# regression test for gh-17012: lsmr output shape when x0 is given
+def test_lsmr_output_shape():
+    x = splin.lsmr(A=np.ones((10, 1)), b=np.zeros(10), x0=np.ones(1))[0]
+    assert_equal(x.shape, (1,))
+
+
+def test_lsqr(matrices):
+    A_dense, A_sparse, b = matrices
+    res0 = splin.lsqr(A_dense, b)
+    res = splin.lsqr(A_sparse, b)
+    assert_allclose(res[0], res0[0], atol=1e-5)
+
+
+def test_eigs(matrices):
+    A_dense, A_sparse, v0 = matrices
+
+    M_dense = np.diag(v0**2)
+    M_sparse = A_sparse.__class__(M_dense)
+
+    w_dense, v_dense = splin.eigs(A_dense, k=3, v0=v0)
+    w, v = splin.eigs(A_sparse, k=3, v0=v0)
+
+    assert_allclose(w, w_dense)
+    assert_allclose(v, v_dense)
+
+    for M in [M_sparse, M_dense]:
+        w_dense, v_dense = splin.eigs(A_dense, M=M_dense, k=3, v0=v0)
+        w, v = splin.eigs(A_sparse, M=M, k=3, v0=v0)
+
+        assert_allclose(w, w_dense)
+        assert_allclose(v, v_dense)
+
+        w_dense, v_dense = splin.eigsh(A_dense, M=M_dense, k=3, v0=v0)
+        w, v = splin.eigsh(A_sparse, M=M, k=3, v0=v0)
+
+        assert_allclose(w, w_dense)
+        assert_allclose(v, v_dense)
+
+
+def test_svds(matrices):
+    A_dense, A_sparse, v0 = matrices
+
+    u0, s0, vt0 = splin.svds(A_dense, k=2, v0=v0)
+    u, s, vt = splin.svds(A_sparse, k=2, v0=v0)
+
+    assert_allclose(s, s0)
+    assert_allclose(u, u0)
+    assert_allclose(vt, vt0)
+
+
+def test_lobpcg(matrices):
+    A_dense, A_sparse, x = matrices
+    X = x[:,None]
+
+    w_dense, v_dense = splin.lobpcg(A_dense, X)
+    w, v = splin.lobpcg(A_sparse, X)
+
+    assert_allclose(w, w_dense)
+    assert_allclose(v, v_dense)
+
+
+def test_spsolve(matrices):
+    A_dense, A_sparse, b = matrices
+    b2 = np.random.rand(len(b), 3)
+
+    x0 = splin.spsolve(sp.csc_matrix(A_dense), b)
+    x = splin.spsolve(A_sparse, b)
+    assert isinstance(x, np.ndarray)
+    assert_allclose(x, x0)
+
+    x0 = splin.spsolve(sp.csc_matrix(A_dense), b)
+    x = splin.spsolve(A_sparse, b, use_umfpack=True)
+    assert isinstance(x, np.ndarray)
+    assert_allclose(x, x0)
+
+    x0 = splin.spsolve(sp.csc_matrix(A_dense), b2)
+    x = splin.spsolve(A_sparse, b2)
+    assert isinstance(x, np.ndarray)
+    assert_allclose(x, x0)
+
+    x0 = splin.spsolve(sp.csc_matrix(A_dense),
+                       sp.csc_matrix(A_dense))
+    x = splin.spsolve(A_sparse, A_sparse)
+    assert isinstance(x, type(A_sparse))
+    assert_allclose(x.toarray(), x0.toarray())
+
+
+def test_splu(matrices):
+    A_dense, A_sparse, b = matrices
+    n = len(b)
+    sparse_cls = type(A_sparse)
+
+    lu = splin.splu(A_sparse)
+
+    assert isinstance(lu.L, sparse_cls)
+    assert isinstance(lu.U, sparse_cls)
+
+    Pr = sparse_cls(sp.csc_matrix((np.ones(n), (lu.perm_r, np.arange(n)))))
+    Pc = sparse_cls(sp.csc_matrix((np.ones(n), (np.arange(n), lu.perm_c))))
+    A2 = Pr.T @ lu.L @ lu.U @ Pc.T
+
+    assert_allclose(A2.toarray(), A_sparse.toarray())
+
+    z = lu.solve(A_sparse.toarray())
+    assert_allclose(z, np.eye(n), atol=1e-10)
+
+
+def test_spilu(matrices):
+    A_dense, A_sparse, b = matrices
+    sparse_cls = type(A_sparse)
+
+    lu = splin.spilu(A_sparse)
+
+    assert isinstance(lu.L, sparse_cls)
+    assert isinstance(lu.U, sparse_cls)
+
+    z = lu.solve(A_sparse.toarray())
+    assert_allclose(z, np.eye(len(b)), atol=1e-3)
+
+
+def test_spsolve_triangular(matrices):
+    A_dense, A_sparse, b = matrices
+    A_sparse = sparse.tril(A_sparse)
+
+    x = splin.spsolve_triangular(A_sparse, b)
+    assert_allclose(A_sparse @ x, b)
+
+
+def test_onenormest(matrices):
+    A_dense, A_sparse, b = matrices
+    est0 = splin.onenormest(A_dense)
+    est = splin.onenormest(A_sparse)
+    assert_allclose(est, est0)
+
+
+def test_inv(matrices):
+    A_dense, A_sparse, b = matrices
+    x0 = splin.inv(sp.csc_matrix(A_dense))
+    x = splin.inv(A_sparse)
+    assert_allclose(x.toarray(), x0.toarray())
+
+
+def test_expm(matrices):
+    A_dense, A_sparse, b = matrices
+    x0 = splin.expm(sp.csc_matrix(A_dense))
+    x = splin.expm(A_sparse)
+    assert_allclose(x.toarray(), x0.toarray())
+
+
+def test_expm_multiply(matrices):
+    A_dense, A_sparse, b = matrices
+    x0 = splin.expm_multiply(A_dense, b)
+    x = splin.expm_multiply(A_sparse, b)
+    assert_allclose(x, x0)
+
+
+def test_eq(same_matrix):
+    sp_sparse, pd_sparse = same_matrix
+    assert (sp_sparse == pd_sparse).all()
+
+
+def test_ne(same_matrix):
+    sp_sparse, pd_sparse = same_matrix
+    assert not (sp_sparse != pd_sparse).any()
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/sparsetools.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/sparsetools.py
new file mode 100644
index 00000000..aba16eda
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/sparsetools.py
@@ -0,0 +1,106 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.sparse` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _sparsetools
+
+
+__all__ = [  # noqa: F822
+    'bsr_diagonal',
+    'bsr_eldiv_bsr',
+    'bsr_elmul_bsr',
+    'bsr_ge_bsr',
+    'bsr_gt_bsr',
+    'bsr_le_bsr',
+    'bsr_lt_bsr',
+    'bsr_matmat',
+    'bsr_matvec',
+    'bsr_matvecs',
+    'bsr_maximum_bsr',
+    'bsr_minimum_bsr',
+    'bsr_minus_bsr',
+    'bsr_ne_bsr',
+    'bsr_plus_bsr',
+    'bsr_scale_columns',
+    'bsr_scale_rows',
+    'bsr_sort_indices',
+    'bsr_tocsr',
+    'bsr_transpose',
+    'coo_matvec',
+    'coo_tocsr',
+    'coo_todense',
+    'cs_graph_components',
+    'csc_diagonal',
+    'csc_eldiv_csc',
+    'csc_elmul_csc',
+    'csc_ge_csc',
+    'csc_gt_csc',
+    'csc_le_csc',
+    'csc_lt_csc',
+    'csc_matmat',
+    'csc_matmat_maxnnz',
+    'csc_matvec',
+    'csc_matvecs',
+    'csc_maximum_csc',
+    'csc_minimum_csc',
+    'csc_minus_csc',
+    'csc_ne_csc',
+    'csc_plus_csc',
+    'csc_tocsr',
+    'csr_column_index1',
+    'csr_column_index2',
+    'csr_count_blocks',
+    'csr_diagonal',
+    'csr_eldiv_csr',
+    'csr_eliminate_zeros',
+    'csr_elmul_csr',
+    'csr_ge_csr',
+    'csr_gt_csr',
+    'csr_has_canonical_format',
+    'csr_has_sorted_indices',
+    'csr_hstack',
+    'csr_le_csr',
+    'csr_lt_csr',
+    'csr_matmat',
+    'csr_matmat_maxnnz',
+    'csr_matvec',
+    'csr_matvecs',
+    'csr_maximum_csr',
+    'csr_minimum_csr',
+    'csr_minus_csr',
+    'csr_ne_csr',
+    'csr_plus_csr',
+    'csr_row_index',
+    'csr_row_slice',
+    'csr_sample_offsets',
+    'csr_sample_values',
+    'csr_scale_columns',
+    'csr_scale_rows',
+    'csr_sort_indices',
+    'csr_sum_duplicates',
+    'csr_tobsr',
+    'csr_tocsc',
+    'csr_todense',
+    'dia_matvec',
+    'expandptr',
+    'get_csr_submatrix',
+    'test_throw_error',
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.sparse.sparsetools is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.sparse instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, "
+                  "the `scipy.sparse.sparsetools` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_sparsetools, name)
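+
+
+def _demo_deprecation_shim():
+    # Illustrative sketch, not part of the original module: PEP 562
+    # module-level __getattr__ lets attribute access emit a deprecation
+    # warning while still forwarding to the private implementation.
+    with warnings.catch_warnings(record=True) as caught:
+        warnings.simplefilter("always")
+        func = __getattr__('csr_todense')
+    assert issubclass(caught[-1].category, DeprecationWarning)
+    return func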
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/spfuncs.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/spfuncs.py
new file mode 100644
index 00000000..9c65bfd4
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/spfuncs.py
@@ -0,0 +1,29 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.sparse` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _spfuncs
+
+
+__all__ = [  # noqa: F822
+    'isspmatrix_csr', 'csr_matrix', 'isspmatrix_csc', 'csr_count_blocks',
+    'estimate_blocksize', 'count_blocks'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.sparse.spfuncs is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.sparse instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, "
+                  "the `scipy.sparse.spfuncs` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_spfuncs, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/sputils.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/sputils.py
new file mode 100644
index 00000000..ed96d653
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/sputils.py
@@ -0,0 +1,52 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.sparse` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _sputils
+
+
+__all__ = [  # noqa: F822
+    'asmatrix',
+    'check_reshape_kwargs',
+    'check_shape',
+    'downcast_intp_index',
+    'get_index_dtype',
+    'get_sum_dtype',
+    'getdata',
+    'getdtype',
+    'is_pydata_spmatrix',
+    'isdense',
+    'isintlike',
+    'ismatrix',
+    'isscalarlike',
+    'issequence',
+    'isshape',
+    'matrix',
+    'operator',
+    'prod',
+    'supported_dtypes',
+    'sys',
+    'to_native',
+    'upcast',
+    'upcast_char',
+    'upcast_scalar',
+    'validateaxis',
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.sparse.sputils is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.sparse instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.sparse` namespace, "
+                  "the `scipy.sparse.sputils` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_sputils, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/data/csc_py2.npz b/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/data/csc_py2.npz
new file mode 100644
index 00000000..83ee2575
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/data/csc_py2.npz differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/data/csc_py3.npz b/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/data/csc_py3.npz
new file mode 100644
index 00000000..73d086fd
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/data/csc_py3.npz differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_array_api.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_array_api.py
new file mode 100644
index 00000000..a074cae7
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_array_api.py
@@ -0,0 +1,339 @@
+import pytest
+import numpy as np
+import numpy.testing as npt
+import scipy.sparse
+import scipy.sparse.linalg as spla
+
+sparray_types = ('bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil')
+
+sparray_classes = [
+    getattr(scipy.sparse, f'{T}_array') for T in sparray_types
+]
+
+A = np.array([
+    [0, 1, 2, 0],
+    [2, 0, 0, 3],
+    [1, 4, 0, 0]
+])
+
+B = np.array([
+    [0, 1],
+    [2, 0]
+])
+
+X = np.array([
+    [1, 0, 0, 1],
+    [2, 1, 2, 0],
+    [0, 2, 1, 0],
+    [0, 0, 1, 2]
+], dtype=float)
+
+
+sparrays = [sparray(A) for sparray in sparray_classes]
+square_sparrays = [sparray(B) for sparray in sparray_classes]
+eig_sparrays = [sparray(X) for sparray in sparray_classes]
+
+parametrize_sparrays = pytest.mark.parametrize(
+    "A", sparrays, ids=sparray_types
+)
+parametrize_square_sparrays = pytest.mark.parametrize(
+    "B", square_sparrays, ids=sparray_types
+)
+parametrize_eig_sparrays = pytest.mark.parametrize(
+    "X", eig_sparrays, ids=sparray_types
+)
+
+
+@parametrize_sparrays
+def test_sum(A):
+    assert not isinstance(A.sum(axis=0), np.matrix), \
+        "Expected array, got matrix"
+    assert A.sum(axis=0).shape == (4,)
+    assert A.sum(axis=1).shape == (3,)
+
+
+@parametrize_sparrays
+def test_mean(A):
+    assert not isinstance(A.mean(axis=1), np.matrix), \
+        "Expected array, got matrix"
+
+
+@parametrize_sparrays
+def test_todense(A):
+    assert not isinstance(A.todense(), np.matrix), \
+        "Expected array, got matrix"
+
+
+@parametrize_sparrays
+def test_indexing(A):
+    if A.__class__.__name__[:3] in ('dia', 'coo', 'bsr'):
+        return
+
+    with pytest.raises(NotImplementedError):
+        A[1, :]
+
+    with pytest.raises(NotImplementedError):
+        A[:, 1]
+
+    with pytest.raises(NotImplementedError):
+        A[1, [1, 2]]
+
+    with pytest.raises(NotImplementedError):
+        A[[1, 2], 1]
+
+    assert A[[0]]._is_array, "Expected sparse array, got sparse matrix"
+    assert A[1, [[1, 2]]]._is_array, "Expected sparse array, got sparse matrix"
+    assert A[[[1, 2]], 1]._is_array, "Expected sparse array, got sparse matrix"
+    assert A[:, [1, 2]]._is_array, "Expected sparse array, got something else"
+
+
+@parametrize_sparrays
+def test_dense_addition(A):
+    X = np.random.random(A.shape)
+    assert not isinstance(A + X, np.matrix), "Expected array, got matrix"
+
+
+@parametrize_sparrays
+def test_sparse_addition(A):
+    assert (A + A)._is_array, "Expected array, got matrix"
+
+
+@parametrize_sparrays
+def test_elementwise_mul(A):
+    assert np.all((A * A).todense() == A.power(2).todense())
+
+
+@parametrize_sparrays
+def test_elementwise_rmul(A):
+    with pytest.raises(TypeError):
+        None * A
+
+    with pytest.raises(ValueError):
+        np.eye(3) * scipy.sparse.csr_array(np.arange(6).reshape(2, 3))
+
+    assert np.all((2 * A) == (A.todense() * 2))
+
+    assert np.all((A.todense() * A) == (A.todense() ** 2))
+
+
+@parametrize_sparrays
+def test_matmul(A):
+    assert np.all((A @ A.T).todense() == A.dot(A.T).todense())
+
+
+@parametrize_square_sparrays
+def test_pow(B):
+    assert (B**0)._is_array, "Expected array, got matrix"
+    assert (B**2)._is_array, "Expected array, got matrix"
+
+
+@parametrize_sparrays
+def test_sparse_divide(A):
+    assert isinstance(A / A, np.ndarray)
+
+
+@parametrize_sparrays
+def test_dense_divide(A):
+    assert (A / 2)._is_array, "Expected array, got matrix"
+
+
+@parametrize_sparrays
+def test_no_A_attr(A):
+    with pytest.warns(np.VisibleDeprecationWarning):
+        A.A
+
+
+@parametrize_sparrays
+def test_no_H_attr(A):
+    with pytest.warns(np.VisibleDeprecationWarning):
+        A.H
+
+
+@parametrize_sparrays
+def test_getrow_getcol(A):
+    assert A.getcol(0)._is_array
+    assert A.getrow(0)._is_array
+
+
+@parametrize_sparrays
+def test_docstr(A):
+    if A.__doc__ is None:
+        return
+
+    docstr = A.__doc__.lower()
+    for phrase in ('matrix', 'matrices'):
+        assert phrase not in docstr
+
+
+# -- linalg --
+
+@parametrize_sparrays
+def test_as_linearoperator(A):
+    L = spla.aslinearoperator(A)
+    npt.assert_allclose(L * [1, 2, 3, 4], A @ [1, 2, 3, 4])
+
+
+@parametrize_square_sparrays
+def test_inv(B):
+    if B.__class__.__name__[:3] != 'csc':
+        return
+
+    C = spla.inv(B)
+
+    assert C._is_array
+    npt.assert_allclose(C.todense(), np.linalg.inv(B.todense()))
+
+
+@parametrize_square_sparrays
+def test_expm(B):
+    if B.__class__.__name__[:3] != 'csc':
+        return
+
+    Bmat = scipy.sparse.csc_matrix(B)
+
+    C = spla.expm(B)
+
+    assert C._is_array
+    npt.assert_allclose(
+        C.todense(),
+        spla.expm(Bmat).todense()
+    )
+
+
+@parametrize_square_sparrays
+def test_expm_multiply(B):
+    if B.__class__.__name__[:3] != 'csc':
+        return
+
+    npt.assert_allclose(
+        spla.expm_multiply(B, np.array([1, 2])),
+        spla.expm(B) @ [1, 2]
+    )
+
+
+@parametrize_sparrays
+def test_norm(A):
+    C = spla.norm(A)
+    npt.assert_allclose(C, np.linalg.norm(A.todense()))
+
+
+@parametrize_square_sparrays
+def test_onenormest(B):
+    C = spla.onenormest(B)
+    npt.assert_allclose(C, np.linalg.norm(B.todense(), 1))
+
+
+@parametrize_square_sparrays
+def test_spsolve(B):
+    if B.__class__.__name__[:3] not in ('csc', 'csr'):
+        return
+
+    npt.assert_allclose(
+        spla.spsolve(B, [1, 2]),
+        np.linalg.solve(B.todense(), [1, 2])
+    )
+
+
+def test_spsolve_triangular():
+    X = scipy.sparse.csr_array([
+        [1, 0, 0, 0],
+        [2, 1, 0, 0],
+        [3, 2, 1, 0],
+        [4, 3, 2, 1],
+    ])
+    spla.spsolve_triangular(X, [1, 2, 3, 4])
+
+
+@parametrize_square_sparrays
+def test_factorized(B):
+    if B.__class__.__name__[:3] != 'csc':
+        return
+
+    LU = spla.factorized(B)
+    npt.assert_allclose(
+        LU(np.array([1, 2])),
+        np.linalg.solve(B.todense(), [1, 2])
+    )
+
+
+@parametrize_square_sparrays
+@pytest.mark.parametrize(
+    "solver",
+    ["bicg", "bicgstab", "cg", "cgs", "gmres", "lgmres", "minres", "qmr",
+     "gcrotmk", "tfqmr"]
+)
+def test_solvers(B, solver):
+    if solver == "minres":
+        kwargs = {}
+    else:
+        kwargs = {'atol': 1e-5}
+
+    x, info = getattr(spla, solver)(B, np.array([1, 2]), **kwargs)
+    assert info >= 0  # no errors, even if perhaps did not converge fully
+    npt.assert_allclose(x, [1, 1], atol=1e-1)
+
+
+@parametrize_sparrays
+@pytest.mark.parametrize(
+    "solver",
+    ["lsqr", "lsmr"]
+)
+def test_lstsqr(A, solver):
+    x, *_ = getattr(spla, solver)(A, [1, 2, 3])
+    npt.assert_allclose(A @ x, [1, 2, 3])
+
+
+@parametrize_eig_sparrays
+def test_eigs(X):
+    e, v = spla.eigs(X, k=1)
+    npt.assert_allclose(
+        X @ v,
+        e[0] * v
+    )
+
+
+@parametrize_eig_sparrays
+def test_eigsh(X):
+    X = X + X.T
+    e, v = spla.eigsh(X, k=1)
+    npt.assert_allclose(
+        X @ v,
+        e[0] * v
+    )
+
+
+@parametrize_eig_sparrays
+def test_svds(X):
+    u, s, vh = spla.svds(X, k=3)
+    u2, s2, vh2 = np.linalg.svd(X.todense())
+    s = np.sort(s)
+    s2 = np.sort(s2[:3])
+    npt.assert_allclose(s, s2, atol=1e-3)
+
+
+def test_splu():
+    X = scipy.sparse.csc_array([
+        [1, 0, 0, 0],
+        [2, 1, 0, 0],
+        [3, 2, 1, 0],
+        [4, 3, 2, 1],
+    ])
+    LU = spla.splu(X)
+    npt.assert_allclose(LU.solve(np.array([1, 2, 3, 4])), [1, 0, 0, 0])
+
+
+def test_spilu():
+    X = scipy.sparse.csc_array([
+        [1, 0, 0, 0],
+        [2, 1, 0, 0],
+        [3, 2, 1, 0],
+        [4, 3, 2, 1],
+    ])
+    LU = spla.spilu(X)
+    npt.assert_allclose(LU.solve(np.array([1, 2, 3, 4])), [1, 0, 0, 0])
+
+
+@parametrize_sparrays
+def test_power_operator(A):
+    # https://github.com/scipy/scipy/issues/15948
+    npt.assert_equal((A**2).todense(), (A.todense())**2)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_base.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_base.py
new file mode 100644
index 00000000..a2549f75
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_base.py
@@ -0,0 +1,4976 @@
+#
+# Authors: Travis Oliphant, Ed Schofield, Robert Cimrman, Nathan Bell, and others
+
+""" Test functions for sparse matrices. Each class in the "Matrix class
+based tests" section become subclasses of the classes in the "Generic
+tests" section. This is done by the functions in the "Tailored base
+class for generic tests" section.
+
+"""
+
+
+import contextlib
+import functools
+import operator
+import platform
+import itertools
+import sys
+from scipy._lib import _pep440
+
+import numpy as np
+from numpy import (arange, zeros, array, dot, asarray,
+                   vstack, ndarray, transpose, diag, kron, inf, conjugate,
+                   int8, ComplexWarning)
+
+import random
+from numpy.testing import (assert_equal, assert_array_equal,
+        assert_array_almost_equal, assert_almost_equal, assert_,
+        assert_allclose, suppress_warnings)
+from pytest import raises as assert_raises
+
+import scipy.linalg
+
+import scipy.sparse as sparse
+from scipy.sparse import (csc_matrix, csr_matrix, dok_matrix,
+        coo_matrix, lil_matrix, dia_matrix, bsr_matrix,
+        eye, isspmatrix, SparseEfficiencyWarning)
+from scipy.sparse._sputils import (supported_dtypes, isscalarlike,
+                                   get_index_dtype, asmatrix, matrix)
+from scipy.sparse.linalg import splu, expm, inv
+
+from scipy._lib.decorator import decorator
+
+import pytest
+
+
+IS_COLAB = ('google.colab' in sys.modules)
+
+
+def assert_in(member, collection, msg=None):
+    if msg is None:
+        msg = "%r not found in %r" % (member, collection)
+    assert_(member in collection, msg=msg)
+
+
+def assert_array_equal_dtype(x, y, **kwargs):
+    assert_(x.dtype == y.dtype)
+    assert_array_equal(x, y, **kwargs)
+
+
+NON_ARRAY_BACKED_FORMATS = frozenset(['dok'])
+
+def sparse_may_share_memory(A, B):
+    # Checks if A and B have any numpy array sharing memory.
+
+    def _underlying_arrays(x):
+        # Given any object (e.g. a sparse array), returns all numpy arrays
+        # stored in any attribute.
+
+        arrays = []
+        for a in x.__dict__.values():
+            if isinstance(a, (np.ndarray, np.generic)):
+                arrays.append(a)
+        return arrays
+
+    for a in _underlying_arrays(A):
+        for b in _underlying_arrays(B):
+            if np.may_share_memory(a, b):
+                return True
+    return False
+
+
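+def _demo_sparse_may_share_memory():
+    # Illustrative sketch, not part of the original file: a no-copy
+    # transpose of a CSR matrix reuses the same data/index buffers, so
+    # the helper above reports shared memory.
+    A = csr_matrix(np.eye(3))
+    B = A.transpose(copy=False)
+    return sparse_may_share_memory(A, B)  # expected: True
+
+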
+sup_complex = suppress_warnings()
+sup_complex.filter(ComplexWarning)
+
+
+def with_64bit_maxval_limit(maxval_limit=None, random=False, fixed_dtype=None,
+                            downcast_maxval=None, assert_32bit=False):
+    """
+    Monkeypatch the maxval threshold at which scipy.sparse switches to
+    64-bit index arrays, or make it (pseudo-)random.
+
+    """
+    if maxval_limit is None:
+        maxval_limit = np.int64(10)
+    else:
+        # Ensure we use numpy scalars rather than Python scalars (matters for
+        # NEP 50 casting rule changes)
+        maxval_limit = np.int64(maxval_limit)
+
+    if assert_32bit:
+        def new_get_index_dtype(arrays=(), maxval=None, check_contents=False):
+            tp = get_index_dtype(arrays, maxval, check_contents)
+            assert_equal(np.iinfo(tp).max, np.iinfo(np.int32).max)
+            assert_(tp == np.int32 or tp == np.intc)
+            return tp
+    elif fixed_dtype is not None:
+        def new_get_index_dtype(arrays=(), maxval=None, check_contents=False):
+            return fixed_dtype
+    elif random:
+        counter = np.random.RandomState(seed=1234)
+
+        def new_get_index_dtype(arrays=(), maxval=None, check_contents=False):
+            return (np.int32, np.int64)[counter.randint(2)]
+    else:
+        def new_get_index_dtype(arrays=(), maxval=None, check_contents=False):
+            dtype = np.int32
+            if maxval is not None:
+                if maxval > maxval_limit:
+                    dtype = np.int64
+            for arr in arrays:
+                arr = np.asarray(arr)
+                if arr.dtype > np.int32:
+                    if check_contents:
+                        if arr.size == 0:
+                            # a bigger type not needed
+                            continue
+                        elif np.issubdtype(arr.dtype, np.integer):
+                            maxval = arr.max()
+                            minval = arr.min()
+                            if minval >= -maxval_limit and maxval <= maxval_limit:
+                                # a bigger type not needed
+                                continue
+                    dtype = np.int64
+            return dtype
+
+    if downcast_maxval is not None:
+        def new_downcast_intp_index(arr):
+            if arr.max() > downcast_maxval:
+                raise AssertionError("downcast limited")
+            return arr.astype(np.intp)
+
+    @decorator
+    def deco(func, *a, **kw):
+        backup = []
+        modules = [scipy.sparse._bsr, scipy.sparse._coo, scipy.sparse._csc,
+                   scipy.sparse._csr, scipy.sparse._dia, scipy.sparse._dok,
+                   scipy.sparse._lil, scipy.sparse._sputils,
+                   scipy.sparse._compressed, scipy.sparse._construct]
+        try:
+            for mod in modules:
+                backup.append((mod, 'get_index_dtype',
+                               getattr(mod, 'get_index_dtype', None)))
+                setattr(mod, 'get_index_dtype', new_get_index_dtype)
+                if downcast_maxval is not None:
+                    backup.append((mod, 'downcast_intp_index',
+                                   getattr(mod, 'downcast_intp_index', None)))
+                    setattr(mod, 'downcast_intp_index', new_downcast_intp_index)
+            return func(*a, **kw)
+        finally:
+            for mod, name, oldfunc in backup:
+                if oldfunc is not None:
+                    setattr(mod, name, oldfunc)
+
+    return deco
+
+
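+def _demo_with_64bit_maxval_limit():
+    # Illustrative sketch, not part of the original file: inside the
+    # decorated callable the patched get_index_dtype is visible through
+    # the module attribute, so index dtypes can be pinned for a test.
+    @with_64bit_maxval_limit(fixed_dtype=np.int64)
+    def probe():
+        # look the helper up on the module so the patch is observed
+        return sparse._sputils.get_index_dtype((np.array([0]),))
+    return probe()  # expected: np.int64 while the patch is active
+
+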
+def toarray(a):
+    if isinstance(a, np.ndarray) or isscalarlike(a):
+        return a
+    return a.toarray()
+
+
+class BinopTester:
+    # Custom type to test binary operations on sparse matrices.
+
+    def __add__(self, mat):
+        return "matrix on the right"
+
+    def __mul__(self, mat):
+        return "matrix on the right"
+
+    def __sub__(self, mat):
+        return "matrix on the right"
+
+    def __radd__(self, mat):
+        return "matrix on the left"
+
+    def __rmul__(self, mat):
+        return "matrix on the left"
+
+    def __rsub__(self, mat):
+        return "matrix on the left"
+
+    def __matmul__(self, mat):
+        return "matrix on the right"
+
+    def __rmatmul__(self, mat):
+        return "matrix on the left"
+
+class BinopTester_with_shape:
+    # Custom type to test binary operations on sparse matrices
+    # with object which has shape attribute.
+    def __init__(self, shape):
+        self._shape = shape
+
+    def shape(self):
+        return self._shape
+
+    def ndim(self):
+        return len(self._shape)
+
+    def __add__(self, mat):
+        return "matrix on the right"
+
+    def __mul__(self, mat):
+        return "matrix on the right"
+
+    def __sub__(self, mat):
+        return "matrix on the right"
+
+    def __radd__(self, mat):
+        return "matrix on the left"
+
+    def __rmul__(self, mat):
+        return "matrix on the left"
+
+    def __rsub__(self, mat):
+        return "matrix on the left"
+
+    def __matmul__(self, mat):
+        return "matrix on the right"
+
+    def __rmatmul__(self, mat):
+        return "matrix on the left"
+
+
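+def _demo_binop_deferral():
+    # Illustrative sketch, not part of the original file: sparse binary
+    # ops return NotImplemented for unknown operand types, so Python
+    # falls back to the reflected method on the custom type.
+    b = BinopTester()
+    return csr_matrix(np.eye(2)) + b  # expected: "matrix on the left"
+
+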
+#------------------------------------------------------------------------------
+# Generic tests
+#------------------------------------------------------------------------------
+
+
+# TODO test prune
+# TODO test has_sorted_indices
+class _TestCommon:
+    """test common functionality shared by all sparse formats"""
+    math_dtypes = supported_dtypes
+
+    @classmethod
+    def init_class(cls):
+        # Canonical data.
+        cls.dat = array([[1, 0, 0, 2], [3, 0, 1, 0], [0, 2, 0, 0]], 'd')
+        cls.datsp = cls.spmatrix(cls.dat)
+
+        # Some sparse and dense matrices with data for every supported
+        # dtype.
+        # This set union is a workaround for numpy#6295, under which two
+        # np.int64 dtypes need not hash to the same value.
+        cls.checked_dtypes = set(supported_dtypes).union(cls.math_dtypes)
+        cls.dat_dtypes = {}
+        cls.datsp_dtypes = {}
+        for dtype in cls.checked_dtypes:
+            cls.dat_dtypes[dtype] = cls.dat.astype(dtype)
+            cls.datsp_dtypes[dtype] = cls.spmatrix(cls.dat.astype(dtype))
+
+        # Check that the original data is equivalent to the
+        # corresponding dat_dtypes & datsp_dtypes.
+        assert_equal(cls.dat, cls.dat_dtypes[np.float64])
+        assert_equal(cls.datsp.toarray(),
+                     cls.datsp_dtypes[np.float64].toarray())
+
+    def test_bool(self):
+        def check(dtype):
+            datsp = self.datsp_dtypes[dtype]
+
+            assert_raises(ValueError, bool, datsp)
+            assert_(self.spmatrix([1]))
+            assert_(not self.spmatrix([0]))
+
+        if isinstance(self, TestDOK):
+            pytest.skip("Cannot create a rank <= 2 DOK matrix.")
+        for dtype in self.checked_dtypes:
+            check(dtype)
+
+    def test_bool_rollover(self):
+        # bool's underlying dtype is 1 byte; check that it does not
+        # roll over True -> False at 256.
+        dat = array([[True, False]])
+        datsp = self.spmatrix(dat)
+
+        for _ in range(10):
+            datsp = datsp + datsp
+            dat = dat + dat
+        assert_array_equal(dat, datsp.toarray())
+
+    def test_eq(self):
+        sup = suppress_warnings()
+        sup.filter(SparseEfficiencyWarning)
+
+        @sup
+        @sup_complex
+        def check(dtype):
+            dat = self.dat_dtypes[dtype]
+            datsp = self.datsp_dtypes[dtype]
+            dat2 = dat.copy()
+            dat2[:,0] = 0
+            datsp2 = self.spmatrix(dat2)
+            datbsr = bsr_matrix(dat)
+            datcsr = csr_matrix(dat)
+            datcsc = csc_matrix(dat)
+            datlil = lil_matrix(dat)
+
+            # sparse/sparse
+            assert_array_equal_dtype(dat == dat2, (datsp == datsp2).toarray())
+            # mix sparse types
+            assert_array_equal_dtype(dat == dat2, (datbsr == datsp2).toarray())
+            assert_array_equal_dtype(dat == dat2, (datcsr == datsp2).toarray())
+            assert_array_equal_dtype(dat == dat2, (datcsc == datsp2).toarray())
+            assert_array_equal_dtype(dat == dat2, (datlil == datsp2).toarray())
+            # sparse/dense
+            assert_array_equal_dtype(dat == datsp2, datsp2 == dat)
+            # sparse/scalar
+            assert_array_equal_dtype(dat == 0, (datsp == 0).toarray())
+            assert_array_equal_dtype(dat == 1, (datsp == 1).toarray())
+            assert_array_equal_dtype(dat == np.nan,
+                                     (datsp == np.nan).toarray())
+
+        if not isinstance(self, (TestBSR, TestCSC, TestCSR)):
+            pytest.skip("Bool comparisons only implemented for BSR, CSC, and CSR.")
+        for dtype in self.checked_dtypes:
+            check(dtype)
+
+    def test_ne(self):
+        sup = suppress_warnings()
+        sup.filter(SparseEfficiencyWarning)
+
+        @sup
+        @sup_complex
+        def check(dtype):
+            dat = self.dat_dtypes[dtype]
+            datsp = self.datsp_dtypes[dtype]
+            dat2 = dat.copy()
+            dat2[:,0] = 0
+            datsp2 = self.spmatrix(dat2)
+            datbsr = bsr_matrix(dat)
+            datcsc = csc_matrix(dat)
+            datcsr = csr_matrix(dat)
+            datlil = lil_matrix(dat)
+
+            # sparse/sparse
+            assert_array_equal_dtype(dat != dat2, (datsp != datsp2).toarray())
+            # mix sparse types
+            assert_array_equal_dtype(dat != dat2, (datbsr != datsp2).toarray())
+            assert_array_equal_dtype(dat != dat2, (datcsc != datsp2).toarray())
+            assert_array_equal_dtype(dat != dat2, (datcsr != datsp2).toarray())
+            assert_array_equal_dtype(dat != dat2, (datlil != datsp2).toarray())
+            # sparse/dense
+            assert_array_equal_dtype(dat != datsp2, datsp2 != dat)
+            # sparse/scalar
+            assert_array_equal_dtype(dat != 0, (datsp != 0).toarray())
+            assert_array_equal_dtype(dat != 1, (datsp != 1).toarray())
+            assert_array_equal_dtype(0 != dat, (0 != datsp).toarray())
+            assert_array_equal_dtype(1 != dat, (1 != datsp).toarray())
+            assert_array_equal_dtype(dat != np.nan,
+                                     (datsp != np.nan).toarray())
+
+        if not isinstance(self, (TestBSR, TestCSC, TestCSR)):
+            pytest.skip("Bool comparisons only implemented for BSR, CSC, and CSR.")
+        for dtype in self.checked_dtypes:
+            check(dtype)
+
+    def test_lt(self):
+        sup = suppress_warnings()
+        sup.filter(SparseEfficiencyWarning)
+
+        @sup
+        @sup_complex
+        def check(dtype):
+            # data
+            dat = self.dat_dtypes[dtype]
+            datsp = self.datsp_dtypes[dtype]
+            dat2 = dat.copy()
+            dat2[:,0] = 0
+            datsp2 = self.spmatrix(dat2)
+            datcomplex = dat.astype(complex)
+            datcomplex[:,0] = 1 + 1j
+            datspcomplex = self.spmatrix(datcomplex)
+            datbsr = bsr_matrix(dat)
+            datcsc = csc_matrix(dat)
+            datcsr = csr_matrix(dat)
+            datlil = lil_matrix(dat)
+
+            # sparse/sparse
+            assert_array_equal_dtype(dat < dat2, (datsp < datsp2).toarray())
+            assert_array_equal_dtype(datcomplex < dat2,
+                                     (datspcomplex < datsp2).toarray())
+            # mix sparse types
+            assert_array_equal_dtype(dat < dat2, (datbsr < datsp2).toarray())
+            assert_array_equal_dtype(dat < dat2, (datcsc < datsp2).toarray())
+            assert_array_equal_dtype(dat < dat2, (datcsr < datsp2).toarray())
+            assert_array_equal_dtype(dat < dat2, (datlil < datsp2).toarray())
+
+            assert_array_equal_dtype(dat2 < dat, (datsp2 < datbsr).toarray())
+            assert_array_equal_dtype(dat2 < dat, (datsp2 < datcsc).toarray())
+            assert_array_equal_dtype(dat2 < dat, (datsp2 < datcsr).toarray())
+            assert_array_equal_dtype(dat2 < dat, (datsp2 < datlil).toarray())
+            # sparse/dense
+            assert_array_equal_dtype(dat < dat2, datsp < dat2)
+            assert_array_equal_dtype(datcomplex < dat2, datspcomplex < dat2)
+            # sparse/scalar
+            for val in [2, 1, 0, -1, -2]:
+                val = np.int64(val)  # avoid Python scalar (due to NEP 50 changes)
+                assert_array_equal_dtype((datsp < val).toarray(), dat < val)
+                assert_array_equal_dtype((val < datsp).toarray(), val < dat)
+
+            with np.errstate(invalid='ignore'):
+                assert_array_equal_dtype((datsp < np.nan).toarray(),
+                                         dat < np.nan)
+
+            # data
+            dat = self.dat_dtypes[dtype]
+            datsp = self.datsp_dtypes[dtype]
+            dat2 = dat.copy()
+            dat2[:,0] = 0
+            datsp2 = self.spmatrix(dat2)
+
+            # dense rhs
+            assert_array_equal_dtype(dat < datsp2, datsp < dat2)
+
+        if not isinstance(self, (TestBSR, TestCSC, TestCSR)):
+            pytest.skip("Bool comparisons only implemented for BSR, CSC, and CSR.")
+        for dtype in self.checked_dtypes:
+            check(dtype)
+
+    def test_gt(self):
+        sup = suppress_warnings()
+        sup.filter(SparseEfficiencyWarning)
+
+        @sup
+        @sup_complex
+        def check(dtype):
+            dat = self.dat_dtypes[dtype]
+            datsp = self.datsp_dtypes[dtype]
+            dat2 = dat.copy()
+            dat2[:,0] = 0
+            datsp2 = self.spmatrix(dat2)
+            datcomplex = dat.astype(complex)
+            datcomplex[:,0] = 1 + 1j
+            datspcomplex = self.spmatrix(datcomplex)
+            datbsr = bsr_matrix(dat)
+            datcsc = csc_matrix(dat)
+            datcsr = csr_matrix(dat)
+            datlil = lil_matrix(dat)
+
+            # sparse/sparse
+            assert_array_equal_dtype(dat > dat2, (datsp > datsp2).toarray())
+            assert_array_equal_dtype(datcomplex > dat2,
+                                     (datspcomplex > datsp2).toarray())
+            # mix sparse types
+            assert_array_equal_dtype(dat > dat2, (datbsr > datsp2).toarray())
+            assert_array_equal_dtype(dat > dat2, (datcsc > datsp2).toarray())
+            assert_array_equal_dtype(dat > dat2, (datcsr > datsp2).toarray())
+            assert_array_equal_dtype(dat > dat2, (datlil > datsp2).toarray())
+
+            assert_array_equal_dtype(dat2 > dat, (datsp2 > datbsr).toarray())
+            assert_array_equal_dtype(dat2 > dat, (datsp2 > datcsc).toarray())
+            assert_array_equal_dtype(dat2 > dat, (datsp2 > datcsr).toarray())
+            assert_array_equal_dtype(dat2 > dat, (datsp2 > datlil).toarray())
+            # sparse/dense
+            assert_array_equal_dtype(dat > dat2, datsp > dat2)
+            assert_array_equal_dtype(datcomplex > dat2, datspcomplex > dat2)
+            # sparse/scalar
+            for val in [2, 1, 0, -1, -2]:
+                val = np.int64(val)  # avoid Python scalar (due to NEP 50 changes)
+                assert_array_equal_dtype((datsp > val).toarray(), dat > val)
+                assert_array_equal_dtype((val > datsp).toarray(), val > dat)
+
+            with np.errstate(invalid='ignore'):
+                assert_array_equal_dtype((datsp > np.nan).toarray(),
+                                         dat > np.nan)
+
+            # data
+            dat = self.dat_dtypes[dtype]
+            datsp = self.datsp_dtypes[dtype]
+            dat2 = dat.copy()
+            dat2[:,0] = 0
+            datsp2 = self.spmatrix(dat2)
+
+            # dense rhs
+            assert_array_equal_dtype(dat > datsp2, datsp > dat2)
+
+        if not isinstance(self, (TestBSR, TestCSC, TestCSR)):
+            pytest.skip("Bool comparisons only implemented for BSR, CSC, and CSR.")
+        for dtype in self.checked_dtypes:
+            check(dtype)
+
+    def test_le(self):
+        sup = suppress_warnings()
+        sup.filter(SparseEfficiencyWarning)
+
+        @sup
+        @sup_complex
+        def check(dtype):
+            dat = self.dat_dtypes[dtype]
+            datsp = self.datsp_dtypes[dtype]
+            dat2 = dat.copy()
+            dat2[:,0] = 0
+            datsp2 = self.spmatrix(dat2)
+            datcomplex = dat.astype(complex)
+            datcomplex[:,0] = 1 + 1j
+            datspcomplex = self.spmatrix(datcomplex)
+            datbsr = bsr_matrix(dat)
+            datcsc = csc_matrix(dat)
+            datcsr = csr_matrix(dat)
+            datlil = lil_matrix(dat)
+
+            # sparse/sparse
+            assert_array_equal_dtype(dat <= dat2, (datsp <= datsp2).toarray())
+            assert_array_equal_dtype(datcomplex <= dat2,
+                                     (datspcomplex <= datsp2).toarray())
+            # mix sparse types
+            assert_array_equal_dtype((datbsr <= datsp2).toarray(), dat <= dat2)
+            assert_array_equal_dtype((datcsc <= datsp2).toarray(), dat <= dat2)
+            assert_array_equal_dtype((datcsr <= datsp2).toarray(), dat <= dat2)
+            assert_array_equal_dtype((datlil <= datsp2).toarray(), dat <= dat2)
+
+            assert_array_equal_dtype((datsp2 <= datbsr).toarray(), dat2 <= dat)
+            assert_array_equal_dtype((datsp2 <= datcsc).toarray(), dat2 <= dat)
+            assert_array_equal_dtype((datsp2 <= datcsr).toarray(), dat2 <= dat)
+            assert_array_equal_dtype((datsp2 <= datlil).toarray(), dat2 <= dat)
+            # sparse/dense
+            assert_array_equal_dtype(datsp <= dat2, dat <= dat2)
+            assert_array_equal_dtype(datspcomplex <= dat2, datcomplex <= dat2)
+            # sparse/scalar
+            for val in [2, 1, -1, -2]:
+                val = np.int64(val)  # avoid Python scalar (due to NEP 50 changes)
+                assert_array_equal_dtype((datsp <= val).toarray(), dat <= val)
+                assert_array_equal_dtype((val <= datsp).toarray(), val <= dat)
+
+            # data
+            dat = self.dat_dtypes[dtype]
+            datsp = self.datsp_dtypes[dtype]
+            dat2 = dat.copy()
+            dat2[:,0] = 0
+            datsp2 = self.spmatrix(dat2)
+
+            # dense rhs
+            assert_array_equal_dtype(dat <= datsp2, datsp <= dat2)
+
+        if not isinstance(self, (TestBSR, TestCSC, TestCSR)):
+            pytest.skip("Bool comparisons only implemented for BSR, CSC, and CSR.")
+        for dtype in self.checked_dtypes:
+            check(dtype)
+
+    def test_ge(self):
+        sup = suppress_warnings()
+        sup.filter(SparseEfficiencyWarning)
+
+        @sup
+        @sup_complex
+        def check(dtype):
+            dat = self.dat_dtypes[dtype]
+            datsp = self.datsp_dtypes[dtype]
+            dat2 = dat.copy()
+            dat2[:,0] = 0
+            datsp2 = self.spmatrix(dat2)
+            datcomplex = dat.astype(complex)
+            datcomplex[:,0] = 1 + 1j
+            datspcomplex = self.spmatrix(datcomplex)
+            datbsr = bsr_matrix(dat)
+            datcsc = csc_matrix(dat)
+            datcsr = csr_matrix(dat)
+            datlil = lil_matrix(dat)
+
+            # sparse/sparse
+            assert_array_equal_dtype(dat >= dat2, (datsp >= datsp2).toarray())
+            assert_array_equal_dtype(datcomplex >= dat2,
+                                     (datspcomplex >= datsp2).toarray())
+            # mix sparse types
+            assert_array_equal_dtype((datbsr >= datsp2).toarray(), dat >= dat2)
+            assert_array_equal_dtype((datcsc >= datsp2).toarray(), dat >= dat2)
+            assert_array_equal_dtype((datcsr >= datsp2).toarray(), dat >= dat2)
+            assert_array_equal_dtype((datlil >= datsp2).toarray(), dat >= dat2)
+
+            assert_array_equal_dtype((datsp2 >= datbsr).toarray(), dat2 >= dat)
+            assert_array_equal_dtype((datsp2 >= datcsc).toarray(), dat2 >= dat)
+            assert_array_equal_dtype((datsp2 >= datcsr).toarray(), dat2 >= dat)
+            assert_array_equal_dtype((datsp2 >= datlil).toarray(), dat2 >= dat)
+            # sparse/dense
+            assert_array_equal_dtype(datsp >= dat2, dat >= dat2)
+            assert_array_equal_dtype(datspcomplex >= dat2, datcomplex >= dat2)
+            # sparse/scalar
+            for val in [2, 1, -1, -2]:
+                val = np.int64(val)  # avoid Python scalar (due to NEP 50 changes)
+                assert_array_equal_dtype((datsp >= val).toarray(), dat >= val)
+                assert_array_equal_dtype((val >= datsp).toarray(), val >= dat)
+
+            # dense data
+            dat = self.dat_dtypes[dtype]
+            datsp = self.datsp_dtypes[dtype]
+            dat2 = dat.copy()
+            dat2[:,0] = 0
+            datsp2 = self.spmatrix(dat2)
+
+            # dense rhs
+            assert_array_equal_dtype(dat >= datsp2, datsp >= dat2)
+
+        if not isinstance(self, (TestBSR, TestCSC, TestCSR)):
+            pytest.skip("Bool comparisons only implemented for BSR, CSC, and CSR.")
+        for dtype in self.checked_dtypes:
+            check(dtype)
+
+    def test_empty(self):
+        # create empty matrices
+        assert_equal(self.spmatrix((3, 3)).toarray(), zeros((3, 3)))
+        assert_equal(self.spmatrix((3, 3)).nnz, 0)
+        assert_equal(self.spmatrix((3, 3)).count_nonzero(), 0)
+
+    def test_count_nonzero(self):
+        expected = np.count_nonzero(self.datsp.toarray())
+        assert_equal(self.datsp.count_nonzero(), expected)
+        assert_equal(self.datsp.T.count_nonzero(), expected)
+
+    def test_invalid_shapes(self):
+        assert_raises(ValueError, self.spmatrix, (-1,3))
+        assert_raises(ValueError, self.spmatrix, (3,-1))
+        assert_raises(ValueError, self.spmatrix, (-1,-1))
+
+    def test_repr(self):
+        repr(self.datsp)
+
+    def test_str(self):
+        str(self.datsp)
+
+    def test_empty_arithmetic(self):
+        # Test manipulating empty matrices. Fails in SciPy SVN <= r1768
+        shape = (5, 5)
+        for mytype in [np.dtype('int32'), np.dtype('float32'),
+                np.dtype('float64'), np.dtype('complex64'),
+                np.dtype('complex128')]:
+            a = self.spmatrix(shape, dtype=mytype)
+            b = a + a
+            c = 2 * a
+            d = a * a.tocsc()
+            e = a * a.tocsr()
+            f = a * a.tocoo()
+            for m in [a, b, c, d, e, f]:
+                assert_equal(m.A, a.A * a.A)
+                # These fail in all revisions <= r1768:
+                assert_equal(m.dtype, mytype)
+                assert_equal(m.A.dtype, mytype)
+
+    def test_abs(self):
+        A = array([[-1, 0, 17], [0, -5, 0], [1, -4, 0], [0, 0, 0]], 'd')
+        assert_equal(abs(A), abs(self.spmatrix(A)).toarray())
+
+    def test_round(self):
+        decimal = 1
+        A = array([[-1.35, 0.56], [17.25, -5.98]], 'd')
+        assert_equal(np.around(A, decimals=decimal),
+                     round(self.spmatrix(A), ndigits=decimal).toarray())
+
+    def test_elementwise_power(self):
+        A = array([[-4, -3, -2], [-1, 0, 1], [2, 3, 4]], 'd')
+        assert_equal(np.power(A, 2), self.spmatrix(A).power(2).toarray())
+
+        # power() is an element-wise power function, so the exponent must
+        # be a scalar
+        assert_raises(NotImplementedError, self.spmatrix(A).power, A)
+
+    def test_neg(self):
+        A = array([[-1, 0, 17], [0, -5, 0], [1, -4, 0], [0, 0, 0]], 'd')
+        assert_equal(-A, (-self.spmatrix(A)).toarray())
+
+        # see gh-5843
+        A = array([[True, False, False], [False, False, True]])
+        assert_raises(NotImplementedError, self.spmatrix(A).__neg__)
+
+    def test_real(self):
+        D = array([[1 + 3j, 2 - 4j]])
+        A = self.spmatrix(D)
+        assert_equal(A.real.toarray(), D.real)
+
+    def test_imag(self):
+        D = array([[1 + 3j, 2 - 4j]])
+        A = self.spmatrix(D)
+        assert_equal(A.imag.toarray(), D.imag)
+
+    def test_diagonal(self):
+        # Does the matrix's .diagonal() method work?
+        mats = []
+        mats.append([[1,0,2]])
+        mats.append([[1],[0],[2]])
+        mats.append([[0,1],[0,2],[0,3]])
+        mats.append([[0,0,1],[0,0,2],[0,3,0]])
+        mats.append([[1,0],[0,0]])
+
+        mats.append(kron(mats[0],[[1,2]]))
+        mats.append(kron(mats[0],[[1],[2]]))
+        mats.append(kron(mats[1],[[1,2],[3,4]]))
+        mats.append(kron(mats[2],[[1,2],[3,4]]))
+        mats.append(kron(mats[3],[[1,2],[3,4]]))
+        mats.append(kron(mats[3],[[1,2,3,4]]))
+
+        for m in mats:
+            rows, cols = array(m).shape
+            sparse_mat = self.spmatrix(m)
+            for k in range(-rows-1, cols+2):
+                assert_equal(sparse_mat.diagonal(k=k), diag(m, k=k))
+            # Test for k beyond boundaries (issue #11949)
+            assert_equal(sparse_mat.diagonal(k=10), diag(m, k=10))
+            assert_equal(sparse_mat.diagonal(k=-99), diag(m, k=-99))
+
+        # Test all-zero matrix.
+        assert_equal(self.spmatrix((40, 16130)).diagonal(), np.zeros(40))
+        # Test empty matrix
+        # https://github.com/scipy/scipy/issues/11949
+        assert_equal(self.spmatrix((0, 0)).diagonal(), np.empty(0))
+        assert_equal(self.spmatrix((15, 0)).diagonal(), np.empty(0))
+        assert_equal(self.spmatrix((0, 5)).diagonal(10), np.empty(0))
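+        # As with np.diag, an offset k that lies entirely outside the matrix
+        # selects no elements, so .diagonal(k) returns an empty array rather
+        # than raising (gh-11949).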
+
+    def test_trace(self):
+        # For square matrix
+        A = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+        B = self.spmatrix(A)
+        for k in range(-2, 3):
+            assert_equal(A.trace(offset=k), B.trace(offset=k))
+
+        # For rectangular matrix
+        A = np.array([[1, 2, 3], [4, 5, 6]])
+        B = self.spmatrix(A)
+        for k in range(-1, 3):
+            assert_equal(A.trace(offset=k), B.trace(offset=k))
+
+    def test_reshape(self):
+        # This first example is taken from the lil_matrix reshaping test.
+        x = self.spmatrix([[1, 0, 7], [0, 0, 0], [0, 3, 0], [0, 0, 5]])
+        for order in ['C', 'F']:
+            for s in [(12, 1), (1, 12)]:
+                assert_array_equal(x.reshape(s, order=order).toarray(),
+                                   x.toarray().reshape(s, order=order))
+
+        # This example is taken from the stackoverflow answer at
+        # https://stackoverflow.com/q/16511879
+        x = self.spmatrix([[0, 10, 0, 0], [0, 0, 0, 0], [0, 20, 30, 40]])
+        y = x.reshape((2, 6))  # Default order is 'C'
+        desired = [[0, 10, 0, 0, 0, 0], [0, 0, 0, 20, 30, 40]]
+        assert_array_equal(y.A, desired)
+
+        # Reshape with negative indexes
+        y = x.reshape((2, -1))
+        assert_array_equal(y.A, desired)
+        y = x.reshape((-1, 6))
+        assert_array_equal(y.A, desired)
+        assert_raises(ValueError, x.reshape, (-1, -1))
+
+        # Reshape with star args
+        y = x.reshape(2, 6)
+        assert_array_equal(y.A, desired)
+        assert_raises(TypeError, x.reshape, 2, 6, not_an_arg=1)
+
+        # Reshape with the same size is a no-op unless copy=True
+        y = x.reshape((3, 4))
+        assert_(y is x)
+        y = x.reshape((3, 4), copy=True)
+        assert_(y is not x)
+
+        # Ensure reshape did not alter original size
+        assert_array_equal(x.shape, (3, 4))
+
+        # Reshape in place
+        x.shape = (2, 6)
+        assert_array_equal(x.A, desired)
+
+        # Reshape to bad ndim
+        assert_raises(ValueError, x.reshape, (x.size,))
+        assert_raises(ValueError, x.reshape, (1, x.size, 1))
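+        # In short (a sketch of the semantics checked above): at most one
+        # target dimension may be -1, and it is then inferred from the total
+        # size, e.g. x.reshape((2, -1)) on a 12-element matrix infers 6.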
+
+    @pytest.mark.slow
+    def test_setdiag_comprehensive(self):
+        def dense_setdiag(a, v, k):
+            v = np.asarray(v)
+            if k >= 0:
+                n = min(a.shape[0], a.shape[1] - k)
+                if v.ndim != 0:
+                    n = min(n, len(v))
+                    v = v[:n]
+                i = np.arange(0, n)
+                j = np.arange(k, k + n)
+                a[i,j] = v
+            elif k < 0:
+                dense_setdiag(a.T, v, -k)
+
+        def check_setdiag(a, b, k):
+            # Check setting diagonal using a scalar, a vector of
+            # correct length, and too short or too long vectors
+            for r in [-1, len(np.diag(a, k)), 2, 30]:
+                if r < 0:
+                    v = np.random.choice(range(1, 20))
+                else:
+                    v = np.random.randint(1, 20, size=r)
+
+                dense_setdiag(a, v, k)
+                with suppress_warnings() as sup:
+                    sup.filter(SparseEfficiencyWarning, "Changing the sparsity structure of a cs[cr]_matrix is expensive")
+                    b.setdiag(v, k)
+
+                # check that dense_setdiag worked
+                d = np.diag(a, k)
+                if np.asarray(v).ndim == 0:
+                    assert_array_equal(d, v, err_msg="%s %d" % (msg, r))
+                else:
+                    n = min(len(d), len(v))
+                    assert_array_equal(d[:n], v[:n], err_msg="%s %d" % (msg, r))
+                # check that sparse setdiag worked
+                assert_array_equal(b.A, a, err_msg="%s %d" % (msg, r))
+
+        # comprehensive test
+        np.random.seed(1234)
+        shapes = [(0,5), (5,0), (1,5), (5,1), (5,5)]
+        for dtype in [np.int8, np.float64]:
+            for m,n in shapes:
+                ks = np.arange(-m+1, n-1)
+                for k in ks:
+                    msg = repr((dtype, m, n, k))
+                    a = np.zeros((m, n), dtype=dtype)
+                    b = self.spmatrix((m, n), dtype=dtype)
+
+                    check_setdiag(a, b, k)
+
+                    # check overwriting etc
+                    for k2 in np.random.choice(ks, size=min(len(ks), 5)):
+                        check_setdiag(a, b, k2)
+
+    def test_setdiag(self):
+        # simple test cases
+        m = self.spmatrix(np.eye(3))
+        m2 = self.spmatrix((4, 4))
+        values = [3, 2, 1]
+        with suppress_warnings() as sup:
+            sup.filter(SparseEfficiencyWarning,
+                       "Changing the sparsity structure of a cs[cr]_matrix is expensive")
+            assert_raises(ValueError, m.setdiag, values, k=4)
+            m.setdiag(values)
+            assert_array_equal(m.diagonal(), values)
+            m.setdiag(values, k=1)
+            assert_array_equal(m.A, np.array([[3, 3, 0],
+                                              [0, 2, 2],
+                                              [0, 0, 1]]))
+            m.setdiag(values, k=-2)
+            assert_array_equal(m.A, np.array([[3, 3, 0],
+                                              [0, 2, 2],
+                                              [3, 0, 1]]))
+            m.setdiag((9,), k=2)
+            assert_array_equal(m.A[0,2], 9)
+            m.setdiag((9,), k=-2)
+            assert_array_equal(m.A[2,0], 9)
+            # test short values on an empty matrix
+            m2.setdiag([1], k=2)
+            assert_array_equal(m2.A[0], [0, 0, 1, 0])
+            # test overwriting that same diagonal
+            m2.setdiag([1, 1], k=2)
+            assert_array_equal(m2.A[:2], [[0, 0, 1, 0],
+                                          [0, 0, 0, 1]])
+
+    def test_nonzero(self):
+        A = array([[1, 0, 1],[0, 1, 1],[0, 0, 1]])
+        Asp = self.spmatrix(A)
+
+        A_nz = set([tuple(ij) for ij in transpose(A.nonzero())])
+        Asp_nz = set([tuple(ij) for ij in transpose(Asp.nonzero())])
+
+        assert_equal(A_nz, Asp_nz)
+
+    def test_numpy_nonzero(self):
+        # See gh-5987
+        A = array([[1, 0, 1], [0, 1, 1], [0, 0, 1]])
+        Asp = self.spmatrix(A)
+
+        A_nz = set([tuple(ij) for ij in transpose(np.nonzero(A))])
+        Asp_nz = set([tuple(ij) for ij in transpose(np.nonzero(Asp))])
+
+        assert_equal(A_nz, Asp_nz)
+
+    def test_getrow(self):
+        assert_array_equal(self.datsp.getrow(1).toarray(), self.dat[[1], :])
+        assert_array_equal(self.datsp.getrow(-1).toarray(), self.dat[[-1], :])
+
+    def test_getcol(self):
+        assert_array_equal(self.datsp.getcol(1).toarray(), self.dat[:, [1]])
+        assert_array_equal(self.datsp.getcol(-1).toarray(), self.dat[:, [-1]])
+
+    def test_sum(self):
+        np.random.seed(1234)
+        dat_1 = matrix([[0, 1, 2],
+                        [3, -4, 5],
+                        [-6, 7, 9]])
+        dat_2 = np.random.rand(5, 5)
+        dat_3 = np.array([[]])
+        dat_4 = np.zeros((40, 40))
+        dat_5 = sparse.rand(5, 5, density=1e-2).A
+        matrices = [dat_1, dat_2, dat_3, dat_4, dat_5]
+
+        def check(dtype, j):
+            dat = matrix(matrices[j], dtype=dtype)
+            datsp = self.spmatrix(dat, dtype=dtype)
+            with np.errstate(over='ignore'):
+                assert_array_almost_equal(dat.sum(), datsp.sum())
+                assert_equal(dat.sum().dtype, datsp.sum().dtype)
+                assert_(np.isscalar(datsp.sum(axis=None)))
+                assert_array_almost_equal(dat.sum(axis=None),
+                                          datsp.sum(axis=None))
+                assert_equal(dat.sum(axis=None).dtype,
+                             datsp.sum(axis=None).dtype)
+                assert_array_almost_equal(dat.sum(axis=0), datsp.sum(axis=0))
+                assert_equal(dat.sum(axis=0).dtype, datsp.sum(axis=0).dtype)
+                assert_array_almost_equal(dat.sum(axis=1), datsp.sum(axis=1))
+                assert_equal(dat.sum(axis=1).dtype, datsp.sum(axis=1).dtype)
+                assert_array_almost_equal(dat.sum(axis=-2), datsp.sum(axis=-2))
+                assert_equal(dat.sum(axis=-2).dtype, datsp.sum(axis=-2).dtype)
+                assert_array_almost_equal(dat.sum(axis=-1), datsp.sum(axis=-1))
+                assert_equal(dat.sum(axis=-1).dtype, datsp.sum(axis=-1).dtype)
+
+        for dtype in self.checked_dtypes:
+            for j in range(len(matrices)):
+                check(dtype, j)
+
+    def test_sum_invalid_params(self):
+        out = np.zeros((1, 3))
+        dat = array([[0, 1, 2],
+                     [3, -4, 5],
+                     [-6, 7, 9]])
+        datsp = self.spmatrix(dat)
+
+        assert_raises(ValueError, datsp.sum, axis=3)
+        assert_raises(TypeError, datsp.sum, axis=(0, 1))
+        assert_raises(TypeError, datsp.sum, axis=1.5)
+        assert_raises(ValueError, datsp.sum, axis=1, out=out)
+
+    def test_sum_dtype(self):
+        dat = array([[0, 1, 2],
+                     [3, -4, 5],
+                     [-6, 7, 9]])
+        datsp = self.spmatrix(dat)
+
+        def check(dtype):
+            dat_mean = dat.mean(dtype=dtype)
+            datsp_mean = datsp.mean(dtype=dtype)
+
+            assert_array_almost_equal(dat_mean, datsp_mean)
+            assert_equal(dat_mean.dtype, datsp_mean.dtype)
+
+        for dtype in self.checked_dtypes:
+            check(dtype)
+
+    def test_sum_out(self):
+        dat = array([[0, 1, 2],
+                     [3, -4, 5],
+                     [-6, 7, 9]])
+        datsp = self.spmatrix(dat)
+
+        dat_out = array([[0]])
+        datsp_out = matrix([[0]])
+
+        dat.sum(out=dat_out, keepdims=True)
+        datsp.sum(out=datsp_out)
+        assert_array_almost_equal(dat_out, datsp_out)
+
+        dat_out = np.zeros((3, 1))
+        datsp_out = asmatrix(np.zeros((3, 1)))
+
+        dat.sum(axis=1, out=dat_out, keepdims=True)
+        datsp.sum(axis=1, out=datsp_out)
+        assert_array_almost_equal(dat_out, datsp_out)
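+        # Note on keepdims above: ndarray.sum(axis=1) drops the reduced axis
+        # (shape (3,)), while the sparse .sum(axis=1) returns a (3, 1)
+        # np.matrix, so the dense reference needs keepdims=True for both
+        # `out` arguments to have matching shapes.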
+
+    def test_numpy_sum(self):
+        # See gh-5987
+        dat = array([[0, 1, 2],
+                     [3, -4, 5],
+                     [-6, 7, 9]])
+        datsp = self.spmatrix(dat)
+
+        dat_mean = np.sum(dat)
+        datsp_mean = np.sum(datsp)
+
+        assert_array_almost_equal(dat_mean, datsp_mean)
+        assert_equal(dat_mean.dtype, datsp_mean.dtype)
+
+    def test_mean(self):
+        def check(dtype):
+            dat = array([[0, 1, 2],
+                         [3, 4, 5],
+                         [6, 7, 9]], dtype=dtype)
+            datsp = self.spmatrix(dat, dtype=dtype)
+
+            assert_array_almost_equal(dat.mean(), datsp.mean())
+            assert_equal(dat.mean().dtype, datsp.mean().dtype)
+            assert_(np.isscalar(datsp.mean(axis=None)))
+            assert_array_almost_equal(
+                dat.mean(axis=None, keepdims=True), datsp.mean(axis=None)
+            )
+            assert_equal(dat.mean(axis=None).dtype, datsp.mean(axis=None).dtype)
+            assert_array_almost_equal(
+                dat.mean(axis=0, keepdims=True), datsp.mean(axis=0)
+            )
+            assert_equal(dat.mean(axis=0).dtype, datsp.mean(axis=0).dtype)
+            assert_array_almost_equal(
+                dat.mean(axis=1, keepdims=True), datsp.mean(axis=1)
+            )
+            assert_equal(dat.mean(axis=1).dtype, datsp.mean(axis=1).dtype)
+            assert_array_almost_equal(
+                dat.mean(axis=-2, keepdims=True), datsp.mean(axis=-2)
+            )
+            assert_equal(dat.mean(axis=-2).dtype, datsp.mean(axis=-2).dtype)
+            assert_array_almost_equal(
+                dat.mean(axis=-1, keepdims=True), datsp.mean(axis=-1)
+            )
+            assert_equal(dat.mean(axis=-1).dtype, datsp.mean(axis=-1).dtype)
+
+        for dtype in self.checked_dtypes:
+            check(dtype)
+
+    def test_mean_invalid_params(self):
+        out = asmatrix(np.zeros((1, 3)))
+        dat = array([[0, 1, 2],
+                     [3, -4, 5],
+                     [-6, 7, 9]])
+        datsp = self.spmatrix(dat)
+
+        assert_raises(ValueError, datsp.mean, axis=3)
+        assert_raises(TypeError, datsp.mean, axis=(0, 1))
+        assert_raises(TypeError, datsp.mean, axis=1.5)
+        assert_raises(ValueError, datsp.mean, axis=1, out=out)
+
+    def test_mean_dtype(self):
+        dat = array([[0, 1, 2],
+                     [3, -4, 5],
+                     [-6, 7, 9]])
+        datsp = self.spmatrix(dat)
+
+        def check(dtype):
+            dat_mean = dat.mean(dtype=dtype)
+            datsp_mean = datsp.mean(dtype=dtype)
+
+            assert_array_almost_equal(dat_mean, datsp_mean)
+            assert_equal(dat_mean.dtype, datsp_mean.dtype)
+
+        for dtype in self.checked_dtypes:
+            check(dtype)
+
+    def test_mean_out(self):
+        dat = array([[0, 1, 2],
+                     [3, -4, 5],
+                     [-6, 7, 9]])
+        datsp = self.spmatrix(dat)
+
+        dat_out = array([[0]])
+        datsp_out = matrix([[0]])
+
+        dat.mean(out=dat_out, keepdims=True)
+        datsp.mean(out=datsp_out)
+        assert_array_almost_equal(dat_out, datsp_out)
+
+        dat_out = np.zeros((3, 1))
+        datsp_out = matrix(np.zeros((3, 1)))
+
+        dat.mean(axis=1, out=dat_out, keepdims=True)
+        datsp.mean(axis=1, out=datsp_out)
+        assert_array_almost_equal(dat_out, datsp_out)
+
+    def test_numpy_mean(self):
+        # See gh-5987
+        dat = array([[0, 1, 2],
+                     [3, -4, 5],
+                     [-6, 7, 9]])
+        datsp = self.spmatrix(dat)
+
+        dat_mean = np.mean(dat)
+        datsp_mean = np.mean(datsp)
+
+        assert_array_almost_equal(dat_mean, datsp_mean)
+        assert_equal(dat_mean.dtype, datsp_mean.dtype)
+
+    def test_expm(self):
+        M = array([[1, 0, 2], [0, 0, 3], [-4, 5, 6]], float)
+        sM = self.spmatrix(M, shape=(3,3), dtype=float)
+        Mexp = scipy.linalg.expm(M)
+
+        N = array([[3., 0., 1.], [0., 2., 0.], [0., 0., 0.]])
+        sN = self.spmatrix(N, shape=(3,3), dtype=float)
+        Nexp = scipy.linalg.expm(N)
+
+        with suppress_warnings() as sup:
+            sup.filter(SparseEfficiencyWarning,
+                       "splu converted its input to CSC format")
+            sup.filter(SparseEfficiencyWarning,
+                       "spsolve is more efficient when sparse b is in the CSC matrix format")
+            sup.filter(SparseEfficiencyWarning,
+                       "spsolve requires A be CSC or CSR matrix format")
+            sMexp = expm(sM).toarray()
+            sNexp = expm(sN).toarray()
+
+        assert_array_almost_equal((sMexp - Mexp), zeros((3, 3)))
+        assert_array_almost_equal((sNexp - Nexp), zeros((3, 3)))
+
+    def test_inv(self):
+        def check(dtype):
+            M = array([[1, 0, 2], [0, 0, 3], [-4, 5, 6]], dtype)
+            with suppress_warnings() as sup:
+                sup.filter(SparseEfficiencyWarning,
+                           "spsolve requires A be CSC or CSR matrix format")
+                sup.filter(SparseEfficiencyWarning,
+                           "spsolve is more efficient when sparse b is in the CSC matrix format")
+                sup.filter(SparseEfficiencyWarning,
+                           "splu converted its input to CSC format")
+                sM = self.spmatrix(M, shape=(3,3), dtype=dtype)
+                sMinv = inv(sM)
+            assert_array_almost_equal(sMinv.dot(sM).toarray(), np.eye(3))
+            assert_raises(TypeError, inv, M)
+        for dtype in [float]:
+            check(dtype)
+
+    @sup_complex
+    def test_from_array(self):
+        A = array([[1,0,0],[2,3,4],[0,5,0],[0,0,0]])
+        assert_array_equal(self.spmatrix(A).toarray(), A)
+
+        A = array([[1.0 + 3j, 0, 0],
+                   [0, 2.0 + 5j, 0],
+                   [0, 0, 0]])
+        assert_array_equal(self.spmatrix(A).toarray(), A)
+        assert_array_equal(self.spmatrix(A, dtype='int16').toarray(), A.astype('int16'))
+
+    @sup_complex
+    def test_from_matrix(self):
+        A = matrix([[1, 0, 0], [2, 3, 4], [0, 5, 0], [0, 0, 0]])
+        assert_array_equal(self.spmatrix(A).todense(), A)
+
+        A = matrix([[1.0 + 3j, 0, 0],
+                    [0, 2.0 + 5j, 0],
+                    [0, 0, 0]])
+        assert_array_equal(self.spmatrix(A).todense(), A)
+        assert_array_equal(
+            self.spmatrix(A, dtype='int16').todense(), A.astype('int16')
+        )
+
+    @sup_complex
+    def test_from_list(self):
+        A = [[1,0,0],[2,3,4],[0,5,0],[0,0,0]]
+        assert_array_equal(self.spmatrix(A).toarray(), A)
+
+        A = [[1.0 + 3j, 0, 0],
+             [0, 2.0 + 5j, 0],
+             [0, 0, 0]]
+        assert_array_equal(self.spmatrix(A).toarray(), array(A))
+        assert_array_equal(
+            self.spmatrix(A, dtype='int16').toarray(), array(A).astype('int16')
+        )
+
+    @sup_complex
+    def test_from_sparse(self):
+        D = array([[1,0,0],[2,3,4],[0,5,0],[0,0,0]])
+        S = csr_matrix(D)
+        assert_array_equal(self.spmatrix(S).toarray(), D)
+        S = self.spmatrix(D)
+        assert_array_equal(self.spmatrix(S).toarray(), D)
+
+        D = array([[1.0 + 3j, 0, 0],
+                   [0, 2.0 + 5j, 0],
+                   [0, 0, 0]])
+        S = csr_matrix(D)
+        assert_array_equal(self.spmatrix(S).toarray(), D)
+        assert_array_equal(self.spmatrix(S, dtype='int16').toarray(), D.astype('int16'))
+        S = self.spmatrix(D)
+        assert_array_equal(self.spmatrix(S).toarray(), D)
+        assert_array_equal(self.spmatrix(S, dtype='int16').toarray(), D.astype('int16'))
+
+    # def test_array(self):
+    #    """test array(A) where A is in sparse format"""
+    #    assert_equal( array(self.datsp), self.dat )
+
+    def test_todense(self):
+        # Check C- or F-contiguous (default).
+        chk = self.datsp.todense()
+        assert isinstance(chk, np.matrix)
+        assert_array_equal(chk, self.dat)
+        assert_(chk.flags.c_contiguous != chk.flags.f_contiguous)
+        # Check C-contiguous (with arg).
+        chk = self.datsp.todense(order='C')
+        assert_array_equal(chk, self.dat)
+        assert_(chk.flags.c_contiguous)
+        assert_(not chk.flags.f_contiguous)
+        # Check F-contiguous (with arg).
+        chk = self.datsp.todense(order='F')
+        assert_array_equal(chk, self.dat)
+        assert_(not chk.flags.c_contiguous)
+        assert_(chk.flags.f_contiguous)
+        # Check with out argument (array).
+        out = np.zeros(self.datsp.shape, dtype=self.datsp.dtype)
+        chk = self.datsp.todense(out=out)
+        assert_array_equal(self.dat, out)
+        assert_array_equal(self.dat, chk)
+        assert_(chk.base is out)
+        # Check with out array (matrix).
+        out = asmatrix(np.zeros(self.datsp.shape, dtype=self.datsp.dtype))
+        chk = self.datsp.todense(out=out)
+        assert_array_equal(self.dat, out)
+        assert_array_equal(self.dat, chk)
+        assert_(chk is out)
+        a = array([[1.,2.,3.]])
+        dense_dot_dense = a @ self.dat
+        check = a * self.datsp.todense()
+        assert_array_equal(dense_dot_dense, check)
+        b = array([[1.,2.,3.,4.]]).T
+        dense_dot_dense = self.dat @ b
+        check2 = self.datsp.todense() @ b
+        assert_array_equal(dense_dot_dense, check2)
+        # Check bool data works.
+        spbool = self.spmatrix(self.dat, dtype=bool)
+        matbool = self.dat.astype(bool)
+        assert_array_equal(spbool.todense(), matbool)
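+        # The distinction exercised here and in test_toarray below: .todense()
+        # returns an np.matrix, for which `*` means matrix multiplication,
+        # while .toarray() returns a plain ndarray, for which `*` is
+        # element-wise.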
+
+    def test_toarray(self):
+        # Check C- or F-contiguous (default).
+        dat = asarray(self.dat)
+        chk = self.datsp.toarray()
+        assert_array_equal(chk, dat)
+        assert_(chk.flags.c_contiguous != chk.flags.f_contiguous)
+        # Check C-contiguous (with arg).
+        chk = self.datsp.toarray(order='C')
+        assert_array_equal(chk, dat)
+        assert_(chk.flags.c_contiguous)
+        assert_(not chk.flags.f_contiguous)
+        # Check F-contiguous (with arg).
+        chk = self.datsp.toarray(order='F')
+        assert_array_equal(chk, dat)
+        assert_(not chk.flags.c_contiguous)
+        assert_(chk.flags.f_contiguous)
+        # Check with output arg.
+        out = np.zeros(self.datsp.shape, dtype=self.datsp.dtype)
+        self.datsp.toarray(out=out)
+        assert_array_equal(chk, dat)
+        # Check that things are fine when we don't initialize with zeros.
+        out[...] = 1.
+        self.datsp.toarray(out=out)
+        assert_array_equal(chk, dat)
+        a = array([1.,2.,3.])
+        dense_dot_dense = dot(a, dat)
+        check = dot(a, self.datsp.toarray())
+        assert_array_equal(dense_dot_dense, check)
+        b = array([1.,2.,3.,4.])
+        dense_dot_dense = dot(dat, b)
+        check2 = dot(self.datsp.toarray(), b)
+        assert_array_equal(dense_dot_dense, check2)
+        # Check bool data works.
+        spbool = self.spmatrix(self.dat, dtype=bool)
+        arrbool = dat.astype(bool)
+        assert_array_equal(spbool.toarray(), arrbool)
+
+    @sup_complex
+    def test_astype(self):
+        D = array([[2.0 + 3j, 0, 0],
+                   [0, 4.0 + 5j, 0],
+                   [0, 0, 0]])
+        S = self.spmatrix(D)
+
+        for x in supported_dtypes:
+            # Check correctly casted
+            D_casted = D.astype(x)
+            for copy in (True, False):
+                S_casted = S.astype(x, copy=copy)
+                assert_equal(S_casted.dtype, D_casted.dtype)  # correct type
+                assert_equal(S_casted.toarray(), D_casted)    # correct values
+                assert_equal(S_casted.format, S.format)       # format preserved
+            # Check correctly copied
+            assert_(S_casted.astype(x, copy=False) is S_casted)
+            S_copied = S_casted.astype(x, copy=True)
+            assert_(S_copied is not S_casted)
+
+            def check_equal_but_not_same_array_attribute(attribute):
+                a = getattr(S_casted, attribute)
+                b = getattr(S_copied, attribute)
+                assert_array_equal(a, b)
+                assert_(a is not b)
+                i = (0,) * b.ndim
+                b_i = b[i]
+                b[i] = not b[i]
+                assert_(a[i] != b[i])
+                b[i] = b_i
+
+            if S_casted.format in ('csr', 'csc', 'bsr'):
+                for attribute in ('indices', 'indptr', 'data'):
+                    check_equal_but_not_same_array_attribute(attribute)
+            elif S_casted.format == 'coo':
+                for attribute in ('row', 'col', 'data'):
+                    check_equal_but_not_same_array_attribute(attribute)
+            elif S_casted.format == 'dia':
+                for attribute in ('offsets', 'data'):
+                    check_equal_but_not_same_array_attribute(attribute)
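+        # Summary of the copy semantics verified above: astype(x, copy=False)
+        # may return self when no cast is needed, while copy=True must
+        # duplicate the underlying index and data arrays, which the attribute
+        # check confirms by mutating one copy and watching the other.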
+
+    def test_asfptype(self):
+        A = self.spmatrix(arange(6,dtype='int32').reshape(2,3))
+
+        assert_equal(A.dtype, np.dtype('int32'))
+        assert_equal(A.asfptype().dtype, np.dtype('float64'))
+        assert_equal(A.asfptype().format, A.format)
+        assert_equal(A.astype('int16').asfptype().dtype, np.dtype('float32'))
+        assert_equal(A.astype('complex128').asfptype().dtype, np.dtype('complex128'))
+
+        B = A.asfptype()
+        C = B.asfptype()
+        assert_(B is C)
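+        # The upcast rule exercised above, in brief: integer matrices are
+        # promoted to the smallest float type that can represent them
+        # (int16 -> float32, int32 -> float64), and matrices that are already
+        # floating point or complex are returned as-is, hence B is C.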
+
+    def test_mul_scalar(self):
+        def check(dtype):
+            dat = self.dat_dtypes[dtype]
+            datsp = self.datsp_dtypes[dtype]
+
+            assert_array_equal(dat*2, (datsp*2).toarray())
+            assert_array_equal(dat*17.3, (datsp*17.3).toarray())
+
+        for dtype in self.math_dtypes:
+            check(dtype)
+
+    def test_rmul_scalar(self):
+        def check(dtype):
+            dat = self.dat_dtypes[dtype]
+            datsp = self.datsp_dtypes[dtype]
+
+            assert_array_equal(2*dat, (2*datsp).toarray())
+            assert_array_equal(17.3*dat, (17.3*datsp).toarray())
+
+        for dtype in self.math_dtypes:
+            check(dtype)
+
+    # github issue #15210
+    def test_rmul_scalar_type_error(self):
+        datsp = self.datsp_dtypes[np.float64]
+        with assert_raises(TypeError):
+            None * datsp
+
+    def test_add(self):
+        def check(dtype):
+            dat = self.dat_dtypes[dtype]
+            datsp = self.datsp_dtypes[dtype]
+
+            a = dat.copy()
+            a[0,2] = 2.0
+            b = datsp
+            c = b + a
+            assert_array_equal(c, b.toarray() + a)
+
+            c = b + b.tocsr()
+            assert_array_equal(c.toarray(),
+                               b.toarray() + b.toarray())
+
+            # test broadcasting
+            c = b + a[0]
+            assert_array_equal(c, b.toarray() + a[0])
+
+        for dtype in self.math_dtypes:
+            check(dtype)
+
+    def test_radd(self):
+        def check(dtype):
+            dat = self.dat_dtypes[dtype]
+            datsp = self.datsp_dtypes[dtype]
+
+            a = dat.copy()
+            a[0,2] = 2.0
+            b = datsp
+            c = a + b
+            assert_array_equal(c, a + b.toarray())
+
+        for dtype in self.math_dtypes:
+            check(dtype)
+
+    def test_sub(self):
+        def check(dtype):
+            dat = self.dat_dtypes[dtype]
+            datsp = self.datsp_dtypes[dtype]
+
+            assert_array_equal((datsp - datsp).toarray(), np.zeros((3, 4)))
+            assert_array_equal((datsp - 0).toarray(), dat)
+
+            A = self.spmatrix(
+                np.array([[1, 0, 0, 4], [-1, 0, 0, 0], [0, 8, 0, -5]], 'd')
+            )
+            assert_array_equal((datsp - A).toarray(), dat - A.toarray())
+            assert_array_equal((A - datsp).toarray(), A.toarray() - dat)
+
+            # test broadcasting
+            assert_array_equal(datsp - dat[0], dat - dat[0])
+
+        for dtype in self.math_dtypes:
+            if dtype == np.dtype('bool'):
+                # boolean array subtraction deprecated in 1.9.0
+                continue
+
+            check(dtype)
+
+    def test_rsub(self):
+        def check(dtype):
+            dat = self.dat_dtypes[dtype]
+            datsp = self.datsp_dtypes[dtype]
+
+            assert_array_equal((dat - datsp),[[0,0,0,0],[0,0,0,0],[0,0,0,0]])
+            assert_array_equal((datsp - dat),[[0,0,0,0],[0,0,0,0],[0,0,0,0]])
+            assert_array_equal((0 - datsp).toarray(), -dat)
+
+            A = self.spmatrix(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d'))
+            assert_array_equal((dat - A), dat - A.toarray())
+            assert_array_equal((A - dat), A.toarray() - dat)
+            assert_array_equal(A.toarray() - datsp, A.toarray() - dat)
+            assert_array_equal(datsp - A.toarray(), dat - A.toarray())
+
+            # test broadcasting
+            assert_array_equal(dat[0] - datsp, dat[0] - dat)
+
+        for dtype in self.math_dtypes:
+            if dtype == np.dtype('bool'):
+                # boolean array subtraction deprecated in 1.9.0
+                continue
+
+            check(dtype)
+
+    def test_add0(self):
+        def check(dtype):
+            dat = self.dat_dtypes[dtype]
+            datsp = self.datsp_dtypes[dtype]
+
+            # Adding 0 to a sparse matrix
+            assert_array_equal((datsp + 0).toarray(), dat)
+            # use sum (which takes 0 as a starting value)
+            sumS = sum([k * datsp for k in range(1, 3)])
+            sumD = sum([k * dat for k in range(1, 3)])
+            assert_almost_equal(sumS.toarray(), sumD)
+
+        for dtype in self.math_dtypes:
+            check(dtype)
+
+    def test_elementwise_multiply(self):
+        # real/real
+        A = array([[4,0,9],[2,-3,5]])
+        B = array([[0,7,0],[0,-4,0]])
+        Asp = self.spmatrix(A)
+        Bsp = self.spmatrix(B)
+        assert_almost_equal(Asp.multiply(Bsp).toarray(), A*B)  # sparse/sparse
+        assert_almost_equal(Asp.multiply(B).toarray(), A*B)  # sparse/dense
+
+        # complex/complex
+        C = array([[1-2j,0+5j,-1+0j],[4-3j,-3+6j,5]])
+        D = array([[5+2j,7-3j,-2+1j],[0-1j,-4+2j,9]])
+        Csp = self.spmatrix(C)
+        Dsp = self.spmatrix(D)
+        assert_almost_equal(Csp.multiply(Dsp).toarray(), C*D)  # sparse/sparse
+        assert_almost_equal(Csp.multiply(D).toarray(), C*D)  # sparse/dense
+
+        # real/complex
+        assert_almost_equal(Asp.multiply(Dsp).toarray(), A*D)  # sparse/sparse
+        assert_almost_equal(Asp.multiply(D).toarray(), A*D)  # sparse/dense
+
+    def test_elementwise_multiply_broadcast(self):
+        A = array([4])
+        B = array([[-9]])
+        C = array([1,-1,0])
+        D = array([[7,9,-9]])
+        E = array([[3],[2],[1]])
+        F = array([[8,6,3],[-4,3,2],[6,6,6]])
+        G = [1, 2, 3]
+        H = np.ones((3, 4))
+        J = H.T
+        K = array([[0]])
+        L = array([[[1,2],[0,1]]])
+
+        # Some arrays can't be cast as spmatrices (A,C,L) so leave
+        # them out.
+        Bsp = self.spmatrix(B)
+        Dsp = self.spmatrix(D)
+        Esp = self.spmatrix(E)
+        Fsp = self.spmatrix(F)
+        Hsp = self.spmatrix(H)
+        Hspp = self.spmatrix(H[0,None])
+        Jsp = self.spmatrix(J)
+        Jspp = self.spmatrix(J[:,0,None])
+        Ksp = self.spmatrix(K)
+
+        matrices = [A, B, C, D, E, F, G, H, J, K, L]
+        spmatrices = [Bsp, Dsp, Esp, Fsp, Hsp, Hspp, Jsp, Jspp, Ksp]
+
+        # sparse/sparse
+        for i in spmatrices:
+            for j in spmatrices:
+                try:
+                    dense_mult = i.toarray() * j.toarray()
+                except ValueError:
+                    assert_raises(ValueError, i.multiply, j)
+                    continue
+                sp_mult = i.multiply(j)
+                assert_almost_equal(sp_mult.toarray(), dense_mult)
+
+        # sparse/dense
+        for i in spmatrices:
+            for j in matrices:
+                try:
+                    dense_mult = i.toarray() * j
+                except TypeError:
+                    continue
+                except ValueError:
+                    assert_raises(ValueError, i.multiply, j)
+                    continue
+                sp_mult = i.multiply(j)
+                if isspmatrix(sp_mult):
+                    assert_almost_equal(sp_mult.toarray(), dense_mult)
+                else:
+                    assert_almost_equal(sp_mult, dense_mult)
+
+    def test_elementwise_divide(self):
+        expected = [[1,np.nan,np.nan,1],
+                    [1,np.nan,1,np.nan],
+                    [np.nan,1,np.nan,np.nan]]
+        assert_array_equal(toarray(self.datsp / self.datsp), expected)
+
+        denom = self.spmatrix(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d'))
+        expected = [[1,np.nan,np.nan,0.5],
+                    [-3,np.nan,inf,np.nan],
+                    [np.nan,0.25,np.nan,0]]
+        assert_array_equal(toarray(self.datsp / denom), expected)
+
+        # complex
+        A = array([[1-2j,0+5j,-1+0j],[4-3j,-3+6j,5]])
+        B = array([[5+2j,7-3j,-2+1j],[0-1j,-4+2j,9]])
+        Asp = self.spmatrix(A)
+        Bsp = self.spmatrix(B)
+        assert_almost_equal(toarray(Asp / Bsp), A/B)
+
+        # integer
+        A = array([[1,2,3],[-3,2,1]])
+        B = array([[0,1,2],[0,-2,3]])
+        Asp = self.spmatrix(A)
+        Bsp = self.spmatrix(B)
+        with np.errstate(divide='ignore'):
+            assert_array_equal(toarray(Asp / Bsp), A / B)
+
+        # mismatching sparsity patterns
+        A = array([[0,1],[1,0]])
+        B = array([[1,0],[1,0]])
+        Asp = self.spmatrix(A)
+        Bsp = self.spmatrix(B)
+        with np.errstate(divide='ignore', invalid='ignore'):
+            assert_array_equal(np.array(toarray(Asp / Bsp)), A / B)
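+        # Where the NaNs and infs above come from: positions where both
+        # operands are zero give 0/0 == nan, while a nonzero numerator over a
+        # zero denominator gives +/-inf, matching dense NumPy division.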
+
+    def test_pow(self):
+        A = array([[1, 0, 2, 0], [0, 3, 4, 0], [0, 5, 0, 0], [0, 6, 7, 8]])
+        B = self.spmatrix(A)
+
+        for exponent in [0,1,2,3]:
+            ret_sp = B**exponent
+            ret_np = np.linalg.matrix_power(A, exponent)
+            assert_array_equal(ret_sp.toarray(), ret_np)
+            assert_equal(ret_sp.dtype, ret_np.dtype)
+
+        # invalid exponents
+        for exponent in [-1, 2.2, 1 + 3j]:
+            assert_raises(Exception, B.__pow__, exponent)
+
+        # nonsquare matrix
+        B = self.spmatrix(A[:3,:])
+        assert_raises(Exception, B.__pow__, 1)
+
+    def test_rmatvec(self):
+        M = self.spmatrix(matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]]))
+        assert_array_almost_equal([1,2,3,4]*M, dot([1,2,3,4], M.toarray()))
+        row = array([[1,2,3,4]])
+        assert_array_almost_equal(row * M, row @ M.toarray())
+
+    def test_small_multiplication(self):
+        # test that A*x works for x with shape (), (1,), (1,1) and (1,0)
+        A = self.spmatrix([[1],[2],[3]])
+
+        assert_(isspmatrix(A * array(1)))
+        assert_equal((A * array(1)).toarray(), [[1], [2], [3]])
+        assert_equal(A * array([1]), array([1, 2, 3]))
+        assert_equal(A * array([[1]]), array([[1], [2], [3]]))
+        assert_equal(A * np.ones((1, 0)), np.ones((3, 0)))
+
+    def test_binop_custom_type(self):
+        # Non-regression test: previously, binary operations would raise
+        # NotImplementedError instead of returning NotImplemented
+        # (https://docs.python.org/library/constants.html#NotImplemented)
+        # so overloading Custom + matrix etc. didn't work.
+        A = self.spmatrix([[1], [2], [3]])
+        B = BinopTester()
+        assert_equal(A + B, "matrix on the left")
+        assert_equal(A - B, "matrix on the left")
+        assert_equal(A * B, "matrix on the left")
+        assert_equal(B + A, "matrix on the right")
+        assert_equal(B - A, "matrix on the right")
+        assert_equal(B * A, "matrix on the right")
+
+        assert_equal(eval('A @ B'), "matrix on the left")
+        assert_equal(eval('B @ A'), "matrix on the right")
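+        # How the dispatch above works, in outline: for an operand type it
+        # does not recognize, the sparse matrix returns NotImplemented, and
+        # Python then retries the other operand's reflected method. A minimal
+        # sketch of such a partner class (the real BinopTester is defined in
+        # the test helpers; this one is illustrative only):
+        #
+        #     class BinopSketch:
+        #         def __add__(self, other):        # invoked for B + A
+        #             return "matrix on the right"
+        #         def __radd__(self, other):       # invoked for A + B once
+        #             return "matrix on the left"  # A.__add__(B) has deferred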
+
+    def test_binop_custom_type_with_shape(self):
+        A = self.spmatrix([[1], [2], [3]])
+        B = BinopTester_with_shape((3,1))
+        assert_equal(A + B, "matrix on the left")
+        assert_equal(A - B, "matrix on the left")
+        assert_equal(A * B, "matrix on the left")
+        assert_equal(B + A, "matrix on the right")
+        assert_equal(B - A, "matrix on the right")
+        assert_equal(B * A, "matrix on the right")
+
+        assert_equal(eval('A @ B'), "matrix on the left")
+        assert_equal(eval('B @ A'), "matrix on the right")
+
+    def test_dot_scalar(self):
+        M = self.spmatrix(array([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]]))
+        scalar = 10
+        actual = M.dot(scalar)
+        expected = M * scalar
+
+        assert_allclose(actual.toarray(), expected.toarray())
+
+    def test_matmul(self):
+        M = self.spmatrix(array([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]]))
+        B = self.spmatrix(array([[0,1],[1,0],[0,2]],'d'))
+        col = array([[1,2,3]]).T
+
+        # check matrix-vector
+        assert_array_almost_equal(operator.matmul(M, col),
+                                  M.toarray() @ col)
+
+        # check matrix-matrix
+        assert_array_almost_equal(operator.matmul(M, B).toarray(),
+                                  (M * B).toarray())
+        assert_array_almost_equal(operator.matmul(M.toarray(), B),
+                                  (M * B).toarray())
+        assert_array_almost_equal(operator.matmul(M, B.toarray()),
+                                  (M * B).toarray())
+
+        # check error on matrix-scalar
+        assert_raises(ValueError, operator.matmul, M, 1)
+        assert_raises(ValueError, operator.matmul, 1, M)
+
+    def test_matvec(self):
+        M = self.spmatrix(matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]]))
+        col = array([[1,2,3]]).T
+        assert_array_almost_equal(M * col, M.toarray() @ col)
+
+        # check result dimensions (ticket #514)
+        assert_equal((M * array([1,2,3])).shape,(4,))
+        assert_equal((M * array([[1],[2],[3]])).shape,(4,1))
+        assert_equal((M * matrix([[1],[2],[3]])).shape,(4,1))
+
+        # check result type
+        assert_(isinstance(M * array([1,2,3]), ndarray))
+        assert_(isinstance(M * matrix([1,2,3]).T, np.matrix))
+
+        # ensure exception is raised for improper dimensions
+        bad_vecs = [array([1,2]), array([1,2,3,4]), array([[1],[2]]),
+                    matrix([1,2,3]), matrix([[1],[2]])]
+        for x in bad_vecs:
+            assert_raises(ValueError, M.__mul__, x)
+
+        # The current relationship between sparse matrix products and array
+        # products is as follows:
+        assert_array_almost_equal(M*array([1,2,3]), dot(M.A,[1,2,3]))
+        assert_array_almost_equal(M*[[1],[2],[3]], asmatrix(dot(M.A,[1,2,3])).T)
+        # Note that the result of M * x is dense if x has a singleton dimension.
+
+        # Currently M.matvec(asarray(col)) is rank-1, whereas M.matvec(col)
+        # is rank-2.  Is this desirable?
+
+    def test_matmat_sparse(self):
+        a = matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])
+        a2 = array([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])
+        b = matrix([[0,1],[1,0],[0,2]],'d')
+        asp = self.spmatrix(a)
+        bsp = self.spmatrix(b)
+        assert_array_almost_equal((asp*bsp).toarray(), a@b)
+        assert_array_almost_equal(asp*b, a@b)
+        assert_array_almost_equal(a*bsp, a@b)
+        assert_array_almost_equal(a2*bsp, a@b)
+
+        # Now try performing cross-type multiplication:
+        csp = bsp.tocsc()
+        c = b
+        want = a@c
+        assert_array_almost_equal((asp*csp).toarray(), want)
+        assert_array_almost_equal(asp*c, want)
+
+        assert_array_almost_equal(a*csp, want)
+        assert_array_almost_equal(a2*csp, want)
+        csp = bsp.tocsr()
+        assert_array_almost_equal((asp*csp).toarray(), want)
+        assert_array_almost_equal(asp*c, want)
+
+        assert_array_almost_equal(a*csp, want)
+        assert_array_almost_equal(a2*csp, want)
+        csp = bsp.tocoo()
+        assert_array_almost_equal((asp*csp).toarray(), want)
+        assert_array_almost_equal(asp*c, want)
+
+        assert_array_almost_equal(a*csp, want)
+        assert_array_almost_equal(a2*csp, want)
+
+        # Test provided by Andy Fraser, 2006-03-26
+        L = 30
+        frac = .3
+        random.seed(0)  # make runs repeatable
+        A = zeros((L,2))
+        for i in range(L):
+            for j in range(2):
+                r = random.random()
+                if r < frac:
+                    A[i,j] = r/frac
+
+        A = self.spmatrix(A)
+        B = A*A.T
+        assert_array_almost_equal(B.toarray(), A.toarray() @ A.T.toarray())
+        assert_array_almost_equal(B.toarray(), A.toarray() @ A.toarray().T)
+
+        # check dimension mismatch 2x2 times 3x2
+        A = self.spmatrix([[1,2],[3,4]])
+        B = self.spmatrix([[1,2],[3,4],[5,6]])
+        assert_raises(ValueError, A.__mul__, B)
+
+    def test_matmat_dense(self):
+        a = matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])
+        asp = self.spmatrix(a)
+
+        # check both array and matrix types
+        bs = [array([[1,2],[3,4],[5,6]]), matrix([[1,2],[3,4],[5,6]])]
+
+        for b in bs:
+            result = asp*b
+            assert_(isinstance(result, type(b)))
+            assert_equal(result.shape, (4,2))
+            assert_equal(result, dot(a,b))
+
+    def test_sparse_format_conversions(self):
+        A = sparse.kron([[1,0,2],[0,3,4],[5,0,0]], [[1,2],[0,3]])
+        D = A.toarray()
+        A = self.spmatrix(A)
+
+        for format in ['bsr','coo','csc','csr','dia','dok','lil']:
+            a = A.asformat(format)
+            assert_equal(a.format,format)
+            assert_array_equal(a.toarray(), D)
+
+            b = self.spmatrix(D+3j).asformat(format)
+            assert_equal(b.format,format)
+            assert_array_equal(b.toarray(), D+3j)
+
+            c = eval(format + '_matrix')(A)
+            assert_equal(c.format,format)
+            assert_array_equal(c.toarray(), D)
+
+        for format in ['array', 'dense']:
+            a = A.asformat(format)
+            assert_array_equal(a, D)
+
+            b = self.spmatrix(D+3j).asformat(format)
+            assert_array_equal(b, D+3j)
+
+    def test_tobsr(self):
+        x = array([[1,0,2,0],[0,0,0,0],[0,0,4,5]])
+        y = array([[0,1,2],[3,0,5]])
+        A = kron(x,y)
+        Asp = self.spmatrix(A)
+        for format in ['bsr']:
+            fn = getattr(Asp, 'to' + format)
+
+            for X in [1, 2, 3, 6]:
+                for Y in [1, 2, 3, 4, 6, 12]:
+                    assert_equal(fn(blocksize=(X, Y)).toarray(), A)
+
+    def test_transpose(self):
+        dat_1 = self.dat
+        dat_2 = np.array([[]])
+        matrices = [dat_1, dat_2]
+
+        def check(dtype, j):
+            dat = array(matrices[j], dtype=dtype)
+            datsp = self.spmatrix(dat)
+
+            a = datsp.transpose()
+            b = dat.transpose()
+
+            assert_array_equal(a.toarray(), b)
+            assert_array_equal(a.transpose().toarray(), dat)
+            assert_equal(a.dtype, b.dtype)
+
+        # See gh-5987
+        empty = self.spmatrix((3, 4))
+        assert_array_equal(np.transpose(empty).toarray(),
+                           np.transpose(zeros((3, 4))))
+        assert_array_equal(empty.T.toarray(), zeros((4, 3)))
+        assert_raises(ValueError, empty.transpose, axes=0)
+
+        for dtype in self.checked_dtypes:
+            for j in range(len(matrices)):
+                check(dtype, j)
+
+    def test_add_dense(self):
+        def check(dtype):
+            dat = self.dat_dtypes[dtype]
+            datsp = self.datsp_dtypes[dtype]
+
+            # adding a dense matrix to a sparse matrix
+            sum1 = dat + datsp
+            assert_array_equal(sum1, dat + dat)
+            sum2 = datsp + dat
+            assert_array_equal(sum2, dat + dat)
+
+        for dtype in self.math_dtypes:
+            check(dtype)
+
+    def test_sub_dense(self):
+        # subtracting a dense matrix to/from a sparse matrix
+        def check(dtype):
+            dat = self.dat_dtypes[dtype]
+            datsp = self.datsp_dtypes[dtype]
+
+            # Behavior is different for bool.
+            if dat.dtype == bool:
+                sum1 = dat - datsp
+                assert_array_equal(sum1, dat - dat)
+                sum2 = datsp - dat
+                assert_array_equal(sum2, dat - dat)
+            else:
+                # Manually add to avoid upcasting from scalar
+                # multiplication.
+                sum1 = (dat + dat + dat) - datsp
+                assert_array_equal(sum1, dat + dat)
+                sum2 = (datsp + datsp + datsp) - dat
+                assert_array_equal(sum2, dat + dat)
+
+        for dtype in self.math_dtypes:
+            if dtype == np.dtype('bool'):
+                # boolean array subtraction deprecated in 1.9.0
+                continue
+
+            check(dtype)
+
+    def test_maximum_minimum(self):
+        A_dense = np.array([[1, 0, 3], [0, 4, 5], [0, 0, 0]])
+        B_dense = np.array([[1, 1, 2], [0, 3, 6], [1, -1, 0]])
+
+        A_dense_cpx = np.array([[1, 0, 3], [0, 4+2j, 5], [0, 1j, -1j]])
+
+        def check(dtype, dtype2, btype):
+            if np.issubdtype(dtype, np.complexfloating):
+                A = self.spmatrix(A_dense_cpx.astype(dtype))
+            else:
+                A = self.spmatrix(A_dense.astype(dtype))
+            if btype == 'scalar':
+                B = dtype2.type(1)
+            elif btype == 'scalar2':
+                B = dtype2.type(-1)
+            elif btype == 'dense':
+                B = B_dense.astype(dtype2)
+            elif btype == 'sparse':
+                B = self.spmatrix(B_dense.astype(dtype2))
+            else:
+                raise ValueError()
+
+            with suppress_warnings() as sup:
+                sup.filter(SparseEfficiencyWarning,
+                           "Taking maximum .minimum. with > 0 .< 0. number results to a dense matrix")
+
+                max_s = A.maximum(B)
+                min_s = A.minimum(B)
+
+            max_d = np.maximum(toarray(A), toarray(B))
+            assert_array_equal(toarray(max_s), max_d)
+            assert_equal(max_s.dtype, max_d.dtype)
+
+            min_d = np.minimum(toarray(A), toarray(B))
+            assert_array_equal(toarray(min_s), min_d)
+            assert_equal(min_s.dtype, min_d.dtype)
+
+        for dtype in self.math_dtypes:
+            for dtype2 in [np.int8, np.float_, np.complex_]:
+                for btype in ['scalar', 'scalar2', 'dense', 'sparse']:
+                    check(np.dtype(dtype), np.dtype(dtype2), btype)
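+        # Why the suppressed warning is expected: maximum(A, c) with c > 0
+        # maps every implicit zero of A to c, so no stored zeros remain and
+        # the result is effectively dense. An illustrative sketch:
+        #
+        #     >>> from scipy.sparse import csr_matrix
+        #     >>> A = csr_matrix([[1, 0], [0, 0]])
+        #     >>> A.maximum(1).toarray()   # warns, then gives [[1, 1], [1, 1]]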
+
+    def test_copy(self):
+        # Check whether the copy=True and copy=False keywords work
+        A = self.datsp
+
+        # check that copy preserves format
+        assert_equal(A.copy().format, A.format)
+        assert_equal(A.__class__(A,copy=True).format, A.format)
+        assert_equal(A.__class__(A,copy=False).format, A.format)
+
+        assert_equal(A.copy().toarray(), A.toarray())
+        assert_equal(A.__class__(A, copy=True).toarray(), A.toarray())
+        assert_equal(A.__class__(A, copy=False).toarray(), A.toarray())
+
+        # check that XXX_matrix.toXXX() works
+        toself = getattr(A,'to' + A.format)
+        assert_(toself() is A)
+        assert_(toself(copy=False) is A)
+        assert_equal(toself(copy=True).format, A.format)
+        assert_equal(toself(copy=True).toarray(), A.toarray())
+
+        # check whether the data is copied
+        assert_(not sparse_may_share_memory(A.copy(), A))
+
+    # test that __iter__ is compatible with NumPy matrix
+    def test_iterator(self):
+        B = matrix(np.arange(50).reshape(5, 10))
+        A = self.spmatrix(B)
+
+        for x, y in zip(A, B):
+            assert_equal(x.toarray(), y)
+
+    def test_size_zero_matrix_arithmetic(self):
+        # Test basic matrix arithmetic with shapes like (0,0), (10,0),
+        # (0, 3), etc.
+        mat = array([])
+        a = mat.reshape((0, 0))
+        b = mat.reshape((0, 1))
+        c = mat.reshape((0, 5))
+        d = mat.reshape((1, 0))
+        e = mat.reshape((5, 0))
+        f = np.ones([5, 5])
+
+        asp = self.spmatrix(a)
+        bsp = self.spmatrix(b)
+        csp = self.spmatrix(c)
+        dsp = self.spmatrix(d)
+        esp = self.spmatrix(e)
+        fsp = self.spmatrix(f)
+
+        # matrix product.
+        assert_array_equal(asp.dot(asp).A, np.dot(a, a))
+        assert_array_equal(bsp.dot(dsp).A, np.dot(b, d))
+        assert_array_equal(dsp.dot(bsp).A, np.dot(d, b))
+        assert_array_equal(csp.dot(esp).A, np.dot(c, e))
+        assert_array_equal(csp.dot(fsp).A, np.dot(c, f))
+        assert_array_equal(esp.dot(csp).A, np.dot(e, c))
+        assert_array_equal(dsp.dot(csp).A, np.dot(d, c))
+        assert_array_equal(fsp.dot(esp).A, np.dot(f, e))
+
+        # bad matrix products
+        assert_raises(ValueError, dsp.dot, e)
+        assert_raises(ValueError, asp.dot, d)
+
+        # element-wise multiplication
+        assert_array_equal(asp.multiply(asp).A, np.multiply(a, a))
+        assert_array_equal(bsp.multiply(bsp).A, np.multiply(b, b))
+        assert_array_equal(dsp.multiply(dsp).A, np.multiply(d, d))
+
+        assert_array_equal(asp.multiply(a).A, np.multiply(a, a))
+        assert_array_equal(bsp.multiply(b).A, np.multiply(b, b))
+        assert_array_equal(dsp.multiply(d).A, np.multiply(d, d))
+
+        assert_array_equal(asp.multiply(6).A, np.multiply(a, 6))
+        assert_array_equal(bsp.multiply(6).A, np.multiply(b, 6))
+        assert_array_equal(dsp.multiply(6).A, np.multiply(d, 6))
+
+        # bad element-wise multiplication
+        assert_raises(ValueError, asp.multiply, c)
+        assert_raises(ValueError, esp.multiply, c)
+
+        # Addition
+        assert_array_equal(asp.__add__(asp).A, a.__add__(a))
+        assert_array_equal(bsp.__add__(bsp).A, b.__add__(b))
+        assert_array_equal(dsp.__add__(dsp).A, d.__add__(d))
+
+        # bad addition
+        assert_raises(ValueError, asp.__add__, dsp)
+        assert_raises(ValueError, bsp.__add__, asp)
+
+    def test_size_zero_conversions(self):
+        mat = array([])
+        a = mat.reshape((0, 0))
+        b = mat.reshape((0, 5))
+        c = mat.reshape((5, 0))
+
+        for m in [a, b, c]:
+            spm = self.spmatrix(m)
+            assert_array_equal(spm.tocoo().A, m)
+            assert_array_equal(spm.tocsr().A, m)
+            assert_array_equal(spm.tocsc().A, m)
+            assert_array_equal(spm.tolil().A, m)
+            assert_array_equal(spm.todok().A, m)
+            assert_array_equal(spm.tobsr().A, m)
+
+    def test_pickle(self):
+        import pickle
+        sup = suppress_warnings()
+        sup.filter(SparseEfficiencyWarning)
+
+        @sup
+        def check():
+            datsp = self.datsp.copy()
+            for protocol in range(pickle.HIGHEST_PROTOCOL):
+                sploaded = pickle.loads(pickle.dumps(datsp, protocol=protocol))
+                assert_equal(datsp.shape, sploaded.shape)
+                assert_array_equal(datsp.toarray(), sploaded.toarray())
+                assert_equal(datsp.format, sploaded.format)
+                for key, val in datsp.__dict__.items():
+                    if isinstance(val, np.ndarray):
+                        assert_array_equal(val, sploaded.__dict__[key])
+                    else:
+                        assert_(val == sploaded.__dict__[key])
+        check()
+
+    def test_unary_ufunc_overrides(self):
+        def check(name):
+            if name == "sign":
+                pytest.skip("sign conflicts with comparison op "
+                            "support on Numpy")
+            if self.spmatrix in (dok_matrix, lil_matrix):
+                pytest.skip("Unary ops not implemented for dok/lil")
+            ufunc = getattr(np, name)
+
+            X = self.spmatrix(np.arange(20).reshape(4, 5) / 20.)
+            X0 = ufunc(X.toarray())
+
+            X2 = ufunc(X)
+            assert_array_equal(X2.toarray(), X0)
+
+        for name in ["sin", "tan", "arcsin", "arctan", "sinh", "tanh",
+                     "arcsinh", "arctanh", "rint", "sign", "expm1", "log1p",
+                     "deg2rad", "rad2deg", "floor", "ceil", "trunc", "sqrt",
+                     "abs"]:
+            check(name)
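+        # All ufuncs in the list above map 0 to 0, which is what allows a
+        # sparse matrix to implement them by applying the ufunc to its stored
+        # .data only, leaving the sparsity structure untouched.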
+
+    def test_resize(self):
+        # resize(shape) resizes the matrix in-place
+        D = np.array([[1, 0, 3, 4],
+                      [2, 0, 0, 0],
+                      [3, 0, 0, 0]])
+        S = self.spmatrix(D)
+        assert_(S.resize((3, 2)) is None)
+        assert_array_equal(S.A, [[1, 0],
+                                 [2, 0],
+                                 [3, 0]])
+        S.resize((2, 2))
+        assert_array_equal(S.A, [[1, 0],
+                                 [2, 0]])
+        S.resize((3, 2))
+        assert_array_equal(S.A, [[1, 0],
+                                 [2, 0],
+                                 [0, 0]])
+        S.resize((3, 3))
+        assert_array_equal(S.A, [[1, 0, 0],
+                                 [2, 0, 0],
+                                 [0, 0, 0]])
+        # test no-op
+        S.resize((3, 3))
+        assert_array_equal(S.A, [[1, 0, 0],
+                                 [2, 0, 0],
+                                 [0, 0, 0]])
+
+        # test *args
+        S.resize(3, 2)
+        assert_array_equal(S.A, [[1, 0],
+                                 [2, 0],
+                                 [0, 0]])
+
+        for bad_shape in [1, (-1, 2), (2, -1), (1, 2, 3)]:
+            assert_raises(ValueError, S.resize, bad_shape)
+
+    def test_constructor1_base(self):
+        A = self.datsp
+
+        self_format = A.format
+
+        C = A.__class__(A, copy=False)
+        assert_array_equal_dtype(A.toarray(), C.toarray())
+        if self_format not in NON_ARRAY_BACKED_FORMATS:
+            assert_(sparse_may_share_memory(A, C))
+
+        C = A.__class__(A, dtype=A.dtype, copy=False)
+        assert_array_equal_dtype(A.toarray(), C.toarray())
+        if self_format not in NON_ARRAY_BACKED_FORMATS:
+            assert_(sparse_may_share_memory(A, C))
+
+        C = A.__class__(A, dtype=np.float32, copy=False)
+        assert_array_equal(A.toarray(), C.toarray())
+
+        C = A.__class__(A, copy=True)
+        assert_array_equal_dtype(A.toarray(), C.toarray())
+        assert_(not sparse_may_share_memory(A, C))
+
+        for other_format in ['csr', 'csc', 'coo', 'dia', 'dok', 'lil']:
+            if other_format == self_format:
+                continue
+            B = A.asformat(other_format)
+            C = A.__class__(B, copy=False)
+            assert_array_equal_dtype(A.toarray(), C.toarray())
+
+            C = A.__class__(B, copy=True)
+            assert_array_equal_dtype(A.toarray(), C.toarray())
+            assert_(not sparse_may_share_memory(B, C))
+
+
+class _TestInplaceArithmetic:
+    def test_inplace_dense(self):
+        a = np.ones((3, 4))
+        b = self.spmatrix(a)
+
+        x = a.copy()
+        y = a.copy()
+        x += a
+        y += b
+        assert_array_equal(x, y)
+
+        x = a.copy()
+        y = a.copy()
+        x -= a
+        y -= b
+        assert_array_equal(x, y)
+
+        # This is matrix product, from __rmul__
+        assert_raises(ValueError, operator.imul, x, b)
+        x = a.copy()
+        y = a.copy()
+        x = x.dot(a.T)
+        y *= b.T
+        assert_array_equal(x, y)
+
+        # Matrix (non-elementwise) floor division is not defined
+        assert_raises(TypeError, operator.ifloordiv, x, b)
+
+    def test_imul_scalar(self):
+        def check(dtype):
+            dat = self.dat_dtypes[dtype]
+            datsp = self.datsp_dtypes[dtype]
+
+            # Avoid implicit casting.
+            if np.can_cast(type(2), dtype, casting='same_kind'):
+                a = datsp.copy()
+                a *= 2
+                b = dat.copy()
+                b *= 2
+                assert_array_equal(b, a.toarray())
+
+            if np.can_cast(type(17.3), dtype, casting='same_kind'):
+                a = datsp.copy()
+                a *= 17.3
+                b = dat.copy()
+                b *= 17.3
+                assert_array_equal(b, a.toarray())
+
+        for dtype in self.math_dtypes:
+            check(dtype)
+
+    def test_idiv_scalar(self):
+        def check(dtype):
+            dat = self.dat_dtypes[dtype]
+            datsp = self.datsp_dtypes[dtype]
+
+            if np.can_cast(type(2), dtype, casting='same_kind'):
+                a = datsp.copy()
+                a /= 2
+                b = dat.copy()
+                b /= 2
+                assert_array_equal(b, a.toarray())
+
+            if np.can_cast(type(17.3), dtype, casting='same_kind'):
+                a = datsp.copy()
+                a /= 17.3
+                b = dat.copy()
+                b /= 17.3
+                assert_array_equal(b, a.toarray())
+
+        for dtype in self.math_dtypes:
+            # /= should only be used with float dtypes to avoid implicit
+            # casting.
+            if not np.can_cast(dtype, np.int_):
+                check(dtype)
+
+    def test_inplace_success(self):
+        # In-place ops should work even if a specialized version is not
+        # implemented, falling back to x = x <op> y.
+        a = self.spmatrix(np.eye(5))
+        b = self.spmatrix(np.eye(5))
+        bp = self.spmatrix(np.eye(5))
+
+        b += a
+        bp = bp + a
+        assert_allclose(b.A, bp.A)
+
+        b *= a
+        bp = bp * a
+        assert_allclose(b.A, bp.A)
+
+        b -= a
+        bp = bp - a
+        assert_allclose(b.A, bp.A)
+
+        assert_raises(TypeError, operator.ifloordiv, a, b)
+
+
+class _TestGetSet:
+    def test_getelement(self):
+        def check(dtype):
+            D = array([[1,0,0],
+                       [4,3,0],
+                       [0,2,0],
+                       [0,0,0]], dtype=dtype)
+            A = self.spmatrix(D)
+
+            M,N = D.shape
+
+            for i in range(-M, M):
+                for j in range(-N, N):
+                    assert_equal(A[i,j], D[i,j])
+
+            assert_equal(type(A[1,1]), dtype)
+
+            for ij in [(0,3),(-1,3),(4,0),(4,3),(4,-1), (1, 2, 3)]:
+                assert_raises((IndexError, TypeError), A.__getitem__, ij)
+
+        for dtype in supported_dtypes:
+            check(np.dtype(dtype))
+
+    def test_setelement(self):
+        def check(dtype):
+            A = self.spmatrix((3,4), dtype=dtype)
+            with suppress_warnings() as sup:
+                sup.filter(SparseEfficiencyWarning,
+                           "Changing the sparsity structure of a cs[cr]_matrix is expensive")
+                A[0, 0] = dtype.type(0)  # bug 870
+                A[1, 2] = dtype.type(4.0)
+                A[0, 1] = dtype.type(3)
+                A[2, 0] = dtype.type(2.0)
+                A[0,-1] = dtype.type(8)
+                A[-1,-2] = dtype.type(7)
+                A[0, 1] = dtype.type(5)
+
+            if dtype != np.bool_:
+                assert_array_equal(
+                    A.toarray(),
+                    [
+                        [0, 5, 0, 8],
+                        [0, 0, 4, 0],
+                        [2, 0, 7, 0]
+                    ]
+                )
+
+            for ij in [(0,4),(-1,4),(3,0),(3,4),(3,-1)]:
+                assert_raises(IndexError, A.__setitem__, ij, 123.0)
+
+            for v in [[1,2,3], array([1,2,3])]:
+                assert_raises(ValueError, A.__setitem__, (0,0), v)
+
+            if (not np.issubdtype(dtype, np.complexfloating) and
+                    dtype != np.bool_):
+                for v in [3j]:
+                    assert_raises(TypeError, A.__setitem__, (0,0), v)
+
+        for dtype in supported_dtypes:
+            check(np.dtype(dtype))
+
+    def test_negative_index_assignment(self):
+        # Regression test for github issue 4428.
+
+        def check(dtype):
+            A = self.spmatrix((3, 10), dtype=dtype)
+            with suppress_warnings() as sup:
+                sup.filter(SparseEfficiencyWarning,
+                           "Changing the sparsity structure of a cs[cr]_matrix is expensive")
+                A[0, -4] = 1
+            assert_equal(A[0, -4], 1)
+
+        for dtype in self.math_dtypes:
+            check(np.dtype(dtype))
+
+    def test_scalar_assign_2(self):
+        n, m = (5, 10)
+
+        def _test_set(i, j, nitems):
+            msg = "%r ; %r ; %r" % (i, j, nitems)
+            A = self.spmatrix((n, m))
+            with suppress_warnings() as sup:
+                sup.filter(SparseEfficiencyWarning,
+                           "Changing the sparsity structure of a cs[cr]_matrix is expensive")
+                A[i, j] = 1
+            assert_almost_equal(A.sum(), nitems, err_msg=msg)
+            assert_almost_equal(A[i, j], 1, err_msg=msg)
+
+        # [i,j]
+        for i, j in [(2, 3), (-1, 8), (-1, -2), (array(-1), -2), (-1, array(-2)),
+                     (array(-1), array(-2))]:
+            _test_set(i, j, 1)
+
+    def test_index_scalar_assign(self):
+        A = self.spmatrix((5, 5))
+        B = np.zeros((5, 5))
+        with suppress_warnings() as sup:
+            sup.filter(SparseEfficiencyWarning,
+                       "Changing the sparsity structure of a cs[cr]_matrix is expensive")
+            for C in [A, B]:
+                C[0,1] = 1
+                C[3,0] = 4
+                C[3,0] = 9
+        assert_array_equal(A.toarray(), B)
+
+
+class _TestSolve:
+    def test_solve(self):
+        # Test whether the lu_solve command segfaults, as reported by Nils
+        # Wagner for a 64-bit machine, 02 March 2005 (EJS)
+        n = 20
+        np.random.seed(0)  # make tests repeatable
+        A = zeros((n,n), dtype=complex)
+        x = np.random.rand(n)
+        y = np.random.rand(n-1)+1j*np.random.rand(n-1)
+        r = np.random.rand(n)
+        for i in range(len(x)):
+            A[i,i] = x[i]
+        for i in range(len(y)):
+            A[i,i+1] = y[i]
+            A[i+1,i] = conjugate(y[i])
+        A = self.spmatrix(A)
+        with suppress_warnings() as sup:
+            sup.filter(SparseEfficiencyWarning,
+                       "splu converted its input to CSC format")
+            x = splu(A).solve(r)
+        assert_almost_equal(A*x,r)
+
+
+class _TestSlicing:
+    def test_dtype_preservation(self):
+        assert_equal(self.spmatrix((1,10), dtype=np.int16)[0,1:5].dtype, np.int16)
+        assert_equal(self.spmatrix((1,10), dtype=np.int32)[0,1:5].dtype, np.int32)
+        assert_equal(self.spmatrix((1,10), dtype=np.float32)[0,1:5].dtype, np.float32)
+        assert_equal(self.spmatrix((1,10), dtype=np.float64)[0,1:5].dtype, np.float64)
+
+    def test_dtype_preservation_empty_slice(self):
+        # This should be parametrized with pytest, but something in the parent
+        # class creation used in this file breaks pytest.mark.parametrize.
+        for dt in [np.int16, np.int32, np.float32, np.float64]:
+            A = self.spmatrix((3, 2), dtype=dt)
+            assert_equal(A[:, 0:0:2].dtype, dt)
+            assert_equal(A[0:0:2, :].dtype, dt)
+            assert_equal(A[0, 0:0:2].dtype, dt)
+            assert_equal(A[0:0:2, 0].dtype, dt)
+
+    def test_get_horiz_slice(self):
+        B = asmatrix(arange(50.).reshape(5,10))
+        A = self.spmatrix(B)
+        assert_array_equal(B[1, :], A[1, :].toarray())
+        assert_array_equal(B[1, 2:5], A[1, 2:5].toarray())
+
+        C = matrix([[1, 2, 1], [4, 0, 6], [0, 0, 0], [0, 0, 1]])
+        D = self.spmatrix(C)
+        assert_array_equal(C[1, 1:3], D[1, 1:3].toarray())
+
+        # Now test slicing when a row contains only zeros
+        E = matrix([[1, 2, 1], [4, 0, 0], [0, 0, 0], [0, 0, 1]])
+        F = self.spmatrix(E)
+        assert_array_equal(E[1, 1:3], F[1, 1:3].toarray())
+        assert_array_equal(E[2, -2:], F[2, -2:].A)
+
+        # The following should raise exceptions:
+        assert_raises(IndexError, A.__getitem__, (slice(None), 11))
+        assert_raises(IndexError, A.__getitem__, (6, slice(3, 7)))
+
+    def test_get_vert_slice(self):
+        B = arange(50.).reshape(5, 10)
+        A = self.spmatrix(B)
+        assert_array_equal(B[2:5, [0]], A[2:5, 0].toarray())
+        assert_array_equal(B[:, [1]], A[:, 1].toarray())
+
+        C = array([[1, 2, 1], [4, 0, 6], [0, 0, 0], [0, 0, 1]])
+        D = self.spmatrix(C)
+        assert_array_equal(C[1:3, [1]], D[1:3, 1].toarray())
+        assert_array_equal(C[:, [2]], D[:, 2].toarray())
+
+        # Now test slicing when a column contains only zeros
+        E = array([[1, 0, 1], [4, 0, 0], [0, 0, 0], [0, 0, 1]])
+        F = self.spmatrix(E)
+        assert_array_equal(E[:, [1]], F[:, 1].toarray())
+        assert_array_equal(E[-2:, [2]], F[-2:, 2].toarray())
+
+        # The following should raise exceptions:
+        assert_raises(IndexError, A.__getitem__, (slice(None), 11))
+        assert_raises(IndexError, A.__getitem__, (6, slice(3, 7)))
+
+    def test_get_slices(self):
+        B = arange(50.).reshape(5, 10)
+        A = self.spmatrix(B)
+        assert_array_equal(A[2:5, 0:3].toarray(), B[2:5, 0:3])
+        assert_array_equal(A[1:, :-1].toarray(), B[1:, :-1])
+        assert_array_equal(A[:-1, 1:].toarray(), B[:-1, 1:])
+
+        # Now test slicing when a column contains only zeros
+        E = array([[1, 0, 1], [4, 0, 0], [0, 0, 0], [0, 0, 1]])
+        F = self.spmatrix(E)
+        assert_array_equal(E[1:2, 1:2], F[1:2, 1:2].toarray())
+        assert_array_equal(E[:, 1:], F[:, 1:].toarray())
+
+    def test_non_unit_stride_2d_indexing(self):
+        # Regression test -- used to silently ignore the stride.
+        v0 = np.random.rand(50, 50)
+        try:
+            v = self.spmatrix(v0)[0:25:2, 2:30:3]
+        except ValueError:
+            # if unsupported
+            pytest.skip("feature not implemented")
+
+        assert_array_equal(v.toarray(), v0[0:25:2, 2:30:3])
+
+    def test_slicing_2(self):
+        B = asmatrix(arange(50).reshape(5,10))
+        A = self.spmatrix(B)
+
+        # [i,j]
+        assert_equal(A[2,3], B[2,3])
+        assert_equal(A[-1,8], B[-1,8])
+        assert_equal(A[-1,-2],B[-1,-2])
+        assert_equal(A[array(-1),-2],B[-1,-2])
+        assert_equal(A[-1,array(-2)],B[-1,-2])
+        assert_equal(A[array(-1),array(-2)],B[-1,-2])
+
+        # [i,1:2]
+        assert_equal(A[2, :].toarray(), B[2, :])
+        assert_equal(A[2, 5:-2].toarray(), B[2, 5:-2])
+        assert_equal(A[array(2), 5:-2].toarray(), B[2, 5:-2])
+
+        # [1:2,j]
+        assert_equal(A[:, 2].toarray(), B[:, 2])
+        assert_equal(A[3:4, 9].toarray(), B[3:4, 9])
+        assert_equal(A[1:4, -5].toarray(), B[1:4, -5])
+        assert_equal(A[2:-1, 3].toarray(), B[2:-1, 3])
+        assert_equal(A[2:-1, array(3)].toarray(), B[2:-1, 3])
+
+        # [1:2,1:2]
+        assert_equal(A[1:2, 1:2].toarray(), B[1:2, 1:2])
+        assert_equal(A[4:, 3:].toarray(), B[4:, 3:])
+        assert_equal(A[:4, :5].toarray(), B[:4, :5])
+        assert_equal(A[2:-1, :5].toarray(), B[2:-1, :5])
+
+        # [i]
+        assert_equal(A[1, :].toarray(), B[1, :])
+        assert_equal(A[-2, :].toarray(), B[-2, :])
+        assert_equal(A[array(-2), :].toarray(), B[-2, :])
+
+        # [1:2]
+        assert_equal(A[1:4].toarray(), B[1:4])
+        assert_equal(A[1:-2].toarray(), B[1:-2])
+
+        # Check bug reported by Robert Cimrman:
+        # http://thread.gmane.org/gmane.comp.python.scientific.devel/7986 (dead link)
+        s = slice(int8(2),int8(4),None)
+        assert_equal(A[s, :].toarray(), B[2:4, :])
+        assert_equal(A[:, s].toarray(), B[:, 2:4])
+
+    def test_slicing_3(self):
+        B = asmatrix(arange(50).reshape(5,10))
+        A = self.spmatrix(B)
+
+        s_ = np.s_
+        slices = [s_[:2], s_[1:2], s_[3:], s_[3::2],
+                  s_[15:20], s_[3:2],
+                  s_[8:3:-1], s_[4::-2], s_[:5:-1],
+                  0, 1, s_[:], s_[1:5], -1, -2, -5,
+                  array(-1), np.int8(-3)]
+
+        def check_1(a):
+            x = A[a]
+            y = B[a]
+            if y.shape == ():
+                assert_equal(x, y, repr(a))
+            else:
+                if x.size == 0 and y.size == 0:
+                    pass
+                else:
+                    assert_array_equal(x.toarray(), y, repr(a))
+
+        for j, a in enumerate(slices):
+            check_1(a)
+
+        def check_2(a, b):
+            # Indexing np.matrix with 0-d arrays seems to be broken,
+            # as they seem not to be treated as scalars.
+            # https://github.com/numpy/numpy/issues/3110
+            if isinstance(a, np.ndarray):
+                ai = int(a)
+            else:
+                ai = a
+            if isinstance(b, np.ndarray):
+                bi = int(b)
+            else:
+                bi = b
+
+            x = A[a, b]
+            y = B[ai, bi]
+
+            if y.shape == ():
+                assert_equal(x, y, repr((a, b)))
+            else:
+                if x.size == 0 and y.size == 0:
+                    pass
+                else:
+                    assert_array_equal(x.toarray(), y, repr((a, b)))
+
+        for i, a in enumerate(slices):
+            for j, b in enumerate(slices):
+                check_2(a, b)
+
+        # Check out of bounds etc. systematically
+        extra_slices = []
+        for a, b, c in itertools.product(*([(None, 0, 1, 2, 5, 15,
+                                             -1, -2, 5, -15)]*3)):
+            if c == 0:
+                continue
+            extra_slices.append(slice(a, b, c))
+
+        for a in extra_slices:
+            check_2(a, a)
+            check_2(a, -2)
+            check_2(-2, a)
+
+    def test_ellipsis_slicing(self):
+        b = asmatrix(arange(50).reshape(5,10))
+        a = self.spmatrix(b)
+
+        assert_array_equal(a[...].A, b[...].A)
+        assert_array_equal(a[...,].A, b[...,].A)
+
+        assert_array_equal(a[1, ...].A, b[1, ...].A)
+        assert_array_equal(a[..., 1].A, b[..., 1].A)
+        assert_array_equal(a[1:, ...].A, b[1:, ...].A)
+        assert_array_equal(a[..., 1:].A, b[..., 1:].A)
+
+        assert_array_equal(a[1:, 1, ...].A, b[1:, 1, ...].A)
+        assert_array_equal(a[1, ..., 1:].A, b[1, ..., 1:].A)
+        # These return ints
+        assert_equal(a[1, 1, ...], b[1, 1, ...])
+        assert_equal(a[1, ..., 1], b[1, ..., 1])
+
+    def test_multiple_ellipsis_slicing(self):
+        b = asmatrix(arange(50).reshape(5,10))
+        a = self.spmatrix(b)
+
+        assert_array_equal(a[..., ...].A, b[:, :].A)
+        assert_array_equal(a[..., ..., ...].A, b[:, :].A)
+        assert_array_equal(a[1, ..., ...].A, b[1, :].A)
+        assert_array_equal(a[1:, ..., ...].A, b[1:, :].A)
+        assert_array_equal(a[..., ..., 1:].A, b[:, 1:].A)
+        assert_array_equal(a[..., ..., 1].A, b[:, 1].A)
+
+
+class _TestSlicingAssign:
+    def test_slice_scalar_assign(self):
+        A = self.spmatrix((5, 5))
+        B = np.zeros((5, 5))
+        with suppress_warnings() as sup:
+            sup.filter(SparseEfficiencyWarning,
+                       "Changing the sparsity structure of a cs[cr]_matrix is expensive")
+            for C in [A, B]:
+                C[0:1,1] = 1
+                C[3:0,0] = 4
+                C[3:4,0] = 9
+                C[0,4:] = 1
+                C[3::-1,4:] = 9
+        assert_array_equal(A.toarray(), B)
+
+    def test_slice_assign_2(self):
+        n, m = (5, 10)
+
+        def _test_set(i, j):
+            msg = "i=%r; j=%r" % (i, j)
+            A = self.spmatrix((n, m))
+            with suppress_warnings() as sup:
+                sup.filter(SparseEfficiencyWarning,
+                           "Changing the sparsity structure of a cs[cr]_matrix is expensive")
+                A[i, j] = 1
+            B = np.zeros((n, m))
+            B[i, j] = 1
+            assert_array_almost_equal(A.toarray(), B, err_msg=msg)
+        # [i,1:2]
+        for i, j in [(2, slice(3)), (2, slice(None, 10, 4)), (2, slice(5, -2)),
+                     (array(2), slice(5, -2))]:
+            _test_set(i, j)
+
+    def test_self_self_assignment(self):
+        # Tests whether a row of one lil_matrix can be assigned to
+        # another.
+        B = self.spmatrix((4,3))
+        with suppress_warnings() as sup:
+            sup.filter(SparseEfficiencyWarning,
+                       "Changing the sparsity structure of a cs[cr]_matrix is expensive")
+            B[0,0] = 2
+            B[1,2] = 7
+            B[2,1] = 3
+            B[3,0] = 10
+
+            A = B / 10
+            B[0,:] = A[0,:]
+            assert_array_equal(A[0,:].A, B[0,:].A)
+
+            A = B / 10
+            B[:,:] = A[:1,:1]
+            assert_array_equal(np.zeros((4,3)) + A[0,0], B.A)
+
+            A = B / 10
+            B[:-1,0] = A[0,:].T
+            assert_array_equal(A[0,:].A.T, B[:-1,0].A)
+
+    def test_slice_assignment(self):
+        B = self.spmatrix((4,3))
+        expected = array([[10,0,0],
+                          [0,0,6],
+                          [0,14,0],
+                          [0,0,0]])
+        block = [[1,0],[0,4]]
+
+        with suppress_warnings() as sup:
+            sup.filter(SparseEfficiencyWarning,
+                       "Changing the sparsity structure of a cs[cr]_matrix is expensive")
+            B[0,0] = 5
+            B[1,2] = 3
+            B[2,1] = 7
+            B[:,:] = B+B
+            assert_array_equal(B.toarray(), expected)
+
+            B[:2,:2] = csc_matrix(array(block))
+            assert_array_equal(B.toarray()[:2, :2], block)
+
+    def test_sparsity_modifying_assignment(self):
+        B = self.spmatrix((4,3))
+        with suppress_warnings() as sup:
+            sup.filter(SparseEfficiencyWarning,
+                       "Changing the sparsity structure of a cs[cr]_matrix is expensive")
+            B[0,0] = 5
+            B[1,2] = 3
+            B[2,1] = 7
+            B[3,0] = 10
+            B[:3] = csr_matrix(np.eye(3))
+
+        expected = array([[1,0,0],[0,1,0],[0,0,1],[10,0,0]])
+        assert_array_equal(B.toarray(), expected)
+
+    def test_set_slice(self):
+        A = self.spmatrix((5,10))
+        B = array(zeros((5, 10), float))
+        s_ = np.s_
+        slices = [s_[:2], s_[1:2], s_[3:], s_[3::2],
+                  s_[8:3:-1], s_[4::-2], s_[:5:-1],
+                  0, 1, s_[:], s_[1:5], -1, -2, -5,
+                  array(-1), np.int8(-3)]
+
+        with suppress_warnings() as sup:
+            sup.filter(SparseEfficiencyWarning,
+                       "Changing the sparsity structure of a cs[cr]_matrix is expensive")
+            for j, a in enumerate(slices):
+                A[a] = j
+                B[a] = j
+                assert_array_equal(A.toarray(), B, repr(a))
+
+            for i, a in enumerate(slices):
+                for j, b in enumerate(slices):
+                    A[a,b] = 10*i + 1000*(j+1)
+                    B[a,b] = 10*i + 1000*(j+1)
+                    assert_array_equal(A.toarray(), B, repr((a, b)))
+
+            A[0, 1:10:2] = range(1, 10, 2)
+            B[0, 1:10:2] = range(1, 10, 2)
+            assert_array_equal(A.toarray(), B)
+            A[1:5:2, 0] = np.arange(1, 5, 2)[:, None]
+            B[1:5:2, 0] = np.arange(1, 5, 2)[:]
+            assert_array_equal(A.toarray(), B)
+
+        # The next commands should raise exceptions
+        assert_raises(ValueError, A.__setitem__, (0, 0), list(range(100)))
+        assert_raises(ValueError, A.__setitem__, (0, 0), arange(100))
+        assert_raises(ValueError, A.__setitem__, (0, slice(None)),
+                      list(range(100)))
+        assert_raises(ValueError, A.__setitem__, (slice(None), 1),
+                      list(range(100)))
+        assert_raises(ValueError, A.__setitem__, (slice(None), 1), A.copy())
+        assert_raises(ValueError, A.__setitem__,
+                      ([[1, 2, 3], [0, 3, 4]], [1, 2, 3]), [1, 2, 3, 4])
+        assert_raises(ValueError, A.__setitem__,
+                      ([[1, 2, 3], [0, 3, 4], [4, 1, 3]],
+                       [[1, 2, 4], [0, 1, 3]]), [2, 3, 4])
+        assert_raises(ValueError, A.__setitem__, (slice(4), 0),
+                      [[1, 2], [3, 4]])
+
+    def test_assign_empty_spmatrix(self):
+        A = self.spmatrix(np.ones((2, 3)))
+        B = self.spmatrix((1, 2))
+        A[1, :2] = B
+        assert_array_equal(A.toarray(), [[1, 1, 1], [0, 0, 1]])
+
+    def test_assign_1d_slice(self):
+        A = self.spmatrix(np.ones((3, 3)))
+        x = np.zeros(3)
+        A[:, 0] = x
+        A[1, :] = x
+        assert_array_equal(A.toarray(), [[0, 1, 1], [0, 0, 0], [0, 1, 1]])
+
+
+class _TestFancyIndexing:
+    """Tests fancy indexing features.  The tests for any matrix formats
+    that implement these features should derive from this class.
+    """
+
+    def test_dtype_preservation_empty_index(self):
+        # This should be parametrized with pytest, but something in the parent
+        # class creation used in this file breaks pytest.mark.parametrize.
+        for dt in [np.int16, np.int32, np.float32, np.float64]:
+            A = self.spmatrix((3, 2), dtype=dt)
+            assert_equal(A[:, [False, False]].dtype, dt)
+            assert_equal(A[[False, False, False], :].dtype, dt)
+            assert_equal(A[:, []].dtype, dt)
+            assert_equal(A[[], :].dtype, dt)
+
+    def test_bad_index(self):
+        A = self.spmatrix(np.zeros([5, 5]))
+        assert_raises((IndexError, ValueError, TypeError), A.__getitem__, "foo")
+        assert_raises((IndexError, ValueError, TypeError), A.__getitem__, (2, "foo"))
+        assert_raises((IndexError, ValueError), A.__getitem__,
+                      ([1, 2, 3], [1, 2, 3, 4]))
+
+    def test_fancy_indexing(self):
+        B = asmatrix(arange(50).reshape(5,10))
+        A = self.spmatrix(B)
+
+        # [i]
+        assert_equal(A[[1, 3]].toarray(), B[[1, 3]])
+
+        # [i,[1,2]]
+        assert_equal(A[3, [1, 3]].toarray(), B[3, [1, 3]])
+        assert_equal(A[-1, [2, -5]].toarray(), B[-1, [2, -5]])
+        assert_equal(A[array(-1), [2, -5]].toarray(), B[-1, [2, -5]])
+        assert_equal(A[-1, array([2, -5])].toarray(), B[-1, [2, -5]])
+        assert_equal(A[array(-1), array([2, -5])].toarray(), B[-1, [2, -5]])
+
+        # [1:2,[1,2]]
+        assert_equal(A[:, [2, 8, 3, -1]].toarray(), B[:, [2, 8, 3, -1]])
+        assert_equal(A[3:4, [9]].toarray(), B[3:4, [9]])
+        assert_equal(A[1:4, [-1, -5]].toarray(), B[1:4, [-1, -5]])
+        assert_equal(A[1:4, array([-1, -5])].toarray(), B[1:4, [-1, -5]])
+
+        # [[1,2],j]
+        assert_equal(A[[1, 3], 3].toarray(), B[[1, 3], 3])
+        assert_equal(A[[2, -5], -4].toarray(), B[[2, -5], -4])
+        assert_equal(A[array([2, -5]), -4].toarray(), B[[2, -5], -4])
+        assert_equal(A[[2, -5], array(-4)].toarray(), B[[2, -5], -4])
+        assert_equal(A[array([2, -5]), array(-4)].toarray(), B[[2, -5], -4])
+
+        # [[1,2],1:2]
+        assert_equal(A[[1, 3], :].toarray(), B[[1, 3], :])
+        assert_equal(A[[2, -5], 8:-1].toarray(), B[[2, -5], 8:-1])
+        assert_equal(A[array([2, -5]), 8:-1].toarray(), B[[2, -5], 8:-1])
+
+        # [[1,2],[1,2]]
+        assert_equal(toarray(A[[1, 3], [2, 4]]), B[[1, 3], [2, 4]])
+        assert_equal(toarray(A[[-1, -3], [2, -4]]), B[[-1, -3], [2, -4]])
+        assert_equal(
+            toarray(A[array([-1, -3]), [2, -4]]), B[[-1, -3], [2, -4]]
+        )
+        assert_equal(
+            toarray(A[[-1, -3], array([2, -4])]), B[[-1, -3], [2, -4]]
+        )
+        assert_equal(
+            toarray(A[array([-1, -3]), array([2, -4])]), B[[-1, -3], [2, -4]]
+        )
+
+        # [[[1],[2]],[1,2]]
+        assert_equal(A[[[1], [3]], [2, 4]].toarray(), B[[[1], [3]], [2, 4]])
+        assert_equal(
+            A[[[-1], [-3], [-2]], [2, -4]].toarray(),
+            B[[[-1], [-3], [-2]], [2, -4]]
+        )
+        assert_equal(
+            A[array([[-1], [-3], [-2]]), [2, -4]].toarray(),
+            B[[[-1], [-3], [-2]], [2, -4]]
+        )
+        assert_equal(
+            A[[[-1], [-3], [-2]], array([2, -4])].toarray(),
+            B[[[-1], [-3], [-2]], [2, -4]]
+        )
+        assert_equal(
+            A[array([[-1], [-3], [-2]]), array([2, -4])].toarray(),
+            B[[[-1], [-3], [-2]], [2, -4]]
+        )
+
+        # [[1,2]]
+        assert_equal(A[[1, 3]].toarray(), B[[1, 3]])
+        assert_equal(A[[-1, -3]].toarray(), B[[-1, -3]])
+        assert_equal(A[array([-1, -3])].toarray(), B[[-1, -3]])
+
+        # [[1,2],:][:,[1,2]]
+        assert_equal(
+            A[[1, 3], :][:, [2, 4]].toarray(), B[[1, 3], :][:, [2, 4]]
+        )
+        assert_equal(
+            A[[-1, -3], :][:, [2, -4]].toarray(), B[[-1, -3], :][:, [2, -4]]
+        )
+        assert_equal(
+            A[array([-1, -3]), :][:, array([2, -4])].toarray(),
+            B[[-1, -3], :][:, [2, -4]]
+        )
+
+        # [:,[1,2]][[1,2],:]
+        assert_equal(
+            A[:, [1, 3]][[2, 4], :].toarray(), B[:, [1, 3]][[2, 4], :]
+        )
+        assert_equal(
+            A[:, [-1, -3]][[2, -4], :].toarray(), B[:, [-1, -3]][[2, -4], :]
+        )
+        assert_equal(
+            A[:, array([-1, -3])][array([2, -4]), :].toarray(),
+            B[:, [-1, -3]][[2, -4], :]
+        )
+
+        # Check bug reported by Robert Cimrman:
+        # http://thread.gmane.org/gmane.comp.python.scientific.devel/7986 (dead link)
+        s = slice(int8(2),int8(4),None)
+        assert_equal(A[s, :].toarray(), B[2:4, :])
+        assert_equal(A[:, s].toarray(), B[:, 2:4])
+
+        # Regression for gh-4917: index with tuple of 2D arrays
+        i = np.array([[1]], dtype=int)
+        assert_equal(A[i, i].toarray(), B[i, i])
+
+        # Regression for gh-4917: index with tuple of empty nested lists
+        assert_equal(A[[[]], [[]]].toarray(), B[[[]], [[]]])
+
+    def test_fancy_indexing_randomized(self):
+        np.random.seed(1234)  # make runs repeatable
+
+        NUM_SAMPLES = 50
+        M = 6
+        N = 4
+
+        D = asmatrix(np.random.rand(M,N))
+        D = np.multiply(D, D > 0.5)
+
+        I = np.random.randint(-M + 1, M, size=NUM_SAMPLES)
+        J = np.random.randint(-N + 1, N, size=NUM_SAMPLES)
+
+        S = self.spmatrix(D)
+
+        SIJ = S[I,J]
+        if isspmatrix(SIJ):
+            SIJ = SIJ.toarray()
+        assert_equal(SIJ, D[I,J])
+
+        I_bad = I + M
+        J_bad = J - N
+
+        assert_raises(IndexError, S.__getitem__, (I_bad,J))
+        assert_raises(IndexError, S.__getitem__, (I,J_bad))
+
+    def test_fancy_indexing_boolean(self):
+        np.random.seed(1234)  # make runs repeatable
+
+        B = asmatrix(arange(50).reshape(5,10))
+        A = self.spmatrix(B)
+
+        I = np.array(np.random.randint(0, 2, size=5), dtype=bool)
+        J = np.array(np.random.randint(0, 2, size=10), dtype=bool)
+        X = np.array(np.random.randint(0, 2, size=(5, 10)), dtype=bool)
+
+        assert_equal(toarray(A[I]), B[I])
+        assert_equal(toarray(A[:, J]), B[:, J])
+        assert_equal(toarray(A[X]), B[X])
+        assert_equal(toarray(A[B > 9]), B[B > 9])
+
+        I = np.array([True, False, True, True, False])
+        J = np.array([False, True, True, False, True,
+                      False, False, False, False, False])
+
+        assert_equal(toarray(A[I, J]), B[I, J])
+
+        Z1 = np.zeros((6, 11), dtype=bool)
+        Z2 = np.zeros((6, 11), dtype=bool)
+        Z2[0,-1] = True
+        Z3 = np.zeros((6, 11), dtype=bool)
+        Z3[-1,0] = True
+
+        assert_equal(A[Z1], np.array([]))
+        assert_raises(IndexError, A.__getitem__, Z2)
+        assert_raises(IndexError, A.__getitem__, Z3)
+        assert_raises((IndexError, ValueError), A.__getitem__, (X, 1))
+
+    def test_fancy_indexing_sparse_boolean(self):
+        np.random.seed(1234)  # make runs repeatable
+
+        B = asmatrix(arange(50).reshape(5,10))
+        A = self.spmatrix(B)
+
+        X = np.array(np.random.randint(0, 2, size=(5, 10)), dtype=bool)
+
+        Xsp = csr_matrix(X)
+
+        assert_equal(toarray(A[Xsp]), B[X])
+        assert_equal(toarray(A[A > 9]), B[B > 9])
+
+        Z = np.array(np.random.randint(0, 2, size=(5, 11)), dtype=bool)
+        Y = np.array(np.random.randint(0, 2, size=(6, 10)), dtype=bool)
+
+        Zsp = csr_matrix(Z)
+        Ysp = csr_matrix(Y)
+
+        assert_raises(IndexError, A.__getitem__, Zsp)
+        assert_raises(IndexError, A.__getitem__, Ysp)
+        assert_raises((IndexError, ValueError), A.__getitem__, (Xsp, 1))
+
+    def test_fancy_indexing_regression_3087(self):
+        mat = self.spmatrix(array([[1, 0, 0], [0,1,0], [1,0,0]]))
+        desired_cols = np.ravel(mat.sum(0)) > 0
+        assert_equal(mat[:, desired_cols].A, [[1, 0], [0, 1], [1, 0]])
+
+    def test_fancy_indexing_seq_assign(self):
+        mat = self.spmatrix(array([[1, 0], [0, 1]]))
+        assert_raises(ValueError, mat.__setitem__, (0, 0), np.array([1,2]))
+
+    def test_fancy_indexing_2d_assign(self):
+        # regression test for gh-10695
+        mat = self.spmatrix(array([[1, 0], [2, 3]]))
+        with suppress_warnings() as sup:
+            sup.filter(SparseEfficiencyWarning,
+                       "Changing the sparsity structure")
+            mat[[0, 1], [1, 1]] = mat[[1, 0], [0, 0]]
+        assert_equal(toarray(mat), array([[1, 2], [2, 1]]))
+
+    def test_fancy_indexing_empty(self):
+        B = asmatrix(arange(50).reshape(5,10))
+        B[1,:] = 0
+        B[:,2] = 0
+        B[3,6] = 0
+        A = self.spmatrix(B)
+
+        K = np.array([False, False, False, False, False])
+        assert_equal(toarray(A[K]), B[K])
+        K = np.array([], dtype=int)
+        assert_equal(toarray(A[K]), B[K])
+        assert_equal(toarray(A[K, K]), B[K, K])
+        J = np.array([0, 1, 2, 3, 4], dtype=int)[:,None]
+        assert_equal(toarray(A[K, J]), B[K, J])
+        assert_equal(toarray(A[J, K]), B[J, K])
+
+
+@contextlib.contextmanager
+def check_remains_sorted(X):
+    """Checks that sorted indices property is retained through an operation
+    """
+    if not hasattr(X, 'has_sorted_indices') or not X.has_sorted_indices:
+        yield
+        return
+    yield
+    indices = X.indices.copy()
+    X.has_sorted_indices = False
+    X.sort_indices()
+    assert_array_equal(indices, X.indices,
+                       'Expected sorted indices, found unsorted')
+
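+# A minimal usage sketch (illustrative): after the with-block the checker
+# re-sorts the indices and asserts they did not change, i.e. the wrapped
+# operation left them sorted.
+#
+#   >>> A = csr_matrix(np.eye(3))
+#   >>> with check_remains_sorted(A):
+#   ...     A[0, 0] = 5.0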
+
+class _TestFancyIndexingAssign:
+    def test_bad_index_assign(self):
+        A = self.spmatrix(np.zeros([5, 5]))
+        assert_raises((IndexError, ValueError, TypeError), A.__setitem__, "foo", 2)
+        assert_raises((IndexError, ValueError, TypeError), A.__setitem__, (2, "foo"), 5)
+
+    def test_fancy_indexing_set(self):
+        n, m = (5, 10)
+
+        def _test_set_slice(i, j):
+            A = self.spmatrix((n, m))
+            B = asmatrix(np.zeros((n, m)))
+            with suppress_warnings() as sup:
+                sup.filter(SparseEfficiencyWarning,
+                           "Changing the sparsity structure of a cs[cr]_matrix is expensive")
+                B[i, j] = 1
+                with check_remains_sorted(A):
+                    A[i, j] = 1
+            assert_array_almost_equal(A.toarray(), B)
+        # [1:2,1:2]
+        for i, j in [((2, 3, 4), slice(None, 10, 4)),
+                     (np.arange(3), slice(5, -2)),
+                     (slice(2, 5), slice(5, -2))]:
+            _test_set_slice(i, j)
+        for i, j in [(np.arange(3), np.arange(3)), ((0, 3, 4), (1, 2, 4))]:
+            _test_set_slice(i, j)
+
+    def test_fancy_assignment_dtypes(self):
+        def check(dtype):
+            A = self.spmatrix((5, 5), dtype=dtype)
+            with suppress_warnings() as sup:
+                sup.filter(SparseEfficiencyWarning,
+                           "Changing the sparsity structure of a cs[cr]_matrix is expensive")
+                A[[0,1],[0,1]] = dtype.type(1)
+                assert_equal(A.sum(), dtype.type(1)*2)
+                A[0:2,0:2] = dtype.type(1.0)
+                assert_equal(A.sum(), dtype.type(1)*4)
+                A[2,2] = dtype.type(1.0)
+                assert_equal(A.sum(), dtype.type(1)*4 + dtype.type(1))
+
+        for dtype in supported_dtypes:
+            check(np.dtype(dtype))
+
+    def test_sequence_assignment(self):
+        A = self.spmatrix((4,3))
+        B = self.spmatrix(eye(3,4))
+
+        i0 = [0,1,2]
+        i1 = (0,1,2)
+        i2 = array(i0)
+
+        with suppress_warnings() as sup:
+            sup.filter(SparseEfficiencyWarning,
+                       "Changing the sparsity structure of a cs[cr]_matrix is expensive")
+            with check_remains_sorted(A):
+                A[0,i0] = B[i0,0].T
+                A[1,i1] = B[i1,1].T
+                A[2,i2] = B[i2,2].T
+            assert_array_equal(A.toarray(), B.T.toarray())
+
+            # column slice
+            A = self.spmatrix((2,3))
+            with check_remains_sorted(A):
+                A[1,1:3] = [10,20]
+            assert_array_equal(A.toarray(), [[0, 0, 0], [0, 10, 20]])
+
+            # row slice
+            A = self.spmatrix((3,2))
+            with check_remains_sorted(A):
+                A[1:3,1] = [[10],[20]]
+            assert_array_equal(A.toarray(), [[0, 0], [0, 10], [0, 20]])
+
+            # both slices
+            A = self.spmatrix((3,3))
+            B = asmatrix(np.zeros((3,3)))
+            with check_remains_sorted(A):
+                for C in [A, B]:
+                    C[[0,1,2], [0,1,2]] = [4,5,6]
+            assert_array_equal(A.toarray(), B)
+
+            # both slices (2)
+            A = self.spmatrix((4, 3))
+            with check_remains_sorted(A):
+                A[(1, 2, 3), (0, 1, 2)] = [1, 2, 3]
+            assert_almost_equal(A.sum(), 6)
+            B = asmatrix(np.zeros((4, 3)))
+            B[(1, 2, 3), (0, 1, 2)] = [1, 2, 3]
+            assert_array_equal(A.toarray(), B)
+
+    def test_fancy_assign_empty(self):
+        B = asmatrix(arange(50).reshape(5,10))
+        B[1,:] = 0
+        B[:,2] = 0
+        B[3,6] = 0
+        A = self.spmatrix(B)
+
+        K = np.array([False, False, False, False, False])
+        A[K] = 42
+        assert_equal(toarray(A), B)
+
+        K = np.array([], dtype=int)
+        A[K] = 42
+        assert_equal(toarray(A), B)
+        A[K,K] = 42
+        assert_equal(toarray(A), B)
+
+        J = np.array([0, 1, 2, 3, 4], dtype=int)[:,None]
+        A[K,J] = 42
+        assert_equal(toarray(A), B)
+        A[J,K] = 42
+        assert_equal(toarray(A), B)
+
+
+class _TestFancyMultidim:
+    def test_fancy_indexing_ndarray(self):
+        sets = [
+            (np.array([[1], [2], [3]]), np.array([3, 4, 2])),
+            (np.array([[1], [2], [3]]), np.array([[3, 4, 2]])),
+            (np.array([[1, 2, 3]]), np.array([[3], [4], [2]])),
+            (np.array([1, 2, 3]), np.array([[3], [4], [2]])),
+            (np.array([[1, 2, 3], [3, 4, 2]]),
+             np.array([[5, 6, 3], [2, 3, 1]]))
+            ]
+        # These inputs would generate 3-D outputs, which are not supported:
+        #    (np.array([[[1], [2], [3]], [[3], [4], [2]]]),
+        #     np.array([[[5], [6], [3]], [[2], [3], [1]]])),
+
+        for I, J in sets:
+            np.random.seed(1234)
+            D = asmatrix(np.random.rand(5, 7))
+            S = self.spmatrix(D)
+
+            SIJ = S[I,J]
+            if isspmatrix(SIJ):
+                SIJ = SIJ.toarray()
+            assert_equal(SIJ, D[I,J])
+
+            I_bad = I + 5
+            J_bad = J + 7
+
+            assert_raises(IndexError, S.__getitem__, (I_bad,J))
+            assert_raises(IndexError, S.__getitem__, (I,J_bad))
+
+            # This would generate 3-D arrays -- not supported
+            assert_raises(IndexError, S.__getitem__, ([I, I], slice(None)))
+            assert_raises(IndexError, S.__getitem__, (slice(None), [J, J]))
+
+
+class _TestFancyMultidimAssign:
+    def test_fancy_assign_ndarray(self):
+        np.random.seed(1234)
+
+        D = asmatrix(np.random.rand(5, 7))
+        S = self.spmatrix(D)
+        X = np.random.rand(2, 3)
+
+        I = np.array([[1, 2, 3], [3, 4, 2]])
+        J = np.array([[5, 6, 3], [2, 3, 1]])
+
+        with check_remains_sorted(S):
+            S[I,J] = X
+        D[I,J] = X
+        assert_equal(S.toarray(), D)
+
+        I_bad = I + 5
+        J_bad = J + 7
+
+        C = [1, 2, 3]
+
+        with check_remains_sorted(S):
+            S[I,J] = C
+        D[I,J] = C
+        assert_equal(S.toarray(), D)
+
+        with check_remains_sorted(S):
+            S[I,J] = 3
+        D[I,J] = 3
+        assert_equal(S.toarray(), D)
+
+        assert_raises(IndexError, S.__setitem__, (I_bad,J), C)
+        assert_raises(IndexError, S.__setitem__, (I,J_bad), C)
+
+    def test_fancy_indexing_multidim_set(self):
+        n, m = (5, 10)
+
+        def _test_set_slice(i, j):
+            A = self.spmatrix((n, m))
+            with check_remains_sorted(A), suppress_warnings() as sup:
+                sup.filter(SparseEfficiencyWarning,
+                           "Changing the sparsity structure of a cs[cr]_matrix is expensive")
+                A[i, j] = 1
+            B = asmatrix(np.zeros((n, m)))
+            B[i, j] = 1
+            assert_array_almost_equal(A.toarray(), B)
+        # [[[1, 2], [1, 2]], [1, 2]]
+        for i, j in [(np.array([[1, 2], [1, 3]]), [1, 3]),
+                     (np.array([0, 4]), [[0, 3], [1, 2]]),
+                     ([[1, 2, 3], [0, 2, 4]], [[0, 4, 3], [4, 1, 2]])]:
+            _test_set_slice(i, j)
+
+    def test_fancy_assign_list(self):
+        np.random.seed(1234)
+
+        D = asmatrix(np.random.rand(5, 7))
+        S = self.spmatrix(D)
+        X = np.random.rand(2, 3)
+
+        I = [[1, 2, 3], [3, 4, 2]]
+        J = [[5, 6, 3], [2, 3, 1]]
+
+        S[I,J] = X
+        D[I,J] = X
+        assert_equal(S.toarray(), D)
+
+        I_bad = [[ii + 5 for ii in i] for i in I]
+        J_bad = [[jj + 7 for jj in j] for j in J]
+        C = [1, 2, 3]
+
+        S[I,J] = C
+        D[I,J] = C
+        assert_equal(S.toarray(), D)
+
+        S[I,J] = 3
+        D[I,J] = 3
+        assert_equal(S.toarray(), D)
+
+        assert_raises(IndexError, S.__setitem__, (I_bad,J), C)
+        assert_raises(IndexError, S.__setitem__, (I,J_bad), C)
+
+    def test_fancy_assign_slice(self):
+        np.random.seed(1234)
+
+        D = asmatrix(np.random.rand(5, 7))
+        S = self.spmatrix(D)
+
+        I = [1, 2, 3, 3, 4, 2]
+        J = [5, 6, 3, 2, 3, 1]
+
+        I_bad = [ii + 5 for ii in I]
+        J_bad = [jj + 7 for jj in J]
+
+        C1 = [1, 2, 3, 4, 5, 6, 7]
+        C2 = np.arange(5)[:, None]
+        assert_raises(IndexError, S.__setitem__, (I_bad, slice(None)), C1)
+        assert_raises(IndexError, S.__setitem__, (slice(None), J_bad), C2)
+
+
+class _TestArithmetic:
+    """
+    Test real/complex arithmetic
+    """
+    def __arith_init(self):
+        # these can be represented exactly in FP (so arithmetic should be exact)
+        self.__A = array([[-1.5, 6.5, 0, 2.25, 0, 0],
+                          [3.125, -7.875, 0.625, 0, 0, 0],
+                          [0, 0, -0.125, 1.0, 0, 0],
+                          [0, 0, 8.375, 0, 0, 0]], 'float64')
+        self.__B = array([[0.375, 0, 0, 0, -5, 2.5],
+                          [14.25, -3.75, 0, 0, -0.125, 0],
+                          [0, 7.25, 0, 0, 0, 0],
+                          [18.5, -0.0625, 0, 0, 0, 0]], 'complex128')
+        self.__B.imag = array([[1.25, 0, 0, 0, 6, -3.875],
+                               [2.25, 4.125, 0, 0, 0, 2.75],
+                               [0, 4.125, 0, 0, 0, 0],
+                               [-0.0625, 0, 0, 0, 0, 0]], 'float64')
+
+        # fractions are all x/16ths
+        assert_array_equal((self.__A*16).astype('int32'),16*self.__A)
+        assert_array_equal((self.__B.real*16).astype('int32'),16*self.__B.real)
+        assert_array_equal((self.__B.imag*16).astype('int32'),16*self.__B.imag)
+
+        self.__Asp = self.spmatrix(self.__A)
+        self.__Bsp = self.spmatrix(self.__B)
+
+    def test_add_sub(self):
+        self.__arith_init()
+
+        # basic tests
+        assert_array_equal(
+            (self.__Asp + self.__Bsp).toarray(), self.__A + self.__B
+        )
+
+        # check conversions
+        for x in supported_dtypes:
+            with np.errstate(invalid="ignore"):
+                A = self.__A.astype(x)
+            Asp = self.spmatrix(A)
+            for y in supported_dtypes:
+                if not np.issubdtype(y, np.complexfloating):
+                    with np.errstate(invalid="ignore"):
+                        B = self.__B.real.astype(y)
+                else:
+                    B = self.__B.astype(y)
+                Bsp = self.spmatrix(B)
+
+                # addition
+                D1 = A + B
+                S1 = Asp + Bsp
+
+                assert_equal(S1.dtype,D1.dtype)
+                assert_array_equal(S1.toarray(), D1)
+                assert_array_equal(Asp + B,D1)          # check sparse + dense
+                assert_array_equal(A + Bsp,D1)          # check dense + sparse
+
+                # subtraction
+                if np.dtype('bool') in [x, y]:
+                    # boolean array subtraction deprecated in 1.9.0
+                    continue
+
+                D1 = A - B
+                S1 = Asp - Bsp
+
+                assert_equal(S1.dtype,D1.dtype)
+                assert_array_equal(S1.toarray(), D1)
+                assert_array_equal(Asp - B,D1)          # check sparse - dense
+                assert_array_equal(A - Bsp,D1)          # check dense - sparse
+
+    def test_mu(self):
+        self.__arith_init()
+
+        # basic tests
+        assert_array_equal((self.__Asp * self.__Bsp.T).toarray(),
+                           self.__A @ self.__B.T)
+
+        for x in supported_dtypes:
+            with np.errstate(invalid="ignore"):
+                A = self.__A.astype(x)
+            Asp = self.spmatrix(A)
+            for y in supported_dtypes:
+                if np.issubdtype(y, np.complexfloating):
+                    B = self.__B.astype(y)
+                else:
+                    with np.errstate(invalid="ignore"):
+                        B = self.__B.real.astype(y)
+                Bsp = self.spmatrix(B)
+
+                D1 = A @ B.T
+                S1 = Asp * Bsp.T
+
+                assert_allclose(S1.toarray(), D1,
+                                atol=1e-14*abs(D1).max())
+                assert_equal(S1.dtype,D1.dtype)
+
+
+class _TestMinMax:
+    def test_minmax(self):
+        for dtype in [np.float32, np.float64, np.int32, np.int64, np.complex128]:
+            D = np.arange(20, dtype=dtype).reshape(5,4)
+
+            X = self.spmatrix(D)
+            assert_equal(X.min(), 0)
+            assert_equal(X.max(), 19)
+            assert_equal(X.min().dtype, dtype)
+            assert_equal(X.max().dtype, dtype)
+
+            D *= -1
+            X = self.spmatrix(D)
+            assert_equal(X.min(), -19)
+            assert_equal(X.max(), 0)
+
+            D += 5
+            X = self.spmatrix(D)
+            assert_equal(X.min(), -14)
+            assert_equal(X.max(), 5)
+
+        # try a fully dense matrix
+        X = self.spmatrix(np.arange(1, 10).reshape(3, 3))
+        assert_equal(X.min(), 1)
+        assert_equal(X.min().dtype, X.dtype)
+
+        X = -X
+        assert_equal(X.max(), -1)
+
+        # and a fully sparse matrix
+        Z = self.spmatrix(np.zeros(1))
+        assert_equal(Z.min(), 0)
+        assert_equal(Z.max(), 0)
+        assert_equal(Z.max().dtype, Z.dtype)
+
+        # another test
+        D = np.arange(20, dtype=float).reshape(5,4)
+        D[0:2, :] = 0
+        X = self.spmatrix(D)
+        assert_equal(X.min(), 0)
+        assert_equal(X.max(), 19)
+
+        # zero-size matrices
+        for D in [np.zeros((0, 0)), np.zeros((0, 10)), np.zeros((10, 0))]:
+            X = self.spmatrix(D)
+            assert_raises(ValueError, X.min)
+            assert_raises(ValueError, X.max)
+
+    def test_minmax_axis(self):
+        D = np.arange(50).reshape(5, 10)
+        # completely empty rows, leaving some completely full:
+        D[1, :] = 0
+        # empty at end for reduceat:
+        D[:, 9] = 0
+        # partial rows/cols:
+        D[3, 3] = 0
+        # entries on either side of 0:
+        D[2, 2] = -1
+        X = self.spmatrix(D)
+
+        axes = [-2, -1, 0, 1]
+        for axis in axes:
+            assert_array_equal(
+                X.max(axis=axis).A, D.max(axis=axis, keepdims=True)
+            )
+            assert_array_equal(
+                X.min(axis=axis).A, D.min(axis=axis, keepdims=True)
+            )
+
+        # full matrix
+        D = np.arange(1, 51).reshape(10, 5)
+        X = self.spmatrix(D)
+        for axis in axes:
+            assert_array_equal(
+                X.max(axis=axis).A, D.max(axis=axis, keepdims=True)
+            )
+            assert_array_equal(
+                X.min(axis=axis).A, D.min(axis=axis, keepdims=True)
+            )
+
+        # empty matrix
+        D = np.zeros((10, 5))
+        X = self.spmatrix(D)
+        for axis in axes:
+            assert_array_equal(
+                X.max(axis=axis).A, D.max(axis=axis, keepdims=True)
+            )
+            assert_array_equal(
+                X.min(axis=axis).A, D.min(axis=axis, keepdims=True)
+            )
+
+        axes_even = [0, -2]
+        axes_odd = [1, -1]
+
+        # zero-size matrices
+        D = np.zeros((0, 10))
+        X = self.spmatrix(D)
+        for axis in axes_even:
+            assert_raises(ValueError, X.min, axis=axis)
+            assert_raises(ValueError, X.max, axis=axis)
+        for axis in axes_odd:
+            assert_array_equal(np.zeros((0, 1)), X.min(axis=axis).A)
+            assert_array_equal(np.zeros((0, 1)), X.max(axis=axis).A)
+
+        D = np.zeros((10, 0))
+        X = self.spmatrix(D)
+        for axis in axes_odd:
+            assert_raises(ValueError, X.min, axis=axis)
+            assert_raises(ValueError, X.max, axis=axis)
+        for axis in axes_even:
+            assert_array_equal(np.zeros((1, 0)), X.min(axis=axis).A)
+            assert_array_equal(np.zeros((1, 0)), X.max(axis=axis).A)
+
+    def test_minmax_invalid_params(self):
+        dat = array([[0, 1, 2],
+                     [3, -4, 5],
+                     [-6, 7, 9]])
+        datsp = self.spmatrix(dat)
+
+        for fname in ('min', 'max'):
+            func = getattr(datsp, fname)
+            assert_raises(ValueError, func, axis=3)
+            assert_raises(TypeError, func, axis=(0, 1))
+            assert_raises(TypeError, func, axis=1.5)
+            assert_raises(ValueError, func, axis=1, out=1)
+
+    def test_numpy_minmax(self):
+        # See gh-5987
+        # xref gh-7460 in 'numpy'
+        from scipy.sparse import _data
+
+        dat = array([[0, 1, 2],
+                     [3, -4, 5],
+                     [-6, 7, 9]])
+        datsp = self.spmatrix(dat)
+
+        # We only test sparse matrices that implement 'min' and 'max',
+        # because those are the ones with compatibility issues with the
+        # 'numpy' implementation.
+        if isinstance(datsp, _data._minmax_mixin):
+            assert_array_equal(np.min(datsp), np.min(dat))
+            assert_array_equal(np.max(datsp), np.max(dat))
+
+    def test_argmax(self):
+        D1 = np.array([
+            [-1, 5, 2, 3],
+            [0, 0, -1, -2],
+            [-1, -2, -3, -4],
+            [1, 2, 3, 4],
+            [1, 2, 0, 0],
+        ])
+        D2 = D1.transpose()
+
+        for D in [D1, D2]:
+            mat = csr_matrix(D)
+
+            assert_equal(mat.argmax(), np.argmax(D))
+            assert_equal(mat.argmin(), np.argmin(D))
+
+            assert_equal(mat.argmax(axis=0),
+                         asmatrix(np.argmax(D, axis=0)))
+            assert_equal(mat.argmin(axis=0),
+                         asmatrix(np.argmin(D, axis=0)))
+
+            assert_equal(mat.argmax(axis=1),
+                         asmatrix(np.argmax(D, axis=1).reshape(-1, 1)))
+            assert_equal(mat.argmin(axis=1),
+                         asmatrix(np.argmin(D, axis=1).reshape(-1, 1)))
+
+        D1 = np.empty((0, 5))
+        D2 = np.empty((5, 0))
+
+        for axis in [None, 0]:
+            mat = self.spmatrix(D1)
+            assert_raises(ValueError, mat.argmax, axis=axis)
+            assert_raises(ValueError, mat.argmin, axis=axis)
+
+        for axis in [None, 1]:
+            mat = self.spmatrix(D2)
+            assert_raises(ValueError, mat.argmax, axis=axis)
+            assert_raises(ValueError, mat.argmin, axis=axis)
+
+
+class _TestGetNnzAxis:
+    def test_getnnz_axis(self):
+        dat = array([[0, 2],
+                     [3, 5],
+                     [-6, 9]])
+        bool_dat = dat.astype(bool)
+        datsp = self.spmatrix(dat)
+
+        accepted_return_dtypes = (np.int32, np.int64)
+
+        assert_array_equal(bool_dat.sum(axis=None), datsp.getnnz(axis=None))
+        assert_array_equal(bool_dat.sum(), datsp.getnnz())
+        assert_array_equal(bool_dat.sum(axis=0), datsp.getnnz(axis=0))
+        assert_in(datsp.getnnz(axis=0).dtype, accepted_return_dtypes)
+        assert_array_equal(bool_dat.sum(axis=1), datsp.getnnz(axis=1))
+        assert_in(datsp.getnnz(axis=1).dtype, accepted_return_dtypes)
+        assert_array_equal(bool_dat.sum(axis=-2), datsp.getnnz(axis=-2))
+        assert_in(datsp.getnnz(axis=-2).dtype, accepted_return_dtypes)
+        assert_array_equal(bool_dat.sum(axis=-1), datsp.getnnz(axis=-1))
+        assert_in(datsp.getnnz(axis=-1).dtype, accepted_return_dtypes)
+
+        assert_raises(ValueError, datsp.getnnz, axis=2)
+
+
+#------------------------------------------------------------------------------
+# Tailored base class for generic tests
+#------------------------------------------------------------------------------
+
+def _possibly_unimplemented(cls, require=True):
+    """
+    Construct a class that either runs tests as usual (require=True),
+    or whose test methods skip when they hit a common "not implemented" error.
+    """
+    if require:
+        return cls
+    else:
+        def wrap(fc):
+            @functools.wraps(fc)
+            def wrapper(*a, **kw):
+                try:
+                    return fc(*a, **kw)
+                except (NotImplementedError, TypeError, ValueError,
+                        IndexError, AttributeError):
+                    pytest.skip("feature not implemented")
+
+            return wrapper
+
+        new_dict = dict(cls.__dict__)
+        for name, func in cls.__dict__.items():
+            if name.startswith('test_'):
+                new_dict[name] = wrap(func)
+        return type(cls.__name__ + "NotImplemented",
+                    cls.__bases__,
+                    new_dict)
+
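+# Illustrative sketch (hypothetical usage, not part of the suite): with
+# require=False, the returned class converts "not implemented" failures
+# into skips instead of test errors.
+#
+#   >>> MaybeGetSet = _possibly_unimplemented(_TestGetSet, require=False)
+#   >>> # MaybeGetSet.test_getelement now skips when the format raises,
+#   >>> # e.g., NotImplementedError, rather than failing the run.
+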
+
+def sparse_test_class(getset=True, slicing=True, slicing_assign=True,
+                      fancy_indexing=True, fancy_assign=True,
+                      fancy_multidim_indexing=True, fancy_multidim_assign=True,
+                      minmax=True, nnz_axis=True):
+    """
+    Construct a base class, optionally converting some of the tests in
+    the suite to check that the feature is not implemented.
+    """
+    bases = (_TestCommon,
+             _possibly_unimplemented(_TestGetSet, getset),
+             _TestSolve,
+             _TestInplaceArithmetic,
+             _TestArithmetic,
+             _possibly_unimplemented(_TestSlicing, slicing),
+             _possibly_unimplemented(_TestSlicingAssign, slicing_assign),
+             _possibly_unimplemented(_TestFancyIndexing, fancy_indexing),
+             _possibly_unimplemented(_TestFancyIndexingAssign,
+                                     fancy_assign),
+             _possibly_unimplemented(_TestFancyMultidim,
+                                     fancy_indexing and fancy_multidim_indexing),
+             _possibly_unimplemented(_TestFancyMultidimAssign,
+                                     fancy_multidim_assign and fancy_assign),
+             _possibly_unimplemented(_TestMinMax, minmax),
+             _possibly_unimplemented(_TestGetNnzAxis, nnz_axis))
+
+    # check that test names do not clash
+    names = {}
+    for cls in bases:
+        for name in cls.__dict__:
+            if not name.startswith('test_'):
+                continue
+            old_cls = names.get(name)
+            if old_cls is not None:
+                raise ValueError("Test class %s overloads test %s defined in %s" % (
+                    cls.__name__, name, old_cls.__name__))
+            names[name] = cls
+
+    return type("TestBase", bases, {})
+
+
+#------------------------------------------------------------------------------
+# Matrix class based tests
+#------------------------------------------------------------------------------
+
+class TestCSR(sparse_test_class()):
+    @classmethod
+    def spmatrix(cls, *args, **kwargs):
+        with suppress_warnings() as sup:
+            sup.filter(SparseEfficiencyWarning,
+                       "Changing the sparsity structure of a csr_matrix is expensive")
+            return csr_matrix(*args, **kwargs)
+    math_dtypes = [np.bool_, np.int_, np.float_, np.complex_]
+
+    def test_constructor1(self):
+        b = array([[0, 4, 0],
+                   [3, 0, 0],
+                   [0, 2, 0]], 'd')
+        bsp = csr_matrix(b)
+        assert_array_almost_equal(bsp.data,[4,3,2])
+        assert_array_equal(bsp.indices,[1,0,1])
+        assert_array_equal(bsp.indptr,[0,1,2,3])
+        assert_equal(bsp.getnnz(),3)
+        assert_equal(bsp.getformat(),'csr')
+        assert_array_equal(bsp.toarray(), b)
+
+    def test_constructor2(self):
+        b = zeros((6,6),'d')
+        b[3,4] = 5
+        bsp = csr_matrix(b)
+        assert_array_almost_equal(bsp.data,[5])
+        assert_array_equal(bsp.indices,[4])
+        assert_array_equal(bsp.indptr,[0,0,0,0,1,1,1])
+        assert_array_almost_equal(bsp.toarray(), b)
+
+    def test_constructor3(self):
+        b = array([[1, 0],
+                   [0, 2],
+                   [3, 0]], 'd')
+        bsp = csr_matrix(b)
+        assert_array_almost_equal(bsp.data,[1,2,3])
+        assert_array_equal(bsp.indices,[0,1,0])
+        assert_array_equal(bsp.indptr,[0,1,2,3])
+        assert_array_almost_equal(bsp.toarray(), b)
+
+    def test_constructor4(self):
+        # using (data, ij) format
+        row = array([2, 3, 1, 3, 0, 1, 3, 0, 2, 1, 2])
+        col = array([0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1])
+        data = array([6., 10., 3., 9., 1., 4.,
+                      11., 2., 8., 5., 7.])
+
+        ij = vstack((row,col))
+        csr = csr_matrix((data,ij),(4,3))
+        assert_array_equal(arange(12).reshape(4, 3), csr.toarray())
+
+        # using Python lists and a specified dtype
+        csr = csr_matrix(([2**63 + 1, 1], ([0, 1], [0, 1])), dtype=np.uint64)
+        dense = array([[2**63 + 1, 0], [0, 1]], dtype=np.uint64)
+        assert_array_equal(dense, csr.toarray())
+
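+    # Note: in the (data, (row, col)) form, entries at repeated coordinates
+    # are summed on conversion, as with COO input. A small sketch (values
+    # are illustrative):
+    #
+    #   >>> m = csr_matrix(([1, 2], ([0, 0], [0, 0])), shape=(1, 1))
+    #   >>> m.toarray()
+    #   array([[3]])
+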
+    def test_constructor5(self):
+        # infer dimensions from arrays
+        indptr = array([0,1,3,3])
+        indices = array([0,5,1,2])
+        data = array([1,2,3,4])
+        csr = csr_matrix((data, indices, indptr))
+        assert_array_equal(csr.shape,(3,6))
+
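+    # Note: with no explicit shape, CSR infers
+    # (len(indptr) - 1, max(indices) + 1); here (4 - 1, 5 + 1) == (3, 6).
+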
+    def test_constructor6(self):
+        # infer dimensions and dtype from lists
+        indptr = [0, 1, 3, 3]
+        indices = [0, 5, 1, 2]
+        data = [1, 2, 3, 4]
+        csr = csr_matrix((data, indices, indptr))
+        assert_array_equal(csr.shape, (3,6))
+        assert_(np.issubdtype(csr.dtype, np.signedinteger))
+
+    def test_constructor_smallcol(self):
+        # int64 indices not required
+        data = arange(6) + 1
+        col = array([1, 2, 1, 0, 0, 2], dtype=np.int64)
+        ptr = array([0, 2, 4, 6], dtype=np.int64)
+
+        a = csr_matrix((data, col, ptr), shape=(3, 3))
+
+        b = array([[0, 1, 2],
+                   [4, 3, 0],
+                   [5, 0, 6]], 'd')
+
+        assert_equal(a.indptr.dtype, np.dtype(np.int32))
+        assert_equal(a.indices.dtype, np.dtype(np.int32))
+        assert_array_equal(a.toarray(), b)
+
+    def test_constructor_largecol(self):
+        # int64 indices required
+        data = arange(6) + 1
+        large = np.iinfo(np.int32).max + 100
+        col = array([0, 1, 2, large, large+1, large+2], dtype=np.int64)
+        ptr = array([0, 2, 4, 6], dtype=np.int64)
+
+        a = csr_matrix((data, col, ptr))
+
+        assert_equal(a.indptr.dtype, np.dtype(np.int64))
+        assert_equal(a.indices.dtype, np.dtype(np.int64))
+        assert_array_equal(a.shape, (3, max(col)+1))
+
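+    # Note: the index dtype follows the values involved; once a column index
+    # exceeds the int32 range, indices and indptr are kept as int64, as the
+    # two constructor tests above check.
+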
+    def test_sort_indices(self):
+        data = arange(5)
+        indices = array([7, 2, 1, 5, 4])
+        indptr = array([0, 3, 5])
+        asp = csr_matrix((data, indices, indptr), shape=(2,10))
+        bsp = asp.copy()
+        asp.sort_indices()
+        assert_array_equal(asp.indices,[1, 2, 7, 4, 5])
+        assert_array_equal(asp.toarray(), bsp.toarray())
+
+    def test_eliminate_zeros(self):
+        data = array([1, 0, 0, 0, 2, 0, 3, 0])
+        indices = array([1, 2, 3, 4, 5, 6, 7, 8])
+        indptr = array([0, 3, 8])
+        asp = csr_matrix((data, indices, indptr), shape=(2,10))
+        bsp = asp.copy()
+        asp.eliminate_zeros()
+        assert_array_equal(asp.nnz, 3)
+        assert_array_equal(asp.data,[1, 2, 3])
+        assert_array_equal(asp.toarray(), bsp.toarray())
+
+    def test_ufuncs(self):
+        X = csr_matrix(np.arange(20).reshape(4, 5) / 20.)
+        for f in ["sin", "tan", "arcsin", "arctan", "sinh", "tanh",
+                  "arcsinh", "arctanh", "rint", "sign", "expm1", "log1p",
+                  "deg2rad", "rad2deg", "floor", "ceil", "trunc", "sqrt"]:
+            assert_equal(hasattr(csr_matrix, f), True)
+            X2 = getattr(X, f)()
+            assert_equal(X.shape, X2.shape)
+            assert_array_equal(X.indices, X2.indices)
+            assert_array_equal(X.indptr, X2.indptr)
+            assert_array_equal(X2.toarray(), getattr(np, f)(X.toarray()))
+
+    def test_unsorted_arithmetic(self):
+        data = arange(5)
+        indices = array([7, 2, 1, 5, 4])
+        indptr = array([0, 3, 5])
+        asp = csr_matrix((data, indices, indptr), shape=(2,10))
+        data = arange(6)
+        indices = array([8, 1, 5, 7, 2, 4])
+        indptr = array([0, 2, 6])
+        bsp = csr_matrix((data, indices, indptr), shape=(2,10))
+        assert_equal((asp + bsp).toarray(), asp.toarray() + bsp.toarray())
+
+    def test_fancy_indexing_broadcast(self):
+        # broadcasting indexing mode is supported
+        I = np.array([[1], [2], [3]])
+        J = np.array([3, 4, 2])
+
+        np.random.seed(1234)
+        D = asmatrix(np.random.rand(5, 7))
+        S = self.spmatrix(D)
+
+        SIJ = S[I,J]
+        if isspmatrix(SIJ):
+            SIJ = SIJ.toarray()
+        assert_equal(SIJ, D[I,J])
+
+    def test_has_sorted_indices(self):
+        "Ensure has_sorted_indices memoizes sorted state for sort_indices"
+        sorted_inds = np.array([0, 1])
+        unsorted_inds = np.array([1, 0])
+        data = np.array([1, 1])
+        indptr = np.array([0, 2])
+        M = csr_matrix((data, sorted_inds, indptr)).copy()
+        assert_equal(True, M.has_sorted_indices)
+        assert type(M.has_sorted_indices) == bool
+
+        M = csr_matrix((data, unsorted_inds, indptr)).copy()
+        assert_equal(False, M.has_sorted_indices)
+
+        # set by sorting
+        M.sort_indices()
+        assert_equal(True, M.has_sorted_indices)
+        assert_array_equal(M.indices, sorted_inds)
+
+        M = csr_matrix((data, unsorted_inds, indptr)).copy()
+        # set manually (although the underlying indices are unsorted)
+        M.has_sorted_indices = True
+        assert_equal(True, M.has_sorted_indices)
+        assert_array_equal(M.indices, unsorted_inds)
+
+        # ensure sort bypassed when has_sorted_indices == True
+        M.sort_indices()
+        assert_array_equal(M.indices, unsorted_inds)
+
+    def test_has_canonical_format(self):
+        "Ensure has_canonical_format memoizes state for sum_duplicates"
+
+        M = csr_matrix((np.array([2]), np.array([0]), np.array([0, 1])))
+        assert_equal(True, M.has_canonical_format)
+
+        indices = np.array([0, 0])  # contains duplicate
+        data = np.array([1, 1])
+        indptr = np.array([0, 2])
+
+        M = csr_matrix((data, indices, indptr)).copy()
+        assert_equal(False, M.has_canonical_format)
+        assert type(M.has_canonical_format) == bool
+
+        # set by deduplicating
+        M.sum_duplicates()
+        assert_equal(True, M.has_canonical_format)
+        assert_equal(1, len(M.indices))
+
+        M = csr_matrix((data, indices, indptr)).copy()
+        # set manually (although duplicates remain in the underlying data)
+        M.has_canonical_format = True
+        assert_equal(True, M.has_canonical_format)
+        assert_equal(2, len(M.indices))  # unaffected content
+
+        # ensure deduplication bypassed when has_canonical_format == True
+        M.sum_duplicates()
+        assert_equal(2, len(M.indices))  # unaffected content
+
+    def test_scalar_idx_dtype(self):
+        # Check that index dtype takes into account all parameters
+        # passed to sparsetools, including the scalar ones
+        indptr = np.zeros(2, dtype=np.int32)
+        indices = np.zeros(0, dtype=np.int32)
+        vals = np.zeros(0)
+        a = csr_matrix((vals, indices, indptr), shape=(1, 2**31-1))
+        b = csr_matrix((vals, indices, indptr), shape=(1, 2**31))
+        ij = np.zeros((2, 0), dtype=np.int32)
+        c = csr_matrix((vals, ij), shape=(1, 2**31-1))
+        d = csr_matrix((vals, ij), shape=(1, 2**31))
+        e = csr_matrix((1, 2**31-1))
+        f = csr_matrix((1, 2**31))
+        assert_equal(a.indptr.dtype, np.int32)
+        assert_equal(b.indptr.dtype, np.int64)
+        assert_equal(c.indptr.dtype, np.int32)
+        assert_equal(d.indptr.dtype, np.int64)
+        assert_equal(e.indptr.dtype, np.int32)
+        assert_equal(f.indptr.dtype, np.int64)
+
+        # These shouldn't fail
+        for x in [a, b, c, d, e, f]:
+            x + x
+
+    def test_binop_explicit_zeros(self):
+        # Check that binary ops don't introduce spurious explicit zeros.
+        # See gh-9619 for context.
+        a = csr_matrix([0, 1, 0])
+        b = csr_matrix([1, 1, 0])
+        assert (a + b).nnz == 2
+        assert a.multiply(b).nnz == 1
+
+
+TestCSR.init_class()
+
+
+class TestCSC(sparse_test_class()):
+    @classmethod
+    def spmatrix(cls, *args, **kwargs):
+        with suppress_warnings() as sup:
+            sup.filter(SparseEfficiencyWarning,
+                       "Changing the sparsity structure of a csc_matrix is expensive")
+            return csc_matrix(*args, **kwargs)
+    math_dtypes = [np.bool_, np.int_, np.float_, np.complex_]
+
+    def test_constructor1(self):
+        b = array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 2, 0, 3]], 'd')
+        bsp = csc_matrix(b)
+        assert_array_almost_equal(bsp.data,[1,2,1,3])
+        assert_array_equal(bsp.indices,[0,2,1,2])
+        assert_array_equal(bsp.indptr,[0,1,2,3,4])
+        assert_equal(bsp.getnnz(),4)
+        assert_equal(bsp.shape,b.shape)
+        assert_equal(bsp.getformat(),'csc')
+
+    def test_constructor2(self):
+        b = zeros((6,6),'d')
+        b[2,4] = 5
+        bsp = csc_matrix(b)
+        assert_array_almost_equal(bsp.data,[5])
+        assert_array_equal(bsp.indices,[2])
+        assert_array_equal(bsp.indptr,[0,0,0,0,0,1,1])
+
+    def test_constructor3(self):
+        b = array([[1, 0], [0, 0], [0, 2]], 'd')
+        bsp = csc_matrix(b)
+        assert_array_almost_equal(bsp.data,[1,2])
+        assert_array_equal(bsp.indices,[0,2])
+        assert_array_equal(bsp.indptr,[0,1,2])
+
+    def test_constructor4(self):
+        # using (data, ij) format
+        row = array([2, 3, 1, 3, 0, 1, 3, 0, 2, 1, 2])
+        col = array([0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1])
+        data = array([6., 10., 3., 9., 1., 4.,
+                      11., 2., 8., 5., 7.])
+
+        ij = vstack((row,col))
+        csc = csc_matrix((data,ij),(4,3))
+        assert_array_equal(arange(12).reshape(4, 3), csc.toarray())
+
+    def test_constructor5(self):
+        # infer dimensions from arrays
+        indptr = array([0,1,3,3])
+        indices = array([0,5,1,2])
+        data = array([1,2,3,4])
+        csc = csc_matrix((data, indices, indptr))
+        assert_array_equal(csc.shape,(6,3))
+
+    def test_constructor6(self):
+        # infer dimensions and dtype from lists
+        indptr = [0, 1, 3, 3]
+        indices = [0, 5, 1, 2]
+        data = [1, 2, 3, 4]
+        csc = csc_matrix((data, indices, indptr))
+        assert_array_equal(csc.shape,(6,3))
+        assert_(np.issubdtype(csc.dtype, np.signedinteger))
+
+    def test_eliminate_zeros(self):
+        data = array([1, 0, 0, 0, 2, 0, 3, 0])
+        indices = array([1, 2, 3, 4, 5, 6, 7, 8])
+        indptr = array([0, 3, 8])
+        asp = csc_matrix((data, indices, indptr), shape=(10,2))
+        bsp = asp.copy()
+        asp.eliminate_zeros()
+        assert_array_equal(asp.nnz, 3)
+        assert_array_equal(asp.data,[1, 2, 3])
+        assert_array_equal(asp.toarray(), bsp.toarray())
+
+    def test_sort_indices(self):
+        data = arange(5)
+        row = array([7, 2, 1, 5, 4])
+        ptr = [0, 3, 5]
+        asp = csc_matrix((data, row, ptr), shape=(10,2))
+        bsp = asp.copy()
+        asp.sort_indices()
+        assert_array_equal(asp.indices,[1, 2, 7, 4, 5])
+        assert_array_equal(asp.toarray(), bsp.toarray())
+
+    def test_ufuncs(self):
+        X = csc_matrix(np.arange(21).reshape(7, 3) / 21.)
+        for f in ["sin", "tan", "arcsin", "arctan", "sinh", "tanh",
+                  "arcsinh", "arctanh", "rint", "sign", "expm1", "log1p",
+                  "deg2rad", "rad2deg", "floor", "ceil", "trunc", "sqrt"]:
+            assert_equal(hasattr(csr_matrix, f), True)
+            X2 = getattr(X, f)()
+            assert_equal(X.shape, X2.shape)
+            assert_array_equal(X.indices, X2.indices)
+            assert_array_equal(X.indptr, X2.indptr)
+            assert_array_equal(X2.toarray(), getattr(np, f)(X.toarray()))
+
+    def test_unsorted_arithmetic(self):
+        data = arange(5)
+        indices = array([7, 2, 1, 5, 4])
+        indptr = array([0, 3, 5])
+        asp = csc_matrix((data, indices, indptr), shape=(10,2))
+        data = arange(6)
+        indices = array([8, 1, 5, 7, 2, 4])
+        indptr = array([0, 2, 6])
+        bsp = csc_matrix((data, indices, indptr), shape=(10,2))
+        assert_equal((asp + bsp).toarray(), asp.toarray() + bsp.toarray())
+
+    def test_fancy_indexing_broadcast(self):
+        # broadcasting indexing mode is supported
+        I = np.array([[1], [2], [3]])
+        J = np.array([3, 4, 2])
+
+        np.random.seed(1234)
+        D = asmatrix(np.random.rand(5, 7))
+        S = self.spmatrix(D)
+
+        SIJ = S[I,J]
+        if isspmatrix(SIJ):
+            SIJ = SIJ.toarray()
+        assert_equal(SIJ, D[I,J])
+
+    def test_scalar_idx_dtype(self):
+        # Check that index dtype takes into account all parameters
+        # passed to sparsetools, including the scalar ones
+        indptr = np.zeros(2, dtype=np.int32)
+        indices = np.zeros(0, dtype=np.int32)
+        vals = np.zeros(0)
+        a = csc_matrix((vals, indices, indptr), shape=(2**31-1, 1))
+        b = csc_matrix((vals, indices, indptr), shape=(2**31, 1))
+        ij = np.zeros((2, 0), dtype=np.int32)
+        c = csc_matrix((vals, ij), shape=(2**31-1, 1))
+        d = csc_matrix((vals, ij), shape=(2**31, 1))
+        e = csr_matrix((1, 2**31-1))
+        f = csr_matrix((1, 2**31))
+        assert_equal(a.indptr.dtype, np.int32)
+        assert_equal(b.indptr.dtype, np.int64)
+        assert_equal(c.indptr.dtype, np.int32)
+        assert_equal(d.indptr.dtype, np.int64)
+        assert_equal(e.indptr.dtype, np.int32)
+        assert_equal(f.indptr.dtype, np.int64)
+
+        # These shouldn't fail
+        for x in [a, b, c, d, e, f]:
+            x + x
+
+
+TestCSC.init_class()
+
+
+class TestDOK(sparse_test_class(minmax=False, nnz_axis=False)):
+    spmatrix = dok_matrix
+    math_dtypes = [np.int_, np.float_, np.complex_]
+
+    def test_mult(self):
+        A = dok_matrix((10,10))
+        A[0,3] = 10
+        A[5,6] = 20
+        D = A*A.T
+        E = A*A.H
+        assert_array_equal(D.A, E.A)
+
+    def test_add_nonzero(self):
+        A = self.spmatrix((3,2))
+        A[0,1] = -10
+        A[2,0] = 20
+        A = A + 10
+        B = array([[10, 0], [10, 10], [30, 10]])
+        assert_array_equal(A.toarray(), B)
+
+        A = A + 1j
+        B = B + 1j
+        assert_array_equal(A.toarray(), B)
+
+    def test_dok_divide_scalar(self):
+        A = self.spmatrix((3,2))
+        A[0,1] = -10
+        A[2,0] = 20
+
+        assert_array_equal((A/1j).toarray(), A.toarray()/1j)
+        assert_array_equal((A/9).toarray(), A.toarray()/9)
+
+    def test_convert(self):
+        # Test provided by Andrew Straw.  Fails in SciPy <= r1477.
+        (m, n) = (6, 7)
+        a = dok_matrix((m, n))
+
+        # set a few elements, but none in the last column
+        a[2,1] = 1
+        a[0,2] = 2
+        a[3,1] = 3
+        a[1,5] = 4
+        a[4,3] = 5
+        a[4,2] = 6
+
+        # assert that the last column is all zeros
+        assert_array_equal(a.toarray()[:,n-1], zeros(m,))
+
+        # make sure it still works for CSC format
+        csc = a.tocsc()
+        assert_array_equal(csc.toarray()[:,n-1], zeros(m,))
+
+        # now test CSR
+        (m, n) = (n, m)
+        b = a.transpose()
+        assert_equal(b.shape, (m, n))
+        # assert that the last row is all zeros
+        assert_array_equal(b.toarray()[m-1,:], zeros(n,))
+
+        # make sure it still works for CSR format
+        csr = b.tocsr()
+        assert_array_equal(csr.toarray()[m-1,:], zeros(n,))
+
+    def test_ctor(self):
+        # Empty ctor
+        assert_raises(TypeError, dok_matrix)
+
+        # Dense ctor
+        b = array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 2, 0, 3]], 'd')
+        A = dok_matrix(b)
+        assert_equal(b.dtype, A.dtype)
+        assert_equal(A.toarray(), b)
+
+        # Sparse ctor
+        c = csr_matrix(b)
+        assert_equal(A.toarray(), c.toarray())
+
+        data = [[0, 1, 2], [3, 0, 0]]
+        d = dok_matrix(data, dtype=np.float32)
+        assert_equal(d.dtype, np.float32)
+        da = d.toarray()
+        assert_equal(da.dtype, np.float32)
+        assert_array_equal(da, data)
+
+    def test_ticket1160(self):
+        # Regression test for ticket #1160.
+        a = dok_matrix((3,3))
+        a[0,0] = 0
+        # This assert would fail, because the above assignment would
+        # incorrectly call __setitem__ even though the value was 0.
+        assert_((0,0) not in a.keys(), "Unexpected entry (0,0) in keys")
+
+        # Slice assignments were also affected.
+        b = dok_matrix((3,3))
+        b[:,0] = 0
+        assert_(len(b.keys()) == 0, "Unexpected entries in keys")
+
+
+TestDOK.init_class()
+
+
+class TestLIL(sparse_test_class(minmax=False)):
+    spmatrix = lil_matrix
+    math_dtypes = [np.int_, np.float_, np.complex_]
+
+    def test_dot(self):
+        A = zeros((10, 10), np.complex128)
+        A[0, 3] = 10
+        A[5, 6] = 20j
+
+        B = lil_matrix((10, 10), dtype=np.complex128)
+        B[0, 3] = 10
+        B[5, 6] = 20j
+
+        # TODO: properly handle this assertion on ppc64le
+        if platform.machine() != 'ppc64le':
+            assert_array_equal(A @ A.T, (B * B.T).toarray())
+
+        assert_array_equal(A @ A.conjugate().T, (B * B.H).toarray())
+
+    def test_scalar_mul(self):
+        x = lil_matrix((3, 3))
+        x[0, 0] = 2
+
+        x = x*2
+        assert_equal(x[0, 0], 4)
+
+        x = x*0
+        assert_equal(x[0, 0], 0)
+
+    def test_inplace_ops(self):
+        A = lil_matrix([[0, 2, 3], [4, 0, 6]])
+        B = lil_matrix([[0, 1, 0], [0, 2, 3]])
+
+        data = {'add': (B, A + B),
+                'sub': (B, A - B),
+                'mul': (3, A * 3)}
+
+        for op, (other, expected) in data.items():
+            result = A.copy()
+            getattr(result, '__i%s__' % op)(other)
+
+            assert_array_equal(result.toarray(), expected.toarray())
+
+        # Ticket 1604.
+        A = lil_matrix((1, 3), dtype=np.dtype('float64'))
+        B = array([0.1, 0.1, 0.1])
+        A[0, :] += B
+        assert_array_equal(A[0, :].toarray().squeeze(), B)
+
+    def test_lil_iteration(self):
+        row_data = [[1, 2, 3], [4, 5, 6]]
+        B = lil_matrix(array(row_data))
+        for r, row in enumerate(B):
+            assert_array_equal(row.toarray(), array(row_data[r], ndmin=2))
+
+    def test_lil_from_csr(self):
+        # Tests whether a lil_matrix can be constructed from a
+        # csr_matrix.
+        B = lil_matrix((10, 10))
+        B[0, 3] = 10
+        B[5, 6] = 20
+        B[8, 3] = 30
+        B[3, 8] = 40
+        B[8, 9] = 50
+        C = B.tocsr()
+        D = lil_matrix(C)
+        assert_array_equal(C.A, D.A)
+
+    def test_fancy_indexing_lil(self):
+        M = asmatrix(arange(25).reshape(5, 5))
+        A = lil_matrix(M)
+
+        assert_equal(A[array([1, 2, 3]), 2:3].toarray(),
+                     M[array([1, 2, 3]), 2:3])
+
+    def test_point_wise_multiply(self):
+        l = lil_matrix((4, 3))
+        l[0, 0] = 1
+        l[1, 1] = 2
+        l[2, 2] = 3
+        l[3, 1] = 4
+
+        m = lil_matrix((4, 3))
+        m[0, 0] = 1
+        m[0, 1] = 2
+        m[2, 2] = 3
+        m[3, 1] = 4
+        m[3, 2] = 4
+
+        assert_array_equal(l.multiply(m).toarray(),
+                           m.multiply(l).toarray())
+
+        assert_array_equal(l.multiply(m).toarray(),
+                           [[1, 0, 0],
+                            [0, 0, 0],
+                            [0, 0, 9],
+                            [0, 16, 0]])
+
+    def test_lil_multiply_removal(self):
+        # Ticket #1427.
+        a = lil_matrix(np.ones((3, 3)))
+        a *= 2.
+        a[0, :] = 0
+
+
+TestLIL.init_class()
+
+
+class TestCOO(sparse_test_class(getset=False,
+                                slicing=False, slicing_assign=False,
+                                fancy_indexing=False, fancy_assign=False)):
+    spmatrix = coo_matrix
+    math_dtypes = [np.int_, np.float_, np.complex_]
+
+    def test_constructor1(self):
+        # unsorted triplet format
+        row = array([2, 3, 1, 3, 0, 1, 3, 0, 2, 1, 2])
+        col = array([0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1])
+        data = array([6., 10., 3., 9., 1., 4., 11., 2., 8., 5., 7.])
+
+        coo = coo_matrix((data,(row,col)),(4,3))
+        assert_array_equal(arange(12).reshape(4, 3), coo.toarray())
+
+        # using Python lists and a specified dtype
+        coo = coo_matrix(([2**63 + 1, 1], ([0, 1], [0, 1])), dtype=np.uint64)
+        dense = array([[2**63 + 1, 0], [0, 1]], dtype=np.uint64)
+        assert_array_equal(dense, coo.toarray())
+
+    def test_constructor2(self):
+        # unsorted triplet format with duplicates (which are summed)
+        row = array([0,1,2,2,2,2,0,0,2,2])
+        col = array([0,2,0,2,1,1,1,0,0,2])
+        data = array([2,9,-4,5,7,0,-1,2,1,-5])
+        coo = coo_matrix((data,(row,col)),(3,3))
+
+        mat = array([[4, -1, 0], [0, 0, 9], [-3, 7, 0]])
+
+        assert_array_equal(mat, coo.toarray())
+
+    def test_constructor3(self):
+        # empty matrix
+        coo = coo_matrix((4,3))
+
+        assert_array_equal(coo.shape,(4,3))
+        assert_array_equal(coo.row,[])
+        assert_array_equal(coo.col,[])
+        assert_array_equal(coo.data,[])
+        assert_array_equal(coo.toarray(), zeros((4, 3)))
+
+    def test_constructor4(self):
+        # from dense matrix
+        mat = array([[0,1,0,0],
+                     [7,0,3,0],
+                     [0,4,0,0]])
+        coo = coo_matrix(mat)
+        assert_array_equal(coo.toarray(), mat)
+
+        # upgrade rank 1 arrays to row matrix
+        mat = array([0,1,0,0])
+        coo = coo_matrix(mat)
+        assert_array_equal(coo.toarray(), mat.reshape(1, -1))
+
+        # error if second arg interpreted as shape (gh-9919)
+        with pytest.raises(TypeError, match=r'object cannot be interpreted'):
+            coo_matrix([0, 11, 22, 33], ([0, 1, 2, 3], [0, 0, 0, 0]))
+
+        # error if explicit shape arg doesn't match the dense matrix
+        with pytest.raises(ValueError, match=r'inconsistent shapes'):
+            coo_matrix([0, 11, 22, 33], shape=(4, 4))
+
+    def test_constructor_data_ij_dtypeNone(self):
+        data = [1]
+        coo = coo_matrix((data, ([0], [0])), dtype=None)
+        assert coo.dtype == np.array(data).dtype
+
+    @pytest.mark.xfail(run=False, reason='COO does not have a __getitem__')
+    def test_iterator(self):
+        pass
+
+    def test_todia_all_zeros(self):
+        zeros = [[0, 0]]
+        dia = coo_matrix(zeros).todia()
+        assert_array_equal(dia.A, zeros)
+
+    def test_sum_duplicates(self):
+        coo = coo_matrix((4,3))
+        coo.sum_duplicates()
+        coo = coo_matrix(([1,2], ([1,0], [1,0])))
+        coo.sum_duplicates()
+        assert_array_equal(coo.A, [[2,0],[0,1]])
+        coo = coo_matrix(([1,2], ([1,1], [1,1])))
+        coo.sum_duplicates()
+        assert_array_equal(coo.A, [[0,0],[0,3]])
+        assert_array_equal(coo.row, [1])
+        assert_array_equal(coo.col, [1])
+        assert_array_equal(coo.data, [3])
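+
+    def test_sum_duplicates_dense_equiv_sketch(self):
+        # Illustrative sketch (added for exposition, not an upstream test):
+        # summing duplicates changes only the storage, never the dense
+        # view.
+        coo = coo_matrix(([1, 2, 3], ([0, 0, 1], [0, 0, 1])))
+        dense_before = coo.toarray()
+        coo.sum_duplicates()
+        assert_equal(coo.nnz, 2)
+        assert_array_equal(coo.toarray(), dense_before)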
+
+    def test_todok_duplicates(self):
+        coo = coo_matrix(([1,1,1,1], ([0,2,2,0], [0,1,1,0])))
+        dok = coo.todok()
+        assert_array_equal(dok.A, coo.A)
+
+    def test_eliminate_zeros(self):
+        data = array([1, 0, 0, 0, 2, 0, 3, 0])
+        row = array([0, 0, 0, 1, 1, 1, 1, 1])
+        col = array([1, 2, 3, 4, 5, 6, 7, 8])
+        asp = coo_matrix((data, (row, col)), shape=(2,10))
+        bsp = asp.copy()
+        asp.eliminate_zeros()
+        assert_((asp.data != 0).all())
+        assert_array_equal(asp.A, bsp.A)
+
+    def test_reshape_copy(self):
+        arr = [[0, 10, 0, 0], [0, 0, 0, 0], [0, 20, 30, 40]]
+        new_shape = (2, 6)
+        x = coo_matrix(arr)
+
+        y = x.reshape(new_shape)
+        assert_(y.data is x.data)
+
+        y = x.reshape(new_shape, copy=False)
+        assert_(y.data is x.data)
+
+        y = x.reshape(new_shape, copy=True)
+        assert_(not np.may_share_memory(y.data, x.data))
+
+    def test_large_dimensions_reshape(self):
+        # Test that reshape is immune to integer overflow when the number
+        # of elements exceeds 2**31 - 1
+        mat1 = coo_matrix(([1], ([3000000], [1000])), (3000001, 1001))
+        mat2 = coo_matrix(([1], ([1000], [3000000])), (1001, 3000001))
+
+        # assert_array_equal is slow for big matrices because it expects dense
+        # Using __ne__ and nnz instead
+        assert_((mat1.reshape((1001, 3000001), order='C') != mat2).nnz == 0)
+        assert_((mat2.reshape((3000001, 1001), order='F') != mat1).nnz == 0)
+
+
+TestCOO.init_class()
+
+
+class TestDIA(sparse_test_class(getset=False, slicing=False, slicing_assign=False,
+                                fancy_indexing=False, fancy_assign=False,
+                                minmax=False, nnz_axis=False)):
+    spmatrix = dia_matrix
+    math_dtypes = [np.int_, np.float_, np.complex_]
+
+    def test_constructor1(self):
+        D = array([[1, 0, 3, 0],
+                   [1, 2, 0, 4],
+                   [0, 2, 3, 0],
+                   [0, 0, 3, 4]])
+        data = np.array([[1,2,3,4]]).repeat(3,axis=0)
+        offsets = np.array([0,-1,2])
+        assert_equal(dia_matrix((data, offsets), shape=(4, 4)).toarray(), D)
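+
+    def test_dia_addressing_sketch(self):
+        # Illustrative sketch (added for exposition, not an upstream test):
+        # in DIA storage, data[k, j] lands at row j - offsets[k], column j,
+        # which is how the expected matrix above is laid out.
+        data = np.array([[1, 2, 3, 4]])
+        offsets = np.array([1])
+        m = dia_matrix((data, offsets), shape=(4, 4))
+        assert_equal(m.toarray()[0, 1], 2)
+        assert_equal(m.toarray()[2, 3], 4)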
+
+    @pytest.mark.xfail(run=False, reason='DIA does not have a __getitem__')
+    def test_iterator(self):
+        pass
+
+    @with_64bit_maxval_limit(3)
+    def test_setdiag_dtype(self):
+        m = dia_matrix(np.eye(3))
+        assert_equal(m.offsets.dtype, np.int32)
+        m.setdiag((3,), k=2)
+        assert_equal(m.offsets.dtype, np.int32)
+
+        m = dia_matrix(np.eye(4))
+        assert_equal(m.offsets.dtype, np.int64)
+        m.setdiag((3,), k=3)
+        assert_equal(m.offsets.dtype, np.int64)
+
+    @pytest.mark.skip(reason='DIA stores extra zeros')
+    def test_getnnz_axis(self):
+        pass
+
+    def test_convert_gh14555(self):
+        # regression test for gh-14555
+        m = dia_matrix(([[1, 1, 0]], [-1]), shape=(4, 2))
+        expected = m.toarray()
+        assert_array_equal(m.tocsc().toarray(), expected)
+        assert_array_equal(m.tocsr().toarray(), expected)
+
+
+TestDIA.init_class()
+
+
+class TestBSR(sparse_test_class(getset=False,
+                                slicing=False, slicing_assign=False,
+                                fancy_indexing=False, fancy_assign=False,
+                                nnz_axis=False)):
+    spmatrix = bsr_matrix
+    math_dtypes = [np.int_, np.float_, np.complex_]
+
+    def test_constructor1(self):
+        # check native BSR format constructor
+        indptr = array([0,2,2,4])
+        indices = array([0,2,2,3])
+        data = zeros((4,2,3))
+
+        data[0] = array([[0, 1, 2],
+                         [3, 0, 5]])
+        data[1] = array([[0, 2, 4],
+                         [6, 0, 10]])
+        data[2] = array([[0, 4, 8],
+                         [12, 0, 20]])
+        data[3] = array([[0, 5, 10],
+                         [15, 0, 25]])
+
+        A = kron([[1,0,2,0],[0,0,0,0],[0,0,4,5]], [[0,1,2],[3,0,5]])
+        Asp = bsr_matrix((data,indices,indptr),shape=(6,12))
+        assert_equal(Asp.toarray(), A)
+
+        # infer shape from arrays
+        Asp = bsr_matrix((data,indices,indptr))
+        assert_equal(Asp.toarray(), A)
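+
+    def test_bsr_block_layout_sketch(self):
+        # Illustrative sketch (added for exposition, not an upstream test):
+        # BSR is CSR over fixed-size blocks; block-row i owns the blocks
+        # data[indptr[i]:indptr[i+1]] at the block columns listed in
+        # indices[indptr[i]:indptr[i+1]].
+        data = np.ones((2, 2, 2))
+        indices = np.array([0, 1])
+        indptr = np.array([0, 1, 2])
+        m = bsr_matrix((data, indices, indptr), shape=(4, 4))
+        expected = np.zeros((4, 4))
+        expected[0:2, 0:2] = 1
+        expected[2:4, 2:4] = 1
+        assert_array_equal(m.toarray(), expected)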
+
+    def test_constructor2(self):
+        # construct from dense
+
+        # test zero mats
+        for shape in [(1,1), (5,1), (1,10), (10,4), (3,7), (2,1)]:
+            A = zeros(shape)
+            assert_equal(bsr_matrix(A).toarray(), A)
+        A = zeros((4,6))
+        assert_equal(bsr_matrix(A, blocksize=(2, 2)).toarray(), A)
+        assert_equal(bsr_matrix(A, blocksize=(2, 3)).toarray(), A)
+
+        A = kron([[1,0,2,0],[0,0,0,0],[0,0,4,5]], [[0,1,2],[3,0,5]])
+        assert_equal(bsr_matrix(A).toarray(), A)
+        assert_equal(bsr_matrix(A, shape=(6, 12)).toarray(), A)
+        assert_equal(bsr_matrix(A, blocksize=(1, 1)).toarray(), A)
+        assert_equal(bsr_matrix(A, blocksize=(2, 3)).toarray(), A)
+        assert_equal(bsr_matrix(A, blocksize=(2, 6)).toarray(), A)
+        assert_equal(bsr_matrix(A, blocksize=(2, 12)).toarray(), A)
+        assert_equal(bsr_matrix(A, blocksize=(3, 12)).toarray(), A)
+        assert_equal(bsr_matrix(A, blocksize=(6, 12)).toarray(), A)
+
+        A = kron([[1,0,2,0],[0,1,0,0],[0,0,0,0]], [[0,1,2],[3,0,5]])
+        assert_equal(bsr_matrix(A, blocksize=(2, 3)).toarray(), A)
+
+    def test_constructor3(self):
+        # construct from coo-like (data,(row,col)) format
+        arg = ([1,2,3], ([0,1,1], [0,0,1]))
+        A = array([[1,0],[2,3]])
+        assert_equal(bsr_matrix(arg, blocksize=(2, 2)).toarray(), A)
+
+    def test_constructor4(self):
+        # regression test for gh-6292: bsr_matrix((data, indices, indptr))
+        # was trying to compare an int to a None
+        n = 8
+        data = np.ones((n, n, 1), dtype=np.int8)
+        indptr = np.array([0, n], dtype=np.int32)
+        indices = np.arange(n, dtype=np.int32)
+        bsr_matrix((data, indices, indptr), blocksize=(n, 1), copy=False)
+
+    def test_constructor5(self):
+        # check for validations introduced in gh-13400
+        n = 8
+        data_1dim = np.ones(n)
+        data = np.ones((n, n, n))
+        indptr = np.array([0, n])
+        indices = np.arange(n)
+
+        with assert_raises(ValueError):
+            # data ndim check
+            bsr_matrix((data_1dim, indices, indptr))
+
+        with assert_raises(ValueError):
+            # invalid blocksize
+            bsr_matrix((data, indices, indptr), blocksize=(1, 1, 1))
+
+        with assert_raises(ValueError):
+            # mismatching blocksize
+            bsr_matrix((data, indices, indptr), blocksize=(1, 1))
+
+    def test_default_dtype(self):
+        # As a numpy array, `values` has shape (2, 2, 1).
+        values = [[[1], [1]], [[1], [1]]]
+        indptr = np.array([0, 2], dtype=np.int32)
+        indices = np.array([0, 1], dtype=np.int32)
+        b = bsr_matrix((values, indices, indptr), blocksize=(2, 1))
+        assert b.dtype == np.array(values).dtype
+
+    def test_bsr_tocsr(self):
+        # check native conversion from BSR to CSR
+        indptr = array([0, 2, 2, 4])
+        indices = array([0, 2, 2, 3])
+        data = zeros((4, 2, 3))
+
+        data[0] = array([[0, 1, 2],
+                         [3, 0, 5]])
+        data[1] = array([[0, 2, 4],
+                         [6, 0, 10]])
+        data[2] = array([[0, 4, 8],
+                         [12, 0, 20]])
+        data[3] = array([[0, 5, 10],
+                         [15, 0, 25]])
+
+        A = kron([[1, 0, 2, 0], [0, 0, 0, 0], [0, 0, 4, 5]],
+                 [[0, 1, 2], [3, 0, 5]])
+        Absr = bsr_matrix((data, indices, indptr), shape=(6, 12))
+        Acsr = Absr.tocsr()
+        Acsr_via_coo = Absr.tocoo().tocsr()
+        assert_equal(Acsr.toarray(), A)
+        assert_equal(Acsr.toarray(), Acsr_via_coo.toarray())
+
+    def test_eliminate_zeros(self):
+        data = kron([1, 0, 0, 0, 2, 0, 3, 0], [[1,1],[1,1]]).T
+        data = data.reshape(-1,2,2)
+        indices = array([1, 2, 3, 4, 5, 6, 7, 8])
+        indptr = array([0, 3, 8])
+        asp = bsr_matrix((data, indices, indptr), shape=(4,20))
+        bsp = asp.copy()
+        asp.eliminate_zeros()
+        assert_array_equal(asp.nnz, 3*4)
+        assert_array_equal(asp.toarray(), bsp.toarray())
+
+    # github issue #9687
+    def test_eliminate_zeros_all_zero(self):
+        np.random.seed(0)
+        m = bsr_matrix(np.random.random((12, 12)), blocksize=(2, 3))
+
+        # eliminate some blocks, but not all
+        m.data[m.data <= 0.9] = 0
+        m.eliminate_zeros()
+        assert_equal(m.nnz, 66)
+        assert_array_equal(m.data.shape, (11, 2, 3))
+
+        # eliminate all remaining blocks
+        m.data[m.data <= 1.0] = 0
+        m.eliminate_zeros()
+        assert_equal(m.nnz, 0)
+        assert_array_equal(m.data.shape, (0, 2, 3))
+        assert_array_equal(m.toarray(), np.zeros((12, 12)))
+
+        # test fast path
+        m.eliminate_zeros()
+        assert_equal(m.nnz, 0)
+        assert_array_equal(m.data.shape, (0, 2, 3))
+        assert_array_equal(m.toarray(), np.zeros((12, 12)))
+
+    def test_bsr_matvec(self):
+        A = bsr_matrix(arange(2*3*4*5).reshape(2*4,3*5), blocksize=(4,5))
+        x = arange(A.shape[1]).reshape(-1,1)
+        assert_equal(A*x, A.toarray() @ x)
+
+    def test_bsr_matvecs(self):
+        A = bsr_matrix(arange(2*3*4*5).reshape(2*4,3*5), blocksize=(4,5))
+        x = arange(A.shape[1]*6).reshape(-1,6)
+        assert_equal(A*x, A.toarray() @ x)
+
+    @pytest.mark.xfail(run=False, reason='BSR does not have a __getitem__')
+    def test_iterator(self):
+        pass
+
+    @pytest.mark.xfail(run=False, reason='BSR does not have a __setitem__')
+    def test_setdiag(self):
+        pass
+
+    def test_resize_blocked(self):
+        # test resize() with non-(1,1) blocksize
+        D = np.array([[1, 0, 3, 4],
+                      [2, 0, 0, 0],
+                      [3, 0, 0, 0]])
+        S = self.spmatrix(D, blocksize=(1, 2))
+        assert_(S.resize((3, 2)) is None)
+        assert_array_equal(S.A, [[1, 0],
+                                 [2, 0],
+                                 [3, 0]])
+        S.resize((2, 2))
+        assert_array_equal(S.A, [[1, 0],
+                                 [2, 0]])
+        S.resize((3, 2))
+        assert_array_equal(S.A, [[1, 0],
+                                 [2, 0],
+                                 [0, 0]])
+        S.resize((3, 4))
+        assert_array_equal(S.A, [[1, 0, 0, 0],
+                                 [2, 0, 0, 0],
+                                 [0, 0, 0, 0]])
+        assert_raises(ValueError, S.resize, (2, 3))
+
+    @pytest.mark.xfail(run=False, reason='BSR does not have a __setitem__')
+    def test_setdiag_comprehensive(self):
+        pass
+
+    @pytest.mark.skipif(IS_COLAB, reason="exceeds memory limit")
+    def test_scalar_idx_dtype(self):
+        # Check that index dtype takes into account all parameters
+        # passed to sparsetools, including the scalar ones
+        indptr = np.zeros(2, dtype=np.int32)
+        indices = np.zeros(0, dtype=np.int32)
+        vals = np.zeros((0, 1, 1))
+        a = bsr_matrix((vals, indices, indptr), shape=(1, 2**31-1))
+        b = bsr_matrix((vals, indices, indptr), shape=(1, 2**31))
+        c = bsr_matrix((1, 2**31-1))
+        d = bsr_matrix((1, 2**31))
+        assert_equal(a.indptr.dtype, np.int32)
+        assert_equal(b.indptr.dtype, np.int64)
+        assert_equal(c.indptr.dtype, np.int32)
+        assert_equal(d.indptr.dtype, np.int64)
+
+        try:
+            vals2 = np.zeros((0, 1, 2**31-1))
+            vals3 = np.zeros((0, 1, 2**31))
+            e = bsr_matrix((vals2, indices, indptr), shape=(1, 2**31-1))
+            f = bsr_matrix((vals3, indices, indptr), shape=(1, 2**31))
+            assert_equal(e.indptr.dtype, np.int32)
+            assert_equal(f.indptr.dtype, np.int64)
+        except (MemoryError, ValueError):
+            # May fail on 32-bit Python
+            e = 0
+            f = 0
+
+        # These shouldn't fail
+        for x in [a, b, c, d, e, f]:
+            x + x
+
+
+TestBSR.init_class()
+
+
+#------------------------------------------------------------------------------
+# Tests for non-canonical representations (with duplicates, unsorted indices)
+#------------------------------------------------------------------------------
+
+def _same_sum_duplicate(data, *inds, **kwargs):
+    """Duplicates entries to produce the same matrix"""
+    indptr = kwargs.pop('indptr', None)
+    if np.issubdtype(data.dtype, np.bool_) or \
+       np.issubdtype(data.dtype, np.unsignedinteger):
+        if indptr is None:
+            return (data,) + inds
+        else:
+            return (data,) + inds + (indptr,)
+
+    zeros_pos = (data == 0).nonzero()
+
+    # duplicate data
+    data = data.repeat(2, axis=0)
+    data[::2] -= 1
+    data[1::2] = 1
+
+    # don't spoil all explicit zeros
+    if zeros_pos[0].size > 0:
+        pos = tuple(p[0] for p in zeros_pos)
+        pos1 = (2*pos[0],) + pos[1:]
+        pos2 = (2*pos[0]+1,) + pos[1:]
+        data[pos1] = 0
+        data[pos2] = 0
+
+    inds = tuple(indices.repeat(2) for indices in inds)
+
+    if indptr is None:
+        return (data,) + inds
+    else:
+        return (data,) + inds + (indptr * 2,)
+
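+# Illustrative note on _same_sum_duplicate (added for exposition): each
+# stored value x is split into the pair (x - 1, 1), so every entry appears
+# twice while the dense view is unchanged; the first explicit zero, if any,
+# is re-split as (0, 0) so at least one explicit zero survives.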
+
+class _NonCanonicalMixin:
+    def spmatrix(self, D, sorted_indices=False, **kwargs):
+        """Replace D with a non-canonical equivalent: containing
+        duplicate elements and explicit zeros"""
+        construct = super().spmatrix
+        M = construct(D, **kwargs)
+
+        zero_pos = (M.A == 0).nonzero()
+        has_zeros = (zero_pos[0].size > 0)
+        if has_zeros:
+            k = zero_pos[0].size//2
+            with suppress_warnings() as sup:
+                sup.filter(SparseEfficiencyWarning,
+                           "Changing the sparsity structure of a cs[cr]_matrix is expensive")
+                M = self._insert_explicit_zero(M, zero_pos[0][k], zero_pos[1][k])
+
+        arg1 = self._arg1_for_noncanonical(M, sorted_indices)
+        if 'shape' not in kwargs:
+            kwargs['shape'] = M.shape
+        NC = construct(arg1, **kwargs)
+
+        # check that result is valid
+        if NC.dtype in [np.float32, np.complex64]:
+            # For single-precision floats, the differences between M and NC
+            # that are introduced by the extra operations involved in the
+            # construction of NC necessitate a more lenient tolerance level
+            # than the default.
+            rtol = 1e-05
+        else:
+            rtol = 1e-07
+        assert_allclose(NC.A, M.A, rtol=rtol)
+
+        # check that at least one explicit zero
+        if has_zeros:
+            assert_((NC.data == 0).any())
+        # TODO check that NC has duplicates (which are not explicit zeros)
+
+        return NC
+
+    @pytest.mark.skip(reason='bool(matrix) counts explicit zeros')
+    def test_bool(self):
+        pass
+
+    @pytest.mark.skip(reason='getnnz-axis counts explicit zeros')
+    def test_getnnz_axis(self):
+        pass
+
+    @pytest.mark.skip(reason='nnz counts explicit zeros')
+    def test_empty(self):
+        pass
+
+
+class _NonCanonicalCompressedMixin(_NonCanonicalMixin):
+    def _arg1_for_noncanonical(self, M, sorted_indices=False):
+        """Return non-canonical constructor arg1 equivalent to M"""
+        data, indices, indptr = _same_sum_duplicate(M.data, M.indices,
+                                                    indptr=M.indptr)
+        if not sorted_indices:
+            for start, stop in zip(indptr, indptr[1:]):
+                indices[start:stop] = indices[start:stop][::-1].copy()
+                data[start:stop] = data[start:stop][::-1].copy()
+        return data, indices, indptr
+
+    def _insert_explicit_zero(self, M, i, j):
+        M[i,j] = 0
+        return M
+
+
+class _NonCanonicalCSMixin(_NonCanonicalCompressedMixin):
+    def test_getelement(self):
+        def check(dtype, sorted_indices):
+            D = array([[1,0,0],
+                       [4,3,0],
+                       [0,2,0],
+                       [0,0,0]], dtype=dtype)
+            A = self.spmatrix(D, sorted_indices=sorted_indices)
+
+            M,N = D.shape
+
+            for i in range(-M, M):
+                for j in range(-N, N):
+                    assert_equal(A[i,j], D[i,j])
+
+            for ij in [(0,3),(-1,3),(4,0),(4,3),(4,-1), (1, 2, 3)]:
+                assert_raises((IndexError, TypeError), A.__getitem__, ij)
+
+        for dtype in supported_dtypes:
+            for sorted_indices in [False, True]:
+                check(np.dtype(dtype), sorted_indices)
+
+    def test_setitem_sparse(self):
+        D = np.eye(3)
+        A = self.spmatrix(D)
+        B = self.spmatrix([[1,2,3]])
+
+        D[1,:] = B.toarray()
+        with suppress_warnings() as sup:
+            sup.filter(SparseEfficiencyWarning,
+                       "Changing the sparsity structure of a cs[cr]_matrix is expensive")
+            A[1,:] = B
+        assert_array_equal(A.toarray(), D)
+
+        D[:,2] = B.toarray().ravel()
+        with suppress_warnings() as sup:
+            sup.filter(SparseEfficiencyWarning,
+                       "Changing the sparsity structure of a cs[cr]_matrix is expensive")
+            A[:,2] = B.T
+        assert_array_equal(A.toarray(), D)
+
+    @pytest.mark.xfail(run=False, reason='inverse broken with non-canonical matrix')
+    def test_inv(self):
+        pass
+
+    @pytest.mark.xfail(run=False, reason='solve broken with non-canonical matrix')
+    def test_solve(self):
+        pass
+
+
+class TestCSRNonCanonical(_NonCanonicalCSMixin, TestCSR):
+    pass
+
+
+class TestCSCNonCanonical(_NonCanonicalCSMixin, TestCSC):
+    pass
+
+
+class TestBSRNonCanonical(_NonCanonicalCompressedMixin, TestBSR):
+    def _insert_explicit_zero(self, M, i, j):
+        x = M.tocsr()
+        x[i,j] = 0
+        return x.tobsr(blocksize=M.blocksize)
+
+    @pytest.mark.xfail(run=False, reason='diagonal broken with non-canonical BSR')
+    def test_diagonal(self):
+        pass
+
+    @pytest.mark.xfail(run=False, reason='expm broken with non-canonical BSR')
+    def test_expm(self):
+        pass
+
+
+class TestCOONonCanonical(_NonCanonicalMixin, TestCOO):
+    def _arg1_for_noncanonical(self, M, sorted_indices=None):
+        """Return non-canonical constructor arg1 equivalent to M"""
+        data, row, col = _same_sum_duplicate(M.data, M.row, M.col)
+        return data, (row, col)
+
+    def _insert_explicit_zero(self, M, i, j):
+        M.data = np.r_[M.data.dtype.type(0), M.data]
+        M.row = np.r_[M.row.dtype.type(i), M.row]
+        M.col = np.r_[M.col.dtype.type(j), M.col]
+        return M
+
+    def test_setdiag_noncanonical(self):
+        m = self.spmatrix(np.eye(3))
+        m.sum_duplicates()
+        m.setdiag([3, 2], k=1)
+        m.sum_duplicates()
+        assert_(np.all(np.diff(m.col) >= 0))
+
+
+def cases_64bit():
+    TEST_CLASSES = [TestBSR, TestCOO, TestCSC, TestCSR, TestDIA,
+                    # lil/dok->other conversion operations have get_index_dtype
+                    TestDOK, TestLIL
+                    ]
+
+    # The following features are missing, so skip the tests:
+    SKIP_TESTS = {
+        'test_expm': 'expm for 64-bit indices not available',
+        'test_inv': 'linsolve for 64-bit indices not available',
+        'test_solve': 'linsolve for 64-bit indices not available',
+        'test_scalar_idx_dtype': 'test implemented in base class',
+        'test_large_dimensions_reshape': 'test actually requires 64-bit to work',
+        'test_constructor_smallcol': 'test verifies int32 indexes',
+        'test_constructor_largecol': 'test verifies int64 indexes',
+    }
+
+    for cls in TEST_CLASSES:
+        for method_name in sorted(dir(cls)):
+            method = getattr(cls, method_name)
+            if (method_name.startswith('test_') and
+                    not getattr(method, 'slow', False)):
+                marks = []
+
+                msg = SKIP_TESTS.get(method_name)
+                if bool(msg):
+                    marks += [pytest.mark.skip(reason=msg)]
+
+                if _pep440.parse(pytest.__version__) >= _pep440.Version("3.6.0"):
+                    markers = getattr(method, 'pytestmark', [])
+                    for mark in markers:
+                        if mark.name in ('skipif', 'skip', 'xfail', 'xslow'):
+                            marks.append(mark)
+                else:
+                    for mname in ['skipif', 'skip', 'xfail', 'xslow']:
+                        if hasattr(method, mname):
+                            marks += [getattr(method, mname)]
+
+                yield pytest.param(cls, method_name, marks=marks)
+
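+# Illustrative note (added for exposition): cases_64bit() yields one
+# (test class, method name) pair per inherited test, so the parametrized
+# Test64Bit methods below re-run the whole suite under altered index-dtype
+# limits.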
+
+class Test64Bit:
+    MAT_CLASSES = [bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dia_matrix]
+
+    def _create_some_matrix(self, mat_cls, m, n):
+        return mat_cls(np.random.rand(m, n))
+
+    def _compare_index_dtype(self, m, dtype):
+        dtype = np.dtype(dtype)
+        if isinstance(m, (csc_matrix, csr_matrix, bsr_matrix)):
+            return (m.indices.dtype == dtype) and (m.indptr.dtype == dtype)
+        elif isinstance(m, coo_matrix):
+            return (m.row.dtype == dtype) and (m.col.dtype == dtype)
+        elif isinstance(m, dia_matrix):
+            return (m.offsets.dtype == dtype)
+        else:
+            raise ValueError("matrix %r has no integer indices" % (m,))
+
+    def test_decorator_maxval_limit(self):
+        # Test that the with_64bit_maxval_limit decorator works
+
+        @with_64bit_maxval_limit(maxval_limit=10)
+        def check(mat_cls):
+            m = mat_cls(np.random.rand(10, 1))
+            assert_(self._compare_index_dtype(m, np.int32))
+            m = mat_cls(np.random.rand(11, 1))
+            assert_(self._compare_index_dtype(m, np.int64))
+
+        for mat_cls in self.MAT_CLASSES:
+            check(mat_cls)
+
+    def test_decorator_maxval_random(self):
+        # Test that the with_64bit_maxval_limit decorator works (2)
+
+        @with_64bit_maxval_limit(random=True)
+        def check(mat_cls):
+            seen_32 = False
+            seen_64 = False
+            for k in range(100):
+                m = self._create_some_matrix(mat_cls, 9, 9)
+                seen_32 = seen_32 or self._compare_index_dtype(m, np.int32)
+                seen_64 = seen_64 or self._compare_index_dtype(m, np.int64)
+                if seen_32 and seen_64:
+                    break
+            else:
+                raise AssertionError("both 32 and 64 bit indices not seen")
+
+        for mat_cls in self.MAT_CLASSES:
+            check(mat_cls)
+
+    def _check_resiliency(self, cls, method_name, **kw):
+        # Resiliency test, to check that sparse matrices deal reasonably
+        # with varying index data types.
+
+        @with_64bit_maxval_limit(**kw)
+        def check(cls, method_name):
+            instance = cls()
+            if hasattr(instance, 'setup_method'):
+                instance.setup_method()
+            try:
+                getattr(instance, method_name)()
+            finally:
+                if hasattr(instance, 'teardown_method'):
+                    instance.teardown_method()
+
+        check(cls, method_name)
+
+    @pytest.mark.parametrize('cls,method_name', cases_64bit())
+    def test_resiliency_limit_10(self, cls, method_name):
+        self._check_resiliency(cls, method_name, maxval_limit=10)
+
+    @pytest.mark.parametrize('cls,method_name', cases_64bit())
+    def test_resiliency_random(self, cls, method_name):
+        # bsr_matrix.eliminate_zeros relies on csr_matrix constructor
+        # not making copies of index arrays --- this is not
+        # necessarily true when we pick the index data type randomly
+        self._check_resiliency(cls, method_name, random=True)
+
+    @pytest.mark.parametrize('cls,method_name', cases_64bit())
+    def test_resiliency_all_32(self, cls, method_name):
+        self._check_resiliency(cls, method_name, fixed_dtype=np.int32)
+
+    @pytest.mark.parametrize('cls,method_name', cases_64bit())
+    def test_resiliency_all_64(self, cls, method_name):
+        self._check_resiliency(cls, method_name, fixed_dtype=np.int64)
+
+    @pytest.mark.parametrize('cls,method_name', cases_64bit())
+    def test_no_64(self, cls, method_name):
+        self._check_resiliency(cls, method_name, assert_32bit=True)
+
+    def test_downcast_intp(self):
+        # Check that bincount and ufunc.reduceat intp downcasts are
+        # dealt with. The point here is to trigger points in the code
+        # that can fail on 32-bit systems when using 64-bit indices,
+        # due to use of functions that only work with intp-size
+        # indices.
+
+        @with_64bit_maxval_limit(fixed_dtype=np.int64,
+                                 downcast_maxval=1)
+        def check_limited():
+            # These involve indices larger than `downcast_maxval`
+            a = csc_matrix([[1, 2], [3, 4], [5, 6]])
+            assert_raises(AssertionError, a.getnnz, axis=1)
+            assert_raises(AssertionError, a.sum, axis=0)
+
+            a = csr_matrix([[1, 2, 3], [3, 4, 6]])
+            assert_raises(AssertionError, a.getnnz, axis=0)
+
+            a = coo_matrix([[1, 2, 3], [3, 4, 5]])
+            assert_raises(AssertionError, a.getnnz, axis=0)
+
+        @with_64bit_maxval_limit(fixed_dtype=np.int64)
+        def check_unlimited():
+            # These involve indices larger than `downcast_maxval`
+            a = csc_matrix([[1, 2], [3, 4], [5, 6]])
+            a.getnnz(axis=1)
+            a.sum(axis=0)
+
+            a = csr_matrix([[1, 2, 3], [3, 4, 6]])
+            a.getnnz(axis=0)
+
+            a = coo_matrix([[1, 2, 3], [3, 4, 5]])
+            a.getnnz(axis=0)
+
+        check_limited()
+        check_unlimited()
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_construct.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_construct.py
new file mode 100644
index 00000000..d71935a6
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_construct.py
@@ -0,0 +1,582 @@
+"""test sparse matrix construction functions"""
+
+import numpy as np
+from numpy import array
+from numpy.testing import (assert_equal, assert_,
+        assert_array_equal, assert_array_almost_equal_nulp)
+import pytest
+from pytest import raises as assert_raises
+from scipy._lib._testutils import check_free_memory
+from scipy._lib._util import check_random_state
+
+from scipy.sparse import (csr_matrix, coo_matrix,
+                          _construct as construct)
+from scipy.sparse._construct import rand as sprand
+from scipy.sparse._sputils import matrix
+
+sparse_formats = ['csr','csc','coo','bsr','dia','lil','dok']
+
+#TODO check whether format=XXX is respected
+
+
+def _sprandn(m, n, density=0.01, format="coo", dtype=None, random_state=None):
+    # Helper function for testing.
+    random_state = check_random_state(random_state)
+    data_rvs = random_state.standard_normal
+    return construct.random(m, n, density, format, dtype,
+                            random_state, data_rvs)
+
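+# Illustrative note (added for exposition): unlike construct.rand, the
+# _sprandn helper above draws its nonzero values from a standard normal
+# distribution (data_rvs), so generated matrices can contain negative
+# entries.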
+
+class TestConstructUtils:
+    def test_spdiags(self):
+        diags1 = array([[1, 2, 3, 4, 5]])
+        diags2 = array([[1, 2, 3, 4, 5],
+                        [6, 7, 8, 9,10]])
+        diags3 = array([[1, 2, 3, 4, 5],
+                        [6, 7, 8, 9,10],
+                        [11,12,13,14,15]])
+
+        cases = []
+        cases.append((diags1, 0, 1, 1, [[1]]))
+        cases.append((diags1, [0], 1, 1, [[1]]))
+        cases.append((diags1, [0], 2, 1, [[1],[0]]))
+        cases.append((diags1, [0], 1, 2, [[1,0]]))
+        cases.append((diags1, [1], 1, 2, [[0,2]]))
+        cases.append((diags1,[-1], 1, 2, [[0,0]]))
+        cases.append((diags1, [0], 2, 2, [[1,0],[0,2]]))
+        cases.append((diags1,[-1], 2, 2, [[0,0],[1,0]]))
+        cases.append((diags1, [3], 2, 2, [[0,0],[0,0]]))
+        cases.append((diags1, [0], 3, 4, [[1,0,0,0],[0,2,0,0],[0,0,3,0]]))
+        cases.append((diags1, [1], 3, 4, [[0,2,0,0],[0,0,3,0],[0,0,0,4]]))
+        cases.append((diags1, [2], 3, 5, [[0,0,3,0,0],[0,0,0,4,0],[0,0,0,0,5]]))
+
+        cases.append((diags2, [0,2], 3, 3, [[1,0,8],[0,2,0],[0,0,3]]))
+        cases.append((diags2, [-1,0], 3, 4, [[6,0,0,0],[1,7,0,0],[0,2,8,0]]))
+        cases.append((diags2, [2,-3], 6, 6, [[0,0,3,0,0,0],
+                                             [0,0,0,4,0,0],
+                                             [0,0,0,0,5,0],
+                                             [6,0,0,0,0,0],
+                                             [0,7,0,0,0,0],
+                                             [0,0,8,0,0,0]]))
+
+        cases.append((diags3, [-1,0,1], 6, 6, [[6,12, 0, 0, 0, 0],
+                                               [1, 7,13, 0, 0, 0],
+                                               [0, 2, 8,14, 0, 0],
+                                               [0, 0, 3, 9,15, 0],
+                                               [0, 0, 0, 4,10, 0],
+                                               [0, 0, 0, 0, 5, 0]]))
+        cases.append((diags3, [-4,2,-1], 6, 5, [[0, 0, 8, 0, 0],
+                                                [11, 0, 0, 9, 0],
+                                                [0,12, 0, 0,10],
+                                                [0, 0,13, 0, 0],
+                                                [1, 0, 0,14, 0],
+                                                [0, 2, 0, 0,15]]))
+        cases.append((diags3, [-1, 1, 2], len(diags3[0]), len(diags3[0]),
+                      [[0, 7, 13, 0, 0],
+                       [1, 0, 8, 14, 0],
+                       [0, 2, 0, 9, 15],
+                       [0, 0, 3, 0, 10],
+                       [0, 0, 0, 4, 0]]))
+
+        for d, o, m, n, result in cases:
+            if len(d[0]) == m and m == n:
+                assert_equal(construct.spdiags(d, o).toarray(), result)
+            assert_equal(construct.spdiags(d, o, m, n).toarray(), result)
+            assert_equal(construct.spdiags(d, o, (m, n)).toarray(), result)
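+
+    def test_spdiags_alignment_sketch(self):
+        # Illustrative sketch (added for exposition, not an upstream test):
+        # spdiags aligns data by column number, so for a positive offset k
+        # the first k values of a diagonal row are dropped, as the cases
+        # above assume.
+        m = construct.spdiags([[1, 2, 3]], [1], 3, 3)
+        assert_equal(m.toarray(), [[0, 2, 0], [0, 0, 3], [0, 0, 0]])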
+
+    def test_diags(self):
+        a = array([1, 2, 3, 4, 5])
+        b = array([6, 7, 8, 9, 10])
+        c = array([11, 12, 13, 14, 15])
+
+        cases = []
+        cases.append((a[:1], 0, (1, 1), [[1]]))
+        cases.append(([a[:1]], [0], (1, 1), [[1]]))
+        cases.append(([a[:1]], [0], (2, 1), [[1],[0]]))
+        cases.append(([a[:1]], [0], (1, 2), [[1,0]]))
+        cases.append(([a[:1]], [1], (1, 2), [[0,1]]))
+        cases.append(([a[:2]], [0], (2, 2), [[1,0],[0,2]]))
+        cases.append(([a[:1]],[-1], (2, 2), [[0,0],[1,0]]))
+        cases.append(([a[:3]], [0], (3, 4), [[1,0,0,0],[0,2,0,0],[0,0,3,0]]))
+        cases.append(([a[:3]], [1], (3, 4), [[0,1,0,0],[0,0,2,0],[0,0,0,3]]))
+        cases.append(([a[:1]], [-2], (3, 5), [[0,0,0,0,0],[0,0,0,0,0],[1,0,0,0,0]]))
+        cases.append(([a[:2]], [-1], (3, 5), [[0,0,0,0,0],[1,0,0,0,0],[0,2,0,0,0]]))
+        cases.append(([a[:3]], [0], (3, 5), [[1,0,0,0,0],[0,2,0,0,0],[0,0,3,0,0]]))
+        cases.append(([a[:3]], [1], (3, 5), [[0,1,0,0,0],[0,0,2,0,0],[0,0,0,3,0]]))
+        cases.append(([a[:3]], [2], (3, 5), [[0,0,1,0,0],[0,0,0,2,0],[0,0,0,0,3]]))
+        cases.append(([a[:2]], [3], (3, 5), [[0,0,0,1,0],[0,0,0,0,2],[0,0,0,0,0]]))
+        cases.append(([a[:1]], [4], (3, 5), [[0,0,0,0,1],[0,0,0,0,0],[0,0,0,0,0]]))
+        cases.append(([a[:1]], [-4], (5, 3), [[0,0,0],[0,0,0],[0,0,0],[0,0,0],[1,0,0]]))
+        cases.append(([a[:2]], [-3], (5, 3), [[0,0,0],[0,0,0],[0,0,0],[1,0,0],[0,2,0]]))
+        cases.append(([a[:3]], [-2], (5, 3), [[0,0,0],[0,0,0],[1,0,0],[0,2,0],[0,0,3]]))
+        cases.append(([a[:3]], [-1], (5, 3), [[0,0,0],[1,0,0],[0,2,0],[0,0,3],[0,0,0]]))
+        cases.append(([a[:3]], [0], (5, 3), [[1,0,0],[0,2,0],[0,0,3],[0,0,0],[0,0,0]]))
+        cases.append(([a[:2]], [1], (5, 3), [[0,1,0],[0,0,2],[0,0,0],[0,0,0],[0,0,0]]))
+        cases.append(([a[:1]], [2], (5, 3), [[0,0,1],[0,0,0],[0,0,0],[0,0,0],[0,0,0]]))
+
+        cases.append(([a[:3],b[:1]], [0,2], (3, 3), [[1,0,6],[0,2,0],[0,0,3]]))
+        cases.append(([a[:2],b[:3]], [-1,0], (3, 4), [[6,0,0,0],[1,7,0,0],[0,2,8,0]]))
+        cases.append(([a[:4],b[:3]], [2,-3], (6, 6), [[0,0,1,0,0,0],
+                                                      [0,0,0,2,0,0],
+                                                      [0,0,0,0,3,0],
+                                                      [6,0,0,0,0,4],
+                                                      [0,7,0,0,0,0],
+                                                      [0,0,8,0,0,0]]))
+
+        cases.append(([a[:4],b,c[:4]], [-1,0,1], (5, 5), [[6,11, 0, 0, 0],
+                                                          [1, 7,12, 0, 0],
+                                                          [0, 2, 8,13, 0],
+                                                          [0, 0, 3, 9,14],
+                                                          [0, 0, 0, 4,10]]))
+        cases.append(([a[:2],b[:3],c], [-4,2,-1], (6, 5), [[0, 0, 6, 0, 0],
+                                                           [11, 0, 0, 7, 0],
+                                                           [0,12, 0, 0, 8],
+                                                           [0, 0,13, 0, 0],
+                                                           [1, 0, 0,14, 0],
+                                                           [0, 2, 0, 0,15]]))
+
+        # arrays longer than needed are OK
+        cases.append(([a], [0], (1, 1), [[1]]))
+        cases.append(([a[:3],b], [0,2], (3, 3), [[1, 0, 6], [0, 2, 0], [0, 0, 3]]))
+        cases.append((np.array([[1, 2, 3], [4, 5, 6]]), [0,-1], (3, 3), [[1, 0, 0], [4, 2, 0], [0, 5, 3]]))
+
+        # scalar case: broadcasting
+        cases.append(([1,-2,1], [1,0,-1], (3, 3), [[-2, 1, 0],
+                                                   [1, -2, 1],
+                                                   [0, 1, -2]]))
+
+        for d, o, shape, result in cases:
+            err_msg = "%r %r %r %r" % (d, o, shape, result)
+            assert_equal(construct.diags(d, o, shape=shape).toarray(),
+                         result, err_msg=err_msg)
+
+            if shape[0] == shape[1] and hasattr(d[0], '__len__') and len(d[0]) <= max(shape):
+                # should be able to find the shape automatically
+                assert_equal(construct.diags(d, o).toarray(), result,
+                             err_msg=err_msg)
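+
+    def test_diags_vs_spdiags_offset_sketch(self):
+        # Illustrative sketch (added for exposition, not an upstream test):
+        # unlike spdiags, diags takes each diagonal at its natural length,
+        # so a positive offset does not drop leading values.
+        assert_equal(construct.diags([[1, 2]], [1]).toarray(),
+                     [[0, 1, 0], [0, 0, 2], [0, 0, 0]])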
+
+    def test_diags_default(self):
+        a = array([1, 2, 3, 4, 5])
+        assert_equal(construct.diags(a).toarray(), np.diag(a))
+
+    def test_diags_default_bad(self):
+        a = array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6]])
+        assert_raises(ValueError, construct.diags, a)
+
+    def test_diags_bad(self):
+        a = array([1, 2, 3, 4, 5])
+        b = array([6, 7, 8, 9, 10])
+        c = array([11, 12, 13, 14, 15])
+
+        cases = []
+        cases.append(([a[:0]], 0, (1, 1)))
+        cases.append(([a[:4],b,c[:3]], [-1,0,1], (5, 5)))
+        cases.append(([a[:2],c,b[:3]], [-4,2,-1], (6, 5)))
+        cases.append(([a[:2],c,b[:3]], [-4,2,-1], None))
+        cases.append(([], [-4,2,-1], None))
+        cases.append(([1], [-5], (4, 4)))
+        cases.append(([a], 0, None))
+
+        for d, o, shape in cases:
+            assert_raises(ValueError, construct.diags, d, o, shape)
+
+        assert_raises(TypeError, construct.diags, [[None]], [0])
+
+    def test_diags_vs_diag(self):
+        # Check that
+        #
+        #    diags([a, b, ...], [i, j, ...]) == diag(a, i) + diag(b, j) + ...
+        #
+
+        np.random.seed(1234)
+
+        for n_diags in [1, 2, 3, 4, 5, 10]:
+            n = 1 + n_diags//2 + np.random.randint(0, 10)
+
+            offsets = np.arange(-n+1, n-1)
+            np.random.shuffle(offsets)
+            offsets = offsets[:n_diags]
+
+            diagonals = [np.random.rand(n - abs(q)) for q in offsets]
+
+            mat = construct.diags(diagonals, offsets)
+            dense_mat = sum([np.diag(x, j) for x, j in zip(diagonals, offsets)])
+
+            assert_array_almost_equal_nulp(mat.toarray(), dense_mat)
+
+            if len(offsets) == 1:
+                mat = construct.diags(diagonals[0], offsets[0])
+                dense_mat = np.diag(diagonals[0], offsets[0])
+                assert_array_almost_equal_nulp(mat.toarray(), dense_mat)
+
+    def test_diags_dtype(self):
+        x = construct.diags([2.2], [0], shape=(2, 2), dtype=int)
+        assert_equal(x.dtype, int)
+        assert_equal(x.toarray(), [[2, 0], [0, 2]])
+
+    def test_diags_one_diagonal(self):
+        d = list(range(5))
+        for k in range(-5, 6):
+            assert_equal(construct.diags(d, k).toarray(),
+                         construct.diags([d], [k]).toarray())
+
+    def test_diags_empty(self):
+        x = construct.diags([])
+        assert_equal(x.shape, (0, 0))
+
+    def test_identity(self):
+        assert_equal(construct.identity(1).toarray(), [[1]])
+        assert_equal(construct.identity(2).toarray(), [[1,0],[0,1]])
+
+        I = construct.identity(3, dtype='int8', format='dia')
+        assert_equal(I.dtype, np.dtype('int8'))
+        assert_equal(I.format, 'dia')
+
+        for fmt in sparse_formats:
+            I = construct.identity(3, format=fmt)
+            assert_equal(I.format, fmt)
+            assert_equal(I.toarray(), [[1,0,0],[0,1,0],[0,0,1]])
+
+    def test_eye(self):
+        assert_equal(construct.eye(1,1).toarray(), [[1]])
+        assert_equal(construct.eye(2,3).toarray(), [[1,0,0],[0,1,0]])
+        assert_equal(construct.eye(3,2).toarray(), [[1,0],[0,1],[0,0]])
+        assert_equal(construct.eye(3,3).toarray(), [[1,0,0],[0,1,0],[0,0,1]])
+
+        assert_equal(construct.eye(3,3,dtype='int16').dtype, np.dtype('int16'))
+
+        for m in [3, 5]:
+            for n in [3, 5]:
+                for k in range(-5,6):
+                    assert_equal(construct.eye(m, n, k=k).toarray(), np.eye(m, n, k=k))
+                    if m == n:
+                        assert_equal(construct.eye(m, k=k).toarray(), np.eye(m, n, k=k))
+
+    def test_eye_one(self):
+        assert_equal(construct.eye(1).toarray(), [[1]])
+        assert_equal(construct.eye(2).toarray(), [[1,0],[0,1]])
+
+        I = construct.eye(3, dtype='int8', format='dia')
+        assert_equal(I.dtype, np.dtype('int8'))
+        assert_equal(I.format, 'dia')
+
+        for fmt in sparse_formats:
+            I = construct.eye(3, format=fmt)
+            assert_equal(I.format, fmt)
+            assert_equal(I.toarray(), [[1,0,0],[0,1,0],[0,0,1]])
+
+    def test_kron(self):
+        cases = []
+
+        cases.append(array([[0]]))
+        cases.append(array([[-1]]))
+        cases.append(array([[4]]))
+        cases.append(array([[10]]))
+        cases.append(array([[0],[0]]))
+        cases.append(array([[0,0]]))
+        cases.append(array([[1,2],[3,4]]))
+        cases.append(array([[0,2],[5,0]]))
+        cases.append(array([[0,2,-6],[8,0,14]]))
+        cases.append(array([[5,4],[0,0],[6,0]]))
+        cases.append(array([[5,4,4],[1,0,0],[6,0,8]]))
+        cases.append(array([[0,1,0,2,0,5,8]]))
+        cases.append(array([[0.5,0.125,0,3.25],[0,2.5,0,0]]))
+
+        for a in cases:
+            for b in cases:
+                expected = np.kron(a, b)
+                for fmt in sparse_formats:
+                    result = construct.kron(csr_matrix(a), csr_matrix(b), format=fmt) 
+                    assert_equal(result.format, fmt)
+                    assert_array_equal(result.toarray(), expected)
+
+    def test_kron_large(self):
+        n = 2**16
+        a = construct.eye(1, n, n-1)
+        b = construct.eye(n, 1, 1-n)
+
+        construct.kron(a, a)
+        construct.kron(b, b)
+
+    def test_kronsum(self):
+        cases = []
+
+        cases.append(array([[0]]))
+        cases.append(array([[-1]]))
+        cases.append(array([[4]]))
+        cases.append(array([[10]]))
+        cases.append(array([[1,2],[3,4]]))
+        cases.append(array([[0,2],[5,0]]))
+        cases.append(array([[0,2,-6],[8,0,14],[0,3,0]]))
+        cases.append(array([[1,0,0],[0,5,-1],[4,-2,8]]))
+
+        for a in cases:
+            for b in cases:
+                result = construct.kronsum(
+                    csr_matrix(a), csr_matrix(b)).toarray()
+                expected = np.kron(np.eye(len(b)), a) + \
+                        np.kron(b, np.eye(len(a)))
+                assert_array_equal(result,expected)
+
+    def test_vstack(self):
+
+        A = coo_matrix([[1,2],[3,4]])
+        B = coo_matrix([[5,6]])
+
+        expected = array([[1, 2],
+                          [3, 4],
+                          [5, 6]])
+        assert_equal(construct.vstack([A, B]).toarray(), expected)
+        assert_equal(construct.vstack([A, B], dtype=np.float32).dtype,
+                     np.float32)
+
+        assert_equal(construct.vstack([A.tocsr(), B.tocsr()]).toarray(),
+                     expected)
+        result = construct.vstack([A.tocsr(), B.tocsr()], dtype=np.float32)
+        assert_equal(result.dtype, np.float32)
+        assert_equal(result.indices.dtype, np.int32)
+        assert_equal(result.indptr.dtype, np.int32)
+
+        assert_equal(construct.vstack([A.tocsc(), B.tocsc()]).toarray(),
+                     expected)
+        result = construct.vstack([A.tocsc(), B.tocsc()], dtype=np.float32)
+        assert_equal(result.dtype, np.float32)
+        assert_equal(result.indices.dtype, np.int32)
+        assert_equal(result.indptr.dtype, np.int32)
+
+    def test_hstack(self):
+
+        A = coo_matrix([[1,2],[3,4]])
+        B = coo_matrix([[5],[6]])
+
+        expected = array([[1, 2, 5],
+                          [3, 4, 6]])
+        assert_equal(construct.hstack([A, B]).toarray(), expected)
+        assert_equal(construct.hstack([A, B], dtype=np.float32).dtype,
+                     np.float32)
+        assert_equal(construct.hstack([A.tocsc(), B.tocsc()]).toarray(),
+                     expected)
+        assert_equal(construct.hstack([A.tocsc(), B.tocsc()],
+                                      dtype=np.float32).dtype,
+                     np.float32)
+        assert_equal(construct.hstack([A.tocsr(), B.tocsr()]).toarray(),
+                     expected)
+        assert_equal(construct.hstack([A.tocsr(), B.tocsr()],
+                                      dtype=np.float32).dtype,
+                     np.float32)
+
+    def test_bmat(self):
+
+        A = coo_matrix([[1, 2], [3, 4]])
+        B = coo_matrix([[5],[6]])
+        C = coo_matrix([[7]])
+        D = coo_matrix((0, 0))
+
+        expected = array([[1, 2, 5],
+                          [3, 4, 6],
+                          [0, 0, 7]])
+        assert_equal(construct.bmat([[A, B], [None, C]]).toarray(), expected)
+        E = csr_matrix((1, 2), dtype=np.int32)
+        assert_equal(construct.bmat([[A.tocsr(), B.tocsr()],
+                                     [E, C.tocsr()]]).toarray(),
+                     expected)
+        assert_equal(construct.bmat([[A.tocsc(), B.tocsc()],
+                                     [E.tocsc(), C.tocsc()]]).toarray(),
+                     expected)
+
+        expected = array([[1, 2, 0],
+                          [3, 4, 0],
+                          [0, 0, 7]])
+        assert_equal(construct.bmat([[A, None], [None, C]]).toarray(),
+                     expected)
+        assert_equal(construct.bmat([[A.tocsr(), E.T.tocsr()],
+                                     [E, C.tocsr()]]).toarray(),
+                     expected)
+        assert_equal(construct.bmat([[A.tocsc(), E.T.tocsc()],
+                                     [E.tocsc(), C.tocsc()]]).toarray(),
+                     expected)
+
+        Z = csr_matrix((1, 1), dtype=np.int32)
+        expected = array([[0, 5],
+                          [0, 6],
+                          [7, 0]])
+        assert_equal(construct.bmat([[None, B], [C, None]]).toarray(),
+                     expected)
+        assert_equal(construct.bmat([[E.T.tocsr(), B.tocsr()],
+                                     [C.tocsr(), Z]]).toarray(),
+                     expected)
+        assert_equal(construct.bmat([[E.T.tocsc(), B.tocsc()],
+                                     [C.tocsc(), Z.tocsc()]]).toarray(),
+                     expected)
+
+        expected = matrix(np.empty((0, 0)))
+        assert_equal(construct.bmat([[None, None]]).toarray(), expected)
+        assert_equal(construct.bmat([[None, D], [D, None]]).toarray(),
+                     expected)
+
+        # test bug reported in gh-5976
+        expected = array([[7]])
+        assert_equal(construct.bmat([[None, D], [C, None]]).toarray(),
+                     expected)
+
+        # test failure cases
+        with assert_raises(ValueError) as excinfo:
+            construct.bmat([[A], [B]])
+        excinfo.match(r'Got blocks\[1,0\]\.shape\[1\] == 1, expected 2')
+
+        with assert_raises(ValueError) as excinfo:
+            construct.bmat([[A.tocsr()], [B.tocsr()]])
+        excinfo.match(r'incompatible dimensions for axis 1')
+
+        with assert_raises(ValueError) as excinfo:
+            construct.bmat([[A.tocsc()], [B.tocsc()]])
+        excinfo.match(r'Mismatching dimensions along axis 1: ({1, 2}|{2, 1})')
+
+        with assert_raises(ValueError) as excinfo:
+            construct.bmat([[A, C]])
+        excinfo.match(r'Got blocks\[0,1\]\.shape\[0\] == 1, expected 2')
+
+        with assert_raises(ValueError) as excinfo:
+            construct.bmat([[A.tocsr(), C.tocsr()]])
+        excinfo.match(r'Mismatching dimensions along axis 0: ({1, 2}|{2, 1})')
+
+        with assert_raises(ValueError) as excinfo:
+            construct.bmat([[A.tocsc(), C.tocsc()]])
+        excinfo.match(r'incompatible dimensions for axis 0')
+
+    @pytest.mark.slow
+    @pytest.mark.xfail_on_32bit("Can't create large array for test")
+    def test_concatenate_int32_overflow(self):
+        """ test for indptr overflow when concatenating matrices """
+        check_free_memory(30000)
+
+        n = 33000
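+        # 33000**2 = 1.089e9 nonzeros per matrix; stacking two of them
+        # pushes the final indptr entry to ~2.18e9 > 2**31 - 1 (the int32
+        # max), which is what forces the int64 promotion checked below.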
+        A = csr_matrix(np.ones((n, n), dtype=bool))
+        B = A.copy()
+        C = construct._compressed_sparse_stack((A,B), 0)
+
+        assert_(np.all(np.equal(np.diff(C.indptr), n)))
+        assert_equal(C.indices.dtype, np.int64)
+        assert_equal(C.indptr.dtype, np.int64)
+
+    def test_block_diag_basic(self):
+        """ basic test for block_diag """
+        A = coo_matrix([[1,2],[3,4]])
+        B = coo_matrix([[5],[6]])
+        C = coo_matrix([[7]])
+
+        expected = array([[1, 2, 0, 0],
+                          [3, 4, 0, 0],
+                          [0, 0, 5, 0],
+                          [0, 0, 6, 0],
+                          [0, 0, 0, 7]])
+
+        assert_equal(construct.block_diag((A, B, C)).toarray(), expected)
+
+    def test_block_diag_scalar_1d_args(self):
+        """ block_diag with scalar and 1d arguments """
+        # one 1d matrix and a scalar
+        assert_array_equal(construct.block_diag([[2,3], 4]).toarray(),
+                           [[2, 3, 0], [0, 0, 4]])
+
+    def test_block_diag_1(self):
+        """ block_diag with one matrix """
+        assert_equal(construct.block_diag([[1, 0]]).toarray(),
+                     array([[1, 0]]))
+        assert_equal(construct.block_diag([[[1, 0]]]).toarray(),
+                     array([[1, 0]]))
+        assert_equal(construct.block_diag([[[1], [0]]]).toarray(),
+                     array([[1], [0]]))
+        # just one scalar
+        assert_equal(construct.block_diag([1]).toarray(),
+                     array([[1]]))
+
+    def test_block_diag_sparse_matrices(self):
+        """ block_diag with sparse matrices """
+
+        sparse_col_matrices = [coo_matrix(([[1, 2, 3]]), shape=(1, 3)),
+                               coo_matrix(([[4, 5]]), shape=(1, 2))]
+        block_sparse_cols_matrices = construct.block_diag(sparse_col_matrices)
+        assert_equal(block_sparse_cols_matrices.toarray(),
+                     array([[1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]))
+
+        sparse_row_matrices = [coo_matrix(([[1], [2], [3]]), shape=(3, 1)),
+                               coo_matrix(([[4], [5]]), shape=(2, 1))]
+        block_sparse_row_matrices = construct.block_diag(sparse_row_matrices)
+        assert_equal(block_sparse_row_matrices.toarray(),
+                     array([[1, 0], [2, 0], [3, 0], [0, 4], [0, 5]]))
+
+    def test_random_sampling(self):
+        # Simple sanity checks for sparse random sampling.
+        for f in sprand, _sprandn:
+            for t in [np.float32, np.float64, np.longdouble,
+                      np.int32, np.int64, np.complex64, np.complex128]:
+                x = f(5, 10, density=0.1, dtype=t)
+                assert_equal(x.dtype, t)
+                assert_equal(x.shape, (5, 10))
+                assert_equal(x.nnz, 5)
+
+            x1 = f(5, 10, density=0.1, random_state=4321)
+            assert_equal(x1.dtype, np.double)
+
+            x2 = f(5, 10, density=0.1,
+                   random_state=np.random.RandomState(4321))
+
+            assert_array_equal(x1.data, x2.data)
+            assert_array_equal(x1.row, x2.row)
+            assert_array_equal(x1.col, x2.col)
+
+            for density in [0.0, 0.1, 0.5, 1.0]:
+                x = f(5, 10, density=density)
+                assert_equal(x.nnz, int(density * np.prod(x.shape)))
+
+            for fmt in ['coo', 'csc', 'csr', 'lil']:
+                x = f(5, 10, format=fmt)
+                assert_equal(x.format, fmt)
+
+            assert_raises(ValueError, lambda: f(5, 10, 1.1))
+            assert_raises(ValueError, lambda: f(5, 10, -0.1))
+
+    def test_rand(self):
+        # Simple distributional checks for sparse.rand.
+        random_states = [None, 4321, np.random.RandomState()]
+        try:
+            gen = np.random.default_rng()
+            random_states.append(gen)
+        except AttributeError:
+            pass
+
+        for random_state in random_states:
+            x = sprand(10, 20, density=0.5, dtype=np.float64,
+                       random_state=random_state)
+            assert_(np.all(np.less_equal(0, x.data)))
+            assert_(np.all(np.less_equal(x.data, 1)))
+
+    def test_randn(self):
+        # Simple distributional checks for sparse.randn.
+        # Statistically, some of these should be negative
+        # and some should be greater than 1.
+        random_states = [None, 4321, np.random.RandomState()]
+        try:
+            gen = np.random.default_rng()
+            random_states.append(gen)
+        except AttributeError:
+            pass
+
+        for random_state in random_states:
+            x = _sprandn(10, 20, density=0.5, dtype=np.float64,
+                         random_state=random_state)
+            assert_(np.any(np.less(x.data, 0)))
+            assert_(np.any(np.less(1, x.data)))
+
+    def test_random_accept_str_dtype(self):
+        # anything that np.dtype can convert to a dtype should be accepted
+        # for the dtype
+        construct.random(10, 10, dtype='d')
+
+    def test_random_sparse_matrix_returns_correct_number_of_non_zero_elements(self):
+        # A 10 x 10 matrix, with density of 12.65%, should have 13 nonzero elements.
+        # 10 x 10 x 0.1265 = 12.65, which should be rounded up to 13, not 12.
+        sparse_matrix = construct.random(10, 10, density=0.1265)
+        assert_equal(sparse_matrix.count_nonzero(), 13)
+
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_csc.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_csc.py
new file mode 100644
index 00000000..f48aed32
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_csc.py
@@ -0,0 +1,98 @@
+import numpy as np
+from numpy.testing import assert_array_almost_equal, assert_
+from scipy.sparse import csr_matrix, csc_matrix, lil_matrix
+
+import pytest
+
+
+def test_csc_getrow():
+    N = 10
+    np.random.seed(0)
+    X = np.random.random((N, N))
+    X[X > 0.7] = 0
+    Xcsc = csc_matrix(X)
+
+    for i in range(N):
+        arr_row = X[i:i + 1, :]
+        csc_row = Xcsc.getrow(i)
+
+        assert_array_almost_equal(arr_row, csc_row.toarray())
+        assert_(type(csc_row) is csr_matrix)
+
+
+def test_csc_getcol():
+    N = 10
+    np.random.seed(0)
+    X = np.random.random((N, N))
+    X[X > 0.7] = 0
+    Xcsc = csc_matrix(X)
+
+    for i in range(N):
+        arr_col = X[:, i:i + 1]
+        csc_col = Xcsc.getcol(i)
+
+        assert_array_almost_equal(arr_col, csc_col.toarray())
+        assert_(type(csc_col) is csc_matrix)
+
+@pytest.mark.parametrize("matrix_input, axis, expected_shape",
+    [(csc_matrix([[1, 0],
+                [0, 0],
+                [0, 2]]),
+      0, (0, 2)),
+     (csc_matrix([[1, 0],
+                [0, 0],
+                [0, 2]]),
+      1, (3, 0)),
+     (csc_matrix([[1, 0],
+                [0, 0],
+                [0, 2]]),
+      'both', (0, 0)),
+     (csc_matrix([[0, 1, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0],
+                [0, 0, 2, 3, 0, 1]]),
+      0, (0, 6))])
+def test_csc_empty_slices(matrix_input, axis, expected_shape):
+    # see gh-11127 for related discussion
+    slice_1 = matrix_input.A.shape[0] - 1
+    slice_2 = slice_1
+    slice_3 = slice_2 - 1
+
+    if axis == 0:
+        actual_shape_1 = matrix_input[slice_1:slice_2, :].A.shape
+        actual_shape_2 = matrix_input[slice_1:slice_3, :].A.shape
+    elif axis == 1:
+        actual_shape_1 = matrix_input[:, slice_1:slice_2].A.shape
+        actual_shape_2 = matrix_input[:, slice_1:slice_3].A.shape
+    elif axis == 'both':
+        actual_shape_1 = matrix_input[slice_1:slice_2, slice_1:slice_2].A.shape
+        actual_shape_2 = matrix_input[slice_1:slice_3, slice_1:slice_3].A.shape
+
+    assert actual_shape_1 == expected_shape
+    assert actual_shape_1 == actual_shape_2
+
+
+@pytest.mark.parametrize('ax', (-2, -1, 0, 1, None))
+def test_argmax_overflow(ax):
+    # See gh-13646: Windows integer overflow for large sparse matrices.
+    dim = (100000, 100000)
+    A = lil_matrix(dim)
+    A[-2, -2] = 42
+    A[-3, -3] = 0.1234
+    A = csc_matrix(A)
+    idx = A.argmax(axis=ax)
+
+    if ax is None:
+        # idx is a single flattened index
+        # that we need to convert to a 2d index pair;
+        # can't do this with np.unravel_index because
+        # the dimensions are too large
+        ii = idx % dim[0]
+        jj = idx // dim[0]
+    else:
+        # idx is an array of size A.shape[ax];
+        # check the max index to make sure no overflow
+        # was encountered
+        assert np.count_nonzero(idx) == A.nnz
+        ii, jj = np.max(idx), np.argmax(idx)
+
+    assert A[ii, jj] == A[-2, -2]
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_csr.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_csr.py
new file mode 100644
index 00000000..5a05767f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_csr.py
@@ -0,0 +1,169 @@
+import numpy as np
+from numpy.testing import assert_array_almost_equal, assert_
+from scipy.sparse import csr_matrix, hstack
+import pytest
+
+
+def _check_csr_rowslice(i, sl, X, Xcsr):
+    np_slice = X[i, sl]
+    csr_slice = Xcsr[i, sl]
+    assert_array_almost_equal(np_slice, csr_slice.toarray()[0])
+    assert_(type(csr_slice) is csr_matrix)
+
+
+def test_csr_rowslice():
+    N = 10
+    np.random.seed(0)
+    X = np.random.random((N, N))
+    X[X > 0.7] = 0
+    Xcsr = csr_matrix(X)
+
+    slices = [slice(None, None, None),
+              slice(None, None, -1),
+              slice(1, -2, 2),
+              slice(-2, 1, -2)]
+
+    for i in range(N):
+        for sl in slices:
+            _check_csr_rowslice(i, sl, X, Xcsr)
+
+
+def test_csr_getrow():
+    N = 10
+    np.random.seed(0)
+    X = np.random.random((N, N))
+    X[X > 0.7] = 0
+    Xcsr = csr_matrix(X)
+
+    for i in range(N):
+        arr_row = X[i:i + 1, :]
+        csr_row = Xcsr.getrow(i)
+
+        assert_array_almost_equal(arr_row, csr_row.toarray())
+        assert_(type(csr_row) is csr_matrix)
+
+
+def test_csr_getcol():
+    N = 10
+    np.random.seed(0)
+    X = np.random.random((N, N))
+    X[X > 0.7] = 0
+    Xcsr = csr_matrix(X)
+
+    for i in range(N):
+        arr_col = X[:, i:i + 1]
+        csr_col = Xcsr.getcol(i)
+
+        assert_array_almost_equal(arr_col, csr_col.toarray())
+        assert_(type(csr_col) is csr_matrix)
+
+@pytest.mark.parametrize("matrix_input, axis, expected_shape",
+    [(csr_matrix([[1, 0, 0, 0],
+                [0, 0, 0, 0],
+                [0, 2, 3, 0]]),
+      0, (0, 4)),
+     (csr_matrix([[1, 0, 0, 0],
+                [0, 0, 0, 0],
+                [0, 2, 3, 0]]),
+      1, (3, 0)),
+     (csr_matrix([[1, 0, 0, 0],
+                [0, 0, 0, 0],
+                [0, 2, 3, 0]]),
+      'both', (0, 0)),
+     (csr_matrix([[0, 1, 0, 0, 0],
+                [0, 0, 0, 0, 0],
+                [0, 0, 2, 3, 0]]),
+      0, (0, 5))])
+def test_csr_empty_slices(matrix_input, axis, expected_shape):
+    # see gh-11127 for related discussion
+    slice_1 = matrix_input.A.shape[0] - 1
+    slice_2 = slice_1
+    slice_3 = slice_2 - 1
+
+    if axis == 0:
+        actual_shape_1 = matrix_input[slice_1:slice_2, :].A.shape
+        actual_shape_2 = matrix_input[slice_1:slice_3, :].A.shape
+    elif axis == 1:
+        actual_shape_1 = matrix_input[:, slice_1:slice_2].A.shape
+        actual_shape_2 = matrix_input[:, slice_1:slice_3].A.shape
+    elif axis == 'both':
+        actual_shape_1 = matrix_input[slice_1:slice_2, slice_1:slice_2].A.shape
+        actual_shape_2 = matrix_input[slice_1:slice_3, slice_1:slice_3].A.shape
+
+    assert actual_shape_1 == expected_shape
+    assert actual_shape_1 == actual_shape_2
+
+
+def test_csr_bool_indexing():
+    data = csr_matrix([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
+    list_indices1 = [False, True, False]
+    array_indices1 = np.array(list_indices1)
+    list_indices2 = [[False, True, False], [False, True, False], [False, True, False]]
+    array_indices2 = np.array(list_indices2)
+    list_indices3 = ([False, True, False], [False, True, False])
+    array_indices3 = (np.array(list_indices3[0]), np.array(list_indices3[1]))
+    slice_list1 = data[list_indices1].toarray()
+    slice_array1 = data[array_indices1].toarray()
+    slice_list2 = data[list_indices2]
+    slice_array2 = data[array_indices2]
+    slice_list3 = data[list_indices3]
+    slice_array3 = data[array_indices3]
+    assert (slice_list1 == slice_array1).all()
+    assert (slice_list2 == slice_array2).all()
+    assert (slice_list3 == slice_array3).all()
+
+
+def test_csr_hstack_int64():
+    """
+    Tests if hstack properly promotes to indices and indptr arrays to np.int64
+    when using np.int32 during concatenation would result in either array
+    overflowing.
+    """
+    max_int32 = np.iinfo(np.int32).max
+
+    # First case: indices would overflow with int32
+    data = [1.0]
+    row = [0]
+
+    max_indices_1 = max_int32 - 1
+    max_indices_2 = 3
+
+    # Individual indices arrays are representable with int32
+    col_1 = [max_indices_1 - 1]
+    col_2 = [max_indices_2 - 1]
+
+    X_1 = csr_matrix((data, (row, col_1)))
+    X_2 = csr_matrix((data, (row, col_2)))
+
+    assert max(max_indices_1 - 1, max_indices_2 - 1) < max_int32
+    assert X_1.indices.dtype == X_1.indptr.dtype == np.int32
+    assert X_2.indices.dtype == X_2.indptr.dtype == np.int32
+
+    # ... but when concatenating their CSR matrices, the resulting indices
+    # array can't be represented with int32 and must be promoted to int64.
+    X_hs = hstack([X_1, X_2], format="csr")
+
+    assert X_hs.indices.max() == max_indices_1 + max_indices_2 - 1
+    assert max_indices_1 + max_indices_2 - 1 > max_int32
+    assert X_hs.indices.dtype == X_hs.indptr.dtype == np.int64
+
+    # Even if the matrices are empty, we must account for their size
+    # contribution so that we may safely set the final elements.
+    X_1_empty = csr_matrix(X_1.shape)
+    X_2_empty = csr_matrix(X_2.shape)
+    X_hs_empty = hstack([X_1_empty, X_2_empty], format="csr")
+
+    assert X_hs_empty.shape == X_hs.shape
+    assert X_hs_empty.indices.dtype == np.int64
+
+    # Should be just small enough to stay in int32 after stack. Note that
+    # we theoretically could support indices.max() == max_int32, but due to an
+    # edge-case in the underlying sparsetools code
+    # (namely the `coo_tocsr` routine),
+    # we require that max(X_hs_32.shape) < max_int32 as well.
+    # Hence we can only support max_int32 - 1.
+    col_3 = [max_int32 - max_indices_1 - 1]
+    X_3 = csr_matrix((data, (row, col_3)))
+    X_hs_32 = hstack([X_1, X_3], format="csr")
+    assert X_hs_32.indices.dtype == np.int32
+    assert X_hs_32.indices.max() == max_int32 - 1
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_extract.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_extract.py
new file mode 100644
index 00000000..94bbf5cf
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_extract.py
@@ -0,0 +1,42 @@
+"""test sparse matrix construction functions"""
+
+from numpy.testing import assert_equal
+from scipy.sparse import csr_matrix
+
+import numpy as np
+from scipy.sparse import _extract
+
+
+class TestExtract:
+    def setup_method(self):
+        self.cases = [
+            csr_matrix([[1,2]]),
+            csr_matrix([[1,0]]),
+            csr_matrix([[0,0]]),
+            csr_matrix([[1],[2]]),
+            csr_matrix([[1],[0]]),
+            csr_matrix([[0],[0]]),
+            csr_matrix([[1,2],[3,4]]),
+            csr_matrix([[0,1],[0,0]]),
+            csr_matrix([[0,0],[1,0]]),
+            csr_matrix([[0,0],[0,0]]),
+            csr_matrix([[1,2,0,0,3],[4,5,0,6,7],[0,0,8,9,0]]),
+            csr_matrix([[1,2,0,0,3],[4,5,0,6,7],[0,0,8,9,0]]).T,
+        ]
+
+    def test_find(self):
+        for A in self.cases:
+            I,J,V = _extract.find(A)
+            assert_equal(A.toarray(),
+                         csr_matrix((V, (I, J)), shape=A.shape).toarray())
+
+    def test_tril(self):
+        for A in self.cases:
+            B = A.toarray()
+            for k in [-3,-2,-1,0,1,2,3]:
+                assert_equal(_extract.tril(A,k=k).toarray(), np.tril(B,k=k))
+
+    def test_triu(self):
+        for A in self.cases:
+            B = A.toarray()
+            for k in [-3,-2,-1,0,1,2,3]:
+                assert_equal(_extract.triu(A,k=k).toarray(), np.triu(B,k=k))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_matrix_io.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_matrix_io.py
new file mode 100644
index 00000000..e7c63612
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_matrix_io.py
@@ -0,0 +1,86 @@
+import os
+import numpy as np
+import tempfile
+
+from pytest import raises as assert_raises
+from numpy.testing import assert_equal, assert_
+
+from scipy.sparse import (csc_matrix, csr_matrix, bsr_matrix, dia_matrix,
+                          coo_matrix, save_npz, load_npz, dok_matrix)
+
+
+DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
+
+
+def _save_and_load(matrix):
+    fd, tmpfile = tempfile.mkstemp(suffix='.npz')
+    os.close(fd)
+    try:
+        save_npz(tmpfile, matrix)
+        loaded_matrix = load_npz(tmpfile)
+    finally:
+        os.remove(tmpfile)
+    return loaded_matrix
+
+def _check_save_and_load(dense_matrix):
+    for matrix_class in [csc_matrix, csr_matrix, bsr_matrix, dia_matrix, coo_matrix]:
+        matrix = matrix_class(dense_matrix)
+        loaded_matrix = _save_and_load(matrix)
+        assert_(type(loaded_matrix) is matrix_class)
+        assert_(loaded_matrix.shape == dense_matrix.shape)
+        assert_(loaded_matrix.dtype == dense_matrix.dtype)
+        assert_equal(loaded_matrix.toarray(), dense_matrix)
+
+def test_save_and_load_random():
+    N = 10
+    np.random.seed(0)
+    dense_matrix = np.random.random((N, N))
+    dense_matrix[dense_matrix > 0.7] = 0
+    _check_save_and_load(dense_matrix)
+
+def test_save_and_load_empty():
+    dense_matrix = np.zeros((4,6))
+    _check_save_and_load(dense_matrix)
+
+def test_save_and_load_one_entry():
+    dense_matrix = np.zeros((4,6))
+    dense_matrix[1,2] = 1
+    _check_save_and_load(dense_matrix)
+
+
+def test_malicious_load():
+    class Executor:
+        def __reduce__(self):
+            return (assert_, (False, 'unexpected code execution'))
+
+    fd, tmpfile = tempfile.mkstemp(suffix='.npz')
+    os.close(fd)
+    try:
+        np.savez(tmpfile, format=Executor())
+
+        # Should raise a ValueError, not execute code
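+        # (load_npz passes allow_pickle=False to np.load, so the pickled
+        # Executor payload above is refused with a ValueError instead of
+        # being unpickled and executed)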
+        assert_raises(ValueError, load_npz, tmpfile)
+    finally:
+        os.remove(tmpfile)
+
+
+def test_py23_compatibility():
+    # Try loading files saved on Python 2 and Python 3.  They are not
+    # the same, since files saved with SciPy versions < 1.0.0 may
+    # contain unicode.
+
+    a = load_npz(os.path.join(DATA_DIR, 'csc_py2.npz'))
+    b = load_npz(os.path.join(DATA_DIR, 'csc_py3.npz'))
+    c = csc_matrix([[0]])
+
+    assert_equal(a.toarray(), c.toarray())
+    assert_equal(b.toarray(), c.toarray())
+
+def test_implemented_error():
+    # Attempts to save an unsupported matrix type and checks that a
+    # NotImplementedError is raised.
+
+    x = dok_matrix((2,3))
+    x[0,1] = 1
+
+    assert_raises(NotImplementedError, save_npz, 'x.npz', x)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_sparsetools.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_sparsetools.py
new file mode 100644
index 00000000..dcada59f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_sparsetools.py
@@ -0,0 +1,337 @@
+import sys
+import os
+import gc
+import threading
+
+import numpy as np
+from numpy.testing import assert_equal, assert_, assert_allclose
+from scipy.sparse import (_sparsetools, coo_matrix, csr_matrix, csc_matrix,
+                          bsr_matrix, dia_matrix)
+from scipy.sparse._sputils import supported_dtypes
+from scipy._lib._testutils import check_free_memory
+
+import pytest
+from pytest import raises as assert_raises
+
+
+def int_to_int8(n):
+    """
+    Wrap an integer to the interval [-128, 127].
+    """
+    return (n + 128) % 256 - 128
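+    # For example: int_to_int8(127) == 127, int_to_int8(128) == -128, and
+    # int_to_int8(300) == 44, matching np.int8 wraparound (300 - 256 == 44).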
+
+
+def test_exception():
+    assert_raises(MemoryError, _sparsetools.test_throw_error)
+
+
+def test_threads():
+    # Smoke test for parallel threaded execution; doesn't actually
+    # check that code runs in parallel, but just that it produces
+    # expected results.
+    nthreads = 10
+    niter = 100
+
+    n = 20
+    a = csr_matrix(np.ones([n, n]))
+    bres = []
+
+    class Worker(threading.Thread):
+        def run(self):
+            b = a.copy()
+            for j in range(niter):
+                _sparsetools.csr_plus_csr(n, n,
+                                          a.indptr, a.indices, a.data,
+                                          a.indptr, a.indices, a.data,
+                                          b.indptr, b.indices, b.data)
+            bres.append(b)
+
+    threads = [Worker() for _ in range(nthreads)]
+    for thread in threads:
+        thread.start()
+    for thread in threads:
+        thread.join()
+
+    for b in bres:
+        assert_(np.all(b.toarray() == 2))
+
+
+def test_regression_std_vector_dtypes():
+    # Regression test for gh-3780, checking the std::vector typemaps
+    # in sparsetools.cxx are complete.
+    for dtype in supported_dtypes:
+        ad = np.array([[1, 2], [3, 4]]).astype(dtype)
+        a = csr_matrix(ad, dtype=dtype)
+
+        # getcol is one function using std::vector typemaps, and should not fail
+        assert_equal(a.getcol(0).toarray(), ad[:, :1])
+
+
+@pytest.mark.slow
+@pytest.mark.xfail_on_32bit("Can't create large array for test")
+def test_nnz_overflow():
+    # Regression test for gh-7230 / gh-7871, checking that coo_toarray
+    # with nnz > int32max doesn't overflow.
+    nnz = np.iinfo(np.int32).max + 1
+    # Ensure ~20 GB of RAM is free to run this test.
+    check_free_memory((4 + 4 + 1) * nnz / 1e6 + 0.5)
+
+    # Use nnz duplicate entries to keep the dense version small.
+    row = np.zeros(nnz, dtype=np.int32)
+    col = np.zeros(nnz, dtype=np.int32)
+    data = np.zeros(nnz, dtype=np.int8)
+    data[-1] = 4
+    s = coo_matrix((data, (row, col)), shape=(1, 1), copy=False)
+    # Sums nnz duplicates to produce a 1x1 array containing 4.
+    d = s.toarray()
+
+    assert_allclose(d, [[4]])
+
+
+@pytest.mark.skipif(not (sys.platform.startswith('linux') and np.dtype(np.intp).itemsize >= 8),
+                    reason="test requires 64-bit Linux")
+class TestInt32Overflow:
+    """
+    Some of the sparsetools routines use dense 2D matrices whose
+    total size is not bounded by the nnz of the sparse matrix. These
+    routines used to suffer from int32 wraparounds; here, we try to
+    check that the wraparounds don't occur any more.
+    """
+    # choose n large enough
+    n = 50000
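+    # 50000**2 = 2.5e9 > 2**31 - 1 ~= 2.15e9, so flat indices into an
+    # n x n dense workspace overflow int32 (verified in setup_method)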
+
+    def setup_method(self):
+        assert self.n**2 > np.iinfo(np.int32).max
+
+        # check there's enough memory even if everything is run at the
+        # same time
+        try:
+            parallel_count = int(os.environ.get('PYTEST_XDIST_WORKER_COUNT', '1'))
+        except ValueError:
+            parallel_count = np.inf
+
+        check_free_memory(3000 * parallel_count)
+
+    def teardown_method(self):
+        gc.collect()
+
+    def test_coo_todense(self):
+        # Check *_todense routines (cf. gh-2179)
+        #
+        # All of them in the end call coo_matrix.todense
+
+        n = self.n
+
+        i = np.array([0, n-1])
+        j = np.array([0, n-1])
+        data = np.array([1, 2], dtype=np.int8)
+        m = coo_matrix((data, (i, j)))
+
+        r = m.todense()
+        assert_equal(r[0,0], 1)
+        assert_equal(r[-1,-1], 2)
+        del r
+        gc.collect()
+
+    @pytest.mark.slow
+    def test_matvecs(self):
+        # Check *_matvecs routines
+        n = self.n
+
+        i = np.array([0, n-1])
+        j = np.array([0, n-1])
+        data = np.array([1, 2], dtype=np.int8)
+        m = coo_matrix((data, (i, j)))
+
+        b = np.ones((n, n), dtype=np.int8)
+        for sptype in (csr_matrix, csc_matrix, bsr_matrix):
+            m2 = sptype(m)
+            r = m2.dot(b)
+            assert_equal(r[0,0], 1)
+            assert_equal(r[-1,-1], 2)
+            del r
+            gc.collect()
+
+        del b
+        gc.collect()
+
+    @pytest.mark.slow
+    def test_dia_matvec(self):
+        # Check: huge dia_matrix _matvec
+        n = self.n
+        data = np.ones((n, n), dtype=np.int8)
+        offsets = np.arange(n)
+        m = dia_matrix((data, offsets), shape=(n, n))
+        v = np.ones(m.shape[1], dtype=np.int8)
+        r = m.dot(v)
+        assert_equal(r[0], int_to_int8(n))
+        del data, offsets, m, v, r
+        gc.collect()
+
+    _bsr_ops = [pytest.param("matmat", marks=pytest.mark.xslow),
+                pytest.param("matvecs", marks=pytest.mark.xslow),
+                "matvec",
+                "diagonal",
+                "sort_indices",
+                pytest.param("transpose", marks=pytest.mark.xslow)]
+
+    @pytest.mark.slow
+    @pytest.mark.parametrize("op", _bsr_ops)
+    def test_bsr_1_block(self, op):
+        # Check: huge bsr_matrix (1-block)
+        #
+        # The point here is that indices inside a block may overflow.
+
+        def get_matrix():
+            n = self.n
+            data = np.ones((1, n, n), dtype=np.int8)
+            indptr = np.array([0, 1], dtype=np.int32)
+            indices = np.array([0], dtype=np.int32)
+            m = bsr_matrix((data, indices, indptr), blocksize=(n, n), copy=False)
+            del data, indptr, indices
+            return m
+
+        gc.collect()
+        try:
+            getattr(self, "_check_bsr_" + op)(get_matrix)
+        finally:
+            gc.collect()
+
+    @pytest.mark.slow
+    @pytest.mark.parametrize("op", _bsr_ops)
+    def test_bsr_n_block(self, op):
+        # Check: huge bsr_matrix (n-block)
+        #
+        # The point here is that while indices within a block don't
+        # overflow, accumulators across many blocks may.
+
+        def get_matrix():
+            n = self.n
+            data = np.ones((n, n, 1), dtype=np.int8)
+            indptr = np.array([0, n], dtype=np.int32)
+            indices = np.arange(n, dtype=np.int32)
+            m = bsr_matrix((data, indices, indptr), blocksize=(n, 1), copy=False)
+            del data, indptr, indices
+            return m
+
+        gc.collect()
+        try:
+            getattr(self, "_check_bsr_" + op)(get_matrix)
+        finally:
+            gc.collect()
+
+    def _check_bsr_matvecs(self, m):
+        m = m()
+        n = self.n
+
+        # _matvecs
+        r = m.dot(np.ones((n, 2), dtype=np.int8))
+        assert_equal(r[0, 0], int_to_int8(n))
+
+    def _check_bsr_matvec(self, m):
+        m = m()
+        n = self.n
+
+        # _matvec
+        r = m.dot(np.ones((n,), dtype=np.int8))
+        assert_equal(r[0], int_to_int8(n))
+
+    def _check_bsr_diagonal(self, m):
+        m = m()
+        n = self.n
+
+        # _diagonal
+        r = m.diagonal()
+        assert_equal(r, np.ones(n))
+
+    def _check_bsr_sort_indices(self, m):
+        # _sort_indices
+        m = m()
+        m.sort_indices()
+
+    def _check_bsr_transpose(self, m):
+        # _transpose
+        m = m()
+        m.transpose()
+
+    def _check_bsr_matmat(self, m):
+        m = m()
+        n = self.n
+
+        # _bsr_matmat
+        m2 = bsr_matrix(np.ones((n, 2), dtype=np.int8), blocksize=(m.blocksize[1], 2))
+        m.dot(m2)  # shouldn't SIGSEGV
+        del m2
+
+        # _bsr_matmat
+        m2 = bsr_matrix(np.ones((2, n), dtype=np.int8), blocksize=(2, m.blocksize[0]))
+        m2.dot(m)  # shouldn't SIGSEGV
+
+
+@pytest.mark.skip(reason="64-bit indices in sparse matrices not available")
+def test_csr_matmat_int64_overflow():
+    n = 3037000500
+    assert n**2 > np.iinfo(np.int64).max
+
+    # the test would take crazy amounts of memory
+    check_free_memory(n * (8*2 + 1) * 3 / 1e6)
+
+    # int64 overflow
+    data = np.ones((n,), dtype=np.int8)
+    indptr = np.arange(n+1, dtype=np.int64)
+    indices = np.zeros(n, dtype=np.int64)
+    a = csr_matrix((data, indices, indptr))
+    b = a.T
+
+    assert_raises(RuntimeError, a.dot, b)
+
+
+def test_upcast():
+    a0 = csr_matrix([[np.pi, np.pi*1j], [3, 4]], dtype=complex)
+    b0 = np.array([256+1j, 2**32], dtype=complex)
+
+    for a_dtype in supported_dtypes:
+        for b_dtype in supported_dtypes:
+            msg = "(%r, %r)" % (a_dtype, b_dtype)
+
+            if np.issubdtype(a_dtype, np.complexfloating):
+                a = a0.copy().astype(a_dtype)
+            else:
+                a = a0.real.copy().astype(a_dtype)
+
+            if np.issubdtype(b_dtype, np.complexfloating):
+                b = b0.copy().astype(b_dtype)
+            else:
+                with np.errstate(invalid="ignore"):
+                    # Casting a large value (2**32) to int8 causes a warning in
+                    # numpy >1.23
+                    b = b0.real.copy().astype(b_dtype)
+
+            if not (a_dtype == np.bool_ and b_dtype == np.bool_):
+                c = np.zeros((2,), dtype=np.bool_)
+                assert_raises(ValueError, _sparsetools.csr_matvec,
+                              2, 2, a.indptr, a.indices, a.data, b, c)
+
+            if ((np.issubdtype(a_dtype, np.complexfloating) and
+                 not np.issubdtype(b_dtype, np.complexfloating)) or
+                (not np.issubdtype(a_dtype, np.complexfloating) and
+                 np.issubdtype(b_dtype, np.complexfloating))):
+                c = np.zeros((2,), dtype=np.float64)
+                assert_raises(ValueError, _sparsetools.csr_matvec,
+                              2, 2, a.indptr, a.indices, a.data, b, c)
+
+            c = np.zeros((2,), dtype=np.result_type(a_dtype, b_dtype))
+            _sparsetools.csr_matvec(2, 2, a.indptr, a.indices, a.data, b, c)
+            assert_allclose(c, np.dot(a.toarray(), b), err_msg=msg)
+
+
+def test_endianness():
+    d = np.ones((3,4))
+    offsets = [-1,0,1]
+
+    # little- and big-endian data should give the same matvec result
+    a = dia_matrix((d.astype('<f8'), offsets), (4, 4))
+    b = dia_matrix((d.astype('>f8'), offsets), (4, 4))
+    v = np.arange(4)
+
+    assert_allclose(a.dot(v), [1, 3, 6, 5])
+    assert_allclose(b.dot(v), [1, 3, 6, 5])
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_spfuncs.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_spfuncs.py
new file mode 100644
index 00000000..75bc2d92
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_spfuncs.py
@@ -0,0 +1,97 @@
+from numpy import array, kron, diag
+from numpy.testing import assert_, assert_equal
+
+from scipy.sparse import _spfuncs as spfuncs
+from scipy.sparse import csr_matrix, csc_matrix, bsr_matrix
+from scipy.sparse._sparsetools import (csr_scale_rows, csr_scale_columns,
+                                       bsr_scale_rows, bsr_scale_columns)
+
+
+class TestSparseFunctions:
+    def test_scale_rows_and_cols(self):
+        D = array([[1, 0, 0, 2, 3],
+                   [0, 4, 0, 5, 0],
+                   [0, 0, 6, 7, 0]])
+
+        # TODO: expose through a public function
+        S = csr_matrix(D)
+        v = array([1,2,3])
+        csr_scale_rows(3,5,S.indptr,S.indices,S.data,v)
+        assert_equal(S.toarray(), diag(v)@D)
+
+        S = csr_matrix(D)
+        v = array([1,2,3,4,5])
+        csr_scale_columns(3,5,S.indptr,S.indices,S.data,v)
+        assert_equal(S.toarray(), D@diag(v))
+
+        # blocks
+        E = kron(D,[[1,2],[3,4]])
+        S = bsr_matrix(E,blocksize=(2,2))
+        v = array([1,2,3,4,5,6])
+        bsr_scale_rows(3,5,2,2,S.indptr,S.indices,S.data,v)
+        assert_equal(S.toarray(), diag(v)@E)
+
+        S = bsr_matrix(E,blocksize=(2,2))
+        v = array([1,2,3,4,5,6,7,8,9,10])
+        bsr_scale_columns(3,5,2,2,S.indptr,S.indices,S.data,v)
+        assert_equal(S.toarray(), E@diag(v))
+
+        E = kron(D,[[1,2,3],[4,5,6]])
+        S = bsr_matrix(E,blocksize=(2,3))
+        v = array([1,2,3,4,5,6])
+        bsr_scale_rows(3,5,2,3,S.indptr,S.indices,S.data,v)
+        assert_equal(S.toarray(), diag(v)@E)
+
+        S = bsr_matrix(E,blocksize=(2,3))
+        v = array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
+        bsr_scale_columns(3,5,2,3,S.indptr,S.indices,S.data,v)
+        assert_equal(S.toarray(), E@diag(v))
+
+    def test_estimate_blocksize(self):
+        mats = []
+        mats.append([[0,1],[1,0]])
+        mats.append([[1,1,0],[0,0,1],[1,0,1]])
+        mats.append([[0],[0],[1]])
+        mats = [array(x) for x in mats]
+
+        blks = []
+        blks.append([[1]])
+        blks.append([[1,1],[1,1]])
+        blks.append([[1,1],[0,1]])
+        blks.append([[1,1,0],[1,0,1],[1,1,1]])
+        blks = [array(x) for x in blks]
+
+        for A in mats:
+            for B in blks:
+                X = kron(A,B)
+                r,c = spfuncs.estimate_blocksize(X)
+                assert_(r >= B.shape[0])
+                assert_(c >= B.shape[1])
+
+    def test_count_blocks(self):
+        def gold(A,bs):
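+            # reference implementation: count the distinct R-by-C blocks
+            # that contain at least one nonzero entry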
+            R,C = bs
+            I,J = A.nonzero()
+            return len(set(zip(I//R,J//C)))
+
+        mats = []
+        mats.append([[0]])
+        mats.append([[1]])
+        mats.append([[1,0]])
+        mats.append([[1,1]])
+        mats.append([[0,1],[1,0]])
+        mats.append([[1,1,0],[0,0,1],[1,0,1]])
+        mats.append([[0],[0],[1]])
+
+        for A in mats:
+            for B in mats:
+                X = kron(A,B)
+                Y = csr_matrix(X)
+                for R in range(1,6):
+                    for C in range(1,6):
+                        assert_equal(spfuncs.count_blocks(Y, (R, C)), gold(X, (R, C)))
+
+        X = kron([[1,1,0],[0,0,1],[1,0,1]],[[1,1]])
+        Y = csc_matrix(X)
+        assert_equal(spfuncs.count_blocks(X, (1, 2)), gold(X, (1, 2)))
+        assert_equal(spfuncs.count_blocks(Y, (1, 2)), gold(X, (1, 2)))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_sputils.py b/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_sputils.py
new file mode 100644
index 00000000..2274e2ef
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/sparse/tests/test_sputils.py
@@ -0,0 +1,188 @@
+"""unit tests for sparse utility functions"""
+
+import numpy as np
+from numpy.testing import assert_equal
+from pytest import raises as assert_raises
+from scipy.sparse import _sputils as sputils
+from scipy.sparse._sputils import matrix
+
+
+class TestSparseUtils:
+
+    def test_upcast(self):
+        assert_equal(sputils.upcast('intc'), np.intc)
+        assert_equal(sputils.upcast('int32', 'float32'), np.float64)
+        assert_equal(sputils.upcast('bool', complex, float), np.complex128)
+        assert_equal(sputils.upcast('i', 'd'), np.float64)
+
+    def test_getdtype(self):
+        A = np.array([1], dtype='int8')
+
+        assert_equal(sputils.getdtype(None, default=float), float)
+        assert_equal(sputils.getdtype(None, a=A), np.int8)
+
+        with assert_raises(
+            ValueError,
+            match="object dtype is not supported by sparse matrices",
+        ):
+            sputils.getdtype("O")
+
+    def test_isscalarlike(self):
+        assert_equal(sputils.isscalarlike(3.0), True)
+        assert_equal(sputils.isscalarlike(-4), True)
+        assert_equal(sputils.isscalarlike(2.5), True)
+        assert_equal(sputils.isscalarlike(1 + 3j), True)
+        assert_equal(sputils.isscalarlike(np.array(3)), True)
+        assert_equal(sputils.isscalarlike("16"), True)
+
+        assert_equal(sputils.isscalarlike(np.array([3])), False)
+        assert_equal(sputils.isscalarlike([[3]]), False)
+        assert_equal(sputils.isscalarlike((1,)), False)
+        assert_equal(sputils.isscalarlike((1, 2)), False)
+
+    def test_isintlike(self):
+        assert_equal(sputils.isintlike(-4), True)
+        assert_equal(sputils.isintlike(np.array(3)), True)
+        assert_equal(sputils.isintlike(np.array([3])), False)
+        with assert_raises(
+            ValueError,
+            match="Inexact indices into sparse matrices are not allowed"
+        ):
+            sputils.isintlike(3.0)
+
+        assert_equal(sputils.isintlike(2.5), False)
+        assert_equal(sputils.isintlike(1 + 3j), False)
+        assert_equal(sputils.isintlike((1,)), False)
+        assert_equal(sputils.isintlike((1, 2)), False)
+
+    def test_isshape(self):
+        assert_equal(sputils.isshape((1, 2)), True)
+        assert_equal(sputils.isshape((5, 2)), True)
+
+        assert_equal(sputils.isshape((1.5, 2)), False)
+        assert_equal(sputils.isshape((2, 2, 2)), False)
+        assert_equal(sputils.isshape(([2], 2)), False)
+        assert_equal(sputils.isshape((-1, 2), nonneg=False),True)
+        assert_equal(sputils.isshape((2, -1), nonneg=False),True)
+        assert_equal(sputils.isshape((-1, 2), nonneg=True),False)
+        assert_equal(sputils.isshape((2, -1), nonneg=True),False)
+
+    def test_issequence(self):
+        assert_equal(sputils.issequence((1,)), True)
+        assert_equal(sputils.issequence((1, 2, 3)), True)
+        assert_equal(sputils.issequence([1]), True)
+        assert_equal(sputils.issequence([1, 2, 3]), True)
+        assert_equal(sputils.issequence(np.array([1, 2, 3])), True)
+
+        assert_equal(sputils.issequence(np.array([[1], [2], [3]])), False)
+        assert_equal(sputils.issequence(3), False)
+
+    def test_ismatrix(self):
+        assert_equal(sputils.ismatrix(((),)), True)
+        assert_equal(sputils.ismatrix([[1], [2]]), True)
+        assert_equal(sputils.ismatrix(np.arange(3)[None]), True)
+
+        assert_equal(sputils.ismatrix([1, 2]), False)
+        assert_equal(sputils.ismatrix(np.arange(3)), False)
+        assert_equal(sputils.ismatrix([[[1]]]), False)
+        assert_equal(sputils.ismatrix(3), False)
+
+    def test_isdense(self):
+        assert_equal(sputils.isdense(np.array([1])), True)
+        assert_equal(sputils.isdense(matrix([1])), True)
+
+    def test_validateaxis(self):
+        assert_raises(TypeError, sputils.validateaxis, (0, 1))
+        assert_raises(TypeError, sputils.validateaxis, 1.5)
+        assert_raises(ValueError, sputils.validateaxis, 3)
+
+        # These function calls should not raise errors
+        for axis in (-2, -1, 0, 1, None):
+            sputils.validateaxis(axis)
+
+    def test_get_index_dtype(self):
+        imax = np.int64(np.iinfo(np.int32).max)
+        too_big = imax + 1
+
+        # Check that uint32 arrays with no values too large don't
+        # return int64
+        a1 = np.ones(90, dtype='uint32')
+        a2 = np.ones(90, dtype='uint32')
+        assert_equal(
+            np.dtype(sputils.get_index_dtype((a1, a2), check_contents=True)),
+            np.dtype('int32')
+        )
+
+        # Check that if the dtype cannot be cast safely, but all values
+        # are at most the int32 max, we still get int32
+        a1[-1] = imax
+        assert_equal(
+            np.dtype(sputils.get_index_dtype((a1, a2), check_contents=True)),
+            np.dtype('int32')
+        )
+
+        # Check that if the dtype cannot be cast directly and the
+        # contents are too large, we get int64
+        a1[-1] = too_big
+        assert_equal(
+            np.dtype(sputils.get_index_dtype((a1, a2), check_contents=True)),
+            np.dtype('int64')
+        )
+
+        # Check that if the dtype cannot be cast and check_contents
+        # was not requested, we get int64
+        a1 = np.ones(89, dtype='uint32')
+        a2 = np.ones(89, dtype='uint32')
+        assert_equal(
+            np.dtype(sputils.get_index_dtype((a1, a2))),
+            np.dtype('int64')
+        )
+
+        # Check that even for arrays that could be cast directly, an
+        # explicitly specified maxval takes precedence
+        a1 = np.ones(12, dtype='uint32')
+        a2 = np.ones(12, dtype='uint32')
+        assert_equal(
+            np.dtype(sputils.get_index_dtype(
+                (a1, a2), maxval=too_big, check_contents=True
+            )),
+            np.dtype('int64')
+        )
+
+        # Check that an array with contents above the int32 max, combined
+        # with maxval set, still returns int64
+        a1[-1] = too_big
+        assert_equal(
+            np.dtype(sputils.get_index_dtype((a1, a2), maxval=too_big)),
+            np.dtype('int64')
+        )
+
+    def test_check_shape_overflow(self):
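+        # the -1 dimension is inferred: 65535 * 131070 = 8589672450 total
+        # elements, so 8589672450 // 10 = 858967245 columns; the
+        # intermediate product exceeds the int32 range, hence "overflow"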
+        new_shape = sputils.check_shape([(10, -1)], (65535, 131070))
+        assert_equal(new_shape, (10, 858967245))
+
+    def test_matrix(self):
+        a = [[1, 2, 3]]
+        b = np.array(a)
+
+        assert isinstance(sputils.matrix(a), np.matrix)
+        assert isinstance(sputils.matrix(b), np.matrix)
+
+        c = sputils.matrix(b)
+        c[:, :] = 123
+        assert_equal(b, a)
+
+        c = sputils.matrix(b, copy=False)
+        c[:, :] = 123
+        assert_equal(b, [[123, 123, 123]])
+
+    def test_asmatrix(self):
+        a = [[1, 2, 3]]
+        b = np.array(a)
+
+        assert isinstance(sputils.asmatrix(a), np.matrix)
+        assert isinstance(sputils.asmatrix(b), np.matrix)
+
+        c = sputils.asmatrix(b)
+        c[:, :] = 123
+        assert_equal(b, [[123, 123, 123]])
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/spatial/__init__.py
new file mode 100644
index 00000000..abba3593
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/__init__.py
@@ -0,0 +1,124 @@
+"""
+=============================================================
+Spatial algorithms and data structures (:mod:`scipy.spatial`)
+=============================================================
+
+.. currentmodule:: scipy.spatial
+
+Spatial transformations
+=======================
+
+These are contained in the `scipy.spatial.transform` submodule.
+
+Nearest-neighbor queries
+========================
+.. autosummary::
+   :toctree: generated/
+
+   KDTree      -- class for efficient nearest-neighbor queries
+   cKDTree     -- class for efficient nearest-neighbor queries (faster implementation)
+   Rectangle
+
+Distance metrics
+================
+
+Distance metrics are contained in the :mod:`scipy.spatial.distance` submodule.
+
+Delaunay triangulation, convex hulls, and Voronoi diagrams
+==========================================================
+
+.. autosummary::
+   :toctree: generated/
+
+   Delaunay    -- compute Delaunay triangulation of input points
+   ConvexHull  -- compute a convex hull for input points
+   Voronoi     -- compute a Voronoi diagram from input points
+   SphericalVoronoi -- compute a Voronoi diagram from input points on the surface of a sphere
+   HalfspaceIntersection -- compute the intersection points of input halfspaces
+
+Plotting helpers
+================
+
+.. autosummary::
+   :toctree: generated/
+
+   delaunay_plot_2d     -- plot 2-D triangulation
+   convex_hull_plot_2d  -- plot 2-D convex hull
+   voronoi_plot_2d      -- plot 2-D Voronoi diagram
+
+.. seealso:: :ref:`Tutorial <qhull-tutorial>`
+
+
+Simplex representation
+======================
+The simplices (triangles, tetrahedra, etc.) appearing in the Delaunay
+tessellation (N-D simplices), convex hull facets, and Voronoi ridges
+(N-1-D simplices) are represented in the following scheme::
+
+    tess = Delaunay(points)
+    hull = ConvexHull(points)
+    voro = Voronoi(points)
+
+    # coordinates of the jth vertex of the ith simplex
+    tess.points[tess.simplices[i, j], :]        # tessellation element
+    hull.points[hull.simplices[i, j], :]        # convex hull facet
+    voro.vertices[voro.ridge_vertices[i, j], :] # ridge between Voronoi cells
+
+For Delaunay triangulations and convex hulls, the neighborhood
+structure of the simplices satisfies the condition:
+``tess.neighbors[i,j]`` is the neighboring simplex of the ith
+simplex, opposite to the ``j``-th vertex. It is -1 where there is no neighbor.
+
+Convex hull facets also define a hyperplane equation::
+
+    (hull.equations[i,:-1] * coord).sum() + hull.equations[i,-1] == 0
+
+Similar hyperplane equations for the Delaunay triangulation correspond
+to the convex hull facets on the corresponding N+1-D
+paraboloid.
+
+The Delaunay triangulation objects offer a method for locating the
+simplex containing a given point, and barycentric coordinate
+computations.
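+
+As a minimal sketch (``find_simplex`` and the ``transform`` attribute are
+part of the `Delaunay` API; the points and query here are illustrative)::
+
+    import numpy as np
+    from scipy.spatial import Delaunay
+
+    points = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
+    tess = Delaunay(points)
+    x = np.array([0.25, 0.25])
+    i = tess.find_simplex(x)     # index of containing simplex, -1 if outside
+    # barycentric coordinates of x within simplex i
+    T = tess.transform[i]
+    b = T[:2].dot(x - T[2])
+    bary = np.r_[b, 1 - b.sum()]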
+
+Functions
+---------
+
+.. autosummary::
+   :toctree: generated/
+
+   tsearch
+   distance_matrix
+   minkowski_distance
+   minkowski_distance_p
+   procrustes
+   geometric_slerp
+
+Warnings / Errors used in :mod:`scipy.spatial`
+----------------------------------------------
+.. autosummary::
+   :toctree: generated/
+
+   QhullError
+"""
+
+from ._kdtree import *
+from ._ckdtree import *
+from ._qhull import *
+from ._spherical_voronoi import SphericalVoronoi
+from ._plotutils import *
+from ._procrustes import procrustes
+from ._geometric_slerp import geometric_slerp
+
+# Deprecated namespaces, to be removed in v2.0.0
+from . import ckdtree, kdtree, qhull
+
+__all__ = [s for s in dir() if not s.startswith('_')]
+
+from . import distance, transform
+
+__all__ += ['distance', 'transform']
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/_ckdtree.pyi b/__packaged__/coreml/.python_dependencies/scipy/spatial/_ckdtree.pyi
new file mode 100644
index 00000000..dcb25ac6
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/_ckdtree.pyi
@@ -0,0 +1,230 @@
+import sys
+from typing import (
+    Any,
+    Dict,
+    Generic,
+    List,
+    Optional,
+    overload,
+    Set,
+    Tuple,
+    TypeVar,
+    Union,
+)
+
+import numpy as np
+import numpy.typing as npt
+from scipy.sparse import coo_matrix, dok_matrix
+
+from typing import Literal
+
+# TODO: Replace `ndarray` with a 1D float64 array when possible
+_BoxType = TypeVar("_BoxType", None, npt.NDArray[np.float64])
+
+# Copied from `numpy.typing._scalar_like._ScalarLike`
+# TODO: Expand with 0D arrays once we have shape support
+_ArrayLike0D = Union[
+    bool,
+    int,
+    float,
+    complex,
+    str,
+    bytes,
+    np.generic,
+]
+_WeightType = Union[
+    npt.ArrayLike,
+    Tuple[Optional[npt.ArrayLike], Optional[npt.ArrayLike]],
+]
+
+class cKDTreeNode:
+    @property
+    def data_points(self) -> npt.NDArray[np.float64]: ...
+    @property
+    def indices(self) -> npt.NDArray[np.intp]: ...
+
+    # These are read-only attributes in Cython, which behave like properties
+    @property
+    def level(self) -> int: ...
+    @property
+    def split_dim(self) -> int: ...
+    @property
+    def children(self) -> int: ...
+    @property
+    def start_idx(self) -> int: ...
+    @property
+    def end_idx(self) -> int: ...
+    @property
+    def split(self) -> float: ...
+    @property
+    def lesser(self) -> Optional[cKDTreeNode]: ...
+    @property
+    def greater(self) -> Optional[cKDTreeNode]: ...
+
+class cKDTree(Generic[_BoxType]):
+    @property
+    def n(self) -> int: ...
+    @property
+    def m(self) -> int: ...
+    @property
+    def leafsize(self) -> int: ...
+    @property
+    def size(self) -> int: ...
+    @property
+    def tree(self) -> cKDTreeNode: ...
+
+    # These are read-only attributes in Cython, which behave like properties
+    @property
+    def data(self) -> npt.NDArray[np.float64]: ...
+    @property
+    def maxes(self) -> npt.NDArray[np.float64]: ...
+    @property
+    def mins(self) -> npt.NDArray[np.float64]: ...
+    @property
+    def indices(self) -> npt.NDArray[np.intp]: ...
+    @property
+    def boxsize(self) -> _BoxType: ...
+
+    # NOTE: In practice `__init__` is used as constructor, not `__new__`.
+    # The latter gives us more flexibility in setting the generic parameter
+    # though.
+    @overload
+    def __new__(  # type: ignore[misc]
+        cls,
+        data: npt.ArrayLike,
+        leafsize: int = ...,
+        compact_nodes: bool = ...,
+        copy_data: bool = ...,
+        balanced_tree: bool = ...,
+        boxsize: None = ...,
+    ) -> cKDTree[None]: ...
+    @overload
+    def __new__(
+        cls,
+        data: npt.ArrayLike,
+        leafsize: int = ...,
+        compact_nodes: bool = ...,
+        copy_data: bool = ...,
+        balanced_tree: bool = ...,
+        boxsize: npt.ArrayLike = ...,
+    ) -> cKDTree[npt.NDArray[np.float64]]: ...
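+    # For example (hypothetical usage; the generic parameter is inferred
+    # from the ``boxsize`` argument):
+    #
+    #     cKDTree(data)                    # -> cKDTree[None]
+    #     cKDTree(data, boxsize=[1., 1.])  # -> cKDTree[npt.NDArray[np.float64]]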
+
+    # TODO: returns a 2-tuple of scalars if `x.ndim == 1` and `k == 1`,
+    # returns a 2-tuple of arrays otherwise
+    def query(
+        self,
+        x: npt.ArrayLike,
+        k: npt.ArrayLike = ...,
+        eps: float = ...,
+        p: float = ...,
+        distance_upper_bound: float = ...,
+        workers: Optional[int] = ...,
+    ) -> Tuple[Any, Any]: ...
+
+    # TODO: returns a list scalars if `x.ndim <= 1`,
+    # returns an object array of lists otherwise
+    def query_ball_point(
+        self,
+        x: npt.ArrayLike,
+        r: npt.ArrayLike,
+        p: float,
+        eps: float = ...,
+        workers: Optional[int] = ...,
+        return_sorted: Optional[bool] = ...,
+        return_length: bool = ...
+    ) -> Any: ...
+
+    def query_ball_tree(
+        self,
+        other: cKDTree,
+        r: float,
+        p: float,
+        eps: float = ...,
+    ) -> List[List[int]]: ...
+
+    @overload
+    def query_pairs(  # type: ignore[misc]
+        self,
+        r: float,
+        p: float = ...,
+        eps: float = ...,
+        output_type: Literal["set"] = ...,
+    ) -> Set[Tuple[int, int]]: ...
+    @overload
+    def query_pairs(
+        self,
+        r: float,
+        p: float = ...,
+        eps: float = ...,
+        output_type: Literal["ndarray"] = ...,
+    ) -> npt.NDArray[np.intp]: ...
+
+    @overload
+    def count_neighbors(  # type: ignore[misc]
+        self,
+        other: cKDTree,
+        r: _ArrayLike0D,
+        p: float = ...,
+        weights: None | Tuple[None, None] = ...,
+        cumulative: bool = ...,
+    ) -> int: ...
+    @overload
+    def count_neighbors(  # type: ignore[misc]
+        self,
+        other: cKDTree,
+        r: _ArrayLike0D,
+        p: float = ...,
+        weights: _WeightType = ...,
+        cumulative: bool = ...,
+    ) -> np.float64: ...
+    @overload
+    def count_neighbors(  # type: ignore[misc]
+        self,
+        other: cKDTree,
+        r: npt.ArrayLike,
+        p: float = ...,
+        weights: None | Tuple[None, None] = ...,
+        cumulative: bool = ...,
+    ) -> npt.NDArray[np.intp]: ...
+    @overload
+    def count_neighbors(
+        self,
+        other: cKDTree,
+        r: npt.ArrayLike,
+        p: float = ...,
+        weights: _WeightType = ...,
+        cumulative: bool = ...,
+    ) -> npt.NDArray[np.float64]: ...
+
+    @overload
+    def sparse_distance_matrix(  # type: ignore[misc]
+        self,
+        other: cKDTree,
+        max_distance: float,
+        p: float = ...,
+        output_type: Literal["dok_matrix"] = ...,
+    ) -> dok_matrix: ...
+    @overload
+    def sparse_distance_matrix(  # type: ignore[misc]
+        self,
+        other: cKDTree,
+        max_distance: float,
+        p: float = ...,
+        output_type: Literal["coo_matrix"] = ...,
+    ) -> coo_matrix: ...
+    @overload
+    def sparse_distance_matrix(  # type: ignore[misc]
+        self,
+        other: cKDTree,
+        max_distance: float,
+        p: float = ...,
+        output_type: Literal["dict"] = ...,
+    ) -> Dict[Tuple[int, int], float]: ...
+    @overload
+    def sparse_distance_matrix(
+        self,
+        other: cKDTree,
+        max_distance: float,
+        p: float = ...,
+        output_type: Literal["ndarray"] = ...,
+    ) -> npt.NDArray[np.void]: ...
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/_geometric_slerp.py b/__packaged__/coreml/.python_dependencies/scipy/spatial/_geometric_slerp.py
new file mode 100644
index 00000000..21663a07
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/_geometric_slerp.py
@@ -0,0 +1,239 @@
+from __future__ import annotations
+
+__all__ = ['geometric_slerp']
+
+import warnings
+from typing import TYPE_CHECKING
+
+import numpy as np
+from scipy.spatial.distance import euclidean
+
+if TYPE_CHECKING:
+    import numpy.typing as npt
+
+
+def _geometric_slerp(start, end, t):
+    # create an orthogonal basis using QR decomposition
+    basis = np.vstack([start, end])
+    Q, R = np.linalg.qr(basis.T)
+    signs = 2 * (np.diag(R) >= 0) - 1
+    Q = Q.T * signs.T[:, np.newaxis]
+    R = R.T * signs.T[:, np.newaxis]
+
+    # calculate the angle between `start` and `end`
+    c = np.dot(start, end)
+    s = np.linalg.det(R)
+    omega = np.arctan2(s, c)
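+    # (Q now holds an orthonormal basis of the plane spanned by start and
+    # end; the sign fix makes diag(R) non-negative, so det(R) is the
+    # non-negative sine of the angle and omega lies in [0, pi].)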
+
+    # interpolate
+    start, end = Q
+    s = np.sin(t * omega)
+    c = np.cos(t * omega)
+    return start * c[:, np.newaxis] + end * s[:, np.newaxis]
+
+
+def geometric_slerp(
+    start: npt.ArrayLike,
+    end: npt.ArrayLike,
+    t: npt.ArrayLike,
+    tol: float = 1e-7,
+) -> np.ndarray:
+    """
+    Geometric spherical linear interpolation.
+
+    The interpolation occurs along a unit-radius
+    great circle arc in arbitrary dimensional space.
+
+    Parameters
+    ----------
+    start : (n_dimensions, ) array-like
+        Single n-dimensional input coordinate in a 1-D array-like
+        object. `n` must be greater than 1.
+    end : (n_dimensions, ) array-like
+        Single n-dimensional input coordinate in a 1-D array-like
+        object. `n` must be greater than 1.
+    t : float or (n_points,) 1D array-like
+        A float or 1D array-like of doubles representing interpolation
+        parameters, with values required in the inclusive interval
+        between 0 and 1. A common approach is to generate the array
+        with ``np.linspace(0, 1, n_pts)`` for linearly spaced points.
+        Ascending, descending, and scrambled orders are permitted.
+    tol : float
+        The absolute tolerance for determining if the start and end
+        coordinates are antipodes.
+
+    Returns
+    -------
+    result : (t.size, D)
+        An array of doubles containing the interpolated
+        spherical path, including start and end when
+        t values of 0 and 1 are used. The interpolated
+        values should correspond to the same sort order
+        provided in the t array. The result may be
+        1-dimensional if ``t`` is a float.
+
+    Raises
+    ------
+    ValueError
+        If ``start`` and ``end`` are antipodes, not on the
+        unit n-sphere, or for a variety of degenerate conditions.
+
+    See Also
+    --------
+    scipy.spatial.transform.Slerp : 3-D Slerp that works with quaternions
+
+    Notes
+    -----
+    The implementation is based on the mathematical formula provided in [1]_,
+    and the first known presentation of this algorithm, derived from study of
+    4-D geometry, is credited to Glenn Davis in a footnote of the original
+    quaternion Slerp publication by Ken Shoemake [2]_.
+
+    .. versionadded:: 1.5.0
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Slerp#Geometric_Slerp
+    .. [2] Ken Shoemake (1985) Animating rotation with quaternion curves.
+           ACM SIGGRAPH Computer Graphics, 19(3): 245-254.
+
+    Examples
+    --------
+    Interpolate four linearly-spaced values on the circumference of
+    a circle spanning 90 degrees:
+
+    >>> import numpy as np
+    >>> from scipy.spatial import geometric_slerp
+    >>> import matplotlib.pyplot as plt
+    >>> fig = plt.figure()
+    >>> ax = fig.add_subplot(111)
+    >>> start = np.array([1, 0])
+    >>> end = np.array([0, 1])
+    >>> t_vals = np.linspace(0, 1, 4)
+    >>> result = geometric_slerp(start,
+    ...                          end,
+    ...                          t_vals)
+
+    The interpolated results should be at 30 degree intervals
+    recognizable on the unit circle:
+
+    >>> ax.scatter(result[...,0], result[...,1], c='k')
+    >>> circle = plt.Circle((0, 0), 1, color='grey')
+    >>> ax.add_artist(circle)
+    >>> ax.set_aspect('equal')
+    >>> plt.show()
+
+    Attempting to interpolate between antipodes on a circle is
+    ambiguous because there are two possible paths, and on a
+    sphere there are infinitely many possible paths on the geodesic surface.
+    Nonetheless, one of the ambiguous paths is returned along
+    with a warning:
+
+    >>> opposite_pole = np.array([-1, 0])
+    >>> with np.testing.suppress_warnings() as sup:
+    ...     sup.filter(UserWarning)
+    ...     geometric_slerp(start,
+    ...                     opposite_pole,
+    ...                     t_vals)
+    array([[ 1.00000000e+00,  0.00000000e+00],
+           [ 5.00000000e-01,  8.66025404e-01],
+           [-5.00000000e-01,  8.66025404e-01],
+           [-1.00000000e+00,  1.22464680e-16]])
+
+    Extend the original example to a sphere and plot interpolation
+    points in 3D:
+
+    >>> from mpl_toolkits.mplot3d import proj3d
+    >>> fig = plt.figure()
+    >>> ax = fig.add_subplot(111, projection='3d')
+
+    Plot the unit sphere for reference (optional):
+
+    >>> u = np.linspace(0, 2 * np.pi, 100)
+    >>> v = np.linspace(0, np.pi, 100)
+    >>> x = np.outer(np.cos(u), np.sin(v))
+    >>> y = np.outer(np.sin(u), np.sin(v))
+    >>> z = np.outer(np.ones(np.size(u)), np.cos(v))
+    >>> ax.plot_surface(x, y, z, color='y', alpha=0.1)
+
+    Interpolating over a larger number of points
+    may provide the appearance of a smooth curve on
+    the surface of the sphere, which is also useful
+    for discretized integration calculations on a
+    sphere surface:
+
+    >>> start = np.array([1, 0, 0])
+    >>> end = np.array([0, 0, 1])
+    >>> t_vals = np.linspace(0, 1, 200)
+    >>> result = geometric_slerp(start,
+    ...                          end,
+    ...                          t_vals)
+    >>> ax.plot(result[...,0],
+    ...         result[...,1],
+    ...         result[...,2],
+    ...         c='k')
+    >>> plt.show()
+    """
+
+    start = np.asarray(start, dtype=np.float64)
+    end = np.asarray(end, dtype=np.float64)
+    t = np.asarray(t)
+
+    if t.ndim > 1:
+        raise ValueError("The interpolation parameter "
+                         "value must be one dimensional.")
+
+    if start.ndim != 1 or end.ndim != 1:
+        raise ValueError("Start and end coordinates "
+                         "must be one-dimensional")
+
+    if start.size != end.size:
+        raise ValueError("The dimensions of start and "
+                         "end must match (have same size)")
+
+    if start.size < 2 or end.size < 2:
+        raise ValueError("The start and end coordinates must "
+                         "both be in at least two-dimensional "
+                         "space")
+
+    if np.array_equal(start, end):
+        return np.linspace(start, start, t.size)
+
+    # for points that violate equation for n-sphere
+    for coord in [start, end]:
+        if not np.allclose(np.linalg.norm(coord), 1.0,
+                           rtol=1e-9,
+                           atol=0):
+            raise ValueError("start and end are not"
+                             " on a unit n-sphere")
+
+    if not isinstance(tol, float):
+        raise ValueError("tol must be a float")
+    else:
+        tol = np.fabs(tol)
+
+    coord_dist = euclidean(start, end)
+
+    # diameter of 2 within tolerance means antipodes, which is a problem
+    # for all unit n-spheres (even the 0-sphere would have an ambiguous path)
+    if np.allclose(coord_dist, 2.0, rtol=0, atol=tol):
+        warnings.warn("start and end are antipodes"
+                      " using the specified tolerance;"
+                      " this may cause ambiguous slerp paths")
+
+    t = np.asarray(t, dtype=np.float64)
+
+    if t.size == 0:
+        return np.empty((0, start.size))
+
+    if t.min() < 0 or t.max() > 1:
+        raise ValueError("interpolation parameter must be in [0, 1]")
+
+    if t.ndim == 0:
+        return _geometric_slerp(start,
+                                end,
+                                np.atleast_1d(t)).ravel()
+    else:
+        return _geometric_slerp(start,
+                                end,
+                                t)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/_kdtree.py b/__packaged__/coreml/.python_dependencies/scipy/spatial/_kdtree.py
new file mode 100644
index 00000000..4210f3e4
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/_kdtree.py
@@ -0,0 +1,920 @@
+# Copyright Anne M. Archibald 2008
+# Released under the scipy license
+import numpy as np
+from ._ckdtree import cKDTree, cKDTreeNode
+
+__all__ = ['minkowski_distance_p', 'minkowski_distance',
+           'distance_matrix',
+           'Rectangle', 'KDTree']
+
+
+def minkowski_distance_p(x, y, p=2):
+    """Compute the pth power of the L**p distance between two arrays.
+
+    For efficiency, this function computes the L**p distance but does
+    not extract the pth root. If `p` is 1 or infinity, this is equal to
+    the actual L**p distance.
+
+    The last dimensions of `x` and `y` must be the same length.  Any
+    other dimensions must be compatible for broadcasting.
+
+    Parameters
+    ----------
+    x : (..., K) array_like
+        Input array.
+    y : (..., K) array_like
+        Input array.
+    p : float, 1 <= p <= infinity
+        Which Minkowski p-norm to use.
+
+    Returns
+    -------
+    dist : ndarray
+        pth power of the distance between the input arrays.
+
+    Examples
+    --------
+    >>> from scipy.spatial import minkowski_distance_p
+    >>> minkowski_distance_p([[0, 0], [0, 0]], [[1, 1], [0, 1]])
+    array([2, 1])
+
+    """
+    x = np.asarray(x)
+    y = np.asarray(y)
+
+    # Find smallest common datatype with float64 (return type of this
+    # function) - addresses #10262.
+    # Don't just cast to float64 for complex input case.
+    common_datatype = np.promote_types(np.promote_types(x.dtype, y.dtype),
+                                       'float64')
+
+    # Make sure x and y are NumPy arrays of correct datatype.
+    x = x.astype(common_datatype)
+    y = y.astype(common_datatype)
+
+    if p == np.inf:
+        return np.amax(np.abs(y-x), axis=-1)
+    elif p == 1:
+        return np.sum(np.abs(y-x), axis=-1)
+    else:
+        return np.sum(np.abs(y-x)**p, axis=-1)
+
+
+def minkowski_distance(x, y, p=2):
+    """Compute the L**p distance between two arrays.
+
+    The last dimensions of `x` and `y` must be the same length.  Any
+    other dimensions must be compatible for broadcasting.
+
+    Parameters
+    ----------
+    x : (..., K) array_like
+        Input array.
+    y : (..., K) array_like
+        Input array.
+    p : float, 1 <= p <= infinity
+        Which Minkowski p-norm to use.
+
+    Returns
+    -------
+    dist : ndarray
+        Distance between the input arrays.
+
+    Examples
+    --------
+    >>> from scipy.spatial import minkowski_distance
+    >>> minkowski_distance([[0, 0], [0, 0]], [[1, 1], [0, 1]])
+    array([ 1.41421356,  1.        ])
+
+    """
+    x = np.asarray(x)
+    y = np.asarray(y)
+    if p == np.inf or p == 1:
+        return minkowski_distance_p(x, y, p)
+    else:
+        return minkowski_distance_p(x, y, p)**(1./p)
+
+
+class Rectangle:
+    """Hyperrectangle class.
+
+    Represents a Cartesian product of intervals.
+    """
+    def __init__(self, maxes, mins):
+        """Construct a hyperrectangle."""
+        self.maxes = np.maximum(maxes,mins).astype(float)
+        self.mins = np.minimum(maxes,mins).astype(float)
+        self.m, = self.maxes.shape
+
+    def __repr__(self):
+        return "" % list(zip(self.mins, self.maxes))
+
+    def volume(self):
+        """Total volume."""
+        return np.prod(self.maxes-self.mins)
+
+    def split(self, d, split):
+        """Produce two hyperrectangles by splitting.
+
+        In general, if you need to compute maximum and minimum
+        distances to the children, it can be done more efficiently
+        by updating the maximum and minimum distances to the parent.
+
+        Parameters
+        ----------
+        d : int
+            Axis to split hyperrectangle along.
+        split : float
+            Position along axis `d` to split at.
+
+        """
+        mid = np.copy(self.maxes)
+        mid[d] = split
+        less = Rectangle(self.mins, mid)
+        mid = np.copy(self.mins)
+        mid[d] = split
+        greater = Rectangle(mid, self.maxes)
+        return less, greater
+
+    def min_distance_point(self, x, p=2.):
+        """
+        Return the minimum distance between input and points in the
+        hyperrectangle.
+
+        Parameters
+        ----------
+        x : array_like
+            Input point.
+        p : float, optional
+            Which Minkowski p-norm to use.
+
+        """
+        return minkowski_distance(
+            0, np.maximum(0, np.maximum(self.mins-x, x-self.maxes)),
+            p
+        )
+
+    def max_distance_point(self, x, p=2.):
+        """
+        Return the maximum distance between input and points in the hyperrectangle.
+
+        Parameters
+        ----------
+        x : array_like
+            Input point.
+        p : float, optional
+            Which Minkowski p-norm to use.
+
+        """
+        return minkowski_distance(0, np.maximum(self.maxes-x, x-self.mins), p)
+
+    def min_distance_rectangle(self, other, p=2.):
+        """
+        Compute the minimum distance between points in the two hyperrectangles.
+
+        Parameters
+        ----------
+        other : hyperrectangle
+            The other hyperrectangle.
+        p : float, optional
+            Which Minkowski p-norm to use.
+
+        """
+        return minkowski_distance(
+            0,
+            np.maximum(0, np.maximum(self.mins-other.maxes,
+                                     other.mins-self.maxes)),
+            p
+        )
+
+    def max_distance_rectangle(self, other, p=2.):
+        """
+        Compute the maximum distance between points in the two hyperrectangles.
+
+        Parameters
+        ----------
+        other : hyperrectangle
+            The other hyperrectangle.
+        p : float, optional
+            Which Minkowski p-norm to use.
+
+        """
+        return minkowski_distance(
+            0, np.maximum(self.maxes-other.mins, other.maxes-self.mins), p)
+
+
+class KDTree(cKDTree):
+    """kd-tree for quick nearest-neighbor lookup.
+
+    This class provides an index into a set of k-dimensional points
+    which can be used to rapidly look up the nearest neighbors of any
+    point.
+
+    Parameters
+    ----------
+    data : array_like, shape (n,m)
+        The n data points of dimension m to be indexed. This array is
+        not copied unless this is necessary to produce a contiguous
+        array of doubles, and so modifying this data will result in
+        bogus results. The data are also copied if the kd-tree is built
+        with copy_data=True.
+    leafsize : positive int, optional
+        The number of points at which the algorithm switches over to
+        brute-force.  Default: 10.
+    compact_nodes : bool, optional
+        If True, the kd-tree is built to shrink the hyperrectangles to
+        the actual data range. This usually gives a more compact tree that
+        is robust against degenerated input data and gives faster queries
+        at the expense of longer build time. Default: True.
+    copy_data : bool, optional
+        If True the data is always copied to protect the kd-tree against
+        data corruption. Default: False.
+    balanced_tree : bool, optional
+        If True, the median is used to split the hyperrectangles instead of
+        the midpoint. This usually gives a more compact tree and
+        faster queries at the expense of longer build time. Default: True.
+    boxsize : array_like or scalar, optional
+        Apply an m-d toroidal topology to the KDTree. The topology is
+        generated by :math:`x_i + n_i L_i` where :math:`n_i` are integers and
+        :math:`L_i` is the boxsize along the i-th dimension. The input data
+        must be wrapped into :math:`[0, L_i)`. A ValueError is raised if any
+        of the data is outside of this bound. (See the sketch after this
+        list.)
+
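+    For example, a minimal sketch of a periodic box (distances wrap around
+    the unit square)::
+
+        tree = KDTree([[0.9, 0.5]], boxsize=[1.0, 1.0])
+        d, i = tree.query([[0.05, 0.5]])   # d is ~0.15, not 0.85
+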
+    Notes
+    -----
+    The algorithm used is described in Maneewongvatana and Mount 1999.
+    The general idea is that the kd-tree is a binary tree, each of whose
+    nodes represents an axis-aligned hyperrectangle. Each node specifies
+    an axis and splits the set of points based on whether their coordinate
+    along that axis is greater than or less than a particular value.
+
+    During construction, the axis and splitting point are chosen by the
+    "sliding midpoint" rule, which ensures that the cells do not all
+    become long and thin.
+
+    The tree can be queried for the r closest neighbors of any given point
+    (optionally returning only those within some maximum distance of the
+    point). It can also be queried, with a substantial gain in efficiency,
+    for the r approximate closest neighbors.
+
+    For large dimensions (20 is already large) do not expect this to run
+    significantly faster than brute force. High-dimensional nearest-neighbor
+    queries are a substantial open problem in computer science.
+
+    Attributes
+    ----------
+    data : ndarray, shape (n,m)
+        The n data points of dimension m to be indexed. This array is
+        not copied unless this is necessary to produce a contiguous
+        array of doubles. The data are also copied if the kd-tree is built
+        with `copy_data=True`.
+    leafsize : positive int
+        The number of points at which the algorithm switches over to
+        brute-force.
+    m : int
+        The dimension of a single data-point.
+    n : int
+        The number of data points.
+    maxes : ndarray, shape (m,)
+        The maximum value in each dimension of the n data points.
+    mins : ndarray, shape (m,)
+        The minimum value in each dimension of the n data points.
+    size : int
+        The number of nodes in the tree.
+
+    """
+
+    class node:
+        @staticmethod
+        def _create(ckdtree_node=None):
+            """Create either an inner or leaf node, wrapping a cKDTreeNode instance"""
+            if ckdtree_node is None:
+                return KDTree.node(ckdtree_node)
+            elif ckdtree_node.split_dim == -1:
+                return KDTree.leafnode(ckdtree_node)
+            else:
+                return KDTree.innernode(ckdtree_node)
+
+        def __init__(self, ckdtree_node=None):
+            if ckdtree_node is None:
+                ckdtree_node = cKDTreeNode()
+            self._node = ckdtree_node
+
+        def __lt__(self, other):
+            return id(self) < id(other)
+
+        def __gt__(self, other):
+            return id(self) > id(other)
+
+        def __le__(self, other):
+            return id(self) <= id(other)
+
+        def __ge__(self, other):
+            return id(self) >= id(other)
+
+        def __eq__(self, other):
+            return id(self) == id(other)
+
+    class leafnode(node):
+        @property
+        def idx(self):
+            return self._node.indices
+
+        @property
+        def children(self):
+            return self._node.children
+
+    class innernode(node):
+        def __init__(self, ckdtreenode):
+            assert isinstance(ckdtreenode, cKDTreeNode)
+            super().__init__(ckdtreenode)
+            self.less = KDTree.node._create(ckdtreenode.lesser)
+            self.greater = KDTree.node._create(ckdtreenode.greater)
+
+        @property
+        def split_dim(self):
+            return self._node.split_dim
+
+        @property
+        def split(self):
+            return self._node.split
+
+        @property
+        def children(self):
+            return self._node.children
+
+    @property
+    def tree(self):
+        if not hasattr(self, "_tree"):
+            self._tree = KDTree.node._create(super().tree)
+
+        return self._tree
+
+    def __init__(self, data, leafsize=10, compact_nodes=True, copy_data=False,
+                 balanced_tree=True, boxsize=None):
+        data = np.asarray(data)
+        if data.dtype.kind == 'c':
+            raise TypeError("KDTree does not work with complex data")
+
+        # Note KDTree has different default leafsize from cKDTree
+        super().__init__(data, leafsize, compact_nodes, copy_data,
+                         balanced_tree, boxsize)
+
+    def query(
+            self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf, workers=1):
+        r"""Query the kd-tree for nearest neighbors.
+
+        Parameters
+        ----------
+        x : array_like, last dimension self.m
+            An array of points to query.
+        k : int or Sequence[int], optional
+            Either the number of nearest neighbors to return, or a list of the
+            k-th nearest neighbors to return, starting from 1.
+        eps : nonnegative float, optional
+            Return approximate nearest neighbors; the kth returned value
+            is guaranteed to be no further than (1+eps) times the
+            distance to the real kth nearest neighbor.
+        p : float, 1<=p<=infinity, optional
+            Which Minkowski p-norm to use.
+            1 is the sum-of-absolute-values distance ("Manhattan" distance).
+            2 is the usual Euclidean distance.
+            infinity is the maximum-coordinate-difference distance.
+            A large, finite p may cause a ValueError if overflow can occur.
+        distance_upper_bound : nonnegative float, optional
+            Return only neighbors within this distance. This is used to prune
+            tree searches, so if you are doing a series of nearest-neighbor
+            queries, it may help to supply the distance to the nearest neighbor
+            of the most recent point.
+        workers : int, optional
+            Number of workers to use for parallel processing. If -1 is given
+            all CPU threads are used. Default: 1.
+
+            .. versionadded:: 1.6.0
+
+        Returns
+        -------
+        d : float or array of floats
+            The distances to the nearest neighbors.
+            If ``x`` has shape ``tuple+(self.m,)``, then ``d`` has shape
+            ``tuple+(k,)``.
+            When k == 1, the last dimension of the output is squeezed.
+            Missing neighbors are indicated with infinite distances.
+            Hits are sorted by distance (nearest first).
+
+            .. versionchanged:: 1.9.0
+               Previously if ``k=None``, then `d` was an object array of
+               shape ``tuple``, containing lists of distances. This behavior
+               has been removed, use `query_ball_point` instead.
+
+        i : integer or array of integers
+            The index of each neighbor in ``self.data``.
+            ``i`` is the same shape as d.
+            Missing neighbors are indicated with ``self.n``.
+
+        Examples
+        --------
+
+        >>> import numpy as np
+        >>> from scipy.spatial import KDTree
+        >>> x, y = np.mgrid[0:5, 2:8]
+        >>> tree = KDTree(np.c_[x.ravel(), y.ravel()])
+
+        To query the nearest neighbors and return a squeezed result, use
+
+        >>> dd, ii = tree.query([[0, 0], [2.2, 2.9]], k=1)
+        >>> print(dd, ii, sep='\n')
+        [2.         0.2236068]
+        [ 0 13]
+
+        To query the nearest neighbors and return an unsqueezed result, use
+
+        >>> dd, ii = tree.query([[0, 0], [2.2, 2.9]], k=[1])
+        >>> print(dd, ii, sep='\n')
+        [[2.        ]
+         [0.2236068]]
+        [[ 0]
+         [13]]
+
+        To query the second nearest neighbors and return an unsqueezed result,
+        use
+
+        >>> dd, ii = tree.query([[0, 0], [2.2, 2.9]], k=[2])
+        >>> print(dd, ii, sep='\n')
+        [[2.23606798]
+         [0.80622577]]
+        [[ 6]
+         [19]]
+
+        To query the first and second nearest neighbors, use
+
+        >>> dd, ii = tree.query([[0, 0], [2.2, 2.9]], k=2)
+        >>> print(dd, ii, sep='\n')
+        [[2.         2.23606798]
+         [0.2236068  0.80622577]]
+        [[ 0  6]
+         [13 19]]
+
+        or, be more specific
+
+        >>> dd, ii = tree.query([[0, 0], [2.2, 2.9]], k=[1, 2])
+        >>> print(dd, ii, sep='\n')
+        [[2.         2.23606798]
+         [0.2236068  0.80622577]]
+        [[ 0  6]
+         [13 19]]
+
+        """
+        x = np.asarray(x)
+        if x.dtype.kind == 'c':
+            raise TypeError("KDTree does not work with complex data")
+
+        if k is None:
+            raise ValueError("k must be an integer or a sequence of integers")
+
+        d, i = super().query(x, k, eps, p, distance_upper_bound, workers)
+        if isinstance(i, int):
+            i = np.intp(i)
+        return d, i
+
+    def query_ball_point(self, x, r, p=2., eps=0, workers=1,
+                         return_sorted=None, return_length=False):
+        """Find all points within distance r of point(s) x.
+
+        Parameters
+        ----------
+        x : array_like, shape tuple + (self.m,)
+            The point or points to search for neighbors of.
+        r : array_like, float
+            The radius of points to return, must broadcast to the length of x.
+        p : float, optional
+            Which Minkowski p-norm to use.  Should be in the range [1, inf].
+            A finite large p may cause a ValueError if overflow can occur.
+        eps : nonnegative float, optional
+            Approximate search. Branches of the tree are not explored if their
+            nearest points are further than ``r / (1 + eps)``, and branches are
+            added in bulk if their furthest points are nearer than
+            ``r * (1 + eps)``.
+        workers : int, optional
+            Number of jobs to schedule for parallel processing. If -1 is given
+            all processors are used. Default: 1.
+
+            .. versionadded:: 1.6.0
+        return_sorted : bool, optional
+            Sorts returned indices if True and does not sort them if False. If
+            None, does not sort single point queries, but does sort
+            multi-point queries, which was the behavior before this option
+            was added.
+
+            .. versionadded:: 1.6.0
+        return_length : bool, optional
+            Return the number of points inside the radius instead of a list
+            of the indices.
+
+            .. versionadded:: 1.6.0
+
+        Returns
+        -------
+        results : list or array of lists
+            If `x` is a single point, returns a list of the indices of the
+            neighbors of `x`. If `x` is an array of points, returns an object
+            array of shape tuple containing lists of neighbors.
+
+        Notes
+        -----
+        If you have many points whose neighbors you want to find, you may save
+        substantial amounts of time by putting them in a KDTree and using
+        query_ball_tree.
+
+        Examples
+        --------
+        >>> import numpy as np
+        >>> from scipy import spatial
+        >>> x, y = np.mgrid[0:5, 0:5]
+        >>> points = np.c_[x.ravel(), y.ravel()]
+        >>> tree = spatial.KDTree(points)
+        >>> sorted(tree.query_ball_point([2, 0], 1))
+        [5, 10, 11, 15]
+
+        Query multiple points and plot the results:
+
+        >>> import matplotlib.pyplot as plt
+        >>> points = np.asarray(points)
+        >>> plt.plot(points[:,0], points[:,1], '.')
+        >>> for results in tree.query_ball_point(([2, 0], [3, 3]), 1):
+        ...     nearby_points = points[results]
+        ...     plt.plot(nearby_points[:,0], nearby_points[:,1], 'o')
+        >>> plt.margins(0.1, 0.1)
+        >>> plt.show()
+
+        """
+        x = np.asarray(x)
+        if x.dtype.kind == 'c':
+            raise TypeError("KDTree does not work with complex data")
+        return super().query_ball_point(
+            x, r, p, eps, workers, return_sorted, return_length)
+
+    def query_ball_tree(self, other, r, p=2., eps=0):
+        """
+        Find all pairs of points between `self` and `other` whose distance is
+        at most r.
+
+        Parameters
+        ----------
+        other : KDTree instance
+            The tree containing points to search against.
+        r : float
+            The maximum distance, has to be positive.
+        p : float, optional
+            Which Minkowski norm to use.  `p` has to meet the condition
+            ``1 <= p <= infinity``.
+        eps : float, optional
+            Approximate search.  Branches of the tree are not explored
+            if their nearest points are further than ``r/(1+eps)``, and
+            branches are added in bulk if their furthest points are nearer
+            than ``r * (1+eps)``.  `eps` has to be non-negative.
+
+        Returns
+        -------
+        results : list of lists
+            For each element ``self.data[i]`` of this tree, ``results[i]`` is a
+            list of the indices of its neighbors in ``other.data``.
+
+        Examples
+        --------
+        You can search all pairs of points between two kd-trees within a distance:
+
+        >>> import matplotlib.pyplot as plt
+        >>> import numpy as np
+        >>> from scipy.spatial import KDTree
+        >>> rng = np.random.default_rng()
+        >>> points1 = rng.random((15, 2))
+        >>> points2 = rng.random((15, 2))
+        >>> plt.figure(figsize=(6, 6))
+        >>> plt.plot(points1[:, 0], points1[:, 1], "xk", markersize=14)
+        >>> plt.plot(points2[:, 0], points2[:, 1], "og", markersize=14)
+        >>> kd_tree1 = KDTree(points1)
+        >>> kd_tree2 = KDTree(points2)
+        >>> indexes = kd_tree1.query_ball_tree(kd_tree2, r=0.2)
+        >>> for i in range(len(indexes)):
+        ...     for j in indexes[i]:
+        ...         plt.plot([points1[i, 0], points2[j, 0]],
+        ...             [points1[i, 1], points2[j, 1]], "-r")
+        >>> plt.show()
+
+        """
+        return super().query_ball_tree(other, r, p, eps)
+
+    def query_pairs(self, r, p=2., eps=0, output_type='set'):
+        """Find all pairs of points in `self` whose distance is at most r.
+
+        Parameters
+        ----------
+        r : positive float
+            The maximum distance.
+        p : float, optional
+            Which Minkowski norm to use.  `p` has to meet the condition
+            ``1 <= p <= infinity``.
+        eps : float, optional
+            Approximate search.  Branches of the tree are not explored
+            if their nearest points are further than ``r/(1+eps)``, and
+            branches are added in bulk if their furthest points are nearer
+            than ``r * (1+eps)``.  `eps` has to be non-negative.
+        output_type : string, optional
+            Choose the output container, 'set' or 'ndarray'. Default: 'set'
+
+            .. versionadded:: 1.6.0
+
+        Returns
+        -------
+        results : set or ndarray
+            Set of pairs ``(i,j)``, with ``i < j``, for which the corresponding
+            positions are close. If output_type is 'ndarray', an ndarry is
+            returned instead of a set.
+
+        Examples
+        --------
+        You can search all pairs of points in a kd-tree within a distance:
+
+        >>> import matplotlib.pyplot as plt
+        >>> import numpy as np
+        >>> from scipy.spatial import KDTree
+        >>> rng = np.random.default_rng()
+        >>> points = rng.random((20, 2))
+        >>> plt.figure(figsize=(6, 6))
+        >>> plt.plot(points[:, 0], points[:, 1], "xk", markersize=14)
+        >>> kd_tree = KDTree(points)
+        >>> pairs = kd_tree.query_pairs(r=0.2)
+        >>> for (i, j) in pairs:
+        ...     plt.plot([points[i, 0], points[j, 0]],
+        ...             [points[i, 1], points[j, 1]], "-r")
+        >>> plt.show()
+
+        """
+        return super().query_pairs(r, p, eps, output_type)
+
+    def count_neighbors(self, other, r, p=2., weights=None, cumulative=True):
+        """Count how many nearby pairs can be formed.
+
+        Count the number of pairs ``(x1,x2)`` that can be formed, with ``x1`` drawn
+        from ``self`` and ``x2`` drawn from ``other``, and where
+        ``distance(x1, x2, p) <= r``.
+
+        Data points on ``self`` and ``other`` are optionally weighted by the
+        ``weights`` argument. (See below)
+
+        This is adapted from the "two-point correlation" algorithm described by
+        Gray and Moore [1]_.  See notes for further discussion.
+
+        Parameters
+        ----------
+        other : KDTree
+            The other tree to draw points from, can be the same tree as self.
+        r : float or one-dimensional array of floats
+            The radius to produce a count for. Multiple radii are searched with
+            a single tree traversal.
+            If the count is non-cumulative (``cumulative=False``), ``r`` defines
+            the edges of the bins, and must be non-decreasing.
+        p : float, optional
+            1<=p<=infinity.
+            Which Minkowski p-norm to use.
+            Default 2.0.
+            A finite large p may cause a ValueError if overflow can occur.
+        weights : tuple, array_like, or None, optional
+            If None, the pair-counting is unweighted.
+            If given as a tuple, weights[0] is the weights of points in
+            ``self``, and weights[1] is the weights of points in ``other``;
+            either can be None to indicate the points are unweighted.
+            If given as an array_like, weights is the weights of points in
+            ``self`` and ``other``. For this to make sense, ``self`` and
+            ``other`` must be the same tree. If ``self`` and ``other`` are two
+            different trees, a ``ValueError`` is raised.
+            Default: None
+
+            .. versionadded:: 1.6.0
+        cumulative : bool, optional
+            Whether the returned counts are cumulative. When cumulative is set
+            to ``False`` the algorithm is optimized to work with a large number
+            of bins (>10) specified by ``r``. When ``cumulative`` is set to
+            True, the algorithm is optimized to work with a small number of
+            ``r``. Default: True
+
+            .. versionadded:: 1.6.0
+
+        Returns
+        -------
+        result : scalar or 1-D array
+            The number of pairs. For unweighted counts, the result is integer.
+            For weighted counts, the result is float.
+            If cumulative is False, ``result[i]`` contains the counts with
+            ``(-inf if i == 0 else r[i-1]) < R <= r[i]``
+
+        Notes
+        -----
+        Pair-counting is the basic operation used to calculate the two point
+        correlation functions from a data set composed of position of objects.
+
+        The two point correlation function measures the clustering of objects
+        and is widely used in cosmology to quantify the large scale structure
+        in our Universe, but it may be useful for data analysis in other
+        fields where self-similar assembly of objects also occurs.
+
+        The Landy-Szalay estimator for the two point correlation function of
+        ``D`` measures the clustering signal in ``D``. [2]_
+
+        For example, given the position of two sets of objects,
+
+        - objects ``D`` (data) contains the clustering signal, and
+
+        - objects ``R`` (random) that contains no signal,
+
+        .. math::
+
+             \\xi(r) = \\frac{<D, D> - 2 f <D, R> + f^2 <R, R>}{f^2 <R, R>},
+
+        where the brackets represent counting pairs between two data sets
+        in a finite bin around ``r`` (distance), corresponding to setting
+        `cumulative=False`, and ``f = float(len(D)) / float(len(R))`` is the
+        ratio between the number of objects from data and random.
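+
+        A minimal sketch of this estimator (assuming ``D`` and ``R`` are
+        arrays of positions and ``r`` holds the distance bin edges)::
+
+            f = len(D) / len(R)
+            dd = KDTree(D).count_neighbors(KDTree(D), r, cumulative=False)
+            dr = KDTree(D).count_neighbors(KDTree(R), r, cumulative=False)
+            rr = KDTree(R).count_neighbors(KDTree(R), r, cumulative=False)
+            xi = (dd - 2 * f * dr + f**2 * rr) / (f**2 * rr)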
+
+        The algorithm implemented here is loosely based on the dual-tree
+        algorithm described in [1]_. We switch between two different
+        pair-cumulation schemes depending on the setting of ``cumulative``.
+        The computing time of the method we use for
+        ``cumulative == False`` does not scale with the total number of bins.
+        The algorithm for ``cumulative == True`` scales linearly with the
+        number of bins, though it is slightly faster when only
+        1 or 2 bins are used [5]_.
+
+        As an extension to the naive pair-counting,
+        weighted pair-counting counts the product of weights instead
+        of the number of pairs.
+        Weighted pair-counting is used to estimate marked correlation functions
+        ([3]_, section 2.2),
+        or to properly calculate the average of data per distance bin
+        (e.g. [4]_, section 2.1 on redshift).
+
+        .. [1] Gray and Moore,
+               "N-body problems in statistical learning",
+               Mining the sky, 2000,
+               https://arxiv.org/abs/astro-ph/0012333
+
+        .. [2] Landy and Szalay,
+               "Bias and variance of angular correlation functions",
+               The Astrophysical Journal, 1993,
+               http://adsabs.harvard.edu/abs/1993ApJ...412...64L
+
+        .. [3] Sheth, Connolly and Skibba,
+               "Marked correlations in galaxy formation models",
+               Arxiv e-print, 2005,
+               https://arxiv.org/abs/astro-ph/0511773
+
+        .. [4] Hawkins, et al.,
+               "The 2dF Galaxy Redshift Survey: correlation functions,
+               peculiar velocities and the matter density of the Universe",
+               Monthly Notices of the Royal Astronomical Society, 2002,
+               http://adsabs.harvard.edu/abs/2003MNRAS.346...78H
+
+        .. [5] https://github.com/scipy/scipy/pull/5647#issuecomment-168474926
+
+        Examples
+        --------
+        You can count the number of neighbor pairs between two kd-trees
+        within a distance:
+
+        >>> import numpy as np
+        >>> from scipy.spatial import KDTree
+        >>> rng = np.random.default_rng()
+        >>> points1 = rng.random((5, 2))
+        >>> points2 = rng.random((5, 2))
+        >>> kd_tree1 = KDTree(points1)
+        >>> kd_tree2 = KDTree(points2)
+        >>> kd_tree1.count_neighbors(kd_tree2, 0.2)
+        1
+
+        This number is the same as the total number of pairs calculated by
+        `query_ball_tree`:
+
+        >>> indexes = kd_tree1.query_ball_tree(kd_tree2, r=0.2)
+        >>> sum([len(i) for i in indexes])
+        1
+
+        """
+        return super().count_neighbors(other, r, p, weights, cumulative)
+
+    def sparse_distance_matrix(
+            self, other, max_distance, p=2., output_type='dok_matrix'):
+        """Compute a sparse distance matrix.
+
+        Computes a distance matrix between two KDTrees, leaving as zero
+        any distance greater than max_distance.
+
+        Parameters
+        ----------
+        other : KDTree
+            The other tree to compute distances against.
+
+        max_distance : positive float
+            Distances greater than this are left as implicit zeros.
+
+        p : float, 1<=p<=infinity
+            Which Minkowski p-norm to use.
+            A finite large p may cause a ValueError if overflow can occur.
+
+        output_type : string, optional
+            Which container to use for output data. Options: 'dok_matrix',
+            'coo_matrix', 'dict', or 'ndarray'. Default: 'dok_matrix'.
+
+            .. versionadded:: 1.6.0
+
+        Returns
+        -------
+        result : dok_matrix, coo_matrix, dict or ndarray
+            Sparse matrix representing the results in "dictionary of keys"
+            format. If a dict is returned, the keys are (i,j) tuples of indices.
+            If output_type is 'ndarray', a record array with fields 'i', 'j',
+            and 'v' is returned.
+
+        Examples
+        --------
+        You can compute a sparse distance matrix between two kd-trees:
+
+        >>> import numpy as np
+        >>> from scipy.spatial import KDTree
+        >>> rng = np.random.default_rng()
+        >>> points1 = rng.random((5, 2))
+        >>> points2 = rng.random((5, 2))
+        >>> kd_tree1 = KDTree(points1)
+        >>> kd_tree2 = KDTree(points2)
+        >>> sdm = kd_tree1.sparse_distance_matrix(kd_tree2, 0.3)
+        >>> sdm.toarray()
+        array([[0.        , 0.        , 0.12295571, 0.        , 0.        ],
+           [0.        , 0.        , 0.        , 0.        , 0.        ],
+           [0.28942611, 0.        , 0.        , 0.2333084 , 0.        ],
+           [0.        , 0.        , 0.        , 0.        , 0.        ],
+           [0.24617575, 0.29571802, 0.26836782, 0.        , 0.        ]])
+
+        You can check distances above the `max_distance` are zeros:
+
+        >>> from scipy.spatial import distance_matrix
+        >>> distance_matrix(points1, points2)
+        array([[0.56906522, 0.39923701, 0.12295571, 0.8658745 , 0.79428925],
+           [0.37327919, 0.7225693 , 0.87665969, 0.32580855, 0.75679479],
+           [0.28942611, 0.30088013, 0.6395831 , 0.2333084 , 0.33630734],
+           [0.31994999, 0.72658602, 0.71124834, 0.55396483, 0.90785663],
+           [0.24617575, 0.29571802, 0.26836782, 0.57714465, 0.6473269 ]])
+
+        """
+        return super().sparse_distance_matrix(
+            other, max_distance, p, output_type)
+
+
+def distance_matrix(x, y, p=2, threshold=1000000):
+    """Compute the distance matrix.
+
+    Returns the matrix of all pair-wise distances.
+
+    Parameters
+    ----------
+    x : (M, K) array_like
+        Matrix of M vectors in K dimensions.
+    y : (N, K) array_like
+        Matrix of N vectors in K dimensions.
+    p : float, 1 <= p <= infinity
+        Which Minkowski p-norm to use.
+    threshold : positive int
+        If ``M * N * K`` > `threshold`, algorithm uses a Python loop instead
+        of large temporary arrays.
+
+    Returns
+    -------
+    result : (M, N) ndarray
+        Matrix containing the distance from every vector in `x` to every vector
+        in `y`.
+
+    Examples
+    --------
+    >>> from scipy.spatial import distance_matrix
+    >>> distance_matrix([[0,0],[0,1]], [[1,0],[1,1]])
+    array([[ 1.        ,  1.41421356],
+           [ 1.41421356,  1.        ]])
+
+    """
+
+    x = np.asarray(x)
+    m, k = x.shape
+    y = np.asarray(y)
+    n, kk = y.shape
+
+    if k != kk:
+        raise ValueError(f"x contains {k}-dimensional vectors but y contains "
+                         f"{kk}-dimensional vectors")
+
+    if m*n*k <= threshold:
+        return minkowski_distance(x[:,np.newaxis,:],y[np.newaxis,:,:],p)
+    else:
+        result = np.empty((m,n),dtype=float)  # FIXME: figure out the best dtype
+        if m < n:
+            for i in range(m):
+                result[i,:] = minkowski_distance(x[i],y,p)
+        else:
+            for j in range(n):
+                result[:,j] = minkowski_distance(x,y[j],p)
+        return result
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/_plotutils.py b/__packaged__/coreml/.python_dependencies/scipy/spatial/_plotutils.py
new file mode 100644
index 00000000..f799c3f2
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/_plotutils.py
@@ -0,0 +1,269 @@
+import numpy as np
+from scipy._lib.decorator import decorator as _decorator
+
+__all__ = ['delaunay_plot_2d', 'convex_hull_plot_2d', 'voronoi_plot_2d']
+
+
+@_decorator
+def _held_figure(func, obj, ax=None, **kw):
+    import matplotlib.pyplot as plt
+
+    if ax is None:
+        fig = plt.figure()
+        ax = fig.gca()
+        return func(obj, ax=ax, **kw)
+
+    # As of matplotlib 2.0, the "hold" mechanism is deprecated.
+    # When matplotlib 1.x is no longer supported, this check can be removed.
+    was_held = getattr(ax, 'ishold', lambda: True)()
+    if was_held:
+        return func(obj, ax=ax, **kw)
+    try:
+        ax.hold(True)
+        return func(obj, ax=ax, **kw)
+    finally:
+        ax.hold(was_held)
+
+
+def _adjust_bounds(ax, points):
+    margin = 0.1 * points.ptp(axis=0)
+    xy_min = points.min(axis=0) - margin
+    xy_max = points.max(axis=0) + margin
+    ax.set_xlim(xy_min[0], xy_max[0])
+    ax.set_ylim(xy_min[1], xy_max[1])
+
+
+@_held_figure
+def delaunay_plot_2d(tri, ax=None):
+    """
+    Plot the given Delaunay triangulation in 2-D
+
+    Parameters
+    ----------
+    tri : scipy.spatial.Delaunay instance
+        Triangulation to plot
+    ax : matplotlib.axes.Axes instance, optional
+        Axes to plot on
+
+    Returns
+    -------
+    fig : matplotlib.figure.Figure instance
+        Figure for the plot
+
+    See Also
+    --------
+    Delaunay
+    matplotlib.pyplot.triplot
+
+    Notes
+    -----
+    Requires Matplotlib.
+
+    Examples
+    --------
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.spatial import Delaunay, delaunay_plot_2d
+
+    The Delaunay triangulation of a set of random points:
+
+    >>> rng = np.random.default_rng()
+    >>> points = rng.random((30, 2))
+    >>> tri = Delaunay(points)
+
+    Plot it:
+
+    >>> _ = delaunay_plot_2d(tri)
+    >>> plt.show()
+
+    """
+    if tri.points.shape[1] != 2:
+        raise ValueError("Delaunay triangulation is not 2-D")
+
+    x, y = tri.points.T
+    ax.plot(x, y, 'o')
+    ax.triplot(x, y, tri.simplices.copy())
+
+    _adjust_bounds(ax, tri.points)
+
+    return ax.figure
+
+
+@_held_figure
+def convex_hull_plot_2d(hull, ax=None):
+    """
+    Plot the given convex hull diagram in 2-D
+
+    Parameters
+    ----------
+    hull : scipy.spatial.ConvexHull instance
+        Convex hull to plot
+    ax : matplotlib.axes.Axes instance, optional
+        Axes to plot on
+
+    Returns
+    -------
+    fig : matplotlib.figure.Figure instance
+        Figure for the plot
+
+    See Also
+    --------
+    ConvexHull
+
+    Notes
+    -----
+    Requires Matplotlib.
+
+
+    Examples
+    --------
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.spatial import ConvexHull, convex_hull_plot_2d
+
+    The convex hull of a random set of points:
+
+    >>> rng = np.random.default_rng()
+    >>> points = rng.random((30, 2))
+    >>> hull = ConvexHull(points)
+
+    Plot it:
+
+    >>> _ = convex_hull_plot_2d(hull)
+    >>> plt.show()
+
+    """
+    from matplotlib.collections import LineCollection
+
+    if hull.points.shape[1] != 2:
+        raise ValueError("Convex hull is not 2-D")
+
+    ax.plot(hull.points[:, 0], hull.points[:, 1], 'o')
+    line_segments = [hull.points[simplex] for simplex in hull.simplices]
+    ax.add_collection(LineCollection(line_segments,
+                                     colors='k',
+                                     linestyle='solid'))
+    _adjust_bounds(ax, hull.points)
+
+    return ax.figure
+
+
+@_held_figure
+def voronoi_plot_2d(vor, ax=None, **kw):
+    """
+    Plot the given Voronoi diagram in 2-D
+
+    Parameters
+    ----------
+    vor : scipy.spatial.Voronoi instance
+        Diagram to plot
+    ax : matplotlib.axes.Axes instance, optional
+        Axes to plot on
+    show_points : bool, optional
+        Add the Voronoi points to the plot.
+    show_vertices : bool, optional
+        Add the Voronoi vertices to the plot.
+    line_colors : string, optional
+        Specifies the line color for polygon boundaries
+    line_width : float, optional
+        Specifies the line width for polygon boundaries
+    line_alpha : float, optional
+        Specifies the line alpha for polygon boundaries
+    point_size : float, optional
+        Specifies the size of points
+
+    Returns
+    -------
+    fig : matplotlib.figure.Figure instance
+        Figure for the plot
+
+    See Also
+    --------
+    Voronoi
+
+    Notes
+    -----
+    Requires Matplotlib.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.spatial import Voronoi, voronoi_plot_2d
+
+    Create a set of points for the example:
+
+    >>> rng = np.random.default_rng()
+    >>> points = rng.random((10,2))
+
+    Generate the Voronoi diagram for the points:
+
+    >>> vor = Voronoi(points)
+
+    Use `voronoi_plot_2d` to plot the diagram:
+
+    >>> fig = voronoi_plot_2d(vor)
+
+    Use `voronoi_plot_2d` to plot the diagram again, with some settings
+    customized:
+
+    >>> fig = voronoi_plot_2d(vor, show_vertices=False, line_colors='orange',
+    ...                       line_width=2, line_alpha=0.6, point_size=2)
+    >>> plt.show()
+
+    """
+    from matplotlib.collections import LineCollection
+
+    if vor.points.shape[1] != 2:
+        raise ValueError("Voronoi diagram is not 2-D")
+
+    if kw.get('show_points', True):
+        point_size = kw.get('point_size', None)
+        ax.plot(vor.points[:, 0], vor.points[:, 1], '.', markersize=point_size)
+    if kw.get('show_vertices', True):
+        ax.plot(vor.vertices[:, 0], vor.vertices[:, 1], 'o')
+
+    line_colors = kw.get('line_colors', 'k')
+    line_width = kw.get('line_width', 1.0)
+    line_alpha = kw.get('line_alpha', 1.0)
+
+    center = vor.points.mean(axis=0)
+    ptp_bound = vor.points.ptp(axis=0)
+
+    finite_segments = []
+    infinite_segments = []
+    for pointidx, simplex in zip(vor.ridge_points, vor.ridge_vertices):
+        simplex = np.asarray(simplex)
+        if np.all(simplex >= 0):
+            finite_segments.append(vor.vertices[simplex])
+        else:
+            i = simplex[simplex >= 0][0]  # finite end Voronoi vertex
+
+            t = vor.points[pointidx[1]] - vor.points[pointidx[0]]  # tangent
+            t /= np.linalg.norm(t)
+            n = np.array([-t[1], t[0]])  # normal
+
+            midpoint = vor.points[pointidx].mean(axis=0)
+            direction = np.sign(np.dot(midpoint - center, n)) * n
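+            # orient the ray away from the centroid of the input points so
+            # the dashed segment extends outward (flipped below for
+            # furthest-site diagrams)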
+            if vor.furthest_site:
+                direction = -direction
+            far_point = vor.vertices[i] + direction * ptp_bound.max()
+
+            infinite_segments.append([vor.vertices[i], far_point])
+
+    ax.add_collection(LineCollection(finite_segments,
+                                     colors=line_colors,
+                                     lw=line_width,
+                                     alpha=line_alpha,
+                                     linestyle='solid'))
+    ax.add_collection(LineCollection(infinite_segments,
+                                     colors=line_colors,
+                                     lw=line_width,
+                                     alpha=line_alpha,
+                                     linestyle='dashed'))
+
+    _adjust_bounds(ax, vor.points)
+
+    return ax.figure
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/_procrustes.py b/__packaged__/coreml/.python_dependencies/scipy/spatial/_procrustes.py
new file mode 100644
index 00000000..6fd93bdd
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/_procrustes.py
@@ -0,0 +1,132 @@
+"""
+This module provides functions to perform full Procrustes analysis.
+
+This code was originally written by Justin Kuczynski and ported over from
+scikit-bio by Yoshiki Vazquez-Baeza.
+"""
+
+import numpy as np
+from scipy.linalg import orthogonal_procrustes
+
+
+__all__ = ['procrustes']
+
+
+def procrustes(data1, data2):
+    r"""Procrustes analysis, a similarity test for two data sets.
+
+    Each input matrix is a set of points or vectors (the rows of the matrix).
+    The dimension of the space is the number of columns of each matrix. Given
+    two identically sized matrices, procrustes standardizes both such that:
+
+    - :math:`tr(AA^{T}) = 1`.
+
+    - Both sets of points are centered around the origin.
+
+    Procrustes ([1]_, [2]_) then applies the optimal transform to the second
+    matrix (including scaling/dilation, rotations, and reflections) to minimize
+    :math:`M^{2}=\sum(data1-data2)^{2}`, or the sum of the squares of the
+    pointwise differences between the two input datasets.
+
+    This function was not designed to handle datasets with different numbers of
+    datapoints (rows).  If two data sets have different dimensionality
+    (different number of columns), simply add columns of zeros to the smaller
+    of the two.
+
+    Parameters
+    ----------
+    data1 : array_like
+        Matrix, n rows represent points in k (columns) space. `data1` is the
+        reference data; after it is standardized, the data from `data2` will
+        be transformed to fit the pattern in `data1` (must have >1 unique
+        points).
+    data2 : array_like
+        n rows of data in k space to be fit to `data1`.  Must be the same
+        shape ``(numrows, numcols)`` as `data1` (must have >1 unique points).
+
+    Returns
+    -------
+    mtx1 : array_like
+        A standardized version of `data1`.
+    mtx2 : array_like
+        The orientation of `data2` that best fits `data1`. Centered, but not
+        necessarily :math:`tr(AA^{T}) = 1`.
+    disparity : float
+        :math:`M^{2}` as defined above.
+
+    Raises
+    ------
+    ValueError
+        If the input arrays are not two-dimensional.
+        If the shape of the input arrays is different.
+        If the input arrays have zero columns or zero rows.
+
+    See Also
+    --------
+    scipy.linalg.orthogonal_procrustes
+    scipy.spatial.distance.directed_hausdorff : Another similarity test
+      for two data sets
+
+    Notes
+    -----
+    - The disparity should not depend on the order of the input matrices, but
+      the output matrices will, as only the first output matrix is guaranteed
+      to be scaled such that :math:`tr(AA^{T}) = 1`.
+
+    - Duplicate data points are generally fine; duplicating a data point
+      will increase its effect on the procrustes fit.
+
+    - The disparity scales as the number of points per input matrix.
+
+    References
+    ----------
+    .. [1] Krzanowski, W. J. (2000). "Principles of Multivariate analysis".
+    .. [2] Gower, J. C. (1975). "Generalized procrustes analysis".
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.spatial import procrustes
+
+    The matrix ``b`` is a rotated, shifted, scaled and mirrored version of
+    ``a`` here:
+
+    >>> a = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], 'd')
+    >>> b = np.array([[4, -2], [4, -4], [4, -6], [2, -6]], 'd')
+    >>> mtx1, mtx2, disparity = procrustes(a, b)
+    >>> round(disparity)
+    0.0
+
+    """
+    mtx1 = np.array(data1, dtype=np.double, copy=True)
+    mtx2 = np.array(data2, dtype=np.double, copy=True)
+
+    if mtx1.ndim != 2 or mtx2.ndim != 2:
+        raise ValueError("Input matrices must be two-dimensional")
+    if mtx1.shape != mtx2.shape:
+        raise ValueError("Input matrices must be of same shape")
+    if mtx1.size == 0:
+        raise ValueError("Input matrices must be >0 rows and >0 cols")
+
+    # translate all the data to the origin
+    mtx1 -= np.mean(mtx1, 0)
+    mtx2 -= np.mean(mtx2, 0)
+
+    norm1 = np.linalg.norm(mtx1)
+    norm2 = np.linalg.norm(mtx2)
+
+    if norm1 == 0 or norm2 == 0:
+        raise ValueError("Input matrices must contain >1 unique points")
+
+    # change scaling of data (in rows) such that trace(mtx*mtx') = 1
+    mtx1 /= norm1
+    mtx2 /= norm2
+
+    # transform mtx2 to minimize disparity
+    R, s = orthogonal_procrustes(mtx1, mtx2)
+    mtx2 = np.dot(mtx2, R.T) * s
+
+    # measure the dissimilarity between the two datasets
+    disparity = np.sum(np.square(mtx1 - mtx2))
+
+    return mtx1, mtx2, disparity
+
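As the Notes above state, the disparity is symmetric in the argument order even though only the first returned matrix is rescaled to unit trace. A quick check of that property, reusing the docstring's example data:

```python
import numpy as np
from scipy.spatial import procrustes

# b is a similarity transform of a, so both disparities are ~0 and,
# per the Notes, equal regardless of argument order.
a = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], dtype=float)
b = np.array([[4, -2], [4, -4], [4, -6], [2, -6]], dtype=float)
_, _, d_ab = procrustes(a, b)
_, _, d_ba = procrustes(b, a)
assert np.isclose(d_ab, d_ba)
```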
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/_qhull.pyi b/__packaged__/coreml/.python_dependencies/scipy/spatial/_qhull.pyi
new file mode 100644
index 00000000..8bf39e8b
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/_qhull.pyi
@@ -0,0 +1,214 @@
+'''
+Static type checking stub file for scipy/spatial/qhull.pyx
+'''
+
+from typing import List, Tuple, Any, Dict
+
+import numpy as np
+from numpy.typing import ArrayLike, NDArray
+from typing_extensions import final
+
+class QhullError(RuntimeError):
+    ...
+
+@final
+class _Qhull:
+    # Read-only cython attribute that behaves, more or less, like a property
+    @property
+    def ndim(self) -> int: ...
+    mode_option: bytes
+    options: bytes
+    furthest_site: bool
+
+    def __init__(
+        self,
+        mode_option: bytes,
+        points: NDArray[np.float64],
+        options: None | bytes = ...,
+        required_options: None | bytes = ...,
+        furthest_site: bool = ...,
+        incremental: bool = ...,
+        interior_point: None | NDArray[np.float64] = ...,
+    ) -> None: ...
+    def check_active(self) -> None: ...
+    def close(self) -> None: ...
+    def get_points(self) -> NDArray[np.float64]: ...
+    def add_points(
+        self,
+        points: ArrayLike,
+        interior_point: ArrayLike = ...
+    ) -> None: ...
+    def get_paraboloid_shift_scale(self) -> Tuple[float, float]: ...
+    def volume_area(self) -> Tuple[float, float]: ...
+    def triangulate(self) -> None: ...
+    def get_simplex_facet_array(self) -> Tuple[
+        NDArray[np.intc],
+        NDArray[np.intc],
+        NDArray[np.float64],
+        NDArray[np.intc],
+        NDArray[np.intc],
+    ]: ...
+    def get_hull_points(self) -> NDArray[np.float64]: ...
+    def get_hull_facets(self) -> Tuple[
+        List[List[int]],
+        NDArray[np.float64],
+    ]: ...
+    def get_voronoi_diagram(self) -> Tuple[
+        NDArray[np.float64],
+        NDArray[np.intc],
+        List[List[int]],
+        List[List[int]],
+        NDArray[np.intp],
+    ]: ...
+    def get_extremes_2d(self) -> NDArray[np.intc]: ...
+
+def _get_barycentric_transforms(
+    points: NDArray[np.float64],
+    simplices: NDArray[np.int_],
+    eps: float
+) -> NDArray[np.float64]: ...
+
+class _QhullUser:
+    ndim: int
+    npoints: int
+    min_bound: NDArray[np.float64]
+    max_bound: NDArray[np.float64]
+
+    def __init__(self, qhull: _Qhull, incremental: bool = ...) -> None: ...
+    def close(self) -> None: ...
+    def _update(self, qhull: _Qhull) -> None: ...
+    def _add_points(
+        self,
+        points: ArrayLike,
+        restart: bool = ...,
+        interior_point: ArrayLike = ...
+    ) -> None: ...
+
+class Delaunay(_QhullUser):
+    furthest_site: bool
+    paraboloid_scale: float
+    paraboloid_shift: float
+    simplices: NDArray[np.intc]
+    neighbors: NDArray[np.intc]
+    equations: NDArray[np.float64]
+    coplanar: NDArray[np.intc]
+    good: NDArray[np.intc]
+    nsimplex: int
+    vertices: NDArray[np.intc]
+
+    def __init__(
+        self,
+        points: ArrayLike,
+        furthest_site: bool = ...,
+        incremental: bool = ...,
+        qhull_options: None | str = ...
+    ) -> None: ...
+    def _update(self, qhull: _Qhull) -> None: ...
+    def add_points(
+        self,
+        points: ArrayLike,
+        restart: bool = ...
+    ) -> None: ...
+    @property
+    def points(self) -> NDArray[np.float64]: ...
+    @property
+    def transform(self) -> NDArray[np.float64]: ...
+    @property
+    def vertex_to_simplex(self) -> NDArray[np.intc]: ...
+    @property
+    def vertex_neighbor_vertices(self) -> Tuple[
+        NDArray[np.intc],
+        NDArray[np.intc],
+    ]: ...
+    @property
+    def convex_hull(self) -> NDArray[np.intc]: ...
+    def find_simplex(
+        self,
+        xi: ArrayLike,
+        bruteforce: bool = ...,
+        tol: float = ...
+    ) -> NDArray[np.intc]: ...
+    def plane_distance(self, xi: ArrayLike) -> NDArray[np.float64]: ...
+    def lift_points(self, x: ArrayLike) -> NDArray[np.float64]: ...
+
+def tsearch(tri: Delaunay, xi: ArrayLike) -> NDArray[np.intc]: ...
+def _copy_docstr(dst: object, src: object) -> None: ...
+
+class ConvexHull(_QhullUser):
+    simplices: NDArray[np.intc]
+    neighbors: NDArray[np.intc]
+    equations: NDArray[np.float64]
+    coplanar: NDArray[np.intc]
+    good: None | NDArray[np.bool_]
+    volume: float
+    area: float
+    nsimplex: int
+
+    def __init__(
+        self,
+        points: ArrayLike,
+        incremental: bool = ...,
+        qhull_options: None | str = ...
+    ) -> None: ...
+    def _update(self, qhull: _Qhull) -> None: ...
+    def add_points(self, points: ArrayLike,
+                   restart: bool = ...) -> None: ...
+    @property
+    def points(self) -> NDArray[np.float64]: ...
+    @property
+    def vertices(self) -> NDArray[np.intc]: ...
+
+class Voronoi(_QhullUser):
+    vertices: NDArray[np.float64]
+    ridge_points: NDArray[np.intc]
+    ridge_vertices: List[List[int]]
+    regions: List[List[int]]
+    point_region: NDArray[np.intp]
+    furthest_site: bool
+
+    def __init__(
+        self,
+        points: ArrayLike,
+        furthest_site: bool = ...,
+        incremental: bool = ...,
+        qhull_options: None | str = ...
+    ) -> None: ...
+    def _update(self, qhull: _Qhull) -> None: ...
+    def add_points(
+        self,
+        points: ArrayLike,
+        restart: bool = ...
+    ) -> None: ...
+    @property
+    def points(self) -> NDArray[np.float64]: ...
+    @property
+    def ridge_dict(self) -> Dict[Tuple[int, int], List[int]]: ...
+
+class HalfspaceIntersection(_QhullUser):
+    interior_point: NDArray[np.float64]
+    dual_facets: List[List[int]]
+    dual_equations: NDArray[np.float64]
+    dual_points: NDArray[np.float64]
+    dual_volume: float
+    dual_area: float
+    intersections: NDArray[np.float64]
+    ndim: int
+    nineq: int
+
+    def __init__(
+        self,
+        halfspaces: ArrayLike,
+        interior_point: ArrayLike,
+        incremental: bool = ...,
+        qhull_options: None | str = ...
+    ) -> None: ...
+    def _update(self, qhull: _Qhull) -> None: ...
+    def add_halfspaces(
+        self,
+        halfspaces: ArrayLike,
+        restart: bool = ...
+    ) -> None: ...
+    @property
+    def halfspaces(self) -> NDArray[np.float64]: ...
+    @property
+    def dual_vertices(self) -> NDArray[np.int_]: ...
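A sketch of how a type checker reads these stubs for the public classes, assuming a normal `scipy.spatial` import; the commented types are what the annotations above declare:

```python
import numpy as np
from scipy.spatial import Delaunay

points = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
tri = Delaunay(points)                     # Delaunay(points: ArrayLike, ...)
simplices = tri.simplices                  # NDArray[np.intc]
found = tri.find_simplex([[0.25, 0.25]])   # NDArray[np.intc]
print(simplices.dtype, found)
```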
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/_spherical_voronoi.py b/__packaged__/coreml/.python_dependencies/scipy/spatial/_spherical_voronoi.py
new file mode 100644
index 00000000..5c27b8d0
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/_spherical_voronoi.py
@@ -0,0 +1,342 @@
+"""
+Spherical Voronoi Code
+
+.. versionadded:: 0.18.0
+
+"""
+#
+# Copyright (C)  Tyler Reddy, Ross Hemsley, Edd Edmondson,
+#                Nikolai Nowaczyk, Joe Pitt-Francis, 2015.
+#
+# Distributed under the same BSD license as SciPy.
+#
+
+import numpy as np
+import scipy
+from . import _voronoi
+from scipy.spatial import cKDTree
+
+__all__ = ['SphericalVoronoi']
+
+
+def calculate_solid_angles(R):
+    """Calculates the solid angles of plane triangles. Implements the method of
+    Van Oosterom and Strackee [VanOosterom]_ with some modifications. Assumes
+    that input points have unit norm."""
+    # Original method uses a triple product `R1 . (R2 x R3)` for the numerator.
+    # This is equal to the determinant of the matrix [R1 R2 R3], which can be
+    # computed with better stability.
+    numerator = np.linalg.det(R)
+    denominator = 1 + (np.einsum('ij,ij->i', R[:, 0], R[:, 1]) +
+                       np.einsum('ij,ij->i', R[:, 1], R[:, 2]) +
+                       np.einsum('ij,ij->i', R[:, 2], R[:, 0]))
+    return np.abs(2 * np.arctan2(numerator, denominator))
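A worked check of the determinant form above, assuming the private module path `scipy.spatial._spherical_voronoi` is importable: the spherical triangle spanned by the three coordinate axes covers one octant of the unit sphere, so its solid angle must be 4*pi / 8 = pi / 2.

```python
import numpy as np
from scipy.spatial._spherical_voronoi import calculate_solid_angles  # private helper

# One triangle, R with shape (1, 3, 3): det(R) = 1 and all pairwise dot
# products vanish, so the angle is |2 * arctan2(1, 1)| = pi / 2.
R = np.array([[[1.0, 0.0, 0.0],
               [0.0, 1.0, 0.0],
               [0.0, 0.0, 1.0]]])
assert np.isclose(calculate_solid_angles(R)[0], np.pi / 2)
```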
+
+
+class SphericalVoronoi:
+    """ Voronoi diagrams on the surface of a sphere.
+
+    .. versionadded:: 0.18.0
+
+    Parameters
+    ----------
+    points : ndarray of floats, shape (npoints, ndim)
+        Coordinates of points from which to construct a spherical
+        Voronoi diagram.
+    radius : float, optional
+        Radius of the sphere (Default: 1)
+    center : ndarray of floats, shape (ndim,)
+        Center of sphere (Default: origin)
+    threshold : float
+        Threshold for detecting duplicate points and
+        mismatches between points and sphere parameters.
+        (Default: 1e-06)
+
+    Attributes
+    ----------
+    points : double array of shape (npoints, ndim)
+        the points in `ndim` dimensions to generate the Voronoi diagram from
+    radius : double
+        radius of the sphere
+    center : double array of shape (ndim,)
+        center of the sphere
+    vertices : double array of shape (nvertices, ndim)
+        Voronoi vertices corresponding to points
+    regions : list of list of integers of shape (npoints, _ )
+        the n-th entry is a list consisting of the indices
+        of the vertices belonging to the n-th point in points
+
+    Methods
+    -------
+    calculate_areas
+        Calculates the areas of the Voronoi regions. For 2D point sets, the
+        regions are circular arcs. The sum of the areas is `2 * pi * radius`.
+        For 3D point sets, the regions are spherical polygons. The sum of the
+        areas is `4 * pi * radius**2`.
+
+    Raises
+    ------
+    ValueError
+        If there are duplicates in `points`.
+        If the provided `radius` is not consistent with `points`.
+
+    Notes
+    -----
+    The spherical Voronoi diagram algorithm proceeds as follows. The Convex
+    Hull of the input points (generators) is calculated, and is equivalent to
+    their Delaunay triangulation on the surface of the sphere [Caroli]_.
+    The Convex Hull neighbour information is then used to
+    order the Voronoi region vertices around each generator. The latter
+    approach is substantially less sensitive to floating point issues than
+    angle-based methods of Voronoi region vertex sorting.
+
+    Empirical assessment of spherical Voronoi algorithm performance suggests
+    quadratic time complexity (loglinear is optimal, but algorithms are more
+    challenging to implement).
+
+    References
+    ----------
+    .. [Caroli] Caroli et al. Robust and Efficient Delaunay triangulations of
+                points on or close to a sphere. Research Report RR-7004, 2009.
+
+    .. [VanOosterom] Van Oosterom and Strackee. The solid angle of a plane
+                     triangle. IEEE Transactions on Biomedical Engineering,
+                     2, 1983, pp 125--126.
+
+    See Also
+    --------
+    Voronoi : Conventional Voronoi diagrams in N dimensions.
+
+    Examples
+    --------
+    Do some imports and take some points on a cube:
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.spatial import SphericalVoronoi, geometric_slerp
+    >>> from mpl_toolkits.mplot3d import proj3d
+    >>> # set input data
+    >>> points = np.array([[0, 0, 1], [0, 0, -1], [1, 0, 0],
+    ...                    [0, 1, 0], [0, -1, 0], [-1, 0, 0], ])
+
+    Calculate the spherical Voronoi diagram:
+
+    >>> radius = 1
+    >>> center = np.array([0, 0, 0])
+    >>> sv = SphericalVoronoi(points, radius, center)
+
+    Generate plot:
+
+    >>> # sort vertices (optional, helpful for plotting)
+    >>> sv.sort_vertices_of_regions()
+    >>> t_vals = np.linspace(0, 1, 2000)
+    >>> fig = plt.figure()
+    >>> ax = fig.add_subplot(111, projection='3d')
+    >>> # plot the unit sphere for reference (optional)
+    >>> u = np.linspace(0, 2 * np.pi, 100)
+    >>> v = np.linspace(0, np.pi, 100)
+    >>> x = np.outer(np.cos(u), np.sin(v))
+    >>> y = np.outer(np.sin(u), np.sin(v))
+    >>> z = np.outer(np.ones(np.size(u)), np.cos(v))
+    >>> ax.plot_surface(x, y, z, color='y', alpha=0.1)
+    >>> # plot generator points
+    >>> ax.scatter(points[:, 0], points[:, 1], points[:, 2], c='b')
+    >>> # plot Voronoi vertices
+    >>> ax.scatter(sv.vertices[:, 0], sv.vertices[:, 1], sv.vertices[:, 2],
+    ...                    c='g')
+    >>> # indicate Voronoi regions (as Euclidean polygons)
+    >>> for region in sv.regions:
+    ...    n = len(region)
+    ...    for i in range(n):
+    ...        start = sv.vertices[region][i]
+    ...        end = sv.vertices[region][(i + 1) % n]
+    ...        result = geometric_slerp(start, end, t_vals)
+    ...        ax.plot(result[..., 0],
+    ...                result[..., 1],
+    ...                result[..., 2],
+    ...                c='k')
+    >>> ax.azim = 10
+    >>> ax.elev = 40
+    >>> _ = ax.set_xticks([])
+    >>> _ = ax.set_yticks([])
+    >>> _ = ax.set_zticks([])
+    >>> fig.set_size_inches(4, 4)
+    >>> plt.show()
+
+    """
+    def __init__(self, points, radius=1, center=None, threshold=1e-06):
+
+        if radius is None:
+            raise ValueError('`radius` is `None`. '
+                             'Please provide a floating point number '
+                             '(i.e. `radius=1`).')
+
+        self.radius = float(radius)
+        self.points = np.array(points).astype(np.double)
+        self._dim = self.points.shape[1]
+        if center is None:
+            self.center = np.zeros(self._dim)
+        else:
+            self.center = np.array(center, dtype=float)
+
+        # test degenerate input
+        self._rank = np.linalg.matrix_rank(self.points - self.points[0],
+                                           tol=threshold * self.radius)
+        if self._rank < self._dim:
+            raise ValueError("Rank of input points must be at least {0}".format(self._dim))
+
+        if cKDTree(self.points).query_pairs(threshold * self.radius):
+            raise ValueError("Duplicate generators present.")
+
+        radii = np.linalg.norm(self.points - self.center, axis=1)
+        max_discrepancy = np.abs(radii - self.radius).max()
+        if max_discrepancy >= threshold * self.radius:
+            raise ValueError("Radius inconsistent with generators.")
+
+        self._calc_vertices_regions()
+
+    def _calc_vertices_regions(self):
+        """
+        Calculates the Voronoi vertices and regions of the generators stored
+        in self.points. The vertices will be stored in self.vertices and the
+        regions in self.regions.
+
+        This algorithm was discussed at PyData London 2015 by
+        Tyler Reddy, Ross Hemsley and Nikolai Nowaczyk
+        """
+        # get Convex Hull
+        conv = scipy.spatial.ConvexHull(self.points)
+        # get circumcenters of Convex Hull triangles from facet equations
+        # for 3D input circumcenters will have shape: (2N-4, 3)
+        self.vertices = self.radius * conv.equations[:, :-1] + self.center
+        self._simplices = conv.simplices
+        # calculate regions from triangulation
+        # for 3D input simplex_indices will have shape: (2N-4,)
+        simplex_indices = np.arange(len(self._simplices))
+        # for 3D input tri_indices will have shape: (6N-12,)
+        tri_indices = np.column_stack([simplex_indices] * self._dim).ravel()
+        # for 3D input point_indices will have shape: (6N-12,)
+        point_indices = self._simplices.ravel()
+        # for 3D input indices will have shape: (6N-12,)
+        indices = np.argsort(point_indices, kind='mergesort')
+        # for 3D input flattened_groups will have shape: (6N-12,)
+        flattened_groups = tri_indices[indices].astype(np.intp)
+        # intervals will have shape: (N+1,)
+        intervals = np.cumsum(np.bincount(point_indices + 1))
+        # split flattened groups to get nested list of unsorted regions
+        groups = [list(flattened_groups[intervals[i]:intervals[i + 1]])
+                  for i in range(len(intervals) - 1)]
+        self.regions = groups
+
+    def sort_vertices_of_regions(self):
+        """Sort indices of the vertices to be (counter-)clockwise ordered.
+
+        Raises
+        ------
+        TypeError
+            If the points are not three-dimensional.
+
+        Notes
+        -----
+        For each region in regions, it sorts the indices of the Voronoi
+        vertices such that the resulting points are in a clockwise or
+        counterclockwise order around the generator point.
+
+        This is done as follows: Recall that the n-th region in regions
+        surrounds the n-th generator in points and that the k-th
+        Voronoi vertex in vertices is the circumcenter of the k-th triangle
+        in self._simplices.  For each region n, we choose the first triangle
+        (=Voronoi vertex) in self._simplices and a vertex of that triangle
+        not equal to the center n. These determine a unique neighbor of that
+        triangle, which is then chosen as the second triangle. The second
+        triangle will have a unique vertex not equal to the current vertex or
+        the center. This determines a unique neighbor of the second triangle,
+        which is then chosen as the third triangle and so forth. We proceed
+        through all the triangles (=Voronoi vertices) belonging to the
+        generator in points and obtain a sorted version of the vertices
+        of its surrounding region.
+        """
+        if self._dim != 3:
+            raise TypeError("Only supported for three-dimensional point sets")
+        _voronoi.sort_vertices_of_regions(self._simplices, self.regions)
+
+    def _calculate_areas_3d(self):
+        self.sort_vertices_of_regions()
+        sizes = [len(region) for region in self.regions]
+        csizes = np.cumsum(sizes)
+        num_regions = csizes[-1]
+
+        # We create a set of triangles consisting of one point and two Voronoi
+        # vertices. The vertices of each triangle are adjacent in the sorted
+        # regions list.
+        point_indices = [i for i, size in enumerate(sizes)
+                         for j in range(size)]
+
+        nbrs1 = np.array([r for region in self.regions for r in region])
+
+        # The calculation of nbrs2 is a vectorized version of:
+        # np.array([r for region in self.regions for r in np.roll(region, 1)])
+        nbrs2 = np.roll(nbrs1, 1)
+        indices = np.roll(csizes, 1)
+        indices[0] = 0
+        nbrs2[indices] = nbrs1[csizes - 1]
+
+        # Normalize points and vertices.
+        pnormalized = (self.points - self.center) / self.radius
+        vnormalized = (self.vertices - self.center) / self.radius
+
+        # Create the complete set of triangles and calculate their solid angles
+        triangles = np.hstack([pnormalized[point_indices],
+                               vnormalized[nbrs1],
+                               vnormalized[nbrs2]
+                               ]).reshape((num_regions, 3, 3))
+        triangle_solid_angles = calculate_solid_angles(triangles)
+
+        # Sum the solid angles of the triangles in each region
+        solid_angles = np.cumsum(triangle_solid_angles)[csizes - 1]
+        solid_angles[1:] -= solid_angles[:-1]
+
+        # Get polygon areas using A = omega * r**2
+        return solid_angles * self.radius**2
+
+    def _calculate_areas_2d(self):
+        # Find start and end points of arcs
+        arcs = self.points[self._simplices] - self.center
+
+        # Calculate the angle subtended by arcs
+        cosine = np.einsum('ij,ij->i', arcs[:, 0], arcs[:, 1])
+        sine = np.abs(np.linalg.det(arcs))
+        theta = np.arctan2(sine, cosine)
+
+        # Get areas using A = r * theta
+        areas = self.radius * theta
+
+        # Correct arcs which go the wrong way (single-hemisphere inputs)
+        signs = np.sign(np.einsum('ij,ij->i', arcs[:, 0],
+                                              self.vertices - self.center))
+        indices = np.where(signs < 0)
+        areas[indices] = 2 * np.pi * self.radius - areas[indices]
+        return areas
+
+    def calculate_areas(self):
+        """Calculates the areas of the Voronoi regions.
+
+        For 2D point sets, the regions are circular arcs. The sum of the areas
+        is `2 * pi * radius`.
+
+        For 3D point sets, the regions are spherical polygons. The sum of the
+        areas is `4 * pi * radius**2`.
+
+        .. versionadded:: 1.5.0
+
+        Returns
+        -------
+        areas : double array of shape (npoints,)
+            The areas of the Voronoi regions.
+        """
+        if self._dim == 2:
+            return self._calculate_areas_2d()
+        elif self._dim == 3:
+            return self._calculate_areas_3d()
+        else:
+            raise TypeError("Only supported for 2D and 3D point sets")
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/_voronoi.pyi b/__packaged__/coreml/.python_dependencies/scipy/spatial/_voronoi.pyi
new file mode 100644
index 00000000..cd02ca77
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/_voronoi.pyi
@@ -0,0 +1,5 @@
+from typing import List
+
+import numpy as np
+
+def sort_vertices_of_regions(simplices: np.ndarray, regions: List[List[int]]) -> None: ...
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/ckdtree.py b/__packaged__/coreml/.python_dependencies/scipy/spatial/ckdtree.py
new file mode 100644
index 00000000..ee7283dc
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/ckdtree.py
@@ -0,0 +1,35 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.spatial` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _ckdtree
+
+
+__all__ = [  # noqa: F822
+    'cKDTree',
+    'cKDTreeNode',
+    'coo_entries',
+    'operator',
+    'ordered_pairs',
+    'os',
+    'scipy',
+    'threading',
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.spatial.ckdtree is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.spatial instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.spatial` namespace, "
+                  "the `scipy.spatial.ckdtree` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_ckdtree, name)
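What the module-level `__getattr__` above means for a downstream caller, as a small sketch: the old import path still resolves, but touching any attribute now emits a `DeprecationWarning`.

```python
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    from scipy.spatial.ckdtree import cKDTree  # triggers __getattr__
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)
```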
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/distance.py b/__packaged__/coreml/.python_dependencies/scipy/spatial/distance.py
new file mode 100644
index 00000000..68083900
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/distance.py
@@ -0,0 +1,2952 @@
+"""
+Distance computations (:mod:`scipy.spatial.distance`)
+=====================================================
+
+.. sectionauthor:: Damian Eads
+
+Function reference
+------------------
+
+Distance matrix computation from a collection of raw observation vectors
+stored in a rectangular array.
+
+.. autosummary::
+   :toctree: generated/
+
+   pdist   -- pairwise distances between observation vectors.
+   cdist   -- distances between two collections of observation vectors.
+   squareform -- convert distance matrix to a condensed one and vice versa.
+   directed_hausdorff -- directed Hausdorff distance between arrays.
+
+Predicates for checking the validity of distance matrices, both
+condensed and redundant. Also contained in this module are functions
+for computing the number of observations in a distance matrix.
+
+.. autosummary::
+   :toctree: generated/
+
+   is_valid_dm -- checks for a valid distance matrix
+   is_valid_y  -- checks for a valid condensed distance matrix
+   num_obs_dm  -- # of observations in a distance matrix
+   num_obs_y   -- # of observations in a condensed distance matrix
+
+Distance functions between two numeric vectors ``u`` and ``v``. Computing
+distances over a large collection of vectors is inefficient for these
+functions. Use ``pdist`` for this purpose.
+
+.. autosummary::
+   :toctree: generated/
+
+   braycurtis       -- the Bray-Curtis distance.
+   canberra         -- the Canberra distance.
+   chebyshev        -- the Chebyshev distance.
+   cityblock        -- the Manhattan distance.
+   correlation      -- the Correlation distance.
+   cosine           -- the Cosine distance.
+   euclidean        -- the Euclidean distance.
+   jensenshannon    -- the Jensen-Shannon distance.
+   mahalanobis      -- the Mahalanobis distance.
+   minkowski        -- the Minkowski distance.
+   seuclidean       -- the normalized Euclidean distance.
+   sqeuclidean      -- the squared Euclidean distance.
+
+Distance functions between two boolean vectors (representing sets) ``u`` and
+``v``.  As in the case of numerical vectors, ``pdist`` is more efficient for
+computing the distances between all pairs.
+
+.. autosummary::
+   :toctree: generated/
+
+   dice             -- the Dice dissimilarity.
+   hamming          -- the Hamming distance.
+   jaccard          -- the Jaccard distance.
+   kulsinski        -- the Kulsinski distance.
+   kulczynski1      -- the Kulczynski 1 distance.
+   rogerstanimoto   -- the Rogers-Tanimoto dissimilarity.
+   russellrao       -- the Russell-Rao dissimilarity.
+   sokalmichener    -- the Sokal-Michener dissimilarity.
+   sokalsneath      -- the Sokal-Sneath dissimilarity.
+   yule             -- the Yule dissimilarity.
+
+:func:`hamming` also operates over discrete numerical vectors.
+"""
+
+# Copyright (C) Damian Eads, 2007-2008. New BSD License.
+
+__all__ = [
+    'braycurtis',
+    'canberra',
+    'cdist',
+    'chebyshev',
+    'cityblock',
+    'correlation',
+    'cosine',
+    'dice',
+    'directed_hausdorff',
+    'euclidean',
+    'hamming',
+    'is_valid_dm',
+    'is_valid_y',
+    'jaccard',
+    'jensenshannon',
+    'kulsinski',
+    'kulczynski1',
+    'mahalanobis',
+    'minkowski',
+    'num_obs_dm',
+    'num_obs_y',
+    'pdist',
+    'rogerstanimoto',
+    'russellrao',
+    'seuclidean',
+    'sokalmichener',
+    'sokalsneath',
+    'sqeuclidean',
+    'squareform',
+    'yule'
+]
+
+
+import warnings
+import numpy as np
+import dataclasses
+
+from typing import List, Optional, Set, Callable
+
+from functools import partial
+from scipy._lib._util import _asarray_validated
+
+from . import _distance_wrap
+from . import _hausdorff
+from ..linalg import norm
+from ..special import rel_entr
+
+from . import _distance_pybind
+
+from .._lib.deprecation import _deprecated
+
+def _copy_array_if_base_present(a):
+    """Copy the array if its base points to a parent array."""
+    if a.base is not None:
+        return a.copy()
+    return a
+
+
+def _correlation_cdist_wrap(XA, XB, dm, **kwargs):
+    XA = XA - XA.mean(axis=1, keepdims=True)
+    XB = XB - XB.mean(axis=1, keepdims=True)
+    _distance_wrap.cdist_cosine_double_wrap(XA, XB, dm, **kwargs)
+
+
+def _correlation_pdist_wrap(X, dm, **kwargs):
+    X2 = X - X.mean(axis=1, keepdims=True)
+    _distance_wrap.pdist_cosine_double_wrap(X2, dm, **kwargs)
+
+
+def _convert_to_type(X, out_type):
+    return np.ascontiguousarray(X, dtype=out_type)
+
+
+def _nbool_correspond_all(u, v, w=None):
+    if u.dtype == v.dtype == bool and w is None:
+        not_u = ~u
+        not_v = ~v
+        nff = (not_u & not_v).sum()
+        nft = (not_u & v).sum()
+        ntf = (u & not_v).sum()
+        ntt = (u & v).sum()
+    else:
+        dtype = np.result_type(int, u.dtype, v.dtype)
+        u = u.astype(dtype)
+        v = v.astype(dtype)
+        not_u = 1.0 - u
+        not_v = 1.0 - v
+        if w is not None:
+            not_u = w * not_u
+            u = w * u
+        nff = (not_u * not_v).sum()
+        nft = (not_u * v).sum()
+        ntf = (u * not_v).sum()
+        ntt = (u * v).sum()
+    return (nff, nft, ntf, ntt)
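An illustration of the contingency counts this private helper returns: for boolean inputs the tuple is (#FF, #FT, #TF, #TT) over aligned positions. The import below reaches into a private name and is for illustration only.

```python
import numpy as np
from scipy.spatial.distance import _nbool_correspond_all  # private helper

u = np.array([True, True, False, False])
v = np.array([True, False, True, False])
# aligned pairs: (T,T) (T,F) (F,T) (F,F)  ->  nff=1, nft=1, ntf=1, ntt=1
assert _nbool_correspond_all(u, v) == (1, 1, 1, 1)
```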
+
+
+def _nbool_correspond_ft_tf(u, v, w=None):
+    if u.dtype == v.dtype == bool and w is None:
+        not_u = ~u
+        not_v = ~v
+        nft = (not_u & v).sum()
+        ntf = (u & not_v).sum()
+    else:
+        dtype = np.result_type(int, u.dtype, v.dtype)
+        u = u.astype(dtype)
+        v = v.astype(dtype)
+        not_u = 1.0 - u
+        not_v = 1.0 - v
+        if w is not None:
+            not_u = w * not_u
+            u = w * u
+        nft = (not_u * v).sum()
+        ntf = (u * not_v).sum()
+    return (nft, ntf)
+
+
+def _validate_cdist_input(XA, XB, mA, mB, n, metric_info, **kwargs):
+    # get supported types
+    types = metric_info.types
+    # choose best type
+    typ = types[types.index(XA.dtype)] if XA.dtype in types else types[0]
+    # validate data
+    XA = _convert_to_type(XA, out_type=typ)
+    XB = _convert_to_type(XB, out_type=typ)
+
+    # validate kwargs
+    _validate_kwargs = metric_info.validator
+    if _validate_kwargs:
+        kwargs = _validate_kwargs((XA, XB), mA + mB, n, **kwargs)
+    return XA, XB, typ, kwargs
+
+
+def _validate_weight_with_size(X, m, n, **kwargs):
+    w = kwargs.pop('w', None)
+    if w is None:
+        return kwargs
+
+    if w.ndim != 1 or w.shape[0] != n:
+        raise ValueError("Weights must have same size as input vector. "
+                         f"{w.shape[0]} vs. {n}")
+
+    kwargs['w'] = _validate_weights(w)
+    return kwargs
+
+
+def _validate_hamming_kwargs(X, m, n, **kwargs):
+    w = kwargs.get('w', np.ones((n,), dtype='double'))
+
+    if w.ndim != 1 or w.shape[0] != n:
+        raise ValueError("Weights must have same size as input vector. %d vs. %d" % (w.shape[0], n))
+
+    kwargs['w'] = _validate_weights(w)
+    return kwargs
+
+
+def _validate_mahalanobis_kwargs(X, m, n, **kwargs):
+    VI = kwargs.pop('VI', None)
+    if VI is None:
+        if m <= n:
+            # There are fewer observations than the dimension of
+            # the observations.
+            raise ValueError("The number of observations (%d) is too "
+                             "small; the covariance matrix is "
+                             "singular. For observations with %d "
+                             "dimensions, at least %d observations "
+                             "are required." % (m, n, n + 1))
+        if isinstance(X, tuple):
+            X = np.vstack(X)
+        CV = np.atleast_2d(np.cov(X.astype(np.double, copy=False).T))
+        VI = np.linalg.inv(CV).T.copy()
+    kwargs["VI"] = _convert_to_double(VI)
+    return kwargs
+
+
+def _validate_minkowski_kwargs(X, m, n, **kwargs):
+    kwargs = _validate_weight_with_size(X, m, n, **kwargs)
+    if 'p' not in kwargs:
+        kwargs['p'] = 2.
+    else:
+        if kwargs['p'] <= 0:
+            raise ValueError("p must be greater than 0")
+
+    return kwargs
+
+
+def _validate_pdist_input(X, m, n, metric_info, **kwargs):
+    # get supported types
+    types = metric_info.types
+    # choose best type
+    typ = types[types.index(X.dtype)] if X.dtype in types else types[0]
+    # validate data
+    X = _convert_to_type(X, out_type=typ)
+
+    # validate kwargs
+    _validate_kwargs = metric_info.validator
+    if _validate_kwargs:
+        kwargs = _validate_kwargs(X, m, n, **kwargs)
+    return X, typ, kwargs
+
+
+def _validate_seuclidean_kwargs(X, m, n, **kwargs):
+    V = kwargs.pop('V', None)
+    if V is None:
+        if isinstance(X, tuple):
+            X = np.vstack(X)
+        V = np.var(X.astype(np.double, copy=False), axis=0, ddof=1)
+    else:
+        V = np.asarray(V, order='c')
+        if len(V.shape) != 1:
+            raise ValueError('Variance vector V must '
+                             'be one-dimensional.')
+        if V.shape[0] != n:
+            raise ValueError('Variance vector V must be of the same '
+                             'dimension as the vectors on which the distances '
+                             'are computed.')
+    kwargs['V'] = _convert_to_double(V)
+    return kwargs
+
+
+def _validate_vector(u, dtype=None):
+    # XXX Is order='c' really necessary?
+    u = np.asarray(u, dtype=dtype, order='c')
+    if u.ndim == 1:
+        return u
+    raise ValueError("Input vector should be 1-D.")
+
+
+def _validate_weights(w, dtype=np.double):
+    w = _validate_vector(w, dtype=dtype)
+    if np.any(w < 0):
+        raise ValueError("Input weights should be all non-negative")
+    return w
+
+
+def directed_hausdorff(u, v, seed=0):
+    """
+    Compute the directed Hausdorff distance between two 2-D arrays.
+
+    Distances between pairs are calculated using a Euclidean metric.
+
+    Parameters
+    ----------
+    u : (M,N) array_like
+        Input array.
+    v : (O,N) array_like
+        Input array.
+    seed : int or None
+        Local `numpy.random.RandomState` seed. The default of 0 fixes the
+        random shuffling of `u` and `v`, guaranteeing reproducibility.
+
+    Returns
+    -------
+    d : double
+        The directed Hausdorff distance between arrays `u` and `v`.
+
+    index_1 : int
+        index of point contributing to Hausdorff pair in `u`
+
+    index_2 : int
+        index of point contributing to Hausdorff pair in `v`
+
+    Raises
+    ------
+    ValueError
+        An exception is thrown if `u` and `v` do not have
+        the same number of columns.
+
+    Notes
+    -----
+    Uses the early break technique and the random sampling approach
+    described by [1]_. Although worst-case performance is ``O(m * o)``
+    (as with the brute force algorithm), this is unlikely in practice,
+    since the input data would have to force the algorithm to explore
+    every single point interaction, even after the input points have
+    been shuffled. The best-case performance is ``O(m)``, achieved when
+    an inner-loop distance below ``cmax`` is found early, triggering the
+    early break as often as possible. The authors have formally shown
+    that the average runtime is closer to ``O(m)``.
+
+    .. versionadded:: 0.19.0
+
+    References
+    ----------
+    .. [1] A. A. Taha and A. Hanbury, "An efficient algorithm for
+           calculating the exact Hausdorff distance." IEEE Transactions On
+           Pattern Analysis And Machine Intelligence, vol. 37 pp. 2153-63,
+           2015.
+
+    See Also
+    --------
+    scipy.spatial.procrustes : Another similarity test for two data sets
+
+    Examples
+    --------
+    Find the directed Hausdorff distance between two 2-D arrays of
+    coordinates:
+
+    >>> from scipy.spatial.distance import directed_hausdorff
+    >>> import numpy as np
+    >>> u = np.array([(1.0, 0.0),
+    ...               (0.0, 1.0),
+    ...               (-1.0, 0.0),
+    ...               (0.0, -1.0)])
+    >>> v = np.array([(2.0, 0.0),
+    ...               (0.0, 2.0),
+    ...               (-2.0, 0.0),
+    ...               (0.0, -4.0)])
+
+    >>> directed_hausdorff(u, v)[0]
+    2.23606797749979
+    >>> directed_hausdorff(v, u)[0]
+    3.0
+
+    Find the general (symmetric) Hausdorff distance between two 2-D
+    arrays of coordinates:
+
+    >>> max(directed_hausdorff(u, v)[0], directed_hausdorff(v, u)[0])
+    3.0
+
+    Find the indices of the points that generate the Hausdorff distance
+    (the Hausdorff pair):
+
+    >>> directed_hausdorff(v, u)[1:]
+    (3, 3)
+
+    """
+    u = np.asarray(u, dtype=np.float64, order='c')
+    v = np.asarray(v, dtype=np.float64, order='c')
+    if u.shape[1] != v.shape[1]:
+        raise ValueError('u and v need to have the same '
+                         'number of columns')
+    result = _hausdorff.directed_hausdorff(u, v, seed)
+    return result
+
+
+def minkowski(u, v, p=2, w=None):
+    """
+    Compute the Minkowski distance between two 1-D arrays.
+
+    The Minkowski distance between 1-D arrays `u` and `v`,
+    is defined as
+
+    .. math::
+
+       {\\|u-v\\|}_p = (\\sum{|u_i - v_i|^p})^{1/p}.
+
+    With optional weights ``w``, this generalizes to
+
+    .. math::
+
+       \\left(\\sum{w_i(|(u_i - v_i)|^p)}\\right)^{1/p}.
+
+    Parameters
+    ----------
+    u : (N,) array_like
+        Input array.
+    v : (N,) array_like
+        Input array.
+    p : scalar
+        The order of the norm of the difference :math:`{\\|u-v\\|}_p`. Note
+        that for :math:`0 < p < 1`, the triangle inequality only holds with
+        an additional multiplicative factor, i.e. it is only a quasi-metric.
+    w : (N,) array_like, optional
+        The weights for each value in `u` and `v`. Default is None,
+        which gives each value a weight of 1.0
+
+    Returns
+    -------
+    minkowski : double
+        The Minkowski distance between vectors `u` and `v`.
+
+    Examples
+    --------
+    >>> from scipy.spatial import distance
+    >>> distance.minkowski([1, 0, 0], [0, 1, 0], 1)
+    2.0
+    >>> distance.minkowski([1, 0, 0], [0, 1, 0], 2)
+    1.4142135623730951
+    >>> distance.minkowski([1, 0, 0], [0, 1, 0], 3)
+    1.2599210498948732
+    >>> distance.minkowski([1, 1, 0], [0, 1, 0], 1)
+    1.0
+    >>> distance.minkowski([1, 1, 0], [0, 1, 0], 2)
+    1.0
+    >>> distance.minkowski([1, 1, 0], [0, 1, 0], 3)
+    1.0
+
+    """
+    u = _validate_vector(u)
+    v = _validate_vector(v)
+    if p <= 0:
+        raise ValueError("p must be greater than 0")
+    u_v = u - v
+    if w is not None:
+        w = _validate_weights(w)
+        if p == 1:
+            root_w = w
+        elif p == 2:
+            # better precision and speed
+            root_w = np.sqrt(w)
+        elif p == np.inf:
+            root_w = (w != 0)
+        else:
+            root_w = np.power(w, 1/p)
+        u_v = root_w * u_v
+    dist = norm(u_v, ord=p)
+    return dist
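A quick sketch of the weight handling above: for `p = 2` the weights enter under the square root (`root_w = sqrt(w)`), so the weighted distance equals `sqrt(sum(w_i * |u_i - v_i|**2))` (illustrative vectors).

```python
import numpy as np
from scipy.spatial import distance

u = np.array([0.0, 0.0, 0.0])
v = np.array([1.0, 1.0, 1.0])
w = np.array([1.0, 1.0, 4.0])
d = distance.minkowski(u, v, p=2, w=w)
assert np.isclose(d, np.sqrt(1 + 1 + 4))  # sqrt(sum(w_i * |u_i - v_i|^2))
```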
+
+
+def euclidean(u, v, w=None):
+    """
+    Computes the Euclidean distance between two 1-D arrays.
+
+    The Euclidean distance between 1-D arrays `u` and `v`, is defined as
+
+    .. math::
+
+       {\\|u-v\\|}_2
+
+    or, with optional weights ``w``,
+
+    .. math::
+
+       \\left(\\sum{(w_i |(u_i - v_i)|^2)}\\right)^{1/2}
+
+    Parameters
+    ----------
+    u : (N,) array_like
+        Input array.
+    v : (N,) array_like
+        Input array.
+    w : (N,) array_like, optional
+        The weights for each value in `u` and `v`. Default is None,
+        which gives each value a weight of 1.0
+
+    Returns
+    -------
+    euclidean : double
+        The Euclidean distance between vectors `u` and `v`.
+
+    Examples
+    --------
+    >>> from scipy.spatial import distance
+    >>> distance.euclidean([1, 0, 0], [0, 1, 0])
+    1.4142135623730951
+    >>> distance.euclidean([1, 1, 0], [0, 1, 0])
+    1.0
+
+    """
+    return minkowski(u, v, p=2, w=w)
+
+
+def sqeuclidean(u, v, w=None):
+    """
+    Compute the squared Euclidean distance between two 1-D arrays.
+
+    The squared Euclidean distance between `u` and `v` is defined as
+
+    .. math::
+
+       {\\|u-v\\|}_2^2
+
+    or, with optional weights ``w``,
+
+    .. math::
+
+       \\left(\\sum{(w_i |(u_i - v_i)|^2)}\\right)
+
+    Parameters
+    ----------
+    u : (N,) array_like
+        Input array.
+    v : (N,) array_like
+        Input array.
+    w : (N,) array_like, optional
+        The weights for each value in `u` and `v`. Default is None,
+        which gives each value a weight of 1.0
+
+    Returns
+    -------
+    sqeuclidean : double
+        The squared Euclidean distance between vectors `u` and `v`.
+
+    Examples
+    --------
+    >>> from scipy.spatial import distance
+    >>> distance.sqeuclidean([1, 0, 0], [0, 1, 0])
+    2.0
+    >>> distance.sqeuclidean([1, 1, 0], [0, 1, 0])
+    1.0
+
+    """
+    # Preserve float dtypes, but convert everything else to np.float64
+    # for stability.
+    utype, vtype = None, None
+    if not (hasattr(u, "dtype") and np.issubdtype(u.dtype, np.inexact)):
+        utype = np.float64
+    if not (hasattr(v, "dtype") and np.issubdtype(v.dtype, np.inexact)):
+        vtype = np.float64
+
+    u = _validate_vector(u, dtype=utype)
+    v = _validate_vector(v, dtype=vtype)
+    u_v = u - v
+    u_v_w = u_v  # only want weights applied once
+    if w is not None:
+        w = _validate_weights(w)
+        u_v_w = w * u_v
+    return np.dot(u_v, u_v_w)
+
+
+def correlation(u, v, w=None, centered=True):
+    """
+    Compute the correlation distance between two 1-D arrays.
+
+    The correlation distance between `u` and `v`, is
+    defined as
+
+    .. math::
+
+        1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}
+                  {{\\|(u - \\bar{u})\\|}_2 {\\|(v - \\bar{v})\\|}_2}
+
+    where :math:`\\bar{u}` is the mean of the elements of `u`
+    and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`.
+
+    Parameters
+    ----------
+    u : (N,) array_like
+        Input array.
+    v : (N,) array_like
+        Input array.
+    w : (N,) array_like, optional
+        The weights for each value in `u` and `v`. Default is None,
+        which gives each value a weight of 1.0
+    centered : bool, optional
+        If True, `u` and `v` will be centered. Default is True.
+
+    Returns
+    -------
+    correlation : double
+        The correlation distance between 1-D array `u` and `v`.
+
+    """
+    u = _validate_vector(u)
+    v = _validate_vector(v)
+    if w is not None:
+        w = _validate_weights(w)
+    if centered:
+        umu = np.average(u, weights=w)
+        vmu = np.average(v, weights=w)
+        u = u - umu
+        v = v - vmu
+    uv = np.average(u * v, weights=w)
+    uu = np.average(np.square(u), weights=w)
+    vv = np.average(np.square(v), weights=w)
+    dist = 1.0 - uv / np.sqrt(uu * vv)
+    # Return absolute value to avoid small negative value due to rounding
+    return np.abs(dist)
+
+
+def cosine(u, v, w=None):
+    """
+    Compute the Cosine distance between 1-D arrays.
+
+    The Cosine distance between `u` and `v`, is defined as
+
+    .. math::
+
+        1 - \\frac{u \\cdot v}
+                  {\\|u\\|_2 \\|v\\|_2}.
+
+    where :math:`u \\cdot v` is the dot product of :math:`u` and
+    :math:`v`.
+
+    Parameters
+    ----------
+    u : (N,) array_like
+        Input array.
+    v : (N,) array_like
+        Input array.
+    w : (N,) array_like, optional
+        The weights for each value in `u` and `v`. Default is None,
+        which gives each value a weight of 1.0
+
+    Returns
+    -------
+    cosine : double
+        The Cosine distance between vectors `u` and `v`.
+
+    Examples
+    --------
+    >>> from scipy.spatial import distance
+    >>> distance.cosine([1, 0, 0], [0, 1, 0])
+    1.0
+    >>> distance.cosine([100, 0, 0], [0, 1, 0])
+    1.0
+    >>> distance.cosine([1, 1, 0], [0, 1, 0])
+    0.29289321881345254
+
+    """
+    # cosine distance is also referred to as 'uncentered correlation',
+    #   or 'reflective correlation'
+    # clamp the result to 0-2
+    return max(0, min(correlation(u, v, w=w, centered=False), 2.0))
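The comment above identifies cosine distance with uncentered correlation; a quick check of that identity on arbitrary vectors (whose value falls inside the clamped [0, 2] range):

```python
import numpy as np
from scipy.spatial import distance

u = np.array([1.0, 2.0, 3.0])
v = np.array([2.0, -1.0, 0.5])
assert np.isclose(distance.cosine(u, v),
                  distance.correlation(u, v, centered=False))
```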
+
+
+def hamming(u, v, w=None):
+    """
+    Compute the Hamming distance between two 1-D arrays.
+
+    The Hamming distance between 1-D arrays `u` and `v`, is simply the
+    proportion of disagreeing components in `u` and `v`. If `u` and `v` are
+    boolean vectors, the Hamming distance is
+
+    .. math::
+
+       \\frac{c_{01} + c_{10}}{n}
+
+    where :math:`c_{ij}` is the number of occurrences of
+    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
+    :math:`k < n`.
+
+    Parameters
+    ----------
+    u : (N,) array_like
+        Input array.
+    v : (N,) array_like
+        Input array.
+    w : (N,) array_like, optional
+        The weights for each value in `u` and `v`. Default is None,
+        which gives each value a weight of 1.0
+
+    Returns
+    -------
+    hamming : double
+        The Hamming distance between vectors `u` and `v`.
+
+    Examples
+    --------
+    >>> from scipy.spatial import distance
+    >>> distance.hamming([1, 0, 0], [0, 1, 0])
+    0.6666666666666666
+    >>> distance.hamming([1, 0, 0], [1, 1, 0])
+    0.3333333333333333
+    >>> distance.hamming([1, 0, 0], [2, 0, 0])
+    0.3333333333333333
+    >>> distance.hamming([1, 0, 0], [3, 0, 0])
+    0.3333333333333333
+
+    """
+    u = _validate_vector(u)
+    v = _validate_vector(v)
+    if u.shape != v.shape:
+        raise ValueError('The 1d arrays must have equal lengths.')
+    u_ne_v = u != v
+    if w is not None:
+        w = _validate_weights(w)
+    return np.average(u_ne_v, weights=w)
+
+
+def jaccard(u, v, w=None):
+    """
+    Compute the Jaccard-Needham dissimilarity between two boolean 1-D arrays.
+
+    The Jaccard-Needham dissimilarity between 1-D boolean arrays `u` and `v`,
+    is defined as
+
+    .. math::
+
+       \\frac{c_{TF} + c_{FT}}
+            {c_{TT} + c_{FT} + c_{TF}}
+
+    where :math:`c_{ij}` is the number of occurrences of
+    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
+    :math:`k < n`.
+
+    Parameters
+    ----------
+    u : (N,) array_like, bool
+        Input array.
+    v : (N,) array_like, bool
+        Input array.
+    w : (N,) array_like, optional
+        The weights for each value in `u` and `v`. Default is None,
+        which gives each value a weight of 1.0
+
+    Returns
+    -------
+    jaccard : double
+        The Jaccard distance between vectors `u` and `v`.
+
+    Notes
+    -----
+    When both `u` and `v` lead to a ``0/0`` division, i.e., there is no
+    overlap between the items in the vectors, the returned distance is 0.
+    See the Wikipedia page on the Jaccard index [1]_, and this paper [2]_.
+
+    .. versionchanged:: 1.2.0
+        Previously, when `u` and `v` lead to a `0/0` division, the function
+        would return NaN. This was changed to return 0 instead.
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Jaccard_index
+    .. [2] S. Kosub, "A note on the triangle inequality for the Jaccard
+       distance", 2016, :arxiv:`1612.02696`
+
+    Examples
+    --------
+    >>> from scipy.spatial import distance
+    >>> distance.jaccard([1, 0, 0], [0, 1, 0])
+    1.0
+    >>> distance.jaccard([1, 0, 0], [1, 1, 0])
+    0.5
+    >>> distance.jaccard([1, 0, 0], [1, 2, 0])
+    0.5
+    >>> distance.jaccard([1, 0, 0], [1, 1, 1])
+    0.6666666666666666
+
+    """
+    u = _validate_vector(u)
+    v = _validate_vector(v)
+
+    nonzero = np.bitwise_or(u != 0, v != 0)
+    unequal_nonzero = np.bitwise_and((u != v), nonzero)
+    if w is not None:
+        w = _validate_weights(w)
+        nonzero = w * nonzero
+        unequal_nonzero = w * unequal_nonzero
+    a = np.double(unequal_nonzero.sum())
+    b = np.double(nonzero.sum())
+    return (a / b) if b != 0 else 0
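A demonstration of the 0/0 convention from the Notes: two all-zero vectors have no nonzero entries, and since SciPy 1.2.0 the distance is defined to be 0 rather than NaN.

```python
from scipy.spatial import distance

# no nonzero entries anywhere -> 0/0 case -> defined to be 0
assert distance.jaccard([0, 0, 0], [0, 0, 0]) == 0
```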
+
+
+@_deprecated("Kulsinski has been deprecated from scipy.spatial.distance"
+             " in SciPy 1.9.0 and it will be removed in SciPy 1.11.0."
+             " It is superseded by scipy.spatial.distance.kulczynski1.")
+def kulsinski(u, v, w=None):
+    """
+    Compute the Kulsinski dissimilarity between two boolean 1-D arrays.
+
+    The Kulsinski dissimilarity between two boolean 1-D arrays `u` and `v`,
+    is defined as
+
+    .. math::
+
+         \\frac{c_{TF} + c_{FT} - c_{TT} + n}
+              {c_{FT} + c_{TF} + n}
+
+    where :math:`c_{ij}` is the number of occurrences of
+    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
+    :math:`k < n`.
+
+    .. deprecated:: 1.9.0
+        `kulsinski` has been deprecated from `scipy.spatial.distance` in
+        SciPy 1.9.0 and it will be removed in SciPy 1.11.0. It is superseded
+        by `scipy.spatial.distance.kulczynski1`.
+
+    Parameters
+    ----------
+    u : (N,) array_like, bool
+        Input array.
+    v : (N,) array_like, bool
+        Input array.
+    w : (N,) array_like, optional
+        The weights for each value in `u` and `v`. Default is None,
+        which gives each value a weight of 1.0
+
+    Returns
+    -------
+    kulsinski : double
+        The Kulsinski distance between vectors `u` and `v`.
+
+    Examples
+    --------
+    >>> from scipy.spatial import distance
+    >>> distance.kulsinski([1, 0, 0], [0, 1, 0])
+    1.0
+    >>> distance.kulsinski([1, 0, 0], [1, 1, 0])
+    0.75
+    >>> distance.kulsinski([1, 0, 0], [2, 1, 0])
+    0.3333333333333333
+    >>> distance.kulsinski([1, 0, 0], [3, 1, 0])
+    -0.5
+
+    """
+    u = _validate_vector(u)
+    v = _validate_vector(v)
+    if w is None:
+        n = float(len(u))
+    else:
+        w = _validate_weights(w)
+        n = w.sum()
+    (nff, nft, ntf, ntt) = _nbool_correspond_all(u, v, w=w)
+
+    return (ntf + nft - ntt + n) / (ntf + nft + n)
+
+
+def kulczynski1(u, v, *, w=None):
+    """
+    Compute the Kulczynski 1 dissimilarity between two boolean 1-D arrays.
+
+    The Kulczynski 1 dissimilarity between two boolean 1-D arrays `u` and `v`
+    of length ``n``, is defined as
+
+    .. math::
+
+         \\frac{c_{11}}
+              {c_{01} + c_{10}}
+
+    where :math:`c_{ij}` is the number of occurrences of
+    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
+    :math:`k \\in {0, 1, ..., n-1}`.
+
+    Parameters
+    ----------
+    u : (N,) array_like, bool
+        Input array.
+    v : (N,) array_like, bool
+        Input array.
+    w : (N,) array_like, optional
+        The weights for each value in `u` and `v`. Default is None,
+        which gives each value a weight of 1.0
+
+    Returns
+    -------
+    kulczynski1 : float
+        The Kulczynski 1 distance between vectors `u` and `v`.
+
+    Notes
+    -----
+    This measure has a minimum value of 0 and no upper limit.
+    It is undefined when there are no non-matches.
+
+    .. versionadded:: 1.8.0
+
+    References
+    ----------
+    .. [1] Kulczynski S. et al. Bulletin
+           International de l'Academie Polonaise des Sciences
+           et des Lettres, Classe des Sciences Mathematiques
+           et Naturelles, Serie B (Sciences Naturelles). 1927;
+           Supplement II: 57-203.
+
+    Examples
+    --------
+    >>> from scipy.spatial import distance
+    >>> distance.kulczynski1([1, 0, 0], [0, 1, 0])
+    0.0
+    >>> distance.kulczynski1([True, False, False], [True, True, False])
+    1.0
+    >>> distance.kulczynski1([True, False, False], [True])
+    0.5
+    >>> distance.kulczynski1([1, 0, 0], [3, 1, 0])
+    -3.0
+
+    """
+    u = _validate_vector(u)
+    v = _validate_vector(v)
+    if w is not None:
+        w = _validate_weights(w)
+    (_, nft, ntf, ntt) = _nbool_correspond_all(u, v, w=w)
+
+    return ntt / (ntf + nft)
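For boolean inputs, this measure relates to the Jaccard distance: since `kulczynski1 = ntt / (ntf + nft)` and `jaccard = (ntf + nft) / (ntt + ntf + nft)`, one has `kulczynski1 = 1 / jaccard - 1` whenever both are defined. A quick check:

```python
import numpy as np
from scipy.spatial import distance

u = np.array([True, False, False])
v = np.array([True, True, False])
assert np.isclose(distance.kulczynski1(u, v),
                  1 / distance.jaccard(u, v) - 1)
```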
+
+
+def seuclidean(u, v, V):
+    """
+    Return the standardized Euclidean distance between two 1-D arrays.
+
+    The standardized Euclidean distance between `u` and `v`.
+
+    Parameters
+    ----------
+    u : (N,) array_like
+        Input array.
+    v : (N,) array_like
+        Input array.
+    V : (N,) array_like
+        `V` is a 1-D array of component variances. It is usually computed
+        among a larger collection of vectors.
+
+    Returns
+    -------
+    seuclidean : double
+        The standardized Euclidean distance between vectors `u` and `v`.
+
+    Examples
+    --------
+    >>> from scipy.spatial import distance
+    >>> distance.seuclidean([1, 0, 0], [0, 1, 0], [0.1, 0.1, 0.1])
+    4.4721359549995796
+    >>> distance.seuclidean([1, 0, 0], [0, 1, 0], [1, 0.1, 0.1])
+    3.3166247903553998
+    >>> distance.seuclidean([1, 0, 0], [0, 1, 0], [10, 0.1, 0.1])
+    3.1780497164141406
+
+    """
+    u = _validate_vector(u)
+    v = _validate_vector(v)
+    V = _validate_vector(V, dtype=np.float64)
+    if V.shape[0] != u.shape[0] or u.shape[0] != v.shape[0]:
+        raise TypeError('V must be a 1-D array of the same dimension '
+                        'as u and v.')
+    return euclidean(u, v, w=1/V)
+
+
+def cityblock(u, v, w=None):
+    """
+    Compute the City Block (Manhattan) distance.
+
+    Computes the Manhattan distance between two 1-D arrays `u` and `v`,
+    which is defined as
+
+    .. math::
+
+       \\sum_i {\\left| u_i - v_i \\right|}.
+
+    Parameters
+    ----------
+    u : (N,) array_like
+        Input array.
+    v : (N,) array_like
+        Input array.
+    w : (N,) array_like, optional
+        The weights for each value in `u` and `v`. Default is None,
+        which gives each value a weight of 1.0
+
+    Returns
+    -------
+    cityblock : double
+        The City Block (Manhattan) distance between vectors `u` and `v`.
+
+    Examples
+    --------
+    >>> from scipy.spatial import distance
+    >>> distance.cityblock([1, 0, 0], [0, 1, 0])
+    2
+    >>> distance.cityblock([1, 0, 0], [0, 2, 0])
+    3
+    >>> distance.cityblock([1, 0, 0], [1, 1, 0])
+    1
+
+    """
+    u = _validate_vector(u)
+    v = _validate_vector(v)
+    l1_diff = abs(u - v)
+    if w is not None:
+        w = _validate_weights(w)
+        l1_diff = w * l1_diff
+    return l1_diff.sum()
+
+
+def mahalanobis(u, v, VI):
+    """
+    Compute the Mahalanobis distance between two 1-D arrays.
+
+    The Mahalanobis distance between 1-D arrays `u` and `v` is defined as
+
+    .. math::
+
+       \\sqrt{ (u-v) V^{-1} (u-v)^T }
+
+    where ``V`` is the covariance matrix.  Note that the argument `VI`
+    is the inverse of ``V``.
+
+    Parameters
+    ----------
+    u : (N,) array_like
+        Input array.
+    v : (N,) array_like
+        Input array.
+    VI : array_like
+        The inverse of the covariance matrix.
+
+    Returns
+    -------
+    mahalanobis : double
+        The Mahalanobis distance between vectors `u` and `v`.
+
+    Examples
+    --------
+    >>> from scipy.spatial import distance
+    >>> iv = [[1, 0.5, 0.5], [0.5, 1, 0.5], [0.5, 0.5, 1]]
+    >>> distance.mahalanobis([1, 0, 0], [0, 1, 0], iv)
+    1.0
+    >>> distance.mahalanobis([0, 2, 0], [0, 1, 0], iv)
+    1.0
+    >>> distance.mahalanobis([2, 0, 0], [0, 1, 0], iv)
+    1.7320508075688772
+
+    """
+    u = _validate_vector(u)
+    v = _validate_vector(v)
+    VI = np.atleast_2d(VI)
+    delta = u - v
+    m = np.dot(np.dot(delta, VI), delta)
+    return np.sqrt(m)
+
+
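+# A sketch of how the `VI` argument can be built from data, matching the
+# default described in the `pdist`/`cdist` docstrings below (the inverse of
+# the sample covariance of the observations). Assumes `X` has enough rows
+# for a nonsingular covariance matrix; not used by this module.
+def _mahalanobis_vi_from_data(X):
+    import numpy as np
+    X = np.asarray(X, dtype=float)
+    # Rows of X are observations; np.cov expects variables in rows.
+    return np.linalg.inv(np.cov(X.T))
+
+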
+def chebyshev(u, v, w=None):
+    """
+    Compute the Chebyshev distance.
+
+    Computes the Chebyshev distance between two 1-D arrays `u` and `v`,
+    which is defined as
+
+    .. math::
+
+       \\max_i {|u_i-v_i|}.
+
+    Parameters
+    ----------
+    u : (N,) array_like
+        Input vector.
+    v : (N,) array_like
+        Input vector.
+    w : (N,) array_like, optional
+        Entries with zero weight are ignored; otherwise unused, as 'max'
+        is a weightless operation. Here for API consistency.
+
+    Returns
+    -------
+    chebyshev : double
+        The Chebyshev distance between vectors `u` and `v`.
+
+    Examples
+    --------
+    >>> from scipy.spatial import distance
+    >>> distance.chebyshev([1, 0, 0], [0, 1, 0])
+    1
+    >>> distance.chebyshev([1, 1, 0], [0, 1, 0])
+    1
+
+    """
+    u = _validate_vector(u)
+    v = _validate_vector(v)
+    if w is not None:
+        w = _validate_weights(w)
+        has_weight = w > 0
+        if has_weight.sum() < w.size:
+            u = u[has_weight]
+            v = v[has_weight]
+    return max(abs(u - v))
+
+
+def braycurtis(u, v, w=None):
+    """
+    Compute the Bray-Curtis distance between two 1-D arrays.
+
+    Bray-Curtis distance is defined as
+
+    .. math::
+
+       \\sum{|u_i-v_i|} / \\sum{|u_i+v_i|}
+
+    The Bray-Curtis distance is in the range [0, 1] if all coordinates are
+    positive, and is undefined if the inputs are of length zero.
+
+    Parameters
+    ----------
+    u : (N,) array_like
+        Input array.
+    v : (N,) array_like
+        Input array.
+    w : (N,) array_like, optional
+        The weights for each value in `u` and `v`. Default is None,
+        which gives each value a weight of 1.0.
+
+    Returns
+    -------
+    braycurtis : double
+        The Bray-Curtis distance between 1-D arrays `u` and `v`.
+
+    Examples
+    --------
+    >>> from scipy.spatial import distance
+    >>> distance.braycurtis([1, 0, 0], [0, 1, 0])
+    1.0
+    >>> distance.braycurtis([1, 1, 0], [0, 1, 0])
+    0.33333333333333331
+
+    """
+    u = _validate_vector(u)
+    v = _validate_vector(v, dtype=np.float64)
+    l1_diff = abs(u - v)
+    l1_sum = abs(u + v)
+    if w is not None:
+        w = _validate_weights(w)
+        l1_diff = w * l1_diff
+        l1_sum = w * l1_sum
+    return l1_diff.sum() / l1_sum.sum()
+
+
+def canberra(u, v, w=None):
+    """
+    Compute the Canberra distance between two 1-D arrays.
+
+    The Canberra distance is defined as
+
+    .. math::
+
+         d(u,v) = \\sum_i \\frac{|u_i-v_i|}
+                              {|u_i|+|v_i|}.
+
+    Parameters
+    ----------
+    u : (N,) array_like
+        Input array.
+    v : (N,) array_like
+        Input array.
+    w : (N,) array_like, optional
+        The weights for each value in `u` and `v`. Default is None,
+        which gives each value a weight of 1.0.
+
+    Returns
+    -------
+    canberra : double
+        The Canberra distance between vectors `u` and `v`.
+
+    Notes
+    -----
+    When `u[i]` and `v[i]` are 0 for a given i, the fraction 0/0 = 0 is
+    used in the calculation.
+
+    Examples
+    --------
+    >>> from scipy.spatial import distance
+    >>> distance.canberra([1, 0, 0], [0, 1, 0])
+    2.0
+    >>> distance.canberra([1, 1, 0], [0, 1, 0])
+    1.0
+
+    """
+    u = _validate_vector(u)
+    v = _validate_vector(v, dtype=np.float64)
+    if w is not None:
+        w = _validate_weights(w)
+    with np.errstate(invalid='ignore'):
+        abs_uv = abs(u - v)
+        abs_u = abs(u)
+        abs_v = abs(v)
+        d = abs_uv / (abs_u + abs_v)
+        if w is not None:
+            d = w * d
+        d = np.nansum(d)
+    return d
+
+
+def jensenshannon(p, q, base=None, *, axis=0, keepdims=False):
+    """
+    Compute the Jensen-Shannon distance (metric) between
+    two probability arrays. This is the square root
+    of the Jensen-Shannon divergence.
+
+    The Jensen-Shannon distance between two probability
+    vectors `p` and `q` is defined as,
+
+    .. math::
+
+       \\sqrt{\\frac{D(p \\parallel m) + D(q \\parallel m)}{2}}
+
+    where :math:`m` is the pointwise mean of :math:`p` and :math:`q`
+    and :math:`D` is the Kullback-Leibler divergence.
+
+    This routine will normalize `p` and `q` if they don't sum to 1.0.
+
+    Parameters
+    ----------
+    p : (N,) array_like
+        Left probability vector.
+    q : (N,) array_like
+        Right probability vector.
+    base : double, optional
+        The base of the logarithm used to compute the output. If not
+        given, the routine uses the default base of
+        `scipy.stats.entropy`.
+    axis : int, optional
+        Axis along which the Jensen-Shannon distances are computed. The default
+        is 0.
+
+        .. versionadded:: 1.7.0
+    keepdims : bool, optional
+        If this is set to `True`, the reduced axes are left in the
+        result as dimensions with size one. With this option,
+        the result will broadcast correctly against the input array.
+        Default is False.
+
+        .. versionadded:: 1.7.0
+
+    Returns
+    -------
+    js : double or ndarray
+        The Jensen-Shannon distances between `p` and `q` along the `axis`.
+
+    Notes
+    -----
+
+    .. versionadded:: 1.2.0
+
+    Examples
+    --------
+    >>> from scipy.spatial import distance
+    >>> import numpy as np
+    >>> distance.jensenshannon([1.0, 0.0, 0.0], [0.0, 1.0, 0.0], 2.0)
+    1.0
+    >>> distance.jensenshannon([1.0, 0.0], [0.5, 0.5])
+    0.46450140402245893
+    >>> distance.jensenshannon([1.0, 0.0, 0.0], [1.0, 0.0, 0.0])
+    0.0
+    >>> a = np.array([[1, 2, 3, 4],
+    ...               [5, 6, 7, 8],
+    ...               [9, 10, 11, 12]])
+    >>> b = np.array([[13, 14, 15, 16],
+    ...               [17, 18, 19, 20],
+    ...               [21, 22, 23, 24]])
+    >>> distance.jensenshannon(a, b, axis=0)
+    array([0.1954288, 0.1447697, 0.1138377, 0.0927636])
+    >>> distance.jensenshannon(a, b, axis=1)
+    array([0.1402339, 0.0399106, 0.0201815])
+
+    """
+    p = np.asarray(p)
+    q = np.asarray(q)
+    p = p / np.sum(p, axis=axis, keepdims=True)
+    q = q / np.sum(q, axis=axis, keepdims=True)
+    m = (p + q) / 2.0
+    left = rel_entr(p, m)
+    right = rel_entr(q, m)
+    left_sum = np.sum(left, axis=axis, keepdims=keepdims)
+    right_sum = np.sum(right, axis=axis, keepdims=keepdims)
+    js = left_sum + right_sum
+    if base is not None:
+        js /= np.log(base)
+    return np.sqrt(js / 2.0)
+
+
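+# A sketch recomputing the Jensen-Shannon distance for 1-D inputs from its
+# definition, using scipy.stats.entropy for the two Kullback-Leibler terms.
+# It mirrors what the vectorized `jensenshannon` above does with `rel_entr`
+# (natural-log base); illustrative only, not used by this module.
+def _jensenshannon_by_hand(p, q):
+    import numpy as np
+    from scipy.stats import entropy
+    p = np.asarray(p, dtype=float)
+    q = np.asarray(q, dtype=float)
+    # Normalize, form the pointwise mean, and average the KL divergences.
+    p = p / p.sum()
+    q = q / q.sum()
+    m = (p + q) / 2.0
+    return float(np.sqrt((entropy(p, m) + entropy(q, m)) / 2.0))
+
+# _jensenshannon_by_hand([1.0, 0.0], [0.5, 0.5]) reproduces the docstring
+# value 0.46450140402245893.
+
+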
+def yule(u, v, w=None):
+    """
+    Compute the Yule dissimilarity between two boolean 1-D arrays.
+
+    The Yule dissimilarity is defined as
+
+    .. math::
+
+         \\frac{R}{c_{TT} * c_{FF} + \\frac{R}{2}}
+
+    where :math:`c_{ij}` is the number of occurrences of
+    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
+    :math:`k < n` and :math:`R = 2.0 * c_{TF} * c_{FT}`.
+
+    Parameters
+    ----------
+    u : (N,) array_like, bool
+        Input array.
+    v : (N,) array_like, bool
+        Input array.
+    w : (N,) array_like, optional
+        The weights for each value in `u` and `v`. Default is None,
+        which gives each value a weight of 1.0.
+
+    Returns
+    -------
+    yule : double
+        The Yule dissimilarity between vectors `u` and `v`.
+
+    Examples
+    --------
+    >>> from scipy.spatial import distance
+    >>> distance.yule([1, 0, 0], [0, 1, 0])
+    2.0
+    >>> distance.yule([1, 1, 0], [0, 1, 0])
+    0.0
+
+    """
+    u = _validate_vector(u)
+    v = _validate_vector(v)
+    if w is not None:
+        w = _validate_weights(w)
+    (nff, nft, ntf, ntt) = _nbool_correspond_all(u, v, w=w)
+    half_R = ntf * nft
+    if half_R == 0:
+        return 0.0
+    else:
+        return float(2.0 * half_R / (ntt * nff + half_R))
+
+
+def dice(u, v, w=None):
+    """
+    Compute the Dice dissimilarity between two boolean 1-D arrays.
+
+    The Dice dissimilarity between `u` and `v` is
+
+    .. math::
+
+         \\frac{c_{TF} + c_{FT}}
+              {2c_{TT} + c_{FT} + c_{TF}}
+
+    where :math:`c_{ij}` is the number of occurrences of
+    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
+    :math:`k < n`.
+
+    Parameters
+    ----------
+    u : (N,) array_like, bool
+        Input 1-D array.
+    v : (N,) array_like, bool
+        Input 1-D array.
+    w : (N,) array_like, optional
+        The weights for each value in `u` and `v`. Default is None,
+        which gives each value a weight of 1.0.
+
+    Returns
+    -------
+    dice : double
+        The Dice dissimilarity between 1-D arrays `u` and `v`.
+
+    Notes
+    -----
+    This function computes the Dice dissimilarity index. To compute the
+    Dice similarity index, convert one to the other with similarity =
+    1 - dissimilarity.
+
+    Examples
+    --------
+    >>> from scipy.spatial import distance
+    >>> distance.dice([1, 0, 0], [0, 1, 0])
+    1.0
+    >>> distance.dice([1, 0, 0], [1, 1, 0])
+    0.3333333333333333
+    >>> distance.dice([1, 0, 0], [2, 0, 0])
+    -0.3333333333333333
+
+    """
+    u = _validate_vector(u)
+    v = _validate_vector(v)
+    if w is not None:
+        w = _validate_weights(w)
+    if u.dtype == v.dtype == bool and w is None:
+        ntt = (u & v).sum()
+    else:
+        dtype = np.result_type(int, u.dtype, v.dtype)
+        u = u.astype(dtype)
+        v = v.astype(dtype)
+        if w is None:
+            ntt = (u * v).sum()
+        else:
+            ntt = (u * v * w).sum()
+    (nft, ntf) = _nbool_correspond_ft_tf(u, v, w=w)
+    return float((ntf + nft) / np.array(2.0 * ntt + ntf + nft))
+
+
+def rogerstanimoto(u, v, w=None):
+    """
+    Compute the Rogers-Tanimoto dissimilarity between two boolean 1-D arrays.
+
+    The Rogers-Tanimoto dissimilarity between two boolean 1-D arrays
+    `u` and `v` is defined as
+
+    .. math::
+       \\frac{R}
+            {c_{TT} + c_{FF} + R}
+
+    where :math:`c_{ij}` is the number of occurrences of
+    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
+    :math:`k < n` and :math:`R = 2(c_{TF} + c_{FT})`.
+
+    Parameters
+    ----------
+    u : (N,) array_like, bool
+        Input array.
+    v : (N,) array_like, bool
+        Input array.
+    w : (N,) array_like, optional
+        The weights for each value in `u` and `v`. Default is None,
+        which gives each value a weight of 1.0.
+
+    Returns
+    -------
+    rogerstanimoto : double
+        The Rogers-Tanimoto dissimilarity between vectors
+        `u` and `v`.
+
+    Examples
+    --------
+    >>> from scipy.spatial import distance
+    >>> distance.rogerstanimoto([1, 0, 0], [0, 1, 0])
+    0.8
+    >>> distance.rogerstanimoto([1, 0, 0], [1, 1, 0])
+    0.5
+    >>> distance.rogerstanimoto([1, 0, 0], [2, 0, 0])
+    -1.0
+
+    """
+    u = _validate_vector(u)
+    v = _validate_vector(v)
+    if w is not None:
+        w = _validate_weights(w)
+    (nff, nft, ntf, ntt) = _nbool_correspond_all(u, v, w=w)
+    return float(2.0 * (ntf + nft)) / float(ntt + nff + (2.0 * (ntf + nft)))
+
+
+def russellrao(u, v, w=None):
+    """
+    Compute the Russell-Rao dissimilarity between two boolean 1-D arrays.
+
+    The Russell-Rao dissimilarity between two boolean 1-D arrays, `u` and
+    `v`, is defined as
+
+    .. math::
+
+      \\frac{n - c_{TT}}
+           {n}
+
+    where :math:`c_{ij}` is the number of occurrences of
+    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
+    :math:`k < n`.
+
+    Parameters
+    ----------
+    u : (N,) array_like, bool
+        Input array.
+    v : (N,) array_like, bool
+        Input array.
+    w : (N,) array_like, optional
+        The weights for each value in `u` and `v`. Default is None,
+        which gives each value a weight of 1.0.
+
+    Returns
+    -------
+    russellrao : double
+        The Russell-Rao dissimilarity between vectors `u` and `v`.
+
+    Examples
+    --------
+    >>> from scipy.spatial import distance
+    >>> distance.russellrao([1, 0, 0], [0, 1, 0])
+    1.0
+    >>> distance.russellrao([1, 0, 0], [1, 1, 0])
+    0.6666666666666666
+    >>> distance.russellrao([1, 0, 0], [2, 0, 0])
+    0.3333333333333333
+
+    """
+    u = _validate_vector(u)
+    v = _validate_vector(v)
+    if u.dtype == v.dtype == bool and w is None:
+        ntt = (u & v).sum()
+        n = float(len(u))
+    elif w is None:
+        ntt = (u * v).sum()
+        n = float(len(u))
+    else:
+        w = _validate_weights(w)
+        ntt = (u * v * w).sum()
+        n = w.sum()
+    return float(n - ntt) / n
+
+
+def sokalmichener(u, v, w=None):
+    """
+    Compute the Sokal-Michener dissimilarity between two boolean 1-D arrays.
+
+    The Sokal-Michener dissimilarity between boolean 1-D arrays `u` and `v`
+    is defined as
+
+    .. math::
+
+       \\frac{R}
+            {S + R}
+
+    where :math:`c_{ij}` is the number of occurrences of
+    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
+    :math:`k < n`, :math:`R = 2 * (c_{TF} + c_{FT})` and
+    :math:`S = c_{FF} + c_{TT}`.
+
+    Parameters
+    ----------
+    u : (N,) array_like, bool
+        Input array.
+    v : (N,) array_like, bool
+        Input array.
+    w : (N,) array_like, optional
+        The weights for each value in `u` and `v`. Default is None,
+        which gives each value a weight of 1.0.
+
+    Returns
+    -------
+    sokalmichener : double
+        The Sokal-Michener dissimilarity between vectors `u` and `v`.
+
+    Examples
+    --------
+    >>> from scipy.spatial import distance
+    >>> distance.sokalmichener([1, 0, 0], [0, 1, 0])
+    0.8
+    >>> distance.sokalmichener([1, 0, 0], [1, 1, 0])
+    0.5
+    >>> distance.sokalmichener([1, 0, 0], [2, 0, 0])
+    -1.0
+
+    """
+    u = _validate_vector(u)
+    v = _validate_vector(v)
+    if w is not None:
+        w = _validate_weights(w)
+    nff, nft, ntf, ntt = _nbool_correspond_all(u, v, w=w)
+    return float(2.0 * (ntf + nft)) / float(ntt + nff + 2.0 * (ntf + nft))
+
+
+def sokalsneath(u, v, w=None):
+    """
+    Compute the Sokal-Sneath dissimilarity between two boolean 1-D arrays.
+
+    The Sokal-Sneath dissimilarity between `u` and `v` is defined as
+
+    .. math::
+
+       \\frac{R}
+            {c_{TT} + R}
+
+    where :math:`c_{ij}` is the number of occurrences of
+    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
+    :math:`k < n` and :math:`R = 2(c_{TF} + c_{FT})`.
+
+    Parameters
+    ----------
+    u : (N,) array_like, bool
+        Input array.
+    v : (N,) array_like, bool
+        Input array.
+    w : (N,) array_like, optional
+        The weights for each value in `u` and `v`. Default is None,
+        which gives each value a weight of 1.0.
+
+    Returns
+    -------
+    sokalsneath : double
+        The Sokal-Sneath dissimilarity between vectors `u` and `v`.
+
+    Examples
+    --------
+    >>> from scipy.spatial import distance
+    >>> distance.sokalsneath([1, 0, 0], [0, 1, 0])
+    1.0
+    >>> distance.sokalsneath([1, 0, 0], [1, 1, 0])
+    0.66666666666666663
+    >>> distance.sokalsneath([1, 0, 0], [2, 1, 0])
+    0.0
+    >>> distance.sokalsneath([1, 0, 0], [3, 1, 0])
+    -2.0
+
+    """
+    u = _validate_vector(u)
+    v = _validate_vector(v)
+    if u.dtype == v.dtype == bool and w is None:
+        ntt = (u & v).sum()
+    elif w is None:
+        ntt = (u * v).sum()
+    else:
+        w = _validate_weights(w)
+        ntt = (u * v * w).sum()
+    (nft, ntf) = _nbool_correspond_ft_tf(u, v, w=w)
+    denom = np.array(ntt + 2.0 * (ntf + nft))
+    if not denom.any():
+        raise ValueError('Sokal-Sneath dissimilarity is not defined for '
+                         'vectors that are entirely false.')
+    return float(2.0 * (ntf + nft)) / denom
+
+
+_convert_to_double = partial(_convert_to_type, out_type=np.double)
+_convert_to_bool = partial(_convert_to_type, out_type=bool)
+
+# Add python-only wrappers to the _distance_wrap module.
+_distance_wrap.pdist_correlation_double_wrap = _correlation_pdist_wrap
+_distance_wrap.cdist_correlation_double_wrap = _correlation_cdist_wrap
+
+
+@dataclasses.dataclass(frozen=True)
+class CDistMetricWrapper:
+    metric_name: str
+
+    def __call__(self, XA, XB, *, out=None, **kwargs):
+        XA = np.ascontiguousarray(XA)
+        XB = np.ascontiguousarray(XB)
+        mA, n = XA.shape
+        mB, _ = XB.shape
+        metric_name = self.metric_name
+        metric_info = _METRICS[metric_name]
+        XA, XB, typ, kwargs = _validate_cdist_input(
+            XA, XB, mA, mB, n, metric_info, **kwargs)
+
+        w = kwargs.pop('w', None)
+        if w is not None:
+            metric = metric_info.dist_func
+            return _cdist_callable(
+                XA, XB, metric=metric, out=out, w=w, **kwargs)
+
+        dm = _prepare_out_argument(out, np.double, (mA, mB))
+        # get cdist wrapper
+        cdist_fn = getattr(_distance_wrap, f'cdist_{metric_name}_{typ}_wrap')
+        cdist_fn(XA, XB, dm, **kwargs)
+        return dm
+
+
+@dataclasses.dataclass(frozen=True)
+class CDistWeightedMetricWrapper:
+    metric_name: str
+    weighted_metric: str
+
+    def __call__(self, XA, XB, *, out=None, **kwargs):
+        XA = np.ascontiguousarray(XA)
+        XB = np.ascontiguousarray(XB)
+        mA, n = XA.shape
+        mB, _ = XB.shape
+        metric_name = self.metric_name
+        XA, XB, typ, kwargs = _validate_cdist_input(
+            XA, XB, mA, mB, n, _METRICS[metric_name], **kwargs)
+        dm = _prepare_out_argument(out, np.double, (mA, mB))
+
+        w = kwargs.pop('w', None)
+        if w is not None:
+            metric_name = self.weighted_metric
+            kwargs['w'] = w
+
+        # get cdist wrapper
+        cdist_fn = getattr(_distance_wrap, f'cdist_{metric_name}_{typ}_wrap')
+        cdist_fn(XA, XB, dm, **kwargs)
+        return dm
+
+
+@dataclasses.dataclass(frozen=True)
+class PDistMetricWrapper:
+    metric_name: str
+
+    def __call__(self, X, *, out=None, **kwargs):
+        X = np.ascontiguousarray(X)
+        m, n = X.shape
+        metric_name = self.metric_name
+        metric_info = _METRICS[metric_name]
+        X, typ, kwargs = _validate_pdist_input(
+            X, m, n, metric_info, **kwargs)
+        out_size = (m * (m - 1)) // 2
+        w = kwargs.pop('w', None)
+        if w is not None:
+            metric = metric_info.dist_func
+            return _pdist_callable(
+                X, metric=metric, out=out, w=w, **kwargs)
+
+        dm = _prepare_out_argument(out, np.double, (out_size,))
+        # get pdist wrapper
+        pdist_fn = getattr(_distance_wrap, f'pdist_{metric_name}_{typ}_wrap')
+        pdist_fn(X, dm, **kwargs)
+        return dm
+
+
+@dataclasses.dataclass(frozen=True)
+class PDistWeightedMetricWrapper:
+    metric_name: str
+    weighted_metric: str
+
+    def __call__(self, X, *, out=None, **kwargs):
+        X = np.ascontiguousarray(X)
+        m, n = X.shape
+        metric_name = self.metric_name
+        X, typ, kwargs = _validate_pdist_input(
+            X, m, n, _METRICS[metric_name], **kwargs)
+        out_size = (m * (m - 1)) // 2
+        dm = _prepare_out_argument(out, np.double, (out_size,))
+
+        w = kwargs.pop('w', None)
+        if w is not None:
+            metric_name = self.weighted_metric
+            kwargs['w'] = w
+
+        # get pdist wrapper
+        pdist_fn = getattr(_distance_wrap, f'pdist_{metric_name}_{typ}_wrap')
+        pdist_fn(X, dm, **kwargs)
+        return dm
+
+
+@dataclasses.dataclass(frozen=True)
+class MetricInfo:
+    # Name of python distance function
+    canonical_name: str
+    # All aliases, including canonical_name
+    aka: Set[str]
+    # unvectorized distance function
+    dist_func: Callable
+    # Optimized cdist function
+    cdist_func: Callable
+    # Optimized pdist function
+    pdist_func: Callable
+    # function that checks kwargs and computes default values:
+    # f(X, m, n, **kwargs)
+    validator: Optional[Callable] = None
+    # list of supported types:
+    # X (pdist) and XA (cdist) are used to choose the type. If there is no
+    # match, the first type is used. Default: 'double'.
+    types: List[str] = dataclasses.field(default_factory=lambda: ['double'])
+    # true if out array must be C-contiguous
+    requires_contiguous_out: bool = True
+
+
+# Registry of implemented metrics:
+_METRIC_INFOS = [
+    MetricInfo(
+        canonical_name='braycurtis',
+        aka={'braycurtis'},
+        dist_func=braycurtis,
+        cdist_func=_distance_pybind.cdist_braycurtis,
+        pdist_func=_distance_pybind.pdist_braycurtis,
+    ),
+    MetricInfo(
+        canonical_name='canberra',
+        aka={'canberra'},
+        dist_func=canberra,
+        cdist_func=_distance_pybind.cdist_canberra,
+        pdist_func=_distance_pybind.pdist_canberra,
+    ),
+    MetricInfo(
+        canonical_name='chebyshev',
+        aka={'chebychev', 'chebyshev', 'cheby', 'cheb', 'ch'},
+        dist_func=chebyshev,
+        cdist_func=_distance_pybind.cdist_chebyshev,
+        pdist_func=_distance_pybind.pdist_chebyshev,
+    ),
+    MetricInfo(
+        canonical_name='cityblock',
+        aka={'cityblock', 'cblock', 'cb', 'c'},
+        dist_func=cityblock,
+        cdist_func=_distance_pybind.cdist_cityblock,
+        pdist_func=_distance_pybind.pdist_cityblock,
+    ),
+    MetricInfo(
+        canonical_name='correlation',
+        aka={'correlation', 'co'},
+        dist_func=correlation,
+        cdist_func=CDistMetricWrapper('correlation'),
+        pdist_func=PDistMetricWrapper('correlation'),
+    ),
+    MetricInfo(
+        canonical_name='cosine',
+        aka={'cosine', 'cos'},
+        dist_func=cosine,
+        cdist_func=CDistMetricWrapper('cosine'),
+        pdist_func=PDistMetricWrapper('cosine'),
+    ),
+    MetricInfo(
+        canonical_name='dice',
+        aka={'dice'},
+        types=['bool'],
+        dist_func=dice,
+        cdist_func=CDistMetricWrapper('dice'),
+        pdist_func=PDistMetricWrapper('dice'),
+    ),
+    MetricInfo(
+        canonical_name='euclidean',
+        aka={'euclidean', 'euclid', 'eu', 'e'},
+        dist_func=euclidean,
+        cdist_func=_distance_pybind.cdist_euclidean,
+        pdist_func=_distance_pybind.pdist_euclidean,
+    ),
+    MetricInfo(
+        canonical_name='hamming',
+        aka={'matching', 'hamming', 'hamm', 'ha', 'h'},
+        types=['double', 'bool'],
+        validator=_validate_hamming_kwargs,
+        dist_func=hamming,
+        cdist_func=CDistWeightedMetricWrapper('hamming', 'hamming'),
+        pdist_func=PDistWeightedMetricWrapper('hamming', 'hamming'),
+    ),
+    MetricInfo(
+        canonical_name='jaccard',
+        aka={'jaccard', 'jacc', 'ja', 'j'},
+        types=['double', 'bool'],
+        dist_func=jaccard,
+        cdist_func=CDistMetricWrapper('jaccard'),
+        pdist_func=PDistMetricWrapper('jaccard'),
+    ),
+    MetricInfo(
+        canonical_name='jensenshannon',
+        aka={'jensenshannon', 'js'},
+        dist_func=jensenshannon,
+        cdist_func=CDistMetricWrapper('jensenshannon'),
+        pdist_func=PDistMetricWrapper('jensenshannon'),
+    ),
+    MetricInfo(
+        canonical_name='kulsinski',
+        aka={'kulsinski'},
+        types=['bool'],
+        dist_func=kulsinski,
+        cdist_func=CDistMetricWrapper('kulsinski'),
+        pdist_func=PDistMetricWrapper('kulsinski'),
+    ),
+    MetricInfo(
+        canonical_name='kulczynski1',
+        aka={'kulczynski1'},
+        types=['bool'],
+        dist_func=kulczynski1,
+        cdist_func=CDistMetricWrapper('kulczynski1'),
+        pdist_func=PDistMetricWrapper('kulczynski1'),
+    ),
+    MetricInfo(
+        canonical_name='mahalanobis',
+        aka={'mahalanobis', 'mahal', 'mah'},
+        validator=_validate_mahalanobis_kwargs,
+        dist_func=mahalanobis,
+        cdist_func=CDistMetricWrapper('mahalanobis'),
+        pdist_func=PDistMetricWrapper('mahalanobis'),
+    ),
+    MetricInfo(
+        canonical_name='minkowski',
+        aka={'minkowski', 'mi', 'm', 'pnorm'},
+        validator=_validate_minkowski_kwargs,
+        dist_func=minkowski,
+        cdist_func=_distance_pybind.cdist_minkowski,
+        pdist_func=_distance_pybind.pdist_minkowski,
+    ),
+    MetricInfo(
+        canonical_name='rogerstanimoto',
+        aka={'rogerstanimoto'},
+        types=['bool'],
+        dist_func=rogerstanimoto,
+        cdist_func=CDistMetricWrapper('rogerstanimoto'),
+        pdist_func=PDistMetricWrapper('rogerstanimoto'),
+    ),
+    MetricInfo(
+        canonical_name='russellrao',
+        aka={'russellrao'},
+        types=['bool'],
+        dist_func=russellrao,
+        cdist_func=CDistMetricWrapper('russellrao'),
+        pdist_func=PDistMetricWrapper('russellrao'),
+    ),
+    MetricInfo(
+        canonical_name='seuclidean',
+        aka={'seuclidean', 'se', 's'},
+        validator=_validate_seuclidean_kwargs,
+        dist_func=seuclidean,
+        cdist_func=CDistMetricWrapper('seuclidean'),
+        pdist_func=PDistMetricWrapper('seuclidean'),
+    ),
+    MetricInfo(
+        canonical_name='sokalmichener',
+        aka={'sokalmichener'},
+        types=['bool'],
+        dist_func=sokalmichener,
+        cdist_func=CDistMetricWrapper('sokalmichener'),
+        pdist_func=PDistMetricWrapper('sokalmichener'),
+    ),
+    MetricInfo(
+        canonical_name='sokalsneath',
+        aka={'sokalsneath'},
+        types=['bool'],
+        dist_func=sokalsneath,
+        cdist_func=CDistMetricWrapper('sokalsneath'),
+        pdist_func=PDistMetricWrapper('sokalsneath'),
+    ),
+    MetricInfo(
+        canonical_name='sqeuclidean',
+        aka={'sqeuclidean', 'sqe', 'sqeuclid'},
+        dist_func=sqeuclidean,
+        cdist_func=_distance_pybind.cdist_sqeuclidean,
+        pdist_func=_distance_pybind.pdist_sqeuclidean,
+    ),
+    MetricInfo(
+        canonical_name='yule',
+        aka={'yule'},
+        types=['bool'],
+        dist_func=yule,
+        cdist_func=CDistMetricWrapper('yule'),
+        pdist_func=PDistMetricWrapper('yule'),
+    ),
+]
+
+_METRICS = {info.canonical_name: info for info in _METRIC_INFOS}
+_METRIC_ALIAS = dict((alias, info)
+                     for info in _METRIC_INFOS
+                     for alias in info.aka)
+
+_METRICS_NAMES = list(_METRICS.keys())
+
+_TEST_METRICS = {'test_' + info.canonical_name: info for info in _METRIC_INFOS}
+
+
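+# A sketch of how `pdist` and `cdist` below resolve a metric string through
+# the tables above: any alias maps to a shared MetricInfo record holding the
+# optimized implementations. Unknown names raise KeyError here rather than
+# the friendlier ValueError raised by `pdist`; not used by this module.
+def _resolve_metric_sketch(name):
+    # e.g. _resolve_metric_sketch('euclid').canonical_name == 'euclidean'
+    return _METRIC_ALIAS[name.lower()]
+
+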
+def pdist(X, metric='euclidean', *, out=None, **kwargs):
+    """
+    Pairwise distances between observations in n-dimensional space.
+
+    See Notes for common calling conventions.
+
+    Parameters
+    ----------
+    X : array_like
+        An m by n array of m original observations in an
+        n-dimensional space.
+    metric : str or function, optional
+        The distance metric to use. The distance function can
+        be 'braycurtis', 'canberra', 'chebyshev', 'cityblock',
+        'correlation', 'cosine', 'dice', 'euclidean', 'hamming',
+        'jaccard', 'jensenshannon', 'kulczynski1',
+        'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
+        'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
+        'sqeuclidean', 'yule'.
+    **kwargs : dict, optional
+        Extra arguments to `metric`: refer to each metric documentation for a
+        list of all possible arguments.
+
+        Some possible arguments:
+
+        p : scalar
+        The p-norm to apply for Minkowski, weighted and unweighted.
+        Default: 2.
+
+        w : ndarray
+        The weight vector for metrics that support weights (e.g., Minkowski).
+
+        V : ndarray
+        The variance vector for standardized Euclidean.
+        Default: var(X, axis=0, ddof=1)
+
+        VI : ndarray
+        The inverse of the covariance matrix for Mahalanobis.
+        Default: inv(cov(X.T)).T
+
+        out : ndarray
+        The output array. If not None, the condensed distance matrix Y is
+        stored in this array.
+
+    Returns
+    -------
+    Y : ndarray
+        Returns a condensed distance matrix Y. For each :math:`i` and :math:`j`
+        (where :math:`i<j<m`), where m is the number of original observations,
+        the metric ``dist(u=X[i], v=X[j])`` is computed and stored in entry
+        ``m * i + j - ((i + 2) * (i + 1)) // 2``.
+
+    See Also
+    --------
+    squareform : converts between condensed distance matrices and
+                 square distance matrices.
+
+    Notes
+    -----
+    See ``squareform`` for information on how to calculate the index of
+    this entry or to convert the condensed distance matrix to a
+    redundant square matrix.
+
+    The following are common calling conventions:
+
+    1. ``Y = pdist(X, 'euclidean')``
+
+       Computes the distance between m points using Euclidean distance
+       (2-norm) as the distance metric between the points. The points
+       are arranged as m n-dimensional row vectors in the matrix X.
+
+    2. ``Y = pdist(X, 'minkowski', p=2.)``
+
+       Computes the distances using the Minkowski distance
+       :math:`\\|u-v\\|_p` (:math:`p`-norm) where :math:`p > 0` (note
+       that this is only a quasi-metric if :math:`0 < p < 1`).
+
+    3. ``Y = pdist(X, 'cityblock')``
+
+       Computes the city block or Manhattan distance between the
+       points.
+
+    4. ``Y = pdist(X, 'seuclidean', V=None)``
+
+       Computes the standardized Euclidean distance. The standardized
+       Euclidean distance between two n-vectors ``u`` and ``v`` is
+
+       .. math::
+
+          \\sqrt{\\sum {(u_i-v_i)^2 / V[i]}}
+
+
+       V is the variance vector; V[i] is the variance computed over all
+       the i'th components of the points.  If not passed, it is
+       automatically computed.
+
+    5. ``Y = pdist(X, 'sqeuclidean')``
+
+       Computes the squared Euclidean distance :math:`\\|u-v\\|_2^2` between
+       the vectors.
+
+    6. ``Y = pdist(X, 'cosine')``
+
+       Computes the cosine distance between vectors u and v,
+
+       .. math::
+
+          1 - \\frac{u \\cdot v}
+                   {{\\|u\\|}_2 {\\|v\\|}_2}
+
+       where :math:`\\|*\\|_2` is the 2-norm of its argument ``*``, and
+       :math:`u \\cdot v` is the dot product of ``u`` and ``v``.
+
+    7. ``Y = pdist(X, 'correlation')``
+
+       Computes the correlation distance between vectors u and v. This is
+
+       .. math::
+
+          1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}
+                   {{\\|(u - \\bar{u})\\|}_2 {\\|(v - \\bar{v})\\|}_2}
+
+       where :math:`\\bar{v}` is the mean of the elements of vector v,
+       and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`.
+
+    8. ``Y = pdist(X, 'hamming')``
+
+       Computes the normalized Hamming distance, or the proportion of
+       those vector elements between two n-vectors ``u`` and ``v``
+       which disagree. To save memory, the matrix ``X`` can be of type
+       boolean.
+
+    9. ``Y = pdist(X, 'jaccard')``
+
+       Computes the Jaccard distance between the points. Given two
+       vectors, ``u`` and ``v``, the Jaccard distance is the
+       proportion of those elements ``u[i]`` and ``v[i]`` that
+       disagree where at least one of them is non-zero.
+
+    10. ``Y = pdist(X, 'jensenshannon')``
+
+        Computes the Jensen-Shannon distance between two probability arrays.
+        Given two probability vectors, :math:`p` and :math:`q`, the
+        Jensen-Shannon distance is
+
+        .. math::
+
+           \\sqrt{\\frac{D(p \\parallel m) + D(q \\parallel m)}{2}}
+
+        where :math:`m` is the pointwise mean of :math:`p` and :math:`q`
+        and :math:`D` is the Kullback-Leibler divergence.
+
+    11. ``Y = pdist(X, 'chebyshev')``
+
+        Computes the Chebyshev distance between the points. The
+        Chebyshev distance between two n-vectors ``u`` and ``v`` is the
+        maximum norm-1 distance between their respective elements. More
+        precisely, the distance is given by
+
+        .. math::
+
+           d(u,v) = \\max_i {|u_i-v_i|}
+
+    12. ``Y = pdist(X, 'canberra')``
+
+        Computes the Canberra distance between the points. The
+        Canberra distance between two points ``u`` and ``v`` is
+
+        .. math::
+
+          d(u,v) = \\sum_i \\frac{|u_i-v_i|}
+                               {|u_i|+|v_i|}
+
+
+    13. ``Y = pdist(X, 'braycurtis')``
+
+        Computes the Bray-Curtis distance between the points. The
+        Bray-Curtis distance between two points ``u`` and ``v`` is
+
+
+        .. math::
+
+             d(u,v) = \\frac{\\sum_i {|u_i-v_i|}}
+                            {\\sum_i {|u_i+v_i|}}
+
+    14. ``Y = pdist(X, 'mahalanobis', VI=None)``
+
+        Computes the Mahalanobis distance between the points. The
+        Mahalanobis distance between two points ``u`` and ``v`` is
+        :math:`\\sqrt{(u-v)(1/V)(u-v)^T}` where :math:`(1/V)` (the ``VI``
+        variable) is the inverse covariance. If ``VI`` is not None,
+        ``VI`` will be used as the inverse covariance matrix.
+
+    15. ``Y = pdist(X, 'yule')``
+
+        Computes the Yule distance between each pair of boolean
+        vectors. (see yule function documentation)
+
+    16. ``Y = pdist(X, 'matching')``
+
+        Synonym for 'hamming'.
+
+    17. ``Y = pdist(X, 'dice')``
+
+        Computes the Dice distance between each pair of boolean
+        vectors. (see dice function documentation)
+
+    18. ``Y = pdist(X, 'kulczynski1')``
+
+        Computes the kulczynski1 distance between each pair of
+        boolean vectors. (see kulczynski1 function documentation)
+
+    19. ``Y = pdist(X, 'rogerstanimoto')``
+
+        Computes the Rogers-Tanimoto distance between each pair of
+        boolean vectors. (see rogerstanimoto function documentation)
+
+    20. ``Y = pdist(X, 'russellrao')``
+
+        Computes the Russell-Rao distance between each pair of
+        boolean vectors. (see russellrao function documentation)
+
+    21. ``Y = pdist(X, 'sokalmichener')``
+
+        Computes the Sokal-Michener distance between each pair of
+        boolean vectors. (see sokalmichener function documentation)
+
+    22. ``Y = pdist(X, 'sokalsneath')``
+
+        Computes the Sokal-Sneath distance between each pair of
+        boolean vectors. (see sokalsneath function documentation)
+
+    23. ``Y = pdist(X, f)``
+
+        Computes the distance between all pairs of vectors in X
+        using the user-supplied 2-arity function f. For example,
+        Euclidean distance between the vectors could be computed
+        as follows::
+
+          dm = pdist(X, lambda u, v: np.sqrt(((u-v)**2).sum()))
+
+        Note that you should avoid passing a reference to one of
+        the distance functions defined in this library. For example::
+
+          dm = pdist(X, sokalsneath)
+
+        would calculate the pair-wise distances between the vectors in
+        X using the Python function sokalsneath. This would result in
+        sokalsneath being called :math:`{n \\choose 2}` times, which
+        is inefficient. Instead, the optimized C version is more
+        efficient, and we call it using the following syntax::
+
+          dm = pdist(X, 'sokalsneath')
+
+    """
+    # You can also call this as:
+    #     Y = pdist(X, 'test_abc')
+    # where 'abc' is the metric being tested.  This computes the distance
+    # between all pairs of vectors in X using the distance metric 'abc' but
+    # with a more succinct, verifiable, but less efficient implementation.
+
+    X = _asarray_validated(X, sparse_ok=False, objects_ok=True, mask_ok=True,
+                           check_finite=False)
+
+    s = X.shape
+    if len(s) != 2:
+        raise ValueError('A 2-dimensional array must be passed.')
+
+    m, n = s
+
+    if callable(metric):
+        mstr = getattr(metric, '__name__', 'UnknownCustomMetric')
+        metric_info = _METRIC_ALIAS.get(mstr, None)
+
+        if metric_info is not None:
+            X, typ, kwargs = _validate_pdist_input(
+                X, m, n, metric_info, **kwargs)
+
+        return _pdist_callable(X, metric=metric, out=out, **kwargs)
+    elif isinstance(metric, str):
+        mstr = metric.lower()
+        metric_info = _METRIC_ALIAS.get(mstr, None)
+
+        if metric_info is not None:
+            pdist_fn = metric_info.pdist_func
+            return pdist_fn(X, out=out, **kwargs)
+        elif mstr.startswith("test_"):
+            metric_info = _TEST_METRICS.get(mstr, None)
+            if metric_info is None:
+                raise ValueError(f'Unknown "Test" Distance Metric: {mstr[5:]}')
+            X, typ, kwargs = _validate_pdist_input(
+                X, m, n, metric_info, **kwargs)
+            return _pdist_callable(
+                X, metric=metric_info.dist_func, out=out, **kwargs)
+        else:
+            raise ValueError('Unknown Distance Metric: %s' % mstr)
+    else:
+        raise TypeError('2nd argument metric must be a string identifier '
+                        'or a function.')
+
+
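+# A sketch of the condensed-matrix indexing described in pdist's Returns
+# section: for i < j < m, dist(X[i], X[j]) lands at this position of the
+# condensed vector. Illustrative only, not used by this module.
+def _condensed_index_sketch(m, i, j):
+    return m * i + j - ((i + 2) * (i + 1)) // 2
+
+# With m = 4 observations the pairs are laid out as
+# (0,1) (0,2) (0,3) (1,2) (1,3) (2,3), so _condensed_index_sketch(4, 1, 3)
+# returns 4.
+
+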
+def squareform(X, force="no", checks=True):
+    """
+    Convert a vector-form distance vector to a square-form distance
+    matrix, and vice-versa.
+
+    Parameters
+    ----------
+    X : array_like
+        Either a condensed or redundant distance matrix.
+    force : str, optional
+        As with MATLAB(TM), if force is equal to ``'tovector'`` or
+        ``'tomatrix'``, the input will be treated as a distance matrix or
+        distance vector respectively.
+    checks : bool, optional
+        If set to False, no checks will be made for matrix
+        symmetry or zero diagonals. This is useful if it is known that
+        ``X - X.T`` is small and ``diag(X)`` is close to zero.
+        These values are ignored anyway, so they do not disrupt the
+        squareform transformation.
+
+    Returns
+    -------
+    Y : ndarray
+        If a condensed distance matrix is passed, a redundant one is
+        returned, or if a redundant one is passed, a condensed distance
+        matrix is returned.
+
+    Notes
+    -----
+    1. ``v = squareform(X)``
+
+       Given a square n-by-n symmetric distance matrix ``X``,
+       ``v = squareform(X)`` returns a ``n * (n-1) / 2``
+       (i.e. binomial coefficient n choose 2) sized vector `v`
+       where :math:`v[{n \\choose 2} - {n-i \\choose 2} + (j-i-1)]`
+       is the distance between distinct points ``i`` and ``j``.
+       If ``X`` is non-square or asymmetric, an error is raised.
+
+    2. ``X = squareform(v)``
+
+       Given a ``n * (n-1) / 2`` sized vector ``v``
+       for some integer ``n >= 1`` encoding distances as described,
+       ``X = squareform(v)`` returns a n-by-n distance matrix ``X``.
+       The ``X[i, j]`` and ``X[j, i]`` values are set to
+       :math:`v[{n \\choose 2} - {n-i \\choose 2} + (j-i-1)]`
+       and all diagonal elements are zero.
+
+    In SciPy 0.19.0, ``squareform`` stopped casting all input types to
+    float64, and started returning arrays of the same dtype as the input.
+
+    """
+
+    X = np.ascontiguousarray(X)
+
+    s = X.shape
+
+    if force.lower() == 'tomatrix':
+        if len(s) != 1:
+            raise ValueError("Forcing 'tomatrix' but input X is not a "
+                             "distance vector.")
+    elif force.lower() == 'tovector':
+        if len(s) != 2:
+            raise ValueError("Forcing 'tovector' but input X is not a "
+                             "distance matrix.")
+
+    # X = squareform(v)
+    if len(s) == 1:
+        if s[0] == 0:
+            return np.zeros((1, 1), dtype=X.dtype)
+
+        # Grab the closest value to the square root of the number
+        # of elements times 2 to see if the number of elements
+        # is indeed a binomial coefficient.
+        d = int(np.ceil(np.sqrt(s[0] * 2)))
+
+        # Check that v is of valid dimensions.
+        if d * (d - 1) != s[0] * 2:
+            raise ValueError('Incompatible vector size. It must be a binomial '
+                             'coefficient n choose 2 for some integer n >= 2.')
+
+        # Allocate memory for the distance matrix.
+        M = np.zeros((d, d), dtype=X.dtype)
+
+        # Since the C code does not support striding using strides,
+        # the dimensions are used instead.
+        X = _copy_array_if_base_present(X)
+
+        # Fill in the values of the distance matrix.
+        _distance_wrap.to_squareform_from_vector_wrap(M, X)
+
+        # Return the distance matrix.
+        return M
+    elif len(s) == 2:
+        if s[0] != s[1]:
+            raise ValueError('The matrix argument must be square.')
+        if checks:
+            is_valid_dm(X, throw=True, name='X')
+
+        # One-side of the dimensions is set here.
+        d = s[0]
+
+        if d <= 1:
+            return np.array([], dtype=X.dtype)
+
+        # Create a vector.
+        v = np.zeros((d * (d - 1)) // 2, dtype=X.dtype)
+
+        # Since the C code does not support striding using strides,
+        # the dimensions are used instead.
+        X = _copy_array_if_base_present(X)
+
+        # Convert the square matrix to a condensed vector.
+        _distance_wrap.to_vector_from_squareform_wrap(X, v)
+        return v
+    else:
+        raise ValueError(('The first argument must be one or two dimensional '
+                          'array. A %d-dimensional array is not '
+                          'permitted') % len(s))
+
+
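+# A round-trip sketch: `pdist` produces the condensed form, `squareform`
+# expands it to a redundant m-by-m matrix, and a second `squareform` call
+# condenses it back to the original vector. Not used by this module.
+def _squareform_roundtrip_sketch():
+    import numpy as np
+    X = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
+    y = pdist(X)                  # condensed vector of length 3 choose 2
+    D = squareform(y)             # 3-by-3 symmetric matrix, zero diagonal
+    return bool(np.allclose(squareform(D), y))   # True
+
+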
+def is_valid_dm(D, tol=0.0, throw=False, name="D", warning=False):
+    """
+    Return True if input array is a valid distance matrix.
+
+    Distance matrices must be 2-dimensional numpy arrays.
+    They must have a zero diagonal, and they must be symmetric.
+
+    Parameters
+    ----------
+    D : array_like
+        The candidate object to test for validity.
+    tol : float, optional
+        The distance matrix should be symmetric. `tol` is the maximum
+        difference between entries ``ij`` and ``ji`` for the distance
+        metric to be considered symmetric.
+    throw : bool, optional
+        An exception is thrown if the distance matrix passed is not valid.
+    name : str, optional
+        The name of the variable to be checked. This is useful if
+        throw is set to True so the offending variable can be identified
+        in the exception message when an exception is thrown.
+    warning : bool, optional
+        If True, a warning message is raised instead of throwing an
+        exception.
+
+    Returns
+    -------
+    valid : bool
+        True if the variable `D` passed is a valid distance matrix.
+
+    Notes
+    -----
+    Small numerical differences in `D` and `D.T` and non-zeroness of
+    the diagonal are ignored if they are within the tolerance specified
+    by `tol`.
+
+    """
+    D = np.asarray(D, order='c')
+    valid = True
+    try:
+        s = D.shape
+        if len(D.shape) != 2:
+            if name:
+                raise ValueError(('Distance matrix \'%s\' must have shape=2 '
+                                  '(i.e. be two-dimensional).') % name)
+            else:
+                raise ValueError('Distance matrix must have shape=2 (i.e. '
+                                 'be two-dimensional).')
+        if tol == 0.0:
+            if not (D == D.T).all():
+                if name:
+                    raise ValueError(('Distance matrix \'%s\' must be '
+                                     'symmetric.') % name)
+                else:
+                    raise ValueError('Distance matrix must be symmetric.')
+            if not (D[range(0, s[0]), range(0, s[0])] == 0).all():
+                if name:
+                    raise ValueError(('Distance matrix \'%s\' diagonal must '
+                                      'be zero.') % name)
+                else:
+                    raise ValueError('Distance matrix diagonal must be zero.')
+        else:
+            if not (D - D.T <= tol).all():
+                if name:
+                    raise ValueError(('Distance matrix \'%s\' must be '
+                                      'symmetric within tolerance %5.5f.')
+                                     % (name, tol))
+                else:
+                    raise ValueError('Distance matrix must be symmetric within'
+                                     ' tolerance %5.5f.' % tol)
+            if not (D[range(0, s[0]), range(0, s[0])] <= tol).all():
+                if name:
+                    raise ValueError(('Distance matrix \'%s\' diagonal must be'
+                                      ' close to zero within tolerance %5.5f.')
+                                     % (name, tol))
+                else:
+                    raise ValueError(('Distance matrix diagonal must be '
+                                      'close to zero within tolerance '
+                                      '%5.5f.') % tol)
+    except Exception as e:
+        if throw:
+            raise
+        if warning:
+            warnings.warn(str(e))
+        valid = False
+    return valid
+
+
+def is_valid_y(y, warning=False, throw=False, name=None):
+    """
+    Return True if the input array is a valid condensed distance matrix.
+
+    Condensed distance matrices must be 1-dimensional numpy arrays.
+    Their length must be a binomial coefficient :math:`{n \\choose 2}`
+    for some positive integer n.
+
+    Parameters
+    ----------
+    y : array_like
+        The condensed distance matrix.
+    warning : bool, optional
+        Invokes a warning if the variable passed is not a valid
+        condensed distance matrix. The warning message explains why
+        the distance matrix is not valid.  `name` is used when
+        referencing the offending variable.
+    throw : bool, optional
+        Throws an exception if the variable passed is not a valid
+        condensed distance matrix.
+    name : str, optional
+        Used when referencing the offending variable in the
+        warning or exception message.
+
+    """
+    y = np.asarray(y, order='c')
+    valid = True
+    try:
+        if len(y.shape) != 1:
+            if name:
+                raise ValueError(('Condensed distance matrix \'%s\' must '
+                                  'have shape=1 (i.e. be one-dimensional).')
+                                 % name)
+            else:
+                raise ValueError('Condensed distance matrix must have shape=1 '
+                                 '(i.e. be one-dimensional).')
+        n = y.shape[0]
+        d = int(np.ceil(np.sqrt(n * 2)))
+        if (d * (d - 1) / 2) != n:
+            if name:
+                raise ValueError(('Length n of condensed distance matrix '
+                                  '\'%s\' must be a binomial coefficient, '
+                                  'i.e. there must be a k such that '
+                                  '(k \\choose 2)=n!') % name)
+            else:
+                raise ValueError('Length n of condensed distance matrix must '
+                                 'be a binomial coefficient, i.e. there must '
+                                 'be a k such that (k \\choose 2)=n!')
+    except Exception as e:
+        if throw:
+            raise
+        if warning:
+            warnings.warn(str(e))
+        valid = False
+    return valid
+
+
+def num_obs_dm(d):
+    """
+    Return the number of original observations that correspond to a
+    square, redundant distance matrix.
+
+    Parameters
+    ----------
+    d : array_like
+        The target distance matrix.
+
+    Returns
+    -------
+    num_obs_dm : int
+        The number of observations in the redundant distance matrix.
+
+    """
+    d = np.asarray(d, order='c')
+    is_valid_dm(d, tol=np.inf, throw=True, name='d')
+    return d.shape[0]
+
+
+def num_obs_y(Y):
+    """
+    Return the number of original observations that correspond to a
+    condensed distance matrix.
+
+    Parameters
+    ----------
+    Y : array_like
+        Condensed distance matrix.
+
+    Returns
+    -------
+    n : int
+        The number of observations in the condensed distance matrix `Y`.
+
+    """
+    Y = np.asarray(Y, order='c')
+    is_valid_y(Y, throw=True, name='Y')
+    k = Y.shape[0]
+    if k == 0:
+        raise ValueError("The number of observations cannot be determined on "
+                         "an empty distance matrix.")
+    d = int(np.ceil(np.sqrt(k * 2)))
+    if (d * (d - 1) / 2) != k:
+        raise ValueError("Invalid condensed distance matrix passed. Must be "
+                         "some k where k=(n choose 2) for some n >= 2.")
+    return d
+
+
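+# A sketch of the inversion used by `num_obs_y` above: a condensed vector of
+# length k = n*(n-1)/2 came from n observations, so n is recovered by
+# rounding up sqrt(2k) and checking the binomial identity. Not used by this
+# module.
+def _num_obs_from_condensed_sketch(k):
+    import numpy as np
+    d = int(np.ceil(np.sqrt(k * 2)))
+    if d * (d - 1) // 2 != k:
+        raise ValueError("length is not a binomial coefficient n choose 2")
+    return d
+
+# _num_obs_from_condensed_sketch(6) returns 4, since 4*3/2 == 6.
+
+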
+def _prepare_out_argument(out, dtype, expected_shape):
+    if out is None:
+        return np.empty(expected_shape, dtype=dtype)
+
+    if out.shape != expected_shape:
+        raise ValueError("Output array has incorrect shape.")
+    if not out.flags.c_contiguous:
+        raise ValueError("Output array must be C-contiguous.")
+    if out.dtype != np.double:
+        raise ValueError("Output array must be double type.")
+    return out
+
+
+def _pdist_callable(X, *, out, metric, **kwargs):
+    n = X.shape[0]
+    out_size = (n * (n - 1)) // 2
+    dm = _prepare_out_argument(out, np.double, (out_size,))
+    k = 0
+    for i in range(X.shape[0] - 1):
+        for j in range(i + 1, X.shape[0]):
+            dm[k] = metric(X[i], X[j], **kwargs)
+            k += 1
+    return dm
+
+
+def _cdist_callable(XA, XB, *, out, metric, **kwargs):
+    mA = XA.shape[0]
+    mB = XB.shape[0]
+    dm = _prepare_out_argument(out, np.double, (mA, mB))
+    for i in range(mA):
+        for j in range(mB):
+            dm[i, j] = metric(XA[i], XB[j], **kwargs)
+    return dm
+
+
+def cdist(XA, XB, metric='euclidean', *, out=None, **kwargs):
+    """
+    Compute distance between each pair of the two collections of inputs.
+
+    See Notes for common calling conventions.
+
+    Parameters
+    ----------
+    XA : array_like
+        An :math:`m_A` by :math:`n` array of :math:`m_A`
+        original observations in an :math:`n`-dimensional space.
+        Inputs are converted to float type.
+    XB : array_like
+        An :math:`m_B` by :math:`n` array of :math:`m_B`
+        original observations in an :math:`n`-dimensional space.
+        Inputs are converted to float type.
+    metric : str or callable, optional
+        The distance metric to use. If a string, the distance function can be
+        'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation',
+        'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'jensenshannon',
+        'kulczynski1', 'mahalanobis', 'matching', 'minkowski',
+        'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener',
+        'sokalsneath', 'sqeuclidean', 'yule'.
+    **kwargs : dict, optional
+        Extra arguments to `metric`: refer to each metric documentation for a
+        list of all possible arguments.
+
+        Some possible arguments:
+
+        p : scalar
+        The p-norm to apply for Minkowski, weighted and unweighted.
+        Default: 2.
+
+        w : array_like
+        The weight vector for metrics that support weights (e.g., Minkowski).
+
+        V : array_like
+        The variance vector for standardized Euclidean.
+        Default: var(vstack([XA, XB]), axis=0, ddof=1)
+
+        VI : array_like
+        The inverse of the covariance matrix for Mahalanobis.
+        Default: inv(cov(vstack([XA, XB]).T)).T
+
+        out : ndarray
+        The output array. If not None, the distance matrix Y is stored
+        in this array.
+
+    Returns
+    -------
+    Y : ndarray
+        A :math:`m_A` by :math:`m_B` distance matrix is returned.
+        For each :math:`i` and :math:`j`, the metric
+        ``dist(u=XA[i], v=XB[j])`` is computed and stored in the
+        :math:`ij` th entry.
+
+    Raises
+    ------
+    ValueError
+        An exception is thrown if `XA` and `XB` do not have
+        the same number of columns.
+
+    Notes
+    -----
+    The following are common calling conventions:
+
+    1. ``Y = cdist(XA, XB, 'euclidean')``
+
+       Computes the distance between :math:`m` points using
+       Euclidean distance (2-norm) as the distance metric between the
+       points. The points are arranged as :math:`m`
+       :math:`n`-dimensional row vectors in the matrix X.
+
+    2. ``Y = cdist(XA, XB, 'minkowski', p=2.)``
+
+       Computes the distances using the Minkowski distance
+       :math:`\\|u-v\\|_p` (:math:`p`-norm) where :math:`p > 0` (note
+       that this is only a quasi-metric if :math:`0 < p < 1`).
+
+    3. ``Y = cdist(XA, XB, 'cityblock')``
+
+       Computes the city block or Manhattan distance between the
+       points.
+
+    4. ``Y = cdist(XA, XB, 'seuclidean', V=None)``
+
+       Computes the standardized Euclidean distance. The standardized
+       Euclidean distance between two n-vectors ``u`` and ``v`` is
+
+       .. math::
+
+          \\sqrt{\\sum {(u_i-v_i)^2 / V[i]}}.
+
+       V is the variance vector; V[i] is the variance computed over all
+       the i'th components of the points. If not passed, it is
+       automatically computed.
+
+    5. ``Y = cdist(XA, XB, 'sqeuclidean')``
+
+       Computes the squared Euclidean distance :math:`\\|u-v\\|_2^2` between
+       the vectors.
+
+    6. ``Y = cdist(XA, XB, 'cosine')``
+
+       Computes the cosine distance between vectors u and v,
+
+       .. math::
+
+          1 - \\frac{u \\cdot v}
+                   {{\\|u\\|}_2 {\\|v\\|}_2}
+
+       where :math:`\\|*\\|_2` is the 2-norm of its argument ``*``, and
+       :math:`u \\cdot v` is the dot product of :math:`u` and :math:`v`.
+
+    7. ``Y = cdist(XA, XB, 'correlation')``
+
+       Computes the correlation distance between vectors u and v. This is
+
+       .. math::
+
+          1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}
+                   {{\\|(u - \\bar{u})\\|}_2 {\\|(v - \\bar{v})\\|}_2}
+
+       where :math:`\\bar{v}` is the mean of the elements of vector v,
+       and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`.
+
+
+    8. ``Y = cdist(XA, XB, 'hamming')``
+
+       Computes the normalized Hamming distance, or the proportion of
+       those vector elements between two n-vectors ``u`` and ``v``
+       which disagree. To save memory, the matrix ``X`` can be of type
+       boolean.
+
+    9. ``Y = cdist(XA, XB, 'jaccard')``
+
+       Computes the Jaccard distance between the points. Given two
+       vectors, ``u`` and ``v``, the Jaccard distance is the
+       proportion of those elements ``u[i]`` and ``v[i]`` that
+       disagree where at least one of them is non-zero.
+
+    10. ``Y = cdist(XA, XB, 'jensenshannon')``
+
+        Computes the Jensen-Shannon distance between two probability arrays.
+        Given two probability vectors, :math:`p` and :math:`q`, the
+        Jensen-Shannon distance is
+
+        .. math::
+
+           \\sqrt{\\frac{D(p \\parallel m) + D(q \\parallel m)}{2}}
+
+        where :math:`m` is the pointwise mean of :math:`p` and :math:`q`
+        and :math:`D` is the Kullback-Leibler divergence.
+
+    11. ``Y = cdist(XA, XB, 'chebyshev')``
+
+        Computes the Chebyshev distance between the points. The
+        Chebyshev distance between two n-vectors ``u`` and ``v`` is the
+        maximum norm-1 distance between their respective elements. More
+        precisely, the distance is given by
+
+        .. math::
+
+           d(u,v) = \\max_i {|u_i-v_i|}.
+
+    12. ``Y = cdist(XA, XB, 'canberra')``
+
+        Computes the Canberra distance between the points. The
+        Canberra distance between two points ``u`` and ``v`` is
+
+        .. math::
+
+          d(u,v) = \\sum_i \\frac{|u_i-v_i|}
+                               {|u_i|+|v_i|}.
+
+    13. ``Y = cdist(XA, XB, 'braycurtis')``
+
+        Computes the Bray-Curtis distance between the points. The
+        Bray-Curtis distance between two points ``u`` and ``v`` is
+
+
+        .. math::
+
+             d(u,v) = \\frac{\\sum_i (|u_i-v_i|)}
+                           {\\sum_i (|u_i+v_i|)}
+
+    14. ``Y = cdist(XA, XB, 'mahalanobis', VI=None)``
+
+        Computes the Mahalanobis distance between the points. The
+        Mahalanobis distance between two points ``u`` and ``v`` is
+        :math:`\\sqrt{(u-v)(1/V)(u-v)^T}` where :math:`(1/V)` (the ``VI``
+        variable) is the inverse covariance. If ``VI`` is not None,
+        ``VI`` will be used as the inverse covariance matrix.
+
+    15. ``Y = cdist(XA, XB, 'yule')``
+
+        Computes the Yule distance between the boolean
+        vectors. (see `yule` function documentation)
+
+    16. ``Y = cdist(XA, XB, 'matching')``
+
+        Synonym for 'hamming'.
+
+    17. ``Y = cdist(XA, XB, 'dice')``
+
+        Computes the Dice distance between the boolean vectors. (see
+        `dice` function documentation)
+
+    18. ``Y = cdist(XA, XB, 'kulczynski1')``
+
+        Computes the Kulczynski 1 distance between the boolean
+        vectors. (see `kulczynski1` function documentation)
+
+    19. ``Y = cdist(XA, XB, 'rogerstanimoto')``
+
+        Computes the Rogers-Tanimoto distance between the boolean
+        vectors. (see `rogerstanimoto` function documentation)
+
+    20. ``Y = cdist(XA, XB, 'russellrao')``
+
+        Computes the Russell-Rao distance between the boolean
+        vectors. (see `russellrao` function documentation)
+
+    21. ``Y = cdist(XA, XB, 'sokalmichener')``
+
+        Computes the Sokal-Michener distance between the boolean
+        vectors. (see `sokalmichener` function documentation)
+
+    22. ``Y = cdist(XA, XB, 'sokalsneath')``
+
+        Computes the Sokal-Sneath distance between the boolean
+        vectors. (see `sokalsneath` function documentation)
+
+    23. ``Y = cdist(XA, XB, f)``
+
+        Computes the distance between each pair of vectors drawn from
+        ``XA`` and ``XB`` using the user-supplied 2-arity function f.
+        For example, Euclidean distance between the vectors could be
+        computed as follows::
+
+          dm = cdist(XA, XB, lambda u, v: np.sqrt(((u-v)**2).sum()))
+
+        Note that you should avoid passing a reference to one of
+        the distance functions defined in this library. For example::
+
+          dm = cdist(XA, XB, sokalsneath)
+
+        would calculate the pair-wise distances between the vectors in
+        ``XA`` and ``XB`` using the Python function `sokalsneath`. This
+        would result in `sokalsneath` being called once for every pair
+        of rows, which is inefficient. Instead, the optimized C version
+        is more efficient, and we call it using the following syntax::
+
+          dm = cdist(XA, XB, 'sokalsneath')
+
+    Examples
+    --------
+    Find the Euclidean distances between four 2-D coordinates:
+
+    >>> from scipy.spatial import distance
+    >>> import numpy as np
+    >>> coords = [(35.0456, -85.2672),
+    ...           (35.1174, -89.9711),
+    ...           (35.9728, -83.9422),
+    ...           (36.1667, -86.7833)]
+    >>> distance.cdist(coords, coords, 'euclidean')
+    array([[ 0.    ,  4.7044,  1.6172,  1.8856],
+           [ 4.7044,  0.    ,  6.0893,  3.3561],
+           [ 1.6172,  6.0893,  0.    ,  2.8477],
+           [ 1.8856,  3.3561,  2.8477,  0.    ]])
+
+    Find the Manhattan distance from a 3-D point to the corners of the unit
+    cube:
+
+    >>> a = np.array([[0, 0, 0],
+    ...               [0, 0, 1],
+    ...               [0, 1, 0],
+    ...               [0, 1, 1],
+    ...               [1, 0, 0],
+    ...               [1, 0, 1],
+    ...               [1, 1, 0],
+    ...               [1, 1, 1]])
+    >>> b = np.array([[ 0.1,  0.2,  0.4]])
+    >>> distance.cdist(a, b, 'cityblock')
+    array([[ 0.7],
+           [ 0.9],
+           [ 1.3],
+           [ 1.5],
+           [ 1.5],
+           [ 1.7],
+           [ 2.1],
+           [ 2.3]])
+
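+    As an editorial aside (a sketch, not one of the library's own
+    examples), a callable metric should agree with its string
+    counterpart; here a hand-written lambda reproduces the 'cityblock'
+    result above:
+
+    >>> distance.cdist(a, b, lambda u, v: np.abs(u - v).sum())
+    array([[ 0.7],
+           [ 0.9],
+           [ 1.3],
+           [ 1.5],
+           [ 1.5],
+           [ 1.7],
+           [ 2.1],
+           [ 2.3]])
+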
+    """
+    # You can also call this as:
+    #     Y = cdist(XA, XB, 'test_abc')
+    # where 'abc' is the metric being tested.  This computes the distance
+    # between all pairs of vectors in XA and XB using the distance metric 'abc'
+    # but with a more succinct and verifiable, though less efficient, implementation.
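+    #
+    # e.g. (editorial illustration, assuming a 'test_euclidean' entry is
+    # registered in _TEST_METRICS):
+    #     Y = cdist(XA, XB, 'test_euclidean')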
+
+    XA = np.asarray(XA)
+    XB = np.asarray(XB)
+
+    s = XA.shape
+    sB = XB.shape
+
+    if len(s) != 2:
+        raise ValueError('XA must be a 2-dimensional array.')
+    if len(sB) != 2:
+        raise ValueError('XB must be a 2-dimensional array.')
+    if s[1] != sB[1]:
+        raise ValueError('XA and XB must have the same number of columns '
+                         '(i.e. feature dimension).')
+
+    mA = s[0]
+    mB = sB[0]
+    n = s[1]
+
+    if callable(metric):
+        mstr = getattr(metric, '__name__', 'Unknown')
+        metric_info = _METRIC_ALIAS.get(mstr, None)
+        if metric_info is not None:
+            XA, XB, typ, kwargs = _validate_cdist_input(
+                XA, XB, mA, mB, n, metric_info, **kwargs)
+        return _cdist_callable(XA, XB, metric=metric, out=out, **kwargs)
+    elif isinstance(metric, str):
+        mstr = metric.lower()
+        metric_info = _METRIC_ALIAS.get(mstr, None)
+        if metric_info is not None:
+            cdist_fn = metric_info.cdist_func
+            return cdist_fn(XA, XB, out=out, **kwargs)
+        elif mstr.startswith("test_"):
+            metric_info = _TEST_METRICS.get(mstr, None)
+            if metric_info is None:
+                raise ValueError(f'Unknown "Test" Distance Metric: {mstr[5:]}')
+            XA, XB, typ, kwargs = _validate_cdist_input(
+                XA, XB, mA, mB, n, metric_info, **kwargs)
+            return _cdist_callable(
+                XA, XB, metric=metric_info.dist_func, out=out, **kwargs)
+        else:
+            raise ValueError(f'Unknown Distance Metric: {mstr}')
+    else:
+        raise TypeError('2nd argument metric must be a string identifier '
+                        'or a function.')
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/distance.pyi b/__packaged__/coreml/.python_dependencies/scipy/spatial/distance.pyi
new file mode 100644
index 00000000..dee0cfb8
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/distance.pyi
@@ -0,0 +1,216 @@
+import sys
+from typing import (overload, Optional, Any, Union, Tuple, SupportsFloat,
+                    Literal, Protocol, SupportsIndex)
+
+import numpy as np
+from numpy.typing import ArrayLike, NDArray
+
+# Anything that can be parsed by `np.float64.__init__` and is thus
+# compatible with `ndarray.__setitem__` (for a float64 array)
+_FloatValue = Union[None, str, bytes, SupportsFloat, SupportsIndex]
+
+class _MetricCallback1(Protocol):
+    def __call__(
+        self, __XA: NDArray[Any], __XB: NDArray[Any]
+    ) -> _FloatValue: ...
+
+class _MetricCallback2(Protocol):
+    def __call__(
+        self, __XA: NDArray[Any], __XB: NDArray[Any], **kwargs: Any
+    ) -> _FloatValue: ...
+
+# TODO: Use a single protocol with a parameter specification variable
+# once available (PEP 612)
+_MetricCallback = Union[_MetricCallback1, _MetricCallback2]
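+
+# A hypothetical PEP 612 spelling (editorial sketch; left as a comment
+# because these stubs still target pre-3.10 typing):
+#
+#     from typing import ParamSpec
+#
+#     _P = ParamSpec("_P")
+#
+#     class _MetricCallback(Protocol[_P]):
+#         def __call__(
+#             self, __XA: NDArray[Any], __XB: NDArray[Any],
+#             *args: _P.args, **kwargs: _P.kwargs,
+#         ) -> _FloatValue: ...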
+
+_MetricKind = Literal[
+    'braycurtis',
+    'canberra',
+    'chebychev', 'chebyshev', 'cheby', 'cheb', 'ch',
+    'cityblock', 'cblock', 'cb', 'c',
+    'correlation', 'co',
+    'cosine', 'cos',
+    'dice',
+    'euclidean', 'euclid', 'eu', 'e',
+    'hamming', 'hamm', 'ha', 'h',
+    'minkowski', 'mi', 'm', 'pnorm',
+    'jaccard', 'jacc', 'ja', 'j',
+    'jensenshannon', 'js',
+    'kulsinski', 'kulczynski1',
+    'mahalanobis', 'mahal', 'mah',
+    'rogerstanimoto',
+    'russellrao',
+    'seuclidean', 'se', 's',
+    'sokalmichener',
+    'sokalsneath',
+    'sqeuclidean', 'sqe', 'sqeuclid',
+    'yule',
+]
+
+# Function annotations
+
+def braycurtis(
+    u: ArrayLike, v: ArrayLike, w: Optional[ArrayLike] = ...
+) -> np.float64: ...
+
+def canberra(
+    u: ArrayLike, v: ArrayLike, w: Optional[ArrayLike] = ...
+) -> np.float64: ...
+
+# TODO: Add `metric`-specific overloads
+# Returns a float64 or float128 array, depending on the input dtype
+@overload
+def cdist(
+    XA: ArrayLike,
+    XB: ArrayLike,
+    metric: _MetricKind = ...,
+    *,
+    out: None | NDArray[np.floating[Any]] = ...,
+    p: float = ...,
+    w: Optional[ArrayLike] = ...,
+    V: Optional[ArrayLike] = ...,
+    VI: Optional[ArrayLike] = ...,
+) -> NDArray[np.floating[Any]]: ...
+@overload
+def cdist(
+    XA: ArrayLike,
+    XB: ArrayLike,
+    metric: _MetricCallback,
+    *,
+    out: None | NDArray[np.floating[Any]] = ...,
+    **kwargs: Any,
+) -> NDArray[np.floating[Any]]: ...
+
+# TODO: Wait for dtype support; the return type is
+# dependent on the input arrays dtype
+def chebyshev(
+    u: ArrayLike, v: ArrayLike, w: Optional[ArrayLike] = ...
+) -> Any: ...
+
+# TODO: Wait for dtype support; the return type is
+# dependent on the input arrays dtype
+def cityblock(
+    u: ArrayLike, v: ArrayLike, w: Optional[ArrayLike] = ...
+) -> Any: ...
+
+def correlation(
+    u: ArrayLike, v: ArrayLike, w: Optional[ArrayLike] = ..., centered: bool = ...
+) -> np.float64: ...
+
+def cosine(
+    u: ArrayLike, v: ArrayLike, w: Optional[ArrayLike] = ...
+) -> np.float64: ...
+
+def dice(
+    u: ArrayLike, v: ArrayLike, w: Optional[ArrayLike] = ...
+) -> float: ...
+
+def directed_hausdorff(
+    u: ArrayLike, v: ArrayLike, seed: Optional[int] = ...
+) -> Tuple[float, int, int]: ...
+
+def euclidean(
+    u: ArrayLike, v: ArrayLike, w: Optional[ArrayLike] = ...
+) -> float: ...
+
+def hamming(
+    u: ArrayLike, v: ArrayLike, w: Optional[ArrayLike] = ...
+) -> np.float64: ...
+
+def is_valid_dm(
+    D: ArrayLike,
+    tol: float = ...,
+    throw: bool = ...,
+    name: Optional[str] = ...,
+    warning: bool = ...,
+) -> bool: ...
+
+def is_valid_y(
+    y: ArrayLike,
+    warning: bool = ...,
+    throw: bool = ...,
+    name: Optional[str] = ...,
+) -> bool: ...
+
+def jaccard(
+    u: ArrayLike, v: ArrayLike, w: Optional[ArrayLike] = ...
+) -> np.float64: ...
+
+def jensenshannon(
+    p: ArrayLike, q: ArrayLike, base: Optional[float] = ...
+) -> np.float64: ...
+
+def kulsinski(
+    u: ArrayLike, v: ArrayLike, w: Optional[ArrayLike] = ...
+) -> np.float64: ...
+
+def kulczynski1(
+    u: ArrayLike, v: ArrayLike, w: Optional[ArrayLike] = ...
+) -> np.float64: ...
+
+def mahalanobis(
+    u: ArrayLike, v: ArrayLike, VI: ArrayLike
+) -> np.float64: ...
+
+def minkowski(
+    u: ArrayLike, v: ArrayLike, p: float = ..., w: Optional[ArrayLike] = ...
+) -> float: ...
+
+def num_obs_dm(d: ArrayLike) -> int: ...
+
+def num_obs_y(Y: ArrayLike) -> int: ...
+
+# TODO: Add `metric`-specific overloads
+@overload
+def pdist(
+    X: ArrayLike,
+    metric: _MetricKind = ...,
+    *,
+    out: None | NDArray[np.floating[Any]] = ...,
+    p: float = ...,
+    w: Optional[ArrayLike] = ...,
+    V: Optional[ArrayLike] = ...,
+    VI: Optional[ArrayLike] = ...,
+) -> NDArray[np.floating[Any]]: ...
+@overload
+def pdist(
+    X: ArrayLike,
+    metric: _MetricCallback,
+    *,
+    out: None | NDArray[np.floating[Any]] = ...,
+    **kwargs: Any,
+) -> NDArray[np.floating[Any]]: ...
+
+def seuclidean(
+    u: ArrayLike, v: ArrayLike, V: ArrayLike
+) -> float: ...
+
+def sokalmichener(
+    u: ArrayLike, v: ArrayLike, w: Optional[ArrayLike] = ...
+) -> float: ...
+
+def sokalsneath(
+    u: ArrayLike, v: ArrayLike, w: Optional[ArrayLike] = ...
+) -> np.float64: ...
+
+def sqeuclidean(
+    u: ArrayLike, v: ArrayLike, w: Optional[ArrayLike] = ...
+) -> np.float64: ...
+
+def squareform(
+    X: ArrayLike,
+    force: Literal["no", "tomatrix", "tovector"] = ...,
+    checks: bool = ...,
+) -> NDArray[Any]: ...
+
+def rogerstanimoto(
+    u: ArrayLike, v: ArrayLike, w: Optional[ArrayLike] = ...
+) -> float: ...
+
+def russellrao(
+    u: ArrayLike, v: ArrayLike, w: Optional[ArrayLike] = ...
+) -> float: ...
+
+def yule(
+    u: ArrayLike, v: ArrayLike, w: Optional[ArrayLike] = ...
+) -> float: ...
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/kdtree.py b/__packaged__/coreml/.python_dependencies/scipy/spatial/kdtree.py
new file mode 100644
index 00000000..3ecca18b
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/kdtree.py
@@ -0,0 +1,34 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.spatial` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _kdtree
+
+
+__all__ = [  # noqa: F822
+    'KDTree',
+    'Rectangle',
+    'cKDTree',
+    'cKDTreeNode',
+    'distance_matrix',
+    'minkowski_distance',
+    'minkowski_distance_p',
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.spatial.kdtree is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.spatial instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.spatial` namespace, "
+                  "the `scipy.spatial.kdtree` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_kdtree, name)
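+
+# Editorial sketch (not part of the original module): accessing a name
+# through this deprecated shim still succeeds, but first emits a
+# DeprecationWarning, e.g.
+#
+#     >>> from scipy.spatial.kdtree import KDTree  # doctest: +SKIP
+#     DeprecationWarning: Please use `KDTree` from the `scipy.spatial`
+#     namespace, the `scipy.spatial.kdtree` namespace is deprecated.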
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/qhull.py b/__packaged__/coreml/.python_dependencies/scipy/spatial/qhull.py
new file mode 100644
index 00000000..0a2fd077
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/qhull.py
@@ -0,0 +1,37 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.spatial` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _qhull
+
+
+__all__ = [  # noqa: F822
+    'ConvexHull',
+    'Delaunay',
+    'HalfspaceIntersection',
+    'QhullError',
+    'Voronoi',
+    'os',
+    'sys',
+    'tempfile',
+    'threading',
+    'tsearch',
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.spatial.qhull is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.spatial instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.spatial` namespace, "
+                  "the `scipy.spatial.qhull` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_qhull, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/qhull_src/COPYING.txt b/__packaged__/coreml/.python_dependencies/scipy/spatial/qhull_src/COPYING.txt
new file mode 100644
index 00000000..4ac02a07
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/qhull_src/COPYING.txt
@@ -0,0 +1,38 @@
+                    Qhull, Copyright (c) 1993-2019
+                    
+                            C.B. Barber
+                           Arlington, MA 
+                          
+                               and
+
+       The National Science and Technology Research Center for
+        Computation and Visualization of Geometric Structures
+                        (The Geometry Center)
+                       University of Minnesota
+
+                       email: qhull@qhull.org
+
+This software includes Qhull from C.B. Barber and The Geometry Center.  
+Qhull is copyrighted as noted above.  Qhull is free software and may 
+be obtained via http from www.qhull.org.  It may be freely copied, modified, 
+and redistributed under the following conditions:
+
+1. All copyright notices must remain intact in all files.
+
+2. A copy of this text file must be distributed along with any copies 
+   of Qhull that you redistribute; this includes copies that you have 
+   modified, or copies of programs or other software products that 
+   include Qhull.
+
+3. If you modify Qhull, you must include a notice giving the
+   name of the person performing the modification, the date of
+   modification, and the reason for such modification.
+
+4. When distributing modified versions of Qhull, or other software 
+   products that include Qhull, you must provide notice that the original 
+   source code may be obtained as noted above.
+
+5. There is no warranty or other guarantee of fitness for Qhull, it is 
+   provided solely "as is".  Bug reports or fixes may be sent to 
+   qhull_bug@qhull.org; the authors may or may not act on them as 
+   they desire.
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/cdist-X1.txt b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/cdist-X1.txt
new file mode 100644
index 00000000..833d5bdf
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/cdist-X1.txt
@@ -0,0 +1,10 @@
+1.147593763490969421e-01 8.926156143344999849e-01 1.437758624645746330e-02 1.803435962879929022e-02 5.533046214065578949e-01 5.554315640747428118e-01 4.497546637814608950e-02 4.438089247948049376e-01 7.984582810220538507e-01 2.752880789161644692e-01 1.344667112315823809e-01 9.230479561452992199e-01 6.040471462941819913e-01 3.797251652770228247e-01 4.316042735592399149e-01 5.312356915348823705e-01 4.348143005129563310e-01 3.111531488508799681e-01 9.531194313908697424e-04 8.212995023500069269e-02 6.689953269869852726e-01 9.914864535288493430e-01 8.037556036341153565e-01
+9.608925123801395074e-01 2.974451233678974127e-01 9.001110330654185088e-01 5.824163330415995654e-01 7.308574928293812834e-01 2.276154562412870952e-01 7.306791076039623745e-01 8.677244866905511333e-01 9.160806456176984192e-01 6.157216959991280714e-01 5.149053524695440531e-01 3.056427344890983999e-01 9.790557366933895223e-01 4.484995861076724877e-01 4.776550391081165747e-01 7.210436977670631187e-01 9.136399501661039979e-01 4.260275733550000776e-02 5.943900041968954717e-01 3.864571606342745991e-01 9.442027665110838131e-01 4.779949058608601309e-02 6.107551944250865228e-01
+3.297286578103622023e-01 5.980207401936733502e-01 3.673301293561567205e-01 2.585830520887681949e-01 4.660558746104259686e-01 6.083795956610364986e-01 4.535206368070313632e-01 6.873989778785424276e-01 5.130152688495458468e-01 7.665877846542720198e-01 3.444402973525138023e-01 3.583658123644906102e-02 7.924818220986856732e-01 8.746685720522412444e-01 3.010105569182431884e-01 6.012239357385538163e-01 6.233737362204671006e-01 4.830438698668915176e-01 2.317286885842551047e-02 7.585989958123050547e-01 7.108257632278830451e-01 1.551024884178199281e-01 2.665485998155288083e-01
+2.456278068903017253e-02 4.148739837711815648e-01 1.986372227934196655e-01 6.920408530298168825e-01 1.003067576685774398e-01 7.421560456480125190e-01 1.808453980608998313e-01 4.251297882537475870e-01 6.773002683522370004e-01 4.084108792570182445e-01 7.462888013191590897e-01 8.069930220529277776e-01 9.211110587681808903e-01 4.141491046181076108e-01 7.486318689260342829e-01 9.515405507589296263e-01 4.634288892577109742e-03 8.027593488166355762e-01 3.010346805217798405e-01 8.663248877242523127e-01 2.479968181181605447e-01 5.619851096054278017e-01 3.903886764590250857e-01
+7.122019976035700584e-01 6.188878051047785878e-01 7.290897087051201320e-01 6.334802157757637442e-01 5.523084734954342156e-01 5.614937129563645213e-01 2.496741051791574462e-01 5.972227939599233926e-01 1.786590597761109622e-01 2.609525984850900038e-01 7.210438943286010538e-01 2.211429064605652250e-01 9.140497572472672250e-02 1.430242193668443962e-01 7.856446942916397447e-01 4.635256358156553125e-01 5.278744289813760426e-01 3.702808015407184072e-01 5.527073830480792038e-01 6.370732917599846168e-01 9.953487928925482953e-01 3.021789770611936765e-01 3.354901923998221402e-02
+6.509638560895427695e-01 8.387598220902757751e-01 7.761375971745763103e-01 1.481627639227802717e-01 3.529474982902305324e-01 4.883093646287851586e-01 9.652923033658690199e-01 9.500680513565308294e-01 3.061885005078281985e-01 7.271902818906019750e-01 2.358962978196710303e-03 7.359889703223099211e-01 8.988893768074724955e-01 4.135279653937307121e-02 8.516441856688283796e-01 4.889597623270667270e-01 5.575909822114655245e-01 9.010853652261575641e-01 2.912844516556202246e-01 9.088759383368658629e-01 8.104351227460024898e-01 8.080695436776826890e-01 1.430530913253185155e-01
+8.048001196608134400e-01 3.066089444418462762e-02 9.021887554292090661e-01 6.154331491807940591e-02 1.378912575206647784e-02 5.775720193142440673e-01 1.219298963069791464e-01 1.883270243412101808e-01 5.569262398688379356e-02 8.964817777510125651e-02 7.977092785346929782e-01 4.878149375226197293e-01 4.511973131518809410e-02 1.858690046801604323e-01 6.947686471083162063e-01 5.884058794291086025e-01 8.638884676612634816e-01 3.855470871341656336e-01 3.495049047300468059e-01 2.767740932353948136e-01 4.731087031714035218e-01 6.679001673437914288e-01 7.502944200696660682e-01
+6.527328264244687261e-01 8.289483383553154505e-01 9.179741348282299818e-01 1.065639864466713105e-01 6.253616929058514184e-01 5.927750325266062381e-01 3.039157425463192563e-01 2.452766763359194302e-01 6.514027700704632107e-01 5.529218485487964463e-01 4.941158239308394151e-01 6.605306467722642516e-01 2.273688037050677346e-01 4.282616592244774534e-01 2.956128257930247250e-01 1.154803628237965896e-01 9.228220410235263849e-01 6.663525307676617659e-01 1.908852615936970087e-01 9.921383408926374159e-01 4.988716450388516188e-01 1.014900352736023414e-01 3.363930180244284474e-01
+2.914369076275757919e-01 5.196673601143533272e-01 7.420144907858341465e-01 1.768984185504740569e-01 5.296766993228564369e-01 5.922023566159900776e-01 5.965161262020234334e-01 3.810272333046110793e-01 8.368797246118340194e-01 7.896422363801189892e-01 9.655797561098209414e-01 4.430034032346981121e-01 2.780869795706976122e-01 3.047310845416009162e-01 8.051138863500326703e-01 6.731468634690835895e-01 4.743383036815584930e-01 9.530709614322225853e-01 7.753587619850917934e-01 2.801137109357491051e-01 6.182543660889736614e-01 5.005218857766725593e-01 9.071447804755052857e-01
+2.075071644012620453e-01 4.834950086973934802e-01 3.037011473860764532e-01 6.476084284887700937e-01 8.107195771564194020e-01 7.869075869075803364e-01 6.851234019375299633e-01 3.544187468104398331e-02 4.847673235908021017e-01 5.690262846164507726e-01 1.663354142616256803e-01 9.692796809752548537e-01 4.133441725866372485e-01 6.729167604487583665e-01 3.998813427407297283e-01 8.272617414104491695e-01 2.129248316324727774e-01 6.517004761357130249e-01 7.363013506605019520e-01 4.072375306356985636e-01 4.463336683526665238e-01 5.485059309728204102e-01 1.981745754527846071e-01
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/cdist-X2.txt b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/cdist-X2.txt
new file mode 100644
index 00000000..fc3ea196
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/cdist-X2.txt
@@ -0,0 +1,20 @@
+7.680465556300619667e-02 4.675022344069014180e-01 8.955498989131543963e-01 3.816236071436276411e-01 1.109030077070989329e-01 2.318928815459808668e-02 7.477394240984251983e-01 1.202289789304434864e-01 8.007290497575981769e-01 6.795195698871731027e-01 6.568225762396605605e-01 2.231475263228478445e-01 7.064624077661341151e-02 1.081656666815267176e-02 1.592069359090128033e-01 1.363392203645097389e-01 9.277020735447568667e-01 8.103136564528209407e-01 5.229467676276455812e-02 7.708020259874025504e-01 6.527954747473352359e-02 5.516397414886525796e-01 3.653371861367954443e-01
+8.144399106025798085e-01 7.731852525462976633e-01 6.909477620673205589e-01 9.696063817000286633e-01 4.297887511677249694e-01 6.989600553425188156e-01 7.310201335033380543e-01 3.135256147868910048e-01 5.715578037275241829e-01 3.935000744675094531e-01 2.057715781268398825e-01 5.892508589665171881e-01 8.512951599236765476e-01 9.569808799061578775e-01 6.164885878024699561e-01 4.714185430004367294e-01 6.128831737628155363e-01 6.641799309623502845e-01 6.001985185338730711e-01 4.231922889723856995e-01 7.605249308075449077e-01 1.064530958018087281e-01 6.306470691957204444e-01
+4.265470127256254518e-01 5.933766716280767239e-01 3.698589270536845053e-02 2.173799740537294412e-01 3.032679325475639009e-01 4.271831790058847611e-01 1.828944535901013690e-01 4.772333422710156592e-01 2.564773455194128138e-01 7.120329875362141347e-01 8.952243430110462530e-01 1.808777012183288013e-01 3.612151871458374464e-01 3.960999167923041631e-01 1.821669970670747318e-02 8.835474857189200559e-01 1.353104648821573663e-01 3.457291739160937016e-01 1.126467375304566199e-01 4.107293162402323450e-01 4.051719311053743056e-01 4.007382985250427243e-01 1.286905671428811848e-01
+2.910657003883979632e-01 9.616259180685315933e-03 2.033032441536681834e-01 1.096599110293863255e-01 4.191101704605176836e-01 5.462131536027151624e-01 8.393047907010142694e-01 9.046805198676335369e-01 7.009863472176891541e-01 2.508215985039629059e-01 6.754410796667598138e-01 6.740895474032024826e-01 1.358993708621679675e-01 8.219861775211464439e-01 6.322220445623235596e-01 2.766813559002430090e-01 6.575983861590951607e-01 9.515869708336625044e-01 8.654526462353933081e-01 3.450245117834797037e-01 5.649032890631299209e-01 4.717687914789682191e-01 3.296483580510030098e-01
+9.172477457635394016e-01 3.057396583041891436e-01 7.335332344225760082e-01 8.370236206345178509e-01 3.765464253115927695e-01 5.089680319287778199e-01 1.202325719268168003e-01 9.717771065272349240e-01 5.907820104019682050e-01 9.809211614977710880e-01 9.064285003671219698e-01 8.848841466121748489e-01 2.043407730734815297e-01 9.157600394927275511e-01 4.532260315147775831e-01 4.241077335005828397e-01 1.751730149568804240e-01 4.090412146081819911e-01 3.632197861847064058e-02 5.832539334970230360e-01 4.041848151536805434e-01 3.603643989086504629e-01 1.838411383882069261e-01
+2.508806403290032572e-01 4.381403985282813496e-01 4.694787405018008286e-02 6.353900562024634713e-01 1.200813444244532846e-01 6.072397042913001419e-01 9.937255904754030977e-01 4.916670237677555066e-01 3.473845913923001572e-01 3.526875922864345370e-01 5.448595548197197047e-01 2.245096010156972799e-01 9.003258279804994269e-01 3.534560469735994470e-01 2.989266066346342177e-01 4.621024982808636938e-01 9.626538866576676012e-01 9.791401720716153001e-01 7.138514287330390840e-01 9.832862333928654719e-01 3.233999591031431198e-01 5.406467224926423398e-01 9.581890295057201579e-01
+5.210583601680578436e-01 4.598159993059653949e-01 2.111497132057748027e-01 5.949977700916546652e-01 6.342618461422359077e-01 9.888228769705599275e-01 6.096770711536318998e-01 7.548431368960863974e-01 7.490858664860100546e-01 3.186213496546415058e-01 7.895687083231245351e-01 4.178326793268141159e-01 8.095818334534051752e-01 7.886271673523481684e-01 4.038905626506847923e-01 3.652649247094948981e-01 8.267205959224892542e-01 6.433617243328785262e-01 3.117681563249452559e-01 9.675995575054980868e-01 3.675673836358472890e-01 5.863757289184046151e-01 9.099029857959717305e-02
+4.024573981231733821e-01 3.578997554002771864e-01 3.519299868071553705e-01 7.417747693762357653e-01 2.963713903285800644e-01 9.602967989298948348e-01 3.811392331739601458e-01 5.493237898295448840e-01 6.835113342793640578e-01 2.304506220807415184e-01 3.727299857731285471e-01 5.450263991912108752e-01 6.951521210987908761e-01 6.474582745861203747e-01 6.316089475403589004e-01 5.672043967425510758e-02 9.034937506977609445e-01 2.332567550780038079e-01 1.096955741449157085e-02 8.870663813493575578e-01 4.384385452180562526e-01 7.100898998169548060e-01 3.245358176196319056e-01
+9.162009194452818139e-01 5.572224742426723498e-02 3.445910686865658601e-01 9.683564008127462097e-01 9.375063149031520604e-01 9.128188852869822956e-02 9.613605414326487075e-01 5.298598697556915482e-01 6.724799695520149445e-01 1.269103938571825019e-02 1.008406153387807480e-01 8.951105272379104028e-01 1.585460318853607609e-01 6.739986455059543413e-01 5.345419321702655768e-01 6.248843899572337213e-01 3.050288488994817859e-01 1.423645553465189284e-01 1.802121190541096096e-01 9.474646822694763326e-01 2.345716438587298613e-01 9.688281784764296578e-01 1.845165243240991515e-01
+2.548297646910531178e-01 2.580877375379494465e-01 1.355482532666937301e-01 6.478812986505504412e-01 9.971695982152032345e-01 2.606721082477282403e-01 5.483439686378906996e-01 4.409612606704470528e-01 4.396442074915688503e-01 7.414262832597111608e-01 7.308840725375539416e-01 8.072095530497225280e-02 6.829509968656330976e-01 5.700030854230387911e-01 3.801845336730320657e-01 2.481059916867158766e-01 3.977295094395927322e-03 5.749480512407895150e-01 4.112033136603401307e-01 8.676159710377848722e-01 9.062646588480167686e-01 3.326691167317923359e-01 8.498307982774666591e-01
+4.464338109330643345e-01 8.546516760817471914e-01 7.384800352329814466e-01 3.692485164984804502e-02 2.915662689505471583e-02 9.010049994217171898e-01 8.622900253010918892e-01 9.786230638032608065e-01 6.546824077297251909e-01 6.342297560006789903e-01 2.230339826582647955e-01 7.658846744185553446e-01 4.603043831539479491e-01 2.017100469861691225e-01 4.891590639893540482e-01 1.937140918314912419e-01 8.161582138652878626e-01 5.597293607114051106e-02 8.423261093326828153e-02 5.105392204475533990e-02 8.234193902673621057e-01 1.784268309975372002e-01 9.118997881986501408e-02
+8.588746913421980711e-01 1.479641118621310980e-02 1.375875301146138874e-01 7.533888774725254756e-01 5.782592791549248101e-01 9.128573037619659436e-01 1.831275762880391067e-01 3.471382864827737835e-01 4.859524740929310749e-02 8.955146541561730400e-01 4.787220791101074457e-01 4.222803577759057791e-01 8.469923964908064873e-01 6.300290047587608910e-02 1.020873237837905956e-01 3.585612487182909813e-02 6.320107119904569970e-01 5.891245970008752719e-01 1.104698053665007507e-01 4.233226558073774903e-01 4.432217054386708988e-01 2.864765416628194394e-01 2.489777211814803159e-02
+5.343810659756068615e-01 4.829076396403546578e-01 8.364480888953172988e-01 8.931374995414760321e-01 6.034161442354715188e-01 3.578336000768178593e-03 4.100579775972763574e-01 3.968667908067096128e-01 5.897163653686778861e-01 3.003241263928478899e-01 2.520935203143799264e-01 3.112129371563532310e-02 9.052865295974613646e-01 1.172285124002711010e-01 4.840001666149388315e-01 3.424620676348436588e-01 5.526057133826853818e-01 6.346139530261846184e-01 5.747945930485597321e-01 1.389915612177697879e-01 2.413801217666421417e-01 7.829900796662081497e-01 7.213528084845653998e-01
+9.384509283406079483e-01 6.303019601671526750e-01 1.787921522728125323e-01 1.556003868047917127e-02 5.662397078816850948e-01 3.437473614806091371e-01 8.615844972800188462e-01 7.624380237306396246e-01 1.096468347898514883e-01 1.276566836610887323e-01 8.479188493443535757e-01 3.634713454428405432e-01 7.478112314318967613e-01 9.856395696968375253e-01 6.250293654177319080e-02 1.919327272501809567e-01 1.415594476031050153e-01 7.224057351041784925e-01 8.452145259310355208e-01 5.434318833772002755e-01 5.177620959731277228e-02 3.358977598185840518e-01 2.542654881527960375e-01
+4.800909104006243489e-01 3.651345393613150137e-01 3.657093052788148446e-01 8.579662326651369408e-01 5.787694361240260932e-01 6.491966196891312268e-01 3.252508517294879775e-01 8.639694334693422961e-01 3.028097078756678551e-01 6.295814666338699350e-01 7.305627351548695803e-01 6.975931849120264872e-03 8.321205159004851915e-01 2.681809305821257761e-01 3.628869474597150591e-01 9.598981434716586936e-01 5.947913523332928332e-01 7.794864238003402779e-01 2.819511239444029149e-01 5.134200958476284882e-01 7.284684743064278045e-01 3.099571109539331903e-01 1.502222882866774967e-01
+2.463382654375219083e-01 4.465700737264240994e-01 7.180855317941433613e-01 5.056099420785193921e-01 6.182117344332578313e-01 2.370453793561340117e-01 9.831748018047525850e-01 6.397098184531551102e-01 8.260469782208745837e-02 7.474671691560941245e-01 9.963429983418570224e-02 5.450078811081275898e-01 5.370188678062637333e-02 2.774024442708808991e-01 2.082643088545442778e-01 2.704155352788065736e-01 7.225035580445194894e-01 4.866791976239246420e-01 1.357043111201584606e-01 7.911335827987711067e-01 7.278977102006007893e-01 6.880892094410231419e-01 1.029231496520791600e-01
+6.901796117735281566e-01 1.558248977395644275e-01 4.241818789360329855e-01 5.055658246392458199e-01 1.756288758075611467e-01 4.215083703818177652e-01 7.809231602323289945e-01 1.170053878686481141e-01 6.497026323614403243e-01 5.733120641440232479e-01 4.407703406152092551e-01 5.608677124532297498e-01 7.471045703286000039e-01 3.334604336022076732e-01 8.927208811415126011e-01 9.794565286182396191e-01 9.621542824973521313e-01 3.945825239405253981e-01 8.338963875792834157e-01 9.310552325082104286e-01 7.688283033784242271e-01 3.798823731047119567e-01 1.459993613028365278e-02
+7.848623555505630511e-01 2.681039365355797344e-03 7.833208051794043891e-01 8.184381915171493604e-01 4.682581645582317709e-01 2.391069309436419932e-01 1.765377537168698607e-01 9.863494676539893424e-01 4.378412300863872009e-01 7.494505491149090481e-01 1.942180356195394308e-01 9.981402467222395547e-01 7.992190944052800505e-01 1.350875702852057936e-01 4.950149186748543650e-01 7.243422481248201761e-01 3.544596746353472216e-01 8.320192561472177228e-01 9.776840296475269865e-01 7.733852731914863110e-01 2.305732998099923048e-01 9.746878189802981041e-01 7.747723331200035979e-01
+6.521099013127149568e-01 5.452399443648201505e-01 8.146707517183656710e-01 3.827256063695345656e-01 7.954832091744263867e-01 7.834427643148527132e-01 9.661317930643520402e-02 9.215673965718058636e-01 4.914305728788055383e-01 4.105628408027649501e-01 9.844647830893304974e-02 3.974831165301851987e-01 3.857608898053827007e-01 5.520210781401946321e-01 3.445787541654143915e-03 4.552922057017416702e-01 7.456544561760444223e-01 4.753985092154335845e-01 2.821385239833401615e-01 7.560136035104459973e-01 8.453142510471420845e-01 6.679627143276523071e-01 6.910882868284401459e-01
+8.526493480446283302e-01 1.183917973068240315e-01 6.163988861865119517e-01 5.751899460059114455e-01 1.638797964925038375e-01 8.214597298784013235e-01 5.424670654187370156e-01 1.806631819658732763e-01 9.268107278221827672e-01 4.127397378597359445e-01 7.529877485901653733e-01 1.714251090083847018e-01 2.601487784245806179e-01 2.028326156742237263e-01 5.299879450122358948e-01 7.587877062981395193e-01 4.070738595375062996e-01 3.546903049793261875e-01 8.695365138547607176e-01 1.447085661525142619e-01 3.193366245820845606e-01 8.797841086211429795e-01 2.666562188639977071e-01
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/degenerate_pointset.npz b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/degenerate_pointset.npz
new file mode 100644
index 00000000..1d393029
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/degenerate_pointset.npz differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/iris.txt b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/iris.txt
new file mode 100644
index 00000000..4d78390c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/iris.txt
@@ -0,0 +1,150 @@
+5.099999999999999645e+00 3.500000000000000000e+00 1.399999999999999911e+00 2.000000000000000111e-01
+4.900000000000000355e+00 3.000000000000000000e+00 1.399999999999999911e+00 2.000000000000000111e-01
+4.700000000000000178e+00 3.200000000000000178e+00 1.300000000000000044e+00 2.000000000000000111e-01
+4.599999999999999645e+00 3.100000000000000089e+00 1.500000000000000000e+00 2.000000000000000111e-01
+5.000000000000000000e+00 3.600000000000000089e+00 1.399999999999999911e+00 2.000000000000000111e-01
+5.400000000000000355e+00 3.899999999999999911e+00 1.699999999999999956e+00 4.000000000000000222e-01
+4.599999999999999645e+00 3.399999999999999911e+00 1.399999999999999911e+00 2.999999999999999889e-01
+5.000000000000000000e+00 3.399999999999999911e+00 1.500000000000000000e+00 2.000000000000000111e-01
+4.400000000000000355e+00 2.899999999999999911e+00 1.399999999999999911e+00 2.000000000000000111e-01
+4.900000000000000355e+00 3.100000000000000089e+00 1.500000000000000000e+00 1.000000000000000056e-01
+5.400000000000000355e+00 3.700000000000000178e+00 1.500000000000000000e+00 2.000000000000000111e-01
+4.799999999999999822e+00 3.399999999999999911e+00 1.600000000000000089e+00 2.000000000000000111e-01
+4.799999999999999822e+00 3.000000000000000000e+00 1.399999999999999911e+00 1.000000000000000056e-01
+4.299999999999999822e+00 3.000000000000000000e+00 1.100000000000000089e+00 1.000000000000000056e-01
+5.799999999999999822e+00 4.000000000000000000e+00 1.199999999999999956e+00 2.000000000000000111e-01
+5.700000000000000178e+00 4.400000000000000355e+00 1.500000000000000000e+00 4.000000000000000222e-01
+5.400000000000000355e+00 3.899999999999999911e+00 1.300000000000000044e+00 4.000000000000000222e-01
+5.099999999999999645e+00 3.500000000000000000e+00 1.399999999999999911e+00 2.999999999999999889e-01
+5.700000000000000178e+00 3.799999999999999822e+00 1.699999999999999956e+00 2.999999999999999889e-01
+5.099999999999999645e+00 3.799999999999999822e+00 1.500000000000000000e+00 2.999999999999999889e-01
+5.400000000000000355e+00 3.399999999999999911e+00 1.699999999999999956e+00 2.000000000000000111e-01
+5.099999999999999645e+00 3.700000000000000178e+00 1.500000000000000000e+00 4.000000000000000222e-01
+4.599999999999999645e+00 3.600000000000000089e+00 1.000000000000000000e+00 2.000000000000000111e-01
+5.099999999999999645e+00 3.299999999999999822e+00 1.699999999999999956e+00 5.000000000000000000e-01
+4.799999999999999822e+00 3.399999999999999911e+00 1.899999999999999911e+00 2.000000000000000111e-01
+5.000000000000000000e+00 3.000000000000000000e+00 1.600000000000000089e+00 2.000000000000000111e-01
+5.000000000000000000e+00 3.399999999999999911e+00 1.600000000000000089e+00 4.000000000000000222e-01
+5.200000000000000178e+00 3.500000000000000000e+00 1.500000000000000000e+00 2.000000000000000111e-01
+5.200000000000000178e+00 3.399999999999999911e+00 1.399999999999999911e+00 2.000000000000000111e-01
+4.700000000000000178e+00 3.200000000000000178e+00 1.600000000000000089e+00 2.000000000000000111e-01
+4.799999999999999822e+00 3.100000000000000089e+00 1.600000000000000089e+00 2.000000000000000111e-01
+5.400000000000000355e+00 3.399999999999999911e+00 1.500000000000000000e+00 4.000000000000000222e-01
+5.200000000000000178e+00 4.099999999999999645e+00 1.500000000000000000e+00 1.000000000000000056e-01
+5.500000000000000000e+00 4.200000000000000178e+00 1.399999999999999911e+00 2.000000000000000111e-01
+4.900000000000000355e+00 3.100000000000000089e+00 1.500000000000000000e+00 1.000000000000000056e-01
+5.000000000000000000e+00 3.200000000000000178e+00 1.199999999999999956e+00 2.000000000000000111e-01
+5.500000000000000000e+00 3.500000000000000000e+00 1.300000000000000044e+00 2.000000000000000111e-01
+4.900000000000000355e+00 3.100000000000000089e+00 1.500000000000000000e+00 1.000000000000000056e-01
+4.400000000000000355e+00 3.000000000000000000e+00 1.300000000000000044e+00 2.000000000000000111e-01
+5.099999999999999645e+00 3.399999999999999911e+00 1.500000000000000000e+00 2.000000000000000111e-01
+5.000000000000000000e+00 3.500000000000000000e+00 1.300000000000000044e+00 2.999999999999999889e-01
+4.500000000000000000e+00 2.299999999999999822e+00 1.300000000000000044e+00 2.999999999999999889e-01
+4.400000000000000355e+00 3.200000000000000178e+00 1.300000000000000044e+00 2.000000000000000111e-01
+5.000000000000000000e+00 3.500000000000000000e+00 1.600000000000000089e+00 5.999999999999999778e-01
+5.099999999999999645e+00 3.799999999999999822e+00 1.899999999999999911e+00 4.000000000000000222e-01
+4.799999999999999822e+00 3.000000000000000000e+00 1.399999999999999911e+00 2.999999999999999889e-01
+5.099999999999999645e+00 3.799999999999999822e+00 1.600000000000000089e+00 2.000000000000000111e-01
+4.599999999999999645e+00 3.200000000000000178e+00 1.399999999999999911e+00 2.000000000000000111e-01
+5.299999999999999822e+00 3.700000000000000178e+00 1.500000000000000000e+00 2.000000000000000111e-01
+5.000000000000000000e+00 3.299999999999999822e+00 1.399999999999999911e+00 2.000000000000000111e-01
+7.000000000000000000e+00 3.200000000000000178e+00 4.700000000000000178e+00 1.399999999999999911e+00
+6.400000000000000355e+00 3.200000000000000178e+00 4.500000000000000000e+00 1.500000000000000000e+00
+6.900000000000000355e+00 3.100000000000000089e+00 4.900000000000000355e+00 1.500000000000000000e+00
+5.500000000000000000e+00 2.299999999999999822e+00 4.000000000000000000e+00 1.300000000000000044e+00
+6.500000000000000000e+00 2.799999999999999822e+00 4.599999999999999645e+00 1.500000000000000000e+00
+5.700000000000000178e+00 2.799999999999999822e+00 4.500000000000000000e+00 1.300000000000000044e+00
+6.299999999999999822e+00 3.299999999999999822e+00 4.700000000000000178e+00 1.600000000000000089e+00
+4.900000000000000355e+00 2.399999999999999911e+00 3.299999999999999822e+00 1.000000000000000000e+00
+6.599999999999999645e+00 2.899999999999999911e+00 4.599999999999999645e+00 1.300000000000000044e+00
+5.200000000000000178e+00 2.700000000000000178e+00 3.899999999999999911e+00 1.399999999999999911e+00
+5.000000000000000000e+00 2.000000000000000000e+00 3.500000000000000000e+00 1.000000000000000000e+00
+5.900000000000000355e+00 3.000000000000000000e+00 4.200000000000000178e+00 1.500000000000000000e+00
+6.000000000000000000e+00 2.200000000000000178e+00 4.000000000000000000e+00 1.000000000000000000e+00
+6.099999999999999645e+00 2.899999999999999911e+00 4.700000000000000178e+00 1.399999999999999911e+00
+5.599999999999999645e+00 2.899999999999999911e+00 3.600000000000000089e+00 1.300000000000000044e+00
+6.700000000000000178e+00 3.100000000000000089e+00 4.400000000000000355e+00 1.399999999999999911e+00
+5.599999999999999645e+00 3.000000000000000000e+00 4.500000000000000000e+00 1.500000000000000000e+00
+5.799999999999999822e+00 2.700000000000000178e+00 4.099999999999999645e+00 1.000000000000000000e+00
+6.200000000000000178e+00 2.200000000000000178e+00 4.500000000000000000e+00 1.500000000000000000e+00
+5.599999999999999645e+00 2.500000000000000000e+00 3.899999999999999911e+00 1.100000000000000089e+00
+5.900000000000000355e+00 3.200000000000000178e+00 4.799999999999999822e+00 1.800000000000000044e+00
+6.099999999999999645e+00 2.799999999999999822e+00 4.000000000000000000e+00 1.300000000000000044e+00
+6.299999999999999822e+00 2.500000000000000000e+00 4.900000000000000355e+00 1.500000000000000000e+00
+6.099999999999999645e+00 2.799999999999999822e+00 4.700000000000000178e+00 1.199999999999999956e+00
+6.400000000000000355e+00 2.899999999999999911e+00 4.299999999999999822e+00 1.300000000000000044e+00
+6.599999999999999645e+00 3.000000000000000000e+00 4.400000000000000355e+00 1.399999999999999911e+00
+6.799999999999999822e+00 2.799999999999999822e+00 4.799999999999999822e+00 1.399999999999999911e+00
+6.700000000000000178e+00 3.000000000000000000e+00 5.000000000000000000e+00 1.699999999999999956e+00
+6.000000000000000000e+00 2.899999999999999911e+00 4.500000000000000000e+00 1.500000000000000000e+00
+5.700000000000000178e+00 2.600000000000000089e+00 3.500000000000000000e+00 1.000000000000000000e+00
+5.500000000000000000e+00 2.399999999999999911e+00 3.799999999999999822e+00 1.100000000000000089e+00
+5.500000000000000000e+00 2.399999999999999911e+00 3.700000000000000178e+00 1.000000000000000000e+00
+5.799999999999999822e+00 2.700000000000000178e+00 3.899999999999999911e+00 1.199999999999999956e+00
+6.000000000000000000e+00 2.700000000000000178e+00 5.099999999999999645e+00 1.600000000000000089e+00
+5.400000000000000355e+00 3.000000000000000000e+00 4.500000000000000000e+00 1.500000000000000000e+00
+6.000000000000000000e+00 3.399999999999999911e+00 4.500000000000000000e+00 1.600000000000000089e+00
+6.700000000000000178e+00 3.100000000000000089e+00 4.700000000000000178e+00 1.500000000000000000e+00
+6.299999999999999822e+00 2.299999999999999822e+00 4.400000000000000355e+00 1.300000000000000044e+00
+5.599999999999999645e+00 3.000000000000000000e+00 4.099999999999999645e+00 1.300000000000000044e+00
+5.500000000000000000e+00 2.500000000000000000e+00 4.000000000000000000e+00 1.300000000000000044e+00
+5.500000000000000000e+00 2.600000000000000089e+00 4.400000000000000355e+00 1.199999999999999956e+00
+6.099999999999999645e+00 3.000000000000000000e+00 4.599999999999999645e+00 1.399999999999999911e+00
+5.799999999999999822e+00 2.600000000000000089e+00 4.000000000000000000e+00 1.199999999999999956e+00
+5.000000000000000000e+00 2.299999999999999822e+00 3.299999999999999822e+00 1.000000000000000000e+00
+5.599999999999999645e+00 2.700000000000000178e+00 4.200000000000000178e+00 1.300000000000000044e+00
+5.700000000000000178e+00 3.000000000000000000e+00 4.200000000000000178e+00 1.199999999999999956e+00
+5.700000000000000178e+00 2.899999999999999911e+00 4.200000000000000178e+00 1.300000000000000044e+00
+6.200000000000000178e+00 2.899999999999999911e+00 4.299999999999999822e+00 1.300000000000000044e+00
+5.099999999999999645e+00 2.500000000000000000e+00 3.000000000000000000e+00 1.100000000000000089e+00
+5.700000000000000178e+00 2.799999999999999822e+00 4.099999999999999645e+00 1.300000000000000044e+00
+6.299999999999999822e+00 3.299999999999999822e+00 6.000000000000000000e+00 2.500000000000000000e+00
+5.799999999999999822e+00 2.700000000000000178e+00 5.099999999999999645e+00 1.899999999999999911e+00
+7.099999999999999645e+00 3.000000000000000000e+00 5.900000000000000355e+00 2.100000000000000089e+00
+6.299999999999999822e+00 2.899999999999999911e+00 5.599999999999999645e+00 1.800000000000000044e+00
+6.500000000000000000e+00 3.000000000000000000e+00 5.799999999999999822e+00 2.200000000000000178e+00
+7.599999999999999645e+00 3.000000000000000000e+00 6.599999999999999645e+00 2.100000000000000089e+00
+4.900000000000000355e+00 2.500000000000000000e+00 4.500000000000000000e+00 1.699999999999999956e+00
+7.299999999999999822e+00 2.899999999999999911e+00 6.299999999999999822e+00 1.800000000000000044e+00
+6.700000000000000178e+00 2.500000000000000000e+00 5.799999999999999822e+00 1.800000000000000044e+00
+7.200000000000000178e+00 3.600000000000000089e+00 6.099999999999999645e+00 2.500000000000000000e+00
+6.500000000000000000e+00 3.200000000000000178e+00 5.099999999999999645e+00 2.000000000000000000e+00
+6.400000000000000355e+00 2.700000000000000178e+00 5.299999999999999822e+00 1.899999999999999911e+00
+6.799999999999999822e+00 3.000000000000000000e+00 5.500000000000000000e+00 2.100000000000000089e+00
+5.700000000000000178e+00 2.500000000000000000e+00 5.000000000000000000e+00 2.000000000000000000e+00
+5.799999999999999822e+00 2.799999999999999822e+00 5.099999999999999645e+00 2.399999999999999911e+00
+6.400000000000000355e+00 3.200000000000000178e+00 5.299999999999999822e+00 2.299999999999999822e+00
+6.500000000000000000e+00 3.000000000000000000e+00 5.500000000000000000e+00 1.800000000000000044e+00
+7.700000000000000178e+00 3.799999999999999822e+00 6.700000000000000178e+00 2.200000000000000178e+00
+7.700000000000000178e+00 2.600000000000000089e+00 6.900000000000000355e+00 2.299999999999999822e+00
+6.000000000000000000e+00 2.200000000000000178e+00 5.000000000000000000e+00 1.500000000000000000e+00
+6.900000000000000355e+00 3.200000000000000178e+00 5.700000000000000178e+00 2.299999999999999822e+00
+5.599999999999999645e+00 2.799999999999999822e+00 4.900000000000000355e+00 2.000000000000000000e+00
+7.700000000000000178e+00 2.799999999999999822e+00 6.700000000000000178e+00 2.000000000000000000e+00
+6.299999999999999822e+00 2.700000000000000178e+00 4.900000000000000355e+00 1.800000000000000044e+00
+6.700000000000000178e+00 3.299999999999999822e+00 5.700000000000000178e+00 2.100000000000000089e+00
+7.200000000000000178e+00 3.200000000000000178e+00 6.000000000000000000e+00 1.800000000000000044e+00
+6.200000000000000178e+00 2.799999999999999822e+00 4.799999999999999822e+00 1.800000000000000044e+00
+6.099999999999999645e+00 3.000000000000000000e+00 4.900000000000000355e+00 1.800000000000000044e+00
+6.400000000000000355e+00 2.799999999999999822e+00 5.599999999999999645e+00 2.100000000000000089e+00
+7.200000000000000178e+00 3.000000000000000000e+00 5.799999999999999822e+00 1.600000000000000089e+00
+7.400000000000000355e+00 2.799999999999999822e+00 6.099999999999999645e+00 1.899999999999999911e+00
+7.900000000000000355e+00 3.799999999999999822e+00 6.400000000000000355e+00 2.000000000000000000e+00
+6.400000000000000355e+00 2.799999999999999822e+00 5.599999999999999645e+00 2.200000000000000178e+00
+6.299999999999999822e+00 2.799999999999999822e+00 5.099999999999999645e+00 1.500000000000000000e+00
+6.099999999999999645e+00 2.600000000000000089e+00 5.599999999999999645e+00 1.399999999999999911e+00
+7.700000000000000178e+00 3.000000000000000000e+00 6.099999999999999645e+00 2.299999999999999822e+00
+6.299999999999999822e+00 3.399999999999999911e+00 5.599999999999999645e+00 2.399999999999999911e+00
+6.400000000000000355e+00 3.100000000000000089e+00 5.500000000000000000e+00 1.800000000000000044e+00
+6.000000000000000000e+00 3.000000000000000000e+00 4.799999999999999822e+00 1.800000000000000044e+00
+6.900000000000000355e+00 3.100000000000000089e+00 5.400000000000000355e+00 2.100000000000000089e+00
+6.700000000000000178e+00 3.100000000000000089e+00 5.599999999999999645e+00 2.399999999999999911e+00
+6.900000000000000355e+00 3.100000000000000089e+00 5.099999999999999645e+00 2.299999999999999822e+00
+5.799999999999999822e+00 2.700000000000000178e+00 5.099999999999999645e+00 1.899999999999999911e+00
+6.799999999999999822e+00 3.200000000000000178e+00 5.900000000000000355e+00 2.299999999999999822e+00
+6.700000000000000178e+00 3.299999999999999822e+00 5.700000000000000178e+00 2.500000000000000000e+00
+6.700000000000000178e+00 3.000000000000000000e+00 5.200000000000000178e+00 2.299999999999999822e+00
+6.299999999999999822e+00 2.500000000000000000e+00 5.000000000000000000e+00 1.899999999999999911e+00
+6.500000000000000000e+00 3.000000000000000000e+00 5.200000000000000178e+00 2.000000000000000000e+00
+6.200000000000000178e+00 3.399999999999999911e+00 5.400000000000000355e+00 2.299999999999999822e+00
+5.900000000000000355e+00 3.000000000000000000e+00 5.099999999999999645e+00 1.800000000000000044e+00
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-boolean-inp.txt b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-boolean-inp.txt
new file mode 100644
index 00000000..0636cc9f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-boolean-inp.txt
@@ -0,0 +1,20 @@
+1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00
+1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00
+1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00
+0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00
+1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00
+1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00
+1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00
+1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00
+1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00
+1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00
+0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00
+1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00
+1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00
+1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00
+0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00
+1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00
+1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00
+0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00
+1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00
+1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-chebyshev-ml-iris.txt b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-chebyshev-ml-iris.txt
new file mode 100644
index 00000000..0aff1267
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-chebyshev-ml-iris.txt
@@ -0,0 +1 @@
+   5.0000000e-01   4.0000000e-01   5.0000000e-01   1.0000000e-01   4.0000000e-01   5.0000000e-01   1.0000000e-01   7.0000000e-01   4.0000000e-01   3.0000000e-01   3.0000000e-01   5.0000000e-01   8.0000000e-01   7.0000000e-01   9.0000000e-01   4.0000000e-01   1.0000000e-01   6.0000000e-01   3.0000000e-01   3.0000000e-01   2.0000000e-01   5.0000000e-01   3.0000000e-01   5.0000000e-01   5.0000000e-01   2.0000000e-01   1.0000000e-01   1.0000000e-01   4.0000000e-01   4.0000000e-01   3.0000000e-01   6.0000000e-01   7.0000000e-01   4.0000000e-01   3.0000000e-01   4.0000000e-01   4.0000000e-01   7.0000000e-01   1.0000000e-01   1.0000000e-01   1.2000000e+00   7.0000000e-01   4.0000000e-01   5.0000000e-01   5.0000000e-01   3.0000000e-01   5.0000000e-01   2.0000000e-01   2.0000000e-01   3.3000000e+00   3.1000000e+00   3.5000000e+00   2.6000000e+00   3.2000000e+00   3.1000000e+00   3.3000000e+00   1.9000000e+00   3.2000000e+00   2.5000000e+00   2.1000000e+00   2.8000000e+00   2.6000000e+00   3.3000000e+00   2.2000000e+00   3.0000000e+00   3.1000000e+00   2.7000000e+00   3.1000000e+00   2.5000000e+00   3.4000000e+00   2.6000000e+00   3.5000000e+00   3.3000000e+00   2.9000000e+00   3.0000000e+00   3.4000000e+00   3.6000000e+00   3.1000000e+00   2.1000000e+00   2.4000000e+00   2.3000000e+00   2.5000000e+00   3.7000000e+00   3.1000000e+00   3.1000000e+00   3.3000000e+00   3.0000000e+00   2.7000000e+00   2.6000000e+00   3.0000000e+00   3.2000000e+00   2.6000000e+00   1.9000000e+00   2.8000000e+00   2.8000000e+00   2.8000000e+00   2.9000000e+00   1.6000000e+00   2.7000000e+00   4.6000000e+00   3.7000000e+00   4.5000000e+00   4.2000000e+00   4.4000000e+00   5.2000000e+00   3.1000000e+00   4.9000000e+00   4.4000000e+00   4.7000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   5.3000000e+00   5.5000000e+00   3.6000000e+00   4.3000000e+00   3.5000000e+00   5.3000000e+00   3.5000000e+00   4.3000000e+00   4.6000000e+00   3.4000000e+00   3.5000000e+00   4.2000000e+00   4.4000000e+00   4.7000000e+00   5.0000000e+00   4.2000000e+00   3.7000000e+00   4.2000000e+00   4.7000000e+00   4.2000000e+00   4.1000000e+00   3.4000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.7000000e+00   4.5000000e+00   4.3000000e+00   3.8000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.7000000e+00   2.0000000e-01   3.0000000e-01   6.0000000e-01   9.0000000e-01   4.0000000e-01   4.0000000e-01   5.0000000e-01   1.0000000e-01   7.0000000e-01   4.0000000e-01   1.0000000e-01   6.0000000e-01   1.0000000e+00   1.4000000e+00   9.0000000e-01   5.0000000e-01   8.0000000e-01   8.0000000e-01   5.0000000e-01   7.0000000e-01   6.0000000e-01   3.0000000e-01   5.0000000e-01   2.0000000e-01   4.0000000e-01   5.0000000e-01   4.0000000e-01   2.0000000e-01   2.0000000e-01   5.0000000e-01   1.1000000e+00   1.2000000e+00   1.0000000e-01   2.0000000e-01   6.0000000e-01   1.0000000e-01   5.0000000e-01   4.0000000e-01   5.0000000e-01   7.0000000e-01   5.0000000e-01   5.0000000e-01   8.0000000e-01   1.0000000e-01   8.0000000e-01   3.0000000e-01   7.0000000e-01   3.0000000e-01   3.3000000e+00   3.1000000e+00   3.5000000e+00   2.6000000e+00   3.2000000e+00   3.1000000e+00   3.3000000e+00   1.9000000e+00   3.2000000e+00   2.5000000e+00   2.1000000e+00   2.8000000e+00   2.6000000e+00   3.3000000e+00   2.2000000e+00   3.0000000e+00   3.1000000e+00   2.7000000e+00   3.1000000e+00   2.5000000e+00   3.4000000e+00   2.6000000e+00   3.5000000e+00   3.3000000e+00   2.9000000e+00  
 3.0000000e+00   3.4000000e+00   3.6000000e+00   3.1000000e+00   2.1000000e+00   2.4000000e+00   2.3000000e+00   2.5000000e+00   3.7000000e+00   3.1000000e+00   3.1000000e+00   3.3000000e+00   3.0000000e+00   2.7000000e+00   2.6000000e+00   3.0000000e+00   3.2000000e+00   2.6000000e+00   1.9000000e+00   2.8000000e+00   2.8000000e+00   2.8000000e+00   2.9000000e+00   1.6000000e+00   2.7000000e+00   4.6000000e+00   3.7000000e+00   4.5000000e+00   4.2000000e+00   4.4000000e+00   5.2000000e+00   3.1000000e+00   4.9000000e+00   4.4000000e+00   4.7000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   5.3000000e+00   5.5000000e+00   3.6000000e+00   4.3000000e+00   3.5000000e+00   5.3000000e+00   3.5000000e+00   4.3000000e+00   4.6000000e+00   3.4000000e+00   3.5000000e+00   4.2000000e+00   4.4000000e+00   4.7000000e+00   5.0000000e+00   4.2000000e+00   3.7000000e+00   4.2000000e+00   4.7000000e+00   4.2000000e+00   4.1000000e+00   3.4000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.7000000e+00   4.5000000e+00   4.3000000e+00   3.8000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.7000000e+00   2.0000000e-01   4.0000000e-01   7.0000000e-01   2.0000000e-01   3.0000000e-01   3.0000000e-01   2.0000000e-01   7.0000000e-01   3.0000000e-01   2.0000000e-01   4.0000000e-01   1.1000000e+00   1.2000000e+00   7.0000000e-01   4.0000000e-01   1.0000000e+00   6.0000000e-01   7.0000000e-01   5.0000000e-01   4.0000000e-01   4.0000000e-01   6.0000000e-01   3.0000000e-01   3.0000000e-01   5.0000000e-01   5.0000000e-01   3.0000000e-01   3.0000000e-01   7.0000000e-01   9.0000000e-01   1.0000000e+00   2.0000000e-01   3.0000000e-01   8.0000000e-01   2.0000000e-01   3.0000000e-01   4.0000000e-01   3.0000000e-01   9.0000000e-01   3.0000000e-01   4.0000000e-01   6.0000000e-01   2.0000000e-01   6.0000000e-01   1.0000000e-01   6.0000000e-01   3.0000000e-01   3.4000000e+00   3.2000000e+00   3.6000000e+00   2.7000000e+00   3.3000000e+00   3.2000000e+00   3.4000000e+00   2.0000000e+00   3.3000000e+00   2.6000000e+00   2.2000000e+00   2.9000000e+00   2.7000000e+00   3.4000000e+00   2.3000000e+00   3.1000000e+00   3.2000000e+00   2.8000000e+00   3.2000000e+00   2.6000000e+00   3.5000000e+00   2.7000000e+00   3.6000000e+00   3.4000000e+00   3.0000000e+00   3.1000000e+00   3.5000000e+00   3.7000000e+00   3.2000000e+00   2.2000000e+00   2.5000000e+00   2.4000000e+00   2.6000000e+00   3.8000000e+00   3.2000000e+00   3.2000000e+00   3.4000000e+00   3.1000000e+00   2.8000000e+00   2.7000000e+00   3.1000000e+00   3.3000000e+00   2.7000000e+00   2.0000000e+00   2.9000000e+00   2.9000000e+00   2.9000000e+00   3.0000000e+00   1.7000000e+00   2.8000000e+00   4.7000000e+00   3.8000000e+00   4.6000000e+00   4.3000000e+00   4.5000000e+00   5.3000000e+00   3.2000000e+00   5.0000000e+00   4.5000000e+00   4.8000000e+00   3.8000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.8000000e+00   4.0000000e+00   4.2000000e+00   5.4000000e+00   5.6000000e+00   3.7000000e+00   4.4000000e+00   3.6000000e+00   5.4000000e+00   3.6000000e+00   4.4000000e+00   4.7000000e+00   3.5000000e+00   3.6000000e+00   4.3000000e+00   4.5000000e+00   4.8000000e+00   5.1000000e+00   4.3000000e+00   3.8000000e+00   4.3000000e+00   4.8000000e+00   4.3000000e+00   4.2000000e+00   3.5000000e+00   4.1000000e+00   4.3000000e+00   3.8000000e+00   3.8000000e+00   4.6000000e+00   4.4000000e+00   3.9000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.8000000e+00   
5.0000000e-01   8.0000000e-01   3.0000000e-01   4.0000000e-01   2.0000000e-01   3.0000000e-01   8.0000000e-01   3.0000000e-01   2.0000000e-01   4.0000000e-01   1.2000000e+00   1.3000000e+00   8.0000000e-01   5.0000000e-01   1.1000000e+00   7.0000000e-01   8.0000000e-01   6.0000000e-01   5.0000000e-01   5.0000000e-01   4.0000000e-01   4.0000000e-01   4.0000000e-01   6.0000000e-01   6.0000000e-01   1.0000000e-01   2.0000000e-01   8.0000000e-01   1.0000000e+00   1.1000000e+00   3.0000000e-01   4.0000000e-01   9.0000000e-01   3.0000000e-01   2.0000000e-01   5.0000000e-01   4.0000000e-01   8.0000000e-01   2.0000000e-01   4.0000000e-01   7.0000000e-01   2.0000000e-01   7.0000000e-01   1.0000000e-01   7.0000000e-01   4.0000000e-01   3.2000000e+00   3.0000000e+00   3.4000000e+00   2.5000000e+00   3.1000000e+00   3.0000000e+00   3.2000000e+00   1.8000000e+00   3.1000000e+00   2.4000000e+00   2.0000000e+00   2.7000000e+00   2.5000000e+00   3.2000000e+00   2.1000000e+00   2.9000000e+00   3.0000000e+00   2.6000000e+00   3.0000000e+00   2.4000000e+00   3.3000000e+00   2.5000000e+00   3.4000000e+00   3.2000000e+00   2.8000000e+00   2.9000000e+00   3.3000000e+00   3.5000000e+00   3.0000000e+00   2.0000000e+00   2.3000000e+00   2.2000000e+00   2.4000000e+00   3.6000000e+00   3.0000000e+00   3.0000000e+00   3.2000000e+00   2.9000000e+00   2.6000000e+00   2.5000000e+00   2.9000000e+00   3.1000000e+00   2.5000000e+00   1.8000000e+00   2.7000000e+00   2.7000000e+00   2.7000000e+00   2.8000000e+00   1.5000000e+00   2.6000000e+00   4.5000000e+00   3.6000000e+00   4.4000000e+00   4.1000000e+00   4.3000000e+00   5.1000000e+00   3.0000000e+00   4.8000000e+00   4.3000000e+00   4.6000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   5.2000000e+00   5.4000000e+00   3.5000000e+00   4.2000000e+00   3.4000000e+00   5.2000000e+00   3.4000000e+00   4.2000000e+00   4.5000000e+00   3.3000000e+00   3.4000000e+00   4.1000000e+00   4.3000000e+00   4.6000000e+00   4.9000000e+00   4.1000000e+00   3.6000000e+00   4.1000000e+00   4.6000000e+00   4.1000000e+00   4.0000000e+00   3.3000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.6000000e+00   4.4000000e+00   4.2000000e+00   3.7000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.6000000e+00   4.0000000e-01   4.0000000e-01   2.0000000e-01   7.0000000e-01   5.0000000e-01   4.0000000e-01   2.0000000e-01   6.0000000e-01   7.0000000e-01   8.0000000e-01   8.0000000e-01   4.0000000e-01   1.0000000e-01   7.0000000e-01   2.0000000e-01   4.0000000e-01   2.0000000e-01   4.0000000e-01   3.0000000e-01   5.0000000e-01   6.0000000e-01   2.0000000e-01   2.0000000e-01   2.0000000e-01   4.0000000e-01   5.0000000e-01   4.0000000e-01   5.0000000e-01   6.0000000e-01   5.0000000e-01   4.0000000e-01   5.0000000e-01   5.0000000e-01   6.0000000e-01   2.0000000e-01   1.0000000e-01   1.3000000e+00   6.0000000e-01   4.0000000e-01   5.0000000e-01   6.0000000e-01   2.0000000e-01   4.0000000e-01   3.0000000e-01   3.0000000e-01   3.3000000e+00   3.1000000e+00   3.5000000e+00   2.6000000e+00   3.2000000e+00   3.1000000e+00   3.3000000e+00   1.9000000e+00   3.2000000e+00   2.5000000e+00   2.1000000e+00   2.8000000e+00   2.6000000e+00   3.3000000e+00   2.2000000e+00   3.0000000e+00   3.1000000e+00   2.7000000e+00   3.1000000e+00   2.5000000e+00   3.4000000e+00   2.6000000e+00   3.5000000e+00   3.3000000e+00   2.9000000e+00   3.0000000e+00   3.4000000e+00   3.6000000e+00   3.1000000e+00   2.1000000e+00   2.4000000e+00   
2.3000000e+00   2.5000000e+00   3.7000000e+00   3.1000000e+00   3.1000000e+00   3.3000000e+00   3.0000000e+00   2.7000000e+00   2.6000000e+00   3.0000000e+00   3.2000000e+00   2.6000000e+00   1.9000000e+00   2.8000000e+00   2.8000000e+00   2.8000000e+00   2.9000000e+00   1.6000000e+00   2.7000000e+00   4.6000000e+00   3.7000000e+00   4.5000000e+00   4.2000000e+00   4.4000000e+00   5.2000000e+00   3.1000000e+00   4.9000000e+00   4.4000000e+00   4.7000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   5.3000000e+00   5.5000000e+00   3.6000000e+00   4.3000000e+00   3.5000000e+00   5.3000000e+00   3.5000000e+00   4.3000000e+00   4.6000000e+00   3.4000000e+00   3.5000000e+00   4.2000000e+00   4.4000000e+00   4.7000000e+00   5.0000000e+00   4.2000000e+00   3.7000000e+00   4.2000000e+00   4.7000000e+00   4.2000000e+00   4.1000000e+00   3.4000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.7000000e+00   4.5000000e+00   4.3000000e+00   3.8000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.7000000e+00   8.0000000e-01   5.0000000e-01   1.0000000e+00   8.0000000e-01   2.0000000e-01   6.0000000e-01   9.0000000e-01   1.1000000e+00   5.0000000e-01   5.0000000e-01   4.0000000e-01   4.0000000e-01   3.0000000e-01   3.0000000e-01   5.0000000e-01   3.0000000e-01   8.0000000e-01   6.0000000e-01   6.0000000e-01   9.0000000e-01   5.0000000e-01   4.0000000e-01   5.0000000e-01   7.0000000e-01   8.0000000e-01   5.0000000e-01   3.0000000e-01   3.0000000e-01   8.0000000e-01   7.0000000e-01   4.0000000e-01   8.0000000e-01   1.0000000e+00   5.0000000e-01   4.0000000e-01   1.6000000e+00   1.0000000e+00   4.0000000e-01   3.0000000e-01   9.0000000e-01   3.0000000e-01   8.0000000e-01   2.0000000e-01   6.0000000e-01   3.0000000e+00   2.8000000e+00   3.2000000e+00   2.3000000e+00   2.9000000e+00   2.8000000e+00   3.0000000e+00   1.6000000e+00   2.9000000e+00   2.2000000e+00   1.9000000e+00   2.5000000e+00   2.3000000e+00   3.0000000e+00   1.9000000e+00   2.7000000e+00   2.8000000e+00   2.4000000e+00   2.8000000e+00   2.2000000e+00   3.1000000e+00   2.3000000e+00   3.2000000e+00   3.0000000e+00   2.6000000e+00   2.7000000e+00   3.1000000e+00   3.3000000e+00   2.8000000e+00   1.8000000e+00   2.1000000e+00   2.0000000e+00   2.2000000e+00   3.4000000e+00   2.8000000e+00   2.8000000e+00   3.0000000e+00   2.7000000e+00   2.4000000e+00   2.3000000e+00   2.7000000e+00   2.9000000e+00   2.3000000e+00   1.6000000e+00   2.5000000e+00   2.5000000e+00   2.5000000e+00   2.6000000e+00   1.4000000e+00   2.4000000e+00   4.3000000e+00   3.4000000e+00   4.2000000e+00   3.9000000e+00   4.1000000e+00   4.9000000e+00   2.8000000e+00   4.6000000e+00   4.1000000e+00   4.4000000e+00   3.4000000e+00   3.6000000e+00   3.8000000e+00   3.3000000e+00   3.4000000e+00   3.6000000e+00   3.8000000e+00   5.0000000e+00   5.2000000e+00   3.3000000e+00   4.0000000e+00   3.2000000e+00   5.0000000e+00   3.2000000e+00   4.0000000e+00   4.3000000e+00   3.1000000e+00   3.2000000e+00   3.9000000e+00   4.1000000e+00   4.4000000e+00   4.7000000e+00   3.9000000e+00   3.4000000e+00   3.9000000e+00   4.4000000e+00   3.9000000e+00   3.8000000e+00   3.1000000e+00   3.7000000e+00   3.9000000e+00   3.4000000e+00   3.4000000e+00   4.2000000e+00   4.0000000e+00   3.5000000e+00   3.3000000e+00   3.5000000e+00   3.7000000e+00   3.4000000e+00   4.0000000e-01   5.0000000e-01   3.0000000e-01   8.0000000e-01   2.0000000e-01   4.0000000e-01   4.0000000e-01   1.2000000e+00   1.1000000e+00   
8.0000000e-01   5.0000000e-01   1.1000000e+00   5.0000000e-01   8.0000000e-01   5.0000000e-01   4.0000000e-01   5.0000000e-01   5.0000000e-01   4.0000000e-01   4.0000000e-01   6.0000000e-01   6.0000000e-01   2.0000000e-01   3.0000000e-01   8.0000000e-01   7.0000000e-01   9.0000000e-01   3.0000000e-01   4.0000000e-01   9.0000000e-01   3.0000000e-01   4.0000000e-01   5.0000000e-01   4.0000000e-01   1.1000000e+00   2.0000000e-01   4.0000000e-01   5.0000000e-01   4.0000000e-01   5.0000000e-01   2.0000000e-01   7.0000000e-01   4.0000000e-01   3.3000000e+00   3.1000000e+00   3.5000000e+00   2.6000000e+00   3.2000000e+00   3.1000000e+00   3.3000000e+00   1.9000000e+00   3.2000000e+00   2.5000000e+00   2.1000000e+00   2.8000000e+00   2.6000000e+00   3.3000000e+00   2.2000000e+00   3.0000000e+00   3.1000000e+00   2.7000000e+00   3.1000000e+00   2.5000000e+00   3.4000000e+00   2.6000000e+00   3.5000000e+00   3.3000000e+00   2.9000000e+00   3.0000000e+00   3.4000000e+00   3.6000000e+00   3.1000000e+00   2.1000000e+00   2.4000000e+00   2.3000000e+00   2.5000000e+00   3.7000000e+00   3.1000000e+00   3.1000000e+00   3.3000000e+00   3.0000000e+00   2.7000000e+00   2.6000000e+00   3.0000000e+00   3.2000000e+00   2.6000000e+00   1.9000000e+00   2.8000000e+00   2.8000000e+00   2.8000000e+00   2.9000000e+00   1.6000000e+00   2.7000000e+00   4.6000000e+00   3.7000000e+00   4.5000000e+00   4.2000000e+00   4.4000000e+00   5.2000000e+00   3.1000000e+00   4.9000000e+00   4.4000000e+00   4.7000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   5.3000000e+00   5.5000000e+00   3.6000000e+00   4.3000000e+00   3.5000000e+00   5.3000000e+00   3.5000000e+00   4.3000000e+00   4.6000000e+00   3.4000000e+00   3.5000000e+00   4.2000000e+00   4.4000000e+00   4.7000000e+00   5.0000000e+00   4.2000000e+00   3.7000000e+00   4.2000000e+00   4.7000000e+00   4.2000000e+00   4.1000000e+00   3.4000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.7000000e+00   4.5000000e+00   4.3000000e+00   3.8000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.7000000e+00   6.0000000e-01   3.0000000e-01   4.0000000e-01   2.0000000e-01   4.0000000e-01   7.0000000e-01   8.0000000e-01   1.0000000e+00   5.0000000e-01   1.0000000e-01   7.0000000e-01   4.0000000e-01   4.0000000e-01   3.0000000e-01   5.0000000e-01   3.0000000e-01   4.0000000e-01   4.0000000e-01   2.0000000e-01   2.0000000e-01   2.0000000e-01   3.0000000e-01   3.0000000e-01   4.0000000e-01   7.0000000e-01   8.0000000e-01   3.0000000e-01   3.0000000e-01   5.0000000e-01   3.0000000e-01   6.0000000e-01   1.0000000e-01   2.0000000e-01   1.1000000e+00   6.0000000e-01   4.0000000e-01   4.0000000e-01   4.0000000e-01   4.0000000e-01   4.0000000e-01   3.0000000e-01   1.0000000e-01   3.2000000e+00   3.0000000e+00   3.4000000e+00   2.5000000e+00   3.1000000e+00   3.0000000e+00   3.2000000e+00   1.8000000e+00   3.1000000e+00   2.4000000e+00   2.0000000e+00   2.7000000e+00   2.5000000e+00   3.2000000e+00   2.1000000e+00   2.9000000e+00   3.0000000e+00   2.6000000e+00   3.0000000e+00   2.4000000e+00   3.3000000e+00   2.5000000e+00   3.4000000e+00   3.2000000e+00   2.8000000e+00   2.9000000e+00   3.3000000e+00   3.5000000e+00   3.0000000e+00   2.0000000e+00   2.3000000e+00   2.2000000e+00   2.4000000e+00   3.6000000e+00   3.0000000e+00   3.0000000e+00   3.2000000e+00   2.9000000e+00   2.6000000e+00   2.5000000e+00   2.9000000e+00   3.1000000e+00   2.5000000e+00   1.8000000e+00   2.7000000e+00   2.7000000e+00   
2.7000000e+00   2.8000000e+00   1.5000000e+00   2.6000000e+00   4.5000000e+00   3.6000000e+00   4.4000000e+00   4.1000000e+00   4.3000000e+00   5.1000000e+00   3.0000000e+00   4.8000000e+00   4.3000000e+00   4.6000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   5.2000000e+00   5.4000000e+00   3.5000000e+00   4.2000000e+00   3.4000000e+00   5.2000000e+00   3.4000000e+00   4.2000000e+00   4.5000000e+00   3.3000000e+00   3.4000000e+00   4.1000000e+00   4.3000000e+00   4.6000000e+00   4.9000000e+00   4.1000000e+00   3.6000000e+00   4.1000000e+00   4.6000000e+00   4.1000000e+00   4.0000000e+00   3.3000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.6000000e+00   4.4000000e+00   4.2000000e+00   3.7000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.6000000e+00   5.0000000e-01   1.0000000e+00   5.0000000e-01   4.0000000e-01   3.0000000e-01   1.4000000e+00   1.5000000e+00   1.0000000e+00   7.0000000e-01   1.3000000e+00   9.0000000e-01   1.0000000e+00   8.0000000e-01   7.0000000e-01   7.0000000e-01   5.0000000e-01   6.0000000e-01   6.0000000e-01   8.0000000e-01   8.0000000e-01   3.0000000e-01   4.0000000e-01   1.0000000e+00   1.2000000e+00   1.3000000e+00   5.0000000e-01   6.0000000e-01   1.1000000e+00   5.0000000e-01   1.0000000e-01   7.0000000e-01   6.0000000e-01   6.0000000e-01   3.0000000e-01   6.0000000e-01   9.0000000e-01   4.0000000e-01   9.0000000e-01   3.0000000e-01   9.0000000e-01   6.0000000e-01   3.3000000e+00   3.1000000e+00   3.5000000e+00   2.6000000e+00   3.2000000e+00   3.1000000e+00   3.3000000e+00   1.9000000e+00   3.2000000e+00   2.5000000e+00   2.1000000e+00   2.8000000e+00   2.6000000e+00   3.3000000e+00   2.2000000e+00   3.0000000e+00   3.1000000e+00   2.7000000e+00   3.1000000e+00   2.5000000e+00   3.4000000e+00   2.6000000e+00   3.5000000e+00   3.3000000e+00   2.9000000e+00   3.0000000e+00   3.4000000e+00   3.6000000e+00   3.1000000e+00   2.1000000e+00   2.4000000e+00   2.3000000e+00   2.5000000e+00   3.7000000e+00   3.1000000e+00   3.1000000e+00   3.3000000e+00   3.0000000e+00   2.7000000e+00   2.6000000e+00   3.0000000e+00   3.2000000e+00   2.6000000e+00   1.9000000e+00   2.8000000e+00   2.8000000e+00   2.8000000e+00   2.9000000e+00   1.6000000e+00   2.7000000e+00   4.6000000e+00   3.7000000e+00   4.5000000e+00   4.2000000e+00   4.4000000e+00   5.2000000e+00   3.1000000e+00   4.9000000e+00   4.4000000e+00   4.7000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   5.3000000e+00   5.5000000e+00   3.6000000e+00   4.3000000e+00   3.5000000e+00   5.3000000e+00   3.5000000e+00   4.3000000e+00   4.6000000e+00   3.4000000e+00   3.5000000e+00   4.2000000e+00   4.4000000e+00   4.7000000e+00   5.0000000e+00   4.2000000e+00   3.7000000e+00   4.2000000e+00   4.7000000e+00   4.2000000e+00   4.1000000e+00   3.4000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.7000000e+00   4.5000000e+00   4.3000000e+00   3.8000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.7000000e+00   6.0000000e-01   3.0000000e-01   1.0000000e-01   6.0000000e-01   9.0000000e-01   1.3000000e+00   8.0000000e-01   4.0000000e-01   8.0000000e-01   7.0000000e-01   5.0000000e-01   6.0000000e-01   5.0000000e-01   4.0000000e-01   4.0000000e-01   1.0000000e-01   3.0000000e-01   4.0000000e-01   3.0000000e-01   2.0000000e-01   1.0000000e-01   5.0000000e-01   1.0000000e+00   1.1000000e+00   0.0000000e+00   3.0000000e-01   6.0000000e-01   
0.0000000e+00   5.0000000e-01   3.0000000e-01   4.0000000e-01   8.0000000e-01   5.0000000e-01   5.0000000e-01   7.0000000e-01   2.0000000e-01   7.0000000e-01   3.0000000e-01   6.0000000e-01   2.0000000e-01   3.2000000e+00   3.0000000e+00   3.4000000e+00   2.5000000e+00   3.1000000e+00   3.0000000e+00   3.2000000e+00   1.8000000e+00   3.1000000e+00   2.4000000e+00   2.0000000e+00   2.7000000e+00   2.5000000e+00   3.2000000e+00   2.1000000e+00   2.9000000e+00   3.0000000e+00   2.6000000e+00   3.0000000e+00   2.4000000e+00   3.3000000e+00   2.5000000e+00   3.4000000e+00   3.2000000e+00   2.8000000e+00   2.9000000e+00   3.3000000e+00   3.5000000e+00   3.0000000e+00   2.0000000e+00   2.3000000e+00   2.2000000e+00   2.4000000e+00   3.6000000e+00   3.0000000e+00   3.0000000e+00   3.2000000e+00   2.9000000e+00   2.6000000e+00   2.5000000e+00   2.9000000e+00   3.1000000e+00   2.5000000e+00   1.8000000e+00   2.7000000e+00   2.7000000e+00   2.7000000e+00   2.8000000e+00   1.5000000e+00   2.6000000e+00   4.5000000e+00   3.6000000e+00   4.4000000e+00   4.1000000e+00   4.3000000e+00   5.1000000e+00   3.0000000e+00   4.8000000e+00   4.3000000e+00   4.6000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   5.2000000e+00   5.4000000e+00   3.5000000e+00   4.2000000e+00   3.4000000e+00   5.2000000e+00   3.4000000e+00   4.2000000e+00   4.5000000e+00   3.3000000e+00   3.4000000e+00   4.1000000e+00   4.3000000e+00   4.6000000e+00   4.9000000e+00   4.1000000e+00   3.6000000e+00   4.1000000e+00   4.6000000e+00   4.1000000e+00   4.0000000e+00   3.3000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.6000000e+00   4.4000000e+00   4.2000000e+00   3.7000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.6000000e+00   6.0000000e-01   7.0000000e-01   1.1000000e+00   4.0000000e-01   7.0000000e-01   2.0000000e-01   3.0000000e-01   3.0000000e-01   3.0000000e-01   3.0000000e-01   3.0000000e-01   8.0000000e-01   4.0000000e-01   6.0000000e-01   7.0000000e-01   4.0000000e-01   2.0000000e-01   3.0000000e-01   7.0000000e-01   6.0000000e-01   3.0000000e-01   4.0000000e-01   5.0000000e-01   6.0000000e-01   5.0000000e-01   2.0000000e-01   6.0000000e-01   1.0000000e+00   3.0000000e-01   4.0000000e-01   1.4000000e+00   1.0000000e+00   4.0000000e-01   4.0000000e-01   7.0000000e-01   3.0000000e-01   8.0000000e-01   1.0000000e-01   4.0000000e-01   3.2000000e+00   3.0000000e+00   3.4000000e+00   2.5000000e+00   3.1000000e+00   3.0000000e+00   3.2000000e+00   1.8000000e+00   3.1000000e+00   2.4000000e+00   2.0000000e+00   2.7000000e+00   2.5000000e+00   3.2000000e+00   2.1000000e+00   2.9000000e+00   3.0000000e+00   2.6000000e+00   3.0000000e+00   2.4000000e+00   3.3000000e+00   2.5000000e+00   3.4000000e+00   3.2000000e+00   2.8000000e+00   2.9000000e+00   3.3000000e+00   3.5000000e+00   3.0000000e+00   2.0000000e+00   2.3000000e+00   2.2000000e+00   2.4000000e+00   3.6000000e+00   3.0000000e+00   3.0000000e+00   3.2000000e+00   2.9000000e+00   2.6000000e+00   2.5000000e+00   2.9000000e+00   3.1000000e+00   2.5000000e+00   1.8000000e+00   2.7000000e+00   2.7000000e+00   2.7000000e+00   2.8000000e+00   1.5000000e+00   2.6000000e+00   4.5000000e+00   3.6000000e+00   4.4000000e+00   4.1000000e+00   4.3000000e+00   5.1000000e+00   3.0000000e+00   4.8000000e+00   4.3000000e+00   4.6000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   5.2000000e+00   5.4000000e+00   3.5000000e+00   
4.2000000e+00   3.4000000e+00   5.2000000e+00   3.4000000e+00   4.2000000e+00   4.5000000e+00   3.3000000e+00   3.4000000e+00   4.1000000e+00   4.3000000e+00   4.6000000e+00   4.9000000e+00   4.1000000e+00   3.6000000e+00   4.1000000e+00   4.6000000e+00   4.1000000e+00   4.0000000e+00   3.3000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.6000000e+00   4.4000000e+00   4.2000000e+00   3.7000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.6000000e+00   4.0000000e-01   5.0000000e-01   1.0000000e+00   1.0000000e+00   6.0000000e-01   3.0000000e-01   9.0000000e-01   4.0000000e-01   6.0000000e-01   3.0000000e-01   6.0000000e-01   3.0000000e-01   3.0000000e-01   4.0000000e-01   2.0000000e-01   4.0000000e-01   4.0000000e-01   2.0000000e-01   3.0000000e-01   6.0000000e-01   7.0000000e-01   8.0000000e-01   3.0000000e-01   4.0000000e-01   7.0000000e-01   3.0000000e-01   4.0000000e-01   3.0000000e-01   3.0000000e-01   1.1000000e+00   4.0000000e-01   4.0000000e-01   4.0000000e-01   4.0000000e-01   4.0000000e-01   2.0000000e-01   5.0000000e-01   2.0000000e-01   3.1000000e+00   2.9000000e+00   3.3000000e+00   2.4000000e+00   3.0000000e+00   2.9000000e+00   3.1000000e+00   1.7000000e+00   3.0000000e+00   2.3000000e+00   1.9000000e+00   2.6000000e+00   2.4000000e+00   3.1000000e+00   2.0000000e+00   2.8000000e+00   2.9000000e+00   2.5000000e+00   2.9000000e+00   2.3000000e+00   3.2000000e+00   2.4000000e+00   3.3000000e+00   3.1000000e+00   2.7000000e+00   2.8000000e+00   3.2000000e+00   3.4000000e+00   2.9000000e+00   1.9000000e+00   2.2000000e+00   2.1000000e+00   2.3000000e+00   3.5000000e+00   2.9000000e+00   2.9000000e+00   3.1000000e+00   2.8000000e+00   2.5000000e+00   2.4000000e+00   2.8000000e+00   3.0000000e+00   2.4000000e+00   1.7000000e+00   2.6000000e+00   2.6000000e+00   2.6000000e+00   2.7000000e+00   1.4000000e+00   2.5000000e+00   4.4000000e+00   3.5000000e+00   4.3000000e+00   4.0000000e+00   4.2000000e+00   5.0000000e+00   2.9000000e+00   4.7000000e+00   4.2000000e+00   4.5000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.4000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   5.1000000e+00   5.3000000e+00   3.4000000e+00   4.1000000e+00   3.3000000e+00   5.1000000e+00   3.3000000e+00   4.1000000e+00   4.4000000e+00   3.2000000e+00   3.3000000e+00   4.0000000e+00   4.2000000e+00   4.5000000e+00   4.8000000e+00   4.0000000e+00   3.5000000e+00   4.0000000e+00   4.5000000e+00   4.0000000e+00   3.9000000e+00   3.2000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.5000000e+00   4.3000000e+00   4.1000000e+00   3.6000000e+00   3.4000000e+00   3.6000000e+00   3.8000000e+00   3.5000000e+00   5.0000000e-01   1.0000000e+00   1.4000000e+00   9.0000000e-01   5.0000000e-01   9.0000000e-01   8.0000000e-01   6.0000000e-01   7.0000000e-01   6.0000000e-01   4.0000000e-01   5.0000000e-01   2.0000000e-01   4.0000000e-01   5.0000000e-01   4.0000000e-01   2.0000000e-01   2.0000000e-01   6.0000000e-01   1.1000000e+00   1.2000000e+00   1.0000000e-01   2.0000000e-01   7.0000000e-01   1.0000000e-01   4.0000000e-01   4.0000000e-01   5.0000000e-01   7.0000000e-01   4.0000000e-01   5.0000000e-01   8.0000000e-01   2.0000000e-01   8.0000000e-01   2.0000000e-01   7.0000000e-01   3.0000000e-01   3.3000000e+00   3.1000000e+00   3.5000000e+00   2.6000000e+00   3.2000000e+00   3.1000000e+00   3.3000000e+00   1.9000000e+00   3.2000000e+00   2.5000000e+00   2.1000000e+00   2.8000000e+00   2.6000000e+00   3.3000000e+00   2.2000000e+00   3.0000000e+00   3.1000000e+00   
2.7000000e+00   3.1000000e+00   2.5000000e+00   3.4000000e+00   2.6000000e+00   3.5000000e+00   3.3000000e+00   2.9000000e+00   3.0000000e+00   3.4000000e+00   3.6000000e+00   3.1000000e+00   2.1000000e+00   2.4000000e+00   2.3000000e+00   2.5000000e+00   3.7000000e+00   3.1000000e+00   3.1000000e+00   3.3000000e+00   3.0000000e+00   2.7000000e+00   2.6000000e+00   3.0000000e+00   3.2000000e+00   2.6000000e+00   1.9000000e+00   2.8000000e+00   2.8000000e+00   2.8000000e+00   2.9000000e+00   1.6000000e+00   2.7000000e+00   4.6000000e+00   3.7000000e+00   4.5000000e+00   4.2000000e+00   4.4000000e+00   5.2000000e+00   3.1000000e+00   4.9000000e+00   4.4000000e+00   4.7000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   5.3000000e+00   5.5000000e+00   3.6000000e+00   4.3000000e+00   3.5000000e+00   5.3000000e+00   3.5000000e+00   4.3000000e+00   4.6000000e+00   3.4000000e+00   3.5000000e+00   4.2000000e+00   4.4000000e+00   4.7000000e+00   5.0000000e+00   4.2000000e+00   3.7000000e+00   4.2000000e+00   4.7000000e+00   4.2000000e+00   4.1000000e+00   3.4000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.7000000e+00   4.5000000e+00   4.3000000e+00   3.8000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.7000000e+00   1.5000000e+00   1.4000000e+00   1.1000000e+00   8.0000000e-01   1.4000000e+00   8.0000000e-01   1.1000000e+00   8.0000000e-01   6.0000000e-01   8.0000000e-01   8.0000000e-01   7.0000000e-01   7.0000000e-01   9.0000000e-01   9.0000000e-01   5.0000000e-01   5.0000000e-01   1.1000000e+00   1.1000000e+00   1.2000000e+00   6.0000000e-01   7.0000000e-01   1.2000000e+00   6.0000000e-01   2.0000000e-01   8.0000000e-01   7.0000000e-01   7.0000000e-01   2.0000000e-01   7.0000000e-01   8.0000000e-01   5.0000000e-01   8.0000000e-01   3.0000000e-01   1.0000000e+00   7.0000000e-01   3.6000000e+00   3.4000000e+00   3.8000000e+00   2.9000000e+00   3.5000000e+00   3.4000000e+00   3.6000000e+00   2.2000000e+00   3.5000000e+00   2.8000000e+00   2.4000000e+00   3.1000000e+00   2.9000000e+00   3.6000000e+00   2.5000000e+00   3.3000000e+00   3.4000000e+00   3.0000000e+00   3.4000000e+00   2.8000000e+00   3.7000000e+00   2.9000000e+00   3.8000000e+00   3.6000000e+00   3.2000000e+00   3.3000000e+00   3.7000000e+00   3.9000000e+00   3.4000000e+00   2.4000000e+00   2.7000000e+00   2.6000000e+00   2.8000000e+00   4.0000000e+00   3.4000000e+00   3.4000000e+00   3.6000000e+00   3.3000000e+00   3.0000000e+00   2.9000000e+00   3.3000000e+00   3.5000000e+00   2.9000000e+00   2.2000000e+00   3.1000000e+00   3.1000000e+00   3.1000000e+00   3.2000000e+00   1.9000000e+00   3.0000000e+00   4.9000000e+00   4.0000000e+00   4.8000000e+00   4.5000000e+00   4.7000000e+00   5.5000000e+00   3.4000000e+00   5.2000000e+00   4.7000000e+00   5.0000000e+00   4.0000000e+00   4.2000000e+00   4.4000000e+00   3.9000000e+00   4.0000000e+00   4.2000000e+00   4.4000000e+00   5.6000000e+00   5.8000000e+00   3.9000000e+00   4.6000000e+00   3.8000000e+00   5.6000000e+00   3.8000000e+00   4.6000000e+00   4.9000000e+00   3.7000000e+00   3.8000000e+00   4.5000000e+00   4.7000000e+00   5.0000000e+00   5.3000000e+00   4.5000000e+00   4.0000000e+00   4.5000000e+00   5.0000000e+00   4.5000000e+00   4.4000000e+00   3.7000000e+00   4.3000000e+00   4.5000000e+00   4.0000000e+00   4.0000000e+00   4.8000000e+00   4.6000000e+00   4.1000000e+00   3.9000000e+00   4.1000000e+00   4.3000000e+00   4.0000000e+00   4.0000000e-01   4.0000000e-01   7.0000000e-01   
5.0000000e-01   7.0000000e-01   6.0000000e-01   7.0000000e-01   1.2000000e+00   7.0000000e-01   1.0000000e+00   1.0000000e+00   8.0000000e-01   6.0000000e-01   6.0000000e-01   1.1000000e+00   1.0000000e+00   6.0000000e-01   6.0000000e-01   3.0000000e-01   9.0000000e-01   8.0000000e-01   5.0000000e-01   9.0000000e-01   1.4000000e+00   7.0000000e-01   8.0000000e-01   1.7000000e+00   1.4000000e+00   8.0000000e-01   7.0000000e-01   1.0000000e+00   7.0000000e-01   1.2000000e+00   5.0000000e-01   8.0000000e-01   3.5000000e+00   3.3000000e+00   3.7000000e+00   2.8000000e+00   3.4000000e+00   3.3000000e+00   3.5000000e+00   2.1000000e+00   3.4000000e+00   2.7000000e+00   2.3000000e+00   3.0000000e+00   2.8000000e+00   3.5000000e+00   2.4000000e+00   3.2000000e+00   3.3000000e+00   2.9000000e+00   3.3000000e+00   2.7000000e+00   3.6000000e+00   2.8000000e+00   3.7000000e+00   3.5000000e+00   3.1000000e+00   3.2000000e+00   3.6000000e+00   3.8000000e+00   3.3000000e+00   2.3000000e+00   2.6000000e+00   2.5000000e+00   2.7000000e+00   3.9000000e+00   3.3000000e+00   3.3000000e+00   3.5000000e+00   3.2000000e+00   2.9000000e+00   2.8000000e+00   3.2000000e+00   3.4000000e+00   2.8000000e+00   2.1000000e+00   3.0000000e+00   3.0000000e+00   3.0000000e+00   3.1000000e+00   1.8000000e+00   2.9000000e+00   4.8000000e+00   3.9000000e+00   4.7000000e+00   4.4000000e+00   4.6000000e+00   5.4000000e+00   3.3000000e+00   5.1000000e+00   4.6000000e+00   4.9000000e+00   3.9000000e+00   4.1000000e+00   4.3000000e+00   3.8000000e+00   3.9000000e+00   4.1000000e+00   4.3000000e+00   5.5000000e+00   5.7000000e+00   3.8000000e+00   4.5000000e+00   3.7000000e+00   5.5000000e+00   3.7000000e+00   4.5000000e+00   4.8000000e+00   3.6000000e+00   3.7000000e+00   4.4000000e+00   4.6000000e+00   4.9000000e+00   5.2000000e+00   4.4000000e+00   3.9000000e+00   4.4000000e+00   4.9000000e+00   4.4000000e+00   4.3000000e+00   3.6000000e+00   4.2000000e+00   4.4000000e+00   3.9000000e+00   3.9000000e+00   4.7000000e+00   4.5000000e+00   4.0000000e+00   3.8000000e+00   4.0000000e+00   4.2000000e+00   3.9000000e+00   5.0000000e-01   9.0000000e-01   6.0000000e-01   6.0000000e-01   1.0000000e+00   7.0000000e-01   1.1000000e+00   1.1000000e+00   1.0000000e+00   1.4000000e+00   1.0000000e+00   9.0000000e-01   1.0000000e+00   1.2000000e+00   1.3000000e+00   1.0000000e+00   5.0000000e-01   2.0000000e-01   1.3000000e+00   1.2000000e+00   9.0000000e-01   1.3000000e+00   1.4000000e+00   1.0000000e+00   9.0000000e-01   2.1000000e+00   1.3000000e+00   9.0000000e-01   6.0000000e-01   1.4000000e+00   6.0000000e-01   1.2000000e+00   7.0000000e-01   1.1000000e+00   3.2000000e+00   3.0000000e+00   3.4000000e+00   2.5000000e+00   3.1000000e+00   3.0000000e+00   3.2000000e+00   2.0000000e+00   3.1000000e+00   2.4000000e+00   2.4000000e+00   2.7000000e+00   2.5000000e+00   3.2000000e+00   2.1000000e+00   2.9000000e+00   3.0000000e+00   2.6000000e+00   3.0000000e+00   2.4000000e+00   3.3000000e+00   2.5000000e+00   3.4000000e+00   3.2000000e+00   2.8000000e+00   2.9000000e+00   3.3000000e+00   3.5000000e+00   3.0000000e+00   2.0000000e+00   2.3000000e+00   2.2000000e+00   2.4000000e+00   3.6000000e+00   3.0000000e+00   3.0000000e+00   3.2000000e+00   2.9000000e+00   2.6000000e+00   2.5000000e+00   2.9000000e+00   3.1000000e+00   2.5000000e+00   2.1000000e+00   2.7000000e+00   2.7000000e+00   2.7000000e+00   2.8000000e+00   1.9000000e+00   2.6000000e+00   4.5000000e+00   3.6000000e+00   4.4000000e+00   4.1000000e+00   4.3000000e+00   5.1000000e+00   
3.0000000e+00   4.8000000e+00   4.3000000e+00   4.6000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   5.2000000e+00   5.4000000e+00   3.5000000e+00   4.2000000e+00   3.4000000e+00   5.2000000e+00   3.4000000e+00   4.2000000e+00   4.5000000e+00   3.3000000e+00   3.4000000e+00   4.1000000e+00   4.3000000e+00   4.6000000e+00   4.9000000e+00   4.1000000e+00   3.6000000e+00   4.1000000e+00   4.6000000e+00   4.1000000e+00   4.0000000e+00   3.3000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.6000000e+00   4.4000000e+00   4.2000000e+00   3.7000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.6000000e+00   4.0000000e-01   4.0000000e-01   3.0000000e-01   5.0000000e-01   3.0000000e-01   8.0000000e-01   6.0000000e-01   6.0000000e-01   9.0000000e-01   5.0000000e-01   4.0000000e-01   5.0000000e-01   7.0000000e-01   8.0000000e-01   5.0000000e-01   3.0000000e-01   3.0000000e-01   8.0000000e-01   7.0000000e-01   4.0000000e-01   8.0000000e-01   1.0000000e+00   5.0000000e-01   4.0000000e-01   1.6000000e+00   1.0000000e+00   4.0000000e-01   6.0000000e-01   9.0000000e-01   3.0000000e-01   8.0000000e-01   2.0000000e-01   6.0000000e-01   3.4000000e+00   3.2000000e+00   3.6000000e+00   2.7000000e+00   3.3000000e+00   3.2000000e+00   3.4000000e+00   2.0000000e+00   3.3000000e+00   2.6000000e+00   2.2000000e+00   2.9000000e+00   2.7000000e+00   3.4000000e+00   2.3000000e+00   3.1000000e+00   3.2000000e+00   2.8000000e+00   3.2000000e+00   2.6000000e+00   3.5000000e+00   2.7000000e+00   3.6000000e+00   3.4000000e+00   3.0000000e+00   3.1000000e+00   3.5000000e+00   3.7000000e+00   3.2000000e+00   2.2000000e+00   2.5000000e+00   2.4000000e+00   2.6000000e+00   3.8000000e+00   3.2000000e+00   3.2000000e+00   3.4000000e+00   3.1000000e+00   2.8000000e+00   2.7000000e+00   3.1000000e+00   3.3000000e+00   2.7000000e+00   2.0000000e+00   2.9000000e+00   2.9000000e+00   2.9000000e+00   3.0000000e+00   1.7000000e+00   2.8000000e+00   4.7000000e+00   3.8000000e+00   4.6000000e+00   4.3000000e+00   4.5000000e+00   5.3000000e+00   3.2000000e+00   5.0000000e+00   4.5000000e+00   4.8000000e+00   3.8000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.8000000e+00   4.0000000e+00   4.2000000e+00   5.4000000e+00   5.6000000e+00   3.7000000e+00   4.4000000e+00   3.6000000e+00   5.4000000e+00   3.6000000e+00   4.4000000e+00   4.7000000e+00   3.5000000e+00   3.6000000e+00   4.3000000e+00   4.5000000e+00   4.8000000e+00   5.1000000e+00   4.3000000e+00   3.8000000e+00   4.3000000e+00   4.8000000e+00   4.3000000e+00   4.2000000e+00   3.5000000e+00   4.1000000e+00   4.3000000e+00   3.8000000e+00   3.8000000e+00   4.6000000e+00   4.4000000e+00   3.9000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.8000000e+00   6.0000000e-01   3.0000000e-01   3.0000000e-01   2.0000000e-01   5.0000000e-01   3.0000000e-01   5.0000000e-01   5.0000000e-01   2.0000000e-01   1.0000000e-01   1.0000000e-01   4.0000000e-01   4.0000000e-01   3.0000000e-01   6.0000000e-01   7.0000000e-01   4.0000000e-01   3.0000000e-01   4.0000000e-01   4.0000000e-01   7.0000000e-01   1.0000000e-01   1.0000000e-01   1.2000000e+00   7.0000000e-01   3.0000000e-01   5.0000000e-01   5.0000000e-01   3.0000000e-01   5.0000000e-01   2.0000000e-01   2.0000000e-01   3.3000000e+00   3.1000000e+00   3.5000000e+00   2.6000000e+00   3.2000000e+00   3.1000000e+00   3.3000000e+00   1.9000000e+00   3.2000000e+00   2.5000000e+00   2.1000000e+00   2.8000000e+00   2.6000000e+00   
3.3000000e+00   2.2000000e+00   3.0000000e+00   3.1000000e+00   2.7000000e+00   3.1000000e+00   2.5000000e+00   3.4000000e+00   2.6000000e+00   3.5000000e+00   3.3000000e+00   2.9000000e+00   3.0000000e+00   3.4000000e+00   3.6000000e+00   3.1000000e+00   2.1000000e+00   2.4000000e+00   2.3000000e+00   2.5000000e+00   3.7000000e+00   3.1000000e+00   3.1000000e+00   3.3000000e+00   3.0000000e+00   2.7000000e+00   2.6000000e+00   3.0000000e+00   3.2000000e+00   2.6000000e+00   1.9000000e+00   2.8000000e+00   2.8000000e+00   2.8000000e+00   2.9000000e+00   1.6000000e+00   2.7000000e+00   4.6000000e+00   3.7000000e+00   4.5000000e+00   4.2000000e+00   4.4000000e+00   5.2000000e+00   3.1000000e+00   4.9000000e+00   4.4000000e+00   4.7000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   5.3000000e+00   5.5000000e+00   3.6000000e+00   4.3000000e+00   3.5000000e+00   5.3000000e+00   3.5000000e+00   4.3000000e+00   4.6000000e+00   3.4000000e+00   3.5000000e+00   4.2000000e+00   4.4000000e+00   4.7000000e+00   5.0000000e+00   4.2000000e+00   3.7000000e+00   4.2000000e+00   4.7000000e+00   4.2000000e+00   4.1000000e+00   3.4000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.7000000e+00   4.5000000e+00   4.3000000e+00   3.8000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.7000000e+00   6.0000000e-01   4.0000000e-01   6.0000000e-01   1.1000000e+00   6.0000000e-01   9.0000000e-01   8.0000000e-01   7.0000000e-01   5.0000000e-01   5.0000000e-01   1.0000000e+00   9.0000000e-01   4.0000000e-01   5.0000000e-01   4.0000000e-01   8.0000000e-01   7.0000000e-01   4.0000000e-01   8.0000000e-01   1.3000000e+00   6.0000000e-01   7.0000000e-01   1.5000000e+00   1.3000000e+00   7.0000000e-01   6.0000000e-01   9.0000000e-01   6.0000000e-01   1.1000000e+00   4.0000000e-01   7.0000000e-01   3.0000000e+00   2.8000000e+00   3.2000000e+00   2.3000000e+00   2.9000000e+00   2.8000000e+00   3.0000000e+00   1.6000000e+00   2.9000000e+00   2.2000000e+00   1.8000000e+00   2.5000000e+00   2.3000000e+00   3.0000000e+00   1.9000000e+00   2.7000000e+00   2.8000000e+00   2.4000000e+00   2.8000000e+00   2.2000000e+00   3.1000000e+00   2.3000000e+00   3.2000000e+00   3.0000000e+00   2.6000000e+00   2.7000000e+00   3.1000000e+00   3.3000000e+00   2.8000000e+00   1.8000000e+00   2.1000000e+00   2.0000000e+00   2.2000000e+00   3.4000000e+00   2.8000000e+00   2.8000000e+00   3.0000000e+00   2.7000000e+00   2.4000000e+00   2.3000000e+00   2.7000000e+00   2.9000000e+00   2.3000000e+00   1.6000000e+00   2.5000000e+00   2.5000000e+00   2.5000000e+00   2.6000000e+00   1.3000000e+00   2.4000000e+00   4.3000000e+00   3.4000000e+00   4.2000000e+00   3.9000000e+00   4.1000000e+00   4.9000000e+00   2.8000000e+00   4.6000000e+00   4.1000000e+00   4.4000000e+00   3.4000000e+00   3.6000000e+00   3.8000000e+00   3.3000000e+00   3.4000000e+00   3.6000000e+00   3.8000000e+00   5.0000000e+00   5.2000000e+00   3.3000000e+00   4.0000000e+00   3.2000000e+00   5.0000000e+00   3.2000000e+00   4.0000000e+00   4.3000000e+00   3.1000000e+00   3.2000000e+00   3.9000000e+00   4.1000000e+00   4.4000000e+00   4.7000000e+00   3.9000000e+00   3.4000000e+00   3.9000000e+00   4.4000000e+00   3.9000000e+00   3.8000000e+00   3.1000000e+00   3.7000000e+00   3.9000000e+00   3.4000000e+00   3.4000000e+00   4.2000000e+00   4.0000000e+00   3.5000000e+00   3.3000000e+00   3.5000000e+00   3.7000000e+00   3.4000000e+00   4.0000000e-01   1.0000000e-01   5.0000000e-01   5.0000000e-01   
4.0000000e-01   8.0000000e-01   4.0000000e-01   3.0000000e-01   4.0000000e-01   6.0000000e-01   7.0000000e-01   4.0000000e-01   3.0000000e-01   4.0000000e-01   7.0000000e-01   6.0000000e-01   4.0000000e-01   7.0000000e-01   8.0000000e-01   4.0000000e-01   3.0000000e-01   1.5000000e+00   7.0000000e-01   3.0000000e-01   4.0000000e-01   8.0000000e-01   1.0000000e-01   6.0000000e-01   2.0000000e-01   5.0000000e-01   3.2000000e+00   3.0000000e+00   3.4000000e+00   2.5000000e+00   3.1000000e+00   3.0000000e+00   3.2000000e+00   1.8000000e+00   3.1000000e+00   2.4000000e+00   2.0000000e+00   2.7000000e+00   2.5000000e+00   3.2000000e+00   2.1000000e+00   2.9000000e+00   3.0000000e+00   2.6000000e+00   3.0000000e+00   2.4000000e+00   3.3000000e+00   2.5000000e+00   3.4000000e+00   3.2000000e+00   2.8000000e+00   2.9000000e+00   3.3000000e+00   3.5000000e+00   3.0000000e+00   2.0000000e+00   2.3000000e+00   2.2000000e+00   2.4000000e+00   3.6000000e+00   3.0000000e+00   3.0000000e+00   3.2000000e+00   2.9000000e+00   2.6000000e+00   2.5000000e+00   2.9000000e+00   3.1000000e+00   2.5000000e+00   1.8000000e+00   2.7000000e+00   2.7000000e+00   2.7000000e+00   2.8000000e+00   1.5000000e+00   2.6000000e+00   4.5000000e+00   3.6000000e+00   4.4000000e+00   4.1000000e+00   4.3000000e+00   5.1000000e+00   3.0000000e+00   4.8000000e+00   4.3000000e+00   4.6000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   5.2000000e+00   5.4000000e+00   3.5000000e+00   4.2000000e+00   3.4000000e+00   5.2000000e+00   3.4000000e+00   4.2000000e+00   4.5000000e+00   3.3000000e+00   3.4000000e+00   4.1000000e+00   4.3000000e+00   4.6000000e+00   4.9000000e+00   4.1000000e+00   3.6000000e+00   4.1000000e+00   4.6000000e+00   4.1000000e+00   4.0000000e+00   3.3000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.6000000e+00   4.4000000e+00   4.2000000e+00   3.7000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.6000000e+00   3.0000000e-01   8.0000000e-01   3.0000000e-01   6.0000000e-01   4.0000000e-01   4.0000000e-01   2.0000000e-01   3.0000000e-01   7.0000000e-01   6.0000000e-01   2.0000000e-01   7.0000000e-01   8.0000000e-01   5.0000000e-01   5.0000000e-01   4.0000000e-01   5.0000000e-01   1.0000000e+00   3.0000000e-01   4.0000000e-01   1.1000000e+00   1.0000000e+00   4.0000000e-01   4.0000000e-01   6.0000000e-01   4.0000000e-01   8.0000000e-01   3.0000000e-01   4.0000000e-01   3.0000000e+00   2.8000000e+00   3.2000000e+00   2.3000000e+00   2.9000000e+00   2.8000000e+00   3.0000000e+00   1.6000000e+00   2.9000000e+00   2.2000000e+00   1.8000000e+00   2.5000000e+00   2.3000000e+00   3.0000000e+00   1.9000000e+00   2.7000000e+00   2.8000000e+00   2.4000000e+00   2.8000000e+00   2.2000000e+00   3.1000000e+00   2.3000000e+00   3.2000000e+00   3.0000000e+00   2.6000000e+00   2.7000000e+00   3.1000000e+00   3.3000000e+00   2.8000000e+00   1.8000000e+00   2.1000000e+00   2.0000000e+00   2.2000000e+00   3.4000000e+00   2.8000000e+00   2.8000000e+00   3.0000000e+00   2.7000000e+00   2.4000000e+00   2.3000000e+00   2.7000000e+00   2.9000000e+00   2.3000000e+00   1.6000000e+00   2.5000000e+00   2.5000000e+00   2.5000000e+00   2.6000000e+00   1.3000000e+00   2.4000000e+00   4.3000000e+00   3.4000000e+00   4.2000000e+00   3.9000000e+00   4.1000000e+00   4.9000000e+00   2.8000000e+00   4.6000000e+00   4.1000000e+00   4.4000000e+00   3.4000000e+00   3.6000000e+00   3.8000000e+00   3.3000000e+00   3.4000000e+00   3.6000000e+00   3.8000000e+00   
5.0000000e+00   5.2000000e+00   3.3000000e+00   4.0000000e+00   3.2000000e+00   5.0000000e+00   3.2000000e+00   4.0000000e+00   4.3000000e+00   3.1000000e+00   3.2000000e+00   3.9000000e+00   4.1000000e+00   4.4000000e+00   4.7000000e+00   3.9000000e+00   3.4000000e+00   3.9000000e+00   4.4000000e+00   3.9000000e+00   3.8000000e+00   3.1000000e+00   3.7000000e+00   3.9000000e+00   3.4000000e+00   3.4000000e+00   4.2000000e+00   4.0000000e+00   3.5000000e+00   3.3000000e+00   3.5000000e+00   3.7000000e+00   3.4000000e+00   5.0000000e-01   4.0000000e-01   4.0000000e-01   7.0000000e-01   3.0000000e-01   2.0000000e-01   3.0000000e-01   5.0000000e-01   6.0000000e-01   3.0000000e-01   4.0000000e-01   5.0000000e-01   6.0000000e-01   5.0000000e-01   4.0000000e-01   6.0000000e-01   7.0000000e-01   3.0000000e-01   2.0000000e-01   1.4000000e+00   7.0000000e-01   2.0000000e-01   4.0000000e-01   7.0000000e-01   2.0000000e-01   5.0000000e-01   2.0000000e-01   4.0000000e-01   3.2000000e+00   3.0000000e+00   3.4000000e+00   2.5000000e+00   3.1000000e+00   3.0000000e+00   3.2000000e+00   1.8000000e+00   3.1000000e+00   2.4000000e+00   2.0000000e+00   2.7000000e+00   2.5000000e+00   3.2000000e+00   2.1000000e+00   2.9000000e+00   3.0000000e+00   2.6000000e+00   3.0000000e+00   2.4000000e+00   3.3000000e+00   2.5000000e+00   3.4000000e+00   3.2000000e+00   2.8000000e+00   2.9000000e+00   3.3000000e+00   3.5000000e+00   3.0000000e+00   2.0000000e+00   2.3000000e+00   2.2000000e+00   2.4000000e+00   3.6000000e+00   3.0000000e+00   3.0000000e+00   3.2000000e+00   2.9000000e+00   2.6000000e+00   2.5000000e+00   2.9000000e+00   3.1000000e+00   2.5000000e+00   1.8000000e+00   2.7000000e+00   2.7000000e+00   2.7000000e+00   2.8000000e+00   1.5000000e+00   2.6000000e+00   4.5000000e+00   3.6000000e+00   4.4000000e+00   4.1000000e+00   4.3000000e+00   5.1000000e+00   3.0000000e+00   4.8000000e+00   4.3000000e+00   4.6000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   5.2000000e+00   5.4000000e+00   3.5000000e+00   4.2000000e+00   3.4000000e+00   5.2000000e+00   3.4000000e+00   4.2000000e+00   4.5000000e+00   3.3000000e+00   3.4000000e+00   4.1000000e+00   4.3000000e+00   4.6000000e+00   4.9000000e+00   4.1000000e+00   3.6000000e+00   4.1000000e+00   4.6000000e+00   4.1000000e+00   4.0000000e+00   3.3000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.6000000e+00   4.4000000e+00   4.2000000e+00   3.7000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.6000000e+00   7.0000000e-01   9.0000000e-01   6.0000000e-01   6.0000000e-01   6.0000000e-01   6.0000000e-01   6.0000000e-01   6.0000000e-01   8.0000000e-01   6.0000000e-01   9.0000000e-01   5.0000000e-01   4.0000000e-01   9.0000000e-01   5.0000000e-01   6.0000000e-01   5.0000000e-01   4.0000000e-01   1.3000000e+00   4.0000000e-01   6.0000000e-01   9.0000000e-01   6.0000000e-01   6.0000000e-01   4.0000000e-01   7.0000000e-01   4.0000000e-01   3.7000000e+00   3.5000000e+00   3.9000000e+00   3.0000000e+00   3.6000000e+00   3.5000000e+00   3.7000000e+00   2.3000000e+00   3.6000000e+00   2.9000000e+00   2.5000000e+00   3.2000000e+00   3.0000000e+00   3.7000000e+00   2.6000000e+00   3.4000000e+00   3.5000000e+00   3.1000000e+00   3.5000000e+00   2.9000000e+00   3.8000000e+00   3.0000000e+00   3.9000000e+00   3.7000000e+00   3.3000000e+00   3.4000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   2.5000000e+00   2.8000000e+00   2.7000000e+00   2.9000000e+00   4.1000000e+00   
3.5000000e+00   3.5000000e+00   3.7000000e+00   3.4000000e+00   3.1000000e+00   3.0000000e+00   3.4000000e+00   3.6000000e+00   3.0000000e+00   2.3000000e+00   3.2000000e+00   3.2000000e+00   3.2000000e+00   3.3000000e+00   2.0000000e+00   3.1000000e+00   5.0000000e+00   4.1000000e+00   4.9000000e+00   4.6000000e+00   4.8000000e+00   5.6000000e+00   3.5000000e+00   5.3000000e+00   4.8000000e+00   5.1000000e+00   4.1000000e+00   4.3000000e+00   4.5000000e+00   4.0000000e+00   4.1000000e+00   4.3000000e+00   4.5000000e+00   5.7000000e+00   5.9000000e+00   4.0000000e+00   4.7000000e+00   3.9000000e+00   5.7000000e+00   3.9000000e+00   4.7000000e+00   5.0000000e+00   3.8000000e+00   3.9000000e+00   4.6000000e+00   4.8000000e+00   5.1000000e+00   5.4000000e+00   4.6000000e+00   4.1000000e+00   4.6000000e+00   5.1000000e+00   4.6000000e+00   4.5000000e+00   3.8000000e+00   4.4000000e+00   4.6000000e+00   4.1000000e+00   4.1000000e+00   4.9000000e+00   4.7000000e+00   4.2000000e+00   4.0000000e+00   4.2000000e+00   4.4000000e+00   4.1000000e+00   3.0000000e-01   3.0000000e-01   1.0000000e-01   3.0000000e-01   3.0000000e-01   4.0000000e-01   3.0000000e-01   3.0000000e-01   8.0000000e-01   9.0000000e-01   4.0000000e-01   5.0000000e-01   4.0000000e-01   4.0000000e-01   7.0000000e-01   3.0000000e-01   4.0000000e-01   1.0000000e+00   7.0000000e-01   2.0000000e-01   5.0000000e-01   3.0000000e-01   5.0000000e-01   5.0000000e-01   4.0000000e-01   3.0000000e-01   3.0000000e+00   2.8000000e+00   3.2000000e+00   2.3000000e+00   2.9000000e+00   2.8000000e+00   3.0000000e+00   1.6000000e+00   2.9000000e+00   2.2000000e+00   1.8000000e+00   2.5000000e+00   2.3000000e+00   3.0000000e+00   1.9000000e+00   2.7000000e+00   2.8000000e+00   2.4000000e+00   2.8000000e+00   2.2000000e+00   3.1000000e+00   2.3000000e+00   3.2000000e+00   3.0000000e+00   2.6000000e+00   2.7000000e+00   3.1000000e+00   3.3000000e+00   2.8000000e+00   1.8000000e+00   2.1000000e+00   2.0000000e+00   2.2000000e+00   3.4000000e+00   2.8000000e+00   2.8000000e+00   3.0000000e+00   2.7000000e+00   2.4000000e+00   2.3000000e+00   2.7000000e+00   2.9000000e+00   2.3000000e+00   1.6000000e+00   2.5000000e+00   2.5000000e+00   2.5000000e+00   2.6000000e+00   1.3000000e+00   2.4000000e+00   4.3000000e+00   3.4000000e+00   4.2000000e+00   3.9000000e+00   4.1000000e+00   4.9000000e+00   2.8000000e+00   4.6000000e+00   4.1000000e+00   4.4000000e+00   3.4000000e+00   3.6000000e+00   3.8000000e+00   3.3000000e+00   3.4000000e+00   3.6000000e+00   3.8000000e+00   5.0000000e+00   5.2000000e+00   3.3000000e+00   4.0000000e+00   3.2000000e+00   5.0000000e+00   3.2000000e+00   4.0000000e+00   4.3000000e+00   3.1000000e+00   3.2000000e+00   3.9000000e+00   4.1000000e+00   4.4000000e+00   4.7000000e+00   3.9000000e+00   3.4000000e+00   3.9000000e+00   4.4000000e+00   3.9000000e+00   3.8000000e+00   3.1000000e+00   3.7000000e+00   3.9000000e+00   3.4000000e+00   3.4000000e+00   4.2000000e+00   4.0000000e+00   3.5000000e+00   3.3000000e+00   3.5000000e+00   3.7000000e+00   3.4000000e+00   4.0000000e-01   3.0000000e-01   4.0000000e-01   5.0000000e-01   3.0000000e-01   3.0000000e-01   6.0000000e-01   7.0000000e-01   8.0000000e-01   4.0000000e-01   7.0000000e-01   7.0000000e-01   4.0000000e-01   6.0000000e-01   4.0000000e-01   6.0000000e-01   1.1000000e+00   6.0000000e-01   4.0000000e-01   4.0000000e-01   5.0000000e-01   4.0000000e-01   5.0000000e-01   5.0000000e-01   5.0000000e-01   2.8000000e+00   2.6000000e+00   3.0000000e+00   2.1000000e+00   2.7000000e+00   
2.6000000e+00   2.8000000e+00   1.4000000e+00   2.7000000e+00   2.0000000e+00   1.6000000e+00   2.3000000e+00   2.1000000e+00   2.8000000e+00   1.7000000e+00   2.5000000e+00   2.6000000e+00   2.2000000e+00   2.6000000e+00   2.0000000e+00   2.9000000e+00   2.1000000e+00   3.0000000e+00   2.8000000e+00   2.4000000e+00   2.5000000e+00   2.9000000e+00   3.1000000e+00   2.6000000e+00   1.6000000e+00   1.9000000e+00   1.8000000e+00   2.0000000e+00   3.2000000e+00   2.6000000e+00   2.6000000e+00   2.8000000e+00   2.5000000e+00   2.2000000e+00   2.1000000e+00   2.5000000e+00   2.7000000e+00   2.1000000e+00   1.4000000e+00   2.3000000e+00   2.3000000e+00   2.3000000e+00   2.4000000e+00   1.1000000e+00   2.2000000e+00   4.1000000e+00   3.2000000e+00   4.0000000e+00   3.7000000e+00   3.9000000e+00   4.7000000e+00   2.6000000e+00   4.4000000e+00   3.9000000e+00   4.2000000e+00   3.2000000e+00   3.4000000e+00   3.6000000e+00   3.1000000e+00   3.2000000e+00   3.4000000e+00   3.6000000e+00   4.8000000e+00   5.0000000e+00   3.1000000e+00   3.8000000e+00   3.0000000e+00   4.8000000e+00   3.0000000e+00   3.8000000e+00   4.1000000e+00   2.9000000e+00   3.0000000e+00   3.7000000e+00   3.9000000e+00   4.2000000e+00   4.5000000e+00   3.7000000e+00   3.2000000e+00   3.7000000e+00   4.2000000e+00   3.7000000e+00   3.6000000e+00   2.9000000e+00   3.5000000e+00   3.7000000e+00   3.2000000e+00   3.2000000e+00   4.0000000e+00   3.8000000e+00   3.3000000e+00   3.1000000e+00   3.3000000e+00   3.5000000e+00   3.2000000e+00   4.0000000e-01   5.0000000e-01   4.0000000e-01   3.0000000e-01   2.0000000e-01   4.0000000e-01   1.1000000e+00   1.2000000e+00   1.0000000e-01   4.0000000e-01   5.0000000e-01   1.0000000e-01   6.0000000e-01   4.0000000e-01   5.0000000e-01   7.0000000e-01   6.0000000e-01   5.0000000e-01   8.0000000e-01   2.0000000e-01   8.0000000e-01   4.0000000e-01   7.0000000e-01   3.0000000e-01   3.1000000e+00   2.9000000e+00   3.3000000e+00   2.4000000e+00   3.0000000e+00   2.9000000e+00   3.1000000e+00   1.7000000e+00   3.0000000e+00   2.3000000e+00   1.9000000e+00   2.6000000e+00   2.4000000e+00   3.1000000e+00   2.0000000e+00   2.8000000e+00   2.9000000e+00   2.5000000e+00   2.9000000e+00   2.3000000e+00   3.2000000e+00   2.4000000e+00   3.3000000e+00   3.1000000e+00   2.7000000e+00   2.8000000e+00   3.2000000e+00   3.4000000e+00   2.9000000e+00   1.9000000e+00   2.2000000e+00   2.1000000e+00   2.3000000e+00   3.5000000e+00   2.9000000e+00   2.9000000e+00   3.1000000e+00   2.8000000e+00   2.5000000e+00   2.4000000e+00   2.8000000e+00   3.0000000e+00   2.4000000e+00   1.7000000e+00   2.6000000e+00   2.6000000e+00   2.6000000e+00   2.7000000e+00   1.4000000e+00   2.5000000e+00   4.4000000e+00   3.5000000e+00   4.3000000e+00   4.0000000e+00   4.2000000e+00   5.0000000e+00   2.9000000e+00   4.7000000e+00   4.2000000e+00   4.5000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.4000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   5.1000000e+00   5.3000000e+00   3.4000000e+00   4.1000000e+00   3.3000000e+00   5.1000000e+00   3.3000000e+00   4.1000000e+00   4.4000000e+00   3.2000000e+00   3.3000000e+00   4.0000000e+00   4.2000000e+00   4.5000000e+00   4.8000000e+00   4.0000000e+00   3.5000000e+00   4.0000000e+00   4.5000000e+00   4.0000000e+00   3.9000000e+00   3.2000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.5000000e+00   4.3000000e+00   4.1000000e+00   3.6000000e+00   3.4000000e+00   3.6000000e+00   3.8000000e+00   3.5000000e+00   2.0000000e-01   2.0000000e-01   3.0000000e-01   
3.0000000e-01   4.0000000e-01   7.0000000e-01   8.0000000e-01   3.0000000e-01   4.0000000e-01   5.0000000e-01   3.0000000e-01   6.0000000e-01   2.0000000e-01   3.0000000e-01   1.1000000e+00   6.0000000e-01   2.0000000e-01   4.0000000e-01   4.0000000e-01   4.0000000e-01   4.0000000e-01   3.0000000e-01   2.0000000e-01   3.1000000e+00   2.9000000e+00   3.3000000e+00   2.4000000e+00   3.0000000e+00   2.9000000e+00   3.1000000e+00   1.7000000e+00   3.0000000e+00   2.3000000e+00   1.9000000e+00   2.6000000e+00   2.4000000e+00   3.1000000e+00   2.0000000e+00   2.8000000e+00   2.9000000e+00   2.5000000e+00   2.9000000e+00   2.3000000e+00   3.2000000e+00   2.4000000e+00   3.3000000e+00   3.1000000e+00   2.7000000e+00   2.8000000e+00   3.2000000e+00   3.4000000e+00   2.9000000e+00   1.9000000e+00   2.2000000e+00   2.1000000e+00   2.3000000e+00   3.5000000e+00   2.9000000e+00   2.9000000e+00   3.1000000e+00   2.8000000e+00   2.5000000e+00   2.4000000e+00   2.8000000e+00   3.0000000e+00   2.4000000e+00   1.7000000e+00   2.6000000e+00   2.6000000e+00   2.6000000e+00   2.7000000e+00   1.4000000e+00   2.5000000e+00   4.4000000e+00   3.5000000e+00   4.3000000e+00   4.0000000e+00   4.2000000e+00   5.0000000e+00   2.9000000e+00   4.7000000e+00   4.2000000e+00   4.5000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.4000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   5.1000000e+00   5.3000000e+00   3.4000000e+00   4.1000000e+00   3.3000000e+00   5.1000000e+00   3.3000000e+00   4.1000000e+00   4.4000000e+00   3.2000000e+00   3.3000000e+00   4.0000000e+00   4.2000000e+00   4.5000000e+00   4.8000000e+00   4.0000000e+00   3.5000000e+00   4.0000000e+00   4.5000000e+00   4.0000000e+00   3.9000000e+00   3.2000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.5000000e+00   4.3000000e+00   4.1000000e+00   3.6000000e+00   3.4000000e+00   3.6000000e+00   3.8000000e+00   3.5000000e+00   1.0000000e-01   5.0000000e-01   4.0000000e-01   2.0000000e-01   6.0000000e-01   7.0000000e-01   4.0000000e-01   3.0000000e-01   3.0000000e-01   4.0000000e-01   8.0000000e-01   1.0000000e-01   2.0000000e-01   1.2000000e+00   8.0000000e-01   4.0000000e-01   4.0000000e-01   5.0000000e-01   3.0000000e-01   6.0000000e-01   2.0000000e-01   2.0000000e-01   3.2000000e+00   3.0000000e+00   3.4000000e+00   2.5000000e+00   3.1000000e+00   3.0000000e+00   3.2000000e+00   1.8000000e+00   3.1000000e+00   2.4000000e+00   2.0000000e+00   2.7000000e+00   2.5000000e+00   3.2000000e+00   2.1000000e+00   2.9000000e+00   3.0000000e+00   2.6000000e+00   3.0000000e+00   2.4000000e+00   3.3000000e+00   2.5000000e+00   3.4000000e+00   3.2000000e+00   2.8000000e+00   2.9000000e+00   3.3000000e+00   3.5000000e+00   3.0000000e+00   2.0000000e+00   2.3000000e+00   2.2000000e+00   2.4000000e+00   3.6000000e+00   3.0000000e+00   3.0000000e+00   3.2000000e+00   2.9000000e+00   2.6000000e+00   2.5000000e+00   2.9000000e+00   3.1000000e+00   2.5000000e+00   1.8000000e+00   2.7000000e+00   2.7000000e+00   2.7000000e+00   2.8000000e+00   1.5000000e+00   2.6000000e+00   4.5000000e+00   3.6000000e+00   4.4000000e+00   4.1000000e+00   4.3000000e+00   5.1000000e+00   3.0000000e+00   4.8000000e+00   4.3000000e+00   4.6000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   5.2000000e+00   5.4000000e+00   3.5000000e+00   4.2000000e+00   3.4000000e+00   5.2000000e+00   3.4000000e+00   4.2000000e+00   4.5000000e+00   3.3000000e+00   3.4000000e+00   4.1000000e+00   4.3000000e+00   
4.6000000e+00   4.9000000e+00   4.1000000e+00   3.6000000e+00   4.1000000e+00   4.6000000e+00   4.1000000e+00   4.0000000e+00   3.3000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.6000000e+00   4.4000000e+00   4.2000000e+00   3.7000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.6000000e+00   5.0000000e-01   4.0000000e-01   2.0000000e-01   7.0000000e-01   8.0000000e-01   3.0000000e-01   2.0000000e-01   3.0000000e-01   3.0000000e-01   8.0000000e-01   1.0000000e-01   2.0000000e-01   1.1000000e+00   8.0000000e-01   4.0000000e-01   5.0000000e-01   4.0000000e-01   4.0000000e-01   6.0000000e-01   3.0000000e-01   2.0000000e-01   3.3000000e+00   3.1000000e+00   3.5000000e+00   2.6000000e+00   3.2000000e+00   3.1000000e+00   3.3000000e+00   1.9000000e+00   3.2000000e+00   2.5000000e+00   2.1000000e+00   2.8000000e+00   2.6000000e+00   3.3000000e+00   2.2000000e+00   3.0000000e+00   3.1000000e+00   2.7000000e+00   3.1000000e+00   2.5000000e+00   3.4000000e+00   2.6000000e+00   3.5000000e+00   3.3000000e+00   2.9000000e+00   3.0000000e+00   3.4000000e+00   3.6000000e+00   3.1000000e+00   2.1000000e+00   2.4000000e+00   2.3000000e+00   2.5000000e+00   3.7000000e+00   3.1000000e+00   3.1000000e+00   3.3000000e+00   3.0000000e+00   2.7000000e+00   2.6000000e+00   3.0000000e+00   3.2000000e+00   2.6000000e+00   1.9000000e+00   2.8000000e+00   2.8000000e+00   2.8000000e+00   2.9000000e+00   1.6000000e+00   2.7000000e+00   4.6000000e+00   3.7000000e+00   4.5000000e+00   4.2000000e+00   4.4000000e+00   5.2000000e+00   3.1000000e+00   4.9000000e+00   4.4000000e+00   4.7000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   5.3000000e+00   5.5000000e+00   3.6000000e+00   4.3000000e+00   3.5000000e+00   5.3000000e+00   3.5000000e+00   4.3000000e+00   4.6000000e+00   3.4000000e+00   3.5000000e+00   4.2000000e+00   4.4000000e+00   4.7000000e+00   5.0000000e+00   4.2000000e+00   3.7000000e+00   4.2000000e+00   4.7000000e+00   4.2000000e+00   4.1000000e+00   3.4000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.7000000e+00   4.5000000e+00   4.3000000e+00   3.8000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.7000000e+00   1.0000000e-01   7.0000000e-01   9.0000000e-01   1.0000000e+00   2.0000000e-01   4.0000000e-01   8.0000000e-01   2.0000000e-01   3.0000000e-01   4.0000000e-01   3.0000000e-01   9.0000000e-01   3.0000000e-01   4.0000000e-01   6.0000000e-01   2.0000000e-01   6.0000000e-01   2.0000000e-01   6.0000000e-01   3.0000000e-01   3.1000000e+00   2.9000000e+00   3.3000000e+00   2.4000000e+00   3.0000000e+00   2.9000000e+00   3.1000000e+00   1.7000000e+00   3.0000000e+00   2.3000000e+00   1.9000000e+00   2.6000000e+00   2.4000000e+00   3.1000000e+00   2.0000000e+00   2.8000000e+00   2.9000000e+00   2.5000000e+00   2.9000000e+00   2.3000000e+00   3.2000000e+00   2.4000000e+00   3.3000000e+00   3.1000000e+00   2.7000000e+00   2.8000000e+00   3.2000000e+00   3.4000000e+00   2.9000000e+00   1.9000000e+00   2.2000000e+00   2.1000000e+00   2.3000000e+00   3.5000000e+00   2.9000000e+00   2.9000000e+00   3.1000000e+00   2.8000000e+00   2.5000000e+00   2.4000000e+00   2.8000000e+00   3.0000000e+00   2.4000000e+00   1.7000000e+00   2.6000000e+00   2.6000000e+00   2.6000000e+00   2.7000000e+00   1.4000000e+00   2.5000000e+00   4.4000000e+00   3.5000000e+00   4.3000000e+00   4.0000000e+00   4.2000000e+00   5.0000000e+00   2.9000000e+00   4.7000000e+00   4.2000000e+00   4.5000000e+00   3.5000000e+00   
3.7000000e+00   3.9000000e+00   3.4000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   5.1000000e+00   5.3000000e+00   3.4000000e+00   4.1000000e+00   3.3000000e+00   5.1000000e+00   3.3000000e+00   4.1000000e+00   4.4000000e+00   3.2000000e+00   3.3000000e+00   4.0000000e+00   4.2000000e+00   4.5000000e+00   4.8000000e+00   4.0000000e+00   3.5000000e+00   4.0000000e+00   4.5000000e+00   4.0000000e+00   3.9000000e+00   3.2000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.5000000e+00   4.3000000e+00   4.1000000e+00   3.6000000e+00   3.4000000e+00   3.6000000e+00   3.8000000e+00   3.5000000e+00   6.0000000e-01   1.0000000e+00   1.1000000e+00   1.0000000e-01   4.0000000e-01   7.0000000e-01   1.0000000e-01   4.0000000e-01   3.0000000e-01   4.0000000e-01   8.0000000e-01   4.0000000e-01   4.0000000e-01   7.0000000e-01   2.0000000e-01   7.0000000e-01   2.0000000e-01   6.0000000e-01   2.0000000e-01   3.1000000e+00   2.9000000e+00   3.3000000e+00   2.4000000e+00   3.0000000e+00   2.9000000e+00   3.1000000e+00   1.7000000e+00   3.0000000e+00   2.3000000e+00   1.9000000e+00   2.6000000e+00   2.4000000e+00   3.1000000e+00   2.0000000e+00   2.8000000e+00   2.9000000e+00   2.5000000e+00   2.9000000e+00   2.3000000e+00   3.2000000e+00   2.4000000e+00   3.3000000e+00   3.1000000e+00   2.7000000e+00   2.8000000e+00   3.2000000e+00   3.4000000e+00   2.9000000e+00   1.9000000e+00   2.2000000e+00   2.1000000e+00   2.3000000e+00   3.5000000e+00   2.9000000e+00   2.9000000e+00   3.1000000e+00   2.8000000e+00   2.5000000e+00   2.4000000e+00   2.8000000e+00   3.0000000e+00   2.4000000e+00   1.7000000e+00   2.6000000e+00   2.6000000e+00   2.6000000e+00   2.7000000e+00   1.4000000e+00   2.5000000e+00   4.4000000e+00   3.5000000e+00   4.3000000e+00   4.0000000e+00   4.2000000e+00   5.0000000e+00   2.9000000e+00   4.7000000e+00   4.2000000e+00   4.5000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.4000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   5.1000000e+00   5.3000000e+00   3.4000000e+00   4.1000000e+00   3.3000000e+00   5.1000000e+00   3.3000000e+00   4.1000000e+00   4.4000000e+00   3.2000000e+00   3.3000000e+00   4.0000000e+00   4.2000000e+00   4.5000000e+00   4.8000000e+00   4.0000000e+00   3.5000000e+00   4.0000000e+00   4.5000000e+00   4.0000000e+00   3.9000000e+00   3.2000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.5000000e+00   4.3000000e+00   4.1000000e+00   3.6000000e+00   3.4000000e+00   3.6000000e+00   3.8000000e+00   3.5000000e+00   7.0000000e-01   8.0000000e-01   5.0000000e-01   4.0000000e-01   2.0000000e-01   5.0000000e-01   1.0000000e+00   3.0000000e-01   4.0000000e-01   1.1000000e+00   1.0000000e+00   4.0000000e-01   4.0000000e-01   6.0000000e-01   4.0000000e-01   8.0000000e-01   3.0000000e-01   4.0000000e-01   3.2000000e+00   3.0000000e+00   3.4000000e+00   2.5000000e+00   3.1000000e+00   3.0000000e+00   3.2000000e+00   1.8000000e+00   3.1000000e+00   2.4000000e+00   2.0000000e+00   2.7000000e+00   2.5000000e+00   3.2000000e+00   2.1000000e+00   2.9000000e+00   3.0000000e+00   2.6000000e+00   3.0000000e+00   2.4000000e+00   3.3000000e+00   2.5000000e+00   3.4000000e+00   3.2000000e+00   2.8000000e+00   2.9000000e+00   3.3000000e+00   3.5000000e+00   3.0000000e+00   2.0000000e+00   2.3000000e+00   2.2000000e+00   2.4000000e+00   3.6000000e+00   3.0000000e+00   3.0000000e+00   3.2000000e+00   2.9000000e+00   2.6000000e+00   2.5000000e+00   2.9000000e+00   3.1000000e+00   2.5000000e+00   1.8000000e+00   2.7000000e+00   2.7000000e+00   
2.7000000e+00   2.8000000e+00   1.5000000e+00   2.6000000e+00   4.5000000e+00   3.6000000e+00   4.4000000e+00   4.1000000e+00   4.3000000e+00   5.1000000e+00   3.0000000e+00   4.8000000e+00   4.3000000e+00   4.6000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   5.2000000e+00   5.4000000e+00   3.5000000e+00   4.2000000e+00   3.4000000e+00   5.2000000e+00   3.4000000e+00   4.2000000e+00   4.5000000e+00   3.3000000e+00   3.4000000e+00   4.1000000e+00   4.3000000e+00   4.6000000e+00   4.9000000e+00   4.1000000e+00   3.6000000e+00   4.1000000e+00   4.6000000e+00   4.1000000e+00   4.0000000e+00   3.3000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.6000000e+00   4.4000000e+00   4.2000000e+00   3.7000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.6000000e+00   3.0000000e-01   1.0000000e+00   9.0000000e-01   6.0000000e-01   1.0000000e+00   1.1000000e+00   7.0000000e-01   6.0000000e-01   1.8000000e+00   9.0000000e-01   6.0000000e-01   4.0000000e-01   1.1000000e+00   3.0000000e-01   9.0000000e-01   4.0000000e-01   8.0000000e-01   3.2000000e+00   3.0000000e+00   3.4000000e+00   2.5000000e+00   3.1000000e+00   3.0000000e+00   3.2000000e+00   1.8000000e+00   3.1000000e+00   2.4000000e+00   2.1000000e+00   2.7000000e+00   2.5000000e+00   3.2000000e+00   2.1000000e+00   2.9000000e+00   3.0000000e+00   2.6000000e+00   3.0000000e+00   2.4000000e+00   3.3000000e+00   2.5000000e+00   3.4000000e+00   3.2000000e+00   2.8000000e+00   2.9000000e+00   3.3000000e+00   3.5000000e+00   3.0000000e+00   2.0000000e+00   2.3000000e+00   2.2000000e+00   2.4000000e+00   3.6000000e+00   3.0000000e+00   3.0000000e+00   3.2000000e+00   2.9000000e+00   2.6000000e+00   2.5000000e+00   2.9000000e+00   3.1000000e+00   2.5000000e+00   1.8000000e+00   2.7000000e+00   2.7000000e+00   2.7000000e+00   2.8000000e+00   1.6000000e+00   2.6000000e+00   4.5000000e+00   3.6000000e+00   4.4000000e+00   4.1000000e+00   4.3000000e+00   5.1000000e+00   3.0000000e+00   4.8000000e+00   4.3000000e+00   4.6000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   5.2000000e+00   5.4000000e+00   3.5000000e+00   4.2000000e+00   3.4000000e+00   5.2000000e+00   3.4000000e+00   4.2000000e+00   4.5000000e+00   3.3000000e+00   3.4000000e+00   4.1000000e+00   4.3000000e+00   4.6000000e+00   4.9000000e+00   4.1000000e+00   3.6000000e+00   4.1000000e+00   4.6000000e+00   4.1000000e+00   4.0000000e+00   3.3000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.6000000e+00   4.4000000e+00   4.2000000e+00   3.7000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.6000000e+00   1.1000000e+00   1.0000000e+00   7.0000000e-01   1.1000000e+00   1.2000000e+00   8.0000000e-01   7.0000000e-01   1.9000000e+00   1.1000000e+00   7.0000000e-01   5.0000000e-01   1.2000000e+00   4.0000000e-01   1.0000000e+00   5.0000000e-01   9.0000000e-01   3.3000000e+00   3.1000000e+00   3.5000000e+00   2.6000000e+00   3.2000000e+00   3.1000000e+00   3.3000000e+00   1.9000000e+00   3.2000000e+00   2.5000000e+00   2.2000000e+00   2.8000000e+00   2.6000000e+00   3.3000000e+00   2.2000000e+00   3.0000000e+00   3.1000000e+00   2.7000000e+00   3.1000000e+00   2.5000000e+00   3.4000000e+00   2.6000000e+00   3.5000000e+00   3.3000000e+00   2.9000000e+00   3.0000000e+00   3.4000000e+00   3.6000000e+00   3.1000000e+00   2.1000000e+00   2.4000000e+00   2.3000000e+00   2.5000000e+00   3.7000000e+00   3.1000000e+00   
3.1000000e+00   3.3000000e+00   3.0000000e+00   2.7000000e+00   2.6000000e+00   3.0000000e+00   3.2000000e+00   2.6000000e+00   1.9000000e+00   2.8000000e+00   2.8000000e+00   2.8000000e+00   2.9000000e+00   1.7000000e+00   2.7000000e+00   4.6000000e+00   3.7000000e+00   4.5000000e+00   4.2000000e+00   4.4000000e+00   5.2000000e+00   3.1000000e+00   4.9000000e+00   4.4000000e+00   4.7000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   5.3000000e+00   5.5000000e+00   3.6000000e+00   4.3000000e+00   3.5000000e+00   5.3000000e+00   3.5000000e+00   4.3000000e+00   4.6000000e+00   3.4000000e+00   3.5000000e+00   4.2000000e+00   4.4000000e+00   4.7000000e+00   5.0000000e+00   4.2000000e+00   3.7000000e+00   4.2000000e+00   4.7000000e+00   4.2000000e+00   4.1000000e+00   3.4000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.7000000e+00   4.5000000e+00   4.3000000e+00   3.8000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.7000000e+00   3.0000000e-01   6.0000000e-01   0.0000000e+00   5.0000000e-01   3.0000000e-01   4.0000000e-01   8.0000000e-01   5.0000000e-01   5.0000000e-01   7.0000000e-01   2.0000000e-01   7.0000000e-01   3.0000000e-01   6.0000000e-01   2.0000000e-01   3.2000000e+00   3.0000000e+00   3.4000000e+00   2.5000000e+00   3.1000000e+00   3.0000000e+00   3.2000000e+00   1.8000000e+00   3.1000000e+00   2.4000000e+00   2.0000000e+00   2.7000000e+00   2.5000000e+00   3.2000000e+00   2.1000000e+00   2.9000000e+00   3.0000000e+00   2.6000000e+00   3.0000000e+00   2.4000000e+00   3.3000000e+00   2.5000000e+00   3.4000000e+00   3.2000000e+00   2.8000000e+00   2.9000000e+00   3.3000000e+00   3.5000000e+00   3.0000000e+00   2.0000000e+00   2.3000000e+00   2.2000000e+00   2.4000000e+00   3.6000000e+00   3.0000000e+00   3.0000000e+00   3.2000000e+00   2.9000000e+00   2.6000000e+00   2.5000000e+00   2.9000000e+00   3.1000000e+00   2.5000000e+00   1.8000000e+00   2.7000000e+00   2.7000000e+00   2.7000000e+00   2.8000000e+00   1.5000000e+00   2.6000000e+00   4.5000000e+00   3.6000000e+00   4.4000000e+00   4.1000000e+00   4.3000000e+00   5.1000000e+00   3.0000000e+00   4.8000000e+00   4.3000000e+00   4.6000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   5.2000000e+00   5.4000000e+00   3.5000000e+00   4.2000000e+00   3.4000000e+00   5.2000000e+00   3.4000000e+00   4.2000000e+00   4.5000000e+00   3.3000000e+00   3.4000000e+00   4.1000000e+00   4.3000000e+00   4.6000000e+00   4.9000000e+00   4.1000000e+00   3.6000000e+00   4.1000000e+00   4.6000000e+00   4.1000000e+00   4.0000000e+00   3.3000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.6000000e+00   4.4000000e+00   4.2000000e+00   3.7000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.6000000e+00   5.0000000e-01   3.0000000e-01   6.0000000e-01   3.0000000e-01   3.0000000e-01   9.0000000e-01   6.0000000e-01   4.0000000e-01   7.0000000e-01   2.0000000e-01   6.0000000e-01   4.0000000e-01   5.0000000e-01   2.0000000e-01   3.5000000e+00   3.3000000e+00   3.7000000e+00   2.8000000e+00   3.4000000e+00   3.3000000e+00   3.5000000e+00   2.1000000e+00   3.4000000e+00   2.7000000e+00   2.3000000e+00   3.0000000e+00   2.8000000e+00   3.5000000e+00   2.4000000e+00   3.2000000e+00   3.3000000e+00   2.9000000e+00   3.3000000e+00   2.7000000e+00   3.6000000e+00   2.8000000e+00   3.7000000e+00   3.5000000e+00   3.1000000e+00   3.2000000e+00   3.6000000e+00   3.8000000e+00   
3.3000000e+00   2.3000000e+00   2.6000000e+00   2.5000000e+00   2.7000000e+00   3.9000000e+00   3.3000000e+00   3.3000000e+00   3.5000000e+00   3.2000000e+00   2.9000000e+00   2.8000000e+00   3.2000000e+00   3.4000000e+00   2.8000000e+00   2.1000000e+00   3.0000000e+00   3.0000000e+00   3.0000000e+00   3.1000000e+00   1.8000000e+00   2.9000000e+00   4.8000000e+00   3.9000000e+00   4.7000000e+00   4.4000000e+00   4.6000000e+00   5.4000000e+00   3.3000000e+00   5.1000000e+00   4.6000000e+00   4.9000000e+00   3.9000000e+00   4.1000000e+00   4.3000000e+00   3.8000000e+00   3.9000000e+00   4.1000000e+00   4.3000000e+00   5.5000000e+00   5.7000000e+00   3.8000000e+00   4.5000000e+00   3.7000000e+00   5.5000000e+00   3.7000000e+00   4.5000000e+00   4.8000000e+00   3.6000000e+00   3.7000000e+00   4.4000000e+00   4.6000000e+00   4.9000000e+00   5.2000000e+00   4.4000000e+00   3.9000000e+00   4.4000000e+00   4.9000000e+00   4.4000000e+00   4.3000000e+00   3.6000000e+00   4.2000000e+00   4.4000000e+00   3.9000000e+00   3.9000000e+00   4.7000000e+00   4.5000000e+00   4.0000000e+00   3.8000000e+00   4.0000000e+00   4.2000000e+00   3.9000000e+00   6.0000000e-01   1.1000000e+00   4.0000000e-01   5.0000000e-01   1.2000000e+00   1.1000000e+00   5.0000000e-01   6.0000000e-01   7.0000000e-01   4.0000000e-01   9.0000000e-01   2.0000000e-01   5.0000000e-01   3.4000000e+00   3.2000000e+00   3.6000000e+00   2.7000000e+00   3.3000000e+00   3.2000000e+00   3.4000000e+00   2.0000000e+00   3.3000000e+00   2.6000000e+00   2.2000000e+00   2.9000000e+00   2.7000000e+00   3.4000000e+00   2.3000000e+00   3.1000000e+00   3.2000000e+00   2.8000000e+00   3.2000000e+00   2.6000000e+00   3.5000000e+00   2.7000000e+00   3.6000000e+00   3.4000000e+00   3.0000000e+00   3.1000000e+00   3.5000000e+00   3.7000000e+00   3.2000000e+00   2.2000000e+00   2.5000000e+00   2.4000000e+00   2.6000000e+00   3.8000000e+00   3.2000000e+00   3.2000000e+00   3.4000000e+00   3.1000000e+00   2.8000000e+00   2.7000000e+00   3.1000000e+00   3.3000000e+00   2.7000000e+00   2.0000000e+00   2.9000000e+00   2.9000000e+00   2.9000000e+00   3.0000000e+00   1.7000000e+00   2.8000000e+00   4.7000000e+00   3.8000000e+00   4.6000000e+00   4.3000000e+00   4.5000000e+00   5.3000000e+00   3.2000000e+00   5.0000000e+00   4.5000000e+00   4.8000000e+00   3.8000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.8000000e+00   4.0000000e+00   4.2000000e+00   5.4000000e+00   5.6000000e+00   3.7000000e+00   4.4000000e+00   3.6000000e+00   5.4000000e+00   3.6000000e+00   4.4000000e+00   4.7000000e+00   3.5000000e+00   3.6000000e+00   4.3000000e+00   4.5000000e+00   4.8000000e+00   5.1000000e+00   4.3000000e+00   3.8000000e+00   4.3000000e+00   4.8000000e+00   4.3000000e+00   4.2000000e+00   3.5000000e+00   4.1000000e+00   4.3000000e+00   3.8000000e+00   3.8000000e+00   4.6000000e+00   4.4000000e+00   3.9000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.8000000e+00   5.0000000e-01   3.0000000e-01   4.0000000e-01   8.0000000e-01   5.0000000e-01   5.0000000e-01   7.0000000e-01   2.0000000e-01   7.0000000e-01   3.0000000e-01   6.0000000e-01   2.0000000e-01   3.2000000e+00   3.0000000e+00   3.4000000e+00   2.5000000e+00   3.1000000e+00   3.0000000e+00   3.2000000e+00   1.8000000e+00   3.1000000e+00   2.4000000e+00   2.0000000e+00   2.7000000e+00   2.5000000e+00   3.2000000e+00   2.1000000e+00   2.9000000e+00   3.0000000e+00   2.6000000e+00   3.0000000e+00   2.4000000e+00   3.3000000e+00   2.5000000e+00   3.4000000e+00   3.2000000e+00   2.8000000e+00   
2.9000000e+00   3.3000000e+00   3.5000000e+00   3.0000000e+00   2.0000000e+00   2.3000000e+00   2.2000000e+00   2.4000000e+00   3.6000000e+00   3.0000000e+00   3.0000000e+00   3.2000000e+00   2.9000000e+00   2.6000000e+00   2.5000000e+00   2.9000000e+00   3.1000000e+00   2.5000000e+00   1.8000000e+00   2.7000000e+00   2.7000000e+00   2.7000000e+00   2.8000000e+00   1.5000000e+00   2.6000000e+00   4.5000000e+00   3.6000000e+00   4.4000000e+00   4.1000000e+00   4.3000000e+00   5.1000000e+00   3.0000000e+00   4.8000000e+00   4.3000000e+00   4.6000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   5.2000000e+00   5.4000000e+00   3.5000000e+00   4.2000000e+00   3.4000000e+00   5.2000000e+00   3.4000000e+00   4.2000000e+00   4.5000000e+00   3.3000000e+00   3.4000000e+00   4.1000000e+00   4.3000000e+00   4.6000000e+00   4.9000000e+00   4.1000000e+00   3.6000000e+00   4.1000000e+00   4.6000000e+00   4.1000000e+00   4.0000000e+00   3.3000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.6000000e+00   4.4000000e+00   4.2000000e+00   3.7000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.6000000e+00   7.0000000e-01   6.0000000e-01   7.0000000e-01   2.0000000e-01   6.0000000e-01   8.0000000e-01   4.0000000e-01   8.0000000e-01   2.0000000e-01   9.0000000e-01   6.0000000e-01   3.4000000e+00   3.2000000e+00   3.6000000e+00   2.7000000e+00   3.3000000e+00   3.2000000e+00   3.4000000e+00   2.0000000e+00   3.3000000e+00   2.6000000e+00   2.2000000e+00   2.9000000e+00   2.7000000e+00   3.4000000e+00   2.3000000e+00   3.1000000e+00   3.2000000e+00   2.8000000e+00   3.2000000e+00   2.6000000e+00   3.5000000e+00   2.7000000e+00   3.6000000e+00   3.4000000e+00   3.0000000e+00   3.1000000e+00   3.5000000e+00   3.7000000e+00   3.2000000e+00   2.2000000e+00   2.5000000e+00   2.4000000e+00   2.6000000e+00   3.8000000e+00   3.2000000e+00   3.2000000e+00   3.4000000e+00   3.1000000e+00   2.8000000e+00   2.7000000e+00   3.1000000e+00   3.3000000e+00   2.7000000e+00   2.0000000e+00   2.9000000e+00   2.9000000e+00   2.9000000e+00   3.0000000e+00   1.7000000e+00   2.8000000e+00   4.7000000e+00   3.8000000e+00   4.6000000e+00   4.3000000e+00   4.5000000e+00   5.3000000e+00   3.2000000e+00   5.0000000e+00   4.5000000e+00   4.8000000e+00   3.8000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.8000000e+00   4.0000000e+00   4.2000000e+00   5.4000000e+00   5.6000000e+00   3.7000000e+00   4.4000000e+00   3.6000000e+00   5.4000000e+00   3.6000000e+00   4.4000000e+00   4.7000000e+00   3.5000000e+00   3.6000000e+00   4.3000000e+00   4.5000000e+00   4.8000000e+00   5.1000000e+00   4.3000000e+00   3.8000000e+00   4.3000000e+00   4.8000000e+00   4.3000000e+00   4.2000000e+00   3.5000000e+00   4.1000000e+00   4.3000000e+00   3.8000000e+00   3.8000000e+00   4.6000000e+00   4.4000000e+00   3.9000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.8000000e+00   2.0000000e-01   1.1000000e+00   7.0000000e-01   4.0000000e-01   4.0000000e-01   4.0000000e-01   4.0000000e-01   5.0000000e-01   3.0000000e-01   1.0000000e-01   3.2000000e+00   3.0000000e+00   3.4000000e+00   2.5000000e+00   3.1000000e+00   3.0000000e+00   3.2000000e+00   1.8000000e+00   3.1000000e+00   2.4000000e+00   2.0000000e+00   2.7000000e+00   2.5000000e+00   3.2000000e+00   2.1000000e+00   2.9000000e+00   3.0000000e+00   2.6000000e+00   3.0000000e+00   2.4000000e+00   3.3000000e+00   2.5000000e+00   3.4000000e+00   3.2000000e+00   2.8000000e+00   2.9000000e+00   
3.3000000e+00   3.5000000e+00   3.0000000e+00   2.0000000e+00   2.3000000e+00   2.2000000e+00   2.4000000e+00   3.6000000e+00   3.0000000e+00   3.0000000e+00   3.2000000e+00   2.9000000e+00   2.6000000e+00   2.5000000e+00   2.9000000e+00   3.1000000e+00   2.5000000e+00   1.8000000e+00   2.7000000e+00   2.7000000e+00   2.7000000e+00   2.8000000e+00   1.5000000e+00   2.6000000e+00   4.5000000e+00   3.6000000e+00   4.4000000e+00   4.1000000e+00   4.3000000e+00   5.1000000e+00   3.0000000e+00   4.8000000e+00   4.3000000e+00   4.6000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   5.2000000e+00   5.4000000e+00   3.5000000e+00   4.2000000e+00   3.4000000e+00   5.2000000e+00   3.4000000e+00   4.2000000e+00   4.5000000e+00   3.3000000e+00   3.4000000e+00   4.1000000e+00   4.3000000e+00   4.6000000e+00   4.9000000e+00   4.1000000e+00   3.6000000e+00   4.1000000e+00   4.6000000e+00   4.1000000e+00   4.0000000e+00   3.3000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.6000000e+00   4.4000000e+00   4.2000000e+00   3.7000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.6000000e+00   1.2000000e+00   6.0000000e-01   3.0000000e-01   6.0000000e-01   5.0000000e-01   3.0000000e-01   4.0000000e-01   3.0000000e-01   2.0000000e-01   3.4000000e+00   3.2000000e+00   3.6000000e+00   2.7000000e+00   3.3000000e+00   3.2000000e+00   3.4000000e+00   2.0000000e+00   3.3000000e+00   2.6000000e+00   2.2000000e+00   2.9000000e+00   2.7000000e+00   3.4000000e+00   2.3000000e+00   3.1000000e+00   3.2000000e+00   2.8000000e+00   3.2000000e+00   2.6000000e+00   3.5000000e+00   2.7000000e+00   3.6000000e+00   3.4000000e+00   3.0000000e+00   3.1000000e+00   3.5000000e+00   3.7000000e+00   3.2000000e+00   2.2000000e+00   2.5000000e+00   2.4000000e+00   2.6000000e+00   3.8000000e+00   3.2000000e+00   3.2000000e+00   3.4000000e+00   3.1000000e+00   2.8000000e+00   2.7000000e+00   3.1000000e+00   3.3000000e+00   2.7000000e+00   2.0000000e+00   2.9000000e+00   2.9000000e+00   2.9000000e+00   3.0000000e+00   1.7000000e+00   2.8000000e+00   4.7000000e+00   3.8000000e+00   4.6000000e+00   4.3000000e+00   4.5000000e+00   5.3000000e+00   3.2000000e+00   5.0000000e+00   4.5000000e+00   4.8000000e+00   3.8000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.8000000e+00   4.0000000e+00   4.2000000e+00   5.4000000e+00   5.6000000e+00   3.7000000e+00   4.4000000e+00   3.6000000e+00   5.4000000e+00   3.6000000e+00   4.4000000e+00   4.7000000e+00   3.5000000e+00   3.6000000e+00   4.3000000e+00   4.5000000e+00   4.8000000e+00   5.1000000e+00   4.3000000e+00   3.8000000e+00   4.3000000e+00   4.8000000e+00   4.3000000e+00   4.2000000e+00   3.5000000e+00   4.1000000e+00   4.3000000e+00   3.8000000e+00   3.8000000e+00   4.6000000e+00   4.4000000e+00   3.9000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.8000000e+00   9.0000000e-01   1.2000000e+00   1.5000000e+00   7.0000000e-01   1.5000000e+00   9.0000000e-01   1.4000000e+00   1.0000000e+00   3.4000000e+00   3.2000000e+00   3.6000000e+00   2.7000000e+00   3.3000000e+00   3.2000000e+00   3.4000000e+00   2.0000000e+00   3.3000000e+00   2.6000000e+00   2.2000000e+00   2.9000000e+00   2.7000000e+00   3.4000000e+00   2.3000000e+00   3.1000000e+00   3.2000000e+00   2.8000000e+00   3.2000000e+00   2.6000000e+00   3.5000000e+00   2.7000000e+00   3.6000000e+00   3.4000000e+00   3.0000000e+00   3.1000000e+00   3.5000000e+00   3.7000000e+00   3.2000000e+00   2.2000000e+00   2.5000000e+00   
2.4000000e+00   2.6000000e+00   3.8000000e+00   3.2000000e+00   3.2000000e+00   3.4000000e+00   3.1000000e+00   2.8000000e+00   2.7000000e+00   3.1000000e+00   3.3000000e+00   2.7000000e+00   2.0000000e+00   2.9000000e+00   2.9000000e+00   2.9000000e+00   3.0000000e+00   1.7000000e+00   2.8000000e+00   4.7000000e+00   3.8000000e+00   4.6000000e+00   4.3000000e+00   4.5000000e+00   5.3000000e+00   3.2000000e+00   5.0000000e+00   4.5000000e+00   4.8000000e+00   3.8000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.8000000e+00   4.0000000e+00   4.2000000e+00   5.4000000e+00   5.6000000e+00   3.7000000e+00   4.4000000e+00   3.6000000e+00   5.4000000e+00   3.6000000e+00   4.4000000e+00   4.7000000e+00   3.5000000e+00   3.6000000e+00   4.3000000e+00   4.5000000e+00   4.8000000e+00   5.1000000e+00   4.3000000e+00   3.8000000e+00   4.3000000e+00   4.8000000e+00   4.3000000e+00   4.2000000e+00   3.5000000e+00   4.1000000e+00   4.3000000e+00   3.8000000e+00   3.8000000e+00   4.6000000e+00   4.4000000e+00   3.9000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.8000000e+00   6.0000000e-01   7.0000000e-01   4.0000000e-01   7.0000000e-01   2.0000000e-01   9.0000000e-01   6.0000000e-01   3.4000000e+00   3.2000000e+00   3.6000000e+00   2.7000000e+00   3.3000000e+00   3.2000000e+00   3.4000000e+00   2.0000000e+00   3.3000000e+00   2.6000000e+00   2.2000000e+00   2.9000000e+00   2.7000000e+00   3.4000000e+00   2.3000000e+00   3.1000000e+00   3.2000000e+00   2.8000000e+00   3.2000000e+00   2.6000000e+00   3.5000000e+00   2.7000000e+00   3.6000000e+00   3.4000000e+00   3.0000000e+00   3.1000000e+00   3.5000000e+00   3.7000000e+00   3.2000000e+00   2.2000000e+00   2.5000000e+00   2.4000000e+00   2.6000000e+00   3.8000000e+00   3.2000000e+00   3.2000000e+00   3.4000000e+00   3.1000000e+00   2.8000000e+00   2.7000000e+00   3.1000000e+00   3.3000000e+00   2.7000000e+00   2.0000000e+00   2.9000000e+00   2.9000000e+00   2.9000000e+00   3.0000000e+00   1.7000000e+00   2.8000000e+00   4.7000000e+00   3.8000000e+00   4.6000000e+00   4.3000000e+00   4.5000000e+00   5.3000000e+00   3.2000000e+00   5.0000000e+00   4.5000000e+00   4.8000000e+00   3.8000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.8000000e+00   4.0000000e+00   4.2000000e+00   5.4000000e+00   5.6000000e+00   3.7000000e+00   4.4000000e+00   3.6000000e+00   5.4000000e+00   3.6000000e+00   4.4000000e+00   4.7000000e+00   3.5000000e+00   3.6000000e+00   4.3000000e+00   4.5000000e+00   4.8000000e+00   5.1000000e+00   4.3000000e+00   3.8000000e+00   4.3000000e+00   4.8000000e+00   4.3000000e+00   4.2000000e+00   3.5000000e+00   4.1000000e+00   4.3000000e+00   3.8000000e+00   3.8000000e+00   4.6000000e+00   4.4000000e+00   3.9000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.8000000e+00   3.0000000e-01   5.0000000e-01   4.0000000e-01   4.0000000e-01   4.0000000e-01   4.0000000e-01   3.1000000e+00   2.9000000e+00   3.3000000e+00   2.4000000e+00   3.0000000e+00   2.9000000e+00   3.1000000e+00   1.7000000e+00   3.0000000e+00   2.3000000e+00   1.9000000e+00   2.6000000e+00   2.4000000e+00   3.1000000e+00   2.0000000e+00   2.8000000e+00   2.9000000e+00   2.5000000e+00   2.9000000e+00   2.3000000e+00   3.2000000e+00   2.4000000e+00   3.3000000e+00   3.1000000e+00   2.7000000e+00   2.8000000e+00   3.2000000e+00   3.4000000e+00   2.9000000e+00   1.9000000e+00   2.2000000e+00   2.1000000e+00   2.3000000e+00   3.5000000e+00   2.9000000e+00   2.9000000e+00   3.1000000e+00   2.8000000e+00   2.5000000e+00   2.4000000e+00   
2.8000000e+00   3.0000000e+00   2.4000000e+00   1.7000000e+00   2.6000000e+00   2.6000000e+00   2.6000000e+00   2.7000000e+00   1.4000000e+00   2.5000000e+00   4.4000000e+00   3.5000000e+00   4.3000000e+00   4.0000000e+00   4.2000000e+00   5.0000000e+00   2.9000000e+00   4.7000000e+00   4.2000000e+00   4.5000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.4000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   5.1000000e+00   5.3000000e+00   3.4000000e+00   4.1000000e+00   3.3000000e+00   5.1000000e+00   3.3000000e+00   4.1000000e+00   4.4000000e+00   3.2000000e+00   3.3000000e+00   4.0000000e+00   4.2000000e+00   4.5000000e+00   4.8000000e+00   4.0000000e+00   3.5000000e+00   4.0000000e+00   4.5000000e+00   4.0000000e+00   3.9000000e+00   3.2000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.5000000e+00   4.3000000e+00   4.1000000e+00   3.6000000e+00   3.4000000e+00   3.6000000e+00   3.8000000e+00   3.5000000e+00   8.0000000e-01   3.0000000e-01   6.0000000e-01   4.0000000e-01   5.0000000e-01   2.8000000e+00   2.6000000e+00   3.0000000e+00   2.1000000e+00   2.7000000e+00   2.6000000e+00   2.8000000e+00   1.4000000e+00   2.7000000e+00   2.0000000e+00   1.8000000e+00   2.3000000e+00   2.1000000e+00   2.8000000e+00   1.7000000e+00   2.5000000e+00   2.6000000e+00   2.2000000e+00   2.6000000e+00   2.0000000e+00   2.9000000e+00   2.1000000e+00   3.0000000e+00   2.8000000e+00   2.4000000e+00   2.5000000e+00   2.9000000e+00   3.1000000e+00   2.6000000e+00   1.6000000e+00   1.9000000e+00   1.8000000e+00   2.0000000e+00   3.2000000e+00   2.6000000e+00   2.6000000e+00   2.8000000e+00   2.5000000e+00   2.2000000e+00   2.1000000e+00   2.5000000e+00   2.7000000e+00   2.1000000e+00   1.5000000e+00   2.3000000e+00   2.3000000e+00   2.3000000e+00   2.4000000e+00   1.3000000e+00   2.2000000e+00   4.1000000e+00   3.2000000e+00   4.0000000e+00   3.7000000e+00   3.9000000e+00   4.7000000e+00   2.6000000e+00   4.4000000e+00   3.9000000e+00   4.2000000e+00   3.2000000e+00   3.4000000e+00   3.6000000e+00   3.1000000e+00   3.2000000e+00   3.4000000e+00   3.6000000e+00   4.8000000e+00   5.0000000e+00   3.1000000e+00   3.8000000e+00   3.0000000e+00   4.8000000e+00   3.0000000e+00   3.8000000e+00   4.1000000e+00   2.9000000e+00   3.0000000e+00   3.7000000e+00   3.9000000e+00   4.2000000e+00   4.5000000e+00   3.7000000e+00   3.2000000e+00   3.7000000e+00   4.2000000e+00   3.7000000e+00   3.6000000e+00   2.9000000e+00   3.5000000e+00   3.7000000e+00   3.2000000e+00   3.2000000e+00   4.0000000e+00   3.8000000e+00   3.3000000e+00   3.1000000e+00   3.3000000e+00   3.5000000e+00   3.2000000e+00   8.0000000e-01   2.0000000e-01   7.0000000e-01   3.0000000e-01   3.3000000e+00   3.1000000e+00   3.5000000e+00   2.6000000e+00   3.2000000e+00   3.1000000e+00   3.3000000e+00   1.9000000e+00   3.2000000e+00   2.5000000e+00   2.1000000e+00   2.8000000e+00   2.6000000e+00   3.3000000e+00   2.2000000e+00   3.0000000e+00   3.1000000e+00   2.7000000e+00   3.1000000e+00   2.5000000e+00   3.4000000e+00   2.6000000e+00   3.5000000e+00   3.3000000e+00   2.9000000e+00   3.0000000e+00   3.4000000e+00   3.6000000e+00   3.1000000e+00   2.1000000e+00   2.4000000e+00   2.3000000e+00   2.5000000e+00   3.7000000e+00   3.1000000e+00   3.1000000e+00   3.3000000e+00   3.0000000e+00   2.7000000e+00   2.6000000e+00   3.0000000e+00   3.2000000e+00   2.6000000e+00   1.9000000e+00   2.8000000e+00   2.8000000e+00   2.8000000e+00   2.9000000e+00   1.6000000e+00   2.7000000e+00   4.6000000e+00   3.7000000e+00   4.5000000e+00   
4.2000000e+00   4.4000000e+00   5.2000000e+00   3.1000000e+00   4.9000000e+00   4.4000000e+00   4.7000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   5.3000000e+00   5.5000000e+00   3.6000000e+00   4.3000000e+00   3.5000000e+00   5.3000000e+00   3.5000000e+00   4.3000000e+00   4.6000000e+00   3.4000000e+00   3.5000000e+00   4.2000000e+00   4.4000000e+00   4.7000000e+00   5.0000000e+00   4.2000000e+00   3.7000000e+00   4.2000000e+00   4.7000000e+00   4.2000000e+00   4.1000000e+00   3.4000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.7000000e+00   4.5000000e+00   4.3000000e+00   3.8000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.7000000e+00   6.0000000e-01   2.0000000e-01   5.0000000e-01   3.1000000e+00   2.9000000e+00   3.3000000e+00   2.4000000e+00   3.0000000e+00   2.9000000e+00   3.1000000e+00   1.7000000e+00   3.0000000e+00   2.3000000e+00   1.9000000e+00   2.6000000e+00   2.4000000e+00   3.1000000e+00   2.0000000e+00   2.8000000e+00   2.9000000e+00   2.5000000e+00   2.9000000e+00   2.3000000e+00   3.2000000e+00   2.4000000e+00   3.3000000e+00   3.1000000e+00   2.7000000e+00   2.8000000e+00   3.2000000e+00   3.4000000e+00   2.9000000e+00   1.9000000e+00   2.2000000e+00   2.1000000e+00   2.3000000e+00   3.5000000e+00   2.9000000e+00   2.9000000e+00   3.1000000e+00   2.8000000e+00   2.5000000e+00   2.4000000e+00   2.8000000e+00   3.0000000e+00   2.4000000e+00   1.7000000e+00   2.6000000e+00   2.6000000e+00   2.6000000e+00   2.7000000e+00   1.4000000e+00   2.5000000e+00   4.4000000e+00   3.5000000e+00   4.3000000e+00   4.0000000e+00   4.2000000e+00   5.0000000e+00   2.9000000e+00   4.7000000e+00   4.2000000e+00   4.5000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.4000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   5.1000000e+00   5.3000000e+00   3.4000000e+00   4.1000000e+00   3.3000000e+00   5.1000000e+00   3.3000000e+00   4.1000000e+00   4.4000000e+00   3.2000000e+00   3.3000000e+00   4.0000000e+00   4.2000000e+00   4.5000000e+00   4.8000000e+00   4.0000000e+00   3.5000000e+00   4.0000000e+00   4.5000000e+00   4.0000000e+00   3.9000000e+00   3.2000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.5000000e+00   4.3000000e+00   4.1000000e+00   3.6000000e+00   3.4000000e+00   3.6000000e+00   3.8000000e+00   3.5000000e+00   7.0000000e-01   4.0000000e-01   3.3000000e+00   3.1000000e+00   3.5000000e+00   2.6000000e+00   3.2000000e+00   3.1000000e+00   3.3000000e+00   1.9000000e+00   3.2000000e+00   2.5000000e+00   2.1000000e+00   2.8000000e+00   2.6000000e+00   3.3000000e+00   2.2000000e+00   3.0000000e+00   3.1000000e+00   2.7000000e+00   3.1000000e+00   2.5000000e+00   3.4000000e+00   2.6000000e+00   3.5000000e+00   3.3000000e+00   2.9000000e+00   3.0000000e+00   3.4000000e+00   3.6000000e+00   3.1000000e+00   2.1000000e+00   2.4000000e+00   2.3000000e+00   2.5000000e+00   3.7000000e+00   3.1000000e+00   3.1000000e+00   3.3000000e+00   3.0000000e+00   2.7000000e+00   2.6000000e+00   3.0000000e+00   3.2000000e+00   2.6000000e+00   1.9000000e+00   2.8000000e+00   2.8000000e+00   2.8000000e+00   2.9000000e+00   1.6000000e+00   2.7000000e+00   4.6000000e+00   3.7000000e+00   4.5000000e+00   4.2000000e+00   4.4000000e+00   5.2000000e+00   3.1000000e+00   4.9000000e+00   4.4000000e+00   4.7000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   5.3000000e+00   5.5000000e+00   3.6000000e+00   
4.3000000e+00   3.5000000e+00   5.3000000e+00   3.5000000e+00   4.3000000e+00   4.6000000e+00   3.4000000e+00   3.5000000e+00   4.2000000e+00   4.4000000e+00   4.7000000e+00   5.0000000e+00   4.2000000e+00   3.7000000e+00   4.2000000e+00   4.7000000e+00   4.2000000e+00   4.1000000e+00   3.4000000e+00   4.0000000e+00   4.2000000e+00   3.7000000e+00   3.7000000e+00   4.5000000e+00   4.3000000e+00   3.8000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.7000000e+00   4.0000000e-01   3.2000000e+00   3.0000000e+00   3.4000000e+00   2.5000000e+00   3.1000000e+00   3.0000000e+00   3.2000000e+00   1.8000000e+00   3.1000000e+00   2.4000000e+00   2.0000000e+00   2.7000000e+00   2.5000000e+00   3.2000000e+00   2.1000000e+00   2.9000000e+00   3.0000000e+00   2.6000000e+00   3.0000000e+00   2.4000000e+00   3.3000000e+00   2.5000000e+00   3.4000000e+00   3.2000000e+00   2.8000000e+00   2.9000000e+00   3.3000000e+00   3.5000000e+00   3.0000000e+00   2.0000000e+00   2.3000000e+00   2.2000000e+00   2.4000000e+00   3.6000000e+00   3.0000000e+00   3.0000000e+00   3.2000000e+00   2.9000000e+00   2.6000000e+00   2.5000000e+00   2.9000000e+00   3.1000000e+00   2.5000000e+00   1.8000000e+00   2.7000000e+00   2.7000000e+00   2.7000000e+00   2.8000000e+00   1.5000000e+00   2.6000000e+00   4.5000000e+00   3.6000000e+00   4.4000000e+00   4.1000000e+00   4.3000000e+00   5.1000000e+00   3.0000000e+00   4.8000000e+00   4.3000000e+00   4.6000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.5000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   5.2000000e+00   5.4000000e+00   3.5000000e+00   4.2000000e+00   3.4000000e+00   5.2000000e+00   3.4000000e+00   4.2000000e+00   4.5000000e+00   3.3000000e+00   3.4000000e+00   4.1000000e+00   4.3000000e+00   4.6000000e+00   4.9000000e+00   4.1000000e+00   3.6000000e+00   4.1000000e+00   4.6000000e+00   4.1000000e+00   4.0000000e+00   3.3000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.6000000e+00   4.4000000e+00   4.2000000e+00   3.7000000e+00   3.5000000e+00   3.7000000e+00   3.9000000e+00   3.6000000e+00   3.3000000e+00   3.1000000e+00   3.5000000e+00   2.6000000e+00   3.2000000e+00   3.1000000e+00   3.3000000e+00   1.9000000e+00   3.2000000e+00   2.5000000e+00   2.1000000e+00   2.8000000e+00   2.6000000e+00   3.3000000e+00   2.2000000e+00   3.0000000e+00   3.1000000e+00   2.7000000e+00   3.1000000e+00   2.5000000e+00   3.4000000e+00   2.6000000e+00   3.5000000e+00   3.3000000e+00   2.9000000e+00   3.0000000e+00   3.4000000e+00   3.6000000e+00   3.1000000e+00   2.1000000e+00   2.4000000e+00   2.3000000e+00   2.5000000e+00   3.7000000e+00   3.1000000e+00   3.1000000e+00   3.3000000e+00   3.0000000e+00   2.7000000e+00   2.6000000e+00   3.0000000e+00   3.2000000e+00   2.6000000e+00   1.9000000e+00   2.8000000e+00   2.8000000e+00   2.8000000e+00   2.9000000e+00   1.6000000e+00   2.7000000e+00   4.6000000e+00   3.7000000e+00   4.5000000e+00   4.2000000e+00   4.4000000e+00   5.2000000e+00   3.1000000e+00   4.9000000e+00   4.4000000e+00   4.7000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   3.6000000e+00   3.7000000e+00   3.9000000e+00   4.1000000e+00   5.3000000e+00   5.5000000e+00   3.6000000e+00   4.3000000e+00   3.5000000e+00   5.3000000e+00   3.5000000e+00   4.3000000e+00   4.6000000e+00   3.4000000e+00   3.5000000e+00   4.2000000e+00   4.4000000e+00   4.7000000e+00   5.0000000e+00   4.2000000e+00   3.7000000e+00   4.2000000e+00   4.7000000e+00   4.2000000e+00   4.1000000e+00   3.4000000e+00   4.0000000e+00   4.2000000e+00   
3.7000000e+00   3.7000000e+00   4.5000000e+00   4.3000000e+00   3.8000000e+00   3.6000000e+00   3.8000000e+00   4.0000000e+00   3.7000000e+00   6.0000000e-01   2.0000000e-01   1.5000000e+00   5.0000000e-01   1.3000000e+00   7.0000000e-01   2.1000000e+00   4.0000000e-01   1.8000000e+00   2.0000000e+00   1.1000000e+00   1.0000000e+00   9.0000000e-01   1.4000000e+00   3.0000000e-01   1.4000000e+00   1.2000000e+00   1.0000000e+00   1.4000000e+00   1.1000000e+00   9.0000000e-01   7.0000000e-01   9.0000000e-01   6.0000000e-01   4.0000000e-01   4.0000000e-01   3.0000000e-01   1.0000000e+00   1.3000000e+00   1.5000000e+00   1.5000000e+00   1.2000000e+00   1.0000000e+00   1.6000000e+00   1.0000000e+00   3.0000000e-01   9.0000000e-01   1.4000000e+00   1.5000000e+00   1.5000000e+00   9.0000000e-01   1.2000000e+00   2.0000000e+00   1.4000000e+00   1.3000000e+00   1.3000000e+00   8.0000000e-01   1.9000000e+00   1.3000000e+00   1.3000000e+00   1.2000000e+00   1.2000000e+00   9.0000000e-01   1.1000000e+00   1.9000000e+00   2.1000000e+00   1.6000000e+00   1.1000000e+00   1.4000000e+00   6.0000000e-01   6.0000000e-01   8.0000000e-01   1.3000000e+00   1.2000000e+00   9.0000000e-01   8.0000000e-01   2.0000000e+00   2.2000000e+00   1.0000000e+00   1.0000000e+00   1.4000000e+00   2.0000000e+00   7.0000000e-01   1.0000000e+00   1.3000000e+00   8.0000000e-01   9.0000000e-01   9.0000000e-01   1.1000000e+00   1.4000000e+00   1.7000000e+00   9.0000000e-01   7.0000000e-01   9.0000000e-01   1.4000000e+00   1.0000000e+00   8.0000000e-01   1.0000000e+00   7.0000000e-01   1.0000000e+00   9.0000000e-01   1.2000000e+00   1.2000000e+00   1.1000000e+00   9.0000000e-01   7.0000000e-01   6.0000000e-01   9.0000000e-01   1.1000000e+00   5.0000000e-01   9.0000000e-01   4.0000000e-01   7.0000000e-01   2.0000000e-01   1.5000000e+00   3.0000000e-01   1.2000000e+00   1.4000000e+00   5.0000000e-01   1.0000000e+00   3.0000000e-01   9.0000000e-01   3.0000000e-01   8.0000000e-01   6.0000000e-01   1.0000000e+00   8.0000000e-01   5.0000000e-01   5.0000000e-01   7.0000000e-01   4.0000000e-01   3.0000000e-01   2.0000000e-01   4.0000000e-01   5.0000000e-01   4.0000000e-01   1.0000000e+00   9.0000000e-01   9.0000000e-01   6.0000000e-01   6.0000000e-01   1.0000000e+00   4.0000000e-01   3.0000000e-01   9.0000000e-01   8.0000000e-01   9.0000000e-01   9.0000000e-01   3.0000000e-01   6.0000000e-01   1.4000000e+00   8.0000000e-01   7.0000000e-01   7.0000000e-01   3.0000000e-01   1.5000000e+00   7.0000000e-01   1.5000000e+00   6.0000000e-01   1.4000000e+00   1.1000000e+00   1.3000000e+00   2.1000000e+00   1.5000000e+00   1.8000000e+00   1.3000000e+00   1.6000000e+00   6.0000000e-01   8.0000000e-01   1.0000000e+00   7.0000000e-01   9.0000000e-01   8.0000000e-01   1.0000000e+00   2.2000000e+00   2.4000000e+00   1.0000000e+00   1.2000000e+00   8.0000000e-01   2.2000000e+00   5.0000000e-01   1.2000000e+00   1.5000000e+00   4.0000000e-01   4.0000000e-01   1.1000000e+00   1.3000000e+00   1.6000000e+00   1.9000000e+00   1.1000000e+00   6.0000000e-01   1.1000000e+00   1.6000000e+00   1.1000000e+00   1.0000000e+00   4.0000000e-01   9.0000000e-01   1.1000000e+00   8.0000000e-01   6.0000000e-01   1.4000000e+00   1.2000000e+00   8.0000000e-01   7.0000000e-01   7.0000000e-01   9.0000000e-01   6.0000000e-01   1.4000000e+00   4.0000000e-01   1.2000000e+00   6.0000000e-01   2.0000000e+00   3.0000000e-01   1.7000000e+00   1.9000000e+00   1.0000000e+00   9.0000000e-01   8.0000000e-01   1.3000000e+00   5.0000000e-01   1.3000000e+00   1.1000000e+00   9.0000000e-01   
1.3000000e+00   1.0000000e+00   9.0000000e-01   6.0000000e-01   8.0000000e-01   6.0000000e-01   5.0000000e-01   3.0000000e-01   2.0000000e-01   9.0000000e-01   1.4000000e+00   1.4000000e+00   1.4000000e+00   1.1000000e+00   9.0000000e-01   1.5000000e+00   9.0000000e-01   2.0000000e-01   8.0000000e-01   1.3000000e+00   1.4000000e+00   1.4000000e+00   8.0000000e-01   1.1000000e+00   1.9000000e+00   1.3000000e+00   1.2000000e+00   1.2000000e+00   7.0000000e-01   1.9000000e+00   1.2000000e+00   1.1000000e+00   1.1000000e+00   1.0000000e+00   7.0000000e-01   9.0000000e-01   1.7000000e+00   2.0000000e+00   1.4000000e+00   9.0000000e-01   1.2000000e+00   5.0000000e-01   5.0000000e-01   6.0000000e-01   1.2000000e+00   1.1000000e+00   8.0000000e-01   6.0000000e-01   1.8000000e+00   2.0000000e+00   9.0000000e-01   8.0000000e-01   1.3000000e+00   1.8000000e+00   6.0000000e-01   8.0000000e-01   1.1000000e+00   7.0000000e-01   8.0000000e-01   7.0000000e-01   9.0000000e-01   1.2000000e+00   1.5000000e+00   7.0000000e-01   6.0000000e-01   8.0000000e-01   1.2000000e+00   9.0000000e-01   6.0000000e-01   9.0000000e-01   6.0000000e-01   9.0000000e-01   8.0000000e-01   1.1000000e+00   1.0000000e+00   1.0000000e+00   8.0000000e-01   6.0000000e-01   5.0000000e-01   8.0000000e-01   1.0000000e+00   1.0000000e+00   5.0000000e-01   1.0000000e+00   7.0000000e-01   1.1000000e+00   4.0000000e-01   5.0000000e-01   7.0000000e-01   5.0000000e-01   7.0000000e-01   6.0000000e-01   1.2000000e+00   7.0000000e-01   4.0000000e-01   7.0000000e-01   2.0000000e-01   9.0000000e-01   6.0000000e-01   9.0000000e-01   7.0000000e-01   9.0000000e-01   1.1000000e+00   1.3000000e+00   1.2000000e+00   6.0000000e-01   5.0000000e-01   2.0000000e-01   3.0000000e-01   4.0000000e-01   1.1000000e+00   7.0000000e-01   1.1000000e+00   1.2000000e+00   8.0000000e-01   7.0000000e-01   2.0000000e-01   4.0000000e-01   7.0000000e-01   3.0000000e-01   7.0000000e-01   4.0000000e-01   7.0000000e-01   6.0000000e-01   7.0000000e-01   1.0000000e+00   5.0000000e-01   2.0000000e+00   1.1000000e+00   1.9000000e+00   1.6000000e+00   1.8000000e+00   2.6000000e+00   6.0000000e-01   2.3000000e+00   1.8000000e+00   2.1000000e+00   1.1000000e+00   1.3000000e+00   1.5000000e+00   1.0000000e+00   1.1000000e+00   1.3000000e+00   1.5000000e+00   2.7000000e+00   2.9000000e+00   1.0000000e+00   1.7000000e+00   9.0000000e-01   2.7000000e+00   9.0000000e-01   1.7000000e+00   2.0000000e+00   8.0000000e-01   9.0000000e-01   1.6000000e+00   1.8000000e+00   2.1000000e+00   2.4000000e+00   1.6000000e+00   1.1000000e+00   1.6000000e+00   2.2000000e+00   1.6000000e+00   1.5000000e+00   8.0000000e-01   1.4000000e+00   1.6000000e+00   1.4000000e+00   1.1000000e+00   1.9000000e+00   1.7000000e+00   1.2000000e+00   1.0000000e+00   1.2000000e+00   1.4000000e+00   1.1000000e+00   8.0000000e-01   5.0000000e-01   1.6000000e+00   2.0000000e-01   1.3000000e+00   1.5000000e+00   6.0000000e-01   6.0000000e-01   4.0000000e-01   1.0000000e+00   3.0000000e-01   9.0000000e-01   7.0000000e-01   6.0000000e-01   9.0000000e-01   6.0000000e-01   6.0000000e-01   3.0000000e-01   4.0000000e-01   3.0000000e-01   2.0000000e-01   3.0000000e-01   4.0000000e-01   5.0000000e-01   1.1000000e+00   1.0000000e+00   1.0000000e+00   7.0000000e-01   5.0000000e-01   1.1000000e+00   6.0000000e-01   3.0000000e-01   5.0000000e-01   9.0000000e-01   1.0000000e+00   1.0000000e+00   4.0000000e-01   7.0000000e-01   1.5000000e+00   9.0000000e-01   8.0000000e-01   8.0000000e-01   3.0000000e-01   1.6000000e+00   8.0000000e-01   
1.4000000e+00   7.0000000e-01   1.3000000e+00   1.0000000e+00   1.2000000e+00   2.0000000e+00   1.6000000e+00   1.7000000e+00   1.2000000e+00   1.5000000e+00   5.0000000e-01   7.0000000e-01   9.0000000e-01   8.0000000e-01   9.0000000e-01   8.0000000e-01   9.0000000e-01   2.1000000e+00   2.3000000e+00   6.0000000e-01   1.1000000e+00   9.0000000e-01   2.1000000e+00   3.0000000e-01   1.1000000e+00   1.4000000e+00   3.0000000e-01   4.0000000e-01   1.0000000e+00   1.2000000e+00   1.5000000e+00   1.8000000e+00   1.0000000e+00   5.0000000e-01   1.0000000e+00   1.5000000e+00   1.0000000e+00   9.0000000e-01   5.0000000e-01   8.0000000e-01   1.0000000e+00   8.0000000e-01   7.0000000e-01   1.3000000e+00   1.1000000e+00   8.0000000e-01   4.0000000e-01   6.0000000e-01   8.0000000e-01   6.0000000e-01   6.0000000e-01   1.2000000e+00   9.0000000e-01   6.0000000e-01   1.0000000e+00   3.0000000e-01   6.0000000e-01   4.0000000e-01   9.0000000e-01   1.0000000e+00   2.0000000e-01   4.0000000e-01   6.0000000e-01   6.0000000e-01   5.0000000e-01   5.0000000e-01   6.0000000e-01   4.0000000e-01   7.0000000e-01   9.0000000e-01   1.1000000e+00   1.0000000e+00   3.0000000e-01   1.0000000e+00   7.0000000e-01   8.0000000e-01   6.0000000e-01   6.0000000e-01   3.0000000e-01   6.0000000e-01   1.0000000e+00   6.0000000e-01   4.0000000e-01   5.0000000e-01   2.0000000e-01   4.0000000e-01   5.0000000e-01   1.2000000e+00   3.0000000e-01   3.0000000e-01   3.0000000e-01   5.0000000e-01   1.5000000e+00   4.0000000e-01   1.5000000e+00   6.0000000e-01   1.4000000e+00   1.1000000e+00   1.3000000e+00   2.1000000e+00   8.0000000e-01   1.8000000e+00   1.3000000e+00   1.6000000e+00   8.0000000e-01   8.0000000e-01   1.1000000e+00   7.0000000e-01   1.1000000e+00   1.0000000e+00   1.0000000e+00   2.2000000e+00   2.4000000e+00   6.0000000e-01   1.2000000e+00   7.0000000e-01   2.2000000e+00   6.0000000e-01   1.2000000e+00   1.5000000e+00   5.0000000e-01   5.0000000e-01   1.1000000e+00   1.5000000e+00   1.7000000e+00   2.2000000e+00   1.1000000e+00   6.0000000e-01   1.1000000e+00   2.0000000e+00   1.1000000e+00   1.0000000e+00   5.0000000e-01   1.2000000e+00   1.1000000e+00   1.2000000e+00   6.0000000e-01   1.4000000e+00   1.2000000e+00   1.0000000e+00   6.0000000e-01   8.0000000e-01   1.0000000e+00   6.0000000e-01   1.4000000e+00   4.0000000e-01   1.1000000e+00   1.3000000e+00   5.0000000e-01   1.1000000e+00   4.0000000e-01   1.1000000e+00   4.0000000e-01   7.0000000e-01   6.0000000e-01   1.1000000e+00   8.0000000e-01   4.0000000e-01   7.0000000e-01   8.0000000e-01   5.0000000e-01   4.0000000e-01   3.0000000e-01   5.0000000e-01   4.0000000e-01   4.0000000e-01   1.2000000e+00   9.0000000e-01   1.0000000e+00   8.0000000e-01   6.0000000e-01   9.0000000e-01   3.0000000e-01   4.0000000e-01   1.0000000e+00   7.0000000e-01   8.0000000e-01   8.0000000e-01   3.0000000e-01   7.0000000e-01   1.4000000e+00   7.0000000e-01   6.0000000e-01   6.0000000e-01   4.0000000e-01   1.7000000e+00   6.0000000e-01   1.3000000e+00   6.0000000e-01   1.2000000e+00   9.0000000e-01   1.1000000e+00   1.9000000e+00   1.4000000e+00   1.6000000e+00   1.1000000e+00   1.4000000e+00   4.0000000e-01   6.0000000e-01   8.0000000e-01   8.0000000e-01   8.0000000e-01   7.0000000e-01   8.0000000e-01   2.0000000e+00   2.2000000e+00   1.1000000e+00   1.0000000e+00   7.0000000e-01   2.0000000e+00   6.0000000e-01   1.0000000e+00   1.3000000e+00   5.0000000e-01   3.0000000e-01   9.0000000e-01   1.1000000e+00   1.4000000e+00   1.7000000e+00   9.0000000e-01   5.0000000e-01   9.0000000e-01   
1.4000000e+00   9.0000000e-01   8.0000000e-01   3.0000000e-01   7.0000000e-01   9.0000000e-01   7.0000000e-01   6.0000000e-01   1.2000000e+00   1.0000000e+00   7.0000000e-01   8.0000000e-01   5.0000000e-01   7.0000000e-01   4.0000000e-01   1.7000000e+00   6.0000000e-01   4.0000000e-01   1.0000000e+00   1.1000000e+00   1.4000000e+00   7.0000000e-01   1.8000000e+00   1.2000000e+00   9.0000000e-01   1.3000000e+00   7.0000000e-01   1.5000000e+00   1.2000000e+00   1.6000000e+00   1.4000000e+00   1.5000000e+00   1.7000000e+00   1.9000000e+00   1.8000000e+00   1.2000000e+00   8.0000000e-01   6.0000000e-01   6.0000000e-01   9.0000000e-01   1.8000000e+00   1.2000000e+00   1.2000000e+00   1.8000000e+00   1.4000000e+00   8.0000000e-01   7.0000000e-01   1.1000000e+00   1.3000000e+00   9.0000000e-01   1.0000000e-01   9.0000000e-01   9.0000000e-01   9.0000000e-01   1.3000000e+00   3.0000000e-01   8.0000000e-01   2.7000000e+00   1.8000000e+00   2.6000000e+00   2.3000000e+00   2.5000000e+00   3.3000000e+00   1.2000000e+00   3.0000000e+00   2.5000000e+00   2.8000000e+00   1.8000000e+00   2.0000000e+00   2.2000000e+00   1.7000000e+00   1.8000000e+00   2.0000000e+00   2.2000000e+00   3.4000000e+00   3.6000000e+00   1.7000000e+00   2.4000000e+00   1.6000000e+00   3.4000000e+00   1.6000000e+00   2.4000000e+00   2.7000000e+00   1.5000000e+00   1.6000000e+00   2.3000000e+00   2.5000000e+00   2.8000000e+00   3.1000000e+00   2.3000000e+00   1.8000000e+00   2.3000000e+00   2.8000000e+00   2.3000000e+00   2.2000000e+00   1.5000000e+00   2.1000000e+00   2.3000000e+00   2.0000000e+00   1.8000000e+00   2.6000000e+00   2.4000000e+00   1.9000000e+00   1.7000000e+00   1.9000000e+00   2.1000000e+00   1.8000000e+00   1.4000000e+00   1.6000000e+00   7.0000000e-01   7.0000000e-01   5.0000000e-01   1.0000000e+00   2.0000000e-01   1.0000000e+00   8.0000000e-01   7.0000000e-01   1.0000000e+00   7.0000000e-01   6.0000000e-01   4.0000000e-01   5.0000000e-01   3.0000000e-01   2.0000000e-01   2.0000000e-01   4.0000000e-01   6.0000000e-01   1.1000000e+00   1.1000000e+00   1.1000000e+00   8.0000000e-01   6.0000000e-01   1.2000000e+00   6.0000000e-01   2.0000000e-01   6.0000000e-01   1.0000000e+00   1.1000000e+00   1.1000000e+00   5.0000000e-01   8.0000000e-01   1.6000000e+00   1.0000000e+00   9.0000000e-01   9.0000000e-01   4.0000000e-01   1.6000000e+00   9.0000000e-01   1.4000000e+00   8.0000000e-01   1.3000000e+00   1.0000000e+00   1.2000000e+00   2.0000000e+00   1.7000000e+00   1.7000000e+00   1.2000000e+00   1.5000000e+00   7.0000000e-01   7.0000000e-01   9.0000000e-01   9.0000000e-01   1.1000000e+00   1.0000000e+00   9.0000000e-01   2.1000000e+00   2.3000000e+00   7.0000000e-01   1.1000000e+00   1.0000000e+00   2.1000000e+00   5.0000000e-01   1.1000000e+00   1.4000000e+00   5.0000000e-01   5.0000000e-01   1.0000000e+00   1.2000000e+00   1.5000000e+00   1.8000000e+00   1.0000000e+00   5.0000000e-01   1.0000000e+00   1.5000000e+00   1.1000000e+00   9.0000000e-01   6.0000000e-01   8.0000000e-01   1.1000000e+00   1.0000000e+00   8.0000000e-01   1.3000000e+00   1.2000000e+00   1.0000000e+00   6.0000000e-01   7.0000000e-01   1.0000000e+00   7.0000000e-01   7.0000000e-01   7.0000000e-01   8.0000000e-01   9.0000000e-01   4.0000000e-01   1.5000000e+00   6.0000000e-01   6.0000000e-01   1.0000000e+00   4.0000000e-01   9.0000000e-01   9.0000000e-01   1.1000000e+00   9.0000000e-01   1.2000000e+00   1.4000000e+00   1.6000000e+00   1.5000000e+00   8.0000000e-01   5.0000000e-01   3.0000000e-01   4.0000000e-01   6.0000000e-01   1.2000000e+00   
6.0000000e-01   8.0000000e-01   1.5000000e+00   1.1000000e+00   4.0000000e-01   3.0000000e-01   5.0000000e-01   9.0000000e-01   6.0000000e-01   6.0000000e-01   4.0000000e-01   5.0000000e-01   5.0000000e-01   1.0000000e+00   9.0000000e-01   5.0000000e-01   2.1000000e+00   1.2000000e+00   2.0000000e+00   1.7000000e+00   1.9000000e+00   2.7000000e+00   6.0000000e-01   2.4000000e+00   1.9000000e+00   2.2000000e+00   1.3000000e+00   1.4000000e+00   1.6000000e+00   1.1000000e+00   1.2000000e+00   1.4000000e+00   1.6000000e+00   2.8000000e+00   3.0000000e+00   1.1000000e+00   1.8000000e+00   1.0000000e+00   2.8000000e+00   1.1000000e+00   1.8000000e+00   2.1000000e+00   1.0000000e+00   1.0000000e+00   1.7000000e+00   2.0000000e+00   2.2000000e+00   2.7000000e+00   1.7000000e+00   1.2000000e+00   1.7000000e+00   2.5000000e+00   1.7000000e+00   1.6000000e+00   9.0000000e-01   1.7000000e+00   1.7000000e+00   1.7000000e+00   1.2000000e+00   2.0000000e+00   1.8000000e+00   1.5000000e+00   1.1000000e+00   1.3000000e+00   1.5000000e+00   1.2000000e+00   1.0000000e+00   1.0000000e+00   1.2000000e+00   9.0000000e-01   1.7000000e+00   1.0000000e+00   8.0000000e-01   1.2000000e+00   6.0000000e-01   1.3000000e+00   1.1000000e+00   1.4000000e+00   1.2000000e+00   1.4000000e+00   1.6000000e+00   1.8000000e+00   1.7000000e+00   1.0000000e+00   7.0000000e-01   5.0000000e-01   5.0000000e-01   8.0000000e-01   1.6000000e+00   1.0000000e+00   1.4000000e+00   1.7000000e+00   1.3000000e+00   1.0000000e+00   5.0000000e-01   9.0000000e-01   1.1000000e+00   8.0000000e-01   3.0000000e-01   7.0000000e-01   1.0000000e+00   9.0000000e-01   1.2000000e+00   5.0000000e-01   8.0000000e-01   2.5000000e+00   1.6000000e+00   2.4000000e+00   2.1000000e+00   2.3000000e+00   3.1000000e+00   1.0000000e+00   2.8000000e+00   2.3000000e+00   2.6000000e+00   1.6000000e+00   1.8000000e+00   2.0000000e+00   1.5000000e+00   1.6000000e+00   1.8000000e+00   2.0000000e+00   3.2000000e+00   3.4000000e+00   1.5000000e+00   2.2000000e+00   1.4000000e+00   3.2000000e+00   1.4000000e+00   2.2000000e+00   2.5000000e+00   1.3000000e+00   1.4000000e+00   2.1000000e+00   2.3000000e+00   2.6000000e+00   2.9000000e+00   2.1000000e+00   1.6000000e+00   2.1000000e+00   2.7000000e+00   2.1000000e+00   2.0000000e+00   1.3000000e+00   1.9000000e+00   2.1000000e+00   1.9000000e+00   1.6000000e+00   2.4000000e+00   2.2000000e+00   1.7000000e+00   1.5000000e+00   1.7000000e+00   1.9000000e+00   1.6000000e+00   8.0000000e-01   5.0000000e-01   6.0000000e-01   8.0000000e-01   3.0000000e-01   5.0000000e-01   8.0000000e-01   5.0000000e-01   6.0000000e-01   2.0000000e-01   7.0000000e-01   5.0000000e-01   5.0000000e-01   7.0000000e-01   9.0000000e-01   8.0000000e-01   3.0000000e-01   7.0000000e-01   6.0000000e-01   6.0000000e-01   3.0000000e-01   9.0000000e-01   5.0000000e-01   4.0000000e-01   8.0000000e-01   7.0000000e-01   3.0000000e-01   5.0000000e-01   4.0000000e-01   4.0000000e-01   4.0000000e-01   9.0000000e-01   3.0000000e-01   3.0000000e-01   2.0000000e-01   3.0000000e-01   1.2000000e+00   2.0000000e-01   1.8000000e+00   9.0000000e-01   1.7000000e+00   1.4000000e+00   1.6000000e+00   2.4000000e+00   1.0000000e+00   2.1000000e+00   1.6000000e+00   1.9000000e+00   9.0000000e-01   1.1000000e+00   1.3000000e+00   8.0000000e-01   9.0000000e-01   1.1000000e+00   1.3000000e+00   2.5000000e+00   2.7000000e+00   8.0000000e-01   1.5000000e+00   7.0000000e-01   2.5000000e+00   7.0000000e-01   1.5000000e+00   1.8000000e+00   6.0000000e-01   7.0000000e-01   1.4000000e+00   
1.6000000e+00   1.9000000e+00   2.2000000e+00   1.4000000e+00   9.0000000e-01   1.4000000e+00   1.9000000e+00   1.4000000e+00   1.3000000e+00   6.0000000e-01   1.2000000e+00   1.4000000e+00   1.0000000e+00   9.0000000e-01   1.7000000e+00   1.5000000e+00   1.0000000e+00   8.0000000e-01   1.0000000e+00   1.2000000e+00   9.0000000e-01   7.0000000e-01   7.0000000e-01   9.0000000e-01   8.0000000e-01   5.0000000e-01   5.0000000e-01   4.0000000e-01   1.0000000e+00   6.0000000e-01   9.0000000e-01   7.0000000e-01   7.0000000e-01   8.0000000e-01   8.0000000e-01   1.0000000e+00   7.0000000e-01   5.0000000e-01   5.0000000e-01   5.0000000e-01   5.0000000e-01   1.1000000e+00   8.0000000e-01   1.2000000e+00   9.0000000e-01   4.0000000e-01   8.0000000e-01   5.0000000e-01   5.0000000e-01   8.0000000e-01   4.0000000e-01   1.0000000e+00   5.0000000e-01   8.0000000e-01   7.0000000e-01   7.0000000e-01   1.0000000e+00   6.0000000e-01   2.0000000e+00   1.1000000e+00   1.9000000e+00   1.6000000e+00   1.8000000e+00   2.6000000e+00   1.1000000e+00   2.3000000e+00   1.8000000e+00   2.1000000e+00   1.1000000e+00   1.3000000e+00   1.5000000e+00   1.0000000e+00   1.4000000e+00   1.3000000e+00   1.5000000e+00   2.7000000e+00   2.9000000e+00   1.0000000e+00   1.7000000e+00   1.0000000e+00   2.7000000e+00   9.0000000e-01   1.7000000e+00   2.0000000e+00   8.0000000e-01   9.0000000e-01   1.6000000e+00   1.8000000e+00   2.1000000e+00   2.4000000e+00   1.6000000e+00   1.1000000e+00   1.6000000e+00   2.1000000e+00   1.6000000e+00   1.5000000e+00   8.0000000e-01   1.4000000e+00   1.6000000e+00   1.3000000e+00   1.1000000e+00   1.9000000e+00   1.7000000e+00   1.3000000e+00   1.0000000e+00   1.2000000e+00   1.4000000e+00   1.1000000e+00   1.1000000e+00   6.0000000e-01   5.0000000e-01   6.0000000e-01   7.0000000e-01   8.0000000e-01   4.0000000e-01   7.0000000e-01   4.0000000e-01   2.0000000e-01   4.0000000e-01   5.0000000e-01   7.0000000e-01   6.0000000e-01   2.0000000e-01   1.2000000e+00   9.0000000e-01   1.0000000e+00   8.0000000e-01   4.0000000e-01   7.0000000e-01   5.0000000e-01   6.0000000e-01   6.0000000e-01   6.0000000e-01   7.0000000e-01   6.0000000e-01   1.0000000e-01   7.0000000e-01   1.4000000e+00   5.0000000e-01   5.0000000e-01   5.0000000e-01   4.0000000e-01   1.7000000e+00   6.0000000e-01   1.3000000e+00   5.0000000e-01   1.2000000e+00   9.0000000e-01   1.1000000e+00   1.9000000e+00   1.2000000e+00   1.6000000e+00   1.1000000e+00   1.4000000e+00   6.0000000e-01   6.0000000e-01   8.0000000e-01   6.0000000e-01   1.0000000e+00   9.0000000e-01   8.0000000e-01   2.0000000e+00   2.2000000e+00   7.0000000e-01   1.0000000e+00   6.0000000e-01   2.0000000e+00   4.0000000e-01   1.0000000e+00   1.3000000e+00   4.0000000e-01   4.0000000e-01   9.0000000e-01   1.1000000e+00   1.4000000e+00   1.8000000e+00   9.0000000e-01   4.0000000e-01   9.0000000e-01   1.6000000e+00   1.0000000e+00   8.0000000e-01   4.0000000e-01   8.0000000e-01   1.0000000e+00   9.0000000e-01   5.0000000e-01   1.2000000e+00   1.1000000e+00   9.0000000e-01   5.0000000e-01   6.0000000e-01   9.0000000e-01   4.0000000e-01   1.1000000e+00   9.0000000e-01   5.0000000e-01   9.0000000e-01   4.0000000e-01   1.2000000e+00   5.0000000e-01   1.3000000e+00   1.1000000e+00   8.0000000e-01   1.0000000e+00   1.2000000e+00   1.4000000e+00   9.0000000e-01   3.0000000e-01   5.0000000e-01   5.0000000e-01   3.0000000e-01   1.5000000e+00   9.0000000e-01   9.0000000e-01   1.1000000e+00   8.0000000e-01   5.0000000e-01   4.0000000e-01   8.0000000e-01   1.0000000e+00   4.0000000e-01   
6.0000000e-01   6.0000000e-01   6.0000000e-01   6.0000000e-01   7.0000000e-01   6.0000000e-01   5.0000000e-01   2.4000000e+00   1.5000000e+00   2.3000000e+00   2.0000000e+00   2.2000000e+00   3.0000000e+00   9.0000000e-01   2.7000000e+00   2.2000000e+00   2.5000000e+00   1.5000000e+00   1.7000000e+00   1.9000000e+00   1.4000000e+00   1.5000000e+00   1.7000000e+00   1.9000000e+00   3.1000000e+00   3.3000000e+00   1.4000000e+00   2.1000000e+00   1.3000000e+00   3.1000000e+00   1.3000000e+00   2.1000000e+00   2.4000000e+00   1.2000000e+00   1.3000000e+00   2.0000000e+00   2.2000000e+00   2.5000000e+00   2.8000000e+00   2.0000000e+00   1.5000000e+00   2.0000000e+00   2.5000000e+00   2.0000000e+00   1.9000000e+00   1.2000000e+00   1.8000000e+00   2.0000000e+00   1.5000000e+00   1.5000000e+00   2.3000000e+00   2.1000000e+00   1.6000000e+00   1.4000000e+00   1.6000000e+00   1.8000000e+00   1.5000000e+00   1.1000000e+00   9.0000000e-01   9.0000000e-01   1.1000000e+00   8.0000000e-01   6.0000000e-01   6.0000000e-01   6.0000000e-01   3.0000000e-01   1.0000000e-01   4.0000000e-01   6.0000000e-01   7.0000000e-01   1.0000000e+00   1.2000000e+00   1.2000000e+00   9.0000000e-01   7.0000000e-01   1.3000000e+00   7.0000000e-01   3.0000000e-01   8.0000000e-01   1.1000000e+00   1.2000000e+00   1.2000000e+00   6.0000000e-01   9.0000000e-01   1.7000000e+00   1.1000000e+00   1.0000000e+00   1.0000000e+00   5.0000000e-01   1.6000000e+00   1.0000000e+00   1.6000000e+00   9.0000000e-01   1.5000000e+00   1.2000000e+00   1.4000000e+00   2.2000000e+00   1.8000000e+00   1.9000000e+00   1.4000000e+00   1.7000000e+00   7.0000000e-01   9.0000000e-01   1.1000000e+00   1.0000000e+00   1.0000000e+00   9.0000000e-01   1.1000000e+00   2.3000000e+00   2.5000000e+00   9.0000000e-01   1.3000000e+00   1.1000000e+00   2.3000000e+00   5.0000000e-01   1.3000000e+00   1.6000000e+00   5.0000000e-01   6.0000000e-01   1.2000000e+00   1.4000000e+00   1.7000000e+00   2.0000000e+00   1.2000000e+00   7.0000000e-01   1.2000000e+00   1.7000000e+00   1.2000000e+00   1.1000000e+00   7.0000000e-01   1.0000000e+00   1.2000000e+00   9.0000000e-01   9.0000000e-01   1.5000000e+00   1.3000000e+00   9.0000000e-01   6.0000000e-01   8.0000000e-01   1.0000000e+00   8.0000000e-01   5.0000000e-01   8.0000000e-01   6.0000000e-01   3.0000000e-01   5.0000000e-01   7.0000000e-01   5.0000000e-01   8.0000000e-01   1.0000000e+00   1.2000000e+00   1.1000000e+00   4.0000000e-01   1.0000000e+00   7.0000000e-01   8.0000000e-01   6.0000000e-01   6.0000000e-01   2.0000000e-01   4.0000000e-01   1.1000000e+00   7.0000000e-01   4.0000000e-01   5.0000000e-01   4.0000000e-01   5.0000000e-01   5.0000000e-01   1.2000000e+00   3.0000000e-01   3.0000000e-01   3.0000000e-01   6.0000000e-01   1.5000000e+00   4.0000000e-01   1.5000000e+00   6.0000000e-01   1.5000000e+00   1.1000000e+00   1.3000000e+00   2.1000000e+00   7.0000000e-01   1.8000000e+00   1.3000000e+00   1.6000000e+00   9.0000000e-01   8.0000000e-01   1.2000000e+00   5.0000000e-01   9.0000000e-01   8.0000000e-01   1.0000000e+00   2.2000000e+00   2.4000000e+00   8.0000000e-01   1.3000000e+00   5.0000000e-01   2.2000000e+00   7.0000000e-01   1.2000000e+00   1.6000000e+00   6.0000000e-01   5.0000000e-01   1.1000000e+00   1.6000000e+00   1.8000000e+00   2.3000000e+00   1.1000000e+00   7.0000000e-01   1.1000000e+00   2.1000000e+00   1.1000000e+00   1.0000000e+00   4.0000000e-01   1.3000000e+00   1.1000000e+00   1.3000000e+00   6.0000000e-01   1.4000000e+00   1.2000000e+00   1.1000000e+00   7.0000000e-01   9.0000000e-01   
9.0000000e-01   6.0000000e-01   5.0000000e-01   2.0000000e-01   8.0000000e-01   3.0000000e-01   8.0000000e-01   6.0000000e-01   6.0000000e-01   8.0000000e-01   1.0000000e+00   9.0000000e-01   5.0000000e-01   6.0000000e-01   3.0000000e-01   4.0000000e-01   2.0000000e-01   1.0000000e+00   5.0000000e-01   7.0000000e-01   9.0000000e-01   5.0000000e-01   3.0000000e-01   3.0000000e-01   3.0000000e-01   5.0000000e-01   2.0000000e-01   8.0000000e-01   3.0000000e-01   3.0000000e-01   3.0000000e-01   4.0000000e-01   1.1000000e+00   3.0000000e-01   1.9000000e+00   1.0000000e+00   1.8000000e+00   1.5000000e+00   1.7000000e+00   2.5000000e+00   9.0000000e-01   2.2000000e+00   1.7000000e+00   2.0000000e+00   1.0000000e+00   1.2000000e+00   1.4000000e+00   1.0000000e+00   1.4000000e+00   1.3000000e+00   1.4000000e+00   2.6000000e+00   2.8000000e+00   9.0000000e-01   1.6000000e+00   1.0000000e+00   2.6000000e+00   8.0000000e-01   1.6000000e+00   1.9000000e+00   8.0000000e-01   8.0000000e-01   1.5000000e+00   1.7000000e+00   2.0000000e+00   2.3000000e+00   1.5000000e+00   1.0000000e+00   1.5000000e+00   2.0000000e+00   1.5000000e+00   1.4000000e+00   8.0000000e-01   1.3000000e+00   1.5000000e+00   1.3000000e+00   1.0000000e+00   1.8000000e+00   1.6000000e+00   1.3000000e+00   9.0000000e-01   1.1000000e+00   1.3000000e+00   1.0000000e+00   6.0000000e-01   1.0000000e+00   6.0000000e-01   4.0000000e-01   6.0000000e-01   7.0000000e-01   8.0000000e-01   6.0000000e-01   8.0000000e-01   7.0000000e-01   1.0000000e+00   7.0000000e-01   8.0000000e-01   6.0000000e-01   6.0000000e-01   8.0000000e-01   1.2000000e+00   9.0000000e-01   2.0000000e-01   8.0000000e-01   7.0000000e-01   7.0000000e-01   8.0000000e-01   5.0000000e-01   1.2000000e+00   6.0000000e-01   8.0000000e-01   7.0000000e-01   7.0000000e-01   1.5000000e+00   6.0000000e-01   1.5000000e+00   6.0000000e-01   1.4000000e+00   1.1000000e+00   1.3000000e+00   2.1000000e+00   1.3000000e+00   1.8000000e+00   1.3000000e+00   1.6000000e+00   1.0000000e+00   8.0000000e-01   1.0000000e+00   5.0000000e-01   9.0000000e-01   1.0000000e+00   1.0000000e+00   2.2000000e+00   2.4000000e+00   5.0000000e-01   1.2000000e+00   6.0000000e-01   2.2000000e+00   5.0000000e-01   1.2000000e+00   1.5000000e+00   6.0000000e-01   8.0000000e-01   1.1000000e+00   1.3000000e+00   1.6000000e+00   1.9000000e+00   1.1000000e+00   6.0000000e-01   1.1000000e+00   1.6000000e+00   1.2000000e+00   1.0000000e+00   8.0000000e-01   9.0000000e-01   1.1000000e+00   9.0000000e-01   6.0000000e-01   1.4000000e+00   1.2000000e+00   8.0000000e-01   5.0000000e-01   8.0000000e-01   1.2000000e+00   8.0000000e-01   9.0000000e-01   5.0000000e-01   1.0000000e+00   8.0000000e-01   8.0000000e-01   1.0000000e+00   1.2000000e+00   1.1000000e+00   6.0000000e-01   4.0000000e-01   1.0000000e-01   2.0000000e-01   2.0000000e-01   1.2000000e+00   6.0000000e-01   9.0000000e-01   1.1000000e+00   7.0000000e-01   5.0000000e-01   2.0000000e-01   5.0000000e-01   7.0000000e-01   2.0000000e-01   6.0000000e-01   3.0000000e-01   5.0000000e-01   4.0000000e-01   6.0000000e-01   9.0000000e-01   3.0000000e-01   2.1000000e+00   1.2000000e+00   2.0000000e+00   1.7000000e+00   1.9000000e+00   2.7000000e+00   7.0000000e-01   2.4000000e+00   1.9000000e+00   2.2000000e+00   1.2000000e+00   1.4000000e+00   1.6000000e+00   1.1000000e+00   1.3000000e+00   1.4000000e+00   1.6000000e+00   2.8000000e+00   3.0000000e+00   1.1000000e+00   1.8000000e+00   1.0000000e+00   2.8000000e+00   1.0000000e+00   1.8000000e+00   2.1000000e+00   9.0000000e-01   
1.0000000e+00   1.7000000e+00   1.9000000e+00   2.2000000e+00   2.5000000e+00   1.7000000e+00   1.2000000e+00   1.7000000e+00   2.2000000e+00   1.7000000e+00   1.6000000e+00   9.0000000e-01   1.5000000e+00   1.7000000e+00   1.3000000e+00   1.2000000e+00   2.0000000e+00   1.8000000e+00   1.3000000e+00   1.1000000e+00   1.3000000e+00   1.5000000e+00   1.2000000e+00   8.0000000e-01   7.0000000e-01   6.0000000e-01   5.0000000e-01   7.0000000e-01   9.0000000e-01   8.0000000e-01   3.0000000e-01   1.3000000e+00   1.0000000e+00   1.1000000e+00   9.0000000e-01   5.0000000e-01   5.0000000e-01   3.0000000e-01   8.0000000e-01   9.0000000e-01   7.0000000e-01   8.0000000e-01   6.0000000e-01   4.0000000e-01   8.0000000e-01   1.5000000e+00   6.0000000e-01   6.0000000e-01   6.0000000e-01   5.0000000e-01   1.8000000e+00   7.0000000e-01   1.2000000e+00   5.0000000e-01   1.2000000e+00   8.0000000e-01   1.0000000e+00   1.8000000e+00   1.0000000e+00   1.5000000e+00   1.0000000e+00   1.3000000e+00   6.0000000e-01   5.0000000e-01   9.0000000e-01   7.0000000e-01   6.0000000e-01   5.0000000e-01   7.0000000e-01   1.9000000e+00   2.1000000e+00   1.0000000e+00   1.0000000e+00   4.0000000e-01   1.9000000e+00   5.0000000e-01   9.0000000e-01   1.3000000e+00   4.0000000e-01   2.0000000e-01   8.0000000e-01   1.3000000e+00   1.5000000e+00   2.0000000e+00   8.0000000e-01   4.0000000e-01   8.0000000e-01   1.8000000e+00   8.0000000e-01   7.0000000e-01   2.0000000e-01   1.0000000e+00   8.0000000e-01   1.0000000e+00   5.0000000e-01   1.1000000e+00   9.0000000e-01   8.0000000e-01   7.0000000e-01   6.0000000e-01   6.0000000e-01   3.0000000e-01   9.0000000e-01   7.0000000e-01   3.0000000e-01   5.0000000e-01   8.0000000e-01   1.0000000e+00   5.0000000e-01   5.0000000e-01   6.0000000e-01   6.0000000e-01   3.0000000e-01   1.1000000e+00   7.0000000e-01   6.0000000e-01   7.0000000e-01   5.0000000e-01   5.0000000e-01   6.0000000e-01   6.0000000e-01   6.0000000e-01   3.0000000e-01   1.1000000e+00   5.0000000e-01   4.0000000e-01   4.0000000e-01   3.0000000e-01   1.0000000e+00   4.0000000e-01   2.0000000e+00   1.1000000e+00   1.9000000e+00   1.6000000e+00   1.8000000e+00   2.6000000e+00   1.2000000e+00   2.3000000e+00   1.8000000e+00   2.1000000e+00   1.1000000e+00   1.3000000e+00   1.5000000e+00   1.0000000e+00   1.1000000e+00   1.3000000e+00   1.5000000e+00   2.7000000e+00   2.9000000e+00   1.0000000e+00   1.7000000e+00   9.0000000e-01   2.7000000e+00   9.0000000e-01   1.7000000e+00   2.0000000e+00   8.0000000e-01   9.0000000e-01   1.6000000e+00   1.8000000e+00   2.1000000e+00   2.4000000e+00   1.6000000e+00   1.1000000e+00   1.6000000e+00   2.1000000e+00   1.6000000e+00   1.5000000e+00   8.0000000e-01   1.4000000e+00   1.6000000e+00   1.1000000e+00   1.1000000e+00   1.9000000e+00   1.7000000e+00   1.2000000e+00   1.0000000e+00   1.2000000e+00   1.4000000e+00   1.1000000e+00   3.0000000e-01   6.0000000e-01   5.0000000e-01   5.0000000e-01   5.0000000e-01   4.0000000e-01   1.4000000e+00   1.1000000e+00   1.2000000e+00   1.0000000e+00   3.0000000e-01   9.0000000e-01   9.0000000e-01   6.0000000e-01   5.0000000e-01   8.0000000e-01   9.0000000e-01   8.0000000e-01   5.0000000e-01   9.0000000e-01   1.6000000e+00   7.0000000e-01   7.0000000e-01   7.0000000e-01   6.0000000e-01   1.9000000e+00   8.0000000e-01   1.1000000e+00   5.0000000e-01   1.0000000e+00   7.0000000e-01   9.0000000e-01   1.7000000e+00   1.4000000e+00   1.4000000e+00   9.0000000e-01   1.2000000e+00   7.0000000e-01   4.0000000e-01   6.0000000e-01   6.0000000e-01   9.0000000e-01   
8.0000000e-01   6.0000000e-01   1.8000000e+00   2.0000000e+00   3.0000000e-01   8.0000000e-01   7.0000000e-01   1.8000000e+00   3.0000000e-01   8.0000000e-01   1.1000000e+00   3.0000000e-01   5.0000000e-01   7.0000000e-01   9.0000000e-01   1.2000000e+00   1.6000000e+00   7.0000000e-01   3.0000000e-01   7.0000000e-01   1.4000000e+00   9.0000000e-01   6.0000000e-01   5.0000000e-01   6.0000000e-01   9.0000000e-01   8.0000000e-01   5.0000000e-01   1.0000000e+00   1.0000000e+00   8.0000000e-01   4.0000000e-01   5.0000000e-01   9.0000000e-01   5.0000000e-01   4.0000000e-01   5.0000000e-01   7.0000000e-01   6.0000000e-01   3.0000000e-01   1.2000000e+00   9.0000000e-01   1.0000000e+00   8.0000000e-01   4.0000000e-01   7.0000000e-01   6.0000000e-01   6.0000000e-01   5.0000000e-01   6.0000000e-01   7.0000000e-01   6.0000000e-01   2.0000000e-01   7.0000000e-01   1.4000000e+00   5.0000000e-01   5.0000000e-01   5.0000000e-01   4.0000000e-01   1.7000000e+00   6.0000000e-01   1.3000000e+00   7.0000000e-01   1.2000000e+00   9.0000000e-01   1.1000000e+00   1.9000000e+00   1.2000000e+00   1.6000000e+00   1.1000000e+00   1.4000000e+00   8.0000000e-01   7.0000000e-01   9.0000000e-01   8.0000000e-01   1.2000000e+00   1.1000000e+00   8.0000000e-01   2.0000000e+00   2.2000000e+00   6.0000000e-01   1.1000000e+00   8.0000000e-01   2.0000000e+00   6.0000000e-01   1.0000000e+00   1.3000000e+00   6.0000000e-01   6.0000000e-01   9.0000000e-01   1.1000000e+00   1.4000000e+00   1.8000000e+00   1.0000000e+00   4.0000000e-01   9.0000000e-01   1.6000000e+00   1.2000000e+00   8.0000000e-01   6.0000000e-01   9.0000000e-01   1.2000000e+00   1.1000000e+00   7.0000000e-01   1.2000000e+00   1.3000000e+00   1.1000000e+00   7.0000000e-01   8.0000000e-01   1.1000000e+00   6.0000000e-01   2.0000000e-01   5.0000000e-01   7.0000000e-01   4.0000000e-01   8.0000000e-01   9.0000000e-01   9.0000000e-01   6.0000000e-01   8.0000000e-01   1.0000000e+00   5.0000000e-01   4.0000000e-01   6.0000000e-01   8.0000000e-01   9.0000000e-01   9.0000000e-01   3.0000000e-01   6.0000000e-01   1.4000000e+00   8.0000000e-01   7.0000000e-01   7.0000000e-01   2.0000000e-01   1.3000000e+00   7.0000000e-01   1.7000000e+00   8.0000000e-01   1.6000000e+00   1.3000000e+00   1.5000000e+00   2.3000000e+00   1.5000000e+00   2.0000000e+00   1.5000000e+00   1.8000000e+00   8.0000000e-01   1.0000000e+00   1.2000000e+00   7.0000000e-01   1.1000000e+00   1.0000000e+00   1.2000000e+00   2.4000000e+00   2.6000000e+00   7.0000000e-01   1.4000000e+00   8.0000000e-01   2.4000000e+00   6.0000000e-01   1.4000000e+00   1.7000000e+00   5.0000000e-01   6.0000000e-01   1.3000000e+00   1.5000000e+00   1.8000000e+00   2.1000000e+00   1.3000000e+00   8.0000000e-01   1.3000000e+00   1.8000000e+00   1.3000000e+00   1.2000000e+00   5.0000000e-01   1.1000000e+00   1.3000000e+00   1.0000000e+00   8.0000000e-01   1.6000000e+00   1.4000000e+00   1.0000000e+00   7.0000000e-01   9.0000000e-01   1.1000000e+00   8.0000000e-01   4.0000000e-01   6.0000000e-01   6.0000000e-01   9.0000000e-01   1.1000000e+00   1.1000000e+00   8.0000000e-01   7.0000000e-01   1.2000000e+00   6.0000000e-01   3.0000000e-01   7.0000000e-01   1.0000000e+00   1.1000000e+00   1.1000000e+00   5.0000000e-01   8.0000000e-01   1.6000000e+00   1.0000000e+00   9.0000000e-01   9.0000000e-01   4.0000000e-01   1.5000000e+00   9.0000000e-01   1.6000000e+00   8.0000000e-01   1.5000000e+00   1.2000000e+00   1.4000000e+00   2.2000000e+00   1.7000000e+00   1.9000000e+00   1.4000000e+00   1.7000000e+00   7.0000000e-01   9.0000000e-01   
1.1000000e+00   9.0000000e-01   1.0000000e+00   9.0000000e-01   1.1000000e+00   2.3000000e+00   2.5000000e+00   8.0000000e-01   1.3000000e+00   1.0000000e+00   2.3000000e+00   5.0000000e-01   1.3000000e+00   1.6000000e+00   4.0000000e-01   5.0000000e-01   1.2000000e+00   1.4000000e+00   1.7000000e+00   2.0000000e+00   1.2000000e+00   7.0000000e-01   1.2000000e+00   1.7000000e+00   1.2000000e+00   1.1000000e+00   6.0000000e-01   1.0000000e+00   1.2000000e+00   9.0000000e-01   8.0000000e-01   1.5000000e+00   1.3000000e+00   9.0000000e-01   6.0000000e-01   8.0000000e-01   1.0000000e+00   7.0000000e-01   3.0000000e-01   8.0000000e-01   1.3000000e+00   1.3000000e+00   1.3000000e+00   1.0000000e+00   8.0000000e-01   1.4000000e+00   8.0000000e-01   3.0000000e-01   5.0000000e-01   1.2000000e+00   1.3000000e+00   1.3000000e+00   7.0000000e-01   1.0000000e+00   1.8000000e+00   1.2000000e+00   1.1000000e+00   1.1000000e+00   6.0000000e-01   1.8000000e+00   1.1000000e+00   1.2000000e+00   1.0000000e+00   1.1000000e+00   8.0000000e-01   1.0000000e+00   1.8000000e+00   1.9000000e+00   1.5000000e+00   1.0000000e+00   1.3000000e+00   6.0000000e-01   5.0000000e-01   7.0000000e-01   1.1000000e+00   1.0000000e+00   9.0000000e-01   7.0000000e-01   1.9000000e+00   2.1000000e+00   8.0000000e-01   9.0000000e-01   1.2000000e+00   1.9000000e+00   5.0000000e-01   9.0000000e-01   1.2000000e+00   6.0000000e-01   7.0000000e-01   8.0000000e-01   1.0000000e+00   1.3000000e+00   1.6000000e+00   8.0000000e-01   5.0000000e-01   8.0000000e-01   1.3000000e+00   1.0000000e+00   7.0000000e-01   8.0000000e-01   7.0000000e-01   1.0000000e+00   9.0000000e-01   1.0000000e+00   1.1000000e+00   1.1000000e+00   9.0000000e-01   5.0000000e-01   6.0000000e-01   9.0000000e-01   9.0000000e-01   7.0000000e-01   1.5000000e+00   1.2000000e+00   1.3000000e+00   1.1000000e+00   7.0000000e-01   1.3000000e+00   7.0000000e-01   3.0000000e-01   7.0000000e-01   1.1000000e+00   1.2000000e+00   1.2000000e+00   6.0000000e-01   1.0000000e+00   1.7000000e+00   1.1000000e+00   1.0000000e+00   1.0000000e+00   7.0000000e-01   2.0000000e+00   1.0000000e+00   1.0000000e+00   9.0000000e-01   9.0000000e-01   6.0000000e-01   8.0000000e-01   1.6000000e+00   1.8000000e+00   1.3000000e+00   8.0000000e-01   1.1000000e+00   3.0000000e-01   3.0000000e-01   5.0000000e-01   1.0000000e+00   9.0000000e-01   6.0000000e-01   5.0000000e-01   1.7000000e+00   1.9000000e+00   8.0000000e-01   7.0000000e-01   1.1000000e+00   1.7000000e+00   4.0000000e-01   7.0000000e-01   1.0000000e+00   5.0000000e-01   6.0000000e-01   6.0000000e-01   8.0000000e-01   1.1000000e+00   1.4000000e+00   6.0000000e-01   4.0000000e-01   6.0000000e-01   1.1000000e+00   7.0000000e-01   5.0000000e-01   7.0000000e-01   4.0000000e-01   7.0000000e-01   6.0000000e-01   9.0000000e-01   9.0000000e-01   8.0000000e-01   6.0000000e-01   5.0000000e-01   3.0000000e-01   6.0000000e-01   8.0000000e-01   1.0000000e+00   7.0000000e-01   8.0000000e-01   6.0000000e-01   6.0000000e-01   6.0000000e-01   5.0000000e-01   7.0000000e-01   6.0000000e-01   4.0000000e-01   5.0000000e-01   5.0000000e-01   1.0000000e-01   5.0000000e-01   1.2000000e+00   4.0000000e-01   3.0000000e-01   3.0000000e-01   2.0000000e-01   1.5000000e+00   4.0000000e-01   1.5000000e+00   6.0000000e-01   1.4000000e+00   1.1000000e+00   1.3000000e+00   2.1000000e+00   1.1000000e+00   1.8000000e+00   1.3000000e+00   1.6000000e+00   6.0000000e-01   8.0000000e-01   1.0000000e+00   5.0000000e-01   9.0000000e-01   8.0000000e-01   1.0000000e+00   2.2000000e+00   
2.4000000e+00   7.0000000e-01   1.2000000e+00   5.0000000e-01   2.2000000e+00   4.0000000e-01   1.2000000e+00   1.5000000e+00   3.0000000e-01   4.0000000e-01   1.1000000e+00   1.3000000e+00   1.6000000e+00   1.9000000e+00   1.1000000e+00   6.0000000e-01   1.1000000e+00   1.7000000e+00   1.1000000e+00   1.0000000e+00   3.0000000e-01   9.0000000e-01   1.1000000e+00   9.0000000e-01   6.0000000e-01   1.4000000e+00   1.2000000e+00   8.0000000e-01   5.0000000e-01   7.0000000e-01   9.0000000e-01   6.0000000e-01   3.0000000e-01   2.0000000e-01   4.0000000e-01   1.6000000e+00   1.0000000e+00   1.0000000e+00   1.2000000e+00   9.0000000e-01   6.0000000e-01   5.0000000e-01   9.0000000e-01   1.1000000e+00   5.0000000e-01   7.0000000e-01   7.0000000e-01   7.0000000e-01   7.0000000e-01   8.0000000e-01   6.0000000e-01   6.0000000e-01   2.5000000e+00   1.6000000e+00   2.4000000e+00   2.1000000e+00   2.3000000e+00   3.1000000e+00   1.0000000e+00   2.8000000e+00   2.3000000e+00   2.6000000e+00   1.6000000e+00   1.8000000e+00   2.0000000e+00   1.5000000e+00   1.6000000e+00   1.8000000e+00   2.0000000e+00   3.2000000e+00   3.4000000e+00   1.5000000e+00   2.2000000e+00   1.4000000e+00   3.2000000e+00   1.4000000e+00   2.2000000e+00   2.5000000e+00   1.3000000e+00   1.4000000e+00   2.1000000e+00   2.3000000e+00   2.6000000e+00   2.9000000e+00   2.1000000e+00   1.6000000e+00   2.1000000e+00   2.6000000e+00   2.1000000e+00   2.0000000e+00   1.3000000e+00   1.9000000e+00   2.1000000e+00   1.6000000e+00   1.6000000e+00   2.4000000e+00   2.2000000e+00   1.7000000e+00   1.5000000e+00   1.7000000e+00   1.9000000e+00   1.6000000e+00   1.0000000e-01   3.0000000e-01   1.3000000e+00   7.0000000e-01   1.0000000e+00   1.2000000e+00   8.0000000e-01   6.0000000e-01   2.0000000e-01   6.0000000e-01   8.0000000e-01   3.0000000e-01   5.0000000e-01   4.0000000e-01   6.0000000e-01   5.0000000e-01   7.0000000e-01   8.0000000e-01   4.0000000e-01   2.2000000e+00   1.3000000e+00   2.1000000e+00   1.8000000e+00   2.0000000e+00   2.8000000e+00   7.0000000e-01   2.5000000e+00   2.0000000e+00   2.3000000e+00   1.3000000e+00   1.5000000e+00   1.7000000e+00   1.2000000e+00   1.3000000e+00   1.5000000e+00   1.7000000e+00   2.9000000e+00   3.1000000e+00   1.2000000e+00   1.9000000e+00   1.1000000e+00   2.9000000e+00   1.1000000e+00   1.9000000e+00   2.2000000e+00   1.0000000e+00   1.1000000e+00   1.8000000e+00   2.0000000e+00   2.3000000e+00   2.6000000e+00   1.8000000e+00   1.3000000e+00   1.8000000e+00   2.3000000e+00   1.8000000e+00   1.7000000e+00   1.0000000e+00   1.6000000e+00   1.8000000e+00   1.4000000e+00   1.3000000e+00   2.1000000e+00   1.9000000e+00   1.4000000e+00   1.2000000e+00   1.4000000e+00   1.6000000e+00   1.3000000e+00   3.0000000e-01   1.4000000e+00   8.0000000e-01   1.0000000e+00   1.2000000e+00   8.0000000e-01   6.0000000e-01   3.0000000e-01   7.0000000e-01   9.0000000e-01   3.0000000e-01   5.0000000e-01   5.0000000e-01   6.0000000e-01   5.0000000e-01   7.0000000e-01   7.0000000e-01   4.0000000e-01   2.3000000e+00   1.4000000e+00   2.2000000e+00   1.9000000e+00   2.1000000e+00   2.9000000e+00   8.0000000e-01   2.6000000e+00   2.1000000e+00   2.4000000e+00   1.4000000e+00   1.6000000e+00   1.8000000e+00   1.3000000e+00   1.4000000e+00   1.6000000e+00   1.8000000e+00   3.0000000e+00   3.2000000e+00   1.3000000e+00   2.0000000e+00   1.2000000e+00   3.0000000e+00   1.2000000e+00   2.0000000e+00   2.3000000e+00   1.1000000e+00   1.2000000e+00   1.9000000e+00   2.1000000e+00   2.4000000e+00   2.7000000e+00   1.9000000e+00   
1.4000000e+00   1.9000000e+00   2.4000000e+00   1.9000000e+00   1.8000000e+00   1.1000000e+00   1.7000000e+00   1.9000000e+00   1.4000000e+00   1.4000000e+00   2.2000000e+00   2.0000000e+00   1.5000000e+00   1.3000000e+00   1.5000000e+00   1.7000000e+00   1.4000000e+00   1.2000000e+00   6.0000000e-01   7.0000000e-01   9.0000000e-01   5.0000000e-01   3.0000000e-01   3.0000000e-01   5.0000000e-01   7.0000000e-01   1.0000000e-01   8.0000000e-01   3.0000000e-01   3.0000000e-01   3.0000000e-01   4.0000000e-01   9.0000000e-01   2.0000000e-01   2.1000000e+00   1.2000000e+00   2.0000000e+00   1.7000000e+00   1.9000000e+00   2.7000000e+00   9.0000000e-01   2.4000000e+00   1.9000000e+00   2.2000000e+00   1.2000000e+00   1.4000000e+00   1.6000000e+00   1.1000000e+00   1.2000000e+00   1.4000000e+00   1.6000000e+00   2.8000000e+00   3.0000000e+00   1.1000000e+00   1.8000000e+00   1.0000000e+00   2.8000000e+00   1.0000000e+00   1.8000000e+00   2.1000000e+00   9.0000000e-01   1.0000000e+00   1.7000000e+00   1.9000000e+00   2.2000000e+00   2.5000000e+00   1.7000000e+00   1.2000000e+00   1.7000000e+00   2.2000000e+00   1.7000000e+00   1.6000000e+00   9.0000000e-01   1.5000000e+00   1.7000000e+00   1.2000000e+00   1.2000000e+00   2.0000000e+00   1.8000000e+00   1.3000000e+00   1.1000000e+00   1.3000000e+00   1.5000000e+00   1.2000000e+00   6.0000000e-01   7.0000000e-01   7.0000000e-01   7.0000000e-01   1.0000000e+00   1.1000000e+00   7.0000000e-01   5.0000000e-01   1.1000000e+00   1.8000000e+00   9.0000000e-01   9.0000000e-01   9.0000000e-01   8.0000000e-01   2.1000000e+00   1.0000000e+00   9.0000000e-01   3.0000000e-01   1.1000000e+00   5.0000000e-01   7.0000000e-01   1.6000000e+00   1.1000000e+00   1.3000000e+00   7.0000000e-01   1.2000000e+00   5.0000000e-01   4.0000000e-01   8.0000000e-01   4.0000000e-01   8.0000000e-01   7.0000000e-01   5.0000000e-01   1.7000000e+00   1.8000000e+00   5.0000000e-01   9.0000000e-01   4.0000000e-01   1.7000000e+00   3.0000000e-01   7.0000000e-01   1.2000000e+00   3.0000000e-01   3.0000000e-01   5.0000000e-01   1.2000000e+00   1.4000000e+00   1.9000000e+00   6.0000000e-01   3.0000000e-01   5.0000000e-01   1.7000000e+00   8.0000000e-01   4.0000000e-01   3.0000000e-01   9.0000000e-01   8.0000000e-01   9.0000000e-01   3.0000000e-01   8.0000000e-01   9.0000000e-01   7.0000000e-01   3.0000000e-01   5.0000000e-01   7.0000000e-01   3.0000000e-01   6.0000000e-01   1.3000000e+00   9.0000000e-01   4.0000000e-01   5.0000000e-01   4.0000000e-01   7.0000000e-01   5.0000000e-01   1.2000000e+00   3.0000000e-01   3.0000000e-01   3.0000000e-01   8.0000000e-01   1.5000000e+00   4.0000000e-01   1.5000000e+00   6.0000000e-01   1.7000000e+00   1.1000000e+00   1.3000000e+00   2.2000000e+00   5.0000000e-01   1.9000000e+00   1.3000000e+00   1.8000000e+00   1.1000000e+00   1.0000000e+00   1.4000000e+00   5.0000000e-01   9.0000000e-01   1.0000000e+00   1.1000000e+00   2.3000000e+00   2.4000000e+00   8.0000000e-01   1.5000000e+00   5.0000000e-01   2.3000000e+00   9.0000000e-01   1.3000000e+00   1.8000000e+00   8.0000000e-01   7.0000000e-01   1.1000000e+00   1.8000000e+00   2.0000000e+00   2.5000000e+00   1.1000000e+00   9.0000000e-01   1.1000000e+00   2.3000000e+00   1.1000000e+00   1.0000000e+00   6.0000000e-01   1.5000000e+00   1.3000000e+00   1.5000000e+00   6.0000000e-01   1.4000000e+00   1.3000000e+00   1.3000000e+00   9.0000000e-01   1.1000000e+00   9.0000000e-01   6.0000000e-01   7.0000000e-01   1.1000000e+00   4.0000000e-01   9.0000000e-01   8.0000000e-01   4.0000000e-01   8.0000000e-01   
1.2000000e+00   7.0000000e-01   4.0000000e-01   5.0000000e-01   5.0000000e-01   1.5000000e+00   6.0000000e-01   1.5000000e+00   7.0000000e-01   1.4000000e+00   1.1000000e+00   1.3000000e+00   2.1000000e+00   1.1000000e+00   1.8000000e+00   1.3000000e+00   1.6000000e+00   6.0000000e-01   8.0000000e-01   1.0000000e+00   9.0000000e-01   8.0000000e-01   8.0000000e-01   1.0000000e+00   2.2000000e+00   2.4000000e+00   1.2000000e+00   1.2000000e+00   6.0000000e-01   2.2000000e+00   7.0000000e-01   1.2000000e+00   1.5000000e+00   6.0000000e-01   4.0000000e-01   1.1000000e+00   1.3000000e+00   1.6000000e+00   1.9000000e+00   1.1000000e+00   6.0000000e-01   1.1000000e+00   1.7000000e+00   1.1000000e+00   1.0000000e+00   4.0000000e-01   9.0000000e-01   1.1000000e+00   9.0000000e-01   7.0000000e-01   1.4000000e+00   1.2000000e+00   7.0000000e-01   9.0000000e-01   7.0000000e-01   9.0000000e-01   6.0000000e-01   8.0000000e-01   1.1000000e+00   1.2000000e+00   1.2000000e+00   6.0000000e-01   9.0000000e-01   1.7000000e+00   1.1000000e+00   1.0000000e+00   1.0000000e+00   5.0000000e-01   1.7000000e+00   1.0000000e+00   1.3000000e+00   9.0000000e-01   1.2000000e+00   9.0000000e-01   1.1000000e+00   1.9000000e+00   1.8000000e+00   1.6000000e+00   1.1000000e+00   1.4000000e+00   5.0000000e-01   6.0000000e-01   8.0000000e-01   1.0000000e+00   9.0000000e-01   8.0000000e-01   8.0000000e-01   2.0000000e+00   2.2000000e+00   9.0000000e-01   1.0000000e+00   1.1000000e+00   2.0000000e+00   4.0000000e-01   1.0000000e+00   1.3000000e+00   5.0000000e-01   6.0000000e-01   9.0000000e-01   1.1000000e+00   1.4000000e+00   1.7000000e+00   9.0000000e-01   4.0000000e-01   9.0000000e-01   1.4000000e+00   9.0000000e-01   8.0000000e-01   7.0000000e-01   7.0000000e-01   9.0000000e-01   8.0000000e-01   9.0000000e-01   1.2000000e+00   1.0000000e+00   8.0000000e-01   6.0000000e-01   5.0000000e-01   8.0000000e-01   8.0000000e-01   7.0000000e-01   8.0000000e-01   8.0000000e-01   7.0000000e-01   5.0000000e-01   1.3000000e+00   7.0000000e-01   7.0000000e-01   6.0000000e-01   6.0000000e-01   1.4000000e+00   6.0000000e-01   1.6000000e+00   7.0000000e-01   1.5000000e+00   1.2000000e+00   1.4000000e+00   2.2000000e+00   1.4000000e+00   1.9000000e+00   1.4000000e+00   1.7000000e+00   9.0000000e-01   9.0000000e-01   1.1000000e+00   7.0000000e-01   1.1000000e+00   1.0000000e+00   1.1000000e+00   2.3000000e+00   2.5000000e+00   6.0000000e-01   1.3000000e+00   7.0000000e-01   2.3000000e+00   5.0000000e-01   1.3000000e+00   1.6000000e+00   5.0000000e-01   7.0000000e-01   1.2000000e+00   1.4000000e+00   1.7000000e+00   2.0000000e+00   1.2000000e+00   7.0000000e-01   1.2000000e+00   1.7000000e+00   1.2000000e+00   1.1000000e+00   7.0000000e-01   1.0000000e+00   1.2000000e+00   1.0000000e+00   7.0000000e-01   1.5000000e+00   1.3000000e+00   1.0000000e+00   6.0000000e-01   8.0000000e-01   1.1000000e+00   7.0000000e-01   5.0000000e-01   4.0000000e-01   5.0000000e-01   4.0000000e-01   8.0000000e-01   3.0000000e-01   1.0000000e-01   1.0000000e-01   6.0000000e-01   1.1000000e+00   2.0000000e-01   1.9000000e+00   1.0000000e+00   1.8000000e+00   1.5000000e+00   1.7000000e+00   2.5000000e+00   7.0000000e-01   2.2000000e+00   1.7000000e+00   2.0000000e+00   1.0000000e+00   1.2000000e+00   1.4000000e+00   9.0000000e-01   1.1000000e+00   1.2000000e+00   1.4000000e+00   2.6000000e+00   2.8000000e+00   9.0000000e-01   1.6000000e+00   8.0000000e-01   2.6000000e+00   8.0000000e-01   1.6000000e+00   1.9000000e+00   7.0000000e-01   8.0000000e-01   1.5000000e+00   
1.7000000e+00   2.0000000e+00   2.3000000e+00   1.5000000e+00   1.0000000e+00   1.5000000e+00   2.1000000e+00   1.5000000e+00   1.4000000e+00   7.0000000e-01   1.3000000e+00   1.5000000e+00   1.3000000e+00   1.0000000e+00   1.8000000e+00   1.6000000e+00   1.1000000e+00   9.0000000e-01   1.1000000e+00   1.3000000e+00   1.0000000e+00   4.0000000e-01   6.0000000e-01   3.0000000e-01   7.0000000e-01   2.0000000e-01   5.0000000e-01   4.0000000e-01   7.0000000e-01   1.0000000e+00   3.0000000e-01   2.0000000e+00   1.1000000e+00   1.9000000e+00   1.6000000e+00   1.8000000e+00   2.6000000e+00   6.0000000e-01   2.3000000e+00   1.8000000e+00   2.1000000e+00   1.1000000e+00   1.3000000e+00   1.5000000e+00   1.0000000e+00   1.1000000e+00   1.3000000e+00   1.5000000e+00   2.7000000e+00   2.9000000e+00   1.0000000e+00   1.7000000e+00   9.0000000e-01   2.7000000e+00   9.0000000e-01   1.7000000e+00   2.0000000e+00   8.0000000e-01   9.0000000e-01   1.6000000e+00   1.8000000e+00   2.1000000e+00   2.4000000e+00   1.6000000e+00   1.1000000e+00   1.6000000e+00   2.2000000e+00   1.6000000e+00   1.5000000e+00   8.0000000e-01   1.4000000e+00   1.6000000e+00   1.4000000e+00   1.1000000e+00   1.9000000e+00   1.7000000e+00   1.2000000e+00   1.0000000e+00   1.2000000e+00   1.4000000e+00   1.1000000e+00   6.0000000e-01   4.0000000e-01   1.1000000e+00   2.0000000e-01   4.0000000e-01   3.0000000e-01   7.0000000e-01   1.4000000e+00   3.0000000e-01   1.6000000e+00   7.0000000e-01   1.6000000e+00   1.2000000e+00   1.4000000e+00   2.2000000e+00   6.0000000e-01   1.9000000e+00   1.4000000e+00   1.7000000e+00   1.0000000e+00   9.0000000e-01   1.3000000e+00   8.0000000e-01   1.2000000e+00   1.1000000e+00   1.1000000e+00   2.3000000e+00   2.5000000e+00   6.0000000e-01   1.4000000e+00   8.0000000e-01   2.3000000e+00   8.0000000e-01   1.3000000e+00   1.7000000e+00   7.0000000e-01   6.0000000e-01   1.2000000e+00   1.7000000e+00   1.9000000e+00   2.4000000e+00   1.2000000e+00   8.0000000e-01   1.2000000e+00   2.2000000e+00   1.2000000e+00   1.1000000e+00   6.0000000e-01   1.4000000e+00   1.2000000e+00   1.4000000e+00   7.0000000e-01   1.5000000e+00   1.3000000e+00   1.2000000e+00   8.0000000e-01   1.0000000e+00   1.1000000e+00   7.0000000e-01   6.0000000e-01   1.3000000e+00   5.0000000e-01   4.0000000e-01   4.0000000e-01   3.0000000e-01   1.6000000e+00   5.0000000e-01   1.4000000e+00   5.0000000e-01   1.3000000e+00   1.0000000e+00   1.2000000e+00   2.0000000e+00   1.2000000e+00   1.7000000e+00   1.2000000e+00   1.5000000e+00   6.0000000e-01   7.0000000e-01   9.0000000e-01   6.0000000e-01   1.0000000e+00   9.0000000e-01   9.0000000e-01   2.1000000e+00   2.3000000e+00   8.0000000e-01   1.1000000e+00   6.0000000e-01   2.1000000e+00   4.0000000e-01   1.1000000e+00   1.4000000e+00   4.0000000e-01   4.0000000e-01   1.0000000e+00   1.2000000e+00   1.5000000e+00   1.8000000e+00   1.0000000e+00   5.0000000e-01   1.0000000e+00   1.6000000e+00   1.0000000e+00   9.0000000e-01   4.0000000e-01   8.0000000e-01   1.0000000e+00   9.0000000e-01   5.0000000e-01   1.3000000e+00   1.1000000e+00   9.0000000e-01   5.0000000e-01   6.0000000e-01   9.0000000e-01   5.0000000e-01   8.0000000e-01   2.0000000e-01   4.0000000e-01   3.0000000e-01   4.0000000e-01   1.0000000e+00   2.0000000e-01   2.0000000e+00   1.1000000e+00   1.9000000e+00   1.6000000e+00   1.8000000e+00   2.6000000e+00   9.0000000e-01   2.3000000e+00   1.8000000e+00   2.1000000e+00   1.1000000e+00   1.3000000e+00   1.5000000e+00   1.0000000e+00   1.2000000e+00   1.3000000e+00   1.5000000e+00   
2.7000000e+00   2.9000000e+00   1.0000000e+00   1.7000000e+00   9.0000000e-01   2.7000000e+00   9.0000000e-01   1.7000000e+00   2.0000000e+00   8.0000000e-01   9.0000000e-01   1.6000000e+00   1.8000000e+00   2.1000000e+00   2.4000000e+00   1.6000000e+00   1.1000000e+00   1.6000000e+00   2.1000000e+00   1.6000000e+00   1.5000000e+00   8.0000000e-01   1.4000000e+00   1.6000000e+00   1.1000000e+00   1.1000000e+00   1.9000000e+00   1.7000000e+00   1.2000000e+00   1.0000000e+00   1.2000000e+00   1.4000000e+00   1.1000000e+00   9.0000000e-01   9.0000000e-01   9.0000000e-01   1.2000000e+00   3.0000000e-01   8.0000000e-01   2.7000000e+00   1.8000000e+00   2.6000000e+00   2.3000000e+00   2.5000000e+00   3.3000000e+00   1.2000000e+00   3.0000000e+00   2.5000000e+00   2.8000000e+00   1.8000000e+00   2.0000000e+00   2.2000000e+00   1.7000000e+00   1.8000000e+00   2.0000000e+00   2.2000000e+00   3.4000000e+00   3.6000000e+00   1.7000000e+00   2.4000000e+00   1.6000000e+00   3.4000000e+00   1.6000000e+00   2.4000000e+00   2.7000000e+00   1.5000000e+00   1.6000000e+00   2.3000000e+00   2.5000000e+00   2.8000000e+00   3.1000000e+00   2.3000000e+00   1.8000000e+00   2.3000000e+00   2.8000000e+00   2.3000000e+00   2.2000000e+00   1.5000000e+00   2.1000000e+00   2.3000000e+00   1.9000000e+00   1.8000000e+00   2.6000000e+00   2.4000000e+00   1.9000000e+00   1.7000000e+00   1.9000000e+00   2.1000000e+00   1.8000000e+00   3.0000000e-01   2.0000000e-01   6.0000000e-01   1.2000000e+00   1.0000000e-01   1.8000000e+00   9.0000000e-01   1.7000000e+00   1.4000000e+00   1.6000000e+00   2.4000000e+00   7.0000000e-01   2.1000000e+00   1.6000000e+00   1.9000000e+00   9.0000000e-01   1.1000000e+00   1.3000000e+00   8.0000000e-01   1.1000000e+00   1.1000000e+00   1.3000000e+00   2.5000000e+00   2.7000000e+00   8.0000000e-01   1.5000000e+00   7.0000000e-01   2.5000000e+00   7.0000000e-01   1.5000000e+00   1.8000000e+00   6.0000000e-01   7.0000000e-01   1.4000000e+00   1.6000000e+00   1.9000000e+00   2.3000000e+00   1.4000000e+00   9.0000000e-01   1.4000000e+00   2.1000000e+00   1.4000000e+00   1.3000000e+00   6.0000000e-01   1.3000000e+00   1.4000000e+00   1.3000000e+00   9.0000000e-01   1.7000000e+00   1.5000000e+00   1.1000000e+00   8.0000000e-01   1.0000000e+00   1.2000000e+00   9.0000000e-01   1.0000000e-01   5.0000000e-01   1.2000000e+00   2.0000000e-01   1.8000000e+00   9.0000000e-01   1.7000000e+00   1.4000000e+00   1.6000000e+00   2.4000000e+00   8.0000000e-01   2.1000000e+00   1.6000000e+00   1.9000000e+00   9.0000000e-01   1.1000000e+00   1.3000000e+00   8.0000000e-01   1.2000000e+00   1.1000000e+00   1.3000000e+00   2.5000000e+00   2.7000000e+00   8.0000000e-01   1.5000000e+00   8.0000000e-01   2.5000000e+00   7.0000000e-01   1.5000000e+00   1.8000000e+00   6.0000000e-01   7.0000000e-01   1.4000000e+00   1.6000000e+00   1.9000000e+00   2.2000000e+00   1.4000000e+00   9.0000000e-01   1.4000000e+00   2.0000000e+00   1.4000000e+00   1.3000000e+00   6.0000000e-01   1.2000000e+00   1.4000000e+00   1.2000000e+00   9.0000000e-01   1.7000000e+00   1.5000000e+00   1.1000000e+00   8.0000000e-01   1.0000000e+00   1.2000000e+00   9.0000000e-01   5.0000000e-01   1.2000000e+00   1.0000000e-01   1.8000000e+00   9.0000000e-01   1.7000000e+00   1.4000000e+00   1.6000000e+00   2.4000000e+00   8.0000000e-01   2.1000000e+00   1.6000000e+00   1.9000000e+00   9.0000000e-01   1.1000000e+00   1.3000000e+00   8.0000000e-01   1.1000000e+00   1.1000000e+00   1.3000000e+00   2.5000000e+00   2.7000000e+00   8.0000000e-01   1.5000000e+00   
7.0000000e-01   2.5000000e+00   7.0000000e-01   1.5000000e+00   1.8000000e+00   6.0000000e-01   7.0000000e-01   1.4000000e+00   1.6000000e+00   1.9000000e+00   2.2000000e+00   1.4000000e+00   9.0000000e-01   1.4000000e+00   2.0000000e+00   1.4000000e+00   1.3000000e+00   6.0000000e-01   1.2000000e+00   1.4000000e+00   1.2000000e+00   9.0000000e-01   1.7000000e+00   1.5000000e+00   1.0000000e+00   8.0000000e-01   1.0000000e+00   1.2000000e+00   9.0000000e-01   1.3000000e+00   5.0000000e-01   1.7000000e+00   8.0000000e-01   1.6000000e+00   1.3000000e+00   1.5000000e+00   2.3000000e+00   1.3000000e+00   2.0000000e+00   1.5000000e+00   1.8000000e+00   8.0000000e-01   1.0000000e+00   1.2000000e+00   7.0000000e-01   1.1000000e+00   1.0000000e+00   1.2000000e+00   2.4000000e+00   2.6000000e+00   7.0000000e-01   1.4000000e+00   7.0000000e-01   2.4000000e+00   6.0000000e-01   1.4000000e+00   1.7000000e+00   5.0000000e-01   6.0000000e-01   1.3000000e+00   1.5000000e+00   1.8000000e+00   2.1000000e+00   1.3000000e+00   8.0000000e-01   1.3000000e+00   1.8000000e+00   1.3000000e+00   1.2000000e+00   5.0000000e-01   1.1000000e+00   1.3000000e+00   1.0000000e+00   8.0000000e-01   1.6000000e+00   1.4000000e+00   1.0000000e+00   7.0000000e-01   9.0000000e-01   1.1000000e+00   8.0000000e-01   1.1000000e+00   3.0000000e+00   2.1000000e+00   2.9000000e+00   2.6000000e+00   2.8000000e+00   3.6000000e+00   1.5000000e+00   3.3000000e+00   2.8000000e+00   3.1000000e+00   2.1000000e+00   2.3000000e+00   2.5000000e+00   2.0000000e+00   2.1000000e+00   2.3000000e+00   2.5000000e+00   3.7000000e+00   3.9000000e+00   2.0000000e+00   2.7000000e+00   1.9000000e+00   3.7000000e+00   1.9000000e+00   2.7000000e+00   3.0000000e+00   1.8000000e+00   1.9000000e+00   2.6000000e+00   2.8000000e+00   3.1000000e+00   3.4000000e+00   2.6000000e+00   2.1000000e+00   2.6000000e+00   3.1000000e+00   2.6000000e+00   2.5000000e+00   1.8000000e+00   2.4000000e+00   2.6000000e+00   2.1000000e+00   2.1000000e+00   2.9000000e+00   2.7000000e+00   2.2000000e+00   2.0000000e+00   2.2000000e+00   2.4000000e+00   2.1000000e+00   1.9000000e+00   1.0000000e+00   1.8000000e+00   1.5000000e+00   1.7000000e+00   2.5000000e+00   8.0000000e-01   2.2000000e+00   1.7000000e+00   2.0000000e+00   1.0000000e+00   1.2000000e+00   1.4000000e+00   9.0000000e-01   1.1000000e+00   1.2000000e+00   1.4000000e+00   2.6000000e+00   2.8000000e+00   9.0000000e-01   1.6000000e+00   8.0000000e-01   2.6000000e+00   8.0000000e-01   1.6000000e+00   1.9000000e+00   7.0000000e-01   8.0000000e-01   1.5000000e+00   1.7000000e+00   2.0000000e+00   2.3000000e+00   1.5000000e+00   1.0000000e+00   1.5000000e+00   2.0000000e+00   1.5000000e+00   1.4000000e+00   7.0000000e-01   1.3000000e+00   1.5000000e+00   1.2000000e+00   1.0000000e+00   1.8000000e+00   1.6000000e+00   1.1000000e+00   9.0000000e-01   1.1000000e+00   1.3000000e+00   1.0000000e+00   9.0000000e-01   8.0000000e-01   7.0000000e-01   3.0000000e-01   1.3000000e+00   1.5000000e+00   1.0000000e+00   8.0000000e-01   9.0000000e-01   9.0000000e-01   7.0000000e-01   5.0000000e-01   1.0000000e+00   9.0000000e-01   7.0000000e-01   7.0000000e-01   1.4000000e+00   1.4000000e+00   1.1000000e+00   6.0000000e-01   1.1000000e+00   1.4000000e+00   1.1000000e+00   4.0000000e-01   9.0000000e-01   1.2000000e+00   1.1000000e+00   5.0000000e-01   9.0000000e-01   1.1000000e+00   1.6000000e+00   5.0000000e-01   1.0000000e+00   1.1000000e+00   1.4000000e+00   4.0000000e-01   7.0000000e-01   1.2000000e+00   6.0000000e-01   4.0000000e-01   
9.0000000e-01   9.0000000e-01   5.0000000e-01   4.0000000e-01   8.0000000e-01   1.0000000e+00   8.0000000e-01   6.0000000e-01   9.0000000e-01   1.3000000e+00   5.0000000e-01   7.0000000e-01   1.8000000e+00   9.0000000e-01   1.5000000e+00   9.0000000e-01   1.4000000e+00   7.0000000e-01   6.0000000e-01   1.0000000e+00   2.0000000e-01   5.0000000e-01   6.0000000e-01   7.0000000e-01   1.9000000e+00   1.9000000e+00   5.0000000e-01   1.1000000e+00   2.0000000e-01   1.9000000e+00   5.0000000e-01   9.0000000e-01   1.4000000e+00   4.0000000e-01   3.0000000e-01   6.0000000e-01   1.4000000e+00   1.6000000e+00   2.1000000e+00   6.0000000e-01   5.0000000e-01   5.0000000e-01   1.9000000e+00   7.0000000e-01   6.0000000e-01   3.0000000e-01   1.1000000e+00   9.0000000e-01   1.1000000e+00   0.0000000e+00   1.0000000e+00   9.0000000e-01   9.0000000e-01   5.0000000e-01   7.0000000e-01   7.0000000e-01   3.0000000e-01   8.0000000e-01   6.0000000e-01   7.0000000e-01   2.2000000e+00   4.0000000e-01   5.0000000e-01   6.0000000e-01   8.0000000e-01   7.0000000e-01   4.0000000e-01   1.4000000e+00   1.3000000e+00   7.0000000e-01   6.0000000e-01   8.0000000e-01   1.0000000e+00   1.1000000e+00   2.0000000e-01   1.5000000e+00   8.0000000e-01   1.0000000e+00   4.0000000e-01   3.0000000e-01   1.1000000e+00   1.0000000e+00   7.0000000e-01   5.0000000e-01   3.0000000e-01   8.0000000e-01   7.0000000e-01   8.0000000e-01   1.0000000e+00   6.0000000e-01   8.0000000e-01   7.0000000e-01   1.1000000e+00   5.0000000e-01   4.0000000e-01   8.0000000e-01   1.3000000e+00   3.0000000e-01   4.0000000e-01   7.0000000e-01   9.0000000e-01   7.0000000e-01   9.0000000e-01   1.2000000e+00   4.0000000e-01   1.3000000e+00   1.4000000e+00   1.0000000e+00   4.0000000e-01   9.0000000e-01   5.0000000e-01   3.0000000e-01   5.0000000e-01   6.0000000e-01   6.0000000e-01   5.0000000e-01   2.0000000e-01   1.4000000e+00   1.4000000e+00   7.0000000e-01   6.0000000e-01   7.0000000e-01   1.4000000e+00   7.0000000e-01   4.0000000e-01   9.0000000e-01   8.0000000e-01   7.0000000e-01   3.0000000e-01   9.0000000e-01   1.1000000e+00   1.6000000e+00   4.0000000e-01   5.0000000e-01   4.0000000e-01   1.4000000e+00   6.0000000e-01   2.0000000e-01   8.0000000e-01   6.0000000e-01   6.0000000e-01   6.0000000e-01   5.0000000e-01   5.0000000e-01   7.0000000e-01   5.0000000e-01   6.0000000e-01   4.0000000e-01   5.0000000e-01   5.0000000e-01   1.1000000e+00   1.6000000e+00   8.0000000e-01   5.0000000e-01   7.0000000e-01   7.0000000e-01   5.0000000e-01   3.0000000e-01   8.0000000e-01   7.0000000e-01   5.0000000e-01   4.0000000e-01   1.2000000e+00   1.2000000e+00   8.0000000e-01   4.0000000e-01   9.0000000e-01   1.2000000e+00   9.0000000e-01   3.0000000e-01   7.0000000e-01   1.0000000e+00   9.0000000e-01   2.0000000e-01   7.0000000e-01   9.0000000e-01   1.4000000e+00   2.0000000e-01   7.0000000e-01   8.0000000e-01   1.2000000e+00   4.0000000e-01   4.0000000e-01   1.0000000e+00   4.0000000e-01   2.0000000e-01   7.0000000e-01   7.0000000e-01   3.0000000e-01   3.0000000e-01   6.0000000e-01   8.0000000e-01   6.0000000e-01   4.0000000e-01   7.0000000e-01   2.7000000e+00   3.0000000e-01   9.0000000e-01   6.0000000e-01   1.5000000e+00   1.3000000e+00   1.1000000e+00   1.9000000e+00   1.8000000e+00   1.3000000e+00   1.1000000e+00   8.0000000e-01   4.0000000e-01   1.6000000e+00   9.0000000e-01   2.0000000e+00   2.0000000e-01   1.7000000e+00   9.0000000e-01   6.0000000e-01   1.8000000e+00   1.7000000e+00   1.2000000e+00   8.0000000e-01   5.0000000e-01   8.0000000e-01   1.2000000e+00   
1.5000000e+00   1.5000000e+00   5.0000000e-01   1.3000000e+00   1.2000000e+00   1.8000000e+00   1.2000000e+00   1.0000000e+00   1.5000000e+00   1.8000000e+00   8.0000000e-01   9.0000000e-01   1.4000000e+00   1.6000000e+00   1.4000000e+00   1.4000000e+00   1.7000000e+00   2.4000000e+00   1.8000000e+00   2.3000000e+00   1.6000000e+00   1.5000000e+00   1.9000000e+00   8.0000000e-01   9.0000000e-01   1.5000000e+00   1.6000000e+00   2.8000000e+00   2.8000000e+00   1.1000000e+00   2.0000000e+00   7.0000000e-01   2.8000000e+00   1.4000000e+00   1.8000000e+00   2.3000000e+00   1.3000000e+00   1.2000000e+00   1.5000000e+00   2.3000000e+00   2.5000000e+00   3.0000000e+00   1.5000000e+00   1.4000000e+00   1.2000000e+00   2.8000000e+00   1.4000000e+00   1.5000000e+00   1.1000000e+00   2.0000000e+00   1.8000000e+00   2.0000000e+00   9.0000000e-01   1.9000000e+00   1.8000000e+00   1.8000000e+00   1.4000000e+00   1.6000000e+00   1.3000000e+00   1.0000000e+00   6.0000000e-01   7.0000000e-01   1.2000000e+00   1.0000000e+00   8.0000000e-01   1.6000000e+00   1.5000000e+00   1.0000000e+00   8.0000000e-01   9.0000000e-01   6.0000000e-01   1.3000000e+00   6.0000000e-01   1.7000000e+00   4.0000000e-01   1.4000000e+00   6.0000000e-01   3.0000000e-01   1.5000000e+00   1.4000000e+00   9.0000000e-01   5.0000000e-01   2.0000000e-01   9.0000000e-01   9.0000000e-01   1.2000000e+00   1.2000000e+00   5.0000000e-01   1.0000000e+00   9.0000000e-01   1.5000000e+00   9.0000000e-01   7.0000000e-01   1.2000000e+00   1.5000000e+00   5.0000000e-01   7.0000000e-01   1.1000000e+00   1.3000000e+00   1.1000000e+00   1.1000000e+00   1.4000000e+00   1.1000000e+00   7.0000000e-01   5.0000000e-01   5.0000000e-01   1.0000000e+00   9.0000000e-01   7.0000000e-01   5.0000000e-01   1.3000000e+00   1.1000000e+00   8.0000000e-01   7.0000000e-01   1.1000000e+00   1.0000000e+00   9.0000000e-01   8.0000000e-01   7.0000000e-01   1.0000000e+00   9.0000000e-01   3.0000000e-01   5.0000000e-01   7.0000000e-01   1.3000000e+00   4.0000000e-01   7.0000000e-01   6.0000000e-01   1.0000000e+00   9.0000000e-01   6.0000000e-01   1.0000000e+00   6.0000000e-01   6.0000000e-01   7.0000000e-01   9.0000000e-01   7.0000000e-01   8.0000000e-01   6.0000000e-01   8.0000000e-01   6.0000000e-01   9.0000000e-01   8.0000000e-01   1.0000000e+00   9.0000000e-01   6.0000000e-01   1.5000000e+00   1.4000000e+00   8.0000000e-01   7.0000000e-01   6.0000000e-01   1.0000000e+00   1.4000000e+00   4.0000000e-01   1.6000000e+00   8.0000000e-01   1.2000000e+00   5.0000000e-01   7.0000000e-01   1.3000000e+00   1.2000000e+00   8.0000000e-01   9.0000000e-01   8.0000000e-01   7.0000000e-01   8.0000000e-01   1.0000000e+00   1.1000000e+00   6.0000000e-01   9.0000000e-01   8.0000000e-01   1.3000000e+00   7.0000000e-01   5.0000000e-01   1.0000000e+00   1.4000000e+00   4.0000000e-01   5.0000000e-01   9.0000000e-01   1.1000000e+00   9.0000000e-01   1.0000000e+00   1.3000000e+00   5.0000000e-01   4.0000000e-01   8.0000000e-01   7.0000000e-01   3.0000000e-01   4.0000000e-01   1.6000000e+00   1.8000000e+00   1.0000000e+00   6.0000000e-01   9.0000000e-01   1.6000000e+00   5.0000000e-01   6.0000000e-01   9.0000000e-01   4.0000000e-01   4.0000000e-01   5.0000000e-01   7.0000000e-01   1.0000000e+00   1.4000000e+00   5.0000000e-01   5.0000000e-01   6.0000000e-01   1.2000000e+00   5.0000000e-01   4.0000000e-01   5.0000000e-01   4.0000000e-01   5.0000000e-01   4.0000000e-01   7.0000000e-01   8.0000000e-01   6.0000000e-01   3.0000000e-01   7.0000000e-01   2.0000000e-01   3.0000000e-01   6.0000000e-01   
4.0000000e-01   7.0000000e-01   6.0000000e-01   5.0000000e-01   3.0000000e-01   1.4000000e+00   1.6000000e+00   5.0000000e-01   5.0000000e-01   8.0000000e-01   1.4000000e+00   4.0000000e-01   6.0000000e-01   8.0000000e-01   5.0000000e-01   4.0000000e-01   3.0000000e-01   8.0000000e-01   1.0000000e+00   1.5000000e+00   3.0000000e-01   4.0000000e-01   5.0000000e-01   1.3000000e+00   7.0000000e-01   4.0000000e-01   5.0000000e-01   5.0000000e-01   5.0000000e-01   5.0000000e-01   6.0000000e-01   6.0000000e-01   6.0000000e-01   4.0000000e-01   3.0000000e-01   3.0000000e-01   7.0000000e-01   5.0000000e-01   1.1000000e+00   1.0000000e+00   4.0000000e-01   3.0000000e-01   1.2000000e+00   1.4000000e+00   8.0000000e-01   2.0000000e-01   1.2000000e+00   1.2000000e+00   6.0000000e-01   3.0000000e-01   5.0000000e-01   7.0000000e-01   7.0000000e-01   4.0000000e-01   5.0000000e-01   6.0000000e-01   1.1000000e+00   4.0000000e-01   6.0000000e-01   7.0000000e-01   9.0000000e-01   5.0000000e-01   4.0000000e-01   8.0000000e-01   1.0000000e-01   3.0000000e-01   4.0000000e-01   1.0000000e+00   4.0000000e-01   4.0000000e-01   3.0000000e-01   5.0000000e-01   3.0000000e-01   6.0000000e-01   9.0000000e-01   4.0000000e-01   7.0000000e-01   8.0000000e-01   2.0000000e+00   2.0000000e+00   5.0000000e-01   1.2000000e+00   3.0000000e-01   2.0000000e+00   6.0000000e-01   1.0000000e+00   1.5000000e+00   5.0000000e-01   5.0000000e-01   7.0000000e-01   1.5000000e+00   1.7000000e+00   2.2000000e+00   7.0000000e-01   6.0000000e-01   6.0000000e-01   2.0000000e+00   9.0000000e-01   7.0000000e-01   5.0000000e-01   1.2000000e+00   1.0000000e+00   1.2000000e+00   2.0000000e-01   1.1000000e+00   1.0000000e+00   1.0000000e+00   6.0000000e-01   8.0000000e-01   9.0000000e-01   5.0000000e-01   6.0000000e-01   7.0000000e-01   1.9000000e+00   1.9000000e+00   9.0000000e-01   1.1000000e+00   4.0000000e-01   1.9000000e+00   6.0000000e-01   9.0000000e-01   1.4000000e+00   6.0000000e-01   6.0000000e-01   6.0000000e-01   1.4000000e+00   1.6000000e+00   2.1000000e+00   6.0000000e-01   9.0000000e-01   1.0000000e+00   1.9000000e+00   6.0000000e-01   6.0000000e-01   6.0000000e-01   1.1000000e+00   9.0000000e-01   1.1000000e+00   5.0000000e-01   1.0000000e+00   9.0000000e-01   9.0000000e-01   5.0000000e-01   7.0000000e-01   6.0000000e-01   6.0000000e-01   5.0000000e-01   1.4000000e+00   1.6000000e+00   1.0000000e+00   5.0000000e-01   8.0000000e-01   1.4000000e+00   5.0000000e-01   4.0000000e-01   8.0000000e-01   5.0000000e-01   5.0000000e-01   4.0000000e-01   8.0000000e-01   1.0000000e+00   1.5000000e+00   4.0000000e-01   8.0000000e-01   9.0000000e-01   1.3000000e+00   3.0000000e-01   5.0000000e-01   5.0000000e-01   5.0000000e-01   3.0000000e-01   5.0000000e-01   6.0000000e-01   6.0000000e-01   4.0000000e-01   3.0000000e-01   7.0000000e-01   3.0000000e-01   2.0000000e-01   5.0000000e-01   1.2000000e+00   1.4000000e+00   8.0000000e-01   5.0000000e-01   9.0000000e-01   1.2000000e+00   6.0000000e-01   3.0000000e-01   7.0000000e-01   7.0000000e-01   6.0000000e-01   3.0000000e-01   7.0000000e-01   9.0000000e-01   1.4000000e+00   4.0000000e-01   4.0000000e-01   4.0000000e-01   1.2000000e+00   6.0000000e-01   1.0000000e-01   7.0000000e-01   4.0000000e-01   6.0000000e-01   5.0000000e-01   7.0000000e-01   5.0000000e-01   7.0000000e-01   5.0000000e-01   5.0000000e-01   3.0000000e-01   5.0000000e-01   6.0000000e-01   1.2000000e+00   1.7000000e+00   1.0000000e+00   2.1000000e+00   1.0000000e+00   1.8000000e+00   1.0000000e+00   7.0000000e-01   1.9000000e+00   
1.8000000e+00   1.3000000e+00   9.0000000e-01   1.0000000e+00   3.0000000e-01   1.3000000e+00   1.6000000e+00   1.6000000e+00   8.0000000e-01   1.4000000e+00   1.3000000e+00   1.9000000e+00   1.3000000e+00   1.1000000e+00   1.6000000e+00   1.9000000e+00   9.0000000e-01   1.0000000e+00   1.5000000e+00   1.7000000e+00   1.5000000e+00   1.5000000e+00   1.8000000e+00   1.9000000e+00   1.2000000e+00   2.1000000e+00   3.0000000e-01   2.0000000e+00   1.2000000e+00   9.0000000e-01   2.1000000e+00   2.0000000e+00   1.3000000e+00   1.1000000e+00   8.0000000e-01   1.2000000e+00   1.3000000e+00   1.8000000e+00   1.6000000e+00   8.0000000e-01   1.4000000e+00   1.4000000e+00   2.1000000e+00   1.5000000e+00   1.3000000e+00   1.8000000e+00   1.9000000e+00   1.0000000e+00   1.2000000e+00   1.7000000e+00   1.9000000e+00   1.7000000e+00   1.5000000e+00   1.8000000e+00   1.0000000e+00   6.0000000e-01   1.7000000e+00   5.0000000e-01   1.1000000e+00   1.2000000e+00   6.0000000e-01   8.0000000e-01   6.0000000e-01   1.2000000e+00   1.4000000e+00   1.9000000e+00   7.0000000e-01   6.0000000e-01   6.0000000e-01   1.7000000e+00   1.2000000e+00   9.0000000e-01   8.0000000e-01   9.0000000e-01   9.0000000e-01   9.0000000e-01   5.0000000e-01   1.0000000e+00   1.1000000e+00   8.0000000e-01   4.0000000e-01   8.0000000e-01   1.2000000e+00   8.0000000e-01   1.3000000e+00   1.0000000e+00   8.0000000e-01   2.0000000e-01   5.0000000e-01   9.0000000e-01   8.0000000e-01   5.0000000e-01   7.0000000e-01   5.0000000e-01   1.0000000e+00   5.0000000e-01   8.0000000e-01   9.0000000e-01   8.0000000e-01   6.0000000e-01   5.0000000e-01   9.0000000e-01   3.0000000e-01   2.0000000e-01   6.0000000e-01   1.1000000e+00   2.0000000e-01   2.0000000e-01   5.0000000e-01   7.0000000e-01   5.0000000e-01   7.0000000e-01   1.0000000e+00   2.1000000e+00   7.0000000e-01   1.1000000e+00   1.6000000e+00   6.0000000e-01   5.0000000e-01   8.0000000e-01   1.6000000e+00   1.8000000e+00   2.3000000e+00   8.0000000e-01   7.0000000e-01   7.0000000e-01   2.1000000e+00   7.0000000e-01   8.0000000e-01   4.0000000e-01   1.3000000e+00   1.1000000e+00   1.3000000e+00   2.0000000e-01   1.2000000e+00   1.1000000e+00   1.1000000e+00   7.0000000e-01   9.0000000e-01   6.0000000e-01   3.0000000e-01   1.8000000e+00   1.0000000e+00   7.0000000e-01   1.9000000e+00   1.8000000e+00   1.3000000e+00   9.0000000e-01   6.0000000e-01   1.0000000e+00   1.3000000e+00   1.6000000e+00   1.6000000e+00   6.0000000e-01   1.4000000e+00   1.3000000e+00   1.9000000e+00   1.3000000e+00   1.1000000e+00   1.6000000e+00   1.9000000e+00   9.0000000e-01   1.0000000e+00   1.5000000e+00   1.7000000e+00   1.5000000e+00   1.5000000e+00   1.8000000e+00   8.0000000e-01   1.1000000e+00   1.0000000e-01   3.0000000e-01   7.0000000e-01   9.0000000e-01   1.2000000e+00   1.6000000e+00   7.0000000e-01   3.0000000e-01   7.0000000e-01   1.4000000e+00   7.0000000e-01   6.0000000e-01   3.0000000e-01   6.0000000e-01   7.0000000e-01   6.0000000e-01   5.0000000e-01   1.0000000e+00   8.0000000e-01   5.0000000e-01   2.0000000e-01   3.0000000e-01   7.0000000e-01   4.0000000e-01   5.0000000e-01   9.0000000e-01   8.0000000e-01   5.0000000e-01   5.0000000e-01   7.0000000e-01   1.2000000e+00   5.0000000e-01   6.0000000e-01   7.0000000e-01   1.0000000e+00   4.0000000e-01   3.0000000e-01   9.0000000e-01   3.0000000e-01   3.0000000e-01   6.0000000e-01   9.0000000e-01   2.0000000e-01   4.0000000e-01   5.0000000e-01   8.0000000e-01   5.0000000e-01   5.0000000e-01   8.0000000e-01   1.2000000e+00   1.1000000e+00   8.0000000e-01   
2.0000000e-01   4.0000000e-01   7.0000000e-01   8.0000000e-01   9.0000000e-01   1.1000000e+00   5.0000000e-01   9.0000000e-01   8.0000000e-01   1.2000000e+00   6.0000000e-01   6.0000000e-01   9.0000000e-01   1.4000000e+00   5.0000000e-01   7.0000000e-01   8.0000000e-01   1.0000000e+00   8.0000000e-01   1.0000000e+00   1.3000000e+00   2.0000000e-01   8.0000000e-01   1.0000000e+00   1.3000000e+00   1.7000000e+00   8.0000000e-01   3.0000000e-01   8.0000000e-01   1.5000000e+00   8.0000000e-01   7.0000000e-01   2.0000000e-01   7.0000000e-01   8.0000000e-01   7.0000000e-01   4.0000000e-01   1.1000000e+00   9.0000000e-01   5.0000000e-01   3.0000000e-01   4.0000000e-01   6.0000000e-01   3.0000000e-01   7.0000000e-01   1.1000000e+00   1.3000000e+00   1.8000000e+00   7.0000000e-01   3.0000000e-01   7.0000000e-01   1.6000000e+00   7.0000000e-01   6.0000000e-01   1.0000000e-01   8.0000000e-01   7.0000000e-01   8.0000000e-01   3.0000000e-01   1.0000000e+00   8.0000000e-01   6.0000000e-01   5.0000000e-01   4.0000000e-01   5.0000000e-01   2.0000000e-01   8.0000000e-01   1.0000000e+00   1.5000000e+00   1.0000000e-01   6.0000000e-01   7.0000000e-01   1.3000000e+00   6.0000000e-01   3.0000000e-01   8.0000000e-01   5.0000000e-01   3.0000000e-01   5.0000000e-01   6.0000000e-01   4.0000000e-01   5.0000000e-01   4.0000000e-01   6.0000000e-01   4.0000000e-01   6.0000000e-01   5.0000000e-01   3.0000000e-01   8.0000000e-01   8.0000000e-01   9.0000000e-01   1.1000000e+00   7.0000000e-01   9.0000000e-01   8.0000000e-01   1.2000000e+00   5.0000000e-01   8.0000000e-01   7.0000000e-01   1.4000000e+00   7.0000000e-01   9.0000000e-01   7.0000000e-01   9.0000000e-01   7.0000000e-01   1.0000000e+00   1.3000000e+00   1.0000000e+00   1.0000000e+00   1.1000000e+00   1.3000000e+00   4.0000000e-01   1.1000000e+00   1.0000000e+00   1.4000000e+00   7.0000000e-01   7.0000000e-01   1.0000000e+00   1.6000000e+00   6.0000000e-01   7.0000000e-01   9.0000000e-01   1.1000000e+00   9.0000000e-01   1.2000000e+00   1.5000000e+00   1.5000000e+00   1.6000000e+00   1.8000000e+00   8.0000000e-01   1.6000000e+00   1.5000000e+00   1.9000000e+00   1.0000000e+00   1.2000000e+00   1.3000000e+00   2.1000000e+00   1.1000000e+00   1.2000000e+00   1.2000000e+00   1.6000000e+00   1.4000000e+00   1.7000000e+00   2.0000000e+00   7.0000000e-01   8.0000000e-01   1.3000000e+00   6.0000000e-01   4.0000000e-01   8.0000000e-01   5.0000000e-01   3.0000000e-01   5.0000000e-01   6.0000000e-01   4.0000000e-01   5.0000000e-01   4.0000000e-01   6.0000000e-01   4.0000000e-01   6.0000000e-01   5.0000000e-01   5.0000000e-01   1.4000000e+00   9.0000000e-01   4.0000000e-01   3.0000000e-01   6.0000000e-01   9.0000000e-01   8.0000000e-01   5.0000000e-01   8.0000000e-01   1.0000000e+00   8.0000000e-01   4.0000000e-01   5.0000000e-01   8.0000000e-01   4.0000000e-01   1.6000000e+00   1.0000000e+00   5.0000000e-01   8.0000000e-01   8.0000000e-01   1.0000000e+00   9.0000000e-01   5.0000000e-01   9.0000000e-01   1.1000000e+00   9.0000000e-01   6.0000000e-01   6.0000000e-01   9.0000000e-01   5.0000000e-01   1.4000000e+00   1.3000000e+00   1.7000000e+00   8.0000000e-01   1.0000000e+00   1.0000000e+00   1.9000000e+00   9.0000000e-01   1.0000000e+00   1.0000000e+00   1.4000000e+00   1.2000000e+00   1.5000000e+00   1.8000000e+00   6.0000000e-01   8.0000000e-01   6.0000000e-01   4.0000000e-01   6.0000000e-01   7.0000000e-01   5.0000000e-01   4.0000000e-01   4.0000000e-01   9.0000000e-01   4.0000000e-01   2.0000000e-01   6.0000000e-01   7.0000000e-01   5.0000000e-01   6.0000000e-01   
5.0000000e-01   6.0000000e-01   5.0000000e-01   7.0000000e-01   5.0000000e-01   6.0000000e-01   3.0000000e-01   5.0000000e-01   5.0000000e-01   9.0000000e-01   8.0000000e-01   9.0000000e-01   3.0000000e-01   1.1000000e+00   9.0000000e-01   7.0000000e-01   5.0000000e-01   5.0000000e-01   6.0000000e-01   3.0000000e-01   3.0000000e-01   3.0000000e-01   1.1000000e+00   5.0000000e-01   4.0000000e-01   2.0000000e-01   6.0000000e-01   4.0000000e-01   7.0000000e-01   1.0000000e+00   5.0000000e-01   9.0000000e-01   3.0000000e-01   2.0000000e-01   4.0000000e-01   6.0000000e-01   4.0000000e-01   5.0000000e-01   8.0000000e-01   1.1000000e+00   8.0000000e-01   6.0000000e-01   2.0000000e-01   6.0000000e-01   4.0000000e-01   7.0000000e-01   1.0000000e+00   1.0000000e+00   9.0000000e-01   9.0000000e-01   5.0000000e-01   7.0000000e-01   7.0000000e-01   3.0000000e-01   2.0000000e-01   7.0000000e-01   9.0000000e-01   7.0000000e-01   6.0000000e-01   9.0000000e-01   5.0000000e-01   8.0000000e-01   5.0000000e-01   5.0000000e-01   8.0000000e-01   5.0000000e-01   3.0000000e-01   5.0000000e-01   8.0000000e-01   5.0000000e-01   9.0000000e-01   5.0000000e-01   4.0000000e-01   6.0000000e-01   5.0000000e-01
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-chebyshev-ml.txt b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-chebyshev-ml.txt
new file mode 100644
index 00000000..78648629
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-chebyshev-ml.txt
@@ -0,0 +1 @@
+   8.9084734e-01   9.3573853e-01   9.3507398e-01   9.6040691e-01   9.2918157e-01   9.6617342e-01   9.0430930e-01   9.5753424e-01   8.7106898e-01   9.2169905e-01   9.7401159e-01   8.9013416e-01   9.3956689e-01   9.0041896e-01   9.2588355e-01   9.3849417e-01   8.9713468e-01   9.1481804e-01   9.7500539e-01   9.0012586e-01   9.0962559e-01   8.5860091e-01   8.6981095e-01   8.9995771e-01   8.8070172e-01   9.1456657e-01   8.6711474e-01   9.2593917e-01   8.7560376e-01   8.5193121e-01   9.0898542e-01   8.7765302e-01   8.6555584e-01   8.6093485e-01   9.0447028e-01   8.7614405e-01   9.4803522e-01   8.4998062e-01   7.8398996e-01   8.9538612e-01   8.3902291e-01   9.9039470e-01   9.5480519e-01   8.9152195e-01   9.1623329e-01   7.9094921e-01   9.1777100e-01   9.8972335e-01   9.0429093e-01   8.7646362e-01   9.2136649e-01   9.7178177e-01   8.9610979e-01   9.4710327e-01   9.3612450e-01   9.0241499e-01   7.7992538e-01   8.7262126e-01   9.3325183e-01   8.5796531e-01   9.4267977e-01   6.7224167e-01   7.9568368e-01   8.6411267e-01   9.3311642e-01   9.0160114e-01   9.0698887e-01   8.5833256e-01   9.6902830e-01   9.5072298e-01   8.6808495e-01   9.7879599e-01   8.8060729e-01   8.2818573e-01   8.4366706e-01   8.4506700e-01   9.4532981e-01   9.1792306e-01   7.8917825e-01   9.8337805e-01   8.1751613e-01   9.3037855e-01   9.1618832e-01   8.6568874e-01   8.9751397e-01   8.7923710e-01   8.6814329e-01   9.0330164e-01   8.2426213e-01   9.4644643e-01   8.8431293e-01   8.8497426e-01   9.0633818e-01   9.5537161e-01   8.2167575e-01   8.7771053e-01   9.0681167e-01   8.7626143e-01   8.7463464e-01   9.8033940e-01   9.2920881e-01   9.5108549e-01   9.1287466e-01   8.0052218e-01   9.2409517e-01   8.8252650e-01   8.7873923e-01   9.2989402e-01   9.1985043e-01   9.6172646e-01   8.8223856e-01   9.4477822e-01   8.8310948e-01   9.4461306e-01   9.1875210e-01   9.1233363e-01   9.2124013e-01   9.5460897e-01   8.4640982e-01   9.0882657e-01   9.8169468e-01   9.7828355e-01   8.4150533e-01   8.6888923e-01   9.7138825e-01   8.7988144e-01   9.6720910e-01   8.9450147e-01   9.5331584e-01   8.8871809e-01   8.9736685e-01   8.6258146e-01   9.1331565e-01   9.0968870e-01   9.4833654e-01   9.0536967e-01   9.5099871e-01   8.0251958e-01   9.2526150e-01   9.8971957e-01   9.0340947e-01   9.4955892e-01   9.6838162e-01   8.7534901e-01   9.1178797e-01   9.2649154e-01   9.5260993e-01   9.3178143e-01   9.4943000e-01   8.7816171e-01   9.6506542e-01   8.3422958e-01   9.3443585e-01   9.3220084e-01   8.5706573e-01   8.4666325e-01   9.0474744e-01   9.1080644e-01   9.2406899e-01   8.7901768e-01   9.3265263e-01   9.5992829e-01   9.5696271e-01   9.1932272e-01   8.0937044e-01   9.0904917e-01   8.9516756e-01   9.4797906e-01   8.4159421e-01   9.6773901e-01   9.7099825e-01   9.6941820e-01   9.8174088e-01   9.7569951e-01   9.3655362e-01   8.4130333e-01   9.5994549e-01   8.4235414e-01   9.1429418e-01   9.3418117e-01   8.4600977e-01   8.8166496e-01   8.7594776e-01   8.8571112e-01   9.6308174e-01   9.5315927e-01   8.6997519e-01   8.9383032e-01   9.4686804e-01   9.4399596e-01
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-cityblock-ml-iris.txt b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-cityblock-ml-iris.txt
new file mode 100644
index 00000000..6722928a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-cityblock-ml-iris.txt
@@ -0,0 +1 @@
+   7.0000000e-01   8.0000000e-01   1.0000000e+00   2.0000000e-01   1.2000000e+00   7.0000000e-01   3.0000000e-01   1.3000000e+00   8.0000000e-01   6.0000000e-01   6.0000000e-01   9.0000000e-01   1.7000000e+00   1.4000000e+00   1.8000000e+00   1.0000000e+00   1.0000000e-01   1.3000000e+00   5.0000000e-01   7.0000000e-01   5.0000000e-01   1.0000000e+00   8.0000000e-01   9.0000000e-01   8.0000000e-01   6.0000000e-01   2.0000000e-01   2.0000000e-01   9.0000000e-01   9.0000000e-01   7.0000000e-01   9.0000000e-01   1.1000000e+00   8.0000000e-01   6.0000000e-01   5.0000000e-01   8.0000000e-01   1.3000000e+00   2.0000000e-01   3.0000000e-01   2.0000000e+00   1.1000000e+00   7.0000000e-01   1.0000000e+00   9.0000000e-01   5.0000000e-01   8.0000000e-01   5.0000000e-01   3.0000000e-01   6.7000000e+00   6.0000000e+00   7.0000000e+00   5.3000000e+00   6.6000000e+00   5.5000000e+00   6.1000000e+00   4.0000000e+00   6.4000000e+00   4.6000000e+00   4.5000000e+00   5.4000000e+00   5.6000000e+00   6.1000000e+00   4.4000000e+00   6.2000000e+00   5.4000000e+00   5.0000000e+00   6.8000000e+00   4.9000000e+00   6.1000000e+00   5.4000000e+00   7.0000000e+00   6.0000000e+00   5.9000000e+00   6.2000000e+00   7.0000000e+00   7.2000000e+00   5.9000000e+00   4.4000000e+00   4.8000000e+00   4.6000000e+00   5.0000000e+00   6.8000000e+00   5.2000000e+00   5.5000000e+00   6.6000000e+00   6.5000000e+00   4.8000000e+00   5.1000000e+00   5.3000000e+00   5.9000000e+00   5.2000000e+00   4.0000000e+00   5.2000000e+00   4.9000000e+00   5.1000000e+00   5.7000000e+00   3.5000000e+00   5.1000000e+00   8.3000000e+00   6.9000000e+00   8.9000000e+00   7.6000000e+00   8.3000000e+00   1.0100000e+01   5.8000000e+00   9.3000000e+00   8.6000000e+00   9.2000000e+00   7.2000000e+00   7.7000000e+00   8.2000000e+00   7.0000000e+00   7.3000000e+00   7.6000000e+00   7.6000000e+00   1.0200000e+01   1.1100000e+01   7.1000000e+00   8.5000000e+00   6.5000000e+00   1.0400000e+01   7.1000000e+00   8.0000000e+00   8.6000000e+00   6.8000000e+00   6.6000000e+00   8.1000000e+00   8.4000000e+00   9.4000000e+00   9.9000000e+00   8.2000000e+00   6.9000000e+00   7.3000000e+00   9.9000000e+00   7.7000000e+00   7.4000000e+00   6.4000000e+00   8.1000000e+00   8.4000000e+00   8.0000000e+00   6.9000000e+00   8.6000000e+00   8.4000000e+00   8.0000000e+00   7.5000000e+00   7.5000000e+00   7.3000000e+00   6.6000000e+00   5.0000000e-01   5.0000000e-01   7.0000000e-01   1.9000000e+00   8.0000000e-01   6.0000000e-01   6.0000000e-01   3.0000000e-01   1.3000000e+00   7.0000000e-01   2.0000000e-01   1.0000000e+00   2.1000000e+00   2.5000000e+00   1.7000000e+00   8.0000000e-01   2.0000000e+00   1.2000000e+00   1.2000000e+00   1.2000000e+00   1.3000000e+00   1.1000000e+00   1.0000000e+00   3.0000000e-01   9.0000000e-01   9.0000000e-01   7.0000000e-01   6.0000000e-01   4.0000000e-01   1.2000000e+00   1.6000000e+00   1.8000000e+00   3.0000000e-01   5.0000000e-01   1.2000000e+00   3.0000000e-01   6.0000000e-01   7.0000000e-01   8.0000000e-01   1.3000000e+00   8.0000000e-01   1.2000000e+00   1.7000000e+00   2.0000000e-01   1.2000000e+00   5.0000000e-01   1.2000000e+00   4.0000000e-01   6.8000000e+00   6.1000000e+00   6.9000000e+00   5.0000000e+00   6.3000000e+00   5.2000000e+00   6.4000000e+00   3.3000000e+00   6.1000000e+00   4.3000000e+00   4.0000000e+00   5.1000000e+00   5.3000000e+00   5.8000000e+00   4.1000000e+00   6.1000000e+00   5.1000000e+00   4.7000000e+00   6.5000000e+00   4.6000000e+00   6.2000000e+00   5.1000000e+00   6.7000000e+00   5.7000000e+00   5.6000000e+00  
 5.9000000e+00   6.7000000e+00   6.9000000e+00   5.6000000e+00   4.1000000e+00   4.5000000e+00   4.3000000e+00   4.7000000e+00   6.5000000e+00   4.9000000e+00   6.0000000e+00   6.5000000e+00   6.2000000e+00   4.5000000e+00   4.8000000e+00   5.0000000e+00   5.6000000e+00   4.9000000e+00   3.5000000e+00   4.9000000e+00   4.6000000e+00   4.8000000e+00   5.4000000e+00   3.2000000e+00   4.8000000e+00   8.6000000e+00   6.6000000e+00   8.6000000e+00   7.3000000e+00   8.0000000e+00   9.8000000e+00   5.1000000e+00   9.0000000e+00   8.3000000e+00   9.9000000e+00   7.3000000e+00   7.4000000e+00   7.9000000e+00   6.7000000e+00   7.0000000e+00   7.7000000e+00   7.3000000e+00   1.0900000e+01   1.0800000e+01   6.8000000e+00   8.6000000e+00   6.2000000e+00   1.0100000e+01   6.8000000e+00   8.3000000e+00   8.7000000e+00   6.5000000e+00   6.3000000e+00   7.8000000e+00   8.1000000e+00   9.1000000e+00   1.0600000e+01   7.9000000e+00   6.6000000e+00   7.0000000e+00   9.6000000e+00   8.2000000e+00   7.3000000e+00   6.1000000e+00   8.0000000e+00   8.3000000e+00   7.9000000e+00   6.6000000e+00   8.7000000e+00   8.7000000e+00   7.7000000e+00   7.2000000e+00   7.2000000e+00   7.8000000e+00   6.3000000e+00   4.0000000e-01   8.0000000e-01   2.0000000e+00   5.0000000e-01   7.0000000e-01   7.0000000e-01   6.0000000e-01   1.4000000e+00   6.0000000e-01   5.0000000e-01   9.0000000e-01   2.0000000e+00   2.6000000e+00   1.6000000e+00   9.0000000e-01   2.1000000e+00   1.3000000e+00   1.3000000e+00   1.3000000e+00   8.0000000e-01   1.2000000e+00   9.0000000e-01   8.0000000e-01   1.0000000e+00   1.0000000e+00   8.0000000e-01   3.0000000e-01   5.0000000e-01   1.3000000e+00   1.7000000e+00   1.9000000e+00   6.0000000e-01   4.0000000e-01   1.1000000e+00   6.0000000e-01   5.0000000e-01   8.0000000e-01   7.0000000e-01   1.2000000e+00   3.0000000e-01   1.3000000e+00   1.8000000e+00   5.0000000e-01   1.3000000e+00   2.0000000e-01   1.3000000e+00   5.0000000e-01   6.9000000e+00   6.2000000e+00   7.2000000e+00   5.5000000e+00   6.8000000e+00   5.7000000e+00   6.5000000e+00   3.8000000e+00   6.6000000e+00   4.8000000e+00   4.5000000e+00   5.6000000e+00   5.8000000e+00   6.3000000e+00   4.6000000e+00   6.4000000e+00   5.6000000e+00   5.2000000e+00   7.0000000e+00   5.1000000e+00   6.3000000e+00   5.6000000e+00   7.2000000e+00   6.2000000e+00   6.1000000e+00   6.4000000e+00   7.2000000e+00   7.4000000e+00   6.1000000e+00   4.6000000e+00   5.0000000e+00   4.8000000e+00   5.2000000e+00   7.0000000e+00   5.4000000e+00   6.1000000e+00   6.8000000e+00   6.7000000e+00   5.0000000e+00   5.3000000e+00   5.5000000e+00   6.1000000e+00   5.4000000e+00   4.0000000e+00   5.4000000e+00   5.1000000e+00   5.3000000e+00   5.9000000e+00   3.7000000e+00   5.3000000e+00   8.7000000e+00   7.1000000e+00   9.1000000e+00   7.8000000e+00   8.5000000e+00   1.0300000e+01   5.6000000e+00   9.5000000e+00   8.8000000e+00   1.0000000e+01   7.4000000e+00   7.9000000e+00   8.4000000e+00   7.2000000e+00   7.5000000e+00   7.8000000e+00   7.8000000e+00   1.1000000e+01   1.1300000e+01   7.3000000e+00   8.7000000e+00   6.7000000e+00   1.0600000e+01   7.3000000e+00   8.4000000e+00   8.8000000e+00   7.0000000e+00   6.8000000e+00   8.3000000e+00   8.6000000e+00   9.6000000e+00   1.0700000e+01   8.4000000e+00   7.1000000e+00   7.5000000e+00   1.0100000e+01   8.3000000e+00   7.6000000e+00   6.6000000e+00   8.3000000e+00   8.6000000e+00   8.2000000e+00   7.1000000e+00   8.8000000e+00   8.8000000e+00   8.2000000e+00   7.7000000e+00   7.7000000e+00   7.9000000e+00   6.8000000e+00   
1.0000000e+00   2.0000000e+00   5.0000000e-01   7.0000000e-01   5.0000000e-01   4.0000000e-01   1.4000000e+00   6.0000000e-01   5.0000000e-01   9.0000000e-01   2.4000000e+00   2.6000000e+00   2.0000000e+00   1.1000000e+00   2.1000000e+00   1.3000000e+00   1.3000000e+00   1.3000000e+00   1.0000000e+00   1.2000000e+00   9.0000000e-01   6.0000000e-01   1.0000000e+00   1.0000000e+00   1.0000000e+00   3.0000000e-01   3.0000000e-01   1.3000000e+00   1.7000000e+00   2.1000000e+00   4.0000000e-01   8.0000000e-01   1.5000000e+00   4.0000000e-01   5.0000000e-01   8.0000000e-01   1.1000000e+00   1.2000000e+00   5.0000000e-01   1.3000000e+00   1.8000000e+00   5.0000000e-01   1.3000000e+00   2.0000000e-01   1.3000000e+00   7.0000000e-01   6.9000000e+00   6.2000000e+00   7.0000000e+00   5.3000000e+00   6.6000000e+00   5.5000000e+00   6.5000000e+00   3.6000000e+00   6.4000000e+00   4.6000000e+00   4.3000000e+00   5.4000000e+00   5.6000000e+00   6.1000000e+00   4.4000000e+00   6.2000000e+00   5.4000000e+00   5.0000000e+00   6.8000000e+00   4.9000000e+00   6.3000000e+00   5.4000000e+00   7.0000000e+00   6.0000000e+00   5.9000000e+00   6.2000000e+00   7.0000000e+00   7.2000000e+00   5.9000000e+00   4.4000000e+00   4.8000000e+00   4.6000000e+00   5.0000000e+00   6.8000000e+00   5.2000000e+00   6.1000000e+00   6.6000000e+00   6.5000000e+00   4.8000000e+00   5.1000000e+00   5.3000000e+00   5.9000000e+00   5.2000000e+00   3.8000000e+00   5.2000000e+00   4.9000000e+00   5.1000000e+00   5.7000000e+00   3.5000000e+00   5.1000000e+00   8.7000000e+00   6.9000000e+00   8.9000000e+00   7.6000000e+00   8.3000000e+00   1.0100000e+01   5.4000000e+00   9.3000000e+00   8.6000000e+00   1.0000000e+01   7.4000000e+00   7.7000000e+00   8.2000000e+00   7.0000000e+00   7.3000000e+00   7.8000000e+00   7.6000000e+00   1.1000000e+01   1.1100000e+01   7.1000000e+00   8.7000000e+00   6.5000000e+00   1.0400000e+01   7.1000000e+00   8.4000000e+00   8.8000000e+00   6.8000000e+00   6.6000000e+00   8.1000000e+00   8.4000000e+00   9.4000000e+00   1.0700000e+01   8.2000000e+00   6.9000000e+00   7.3000000e+00   9.9000000e+00   8.3000000e+00   7.4000000e+00   6.4000000e+00   8.1000000e+00   8.4000000e+00   8.0000000e+00   6.9000000e+00   8.8000000e+00   8.8000000e+00   8.0000000e+00   7.5000000e+00   7.5000000e+00   7.9000000e+00   6.6000000e+00   1.2000000e+00   7.0000000e-01   3.0000000e-01   1.3000000e+00   8.0000000e-01   6.0000000e-01   6.0000000e-01   9.0000000e-01   1.7000000e+00   1.4000000e+00   1.8000000e+00   1.0000000e+00   3.0000000e-01   1.3000000e+00   5.0000000e-01   9.0000000e-01   5.0000000e-01   8.0000000e-01   1.0000000e+00   9.0000000e-01   8.0000000e-01   6.0000000e-01   4.0000000e-01   4.0000000e-01   9.0000000e-01   9.0000000e-01   9.0000000e-01   9.0000000e-01   1.1000000e+00   8.0000000e-01   6.0000000e-01   7.0000000e-01   8.0000000e-01   1.3000000e+00   4.0000000e-01   3.0000000e-01   2.0000000e+00   1.1000000e+00   7.0000000e-01   1.0000000e+00   9.0000000e-01   5.0000000e-01   8.0000000e-01   5.0000000e-01   3.0000000e-01   6.9000000e+00   6.2000000e+00   7.2000000e+00   5.5000000e+00   6.8000000e+00   5.7000000e+00   6.3000000e+00   4.0000000e+00   6.6000000e+00   4.8000000e+00   4.5000000e+00   5.6000000e+00   5.8000000e+00   6.3000000e+00   4.6000000e+00   6.4000000e+00   5.6000000e+00   5.2000000e+00   7.0000000e+00   5.1000000e+00   6.3000000e+00   5.6000000e+00   7.2000000e+00   6.2000000e+00   6.1000000e+00   6.4000000e+00   7.2000000e+00   7.4000000e+00   6.1000000e+00   4.6000000e+00   5.0000000e+00   
4.8000000e+00   5.2000000e+00   7.0000000e+00   5.4000000e+00   5.7000000e+00   6.8000000e+00   6.7000000e+00   5.0000000e+00   5.3000000e+00   5.5000000e+00   6.1000000e+00   5.4000000e+00   4.0000000e+00   5.4000000e+00   5.1000000e+00   5.3000000e+00   5.9000000e+00   3.7000000e+00   5.3000000e+00   8.5000000e+00   7.1000000e+00   9.1000000e+00   7.8000000e+00   8.5000000e+00   1.0300000e+01   5.8000000e+00   9.5000000e+00   8.8000000e+00   9.2000000e+00   7.4000000e+00   7.9000000e+00   8.4000000e+00   7.2000000e+00   7.5000000e+00   7.8000000e+00   7.8000000e+00   1.0200000e+01   1.1300000e+01   7.3000000e+00   8.7000000e+00   6.7000000e+00   1.0600000e+01   7.3000000e+00   8.2000000e+00   8.8000000e+00   7.0000000e+00   6.8000000e+00   8.3000000e+00   8.6000000e+00   9.6000000e+00   9.9000000e+00   8.4000000e+00   7.1000000e+00   7.5000000e+00   1.0100000e+01   7.9000000e+00   7.6000000e+00   6.6000000e+00   8.3000000e+00   8.6000000e+00   8.2000000e+00   7.1000000e+00   8.8000000e+00   8.6000000e+00   8.2000000e+00   7.7000000e+00   7.7000000e+00   7.5000000e+00   6.8000000e+00   1.7000000e+00   1.3000000e+00   2.5000000e+00   1.8000000e+00   6.0000000e-01   1.4000000e+00   2.1000000e+00   2.9000000e+00   1.2000000e+00   1.0000000e+00   4.0000000e-01   1.1000000e+00   5.0000000e-01   7.0000000e-01   7.0000000e-01   7.0000000e-01   2.0000000e+00   1.0000000e+00   1.5000000e+00   1.6000000e+00   1.0000000e+00   1.0000000e+00   1.2000000e+00   1.7000000e+00   1.7000000e+00   7.0000000e-01   9.0000000e-01   9.0000000e-01   1.8000000e+00   1.8000000e+00   1.1000000e+00   1.8000000e+00   2.5000000e+00   1.2000000e+00   1.3000000e+00   3.0000000e+00   2.3000000e+00   1.1000000e+00   6.0000000e-01   1.9000000e+00   7.0000000e-01   2.0000000e+00   7.0000000e-01   1.5000000e+00   6.3000000e+00   5.6000000e+00   6.6000000e+00   4.9000000e+00   6.2000000e+00   5.1000000e+00   5.7000000e+00   4.2000000e+00   6.0000000e+00   4.6000000e+00   4.7000000e+00   5.0000000e+00   5.2000000e+00   5.7000000e+00   4.0000000e+00   5.8000000e+00   5.0000000e+00   4.6000000e+00   6.4000000e+00   4.5000000e+00   5.7000000e+00   5.0000000e+00   6.6000000e+00   5.6000000e+00   5.5000000e+00   5.8000000e+00   6.6000000e+00   6.8000000e+00   5.5000000e+00   4.0000000e+00   4.4000000e+00   4.2000000e+00   4.6000000e+00   6.4000000e+00   4.8000000e+00   5.1000000e+00   6.2000000e+00   6.1000000e+00   4.4000000e+00   4.7000000e+00   4.9000000e+00   5.5000000e+00   4.8000000e+00   4.2000000e+00   4.8000000e+00   4.5000000e+00   4.7000000e+00   5.3000000e+00   3.7000000e+00   4.7000000e+00   7.9000000e+00   6.5000000e+00   8.5000000e+00   7.2000000e+00   7.9000000e+00   9.7000000e+00   6.0000000e+00   8.9000000e+00   8.2000000e+00   8.6000000e+00   6.8000000e+00   7.3000000e+00   7.8000000e+00   6.6000000e+00   6.9000000e+00   7.2000000e+00   7.2000000e+00   9.2000000e+00   1.0700000e+01   6.7000000e+00   8.1000000e+00   6.1000000e+00   1.0000000e+01   6.7000000e+00   7.6000000e+00   8.2000000e+00   6.4000000e+00   6.2000000e+00   7.7000000e+00   8.0000000e+00   9.0000000e+00   8.9000000e+00   7.8000000e+00   6.5000000e+00   6.9000000e+00   9.5000000e+00   7.3000000e+00   7.0000000e+00   6.0000000e+00   7.7000000e+00   8.0000000e+00   7.6000000e+00   6.5000000e+00   8.2000000e+00   8.0000000e+00   7.6000000e+00   7.1000000e+00   7.1000000e+00   6.9000000e+00   6.2000000e+00   6.0000000e-01   8.0000000e-01   9.0000000e-01   1.3000000e+00   5.0000000e-01   8.0000000e-01   1.2000000e+00   2.1000000e+00   2.3000000e+00   
1.5000000e+00   6.0000000e-01   1.8000000e+00   1.0000000e+00   1.2000000e+00   1.0000000e+00   7.0000000e-01   1.1000000e+00   8.0000000e-01   1.1000000e+00   7.0000000e-01   9.0000000e-01   7.0000000e-01   6.0000000e-01   8.0000000e-01   1.0000000e+00   1.6000000e+00   1.8000000e+00   9.0000000e-01   9.0000000e-01   1.2000000e+00   9.0000000e-01   8.0000000e-01   7.0000000e-01   6.0000000e-01   1.3000000e+00   6.0000000e-01   1.0000000e+00   1.5000000e+00   6.0000000e-01   1.2000000e+00   3.0000000e-01   1.2000000e+00   6.0000000e-01   7.0000000e+00   6.3000000e+00   7.3000000e+00   5.6000000e+00   6.9000000e+00   5.8000000e+00   6.4000000e+00   3.9000000e+00   6.7000000e+00   4.9000000e+00   4.6000000e+00   5.7000000e+00   5.9000000e+00   6.4000000e+00   4.7000000e+00   6.5000000e+00   5.7000000e+00   5.3000000e+00   7.1000000e+00   5.2000000e+00   6.4000000e+00   5.7000000e+00   7.3000000e+00   6.3000000e+00   6.2000000e+00   6.5000000e+00   7.3000000e+00   7.5000000e+00   6.2000000e+00   4.7000000e+00   5.1000000e+00   4.9000000e+00   5.3000000e+00   7.1000000e+00   5.5000000e+00   5.8000000e+00   6.9000000e+00   6.8000000e+00   5.1000000e+00   5.4000000e+00   5.6000000e+00   6.2000000e+00   5.5000000e+00   4.1000000e+00   5.5000000e+00   5.2000000e+00   5.4000000e+00   6.0000000e+00   3.8000000e+00   5.4000000e+00   8.6000000e+00   7.2000000e+00   9.2000000e+00   7.9000000e+00   8.6000000e+00   1.0400000e+01   5.7000000e+00   9.6000000e+00   8.9000000e+00   9.7000000e+00   7.5000000e+00   8.0000000e+00   8.5000000e+00   7.3000000e+00   7.6000000e+00   7.9000000e+00   7.9000000e+00   1.0700000e+01   1.1400000e+01   7.4000000e+00   8.8000000e+00   6.8000000e+00   1.0700000e+01   7.4000000e+00   8.3000000e+00   8.9000000e+00   7.1000000e+00   6.9000000e+00   8.4000000e+00   8.7000000e+00   9.7000000e+00   1.0400000e+01   8.5000000e+00   7.2000000e+00   7.6000000e+00   1.0200000e+01   8.0000000e+00   7.7000000e+00   6.7000000e+00   8.4000000e+00   8.7000000e+00   8.3000000e+00   7.2000000e+00   8.9000000e+00   8.7000000e+00   8.3000000e+00   7.8000000e+00   7.8000000e+00   7.6000000e+00   6.9000000e+00   1.2000000e+00   5.0000000e-01   7.0000000e-01   3.0000000e-01   8.0000000e-01   1.6000000e+00   1.7000000e+00   1.9000000e+00   1.3000000e+00   4.0000000e-01   1.4000000e+00   6.0000000e-01   6.0000000e-01   6.0000000e-01   1.1000000e+00   7.0000000e-01   6.0000000e-01   5.0000000e-01   3.0000000e-01   3.0000000e-01   3.0000000e-01   6.0000000e-01   6.0000000e-01   6.0000000e-01   1.0000000e+00   1.4000000e+00   5.0000000e-01   5.0000000e-01   8.0000000e-01   5.0000000e-01   1.2000000e+00   1.0000000e-01   4.0000000e-01   1.9000000e+00   1.0000000e+00   6.0000000e-01   1.1000000e+00   8.0000000e-01   6.0000000e-01   7.0000000e-01   6.0000000e-01   2.0000000e-01   6.6000000e+00   5.9000000e+00   6.9000000e+00   5.2000000e+00   6.5000000e+00   5.4000000e+00   6.0000000e+00   3.7000000e+00   6.3000000e+00   4.5000000e+00   4.2000000e+00   5.3000000e+00   5.5000000e+00   6.0000000e+00   4.3000000e+00   6.1000000e+00   5.3000000e+00   4.9000000e+00   6.7000000e+00   4.8000000e+00   6.0000000e+00   5.3000000e+00   6.9000000e+00   5.9000000e+00   5.8000000e+00   6.1000000e+00   6.9000000e+00   7.1000000e+00   5.8000000e+00   4.3000000e+00   4.7000000e+00   4.5000000e+00   4.9000000e+00   6.7000000e+00   5.1000000e+00   5.4000000e+00   6.5000000e+00   6.4000000e+00   4.7000000e+00   5.0000000e+00   5.2000000e+00   5.8000000e+00   5.1000000e+00   3.7000000e+00   5.1000000e+00   4.8000000e+00   
5.0000000e+00   5.6000000e+00   3.4000000e+00   5.0000000e+00   8.2000000e+00   6.8000000e+00   8.8000000e+00   7.5000000e+00   8.2000000e+00   1.0000000e+01   5.5000000e+00   9.2000000e+00   8.5000000e+00   9.3000000e+00   7.1000000e+00   7.6000000e+00   8.1000000e+00   6.9000000e+00   7.2000000e+00   7.5000000e+00   7.5000000e+00   1.0300000e+01   1.1000000e+01   7.0000000e+00   8.4000000e+00   6.4000000e+00   1.0300000e+01   7.0000000e+00   7.9000000e+00   8.5000000e+00   6.7000000e+00   6.5000000e+00   8.0000000e+00   8.3000000e+00   9.3000000e+00   1.0000000e+01   8.1000000e+00   6.8000000e+00   7.2000000e+00   9.8000000e+00   7.6000000e+00   7.3000000e+00   6.3000000e+00   8.0000000e+00   8.3000000e+00   7.9000000e+00   6.8000000e+00   8.5000000e+00   8.3000000e+00   7.9000000e+00   7.4000000e+00   7.4000000e+00   7.2000000e+00   6.5000000e+00   9.0000000e-01   1.9000000e+00   1.1000000e+00   6.0000000e-01   6.0000000e-01   2.7000000e+00   3.1000000e+00   2.3000000e+00   1.4000000e+00   2.6000000e+00   1.8000000e+00   1.8000000e+00   1.8000000e+00   1.3000000e+00   1.7000000e+00   1.4000000e+00   9.0000000e-01   1.5000000e+00   1.5000000e+00   1.3000000e+00   8.0000000e-01   8.0000000e-01   1.8000000e+00   2.2000000e+00   2.4000000e+00   9.0000000e-01   1.1000000e+00   1.8000000e+00   9.0000000e-01   2.0000000e-01   1.3000000e+00   1.4000000e+00   9.0000000e-01   4.0000000e-01   1.8000000e+00   2.3000000e+00   6.0000000e-01   1.8000000e+00   5.0000000e-01   1.8000000e+00   1.0000000e+00   7.4000000e+00   6.7000000e+00   7.5000000e+00   5.4000000e+00   6.7000000e+00   5.6000000e+00   7.0000000e+00   3.7000000e+00   6.5000000e+00   4.7000000e+00   4.4000000e+00   5.7000000e+00   5.7000000e+00   6.2000000e+00   4.5000000e+00   6.7000000e+00   5.7000000e+00   5.1000000e+00   6.9000000e+00   5.0000000e+00   6.8000000e+00   5.5000000e+00   7.1000000e+00   6.1000000e+00   6.0000000e+00   6.5000000e+00   7.1000000e+00   7.5000000e+00   6.0000000e+00   4.5000000e+00   4.9000000e+00   4.7000000e+00   5.1000000e+00   6.9000000e+00   5.5000000e+00   6.6000000e+00   7.1000000e+00   6.6000000e+00   5.1000000e+00   5.2000000e+00   5.4000000e+00   6.2000000e+00   5.3000000e+00   3.9000000e+00   5.3000000e+00   5.2000000e+00   5.2000000e+00   5.8000000e+00   3.6000000e+00   5.2000000e+00   9.2000000e+00   7.0000000e+00   9.2000000e+00   7.7000000e+00   8.6000000e+00   1.0400000e+01   5.5000000e+00   9.4000000e+00   8.7000000e+00   1.0500000e+01   7.9000000e+00   7.8000000e+00   8.5000000e+00   7.1000000e+00   7.4000000e+00   8.3000000e+00   7.9000000e+00   1.1500000e+01   1.1200000e+01   7.2000000e+00   9.2000000e+00   6.6000000e+00   1.0500000e+01   7.2000000e+00   8.9000000e+00   9.3000000e+00   6.9000000e+00   6.9000000e+00   8.2000000e+00   8.7000000e+00   9.5000000e+00   1.1200000e+01   8.3000000e+00   7.0000000e+00   7.4000000e+00   1.0200000e+01   8.8000000e+00   7.9000000e+00   6.7000000e+00   8.6000000e+00   8.9000000e+00   8.5000000e+00   7.0000000e+00   9.3000000e+00   9.3000000e+00   8.3000000e+00   7.6000000e+00   7.8000000e+00   8.4000000e+00   6.9000000e+00   1.2000000e+00   6.0000000e-01   3.0000000e-01   1.1000000e+00   2.2000000e+00   2.4000000e+00   1.8000000e+00   9.0000000e-01   1.9000000e+00   1.1000000e+00   1.1000000e+00   1.1000000e+00   1.4000000e+00   1.0000000e+00   9.0000000e-01   4.0000000e-01   8.0000000e-01   8.0000000e-01   8.0000000e-01   5.0000000e-01   3.0000000e-01   1.1000000e+00   1.3000000e+00   1.9000000e+00   0.0000000e+00   6.0000000e-01   1.3000000e+00   
0.0000000e+00   9.0000000e-01   6.0000000e-01   9.0000000e-01   1.6000000e+00   9.0000000e-01   1.1000000e+00   1.6000000e+00   5.0000000e-01   1.1000000e+00   6.0000000e-01   1.1000000e+00   5.0000000e-01   6.7000000e+00   6.0000000e+00   6.8000000e+00   5.1000000e+00   6.4000000e+00   5.3000000e+00   6.3000000e+00   3.4000000e+00   6.2000000e+00   4.4000000e+00   4.1000000e+00   5.2000000e+00   5.4000000e+00   5.9000000e+00   4.2000000e+00   6.0000000e+00   5.2000000e+00   4.8000000e+00   6.6000000e+00   4.7000000e+00   6.1000000e+00   5.2000000e+00   6.8000000e+00   5.8000000e+00   5.7000000e+00   6.0000000e+00   6.8000000e+00   7.0000000e+00   5.7000000e+00   4.2000000e+00   4.6000000e+00   4.4000000e+00   4.8000000e+00   6.6000000e+00   5.0000000e+00   5.9000000e+00   6.4000000e+00   6.3000000e+00   4.6000000e+00   4.9000000e+00   5.1000000e+00   5.7000000e+00   5.0000000e+00   3.6000000e+00   5.0000000e+00   4.7000000e+00   4.9000000e+00   5.5000000e+00   3.3000000e+00   4.9000000e+00   8.5000000e+00   6.7000000e+00   8.7000000e+00   7.4000000e+00   8.1000000e+00   9.9000000e+00   5.2000000e+00   9.1000000e+00   8.4000000e+00   9.8000000e+00   7.2000000e+00   7.5000000e+00   8.0000000e+00   6.8000000e+00   7.1000000e+00   7.6000000e+00   7.4000000e+00   1.0800000e+01   1.0900000e+01   6.9000000e+00   8.5000000e+00   6.3000000e+00   1.0200000e+01   6.9000000e+00   8.2000000e+00   8.6000000e+00   6.6000000e+00   6.4000000e+00   7.9000000e+00   8.2000000e+00   9.2000000e+00   1.0500000e+01   8.0000000e+00   6.7000000e+00   7.1000000e+00   9.7000000e+00   8.1000000e+00   7.2000000e+00   6.2000000e+00   7.9000000e+00   8.2000000e+00   7.8000000e+00   6.7000000e+00   8.6000000e+00   8.6000000e+00   7.8000000e+00   7.3000000e+00   7.3000000e+00   7.7000000e+00   6.4000000e+00   1.0000000e+00   1.5000000e+00   2.3000000e+00   1.0000000e+00   1.2000000e+00   6.0000000e-01   7.0000000e-01   7.0000000e-01   5.0000000e-01   5.0000000e-01   5.0000000e-01   1.4000000e+00   1.2000000e+00   1.3000000e+00   1.2000000e+00   1.0000000e+00   4.0000000e-01   6.0000000e-01   1.3000000e+00   1.3000000e+00   5.0000000e-01   7.0000000e-01   7.0000000e-01   1.2000000e+00   1.2000000e+00   5.0000000e-01   1.2000000e+00   1.9000000e+00   6.0000000e-01   9.0000000e-01   2.6000000e+00   1.7000000e+00   1.1000000e+00   1.0000000e+00   1.5000000e+00   5.0000000e-01   1.4000000e+00   1.0000000e-01   9.0000000e-01   6.5000000e+00   5.8000000e+00   6.8000000e+00   5.1000000e+00   6.4000000e+00   5.3000000e+00   5.9000000e+00   4.4000000e+00   6.2000000e+00   4.8000000e+00   4.9000000e+00   5.2000000e+00   5.4000000e+00   5.9000000e+00   4.2000000e+00   6.0000000e+00   5.2000000e+00   4.8000000e+00   6.6000000e+00   4.7000000e+00   5.9000000e+00   5.2000000e+00   6.8000000e+00   5.8000000e+00   5.7000000e+00   6.0000000e+00   6.8000000e+00   7.0000000e+00   5.7000000e+00   4.2000000e+00   4.6000000e+00   4.4000000e+00   4.8000000e+00   6.6000000e+00   5.0000000e+00   5.3000000e+00   6.4000000e+00   6.3000000e+00   4.6000000e+00   4.9000000e+00   5.1000000e+00   5.7000000e+00   5.0000000e+00   4.4000000e+00   5.0000000e+00   4.7000000e+00   4.9000000e+00   5.5000000e+00   3.9000000e+00   4.9000000e+00   8.1000000e+00   6.7000000e+00   8.7000000e+00   7.4000000e+00   8.1000000e+00   9.9000000e+00   6.2000000e+00   9.1000000e+00   8.4000000e+00   8.8000000e+00   7.0000000e+00   7.5000000e+00   8.0000000e+00   6.8000000e+00   7.1000000e+00   7.4000000e+00   7.4000000e+00   9.6000000e+00   1.0900000e+01   6.9000000e+00   
8.3000000e+00   6.3000000e+00   1.0200000e+01   6.9000000e+00   7.8000000e+00   8.4000000e+00   6.6000000e+00   6.4000000e+00   7.9000000e+00   8.2000000e+00   9.2000000e+00   9.3000000e+00   8.0000000e+00   6.7000000e+00   7.1000000e+00   9.7000000e+00   7.5000000e+00   7.2000000e+00   6.2000000e+00   7.9000000e+00   8.2000000e+00   7.8000000e+00   6.7000000e+00   8.4000000e+00   8.2000000e+00   7.8000000e+00   7.3000000e+00   7.3000000e+00   7.1000000e+00   6.4000000e+00   7.0000000e-01   1.5000000e+00   2.0000000e+00   2.2000000e+00   1.6000000e+00   7.0000000e-01   1.5000000e+00   9.0000000e-01   7.0000000e-01   9.0000000e-01   1.0000000e+00   8.0000000e-01   3.0000000e-01   6.0000000e-01   4.0000000e-01   6.0000000e-01   6.0000000e-01   3.0000000e-01   3.0000000e-01   9.0000000e-01   1.3000000e+00   1.7000000e+00   6.0000000e-01   8.0000000e-01   1.1000000e+00   6.0000000e-01   1.1000000e+00   4.0000000e-01   7.0000000e-01   1.8000000e+00   9.0000000e-01   7.0000000e-01   1.2000000e+00   7.0000000e-01   7.0000000e-01   6.0000000e-01   9.0000000e-01   5.0000000e-01   6.7000000e+00   6.0000000e+00   7.0000000e+00   5.3000000e+00   6.6000000e+00   5.5000000e+00   6.1000000e+00   3.6000000e+00   6.4000000e+00   4.6000000e+00   4.3000000e+00   5.4000000e+00   5.6000000e+00   6.1000000e+00   4.4000000e+00   6.2000000e+00   5.4000000e+00   5.0000000e+00   6.8000000e+00   4.9000000e+00   6.1000000e+00   5.4000000e+00   7.0000000e+00   6.0000000e+00   5.9000000e+00   6.2000000e+00   7.0000000e+00   7.2000000e+00   5.9000000e+00   4.4000000e+00   4.8000000e+00   4.6000000e+00   5.0000000e+00   6.8000000e+00   5.2000000e+00   5.5000000e+00   6.6000000e+00   6.5000000e+00   4.8000000e+00   5.1000000e+00   5.3000000e+00   5.9000000e+00   5.2000000e+00   3.8000000e+00   5.2000000e+00   4.9000000e+00   5.1000000e+00   5.7000000e+00   3.5000000e+00   5.1000000e+00   8.3000000e+00   6.9000000e+00   8.9000000e+00   7.6000000e+00   8.3000000e+00   1.0100000e+01   5.4000000e+00   9.3000000e+00   8.6000000e+00   9.4000000e+00   7.2000000e+00   7.7000000e+00   8.2000000e+00   7.0000000e+00   7.3000000e+00   7.6000000e+00   7.6000000e+00   1.0400000e+01   1.1100000e+01   7.1000000e+00   8.5000000e+00   6.5000000e+00   1.0400000e+01   7.1000000e+00   8.0000000e+00   8.6000000e+00   6.8000000e+00   6.6000000e+00   8.1000000e+00   8.4000000e+00   9.4000000e+00   1.0100000e+01   8.2000000e+00   6.9000000e+00   7.3000000e+00   9.9000000e+00   7.7000000e+00   7.4000000e+00   6.4000000e+00   8.1000000e+00   8.4000000e+00   8.0000000e+00   6.9000000e+00   8.6000000e+00   8.4000000e+00   8.0000000e+00   7.5000000e+00   7.5000000e+00   7.3000000e+00   6.6000000e+00   8.0000000e-01   2.3000000e+00   2.7000000e+00   1.9000000e+00   1.0000000e+00   2.2000000e+00   1.4000000e+00   1.4000000e+00   1.4000000e+00   1.3000000e+00   1.3000000e+00   1.0000000e+00   5.0000000e-01   1.1000000e+00   1.1000000e+00   9.0000000e-01   6.0000000e-01   4.0000000e-01   1.4000000e+00   1.6000000e+00   2.0000000e+00   3.0000000e-01   7.0000000e-01   1.4000000e+00   3.0000000e-01   6.0000000e-01   9.0000000e-01   1.0000000e+00   1.3000000e+00   8.0000000e-01   1.4000000e+00   1.9000000e+00   2.0000000e-01   1.4000000e+00   5.0000000e-01   1.4000000e+00   6.0000000e-01   7.0000000e+00   6.3000000e+00   7.1000000e+00   5.2000000e+00   6.5000000e+00   5.4000000e+00   6.6000000e+00   3.5000000e+00   6.3000000e+00   4.5000000e+00   4.2000000e+00   5.3000000e+00   5.5000000e+00   6.0000000e+00   4.3000000e+00   6.3000000e+00   5.3000000e+00   
4.9000000e+00   6.7000000e+00   4.8000000e+00   6.4000000e+00   5.3000000e+00   6.9000000e+00   5.9000000e+00   5.8000000e+00   6.1000000e+00   6.9000000e+00   7.1000000e+00   5.8000000e+00   4.3000000e+00   4.7000000e+00   4.5000000e+00   4.9000000e+00   6.7000000e+00   5.1000000e+00   6.2000000e+00   6.7000000e+00   6.4000000e+00   4.7000000e+00   5.0000000e+00   5.2000000e+00   5.8000000e+00   5.1000000e+00   3.7000000e+00   5.1000000e+00   4.8000000e+00   5.0000000e+00   5.6000000e+00   3.4000000e+00   5.0000000e+00   8.8000000e+00   6.8000000e+00   8.8000000e+00   7.5000000e+00   8.2000000e+00   1.0000000e+01   5.3000000e+00   9.2000000e+00   8.5000000e+00   1.0100000e+01   7.5000000e+00   7.6000000e+00   8.1000000e+00   6.9000000e+00   7.2000000e+00   7.9000000e+00   7.5000000e+00   1.1100000e+01   1.1000000e+01   7.0000000e+00   8.8000000e+00   6.4000000e+00   1.0300000e+01   7.0000000e+00   8.5000000e+00   8.9000000e+00   6.7000000e+00   6.5000000e+00   8.0000000e+00   8.3000000e+00   9.3000000e+00   1.0800000e+01   8.1000000e+00   6.8000000e+00   7.2000000e+00   9.8000000e+00   8.4000000e+00   7.5000000e+00   6.3000000e+00   8.2000000e+00   8.5000000e+00   8.1000000e+00   6.8000000e+00   8.9000000e+00   8.9000000e+00   7.9000000e+00   7.4000000e+00   7.4000000e+00   8.0000000e+00   6.5000000e+00   2.7000000e+00   3.5000000e+00   2.5000000e+00   1.8000000e+00   3.0000000e+00   2.2000000e+00   2.2000000e+00   2.2000000e+00   1.1000000e+00   2.1000000e+00   1.8000000e+00   1.3000000e+00   1.9000000e+00   1.9000000e+00   1.7000000e+00   1.2000000e+00   1.2000000e+00   2.2000000e+00   2.4000000e+00   2.8000000e+00   1.1000000e+00   1.1000000e+00   2.0000000e+00   1.1000000e+00   4.0000000e-01   1.7000000e+00   1.6000000e+00   1.3000000e+00   6.0000000e-01   2.2000000e+00   2.7000000e+00   1.0000000e+00   2.2000000e+00   9.0000000e-01   2.2000000e+00   1.4000000e+00   7.8000000e+00   7.1000000e+00   7.9000000e+00   6.0000000e+00   7.3000000e+00   6.2000000e+00   7.4000000e+00   4.3000000e+00   7.1000000e+00   5.3000000e+00   5.0000000e+00   6.1000000e+00   6.3000000e+00   6.8000000e+00   5.1000000e+00   7.1000000e+00   6.1000000e+00   5.7000000e+00   7.5000000e+00   5.6000000e+00   7.2000000e+00   6.1000000e+00   7.7000000e+00   6.7000000e+00   6.6000000e+00   6.9000000e+00   7.7000000e+00   7.9000000e+00   6.6000000e+00   5.1000000e+00   5.5000000e+00   5.3000000e+00   5.7000000e+00   7.5000000e+00   5.9000000e+00   7.0000000e+00   7.5000000e+00   7.2000000e+00   5.5000000e+00   5.8000000e+00   6.0000000e+00   6.6000000e+00   5.9000000e+00   4.5000000e+00   5.9000000e+00   5.6000000e+00   5.8000000e+00   6.4000000e+00   4.2000000e+00   5.8000000e+00   9.6000000e+00   7.6000000e+00   9.6000000e+00   8.3000000e+00   9.0000000e+00   1.0800000e+01   6.1000000e+00   1.0000000e+01   9.3000000e+00   1.0900000e+01   8.3000000e+00   8.4000000e+00   8.9000000e+00   7.7000000e+00   8.0000000e+00   8.7000000e+00   8.3000000e+00   1.1900000e+01   1.1800000e+01   7.8000000e+00   9.6000000e+00   7.2000000e+00   1.1100000e+01   7.8000000e+00   9.3000000e+00   9.7000000e+00   7.5000000e+00   7.3000000e+00   8.8000000e+00   9.1000000e+00   1.0100000e+01   1.1600000e+01   8.9000000e+00   7.6000000e+00   8.0000000e+00   1.0600000e+01   9.2000000e+00   8.3000000e+00   7.1000000e+00   9.0000000e+00   9.3000000e+00   8.9000000e+00   7.6000000e+00   9.7000000e+00   9.7000000e+00   8.7000000e+00   8.2000000e+00   8.2000000e+00   8.8000000e+00   7.3000000e+00   1.0000000e+00   8.0000000e-01   1.5000000e+00   
9.0000000e-01   1.3000000e+00   1.5000000e+00   1.5000000e+00   1.8000000e+00   2.2000000e+00   2.3000000e+00   2.2000000e+00   2.0000000e+00   1.4000000e+00   1.4000000e+00   2.3000000e+00   2.3000000e+00   1.5000000e+00   1.1000000e+00   7.0000000e-01   2.2000000e+00   1.6000000e+00   9.0000000e-01   2.2000000e+00   2.5000000e+00   1.6000000e+00   1.5000000e+00   3.2000000e+00   2.3000000e+00   2.1000000e+00   1.8000000e+00   2.3000000e+00   1.3000000e+00   2.2000000e+00   1.1000000e+00   1.7000000e+00   6.7000000e+00   6.0000000e+00   7.0000000e+00   5.9000000e+00   6.6000000e+00   5.7000000e+00   6.1000000e+00   5.4000000e+00   6.4000000e+00   5.8000000e+00   5.9000000e+00   5.4000000e+00   5.6000000e+00   6.1000000e+00   4.8000000e+00   6.2000000e+00   5.8000000e+00   5.0000000e+00   6.8000000e+00   5.3000000e+00   6.1000000e+00   5.4000000e+00   7.0000000e+00   6.0000000e+00   5.9000000e+00   6.2000000e+00   7.0000000e+00   7.2000000e+00   5.9000000e+00   4.6000000e+00   5.4000000e+00   5.2000000e+00   5.0000000e+00   6.8000000e+00   6.0000000e+00   5.5000000e+00   6.6000000e+00   6.5000000e+00   5.2000000e+00   5.7000000e+00   5.9000000e+00   5.9000000e+00   5.2000000e+00   5.4000000e+00   5.6000000e+00   5.1000000e+00   5.3000000e+00   5.7000000e+00   4.9000000e+00   5.3000000e+00   8.3000000e+00   6.9000000e+00   8.9000000e+00   7.6000000e+00   8.3000000e+00   1.0100000e+01   7.2000000e+00   9.3000000e+00   8.6000000e+00   9.0000000e+00   7.2000000e+00   7.7000000e+00   8.2000000e+00   7.2000000e+00   7.3000000e+00   7.6000000e+00   7.6000000e+00   9.6000000e+00   1.1100000e+01   7.1000000e+00   8.5000000e+00   6.9000000e+00   1.0400000e+01   7.1000000e+00   8.0000000e+00   8.6000000e+00   6.8000000e+00   6.6000000e+00   8.1000000e+00   8.4000000e+00   9.4000000e+00   9.3000000e+00   8.2000000e+00   6.9000000e+00   7.3000000e+00   9.9000000e+00   7.7000000e+00   7.4000000e+00   6.4000000e+00   8.1000000e+00   8.4000000e+00   8.0000000e+00   6.9000000e+00   8.6000000e+00   8.4000000e+00   8.0000000e+00   7.5000000e+00   7.5000000e+00   7.3000000e+00   6.6000000e+00   1.0000000e+00   1.7000000e+00   9.0000000e-01   1.3000000e+00   1.7000000e+00   1.3000000e+00   2.6000000e+00   2.0000000e+00   2.5000000e+00   2.4000000e+00   1.8000000e+00   1.6000000e+00   1.8000000e+00   2.5000000e+00   2.5000000e+00   1.3000000e+00   1.1000000e+00   7.0000000e-01   2.4000000e+00   2.4000000e+00   1.5000000e+00   2.4000000e+00   3.1000000e+00   1.8000000e+00   1.9000000e+00   3.6000000e+00   2.9000000e+00   1.9000000e+00   1.6000000e+00   2.5000000e+00   1.5000000e+00   2.6000000e+00   1.3000000e+00   2.1000000e+00   6.7000000e+00   6.0000000e+00   7.0000000e+00   5.7000000e+00   6.6000000e+00   5.5000000e+00   6.1000000e+00   5.2000000e+00   6.4000000e+00   5.6000000e+00   5.7000000e+00   5.4000000e+00   5.6000000e+00   6.1000000e+00   4.6000000e+00   6.2000000e+00   5.6000000e+00   5.0000000e+00   6.8000000e+00   5.1000000e+00   6.1000000e+00   5.4000000e+00   7.0000000e+00   6.0000000e+00   5.9000000e+00   6.2000000e+00   7.0000000e+00   7.2000000e+00   5.9000000e+00   4.4000000e+00   5.2000000e+00   5.0000000e+00   5.0000000e+00   6.8000000e+00   5.8000000e+00   5.5000000e+00   6.6000000e+00   6.5000000e+00   5.0000000e+00   5.5000000e+00   5.7000000e+00   5.9000000e+00   5.2000000e+00   5.2000000e+00   5.4000000e+00   4.9000000e+00   5.1000000e+00   5.7000000e+00   4.7000000e+00   5.1000000e+00   8.3000000e+00   6.9000000e+00   8.9000000e+00   7.6000000e+00   8.3000000e+00   1.0100000e+01   
7.0000000e+00   9.3000000e+00   8.6000000e+00   9.0000000e+00   7.2000000e+00   7.7000000e+00   8.2000000e+00   7.0000000e+00   7.3000000e+00   7.6000000e+00   7.6000000e+00   9.6000000e+00   1.1100000e+01   7.1000000e+00   8.5000000e+00   6.7000000e+00   1.0400000e+01   7.1000000e+00   8.0000000e+00   8.6000000e+00   6.8000000e+00   6.6000000e+00   8.1000000e+00   8.4000000e+00   9.4000000e+00   9.3000000e+00   8.2000000e+00   6.9000000e+00   7.3000000e+00   9.9000000e+00   7.7000000e+00   7.4000000e+00   6.4000000e+00   8.1000000e+00   8.4000000e+00   8.0000000e+00   6.9000000e+00   8.6000000e+00   8.4000000e+00   8.0000000e+00   7.5000000e+00   7.5000000e+00   7.3000000e+00   6.6000000e+00   9.0000000e-01   9.0000000e-01   7.0000000e-01   1.1000000e+00   7.0000000e-01   1.6000000e+00   1.4000000e+00   1.9000000e+00   1.8000000e+00   1.2000000e+00   1.0000000e+00   1.0000000e+00   1.9000000e+00   1.9000000e+00   7.0000000e-01   9.0000000e-01   7.0000000e-01   1.8000000e+00   1.4000000e+00   7.0000000e-01   1.8000000e+00   2.1000000e+00   1.2000000e+00   9.0000000e-01   2.6000000e+00   1.9000000e+00   1.3000000e+00   1.0000000e+00   1.7000000e+00   9.0000000e-01   1.8000000e+00   7.0000000e-01   1.3000000e+00   6.7000000e+00   6.0000000e+00   7.0000000e+00   5.3000000e+00   6.6000000e+00   5.5000000e+00   6.1000000e+00   4.6000000e+00   6.4000000e+00   5.0000000e+00   5.1000000e+00   5.4000000e+00   5.6000000e+00   6.1000000e+00   4.4000000e+00   6.2000000e+00   5.4000000e+00   5.0000000e+00   6.8000000e+00   4.9000000e+00   6.1000000e+00   5.4000000e+00   7.0000000e+00   6.0000000e+00   5.9000000e+00   6.2000000e+00   7.0000000e+00   7.2000000e+00   5.9000000e+00   4.4000000e+00   4.8000000e+00   4.6000000e+00   5.0000000e+00   6.8000000e+00   5.2000000e+00   5.5000000e+00   6.6000000e+00   6.5000000e+00   4.8000000e+00   5.1000000e+00   5.3000000e+00   5.9000000e+00   5.2000000e+00   4.6000000e+00   5.2000000e+00   4.9000000e+00   5.1000000e+00   5.7000000e+00   4.1000000e+00   5.1000000e+00   8.3000000e+00   6.9000000e+00   8.9000000e+00   7.6000000e+00   8.3000000e+00   1.0100000e+01   6.4000000e+00   9.3000000e+00   8.6000000e+00   9.0000000e+00   7.2000000e+00   7.7000000e+00   8.2000000e+00   7.0000000e+00   7.3000000e+00   7.6000000e+00   7.6000000e+00   9.6000000e+00   1.1100000e+01   7.1000000e+00   8.5000000e+00   6.5000000e+00   1.0400000e+01   7.1000000e+00   8.0000000e+00   8.6000000e+00   6.8000000e+00   6.6000000e+00   8.1000000e+00   8.4000000e+00   9.4000000e+00   9.3000000e+00   8.2000000e+00   6.9000000e+00   7.3000000e+00   9.9000000e+00   7.7000000e+00   7.4000000e+00   6.4000000e+00   8.1000000e+00   8.4000000e+00   8.0000000e+00   6.9000000e+00   8.6000000e+00   8.4000000e+00   8.0000000e+00   7.5000000e+00   7.5000000e+00   7.3000000e+00   6.6000000e+00   1.2000000e+00   4.0000000e-01   8.0000000e-01   4.0000000e-01   1.1000000e+00   7.0000000e-01   1.0000000e+00   9.0000000e-01   5.0000000e-01   3.0000000e-01   3.0000000e-01   1.0000000e+00   1.0000000e+00   6.0000000e-01   1.0000000e+00   1.2000000e+00   9.0000000e-01   7.0000000e-01   6.0000000e-01   9.0000000e-01   1.4000000e+00   3.0000000e-01   2.0000000e-01   1.9000000e+00   1.2000000e+00   6.0000000e-01   9.0000000e-01   8.0000000e-01   6.0000000e-01   9.0000000e-01   6.0000000e-01   4.0000000e-01   6.6000000e+00   5.9000000e+00   6.9000000e+00   5.2000000e+00   6.5000000e+00   5.4000000e+00   6.0000000e+00   3.9000000e+00   6.3000000e+00   4.5000000e+00   4.4000000e+00   5.3000000e+00   5.5000000e+00   
6.0000000e+00   4.3000000e+00   6.1000000e+00   5.3000000e+00   4.9000000e+00   6.7000000e+00   4.8000000e+00   6.0000000e+00   5.3000000e+00   6.9000000e+00   5.9000000e+00   5.8000000e+00   6.1000000e+00   6.9000000e+00   7.1000000e+00   5.8000000e+00   4.3000000e+00   4.7000000e+00   4.5000000e+00   4.9000000e+00   6.7000000e+00   5.1000000e+00   5.4000000e+00   6.5000000e+00   6.4000000e+00   4.7000000e+00   5.0000000e+00   5.2000000e+00   5.8000000e+00   5.1000000e+00   3.9000000e+00   5.1000000e+00   4.8000000e+00   5.0000000e+00   5.6000000e+00   3.4000000e+00   5.0000000e+00   8.2000000e+00   6.8000000e+00   8.8000000e+00   7.5000000e+00   8.2000000e+00   1.0000000e+01   5.7000000e+00   9.2000000e+00   8.5000000e+00   9.1000000e+00   7.1000000e+00   7.6000000e+00   8.1000000e+00   6.9000000e+00   7.2000000e+00   7.5000000e+00   7.5000000e+00   1.0100000e+01   1.1000000e+01   7.0000000e+00   8.4000000e+00   6.4000000e+00   1.0300000e+01   7.0000000e+00   7.9000000e+00   8.5000000e+00   6.7000000e+00   6.5000000e+00   8.0000000e+00   8.3000000e+00   9.3000000e+00   9.8000000e+00   8.1000000e+00   6.8000000e+00   7.2000000e+00   9.8000000e+00   7.6000000e+00   7.3000000e+00   6.3000000e+00   8.0000000e+00   8.3000000e+00   7.9000000e+00   6.8000000e+00   8.5000000e+00   8.3000000e+00   7.9000000e+00   7.4000000e+00   7.4000000e+00   7.2000000e+00   6.5000000e+00   8.0000000e-01   8.0000000e-01   1.0000000e+00   2.1000000e+00   1.3000000e+00   1.6000000e+00   1.7000000e+00   1.3000000e+00   1.1000000e+00   1.3000000e+00   1.8000000e+00   1.8000000e+00   1.0000000e+00   1.2000000e+00   1.0000000e+00   1.9000000e+00   1.9000000e+00   1.0000000e+00   1.9000000e+00   2.6000000e+00   1.3000000e+00   1.4000000e+00   3.1000000e+00   2.4000000e+00   1.4000000e+00   9.0000000e-01   2.0000000e+00   8.0000000e-01   2.1000000e+00   8.0000000e-01   1.6000000e+00   6.0000000e+00   5.3000000e+00   6.3000000e+00   5.0000000e+00   5.9000000e+00   4.8000000e+00   5.4000000e+00   4.5000000e+00   5.7000000e+00   4.9000000e+00   5.0000000e+00   4.7000000e+00   4.9000000e+00   5.4000000e+00   3.9000000e+00   5.5000000e+00   4.9000000e+00   4.3000000e+00   6.1000000e+00   4.4000000e+00   5.4000000e+00   4.7000000e+00   6.3000000e+00   5.3000000e+00   5.2000000e+00   5.5000000e+00   6.3000000e+00   6.5000000e+00   5.2000000e+00   3.7000000e+00   4.5000000e+00   4.3000000e+00   4.3000000e+00   6.1000000e+00   5.1000000e+00   4.8000000e+00   5.9000000e+00   5.8000000e+00   4.3000000e+00   4.8000000e+00   5.0000000e+00   5.2000000e+00   4.5000000e+00   4.5000000e+00   4.7000000e+00   4.2000000e+00   4.4000000e+00   5.0000000e+00   4.0000000e+00   4.4000000e+00   7.6000000e+00   6.2000000e+00   8.2000000e+00   6.9000000e+00   7.6000000e+00   9.4000000e+00   6.3000000e+00   8.6000000e+00   7.9000000e+00   8.3000000e+00   6.5000000e+00   7.0000000e+00   7.5000000e+00   6.3000000e+00   6.6000000e+00   6.9000000e+00   6.9000000e+00   8.9000000e+00   1.0400000e+01   6.4000000e+00   7.8000000e+00   6.0000000e+00   9.7000000e+00   6.4000000e+00   7.3000000e+00   7.9000000e+00   6.1000000e+00   5.9000000e+00   7.4000000e+00   7.7000000e+00   8.7000000e+00   8.6000000e+00   7.5000000e+00   6.2000000e+00   6.6000000e+00   9.2000000e+00   7.0000000e+00   6.7000000e+00   5.7000000e+00   7.4000000e+00   7.7000000e+00   7.3000000e+00   6.2000000e+00   7.9000000e+00   7.7000000e+00   7.3000000e+00   6.8000000e+00   6.8000000e+00   6.6000000e+00   5.9000000e+00   1.0000000e+00   2.0000000e-01   1.3000000e+00   9.0000000e-01   
1.2000000e+00   1.1000000e+00   7.0000000e-01   5.0000000e-01   7.0000000e-01   1.2000000e+00   1.2000000e+00   8.0000000e-01   6.0000000e-01   1.0000000e+00   1.1000000e+00   1.1000000e+00   1.0000000e+00   1.1000000e+00   1.8000000e+00   5.0000000e-01   6.0000000e-01   2.3000000e+00   1.6000000e+00   8.0000000e-01   5.0000000e-01   1.2000000e+00   2.0000000e-01   1.3000000e+00   4.0000000e-01   8.0000000e-01   6.8000000e+00   6.1000000e+00   7.1000000e+00   5.4000000e+00   6.7000000e+00   5.6000000e+00   6.2000000e+00   4.1000000e+00   6.5000000e+00   4.7000000e+00   4.6000000e+00   5.5000000e+00   5.7000000e+00   6.2000000e+00   4.5000000e+00   6.3000000e+00   5.5000000e+00   5.1000000e+00   6.9000000e+00   5.0000000e+00   6.2000000e+00   5.5000000e+00   7.1000000e+00   6.1000000e+00   6.0000000e+00   6.3000000e+00   7.1000000e+00   7.3000000e+00   6.0000000e+00   4.5000000e+00   4.9000000e+00   4.7000000e+00   5.1000000e+00   6.9000000e+00   5.3000000e+00   5.6000000e+00   6.7000000e+00   6.6000000e+00   4.9000000e+00   5.2000000e+00   5.4000000e+00   6.0000000e+00   5.3000000e+00   4.1000000e+00   5.3000000e+00   5.0000000e+00   5.2000000e+00   5.8000000e+00   3.6000000e+00   5.2000000e+00   8.4000000e+00   7.0000000e+00   9.0000000e+00   7.7000000e+00   8.4000000e+00   1.0200000e+01   5.9000000e+00   9.4000000e+00   8.7000000e+00   9.1000000e+00   7.3000000e+00   7.8000000e+00   8.3000000e+00   7.1000000e+00   7.4000000e+00   7.7000000e+00   7.7000000e+00   9.7000000e+00   1.1200000e+01   7.2000000e+00   8.6000000e+00   6.6000000e+00   1.0500000e+01   7.2000000e+00   8.1000000e+00   8.7000000e+00   6.9000000e+00   6.7000000e+00   8.2000000e+00   8.5000000e+00   9.5000000e+00   9.4000000e+00   8.3000000e+00   7.0000000e+00   7.4000000e+00   1.0000000e+01   7.8000000e+00   7.5000000e+00   6.5000000e+00   8.2000000e+00   8.5000000e+00   8.1000000e+00   7.0000000e+00   8.7000000e+00   8.5000000e+00   8.1000000e+00   7.6000000e+00   7.6000000e+00   7.4000000e+00   6.7000000e+00   1.0000000e+00   1.7000000e+00   7.0000000e-01   8.0000000e-01   9.0000000e-01   7.0000000e-01   5.0000000e-01   5.0000000e-01   1.0000000e+00   1.0000000e+00   4.0000000e-01   1.2000000e+00   1.2000000e+00   1.1000000e+00   1.1000000e+00   6.0000000e-01   1.1000000e+00   1.8000000e+00   5.0000000e-01   1.0000000e+00   2.5000000e+00   1.6000000e+00   1.0000000e+00   1.1000000e+00   1.4000000e+00   8.0000000e-01   1.3000000e+00   6.0000000e-01   8.0000000e-01   6.0000000e+00   5.3000000e+00   6.3000000e+00   4.6000000e+00   5.9000000e+00   4.8000000e+00   5.4000000e+00   3.9000000e+00   5.7000000e+00   4.3000000e+00   4.4000000e+00   4.7000000e+00   4.9000000e+00   5.4000000e+00   3.7000000e+00   5.5000000e+00   4.7000000e+00   4.3000000e+00   6.1000000e+00   4.2000000e+00   5.4000000e+00   4.7000000e+00   6.3000000e+00   5.3000000e+00   5.2000000e+00   5.5000000e+00   6.3000000e+00   6.5000000e+00   5.2000000e+00   3.7000000e+00   4.1000000e+00   3.9000000e+00   4.3000000e+00   6.1000000e+00   4.5000000e+00   4.8000000e+00   5.9000000e+00   5.8000000e+00   4.1000000e+00   4.4000000e+00   4.6000000e+00   5.2000000e+00   4.5000000e+00   3.9000000e+00   4.5000000e+00   4.2000000e+00   4.4000000e+00   5.0000000e+00   3.4000000e+00   4.4000000e+00   7.6000000e+00   6.2000000e+00   8.2000000e+00   6.9000000e+00   7.6000000e+00   9.4000000e+00   5.7000000e+00   8.6000000e+00   7.9000000e+00   8.7000000e+00   6.5000000e+00   7.0000000e+00   7.5000000e+00   6.3000000e+00   6.6000000e+00   6.9000000e+00   6.9000000e+00   
9.7000000e+00   1.0400000e+01   6.4000000e+00   7.8000000e+00   5.8000000e+00   9.7000000e+00   6.4000000e+00   7.3000000e+00   7.9000000e+00   6.1000000e+00   5.9000000e+00   7.4000000e+00   7.7000000e+00   8.7000000e+00   9.4000000e+00   7.5000000e+00   6.2000000e+00   6.6000000e+00   9.2000000e+00   7.0000000e+00   6.7000000e+00   5.7000000e+00   7.4000000e+00   7.7000000e+00   7.3000000e+00   6.2000000e+00   7.9000000e+00   7.7000000e+00   7.3000000e+00   6.8000000e+00   6.8000000e+00   6.6000000e+00   5.9000000e+00   1.3000000e+00   7.0000000e-01   1.2000000e+00   1.1000000e+00   5.0000000e-01   5.0000000e-01   7.0000000e-01   1.2000000e+00   1.2000000e+00   6.0000000e-01   8.0000000e-01   1.2000000e+00   1.1000000e+00   1.1000000e+00   1.0000000e+00   1.1000000e+00   1.8000000e+00   5.0000000e-01   6.0000000e-01   2.3000000e+00   1.6000000e+00   6.0000000e-01   5.0000000e-01   1.2000000e+00   4.0000000e-01   1.3000000e+00   4.0000000e-01   8.0000000e-01   6.6000000e+00   5.9000000e+00   6.9000000e+00   5.2000000e+00   6.5000000e+00   5.4000000e+00   6.0000000e+00   3.9000000e+00   6.3000000e+00   4.5000000e+00   4.4000000e+00   5.3000000e+00   5.5000000e+00   6.0000000e+00   4.3000000e+00   6.1000000e+00   5.3000000e+00   4.9000000e+00   6.7000000e+00   4.8000000e+00   6.0000000e+00   5.3000000e+00   6.9000000e+00   5.9000000e+00   5.8000000e+00   6.1000000e+00   6.9000000e+00   7.1000000e+00   5.8000000e+00   4.3000000e+00   4.7000000e+00   4.5000000e+00   4.9000000e+00   6.7000000e+00   5.1000000e+00   5.4000000e+00   6.5000000e+00   6.4000000e+00   4.7000000e+00   5.0000000e+00   5.2000000e+00   5.8000000e+00   5.1000000e+00   3.9000000e+00   5.1000000e+00   4.8000000e+00   5.0000000e+00   5.6000000e+00   3.4000000e+00   5.0000000e+00   8.2000000e+00   6.8000000e+00   8.8000000e+00   7.5000000e+00   8.2000000e+00   1.0000000e+01   5.7000000e+00   9.2000000e+00   8.5000000e+00   8.9000000e+00   7.1000000e+00   7.6000000e+00   8.1000000e+00   6.9000000e+00   7.2000000e+00   7.5000000e+00   7.5000000e+00   9.7000000e+00   1.1000000e+01   7.0000000e+00   8.4000000e+00   6.4000000e+00   1.0300000e+01   7.0000000e+00   7.9000000e+00   8.5000000e+00   6.7000000e+00   6.5000000e+00   8.0000000e+00   8.3000000e+00   9.3000000e+00   9.4000000e+00   8.1000000e+00   6.8000000e+00   7.2000000e+00   9.8000000e+00   7.6000000e+00   7.3000000e+00   6.3000000e+00   8.0000000e+00   8.3000000e+00   7.9000000e+00   6.8000000e+00   8.5000000e+00   8.3000000e+00   7.9000000e+00   7.4000000e+00   7.4000000e+00   7.2000000e+00   6.5000000e+00   1.8000000e+00   1.3000000e+00   1.6000000e+00   1.4000000e+00   1.2000000e+00   1.2000000e+00   1.1000000e+00   1.3000000e+00   1.7000000e+00   1.7000000e+00   1.9000000e+00   1.4000000e+00   1.0000000e+00   1.3000000e+00   1.4000000e+00   1.1000000e+00   1.2000000e+00   9.0000000e-01   1.8000000e+00   9.0000000e-01   1.5000000e+00   1.8000000e+00   1.3000000e+00   1.3000000e+00   8.0000000e-01   1.3000000e+00   1.1000000e+00   7.7000000e+00   7.0000000e+00   8.0000000e+00   6.3000000e+00   7.6000000e+00   6.5000000e+00   7.1000000e+00   4.6000000e+00   7.4000000e+00   5.6000000e+00   5.3000000e+00   6.4000000e+00   6.6000000e+00   7.1000000e+00   5.4000000e+00   7.2000000e+00   6.4000000e+00   6.0000000e+00   7.8000000e+00   5.9000000e+00   7.1000000e+00   6.4000000e+00   8.0000000e+00   7.0000000e+00   6.9000000e+00   7.2000000e+00   8.0000000e+00   8.2000000e+00   6.9000000e+00   5.4000000e+00   5.8000000e+00   5.6000000e+00   6.0000000e+00   7.8000000e+00   
6.2000000e+00   6.5000000e+00   7.6000000e+00   7.5000000e+00   5.8000000e+00   6.1000000e+00   6.3000000e+00   6.9000000e+00   6.2000000e+00   4.8000000e+00   6.2000000e+00   5.9000000e+00   6.1000000e+00   6.7000000e+00   4.5000000e+00   6.1000000e+00   9.3000000e+00   7.9000000e+00   9.9000000e+00   8.6000000e+00   9.3000000e+00   1.1100000e+01   6.4000000e+00   1.0300000e+01   9.6000000e+00   1.0000000e+01   8.2000000e+00   8.7000000e+00   9.2000000e+00   8.0000000e+00   8.3000000e+00   8.6000000e+00   8.6000000e+00   1.1000000e+01   1.2100000e+01   8.1000000e+00   9.5000000e+00   7.5000000e+00   1.1400000e+01   8.1000000e+00   9.0000000e+00   9.6000000e+00   7.8000000e+00   7.6000000e+00   9.1000000e+00   9.4000000e+00   1.0400000e+01   1.0700000e+01   9.2000000e+00   7.9000000e+00   8.3000000e+00   1.0900000e+01   8.7000000e+00   8.4000000e+00   7.4000000e+00   9.1000000e+00   9.4000000e+00   9.0000000e+00   7.9000000e+00   9.6000000e+00   9.4000000e+00   9.0000000e+00   8.5000000e+00   8.5000000e+00   8.3000000e+00   7.6000000e+00   9.0000000e-01   8.0000000e-01   4.0000000e-01   8.0000000e-01   8.0000000e-01   9.0000000e-01   9.0000000e-01   7.0000000e-01   1.5000000e+00   1.9000000e+00   1.0000000e+00   1.0000000e+00   1.3000000e+00   1.0000000e+00   1.7000000e+00   6.0000000e-01   9.0000000e-01   2.2000000e+00   1.5000000e+00   5.0000000e-01   8.0000000e-01   1.1000000e+00   9.0000000e-01   1.2000000e+00   1.1000000e+00   7.0000000e-01   5.9000000e+00   5.2000000e+00   6.2000000e+00   4.5000000e+00   5.8000000e+00   4.7000000e+00   5.3000000e+00   3.2000000e+00   5.6000000e+00   3.8000000e+00   3.7000000e+00   4.6000000e+00   4.8000000e+00   5.3000000e+00   3.6000000e+00   5.4000000e+00   4.6000000e+00   4.2000000e+00   6.0000000e+00   4.1000000e+00   5.3000000e+00   4.6000000e+00   6.2000000e+00   5.2000000e+00   5.1000000e+00   5.4000000e+00   6.2000000e+00   6.4000000e+00   5.1000000e+00   3.6000000e+00   4.0000000e+00   3.8000000e+00   4.2000000e+00   6.0000000e+00   4.4000000e+00   4.9000000e+00   5.8000000e+00   5.7000000e+00   4.0000000e+00   4.3000000e+00   4.5000000e+00   5.1000000e+00   4.4000000e+00   3.2000000e+00   4.4000000e+00   4.1000000e+00   4.3000000e+00   4.9000000e+00   2.7000000e+00   4.3000000e+00   7.5000000e+00   6.1000000e+00   8.1000000e+00   6.8000000e+00   7.5000000e+00   9.3000000e+00   5.0000000e+00   8.5000000e+00   7.8000000e+00   8.8000000e+00   6.4000000e+00   6.9000000e+00   7.4000000e+00   6.2000000e+00   6.5000000e+00   6.8000000e+00   6.8000000e+00   9.8000000e+00   1.0300000e+01   6.3000000e+00   7.7000000e+00   5.7000000e+00   9.6000000e+00   6.3000000e+00   7.2000000e+00   7.8000000e+00   6.0000000e+00   5.8000000e+00   7.3000000e+00   7.6000000e+00   8.6000000e+00   9.5000000e+00   7.4000000e+00   6.1000000e+00   6.5000000e+00   9.1000000e+00   7.1000000e+00   6.6000000e+00   5.6000000e+00   7.3000000e+00   7.6000000e+00   7.2000000e+00   6.1000000e+00   7.8000000e+00   7.6000000e+00   7.2000000e+00   6.7000000e+00   6.7000000e+00   6.7000000e+00   5.8000000e+00   9.0000000e-01   7.0000000e-01   9.0000000e-01   9.0000000e-01   6.0000000e-01   6.0000000e-01   1.2000000e+00   1.6000000e+00   2.0000000e+00   9.0000000e-01   1.1000000e+00   1.4000000e+00   9.0000000e-01   1.4000000e+00   7.0000000e-01   1.0000000e+00   2.1000000e+00   1.2000000e+00   1.0000000e+00   9.0000000e-01   1.0000000e+00   1.0000000e+00   9.0000000e-01   1.2000000e+00   8.0000000e-01   6.4000000e+00   5.7000000e+00   6.7000000e+00   5.0000000e+00   6.3000000e+00   
5.2000000e+00   5.8000000e+00   3.3000000e+00   6.1000000e+00   4.3000000e+00   4.0000000e+00   5.1000000e+00   5.3000000e+00   5.8000000e+00   4.1000000e+00   5.9000000e+00   5.1000000e+00   4.7000000e+00   6.5000000e+00   4.6000000e+00   5.8000000e+00   5.1000000e+00   6.7000000e+00   5.7000000e+00   5.6000000e+00   5.9000000e+00   6.7000000e+00   6.9000000e+00   5.6000000e+00   4.1000000e+00   4.5000000e+00   4.3000000e+00   4.7000000e+00   6.5000000e+00   4.9000000e+00   5.2000000e+00   6.3000000e+00   6.2000000e+00   4.5000000e+00   4.8000000e+00   5.0000000e+00   5.6000000e+00   4.9000000e+00   3.5000000e+00   4.9000000e+00   4.6000000e+00   4.8000000e+00   5.4000000e+00   3.2000000e+00   4.8000000e+00   8.0000000e+00   6.6000000e+00   8.6000000e+00   7.3000000e+00   8.0000000e+00   9.8000000e+00   5.1000000e+00   9.0000000e+00   8.3000000e+00   9.1000000e+00   6.9000000e+00   7.4000000e+00   7.9000000e+00   6.7000000e+00   7.0000000e+00   7.3000000e+00   7.3000000e+00   1.0100000e+01   1.0800000e+01   6.8000000e+00   8.2000000e+00   6.2000000e+00   1.0100000e+01   6.8000000e+00   7.7000000e+00   8.3000000e+00   6.5000000e+00   6.3000000e+00   7.8000000e+00   8.1000000e+00   9.1000000e+00   9.8000000e+00   7.9000000e+00   6.6000000e+00   7.0000000e+00   9.6000000e+00   7.4000000e+00   7.1000000e+00   6.1000000e+00   7.8000000e+00   8.1000000e+00   7.7000000e+00   6.6000000e+00   8.3000000e+00   8.1000000e+00   7.7000000e+00   7.2000000e+00   7.2000000e+00   7.0000000e+00   6.3000000e+00   6.0000000e-01   8.0000000e-01   8.0000000e-01   5.0000000e-01   3.0000000e-01   1.1000000e+00   1.5000000e+00   1.9000000e+00   4.0000000e-01   6.0000000e-01   1.3000000e+00   4.0000000e-01   9.0000000e-01   6.0000000e-01   9.0000000e-01   1.6000000e+00   1.1000000e+00   9.0000000e-01   1.4000000e+00   5.0000000e-01   9.0000000e-01   8.0000000e-01   1.1000000e+00   5.0000000e-01   6.5000000e+00   5.8000000e+00   6.6000000e+00   4.7000000e+00   6.0000000e+00   4.9000000e+00   6.1000000e+00   3.2000000e+00   5.8000000e+00   4.0000000e+00   3.7000000e+00   4.8000000e+00   5.0000000e+00   5.5000000e+00   3.8000000e+00   5.8000000e+00   4.8000000e+00   4.4000000e+00   6.2000000e+00   4.3000000e+00   5.9000000e+00   4.8000000e+00   6.4000000e+00   5.4000000e+00   5.3000000e+00   5.6000000e+00   6.4000000e+00   6.6000000e+00   5.3000000e+00   3.8000000e+00   4.2000000e+00   4.0000000e+00   4.4000000e+00   6.2000000e+00   4.6000000e+00   5.7000000e+00   6.2000000e+00   5.9000000e+00   4.2000000e+00   4.5000000e+00   4.7000000e+00   5.3000000e+00   4.6000000e+00   3.2000000e+00   4.6000000e+00   4.3000000e+00   4.5000000e+00   5.1000000e+00   2.9000000e+00   4.5000000e+00   8.3000000e+00   6.3000000e+00   8.3000000e+00   7.0000000e+00   7.7000000e+00   9.5000000e+00   5.0000000e+00   8.7000000e+00   8.0000000e+00   9.6000000e+00   7.0000000e+00   7.1000000e+00   7.6000000e+00   6.4000000e+00   6.7000000e+00   7.4000000e+00   7.0000000e+00   1.0600000e+01   1.0500000e+01   6.5000000e+00   8.3000000e+00   5.9000000e+00   9.8000000e+00   6.5000000e+00   8.0000000e+00   8.4000000e+00   6.2000000e+00   6.0000000e+00   7.5000000e+00   7.8000000e+00   8.8000000e+00   1.0300000e+01   7.6000000e+00   6.3000000e+00   6.7000000e+00   9.3000000e+00   7.9000000e+00   7.0000000e+00   5.8000000e+00   7.7000000e+00   8.0000000e+00   7.6000000e+00   6.3000000e+00   8.4000000e+00   8.4000000e+00   7.4000000e+00   6.9000000e+00   6.9000000e+00   7.5000000e+00   6.0000000e+00   6.0000000e-01   6.0000000e-01   7.0000000e-01   
7.0000000e-01   5.0000000e-01   1.3000000e+00   1.7000000e+00   8.0000000e-01   8.0000000e-01   1.1000000e+00   8.0000000e-01   1.5000000e+00   4.0000000e-01   5.0000000e-01   2.0000000e+00   1.3000000e+00   3.0000000e-01   8.0000000e-01   9.0000000e-01   7.0000000e-01   1.0000000e+00   9.0000000e-01   5.0000000e-01   6.3000000e+00   5.6000000e+00   6.6000000e+00   4.9000000e+00   6.2000000e+00   5.1000000e+00   5.7000000e+00   3.4000000e+00   6.0000000e+00   4.2000000e+00   3.9000000e+00   5.0000000e+00   5.2000000e+00   5.7000000e+00   4.0000000e+00   5.8000000e+00   5.0000000e+00   4.6000000e+00   6.4000000e+00   4.5000000e+00   5.7000000e+00   5.0000000e+00   6.6000000e+00   5.6000000e+00   5.5000000e+00   5.8000000e+00   6.6000000e+00   6.8000000e+00   5.5000000e+00   4.0000000e+00   4.4000000e+00   4.2000000e+00   4.6000000e+00   6.4000000e+00   4.8000000e+00   5.1000000e+00   6.2000000e+00   6.1000000e+00   4.4000000e+00   4.7000000e+00   4.9000000e+00   5.5000000e+00   4.8000000e+00   3.4000000e+00   4.8000000e+00   4.5000000e+00   4.7000000e+00   5.3000000e+00   3.1000000e+00   4.7000000e+00   7.9000000e+00   6.5000000e+00   8.5000000e+00   7.2000000e+00   7.9000000e+00   9.7000000e+00   5.2000000e+00   8.9000000e+00   8.2000000e+00   9.0000000e+00   6.8000000e+00   7.3000000e+00   7.8000000e+00   6.6000000e+00   6.9000000e+00   7.2000000e+00   7.2000000e+00   1.0000000e+01   1.0700000e+01   6.7000000e+00   8.1000000e+00   6.1000000e+00   1.0000000e+01   6.7000000e+00   7.6000000e+00   8.2000000e+00   6.4000000e+00   6.2000000e+00   7.7000000e+00   8.0000000e+00   9.0000000e+00   9.7000000e+00   7.8000000e+00   6.5000000e+00   6.9000000e+00   9.5000000e+00   7.3000000e+00   7.0000000e+00   6.0000000e+00   7.7000000e+00   8.0000000e+00   7.6000000e+00   6.5000000e+00   8.2000000e+00   8.0000000e+00   7.6000000e+00   7.1000000e+00   7.1000000e+00   6.9000000e+00   6.2000000e+00   2.0000000e-01   9.0000000e-01   9.0000000e-01   5.0000000e-01   7.0000000e-01   1.1000000e+00   8.0000000e-01   8.0000000e-01   5.0000000e-01   8.0000000e-01   1.5000000e+00   2.0000000e-01   5.0000000e-01   2.2000000e+00   1.3000000e+00   7.0000000e-01   1.0000000e+00   1.1000000e+00   5.0000000e-01   1.0000000e+00   3.0000000e-01   5.0000000e-01   6.5000000e+00   5.8000000e+00   6.8000000e+00   5.1000000e+00   6.4000000e+00   5.3000000e+00   5.9000000e+00   4.0000000e+00   6.2000000e+00   4.4000000e+00   4.5000000e+00   5.2000000e+00   5.4000000e+00   5.9000000e+00   4.2000000e+00   6.0000000e+00   5.2000000e+00   4.8000000e+00   6.6000000e+00   4.7000000e+00   5.9000000e+00   5.2000000e+00   6.8000000e+00   5.8000000e+00   5.7000000e+00   6.0000000e+00   6.8000000e+00   7.0000000e+00   5.7000000e+00   4.2000000e+00   4.6000000e+00   4.4000000e+00   4.8000000e+00   6.6000000e+00   5.0000000e+00   5.3000000e+00   6.4000000e+00   6.3000000e+00   4.6000000e+00   4.9000000e+00   5.1000000e+00   5.7000000e+00   5.0000000e+00   4.0000000e+00   5.0000000e+00   4.7000000e+00   4.9000000e+00   5.5000000e+00   3.5000000e+00   4.9000000e+00   8.1000000e+00   6.7000000e+00   8.7000000e+00   7.4000000e+00   8.1000000e+00   9.9000000e+00   5.8000000e+00   9.1000000e+00   8.4000000e+00   9.0000000e+00   7.0000000e+00   7.5000000e+00   8.0000000e+00   6.8000000e+00   7.1000000e+00   7.4000000e+00   7.4000000e+00   1.0000000e+01   1.0900000e+01   6.9000000e+00   8.3000000e+00   6.3000000e+00   1.0200000e+01   6.9000000e+00   7.8000000e+00   8.4000000e+00   6.6000000e+00   6.4000000e+00   7.9000000e+00   8.2000000e+00   
9.2000000e+00   9.7000000e+00   8.0000000e+00   6.7000000e+00   7.1000000e+00   9.7000000e+00   7.5000000e+00   7.2000000e+00   6.2000000e+00   7.9000000e+00   8.2000000e+00   7.8000000e+00   6.7000000e+00   8.4000000e+00   8.2000000e+00   7.8000000e+00   7.3000000e+00   7.3000000e+00   7.1000000e+00   6.4000000e+00   9.0000000e-01   9.0000000e-01   5.0000000e-01   9.0000000e-01   1.1000000e+00   8.0000000e-01   6.0000000e-01   5.0000000e-01   8.0000000e-01   1.3000000e+00   2.0000000e-01   5.0000000e-01   2.0000000e+00   1.1000000e+00   9.0000000e-01   1.2000000e+00   9.0000000e-01   7.0000000e-01   8.0000000e-01   5.0000000e-01   3.0000000e-01   6.5000000e+00   5.8000000e+00   6.8000000e+00   5.1000000e+00   6.4000000e+00   5.3000000e+00   5.9000000e+00   4.0000000e+00   6.2000000e+00   4.4000000e+00   4.5000000e+00   5.2000000e+00   5.4000000e+00   5.9000000e+00   4.2000000e+00   6.0000000e+00   5.2000000e+00   4.8000000e+00   6.6000000e+00   4.7000000e+00   5.9000000e+00   5.2000000e+00   6.8000000e+00   5.8000000e+00   5.7000000e+00   6.0000000e+00   6.8000000e+00   7.0000000e+00   5.7000000e+00   4.2000000e+00   4.6000000e+00   4.4000000e+00   4.8000000e+00   6.6000000e+00   5.0000000e+00   5.3000000e+00   6.4000000e+00   6.3000000e+00   4.6000000e+00   4.9000000e+00   5.1000000e+00   5.7000000e+00   5.0000000e+00   4.0000000e+00   5.0000000e+00   4.7000000e+00   4.9000000e+00   5.5000000e+00   3.5000000e+00   4.9000000e+00   8.1000000e+00   6.7000000e+00   8.7000000e+00   7.4000000e+00   8.1000000e+00   9.9000000e+00   5.8000000e+00   9.1000000e+00   8.4000000e+00   9.2000000e+00   7.0000000e+00   7.5000000e+00   8.0000000e+00   6.8000000e+00   7.1000000e+00   7.4000000e+00   7.4000000e+00   1.0200000e+01   1.0900000e+01   6.9000000e+00   8.3000000e+00   6.3000000e+00   1.0200000e+01   6.9000000e+00   7.8000000e+00   8.4000000e+00   6.6000000e+00   6.4000000e+00   7.9000000e+00   8.2000000e+00   9.2000000e+00   9.9000000e+00   8.0000000e+00   6.7000000e+00   7.1000000e+00   9.7000000e+00   7.5000000e+00   7.2000000e+00   6.2000000e+00   7.9000000e+00   8.2000000e+00   7.8000000e+00   6.7000000e+00   8.4000000e+00   8.2000000e+00   7.8000000e+00   7.3000000e+00   7.3000000e+00   7.1000000e+00   6.4000000e+00   2.0000000e-01   1.2000000e+00   1.6000000e+00   2.0000000e+00   5.0000000e-01   7.0000000e-01   1.4000000e+00   5.0000000e-01   8.0000000e-01   7.0000000e-01   1.0000000e+00   1.5000000e+00   6.0000000e-01   1.0000000e+00   1.5000000e+00   6.0000000e-01   1.0000000e+00   3.0000000e-01   1.2000000e+00   6.0000000e-01   6.6000000e+00   5.9000000e+00   6.9000000e+00   5.2000000e+00   6.5000000e+00   5.4000000e+00   6.2000000e+00   3.5000000e+00   6.3000000e+00   4.5000000e+00   4.2000000e+00   5.3000000e+00   5.5000000e+00   6.0000000e+00   4.3000000e+00   6.1000000e+00   5.3000000e+00   4.9000000e+00   6.7000000e+00   4.8000000e+00   6.0000000e+00   5.3000000e+00   6.9000000e+00   5.9000000e+00   5.8000000e+00   6.1000000e+00   6.9000000e+00   7.1000000e+00   5.8000000e+00   4.3000000e+00   4.7000000e+00   4.5000000e+00   4.9000000e+00   6.7000000e+00   5.1000000e+00   5.8000000e+00   6.5000000e+00   6.4000000e+00   4.7000000e+00   5.0000000e+00   5.2000000e+00   5.8000000e+00   5.1000000e+00   3.7000000e+00   5.1000000e+00   4.8000000e+00   5.0000000e+00   5.6000000e+00   3.4000000e+00   5.0000000e+00   8.4000000e+00   6.8000000e+00   8.8000000e+00   7.5000000e+00   8.2000000e+00   1.0000000e+01   5.3000000e+00   9.2000000e+00   8.5000000e+00   9.7000000e+00   7.1000000e+00   
7.6000000e+00   8.1000000e+00   6.9000000e+00   7.2000000e+00   7.5000000e+00   7.5000000e+00   1.0700000e+01   1.1000000e+01   7.0000000e+00   8.4000000e+00   6.4000000e+00   1.0300000e+01   7.0000000e+00   8.1000000e+00   8.5000000e+00   6.7000000e+00   6.5000000e+00   8.0000000e+00   8.3000000e+00   9.3000000e+00   1.0400000e+01   8.1000000e+00   6.8000000e+00   7.2000000e+00   9.8000000e+00   8.0000000e+00   7.3000000e+00   6.3000000e+00   8.0000000e+00   8.3000000e+00   7.9000000e+00   6.8000000e+00   8.5000000e+00   8.5000000e+00   7.9000000e+00   7.4000000e+00   7.4000000e+00   7.6000000e+00   6.5000000e+00   1.2000000e+00   1.6000000e+00   2.0000000e+00   3.0000000e-01   7.0000000e-01   1.4000000e+00   3.0000000e-01   8.0000000e-01   7.0000000e-01   1.0000000e+00   1.5000000e+00   8.0000000e-01   1.0000000e+00   1.5000000e+00   4.0000000e-01   1.0000000e+00   5.0000000e-01   1.2000000e+00   6.0000000e-01   6.6000000e+00   5.9000000e+00   6.7000000e+00   5.0000000e+00   6.3000000e+00   5.2000000e+00   6.2000000e+00   3.3000000e+00   6.1000000e+00   4.3000000e+00   4.0000000e+00   5.1000000e+00   5.3000000e+00   5.8000000e+00   4.1000000e+00   5.9000000e+00   5.1000000e+00   4.7000000e+00   6.5000000e+00   4.6000000e+00   6.0000000e+00   5.1000000e+00   6.7000000e+00   5.7000000e+00   5.6000000e+00   5.9000000e+00   6.7000000e+00   6.9000000e+00   5.6000000e+00   4.1000000e+00   4.5000000e+00   4.3000000e+00   4.7000000e+00   6.5000000e+00   4.9000000e+00   5.8000000e+00   6.3000000e+00   6.2000000e+00   4.5000000e+00   4.8000000e+00   5.0000000e+00   5.6000000e+00   4.9000000e+00   3.5000000e+00   4.9000000e+00   4.6000000e+00   4.8000000e+00   5.4000000e+00   3.2000000e+00   4.8000000e+00   8.4000000e+00   6.6000000e+00   8.6000000e+00   7.3000000e+00   8.0000000e+00   9.8000000e+00   5.1000000e+00   9.0000000e+00   8.3000000e+00   9.7000000e+00   7.1000000e+00   7.4000000e+00   7.9000000e+00   6.7000000e+00   7.0000000e+00   7.5000000e+00   7.3000000e+00   1.0700000e+01   1.0800000e+01   6.8000000e+00   8.4000000e+00   6.2000000e+00   1.0100000e+01   6.8000000e+00   8.1000000e+00   8.5000000e+00   6.5000000e+00   6.3000000e+00   7.8000000e+00   8.1000000e+00   9.1000000e+00   1.0400000e+01   7.9000000e+00   6.6000000e+00   7.0000000e+00   9.6000000e+00   8.0000000e+00   7.1000000e+00   6.1000000e+00   7.8000000e+00   8.1000000e+00   7.7000000e+00   6.6000000e+00   8.5000000e+00   8.5000000e+00   7.7000000e+00   7.2000000e+00   7.2000000e+00   7.6000000e+00   6.3000000e+00   1.2000000e+00   1.2000000e+00   1.1000000e+00   1.1000000e+00   6.0000000e-01   1.1000000e+00   1.8000000e+00   5.0000000e-01   8.0000000e-01   2.3000000e+00   1.6000000e+00   8.0000000e-01   1.1000000e+00   1.2000000e+00   1.0000000e+00   1.3000000e+00   6.0000000e-01   8.0000000e-01   6.0000000e+00   5.3000000e+00   6.3000000e+00   4.6000000e+00   5.9000000e+00   4.8000000e+00   5.4000000e+00   3.9000000e+00   5.7000000e+00   4.3000000e+00   4.4000000e+00   4.7000000e+00   4.9000000e+00   5.4000000e+00   3.7000000e+00   5.5000000e+00   4.7000000e+00   4.3000000e+00   6.1000000e+00   4.2000000e+00   5.4000000e+00   4.7000000e+00   6.3000000e+00   5.3000000e+00   5.2000000e+00   5.5000000e+00   6.3000000e+00   6.5000000e+00   5.2000000e+00   3.7000000e+00   4.1000000e+00   3.9000000e+00   4.3000000e+00   6.1000000e+00   4.5000000e+00   4.8000000e+00   5.9000000e+00   5.8000000e+00   4.1000000e+00   4.4000000e+00   4.6000000e+00   5.2000000e+00   4.5000000e+00   3.9000000e+00   4.5000000e+00   4.2000000e+00   
4.4000000e+00   5.0000000e+00   3.4000000e+00   4.4000000e+00   7.6000000e+00   6.2000000e+00   8.2000000e+00   6.9000000e+00   7.6000000e+00   9.4000000e+00   5.7000000e+00   8.6000000e+00   7.9000000e+00   8.7000000e+00   6.5000000e+00   7.0000000e+00   7.5000000e+00   6.3000000e+00   6.6000000e+00   6.9000000e+00   6.9000000e+00   9.7000000e+00   1.0400000e+01   6.4000000e+00   7.8000000e+00   5.8000000e+00   9.7000000e+00   6.4000000e+00   7.3000000e+00   7.9000000e+00   6.1000000e+00   5.9000000e+00   7.4000000e+00   7.7000000e+00   8.7000000e+00   9.4000000e+00   7.5000000e+00   6.2000000e+00   6.6000000e+00   9.2000000e+00   7.0000000e+00   6.7000000e+00   5.7000000e+00   7.4000000e+00   7.7000000e+00   7.3000000e+00   6.2000000e+00   7.9000000e+00   7.7000000e+00   7.3000000e+00   6.8000000e+00   6.8000000e+00   6.6000000e+00   5.9000000e+00   6.0000000e-01   1.3000000e+00   1.5000000e+00   1.2000000e+00   1.3000000e+00   2.2000000e+00   9.0000000e-01   1.2000000e+00   2.9000000e+00   2.0000000e+00   1.4000000e+00   1.1000000e+00   1.8000000e+00   6.0000000e-01   1.7000000e+00   6.0000000e-01   1.2000000e+00   7.2000000e+00   6.5000000e+00   7.5000000e+00   5.8000000e+00   7.1000000e+00   6.0000000e+00   6.6000000e+00   4.7000000e+00   6.9000000e+00   5.1000000e+00   5.2000000e+00   5.9000000e+00   6.1000000e+00   6.6000000e+00   4.9000000e+00   6.7000000e+00   5.9000000e+00   5.5000000e+00   7.3000000e+00   5.4000000e+00   6.6000000e+00   5.9000000e+00   7.5000000e+00   6.5000000e+00   6.4000000e+00   6.7000000e+00   7.5000000e+00   7.7000000e+00   6.4000000e+00   4.9000000e+00   5.3000000e+00   5.1000000e+00   5.5000000e+00   7.3000000e+00   5.7000000e+00   6.0000000e+00   7.1000000e+00   7.0000000e+00   5.3000000e+00   5.6000000e+00   5.8000000e+00   6.4000000e+00   5.7000000e+00   4.7000000e+00   5.7000000e+00   5.4000000e+00   5.6000000e+00   6.2000000e+00   4.2000000e+00   5.6000000e+00   8.8000000e+00   7.4000000e+00   9.4000000e+00   8.1000000e+00   8.8000000e+00   1.0600000e+01   6.5000000e+00   9.8000000e+00   9.1000000e+00   9.5000000e+00   7.7000000e+00   8.2000000e+00   8.7000000e+00   7.5000000e+00   7.8000000e+00   8.1000000e+00   8.1000000e+00   1.0100000e+01   1.1600000e+01   7.6000000e+00   9.0000000e+00   7.0000000e+00   1.0900000e+01   7.6000000e+00   8.5000000e+00   9.1000000e+00   7.3000000e+00   7.1000000e+00   8.6000000e+00   8.9000000e+00   9.9000000e+00   9.8000000e+00   8.7000000e+00   7.4000000e+00   7.8000000e+00   1.0400000e+01   8.2000000e+00   7.9000000e+00   6.9000000e+00   8.6000000e+00   8.9000000e+00   8.5000000e+00   7.4000000e+00   9.1000000e+00   8.9000000e+00   8.5000000e+00   8.0000000e+00   8.0000000e+00   7.8000000e+00   7.1000000e+00   1.9000000e+00   1.7000000e+00   8.0000000e-01   1.9000000e+00   2.4000000e+00   1.3000000e+00   1.4000000e+00   3.1000000e+00   2.2000000e+00   1.8000000e+00   1.5000000e+00   2.0000000e+00   1.0000000e+00   1.9000000e+00   8.0000000e-01   1.4000000e+00   7.0000000e+00   6.3000000e+00   7.3000000e+00   5.6000000e+00   6.9000000e+00   5.8000000e+00   6.4000000e+00   5.1000000e+00   6.7000000e+00   5.5000000e+00   5.6000000e+00   5.7000000e+00   5.9000000e+00   6.4000000e+00   4.7000000e+00   6.5000000e+00   5.7000000e+00   5.3000000e+00   7.1000000e+00   5.2000000e+00   6.4000000e+00   5.7000000e+00   7.3000000e+00   6.3000000e+00   6.2000000e+00   6.5000000e+00   7.3000000e+00   7.5000000e+00   6.2000000e+00   4.7000000e+00   5.1000000e+00   4.9000000e+00   5.3000000e+00   7.1000000e+00   5.7000000e+00   
5.8000000e+00   6.9000000e+00   6.8000000e+00   5.1000000e+00   5.4000000e+00   5.6000000e+00   6.2000000e+00   5.5000000e+00   5.1000000e+00   5.5000000e+00   5.2000000e+00   5.4000000e+00   6.0000000e+00   4.6000000e+00   5.4000000e+00   8.6000000e+00   7.2000000e+00   9.2000000e+00   7.9000000e+00   8.6000000e+00   1.0400000e+01   6.9000000e+00   9.6000000e+00   8.9000000e+00   9.3000000e+00   7.5000000e+00   8.0000000e+00   8.5000000e+00   7.3000000e+00   7.6000000e+00   7.9000000e+00   7.9000000e+00   9.9000000e+00   1.1400000e+01   7.4000000e+00   8.8000000e+00   6.8000000e+00   1.0700000e+01   7.4000000e+00   8.3000000e+00   8.9000000e+00   7.1000000e+00   6.9000000e+00   8.4000000e+00   8.7000000e+00   9.7000000e+00   9.6000000e+00   8.5000000e+00   7.2000000e+00   7.6000000e+00   1.0200000e+01   8.0000000e+00   7.7000000e+00   6.7000000e+00   8.4000000e+00   8.7000000e+00   8.3000000e+00   7.2000000e+00   8.9000000e+00   8.7000000e+00   8.3000000e+00   7.8000000e+00   7.8000000e+00   7.6000000e+00   6.9000000e+00   6.0000000e-01   1.3000000e+00   0.0000000e+00   9.0000000e-01   6.0000000e-01   9.0000000e-01   1.6000000e+00   9.0000000e-01   1.1000000e+00   1.6000000e+00   5.0000000e-01   1.1000000e+00   6.0000000e-01   1.1000000e+00   5.0000000e-01   6.7000000e+00   6.0000000e+00   6.8000000e+00   5.1000000e+00   6.4000000e+00   5.3000000e+00   6.3000000e+00   3.4000000e+00   6.2000000e+00   4.4000000e+00   4.1000000e+00   5.2000000e+00   5.4000000e+00   5.9000000e+00   4.2000000e+00   6.0000000e+00   5.2000000e+00   4.8000000e+00   6.6000000e+00   4.7000000e+00   6.1000000e+00   5.2000000e+00   6.8000000e+00   5.8000000e+00   5.7000000e+00   6.0000000e+00   6.8000000e+00   7.0000000e+00   5.7000000e+00   4.2000000e+00   4.6000000e+00   4.4000000e+00   4.8000000e+00   6.6000000e+00   5.0000000e+00   5.9000000e+00   6.4000000e+00   6.3000000e+00   4.6000000e+00   4.9000000e+00   5.1000000e+00   5.7000000e+00   5.0000000e+00   3.6000000e+00   5.0000000e+00   4.7000000e+00   4.9000000e+00   5.5000000e+00   3.3000000e+00   4.9000000e+00   8.5000000e+00   6.7000000e+00   8.7000000e+00   7.4000000e+00   8.1000000e+00   9.9000000e+00   5.2000000e+00   9.1000000e+00   8.4000000e+00   9.8000000e+00   7.2000000e+00   7.5000000e+00   8.0000000e+00   6.8000000e+00   7.1000000e+00   7.6000000e+00   7.4000000e+00   1.0800000e+01   1.0900000e+01   6.9000000e+00   8.5000000e+00   6.3000000e+00   1.0200000e+01   6.9000000e+00   8.2000000e+00   8.6000000e+00   6.6000000e+00   6.4000000e+00   7.9000000e+00   8.2000000e+00   9.2000000e+00   1.0500000e+01   8.0000000e+00   6.7000000e+00   7.1000000e+00   9.7000000e+00   8.1000000e+00   7.2000000e+00   6.2000000e+00   7.9000000e+00   8.2000000e+00   7.8000000e+00   6.7000000e+00   8.6000000e+00   8.6000000e+00   7.8000000e+00   7.3000000e+00   7.3000000e+00   7.7000000e+00   6.4000000e+00   9.0000000e-01   6.0000000e-01   9.0000000e-01   6.0000000e-01   5.0000000e-01   1.6000000e+00   7.0000000e-01   1.1000000e+00   1.6000000e+00   7.0000000e-01   1.1000000e+00   6.0000000e-01   1.1000000e+00   3.0000000e-01   6.7000000e+00   6.0000000e+00   7.0000000e+00   5.3000000e+00   6.6000000e+00   5.5000000e+00   6.3000000e+00   3.8000000e+00   6.4000000e+00   4.6000000e+00   4.3000000e+00   5.4000000e+00   5.6000000e+00   6.1000000e+00   4.4000000e+00   6.2000000e+00   5.4000000e+00   5.0000000e+00   6.8000000e+00   4.9000000e+00   6.1000000e+00   5.4000000e+00   7.0000000e+00   6.0000000e+00   5.9000000e+00   6.2000000e+00   7.0000000e+00   7.2000000e+00   
5.9000000e+00   4.4000000e+00   4.8000000e+00   4.6000000e+00   5.0000000e+00   6.8000000e+00   5.2000000e+00   5.9000000e+00   6.6000000e+00   6.5000000e+00   4.8000000e+00   5.1000000e+00   5.3000000e+00   5.9000000e+00   5.2000000e+00   3.8000000e+00   5.2000000e+00   4.9000000e+00   5.1000000e+00   5.7000000e+00   3.5000000e+00   5.1000000e+00   8.5000000e+00   6.9000000e+00   8.9000000e+00   7.6000000e+00   8.3000000e+00   1.0100000e+01   5.6000000e+00   9.3000000e+00   8.6000000e+00   9.8000000e+00   7.2000000e+00   7.7000000e+00   8.2000000e+00   7.0000000e+00   7.3000000e+00   7.6000000e+00   7.6000000e+00   1.0800000e+01   1.1100000e+01   7.1000000e+00   8.5000000e+00   6.5000000e+00   1.0400000e+01   7.1000000e+00   8.2000000e+00   8.6000000e+00   6.8000000e+00   6.6000000e+00   8.1000000e+00   8.4000000e+00   9.4000000e+00   1.0500000e+01   8.2000000e+00   6.9000000e+00   7.3000000e+00   9.9000000e+00   8.1000000e+00   7.4000000e+00   6.4000000e+00   8.1000000e+00   8.4000000e+00   8.0000000e+00   6.9000000e+00   8.6000000e+00   8.6000000e+00   8.0000000e+00   7.5000000e+00   7.5000000e+00   7.7000000e+00   6.6000000e+00   1.3000000e+00   1.6000000e+00   7.0000000e-01   6.0000000e-01   2.3000000e+00   1.4000000e+00   1.2000000e+00   1.5000000e+00   1.4000000e+00   1.0000000e+00   1.3000000e+00   6.0000000e-01   8.0000000e-01   6.4000000e+00   5.7000000e+00   6.7000000e+00   5.0000000e+00   6.3000000e+00   5.2000000e+00   5.8000000e+00   4.5000000e+00   6.1000000e+00   4.9000000e+00   5.0000000e+00   5.1000000e+00   5.3000000e+00   5.8000000e+00   4.1000000e+00   5.9000000e+00   5.1000000e+00   4.7000000e+00   6.5000000e+00   4.6000000e+00   5.8000000e+00   5.1000000e+00   6.7000000e+00   5.7000000e+00   5.6000000e+00   5.9000000e+00   6.7000000e+00   6.9000000e+00   5.6000000e+00   4.1000000e+00   4.5000000e+00   4.3000000e+00   4.7000000e+00   6.5000000e+00   5.1000000e+00   5.2000000e+00   6.3000000e+00   6.2000000e+00   4.5000000e+00   4.8000000e+00   5.0000000e+00   5.6000000e+00   4.9000000e+00   4.5000000e+00   4.9000000e+00   4.6000000e+00   4.8000000e+00   5.4000000e+00   4.0000000e+00   4.8000000e+00   8.0000000e+00   6.6000000e+00   8.6000000e+00   7.3000000e+00   8.0000000e+00   9.8000000e+00   6.3000000e+00   9.0000000e+00   8.3000000e+00   8.9000000e+00   6.9000000e+00   7.4000000e+00   7.9000000e+00   6.7000000e+00   7.0000000e+00   7.3000000e+00   7.3000000e+00   9.9000000e+00   1.0800000e+01   6.8000000e+00   8.2000000e+00   6.2000000e+00   1.0100000e+01   6.8000000e+00   7.7000000e+00   8.3000000e+00   6.5000000e+00   6.3000000e+00   7.8000000e+00   8.1000000e+00   9.1000000e+00   9.6000000e+00   7.9000000e+00   6.6000000e+00   7.0000000e+00   9.6000000e+00   7.4000000e+00   7.1000000e+00   6.1000000e+00   7.8000000e+00   8.1000000e+00   7.7000000e+00   6.6000000e+00   8.3000000e+00   8.1000000e+00   7.7000000e+00   7.2000000e+00   7.2000000e+00   7.0000000e+00   6.3000000e+00   9.0000000e-01   6.0000000e-01   9.0000000e-01   1.6000000e+00   9.0000000e-01   1.1000000e+00   1.6000000e+00   5.0000000e-01   1.1000000e+00   6.0000000e-01   1.1000000e+00   5.0000000e-01   6.7000000e+00   6.0000000e+00   6.8000000e+00   5.1000000e+00   6.4000000e+00   5.3000000e+00   6.3000000e+00   3.4000000e+00   6.2000000e+00   4.4000000e+00   4.1000000e+00   5.2000000e+00   5.4000000e+00   5.9000000e+00   4.2000000e+00   6.0000000e+00   5.2000000e+00   4.8000000e+00   6.6000000e+00   4.7000000e+00   6.1000000e+00   5.2000000e+00   6.8000000e+00   5.8000000e+00   5.7000000e+00   
6.0000000e+00   6.8000000e+00   7.0000000e+00   5.7000000e+00   4.2000000e+00   4.6000000e+00   4.4000000e+00   4.8000000e+00   6.6000000e+00   5.0000000e+00   5.9000000e+00   6.4000000e+00   6.3000000e+00   4.6000000e+00   4.9000000e+00   5.1000000e+00   5.7000000e+00   5.0000000e+00   3.6000000e+00   5.0000000e+00   4.7000000e+00   4.9000000e+00   5.5000000e+00   3.3000000e+00   4.9000000e+00   8.5000000e+00   6.7000000e+00   8.7000000e+00   7.4000000e+00   8.1000000e+00   9.9000000e+00   5.2000000e+00   9.1000000e+00   8.4000000e+00   9.8000000e+00   7.2000000e+00   7.5000000e+00   8.0000000e+00   6.8000000e+00   7.1000000e+00   7.6000000e+00   7.4000000e+00   1.0800000e+01   1.0900000e+01   6.9000000e+00   8.5000000e+00   6.3000000e+00   1.0200000e+01   6.9000000e+00   8.2000000e+00   8.6000000e+00   6.6000000e+00   6.4000000e+00   7.9000000e+00   8.2000000e+00   9.2000000e+00   1.0500000e+01   8.0000000e+00   6.7000000e+00   7.1000000e+00   9.7000000e+00   8.1000000e+00   7.2000000e+00   6.2000000e+00   7.9000000e+00   8.2000000e+00   7.8000000e+00   6.7000000e+00   8.6000000e+00   8.6000000e+00   7.8000000e+00   7.3000000e+00   7.3000000e+00   7.7000000e+00   6.4000000e+00   1.3000000e+00   1.2000000e+00   9.0000000e-01   2.0000000e-01   1.8000000e+00   2.3000000e+00   6.0000000e-01   1.8000000e+00   5.0000000e-01   1.8000000e+00   1.0000000e+00   7.4000000e+00   6.7000000e+00   7.5000000e+00   5.6000000e+00   6.9000000e+00   5.8000000e+00   7.0000000e+00   3.9000000e+00   6.7000000e+00   4.9000000e+00   4.6000000e+00   5.7000000e+00   5.9000000e+00   6.4000000e+00   4.7000000e+00   6.7000000e+00   5.7000000e+00   5.3000000e+00   7.1000000e+00   5.2000000e+00   6.8000000e+00   5.7000000e+00   7.3000000e+00   6.3000000e+00   6.2000000e+00   6.5000000e+00   7.3000000e+00   7.5000000e+00   6.2000000e+00   4.7000000e+00   5.1000000e+00   4.9000000e+00   5.3000000e+00   7.1000000e+00   5.5000000e+00   6.6000000e+00   7.1000000e+00   6.8000000e+00   5.1000000e+00   5.4000000e+00   5.6000000e+00   6.2000000e+00   5.5000000e+00   4.1000000e+00   5.5000000e+00   5.2000000e+00   5.4000000e+00   6.0000000e+00   3.8000000e+00   5.4000000e+00   9.2000000e+00   7.2000000e+00   9.2000000e+00   7.9000000e+00   8.6000000e+00   1.0400000e+01   5.7000000e+00   9.6000000e+00   8.9000000e+00   1.0500000e+01   7.9000000e+00   8.0000000e+00   8.5000000e+00   7.3000000e+00   7.6000000e+00   8.3000000e+00   7.9000000e+00   1.1500000e+01   1.1400000e+01   7.4000000e+00   9.2000000e+00   6.8000000e+00   1.0700000e+01   7.4000000e+00   8.9000000e+00   9.3000000e+00   7.1000000e+00   6.9000000e+00   8.4000000e+00   8.7000000e+00   9.7000000e+00   1.1200000e+01   8.5000000e+00   7.2000000e+00   7.6000000e+00   1.0200000e+01   8.8000000e+00   7.9000000e+00   6.7000000e+00   8.6000000e+00   8.9000000e+00   8.5000000e+00   7.2000000e+00   9.3000000e+00   9.3000000e+00   8.3000000e+00   7.8000000e+00   7.8000000e+00   8.4000000e+00   6.9000000e+00   5.0000000e-01   2.0000000e+00   1.1000000e+00   7.0000000e-01   1.0000000e+00   9.0000000e-01   5.0000000e-01   8.0000000e-01   5.0000000e-01   3.0000000e-01   6.5000000e+00   5.8000000e+00   6.8000000e+00   5.1000000e+00   6.4000000e+00   5.3000000e+00   5.9000000e+00   3.8000000e+00   6.2000000e+00   4.4000000e+00   4.3000000e+00   5.2000000e+00   5.4000000e+00   5.9000000e+00   4.2000000e+00   6.0000000e+00   5.2000000e+00   4.8000000e+00   6.6000000e+00   4.7000000e+00   5.9000000e+00   5.2000000e+00   6.8000000e+00   5.8000000e+00   5.7000000e+00   6.0000000e+00   
6.8000000e+00   7.0000000e+00   5.7000000e+00   4.2000000e+00   4.6000000e+00   4.4000000e+00   4.8000000e+00   6.6000000e+00   5.0000000e+00   5.3000000e+00   6.4000000e+00   6.3000000e+00   4.6000000e+00   4.9000000e+00   5.1000000e+00   5.7000000e+00   5.0000000e+00   3.8000000e+00   5.0000000e+00   4.7000000e+00   4.9000000e+00   5.5000000e+00   3.3000000e+00   4.9000000e+00   8.1000000e+00   6.7000000e+00   8.7000000e+00   7.4000000e+00   8.1000000e+00   9.9000000e+00   5.6000000e+00   9.1000000e+00   8.4000000e+00   9.2000000e+00   7.0000000e+00   7.5000000e+00   8.0000000e+00   6.8000000e+00   7.1000000e+00   7.4000000e+00   7.4000000e+00   1.0200000e+01   1.0900000e+01   6.9000000e+00   8.3000000e+00   6.3000000e+00   1.0200000e+01   6.9000000e+00   7.8000000e+00   8.4000000e+00   6.6000000e+00   6.4000000e+00   7.9000000e+00   8.2000000e+00   9.2000000e+00   9.9000000e+00   8.0000000e+00   6.7000000e+00   7.1000000e+00   9.7000000e+00   7.5000000e+00   7.2000000e+00   6.2000000e+00   7.9000000e+00   8.2000000e+00   7.8000000e+00   6.7000000e+00   8.4000000e+00   8.2000000e+00   7.8000000e+00   7.3000000e+00   7.3000000e+00   7.1000000e+00   6.4000000e+00   1.7000000e+00   1.0000000e+00   6.0000000e-01   1.1000000e+00   8.0000000e-01   8.0000000e-01   9.0000000e-01   8.0000000e-01   4.0000000e-01   6.8000000e+00   6.1000000e+00   7.1000000e+00   5.4000000e+00   6.7000000e+00   5.6000000e+00   6.2000000e+00   3.9000000e+00   6.5000000e+00   4.7000000e+00   4.4000000e+00   5.5000000e+00   5.7000000e+00   6.2000000e+00   4.5000000e+00   6.3000000e+00   5.5000000e+00   5.1000000e+00   6.9000000e+00   5.0000000e+00   6.2000000e+00   5.5000000e+00   7.1000000e+00   6.1000000e+00   6.0000000e+00   6.3000000e+00   7.1000000e+00   7.3000000e+00   6.0000000e+00   4.5000000e+00   4.9000000e+00   4.7000000e+00   5.1000000e+00   6.9000000e+00   5.3000000e+00   5.6000000e+00   6.7000000e+00   6.6000000e+00   4.9000000e+00   5.2000000e+00   5.4000000e+00   6.0000000e+00   5.3000000e+00   3.9000000e+00   5.3000000e+00   5.0000000e+00   5.2000000e+00   5.8000000e+00   3.6000000e+00   5.2000000e+00   8.4000000e+00   7.0000000e+00   9.0000000e+00   7.7000000e+00   8.4000000e+00   1.0200000e+01   5.7000000e+00   9.4000000e+00   8.7000000e+00   9.3000000e+00   7.3000000e+00   7.8000000e+00   8.3000000e+00   7.1000000e+00   7.4000000e+00   7.7000000e+00   7.7000000e+00   1.0300000e+01   1.1200000e+01   7.2000000e+00   8.6000000e+00   6.6000000e+00   1.0500000e+01   7.2000000e+00   8.1000000e+00   8.7000000e+00   6.9000000e+00   6.7000000e+00   8.2000000e+00   8.5000000e+00   9.5000000e+00   1.0000000e+01   8.3000000e+00   7.0000000e+00   7.4000000e+00   1.0000000e+01   7.8000000e+00   7.5000000e+00   6.5000000e+00   8.2000000e+00   8.5000000e+00   8.1000000e+00   7.0000000e+00   8.7000000e+00   8.5000000e+00   8.1000000e+00   7.6000000e+00   7.6000000e+00   7.4000000e+00   6.7000000e+00   1.1000000e+00   2.3000000e+00   2.8000000e+00   1.1000000e+00   2.5000000e+00   1.2000000e+00   2.5000000e+00   1.7000000e+00   7.9000000e+00   7.2000000e+00   8.0000000e+00   4.7000000e+00   7.0000000e+00   5.9000000e+00   7.5000000e+00   3.2000000e+00   7.0000000e+00   4.8000000e+00   3.7000000e+00   6.2000000e+00   5.0000000e+00   6.7000000e+00   5.0000000e+00   7.2000000e+00   6.2000000e+00   5.2000000e+00   6.2000000e+00   4.7000000e+00   7.3000000e+00   5.8000000e+00   6.8000000e+00   6.4000000e+00   6.5000000e+00   7.0000000e+00   7.4000000e+00   8.0000000e+00   6.5000000e+00   4.4000000e+00   4.4000000e+00   
4.2000000e+00   5.2000000e+00   7.0000000e+00   6.0000000e+00   7.1000000e+00   7.6000000e+00   5.9000000e+00   5.6000000e+00   4.9000000e+00   5.3000000e+00   6.7000000e+00   5.2000000e+00   3.2000000e+00   5.4000000e+00   5.7000000e+00   5.7000000e+00   6.3000000e+00   3.3000000e+00   5.5000000e+00   9.7000000e+00   7.1000000e+00   9.7000000e+00   8.2000000e+00   9.1000000e+00   1.0900000e+01   5.2000000e+00   9.9000000e+00   8.4000000e+00   1.1000000e+01   8.4000000e+00   7.9000000e+00   9.0000000e+00   6.8000000e+00   7.7000000e+00   8.8000000e+00   8.4000000e+00   1.2000000e+01   1.1100000e+01   6.5000000e+00   9.7000000e+00   6.9000000e+00   1.0800000e+01   7.3000000e+00   9.4000000e+00   9.8000000e+00   7.2000000e+00   7.4000000e+00   8.5000000e+00   9.2000000e+00   9.8000000e+00   1.1700000e+01   8.6000000e+00   7.3000000e+00   7.3000000e+00   1.0700000e+01   9.3000000e+00   8.4000000e+00   7.2000000e+00   9.1000000e+00   9.4000000e+00   9.0000000e+00   7.1000000e+00   9.8000000e+00   9.8000000e+00   8.8000000e+00   7.3000000e+00   8.3000000e+00   8.9000000e+00   7.4000000e+00   1.6000000e+00   2.1000000e+00   8.0000000e-01   1.6000000e+00   3.0000000e-01   1.6000000e+00   8.0000000e-01   7.2000000e+00   6.5000000e+00   7.5000000e+00   5.8000000e+00   7.1000000e+00   6.0000000e+00   6.8000000e+00   4.1000000e+00   6.9000000e+00   5.1000000e+00   4.8000000e+00   5.9000000e+00   6.1000000e+00   6.6000000e+00   4.9000000e+00   6.7000000e+00   5.9000000e+00   5.5000000e+00   7.3000000e+00   5.4000000e+00   6.6000000e+00   5.9000000e+00   7.5000000e+00   6.5000000e+00   6.4000000e+00   6.7000000e+00   7.5000000e+00   7.7000000e+00   6.4000000e+00   4.9000000e+00   5.3000000e+00   5.1000000e+00   5.5000000e+00   7.3000000e+00   5.7000000e+00   6.4000000e+00   7.1000000e+00   7.0000000e+00   5.3000000e+00   5.6000000e+00   5.8000000e+00   6.4000000e+00   5.7000000e+00   4.3000000e+00   5.7000000e+00   5.4000000e+00   5.6000000e+00   6.2000000e+00   4.0000000e+00   5.6000000e+00   9.0000000e+00   7.4000000e+00   9.4000000e+00   8.1000000e+00   8.8000000e+00   1.0600000e+01   5.9000000e+00   9.8000000e+00   9.1000000e+00   1.0300000e+01   7.7000000e+00   8.2000000e+00   8.7000000e+00   7.5000000e+00   7.8000000e+00   8.1000000e+00   8.1000000e+00   1.1300000e+01   1.1600000e+01   7.6000000e+00   9.0000000e+00   7.0000000e+00   1.0900000e+01   7.6000000e+00   8.7000000e+00   9.1000000e+00   7.3000000e+00   7.1000000e+00   8.6000000e+00   8.9000000e+00   9.9000000e+00   1.1000000e+01   8.7000000e+00   7.4000000e+00   7.8000000e+00   1.0400000e+01   8.6000000e+00   7.9000000e+00   6.9000000e+00   8.6000000e+00   8.9000000e+00   8.5000000e+00   7.4000000e+00   9.1000000e+00   9.1000000e+00   8.5000000e+00   8.0000000e+00   8.0000000e+00   8.2000000e+00   7.1000000e+00   9.0000000e-01   1.2000000e+00   8.0000000e-01   1.3000000e+00   1.0000000e+00   8.0000000e-01   6.2000000e+00   5.5000000e+00   6.5000000e+00   4.8000000e+00   6.1000000e+00   5.0000000e+00   5.6000000e+00   3.3000000e+00   5.9000000e+00   4.1000000e+00   3.8000000e+00   4.9000000e+00   5.1000000e+00   5.6000000e+00   3.9000000e+00   5.7000000e+00   4.9000000e+00   4.5000000e+00   6.3000000e+00   4.4000000e+00   5.6000000e+00   4.9000000e+00   6.5000000e+00   5.5000000e+00   5.4000000e+00   5.7000000e+00   6.5000000e+00   6.7000000e+00   5.4000000e+00   3.9000000e+00   4.3000000e+00   4.1000000e+00   4.5000000e+00   6.3000000e+00   4.7000000e+00   5.0000000e+00   6.1000000e+00   6.0000000e+00   4.3000000e+00   4.6000000e+00   
4.8000000e+00   5.4000000e+00   4.7000000e+00   3.3000000e+00   4.7000000e+00   4.4000000e+00   4.6000000e+00   5.2000000e+00   3.0000000e+00   4.6000000e+00   7.8000000e+00   6.4000000e+00   8.4000000e+00   7.1000000e+00   7.8000000e+00   9.6000000e+00   5.1000000e+00   8.8000000e+00   8.1000000e+00   8.7000000e+00   6.7000000e+00   7.2000000e+00   7.7000000e+00   6.5000000e+00   6.8000000e+00   7.1000000e+00   7.1000000e+00   9.7000000e+00   1.0600000e+01   6.6000000e+00   8.0000000e+00   6.0000000e+00   9.9000000e+00   6.6000000e+00   7.5000000e+00   8.1000000e+00   6.3000000e+00   6.1000000e+00   7.6000000e+00   7.9000000e+00   8.9000000e+00   9.4000000e+00   7.7000000e+00   6.4000000e+00   6.8000000e+00   9.4000000e+00   7.2000000e+00   6.9000000e+00   5.9000000e+00   7.6000000e+00   7.9000000e+00   7.5000000e+00   6.4000000e+00   8.1000000e+00   7.9000000e+00   7.5000000e+00   7.0000000e+00   7.0000000e+00   6.8000000e+00   6.1000000e+00   1.7000000e+00   5.0000000e-01   1.8000000e+00   9.0000000e-01   1.3000000e+00   6.3000000e+00   5.6000000e+00   6.6000000e+00   4.9000000e+00   6.2000000e+00   5.1000000e+00   5.7000000e+00   3.6000000e+00   6.0000000e+00   4.2000000e+00   4.1000000e+00   5.0000000e+00   5.2000000e+00   5.7000000e+00   4.0000000e+00   5.8000000e+00   5.0000000e+00   4.6000000e+00   6.4000000e+00   4.5000000e+00   5.7000000e+00   5.0000000e+00   6.6000000e+00   5.6000000e+00   5.5000000e+00   5.8000000e+00   6.6000000e+00   6.8000000e+00   5.5000000e+00   4.0000000e+00   4.4000000e+00   4.2000000e+00   4.6000000e+00   6.4000000e+00   4.8000000e+00   5.1000000e+00   6.2000000e+00   6.1000000e+00   4.4000000e+00   4.7000000e+00   4.9000000e+00   5.5000000e+00   4.8000000e+00   3.6000000e+00   4.8000000e+00   4.5000000e+00   4.7000000e+00   5.3000000e+00   3.1000000e+00   4.7000000e+00   7.9000000e+00   6.5000000e+00   8.5000000e+00   7.2000000e+00   7.9000000e+00   9.7000000e+00   5.4000000e+00   8.9000000e+00   8.2000000e+00   8.6000000e+00   6.8000000e+00   7.3000000e+00   7.8000000e+00   6.6000000e+00   6.9000000e+00   7.2000000e+00   7.2000000e+00   9.2000000e+00   1.0700000e+01   6.7000000e+00   8.1000000e+00   6.1000000e+00   1.0000000e+01   6.7000000e+00   7.6000000e+00   8.2000000e+00   6.4000000e+00   6.2000000e+00   7.7000000e+00   8.0000000e+00   9.0000000e+00   8.9000000e+00   7.8000000e+00   6.5000000e+00   6.9000000e+00   9.5000000e+00   7.3000000e+00   7.0000000e+00   6.0000000e+00   7.7000000e+00   8.0000000e+00   7.6000000e+00   6.5000000e+00   8.2000000e+00   8.0000000e+00   7.6000000e+00   7.1000000e+00   7.1000000e+00   6.9000000e+00   6.2000000e+00   1.4000000e+00   5.0000000e-01   1.4000000e+00   6.0000000e-01   6.8000000e+00   6.1000000e+00   6.9000000e+00   5.0000000e+00   6.3000000e+00   5.2000000e+00   6.4000000e+00   3.3000000e+00   6.1000000e+00   4.3000000e+00   4.0000000e+00   5.1000000e+00   5.3000000e+00   5.8000000e+00   4.1000000e+00   6.1000000e+00   5.1000000e+00   4.7000000e+00   6.5000000e+00   4.6000000e+00   6.2000000e+00   5.1000000e+00   6.7000000e+00   5.7000000e+00   5.6000000e+00   5.9000000e+00   6.7000000e+00   6.9000000e+00   5.6000000e+00   4.1000000e+00   4.5000000e+00   4.3000000e+00   4.7000000e+00   6.5000000e+00   4.9000000e+00   6.0000000e+00   6.5000000e+00   6.2000000e+00   4.5000000e+00   4.8000000e+00   5.0000000e+00   5.6000000e+00   4.9000000e+00   3.5000000e+00   4.9000000e+00   4.6000000e+00   4.8000000e+00   5.4000000e+00   3.2000000e+00   4.8000000e+00   8.6000000e+00   6.6000000e+00   8.6000000e+00   
7.3000000e+00   8.0000000e+00   9.8000000e+00   5.1000000e+00   9.0000000e+00   8.3000000e+00   9.9000000e+00   7.3000000e+00   7.4000000e+00   7.9000000e+00   6.7000000e+00   7.0000000e+00   7.7000000e+00   7.3000000e+00   1.0900000e+01   1.0800000e+01   6.8000000e+00   8.6000000e+00   6.2000000e+00   1.0100000e+01   6.8000000e+00   8.3000000e+00   8.7000000e+00   6.5000000e+00   6.3000000e+00   7.8000000e+00   8.1000000e+00   9.1000000e+00   1.0600000e+01   7.9000000e+00   6.6000000e+00   7.0000000e+00   9.6000000e+00   8.2000000e+00   7.3000000e+00   6.1000000e+00   8.0000000e+00   8.3000000e+00   7.9000000e+00   6.6000000e+00   8.7000000e+00   8.7000000e+00   7.7000000e+00   7.2000000e+00   7.2000000e+00   7.8000000e+00   6.3000000e+00   1.3000000e+00   4.0000000e-01   8.0000000e-01   6.8000000e+00   6.1000000e+00   7.1000000e+00   5.4000000e+00   6.7000000e+00   5.6000000e+00   6.2000000e+00   4.1000000e+00   6.5000000e+00   4.7000000e+00   4.6000000e+00   5.5000000e+00   5.7000000e+00   6.2000000e+00   4.5000000e+00   6.3000000e+00   5.5000000e+00   5.1000000e+00   6.9000000e+00   5.0000000e+00   6.2000000e+00   5.5000000e+00   7.1000000e+00   6.1000000e+00   6.0000000e+00   6.3000000e+00   7.1000000e+00   7.3000000e+00   6.0000000e+00   4.5000000e+00   4.9000000e+00   4.7000000e+00   5.1000000e+00   6.9000000e+00   5.3000000e+00   5.6000000e+00   6.7000000e+00   6.6000000e+00   4.9000000e+00   5.2000000e+00   5.4000000e+00   6.0000000e+00   5.3000000e+00   4.1000000e+00   5.3000000e+00   5.0000000e+00   5.2000000e+00   5.8000000e+00   3.6000000e+00   5.2000000e+00   8.4000000e+00   7.0000000e+00   9.0000000e+00   7.7000000e+00   8.4000000e+00   1.0200000e+01   5.9000000e+00   9.4000000e+00   8.7000000e+00   9.1000000e+00   7.3000000e+00   7.8000000e+00   8.3000000e+00   7.1000000e+00   7.4000000e+00   7.7000000e+00   7.7000000e+00   9.7000000e+00   1.1200000e+01   7.2000000e+00   8.6000000e+00   6.6000000e+00   1.0500000e+01   7.2000000e+00   8.1000000e+00   8.7000000e+00   6.9000000e+00   6.7000000e+00   8.2000000e+00   8.5000000e+00   9.5000000e+00   9.4000000e+00   8.3000000e+00   7.0000000e+00   7.4000000e+00   1.0000000e+01   7.8000000e+00   7.5000000e+00   6.5000000e+00   8.2000000e+00   8.5000000e+00   8.1000000e+00   7.0000000e+00   8.7000000e+00   8.5000000e+00   8.1000000e+00   7.6000000e+00   7.6000000e+00   7.4000000e+00   6.7000000e+00   1.3000000e+00   5.0000000e-01   6.9000000e+00   6.2000000e+00   7.2000000e+00   5.5000000e+00   6.8000000e+00   5.7000000e+00   6.5000000e+00   3.8000000e+00   6.6000000e+00   4.8000000e+00   4.5000000e+00   5.6000000e+00   5.8000000e+00   6.3000000e+00   4.6000000e+00   6.4000000e+00   5.6000000e+00   5.2000000e+00   7.0000000e+00   5.1000000e+00   6.3000000e+00   5.6000000e+00   7.2000000e+00   6.2000000e+00   6.1000000e+00   6.4000000e+00   7.2000000e+00   7.4000000e+00   6.1000000e+00   4.6000000e+00   5.0000000e+00   4.8000000e+00   5.2000000e+00   7.0000000e+00   5.4000000e+00   6.1000000e+00   6.8000000e+00   6.7000000e+00   5.0000000e+00   5.3000000e+00   5.5000000e+00   6.1000000e+00   5.4000000e+00   4.0000000e+00   5.4000000e+00   5.1000000e+00   5.3000000e+00   5.9000000e+00   3.7000000e+00   5.3000000e+00   8.7000000e+00   7.1000000e+00   9.1000000e+00   7.8000000e+00   8.5000000e+00   1.0300000e+01   5.6000000e+00   9.5000000e+00   8.8000000e+00   1.0000000e+01   7.4000000e+00   7.9000000e+00   8.4000000e+00   7.2000000e+00   7.5000000e+00   7.8000000e+00   7.8000000e+00   1.1000000e+01   1.1300000e+01   7.3000000e+00   
8.7000000e+00   6.7000000e+00   1.0600000e+01   7.3000000e+00   8.4000000e+00   8.8000000e+00   7.0000000e+00   6.8000000e+00   8.3000000e+00   8.6000000e+00   9.6000000e+00   1.0700000e+01   8.4000000e+00   7.1000000e+00   7.5000000e+00   1.0100000e+01   8.3000000e+00   7.6000000e+00   6.6000000e+00   8.3000000e+00   8.6000000e+00   8.2000000e+00   7.1000000e+00   8.8000000e+00   8.8000000e+00   8.2000000e+00   7.7000000e+00   7.7000000e+00   7.9000000e+00   6.8000000e+00   8.0000000e-01   6.6000000e+00   5.9000000e+00   6.9000000e+00   5.2000000e+00   6.5000000e+00   5.4000000e+00   6.0000000e+00   4.3000000e+00   6.3000000e+00   4.7000000e+00   4.8000000e+00   5.3000000e+00   5.5000000e+00   6.0000000e+00   4.3000000e+00   6.1000000e+00   5.3000000e+00   4.9000000e+00   6.7000000e+00   4.8000000e+00   6.0000000e+00   5.3000000e+00   6.9000000e+00   5.9000000e+00   5.8000000e+00   6.1000000e+00   6.9000000e+00   7.1000000e+00   5.8000000e+00   4.3000000e+00   4.7000000e+00   4.5000000e+00   4.9000000e+00   6.7000000e+00   5.1000000e+00   5.4000000e+00   6.5000000e+00   6.4000000e+00   4.7000000e+00   5.0000000e+00   5.2000000e+00   5.8000000e+00   5.1000000e+00   4.3000000e+00   5.1000000e+00   4.8000000e+00   5.0000000e+00   5.6000000e+00   3.8000000e+00   5.0000000e+00   8.2000000e+00   6.8000000e+00   8.8000000e+00   7.5000000e+00   8.2000000e+00   1.0000000e+01   6.1000000e+00   9.2000000e+00   8.5000000e+00   8.9000000e+00   7.1000000e+00   7.6000000e+00   8.1000000e+00   6.9000000e+00   7.2000000e+00   7.5000000e+00   7.5000000e+00   9.7000000e+00   1.1000000e+01   7.0000000e+00   8.4000000e+00   6.4000000e+00   1.0300000e+01   7.0000000e+00   7.9000000e+00   8.5000000e+00   6.7000000e+00   6.5000000e+00   8.0000000e+00   8.3000000e+00   9.3000000e+00   9.4000000e+00   8.1000000e+00   6.8000000e+00   7.2000000e+00   9.8000000e+00   7.6000000e+00   7.3000000e+00   6.3000000e+00   8.0000000e+00   8.3000000e+00   7.9000000e+00   6.8000000e+00   8.5000000e+00   8.3000000e+00   7.9000000e+00   7.4000000e+00   7.4000000e+00   7.2000000e+00   6.5000000e+00   6.6000000e+00   5.9000000e+00   6.9000000e+00   5.2000000e+00   6.5000000e+00   5.4000000e+00   6.0000000e+00   3.7000000e+00   6.3000000e+00   4.5000000e+00   4.2000000e+00   5.3000000e+00   5.5000000e+00   6.0000000e+00   4.3000000e+00   6.1000000e+00   5.3000000e+00   4.9000000e+00   6.7000000e+00   4.8000000e+00   6.0000000e+00   5.3000000e+00   6.9000000e+00   5.9000000e+00   5.8000000e+00   6.1000000e+00   6.9000000e+00   7.1000000e+00   5.8000000e+00   4.3000000e+00   4.7000000e+00   4.5000000e+00   4.9000000e+00   6.7000000e+00   5.1000000e+00   5.6000000e+00   6.5000000e+00   6.4000000e+00   4.7000000e+00   5.0000000e+00   5.2000000e+00   5.8000000e+00   5.1000000e+00   3.7000000e+00   5.1000000e+00   4.8000000e+00   5.0000000e+00   5.6000000e+00   3.4000000e+00   5.0000000e+00   8.2000000e+00   6.8000000e+00   8.8000000e+00   7.5000000e+00   8.2000000e+00   1.0000000e+01   5.5000000e+00   9.2000000e+00   8.5000000e+00   9.5000000e+00   7.1000000e+00   7.6000000e+00   8.1000000e+00   6.9000000e+00   7.2000000e+00   7.5000000e+00   7.5000000e+00   1.0500000e+01   1.1000000e+01   7.0000000e+00   8.4000000e+00   6.4000000e+00   1.0300000e+01   7.0000000e+00   7.9000000e+00   8.5000000e+00   6.7000000e+00   6.5000000e+00   8.0000000e+00   8.3000000e+00   9.3000000e+00   1.0200000e+01   8.1000000e+00   6.8000000e+00   7.2000000e+00   9.8000000e+00   7.8000000e+00   7.3000000e+00   6.3000000e+00   8.0000000e+00   8.3000000e+00   
7.9000000e+00   6.8000000e+00   8.5000000e+00   8.3000000e+00   7.9000000e+00   7.4000000e+00   7.4000000e+00   7.4000000e+00   6.5000000e+00   9.0000000e-01   5.0000000e-01   3.2000000e+00   1.1000000e+00   2.0000000e+00   1.0000000e+00   4.7000000e+00   9.0000000e-01   3.1000000e+00   4.8000000e+00   1.9000000e+00   3.1000000e+00   1.2000000e+00   2.9000000e+00   7.0000000e-01   1.9000000e+00   2.7000000e+00   2.1000000e+00   3.2000000e+00   1.6000000e+00   2.1000000e+00   1.7000000e+00   1.5000000e+00   1.4000000e+00   9.0000000e-01   7.0000000e-01   1.1000000e+00   1.6000000e+00   3.5000000e+00   3.5000000e+00   3.7000000e+00   2.7000000e+00   2.1000000e+00   2.1000000e+00   1.6000000e+00   5.0000000e-01   2.0000000e+00   2.3000000e+00   3.0000000e+00   2.6000000e+00   1.2000000e+00   2.7000000e+00   4.7000000e+00   2.5000000e+00   2.2000000e+00   2.2000000e+00   1.6000000e+00   4.6000000e+00   2.4000000e+00   3.2000000e+00   2.6000000e+00   2.2000000e+00   2.3000000e+00   2.6000000e+00   3.4000000e+00   3.3000000e+00   2.6000000e+00   2.5000000e+00   3.1000000e+00   1.5000000e+00   2.2000000e+00   1.9000000e+00   2.9000000e+00   3.0000000e+00   2.1000000e+00   1.9000000e+00   4.1000000e+00   4.4000000e+00   2.4000000e+00   2.0000000e+00   2.6000000e+00   3.7000000e+00   1.8000000e+00   2.1000000e+00   1.9000000e+00   1.7000000e+00   1.7000000e+00   2.6000000e+00   1.7000000e+00   2.7000000e+00   3.8000000e+00   2.7000000e+00   1.6000000e+00   2.4000000e+00   3.2000000e+00   2.8000000e+00   1.9000000e+00   1.7000000e+00   1.6000000e+00   2.3000000e+00   1.5000000e+00   2.6000000e+00   2.3000000e+00   2.5000000e+00   1.9000000e+00   2.2000000e+00   1.8000000e+00   2.6000000e+00   2.1000000e+00   1.0000000e+00   2.5000000e+00   6.0000000e-01   1.3000000e+00   5.0000000e-01   4.0000000e+00   8.0000000e-01   2.4000000e+00   4.1000000e+00   1.0000000e+00   2.4000000e+00   9.0000000e-01   2.2000000e+00   6.0000000e-01   1.0000000e+00   2.0000000e+00   1.2000000e+00   2.5000000e+00   1.1000000e+00   1.4000000e+00   1.2000000e+00   1.2000000e+00   7.0000000e-01   6.0000000e-01   1.2000000e+00   1.2000000e+00   7.0000000e-01   2.8000000e+00   2.8000000e+00   3.0000000e+00   2.0000000e+00   1.6000000e+00   1.2000000e+00   7.0000000e-01   6.0000000e-01   1.3000000e+00   1.6000000e+00   2.3000000e+00   1.9000000e+00   7.0000000e-01   2.0000000e+00   4.0000000e+00   1.8000000e+00   1.5000000e+00   1.5000000e+00   9.0000000e-01   3.9000000e+00   1.7000000e+00   2.7000000e+00   2.1000000e+00   2.9000000e+00   1.8000000e+00   2.3000000e+00   4.1000000e+00   2.4000000e+00   3.3000000e+00   2.6000000e+00   3.8000000e+00   1.2000000e+00   1.7000000e+00   2.2000000e+00   2.4000000e+00   2.5000000e+00   1.6000000e+00   1.6000000e+00   4.8000000e+00   5.1000000e+00   1.9000000e+00   2.5000000e+00   2.1000000e+00   4.4000000e+00   1.3000000e+00   2.2000000e+00   2.6000000e+00   1.2000000e+00   1.2000000e+00   2.1000000e+00   2.4000000e+00   3.4000000e+00   4.5000000e+00   2.2000000e+00   1.1000000e+00   2.1000000e+00   3.9000000e+00   2.3000000e+00   1.4000000e+00   1.2000000e+00   2.1000000e+00   2.4000000e+00   2.0000000e+00   2.1000000e+00   2.6000000e+00   2.6000000e+00   2.0000000e+00   1.7000000e+00   1.5000000e+00   2.1000000e+00   1.6000000e+00   3.3000000e+00   1.0000000e+00   2.1000000e+00   1.1000000e+00   4.8000000e+00   1.0000000e+00   3.2000000e+00   4.9000000e+00   1.8000000e+00   3.2000000e+00   1.3000000e+00   3.0000000e+00   8.0000000e-01   1.8000000e+00   2.8000000e+00   2.0000000e+00   
3.3000000e+00   1.5000000e+00   2.2000000e+00   1.2000000e+00   1.6000000e+00   1.5000000e+00   1.0000000e+00   6.0000000e-01   6.0000000e-01   1.5000000e+00   3.6000000e+00   3.6000000e+00   3.8000000e+00   2.8000000e+00   1.6000000e+00   2.0000000e+00   1.7000000e+00   4.0000000e-01   2.1000000e+00   2.4000000e+00   3.1000000e+00   2.7000000e+00   1.3000000e+00   2.8000000e+00   4.8000000e+00   2.6000000e+00   2.3000000e+00   2.3000000e+00   1.7000000e+00   4.7000000e+00   2.5000000e+00   2.9000000e+00   2.1000000e+00   1.9000000e+00   1.8000000e+00   2.1000000e+00   3.1000000e+00   3.2000000e+00   2.3000000e+00   2.0000000e+00   3.0000000e+00   1.2000000e+00   1.7000000e+00   1.4000000e+00   2.4000000e+00   2.5000000e+00   1.8000000e+00   1.4000000e+00   4.0000000e+00   4.1000000e+00   1.9000000e+00   1.7000000e+00   2.1000000e+00   3.4000000e+00   1.3000000e+00   1.8000000e+00   1.8000000e+00   1.4000000e+00   1.2000000e+00   2.1000000e+00   1.4000000e+00   2.4000000e+00   3.7000000e+00   2.2000000e+00   1.1000000e+00   2.1000000e+00   2.9000000e+00   2.5000000e+00   1.4000000e+00   1.4000000e+00   1.1000000e+00   1.8000000e+00   1.0000000e+00   2.1000000e+00   2.0000000e+00   2.2000000e+00   1.4000000e+00   1.7000000e+00   1.3000000e+00   2.3000000e+00   1.6000000e+00   2.3000000e+00   1.2000000e+00   2.8000000e+00   1.7000000e+00   2.3000000e+00   9.0000000e-01   1.6000000e+00   1.5000000e+00   9.0000000e-01   2.0000000e+00   1.1000000e+00   2.5000000e+00   1.5000000e+00   1.1000000e+00   1.5000000e+00   6.0000000e-01   2.6000000e+00   1.1000000e+00   2.1000000e+00   1.9000000e+00   1.8000000e+00   2.3000000e+00   2.7000000e+00   3.3000000e+00   1.8000000e+00   1.3000000e+00   5.0000000e-01   7.0000000e-01   9.0000000e-01   2.3000000e+00   1.5000000e+00   2.4000000e+00   2.9000000e+00   1.2000000e+00   9.0000000e-01   2.0000000e-01   8.0000000e-01   2.0000000e+00   7.0000000e-01   1.5000000e+00   7.0000000e-01   1.2000000e+00   1.0000000e+00   1.6000000e+00   1.8000000e+00   8.0000000e-01   5.0000000e+00   2.4000000e+00   5.0000000e+00   3.5000000e+00   4.4000000e+00   6.2000000e+00   1.7000000e+00   5.2000000e+00   3.7000000e+00   6.3000000e+00   3.7000000e+00   3.2000000e+00   4.3000000e+00   2.1000000e+00   3.0000000e+00   4.1000000e+00   3.7000000e+00   7.3000000e+00   6.4000000e+00   1.8000000e+00   5.0000000e+00   2.2000000e+00   6.1000000e+00   2.6000000e+00   4.7000000e+00   5.1000000e+00   2.5000000e+00   2.7000000e+00   3.8000000e+00   4.5000000e+00   5.1000000e+00   7.0000000e+00   3.9000000e+00   2.6000000e+00   2.6000000e+00   6.0000000e+00   4.6000000e+00   3.7000000e+00   2.5000000e+00   4.4000000e+00   4.7000000e+00   4.3000000e+00   2.4000000e+00   5.1000000e+00   5.1000000e+00   4.1000000e+00   2.6000000e+00   3.6000000e+00   4.2000000e+00   2.7000000e+00   1.1000000e+00   9.0000000e-01   3.8000000e+00   4.0000000e-01   2.2000000e+00   3.9000000e+00   1.2000000e+00   2.2000000e+00   7.0000000e-01   2.2000000e+00   8.0000000e-01   1.2000000e+00   1.8000000e+00   1.0000000e+00   2.3000000e+00   1.5000000e+00   1.2000000e+00   8.0000000e-01   8.0000000e-01   7.0000000e-01   6.0000000e-01   6.0000000e-01   1.0000000e+00   7.0000000e-01   2.6000000e+00   2.6000000e+00   2.8000000e+00   1.8000000e+00   1.2000000e+00   1.4000000e+00   1.3000000e+00   6.0000000e-01   1.1000000e+00   1.8000000e+00   2.1000000e+00   1.7000000e+00   7.0000000e-01   1.8000000e+00   3.8000000e+00   1.6000000e+00   1.7000000e+00   1.5000000e+00   9.0000000e-01   3.7000000e+00   1.5000000e+00   
3.1000000e+00   1.7000000e+00   2.7000000e+00   1.6000000e+00   2.1000000e+00   3.9000000e+00   2.2000000e+00   2.9000000e+00   2.0000000e+00   4.0000000e+00   1.4000000e+00   1.3000000e+00   2.0000000e+00   2.0000000e+00   2.1000000e+00   2.0000000e+00   1.4000000e+00   5.0000000e+00   4.5000000e+00   1.5000000e+00   2.7000000e+00   1.7000000e+00   3.8000000e+00   9.0000000e-01   2.4000000e+00   2.8000000e+00   8.0000000e-01   1.2000000e+00   1.7000000e+00   2.2000000e+00   2.8000000e+00   4.7000000e+00   1.8000000e+00   7.0000000e-01   1.7000000e+00   3.7000000e+00   2.7000000e+00   1.6000000e+00   1.2000000e+00   2.1000000e+00   2.4000000e+00   2.0000000e+00   1.7000000e+00   2.8000000e+00   2.8000000e+00   1.8000000e+00   1.3000000e+00   1.3000000e+00   2.5000000e+00   1.6000000e+00   1.6000000e+00   2.7000000e+00   1.1000000e+00   1.3000000e+00   2.8000000e+00   9.0000000e-01   1.7000000e+00   8.0000000e-01   1.1000000e+00   1.5000000e+00   5.0000000e-01   9.0000000e-01   1.3000000e+00   1.2000000e+00   1.4000000e+00   9.0000000e-01   1.5000000e+00   7.0000000e-01   1.0000000e+00   1.3000000e+00   1.5000000e+00   2.1000000e+00   6.0000000e-01   1.5000000e+00   1.5000000e+00   1.7000000e+00   9.0000000e-01   1.3000000e+00   7.0000000e-01   1.2000000e+00   1.7000000e+00   1.2000000e+00   7.0000000e-01   1.0000000e+00   6.0000000e-01   8.0000000e-01   9.0000000e-01   2.7000000e+00   5.0000000e-01   6.0000000e-01   4.0000000e-01   8.0000000e-01   2.6000000e+00   4.0000000e-01   3.8000000e+00   1.4000000e+00   3.8000000e+00   2.3000000e+00   3.2000000e+00   5.0000000e+00   1.5000000e+00   4.0000000e+00   3.1000000e+00   5.1000000e+00   2.5000000e+00   2.2000000e+00   3.1000000e+00   1.5000000e+00   1.8000000e+00   2.9000000e+00   2.5000000e+00   6.1000000e+00   5.6000000e+00   1.6000000e+00   3.8000000e+00   1.2000000e+00   4.9000000e+00   1.6000000e+00   3.5000000e+00   3.9000000e+00   1.3000000e+00   1.5000000e+00   2.6000000e+00   3.3000000e+00   3.9000000e+00   5.8000000e+00   2.7000000e+00   1.4000000e+00   1.8000000e+00   4.8000000e+00   3.4000000e+00   2.5000000e+00   1.3000000e+00   3.2000000e+00   3.5000000e+00   3.1000000e+00   1.4000000e+00   3.9000000e+00   3.9000000e+00   2.9000000e+00   2.0000000e+00   2.4000000e+00   3.0000000e+00   1.5000000e+00   4.3000000e+00   1.1000000e+00   2.7000000e+00   4.4000000e+00   1.3000000e+00   2.7000000e+00   8.0000000e-01   2.5000000e+00   1.1000000e+00   1.3000000e+00   2.3000000e+00   1.5000000e+00   2.8000000e+00   8.0000000e-01   1.7000000e+00   1.1000000e+00   1.1000000e+00   1.2000000e+00   1.1000000e+00   1.3000000e+00   1.1000000e+00   1.0000000e+00   3.1000000e+00   3.1000000e+00   3.3000000e+00   2.3000000e+00   1.3000000e+00   1.5000000e+00   6.0000000e-01   7.0000000e-01   1.6000000e+00   1.9000000e+00   2.6000000e+00   2.2000000e+00   8.0000000e-01   2.3000000e+00   4.3000000e+00   2.1000000e+00   1.8000000e+00   1.8000000e+00   1.2000000e+00   4.2000000e+00   2.0000000e+00   2.2000000e+00   1.8000000e+00   2.8000000e+00   1.5000000e+00   2.2000000e+00   4.0000000e+00   2.5000000e+00   3.2000000e+00   2.5000000e+00   3.5000000e+00   1.1000000e+00   1.6000000e+00   2.1000000e+00   2.1000000e+00   2.2000000e+00   1.5000000e+00   1.5000000e+00   4.5000000e+00   5.0000000e+00   1.8000000e+00   2.4000000e+00   1.8000000e+00   4.3000000e+00   1.0000000e+00   1.9000000e+00   2.5000000e+00   9.0000000e-01   9.0000000e-01   2.0000000e+00   2.3000000e+00   3.3000000e+00   4.2000000e+00   2.1000000e+00   1.0000000e+00   2.0000000e+00   
3.8000000e+00   1.8000000e+00   1.3000000e+00   9.0000000e-01   2.0000000e+00   2.3000000e+00   1.9000000e+00   1.8000000e+00   2.5000000e+00   2.3000000e+00   1.9000000e+00   1.4000000e+00   1.4000000e+00   1.6000000e+00   1.3000000e+00   3.8000000e+00   1.6000000e+00   7.0000000e-01   3.0000000e+00   2.0000000e+00   3.5000000e+00   1.8000000e+00   4.0000000e+00   3.0000000e+00   2.0000000e+00   3.2000000e+00   1.5000000e+00   4.1000000e+00   2.6000000e+00   3.6000000e+00   3.2000000e+00   3.3000000e+00   3.8000000e+00   4.2000000e+00   4.8000000e+00   3.3000000e+00   1.2000000e+00   1.2000000e+00   1.0000000e+00   2.0000000e+00   3.8000000e+00   2.8000000e+00   3.9000000e+00   4.4000000e+00   2.9000000e+00   2.4000000e+00   1.7000000e+00   2.1000000e+00   3.5000000e+00   2.0000000e+00   2.0000000e-01   2.2000000e+00   2.5000000e+00   2.5000000e+00   3.1000000e+00   7.0000000e-01   2.3000000e+00   6.5000000e+00   3.9000000e+00   6.5000000e+00   5.0000000e+00   5.9000000e+00   7.7000000e+00   2.0000000e+00   6.7000000e+00   5.2000000e+00   7.8000000e+00   5.2000000e+00   4.7000000e+00   5.8000000e+00   3.6000000e+00   4.5000000e+00   5.6000000e+00   5.2000000e+00   8.8000000e+00   7.9000000e+00   3.5000000e+00   6.5000000e+00   3.7000000e+00   7.6000000e+00   4.1000000e+00   6.2000000e+00   6.6000000e+00   4.0000000e+00   4.2000000e+00   5.3000000e+00   6.0000000e+00   6.6000000e+00   8.5000000e+00   5.4000000e+00   4.1000000e+00   4.1000000e+00   7.5000000e+00   6.1000000e+00   5.2000000e+00   4.0000000e+00   5.9000000e+00   6.2000000e+00   5.8000000e+00   3.9000000e+00   6.6000000e+00   6.6000000e+00   5.6000000e+00   4.1000000e+00   5.1000000e+00   5.7000000e+00   4.2000000e+00   2.4000000e+00   3.9000000e+00   1.4000000e+00   2.2000000e+00   7.0000000e-01   2.0000000e+00   6.0000000e-01   1.4000000e+00   1.8000000e+00   1.4000000e+00   2.3000000e+00   1.7000000e+00   1.2000000e+00   1.2000000e+00   8.0000000e-01   5.0000000e-01   4.0000000e-01   6.0000000e-01   1.0000000e+00   9.0000000e-01   2.6000000e+00   2.6000000e+00   2.8000000e+00   1.8000000e+00   1.6000000e+00   1.6000000e+00   1.5000000e+00   6.0000000e-01   1.1000000e+00   1.6000000e+00   2.1000000e+00   1.7000000e+00   7.0000000e-01   1.8000000e+00   3.8000000e+00   1.6000000e+00   1.5000000e+00   1.3000000e+00   7.0000000e-01   3.7000000e+00   1.5000000e+00   3.3000000e+00   2.1000000e+00   2.7000000e+00   1.8000000e+00   2.3000000e+00   3.9000000e+00   2.6000000e+00   2.9000000e+00   2.2000000e+00   4.0000000e+00   1.6000000e+00   1.7000000e+00   2.0000000e+00   2.4000000e+00   2.5000000e+00   2.2000000e+00   1.6000000e+00   5.0000000e+00   4.7000000e+00   1.9000000e+00   2.7000000e+00   2.1000000e+00   4.0000000e+00   1.3000000e+00   2.4000000e+00   2.8000000e+00   1.2000000e+00   1.4000000e+00   2.1000000e+00   2.2000000e+00   3.0000000e+00   4.7000000e+00   2.2000000e+00   1.1000000e+00   1.9000000e+00   3.7000000e+00   2.9000000e+00   1.8000000e+00   1.4000000e+00   2.1000000e+00   2.4000000e+00   2.0000000e+00   2.1000000e+00   2.8000000e+00   2.8000000e+00   1.8000000e+00   1.7000000e+00   1.5000000e+00   2.7000000e+00   1.8000000e+00   1.7000000e+00   1.4000000e+00   1.8000000e+00   1.9000000e+00   1.0000000e+00   2.4000000e+00   1.4000000e+00   1.2000000e+00   2.2000000e+00   9.0000000e-01   2.5000000e+00   1.2000000e+00   2.4000000e+00   2.0000000e+00   1.9000000e+00   2.2000000e+00   2.6000000e+00   3.2000000e+00   1.7000000e+00   1.4000000e+00   1.0000000e+00   1.2000000e+00   8.0000000e-01   2.2000000e+00   
1.2000000e+00   2.3000000e+00   2.8000000e+00   2.1000000e+00   1.0000000e+00   7.0000000e-01   1.1000000e+00   1.9000000e+00   1.0000000e+00   1.6000000e+00   8.0000000e-01   1.3000000e+00   1.1000000e+00   1.7000000e+00   1.5000000e+00   9.0000000e-01   4.9000000e+00   2.3000000e+00   4.9000000e+00   3.4000000e+00   4.3000000e+00   6.1000000e+00   1.4000000e+00   5.1000000e+00   4.0000000e+00   6.2000000e+00   3.6000000e+00   3.1000000e+00   4.2000000e+00   2.4000000e+00   2.9000000e+00   4.0000000e+00   3.6000000e+00   7.2000000e+00   6.5000000e+00   2.5000000e+00   4.9000000e+00   2.1000000e+00   6.0000000e+00   2.5000000e+00   4.6000000e+00   5.0000000e+00   2.4000000e+00   2.6000000e+00   3.7000000e+00   4.4000000e+00   5.0000000e+00   6.9000000e+00   3.8000000e+00   2.5000000e+00   2.7000000e+00   5.9000000e+00   4.5000000e+00   3.6000000e+00   2.4000000e+00   4.3000000e+00   4.6000000e+00   4.2000000e+00   2.3000000e+00   5.0000000e+00   5.0000000e+00   4.0000000e+00   2.9000000e+00   3.5000000e+00   4.1000000e+00   2.6000000e+00   3.1000000e+00   1.7000000e+00   3.6000000e+00   1.9000000e+00   4.1000000e+00   3.1000000e+00   2.1000000e+00   2.9000000e+00   1.6000000e+00   4.2000000e+00   2.7000000e+00   3.7000000e+00   3.3000000e+00   3.4000000e+00   3.9000000e+00   4.3000000e+00   4.9000000e+00   3.4000000e+00   1.3000000e+00   1.3000000e+00   1.1000000e+00   2.1000000e+00   3.9000000e+00   2.9000000e+00   4.0000000e+00   4.5000000e+00   2.8000000e+00   2.5000000e+00   1.8000000e+00   2.2000000e+00   3.6000000e+00   2.1000000e+00   5.0000000e-01   2.3000000e+00   2.6000000e+00   2.6000000e+00   3.2000000e+00   1.2000000e+00   2.4000000e+00   6.6000000e+00   4.0000000e+00   6.6000000e+00   5.1000000e+00   6.0000000e+00   7.8000000e+00   2.3000000e+00   6.8000000e+00   5.3000000e+00   7.9000000e+00   5.3000000e+00   4.8000000e+00   5.9000000e+00   3.7000000e+00   4.6000000e+00   5.7000000e+00   5.3000000e+00   8.9000000e+00   8.0000000e+00   3.2000000e+00   6.6000000e+00   3.8000000e+00   7.7000000e+00   4.2000000e+00   6.3000000e+00   6.7000000e+00   4.1000000e+00   4.3000000e+00   5.4000000e+00   6.1000000e+00   6.7000000e+00   8.6000000e+00   5.5000000e+00   4.2000000e+00   4.2000000e+00   7.6000000e+00   6.2000000e+00   5.3000000e+00   4.1000000e+00   6.0000000e+00   6.3000000e+00   5.9000000e+00   4.0000000e+00   6.7000000e+00   6.7000000e+00   5.7000000e+00   4.2000000e+00   5.2000000e+00   5.8000000e+00   4.3000000e+00   1.6000000e+00   9.0000000e-01   1.2000000e+00   1.2000000e+00   6.0000000e-01   1.0000000e+00   1.4000000e+00   1.5000000e+00   1.1000000e+00   8.0000000e-01   1.6000000e+00   1.2000000e+00   9.0000000e-01   1.0000000e+00   1.8000000e+00   1.8000000e+00   5.0000000e-01   1.8000000e+00   1.8000000e+00   2.0000000e+00   1.0000000e+00   1.4000000e+00   8.0000000e-01   9.0000000e-01   1.4000000e+00   1.5000000e+00   6.0000000e-01   1.3000000e+00   1.3000000e+00   7.0000000e-01   1.0000000e+00   3.0000000e+00   8.0000000e-01   5.0000000e-01   5.0000000e-01   7.0000000e-01   2.9000000e+00   7.0000000e-01   3.5000000e+00   1.7000000e+00   3.5000000e+00   2.2000000e+00   2.9000000e+00   4.7000000e+00   2.0000000e+00   3.9000000e+00   3.2000000e+00   4.8000000e+00   2.2000000e+00   2.3000000e+00   2.8000000e+00   2.0000000e+00   2.1000000e+00   2.6000000e+00   2.2000000e+00   5.8000000e+00   5.7000000e+00   1.7000000e+00   3.5000000e+00   1.7000000e+00   5.0000000e+00   1.7000000e+00   3.2000000e+00   3.6000000e+00   1.4000000e+00   1.2000000e+00   2.7000000e+00   
3.0000000e+00   4.0000000e+00   5.5000000e+00   2.8000000e+00   1.5000000e+00   2.1000000e+00   4.5000000e+00   3.1000000e+00   2.2000000e+00   1.0000000e+00   2.9000000e+00   3.2000000e+00   2.8000000e+00   1.7000000e+00   3.6000000e+00   3.6000000e+00   2.6000000e+00   2.1000000e+00   2.1000000e+00   2.7000000e+00   1.2000000e+00   1.9000000e+00   1.8000000e+00   2.4000000e+00   2.2000000e+00   8.0000000e-01   1.2000000e+00   9.0000000e-01   2.7000000e+00   1.0000000e+00   2.0000000e+00   1.6000000e+00   1.7000000e+00   2.2000000e+00   2.6000000e+00   3.2000000e+00   1.7000000e+00   1.2000000e+00   1.0000000e+00   1.0000000e+00   1.0000000e+00   2.2000000e+00   2.4000000e+00   2.3000000e+00   2.8000000e+00   1.1000000e+00   1.6000000e+00   1.1000000e+00   1.5000000e+00   1.9000000e+00   8.0000000e-01   1.8000000e+00   1.4000000e+00   1.5000000e+00   1.5000000e+00   1.5000000e+00   2.3000000e+00   1.3000000e+00   4.9000000e+00   2.7000000e+00   4.9000000e+00   3.4000000e+00   4.3000000e+00   6.1000000e+00   2.6000000e+00   5.1000000e+00   3.6000000e+00   6.2000000e+00   3.6000000e+00   3.1000000e+00   4.2000000e+00   2.6000000e+00   3.3000000e+00   4.0000000e+00   3.6000000e+00   7.2000000e+00   6.3000000e+00   1.5000000e+00   4.9000000e+00   2.9000000e+00   6.0000000e+00   2.5000000e+00   4.6000000e+00   5.0000000e+00   2.4000000e+00   2.6000000e+00   3.7000000e+00   4.4000000e+00   5.0000000e+00   6.9000000e+00   3.8000000e+00   2.5000000e+00   2.5000000e+00   5.9000000e+00   4.5000000e+00   3.6000000e+00   2.4000000e+00   4.3000000e+00   4.6000000e+00   4.2000000e+00   2.7000000e+00   5.0000000e+00   5.0000000e+00   4.0000000e+00   2.5000000e+00   3.5000000e+00   4.1000000e+00   2.8000000e+00   1.7000000e+00   1.1000000e+00   9.0000000e-01   1.5000000e+00   1.1000000e+00   2.0000000e+00   1.0000000e+00   9.0000000e-01   9.0000000e-01   3.0000000e-01   8.0000000e-01   9.0000000e-01   9.0000000e-01   1.3000000e+00   4.0000000e-01   2.3000000e+00   2.3000000e+00   2.5000000e+00   1.5000000e+00   9.0000000e-01   1.1000000e+00   1.0000000e+00   9.0000000e-01   1.2000000e+00   1.3000000e+00   1.8000000e+00   1.4000000e+00   2.0000000e-01   1.5000000e+00   3.5000000e+00   1.3000000e+00   1.2000000e+00   1.0000000e+00   6.0000000e-01   3.4000000e+00   1.2000000e+00   3.0000000e+00   1.4000000e+00   3.0000000e+00   1.5000000e+00   2.4000000e+00   4.2000000e+00   2.1000000e+00   3.2000000e+00   2.5000000e+00   4.3000000e+00   1.7000000e+00   1.6000000e+00   2.3000000e+00   1.7000000e+00   1.8000000e+00   2.1000000e+00   1.7000000e+00   5.3000000e+00   5.0000000e+00   1.2000000e+00   3.0000000e+00   1.4000000e+00   4.3000000e+00   1.0000000e+00   2.7000000e+00   3.1000000e+00   7.0000000e-01   7.0000000e-01   2.0000000e+00   2.5000000e+00   3.3000000e+00   5.0000000e+00   2.1000000e+00   8.0000000e-01   1.2000000e+00   4.0000000e+00   2.6000000e+00   1.7000000e+00   7.0000000e-01   2.4000000e+00   2.7000000e+00   2.3000000e+00   1.4000000e+00   3.1000000e+00   3.1000000e+00   2.1000000e+00   1.4000000e+00   1.6000000e+00   2.2000000e+00   1.1000000e+00   2.2000000e+00   1.2000000e+00   1.2000000e+00   2.4000000e+00   9.0000000e-01   2.3000000e+00   1.0000000e+00   2.6000000e+00   1.8000000e+00   1.5000000e+00   2.0000000e+00   2.6000000e+00   3.0000000e+00   1.5000000e+00   8.0000000e-01   1.0000000e+00   1.0000000e+00   8.0000000e-01   2.4000000e+00   1.4000000e+00   2.1000000e+00   2.6000000e+00   2.1000000e+00   6.0000000e-01   9.0000000e-01   1.3000000e+00   1.7000000e+00   1.0000000e+00   
1.8000000e+00   8.0000000e-01   9.0000000e-01   7.0000000e-01   1.3000000e+00   1.7000000e+00   7.0000000e-01   4.7000000e+00   2.5000000e+00   4.7000000e+00   3.2000000e+00   4.1000000e+00   5.9000000e+00   2.4000000e+00   4.9000000e+00   4.2000000e+00   6.0000000e+00   3.4000000e+00   3.3000000e+00   4.0000000e+00   2.6000000e+00   2.9000000e+00   3.8000000e+00   3.4000000e+00   7.0000000e+00   6.7000000e+00   2.7000000e+00   4.7000000e+00   2.1000000e+00   6.0000000e+00   2.7000000e+00   4.4000000e+00   4.8000000e+00   2.4000000e+00   2.4000000e+00   3.7000000e+00   4.2000000e+00   5.0000000e+00   6.7000000e+00   3.8000000e+00   2.5000000e+00   2.9000000e+00   5.7000000e+00   4.3000000e+00   3.4000000e+00   2.2000000e+00   4.1000000e+00   4.4000000e+00   4.0000000e+00   2.5000000e+00   4.8000000e+00   4.8000000e+00   3.8000000e+00   3.1000000e+00   3.3000000e+00   3.9000000e+00   2.4000000e+00   1.4000000e+00   2.0000000e+00   1.6000000e+00   2.5000000e+00   1.7000000e+00   1.4000000e+00   1.6000000e+00   1.4000000e+00   7.0000000e-01   2.0000000e-01   8.0000000e-01   1.0000000e+00   1.1000000e+00   2.8000000e+00   2.8000000e+00   3.0000000e+00   2.0000000e+00   2.0000000e+00   1.6000000e+00   1.3000000e+00   4.0000000e-01   1.3000000e+00   1.6000000e+00   2.3000000e+00   1.9000000e+00   9.0000000e-01   2.0000000e+00   4.0000000e+00   1.8000000e+00   1.5000000e+00   1.5000000e+00   9.0000000e-01   3.9000000e+00   1.7000000e+00   3.3000000e+00   2.5000000e+00   2.7000000e+00   2.2000000e+00   2.5000000e+00   3.9000000e+00   2.8000000e+00   3.1000000e+00   2.4000000e+00   3.8000000e+00   1.6000000e+00   2.1000000e+00   2.0000000e+00   2.8000000e+00   2.9000000e+00   2.2000000e+00   1.8000000e+00   4.8000000e+00   4.9000000e+00   2.3000000e+00   2.5000000e+00   2.5000000e+00   4.2000000e+00   1.7000000e+00   2.2000000e+00   2.6000000e+00   1.6000000e+00   1.6000000e+00   2.5000000e+00   2.2000000e+00   3.2000000e+00   4.5000000e+00   2.6000000e+00   1.5000000e+00   2.3000000e+00   3.7000000e+00   2.9000000e+00   1.8000000e+00   1.6000000e+00   1.9000000e+00   2.2000000e+00   1.8000000e+00   2.5000000e+00   2.6000000e+00   2.6000000e+00   1.8000000e+00   2.1000000e+00   1.7000000e+00   2.7000000e+00   2.0000000e+00   1.4000000e+00   1.4000000e+00   1.5000000e+00   1.1000000e+00   1.4000000e+00   1.6000000e+00   1.2000000e+00   1.3000000e+00   1.2000000e+00   1.8000000e+00   1.8000000e+00   5.0000000e-01   2.0000000e+00   1.8000000e+00   2.0000000e+00   1.4000000e+00   1.4000000e+00   2.0000000e-01   9.0000000e-01   1.4000000e+00   1.7000000e+00   6.0000000e-01   1.3000000e+00   9.0000000e-01   7.0000000e-01   1.4000000e+00   3.0000000e+00   8.0000000e-01   7.0000000e-01   7.0000000e-01   1.1000000e+00   2.9000000e+00   9.0000000e-01   3.5000000e+00   1.5000000e+00   3.5000000e+00   2.2000000e+00   2.9000000e+00   4.7000000e+00   1.4000000e+00   3.9000000e+00   3.2000000e+00   4.8000000e+00   2.2000000e+00   2.3000000e+00   2.8000000e+00   1.6000000e+00   1.9000000e+00   2.6000000e+00   2.2000000e+00   5.8000000e+00   5.7000000e+00   1.7000000e+00   3.5000000e+00   1.1000000e+00   5.0000000e+00   1.7000000e+00   3.2000000e+00   3.6000000e+00   1.4000000e+00   1.2000000e+00   2.7000000e+00   3.0000000e+00   4.0000000e+00   5.5000000e+00   2.8000000e+00   1.5000000e+00   2.1000000e+00   4.5000000e+00   3.1000000e+00   2.2000000e+00   1.0000000e+00   2.9000000e+00   3.2000000e+00   2.8000000e+00   1.5000000e+00   3.6000000e+00   3.6000000e+00   2.6000000e+00   2.1000000e+00   2.1000000e+00   
2.7000000e+00   1.2000000e+00   1.8000000e+00   7.0000000e-01   2.1000000e+00   8.0000000e-01   2.0000000e+00   1.2000000e+00   1.3000000e+00   1.8000000e+00   2.2000000e+00   2.8000000e+00   1.3000000e+00   8.0000000e-01   1.0000000e+00   1.0000000e+00   4.0000000e-01   1.8000000e+00   1.6000000e+00   1.9000000e+00   2.4000000e+00   1.5000000e+00   8.0000000e-01   9.0000000e-01   9.0000000e-01   1.5000000e+00   4.0000000e-01   2.0000000e+00   6.0000000e-01   7.0000000e-01   7.0000000e-01   1.1000000e+00   2.1000000e+00   5.0000000e-01   4.5000000e+00   1.9000000e+00   4.5000000e+00   3.0000000e+00   3.9000000e+00   5.7000000e+00   2.2000000e+00   4.7000000e+00   3.6000000e+00   5.8000000e+00   3.2000000e+00   2.7000000e+00   3.8000000e+00   2.2000000e+00   2.5000000e+00   3.6000000e+00   3.2000000e+00   6.8000000e+00   6.1000000e+00   2.1000000e+00   4.5000000e+00   2.1000000e+00   5.6000000e+00   2.1000000e+00   4.2000000e+00   4.6000000e+00   2.0000000e+00   2.2000000e+00   3.3000000e+00   4.0000000e+00   4.6000000e+00   6.5000000e+00   3.4000000e+00   2.1000000e+00   2.3000000e+00   5.5000000e+00   4.1000000e+00   3.2000000e+00   2.0000000e+00   3.9000000e+00   4.2000000e+00   3.8000000e+00   1.9000000e+00   4.6000000e+00   4.6000000e+00   3.6000000e+00   2.5000000e+00   3.1000000e+00   3.7000000e+00   2.2000000e+00   1.9000000e+00   1.9000000e+00   1.4000000e+00   8.0000000e-01   1.2000000e+00   1.3000000e+00   1.4000000e+00   1.6000000e+00   2.0000000e+00   9.0000000e-01   2.4000000e+00   2.0000000e+00   2.2000000e+00   1.8000000e+00   1.4000000e+00   1.6000000e+00   1.5000000e+00   1.6000000e+00   5.0000000e-01   2.0000000e+00   1.7000000e+00   1.5000000e+00   1.1000000e+00   1.6000000e+00   3.0000000e+00   1.6000000e+00   1.9000000e+00   1.7000000e+00   1.1000000e+00   3.3000000e+00   1.7000000e+00   3.7000000e+00   1.9000000e+00   3.7000000e+00   2.2000000e+00   3.1000000e+00   4.9000000e+00   1.8000000e+00   3.9000000e+00   2.4000000e+00   5.0000000e+00   2.4000000e+00   1.9000000e+00   3.0000000e+00   1.8000000e+00   2.5000000e+00   2.8000000e+00   2.4000000e+00   6.0000000e+00   5.1000000e+00   7.0000000e-01   3.7000000e+00   2.1000000e+00   4.8000000e+00   1.3000000e+00   3.4000000e+00   3.8000000e+00   1.2000000e+00   1.6000000e+00   2.5000000e+00   3.2000000e+00   3.8000000e+00   5.7000000e+00   2.6000000e+00   1.3000000e+00   1.7000000e+00   4.7000000e+00   3.3000000e+00   2.4000000e+00   1.6000000e+00   3.1000000e+00   3.4000000e+00   3.0000000e+00   1.9000000e+00   3.8000000e+00   3.8000000e+00   2.8000000e+00   1.3000000e+00   2.3000000e+00   2.9000000e+00   2.0000000e+00   2.6000000e+00   1.1000000e+00   2.1000000e+00   1.7000000e+00   1.8000000e+00   2.3000000e+00   2.7000000e+00   3.3000000e+00   1.8000000e+00   7.0000000e-01   3.0000000e-01   5.0000000e-01   5.0000000e-01   2.3000000e+00   1.7000000e+00   2.4000000e+00   2.9000000e+00   1.6000000e+00   9.0000000e-01   4.0000000e-01   8.0000000e-01   2.0000000e+00   5.0000000e-01   1.5000000e+00   7.0000000e-01   1.0000000e+00   1.0000000e+00   1.6000000e+00   1.4000000e+00   8.0000000e-01   5.0000000e+00   2.4000000e+00   5.0000000e+00   3.5000000e+00   4.4000000e+00   6.2000000e+00   1.9000000e+00   5.2000000e+00   3.7000000e+00   6.3000000e+00   3.7000000e+00   3.2000000e+00   4.3000000e+00   2.1000000e+00   3.0000000e+00   4.1000000e+00   3.7000000e+00   7.3000000e+00   6.4000000e+00   2.2000000e+00   5.0000000e+00   2.2000000e+00   6.1000000e+00   2.6000000e+00   4.7000000e+00   5.1000000e+00   2.5000000e+00   
2.7000000e+00   3.8000000e+00   4.5000000e+00   5.1000000e+00   7.0000000e+00   3.9000000e+00   2.6000000e+00   2.6000000e+00   6.0000000e+00   4.6000000e+00   3.7000000e+00   2.5000000e+00   4.4000000e+00   4.7000000e+00   4.3000000e+00   2.4000000e+00   5.1000000e+00   5.1000000e+00   4.1000000e+00   2.6000000e+00   3.6000000e+00   4.2000000e+00   2.7000000e+00   1.9000000e+00   1.5000000e+00   1.3000000e+00   1.8000000e+00   1.7000000e+00   1.7000000e+00   1.3000000e+00   1.0000000e+00   2.9000000e+00   2.9000000e+00   3.1000000e+00   2.1000000e+00   1.1000000e+00   1.3000000e+00   8.0000000e-01   1.3000000e+00   2.2000000e+00   1.7000000e+00   2.4000000e+00   2.0000000e+00   1.0000000e+00   2.1000000e+00   4.1000000e+00   1.9000000e+00   1.6000000e+00   1.6000000e+00   1.6000000e+00   4.0000000e+00   1.8000000e+00   2.4000000e+00   1.0000000e+00   2.8000000e+00   1.5000000e+00   2.2000000e+00   4.0000000e+00   2.1000000e+00   3.2000000e+00   2.5000000e+00   3.7000000e+00   1.1000000e+00   1.6000000e+00   2.1000000e+00   1.3000000e+00   1.4000000e+00   1.5000000e+00   1.5000000e+00   4.7000000e+00   5.0000000e+00   1.6000000e+00   2.4000000e+00   1.0000000e+00   4.3000000e+00   1.0000000e+00   2.1000000e+00   2.5000000e+00   7.0000000e-01   5.0000000e-01   2.0000000e+00   2.7000000e+00   3.3000000e+00   4.4000000e+00   2.1000000e+00   1.4000000e+00   2.0000000e+00   3.8000000e+00   2.0000000e+00   1.3000000e+00   3.0000000e-01   2.0000000e+00   2.3000000e+00   1.9000000e+00   1.0000000e+00   2.5000000e+00   2.5000000e+00   1.9000000e+00   1.4000000e+00   1.4000000e+00   1.6000000e+00   5.0000000e-01   1.6000000e+00   8.0000000e-01   7.0000000e-01   1.2000000e+00   1.6000000e+00   2.2000000e+00   9.0000000e-01   1.4000000e+00   1.4000000e+00   1.6000000e+00   6.0000000e-01   1.6000000e+00   1.6000000e+00   1.5000000e+00   1.8000000e+00   1.1000000e+00   8.0000000e-01   9.0000000e-01   1.3000000e+00   9.0000000e-01   6.0000000e-01   2.6000000e+00   8.0000000e-01   9.0000000e-01   7.0000000e-01   5.0000000e-01   2.5000000e+00   5.0000000e-01   3.9000000e+00   2.1000000e+00   3.9000000e+00   2.4000000e+00   3.3000000e+00   5.1000000e+00   2.4000000e+00   4.1000000e+00   3.2000000e+00   5.2000000e+00   2.6000000e+00   2.3000000e+00   3.2000000e+00   2.4000000e+00   2.5000000e+00   3.0000000e+00   2.6000000e+00   6.2000000e+00   5.7000000e+00   1.9000000e+00   3.9000000e+00   2.1000000e+00   5.0000000e+00   1.7000000e+00   3.6000000e+00   4.0000000e+00   1.4000000e+00   1.6000000e+00   2.7000000e+00   3.4000000e+00   4.0000000e+00   5.9000000e+00   2.8000000e+00   1.5000000e+00   1.9000000e+00   4.9000000e+00   3.5000000e+00   2.6000000e+00   1.6000000e+00   3.3000000e+00   3.6000000e+00   3.2000000e+00   2.1000000e+00   4.0000000e+00   4.0000000e+00   3.0000000e+00   2.1000000e+00   2.5000000e+00   3.1000000e+00   2.0000000e+00   1.0000000e+00   1.3000000e+00   1.4000000e+00   1.0000000e+00   1.2000000e+00   1.1000000e+00   2.6000000e+00   2.4000000e+00   2.6000000e+00   2.0000000e+00   8.0000000e-01   1.8000000e+00   1.7000000e+00   1.2000000e+00   9.0000000e-01   2.2000000e+00   1.9000000e+00   1.7000000e+00   1.1000000e+00   1.8000000e+00   3.6000000e+00   1.8000000e+00   2.1000000e+00   1.9000000e+00   1.3000000e+00   3.5000000e+00   1.9000000e+00   2.9000000e+00   1.3000000e+00   2.9000000e+00   1.4000000e+00   2.3000000e+00   4.1000000e+00   2.0000000e+00   3.1000000e+00   1.6000000e+00   4.2000000e+00   1.6000000e+00   1.1000000e+00   2.2000000e+00   1.2000000e+00   1.9000000e+00   
2.0000000e+00   1.6000000e+00   5.2000000e+00   4.3000000e+00   7.0000000e-01   2.9000000e+00   1.5000000e+00   4.0000000e+00   5.0000000e-01   2.6000000e+00   3.0000000e+00   8.0000000e-01   1.0000000e+00   1.7000000e+00   2.4000000e+00   3.0000000e+00   4.9000000e+00   1.8000000e+00   5.0000000e-01   1.1000000e+00   3.9000000e+00   2.5000000e+00   1.6000000e+00   1.2000000e+00   2.3000000e+00   2.6000000e+00   2.2000000e+00   1.3000000e+00   3.0000000e+00   3.0000000e+00   2.0000000e+00   5.0000000e-01   1.5000000e+00   2.3000000e+00   1.4000000e+00   9.0000000e-01   1.2000000e+00   1.0000000e+00   1.6000000e+00   7.0000000e-01   2.0000000e+00   2.0000000e+00   2.2000000e+00   1.2000000e+00   1.0000000e+00   1.4000000e+00   1.3000000e+00   1.2000000e+00   1.1000000e+00   1.4000000e+00   1.7000000e+00   1.1000000e+00   5.0000000e-01   1.2000000e+00   3.2000000e+00   1.2000000e+00   1.1000000e+00   1.1000000e+00   7.0000000e-01   3.1000000e+00   1.1000000e+00   3.3000000e+00   1.5000000e+00   3.3000000e+00   1.8000000e+00   2.7000000e+00   4.5000000e+00   2.2000000e+00   3.5000000e+00   2.6000000e+00   4.6000000e+00   2.0000000e+00   1.7000000e+00   2.6000000e+00   1.8000000e+00   1.9000000e+00   2.4000000e+00   2.0000000e+00   5.6000000e+00   5.1000000e+00   1.3000000e+00   3.3000000e+00   1.5000000e+00   4.4000000e+00   1.1000000e+00   3.0000000e+00   3.4000000e+00   8.0000000e-01   1.0000000e+00   2.1000000e+00   2.8000000e+00   3.4000000e+00   5.3000000e+00   2.2000000e+00   9.0000000e-01   1.3000000e+00   4.3000000e+00   2.9000000e+00   2.0000000e+00   1.0000000e+00   2.7000000e+00   3.0000000e+00   2.6000000e+00   1.5000000e+00   3.4000000e+00   3.4000000e+00   2.4000000e+00   1.5000000e+00   1.9000000e+00   2.5000000e+00   1.4000000e+00   5.0000000e-01   1.1000000e+00   1.5000000e+00   8.0000000e-01   2.1000000e+00   2.1000000e+00   2.3000000e+00   1.3000000e+00   1.7000000e+00   1.5000000e+00   1.4000000e+00   1.1000000e+00   8.0000000e-01   1.1000000e+00   1.6000000e+00   1.4000000e+00   8.0000000e-01   1.3000000e+00   3.3000000e+00   1.1000000e+00   1.0000000e+00   8.0000000e-01   2.0000000e-01   3.2000000e+00   1.0000000e+00   3.4000000e+00   2.2000000e+00   3.2000000e+00   1.9000000e+00   2.6000000e+00   4.4000000e+00   2.5000000e+00   3.4000000e+00   2.7000000e+00   4.5000000e+00   1.9000000e+00   1.8000000e+00   2.5000000e+00   2.5000000e+00   2.6000000e+00   2.3000000e+00   1.9000000e+00   5.5000000e+00   5.2000000e+00   2.0000000e+00   3.2000000e+00   2.2000000e+00   4.5000000e+00   1.4000000e+00   2.9000000e+00   3.3000000e+00   1.3000000e+00   1.5000000e+00   2.2000000e+00   2.7000000e+00   3.5000000e+00   5.2000000e+00   2.3000000e+00   1.2000000e+00   2.0000000e+00   4.2000000e+00   3.0000000e+00   1.9000000e+00   1.5000000e+00   2.6000000e+00   2.9000000e+00   2.5000000e+00   2.2000000e+00   3.3000000e+00   3.3000000e+00   2.3000000e+00   1.8000000e+00   1.8000000e+00   2.8000000e+00   1.9000000e+00   8.0000000e-01   1.0000000e+00   9.0000000e-01   2.6000000e+00   2.6000000e+00   2.8000000e+00   1.8000000e+00   1.8000000e+00   1.4000000e+00   1.3000000e+00   6.0000000e-01   1.1000000e+00   1.4000000e+00   2.1000000e+00   1.7000000e+00   7.0000000e-01   1.8000000e+00   3.8000000e+00   1.6000000e+00   1.3000000e+00   1.3000000e+00   7.0000000e-01   3.7000000e+00   1.5000000e+00   3.3000000e+00   2.3000000e+00   2.7000000e+00   2.0000000e+00   2.3000000e+00   3.9000000e+00   2.6000000e+00   3.1000000e+00   2.4000000e+00   4.0000000e+00   1.6000000e+00   1.9000000e+00   
2.0000000e+00   2.6000000e+00   2.7000000e+00   2.2000000e+00   1.6000000e+00   5.0000000e+00   4.9000000e+00   2.1000000e+00   2.7000000e+00   2.3000000e+00   4.2000000e+00   1.5000000e+00   2.4000000e+00   2.8000000e+00   1.4000000e+00   1.4000000e+00   2.3000000e+00   2.2000000e+00   3.2000000e+00   4.7000000e+00   2.4000000e+00   1.3000000e+00   2.1000000e+00   3.7000000e+00   2.9000000e+00   1.8000000e+00   1.4000000e+00   2.1000000e+00   2.4000000e+00   2.0000000e+00   2.3000000e+00   2.8000000e+00   2.8000000e+00   1.8000000e+00   1.9000000e+00   1.5000000e+00   2.7000000e+00   1.8000000e+00   8.0000000e-01   1.3000000e+00   3.0000000e+00   3.0000000e+00   3.2000000e+00   2.2000000e+00   1.4000000e+00   2.0000000e+00   1.9000000e+00   6.0000000e-01   1.5000000e+00   2.2000000e+00   2.5000000e+00   2.1000000e+00   1.1000000e+00   2.2000000e+00   4.2000000e+00   2.0000000e+00   2.1000000e+00   1.9000000e+00   1.3000000e+00   4.1000000e+00   1.9000000e+00   3.3000000e+00   1.9000000e+00   2.3000000e+00   1.8000000e+00   2.3000000e+00   3.5000000e+00   2.8000000e+00   2.5000000e+00   1.8000000e+00   3.6000000e+00   1.6000000e+00   1.5000000e+00   1.6000000e+00   2.2000000e+00   2.3000000e+00   2.2000000e+00   1.6000000e+00   4.6000000e+00   4.1000000e+00   1.7000000e+00   2.3000000e+00   1.9000000e+00   3.4000000e+00   1.1000000e+00   2.2000000e+00   2.4000000e+00   1.0000000e+00   1.4000000e+00   1.9000000e+00   1.8000000e+00   2.4000000e+00   4.3000000e+00   2.0000000e+00   9.0000000e-01   1.7000000e+00   3.3000000e+00   2.9000000e+00   1.8000000e+00   1.4000000e+00   1.7000000e+00   2.2000000e+00   1.6000000e+00   1.9000000e+00   2.4000000e+00   2.6000000e+00   1.6000000e+00   1.5000000e+00   1.5000000e+00   2.7000000e+00   1.8000000e+00   1.5000000e+00   3.6000000e+00   3.6000000e+00   3.8000000e+00   2.8000000e+00   1.2000000e+00   2.0000000e+00   1.7000000e+00   6.0000000e-01   2.1000000e+00   2.4000000e+00   3.1000000e+00   2.7000000e+00   1.3000000e+00   2.8000000e+00   4.8000000e+00   2.6000000e+00   2.3000000e+00   2.3000000e+00   1.7000000e+00   4.7000000e+00   2.5000000e+00   2.5000000e+00   1.5000000e+00   1.7000000e+00   1.2000000e+00   1.5000000e+00   2.9000000e+00   2.8000000e+00   2.1000000e+00   1.4000000e+00   3.0000000e+00   8.0000000e-01   1.1000000e+00   1.0000000e+00   1.8000000e+00   1.9000000e+00   1.4000000e+00   8.0000000e-01   4.0000000e+00   3.9000000e+00   1.7000000e+00   1.7000000e+00   1.7000000e+00   3.2000000e+00   9.0000000e-01   1.4000000e+00   1.8000000e+00   1.0000000e+00   8.0000000e-01   1.5000000e+00   1.4000000e+00   2.2000000e+00   3.7000000e+00   1.6000000e+00   9.0000000e-01   1.9000000e+00   2.7000000e+00   2.1000000e+00   1.0000000e+00   1.0000000e+00   1.1000000e+00   1.4000000e+00   1.0000000e+00   1.5000000e+00   1.8000000e+00   1.8000000e+00   8.0000000e-01   1.1000000e+00   7.0000000e-01   1.9000000e+00   1.0000000e+00   2.1000000e+00   2.1000000e+00   2.3000000e+00   1.3000000e+00   9.0000000e-01   7.0000000e-01   6.0000000e-01   1.1000000e+00   1.2000000e+00   1.1000000e+00   1.6000000e+00   1.2000000e+00   4.0000000e-01   1.3000000e+00   3.3000000e+00   1.1000000e+00   1.0000000e+00   8.0000000e-01   6.0000000e-01   3.2000000e+00   1.0000000e+00   3.2000000e+00   1.4000000e+00   3.2000000e+00   1.7000000e+00   2.6000000e+00   4.4000000e+00   1.7000000e+00   3.4000000e+00   2.7000000e+00   4.5000000e+00   1.9000000e+00   1.8000000e+00   2.5000000e+00   1.7000000e+00   1.8000000e+00   2.3000000e+00   1.9000000e+00   5.5000000e+00   
5.2000000e+00   1.2000000e+00   3.2000000e+00   1.4000000e+00   4.5000000e+00   1.2000000e+00   2.9000000e+00   3.3000000e+00   9.0000000e-01   9.0000000e-01   2.2000000e+00   2.7000000e+00   3.5000000e+00   5.2000000e+00   2.3000000e+00   1.0000000e+00   1.6000000e+00   4.2000000e+00   2.8000000e+00   1.9000000e+00   7.0000000e-01   2.6000000e+00   2.9000000e+00   2.5000000e+00   1.4000000e+00   3.3000000e+00   3.3000000e+00   2.3000000e+00   1.6000000e+00   1.8000000e+00   2.4000000e+00   1.1000000e+00   8.0000000e-01   6.0000000e-01   8.0000000e-01   2.6000000e+00   2.2000000e+00   2.7000000e+00   3.2000000e+00   2.1000000e+00   1.4000000e+00   1.1000000e+00   1.3000000e+00   2.3000000e+00   8.0000000e-01   1.2000000e+00   1.2000000e+00   1.3000000e+00   1.3000000e+00   1.9000000e+00   1.3000000e+00   1.1000000e+00   5.3000000e+00   2.7000000e+00   5.3000000e+00   3.8000000e+00   4.7000000e+00   6.5000000e+00   2.6000000e+00   5.5000000e+00   4.2000000e+00   6.6000000e+00   4.0000000e+00   3.5000000e+00   4.6000000e+00   2.6000000e+00   3.3000000e+00   4.4000000e+00   4.0000000e+00   7.6000000e+00   6.7000000e+00   2.7000000e+00   5.3000000e+00   2.7000000e+00   6.4000000e+00   2.9000000e+00   5.0000000e+00   5.4000000e+00   2.8000000e+00   3.0000000e+00   4.1000000e+00   4.8000000e+00   5.4000000e+00   7.3000000e+00   4.2000000e+00   2.9000000e+00   2.9000000e+00   6.3000000e+00   4.9000000e+00   4.0000000e+00   2.8000000e+00   4.7000000e+00   5.0000000e+00   4.6000000e+00   2.7000000e+00   5.4000000e+00   5.4000000e+00   4.4000000e+00   3.1000000e+00   3.9000000e+00   4.5000000e+00   3.0000000e+00   2.0000000e-01   8.0000000e-01   2.6000000e+00   1.8000000e+00   2.7000000e+00   3.2000000e+00   1.7000000e+00   1.2000000e+00   5.0000000e-01   9.0000000e-01   2.3000000e+00   8.0000000e-01   1.2000000e+00   1.0000000e+00   1.3000000e+00   1.3000000e+00   1.9000000e+00   1.3000000e+00   1.1000000e+00   5.3000000e+00   2.7000000e+00   5.3000000e+00   3.8000000e+00   4.7000000e+00   6.5000000e+00   2.0000000e+00   5.5000000e+00   4.0000000e+00   6.6000000e+00   4.0000000e+00   3.5000000e+00   4.6000000e+00   2.4000000e+00   3.3000000e+00   4.4000000e+00   4.0000000e+00   7.6000000e+00   6.7000000e+00   2.3000000e+00   5.3000000e+00   2.5000000e+00   6.4000000e+00   2.9000000e+00   5.0000000e+00   5.4000000e+00   2.8000000e+00   3.0000000e+00   4.1000000e+00   4.8000000e+00   5.4000000e+00   7.3000000e+00   4.2000000e+00   2.9000000e+00   2.9000000e+00   6.3000000e+00   4.9000000e+00   4.0000000e+00   2.8000000e+00   4.7000000e+00   5.0000000e+00   4.6000000e+00   2.7000000e+00   5.4000000e+00   5.4000000e+00   4.4000000e+00   2.9000000e+00   3.9000000e+00   4.5000000e+00   3.0000000e+00   1.0000000e+00   2.8000000e+00   2.0000000e+00   2.9000000e+00   3.4000000e+00   1.9000000e+00   1.4000000e+00   7.0000000e-01   1.1000000e+00   2.5000000e+00   1.0000000e+00   1.0000000e+00   1.2000000e+00   1.5000000e+00   1.5000000e+00   2.1000000e+00   1.3000000e+00   1.3000000e+00   5.5000000e+00   2.9000000e+00   5.5000000e+00   4.0000000e+00   4.9000000e+00   6.7000000e+00   2.2000000e+00   5.7000000e+00   4.2000000e+00   6.8000000e+00   4.2000000e+00   3.7000000e+00   4.8000000e+00   2.6000000e+00   3.5000000e+00   4.6000000e+00   4.2000000e+00   7.8000000e+00   6.9000000e+00   2.5000000e+00   5.5000000e+00   2.7000000e+00   6.6000000e+00   3.1000000e+00   5.2000000e+00   5.6000000e+00   3.0000000e+00   3.2000000e+00   4.3000000e+00   5.0000000e+00   5.6000000e+00   7.5000000e+00   4.4000000e+00   
3.1000000e+00   3.1000000e+00   6.5000000e+00   5.1000000e+00   4.2000000e+00   3.0000000e+00   4.9000000e+00   5.2000000e+00   4.8000000e+00   2.9000000e+00   5.6000000e+00   5.6000000e+00   4.6000000e+00   3.1000000e+00   4.1000000e+00   4.7000000e+00   3.2000000e+00   1.8000000e+00   1.6000000e+00   1.9000000e+00   2.4000000e+00   1.5000000e+00   8.0000000e-01   7.0000000e-01   9.0000000e-01   1.5000000e+00   2.0000000e-01   2.0000000e+00   6.0000000e-01   7.0000000e-01   7.0000000e-01   1.1000000e+00   1.9000000e+00   5.0000000e-01   4.5000000e+00   1.9000000e+00   4.5000000e+00   3.0000000e+00   3.9000000e+00   5.7000000e+00   2.2000000e+00   4.7000000e+00   3.6000000e+00   5.8000000e+00   3.2000000e+00   2.7000000e+00   3.8000000e+00   2.2000000e+00   2.5000000e+00   3.6000000e+00   3.2000000e+00   6.8000000e+00   6.1000000e+00   2.1000000e+00   4.5000000e+00   2.1000000e+00   5.6000000e+00   2.1000000e+00   4.2000000e+00   4.6000000e+00   2.0000000e+00   2.2000000e+00   3.3000000e+00   4.0000000e+00   4.6000000e+00   6.5000000e+00   3.4000000e+00   2.1000000e+00   2.3000000e+00   5.5000000e+00   4.1000000e+00   3.2000000e+00   2.0000000e+00   3.9000000e+00   4.2000000e+00   3.8000000e+00   1.9000000e+00   4.6000000e+00   4.6000000e+00   3.6000000e+00   2.5000000e+00   3.1000000e+00   3.7000000e+00   2.2000000e+00   1.6000000e+00   1.3000000e+00   1.6000000e+00   1.7000000e+00   2.0000000e+00   2.1000000e+00   1.7000000e+00   1.1000000e+00   1.8000000e+00   3.8000000e+00   1.6000000e+00   1.9000000e+00   1.7000000e+00   1.5000000e+00   3.7000000e+00   1.7000000e+00   2.7000000e+00   5.0000000e-01   2.7000000e+00   1.2000000e+00   2.1000000e+00   3.9000000e+00   2.0000000e+00   2.9000000e+00   1.8000000e+00   4.0000000e+00   1.4000000e+00   9.0000000e-01   2.0000000e+00   1.0000000e+00   1.1000000e+00   1.8000000e+00   1.4000000e+00   5.0000000e+00   4.3000000e+00   7.0000000e-01   2.7000000e+00   1.1000000e+00   3.8000000e+00   7.0000000e-01   2.4000000e+00   2.8000000e+00   8.0000000e-01   8.0000000e-01   1.5000000e+00   2.2000000e+00   2.8000000e+00   4.7000000e+00   1.6000000e+00   5.0000000e-01   9.0000000e-01   3.7000000e+00   2.3000000e+00   1.4000000e+00   8.0000000e-01   2.1000000e+00   2.4000000e+00   2.0000000e+00   5.0000000e-01   2.8000000e+00   2.8000000e+00   1.8000000e+00   9.0000000e-01   1.3000000e+00   1.9000000e+00   6.0000000e-01   1.1000000e+00   1.6000000e+00   1.9000000e+00   8.0000000e-01   1.3000000e+00   9.0000000e-01   9.0000000e-01   1.6000000e+00   2.8000000e+00   1.0000000e+00   9.0000000e-01   9.0000000e-01   1.3000000e+00   2.7000000e+00   1.1000000e+00   3.7000000e+00   1.7000000e+00   3.7000000e+00   2.4000000e+00   3.1000000e+00   4.9000000e+00   1.2000000e+00   4.1000000e+00   3.4000000e+00   5.0000000e+00   2.4000000e+00   2.5000000e+00   3.0000000e+00   1.8000000e+00   2.1000000e+00   2.8000000e+00   2.4000000e+00   6.0000000e+00   5.9000000e+00   1.9000000e+00   3.7000000e+00   1.3000000e+00   5.2000000e+00   1.9000000e+00   3.4000000e+00   3.8000000e+00   1.6000000e+00   1.4000000e+00   2.9000000e+00   3.2000000e+00   4.2000000e+00   5.7000000e+00   3.0000000e+00   1.7000000e+00   2.3000000e+00   4.7000000e+00   3.3000000e+00   2.4000000e+00   1.2000000e+00   3.1000000e+00   3.4000000e+00   3.0000000e+00   1.7000000e+00   3.8000000e+00   3.8000000e+00   2.8000000e+00   2.3000000e+00   2.3000000e+00   2.9000000e+00   1.4000000e+00   1.3000000e+00   1.8000000e+00   1.5000000e+00   2.2000000e+00   1.8000000e+00   8.0000000e-01   1.9000000e+00   
3.9000000e+00   1.7000000e+00   1.4000000e+00   1.4000000e+00   1.2000000e+00   3.8000000e+00   1.6000000e+00   2.8000000e+00   1.8000000e+00   3.4000000e+00   2.1000000e+00   2.8000000e+00   4.6000000e+00   2.1000000e+00   3.8000000e+00   3.1000000e+00   3.9000000e+00   1.7000000e+00   2.2000000e+00   2.7000000e+00   2.1000000e+00   2.2000000e+00   2.1000000e+00   2.1000000e+00   4.9000000e+00   5.6000000e+00   1.8000000e+00   3.0000000e+00   1.8000000e+00   4.9000000e+00   1.6000000e+00   2.5000000e+00   3.1000000e+00   1.3000000e+00   1.1000000e+00   2.6000000e+00   2.9000000e+00   3.9000000e+00   4.6000000e+00   2.7000000e+00   1.6000000e+00   2.2000000e+00   4.4000000e+00   2.2000000e+00   1.9000000e+00   9.0000000e-01   2.6000000e+00   2.9000000e+00   2.5000000e+00   1.8000000e+00   3.1000000e+00   2.9000000e+00   2.5000000e+00   2.0000000e+00   2.0000000e+00   1.8000000e+00   1.3000000e+00   1.7000000e+00   2.0000000e+00   2.7000000e+00   2.3000000e+00   9.0000000e-01   2.4000000e+00   4.4000000e+00   2.2000000e+00   1.9000000e+00   1.9000000e+00   1.3000000e+00   4.3000000e+00   2.1000000e+00   2.9000000e+00   2.1000000e+00   2.3000000e+00   1.8000000e+00   2.1000000e+00   3.5000000e+00   2.8000000e+00   2.7000000e+00   2.0000000e+00   3.4000000e+00   1.2000000e+00   1.7000000e+00   1.6000000e+00   2.4000000e+00   2.5000000e+00   1.8000000e+00   1.4000000e+00   4.4000000e+00   4.5000000e+00   1.9000000e+00   2.1000000e+00   2.1000000e+00   3.8000000e+00   1.3000000e+00   1.8000000e+00   2.2000000e+00   1.2000000e+00   1.2000000e+00   2.1000000e+00   1.8000000e+00   2.8000000e+00   4.1000000e+00   2.2000000e+00   1.1000000e+00   2.1000000e+00   3.3000000e+00   2.5000000e+00   1.4000000e+00   1.2000000e+00   1.5000000e+00   1.8000000e+00   1.4000000e+00   2.1000000e+00   2.2000000e+00   2.2000000e+00   1.4000000e+00   1.7000000e+00   1.3000000e+00   2.3000000e+00   1.6000000e+00   1.7000000e+00   1.4000000e+00   1.2000000e+00   1.2000000e+00   1.3000000e+00   2.7000000e+00   1.3000000e+00   1.6000000e+00   1.4000000e+00   8.0000000e-01   3.0000000e+00   1.4000000e+00   3.8000000e+00   2.2000000e+00   3.8000000e+00   2.3000000e+00   3.2000000e+00   5.0000000e+00   2.1000000e+00   4.0000000e+00   2.5000000e+00   5.1000000e+00   2.5000000e+00   2.0000000e+00   3.1000000e+00   2.1000000e+00   2.8000000e+00   2.9000000e+00   2.5000000e+00   6.1000000e+00   5.2000000e+00   1.2000000e+00   3.8000000e+00   2.4000000e+00   4.9000000e+00   1.4000000e+00   3.5000000e+00   3.9000000e+00   1.5000000e+00   1.9000000e+00   2.6000000e+00   3.3000000e+00   3.9000000e+00   5.8000000e+00   2.7000000e+00   1.4000000e+00   1.8000000e+00   4.8000000e+00   3.4000000e+00   2.5000000e+00   1.9000000e+00   3.2000000e+00   3.5000000e+00   3.1000000e+00   2.2000000e+00   3.9000000e+00   3.9000000e+00   2.9000000e+00   1.4000000e+00   2.4000000e+00   3.2000000e+00   2.3000000e+00   7.0000000e-01   9.0000000e-01   1.1000000e+00   8.0000000e-01   2.4000000e+00   4.0000000e-01   3.0000000e-01   3.0000000e-01   9.0000000e-01   2.3000000e+00   3.0000000e-01   4.1000000e+00   2.1000000e+00   4.1000000e+00   2.8000000e+00   3.5000000e+00   5.3000000e+00   2.0000000e+00   4.5000000e+00   3.8000000e+00   5.4000000e+00   2.8000000e+00   2.9000000e+00   3.4000000e+00   2.2000000e+00   2.5000000e+00   3.2000000e+00   2.8000000e+00   6.4000000e+00   6.3000000e+00   2.3000000e+00   4.1000000e+00   1.7000000e+00   5.6000000e+00   2.3000000e+00   3.8000000e+00   4.2000000e+00   2.0000000e+00   1.8000000e+00   3.3000000e+00   
3.6000000e+00   4.6000000e+00   6.1000000e+00   3.4000000e+00   2.1000000e+00   2.5000000e+00   5.1000000e+00   3.7000000e+00   2.8000000e+00   1.6000000e+00   3.5000000e+00   3.8000000e+00   3.4000000e+00   2.1000000e+00   4.2000000e+00   4.2000000e+00   3.2000000e+00   2.7000000e+00   2.7000000e+00   3.3000000e+00   1.8000000e+00   6.0000000e-01   1.8000000e+00   5.0000000e-01   1.7000000e+00   5.0000000e-01   1.0000000e+00   8.0000000e-01   1.4000000e+00   1.6000000e+00   6.0000000e-01   4.8000000e+00   2.2000000e+00   4.8000000e+00   3.3000000e+00   4.2000000e+00   6.0000000e+00   1.5000000e+00   5.0000000e+00   3.5000000e+00   6.1000000e+00   3.5000000e+00   3.0000000e+00   4.1000000e+00   1.9000000e+00   2.8000000e+00   3.9000000e+00   3.5000000e+00   7.1000000e+00   6.2000000e+00   2.0000000e+00   4.8000000e+00   2.0000000e+00   5.9000000e+00   2.4000000e+00   4.5000000e+00   4.9000000e+00   2.3000000e+00   2.5000000e+00   3.6000000e+00   4.3000000e+00   4.9000000e+00   6.8000000e+00   3.7000000e+00   2.4000000e+00   2.4000000e+00   5.8000000e+00   4.4000000e+00   3.5000000e+00   2.3000000e+00   4.2000000e+00   4.5000000e+00   4.1000000e+00   2.2000000e+00   4.9000000e+00   4.9000000e+00   3.9000000e+00   2.4000000e+00   3.4000000e+00   4.0000000e+00   2.5000000e+00   1.4000000e+00   7.0000000e-01   2.1000000e+00   5.0000000e-01   8.0000000e-01   8.0000000e-01   1.2000000e+00   2.0000000e+00   8.0000000e-01   4.4000000e+00   1.8000000e+00   4.4000000e+00   2.9000000e+00   3.8000000e+00   5.6000000e+00   1.3000000e+00   4.6000000e+00   3.3000000e+00   5.7000000e+00   3.1000000e+00   2.6000000e+00   3.7000000e+00   1.7000000e+00   2.4000000e+00   3.5000000e+00   3.1000000e+00   6.7000000e+00   5.8000000e+00   1.8000000e+00   4.4000000e+00   1.6000000e+00   5.5000000e+00   2.0000000e+00   4.1000000e+00   4.5000000e+00   1.9000000e+00   2.1000000e+00   3.2000000e+00   3.9000000e+00   4.5000000e+00   6.4000000e+00   3.3000000e+00   2.0000000e+00   2.0000000e+00   5.4000000e+00   4.0000000e+00   3.1000000e+00   1.9000000e+00   3.8000000e+00   4.1000000e+00   3.7000000e+00   1.8000000e+00   4.5000000e+00   4.5000000e+00   3.5000000e+00   2.2000000e+00   3.0000000e+00   3.6000000e+00   2.1000000e+00   1.5000000e+00   3.5000000e+00   1.3000000e+00   1.0000000e+00   1.0000000e+00   6.0000000e-01   3.4000000e+00   1.2000000e+00   3.0000000e+00   1.6000000e+00   3.0000000e+00   1.7000000e+00   2.4000000e+00   4.2000000e+00   2.1000000e+00   3.4000000e+00   2.7000000e+00   4.3000000e+00   1.7000000e+00   1.8000000e+00   2.3000000e+00   1.9000000e+00   2.0000000e+00   2.1000000e+00   1.7000000e+00   5.3000000e+00   5.2000000e+00   1.4000000e+00   3.0000000e+00   1.6000000e+00   4.5000000e+00   1.2000000e+00   2.7000000e+00   3.1000000e+00   9.0000000e-01   7.0000000e-01   2.2000000e+00   2.5000000e+00   3.5000000e+00   5.0000000e+00   2.3000000e+00   1.0000000e+00   1.4000000e+00   4.0000000e+00   2.6000000e+00   1.7000000e+00   7.0000000e-01   2.4000000e+00   2.7000000e+00   2.3000000e+00   1.6000000e+00   3.1000000e+00   3.1000000e+00   2.1000000e+00   1.6000000e+00   1.6000000e+00   2.2000000e+00   1.1000000e+00   2.0000000e+00   6.0000000e-01   7.0000000e-01   7.0000000e-01   1.1000000e+00   1.9000000e+00   5.0000000e-01   4.5000000e+00   1.9000000e+00   4.5000000e+00   3.0000000e+00   3.9000000e+00   5.7000000e+00   2.0000000e+00   4.7000000e+00   3.4000000e+00   5.8000000e+00   3.2000000e+00   2.7000000e+00   3.8000000e+00   2.0000000e+00   2.5000000e+00   3.6000000e+00   3.2000000e+00   
6.8000000e+00   5.9000000e+00   1.9000000e+00   4.5000000e+00   2.1000000e+00   5.6000000e+00   2.1000000e+00   4.2000000e+00   4.6000000e+00   2.0000000e+00   2.2000000e+00   3.3000000e+00   4.0000000e+00   4.6000000e+00   6.5000000e+00   3.4000000e+00   2.1000000e+00   2.1000000e+00   5.5000000e+00   4.1000000e+00   3.2000000e+00   2.0000000e+00   3.9000000e+00   4.2000000e+00   3.8000000e+00   1.9000000e+00   4.6000000e+00   4.6000000e+00   3.6000000e+00   2.3000000e+00   3.1000000e+00   3.7000000e+00   2.2000000e+00   2.2000000e+00   2.5000000e+00   2.5000000e+00   3.1000000e+00   7.0000000e-01   2.3000000e+00   6.5000000e+00   3.9000000e+00   6.5000000e+00   5.0000000e+00   5.9000000e+00   7.7000000e+00   2.2000000e+00   6.7000000e+00   5.2000000e+00   7.8000000e+00   5.2000000e+00   4.7000000e+00   5.8000000e+00   3.6000000e+00   4.5000000e+00   5.6000000e+00   5.2000000e+00   8.8000000e+00   7.9000000e+00   3.3000000e+00   6.5000000e+00   3.7000000e+00   7.6000000e+00   4.1000000e+00   6.2000000e+00   6.6000000e+00   4.0000000e+00   4.2000000e+00   5.3000000e+00   6.0000000e+00   6.6000000e+00   8.5000000e+00   5.4000000e+00   4.1000000e+00   4.1000000e+00   7.5000000e+00   6.1000000e+00   5.2000000e+00   4.0000000e+00   5.9000000e+00   6.2000000e+00   5.8000000e+00   3.9000000e+00   6.6000000e+00   6.6000000e+00   5.6000000e+00   4.1000000e+00   5.1000000e+00   5.7000000e+00   4.2000000e+00   5.0000000e-01   3.0000000e-01   9.0000000e-01   2.1000000e+00   3.0000000e-01   4.3000000e+00   1.7000000e+00   4.3000000e+00   2.8000000e+00   3.7000000e+00   5.5000000e+00   1.6000000e+00   4.5000000e+00   3.4000000e+00   5.6000000e+00   3.0000000e+00   2.5000000e+00   3.6000000e+00   1.8000000e+00   2.3000000e+00   3.4000000e+00   3.0000000e+00   6.6000000e+00   5.9000000e+00   1.9000000e+00   4.3000000e+00   1.5000000e+00   5.4000000e+00   1.9000000e+00   4.0000000e+00   4.4000000e+00   1.8000000e+00   2.0000000e+00   3.1000000e+00   3.8000000e+00   4.4000000e+00   6.3000000e+00   3.2000000e+00   1.9000000e+00   2.1000000e+00   5.3000000e+00   3.9000000e+00   3.0000000e+00   1.8000000e+00   3.7000000e+00   4.0000000e+00   3.6000000e+00   1.7000000e+00   4.4000000e+00   4.4000000e+00   3.4000000e+00   2.3000000e+00   2.9000000e+00   3.5000000e+00   2.0000000e+00   2.0000000e-01   8.0000000e-01   2.4000000e+00   4.0000000e-01   4.0000000e+00   2.0000000e+00   4.0000000e+00   2.7000000e+00   3.4000000e+00   5.2000000e+00   2.1000000e+00   4.4000000e+00   3.7000000e+00   5.3000000e+00   2.7000000e+00   2.8000000e+00   3.3000000e+00   2.1000000e+00   2.4000000e+00   3.1000000e+00   2.7000000e+00   6.3000000e+00   6.2000000e+00   2.2000000e+00   4.0000000e+00   1.8000000e+00   5.5000000e+00   2.2000000e+00   3.7000000e+00   4.1000000e+00   1.9000000e+00   1.7000000e+00   3.2000000e+00   3.5000000e+00   4.5000000e+00   6.0000000e+00   3.3000000e+00   2.0000000e+00   2.4000000e+00   5.0000000e+00   3.6000000e+00   2.7000000e+00   1.5000000e+00   3.4000000e+00   3.7000000e+00   3.3000000e+00   2.0000000e+00   4.1000000e+00   4.1000000e+00   3.1000000e+00   2.6000000e+00   2.6000000e+00   3.2000000e+00   1.7000000e+00   6.0000000e-01   2.4000000e+00   2.0000000e-01   4.0000000e+00   1.8000000e+00   4.0000000e+00   2.5000000e+00   3.4000000e+00   5.2000000e+00   1.9000000e+00   4.2000000e+00   3.5000000e+00   5.3000000e+00   2.7000000e+00   2.6000000e+00   3.3000000e+00   1.9000000e+00   2.2000000e+00   3.1000000e+00   2.7000000e+00   6.3000000e+00   6.0000000e+00   2.0000000e+00   4.0000000e+00   
1.6000000e+00   5.3000000e+00   2.0000000e+00   3.7000000e+00   4.1000000e+00   1.7000000e+00   1.7000000e+00   3.0000000e+00   3.5000000e+00   4.3000000e+00   6.0000000e+00   3.1000000e+00   1.8000000e+00   2.2000000e+00   5.0000000e+00   3.6000000e+00   2.7000000e+00   1.5000000e+00   3.4000000e+00   3.7000000e+00   3.3000000e+00   1.8000000e+00   4.1000000e+00   4.1000000e+00   3.1000000e+00   2.4000000e+00   2.6000000e+00   3.2000000e+00   1.7000000e+00   3.0000000e+00   8.0000000e-01   3.4000000e+00   2.0000000e+00   3.4000000e+00   1.9000000e+00   2.8000000e+00   4.6000000e+00   2.3000000e+00   3.6000000e+00   2.9000000e+00   4.7000000e+00   2.1000000e+00   2.0000000e+00   2.7000000e+00   2.3000000e+00   2.4000000e+00   2.5000000e+00   2.1000000e+00   5.7000000e+00   5.4000000e+00   1.8000000e+00   3.4000000e+00   2.0000000e+00   4.7000000e+00   1.4000000e+00   3.1000000e+00   3.5000000e+00   1.1000000e+00   1.3000000e+00   2.4000000e+00   2.9000000e+00   3.7000000e+00   5.4000000e+00   2.5000000e+00   1.2000000e+00   1.8000000e+00   4.4000000e+00   3.0000000e+00   2.1000000e+00   1.3000000e+00   2.8000000e+00   3.1000000e+00   2.7000000e+00   2.0000000e+00   3.5000000e+00   3.5000000e+00   2.5000000e+00   1.8000000e+00   2.0000000e+00   2.6000000e+00   1.7000000e+00   2.2000000e+00   6.4000000e+00   3.8000000e+00   6.4000000e+00   4.9000000e+00   5.8000000e+00   7.6000000e+00   2.3000000e+00   6.6000000e+00   5.1000000e+00   7.7000000e+00   5.1000000e+00   4.6000000e+00   5.7000000e+00   3.5000000e+00   4.4000000e+00   5.5000000e+00   5.1000000e+00   8.7000000e+00   7.8000000e+00   3.6000000e+00   6.4000000e+00   3.6000000e+00   7.5000000e+00   4.0000000e+00   6.1000000e+00   6.5000000e+00   3.9000000e+00   4.1000000e+00   5.2000000e+00   5.9000000e+00   6.5000000e+00   8.4000000e+00   5.3000000e+00   4.0000000e+00   4.0000000e+00   7.4000000e+00   6.0000000e+00   5.1000000e+00   3.9000000e+00   5.8000000e+00   6.1000000e+00   5.7000000e+00   3.8000000e+00   6.5000000e+00   6.5000000e+00   5.5000000e+00   4.0000000e+00   5.0000000e+00   5.6000000e+00   4.1000000e+00   4.2000000e+00   1.8000000e+00   4.2000000e+00   2.7000000e+00   3.6000000e+00   5.4000000e+00   1.9000000e+00   4.4000000e+00   3.5000000e+00   5.5000000e+00   2.9000000e+00   2.6000000e+00   3.5000000e+00   1.9000000e+00   2.2000000e+00   3.3000000e+00   2.9000000e+00   6.5000000e+00   6.0000000e+00   2.0000000e+00   4.2000000e+00   1.6000000e+00   5.3000000e+00   2.0000000e+00   3.9000000e+00   4.3000000e+00   1.7000000e+00   1.9000000e+00   3.0000000e+00   3.7000000e+00   4.3000000e+00   6.2000000e+00   3.1000000e+00   1.8000000e+00   2.2000000e+00   5.2000000e+00   3.8000000e+00   2.9000000e+00   1.7000000e+00   3.6000000e+00   3.9000000e+00   3.5000000e+00   1.8000000e+00   4.3000000e+00   4.3000000e+00   3.3000000e+00   2.4000000e+00   2.8000000e+00   3.4000000e+00   1.9000000e+00   2.6000000e+00   1.6000000e+00   1.5000000e+00   1.0000000e+00   2.6000000e+00   4.5000000e+00   2.4000000e+00   2.1000000e+00   1.3000000e+00   1.7000000e+00   2.0000000e+00   1.7000000e+00   2.9000000e+00   2.0000000e+00   1.1000000e+00   1.7000000e+00   2.9000000e+00   3.2000000e+00   3.4000000e+00   1.2000000e+00   2.8000000e+00   3.1000000e+00   2.4000000e+00   1.1000000e+00   1.7000000e+00   2.5000000e+00   2.3000000e+00   1.4000000e+00   2.3000000e+00   2.3000000e+00   3.0000000e+00   1.3000000e+00   2.4000000e+00   2.4000000e+00   2.0000000e+00   6.0000000e-01   1.5000000e+00   2.5000000e+00   1.8000000e+00   1.1000000e+00   
1.9000000e+00   2.6000000e+00   9.0000000e-01   7.0000000e-01   1.7000000e+00   2.4000000e+00   1.8000000e+00   1.0000000e+00   2.3000000e+00   2.6000000e+00   1.3000000e+00   2.0000000e+00   3.8000000e+00   1.9000000e+00   3.0000000e+00   1.9000000e+00   3.9000000e+00   1.3000000e+00   8.0000000e-01   1.9000000e+00   5.0000000e-01   6.0000000e-01   1.7000000e+00   1.5000000e+00   4.9000000e+00   4.2000000e+00   1.2000000e+00   2.6000000e+00   6.0000000e-01   3.7000000e+00   8.0000000e-01   2.3000000e+00   2.9000000e+00   9.0000000e-01   9.0000000e-01   1.4000000e+00   2.7000000e+00   2.7000000e+00   4.6000000e+00   1.5000000e+00   1.0000000e+00   1.4000000e+00   3.6000000e+00   2.2000000e+00   1.5000000e+00   9.0000000e-01   2.0000000e+00   2.3000000e+00   1.9000000e+00   0.0000000e+00   2.7000000e+00   2.7000000e+00   1.7000000e+00   8.0000000e-01   1.2000000e+00   1.8000000e+00   5.0000000e-01   1.5000000e+00   8.0000000e-01   1.2000000e+00   4.5000000e+00   1.0000000e+00   1.3000000e+00   1.3000000e+00   1.7000000e+00   1.8000000e+00   7.0000000e-01   2.9000000e+00   2.6000000e+00   1.7000000e+00   1.3000000e+00   2.3000000e+00   2.2000000e+00   3.4000000e+00   8.0000000e-01   2.8000000e+00   1.7000000e+00   2.4000000e+00   9.0000000e-01   7.0000000e-01   2.5000000e+00   2.3000000e+00   1.2000000e+00   7.0000000e-01   9.0000000e-01   2.2000000e+00   1.3000000e+00   2.4000000e+00   2.4000000e+00   1.0000000e+00   1.8000000e+00   1.5000000e+00   2.5000000e+00   8.0000000e-01   1.1000000e+00   1.3000000e+00   2.6000000e+00   7.0000000e-01   1.3000000e+00   1.3000000e+00   2.4000000e+00   1.4000000e+00   2.0000000e+00   2.3000000e+00   9.0000000e-01   2.7000000e+00   3.0000000e+00   1.7000000e+00   1.0000000e+00   2.8000000e+00   1.2000000e+00   7.0000000e-01   1.0000000e+00   1.8000000e+00   1.7000000e+00   1.2000000e+00   4.0000000e-01   3.8000000e+00   3.5000000e+00   1.9000000e+00   1.5000000e+00   1.7000000e+00   2.8000000e+00   9.0000000e-01   1.2000000e+00   1.6000000e+00   1.0000000e+00   1.0000000e+00   5.0000000e-01   1.4000000e+00   1.8000000e+00   3.5000000e+00   6.0000000e-01   9.0000000e-01   9.0000000e-01   2.5000000e+00   1.1000000e+00   4.0000000e-01   1.2000000e+00   1.3000000e+00   1.2000000e+00   1.8000000e+00   1.3000000e+00   1.6000000e+00   1.6000000e+00   1.4000000e+00   1.1000000e+00   9.0000000e-01   1.3000000e+00   1.0000000e+00   2.0000000e+00   3.9000000e+00   1.8000000e+00   1.1000000e+00   1.9000000e+00   1.1000000e+00   1.2000000e+00   7.0000000e-01   2.3000000e+00   1.8000000e+00   9.0000000e-01   7.0000000e-01   2.9000000e+00   2.8000000e+00   2.8000000e+00   8.0000000e-01   2.2000000e+00   2.5000000e+00   1.8000000e+00   7.0000000e-01   1.5000000e+00   1.9000000e+00   1.7000000e+00   6.0000000e-01   1.3000000e+00   1.7000000e+00   3.0000000e+00   5.0000000e-01   1.8000000e+00   1.8000000e+00   1.6000000e+00   1.0000000e+00   9.0000000e-01   1.9000000e+00   1.0000000e+00   7.0000000e-01   1.3000000e+00   2.0000000e+00   7.0000000e-01   9.0000000e-01   9.0000000e-01   1.8000000e+00   8.0000000e-01   1.2000000e+00   1.7000000e+00   5.7000000e+00   1.0000000e+00   2.5000000e+00   1.9000000e+00   2.9000000e+00   3.0000000e+00   1.9000000e+00   4.1000000e+00   3.8000000e+00   2.9000000e+00   2.5000000e+00   1.1000000e+00   1.0000000e+00   4.6000000e+00   2.0000000e+00   4.0000000e+00   5.0000000e-01   3.6000000e+00   2.1000000e+00   1.5000000e+00   3.7000000e+00   3.5000000e+00   2.4000000e+00   1.7000000e+00   1.1000000e+00   1.4000000e+00   2.5000000e+00   
3.6000000e+00   3.6000000e+00   8.0000000e-01   3.0000000e+00   2.7000000e+00   3.7000000e+00   2.0000000e+00   2.3000000e+00   2.5000000e+00   3.8000000e+00   1.9000000e+00   2.5000000e+00   2.5000000e+00   3.6000000e+00   2.6000000e+00   3.2000000e+00   3.5000000e+00   4.7000000e+00   3.2000000e+00   5.8000000e+00   3.2000000e+00   2.7000000e+00   3.8000000e+00   1.6000000e+00   2.5000000e+00   3.6000000e+00   3.2000000e+00   6.8000000e+00   5.9000000e+00   2.1000000e+00   4.5000000e+00   1.7000000e+00   5.6000000e+00   2.1000000e+00   4.2000000e+00   4.6000000e+00   2.0000000e+00   2.2000000e+00   3.3000000e+00   4.2000000e+00   4.6000000e+00   6.5000000e+00   3.4000000e+00   2.5000000e+00   2.7000000e+00   5.5000000e+00   4.1000000e+00   3.2000000e+00   2.0000000e+00   3.9000000e+00   4.2000000e+00   3.8000000e+00   1.9000000e+00   4.6000000e+00   4.6000000e+00   3.6000000e+00   2.1000000e+00   3.1000000e+00   3.7000000e+00   2.2000000e+00   1.5000000e+00   1.7000000e+00   2.5000000e+00   2.2000000e+00   1.7000000e+00   3.5000000e+00   3.4000000e+00   2.7000000e+00   1.7000000e+00   2.1000000e+00   1.8000000e+00   3.6000000e+00   1.8000000e+00   3.4000000e+00   1.1000000e+00   2.6000000e+00   1.9000000e+00   7.0000000e-01   2.7000000e+00   2.7000000e+00   2.0000000e+00   9.0000000e-01   5.0000000e-01   1.8000000e+00   2.1000000e+00   2.6000000e+00   2.6000000e+00   1.2000000e+00   2.8000000e+00   1.9000000e+00   2.9000000e+00   1.8000000e+00   2.1000000e+00   2.3000000e+00   3.0000000e+00   1.7000000e+00   2.3000000e+00   2.3000000e+00   2.8000000e+00   2.2000000e+00   3.0000000e+00   2.7000000e+00   2.6000000e+00   1.8000000e+00   1.1000000e+00   1.2000000e+00   2.0000000e+00   2.5000000e+00   2.0000000e+00   1.0000000e+00   3.6000000e+00   2.7000000e+00   2.1000000e+00   1.5000000e+00   2.5000000e+00   2.4000000e+00   1.5000000e+00   1.2000000e+00   1.4000000e+00   1.8000000e+00   2.0000000e+00   1.1000000e+00   1.2000000e+00   1.4000000e+00   3.3000000e+00   1.2000000e+00   1.7000000e+00   1.3000000e+00   2.3000000e+00   2.1000000e+00   1.2000000e+00   2.2000000e+00   1.5000000e+00   1.4000000e+00   2.0000000e+00   1.9000000e+00   1.4000000e+00   1.6000000e+00   1.6000000e+00   1.3000000e+00   1.5000000e+00   2.3000000e+00   2.0000000e+00   2.6000000e+00   3.1000000e+00   2.0000000e+00   4.2000000e+00   3.3000000e+00   2.2000000e+00   2.6000000e+00   1.6000000e+00   2.5000000e+00   4.7000000e+00   1.3000000e+00   4.1000000e+00   2.4000000e+00   3.7000000e+00   1.6000000e+00   1.2000000e+00   3.8000000e+00   3.6000000e+00   2.5000000e+00   1.8000000e+00   1.6000000e+00   1.7000000e+00   2.4000000e+00   3.7000000e+00   3.7000000e+00   1.3000000e+00   1.7000000e+00   2.6000000e+00   3.8000000e+00   1.9000000e+00   1.6000000e+00   2.0000000e+00   3.9000000e+00   1.2000000e+00   1.2000000e+00   2.2000000e+00   3.7000000e+00   2.7000000e+00   2.1000000e+00   3.6000000e+00   9.0000000e-01   1.0000000e+00   1.6000000e+00   1.5000000e+00   6.0000000e-01   8.0000000e-01   3.6000000e+00   3.9000000e+00   2.1000000e+00   1.3000000e+00   1.5000000e+00   3.2000000e+00   1.1000000e+00   1.0000000e+00   1.8000000e+00   1.2000000e+00   1.0000000e+00   1.1000000e+00   2.0000000e+00   2.4000000e+00   3.3000000e+00   1.2000000e+00   1.1000000e+00   2.1000000e+00   2.7000000e+00   1.3000000e+00   8.0000000e-01   1.2000000e+00   9.0000000e-01   1.2000000e+00   8.0000000e-01   1.3000000e+00   1.4000000e+00   1.4000000e+00   8.0000000e-01   1.1000000e+00   3.0000000e-01   1.1000000e+00   1.0000000e+00   
1.1000000e+00   1.3000000e+00   1.4000000e+00   9.0000000e-01   7.0000000e-01   4.1000000e+00   3.4000000e+00   1.6000000e+00   1.8000000e+00   1.4000000e+00   2.9000000e+00   6.0000000e-01   1.5000000e+00   2.1000000e+00   9.0000000e-01   1.1000000e+00   6.0000000e-01   1.9000000e+00   1.9000000e+00   3.8000000e+00   7.0000000e-01   8.0000000e-01   1.2000000e+00   2.8000000e+00   1.6000000e+00   7.0000000e-01   1.3000000e+00   1.2000000e+00   1.5000000e+00   1.5000000e+00   8.0000000e-01   1.9000000e+00   1.9000000e+00   1.1000000e+00   6.0000000e-01   6.0000000e-01   1.4000000e+00   1.1000000e+00   2.2000000e+00   1.9000000e+00   1.0000000e+00   6.0000000e-01   3.0000000e+00   2.9000000e+00   2.7000000e+00   7.0000000e-01   2.1000000e+00   2.4000000e+00   1.7000000e+00   6.0000000e-01   1.4000000e+00   1.8000000e+00   1.6000000e+00   7.0000000e-01   1.2000000e+00   1.6000000e+00   2.9000000e+00   8.0000000e-01   1.7000000e+00   1.9000000e+00   1.7000000e+00   1.3000000e+00   8.0000000e-01   1.8000000e+00   3.0000000e-01   6.0000000e-01   8.0000000e-01   1.9000000e+00   8.0000000e-01   1.0000000e+00   6.0000000e-01   1.7000000e+00   7.0000000e-01   1.3000000e+00   1.6000000e+00   9.0000000e-01   2.0000000e+00   2.0000000e+00   5.2000000e+00   4.3000000e+00   1.1000000e+00   2.9000000e+00   5.0000000e-01   4.0000000e+00   1.1000000e+00   2.6000000e+00   3.4000000e+00   1.2000000e+00   1.2000000e+00   1.7000000e+00   3.2000000e+00   3.2000000e+00   4.9000000e+00   1.8000000e+00   1.5000000e+00   1.7000000e+00   3.9000000e+00   2.5000000e+00   2.0000000e+00   1.2000000e+00   2.3000000e+00   2.6000000e+00   2.2000000e+00   5.0000000e-01   3.0000000e+00   3.0000000e+00   2.0000000e+00   7.0000000e-01   1.5000000e+00   2.1000000e+00   1.0000000e+00   1.3000000e+00   1.9000000e+00   4.7000000e+00   4.0000000e+00   1.8000000e+00   2.2000000e+00   8.0000000e-01   3.9000000e+00   1.4000000e+00   2.3000000e+00   3.3000000e+00   1.3000000e+00   1.3000000e+00   1.4000000e+00   3.1000000e+00   3.1000000e+00   4.8000000e+00   1.3000000e+00   1.4000000e+00   2.0000000e+00   3.2000000e+00   1.6000000e+00   1.9000000e+00   1.3000000e+00   2.0000000e+00   1.7000000e+00   1.5000000e+00   6.0000000e-01   2.3000000e+00   2.1000000e+00   1.3000000e+00   1.4000000e+00   1.4000000e+00   1.4000000e+00   9.0000000e-01   1.0000000e+00   3.4000000e+00   3.5000000e+00   2.5000000e+00   9.0000000e-01   1.9000000e+00   3.4000000e+00   1.5000000e+00   1.0000000e+00   2.0000000e+00   1.6000000e+00   1.4000000e+00   9.0000000e-01   2.2000000e+00   2.6000000e+00   3.5000000e+00   8.0000000e-01   1.5000000e+00   2.1000000e+00   2.3000000e+00   7.0000000e-01   8.0000000e-01   1.6000000e+00   9.0000000e-01   8.0000000e-01   8.0000000e-01   1.7000000e+00   1.0000000e+00   1.0000000e+00   6.0000000e-01   1.5000000e+00   7.0000000e-01   5.0000000e-01   1.4000000e+00   3.6000000e+00   3.5000000e+00   2.1000000e+00   1.3000000e+00   1.9000000e+00   2.8000000e+00   1.1000000e+00   1.0000000e+00   1.4000000e+00   1.2000000e+00   1.0000000e+00   7.0000000e-01   1.2000000e+00   1.8000000e+00   3.3000000e+00   8.0000000e-01   1.1000000e+00   1.3000000e+00   2.3000000e+00   1.3000000e+00   2.0000000e-01   1.2000000e+00   9.0000000e-01   1.0000000e+00   1.4000000e+00   1.5000000e+00   1.4000000e+00   1.4000000e+00   1.0000000e+00   1.3000000e+00   5.0000000e-01   1.3000000e+00   1.0000000e+00   1.5000000e+00   5.7000000e+00   2.5000000e+00   5.1000000e+00   1.2000000e+00   4.7000000e+00   2.6000000e+00   2.2000000e+00   4.8000000e+00   
4.6000000e+00   3.5000000e+00   2.8000000e+00   2.2000000e+00   7.0000000e-01   3.4000000e+00   4.7000000e+00   4.7000000e+00   1.5000000e+00   3.1000000e+00   3.6000000e+00   4.8000000e+00   2.9000000e+00   3.0000000e+00   3.2000000e+00   4.9000000e+00   2.4000000e+00   2.8000000e+00   3.4000000e+00   4.7000000e+00   3.7000000e+00   3.3000000e+00   4.6000000e+00   4.8000000e+00   2.6000000e+00   4.6000000e+00   7.0000000e-01   4.0000000e+00   3.1000000e+00   2.5000000e+00   4.3000000e+00   4.5000000e+00   3.0000000e+00   2.7000000e+00   1.7000000e+00   2.2000000e+00   2.9000000e+00   4.2000000e+00   3.8000000e+00   1.2000000e+00   3.6000000e+00   3.7000000e+00   4.7000000e+00   3.0000000e+00   2.9000000e+00   3.1000000e+00   4.2000000e+00   2.5000000e+00   3.1000000e+00   3.1000000e+00   3.8000000e+00   3.6000000e+00   3.8000000e+00   4.5000000e+00   3.4000000e+00   1.6000000e+00   4.5000000e+00   1.2000000e+00   3.1000000e+00   3.5000000e+00   1.3000000e+00   1.3000000e+00   2.2000000e+00   2.9000000e+00   3.5000000e+00   5.4000000e+00   2.3000000e+00   1.0000000e+00   1.2000000e+00   4.4000000e+00   3.0000000e+00   2.1000000e+00   1.3000000e+00   2.8000000e+00   3.1000000e+00   2.7000000e+00   1.2000000e+00   3.5000000e+00   3.5000000e+00   2.5000000e+00   1.0000000e+00   2.0000000e+00   2.6000000e+00   1.3000000e+00   2.8000000e+00   2.5000000e+00   2.4000000e+00   5.0000000e-01   1.1000000e+00   2.5000000e+00   2.3000000e+00   1.2000000e+00   1.3000000e+00   1.7000000e+00   2.6000000e+00   1.1000000e+00   2.4000000e+00   2.4000000e+00   1.4000000e+00   1.0000000e+00   1.3000000e+00   2.5000000e+00   6.0000000e-01   5.0000000e-01   7.0000000e-01   2.6000000e+00   3.0000000e-01   5.0000000e-01   9.0000000e-01   2.4000000e+00   1.4000000e+00   1.2000000e+00   2.3000000e+00   3.9000000e+00   1.0000000e+00   2.5000000e+00   3.3000000e+00   9.0000000e-01   9.0000000e-01   1.6000000e+00   3.1000000e+00   3.1000000e+00   4.8000000e+00   1.7000000e+00   1.4000000e+00   2.0000000e+00   3.8000000e+00   2.4000000e+00   1.9000000e+00   9.0000000e-01   2.2000000e+00   2.5000000e+00   2.1000000e+00   6.0000000e-01   2.9000000e+00   2.9000000e+00   1.9000000e+00   1.2000000e+00   1.4000000e+00   2.0000000e+00   9.0000000e-01   3.5000000e+00   2.6000000e+00   1.8000000e+00   3.6000000e+00   3.8000000e+00   2.5000000e+00   2.0000000e+00   1.0000000e+00   1.5000000e+00   2.6000000e+00   3.5000000e+00   3.5000000e+00   1.1000000e+00   3.5000000e+00   3.0000000e+00   4.0000000e+00   2.5000000e+00   2.8000000e+00   3.0000000e+00   3.7000000e+00   2.4000000e+00   3.0000000e+00   3.0000000e+00   3.5000000e+00   2.9000000e+00   3.7000000e+00   3.8000000e+00   2.1000000e+00   2.5000000e+00   3.0000000e-01   5.0000000e-01   1.2000000e+00   2.3000000e+00   2.5000000e+00   4.4000000e+00   1.3000000e+00   6.0000000e-01   1.4000000e+00   3.4000000e+00   2.0000000e+00   1.1000000e+00   7.0000000e-01   1.8000000e+00   2.1000000e+00   1.7000000e+00   8.0000000e-01   2.5000000e+00   2.5000000e+00   1.5000000e+00   4.0000000e-01   1.0000000e+00   1.8000000e+00   9.0000000e-01   1.2000000e+00   2.2000000e+00   2.0000000e+00   9.0000000e-01   1.4000000e+00   1.8000000e+00   2.5000000e+00   1.0000000e+00   2.1000000e+00   2.1000000e+00   1.9000000e+00   9.0000000e-01   1.0000000e+00   2.2000000e+00   7.0000000e-01   6.0000000e-01   1.2000000e+00   2.3000000e+00   6.0000000e-01   4.0000000e-01   1.0000000e+00   2.1000000e+00   1.1000000e+00   1.1000000e+00   2.0000000e+00   2.6000000e+00   2.4000000e+00   1.9000000e+00   
6.0000000e-01   8.0000000e-01   1.9000000e+00   2.0000000e+00   2.5000000e+00   2.5000000e+00   1.3000000e+00   2.1000000e+00   1.4000000e+00   2.6000000e+00   1.3000000e+00   1.6000000e+00   1.8000000e+00   2.9000000e+00   1.0000000e+00   1.6000000e+00   2.0000000e+00   2.7000000e+00   1.9000000e+00   2.3000000e+00   2.4000000e+00   4.0000000e-01   1.3000000e+00   2.4000000e+00   2.6000000e+00   4.5000000e+00   1.4000000e+00   7.0000000e-01   1.5000000e+00   3.5000000e+00   2.1000000e+00   1.2000000e+00   4.0000000e-01   1.9000000e+00   2.2000000e+00   1.8000000e+00   9.0000000e-01   2.6000000e+00   2.6000000e+00   1.6000000e+00   7.0000000e-01   1.1000000e+00   1.7000000e+00   8.0000000e-01   1.5000000e+00   2.2000000e+00   2.8000000e+00   4.3000000e+00   1.6000000e+00   9.0000000e-01   1.5000000e+00   3.3000000e+00   1.9000000e+00   1.0000000e+00   2.0000000e-01   1.7000000e+00   2.0000000e+00   1.6000000e+00   9.0000000e-01   2.4000000e+00   2.4000000e+00   1.4000000e+00   9.0000000e-01   9.0000000e-01   1.5000000e+00   4.0000000e-01   1.7000000e+00   1.7000000e+00   3.4000000e+00   1.0000000e-01   1.2000000e+00   1.2000000e+00   2.2000000e+00   1.0000000e+00   7.0000000e-01   1.7000000e+00   1.0000000e+00   9.0000000e-01   1.5000000e+00   1.4000000e+00   1.3000000e+00   1.3000000e+00   1.1000000e+00   1.2000000e+00   8.0000000e-01   1.2000000e+00   1.5000000e+00   1.0000000e+00   2.5000000e+00   1.8000000e+00   1.9000000e+00   1.9000000e+00   1.5000000e+00   2.3000000e+00   1.4000000e+00   2.4000000e+00   1.3000000e+00   1.6000000e+00   1.8000000e+00   2.7000000e+00   1.4000000e+00   1.8000000e+00   1.8000000e+00   2.5000000e+00   1.7000000e+00   2.5000000e+00   2.2000000e+00   1.9000000e+00   1.8000000e+00   2.5000000e+00   2.5000000e+00   9.0000000e-01   2.7000000e+00   2.0000000e+00   3.0000000e+00   1.7000000e+00   2.0000000e+00   2.2000000e+00   2.7000000e+00   1.6000000e+00   2.2000000e+00   2.2000000e+00   2.5000000e+00   2.1000000e+00   2.9000000e+00   2.8000000e+00   3.5000000e+00   4.4000000e+00   4.4000000e+00   1.6000000e+00   3.2000000e+00   3.3000000e+00   4.5000000e+00   2.8000000e+00   3.1000000e+00   3.3000000e+00   4.6000000e+00   2.5000000e+00   2.9000000e+00   3.5000000e+00   4.4000000e+00   3.4000000e+00   3.4000000e+00   4.3000000e+00   1.3000000e+00   1.3000000e+00   2.1000000e+00   9.0000000e-01   8.0000000e-01   1.8000000e+00   1.1000000e+00   8.0000000e-01   1.4000000e+00   1.5000000e+00   1.2000000e+00   1.2000000e+00   1.0000000e+00   1.3000000e+00   9.0000000e-01   1.1000000e+00   1.6000000e+00   1.0000000e+00   3.4000000e+00   2.0000000e+00   1.1000000e+00   1.1000000e+00   1.8000000e+00   2.1000000e+00   1.7000000e+00   1.0000000e+00   2.5000000e+00   2.5000000e+00   1.5000000e+00   8.0000000e-01   1.0000000e+00   1.8000000e+00   9.0000000e-01   3.4000000e+00   2.0000000e+00   1.3000000e+00   1.7000000e+00   2.2000000e+00   2.1000000e+00   2.7000000e+00   1.4000000e+00   2.5000000e+00   2.5000000e+00   2.3000000e+00   1.4000000e+00   1.8000000e+00   2.0000000e+00   1.5000000e+00   2.4000000e+00   2.5000000e+00   3.5000000e+00   1.8000000e+00   1.7000000e+00   1.9000000e+00   3.6000000e+00   1.3000000e+00   1.9000000e+00   1.9000000e+00   3.4000000e+00   2.4000000e+00   2.6000000e+00   3.3000000e+00   1.1000000e+00   2.1000000e+00   1.4000000e+00   7.0000000e-01   1.5000000e+00   2.2000000e+00   1.1000000e+00   7.0000000e-01   1.3000000e+00   2.0000000e+00   1.4000000e+00   4.0000000e-01   1.9000000e+00   1.2000000e+00   9.0000000e-01   1.0000000e+00   
1.4000000e+00   1.5000000e+00   1.4000000e+00   1.4000000e+00   1.2000000e+00   1.3000000e+00   7.0000000e-01   1.1000000e+00   1.0000000e+00   1.9000000e+00   2.2000000e+00   1.8000000e+00   9.0000000e-01   2.6000000e+00   2.6000000e+00   1.6000000e+00   1.1000000e+00   1.1000000e+00   1.7000000e+00   4.0000000e-01   7.0000000e-01   5.0000000e-01   2.0000000e+00   9.0000000e-01   1.1000000e+00   7.0000000e-01   1.8000000e+00   8.0000000e-01   1.2000000e+00   1.7000000e+00   8.0000000e-01   2.3000000e+00   6.0000000e-01   4.0000000e-01   6.0000000e-01   2.1000000e+00   1.1000000e+00   1.1000000e+00   2.0000000e+00   1.9000000e+00   1.0000000e+00   1.2000000e+00   4.0000000e-01   1.7000000e+00   9.0000000e-01   1.3000000e+00   1.6000000e+00   2.7000000e+00   2.7000000e+00   1.7000000e+00   8.0000000e-01   1.2000000e+00   1.8000000e+00   5.0000000e-01   6.0000000e-01   1.0000000e+00   2.5000000e+00   1.5000000e+00   1.3000000e+00   2.4000000e+00   1.0000000e+00   2.5000000e+00   1.5000000e+00   1.1000000e+00   2.4000000e+00   1.5000000e+00   5.0000000e-01   1.1000000e+00   1.4000000e+00   1.0000000e+00   1.8000000e+00   1.1000000e+00   1.2000000e+00   9.0000000e-01   1.5000000e+00
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-cityblock-ml.txt b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-cityblock-ml.txt
new file mode 100644
index 00000000..8fb22e62
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-cityblock-ml.txt
@@ -0,0 +1 @@
+   3.2420590e+01   3.3246607e+01   3.0526910e+01   3.5166573e+01   3.1868301e+01   3.6025002e+01   3.2513623e+01   3.6557796e+01   3.3752212e+01   3.4422130e+01   3.2526018e+01   3.2581161e+01   3.3743555e+01   3.6960777e+01   3.4225270e+01   3.2965308e+01   3.4591031e+01   3.4204203e+01   3.4678123e+01   3.5728720e+01   3.0830047e+01   3.1550681e+01   3.3304790e+01   3.2676753e+01   3.2742330e+01   3.1684556e+01   3.2830915e+01   3.2956614e+01   2.7365639e+01   3.3207307e+01   3.3420925e+01   3.4357941e+01   2.8280126e+01   3.4523458e+01   3.2705274e+01   3.2455891e+01   3.1636060e+01   3.1594957e+01   3.1805202e+01   3.3886574e+01   3.3438829e+01   3.3330030e+01   3.4168514e+01   3.0637353e+01   4.2149167e+01   3.6340559e+01   2.9315308e+01   3.5778314e+01   3.7693050e+01   3.2598714e+01   3.2990836e+01   3.4967659e+01   3.9748920e+01   3.6745043e+01   2.7117550e+01   3.6014760e+01   2.9367558e+01   3.3845350e+01   3.5477339e+01   3.1513372e+01   3.2517953e+01   2.4755097e+01   3.0229897e+01   3.4799343e+01   3.3371710e+01   2.9600910e+01   3.3275088e+01   3.3567110e+01   3.4527016e+01   3.4942320e+01   3.2359383e+01   3.2607100e+01   3.1467914e+01   2.9032039e+01   3.3122878e+01   2.8496709e+01   2.9908448e+01   2.9962886e+01   3.0345299e+01   3.1737613e+01   2.8551485e+01   3.2610551e+01   3.3082660e+01   3.3719298e+01   3.6434018e+01   3.6589278e+01   3.3889586e+01   3.8036774e+01   3.1483497e+01   3.4196794e+01   3.5154035e+01   3.5488608e+01   3.6143183e+01   3.3473491e+01   3.4686446e+01   2.8687495e+01   3.5725742e+01   3.0188298e+01   3.3084534e+01   3.3538519e+01   3.6226849e+01   2.9052099e+01   3.6032733e+01   3.0811503e+01   3.2616190e+01   3.3888566e+01   3.3074570e+01   2.9683515e+01   3.0600771e+01   3.4345247e+01   3.6983843e+01   3.3692824e+01   3.3762461e+01   3.4024582e+01   3.3698854e+01   3.1238613e+01   3.4978833e+01   3.4991078e+01   3.4577741e+01   3.3749227e+01   3.4982272e+01   3.0487868e+01   3.2317632e+01   3.1125588e+01   3.4413791e+01   3.1881871e+01   3.1373821e+01   3.0416864e+01   3.2066187e+01   3.1128313e+01   3.0240249e+01   3.0125198e+01   3.1343454e+01   3.5479092e+01   3.4450767e+01   3.2953507e+01   3.4456795e+01   3.0136375e+01   3.3462150e+01   2.9894274e+01   3.1367432e+01   3.2839320e+01   3.1440398e+01   2.9400374e+01   3.1106338e+01   3.1242624e+01   3.5537892e+01   3.3056459e+01   2.8610281e+01   3.4296217e+01   3.5819772e+01   3.2503922e+01   3.0963029e+01   3.4762112e+01   3.4796284e+01   2.9645345e+01   3.4468088e+01   2.6975590e+01   3.3738555e+01   2.8825009e+01   3.2663999e+01   3.2547878e+01   3.2308091e+01   3.2489966e+01   3.0868597e+01   3.2974220e+01   3.0866111e+01   3.8197342e+01   3.0609568e+01   3.5478978e+01   2.9249184e+01   3.6185622e+01   3.1948258e+01   3.2649719e+01   3.3305650e+01   3.4643955e+01   3.6566241e+01   3.4968484e+01   3.2632218e+01   3.6741383e+01   3.5700008e+01   3.1962468e+01   3.1410623e+01   3.0412061e+01   3.3749077e+01   3.5649661e+01   3.7649263e+01   3.2832574e+01   3.1783914e+01   2.8264292e+01
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-correlation-ml-iris.txt b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-correlation-ml-iris.txt
new file mode 100644
index 00000000..f2975003
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-correlation-ml-iris.txt
@@ -0,0 +1 @@
+   4.0013388e-03   2.6088954e-05   1.8315482e-03   6.5266850e-04   4.1394685e-04   1.1888069e-03   4.6185289e-04   1.9233577e-03   3.4480388e-03   1.5150632e-05   1.9126718e-03   3.0974734e-03   2.2295833e-04   2.4043394e-03   5.0134320e-03   3.0165570e-03   1.3145239e-04   6.0759419e-04   1.6672981e-03   4.0036132e-03   6.1375191e-04   8.5916540e-03   3.0212269e-03   8.6923503e-03   7.7875235e-03   5.1612907e-04   2.9662451e-04   6.2402983e-04   2.7278440e-03   4.0510347e-03   3.0027154e-03   6.2616145e-03   4.1342211e-03   3.4480388e-03   1.5822510e-03   1.7143312e-03   3.4480388e-03   2.2462074e-04   6.1048465e-04   6.5190641e-04   2.4247873e-02   9.0785596e-04   2.1652052e-04   3.4845573e-03   3.2507646e-03   2.3346511e-03   4.0773355e-04   1.1278223e-04   5.0819669e-04   2.1340893e-01   2.1253858e-01   2.5193073e-01   2.9479565e-01   2.6774348e-01   2.8869785e-01   2.3348217e-01   1.9273490e-01   2.4443270e-01   2.4320510e-01   2.7679421e-01   2.1672263e-01   2.6813840e-01   2.8435705e-01   1.5561363e-01   2.0057173e-01   2.7812139e-01   2.2900256e-01   3.4724680e-01   2.3882260e-01   2.9132931e-01   2.0333645e-01   3.5307051e-01   2.8812452e-01   2.1722530e-01   2.1423111e-01   2.7396952e-01   2.9207940e-01   2.6626182e-01   1.7106032e-01   2.4279706e-01   2.2559055e-01   2.0940857e-01   3.8432412e-01   2.9354670e-01   2.0829958e-01   2.3669414e-01   3.0463326e-01   2.1035851e-01   2.6623117e-01   3.0835417e-01   2.5871089e-01   2.3465249e-01   2.0319416e-01   2.6292582e-01   2.1771735e-01   2.3212816e-01   2.2399387e-01   1.3799316e-01   2.3049526e-01   4.8512087e-01   4.2535066e-01   4.0184471e-01   4.1903049e-01   4.4627199e-01   4.4692268e-01   4.3569888e-01   4.2673251e-01   4.5731950e-01   3.7438176e-01   3.0619251e-01   4.0039114e-01   3.7245195e-01   4.5829878e-01   4.5814844e-01   3.6107062e-01   3.7600936e-01   3.7662883e-01   5.2492832e-01   4.2684428e-01   3.7975064e-01   4.0636707e-01   4.6364339e-01   3.4607190e-01   3.6988036e-01   3.6764668e-01   3.2524634e-01   3.1943549e-01   4.4481193e-01   3.5496498e-01   4.1356534e-01   3.2082320e-01   4.5322964e-01   3.4300770e-01   4.4485158e-01   3.9755578e-01   3.9702418e-01   3.7202285e-01   3.1131344e-01   3.4018064e-01   4.0217537e-01   3.1441868e-01   4.2535066e-01   4.1533176e-01   3.9695242e-01   3.5313531e-01   3.9400199e-01   3.4652657e-01   3.6608320e-01   3.6684161e-01   3.3929143e-03   2.6033698e-03   7.7673212e-03   6.4081099e-03   9.2794464e-03   2.8819447e-03   1.4536586e-03   9.6714455e-04   3.7992387e-03   5.9342609e-03   3.9974031e-04   6.0694735e-03   9.1304628e-03   1.7655983e-02   1.1643899e-02   4.0363794e-03   1.6463709e-03   1.0706739e-02   6.7984475e-04   7.6845878e-03   2.3516587e-02   9.9502337e-05   1.0315881e-02   1.0821735e-03   1.8887942e-03   2.4624674e-03   1.5760536e-03   3.6638868e-03   1.6253664e-03   7.8762517e-04   1.9487010e-02   1.6211862e-02   9.6714455e-04   2.2382105e-03   2.1712385e-03   9.6714455e-04   2.9674185e-03   1.9068589e-03   6.4555509e-03   8.8254342e-03   8.1777355e-03   3.4663084e-03   9.6481454e-03   9.7747764e-05   1.0706793e-02   4.2246850e-03   4.9836128e-03   1.6613867e-03   1.6856078e-01   1.6930583e-01   2.0381801e-01   2.4171317e-01   2.1689289e-01   2.4212069e-01   1.9027913e-01   1.5127382e-01   1.9696970e-01   1.9830901e-01   2.2503195e-01   1.7290786e-01   2.1618942e-01   2.3648275e-01   1.1720113e-01   1.5636322e-01   2.3357633e-01   1.8548772e-01   2.8791738e-01   1.9215793e-01   2.4470933e-01   1.5850128e-01   2.9662484e-01   2.4061109e-01   1.7172858e-01  
 1.6853658e-01   2.2283143e-01   2.4032537e-01   2.1849821e-01   1.2975740e-01   1.9502432e-01   1.7953012e-01   1.6504306e-01   3.2966120e-01   2.4985160e-01   1.6946778e-01   1.8991741e-01   2.4919698e-01   1.7046257e-01   2.1691882e-01   2.6018338e-01   2.1310883e-01   1.8776864e-01   1.5909082e-01   2.1620321e-01   1.7782256e-01   1.8911127e-01   1.7894094e-01   9.9649433e-02   1.8605378e-01   4.2702260e-01   3.6742642e-01   3.4284735e-01   3.6351462e-01   3.8703088e-01   3.8643219e-01   3.8033312e-01   3.6849688e-01   3.9561383e-01   3.1894438e-01   2.5437467e-01   3.4129756e-01   3.1464030e-01   3.9617829e-01   3.9513061e-01   3.0506617e-01   3.2181902e-01   3.2465822e-01   4.5849285e-01   3.6615509e-01   3.2201598e-01   3.4969244e-01   4.0175045e-01   2.8934891e-01   3.1614551e-01   3.1385643e-01   2.7059114e-01   2.6798818e-01   3.8429703e-01   3.0087709e-01   3.5349775e-01   2.7097983e-01   3.9156246e-01   2.9045741e-01   3.8972428e-01   3.3603762e-01   3.4254663e-01   3.1960575e-01   2.6050327e-01   2.8406500e-01   3.4225008e-01   2.5735736e-01   3.6742642e-01   3.5724408e-01   3.3861338e-01   2.9412113e-01   3.3288556e-01   2.9101723e-01   3.1374321e-01   3.1516519e-01   1.6665208e-03   9.3886805e-04   6.2270349e-04   1.5623211e-03   3.9549141e-04   1.6439076e-03   3.0144044e-03   3.0583810e-05   2.0234943e-03   2.6246966e-03   3.8983492e-04   2.5645160e-03   5.6944285e-03   3.3339055e-03   1.2831328e-04   4.1302346e-04   2.1101667e-03   3.4972086e-03   8.7482704e-04   9.4271115e-03   2.5080125e-03   8.7042936e-03   7.0125369e-03   3.5088415e-04   1.9451019e-04   3.9574419e-04   2.5986219e-03   3.6402032e-03   2.4900748e-03   7.0784673e-03   4.7935149e-03   3.0144044e-03   1.2869381e-03   1.4017897e-03   3.0144044e-03   1.7197161e-04   4.4525534e-04   7.8138074e-04   2.2693053e-02   1.2229281e-03   1.5754704e-04   3.7899670e-03   2.6957902e-03   2.7721274e-03   4.5733371e-04   2.2324575e-04   3.1003560e-04   2.1002397e-01   2.0931874e-01   2.4834042e-01   2.9081912e-01   2.6391644e-01   2.8536997e-01   2.3033428e-01   1.8962461e-01   2.4088505e-01   2.3991724e-01   2.7290008e-01   2.1345771e-01   2.6419304e-01   2.8088883e-01   1.5266518e-01   1.9720348e-01   2.7496549e-01   2.2580939e-01   3.4275207e-01   2.3533918e-01   2.8800391e-01   1.9991219e-01   3.4890690e-01   2.8470246e-01   2.1378625e-01   2.1075909e-01   2.7013290e-01   2.8823552e-01   2.6275308e-01   1.6787442e-01   2.3921107e-01   2.2212337e-01   2.0605918e-01   3.8041788e-01   2.9051019e-01   2.0550537e-01   2.3319159e-01   3.0043218e-01   2.0746652e-01   2.6256228e-01   3.0491843e-01   2.5539836e-01   2.3113139e-01   1.9984797e-01   2.5951290e-01   2.1484809e-01   2.2899468e-01   2.2062653e-01   1.3495710e-01   2.2721304e-01   4.8106467e-01   4.2120214e-01   3.9753504e-01   4.1511036e-01   4.4203186e-01   4.4255637e-01   4.3182495e-01   4.2255527e-01   4.5284888e-01   3.7037516e-01   3.0238339e-01   3.9606802e-01   3.6819494e-01   4.5378706e-01   4.5354234e-01   3.5697379e-01   3.7213234e-01   3.7297305e-01   5.2009409e-01   4.2241474e-01   3.7552000e-01   4.0230555e-01   4.5916611e-01   3.4185974e-01   3.6603540e-01   3.6379117e-01   3.2119479e-01   3.1570003e-01   4.4043880e-01   3.5104995e-01   4.0917086e-01   3.1725229e-01   4.4875464e-01   3.3921954e-01   4.4101743e-01   3.9296628e-01   3.9316311e-01   3.6831351e-01   3.0762140e-01   3.3601664e-01   3.9776902e-01   3.1006892e-01   4.2120214e-01   4.1114584e-01   3.9269988e-01   3.4869458e-01   3.8944692e-01   3.4244384e-01   3.6236872e-01   3.6319420e-01   
3.2811792e-03   2.1674206e-03   3.8606330e-03   4.5444049e-04   1.6669051e-04   6.9315236e-04   1.5191179e-03   7.4896915e-04   1.0486334e-03   3.0115188e-03   8.3553530e-03   1.1528814e-02   9.5172421e-03   2.7099731e-03   7.1677618e-04   4.9455548e-03   1.1260396e-03   4.0113810e-03   1.7041109e-02   1.7048436e-03   3.2998306e-03   3.5839458e-03   6.5708756e-04   7.5073414e-04   1.6739794e-03   1.4404874e-04   6.5489426e-04   3.9918560e-03   9.7678136e-03   9.5698494e-03   6.9315236e-04   4.1051921e-03   4.2098821e-03   6.9315236e-04   7.7852178e-04   5.0066998e-04   4.6641147e-03   1.9877450e-02   2.9999880e-03   2.6696154e-03   2.3124511e-03   2.6940762e-03   3.6188953e-03   7.8131154e-04   1.7395433e-03   1.1236329e-03   1.8068407e-01   1.7911784e-01   2.1632614e-01   2.5732132e-01   2.3189913e-01   2.4898678e-01   1.9775994e-01   1.6095697e-01   2.0933838e-01   2.0710048e-01   2.4048237e-01   1.8306865e-01   2.3298507e-01   2.4543409e-01   1.2760114e-01   1.6921167e-01   2.3873595e-01   1.9385405e-01   3.0859675e-01   2.0396213e-01   2.5141439e-01   1.7197884e-01   3.1193260e-01   2.4875015e-01   1.8437187e-01   1.8189435e-01   2.3759181e-01   2.5412614e-01   2.2898904e-01   1.4232827e-01   2.0806035e-01   1.9199138e-01   1.7693745e-01   3.4002004e-01   2.5277786e-01   1.7384453e-01   2.0213849e-01   2.6767804e-01   1.7597754e-01   2.2968366e-01   2.6752309e-01   2.2134041e-01   2.0039734e-01   1.7140536e-01   2.2556075e-01   1.8258974e-01   1.9648053e-01   1.9006393e-01   1.1321036e-01   1.9554999e-01   4.3600761e-01   3.7954719e-01   3.5812457e-01   3.7278397e-01   3.9968201e-01   4.0081518e-01   3.8843945e-01   3.8096257e-01   4.1111272e-01   3.3107441e-01   2.6691137e-01   3.5682832e-01   3.3041468e-01   4.1223120e-01   4.1255773e-01   3.1903974e-01   3.3211211e-01   3.3199965e-01   4.7707633e-01   3.8217339e-01   3.3708398e-01   3.6131644e-01   4.1712805e-01   3.0571403e-01   3.2625308e-01   3.2419755e-01   2.8563873e-01   2.7883266e-01   3.9884997e-01   3.1256889e-01   3.6952768e-01   2.7953122e-01   4.0726631e-01   3.0093515e-01   3.9702583e-01   3.5566883e-01   3.5181973e-01   3.2782384e-01   2.7114516e-01   3.0000941e-01   3.5891976e-01   2.7762255e-01   3.7954719e-01   3.7024637e-01   3.5327118e-01   3.1362016e-01   3.5214869e-01   3.0546169e-01   3.2226324e-01   3.2277503e-01   1.1668032e-04   8.6044327e-05   1.4968429e-03   3.9691382e-03   6.2388252e-03   7.0588028e-04   1.9691519e-03   6.1279520e-03   1.9660913e-04   2.5761274e-03   2.5168387e-03   2.4029967e-03   9.7429318e-04   2.3122381e-03   2.3682626e-04   7.1643085e-03   1.3128642e-04   5.3939078e-03   6.2992904e-03   9.0353935e-03   1.2266741e-02   2.0706893e-03   1.5408774e-03   2.5522607e-03   3.9522692e-03   6.6899152e-03   6.3980861e-03   2.9039306e-03   1.6942568e-03   6.2388252e-03   3.9336207e-03   4.1533642e-03   6.2388252e-03   1.2180733e-03   2.1518176e-03   8.8270219e-04   3.2743785e-02   7.7572782e-05   1.3417853e-03   2.5322339e-03   6.7844678e-03   8.2761566e-04   8.6236157e-04   3.1792685e-04   2.2579028e-03   2.2976852e-01   2.2800773e-01   2.6917639e-01   3.1391346e-01   2.8619117e-01   3.0434544e-01   2.4844431e-01   2.0773635e-01   2.6148944e-01   2.5886513e-01   2.9555853e-01   2.3241008e-01   2.8723433e-01   3.0077518e-01   1.6999944e-01   2.1692490e-01   2.9290302e-01   2.4423212e-01   3.6894940e-01   2.5556289e-01   3.0695160e-01   2.1997775e-01   3.7292526e-01   3.0427758e-01   2.3385552e-01   2.3106102e-01   2.9243468e-01   3.1048744e-01   2.8298832e-01   1.8662712e-01   2.6007204e-01   
2.4232174e-01   2.2560003e-01   4.0265980e-01   3.0762748e-01   2.2151447e-01   2.5355085e-01   3.2493368e-01   2.2408199e-01   2.8382455e-01   3.2448802e-01   2.7442187e-01   2.5162228e-01   2.1940897e-01   2.7915380e-01   2.3127910e-01   2.4702051e-01   2.4019206e-01   1.5301315e-01   2.4619533e-01   5.0391216e-01   4.4483392e-01   4.2228707e-01   4.3731273e-01   4.6617394e-01   4.6750253e-01   4.5367757e-01   4.4636521e-01   4.7842690e-01   3.9329580e-01   3.2434154e-01   4.2091234e-01   3.9272980e-01   4.7962551e-01   4.7998875e-01   3.8052757e-01   3.9422077e-01   3.9365636e-01   5.4778535e-01   4.4784024e-01   3.9985415e-01   4.2545240e-01   4.8476479e-01   3.6622351e-01   3.8794414e-01   3.8577592e-01   3.4462011e-01   3.3712507e-01   4.6543611e-01   3.7346604e-01   4.3442200e-01   3.3762412e-01   4.7437523e-01   3.6087712e-01   4.6258851e-01   4.1954525e-01   4.1507031e-01   3.8935328e-01   3.2880662e-01   3.6009673e-01   4.2314218e-01   3.3549029e-01   4.4483392e-01   4.3505302e-01   4.1710447e-01   3.7450914e-01   4.1581746e-01   3.6597096e-01   3.8346411e-01   3.8386195e-01   2.7739415e-04   8.2117467e-04   2.7843462e-03   4.7394226e-03   3.9365385e-04   1.1964598e-03   4.7400628e-03   2.5527396e-04   3.2634446e-03   3.7103657e-03   3.3188195e-03   8.6302611e-04   1.5635411e-03   6.0189508e-04   5.5859876e-03   3.8282951e-04   7.0925635e-03   5.0273924e-03   7.3470160e-03   1.0223636e-02   1.3503463e-03   9.3049535e-04   1.9663208e-03   2.7155903e-03   5.0798223e-03   5.5875952e-03   3.5987384e-03   2.6550151e-03   4.7394226e-03   3.5923878e-03   3.7937786e-03   4.7394226e-03   6.6480476e-04   1.3814035e-03   1.1581699e-03   3.0091048e-02   1.0888067e-04   1.1634967e-03   1.9052023e-03   5.6332034e-03   7.9034466e-04   3.5005887e-04   1.0179107e-04   1.6076683e-03   2.2018795e-01   2.1841167e-01   2.5888963e-01   3.0298911e-01   2.7568827e-01   2.9345793e-01   2.3845526e-01   1.9853879e-01   2.5133209e-01   2.4870042e-01   2.8491736e-01   2.2273608e-01   2.7678086e-01   2.8993699e-01   1.6165333e-01   2.0762176e-01   2.8220337e-01   2.3432093e-01   3.5743296e-01   2.4549712e-01   2.9602684e-01   2.1063470e-01   3.6116406e-01   2.9338620e-01   2.2421094e-01   2.2149308e-01   2.8182182e-01   2.9956540e-01   2.7243830e-01   1.7796817e-01   2.4995634e-01   2.3251210e-01   2.1609445e-01   3.9047864e-01   2.9675065e-01   2.1202908e-01   2.4353030e-01   3.1395037e-01   2.1453916e-01   2.7329907e-01   3.1330772e-01   2.6399518e-01   2.4164659e-01   2.1003834e-01   2.6865531e-01   2.2160867e-01   2.3705638e-01   2.3039039e-01   1.4523591e-01   2.3625874e-01   4.9071957e-01   4.3219633e-01   4.0993016e-01   4.2475440e-01   4.5332327e-01   4.5465382e-01   4.4096188e-01   4.3371359e-01   4.6548638e-01   3.8123224e-01   3.1318896e-01   4.0857587e-01   3.8073064e-01   4.6668253e-01   4.6707002e-01   3.6864305e-01   3.8213773e-01   3.8159377e-01   5.3428461e-01   4.3521869e-01   3.8775353e-01   4.1301893e-01   4.7176152e-01   3.5457739e-01   3.7593537e-01   3.7379344e-01   3.3323181e-01   3.2577190e-01   4.5261005e-01   3.6164142e-01   4.2194514e-01   3.2625778e-01   4.6147754e-01   3.4920516e-01   4.4979521e-01   4.0734174e-01   4.0275102e-01   3.7733353e-01   3.1756791e-01   3.4852048e-01   4.1080565e-01   3.2443308e-01   4.3219633e-01   4.2252463e-01   4.0479526e-01   3.6286403e-01   4.0364435e-01   3.5428112e-01   3.7151258e-01   3.7191203e-01   2.0478784e-03   4.7860280e-03   7.2727783e-03   1.2329906e-03   2.0550268e-03   7.3066158e-03   5.3810576e-04   3.2245377e-03   2.1674888e-03   
2.7856851e-03   1.6387773e-03   3.1323423e-03   6.7307381e-05   8.3332066e-03   3.3954200e-04   4.9119026e-03   7.6276175e-03   8.9240504e-03   1.3851706e-02   2.8347122e-03   2.2069601e-03   3.5278340e-03   4.3892066e-03   7.6179982e-03   7.9398397e-03   2.0003115e-03   1.2885024e-03   7.2727783e-03   5.1809110e-03   5.4327573e-03   7.2727783e-03   1.7938257e-03   2.8924302e-03   1.4132511e-03   3.5877316e-02   5.5425651e-05   2.1070391e-03   2.2246722e-03   8.2675293e-03   4.8309816e-04   1.2152730e-03   6.6498554e-04   3.1323983e-03   2.3387083e-01   2.3171300e-01   2.7340823e-01   3.1873143e-01   2.9087099e-01   3.0765106e-01   2.5178166e-01   2.1138210e-01   2.6568600e-01   2.6244481e-01   3.0032621e-01   2.3618270e-01   2.9221666e-01   3.0443867e-01   1.7368514e-01   2.2112593e-01   2.9589691e-01   2.4771596e-01   3.7467992e-01   2.5965416e-01   3.1023215e-01   2.2429067e-01   3.7775475e-01   3.0780450e-01   2.3805322e-01   2.3537447e-01   2.9708156e-01   3.1499475e-01   2.8689460e-01   1.9071775e-01   2.6437980e-01   2.4650321e-01   2.2965596e-01   4.0666307e-01   3.1024566e-01   2.2426781e-01   2.5770992e-01   3.3024819e-01   2.2703835e-01   2.8812097e-01   3.2789845e-01   2.7792695e-01   2.5584753e-01   2.2352457e-01   2.8285984e-01   2.3411709e-01   2.5033634e-01   2.4414228e-01   1.5718330e-01   2.4987685e-01   5.0772777e-01   4.4916856e-01   4.2715006e-01   4.4114868e-01   4.7061118e-01   4.7223859e-01   4.5731921e-01   4.5076024e-01   4.8335950e-01   3.9759816e-01   3.2864932e-01   4.2581769e-01   3.9765561e-01   4.8465345e-01   4.8525127e-01   3.8513607e-01   3.9820652e-01   3.9712803e-01   5.5326906e-01   4.5284577e-01   4.0466523e-01   4.2968979e-01   4.8967945e-01   3.7122633e-01   3.9189300e-01   3.8976354e-01   3.4937792e-01   3.4115590e-01   4.7020033e-01   3.7767688e-01   4.3942138e-01   3.4125876e-01   4.7934102e-01   3.6486747e-01   4.6609349e-01   4.2514437e-01   4.1889348e-01   3.9297481e-01   3.3279384e-01   3.6502235e-01   4.2824304e-01   3.4111505e-01   4.4916856e-01   4.3953404e-01   4.2185806e-01   3.8004789e-01   4.2135185e-01   3.7064704e-01   3.8713375e-01   3.8737322e-01   5.9378937e-04   1.6263483e-03   3.1194349e-04   8.5089275e-04   1.6365846e-03   1.1579874e-03   4.9430863e-03   7.7957878e-03   5.8209267e-03   9.7423596e-04   2.1559031e-04   2.8280232e-03   2.1261057e-03   1.8496545e-03   1.2342594e-02   1.9347552e-03   5.4995961e-03   5.2624400e-03   1.3773080e-04   7.1496401e-05   7.1145768e-04   9.6706058e-04   1.9028496e-03   3.0842001e-03   7.5087003e-03   6.3709632e-03   1.6263483e-03   2.4219636e-03   2.5416684e-03   1.6263483e-03   4.5881830e-05   1.0508341e-04   2.2101780e-03   2.1711060e-02   1.4779987e-03   1.0004664e-03   2.4029906e-03   2.5527616e-03   2.4859397e-03   1.2918144e-04   4.6388898e-04   3.7292268e-04   1.9674800e-01   1.9551090e-01   2.3384591e-01   2.7581575e-01   2.4956432e-01   2.6852776e-01   2.1529223e-01   1.7652116e-01   2.2659936e-01   2.2483764e-01   2.5838801e-01   1.9958250e-01   2.5031938e-01   2.6459593e-01   1.4127677e-01   1.8459488e-01   2.5809699e-01   2.1110498e-01   3.2773890e-01   2.2109976e-01   2.7105695e-01   1.8736680e-01   3.3227725e-01   2.6813261e-01   2.0050552e-01   1.9777445e-01   2.5552383e-01   2.7284453e-01   2.4733170e-01   1.5638593e-01   2.2514733e-01   2.0849921e-01   1.9287136e-01   3.6191982e-01   2.7281863e-01   1.9071038e-01   2.1912633e-01   2.8593968e-01   1.9281596e-01   2.4768186e-01   2.8763479e-01   2.3971138e-01   2.1723753e-01   1.8699948e-01   2.4393940e-01   1.9979806e-01   
2.1397418e-01   2.0672587e-01   1.2529166e-01   2.1270878e-01   4.6033946e-01   4.0222602e-01   3.7977808e-01   3.9565939e-01   4.2276626e-01   4.2367223e-01   4.1181849e-01   4.0362790e-01   4.3403229e-01   3.5248619e-01   2.8628855e-01   3.7840466e-01   3.5121985e-01   4.3508597e-01   4.3518504e-01   3.3982310e-01   3.5380487e-01   3.5403534e-01   5.0086936e-01   4.0431760e-01   3.5820087e-01   3.8360776e-01   4.4020334e-01   3.2567463e-01   3.4780712e-01   3.4566354e-01   3.0520609e-01   2.9886193e-01   4.2163480e-01   3.3351442e-01   3.9135037e-01   2.9988740e-01   4.3006423e-01   3.2170490e-01   4.2068520e-01   3.7643926e-01   3.7416586e-01   3.4964963e-01   2.9095264e-01   3.1987117e-01   3.8035231e-01   2.9582266e-01   4.0222602e-01   3.9256940e-01   3.7489846e-01   3.3318735e-01   3.7289799e-01   3.2576051e-01   3.4389968e-01   3.4452790e-01   2.6022528e-04   1.6357171e-03   1.5776794e-03   3.8821876e-04   3.3383101e-03   8.1348232e-03   1.2673676e-02   9.6454978e-03   2.5951087e-03   4.7710550e-04   5.9640558e-03   5.0935416e-04   4.5005321e-03   1.8295131e-02   8.0881241e-04   4.5548809e-03   2.4342161e-03   4.9437559e-04   7.1285200e-04   1.1827453e-03   5.3640347e-04   3.9274104e-04   2.7126384e-03   1.1795471e-02   1.0834355e-02   2.6022528e-04   3.1798475e-03   3.2407554e-03   2.6022528e-04   8.6271302e-04   3.7982619e-04   4.6816553e-03   1.6563494e-02   3.8583498e-03   2.4190920e-03   3.6876972e-03   1.5540748e-03   4.9548680e-03   1.1822578e-03   2.1020911e-03   7.8943086e-04   1.7690932e-01   1.7592254e-01   2.1244374e-01   2.5265403e-01   2.2738778e-01   2.4651370e-01   1.9514374e-01   1.5780110e-01   2.0549282e-01   2.0415245e-01   2.3585798e-01   1.7978451e-01   2.2802648e-01   2.4243736e-01   1.2428265e-01   1.6526016e-01   2.3669421e-01   1.9101658e-01   3.0265561e-01   2.0025949e-01   2.4898144e-01   1.6786898e-01   3.0733331e-01   2.4595698e-01   1.8046568e-01   1.7781045e-01   2.3314059e-01   2.4991164e-01   2.2560913e-01   1.3845901e-01   2.0404813e-01   1.8812805e-01   1.7322140e-01   3.3666075e-01   2.5129679e-01   1.7201674e-01   1.9833201e-01   2.6229128e-01   1.7386253e-01   2.2573363e-01   2.6492814e-01   2.1852944e-01   1.9648937e-01   1.6758641e-01   2.2246603e-01   1.8066071e-01   1.9389274e-01   1.8653629e-01   1.0911288e-01   1.9242842e-01   4.3299781e-01   3.7574545e-01   3.5353088e-01   3.6969868e-01   3.9574789e-01   3.9644649e-01   3.8564737e-01   3.7707427e-01   4.0646475e-01   3.2727058e-01   2.6301141e-01   3.5217112e-01   3.2569733e-01   4.0744443e-01   4.0742674e-01   3.1477415e-01   3.2876954e-01   3.2939944e-01   4.7166141e-01   3.7739415e-01   3.3254323e-01   3.5763917e-01   4.1251084e-01   3.0085067e-01   3.2295735e-01   3.2084322e-01   2.8110727e-01   2.7535557e-01   3.9443856e-01   3.0887601e-01   3.6474538e-01   2.7663010e-01   4.0256676e-01   2.9754793e-01   3.9443683e-01   3.4998590e-01   3.4873324e-01   3.2500314e-01   2.6771990e-01   2.9525187e-01   3.5397733e-01   2.7178914e-01   3.7574545e-01   3.6622316e-01   3.4883278e-01   3.0797294e-01   3.4655783e-01   3.0107914e-01   3.1936679e-01   3.2010756e-01   3.0868881e-03   2.8691382e-03   1.3643967e-04   5.3429196e-03   1.0581034e-02   1.6459895e-02   1.2551534e-02   4.1576220e-03   1.2160579e-03   8.7064401e-03   5.4418849e-05   6.8214146e-03   2.2732626e-02   5.6586090e-04   4.9831593e-03   1.1314391e-03   1.3090644e-03   1.7262411e-03   1.9770194e-03   1.0389613e-03   9.3641634e-05   2.8067647e-03   1.5471006e-02   1.4411876e-02   0.0000000e+00   4.0251529e-03   4.0402030e-03   
0.0000000e+00   2.0104773e-03   1.1579294e-03   6.7733512e-03   1.3328703e-02   6.1195429e-03   3.8280621e-03   5.4471697e-03   1.3203495e-03   7.3930866e-03   2.5511055e-03   3.8011014e-03   1.5935161e-03   1.6488937e-01   1.6420208e-01   1.9946134e-01   2.3841307e-01   2.1377841e-01   2.3352484e-01   1.8323739e-01   1.4660873e-01   1.9269692e-01   1.9183992e-01   2.2200730e-01   1.6791595e-01   2.1423078e-01   2.2922867e-01   1.1407468e-01   1.5349471e-01   2.2418179e-01   1.7908948e-01   2.8692621e-01   1.8765985e-01   2.3596576e-01   1.5596498e-01   2.9203641e-01   2.3278981e-01   1.6829150e-01   1.6563525e-01   2.1942293e-01   2.3592536e-01   2.1256421e-01   1.2755291e-01   1.9121342e-01   1.7576718e-01   1.6132929e-01   3.2148865e-01   2.3885357e-01   1.6118125e-01   1.8573297e-01   2.4757051e-01   1.6279862e-01   2.1240660e-01   2.5149161e-01   2.0595435e-01   1.8389163e-01   1.5580881e-01   2.0964365e-01   1.6953436e-01   1.8203376e-01   1.7437090e-01   9.9184065e-02   1.8031354e-01   4.1664204e-01   3.5971944e-01   3.3744612e-01   3.5416868e-01   3.7936058e-01   3.7982315e-01   3.7006184e-01   3.6098196e-01   3.8956200e-01   3.1201251e-01   2.4889912e-01   3.3607844e-01   3.1002022e-01   3.9046127e-01   3.9028467e-01   2.9949936e-01   3.1373736e-01   3.1479561e-01   4.5355800e-01   3.6085089e-01   3.1682959e-01   3.4195608e-01   3.9553949e-01   2.8555816e-01   3.0804941e-01   3.0593834e-01   2.6633772e-01   2.6121338e-01   3.7782244e-01   2.9399549e-01   3.4839508e-01   2.6278429e-01   3.8569374e-01   2.8303600e-01   3.7885421e-01   3.3349568e-01   3.3352424e-01   3.1033775e-01   2.5375578e-01   2.8011018e-01   3.3772574e-01   2.5671985e-01   3.5971944e-01   3.5022311e-01   3.3289784e-01   2.9224130e-01   3.3016010e-01   2.8599652e-01   3.0475125e-01   3.0561759e-01   1.6232219e-03   2.8110674e-03   3.0992704e-04   2.8009412e-03   5.3859157e-03   3.4428307e-03   2.2389965e-04   4.8865483e-04   1.7600776e-03   3.6417064e-03   7.4576509e-04   9.1296419e-03   2.8123816e-03   8.0068765e-03   7.3379880e-03   3.9579356e-04   1.9713653e-04   6.0194434e-04   2.3367622e-03   3.6239908e-03   3.0185118e-03   6.3202257e-03   4.4046298e-03   3.0868881e-03   1.7141363e-03   1.8461990e-03   3.0868881e-03   1.2729358e-04   4.6571753e-04   8.6393185e-04   2.3903803e-02   9.0441478e-04   3.0528309e-04   3.1648190e-03   3.1223857e-03   2.2339929e-03   2.7724170e-04   9.5102356e-05   4.3913729e-04   2.1061066e-01   2.0964712e-01   2.4888926e-01   2.9163776e-01   2.6471732e-01   2.8522189e-01   2.3035873e-01   1.8998369e-01   2.4143327e-01   2.4006731e-01   2.7373417e-01   2.1381600e-01   2.6519773e-01   2.8097864e-01   1.5319882e-01   1.9790190e-01   2.7464921e-01   2.2594124e-01   3.4406181e-01   2.3583526e-01   2.8783351e-01   2.0067586e-01   3.4959337e-01   2.8469567e-01   2.1442337e-01   2.1148410e-01   2.7089383e-01   2.8885413e-01   2.6304757e-01   1.6861411e-01   2.3983808e-01   2.2272157e-01   2.0662897e-01   3.8050402e-01   2.8992390e-01   2.0524163e-01   2.3373909e-01   3.0156183e-01   2.0732304e-01   2.6311208e-01   3.0478863e-01   2.5545595e-01   2.3172924e-01   2.0047959e-01   2.5968741e-01   2.1460680e-01   2.2900957e-01   2.2107714e-01   1.3590375e-01   2.2746759e-01   4.8087096e-01   4.2142796e-01   3.9814594e-01   4.1502871e-01   4.4228995e-01   4.4300705e-01   4.3159396e-01   4.2281768e-01   4.5341178e-01   3.7067256e-01   3.0283474e-01   3.9670954e-01   3.6890449e-01   4.5441105e-01   4.5432159e-01   3.5749751e-01   3.7222314e-01   3.7273757e-01   5.2092654e-01   4.2307513e-01   
3.7613897e-01   4.0250144e-01   4.5970761e-01   3.4267709e-01   3.6611458e-01   3.6389952e-01   3.2189682e-01   3.1593975e-01   4.4091119e-01   3.5132753e-01   4.0985044e-01   3.1723537e-01   4.4934566e-01   3.3938050e-01   4.4068685e-01   3.9407776e-01   3.9311145e-01   3.6818097e-01   3.0785214e-01   3.3679573e-01   3.9853670e-01   3.1138703e-01   4.2142796e-01   4.1148317e-01   3.9324794e-01   3.4985868e-01   3.9052142e-01   3.4304315e-01   3.6227810e-01   3.6300235e-01   3.5297445e-03   2.3993465e-03   8.1469449e-03   8.4116551e-03   8.4748907e-03   3.0443320e-03   1.8587915e-03   2.8140158e-03   3.7033592e-03   2.9317197e-03   1.3265454e-02   4.5015236e-03   2.6265400e-03   7.5415562e-03   1.6353241e-03   1.4046892e-03   3.1172532e-03   6.0159720e-04   2.6265400e-03   7.0517547e-03   5.5270478e-03   6.4518235e-03   2.8691382e-03   6.1014438e-03   6.3013427e-03   2.8691382e-03   1.1615614e-03   1.4498289e-03   4.4069463e-03   2.8268404e-02   1.4610903e-03   3.3082950e-03   4.5136771e-04   5.8170191e-03   1.2876475e-03   5.5964987e-04   1.3152056e-03   2.3446691e-03   1.9624436e-01   1.9365486e-01   2.3272539e-01   2.7570910e-01   2.4962218e-01   2.6341222e-01   2.1161968e-01   1.7505868e-01   2.2555325e-01   2.2171510e-01   2.5853430e-01   1.9783603e-01   2.5146360e-01   2.6075203e-01   1.4121466e-01   1.8483453e-01   2.5221719e-01   2.0803875e-01   3.2980663e-01   2.1983733e-01   2.6580206e-01   1.8792174e-01   3.3096405e-01   2.6375229e-01   2.0022607e-01   1.9799659e-01   2.5530781e-01   2.7170116e-01   2.4472945e-01   1.5721426e-01   2.2453551e-01   2.0791989e-01   1.9232403e-01   3.5721533e-01   2.6541719e-01   1.8583170e-01   2.1815822e-01   2.8740780e-01   1.8853156e-01   2.4642239e-01   2.8243457e-01   2.3594931e-01   2.1655773e-01   1.8685267e-01   2.4074340e-01   1.9492750e-01   2.1026662e-01   2.0538411e-01   1.2769778e-01   2.1026662e-01   4.5350142e-01   3.9795187e-01   3.7769065e-01   3.8983341e-01   4.1851849e-01   4.2042793e-01   4.0510768e-01   3.9953508e-01   4.3130939e-01   3.4895812e-01   2.8413412e-01   3.7648942e-01   3.4987508e-01   4.3267862e-01   4.3359924e-01   3.3758650e-01   3.4918773e-01   3.4772733e-01   4.9915788e-01   4.0231358e-01   3.5632097e-01   3.7931174e-01   4.3732697e-01   3.2511301e-01   3.4317696e-01   3.4120231e-01   3.0420772e-01   2.9548175e-01   4.1851849e-01   3.3003548e-01   3.8954108e-01   2.9516106e-01   4.2751620e-01   3.1771485e-01   4.1340131e-01   3.7704473e-01   3.6865281e-01   3.4390717e-01   2.8759676e-01   3.1915503e-01   3.7909158e-01   2.9821954e-01   3.9795187e-01   3.8894782e-01   3.7251565e-01   3.3442155e-01   3.7333196e-01   3.2403985e-01   3.3841983e-01   3.3852014e-01   4.9713816e-03   9.2903476e-03   1.5944722e-02   1.1386125e-02   3.5402300e-03   9.6201776e-04   8.6911918e-03   9.4953207e-05   6.4447745e-03   2.1940505e-02   1.4660203e-04   6.6774582e-03   1.0681613e-03   1.0987220e-03   1.5397922e-03   1.3925202e-03   1.6744918e-03   4.5493239e-04   1.7274513e-03   1.6063124e-02   1.4167352e-02   1.3643967e-04   2.8974888e-03   2.8893579e-03   1.3643967e-04   1.8844794e-03   1.0195101e-03   5.9835494e-03   1.1907169e-02   6.2144210e-03   3.1461009e-03   6.4603159e-03   6.0804116e-04   7.9398115e-03   2.6608779e-03   3.6611489e-03   1.1878605e-03   1.6827914e-01   1.6808878e-01   2.0330224e-01   2.4208927e-01   2.1725982e-01   2.3901101e-01   1.8791090e-01   1.5022413e-01   1.9646923e-01   1.9636982e-01   2.2550082e-01   1.7178784e-01   2.1730098e-01   2.3423675e-01   1.1690649e-01   1.5652486e-01   2.2988556e-01   
1.8351752e-01   2.8999988e-01   1.9148148e-01   2.4151451e-01   1.5889324e-01   2.9641885e-01   2.3800995e-01   1.7162027e-01   1.6875690e-01   2.2303923e-01   2.3997805e-01   2.1702847e-01   1.3016155e-01   1.9481438e-01   1.7925806e-01   1.6471086e-01   3.2723835e-01   2.4516700e-01   1.6613136e-01   1.8943302e-01   2.5069233e-01   1.6755119e-01   2.1637456e-01   2.5710224e-01   2.1080083e-01   1.8747239e-01   1.5900127e-01   2.1430731e-01   1.7454101e-01   1.8671199e-01   1.7813660e-01   1.0093430e-01   1.8452279e-01   4.2348788e-01   3.6545764e-01   3.4229982e-01   3.6044592e-01   3.8515670e-01   3.8525367e-01   3.7671058e-01   3.6665866e-01   3.9483186e-01   3.1729700e-01   2.5339335e-01   3.4086300e-01   3.1448994e-01   3.9561710e-01   3.9513385e-01   3.0425707e-01   3.1942407e-01   3.2109044e-01   4.5863634e-01   3.6575820e-01   3.2152631e-01   3.4763745e-01   4.0088503e-01   2.8963041e-01   3.1371721e-01   3.1153634e-01   2.7048666e-01   2.6621754e-01   3.8319922e-01   2.9918600e-01   3.5318564e-01   2.6828212e-01   3.9088644e-01   2.8836369e-01   3.8573578e-01   3.3732047e-01   3.3961180e-01   3.1641339e-01   2.5871452e-01   2.8421639e-01   3.4227214e-01   2.5952711e-01   3.6545764e-01   3.5568934e-01   3.3784386e-01   2.9565951e-01   3.3403769e-01   2.9050448e-01   3.1070970e-01   3.1176741e-01   1.7225353e-03   3.1252650e-03   1.9141697e-03   3.0572974e-04   1.5621195e-03   7.7657730e-04   6.0730841e-03   9.6969946e-05   6.0804497e-03   4.8728559e-03   1.0019276e-02   1.0630759e-02   1.4028596e-03   1.0008907e-03   1.5106647e-03   3.9425625e-03   5.9922357e-03   4.5089760e-03   4.5296071e-03   2.4544132e-03   5.3429196e-03   2.3927105e-03   2.5658780e-03   5.3429196e-03   8.0759380e-04   1.5345623e-03   3.2653860e-04   2.8784931e-02   4.6959359e-04   5.2961514e-04   3.5529133e-03   5.0787251e-03   1.7528094e-03   7.9750642e-04   1.8469304e-04   1.3949343e-03   2.2601194e-01   2.2489065e-01   2.6538416e-01   3.0933559e-01   2.8173337e-01   3.0217927e-01   2.4601138e-01   2.0461695e-01   2.5772236e-01   2.5608721e-01   2.9099324e-01   2.2920421e-01   2.8227951e-01   2.9802519e-01   1.6660589e-01   2.1294215e-01   2.9118999e-01   2.4154604e-01   3.6304829e-01   2.5194222e-01   3.0483437e-01   2.1582549e-01   3.6852765e-01   3.0175865e-01   2.2996178e-01   2.2696306e-01   2.8805620e-01   3.0640689e-01   2.7978083e-01   1.8266242e-01   2.5611673e-01   2.3849427e-01   2.2189938e-01   3.9968280e-01   3.0655638e-01   2.1989254e-01   2.4981061e-01   3.1957344e-01   2.2214967e-01   2.7998655e-01   3.2222399e-01   2.7182647e-01   2.4776514e-01   2.1557967e-01   2.7625427e-01   2.2956697e-01   2.4461597e-01   2.3673300e-01   1.4870158e-01   2.4319918e-01   5.0146439e-01   4.4143183e-01   4.1797463e-01   4.3468983e-01   4.6265612e-01   4.6350594e-01   4.5140079e-01   4.4286970e-01   4.7413628e-01   3.8981309e-01   3.2063561e-01   4.1652733e-01   3.8823357e-01   4.7518267e-01   4.7516433e-01   3.7651294e-01   3.9124904e-01   3.9150135e-01   5.4273573e-01   4.4336023e-01   3.9556546e-01   4.2215915e-01   4.8051706e-01   3.6152094e-01   3.8501414e-01   3.8277781e-01   3.4024930e-01   3.3391004e-01   4.6138930e-01   3.7007400e-01   4.2991865e-01   3.3504580e-01   4.7002175e-01   3.5780202e-01   4.6054739e-01   4.1401655e-01   4.1241306e-01   3.8694896e-01   3.2563422e-01   3.5550143e-01   4.1844379e-01   3.2964964e-01   4.4143183e-01   4.3139176e-01   4.1295631e-01   3.6894646e-01   4.1038562e-01   3.6180237e-01   3.8096707e-01   3.8161756e-01   2.9059655e-03   2.3706161e-04   1.5622923e-03   
4.6985391e-03   3.0899699e-03   1.1144685e-02   1.5452086e-03   4.3912180e-03   8.2378168e-03   1.9894938e-02   1.6022015e-02   4.6248887e-03   4.1434850e-03   3.4955495e-03   1.0239539e-02   1.2012056e-02   5.3911313e-03   8.1025404e-03   3.3141536e-03   1.0581034e-02   2.5128050e-03   2.6447621e-03   1.0581034e-02   4.0444636e-03   5.0265914e-03   5.7591609e-04   3.1111838e-02   3.5439088e-03   1.6935296e-03   1.0096079e-02   7.4536930e-03   6.1909555e-03   4.6446420e-03   2.9301624e-03   3.9714148e-03   2.5068010e-01   2.5098525e-01   2.9216380e-01   3.3638336e-01   3.0774280e-01   3.3430586e-01   2.7479523e-01   2.2946729e-01   2.8415608e-01   2.8467354e-01   3.1719659e-01   2.5534962e-01   3.0706036e-01   3.2874800e-01   1.8820706e-01   2.3625921e-01   3.2370915e-01   2.6954509e-01   3.8903955e-01   2.7840834e-01   3.3718014e-01   2.3890051e-01   3.9852630e-01   3.3314087e-01   2.5453269e-01   2.5085481e-01   3.1457631e-01   3.3452242e-01   3.0863300e-01   2.0400912e-01   2.8201538e-01   2.6372052e-01   2.4645516e-01   4.3401653e-01   3.4099078e-01   2.4885351e-01   2.7587880e-01   3.4510687e-01   2.5062404e-01   3.0740449e-01   3.5503881e-01   3.0161400e-01   2.7344211e-01   2.3943815e-01   3.0560987e-01   2.5890901e-01   2.7338361e-01   2.6272911e-01   1.6653571e-01   2.7061027e-01   5.3996294e-01   4.7622847e-01   4.4996413e-01   4.7092027e-01   4.9784258e-01   4.9765553e-01   4.8884384e-01   4.7750758e-01   5.0792579e-01   4.2270376e-01   3.5026703e-01   4.4829833e-01   4.1874266e-01   5.0865069e-01   5.0773433e-01   4.0771387e-01   4.2529697e-01   4.2723724e-01   5.7654330e-01   4.7578493e-01   4.2683086e-01   4.5657562e-01   5.1458642e-01   3.9050926e-01   4.1892650e-01   4.1646419e-01   3.6916765e-01   3.6521270e-01   4.9536279e-01   4.0243409e-01   4.6185742e-01   3.6775616e-01   5.0354755e-01   3.9037876e-01   4.9872590e-01   4.4290716e-01   4.4784995e-01   4.2202156e-01   3.5667822e-01   3.8450868e-01   4.4953729e-01   3.5436611e-01   4.7622847e-01   4.6530251e-01   4.4515647e-01   3.9606646e-01   4.3939461e-01   3.9207838e-01   4.1563418e-01   4.1682072e-01   1.5609953e-03   4.8490070e-03   9.0958370e-03   1.4989091e-03   1.7813868e-02   2.1261488e-03   5.5450301e-04   1.5694148e-02   1.9384410e-02   2.5209737e-02   8.6954304e-03   7.6213095e-03   8.7105486e-03   1.2654300e-02   1.7334342e-02   1.3883194e-02   2.2522789e-03   1.7443565e-04   1.6459895e-02   9.1718824e-03   9.4916987e-03   1.6459895e-02   6.9918464e-03   8.9833757e-03   2.9662983e-03   4.9266036e-02   2.9055422e-03   5.5731976e-03   8.0889759e-03   1.5739049e-02   3.7597322e-03   6.3200695e-03   4.4644236e-03   8.6395939e-03   2.7692079e-01   2.7499135e-01   3.1941212e-01   3.6719225e-01   3.3760508e-01   3.5656667e-01   2.9687968e-01   2.5298236e-01   3.1115442e-01   3.0819394e-01   3.4760947e-01   2.7976620e-01   3.3861131e-01   3.5300749e-01   2.1161836e-01   2.6294922e-01   3.4413759e-01   2.9242527e-01   4.2523864e-01   3.0477646e-01   3.5931443e-01   2.6623770e-01   4.2973462e-01   3.5665121e-01   2.8133678e-01   2.7828246e-01   3.4429599e-01   3.6357656e-01   3.3414792e-01   2.2982660e-01   3.0962503e-01   2.9049749e-01   2.7240490e-01   4.6073277e-01   3.5937207e-01   2.6744708e-01   3.0261187e-01   3.7874332e-01   2.7038484e-01   3.3511133e-01   3.7800754e-01   3.2481692e-01   3.0053112e-01   2.6567213e-01   3.2997477e-01   2.7805229e-01   2.9533686e-01   2.8819780e-01   1.9246360e-01   2.9461561e-01   5.6604002e-01   5.0500114e-01   4.8160322e-01   4.9684759e-01   5.2728223e-01   5.2878056e-01   
5.1374073e-01   5.0662675e-01   5.4019822e-01   4.5105935e-01   3.7828674e-01   4.8016813e-01   4.5059090e-01   5.4146236e-01   5.4186046e-01   4.3772456e-01   4.5187440e-01   4.5090747e-01   6.1216296e-01   5.0833718e-01   4.5807252e-01   4.8471571e-01   5.4678369e-01   4.2264992e-01   4.4526564e-01   4.4301133e-01   3.9982165e-01   3.9174986e-01   5.2663733e-01   4.3017652e-01   4.9431552e-01   3.9206379e-01   5.3598953e-01   4.1681568e-01   5.2288475e-01   4.7864082e-01   4.7360696e-01   4.4651927e-01   3.8292357e-01   4.1618542e-01   4.8251093e-01   3.8978460e-01   5.0500114e-01   4.9485605e-01   4.7615846e-01   4.3123359e-01   4.7475023e-01   4.2239197e-01   4.4037545e-01   4.4066833e-01   2.2696323e-03   5.9674345e-03   2.4448133e-03   1.3335241e-02   1.4550127e-03   2.5899751e-03   1.0462849e-02   2.0483763e-02   1.9016265e-02   5.8048658e-03   5.1341121e-03   4.8749246e-03   1.1285550e-02   1.3907377e-02   7.6337585e-03   6.3180508e-03   2.0267734e-03   1.2551534e-02   4.1078635e-03   4.2919330e-03   1.2551534e-02   4.8854397e-03   6.2043549e-03   8.9134425e-04   3.6466805e-02   3.2782414e-03   2.5696799e-03   9.8307089e-03   9.8120264e-03   5.5450409e-03   5.1728098e-03   3.2838118e-03   5.2555481e-03   2.6123905e-01   2.6103645e-01   3.0329747e-01   3.4864780e-01   3.1958423e-01   3.4458162e-01   2.8459400e-01   2.3921225e-01   2.9516933e-01   2.9488851e-01   3.2923215e-01   2.6553322e-01   3.1926739e-01   3.3946038e-01   1.9751618e-01   2.4678743e-01   3.3347141e-01   2.7948950e-01   4.0283567e-01   2.8923185e-01   3.4744486e-01   2.4959732e-01   4.1128993e-01   3.4370645e-01   2.6525258e-01   2.6168140e-01   3.2643822e-01   3.4638099e-01   3.1949478e-01   2.1402441e-01   2.9314941e-01   2.7451414e-01   2.5691069e-01   4.4594923e-01   3.5036767e-01   2.5760360e-01   2.8676453e-01   3.5805235e-01   2.5967094e-01   3.1875975e-01   3.6562012e-01   3.1188296e-01   2.8438818e-01   2.4989387e-01   3.1618152e-01   2.6787548e-01   2.8314006e-01   2.7321717e-01   1.7615044e-01   2.8082637e-01   5.5224352e-01   4.8885819e-01   4.6311762e-01   4.8285549e-01   5.1073035e-01   5.1093091e-01   5.0061869e-01   4.9022404e-01   5.2151073e-01   4.3495694e-01   3.6199911e-01   4.6149506e-01   4.3176986e-01   5.2236122e-01   5.2173621e-01   4.2026138e-01   4.3714978e-01   4.3841019e-01   5.9117531e-01   4.8927834e-01   4.3976762e-01   4.6895974e-01   5.2818477e-01   4.0343191e-01   4.3068816e-01   4.2826096e-01   3.8162190e-01   3.7670056e-01   5.0866155e-01   4.1442940e-01   4.7525840e-01   3.7873655e-01   5.1715063e-01   4.0199874e-01   5.1036877e-01   4.5693051e-01   4.5962906e-01   4.3336423e-01   3.6804311e-01   3.9729031e-01   4.6298867e-01   3.6775880e-01   4.8885819e-01   4.7805886e-01   4.5813974e-01   4.0968699e-01   4.5331588e-01   4.0460089e-01   4.2699969e-01   4.2797954e-01   8.7894099e-04   2.0541035e-03   4.6305984e-03   6.7282118e-04   8.1166178e-03   3.1872717e-03   1.0880321e-02   8.3605717e-03   8.2577841e-04   6.1811314e-04   5.6936180e-04   3.8806955e-03   4.9821714e-03   2.5043588e-03   7.1844876e-03   4.2468498e-03   4.1576220e-03   9.9353221e-04   1.1055786e-03   4.1576220e-03   5.9733473e-04   9.9247040e-04   3.2081104e-04   2.3598005e-02   1.4307976e-03   3.0934240e-05   4.9322953e-03   3.1299098e-03   3.2877683e-03   9.9769768e-04   4.3968696e-04   6.2770216e-04   2.1786575e-01   2.1746374e-01   2.5686278e-01   2.9961632e-01   2.7235218e-01   2.9521657e-01   2.3917491e-01   1.9737534e-01   2.4929109e-01   2.4877587e-01   2.8142376e-01   2.2163964e-01   2.7235613e-01   
2.9042411e-01   1.5945203e-01   2.0466964e-01   2.8483074e-01   2.3445929e-01   3.5153649e-01   2.4372083e-01   2.9790924e-01   2.0734139e-01   3.5860995e-01   2.9439415e-01   2.2162942e-01   2.1843466e-01   2.7871280e-01   2.9725443e-01   2.7179832e-01   1.7471289e-01   2.4749127e-01   2.3015922e-01   2.1385216e-01   3.9117409e-01   3.0083879e-01   2.1421404e-01   2.4147478e-01   3.0893498e-01   2.1609222e-01   2.7129944e-01   3.1500957e-01   2.6459749e-01   2.3931769e-01   2.0744985e-01   2.6864595e-01   2.2369874e-01   2.3782351e-01   2.2882042e-01   1.4076875e-01   2.3574843e-01   4.9305396e-01   4.3221625e-01   4.0786305e-01   4.2639713e-01   4.5320351e-01   4.5351054e-01   4.4342223e-01   4.3354113e-01   4.6376094e-01   3.8078797e-01   3.1179824e-01   4.0634323e-01   3.7808795e-01   4.6463298e-01   4.6419251e-01   3.6697079e-01   3.8279432e-01   3.8398235e-01   5.3121485e-01   4.3292773e-01   3.8560639e-01   4.1316779e-01   4.7015919e-01   3.5131205e-01   3.7664303e-01   3.7434225e-01   3.3054253e-01   3.2553357e-01   4.5134808e-01   3.6126826e-01   4.1953095e-01   3.2738535e-01   4.5959627e-01   3.4943014e-01   4.5279702e-01   4.0259529e-01   4.0419917e-01   3.7916847e-01   3.1736132e-01   3.4544845e-01   4.0790313e-01   3.1842804e-01   4.3221625e-01   4.2193584e-01   4.0305492e-01   3.5775833e-01   3.9908964e-01   3.5217975e-01   3.7311479e-01   3.7405234e-01   4.0128787e-03   1.5082918e-03   2.4327589e-03   1.3749582e-02   9.8644164e-04   7.0275865e-03   4.0509182e-03   9.2137987e-06   8.1954627e-05   2.0221322e-04   1.4919720e-03   1.6918938e-03   1.6810350e-03   9.8366461e-03   7.8161988e-03   1.2160579e-03   1.4072237e-03   1.4774681e-03   1.2160579e-03   1.9402564e-04   2.6249834e-05   2.2527118e-03   1.8043780e-02   2.4914194e-03   7.5228825e-04   4.0311549e-03   1.3209671e-03   4.0483306e-03   6.3837247e-04   9.1866746e-04   4.5668021e-05   1.9279238e-01   1.9221899e-01   2.2979083e-01   2.7088361e-01   2.4479658e-01   2.6610561e-01   2.1267629e-01   1.7326091e-01   2.2258210e-01   2.2184784e-01   2.5349552e-01   1.9619155e-01   2.4503806e-01   2.6157291e-01   1.3780150e-01   1.8042756e-01   2.5616217e-01   2.0823320e-01   3.2136818e-01   2.1724300e-01   2.6868196e-01   1.8301959e-01   3.2745065e-01   2.6534429e-01   1.9640162e-01   1.9346456e-01   2.5082861e-01   2.6843057e-01   2.4386055e-01   1.5229091e-01   2.2093793e-01   2.0444992e-01   1.8898039e-01   3.5852616e-01   2.7153123e-01   1.8897970e-01   2.1514921e-01   2.8018592e-01   1.9075855e-01   2.4355737e-01   2.8508289e-01   2.3688721e-01   2.1314274e-01   1.8298617e-01   2.4079289e-01   1.9795777e-01   2.1138990e-01   2.0306053e-01   1.2090533e-01   2.0951271e-01   4.5737829e-01   3.9834085e-01   3.7497739e-01   3.9259152e-01   4.1873643e-01   4.1914498e-01   4.0909121e-01   3.9964410e-01   4.2918834e-01   3.4857995e-01   2.8224226e-01   3.7353062e-01   3.4626594e-01   4.3008323e-01   4.2978752e-01   3.3538250e-01   3.5042715e-01   3.5150644e-01   4.9516533e-01   3.9931312e-01   3.5345241e-01   3.7985840e-01   4.3539662e-01   3.2054198e-01   3.4448056e-01   3.4226906e-01   3.0044482e-01   2.9530625e-01   4.1705793e-01   3.2972732e-01   3.8633770e-01   2.9699130e-01   4.2515986e-01   3.1826259e-01   4.1819279e-01   3.7038673e-01   3.7108888e-01   3.4686702e-01   2.8745249e-01   3.1485734e-01   3.7515178e-01   2.8956004e-01   3.9834085e-01   3.8842721e-01   3.7027395e-01   3.2715736e-01   3.6694987e-01   3.2117964e-01   3.4102816e-01   3.4191835e-01   9.8460504e-03   4.2097646e-04   3.8720695e-03   8.9609139e-03   
1.0271114e-02   1.5759765e-02   3.6853519e-03   2.9640455e-03   4.3261623e-03   5.5313131e-03   9.1074119e-03   8.9974672e-03   1.5687499e-03   7.6879483e-04   8.7064401e-03   5.8625582e-03   6.1329529e-03   8.7064401e-03   2.4949105e-03   3.7729184e-03   1.5387455e-03   3.8489345e-02   2.4427633e-04   2.5870999e-03   2.8098846e-03   9.5472550e-03   6.5811256e-04   1.8465019e-03   1.0925425e-03   3.9557876e-03   2.4136396e-01   2.3914246e-01   2.8140056e-01   3.2726072e-01   2.9909180e-01   3.1587284e-01   2.5940651e-01   2.1853199e-01   2.7358477e-01   2.7023320e-01   3.0865618e-01   2.4367721e-01   3.0046030e-01   3.1269475e-01   1.8026794e-01   2.2845050e-01   3.0393397e-01   2.5531598e-01   3.8377418e-01   2.6747174e-01   3.1847415e-01   2.3166294e-01   3.8684625e-01   3.1607055e-01   2.4560721e-01   2.4289838e-01   3.0537010e-01   3.2346477e-01   2.9500292e-01   1.9758720e-01   2.7227129e-01   2.5416529e-01   2.3708995e-01   4.1588028e-01   3.1833018e-01   2.3144395e-01   2.6550973e-01   3.3890880e-01   2.3429142e-01   2.9628566e-01   3.3633600e-01   2.8587728e-01   2.6362949e-01   2.3087874e-01   2.9089607e-01   2.4143368e-01   2.5794041e-01   2.5175590e-01   1.6347149e-01   2.5753099e-01   5.1758017e-01   4.5875138e-01   4.3664497e-01   4.5058511e-01   4.8035455e-01   4.8203651e-01   4.6682330e-01   4.6036369e-01   4.9325486e-01   4.0679312e-01   3.3723664e-01   4.3530687e-01   4.0692383e-01   4.9456817e-01   4.9519341e-01   3.9426880e-01   4.0735509e-01   4.0616607e-01   5.6363656e-01   4.6254311e-01   4.1397979e-01   4.3911880e-01   4.9961302e-01   3.8027310e-01   4.0098514e-01   3.9884545e-01   3.5820683e-01   3.4982569e-01   4.7998848e-01   3.8669536e-01   4.4902142e-01   3.4986296e-01   4.8921417e-01   3.7374272e-01   4.7562308e-01   4.3467328e-01   4.2816870e-01   4.0201263e-01   3.4137888e-01   3.7400811e-01   4.3776854e-01   3.4988231e-01   4.5875138e-01   4.4907175e-01   4.3130086e-01   3.8919296e-01   4.3084858e-01   3.7966236e-01   3.9613519e-01   3.9634370e-01   7.6697767e-03   2.4232895e-02   4.2497383e-04   5.8645021e-03   7.1697323e-04   1.6459430e-03   2.1472725e-03   2.1701877e-03   1.5644328e-03   2.1493500e-04   2.5540450e-03   1.7185148e-02   1.5785034e-02   5.4418849e-05   4.0402433e-03   4.0294443e-03   5.4418849e-05   2.5045043e-03   1.5070390e-03   7.3845654e-03   1.1685406e-02   7.1165327e-03   4.2192668e-03   6.5870297e-03   1.0764642e-03   8.6213220e-03   3.2147266e-03   4.4993834e-03   1.8554035e-03   1.6163874e-01   1.6124582e-01   1.9600517e-01   2.3441471e-01   2.0995191e-01   2.3066019e-01   1.8049424e-01   1.4375647e-01   1.8928742e-01   1.8888935e-01   2.1809263e-01   1.6489764e-01   2.1018407e-01   2.2610729e-01   1.1131536e-01   1.5021138e-01   2.2157821e-01   1.7624974e-01   2.8213828e-01   1.8434338e-01   2.3311339e-01   1.5259174e-01   2.8788764e-01   2.2976085e-01   1.6495975e-01   1.6222817e-01   2.1560367e-01   2.3216078e-01   2.0930930e-01   1.2444580e-01   1.8772709e-01   1.7242257e-01   1.5812464e-01   3.1788081e-01   2.3649618e-01   1.5894784e-01   1.8237277e-01   2.4318751e-01   1.6040650e-01   2.0886424e-01   2.4850015e-01   2.0301750e-01   1.8048879e-01   1.5257957e-01   2.0654604e-01   1.6721007e-01   1.7931046e-01   1.7120054e-01   9.6139593e-02   1.7732284e-01   4.1297254e-01   3.5577891e-01   3.3316436e-01   3.5061301e-01   3.7530173e-01   3.7554177e-01   3.6661833e-01   3.5699377e-01   3.8511507e-01   3.0820403e-01   2.4524565e-01   3.3176869e-01   3.0575678e-01   3.8594227e-01   3.8559393e-01   2.9549451e-01   3.1016061e-01   
3.1160241e-01   4.4857854e-01   3.5641190e-01   3.1263400e-01   3.3812708e-01   3.9109335e-01   2.8129908e-01   3.0451362e-01   3.0237858e-01   2.6230725e-01   2.5773246e-01   3.7352449e-01   2.9029824e-01   3.4398832e-01   2.5959514e-01   3.8123049e-01   2.7952979e-01   3.7549545e-01   3.2868655e-01   3.3002382e-01   3.0704202e-01   2.5032682e-01   2.7592132e-01   3.3326893e-01   2.5208920e-01   3.5577891e-01   3.4619466e-01   3.2870702e-01   2.8757710e-01   3.2540565e-01   2.8197541e-01   3.0143248e-01   3.0241595e-01   4.6779054e-03   6.3405358e-03   1.1072067e-02   1.2740784e-02   2.2273681e-03   1.7042107e-03   2.3469652e-03   4.9562680e-03   7.4940361e-03   5.7695726e-03   3.5981535e-03   1.5844867e-03   6.8214146e-03   3.1957366e-03   3.3967393e-03   6.8214146e-03   1.4289215e-03   2.3814252e-03   3.6906895e-04   3.2052685e-02   4.0942979e-04   9.9039775e-04   3.7975123e-03   6.5400285e-03   1.5831617e-03   1.2837357e-03   4.6903663e-04   2.2256509e-03   2.3440889e-01   2.3310632e-01   2.7431935e-01   3.1898347e-01   2.9103053e-01   3.1107217e-01   2.5431417e-01   2.1253549e-01   2.6655333e-01   2.6462531e-01   3.0042601e-01   2.3750489e-01   2.9168949e-01   3.0705757e-01   1.7397041e-01   2.2119992e-01   2.9980584e-01   2.4986709e-01   3.7353610e-01   2.6066067e-01   3.1374027e-01   2.2416418e-01   3.7874518e-01   3.1076095e-01   2.3844790e-01   2.3545346e-01   2.9741154e-01   3.1590317e-01   2.8873621e-01   1.9043311e-01   2.6497335e-01   2.4708373e-01   2.3022342e-01   4.0978464e-01   3.1511039e-01   2.2757419e-01   2.5853132e-01   3.2950090e-01   2.2996799e-01   2.8911377e-01   3.3136481e-01   2.8050509e-01   2.5648758e-01   2.2384164e-01   2.8507826e-01   2.3741411e-01   2.5289158e-01   2.4520543e-01   1.5591712e-01   2.5163277e-01   5.1215829e-01   4.5200156e-01   4.2861009e-01   4.4496538e-01   4.7341919e-01   4.7441196e-01   4.6168878e-01   4.5347805e-01   4.8519916e-01   3.9997999e-01   3.3019092e-01   4.2716946e-01   3.9865809e-01   4.8629326e-01   4.8637186e-01   3.8670691e-01   4.0127624e-01   4.0126667e-01   5.5444310e-01   4.5424309e-01   4.0600299e-01   4.3254422e-01   4.9161364e-01   3.7174460e-01   3.9497344e-01   3.9273809e-01   3.5018083e-01   3.4346685e-01   4.7229362e-01   3.8003558e-01   4.4070091e-01   3.4442025e-01   4.8107176e-01   3.6755422e-01   4.7082182e-01   4.2490414e-01   4.2252116e-01   3.9675769e-01   3.3509282e-01   3.6564070e-01   4.2918040e-01   3.3977682e-01   4.5200156e-01   4.4195847e-01   4.2350677e-01   3.7942836e-01   4.2122143e-01   3.7189771e-01   3.9075300e-01   3.9132598e-01   2.1401129e-02   2.6296569e-02   3.2566498e-02   1.3297832e-02   1.1999623e-02   1.2969856e-02   1.8466773e-02   2.3873417e-02   1.8518842e-02   4.0273028e-03   1.2616800e-03   2.2732626e-02   1.2827409e-02   1.3181090e-02   2.2732626e-02   1.1261737e-02   1.3701796e-02   5.3984869e-03   5.7610599e-02   5.9941455e-03   8.9691119e-03   1.2694193e-02   2.1144882e-02   7.0627421e-03   1.0543898e-02   8.0308076e-03   1.3063205e-02   2.9989796e-01   2.9808728e-01   3.4381381e-01   3.9275825e-01   3.6234621e-01   3.8238034e-01   3.2086021e-01   2.7526551e-01   3.3529881e-01   3.3248525e-01   3.7261196e-01   3.0300865e-01   3.6317903e-01   3.7865155e-01   2.3211430e-01   2.8532877e-01   3.6963026e-01   3.1621648e-01   4.5177033e-01   3.2875625e-01   3.8520789e-01   2.8867443e-01   4.5697798e-01   3.8242988e-01   3.0442487e-01   3.0118555e-01   3.6926913e-01   3.8920975e-01   3.5915749e-01   2.5088735e-01   3.3365900e-01   3.1393007e-01   2.9523721e-01   4.8898239e-01   
3.8528778e-01   2.9050533e-01   3.2648373e-01   4.0430761e-01   2.9352285e-01   3.5998625e-01   4.0438969e-01   3.4965804e-01   3.2429593e-01   2.8821126e-01   3.5492121e-01   3.0147553e-01   3.1927061e-01   3.1166517e-01   2.1164881e-01   3.1841810e-01   5.9626248e-01   5.3406968e-01   5.1001241e-01   5.2586196e-01   5.5673824e-01   5.5816878e-01   5.4309284e-01   5.3570978e-01   5.6971774e-01   4.7901704e-01   4.0442977e-01   5.0852772e-01   4.7828326e-01   5.7096471e-01   5.7126109e-01   4.6526268e-01   4.7993533e-01   4.7902644e-01   6.4260357e-01   5.3722757e-01   4.8599135e-01   5.1340958e-01   5.7642669e-01   4.4962350e-01   4.7318809e-01   4.7087433e-01   4.2633712e-01   4.1835467e-01   5.5597597e-01   4.5768215e-01   5.2292685e-01   4.1877489e-01   5.6541976e-01   4.4406435e-01   5.5240885e-01   5.0657081e-01   5.0216108e-01   4.7452659e-01   4.0930806e-01   4.4303784e-01   5.1082488e-01   4.1541892e-01   5.3406968e-01   5.2368299e-01   5.0449934e-01   4.5807263e-01   5.0263701e-01   4.4952858e-01   4.6823972e-01   4.6855914e-01   8.7198254e-03   1.2902931e-03   1.1684021e-03   1.6341127e-03   1.0742763e-03   2.6208124e-03   1.1173357e-03   8.9620141e-04   1.6960301e-02   1.4197837e-02   5.6586090e-04   2.0157829e-03   1.9836256e-03   5.6586090e-04   2.0413623e-03   1.1637665e-03   5.4511725e-03   1.0718252e-02   6.5990100e-03   2.7263257e-03   7.7966832e-03   1.5754002e-04   8.7922744e-03   3.0619296e-03   3.8030551e-03   1.0550593e-03   1.7203244e-01   1.7235723e-01   2.0751307e-01   2.4611820e-01   2.2109400e-01   2.4491699e-01   1.9298922e-01   1.5421652e-01   2.0061094e-01   2.0129686e-01   2.2934517e-01   1.7603988e-01   2.2070889e-01   2.3964854e-01   1.2009621e-01   1.5990877e-01   2.3601974e-01   1.8834267e-01   2.9339046e-01   1.9567575e-01   2.4748464e-01   1.6217086e-01   3.0116422e-01   2.4364046e-01   1.7530938e-01   1.7223213e-01   2.2701156e-01   2.4439785e-01   2.2188150e-01   1.3311613e-01   1.9877894e-01   1.8311301e-01   1.6845668e-01   3.3339209e-01   2.5192873e-01   1.7150268e-01   1.9350184e-01   2.5414415e-01   1.7271733e-01   2.2071334e-01   2.6313203e-01   2.1605186e-01   1.9141813e-01   1.6255272e-01   2.1936783e-01   1.7996851e-01   1.9179556e-01   1.8227672e-01   1.0300751e-01   1.8912086e-01   4.3075013e-01   3.7158844e-01   3.4752045e-01   3.6713693e-01   3.9134187e-01   3.9105984e-01   3.8378198e-01   3.7272538e-01   4.0046876e-01   3.2297138e-01   2.5826699e-01   3.4601238e-01   3.1932057e-01   4.0113556e-01   4.0033459e-01   3.0938917e-01   3.2551476e-01   3.2781102e-01   4.6405632e-01   3.7102765e-01   3.2659021e-01   3.5371449e-01   4.0659862e-01   2.9405620e-01   3.1978995e-01   3.1753736e-01   2.7499746e-01   2.7161605e-01   3.8895029e-01   3.0476818e-01   3.5833770e-01   2.7419176e-01   3.9644448e-01   2.9409120e-01   3.9304592e-01   3.4147602e-01   3.4611220e-01   3.2290811e-01   2.6406812e-01   2.8867894e-01   3.4717424e-01   2.6265032e-01   3.7158844e-01   3.6154105e-01   3.4316147e-01   2.9940560e-01   3.3824889e-01   2.9538166e-01   3.1708459e-01   3.1834036e-01   8.7316989e-03   6.7615313e-03   6.7580543e-03   9.6081666e-03   2.0657605e-03   3.8371349e-03   1.4271025e-02   1.2184393e-02   1.6084765e-02   4.9831593e-03   1.4699244e-02   1.4912454e-02   4.9831593e-03   6.5028816e-03   6.2489819e-03   1.3726162e-02   3.1411524e-02   7.7445772e-03   1.1141212e-02   2.5000477e-03   1.1143820e-02   6.1604304e-03   5.3711085e-03   7.5849080e-03   8.1835455e-03   1.6688130e-01   1.6291433e-01   2.0003066e-01   2.4149909e-01   2.1722570e-01   
2.2445012e-01   1.7758122e-01   1.4623090e-01   1.9343386e-01   1.8753794e-01   2.2568158e-01   1.6690274e-01   2.2030573e-01   2.2323949e-01   1.1699096e-01   1.5725818e-01   2.1323159e-01   1.7489777e-01   2.9532205e-01   1.8786273e-01   2.2655558e-01   1.6049736e-01   2.9226070e-01   2.2550223e-01   1.7083413e-01   1.6938578e-01   2.2221647e-01   2.3649790e-01   2.0956346e-01   1.3266766e-01   1.9299947e-01   1.7760033e-01   1.6320695e-01   3.1350837e-01   2.2421748e-01   1.5253844e-01   1.8664517e-01   2.5447595e-01   1.5560691e-01   2.1266187e-01   2.4223633e-01   2.0012528e-01   1.8549850e-01   1.5864276e-01   2.0519641e-01   1.6093217e-01   1.7628990e-01   1.7435363e-01   1.0801101e-01   1.7763987e-01   4.0351234e-01   3.5280598e-01   3.3582666e-01   3.4348161e-01   3.7251839e-01   3.7541634e-01   3.5722931e-01   3.5450932e-01   3.8642215e-01   3.0688243e-01   2.4703841e-01   3.3487550e-01   3.1018257e-01   3.8808655e-01   3.8990045e-01   2.9749231e-01   3.0596368e-01   3.0291072e-01   4.5287211e-01   3.5944458e-01   3.1570031e-01   3.3498082e-01   3.9202879e-01   2.8759651e-01   3.0023966e-01   2.9853735e-01   2.6729925e-01   2.5639363e-01   3.7372548e-01   2.8896987e-01   3.4745520e-01   2.5469579e-01   3.8297689e-01   2.7675896e-01   3.6463006e-01   3.3839246e-01   3.2358656e-01   2.9982548e-01   2.4898596e-01   2.8176933e-01   3.3810925e-01   2.6587897e-01   3.5280598e-01   3.4488951e-01   3.3055981e-01   2.9862862e-01   3.3464036e-01   2.8522813e-01   2.9487313e-01   2.9445589e-01   4.3312037e-03   5.1674385e-03   4.6600432e-03   4.1032398e-03   1.3249619e-03   3.6837745e-03   2.4877830e-02   2.2958753e-02   1.1314391e-03   6.3769043e-03   6.2810489e-03   1.1314391e-03   5.7702159e-03   4.1635898e-03   1.1902706e-02   7.2040189e-03   1.2293784e-02   7.6706139e-03   1.1339253e-02   1.8296747e-03   1.4280945e-02   6.9429868e-03   8.6587607e-03   4.4388406e-03   1.4462991e-01   1.4494206e-01   1.7756526e-01   2.1378771e-01   1.9031042e-01   2.1286860e-01   1.6419734e-01   1.2822571e-01   1.7113301e-01   1.7185341e-01   1.9804950e-01   1.4833973e-01   1.9005998e-01   2.0775634e-01   9.7105565e-02   1.3348080e-01   2.0467358e-01   1.5981923e-01   2.5874568e-01   1.6653873e-01   2.1530133e-01   1.3558698e-01   2.6575883e-01   2.1156399e-01   1.4766732e-01   1.4484877e-01   1.9583483e-01   2.1211626e-01   1.9101641e-01   1.0897783e-01   1.6943757e-01   1.5488355e-01   1.4132668e-01   2.9650216e-01   2.1994601e-01   1.4455458e-01   1.6451956e-01   2.2152336e-01   1.4555202e-01   1.8989624e-01   2.3000345e-01   1.8566766e-01   1.6258902e-01   1.3589561e-01   1.8870744e-01   1.5235300e-01   1.6309487e-01   1.5410176e-01   8.1942750e-02   1.6048904e-01   3.8999083e-01   3.3294245e-01   3.0988325e-01   3.2882841e-01   3.5187047e-01   3.5156617e-01   3.4487977e-01   3.3402051e-01   3.6059894e-01   2.8649814e-01   2.2516617e-01   3.0844683e-01   2.8301726e-01   3.6124416e-01   3.6050400e-01   2.7355262e-01   2.8899505e-01   2.9139815e-01   4.2192237e-01   3.3236960e-01   2.8992253e-01   3.1585310e-01   3.6648886e-01   2.5905330e-01   2.8355149e-01   2.8139447e-01   2.4097820e-01   2.3780572e-01   3.4954046e-01   2.6919588e-01   3.2022959e-01   2.4036369e-01   3.5673540e-01   2.5910558e-01   3.5384915e-01   3.0430469e-01   3.0871278e-01   2.8664912e-01   2.3068275e-01   2.5394489e-01   3.0958210e-01   2.2970603e-01   3.3294245e-01   3.2329736e-01   3.0571743e-01   2.6432145e-01   3.0120271e-01   2.6026006e-01   2.8107880e-01   2.8234890e-01   3.9333334e-05   2.5761851e-04   1.3896575e-03   
1.7536063e-03   1.9216331e-03   9.2702772e-03   7.4022776e-03   1.3090644e-03   1.5444025e-03   1.6252591e-03   1.3090644e-03   1.2151407e-04   1.0618022e-05   2.1493524e-03   1.8833153e-02   2.2172388e-03   7.3253783e-04   3.6800655e-03   1.5497854e-03   3.6733784e-03   4.9421897e-04   7.6664670e-04   7.0183041e-05   1.9407897e-01   1.9337938e-01   2.3114302e-01   2.7244640e-01   2.4629979e-01   2.6718506e-01   2.1373312e-01   1.7438911e-01   2.2391848e-01   2.2298785e-01   2.5503185e-01   1.9737603e-01   2.4663659e-01   2.6276403e-01   1.3891547e-01   1.8173656e-01   2.5713590e-01   2.0933386e-01   3.2324703e-01   2.1854256e-01   2.6975489e-01   1.8436571e-01   3.2904939e-01   2.6649404e-01   1.9772084e-01   1.9481826e-01   2.5232520e-01   2.6989379e-01   2.4511828e-01   1.5354404e-01   2.2230840e-01   2.0576951e-01   1.9024966e-01   3.5987888e-01   2.7239312e-01   1.8983400e-01   2.1646886e-01   2.8191043e-01   1.9167912e-01   2.4493869e-01   2.8620678e-01   2.3801287e-01   2.1448199e-01   1.8427005e-01   2.4198503e-01   1.9884520e-01   2.1243903e-01   2.0430599e-01   1.2215728e-01   2.1067715e-01   4.5871846e-01   3.9981904e-01   3.7661173e-01   3.9390784e-01   4.2025710e-01   4.2076082e-01   4.1035332e-01   4.0114215e-01   4.3087129e-01   3.5002279e-01   2.8365040e-01   3.7517763e-01   3.4790525e-01   4.3179681e-01   4.3157518e-01   3.3691557e-01   3.5177012e-01   3.5268567e-01   4.9705280e-01   4.0100475e-01   3.5505921e-01   3.8129652e-01   4.3707681e-01   3.2219168e-01   3.4580868e-01   3.4360838e-01   3.0200588e-01   2.9663386e-01   4.1868171e-01   3.3113109e-01   3.8802094e-01   2.9819354e-01   4.2685142e-01   3.1959000e-01   4.1941543e-01   3.7225097e-01   3.7239048e-01   3.4809183e-01   2.8876177e-01   3.1647952e-01   3.7686145e-01   2.9138730e-01   3.9981904e-01   3.8994709e-01   3.7187123e-01   3.2897937e-01   3.6879196e-01   3.2272643e-01   3.4226532e-01   3.4310531e-01   3.4181612e-04   1.4644608e-03   2.1632573e-03   2.3231555e-03   8.1286280e-03   6.3818616e-03   1.7262411e-03   1.6624580e-03   1.7631926e-03   1.7262411e-03   2.4569053e-05   5.6859581e-05   1.7551896e-03   2.0541034e-02   1.6702311e-03   5.9025530e-04   3.2341087e-03   2.0408948e-03   3.0131078e-03   2.8888789e-04   4.5924146e-04   1.3293684e-04   1.9862431e-01   1.9776069e-01   2.3600084e-01   2.7779005e-01   2.5142895e-01   2.7187023e-01   2.1810101e-01   1.7860088e-01   2.2871390e-01   2.2752286e-01   2.6024955e-01   2.0181434e-01   2.5187795e-01   2.6758248e-01   1.4283878e-01   1.8622396e-01   2.6161990e-01   2.1373552e-01   3.2920505e-01   2.2325952e-01   2.7444329e-01   1.8891553e-01   3.3470963e-01   2.7127428e-01   2.0233024e-01   1.9944956e-01   2.5748056e-01   2.7510316e-01   2.4993365e-01   1.5775267e-01   2.2713624e-01   2.1043184e-01   1.9474801e-01   3.6534538e-01   2.7678503e-01   1.9377126e-01   2.2119834e-01   2.8749014e-01   1.9572304e-01   2.4991164e-01   2.9104517e-01   2.4261230e-01   2.1922346e-01   1.8874518e-01   2.4669621e-01   2.0288553e-01   2.1678869e-01   2.0886650e-01   1.2608625e-01   2.1517216e-01   4.6449792e-01   4.0560127e-01   3.8251194e-01   3.9944814e-01   4.2616553e-01   4.2679596e-01   4.1587264e-01   4.0695550e-01   4.3702628e-01   3.5557170e-01   2.8885981e-01   3.8108822e-01   3.5369655e-01   4.3799369e-01   4.3786355e-01   3.4252750e-01   3.5719184e-01   3.5788232e-01   5.0366583e-01   4.0706759e-01   3.6083966e-01   3.8695887e-01   4.4324858e-01   3.2788328e-01   3.5118655e-01   3.4899273e-01   3.0749578e-01   3.0179987e-01   4.2472010e-01   3.3655063e-01   
3.9402558e-01   3.0319627e-01   4.3300399e-01   3.2485825e-01   4.2490754e-01   3.7841034e-01   3.7783349e-01   3.5333537e-01   2.9386653e-01   3.2211174e-01   3.8285611e-01   2.9712888e-01   4.0560127e-01   3.9574961e-01   3.7770634e-01   3.3490218e-01   3.7491174e-01   3.2829405e-01   3.4750328e-01   3.4827580e-01   2.7893549e-03   2.7277911e-03   9.4108270e-04   1.0799149e-02   7.7770747e-03   1.9770194e-03   5.4325768e-04   5.8945201e-04   1.9770194e-03   5.1514113e-04   3.6478892e-04   1.6876295e-03   1.7172006e-02   2.9808015e-03   3.7816236e-04   5.6735840e-03   1.0535544e-03   5.0675384e-03   1.2248969e-03   1.1742680e-03   5.8878380e-05   1.9840502e-01   1.9840646e-01   2.3601178e-01   2.7699198e-01   2.5062921e-01   2.7429513e-01   2.1981939e-01   1.7908511e-01   2.2870797e-01   2.2884763e-01   2.5935890e-01   2.0237179e-01   2.5038976e-01   2.6920634e-01   1.4261267e-01   1.8559031e-01   2.6457877e-01   2.1508394e-01   3.2684946e-01   2.2341437e-01   2.7694800e-01   1.8806980e-01   3.3447269e-01   2.7322826e-01   2.0195195e-01   1.9876628e-01   2.5683031e-01   2.7497319e-01   2.5083242e-01   1.5688509e-01   2.2686098e-01   2.1020792e-01   1.9457412e-01   3.6717222e-01   2.8066992e-01   1.9637709e-01   2.2117475e-01   2.8566080e-01   1.9793610e-01   2.4995417e-01   2.9345115e-01   2.4428403e-01   2.1902512e-01   1.8834472e-01   2.4798811e-01   2.0544380e-01   2.1853532e-01   2.0913443e-01   1.2439434e-01   2.1611468e-01   4.6739623e-01   4.0702486e-01   3.8261075e-01   4.0188705e-01   4.2751037e-01   4.2749738e-01   4.1883120e-01   4.0825884e-01   4.3736523e-01   3.5667688e-01   2.8932174e-01   3.8108192e-01   3.5341594e-01   4.3812879e-01   4.3747647e-01   3.4285168e-01   3.5898948e-01   3.6076578e-01   5.0317788e-01   4.0703744e-01   3.6087589e-01   3.8845434e-01   4.4366743e-01   3.2719263e-01   3.5301257e-01   3.5071770e-01   3.0715395e-01   3.0299726e-01   4.2534595e-01   3.3768843e-01   3.9391146e-01   3.0525217e-01   4.3324318e-01   3.2636388e-01   4.2820876e-01   3.7681443e-01   3.8013938e-01   3.5586945e-01   2.9507753e-01   3.2153910e-01   3.8242816e-01   2.9467977e-01   4.0702486e-01   3.9678588e-01   3.7800850e-01   3.3305200e-01   3.7343525e-01   3.2833826e-01   3.4988792e-01   3.5099795e-01   7.6889988e-04   5.5936308e-03   9.7548717e-03   1.0412603e-02   1.0389613e-03   5.7781385e-03   5.9052415e-03   1.0389613e-03   1.4340775e-03   1.1584691e-03   6.0259991e-03   2.1726707e-02   3.4598658e-03   3.9021886e-03   1.8160607e-03   3.9104990e-03   3.6199548e-03   1.2013216e-03   2.4248068e-03   2.0560641e-03   1.7726458e-01   1.7519789e-01   2.1239850e-01   2.5350621e-01   2.2831460e-01   2.4335474e-01   1.9301815e-01   1.5733953e-01   2.0548992e-01   2.0248973e-01   2.3687424e-01   1.7915768e-01   2.2981656e-01   2.4028800e-01   1.2483249e-01   1.6617415e-01   2.3289941e-01   1.8936545e-01   3.0533170e-01   2.0007138e-01   2.4571617e-01   1.6904051e-01   3.0733551e-01   2.4338605e-01   1.8100871e-01   1.7874491e-01   2.3386295e-01   2.4993365e-01   2.2441376e-01   1.3976123e-01   2.0438723e-01   1.8845276e-01   1.7353191e-01   3.3400984e-01   2.4631085e-01   1.6886156e-01   1.9837340e-01   2.6441281e-01   1.7118963e-01   2.2560913e-01   2.6174131e-01   2.1639026e-01   1.9675554e-01   1.6819987e-01   2.2078897e-01   1.7753606e-01   1.9173589e-01   1.8624992e-01   1.1156657e-01   1.9127963e-01   4.2879129e-01   3.7350470e-01   3.5299849e-01   3.6620603e-01   3.9355975e-01   3.9506070e-01   3.8147101e-01   3.7498057e-01   4.0550993e-01   3.2554347e-01   2.6225589e-01   
3.5177359e-01   3.2570798e-01   4.0674255e-01   4.0737898e-01   3.1405475e-01   3.2617446e-01   3.2545044e-01   4.7160294e-01   3.7696723e-01   3.3214002e-01   3.5535112e-01   4.1144267e-01   3.0143840e-01   3.2034131e-01   3.1835849e-01   2.8130938e-01   2.7364725e-01   3.9315179e-01   3.0715437e-01   3.6445309e-01   2.7384746e-01   4.0174546e-01   2.9539622e-01   3.8981435e-01   3.5158797e-01   3.4545598e-01   3.2149982e-01   2.6601526e-01   2.9570609e-01   3.5410378e-01   2.7466513e-01   3.7350470e-01   3.6448827e-01   3.4805708e-01   3.0999906e-01   3.4801807e-01   3.0074518e-01   3.1606363e-01   3.1638243e-01   3.9211584e-03   1.5428451e-02   1.5051422e-02   9.3641634e-05   5.2251987e-03   5.2564865e-03   9.3641634e-05   2.4059863e-03   1.5411207e-03   7.7716003e-03   1.4517538e-02   6.4009679e-03   4.6963365e-03   4.9429047e-03   2.1131701e-03   7.3135995e-03   2.7662314e-03   4.2415669e-03   2.1979302e-03   1.6154877e-01   1.6047147e-01   1.9568300e-01   2.3468417e-01   2.1025394e-01   2.2838581e-01   1.7884422e-01   1.4313754e-01   1.8898793e-01   1.8753794e-01   2.1845622e-01   1.6418759e-01   2.1103286e-01   2.2447017e-01   1.1131548e-01   1.5046796e-01   2.1889519e-01   1.7489777e-01   2.8359045e-01   1.8392350e-01   2.3077309e-01   1.5301294e-01   2.8765002e-01   2.2785871e-01   1.6499082e-01   1.6250540e-01   2.1577964e-01   2.3190311e-01   2.0826660e-01   1.2493129e-01   1.8764327e-01   1.7232008e-01   1.5800409e-01   3.1597925e-01   2.3306671e-01   1.5663136e-01   1.8209705e-01   2.4426589e-01   1.5839676e-01   2.0849973e-01   2.4621874e-01   2.0137281e-01   1.8035428e-01   1.5264734e-01   2.0519641e-01   1.6491824e-01   1.7763987e-01   1.7071018e-01   9.7334595e-02   1.7628990e-01   4.1013753e-01   3.5415853e-01   3.3261378e-01   3.4819629e-01   3.7372548e-01   3.7447405e-01   3.6377706e-01   3.5546726e-01   3.8432564e-01   3.0688243e-01   2.4450319e-01   3.3130276e-01   3.0553473e-01   3.8531353e-01   3.8537934e-01   2.9480414e-01   3.0829006e-01   3.0887441e-01   4.4839112e-01   3.5594109e-01   3.1215369e-01   3.3646690e-01   3.9023537e-01   2.8142851e-01   3.0262656e-01   3.0057353e-01   2.6218143e-01   2.5639363e-01   3.7251839e-01   2.8896987e-01   3.4359589e-01   2.5757650e-01   3.8052352e-01   2.7792266e-01   3.7237623e-01   3.2948525e-01   3.2773210e-01   3.0459398e-01   2.4898596e-01   2.7596310e-01   3.3313575e-01   2.5365041e-01   3.5415853e-01   3.4488951e-01   3.2799981e-01   2.8862094e-01   3.2611271e-01   2.8152144e-01   2.9910812e-01   2.9982464e-01   1.7874931e-02   1.3166667e-02   2.8067647e-03   5.4483543e-04   4.8491153e-04   2.8067647e-03   2.7932574e-03   2.1120566e-03   4.0283644e-03   1.1006031e-02   7.1968081e-03   1.9824114e-03   1.0903705e-02   3.4373671e-04   1.0364284e-02   4.2441401e-03   4.1832512e-03   1.3456492e-03   1.8893734e-01   1.9027350e-01   2.2602463e-01   2.6493124e-01   2.3909907e-01   2.6731994e-01   2.1287273e-01   1.7119179e-01   2.1885648e-01   2.2106766e-01   2.4751153e-01   1.9400411e-01   2.3783986e-01   2.6104566e-01   1.3465024e-01   1.7581619e-01   2.5867158e-01   2.0765719e-01   3.1157756e-01   2.1392913e-01   2.7004976e-01   1.7791371e-01   3.2221942e-01   2.6551484e-01   1.9215572e-01   1.8858682e-01   2.4539617e-01   2.6396374e-01   2.4190361e-01   1.4753547e-01   2.1663955e-01   2.0044864e-01   1.8527044e-01   3.5759997e-01   2.7597540e-01   1.9143761e-01   2.1146655e-01   2.7190276e-01   1.9231433e-01   2.3971651e-01   2.8607417e-01   2.3671699e-01   2.0909909e-01   1.7885289e-01   2.3973819e-01   2.0020124e-01   
2.1166375e-01   2.0015528e-01   1.1482314e-01   2.0802247e-01   4.5814031e-01   3.9622068e-01   3.7010544e-01   3.9277065e-01   4.1629822e-01   4.1528170e-01   4.1031284e-01   3.9724817e-01   4.2445794e-01   3.4613203e-01   2.7894605e-01   3.6843870e-01   3.4078468e-01   4.2489846e-01   4.2345869e-01   3.3131814e-01   3.4950009e-01   3.5299308e-01   4.8820349e-01   3.9397890e-01   3.4860920e-01   3.7803314e-01   4.3080058e-01   3.1437592e-01   3.4366436e-01   3.4124618e-01   2.9521701e-01   2.9351505e-01   4.1304236e-01   3.2750551e-01   3.8091343e-01   2.9709224e-01   4.2023724e-01   3.1695286e-01   4.2011817e-01   3.6184591e-01   3.7113199e-01   3.4760954e-01   2.8576301e-01   3.0899799e-01   3.6912287e-01   2.7985004e-01   3.9622068e-01   3.8552168e-01   3.6588486e-01   3.1841134e-01   3.5869504e-01   3.1661834e-01   3.4148439e-01   3.4312143e-01   1.2983249e-03   1.5471006e-02   1.3470267e-02   1.3877317e-02   1.5471006e-02   7.2739782e-03   9.2612860e-03   6.0792360e-03   5.4160667e-02   2.4574168e-03   8.1477262e-03   3.8451807e-03   1.8194605e-02   1.5067164e-03   5.6792549e-03   4.8846913e-03   1.0004616e-02   2.6108642e-01   2.5718656e-01   3.0162187e-01   3.4990044e-01   3.2118558e-01   3.3212064e-01   2.7586622e-01   2.3634761e-01   2.9366681e-01   2.8770633e-01   3.3110877e-01   2.6200119e-01   3.2383134e-01   3.3046521e-01   1.9829106e-01   2.4860521e-01   3.1887574e-01   2.7240198e-01   4.1018982e-01   2.8711577e-01   3.3461178e-01   2.5228731e-01   4.0944673e-01   3.3325697e-01   2.6572836e-01   2.6351234e-01   3.2731613e-01   3.4479515e-01   3.1382719e-01   2.1730674e-01   2.9282566e-01   2.7420761e-01   2.5662111e-01   4.3493335e-01   3.3165524e-01   2.4546083e-01   2.8544328e-01   3.6363165e-01   2.4916639e-01   3.1676067e-01   3.5299851e-01   3.0301330e-01   2.8384547e-01   2.5066938e-01   3.0887732e-01   2.5579752e-01   2.7431070e-01   2.7082223e-01   1.8355500e-01   2.7545202e-01   5.3566205e-01   4.7913827e-01   4.5930782e-01   4.6887050e-01   5.0113940e-01   5.0408511e-01   4.8425611e-01   4.8100390e-01   5.1611676e-01   4.2712908e-01   3.5770237e-01   4.5815125e-01   4.2990067e-01   5.1783166e-01   5.1944925e-01   4.1592191e-01   4.2634771e-01   4.2297928e-01   5.8870710e-01   4.8576650e-01   4.3645958e-01   4.5912638e-01   5.2238783e-01   4.0361731e-01   4.1983156e-01   4.1785974e-01   3.8054730e-01   3.6909926e-01   5.0215992e-01   4.0667536e-01   4.7224138e-01   3.6745640e-01   5.1222367e-01   3.9280819e-01   4.9246039e-01   4.6045456e-01   4.4643751e-01   4.1946868e-01   3.6048019e-01   3.9703515e-01   4.6143668e-01   3.7590038e-01   4.7913827e-01   4.7009550e-01   4.5350904e-01   4.1479012e-01   4.5636227e-01   4.0162438e-01   4.1380491e-01   4.1334093e-01   1.4411876e-02   8.7935053e-03   9.1184252e-03   1.4411876e-02   5.7453578e-03   7.6054776e-03   2.7409931e-03   4.7624677e-02   1.8780439e-03   4.9746662e-03   5.9684857e-03   1.4534556e-02   2.3601867e-03   4.9286760e-03   3.4466344e-03   7.5341006e-03   2.6746877e-01   2.6516664e-01   3.0921139e-01   3.5673992e-01   3.2754140e-01   3.4481450e-01   2.8624779e-01   2.4359422e-01   3.0108022e-01   2.9756219e-01   3.3745616e-01   2.6990583e-01   3.2887946e-01   3.4163113e-01   2.0331657e-01   2.5392638e-01   3.3233847e-01   2.8201820e-01   4.1489235e-01   2.9472025e-01   3.4749512e-01   2.5726587e-01   4.1828245e-01   3.4508966e-01   2.7189140e-01   2.6903465e-01   3.3406888e-01   3.5284896e-01   3.2333666e-01   2.2149953e-01   2.9970099e-01   2.8083524e-01   2.6300357e-01   4.4804341e-01   3.4702929e-01   
2.5691578e-01   2.9267142e-01   3.6866600e-01   2.5996213e-01   3.2467728e-01   3.6599412e-01   3.1379996e-01   2.9070461e-01   2.5648991e-01   3.1904849e-01   2.6736905e-01   2.8471636e-01   2.7833907e-01   1.8530093e-01   2.8435281e-01   5.5199539e-01   4.9206433e-01   4.6946939e-01   4.8356355e-01   5.1418554e-01   5.1595196e-01   5.0010048e-01   4.9372697e-01   5.2743722e-01   4.3877522e-01   3.6710197e-01   4.6809646e-01   4.3893984e-01   5.2878375e-01   5.2942069e-01   4.2592703e-01   4.3928640e-01   4.3789059e-01   5.9924430e-01   4.9601715e-01   4.4619754e-01   4.7193405e-01   5.3393309e-01   4.1149136e-01   4.3273356e-01   4.3054493e-01   3.8875406e-01   3.8007672e-01   5.1386048e-01   4.1809676e-01   4.8216133e-01   3.8002405e-01   5.2330849e-01   4.0472333e-01   5.0903363e-01   4.6735136e-01   4.6059448e-01   4.3368525e-01   3.7134977e-01   4.0504167e-01   4.7061436e-01   3.7989795e-01   4.9206433e-01   4.8217825e-01   4.6398354e-01   4.2055878e-01   4.6343857e-01   4.1088828e-01   4.2766517e-01   4.2782051e-01   4.0251529e-03   4.0402030e-03   0.0000000e+00   2.0104773e-03   1.1579294e-03   6.7733512e-03   1.3328703e-02   6.1195429e-03   3.8280621e-03   5.4471697e-03   1.3203495e-03   7.3930866e-03   2.5511055e-03   3.8011014e-03   1.5935161e-03   1.6488937e-01   1.6420208e-01   1.9946134e-01   2.3841307e-01   2.1377841e-01   2.3352484e-01   1.8323739e-01   1.4660873e-01   1.9269692e-01   1.9183992e-01   2.2200730e-01   1.6791595e-01   2.1423078e-01   2.2922867e-01   1.1407468e-01   1.5349471e-01   2.2418179e-01   1.7908948e-01   2.8692621e-01   1.8765985e-01   2.3596576e-01   1.5596498e-01   2.9203641e-01   2.3278981e-01   1.6829150e-01   1.6563525e-01   2.1942293e-01   2.3592536e-01   2.1256421e-01   1.2755291e-01   1.9121342e-01   1.7576718e-01   1.6132929e-01   3.2148865e-01   2.3885357e-01   1.6118125e-01   1.8573297e-01   2.4757051e-01   1.6279862e-01   2.1240660e-01   2.5149161e-01   2.0595435e-01   1.8389163e-01   1.5580881e-01   2.0964365e-01   1.6953436e-01   1.8203376e-01   1.7437090e-01   9.9184065e-02   1.8031354e-01   4.1664204e-01   3.5971944e-01   3.3744612e-01   3.5416868e-01   3.7936058e-01   3.7982315e-01   3.7006184e-01   3.6098196e-01   3.8956200e-01   3.1201251e-01   2.4889912e-01   3.3607844e-01   3.1002022e-01   3.9046127e-01   3.9028467e-01   2.9949936e-01   3.1373736e-01   3.1479561e-01   4.5355800e-01   3.6085089e-01   3.1682959e-01   3.4195608e-01   3.9553949e-01   2.8555816e-01   3.0804941e-01   3.0593834e-01   2.6633772e-01   2.6121338e-01   3.7782244e-01   2.9399549e-01   3.4839508e-01   2.6278429e-01   3.8569374e-01   2.8303600e-01   3.7885421e-01   3.3349568e-01   3.3352424e-01   3.1033775e-01   2.5375578e-01   2.8011018e-01   3.3772574e-01   2.5671985e-01   3.5971944e-01   3.5022311e-01   3.3289784e-01   2.9224130e-01   3.3016010e-01   2.8599652e-01   3.0475125e-01   3.0561759e-01   3.0700267e-06   4.0251529e-03   1.9467123e-03   1.7971688e-03   1.7301608e-03   1.6293141e-02   4.7718752e-03   6.8454231e-04   9.3235659e-03   1.4106675e-03   7.7429721e-03   3.1097831e-03   2.5391665e-03   9.5764453e-04   2.0739268e-01   2.0836358e-01   2.4589773e-01   2.8656370e-01   2.5981614e-01   2.8739169e-01   2.3131893e-01   1.8849968e-01   2.3845740e-01   2.4007299e-01   2.6857108e-01   2.1230255e-01   2.5877066e-01   2.8137449e-01   1.5046312e-01   1.9386671e-01   2.7809337e-01   2.2610426e-01   3.3523485e-01   2.3325794e-01   2.9016640e-01   1.9614834e-01   3.4542234e-01   2.8580893e-01   2.1082239e-01   2.0722978e-01   2.6628244e-01   2.8528108e-01   
2.6193836e-01   1.6431358e-01   2.3627495e-01   2.1939954e-01   2.0354344e-01   3.8077948e-01   2.9536596e-01   2.0839572e-01   2.3078002e-01   2.9414568e-01   2.0957408e-01   2.6009226e-01   3.0679254e-01   2.5613081e-01   2.2839644e-01   1.9694047e-01   2.5948108e-01   2.1757899e-01   2.3004331e-01   2.1886202e-01   1.3010315e-01   2.2671016e-01   4.8309156e-01   4.2059741e-01   3.9446827e-01   4.1650857e-01   4.4118694e-01   4.4046177e-01   4.3416675e-01   4.2171124e-01   4.5001131e-01   3.6938174e-01   3.0049261e-01   3.9280494e-01   3.6453208e-01   4.5055186e-01   4.4930066e-01   3.5453263e-01   3.7247610e-01   3.7543144e-01   5.1540859e-01   4.1899010e-01   3.7243361e-01   4.0192295e-01   4.5645786e-01   3.3753686e-01   3.6646355e-01   3.6403348e-01   3.1765048e-01   3.1516192e-01   4.3820660e-01   3.5021371e-01   4.0564147e-01   3.1837605e-01   4.4574076e-01   3.3915719e-01   4.4399141e-01   3.8666018e-01   3.9439941e-01   3.7011128e-01   3.0715348e-01   3.3195062e-01   3.9368522e-01   3.0254596e-01   4.2059741e-01   4.0983289e-01   3.9004780e-01   3.4211299e-01   3.8338579e-01   3.3953411e-01   3.6390344e-01   3.6538415e-01   4.0402030e-03   2.0647746e-03   1.8810615e-03   1.8744038e-03   1.5904825e-02   5.0089073e-03   7.7718458e-04   9.5962272e-03   1.3536136e-03   8.0354537e-03   3.2683077e-03   2.7053207e-03   1.0204136e-03   2.0663566e-01   2.0767698e-01   2.4509318e-01   2.8563097e-01   2.5892354e-01   2.8672996e-01   2.3068420e-01   1.8783705e-01   2.3766371e-01   2.3938906e-01   2.6765771e-01   2.1160129e-01   2.5782529e-01   2.8065126e-01   1.4982077e-01   1.9310124e-01   2.7749364e-01   2.2544624e-01   3.3411471e-01   2.3248629e-01   2.8950769e-01   1.9536146e-01   3.4445558e-01   2.8510792e-01   2.1004612e-01   2.0643532e-01   2.6539182e-01   2.8440424e-01   2.6118254e-01   1.6358877e-01   2.3546280e-01   2.1862058e-01   2.0279724e-01   3.7994304e-01   2.9482551e-01   2.0788172e-01   2.2999781e-01   2.9312113e-01   2.0902225e-01   2.5926765e-01   3.0610117e-01   2.5545076e-01   2.2760393e-01   1.9618808e-01   2.5876275e-01   2.1704368e-01   2.2941326e-01   2.1812470e-01   1.2939132e-01   2.2601611e-01   4.8224276e-01   4.1968249e-01   3.9347110e-01   4.1568549e-01   4.4024422e-01   4.3946585e-01   4.3337086e-01   4.2078495e-01   4.4897602e-01   3.6849705e-01   2.9964288e-01   3.9180103e-01   3.6353845e-01   4.4949934e-01   4.4820701e-01   3.5360058e-01   3.7164688e-01   3.7469388e-01   5.1424729e-01   4.1795596e-01   3.7145645e-01   4.0103422e-01   4.5542290e-01   3.3654347e-01   3.6564394e-01   3.6320808e-01   3.1671115e-01   3.1435391e-01   4.3720666e-01   3.4935506e-01   4.0461470e-01   3.1763784e-01   4.4470146e-01   3.3834387e-01   4.4321597e-01   3.8553656e-01   3.9358898e-01   3.6934910e-01   3.0635763e-01   3.3097385e-01   3.9264615e-01   3.0146236e-01   4.1968249e-01   4.0889669e-01   3.8907226e-01   3.4102273e-01   3.8227517e-01   3.3859771e-01   3.6313559e-01   3.6464431e-01   2.0104773e-03   1.1579294e-03   6.7733512e-03   1.3328703e-02   6.1195429e-03   3.8280621e-03   5.4471697e-03   1.3203495e-03   7.3930866e-03   2.5511055e-03   3.8011014e-03   1.5935161e-03   1.6488937e-01   1.6420208e-01   1.9946134e-01   2.3841307e-01   2.1377841e-01   2.3352484e-01   1.8323739e-01   1.4660873e-01   1.9269692e-01   1.9183992e-01   2.2200730e-01   1.6791595e-01   2.1423078e-01   2.2922867e-01   1.1407468e-01   1.5349471e-01   2.2418179e-01   1.7908948e-01   2.8692621e-01   1.8765985e-01   2.3596576e-01   1.5596498e-01   2.9203641e-01   2.3278981e-01   1.6829150e-01   
1.6563525e-01   2.1942293e-01   2.3592536e-01   2.1256421e-01   1.2755291e-01   1.9121342e-01   1.7576718e-01   1.6132929e-01   3.2148865e-01   2.3885357e-01   1.6118125e-01   1.8573297e-01   2.4757051e-01   1.6279862e-01   2.1240660e-01   2.5149161e-01   2.0595435e-01   1.8389163e-01   1.5580881e-01   2.0964365e-01   1.6953436e-01   1.8203376e-01   1.7437090e-01   9.9184065e-02   1.8031354e-01   4.1664204e-01   3.5971944e-01   3.3744612e-01   3.5416868e-01   3.7936058e-01   3.7982315e-01   3.7006184e-01   3.6098196e-01   3.8956200e-01   3.1201251e-01   2.4889912e-01   3.3607844e-01   3.1002022e-01   3.9046127e-01   3.9028467e-01   2.9949936e-01   3.1373736e-01   3.1479561e-01   4.5355800e-01   3.6085089e-01   3.1682959e-01   3.4195608e-01   3.9553949e-01   2.8555816e-01   3.0804941e-01   3.0593834e-01   2.6633772e-01   2.6121338e-01   3.7782244e-01   2.9399549e-01   3.4839508e-01   2.6278429e-01   3.8569374e-01   2.8303600e-01   3.7885421e-01   3.3349568e-01   3.3352424e-01   3.1033775e-01   2.5375578e-01   2.8011018e-01   3.3772574e-01   2.5671985e-01   3.5971944e-01   3.5022311e-01   3.3289784e-01   2.9224130e-01   3.3016010e-01   2.8599652e-01   3.0475125e-01   3.0561759e-01   1.3213080e-04   1.6348389e-03   2.1958653e-02   1.3024074e-03   6.2459052e-04   2.7868209e-03   2.5128724e-03   2.4948096e-03   1.5213665e-04   2.9312203e-04   2.6269769e-04   2.0119891e-01   2.0014287e-01   2.3871519e-01   2.8086210e-01   2.5438649e-01   2.7419983e-01   2.2034638e-01   1.8091150e-01   2.3139686e-01   2.2990542e-01   2.6326545e-01   2.0423778e-01   2.5497864e-01   2.7008054e-01   1.4507978e-01   1.8881738e-01   2.6377709e-01   2.1604374e-01   3.3279319e-01   2.2588101e-01   2.7676493e-01   1.9156801e-01   3.3788258e-01   2.7371187e-01   2.0495838e-01   2.0212622e-01   2.6043406e-01   2.7802264e-01   2.5251791e-01   1.6022437e-01   2.2986910e-01   2.1306976e-01   1.9729190e-01   3.6816423e-01   2.7878040e-01   1.9567818e-01   2.2384809e-01   2.9081290e-01   1.9773475e-01   2.5268382e-01   2.9345893e-01   2.4498738e-01   2.2190073e-01   1.9130390e-01   2.4917712e-01   2.0485354e-01   2.1902087e-01   2.1139045e-01   1.2850635e-01   2.1757950e-01   4.6735126e-01   4.0863686e-01   3.8577163e-01   4.0223139e-01   4.2927752e-01   4.3005363e-01   4.1858115e-01   4.1002225e-01   4.4039161e-01   3.5852229e-01   2.9170762e-01   3.8436621e-01   3.5694453e-01   4.4140604e-01   4.4138801e-01   3.4560659e-01   3.5999045e-01   3.6042881e-01   5.0737364e-01   4.1042921e-01   3.6404336e-01   3.8992213e-01   4.4661300e-01   3.3112795e-01   3.5395838e-01   3.5178032e-01   3.1059068e-01   3.0453530e-01   4.2798873e-01   3.3942843e-01   3.9736659e-01   3.0574030e-01   4.3638021e-01   3.2761075e-01   4.2755962e-01   3.8201356e-01   3.8058140e-01   3.5594943e-01   2.9656762e-01   3.2531047e-01   3.8623001e-01   3.0061288e-01   4.0863686e-01   3.9884323e-01   3.8090672e-01   3.3841042e-01   3.7847943e-01   3.3138355e-01   3.5013260e-01   3.5082807e-01   2.3963109e-03   1.8894374e-02   2.2439950e-03   9.0594652e-04   3.4335324e-03   1.6224592e-03   3.5937956e-03   4.5783206e-04   8.1747242e-04   1.3119947e-04   1.9231717e-01   1.9150735e-01   2.2920314e-01   2.7047035e-01   2.4441394e-01   2.6481790e-01   2.1164496e-01   1.7262115e-01   2.2200818e-01   2.2091128e-01   2.5312771e-01   1.9549847e-01   2.4484600e-01   2.6050695e-01   1.3741417e-01   1.8008449e-01   2.5474237e-01   2.0730811e-01   3.2133505e-01   2.1663183e-01   2.6736983e-01   1.8273137e-01   3.2682425e-01   2.6418461e-01   1.9596515e-01   1.9311641e-01   
2.5039748e-01   2.6783585e-01   2.4301257e-01   1.5205635e-01   2.2044020e-01   2.0395985e-01   1.8849810e-01   3.5730736e-01   2.6984619e-01   1.8774203e-01   2.1458977e-01   2.8004857e-01   1.8962388e-01   2.4294411e-01   2.8377929e-01   2.3583758e-01   2.1263399e-01   1.8257496e-01   2.3984158e-01   1.9672007e-01   2.1035264e-01   2.0243724e-01   1.2095575e-01   2.0869633e-01   4.5579154e-01   3.9719792e-01   3.7421582e-01   3.9117796e-01   4.1759891e-01   4.1818270e-01   4.0752420e-01   3.9853286e-01   4.2831769e-01   3.4756630e-01   2.8146997e-01   3.7279915e-01   3.4563396e-01   4.2926782e-01   4.2911586e-01   3.3459449e-01   3.4922354e-01   3.5000846e-01   4.9447633e-01   3.9856686e-01   3.5272682e-01   3.7871114e-01   4.3449948e-01   3.2004729e-01   3.4327468e-01   3.4109263e-01   2.9987407e-01   2.9431934e-01   4.1611792e-01   3.2872232e-01   3.8562509e-01   2.9576964e-01   4.2431958e-01   3.1716861e-01   4.1652649e-01   3.7009695e-01   3.6972936e-01   3.4546743e-01   2.8647014e-01   3.1433567e-01   3.7453491e-01   2.8958559e-01   3.9719792e-01   3.8739786e-01   3.6946063e-01   3.2697767e-01   3.6662992e-01   3.2048201e-01   3.3967430e-01   3.4047263e-01   2.8241524e-02   1.4713791e-03   4.6823304e-04   6.0296352e-03   5.2071190e-03   3.4182869e-03   1.9529381e-03   9.1025104e-04   1.8441969e-03   2.3388660e-01   2.3348281e-01   2.7404368e-01   3.1787596e-01   2.8991570e-01   3.1331298e-01   2.5580945e-01   2.1274052e-01   2.6625865e-01   2.6571335e-01   2.9921857e-01   2.3779036e-01   2.8985951e-01   3.0846478e-01   1.7343111e-01   2.2024476e-01   3.0259960e-01   2.5097696e-01   3.7082192e-01   2.6053197e-01   3.1606460e-01   2.2298918e-01   3.7823337e-01   3.1251210e-01   2.3776110e-01   2.3444923e-01   2.9645330e-01   3.1548927e-01   2.8938607e-01   1.8922982e-01   2.6439849e-01   2.4655891e-01   2.2974757e-01   4.1142478e-01   3.1887022e-01   2.2998734e-01   2.5821680e-01   3.2732410e-01   2.3197317e-01   2.8887378e-01   3.3359010e-01   2.8195681e-01   2.5599107e-01   2.2312925e-01   2.8613603e-01   2.3977955e-01   2.5441596e-01   2.4519140e-01   1.5385241e-01   2.5232104e-01   5.1493631e-01   4.5323214e-01   4.2845834e-01   4.4724986e-01   4.7457294e-01   4.7489714e-01   4.6451744e-01   4.5458397e-01   4.8531016e-01   4.0086775e-01   3.3039510e-01   4.2690815e-01   3.9810528e-01   4.8619268e-01   4.8572861e-01   3.8678000e-01   4.0288412e-01   4.0400345e-01   5.5371159e-01   4.5396321e-01   4.0578110e-01   4.3384503e-01   4.9180921e-01   3.7075981e-01   3.9660943e-01   3.9426853e-01   3.4955388e-01   3.4443823e-01   4.7269976e-01   3.8095285e-01   4.4033063e-01   3.4628429e-01   4.8107810e-01   3.6885113e-01   4.7400892e-01   4.2299055e-01   4.2466221e-01   3.9913020e-01   3.3607659e-01   3.6477722e-01   4.2848136e-01   3.3695835e-01   4.5323214e-01   4.4278413e-01   4.2356519e-01   3.7724050e-01   4.1943108e-01   3.7167660e-01   3.9296882e-01   3.9389283e-01   3.3787081e-02   2.1999955e-02   3.5606869e-02   9.7919779e-03   3.8730614e-02   2.5164827e-02   2.6849118e-02   1.7903003e-02   1.2662420e-01   1.3061214e-01   1.5776511e-01   1.8787898e-01   1.6611889e-01   2.0099100e-01   1.5292701e-01   1.1474225e-01   1.5177722e-01   1.5820095e-01   1.7289957e-01   1.3332512e-01   1.6306194e-01   1.9266388e-01   8.3782583e-02   1.1475472e-01   1.9583103e-01   1.4727273e-01   2.2350713e-01   1.4824286e-01   2.0360977e-01   1.1578494e-01   2.3876064e-01   1.9766401e-01   1.2872248e-01   1.2487918e-01   1.7184761e-01   1.8912261e-01   1.7398714e-01   9.1697206e-02   1.4908080e-01   
1.3596066e-01   1.2380963e-01   2.7656824e-01   2.1428571e-01   1.3903777e-01   1.4563130e-01   1.9085533e-01   1.3814153e-01   1.6927103e-01   2.1683317e-01   1.7245190e-01   1.4312164e-01   1.1792045e-01   1.7357532e-01   1.4591400e-01   1.5204372e-01   1.3733781e-01   6.4362895e-02   1.4610010e-01   3.6994724e-01   3.0922665e-01   2.8172128e-01   3.0981046e-01   3.2683078e-01   3.2376302e-01   3.2734273e-01   3.0972403e-01   3.3079457e-01   2.6398840e-01   2.0377846e-01   2.7991998e-01   2.5479336e-01   3.3054260e-01   3.2764011e-01   2.4845492e-01   2.6936921e-01   2.7648193e-01   3.8578015e-01   3.0244531e-01   2.6262990e-01   2.9337350e-01   3.3673416e-01   2.3057418e-01   2.6437275e-01   2.6189855e-01   2.1511723e-01   2.1874548e-01   3.2151629e-01   2.4786804e-01   2.9060022e-01   2.2492263e-01   3.2669967e-01   2.4001378e-01   3.3739817e-01   2.6941492e-01   2.9020759e-01   2.7043921e-01   2.1216140e-01   2.2627991e-01   2.7921868e-01   1.9599823e-01   3.0922665e-01   2.9841488e-01   2.7865157e-01   2.3076134e-01   2.6697481e-01   2.3479056e-01   2.6453179e-01   2.6724220e-01   1.8463349e-03   1.7603525e-03   7.3051568e-03   4.1015204e-04   7.6986614e-04   4.1505661e-04   2.5516535e-03   2.2715123e-01   2.2501703e-01   2.6621562e-01   3.1107232e-01   2.8349906e-01   3.0014485e-01   2.4486000e-01   2.0495206e-01   2.5858102e-01   2.5538755e-01   2.9285644e-01   2.2943000e-01   2.8485147e-01   2.9693404e-01   1.6780939e-01   2.1457958e-01   2.8854018e-01   2.4083271e-01   3.6655716e-01   2.5261802e-01   3.0270375e-01   2.1770871e-01   3.6954821e-01   3.0027680e-01   2.3128364e-01   2.2864524e-01   2.8964053e-01   3.0736347e-01   2.7955757e-01   1.8460266e-01   2.5729263e-01   2.3962699e-01   2.2298939e-01   3.9825299e-01   3.0280555e-01   2.1772566e-01   2.5069761e-01   3.2250864e-01   2.2044120e-01   2.8076678e-01   3.2019205e-01   2.7070478e-01   2.4885895e-01   2.1694127e-01   2.7557358e-01   2.2744661e-01   2.4343281e-01   2.3729093e-01   1.5163937e-01   2.4296011e-01   4.9867564e-01   4.4043288e-01   4.1855224e-01   4.3250726e-01   4.6172807e-01   4.6333132e-01   4.4859207e-01   4.4201028e-01   4.7437581e-01   3.8923920e-01   3.2088439e-01   4.1722959e-01   3.8928568e-01   4.7566007e-01   4.7625383e-01   3.7686783e-01   3.8986190e-01   3.8884081e-01   5.4387473e-01   4.4406349e-01   3.9623886e-01   4.2109532e-01   4.8065646e-01   3.6308022e-01   3.8359994e-01   3.8148432e-01   3.4142085e-01   3.3328357e-01   4.6130540e-01   3.6948155e-01   4.3073385e-01   3.3341169e-01   4.7038237e-01   3.5679054e-01   4.5732763e-01   4.1658437e-01   4.1040883e-01   3.8470265e-01   3.2499997e-01   3.5692838e-01   4.1963835e-01   3.3330111e-01   4.4043288e-01   4.3085698e-01   4.1330060e-01   3.7185723e-01   4.1281711e-01   3.6250007e-01   3.7890193e-01   3.7915604e-01   5.4235993e-03   2.5998420e-03   3.8677378e-03   1.1491859e-03   6.2877149e-04   4.8332372e-04   2.1486898e-01   2.1467854e-01   2.5369922e-01   2.9604720e-01   2.6891688e-01   2.9246555e-01   2.3653315e-01   1.9467620e-01   2.4616446e-01   2.4597983e-01   2.7792166e-01   2.1880759e-01   2.6876410e-01   2.8748855e-01   1.5684504e-01   2.0165721e-01   2.8227299e-01   2.3174975e-01   3.4738814e-01   2.4066356e-01   2.9516630e-01   2.0426320e-01   3.5491429e-01   2.9152419e-01   2.1857252e-01   2.1532575e-01   2.7527993e-01   2.9385335e-01   2.6877316e-01   1.7184619e-01   2.4431046e-01   2.2708854e-01   2.1089056e-01   3.8786051e-01   2.9845589e-01   2.1195408e-01   2.3838722e-01   3.0508869e-01   2.1371586e-01   2.6806783e-01   
3.1216051e-01   2.6180490e-01   2.3620065e-01   2.0447438e-01   2.6573552e-01   2.2136814e-01   2.3519672e-01   2.2587537e-01   1.3797486e-01   2.3292930e-01   4.8968343e-01   4.2865553e-01   4.0406082e-01   4.2311568e-01   4.4955467e-01   4.4970134e-01   4.4022012e-01   4.2994584e-01   4.5983149e-01   3.7733154e-01   3.0847182e-01   4.0252124e-01   3.7430526e-01   4.6065171e-01   4.6008825e-01   3.6337620e-01   3.7950484e-01   3.8097080e-01   5.2689771e-01   4.2900834e-01   3.8187058e-01   4.0969052e-01   4.6622956e-01   3.4753954e-01   3.7338441e-01   3.7106677e-01   3.2693971e-01   3.2232718e-01   4.4752724e-01   3.5789640e-01   4.1563695e-01   3.2438944e-01   4.5565562e-01   3.4619907e-01   4.4965557e-01   3.9841763e-01   4.0096111e-01   3.7608333e-01   3.1419485e-01   3.4172782e-01   4.0397551e-01   3.1440281e-01   4.2865553e-01   4.1831326e-01   3.9931889e-01   3.5369559e-01   3.9495166e-01   3.4857729e-01   3.7001412e-01   3.7103653e-01   9.4891023e-03   8.2375707e-04   1.6686708e-03   2.4475618e-03   4.6565778e-03   2.0491959e-01   2.0139969e-01   2.4160221e-01   2.8586265e-01   2.5951451e-01   2.7003471e-01   2.1844761e-01   1.8272084e-01   2.3436629e-01   2.2908460e-01   2.6861177e-01   2.0572725e-01   2.6206958e-01   2.6821933e-01   1.4908933e-01   1.9377976e-01   2.5813756e-01   2.1522061e-01   3.4200281e-01   2.2841742e-01   2.7236034e-01   1.9712279e-01   3.4101997e-01   2.7089218e-01   2.0911514e-01   2.0716704e-01   2.6510721e-01   2.8112922e-01   2.5280689e-01   1.6599180e-01   2.3361443e-01   2.1674754e-01   2.0090153e-01   3.6527528e-01   2.7043122e-01   1.9136502e-01   2.2690330e-01   2.9871211e-01   1.9453190e-01   2.5541554e-01   2.8925890e-01   2.4310626e-01   2.2546391e-01   1.9558871e-01   2.4836004e-01   2.0063248e-01   2.1704777e-01   2.1367234e-01   1.3678274e-01   2.1790721e-01   4.6096128e-01   4.0671584e-01   3.8773373e-01   3.9744288e-01   4.2748862e-01   4.3009711e-01   4.1223718e-01   4.0843810e-01   4.4142325e-01   3.5773584e-01   2.9306179e-01   3.8663461e-01   3.6011993e-01   4.4301487e-01   4.4448732e-01   3.4710733e-01   3.5722182e-01   3.5456119e-01   5.1046759e-01   4.1264841e-01   3.6628224e-01   3.8788026e-01   4.4738884e-01   3.3559118e-01   3.5113614e-01   3.4925623e-01   3.1415212e-01   3.0373195e-01   4.2825709e-01   3.3863565e-01   3.9988417e-01   3.0248234e-01   4.3771445e-01   3.2582243e-01   4.2019828e-01   3.8883040e-01   3.7626984e-01   3.5109855e-01   2.9576205e-01   3.2946337e-01   3.8969349e-01   3.1023698e-01   4.0671584e-01   3.9807129e-01   3.8231042e-01   3.4615104e-01   3.8497123e-01   3.3374807e-01   3.4573433e-01   3.4546409e-01   9.9827550e-03   3.7862368e-03   4.2584946e-03   1.2265791e-03   1.7617949e-01   1.7703821e-01   2.1212343e-01   2.5052805e-01   2.2530971e-01   2.5127302e-01   1.9850334e-01   1.5861692e-01   2.0515179e-01   2.0665158e-01   2.3356888e-01   1.8070269e-01   2.2448284e-01   2.4549391e-01   1.2367511e-01   1.6367665e-01   2.4261509e-01   1.9359579e-01   2.9712394e-01   2.0027260e-01   2.5390638e-01   1.6582785e-01   3.0629971e-01   2.4971132e-01   1.7938888e-01   1.7609087e-01   2.3136866e-01   2.4921343e-01   2.2715314e-01   1.3644726e-01   2.0313672e-01   1.8736201e-01   1.7259709e-01   3.3997757e-01   2.5916969e-01   1.7732718e-01   1.9796934e-01   2.5795325e-01   1.7832877e-01   2.2545225e-01   2.6961076e-01   2.2173788e-01   1.9575867e-01   1.6649347e-01   2.2485533e-01   1.8584864e-01   1.9731550e-01   1.8682160e-01   1.0543428e-01   1.9413835e-01   4.3845459e-01   3.7813833e-01   3.5313427e-01   
3.7426892e-01   3.9794201e-01   3.9726718e-01   3.9130315e-01   3.7920848e-01   4.0649770e-01   3.2906316e-01   2.6354873e-01   3.5155280e-01   3.2453882e-01   4.0704153e-01   4.0591144e-01   3.1492301e-01   3.3203733e-01   3.3498583e-01   4.6984073e-01   3.7668481e-01   3.3204804e-01   3.6021418e-01   4.1270518e-01   2.9886261e-01   3.2629567e-01   3.2396944e-01   2.7989788e-01   2.7743776e-01   3.9510115e-01   3.1077003e-01   3.6387709e-01   2.8054253e-01   4.0239292e-01   3.0024699e-01   4.0081171e-01   3.4598760e-01   3.5305310e-01   3.2985030e-01   2.6984561e-01   2.9352511e-01   3.5245796e-01   2.6611615e-01   3.7813833e-01   3.6780473e-01   3.4887718e-01   3.0350575e-01   3.4281917e-01   3.0065558e-01   3.2390437e-01   3.2536506e-01   1.4872402e-03   1.4261828e-03   4.3370414e-03   2.2707675e-01   2.2400493e-01   2.6569285e-01   3.1123078e-01   2.8378554e-01   2.9683242e-01   2.4257696e-01   2.0422363e-01   2.5811380e-01   2.5348079e-01   2.9319960e-01   2.2849166e-01   2.8589147e-01   2.9450563e-01   1.6807288e-01   2.1502765e-01   2.8470332e-01   2.3897250e-01   3.6819788e-01   2.5200370e-01   2.9929335e-01   2.1837402e-01   3.6884033e-01   2.9747447e-01   2.3136263e-01   2.2907381e-01   2.8973378e-01   3.0680413e-01   2.7799960e-01   1.8547528e-01   2.5712752e-01   2.3949352e-01   2.2288675e-01   3.9517754e-01   2.9790586e-01   2.1461980e-01   2.5028856e-01   3.2375470e-01   2.1774627e-01   2.8014020e-01   3.1681085e-01   2.6834640e-01   2.4865097e-01   2.1711982e-01   2.7360556e-01   2.2434130e-01   2.4112622e-01   2.3662943e-01   1.5362542e-01   2.4153192e-01   4.9407199e-01   4.3763583e-01   4.1728329e-01   4.2863716e-01   4.5893922e-01   4.6123811e-01   4.4410036e-01   4.3933832e-01   4.7263926e-01   3.8698450e-01   3.1961653e-01   4.1608078e-01   3.8854631e-01   4.7414193e-01   4.7530563e-01   3.7551546e-01   3.8685112e-01   3.8465728e-01   5.4280771e-01   4.4284025e-01   3.9512469e-01   4.1829234e-01   4.7881043e-01   3.6289720e-01   3.8058317e-01   3.7858579e-01   3.4094528e-01   3.3115887e-01   4.5930198e-01   3.6727055e-01   4.2965213e-01   3.3035276e-01   4.6876907e-01   3.5423104e-01   4.5243416e-01   4.1718566e-01   4.0676646e-01   3.8091352e-01   3.2289623e-01   3.5664111e-01   4.1894270e-01   3.3518670e-01   4.3763583e-01   4.2851737e-01   4.1182914e-01   3.7291664e-01   4.1330116e-01   3.6151280e-01   3.7529785e-01   3.7518553e-01   2.1466294e-04   8.1135795e-04   2.0395325e-01   2.0232995e-01   2.4147692e-01   2.8429935e-01   2.5773107e-01   2.7550146e-01   2.2191392e-01   1.8311464e-01   2.3414066e-01   2.3177254e-01   2.6670079e-01   2.0650082e-01   2.5876572e-01   2.7190067e-01   1.4755667e-01   1.9178198e-01   2.6468375e-01   2.1783813e-01   3.3738596e-01   2.2849816e-01   2.7802206e-01   1.9468150e-01   3.4113386e-01   2.7532568e-01   2.0783209e-01   2.0518365e-01   2.6370466e-01   2.8101900e-01   2.5475268e-01   1.6319612e-01   2.3278117e-01   2.1587899e-01   1.9999930e-01   3.7014440e-01   2.7911349e-01   1.9654509e-01   2.2657275e-01   2.9494234e-01   1.9887441e-01   2.5547815e-01   2.9483790e-01   2.4668408e-01   2.2473055e-01   1.9413008e-01   2.5114079e-01   2.0579387e-01   2.2056413e-01   2.1387343e-01   1.3181835e-01   2.1964768e-01   4.6883654e-01   4.1098091e-01   3.8893009e-01   4.0388763e-01   4.3171852e-01   4.3291890e-01   4.1992695e-01   4.1244869e-01   4.4351517e-01   3.6095375e-01   2.9436456e-01   3.8758959e-01   3.6027619e-01   4.4466450e-01   4.4498549e-01   3.4851465e-01   3.6196876e-01   3.6168475e-01   5.1115719e-01   4.1372900e-01   
3.6718805e-01   3.9217531e-01   4.4969696e-01   3.3465314e-01   3.5590188e-01   3.5378490e-01   3.1382959e-01   3.0675099e-01   4.3089908e-01   3.4178657e-01   4.0069101e-01   3.0739171e-01   4.3956059e-01   3.2969272e-01   4.2869495e-01   3.8625531e-01   3.8227459e-01   3.5742723e-01   2.9874912e-01   3.2874265e-01   3.8973129e-01   3.0516637e-01   4.1098091e-01   4.0141982e-01   3.8392089e-01   3.4269880e-01   3.8263609e-01   3.3443392e-01   3.5169446e-01   3.5216636e-01   9.1517643e-04   2.1524074e-01   2.1387759e-01   2.5373107e-01   2.9714785e-01   2.7003778e-01   2.8920001e-01   2.3426287e-01   1.9410928e-01   2.4622443e-01   2.4423617e-01   2.7915984e-01   2.1812334e-01   2.7081916e-01   2.8531140e-01   1.5727134e-01   2.0260081e-01   2.7829111e-01   2.2998414e-01   3.5056990e-01   2.4050939e-01   2.9179089e-01   2.0549251e-01   3.5521699e-01   2.8889714e-01   2.1915718e-01   2.1632646e-01   2.7619378e-01   2.9404932e-01   2.6759110e-01   1.7313032e-01   2.4473695e-01   2.2745766e-01   2.1120369e-01   3.8534612e-01   2.9320974e-01   2.0849982e-01   2.3847660e-01   3.0758274e-01   2.1079215e-01   2.6804702e-01   3.0890781e-01   2.5958017e-01   2.3652855e-01   2.0509955e-01   2.6402277e-01   2.1797131e-01   2.3288914e-01   2.2557846e-01   1.4040598e-01   2.3171289e-01   4.8566100e-01   4.2666426e-01   4.0387929e-01   4.1975185e-01   4.4765854e-01   4.4867725e-01   4.3614287e-01   4.2811681e-01   4.5929331e-01   3.7580128e-01   3.0785579e-01   4.0248311e-01   3.7465557e-01   4.6038945e-01   4.6053443e-01   3.6291292e-01   3.7703492e-01   3.7702914e-01   5.2744561e-01   4.2898757e-01   3.8178676e-01   4.0761255e-01   4.6557959e-01   3.4846205e-01   3.7087965e-01   3.6870032e-01   3.2740185e-01   3.2070546e-01   4.4660679e-01   3.5633353e-01   4.1573756e-01   3.2160319e-01   4.5525495e-01   3.4414484e-01   4.4510906e-01   4.0053809e-01   3.9779009e-01   3.7261777e-01   3.1255985e-01   3.4249381e-01   4.0450836e-01   3.1773210e-01   4.2666426e-01   4.1685273e-01   3.9886436e-01   3.5618753e-01   3.9691207e-01   3.4850257e-01   3.6675459e-01   3.6731925e-01   1.9628524e-01   1.9595152e-01   2.3363495e-01   2.7477230e-01   2.4851118e-01   2.7084921e-01   2.1685944e-01   1.7678722e-01   2.2636857e-01   2.2599655e-01   2.5724256e-01   1.9993208e-01   2.4854721e-01   2.6607935e-01   1.4078995e-01   1.8369551e-01   2.6097312e-01   2.1228442e-01   3.2508389e-01   2.2103528e-01   2.7346255e-01   1.8624671e-01   3.3183707e-01   2.6996197e-01   1.9987779e-01   1.9682726e-01   2.5462868e-01   2.7249816e-01   2.4805238e-01   1.5523356e-01   2.2463473e-01   2.0803519e-01   1.9245485e-01   3.6364465e-01   2.7666661e-01   1.9319723e-01   2.1887484e-01   2.8382186e-01   1.9489313e-01   2.4750809e-01   2.8994226e-01   2.4123726e-01   2.1680288e-01   1.8634448e-01   2.4506974e-01   2.0223799e-01   2.1557050e-01   2.0677515e-01   1.2326853e-01   2.1346465e-01   4.6321322e-01   4.0354170e-01   3.7970298e-01   3.9803381e-01   4.2400584e-01   4.2423988e-01   4.1474064e-01   4.0481837e-01   4.3422436e-01   3.5345426e-01   2.8656464e-01   3.7821934e-01   3.5073976e-01   4.3506541e-01   4.3461895e-01   3.3997143e-01   3.5549675e-01   3.5686166e-01   5.0021759e-01   4.0411514e-01   3.5805390e-01   3.8499293e-01   4.4048074e-01   3.2475847e-01   3.4952686e-01   3.4727706e-01   3.0464816e-01   2.9991017e-01   4.2212289e-01   3.3451084e-01   3.9105629e-01   3.0183284e-01   4.3015090e-01   3.2308204e-01   4.2396798e-01   3.7459262e-01   3.7639485e-01   3.5210962e-01   2.9201270e-01   3.1907654e-01   3.7972706e-01   
2.9306033e-01   4.0354170e-01   3.9347753e-01   3.7503434e-01   3.3106203e-01   3.7117506e-01   3.2561211e-01   3.4620183e-01   3.4718282e-01   5.4794269e-04   1.8595150e-03   7.6486933e-03   3.6709934e-03   1.1297771e-02   3.2211024e-03   9.3536855e-04   1.2319946e-03   2.8277178e-03   4.9162548e-03   4.5756064e-04   4.5051431e-03   8.2041162e-03   5.0174936e-03   3.6228961e-04   1.1821527e-02   2.0977093e-03   2.1132418e-02   9.2067605e-04   1.2006919e-02   4.4257629e-04   2.0287770e-02   9.6871594e-03   3.6190304e-05   1.5767395e-04   4.3807979e-03   7.0604016e-03   4.2468409e-03   2.9914879e-03   1.0695136e-03   1.8996353e-04   2.2349983e-05   3.2007300e-02   1.8626112e-02   5.9280137e-03   7.1455899e-04   1.0704644e-02   4.3625889e-03   3.3835317e-03   1.4689427e-02   5.0222514e-03   5.6899707e-04   1.6818673e-04   4.5805421e-03   5.6585758e-03   3.2371570e-03   3.2354233e-04   1.1207565e-02   1.3610741e-03   7.1737216e-02   4.4570224e-02   3.4818230e-02   4.5037397e-02   5.2043753e-02   5.1252208e-02   5.2740741e-02   4.4819381e-02   5.4844272e-02   2.7492078e-02   9.9689819e-03   3.4289978e-02   2.5632216e-02   5.5069559e-02   5.4817042e-02   2.2717491e-02   2.9451557e-02   3.3085492e-02   8.2968826e-02   4.3268668e-02   2.7878625e-02   3.8225012e-02   5.7392372e-02   1.8533113e-02   2.7752880e-02   2.6863838e-02   1.3522246e-02   1.3720834e-02   5.0366251e-02   2.2136217e-02   3.8620607e-02   1.6104597e-02   5.3184340e-02   1.9805362e-02   5.7449954e-02   3.4065200e-02   3.7316903e-02   3.0490470e-02   1.2067932e-02   1.7032969e-02   3.4772493e-02   1.3995163e-02   4.4570224e-02   4.0370599e-02   3.3379869e-02   2.1584097e-02   3.2788169e-02   1.8689520e-02   2.8313400e-02   2.9690702e-02   2.1810130e-03   9.0584977e-03   5.0419708e-03   8.6169391e-03   1.3497185e-03   5.5621011e-04   1.5626898e-03   1.4715788e-03   6.4159866e-03   2.8759099e-05   6.7456588e-03   6.5333041e-03   5.4642837e-03   1.4108585e-03   8.4463171e-03   6.8795048e-04   2.4580787e-02   1.0388021e-03   9.2286561e-03   1.7639378e-03   2.1069750e-02   7.6032385e-03   7.9175727e-04   1.2932915e-03   5.5725803e-03   7.6219263e-03   3.4673161e-03   4.2795834e-03   1.7380404e-03   7.6483144e-04   5.0584546e-04   3.0216137e-02   1.4096242e-02   2.8861882e-03   1.0548294e-03   1.3430225e-02   1.8234348e-03   3.6876972e-03   1.2061426e-02   3.2511669e-03   1.1268214e-03   9.2690858e-04   3.2976984e-03   2.6938518e-03   1.3276664e-03   2.7368625e-04   1.3465991e-02   4.6822375e-04   6.8703048e-02   4.3340061e-02   3.5237451e-02   4.2511976e-02   5.0937508e-02   5.0961712e-02   4.9575019e-02   4.3743233e-02   5.5024281e-02   2.6603107e-02   9.8022959e-03   3.4840370e-02   2.6494597e-02   5.5508568e-02   5.5916268e-02   2.2800218e-02   2.7686924e-02   2.9943438e-02   8.4266289e-02   4.3880838e-02   2.8349604e-02   3.6881146e-02   5.7478275e-02   1.9879185e-02   2.5944410e-02   2.5180647e-02   1.4399125e-02   1.2641568e-02   5.0169037e-02   2.1180767e-02   3.9323276e-02   1.3938661e-02   5.3485998e-02   1.8367828e-02   5.3859405e-02   3.6620288e-02   3.4933732e-02   2.7786245e-02   1.0959937e-02   1.8222006e-02   3.5858197e-02   1.7515874e-02   4.3340061e-02   3.9619354e-02   3.3535832e-02   2.4403195e-02   3.5188163e-02   1.9107255e-02   2.5791231e-02   2.6740983e-02   2.3644893e-03   7.3259797e-04   5.5696730e-03   2.5707992e-03   4.5988577e-03   6.5028014e-05   1.3580837e-03   1.2238983e-03   1.7109321e-03   1.8714646e-03   2.9944669e-03   1.2905109e-02   3.6725511e-03   6.9487479e-03   1.9516582e-03   1.2369689e-02   
2.1307052e-04   6.0423668e-03   3.5751225e-03   1.0028727e-02   4.0861108e-03   1.5902610e-03   2.1378325e-03   8.2628456e-04   1.7251178e-03   8.0406810e-04   9.4788056e-03   1.5999623e-04   8.7509151e-04   2.2560004e-03   1.8732619e-02   1.2379328e-02   7.5803848e-03   2.7223442e-04   5.1089250e-03   5.8562851e-03   2.2798360e-04   7.3027135e-03   2.0605789e-03   3.8651941e-04   3.1125030e-03   1.3286042e-03   6.3896783e-03   2.7088758e-03   9.7424018e-04   2.1879745e-02   1.1792881e-03   5.1158538e-02   2.8393852e-02   2.0746802e-02   2.9099880e-02   3.4416089e-02   3.3763806e-02   3.5540899e-02   2.8575694e-02   3.6757319e-02   1.5144171e-02   3.2315336e-03   2.0363783e-02   1.3898176e-02   3.6986917e-02   3.6943012e-02   1.1630303e-02   1.6809451e-02   2.0213112e-02   6.0723696e-02   2.7441217e-02   1.5470812e-02   2.3398093e-02   3.8846172e-02   9.0112370e-03   1.5570624e-02   1.4874310e-02   5.5171614e-03   5.6710746e-03   3.3048406e-02   1.1270408e-02   2.3776077e-02   7.7482583e-03   3.5413096e-02   9.7831561e-03   3.9569593e-02   2.1047269e-02   2.3004495e-02   1.8015778e-02   4.6822715e-03   7.9260822e-03   2.0880056e-02   7.9428824e-03   2.8393852e-02   2.5015882e-02   1.9595645e-02   1.2073748e-02   1.9972960e-02   8.8353740e-03   1.6316616e-02   1.7585650e-02   7.5921246e-04   7.6581519e-03   8.7602132e-03   1.3285408e-02   3.1081233e-03   6.1183864e-03   3.4258252e-04   8.0693053e-03   1.1527108e-03   4.3989827e-03   2.4770294e-02   1.0297780e-02   1.0716436e-02   8.0921947e-03   4.0468338e-03   3.9682629e-03   8.0100500e-03   9.6963228e-03   3.4263963e-03   5.5915243e-03   6.8271736e-03   7.3405400e-03   4.5375223e-04   3.3086446e-04   2.9090971e-03   1.8954919e-02   3.0536401e-03   5.4665114e-03   8.4934379e-03   1.2174442e-02   1.6139108e-02   1.7445625e-02   4.0248005e-03   8.4214697e-04   1.4968032e-02   1.3210674e-03   7.6674238e-03   5.8292743e-03   4.1363018e-03   9.7236406e-03   4.2254644e-03   1.5250051e-02   9.0649565e-03   6.2807525e-03   3.4303230e-02   6.6673397e-03   3.8666536e-02   1.8227060e-02   1.0732501e-02   2.0722480e-02   2.2627004e-02   2.1137328e-02   2.6644819e-02   1.8156140e-02   2.2956019e-02   8.4429075e-03   1.3075172e-03   1.0338488e-02   5.7120788e-03   2.2877982e-02   2.2249653e-02   5.0348740e-03   1.0858478e-02   1.5670368e-02   4.1328272e-02   1.5415087e-02   7.0760309e-03   1.4735153e-02   2.4690692e-02   2.4709117e-03   1.0136092e-02   9.4846153e-03   1.0671740e-03   3.5238125e-03   2.0483172e-02   6.1432951e-03   1.2637418e-02   6.5903261e-03   2.1786492e-02   6.0819601e-03   3.0486925e-02   9.5174641e-03   1.6103409e-02   1.3353819e-02   3.2356456e-03   1.9926682e-03   1.0243487e-02   2.2538296e-03   1.8227060e-02   1.5108364e-02   1.0188754e-02   3.7627514e-03   8.8362152e-03   3.0791096e-03   1.1920070e-02   1.3588585e-02   8.0203401e-03   5.9881494e-03   7.9998519e-03   1.0330180e-03   3.9563353e-03   9.0962406e-05   4.3155161e-03   3.4798297e-04   4.5913809e-03   1.6934319e-02   5.4778348e-03   1.0415690e-02   5.0724002e-03   7.4143773e-03   1.5625174e-03   8.5090504e-03   5.0363885e-03   7.2978538e-03   5.9687085e-03   3.0800386e-03   3.3854665e-03   6.0369763e-05   9.7766526e-04   2.1106407e-03   1.2150960e-02   8.7050457e-04   2.2450596e-03   4.2656078e-03   1.7573546e-02   1.6541615e-02   1.3010703e-02   1.4863348e-03   2.0318224e-03   1.0715058e-02   5.1959240e-04   9.1091800e-03   4.5577281e-03   1.4679901e-03   5.0882044e-03   3.2208315e-03   1.1416637e-02   6.2084512e-03   2.9761078e-03   2.5010280e-02   3.7556543e-03   
4.8641699e-02   2.5613136e-02   1.7022941e-02   2.7738185e-02   3.0983818e-02   2.9540677e-02   3.4381880e-02   2.5612023e-02   3.1832087e-02   1.3388731e-02   2.7364434e-03   1.6556686e-02   1.0555947e-02   3.1795721e-02   3.1144834e-02   9.3248236e-03   1.5850102e-02   2.0586917e-02   5.3133864e-02   2.2885657e-02   1.2291162e-02   2.1220606e-02   3.3851933e-02   5.9611654e-03   1.4821288e-02   1.4065156e-02   3.4935277e-03   5.6486577e-03   2.8790647e-02   1.0114487e-02   1.9481208e-02   8.7344459e-03   3.0476411e-02   9.4253713e-03   3.8601009e-02   1.5492941e-02   2.2081481e-02   1.8082534e-02   4.9261084e-03   5.1866248e-03   1.6531969e-02   3.8794099e-03   2.5613136e-02   2.2037756e-02   1.6246851e-02   7.5034139e-03   1.4652134e-02   6.6254167e-03   1.6367612e-02   1.8037074e-02   3.5963937e-03   1.3253217e-02   6.2726815e-03   2.9800542e-03   8.1679414e-03   8.1170586e-03   1.1643531e-02   4.8148791e-04   2.7366574e-02   1.5666050e-02   3.5602925e-04   4.6080605e-03   1.9717886e-02   6.2846724e-03   1.0858138e-05   1.6108248e-02   9.2526017e-03   1.6342536e-04   1.1289374e-02   1.3182521e-02   7.1086671e-03   4.8107615e-03   2.1505038e-03   2.4867835e-02   7.4585494e-03   9.3457001e-03   1.1859292e-02   8.6506765e-03   1.5825531e-03   7.6287797e-03   7.1464196e-03   1.3469318e-02   7.0587993e-03   4.6011946e-03   3.8423475e-04   1.2933991e-03   8.0463849e-03   1.4044313e-02   1.5554975e-03   5.8246512e-03   3.7445923e-03   7.8179143e-03   4.3383609e-02   5.1292419e-03   3.1208966e-02   1.7106674e-02   1.5590525e-02   1.4711786e-02   2.2118312e-02   2.3677515e-02   1.8355265e-02   1.7620984e-02   2.7322237e-02   8.2125300e-03   3.2025084e-03   1.5673958e-02   1.1993026e-02   2.8215754e-02   3.0043744e-02   8.3225318e-03   7.3692394e-03   7.0283195e-03   5.0297559e-02   2.1175797e-02   1.1957606e-02   1.3050611e-02   2.8749586e-02   1.0337000e-02   6.4346633e-03   6.2339245e-03   7.0135662e-03   2.1302546e-03   2.3356399e-02   5.5096838e-03   1.8637999e-02   1.1462623e-03   2.6577654e-02   3.5495923e-03   2.0668821e-02   2.2280093e-02   1.0461671e-02   6.3164945e-03   1.6640428e-03   9.2261377e-03   1.7584245e-02   1.7910761e-02   1.7106674e-02   1.5703153e-02   1.4029303e-02   1.7152691e-02   2.0996496e-02   7.4959989e-03   5.5175599e-03   5.5902559e-03   3.2004692e-03   2.3419030e-03   2.3799055e-04   7.0894399e-03   1.3013007e-03   8.8089668e-03   2.9046172e-03   1.1317768e-02   5.4246845e-03   3.1331888e-03   1.2023171e-04   2.4596565e-02   1.8369557e-03   3.9714617e-03   5.9789699e-03   1.7350836e-02   3.3069650e-03   3.5250221e-03   4.6752456e-03   5.9850395e-03   6.3682068e-03   1.7841601e-03   1.0289989e-02   3.0278347e-03   2.8250687e-03   3.3130538e-03   2.2020473e-02   6.7612416e-03   1.5121522e-03   2.2413165e-03   1.4397994e-02   9.1427197e-04   3.3482085e-03   6.1881321e-03   7.4537620e-04   2.6935839e-03   4.4237033e-03   1.1678481e-03   9.0068892e-04   2.9616334e-06   1.6314282e-03   2.2817755e-02   4.4730602e-04   5.5111404e-02   3.4158218e-02   2.9021577e-02   3.2043073e-02   4.1089749e-02   4.2083473e-02   3.7707137e-02   3.4692829e-02   4.6329738e-02   1.9906109e-02   6.9820129e-03   2.8847990e-02   2.2051442e-02   4.7099521e-02   4.8343924e-02   1.7860817e-02   1.9868896e-02   2.0488578e-02   7.4490873e-02   3.6906910e-02   2.3107397e-02   2.8319511e-02   4.8426947e-02   1.7182411e-02   1.8331671e-02   1.7811306e-02   1.2023707e-02   8.1012404e-03   4.1487060e-02   1.5227136e-02   3.2990763e-02   8.0392167e-03   4.5098943e-02   1.2350504e-02   4.1149609e-02   
3.3318195e-02   2.5517275e-02   1.9030633e-02   6.7530681e-03   1.5581926e-02   3.0469465e-02   1.9100479e-02   3.4158218e-02   3.1415243e-02   2.7190491e-02   2.3321177e-02   3.1828581e-02   1.5212907e-02   1.7498443e-02   1.7919743e-02   3.6042298e-03   3.7468828e-03   9.7768723e-03   7.8664808e-04   9.4997776e-03   1.0849175e-02   2.5690262e-03   9.8725606e-04   1.2645853e-02   2.2464135e-03   3.0594913e-02   2.8398433e-03   1.3996699e-02   1.4253747e-03   2.8011999e-02   1.2147431e-02   1.3365379e-03   1.5899927e-03   8.8722552e-03   1.1913349e-02   6.7897739e-03   2.1135064e-03   3.6254453e-03   1.7898196e-03   6.9240260e-04   3.8891340e-02   1.9082524e-02   3.5297395e-03   2.7024302e-03   1.7766622e-02   2.5342822e-03   6.8092652e-03   1.7593080e-02   6.3527044e-03   2.6240465e-03   7.1495011e-04   6.5371413e-03   3.8357773e-03   3.1136154e-03   1.3436442e-03   8.9702416e-03   2.0253115e-03   8.1348350e-02   5.3552444e-02   4.4248656e-02   5.2648527e-02   6.1923652e-02   6.1838703e-02   6.0414517e-02   5.3985372e-02   6.6201925e-02   3.4742321e-02   1.4883289e-02   4.3767758e-02   3.4228373e-02   6.6672589e-02   6.6939218e-02   3.0219175e-02   3.6020463e-02   3.8455365e-02   9.7619080e-02   5.3827294e-02   3.6463335e-02   4.6373140e-02   6.8906148e-02   2.6398982e-02   3.4032020e-02   3.3159361e-02   2.0120628e-02   1.8469842e-02   6.0950543e-02   2.8524017e-02   4.8737718e-02   1.9976319e-02   6.4489083e-02   2.5275791e-02   6.5080438e-02   4.4969016e-02   4.4191042e-02   3.6066964e-02   1.6430448e-02   2.4524789e-02   4.4743887e-02   2.2062365e-02   5.3552444e-02   4.9370398e-02   4.2403953e-02   3.0850071e-02   4.3434971e-02   2.5846885e-02   3.3807321e-02   3.4832107e-02   1.2988107e-03   1.6703175e-03   1.1675843e-03   2.1414615e-03   3.6300242e-03   1.1143098e-02   2.7878082e-03   7.4699291e-03   1.6417856e-03   1.3833381e-02   5.5119833e-05   6.7870415e-03   2.7321459e-03   1.1691818e-02   4.7830259e-03   1.0265050e-03   1.5056614e-03   1.2372212e-03   2.4522309e-03   1.1336677e-03   8.0050923e-03   5.6870746e-05   4.6915622e-04   1.5562032e-03   2.0887194e-02   1.3125417e-02   6.8837382e-03   7.1162950e-05   5.9522504e-03   5.2043728e-03   5.3649750e-04   8.3200456e-03   2.2696632e-03   1.4322584e-04   2.2876100e-03   1.6070427e-03   5.8684470e-03   2.4558869e-03   5.5570590e-04   1.9651663e-02   8.9172519e-04   5.4659464e-02   3.1114613e-02   2.3119551e-02   3.1736271e-02   3.7415588e-02   3.6761852e-02   3.8397207e-02   3.1313890e-02   3.9883303e-02   1.7150840e-02   4.2007421e-03   2.2713095e-02   1.5842502e-02   4.0120080e-02   4.0057089e-02   1.3428356e-02   1.8845203e-02   2.2251079e-02   6.4673609e-02   3.0152373e-02   1.7530451e-02   2.5859899e-02   4.2056336e-02   1.0552527e-02   1.7518114e-02   1.6789150e-02   6.7575680e-03   6.8601014e-03   3.6016991e-02   1.2994930e-02   2.6303908e-02   8.9623653e-03   3.8482980e-02   1.1333705e-02   4.2542187e-02   2.3287141e-02   2.5337550e-02   1.9991634e-02   5.7451264e-03   9.3841364e-03   2.3239540e-02   8.8973019e-03   3.1114613e-02   2.7595638e-02   2.1905943e-02   1.3658827e-02   2.2169613e-02   1.0410758e-02   1.8207584e-02   1.9484294e-02   4.8008453e-03   1.2650854e-03   6.4006179e-03   1.8900850e-03   1.2501460e-02   5.1363105e-03   3.0955001e-03   2.4385845e-04   2.0020418e-02   1.0205842e-03   3.3467158e-03   5.5033857e-03   1.3788482e-02   2.4101593e-03   2.9461849e-03   4.0056193e-03   3.8916824e-03   4.1559722e-03   7.4799809e-04   1.0679921e-02   1.8732675e-03   2.1213449e-03   3.0477499e-03   1.9047032e-02   
6.9834653e-03   2.9438547e-03   1.3758132e-03   1.0992441e-02   2.0426183e-03   1.8183256e-03   5.1275661e-03   3.4870074e-04   1.7800086e-03   4.1941671e-03   4.4278656e-04   2.0594764e-03   2.9165015e-04   1.2452133e-03   2.3762069e-02   2.9165015e-04   5.0966362e-02   3.0166832e-02   2.4694798e-02   2.8788397e-02   3.6671707e-02   3.7274280e-02   3.4472569e-02   3.0604199e-02   4.1125938e-02   1.6692997e-02   4.7977197e-03   2.4489085e-02   1.8077937e-02   4.1761735e-02   4.2713992e-02   1.4455884e-02   1.7039782e-02   1.8322490e-02   6.7670508e-02   3.2041093e-02   1.9169462e-02   2.4724400e-02   4.3157837e-02   1.3510623e-02   1.5640518e-02   1.5102883e-02   8.9767687e-03   6.0642978e-03   3.6671707e-02   1.2419694e-02   2.8323093e-02   6.4935421e-03   3.9910361e-02   1.0006286e-02   3.7967777e-02   2.8141888e-02   2.2594035e-02   1.6734859e-02   4.8977754e-03   1.2095984e-02   2.5839912e-02   1.5166518e-02   3.0166832e-02   2.7378800e-02   2.3072794e-02   1.8885101e-02   2.6783663e-02   1.1941964e-02   1.5226991e-02   1.5844953e-02   5.5898027e-03   3.6211515e-04   4.6884489e-03   1.9469863e-02   6.9494812e-03   1.0856564e-02   6.2070186e-03   5.9174289e-03   2.3319364e-03   8.6215938e-03   6.4261295e-03   5.9157019e-03   6.0481987e-03   4.2268867e-03   4.5610911e-03   4.8970997e-05   6.8799458e-04   2.4420024e-03   1.4255275e-02   1.5079078e-03   3.2366850e-03   5.6006336e-03   1.5957034e-02   1.6871464e-02   1.4787237e-02   2.2762765e-03   1.3333689e-03   1.2378081e-02   7.3736127e-04   8.8932355e-03   5.1508306e-03   2.2779875e-03   6.5293055e-03   3.6737889e-03   1.2985539e-02   7.3417742e-03   4.0613774e-03   2.7837547e-02   4.7997557e-03   4.5639701e-02   2.3278576e-02   1.4877176e-02   2.5663407e-02   2.8314352e-02   2.6769516e-02   3.2135703e-02   2.3238033e-02   2.8854470e-02   1.1827077e-02   2.2644401e-03   1.4421001e-02   8.8429015e-03   2.8776903e-02   2.8063687e-02   7.8778602e-03   1.4365876e-02   1.9273282e-02   4.9067035e-02   2.0328263e-02   1.0496416e-02   1.9196637e-02   3.0789735e-02   4.6457607e-03   1.3443289e-02   1.2708604e-02   2.5804695e-03   5.0424172e-03   2.6041403e-02   8.8829912e-03   1.7122028e-02   8.2367466e-03   2.7547637e-02   8.4529986e-03   3.6274794e-02   1.3225786e-02   2.0335647e-02   1.6784857e-02   4.4669319e-03   3.9860599e-03   1.4316384e-02   2.9523808e-03   2.3278576e-02   1.9803941e-02   1.4205949e-02   5.9492702e-03   1.2454271e-02   5.3895801e-03   1.5143895e-02   1.6858271e-02   5.9387347e-03   5.9571758e-03   6.0526515e-03   1.4146691e-03   8.1320415e-03   6.3420687e-04   2.2943426e-02   7.2217865e-04   8.7156753e-03   1.7123420e-03   1.9617600e-02   7.0385795e-03   6.4044399e-04   1.1390332e-03   4.8019256e-03   6.7384180e-03   2.9679315e-03   4.6026472e-03   1.3268878e-03   5.4152853e-04   4.6516773e-04   2.8772311e-02   1.3783757e-02   3.2459159e-03   7.3821107e-04   1.2234521e-02   2.0992860e-03   3.0774015e-03   1.1372571e-02   2.9352447e-03   8.1022588e-04   9.2842537e-04   2.8855444e-03   2.9418595e-03   1.2977062e-03   1.2869337e-04   1.4124612e-02   3.4246729e-04   6.6626445e-02   4.1505551e-02   3.3416051e-02   4.0874061e-02   4.8928444e-02   4.8860648e-02   4.7883828e-02   4.1881126e-02   5.2796241e-02   2.5141882e-02   8.8605790e-03   3.3017625e-02   2.4870522e-02   5.3247026e-02   5.3590923e-02   2.1349209e-02   2.6308756e-02   2.8721666e-02   8.1439460e-02   4.1842449e-02   2.6703197e-02   3.5208876e-02   5.5212555e-02   1.8433774e-02   2.4622301e-02   2.3863121e-02   1.3181398e-02   1.1675955e-02   4.8074316e-02   
1.9890418e-02   3.7380285e-02   1.3082557e-02   5.1276398e-02   1.7233265e-02   5.2150081e-02   3.4634218e-02   3.3454504e-02   2.6550201e-02   1.0070918e-02   1.6842626e-02   3.3968952e-02   1.6166125e-02   4.1505551e-02   3.7811539e-02   3.1780349e-02   2.2766214e-02   3.3244526e-02   1.7747224e-02   2.4583452e-02   2.5577750e-02   7.4058632e-03   1.7632325e-02   5.8672157e-03   1.4554963e-02   7.5139440e-03   6.3063306e-03   2.8227254e-03   1.2216172e-02   5.2171506e-03   8.3411571e-03   9.1198919e-03   3.7613430e-03   3.7527482e-03   5.6979631e-04   2.0412760e-03   4.1677011e-03   1.2238157e-02   1.7043766e-03   3.0984992e-03   5.1374687e-03   2.0730806e-02   2.1628161e-02   1.6669142e-02   2.5627983e-03   1.3966031e-03   1.3985502e-02   1.7150229e-03   1.2743810e-02   7.3773848e-03   2.3644296e-03   5.7186323e-03   5.6669191e-03   1.5046711e-02   9.0550035e-03   4.3221940e-03   2.4177800e-02   5.7926331e-03   5.2977640e-02   2.8412846e-02   1.8464622e-02   3.1548301e-02   3.3726407e-02   3.1637531e-02   3.8729422e-02   2.8292534e-02   3.3574657e-02   1.5809867e-02   4.4178873e-03   1.7890807e-02   1.1580028e-02   3.3334020e-02   3.2156672e-02   1.0943899e-02   1.8967943e-02   2.4791881e-02   5.4103864e-02   2.4225163e-02   1.3643641e-02   2.4071431e-02   3.5681100e-02   6.5373199e-03   1.7959936e-02   1.7099713e-02   4.3929724e-03   8.0887873e-03   3.0809355e-02   1.2543658e-02   2.0721585e-02   1.2037199e-02   3.2114506e-02   1.2207760e-02   4.3300211e-02   1.5218252e-02   2.5733556e-02   2.1932604e-02   7.3680112e-03   5.8739609e-03   1.7443010e-02   2.6299180e-03   2.8412846e-02   2.4437667e-02   1.7887656e-02   6.9159049e-03   1.4494165e-02   7.9434468e-03   2.0060731e-02   2.2068579e-02   2.3875094e-02   1.2000415e-02   1.3948342e-03   3.4546351e-03   1.4939730e-02   3.7950422e-03   6.0175173e-04   1.2211196e-02   7.0052335e-03   9.1256063e-05   8.0336825e-03   9.5484059e-03   3.8945506e-03   2.3206010e-03   7.0732153e-04   2.0727990e-02   4.4889370e-03   6.3258821e-03   8.7927798e-03   9.0069682e-03   3.7952179e-03   7.8092546e-03   4.4335690e-03   9.0550736e-03   6.7946167e-03   2.1531805e-03   9.6426638e-04   7.0770423e-04   5.1150602e-03   1.0670446e-02   5.5187206e-04   6.0114634e-03   3.0771240e-03   5.3655604e-03   3.8039285e-02   3.5100982e-03   3.3392676e-02   1.7228333e-02   1.4023464e-02   1.5984642e-02   2.2267037e-02   2.3075907e-02   2.0330300e-02   1.7609739e-02   2.6354220e-02   7.6657981e-03   1.4938436e-03   1.3975046e-02   9.8087979e-03   2.7013992e-02   2.8227679e-02   6.7815824e-03   7.6426994e-03   8.5739542e-03   4.8734857e-02   1.9601188e-02   1.0188446e-02   1.3147209e-02   2.7911001e-02   7.5145131e-03   6.7053137e-03   6.3732135e-03   4.4764490e-03   1.3642012e-03   2.2654181e-02   4.8866188e-03   1.6888522e-02   1.3930577e-03   2.5469441e-02   3.2860894e-03   2.3095694e-02   1.8694362e-02   1.1451986e-02   7.4272413e-03   8.6465452e-04   6.5073035e-03   1.5400191e-02   1.2824348e-02   1.7228333e-02   1.5313622e-02   1.2672619e-02   1.3006211e-02   1.7530867e-02   5.5400420e-03   6.4187909e-03   6.8900377e-03   3.1968437e-03   2.6175875e-02   9.5689725e-03   4.4766639e-02   9.9845360e-03   2.8418681e-02   3.6870693e-03   4.5253718e-02   2.5815239e-02   5.6468536e-03   5.2271680e-03   1.8578768e-02   2.3906603e-02   1.7414675e-02   6.6309054e-04   1.0701959e-02   7.1563943e-03   4.3943049e-03   6.0754711e-02   3.4699507e-02   9.6990814e-03   9.4374842e-03   2.8881695e-02   8.6238198e-03   1.6549411e-02   3.3492594e-02   1.6963829e-02   8.9575119e-03   
3.4839312e-03   1.7214511e-02   1.0985852e-02   1.1115876e-02   7.0774378e-03   2.3223923e-03   9.1191389e-03   1.1169736e-01   7.8250518e-02   6.5808362e-02   7.7763348e-02   8.8114611e-02   8.7449743e-02   8.7215232e-02   7.8675005e-02   9.2203874e-02   5.5132383e-02   2.8794481e-02   6.5100824e-02   5.3025891e-02   9.2531519e-02   9.2220151e-02   4.8731252e-02   5.7186075e-02   6.0580024e-02   1.2745253e-01   7.7174441e-02   5.6198920e-02   6.9678402e-02   9.5449745e-02   4.2538093e-02   5.4722713e-02   5.3576276e-02   3.4860888e-02   3.4291475e-02   8.6329226e-02   4.7343142e-02   7.0978985e-02   3.6695928e-02   9.0088005e-02   4.3430772e-02   9.2828505e-02   6.4239353e-02   6.7481588e-02   5.7536867e-02   3.1542812e-02   4.0297150e-02   6.5753742e-02   3.3241661e-02   7.8250518e-02   7.2933055e-02   6.3794057e-02   4.6178781e-02   6.2571051e-02   4.2843100e-02   5.4666081e-02   5.6027673e-02   1.6139749e-02   3.9412782e-03   2.4265145e-02   2.3891707e-03   1.6498373e-02   4.1418612e-05   2.4988702e-02   1.3783039e-02   4.3146851e-04   2.5122323e-04   6.4815288e-03   1.0052630e-02   7.0587637e-03   1.3669961e-03   2.4020339e-03   9.7331181e-04   2.7171014e-04   3.8783427e-02   2.3881244e-02   7.7285447e-03   2.0010737e-03   1.2962382e-02   6.0316172e-03   5.6775409e-03   1.9642810e-02   8.0347010e-03   1.6772041e-03   5.1201506e-05   7.5139419e-03   7.7685816e-03   5.4104161e-03   1.3577183e-03   7.6646931e-03   3.0214900e-03   8.1621829e-02   5.2182884e-02   4.1040324e-02   5.3055858e-02   6.0133087e-02   5.8980137e-02   6.1457708e-02   5.2397706e-02   6.2608146e-02   3.3589093e-02   1.3750660e-02   4.0409320e-02   3.0837807e-02   6.2732754e-02   6.2152008e-02   2.7986481e-02   3.5988306e-02   4.0227814e-02   9.1799153e-02   5.0051858e-02   3.3478326e-02   4.5393111e-02   6.5357720e-02   2.2750138e-02   3.4139497e-02   3.3132950e-02   1.7367166e-02   1.8328224e-02   5.7997026e-02   2.7720666e-02   4.5013892e-02   2.1236242e-02   6.0787869e-02   2.5266646e-02   6.6573172e-02   3.9086224e-02   4.4693577e-02   3.7322864e-02   1.6444309e-02   2.1159286e-02   4.0674593e-02   1.6113197e-02   5.2182884e-02   4.7503868e-02   3.9598661e-02   2.5231075e-02   3.7795470e-02   2.3382629e-02   3.4903444e-02   3.6481922e-02   4.3266011e-03   2.5120283e-02   7.2104610e-03   3.6362605e-04   1.6818393e-02   1.3224351e-02   8.9751703e-04   1.2039448e-02   1.4078561e-02   9.5477546e-03   7.2983976e-03   3.1999143e-03   2.4639885e-02   8.8187561e-03   1.0207680e-02   1.2236717e-02   1.1661588e-02   8.0001293e-04   5.7297070e-03   8.1532898e-03   1.7517743e-02   5.5505771e-03   6.3202290e-03   1.2261175e-03   1.5275601e-03   9.1326114e-03   1.4417917e-02   2.2056571e-03   4.2297788e-03   3.2218738e-03   8.2583968e-03   4.2679879e-02   5.2144338e-03   3.5325690e-02   2.1286357e-02   2.0340751e-02   1.7932964e-02   2.6773752e-02   2.8858059e-02   2.1473619e-02   2.1925037e-02   3.2987864e-02   1.1596147e-02   5.5679058e-03   2.0475433e-02   1.6418368e-02   3.4058657e-02   3.6285268e-02   1.2036020e-02   1.0227865e-02   8.9974606e-03   5.7847652e-02   2.6531721e-02   1.6295053e-02   1.6788576e-02   3.4473710e-02   1.4498141e-02   9.1389529e-03   8.9727022e-03   1.0441422e-02   4.2212624e-03   2.8555402e-02   8.4473824e-03   2.3791713e-02   2.5698702e-03   3.2242281e-02   5.9266749e-03   2.3676462e-02   2.8216759e-02   1.3343464e-02   8.4809495e-03   3.5554866e-03   1.3166972e-02   2.2745344e-02   2.2629651e-02   2.1286357e-02   1.9992085e-02   1.8520730e-02   2.2324446e-02   2.6771848e-02   1.1107369e-02   
7.6788410e-03   7.4852977e-03   2.3512869e-02   1.1702196e-03   5.0486445e-03   4.4067902e-03   1.7493736e-02   4.0658633e-03   2.3529372e-03   3.3047066e-03   5.2094902e-03   6.0430183e-03   1.7805903e-03   8.3604250e-03   2.1645857e-03   1.8214590e-03   2.1737913e-03   2.3530298e-02   8.5714286e-03   1.8444368e-03   1.4565864e-03   1.3243081e-02   1.0630397e-03   2.8808992e-03   7.3652636e-03   1.0835317e-03   1.7933322e-03   3.0961279e-03   1.3436220e-03   1.3043704e-03   1.2482375e-04   8.8034137e-04   2.0043242e-02   1.2482375e-04   5.7954445e-02   3.5775683e-02   2.9742194e-02   3.4149795e-02   4.2824229e-02   4.3477315e-02   4.0214177e-02   3.6256651e-02   4.7605297e-02   2.0950874e-02   7.1042975e-03   2.9499916e-02   2.2329817e-02   4.8271691e-02   4.9230028e-02   1.8375611e-02   2.1296157e-02   2.2492082e-02   7.5802989e-02   3.7751582e-02   2.3617894e-02   2.9827991e-02   4.9792717e-02   1.7017775e-02   1.9724142e-02   1.9131907e-02   1.1873908e-02   8.7349825e-03   4.2824229e-02   1.6130090e-02   3.3687060e-02   9.1410950e-03   4.6289887e-02   1.3350026e-02   4.3904542e-02   3.3071376e-02   2.7381594e-02   2.0813943e-02   7.3233676e-03   1.5432666e-02   3.0904955e-02   1.7682128e-02   3.5775683e-02   3.2747980e-02   2.7982984e-02   2.2571206e-02   3.1620085e-02   1.5454693e-02   1.9154732e-02   1.9759156e-02   1.5625666e-02   2.0014241e-02   2.2906319e-02   3.3689213e-03   1.6561417e-02   1.9555072e-02   1.9724522e-02   6.8229205e-03   6.0996359e-03   1.3507575e-02   3.5744409e-02   1.3348521e-02   1.7651623e-02   2.2506296e-02   1.4148535e-02   3.1413076e-02   3.8119216e-02   1.5501824e-02   1.7783197e-03   3.4445966e-02   9.9745771e-03   1.7822684e-02   1.8993068e-02   1.5436010e-02   2.3911299e-02   1.6070505e-02   3.4793519e-02   2.5109394e-02   1.9730191e-02   5.3384757e-02   2.0981999e-02   3.5016323e-02   1.5808551e-02   7.2216559e-03   2.1250521e-02   1.8323057e-02   1.5289849e-02   2.7077232e-02   1.5342317e-02   1.5450153e-02   9.5719689e-03   7.0769801e-03   6.7142336e-03   3.8618160e-03   1.4833874e-02   1.3052654e-02   5.5299114e-03   1.3477582e-02   2.0871841e-02   2.6745939e-02   9.5407500e-03   5.3018257e-03   1.4058965e-02   1.6863810e-02   2.0380823e-03   1.3353665e-02   1.2655783e-02   3.2775616e-03   9.7625272e-03   1.4632542e-02   9.0437643e-03   7.6711708e-03   1.4740530e-02   1.4381411e-02   1.0907741e-02   3.1026769e-02   2.6758954e-03   1.8148668e-02   1.8095332e-02   1.0256426e-02   2.3444102e-03   5.5327937e-03   1.0086149e-03   1.5808551e-02   1.2612237e-02   7.5482105e-03   2.3261323e-04   2.5482034e-03   4.2287654e-03   1.6814254e-02   1.9242327e-02   6.8098804e-03   2.4185222e-03   1.3095995e-02   4.9100577e-03   8.0319599e-04   1.3085229e-03   1.8113887e-03   3.1306373e-03   1.2541646e-03   7.2250359e-03   1.5182032e-04   3.2471009e-04   1.1793141e-03   2.2134336e-02   1.2798604e-02   5.8036245e-03   2.9068060e-05   7.1481800e-03   4.2574873e-03   8.5847073e-04   8.5715637e-03   2.1039195e-03   1.1389023e-04   1.8746094e-03   1.5895426e-03   4.9316123e-03   1.9257560e-03   2.7708506e-04   1.8515651e-02   5.3986259e-04   5.6637702e-02   3.2886175e-02   2.4967826e-02   3.3187187e-02   3.9414817e-02   3.8923256e-02   3.9880016e-02   3.3128964e-02   4.2231952e-02   1.8470852e-02   4.9097912e-03   2.4570551e-02   1.7467535e-02   4.2525770e-02   4.2580968e-02   1.4790908e-02   2.0011146e-02   2.3142926e-02   6.7845285e-02   3.2285576e-02   1.9161137e-02   2.7420216e-02   4.4447416e-02   1.1980855e-02   1.8611023e-02   1.7883360e-02   7.8729229e-03   
7.5466400e-03   3.8175619e-02   1.4096206e-02   2.8321778e-02   9.4496873e-03   4.0815141e-02   1.2211823e-02   4.4024579e-02   2.5465922e-02   2.6596134e-02   2.0925904e-02   6.3340227e-03   1.0720439e-02   2.5211490e-02   1.0333158e-02   3.2886175e-02   2.9360405e-02   2.3656085e-02   1.5413665e-02   2.4285913e-02   1.1678199e-02   1.9116491e-02   2.0313205e-02   1.6950975e-02   9.2576768e-03   2.2830227e-04   1.1996475e-02   1.3943618e-02   7.5471408e-03   5.0950912e-03   2.4447813e-03   2.5903324e-02   8.0139448e-03   9.9879335e-03   1.2585436e-02   8.2754975e-03   1.4174352e-03   8.0478199e-03   7.7042954e-03   1.3901912e-02   7.5078674e-03   4.9957099e-03   2.9997761e-04   1.5395926e-03   8.6362248e-03   1.4833108e-02   1.8222710e-03   6.1957159e-03   4.1238914e-03   8.4102568e-03   4.4734160e-02   5.6076899e-03   3.0288189e-02   1.6597244e-02   1.5336169e-02   1.4117002e-02   2.1529085e-02   2.3159309e-02   1.7633168e-02   1.7119251e-02   2.6802924e-02   7.9632454e-03   3.3472740e-03   1.5438450e-02   1.1914773e-02   2.7716384e-02   2.9607261e-02   8.2241873e-03   7.0346123e-03   6.5727745e-03   4.9604200e-02   2.0828511e-02   1.1808642e-02   1.2615158e-02   2.8194719e-02   1.0429441e-02   6.1245969e-03   5.9452412e-03   7.1618546e-03   2.1261487e-03   2.2855826e-02   5.3452154e-03   1.8357871e-02   1.0397070e-03   2.6087088e-02   3.3990115e-03   1.9871685e-02   2.2251351e-02   9.9773797e-03   5.9192173e-03   1.6976055e-03   9.3317377e-03   1.7391404e-02   1.8375579e-02   1.6597244e-02   1.5280892e-02   1.3772451e-02   1.7356806e-02   2.0968489e-02   7.4978418e-03   5.1634523e-03   5.1963190e-03   2.4233623e-02   1.4077025e-02   4.3787020e-04   1.8512542e-04   6.0477386e-03   9.6703484e-03   7.1488601e-03   1.5477068e-03   2.2742281e-03   9.7171923e-04   3.9271863e-04   3.8509554e-02   2.4774335e-02   8.7360281e-03   1.9891111e-03   1.1995846e-02   6.8973897e-03   5.5019459e-03   1.9965157e-02   8.4152844e-03   1.6247783e-03   1.4531219e-04   7.7480972e-03   8.6972032e-03   5.9824611e-03   1.5176827e-03   7.8567259e-03   3.3493021e-03   8.1204385e-02   5.1575806e-02   4.0135980e-02   5.2803481e-02   5.9400299e-02   5.8042463e-02   6.1291964e-02   5.1745941e-02   6.1508632e-02   3.3138226e-02   1.3469654e-02   3.9479896e-02   2.9967140e-02   6.1565597e-02   6.0825097e-02   2.7361698e-02   3.5748038e-02   4.0333571e-02   9.0138895e-02   4.8984196e-02   3.2667108e-02   4.4902460e-02   6.4253355e-02   2.1894327e-02   3.3940189e-02   3.2913172e-02   1.6731486e-02   1.8202458e-02   5.7045856e-02   2.7382191e-02   4.3985602e-02   2.1375556e-02   5.9676750e-02   2.5108522e-02   6.6474949e-02   3.7689168e-02   4.4508235e-02   3.7339453e-02   1.6368548e-02   2.0371230e-02   3.9606943e-02   1.4908799e-02   5.1575806e-02   4.6824025e-02   3.8781771e-02   2.3989539e-02   3.6452117e-02   2.2748990e-02   3.4902979e-02   3.6586492e-02   7.5459794e-03   1.9125850e-02   2.0310725e-02   6.1222293e-03   3.4447226e-03   8.1418452e-03   3.7867035e-02   1.2077489e-02   1.6567789e-02   2.1605044e-02   3.9038067e-03   1.6430417e-02   2.8689503e-02   1.3569150e-02   4.2181405e-03   2.6167927e-02   7.2610935e-03   7.0630535e-03   1.1392972e-02   1.4088158e-02   2.3849569e-02   9.5566395e-03   2.5308356e-02   1.7804542e-02   1.7150362e-02   5.8921861e-02   1.6182419e-02   2.0027139e-02   6.1819655e-03   2.0410396e-03   8.6030014e-03   8.6357480e-03   7.5787415e-03   1.2598904e-02   6.0656768e-03   8.7139108e-03   1.7031846e-03   2.3331644e-03   1.8821103e-03   3.1682605e-04   8.7160052e-03   8.5600328e-03   
2.8826583e-04   3.4529372e-03   7.5176091e-03   2.1668627e-02   4.3875084e-03   6.5654007e-04   4.4940457e-03   9.7914071e-03   1.9831433e-04   3.3391345e-03   2.9870521e-03   6.9985633e-04   2.5162956e-03   7.1853248e-03   1.3860021e-03   2.9754449e-03   4.8339512e-03   8.0121089e-03   2.3142577e-03   1.5373296e-02   2.8512223e-03   6.2399212e-03   5.8796015e-03   3.1092372e-03   2.3415322e-04   1.9852251e-03   5.2321529e-03   6.1819655e-03   4.3274341e-03   1.8113019e-03   1.9110595e-03   2.4033410e-03   9.3130265e-05   5.1498910e-03   6.5356041e-03   9.5659910e-03   1.1252405e-02   5.1556774e-03   3.2048021e-03   1.2695843e-03   2.2856756e-02   5.7848184e-03   7.7211451e-03   1.0283621e-02   8.3602499e-03   2.7615733e-03   7.9786726e-03   5.6505970e-03   1.0691105e-02   7.1332727e-03   3.1295602e-03   5.2401241e-04   9.5266815e-04   6.4319329e-03   1.2318850e-02   9.4872145e-04   6.1326072e-03   3.4758657e-03   6.5333653e-03   4.0853883e-02   4.3049208e-03   3.1649609e-02   1.6528693e-02   1.4101758e-02   1.4838879e-02   2.1483291e-02   2.2597676e-02   1.8841242e-02   1.6959579e-02   2.5991410e-02   7.4250762e-03   1.9970150e-03   1.4113092e-02   1.0250250e-02   2.6744340e-02   2.8212793e-02   6.9997737e-03   7.0607843e-03   7.4739664e-03   4.8397396e-02   1.9593840e-02   1.0430940e-02   1.2525634e-02   2.7473469e-02   8.3209543e-03   6.1476473e-03   5.8775226e-03   5.2455782e-03   1.4265861e-03   2.2224591e-02   4.7507930e-03   1.7000338e-02   1.0369313e-03   2.5178358e-02   3.0495461e-03   2.1392322e-02   1.9628131e-02   1.0500631e-02   6.5204272e-03   9.7357638e-04   7.2933833e-03   1.5732817e-02   1.4704532e-02   1.6528693e-02   1.4858839e-02   1.2681737e-02   1.4339393e-02   1.8427144e-02   5.9852538e-03   5.6194230e-03   5.9265324e-03   8.2461319e-05   3.7688699e-03   6.4113525e-03   4.0576095e-03   3.3120738e-03   8.1782115e-04   1.0911416e-04   1.0750826e-04   3.1097496e-02   1.8955233e-02   6.6951403e-03   5.7580225e-04   9.5785875e-03   5.0095389e-03   2.9949836e-03   1.4512717e-02   5.0859668e-03   4.0940931e-04   2.6323499e-04   4.5146422e-03   6.3221754e-03   3.5606197e-03   3.7002765e-04   1.1722539e-02   1.5048339e-03   7.0395338e-02   4.3268177e-02   3.3370831e-02   4.4028795e-02   5.0578307e-02   4.9629423e-02   5.1746432e-02   4.3478758e-02   5.3066435e-02   2.6481479e-02   9.3392135e-03   3.2828923e-02   2.4313691e-02   5.3237993e-02   5.2867243e-02   2.1623243e-02   2.8596153e-02   3.2496284e-02   8.0563616e-02   4.1614554e-02   2.6578058e-02   3.7074597e-02   5.5590704e-02   1.7323162e-02   2.6950223e-02   2.6053254e-02   1.2554151e-02   1.3157434e-02   4.8740123e-02   2.1279987e-02   3.7040705e-02   1.5739340e-02   5.1410982e-02   1.9129885e-02   5.6480014e-02   3.2271228e-02   3.6432542e-02   2.9847396e-02   1.1573499e-02   1.5895178e-02   3.3202746e-02   1.2659655e-02   4.3268177e-02   3.9046279e-02   3.2014714e-02   2.0067384e-02   3.1045819e-02   1.7642117e-02   2.7677552e-02   2.9133996e-02   4.1910853e-03   7.2076003e-03   5.1147587e-03   2.7507325e-03   1.1620134e-03   3.3310681e-04   2.1458858e-04   3.3405406e-02   2.1499526e-02   8.0088547e-03   9.8332257e-04   9.6892205e-03   6.1744631e-03   3.6704596e-03   1.6544301e-02   6.4217725e-03   7.2111274e-04   2.1270559e-04   5.7183304e-03   7.6981750e-03   4.7117366e-03   8.0177817e-04   1.0442534e-02   2.2910923e-03   7.3784227e-02   4.5661689e-02   3.5029236e-02   4.6823533e-02   5.3065363e-02   5.1836541e-02   5.4866908e-02   4.5828107e-02   5.5174565e-02   2.8408213e-02   1.0505539e-02   3.4430852e-02   
2.5609623e-02   5.5260802e-02   5.4654205e-02   2.3114484e-02   3.0836583e-02   3.5207839e-02   8.2686924e-02   4.3372344e-02   2.8064998e-02   3.9380344e-02   5.7772002e-02   1.8254235e-02   2.9162191e-02   2.8206341e-02   1.3497345e-02   1.4745279e-02   5.0900941e-02   2.3090436e-02   3.8677830e-02   1.7710943e-02   5.3450122e-02   2.1019262e-02   5.9801906e-02   3.3130692e-02   3.9030212e-02   3.2379101e-02   1.3108535e-02   1.6839016e-02   3.4625964e-02   1.2551681e-02   4.5661689e-02   4.1207031e-02   3.3732757e-02   2.0513590e-02   3.1937443e-02   1.8908607e-02   3.0105345e-02   3.1711397e-02   5.5341249e-04   1.8100865e-03   1.3685556e-02   1.1595059e-03   2.7733190e-03   5.0259130e-03   1.5582273e-02   1.5303863e-02   1.3216313e-02   1.8061219e-03   1.8483286e-03   1.0961959e-02   4.0632184e-04   7.9419645e-03   4.2030049e-03   1.8572156e-03   5.9964081e-03   2.8843938e-03   1.1487297e-02   6.2195738e-03   3.4171878e-03   2.7328194e-02   3.9549773e-03   4.5323537e-02   2.3233798e-02   1.5194289e-02   2.5226902e-02   2.8389486e-02   2.7075250e-02   3.1585337e-02   2.3242139e-02   2.9332670e-02   1.1663012e-02   1.9950589e-03   1.4768251e-02   9.1566367e-03   2.9330780e-02   2.8799195e-02   7.9264454e-03   1.3962831e-02   1.8481959e-02   5.0115855e-02   2.0802698e-02   1.0733745e-02   1.9041166e-02   3.1266982e-02   4.9760517e-03   1.3001310e-02   1.2292231e-02   2.6877167e-03   4.5798906e-03   2.6364594e-02   8.6153969e-03   1.7566530e-02   7.4744318e-03   2.8043210e-02   7.9969256e-03   3.5640074e-02   1.4109863e-02   1.9847472e-02   1.6098385e-02   3.9587699e-03   4.2468630e-03   1.4818700e-02   3.7268555e-03   2.3233798e-02   1.9851942e-02   1.4429507e-02   6.7117241e-03   1.3281147e-02   5.4664867e-03   1.4483311e-02   1.6079322e-02   1.4644608e-03   1.8793440e-02   2.6529941e-03   4.9386280e-03   7.8423223e-03   1.0293845e-02   1.1871124e-02   1.4064748e-02   3.3517168e-03   2.2145112e-03   1.1983758e-02   7.1285200e-04   4.9198989e-03   3.5944894e-03   3.6244119e-03   9.2640212e-03   2.3814700e-03   1.1959906e-02   6.6391160e-03   5.2659669e-03   3.4768199e-02   5.0225808e-03   3.6096256e-02   1.6893661e-02   1.0608435e-02   1.8369401e-02   2.1445998e-02   2.0588370e-02   2.3840862e-02   1.6945577e-02   2.2798129e-02   7.2320779e-03   4.5716975e-04   1.0311574e-02   5.8475513e-03   2.2922553e-02   2.2802937e-02   4.5367353e-03   8.9750660e-03   1.2721749e-02   4.2209187e-02   1.5508457e-02   6.9302761e-03   1.3261817e-02   2.4477020e-02   2.9037640e-03   8.2014592e-03   7.6405998e-03   1.0779902e-03   2.0077167e-03   2.0000571e-02   4.8259503e-03   1.2764273e-02   4.2375783e-03   2.1711543e-02   4.3575270e-03   2.7378197e-02   1.1026559e-02   1.3799894e-02   1.0731586e-02   1.6733633e-03   2.2875292e-03   1.0634216e-02   4.2659077e-03   1.6893661e-02   1.4117780e-02   9.8480141e-03   5.3491991e-03   1.0216132e-02   2.7977678e-03   9.4220767e-03   1.0761419e-02   1.4255275e-02   1.6372127e-03   2.8542706e-03   4.7214122e-03   1.3332443e-02   7.0444510e-03   6.5532524e-03   1.6174955e-03   6.7559932e-03   5.2389698e-03   5.3640347e-04   3.3171183e-03   5.1283913e-04   2.0296126e-03   6.0950963e-03   1.2744293e-04   5.0948120e-03   1.9318373e-03   2.3608728e-03   2.9089432e-02   1.4755776e-03   4.1865124e-02   2.2429622e-02   1.7100637e-02   2.2021240e-02   2.8022948e-02   2.8179896e-02   2.7393595e-02   2.2729266e-02   3.1385076e-02   1.0928177e-02   1.7748140e-03   1.6893502e-02   1.1535806e-02   3.1856326e-02   3.2502251e-02   8.7863970e-03   1.1670124e-02   1.3706425e-02   
5.4822004e-02   2.3316700e-02   1.2494202e-02   1.7833364e-02   3.3214276e-02   7.9216177e-03   1.0562159e-02   1.0054198e-02   4.5572702e-03   2.8793276e-03   2.7614983e-02   7.5454514e-03   2.0103411e-02   3.8323098e-03   3.0272091e-02   5.9246169e-03   3.0771609e-02   1.9772265e-02   1.6666117e-02   1.2065056e-02   2.1106407e-03   6.8455106e-03   1.7918371e-02   1.0155544e-02   2.2429622e-02   1.9803941e-02   1.5812785e-02   1.2351590e-02   1.8630292e-02   6.7616623e-03   1.0713593e-02   1.1544341e-02   7.3852619e-03   4.6030790e-03   2.5842708e-03   5.4300453e-02   3.3576641e-02   1.0668428e-02   6.6015056e-03   2.1814225e-02   9.0913366e-03   1.2584515e-02   3.0243821e-02   1.4861127e-02   6.0467884e-03   1.7442631e-03   1.4568415e-02   1.1492675e-02   1.0173171e-02   5.0439803e-03   2.6472861e-03   7.4513739e-03   1.0326079e-01   7.0098941e-02   5.7127711e-02   7.0823391e-02   7.9250920e-02   7.7946311e-02   8.0298570e-02   7.0363352e-02   8.2049228e-02   4.8294128e-02   2.3708406e-02   5.6369153e-02   4.4949110e-02   8.2157728e-02   8.1370723e-02   4.1592461e-02   5.0974970e-02   5.5456655e-02   1.1462128e-01   6.7593968e-02   4.8172633e-02   6.2189094e-02   8.5184220e-02   3.4981615e-02   4.8742735e-02   4.7569879e-02   2.8345798e-02   2.9430788e-02   7.6817222e-02   4.1186102e-02   6.1732592e-02   3.2638549e-02   7.9961084e-02   3.8043139e-02   8.5993831e-02   5.4182844e-02   6.1100206e-02   5.2203816e-02   2.6988218e-02   3.3049806e-02   5.6579852e-02   2.5305632e-02   7.0098941e-02   6.4715083e-02   5.5458622e-02   3.7374533e-02   5.2731313e-02   3.5938214e-02   4.9377578e-02   5.1061578e-02   3.5870177e-04   1.3937749e-03   2.2192133e-02   1.4887667e-02   7.8001469e-03   8.8171759e-05   5.5216302e-03   5.9793748e-03   7.0276376e-04   9.5746848e-03   3.0437686e-03   8.2181914e-05   2.0068727e-03   2.2506604e-03   6.8090409e-03   3.1469229e-03   6.3301284e-04   1.8528100e-02   1.2625125e-03   5.6704255e-02   3.2399710e-02   2.3806821e-02   3.3394775e-02   3.8737297e-02   3.7833623e-02   4.0310184e-02   3.2556548e-02   4.0845247e-02   1.8140183e-02   4.6897646e-03   2.3353931e-02   1.6279158e-02   4.1004664e-02   4.0734447e-02   1.4064354e-02   2.0110135e-02   2.3937572e-02   6.5512797e-02   3.0871191e-02   1.8117892e-02   2.7110445e-02   4.3069544e-02   1.0732705e-02   1.8772743e-02   1.7996705e-02   7.0003565e-03   7.6876035e-03   3.7052349e-02   1.3928781e-02   2.6945955e-02   1.0128877e-02   3.9392715e-02   1.2368176e-02   4.4616010e-02   2.3284644e-02   2.6871325e-02   2.1527670e-02   6.5473767e-03   9.5899926e-03   2.3719212e-02   8.2529644e-03   3.2399710e-02   2.8696517e-02   2.2657048e-02   1.3399527e-02   2.2203050e-02   1.0884330e-02   1.9665180e-02   2.1074599e-02   3.3845415e-04   2.7555720e-02   1.6677843e-02   6.3795274e-03   1.8362843e-04   8.3039101e-03   4.7168779e-03   1.9845708e-03   1.2200549e-02   3.8746792e-03   1.0179352e-04   6.8781671e-04   3.2951534e-03   5.8068152e-03   2.8838183e-03   1.7929502e-04   1.4079350e-02   1.0261263e-03   6.5064614e-02   3.9139337e-02   2.9910766e-02   3.9801101e-02   4.6137717e-02   4.5316574e-02   4.7159535e-02   3.9352955e-02   4.8674767e-02   2.3254889e-02   7.4636086e-03   2.9417103e-02   2.1432655e-02   4.8877998e-02   4.8632496e-02   1.8795647e-02   2.5200016e-02   2.8886320e-02   7.5371106e-02   3.7782169e-02   2.3498806e-02   3.3236011e-02   5.1084684e-02   1.4998262e-02   2.3652548e-02   2.2813769e-02   1.0515841e-02   1.0877794e-02   4.4476659e-02   1.8379623e-02   3.3440220e-02   1.3262540e-02   4.7104689e-02   
1.6362923e-02   5.1690239e-02   2.9302173e-02   3.2587374e-02   2.6378456e-02   9.4404077e-03   1.3645256e-02   2.9862026e-02   1.1376615e-02   3.9139337e-02   3.5158865e-02   2.8588131e-02   1.7920264e-02   2.8103943e-02   1.5132216e-02   2.4337714e-02   2.5719532e-02   3.3433714e-02   1.9092201e-02   5.6820829e-03   9.6311300e-04   1.1687721e-02   4.1741711e-03   3.9152052e-03   1.5429746e-02   5.3660274e-03   8.0766059e-04   9.2937667e-05   4.9943675e-03   5.5161799e-03   3.3131521e-03   4.2533013e-04   1.0435178e-02   1.4826896e-03   7.3827823e-02   4.6346021e-02   3.6506307e-02   4.6683572e-02   5.3981470e-02   5.3241058e-02   5.4467293e-02   4.6614647e-02   5.6933591e-02   2.8894083e-02   1.0839802e-02   3.5973376e-02   2.7110780e-02   5.7179243e-02   5.6957917e-02   2.4065468e-02   3.0813759e-02   3.4360438e-02   8.5582303e-02   4.5156230e-02   2.9396418e-02   3.9850269e-02   5.9522160e-02   1.9818286e-02   2.9063291e-02   2.8164144e-02   1.4610346e-02   1.4656069e-02   5.2344759e-02   2.3377306e-02   4.0412481e-02   1.6984268e-02   5.5250411e-02   2.0918953e-02   5.9213325e-02   3.5814189e-02   3.8804285e-02   3.1759139e-02   1.2931018e-02   1.8261938e-02   3.6495356e-02   1.5089913e-02   4.6346021e-02   4.2098942e-02   3.5016667e-02   2.2986158e-02   3.4503485e-02   1.9934983e-02   2.9546649e-02   3.0900707e-02   1.1335824e-02   3.2301226e-02   2.3277392e-02   1.5629192e-02   3.0795664e-02   1.5041716e-02   5.4315810e-03   1.4713782e-02   2.4451968e-02   3.6761202e-02   1.3762814e-02   2.8473446e-02   2.2472023e-02   2.6736705e-02   8.0230549e-02   2.3341323e-02   8.1067950e-03   1.4485807e-03   2.4486060e-03   1.1703837e-03   3.1310445e-03   3.9732842e-03   2.8350922e-03   1.6242765e-03   5.7101906e-03   4.5289508e-04   6.6077306e-03   2.6813364e-03   3.3571693e-03   6.2840633e-03   7.7260784e-03   2.0715363e-03   5.6385417e-05   8.9610646e-04   1.8073537e-02   3.9621048e-03   2.3604444e-03   4.7560860e-04   6.2480089e-03   5.8573026e-03   1.7069942e-04   2.3254450e-04   6.3902073e-03   3.8624324e-03   3.9083302e-03   1.0347438e-03   3.4531305e-03   3.5564063e-03   5.5040957e-03   1.4887365e-03   4.1720167e-03   8.6822743e-03   2.8448886e-04   5.1458480e-04   4.8536300e-03   5.8397245e-03   3.9827712e-03   1.8058791e-02   1.4485807e-03   1.2408841e-03   1.8448387e-03   1.0796845e-02   8.0405443e-03   3.4808504e-03   5.2754145e-04   8.8520211e-04   8.8923137e-03   1.4043268e-02   2.4126814e-02   9.1698320e-03   1.1327807e-02   1.9771649e-03   4.5360079e-03   1.5317882e-02   2.1762150e-02   5.6097757e-03   7.1877025e-03   6.8496039e-03   1.4100359e-02   5.3810596e-02   9.9629280e-03   3.1307769e-02   2.0553715e-02   2.1937453e-02   1.5991855e-02   2.5676129e-02   2.8613140e-02   1.8469501e-02   2.1322428e-02   3.3003652e-02   1.2471005e-02   8.9752587e-03   2.2249180e-02   1.9221338e-02   3.4330234e-02   3.7274155e-02   1.4283960e-02   1.0136722e-02   7.4549090e-03   5.7532344e-02   2.7732649e-02   1.8492242e-02   1.6393364e-02   3.4241314e-02   1.8497141e-02   9.1551303e-03   9.1767161e-03   1.4470512e-02   6.3039218e-03   2.8459911e-02   9.7413243e-03   2.5409718e-02   3.4719577e-03   3.2484401e-02   7.0164005e-03   2.0017467e-02   3.2249701e-02   1.2084843e-02   7.5073944e-03   5.8227226e-03   1.7146634e-02   2.5055287e-02   2.9865216e-02   2.0553715e-02   2.0008085e-02   1.9953689e-02   2.7764157e-02   3.0721044e-02   1.4086953e-02   7.0439235e-03   6.3871700e-03   6.2932024e-03   2.4881477e-02   1.2570656e-04   9.2318283e-03   1.1425023e-02   3.9781027e-03   6.8287051e-03   
6.6316807e-03   5.1943433e-03   1.2253382e-04   1.3842125e-03   4.4535183e-03   2.1436401e-02   2.8241776e-03   6.8828353e-02   4.7091014e-02   4.2520568e-02   4.3241334e-02   5.5155458e-02   5.7042053e-02   4.8981701e-02   4.7857882e-02   6.2261278e-02   3.0649818e-02   1.4761608e-02   4.2406580e-02   3.4493935e-02   6.3340671e-02   6.5239814e-02   2.8920518e-02   2.9750953e-02   2.8851937e-02   9.4568410e-02   5.1830706e-02   3.5536784e-02   4.0198717e-02   6.4555077e-02   2.8697450e-02   2.7846993e-02   2.7347316e-02   2.1934395e-02   1.5675611e-02   5.6443004e-02   2.4878970e-02   4.7356921e-02   1.4503825e-02   6.0963940e-02   2.0840976e-02   5.2367097e-02   4.8672170e-02   3.5784762e-02   2.7656172e-02   1.3836981e-02   2.6623769e-02   4.4650658e-02   3.0852069e-02   4.7091014e-02   4.4370840e-02   4.0176310e-02   3.6600204e-02   4.6858384e-02   2.5835018e-02   2.6019603e-02   2.5994222e-02   6.9728196e-03   4.6608676e-03   9.9835054e-04   9.5223148e-03   2.6272609e-03   2.7884021e-05   1.5635547e-03   2.0372846e-03   5.4615556e-03   2.3300808e-03   2.5698617e-04   1.7457304e-02   7.2996870e-04   5.8453388e-02   3.4095678e-02   2.5737672e-02   3.4628989e-02   4.0687367e-02   4.0032201e-02   4.1519056e-02   3.4313166e-02   4.3287054e-02   1.9386832e-02   5.3532424e-03   2.5306754e-02   1.8013458e-02   4.3531307e-02   4.3448361e-02   1.5446633e-02   2.1111383e-02   2.4519102e-02   6.8935827e-02   3.3123525e-02   1.9821803e-02   2.8570801e-02   4.5547846e-02   1.2302601e-02   1.9692146e-02   1.8929129e-02   8.1934144e-03   8.2421848e-03   3.9256624e-02   1.4936345e-02   2.9083818e-02   1.0370393e-02   4.1828239e-02   1.3093356e-02   4.5785106e-02   2.5766235e-02   2.7913838e-02   2.2194996e-02   6.9953757e-03   1.1047300e-02   2.5843766e-02   1.0033677e-02   3.4095678e-02   3.0429687e-02   2.4458901e-02   1.5454278e-02   2.4603652e-02   1.2196519e-02   2.0322394e-02   2.1606954e-02   2.1763592e-02   3.8778874e-03   1.3140898e-02   1.0973585e-02   6.8570288e-03   1.2689294e-02   8.7227464e-03   2.2446768e-02   1.4768924e-02   9.8837253e-03   3.6594036e-02   1.1176832e-02   4.2371132e-02   2.0489598e-02   1.1322791e-02   2.4620431e-02   2.4482623e-02   2.2010230e-02   3.1108475e-02   2.0217050e-02   2.3141445e-02   1.0944826e-02   3.8599335e-03   1.0791452e-02   6.1542383e-03   2.2743482e-02   2.1333545e-02   6.5515291e-03   1.4388718e-02   2.0852825e-02   3.9383251e-02   1.5461941e-02   7.8878514e-03   1.7372104e-02   2.4911352e-02   2.6880230e-03   1.3798611e-02   1.3025794e-02   2.1193973e-03   6.9838956e-03   2.1270749e-02   8.9787629e-03   1.2743052e-02   1.1335890e-02   2.1877049e-02   9.6533919e-03   3.5351983e-02   7.6384883e-03   2.0107777e-02   1.8067074e-02   6.8413076e-03   2.4741061e-03   1.0039063e-02   3.4670280e-04   2.0489598e-02   1.6923143e-02   1.1154236e-02   2.1521967e-03   7.1850256e-03   4.3898059e-03   1.6491142e-02   1.8665860e-02   7.4245740e-03   1.0715228e-02   3.1685550e-03   5.1120383e-03   5.0451240e-03   4.1323270e-03   1.4489604e-04   8.1318290e-04   3.0839979e-03   1.9714350e-02   1.7947024e-03   6.7440625e-02   4.5128820e-02   3.9916292e-02   4.1909560e-02   5.3040678e-02   5.4550431e-02   4.7872699e-02   4.5818357e-02   5.9509133e-02   2.8778013e-02   1.2927960e-02   3.9749997e-02   3.1835574e-02   6.0466685e-02   6.2068316e-02   2.6673332e-02   2.8274822e-02   2.8015780e-02   9.1028220e-02   4.9020613e-02   3.3018114e-02   3.8378554e-02   6.1817956e-02   2.5959804e-02   2.6422336e-02   2.5876047e-02   1.9520884e-02   1.4184191e-02   5.3918305e-02   
2.3137863e-02   4.4568283e-02   1.3507580e-02   5.8174666e-02   1.9385371e-02   5.1416653e-02   4.5132833e-02   3.4491897e-02   2.6637939e-02   1.2404699e-02   2.3988175e-02   4.1744067e-02   2.7329966e-02   4.5128820e-02   4.2235431e-02   3.7715182e-02   3.3171215e-02   4.3405180e-02   2.3512609e-02   2.4946638e-02   2.5117607e-02   5.7451096e-03   2.0181202e-03   1.1916495e-03   5.0056293e-03   1.1565469e-03   7.7131020e-03   3.5313465e-03   2.1072743e-03   2.6393551e-02   2.0670282e-03   4.4935473e-02   2.3639481e-02   1.6646699e-02   2.4503230e-02   2.9137719e-02   2.8488385e-02   3.0530237e-02   2.3788614e-02   3.1241308e-02   1.1731452e-02   1.7654569e-03   1.6307697e-02   1.0602623e-02   3.1457386e-02   3.1449229e-02   8.6094127e-03   1.3341698e-02   1.6740022e-02   5.3656632e-02   2.2705161e-02   1.1958802e-02   1.9130319e-02   3.3171817e-02   6.4720106e-03   1.2268562e-02   1.1633623e-02   3.5425115e-03   3.7925712e-03   2.7828397e-02   8.3873180e-03   1.9384549e-02   5.8220699e-03   3.0002594e-02   7.2263237e-03   3.4339797e-02   1.7195852e-02   1.8977924e-02   1.4659749e-02   3.0403500e-03   5.5430802e-03   1.6804744e-02   6.5028482e-03   2.3639481e-02   2.0526700e-02   1.5612876e-02   9.4514606e-03   1.6202837e-02   6.2323724e-03   1.3120120e-02   1.4374340e-02   2.8180986e-03   1.0509318e-02   1.7897176e-02   2.8798358e-03   9.1928702e-03   6.3970688e-03   1.0746267e-02   5.0496414e-02   7.7981540e-03   2.4751970e-02   1.2458649e-02   1.1686269e-02   1.0356316e-02   1.6774912e-02   1.8282513e-02   1.3498932e-02   1.2919728e-02   2.1579044e-02   5.2962835e-03   2.7725666e-03   1.1818152e-02   9.1091800e-03   2.2436834e-02   2.4265521e-02   5.8512682e-03   4.4398598e-03   4.1822455e-03   4.2435194e-02   1.6425553e-02   8.8184300e-03   9.0473617e-03   2.2804959e-02   8.4055012e-03   3.7245140e-03   3.5955501e-03   5.8501155e-03   1.2261597e-03   1.8027694e-02   3.2889908e-03   1.4332649e-02   2.5252408e-04   2.0964539e-02   1.7865385e-03   1.5552416e-02   1.8549588e-02   6.8429145e-03   3.5948443e-03   1.0694978e-03   7.5092190e-03   1.3665818e-02   1.7252830e-02   1.2458649e-02   1.1370103e-02   1.0296741e-02   1.5007456e-02   1.7383467e-02   5.5463863e-03   2.9922345e-03   3.0734531e-03   3.1960387e-03   6.8662451e-03   1.2966654e-04   2.7649067e-03   8.3427366e-04   2.7984284e-03   2.9822521e-02   1.2731780e-03   4.3416537e-02   2.4875338e-02   2.0714079e-02   2.3200351e-02   3.0852531e-02   3.1753284e-02   2.8200300e-02   2.5332686e-02   3.5512031e-02   1.2989773e-02   3.4785945e-03   2.0603808e-02   1.5135483e-02   3.6226615e-02   3.7456354e-02   1.1531484e-02   1.2959687e-02   1.3737739e-02   6.0763799e-02   2.7435536e-02   1.5849420e-02   1.9923216e-02   3.7336849e-02   1.1567919e-02   1.1725372e-02   1.1300051e-02   7.4803077e-03   3.9656480e-03   3.1244978e-02   9.2703065e-03   2.4126524e-02   3.9496417e-03   3.4455719e-02   7.0383738e-03   3.1302098e-02   2.5203523e-02   1.7680030e-02   1.2436482e-02   3.0466203e-03   1.0266971e-02   2.2126952e-02   1.5206492e-02   2.4875338e-02   2.2550174e-02   1.9125072e-02   1.7383181e-02   2.3878119e-02   9.5944653e-03   1.1168635e-02   1.1615557e-02   1.3151667e-03   2.5320521e-03   6.0366399e-03   2.7823080e-03   2.9364031e-04   1.6476106e-02   9.7247557e-04   6.0284770e-02   3.5334723e-02   2.6546579e-02   3.6095524e-02   4.1987790e-02   4.1172424e-02   4.3178382e-02   3.5527420e-02   4.4374359e-02   2.0339225e-02   5.8436205e-03   2.6082735e-02   1.8603485e-02   4.4570054e-02   4.4351810e-02   1.6144347e-02   2.2244142e-02   
2.5921932e-02   7.0056396e-02   3.3998717e-02   2.0524111e-02   2.9752453e-02   4.6679420e-02   1.2673386e-02   2.0806267e-02   2.0008665e-02   8.5632030e-03   8.9793060e-03   4.0369526e-02   1.5814708e-02   2.9884891e-02   1.1327921e-02   4.2874486e-02   1.4012365e-02   4.7563178e-02   2.6115270e-02   2.9259206e-02   2.3493076e-02   7.6990791e-03   1.1423166e-02   2.6517929e-02   9.7961479e-03   3.5334723e-02   3.1531604e-02   2.5300172e-02   1.5549538e-02   2.4969819e-02   1.2759840e-02   2.1558690e-02   2.2929217e-02   6.4418336e-03   6.6136731e-03   4.4102106e-03   9.1481615e-04   8.6358065e-03   2.3009102e-03   7.8700864e-02   5.0063075e-02   3.9499858e-02   5.0635713e-02   5.7926279e-02   5.6986449e-02   5.8777354e-02   5.0310505e-02   6.0682610e-02   3.1865387e-02   1.2666075e-02   3.8913195e-02   2.9597893e-02   6.0871779e-02   6.0471723e-02   2.6606279e-02   3.4024273e-02   3.7905442e-02   8.9816272e-02   4.8416482e-02   3.2086335e-02   4.3354864e-02   6.3371751e-02   2.1811352e-02   3.2202223e-02   3.1242353e-02   1.6436605e-02   1.6909958e-02   5.6040246e-02   2.6101775e-02   4.3480802e-02   1.9524603e-02   5.8918015e-02   2.3597831e-02   6.3733101e-02   3.8161980e-02   4.2440703e-02   3.5140814e-02   1.5073633e-02   2.0215594e-02   3.9313992e-02   1.6000311e-02   5.0063075e-02   4.5569203e-02   3.8016354e-02   2.4659247e-02   3.6849168e-02   2.2186423e-02   3.2805330e-02   3.4269626e-02   3.8467105e-03   1.2875815e-03   2.5072619e-03   2.9525973e-02   1.2875815e-03   4.2282453e-02   2.3357390e-02   1.8646096e-02   2.2313442e-02   2.9124686e-02   2.9658732e-02   2.7480923e-02   2.3735668e-02   3.3135090e-02   1.1714292e-02   2.4574323e-03   1.8489812e-02   1.3106245e-02   3.3729170e-02   3.4672500e-02   9.9345371e-03   1.2062833e-02   1.3461486e-02   5.7431716e-02   2.5093254e-02   1.3932087e-02   1.8599958e-02   3.4955869e-02   9.5459229e-03   1.0897419e-02   1.0433767e-02   5.8375168e-03   3.2324242e-03   2.9124686e-02   8.1819777e-03   2.1845036e-02   3.6925062e-03   3.2054971e-02   6.2644754e-03   3.0710474e-02   2.2249402e-02   1.6897068e-02   1.1997033e-02   2.3970471e-03   8.3620725e-03   1.9766550e-02   1.2536577e-02   2.3357390e-02   2.0894448e-02   1.7210222e-02   1.4675128e-02   2.1017625e-02   7.9709601e-03   1.0694253e-02   1.1329979e-02   8.0862109e-04   3.9488260e-03   2.3216413e-02   2.2109950e-03   6.3313725e-02   4.2461361e-02   3.8219621e-02   3.8832114e-02   5.0144792e-02   5.1966249e-02   4.4333772e-02   4.3192281e-02   5.6977170e-02   2.6944997e-02   1.2389873e-02   3.8126276e-02   3.0729621e-02   5.8026007e-02   5.9897200e-02   2.5415278e-02   2.6079828e-02   2.5293603e-02   8.8103550e-02   4.7063061e-02   3.1649890e-02   3.5923563e-02   5.9166899e-02   2.5434205e-02   2.4297609e-02   2.3831192e-02   1.9112973e-02   1.3083158e-02   5.1398726e-02   2.1557413e-02   4.2826183e-02   1.1968109e-02   5.5744441e-02   1.7792619e-02   4.7600638e-02   4.4377594e-02   3.1768113e-02   2.4143930e-02   1.1416637e-02   2.3484839e-02   4.0313562e-02   2.8205265e-02   4.2461361e-02   3.9889484e-02   3.5980112e-02   3.3161057e-02   4.2633793e-02   2.2600989e-02   2.2605649e-02   2.2606045e-02   1.6651142e-03   2.2593118e-02   4.7494657e-04   5.5766903e-02   3.4735344e-02   2.9594132e-02   3.2557240e-02   4.1723024e-02   4.2746173e-02   3.8238608e-02   3.5278801e-02   4.7033204e-02   2.0363061e-02   7.2717611e-03   2.9420951e-02   2.2560431e-02   4.7813714e-02   4.9078515e-02   1.8314288e-02   2.0299319e-02   2.0869007e-02   7.5386883e-02   3.7549246e-02   2.3623644e-02   
2.8844458e-02   4.9142563e-02   1.7633849e-02   1.8744071e-02   1.8222205e-02   1.2402012e-02   8.3988430e-03   4.2147698e-02   1.5629140e-02   3.3602383e-02   8.3018288e-03   4.5796447e-02   1.2702505e-02   4.1687026e-02   3.3943329e-02   2.5980350e-02   1.9415550e-02   7.0260502e-03   1.6012157e-02   3.1063717e-02   1.9520820e-02   3.4735344e-02   3.1983672e-02   2.7741891e-02   2.3833980e-02   3.2440091e-02   1.5637054e-02   1.7874227e-02   1.8281297e-02   1.4972117e-02   3.8328039e-04   6.3723448e-02   3.8719440e-02   3.0384566e-02   3.8629777e-02   4.5827446e-02   4.5472032e-02   4.5653244e-02   3.9023449e-02   4.9116846e-02   2.2930601e-02   7.4083466e-03   2.9962579e-02   2.2100137e-02   4.9469036e-02   4.9598054e-02   1.8997460e-02   2.4379661e-02   2.7279745e-02   7.6566902e-02   3.8413283e-02   2.3958684e-02   3.2715136e-02   5.1484761e-02   1.5886249e-02   2.2794916e-02   2.2023153e-02   1.1094162e-02   1.0331639e-02   4.4681292e-02   1.7978253e-02   3.4097516e-02   1.2099271e-02   4.7608429e-02   1.5662138e-02   4.9957613e-02   3.0977854e-02   3.1455087e-02   2.5013254e-02   8.8604244e-03   1.4432007e-02   3.0714741e-02   1.3426163e-02   3.8719440e-02   3.4992868e-02   2.8902863e-02   1.9656006e-02   2.9684992e-02   1.5494562e-02   2.3064694e-02   2.4203450e-02   1.8836956e-02   1.3755831e-01   9.8685385e-02   8.2425470e-02   1.0000651e-01   1.0925759e-01   1.0726669e-01   1.1120208e-01   9.8918033e-02   1.1167083e-01   7.2661493e-02   4.1665349e-02   8.1426911e-02   6.7501542e-02   1.1159988e-01   1.1014878e-01   6.3989575e-02   7.6224987e-02   8.1835804e-02   1.4773346e-01   9.4587815e-02   7.1696153e-02   8.9447560e-02   1.1534202e-01   5.4876111e-02   7.3538690e-02   7.2081812e-02   4.6952332e-02   4.9385723e-02   1.0589901e-01   6.4038977e-02   8.7644093e-02   5.3625558e-02   1.0917520e-01   6.0322002e-02   1.1787190e-01   7.6918821e-02   8.8499122e-02   7.7885622e-02   4.6255911e-02   5.2612368e-02   8.1239445e-02   4.0019052e-02   9.8685385e-02   9.2131722e-02   8.0633770e-02   5.6243805e-02   7.5363077e-02   5.6811044e-02   7.4440381e-02   7.6512320e-02   5.8097767e-02   3.5211195e-02   2.8463781e-02   3.4215950e-02   4.2147698e-02   4.2414656e-02   4.0542228e-02   3.5615822e-02   4.6295613e-02   2.0363061e-02   6.3798220e-03   2.8163995e-02   2.0925233e-02   4.6838114e-02   4.7487902e-02   1.7368541e-02   2.1117782e-02   2.2967145e-02   7.3810390e-02   3.6316652e-02   2.2375879e-02   2.9367287e-02   4.8511603e-02   1.5463833e-02   1.9583828e-02   1.8938570e-02   1.0601468e-02   8.3988430e-03   4.1723024e-02   1.5629140e-02   3.2244606e-02   9.3153170e-03   4.4933305e-02   1.3111918e-02   4.4412271e-02   3.0809608e-02   2.7438827e-02   2.1093197e-02   7.0260502e-03   1.3969417e-02   2.9313944e-02   1.5218585e-02   3.5211195e-02   3.1983672e-02   2.6841235e-02   2.0313092e-02   2.9439905e-02   1.4332974e-02   1.9364178e-02   2.0170126e-02   3.9924060e-03   1.0538198e-02   3.2148883e-03   2.7366019e-03   4.4621109e-03   1.7397347e-03   4.1425525e-03   5.2723146e-03   1.1136640e-02   2.9112354e-02   1.1160954e-02   1.6541883e-02   6.0336652e-03   8.3590571e-03   1.6026960e-02   9.4770245e-03   8.7523451e-03   9.5282841e-03   8.5487892e-03   1.3900105e-02   5.7309995e-03   4.7773544e-03   2.3853571e-02   1.0495078e-02   1.1045936e-02   2.7111420e-02   2.3076623e-02   4.8207470e-03   1.4681091e-02   1.0141190e-02   2.1316125e-02   5.8141126e-03   1.6474359e-02   1.3064397e-03   2.0347529e-02   5.7992324e-03   9.5368199e-03   2.5369333e-02   2.4565418e-02   1.2896533e-02   
4.4389699e-02   3.9924060e-03   5.8442541e-03   1.0341765e-02   2.9771308e-02   2.0024906e-02   2.0168043e-02   1.0668382e-02   1.0461554e-02   1.6977050e-03   8.0038314e-04   3.2736649e-04   7.3247771e-04   1.7861084e-03   1.2199790e-05   1.6168506e-03   2.0903986e-03   1.2529506e-02   1.9650001e-03   4.3150624e-03   2.0139182e-03   3.1759995e-03   4.1258242e-03   2.0323215e-03   3.5878910e-03   9.4977361e-03   1.5488203e-03   3.0112961e-03   2.7909675e-04   1.8152664e-03   8.3701176e-03   2.5714996e-03   2.7148441e-03   1.0520423e-02   9.2964166e-03   7.5284963e-04   3.9888853e-03   1.8331853e-03   9.4586856e-03   1.6039412e-03   5.4889906e-03   2.8052973e-03   7.3341015e-03   1.2316222e-03   3.2601592e-03   1.0868316e-02   8.8162037e-03   2.9210959e-03   2.1963225e-02   0.0000000e+00   1.8230646e-04   1.5254865e-03   1.2273284e-02   6.9788794e-03   6.3577637e-03   3.5631537e-03   4.1101326e-03   4.2047124e-03   2.6033919e-03   1.7916004e-03   6.7524244e-03   1.5279872e-03   2.3303877e-03   1.3468412e-03   7.9573793e-03   9.8329441e-06   8.0754860e-04   2.3639655e-03   2.4986002e-03   1.3799583e-03   2.6729899e-03   6.2996248e-03   1.0853281e-02   4.8574547e-04   3.9201723e-04   1.4418237e-03   2.9002781e-03   3.0725233e-03   3.0407572e-03   2.9093497e-03   5.1175009e-03   6.6599859e-03   1.5926877e-03   2.5889139e-03   1.3713626e-04   8.5117654e-03   1.9829707e-03   4.3442759e-03   8.7298845e-03   2.1325966e-03   3.5994846e-03   5.1472858e-03   7.9423211e-03   3.5289513e-03   2.0294631e-04   1.1925423e-02   1.6977050e-03   7.8274456e-04   4.8500499e-05   4.9289073e-03   1.8879551e-03   2.5218165e-03   4.9495642e-03   6.1589498e-03   1.4775011e-03   2.8508338e-03   3.8437964e-04   1.0082748e-03   4.3870644e-03   2.7766403e-03   1.3317076e-02   4.6107743e-03   7.0365754e-03   5.0970174e-03   7.0294656e-03   5.8145812e-03   1.6991258e-03   1.6190058e-03   1.4192459e-02   4.5311136e-03   5.3518685e-03   7.5607221e-04   4.5347742e-03   1.1372343e-02   2.1219078e-03   2.3903563e-03   1.2745790e-02   9.1602079e-03   2.9606388e-03   4.3905218e-03   4.7897027e-03   8.0493172e-03   4.4779450e-03   5.1908739e-03   9.8295198e-04   1.2281083e-02   3.7969538e-04   1.7665186e-03   1.0605818e-02   1.1571194e-02   6.2101277e-03   2.7119271e-02   8.0038314e-04   1.4492761e-03   3.6743113e-03   1.7056692e-02   1.1710246e-02   8.2821554e-03   2.2240766e-03   2.2684005e-03   3.0380682e-04   2.0343287e-03   2.8037036e-04   8.3123377e-04   3.9939935e-03   1.6647447e-02   2.9061444e-03   6.1278767e-03   1.1769582e-03   2.2926522e-03   6.3717175e-03   3.9793414e-03   5.6921961e-03   6.6386310e-03   1.6577942e-03   4.6178179e-03   1.2108195e-03   8.4773320e-04   1.1017765e-02   4.7223063e-03   4.9236953e-03   1.3900771e-02   1.3046714e-02   3.7972280e-04   6.5335900e-03   2.3480655e-03   1.3286542e-02   9.3142474e-04   8.4820747e-03   2.8343361e-03   8.2403259e-03   2.5815546e-03   5.4537681e-03   1.4900562e-02   1.1670902e-02   3.7659184e-03   2.5593433e-02   3.2736649e-04   7.7091107e-04   2.6079554e-03   1.4611634e-02   8.0055625e-03   9.0564615e-03   5.9260895e-03   6.5055071e-03   3.8779943e-03   5.7015696e-04   1.7303569e-04   4.1690936e-03   1.6322670e-02   1.9974930e-03   5.0009825e-03   3.2570724e-04   9.5057293e-04   5.8304604e-03   4.8059418e-03   7.5457291e-03   5.1241997e-03   6.9111123e-04   3.7782470e-03   1.7092344e-03   2.5698914e-04   9.5453465e-03   5.5846827e-03   5.6938512e-03   1.2788699e-02   1.3393372e-02   7.3809633e-06   6.7682476e-03   1.3200598e-03   1.4442150e-02   1.8441195e-04   
9.0822620e-03   4.9911804e-03   5.8437761e-03   3.8589879e-03   6.9829557e-03   1.5273413e-02   1.0318402e-02   2.4584659e-03   2.2525564e-02   7.3247771e-04   8.2385517e-04   1.9942911e-03   1.2041849e-02   5.7258386e-03   8.2738022e-03   7.3225251e-03   8.2204997e-03   2.0515682e-03   5.4510656e-03   5.2217407e-03   1.7964531e-02   7.2742632e-03   1.0588415e-02   6.2949332e-03   8.6345788e-03   9.1795241e-03   3.5876856e-03   2.6991302e-03   1.4376485e-02   6.5862079e-03   8.4804528e-03   2.1372463e-03   5.3927480e-03   1.5896874e-02   4.1394106e-03   4.5375167e-03   1.7523452e-02   1.2909742e-02   4.0941134e-03   7.2891356e-03   7.2227860e-03   1.1126344e-02   5.7124492e-03   8.1126160e-03   1.3907276e-04   1.6260954e-02   1.3854449e-03   3.2122771e-03   1.4553993e-02   1.6151019e-02   9.1578470e-03   3.3857588e-02   1.7861084e-03   2.9619943e-03   6.1829720e-03   2.2326762e-02   1.5680260e-02   1.2223630e-02   3.9245535e-03   3.7050303e-03   1.3686449e-03   2.1603250e-03   1.2649428e-02   1.7801502e-03   4.1458992e-03   1.7278919e-03   2.7991042e-03   4.0888611e-03   2.2218452e-03   3.9716339e-03   8.9665631e-03   1.3013529e-03   2.8806398e-03   3.4594790e-04   1.5677728e-03   8.1916354e-03   2.7802616e-03   2.9107124e-03   1.0452698e-02   9.5134829e-03   5.8171764e-04   4.1098444e-03   1.6001879e-03   9.8257537e-03   1.3468918e-03   5.7037585e-03   3.1132824e-03   6.8805675e-03   1.4645264e-03   3.5912866e-03   1.1107885e-02   8.6722521e-03   2.6587395e-03   2.1561220e-02   1.2199790e-05   1.4772036e-04   1.4023821e-03   1.1878859e-02   6.5538274e-03   6.3060622e-03   3.8811059e-03   4.4873121e-03   5.6719205e-03   1.8601099e-02   2.5037679e-03   5.8026549e-03   3.0990295e-05   3.8568498e-04   7.1301713e-03   6.6581271e-03   9.9891912e-03   3.5467136e-03   7.7986655e-04   4.6294369e-03   2.9226786e-03   3.3006174e-05   1.0560037e-02   7.5497742e-03   7.6393818e-03   1.4326258e-02   1.5841028e-02   1.7171643e-04   8.5994033e-03   1.5671556e-03   1.7334506e-02   1.9316046e-05   1.1306536e-02   6.6195881e-03   5.5794173e-03   5.6606012e-03   9.3044548e-03   1.7865075e-02   1.1490070e-02   2.7178516e-03   2.3185889e-02   1.6168506e-03   1.6521970e-03   2.7157547e-03   1.2355858e-02   5.5768992e-03   9.6617637e-03   9.6558503e-03   1.0729574e-02   4.4050618e-03   1.4462579e-03   1.4084514e-03   6.0437228e-03   6.9280724e-03   5.9092958e-04   3.5267150e-04   2.3264461e-03   1.8099026e-02   3.0921484e-03   8.6005594e-04   9.1287231e-04   6.3843974e-03   3.0627943e-03   4.0297343e-04   3.2526954e-04   3.6399798e-03   2.6478661e-03   3.9939935e-03   3.1762278e-04   2.2988741e-03   3.2596799e-03   5.3016678e-03   9.7044163e-04   7.0435695e-03   5.7009823e-03   1.4263218e-03   1.5142545e-03   3.5226184e-03   3.0792135e-03   2.3251410e-03   1.2857609e-02   2.0903986e-03   1.3292907e-03   8.8570158e-04   6.8622586e-03   5.1349839e-03   1.4775225e-03   1.2815928e-03   1.9995407e-03   7.7887386e-03   4.2718762e-03   1.8880208e-02   1.9223691e-02   2.7240761e-03   5.5247078e-03   8.3688264e-03   3.7206770e-02   1.2358468e-02   4.8889738e-03   9.3012979e-03   2.0063076e-02   2.3808756e-03   4.8847228e-03   4.4654367e-03   8.0211221e-04   5.5572260e-04   1.5849918e-02   2.4737465e-03   1.0011166e-02   2.0320189e-03   1.7697241e-02   2.0106232e-03   2.1009310e-02   1.0002251e-02   9.3994441e-03   6.7786672e-03   4.3329436e-04   1.8154672e-03   8.4395042e-03   6.2214471e-03   1.2529506e-02   1.0325253e-02   7.1412079e-03   5.6981926e-03   9.1712120e-03   1.6108637e-03   5.7438691e-03   6.7900644e-03   
6.8888062e-04   2.5080626e-03   2.5613963e-03   1.3274318e-03   2.8814590e-03   6.6724246e-03   1.1044919e-02   5.3338455e-04   3.3999007e-04   1.6762239e-03   3.1000026e-03   2.8256325e-03   3.2387801e-03   3.0867064e-03   4.8730365e-03   6.6378483e-03   1.7822362e-03   2.6462661e-03   1.4123935e-04   8.6230682e-03   2.1318487e-03   4.4281251e-03   9.3233552e-03   1.8673327e-03   3.9343847e-03   5.4568897e-03   7.9015195e-03   3.2859077e-03   1.3180217e-04   1.1320265e-02   1.9650001e-03   9.6799653e-04   7.8282421e-05   4.5166083e-03   1.6331972e-03   2.3904078e-03   5.2224500e-03   6.4841213e-03   5.7642096e-03   5.5885978e-03   3.7048759e-04   3.1695738e-03   7.3434699e-03   1.6768125e-02   2.3743894e-03   1.1932462e-04   3.2200650e-03   6.6976868e-03   7.3134819e-04   3.2564607e-03   2.9664521e-03   1.9437509e-03   4.0525881e-03   4.6657625e-03   1.7256268e-03   1.3667365e-03   6.4082141e-03   5.2167922e-03   3.0823107e-03   1.3133486e-02   1.7068449e-03   5.3434665e-03   5.8085657e-03   4.9261084e-03   9.6684380e-04   7.1816432e-04   6.8418503e-03   4.3150624e-03   2.7358698e-03   7.4246714e-04   2.1999970e-03   1.3715192e-03   6.3897855e-04   5.2531215e-03   6.6567087e-03   1.9959041e-04   7.2877827e-03   7.2348769e-03   1.0891220e-02   3.2459725e-03   7.4122702e-04   4.6749634e-03   3.3669230e-03   7.9583955e-05   1.0437521e-02   8.1436642e-03   8.2061472e-03   1.4337740e-02   1.6319841e-02   3.0462366e-04   9.0085125e-03   1.5214225e-03   1.8068443e-02   2.5934558e-05   1.1842923e-02   7.5545176e-03   5.1162105e-03   6.3606028e-03   1.0107865e-02   1.8360102e-02   1.1411116e-02   2.5943653e-03   2.2590562e-02   2.0139182e-03   1.9469619e-03   2.8165944e-03   1.1880076e-02   5.1539049e-03   9.7568917e-03   1.0422756e-02   1.1596876e-02   7.5663186e-03   8.6488322e-03   1.3133604e-02   3.0107568e-03   7.9406152e-04   4.7466095e-03   4.5711602e-03   4.9936224e-04   9.9296431e-03   9.5749791e-03   9.5587917e-03   1.4081306e-02   1.7234890e-02   8.7040364e-04   9.9040316e-03   1.5012448e-03   1.9639569e-02   3.0179394e-04   1.3010570e-02   1.0161281e-02   3.9519870e-03   8.1767554e-03   1.2080688e-02   1.9279439e-02   1.0989202e-02   2.3266303e-03   2.0713423e-02   3.1759995e-03   2.8067172e-03   3.1032715e-03   1.0490119e-02   4.0786420e-03   9.8110939e-03   1.2277454e-02   1.3709414e-02   1.7582839e-03   4.9489607e-03   1.9934064e-02   3.5016865e-03   3.4697267e-04   2.6310541e-03   8.0617166e-03   9.6411709e-04   1.7064375e-03   1.4666650e-03   1.4745772e-03   2.1108592e-03   5.5260764e-03   5.1802495e-04   2.3341578e-03   3.7167048e-03   6.5626516e-03   1.3180061e-03   1.1565092e-02   3.6658759e-03   3.8514458e-03   3.6514318e-03   2.8125792e-03   9.8545506e-04   1.7565517e-03   7.9513446e-03   4.1258242e-03   2.7177731e-03   1.0483123e-03   3.5558284e-03   3.1649242e-03   2.4110016e-04   3.1231514e-03   4.2303212e-03   8.7046320e-04   1.9803680e-02   4.5057787e-03   2.2875453e-03   8.1473256e-04   7.2784764e-03   5.2841523e-03   3.1936914e-05   6.0579771e-05   5.5427829e-03   2.9996038e-03   4.7111715e-03   6.7018226e-04   3.8206460e-03   2.7379425e-03   6.3961850e-03   9.7000606e-04   5.0455840e-03   8.7517746e-03   5.2181192e-04   4.2086901e-04   3.8740476e-03   5.1909001e-03   4.1846614e-03   1.6912712e-02   2.0323215e-03   1.6814934e-03   2.0140995e-03   1.0246238e-02   8.0709165e-03   2.9533168e-03   3.5001524e-04   7.2402168e-04   2.4525121e-02   8.3786268e-03   5.9730003e-03   2.2241688e-03   1.0490949e-02   1.0052150e-02   8.4481649e-04   1.0306755e-03   9.7104895e-03   
4.7051149e-03   7.5682486e-03   2.5521754e-03   7.8053009e-03   3.0734717e-03   9.8801938e-03   2.2384251e-03   3.6192976e-03   1.5052125e-02   6.1736712e-04   1.0280118e-04   5.5375454e-03   9.7589612e-03   8.6474768e-03   2.4375316e-02   3.5878910e-03   3.8390770e-03   5.3089764e-03   1.6859170e-02   1.4176552e-02   6.5576789e-03   2.5984028e-04   9.3320862e-05   6.7547274e-03   1.5238284e-02   1.2713534e-02   3.0890016e-03   2.3712664e-02   2.1340343e-02   2.1521507e-02   3.0005294e-02   3.3960619e-02   5.2290080e-03   2.3006673e-02   8.7125215e-03   3.6416136e-02   3.8281789e-03   2.7389250e-02   1.5157530e-02   1.2639929e-02   1.7411617e-02   2.3820503e-02   3.6850697e-02   2.5373970e-02   1.0602298e-02   3.7784525e-02   9.4977361e-03   1.0004799e-02   1.1952848e-02   2.3559319e-02   1.3109247e-02   2.3603108e-02   2.4562463e-02   2.6017612e-02   1.7241949e-03   2.0286689e-03   1.1335916e-03   5.6322483e-03   5.1221157e-03   5.0559971e-03   8.5835151e-03   1.0650923e-02   5.5566853e-04   5.1134826e-03   1.3892193e-04   1.2640034e-02   5.6438567e-04   7.4313522e-03   8.3161300e-03   2.5369950e-03   4.7342848e-03   7.3166190e-03   1.2276381e-02   6.3456614e-03   5.8916640e-04   1.5594819e-02   1.5488203e-03   9.3082136e-04   7.5849611e-04   7.0579696e-03   2.4440449e-03   5.1791902e-03   7.3166180e-03   8.5826500e-03   2.1075364e-03   5.4157920e-03   1.3748154e-03   2.4371345e-03   2.2201300e-03   2.6881175e-03   4.1574302e-03   3.5021271e-03   1.4032793e-03   9.1188034e-04   6.1085103e-03   4.1365224e-03   2.7438304e-03   1.0770631e-02   2.1519140e-03   3.9794011e-03   4.6480198e-03   5.1312578e-03   1.6288841e-03   5.4679156e-04   8.7532006e-03   3.0112961e-03   1.7262374e-03   2.9055719e-04   3.3158230e-03   1.8013008e-03   9.2887256e-04   4.2320076e-03   5.4776329e-03   3.2889511e-03   6.5019207e-03   1.1654311e-03   1.2550970e-03   7.9744383e-03   6.4095512e-03   1.6783080e-03   2.2182877e-03   1.9380758e-03   6.5001122e-03   2.8053456e-03   3.3035106e-03   3.3582234e-03   7.0744470e-03   5.6581852e-04   1.8141689e-03   7.7202001e-03   6.7565678e-03   2.7207024e-03   1.9179880e-02   2.7909675e-04   2.2008347e-04   1.1064738e-03   1.0688407e-02   6.6082062e-03   4.4435997e-03   1.9606397e-03   2.4773484e-03   1.1763001e-02   8.2259536e-03   8.3480149e-03   1.5692475e-02   1.7083720e-02   2.8355914e-04   9.4960515e-03   2.0536401e-03   1.8469924e-02   1.0092056e-04   1.2279622e-02   6.4441301e-03   6.4046448e-03   6.0274458e-03   9.8857207e-03   1.9188640e-02   1.2736270e-02   3.3492132e-03   2.4941935e-02   1.8152664e-03   1.9865894e-03   3.3071796e-03   1.3641872e-02   6.4197769e-03   1.0769048e-02   1.0309787e-02   1.1341762e-02   5.1119032e-03   4.6662587e-03   4.6505160e-04   3.2269116e-03   9.0774431e-03   2.5215213e-03   4.0057628e-03   6.1199030e-03   9.7447895e-03   3.5360157e-03   1.8995080e-02   2.6805580e-03   8.6582433e-03   8.1342278e-03   3.7198152e-03   3.9443681e-05   2.6670326e-03   3.4016785e-03   8.3701176e-03   6.1426721e-03   2.9189037e-03   1.0083175e-03   2.2713681e-03   3.9595770e-04   7.2341855e-03   8.8447009e-03   1.1230173e-05   5.1389929e-03   2.4707449e-03   5.4731760e-03   5.1980845e-04   4.3112681e-03   2.1787917e-03   7.2534046e-03   6.7672528e-04   5.6623211e-03   9.1277214e-03   7.3619628e-04   3.7016392e-04   3.2564607e-03   4.9531882e-03   4.5742792e-03   1.6438776e-02   2.5714996e-03   2.1421258e-03   2.3256067e-03   1.0175775e-02   8.4088183e-03   2.7613050e-03   2.4755840e-04   6.0873636e-04   4.6703189e-03   2.2089026e-03   5.5642336e-03   
3.7824549e-04   4.1878969e-03   2.0462491e-03   7.3148116e-03   5.4583143e-04   6.1390745e-03   8.7247882e-03   9.1073145e-04   4.9034572e-04   2.9664521e-03   4.5041455e-03   4.3650355e-03   1.5605807e-02   2.7148441e-03   2.1951828e-03   2.2077213e-03   9.5777719e-03   8.0120192e-03   2.4278861e-03   3.2552080e-04   7.3734328e-04   1.7636357e-03   1.2290813e-02   2.3919392e-03   6.5534766e-03   4.2355076e-03   1.3428854e-02   2.7760250e-03   2.0719142e-02   5.3736931e-03   9.3604766e-03   7.8350168e-03   1.9437509e-03   2.3451546e-04   4.9805974e-03   3.4615227e-03   1.0520423e-02   8.1642247e-03   4.6947450e-03   2.2490758e-03   4.7893620e-03   5.2359029e-04   6.8110246e-03   8.2524171e-03   1.3046714e-02   1.1318623e-03   8.6855001e-03   5.4130623e-04   1.5131426e-02   5.6179632e-04   1.5407719e-02   1.0669118e-02   5.8683852e-03   3.5797271e-03   6.2414181e-05   2.6714308e-03   7.7088950e-03   9.8081718e-03   9.2964166e-03   7.7221233e-03   5.7330158e-03   7.6711675e-03   9.7842038e-03   1.6419007e-03   2.8364539e-03   3.5173281e-03   6.5335900e-03   1.1340078e-03   1.4192754e-02   1.5964523e-04   8.8481511e-03   5.2711173e-03   5.4383051e-03   3.8856537e-03   6.9538378e-03   1.4900562e-02   9.8443835e-03   2.2013577e-03   2.1746585e-02   7.5284963e-04   7.7091107e-04   1.8026328e-03   1.1463429e-02   5.3229428e-03   7.8904169e-03   7.2583306e-03   8.1944055e-03   3.9135288e-03   1.7196090e-03   8.1113282e-03   2.2942459e-04   9.3463740e-03   6.8261058e-03   2.3665724e-03   1.6309596e-03   1.7256268e-03   2.3397606e-03   3.5922701e-03   1.1208568e-02   3.9888853e-03   2.9455145e-03   1.9660659e-03   6.5467152e-03   6.1492489e-03   9.3538874e-04   1.2212079e-03   1.9267388e-03   1.0793365e-02   1.2592720e-03   6.0147390e-03   9.1609131e-03   1.8537817e-03   4.5335508e-03   6.6172179e-03   1.0131803e-02   4.6089934e-03   1.7595473e-04   1.2987364e-02   1.8331853e-03   9.7087402e-04   3.3609260e-04   5.4018686e-03   1.7068395e-03   3.6646912e-03   6.4787894e-03   7.7901480e-03   1.6754655e-02   6.9641544e-04   1.3182224e-02   1.4651385e-02   4.9466239e-03   2.3931163e-03   5.9263439e-04   5.4211666e-03   1.0221921e-02   1.4928620e-02   9.4586856e-03   8.3819492e-03   7.3339215e-03   1.2011676e-02   1.3620169e-02   3.6119007e-03   1.8435886e-03   2.0979826e-03   1.0794235e-02   6.9811950e-03   4.9473584e-03   5.5993500e-03   9.1192858e-03   1.7103671e-02   1.0654852e-02   2.2936354e-03   2.1887320e-02   1.6039412e-03   1.5236521e-03   2.3684718e-03   1.1405606e-02   4.9416620e-03   8.9601309e-03   9.4128890e-03   1.0537337e-02   1.0144176e-02   9.3650956e-03   2.8049928e-03   1.4261619e-03   9.6795720e-04   3.1673467e-03   5.6308712e-03   1.2399657e-02   5.4889906e-03   4.4629844e-03   3.5148873e-03   8.2921709e-03   8.5570702e-03   1.5767832e-03   9.7941910e-04   1.5044401e-03   1.9162648e-02   2.3258011e-03   4.3611697e-03   1.7154674e-02   1.9266334e-02   1.1399040e-02   3.8294804e-02   2.8052973e-03   4.2987486e-03   8.1194560e-03   2.5933333e-02   1.8564970e-02   1.4940871e-02   5.2230072e-03   4.8296564e-03   1.1136786e-02   1.2996602e-02   1.1896568e-02   3.3692870e-03   1.0295085e-03   6.7928621e-03   7.3341015e-03   5.3674586e-03   2.6380919e-03   1.7126363e-03   1.9252416e-05   3.9628066e-03   1.2371798e-02   1.4417093e-02   5.5916770e-04   7.0172680e-03   8.6736288e-03   5.5025710e-03   2.2796596e-02   1.2316222e-03   1.4793666e-03   2.9451077e-03   1.4303806e-02   1.0475492e-02   5.7470026e-03   7.8882742e-04   9.0526661e-04   4.3645694e-03   7.8632956e-03   7.2260963e-03   
2.1331409e-02   3.2601592e-03   3.2514734e-03   4.2300937e-03   1.4385386e-02   1.2160577e-02   5.0215597e-03   4.3279739e-05   5.1818418e-05   3.0767343e-03   9.0015857e-03   9.8042907e-03   1.0868316e-02   9.1698320e-03   6.9442206e-03   8.2610449e-03   1.0963075e-02   2.1355881e-03   3.5431999e-03   4.2180590e-03   3.2143555e-03   3.3819870e-03   8.8162037e-03   6.5643841e-03   3.2940615e-03   1.2995092e-03   2.9075684e-03   3.1897532e-04   6.9331287e-03   8.4863836e-03   1.0164058e-02   2.9210959e-03   1.7078622e-03   4.0814516e-04   3.6297378e-03   8.7693352e-04   2.6826046e-03   6.9132822e-03   8.3825591e-03   2.1963225e-02   1.8183756e-02   1.2010413e-02   1.7351817e-03   6.4958425e-03   5.7545512e-03   1.9704198e-02   2.2181841e-02   1.8230646e-04   1.5254865e-03   1.2273284e-02   6.9788794e-03   6.3577637e-03   3.5631537e-03   4.1101326e-03   6.5324999e-04   9.4729463e-03   5.0293123e-03   4.5622977e-03   3.3814637e-03   4.1233865e-03   5.1615257e-03   2.3326030e-03   2.1577255e-03   4.0325048e-03   5.1426622e-03   1.5351757e-03   2.6422594e-03   1.3282600e-02   1.5461999e-02   3.4307479e-03   1.1531055e-02   1.3518551e-02   4.2918744e-03   5.5398788e-03   8.4122900e-05
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-correlation-ml.txt b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-correlation-ml.txt
new file mode 100644
index 00000000..2a17a2a8
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-correlation-ml.txt
@@ -0,0 +1 @@
+   9.2507465e-01   9.6528566e-01   8.7255441e-01   1.1287379e+00   8.7318727e-01   1.0767102e+00   9.1419676e-01   1.1503304e+00   9.8074509e-01   1.0135025e+00   1.0495025e+00   9.4794536e-01   9.6829273e-01   1.1345767e+00   1.1048008e+00   9.2407796e-01   1.0228634e+00   9.3853195e-01   9.9377619e-01   1.0407662e+00   9.5048989e-01   9.0465688e-01   9.8056930e-01   8.9777156e-01   9.6357127e-01   9.3864452e-01   9.9754613e-01   9.7271356e-01   8.4383151e-01   9.6981983e-01   9.7510267e-01   1.0112663e+00   7.8730400e-01   1.0299498e+00   9.9307979e-01   9.0239520e-01   8.5428231e-01   8.8972742e-01   8.5933162e-01   9.6625934e-01   9.4175449e-01   9.9120729e-01   1.0503963e+00   8.8223053e-01   1.3261434e+00   1.1063209e+00   8.4058398e-01   1.0844267e+00   1.1153093e+00   1.0092643e+00   8.9585237e-01   1.0599818e+00   1.2321707e+00   1.1359624e+00   8.3503556e-01   1.1792243e+00   7.9159781e-01   1.0830419e+00   1.2181870e+00   9.9888500e-01   1.0227144e+00   6.8557277e-01   9.6836193e-01   1.1061227e+00   1.0883453e+00   9.5681974e-01   9.9436299e-01   1.0304323e+00   1.1273949e+00   1.0735563e+00   1.0582583e+00   9.6040272e-01   1.0032137e+00   8.4900547e-01   1.1035351e+00   8.7867480e-01   9.6433176e-01   9.1850122e-01   8.9337435e-01   1.0449390e+00   8.9639384e-01   9.6704971e-01   1.0084258e+00   1.0528587e+00   1.1764481e+00   1.0913280e+00   1.0136672e+00   1.2737156e+00   9.5130359e-01   1.0367909e+00   1.1983402e+00   1.1319901e+00   1.1117462e+00   1.0343695e+00   1.0838628e+00   7.5266057e-01   1.0763316e+00   8.8067924e-01   9.6734383e-01   9.8800551e-01   1.2265742e+00   7.8833055e-01   1.0338670e+00   8.6666625e-01   9.9039950e-01   9.7142684e-01   9.3138616e-01   8.5849977e-01   8.5486301e-01   1.0516028e+00   1.1105313e+00   9.5943505e-01   9.8845171e-01   1.0566288e+00   9.9712198e-01   9.5545756e-01   1.1817974e+00   9.9128482e-01   1.0117892e+00   1.0979115e+00   1.0493943e+00   9.1318848e-01   9.3157311e-01   8.7073304e-01   1.2459441e+00   9.3412689e-01   1.0482297e+00   9.4224032e-01   9.5134153e-01   9.0857493e-01   9.7264161e-01   8.2900820e-01   9.3140549e-01   1.1330242e+00   1.0333002e+00   1.0117861e+00   1.2053255e+00   8.5291396e-01   1.0148928e+00   8.6641379e-01   9.7080819e-01   9.5457159e-01   9.5207457e-01   9.3539674e-01   9.0769069e-01   9.5322590e-01   1.1181803e+00   9.9765614e-01   7.5370610e-01   1.0807114e+00   1.0804601e+00   9.0214124e-01   8.7101998e-01   1.0167435e+00   1.2045936e+00   8.7300539e-01   1.1054300e+00   7.9145574e-01   1.0279340e+00   8.7623462e-01   1.0034756e+00   1.0386933e+00   9.3910970e-01   1.0028455e+00   9.9868824e-01   9.8752945e-01   9.8319327e-01   1.3110209e+00   8.6180633e-01   1.0993856e+00   8.5912563e-01   1.1303979e+00   9.8690459e-01   9.6910090e-01   9.1456819e-01   1.1525339e+00   1.1064552e+00   1.1062255e+00   9.7226683e-01   1.1091447e+00   1.1072238e+00   9.6544444e-01   9.6681036e-01   9.3247685e-01   9.6854634e-01   1.1035119e+00   1.1317148e+00   9.5557793e-01   9.8908485e-01   7.4873648e-01
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-cosine-ml-iris.txt b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-cosine-ml-iris.txt
new file mode 100644
index 00000000..8b705b34
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-cosine-ml-iris.txt
@@ -0,0 +1 @@
+   [dense numeric array payload: several thousand space-separated floating-point values (approx. 1e-05 to 2e-01), apparently packaged test-fixture data]
9.0320856e-02   9.5130800e-02   1.1613190e-01   8.8387927e-02   1.0357744e-01   1.1422195e-01   1.0103671e-01   9.4160592e-02   8.4870839e-02   1.0232030e-01   8.9160088e-02   9.3889889e-02   9.1104413e-02   6.6493231e-02   9.3508454e-02   1.5639991e-01   1.4440871e-01   1.3995325e-01   1.4327783e-01   1.4942424e-01   1.5327636e-01   1.4579380e-01   1.4864466e-01   1.5749945e-01   1.3242397e-01   1.1574188e-01   1.3959357e-01   1.3216131e-01   1.5281767e-01   1.5172347e-01   1.2991524e-01   1.3242882e-01   1.3190251e-01   1.7712896e-01   1.5002220e-01   1.3380600e-01   1.3964037e-01   1.6023126e-01   1.2562692e-01   1.3071698e-01   1.3077115e-01   1.2010966e-01   1.1841338e-01   1.4987755e-01   1.2848062e-01   1.4548859e-01   1.1783301e-01   1.5172346e-01   1.2426441e-01   1.5309487e-01   1.3983154e-01   1.3778354e-01   1.3093423e-01   1.1660453e-01   1.2409289e-01   1.3931059e-01   1.1865130e-01   1.4440871e-01   1.4196632e-01   1.3805465e-01   1.2801436e-01   1.3865562e-01   1.2553921e-01   1.3109454e-01   1.2958547e-01   1.5860612e-03   1.6773969e-03   0.0000000e+00   8.4337656e-04   4.6746407e-04   2.4549978e-03   4.7529836e-03   2.3235808e-03   4.0683267e-03   4.3260986e-03   6.7336618e-04   2.8454658e-03   1.0918918e-03   1.3756658e-03   5.7784546e-04   5.9573290e-02   6.3070670e-02   6.9597309e-02   7.9911457e-02   7.3480528e-02   7.9923883e-02   7.0144874e-02   5.5923876e-02   6.6635620e-02   7.3192589e-02   7.4096565e-02   6.5836734e-02   7.1022277e-02   7.8555696e-02   5.0423423e-02   5.7089619e-02   8.1093473e-02   6.2483167e-02   9.2251714e-02   6.5399221e-02   8.6573432e-02   5.7873871e-02   9.3861710e-02   7.7914479e-02   6.0545459e-02   6.0328179e-02   7.3725736e-02   8.0652769e-02   7.5662466e-02   4.7500835e-02   6.6267157e-02   6.1219907e-02   5.9246920e-02   1.0235525e-01   8.5584812e-02   6.7627185e-02   6.6676399e-02   8.1028522e-02   6.3874534e-02   7.4037098e-02   8.3735875e-02   7.3091049e-02   6.4875922e-02   5.7134332e-02   7.3898502e-02   6.3669341e-02   6.7483654e-02   6.3032151e-02   4.3195391e-02   6.6465430e-02   1.2757303e-01   1.1294171e-01   1.0654531e-01   1.1074736e-01   1.1756538e-01   1.1705185e-01   1.1633100e-01   1.1230305e-01   1.1988948e-01   1.0404710e-01   8.7981667e-02   1.0622547e-01   1.0061669e-01   1.2008497e-01   1.2242153e-01   1.0217012e-01   1.0084187e-01   1.0181343e-01   1.3714147e-01   1.1243585e-01   1.0356517e-01   1.1071692e-01   1.2181923e-01   9.3742899e-02   1.0137566e-01   9.8085445e-02   8.9840814e-02   8.9979544e-02   1.1682155e-01   9.4340727e-02   1.0893096e-01   8.8036209e-02   1.1887723e-01   9.1995894e-02   1.1718099e-01   1.0529554e-01   1.1115622e-01   1.0048991e-01   8.8823715e-02   9.3647258e-02   1.0909883e-01   8.9729548e-02   1.1294171e-01   1.1121254e-01   1.0947844e-01   9.8164553e-02   1.0458423e-01   9.5468337e-02   1.0529433e-01   1.0077315e-01   1.0959804e-05   1.5860612e-03   1.2066237e-03   9.8801305e-04   9.8752232e-04   6.0992006e-03   2.3026455e-03   4.3295194e-03   6.8464966e-03   1.1467011e-03   3.5017117e-03   1.7326793e-03   1.1803657e-03   5.4725577e-04   7.4912594e-02   7.8462634e-02   8.6080465e-02   9.7055695e-02   8.9913940e-02   9.8246196e-02   8.6359614e-02   7.0875411e-02   8.3096610e-02   8.9373603e-02   9.1095332e-02   8.1132274e-02   8.7784959e-02   9.6471287e-02   6.3584445e-02   7.1727351e-02   9.8741504e-02   7.9297358e-02   1.1001947e-01   8.1763635e-02   1.0392038e-01   7.2462318e-02   1.1283140e-01   9.6493172e-02   7.5904241e-02   7.5362083e-02   9.0691159e-02   9.7798442e-02   
9.2550220e-02   6.1191529e-02   8.2519289e-02   7.7118501e-02   7.4418954e-02   1.2241546e-01   1.0373213e-01   8.3271349e-02   8.2617382e-02   9.8302252e-02   7.9806174e-02   9.0742202e-02   1.0274136e-01   9.0294649e-02   8.0841362e-02   7.2046648e-02   9.1054206e-02   8.0166072e-02   8.3952641e-02   7.8850217e-02   5.4962225e-02   8.2583735e-02   1.4771497e-01   1.3277361e-01   1.2596285e-01   1.3150244e-01   1.3764710e-01   1.3814813e-01   1.3643324e-01   1.3364709e-01   1.4127174e-01   1.2228442e-01   1.0501430e-01   1.2555100e-01   1.1896947e-01   1.3983185e-01   1.4080838e-01   1.1967635e-01   1.2050783e-01   1.2165153e-01   1.5932394e-01   1.3324590e-01   1.2180922e-01   1.2960297e-01   1.4356572e-01   1.1163860e-01   1.2028439e-01   1.1797002e-01   1.0728569e-01   1.0776505e-01   1.3685139e-01   1.1414077e-01   1.2924282e-01   1.0675126e-01   1.3867943e-01   1.1135088e-01   1.3991357e-01   1.2389778e-01   1.2963231e-01   1.2019755e-01   1.0634263e-01   1.1117491e-01   1.2727853e-01   1.0546335e-01   1.3277361e-01   1.3050496e-01   1.2753116e-01   1.1493738e-01   1.2310368e-01   1.1333277e-01   1.2330887e-01   1.1999903e-01   1.6773969e-03   1.4044930e-03   1.1476188e-03   1.1728210e-03   6.0850239e-03   2.5611449e-03   4.7720148e-03   7.3487783e-03   1.2981958e-03   3.8044610e-03   1.9624179e-03   1.3573639e-03   6.7050163e-04   7.5875383e-02   7.9591742e-02   8.7130965e-02   9.8136915e-02   9.0968214e-02   9.9474916e-02   8.7612410e-02   7.1886206e-02   8.4072967e-02   9.0657173e-02   9.2037661e-02   8.2322796e-02   8.8559519e-02   9.7661149e-02   6.4636714e-02   7.2692172e-02   1.0010765e-01   8.0266168e-02   1.1103798e-01   8.2745924e-02   1.0537406e-01   7.3430328e-02   1.1396572e-01   9.7599809e-02   7.6869874e-02   7.6340270e-02   9.1668858e-02   9.8974404e-02   9.3760611e-02   6.2005453e-02   8.3489209e-02   7.8020463e-02   7.5407355e-02   1.2375637e-01   1.0517097e-01   8.4594299e-02   8.3682974e-02   9.9214366e-02   8.1008204e-02   9.1862911e-02   1.0394615e-01   9.1478420e-02   8.1837398e-02   7.2994499e-02   9.2227696e-02   8.1322109e-02   8.5126430e-02   7.9879628e-02   5.5860810e-02   8.3714500e-02   1.4946173e-01   1.3427629e-01   1.2730802e-01   1.3293539e-01   1.3918009e-01   1.3947497e-01   1.3805161e-01   1.3491280e-01   1.4255824e-01   1.2381647e-01   1.0639120e-01   1.2689399e-01   1.2032986e-01   1.4134878e-01   1.4247553e-01   1.2120787e-01   1.2187456e-01   1.2309328e-01   1.6066767e-01   1.3444738e-01   1.2325830e-01   1.3118369e-01   1.4483072e-01   1.1290137e-01   1.2175315e-01   1.1925251e-01   1.0857610e-01   1.0914142e-01   1.3832403e-01   1.1530308e-01   1.3045824e-01   1.0804612e-01   1.4018015e-01   1.1257905e-01   1.4124338e-01   1.2516443e-01   1.3130183e-01   1.2160995e-01   1.0773181e-01   1.1250108e-01   1.2878318e-01   1.0678720e-01   1.3427629e-01   1.3201801e-01   1.2910620e-01   1.1632723e-01   1.2438544e-01   1.1469977e-01   1.2494953e-01   1.2148287e-01   8.4337656e-04   4.6746407e-04   2.4549978e-03   4.7529836e-03   2.3235808e-03   4.0683267e-03   4.3260986e-03   6.7336618e-04   2.8454658e-03   1.0918918e-03   1.3756658e-03   5.7784546e-04   5.9573290e-02   6.3070670e-02   6.9597309e-02   7.9911457e-02   7.3480528e-02   7.9923883e-02   7.0144874e-02   5.5923876e-02   6.6635620e-02   7.3192589e-02   7.4096565e-02   6.5836734e-02   7.1022277e-02   7.8555696e-02   5.0423423e-02   5.7089619e-02   8.1093473e-02   6.2483167e-02   9.2251714e-02   6.5399221e-02   8.6573432e-02   5.7873871e-02   9.3861710e-02   7.7914479e-02   6.0545459e-02   
6.0328179e-02   7.3725736e-02   8.0652769e-02   7.5662466e-02   4.7500835e-02   6.6267157e-02   6.1219907e-02   5.9246920e-02   1.0235525e-01   8.5584812e-02   6.7627185e-02   6.6676399e-02   8.1028522e-02   6.3874534e-02   7.4037098e-02   8.3735875e-02   7.3091049e-02   6.4875922e-02   5.7134332e-02   7.3898502e-02   6.3669341e-02   6.7483654e-02   6.3032151e-02   4.3195391e-02   6.6465430e-02   1.2757303e-01   1.1294171e-01   1.0654531e-01   1.1074736e-01   1.1756538e-01   1.1705185e-01   1.1633100e-01   1.1230305e-01   1.1988948e-01   1.0404710e-01   8.7981667e-02   1.0622547e-01   1.0061669e-01   1.2008497e-01   1.2242153e-01   1.0217012e-01   1.0084187e-01   1.0181343e-01   1.3714147e-01   1.1243585e-01   1.0356517e-01   1.1071692e-01   1.2181923e-01   9.3742899e-02   1.0137566e-01   9.8085445e-02   8.9840814e-02   8.9979544e-02   1.1682155e-01   9.4340727e-02   1.0893096e-01   8.8036209e-02   1.1887723e-01   9.1995894e-02   1.1718099e-01   1.0529554e-01   1.1115622e-01   1.0048991e-01   8.8823715e-02   9.3647258e-02   1.0909883e-01   8.9729548e-02   1.1294171e-01   1.1121254e-01   1.0947844e-01   9.8164553e-02   1.0458423e-01   9.5468337e-02   1.0529433e-01   1.0077315e-01   6.2742331e-05   5.7500583e-04   7.7277880e-03   4.4752900e-04   1.9151463e-03   2.3881652e-03   8.6221368e-04   8.6997251e-04   5.7712764e-05   1.4261239e-04   1.6337476e-04   6.5357511e-02   6.7564930e-02   7.5599895e-02   8.6482524e-02   7.9693502e-02   8.5392044e-02   7.4152863e-02   6.0880360e-02   7.3094884e-02   7.7108408e-02   8.1483729e-02   6.9905750e-02   7.9537736e-02   8.4220614e-02   5.4020845e-02   6.2478782e-02   8.5095725e-02   6.8776984e-02   1.0023818e-01   7.1693803e-02   8.9974847e-02   6.3277271e-02   1.0126704e-01   8.4456284e-02   6.6380508e-02   6.5944304e-02   8.0776371e-02   8.6401760e-02   8.0678654e-02   5.3232214e-02   7.2705055e-02   6.7807498e-02   6.4728976e-02   1.0860574e-01   8.9250516e-02   7.0532132e-02   7.2192370e-02   8.9155393e-02   6.7825786e-02   7.9751674e-02   8.9848634e-02   7.8255825e-02   7.0908403e-02   6.2757363e-02   7.9213209e-02   6.8195297e-02   7.2146158e-02   6.8587288e-02   4.7220467e-02   7.1390990e-02   1.3114639e-01   1.1816523e-01   1.1284157e-01   1.1675611e-01   1.2280854e-01   1.2450964e-01   1.2061835e-01   1.2011904e-01   1.2793055e-01   1.0801409e-01   9.2203467e-02   1.1250005e-01   1.0614185e-01   1.2553294e-01   1.2604996e-01   1.0581394e-01   1.0665851e-01   1.0697400e-01   1.4569576e-01   1.2071369e-01   1.0835453e-01   1.1475367e-01   1.3023667e-01   9.9676180e-02   1.0601570e-01   1.0459446e-01   9.5152798e-02   9.4544535e-02   1.2261107e-01   1.0171679e-01   1.1677938e-01   9.3513090e-02   1.2443770e-01   9.8519974e-02   1.2493623e-01   1.1202254e-01   1.1414514e-01   1.0583616e-01   9.3110547e-02   9.8863035e-02   1.1361961e-01   9.4163495e-02   1.1816523e-01   1.1608967e-01   1.1325987e-01   1.0277556e-01   1.1111172e-01   1.0049112e-01   1.0810161e-01   1.0529258e-01   8.3201047e-04   6.6553558e-03   8.0817319e-04   2.3666253e-03   2.9401972e-03   6.0430641e-04   1.3109323e-03   2.0042209e-04   2.9042771e-04   6.4823857e-05   6.4369752e-02   6.6980839e-02   7.4622998e-02   8.5371818e-02   7.8644884e-02   8.4714120e-02   7.3778054e-02   6.0124143e-02   7.1976215e-02   7.6757894e-02   8.0112003e-02   6.9445381e-02   7.7800626e-02   8.3451231e-02   5.3558729e-02   6.1563285e-02   8.4824350e-02   6.7739660e-02   9.8727492e-02   7.0619990e-02   8.9865595e-02   6.2353086e-02   1.0002589e-01   8.3464224e-02   6.5377675e-02   6.4985443e-02   
7.9506921e-02   8.5546781e-02   8.0035925e-02   5.2147031e-02   7.1577598e-02   6.6610625e-02   6.3822994e-02   1.0780225e-01   8.9119904e-02   7.0461434e-02   7.1328305e-02   8.7570443e-02   6.7451743e-02   7.8878318e-02   8.9018945e-02   7.7592264e-02   6.9886540e-02   6.1789581e-02   7.8498655e-02   6.7684600e-02   7.1587759e-02   6.7704325e-02   4.6526712e-02   7.0724596e-02   1.3117528e-01   1.1766016e-01   1.1197322e-01   1.1607657e-01   1.2231416e-01   1.2340084e-01   1.2042522e-01   1.1891967e-01   1.2666041e-01   1.0778594e-01   9.1812847e-02   1.1163169e-01   1.0543690e-01   1.2494830e-01   1.2593147e-01   1.0563286e-01   1.0596141e-01   1.0649455e-01   1.4431904e-01   1.1933022e-01   1.0786987e-01   1.1454955e-01   1.2887601e-01   9.8812977e-02   1.0562884e-01   1.0369928e-01   9.4451580e-02   9.4102215e-02   1.2194237e-01   1.0054711e-01   1.1549365e-01   9.2857172e-02   1.2382256e-01   9.7583400e-02   1.2385907e-01   1.1095947e-01   1.1423829e-01   1.0528942e-01   9.2735747e-02   9.8196145e-02   1.1321168e-01   9.3608756e-02   1.1766016e-01   1.1565299e-01   1.1307354e-01   1.0223904e-01   1.1010492e-01   9.9908851e-02   1.0821954e-01   1.0496782e-01   9.9295890e-03   5.3473138e-04   2.1693539e-03   3.7076821e-03   1.8222092e-03   1.2380588e-03   7.0429726e-04   3.2576994e-04   6.7027380e-04   7.5169669e-02   7.7062740e-02   8.6033367e-02   9.7417771e-02   9.0184218e-02   9.6534643e-02   8.3912481e-02   7.0270670e-02   8.3626528e-02   8.6844061e-02   9.2545497e-02   7.9257265e-02   9.0777176e-02   9.5235076e-02   6.2208659e-02   7.1858472e-02   9.5535449e-02   7.9390740e-02   1.1189093e-01   8.2130752e-02   1.0013922e-01   7.2643917e-02   1.1330079e-01   9.5998078e-02   7.6221141e-02   7.5580594e-02   9.1730689e-02   9.7114882e-02   9.1048046e-02   6.2224749e-02   8.3137120e-02   7.8094271e-02   7.4382594e-02   1.2083561e-01   9.9831872e-02   7.9711043e-02   8.2237066e-02   1.0057287e-01   7.7408745e-02   9.0227459e-02   1.0148848e-01   8.8790024e-02   8.1095637e-02   7.2322440e-02   8.9773416e-02   7.8184960e-02   8.2186791e-02   7.8567440e-02   5.4863235e-02   8.1347337e-02   1.4275245e-01   1.3005259e-01   1.2482271e-01   1.2925722e-01   1.3482920e-01   1.3759096e-01   1.3236924e-01   1.3338627e-01   1.4130888e-01   1.1880967e-01   1.0247320e-01   1.2443347e-01   1.1741517e-01   1.3747488e-01   1.3688510e-01   1.1618864e-01   1.1858835e-01   1.1879679e-01   1.5963135e-01   1.3387511e-01   1.1938532e-01   1.2588037e-01   1.4388488e-01   1.1083228e-01   1.1728514e-01   1.1680010e-01   1.0591970e-01   1.0525056e-01   1.3476053e-01   1.1410719e-01   1.2959057e-01   1.0487194e-01   1.3643100e-01   1.1046993e-01   1.3880755e-01   1.2375874e-01   1.2479172e-01   1.1764923e-01   1.0361548e-01   1.0965651e-01   1.2456830e-01   1.0392711e-01   1.3005259e-01   1.2763609e-01   1.2394348e-01   1.1308481e-01   1.2275352e-01   1.1138648e-01   1.1846972e-01   1.1666230e-01   1.1839370e-02   9.5901133e-03   1.3740734e-02   3.5338001e-03   1.3550011e-02   8.8526707e-03   9.4699409e-03   6.3350154e-03   4.8467987e-02   5.3749987e-02   5.7717889e-02   6.5676566e-02   6.0050880e-02   7.0693363e-02   6.1877604e-02   4.6729706e-02   5.4665912e-02   6.4242849e-02   5.9590174e-02   5.6493218e-02   5.5132102e-02   6.8254515e-02   4.1974046e-02   4.5979775e-02   7.3375693e-02   5.2912711e-02   7.3889063e-02   5.3879214e-02   7.8254066e-02   4.6403740e-02   7.8645675e-02   6.7578949e-02   4.9098308e-02   4.8671019e-02   5.9827034e-02   6.7861186e-02   6.5136324e-02   3.6759287e-02   5.3980965e-02   
4.9365653e-02   4.8462917e-02   9.0010105e-02   7.8830145e-02   6.1496889e-02   5.5380536e-02   6.4107134e-02   5.6871111e-02   6.2034296e-02   7.3759034e-02   6.3674900e-02   5.3119604e-02   4.6143136e-02   6.3811294e-02   5.6759011e-02   5.9030859e-02   5.2419966e-02   3.3258744e-02   5.6880106e-02   1.1655832e-01   1.0005043e-01   9.1684275e-02   9.8727002e-02   1.0425490e-01   1.0138126e-01   1.0565129e-01   9.7538138e-02   1.0314095e-01   9.2386824e-02   7.6571607e-02   9.1274575e-02   8.6336607e-02   1.0505203e-01   1.0829773e-01   9.0077109e-02   8.8858206e-02   9.1549427e-02   1.1781623e-01   9.5530481e-02   9.0068051e-02   9.8963246e-02   1.0479240e-01   7.9118056e-02   9.0207341e-02   8.5766485e-02   7.6442113e-02   7.8991975e-02   1.0229780e-01   8.0971468e-02   9.2462279e-02   7.7616682e-02   1.0394880e-01   7.9854175e-02   1.0493060e-01   8.8086863e-02   1.0105189e-01   8.9771099e-02   7.8157448e-02   7.9782471e-02   9.4953430e-02   7.4768636e-02   1.0005043e-01   9.8253215e-02   9.6744777e-02   8.3113340e-02   8.7744069e-02   8.2353939e-02   9.5830913e-02   9.0799350e-02   2.1341759e-03   1.9110407e-03   2.4747400e-03   1.4759127e-04   2.6111423e-04   2.1389787e-04   9.9338048e-04   7.1511570e-02   7.2884660e-02   8.1954557e-02   9.3478761e-02   8.6419800e-02   9.0995738e-02   7.9065079e-02   6.6366318e-02   7.9643396e-02   8.2112775e-02   8.8849792e-02   7.5066735e-02   8.7600830e-02   9.0111864e-02   5.8883625e-02   6.8498056e-02   8.9863635e-02   7.4803608e-02   1.0853136e-01   7.8097142e-02   9.4627165e-02   6.9370599e-02   1.0871906e-01   9.0676026e-02   7.2617660e-02   7.2144076e-02   8.7900274e-02   9.2810262e-02   8.6387249e-02   5.9335633e-02   7.9309675e-02   7.4397587e-02   7.0710353e-02   1.1504320e-01   9.3692385e-02   7.4629165e-02   7.8263498e-02   9.7248749e-02   7.2487054e-02   8.6011897e-02   9.5823155e-02   8.3806772e-02   7.7264691e-02   6.8838121e-02   8.4948991e-02   7.3047820e-02   7.7341294e-02   7.4548923e-02   5.2558496e-02   7.6909140e-02   1.3627575e-01   1.2430643e-01   1.1978148e-01   1.2301688e-01   1.2902659e-01   1.3200226e-01   1.2597225e-01   1.2757685e-01   1.3584109e-01   1.1348127e-01   9.7761057e-02   1.1945082e-01   1.1270375e-01   1.3215934e-01   1.3182973e-01   1.1125522e-01   1.1287096e-01   1.1260483e-01   1.5425448e-01   1.2876341e-01   1.1448536e-01   1.2024528e-01   1.3833407e-01   1.0647529e-01   1.1163750e-01   1.1113351e-01   1.0149473e-01   1.0013701e-01   1.2927004e-01   1.0879101e-01   1.2459621e-01   9.9330508e-02   1.3108770e-01   1.0504760e-01   1.3186075e-01   1.1958837e-01   1.1892921e-01   1.1162833e-01   9.8541878e-02   1.0525098e-01   1.1976536e-01   1.0049727e-01   1.2430643e-01   1.2212353e-01   1.1885667e-01   1.0916600e-01   1.1853397e-01   1.0665478e-01   1.1270995e-01   1.1063603e-01   1.5750019e-03   2.3061943e-03   2.5267586e-03   1.8816551e-03   2.4916769e-03   2.6490348e-03   5.6749564e-02   5.7171493e-02   6.5939847e-02   7.5932398e-02   6.9508351e-02   7.4656451e-02   6.2624290e-02   5.2024188e-02   6.4437230e-02   6.4937043e-02   7.2745199e-02   5.8611781e-02   7.2652586e-02   7.3573688e-02   4.4171072e-02   5.3594578e-02   7.2641168e-02   6.1050608e-02   8.9566976e-02   6.3051224e-02   7.6010398e-02   5.4225638e-02   9.0321512e-02   7.5195114e-02   5.7639533e-02   5.6852233e-02   7.1710391e-02   7.4949900e-02   6.9318916e-02   4.6230253e-02   6.3971485e-02   6.0026157e-02   5.5798348e-02   9.5989279e-02   7.6232404e-02   5.8444003e-02   6.2302257e-02   8.0224536e-02   5.7278169e-02   6.9150783e-02   
7.9467477e-02   6.7704503e-02   6.1864652e-02   5.4241616e-02   6.8592620e-02   5.8516944e-02   6.1759106e-02   5.9360348e-02   3.8591759e-02   6.1151978e-02   1.1324556e-01   1.0303819e-01   9.9201558e-02   1.0318500e-01   1.0721879e-01   1.1147036e-01   1.0460948e-01   1.0828938e-01   1.1523751e-01   9.2280249e-02   7.8300492e-02   9.8832112e-02   9.2088327e-02   1.0954453e-01   1.0774163e-01   8.9704304e-02   9.3649245e-02   9.3477002e-02   1.3171389e-01   1.0896793e-01   9.3248418e-02   9.8534682e-02   1.1788866e-01   8.6730988e-02   9.1550165e-02   9.2743651e-02   8.2023117e-02   8.1036634e-02   1.0749375e-01   9.1211084e-02   1.0479473e-01   8.1639608e-02   1.0872731e-01   8.7298316e-02   1.1342725e-01   9.8507274e-02   9.7018335e-02   9.2572028e-02   7.9424005e-02   8.5123740e-02   9.7519007e-02   7.9504622e-02   1.0303819e-01   1.0060889e-01   9.6540999e-02   8.7528391e-02   9.7464514e-02   8.6517419e-02   9.1407615e-02   9.1076579e-02   4.2759888e-03   1.3998823e-03   1.8495751e-03   2.8301386e-03   3.7334484e-03   5.5009109e-02   5.4964037e-02   6.3820777e-02   7.4331569e-02   6.8062522e-02   7.0406681e-02   5.9595322e-02   4.9880526e-02   6.2237804e-02   6.2265151e-02   7.1113130e-02   5.6594278e-02   7.1451534e-02   6.9994053e-02   4.3126676e-02   5.2381128e-02   6.8520339e-02   5.7597897e-02   8.9058557e-02   6.0751652e-02   7.2510768e-02   5.3201106e-02   8.7849614e-02   7.0985642e-02   5.6028788e-02   5.5591108e-02   6.9920676e-02   7.2931234e-02   6.6633349e-02   4.5295849e-02   6.2044642e-02   5.8063752e-02   5.4097801e-02   9.1903182e-02   7.1473709e-02   5.5091248e-02   6.0376704e-02   7.9310761e-02   5.3854777e-02   6.7042056e-02   7.4968513e-02   6.4284932e-02   5.9983090e-02   5.2745182e-02   6.5463515e-02   5.4553279e-02   5.8476643e-02   5.7183259e-02   3.8909009e-02   5.8515093e-02   1.0925308e-01   9.9624317e-02   9.6622305e-02   9.8607255e-02   1.0383961e-01   1.0792916e-01   1.0029496e-01   1.0408880e-01   1.1184636e-01   8.9608429e-02   7.6157674e-02   9.6354194e-02   9.0095873e-02   1.0708448e-01   1.0618312e-01   8.7751513e-02   8.9756663e-02   8.8888346e-02   1.2885985e-01   1.0591974e-01   9.1173391e-02   9.5499762e-02   1.1429851e-01   8.5048829e-02   8.8069568e-02   8.8675858e-02   8.0228990e-02   7.8160661e-02   1.0454059e-01   8.7419578e-02   1.0196835e-01   7.7696889e-02   1.0615601e-01   8.3458946e-02   1.0727792e-01   9.7348785e-02   9.3770662e-02   8.8214646e-02   7.6638467e-02   8.3519036e-02   9.5849316e-02   7.9674217e-02   9.9624317e-02   9.7643485e-02   9.4514516e-02   8.7075933e-02   9.6245490e-02   8.4428758e-02   8.8195121e-02   8.6900395e-02   3.3656636e-03   1.2813341e-03   1.5508786e-03   5.5126016e-04   5.7906981e-02   6.0932190e-02   6.7790041e-02   7.7661589e-02   7.1242410e-02   7.8649743e-02   6.7963400e-02   5.4275670e-02   6.5215187e-02   7.0651008e-02   7.2495968e-02   6.3298898e-02   6.9915496e-02   7.7039648e-02   4.7868354e-02   5.5074415e-02   7.9095397e-02   6.1872293e-02   8.9630734e-02   6.4010779e-02   8.3786653e-02   5.5727300e-02   9.1943867e-02   7.7179306e-02   5.8787276e-02   5.8290887e-02   7.2059853e-02   7.8226343e-02   7.3481150e-02   4.5978845e-02   6.4704253e-02   5.9980041e-02   5.7435326e-02   1.0050646e-01   8.3655459e-02   6.5308029e-02   6.4668898e-02   7.9130910e-02   6.2159352e-02   7.1908147e-02   8.2739305e-02   7.1488143e-02   6.3159434e-02   5.5376396e-02   7.2163727e-02   6.2508559e-02   6.5824794e-02   6.1340761e-02   4.0472995e-02   6.4599849e-02   1.2380425e-01   1.0993395e-01   1.0373859e-01   
1.0879322e-01   1.1440237e-01   1.1495444e-01   1.1335001e-01   1.1089530e-01   1.1789001e-01   1.0041407e-01   8.4708778e-02   1.0336455e-01   9.7357032e-02   1.1642404e-01   1.1748667e-01   9.8081847e-02   9.8747663e-02   9.9805801e-02   1.3456454e-01   1.1060300e-01   9.9947092e-02   1.0709744e-01   1.2004751e-01   9.0729860e-02   9.8544296e-02   9.6489030e-02   8.6759859e-02   8.7175826e-02   1.1367288e-01   9.3122372e-02   1.0688813e-01   8.6280635e-02   1.1536030e-01   9.0496365e-02   1.1670380e-01   1.0195807e-01   1.0724940e-01   9.8472199e-02   8.5899722e-02   9.0288199e-02   1.0497525e-01   8.5251170e-02   1.0993395e-01   1.0787024e-01   1.0524763e-01   9.3789157e-02   1.0121309e-01   9.2226653e-02   1.0148816e-01   9.8306001e-02   5.0782435e-04   6.2020689e-04   1.6756986e-03   7.0476887e-02   7.1501157e-02   8.0709837e-02   9.2336294e-02   8.5349594e-02   8.8954923e-02   7.7301298e-02   6.5155686e-02   7.8476830e-02   8.0402665e-02   8.7884855e-02   7.3650267e-02   8.6974315e-02   8.8299731e-02   5.7895678e-02   6.7595783e-02   8.7665774e-02   7.3314500e-02   1.0776923e-01   7.6895020e-02   9.2474273e-02   6.8514181e-02   1.0728576e-01   8.8812805e-02   7.1614033e-02   7.1214401e-02   8.6845407e-02   9.1428310e-02   8.4778392e-02   5.8702706e-02   7.8223013e-02   7.3389903e-02   6.9650470e-02   1.1290739e-01   9.1229062e-02   7.2678536e-02   7.7045799e-02   9.6518176e-02   7.0683794e-02   8.4677029e-02   9.3753287e-02   8.2039996e-02   7.6152676e-02   6.7885263e-02   8.3273481e-02   7.1166737e-02   7.5618620e-02   7.3311647e-02   5.2129744e-02   7.5414559e-02   1.3361288e-01   1.2212885e-01   1.1803753e-01   1.2062983e-01   1.2681873e-01   1.3003785e-01   1.2339855e-01   1.2552093e-01   1.3397302e-01   1.1145011e-01   9.6072866e-02   1.1773621e-01   1.1108813e-01   1.3021606e-01   1.2991222e-01   1.0941117e-01   1.1074904e-01   1.1019635e-01   1.5245649e-01   1.2709803e-01   1.1272646e-01   1.1805571e-01   1.3644429e-01   1.0507143e-01   1.0948152e-01   1.0908044e-01   1.0002018e-01   9.8259510e-02   1.2725753e-01   1.0697349e-01   1.2296793e-01   9.7285465e-02   1.2913723e-01   1.0312519e-01   1.2920697e-01   1.1832381e-01   1.1655849e-01   1.0932054e-01   9.6669076e-02   1.0377916e-01   1.1803847e-01   9.9488230e-02   1.2212885e-01   1.2004765e-01   1.1693776e-01   1.0790940e-01   1.1723226e-01   1.0500092e-01   1.1038445e-01   1.0827603e-01   1.5232780e-04   4.0007181e-04   6.5289213e-02   6.7204649e-02   7.5443787e-02   8.6444762e-02   7.9660759e-02   8.4749914e-02   7.3546883e-02   6.0641799e-02   7.3012359e-02   7.6531840e-02   8.1598668e-02   6.9499172e-02   7.9923854e-02   8.3724381e-02   5.3799024e-02   6.2453508e-02   8.4253942e-02   6.8486788e-02   1.0054294e-01   7.1576362e-02   8.9122210e-02   6.3281093e-02   1.0114322e-01   8.3993466e-02   6.6335786e-02   6.5925957e-02   8.0814161e-02   8.6165539e-02   8.0247902e-02   5.3368917e-02   7.2666724e-02   6.7816556e-02   6.4623203e-02   1.0800053e-01   8.8235002e-02   6.9728892e-02   7.2010954e-02   8.9470933e-02   6.7181463e-02   7.9529500e-02   8.9243318e-02   7.7739806e-02   7.0823200e-02   6.2720078e-02   7.8762930e-02   6.7548169e-02   7.1608713e-02   6.8390856e-02   4.7339693e-02   7.1004341e-02   1.3006516e-01   1.1747357e-01   1.1247378e-01   1.1599806e-01   1.2210677e-01   1.2411360e-01   1.1962941e-01   1.1968938e-01   1.2763702e-01   1.0730056e-01   9.1685935e-02   1.1214725e-01   1.0578284e-01   1.2500364e-01   1.2540001e-01   1.0518188e-01   1.0602704e-01   1.0612761e-01   1.4550231e-01   1.2054520e-01   
1.0786099e-01   1.1396219e-01   1.2996046e-01   9.9460763e-02   1.0527691e-01   1.0405039e-01   9.4844256e-02   9.3945852e-02   1.2206303e-01   1.0136794e-01   1.1659636e-01   9.2882126e-02   1.2391115e-01   9.8044098e-02   1.2415930e-01   1.1196299e-01   1.1316845e-01   1.0506061e-01   9.2491851e-02   9.8554383e-02   1.1313163e-01   9.4057839e-02   1.1747357e-01   1.1542880e-01   1.1260458e-01   1.0255212e-01   1.1101490e-01   1.0006494e-01   1.0712947e-01   1.0442482e-01   3.2637736e-04   7.1228332e-02   7.3360309e-02   8.1848237e-02   9.3197783e-02   8.6156854e-02   9.1733116e-02   8.0052769e-02   6.6468809e-02   7.9277125e-02   8.3130576e-02   8.8073461e-02   7.5751270e-02   8.6114214e-02   9.0610516e-02   5.9264749e-02   6.8242422e-02   9.1255768e-02   7.4667198e-02   1.0756842e-01   7.7803425e-02   9.6260495e-02   6.9087389e-02   1.0845861e-01   9.0870006e-02   7.2305626e-02   7.1861030e-02   8.7311178e-02   9.3009359e-02   8.6960677e-02   5.8641082e-02   7.8892851e-02   7.3809649e-02   7.0554233e-02   1.1577001e-01   9.5424873e-02   7.6110400e-02   7.8288785e-02   9.6097634e-02   7.3451146e-02   8.6123272e-02   9.6377446e-02   8.4403314e-02   7.7003085e-02   6.8529959e-02   8.5437768e-02   7.3849362e-02   7.8032414e-02   7.4531055e-02   5.2299301e-02   7.7339214e-02   1.3853845e-01   1.2553252e-01   1.2026484e-01   1.2407451e-01   1.3030382e-01   1.3228892e-01   1.2783055e-01   1.2774776e-01   1.3586522e-01   1.1498861e-01   9.8800101e-02   1.1991948e-01   1.1333278e-01   1.3320266e-01   1.3352807e-01   1.1273476e-01   1.1373642e-01   1.1391093e-01   1.5417322e-01   1.2849633e-01   1.1550662e-01   1.2189074e-01   1.3824515e-01   1.0674666e-01   1.1296262e-01   1.1166908e-01   1.0200786e-01   1.0119861e-01   1.3020916e-01   1.0880524e-01   1.2443747e-01   1.0015009e-01   1.3208523e-01   1.0543261e-01   1.3248948e-01   1.1957063e-01   1.2107326e-01   1.1278773e-01   9.9690412e-02   1.0583253e-01   1.2090813e-01   1.0100447e-01   1.2553252e-01   1.2339293e-01   1.2039941e-01   1.0985773e-01   1.1861026e-01   1.0744824e-01   1.1483872e-01   1.1213527e-01   6.6878313e-02   6.9715409e-02   7.7364528e-02   8.8171063e-02   8.1336125e-02   8.7984980e-02   7.6788970e-02   6.2680317e-02   7.4638540e-02   7.9773718e-02   8.2744330e-02   7.2225777e-02   8.0191157e-02   8.6590874e-02   5.5913229e-02   6.3969698e-02   8.8177048e-02   7.0519412e-02   1.0143390e-01   7.3287383e-02   9.3242485e-02   6.4744474e-02   1.0311960e-01   8.6619094e-02   6.7881619e-02   6.7447267e-02   8.2187438e-02   8.8483348e-02   8.3036483e-02   5.4275541e-02   7.4193034e-02   6.9119612e-02   6.6341718e-02   1.1134910e-01   9.2648103e-02   7.3522039e-02   7.4021876e-02   9.0141442e-02   7.0408618e-02   8.1718494e-02   9.2345175e-02   8.0646828e-02   7.2501162e-02   6.4223536e-02   8.1515678e-02   7.0682605e-02   7.4552513e-02   7.0364536e-02   4.8478235e-02   7.3560525e-02   1.3517901e-01   1.2131760e-01   1.1535330e-01   1.1982809e-01   1.2602646e-01   1.2698846e-01   1.2430464e-01   1.2251062e-01   1.3021845e-01   1.1127258e-01   9.4973617e-02   1.1499323e-01   1.0869119e-01   1.2854331e-01   1.2950169e-01   1.0899747e-01   1.0948608e-01   1.1017110e-01   1.4797664e-01   1.2271397e-01   1.1121974e-01   1.1817834e-01   1.3245873e-01   1.0189203e-01   1.0916690e-01   1.0716280e-01   9.7527876e-02   9.7386912e-02   1.2555051e-01   1.0384747e-01   1.1883003e-01   9.6216108e-02   1.2741273e-01   1.0091958e-01   1.2779606e-01   1.1407020e-01   1.1794445e-01   1.0890370e-01   9.6003260e-02   1.0130628e-01   1.1658801e-01   
9.6417670e-02   1.2131760e-01   1.1923852e-01   1.1654354e-01   1.0526503e-01   1.1322934e-01   1.0313209e-01   1.1184758e-01   1.0860395e-01   7.4548764e-04   4.1909104e-04   1.5729317e-03   7.9277531e-04   2.5985333e-03   2.1157871e-03   2.6363318e-04   2.7214324e-04   2.5006701e-03   1.1999234e-03   1.3641574e-03   2.4228657e-03   1.9193080e-03   1.5580763e-03   9.1547441e-05   4.2498838e-03   5.1587623e-04   4.4451535e-03   1.9710136e-04   5.9468529e-03   1.1637586e-04   4.0879090e-03   1.9577278e-03   7.5438506e-06   5.1321994e-05   9.5746588e-04   1.8362275e-03   1.6353958e-03   8.5567943e-04   2.3534169e-04   2.0155706e-04   2.8744085e-05   6.6464816e-03   6.0500135e-03   3.7609984e-03   3.0183871e-04   2.6908058e-03   1.8208811e-03   9.2994693e-04   3.0287331e-03   1.4489077e-03   1.1766146e-04   3.4058119e-05   1.3255619e-03   1.5104843e-03   1.2375803e-03   1.2042792e-04   2.2770079e-03   7.0925866e-04   1.7863102e-02   1.0250835e-02   7.3485693e-03   9.3316539e-03   1.1683662e-02   1.0169272e-02   1.2909901e-02   9.0217168e-03   1.1005819e-02   9.1383359e-03   4.6331293e-03   7.2658380e-03   6.2625155e-03   1.2163934e-02   1.5604510e-02   9.0849374e-03   6.4499430e-03   7.6430735e-03   1.6734199e-02   8.8633570e-03   7.8095179e-03   1.1083442e-02   1.1720144e-02   4.3192019e-03   7.6217573e-03   5.4833250e-03   3.8602370e-03   4.7396014e-03   1.0813609e-02   4.4772013e-03   7.7667524e-03   3.8237256e-03   1.1654961e-02   4.0471444e-03   1.1640132e-02   6.9842505e-03   1.3199398e-02   6.9808241e-03   4.8195688e-03   4.8204301e-03   9.7963275e-03   5.3012133e-03   1.0250835e-02   1.0025463e-02   1.1016974e-02   6.8718627e-03   6.8391566e-03   5.4173007e-03   1.1780352e-02   7.9156890e-03   7.3587704e-04   1.9839586e-03   1.2219042e-03   1.5405721e-03   4.1726993e-04   3.7297590e-04   1.1835574e-03   6.1737386e-04   2.7177989e-03   1.1395438e-04   5.3634867e-03   1.2095802e-03   9.5552631e-04   7.0211590e-04   1.8297765e-03   1.3098829e-03   5.8522900e-03   1.0011765e-03   2.8317516e-03   7.2859483e-04   4.4710316e-03   2.0244176e-03   7.6733040e-04   6.7685529e-04   2.1301657e-03   1.3518842e-03   6.1205846e-04   2.4644310e-03   1.1938153e-03   1.5975309e-03   5.1076446e-04   5.2791382e-03   3.0339660e-03   1.1907842e-03   3.5011674e-04   4.7257393e-03   3.5465173e-04   7.3647426e-04   2.3478030e-03   5.8214999e-04   7.6861523e-04   8.1449121e-04   5.9893306e-04   4.9750178e-04   2.3193980e-04   3.6882491e-04   2.8017975e-03   9.9802686e-05   1.3102890e-02   7.5969761e-03   6.0828162e-03   7.3064415e-03   8.8657957e-03   9.3996859e-03   9.1529212e-03   8.6958624e-03   1.0707084e-02   5.8653516e-03   2.3848099e-03   6.0110419e-03   4.6435179e-03   9.5841132e-03   1.1495876e-02   5.7351815e-03   4.7762002e-03   5.2279685e-03   1.6343054e-02   9.1684398e-03   5.3835136e-03   7.5338144e-03   1.1677909e-02   3.4411805e-03   4.9237859e-03   4.5530879e-03   2.5678146e-03   2.5808970e-03   8.5897045e-03   4.5950787e-03   7.8389611e-03   2.4182455e-03   9.2053995e-03   3.4479875e-03   1.0780684e-02   6.4366206e-03   8.7029459e-03   4.8244677e-03   2.4820766e-03   3.2994674e-03   6.9725103e-03   3.6004536e-03   7.5969761e-03   7.2055876e-03   7.4680636e-03   4.8244418e-03   6.1219436e-03   3.5524443e-03   7.3785563e-03   5.0327851e-03   4.5958248e-04   1.4323573e-04   1.2376608e-03   1.4854110e-03   8.9326822e-04   1.4516004e-04   1.6538068e-03   6.5338340e-04   1.1299287e-03   2.4074626e-03   6.9551596e-04   2.6949476e-03   7.2730987e-04   2.7608296e-03   6.5430290e-04   2.7448595e-03   
1.4433165e-04   4.0981488e-03   7.0217976e-04   2.0017194e-03   8.4737221e-04   3.5875154e-04   4.2590326e-04   3.7194881e-04   5.5222558e-04   6.3134375e-04   2.4465093e-03   1.5551077e-04   5.7141821e-04   4.4617793e-04   3.7751002e-03   4.2307180e-03   3.3297057e-03   8.4238856e-05   1.8060244e-03   1.6735237e-03   1.3470834e-04   1.4289074e-03   6.1420964e-04   1.0488316e-04   6.6710002e-04   4.5037482e-04   1.3817339e-03   8.2351922e-04   1.8947713e-04   4.2527867e-03   4.2014447e-04   1.3415184e-02   6.6868372e-03   4.2910287e-03   5.8862692e-03   7.8406972e-03   6.4938295e-03   9.0912854e-03   5.6561218e-03   7.2322567e-03   6.1641630e-03   2.7540356e-03   4.2344365e-03   3.5847503e-03   8.2354910e-03   1.1610160e-02   6.2859853e-03   3.6577005e-03   4.7344286e-03   1.2033367e-02   5.6148647e-03   4.9101139e-03   7.6703569e-03   7.8664866e-03   2.1500523e-03   4.7726337e-03   2.9188064e-03   1.9107921e-03   2.6804057e-03   7.0603390e-03   2.2776004e-03   4.6992435e-03   1.8838392e-03   7.7943774e-03   1.9124732e-03   7.9102721e-03   4.1636243e-03   9.7823337e-03   4.1721395e-03   2.8414673e-03   2.6224860e-03   6.5787438e-03   3.5255969e-03   6.6868372e-03   6.5865547e-03   7.7526002e-03   4.4665713e-03   4.0404299e-03   3.0508638e-03   8.7371106e-03   5.0758684e-03   1.4741381e-04   1.6125952e-03   2.5451963e-03   2.5693849e-03   7.6673984e-04   2.4419486e-03   4.9090126e-04   2.2351893e-03   2.2914219e-03   9.4806103e-04   4.8625587e-03   2.0262116e-03   3.3701508e-03   1.9166326e-03   1.0683081e-03   9.0452408e-04   4.3648468e-03   1.8983778e-03   7.0324108e-04   1.1499379e-03   1.4013722e-03   1.4463883e-03   3.0992383e-04   2.7889447e-04   9.9370768e-04   4.4215550e-03   7.4609763e-04   1.5322654e-03   1.6670592e-03   2.5458750e-03   4.8164855e-03   4.9491294e-03   7.9622502e-04   9.3705610e-04   3.2918730e-03   3.3244867e-04   1.4917733e-03   1.2997427e-03   8.4964648e-04   1.9833350e-03   9.7663737e-04   3.0207145e-03   1.9767871e-03   1.2186915e-03   6.6584952e-03   1.4294810e-03   1.1447125e-02   4.9138585e-03   2.4483433e-03   4.3479747e-03   5.8020082e-03   4.0665925e-03   7.5951000e-03   3.6015682e-03   4.5184530e-03   5.0362911e-03   2.3873447e-03   2.3916463e-03   2.1166084e-03   5.8005881e-03   9.3326086e-03   5.1921662e-03   2.5583136e-03   3.9040792e-03   8.1942534e-03   3.2098756e-03   3.4610228e-03   6.2351763e-03   5.0378736e-03   9.3974884e-04   3.8135545e-03   1.8905878e-03   1.0935023e-03   2.2739982e-03   4.8352649e-03   1.2660181e-03   2.5068904e-03   1.6511455e-03   5.4229770e-03   1.1989559e-03   6.1054655e-03   2.0081796e-03   8.6788304e-03   3.3159020e-03   2.5546078e-03   1.5534900e-03   4.7975700e-03   2.5264412e-03   4.9138585e-03   4.8875866e-03   6.2264786e-03   3.0375207e-03   1.9750388e-03   2.0217929e-03   8.0063599e-03   4.3571190e-03   1.6611913e-03   2.0167984e-03   1.5438876e-03   3.6494229e-04   2.0217253e-03   4.6462511e-04   1.5312613e-03   2.1707116e-03   9.6955753e-04   3.3920004e-03   1.0848877e-03   3.2679078e-03   1.3217642e-03   1.7681092e-03   4.3634818e-04   4.3762590e-03   9.8982281e-04   1.4702047e-03   1.2309511e-03   6.6753502e-04   6.7519636e-04   2.4877587e-04   3.8105160e-04   8.2183923e-04   3.0399429e-03   3.2267433e-04   8.9534104e-04   8.4272052e-04   3.5139014e-03   4.8234985e-03   4.1432495e-03   3.0463297e-04   1.2193814e-03   2.4923401e-03   1.7011388e-04   1.7584273e-03   1.0444437e-03   3.3026408e-04   1.0723664e-03   7.7198092e-04   2.2859213e-03   1.4276818e-03   5.7687029e-04   4.8466907e-03   8.6936394e-04   
1.2850654e-02   6.1088154e-03   3.5569698e-03   5.5719646e-03   7.1488198e-03   5.6571014e-03   8.7634622e-03   5.0674178e-03   6.2538450e-03   5.7468537e-03   2.5694606e-03   3.4855366e-03   2.9211508e-03   7.2454781e-03   1.0560745e-02   5.7784909e-03   3.4230741e-03   4.7152707e-03   1.0513823e-02   4.7114505e-03   4.2697686e-03   7.1803873e-03   6.8688743e-03   1.5447042e-03   4.5401495e-03   2.7361382e-03   1.4855517e-03   2.5857007e-03   6.2270622e-03   2.0576267e-03   3.8519560e-03   2.0130817e-03   6.8461641e-03   1.8250891e-03   7.7109246e-03   3.0910476e-03   9.4645289e-03   4.1075164e-03   2.7837137e-03   2.0667492e-03   5.7442304e-03   2.7416306e-03   6.1088154e-03   5.9744542e-03   7.0784056e-03   3.5969994e-03   3.0144395e-03   2.5834027e-03   8.5675229e-03   5.0446428e-03   9.3410747e-04   2.6108336e-03   1.8631164e-03   1.0514258e-03   2.7757095e-03   1.6027319e-03   5.9009151e-03   9.3507643e-05   4.8585250e-03   3.2279691e-03   7.0814372e-04   1.8064210e-03   4.7021523e-03   1.7906187e-03   1.7438865e-03   3.2726931e-03   2.1092470e-03   4.4199215e-04   2.5742583e-03   2.7576288e-03   2.1865740e-03   8.5954487e-04   4.3875130e-04   6.2385517e-03   2.0706463e-03   2.9895922e-03   2.4900150e-03   1.5452115e-03   1.2972442e-03   2.2089282e-03   1.3825902e-03   4.4047937e-03   1.3378707e-03   8.5528525e-04   1.4287812e-04   2.3200710e-04   1.9018574e-03   3.1193134e-03   2.7835858e-04   1.0440284e-03   6.7774868e-04   1.6434489e-03   8.2787646e-03   9.1805293e-04   8.2830461e-03   3.6334874e-03   2.7936555e-03   2.6701226e-03   4.5417785e-03   4.4199379e-03   4.7638022e-03   3.6853341e-03   5.4544340e-03   3.4799734e-03   1.6014887e-03   2.8117239e-03   2.4093914e-03   5.5035080e-03   8.3987820e-03   4.0331795e-03   1.3946498e-03   1.6013776e-03   9.9231424e-03   4.6841468e-03   3.0824955e-03   4.3724199e-03   6.0877536e-03   1.8755546e-03   2.0838499e-03   1.1387331e-03   1.4636530e-03   1.0937142e-03   4.3877001e-03   1.4753015e-03   3.8552640e-03   2.5868499e-04   5.1223304e-03   7.0168484e-04   4.3920187e-03   3.9679568e-03   5.8082286e-03   1.3720301e-03   1.2437314e-03   1.9911684e-03   4.5361159e-03   3.9571028e-03   3.6334874e-03   3.7423473e-03   5.0666856e-03   3.9975266e-03   3.7265040e-03   1.8650355e-03   5.0595778e-03   1.9211780e-03   1.4802848e-03   2.3288314e-03   6.8065434e-05   4.0423471e-03   2.2193959e-04   7.6466779e-03   9.2054349e-04   2.1021154e-03   2.1770952e-03   5.4412625e-04   2.3412508e-03   6.7735470e-03   2.1146424e-03   1.1473823e-03   2.2087962e-03   4.4699141e-03   2.0616206e-03   2.1327263e-03   2.0380999e-03   3.2539726e-03   1.3598045e-03   3.9670027e-04   4.8634117e-03   2.4197901e-03   3.2204932e-03   1.7622064e-03   3.8856461e-03   1.2334789e-03   4.0452577e-04   1.0887743e-03   6.2435560e-03   2.0292084e-04   1.0829901e-03   1.7786841e-03   4.1228310e-04   1.8859956e-03   2.3286589e-03   5.1772577e-04   4.9172870e-04   2.0007065e-04   1.2918676e-03   5.0330599e-03   3.9898095e-04   9.4287570e-03   5.3866372e-03   4.8647393e-03   5.2819054e-03   6.4598280e-03   8.0073865e-03   6.1852254e-03   7.5566712e-03   9.5128884e-03   3.7267827e-03   1.2843714e-03   4.8237356e-03   3.5085400e-03   7.3800011e-03   8.6672418e-03   3.7373665e-03   3.3006522e-03   3.2553459e-03   1.4768796e-02   8.5372346e-03   3.7597131e-03   5.0208833e-03   1.0540608e-02   2.9216135e-03   2.9987594e-03   3.5026889e-03   1.9346800e-03   1.3415748e-03   6.5619943e-03   4.2356898e-03   7.2096387e-03   1.4370251e-03   7.0771420e-03   2.7742326e-03   8.9575949e-03   
5.8574207e-03   5.7119619e-03   3.0499081e-03   1.2067939e-03   2.4757916e-03   5.0772246e-03   3.1235792e-03   5.3866372e-03   5.0347068e-03   5.1609892e-03   3.8212989e-03   5.4644467e-03   2.4256658e-03   4.6018293e-03   2.9278326e-03   8.9754714e-04   1.9308173e-03   2.4780225e-03   8.6351250e-04   4.2337045e-03   2.1236680e-03   7.2381764e-04   2.1294516e-04   3.5897400e-03   7.7335429e-04   6.4715989e-03   7.0701488e-04   5.1814848e-03   2.9010443e-04   5.5409554e-03   2.4566735e-03   3.3429771e-04   3.2970103e-04   2.0470962e-03   2.3835444e-03   1.6383998e-03   1.0044594e-03   8.9092263e-04   8.3257442e-04   1.5048347e-04   7.3947599e-03   5.1765389e-03   2.4819362e-03   5.2929628e-04   4.5409213e-03   9.8467896e-04   1.3414871e-03   3.3640577e-03   1.3544701e-03   5.6680732e-04   2.1931945e-04   1.3672243e-03   8.5440953e-04   8.3314842e-04   2.6161270e-04   1.7366856e-03   5.2799539e-04   1.7649847e-02   1.0764587e-02   8.4383096e-03   9.9924928e-03   1.2273825e-02   1.1788899e-02   1.2850999e-02   1.0630699e-02   1.2964118e-02   9.0438994e-03   4.5288546e-03   8.3580766e-03   6.9988358e-03   1.3044370e-02   1.5771615e-02   8.9276151e-03   7.0025139e-03   7.7681345e-03   1.9248623e-02   1.0890288e-02   8.2148016e-03   1.1050411e-02   1.3845989e-02   5.2067991e-03   7.6819868e-03   6.2976924e-03   4.3733262e-03   4.7066273e-03   1.1733076e-02   5.6953630e-03   9.5825887e-03   4.0184455e-03   1.2555508e-02   4.8201445e-03   1.2938333e-02   8.5012649e-03   1.2613331e-02   7.2187981e-03   4.6491251e-03   5.3739269e-03   1.0213138e-02   5.7106829e-03   1.0764587e-02   1.0424589e-02   1.1013685e-02   7.3861586e-03   8.2413252e-03   5.8213434e-03   1.1031181e-02   7.7860554e-03   2.6482878e-03   4.2794287e-04   1.8319104e-03   1.5871773e-03   1.2340260e-03   2.9761573e-03   6.4181441e-04   3.9337040e-03   3.6570436e-04   2.9155752e-03   1.3382816e-05   5.6692471e-03   6.4631376e-04   2.4262663e-03   1.0063227e-03   2.3414789e-04   3.8352406e-04   2.6670153e-04   1.1841642e-03   1.3362953e-03   1.8214422e-03   1.1939941e-05   1.6841308e-04   4.0205285e-04   4.8423454e-03   5.6256276e-03   4.4224155e-03   3.0861840e-04   1.5326607e-03   2.2893524e-03   5.4845532e-04   1.9363566e-03   1.1483728e-03   6.7060376e-05   4.8890125e-04   9.7136579e-04   1.7676633e-03   1.3375465e-03   2.5372175e-04   4.0846507e-03   8.2917869e-04   1.6077651e-02   8.4460632e-03   5.5177458e-03   7.2638972e-03   9.7215743e-03   7.5597148e-03   1.1218034e-02   6.4234899e-03   8.1582316e-03   8.1119193e-03   4.1520915e-03   5.4624397e-03   4.9276011e-03   1.0142169e-02   1.4194600e-02   8.3023965e-03   4.8434975e-03   6.1466052e-03   1.3239048e-02   6.2447391e-03   6.5902144e-03   9.7724034e-03   8.6919298e-03   3.1454944e-03   6.3952659e-03   3.7622934e-03   3.0039560e-03   4.0045050e-03   8.7466217e-03   2.6847018e-03   5.4026497e-03   2.7900002e-03   9.6269482e-03   2.5687827e-03   8.7444600e-03   5.2241473e-03   1.2215015e-02   5.4919656e-03   4.2251052e-03   3.8716574e-03   8.5147448e-03   4.9424074e-03   8.4460632e-03   8.4294916e-03   9.9304629e-03   6.0757927e-03   5.1533590e-03   4.4115457e-03   1.1061881e-02   6.6918878e-03   4.2077380e-03   2.8084601e-04   8.0255307e-03   1.0009734e-03   2.4531898e-03   2.5259126e-03   4.7784400e-04   2.9272193e-03   6.4204523e-03   2.4613181e-03   8.1446100e-04   2.5131313e-03   4.2288269e-03   2.3479589e-03   2.4787654e-03   2.3219118e-03   3.4016339e-03   1.1923522e-03   3.6609993e-04   5.5309499e-03   2.7130308e-03   3.6876750e-03   2.1157012e-03   3.5197143e-03   
1.1211667e-03   5.0040735e-04   1.2581494e-03   6.2453424e-03   4.9316252e-04   1.0956505e-03   1.9034882e-03   5.6262931e-04   2.1623642e-03   2.7381233e-03   6.2275542e-04   9.1647472e-04   4.3469066e-04   1.6016964e-03   5.5073116e-03   6.0170029e-04   8.2861929e-03   4.5965840e-03   4.2124912e-03   4.7715416e-03   5.5699315e-03   7.3813594e-03   5.3659110e-03   7.1660104e-03   8.8786575e-03   2.9267242e-03   8.0445420e-04   4.1635703e-03   2.8364579e-03   6.3397911e-03   7.3211567e-03   2.8639086e-03   2.8974680e-03   2.8626073e-03   1.3776826e-02   8.0525608e-03   2.9632152e-03   4.1293440e-03   9.9396499e-03   2.4124505e-03   2.4179141e-03   3.2613675e-03   1.4673422e-03   9.3714862e-04   5.6839580e-03   4.1506352e-03   6.7187933e-03   1.3083992e-03   6.0861850e-03   2.6486423e-03   8.7285257e-03   5.1099460e-03   4.7657924e-03   2.6722144e-03   7.9417630e-04   1.8862622e-03   4.0883053e-03   2.3510235e-03   4.5965840e-03   4.1923613e-03   4.1490574e-03   2.9383782e-03   4.7171745e-03   1.8344857e-03   3.7646957e-03   2.4446440e-03   3.4307781e-03   6.9180223e-04   1.9272484e-03   5.2229584e-03   1.7410764e-03   5.5262484e-03   1.3512624e-03   1.2930220e-03   5.9265821e-04   7.2336834e-03   1.6622228e-03   1.4643809e-03   1.4579196e-03   1.0665934e-03   1.2486050e-03   4.3399433e-05   1.3778574e-03   2.2390179e-03   3.1206766e-03   3.9982611e-04   7.0342073e-04   1.4793565e-03   4.5495005e-03   7.4063181e-03   6.9037801e-03   1.1215419e-03   3.4551859e-04   4.3750592e-03   1.0242073e-03   2.4559606e-03   2.2544417e-03   6.9180223e-04   1.5393939e-03   1.8990091e-03   3.7039856e-03   2.8782332e-03   1.2534261e-03   6.1345149e-03   2.1154370e-03   1.6389445e-02   8.1714731e-03   4.6971245e-03   6.9480013e-03   9.2874957e-03   6.0735661e-03   1.1557043e-02   5.0795005e-03   6.2695782e-03   8.5942939e-03   4.8981354e-03   4.6386510e-03   4.5456138e-03   9.3051239e-03   1.4039626e-02   8.8459552e-03   4.7683489e-03   6.5513763e-03   1.0462868e-02   4.3969158e-03   6.5113261e-03   1.0098096e-02   6.6155737e-03   2.7131547e-03   6.8044529e-03   3.4785563e-03   3.0414918e-03   4.6799978e-03   7.9508909e-03   2.0423594e-03   3.8146379e-03   3.3303959e-03   8.7946774e-03   2.4271150e-03   7.7798359e-03   3.9016462e-03   1.3149131e-02   5.7850137e-03   5.0633237e-03   3.7856360e-03   8.3083336e-03   4.9921128e-03   8.1714731e-03   8.2786520e-03   1.0189399e-02   5.8912773e-03   3.9649361e-03   4.4771942e-03   1.2268673e-02   7.3587328e-03   6.5787312e-03   1.3233357e-03   1.1455828e-03   1.2416155e-03   1.4002549e-03   2.1211045e-03   6.2047866e-03   1.6380421e-03   2.0343353e-03   1.2366343e-03   4.6737502e-03   2.4960613e-03   1.3635919e-03   1.1869805e-03   2.7420392e-03   1.3311387e-03   5.4030547e-04   3.4496369e-03   1.8301986e-03   2.4543913e-03   1.0360218e-03   4.9823495e-03   2.4358984e-03   7.7653049e-04   6.6085729e-04   5.4666528e-03   3.6251211e-04   9.0034369e-04   2.5308489e-03   6.8325669e-04   1.3068439e-03   1.4466712e-03   7.0328570e-04   7.2909544e-04   3.3427965e-04   8.2319726e-04   3.3236658e-03   2.7101948e-04   1.1415097e-02   6.6763180e-03   5.5975222e-03   6.7637660e-03   7.8425097e-03   9.1076654e-03   7.9396677e-03   8.6849227e-03   1.0537825e-02   4.7340864e-03   1.6981356e-03   5.5204544e-03   4.0238168e-03   8.5090240e-03   9.7837063e-03   4.5130967e-03   4.3644743e-03   4.6397935e-03   1.5929686e-02   9.2384755e-03   4.4702915e-03   6.2885333e-03   1.1607431e-02   3.1082253e-03   4.1080254e-03   4.4476701e-03   2.1448891e-03   1.9903866e-03   7.7241343e-03   
4.8610346e-03   7.8308568e-03   2.2349743e-03   8.1986845e-03   3.4906235e-03   1.0785042e-02   6.0455598e-03   7.1678534e-03   4.3085958e-03   1.8327850e-03   2.7372439e-03   5.8248161e-03   2.8157351e-03   6.6763180e-03   6.1912806e-03   6.1165743e-03   3.9060199e-03   5.6854050e-03   2.9112999e-03   5.9286549e-03   4.2459890e-03   4.7261943e-03   7.5564450e-03   3.0589295e-03   9.9138151e-03   2.5436162e-03   2.3081715e-03   1.8344588e-03   1.2327595e-02   2.9913447e-03   3.4407458e-03   3.5166367e-03   2.2912956e-03   2.5985157e-03   1.0536407e-03   4.0148201e-03   5.3033668e-03   3.3636231e-03   1.5135209e-03   1.3379167e-03   2.9432336e-03   8.2210569e-03   1.2378534e-02   1.1256214e-02   3.0883819e-03   6.3877121e-04   7.6741777e-03   3.3091271e-03   5.2745018e-03   5.1312091e-03   2.0746888e-03   2.6973394e-03   4.6616800e-03   6.5765736e-03   5.8096774e-03   2.9820979e-03   7.3187243e-03   4.6783172e-03   2.3410581e-02   1.3144529e-02   8.3391705e-03   1.1305315e-02   1.4457465e-02   9.3485129e-03   1.7555848e-02   7.8838497e-03   9.1347689e-03   1.4043414e-02   9.2390419e-03   8.2683227e-03   8.4558564e-03   1.4291672e-02   2.0441558e-02   1.4348706e-02   8.7091477e-03   1.1171095e-02   1.3577182e-02   6.6301175e-03   1.1191012e-02   1.5865943e-02   9.2559664e-03   5.8683930e-03   1.1694065e-02   6.7001372e-03   6.5334334e-03   8.9560518e-03   1.2574664e-02   4.2523471e-03   6.2343724e-03   6.8791672e-03   1.3649249e-02   5.2782119e-03   1.1089361e-02   6.8777652e-03   1.9771053e-02   1.0157447e-02   9.4893989e-03   7.5319640e-03   1.3411350e-02   8.8625592e-03   1.3144529e-02   1.3390278e-02   1.5951074e-02   1.0173619e-02   7.0905829e-03   8.5554531e-03   1.8741319e-02   1.2400067e-02   4.2985441e-03   2.4763265e-03   1.0193789e-03   1.4380091e-03   3.6495663e-03   1.2003820e-03   2.0584975e-03   2.4845352e-03   1.6034070e-03   3.3059261e-04   1.8671770e-03   2.0088877e-03   1.4284810e-03   4.3098388e-04   2.3514979e-04   5.2698749e-03   1.3824059e-03   2.2360501e-03   1.8501267e-03   1.6468815e-03   1.8319470e-03   2.4154150e-03   8.5423988e-04   3.3259702e-03   1.3633177e-03   3.9750527e-04   2.1357310e-04   1.4089375e-04   1.2605369e-03   2.3906980e-03   1.0696291e-04   1.0906412e-03   5.9076498e-04   1.1304618e-03   7.2732511e-03   6.4012947e-04   8.9598694e-03   3.8314732e-03   2.5915433e-03   2.9763427e-03   4.7600376e-03   4.3053173e-03   5.3266981e-03   3.6190516e-03   5.2272723e-03   3.6549051e-03   1.5047575e-03   2.5888356e-03   2.1634980e-03   5.5055672e-03   8.5050147e-03   4.0885074e-03   1.5151975e-03   1.9844585e-03   9.6042351e-03   4.2899092e-03   2.9941597e-03   4.6609860e-03   5.8520656e-03   1.4443089e-03   2.3092039e-03   1.1621771e-03   1.1296414e-03   1.1169479e-03   4.4204603e-03   1.2641450e-03   3.4620250e-03   3.4971048e-04   5.1226200e-03   6.2752047e-04   4.7853928e-03   3.3731886e-03   6.3070664e-03   1.6662334e-03   1.2828407e-03   1.6597311e-03   4.4397739e-03   3.3292201e-03   3.8314732e-03   3.8867782e-03   5.1669823e-03   3.5417199e-03   3.1734550e-03   1.6842820e-03   5.5385508e-03   2.3108610e-03   1.0105990e-03   4.7523926e-03   2.9847642e-03   9.7105522e-03   2.6675945e-03   5.8410645e-03   1.0628436e-03   8.9773521e-03   5.4199214e-03   1.6519449e-03   1.3847497e-03   4.5932482e-03   4.2245140e-03   3.0278103e-03   1.7095869e-03   2.8675709e-03   2.8000554e-03   1.2095222e-03   1.0633409e-02   6.3588443e-03   2.3146826e-03   1.8579783e-03   7.7501487e-03   1.5279559e-03   3.0361588e-03   6.2292475e-03   2.9996444e-03   2.2092276e-03   
1.2670329e-03   3.0585508e-03   1.9631739e-03   1.9448597e-03   1.6026176e-03   7.3057463e-04   1.6268750e-03   1.9360609e-02   1.3271536e-02   1.1367917e-02   1.3356276e-02   1.4871782e-02   1.6028245e-02   1.4980963e-02   1.5218503e-02   1.7630002e-02   1.0242398e-02   5.4451367e-03   1.1236806e-02   9.0841292e-03   1.5577818e-02   1.6739427e-02   9.6435556e-03   9.8426714e-03   1.0341095e-02   2.4442578e-02   1.5482723e-02   9.8424496e-03   1.2530745e-02   1.8870958e-02   7.3605563e-03   9.5556413e-03   9.6316193e-03   6.0281749e-03   6.1257342e-03   1.4596779e-02   9.4938425e-03   1.3739836e-02   6.3629397e-03   1.5181016e-02   7.9755348e-03   1.8111593e-02   1.1305790e-02   1.3376686e-02   9.8378125e-03   5.8135644e-03   6.9819428e-03   1.1631750e-02   6.1445613e-03   1.3271536e-02   1.2517306e-02   1.1992997e-02   8.3045136e-03   1.0906252e-02   7.4644491e-03   1.1536762e-02   9.7524154e-03   4.5852486e-03   9.8595493e-04   5.1231602e-03   5.3641159e-04   6.1291344e-03   9.0289414e-06   4.9719488e-03   2.7623691e-03   1.0135570e-04   4.9630072e-05   1.4573369e-03   2.2050307e-03   1.9067713e-03   6.5935996e-04   5.5655703e-04   4.9349156e-04   5.3910141e-05   7.6334144e-03   6.4541501e-03   3.6168243e-03   4.5600228e-04   3.3337204e-03   1.8551457e-03   1.2329546e-03   3.8576017e-03   1.8263928e-03   3.4082293e-04   2.8939589e-05   1.6992773e-03   1.7320592e-03   1.4340517e-03   2.7567571e-04   1.5103910e-03   8.5202938e-04   1.8535044e-02   1.1041503e-02   8.1659733e-03   1.0422568e-02   1.2515324e-02   1.1445689e-02   1.3674148e-02   1.0415137e-02   1.2394915e-02   9.4870366e-03   4.7984401e-03   8.0611256e-03   6.7876377e-03   1.2924053e-02   1.5886474e-02   9.2445598e-03   7.3109344e-03   8.4847047e-03   1.8293716e-02   1.0175519e-02   8.2306422e-03   1.1561315e-02   1.3225081e-02   4.8091709e-03   8.1976852e-03   6.4746796e-03   4.2076714e-03   5.1007444e-03   1.1683626e-02   5.5527590e-03   8.9342726e-03   4.4973686e-03   1.2442734e-02   4.9468838e-03   1.3343592e-02   7.6599210e-03   1.3497447e-02   7.8132331e-03   5.1004820e-03   5.1592495e-03   1.0155636e-02   5.1788357e-03   1.1041503e-02   1.0666367e-02   1.1283660e-02   6.9627068e-03   7.4793969e-03   5.8082013e-03   1.1981463e-02   8.5614846e-03   3.8688652e-03   7.5179133e-03   3.7407669e-03   3.1676786e-04   4.6173801e-03   4.2799166e-03   2.2501511e-03   4.2413983e-03   4.2204030e-03   4.6109813e-03   1.7937707e-03   8.2328963e-04   8.3021324e-03   4.1326900e-03   5.3767829e-03   3.8579826e-03   2.3734756e-03   1.6523409e-04   8.8835369e-04   2.5288831e-03   7.6911436e-03   1.1035999e-03   1.9572002e-03   1.3626197e-03   8.4501724e-04   3.6117590e-03   4.7013989e-03   1.0056440e-03   1.3674472e-03   9.9055737e-04   2.9505217e-03   8.8795046e-03   1.5067108e-03   6.0099517e-03   3.2148697e-03   3.5475051e-03   3.0620571e-03   4.0315627e-03   6.0415844e-03   3.4044063e-03   5.7826017e-03   7.5784861e-03   2.1065729e-03   8.8233953e-04   3.5528527e-03   2.5792431e-03   5.1245213e-03   6.2842876e-03   2.4095844e-03   1.8446522e-03   1.4167723e-03   1.2151930e-02   7.2262167e-03   2.4839286e-03   2.9019075e-03   8.5383029e-03   2.6181282e-03   1.4064800e-03   2.3288946e-03   1.7187301e-03   6.4203056e-04   4.4078979e-03   3.6116427e-03   6.0606537e-03   7.3312980e-04   4.8772717e-03   2.0701887e-03   6.3495233e-03   5.1782781e-03   3.3738477e-03   1.3922890e-03   5.6378115e-04   2.0528392e-03   3.5526857e-03   3.4142665e-03   3.2148697e-03   3.0384066e-03   3.3899600e-03   3.3852248e-03   4.7610217e-03   1.6939002e-03   
2.6027995e-03   1.1563987e-03   4.9937739e-03   2.8043563e-04   6.0192505e-03   1.1047123e-03   3.7340869e-03   9.3891763e-04   5.7693223e-04   8.5742923e-04   1.1063653e-03   2.1518410e-03   1.7873418e-03   1.8443813e-03   4.7874237e-04   4.0562362e-04   6.3806313e-04   5.6896950e-03   5.3990383e-03   4.0838955e-03   7.6270784e-04   3.0255235e-03   1.8702287e-03   1.2256654e-03   1.9416278e-03   1.1863062e-03   4.8488269e-04   7.2384993e-04   1.1782779e-03   1.1397261e-03   1.1824844e-03   4.5012569e-04   4.3416102e-03   9.3532010e-04   1.7514227e-02   9.8600607e-03   7.1630436e-03   8.1480968e-03   1.1292909e-02   9.1710206e-03   1.2255610e-02   7.6051240e-03   9.9403988e-03   9.4620221e-03   5.2227781e-03   7.1398046e-03   6.5861173e-03   1.2201906e-02   1.6541006e-02   9.8696183e-03   5.7022093e-03   6.6806000e-03   1.5779735e-02   7.9447125e-03   8.2548534e-03   1.1179561e-02   1.0455124e-02   4.7301378e-03   7.3678164e-03   4.4876356e-03   4.3705173e-03   4.8557182e-03   1.0520487e-02   3.4331839e-03   7.0769130e-03   3.1675776e-03   1.1611243e-02   3.1862426e-03   9.1961094e-03   7.4495828e-03   1.3429738e-02   6.1002834e-03   5.0525215e-03   5.4421147e-03   1.0502545e-02   7.0546448e-03   9.8600607e-03   9.9581080e-03   1.1708746e-02   8.2306703e-03   7.3218404e-03   5.8241089e-03   1.2089443e-02   7.3253099e-03   3.2913459e-03   8.4499731e-03   4.8569537e-03   8.0216351e-04   3.5049401e-03   4.1209949e-03   4.1951632e-03   1.4430192e-03   2.1132422e-03   3.9581533e-03   7.7606295e-03   2.8139659e-03   3.8070595e-03   4.7702994e-03   3.8227367e-03   9.3108167e-03   1.0381275e-02   3.5346820e-03   5.2926972e-04   8.0977990e-03   2.5864687e-03   3.9952884e-03   4.6152713e-03   3.2808967e-03   5.0209478e-03   3.9992007e-03   7.6245140e-03   5.9395645e-03   4.2477403e-03   1.1072543e-02   4.9514063e-03   1.3733574e-02   6.2131974e-03   2.8230097e-03   5.6455623e-03   6.8638978e-03   3.4892544e-03   9.9148605e-03   3.2690483e-03   3.3086076e-03   7.4423917e-03   5.1197423e-03   2.7541244e-03   3.1219477e-03   6.1841097e-03   1.0726676e-02   7.6421585e-03   4.1594455e-03   6.2994749e-03   5.6115691e-03   2.0176931e-03   4.9542156e-03   8.4395555e-03   3.5502322e-03   1.8230265e-03   6.1478670e-03   3.1652703e-03   2.7049802e-03   4.9489115e-03   5.3042297e-03   1.9463142e-03   1.6986432e-03   4.1722516e-03   5.8173968e-03   2.6602408e-03   6.4952123e-03   1.5042394e-03   1.1842312e-02   5.5188608e-03   5.4735640e-03   2.9735027e-03   6.0829501e-03   4.0306925e-03   6.2131974e-03   6.3472325e-03   8.2596987e-03   4.2451973e-03   1.6828229e-03   3.7151898e-03   1.1574575e-02   7.1331719e-03   5.4811265e-03   5.5565221e-04   2.6852359e-03   1.0154324e-03   1.7597013e-04   3.1956114e-04   3.9523878e-04   1.2388706e-03   1.2655903e-03   1.6805675e-03   3.2406061e-05   1.6284558e-04   3.0055489e-04   4.9827743e-03   5.3992645e-03   4.0722394e-03   2.5388663e-04   1.8315484e-03   2.0093416e-03   5.5333384e-04   1.9333559e-03   1.0417210e-03   4.5631651e-05   3.9213732e-04   8.9628584e-04   1.5141190e-03   1.1540120e-03   1.6596111e-04   3.8108192e-03   6.9068728e-04   1.6110144e-02   8.5829426e-03   5.7514563e-03   7.4081873e-03   9.8865471e-03   7.9109476e-03   1.1246146e-02   6.7496818e-03   8.5805974e-03   8.1149529e-03   4.1088457e-03   5.6967153e-03   5.0837880e-03   1.0378372e-02   1.4309813e-02   8.2945922e-03   4.9454249e-03   6.1635386e-03   1.3817928e-02   6.6600542e-03   6.6926476e-03   9.8029996e-03   9.1474450e-03   3.3107453e-03   6.4113962e-03   3.9012077e-03   3.0861181e-03   
3.9737498e-03   8.9756426e-03   2.8871345e-03   5.7720900e-03   2.7832463e-03   9.8623531e-03   2.6826772e-03   9.0033513e-03   5.5466456e-03   1.2137789e-02   5.5287165e-03   4.1655510e-03   3.9755943e-03   8.6395601e-03   5.0222445e-03   8.5829426e-03   8.5444862e-03   9.9728516e-03   6.1969528e-03   5.4521508e-03   4.4886104e-03   1.0936532e-02   6.6624227e-03   6.0879321e-03   5.2071133e-03   3.8816096e-03   5.8831678e-03   5.7002681e-03   6.1926865e-03   2.4416494e-03   1.5149583e-03   1.0539677e-02   5.8224280e-03   7.4292172e-03   5.4190952e-03   2.7168224e-03   2.8895677e-04   1.2473682e-03   3.7477469e-03   9.3088565e-03   2.0944009e-03   2.9293124e-03   2.5720420e-03   1.8897161e-03   5.1660356e-03   6.4102501e-03   2.0138936e-03   2.7413840e-03   2.0664916e-03   4.4338085e-03   1.0353410e-02   2.6118482e-03   4.2150650e-03   2.5222817e-03   3.4164904e-03   3.0582580e-03   3.1593359e-03   6.2318287e-03   2.3862805e-03   6.4839490e-03   7.9101598e-03   1.0920878e-03   5.4699759e-04   3.4046237e-03   2.2291279e-03   4.0543858e-03   4.2004727e-03   1.1970351e-03   2.0117915e-03   1.4112028e-03   1.1968526e-02   7.9088029e-03   1.7082776e-03   1.7239090e-03   9.0238817e-03   2.6874577e-03   9.4975956e-04   2.9722514e-03   1.7009640e-03   5.1343341e-04   3.7130353e-03   4.7806054e-03   6.6267337e-03   1.3220831e-03   3.9245747e-03   2.9403206e-03   7.2757164e-03   5.0266602e-03   1.8157078e-03   1.4701768e-03   3.5054511e-04   1.7883207e-03   2.3616946e-03   2.6617941e-03   2.5222817e-03   2.1723972e-03   1.9530095e-03   2.4533627e-03   4.5559375e-03   1.3609168e-03   1.2016297e-03   8.2861882e-04   4.8139327e-03   2.8209366e-03   1.1052349e-04   3.7194090e-05   1.3880093e-03   2.0960687e-03   1.8746430e-03   7.3307377e-04   5.4600497e-04   5.2531656e-04   7.7399437e-05   7.5260309e-03   6.5039124e-03   3.6958100e-03   4.3794778e-04   3.1691974e-03   1.9589162e-03   1.1747018e-03   3.8950000e-03   1.8610625e-03   3.3531102e-04   5.6607375e-05   1.7087501e-03   1.8728452e-03   1.4997100e-03   3.0123700e-04   1.5391826e-03   8.8147902e-04   1.8294246e-02   1.0829571e-02   7.9341800e-03   1.0299926e-02   1.2275737e-02   1.1220530e-02   1.3508784e-02   1.0262914e-02   1.2147087e-02   9.2823036e-03   4.6531594e-03   7.8245831e-03   6.5524640e-03   1.2610156e-02   1.5512672e-02   9.0061844e-03   7.1992659e-03   8.4158356e-03   1.7921554e-02   9.9492079e-03   7.9854978e-03   1.1342596e-02   1.2982212e-02   4.5954373e-03   8.0580657e-03   6.3922041e-03   4.0251341e-03   4.9895677e-03   1.1418706e-02   5.4787432e-03   8.7107499e-03   4.4679304e-03   1.2142235e-02   4.8867636e-03   1.3297934e-02   7.3458933e-03   1.3299484e-02   7.7358341e-03   4.9917221e-03   4.9413674e-03   9.8545070e-03   4.8770063e-03   1.0829571e-02   1.0435652e-02   1.1007902e-02   6.6474718e-03   7.1727397e-03   5.6106947e-03   1.1815640e-02   8.4729744e-03   1.5166983e-03   3.8470717e-03   4.0414722e-03   1.3464004e-03   1.0521554e-03   2.2256122e-03   8.2355212e-03   2.4972654e-03   3.6758320e-03   4.2952040e-03   1.2466978e-03   5.4237685e-03   7.4233145e-03   2.7434894e-03   1.3920410e-03   5.6705580e-03   1.6006987e-03   1.4795623e-03   2.5504964e-03   2.8217649e-03   4.8039535e-03   2.1716992e-03   5.1387834e-03   3.8511647e-03   3.4066344e-03   1.1638730e-02   3.4223746e-03   9.4998939e-03   3.3659956e-03   1.1438717e-03   2.4828929e-03   3.9653407e-03   1.5796312e-03   5.9993003e-03   1.2126234e-03   1.7377370e-03   4.6402796e-03   3.0884931e-03   1.1351744e-03   1.5562166e-03   3.9244608e-03   8.1556610e-03   
5.1855221e-03   1.5226783e-03   2.8914073e-03   4.3431210e-03   9.2550137e-04   2.9272754e-03   5.3162687e-03   2.0104865e-03   7.9177243e-04   3.1975522e-03   8.1317449e-04   1.3785091e-03   2.5714969e-03   2.9511423e-03   2.8161529e-04   5.9442405e-04   1.5991249e-03   3.5615321e-03   5.8433150e-04   3.1011353e-03   9.7656296e-04   8.0983113e-03   2.3700003e-03   3.0461348e-03   1.6450583e-03   4.0872867e-03   3.5603497e-03   3.3659956e-03   3.6518311e-03   5.6844228e-03   3.2852236e-03   1.0303385e-03   1.9470044e-03   7.8778855e-03   3.7000111e-03   1.9300708e-03   2.2574717e-03   1.1284358e-03   1.0646979e-03   1.0312076e-03   5.0621817e-03   1.2060164e-03   1.8000639e-03   2.0650564e-03   2.1487796e-03   3.2333878e-03   4.0195479e-03   1.2411974e-03   2.7183002e-03   2.2726383e-03   8.5094120e-04   2.4056963e-04   6.3782113e-04   1.2937611e-03   2.4889893e-03   5.9722053e-04   1.5621785e-03   1.2375553e-03   1.3358412e-03   8.1240216e-03   1.2311022e-03   1.1517636e-02   5.2909227e-03   3.4509195e-03   3.7153220e-03   6.3154320e-03   4.5407961e-03   7.2027738e-03   3.4054800e-03   5.2043334e-03   5.7490488e-03   3.1731637e-03   3.4719309e-03   3.4347027e-03   7.1701181e-03   1.1379612e-02   6.4467389e-03   2.2692649e-03   2.9960956e-03   9.7363741e-03   4.0178908e-03   4.7338729e-03   6.7982603e-03   5.6061060e-03   2.3699712e-03   3.8192855e-03   1.4133848e-03   2.2956056e-03   2.5088406e-03   5.7314562e-03   9.5862581e-04   3.4107092e-03   9.7091810e-04   6.6778472e-03   7.6184177e-04   4.3247639e-03   4.2032521e-03   8.9746527e-03   2.5881501e-03   2.8171688e-03   3.0086239e-03   6.5256052e-03   5.3225794e-03   5.2909227e-03   5.5860698e-03   7.6013097e-03   5.5376614e-03   4.0938335e-03   3.1000848e-03   8.1789436e-03   3.7376243e-03   3.2267663e-05   8.3707549e-04   1.6905622e-03   1.5718936e-03   9.3681209e-04   1.8762095e-04   1.9684682e-04   4.1277465e-05   6.4440482e-03   6.0518000e-03   3.8422414e-03   2.6274935e-04   2.4665214e-03   1.9088192e-03   8.3864343e-04   2.9786928e-03   1.4388393e-03   8.3975474e-05   5.3416679e-05   1.2919166e-03   1.6159093e-03   1.2740163e-03   1.2266042e-04   2.3692761e-03   7.1636545e-04   1.7563748e-02   9.9676420e-03   7.0439691e-03   9.1049407e-03   1.1370346e-02   9.8276989e-03   1.2673450e-02   8.7383729e-03   1.0632094e-02   8.9036592e-03   4.4721835e-03   6.9588127e-03   5.9822651e-03   1.1786793e-02   1.5215597e-02   8.8361414e-03   6.2553172e-03   7.4909328e-03   1.6226352e-02   8.5173085e-03   7.5296182e-03   1.0823312e-02   1.1340207e-02   4.0626163e-03   7.4274805e-03   5.3041231e-03   3.6467217e-03   4.5925766e-03   1.0472609e-02   4.2980846e-03   7.4369871e-03   3.7248522e-03   1.1287932e-02   3.8978058e-03   1.1428778e-02   6.6149115e-03   1.2975959e-02   6.8214468e-03   4.6824073e-03   4.5741646e-03   9.4691004e-03   5.0187206e-03   9.9676420e-03   9.7385546e-03   1.0722589e-02   6.5565379e-03   6.4801586e-03   5.1854796e-03   1.1596572e-02   7.7633670e-03   9.9158601e-04   1.6686143e-03   1.5457871e-03   9.5071878e-04   3.0701864e-04   3.6362811e-04   4.2548991e-05   6.6288818e-03   6.0417331e-03   3.6564704e-03   2.5834714e-04   2.6458983e-03   1.8764428e-03   8.4820574e-04   3.2578450e-03   1.5209238e-03   1.5090465e-04   6.1935165e-05   1.3587649e-03   1.7158299e-03   1.3006818e-03   1.6628415e-04   2.0535582e-03   7.1188329e-04   1.7324639e-02   9.9147481e-03   7.0600417e-03   9.2840701e-03   1.1302120e-02   1.0064078e-02   1.2589616e-02   9.1080261e-03   1.0920912e-02   8.6425556e-03   4.2412792e-03   6.9624194e-03   
5.8567924e-03   1.1639501e-02   1.4739829e-02   8.4651679e-03   6.3684651e-03   7.5923014e-03   1.6475338e-02   8.8252851e-03   7.3097093e-03   1.0588424e-02   1.1694482e-02   3.9807447e-03   7.3452100e-03   5.5353092e-03   3.5104587e-03   4.4753907e-03   1.0430724e-02   4.6273816e-03   7.6755144e-03   3.8450588e-03   1.1170257e-02   4.1281664e-03   1.1986478e-02   6.5423939e-03   1.2634016e-02   6.9257811e-03   4.5247923e-03   4.3922796e-03   9.1553155e-03   4.5676871e-03   9.9147481e-03   9.5995789e-03   1.0353234e-02   6.1627903e-03   6.3902007e-03   5.0235520e-03   1.1247115e-02   7.7428181e-03   9.7824889e-04   1.6624661e-03   3.0059873e-03   2.5249972e-04   6.2343657e-04   1.1719366e-03   4.0275459e-03   6.3575546e-03   5.8712921e-03   7.5872446e-04   5.6741436e-04   3.5908637e-03   6.4996051e-04   1.9776689e-03   1.6840088e-03   4.6219818e-04   1.2917587e-03   1.3743184e-03   3.0187893e-03   2.2373182e-03   9.0828703e-04   5.7426890e-03   1.5775049e-03   1.5168309e-02   7.4080646e-03   4.2538993e-03   6.3033105e-03   8.5100385e-03   5.7899432e-03   1.0520014e-02   4.8624768e-03   6.1169570e-03   7.6384090e-03   4.1091153e-03   4.1981402e-03   3.9923041e-03   8.6172736e-03   1.2997087e-02   7.8717119e-03   4.1667776e-03   5.7584992e-03   1.0387455e-02   4.3546484e-03   5.7746712e-03   9.1054055e-03   6.5301937e-03   2.3054088e-03   5.9614728e-03   3.0379183e-03   2.5067124e-03   3.9126904e-03   7.3239330e-03   1.8370981e-03   3.7021481e-03   2.7152943e-03   8.1304066e-03   2.0347479e-03   7.4172182e-03   3.6502880e-03   1.1915038e-02   5.0530642e-03   4.2458688e-03   3.2213237e-03   7.5116191e-03   4.3860606e-03   7.4080646e-03   7.4726409e-03   9.2071058e-03   5.2421868e-03   3.6675370e-03   3.8308993e-03   1.1024145e-02   6.4520833e-03   3.0133930e-04   5.1037905e-03   1.2017859e-03   2.1951761e-03   1.7686888e-03   1.8224411e-03   2.8788865e-03   3.1740291e-03   7.0420689e-04   2.2350252e-03   2.1605967e-03   1.6074282e-04   9.9312132e-04   6.3910600e-04   1.1068342e-03   2.2522084e-03   4.2563220e-04   2.1383582e-03   1.1781524e-03   1.1965920e-03   6.6826197e-03   8.9701315e-04   8.8374021e-03   3.5249181e-03   1.9006375e-03   3.2674819e-03   4.3522253e-03   3.8602822e-03   5.5032387e-03   3.5903574e-03   4.6280025e-03   3.2207897e-03   1.0860964e-03   1.8528751e-03   1.3287556e-03   4.5637650e-03   7.2040689e-03   3.3452381e-03   1.6438459e-03   2.5042800e-03   8.4078534e-03   3.6412189e-03   2.1968551e-03   4.2884610e-03   5.3072049e-03   5.5962180e-04   2.2924485e-03   1.3972917e-03   4.1775124e-04   1.0120628e-03   3.7672964e-03   1.3901921e-03   2.7802443e-03   7.8074497e-04   4.2513803e-03   8.6439557e-04   5.7017672e-03   1.9558095e-03   6.1305769e-03   2.0764524e-03   1.1750038e-03   7.8121442e-04   3.3536027e-03   1.6840705e-03   3.5249181e-03   3.4031938e-03   4.3169071e-03   2.0323666e-03   1.8104373e-03   1.0313169e-03   5.4663596e-03   2.6797009e-03   4.7017751e-03   1.4050521e-03   2.2967640e-03   1.4490557e-03   2.3196093e-03   1.7015479e-03   1.5767700e-03   5.5713099e-04   3.7392678e-03   8.9324098e-04   2.5002761e-04   8.6482380e-04   1.3585049e-04   1.1239692e-03   1.9761479e-03   8.2927740e-05   9.6014531e-04   3.4965263e-04   8.9552257e-04   5.7645956e-03   3.1975366e-04   8.7743211e-03   4.0156462e-03   2.9085611e-03   3.7265804e-03   4.9611690e-03   5.3200137e-03   5.4291747e-03   4.9090444e-03   6.4181467e-03   3.1699225e-03   9.0457257e-04   2.8707306e-03   2.0343345e-03   5.5511033e-03   7.6886258e-03   3.2962764e-03   1.9800100e-03   2.4033904e-03   
1.0905558e-02   5.4434884e-03   2.6411651e-03   4.3219947e-03   7.2333465e-03   1.3483652e-03   2.2580657e-03   1.9023537e-03   8.2889320e-04   8.3511491e-04   4.6996589e-03   2.2192064e-03   4.3941392e-03   6.5409726e-04   5.2313685e-03   1.2879740e-03   6.5768476e-03   3.4674432e-03   5.6567012e-03   2.0849532e-03   8.7522015e-04   1.2925162e-03   3.9107304e-03   2.2116014e-03   4.0156462e-03   3.8265815e-03   4.4724719e-03   2.6832503e-03   3.2108081e-03   1.3825243e-03   4.7909587e-03   2.3944351e-03   1.6956470e-03   1.0108236e-03   9.3414345e-04   1.2241588e-02   1.0680036e-02   6.4457112e-03   2.1113831e-03   4.8851719e-03   3.9417362e-03   3.5304281e-03   6.9169294e-03   4.3255103e-03   1.5489174e-03   5.9380000e-04   4.2050996e-03   3.5612837e-03   3.5981180e-03   1.5473563e-03   1.0469113e-03   2.7881158e-03   2.6084642e-02   1.6915515e-02   1.3046936e-02   1.5813114e-02   1.8728660e-02   1.6622622e-02   2.0172358e-02   1.5047933e-02   1.7480656e-02   1.5121187e-02   8.9988271e-03   1.2921567e-02   1.1505555e-02   1.9202447e-02   2.2948431e-02   1.4807179e-02   1.1989561e-02   1.3522703e-02   2.4353472e-02   1.4534512e-02   1.3492376e-02   1.7699736e-02   1.8260951e-02   8.7551977e-03   1.3397972e-02   1.0605977e-02   8.1065177e-03   9.3705995e-03   1.7600384e-02   8.9170386e-03   1.3238432e-02   8.2394556e-03   1.8591466e-02   8.5466794e-03   1.8371580e-02   1.2139358e-02   2.0015536e-02   1.2667833e-02   9.3753580e-03   9.4099897e-03   1.5925767e-02   9.2577085e-03   1.6915515e-02   1.6530549e-02   1.7369847e-02   1.1736690e-02   1.2003814e-02   1.0321540e-02   1.8104120e-02   1.3766972e-02   1.5141485e-04   3.6186411e-04   5.0640473e-03   5.8846843e-03   4.5372473e-03   2.9889995e-04   1.4503455e-03   2.4048820e-03   5.6473356e-04   2.1694139e-03   1.2760726e-03   5.0001009e-05   4.2762331e-04   1.0752642e-03   1.9285330e-03   1.4459840e-03   2.6007132e-04   3.8461838e-03   8.8079628e-04   1.6321310e-02   8.6239000e-03   5.6156570e-03   7.5378743e-03   9.9017513e-03   7.7506746e-03   1.1476827e-02   6.6644304e-03   8.3385285e-03   8.2160708e-03   4.1911289e-03   5.5513041e-03   4.9688023e-03   1.0246372e-02   1.4224144e-02   8.3426053e-03   5.0473242e-03   6.4114140e-03   1.3391748e-02   6.3862951e-03   6.6425216e-03   9.9154668e-03   8.8906649e-03   3.1510729e-03   6.5653256e-03   3.9767727e-03   3.0148559e-03   4.1087786e-03   8.8891438e-03   2.8692574e-03   5.5219515e-03   2.9822205e-03   9.7392578e-03   2.7559926e-03   9.1569917e-03   5.1978118e-03   1.2376315e-02   5.7353587e-03   4.3202502e-03   3.8711781e-03   8.5388416e-03   4.7844327e-03   8.6239000e-03   8.5671038e-03   9.9856257e-03   5.9819812e-03   5.1316831e-03   4.4550423e-03   1.1213210e-02   6.9205612e-03   3.8131057e-04   6.7888158e-03   7.3363003e-03   5.3694436e-03   6.8516623e-04   1.8924051e-03   2.8895525e-03   1.2584203e-03   3.1106561e-03   1.9851820e-03   2.2993360e-04   3.0705767e-04   1.8065799e-03   2.2681792e-03   1.9895114e-03   4.4561501e-04   3.2950230e-03   1.3536253e-03   1.9442033e-02   1.0986408e-02   7.5586116e-03   9.6139706e-03   1.2432885e-02   9.8077912e-03   1.4077548e-02   8.4254038e-03   1.0370813e-02   1.0478515e-02   5.7856689e-03   7.4894276e-03   6.8497491e-03   1.2864462e-02   1.7242156e-02   1.0603978e-02   6.8089917e-03   8.2980029e-03   1.5941696e-02   8.0862824e-03   8.7712331e-03   1.2406133e-02   1.0894930e-02   4.6791769e-03   8.5829625e-03   5.4740472e-03   4.5017704e-03   5.7029968e-03   1.1309070e-02   4.0232333e-03   7.1989285e-03   4.2761837e-03   1.2289743e-02   
4.0060693e-03   1.1002553e-02   7.0316526e-03   1.5021687e-02   7.5484658e-03   5.9167328e-03   5.5390768e-03   1.0934157e-02   6.4525090e-03   1.0986408e-02   1.0943118e-02   1.2502015e-02   7.9589531e-03   6.9802991e-03   6.2208864e-03   1.3663387e-02   8.8999016e-03   6.6062542e-03   5.5827314e-03   3.2137911e-03   2.4648342e-04   3.0882054e-03   1.4855278e-03   8.8238305e-04   3.0312113e-03   1.3048774e-03   1.7074342e-04   4.2409993e-05   1.2055084e-03   1.2882692e-03   1.0204762e-03   8.9780466e-05   2.0467514e-03   5.4370125e-04   1.7241595e-02   9.9776023e-03   7.2970604e-03   9.2205394e-03   1.1400153e-02   1.0313298e-02   1.2454804e-02   9.2538420e-03   1.1254653e-02   8.6534668e-03   4.2479840e-03   7.2108683e-03   6.0807502e-03   1.1908599e-02   1.4998922e-02   8.5435101e-03   6.3304436e-03   7.3992448e-03   1.7031719e-02   9.1810782e-03   7.4786833e-03   1.0592315e-02   1.2037933e-02   4.2382770e-03   7.2750145e-03   5.5074473e-03   3.6741461e-03   4.4146887e-03   1.0634443e-02   4.6823366e-03   8.0104480e-03   3.6842552e-03   1.1424845e-02   4.0945647e-03   1.1857190e-02   7.0169427e-03   1.2513676e-02   6.7752703e-03   4.4459599e-03   4.6022279e-03   9.3965771e-03   4.9465269e-03   9.9776023e-03   9.6902181e-03   1.0478640e-02   6.5221232e-03   6.8355801e-03   5.1573625e-03   1.1079009e-02   7.5518079e-03   2.6192775e-03   5.9216859e-03   4.3727502e-03   5.1296754e-03   5.3199310e-03   2.7670517e-03   1.1371035e-03   2.6020928e-03   5.1188394e-03   7.5382072e-03   2.4476107e-03   5.0616317e-03   3.9293810e-03   5.2302586e-03   1.5095801e-02   4.1435464e-03   4.4658108e-03   8.7058429e-04   4.4660793e-04   2.5555941e-04   1.2381894e-03   8.7244097e-04   2.0362511e-03   8.1311609e-04   1.5164656e-03   1.9481959e-03   1.9017569e-03   4.9004150e-04   8.1438359e-04   1.7141435e-03   4.7093504e-03   2.7065864e-03   6.0393489e-05   4.2915211e-04   3.9373776e-03   1.5669301e-03   1.2950596e-03   2.1025536e-03   1.9522193e-03   1.0322026e-03   8.4218839e-04   9.2905490e-05   1.2346326e-03   1.1781032e-03   9.8002670e-04   7.9339756e-04   1.1328834e-03   6.2948320e-04   1.4629322e-03   3.9084846e-04   1.4164544e-03   1.5298834e-03   3.8721999e-03   2.7001180e-04   1.5287601e-03   1.2669024e-03   2.1037534e-03   3.7134859e-03   8.7058429e-04   1.1775044e-03   2.8495857e-03   2.7086756e-03   1.4023687e-03   1.0191451e-03   3.8647977e-03   9.3179883e-04   1.2988786e-03   3.9786961e-03   9.7659791e-03   1.8948352e-03   3.2059058e-03   1.9902291e-03   1.7004118e-03   5.2879179e-03   6.5830280e-03   1.9368261e-03   2.2268965e-03   1.8962421e-03   4.4774419e-03   1.1162650e-02   2.6447647e-03   4.9365528e-03   3.0323131e-03   3.9972109e-03   2.9343480e-03   3.7523678e-03   6.3980393e-03   2.7120874e-03   6.2404178e-03   8.0807935e-03   1.9416596e-03   1.3024398e-03   4.0245181e-03   3.0697725e-03   5.0283287e-03   5.8270256e-03   2.3505477e-03   2.0375391e-03   1.2652208e-03   1.2527532e-02   8.0556250e-03   2.6750700e-03   2.5225228e-03   9.0764566e-03   3.4699713e-03   1.3308823e-03   2.7664696e-03   2.4678015e-03   9.5729618e-04   4.3858320e-03   4.4729680e-03   6.8828714e-03   1.1547480e-03   4.8183810e-03   2.7179325e-03   6.3087897e-03   6.0843336e-03   2.6715726e-03   1.3701564e-03   8.5085504e-04   2.7129190e-03   3.6187454e-03   4.3357364e-03   3.0323131e-03   2.8888833e-03   3.1752051e-03   4.0026050e-03   5.6110347e-03   2.1520192e-03   2.0021167e-03   9.3001678e-04   2.6139835e-03   9.7625461e-03   4.3820793e-04   2.7906691e-03   3.4740135e-03   1.5471352e-03   3.7324822e-03   
3.8774805e-03   1.8049144e-03   1.0166909e-03   9.6586952e-04   2.7620874e-03   5.6379477e-03   1.4267399e-03   9.9420936e-03   6.9534902e-03   7.2046628e-03   7.1611325e-03   8.0864542e-03   1.1050679e-02   7.0236664e-03   1.0684883e-02   1.2982713e-02   4.4982901e-03   2.1523458e-03   7.1621069e-03   5.3786825e-03   9.3055499e-03   9.5487861e-03   4.3879387e-03   5.0693092e-03   4.4931267e-03   1.8805226e-02   1.2143973e-02   5.1616364e-03   5.8432704e-03   1.4239876e-02   5.0396297e-03   4.0643450e-03   5.6688069e-03   3.5911389e-03   2.3422208e-03   8.6067938e-03   6.9700590e-03   1.0540875e-02   2.8498735e-03   9.0488233e-03   4.9258801e-03   1.1781603e-02   8.6822745e-03   5.8089260e-03   4.4337196e-03   2.0267056e-03   4.1354243e-03   6.4148025e-03   4.4976834e-03   6.9534902e-03   6.4275292e-03   5.9584428e-03   5.3006657e-03   8.1437729e-03   3.9023880e-03   4.5299634e-03   3.7988456e-03   2.5123066e-03   1.1946309e-03   2.0097566e-04   1.7928786e-03   5.7143349e-04   1.2667176e-04   4.6644872e-04   4.4712161e-04   1.0683378e-03   5.7468992e-04   7.6554900e-05   3.3664265e-03   2.0582490e-04   1.3553574e-02   7.1051776e-03   4.8941009e-03   6.5257957e-03   8.3098959e-03   7.5373275e-03   9.3233389e-03   6.7621473e-03   8.4467156e-03   6.1291129e-03   2.5944515e-03   4.8248569e-03   3.9028342e-03   8.7515124e-03   1.1627531e-02   6.1178515e-03   4.1203288e-03   5.0572182e-03   1.3529172e-02   6.7984240e-03   5.0687125e-03   7.7357162e-03   9.2015610e-03   2.4873418e-03   4.9175701e-03   3.5355888e-03   2.0473249e-03   2.6605314e-03   7.6511613e-03   3.0628424e-03   5.7307078e-03   2.1169019e-03   8.3320524e-03   2.4574218e-03   9.1102030e-03   4.8098810e-03   9.5736130e-03   4.5225384e-03   2.7314130e-03   2.7690856e-03   6.7080946e-03   3.3506310e-03   7.1051776e-03   6.8798895e-03   7.7001955e-03   4.4569824e-03   4.6305164e-03   3.1848752e-03   8.4170139e-03   5.2045050e-03   6.9296246e-03   2.1360002e-03   3.7784686e-03   3.9888330e-03   1.9332806e-03   3.1165972e-03   3.4622240e-03   6.1843155e-03   4.9689115e-03   2.8276933e-03   8.4143362e-03   3.9488633e-03   1.7390613e-02   8.6823566e-03   4.7125346e-03   7.5297389e-03   9.6551734e-03   5.6073015e-03   1.2632980e-02   4.7967985e-03   5.4595549e-03   9.6779905e-03   6.1919843e-03   4.6437798e-03   4.8811137e-03   9.2634464e-03   1.4470481e-02   9.9239882e-03   5.5116951e-03   7.7393895e-03   8.8498403e-03   3.6104684e-03   7.0913900e-03   1.1068993e-02   5.6668826e-03   3.0002631e-03   7.9032607e-03   4.0760726e-03   3.7279027e-03   5.9729084e-03   7.9919733e-03   2.3383845e-03   3.2272638e-03   4.6096764e-03   8.7690968e-03   3.1364844e-03   7.9578373e-03   3.3962632e-03   1.4631125e-02   6.8663759e-03   6.4834920e-03   4.3461235e-03   8.7377082e-03   5.5255283e-03   8.6823566e-03   8.8561968e-03   1.1024987e-02   6.2645608e-03   3.5745084e-03   5.1960404e-03   1.3983177e-02   8.7265781e-03   1.5360474e-03   2.2738132e-03   6.3957013e-04   1.8470485e-03   1.9436948e-03   8.2753134e-04   1.4870044e-04   1.8575051e-04   1.1511457e-03   4.1570185e-03   4.2481246e-04   1.2022631e-02   7.4558885e-03   6.7319225e-03   7.0097246e-03   8.7236579e-03   1.0059866e-02   8.2686681e-03   9.2543911e-03   1.1644804e-02   5.5866311e-03   2.5062451e-03   6.6946690e-03   5.2622870e-03   9.8836223e-03   1.1452219e-02   5.6355000e-03   4.7339621e-03   4.6436729e-03   1.7620106e-02   1.0378141e-02   5.6675320e-03   7.1003305e-03   1.2674455e-02   4.3836233e-03   4.5891805e-03   4.7259960e-03   3.2437023e-03   2.5249532e-03   8.8366518e-03   
5.2434703e-03   8.9867759e-03   2.2946416e-03   9.5089828e-03   3.7537168e-03   1.0503141e-02   7.8427036e-03   7.7804598e-03   4.4127994e-03   2.3620086e-03   3.9941388e-03   7.2903223e-03   4.7424240e-03   7.4558885e-03   7.1215300e-03   7.3539364e-03   5.7328206e-03   7.4284923e-03   3.9643246e-03   6.4306545e-03   4.3729078e-03   1.0852360e-03   3.9664584e-04   4.5123208e-04   1.2414442e-03   2.3150826e-04   1.4183402e-03   7.0426023e-04   4.8163546e-04   5.0741189e-03   3.9595383e-04   1.0937070e-02   5.0339955e-03   3.1168119e-03   4.5426488e-03   6.0432452e-03   5.3182662e-03   7.1300333e-03   4.7641753e-03   6.1358685e-03   4.4885462e-03   1.6916118e-03   3.0636842e-03   2.3987749e-03   6.3937387e-03   9.2584560e-03   4.5899059e-03   2.5786937e-03   3.4750392e-03   1.0541835e-02   4.8350707e-03   3.4370915e-03   5.7977832e-03   6.8277961e-03   1.3144796e-03   3.3638906e-03   2.1338739e-03   1.0504939e-03   1.6413828e-03   5.4173620e-03   1.8780432e-03   3.9014661e-03   1.1794316e-03   6.0207953e-03   1.3473818e-03   6.9135169e-03   3.1440431e-03   7.6724602e-03   2.9974710e-03   1.7751706e-03   1.5977690e-03   4.8531144e-03   2.4494451e-03   5.0339955e-03   4.8947958e-03   5.8464391e-03   3.1299587e-03   2.9881785e-03   1.9161069e-03   6.7804975e-03   3.6799724e-03   6.0827379e-04   2.1616612e-03   3.6536587e-03   6.0346559e-04   1.7560211e-03   1.3150048e-03   2.0808595e-03   9.6848049e-03   1.5050304e-03   8.5509882e-03   3.4934050e-03   2.3873070e-03   2.2266374e-03   4.3437966e-03   3.4432000e-03   4.8822411e-03   2.6118729e-03   4.2515298e-03   3.9280244e-03   2.1763551e-03   2.4215318e-03   2.3726469e-03   5.2469603e-03   8.8156975e-03   4.6543689e-03   1.1653780e-03   1.5634616e-03   8.3770205e-03   3.5034084e-03   3.2683461e-03   4.7014835e-03   4.7252691e-03   1.7941232e-03   2.2843067e-03   6.9328893e-04   1.6436639e-03   1.4866860e-03   4.0234517e-03   7.8970448e-04   2.8726545e-03   3.0564882e-04   4.8340126e-03   3.3681970e-04   3.2031086e-03   3.4962764e-03   6.5051951e-03   1.2935806e-03   1.7549327e-03   2.1673002e-03   4.7736712e-03   4.5514956e-03   3.4934050e-03   3.7661841e-03   5.5497035e-03   4.3927525e-03   3.3294822e-03   2.0638533e-03   5.8922322e-03   2.1421877e-03   1.0254014e-03   1.7967204e-03   2.4056491e-05   4.9606070e-04   1.5493043e-04   7.3786851e-04   5.7634641e-03   2.2821239e-04   9.8983432e-03   4.8234579e-03   3.6558248e-03   4.1003359e-03   5.8726550e-03   5.9033047e-03   6.1776943e-03   5.1708698e-03   7.0154090e-03   4.1004756e-03   1.5716690e-03   3.6383691e-03   2.8864008e-03   6.7180252e-03   9.3042807e-03   4.3918407e-03   2.3314998e-03   2.6748196e-03   1.1913776e-02   5.9266374e-03   3.6227612e-03   5.2997514e-03   7.7658365e-03   2.0655115e-03   2.8598574e-03   2.0567764e-03   1.5031818e-03   1.3330059e-03   5.6389436e-03   2.2318728e-03   4.9186471e-03   7.3077456e-04   6.3335619e-03   1.3529510e-03   6.4463256e-03   4.4695359e-03   6.6841393e-03   2.3650796e-03   1.3974597e-03   2.1137539e-03   5.1383865e-03   3.4413190e-03   4.8234579e-03   4.7468419e-03   5.6953492e-03   3.9468482e-03   4.2095924e-03   2.1520165e-03   5.7168561e-03   2.8181362e-03   2.6785665e-04   8.6267880e-04   1.5080867e-03   1.0657562e-03   9.1259932e-05   3.2834346e-03   5.6400090e-04   1.5768182e-02   8.4481987e-03   5.6912110e-03   7.5460195e-03   9.7379656e-03   8.1209399e-03   1.1078194e-02   7.1125751e-03   8.8544675e-03   7.7247994e-03   3.7380313e-03   5.6213520e-03   4.8502413e-03   1.0137958e-02   1.3681745e-02   7.7719782e-03   4.9953780e-03   
6.2009166e-03   1.4059422e-02   6.9488117e-03   6.3504999e-03   9.4504672e-03   9.5001122e-03   3.1213299e-03   6.2307927e-03   4.0875652e-03   2.8287725e-03   3.7400310e-03   8.8571631e-03   3.1845212e-03   5.9757813e-03   2.8285304e-03   9.6559261e-03   2.8616391e-03   9.5937311e-03   5.3787128e-03   1.1658427e-02   5.5682032e-03   3.8827352e-03   3.6680728e-03   8.1913042e-03   4.3904219e-03   8.4481987e-03   8.3060404e-03   9.4592722e-03   5.6422254e-03   5.2631423e-03   4.2070121e-03   1.0442544e-02   6.5544595e-03   1.6777879e-03   1.6905742e-03   1.4559893e-03   2.3696578e-04   1.7661444e-03   8.8626613e-04   1.8929605e-02   1.1198434e-02   8.2126916e-03   1.0357192e-02   1.2691977e-02   1.1286676e-02   1.3897779e-02   1.0124890e-02   1.2174180e-02   9.8437553e-03   5.0898821e-03   8.1181082e-03   6.9678076e-03   1.3160374e-02   1.6460126e-02   9.6980805e-03   7.2881507e-03   8.4988687e-03   1.8112744e-02   9.9154333e-03   8.5214392e-03   1.1906445e-02   1.2939064e-02   4.9240987e-03   8.3797448e-03   6.3232280e-03   4.3836801e-03   5.2916266e-03   1.1816149e-02   5.2785357e-03   8.7418147e-03   4.4568647e-03   1.2648763e-02   4.7832566e-03   1.2911612e-02   7.7565330e-03   1.3968530e-02   7.8153448e-03   5.3323341e-03   5.3832666e-03   1.0542156e-02   5.6397042e-03   1.1198434e-02   1.0902962e-02   1.1744032e-02   7.3882415e-03   7.5970260e-03   6.0277743e-03   1.2454722e-02   8.6970669e-03   6.8936185e-04   2.4185836e-04   6.5755985e-04   5.6966351e-03   2.3035735e-04   9.8692354e-03   4.6426684e-03   3.3219514e-03   3.9792968e-03   5.6658817e-03   5.5182925e-03   6.1624599e-03   4.8454827e-03   6.5522795e-03   4.0009447e-03   1.4714328e-03   3.2975731e-03   2.5896752e-03   6.3872309e-03   9.0422907e-03   4.2582444e-03   2.2027730e-03   2.6736829e-03   1.1276803e-02   5.4428314e-03   3.3876877e-03   5.2005260e-03   7.2827649e-03   1.7342875e-03   2.7931367e-03   1.8984292e-03   1.2582586e-03   1.2661063e-03   5.3418398e-03   1.9890109e-03   4.4646783e-03   7.0136727e-04   6.0091268e-03   1.2000111e-03   6.3087140e-03   3.9600850e-03   6.7054154e-03   2.3290598e-03   1.3528267e-03   1.8394373e-03   4.8617299e-03   3.0806223e-03   4.6426684e-03   4.5595872e-03   5.5227532e-03   3.5904469e-03   3.7269772e-03   1.9354221e-03   5.7786823e-03   2.8379142e-03   1.6160265e-04   9.1316164e-04   4.5133200e-03   3.9636483e-04   1.2999342e-02   7.7990273e-03   6.7507696e-03   6.8565065e-03   9.1164934e-03   9.6187705e-03   8.8289255e-03   8.5171572e-03   1.1041936e-02   6.3870800e-03   3.1070828e-03   6.7336821e-03   5.5817760e-03   1.0377938e-02   1.2745315e-02   6.6393015e-03   4.6463353e-03   4.6865341e-03   1.7095703e-02   9.6546615e-03   6.2703784e-03   7.8942299e-03   1.1919237e-02   4.5195099e-03   4.9936591e-03   4.3235727e-03   3.5370732e-03   2.9138150e-03   9.0955590e-03   4.5162367e-03   8.4195838e-03   2.1592479e-03   9.9327314e-03   3.3056202e-03   9.5169563e-03   7.9097267e-03   8.9017922e-03   4.4124132e-03   2.8482486e-03   4.3995745e-03   8.1140558e-03   5.6448175e-03   7.7990273e-03   7.6396844e-03   8.3822247e-03   6.5839845e-03   7.5512097e-03   4.3854717e-03   7.5528676e-03   4.6977929e-03   6.1532842e-04   4.4755911e-03   9.8852555e-05   1.1287330e-02   6.2297232e-03   5.1189561e-03   5.6158860e-03   7.4118621e-03   7.9100135e-03   7.4458980e-03   7.1082862e-03   9.2159095e-03   4.9487763e-03   1.9996447e-03   5.0869737e-03   4.0020507e-03   8.3714289e-03   1.0564909e-02   5.1004357e-03   3.5205852e-03   3.7366566e-03   1.4679778e-02   7.9564069e-03   4.6553854e-03   
6.3553187e-03   1.0091302e-02   3.0597107e-03   3.7953264e-03   3.3093658e-03   2.2411736e-03   1.9075393e-03   7.2834604e-03   3.5416682e-03   6.7651117e-03   1.4482690e-03   7.9807479e-03   2.4205104e-03   8.4840253e-03   5.9704010e-03   7.4790367e-03   3.4333236e-03   1.8691676e-03   2.9457380e-03   6.2606998e-03   3.9378134e-03   6.2297232e-03   6.0227040e-03   6.6475915e-03   4.7553767e-03   5.6493596e-03   2.9997214e-03   6.2979764e-03   3.7014773e-03   2.9064840e-03   2.5951953e-04   1.5239927e-02   8.3975944e-03   6.0152535e-03   7.5736149e-03   9.7205279e-03   8.7194769e-03   1.0665619e-02   7.7107157e-03   9.6513260e-03   7.3495285e-03   3.4129734e-03   5.9493098e-03   4.9891659e-03   1.0300134e-02   1.3429483e-02   7.3762928e-03   4.9967161e-03   5.9349958e-03   1.5151045e-02   7.8143476e-03   6.2821596e-03   9.0873094e-03   1.0390318e-02   3.3768176e-03   5.9427925e-03   4.2522990e-03   2.8707879e-03   3.4399883e-03   9.0442431e-03   3.6087055e-03   6.7269934e-03   2.6557188e-03   9.8316941e-03   3.0241465e-03   9.9672648e-03   5.9844482e-03   1.0964814e-02   5.3765757e-03   3.5060112e-03   3.7251338e-03   8.1205003e-03   4.4033527e-03   8.3975944e-03   8.1986819e-03   9.1329776e-03   5.6810271e-03   5.8010063e-03   4.1618459e-03   9.6695634e-03   6.1457695e-03   3.7030792e-03   2.6425725e-02   1.8599298e-02   1.5467357e-02   1.8635118e-02   2.0442342e-02   2.0541696e-02   2.1259986e-02   1.9510831e-02   2.1931276e-02   1.5294793e-02   9.1682566e-03   1.5285848e-02   1.2953400e-02   2.0823480e-02   2.2513198e-02   1.4419732e-02   1.4350471e-02   1.5480442e-02   2.9209010e-02   1.9066274e-02   1.4266029e-02   1.8093335e-02   2.3152907e-02   1.0464507e-02   1.4480769e-02   1.3685178e-02   9.2298310e-03   1.0125817e-02   1.9700965e-02   1.2730523e-02   1.7262155e-02   1.0225203e-02   2.0352884e-02   1.1535854e-02   2.3443654e-02   1.4503051e-02   1.9462802e-02   1.4723300e-02   9.8246219e-03   1.0395301e-02   1.6330792e-02   8.8773452e-03   1.8599298e-02   1.7720786e-02   1.7175716e-02   1.1774527e-02   1.4190018e-02   1.1287889e-02   1.7334951e-02   1.4959707e-02   1.2176711e-02   6.5819636e-03   5.0346277e-03   6.0397395e-03   7.7827629e-03   7.8599708e-03   8.1942510e-03   7.0899174e-03   9.0304038e-03   5.3331159e-03   2.1084043e-03   4.9833227e-03   3.9086435e-03   8.5223444e-03   1.0895531e-02   5.3819973e-03   3.7720778e-03   4.2725796e-03   1.4381123e-02   7.5978371e-03   4.7474248e-03   6.8472012e-03   9.8808760e-03   2.7678758e-03   4.1999833e-03   3.4360629e-03   2.0712504e-03   2.1185414e-03   7.4497841e-03   3.4016631e-03   6.4264729e-03   1.6858980e-03   8.1251590e-03   2.4514173e-03   8.9161864e-03   5.4840504e-03   8.2209587e-03   3.8745808e-03   2.1076714e-03   2.7824071e-03   6.3561971e-03   3.5186798e-03   6.5819636e-03   6.3377454e-03   6.9667686e-03   4.4964760e-03   5.2124475e-03   2.9917795e-03   7.0285944e-03   4.2716151e-03   1.5675754e-03   4.2437140e-03   2.9528525e-03   1.2801911e-03   5.5300675e-03   5.2313183e-04   7.0373258e-03   7.0028853e-03   1.5817688e-03   4.6017698e-03   4.2905415e-03   3.9763653e-03   1.7507004e-03   9.8088526e-04   2.0205251e-03   3.8651167e-03   2.8498196e-03   8.0083780e-03   8.7219602e-03   2.5429883e-03   8.6090275e-04   7.9906965e-03   6.1518285e-03   2.2097973e-03   5.7963314e-03   5.7460009e-03   4.2262969e-03   2.1872477e-03   8.9731860e-03   7.8793225e-03   5.8991938e-03   1.8999235e-03   7.1341168e-03   7.0170212e-03   6.4409068e-03   6.9062404e-04   3.2402506e-03   4.2346770e-03   4.9489360e-03   1.9844891e-03   
6.6121605e-03   1.5675754e-03   1.4409377e-03   1.2937675e-03   4.5154908e-03   6.0031850e-03   4.0189646e-03   1.2526895e-03   2.2326743e-03   6.7976748e-04   4.4251056e-04   5.1964468e-05   1.5314410e-03   4.7093005e-04   2.2872076e-03   2.4409680e-03   6.1996952e-04   1.8531872e-03   7.0675084e-04   7.4672595e-04   2.9977980e-04   1.6320684e-03   1.1279305e-03   6.4871929e-04   6.2074215e-04   4.0109945e-03   3.1610099e-03   4.6600326e-04   4.3207236e-04   3.1187047e-03   1.7495883e-03   3.8892953e-04   1.4972893e-03   1.7881991e-03   1.3932149e-03   1.6264024e-04   3.1616233e-03   2.5553184e-03   1.9652949e-03   2.4129313e-04   2.2301817e-03   3.0218268e-03   1.9168834e-03   1.5054952e-03   6.4024254e-04   1.6084351e-03   1.4082859e-03   5.9336508e-04   3.2376306e-03   1.1102230e-16   6.3653022e-05   8.5949029e-04   1.8263557e-03   1.6881339e-03   9.7411877e-04   1.7845049e-03   6.1330704e-04   7.1368649e-04   9.0151789e-04   6.1896245e-04   2.2461726e-03   1.0406196e-03   1.1172454e-03   1.6730261e-03   1.8311894e-03   1.8071232e-06   2.5364008e-04   8.3907159e-04   3.3874526e-03   2.1375798e-03   4.5487046e-04   1.1672957e-03   2.8115805e-03   1.2138761e-03   7.0446879e-04   1.8201572e-03   1.6098088e-03   5.2152529e-04   1.0325997e-03   6.5974667e-04   9.0603694e-04   1.4060118e-03   4.3618665e-04   1.3469029e-03   7.5850938e-04   1.4500880e-03   6.7416609e-04   1.0299923e-03   2.5247687e-03   4.1453726e-04   3.7399848e-03   9.2294426e-04   1.7551096e-03   7.2749277e-04   1.1614195e-03   2.4439916e-03   6.7976748e-04   8.1575962e-04   2.1038526e-03   1.4799615e-03   3.3653084e-04   6.6936205e-04   3.8899674e-03   1.5033836e-03   6.1892938e-04   8.9565267e-04   1.1187414e-03   1.0854623e-03   1.6238473e-03   1.6997610e-03   2.5416419e-03   7.7939556e-04   1.2163589e-03   1.1645635e-03   3.7479516e-03   2.5673270e-03   2.8413719e-04   3.4882680e-04   3.5538869e-03   2.1358644e-03   1.3529179e-03   1.5262264e-03   2.0757398e-03   1.9224937e-03   7.7565401e-04   6.3114517e-04   2.1204969e-03   1.7089017e-03   5.9516371e-04   1.8253945e-03   1.7466546e-03   1.3725400e-03   9.7938279e-04   1.2586511e-03   1.2414457e-03   2.1946939e-03   3.0180258e-03   3.2655328e-04   2.0574339e-03   1.9692598e-03   1.9049224e-03   4.6812273e-03   4.4251056e-04   7.9383810e-04   2.4056659e-03   3.2171363e-03   2.0291670e-03   1.5121604e-03   3.2358269e-03   7.8092419e-04   1.6056499e-03   4.5150817e-04   2.5183573e-03   2.4854849e-03   8.1663583e-04   2.4301739e-03   9.3222022e-04   1.0572824e-03   1.7339381e-04   1.3855383e-03   1.3424761e-03   1.0242405e-03   9.7281350e-04   3.6981982e-03   3.3860618e-03   6.8052310e-04   4.8835842e-04   3.1532670e-03   2.2405021e-03   6.9163264e-04   1.9828394e-03   2.3495346e-03   1.9517373e-03   1.3548451e-04   3.7877283e-03   2.8247707e-03   2.6466102e-03   1.4866617e-04   2.8614588e-03   3.2185979e-03   2.1613637e-03   1.5430396e-03   1.0217871e-03   2.1845781e-03   1.8662401e-03   6.6257305e-04   3.7358116e-03   5.1964468e-05   1.2907411e-04   9.1062515e-04   2.1316335e-03   1.9388777e-03   1.3908595e-03   1.9526372e-03   9.5243046e-04   3.3534848e-03   2.1612689e-04   1.1409237e-04   3.7511533e-03   4.4135123e-03   6.6848080e-04   1.6638889e-03   1.5639090e-03   5.3797969e-03   4.6512834e-03   1.2304041e-03   2.1424312e-03   1.1209148e-03   3.9967200e-04   2.4397061e-03   3.5566606e-03   2.8773633e-04   1.9980195e-03   2.5921392e-03   1.0825130e-03   2.8538916e-03   3.5346603e-03   9.3770297e-04   1.4682668e-03   3.5169417e-04   2.8507687e-03   1.3414178e-03   
1.7057263e-03   1.1395352e-03   1.1778138e-03   6.0686882e-03   1.8828714e-03   4.1140616e-03   2.6499598e-03   2.9837515e-03   5.4227271e-03   1.5314410e-03   2.0381041e-03   4.2816261e-03   3.9075302e-03   1.2227927e-03   2.5264748e-03   6.5268770e-03   2.9950818e-03   4.2523782e-03   4.6456758e-03   7.0284879e-04   2.6206169e-03   2.2973667e-03   2.1186617e-03   1.0662204e-03   1.4771683e-03   1.2714179e-03   1.6200996e-03   9.3400517e-04   6.3864823e-03   5.7928278e-03   1.2702417e-03   3.0680262e-04   5.4794582e-03   3.6431092e-03   7.0967201e-04   2.9368020e-03   3.3273895e-03   2.1071133e-03   1.0635659e-03   5.3575283e-03   5.0378701e-03   2.9935636e-03   1.0701915e-03   3.8904013e-03   4.2643219e-03   4.2480632e-03   6.5030200e-04   1.1722241e-03   2.1985353e-03   2.8568074e-03   1.2361632e-03   4.8426872e-03   4.7093005e-04   4.8520973e-04   9.1253531e-04   3.1519557e-03   3.8819044e-03   2.1030647e-03   9.4974174e-04   6.5992898e-04   2.7415573e-04   4.6934818e-03   4.8265016e-03   1.1075187e-03   2.2246224e-03   2.7094103e-03   7.2547386e-03   5.7651516e-03   1.2879775e-03   2.2498412e-03   1.7969640e-03   2.9988677e-04   3.2873557e-03   4.6281528e-03   3.4411066e-04   2.2557578e-03   3.0860670e-03   7.9349579e-04   3.1195585e-03   3.7700064e-03   1.7729958e-03   8.2927671e-04   3.1955044e-04   2.5127017e-03   2.3924363e-03   1.2355886e-03   6.3306711e-04   1.7271405e-03   7.3517242e-03   1.9482896e-03   4.3986367e-03   3.1257329e-03   4.1788193e-03   6.3227646e-03   2.2872076e-03   2.9291555e-03   5.5970414e-03   4.9483109e-03   1.7952206e-03   3.0186434e-03   7.6597710e-03   3.3340783e-03   5.0895780e-03   5.6951678e-03   1.1683986e-03   2.4173290e-03   2.2930538e-03   6.6766405e-03   6.0598142e-03   2.0196993e-03   3.2103351e-03   7.1260012e-04   2.1040770e-04   3.4457175e-03   4.8676879e-03   4.7880714e-05   2.6160358e-03   3.7638559e-03   1.6534967e-03   3.7166920e-03   4.7397184e-03   1.5966719e-03   1.7314841e-03   3.1225254e-04   3.8216967e-03   2.0442457e-03   2.2822879e-03   1.3421268e-03   1.4265946e-03   7.7649269e-03   2.8740126e-03   5.4160448e-03   3.5155679e-03   4.0306705e-03   6.4534752e-03   2.4409680e-03   3.0445932e-03   5.6145787e-03   4.8826195e-03   1.5492752e-03   3.4872724e-03   8.3107496e-03   4.2635519e-03   8.0787158e-04   1.6588755e-03   9.5088926e-04   1.1215622e-03   1.1319394e-03   1.0749073e-04   1.3651148e-03   1.0419898e-03   7.3951172e-03   5.6721517e-03   2.8717807e-04   1.2167194e-04   6.0929710e-03   1.9467902e-03   2.7497575e-04   2.6021687e-03   1.4711384e-03   7.6653532e-04   1.1979861e-03   4.6497866e-03   4.6528613e-03   2.0491908e-03   1.1214382e-03   3.1087470e-03   5.8331337e-03   2.9008505e-03   5.1851355e-04   1.1077981e-03   7.4180273e-04   1.1189833e-03   3.3213225e-04   1.9786092e-03   6.1996952e-04   3.2630432e-04   1.6788710e-04   1.0717507e-03   2.5327575e-03   6.9916673e-04   4.8860962e-04   5.0860173e-04   1.7852918e-03   8.1925898e-04   2.7637031e-03   3.4415701e-03   7.6048751e-04   1.3454814e-03   1.4168273e-03   9.1102129e-03   5.4634531e-03   7.0608839e-04   1.5253323e-03   6.6930140e-03   9.6900708e-04   7.0882160e-04   2.0455668e-03   3.9468549e-04   1.1422246e-04   2.4972040e-03   3.2985277e-03   4.3265068e-03   9.6084950e-04   2.6351726e-03   1.9457490e-03   6.5435669e-03   2.5549327e-03   2.2787630e-03   1.2770395e-03   6.6117888e-05   4.1362061e-04   1.2709485e-03   9.0111668e-04   1.8531872e-03   1.4879007e-03   1.4236785e-03   8.2396190e-04   2.2228636e-03   3.0260473e-04   1.7887502e-03   9.8733924e-04   
2.2593145e-04   8.4529966e-04   3.3545284e-03   2.0967474e-03   4.8795197e-04   1.2181976e-03   2.8660735e-03   1.2480152e-03   6.7735579e-04   1.8245752e-03   1.6711447e-03   4.7937341e-04   1.0455121e-03   6.9960603e-04   8.5898853e-04   1.3874556e-03   4.5677202e-04   1.3792343e-03   7.7973864e-04   1.4683104e-03   6.8342524e-04   1.0561802e-03   2.6555506e-03   3.7342452e-04   3.7432085e-03   9.6667604e-04   1.7304819e-03   6.7810292e-04   1.1254700e-03   2.3291425e-03   7.0675084e-04   8.2471895e-04   2.0738735e-03   1.3951480e-03   2.9534051e-04   6.3569311e-04   3.8846725e-03   1.5360581e-03   9.9102840e-04   2.6998600e-03   1.1403522e-03   5.9435819e-04   1.1584769e-03   4.5439398e-03   2.4115910e-03   2.2376231e-04   1.3051908e-03   3.1170665e-03   2.4639357e-04   6.5093729e-04   1.0408914e-03   3.2170646e-04   6.8086675e-04   7.3723950e-04   1.9464154e-03   1.6938542e-03   1.2010351e-03   8.6567601e-04   1.2605984e-03   4.0415296e-03   6.0024917e-04   2.8241029e-03   9.3145876e-04   8.7987159e-04   1.5390142e-04   6.1621791e-04   1.2031695e-03   7.4672595e-04   6.4561848e-04   1.3132954e-03   5.7671738e-04   4.3996999e-04   1.2646330e-04   2.7819647e-03   1.1571313e-03   1.1519638e-03   1.4945239e-03   1.5137092e-03   1.7542391e-03   3.0584682e-03   3.1624125e-03   6.9423087e-04   8.0741075e-04   2.9564986e-03   2.1264808e-03   1.1956222e-03   2.4834226e-03   2.4046990e-03   2.4389289e-03   1.1428932e-04   4.1572751e-03   2.6270682e-03   3.3826290e-03   1.0906814e-05   3.3779688e-03   3.9749243e-03   1.6265579e-03   2.0735613e-03   1.7238890e-03   2.6977636e-03   1.8191981e-03   5.1180139e-04   3.3068455e-03   2.9977980e-04   3.0495320e-04   9.6732841e-04   1.7461140e-03   1.4560452e-03   1.4920651e-03   2.5772801e-03   1.7019143e-03   1.0341988e-03   4.0085546e-03   3.6492138e-03   7.4340196e-03   7.9703529e-03   1.4966880e-03   7.8440192e-04   7.7789554e-03   4.4532033e-03   2.2360209e-03   5.9110335e-03   4.1723764e-03   3.6115483e-03   1.8645164e-03   8.6587941e-03   6.9790025e-03   5.9223579e-03   1.3508421e-03   6.9679643e-03   8.8100022e-03   4.3884451e-03   9.3956602e-04   3.8113149e-03   3.5744927e-03   3.3082295e-03   7.5316334e-04   3.6491972e-03   1.6320684e-03   1.1884181e-03   4.5803930e-04   2.1497659e-03   4.0210128e-03   2.8148066e-03   1.4099016e-03   2.8014283e-03   2.0075866e-03   1.7351697e-03   8.4094224e-03   6.5677927e-03   3.9418601e-04   3.5055970e-04   7.1718250e-03   2.0973679e-03   6.4851765e-04   3.3744442e-03   1.5292791e-03   9.4071576e-04   1.7144529e-03   5.4753391e-03   5.4188405e-03   2.5828658e-03   1.5301373e-03   3.8027206e-03   7.3328244e-03   3.1192913e-03   6.2731613e-04   1.7741918e-03   8.4678687e-04   1.1412463e-03   3.3683903e-04   1.4744098e-03   1.1279305e-03   6.7560808e-04   1.3395103e-04   7.7360874e-04   2.7301747e-03   8.0481079e-04   5.3215427e-04   1.0082007e-03   2.3386408e-04   4.5650335e-03   2.1085336e-03   8.9398060e-04   1.5597528e-03   2.5626398e-03   8.9643929e-04   4.6401620e-04   2.1474828e-04   9.4200330e-04   7.4363388e-04   8.8544521e-04   1.1214375e-03   1.5513223e-03   4.7369920e-04   1.2949283e-03   5.0107186e-04   1.9686134e-03   1.5839083e-03   3.0734994e-03   1.1037013e-04   1.0184869e-03   9.4459481e-04   1.6215035e-03   3.1102930e-03   6.4871929e-04   8.4571319e-04   2.1956968e-03   2.1853516e-03   1.4074387e-03   6.7079422e-04   3.0128980e-03   5.5375407e-04   5.9745601e-03   3.5596915e-03   1.0825427e-03   1.1193941e-03   3.8619013e-03   1.7868782e-03   2.6699310e-04   7.4652731e-04   1.5644519e-03   
7.6914730e-04   1.1868249e-03   2.1537215e-03   2.8904145e-03   6.7050048e-04   1.5688544e-03   1.1553603e-03   2.3985919e-03   2.8520067e-03   2.1226295e-03   2.6532317e-05   9.5443067e-04   1.5325109e-03   1.7403036e-03   3.8293675e-03   6.2074215e-04   7.8759073e-04   1.8993585e-03   2.7454611e-03   2.5765044e-03   1.0363780e-03   2.0443972e-03   1.4335859e-04   1.4016332e-03   5.4805806e-03   6.7405649e-03   6.4579738e-04   5.1748633e-03   6.3487765e-03   4.3874825e-03   6.7084849e-03   8.1049162e-03   2.6677096e-03   4.6345484e-03   1.7447861e-03   7.4893653e-03   2.9062036e-03   5.4610122e-03   3.0610024e-03   2.8530196e-03   9.9111331e-03   5.6471487e-03   8.9070297e-03   6.1633735e-03   5.5983777e-03   9.1911029e-03   4.0109945e-03   4.6927139e-03   7.3914201e-03   7.0233589e-03   3.0645626e-03   6.1283907e-03   1.0939049e-02   7.1096522e-03   3.7615860e-03   5.7439366e-03   2.3434979e-04   2.1625792e-03   4.1436319e-03   1.4386017e-03   3.2647240e-03   4.5819077e-03   2.2920937e-03   1.0610016e-03   6.9564930e-05   3.4218787e-03   2.8472107e-03   1.7665885e-03   1.7134963e-03   1.2022963e-03   8.8958832e-03   3.1032776e-03   5.2642601e-03   3.2432815e-03   4.6032265e-03   6.0005810e-03   3.1610099e-03   3.7408101e-03   6.4052558e-03   4.8371122e-03   1.3589516e-03   3.3936977e-03   9.2321743e-03   4.6963171e-03   5.0386940e-04   4.2984797e-03   8.6973796e-04   3.5526497e-04   1.7666127e-03   7.2192984e-04   6.5802622e-04   6.6868074e-04   3.2431480e-03   2.9035420e-03   1.6594087e-03   6.4854242e-04   2.1612810e-03   4.9124735e-03   1.3725331e-03   1.5050690e-03   9.8777331e-04   7.5260407e-04   4.1403958e-04   1.4453279e-04   1.2645371e-03   4.6600326e-04   2.4792089e-04   4.5627999e-04   4.7560079e-04   1.1214322e-03   2.2091820e-04   1.5167980e-03   7.9735767e-04   5.8276719e-03   2.5886189e-03   4.3280198e-04   2.9205975e-03   2.2021650e-03   1.3597486e-03   9.5466012e-04   5.1915037e-03   4.8194588e-03   2.6659023e-03   8.3877743e-04   3.6595156e-03   5.4665486e-03   3.2603866e-03   3.5246614e-04   1.2546385e-03   1.3738455e-03   1.7317120e-03   4.0173517e-04   2.8852033e-03   4.3207236e-04   2.1872967e-04   1.7262684e-04   1.6274416e-03   2.8950267e-03   1.1933707e-03   5.1057942e-04   6.2761497e-04   3.2515433e-03   4.5844712e-03   2.0397580e-03   4.4892831e-03   5.6236454e-03   2.1663608e-03   1.9457792e-03   4.4715761e-04   4.4562273e-03   2.6795180e-03   2.6855442e-03   1.3224094e-03   1.8950095e-03   8.9432615e-03   3.4981521e-03   6.3671383e-03   4.3127165e-03   4.9533188e-03   7.5137514e-03   3.1187047e-03   3.8229023e-03   6.6814123e-03   5.8612561e-03   2.0597987e-03   4.3006086e-03   9.5397630e-03   5.0775992e-03   1.3692874e-03   9.6403546e-04   1.3966479e-04   8.6664284e-04   1.6597338e-03   1.3212159e-03   1.4578093e-03   1.0432500e-03   1.9220792e-03   8.7918940e-04   4.4168060e-03   5.0670419e-04   4.4696559e-03   1.4208846e-03   1.1004494e-03   1.6436932e-04   1.5507648e-03   1.1191058e-03   1.7495883e-03   1.6621469e-03   2.5414268e-03   8.8363295e-04   4.0096791e-04   3.3482530e-04   4.2345389e-03   1.9374158e-03   1.2770848e-03   1.0395507e-03   3.9095383e-04   9.2829494e-04   2.8992176e-03   3.2975300e-03   9.5393320e-04   1.0925514e-03   1.6776296e-03   3.8507253e-03   2.3805613e-03   1.2082502e-03   2.8135004e-04   4.7639114e-04   8.7356741e-04   7.4310336e-04   2.3860646e-03   3.8892953e-04   3.1610282e-04   8.0643344e-04   1.4728895e-03   2.0712058e-03   4.6512964e-04   1.1229948e-03   8.8791739e-05   1.1956364e-03   1.3055469e-03   1.5870433e-03   
3.7223827e-04   1.0219838e-03   4.8252621e-04   2.1760282e-03   1.1580239e-04   1.5036929e-03   1.5974284e-03   4.9006747e-03   5.1331960e-04   1.6812595e-03   1.3616634e-03   2.7816530e-03   3.8434871e-03   1.4972893e-03   1.8359326e-03   3.6884416e-03   3.0552141e-03   1.4996272e-03   1.2110783e-03   4.7697841e-03   1.3648602e-03   3.9191527e-04   1.9808749e-03   1.8113262e-03   2.3841094e-03   7.8154360e-04   2.2164780e-03   1.0439462e-03   5.1766229e-03   1.1157470e-03   3.6934914e-03   1.2626931e-03   5.1659909e-04   5.8959899e-05   1.4097594e-03   7.9470954e-04   1.7881991e-03   1.5872251e-03   2.1165612e-03   7.0368642e-04   9.2823209e-04   1.5533041e-04   3.3160103e-03   1.5048956e-03   2.0235000e-03   2.4966081e-03   3.5807846e-03   4.7284012e-04   2.2730788e-03   1.2853266e-03   5.0519729e-03   2.3762808e-03   2.2444800e-03   6.5500299e-04   2.4750944e-05   4.4815197e-04   1.3292061e-03   1.4780836e-03   1.3932149e-03   1.1924273e-03   1.5277304e-03   1.1896691e-03   2.0673918e-03   2.5400150e-04   1.8225589e-03   5.4227610e-04   2.9880363e-03   1.8314313e-03   2.5238977e-03   5.5757663e-05   2.3698503e-03   2.8213423e-03   1.3040881e-03   2.4190319e-03   1.1221589e-03   2.3332880e-03   1.5386845e-03   7.3215728e-04   3.3801838e-03   1.6264024e-04   2.8457958e-04   1.3051710e-03   1.8856170e-03   1.1599202e-03   1.2252261e-03   2.8396493e-03   1.3307965e-03   7.9081662e-04   1.1208231e-03   3.7513747e-03   2.2532467e-04   1.8915999e-03   1.8066444e-03   7.8138393e-03   1.7396624e-03   2.9887611e-03   2.1626603e-03   4.5716343e-03   4.7705983e-03   3.1616233e-03   3.6125170e-03   5.9695278e-03   4.2653532e-03   1.8208998e-03   2.2513824e-03   7.5822521e-03   3.0950854e-03   2.6638425e-03   2.3329339e-03   1.2833498e-03   1.8503904e-03   7.6221882e-04   7.7332789e-03   2.4545666e-03   4.1819304e-03   2.3678329e-03   3.7122984e-03   4.8118232e-03   2.5553184e-03   3.0167737e-03   5.3596333e-03   3.8038292e-03   8.6990936e-04   2.5148680e-03   7.9707274e-03   3.8479364e-03   3.0845972e-03   3.4545687e-04   3.2912795e-03   2.5772857e-03   4.0614792e-03   4.9082550e-04   6.4992316e-04   1.0688017e-03   2.7640538e-03   2.9796108e-03   1.9652949e-03   2.0639503e-03   3.2701588e-03   2.6492756e-03   2.3500195e-03   8.8549039e-04   3.5901884e-03   9.2048730e-04   3.0276112e-03   3.6195589e-03   1.4632965e-03   2.1763560e-03   1.5224813e-03   2.5475111e-03   1.6768950e-03   5.4331181e-04   3.2527051e-03   2.4129313e-04   2.7799169e-04   1.0453548e-03   1.7267901e-03   1.3027778e-03   1.3633707e-03   2.6503970e-03   1.5759024e-03   2.1731558e-03   1.7887862e-03   5.7263386e-03   8.5148931e-04   1.6383081e-03   1.3458097e-03   3.3594814e-03   3.6389958e-03   2.2301817e-03   2.5225544e-03   4.3534713e-03   3.1787209e-03   1.6954559e-03   1.3061945e-03   5.4111697e-03   1.7803664e-03   4.1741292e-03   7.9197469e-03   2.2821957e-03   5.7150571e-03   5.2344879e-03   5.9478982e-03   9.4860449e-03   3.0218268e-03   3.9157169e-03   7.0424278e-03   7.6315025e-03   4.1845322e-03   4.7751866e-03   8.3019483e-03   3.6628005e-03   5.6294536e-03   2.4239872e-03   2.7647556e-03   9.2421646e-04   1.7874511e-03   2.0292522e-03   1.9168834e-03   1.9458286e-03   3.1481713e-03   1.3908818e-03   1.3324310e-05   1.1782972e-03   5.7639460e-03   3.2058410e-03   2.4140579e-03   2.0956665e-03   3.1463866e-03   1.2261330e-03   4.0134358e-03   1.5054952e-03   1.1240475e-03   4.7360391e-04   2.7309553e-03   5.1301270e-03   2.4137049e-03   8.9922816e-05   1.2592279e-03   8.6164876e-04   1.2616309e-03   1.6936265e-03   
3.4914911e-03   6.4024254e-04   8.0798030e-04   1.9823430e-03   2.5037231e-03   2.1768005e-03   8.4515239e-04   2.3117307e-03   2.1993248e-04   5.6812764e-04   1.3931392e-03   1.4308209e-03   1.6084351e-03   1.3403960e-03   1.4811963e-03   1.2106398e-03   2.4235208e-03   3.5787719e-04   1.6247178e-03   6.1681362e-04   9.2162383e-04   6.4491663e-04   1.4082859e-03   1.1774797e-03   1.5785221e-03   4.0122185e-04   7.3237681e-04   6.4530654e-05   2.8852212e-03   1.3991525e-03   1.5175131e-03   5.9336508e-04   2.9036861e-04   2.0099198e-04   5.5129609e-04   1.5179733e-03   6.8841417e-04   1.3866758e-03   1.2880812e-03   3.2376306e-03   2.6196338e-03   2.0860993e-03   2.5676122e-04   1.7880288e-03   9.1594192e-04   3.5976292e-03   3.2249197e-03   6.3653022e-05   8.5949029e-04   1.8263557e-03   1.6881339e-03   9.7411877e-04   1.7845049e-03   6.1330704e-04   4.6521065e-04   1.3489557e-03   1.6857799e-03   7.7549379e-04   1.3461987e-03   6.0478443e-04   1.0485877e-03   2.7784351e-03   1.1708179e-03   5.9587066e-04   1.2049026e-03   1.1592978e-03   4.9960727e-04   2.5830270e-03   2.2390567e-03   9.5053595e-04   5.2396832e-03   2.8622802e-03   2.1952710e-03   8.7416055e-04   1.1307517e-03
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-cosine-ml.txt b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-cosine-ml.txt
new file mode 100644
index 00000000..7c6b67fa
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-cosine-ml.txt
@@ -0,0 +1 @@
+   2.5695885e-01   2.6882042e-01   2.3470353e-01   2.9299329e-01   2.2742702e-01   3.1253572e-01   2.4986352e-01   3.0770122e-01   2.5191977e-01   2.7931567e-01   2.8133743e-01   2.6316239e-01   2.6067201e-01   3.2982339e-01   2.8993002e-01   2.5506356e-01   2.8728051e-01   2.4952121e-01   2.8613379e-01   2.6894157e-01   2.3606353e-01   2.1670935e-01   2.3470242e-01   2.4294172e-01   2.4376454e-01   2.3228195e-01   2.3554918e-01   2.4851241e-01   2.0917546e-01   2.4971488e-01   2.4264224e-01   2.7405461e-01   1.9086415e-01   2.6346574e-01   2.5908801e-01   2.2138495e-01   2.2910721e-01   2.2169919e-01   2.0660065e-01   2.3207102e-01   2.5554688e-01   2.5153751e-01   2.6073682e-01   2.0919640e-01   3.3984433e-01   2.7503792e-01   2.1709889e-01   2.7068095e-01   3.0307041e-01   2.4529612e-01   2.2987015e-01   2.7736967e-01   3.0310708e-01   3.0544316e-01   1.9205388e-01   2.7098021e-01   2.0722466e-01   2.6387343e-01   2.8998308e-01   2.2633010e-01   2.5177075e-01   1.6347011e-01   2.4036389e-01   2.6485871e-01   2.8491965e-01   2.2273619e-01   2.4511873e-01   2.5930533e-01   2.6589995e-01   2.7797191e-01   2.3357373e-01   2.4279909e-01   2.3544532e-01   1.9447286e-01   2.3993534e-01   2.0856243e-01   2.2125251e-01   2.1988206e-01   2.0590152e-01   2.6441952e-01   2.0052739e-01   2.2978496e-01   2.4483670e-01   2.3879510e-01   2.9398425e-01   2.7541852e-01   2.3777469e-01   2.9151131e-01   2.0672752e-01   2.4584031e-01   2.7475025e-01   2.7064343e-01   2.5603684e-01   2.6165327e-01   2.4233155e-01   1.7892657e-01   2.6111203e-01   1.9965682e-01   2.4201634e-01   2.6281353e-01   3.1928221e-01   1.9731963e-01   2.7752862e-01   2.2633080e-01   2.6783167e-01   2.5447186e-01   2.6424243e-01   2.1960672e-01   2.2984242e-01   2.8788736e-01   2.8681630e-01   2.6949787e-01   2.3993685e-01   2.4440073e-01   2.5010397e-01   2.3230769e-01   2.9879682e-01   2.4200592e-01   2.6957748e-01   2.6073240e-01   2.6355347e-01   2.3403674e-01   2.2411413e-01   2.2956729e-01   2.8105976e-01   2.2913304e-01   2.4898608e-01   2.3304000e-01   2.2692988e-01   2.3728251e-01   2.2552243e-01   2.0364084e-01   2.3359511e-01   2.6619167e-01   2.6666588e-01   2.3666880e-01   2.7239113e-01   2.0146697e-01   2.3045559e-01   2.1695523e-01   2.1387991e-01   2.2366404e-01   2.2809635e-01   2.0901297e-01   2.2441100e-01   2.3418882e-01   2.8552218e-01   2.4609015e-01   2.0282492e-01   2.5940295e-01   2.7407006e-01   2.3344890e-01   2.1179142e-01   2.7047821e-01   2.9832768e-01   2.0859082e-01   2.8881331e-01   1.8384598e-01   2.5286491e-01   2.2012615e-01   2.3615775e-01   2.6845565e-01   2.3356355e-01   2.7164193e-01   2.4179380e-01   2.5247973e-01   2.5637548e-01   3.2126483e-01   2.3100774e-01   2.8832546e-01   2.0043257e-01   2.7918333e-01   2.4884522e-01   2.2904723e-01   2.3738940e-01   2.9461278e-01   2.9782005e-01   3.0332073e-01   2.5175971e-01   3.1203784e-01   2.6611535e-01   2.3713507e-01   2.2203585e-01   2.3602325e-01   2.5093670e-01   2.6860434e-01   3.0137874e-01   2.3759606e-01   2.6840346e-01   1.9200556e-01
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-double-inp.txt b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-double-inp.txt
new file mode 100644
index 00000000..7a770217
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-double-inp.txt
@@ -0,0 +1,20 @@
+8.278938049410748956e-01 9.035293984476246987e-01 1.862188994679486731e-01 8.921151312310462433e-01 2.061859119379583216e-02 3.440636727385729676e-01 1.533779912830328662e-01 5.701372300009802663e-01 5.510020730211558915e-01 1.792362258426003496e-01 8.086175120876580857e-01 6.115487184317183189e-01 1.233471787164852618e-02 1.441643531871039663e-03 4.044309209045688913e-01 3.561398959499905148e-01 1.281985712929750720e-01 8.663300833847481508e-01 8.696027786291581352e-01 3.611727370363766454e-01 5.283537658772616830e-01 1.440241088090119526e-01 3.112457227138950566e-01 6.031280796897889873e-01 9.230324792742518047e-01 2.332121881136874908e-01 3.192652267403439659e-02 3.466206294995559656e-01 2.988687728046366399e-01 5.116749542048093513e-02 2.584975830914494344e-01 4.302023478042227289e-01 8.003972751713522849e-01 9.364931911368097328e-01 9.737098649964673891e-01 4.718038453972229762e-01 4.526591686607864817e-01 1.056485678520797666e-01 5.883019714285405710e-01 3.846092237676981274e-01 6.461500053435473845e-01 1.013239729848824933e-01 1.216151561651189761e-01 5.159668929484659827e-01 8.452074473510227115e-01 9.885170962247968873e-01 7.623883073490128615e-01 2.291163243615434997e-02 5.775530980802381364e-01 7.820699896828091635e-01 8.239186345842965942e-01 3.391800105260227571e-01 9.546318451614538292e-01 3.789677917867695367e-01 4.526533399649290690e-02 8.366786473238587707e-01 3.082636811049858094e-01 1.173936820793450853e-01 7.631994969169442200e-02 2.997416650722183329e-01 5.795208655160232203e-01 3.942350892542011431e-01 1.175126383297261379e-01 4.928232513950027149e-01 9.421293996225950096e-01 8.365391053841342295e-02 6.868059693571844093e-01 3.589527962429440722e-01 7.592939427166059962e-01 5.623849466131448649e-01 2.110746828032050715e-01 9.824683704668600859e-01 2.661230142246236996e-01 6.162272315007123469e-01 5.023254536607497656e-01 5.202854476669782624e-02 5.835090668842095596e-01 7.864642118889143552e-01 2.504012386867506823e-01 6.728308641135989365e-01 4.610793534576096420e-01 4.820508770515909980e-01 9.720403251022265989e-01 3.100069285263498120e-01 7.681017126461753275e-01 7.956539306007082146e-02 2.593389637887737464e-01 1.137852590403054531e-01 3.885303073284454012e-01 8.599094660075957686e-01 5.215167875918280682e-02 1.620908248572288102e-01 1.859236090457663249e-01 6.247716512610480555e-01 3.415128495520775020e-01 7.034903368378029320e-01 6.037564640019568163e-01 2.338969434423310290e-01 1.002104885609900187e-02 7.866058403969036217e-01
+8.033694116033356369e-01 8.653264545544031572e-01 7.468340410754038539e-01 6.362430919910603278e-01 5.120006306625468628e-02 9.503348372633585450e-01 4.697732609626817935e-01 4.221305288459429317e-01 3.153452119838391354e-01 2.991014843442657556e-01 1.190667967280257811e-01 3.486567714509342109e-01 8.289493649885054660e-01 8.454811050800014049e-01 9.149673018211901265e-01 7.708707837193897738e-01 2.640157732122547785e-01 2.107897022189605396e-01 4.207633055054439408e-01 6.719500284654699174e-01 1.458031684893063007e-01 1.800412735886125493e-02 8.402733435220011149e-02 4.206760156883160295e-02 1.376933515041314227e-01 1.716717341022133692e-01 1.788220727652158892e-01 8.224310433402118869e-01 7.729093666867475898e-01 2.064223621025984556e-01 9.592092036227207741e-01 8.312490243754996344e-01 6.673289360369902834e-01 4.632847903690773261e-02 7.643954098358983762e-01 9.359341525615098023e-01 1.914966319163026176e-01 4.536590469402868031e-01 8.640836016538007147e-01 3.941529178175462444e-02 5.602101995205478469e-01 9.263806161941660067e-01 1.555995325944817820e-01 6.172208102950116348e-01 6.335576752812099866e-01 9.766975460368043649e-02 4.475795689539874278e-02 3.248842796104995934e-01 5.700377122149502540e-01 9.066962967256807504e-01 5.458460621505676347e-01 6.833401285581487405e-01 2.887244409544044155e-01 1.316338647016834784e-01 2.325673305245992140e-01 4.150121963188406760e-01 3.834845466366055833e-01 8.149365773968725302e-01 1.867003849450201702e-01 3.170322173543018707e-01 6.832093662682684476e-01 1.729728518929105618e-01 9.236557359702636250e-01 9.152941252150086360e-01 7.224879983096620384e-01 8.557920626598064517e-01 5.344883059251644974e-01 4.876873274449112783e-01 8.308277804506420949e-01 3.916624489322212410e-01 3.459695122273966916e-01 4.033512499027409604e-01 6.555726444913008155e-01 7.138452409380238173e-01 1.683937314599968094e-01 1.769382143486440961e-01 7.588683655178136700e-01 3.750589892880819010e-01 7.525176245126207197e-01 6.083961152538303052e-01 1.145972309907993258e-01 6.239614485809552580e-01 1.307655482065895880e-01 8.530458750670916190e-01 4.801602070124768584e-01 8.168122189863546989e-02 3.793139622744635675e-01 1.496986997776840189e-01 7.129023878302899186e-01 6.830979237438047358e-01 7.635375943876505644e-01 1.824004963251233402e-01 5.764695848992339444e-01 8.865113248731604223e-01 5.784337085544002388e-01 9.700026628755119562e-01 7.318207347905059112e-01 3.851401393936705331e-01 1.774291851193399161e-01 9.763423229242296220e-01
+9.287178470949695175e-01 1.748282433617460718e-01 9.238531711586964734e-01 8.291274445125006443e-01 9.513259272578692416e-01 7.486316801165745494e-01 6.257378457524477300e-01 2.062711693536473101e-01 3.970721244184766130e-01 2.738325225026445597e-01 8.735038948299954642e-01 5.415282140033768066e-01 5.176317904298315398e-01 5.347036264518250093e-01 7.482056965410627258e-01 4.140672582824351800e-01 8.709067272363142376e-01 9.499605569181273079e-01 5.380266748336398619e-01 4.369252161707162241e-01 8.235722216228258397e-03 4.308187193646527691e-01 6.030581482859224129e-01 7.316831195156517920e-01 5.540499846834291420e-01 2.044203040111662872e-01 8.645251782981867583e-01 1.816095717570278545e-01 9.639119168018674966e-01 3.572031072322333634e-01 5.580226816834680248e-01 5.586629875016585478e-01 7.213854320902782780e-01 8.513998260042524580e-01 6.308764347277173723e-02 4.299855362100638567e-01 8.789303907444128150e-01 9.178850359236285783e-01 2.275205845091231582e-01 1.899395443939643213e-01 7.103070862773533944e-01 9.450015289553428399e-01 1.691856364522159595e-01 7.368719616877857925e-01 9.600189536623833231e-01 5.128846522932454244e-01 6.209162727118655578e-02 7.992250598838029907e-01 9.141050280518014937e-01 1.471297785256820978e-01 7.466162372930541524e-01 4.656107650642931084e-01 6.399324135161845728e-01 2.023617619481610230e-01 1.019104648900100996e-01 4.390693688536728700e-02 9.822620353006089600e-01 2.881951852926285529e-01 6.191575015960482098e-02 8.989580763251467932e-01 4.635958631890454429e-01 1.781973138114967270e-02 7.906911683818984571e-02 6.525270776225711167e-02 3.620583622807886925e-01 2.651673718940715796e-01 5.829372395929610651e-01 2.118159824373908595e-01 5.900287159143694504e-01 9.405929925178391215e-01 9.262415619063500971e-01 5.639581506302312475e-01 4.529556154689695635e-02 2.873819210518682166e-01 5.718545934306838996e-01 9.877670791317306742e-01 4.120364488714320927e-01 9.896078045634184583e-01 3.796586997026456523e-01 1.178183652203194098e-01 6.641068305236120795e-01 4.045960610587706618e-03 2.262690437428437340e-01 7.839938005832693957e-01 7.695391333937223743e-01 3.713918392552509884e-01 4.245533341514018399e-01 1.475072494020331915e-01 6.011975181419888514e-01 5.158174017998343741e-01 1.788706151398071764e-01 8.880707130134481986e-01 6.463351030474082659e-01 6.499920635615744624e-01 8.570273676455353318e-01 6.055019270899113515e-01 2.123561211054603159e-02 2.027688787664126968e-01 1.930834215328548487e-01 5.131906052747271518e-01
+2.599990881903107010e-01 6.767857524909899336e-01 7.188217446352963558e-01 3.037178903357997672e-01 4.252381412838680541e-01 4.070924411439535984e-02 8.426710493038247485e-02 8.301517457289483426e-01 8.254603255702420705e-01 7.258533909453509514e-01 9.958706809470796451e-01 1.323408451651194584e-01 8.523995455245143571e-01 2.572405385832454705e-02 4.715363690065482727e-01 7.920130365690022378e-01 7.613745641534582775e-01 5.108305991695683002e-01 7.908714335912382376e-01 4.641131983754837043e-01 3.112627109831845873e-01 4.218013908715474436e-01 3.291577909008427394e-01 2.538715054071232213e-01 1.362470842487485401e-01 2.716429790290709745e-01 1.485325814161112534e-01 4.514539027544387517e-01 6.900835128673067365e-01 7.793407072946112457e-02 5.938024345270752624e-01 1.497853829906865553e-01 5.399567982652856424e-01 1.419209916759478496e-03 7.719776132867679497e-01 3.130795105576239523e-01 6.670071611167494030e-01 8.900596881158256979e-01 8.011158503301568645e-01 7.089295605187424520e-01 4.671116382997058114e-01 6.682965170673403899e-01 6.524835265739736823e-02 5.454288420771494783e-01 7.751910790556310049e-01 8.192595541387335256e-01 3.098855848167891835e-01 3.689971355659119601e-01 8.666507475054133769e-01 2.749042684253171220e-01 3.566565602478318775e-01 4.838173174723044978e-01 1.032975933616413489e-01 5.063065339610417492e-02 5.791168455729079900e-01 3.573337411289496668e-01 6.714098909652352898e-01 2.917057662433912846e-01 2.654964332620638467e-01 7.171804039048814694e-01 3.314488637898249657e-01 5.230399837442840649e-01 6.866534136026025692e-02 1.252966394621071178e-01 5.349397882659551184e-01 2.841423847455760709e-01 4.158473635710734362e-01 7.197062989831272128e-01 5.123869045047864113e-01 8.675622821594339840e-01 8.097441845042540054e-01 7.317178252133832439e-01 3.300847596465853462e-01 5.922311859141077273e-01 8.852619511417836318e-02 2.673412917259408994e-01 6.878259052441990651e-01 3.223000927116328462e-01 8.859387123976615319e-01 5.722722388300067742e-01 8.254877606669521750e-01 5.705299682290687624e-01 7.046478734972855262e-01 1.316324413616759559e-01 3.056358395675535800e-01 2.396516834600909140e-01 2.041201422493257311e-01 1.610755140653103989e-01 1.617012564641111538e-01 4.449920510036902144e-01 2.731012972755201274e-01 7.826874666257994662e-01 5.193612375350010746e-01 8.688804522977213729e-01 3.742157602758655610e-02 6.649628920608219307e-01 5.978149424619171315e-01 5.345645500553952711e-01 9.443202650415919441e-01 6.105837075491723498e-01
+6.387761328141735584e-01 4.210087412162694109e-01 3.777306694964789324e-01 3.576349403292201634e-01 7.272699618880260619e-01 9.173392803607671731e-02 1.212535698300880593e-01 3.871229381194544183e-01 7.735150198351389284e-01 4.687200483013695962e-01 5.161778571874678923e-01 9.839646447226980674e-01 8.626932748911960713e-01 9.618485576577924245e-01 2.997996427525421170e-01 3.955404657388794654e-01 8.480126027102616870e-01 8.194992325050480808e-01 2.800213436873294492e-01 7.188391466620779324e-01 2.289766105875049584e-01 3.838547514028287644e-01 1.363553401061209369e-01 2.131328253542326134e-01 2.666779468144075960e-02 3.252883844200405994e-01 4.207860197469600605e-01 2.991365385037647595e-01 9.180779845534067229e-01 8.787338732192649937e-01 5.404510999105649471e-01 1.735493827761729335e-01 7.405224640747264386e-01 3.927355563629583157e-01 3.957109873399460298e-01 1.313029813325972128e-01 6.434498219738993274e-01 7.162213694578050127e-01 6.454998257494671821e-01 3.808124530008022424e-01 2.027201015737234435e-01 6.667632842770417900e-01 1.609491052365198405e-01 1.192413785409307536e-02 4.546773323526854815e-01 7.733541911050207940e-01 3.902525737195561284e-01 4.006023779897505133e-01 5.156517815815246930e-01 6.135685498584592112e-01 7.062153114980724844e-01 5.505858882117883324e-01 3.541308807182554919e-01 5.237151122342533771e-01 5.230649229131387745e-01 1.973541027697351957e-01 7.940327858595511712e-01 9.998588700623055603e-01 3.878271015153827994e-01 4.455006584967207139e-01 8.376414508056347907e-01 3.310833863524501597e-01 8.020469097392601832e-01 1.890327633084128989e-01 3.830289472395409511e-01 8.605040171046141051e-02 9.978185524023941433e-01 8.333890591892906263e-01 4.509013468741837061e-01 6.355778557686052599e-01 1.422515991097305088e-01 9.549891485963732940e-01 7.535776302868563148e-01 9.306005301880662106e-01 2.444330347211679522e-01 5.828218427569508142e-01 1.261938242968304591e-01 2.829188731405173352e-01 8.100246952078660190e-01 2.032739130996042975e-01 3.997268448390065565e-01 3.882777703107541667e-01 1.102505652624736765e-01 5.826634725328041498e-01 6.508734477956333864e-01 1.777287661702166011e-01 4.857051012052149286e-02 6.850537712379254351e-01 5.012281307761055071e-01 3.329154880061502286e-01 5.006261767216675374e-01 4.542081454976160115e-01 6.777801995399822532e-01 4.271303586474960445e-01 7.820470659692947413e-01 5.143462618485082904e-01 4.071273891563575997e-02 8.503383643856671226e-01 6.877485768345151795e-01 6.498843855014626580e-01
+5.539512747016193117e-01 6.329206647391879548e-01 2.798533500321682688e-01 4.825977295850051307e-01 7.625297023172977751e-01 9.081309101427640362e-01 4.124792086535029600e-01 3.647019658319609059e-01 7.529595202332928228e-02 3.072404010876803593e-01 7.890673660964639957e-01 4.079781478915127657e-01 1.440519120695739064e-01 2.538968953804546791e-01 1.595028243568367143e-01 9.066545851872198636e-02 6.367601114674349416e-01 7.622263643880089479e-02 3.015728236404162654e-01 2.424070469873378375e-01 5.711440390241000475e-01 5.717001375511508998e-01 2.237853674032181939e-01 7.112101625753678436e-01 4.321054197012103026e-01 2.505322169010260058e-02 5.877307077139551916e-01 4.415771174397812304e-01 3.766022855145171322e-01 9.803490652619811785e-01 1.229258314111529860e-01 8.108351868714478439e-01 8.558595456964329662e-01 2.168217533833206589e-01 2.034022719386595623e-01 8.687457137579783772e-01 9.013327195854559104e-01 8.156766512673154779e-01 2.717576187546973943e-01 1.756417893371479133e-01 7.555856977566548505e-01 6.708809351312817748e-01 8.998789237886926085e-01 1.936367585946979775e-01 7.949724635465026390e-01 3.164799312763589834e-01 5.493048513173155456e-01 1.608917269168268493e-01 3.048667492191803330e-01 5.599401537727016764e-01 5.779501360842279611e-01 1.296714605309662316e-01 9.160752328055997706e-01 8.058674476110374574e-01 4.385508937505578908e-01 9.212419718012100356e-01 2.249887451242467140e-01 6.283927745352599903e-01 3.778992451536005159e-01 3.571958698867505611e-03 7.276526470528231760e-01 9.051678673805297892e-01 8.465837072484881931e-01 4.548317505393462135e-02 3.189318261926020748e-01 4.446388607398673587e-01 4.292356336344156365e-01 4.203980977718795309e-01 4.698059253071955599e-01 6.151991200848159203e-01 8.479986139404802614e-01 9.870993262459623052e-01 3.164206525899861955e-01 6.464672171639846976e-01 8.508781429592480183e-01 4.733667503354813677e-01 8.076014176740163863e-01 6.671443255679101458e-01 6.639213267047979761e-01 3.681688930741919830e-01 4.679870252651611162e-01 1.790041740686979521e-01 8.446070273663058847e-01 3.350737544979878191e-01 6.600272349677447359e-01 4.356083218487936115e-01 7.995134167346013010e-01 9.083660261041469619e-01 9.743975306734570241e-01 8.144839650654719376e-01 6.865011984586443239e-01 1.709747281999153268e-01 8.534933687161740945e-01 9.494753729726415070e-01 8.140124992294850426e-01 8.936241255316055287e-01 9.087976860818796077e-01 9.030687493451383663e-02 4.025785149840914734e-01 9.592005611533803711e-01
+5.714058727476275523e-01 7.913573761505965365e-02 9.301773447377043036e-01 4.302822433307075256e-01 4.618892554175407783e-01 1.882471300213742760e-01 6.231472878215863487e-01 2.350437450940777717e-01 8.483410480771292894e-01 8.580803842040533036e-01 4.246398783388435350e-01 5.667321565946502604e-01 7.247417018955526480e-02 5.373984417482219333e-01 8.794242091541510931e-01 9.699025554453030162e-01 8.254197752548814160e-01 7.739723972867470492e-01 6.365819416181199841e-01 3.451230687021222820e-02 1.829102490094791644e-02 9.179618383026147965e-01 4.481667270072077214e-01 4.771270250445739380e-01 1.588469404953456454e-01 3.766332499200618633e-01 5.057026248713025751e-02 9.125900914275182352e-01 8.438133644246305076e-01 3.282972411719701222e-01 6.042003956122835584e-01 7.423456085393266290e-01 1.389012737541106546e-02 3.674754266702850991e-02 2.126646727703802586e-01 3.085666164246750887e-01 4.303440338750976757e-01 1.749037978865556342e-01 2.177699993322510519e-01 6.675614739991906355e-01 1.926533336347433512e-01 8.032010572660308600e-01 4.611412981769049679e-01 9.907201268457492827e-01 8.973785930837320235e-01 6.286342392657409128e-01 8.111266245859546364e-01 1.154230969025437092e-01 8.382880466301794176e-01 1.053753927827069115e-01 9.921712862234919328e-01 9.041662667920956631e-01 3.626267376021269362e-01 2.262225368932846425e-02 8.669003741626111204e-01 7.597054897704164089e-01 4.700318514995387442e-01 4.338185014241978665e-01 1.205425463362067573e-01 2.413879270602589111e-01 5.483334840461459025e-01 2.042653841254596925e-01 5.452588940366013270e-01 3.164646091706100339e-01 1.878958248945691301e-01 2.188622304737641855e-01 2.970982599823450698e-01 5.952148400199362976e-01 9.614251220149501176e-01 5.446813400697393392e-01 5.900748097930779146e-01 2.653062526715309621e-01 5.459933097767216692e-01 3.174185404661935550e-01 1.412133354129242457e-01 1.487441669790685594e-01 3.953776242211952674e-01 5.274261039692862418e-01 1.756132307607755072e-01 4.481942852746899630e-01 6.390660088765629521e-01 2.860380430081067571e-01 5.866902519902850166e-03 3.026687645174785946e-02 1.952533570196290924e-01 2.154769096186736066e-01 8.920573593276575064e-01 5.644513191915436767e-01 5.551464696654353492e-01 4.378199413349500579e-01 8.685737643974280608e-01 7.493934764293597173e-02 9.556749726352036234e-01 6.386433482536227890e-01 8.714694524097754691e-02 1.722786161701279628e-01 6.526867532768643176e-01 8.950304705281527662e-01 6.158198776753203152e-01 9.587176904005377809e-01
+7.705718397401561948e-01 3.165816092999733655e-01 4.334200859975760878e-01 8.639807015515663657e-01 5.576514209532534849e-03 2.456745447057938625e-01 1.664686313299922338e-01 9.637084729617834133e-01 1.083448720752323569e-01 1.865218070380464388e-01 3.730358890475884426e-01 5.015351872138350542e-01 7.420710795841709562e-01 4.919420674769692248e-01 3.426558201886464872e-02 8.669984854934246199e-01 2.204243734202966376e-01 4.109792246853891662e-01 4.361732572946559472e-01 6.819306998053020763e-02 9.986304248057148447e-01 4.119289455392274313e-01 8.533050041845835487e-01 3.416914861912183632e-01 6.522191951039880697e-01 4.162803668786793088e-01 9.051674379917418189e-02 4.552378661306888397e-02 2.122677193466918633e-01 7.461518531655018105e-01 4.654688019259497489e-01 7.877564083548750373e-01 4.518328005682387127e-01 7.173857464237374248e-01 6.940056370290903498e-02 2.804574410412373764e-01 6.095681113112718652e-01 3.680596478602831123e-01 1.814569150719304025e-01 6.505055517979729807e-01 2.759585245701871026e-01 1.429501104786028431e-01 7.813891153083207808e-02 8.925314279991185540e-01 6.692056941902108091e-01 1.915141341107173822e-01 5.750233129581091562e-01 2.051961006251528108e-01 3.849013692629975614e-01 9.503788222043518807e-01 7.690419386411734282e-01 9.978147530014782607e-01 1.719584162437415298e-01 4.890758882401113894e-01 7.195660736040896399e-01 2.485818040997200828e-01 9.706486601870933928e-01 5.182604282071262558e-01 8.082072245463804983e-01 4.889961284821118248e-01 8.042893959057633158e-01 3.200685313413229593e-01 8.983245016887355661e-01 2.811495336955205371e-01 3.986095833814048417e-01 8.607229214132059436e-01 4.827620119717191960e-01 6.715610252037491623e-01 9.330824374137768329e-01 7.537710530085762750e-01 9.840804224010484269e-01 2.319352541177217564e-01 9.569114943157627229e-01 5.821928104654411351e-01 6.700479524814679788e-01 5.663434680086896211e-01 8.851091082101365526e-01 6.800562815862243315e-01 3.578475213752868589e-01 2.900164669281133367e-01 8.379170683569914235e-02 9.929972839740475177e-02 5.946248553621906741e-01 1.991332889320840405e-01 8.115065723822508792e-01 2.023388190440008616e-01 4.056545651129230823e-01 2.966825350250481552e-01 7.457176343507545546e-01 9.856015771246517954e-01 2.264338016147812160e-01 8.366528670045663141e-01 6.116829813603242849e-01 2.605933184296719274e-01 5.765962146558850643e-01 5.064075092266390188e-01 5.499615769589756287e-01 9.240234698632640020e-01 7.169900155229913530e-02 3.544181364560751168e-01
+8.154844535553099627e-01 4.797965609394789777e-01 7.476703385713100447e-01 9.086708404761600910e-01 3.191752505450355937e-01 7.611128630021511965e-01 6.246790343299296611e-01 1.942001426217137006e-01 2.789860414631386565e-01 3.236359785042408621e-02 3.178191288741717413e-01 8.372264298357038337e-01 8.872692914664047636e-01 9.589758852077276963e-01 3.123722260380168425e-01 8.980164015338999439e-01 7.260784140459818348e-01 6.567013512265649222e-01 1.028743505926521529e-01 6.821705410750319443e-01 6.889838995316139858e-01 5.587525493094736007e-02 6.921487028366646310e-01 3.616312929861494885e-01 1.673758008792780583e-01 6.626504595920326146e-01 9.125680913222075086e-01 1.424077784972291871e-01 6.508496429060767197e-01 6.615417385775157477e-01 9.654167310675311198e-01 5.536662974550183858e-01 7.092622144968085962e-03 6.694595400455760625e-01 1.828533619119211417e-01 3.421514408394116247e-01 1.242580151818144518e-01 9.888774797458224075e-01 9.777955172739735135e-01 4.271370765628749178e-01 1.211608384809655936e-01 1.580132417172936954e-01 3.242705395708289640e-01 3.268994391754735940e-01 5.213767653645562383e-03 4.475169480357120699e-01 9.593245219293577986e-01 6.994304536782350867e-01 7.063863152769014331e-01 8.381620829497931080e-01 2.760441799736219615e-01 3.755200946645842475e-01 3.627729621737311172e-01 9.518310606719182498e-01 3.577273025276901386e-01 3.991159901003488164e-01 4.187060513068554535e-01 7.422605403637314581e-01 6.697944269780702342e-01 6.020599837037767799e-01 1.571185850817550245e-01 7.519860911185742847e-01 6.635775704496444938e-01 9.487848173531471252e-01 7.900030232338028924e-01 4.143783957270819052e-01 5.618429740858444932e-01 3.737804619062014000e-01 6.179941187802344693e-01 6.553638605616040058e-01 1.009709416658691739e-01 4.935037098582963910e-01 5.485489972455533936e-01 1.024147956480448984e-01 1.195764707555347917e-01 4.910516327810896531e-01 3.551185778630389089e-01 3.857601645798814927e-01 2.074975219600547760e-01 2.084038664460790002e-01 5.268616653491025037e-01 6.948014877618717833e-01 6.179744044618615817e-01 7.063658085955483168e-01 7.925757227686872630e-01 6.199016959584816577e-01 1.163676037434490107e-01 7.425752264755586252e-01 5.403115665133301215e-01 2.546191951391015840e-01 6.961300925345208501e-01 4.003013072125547467e-01 5.906120962720950995e-02 5.879915846330325824e-01 1.213602408288709800e-01 3.801780679842765576e-01 1.731477742402802722e-01 4.624568816669496485e-01 3.304453744619206823e-01 8.810445876116090869e-02
+5.140190515373614932e-01 1.419225260054487459e-01 7.777845802285945354e-01 3.327562899409282071e-01 8.916875699762913943e-01 7.212852862736146564e-01 5.727327199433507321e-01 5.897820225918504189e-01 7.318614954542906892e-01 7.393985144455500480e-01 4.531340740296823100e-01 9.903061584426188224e-01 4.213350938331624773e-01 4.542342471963995987e-01 9.788786426453045530e-01 1.881707000343846303e-02 8.005433413647761176e-01 1.523502822273363755e-01 5.630164732287495921e-01 5.946603842470724599e-01 1.225547698678740582e-01 1.531136594724622491e-01 8.157973612638946825e-02 2.752046015644330490e-01 6.809045821946161370e-01 6.455289724528190387e-01 3.830356726830793646e-01 4.446144649678575034e-01 4.969038423960672191e-01 5.497873820641221432e-01 9.471879627821714331e-01 5.933046675329255448e-01 4.099233758501530378e-02 5.790409810134594659e-01 9.546095885251496549e-01 2.608616052375664074e-01 6.910160339170060562e-01 1.293709850476291168e-01 6.407264616302255078e-03 6.186037089828009261e-01 5.537861302543241049e-01 3.527421038298221845e-01 8.033232052121624944e-01 8.128114152830284711e-01 8.319982582278713235e-01 5.939566376046836460e-01 2.291090283499520597e-01 5.438101817725821130e-01 6.881146379117278888e-01 2.421968586304659166e-01 5.874047918543783275e-01 6.210102709484541794e-01 7.041387566450251212e-01 6.959223476278774134e-01 9.133877300988062498e-01 9.230647706207778525e-01 6.856884219815310155e-01 6.997988808693775820e-01 6.177944932528769417e-01 5.512902545683161515e-01 5.818280341729102911e-01 6.538267999985679646e-01 6.946673485935980219e-01 4.817938258357623571e-02 9.352008817207906333e-01 4.774162142215661042e-01 5.768063588692976529e-01 4.589648891483899540e-02 7.998946815651652997e-01 4.434260476954369201e-01 9.850053510925722566e-01 6.648626681529369309e-01 4.606293826856903140e-01 3.309042418210563774e-01 1.438901922508034614e-01 7.986559119276418484e-01 7.037818421334554042e-01 3.605119534240813772e-01 3.785959549258922641e-01 9.562491516841659100e-01 4.997955143590974147e-01 1.029540300938682762e-01 1.819017177001992502e-01 3.665425750262368831e-01 1.688063588370778412e-01 7.030735208313992901e-01 8.922375654244527610e-01 1.055706412056253152e-01 2.664739907746691561e-01 9.906029568647586325e-01 6.043845090140997911e-03 3.495786295043534775e-01 5.989441999519146131e-01 6.776147193866479679e-01 7.012991789852640601e-01 1.825838783477321536e-01 7.612293578749116385e-01 1.564769891240175292e-01 2.762157292905387251e-01 7.641900040015234818e-01
+4.746013333880729768e-01 7.609202966712714788e-01 2.537820854162747830e-01 1.709362234877408460e-01 1.886635378734374813e-01 2.439567014093724229e-02 7.640304718272151741e-01 3.483216170435471382e-01 7.744289278738043514e-01 4.190437573644867353e-01 5.319091476394965934e-02 8.580130976087452233e-01 6.259446446786639529e-01 8.793213970773006150e-01 2.441023074890465994e-01 7.753405549489799098e-01 8.760187573193888300e-01 5.946480724009295393e-02 2.873093046571124631e-01 8.710837851946537924e-01 9.103181731924696596e-01 6.534637257615111272e-01 4.128420398577182793e-01 4.905858108576378607e-01 6.178275806701372108e-02 6.368043900016381320e-01 2.865296941219959148e-01 6.371773028539067241e-01 4.924322796636745325e-01 1.709313290387282080e-01 1.856892551689268700e-01 9.592782603102242289e-01 5.402593764193130976e-02 7.287312244390512506e-01 5.679467572000697073e-01 6.255587794305905724e-02 3.069660218141317953e-01 1.089960430557104232e-01 5.550748245336984965e-01 2.555948886689661803e-01 4.140925514039996980e-01 1.180376445052062628e-01 8.832322629884041820e-01 7.784546946701487169e-02 3.177678935473182698e-01 6.681804863429485764e-02 7.047099396645268854e-01 4.133897376851528582e-01 5.600656990480865627e-01 3.883995683475501837e-01 4.459430113152932362e-01 4.214077227574740681e-01 4.763369230200156235e-01 2.701480661168440545e-01 4.296286564389811824e-01 9.601402258758658936e-01 6.326999441846863359e-01 2.442086919688498670e-01 8.407708423957936938e-01 3.626867985638081437e-01 3.641441713291436733e-01 7.932397565989488530e-01 8.902073520619256941e-01 1.929173010337000838e-01 7.309376779324568973e-01 7.305852858337777977e-01 6.510197444582447313e-01 9.512661608643838695e-01 8.461467164366111016e-01 9.245490147941206605e-01 2.658844813385705663e-01 9.538758859344749208e-01 8.215517204998477041e-01 8.217795540390903097e-01 7.569662091300560780e-01 6.262685322871274218e-01 5.597770510574888725e-01 8.155720175123675197e-01 8.545688745180864965e-01 8.986051518529034610e-01 2.477911506572628708e-01 8.462580108996445860e-01 6.065941220995090255e-01 6.500490804973033665e-01 1.120463882674053169e-01 9.299049132942927010e-02 1.388364074229719858e-02 5.901199124540731367e-01 2.795110110544174464e-02 1.644097083463245124e-01 5.341029647603202646e-01 5.276816677181681570e-01 5.439849107754858304e-01 5.371677986392331405e-02 4.515163125788429488e-01 5.036243367087100964e-01 5.721818679625961801e-01 5.271368612400184617e-03 7.720961020546839304e-01 9.015383457479009266e-01
+8.301526916287945701e-01 8.704609696144033348e-01 2.955689129581380303e-01 1.762209253489944727e-01 2.698172933050072553e-01 1.138095349991521399e-01 4.092588531860634760e-01 8.202978121681584467e-01 2.822241377079557356e-01 6.117376205659387223e-01 7.169923068016897938e-01 9.310256256264415331e-02 3.989664052931106708e-01 1.651874953308862803e-02 7.890202597932294282e-02 9.068686774810821305e-01 5.203866694486933842e-01 4.297748572844445336e-01 5.208786995443430712e-01 2.163224881365530816e-01 7.274307306357226111e-01 1.675784956180090823e-01 5.969822786565782691e-01 8.959750832846602453e-02 1.253794151891943764e-01 5.352628522116801291e-01 2.562706125890066300e-01 6.030433202137867044e-01 8.330717547440393833e-01 9.603613683422040914e-02 7.569714244468559450e-01 3.184801677796517128e-01 1.667069341164499896e-01 3.132470247801235619e-01 6.417752836394801097e-01 6.433909425912354152e-02 4.056860213146201710e-01 3.166772891331335327e-01 9.574059746098845247e-01 1.492907964460536974e-01 8.311513764927496162e-01 6.652928354977717396e-01 2.396804722185036374e-01 5.812361618600220270e-01 9.724228681350225445e-01 2.853983236378453414e-01 5.337719354896472979e-01 6.779446197712412081e-01 5.485102006140557540e-01 9.010109155962182648e-01 5.724439967467525037e-01 5.965540527411405947e-01 1.598667990086183321e-01 1.363934512727023041e-01 5.327536522697270405e-01 4.123866715061276222e-01 4.617251396918636841e-01 6.935944951381239898e-01 4.300337419593377453e-01 1.892407993760835128e-01 1.666936825594794724e-01 4.625634184864588772e-01 4.805197636774838355e-02 7.003542850133466224e-01 2.130226006716084974e-03 8.678863343041013367e-01 4.874478520451258623e-01 7.043560228741558848e-01 6.317719270475393722e-01 5.372392256296196766e-01 2.982649812986511995e-01 1.272558612133412037e-01 2.467337555730741983e-01 6.546893200021091097e-01 6.291921159383098150e-01 8.505920470407707379e-01 4.046520490181828578e-01 3.875732096593392795e-01 8.551517214319142024e-01 4.152602284179877090e-01 9.587779137989138611e-01 6.977437468944928112e-01 3.240620775541913634e-02 4.025873770391376061e-01 5.485549335619134270e-01 7.146066156157020455e-01 3.012702534568838519e-01 3.526414480395153594e-01 3.309707144485515284e-01 4.315687014460974913e-01 6.641934530697197747e-01 2.172886798352815507e-01 4.807480925564590057e-01 5.006795397998469177e-01 5.818100901154411586e-01 2.107716091585690732e-01 6.606606051140029301e-01 9.317629042790995797e-01 9.840326342340242061e-01 5.752000964817773898e-01
+9.843444595454536872e-01 1.339523968066913540e-02 6.082172659959028671e-03 7.828244785439336662e-01 5.069653703872761819e-01 2.804896494365415327e-01 2.112385836660957139e-02 6.016479440778699228e-02 7.457477935084961818e-01 3.445503949245375397e-01 4.063494277166557200e-01 8.630275274433116817e-01 5.948396018456146850e-01 1.400867933474212457e-01 6.997522422654076646e-01 5.766519767930851081e-01 5.419976500582250889e-01 7.474121304089603735e-01 2.951600193008566686e-01 7.980170422334191827e-01 1.829036799578199757e-01 6.317636496261300749e-01 2.812612231140887431e-02 5.464747656105657381e-01 3.909873503320924204e-01 4.940850205957293406e-01 8.157850130814222611e-01 5.111092739445756150e-01 9.336823640685747439e-01 7.157105167170837445e-01 7.778989455994214097e-01 1.398722535910470466e-01 5.642653936300449091e-01 3.218717164845980028e-01 9.717427501967056402e-01 3.665791984428700134e-01 3.874321311211759156e-02 9.437600858738082188e-02 5.679526822961932231e-01 5.141385991358327079e-01 7.497840799582222715e-02 5.736515309094968318e-01 1.928132849879083954e-01 6.924244068001785823e-01 1.748389677952593146e-01 4.469577663506929532e-01 1.738527450963387455e-01 7.195287763517190793e-01 8.861150811892871682e-01 1.058443750714600506e-01 1.941789362229970894e-01 9.188374820700584422e-02 7.706736301449305104e-01 6.718642548609364828e-01 5.981029087121966237e-01 4.832880127232569434e-01 3.073688779938709148e-01 5.156312334804930009e-01 1.777418420119527553e-01 8.885462205165685079e-01 4.486254681289014723e-02 1.345398129556140132e-01 7.467627984379916484e-01 4.384565546058830643e-01 7.217750080760946263e-01 3.949550352625393890e-01 4.307950907642028593e-01 6.087680934849041270e-01 3.294516167246774874e-01 1.316682090209408962e-01 1.824857738754404046e-01 5.332379826483617524e-01 3.567136182864261151e-02 1.976220743086236631e-01 5.849349042822560296e-01 1.133174406357483344e-01 7.711522754393199675e-01 8.557306786807005183e-01 3.038353471344266143e-01 4.422747047768413875e-01 2.537160404215925702e-01 2.372714099723788328e-01 5.906462765375103396e-01 4.849909323133470007e-01 2.692576210504484813e-01 4.540849506602829821e-01 9.664935719107857759e-01 2.044371576459835804e-01 4.505417469690352616e-01 7.110722322201217249e-01 3.051357995214963870e-01 8.978937034341526457e-01 6.090501112506481185e-01 6.595415779178889215e-01 6.565426836745864581e-01 6.565608489824376059e-01 2.679102664248229626e-01 3.819533138204529443e-01 6.609794961162380744e-01 2.289558446859882856e-01
+9.274935298374649140e-01 1.174096651033715855e-01 3.030761852629033637e-01 1.605508209527917174e-01 9.601854834873225775e-01 4.341959513718630648e-01 6.320768160802121560e-01 4.213429090614078110e-01 3.695553969042019160e-01 5.965457437116089556e-01 3.520335041155040479e-01 7.702703502247409961e-01 8.571112772962534709e-01 7.904077282532658844e-01 2.247339318352784554e-01 6.823720204503556097e-01 5.883435710582129996e-02 6.786037033312407596e-01 9.721137137641507886e-01 2.042576970668320557e-01 8.394085754806240862e-01 7.433082729552867862e-01 4.072614159870893147e-01 7.451483066617257123e-01 1.699472962789440045e-01 1.753052015584344314e-01 2.255269204788400428e-01 7.794755643807432799e-01 8.407732260470973662e-01 9.301182862857163558e-01 3.701995309382508648e-01 4.481909027604019657e-01 1.261889085033987001e-01 5.600591735875248833e-01 8.244692493969552061e-01 8.969188061645969601e-01 4.802217973423368313e-01 3.556164122713412201e-02 3.393317823164623270e-01 2.491242957582864292e-01 9.863253789366602797e-01 5.585415885291432625e-01 3.702350606362231344e-01 6.766101432620400535e-01 6.999259389475386284e-01 6.676108316872160220e-01 7.870681827507105544e-01 8.746765411259082024e-01 9.125268371282718727e-01 6.638849997061806452e-01 3.253268113800632522e-01 7.968625619248901337e-01 7.584122525443606211e-01 9.028886850768532701e-01 5.381622293189292083e-02 8.097562873320752752e-01 7.092942088208666895e-01 9.915538877968065323e-01 4.319294903327922652e-01 4.307127933969153721e-01 2.768507739641907772e-01 8.076253078288621046e-01 2.569233696442670967e-01 7.595893829724666979e-01 5.768081727897018673e-01 2.537536777625452045e-01 8.874419624636734616e-01 5.091705681832693342e-01 4.811826624992353585e-01 2.794462461940371290e-01 3.846927898276129021e-01 5.129562951959991679e-01 8.515004062224775794e-01 7.103144978683579858e-01 9.526388607201888847e-01 2.367905569592337889e-01 9.137336039323161740e-01 5.722969943101696710e-02 2.019723935481291255e-01 3.098764675203513619e-02 1.121146613918624357e-01 9.937693067724532314e-01 8.476717958861412772e-02 2.059652110343795917e-01 2.139791918759540446e-01 9.137860316709250919e-01 9.530862653366889425e-03 2.027843281683039400e-03 2.506229951837134484e-01 6.244523528392044165e-01 5.523937894075592325e-01 3.712168074031840792e-01 4.218847794299319665e-01 4.827576239387890711e-01 5.244634168840578425e-01 5.182241092381567604e-01 3.308639956263292881e-03 9.370528021570383448e-01 4.694554875029453012e-01 4.950447554541728135e-01
+1.525818111800841814e-01 4.708012184002630107e-02 3.899035965341954846e-01 3.928304521031263929e-01 5.602286661727436945e-01 9.738256658043862313e-01 9.404465779766183475e-01 5.750862754958349088e-01 9.547546956257608741e-01 2.750275291553152535e-01 1.682773435862793265e-01 5.865928471016079726e-04 8.543378154943809255e-01 3.547649971465383079e-01 5.058056647397523031e-01 9.116332486700751137e-02 7.534666421106954726e-01 3.082429494433007733e-01 4.527145111847344916e-01 5.456680635225539255e-01 2.504131242494785914e-01 2.509240770568589296e-01 3.949236999582302898e-01 8.782959620323271821e-03 2.474641132111736752e-01 8.229417958971670943e-01 3.444225768479134420e-01 4.000027489436257522e-01 4.247741954177396417e-01 2.497745404169693373e-02 4.325768602588443423e-01 7.336592463477830117e-01 7.667663267650381975e-02 4.179022553581047683e-01 8.745172741480690126e-01 9.417705509525042817e-02 2.807522782799587446e-01 8.212710101351362590e-01 2.211181944001613386e-01 4.319929503523877168e-01 1.858636923768219873e-02 6.737037795085246694e-01 7.997187114913413275e-01 2.976552505976116647e-01 3.272347030789168887e-01 5.550935453236346406e-01 9.224109746648162522e-01 3.192827922106745708e-01 3.500098324549234530e-01 7.821988386980260888e-01 4.478417135239194380e-01 1.580956175222456572e-01 5.300807813550156844e-01 5.806154798468634581e-01 9.456842911054151868e-01 7.688127895655872956e-01 8.456527833650537840e-01 1.784229089865225770e-01 8.114517450321339087e-01 8.062506298824222428e-01 2.113482500442499523e-01 2.629226789210241666e-01 6.478686221690072022e-01 6.006672861605766300e-02 7.013679843242253131e-01 8.784753961212666828e-01 3.487138165323044880e-02 4.928426758517070461e-01 5.976224683315064512e-01 7.629063997052759616e-01 2.761721278953045422e-01 7.240740503283805696e-01 6.131065729985127888e-01 1.630885615792579957e-01 8.473783868551159060e-01 8.347614542399306448e-02 8.137265626844719657e-01 8.512508664918938539e-01 2.777097816703766320e-01 1.729154355214796990e-01 2.203382750835449766e-01 6.134780912629795857e-01 3.524352564238901753e-01 5.370314860129862256e-01 8.013986113284543578e-02 2.555842138998117852e-01 6.553915758947851389e-01 9.679125599178584061e-01 2.549566319678178150e-01 4.008180804370896633e-01 9.145789951670967310e-01 2.787926039163850511e-01 8.599455912576436933e-02 9.637558000691170967e-02 8.274101203974880692e-01 1.803747268179315411e-01 2.175735407836230095e-01 7.825994939720237742e-01 7.928519890958951599e-02 8.707949373106749213e-01
+6.398420210047787160e-01 5.739624494012524059e-01 3.359672805578653998e-01 1.130399363175038641e-02 3.349439685346782269e-01 2.315484030880912147e-01 4.575228302577399875e-01 1.149494135594463229e-01 2.888244352925943836e-01 3.625470995156252485e-01 3.795973190611611203e-01 6.567047810450010736e-01 1.484039742710284715e-01 9.273251916560719676e-01 4.334256728976307871e-01 6.734771102219323513e-01 9.125080197222198430e-01 4.974393931097168542e-01 8.301481563280355136e-01 4.526450714147856047e-01 2.414236092573898151e-01 8.070129698367667359e-02 7.260400697427102923e-01 1.396509691839398215e-02 2.496450588391967429e-01 4.335741205447194435e-01 3.089314419194891803e-01 9.543503534526003307e-01 5.457977547458532364e-01 3.139663643587058406e-01 5.034762326753475792e-01 4.756788330475764104e-01 6.849334942793482428e-01 3.880666613022351052e-01 6.483446580176778218e-01 5.217503801099343530e-01 5.371145824070304720e-01 3.121260159429154468e-01 8.314121854062171968e-01 4.538695969561833410e-01 8.598896961203845724e-01 9.961993522734106099e-01 8.865717795946430613e-01 7.828987966783660379e-01 3.412415531643435695e-01 7.421170530151157685e-01 4.484104178639959359e-01 6.793217012099640462e-01 3.756179958191659951e-01 7.821287098222597933e-01 6.227726265188193722e-02 8.552983413221663112e-01 4.824668768009222619e-01 2.241531065858231031e-01 4.939536577599041856e-01 5.129566641128722182e-01 1.057984177672518511e-01 9.541452507300716146e-01 3.396646181755047511e-01 7.452588103611947901e-01 5.315559265659929311e-01 5.493475179850665358e-01 5.214824278139198466e-01 5.150075718147916204e-01 1.196075368500321146e-01 9.035665331176232495e-01 7.522653903639873185e-01 6.638708679914825384e-01 5.584174553800479446e-01 5.015819402508836511e-01 5.507698483308445248e-01 5.978677577011723976e-01 8.450418028759657529e-01 3.266677322748618995e-01 1.321610045897971819e-01 2.394354042746985600e-01 2.723972163557076831e-01 5.523301747352814539e-01 5.518043850608547185e-01 5.283968096837132755e-02 8.192733312104071297e-01 2.277106024970321219e-02 1.414998099027269252e-01 6.517281615256080851e-01 1.811694734825117781e-01 9.472370614713256920e-01 5.454497319021770485e-01 1.364119913158231556e-01 8.446142008509562871e-01 7.671725984742419069e-01 2.461161648406858804e-01 1.421724627107351369e-01 6.290652581179481118e-01 7.094144689448004248e-01 4.419656923472803367e-02 6.614741876652251440e-01 8.712193265403500586e-02 4.734931280852430202e-01 5.382037050480286133e-01 1.396459758005891283e-01
+9.709329844415439670e-01 8.998575745276288229e-01 9.151313462895852568e-01 6.920489275523904471e-01 2.892231405199537919e-01 6.750679746268205550e-01 5.515642485826798280e-01 1.065253097812824956e-01 2.957026803465776510e-01 8.937347659632134400e-01 9.800016515925590310e-01 7.745900896182087436e-01 1.570977683146633774e-01 1.482028765821026273e-01 2.111147779712029271e-01 9.683759902485811200e-01 6.550951580826911425e-01 8.728324682592377703e-01 5.044803166579884257e-01 8.285704754811143991e-01 1.693574499337324735e-02 6.032669995180495182e-02 1.687026879086964692e-01 7.701554026145973619e-01 1.429888016593102718e-01 5.881172815379975827e-02 9.704206919487038396e-01 4.450807650730836951e-01 1.597445784258376689e-01 9.849229394397314152e-01 4.220083573536804744e-01 9.357693600374825671e-01 2.313199262338369033e-01 4.556443403861323294e-01 2.590791012828855822e-01 8.438664994487065085e-01 5.519045677502344427e-01 4.702170125676508050e-01 6.814723205638187897e-01 7.418295483665861001e-01 3.684921032028853904e-01 1.501895844844561845e-01 4.214513377519605308e-01 8.600279963652578408e-01 6.625616611189292238e-01 5.200151456470966105e-01 7.881072743086801058e-01 2.771703241081423519e-01 9.034135930616548071e-01 5.848441705791300738e-01 8.341698181274771473e-01 1.966638677318299777e-01 7.059747894371543042e-01 7.013854316067694716e-01 1.828430942760242983e-01 4.745548949934464966e-01 6.306422394641082452e-01 7.760751707194470939e-01 9.813187212598396547e-01 2.293595795266353266e-01 7.749261876107090830e-01 2.384106107787011819e-01 9.721209688979495223e-01 2.715569353686980714e-01 2.915573577694993146e-01 3.579601509630966349e-01 3.085697512342830962e-01 4.070219981627976047e-01 1.989632411372218579e-01 7.330003339460906542e-01 5.397259604481572381e-01 6.931009942216573849e-01 1.385457419653816080e-01 1.140339999976658358e-01 3.980752590866034613e-01 9.471822621683767540e-01 5.476643721405823895e-01 6.824131903515884279e-02 5.844099130744569992e-01 2.346881692012994236e-01 9.436439228519653000e-01 4.855518260479008141e-02 8.157036123302675579e-01 1.169761256455048581e-01 5.532962903488753970e-01 1.100965596251435308e-01 9.789490602992410029e-01 8.433487462016989733e-01 1.272410782852178013e-01 2.885715258680641160e-01 7.990943955388217779e-01 1.565305358979097727e-01 9.160846960406943129e-02 8.521842244411678147e-01 4.474243106736998099e-01 3.843945818845087015e-01 4.710645906071458944e-01 2.398348154123419729e-01 6.435351435258193087e-01 7.656897921129046658e-01
+4.894328120406804539e-01 7.881019629214267574e-01 6.974585354155089512e-01 2.023858939857701156e-01 1.660984914264745926e-01 4.854517801734643534e-01 2.789848572630315715e-01 2.311636522410289718e-01 9.821076233980715608e-01 1.220641257408076052e-01 2.614036146663852866e-01 7.657560715165320220e-01 3.968360577545695378e-01 4.566023622802184434e-02 1.049701948619241598e-02 9.281162949127452766e-01 4.490137965769909201e-01 2.095846458383606725e-01 9.195504656719085679e-01 9.683515436855471004e-01 9.800174878114910060e-01 5.517610861380117804e-01 6.711570559348770670e-01 5.125258050287277989e-01 2.105581493613526423e-01 8.281813206544574868e-01 4.964783994807770995e-01 7.284974208756571645e-01 1.320629592816270348e-01 6.652194518096135045e-01 9.430156297917950958e-01 7.477263137894260003e-01 2.054087806450300979e-01 4.248209124837907247e-01 7.657518666018259257e-02 1.031614100713345028e-01 4.122242287567021712e-01 4.919658859336810686e-01 3.752650167259050651e-01 4.175771429986683270e-01 6.131376293448997927e-01 5.463797405837259591e-01 3.119918548921774004e-01 6.331762507678504459e-01 5.484632429281035559e-01 6.815448032785871302e-01 8.065695507425107991e-02 8.720129122297424207e-01 8.318188557125294480e-03 2.199323537180564170e-02 8.933872719887463454e-01 1.953120287872067706e-02 2.478721941404590234e-01 5.994061179859005994e-01 6.588362611693047155e-01 6.332808851020984564e-01 3.823849348043323326e-01 5.111091324899629251e-01 7.034808459110406531e-01 4.347681568463539481e-01 4.316973576672314961e-01 9.620411080123215664e-01 6.247837467655984467e-01 8.196961678222113301e-01 5.574601810887074294e-01 8.800635018469276094e-01 8.772255241161972528e-01 5.075275933138404527e-01 8.022583187266906224e-01 2.320670802521890286e-01 1.165626629103270195e-01 4.623759662685936744e-01 7.938327000737943617e-02 7.986374689793115378e-01 6.728842751465858862e-01 8.133909095059230765e-01 1.202639390769081329e-01 1.052937257108800262e-01 8.717600467040409473e-02 2.163819956545051104e-01 6.596483385763984852e-01 1.202843170392309258e-02 1.538789195854695091e-01 3.120247727263308901e-01 3.408168327248596308e-01 3.241861797851740556e-01 3.637074533655986208e-01 1.533669345890729119e-01 4.455921334699539660e-01 5.619140093874478437e-01 1.881731359879111887e-01 9.416670800570559052e-01 1.740018593664415247e-01 7.030242331869680505e-01 5.922055553954849172e-01 9.326211623391688077e-01 6.608322881013140027e-01 7.009721551241574478e-01 1.079126054675583202e-01 6.158176671761947940e-01
+5.185079639625639336e-01 9.613742991518259284e-01 5.555312825626229634e-01 2.647628827924735084e-01 6.003697207460141350e-01 5.392112376769145898e-01 6.781186965667050925e-01 9.908971748181496508e-01 4.124155872095397468e-01 9.814941401724619485e-02 2.684237785531295994e-02 1.774652505962848181e-01 1.707589529595294753e-01 4.640932098465534450e-01 2.882179883914587348e-01 7.276822905806898945e-01 6.145789546745269449e-01 1.100959863917608805e-01 6.798859723042820491e-01 9.096984032948918220e-01 3.971368455178179158e-01 2.959494950971321980e-01 3.742088799298171065e-02 1.960739526210202310e-01 7.536102695342027369e-01 6.680915510628401277e-01 4.136507204312135366e-01 3.613996339406737590e-01 3.605422038261204554e-01 7.098503555159476619e-01 8.093719147087541366e-01 6.344097009128880638e-01 3.990082448083617228e-01 2.805918009906902544e-01 7.078488167363675698e-01 9.969917259866583059e-01 9.442054998992396309e-01 1.329075240769165278e-01 6.810681350588387861e-02 8.503491437913293094e-01 8.347117439165431252e-01 2.381858201903953587e-01 7.884260706938626129e-01 7.109907917419661105e-01 6.390916681983604963e-02 6.174365227062991179e-01 5.085733343630816083e-01 1.716846139694149231e-01 9.065664924270055991e-02 5.625330757196970177e-01 3.539663480209681579e-01 8.937139525947165319e-01 3.981380511900556307e-02 7.403597927449541150e-01 3.803872284089604427e-02 6.729519695709765825e-01 5.306080908840085097e-01 2.091237680402112664e-01 5.902903662907804661e-01 2.094778612095482551e-01 7.323447855684165342e-01 3.644574495843493356e-01 2.006215478057034041e-01 3.737617545555030896e-01 5.253471759602216240e-01 4.287889547869583318e-01 7.086098806190446187e-01 4.510792335515292351e-01 6.383187180169215269e-01 8.779355722397681472e-01 4.221338898667141848e-01 6.375840144651815367e-01 8.683057298299173832e-01 6.093730356952498095e-01 9.297141161056151626e-01 7.770838342807246946e-01 6.549661287008456956e-02 2.835060738158660110e-01 4.474138867374952699e-01 8.530336387421445510e-01 3.160209657891883683e-01 8.301538680518486535e-01 6.646903097549101691e-01 7.187130118106234145e-01 1.651862041735395747e-01 9.578252676762609719e-01 6.490273812885494209e-02 9.777273484666341163e-01 8.930729829254262508e-01 9.851054752118463265e-01 4.094323402286751401e-01 1.139176240124337713e-01 7.612865863899589414e-01 2.266379302491570158e-01 6.998882496157835531e-01 9.945043379099228753e-01 7.111578056749194854e-01 7.806190603886183910e-01 3.410170920712443099e-01 9.446084168886822452e-01
+5.015172758330755931e-01 5.569527971282052237e-01 1.122406928736449094e-01 8.960352822124777461e-01 6.049568585854003810e-02 1.202196001338627918e-01 1.870314295763603196e-01 9.017590029396971296e-01 3.597904628087450485e-01 2.130941062746317671e-01 2.556281834629479111e-01 5.123669364829196438e-01 4.754061129282013409e-01 9.764470380372083369e-01 8.038663983900646848e-01 6.960491266420890666e-01 2.940135977911654264e-01 2.857282759910040326e-03 4.599343225832352999e-02 5.597554495210212977e-01 7.445266674304001908e-01 3.387528030535971180e-01 6.429542922125383031e-01 2.123331785532429627e-01 5.302332654117811739e-01 7.262555377662539557e-01 3.982425859900724507e-01 3.243388301740235402e-01 6.191064123738921898e-01 8.988047781373914580e-01 7.819700328765150088e-01 7.664269102804815992e-01 6.734095355422575757e-03 2.904762329148526945e-01 5.097537644843168625e-01 9.524734606001823423e-01 4.812869576591960463e-01 6.236868013640477493e-01 1.459170943214320726e-01 9.874505139403206844e-01 7.561708982837871407e-01 3.798591332432484924e-01 6.056633451375117438e-01 7.935708170258731764e-01 1.458141583518740569e-01 7.082511296391911237e-01 1.098798009731616343e-02 3.655618484905173160e-01 9.551862303858617009e-01 8.148959351152762487e-02 4.739306219219985294e-02 7.963357515359494876e-01 6.208332695202813944e-01 3.884182264923189409e-01 4.589167647950288531e-01 6.496652974138312775e-01 2.467528128074852889e-01 5.309593064844935206e-01 5.364606369543487574e-01 2.421352989851309756e-01 3.776834556696828660e-02 1.564861233558080267e-01 5.197231021782636740e-01 8.725375120634637494e-01 2.441225493455024820e-01 2.320363366041028330e-01 5.026358683423555185e-01 7.035766000474735771e-01 8.347805591467084563e-01 2.303229841813967393e-01 6.908373419683054850e-01 2.646662377366995056e-01 1.259467197942290007e-01 9.372770922994989595e-01 6.674216272867254940e-01 1.027944489143072238e-01 5.686267290346079806e-01 3.948222804451942958e-01 4.689706944496729868e-01 4.446117700449114807e-02 6.817992275557515081e-01 9.084821829413957106e-01 9.184021015315092518e-01 3.045815734169987632e-01 2.204958624923980537e-03 7.542672057172502553e-01 9.460844786545006269e-01 3.373139094575949848e-02 9.059565314915285494e-01 9.938525461318854504e-01 2.542072661725306437e-01 9.685734112479216229e-02 8.223629541824816203e-01 1.057429056898460118e-01 8.080679390260248063e-01 5.823014244609205914e-01 6.413551528031806725e-01 1.787341975438894170e-01 1.250471413912357388e-01 8.390281297596062782e-01
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-euclidean-ml-iris.txt b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-euclidean-ml-iris.txt
new file mode 100644
index 00000000..86de3c75
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-euclidean-ml-iris.txt
@@ -0,0 +1 @@
+   5.3851648e-01   5.0990195e-01   6.4807407e-01   1.4142136e-01   6.1644140e-01   5.1961524e-01   1.7320508e-01   9.2195445e-01   4.6904158e-01   3.7416574e-01   3.7416574e-01   5.9160798e-01   9.9498744e-01   8.8317609e-01   1.1045361e+00   5.4772256e-01   1.0000000e-01   7.4161985e-01   3.3166248e-01   4.3588989e-01   3.0000000e-01   6.4807407e-01   4.6904158e-01   5.9160798e-01   5.4772256e-01   3.1622777e-01   1.4142136e-01   1.4142136e-01   5.3851648e-01   5.3851648e-01   3.8729833e-01   6.2449980e-01   8.0622577e-01   4.6904158e-01   3.7416574e-01   4.1231056e-01   4.6904158e-01   8.6602540e-01   1.4142136e-01   1.7320508e-01   1.3490738e+00   7.6811457e-01   4.5825757e-01   6.1644140e-01   5.9160798e-01   3.6055513e-01   5.8309519e-01   3.0000000e-01   2.2360680e-01   4.0037482e+00   3.6166283e+00   4.1641326e+00   3.0935417e+00   3.7920970e+00   3.4161382e+00   3.7854986e+00   2.3452079e+00   3.7496667e+00   2.8879058e+00   2.7037012e+00   3.2280025e+00   3.1464265e+00   3.7000000e+00   2.5806976e+00   3.6276714e+00   3.4351128e+00   3.0099834e+00   3.7682887e+00   2.8827071e+00   3.8535698e+00   3.0757113e+00   4.0472213e+00   3.6578682e+00   3.4161382e+00   3.5972211e+00   4.0472213e+00   4.2449971e+00   3.5312887e+00   2.4939928e+00   2.8178006e+00   2.7018512e+00   2.8948230e+00   4.1352146e+00   3.4117444e+00   3.5199432e+00   3.9115214e+00   3.6180105e+00   3.0000000e+00   3.0215890e+00   3.3120990e+00   3.5958309e+00   3.0099834e+00   2.3874673e+00   3.1527766e+00   3.0740852e+00   3.1256999e+00   3.3451457e+00   2.0904545e+00   3.0577770e+00   5.2848841e+00   4.2083251e+00   5.3018865e+00   4.6904158e+00   5.0566788e+00   6.0950800e+00   3.5916570e+00   5.6364883e+00   5.0477718e+00   5.6391489e+00   4.3566042e+00   4.5199558e+00   4.8538644e+00   4.1904654e+00   4.4170126e+00   4.6260134e+00   4.6454279e+00   6.2401923e+00   6.4984614e+00   4.1412558e+00   5.1215232e+00   4.0286474e+00   6.2112801e+00   4.1097445e+00   4.9699095e+00   5.3122500e+00   3.9774364e+00   4.0074930e+00   4.8404545e+00   5.0970580e+00   5.5461698e+00   6.0141500e+00   4.8805737e+00   4.1605288e+00   4.5705580e+00   5.7887823e+00   4.8918299e+00   4.6065171e+00   3.8961519e+00   4.7968740e+00   5.0199602e+00   4.6368092e+00   4.2083251e+00   5.2573758e+00   5.1361464e+00   4.6540305e+00   4.2766810e+00   4.4598206e+00   4.6508064e+00   4.1400483e+00   3.0000000e-01   3.3166248e-01   6.0827625e-01   1.0908712e+00   5.0990195e-01   4.2426407e-01   5.0990195e-01   1.7320508e-01   8.6602540e-01   4.5825757e-01   1.4142136e-01   6.7823300e-01   1.3601471e+00   1.6278821e+00   1.0535654e+00   5.4772256e-01   1.1747340e+00   8.3666003e-01   7.0710678e-01   7.6157731e-01   7.8102497e-01   5.5677644e-01   6.4807407e-01   2.2360680e-01   5.0000000e-01   5.9160798e-01   5.0000000e-01   3.4641016e-01   2.4494897e-01   6.7823300e-01   1.1489125e+00   1.3416408e+00   1.7320508e-01   3.0000000e-01   7.8740079e-01   1.7320508e-01   5.0990195e-01   4.5825757e-01   5.2915026e-01   8.1853528e-01   5.4772256e-01   6.7823300e-01   9.8488578e-01   1.4142136e-01   8.4852814e-01   3.6055513e-01   8.1240384e-01   3.1622777e-01   4.0963398e+00   3.6864617e+00   4.2367440e+00   2.9698485e+00   3.8118237e+00   3.3911650e+00   3.8600518e+00   2.1470911e+00   3.7881394e+00   2.8053520e+00   2.4617067e+00   3.2449961e+00   3.0413813e+00   3.7121422e+00   2.5592968e+00   3.7000000e+00   3.4336569e+00   2.9715316e+00   3.6918830e+00   2.7928480e+00   3.8935845e+00   3.0740852e+00   4.0187063e+00   3.6565011e+00   3.4467376e+00   3.6510273e+00   4.0804412e+00   4.2953463e+00   3.5383612e+00   2.4186773e+00   2.7000000e+00   2.5787594e+00   2.8548205e+00   4.1170378e+00   3.3985291e+00   3.5972211e+00   3.9786933e+00   3.5580894e+00   2.9983329e+00   2.9291637e+00   3.2434549e+00   3.6221541e+00   2.9546573e+00   2.1794495e+00   3.1032241e+00   3.0789609e+00   3.1144823e+00   3.3645208e+00   1.9131126e+00   3.0298515e+00   5.3385391e+00   4.1809090e+00   5.3572381e+00   4.7085029e+00   5.0911688e+00   6.1595454e+00   3.4799425e+00   5.6868269e+00   5.0408333e+00   5.7471732e+00   4.4192760e+00   4.5210618e+00   4.9020404e+00   4.1340053e+00   4.4022721e+00   4.6808119e+00   4.6829478e+00   6.3694584e+00   6.5314623e+00   4.0620192e+00   5.1903757e+00   4.0024992e+00   6.2617889e+00   4.1060930e+00   5.0428167e+00   5.3898052e+00   3.9812058e+00   4.0311289e+00   4.8518038e+00   5.1584882e+00   5.5919585e+00   6.1546730e+00   4.8918299e+00   4.1689327e+00   4.5475268e+00   5.8600341e+00   4.9598387e+00   4.6508064e+00   3.9153544e+00   4.8600412e+00   5.0724747e+00   4.7021272e+00   4.1809090e+00   5.3207142e+00   5.2067264e+00   4.7000000e+00   4.2497059e+00   4.4988888e+00   4.7180504e+00   4.1533119e+00   2.4494897e-01   5.0990195e-01   1.0862780e+00   2.6457513e-01   4.1231056e-01   4.3588989e-01   3.1622777e-01   8.8317609e-01   3.7416574e-01   2.6457513e-01   5.0000000e-01   1.3638182e+00   1.5874508e+00   1.0099505e+00   5.1961524e-01   1.2369317e+00   7.5498344e-01   8.3066239e-01   7.0000000e-01   5.0990195e-01   6.4807407e-01   6.4031242e-01   4.6904158e-01   5.0990195e-01   6.1644140e-01   5.4772256e-01   3.0000000e-01   3.3166248e-01   7.8102497e-01   1.0535654e+00   1.2845233e+00   3.1622777e-01   3.1622777e-01   8.5440037e-01   3.1622777e-01   3.6055513e-01   4.8989795e-01   4.3588989e-01   9.2736185e-01   3.0000000e-01   6.5574385e-01   9.5916630e-01   2.6457513e-01   7.8102497e-01   1.4142136e-01   8.0622577e-01   3.3166248e-01   4.2766810e+00   3.8496753e+00   4.4158804e+00   3.1543621e+00   3.9974992e+00   3.5510562e+00   4.0112342e+00   2.3065125e+00   3.9749214e+00   2.9495762e+00   2.6476405e+00   3.4029399e+00   3.2588341e+00   3.8794329e+00   2.7202941e+00   3.8807216e+00   3.5749126e+00   3.1527766e+00   3.8961519e+00   2.9782545e+00   4.0311289e+00   3.2588341e+00   4.2071368e+00   3.8314488e+00   3.6318040e+00   3.8340579e+00   4.2731721e+00   4.4698993e+00   3.7027017e+00   2.6153394e+00   2.8879058e+00   2.7712813e+00   3.0364453e+00   4.2825226e+00   3.5298725e+00   3.7322915e+00   4.1545156e+00   3.7669616e+00   3.1464265e+00   3.1032241e+00   3.4073450e+00   3.7854986e+00   3.1400637e+00   2.3537205e+00   3.2680269e+00   3.2326460e+00   3.2726136e+00   3.5425979e+00   2.0856654e+00   3.1953091e+00   5.4726593e+00   4.3347434e+00   5.5290144e+00   4.8682646e+00   5.2469038e+00   6.3364028e+00   3.6083237e+00   5.8660038e+00   5.2249402e+00   5.8940648e+00   4.5738387e+00   4.6936127e+00   5.0695167e+00   4.2918527e+00   4.5442271e+00   4.8270074e+00   4.8456166e+00   6.5207362e+00   6.7178866e+00   4.2508823e+00   5.3488316e+00   4.1436699e+00   6.4467046e+00   4.2813549e+00   5.1942276e+00   5.5587768e+00   4.1496988e+00   4.1856899e+00   5.0149776e+00   5.3385391e+00   5.7775427e+00   6.3126856e+00   5.0537115e+00   4.3416587e+00   4.7169906e+00   6.0406953e+00   5.0921508e+00   4.8062459e+00   4.0669399e+00   5.0269275e+00   5.2287666e+00   4.8682646e+00   4.3347434e+00   5.4753995e+00   5.3535035e+00   4.8641546e+00   4.4305756e+00   4.6615448e+00   4.8487112e+00   4.2988371e+00   6.4807407e-01   1.1661904e+00   3.3166248e-01   5.0000000e-01   3.0000000e-01   3.1622777e-01   1.0000000e+00   3.7416574e-01   2.6457513e-01   5.1961524e-01   1.5297059e+00   1.7146428e+00   1.1661904e+00   6.5574385e-01   1.3228757e+00   8.6602540e-01   8.7749644e-01   8.0622577e-01   7.0710678e-01   6.4807407e-01   5.3851648e-01   4.2426407e-01   5.4772256e-01   7.2111026e-01   6.7823300e-01   1.7320508e-01   2.2360680e-01   8.7749644e-01   1.1704700e+00   1.4247807e+00   3.1622777e-01   5.0990195e-01   1.0049876e+00   3.1622777e-01   3.0000000e-01   5.8309519e-01   6.0827625e-01   8.3666003e-01   3.0000000e-01   7.0000000e-01   9.6953597e-01   2.6457513e-01   8.6602540e-01   1.4142136e-01   9.2195445e-01   4.5825757e-01   4.1773197e+00   3.7336309e+00   4.3058100e+00   2.9849623e+00   3.8729833e+00   3.3926391e+00   3.8897301e+00   2.1118712e+00   3.8548671e+00   2.7784888e+00   2.4515301e+00   3.2680269e+00   3.1080541e+00   3.7376463e+00   2.5806976e+00   3.7762415e+00   3.4205263e+00   3.0000000e+00   3.7496667e+00   2.8160256e+00   3.8923001e+00   3.1304952e+00   4.0620192e+00   3.6851052e+00   3.5114100e+00   3.7229021e+00   4.1545156e+00   4.3497126e+00   3.5623026e+00   2.4698178e+00   2.7202941e+00   2.6038433e+00   2.8913665e+00   4.1279535e+00   3.3674916e+00   3.6069378e+00   4.0422766e+00   3.6262929e+00   2.9966648e+00   2.9376862e+00   3.2357379e+00   3.6482873e+00   2.9899833e+00   2.1633308e+00   3.1080541e+00   3.0838288e+00   3.1224990e+00   3.4132096e+00   1.9157244e+00   3.0446675e+00   5.3357286e+00   4.1773197e+00   5.4064776e+00   4.7222876e+00   5.1097945e+00   6.2153037e+00   3.4205263e+00   5.7384667e+00   5.0813384e+00   5.7844619e+00   4.4519659e+00   4.5530210e+00   4.9457052e+00   4.1303753e+00   4.3965896e+00   4.7010637e+00   4.7095647e+00   6.4140471e+00   6.5901442e+00   4.0877867e+00   5.2297227e+00   3.9862263e+00   6.3229740e+00   4.1436699e+00   5.0695167e+00   5.4387499e+00   4.0124805e+00   4.0472213e+00   4.8733972e+00   5.2172790e+00   5.6550862e+00   6.2153037e+00   4.9132474e+00   4.1988094e+00   4.5552168e+00   5.9321160e+00   4.9628621e+00   4.6690470e+00   3.9268308e+00   4.9101935e+00   5.1048996e+00   4.7602521e+00   4.1773197e+00   5.3497664e+00   5.2325902e+00   4.7455242e+00   4.2883563e+00   4.5332108e+00   4.7191101e+00   4.1496988e+00   6.1644140e-01   4.5825757e-01   2.2360680e-01   9.2195445e-01   5.2915026e-01   4.2426407e-01   3.4641016e-01   6.4031242e-01   9.7467943e-01   9.1651514e-01   1.0862780e+00   5.4772256e-01   1.7320508e-01   7.9372539e-01   2.6457513e-01   5.3851648e-01   2.6457513e-01   5.6568542e-01   5.2915026e-01   5.7445626e-01   6.3245553e-01   3.4641016e-01   2.4494897e-01   2.8284271e-01   5.3851648e-01   5.7445626e-01   5.0000000e-01   5.5677644e-01   7.8102497e-01   5.2915026e-01   4.4721360e-01   5.1961524e-01   5.2915026e-01   8.5440037e-01   2.4494897e-01   1.7320508e-01   1.4000000e+00   7.2801099e-01   4.5825757e-01   5.8309519e-01   6.4031242e-01   3.0000000e-01   5.6568542e-01   3.3166248e-01   3.0000000e-01   4.0607881e+00   3.6633318e+00   4.2190046e+00   3.1480152e+00   3.8496753e+00   3.4568772e+00   3.8249183e+00   2.3874673e+00   3.8078866e+00   2.9223278e+00   2.7586228e+00   3.2710854e+00   3.2186954e+00   3.7456642e+00   2.6267851e+00   3.6851052e+00   3.4669872e+00   3.0626786e+00   3.8340579e+00   2.9376862e+00   3.8845849e+00   3.1336879e+00   4.1036569e+00   3.7067506e+00   3.4741906e+00   3.6551334e+00   4.1085277e+00   4.2965102e+00   3.5763109e+00   2.5573424e+00   2.8740216e+00   2.7604347e+00   2.9495762e+00   4.1785165e+00   3.4380227e+00   3.5510562e+00   3.9648455e+00   3.6864617e+00   3.0364453e+00   3.0708305e+00   3.3541020e+00   3.6400549e+00   3.0659419e+00   2.4372115e+00   3.1968735e+00   3.1128765e+00   3.1670175e+00   3.3985291e+00   2.1424285e+00   3.1032241e+00   5.3131911e+00   4.2461747e+00   5.3507009e+00   4.7307505e+00   5.0960769e+00   6.1457302e+00   3.6166283e+00   5.6877060e+00   5.1009803e+00   5.6762664e+00   4.3977267e+00   4.5683695e+00   4.9010203e+00   4.2308392e+00   4.4508426e+00   4.6626173e+00   4.6882833e+00   6.2785349e+00   6.5536250e+00   4.1964271e+00   5.1643005e+00   4.0607881e+00   6.2657801e+00   4.1605288e+00   5.0079936e+00   5.3591044e+00   4.0249224e+00   4.0472213e+00   4.8836462e+00   5.1497573e+00   5.6017854e+00   6.0572271e+00   4.9234135e+00   4.2083251e+00   4.6141088e+00   5.8438001e+00   4.9203658e+00   4.6454279e+00   3.9344631e+00   4.8445846e+00   5.0616203e+00   4.6861498e+00   4.2461747e+00   5.2971691e+00   5.1730069e+00   4.7010637e+00   4.3301270e+00   4.5044423e+00   4.6786750e+00   4.1737274e+00   9.9498744e-01   7.0000000e-01   1.4594520e+00   1.0099505e+00   3.4641016e-01   8.1240384e-01   1.1618950e+00   1.5716234e+00   6.7823300e-01   6.1644140e-01   4.0000000e-01   5.9160798e-01   3.3166248e-01   3.8729833e-01   5.3851648e-01   4.1231056e-01   1.1224972e+00   6.7823300e-01   8.3066239e-01   1.0099505e+00   6.4807407e-01   5.2915026e-01   6.4807407e-01   1.0148892e+00   1.0246951e+00   5.3851648e-01   4.5825757e-01   4.7958315e-01   1.0099505e+00   9.6953597e-01   6.0827625e-01   1.0099505e+00   1.4177447e+00   6.4807407e-01   7.0000000e-01   1.8814888e+00   1.3000000e+00   6.0827625e-01   3.7416574e-01   1.1269428e+00   3.8729833e-01   1.1224972e+00   3.6055513e-01   8.0622577e-01   3.6124784e+00   3.2465366e+00   3.7868192e+00   2.9444864e+00   3.4698703e+00   3.1543621e+00   3.4073450e+00   2.3280893e+00   3.4146742e+00   2.7055499e+00   2.7147744e+00   2.9189039e+00   2.9832868e+00   3.3896903e+00   2.3366643e+00   3.2588341e+00   3.1464265e+00   2.7784888e+00   3.5468296e+00   2.7073973e+00   3.5085610e+00   2.7928480e+00   3.7709415e+00   3.3674916e+00   3.0935417e+00   3.2465366e+00   3.7121422e+00   3.8832976e+00   3.2264532e+00   2.3194827e+00   2.6758176e+00   2.5729361e+00   2.6608269e+00   3.8470768e+00   3.1400637e+00   3.1448370e+00   3.5411862e+00   3.3867388e+00   2.7239677e+00   2.8407745e+00   3.1032241e+00   3.2726136e+00   2.7892651e+00   2.3748684e+00   2.9223278e+00   2.7910571e+00   2.8548205e+00   3.0347982e+00   2.0566964e+00   2.8053520e+00   4.9061186e+00   3.9255573e+00   4.9223978e+00   4.3566042e+00   4.6978719e+00   5.7052607e+00   3.4263683e+00   5.2659282e+00   4.7349762e+00   5.2057660e+00   3.9774364e+00   4.2011903e+00   4.4833024e+00   3.9370039e+00   4.1146081e+00   4.2497059e+00   4.2918527e+00   5.7913729e+00   6.1343296e+00   3.9179076e+00   4.7275787e+00   3.7483330e+00   5.8360946e+00   3.8013156e+00   4.5760245e+00   4.9173163e+00   3.6633318e+00   3.6742346e+00   4.5066617e+00   4.7222876e+00   5.1788030e+00   5.5596762e+00   4.5453273e+00   3.8457769e+00   4.2883563e+00   5.3916602e+00   4.5022217e+00   4.2473521e+00   3.5693137e+00   4.4124823e+00   4.6411206e+00   4.2497059e+00   3.9255573e+00   4.8682646e+00   4.7391982e+00   4.2848571e+00   3.9887341e+00   4.1024383e+00   4.2649736e+00   3.8183766e+00   4.2426407e-01   5.4772256e-01   4.7958315e-01   8.6602540e-01   3.0000000e-01   4.8989795e-01   6.1644140e-01   1.3601471e+00   1.4933185e+00
9.5393920e-01   5.0990195e-01   1.2083046e+00   6.4807407e-01   8.6023253e-01   6.0000000e-01   4.5825757e-01   6.2449980e-01   5.4772256e-01   6.0827625e-01   4.5825757e-01   6.2449980e-01   6.0827625e-01   3.1622777e-01   4.2426407e-01   8.1240384e-01   9.4868330e-01   1.2083046e+00   4.7958315e-01   5.0000000e-01   9.1651514e-01   4.7958315e-01   4.6904158e-01   5.1961524e-01   4.2426407e-01   1.1090537e+00   3.1622777e-01   5.4772256e-01   8.1853528e-01   4.4721360e-01   6.7823300e-01   2.2360680e-01   7.7459667e-01   4.2426407e-01   4.2308392e+00   3.7854986e+00   4.3669211e+00   3.1272992e+00   3.9560081e+00   3.4899857e+00   3.9344631e+00   2.2781571e+00   3.9357337e+00   2.8827071e+00   2.6495283e+00   3.3361655e+00   3.2634338e+00   3.8209946e+00   2.6627054e+00   3.8353618e+00   3.4942810e+00   3.1160873e+00   3.8794329e+00   2.9495762e+00   3.9420807e+00   3.2202484e+00   4.1701319e+00   3.7828561e+00   3.5916570e+00   3.7907783e+00   4.2391037e+00   4.4147480e+00   3.6414283e+00   2.5980762e+00   2.8653098e+00   2.7549955e+00   2.9983329e+00   4.2225585e+00   3.4423829e+00   3.6414283e+00   4.1024383e+00   3.7549967e+00   3.0740852e+00   3.0626786e+00   3.3555923e+00   3.7229021e+00   3.1064449e+00   2.3388031e+00   3.2140317e+00   3.1654384e+00   3.2093613e+00   3.4957117e+00   2.0639767e+00   3.1400637e+00   5.3758720e+00   4.2638011e+00   5.4680892e+00   4.7989582e+00   5.1710734e+00   6.2801274e+00   3.5312887e+00   5.8137767e+00   5.1797683e+00   5.8077534e+00   4.4977772e+00   4.6368092e+00   5.0049975e+00   4.2272923e+00   4.4609416e+00   4.7423623e+00   4.7780749e+00   6.4397205e+00   6.6708320e+00   4.2190046e+00   5.2744668e+00   4.0620192e+00   6.3992187e+00   4.2284749e+00   5.1137071e+00   5.4963624e+00   4.0902323e+00   4.1121770e+00   4.9477268e+00   5.2886671e+00   5.7314920e+00   6.2401923e+00   4.9849774e+00   4.2871902e+00   4.6626173e+00   5.9883220e+00   4.9939964e+00   4.7318073e+00   3.9912404e+00   4.9618545e+00   5.1526692e+00   4.8031240e+00   4.2638011e+00   5.3972215e+00   5.2678269e+00   4.7968740e+00   4.3840620e+00   4.5934736e+00   4.7497368e+00   4.2178193e+00   7.8740079e-01   3.3166248e-01   5.0000000e-01   2.2360680e-01   4.6904158e-01   9.0553851e-01   1.0440307e+00   1.2369317e+00   7.0000000e-01   2.0000000e-01   8.3666003e-01   4.2426407e-01   4.4721360e-01   3.7416574e-01   6.7082039e-01   3.8729833e-01   4.4721360e-01   4.1231056e-01   2.2360680e-01   2.2360680e-01   2.2360680e-01   3.7416574e-01   3.7416574e-01   4.4721360e-01   7.3484692e-01   9.4868330e-01   3.3166248e-01   3.6055513e-01   5.4772256e-01   3.3166248e-01   7.4833148e-01   1.0000000e-01   2.4494897e-01   1.2288206e+00   6.6332496e-01   4.2426407e-01   6.0827625e-01   4.6904158e-01   4.2426407e-01   4.5825757e-01   4.2426407e-01   1.4142136e-01   3.9648455e+00   3.5623026e+00   4.1170378e+00   2.9866369e+00   3.7296112e+00   3.3256578e+00   3.7282704e+00   2.2113344e+00   3.6918830e+00   2.7802878e+00   2.5690465e+00   3.1543621e+00   3.0545049e+00   3.6249138e+00   2.4959968e+00   3.5818989e+00   3.3481338e+00   2.9206164e+00   3.6837481e+00   2.7820855e+00   3.7815341e+00   3.0049958e+00   3.9686270e+00   3.5791060e+00   3.3555923e+00   3.5454196e+00   3.9912404e+00   4.1892720e+00   3.4554305e+00   2.4020824e+00   2.7110883e+00   2.5942244e+00   2.8089144e+00   4.0509258e+00   3.3181320e+00   3.4583233e+00   3.8613469e+00   3.5383612e+00   2.9137605e+00   2.9189039e+00   3.2093613e+00   3.5242020e+00   2.9206164e+00   2.2561028e+00   3.0577770e+00   2.9899833e+00   
3.0397368e+00   3.2771939e+00   1.9697716e+00   2.9698485e+00   5.2191953e+00   4.1206796e+00   5.2478567e+00   4.6162756e+00   4.9899900e+00   6.0448325e+00   3.4741906e+00   5.5803226e+00   4.9749372e+00   5.5973208e+00   4.3000000e+00   4.4474712e+00   4.7968740e+00   4.0975602e+00   4.3358967e+00   4.5661800e+00   4.5793013e+00   6.2040309e+00   6.4420494e+00   4.0472213e+00   5.0695167e+00   3.9395431e+00   6.1587336e+00   4.0373258e+00   4.9142650e+00   5.2621288e+00   3.9051248e+00   3.9357337e+00   4.7686476e+00   5.0447993e+00   5.4927225e+00   5.9849812e+00   4.8093659e+00   4.0865633e+00   4.4833024e+00   5.7463032e+00   4.8311489e+00   4.5398238e+00   3.8223030e+00   4.7455242e+00   4.9628621e+00   4.5902070e+00   4.1206796e+00   5.2009614e+00   5.0823223e+00   4.5989129e+00   4.2000000e+00   4.3977267e+00   4.5891176e+00   4.0607881e+00   5.5677644e-01   1.2845233e+00   6.7082039e-01   4.2426407e-01   3.4641016e-01   1.7916473e+00   1.9974984e+00   1.4317821e+00   9.2736185e-01   1.6124515e+00   1.1489125e+00   1.1575837e+00   1.0862780e+00   8.3066239e-01   9.1104336e-01   8.1240384e-01   6.4031242e-01   8.3066239e-01   1.0049876e+00   9.4339811e-01   4.6904158e-01   4.8989795e-01   1.1401754e+00   1.4491377e+00   1.7029386e+00   5.5677644e-01   7.0000000e-01   1.2569805e+00   5.5677644e-01   1.4142136e-01   8.6602540e-01   8.6023253e-01   6.2449980e-01   3.1622777e-01   9.5916630e-01   1.2609520e+00   4.2426407e-01   1.1575837e+00   3.6055513e-01   1.2083046e+00   7.2111026e-01   4.3794977e+00   3.9230090e+00   4.4977772e+00   3.0886890e+00   4.0435133e+00   3.5383612e+00   4.0767634e+00   2.1794495e+00   4.0360872e+00   2.8930952e+00   2.4939928e+00   3.4336569e+00   3.2326460e+00   3.9012818e+00   2.7367864e+00   3.9711459e+00   3.5707142e+00   3.1511903e+00   3.8768544e+00   2.9427878e+00   4.0570926e+00   3.2969683e+00   4.2083251e+00   3.8457769e+00   3.6905284e+00   3.9102430e+00   4.3324358e+00   4.5287967e+00   3.7229021e+00   2.6134269e+00   2.8337255e+00   2.7184554e+00   3.0413813e+00   4.2720019e+00   3.5085610e+00   3.7920970e+00   4.2320208e+00   3.7656341e+00   3.1543621e+00   3.0561414e+00   3.3615473e+00   3.8183766e+00   3.1320920e+00   2.2293497e+00   3.2449961e+00   3.2465366e+00   3.2771939e+00   3.5860842e+00   2.0049938e+00   3.1937439e+00   5.4972721e+00   4.3104524e+00   5.5821143e+00   4.8795492e+00   5.2706736e+00   6.3953108e+00   3.5028560e+00   5.9143892e+00   5.2316345e+00   5.9757845e+00   4.6292548e+00   4.7053161e+00   5.1176166e+00   4.2485292e+00   4.5276926e+00   4.8692915e+00   4.8774994e+00   6.6174013e+00   6.7557383e+00   4.2071368e+00   5.4074023e+00   4.1158231e+00   6.4984614e+00   4.2965102e+00   5.2488094e+00   5.6258333e+00   4.1677332e+00   4.2083251e+00   5.0259327e+00   5.4009258e+00   5.8300943e+00   6.4265076e+00   5.0645829e+00   4.3588989e+00   4.6968074e+00   6.1155539e+00   5.1322510e+00   4.8383882e+00   4.0853396e+00   5.0892043e+00   5.2735187e+00   4.9386233e+00   4.3104524e+00   5.5235858e+00   5.4064776e+00   4.9142650e+00   4.4294469e+00   4.7010637e+00   4.8887626e+00   4.3023250e+00   7.8740079e-01   3.4641016e-01   1.7320508e-01   7.2801099e-01   1.3114877e+00   1.5556349e+00   1.0099505e+00   5.0000000e-01   1.1000000e+00   7.5498344e-01   6.2449980e-01   7.0000000e-01   7.7459667e-01   5.2915026e-01   5.1961524e-01   2.0000000e-01   4.4721360e-01   5.0990195e-01   4.4721360e-01   2.6457513e-01   1.7320508e-01   6.5574385e-01   1.0440307e+00   1.2609520e+00   0.0000000e+00   3.4641016e-01   7.5498344e-01   
0.0000000e+00   5.5677644e-01   3.7416574e-01   5.0000000e-01   9.3808315e-01   5.5677644e-01   6.5574385e-01   8.8317609e-01   2.6457513e-01   7.4161985e-01   3.4641016e-01   7.2801099e-01   2.6457513e-01   4.0435133e+00   3.6359318e+00   4.1856899e+00   2.9478806e+00   3.7709415e+00   3.3421550e+00   3.8065733e+00   2.1307276e+00   3.7389838e+00   2.7748874e+00   2.4556058e+00   3.2031235e+00   3.0133038e+00   3.6619667e+00   2.5258662e+00   3.6523965e+00   3.3852622e+00   2.9223278e+00   3.6687873e+00   2.7586228e+00   3.8457769e+00   3.0364453e+00   3.9799497e+00   3.6027767e+00   3.4014703e+00   3.6055513e+00   4.0348482e+00   4.2497059e+00   3.4942810e+00   2.3874673e+00   2.6720778e+00   2.5495098e+00   2.8178006e+00   4.0718546e+00   3.3496268e+00   3.5425979e+00   3.9293765e+00   3.5284558e+00   2.9495762e+00   2.9000000e+00   3.1984371e+00   3.5707142e+00   2.9189039e+00   2.1679483e+00   3.0626786e+00   3.0248967e+00   3.0675723e+00   3.3181320e+00   1.9104973e+00   2.9883106e+00   5.2924474e+00   4.1436699e+00   5.3113087e+00   4.6583259e+00   5.0467812e+00   6.1081912e+00   3.4525353e+00   5.6329388e+00   4.9979996e+00   5.6973678e+00   4.3749286e+00   4.4821870e+00   4.8600412e+00   4.1060930e+00   4.3760713e+00   4.6411206e+00   4.6324939e+00   6.3071388e+00   6.4876806e+00   4.0286474e+00   5.1468437e+00   3.9686270e+00   6.2112801e+00   4.0706265e+00   4.9919936e+00   5.3329167e+00   3.9446166e+00   3.9874804e+00   4.8114447e+00   5.1029403e+00   5.5443665e+00   6.0917978e+00   4.8538644e+00   4.1194660e+00   4.4933284e+00   5.8180753e+00   4.9142650e+00   4.5978256e+00   3.8729833e+00   4.8176758e+00   5.0338852e+00   4.6690470e+00   4.1436699e+00   5.2744668e+00   5.1652686e+00   4.6669048e+00   4.2201896e+00   4.4575778e+00   4.6722586e+00   4.1060930e+00   6.7823300e-01   9.3273791e-01   1.3674794e+00   5.8309519e-01   7.8740079e-01   3.4641016e-01   3.8729833e-01   3.8729833e-01   3.3166248e-01   3.6055513e-01   3.6055513e-01   9.4868330e-01   6.1644140e-01   7.8102497e-01   8.1240384e-01   5.4772256e-01   2.8284271e-01   3.7416574e-01   8.6602540e-01   8.5440037e-01   3.6055513e-01   4.5825757e-01   5.1961524e-01   7.8740079e-01   7.0710678e-01   3.0000000e-01   7.8740079e-01   1.2369317e+00   4.2426407e-01   5.0000000e-01   1.6792856e+00   1.1357817e+00   6.0827625e-01   5.4772256e-01   9.3273791e-01   3.3166248e-01   9.4868330e-01   1.0000000e-01   5.7445626e-01   3.8065733e+00   3.4554305e+00   3.9824616e+00   3.0708305e+00   3.6496575e+00   3.3331667e+00   3.6290495e+00   2.4124676e+00   3.5916570e+00   2.8705400e+00   2.7730849e+00   3.1176915e+00   3.0822070e+00   3.5791060e+00   2.5099801e+00   3.4496377e+00   3.3496268e+00   2.9257478e+00   3.6851052e+00   2.8372522e+00   3.7349699e+00   2.9597297e+00   3.9370039e+00   3.5411862e+00   3.2695565e+00   3.4322005e+00   3.8858718e+00   4.0841156e+00   3.4190642e+00   2.4372115e+00   2.7928480e+00   2.6795522e+00   2.8142495e+00   4.0348482e+00   3.3436507e+00   3.3778692e+00   3.7389838e+00   3.5199432e+00   2.9154759e+00   2.9849623e+00   3.2603681e+00   3.4684290e+00   2.9359837e+00   2.4494897e+00   3.0886890e+00   2.9782545e+00   3.0380915e+00   3.2140317e+00   2.1424285e+00   2.9782545e+00   5.1487863e+00   4.1243181e+00   5.1332251e+00   4.5628938e+00   4.9183331e+00   5.9118525e+00   3.5972211e+00   5.4635154e+00   4.9173163e+00   5.4497706e+00   4.2023803e+00   4.3965896e+00   4.6968074e+00   4.1255303e+00   4.3324358e+00   4.4833024e+00   4.5011110e+00   6.0282667e+00   6.3300869e+00   4.0681691e+00   
4.9547957e+00   3.9560081e+00   6.0315835e+00   3.9912404e+00   4.8062459e+00   5.1283526e+00   3.8600518e+00   3.8858718e+00   4.7148701e+00   4.9173163e+00   5.3721504e+00   5.7887823e+00   4.7560488e+00   4.0336088e+00   4.4665423e+00   5.5991071e+00   4.7486840e+00   4.4631827e+00   3.7815341e+00   4.6292548e+00   4.8682646e+00   4.4698993e+00   4.1243181e+00   5.0970580e+00   4.9779514e+00   4.5033321e+00   4.1701319e+00   4.3162484e+00   4.5110974e+00   4.0323690e+00   4.5825757e-01   8.1853528e-01   1.2328828e+00   1.3638182e+00   8.6023253e-01   3.8729833e-01   9.9498744e-01   5.1961524e-01   6.0827625e-01   4.7958315e-01   6.6332496e-01   4.4721360e-01   3.0000000e-01   4.4721360e-01   2.8284271e-01   4.2426407e-01   4.4721360e-01   2.2360680e-01   3.0000000e-01   6.4031242e-01   8.1853528e-01   1.0816654e+00   3.4641016e-01   4.8989795e-01   7.6811457e-01   3.4641016e-01   6.4031242e-01   3.1622777e-01   3.8729833e-01   1.1832160e+00   5.3851648e-01   4.5825757e-01   6.1644140e-01   4.5825757e-01   5.0000000e-01   3.4641016e-01   5.9160798e-01   3.0000000e-01   3.9912404e+00   3.5637059e+00   4.1327957e+00   2.9444864e+00   3.7336309e+00   3.2848135e+00   3.7188708e+00   2.1307276e+00   3.7013511e+00   2.7166155e+00   2.5000000e+00   3.1336879e+00   3.0463092e+00   3.6041643e+00   2.4698178e+00   3.6027767e+00   3.3015148e+00   2.8948230e+00   3.6742346e+00   2.7477263e+00   3.7483330e+00   3.0033315e+00   3.9547440e+00   3.5580894e+00   3.3630343e+00   3.5608988e+00   4.0049969e+00   4.1928511e+00   3.4336569e+00   2.3874673e+00   2.6720778e+00   2.5573424e+00   2.7892651e+00   4.0174619e+00   3.2588341e+00   3.4365681e+00   3.8729833e+00   3.5369478e+00   2.8740216e+00   2.8757608e+00   3.1575307e+00   3.5057096e+00   2.8982753e+00   2.1863211e+00   3.0166206e+00   2.9546573e+00   3.0049958e+00   3.2726136e+00   1.9157244e+00   2.9376862e+00   5.1874849e+00   4.0779897e+00   5.2488094e+00   4.5891176e+00   4.9689033e+00   6.0506198e+00   3.3882149e+00   5.5812185e+00   4.9618545e+00   5.5982140e+00   4.2918527e+00   4.4305756e+00   4.7937459e+00   4.0521599e+00   4.2953463e+00   4.5497253e+00   4.5628938e+00   6.2112801e+00   6.4459289e+00   4.0162171e+00   5.0665570e+00   3.8897301e+00   6.1660360e+00   4.0236799e+00   4.9030603e+00   5.2649786e+00   3.8884444e+00   3.9115214e+00   4.7465777e+00   5.0517324e+00   5.5009090e+00   6.0041652e+00   4.7874837e+00   4.0681691e+00   4.4463468e+00   5.7645468e+00   4.8052055e+00   4.5188494e+00   3.7947332e+00   4.7486840e+00   4.9537864e+00   4.6000000e+00   4.0779897e+00   5.1903757e+00   5.0714889e+00   4.5978256e+00   4.1844952e+00   4.3874822e+00   4.5617979e+00   4.0224371e+00   5.8309519e-01   1.4317821e+00   1.6941074e+00   1.1269428e+00   6.1644140e-01   1.2569805e+00   8.8317609e-01   7.8740079e-01   8.2462113e-01   7.5498344e-01   6.5574385e-01   6.4807407e-01   3.0000000e-01   5.7445626e-01   6.5574385e-01   5.7445626e-01   3.1622777e-01   2.4494897e-01   7.8740079e-01   1.1747340e+00   1.3928388e+00   1.7320508e-01   3.6055513e-01   8.7177979e-01   1.7320508e-01   4.2426407e-01   5.1961524e-01   5.8309519e-01   7.9372539e-01   4.6904158e-01   7.6157731e-01   1.0344080e+00   2.0000000e-01   8.8317609e-01   3.0000000e-01   8.7177979e-01   3.7416574e-01   4.1785165e+00   3.7643060e+00   4.3162484e+00   3.0298515e+00   3.8897301e+00   3.4496377e+00   3.9344631e+00   2.1886069e+00   3.8639358e+00   2.8618176e+00   2.5019992e+00   3.3181320e+00   3.1064449e+00   3.7788887e+00   2.6324893e+00   3.7828561e+00   3.4942810e+00   
3.0315013e+00   3.7643060e+00   2.8530685e+00   3.9623226e+00   3.1511903e+00   4.0877867e+00   3.7188708e+00   3.5242020e+00   3.7322915e+00   4.1581246e+00   4.3737855e+00   3.6083237e+00   2.4879711e+00   2.7586228e+00   2.6362853e+00   2.9240383e+00   4.1797129e+00   3.4539832e+00   3.6687873e+00   4.0583248e+00   3.6304270e+00   3.0610456e+00   2.9899833e+00   3.2954514e+00   3.6905284e+00   3.0215890e+00   2.2248595e+00   3.1638584e+00   3.1400637e+00   3.1780497e+00   3.4380227e+00   1.9748418e+00   3.0951575e+00   5.4092513e+00   4.2449971e+00   5.4350713e+00   4.7738873e+00   5.1633323e+00   6.2353829e+00   3.5256205e+00   5.7584720e+00   5.1097945e+00   5.8283788e+00   4.4977772e+00   4.5934736e+00   4.9809638e+00   4.1988094e+00   4.4743715e+00   4.7592016e+00   4.7528939e+00   6.4459289e+00   6.6075714e+00   4.1231056e+00   5.2706736e+00   4.0669399e+00   6.3364028e+00   4.1809090e+00   5.1176166e+00   5.4635154e+00   4.0558600e+00   4.1024383e+00   4.9234135e+00   5.2316345e+00   5.6683331e+00   6.2337790e+00   4.9648766e+00   4.2355637e+00   4.6021734e+00   5.9447456e+00   5.0338852e+00   4.7191101e+00   3.9862263e+00   4.9416596e+00   5.1526692e+00   4.7906158e+00   4.2449971e+00   5.3972215e+00   5.2867760e+00   4.7843495e+00   4.3243497e+00   4.5760245e+00   4.7916594e+00   4.2178193e+00   1.8083141e+00   2.0420578e+00   1.4662878e+00   1.0099505e+00   1.7320508e+00   1.2165525e+00   1.3190906e+00   1.1747340e+00   6.8556546e-01   1.1180340e+00   1.0295630e+00   8.6602540e-01   9.9498744e-01   1.1090537e+00   1.0344080e+00   6.7823300e-01   7.2111026e-01   1.2727922e+00   1.4764823e+00   1.7262677e+00   7.2801099e-01   7.4161985e-01   1.3190906e+00   7.2801099e-01   2.4494897e-01   9.8488578e-01   9.0553851e-01   7.8102497e-01   3.1622777e-01   1.1135529e+00   1.4177447e+00   6.1644140e-01   1.2409674e+00   4.7958315e-01   1.2884099e+00   8.2462113e-01   4.6882833e+00   4.2391037e+00   4.8135226e+00   3.4322005e+00   4.3692105e+00   3.8729833e+00   4.3931765e+00   2.5238859e+00   4.3577517e+00   3.2295511e+00   2.8390139e+00   3.7589892e+00   3.5707142e+00   4.2308392e+00   3.0643107e+00   4.2836900e+00   3.9000000e+00   3.4856850e+00   4.2154478e+00   3.2832910e+00   4.3794977e+00   3.6235342e+00   4.5442271e+00   4.1773197e+00   4.0124805e+00   4.2272923e+00   4.6551047e+00   4.8507731e+00   4.0521599e+00   2.9478806e+00   3.1764760e+00   3.0610456e+00   3.3749074e+00   4.6076024e+00   3.8379682e+00   4.1060930e+00   4.5486262e+00   4.1012193e+00   3.4828150e+00   3.3970576e+00   3.7013511e+00   4.1448764e+00   3.4684290e+00   2.5748786e+00   3.5818989e+00   3.5749126e+00   3.6083237e+00   3.9115214e+00   2.3452079e+00   3.5270384e+00   5.8189346e+00   4.6454279e+00   5.9059292e+00   5.2105662e+00   5.5982140e+00   6.7186308e+00   3.8379682e+00   6.2401923e+00   5.5668663e+00   6.2872888e+00   4.9487372e+00   5.0378567e+00   5.4415071e+00   4.5858478e+00   4.8559242e+00   5.1894123e+00   5.2048055e+00   6.9260378e+00   7.0851958e+00   4.5497253e+00   5.7271284e+00   4.4474712e+00   6.8242216e+00   4.6281746e+00   5.5686623e+00   5.9455866e+00   4.4977772e+00   4.5354162e+00   5.3572381e+00   5.7227616e+00   6.1554854e+00   6.7305275e+00   5.3953684e+00   4.6904158e+00   5.0338852e+00   6.4342832e+00   5.4497706e+00   5.1643005e+00   4.4124823e+00   5.4092513e+00   5.5955339e+00   5.2545219e+00   4.6454279e+00   5.8455111e+00   5.7245087e+00   5.2354560e+00   4.7644517e+00   5.0259327e+00   5.2057660e+00   4.6314145e+00   5.4772256e-01   4.6904158e-01   8.8881944e-01   
5.5677644e-01   7.9372539e-01   8.7749644e-01   8.4261498e-01   1.2806248e+00   1.1489125e+00   1.3601471e+00   1.3416408e+00   1.0954451e+00   8.3666003e-01   8.7177979e-01   1.4177447e+00   1.4035669e+00   8.0622577e-01   6.8556546e-01   4.1231056e-01   1.3114877e+00   1.1313708e+00   5.9160798e-01   1.3114877e+00   1.7233688e+00   9.6953597e-01   9.5393920e-01   2.1447611e+00   1.6155494e+00   1.1000000e+00   1.0295630e+00   1.4317821e+00   8.3066239e-01   1.4560220e+00   6.5574385e-01   1.0816654e+00   3.9711459e+00   3.6851052e+00   4.1713307e+00   3.4684290e+00   3.8961519e+00   3.6810325e+00   3.8665230e+00   2.9017236e+00   3.8236109e+00   3.2832910e+00   3.2511536e+00   3.4205263e+00   3.4292856e+00   3.8716921e+00   2.8670542e+00   3.6469165e+00   3.6905284e+00   3.2771939e+00   3.9974992e+00   3.2233523e+00   4.0211939e+00   3.2526912e+00   4.2284749e+00   3.8444766e+00   3.5199432e+00   3.6496575e+00   4.1036569e+00   4.3011626e+00   3.7188708e+00   2.8106939e+00   3.1968735e+00   3.0886890e+00   3.1591138e+00   4.3474130e+00   3.7067506e+00   3.6400549e+00   3.9446166e+00   3.8196859e+00   3.2649655e+00   3.3749074e+00   3.6455452e+00   3.7536649e+00   3.2863353e+00   2.9291637e+00   3.4554305e+00   3.3181320e+00   3.3808283e+00   3.4914181e+00   2.6057628e+00   3.3271610e+00   5.3916602e+00   4.4485953e+00   5.3282267e+00   4.8352870e+00   5.1623638e+00   6.0835845e+00   4.0249224e+00   5.6595053e+00   5.1749396e+00   5.6053546e+00   4.4249294e+00   4.6636895e+00   4.9091751e+00   4.4654227e+00   4.6357308e+00   4.7138095e+00   4.7476310e+00   6.1562976e+00   6.5169011e+00   4.4056782e+00   5.1487863e+00   4.2906876e+00   6.2080593e+00   4.2649736e+00   5.0159745e+00   5.3103672e+00   4.1376322e+00   4.1641326e+00   4.9769469e+00   5.1068581e+00   5.5587768e+00   5.8932164e+00   5.0159745e+00   4.3116122e+00   4.7801674e+00   5.7471732e+00   4.9809638e+00   4.7138095e+00   4.0693980e+00   4.8238988e+00   5.0813384e+00   4.6518813e+00   4.4485953e+00   5.3047149e+00   5.1807335e+00   4.7138095e+00   4.4530888e+00   4.5530210e+00   4.7507894e+00   4.3335897e+00   6.1644140e-01   1.0908712e+00   6.4031242e-01   8.5440037e-01   1.0816654e+00   9.2195445e-01   1.4628739e+00   1.2727922e+00   1.4177447e+00   1.5811388e+00   1.2247449e+00   1.0488088e+00   1.1401754e+00   1.5779734e+00   1.5968719e+00   1.0440307e+00   6.5574385e-01   3.6055513e-01   1.5556349e+00   1.4352700e+00   9.6436508e-01   1.5556349e+00   1.9313208e+00   1.1832160e+00   1.1618950e+00   2.4289916e+00   1.7916473e+00   1.1618950e+00   9.3808315e-01   1.6703293e+00   8.7749644e-01   1.6431677e+00   8.3066239e-01   1.3228757e+00   3.7907783e+00   3.4842503e+00   3.9874804e+00   3.3926391e+00   3.7443290e+00   3.5171011e+00   3.6400549e+00   2.8705400e+00   3.6715120e+00   3.1464265e+00   3.2572995e+00   3.2403703e+00   3.3970576e+00   3.6945906e+00   2.7349589e+00   3.4785054e+00   3.4899857e+00   3.1654384e+00   3.9115214e+00   3.1416556e+00   3.7854986e+00   3.1272992e+00   4.0914545e+00   3.6878178e+00   3.3749074e+00   3.4899857e+00   3.9572718e+00   4.1109610e+00   3.5425979e+00   2.7568098e+00   3.1336879e+00   3.0397368e+00   3.0495901e+00   4.1689327e+00   3.5014283e+00   3.3955854e+00   3.7603191e+00   3.7403208e+00   3.0886890e+00   3.2726136e+00   3.5114100e+00   3.5679126e+00   3.1843367e+00   2.9154759e+00   3.3166248e+00   3.1448370e+00   3.2171416e+00   3.3391616e+00   2.5903668e+00   3.1827661e+00   5.1215232e+00   4.2555846e+00   5.1156622e+00   4.6238512e+00   4.9325450e+00   5.8711157e+00   
3.8652296e+00   5.4598535e+00   5.0059964e+00   5.3347915e+00   4.1952354e+00   4.4799554e+00   4.6968074e+00   4.2918527e+00   4.4192760e+00   4.4698993e+00   4.5343136e+00   5.8855756e+00   6.3253458e+00   4.2883563e+00   4.9122296e+00   4.0853396e+00   6.0133186e+00   4.0951190e+00   4.7686476e+00   5.0892043e+00   3.9572718e+00   3.9547440e+00   4.7696960e+00   4.9132474e+00   5.3721504e+00   5.6364883e+00   4.8062459e+00   4.1340053e+00   4.6054316e+00   5.5434646e+00   4.7085029e+00   4.4877611e+00   3.8600518e+00   4.6076024e+00   4.8476799e+00   4.4384682e+00   4.2555846e+00   5.0616203e+00   4.9254441e+00   4.5011110e+00   4.2976738e+00   4.3416587e+00   4.4799554e+00   4.1133928e+00   5.1961524e-01   5.1961524e-01   3.8729833e-01   6.7082039e-01   4.1231056e-01   9.2736185e-01   7.8740079e-01   1.0049876e+00   1.0488088e+00   7.0710678e-01   5.2915026e-01   5.8309519e-01   1.0535654e+00   1.0630146e+00   5.3851648e-01   4.5825757e-01   3.8729833e-01   1.0099505e+00   8.3666003e-01   4.5825757e-01   1.0099505e+00   1.3601471e+00   6.4807407e-01   5.7445626e-01   1.8384776e+00   1.2369317e+00   6.7082039e-01   6.7823300e-01   1.0908712e+00   4.7958315e-01   1.0862780e+00   3.6055513e-01   7.5498344e-01   3.9509493e+00   3.5972211e+00   4.1303753e+00   3.2664966e+00   3.8105118e+00   3.5142567e+00   3.7643060e+00   2.6191602e+00   3.7603191e+00   3.0397368e+00   2.9949958e+00   3.2680269e+00   3.3015148e+00   3.7483330e+00   2.6720778e+00   3.5972211e+00   3.5071356e+00   3.1304952e+00   3.8704005e+00   3.0413813e+00   3.8665230e+00   3.1304952e+00   4.1158231e+00   3.7282704e+00   3.4365681e+00   3.5860842e+00   4.0521599e+00   4.2284749e+00   3.5791060e+00   2.6419690e+00   3.0000000e+00   2.8948230e+00   3.0000000e+00   4.2047592e+00   3.5014283e+00   3.5057096e+00   3.8858718e+00   3.7134889e+00   3.0822070e+00   3.1733263e+00   3.4568772e+00   3.6318040e+00   3.1272992e+00   2.6608269e+00   3.2710854e+00   3.1543621e+00   3.2109189e+00   3.3837849e+00   2.3302360e+00   3.1543621e+00   5.2602281e+00   4.2766810e+00   5.2678269e+00   4.7180504e+00   5.0507425e+00   6.0522723e+00   3.7603191e+00   5.6187187e+00   5.0852729e+00   5.5479726e+00   4.3243497e+00   4.5486262e+00   4.8270074e+00   4.2778499e+00   4.4508426e+00   4.5934736e+00   4.6497312e+00   6.1400326e+00   6.4768820e+00   4.2602817e+00   5.0705029e+00   4.0951190e+00   6.1822326e+00   4.1436699e+00   4.9295030e+00   5.2706736e+00   4.0074930e+00   4.0274061e+00   4.8569538e+00   5.0734604e+00   5.5226805e+00   5.9016947e+00   4.8928519e+00   4.2035699e+00   4.6551047e+00   5.7227616e+00   4.8528342e+00   4.6086874e+00   3.9217343e+00   4.7528939e+00   4.9819675e+00   4.5760245e+00   4.2766810e+00   5.2172790e+00   5.0813384e+00   4.6173586e+00   4.3255058e+00   4.4485953e+00   4.6162756e+00   4.1785165e+00   7.3484692e-01   3.1622777e-01   4.4721360e-01   2.4494897e-01   6.5574385e-01   4.1231056e-01   6.0000000e-01   5.5677644e-01   2.6457513e-01   1.7320508e-01   1.7320508e-01   5.4772256e-01   5.4772256e-01   3.4641016e-01   6.4807407e-01   8.1240384e-01   5.0000000e-01   3.8729833e-01   4.2426407e-01   5.0000000e-01   8.7177979e-01   1.7320508e-01   1.4142136e-01   1.3453624e+00   7.7459667e-01   3.7416574e-01   5.9160798e-01   5.8309519e-01   3.7416574e-01   5.9160798e-01   3.1622777e-01   2.4494897e-01   3.9749214e+00   3.5818989e+00   4.1340053e+00   3.0594117e+00   3.7589892e+00   3.3852622e+00   3.7496667e+00   2.3130067e+00   3.7215588e+00   2.8478062e+00   2.6758176e+00   3.1890437e+00   3.1224990e+00   
3.6687873e+00   2.5396850e+00   3.5958309e+00   3.3985291e+00   2.9849623e+00   3.7349699e+00   2.8530685e+00   3.8131352e+00   3.0413813e+00   4.0162171e+00   3.6318040e+00   3.3852622e+00   3.5651087e+00   4.0187063e+00   4.2107007e+00   3.4957117e+00   2.4637370e+00   2.7874720e+00   2.6739484e+00   2.8618176e+00   4.1024383e+00   3.3749074e+00   3.4813790e+00   3.8794329e+00   3.5888717e+00   2.9647934e+00   2.9866369e+00   3.2832910e+00   3.5637059e+00   2.9782545e+00   2.3558438e+00   3.1192948e+00   3.0430248e+00   3.0919250e+00   3.3136083e+00   2.0493902e+00   3.0232433e+00   5.2421370e+00   4.1689327e+00   5.2668776e+00   4.6572524e+00   5.0179677e+00   6.0646517e+00   3.5510562e+00   5.6089215e+00   5.0169712e+00   5.5991071e+00   4.3162484e+00   4.4833024e+00   4.8155997e+00   4.1484937e+00   4.3680659e+00   4.5814845e+00   4.6119410e+00   6.2088646e+00   6.4668385e+00   4.1109610e+00   5.0813384e+00   3.9849718e+00   6.1830413e+00   4.0718546e+00   4.9325450e+00   5.2829916e+00   3.9382737e+00   3.9686270e+00   4.8020829e+00   5.0705029e+00   5.5163394e+00   5.9849812e+00   4.8404545e+00   4.1303753e+00   4.5453273e+00   5.7532599e+00   4.8476799e+00   4.5727453e+00   3.8561639e+00   4.7581509e+00   4.9769469e+00   4.5923850e+00   4.1689327e+00   5.2182373e+00   5.0921508e+00   4.6097722e+00   4.2379240e+00   4.4204072e+00   4.6065171e+00   4.1024383e+00   6.3245553e-01   5.0990195e-01   6.4807407e-01   1.3228757e+00   8.0622577e-01   1.0099505e+00   1.0723805e+00   8.1853528e-01   6.2449980e-01   7.1414284e-01   1.1747340e+00   1.1489125e+00   5.4772256e-01   6.4807407e-01   5.4772256e-01   1.1000000e+00   1.0535654e+00   5.4772256e-01   1.1000000e+00   1.5811388e+00   7.5498344e-01   8.6023253e-01   1.9621417e+00   1.4899664e+00   8.2462113e-01   6.4031242e-01   1.2409674e+00   6.1644140e-01   1.2922848e+00   4.6904158e-01   9.1651514e-01   3.5014283e+00   3.1827661e+00   3.6891733e+00   2.9291637e+00   3.3896903e+00   3.1368774e+00   3.3615473e+00   2.3769729e+00   3.3211444e+00   2.7404379e+00   2.7313001e+00   2.8930952e+00   2.9034462e+00   3.3436507e+00   2.3302360e+00   3.1606961e+00   3.1511903e+00   2.7331301e+00   3.4770677e+00   2.6795522e+00   3.5014283e+00   2.7294688e+00   3.7054015e+00   3.3120990e+00   3.0099834e+00   3.1543621e+00   3.6097091e+00   3.8065733e+00   3.1906112e+00   2.2737634e+00   2.6551836e+00   2.5475478e+00   2.6210685e+00   3.8144462e+00   3.1638584e+00   3.1272992e+00   3.4539832e+00   3.3015148e+00   2.7221315e+00   2.8319605e+00   3.0951575e+00   3.2280025e+00   2.7477263e+00   2.4062419e+00   2.9103264e+00   2.7748874e+00   2.8390139e+00   2.9698485e+00   2.0928450e+00   2.7856777e+00   4.8928519e+00   3.9166312e+00   4.8456166e+00   4.3162484e+00   4.6583259e+00   5.6124861e+00   3.4828150e+00   5.1749396e+00   4.6636895e+00   5.1468437e+00   3.9306488e+00   4.1496988e+00   4.4192760e+00   3.9331921e+00   4.1206796e+00   4.2201896e+00   4.2391037e+00   5.7105166e+00   6.0398675e+00   3.8704005e+00   4.6690470e+00   3.7603191e+00   5.7349804e+00   3.7496667e+00   4.5265881e+00   4.8321838e+00   3.6207734e+00   3.6455452e+00   4.4654227e+00   4.6249324e+00   5.0803543e+00   5.4607692e+00   4.5066617e+00   3.7894591e+00   4.2449971e+00   5.2915026e+00   4.4877611e+00   4.2035699e+00   3.5482390e+00   4.3428102e+00   4.5945620e+00   4.1821047e+00   3.9166312e+00   4.8176758e+00   4.7000000e+00   4.2296572e+00   3.9370039e+00   4.0521599e+00   4.2544095e+00   3.8065733e+00   5.4772256e-01   1.4142136e-01   7.4161985e-01   5.7445626e-01   
6.4807407e-01   8.1853528e-01   4.3588989e-01   3.3166248e-01   4.3588989e-01   7.3484692e-01   7.7459667e-01   5.0990195e-01   3.7416574e-01   5.8309519e-01   7.5498344e-01   6.8556546e-01   5.4772256e-01   7.5498344e-01   1.0862780e+00   4.1231056e-01   3.7416574e-01   1.6278821e+00   9.4868330e-01   4.4721360e-01   4.1231056e-01   8.6023253e-01   1.4142136e-01   7.9372539e-01   2.4494897e-01   5.2915026e-01   3.9268308e+00   3.5341194e+00   4.0902323e+00   3.1080541e+00   3.7429935e+00   3.3704599e+00   3.6905284e+00   2.3937418e+00   3.6972963e+00   2.8618176e+00   2.7820855e+00   3.1638584e+00   3.1796226e+00   3.6414283e+00   2.5436195e+00   3.5594943e+00   3.3660065e+00   2.9916551e+00   3.7696154e+00   2.8879058e+00   3.7603191e+00   3.0413813e+00   4.0162171e+00   3.6124784e+00   3.3674916e+00   3.5369478e+00   3.9987498e+00   4.1725292e+00   3.4727511e+00   2.5079872e+00   2.8372522e+00   2.7294688e+00   2.8757608e+00   4.0828911e+00   3.3421550e+00   3.4146742e+00   3.8379682e+00   3.6193922e+00   2.9410882e+00   3.0166206e+00   3.2893768e+00   3.5298725e+00   2.9983329e+00   2.4474477e+00   3.1224990e+00   3.0166206e+00   3.0757113e+00   3.2954514e+00   2.1400935e+00   3.0199338e+00   5.1749396e+00   4.1496988e+00   5.2191953e+00   4.6162756e+00   4.9699095e+00   6.0116553e+00   3.5623026e+00   5.5623736e+00   4.9989999e+00   5.5181519e+00   4.2626283e+00   4.4609416e+00   4.7717921e+00   4.1460825e+00   4.3428102e+00   4.5265881e+00   4.5661800e+00   6.1163715e+00   6.4311741e+00   4.1303753e+00   5.0239427e+00   3.9623226e+00   6.1392182e+00   4.0570926e+00   4.8672374e+00   5.2220686e+00   3.9179076e+00   3.9306488e+00   4.7686476e+00   5.0229473e+00   5.4781384e+00   5.8940648e+00   4.8072861e+00   4.1036569e+00   4.5232732e+00   5.7061370e+00   4.7770284e+00   4.5199558e+00   3.8196859e+00   4.7095647e+00   4.9264592e+00   4.5486262e+00   4.1496988e+00   5.1584882e+00   5.0289164e+00   4.5705580e+00   4.2355637e+00   4.3794977e+00   4.5365185e+00   4.0607881e+00   5.0990195e-01   1.0816654e+00   4.3588989e-01   6.3245553e-01   5.7445626e-01   4.5825757e-01   3.0000000e-01   3.6055513e-01   7.3484692e-01   6.7823300e-01   2.8284271e-01   7.6157731e-01   8.6023253e-01   6.2449980e-01   6.7082039e-01   4.2426407e-01   6.2449980e-01   1.1489125e+00   3.6055513e-01   5.8309519e-01   1.4798649e+00   1.0954451e+00   5.8309519e-01   5.7445626e-01   7.8740079e-01   5.0990195e-01   8.7749644e-01   3.7416574e-01   5.0990195e-01   3.6110940e+00   3.2511536e+00   3.7775654e+00   2.7784888e+00   3.4161382e+00   3.0822070e+00   3.4322005e+00   2.1095023e+00   3.3630343e+00   2.6095977e+00   2.4494897e+00   2.8896367e+00   2.7802878e+00   3.3436507e+00   2.2605309e+00   3.2419130e+00   3.1192948e+00   2.6551836e+00   3.4073450e+00   2.5495098e+00   3.5298725e+00   2.7110883e+00   3.6810325e+00   3.2939338e+00   3.0364453e+00   3.2140317e+00   3.6565011e+00   3.8716921e+00   3.1843367e+00   2.1470911e+00   2.4959968e+00   2.3769729e+00   2.5475478e+00   3.7907783e+00   3.1128765e+00   3.1874755e+00   3.5312887e+00   3.2434549e+00   2.6776856e+00   2.7055499e+00   2.9899833e+00   3.2403703e+00   2.6627054e+00   2.1377558e+00   2.8266588e+00   2.7386128e+00   2.7928480e+00   2.9765752e+00   1.8439089e+00   2.7239677e+00   4.9598387e+00   3.8858718e+00   4.9295030e+00   4.3393548e+00   4.7095647e+00   5.7113921e+00   3.3391616e+00   5.2516664e+00   4.6765372e+00   5.2848841e+00   4.0062451e+00   4.1641326e+00   4.4911023e+00   3.8768544e+00   4.1133928e+00   4.2906876e+00   4.2860238e+00   
5.8694122e+00   6.1139185e+00   3.7920970e+00   4.7644517e+00   3.7255872e+00   5.8215118e+00   3.7549967e+00   4.6162756e+00   4.9325450e+00   3.6290495e+00   3.6674242e+00   4.4922155e+00   4.7085029e+00   5.1584882e+00   5.6338264e+00   4.5354162e+00   3.7973675e+00   4.2166337e+00   5.4055527e+00   4.5672749e+00   4.2532341e+00   3.5623026e+00   4.4317040e+00   4.6722586e+00   4.2790186e+00   3.8858718e+00   4.9040799e+00   4.7947888e+00   4.3023250e+00   3.9242834e+00   4.1060930e+00   4.3289722e+00   3.8118237e+00   7.4161985e-01   4.5825757e-01   6.1644140e-01   7.4161985e-01   3.3166248e-01   3.0000000e-01   3.8729833e-01   6.7823300e-01   7.0710678e-01   4.2426407e-01   5.0990195e-01   6.7823300e-01   7.0000000e-01   6.2449980e-01   5.2915026e-01   7.0000000e-01   1.0295630e+00   3.6055513e-01   3.1622777e-01   1.5394804e+00   9.0553851e-01   3.1622777e-01   4.1231056e-01   7.7459667e-01   2.4494897e-01   7.4161985e-01   2.8284271e-01   4.6904158e-01   3.8858718e+00   3.4856850e+00   4.0459857e+00   3.0298515e+00   3.6864617e+00   3.3136083e+00   3.6441734e+00   2.3086793e+00   3.6482873e+00   2.7874720e+00   2.6944387e+00   3.1032241e+00   3.1096624e+00   3.5888717e+00   2.4718414e+00   3.5114100e+00   3.3090784e+00   2.9342802e+00   3.6972963e+00   2.8178006e+00   3.7067506e+00   2.9782545e+00   3.9560081e+00   3.5623026e+00   3.3136083e+00   3.4856850e+00   3.9484174e+00   4.1218928e+00   3.4146742e+00   2.4351591e+00   2.7622455e+00   2.6551836e+00   2.8089144e+00   4.0261644e+00   3.2848135e+00   3.3674916e+00   3.7907783e+00   3.5524639e+00   2.8827071e+00   2.9427878e+00   3.2280025e+00   3.4785054e+00   2.9308702e+00   2.3600847e+00   3.0577770e+00   2.9631065e+00   3.0166206e+00   3.2403703e+00   2.0445048e+00   2.9563491e+00   5.1244512e+00   4.0865633e+00   5.1710734e+00   4.5661800e+00   4.9173163e+00   5.9699246e+00   3.4885527e+00   5.5208695e+00   4.9446941e+00   5.4763126e+00   4.2107007e+00   4.4022721e+00   4.7191101e+00   4.0755368e+00   4.2731721e+00   4.4710178e+00   4.5177428e+00   6.0868711e+00   6.3827894e+00   4.0644803e+00   4.9739320e+00   3.8961519e+00   6.0967204e+00   3.9949969e+00   4.8218254e+00   5.1836281e+00   3.8561639e+00   3.8742741e+00   4.7116876e+00   4.9829710e+00   5.4323107e+00   5.8668561e+00   4.7486840e+00   4.0521599e+00   4.4743715e+00   5.6586217e+00   4.7265209e+00   4.4732538e+00   3.7616486e+00   4.6583259e+00   4.8713448e+00   4.4911023e+00   4.0865633e+00   5.1097945e+00   4.9769469e+00   4.5110974e+00   4.1689327e+00   4.3243497e+00   4.4855323e+00   4.0062451e+00   9.5916630e-01   9.4339811e-01   9.3808315e-01   7.7459667e-01   7.8740079e-01   7.4833148e-01   7.2801099e-01   8.0622577e-01   9.8488578e-01   9.3273791e-01   1.1532563e+00   7.7459667e-01   6.0000000e-01   9.5393920e-01   7.7459667e-01   7.0000000e-01   7.3484692e-01   5.1961524e-01   1.3416408e+00   5.3851648e-01   8.3066239e-01   1.0677078e+00   7.5498344e-01   8.0622577e-01   5.6568542e-01   8.6602540e-01   6.4031242e-01   4.5880279e+00   4.1641326e+00   4.7370877e+00   3.5651087e+00   4.3474130e+00   3.9127995e+00   4.3162484e+00   2.7313001e+00   4.3197222e+00   3.3196385e+00   3.1000000e+00   3.7389838e+00   3.6823905e+00   4.2272923e+00   3.0757113e+00   4.2023803e+00   3.9115214e+00   3.5355339e+00   4.2965102e+00   3.3808283e+00   4.3416587e+00   3.6193922e+00   4.5825757e+00   4.1928511e+00   3.9786933e+00   4.1665333e+00   4.6216880e+00   4.7979162e+00   4.0484565e+00   3.0166206e+00   3.3015148e+00   3.1906112e+00   3.4146742e+00   4.6411206e+00   
3.8652296e+00   4.0261644e+00   4.4766059e+00   4.1653331e+00   3.4899857e+00   3.4971417e+00   3.7907783e+00   4.1243181e+00   3.5270384e+00   2.7892651e+00   3.6414283e+00   3.5791060e+00   3.6262929e+00   3.8923001e+00   2.5039968e+00   3.5594943e+00   5.7680153e+00   4.6850827e+00   5.8506410e+00   5.2057660e+00   5.5686623e+00   6.6580778e+00   3.9749214e+00   6.1991935e+00   5.5874860e+00   6.1692787e+00   4.8805737e+00   5.0428167e+00   5.3907328e+00   4.6540305e+00   4.8713448e+00   5.1283526e+00   5.1749396e+00   6.7926431e+00   7.0590368e+00   4.6486557e+00   5.6524331e+00   4.4821870e+00   6.7808554e+00   4.6335731e+00   5.4954527e+00   5.8719673e+00   4.4944410e+00   4.5144213e+00   5.3525695e+00   5.6674509e+00   6.1139185e+00   6.5825527e+00   5.3888774e+00   4.6936127e+00   5.0842895e+00   6.3553127e+00   5.3786615e+00   5.1283526e+00   4.3954522e+00   5.3394756e+00   5.5371473e+00   5.1730069e+00   4.6850827e+00   5.7810034e+00   5.6462377e+00   5.1788030e+00   4.7947888e+00   4.9849774e+00   5.1351728e+00   4.6281746e+00   4.7958315e-01   4.4721360e-01   2.0000000e-01   4.2426407e-01   4.4721360e-01   5.1961524e-01   4.7958315e-01   3.8729833e-01   9.2195445e-01   1.0723805e+00   5.2915026e-01   6.0000000e-01   6.7082039e-01   5.2915026e-01   9.1104336e-01   3.7416574e-01   5.0000000e-01   1.2489996e+00   8.6602540e-01   2.6457513e-01   5.4772256e-01   5.5677644e-01   5.9160798e-01   6.6332496e-01   5.7445626e-01   4.3588989e-01   3.6646964e+00   3.2465366e+00   3.8105118e+00   2.6627054e+00   3.4088121e+00   3.0149627e+00   3.4132096e+00   1.9131126e+00   3.3852622e+00   2.4535688e+00   2.2781571e+00   2.8248894e+00   2.7495454e+00   3.3120990e+00   2.1587033e+00   3.2710854e+00   3.0298515e+00   2.6191602e+00   3.3555923e+00   2.4677925e+00   3.4568772e+00   2.6795522e+00   3.6496575e+00   3.2771939e+00   3.0413813e+00   3.2310989e+00   3.6823905e+00   3.8704005e+00   3.1320920e+00   2.0832667e+00   2.3958297e+00   2.2847319e+00   2.4859606e+00   3.7336309e+00   3.0033315e+00   3.1416556e+00   3.5496479e+00   3.2202484e+00   2.5961510e+00   2.5942244e+00   2.9034462e+00   3.2109189e+00   2.6000000e+00   1.9544820e+00   2.7386128e+00   2.6814175e+00   2.7221315e+00   2.9614186e+00   1.6401219e+00   2.6476405e+00   4.8918299e+00   3.7907783e+00   4.9284886e+00   4.3011626e+00   4.6636895e+00   5.7367238e+00   3.1559468e+00   5.2773099e+00   4.6583259e+00   5.2782573e+00   3.9724048e+00   4.1194660e+00   4.4698993e+00   3.7603191e+00   3.9887341e+00   4.2308392e+00   4.2638011e+00   5.9076222e+00   6.1261734e+00   3.7296112e+00   4.7423623e+00   3.6041643e+00   5.8532043e+00   3.7054015e+00   4.5956501e+00   4.9598387e+00   3.5721142e+00   3.6083237e+00   4.4395946e+00   4.7455242e+00   5.1826634e+00   5.6947344e+00   4.4766059e+00   3.7749172e+00   4.1844952e+00   5.4267854e+00   4.5022217e+00   4.2261093e+00   3.4928498e+00   4.4192760e+00   4.6281746e+00   4.2520583e+00   3.7907783e+00   4.8764741e+00   4.7497368e+00   4.2591079e+00   3.8639358e+00   4.0681691e+00   4.2602817e+00   3.7389838e+00   5.3851648e-01   4.1231056e-01   5.7445626e-01   6.4031242e-01   3.7416574e-01   4.2426407e-01   7.4833148e-01   9.0553851e-01   1.1747340e+00   5.1961524e-01   7.5498344e-01   9.2736185e-01   5.1961524e-01   8.2462113e-01   5.0000000e-01   6.4807407e-01   1.2922848e+00   7.4833148e-01   5.4772256e-01   5.3851648e-01   6.4807407e-01   5.8309519e-01   5.7445626e-01   7.0710678e-01   5.4772256e-01   3.7629775e+00   3.3241540e+00   3.8974351e+00   2.7055499e+00   3.4971417e+00   
3.0232433e+00   3.4727511e+00   1.9000000e+00   3.4626579e+00   2.4677925e+00   2.2803509e+00   2.8896367e+00   2.8160256e+00   3.3496268e+00   2.2338308e+00   3.3749074e+00   3.0413813e+00   2.6400758e+00   3.4423829e+00   2.5019992e+00   3.4957117e+00   2.7694765e+00   3.7080992e+00   3.3000000e+00   3.1272992e+00   3.3301652e+00   3.7696154e+00   3.9534795e+00   3.1843367e+00   2.1563859e+00   2.4310492e+00   2.3173260e+00   2.5475478e+00   3.7589892e+00   2.9949958e+00   3.1874755e+00   3.6373067e+00   3.3045423e+00   2.6172505e+00   2.6305893e+00   2.8948230e+00   3.2526912e+00   2.6551836e+00   1.9621417e+00   2.7622455e+00   2.6944387e+00   2.7495454e+00   3.0298515e+00   1.7088007e+00   2.6870058e+00   4.9355851e+00   3.8236109e+00   5.0059964e+00   4.3301270e+00   4.7180504e+00   5.8051701e+00   3.1352831e+00   5.3310412e+00   4.7106263e+00   5.3600373e+00   4.0509258e+00   4.1833001e+00   4.5530210e+00   3.8039453e+00   4.0546270e+00   4.3092923e+00   4.3092923e+00   5.9674115e+00   6.2016127e+00   3.7656341e+00   4.8270074e+00   3.6386811e+00   5.9203040e+00   3.7815341e+00   4.6551047e+00   5.0169712e+00   3.6455452e+00   3.6619667e+00   4.4966654e+00   4.8052055e+00   5.2583267e+00   5.7671483e+00   4.5398238e+00   3.8131352e+00   4.1785165e+00   5.5335341e+00   4.5585085e+00   4.2626283e+00   3.5454196e+00   4.5122057e+00   4.7148701e+00   4.3760713e+00   3.8236109e+00   4.9446941e+00   4.8321838e+00   4.3669211e+00   3.9446166e+00   4.1448764e+00   4.3150898e+00   3.7643060e+00   4.4721360e-01   5.4772256e-01   4.8989795e-01   3.6055513e-01   2.2360680e-01   6.0827625e-01   1.1269428e+00   1.3152946e+00   2.0000000e-01   4.4721360e-01   7.6811457e-01   2.0000000e-01   6.7082039e-01   4.2426407e-01   5.9160798e-01   9.1651514e-01   7.0000000e-01   6.4031242e-01   8.8317609e-01   3.0000000e-01   8.0622577e-01   4.8989795e-01   7.6811457e-01   3.6055513e-01   3.8845849e+00   3.4785054e+00   4.0249224e+00   2.7766887e+00   3.6027767e+00   3.1859065e+00   3.6537652e+00   1.9748418e+00   3.5749126e+00   2.6191602e+00   2.2912878e+00   3.0430248e+00   2.8354894e+00   3.5028560e+00   2.3622024e+00   3.4899857e+00   3.2341923e+00   2.7604347e+00   3.4899857e+00   2.5903668e+00   3.6945906e+00   2.8670542e+00   3.8105118e+00   3.4438351e+00   3.2357379e+00   3.4409301e+00   3.8678159e+00   4.0865633e+00   3.3331667e+00   2.2135944e+00   2.5019992e+00   2.3790755e+00   2.6495283e+00   3.9115214e+00   3.2031235e+00   3.3955854e+00   3.7682887e+00   3.3511192e+00   2.7964263e+00   2.7331301e+00   3.0413813e+00   3.4132096e+00   2.7495454e+00   2.0049938e+00   2.9017236e+00   2.8722813e+00   2.9103264e+00   3.1543621e+00   1.7406895e+00   2.8266588e+00   5.1410116e+00   3.9837169e+00   5.1487863e+00   4.5011110e+00   4.8877398e+00   5.9472683e+00   3.3045423e+00   5.4726593e+00   4.8311489e+00   5.5443665e+00   4.2166337e+00   4.3162484e+00   4.6968074e+00   3.9420807e+00   4.2154478e+00   4.4833024e+00   4.4743715e+00   6.1595454e+00   6.3206012e+00   3.8587563e+00   4.9869831e+00   3.8118237e+00   6.0481402e+00   3.9025633e+00   4.8373546e+00   5.1768716e+00   3.7788887e+00   3.8288379e+00   4.6486557e+00   4.9436828e+00   5.3795911e+00   5.9439044e+00   4.6904158e+00   3.9585351e+00   4.3370497e+00   5.6524331e+00   4.7634021e+00   4.4429720e+00   3.7148351e+00   4.6551047e+00   4.8723711e+00   4.5033321e+00   3.9837169e+00   5.1166395e+00   5.0079936e+00   4.5011110e+00   4.0484565e+00   4.2953463e+00   4.5221676e+00   3.9522146e+00   3.1622777e-01   3.4641016e-01   4.1231056e-01   
4.1231056e-01   4.1231056e-01   7.9372539e-01   9.8488578e-01   4.4721360e-01   4.8989795e-01   6.2449980e-01   4.4721360e-01   8.0622577e-01   2.4494897e-01   3.3166248e-01   1.2489996e+00   7.2801099e-01   2.2360680e-01   5.0990195e-01   5.0000000e-01   4.5825757e-01   5.2915026e-01   4.7958315e-01   3.0000000e-01   3.8275318e+00   3.4088121e+00   3.9749214e+00   2.8337255e+00   3.5805028e+00   3.1733263e+00   3.5707142e+00   2.0639767e+00   3.5524639e+00   2.6115130e+00   2.4351591e+00   2.9899833e+00   2.9257478e+00   3.4741906e+00   2.3280893e+00   3.4380227e+00   3.1843367e+00   2.7820855e+00   3.5355339e+00   2.6362853e+00   3.6124784e+00   2.8530685e+00   3.8209946e+00   3.4380227e+00   3.2109189e+00   3.4000000e+00   3.8522721e+00   4.0373258e+00   3.2969683e+00   2.2583180e+00   2.5651511e+00   2.4535688e+00   2.6570661e+00   3.8961519e+00   3.1527766e+00   3.2939338e+00   3.7148351e+00   3.3985291e+00   2.7531800e+00   2.7622455e+00   3.0610456e+00   3.3719431e+00   2.7712813e+00   2.1118712e+00   2.9017236e+00   2.8372522e+00   2.8827071e+00   3.1288976e+00   1.8083141e+00   2.8124722e+00   5.0467812e+00   3.9534795e+00   5.0941143e+00   4.4609416e+00   4.8259714e+00   5.9000000e+00   3.3045423e+00   5.4396691e+00   4.8270074e+00   5.4350713e+00   4.1352146e+00   4.2883563e+00   4.6368092e+00   3.9268308e+00   4.1533119e+00   4.3931765e+00   4.4249294e+00   6.0580525e+00   6.2952363e+00   3.9000000e+00   4.9061186e+00   3.7643060e+00   6.0183054e+00   3.8768544e+00   4.7539457e+00   5.1185936e+00   3.7416574e+00   3.7709415e+00   4.6054316e+00   4.9071377e+00   5.3497664e+00   5.8455111e+00   4.6432747e+00   3.9382737e+00   4.3416587e+00   5.5955339e+00   4.6572524e+00   4.3840620e+00   3.6551334e+00   4.5858478e+00   4.7937459e+00   4.4226689e+00   3.9534795e+00   5.0378567e+00   4.9112117e+00   4.4294469e+00   4.0385641e+00   4.2343831e+00   4.4147480e+00   3.8961519e+00   1.4142136e-01   5.9160798e-01   5.7445626e-01   3.0000000e-01   6.0827625e-01   7.6811457e-01   5.0990195e-01   4.6904158e-01   3.6055513e-01   5.0990195e-01   9.6436508e-01   1.4142136e-01   3.0000000e-01   1.4071247e+00   8.7749644e-01   4.5825757e-01   5.4772256e-01   6.5574385e-01   3.3166248e-01   6.7823300e-01   2.2360680e-01   3.0000000e-01   3.8742741e+00   3.4957117e+00   4.0373258e+00   2.9983329e+00   3.6715120e+00   3.3090784e+00   3.6674242e+00   2.2759613e+00   3.6249138e+00   2.8000000e+00   2.6324893e+00   3.1176915e+00   3.0364453e+00   3.5846897e+00   2.4779023e+00   3.5014283e+00   3.3316662e+00   2.8982753e+00   3.6578682e+00   2.7802878e+00   3.7456642e+00   2.9597297e+00   3.9319207e+00   3.5411862e+00   3.2939338e+00   3.4727511e+00   3.9217343e+00   4.1231056e+00   3.4190642e+00   2.3874673e+00   2.7202941e+00   2.6038433e+00   2.7856777e+00   4.0249224e+00   3.3136083e+00   3.4073450e+00   3.7868192e+00   3.5028560e+00   2.8948230e+00   2.9240383e+00   3.2109189e+00   3.4799425e+00   2.9017236e+00   2.3151674e+00   3.0495901e+00   2.9647934e+00   3.0182777e+00   3.2264532e+00   2.0174241e+00   2.9512709e+00   5.1759057e+00   4.1048752e+00   5.1797683e+00   4.5760245e+00   4.9426713e+00   5.9690870e+00   3.5128336e+00   5.5108983e+00   4.9295030e+00   5.5190579e+00   4.2402830e+00   4.4056782e+00   4.7349762e+00   4.0914545e+00   4.3185646e+00   4.5144213e+00   4.5276926e+00   6.1139185e+00   6.3741666e+00   4.0336088e+00   5.0029991e+00   3.9306488e+00   6.0844063e+00   3.9962482e+00   4.8518038e+00   5.1865210e+00   3.8652296e+00   3.8961519e+00   4.7275787e+00   4.9699095e+00   
5.4203321e+00   5.8847260e+00   4.7686476e+00   4.0435133e+00   4.4575778e+00   5.6630381e+00   4.7822589e+00   4.4899889e+00   3.7868192e+00   4.6765372e+00   4.9050994e+00   4.5188494e+00   4.1048752e+00   5.1400389e+00   5.0219518e+00   4.5387223e+00   4.1653331e+00   4.3439613e+00   4.5420260e+00   4.0323690e+00   5.7445626e-01   5.3851648e-01   3.0000000e-01   7.1414284e-01   8.5440037e-01   4.4721360e-01   3.4641016e-01   3.3166248e-01   4.4721360e-01   9.0000000e-01   1.4142136e-01   2.6457513e-01   1.3114877e+00   8.3066239e-01   5.0000000e-01   6.7823300e-01   5.7445626e-01   4.5825757e-01   6.3245553e-01   3.3166248e-01   2.2360680e-01   3.9509493e+00   3.5749126e+00   4.1133928e+00   3.0446675e+00   3.7389838e+00   3.3808283e+00   3.7509999e+00   2.3108440e+00   3.6959437e+00   2.8600699e+00   2.6551836e+00   3.1906112e+00   3.0789609e+00   3.6592349e+00   2.5416530e+00   3.5749126e+00   3.4088121e+00   2.9631065e+00   3.7067506e+00   2.8337255e+00   3.8275318e+00   3.0232433e+00   3.9949969e+00   3.6138622e+00   3.3630343e+00   3.5440090e+00   3.9899875e+00   4.1976184e+00   3.4914181e+00   2.4372115e+00   2.7676705e+00   2.6495283e+00   2.8460499e+00   4.0963398e+00   3.3911650e+00   3.4942810e+00   3.8626416e+00   3.5538711e+00   2.9698485e+00   2.9782545e+00   3.2756679e+00   3.5566838e+00   2.9597297e+00   2.3452079e+00   3.1144823e+00   3.0413813e+00   3.0903074e+00   3.2969683e+00   2.0469489e+00   3.0182777e+00   5.2602281e+00   4.1749251e+00   5.2564246e+00   4.6540305e+00   5.0209561e+00   6.0473135e+00   3.5721142e+00   5.5883808e+00   4.9979996e+00   5.6053546e+00   4.3197222e+00   4.4754888e+00   4.8104054e+00   4.1545156e+00   4.3874822e+00   4.5934736e+00   4.6065171e+00   6.2048368e+00   6.4459289e+00   4.0902323e+00   5.0823223e+00   4.0012498e+00   6.1595454e+00   4.0632499e+00   4.9355851e+00   5.2687759e+00   3.9344631e+00   3.9724048e+00   4.8010416e+00   5.0477718e+00   5.4936327e+00   5.9741108e+00   4.8414874e+00   4.1170378e+00   4.5310043e+00   5.7367238e+00   4.8672374e+00   4.5716518e+00   3.8626416e+00   4.7528939e+00   4.9819675e+00   4.5912961e+00   4.1749251e+00   5.2211110e+00   5.1029403e+00   4.6108568e+00   4.2272923e+00   4.4192760e+00   4.6270941e+00   4.1109610e+00   1.4142136e-01   7.6157731e-01   1.0392305e+00   1.2961481e+00   2.6457513e-01   5.0000000e-01   9.0553851e-01   2.6457513e-01   4.6904158e-01   4.5825757e-01   5.2915026e-01   9.7467943e-01   4.2426407e-01   5.8309519e-01   8.0622577e-01   3.1622777e-01   7.2111026e-01   2.2360680e-01   7.8740079e-01   3.7416574e-01   4.0422766e+00   3.6041643e+00   4.1749251e+00   2.9017236e+00   3.7536649e+00   3.2832910e+00   3.7603191e+00   2.0518285e+00   3.7296112e+00   2.6888659e+00   2.4041631e+00   3.1511903e+00   3.0149627e+00   3.6193922e+00   2.4718414e+00   3.6455452e+00   3.3090784e+00   2.8896367e+00   3.6537652e+00   2.7202941e+00   3.7735925e+00   3.0149627e+00   3.9534795e+00   3.5679126e+00   3.3882149e+00   3.5958309e+00   4.0311289e+00   4.2249260e+00   3.4467376e+00   2.3685439e+00   2.6324893e+00   2.5159491e+00   2.7838822e+00   4.0187063e+00   3.2603681e+00   3.4785054e+00   3.9127995e+00   3.5242020e+00   2.8827071e+00   2.8460499e+00   3.1368774e+00   3.5270384e+00   2.8861739e+00   2.1047565e+00   3.0049958e+00   2.9664794e+00   3.0099834e+00   3.2924155e+00   1.8493242e+00   2.9359837e+00   5.2172790e+00   4.0743098e+00   5.2820451e+00   4.6054316e+00   4.9919936e+00   6.0876925e+00   3.3451457e+00   5.6124861e+00   4.9689033e+00   5.6524331e+00   4.3278170e+00   
4.4407207e+00   4.8238988e+00   4.0360872e+00   4.2965102e+00   4.5814845e+00   4.5880279e+00   6.2745518e+00   6.4699304e+00   3.9924930e+00   5.1048996e+00   3.8858718e+00   6.1975802e+00   4.0323690e+00   4.9426713e+00   5.3075418e+00   3.9000000e+00   3.9306488e+00   4.7602521e+00   5.0882217e+00   5.5308227e+00   6.0728906e+00   4.8010416e+00   4.0816663e+00   4.4452222e+00   5.8051701e+00   4.8414874e+00   4.5464272e+00   3.8118237e+00   4.7853944e+00   4.9849774e+00   4.6378875e+00   4.0743098e+00   5.2258971e+00   5.1097945e+00   4.6270941e+00   4.1833001e+00   4.4136153e+00   4.5978256e+00   4.0360872e+00   7.0710678e-01   1.0862780e+00   1.3190906e+00   1.7320508e-01   4.5825757e-01   8.6023253e-01   1.7320508e-01   5.0990195e-01   4.3588989e-01   5.4772256e-01   9.1104336e-01   5.0990195e-01   6.0000000e-01   8.4261498e-01   2.4494897e-01   7.6157731e-01   3.0000000e-01   7.8740079e-01   3.4641016e-01   3.9874804e+00   3.5594943e+00   4.1218928e+00   2.8460499e+00   3.6972963e+00   3.2434549e+00   3.7229021e+00   2.0074860e+00   3.6728735e+00   2.6551836e+00   2.3452079e+00   3.1096624e+00   2.9410882e+00   3.5749126e+00   2.4269322e+00   3.5902646e+00   3.2787193e+00   2.8372522e+00   3.5874782e+00   2.6645825e+00   3.7443290e+00   2.9580399e+00   3.8974351e+00   3.5199432e+00   3.3316662e+00   3.5397740e+00   3.9711459e+00   4.1749251e+00   3.4029399e+00   2.3043437e+00   2.5748786e+00   2.4556058e+00   2.7294688e+00   3.9761791e+00   3.2357379e+00   3.4496377e+00   3.8613469e+00   3.4554305e+00   2.8478062e+00   2.7964263e+00   3.0951575e+00   3.4842503e+00   2.8301943e+00   2.0518285e+00   2.9614186e+00   2.9291637e+00   2.9698485e+00   3.2403703e+00   1.7944358e+00   2.8913665e+00   5.1903757e+00   4.0373258e+00   5.2345009e+00   4.5661800e+00   4.9537864e+00   6.0382117e+00   3.3211444e+00   5.5623736e+00   4.9162994e+00   5.6169387e+00   4.2883563e+00   4.3931765e+00   4.7780749e+00   3.9962482e+00   4.2638011e+00   4.5464272e+00   4.5464272e+00   6.2377881e+00   6.4156060e+00   3.9370039e+00   5.0635956e+00   3.8548671e+00   6.1441029e+00   3.9824616e+00   4.9061186e+00   5.2621288e+00   3.8535698e+00   3.8923001e+00   4.7180504e+00   5.0368641e+00   5.4763126e+00   6.0315835e+00   4.7592016e+00   4.0348482e+00   4.4022721e+00   5.7515215e+00   4.8145612e+00   4.5088801e+00   3.7749172e+00   4.7391982e+00   4.9446941e+00   4.5902070e+00   4.0373258e+00   5.1874849e+00   5.0744458e+00   4.5814845e+00   4.1303753e+00   4.3703547e+00   4.5716518e+00   4.0037482e+00   7.8740079e-01   8.3666003e-01   6.5574385e-01   5.7445626e-01   3.1622777e-01   6.5574385e-01   1.1135529e+00   3.6055513e-01   4.6904158e-01   1.4387495e+00   1.0583005e+00   4.6904158e-01   6.4031242e-01   7.3484692e-01   5.4772256e-01   8.5440037e-01   3.7416574e-01   4.6904158e-01   3.7202150e+00   3.3541020e+00   3.8871583e+00   2.8774989e+00   3.5199432e+00   3.2031235e+00   3.5355339e+00   2.2022716e+00   3.4799425e+00   2.7000000e+00   2.5455844e+00   2.9849623e+00   2.9000000e+00   3.4612137e+00   2.3473389e+00   3.3451457e+00   3.2264532e+00   2.7874720e+00   3.5057096e+00   2.6645825e+00   3.6249138e+00   2.8124722e+00   3.7934153e+00   3.4249088e+00   3.1464265e+00   3.3181320e+00   3.7696154e+00   3.9736633e+00   3.2893768e+00   2.2561028e+00   2.6057628e+00   2.4919872e+00   2.6551836e+00   3.9051248e+00   3.2202484e+00   3.2863353e+00   3.6373067e+00   3.3526109e+00   2.7874720e+00   2.8071338e+00   3.1144823e+00   3.3555923e+00   2.7730849e+00   2.2293497e+00   2.9376862e+00   2.8600699e+00   
2.9051678e+00   3.0886890e+00   1.9078784e+00   2.8319605e+00   5.0477718e+00   3.9824616e+00   5.0299105e+00   4.4530888e+00   4.8062459e+00   5.8223707e+00   3.4278273e+00   5.3721504e+00   4.7906158e+00   5.3712196e+00   4.0951190e+00   4.2638011e+00   4.5836667e+00   3.9635842e+00   4.1809090e+00   4.3692105e+00   4.3965896e+00   5.9774577e+00   6.2209324e+00   3.9064050e+00   4.8518038e+00   3.8105118e+00   5.9371710e+00   3.8496753e+00   4.7148701e+00   5.0487622e+00   3.7215588e+00   3.7643060e+00   4.5891176e+00   4.8301139e+00   5.2697249e+00   5.7428216e+00   4.6270941e+00   3.9166312e+00   4.3520110e+00   5.4972721e+00   4.6497312e+00   4.3646306e+00   3.6565011e+00   4.5210618e+00   4.7528939e+00   4.3485630e+00   3.9824616e+00   4.9969991e+00   4.8733972e+00   4.3760713e+00   4.0149720e+00   4.1976184e+00   4.4113490e+00   3.9153544e+00   3.4641016e-01   1.0440307e+00   9.7467943e-01   7.0710678e-01   1.0440307e+00   1.3784049e+00   7.1414284e-01   6.9282032e-01   1.9519221e+00   1.2247449e+00   8.1240384e-01   5.9160798e-01   1.1916375e+00   3.4641016e-01   1.0908712e+00   4.2426407e-01   8.3666003e-01   3.9974992e+00   3.6345564e+00   4.1725292e+00   3.3196385e+00   3.8665230e+00   3.5185224e+00   3.7868192e+00   2.6514147e+00   3.8013156e+00   3.0675723e+00   3.0430248e+00   3.3090784e+00   3.3630343e+00   3.7656341e+00   2.7294688e+00   3.6537652e+00   3.5114100e+00   3.1448370e+00   3.9458839e+00   3.0789609e+00   3.8832976e+00   3.1921779e+00   4.1581246e+00   3.7349699e+00   3.4871192e+00   3.6428011e+00   4.1024383e+00   4.2743421e+00   3.6110940e+00   2.7037012e+00   3.0446675e+00   2.9376862e+00   3.0479501e+00   4.2201896e+00   3.4942810e+00   3.5185224e+00   3.9306488e+00   3.7815341e+00   3.0935417e+00   3.2155870e+00   3.4583233e+00   3.6496575e+00   3.1733263e+00   2.7073973e+00   3.2939338e+00   3.1559468e+00   3.2280025e+00   3.4234486e+00   2.4124676e+00   3.1843367e+00   5.2782573e+00   4.3034870e+00   5.3084838e+00   4.7275787e+00   5.0793700e+00   6.0811183e+00   3.7696154e+00   5.6373753e+00   5.1176166e+00   5.5830099e+00   4.3669211e+00   4.5912961e+00   4.8754487e+00   4.3208795e+00   4.5055521e+00   4.6400431e+00   4.6679760e+00   6.1473572e+00   6.5192024e+00   4.2965102e+00   5.1166395e+00   4.1255303e+00   6.2120850e+00   4.1976184e+00   4.9527770e+00   5.2867760e+00   4.0583248e+00   4.0583248e+00   4.8928519e+00   5.0941143e+00   5.5614746e+00   5.9160798e+00   4.9345719e+00   4.2213742e+00   4.6432747e+00   5.7844619e+00   4.8785244e+00   4.6184413e+00   3.9534795e+00   4.8062459e+00   5.0348784e+00   4.6572524e+00   4.3034870e+00   5.2507142e+00   5.1273775e+00   4.6893496e+00   4.3886217e+00   4.4944410e+00   4.6411206e+00   4.1892720e+00   1.2609520e+00   1.1357817e+00   7.0710678e-01   1.2609520e+00   1.6309506e+00   9.0000000e-01   8.7177979e-01   2.1517435e+00   1.4899664e+00   9.6953597e-01   7.8102497e-01   1.3928388e+00   6.0000000e-01   1.3453624e+00   5.4772256e-01   1.0295630e+00   3.9471509e+00   3.6207734e+00   4.1364236e+00   3.4029399e+00   3.8587563e+00   3.5805028e+00   3.7815341e+00   2.8017851e+00   3.7881394e+00   3.1670175e+00   3.1843367e+00   3.3361655e+00   3.4132096e+00   3.7920970e+00   2.7838822e+00   3.6180105e+00   3.5707142e+00   3.2046841e+00   3.9736633e+00   3.1559468e+00   3.9089641e+00   3.2078030e+00   4.1797129e+00   3.7696154e+00   3.4813790e+00   3.6180105e+00   4.0804412e+00   4.2532341e+00   3.6386811e+00   2.7658633e+00   3.1320920e+00   3.0282008e+00   3.0967725e+00   4.2602817e+00   3.5707142e+00   
3.5298725e+00   3.9025633e+00   3.8026307e+00   3.1543621e+00   3.2954514e+00   3.5440090e+00   3.6715120e+00   3.2264532e+00   2.8478062e+00   3.3630343e+00   3.2124757e+00   3.2832910e+00   3.4351128e+00   2.5337719e+00   3.2403703e+00   5.2820451e+00   4.3497126e+00   5.2782573e+00   4.7465777e+00   5.0793700e+00   6.0415230e+00   3.8871583e+00   5.6124861e+00   5.1234754e+00   5.5344376e+00   4.3508620e+00   4.6000000e+00   4.8528342e+00   4.3737855e+00   4.5365185e+00   4.6292548e+00   4.6701178e+00   6.0901560e+00   6.4853681e+00   4.3474130e+00   5.0852729e+00   4.1785165e+00   6.1749494e+00   4.2071368e+00   4.9345719e+00   5.2545219e+00   4.0706265e+00   4.0755368e+00   4.9010203e+00   5.0645829e+00   5.5272054e+00   5.8446557e+00   4.9406477e+00   4.2402830e+00   4.6904158e+00   5.7253821e+00   4.8744230e+00   4.6249324e+00   3.9761791e+00   4.7728398e+00   5.0129831e+00   4.6119410e+00   4.3497126e+00   5.2297227e+00   5.1019604e+00   4.6615448e+00   4.4022721e+00   4.4855323e+00   4.6411206e+00   4.2249260e+00   3.4641016e-01   7.5498344e-01   0.0000000e+00   5.5677644e-01   3.7416574e-01   5.0000000e-01   9.3808315e-01   5.5677644e-01   6.5574385e-01   8.8317609e-01   2.6457513e-01   7.4161985e-01   3.4641016e-01   7.2801099e-01   2.6457513e-01   4.0435133e+00   3.6359318e+00   4.1856899e+00   2.9478806e+00   3.7709415e+00   3.3421550e+00   3.8065733e+00   2.1307276e+00   3.7389838e+00   2.7748874e+00   2.4556058e+00   3.2031235e+00   3.0133038e+00   3.6619667e+00   2.5258662e+00   3.6523965e+00   3.3852622e+00   2.9223278e+00   3.6687873e+00   2.7586228e+00   3.8457769e+00   3.0364453e+00   3.9799497e+00   3.6027767e+00   3.4014703e+00   3.6055513e+00   4.0348482e+00   4.2497059e+00   3.4942810e+00   2.3874673e+00   2.6720778e+00   2.5495098e+00   2.8178006e+00   4.0718546e+00   3.3496268e+00   3.5425979e+00   3.9293765e+00   3.5284558e+00   2.9495762e+00   2.9000000e+00   3.1984371e+00   3.5707142e+00   2.9189039e+00   2.1679483e+00   3.0626786e+00   3.0248967e+00   3.0675723e+00   3.3181320e+00   1.9104973e+00   2.9883106e+00   5.2924474e+00   4.1436699e+00   5.3113087e+00   4.6583259e+00   5.0467812e+00   6.1081912e+00   3.4525353e+00   5.6329388e+00   4.9979996e+00   5.6973678e+00   4.3749286e+00   4.4821870e+00   4.8600412e+00   4.1060930e+00   4.3760713e+00   4.6411206e+00   4.6324939e+00   6.3071388e+00   6.4876806e+00   4.0286474e+00   5.1468437e+00   3.9686270e+00   6.2112801e+00   4.0706265e+00   4.9919936e+00   5.3329167e+00   3.9446166e+00   3.9874804e+00   4.8114447e+00   5.1029403e+00   5.5443665e+00   6.0917978e+00   4.8538644e+00   4.1194660e+00   4.4933284e+00   5.8180753e+00   4.9142650e+00   4.5978256e+00   3.8729833e+00   4.8176758e+00   5.0338852e+00   4.6690470e+00   4.1436699e+00   5.2744668e+00   5.1652686e+00   4.6669048e+00   4.2201896e+00   4.4575778e+00   4.6722586e+00   4.1060930e+00   5.9160798e-01   3.4641016e-01   6.4031242e-01   3.7416574e-01   3.3166248e-01   1.0392305e+00   6.0827625e-01   6.4031242e-01   9.4868330e-01   3.6055513e-01   7.2801099e-01   4.4721360e-01   6.5574385e-01   2.2360680e-01   4.2059482e+00   3.8131352e+00   4.3588989e+00   3.1796226e+00   3.9572718e+00   3.5707142e+00   3.9887341e+00   2.3874673e+00   3.9268308e+00   3.0033315e+00   2.7147744e+00   3.3970576e+00   3.2372828e+00   3.8716921e+00   2.7239677e+00   3.8183766e+00   3.6027767e+00   3.1527766e+00   3.8755645e+00   2.9916551e+00   4.0410395e+00   3.2280025e+00   4.1904654e+00   3.8236109e+00   3.5874782e+00   3.7788887e+00   4.2190046e+00   4.4294469e+00   
3.6972963e+00   2.6038433e+00   2.9086079e+00   2.7892651e+00   3.0298515e+00   4.2918527e+00   3.5749126e+00   3.7269290e+00   4.1036569e+00   3.7349699e+00   3.1654384e+00   3.1288976e+00   3.4423829e+00   3.7749172e+00   3.1368774e+00   2.4207437e+00   3.2893768e+00   3.2449961e+00   3.2848135e+00   3.5142567e+00   2.1330729e+00   3.2046841e+00   5.4799635e+00   4.3577517e+00   5.4909016e+00   4.8682646e+00   5.2392748e+00   6.2904690e+00   3.6932371e+00   5.8266629e+00   5.2057660e+00   5.8566202e+00   4.5497253e+00   4.6808119e+00   5.0378567e+00   4.3197222e+00   4.5661800e+00   4.8145612e+00   4.8311489e+00   6.4730209e+00   6.6745786e+00   4.2579338e+00   5.3169540e+00   4.1773197e+00   6.3984373e+00   4.2649736e+00   5.1730069e+00   5.5172457e+00   4.1376322e+00   4.1833001e+00   5.0089919e+00   5.2915026e+00   5.7288742e+00   6.2489999e+00   5.0477718e+00   4.3301270e+00   4.7296934e+00   5.9791304e+00   5.0921508e+00   4.7979162e+00   4.0693980e+00   4.9869831e+00   5.2057660e+00   4.8207883e+00   4.3577517e+00   5.4534393e+00   5.3329167e+00   4.8311489e+00   4.4170126e+00   4.6400431e+00   4.8507731e+00   4.3150898e+00   7.5498344e-01   1.2083046e+00   4.5825757e-01   5.0990195e-01   1.5652476e+00   1.1401754e+00   7.0710678e-01   8.0622577e-01   8.7177979e-01   5.8309519e-01   9.5393920e-01   3.4641016e-01   5.4772256e-01   3.9166312e+00   3.5818989e+00   4.0951190e+00   3.1527766e+00   3.7509999e+00   3.4612137e+00   3.7682887e+00   2.4919872e+00   3.6972963e+00   2.9883106e+00   2.8248894e+00   3.2419130e+00   3.1416556e+00   3.7040518e+00   2.6210685e+00   3.5566838e+00   3.4914181e+00   3.0347982e+00   3.7563280e+00   2.9291637e+00   3.8807216e+00   3.0577770e+00   4.0360872e+00   3.6619667e+00   3.3734256e+00   3.5369478e+00   3.9837169e+00   4.1988094e+00   3.5411862e+00   2.5159491e+00   2.8757608e+00   2.7586228e+00   2.9137605e+00   4.1581246e+00   3.4914181e+00   3.5298725e+00   3.8535698e+00   3.5916570e+00   3.0512293e+00   3.0822070e+00   3.3793490e+00   3.5972211e+00   3.0315013e+00   2.5159491e+00   3.2046841e+00   3.1144823e+00   3.1654384e+00   3.3256578e+00   2.2045408e+00   3.0951575e+00   5.2971691e+00   4.2497059e+00   5.2516664e+00   4.6957428e+00   5.0497525e+00   6.0299254e+00   3.7215588e+00   5.5821143e+00   5.0249378e+00   5.5883808e+00   4.3324358e+00   4.5099889e+00   4.8155997e+00   4.2391037e+00   4.4564560e+00   4.6162756e+00   4.6314145e+00   6.1717096e+00   6.4358372e+00   4.1617304e+00   5.0813384e+00   4.0865633e+00   6.1424751e+00   4.0987803e+00   4.9446941e+00   5.2564246e+00   3.9736633e+00   4.0162171e+00   4.8373546e+00   5.0348784e+00   5.4799635e+00   5.9245253e+00   4.8774994e+00   4.1545156e+00   4.5934736e+00   5.7043843e+00   4.8969378e+00   4.6010868e+00   3.9127995e+00   4.7476310e+00   4.9929951e+00   4.5793013e+00   4.2497059e+00   5.2297227e+00   5.1117512e+00   4.6162756e+00   4.2684892e+00   4.4384682e+00   4.6604721e+00   4.1725292e+00   5.5677644e-01   3.7416574e-01   5.0000000e-01   9.3808315e-01   5.5677644e-01   6.5574385e-01   8.8317609e-01   2.6457513e-01   7.4161985e-01   3.4641016e-01   7.2801099e-01   2.6457513e-01   4.0435133e+00   3.6359318e+00   4.1856899e+00   2.9478806e+00   3.7709415e+00   3.3421550e+00   3.8065733e+00   2.1307276e+00   3.7389838e+00   2.7748874e+00   2.4556058e+00   3.2031235e+00   3.0133038e+00   3.6619667e+00   2.5258662e+00   3.6523965e+00   3.3852622e+00   2.9223278e+00   3.6687873e+00   2.7586228e+00   3.8457769e+00   3.0364453e+00   3.9799497e+00   3.6027767e+00   3.4014703e+00   
3.6055513e+00   4.0348482e+00   4.2497059e+00   3.4942810e+00   2.3874673e+00   2.6720778e+00   2.5495098e+00   2.8178006e+00   4.0718546e+00   3.3496268e+00   3.5425979e+00   3.9293765e+00   3.5284558e+00   2.9495762e+00   2.9000000e+00   3.1984371e+00   3.5707142e+00   2.9189039e+00   2.1679483e+00   3.0626786e+00   3.0248967e+00   3.0675723e+00   3.3181320e+00   1.9104973e+00   2.9883106e+00   5.2924474e+00   4.1436699e+00   5.3113087e+00   4.6583259e+00   5.0467812e+00   6.1081912e+00   3.4525353e+00   5.6329388e+00   4.9979996e+00   5.6973678e+00   4.3749286e+00   4.4821870e+00   4.8600412e+00   4.1060930e+00   4.3760713e+00   4.6411206e+00   4.6324939e+00   6.3071388e+00   6.4876806e+00   4.0286474e+00   5.1468437e+00   3.9686270e+00   6.2112801e+00   4.0706265e+00   4.9919936e+00   5.3329167e+00   3.9446166e+00   3.9874804e+00   4.8114447e+00   5.1029403e+00   5.5443665e+00   6.0917978e+00   4.8538644e+00   4.1194660e+00   4.4933284e+00   5.8180753e+00   4.9142650e+00   4.5978256e+00   3.8729833e+00   4.8176758e+00   5.0338852e+00   4.6690470e+00   4.1436699e+00   5.2744668e+00   5.1652686e+00   4.6669048e+00   4.2201896e+00   4.4575778e+00   4.6722586e+00   4.1060930e+00   8.3066239e-01   7.8740079e-01   7.1414284e-01   2.0000000e-01   9.2736185e-01   1.2369317e+00   4.2426407e-01   1.1045361e+00   3.0000000e-01   1.1575837e+00   6.7823300e-01   4.4497191e+00   3.9962482e+00   4.5727453e+00   3.1937439e+00   4.1267421e+00   3.6304270e+00   4.1496988e+00   2.2912878e+00   4.1170378e+00   2.9883106e+00   2.6153394e+00   3.5142567e+00   3.3361655e+00   3.9874804e+00   2.8195744e+00   4.0435133e+00   3.6565011e+00   3.2449961e+00   3.9761791e+00   3.0430248e+00   4.1352146e+00   3.3808283e+00   4.3023250e+00   3.9357337e+00   3.7709415e+00   3.9862263e+00   4.4147480e+00   4.6076024e+00   3.8078866e+00   2.7073973e+00   2.9376862e+00   2.8231188e+00   3.1320920e+00   4.3646306e+00   3.5958309e+00   3.8626416e+00   4.3069711e+00   3.8626416e+00   3.2388269e+00   3.1559468e+00   3.4612137e+00   3.9012818e+00   3.2264532e+00   2.3430749e+00   3.3391616e+00   3.3316662e+00   3.3645208e+00   3.6687873e+00   2.1071308e+00   3.2832910e+00   5.5749439e+00   4.4022721e+00   5.6621551e+00   4.9668904e+00   5.3535035e+00   6.4761099e+00   3.6041643e+00   5.9983331e+00   5.3244718e+00   6.0440053e+00   4.7042534e+00   4.7937459e+00   5.1971146e+00   4.3439613e+00   4.6130250e+00   4.9446941e+00   4.9608467e+00   6.6850580e+00   6.8425142e+00   4.3104524e+00   5.4827001e+00   4.2047592e+00   6.5825527e+00   4.3840620e+00   5.3244718e+00   5.7035077e+00   4.2532341e+00   4.2906876e+00   5.1127292e+00   5.4817880e+00   5.9135438e+00   6.4915329e+00   5.1507281e+00   4.4474712e+00   4.7937459e+00   6.1919302e+00   5.2057660e+00   4.9203658e+00   4.1677332e+00   5.1652686e+00   5.3507009e+00   5.0109879e+00   4.4022721e+00   5.6008928e+00   5.4799635e+00   4.9909919e+00   4.5210618e+00   4.7812132e+00   4.9618545e+00   4.3874822e+00   2.6457513e-01   1.2727922e+00   7.5498344e-01   4.3588989e-01   6.0000000e-01   5.1961524e-01   4.1231056e-01   5.4772256e-01   3.6055513e-01   1.7320508e-01   3.9153544e+00   3.5242020e+00   4.0718546e+00   2.9715316e+00   3.6905284e+00   3.3060551e+00   3.6945906e+00   2.2181073e+00   3.6496575e+00   2.7748874e+00   2.5709920e+00   3.1272992e+00   3.0232433e+00   3.5958309e+00   2.4738634e+00   3.5355339e+00   3.3316662e+00   2.8948230e+00   3.6523965e+00   2.7622455e+00   3.7589892e+00   2.9698485e+00   3.9370039e+00   3.5496479e+00   3.3151169e+00   3.5014283e+00   
3.9471509e+00   4.1496988e+00   3.4278273e+00   2.3748684e+00   2.6944387e+00   2.5768197e+00   2.7820855e+00   4.0274061e+00   3.3075671e+00   3.4307434e+00   3.8183766e+00   3.5028560e+00   2.8948230e+00   2.9034462e+00   3.1953091e+00   3.4942810e+00   2.8948230e+00   2.2583180e+00   3.0397368e+00   2.9681644e+00   3.0182777e+00   3.2419130e+00   1.9672316e+00   2.9478806e+00   5.1951901e+00   4.1024383e+00   5.2086467e+00   4.5891176e+00   4.9608467e+00   6.0024995e+00   3.4785054e+00   5.5398556e+00   4.9416596e+00   5.5587768e+00   4.2661458e+00   4.4170126e+00   4.7602521e+00   4.0816663e+00   4.3185646e+00   4.5365185e+00   4.5475268e+00   6.1611687e+00   6.4007812e+00   4.0236799e+00   5.0328918e+00   3.9255573e+00   6.1155539e+00   4.0062451e+00   4.8805737e+00   5.2211110e+00   3.8755645e+00   3.9089641e+00   4.7402532e+00   5.0019996e+00   5.4497706e+00   5.9371710e+00   4.7812132e+00   4.0558600e+00   4.4598206e+00   5.7000000e+00   4.8052055e+00   4.5099889e+00   3.7973675e+00   4.7063787e+00   4.9295030e+00   4.5497253e+00   4.1024383e+00   5.1672043e+00   5.0497525e+00   4.5628938e+00   4.1701319e+00   4.3646306e+00   4.5639895e+00   4.0398020e+00   1.3000000e+00   6.7823300e-01   4.2426407e-01   6.8556546e-01   5.4772256e-01   4.4721360e-01   5.1961524e-01   4.2426407e-01   2.4494897e-01   4.1060930e+00   3.7054015e+00   4.2626283e+00   3.1591138e+00   3.8820098e+00   3.4957117e+00   3.8704005e+00   2.3895606e+00   3.8483763e+00   2.9410882e+00   2.7531800e+00   3.3030289e+00   3.2357379e+00   3.7868192e+00   2.6476405e+00   3.7242449e+00   3.5057096e+00   3.1000000e+00   3.8483763e+00   2.9597297e+00   3.9242834e+00   3.1606961e+00   4.1340053e+00   3.7509999e+00   3.5099858e+00   3.6918830e+00   4.1460825e+00   4.3347434e+00   3.6110940e+00   2.5748786e+00   2.8896367e+00   2.7766887e+00   2.9748950e+00   4.2154478e+00   3.4770677e+00   3.5972211e+00   4.0062451e+00   3.7067506e+00   3.0740852e+00   3.0886890e+00   3.3882149e+00   3.6823905e+00   3.0903074e+00   2.4351591e+00   3.2264532e+00   3.1559468e+00   3.2031235e+00   3.4351128e+00   2.1307276e+00   3.1336879e+00   5.3535035e+00   4.2755117e+00   5.3907328e+00   4.7738873e+00   5.1341991e+00   6.1919302e+00   3.6345564e+00   5.7358522e+00   5.1371198e+00   5.7210139e+00   4.4350874e+00   4.6000000e+00   4.9365980e+00   4.2508823e+00   4.4698993e+00   4.6957428e+00   4.7318073e+00   6.3364028e+00   6.5924199e+00   4.2213742e+00   5.2019227e+00   4.0865633e+00   6.3111013e+00   4.1880783e+00   5.0527220e+00   5.4101756e+00   4.0533936e+00   4.0828911e+00   4.9173163e+00   5.1990384e+00   5.6435804e+00   6.1155539e+00   4.9547957e+00   4.2497059e+00   4.6604721e+00   5.8804762e+00   4.9598387e+00   4.6914816e+00   3.9686270e+00   4.8805737e+00   5.0941143e+00   4.7127487e+00   4.2755117e+00   5.3376025e+00   5.2086467e+00   4.7275787e+00   4.3520110e+00   4.5387223e+00   4.7180504e+00   4.2130749e+00   9.1104336e-01   1.3674794e+00   1.7262677e+00   7.6811457e-01   1.6462078e+00   9.1651514e-01   1.6278821e+00   1.1269428e+00   4.4530888e+00   4.0124805e+00   4.5607017e+00   3.0479501e+00   4.0718546e+00   3.5958309e+00   4.1821047e+00   2.1587033e+00   4.0816663e+00   2.9359837e+00   2.3811762e+00   3.5071356e+00   3.1685959e+00   3.9610605e+00   2.8035692e+00   4.0373258e+00   3.6578682e+00   3.1906112e+00   3.8183766e+00   2.9410882e+00   4.1557190e+00   3.3316662e+00   4.2047592e+00   3.8961519e+00   3.7376463e+00   3.9648455e+00   4.3588989e+00   4.5803930e+00   3.7802116e+00   2.6191602e+00   2.8106939e+00   
2.6944387e+00   3.0692019e+00   4.3058100e+00   3.6027767e+00   3.9230090e+00   4.2988371e+00   3.7215588e+00   3.2465366e+00   3.0545049e+00   3.3926391e+00   3.8923001e+00   3.1432467e+00   2.1771541e+00   3.2832910e+00   3.3391616e+00   3.3481338e+00   3.6400549e+00   1.9824228e+00   3.2449961e+00   5.5830099e+00   4.3416587e+00   5.6258333e+00   4.9335586e+00   5.3244718e+00   6.4366140e+00   3.5213634e+00   5.9539903e+00   5.2325902e+00   6.0712437e+00   4.7053161e+00   4.7254629e+00   5.1633323e+00   4.2497059e+00   4.5596052e+00   4.9416596e+00   4.9376108e+00   6.7275553e+00   6.7594378e+00   4.1701319e+00   5.4708317e+00   4.1605288e+00   6.5222695e+00   4.3139309e+00   5.3329167e+00   5.6956123e+00   4.2000000e+00   4.2731721e+00   5.0586559e+00   5.4516053e+00   5.8532043e+00   6.5352888e+00   5.0950957e+00   4.4011362e+00   4.7275787e+00   6.1457302e+00   5.2297227e+00   4.9132474e+00   4.1521079e+00   5.1429563e+00   5.3272882e+00   4.9839743e+00   4.3416587e+00   5.5910643e+00   5.4808758e+00   4.9537864e+00   4.4192760e+00   4.7528939e+00   4.9909919e+00   4.3749286e+00   8.3666003e-01   1.1180340e+00   4.6904158e-01   9.6953597e-01   2.2360680e-01   1.0488088e+00   6.1644140e-01   4.4452222e+00   3.9912404e+00   4.5727453e+00   3.2434549e+00   4.1412558e+00   3.6469165e+00   4.1400483e+00   2.3515952e+00   4.1267421e+00   3.0149627e+00   2.6981475e+00   3.5199432e+00   3.3896903e+00   3.9974992e+00   2.8337255e+00   4.0435133e+00   3.6619667e+00   3.2695565e+00   4.0211939e+00   3.0822070e+00   4.1303753e+00   3.3985291e+00   4.3301270e+00   3.9509493e+00   3.7815341e+00   3.9912404e+00   4.4283180e+00   4.6119410e+00   3.8183766e+00   2.7440845e+00   2.9849623e+00   2.8722813e+00   3.1575307e+00   4.3829214e+00   3.6013886e+00   3.8470768e+00   4.3069711e+00   3.9038443e+00   3.2449961e+00   3.1937439e+00   3.4899857e+00   3.9064050e+00   3.2572995e+00   2.4103942e+00   3.3630343e+00   3.3376639e+00   3.3763886e+00   3.6796739e+00   2.1633308e+00   3.3015148e+00   5.5677644e+00   4.4204072e+00   5.6656862e+00   4.9749372e+00   5.3572381e+00   6.4791975e+00   3.6373067e+00   6.0049979e+00   5.3469618e+00   6.0274373e+00   4.7000000e+00   4.8104054e+00   5.2009614e+00   4.3714986e+00   4.6260134e+00   4.9406477e+00   4.9648766e+00   6.6640828e+00   6.8571131e+00   4.3520110e+00   5.4790510e+00   4.2190046e+00   6.5916614e+00   4.4022721e+00   5.3169540e+00   5.7000000e+00   4.2673177e+00   4.2953463e+00   5.1244512e+00   5.4854353e+00   5.9236813e+00   6.4699304e+00   5.1623638e+00   4.4609416e+00   4.8145612e+00   6.1951594e+00   5.1942276e+00   4.9203658e+00   4.1725292e+00   5.1652686e+00   5.3507009e+00   5.0109879e+00   4.4204072e+00   5.5973208e+00   5.4726593e+00   4.9949975e+00   4.5475268e+00   4.7853944e+00   4.9497475e+00   4.3920383e+00   4.7958315e-01   6.4807407e-01   5.0990195e-01   6.7082039e-01   5.4772256e-01   4.8989795e-01   3.7868192e+00   3.3570821e+00   3.9331921e+00   2.8178006e+00   3.5425979e+00   3.1432467e+00   3.5128336e+00   2.0663978e+00   3.5227830e+00   2.5709920e+00   2.4535688e+00   2.9376862e+00   2.9342802e+00   3.4380227e+00   2.2825424e+00   3.3955854e+00   3.1352831e+00   2.7730849e+00   3.5142567e+00   2.6267851e+00   3.5468296e+00   2.8195744e+00   3.7934153e+00   3.4161382e+00   3.1780497e+00   3.3600595e+00   3.8223030e+00   3.9887341e+00   3.2526912e+00   2.2516660e+00   2.5592968e+00   2.4556058e+00   2.6324893e+00   3.8587563e+00   3.1032241e+00   3.2280025e+00   3.6701499e+00   3.3852622e+00   2.7110883e+00   2.7386128e+00   
3.0430248e+00   3.3316662e+00   2.7513633e+00   2.1189620e+00   2.8722813e+00   2.8035692e+00   2.8460499e+00   3.0951575e+00   1.7944358e+00   2.7784888e+00   4.9699095e+00   3.9012818e+00   5.0398413e+00   4.4147480e+00   4.7644517e+00   5.8532043e+00   3.2603681e+00   5.4018515e+00   4.7927028e+00   5.3581713e+00   4.0681691e+00   4.2402830e+00   4.5771170e+00   3.8742741e+00   4.0767634e+00   4.3162484e+00   4.3760713e+00   5.9958319e+00   6.2513998e+00   3.8807216e+00   4.8373546e+00   3.7013511e+00   5.9791304e+00   3.8288379e+00   4.6893496e+00   5.0724747e+00   3.6891733e+00   3.7134889e+00   4.5497253e+00   4.8713448e+00   5.3094256e+00   5.7879185e+00   4.5836667e+00   3.9038443e+00   4.3197222e+00   5.5389530e+00   4.5760245e+00   4.3324358e+00   3.5958309e+00   4.5232732e+00   4.7212287e+00   4.3485630e+00   3.9012818e+00   4.9709154e+00   4.8321838e+00   4.3577517e+00   3.9924930e+00   4.1737274e+00   4.3335897e+00   3.8405729e+00   9.9498744e-01   3.6055513e-01   9.4868330e-01   5.0000000e-01   7.4161985e-01   3.5791060e+00   3.1654384e+00   3.7336309e+00   2.7622455e+00   3.3852622e+00   2.9883106e+00   3.3120990e+00   2.0784610e+00   3.3406586e+00   2.4939928e+00   2.4839485e+00   2.7892651e+00   2.8530685e+00   3.2634338e+00   2.1817424e+00   3.2093613e+00   2.9765752e+00   2.6267851e+00   3.4263683e+00   2.5357445e+00   3.3719431e+00   2.6870058e+00   3.6523965e+00   3.2372828e+00   3.0116441e+00   3.1843367e+00   3.6469165e+00   3.8078866e+00   3.0967725e+00   2.1725561e+00   2.4939928e+00   2.3916521e+00   2.5179357e+00   3.7013511e+00   2.9495762e+00   3.0282008e+00   3.4785054e+00   3.2787193e+00   2.5573424e+00   2.6589472e+00   2.9137605e+00   3.1511903e+00   2.6419690e+00   2.1400935e+00   2.7495454e+00   2.6324893e+00   2.6962938e+00   2.9308702e+00   1.8411953e+00   2.6476405e+00   4.7864392e+00   3.7669616e+00   4.8507731e+00   4.2308392e+00   4.5880279e+00   5.6453521e+00   3.1906112e+00   5.1932649e+00   4.6281746e+00   5.1478151e+00   3.8884444e+00   4.0877867e+00   4.4022721e+00   3.7709415e+00   3.9661064e+00   4.1496988e+00   4.1856899e+00   5.7480431e+00   6.0671245e+00   3.7669616e+00   4.6529560e+00   3.5791060e+00   5.7758116e+00   3.6891733e+00   4.4877611e+00   4.8518038e+00   3.5468296e+00   3.5496479e+00   4.3897608e+00   4.6583259e+00   5.1166395e+00   5.5362442e+00   4.4294469e+00   3.7269290e+00   4.1388404e+00   5.3525695e+00   4.3920383e+00   4.1352146e+00   3.4380227e+00   4.3439613e+00   4.5541190e+00   4.1928511e+00   3.7669616e+00   4.7812132e+00   4.6540305e+00   4.2071368e+00   3.8716921e+00   4.0062451e+00   4.1509035e+00   3.6715120e+00   8.8317609e-01   3.0000000e-01   8.7177979e-01   3.7416574e-01   4.1206796e+00   3.6945906e+00   4.2555846e+00   2.9563491e+00   3.8223030e+00   3.3852622e+00   3.8626416e+00   2.1142375e+00   3.8065733e+00   2.7766887e+00   2.4372115e+00   3.2388269e+00   3.0545049e+00   3.7148351e+00   2.5475478e+00   3.7188708e+00   3.4190642e+00   2.9782545e+00   3.6945906e+00   2.7892651e+00   3.8807216e+00   3.0805844e+00   4.0236799e+00   3.6646964e+00   3.4612137e+00   3.6674242e+00   4.1000000e+00   4.3046487e+00   3.5355339e+00   2.4228083e+00   2.6925824e+00   2.5748786e+00   2.8548205e+00   4.1121770e+00   3.3778692e+00   3.5916570e+00   3.9937451e+00   3.5693137e+00   2.9883106e+00   2.9154759e+00   3.2341923e+00   3.6249138e+00   2.9546573e+00   2.1517435e+00   3.0935417e+00   3.0757113e+00   3.1080541e+00   3.3734256e+00   1.8814888e+00   3.0232433e+00   5.3235327e+00   4.1641326e+00   5.3646994e+00   
4.7063787e+00   5.0852729e+00   6.1741396e+00   3.4394767e+00   5.7026310e+00   5.0467812e+00   5.7489129e+00   4.4170126e+00   4.5188494e+00   4.9040799e+00   4.1121770e+00   4.3749286e+00   4.6701178e+00   4.6850827e+00   6.3835727e+00   6.5436993e+00   4.0595566e+00   5.1903757e+00   3.9774364e+00   6.2793312e+00   4.1036569e+00   5.0428167e+00   5.4046276e+00   3.9761791e+00   4.0236799e+00   4.8456166e+00   5.1778374e+00   5.6080300e+00   6.1757591e+00   4.8836462e+00   4.1737274e+00   4.5497253e+00   5.8736701e+00   4.9457052e+00   4.6508064e+00   3.9051248e+00   4.8641546e+00   5.0665570e+00   4.7021272e+00   4.1641326e+00   5.3188345e+00   5.1990384e+00   4.6957428e+00   4.2449971e+00   4.4966654e+00   4.7031904e+00   4.1412558e+00   8.0622577e-01   2.4494897e-01   5.4772256e-01   3.8755645e+00   3.4856850e+00   4.0385641e+00   3.0626786e+00   3.6945906e+00   3.3136083e+00   3.6414283e+00   2.3515952e+00   3.6428011e+00   2.8195744e+00   2.7386128e+00   3.1192948e+00   3.1256999e+00   3.5860842e+00   2.5039968e+00   3.5114100e+00   3.3151169e+00   2.9308702e+00   3.7242449e+00   2.8354894e+00   3.7148351e+00   2.9949958e+00   3.9635842e+00   3.5510562e+00   3.3166248e+00   3.4885527e+00   3.9458839e+00   4.1243181e+00   3.4234486e+00   2.4596748e+00   2.7874720e+00   2.6776856e+00   2.8266588e+00   4.0286474e+00   3.2908965e+00   3.3674916e+00   3.7881394e+00   3.5693137e+00   2.8896367e+00   2.9698485e+00   3.2310989e+00   3.4756294e+00   2.9478806e+00   2.4062419e+00   3.0708305e+00   2.9597297e+00   3.0232433e+00   3.2434549e+00   2.1118712e+00   2.9698485e+00   5.1322510e+00   4.1036569e+00   5.1710734e+00   4.5617979e+00   4.9234135e+00   5.9581876e+00   3.5199432e+00   5.5045436e+00   4.9446941e+00   5.4763126e+00   4.2201896e+00   4.4136153e+00   4.7275787e+00   4.1048752e+00   4.3104524e+00   4.4888751e+00   4.5133136e+00   6.0638272e+00   6.3796552e+00   4.0767634e+00   4.9819675e+00   3.9217343e+00   6.0835845e+00   4.0124805e+00   4.8197510e+00   5.1662365e+00   3.8742741e+00   3.8845849e+00   4.7222876e+00   4.9648766e+00   5.4249424e+00   5.8412327e+00   4.7634021e+00   4.0472213e+00   4.4586994e+00   5.6621551e+00   4.7370877e+00   4.4665423e+00   3.7749172e+00   4.6669048e+00   4.8877398e+00   4.5155288e+00   4.1036569e+00   5.1137071e+00   4.9909919e+00   4.5354162e+00   4.1928511e+00   4.3358967e+00   4.4966654e+00   4.0112342e+00   8.6602540e-01   4.1231056e-01   4.2532341e+00   3.8131352e+00   4.3863424e+00   3.0967725e+00   3.9623226e+00   3.4914181e+00   3.9686270e+00   2.2315914e+00   3.9420807e+00   2.8809721e+00   2.5787594e+00   3.3555923e+00   3.2186954e+00   3.8301436e+00   2.6720778e+00   3.8548671e+00   3.5128336e+00   3.1016125e+00   3.8548671e+00   2.9240383e+00   3.9761791e+00   3.2218007e+00   4.1617304e+00   3.7815341e+00   3.5986108e+00   3.8052595e+00   4.2426407e+00   4.4339599e+00   3.6537652e+00   2.5729361e+00   2.8319605e+00   2.7166155e+00   2.9899833e+00   4.2261093e+00   3.4612137e+00   3.6837481e+00   4.1231056e+00   3.7296112e+00   3.0886890e+00   3.0446675e+00   3.3421550e+00   3.7376463e+00   3.0919250e+00   2.2847319e+00   3.2093613e+00   3.1764760e+00   3.2171416e+00   3.5028560e+00   2.0273135e+00   3.1416556e+00   5.4175640e+00   4.2743421e+00   5.4909016e+00   4.8145612e+00   5.1971146e+00   6.3000000e+00   3.5270384e+00   5.8266629e+00   5.1788030e+00   5.8566202e+00   4.5321077e+00   4.6465041e+00   5.0299105e+00   4.2308392e+00   4.4866469e+00   4.7812132e+00   4.7979162e+00   6.4853681e+00   6.6805688e+00   4.1964271e+00   
5.3094256e+00   4.0804412e+00   6.4109282e+00   4.2367440e+00   5.1497573e+00   5.5208695e+00   4.1036569e+00   4.1352146e+00   4.9648766e+00   5.3028294e+00   5.7428216e+00   6.2841069e+00   5.0039984e+00   4.2930176e+00   4.6572524e+00   6.0124870e+00   5.0408333e+00   4.7560488e+00   4.0149720e+00   4.9909919e+00   5.1865210e+00   4.8373546e+00   4.2743421e+00   5.4313902e+00   5.3103672e+00   4.8270074e+00   4.3852024e+00   4.6184413e+00   4.7968740e+00   4.2402830e+00   5.0990195e-01   3.8496753e+00   3.4856850e+00   4.0211939e+00   3.0757113e+00   3.6810325e+00   3.3436507e+00   3.6551334e+00   2.3937418e+00   3.6262929e+00   2.8653098e+00   2.7604347e+00   3.1352831e+00   3.1032241e+00   3.6000000e+00   2.5199206e+00   3.4885527e+00   3.3570821e+00   2.9410882e+00   3.7080992e+00   2.8460499e+00   3.7496667e+00   2.9849623e+00   3.9610605e+00   3.5623026e+00   3.3015148e+00   3.4684290e+00   3.9230090e+00   4.1170378e+00   3.4380227e+00   2.4515301e+00   2.7982137e+00   2.6851443e+00   2.8301943e+00   4.0509258e+00   3.3451457e+00   3.3970576e+00   3.7749172e+00   3.5468296e+00   2.9240383e+00   2.9899833e+00   3.2649655e+00   3.4899857e+00   2.9512709e+00   2.4351591e+00   3.0967725e+00   2.9899833e+00   3.0495901e+00   3.2403703e+00   2.1307276e+00   2.9899833e+00   5.1672043e+00   4.1352146e+00   5.1672043e+00   4.5836667e+00   4.9416596e+00   5.9497899e+00   3.5846897e+00   5.4990908e+00   4.9446941e+00   5.4836119e+00   4.2296572e+00   4.4204072e+00   4.7275787e+00   4.1340053e+00   4.3428102e+00   4.5066617e+00   4.5265881e+00   6.0671245e+00   6.3671030e+00   4.0841156e+00   4.9859803e+00   3.9623226e+00   6.0704201e+00   4.0149720e+00   4.8342528e+00   5.1643005e+00   3.8820098e+00   3.9051248e+00   4.7370877e+00   4.9547957e+00   5.4101756e+00   5.8326666e+00   4.7780749e+00   4.0570926e+00   4.4833024e+00   5.6409219e+00   4.7686476e+00   4.4866469e+00   3.7986840e+00   4.6626173e+00   4.8959167e+00   4.5044423e+00   4.1352146e+00   5.1254268e+00   5.0049975e+00   4.5332108e+00   4.1928511e+00   4.3428102e+00   4.5299007e+00   4.0459857e+00   4.0422766e+00   3.6428011e+00   4.1940434e+00   3.0364453e+00   3.7986840e+00   3.4000000e+00   3.8131352e+00   2.2516660e+00   3.7643060e+00   2.8442925e+00   2.5961510e+00   3.2295511e+00   3.1000000e+00   3.7013511e+00   2.5632011e+00   3.6565011e+00   3.4278273e+00   2.9883106e+00   3.7349699e+00   2.8390139e+00   3.8652296e+00   3.0708305e+00   4.0336088e+00   3.6537652e+00   3.4263683e+00   3.6180105e+00   4.0607881e+00   4.2649736e+00   3.5298725e+00   2.4556058e+00   2.7622455e+00   2.6438608e+00   2.8722813e+00   4.1243181e+00   3.3985291e+00   3.5468296e+00   3.9382737e+00   3.5916570e+00   2.9916551e+00   2.9765752e+00   3.2771939e+00   3.6027767e+00   2.9816103e+00   2.2912878e+00   3.1256999e+00   3.0692019e+00   3.1144823e+00   3.3496268e+00   2.0049938e+00   3.0397368e+00   5.3047149e+00   4.1928511e+00   5.3254108e+00   4.6957428e+00   5.0695167e+00   6.1237244e+00   3.5369478e+00   5.6586217e+00   5.0447993e+00   5.6841886e+00   4.3806392e+00   4.5188494e+00   4.8733972e+00   4.1629317e+00   4.4068129e+00   4.6465041e+00   4.6593991e+00   6.2952363e+00   6.5145990e+00   4.1060930e+00   5.1497573e+00   4.0124805e+00   6.2345810e+00   4.1060930e+00   4.9989999e+00   5.3450912e+00   3.9761791e+00   4.0137264e+00   4.8435524e+00   5.1234754e+00   5.5668663e+00   6.0745370e+00   4.8836462e+00   4.1617304e+00   4.5585085e+00   5.8206529e+00   4.9173163e+00   4.6227697e+00   3.9000000e+00   4.8228622e+00   5.0408333e+00   
4.6636895e+00   4.1928511e+00   5.2829916e+00   5.1643005e+00   4.6722586e+00   4.2638011e+00   4.4743715e+00   4.6754679e+00   4.1412558e+00   6.4031242e-01   2.6457513e-01   1.8867962e+00   6.5574385e-01   1.3784049e+00   7.3484692e-01   2.6776856e+00   5.1961524e-01   2.0322401e+00   2.6532998e+00   1.2288206e+00   1.6278821e+00   9.4868330e-01   1.8083141e+00   4.3588989e-01   1.4317821e+00   1.4866069e+00   1.3000000e+00   1.7832555e+00   1.1747340e+00   1.2124356e+00   1.0148892e+00   1.0049876e+00   7.8740079e-01   5.3851648e-01   4.5825757e-01   5.5677644e-01   1.0677078e+00   1.9104973e+00   1.9467922e+00   2.0124612e+00   1.5394804e+00   1.2041595e+00   1.6278821e+00   1.0583005e+00   3.3166248e-01   1.1832160e+00   1.5394804e+00   1.8000000e+00   1.6552945e+00   9.2736185e-01   1.5264338e+00   2.6324893e+00   1.5716234e+00   1.4212670e+00   1.4282857e+00   9.4868330e-01   2.6608269e+00   1.4899664e+00   1.8439089e+00   1.4491377e+00   1.4071247e+00   1.2449900e+00   1.4628739e+00   2.1213203e+00   2.2427661e+00   1.7029386e+00   1.3964240e+00   1.8357560e+00   8.7749644e-01   1.1045361e+00   1.1000000e+00   1.6217275e+00   1.6613248e+00   1.2369317e+00   1.0440307e+00   2.3430749e+00   2.5495098e+00   1.4491377e+00   1.3490738e+00   1.5874508e+00   2.2383029e+00   9.6953597e-01   1.2609520e+00   1.3747727e+00   9.8488578e-01   1.0246951e+00   1.3490738e+00   1.1532563e+00   1.5905974e+00   2.1023796e+00   1.4035669e+00   9.0553851e-01   1.4071247e+00   1.8165902e+00   1.5297059e+00   1.0816654e+00   1.1000000e+00   1.0000000e+00   1.3820275e+00   9.9498744e-01   1.4491377e+00   1.5132746e+00   1.5198684e+00   1.0908712e+00   1.1489125e+00   9.4868330e-01   1.4071247e+00   1.2529964e+00   6.4807407e-01   1.3820275e+00   4.2426407e-01   8.3066239e-01   2.6457513e-01   2.1400935e+00   4.2426407e-01   1.4352700e+00   2.1563859e+00   6.1644140e-01   1.2884099e+00   4.7958315e-01   1.2569805e+00   3.4641016e-01   8.2462113e-01   1.0099505e+00   1.0198039e+00   1.2845233e+00   6.5574385e-01   7.3484692e-01   8.1240384e-01   6.1644140e-01   4.1231056e-01   3.1622777e-01   6.4807407e-01   6.4807407e-01   5.0000000e-01   1.4491377e+00   1.4491377e+00   1.5297059e+00   1.0295630e+00   8.8317609e-01   1.0198039e+00   4.5825757e-01   3.7416574e-01   9.3273791e-01   9.3808315e-01   1.2609520e+00   1.1269428e+00   3.8729833e-01   1.0295630e+00   2.1118712e+00   1.0099505e+00   8.4261498e-01   8.4261498e-01   4.5825757e-01   2.1424285e+00   9.2195445e-01   1.8083141e+00   1.0630146e+00   1.6881943e+00   1.1832160e+00   1.4933185e+00   2.5000000e+00   1.6673332e+00   2.0566964e+00   1.5362291e+00   2.0880613e+00   7.8740079e-01   1.0246951e+00   1.2489996e+00   1.2165525e+00   1.3000000e+00   1.1313708e+00   1.0677078e+00   2.7166155e+00   2.9068884e+00   1.1874342e+00   1.5264338e+00   1.1000000e+00   2.6343880e+00   7.1414284e-01   1.3784049e+00   1.7262677e+00   6.1644140e-01   6.1644140e-01   1.3152946e+00   1.5427249e+00   1.9697716e+00   2.5436195e+00   1.3638182e+00   7.2801099e-01   1.2922848e+00   2.2203603e+00   1.4387495e+00   1.0488088e+00   6.1644140e-01   1.1958261e+00   1.4560220e+00   1.1224972e+00   1.0630146e+00   1.6613248e+00   1.5937377e+00   1.1224972e+00   9.5393920e-01   8.8881944e-01   1.2369317e+00   8.6023253e-01   1.8574176e+00   5.8309519e-01   1.3152946e+00   6.7082039e-01   2.7018512e+00   5.0990195e-01   2.0149442e+00   2.6514147e+00   1.2247449e+00   1.6370706e+00   8.5440037e-01   1.8601075e+00   5.4772256e-01   1.3638182e+00   1.5033296e+00   1.2083046e+00   
1.7916473e+00   1.0535654e+00   1.2569805e+00   8.4852814e-01   9.2736185e-01   8.3066239e-01   6.0000000e-01   3.4641016e-01   3.1622777e-01   1.0049876e+00   1.9748418e+00   1.9544820e+00   2.0346990e+00   1.5684387e+00   1.0099505e+00   1.5556349e+00   1.0344080e+00   2.8284271e-01   1.1357817e+00   1.5427249e+00   1.7804494e+00   1.5968719e+00   8.6602540e-01   1.5362291e+00   2.6570661e+00   1.5427249e+00   1.4247807e+00   1.4177447e+00   9.6436508e-01   2.7147744e+00   1.4866069e+00   1.6155494e+00   1.2529964e+00   1.1874342e+00   9.8994949e-01   1.2124356e+00   1.9364917e+00   2.1354157e+00   1.5000000e+00   1.1401754e+00   1.6673332e+00   6.7823300e-01   8.5440037e-01   8.6023253e-01   1.4352700e+00   1.4662878e+00   1.0295630e+00   7.8740079e-01   2.2045408e+00   2.3515952e+00   1.2767145e+00   1.1357817e+00   1.4247807e+00   2.0542639e+00   7.8102497e-01   1.0392305e+00   1.1832160e+00   8.2462113e-01   8.6023253e-01   1.0908712e+00   9.5916630e-01   1.3928388e+00   1.9974984e+00   1.1489125e+00   7.0000000e-01   1.1789826e+00   1.6522712e+00   1.3228757e+00   8.3666003e-01   9.5916630e-01   7.8102497e-01   1.1575837e+00   8.2462113e-01   1.2529964e+00   1.2884099e+00   1.3114877e+00   8.8317609e-01   9.4339811e-01   7.1414284e-01   1.2124356e+00   1.0677078e+00   1.2845233e+00   7.3484692e-01   1.4899664e+00   9.7467943e-01   1.3892444e+00   5.1961524e-01   8.2462113e-01   8.5440037e-01   5.9160798e-01   1.1045361e+00   7.2801099e-01   1.5000000e+00   8.8881944e-01   5.9160798e-01   8.8881944e-01   3.1622777e-01   1.3638182e+00   7.8102497e-01   1.2369317e+00   1.0535654e+00   1.1224972e+00   1.3674794e+00   1.6093477e+00   1.7578396e+00   9.4868330e-01   6.8556546e-01   3.0000000e-01   4.3588989e-01   5.1961524e-01   1.3076697e+00   8.8881944e-01   1.3416408e+00   1.6155494e+00   8.9442719e-01   7.1414284e-01   2.0000000e-01   5.0990195e-01   1.1045361e+00   4.3588989e-01   9.1104336e-01   4.5825757e-01   7.6157731e-01   6.6332496e-01   9.6953597e-01   1.1135529e+00   5.4772256e-01   2.6608269e+00   1.3490738e+00   2.7018512e+00   1.9519221e+00   2.3537205e+00   3.5071356e+00   9.0000000e-01   3.0232433e+00   2.2293497e+00   3.2295511e+00   1.8734994e+00   1.7378147e+00   2.2516660e+00   1.2529964e+00   1.6613248e+00   2.0760539e+00   1.9974984e+00   3.8974351e+00   3.7868192e+00   1.1401754e+00   2.5806976e+00   1.2489996e+00   3.5874782e+00   1.3638182e+00   2.4433583e+00   2.8195744e+00   1.2767145e+00   1.3820275e+00   2.0639767e+00   2.5903668e+00   2.9376862e+00   3.7762415e+00   2.1047565e+00   1.4628739e+00   1.7378147e+00   3.2771939e+00   2.3706539e+00   1.9874607e+00   1.2767145e+00   2.2803509e+00   2.4186773e+00   2.1931712e+00   1.3490738e+00   2.6664583e+00   2.6019224e+00   2.0904545e+00   1.4282857e+00   1.8493242e+00   2.1587033e+00   1.4525839e+00   8.3066239e-01   5.5677644e-01   2.1587033e+00   2.4494897e-01   1.4832397e+00   2.0856654e+00   7.4833148e-01   1.1045361e+00   4.3588989e-01   1.3638182e+00   4.2426407e-01   9.2736185e-01   1.0000000e+00   6.7823300e-01   1.2449900e+00   8.0622577e-01   7.4833148e-01   4.6904158e-01   5.0990195e-01   3.8729833e-01   3.1622777e-01   3.7416574e-01   5.2915026e-01   5.1961524e-01   1.4628739e+00   1.4000000e+00   1.4899664e+00   1.0392305e+00   7.2111026e-01   1.1224972e+00   7.9372539e-01   3.7416574e-01   6.0827625e-01   1.0677078e+00   1.2206556e+00   1.0816654e+00   4.5825757e-01   9.8994949e-01   2.1071308e+00   1.0099505e+00   9.6436508e-01   9.2195445e-01   4.7958315e-01   2.1840330e+00   9.6436508e-01   
1.8027756e+00   9.5393920e-01   1.5652476e+00   1.0677078e+00   1.4035669e+00   2.3685439e+00   1.6431677e+00   1.9052559e+00   1.2884099e+00   2.0928450e+00   8.1240384e-01   8.1853528e-01   1.1401754e+00   1.0677078e+00   1.2449900e+00   1.1401754e+00   9.6953597e-01   2.7092434e+00   2.7221315e+00   8.7749644e-01   1.4730920e+00   1.0723805e+00   2.4698178e+00   4.7958315e-01   1.3638182e+00   1.6431677e+00   4.6904158e-01   6.1644140e-01   1.1704700e+00   1.4071247e+00   1.7944358e+00   2.5396850e+00   1.2247449e+00   5.3851648e-01   1.1000000e+00   2.0904545e+00   1.4866069e+00   1.0000000e+00   6.4807407e-01   1.1180340e+00   1.3928388e+00   1.0677078e+00   9.5393920e-01   1.6062378e+00   1.5811388e+00   1.0392305e+00   6.7082039e-01   8.0622577e-01   1.3152946e+00   8.6023253e-01   8.6023253e-01   1.5264338e+00   9.1104336e-01   7.9372539e-01   1.4899664e+00   4.5825757e-01   8.8881944e-01   4.6904158e-01   9.1104336e-01   1.0535654e+00   3.0000000e-01   5.1961524e-01   8.0622577e-01   7.0710678e-01   7.3484692e-01   6.4031242e-01   8.0622577e-01   4.5825757e-01   7.3484692e-01   9.3273791e-01   1.1445523e+00   1.2041595e+00   3.7416574e-01   1.0630146e+00   8.5440037e-01   9.6436508e-01   6.2449980e-01   7.4161985e-01   4.1231056e-01   7.3484692e-01   1.0816654e+00   7.8740079e-01   4.5825757e-01   6.1644140e-01   3.1622777e-01   4.6904158e-01   5.5677644e-01   1.5066519e+00   3.3166248e-01   3.7416574e-01   3.1622777e-01   5.4772256e-01   1.6552945e+00   4.0000000e-01   2.0736441e+00   8.6023253e-01   2.1447611e+00   1.3527749e+00   1.7832555e+00   2.9495762e+00   9.4339811e-01   2.4617067e+00   1.7406895e+00   2.6248809e+00   1.2845233e+00   1.2247449e+00   1.7000000e+00   9.1104336e-01   1.2569805e+00   1.5132746e+00   1.3892444e+00   3.2634338e+00   3.2863353e+00   8.6023253e-01   2.0099751e+00   8.1240384e-01   3.0545049e+00   8.8317609e-01   1.8248288e+00   2.2158520e+00   7.6811457e-01   7.8102497e-01   1.5297059e+00   2.0174241e+00   2.4103942e+00   3.1527766e+00   1.5842980e+00   8.7177979e-01   1.1916375e+00   2.7568098e+00   1.7720045e+00   1.3527749e+00   6.8556546e-01   1.7262677e+00   1.8734994e+00   1.7000000e+00   8.6023253e-01   2.0808652e+00   2.0322401e+00   1.5905974e+00   1.0295630e+00   1.2884099e+00   1.5556349e+00   8.3066239e-01   2.2561028e+00   5.9160798e-01   1.5000000e+00   2.2759613e+00   7.1414284e-01   1.4662878e+00   4.8989795e-01   1.3964240e+00   5.7445626e-01   7.9372539e-01   1.1532563e+00   1.1269428e+00   1.4212670e+00   4.6904158e-01   9.3273791e-01   8.3066239e-01   6.7082039e-01   6.4807407e-01   5.5677644e-01   7.4161985e-01   5.9160798e-01   5.4772256e-01   1.6278821e+00   1.5842980e+00   1.6763055e+00   1.1874342e+00   7.8102497e-01   9.7467943e-01   3.7416574e-01   4.5825757e-01   1.0862780e+00   1.0148892e+00   1.3638182e+00   1.1747340e+00   4.2426407e-01   1.1789826e+00   2.2383029e+00   1.0908712e+00   9.2736185e-01   9.2736185e-01   6.4807407e-01   2.2847319e+00   1.0295630e+00   1.5811388e+00   9.2736185e-01   1.5556349e+00   1.0049876e+00   1.3038405e+00   2.3748684e+00   1.6278821e+00   1.9390719e+00   1.4317821e+00   1.9157244e+00   6.0827625e-01   9.0553851e-01   1.1090537e+00   1.1180340e+00   1.1401754e+00   9.3273791e-01   9.0000000e-01   2.5632011e+00   2.7892651e+00   1.1832160e+00   1.3638182e+00   9.6953597e-01   2.5238859e+00   6.6332496e-01   1.1874342e+00   1.5968719e+00   5.5677644e-01   4.5825757e-01   1.1489125e+00   1.4525839e+00   1.8734994e+00   2.4207437e+00   1.1958261e+00   6.4807407e-01   1.1747340e+00   
2.1213203e+00   1.2083046e+00   8.5440037e-01   4.7958315e-01   1.0677078e+00   1.2845233e+00   1.0246951e+00   9.2736185e-01   1.4798649e+00   1.4035669e+00   9.9498744e-01   9.0553851e-01   7.3484692e-01   1.0000000e+00   6.7082039e-01   2.2181073e+00   8.3666003e-01   4.5825757e-01   1.5556349e+00   1.3190906e+00   1.9519221e+00   9.5916630e-01   2.2583180e+00   1.5937377e+00   1.2409674e+00   1.8493242e+00   9.3273791e-01   2.1283797e+00   1.4764823e+00   2.1863211e+00   1.8973666e+00   1.8947295e+00   2.1494185e+00   2.4859606e+00   2.6419690e+00   1.7748239e+00   8.4852814e-01   7.8740079e-01   7.2111026e-01   1.1401754e+00   2.2135944e+00   1.5165751e+00   2.0024984e+00   2.4372115e+00   1.8083141e+00   1.2569805e+00   9.7467943e-01   1.2845233e+00   1.9104973e+00   1.1747340e+00   1.4142136e-01   1.2165525e+00   1.3601471e+00   1.3379088e+00   1.7406895e+00   3.8729833e-01   1.2369317e+00   3.5085610e+00   2.2248595e+00   3.6290495e+00   2.8530685e+00   3.2572995e+00   4.4440972e+00   1.3928388e+00   3.9560081e+00   3.1843367e+00   4.1012193e+00   2.7276363e+00   2.6739484e+00   3.1654384e+00   2.1307276e+00   2.4839485e+00   2.9291637e+00   2.8982753e+00   4.7749346e+00   4.7465777e+00   2.0952327e+00   3.4770677e+00   2.0518285e+00   4.5343136e+00   2.2912878e+00   3.3196385e+00   3.7229021e+00   2.1771541e+00   2.2360680e+00   2.9849623e+00   3.5014283e+00   3.8807216e+00   4.6443514e+00   3.0232433e+00   2.3685439e+00   2.6324893e+00   4.2107007e+00   3.1953091e+00   2.8670542e+00   2.1118712e+00   3.1796226e+00   3.3136083e+00   3.0692019e+00   2.2248595e+00   3.5637059e+00   3.4727511e+00   2.9832868e+00   2.3811762e+00   2.7440845e+00   2.9647934e+00   2.2891046e+00   1.5811388e+00   2.1610183e+00   8.3666003e-01   1.1401754e+00   5.1961524e-01   1.4142136e+00   3.1622777e-01   1.0295630e+00   1.0099505e+00   8.3666003e-01   1.3000000e+00   9.3273791e-01   7.8740079e-01   6.1644140e-01   5.2915026e-01   3.6055513e-01   2.4494897e-01   3.1622777e-01   5.8309519e-01   6.4031242e-01   1.4832397e+00   1.4628739e+00   1.5362291e+00   1.0862780e+00   8.6023253e-01   1.2247449e+00   8.4261498e-01   3.1622777e-01   7.0000000e-01   1.1224972e+00   1.3152946e+00   1.1618950e+00   5.1961524e-01   1.0488088e+00   2.1679483e+00   1.0954451e+00   9.9498744e-01   9.8488578e-01   5.0000000e-01   2.2383029e+00   1.0344080e+00   1.9104973e+00   1.1357817e+00   1.6093477e+00   1.1575837e+00   1.5066519e+00   2.3769729e+00   1.7944358e+00   1.9052559e+00   1.3638182e+00   2.1307276e+00   9.1651514e-01   9.6436508e-01   1.2247449e+00   1.2727922e+00   1.4525839e+00   1.2727922e+00   1.0392305e+00   2.6907248e+00   2.7549955e+00   1.0246951e+00   1.5459625e+00   1.2609520e+00   2.4738634e+00   6.8556546e-01   1.4212670e+00   1.6309506e+00   6.7823300e-01   7.7459667e-01   1.3000000e+00   1.3784049e+00   1.8055470e+00   2.4959968e+00   1.3638182e+00   6.2449980e-01   1.1618950e+00   2.1142375e+00   1.5968719e+00   1.0677078e+00   8.1240384e-01   1.1874342e+00   1.5033296e+00   1.1747340e+00   1.1357817e+00   1.6792856e+00   1.6792856e+00   1.1747340e+00   8.7749644e-01   9.3273791e-01   1.4317821e+00   1.0000000e+00   9.2195445e-01   8.2462113e-01   1.0295630e+00   1.2206556e+00   5.4772256e-01   1.6309506e+00   7.8740079e-01   7.4833148e-01   1.2727922e+00   5.3851648e-01   1.3076697e+00   9.1651514e-01   1.5033296e+00   1.2247449e+00   1.2845233e+00   1.5165751e+00   1.8384776e+00   1.9078784e+00   1.0246951e+00   7.6157731e-01   5.2915026e-01   6.1644140e-01   6.3245553e-01   1.4560220e+00   
7.0710678e-01   1.2369317e+00   1.7492856e+00   1.2767145e+00   5.4772256e-01   3.8729833e-01   6.2449980e-01   1.1789826e+00   6.4807407e-01   8.4852814e-01   5.0990195e-01   6.8556546e-01   6.2449980e-01   1.1000000e+00   9.7467943e-01   5.5677644e-01   2.6814175e+00   1.4317821e+00   2.8618176e+00   2.0736441e+00   2.4556058e+00   3.6918830e+00   7.6157731e-01   3.2202484e+00   2.4617067e+00   3.2954514e+00   1.9339080e+00   1.9104973e+00   2.3874673e+00   1.3638182e+00   1.6763055e+00   2.1118712e+00   2.1213203e+00   3.9924930e+00   4.0087405e+00   1.4525839e+00   2.6814175e+00   1.2369317e+00   3.8026307e+00   1.5394804e+00   2.5179357e+00   2.9698485e+00   1.4071247e+00   1.4352700e+00   2.1977261e+00   2.7820855e+00   3.1527766e+00   3.8871583e+00   2.2315914e+00   1.6340135e+00   1.9261360e+00   3.4626579e+00   2.3643181e+00   2.0784610e+00   1.3038405e+00   2.4062419e+00   2.5099801e+00   2.3021729e+00   1.4317821e+00   2.7604347e+00   2.6570661e+00   2.2000000e+00   1.6462078e+00   1.9570386e+00   2.1330729e+00   1.4764823e+00   1.5968719e+00   1.1357817e+00   1.9026298e+00   1.1269428e+00   2.2516660e+00   1.6155494e+00   1.2206556e+00   1.6522712e+00   8.8317609e-01   2.1400935e+00   1.4798649e+00   2.0371549e+00   1.8248288e+00   1.8708287e+00   2.1283797e+00   2.3937418e+00   2.5748786e+00   1.7492856e+00   9.2195445e-01   7.1414284e-01   6.7082039e-01   1.1532563e+00   2.1000000e+00   1.5524175e+00   2.0784610e+00   2.4062419e+00   1.6370706e+00   1.3453624e+00   9.1651514e-01   1.2083046e+00   1.8920888e+00   1.1357817e+00   3.6055513e-01   1.1958261e+00   1.4212670e+00   1.3711309e+00   1.7262677e+00   7.2111026e-01   1.2569805e+00   3.4467376e+00   2.1213203e+00   3.5185224e+00   2.7477263e+00   3.1591138e+00   4.3104524e+00   1.3228757e+00   3.8183766e+00   3.0116441e+00   4.0509258e+00   2.6925824e+00   2.5495098e+00   3.0740852e+00   1.9974984e+00   2.4083189e+00   2.8861739e+00   2.8089144e+00   4.7127487e+00   4.5716518e+00   1.8814888e+00   3.4029399e+00   1.9899749e+00   4.3783559e+00   2.1863211e+00   3.2603681e+00   3.6290495e+00   2.1000000e+00   2.1931712e+00   2.8670542e+00   3.3896903e+00   3.7376463e+00   4.5891176e+00   2.9068884e+00   2.2671568e+00   2.4779023e+00   4.0914545e+00   3.1654384e+00   2.7946377e+00   2.0808652e+00   3.1048349e+00   3.2357379e+00   3.0116441e+00   2.1213203e+00   3.4828150e+00   3.4161382e+00   2.9103264e+00   2.2360680e+00   2.6720778e+00   2.9495762e+00   2.2383029e+00   9.6953597e-01   5.5677644e-01   7.0710678e-01   8.3666003e-01   4.2426407e-01   6.0000000e-01   9.0553851e-01   7.6811457e-01   7.0000000e-01   4.0000000e-01   9.4868330e-01   6.4807407e-01   5.5677644e-01   7.3484692e-01   1.1045361e+00   1.1489125e+00   3.3166248e-01   9.6953597e-01   9.1651514e-01   1.0099505e+00   5.2915026e-01   9.5916630e-01   5.8309519e-01   5.1961524e-01   9.4868330e-01   8.5440037e-01   3.7416574e-01   7.0000000e-01   6.7082039e-01   4.5825757e-01   5.4772256e-01   1.5362291e+00   4.6904158e-01   3.6055513e-01   3.0000000e-01   3.8729833e-01   1.5779734e+00   3.6055513e-01   2.1189620e+00   1.0344080e+00   2.1656408e+00   1.4899664e+00   1.8466185e+00   3.0016662e+00   1.1747340e+00   2.5436195e+00   1.8814888e+00   2.5806976e+00   1.2083046e+00   1.3076697e+00   1.6911535e+00   1.0862780e+00   1.2922848e+00   1.4628739e+00   1.4628739e+00   3.2588341e+00   3.3660065e+00   1.1357817e+00   1.9824228e+00   9.3273791e-01   3.1272992e+00   9.1104336e-01   1.8275667e+00   2.2494444e+00   7.6157731e-01   7.8740079e-01   1.6155494e+00   
2.0639767e+00   2.4617067e+00   3.1192948e+00   1.6552945e+00   1.0049876e+00   1.4730920e+00   2.7367864e+00   1.7578396e+00   1.4282857e+00   6.7823300e-01   1.6763055e+00   1.8493242e+00   1.5684387e+00   1.0344080e+00   2.0928450e+00   1.9949937e+00   1.5099669e+00   1.1000000e+00   1.2688578e+00   1.5264338e+00   9.4868330e-01   1.0723805e+00   9.4868330e-01   1.2727922e+00   1.1401754e+00   5.4772256e-01   7.3484692e-01   5.1961524e-01   1.5132746e+00   6.7823300e-01   1.1135529e+00   9.4868330e-01   9.1104336e-01   1.1489125e+00   1.3416408e+00   1.6186414e+00   9.9498744e-01   7.0710678e-01   5.8309519e-01   6.1644140e-01   5.8309519e-01   1.3490738e+00   1.2247449e+00   1.4317821e+00   1.4282857e+00   5.9160798e-01   9.4868330e-01   6.5574385e-01   7.8102497e-01   1.0816654e+00   4.8989795e-01   1.2247449e+00   7.3484692e-01   9.0000000e-01   8.4261498e-01   8.4261498e-01   1.3820275e+00   7.4161985e-01   2.7477263e+00   1.5198684e+00   2.5826343e+00   1.9442222e+00   2.3600847e+00   3.3421550e+00   1.4282857e+00   2.8478062e+00   2.1118712e+00   3.1717503e+00   1.8601075e+00   1.7058722e+00   2.1771541e+00   1.4764823e+00   1.8894444e+00   2.1307276e+00   1.9442222e+00   3.7656341e+00   3.6262929e+00   1.1180340e+00   2.5278449e+00   1.5264338e+00   3.3970576e+00   1.3379088e+00   2.4083189e+00   2.6608269e+00   1.2961481e+00   1.4491377e+00   2.0712315e+00   2.3832751e+00   2.7459060e+00   3.5958309e+00   2.1260292e+00   1.3820275e+00   1.7000000e+00   3.1032241e+00   2.4596748e+00   1.9646883e+00   1.3856406e+00   2.1886069e+00   2.4124676e+00   2.1260292e+00   1.5198684e+00   2.6343880e+00   2.6153394e+00   2.0639767e+00   1.4106736e+00   1.8248288e+00   2.2649503e+00   1.5811388e+00   1.2124356e+00   7.0000000e-01   5.5677644e-01   8.0622577e-01   7.4161985e-01   1.0677078e+00   5.4772256e-01   7.1414284e-01   5.0000000e-01   2.2360680e-01   5.0990195e-01   5.9160798e-01   7.1414284e-01   7.4161985e-01   2.4494897e-01   1.3601471e+00   1.2288206e+00   1.3304135e+00   9.0000000e-01   5.0000000e-01   7.4161985e-01   5.8309519e-01   6.4031242e-01   7.0710678e-01   7.9372539e-01   1.0099505e+00   7.6157731e-01   1.4142136e-01   8.4261498e-01   1.9209373e+00   7.4161985e-01   6.7823300e-01   6.4807407e-01   4.2426407e-01   2.0346990e+00   7.3484692e-01   1.7606817e+00   7.3484692e-01   1.7146428e+00   1.0049876e+00   1.4212670e+00   2.5219040e+00   1.3152946e+00   2.0396078e+00   1.3747727e+00   2.2068076e+00   8.7749644e-01   8.6023253e-01   1.2767145e+00   8.7749644e-01   1.1224972e+00   1.1618950e+00   9.8488578e-01   2.8301943e+00   2.8809721e+00   7.7459667e-01   1.5937377e+00   8.1240384e-01   2.6324893e+00   5.2915026e-01   1.4177447e+00   1.7748239e+00   4.3588989e-01   4.5825757e-01   1.1832160e+00   1.5716234e+00   1.9773720e+00   2.7018512e+00   1.2449900e+00   4.6904158e-01   9.4868330e-01   2.3108440e+00   1.4491377e+00   9.6436508e-01   4.3588989e-01   1.2884099e+00   1.4866069e+00   1.2845233e+00   7.3484692e-01   1.6822604e+00   1.6522712e+00   1.1958261e+00   7.3484692e-01   8.8317609e-01   1.2489996e+00   6.0827625e-01   1.3784049e+00   9.2736185e-01   6.4807407e-01   1.3038405e+00   5.3851648e-01   1.3674794e+00   6.4807407e-01   1.5427249e+00   1.2165525e+00   1.0630146e+00   1.2884099e+00   1.7029386e+00   1.8275667e+00   1.0049876e+00   4.4721360e-01   5.8309519e-01   6.0000000e-01   4.2426407e-01   1.5937377e+00   9.4868330e-01   1.1445523e+00   1.5811388e+00   1.2206556e+00   5.0990195e-01   5.7445626e-01   8.6602540e-01   1.1269428e+00   5.4772256e-01   
9.4868330e-01   6.3245553e-01   6.2449980e-01   6.0827625e-01   9.2195445e-01   9.0000000e-01   5.1961524e-01   2.8017851e+00   1.6401219e+00   2.8618176e+00   2.1771541e+00   2.5436195e+00   3.6945906e+00   1.2727922e+00   3.2295511e+00   2.5416530e+00   3.2771939e+00   1.9078784e+00   1.9824228e+00   2.3874673e+00   1.6186414e+00   1.8734994e+00   2.1494185e+00   2.1633308e+00   3.9547440e+00   4.0484565e+00   1.6278821e+00   2.6814175e+00   1.4798649e+00   3.8105118e+00   1.5716234e+00   2.5337719e+00   2.9427878e+00   1.4352700e+00   1.4832397e+00   2.3000000e+00   2.7386128e+00   3.1400637e+00   3.7986840e+00   2.3366643e+00   1.6703293e+00   2.0856654e+00   3.4161382e+00   2.4392622e+00   2.1307276e+00   1.3638182e+00   2.3685439e+00   2.5416530e+00   2.2315914e+00   1.6401219e+00   2.7964263e+00   2.6870058e+00   2.1863211e+00   1.7233688e+00   1.9672316e+00   2.2022716e+00   1.6124515e+00   1.1135529e+00   1.1045361e+00   1.0392305e+00   1.3820275e+00   9.8488578e-01   7.8740079e-01   8.8317609e-01   7.6157731e-01   3.8729833e-01   1.4142136e-01   5.0990195e-01   6.7823300e-01   7.4161985e-01   1.4899664e+00   1.5427249e+00   1.6062378e+00   1.1224972e+00   1.0862780e+00   1.3114877e+00   7.9372539e-01   3.1622777e-01   9.0000000e-01   1.1489125e+00   1.4035669e+00   1.3152946e+00   6.4031242e-01   1.1224972e+00   2.2135944e+00   1.1916375e+00   1.0440307e+00   1.0440307e+00   5.5677644e-01   2.2293497e+00   1.0908712e+00   1.9924859e+00   1.3076697e+00   1.7058722e+00   1.3416408e+00   1.6278821e+00   2.4799194e+00   1.9235384e+00   2.0420578e+00   1.5748016e+00   2.1447611e+00   9.4868330e-01   1.1445523e+00   1.3114877e+00   1.4422205e+00   1.5459625e+00   1.3114877e+00   1.1916375e+00   2.7239677e+00   2.8827071e+00   1.2922848e+00   1.5968719e+00   1.3820275e+00   2.5961510e+00   8.5440037e-01   1.4899664e+00   1.7262677e+00   8.1240384e-01   8.8317609e-01   1.4525839e+00   1.5033296e+00   1.9287302e+00   2.5079872e+00   1.5033296e+00   8.6602540e-01   1.4317821e+00   2.1702534e+00   1.6401219e+00   1.2083046e+00   9.0553851e-01   1.2369317e+00   1.5620499e+00   1.1575837e+00   1.3076697e+00   1.7549929e+00   1.7146428e+00   1.2083046e+00   1.0630146e+00   1.0246951e+00   1.4662878e+00   1.1401754e+00   7.3484692e-01   1.0000000e+00   8.7749644e-01   5.5677644e-01   7.6157731e-01   9.4868330e-01   6.4807407e-01   8.5440037e-01   1.0099505e+00   1.2569805e+00   1.2247449e+00   4.1231056e-01   1.1916375e+00   1.0099505e+00   1.1224972e+00   7.6157731e-01   7.8740079e-01   2.0000000e-01   5.7445626e-01   1.1224972e+00   1.0148892e+00   4.4721360e-01   7.4161985e-01   5.1961524e-01   5.1961524e-01   7.3484692e-01   1.5937377e+00   4.6904158e-01   4.3588989e-01   3.8729833e-01   6.7082039e-01   1.7058722e+00   5.0000000e-01   1.9570386e+00   8.0622577e-01   2.1377558e+00   1.3416408e+00   1.7291616e+00   2.9614186e+00   8.8317609e-01   2.4959968e+00   1.8000000e+00   2.5455844e+00   1.2083046e+00   1.2369317e+00   1.6733201e+00   8.7177979e-01   1.1180340e+00   1.4000000e+00   1.3784049e+00   3.2218007e+00   3.3120990e+00   1.0246951e+00   1.9519221e+00   6.7082039e-01   3.0886890e+00   9.1104336e-01   1.7606817e+00   2.2226111e+00   7.6157731e-01   7.0710678e-01   1.5000000e+00   2.0639767e+00   2.4494897e+00   3.1288976e+00   1.5427249e+00   9.4339811e-01   1.2767145e+00   2.7586228e+00   1.6340135e+00   1.3190906e+00   5.8309519e-01   1.6941074e+00   1.8000000e+00   1.6431677e+00   8.0622577e-01   2.0199010e+00   1.9339080e+00   1.5297059e+00   1.0723805e+00   1.2449900e+00   
1.4035669e+00   7.3484692e-01   9.0553851e-01   3.6055513e-01   1.1789826e+00   4.4721360e-01   1.0862780e+00   7.0710678e-01   7.2801099e-01   9.8994949e-01   1.2884099e+00   1.4832397e+00   7.0000000e-01   6.1644140e-01   5.2915026e-01   5.8309519e-01   2.8284271e-01   1.1832160e+00   8.1240384e-01   1.0246951e+00   1.2569805e+00   7.6811457e-01   4.6904158e-01   4.7958315e-01   4.7958315e-01   7.6811457e-01   2.4494897e-01   1.2000000e+00   3.7416574e-01   3.8729833e-01   3.8729833e-01   5.7445626e-01   1.3228757e+00   3.3166248e-01   2.5436195e+00   1.3453624e+00   2.4959968e+00   1.7832555e+00   2.2158520e+00   3.2848135e+00   1.2247449e+00   2.7874720e+00   2.0928450e+00   3.0033315e+00   1.6552945e+00   1.6155494e+00   2.0639767e+00   1.3638182e+00   1.7233688e+00   1.9339080e+00   1.7832555e+00   3.6083237e+00   3.6262929e+00   1.1618950e+00   2.3895606e+00   1.3000000e+00   3.3734256e+00   1.2369317e+00   2.2226111e+00   2.5416530e+00   1.1401754e+00   1.2083046e+00   1.9570386e+00   2.3021729e+00   2.7166155e+00   3.4510868e+00   2.0149442e+00   1.2288206e+00   1.5842980e+00   3.0643107e+00   2.2248595e+00   1.7663522e+00   1.1224972e+00   2.0663978e+00   2.2759613e+00   2.0149442e+00   1.3453624e+00   2.4859606e+00   2.4454039e+00   1.9493589e+00   1.3820275e+00   1.6703293e+00   2.0074860e+00   1.3190906e+00   9.8488578e-01   1.1269428e+00   8.1240384e-01   5.0990195e-01   7.0710678e-01   7.8102497e-01   9.0553851e-01   9.0553851e-01   1.0862780e+00   7.2801099e-01   1.2884099e+00   1.0862780e+00   1.1916375e+00   9.2736185e-01   8.1240384e-01   1.1313708e+00   1.2206556e+00   1.0488088e+00   2.6457513e-01   1.0954451e+00   9.3273791e-01   8.6602540e-01   8.1853528e-01   8.1240384e-01   1.7720045e+00   8.6023253e-01   1.0344080e+00   9.3273791e-01   7.5498344e-01   1.9261360e+00   9.0000000e-01   2.1142375e+00   9.6436508e-01   1.9416488e+00   1.3416408e+00   1.7058722e+00   2.7147744e+00   1.3490738e+00   2.2427661e+00   1.4560220e+00   2.5534291e+00   1.3038405e+00   1.0440307e+00   1.5362291e+00   9.1651514e-01   1.3000000e+00   1.5231546e+00   1.3490738e+00   3.1843367e+00   2.9681644e+00   5.3851648e-01   1.8894444e+00   1.0630146e+00   2.7748874e+00   7.1414284e-01   1.8055470e+00   2.0832667e+00   7.3484692e-01   9.4868330e-01   1.4035669e+00   1.8275667e+00   2.1260292e+00   3.0512293e+00   1.4491377e+00   8.5440037e-01   1.1789826e+00   2.4677925e+00   1.8627936e+00   1.3928388e+00   9.2736185e-01   1.5716234e+00   1.7549929e+00   1.5165751e+00   9.6436508e-01   1.9899749e+00   1.9748418e+00   1.4212670e+00   7.1414284e-01   1.2124356e+00   1.7000000e+00   1.0862780e+00   1.3711309e+00   6.2449980e-01   1.2845233e+00   9.9498744e-01   1.0000000e+00   1.2609520e+00   1.5588457e+00   1.7406895e+00   9.1651514e-01   4.3588989e-01   1.7320508e-01   2.6457513e-01   3.0000000e-01   1.3747727e+00   9.0000000e-01   1.2569805e+00   1.5394804e+00   9.0553851e-01   5.7445626e-01   2.4494897e-01   5.2915026e-01   1.0392305e+00   2.6457513e-01   8.7749644e-01   4.1231056e-01   6.0000000e-01   5.4772256e-01   8.4852814e-01   1.0295630e+00   4.2426407e-01   2.7386128e+00   1.4696938e+00   2.7386128e+00   2.0074860e+00   2.4248711e+00   3.5411862e+00   1.1000000e+00   3.0495901e+00   2.3043437e+00   3.2511536e+00   1.8841444e+00   1.8110770e+00   2.2912878e+00   1.4247807e+00   1.8055470e+00   2.1283797e+00   2.0273135e+00   3.8923001e+00   3.8548671e+00   1.2727922e+00   2.6191602e+00   1.3784049e+00   3.6262929e+00   1.4212670e+00   2.4677925e+00   2.8195744e+00   1.3228757e+00   
1.4106736e+00   2.1494185e+00   2.5826343e+00   2.9681644e+00   3.7469988e+00   2.1977261e+00   1.4764823e+00   1.8000000e+00   3.3075671e+00   2.4248711e+00   2.0124612e+00   1.3076697e+00   2.3021729e+00   2.4799194e+00   2.2203603e+00   1.4696938e+00   2.7147744e+00   2.6551836e+00   2.1424285e+00   1.5297059e+00   1.8867962e+00   2.2045408e+00   1.5066519e+00   1.0440307e+00   8.6602540e-01   7.5498344e-01   9.1651514e-01   9.2195445e-01   1.0630146e+00   8.5440037e-01   5.2915026e-01   1.6522712e+00   1.5132746e+00   1.6278821e+00   1.1958261e+00   6.2449980e-01   6.8556546e-01   4.2426407e-01   8.6602540e-01   1.1747340e+00   9.3273791e-01   1.2409674e+00   1.0198039e+00   5.2915026e-01   1.1704700e+00   2.1236761e+00   9.7467943e-01   8.9442719e-01   8.6023253e-01   8.2462113e-01   2.2045408e+00   9.6953597e-01   1.4491377e+00   6.0000000e-01   1.6673332e+00   9.4339811e-01   1.2489996e+00   2.5019992e+00   1.2609520e+00   2.0736441e+00   1.4594520e+00   2.0074860e+00   7.0000000e-01   8.7177979e-01   1.1958261e+00   7.8102497e-01   7.8740079e-01   8.6602540e-01   9.4339811e-01   2.7147744e+00   2.8740216e+00   1.0677078e+00   1.4352700e+00   5.4772256e-01   2.6551836e+00   6.4807407e-01   1.2449900e+00   1.7691806e+00   5.0000000e-01   3.0000000e-01   1.0677078e+00   1.6643317e+00   2.0273135e+00   2.6381812e+00   1.1000000e+00   7.0710678e-01   1.0954451e+00   2.2847319e+00   1.0954451e+00   8.6602540e-01   2.2360680e-01   1.2083046e+00   1.2845233e+00   1.1618950e+00   6.0000000e-01   1.5066519e+00   1.3964240e+00   1.0440307e+00   8.3666003e-01   7.7459667e-01   8.6023253e-01   3.6055513e-01   9.8994949e-01   7.0710678e-01   4.3588989e-01   6.7823300e-01   1.0677078e+00   1.2489996e+00   5.5677644e-01   7.3484692e-01   7.7459667e-01   8.3666003e-01   3.4641016e-01   1.1489125e+00   9.0553851e-01   8.4261498e-01   9.8994949e-01   6.7082039e-01   5.4772256e-01   6.7082039e-01   7.5498344e-01   6.4031242e-01   3.7416574e-01   1.4282857e+00   5.4772256e-01   5.0000000e-01   4.5825757e-01   3.3166248e-01   1.4594520e+00   4.1231056e-01   2.3937418e+00   1.2922848e+00   2.3000000e+00   1.6911535e+00   2.0615528e+00   3.1128765e+00   1.3928388e+00   2.6438608e+00   1.9849433e+00   2.7748874e+00   1.4212670e+00   1.4662878e+00   1.8493242e+00   1.3190906e+00   1.5842980e+00   1.7146428e+00   1.6431677e+00   3.4146742e+00   3.4655447e+00   1.1874342e+00   2.1656408e+00   1.2449900e+00   3.2155870e+00   1.0535654e+00   2.0346990e+00   2.3706539e+00   9.4868330e-01   1.0488088e+00   1.8138357e+00   2.1400935e+00   2.5416530e+00   3.2388269e+00   1.8601075e+00   1.1357817e+00   1.6155494e+00   2.8301943e+00   2.0420578e+00   1.6370706e+00   9.6953597e-01   1.8248288e+00   2.0542639e+00   1.7146428e+00   1.2922848e+00   2.2934690e+00   2.2226111e+00   1.6852300e+00   1.2206556e+00   1.4594520e+00   1.8248288e+00   1.2409674e+00   5.0990195e-01   7.5498344e-01   7.7459667e-01   6.0000000e-01   6.7823300e-01   6.4031242e-01   1.6062378e+00   1.4212670e+00   1.5297059e+00   1.1747340e+00   4.2426407e-01   1.1045361e+00   1.0344080e+00   7.4833148e-01   5.7445626e-01   1.1916375e+00   1.2206556e+00   9.9498744e-01   6.2449980e-01   1.0770330e+00   2.1307276e+00   1.0295630e+00   1.0908712e+00   1.0246951e+00   7.5498344e-01   2.2825424e+00   1.0630146e+00   1.6881943e+00   7.0000000e-01   1.5000000e+00   8.6023253e-01   1.2609520e+00   2.2781571e+00   1.4696938e+00   1.7916473e+00   1.0295630e+00   2.1118712e+00   9.0553851e-01   6.0827625e-01   1.1045361e+00   7.8740079e-01   1.0908712e+00   
1.1401754e+00   8.6023253e-01   2.7166155e+00   2.5709920e+00   4.3588989e-01   1.4594520e+00   9.1104336e-01   2.3537205e+00   3.6055513e-01   1.3416408e+00   1.6124515e+00   4.4721360e-01   6.1644140e-01   9.7467943e-01   1.3711309e+00   1.7029386e+00   2.5980762e+00   1.0392305e+00   3.6055513e-01   7.4161985e-01   2.0712315e+00   1.4525839e+00   9.0553851e-01   6.6332496e-01   1.1532563e+00   1.3490738e+00   1.1832160e+00   7.0000000e-01   1.5427249e+00   1.5620499e+00   1.0677078e+00   4.1231056e-01   7.9372539e-01   1.3076697e+00   7.3484692e-01   5.1961524e-01   6.4807407e-01   7.3484692e-01   8.6023253e-01   3.8729833e-01   1.2961481e+00   1.1575837e+00   1.2489996e+00   8.6023253e-01   5.8309519e-01   8.1240384e-01   7.5498344e-01   7.3484692e-01   6.2449980e-01   8.1240384e-01   9.7467943e-01   7.0000000e-01   3.0000000e-01   7.8740079e-01   1.8601075e+00   7.2111026e-01   6.7082039e-01   6.5574385e-01   4.3588989e-01   1.9974984e+00   7.2801099e-01   1.9157244e+00   8.6602540e-01   1.8138357e+00   1.1045361e+00   1.5524175e+00   2.5903668e+00   1.3490738e+00   2.0904545e+00   1.4212670e+00   2.3452079e+00   1.0583005e+00   9.7467943e-01   1.4071247e+00   9.8994949e-01   1.3000000e+00   1.3490738e+00   1.0954451e+00   2.9257478e+00   2.9410882e+00   7.4161985e-01   1.7349352e+00   9.6436508e-01   2.6832816e+00   6.7082039e-01   1.5556349e+00   1.8493242e+00   6.1644140e-01   6.6332496e-01   1.3076697e+00   1.6186414e+00   2.0346990e+00   2.7874720e+00   1.3784049e+00   5.3851648e-01   9.4339811e-01   2.4020824e+00   1.6278821e+00   1.0862780e+00   6.4807407e-01   1.4247807e+00   1.6431677e+00   1.4491377e+00   8.6602540e-01   1.8165902e+00   1.8165902e+00   1.3638182e+00   8.4261498e-01   1.0440307e+00   1.4387495e+00   7.7459667e-01   2.6457513e-01   6.5574385e-01   8.6602540e-01   4.8989795e-01   1.1445523e+00   1.1618950e+00   1.2288206e+00   7.5498344e-01   9.6436508e-01   1.0440307e+00   7.3484692e-01   5.7445626e-01   6.1644140e-01   8.3066239e-01   1.0295630e+00   9.5916630e-01   4.4721360e-01   7.4161985e-01   1.8466185e+00   8.3066239e-01   7.2111026e-01   7.0710678e-01   2.0000000e-01   1.8920888e+00   7.3484692e-01   2.1213203e+00   1.1832160e+00   1.9235384e+00   1.3964240e+00   1.7549929e+00   2.7166155e+00   1.6155494e+00   2.2494444e+00   1.6583124e+00   2.4103942e+00   1.1090537e+00   1.1832160e+00   1.5000000e+00   1.2767145e+00   1.4899664e+00   1.4456832e+00   1.3076697e+00   3.0116441e+00   3.0886890e+00   1.0862780e+00   1.8165902e+00   1.2247449e+00   2.8195744e+00   8.1240384e-01   1.6881943e+00   1.9672316e+00   7.4161985e-01   8.4261498e-01   1.5297059e+00   1.7291616e+00   2.1470911e+00   2.8213472e+00   1.5842980e+00   8.3666003e-01   1.3711309e+00   2.4372115e+00   1.7776389e+00   1.3152946e+00   8.1853528e-01   1.4628739e+00   1.7406895e+00   1.3892444e+00   1.1832160e+00   1.9519221e+00   1.9104973e+00   1.3820275e+00   1.0099505e+00   1.1489125e+00   1.5811388e+00   1.0723805e+00   4.8989795e-01   6.7823300e-01   6.2449980e-01   1.3928388e+00   1.4212670e+00   1.4899664e+00   1.0099505e+00   9.8994949e-01   1.2083046e+00   7.5498344e-01   3.4641016e-01   7.6811457e-01   1.0488088e+00   1.2767145e+00   1.1874342e+00   5.3851648e-01   1.0000000e+00   2.1023796e+00   1.0677078e+00   9.4339811e-01   9.3273791e-01   4.3588989e-01   2.1330729e+00   9.7467943e-01   1.9874607e+00   1.2124356e+00   1.7291616e+00   1.3038405e+00   1.6155494e+00   2.5159491e+00   1.8000000e+00   2.0663978e+00   1.5427249e+00   2.1954498e+00   9.4868330e-01   1.0908712e+00   
1.3190906e+00   1.3341664e+00   1.4730920e+00   1.3038405e+00   1.1747340e+00   2.7892651e+00   2.9034462e+00   1.1704700e+00   1.6217275e+00   1.2845233e+00   2.6267851e+00   7.6811457e-01   1.5099669e+00   1.7663522e+00   7.2111026e-01   8.1240384e-01   1.4177447e+00   1.5362291e+00   1.9544820e+00   2.5865034e+00   1.4696938e+00   7.9372539e-01   1.3601471e+00   2.2158520e+00   1.6401219e+00   1.1916375e+00   8.2462113e-01   1.2609520e+00   1.5684387e+00   1.1832160e+00   1.2124356e+00   1.7720045e+00   1.7320508e+00   1.2083046e+00   9.7467943e-01   1.0049876e+00   1.4594520e+00   1.0677078e+00   4.2426407e-01   8.6602540e-01   1.7606817e+00   1.7146428e+00   1.7944358e+00   1.3638182e+00   8.8317609e-01   1.4491377e+00   1.0630146e+00   3.4641016e-01   8.1853528e-01   1.4071247e+00   1.5588457e+00   1.3892444e+00   7.5498344e-01   1.3114877e+00   2.4289916e+00   1.3490738e+00   1.2845233e+00   1.2609520e+00   7.9372539e-01   2.5119713e+00   1.3076697e+00   1.7748239e+00   1.1618950e+00   1.3527749e+00   1.0295630e+00   1.3304135e+00   2.1000000e+00   1.9697716e+00   1.6340135e+00   1.1224972e+00   1.9235384e+00   8.3666003e-01   8.1853528e-01   1.0099505e+00   1.3038405e+00   1.4456832e+00   1.1747340e+00   8.8317609e-01   2.4617067e+00   2.4637370e+00   1.0246951e+00   1.3379088e+00   1.3453624e+00   2.1863211e+00   6.5574385e-01   1.2489996e+00   1.3856406e+00   7.2111026e-01   8.3666003e-01   1.1357817e+00   1.1135529e+00   1.5165751e+00   2.2649503e+00   1.2000000e+00   5.9160798e-01   1.0816654e+00   1.8303005e+00   1.5000000e+00   9.4868330e-01   9.1651514e-01   9.7467943e-01   1.3190906e+00   1.0000000e+00   1.1618950e+00   1.4764823e+00   1.5099669e+00   1.0099505e+00   7.9372539e-01   8.0622577e-01   1.3747727e+00   1.0488088e+00   8.8881944e-01   1.9748418e+00   1.8973666e+00   1.9949937e+00   1.5362291e+00   7.7459667e-01   1.4071247e+00   9.5393920e-01   3.7416574e-01   1.0816654e+00   1.4764823e+00   1.6881943e+00   1.4866069e+00   7.8102497e-01   1.4899664e+00   2.6000000e+00   1.4491377e+00   1.3747727e+00   1.3453624e+00   9.5393920e-01   2.6776856e+00   1.4177447e+00   1.3747727e+00   9.7467943e-01   1.0630146e+00   7.3484692e-01   9.6436508e-01   1.8788294e+00   1.9339080e+00   1.4387495e+00   9.4868330e-01   1.5684387e+00   4.2426407e-01   5.5677644e-01   6.4807407e-01   1.1575837e+00   1.1618950e+00   7.6157731e-01   5.4772256e-01   2.1863211e+00   2.2649503e+00   1.0816654e+00   9.6436508e-01   1.1618950e+00   2.0049938e+00   5.1961524e-01   8.6023253e-01   1.1401754e+00   5.8309519e-01   6.1644140e-01   8.0622577e-01   9.4868330e-01   1.3341664e+00   2.0322401e+00   8.6023253e-01   5.0000000e-01   9.8488578e-01   1.6031220e+00   1.0816654e+00   6.0000000e-01   7.3484692e-01   6.0827625e-01   9.2736185e-01   6.4807407e-01   9.7467943e-01   1.1045361e+00   1.1045361e+00   6.3245553e-01   6.7082039e-01   4.1231056e-01   9.6436508e-01   8.1240384e-01   1.1958261e+00   1.0723805e+00   1.1789826e+00   7.2801099e-01   6.4031242e-01   6.0827625e-01   5.0990195e-01   7.5498344e-01   7.0710678e-01   6.0827625e-01   8.3666003e-01   6.6332496e-01   2.0000000e-01   6.8556546e-01   1.7464249e+00   5.7445626e-01   5.2915026e-01   4.6904158e-01   3.4641016e-01   1.8384776e+00   5.4772256e-01   1.8708287e+00   7.7459667e-01   1.8814888e+00   1.1789826e+00   1.5620499e+00   2.7092434e+00   1.1874342e+00   2.2405357e+00   1.5588457e+00   2.3430749e+00   9.7467943e-01   1.0000000e+00   1.4177447e+00   8.6602540e-01   1.1045361e+00   1.2369317e+00   1.1618950e+00   3.0049958e+00   
3.0626786e+00   8.6023253e-01   1.7262677e+00   7.6157731e-01   2.8266588e+00   6.1644140e-01   1.5652476e+00   1.9672316e+00   4.7958315e-01   5.1961524e-01   1.3190906e+00   1.7748239e+00   2.1656408e+00   2.8774989e+00   1.3674794e+00   6.7823300e-01   1.1489125e+00   2.4698178e+00   1.5362291e+00   1.1357817e+00   4.3588989e-01   1.4212670e+00   1.5968719e+00   1.3601471e+00   7.7459667e-01   1.8248288e+00   1.7578396e+00   1.2767145e+00   8.1240384e-01   1.0000000e+00   1.3190906e+00   6.8556546e-01   4.2426407e-01   3.4641016e-01   4.6904158e-01   1.7378147e+00   1.2247449e+00   1.4456832e+00   1.7146428e+00   1.1618950e+00   7.8740079e-01   6.2449980e-01   9.4339811e-01   1.3000000e+00   5.4772256e-01   7.8740079e-01   7.7459667e-01   8.3066239e-01   8.1853528e-01   1.0344080e+00   7.9372539e-01   7.0000000e-01   3.0577770e+00   1.8411953e+00   3.0149627e+00   2.3452079e+00   2.7440845e+00   3.8196859e+00   1.4628739e+00   3.3361655e+00   2.6343880e+00   3.5014283e+00   2.1354157e+00   2.1330729e+00   2.5651511e+00   1.8055470e+00   2.1377558e+00   2.4041631e+00   2.3323808e+00   4.1376322e+00   4.1533119e+00   1.6583124e+00   2.8861739e+00   1.7349352e+00   3.9089641e+00   1.7233688e+00   2.7459060e+00   3.0822070e+00   1.6186414e+00   1.7088007e+00   2.4799194e+00   2.8390139e+00   3.2403703e+00   3.9610605e+00   2.5258662e+00   1.7916473e+00   2.1748563e+00   3.5510562e+00   2.7147744e+00   2.3194827e+00   1.6062378e+00   2.5514702e+00   2.7604347e+00   2.4372115e+00   1.8411953e+00   3.0033315e+00   2.9291637e+00   2.3958297e+00   1.8520259e+00   2.1656408e+00   2.4879711e+00   1.8439089e+00   1.4142136e-01   4.4721360e-01   1.5099669e+00   1.0099505e+00   1.4106736e+00   1.7029386e+00   1.0246951e+00   7.0710678e-01   3.0000000e-01   6.4031242e-01   1.2041595e+00   4.2426407e-01   7.2111026e-01   5.4772256e-01   7.5498344e-01   7.0000000e-01   1.0148892e+00   9.0000000e-01   5.7445626e-01   2.8722813e+00   1.5842980e+00   2.8861739e+00   2.1494185e+00   2.5632011e+00   3.6891733e+00   1.1045361e+00   3.1984371e+00   2.4372115e+00   3.4029399e+00   2.0346990e+00   1.9467922e+00   2.4372115e+00   1.5165751e+00   1.9052559e+00   2.2671568e+00   2.1771541e+00   4.0521599e+00   3.9912404e+00   1.3747727e+00   2.7658633e+00   1.4798649e+00   3.7709415e+00   1.5588457e+00   2.6191602e+00   2.9765752e+00   1.4628739e+00   1.5556349e+00   2.2825424e+00   2.7386128e+00   3.1144823e+00   3.9102430e+00   2.3280893e+00   1.6278821e+00   1.9313208e+00   3.4539832e+00   2.5632011e+00   2.1633308e+00   1.4491377e+00   2.4515301e+00   2.6191602e+00   2.3622024e+00   1.5842980e+00   2.8600699e+00   2.7964263e+00   2.2803509e+00   1.6522712e+00   2.0322401e+00   2.3430749e+00   1.6431677e+00   5.0990195e-01   1.6309506e+00   1.1224972e+00   1.5000000e+00   1.7832555e+00   1.1090537e+00   7.8740079e-01   4.3588989e-01   7.5498344e-01   1.3000000e+00   5.0990195e-01   6.4807407e-01   6.6332496e-01   8.3066239e-01   7.9372539e-01   1.0908712e+00   8.1853528e-01   6.7082039e-01   2.9983329e+00   1.7175564e+00   2.9949958e+00   2.2671568e+00   2.6851443e+00   3.7934153e+00   1.2247449e+00   3.3000000e+00   2.5495098e+00   3.5128336e+00   2.1447611e+00   2.0663978e+00   2.5495098e+00   1.6552945e+00   2.0420578e+00   2.3874673e+00   2.2891046e+00   4.1521079e+00   4.1000000e+00   1.4933185e+00   2.8792360e+00   1.6155494e+00   3.8729833e+00   1.6763055e+00   2.7313001e+00   3.0757113e+00   1.5811388e+00   1.6733201e+00   2.4062419e+00   2.8319605e+00   3.2155870e+00   4.0012498e+00   2.4535688e+00   
1.7349352e+00   2.0420578e+00   3.5566838e+00   2.6851443e+00   2.2759613e+00   1.5684387e+00   2.5592968e+00   2.7386128e+00   2.4698178e+00   1.7175564e+00   2.9765752e+00   2.9154759e+00   2.3958297e+00   1.7748239e+00   2.1470911e+00   2.4637370e+00   1.7663522e+00   1.2806248e+00   8.3666003e-01   1.0246951e+00   1.3038405e+00   8.1853528e-01   4.2426407e-01   3.8729833e-01   5.9160798e-01   8.4261498e-01   1.4142136e-01   1.0954451e+00   3.7416574e-01   4.3588989e-01   3.8729833e-01   6.0827625e-01   1.1618950e+00   2.6457513e-01   2.5903668e+00   1.3892444e+00   2.5670995e+00   1.8814888e+00   2.2781571e+00   3.3808283e+00   1.2083046e+00   2.9000000e+00   2.1954498e+00   3.0495901e+00   1.6792856e+00   1.6763055e+00   2.1118712e+00   1.3784049e+00   1.7000000e+00   1.9442222e+00   1.8708287e+00   3.6959437e+00   3.7188708e+00   1.2609520e+00   2.4310492e+00   1.3000000e+00   3.4785054e+00   1.2688578e+00   2.2847319e+00   2.6419690e+00   1.1575837e+00   1.2409674e+00   2.0174241e+00   2.4124676e+00   2.8106939e+00   3.5369478e+00   2.0639767e+00   1.3379088e+00   1.7406895e+00   3.1224990e+00   2.2516660e+00   1.8547237e+00   1.1401754e+00   2.1047565e+00   2.3021729e+00   2.0049938e+00   1.3892444e+00   2.5416530e+00   2.4698178e+00   1.9493589e+00   1.4106736e+00   1.7058722e+00   2.0273135e+00   1.3784049e+00   9.0553851e-01   9.2195445e-01   9.0553851e-01   9.1104336e-01   1.1575837e+00   1.2609520e+00   9.5393920e-01   6.2449980e-01   1.1916375e+00   2.1817424e+00   1.0295630e+00   1.0723805e+00   1.0148892e+00   9.0000000e-01   2.3473389e+00   1.0908712e+00   1.4387495e+00   3.6055513e-01   1.4798649e+00   6.4807407e-01   1.0908712e+00   2.2693611e+00   1.2727922e+00   1.7916473e+00   1.0295630e+00   2.0149442e+00   8.1240384e-01   5.3851648e-01   1.0677078e+00   5.4772256e-01   8.3066239e-01   9.6953597e-01   7.3484692e-01   2.6495283e+00   2.5748786e+00   5.1961524e-01   1.3820275e+00   6.0827625e-01   2.3706539e+00   4.1231056e-01   1.2083046e+00   1.5937377e+00   4.2426407e-01   4.2426407e-01   8.1853528e-01   1.4212670e+00   1.7492856e+00   2.5826343e+00   8.8317609e-01   3.3166248e-01   5.5677644e-01   2.1142375e+00   1.2124356e+00   7.2111026e-01   4.6904158e-01   1.1445523e+00   1.2409674e+00   1.2083046e+00   3.6055513e-01   1.4212670e+00   1.4212670e+00   1.0392305e+00   4.7958315e-01   7.1414284e-01   1.0535654e+00   3.7416574e-01   7.2801099e-01   1.3190906e+00   1.1618950e+00   4.8989795e-01   7.4161985e-01   5.1961524e-01   7.1414284e-01   8.1240384e-01   1.5297059e+00   5.0990195e-01   5.1961524e-01   4.7958315e-01   8.5440037e-01   1.6583124e+00   5.7445626e-01   2.0371549e+00   8.7749644e-01   2.2825424e+00   1.4560220e+00   1.8411953e+00   3.1000000e+00   7.3484692e-01   2.6362853e+00   1.9287302e+00   2.6758176e+00   1.3638182e+00   1.3747727e+00   1.8220867e+00   9.1651514e-01   1.1704700e+00   1.5231546e+00   1.5165751e+00   3.3555923e+00   3.4423829e+00   1.1180340e+00   2.0904545e+00   7.0000000e-01   3.2280025e+00   1.0723805e+00   1.8920888e+00   2.3706539e+00   9.2736185e-01   8.6023253e-01   1.6155494e+00   2.2226111e+00   2.6000000e+00   3.2787193e+00   1.6552945e+00   1.1000000e+00   1.3674794e+00   2.9137605e+00   1.7291616e+00   1.4491377e+00   7.3484692e-01   1.8520259e+00   1.9287302e+00   1.8055470e+00   8.7749644e-01   2.1447611e+00   2.0542639e+00   1.6792856e+00   1.2124356e+00   1.3964240e+00   1.5000000e+00   8.3666003e-01   7.9372539e-01   1.1832160e+00   7.5498344e-01   1.1832160e+00   1.0295630e+00   4.6904158e-01   1.0440307e+00   
2.0024984e+00   9.1104336e-01   7.0710678e-01   7.2111026e-01   6.4807407e-01   2.0297783e+00   8.3666003e-01   1.7776389e+00   9.8994949e-01   1.8920888e+00   1.2609520e+00   1.5684387e+00   2.7166155e+00   1.4247807e+00   2.2847319e+00   1.7406895e+00   2.2022716e+00   9.0000000e-01   1.1747340e+00   1.4317821e+00   1.1445523e+00   1.1832160e+00   1.1532563e+00   1.2041595e+00   2.8722813e+00   3.1272992e+00   1.3038405e+00   1.6673332e+00   9.1651514e-01   2.8722813e+00   8.8317609e-01   1.4798649e+00   1.9416488e+00   7.2801099e-01   6.0827625e-01   1.4071247e+00   1.8138357e+00   2.2293497e+00   2.7459060e+00   1.4456832e+00   9.0553851e-01   1.3784049e+00   2.4698178e+00   1.3928388e+00   1.1357817e+00   5.3851648e-01   1.4000000e+00   1.5588457e+00   1.3228757e+00   9.8994949e-01   1.7691806e+00   1.6583124e+00   1.2767145e+00   1.1135529e+00   1.0295630e+00   1.1575837e+00   7.5498344e-01   9.6436508e-01   1.2727922e+00   1.5264338e+00   1.3674794e+00   6.2449980e-01   1.2806248e+00   2.3958297e+00   1.2884099e+00   1.1618950e+00   1.1532563e+00   7.0000000e-01   2.4433583e+00   1.2206556e+00   1.7000000e+00   1.1357817e+00   1.4035669e+00   1.0488088e+00   1.3228757e+00   2.1886069e+00   1.9183326e+00   1.7464249e+00   1.2884099e+00   1.8601075e+00   6.7823300e-01   8.7749644e-01   1.0099505e+00   1.3038405e+00   1.3674794e+00   1.0488088e+00   8.8317609e-01   2.4454039e+00   2.5942244e+00   1.1789826e+00   1.3000000e+00   1.2609520e+00   2.3108440e+00   6.7082039e-01   1.1832160e+00   1.4282857e+00   6.6332496e-01   7.0710678e-01   1.1618950e+00   1.2165525e+00   1.6431677e+00   2.2516660e+00   1.2165525e+00   6.4031242e-01   1.1958261e+00   1.9000000e+00   1.3674794e+00   9.0553851e-01   7.7459667e-01   9.4339811e-01   1.2727922e+00   9.1651514e-01   1.1357817e+00   1.4491377e+00   1.4282857e+00   9.4868330e-01   8.7749644e-01   7.4161985e-01   1.2124356e+00   9.4868330e-01   1.0344080e+00   9.1651514e-01   8.6023253e-01   7.6157731e-01   7.1414284e-01   1.7291616e+00   8.3066239e-01   9.4868330e-01   8.7177979e-01   6.1644140e-01   1.8654758e+00   8.3666003e-01   2.2360680e+00   1.1224972e+00   2.0049938e+00   1.4317821e+00   1.8165902e+00   2.7676705e+00   1.4730920e+00   2.2847319e+00   1.5524175e+00   2.6134269e+00   1.3527749e+00   1.1575837e+00   1.6093477e+00   1.1180340e+00   1.4832397e+00   1.6217275e+00   1.4106736e+00   3.2109189e+00   3.0495901e+00   7.0710678e-01   1.9646883e+00   1.2165525e+00   2.8266588e+00   8.1240384e-01   1.8681542e+00   2.1047565e+00   8.1853528e-01   1.0148892e+00   1.5297059e+00   1.8303005e+00   2.1702534e+00   3.0495901e+00   1.5842980e+00   8.8317609e-01   1.2569805e+00   2.5179357e+00   1.9646883e+00   1.4525839e+00   9.9498744e-01   1.6248077e+00   1.8574176e+00   1.5779734e+00   1.1224972e+00   2.0760539e+00   2.0712315e+00   1.5132746e+00   8.7177979e-01   1.2884099e+00   1.7944358e+00   1.1789826e+00   5.1961524e-01   5.1961524e-01   7.1414284e-01   4.6904158e-01   1.2569805e+00   3.1622777e-01   1.7320508e-01   1.7320508e-01   6.4031242e-01   1.3228757e+00   2.2360680e-01   2.3727621e+00   1.2206556e+00   2.4758837e+00   1.7320508e+00   2.1236761e+00   3.3000000e+00   1.0295630e+00   2.8266588e+00   2.1447611e+00   2.8913665e+00   1.5297059e+00   1.5905974e+00   2.0099751e+00   1.2489996e+00   1.5132746e+00   1.7663522e+00   1.7378147e+00   3.5524639e+00   3.6619667e+00   1.2845233e+00   2.3000000e+00   1.0816654e+00   3.4205263e+00   1.2124356e+00   2.1213203e+00   2.5416530e+00   1.0677078e+00   1.0677078e+00   1.8894444e+00   
2.3537205e+00   2.7640550e+00   3.4219877e+00   1.9339080e+00   1.2529964e+00   1.6340135e+00   3.0675723e+00   2.0273135e+00   1.6911535e+00   9.4868330e-01   2.0074860e+00   2.1633308e+00   1.9235384e+00   1.2206556e+00   2.3916521e+00   2.3021729e+00   1.8493242e+00   1.3820275e+00   1.5842980e+00   1.7916473e+00   1.1575837e+00   4.2426407e-01   9.8994949e-01   3.3166248e-01   9.3273791e-01   3.0000000e-01   5.8309519e-01   4.8989795e-01   8.6023253e-01   1.0954451e+00   3.7416574e-01   2.5922963e+00   1.3038405e+00   2.6570661e+00   1.9000000e+00   2.3021729e+00   3.4727511e+00   8.7749644e-01   2.9899833e+00   2.2203603e+00   3.1543621e+00   1.7860571e+00   1.7029386e+00   2.1977261e+00   1.2369317e+00   1.6124515e+00   1.9974984e+00   1.9364917e+00   3.8249183e+00   3.7762415e+00   1.1747340e+00   2.5179357e+00   1.1832160e+00   3.5651087e+00   1.3190906e+00   2.3685439e+00   2.7622455e+00   1.2124356e+00   1.2922848e+00   2.0248457e+00   2.5436195e+00   2.9103264e+00   3.7013511e+00   2.0663978e+00   1.4071247e+00   1.7146428e+00   3.2403703e+00   2.2847319e+00   1.9157244e+00   1.1789826e+00   2.2181073e+00   2.3600847e+00   2.1283797e+00   1.3038405e+00   2.6057628e+00   2.5317978e+00   2.0322401e+00   1.4142136e+00   1.7832555e+00   2.0639767e+00   1.3674794e+00   7.7459667e-01   5.0000000e-01   1.2609520e+00   2.6457513e-01   4.8989795e-01   4.2426407e-01   7.7459667e-01   1.4628739e+00   4.2426407e-01   2.3194827e+00   1.0392305e+00   2.4041631e+00   1.5905974e+00   2.0297783e+00   3.1968735e+00   7.9372539e-01   2.7018512e+00   1.9416488e+00   2.9103264e+00   1.5779734e+00   1.4560220e+00   1.9672316e+00   1.0246951e+00   1.4352700e+00   1.7860571e+00   1.6522712e+00   3.5454196e+00   3.5071356e+00   9.2736185e-01   2.2847319e+00   9.6953597e-01   3.2878564e+00   1.1224972e+00   2.1047565e+00   2.4839485e+00   1.0246951e+00   1.0630146e+00   1.7606817e+00   2.2737634e+00   2.6514147e+00   3.4409301e+00   1.8138357e+00   1.1224972e+00   1.3564660e+00   3.0166206e+00   2.0396078e+00   1.6217275e+00   9.6436508e-01   2.0049938e+00   2.1377558e+00   1.9773720e+00   1.0392305e+00   2.3473389e+00   2.3043437e+00   1.8574176e+00   1.2247449e+00   1.5620499e+00   1.8275667e+00   1.0816654e+00   8.0622577e-01   1.8841444e+00   7.1414284e-01   6.0000000e-01   5.8309519e-01   3.4641016e-01   1.9748418e+00   6.7823300e-01   1.8165902e+00   8.2462113e-01   1.7832555e+00   1.1000000e+00   1.4966630e+00   2.5961510e+00   1.3379088e+00   2.1213203e+00   1.4866069e+00   2.2427661e+00   9.0000000e-01   9.5916630e-01   1.3379088e+00   9.6436508e-01   1.1747340e+00   1.1958261e+00   1.0630146e+00   2.8722813e+00   2.9698485e+00   9.0553851e-01   1.6431677e+00   8.6023253e-01   2.7147744e+00   6.1644140e-01   1.4662878e+00   1.8357560e+00   5.0000000e-01   5.0000000e-01   1.2727922e+00   1.6401219e+00   2.0566964e+00   2.7349589e+00   1.3304135e+00   5.8309519e-01   1.0770330e+00   2.3706539e+00   1.4832397e+00   1.0344080e+00   4.5825757e-01   1.3341664e+00   1.5394804e+00   1.3076697e+00   8.2462113e-01   1.7406895e+00   1.6941074e+00   1.2369317e+00   8.3666003e-01   9.3808315e-01   1.2727922e+00   6.7082039e-01   1.1224972e+00   3.1622777e-01   4.5825757e-01   3.8729833e-01   5.9160798e-01   1.2288206e+00   2.6457513e-01   2.5357445e+00   1.3076697e+00   2.5039968e+00   1.8055470e+00   2.2113344e+00   3.3120990e+00   1.1489125e+00   2.8266588e+00   2.1023796e+00   3.0099834e+00   1.6431677e+00   1.5968719e+00   2.0542639e+00   1.2884099e+00   1.6401219e+00   1.9026298e+00   1.8055470e+00   
3.6523965e+00   3.6373067e+00   1.1357817e+00   2.3811762e+00   1.2369317e+00   3.4029399e+00   1.1958261e+00   2.2360680e+00   2.5845696e+00   1.0954451e+00   1.1916375e+00   1.9416488e+00   2.3494680e+00   2.7386128e+00   3.5000000e+00   1.9899749e+00   1.2609520e+00   1.6401219e+00   3.0643107e+00   2.2113344e+00   1.7944358e+00   1.0954451e+00   2.0566964e+00   2.2494444e+00   1.9697716e+00   1.3076697e+00   2.4859606e+00   2.4248711e+00   1.9026298e+00   1.3228757e+00   1.6522712e+00   1.9924859e+00   1.3190906e+00   1.1916375e+00   1.3527749e+00   1.3228757e+00   1.7000000e+00   3.8729833e-01   1.2124356e+00   3.4971417e+00   2.2022716e+00   3.5874782e+00   2.8248894e+00   3.2295511e+00   4.3988635e+00   1.4071247e+00   3.9102430e+00   3.1336879e+00   4.0767634e+00   2.7018512e+00   2.6324893e+00   3.1272992e+00   2.1023796e+00   2.4677925e+00   2.9086079e+00   2.8670542e+00   4.7476310e+00   4.6936127e+00   2.0371549e+00   3.4452866e+00   2.0420578e+00   4.4833024e+00   2.2472205e+00   3.2954514e+00   3.6851052e+00   2.1400935e+00   2.2135944e+00   2.9512709e+00   3.4554305e+00   3.8288379e+00   4.6119410e+00   2.9899833e+00   2.3302360e+00   2.5980762e+00   4.1605288e+00   3.1859065e+00   2.8425341e+00   2.0928450e+00   3.1416556e+00   3.2832910e+00   3.0298515e+00   2.2022716e+00   3.5355339e+00   3.4496377e+00   2.9461840e+00   2.3302360e+00   2.7110883e+00   2.9580399e+00   2.2759613e+00   3.3166248e-01   2.2360680e-01   6.4031242e-01   1.3304135e+00   1.7320508e-01   2.3515952e+00   1.1000000e+00   2.4228083e+00   1.6552945e+00   2.0663978e+00   3.2388269e+00   8.8317609e-01   2.7549955e+00   2.0149442e+00   2.9017236e+00   1.5362291e+00   1.4866069e+00   1.9646883e+00   1.0862780e+00   1.4387495e+00   1.7606817e+00   1.6852300e+00   3.5608988e+00   3.5651087e+00   1.0440307e+00   2.2781571e+00   9.9498744e-01   3.3406586e+00   1.1090537e+00   2.1118712e+00   2.5099801e+00   9.8994949e-01   1.0392305e+00   1.8027756e+00   2.3021729e+00   2.6870058e+00   3.4394767e+00   1.8493242e+00   1.1618950e+00   1.4933185e+00   3.0182777e+00   2.0371549e+00   1.6552945e+00   9.2736185e-01   1.9824228e+00   2.1307276e+00   1.9131126e+00   1.1000000e+00   2.3622024e+00   2.2934690e+00   1.8165902e+00   1.2369317e+00   1.5459625e+00   1.8138357e+00   1.1135529e+00   1.4142136e-01   5.2915026e-01   1.4352700e+00   2.4494897e-01   2.3194827e+00   1.1832160e+00   2.3790755e+00   1.6401219e+00   2.0493902e+00   3.1906112e+00   1.1090537e+00   2.7092434e+00   2.0420578e+00   2.8124722e+00   1.4594520e+00   1.5099669e+00   1.9261360e+00   1.2369317e+00   1.5165751e+00   1.7175564e+00   1.6401219e+00   3.4481879e+00   3.5580894e+00   1.2083046e+00   2.2226111e+00   1.0862780e+00   3.3060551e+00   1.1401754e+00   2.0371549e+00   2.4269322e+00   1.0049876e+00   1.0049876e+00   1.8165902e+00   2.2293497e+00   2.6514147e+00   3.3105891e+00   1.8681542e+00   1.1401754e+00   1.5231546e+00   2.9698485e+00   1.9798990e+00   1.5968719e+00   9.0000000e-01   1.9235384e+00   2.1000000e+00   1.8627936e+00   1.1832160e+00   2.3130067e+00   2.2427661e+00   1.7916473e+00   1.3190906e+00   1.5099669e+00   1.7492856e+00   1.1000000e+00   5.0990195e-01   1.4142136e+00   1.4142136e-01   2.2803509e+00   1.1045361e+00   2.3452079e+00   1.6031220e+00   2.0049938e+00   3.1654384e+00   1.0246951e+00   2.6870058e+00   1.9924859e+00   2.7910571e+00   1.4247807e+00   1.4491377e+00   1.8841444e+00   1.1357817e+00   1.4282857e+00   1.6703293e+00   1.6093477e+00   3.4452866e+00   3.5185224e+00   1.1224972e+00   2.1863211e+00   
1.0000000e+00   3.2787193e+00   1.0677078e+00   2.0124612e+00   2.4145393e+00   9.3273791e-01   9.5393920e-01   1.7606817e+00   2.2158520e+00   2.6210685e+00   3.3136083e+00   1.8083141e+00   1.1045361e+00   1.4899664e+00   2.9359837e+00   1.9442222e+00   1.5716234e+00   8.4261498e-01   1.8867962e+00   2.0518285e+00   1.8138357e+00   1.1045361e+00   2.2781571e+00   2.2022716e+00   1.7349352e+00   1.2328828e+00   1.4628739e+00   1.7146428e+00   1.0535654e+00   1.7606817e+00   5.4772256e-01   2.1213203e+00   1.0954451e+00   2.0049938e+00   1.3964240e+00   1.7776389e+00   2.8106939e+00   1.4317821e+00   2.3366643e+00   1.7058722e+00   2.4839485e+00   1.1445523e+00   1.2000000e+00   1.5652476e+00   1.1789826e+00   1.4212670e+00   1.4594520e+00   1.3379088e+00   3.1032241e+00   3.1780497e+00   1.0295630e+00   1.8814888e+00   1.1045361e+00   2.9171904e+00   8.1240384e-01   1.7349352e+00   2.0566964e+00   7.1414284e-01   7.9372539e-01   1.5427249e+00   1.8303005e+00   2.2472205e+00   2.9325757e+00   1.5968719e+00   8.3666003e-01   1.3416408e+00   2.5495098e+00   1.7776389e+00   1.3304135e+00   7.4161985e-01   1.5427249e+00   1.7860571e+00   1.4730920e+00   1.0954451e+00   2.0024984e+00   1.9519221e+00   1.4387495e+00   1.0099505e+00   1.1832160e+00   1.5684387e+00   9.9498744e-01   1.3038405e+00   3.6110940e+00   2.3622024e+00   3.6959437e+00   2.9748950e+00   3.3555923e+00   4.5232732e+00   1.6278821e+00   4.0472213e+00   3.3000000e+00   4.1460825e+00   2.7694765e+00   2.7676705e+00   3.2233523e+00   2.2737634e+00   2.5845696e+00   2.9849623e+00   2.9916551e+00   4.8321838e+00   4.8394215e+00   2.2494444e+00   3.5298725e+00   2.1817424e+00   4.6206060e+00   2.3622024e+00   3.3896903e+00   3.7934153e+00   2.2427661e+00   2.3130067e+00   3.0886890e+00   3.5707142e+00   3.9534795e+00   4.6797436e+00   3.1224990e+00   2.4698178e+00   2.8035692e+00   4.2497059e+00   3.2710854e+00   2.9647934e+00   2.1886069e+00   3.2186954e+00   3.3719431e+00   3.0740852e+00   2.3622024e+00   3.6373067e+00   3.5284558e+00   3.0149627e+00   2.4657656e+00   2.8035692e+00   3.0364453e+00   2.4062419e+00   2.3790755e+00   1.1747340e+00   2.4248711e+00   1.6941074e+00   2.0928450e+00   3.2465366e+00   1.0246951e+00   2.7676705e+00   2.0566964e+00   2.8861739e+00   1.5132746e+00   1.5165751e+00   1.9621417e+00   1.1789826e+00   1.4899664e+00   1.7578396e+00   1.7000000e+00   3.5454196e+00   3.5888717e+00   1.1401754e+00   2.2715633e+00   1.0677078e+00   3.3541020e+00   1.1224972e+00   2.1095023e+00   2.5039968e+00   9.9498744e-01   1.0440307e+00   1.8384776e+00   2.2956481e+00   2.6925824e+00   3.4088121e+00   1.8841444e+00   1.1832160e+00   1.5684387e+00   3.0066593e+00   2.0445048e+00   1.6703293e+00   9.3273791e-01   1.9646883e+00   2.1330729e+00   1.8788294e+00   1.1747340e+00   2.3685439e+00   2.2912878e+00   1.8027756e+00   1.2727922e+00   1.5427249e+00   1.8165902e+00   1.1532563e+00   1.3341664e+00   9.4868330e-01   9.0000000e-01   5.0990195e-01   1.5165751e+00   2.3430749e+00   1.3190906e+00   1.1532563e+00   9.5393920e-01   1.0535654e+00   1.1045361e+00   8.6602540e-01   1.5000000e+00   1.1489125e+00   7.4161985e-01   9.3273791e-01   1.6703293e+00   1.8165902e+00   1.8165902e+00   7.0710678e-01   1.4832397e+00   1.7175564e+00   1.4352700e+00   6.4031242e-01   1.1445523e+00   1.4798649e+00   1.3527749e+00   7.6157731e-01   1.3228757e+00   1.3527749e+00   1.7944358e+00   7.1414284e-01   1.4352700e+00   1.3784049e+00   1.4491377e+00   4.2426407e-01   8.8881944e-01   1.4525839e+00   9.5916630e-01   6.0827625e-01   
1.1180340e+00   1.3341664e+00   5.5677644e-01   5.0000000e-01   9.6436508e-01   1.4142136e+00   1.0099505e+00   6.4807407e-01   1.2449900e+00   1.5684387e+00   7.4161985e-01   1.0770330e+00   2.3706539e+00   1.1180340e+00   1.9339080e+00   1.1618950e+00   2.0322401e+00   8.6602540e-01   6.3245553e-01   1.1357817e+00   2.6457513e-01   5.0990195e-01   9.0000000e-01   8.6602540e-01   2.7331301e+00   2.6495283e+00   6.7823300e-01   1.4071247e+00   3.1622777e-01   2.4879711e+00   5.4772256e-01   1.2529964e+00   1.7406895e+00   5.1961524e-01   4.7958315e-01   8.1240384e-01   1.6217275e+00   1.8894444e+00   2.7055499e+00   8.4261498e-01   6.4807407e-01   7.7459667e-01   2.2045408e+00   1.1135529e+00   8.3066239e-01   4.7958315e-01   1.2247449e+00   1.2124356e+00   1.2369317e+00   0.0000000e+00   1.4317821e+00   1.3747727e+00   1.0344080e+00   5.4772256e-01   7.7459667e-01   9.4868330e-01   3.3166248e-01   9.1104336e-01   6.1644140e-01   8.6023253e-01   2.6851443e+00   5.4772256e-01   7.1414284e-01   7.5498344e-01   1.0246951e+00   9.8994949e-01   5.0000000e-01   1.7406895e+00   1.5684387e+00   9.6436508e-01   7.8102497e-01   1.2845233e+00   1.2489996e+00   1.7378147e+00   4.0000000e-01   1.8165902e+00   1.0246951e+00   1.3490738e+00   5.3851648e-01   3.8729833e-01   1.4662878e+00   1.4456832e+00   7.8740079e-01   5.1961524e-01   4.5825757e-01   1.2409674e+00   7.9372539e-01   1.2961481e+00   1.3190906e+00   6.6332496e-01   9.8994949e-01   8.6602540e-01   1.5842980e+00   5.4772256e-01   5.9160798e-01   8.5440037e-01   1.5684387e+00   4.1231056e-01   6.7082039e-01   8.3066239e-01   1.3190906e+00   9.2736185e-01   1.1224972e+00   1.4730920e+00   5.0000000e-01   1.6703293e+00   1.8275667e+00   1.2206556e+00   6.0000000e-01   1.4282857e+00   6.4807407e-01   3.8729833e-01   6.0000000e-01   9.5916630e-01   9.3273791e-01   6.6332496e-01   2.4494897e-01   2.0346990e+00   1.9974984e+00   1.0148892e+00   8.4261498e-01   1.0148892e+00   1.7944358e+00   7.2801099e-01   6.4807407e-01   1.0295630e+00   8.1240384e-01   7.3484692e-01   3.3166248e-01   9.4868330e-01   1.2165525e+00   2.0124612e+00   4.2426407e-01   5.9160798e-01   5.3851648e-01   1.5716234e+00   7.8102497e-01   2.4494897e-01   8.6023253e-01   7.2801099e-01   7.4833148e-01   9.4868330e-01   7.4161985e-01   8.2462113e-01   9.0553851e-01   7.6157731e-01   7.2801099e-01   5.0000000e-01   7.4161985e-01   6.4807407e-01   1.3638182e+00   2.1794495e+00   1.0295630e+00   6.7082039e-01   1.0148892e+00   7.5498344e-01   6.6332496e-01   4.3588989e-01   1.2529964e+00   1.0295630e+00   5.5677644e-01   5.0000000e-01   1.7000000e+00   1.6792856e+00   1.4212670e+00   4.6904158e-01   1.3038405e+00   1.5264338e+00   1.0488088e+00   3.8729833e-01   8.5440037e-01   1.1357817e+00   1.0630146e+00   3.1622777e-01   9.2195445e-01   1.0148892e+00   1.7320508e+00   3.0000000e-01   1.0295630e+00   1.0000000e+00   1.2409674e+00   5.2915026e-01   5.1961524e-01   1.1874342e+00   5.8309519e-01   3.6055513e-01   8.1853528e-01   1.0770330e+00   3.8729833e-01   4.7958315e-01   6.4031242e-01   1.0099505e+00   6.3245553e-01   6.4807407e-01   1.0049876e+00   3.4799425e+00   5.2915026e-01   1.3379088e+00   9.6436508e-01   1.8734994e+00   1.8055470e+00   1.3601471e+00   2.5357445e+00   2.3706539e+00   1.7916473e+00   1.5842980e+00   8.1853528e-01   5.4772256e-01   2.4738634e+00   1.1747340e+00   2.6343880e+00   2.6457513e-01   2.1817424e+00   1.3076697e+00   8.0622577e-01   2.3086793e+00   2.2869193e+00   1.5748016e+00   1.0246951e+00   6.0827625e-01   8.8317609e-01   1.5779734e+00   
2.0832667e+00   1.9748418e+00   5.4772256e-01   1.7146428e+00   1.6583124e+00   2.4269322e+00   1.3928388e+00   1.3820275e+00   1.6703293e+00   2.3706539e+00   1.1000000e+00   1.3674794e+00   1.6763055e+00   2.1307276e+00   1.7832555e+00   1.8973666e+00   2.2869193e+00   3.0282008e+00   2.2226111e+00   3.1144823e+00   1.8708287e+00   1.7233688e+00   2.2405357e+00   9.8994949e-01   1.3228757e+00   1.9339080e+00   1.9544820e+00   3.8236109e+00   3.7376463e+00   1.2609520e+00   2.5079872e+00   9.1104336e-01   3.5860842e+00   1.4730920e+00   2.3409400e+00   2.8354894e+00   1.3711309e+00   1.3638182e+00   1.9261360e+00   2.6907248e+00   2.9899833e+00   3.7934153e+00   1.9493589e+00   1.5652476e+00   1.6583124e+00   3.3181320e+00   2.1142375e+00   1.9026298e+00   1.2489996e+00   2.3086793e+00   2.3021729e+00   2.2538855e+00   1.1180340e+00   2.5337719e+00   2.4413111e+00   2.0832667e+00   1.5000000e+00   1.8411953e+00   1.9157244e+00   1.2727922e+00   8.7749644e-01   1.0148892e+00   1.4866069e+00   1.3638182e+00   9.9498744e-01   2.1095023e+00   2.0149442e+00   1.4662878e+00   1.1357817e+00   1.1357817e+00   9.2736185e-01   1.9899749e+00   9.2736185e-01   2.2135944e+00   6.0827625e-01   1.7320508e+00   9.8488578e-01   4.3588989e-01   1.8627936e+00   1.8466185e+00   1.1832160e+00   5.5677644e-01   2.6457513e-01   1.1045361e+00   1.2124356e+00   1.5937377e+00   1.4764823e+00   6.7823300e-01   1.4491377e+00   1.2206556e+00   1.9874607e+00   1.0488088e+00   1.1180340e+00   1.3747727e+00   1.9339080e+00   8.6602540e-01   1.1704700e+00   1.3527749e+00   1.6911535e+00   1.3784049e+00   1.5874508e+00   1.8466185e+00   1.4282857e+00   1.0295630e+00   6.2449980e-01   6.6332496e-01   1.2961481e+00   1.3228757e+00   1.0392305e+00   6.1644140e-01   1.9131126e+00   1.5716234e+00   1.1445523e+00   8.8881944e-01   1.4662878e+00   1.3928388e+00   1.0049876e+00   8.6023253e-01   8.8317609e-01   1.1575837e+00   1.1916375e+00   5.5677644e-01   7.3484692e-01   8.2462113e-01   1.8788294e+00   6.1644140e-01   9.1104336e-01   7.5498344e-01   1.2609520e+00   1.1704700e+00   7.3484692e-01   1.3190906e+00   8.0622577e-01   8.7177979e-01   1.0677078e+00   1.1618950e+00   8.7177979e-01   1.0677078e+00   9.2736185e-01   9.0000000e-01   8.3066239e-01   1.2124356e+00   1.1747340e+00   1.3784049e+00   1.5652476e+00   1.0198039e+00   2.2181073e+00   1.9000000e+00   1.2165525e+00   1.3038405e+00   8.6023253e-01   1.3892444e+00   2.3685439e+00   6.7082039e-01   2.2113344e+00   1.2247449e+00   1.8841444e+00   8.1240384e-01   8.1240384e-01   1.9544820e+00   1.8708287e+00   1.3000000e+00   1.1224972e+00   1.0198039e+00   9.3273791e-01   1.2727922e+00   1.8574176e+00   1.9157244e+00   8.0622577e-01   1.0535654e+00   1.3190906e+00   1.9949937e+00   9.9498744e-01   8.7177979e-01   1.1747340e+00   2.0322401e+00   6.3245553e-01   7.0710678e-01   1.2083046e+00   1.8947295e+00   1.3820275e+00   1.2529964e+00   1.8814888e+00   5.5677644e-01   5.4772256e-01   1.0677078e+00   9.0000000e-01   3.7416574e-01   4.8989795e-01   2.0976177e+00   2.2649503e+00   1.2288206e+00   7.8102497e-01   1.0049876e+00   2.0396078e+00   6.0827625e-01   6.4807407e-01   1.1575837e+00   6.1644140e-01   5.2915026e-01   6.5574385e-01   1.0862780e+00   1.4071247e+00   2.0024984e+00   6.7823300e-01   6.7082039e-01   1.0630146e+00   1.6031220e+00   7.0000000e-01   4.6904158e-01   6.4807407e-01   5.1961524e-01   6.7823300e-01   5.0990195e-01   8.6602540e-01   9.0553851e-01   8.1240384e-01   4.2426407e-01   7.4161985e-01   2.2360680e-01   5.5677644e-01   6.6332496e-01   
5.7445626e-01   7.9372539e-01   8.1240384e-01   6.4031242e-01   3.8729833e-01   2.2248595e+00   2.1023796e+00   8.1240384e-01   9.0553851e-01   9.0553851e-01   1.9157244e+00   4.2426407e-01   8.0622577e-01   1.1789826e+00   5.5677644e-01   5.9160798e-01   3.7416574e-01   1.0344080e+00   1.2845233e+00   2.1633308e+00   4.3588989e-01   4.6904158e-01   6.6332496e-01   1.6062378e+00   9.1651514e-01   4.5825757e-01   7.1414284e-01   6.7823300e-01   7.6811457e-01   7.8102497e-01   6.3245553e-01   9.6436508e-01   9.8488578e-01   5.9160798e-01   3.7416574e-01   3.4641016e-01   8.3666003e-01   6.2449980e-01   1.3114877e+00   1.1357817e+00   5.2915026e-01   4.2426407e-01   1.7029386e+00   1.7233688e+00   1.3747727e+00   3.6055513e-01   1.3601471e+00   1.5165751e+00   8.8881944e-01   3.7416574e-01   7.3484692e-01   9.8994949e-01   9.6953597e-01   4.5825757e-01   7.0710678e-01   8.9442719e-01   1.6340135e+00   4.6904158e-01   9.0000000e-01   1.0723805e+00   1.1000000e+00   7.1414284e-01   5.0990195e-01   1.1045361e+00   1.7320508e-01   3.4641016e-01   4.6904158e-01   1.1357817e+00   4.8989795e-01   5.4772256e-01   3.7416574e-01   8.8881944e-01   4.3588989e-01   7.5498344e-01   1.0295630e+00   5.1961524e-01   1.0770330e+00   1.0862780e+00   2.9359837e+00   2.7766887e+00   6.5574385e-01   1.5842980e+00   3.3166248e-01   2.6419690e+00   6.7082039e-01   1.4628739e+00   1.9442222e+00   6.4807407e-01   6.7823300e-01   9.7467943e-01   1.8165902e+00   2.0493902e+00   2.9137605e+00   9.8994949e-01   8.4261498e-01   9.4339811e-01   2.3558438e+00   1.3000000e+00   1.0677078e+00   6.4807407e-01   1.4035669e+00   1.3711309e+00   1.3784049e+00   2.6457513e-01   1.6124515e+00   1.5427249e+00   1.1747340e+00   6.0827625e-01   9.6436508e-01   1.1445523e+00   5.8309519e-01   7.5498344e-01   1.0246951e+00   2.6851443e+00   2.6267851e+00   1.1045361e+00   1.3190906e+00   4.8989795e-01   2.5159491e+00   8.1240384e-01   1.2288206e+00   1.8138357e+00   7.8102497e-01   7.2801099e-01   8.3666003e-01   1.7691806e+00   1.9519221e+00   2.6944387e+00   8.0622577e-01   1.0295630e+00   1.1747340e+00   2.1587033e+00   9.2736185e-01   9.8488578e-01   7.2801099e-01   1.2165525e+00   1.0723805e+00   1.1445523e+00   5.0990195e-01   1.3453624e+00   1.1958261e+00   9.3273791e-01   7.7459667e-01   8.3666003e-01   7.8740079e-01   6.4031242e-01   5.8309519e-01   2.0049938e+00   2.1470911e+00   1.3747727e+00   6.4031242e-01   1.0246951e+00   1.9748418e+00   8.1853528e-01   5.4772256e-01   1.1747340e+00   8.3666003e-01   7.3484692e-01   5.3851648e-01   1.1916375e+00   1.4000000e+00   1.9773720e+00   5.0990195e-01   9.2195445e-01   1.1618950e+00   1.5394804e+00   3.8729833e-01   5.4772256e-01   8.3666003e-01   5.5677644e-01   4.4721360e-01   5.4772256e-01   9.0000000e-01   7.2111026e-01   5.4772256e-01   3.7416574e-01   8.6602540e-01   3.8729833e-01   3.0000000e-01   7.6157731e-01   1.9183326e+00   1.9519221e+00   1.1090537e+00   7.0000000e-01   1.1180340e+00   1.7204651e+00   7.0000000e-01   5.0990195e-01   8.8317609e-01   7.8740079e-01   7.2111026e-01   3.8729833e-01   7.8740079e-01   1.1045361e+00   1.8574176e+00   4.6904158e-01   5.7445626e-01   7.0000000e-01   1.4317821e+00   7.5498344e-01   1.4142136e-01   8.6023253e-01   5.1961524e-01   6.4807407e-01   7.6157731e-01   8.6602540e-01   7.3484692e-01   8.1240384e-01   6.1644140e-01   7.4161985e-01   3.6055513e-01   7.1414284e-01   7.2111026e-01   1.2206556e+00   2.9715316e+00   1.4177447e+00   2.9478806e+00   1.0198039e+00   2.5632011e+00   1.5033296e+00   1.1224972e+00   2.6495283e+00   
2.5690465e+00   1.9773720e+00   1.4352700e+00   1.2409674e+00   4.1231056e-01   1.9748418e+00   2.4515301e+00   2.4186773e+00   1.0049876e+00   1.8357560e+00   1.9442222e+00   2.7018512e+00   1.6822604e+00   1.6552945e+00   1.9235384e+00   2.7331301e+00   1.3490738e+00   1.5297059e+00   1.9748418e+00   2.5748786e+00   2.0904545e+00   2.0273135e+00   2.5690465e+00   2.7018512e+00   1.5620499e+00   2.9223278e+00   4.1231056e-01   2.4939928e+00   1.7233688e+00   1.2922848e+00   2.6362853e+00   2.6400758e+00   1.8601075e+00   1.4525839e+00   9.6436508e-01   1.3490738e+00   1.8520259e+00   2.4248711e+00   2.2494444e+00   8.9442719e-01   2.0736441e+00   2.0371549e+00   2.7766887e+00   1.7832555e+00   1.7175564e+00   2.0322401e+00   2.6495283e+00   1.4730920e+00   1.7233688e+00   2.0124612e+00   2.3958297e+00   2.1400935e+00   2.2671568e+00   2.6248809e+00   1.7146428e+00   8.8317609e-01   2.5278449e+00   6.6332496e-01   1.5968719e+00   1.8788294e+00   7.2801099e-01   8.6602540e-01   1.1135529e+00   1.6522712e+00   1.9209373e+00   2.8948230e+00   1.1704700e+00   6.7823300e-01   7.3484692e-01   2.3194827e+00   1.6431677e+00   1.1445523e+00   8.7749644e-01   1.4628739e+00   1.5716234e+00   1.5066519e+00   6.7823300e-01   1.7578396e+00   1.7860571e+00   1.3453624e+00   5.8309519e-01   1.0862780e+00   1.5099669e+00   8.6602540e-01   1.6062378e+00   1.3747727e+00   1.2247449e+00   3.0000000e-01   6.5574385e-01   1.3076697e+00   1.2529964e+00   6.7823300e-01   7.9372539e-01   8.5440037e-01   1.3928388e+00   6.5574385e-01   1.2328828e+00   1.3490738e+00   9.1651514e-01   6.4807407e-01   7.4161985e-01   1.3820275e+00   3.7416574e-01   2.6457513e-01   6.0827625e-01   1.4071247e+00   2.2360680e-01   3.0000000e-01   5.7445626e-01   1.2247449e+00   7.3484692e-01   7.8740079e-01   1.2845233e+00   2.7658633e+00   7.3484692e-01   1.4525839e+00   1.9924859e+00   6.4031242e-01   5.7445626e-01   1.0677078e+00   1.8894444e+00   2.1656408e+00   2.9223278e+00   1.0816654e+00   8.8317609e-01   1.0677078e+00   2.4454039e+00   1.2247449e+00   1.0630146e+00   5.0000000e-01   1.4282857e+00   1.3964240e+00   1.3820275e+00   3.1622777e-01   1.6401219e+00   1.5329710e+00   1.1958261e+00   7.7459667e-01   9.6953597e-01   1.0295630e+00   4.5825757e-01   2.2912878e+00   1.5033296e+00   9.6953597e-01   2.4289916e+00   2.4248711e+00   1.7058722e+00   1.1224972e+00   6.7823300e-01   1.0630146e+00   1.7146428e+00   2.1840330e+00   2.0420578e+00   7.0000000e-01   1.9209373e+00   1.8055470e+00   2.5651511e+00   1.5588457e+00   1.5684387e+00   1.8384776e+00   2.4879711e+00   1.3038405e+00   1.5811388e+00   1.8384776e+00   2.2248595e+00   1.9313208e+00   2.0952327e+00   2.4248711e+00   1.1180340e+00   1.5066519e+00   1.7320508e-01   3.6055513e-01   7.7459667e-01   1.3228757e+00   1.6340135e+00   2.4617067e+00   8.1853528e-01   3.7416574e-01   8.3666003e-01   1.9339080e+00   1.1575837e+00   7.2801099e-01   4.3588989e-01   9.2736185e-01   1.0816654e+00   9.0000000e-01   5.4772256e-01   1.3228757e+00   1.2845233e+00   7.6811457e-01   2.4494897e-01   5.0990195e-01   1.0000000e+00   5.3851648e-01   6.6332496e-01   1.1832160e+00   1.0862780e+00   5.9160798e-01   7.7459667e-01   9.6953597e-01   1.4798649e+00   6.0000000e-01   1.0630146e+00   1.1618950e+00   1.1357817e+00   5.1961524e-01   5.0990195e-01   1.2165525e+00   4.1231056e-01   3.7416574e-01   6.9282032e-01   1.2529964e+00   3.1622777e-01   4.0000000e-01   6.1644140e-01   1.1532563e+00   6.2449980e-01   6.2449980e-01   1.0862780e+00   1.6124515e+00   1.5684387e+00   1.0246951e+00   
3.4641016e-01   4.6904158e-01   1.0246951e+00   1.0583005e+00   1.3674794e+00   1.3747727e+00   7.4161985e-01   1.1704700e+00   9.4868330e-01   1.7088007e+00   7.4161985e-01   8.8317609e-01   1.0770330e+00   1.7406895e+00   6.4807407e-01   9.1651514e-01   1.0862780e+00   1.5198684e+00   1.1000000e+00   1.2845233e+00   1.5937377e+00   2.4494897e-01   8.7749644e-01   1.4422205e+00   1.7720045e+00   2.5475478e+00   9.1651514e-01   4.3588989e-01   9.2195445e-01   2.0566964e+00   1.1704700e+00   7.8740079e-01   2.8284271e-01   1.0148892e+00   1.1575837e+00   9.5916630e-01   5.1961524e-01   1.4071247e+00   1.3416408e+00   8.3666003e-01   3.8729833e-01   5.7445626e-01   9.8488578e-01   4.6904158e-01   8.4261498e-01   1.4352700e+00   1.7832555e+00   2.4839485e+00   8.8317609e-01   4.5825757e-01   9.0000000e-01   2.0615528e+00   1.0246951e+00   6.7823300e-01   1.4142136e-01   9.9498744e-01   1.1045361e+00   9.6953597e-01   4.7958315e-01   1.3341664e+00   1.2569805e+00   8.3666003e-01   5.5677644e-01   5.3851648e-01   8.1853528e-01   2.8284271e-01   9.8488578e-01   1.1357817e+00   1.9748418e+00   1.0000000e-01   7.8740079e-01   7.8740079e-01   1.4212670e+00   6.7823300e-01   4.3588989e-01   9.6436508e-01   6.1644140e-01   5.1961524e-01   7.9372539e-01   8.1240384e-01   6.7082039e-01   7.1414284e-01   5.7445626e-01   7.0710678e-01   4.6904158e-01   6.9282032e-01   7.9372539e-01   5.0990195e-01   1.2845233e+00   1.0392305e+00   1.1618950e+00   1.2041595e+00   9.1104336e-01   1.2845233e+00   8.8317609e-01   1.5748016e+00   7.1414284e-01   9.6953597e-01   1.0392305e+00   1.6217275e+00   8.3666003e-01   1.0770330e+00   1.0488088e+00   1.3379088e+00   1.0049876e+00   1.3453624e+00   1.4899664e+00   1.1618950e+00   1.1575837e+00   1.5394804e+00   1.4933185e+00   5.3851648e-01   1.4387495e+00   1.2083046e+00   1.9235384e+00   9.3273791e-01   1.0392305e+00   1.2247449e+00   1.8894444e+00   8.4852814e-01   1.1224972e+00   1.2247449e+00   1.5842980e+00   1.2922848e+00   1.5652476e+00   1.8165902e+00   1.9824228e+00   2.3452079e+00   2.3832751e+00   9.2736185e-01   1.8761663e+00   1.8947295e+00   2.6172505e+00   1.5811388e+00   1.6522712e+00   1.8083141e+00   2.7055499e+00   1.3820275e+00   1.5588457e+00   1.9000000e+00   2.4939928e+00   2.0099751e+00   2.0346990e+00   2.5238859e+00   8.6602540e-01   8.7749644e-01   1.4106736e+00   6.4031242e-01   5.0990195e-01   1.0000000e+00   6.2449980e-01   4.6904158e-01   7.7459667e-01   8.4261498e-01   6.4807407e-01   6.6332496e-01   5.4772256e-01   7.4161985e-01   5.0000000e-01   6.7082039e-01   8.3666003e-01   5.8309519e-01   1.9078784e+00   1.1916375e+00   5.9160798e-01   5.5677644e-01   9.4868330e-01   1.1445523e+00   1.0440307e+00   6.4807407e-01   1.3000000e+00   1.3304135e+00   9.2195445e-01   5.0990195e-01   5.8309519e-01   1.0488088e+00   5.3851648e-01   1.9442222e+00   1.2961481e+00   7.1414284e-01   9.8488578e-01   1.1916375e+00   1.2688578e+00   1.3964240e+00   7.7459667e-01   1.3228757e+00   1.4387495e+00   1.2206556e+00   8.1240384e-01   9.1651514e-01   1.2247449e+00   7.8102497e-01   1.5427249e+00   1.5198684e+00   2.1977261e+00   1.0862780e+00   1.1269428e+00   1.2845233e+00   2.2045408e+00   9.4339811e-01   1.1357817e+00   1.3453624e+00   1.8920888e+00   1.5297059e+00   1.7029386e+00   2.1189620e+00   6.8556546e-01   1.1180340e+00   7.6157731e-01   5.0000000e-01   8.4261498e-01   1.1135529e+00   6.2449980e-01   4.3588989e-01   7.0000000e-01   1.1916375e+00   7.2111026e-01   2.4494897e-01   9.6436508e-01   8.1240384e-01   5.9160798e-01   6.7823300e-01   
8.1240384e-01   8.3066239e-01   7.6157731e-01   8.1240384e-01   6.6332496e-01   7.9372539e-01   3.8729833e-01   6.2449980e-01   6.4807407e-01   1.1269428e+00   1.2247449e+00   1.0770330e+00   4.7958315e-01   1.4628739e+00   1.3711309e+00   9.4868330e-01   6.2449980e-01   6.7082039e-01   9.0000000e-01   3.1622777e-01   4.1231056e-01   3.6055513e-01   1.2247449e+00   5.5677644e-01   5.7445626e-01   3.6055513e-01   9.5916630e-01   4.6904158e-01   7.8740079e-01   1.0908712e+00   5.4772256e-01   1.2124356e+00   3.4641016e-01   2.4494897e-01   4.2426407e-01   1.0630146e+00   6.0827625e-01   6.2449980e-01   1.1224972e+00   1.2369317e+00   8.1240384e-01   6.9282032e-01   2.4494897e-01   9.4339811e-01   5.1961524e-01   8.1853528e-01   1.1224972e+00   1.4317821e+00   1.3747727e+00   1.0344080e+00   5.4772256e-01   7.7459667e-01   9.4868330e-01   3.3166248e-01   3.1622777e-01   7.3484692e-01   1.3076697e+00   8.4261498e-01   8.0622577e-01   1.3190906e+00   6.1644140e-01   1.2845233e+00   7.9372539e-01   6.2449980e-01   1.2569805e+00   7.8102497e-01   3.6055513e-01   6.7082039e-01   9.4868330e-01   5.8309519e-01   1.0677078e+00   6.5574385e-01   6.1644140e-01   6.4031242e-01   7.6811457e-01
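The block above is the tail of a condensed distance matrix: scipy.spatial.distance.pdist returns the n*(n-1)/2 pairwise distances of n observations as a flat vector, and these reference files store that vector verbatim. A minimal sketch of the round trip between the condensed and square forms (the 150-observation shape is an assumption based on the iris-derived file names in this diff):

    import numpy as np
    from scipy.spatial.distance import pdist, squareform

    # Stand-in for the iris feature rows; only the shape matters for this sketch.
    X = np.random.default_rng(0).random((150, 4))
    d = pdist(X, metric="euclidean")        # condensed form: 150*149/2 values
    assert d.shape == (150 * 149 // 2,)
    D = squareform(d)                       # symmetric 150x150 square form
    assert np.allclose(squareform(D), d)    # squareform is its own inverse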
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-euclidean-ml.txt b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-euclidean-ml.txt
new file mode 100644
index 00000000..1b755202
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-euclidean-ml.txt
@@ -0,0 +1 @@
+   4.0515260e+00   4.2121458e+00   3.7357405e+00   4.2313317e+00   3.9136009e+00   4.3843298e+00   3.9811426e+00   4.3624182e+00   4.0642508e+00   4.2105933e+00   4.0747226e+00   3.9068586e+00   4.1637004e+00   4.4303203e+00   4.1841564e+00   4.1063279e+00   4.1862390e+00   4.0719925e+00   4.2227579e+00   4.3173531e+00   3.8811067e+00   3.7577567e+00   4.0623722e+00   3.9882453e+00   4.0432671e+00   3.9085109e+00   4.0283414e+00   4.0846110e+00   3.6459235e+00   3.9544001e+00   4.1134244e+00   4.1805752e+00   3.5121011e+00   4.2747789e+00   4.1048323e+00   3.9269426e+00   3.8932032e+00   3.8281172e+00   3.7288430e+00   4.0863477e+00   4.1527428e+00   4.1646409e+00   4.2027433e+00   3.8441594e+00   4.8419117e+00   4.2455384e+00   3.7622220e+00   4.3967923e+00   4.4663183e+00   4.0435853e+00   4.0421692e+00   4.3124625e+00   4.6499961e+00   4.5595743e+00   3.4230430e+00   4.2612266e+00   3.5676603e+00   4.0866580e+00   4.2307103e+00   3.8521940e+00   3.9951183e+00   3.1022409e+00   3.7290193e+00   4.1931517e+00   4.1127027e+00   3.6633651e+00   4.0235815e+00   3.9729858e+00   4.1980132e+00   4.1579993e+00   3.9948955e+00   3.9081966e+00   3.9031152e+00   3.5069036e+00   4.0015727e+00   3.6763496e+00   3.6614339e+00   3.6227109e+00   3.7357992e+00   4.0170026e+00   3.5216829e+00   3.9322227e+00   3.9094621e+00   4.0170286e+00   4.3264246e+00   4.3435483e+00   4.0788635e+00   4.4761765e+00   3.8468186e+00   4.1490333e+00   4.2800007e+00   4.2260191e+00   4.3031858e+00   4.1897413e+00   4.0530244e+00   3.5893641e+00   4.2186615e+00   3.7979503e+00   4.0915473e+00   4.1343073e+00   4.5063851e+00   3.6394889e+00   4.2508448e+00   3.7160826e+00   4.0105262e+00   4.1578269e+00   4.0290590e+00   3.6971819e+00   3.9414087e+00   4.2522313e+00   4.4091714e+00   4.1542292e+00   3.9594691e+00   4.0923600e+00   4.0855497e+00   3.8253075e+00   4.3034717e+00   4.0976731e+00   4.1316523e+00   4.0872717e+00   4.2643353e+00   3.8887280e+00   3.9411273e+00   3.8848001e+00   4.3481996e+00   3.8716733e+00   3.9084684e+00   3.7546361e+00   3.9354816e+00   3.8293694e+00   3.7568515e+00   3.7184961e+00   3.8404278e+00   4.2570811e+00   4.1423777e+00   4.0291411e+00   4.2094682e+00   3.6127418e+00   4.0459839e+00   3.7737985e+00   3.7647653e+00   3.9762006e+00   3.8999512e+00   3.8509090e+00   3.8975941e+00   3.8432839e+00   4.2109046e+00   4.1339124e+00   3.5898873e+00   4.0794519e+00   4.3504966e+00   3.8862612e+00   3.8332931e+00   4.2190310e+00   4.1366595e+00   3.7220268e+00   4.1250795e+00   3.3169452e+00   4.0757181e+00   3.6487114e+00   3.9513724e+00   4.0735549e+00   3.9137880e+00   3.9656942e+00   3.7724953e+00   4.0505153e+00   3.9062302e+00   4.5671852e+00   3.7542175e+00   4.3731708e+00   3.6733907e+00   4.4667545e+00   4.1004635e+00   4.0530038e+00   4.0346958e+00   4.2145752e+00   4.4298637e+00   4.2982360e+00   4.0878239e+00   4.4061563e+00   4.2115971e+00   3.8263277e+00   3.8603258e+00   3.8572375e+00   4.1051910e+00   4.3787786e+00   4.5309659e+00   4.0047055e+00   4.1308854e+00   3.6283561e+00
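pdist-euclidean-ml.txt holds a single row of condensed Euclidean distances for one of the small random test matrices shipped in the same test-data directory. A hedged sketch of how a test would typically consume such a file — the companion input file name here is hypothetical, and the tolerance is an assumption:

    import numpy as np
    from scipy.spatial.distance import pdist

    expected = np.loadtxt("pdist-euclidean-ml.txt")   # flat condensed vector
    X = np.loadtxt("random-double-data.txt")          # hypothetical companion input
    assert np.allclose(pdist(X, metric="euclidean"), expected, atol=1e-7)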
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-hamming-ml.txt b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-hamming-ml.txt
new file mode 100644
index 00000000..bc4e1ddc
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-hamming-ml.txt
@@ -0,0 +1 @@
+   4.6000000e-01   4.3000000e-01   4.3000000e-01   5.4000000e-01   4.1000000e-01   5.3000000e-01   4.3000000e-01   5.9000000e-01   4.8000000e-01   4.7000000e-01   4.6000000e-01   4.9000000e-01   4.5000000e-01   5.5000000e-01   5.3000000e-01   4.5000000e-01   4.8000000e-01   4.7000000e-01   4.8000000e-01   5.1000000e-01   4.9000000e-01   4.4000000e-01   4.9000000e-01   4.7000000e-01   4.9000000e-01   4.7000000e-01   5.2000000e-01   4.7000000e-01   4.2000000e-01   4.9000000e-01   4.7000000e-01   5.5000000e-01   3.9000000e-01   5.5000000e-01   4.6000000e-01   4.5000000e-01   4.0000000e-01   4.8000000e-01   4.5000000e-01   4.8000000e-01   4.8000000e-01   5.0000000e-01   4.8000000e-01   4.5000000e-01   6.4000000e-01   5.7000000e-01   4.6000000e-01   5.4000000e-01   5.6000000e-01   4.8000000e-01   4.8000000e-01   5.3000000e-01   5.4000000e-01   5.3000000e-01   4.5000000e-01   5.8000000e-01   4.2000000e-01   5.4000000e-01   6.0000000e-01   5.1000000e-01   4.6000000e-01   4.1000000e-01   4.4000000e-01   5.6000000e-01   5.4000000e-01   4.8000000e-01   4.8000000e-01   5.1000000e-01   5.2000000e-01   5.5000000e-01   4.5000000e-01   4.3000000e-01   4.7000000e-01   4.7000000e-01   5.6000000e-01   4.9000000e-01   4.8000000e-01   4.5000000e-01   4.9000000e-01   4.7000000e-01   4.5000000e-01   4.5000000e-01   5.6000000e-01   4.9000000e-01   5.8000000e-01   5.4000000e-01   4.6000000e-01   5.8000000e-01   5.3000000e-01   5.4000000e-01   5.5000000e-01   5.0000000e-01   5.2000000e-01   4.8000000e-01   5.0000000e-01   3.8000000e-01   5.3000000e-01   4.8000000e-01   5.1000000e-01   4.8000000e-01   5.2000000e-01   4.7000000e-01   5.0000000e-01   4.3000000e-01   4.8000000e-01   5.2000000e-01   5.0000000e-01   4.2000000e-01   4.2000000e-01   4.7000000e-01   5.4000000e-01   5.1000000e-01   5.4000000e-01   5.1000000e-01   4.8000000e-01   4.7000000e-01   5.2000000e-01   5.2000000e-01   5.4000000e-01   5.4000000e-01   5.0000000e-01   4.5000000e-01   4.4000000e-01   4.1000000e-01   5.7000000e-01   4.6000000e-01   5.1000000e-01   5.2000000e-01   5.0000000e-01   4.8000000e-01   5.0000000e-01   4.4000000e-01   5.3000000e-01   5.2000000e-01   4.9000000e-01   5.7000000e-01   5.8000000e-01   4.9000000e-01   5.1000000e-01   4.5000000e-01   5.3000000e-01   4.5000000e-01   4.4000000e-01   3.5000000e-01   4.2000000e-01   5.3000000e-01   5.2000000e-01   5.0000000e-01   3.8000000e-01   5.2000000e-01   5.6000000e-01   4.7000000e-01   4.4000000e-01   5.1000000e-01   5.7000000e-01   4.5000000e-01   5.7000000e-01   4.3000000e-01   5.1000000e-01   3.8000000e-01   5.3000000e-01   4.8000000e-01   4.4000000e-01   5.0000000e-01   4.8000000e-01   5.0000000e-01   4.7000000e-01   6.4000000e-01   4.9000000e-01   5.2000000e-01   4.8000000e-01   5.6000000e-01   4.3000000e-01   4.8000000e-01   4.7000000e-01   6.0000000e-01   5.4000000e-01   5.5000000e-01   4.0000000e-01   5.5000000e-01   5.6000000e-01   4.9000000e-01   5.0000000e-01   4.3000000e-01   5.7000000e-01   5.0000000e-01   5.7000000e-01   4.9000000e-01   4.2000000e-01   3.9000000e-01
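The Hamming reference values above are all multiples of 0.01, consistent with 100-component input vectors: SciPy's hamming metric is the fraction of positions at which two vectors disagree. A self-contained check of that definition:

    import numpy as np
    from scipy.spatial.distance import pdist

    a = np.zeros(100, dtype=bool)
    b = np.zeros(100, dtype=bool)
    b[:46] = True                        # differ in 46 of 100 positions
    d = pdist(np.vstack([a, b]), metric="hamming")
    assert np.isclose(d[0], 0.46)        # matches the 4.6e-01-style values above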
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-jaccard-ml.txt b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-jaccard-ml.txt
new file mode 100644
index 00000000..a7570d8c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-jaccard-ml.txt
@@ -0,0 +1 @@
+   6.5714286e-01   6.0563380e-01   6.3235294e-01   7.3972603e-01   6.0294118e-01   7.3611111e-01   6.4179104e-01   7.7631579e-01   6.4000000e-01   6.6197183e-01   6.6666667e-01   7.0000000e-01   6.4285714e-01   7.7464789e-01   7.1621622e-01   6.4285714e-01   6.8571429e-01   6.4383562e-01   6.6666667e-01   6.5384615e-01   6.6216216e-01   6.1971831e-01   6.5333333e-01   6.5277778e-01   6.7123288e-01   6.4383562e-01   6.5000000e-01   6.3513514e-01   6.0000000e-01   6.7123288e-01   6.3513514e-01   7.4324324e-01   5.5714286e-01   7.0512821e-01   6.3888889e-01   6.0000000e-01   5.6338028e-01   6.3157895e-01   6.0810811e-01   6.2337662e-01   6.4000000e-01   6.5789474e-01   6.3157895e-01   5.6962025e-01   7.5294118e-01   7.1250000e-01   6.2162162e-01   6.7500000e-01   7.2727273e-01   6.2337662e-01   6.2337662e-01   6.7948718e-01   6.5853659e-01   6.6250000e-01   6.3380282e-01   7.3417722e-01   6.0869565e-01   7.2000000e-01   7.5949367e-01   6.4556962e-01   6.3013699e-01   5.9420290e-01   6.2857143e-01   7.1794872e-01   7.3972603e-01   6.4864865e-01   6.4864865e-01   6.8918919e-01   6.6666667e-01   7.0512821e-01   6.2500000e-01   6.2318841e-01   6.6197183e-01   6.5277778e-01   6.9135802e-01   6.6216216e-01   6.6666667e-01   6.4285714e-01   6.6216216e-01   6.8115942e-01   6.2500000e-01   6.2500000e-01   7.3684211e-01   6.4473684e-01   7.3417722e-01   7.1052632e-01   6.3888889e-01   7.3417722e-01   6.5432099e-01   6.9230769e-01   7.1428571e-01   6.7567568e-01   6.7532468e-01   6.7605634e-01   6.5789474e-01   5.4285714e-01   6.9736842e-01   6.2337662e-01   6.6233766e-01   6.7605634e-01   7.0270270e-01   6.1842105e-01   6.7567568e-01   6.2318841e-01   6.7605634e-01   6.9333333e-01   7.1428571e-01   6.0000000e-01   6.0000000e-01   6.6197183e-01   6.9230769e-01   6.8000000e-01   7.2000000e-01   6.5384615e-01   6.5753425e-01   6.6197183e-01   7.1232877e-01   6.9333333e-01   7.5000000e-01   7.1052632e-01   6.7567568e-01   6.4285714e-01   6.0273973e-01   5.8571429e-01   6.9512195e-01   6.3013699e-01   6.8918919e-01   7.0270270e-01   6.6666667e-01   6.8571429e-01   6.6666667e-01   6.1111111e-01   7.0666667e-01   6.6666667e-01   6.5333333e-01   6.8674699e-01   7.0731707e-01   6.3636364e-01   6.3750000e-01   6.1643836e-01   6.5432099e-01   5.8441558e-01   5.8666667e-01   4.7297297e-01   5.5263158e-01   6.9736842e-01   6.9333333e-01   6.5789474e-01   5.7575758e-01   6.7532468e-01   7.0886076e-01   6.4383562e-01   5.8666667e-01   6.6233766e-01   7.5000000e-01   6.2500000e-01   7.7027027e-01   6.0563380e-01   6.8000000e-01   5.6716418e-01   6.7948718e-01   6.4864865e-01   6.1971831e-01   7.1428571e-01   6.5753425e-01   6.7567568e-01   6.6197183e-01   7.7108434e-01   6.6216216e-01   7.1232877e-01   6.4000000e-01   7.0886076e-01   6.0563380e-01   6.2337662e-01   6.2666667e-01   7.7922078e-01   7.2972973e-01   7.5342466e-01   5.7971014e-01   7.3333333e-01   7.0886076e-01   6.6216216e-01   6.4102564e-01   5.8904110e-01   7.3076923e-01   6.4102564e-01   7.1250000e-01   6.4473684e-01   5.9154930e-01   5.3424658e-01
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-jensenshannon-ml-iris.txt b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-jensenshannon-ml-iris.txt
new file mode 100644
index 00000000..da698cf5
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-jensenshannon-ml-iris.txt
@@ -0,0 +1 @@
+0.0211635609063 0.00455072769716 0.0230610904531 0.0076674324982 0.037571216894 0.029561354778 0.0115281186735 0.0225809070507 0.0346801442638 0.00321049176948 0.0232839774828 0.0321124517082 0.0244179197971 0.0331466156799 0.0373949302575 0.0411984503375 0.0218945865519 0.0198268474453 0.02395616278 0.0254898420418 0.0394901943037 0.0396613298853 0.0621120626322 0.0458316817045 0.0334832834948 0.0445794256135 0.00775602650151 0.00770188279153 0.0277044809709 0.0292851269343 0.0408206135002 0.0384837880405 0.0229145225178 0.0346801442638 0.017155900548 0.0186865079856 0.0346801442638 0.011778601551 0.0109570078484 0.0244803928224 0.0609237090857 0.0125274661674 0.0713079215249 0.05072181848 0.0329702193648 0.018118079192 0.0129441131729 0.00461235131171 0.0077204189151 0.190781073622 0.196235539354 0.202381067744 0.215567934651 0.208951210729 0.208395222197 0.202977772245 0.185995150024 0.197488445796 0.207754344906 0.207231776416 0.200895721783 0.202370257926 0.208839694602 0.184151316466 0.18979501874 0.211864590783 0.187854399445 0.231134943838 0.195836809821 0.220082308081 0.191451897661 0.227329222402 0.204586412278 0.192443163994 0.193684928543 0.206841791869 0.21612354251 0.209268704489 0.175345617131 0.197941218478 0.190625337098 0.191065854858 0.2317867516 0.214902945463 0.200203629275 0.200116177839 0.216845735492 0.194043462589 0.208271280018 0.210985756639 0.203659220099 0.197020978632 0.188475437149 0.205376756706 0.190989482965 0.1980172695 0.194499957306 0.176357663314 0.198492265793 0.255232551639 0.244270007892 0.240469208946 0.23837051036 0.248414598574 0.246817944636 0.244919131711 0.239985639856 0.249741998486 0.238825041235 0.224881988223 0.240479022193 0.236919216915 0.253134884385 0.257281336145 0.239143010746 0.231252341207 0.230464321684 0.26688757037 0.243702807017 0.239741931201 0.243722504108 0.250777226737 0.230838149152 0.233779458319 0.227365052742 0.226996712309 0.224529623997 0.248876761269 0.223504306637 0.241222705588 0.218359591571 0.251783757155 0.22178058433 0.237922879169 0.243259140467 0.243757719218 0.229784431814 0.223834512884 0.231417699032 0.245855002542 0.23260880661 0.244270007892 0.24412721812 0.245364556958 0.239069534798 0.242476824971 0.231950344971 0.238821966724 0.23119389821 0.0191037071088 0.0192689125135 0.0279599225292 0.0400056720024 0.0369973592153 0.0168401225107 0.0156647141811 0.0317569365649 0.0209196472407 0.0261095572505 0.0295911292198 0.0387034711929 0.047943015361 0.0502801086558 0.0505076233319 0.0276985510653 0.0178344200842 0.0354647529806 0.0132311775272 0.0439058315568 0.0587698655317 0.0548749800113 0.0420211683289 0.0158475875817 0.0410205075223 0.0159821398213 0.0151652303532 0.0238719428054 0.0190485214781 0.0351129659726 0.0545720530766 0.0439213935019 0.0317569365649 0.0241815879409 0.0259117209292 0.0317569365649 0.0173211662353 0.0138864344566 0.0338981856531 0.0429537854169 0.027658485714 0.0691172321094 0.0500059403041 0.0236011037029 0.0316644090037 0.0201837094834 0.0228863104939 0.0134861623597 0.176517909274 0.183281450302 0.188265462996 0.20119658342 0.194697326208 0.195209942657 0.190674274015 0.172425697578 0.182894065786 0.195529592383 0.192026007893 0.188346396823 0.186051479032 0.19538200246 0.171605310091 0.175766658214 0.199864266754 0.173408334646 0.215875435175 0.181358449055 0.208526316653 0.177419239992 0.212710197663 0.190468934035 0.178153770213 0.179556589614 0.19192211318 0.202468559318 0.196181327665 0.160572766746 0.183327527516 0.175701303909 0.177066663037 0.218355819513 
0.203310616701 0.188843124957 0.186323930321 0.201194011774 0.18173319992 0.194510318604 0.197386404553 0.190483994401 0.182718294182 0.174255518011 0.19205782762 0.178239088983 0.18513760318 0.180597338326 0.163063102971 0.185284857996 0.243887274146 0.231624187925 0.22689829944 0.225289762668 0.235796354542 0.23269059518 0.23306609907 0.225602345189 0.235244584021 0.226892475392 0.212543999067 0.226908361887 0.223757309255 0.240296170501 0.245569279789 0.227339668618 0.2180947426 0.217889650333 0.252259964829 0.228879839341 0.227150610245 0.231836962279 0.236076260811 0.217244760533 0.221438124324 0.213671912157 0.213819376581 0.212040793299 0.235827974309 0.209053467969 0.226615210725 0.205271111175 0.238869689413 0.207939309627 0.223898822147 0.229226063436 0.232630777303 0.217026651272 0.211532427176 0.218347693285 0.233460132247 0.219891922362 0.231624187925 0.231652249658 0.233525349836 0.226377769419 0.228579062405 0.219098380821 0.227789794229 0.219023772057 0.0212386043587 0.00993825296406 0.0336976816686 0.0262325070397 0.0107568964379 0.0200399473123 0.0369530877134 0.00706121377315 0.0225180018442 0.0345620434634 0.0288649599558 0.0352792061476 0.0352526366714 0.0385518621347 0.0178346265809 0.0156087458393 0.0212893461987 0.0244762891905 0.0356848493955 0.0412118431681 0.0577499958146 0.0449060313177 0.0316682156452 0.0402876009485 0.00791017688182 0.00777689270989 0.0262859463939 0.0275691765497 0.0363841779992 0.0426229028778 0.0258430808451 0.0369530877134 0.0171494695134 0.0197038085728 0.0369530877134 0.00856221073911 0.010146504576 0.0213442662268 0.0572142712511 0.0116574543859 0.0671597340405 0.047388301579 0.0285557929122 0.0192111185981 0.0112749020632 0.00781127840012 0.00637566729419 0.187210918471 0.192623672543 0.198833727144 0.212008654042 0.205367416257 0.204958566039 0.199402528264 0.182409867249 0.193987091024 0.204155295526 0.20374075502 0.197257634781 0.198980554413 0.205364330922 0.180426094029 0.186163209648 0.208359044162 0.184479344493 0.227592539467 0.192334488284 0.216529219774 0.187806950452 0.223855994439 0.201226354807 0.188864300604 0.190066520684 0.203335651256 0.212548662023 0.205705049083 0.171742464799 0.194417377469 0.187136601809 0.187472552212 0.228344421279 0.211429948522 0.196622478767 0.196530972542 0.21335054643 0.190497911091 0.204702393397 0.207598118603 0.200155023454 0.193467129006 0.184880183147 0.201857382862 0.187517863683 0.194486736804 0.1909406597 0.172553592377 0.194927231832 0.251743344729 0.240762357546 0.236947962879 0.234942968898 0.244909063494 0.243388177909 0.24143957892 0.236617606758 0.24633077256 0.235257992461 0.221275459366 0.23695229759 0.23334624948 0.249598367646 0.253700089415 0.235542233543 0.227782636512 0.227013807453 0.263469639225 0.240293112196 0.236160757703 0.240179126335 0.247395319298 0.227257451928 0.23025606552 0.223937789122 0.223399862869 0.220967129276 0.245363860699 0.220116506797 0.237779687596 0.214888164759 0.248252329967 0.218347705592 0.234681014401 0.239690494574 0.240217402996 0.22632629355 0.220260809644 0.227813787503 0.242262375922 0.228918020841 0.240762357546 0.240592325032 0.241778783496 0.235418026861 0.23890352284 0.228365058816 0.235271978928 0.227703766766 0.0251699105319 0.0326109277538 0.029278801732 0.0117605763436 0.00495713045566 0.032971458625 0.022273270218 0.00845866705418 0.0338325850006 0.0418060793613 0.0560571569439 0.0475989809058 0.0526432986362 0.0287512950603 0.0156470261333 0.0286833299321 0.0125318521449 0.039628171731 0.0605879245284 0.051592881751 
0.0247889558 0.0185457610439 0.0360813596602 0.0162897964903 0.0233731835598 0.00555113851114 0.00851608036789 0.039350861172 0.0494281040416 0.0428445793936 0.032971458625 0.0361084743768 0.0382083791018 0.032971458625 0.0135564720949 0.0133460812565 0.0359059029584 0.0525998249753 0.020448013216 0.0653401649712 0.0366222774812 0.0276779812669 0.0199994530533 0.0115734561992 0.0214333278479 0.0186461579961 0.171344593808 0.177166238774 0.182950771401 0.19643196185 0.18986301898 0.188443393542 0.18388535541 0.166562175459 0.177783941311 0.188923487579 0.187724110496 0.182156573883 0.18272910062 0.189078094755 0.165787292858 0.170722554665 0.192539814536 0.167433166664 0.21234151671 0.17610118578 0.201339136255 0.172496446246 0.207869320447 0.184234771078 0.173084677744 0.174586008872 0.187343267555 0.197067369338 0.190065322687 0.155992913972 0.178370192697 0.170834599332 0.171756811444 0.21215391799 0.195543956872 0.181382419503 0.180878423055 0.197648250997 0.174630568935 0.189049602981 0.190809942592 0.184007315234 0.177572802284 0.169158986096 0.1858235382 0.171054750208 0.178472858167 0.175024341592 0.15843173338 0.179125844986 0.236753289776 0.225336913605 0.221457751336 0.21877528886 0.229546376269 0.227338714151 0.226000082599 0.220106179064 0.230241030133 0.220420771483 0.206398010187 0.221506177578 0.218254053746 0.234550382603 0.23950257292 0.221077875617 0.211789394262 0.210958467875 0.247726054061 0.224160699687 0.221293898641 0.225242596712 0.231164838975 0.212084684776 0.21483758197 0.207582855086 0.208324546741 0.205668770285 0.22999948579 0.203519866038 0.221791807814 0.198729512666 0.233119883684 0.201955632148 0.21744774227 0.224658382044 0.22554531899 0.210282327898 0.205086194314 0.212901784527 0.227685054369 0.214992030153 0.225336913605 0.225426421062 0.227294020009 0.22122180599 0.223891772502 0.213328572484 0.220627825622 0.212027719306 0.036692370716 0.0274221847228 0.014614486903 0.0257905543192 0.038185072385 0.00854173398239 0.0228846598272 0.0362880440778 0.0247966202157 0.0338575762449 0.0341810302892 0.0403839419732 0.023192138006 0.0228332335778 0.0207802721216 0.030246984472 0.0385095591836 0.036116458124 0.063670015249 0.0456331337141 0.0387265099713 0.0454622939774 0.0128829753328 0.0153687384621 0.0290580106985 0.032571197138 0.0439484983687 0.0345946330706 0.0178789804494 0.038185072385 0.0229066226947 0.0243703411158 0.038185072385 0.0145921399018 0.0157758022112 0.0246899780508 0.0667942861058 0.00859371091119 0.0713780561444 0.0493343894065 0.0370078971529 0.0136450272229 0.0138896306008 0.00617904291603 0.0146027872105 0.19304474601 0.19797909852 0.204564547012 0.217893788171 0.211240589753 0.210085035577 0.204424468033 0.187966151726 0.199846283114 0.209201380502 0.209876418912 0.202509063074 0.205495616388 0.210676243708 0.185852822947 0.192014733821 0.213129345848 0.190059083655 0.233894763867 0.198141527364 0.221234996275 0.193689434624 0.229701750232 0.206631480423 0.194727732257 0.195938025333 0.209362001884 0.218147207364 0.211029564766 0.177869147322 0.200327033561 0.193124484677 0.193233901741 0.233604044389 0.215980508747 0.201289146742 0.20219168761 0.219726845187 0.195458087393 0.2103246028 0.212820091698 0.205396680761 0.199290845186 0.190742394929 0.207189399227 0.192521418757 0.199651552643 0.196594932336 0.178468769167 0.200295584792 0.256297768876 0.245841451519 0.242438509968 0.240033073248 0.249980802879 0.248953385462 0.246144223104 0.242178797272 0.252036489845 0.240176361939 0.226412773569 0.242454491215 0.238768549206 
0.254841315698 0.258625650226 0.240494667361 0.232976534038 0.231927166971 0.269283834821 0.246140263748 0.241374087503 0.245035624054 0.253144665918 0.232868387014 0.235226383286 0.229279397026 0.228861516729 0.226064745168 0.250630665997 0.225726655668 0.243583203961 0.220042394187 0.253511231548 0.2237596548 0.239872842457 0.245497904196 0.24478307529 0.231328796826 0.225307213023 0.233256809435 0.247442229489 0.234444929147 0.245841451519 0.245661961251 0.246724980668 0.240848644418 0.244659004118 0.233675626419 0.239814102397 0.232532592403 0.010524058973 0.0347497758249 0.0310026365608 0.0636683508445 0.039628310023 0.0339732471818 0.0632599742009 0.0607622080336 0.0610552648732 0.0274939888458 0.0321794296029 0.0203038890155 0.0225566538528 0.0181521200419 0.0426282898435 0.00971744861179 0.0573638408809 0.0304112802741 0.0463628234513 0.044509344874 0.0125060810098 0.0372012029006 0.0399195029174 0.0351572922113 0.0381714857339 0.0236066809352 0.0675880085279 0.0489234884353 0.0636683508445 0.0451372647506 0.0495218266562 0.0636683508445 0.028718795999 0.0363358460459 0.0247409309981 0.0536494939568 0.0291783067693 0.0362475310016 0.0206193569389 0.0238060051702 0.0365127412376 0.0303247636874 0.0382706081182 0.036679138207 0.165252454208 0.168745392776 0.176697416201 0.190075579294 0.183193768644 0.182210296577 0.174895117306 0.159516185843 0.172780618769 0.179424300801 0.183362052205 0.17275366841 0.180884475296 0.18272692365 0.155645508044 0.163596126812 0.183814684533 0.163944686708 0.206939183794 0.170981324059 0.191238016882 0.165171506195 0.202635613449 0.180157192696 0.16688029242 0.167662828735 0.182473473654 0.189606814944 0.182144568907 0.150494922446 0.17311022882 0.166528964674 0.165043072843 0.205731384902 0.18659197245 0.17105591364 0.173799739615 0.193545885155 0.166211621507 0.181959703917 0.185636136233 0.177047042786 0.171588556623 0.162758165885 0.178831153511 0.164197394115 0.170920859204 0.168528635782 0.148460744764 0.171573948268 0.226522867074 0.216861787556 0.214094925757 0.211961277469 0.220987866614 0.22168305754 0.216782699249 0.215609692292 0.225208444508 0.210312198283 0.19661028383 0.214074476024 0.209773195915 0.225810880474 0.228551359916 0.210341321513 0.204691741777 0.203336697222 0.242485382573 0.21963599647 0.211892193047 0.215282949931 0.226705435666 0.20418234927 0.205942096193 0.201754356567 0.199749686219 0.196656401255 0.221913123953 0.199165134473 0.216656797893 0.191759529503 0.224569670715 0.196349964299 0.213992874847 0.217230566883 0.214548114667 0.2028246956 0.195690210734 0.20400781198 0.217750478797 0.204445613083 0.216861787556 0.216379424168 0.21668692278 0.211041069144 0.216249554038 0.204373397921 0.209459569208 0.203359671893 0.0284940982012 0.0283835072119 0.0582564069663 0.0317209110303 0.0290062929149 0.0575581968066 0.0518675712793 0.0525497818792 0.0214648817816 0.0289820919427 0.0151981567712 0.0194868108708 0.00772151270155 0.0396885548083 0.0133170749495 0.0480498930165 0.0406834920646 0.0454173876936 0.04374952779 0.0223043370554 0.0304932603619 0.0333669476098 0.0323541790178 0.036462611527 0.0283937271683 0.0581656862224 0.0386849486073 0.0582564069663 0.0386824403573 0.0427767064205 0.0582564069663 0.0225670168648 0.0303996112373 0.0186317385432 0.0589532282724 0.020129496567 0.0456219811369 0.0271705146906 0.0266114081755 0.0281840659315 0.0237276312035 0.0300017258072 0.0304084467621 0.174689674341 0.17830904459 0.186122916272 0.199541551099 0.192698821274 0.191439303409 0.184399484383 0.168993025372 
0.182083678728 0.188980455315 0.192641208359 0.182376656778 0.189869605957 0.192034662906 0.165406381187 0.173160863593 0.193190156667 0.172952807322 0.216356837462 0.18028635421 0.200707489616 0.174765006311 0.211946253402 0.189197004074 0.176338357547 0.17720731475 0.191796191467 0.199121585044 0.191634817003 0.159923924784 0.182459504488 0.175769947329 0.174536678956 0.215011640541 0.195898920801 0.180555890973 0.183304141459 0.202828623711 0.175640593326 0.191448592698 0.194762075203 0.186406863824 0.181008054094 0.172240816201 0.188223844291 0.17346231425 0.180325575914 0.177965844821 0.158332658267 0.181046361448 0.235882031296 0.226280867303 0.2235366831 0.221221709251 0.230404936567 0.23094802366 0.226136073658 0.224736021115 0.234431734165 0.219824894986 0.20619515856 0.223526372128 0.219308937798 0.235287233784 0.23807753807 0.219908336162 0.214027462834 0.212631745032 0.251732773763 0.228838671706 0.221440978506 0.224753704594 0.235866097652 0.213724598783 0.215380278296 0.210994508636 0.209321687173 0.206162323946 0.231346876087 0.208300913946 0.225928825898 0.201080283138 0.234037288778 0.20558958438 0.222856381147 0.226751911512 0.223989137906 0.212135251626 0.205214537461 0.213595258296 0.2273163389 0.21417146415 0.226280867303 0.225846388312 0.226229092302 0.220705781832 0.225781259333 0.213928177909 0.218913775078 0.212729498479 0.0122225873329 0.0303322491715 0.0105162282326 0.0130161869455 0.0294681125528 0.031169289575 0.0445674903412 0.0424428041907 0.0471592401326 0.0242919299586 0.0151911030837 0.0250458377851 0.0159588466859 0.0393890228389 0.0501712969027 0.0573610210315 0.0347635318983 0.0244162409612 0.040396894165 0.00489128055225 0.0130158047868 0.0162739668734 0.0181749821427 0.0399100872217 0.0416234365466 0.032217222914 0.0303322491715 0.0260818708259 0.0277812891175 0.0303322491715 0.0068887146948 0.00350057866506 0.0298024545939 0.056647752597 0.0131029257559 0.0689399130874 0.0438694576111 0.0295733202179 0.0148293487362 0.005187559828 0.00994170320116 0.00857601759458 0.181761248694 0.187453410696 0.193365623784 0.206702523841 0.20011934013 0.199105672278 0.194191029325 0.176999836949 0.188302627791 0.199111466491 0.198126208998 0.192301121465 0.193121251528 0.199652739338 0.175765384319 0.180985589904 0.202947496477 0.178262931918 0.22241465847 0.18663850283 0.211489015658 0.182706386145 0.218266373994 0.195039865626 0.18346554188 0.184856636434 0.197755074291 0.207324505424 0.200404409787 0.166355990223 0.188830148216 0.181378679541 0.182125858248 0.222660838403 0.205968099039 0.191576935017 0.19121911341 0.207887116649 0.185081160877 0.199380829938 0.201554837448 0.194543615891 0.187991163757 0.179524780118 0.19631300534 0.181726234556 0.188970696545 0.185468734774 0.168191837054 0.18953883133 0.246767069406 0.235545780286 0.231684267956 0.229269594484 0.239724337717 0.237749571543 0.236212316891 0.230682790909 0.240645700931 0.230405224861 0.216420479968 0.231715542723 0.228337424754 0.244594612682 0.249193956399 0.230909382279 0.222229420235 0.221427946934 0.257965809127 0.234573740528 0.231288571496 0.235259005866 0.241607850531 0.22220070955 0.225060949621 0.218149643482 0.218415896436 0.21585883122 0.24017332069 0.214149237729 0.232167950695 0.20925074028 0.243199182069 0.21253980165 0.228281003551 0.23468620257 0.235446691383 0.22074773222 0.215228869367 0.222922571895 0.237553097068 0.224606387466 0.235545780286 0.235534393045 0.23712429287 0.230940484692 0.233917063813 0.223401305554 0.230524046304 0.222353817706 0.0346270873711 
0.0222428303765 0.0130402644662 0.0348466257078 0.042829178942 0.0549568638218 0.0464424131108 0.050271964255 0.0260503884653 0.0122224116333 0.0282180922789 0.011864843965 0.0376053109772 0.060340947297 0.0487164281844 0.0285013587126 0.0163929694658 0.0334500762426 0.015996373074 0.0216948948211 0.00981357617646 0.00897169179454 0.0351677992971 0.0524329239391 0.0436506734112 0.0346270873711 0.0336931600411 0.03612022228 0.0346270873711 0.0123155461924 0.0128144174355 0.033561339537 0.0480641708164 0.0212422700005 0.0628734973132 0.0366806543871 0.0231396795589 0.0230741684915 0.0123098386274 0.0219470818919 0.0170687771133 0.170097980345 0.176050748737 0.181762587161 0.195138740624 0.188553715994 0.187634353283 0.182931238354 0.165421712546 0.176600578903 0.187912731345 0.186408453678 0.181027641681 0.181322068987 0.188146844457 0.16448627936 0.169388121003 0.191757508168 0.166524283083 0.21084441712 0.17494262847 0.200480456222 0.171127562134 0.206682857942 0.183402734812 0.171811556423 0.173255302855 0.186064001811 0.195873759085 0.189008503285 0.154586199139 0.177136590503 0.16961214015 0.170511848112 0.211247624618 0.194873193684 0.18053463387 0.179673493778 0.196193226513 0.173758955601 0.187869379999 0.19001738823 0.183074417621 0.176342969787 0.167870586049 0.184832516957 0.170273119593 0.177542554096 0.173865162824 0.156824879402 0.178069468543 0.235952767235 0.224377142544 0.220334312937 0.217941307072 0.228582134377 0.226295522057 0.225204079848 0.219149120214 0.229153457047 0.219423743811 0.20530168342 0.220370010221 0.217084867796 0.233463162138 0.238411309618 0.220004052761 0.210872881275 0.210171938201 0.24654768975 0.223020715486 0.220176358035 0.224293060856 0.230089603615 0.210845620347 0.213914309618 0.206670271948 0.207126565579 0.204661896322 0.228953899388 0.202543633745 0.220637247329 0.197849243585 0.232039444595 0.201016080923 0.216786754649 0.223327252818 0.224678101179 0.209454984614 0.204079198003 0.211687536154 0.226538202286 0.213579234366 0.224377142544 0.224425108032 0.226231350025 0.219899730556 0.222577135927 0.212192019374 0.21976273048 0.211209202307 0.0316629149872 0.033505533415 0.00599304401229 0.03147088677 0.0569379974042 0.0709823822809 0.0748230813085 0.0531369685873 0.043263968218 0.0546416139704 0.0249055105449 0.0688386171987 0.0687094353901 0.0814189949505 0.0424145094995 0.0312264925083 0.0671690041442 0.0297074253675 0.0326159908833 0.0331671559449 0.0312681130066 0.0653641254742 0.040141041955 0.0490986378456 0.0 0.0426893927085 0.0408370899258 0.0 0.0368318621027 0.029200211213 0.0578784327643 0.069522041979 0.0411420259607 0.0955605905857 0.0684916485028 0.0536985636371 0.0361283833003 0.0349236407736 0.0326544016244 0.0320790517512 0.190734279342 0.198011093099 0.202165335147 0.215193803756 0.208989236133 0.207943677959 0.205067684162 0.186855165408 0.196324830352 0.210203923195 0.205342231011 0.203427359944 0.198526873832 0.208498767129 0.187766259392 0.1906844311 0.213518747065 0.185598548292 0.229746939288 0.194805777978 0.222821572969 0.192481412892 0.225723793976 0.202375384198 0.192457483521 0.194300696695 0.205376522136 0.216660688755 0.21030774241 0.175307222553 0.197000980201 0.189066511629 0.191539232379 0.231058906578 0.21669613367 0.203523564636 0.200666530244 0.214595210264 0.195827171333 0.208641958794 0.209561099462 0.203960485003 0.196717491572 0.188766309975 0.205678014795 0.191492051166 0.198961164508 0.194672996958 0.180152450124 0.19944864987 0.257583228235 0.245178524187 0.240481186058 0.23786630337 0.249341975217 
0.245210894146 0.246468005973 0.237456371003 0.247563146561 0.241300303041 0.22729752986 0.240555698528 0.237993038095 0.254193700204 0.26033321069 0.24220764546 0.231128442611 0.230799434891 0.264694157473 0.241194058993 0.241588310222 0.245985985156 0.248083056968 0.231516408669 0.235215479132 0.226198260689 0.228320459149 0.22623565919 0.249391170367 0.221152204365 0.239284874476 0.218351963925 0.252686589685 0.220530747351 0.234477098822 0.243363395844 0.246969168578 0.229990071021 0.225895881072 0.232959826282 0.248113114083 0.23570223574 0.245178524187 0.245562733947 0.248226439579 0.241702239505 0.242775171886 0.233515238499 0.242256602421 0.232453320509 0.0221776604768 0.0292076409984 0.0225953199419 0.0344078822964 0.0402556862186 0.0443490345462 0.0247422055695 0.0212582761966 0.0262237412261 0.0239779350196 0.0420497572884 0.0414593565817 0.0638943455501 0.0444256278823 0.0324613404347 0.0464717210574 0.0064847144584 0.00810593532016 0.026613709743 0.0281809240896 0.0431707040779 0.0364933096346 0.0236549105265 0.0316629149872 0.0188389154871 0.0197111271958 0.0316629149872 0.0126413659503 0.00989228745945 0.0276229976961 0.0619054176788 0.0136582341261 0.0735855911944 0.0517713161376 0.0346942423053 0.0171636471405 0.0128780143687 0.00330427938484 0.00787705209324 0.191124461218 0.196709270957 0.202716444922 0.215924187936 0.209335641351 0.208652638742 0.203456445375 0.186385899088 0.197740844714 0.208277146555 0.207460837635 0.201437849638 0.202451222155 0.209128071387 0.184787202976 0.190226937612 0.212288039878 0.187958804478 0.231460569504 0.196093364986 0.220613648389 0.191903262961 0.227580912528 0.204703238288 0.192798319471 0.194099784682 0.207098250381 0.21653588119 0.209689452484 0.175692486622 0.198220686642 0.190843207535 0.19145100341 0.232063526554 0.215320322312 0.200757345911 0.200512766736 0.217080327154 0.194471953875 0.2086592247 0.211165769014 0.203992872465 0.197349559187 0.188849659131 0.205722712353 0.191296031353 0.198394567468 0.194851113014 0.177048060287 0.198893335106 0.255751598489 0.244696550693 0.240850735029 0.238657915403 0.248846650839 0.247061872969 0.245367004129 0.240135191988 0.249950401792 0.239377278919 0.225435027987 0.240867925442 0.23738991955 0.253602763959 0.257909304325 0.239755581779 0.231578379023 0.230802663983 0.267123052444 0.243889033036 0.240266927764 0.244253437447 0.250943098487 0.231284290644 0.234232349856 0.227603903464 0.227483301065 0.225010355342 0.249291083828 0.223656027478 0.241455021635 0.21867583866 0.252237135554 0.222012069338 0.237944922778 0.2436864771 0.244348021388 0.230119265967 0.224342678419 0.231928535106 0.246422479994 0.233277995952 0.244696550693 0.244602643666 0.245964063641 0.239692017082 0.242916090829 0.232449793011 0.23942553099 0.231611684555 0.0349328180738 0.0397488289561 0.055917859907 0.0466657680267 0.0537525131124 0.0312322620487 0.0204661899168 0.0273925121636 0.0188335643607 0.0409848131484 0.0582072876054 0.056071962696 0.0229170555223 0.0261517042618 0.0397135231185 0.0177392332242 0.0260266328079 0.00803879586998 0.0153514357558 0.0448019039458 0.0436873202641 0.0393784019825 0.033505533415 0.038918396453 0.040710313775 0.033505533415 0.0158373818638 0.0159973219202 0.0374102115892 0.0608056403948 0.0178404535904 0.0681680501025 0.0368277650919 0.0342153028534 0.0136005609896 0.0114725513671 0.0202654226306 0.0215111320227 0.175502940168 0.180846739943 0.186993890383 0.200622200655 0.194047326964 0.191871659177 0.187230724864 0.170446969951 0.181958118895 0.192296146604 
0.192168805538 0.185737701873 0.187567790504 0.1926934047 0.169562979332 0.174903941965 0.195627236518 0.171332536122 0.216941277953 0.180224282629 0.204375817171 0.176708588212 0.212007481629 0.187940100384 0.177271729963 0.178782860675 0.191678059195 0.200974345147 0.193702992688 0.160469688943 0.182595805427 0.175146841545 0.17584337797 0.215706304999 0.198413689274 0.184386485081 0.184856323095 0.20232915317 0.177913816132 0.192982627694 0.194328346433 0.187558388958 0.181714052911 0.173349805963 0.189464804214 0.174374123803 0.181953747446 0.178997842818 0.162709307879 0.18281187754 0.239644460978 0.228724428122 0.225246554022 0.222159074766 0.232927392687 0.231188577573 0.229024014707 0.22394884582 0.234228075319 0.223668927512 0.209861178703 0.225307285274 0.221985913786 0.238104571085 0.242774657148 0.224369518131 0.215280858443 0.214174032708 0.25182022511 0.228290074367 0.224826938646 0.228427350702 0.235193041024 0.216001866886 0.218122104545 0.211215702011 0.212099059638 0.209083204086 0.233569980649 0.207419375116 0.225880589953 0.202184861075 0.236687676647 0.205660738951 0.22091809464 0.228766193737 0.228457617824 0.2135856342 0.208453819381 0.216659972673 0.231192094033 0.218856194539 0.228724428122 0.228811270004 0.230578167797 0.224983578773 0.22794936407 0.216954366886 0.223518672063 0.215168993038 0.0278789967436 0.0521696459286 0.0689795584164 0.0720246274977 0.0510464689946 0.0421745394626 0.053582971262 0.0252834975098 0.0677158937278 0.0652657188093 0.081318384881 0.0463957610979 0.0317140261778 0.0668416555065 0.0279143378417 0.0293650428541 0.0349789935753 0.032697569141 0.0634994948437 0.0398508689254 0.0464805914619 0.00599304401229 0.0382168939676 0.0359888514026 0.00599304401229 0.0356302780251 0.0278103138868 0.0553135387293 0.0681148770417 0.0401494228711 0.0950529579724 0.0698373790182 0.0522664102967 0.0364245082526 0.034367526255 0.0307005764611 0.0296340954647 0.193320448722 0.200616479264 0.204805927895 0.217738543992 0.211505662739 0.210916031972 0.207785188245 0.189492055249 0.199011305592 0.212849559963 0.207930120156 0.205973985295 0.201080290256 0.211364107387 0.190111543422 0.193149888648 0.216396870129 0.188564248953 0.232124397575 0.197509520178 0.225563508571 0.194908404083 0.228408782968 0.205404773753 0.195016829794 0.196780548006 0.207983584103 0.219251719916 0.21299620972 0.177753842585 0.19963357851 0.19173429638 0.194103048534 0.233951642516 0.219652294406 0.206249008964 0.203257559758 0.217055047586 0.19863330849 0.211258733702 0.212590735452 0.206794766477 0.199327419787 0.191301214309 0.208462930841 0.194441169877 0.201765843334 0.197324504463 0.182225897174 0.202147597456 0.260363424846 0.247928312505 0.243139798114 0.240805098305 0.252082485905 0.248010938223 0.249316339351 0.240374954583 0.250342905364 0.243926059402 0.229857687769 0.243199557315 0.240557269117 0.256813113392 0.262812568913 0.244726906585 0.233983032632 0.233739246967 0.267372287242 0.243942332388 0.244149194945 0.248660562989 0.250891907974 0.234045856002 0.23797517084 0.229108542601 0.230855414047 0.228916694642 0.252083674667 0.224056856634 0.242001030284 0.221239323161 0.255322806495 0.223424493307 0.237685672547 0.245823282675 0.249646734812 0.232903558602 0.228555612396 0.235462898266 0.250614129287 0.237926457957 0.247928312505 0.248245352669 0.250761236826 0.244030479155 0.2452417146 0.236085335476 0.244925556651 0.235313123462 0.0296919493074 0.0549103775111 0.0590022859872 0.0450958795243 0.0436647014416 0.0451669494409 0.0401697758406 0.0620690815544 
0.0404063812339 0.0856236153109 0.0591687282212 0.0490473517415 0.0683619493373 0.0275851844543 0.0264002504943 0.0446945676427 0.0461763497794 0.0638640733472 0.0232220049486 0.0233353107646 0.03147088677 0.029201781247 0.02614478673 0.03147088677 0.0350355717384 0.030428654072 0.0452764113597 0.0802967236314 0.0328354116031 0.0941098407344 0.0724718577687 0.0562014612922 0.0313029110017 0.0340889421853 0.0234757814211 0.028925036543 0.210407140423 0.216202187825 0.221880775368 0.234954371989 0.228487320903 0.227668142527 0.22288748901 0.205844703323 0.216768667446 0.227711464843 0.226263348504 0.220970493431 0.220784685064 0.228189601628 0.204608420295 0.209669658318 0.231537789973 0.206793805641 0.250139733355 0.215157813925 0.239903556316 0.211352210266 0.246303099122 0.223431243026 0.212079033799 0.213468334673 0.225996065584 0.235701634712 0.228967589943 0.195079844754 0.217288259849 0.209835756349 0.210818201018 0.25087781482 0.234509225035 0.220284749566 0.219817558162 0.235704379779 0.213924917847 0.22786813663 0.229992970272 0.223192642095 0.216540668547 0.208207378637 0.224921456698 0.210576921517 0.217728441709 0.214138769918 0.196963968339 0.21825274764 0.274633184392 0.263620373951 0.259724001387 0.257434131046 0.267733388911 0.265627371082 0.264299751314 0.258574475793 0.268388644672 0.258492710734 0.244710620006 0.259750243754 0.256426981572 0.272472244255 0.27686351493 0.258914849135 0.250487159092 0.249742739323 0.285410306304 0.262307563551 0.259345413586 0.26329314732 0.269271128247 0.250334873532 0.253300294048 0.246383898221 0.246645581278 0.244211963707 0.268141898451 0.242252606427 0.260005586654 0.237699783889 0.271104326635 0.240825446457 0.256147548483 0.26253800174 0.263413026859 0.249061635853 0.243583443282 0.251070491042 0.265469741093 0.25252673858 0.263620373951 0.263585528386 0.265041995624 0.258842865142 0.261802853748 0.251583944502 0.25855292944 0.250657339544 0.0450102465608 0.0439198537957 0.0420878714528 0.0489743500788 0.0458798154287 0.0561577170329 0.0578224624858 0.0224176386409 0.0849412968827 0.0784422731853 0.0630254855321 0.0686145922194 0.0402128784166 0.0342259139129 0.0607491308771 0.0616063147121 0.059057582132 0.0469260494382 0.0248556117621 0.0569379974042 0.0246670677105 0.0228986845775 0.0569379974042 0.0437638412118 0.0433305015411 0.0375189879884 0.0808012493057 0.0411311531308 0.089015155435 0.0787045494767 0.0568803303557 0.0468151856286 0.0457099341419 0.0358166687321 0.0382966404493 0.218181156993 0.223322073096 0.229705702275 0.242392790988 0.23577882008 0.236467641295 0.230139466345 0.213525276303 0.225126391191 0.234563959305 0.23447409507 0.227593305984 0.22968375989 0.236629811874 0.210530415308 0.216784944512 0.23930372472 0.216469414915 0.257297585894 0.22354090813 0.246798396633 0.218282233397 0.254457430072 0.233053108231 0.219729861221 0.220661425884 0.234112165808 0.242944509178 0.236406584132 0.202708452811 0.225406734885 0.218378296681 0.218342011887 0.259279663879 0.242430236301 0.227193609953 0.227261836804 0.24357213448 0.22167035443 0.235341335514 0.239265397892 0.231386700827 0.224381906232 0.215751708357 0.232947742221 0.219218926008 0.225693206986 0.221969156729 0.202171293512 0.225894654473 0.281426284779 0.270908280604 0.267090828143 0.26583132699 0.274949583689 0.273930107263 0.271660675229 0.267616788632 0.276815360824 0.264923167376 0.251110814116 0.267050687079 0.263175899022 0.279239724537 0.282486166675 0.264820551157 0.258570659963 0.257930961428 0.293412787858 0.270816835713 0.265787467516 
0.269896000193 0.277962792777 0.257183140616 0.260538043514 0.255036127386 0.253324298915 0.251219664681 0.275354649609 0.251349757403 0.268235077166 0.245978035775 0.277988121804 0.249516964722 0.266387651101 0.269264891567 0.269706692396 0.257221982881 0.25041421232 0.257531332229 0.271532129624 0.257608408712 0.270908280604 0.270491687514 0.271003440471 0.264326332823 0.268486578685 0.258229704408 0.264793848318 0.258283260113 0.0140423845301 0.0233496953457 0.0357817640834 0.0194594148752 0.0561842963236 0.0207161808072 0.0344686045315 0.0532665757163 0.0656105748711 0.0611304155975 0.0376479555399 0.0422417749013 0.0418045042693 0.0514369987371 0.0551924216967 0.0370785412495 0.0618826263881 0.03552992519 0.0709823822809 0.0406476055096 0.0445130802003 0.0709823822809 0.0367370595656 0.0436433053629 0.0176515558758 0.0714383400943 0.0313405988749 0.050902359898 0.0456873150128 0.0406736463161 0.0404939048403 0.0386201712918 0.0390017962403 0.0411563720519 0.190383467974 0.193150151335 0.201647820189 0.214732592431 0.207848794505 0.207203203998 0.199017728171 0.184517441065 0.198117570692 0.203251095462 0.208611510757 0.196706601418 0.206520396425 0.207659337014 0.179547279859 0.188402996857 0.207901541524 0.18984126915 0.231436366396 0.196328732424 0.214604717337 0.189868309751 0.227504737805 0.2057488472 0.191940214233 0.192467873609 0.207646738607 0.21397919575 0.206530920079 0.175883310987 0.198349858418 0.192082967323 0.190004696209 0.230325216181 0.21052421867 0.194679583814 0.198537353627 0.218586319181 0.19070413186 0.206580728089 0.210890308648 0.201869808993 0.196665077614 0.187817228574 0.203604184545 0.189253631177 0.195640755776 0.193545341254 0.172360847921 0.196255157011 0.249220192749 0.240515221189 0.238164043403 0.236348677214 0.244540315717 0.246252369823 0.240132469075 0.240621741867 0.249888930273 0.233326376677 0.219986372028 0.238112203328 0.233455949013 0.249164983161 0.25077275089 0.233019708703 0.229068571188 0.227556818352 0.266813634827 0.244493984417 0.235169888951 0.238293435462 0.251527743788 0.228140449415 0.22959779378 0.226587756924 0.223543468869 0.220396683922 0.245620426642 0.22441324809 0.241383050303 0.216373265669 0.24802824928 0.221314439433 0.239346728325 0.241091845042 0.237050180919 0.227111645095 0.219282181885 0.227591404442 0.240665010845 0.227138628142 0.240515221189 0.239782829874 0.239319392462 0.23385508337 0.24006053231 0.227964392353 0.231962468731 0.227162089828 0.0243523838323 0.0383436294161 0.0281380877138 0.0591143980174 0.023507742642 0.0377968944731 0.0517794728194 0.0731209286425 0.0626942772583 0.0386453724111 0.0461236944312 0.0433886463071 0.057222198299 0.0591551673076 0.0319956615202 0.0701506857494 0.0422353525103 0.0748230813085 0.0387325777977 0.0428806382505 0.0748230813085 0.0409657262208 0.047385502674 0.0175421295668 0.0657295550805 0.0386908764688 0.0489968109439 0.0522686619491 0.0387311691946 0.0491554938498 0.0442532671202 0.0441014308007 0.0436449779054 0.189947462454 0.193087358051 0.201270443197 0.213957860003 0.207091706444 0.207812220084 0.199357780455 0.184445706942 0.197758177741 0.203406879958 0.207785745488 0.196588475912 0.205402521621 0.207935882191 0.179126107639 0.187763747972 0.208638518258 0.190249811145 0.22994679767 0.196055336563 0.215105618909 0.189122223025 0.226906503622 0.206245399814 0.191418910324 0.191795589288 0.206939834769 0.213506326546 0.206505332082 0.175157210084 0.197857598212 0.191658112797 0.189588115864 0.230508642351 0.211553886467 0.195374420383 0.198156887541 
0.217288762795 0.191332101287 0.206198782834 0.211486527696 0.202202827305 0.19621322526 0.187298385594 0.203764973183 0.190101114406 0.196046751208 0.193325009164 0.171164264517 0.19631141813 0.249660253112 0.240527632421 0.23771162293 0.236702658605 0.244516745342 0.245937622906 0.240621381598 0.240549902948 0.249413960629 0.233305723303 0.219774348622 0.237625603888 0.232924324305 0.248768260048 0.250380844287 0.232789805762 0.22924781342 0.228117670835 0.26599273559 0.24389404773 0.234785533178 0.238373023947 0.251062908639 0.227440481837 0.229791016779 0.226764157548 0.223002288166 0.220422035353 0.245352612988 0.224391473735 0.240777024278 0.216738025139 0.247654798968 0.221447853711 0.240056634641 0.240035093698 0.237386138185 0.227558813593 0.219321439562 0.226978987712 0.240167124817 0.22597544824 0.240527632421 0.2396924683 0.239078218682 0.232902821621 0.239060496039 0.227569835839 0.232339810664 0.227659905228 0.0141262101806 0.0139426926457 0.0350538423884 0.0193552608508 0.0431621025158 0.0440916637552 0.050896637678 0.0388189182549 0.0267566278127 0.024110231253 0.0232035130335 0.0336464874245 0.0349533341335 0.0219308383764 0.0572621322316 0.0349099564127 0.0531369685873 0.0252590392656 0.0298245046475 0.0531369685873 0.0175869571119 0.0244711956848 0.00818635303924 0.0514811245358 0.0194723442375 0.0509148981601 0.038941584783 0.0196470806889 0.0301479553419 0.021545096414 0.0245644202822 0.0217515624084 0.179463055971 0.184039565703 0.191058390156 0.204133065196 0.197353411589 0.197439799103 0.1907070948 0.174350095292 0.18671934138 0.195225461177 0.19666314681 0.188285080293 0.192912863971 0.197716084636 0.17106321505 0.177948538218 0.199885832792 0.178001143095 0.220003811923 0.185034059424 0.207478284578 0.179492397587 0.21650741229 0.194560853934 0.181055185718 0.181934852801 0.196069170377 0.204292703623 0.197354097622 0.164139489747 0.187014857055 0.180129327357 0.179477085752 0.220692949894 0.202951258529 0.187497059139 0.188415486818 0.206311756974 0.182082560815 0.196599300923 0.200515380101 0.192263706104 0.185773424375 0.176983033355 0.193914579783 0.179773695055 0.186365398569 0.183085748173 0.163066216827 0.18672092116 0.242702110891 0.232274391832 0.228788679171 0.227187602483 0.236388804345 0.235969651611 0.232778101812 0.229721372697 0.23914299349 0.226115326683 0.212181006801 0.228757804803 0.224715842477 0.240932159296 0.244193462731 0.226111600887 0.219855639469 0.218968698082 0.256162056313 0.233278273993 0.227208648321 0.231128251362 0.240452766308 0.218806638317 0.221623839858 0.216518120679 0.214708469871 0.212229381986 0.23697508889 0.213254322456 0.230501952576 0.207030794754 0.239662752997 0.210997516317 0.22840539478 0.231383198724 0.230834439598 0.218316993527 0.211375460714 0.218990694003 0.233092437868 0.219339375552 0.232274391832 0.231855607362 0.232408829329 0.226038981221 0.230519762371 0.219567778072 0.225824086027 0.219230412501 0.0195919993919 0.0221059172753 0.0271326704392 0.0528905379198 0.0431586822848 0.0385447030867 0.0255010287112 0.0262337606525 0.0170321830128 0.0195302099147 0.0206521036595 0.0209833716129 0.0252645250533 0.0550811186683 0.0394746523074 0.043263968218 0.0280088992059 0.0316978756724 0.043263968218 0.00932281475944 0.0154549635915 0.0220221996956 0.04619442006 0.0176558634743 0.0549187053198 0.0347531723104 0.0154010655624 0.0249961358537 0.0130624709681 0.021086975526 0.0157582275316 0.172494143831 0.17784850126 0.184169405165 0.197413015598 0.190717679771 0.190383314088 0.184688114733 0.167622718241 
0.179385018024 0.189454833136 0.189254205537 0.182497405034 0.184783379021 0.190756581821 0.165553322788 0.171383967321 0.193742345805 0.169993474917 0.213189630746 0.17771520654 0.201947261672 0.173029090008 0.209395719039 0.186777092552 0.174149245532 0.175318744446 0.188797436083 0.197902799323 0.191019058185 0.156992121883 0.179797341784 0.172560525004 0.172720884671 0.213854773981 0.196871318612 0.181938976962 0.181811193063 0.198969640765 0.17576606357 0.19002752539 0.193102069722 0.185493626505 0.178790364297 0.170132074883 0.187191891105 0.172843021691 0.179777298661 0.176222723299 0.157641466463 0.180195116525 0.237394679003 0.226280310947 0.222447934209 0.220486140395 0.230450693082 0.229006895923 0.227001818993 0.222289337207 0.232008422548 0.22075701885 0.20666547313 0.222449960987 0.218801277251 0.235156691974 0.239347508119 0.221054539901 0.213261323555 0.212504634094 0.249237472588 0.225979854618 0.221636903122 0.225713392695 0.233125083698 0.212685206963 0.215719410439 0.209455227207 0.208792490533 0.206361366722 0.230902205737 0.20571314342 0.223403407555 0.200311655198 0.233802587518 0.20384593219 0.22048704521 0.225216247618 0.225812871456 0.211806271505 0.205652934938 0.213231447371 0.227786843309 0.214377387143 0.226280310947 0.226104307704 0.22732908173 0.220904746241 0.224415757753 0.21378854989 0.220845147853 0.213178189936 0.0385029210536 0.0191331286834 0.0412210559221 0.0479132957057 0.0465498029846 0.0439952648454 0.0294155998226 0.0263022819525 0.0288814535105 0.0321813612841 0.0366154046916 0.0324760763417 0.0515944791638 0.0310049526439 0.0546416139704 0.0338597056505 0.0376153728239 0.0546416139704 0.019563854062 0.0269885058553 0.0157505744896 0.0625368339241 0.0143844790412 0.0526157254036 0.0338348827289 0.0297234721938 0.0233631827985 0.0204547357021 0.0243203298193 0.026443786847 0.18161798851 0.18540629351 0.193056292253 0.206460086953 0.199649422865 0.198358533748 0.191516052491 0.176005251549 0.188914967022 0.196119500233 0.199392563722 0.189524653623 0.196344377596 0.19896940691 0.172616127427 0.180167039066 0.200264745011 0.179650753185 0.223145799502 0.187130729579 0.207841346365 0.181781654149 0.218763807382 0.195938920487 0.183272877978 0.184194077304 0.198599402775 0.206127033545 0.198682577368 0.166796826532 0.189310437328 0.182538488189 0.181515358431 0.221921151125 0.202966250725 0.187732725258 0.190303161279 0.209524247949 0.182729260129 0.198439370358 0.201593038612 0.193392410161 0.187922687561 0.179193994752 0.195211914017 0.180441261226 0.187358375156 0.184927566912 0.165517695891 0.188083050798 0.242939707589 0.233299699182 0.23049760933 0.228144979761 0.237419943564 0.237777043946 0.233178399857 0.23147925362 0.241203272786 0.226929567318 0.213318762235 0.230491320835 0.22634238089 0.242302330141 0.245163744774 0.227035678455 0.220985219764 0.21961554026 0.258484704802 0.235572463293 0.228516087928 0.231839786454 0.242586485114 0.220732170317 0.222439032331 0.217865646283 0.216378419732 0.213247698263 0.238336887242 0.215052759841 0.232719204269 0.208046041848 0.241045913549 0.212450914632 0.229495311164 0.23369764439 0.231101958719 0.219113353825 0.212320151032 0.220660185426 0.234399345217 0.221293359798 0.233299699182 0.23289508713 0.233336932661 0.227805970609 0.232744111879 0.220997609023 0.226042688917 0.219773770977 0.0487507947542 0.0650775470184 0.057802762893 0.0304021403296 0.00977564963899 0.0440829229362 0.0178656109143 0.0223657443481 0.0145412613726 0.00880579384939 0.0433599813023 0.0513293545711 0.0472054655688 
0.0249055105449 0.0346274013058 0.0356855998153 0.0249055105449 0.0197624673252 0.0147740519949 0.0419438715965 0.0491548979957 0.0286642180871 0.0731736765881 0.0474587573553 0.0307413987988 0.0281173399333 0.0193986394444 0.0247050709585 0.0190246608336 0.173083486457 0.179910481276 0.184747521108 0.19795391577 0.191518465927 0.190900207622 0.18706914222 0.168885152715 0.179192090216 0.192134059909 0.188561922702 0.185169221179 0.182516578227 0.191341288573 0.168884190059 0.172666616329 0.195883736852 0.16891577804 0.213018522015 0.177613800133 0.204946600226 0.174427531668 0.209108112252 0.18591519295 0.174796071764 0.17642769541 0.188436163643 0.199161967465 0.19263437332 0.157387090389 0.179769711315 0.171986018804 0.173714059835 0.214316704261 0.199152104404 0.185242989169 0.182953007753 0.198045578814 0.177854485907 0.191103831493 0.192902410163 0.18652468984 0.179244739552 0.170950373519 0.188227253613 0.173938849831 0.181251003885 0.177034455953 0.160977434421 0.181661357468 0.240293898195 0.228034626835 0.223480275138 0.221169492968 0.232239647508 0.228872448909 0.229260803782 0.221433214134 0.231450162781 0.223643381382 0.209415344527 0.223528489107 0.220601809879 0.237045012584 0.242726403737 0.224366911478 0.21417342735 0.213777217999 0.24873924373 0.225124576112 0.224039797111 0.228474279829 0.232185938187 0.214138987375 0.217816173262 0.209554716543 0.210717178803 0.208592377005 0.232371287988 0.204873194809 0.222962175481 0.201230387345 0.23556954796 0.203837920113 0.218977104053 0.226289614933 0.229286064361 0.212971252918 0.208146125074 0.215343203422 0.230520772996 0.217614435398 0.228034626835 0.228243837858 0.230522424537 0.223850190554 0.225634530741 0.215935214332 0.224460188266 0.215143400901 0.0529022439667 0.0328752197893 0.0555054115849 0.0509289255649 0.0173017354019 0.040824127775 0.0418711547682 0.0429127577978 0.0455543968368 0.0222834470095 0.0700549868865 0.0480571802481 0.0688386171987 0.0442386440219 0.048830469586 0.0688386171987 0.0328274300443 0.0405886029945 0.0207406620607 0.0561033512724 0.0322786585229 0.0341942563123 0.0291137581017 0.0267606033099 0.0411628160219 0.0351933008888 0.0409598634365 0.0395653722748 0.170381496838 0.173513369262 0.181763628401 0.194913312213 0.188005094963 0.187597093173 0.17963993246 0.164619720579 0.178109956535 0.183964962678 0.188586214824 0.177259498897 0.186421186364 0.18797705999 0.159970598511 0.168451386355 0.188698976448 0.169860506279 0.211615177079 0.176329007245 0.195682012245 0.169937526308 0.20770892717 0.185927394166 0.171947667368 0.172526710735 0.187654893417 0.194331077481 0.186965121806 0.155693053512 0.178340485408 0.171986662107 0.170058435784 0.210831670623 0.191498228781 0.175622787806 0.178712133706 0.198589692402 0.171227708945 0.186830515333 0.191211132234 0.182219297429 0.176703062328 0.167807776518 0.183929379926 0.16962686739 0.17602523241 0.173642929989 0.152568053554 0.176555503125 0.230732284086 0.221405569072 0.21873883425 0.217008773205 0.22548132942 0.226701619289 0.221306518291 0.22096326048 0.23027897199 0.21448417882 0.200865540743 0.218691208961 0.214158679542 0.230111756153 0.232289297369 0.214283536851 0.209653108975 0.208315017711 0.24732302577 0.224778673487 0.216104443077 0.219500448511 0.231884032027 0.208661452368 0.210499772687 0.206990362059 0.20415679031 0.201176497026 0.226427503413 0.204626996142 0.221691815993 0.19686908479 0.228928610762 0.20163871009 0.219806895143 0.22162291443 0.218587303851 0.207804967644 0.200129766869 0.208295752101 0.221764477978 
[raw whitespace-separated floating-point data payload from an added package data file, including a verbatim duplicated run; numeric content omitted as unreadable]
0.0625701957077 0.0530229796398 0.0343871989004 0.0426240079793 0.0396498771349 0.0213457948378 0.0419360502864 0.0525916757896 0.0283713421426 0.0258314536376 0.0439902997437 0.0303827915475 0.0319578433558 0.0415135400801 0.039276736137 0.0267873256159 0.0192565547583 0.03784586336 0.0425882783228 0.0300770949457 0.0411024644378 0.0235206108502 0.0627241786637 0.0400442811976 0.0439864971889 0.0336917004451 0.0474026229045 0.0427127785344 0.0426539647843 0.0447428873557 0.0545498150062 0.042821329639 0.024917555179 0.03656154349 0.0624685036836 0.0466134489709 0.0464211898017 0.0148261628982 0.0328892390973 0.0156382282878 0.00785541359441 0.0118772597644 0.013233811524 0.0262299374219 0.024092235964 0.0223052815816 0.00384479195153 0.00794260597132 0.0107672255318 0.0397240757159 0.0410257993957 0.038935751255 0.0130110302225 0.0282819504394 0.0262547979726 0.0181226466418 0.0211289335782 0.0182692961983 0.00567846812613 0.0110316054291 0.0179271098732 0.0214946645806 0.0199268176637 0.00875561948865 0.0354271933282 0.0171443662393 0.0766374045646 0.0572939392451 0.0492966948245 0.0478544225205 0.061544844174 0.0531089657897 0.0627070970919 0.0461191081801 0.0559975469422 0.0592703107177 0.045198111152 0.0494826907565 0.0493817865713 0.0662451441692 0.0799247658409 0.0623534855885 0.040791427856 0.0437433118438 0.0743922416686 0.0500539193261 0.0559768175635 0.0632764008918 0.057263588801 0.0415573436738 0.0497522962913 0.0342757495228 0.040553840562 0.0415809282216 0.0599519771943 0.0293556022635 0.0472716928172 0.029440558171 0.0642586667075 0.0282006776448 0.047766217977 0.0531327322745 0.069584244341 0.041867047458 0.0430265316703 0.045945636772 0.0640480645613 0.0558532996544 0.0572939392451 0.0591020248311 0.0672221887664 0.0591786574244 0.0527229760336 0.0467939943149 0.066452337221 0.048010057481 0.0426662887331 0.0439949614302 0.0453715955589 0.0447907897725 0.0421151539334 0.0471958607455 0.0276193449237 0.02275846799 0.0605343055308 0.046215555164 0.0540603153716 0.0428037480825 0.0313795261955 0.0125172459763 0.0208290281635 0.0350621941996 0.0569552641601 0.0299458792012 0.0309610087229 0.0366049420462 0.0292005950568 0.0426658539169 0.046550389751 0.0288402250941 0.037809939324 0.0299619998536 0.0401350153602 0.0525912191682 0.0310232912468 0.0370292782789 0.029362531619 0.0351405044644 0.0315813022426 0.0330583005053 0.0477931213785 0.0264557013575 0.0497838425895 0.0546676669155 0.0214340498054 0.0132666230167 0.0351245595911 0.0284887636401 0.0394555006358 0.0430107543153 0.024001610215 0.0265746605534 0.0212961513987 0.0686985381679 0.055308045978 0.0261438582005 0.0256904480148 0.0588231449389 0.030013743601 0.0177237336601 0.0340579413817 0.023239083403 0.0129096702298 0.0366510262704 0.0448114548715 0.0501265325324 0.0238298010541 0.0385248366721 0.0348349791307 0.0531258509004 0.0432244806886 0.0265717263897 0.0222990802833 0.0105547424712 0.0247433535936 0.0319856839059 0.0308056274733 0.029362531619 0.0282110860995 0.0297242114734 0.0317000113527 0.0411003820059 0.0219347750579 0.022168792283 0.0155469808263 0.0395024062804 0.0287670845389 0.00713435108734 0.00340555052012 0.0220748170877 0.0261325674803 0.0225086838709 0.0197301659136 0.0143163055062 0.0179547860371 0.00552830002832 0.044759241534 0.0400746442935 0.0324889323762 0.0105741309237 0.0352991764051 0.0223168808509 0.0183878728912 0.0306904112299 0.0206021475442 0.0104104939928 0.00670538845305 0.0199820984844 0.0226302237702 0.0187233601872 0.00907856174289 0.0219615297998 0.0143637742128 
0.0753337380263 0.0579434032885 0.0513377916805 0.0521965044751 0.0621695045027 0.0588078748161 0.0626419440897 0.0544695113035 0.0624620281749 0.0565017669041 0.0411290585403 0.0513195597187 0.0487054775012 0.0662358469866 0.0766400648909 0.0582604755345 0.0441853596565 0.0463597810918 0.0797172873623 0.0573362873289 0.0539615186802 0.0612879452506 0.064613107429 0.0413483548752 0.0492060616579 0.0408536005927 0.0386658835072 0.0395835329499 0.0611338973657 0.0393645393569 0.053572584104 0.0330686361703 0.0645571164168 0.0355933171917 0.0584689678276 0.0540828717859 0.066313152361 0.0447064134304 0.0401381607957 0.0437269026711 0.0614075519202 0.0498319714223 0.0579434032885 0.0585128670581 0.0638402708268 0.0548281142484 0.0533071471302 0.0449195692472 0.0626153729552 0.0482343239424 0.0295703841711 0.0366619395227 0.036500768924 0.0216100790796 0.0202086133386 0.0291081498137 0.0538445159183 0.0304641791012 0.0384602037434 0.0388505887816 0.0184939827468 0.0431509831641 0.0514497599393 0.0311190761962 0.0193480902941 0.0458820296239 0.0240997287473 0.0258856149523 0.0318135695083 0.0318488388141 0.0408906139312 0.0292269134036 0.0455476970026 0.0384416242221 0.0355459730354 0.0604270677753 0.0357756191383 0.0579552498509 0.0353240752803 0.0225716123758 0.0257247517645 0.0383804782921 0.0215488097029 0.044948702922 0.0168022708851 0.0235401078242 0.0440963094143 0.0364932029322 0.0228797040221 0.0280879909327 0.0410984380147 0.0600604954457 0.0480533666552 0.0218775898923 0.029791578282 0.04158778439 0.0178882014032 0.0370867411707 0.0460836281119 0.0252155578724 0.0208740877353 0.0346851323104 0.0150019768994 0.0257835645737 0.0322493089661 0.0342156649906 0.0116183721538 0.0144931943243 0.0240188836176 0.0389068297986 0.0144502424291 0.0276541092905 0.0250061652639 0.0558771163058 0.0271661677907 0.0351271284352 0.0289684152137 0.0442230773604 0.0424650907505 0.0353240752803 0.0384614305736 0.0501641618566 0.0418657156844 0.0254781137454 0.0303709238998 0.055175185082 0.0355533707256 0.0226236551837 0.0257243733887 0.0188708962976 0.0280458447088 0.0259495603965 0.0358126700891 0.0173972925493 0.0203550537734 0.0241984077655 0.0326658712857 0.0374251933899 0.0415458358848 0.0220997016335 0.0314739029711 0.0297599572502 0.0224642058136 0.0101652715813 0.0184552115135 0.0190795074292 0.0255522841638 0.0188568594492 0.0233889500734 0.0229626929389 0.0201746110209 0.0490887418108 0.0229752614583 0.0721999639917 0.0528489482775 0.0457665690796 0.0402580156331 0.0570067559398 0.0466251225309 0.0575932728503 0.0376765605581 0.0495760090258 0.0573857799099 0.0457294834507 0.0461836275015 0.0478961825174 0.0628300939828 0.0782978077723 0.0617431168606 0.0349090738753 0.0372003973766 0.0687801137491 0.0442160827041 0.0545072221083 0.0604041835816 0.0503536267186 0.0412302683921 0.0462335606677 0.0265634879228 0.0409252862943 0.0403112510912 0.055713517152 0.0210444978159 0.042126696634 0.0241765196843 0.060672423249 0.020816933598 0.0350778856202 0.052005350197 0.067095717986 0.0355534496705 0.0422916670752 0.0461036815572 0.0628103650139 0.0591144676161 0.0528489482775 0.055715116829 0.0658580956838 0.0605104799512 0.0516473901424 0.0459913703308 0.0644951804376 0.0433395766961 0.00476396676703 0.0174479971383 0.0262117873028 0.0230803954362 0.0185627883328 0.00806620839217 0.0113814933623 0.00390319674762 0.0428177321428 0.0406615445683 0.0354624532828 0.0102263088249 0.0317971018832 0.0234656694869 0.0177926588758 0.0262276626577 0.0188776295133 0.00499606040594 0.00424685036617 
0.0184558479833 0.02090066744 0.0184013332257 0.00554993882929 0.0277916235291 0.0146255258466 0.0766584518429 0.0582536091399 0.0509151171955 0.0506702158494 0.0625265075224 0.0566765790267 0.063296553303 0.0510181236531 0.0599592936046 0.0584198089247 0.0435074153396 0.0509940717271 0.0495661398107 0.0669266401341 0.0789376730305 0.0608280979649 0.0430167703447 0.0455496571569 0.0778333721022 0.054368608181 0.0555188693765 0.0628704456678 0.0616793859568 0.0418722056261 0.0499801315934 0.0381178178645 0.0399704213931 0.0409195428074 0.0612188756554 0.0349680364553 0.0510653686933 0.0315069690942 0.065089713199 0.0323745393338 0.0538917621299 0.0541667478451 0.0685063182149 0.0437802642661 0.0419063387522 0.0452698567868 0.063322234724 0.0532615551019 0.0582536091399 0.0594215850469 0.0661130238288 0.0575059193505 0.0535703049783 0.046311202915 0.0650382074097 0.0485914933238 0.018842155525 0.0240370427448 0.0209528738511 0.0208089534878 0.0111203801188 0.015917150662 0.00453488271341 0.0421322819802 0.0391458330884 0.0331083714381 0.00824403626512 0.0325132311882 0.0224847085725 0.015999516676 0.0277730715816 0.0186363801442 0.00708833477439 0.00642676690132 0.0178233234532 0.0220593051536 0.0178006297585 0.00678573265748 0.0253531080362 0.0132629896024 0.0741187095157 0.0561450172019 0.0491350925935 0.0497514221438 0.060382347286 0.0560370322524 0.0611402268324 0.051356658754 0.059586423134 0.0554899035837 0.04031824984 0.0491508161478 0.0470289167752 0.0645318276086 0.0757421493114 0.057544117742 0.0418388253062 0.0443100927117 0.0770487593095 0.0543425693403 0.0526585588221 0.0601234502371 0.0616298008584 0.039475560154 0.0476974484773 0.0379820027229 0.0371681460661 0.0383173623723 0.0591794106435 0.0360653613059 0.0506856040282 0.0307481929538 0.0627839311984 0.0325910275487 0.055237903563 0.0520576344331 0.0655568890209 0.042566036842 0.0391094232808 0.0423459347432 0.0602723002528 0.0493981873195 0.0561450172019 0.0569656980973 0.0629553672806 0.0540102942581 0.0513493159685 0.0435185067862 0.0620208318713 0.0466647868495 0.0214518595921 0.0250574648372 0.0327035392179 0.0101396500324 0.0173409936704 0.0207342919251 0.0332931627481 0.0438902497615 0.0452760516117 0.0173962021214 0.0155474932413 0.035097178823 0.0167075078514 0.0219356609327 0.0232161362242 0.0132718217012 0.0215035441799 0.0212520758854 0.0323731403418 0.0276797560909 0.0185474301296 0.0436301136913 0.0240030156204 0.0722858985812 0.0511085237891 0.0406650357841 0.0417405611462 0.0549851372416 0.0429543701734 0.0585838550979 0.0363293620395 0.0449605458738 0.0553495140321 0.0426335340052 0.0408304262584 0.0422933209142 0.0585800490211 0.0742997883793 0.058525226517 0.035206446299 0.0404435198283 0.0627381786217 0.038449263747 0.0501920171677 0.058888502058 0.0460241869732 0.0337463818569 0.0458812434451 0.0278046680998 0.0349131438742 0.0389869853015 0.0521722266345 0.0209043609517 0.0359181281747 0.0276204608178 0.056526179998 0.0222941226577 0.041152359144 0.0428797366789 0.0667722352073 0.0380782189511 0.0410414680557 0.0398735425148 0.0580112990116 0.0505852294836 0.0511085237891 0.0532571789631 0.0625496034306 0.0531021220011 0.042888351609 0.0413906647006 0.0644729984699 0.0453457651066 0.0107504885543 0.0445819304752 0.0242663168605 0.0336280858548 0.0265479358211 0.0206001571473 0.0282052030653 0.0325495864256 0.0168933846577 0.0294835383052 0.0291726320507 0.0086711740192 0.0221542204142 0.0179304565478 0.022368606911 0.0297212987281 0.0149126627655 0.0323363820236 0.0229519593576 0.0235384954017 
0.0445361789109 0.0198724736323 0.0521573964273 0.0325036248406 0.025391360945 0.0274230805005 0.0366454630316 0.0344942582431 0.0390192491133 0.0331598239249 0.0393605141281 0.0342759827638 0.021260474927 0.0254073365966 0.0234580906639 0.0407813070352 0.054069225305 0.037293221414 0.0193616327469 0.0235535155332 0.0561408138747 0.0363119747552 0.0299684113953 0.0382284111131 0.0425406057591 0.0161350959097 0.0256650379468 0.0189027762255 0.0143744986934 0.0179840539103 0.0351859317755 0.0236434364907 0.0314452790447 0.0139598707264 0.0389431870684 0.016485382228 0.040355084472 0.0299825744277 0.0456481354326 0.0213868007101 0.0199817088813 0.0197684998025 0.0379808456048 0.0313521152981 0.0325036248406 0.0336827412775 0.0416843797906 0.0334784161986 0.0289224474022 0.0207816321212 0.0432746333929 0.0259935830061 0.0406061367838 0.0235468720047 0.0318323205013 0.0219961670771 0.0255808203638 0.0205571609912 0.0226267329863 0.013012280745 0.0369778751619 0.0187789289183 0.00874641080149 0.0197571101094 0.00977736627462 0.0202312308031 0.0257037864622 0.00771970434192 0.0231557657352 0.0133099999073 0.0187324692629 0.0392432332024 0.0115155558073 0.0539655210169 0.0370438046358 0.0333786958054 0.0316409822149 0.0414621149977 0.0426991016561 0.0406517401284 0.0403678402162 0.0480644681966 0.0361424947464 0.0218113942904 0.0334641673727 0.0302365918541 0.0468956611505 0.0577004528593 0.0391757625612 0.0236163775334 0.0242576354959 0.0653230280715 0.0452817250312 0.0345762397733 0.0404850805623 0.0511603238121 0.024886725311 0.0274569982987 0.0236904007661 0.0206352481789 0.0182697542476 0.041544522984 0.0289437440201 0.0406091261712 0.0126309522282 0.0451825068417 0.020490830418 0.0442568377481 0.0395946754396 0.0458654268305 0.0228937718071 0.0192636370802 0.0258703030154 0.0425840964507 0.0360074541747 0.0370438046358 0.0378930233244 0.0444471390747 0.0387351356827 0.0382610925586 0.0256300024801 0.0425154846229 0.0258478343265 0.0236633695184 0.0174990292841 0.0186530571476 0.0611247172591 0.0557656094279 0.0466088592976 0.0282320418304 0.0445464770565 0.0341524834412 0.0362512556468 0.0421364706395 0.0353354631726 0.0231187500774 0.0150626098763 0.0358189243099 0.0298015850374 0.0324439491366 0.0223980477721 0.0244141427415 0.0302418773665 0.0942328467355 0.076594072426 0.0693014297828 0.0689934026371 0.0808708035071 0.0745545238332 0.0811424703773 0.0680895982601 0.0772898278285 0.0757784502044 0.0604959493699 0.069358828284 0.0677179794145 0.0851546659241 0.0961465377952 0.0777040409423 0.0614342341624 0.0635105029946 0.0949910709403 0.0710640834394 0.0733442971825 0.0804630717269 0.0785569975811 0.0599979412685 0.0679000809486 0.0561320615274 0.0579270123928 0.0585056430456 0.0796324819571 0.0514612322157 0.068283919556 0.0494122791964 0.083373938553 0.0501855689957 0.06973964597 0.0718399524308 0.0852557355766 0.0618964253913 0.0591154896454 0.0630863991576 0.0809010142666 0.069276699042 0.076594072426 0.0775503896407 0.0833112406891 0.0743754315619 0.0713482849075 0.0642332894623 0.0813844341388 0.0662752735857 0.00943572253542 0.0115403490897 0.0385513754293 0.041736873589 0.0399182937318 0.0122452242861 0.025058464158 0.0280391379811 0.0165428935252 0.0220426648475 0.0191966494038 0.00476868066193 0.0119136882752 0.0182274158907 0.024378991238 0.0214350053307 0.00987681971674 0.0356319079498 0.0179231346829 0.0755450486118 0.0557883449648 0.0471764532756 0.0467952668235 0.0599617395979 0.0511011457957 0.0617632501287 0.0445144536833 0.0538325742859 0.0579584947727 0.0439095831377 
0.0473266714409 0.047305404175 0.0642647315012 0.0781620294368 0.0608980895696 0.0396397814207 0.0431908772896 0.0719383279912 0.0477762451878 0.0541744509705 0.0619776318533 0.0551486398926 0.0392023772277 0.0486708484659 0.0332061436773 0.0385651962783 0.0405238673503 0.0580675371179 0.0281169946403 0.0449308552075 0.0290979509914 0.0622896414172 0.0272386507233 0.0473564201885 0.0503681043652 0.0686223309305 0.0411717919112 0.0420509879955 0.0438795889294 0.0621492769732 0.0535535915584 0.0557883449648 0.0575324110618 0.0656578520536 0.0569417414102 0.0500298962763 0.0450037013757 0.0656341911967 0.0473715492692 0.0144468889514 0.0470085725986 0.0485138320241 0.0451352115307 0.0199583230651 0.0301372822106 0.0318368515531 0.0256571286264 0.0278614622113 0.0258405224944 0.0121251490044 0.0124421067497 0.0257425497103 0.0259848999587 0.0265128839547 0.0147071440313 0.0356921243154 0.0239599037157 0.0844599977072 0.0649098127865 0.0563152793656 0.0552526867175 0.0691043038035 0.0592804769565 0.0705635348052 0.0516880380121 0.0615515055365 0.0669939251635 0.0527850317966 0.0564919445965 0.0566781222463 0.0735436571696 0.0873943668972 0.0699246931044 0.0483722118201 0.0515510652048 0.079719756552 0.0550353446586 0.0634817595673 0.0710367571435 0.0623974106591 0.0485852577555 0.0576004808055 0.0413084590066 0.0479219849808 0.0493882028299 0.0672450344951 0.0348287223508 0.0527462211198 0.037276801135 0.071549415301 0.0351894067592 0.0529630080357 0.0594111357934 0.0773657257414 0.0496448829109 0.0507905960575 0.0532464315334 0.0714643731585 0.0625646663061 0.0649098127865 0.0667401518449 0.0748051995393 0.0661923309263 0.0591726016457 0.0543093310288 0.074196762744 0.0559244370052 0.043526425057 0.0386335179446 0.0321768451862 0.00974615024856 0.0352347635745 0.0201959159339 0.0180776754132 0.0269400303578 0.0177526168011 0.0076128290146 0.00378003726334 0.0176983136317 0.0184021944475 0.015957334363 0.00467338620778 0.0252008449064 0.0123712673884 0.075769333126 0.0581585948517 0.051685391535 0.0510927438975 0.0624824329901 0.058237684953 0.0625760535583 0.0529652916573 0.0618856654756 0.0574514448673 0.0423317456956 0.0517508019275 0.0497149058646 0.0670867680773 0.0782168275888 0.0597219475669 0.0433096265158 0.0451925797267 0.0797528207091 0.0566461202307 0.0551309318768 0.0620350591031 0.0638200944876 0.0424059873805 0.0493045876457 0.0391604065434 0.0398454975082 0.039967724192 0.0615633801452 0.0371201263022 0.0531118088353 0.0313373664803 0.0653089870936 0.0335830893361 0.0556009421638 0.055317918307 0.0671296861942 0.0435900410315 0.0407047621897 0.0451219309698 0.062876852421 0.0526327547566 0.0581585948517 0.0591116735406 0.0652217833265 0.0570769673892 0.0545681643866 0.0459758175248 0.0634453480612 0.0477419996054 0.0302678050916 0.043643410877 0.0344015081898 0.0370220423378 0.041870658286 0.0264826423168 0.0239447061259 0.0294219290852 0.0381639993795 0.046622906961 0.0276160450358 0.0433656713185 0.0358877725536 0.0394027280044 0.0640596529703 0.0352660320463 0.0428200645336 0.0215929365207 0.0157178006612 0.00848045847957 0.0252717757761 0.0193974269851 0.0287059984697 0.018482386953 0.0255997786212 0.0310533244304 0.0267766622161 0.0163561213819 0.0202134806234 0.03120585866 0.04895305148 0.036414957876 0.00526673250197 0.012342309222 0.0430330254803 0.0254800237585 0.0266262246906 0.032239986602 0.0289090953163 0.0180202124552 0.0199181330431 0.00657470209208 0.019948180111 0.0209059906389 0.0237059104211 0.0188029876251 0.0211536929916 0.0148162018356 0.0289733910382 
0.0123381921604 0.0242362829274 0.0258181102869 0.0413288399769 0.0100525363381 0.0238857865216 0.0223322555234 0.0341921837249 0.0380927560603 0.0215929365207 0.0255329039423 0.0383043472154 0.0356000313746 0.0249953874006 0.0211460552822 0.0407734262534 0.0193178864072 0.0180029141367 0.0318989553395 0.055546862882 0.0233316606029 0.0289827460324 0.0288090712213 0.0227692137274 0.0384790428766 0.0423072029606 0.0235668927239 0.02923399269 0.0234314824709 0.0353361736228 0.0517306479462 0.0262806947106 0.0449202216804 0.0352666691701 0.0392752718621 0.0317034826189 0.0393329164331 0.0490036652351 0.0325915526781 0.0480657848217 0.0556144972514 0.0314856186317 0.0229568301755 0.0394863344353 0.0351981784458 0.0468732286627 0.0534614036052 0.0352961935447 0.0267660200432 0.0206247795688 0.0716485928619 0.0553558677376 0.0350658679333 0.0348297205048 0.0591015265332 0.0349050611526 0.0239450314084 0.0312292238968 0.0291976883203 0.0191245738332 0.0424715605102 0.0407160306554 0.0507428891398 0.0195019979736 0.0455419926359 0.03094049167 0.0480027022309 0.0486981223658 0.0364405293098 0.0216724201002 0.0185311393183 0.0322264249099 0.0418711782883 0.0415641388928 0.0352666691701 0.0357541599092 0.040359691621 0.0422344237589 0.0468672273606 0.0294846892119 0.0324923805449 0.019122055634 0.028756827183 0.0587035417373 0.0144079071022 0.0301136242831 0.0361947314748 0.0239369814187 0.0355016994114 0.0354103580658 0.0251937444079 0.0240593800606 0.0195703439645 0.0308533101673 0.0370972551577 0.0220259748313 0.0576270076145 0.0487199724465 0.0508215730415 0.0469075647824 0.0527931212734 0.0620087731623 0.046977227863 0.0607033064427 0.0681905343994 0.0417090362137 0.0294084394516 0.0508408090071 0.0450400867986 0.0589677823649 0.0627613332544 0.0433082894199 0.0402642195426 0.0363046359117 0.084449006206 0.0666097386489 0.0449981598795 0.0463289168121 0.0716590868723 0.0429897098207 0.0369198960753 0.0432687121664 0.0364322199569 0.0287560795721 0.0553568599314 0.0500592177291 0.0617693212505 0.0302484927047 0.057817398636 0.0410079662048 0.0620054930234 0.0578523465697 0.0464641684152 0.0365865801942 0.0272374209649 0.0399576916682 0.0513227964118 0.0446516670985 0.0487199724465 0.0480806958126 0.0495453699635 0.0482845333883 0.0560302897599 0.0385013240981 0.0415333749992 0.0339802993211 0.0315416974034 0.0191639032899 0.00845591360235 0.0214341775937 0.0115218935447 0.00798589270321 0.013178045677 0.0100207917484 0.0199483776485 0.012776598265 0.00707708079363 0.0310463483447 0.00779811122226 0.0665120827305 0.0484731432285 0.0421114253952 0.0418216626354 0.0527853739739 0.0493966141428 0.0532439640509 0.0450980317427 0.0534929652418 0.0482563107324 0.0334028573772 0.0421738253769 0.0400650763191 0.0574010770653 0.0690115183659 0.0507807835016 0.033887277172 0.0360916940356 0.0712074753299 0.0488889567075 0.0456112729946 0.052712754383 0.0558191941722 0.0328832629679 0.0398769847778 0.0305626616722 0.03027845702 0.0307126203306 0.051853149134 0.0304315085387 0.0449208583046 0.0225669791691 0.0556104755493 0.0254498384131 0.0488668504899 0.0462046921677 0.0583599392562 0.0343786096194 0.0317076229625 0.0356254179774 0.0534761774586 0.0442401179536 0.0484731432285 0.0494784680372 0.0560638562362 0.0481022882275 0.0453399462887 0.0364100969213 0.0549637988767 0.0386182470618 0.0499647144567 0.0286991839096 0.0337581082348 0.0374427591873 0.0282526339036 0.0355267981937 0.0350362485208 0.0478325170984 0.0425063748067 0.0336958297594 0.0552821351039 0.0385952701325 0.0756759815417 0.0533281139759 
0.0401812481707 0.0447968354881 0.0564105044414 0.0391525826608 0.0629279047421 0.0332827070938 0.0388499692511 0.059944716429 0.0494848197832 0.0402828974015 0.0438747097228 0.0580998054312 0.0757410641548 0.0628856838636 0.039846534973 0.047063496141 0.0543336344875 0.0312551117596 0.052807685198 0.0627916823231 0.0390112066642 0.0353058185357 0.0513654740192 0.0323261929775 0.0392211101217 0.0463708307148 0.0519963158873 0.0234224809867 0.0301000678074 0.0372829629985 0.056085164213 0.0288004220361 0.0417416575715 0.0389119081863 0.0720657500645 0.0444323135884 0.0487706488559 0.0429521653792 0.0596874437499 0.052997284605 0.0533281139759 0.055723410021 0.0657462146584 0.0546473494099 0.0397492903292 0.0452841873006 0.0707319601823 0.0522103619845 0.0233361726138 0.027152976334 0.0145101369991 0.0239084572879 0.0231796409398 0.016731423623 0.010101659051 0.00781108168527 0.0186261838058 0.0315054768721 0.011535154356 0.0661583467836 0.0529737197777 0.0515726376237 0.0472342284059 0.0573866872941 0.0602508051558 0.0535888133028 0.0563499220883 0.0655740867856 0.0494619373879 0.0355566546043 0.0516986354459 0.0478914383755 0.0636133913461 0.0714227137745 0.0519652065027 0.040040966538 0.0379888138015 0.0833863881089 0.0624232672218 0.0504889561622 0.0539483262888 0.0682863371673 0.0434492287718 0.0419221148541 0.0393420375143 0.0383919560321 0.0330409365388 0.0586345177546 0.0426935993753 0.0582011555148 0.0271038932712 0.0620596992058 0.0353264927481 0.0565229896235 0.0582751327845 0.0564224406826 0.0375222198637 0.032781658946 0.0431629716317 0.0579468986629 0.0506284295544 0.0529737197777 0.0534601982596 0.057907553496 0.0543043024926 0.0568716124285 0.0423649825393 0.0520169525452 0.0387128090446 0.0188041113809 0.0118952838684 0.0140246174547 0.0212962400935 0.00885470160798 0.025142494638 0.0164130694781 0.0148836892075 0.0382106616301 0.012548123562 0.0595024177002 0.0405144987261 0.0336829979036 0.0339109092342 0.0447786612887 0.0412528753098 0.0460796810618 0.0378073897699 0.0456289595962 0.0415318567716 0.0274434917532 0.0337586538145 0.0320559171837 0.0493065770936 0.0620181140557 0.044443934423 0.0259210454652 0.0290199212383 0.0631931226882 0.0415500215307 0.0381432011516 0.0457001329437 0.0482419653282 0.0247624956339 0.0326912275918 0.0231122641396 0.0226732232029 0.0241582823353 0.0435758556102 0.024717419625 0.037259551901 0.0161507334564 0.0474557435456 0.0186437176733 0.0428749718429 0.0381563349544 0.0522572394535 0.0270598018866 0.0256825354187 0.028101885414 0.0461630275758 0.0383904744963 0.0405144987261 0.041758354552 0.0492909187408 0.0413606774947 0.0372601462253 0.0289047461458 0.049343864353 0.0319066136889 0.0140884736377 0.0220949123029 0.0294588701998 0.0141915109934 0.0236402055251 0.0202701530019 0.0222961106051 0.0508328337578 0.0209048958304 0.0623063919994 0.0435137888223 0.0379179155361 0.0310595169631 0.0477275300573 0.0401401417297 0.0476510837343 0.0326069267264 0.0442002914704 0.048031576146 0.0373260310255 0.0383688546713 0.0396828967833 0.0540671732006 0.0691161945626 0.052697233895 0.0256698443155 0.0271866198766 0.063343063446 0.0401717345523 0.0456835007162 0.0508390224232 0.0457136947621 0.0340506342939 0.0366331778515 0.0185292352758 0.0332180639746 0.031402826069 0.0470047522912 0.0178796862446 0.0373117201537 0.0146755143259 0.0519441438124 0.0134308953908 0.0305516636587 0.0455547559496 0.0574314455133 0.025677142017 0.0334820400224 0.038185075996 0.053981408775 0.0519396327628 0.0435137888223 0.0464090712113 0.0566586435278 
0.0526391071675 0.0449104072539 0.0375180555166 0.0549667946861 0.0334290223836 0.0161979126548 0.0212360948185 0.00313302296585 0.0148689509772 0.00704616989296 0.0135603719097 0.0384674432624 0.00748847746612 0.0612097252086 0.0442840466006 0.040253243803 0.0360606427738 0.0487530949526 0.0470778914902 0.0473392259982 0.0422963534873 0.051949068182 0.0444929580638 0.0309270188278 0.040473799777 0.0386243677637 0.0547837147046 0.066413370926 0.0479885282668 0.0287291459439 0.0288822795748 0.0702829288423 0.0483322868723 0.0432975336344 0.0484818516534 0.0543652596123 0.0329738645592 0.0348525988976 0.0258401695463 0.029652318833 0.0266204166929 0.0488217477328 0.0282325775461 0.0443695558327 0.0152728890442 0.0529346931915 0.0211960241124 0.0432358961175 0.0470042201915 0.0535566647123 0.027654993588 0.0276975587847 0.0349524016669 0.051450438001 0.0457471816551 0.0442840466006 0.0457673454519 0.0531390087147 0.0482613634351 0.0459084957324 0.0344895940573 0.0501063288839 0.0320407745768 0.00918896575074 0.0152928703979 0.0213827580606 0.0175694634428 0.00562421953864 0.0321021314886 0.0136468651148 0.0732793088732 0.0542749513511 0.0465599433205 0.0461499050441 0.0585282720374 0.0518386762248 0.0596896497046 0.0460357977086 0.0551020502351 0.055347044484 0.0408456006547 0.0466734650365 0.0457685420886 0.0629790052567 0.0758414718388 0.0580725583953 0.0386295601195 0.0415657641172 0.0731303485703 0.0495368191967 0.0520897347287 0.0595893389246 0.0567976430466 0.0379647331116 0.046441942626 0.0333063038869 0.036500108881 0.0377849182347 0.0570420811998 0.030012797008 0.0462545479693 0.027461418846 0.0610786707463 0.0275181743774 0.0490679602747 0.0500650525841 0.0657243645716 0.0396831631678 0.0390548397455 0.0418579268325 0.0600259300869 0.0509169565607 0.0542749513511 0.0557017357111 0.0631094362549 0.0546387182457 0.049518990394 0.0428637051261 0.0625010516077 0.0450876329039 0.0212339329669 0.0205215987455 0.0193863377449 0.00780222226267 0.0244911121423 0.0160871853235 0.0794295187203 0.0616053956912 0.0547043851777 0.0543405312398 0.065903416382 0.0608093966938 0.066232208671 0.0551996275352 0.0641635601661 0.0610614868167 0.0459223913813 0.0547672174246 0.0529488520021 0.0703578675954 0.081658744309 0.063265127216 0.046621662518 0.0487339053303 0.0819943915105 0.0586031447424 0.058550915637 0.0656481691263 0.065908662167 0.0454276311919 0.0529363521846 0.0420305180364 0.0431442291715 0.0436296366503 0.0648039964872 0.0390809908814 0.0552779379516 0.0347675148936 0.0685677387884 0.0363124697872 0.0577513229251 0.0579440183076 0.0708163109663 0.0470812852612 0.0443909494169 0.0483964045026 0.0662606542355 0.055662814312 0.0616053956912 0.0625961650479 0.0687432253304 0.0602433308395 0.0572909174214 0.0493875071422 0.0671416550914 0.051430941132 0.0176886906331 0.00927892520724 0.0135248076976 0.0386449359276 0.00800171044068 0.0600153322548 0.0424684845259 0.0378068586886 0.0345103555887 0.0469162632965 0.044830307306 0.0461600332306 0.040366688607 0.0496446456688 0.0429723253644 0.0292480429291 0.0380026884561 0.0362133523808 0.0526530252718 0.0646507321938 0.046401756397 0.0269368048123 0.0278152961494 0.0678508831968 0.0459682894465 0.0412431425852 0.0469952418705 0.0521326959742 0.0302237793371 0.0333696851677 0.0240543524356 0.0271406206916 0.0250182744726 0.0466959485275 0.0265020349694 0.0419000008026 0.0139599809083 0.0507909794153 0.0193885955935 0.0423195771966 0.0441873849773 0.0525088605043 0.0263754966172 0.0262639235251 0.0325133748369 0.0494223408625 0.0434143928247 
0.0424684845259 0.0439360734198 0.0514706200386 0.0459476778298 0.0431249041549 0.0322850478478 0.0492075262735 0.0310084454102 0.0100495790147 0.0163467110294 0.0344303173981 0.0138069903297 0.0725561849391 0.0577945355153 0.0549392051976 0.0494983401957 0.0622636335656 0.0613281176125 0.0591564733558 0.055493289997 0.0660163516694 0.0564238014063 0.0428263617367 0.0551692935595 0.0528843814502 0.0687090576791 0.0786138592889 0.0595588928727 0.0427108568825 0.041401774003 0.084635719422 0.0620469795992 0.0566375025559 0.0605458826833 0.0680826326606 0.0475957607451 0.0475121922821 0.0395532575369 0.0436869591597 0.0392259763169 0.0629613789201 0.0403138291944 0.0584854937624 0.0290090498587 0.0669476358426 0.0346983343078 0.0538333844175 0.0617823249939 0.0639146571596 0.0406990904981 0.0395983575205 0.0488304003282 0.0645174644364 0.0580105192199 0.0577945355153 0.0590354282227 0.0651460923012 0.0612394393036 0.0606595939265 0.0481508588685 0.059835663962 0.0439114917806 0.0129734005291 0.03341718436 0.00522839839082 0.0644629130549 0.0490781090479 0.0460393397147 0.042060104717 0.0535473784021 0.0537667869164 0.051142158888 0.0493088817338 0.0587609555936 0.047452874403 0.0333145084294 0.0462018313471 0.0433659094036 0.0595851004773 0.0694769331379 0.0504124171924 0.034671039268 0.0338977868767 0.0768809318577 0.0551957563061 0.0471765984426 0.0517762151648 0.0612752535588 0.0380650031758 0.0388085089346 0.0327052071067 0.0338902731124 0.0299610722521 0.054082097785 0.0351427472718 0.0511360689963 0.0213341577068 0.0578680354373 0.0282202368919 0.0499673113862 0.0525352197686 0.0557073834724 0.0329893658613 0.0303826410135 0.0390336792828 0.055051644837 0.0480885204528 0.0490781090479 0.0500556489468 0.0560168186449 0.0513666437929 0.0513150683738 0.038531174408 0.0517653307967 0.0358776775011 0.0293027668161 0.00949293329263 0.0725916210374 0.0547194436592 0.0483620571735 0.0469816749901 0.0590799074957 0.0545464429945 0.0590974756513 0.0490311503881 0.0583273766433 0.0546626805606 0.0398779297173 0.0484777367952 0.0468025950712 0.0639880374892 0.0757199402524 0.0573199482995 0.0393352512936 0.0411552751652 0.0764775433632 0.0532445829203 0.052349146311 0.0590579753428 0.0602574471507 0.039602885829 0.0459481099263 0.0349175810863 0.037150941259 0.03692168916 0.0582151479375 0.0331340211018 0.0497159727507 0.0271028830882 0.0621502493207 0.0292960379946 0.0512053897158 0.0526539904442 0.0643939446385 0.0395489605444 0.0378591872997 0.0425337328718 0.0602822057384 0.0511975943751 0.0547194436592 0.0559279154126 0.0626694858218 0.0551041885347 0.0518894661445 0.0431638732605 0.0608387750183 0.0441361499894 0.0310183369902 0.0871660444405 0.0733823196322 0.0689340787316 0.0705597518969 0.0774366556509 0.07861295118 0.076322371958 0.075585060054 0.0826717778434 0.0681219966699 0.0528565530205 0.0687776982924 0.0640973745197 0.0810751053908 0.0869333145725 0.0682988428223 0.0623918658396 0.0630905228705 0.0989058396189 0.0780506774957 0.0672923155095 0.0734447484338 0.0852286487915 0.0580045491026 0.0635565107431 0.0611199283903 0.0538632888957 0.0534691027903 0.0772908944799 0.0610905623861 0.073927736475 0.0514266311669 0.0797776498939 0.0563654161709 0.0797411714169 0.0709112220147 0.075991910188 0.061929266059 0.0528794637266 0.0580761513446 0.0735774043698 0.0591743277341 0.0733823196322 0.0727567267189 0.0745131906994 0.065799040936 0.0698581828462 0.0592195179672 0.0715468662712 0.0627789092372 0.0648080233561 0.0484745494015 0.0442686485072 0.0419227062634 0.0529072690322 
0.05220865754 0.0515658331236 0.0480206916873 0.056947174509 0.0470499747083 0.0323100366631 0.0443750009709 0.0415450201967 0.0583606796016 0.0686531977281 0.0497286282257 0.0341488382347 0.0345013563823 0.0748626932863 0.0530450676854 0.0459172064129 0.0515384326883 0.0594738121093 0.0355453778886 0.038659716641 0.0319368788596 0.0317165879728 0.0293867131129 0.0529269518286 0.0336846365247 0.0489212862277 0.021422361173 0.0566364033836 0.0272163294484 0.0501093106191 0.0498104699827 0.056059086848 0.0332712049783 0.0299486671345 0.0369730112559 0.0537844555125 0.0455932312627 0.0484745494015 0.0493458255088 0.0553162838774 0.0492693292197 0.0486803630647 0.0369835817896 0.0522579331221 0.0364683447074 0.022730708667 0.036743783492 0.0359076620395 0.0204805867516 0.0463990865864 0.0147686048767 0.0549817963395 0.0525592871928 0.0196223614467 0.0349129145261 0.0366443799136 0.0327544499352 0.0228629827418 0.017026958725 0.0209966358066 0.0388626349727 0.035298115438 0.0567980979705 0.0582058906835 0.0248891314561 0.0145753449597 0.0570907866116 0.0413833987479 0.0269969183741 0.0492024913366 0.039440986454 0.0359176091053 0.0265520996515 0.0615044403055 0.0540461620767 0.0485288037826 0.0239710979892 0.0540724881457 0.0603640513646 0.0428978851084 0.0134027017241 0.0368991356344 0.0354211161184 0.0355257446359 0.0210753154172 0.0377446845484 0.022730708667 0.0201673641607 0.0164841811953 0.0313109932769 0.041027522952 0.0326196770584 0.0184701671719 0.0295244972329 0.0144112105736 0.0158826147242 0.00448936000965 0.0256749628733 0.0117360219999 0.0336843126425 0.0324372887253 0.0146677582401 0.0224513170291 0.0144322007261 0.0126899376427 0.0122323800999 0.0277700509569 0.0197834641463 0.0180901487399 0.0184584697146 0.0418096588931 0.0367452397055 0.0108947935426 0.0131136689852 0.0371795334759 0.0204476192567 0.0118634455346 0.0280193213775 0.0206906520081 0.0208413846069 0.00805992628737 0.0397245591306 0.032104964073 0.0301985227624 0.0107001908888 0.0330988349255 0.0415195660786 0.0230606485279 0.0236410923325 0.0187109927435 0.0222023475244 0.0175662614613 0.0151089228932 0.0279798538754 0.0 0.00504787707871 0.0188703006517 0.0219245561497 0.0212095981034 0.0149942492526 0.0252687339312 0.016225955903 0.014982874816 0.0165713440517 0.0153992347708 0.0258128425881 0.0233194300829 0.0216404069707 0.0254243329386 0.0254506863873 0.000788850240312 0.00972288497974 0.0186083951584 0.0381473811219 0.0294157845108 0.0147578085456 0.0211609731312 0.0342335739858 0.0241609615185 0.0173732430671 0.0259324451415 0.0264222187829 0.0114741578552 0.0193298399608 0.0206022299244 0.0166568971164 0.0229188388297 0.0118905814083 0.0295550989457 0.0189848877848 0.0268759492012 0.0164070102936 0.0252554335694 0.0350869023834 0.0115237799469 0.0366507700029 0.0195520445986 0.0253794363549 0.0152320827299 0.0231766024476 0.028592244401 0.0144112105736 0.0173074040909 0.0296651493536 0.0243710502336 0.0103105434217 0.0155263675354 0.0374184940592 0.0231136537684 0.0190194510466 0.0188364876977 0.0221985054079 0.0214172478398 0.0257446692843 0.0273354856863 0.0275203255805 0.0156705691302 0.0195445751869 0.0258686776409 0.0435616108671 0.0331590462603 0.00822908256455 0.0111578554098 0.0408350390305 0.0282065307084 0.0237933699021 0.0272090918986 0.029489839001 0.0211107451245 0.0171676820259 0.0146997004559 0.0226821224304 0.022183640748 0.0185118691459 0.0264356522916 0.0241910879615 0.0208215741795 0.0237366538469 0.0205889692169 0.0263075431581 0.0263185412116 0.036342074994 0.0103523538982 
0.0247947799759 0.0232770954007 0.0303205821473 0.0382875192266 0.0158826147242 0.0205185838207 0.033936913628 0.0342591359421 0.0252819239657 0.0211775313941 0.0366164619483 0.0168683582115 0.0265527528137 0.0122930639405 0.0356399190513 0.0329283261088 0.0161748547301 0.0260907486468 0.0165355786656 0.0155013603686 0.00871939649167 0.0250375042482 0.0205784254424 0.0221880849182 0.0226228234946 0.0402265547715 0.0380348851944 0.0126404460089 0.0132955588731 0.037673161045 0.023685130291 0.0159536871846 0.0317445670434 0.0244407629294 0.0250650773838 0.00665080674841 0.0431354773139 0.0336884150562 0.0346100496205 0.00755310734613 0.037025240188 0.043753914844 0.0237910793513 0.023601769275 0.0229675330633 0.0262374537149 0.0207694947791 0.0142462256068 0.0294419901123 0.00448936000965 0.00600179744469 0.0181950411243 0.0225874743282 0.0220819669003 0.0185630904749 0.0261141108628 0.0202600233978 0.0359510556228 0.0122181959079 0.00715556619596 0.0393252590156 0.0398773260728 0.015944130912 0.025117898688 0.0279596047113 0.0495072405762 0.0438343802676 0.0223301740975 0.0291958566192 0.0238057945893 0.0121408308676 0.0320439133203 0.0386704894061 0.0116681269474 0.0246115626131 0.0320654848123 0.02212022544 0.0306066666749 0.0360151785428 0.0212925403107 0.0265520435189 0.0096564754612 0.0336990775918 0.0258669646258 0.0271355620099 0.0255831511409 0.0192288539502 0.0492946436623 0.0275425360484 0.0388140188539 0.0303333899385 0.0366778253264 0.0435686919256 0.0256749628733 0.0298872725089 0.043124071299 0.0391147976424 0.020010154166 0.0305524053833 0.0506217525372 0.0340993001213 0.0427032875962 0.0428000340065 0.0127191401929 0.0244302586973 0.0258873530415 0.0224665589838 0.0196473040025 0.0259734946343 0.0180766430627 0.0245847379502 0.0205530555257 0.0515531804399 0.047284161103 0.0168752313645 0.0093274994833 0.0472940785154 0.0299859804517 0.013568861355 0.0349261393238 0.0278394065303 0.0233159306106 0.018729490125 0.0474594716238 0.0428441003445 0.0340535271365 0.0192623858156 0.0396592952666 0.0474504224755 0.0346317498496 0.0155232846365 0.0221965807346 0.023519540409 0.0251795457747 0.0186807942123 0.0328466600284 0.0117360219999 0.0111390258469 0.0171063233969 0.027248122956 0.0326787410905 0.0216187767654 0.0174724607588 0.0153838329478 0.013043913361 0.0463610530412 0.0437380208469 0.0239671160531 0.0322222363953 0.0387763357271 0.0597848214293 0.0513617728094 0.0233595904168 0.0301944308891 0.0320013483014 0.010613155035 0.0399275999718 0.0464560281456 0.0134988892532 0.0290788786594 0.0367911394695 0.0175534572334 0.0343523037935 0.0386269072909 0.0314326304477 0.0171757591643 0.0105270444576 0.0306360966619 0.0365489530298 0.0211256372062 0.0155275027866 0.0282406353658 0.0565789653078 0.0282583663219 0.0416523037157 0.0359394330813 0.0459783038684 0.0506220203623 0.0336843126425 0.0381881672141 0.0517807652888 0.0474506190651 0.0289707556044 0.0360504295798 0.0570578425896 0.0371145976129 0.0460576692202 0.0464080290911 0.0220712004031 0.0312574889523 0.0332251791601 0.054841891687 0.0503036463483 0.028999517533 0.0360798173432 0.0193249539307 0.00826557354917 0.0383933246971 0.0453200679143 0.00495350657916 0.0300295268949 0.0391229254289 0.0271682846269 0.036533385441 0.0426841669003 0.0272844902637 0.0287077471132 0.00927183865749 0.0394174170837 0.0313084618098 0.0314758507766 0.0267668687054 0.022518491913 0.05600898249 0.0343402195218 0.045509854552 0.0363226467926 0.0425348706975 0.0488168350062 0.0324372887253 0.0364696219213 0.0493441352532 0.0445105561012 
0.0238995483109 0.0369592605884 0.0574925847452 0.0411978099793 0.0155890245102 0.0251635522565 0.0177733966699 0.020295344741 0.0224627747939 0.00622985531203 0.0261058920982 0.0236064364637 0.0550881017687 0.0493624804622 0.00949195374034 0.00547982782741 0.0508785947136 0.0251610996387 0.0119430843146 0.0365988456521 0.0213251727582 0.0179868212343 0.0207990673061 0.0485594746656 0.044214664825 0.0334335462828 0.0201922684831 0.0401754502972 0.0535325351547 0.0317074448278 0.0123261383208 0.0244676333996 0.0171629247157 0.0177983318365 0.0115555789303 0.021116940732 0.0146677582401 0.0105358367361 0.00915254671069 0.0167177891777 0.0294167459865 0.0147897849251 0.0123457776793 0.0164773765795 0.0252103273152 0.0171988861049 0.0305685435552 0.0365843208441 0.0175183238357 0.0218496748957 0.0205878945245 0.0593121863314 0.0470793832619 0.0159816216856 0.020744505382 0.0508661739809 0.018760262956 0.0128710885205 0.0300992820345 0.0117566522917 0.00679248320348 0.0280357325772 0.040552704264 0.0415859654681 0.0232256372519 0.0295874345059 0.0314690999231 0.0507965688951 0.0314591977122 0.0256867489287 0.0204093797109 0.00501434925534 0.0122277574576 0.0226603353781 0.0187158051887 0.0224513170291 0.020695499337 0.0231928239165 0.0198154038419 0.0292814261752 0.0103125125409 0.0227651885673 0.0153489133169 0.00921470910295 0.0183064776985 0.0377227049932 0.0290274791546 0.0152667053733 0.0215933253328 0.0343731361054 0.0245700487863 0.0169471424987 0.0257260078813 0.0268671065769 0.0111334970897 0.0193681705086 0.0211865368155 0.0163734248392 0.0228777878657 0.0117963493804 0.030068869871 0.0193762721713 0.027229886316 0.0161367068103 0.025740037188 0.0358416431275 0.0109673463454 0.0364509080414 0.0199965660095 0.0252914478428 0.014773865842 0.0226846344514 0.0279231861054 0.0144322007261 0.0171240275822 0.0292730788831 0.0236979129564 0.00966001570023 0.0151974508598 0.0372251985508 0.023311727536 0.0176051360114 0.032564167101 0.0208965631668 0.0168197313548 0.0207764664023 0.0423976172234 0.033280884693 0.00922898632458 0.0197200489933 0.0360385863631 0.00893473638299 0.014132357355 0.0250685285695 0.0103719683171 0.0167517993882 0.0135716425012 0.0350532380107 0.027879799306 0.0265658749664 0.015993421345 0.0286771298489 0.0428550440411 0.0151980773887 0.0298292088786 0.0196261415194 0.0184891599574 0.00655621912452 0.016322105066 0.0195353045374 0.0126899376427 0.0127701290018 0.0223217848231 0.0157154460048 0.0129568955631 0.00690277207156 0.0299954575682 0.0193817024498 0.0216771607077 0.0225658582257 0.0286138040807 0.0305011727447 0.0368382394098 0.0388439321615 0.0148355509953 0.0174423915339 0.0380129595202 0.0258082909988 0.0229842208153 0.0373470514825 0.0277476701907 0.0308564780706 0.00775830938446 0.0476084837369 0.0347450599297 0.0411478727988 0.00231111578025 0.0424821924039 0.0488975441679 0.0213563138315 0.0271270574956 0.0304501786922 0.0318897260431 0.0231190177025 0.0126379860304 0.0283589858381 0.0122323800999 0.0114136835682 0.0186791916183 0.0206672138954 0.0199079464104 0.0222007080182 0.0303110417953 0.0280827256688 0.0197052367937 0.0449418080277 0.0438182248518 0.0562345701188 0.0602081091792 0.0243728506009 0.019370550695 0.0596171194363 0.0411146925258 0.0330451422406 0.0550957050319 0.039836592047 0.0399228025142 0.0284759591968 0.0663825437409 0.0558252777705 0.0548034680532 0.0236830779253 0.0595333433641 0.0686676458986 0.0397842689041 0.0197958382609 0.0446061287137 0.0392287195509 0.0347118369491 0.0164026290561 0.0305214779366 0.0277700509569 
0.0235155079857 0.0135888018548 0.0240589544851 0.0380160024194 0.0333951182547 0.0242014401746 0.0377621051763 0.0314790157987 0.029423203836 0.0582327389784 0.0535477924002 0.0121411421616 0.00926081168701 0.0551978876015 0.028043323191 0.0176337854836 0.041735927557 0.023998617859 0.0217227463265 0.0246395690903 0.0532994548372 0.0483234850835 0.0380931811444 0.0229416680066 0.0449797181572 0.059280772983 0.0338814552602 0.0120128740103 0.0301325195322 0.0203844916972 0.0200232099081 0.0110576598728 0.0182627991321 0.0197834641463 0.0150522414851 0.006556855609 0.0144048387324 0.0315801243453 0.0179150623078 0.0119668288225 0.022167313587 0.00841936493113 0.0453128226707 0.029523676844 0.0223084484022 0.0276272783534 0.0327625901905 0.0158536182647 0.0147737513682 0.0106741877313 0.0162753926733 0.0159900539559 0.0215767754577 0.023165172617 0.0247814128516 0.0137331692872 0.0264980088885 0.0153005942866 0.0291348842471 0.0254286093168 0.0364522562324 0.00599431682769 0.0189173783639 0.0184393411187 0.0301213577678 0.0340540018144 0.0180901487399 0.0215314467503 0.0337154513722 0.0315978293722 0.024169684578 0.0166816873996 0.0356875981931 0.0143747226708 0.0514837021891 0.0372994668391 0.0230531280792 0.0250236859971 0.0398047566678 0.021934356719 0.0118568359564 0.016712157857 0.0198275637831 0.0143476538676 0.0244078681037 0.0292462046489 0.0328036706334 0.014376575397 0.0286902071785 0.0204063466033 0.0328369650405 0.0322219054124 0.0320787692664 0.00266433410273 0.0165423262191 0.0215926529236 0.03047036768 0.0358252300263 0.0184584697146 0.0213100670766 0.032046547675 0.0333108429689 0.0306570115485 0.018573514939 0.030845411182 0.00824349209933 0.0254197321026 0.047324533609 0.0530498030585 0.0193482837408 0.0436475526218 0.0516271254066 0.0457976820229 0.0502335440929 0.0569904536613 0.0344485460271 0.0478896186827 0.0272539439051 0.0572698534085 0.0358919394369 0.050424643016 0.0434868500285 0.0309009333379 0.0632686594285 0.0501928388473 0.0594250482186 0.0482632028717 0.0482478127894 0.0570074387196 0.0418096588931 0.0448198518438 0.0553205633247 0.0514202209421 0.0325934353049 0.0491031454545 0.0660730408504 0.0548573842682 0.041501827818 0.0494594306994 0.00785967991178 0.0297803384368 0.0414124759052 0.0250716250932 0.0363814861914 0.0430927179021 0.0324719682551 0.0232534554573 0.00557282966442 0.0375065288364 0.0368003051347 0.0279998159154 0.0252506405295 0.0244125277036 0.0601405054627 0.0351799654171 0.0460355764213 0.0372310571496 0.0466115069454 0.0500875517655 0.0367452397055 0.0406580228721 0.0534802351733 0.0469702184582 0.0259083864948 0.0383091416944 0.0609916135179 0.0430915691708 0.0113258711014 0.0432937210726 0.0175511998728 0.0124157338497 0.03212454916 0.0158561794962 0.0176896387613 0.0143093876737 0.043181186929 0.0362893217044 0.0315177649353 0.0141883840024 0.0359022033885 0.0492746305333 0.0223018781484 0.0211045193185 0.0229404408443 0.0181288833884 0.0110415500412 0.00837364985146 0.0172572597582 0.0108947935426 0.00733209366776 0.0131427681285 0.0115359384743 0.0199994152339 0.0092535804352 0.0217603350968 0.0184515208125 0.0501255669434 0.0279388443296 0.0142651752221 0.0382528821443 0.0251771073359 0.0222405400165 0.018907996804 0.050407441851 0.0445874581383 0.03644061978 0.0176520694999 0.0424414179548 0.05338489352 0.0323923780445 0.0108812710336 0.0260958051981 0.0217902651406 0.0213471493056 0.0109405604951 0.0249968133743 0.0131136689852 0.00893536297262 0.00802513881767 0.0192978256402 0.0302167707341 0.018365059649 0.0130926236805 
0.0184689475202 0.0343186729394 0.0436186327673 0.0296745080858 0.0408423177906 0.0469164023153 0.0321355072481 0.0293927572919 0.0115197032526 0.0423686398976 0.036129628995 0.0335484782832 0.0259949361948 0.0269704035297 0.0607621523939 0.0380404513983 0.049798653181 0.0409231346593 0.0474755392541 0.0535032395417 0.0371795334759 0.0413092694389 0.054257484038 0.0493594143559 0.0284802931744 0.0415978746211 0.0622229836323 0.0453773607077 0.0190484002067 0.0207749590973 0.00729318592678 0.0172314224853 0.0208211767145 0.0287420582694 0.0242194382748 0.0225231335589 0.0240241380028 0.0229680498334 0.0402905798782 0.0150429891355 0.0374592753112 0.0199314954492 0.0195088829088 0.00836639606005 0.0247669201274 0.0222507048525 0.0204476192567 0.021382719754 0.0305241597933 0.0212891172726 0.0134776031268 0.0107610654817 0.0367912026131 0.0227467739819 0.0252412358335 0.0153803531506 0.0101093922522 0.0191555088988 0.0375470144869 0.0363071087647 0.0222024666973 0.021689493319 0.0288400411309 0.0426974513075 0.0286740878663 0.0220177373135 0.0125507954062 0.0111145943691 0.0144555489867 0.0195277194831 0.0258649050915 0.0118634455346 0.0120062047089 0.0205457113931 0.0226933950128 0.0265812358015 0.010518368073 0.0210005232169 0.00605049088221 0.0227443548737 0.0240543202468 0.0298445720969 0.0127076622649 0.0212764877573 0.0130936058244 0.0350960563748 0.00616350137707 0.0220053566886 0.029182207808 0.0470447673611 0.0143549158293 0.0270200109504 0.0261798064669 0.0399469917707 0.0417589617676 0.0280193213775 0.0317524179632 0.0441157592928 0.0402109444472 0.0286518076954 0.0255338040061 0.0460635600013 0.0240522825965 0.0109052701432 0.0235077295524 0.0318665250498 0.0308330797717 0.0201941994246 0.026209699134 0.024272445381 0.0437154276666 0.0215651164399 0.0334110813047 0.0182660118628 0.0127706309369 0.00547043300996 0.0236736081076 0.0193715054877 0.0206906520081 0.0206326054685 0.0278093020752 0.0196854448654 0.0196839348285 0.00687772387853 0.0318861086105 0.0187829482374 0.026787715043 0.0349795665731 0.0377560197701 0.0169430774361 0.0294826475268 0.02554791035 0.044564230482 0.0307872765104 0.0282156909894 0.0140157312571 0.00304825224888 0.013131876158 0.0254598849649 0.0242359050423 0.0208413846069 0.0206178921833 0.0265921223132 0.024263714379 0.0287692622655 0.0105411880456 0.0255889461915 0.0107722057824 0.040112260752 0.0281920691871 0.0346799127455 0.00547299312148 0.0351698801996 0.0413927767406 0.0178561632488 0.0295704831304 0.0240442618733 0.028433519767 0.0198981190422 0.0164655446546 0.0293872770207 0.00805992628737 0.0103936152637 0.0224915115769 0.0226675446032 0.0164308167554 0.0187959028502 0.0318543120786 0.0236941138036 0.0214366809685 0.02158460699 0.0453234846239 0.0100348820296 0.0205571695519 0.0349449705288 0.0594353543617 0.0269232145172 0.0378563722335 0.0357646095878 0.0508614713562 0.0504482642845 0.0397245591306 0.0434432071263 0.055747628056 0.0497669350857 0.035138779746 0.0361053959246 0.0583555321158 0.0365217793435 0.0331608473129 0.0326376645366 0.0244797200401 0.0260351758605 0.0198255233977 0.055257342683 0.030607412773 0.0406724640229 0.0316826493706 0.0417015250124 0.044714144963 0.032104964073 0.0358439539437 0.0485265392209 0.0417397589401 0.0211023329973 0.0328093106913 0.055937896155 0.0382216948189 0.039178382683 0.0116462448464 0.0328449320294 0.0356452166019 0.0431040247851 0.0127555033307 0.0192172916283 0.0248029464024 0.0398126039657 0.0387676066679 0.0301985227624 0.0325088097853 0.0421224110282 0.0389248491362 0.0344395222458 
0.0235149407659 0.0407276725105 0.0194230824244 0.0402634745964 0.0467109968717 0.0198309327046 0.0277799641062 0.0285372209353 0.0306922978338 0.0217975015009 0.0133715864954 0.0282760151712 0.0107001908888 0.0106288988311 0.0196086000788 0.0208164998238 0.0183683750797 0.0208677693371 0.0306825719595 0.0267116346537 0.0244223076771 0.0326854873354 0.0507787766717 0.0180800830921 0.0283453513368 0.0285692206738 0.0439964220236 0.0435330479692 0.0330988349255 0.0364104267178 0.047999984958 0.0430530022579 0.0322085934623 0.0282999450485 0.049271623839 0.0273166054796 0.0425206430225 0.0621747559979 0.0316259435967 0.0474306721161 0.0460270405303 0.055722110458 0.0616108763567 0.0415195660786 0.0463779068274 0.0600405303926 0.058487923658 0.0428330727641 0.0451145046508 0.0622811849491 0.040848815271 0.0429266615699 0.0304603009077 0.0328680598837 0.0193092495687 0.0257358921646 0.0276472175985 0.0230606485279 0.0242829569209 0.0335562675322 0.0240112096757 0.0023427193432 0.0215758461563 0.0440130376785 0.0333268924093 0.0336594835101 0.0267459293135 0.0300685155468 0.0190916149163 0.0302108808069 0.0236410923325 0.0197983357166 0.0116623663768 0.025981149508 0.0407083432987 0.0270506742353 0.00533601555818 0.0243947872794 0.0165388277645 0.0203993556991 0.0306449706863 0.0351570407904 0.0187109927435 0.0216936002715 0.0328125704384 0.0328483395342 0.0289711567156 0.0177290185247 0.0324417191457 0.00990245486371 0.0144435719572 0.0253854499058 0.0235470613048 0.0222023475244 0.0213735089495 0.0257216217897 0.0239916366178 0.0307770932602 0.0118559544137 0.0237036848381 0.0117425310805 0.0184555467145 0.0157796168272 0.0175662614613 0.0166625646078 0.0231975140243 0.0146299258577 0.0171445810537 0.00397360286698 0.029169512627 0.0192796522734 0.0184254094366 0.0151089228932 0.0104342480435 0.00810789324855 0.0108902089943 0.0236280097789 0.017384613998 0.0213096795304 0.0252727380791 0.0279798538754 0.0244605767611 0.021500625639 0.00796997392989 0.0256665045822 0.0176058458155 0.0292950203769 0.0310442784549 0.00504787707871 0.0188703006517 0.0219245561497 0.0212095981034 0.0149942492526 0.0252687339312 0.016225955903 0.0138989004364 0.0180767927652 0.0222193332382 0.0141002986592 0.0215877207325 0.0172352062998 0.0156454524782 0.031383424491 0.0212505739868 0.0142951058056 0.0253431933269 0.0219030212003 0.0156518397703 0.0263438271284 0.0285058962486 0.0193626993311 0.0417261059969 0.0313743437253 0.0260801912789 0.0155074292908 0.0227467684355
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-jensenshannon-ml.txt b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-jensenshannon-ml.txt
new file mode 100644
index 00000000..8ed5b965
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-jensenshannon-ml.txt
@@ -0,0 +1 @@
+0.320369972991 0.338972466 0.308199372323 0.3452431902 0.310024768313 0.357115225615 0.311131096357 0.357391534414 0.329718053755 0.347365921475 0.335272625287 0.336451560653 0.33015370606 0.369628769749 0.344499490029 0.321622508707 0.345377707016 0.321007207534 0.350728979121 0.32809430086 0.30207071308 0.291663252492 0.30760470102 0.315976639534 0.308132467187 0.313014586878 0.310463895925 0.321091616502 0.290044394125 0.322213459935 0.315509196522 0.3331114403 0.281071919202 0.320854431887 0.332190658438 0.299342730178 0.313528775154 0.310049073937 0.288821516545 0.307662081954 0.328387688508 0.317185603454 0.332046170365 0.291912213887 0.37870970117 0.336080073379 0.304593343921 0.330138983604 0.355071759299 0.311946140607 0.302025400768 0.330940761586 0.351140062502 0.354772884287 0.272605322053 0.327957349848 0.28871110366 0.320821172951 0.340976919806 0.30757488831 0.320975346884 0.252776262329 0.314549731907 0.326876483 0.337684418756 0.296520013735 0.31493077245 0.327721982167 0.325802862624 0.341908184107 0.300481749419 0.312499767894 0.301061762121 0.27665157989 0.3082566692 0.287466396145 0.288313694552 0.296629698731 0.283556095025 0.322489360684 0.280765581604 0.297958166613 0.313189657041 0.303470399659 0.348652898212 0.331594734387 0.299446687464 0.339047458559 0.286979246044 0.316326095312 0.321618884109 0.330065896317 0.324500638067 0.328300795872 0.309002568222 0.262587468469 0.31974123777 0.286316182293 0.321162329165 0.328160620315 0.356618051635 0.289733970648 0.344507756538 0.301485561986 0.335785898715 0.322635066518 0.331480718646 0.297897604494 0.306942928189 0.350843442517 0.342585296966 0.341311053315 0.306780105123 0.313401804298 0.319978145568 0.302460397612 0.346105758567 0.312802351189 0.331552275517 0.321624157344 0.318798118247 0.301906095501 0.301585920138 0.314556178985 0.333215221158 0.306929663844 0.317083256901 0.309667679181 0.306529028004 0.30865993751 0.296031907986 0.28742420979 0.311584483038 0.319043629504 0.330278008622 0.314466433681 0.327937382021 0.296448162218 0.307033121385 0.296391953011 0.292691206116 0.297146209653 0.307929858983 0.291863681454 0.307300188104 0.306597817799 0.34718100163 0.317436210259 0.29952626739 0.330762834707 0.334951064852 0.323806678898 0.296203706701 0.33398466797 0.344931265559 0.293948734727 0.332764639313 0.272651853935 0.317324315923 0.300493570867 0.307008231016 0.333263322802 0.31390648462 0.332416491248 0.314766869708 0.321015549211 0.322909289307 0.356882966656 0.310596945263 0.343939748528 0.286269629586 0.33173459898 0.323848483719 0.305841388975 0.319266258167 0.34012363898 0.3443280395 0.353885654057 0.320544729867 0.353280499623 0.315621795536 0.312176062734 0.301562130879 0.312061680573 0.312642847966 0.326222109701 0.357417912858 0.313083593142 0.334033412713 0.295630506074
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-minkowski-3.2-ml-iris.txt b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-minkowski-3.2-ml-iris.txt
new file mode 100644
index 00000000..dc396c8c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-minkowski-3.2-ml-iris.txt
@@ -0,0 +1 @@
+   5.0817745e-01   4.4535192e-01   5.6700421e-01   1.2418578e-01   4.8927739e-01   5.0180477e-01   1.4096146e-01   8.1242502e-01   4.1586001e-01   3.2586371e-01   3.2586371e-01   5.2942799e-01   8.6137722e-01   7.7039952e-01   9.7270522e-01   4.5581864e-01   1.0000000e-01   6.3861009e-01   3.0546431e-01   3.7427929e-01   2.5251796e-01   5.6700421e-01   3.8776762e-01   5.2942799e-01   5.0905001e-01   2.5651975e-01   1.2418578e-01   1.2418578e-01   4.5470518e-01   4.5470518e-01   3.2816937e-01   6.0181382e-01   7.3457830e-01   4.1586001e-01   3.2586371e-01   4.0147421e-01   4.1586001e-01   7.6752131e-01   1.2418578e-01   1.4096146e-01   1.2396136e+00   7.1462831e-01   4.1449626e-01   5.3588338e-01   5.2942799e-01   3.2352160e-01   5.2862779e-01   2.5251796e-01   2.0656129e-01   3.5031395e+00   3.2158090e+00   3.6682165e+00   2.7164367e+00   3.3288934e+00   3.1477087e+00   3.4033622e+00   2.0308266e+00   3.3209346e+00   2.5912926e+00   2.3257069e+00   2.8912179e+00   2.7273721e+00   3.3660466e+00   2.2876649e+00   3.1664710e+00   3.1642132e+00   2.7448172e+00   3.2474124e+00   2.5734684e+00   3.5025969e+00   2.6980573e+00   3.5983434e+00   3.3515288e+00   3.0113552e+00   3.1469325e+00   3.5526357e+00   3.7475562e+00   3.1812462e+00   2.1818668e+00   2.4927109e+00   2.3909738e+00   2.5729378e+00   3.7711998e+00   3.1620401e+00   3.1916270e+00   3.4478147e+00   3.1312883e+00   2.7541224e+00   2.6886547e+00   3.0483897e+00   3.2685282e+00   2.6752185e+00   2.0587064e+00   2.8619072e+00   2.8416143e+00   2.8554471e+00   2.9845926e+00   1.7697734e+00   2.7640668e+00   4.7690606e+00   3.8067806e+00   4.6866422e+00   4.2843668e+00   4.5417384e+00   5.4120246e+00   3.2161426e+00   5.0569442e+00   4.5165793e+00   4.9462324e+00   3.8595100e+00   4.0249346e+00   4.2787236e+00   3.7387507e+00   3.9160762e+00   4.0938708e+00   4.2028863e+00   5.5316487e+00   5.7297286e+00   3.6968486e+00   4.5074741e+00   3.6330985e+00   5.5146761e+00   3.6293227e+00   4.4495340e+00   4.7599229e+00   3.5255287e+00   3.6076762e+00   4.3339547e+00   4.5590471e+00   4.8997298e+00   5.2856169e+00   4.3511402e+00   3.7760534e+00   4.2460554e+00   5.0103780e+00   4.3808704e+00   4.1939019e+00   3.5087649e+00   4.2018804e+00   4.4140402e+00   3.9807996e+00   3.8067806e+00   4.6775324e+00   4.5250934e+00   4.0376133e+00   3.7473276e+00   3.9523060e+00   4.1709262e+00   3.7872951e+00   2.5251796e-01   3.0546431e-01   6.0060595e-01   9.5035453e-01   4.4535192e-01   4.0293660e-01   5.0090417e-01   1.4096146e-01   7.6752131e-01   4.1449626e-01   1.2418578e-01   6.2024833e-01   1.1845977e+00   1.4700179e+00   9.4309624e-01   5.0905001e-01   1.0003617e+00   8.0358695e-01   5.8851328e-01   7.0826681e-01   6.6384020e-01   4.3456114e-01   5.6700421e-01   2.0656129e-01   4.2667565e-01   5.2942799e-01   4.4417983e-01   2.8192292e-01   2.1269358e-01   5.7324170e-01   1.1056650e+00   1.2393677e+00   1.4096146e-01   2.5251796e-01   6.8961791e-01   1.4096146e-01   5.0090417e-01   4.1449626e-01   5.0270183e-01   7.3535471e-01   5.0905001e-01   5.7324170e-01   8.5690100e-01   1.2418578e-01   8.0587320e-01   3.2352160e-01   7.3496673e-01   3.0275928e-01   3.5601468e+00   3.2472699e+00   3.7137483e+00   2.6693888e+00   3.3563815e+00   3.1472333e+00   3.4276314e+00   1.9506288e+00   3.3563695e+00   2.5739370e+00   2.1870094e+00   2.9033014e+00   2.6860278e+00   3.3789262e+00   2.2884830e+00   3.2153154e+00   3.1667333e+00   2.7423060e+00   3.2269725e+00   2.5465772e+00   3.5123782e+00   2.7147889e+00   3.6030381e+00   3.3619470e+00   3.0427908e+00  
 3.1888219e+00   3.5910272e+00   3.7805671e+00   3.1921903e+00   2.1611020e+00   2.4491518e+00   2.3430978e+00   2.5700421e+00   3.7741357e+00   3.1615131e+00   3.2084454e+00   3.4884789e+00   3.1228939e+00   2.7575407e+00   2.6617768e+00   3.0343591e+00   3.2842184e+00   2.6656374e+00   1.9595652e+00   2.8539100e+00   2.8474367e+00   2.8585579e+00   3.0059712e+00   1.6867642e+00   2.7634340e+00   4.7806735e+00   3.8055585e+00   4.7194850e+00   4.2963997e+00   4.5579706e+00   5.4507801e+00   3.1945300e+00   5.0903533e+00   4.5297786e+00   4.9814379e+00   3.8841455e+00   4.0376849e+00   4.3069372e+00   3.7284750e+00   3.9173293e+00   4.1124749e+00   4.2221165e+00   5.5759608e+00   5.7633066e+00   3.6758942e+00   4.5370189e+00   3.6312130e+00   5.5536680e+00   3.6416405e+00   4.4736906e+00   4.7961103e+00   3.5380868e+00   3.6203213e+00   4.3467079e+00   4.5977693e+00   4.9380624e+00   5.3421274e+00   4.3637834e+00   3.7899304e+00   4.2477635e+00   5.0602038e+00   4.3953045e+00   4.2110583e+00   3.5192753e+00   4.2358121e+00   4.4378207e+00   4.0189525e+00   3.8055585e+00   4.7017335e+00   4.5483787e+00   4.0656879e+00   3.7516222e+00   3.9742971e+00   4.1845313e+00   3.7939847e+00   2.1269358e-01   4.4535192e-01   8.9366705e-01   2.1845981e-01   3.4378533e-01   3.7427929e-01   2.5651975e-01   7.7039952e-01   3.2586371e-01   2.1845981e-01   4.2667565e-01   1.2113327e+00   1.3801284e+00   8.7175869e-01   4.4651726e-01   1.0719360e+00   6.5223271e-01   7.3813096e-01   5.7867728e-01   4.4535192e-01   5.2655962e-01   6.0611244e-01   3.8776762e-01   4.0176783e-01   5.3588338e-01   5.0905001e-01   3.0000000e-01   3.0546431e-01   7.1169738e-01   9.4309624e-01   1.1327825e+00   2.5651975e-01   3.0275928e-01   8.1067767e-01   2.5651975e-01   3.2352160e-01   4.2538717e-01   3.7427929e-01   9.0252542e-01   3.0000000e-01   5.1138698e-01   7.7869083e-01   2.1845981e-01   6.6384020e-01   1.2418578e-01   6.9325418e-01   3.0546431e-01   3.7098973e+00   3.3770904e+00   3.8553941e+00   2.7868575e+00   3.4895316e+00   3.2571492e+00   3.5499573e+00   2.0646687e+00   3.4944845e+00   2.6743800e+00   2.3196869e+00   3.0181476e+00   2.8270253e+00   3.4973911e+00   2.3997585e+00   3.3600102e+00   3.2716172e+00   2.8619072e+00   3.3597438e+00   2.6649106e+00   3.6203213e+00   2.8440609e+00   3.7280682e+00   3.4822008e+00   3.1786890e+00   3.3296038e+00   3.7325066e+00   3.9121945e+00   3.3084060e+00   2.2888897e+00   2.5683989e+00   2.4649412e+00   2.6906230e+00   3.8866112e+00   3.2625043e+00   3.3219248e+00   3.6264668e+00   3.2609948e+00   2.8656468e+00   2.7738624e+00   3.1430282e+00   3.4033622e+00   2.7865812e+00   2.0797392e+00   2.9638836e+00   2.9589097e+00   2.9695568e+00   3.1337459e+00   1.7991433e+00   2.8758936e+00   4.8875515e+00   3.9111857e+00   4.8490379e+00   4.4107143e+00   4.6725771e+00   5.5854254e+00   3.2933477e+00   5.2226262e+00   4.6541348e+00   5.1068487e+00   4.0049607e+00   4.1564977e+00   4.4321573e+00   3.8331006e+00   4.0161098e+00   4.2255639e+00   4.3417782e+00   5.7091264e+00   5.8970064e+00   3.7961619e+00   4.6611065e+00   3.7313856e+00   5.6903014e+00   3.7618406e+00   4.5942943e+00   4.9290197e+00   3.6553612e+00   3.7333492e+00   4.4613366e+00   4.7342792e+00   5.0749049e+00   5.4844039e+00   4.4774673e+00   3.9102500e+00   4.3611782e+00   5.2016658e+00   4.5034762e+00   4.3281161e+00   3.6300436e+00   4.3648112e+00   4.5562166e+00   4.1482002e+00   3.9111857e+00   4.8218416e+00   4.6648403e+00   4.1879434e+00   3.8717400e+00   4.0945154e+00   4.2919258e+00   3.9013483e+00   
5.6700421e-01   9.9714776e-01   3.0546431e-01   4.4417983e-01   2.5251796e-01   3.0275928e-01   8.8835966e-01   3.2586371e-01   2.1845981e-01   4.4651726e-01   1.3360558e+00   1.5022608e+00   9.9714776e-01   5.6769031e-01   1.1765359e+00   7.6752131e-01   8.1354181e-01   6.9325418e-01   6.2092891e-01   5.4292906e-01   4.5470518e-01   4.0293660e-01   4.5581864e-01   6.4704320e-01   6.2024833e-01   1.4096146e-01   2.0656129e-01   8.1354181e-01   1.0574300e+00   1.2554784e+00   3.0275928e-01   4.4535192e-01   9.2264612e-01   3.0275928e-01   2.5251796e-01   5.2862779e-01   5.0592043e-01   8.0358695e-01   2.5251796e-01   5.6454040e-01   7.9878917e-01   2.1845981e-01   7.6752131e-01   1.2418578e-01   8.1242502e-01   4.1449626e-01   3.5875094e+00   3.2277825e+00   3.7190120e+00   2.6019240e+00   3.3414931e+00   3.0741797e+00   3.3904673e+00   1.8683030e+00   3.3506325e+00   2.4892190e+00   2.1209506e+00   2.8530088e+00   2.6606291e+00   3.3264150e+00   2.2345869e+00   3.2325480e+00   3.0894572e+00   2.6859989e+00   3.1954750e+00   2.4836725e+00   3.4467337e+00   2.6928468e+00   3.5602810e+00   3.3090659e+00   3.0346426e+00   3.1953687e+00   3.5930845e+00   3.7635112e+00   3.1392617e+00   2.1242643e+00   2.3839455e+00   2.2806773e+00   2.5225548e+00   3.7070070e+00   3.0760590e+00   3.1551922e+00   3.4865435e+00   3.1033781e+00   2.6867856e+00   2.5906376e+00   2.9536363e+00   3.2348458e+00   2.6148507e+00   1.8841403e+00   2.7819255e+00   2.7801917e+00   2.7920574e+00   2.9774862e+00   1.6195190e+00   2.7001131e+00   4.7151191e+00   3.7310738e+00   4.6963107e+00   4.2348119e+00   4.5036643e+00   5.4345951e+00   3.1040223e+00   5.0660245e+00   4.4858951e+00   4.9576471e+00   3.8485743e+00   3.9894963e+00   4.2781102e+00   3.6535208e+00   3.8473084e+00   4.0656969e+00   4.1736133e+00   5.5611269e+00   5.7439963e+00   3.6142694e+00   4.5082936e+00   3.5527533e+00   5.5400450e+00   3.5988819e+00   4.4321573e+00   4.7754556e+00   3.4913787e+00   3.5638529e+00   4.2915574e+00   4.5844335e+00   4.9269527e+00   5.3501611e+00   4.3091163e+00   3.7395252e+00   4.1763853e+00   5.0687940e+00   4.3363292e+00   4.1568278e+00   3.4594086e+00   4.2175903e+00   4.4004449e+00   4.0139427e+00   3.7310738e+00   4.6611132e+00   4.5083524e+00   4.0415593e+00   3.7070350e+00   3.9354060e+00   4.1243443e+00   3.7225506e+00   4.8927739e-01   4.1449626e-01   2.0656129e-01   8.1242502e-01   5.0270183e-01   4.0293660e-01   2.8192292e-01   6.0611244e-01   8.2305664e-01   8.2899253e-01   9.3824087e-01   4.5581864e-01   1.4096146e-01   7.1840099e-01   2.1845981e-01   4.5470518e-01   2.1845981e-01   4.9674312e-01   4.2418962e-01   5.1607523e-01   6.0551856e-01   2.8192292e-01   2.1269358e-01   2.4837156e-01   4.5470518e-01   5.1607523e-01   4.2667565e-01   5.0991930e-01   6.8917100e-01   5.0270183e-01   4.1312257e-01   5.0180477e-01   5.0270183e-01   7.4549115e-01   2.1269358e-01   1.4096146e-01   1.3190071e+00   6.4755655e-01   4.1449626e-01   5.1691876e-01   6.0611244e-01   2.5251796e-01   4.9674312e-01   3.0546431e-01   3.0000000e-01   3.5310961e+00   3.2313174e+00   3.6912396e+00   2.7363446e+00   3.3486156e+00   3.1550780e+00   3.4146950e+00   2.0587064e+00   3.3422688e+00   2.6000813e+00   2.3658814e+00   2.9005672e+00   2.7581254e+00   3.3764180e+00   2.2982662e+00   3.1914715e+00   3.1684808e+00   2.7581145e+00   3.2719146e+00   2.5906376e+00   3.5076679e+00   2.7164648e+00   3.6146980e+00   3.3629944e+00   3.0317688e+00   3.1699749e+00   3.5767944e+00   3.7653940e+00   3.1912695e+00   2.2046610e+00   2.5131017e+00   
2.4132939e+00   2.5882494e+00   3.7797776e+00   3.1649733e+00   3.1986968e+00   3.4685882e+00   3.1575873e+00   2.7599092e+00   2.7031874e+00   3.0575551e+00   3.2787144e+00   2.6914804e+00   2.0914773e+00   2.8714673e+00   2.8482104e+00   2.8631525e+00   3.0002861e+00   1.8009624e+00   2.7738624e+00   4.7744685e+00   3.8132783e+00   4.7036953e+00   4.2925903e+00   4.5507995e+00   5.4317036e+00   3.2245243e+00   5.0748136e+00   4.5314818e+00   4.9621679e+00   3.8715927e+00   4.0372136e+00   4.2937599e+00   3.7469906e+00   3.9213497e+00   4.1030149e+00   4.2136261e+00   5.5512721e+00   5.7499082e+00   3.7127205e+00   4.5218897e+00   3.6377830e+00   5.5357771e+00   3.6429670e+00   4.4609633e+00   4.7775824e+00   3.5373240e+00   3.6158814e+00   4.3437318e+00   4.5790474e+00   4.9211035e+00   5.3110568e+00   4.3608329e+00   3.7876656e+00   4.2543813e+00   5.0356467e+00   4.3872625e+00   4.2028863e+00   3.5161021e+00   4.2189979e+00   4.4261470e+00   4.0000622e+00   3.8132783e+00   4.6893387e+00   4.5361087e+00   4.0527696e+00   3.7622948e+00   3.9645936e+00   4.1768667e+00   3.7924679e+00   8.6137722e-01   5.7867728e-01   1.2470767e+00   8.6361309e-01   2.8192292e-01   6.9369532e-01   9.8450810e-01   1.2949422e+00   5.7324170e-01   5.3588338e-01   4.0000000e-01   4.8135521e-01   3.0546431e-01   3.2816937e-01   5.0817745e-01   3.4378533e-01   9.4558103e-01   6.2024833e-01   6.9728513e-01   9.2288144e-01   5.6700421e-01   4.3691963e-01   5.4292906e-01   8.7202528e-01   8.9095811e-01   5.0817745e-01   3.6171588e-01   3.8934542e-01   8.6361309e-01   7.9878917e-01   5.0592043e-01   8.6361309e-01   1.1959482e+00   5.4292906e-01   5.6454040e-01   1.6807352e+00   1.1055064e+00   5.0592043e-01   3.2586371e-01   9.7779835e-01   3.2816937e-01   9.4558103e-01   2.8507955e-01   6.6827038e-01   3.1533911e+00   2.8840079e+00   3.3274872e+00   2.5335921e+00   3.0169509e+00   2.8661222e+00   3.0732956e+00   1.9492232e+00   3.0013391e+00   2.3437032e+00   2.3116343e+00   2.5873149e+00   2.5591371e+00   3.0631725e+00   2.0220740e+00   2.8270253e+00   2.8656468e+00   2.4892190e+00   3.0178921e+00   2.3656538e+00   3.1846482e+00   2.4132559e+00   3.3163294e+00   3.0590735e+00   2.6993871e+00   2.8174914e+00   3.2310326e+00   3.4162231e+00   2.8802219e+00   1.9932786e+00   2.3173648e+00   2.2314118e+00   2.3212593e+00   3.4779999e+00   2.8654680e+00   2.8662571e+00   3.1113805e+00   2.8927401e+00   2.4634131e+00   2.4685230e+00   2.7948819e+00   2.9596963e+00   2.4341346e+00   2.0039447e+00   2.6000813e+00   2.5498770e+00   2.5700421e+00   2.6813098e+00   1.7123398e+00   2.4913669e+00   4.4418755e+00   3.5123791e+00   4.3488707e+00   3.9713081e+00   4.2172545e+00   5.0700045e+00   2.9631582e+00   4.7239900e+00   4.2113881e+00   4.5979409e+00   3.5255287e+00   3.7162377e+00   3.9448212e+00   3.4598280e+00   3.6097419e+00   3.7620043e+00   3.8810240e+00   5.1822310e+00   5.3953096e+00   3.4508156e+00   4.1665786e+00   3.3353616e+00   5.1763300e+00   3.3260356e+00   4.1143832e+00   4.4201622e+00   3.2188998e+00   3.2929599e+00   4.0183758e+00   4.2229849e+00   4.5637045e+00   4.9290256e+00   4.0343724e+00   3.4708900e+00   3.9559935e+00   4.6576736e+00   4.0502252e+00   3.8718131e+00   3.1963475e+00   3.8610636e+00   4.0785553e+00   3.6345765e+00   3.5123791e+00   4.3416284e+00   4.1864302e+00   3.7018916e+00   3.4568305e+00   3.6254423e+00   3.8415026e+00   3.4775621e+00   4.0293660e-01   5.0905001e-01   3.8934542e-01   8.1130291e-01   2.5251796e-01   4.2538717e-01   4.8927739e-01   1.2406194e+00   1.3074132e+00   
8.5233811e-01   5.0090417e-01   1.1185330e+00   5.6700421e-01   8.1099042e-01   5.3022554e-01   4.1449626e-01   5.3665999e-01   5.0905001e-01   5.0592043e-01   4.1449626e-01   6.0181382e-01   6.0060595e-01   2.5651975e-01   3.4583729e-01   8.0064372e-01   8.1558458e-01   1.0597541e+00   3.8934542e-01   4.2667565e-01   9.0074515e-01   3.8934542e-01   4.1586001e-01   5.0180477e-01   4.0293660e-01   1.1003197e+00   2.5651975e-01   4.5581864e-01   6.6539428e-01   4.1312257e-01   5.7324170e-01   2.0656129e-01   7.1504098e-01   4.0293660e-01   3.6583368e+00   3.3018939e+00   3.7934214e+00   2.7118627e+00   3.4196168e+00   3.1646752e+00   3.4663954e+00   1.9965608e+00   3.4302944e+00   2.5753574e+00   2.2837561e+00   2.9283888e+00   2.7788099e+00   3.4116298e+00   2.3101107e+00   3.3028359e+00   3.1719381e+00   2.7826178e+00   3.2957091e+00   2.5882494e+00   3.5217244e+00   2.7724782e+00   3.6509512e+00   3.3998935e+00   3.1126354e+00   3.2681313e+00   3.6719821e+00   3.8384263e+00   3.2202056e+00   2.2240476e+00   2.4960474e+00   2.3970928e+00   2.6119950e+00   3.7951491e+00   3.1592993e+00   3.2300555e+00   3.5604418e+00   3.2025128e+00   2.7701355e+00   2.6892823e+00   3.0524247e+00   3.3177721e+00   2.7092568e+00   2.0227167e+00   2.8731220e+00   2.8671099e+00   2.8775912e+00   3.0586720e+00   1.7332099e+00   2.7865812e+00   4.7866828e+00   3.8123695e+00   4.7714708e+00   4.3189924e+00   4.5796358e+00   5.5132325e+00   3.1921277e+00   5.1489022e+00   4.5737586e+00   5.0249531e+00   3.9185849e+00   4.0697987e+00   4.3502657e+00   3.7349501e+00   3.9102370e+00   4.1311343e+00   4.2548570e+00   5.6362307e+00   5.8240252e+00   3.7174048e+00   4.5773480e+00   3.6270581e+00   5.6209145e+00   3.6774027e+00   4.5072397e+00   4.8555167e+00   3.5675580e+00   3.6401392e+00   4.3693804e+00   4.6657186e+00   5.0062061e+00   5.4227201e+00   4.3844239e+00   3.8261182e+00   4.2718480e+00   5.1373047e+00   4.4043123e+00   4.2383633e+00   3.5347392e+00   4.2868650e+00   4.4668117e+00   4.0716645e+00   3.8123695e+00   4.7338066e+00   4.5734052e+00   4.1036179e+00   3.7882079e+00   4.0078491e+00   4.1922661e+00   3.8027591e+00   6.8961791e-01   3.0546431e-01   4.4417983e-01   2.0656129e-01   4.1586001e-01   7.6625946e-01   8.9687438e-01   1.0919712e+00   5.7867728e-01   1.5422108e-01   7.3851529e-01   4.0293660e-01   4.1312257e-01   3.2586371e-01   5.7257017e-01   3.2816937e-01   4.1312257e-01   4.0147421e-01   2.0656129e-01   2.0656129e-01   2.0656129e-01   3.2586371e-01   3.2586371e-01   4.1312257e-01   7.0437330e-01   8.5205778e-01   3.0546431e-01   3.2352160e-01   5.0905001e-01   3.0546431e-01   6.5172743e-01   1.0000000e-01   2.1269358e-01   1.1283882e+00   6.1092863e-01   4.0293660e-01   5.0592043e-01   4.1586001e-01   4.0293660e-01   4.1449626e-01   3.7255734e-01   1.2418578e-01   3.4445326e+00   3.1392617e+00   3.6011035e+00   2.6118700e+00   3.2516941e+00   3.0511838e+00   3.3218097e+00   1.9189245e+00   3.2468925e+00   2.4924452e+00   2.2081024e+00   2.8038661e+00   2.6291264e+00   3.2767369e+00   2.1964719e+00   3.1025274e+00   3.0696611e+00   2.6485861e+00   3.1554034e+00   2.4715204e+00   3.4135983e+00   2.6151245e+00   3.5092032e+00   3.2604423e+00   2.9354140e+00   3.0782101e+00   3.4818889e+00   3.6726568e+00   3.0922811e+00   2.0843471e+00   2.3874354e+00   2.2845234e+00   2.4794505e+00   3.6775470e+00   3.0659000e+00   3.1055388e+00   3.3775462e+00   3.0430948e+00   2.6597612e+00   2.5873149e+00   2.9471553e+00   3.1807044e+00   2.5795723e+00   1.9450499e+00   2.7640668e+00   2.7473221e+00   
2.7611864e+00   2.9015702e+00   1.6626642e+00   2.6693888e+00   4.6823704e+00   3.7130994e+00   4.6117428e+00   4.1946425e+00   4.4565357e+00   5.3399939e+00   3.1168466e+00   4.9805386e+00   4.4303862e+00   4.8738189e+00   3.7806643e+00   3.9387918e+00   4.2018804e+00   3.6441274e+00   3.8290120e+00   4.0132700e+00   4.1177139e+00   5.4615788e+00   5.6559440e+00   3.5983434e+00   4.4321573e+00   3.5405803e+00   5.4429455e+00   3.5441556e+00   4.3687483e+00   4.6853394e+00   3.4399664e+00   3.5203203e+00   4.2473048e+00   4.4861009e+00   4.8281381e+00   5.2242271e+00   4.2652659e+00   3.6876909e+00   4.1503255e+00   4.9488209e+00   4.2966585e+00   4.1071698e+00   3.4205830e+00   4.1292490e+00   4.3363292e+00   3.9150359e+00   3.7130994e+00   4.5977729e+00   4.4473292e+00   3.9643224e+00   3.6603913e+00   3.8715927e+00   4.0861975e+00   3.6954796e+00   5.0991930e-01   1.1327825e+00   5.7257017e-01   4.0293660e-01   3.0811765e-01   1.5771666e+00   1.7488874e+00   1.2431040e+00   8.1273630e-01   1.4170618e+00   1.0106392e+00   1.0389435e+00   9.3824087e-01   7.3813096e-01   7.5976039e-01   6.6491075e-01   6.0611244e-01   6.9728513e-01   8.8861541e-01   8.5177726e-01   3.8776762e-01   4.2538717e-01   1.0346741e+00   1.2943100e+00   1.5015203e+00   5.0991930e-01   6.2482915e-01   1.1473003e+00   5.0991930e-01   1.2418578e-01   7.6752131e-01   7.4586719e-01   6.0181382e-01   3.0275928e-01   7.7869083e-01   1.0440187e+00   4.0293660e-01   1.0120221e+00   3.2352160e-01   1.0597541e+00   6.4704320e-01   3.7504939e+00   3.3717768e+00   3.8731169e+00   2.7062054e+00   3.4865562e+00   3.1921903e+00   3.5262546e+00   1.9522524e+00   3.5018009e+00   2.5914913e+00   2.1818668e+00   2.9807120e+00   2.7874290e+00   3.4557351e+00   2.3604042e+00   3.3915488e+00   3.2027420e+00   2.8150728e+00   3.3206640e+00   2.6018930e+00   3.5642457e+00   2.8360166e+00   3.6902583e+00   3.4394878e+00   3.1847477e+00   3.3503379e+00   3.7461474e+00   3.9068076e+00   3.2666666e+00   2.2590074e+00   2.4950353e+00   2.3935209e+00   2.6534332e+00   3.8259590e+00   3.1834936e+00   3.2834077e+00   3.6377049e+00   3.2390016e+00   2.8060305e+00   2.7012392e+00   3.0647279e+00   3.3658240e+00   2.7423171e+00   1.9645331e+00   2.8984764e+00   2.9033203e+00   2.9139413e+00   3.1189900e+00   1.7118795e+00   2.8228127e+00   4.8290847e+00   3.8416142e+00   4.8350745e+00   4.3569606e+00   4.6261788e+00   5.5774554e+00   3.1958228e+00   5.2067803e+00   4.6153241e+00   5.0947934e+00   3.9803199e+00   4.1159553e+00   4.4131159e+00   3.7587872e+00   3.9513472e+00   4.1881498e+00   4.3024754e+00   5.7071195e+00   5.8839539e+00   3.7280682e+00   4.6419531e+00   3.6578722e+00   5.6843788e+00   3.7276068e+00   4.5625120e+00   4.9179194e+00   3.6182608e+00   3.6866535e+00   4.4136707e+00   4.7307689e+00   5.0723886e+00   5.5062533e+00   4.4301849e+00   3.8690719e+00   4.2943891e+00   5.2192815e+00   4.4536259e+00   4.2828634e+00   3.5797958e+00   4.3570079e+00   4.5278761e+00   4.1542558e+00   3.8416142e+00   4.7899951e+00   4.6341170e+00   4.1740602e+00   3.8316735e+00   4.0656969e+00   4.2413113e+00   3.8376713e+00   6.8961791e-01   3.0811765e-01   1.4096146e-01   6.4755655e-01   1.1229906e+00   1.3835747e+00   8.6361309e-01   4.2667565e-01   9.4009473e-01   7.0784540e-01   5.3665999e-01   6.2482915e-01   6.3977563e-01   4.3691963e-01   4.4651726e-01   1.5422108e-01   3.7598397e-01   4.4535192e-01   3.7598397e-01   2.1845981e-01   1.4096146e-01   5.5419992e-01   1.0065841e+00   1.1474460e+00   0.0000000e+00   3.0811765e-01   6.5223271e-01   
0.0000000e+00   5.0991930e-01   3.2586371e-01   4.2667565e-01   8.3172002e-01   5.0991930e-01   5.6769031e-01   7.5082357e-01   2.1845981e-01   7.0479928e-01   3.0811765e-01   6.4755655e-01   2.1845981e-01   3.4865562e+00   3.1726595e+00   3.6377960e+00   2.5987470e+00   3.2814045e+00   3.0627375e+00   3.3515846e+00   1.8841865e+00   3.2769379e+00   2.5038079e+00   2.1311468e+00   2.8311678e+00   2.6104387e+00   3.2962520e+00   2.2214438e+00   3.1433122e+00   3.0878634e+00   2.6552472e+00   3.1570103e+00   2.4668912e+00   3.4394878e+00   2.6411293e+00   3.5233648e+00   3.2747247e+00   2.9659871e+00   3.1154783e+00   3.5134741e+00   3.7059620e+00   3.1148696e+00   2.0851901e+00   2.3731428e+00   2.2655571e+00   2.4927109e+00   3.6920087e+00   3.0823446e+00   3.1337459e+00   3.4135200e+00   3.0481703e+00   2.6780487e+00   2.5874301e+00   2.9489507e+00   3.2027420e+00   2.5873149e+00   1.8973383e+00   2.7738355e+00   2.7632614e+00   2.7778954e+00   2.9269923e+00   1.6390769e+00   2.6848587e+00   4.7106706e+00   3.7313856e+00   4.6446321e+00   4.2142736e+00   4.4836580e+00   5.3716885e+00   3.1250284e+00   5.0074019e+00   4.4485220e+00   4.9128219e+00   3.8150636e+00   3.9624529e+00   4.2358121e+00   3.6602286e+00   3.8605980e+00   4.0488387e+00   4.1418643e+00   5.4970002e+00   5.6855224e+00   3.5964347e+00   4.4685630e+00   3.5634461e+00   5.4730406e+00   3.5693950e+00   4.3989089e+00   4.7150659e+00   3.4668130e+00   3.5464993e+00   4.2723380e+00   4.5155386e+00   4.8594290e+00   5.2647079e+00   4.2921213e+00   3.7064459e+00   4.1581964e+00   4.9913682e+00   4.3286007e+00   4.1303097e+00   3.4468286e+00   4.1669742e+00   4.3729308e+00   3.9624170e+00   3.7313856e+00   4.6297577e+00   4.4844827e+00   4.0056359e+00   3.6817961e+00   3.9035218e+00   4.1179678e+00   3.7164366e+00   6.2024833e-01   8.1304731e-01   1.1868139e+00   4.8036801e-01   7.1799256e-01   2.8192292e-01   3.2816937e-01   3.2816937e-01   3.0546431e-01   3.2352160e-01   3.2352160e-01   8.5205778e-01   4.8927739e-01   6.6384020e-01   7.3496673e-01   4.5581864e-01   2.4837156e-01   3.2586371e-01   7.6752131e-01   7.4549115e-01   3.2352160e-01   4.1449626e-01   5.0180477e-01   6.8961791e-01   5.8851328e-01   2.5251796e-01   6.8961791e-01   1.0919712e+00   3.7255734e-01   4.2667565e-01   1.4993782e+00   1.0344911e+00   5.0592043e-01   4.5581864e-01   8.1304731e-01   3.0546431e-01   8.5205778e-01   1.0000000e-01   4.9766035e-01   3.3472053e+00   3.0922811e+00   3.5254266e+00   2.6661987e+00   3.2094276e+00   3.0570957e+00   3.2869053e+00   2.0190980e+00   3.1913594e+00   2.5206151e+00   2.3403819e+00   2.7928582e+00   2.6680945e+00   3.2615924e+00   2.2070201e+00   3.0233425e+00   3.0716969e+00   2.6575076e+00   3.1694367e+00   2.5088543e+00   3.4030318e+00   2.5954147e+00   3.4988409e+00   3.2483608e+00   2.8891737e+00   3.0123702e+00   3.4182420e+00   3.6203759e+00   3.0811775e+00   2.1190324e+00   2.4416796e+00   2.3440712e+00   2.4897570e+00   3.6753309e+00   3.0715435e+00   3.0851463e+00   3.3123070e+00   3.0424689e+00   2.6625505e+00   2.6241824e+00   2.9689697e+00   3.1616811e+00   2.5961850e+00   2.0559262e+00   2.7803619e+00   2.7462372e+00   2.7639489e+00   2.8736288e+00   1.7674365e+00   2.6773131e+00   4.6660957e+00   3.7173526e+00   4.5567672e+00   4.1782968e+00   4.4326194e+00   5.2720689e+00   3.1469325e+00   4.9232255e+00   4.4057732e+00   4.8164157e+00   3.7433882e+00   3.9194796e+00   4.1567419e+00   3.6582432e+00   3.8303544e+00   3.9861488e+00   4.0892044e+00   5.3882212e+00   5.5946413e+00   3.6180819e+00   
4.3839191e+00   3.5469476e+00   5.3734444e+00   3.5262672e+00   4.3306501e+00   4.6237863e+00   3.4237160e+00   3.5051302e+00   4.2288456e+00   4.4201622e+00   4.7609637e+00   5.1280035e+00   4.2469785e+00   3.6684143e+00   4.1480002e+00   4.8602572e+00   4.2765700e+00   4.0824098e+00   3.4092877e+00   4.0737132e+00   4.2991233e+00   3.8524190e+00   3.7173526e+00   4.5590471e+00   4.4107160e+00   3.9202843e+00   3.6509512e+00   3.8388884e+00   4.0680120e+00   3.6894983e+00   4.1449626e-01   6.6539428e-01   1.0717668e+00   1.1847335e+00   7.0776547e-01   3.2816937e-01   9.2095040e-01   4.4651726e-01   6.0060595e-01   3.8934542e-01   6.1092863e-01   3.7598397e-01   3.0000000e-01   4.1312257e-01   2.4837156e-01   4.0293660e-01   4.1312257e-01   2.0656129e-01   3.0000000e-01   6.0611244e-01   7.3535471e-01   9.3801395e-01   3.0811765e-01   4.2538717e-01   7.1462831e-01   3.0811765e-01   5.2574978e-01   3.0275928e-01   3.2816937e-01   1.1107977e+00   4.5470518e-01   4.1449626e-01   4.8927739e-01   4.1449626e-01   4.4417983e-01   2.8192292e-01   5.2942799e-01   2.5251796e-01   3.4297053e+00   3.0906838e+00   3.5704156e+00   2.5301680e+00   3.2062204e+00   2.9663489e+00   3.2615889e+00   1.8330979e+00   3.2074600e+00   2.4030878e+00   2.1292724e+00   2.7344480e+00   2.5716369e+00   3.2053511e+00   2.1242643e+00   3.0798277e+00   2.9831836e+00   2.5729378e+00   3.0964590e+00   2.3917863e+00   3.3353616e+00   2.5635110e+00   3.4441347e+00   3.1882407e+00   2.8938821e+00   3.0477086e+00   3.4484194e+00   3.6265826e+00   3.0209783e+00   2.0203134e+00   2.3063579e+00   2.2046610e+00   2.4100833e+00   3.5972040e+00   2.9748436e+00   3.0349291e+00   3.3414931e+00   2.9918962e+00   2.5764694e+00   2.5038051e+00   2.8573838e+00   3.1113597e+00   2.5079404e+00   1.8623849e+00   2.6799601e+00   2.6656374e+00   2.6804452e+00   2.8458006e+00   1.5870088e+00   2.5906376e+00   4.6056614e+00   3.6293396e+00   4.5625120e+00   4.1184849e+00   4.3862724e+00   5.2957861e+00   3.0253131e+00   4.9300368e+00   4.3656957e+00   4.8256905e+00   3.7228356e+00   3.8717400e+00   4.1487889e+00   3.5605424e+00   3.7509165e+00   3.9489970e+00   4.0502806e+00   5.4193574e+00   5.6096505e+00   3.5201263e+00   4.3797165e+00   3.4555095e+00   5.4004015e+00   3.4805320e+00   4.3069452e+00   4.6373516e+00   3.3738930e+00   3.4478147e+00   4.1762321e+00   4.4428877e+00   4.7870294e+00   5.1982218e+00   4.1948678e+00   3.6180819e+00   4.0668114e+00   4.9227056e+00   4.2245318e+00   4.0358897e+00   3.3459883e+00   4.0835979e+00   4.2783731e+00   3.8797354e+00   3.6293396e+00   4.5370189e+00   4.3879553e+00   3.9155334e+00   3.5955337e+00   3.8113970e+00   4.0131848e+00   3.6132595e+00   5.2862779e-01   1.2431040e+00   1.5013525e+00   9.7779835e-01   5.3588338e-01   1.0669582e+00   8.1385214e-01   6.6432544e-01   7.2823007e-01   6.5223271e-01   5.1138698e-01   5.6700421e-01   2.5251796e-01   4.6472023e-01   5.6769031e-01   4.9766035e-01   2.5651975e-01   2.1269358e-01   6.6432544e-01   1.1134787e+00   1.2632199e+00   1.4096146e-01   2.8507955e-01   7.6787403e-01   1.4096146e-01   4.0293660e-01   4.4651726e-01   5.1691876e-01   7.1840099e-01   4.1586001e-01   6.3108414e-01   8.7021234e-01   2.0000000e-01   8.1385214e-01   2.5251796e-01   7.6787403e-01   3.2586371e-01   3.6025735e+00   3.2810515e+00   3.7511944e+00   2.6894009e+00   3.3904673e+00   3.1636869e+00   3.4574937e+00   1.9666356e+00   3.3893691e+00   2.5954173e+00   2.1997395e+00   2.9322283e+00   2.7092568e+00   3.4012145e+00   2.3186758e+00   3.2568914e+00   3.1861493e+00   
2.7595194e+00   3.2561045e+00   2.5646808e+00   3.5381764e+00   2.7476411e+00   3.6278993e+00   3.3809159e+00   3.0768226e+00   3.2277675e+00   3.6265617e+00   3.8150532e+00   3.2176230e+00   2.1864840e+00   2.4668912e+00   2.3596992e+00   2.5949561e+00   3.7935487e+00   3.1789378e+00   3.2360886e+00   3.5252258e+00   3.1522058e+00   2.7777040e+00   2.6819136e+00   3.0473722e+00   3.3079290e+00   2.6886547e+00   1.9757309e+00   2.8726212e+00   2.8654680e+00   2.8788483e+00   3.0349462e+00   1.7160413e+00   2.7852734e+00   4.8087107e+00   3.8282466e+00   4.7531334e+00   4.3176393e+00   4.5857287e+00   5.4831923e+00   3.2147850e+00   5.1185883e+00   4.5544260e+00   5.0194259e+00   3.9185849e+00   4.0655452e+00   4.3416283e+00   3.7535680e+00   3.9509795e+00   4.1478442e+00   4.2472736e+00   5.6096505e+00   5.7957776e+00   3.6945993e+00   4.5734622e+00   3.6568202e+00   5.5854254e+00   3.6720840e+00   4.5038991e+00   4.8262859e+00   3.5684917e+00   3.6474985e+00   4.3740189e+00   4.6282931e+00   4.9713928e+00   5.3806679e+00   4.3928114e+00   3.8121990e+00   4.2612863e+00   5.1032991e+00   4.4267055e+00   4.2347444e+00   3.5464871e+00   4.2738510e+00   4.4745238e+00   4.0663411e+00   3.8282466e+00   4.7338066e+00   4.5852690e+00   4.1075310e+00   3.7823897e+00   4.0070636e+00   4.2156933e+00   3.8157950e+00   1.6177449e+00   1.7454671e+00   1.2604558e+00   8.6361309e-01   1.4955532e+00   1.0118409e+00   1.1594648e+00   9.6204649e-01   6.2081167e-01   9.1750357e-01   8.7504951e-01   7.6752131e-01   8.0660588e-01   9.5965467e-01   9.2859317e-01   5.7324170e-01   6.2205176e-01   1.1313840e+00   1.2653669e+00   1.4930627e+00   6.4755655e-01   7.0479928e-01   1.2236003e+00   6.4755655e-01   2.1269358e-01   8.5105559e-01   7.7360126e-01   7.1169738e-01   2.5651975e-01   8.7229670e-01   1.1327578e+00   5.3588338e-01   1.0269295e+00   3.8934542e-01   1.1042097e+00   7.2823007e-01   4.0317004e+00   3.6659830e+00   4.1618561e+00   3.0123702e+00   3.7804276e+00   3.4970843e+00   3.8244351e+00   2.2591077e+00   3.7930789e+00   2.8953397e+00   2.4889124e+00   3.2809188e+00   3.0866488e+00   3.7578933e+00   2.6595288e+00   3.6754272e+00   3.5073435e+00   3.1173742e+00   3.6212723e+00   2.9065572e+00   3.8667462e+00   3.1302383e+00   3.9918403e+00   3.7416229e+00   3.4760444e+00   3.6375992e+00   4.0358101e+00   4.2016915e+00   3.5683934e+00   2.5569968e+00   2.8007817e+00   2.6989368e+00   2.9539253e+00   4.1306024e+00   3.4882801e+00   3.5831257e+00   3.9280671e+00   3.5362697e+00   3.1099883e+00   3.0065416e+00   3.3706887e+00   3.6672620e+00   3.0442126e+00   2.2719663e+00   3.2032390e+00   3.2071637e+00   3.2176230e+00   3.4155491e+00   2.0139971e+00   3.1260028e+00   5.1310217e+00   4.1456639e+00   5.1323742e+00   4.6609614e+00   4.9284761e+00   5.8739676e+00   3.4984873e+00   5.5048216e+00   4.9175276e+00   5.3898806e+00   4.2781762e+00   4.4176533e+00   4.7107211e+00   4.0621414e+00   4.2494597e+00   4.4865569e+00   4.6045396e+00   6.0012333e+00   6.1816086e+00   4.0339598e+00   4.9390284e+00   3.9601358e+00   5.9803696e+00   4.0277694e+00   4.8626276e+00   5.2147981e+00   3.9185849e+00   3.9887029e+00   4.7161140e+00   5.0257341e+00   5.3673499e+00   5.7939320e+00   4.7320534e+00   4.1713411e+00   4.5995433e+00   5.5085511e+00   4.7536729e+00   4.5857356e+00   3.8819510e+00   4.6519178e+00   4.8256399e+00   4.4430890e+00   4.1456639e+00   5.0898961e+00   4.9316646e+00   4.4680158e+00   4.1325542e+00   4.3648035e+00   4.5412859e+00   4.1418557e+00   4.5581864e-01   4.1586001e-01   7.7074935e-01   
5.0991930e-01   7.1840099e-01   7.2486328e-01   7.3145860e-01   1.2122249e+00   9.2112464e-01   1.1384810e+00   1.1451403e+00   9.1163729e-01   7.0386584e-01   7.4855857e-01   1.2220203e+00   1.1947245e+00   6.6827038e-01   6.2081167e-01   3.4378533e-01   1.1229906e+00   9.9348625e-01   5.2942799e-01   1.1229906e+00   1.5344133e+00   8.2275389e-01   8.5233811e-01   1.8985661e+00   1.4692412e+00   8.9653332e-01   8.7420176e-01   1.2431040e+00   7.3813096e-01   1.2951131e+00   5.5419992e-01   9.3801395e-01   3.5789198e+00   3.3663244e+00   3.7753619e+00   3.0049442e+00   3.4909841e+00   3.3695525e+00   3.5654259e+00   2.3989172e+00   3.4663502e+00   2.8427326e+00   2.7185849e+00   3.0894572e+00   3.0108764e+00   3.5617386e+00   2.5173832e+00   3.2758681e+00   3.3732554e+00   2.9816791e+00   3.4895316e+00   2.8451507e+00   3.6905956e+00   2.8989400e+00   3.8036776e+00   3.5549103e+00   3.1734856e+00   3.2772927e+00   3.6834126e+00   3.8868430e+00   3.3809159e+00   2.4588872e+00   2.7850016e+00   2.6918796e+00   2.8113773e+00   3.9804187e+00   3.3742776e+00   3.3692592e+00   3.5726491e+00   3.3587234e+00   2.9691171e+00   2.9539253e+00   3.2926883e+00   3.4584304e+00   2.9217347e+00   2.4321061e+00   3.0988783e+00   3.0546600e+00   3.0736357e+00   3.1699903e+00   2.1306832e+00   2.9913743e+00   4.9420128e+00   4.0177712e+00   4.8123874e+00   4.4703485e+00   4.7113827e+00   5.5147622e+00   3.4715574e+00   5.1811127e+00   4.6944291e+00   5.0587041e+00   4.0117533e+00   4.2085851e+00   4.4199792e+00   3.9616570e+00   4.1103933e+00   4.2537610e+00   4.3721243e+00   5.6218420e+00   5.8419148e+00   3.9412893e+00   4.6390186e+00   3.8408636e+00   5.6159291e+00   3.8175051e+00   4.5984929e+00   4.8771654e+00   3.7143727e+00   3.7943375e+00   4.5141564e+00   4.6734732e+00   5.0086627e+00   5.3396700e+00   4.5298770e+00   3.9647930e+00   4.4561969e+00   5.0778756e+00   4.5477422e+00   4.3671210e+00   3.6996802e+00   4.3269400e+00   4.5602465e+00   4.0901232e+00   4.0177712e+00   4.8233796e+00   4.6689006e+00   4.1757336e+00   3.9466531e+00   4.1130674e+00   4.3408596e+00   3.9840684e+00   5.3588338e-01   9.7098574e-01   6.0611244e-01   7.4549115e-01   1.0101422e+00   8.1242502e-01   1.2342162e+00   1.1486378e+00   1.1959482e+00   1.4468211e+00   1.0906388e+00   9.4287188e-01   1.0346741e+00   1.3793330e+00   1.4148192e+00   1.0065841e+00   5.5419992e-01   2.8507955e-01   1.3835747e+00   1.2681309e+00   9.0679720e-01   1.3835747e+00   1.6801917e+00   1.0588560e+00   1.0122141e+00   2.2040881e+00   1.5564198e+00   1.0122141e+00   7.7553525e-01   1.4987155e+00   7.4893123e-01   1.4320120e+00   7.3813096e-01   1.1765359e+00   3.3186105e+00   3.0934278e+00   3.5115632e+00   2.9015832e+00   3.2557855e+00   3.1381850e+00   3.2787144e+00   2.3983798e+00   3.2261964e+00   2.6655261e+00   2.7738368e+00   2.8425716e+00   2.9377092e+00   3.3097860e+00   2.3365894e+00   3.0236933e+00   3.1147370e+00   2.7988444e+00   3.3431646e+00   2.7201960e+00   3.4033622e+00   2.7009102e+00   3.5863979e+00   3.3171611e+00   2.9439491e+00   3.0327979e+00   3.4475678e+00   3.6195561e+00   3.1337459e+00   2.3758157e+00   2.6957302e+00   2.6219409e+00   2.6429556e+00   3.7305206e+00   3.1152653e+00   3.0762634e+00   3.3088561e+00   3.2115055e+00   2.7318540e+00   2.8101506e+00   3.0967820e+00   3.1998457e+00   2.7619926e+00   2.4596921e+00   2.9002737e+00   2.8148869e+00   2.8449691e+00   2.9378173e+00   2.1723936e+00   2.7843048e+00   4.6358686e+00   3.7621042e+00   4.5316360e+00   4.1928583e+00   4.4221843e+00   5.2364242e+00   
3.2682245e+00   4.9072991e+00   4.4428425e+00   4.7561724e+00   3.7219581e+00   3.9498605e+00   4.1390009e+00   3.7275066e+00   3.8377652e+00   3.9569614e+00   4.0917968e+00   5.3289557e+00   5.5738695e+00   3.7540308e+00   4.3457024e+00   3.5797958e+00   5.3465693e+00   3.5720882e+00   4.3012547e+00   4.5936468e+00   3.4616111e+00   3.5205889e+00   4.2389017e+00   4.4031390e+00   4.7432976e+00   5.0569442e+00   4.2531342e+00   3.7092459e+00   4.2038056e+00   4.8064634e+00   4.2402361e+00   4.0806404e+00   3.4276314e+00   4.0438546e+00   4.2676463e+00   3.8085992e+00   3.7621042e+00   4.5272919e+00   4.3667579e+00   3.8946701e+00   3.7163265e+00   3.8338395e+00   4.0342445e+00   3.7061759e+00   4.4651726e-01   4.4651726e-01   3.2816937e-01   5.7257017e-01   3.4378533e-01   8.2384013e-01   6.6432544e-01   8.0758367e-01   9.3048953e-01   5.8851328e-01   4.3691963e-01   5.1691876e-01   8.8062848e-01   8.9917007e-01   5.0817745e-01   3.6171588e-01   3.2816937e-01   8.6361309e-01   7.3851529e-01   4.1449626e-01   8.6361309e-01   1.1845977e+00   5.4292906e-01   4.9766035e-01   1.6754036e+00   1.0919712e+00   5.3309112e-01   6.2024833e-01   9.7098574e-01   3.8934542e-01   9.3824087e-01   2.8507955e-01   6.5223271e-01   3.5185448e+00   3.2633258e+00   3.6996953e+00   2.8710255e+00   3.3892942e+00   3.2497279e+00   3.4561374e+00   2.2371784e+00   3.3772302e+00   2.7023432e+00   2.5704711e+00   2.9638836e+00   2.8904978e+00   3.4483274e+00   2.3826791e+00   3.1954061e+00   3.2493673e+00   2.8645447e+00   3.3669805e+00   2.7184506e+00   3.5654259e+00   2.7812639e+00   3.6908684e+00   3.4451701e+00   3.0736340e+00   3.1881331e+00   3.6017790e+00   3.7914024e+00   3.2604423e+00   2.3308454e+00   2.6548674e+00   2.5630676e+00   2.6859989e+00   3.8615219e+00   3.2492317e+00   3.2498302e+00   3.4856775e+00   3.2460061e+00   2.8456767e+00   2.8220742e+00   3.1709561e+00   3.3452644e+00   2.7965957e+00   2.2780262e+00   2.9733693e+00   2.9362769e+00   2.9511072e+00   3.0600825e+00   1.9688013e+00   2.8661222e+00   4.8176767e+00   3.8889176e+00   4.7230291e+00   4.3578295e+00   4.5962942e+00   5.4442203e+00   3.3242177e+00   5.1039076e+00   4.5914416e+00   4.9653393e+00   3.8994399e+00   4.0931001e+00   4.3174903e+00   3.8262108e+00   3.9673816e+00   4.1302370e+00   4.2654212e+00   5.5551457e+00   5.7673205e+00   3.8189943e+00   4.5366342e+00   3.7059360e+00   5.5500825e+00   3.6985459e+00   4.4934936e+00   4.7995387e+00   3.5922369e+00   3.6724425e+00   4.3963259e+00   4.6010378e+00   4.9364818e+00   5.2936070e+00   4.4094664e+00   3.8558772e+00   4.3453589e+00   5.0159331e+00   4.4225169e+00   4.2579435e+00   3.5745624e+00   4.2301614e+00   4.4459076e+00   3.9875954e+00   3.8889176e+00   4.7169919e+00   4.5531173e+00   4.0619315e+00   3.8238093e+00   3.9999729e+00   4.2141826e+00   3.8611742e+00   6.3808075e-01   3.0275928e-01   3.7598397e-01   2.1269358e-01   5.6769031e-01   3.4378533e-01   5.3022554e-01   5.0991930e-01   2.1845981e-01   1.4096146e-01   1.4096146e-01   4.5581864e-01   4.5581864e-01   3.0811765e-01   6.0670504e-01   7.3496673e-01   4.2667565e-01   3.2816937e-01   4.0293660e-01   4.2667565e-01   7.6787403e-01   1.4096146e-01   1.2418578e-01   1.2394907e+00   7.1504098e-01   3.2586371e-01   5.2942799e-01   5.2862779e-01   3.2586371e-01   5.2942799e-01   2.5651975e-01   2.1269358e-01   3.4944845e+00   3.2032390e+00   3.6588207e+00   2.7040077e+00   3.3172489e+00   3.1387381e+00   3.3902207e+00   2.0195610e+00   3.3129652e+00   2.5744164e+00   2.3173648e+00   2.8753045e+00   2.7215057e+00   
3.3565935e+00   2.2694598e+00   3.1556501e+00   3.1511848e+00   2.7390328e+00   3.2351115e+00   2.5646808e+00   3.4858646e+00   2.6854398e+00   3.5885398e+00   3.3452644e+00   3.0014619e+00   3.1359624e+00   3.5442447e+00   3.7351231e+00   3.1683717e+00   2.1722580e+00   2.4832809e+00   2.3831271e+00   2.5617005e+00   3.7607269e+00   3.1489919e+00   3.1764760e+00   3.4370400e+00   3.1222134e+00   2.7420671e+00   2.6759392e+00   3.0406669e+00   3.2584404e+00   2.6649106e+00   2.0477765e+00   2.8508344e+00   2.8325946e+00   2.8443188e+00   2.9745020e+00   1.7495699e+00   2.7521074e+00   4.7498176e+00   3.7908064e+00   4.6736601e+00   4.2736523e+00   4.5261084e+00   5.4025762e+00   3.1986968e+00   5.0495127e+00   4.5070435e+00   4.9284820e+00   3.8418637e+00   4.0108142e+00   4.2628455e+00   3.7198158e+00   3.8891307e+00   4.0719001e+00   4.1917075e+00   5.5215376e+00   5.7192826e+00   3.6876129e+00   4.4897240e+00   3.6129201e+00   5.5066558e+00   3.6138577e+00   4.4349731e+00   4.7514299e+00   3.5090368e+00   3.5920050e+00   4.3185209e+00   4.5521574e+00   4.8905855e+00   5.2768100e+00   4.3339547e+00   3.7672401e+00   4.2403934e+00   4.9963306e+00   4.3598652e+00   4.1826701e+00   3.4920978e+00   4.1853524e+00   4.3933832e+00   3.9574195e+00   3.7908064e+00   4.6611791e+00   4.5034762e+00   4.0149574e+00   3.7307866e+00   3.9355645e+00   4.1498459e+00   3.7732223e+00   6.0551856e-01   4.4535192e-01   6.0670504e-01   1.1765359e+00   6.9325418e-01   9.2288144e-01   9.3637892e-01   7.3535471e-01   5.3665999e-01   5.8914551e-01   1.0576043e+00   1.0106392e+00   4.5581864e-01   5.4292906e-01   4.5581864e-01   9.4009473e-01   8.6290690e-01   4.5581864e-01   9.4009473e-01   1.3885563e+00   6.5223271e-01   7.4740267e-01   1.7041201e+00   1.3421549e+00   7.2823007e-01   6.0611244e-01   1.0653845e+00   6.0121055e-01   1.1521791e+00   4.1586001e-01   7.7919451e-01   3.1037808e+00   2.8727295e+00   3.2914954e+00   2.5112138e+00   2.9950832e+00   2.8632951e+00   3.0711321e+00   1.9332545e+00   2.9709745e+00   2.3448578e+00   2.2688022e+00   2.5914913e+00   2.5183808e+00   3.0579528e+00   2.0217308e+00   2.7906520e+00   2.8719896e+00   2.4738237e+00   2.9926636e+00   2.3440712e+00   3.1967616e+00   2.3980102e+00   3.3005331e+00   3.0483897e+00   2.6752185e+00   2.7868575e+00   3.1931777e+00   3.3968373e+00   2.8794765e+00   1.9640287e+00   2.2899742e+00   2.1990648e+00   2.3082381e+00   3.4762640e+00   2.8726212e+00   2.8752807e+00   3.0841055e+00   2.8599559e+00   2.4658566e+00   2.4543822e+00   2.7852734e+00   2.9558536e+00   2.4184985e+00   1.9730073e+00   2.5941661e+00   2.5490308e+00   2.5692104e+00   2.6676432e+00   1.6855044e+00   2.4875166e+00   4.4549901e+00   3.5192877e+00   4.3283747e+00   3.9701062e+00   4.2192020e+00   5.0364062e+00   2.9740218e+00   4.6944291e+00   4.1953299e+00   4.5849301e+00   3.5249823e+00   3.7114178e+00   3.9340128e+00   3.4663826e+00   3.6308220e+00   3.7719045e+00   3.8752244e+00   5.1489675e+00   5.3620178e+00   3.4363773e+00   4.1584261e+00   3.3485848e+00   5.1375979e+00   3.3209346e+00   4.1101337e+00   4.3926615e+00   3.2186045e+00   3.2982828e+00   4.0192434e+00   4.1885158e+00   4.5274663e+00   4.8785522e+00   4.0373008e+00   3.4619693e+00   3.9494193e+00   4.6147130e+00   4.0642632e+00   3.8698115e+00   3.2042572e+00   3.8459316e+00   4.0795080e+00   3.6227082e+00   3.5192877e+00   4.3377723e+00   4.1907472e+00   3.6992844e+00   3.4507210e+00   3.6230931e+00   3.8568809e+00   3.4855556e+00   4.5581864e-01   1.2418578e-01   6.2660376e-01   5.1607523e-01   
5.2655962e-01   8.0096515e-01   4.0438741e-01   3.0546431e-01   4.0438741e-01   6.4806901e-01   7.1504098e-01   4.4535192e-01   3.2586371e-01   4.9857388e-01   7.0784540e-01   6.2081167e-01   4.5581864e-01   7.0784540e-01   9.3824087e-01   4.0147421e-01   3.2586371e-01   1.5252485e+00   8.1558458e-01   3.7598397e-01   4.0147421e-01   8.1099042e-01   1.2418578e-01   6.9006418e-01   2.1269358e-01   5.0270183e-01   3.4104878e+00   3.1150013e+00   3.5735680e+00   2.6813198e+00   3.2413086e+00   3.0598576e+00   3.2985843e+00   2.0418831e+00   3.2329790e+00   2.5171713e+00   2.3816204e+00   2.7937685e+00   2.7102198e+00   3.2724336e+00   2.2054062e+00   3.0737397e+00   3.0650685e+00   2.6738621e+00   3.1983741e+00   2.5253420e+00   3.3951420e+00   2.6185785e+00   3.5201263e+00   3.2642011e+00   2.9245132e+00   3.0559405e+00   3.4672191e+00   3.6501426e+00   3.0869409e+00   2.1449779e+00   2.4611582e+00   2.3678836e+00   2.5038051e+00   3.6798850e+00   3.0627375e+00   3.0833417e+00   3.3518108e+00   3.0810282e+00   2.6595270e+00   2.6323570e+00   2.9747184e+00   3.1719581e+00   2.6119950e+00   2.0875308e+00   2.7837517e+00   2.7481947e+00   2.7653278e+00   2.8959432e+00   1.7918633e+00   2.6810089e+00   4.6579399e+00   3.7114178e+00   4.5860934e+00   4.1845117e+00   4.4368376e+00   5.3138890e+00   3.1392617e+00   4.9608959e+00   4.4280037e+00   4.8387532e+00   3.7530908e+00   3.9304148e+00   4.1764880e+00   3.6510310e+00   3.8111653e+00   3.9838913e+00   4.1019789e+00   5.4302306e+00   5.6352651e+00   3.6334268e+00   4.4011799e+00   3.5332521e+00   5.4201202e+00   3.5378545e+00   4.3430510e+00   4.6603717e+00   3.4302376e+00   3.5053533e+00   4.2335883e+00   4.4640423e+00   4.8059592e+00   5.1881280e+00   4.2497073e+00   3.6834126e+00   4.1572779e+00   4.9128775e+00   4.2687104e+00   4.0908836e+00   3.4061169e+00   4.0989195e+00   4.3064904e+00   3.8759115e+00   3.7114178e+00   4.5707958e+00   4.4147019e+00   3.9326468e+00   3.6624594e+00   3.8494244e+00   4.0586633e+00   3.6843892e+00   4.0176783e-01   9.3801395e-01   3.7427929e-01   6.0551856e-01   4.9766035e-01   4.1449626e-01   2.5251796e-01   3.2352160e-01   7.0437330e-01   6.2024833e-01   2.4837156e-01   7.0826681e-01   8.1099042e-01   5.3665999e-01   5.7257017e-01   4.0293660e-01   5.3665999e-01   1.0321505e+00   3.2352160e-01   4.9857388e-01   1.2654843e+00   1.0181000e+00   4.9857388e-01   4.6472023e-01   6.6432544e-01   4.4535192e-01   8.1354181e-01   3.2586371e-01   4.4535192e-01   3.1652953e+00   2.9034751e+00   3.3383293e+00   2.4277398e+00   3.0113552e+00   2.8500355e+00   3.0981427e+00   1.7594421e+00   2.9944056e+00   2.3105335e+00   2.0559262e+00   2.5987706e+00   2.4171653e+00   3.0605340e+00   2.0054351e+00   2.8372675e+00   2.8748086e+00   2.4385827e+00   2.9411544e+00   2.2761106e+00   3.2149087e+00   2.3896630e+00   3.2878368e+00   3.0415738e+00   2.6906230e+00   2.8216976e+00   3.2222602e+00   3.4304873e+00   2.8822802e+00   1.8816502e+00   2.1994544e+00   2.0961718e+00   2.2729984e+00   3.4713427e+00   2.8746311e+00   2.8977380e+00   3.1239380e+00   2.8149627e+00   2.4626518e+00   2.3988202e+00   2.7512943e+00   2.9634715e+00   2.3744738e+00   1.7861398e+00   2.5679553e+00   2.5438761e+00   2.5602722e+00   2.6724144e+00   1.5132025e+00   2.4693940e+00   4.4818617e+00   3.5188715e+00   4.3693901e+00   3.9814082e+00   4.2430316e+00   5.0846750e+00   2.9378173e+00   4.7308926e+00   4.2026915e+00   4.6369546e+00   3.5592955e+00   3.7219741e+00   3.9702025e+00   3.4565374e+00   3.6485303e+00   3.8059948e+00   3.8952692e+00   
5.2045888e+00   5.4038637e+00   3.3942456e+00   4.2018404e+00   3.3550631e+00   5.1838582e+00   3.3280757e+00   4.1439346e+00   4.4349731e+00   3.2284912e+00   3.3133518e+00   4.0354963e+00   4.2293106e+00   4.5707958e+00   4.9486891e+00   4.0555804e+00   3.4667821e+00   3.9402495e+00   4.6817169e+00   4.0952984e+00   3.8891597e+00   3.2181054e+00   3.8906833e+00   4.1179678e+00   3.6792093e+00   3.5188715e+00   4.3738746e+00   4.2318888e+00   3.7415007e+00   3.4482727e+00   3.6509580e+00   3.8867812e+00   3.4956677e+00   6.2660376e-01   4.1449626e-01   4.8927739e-01   7.0479928e-01   3.0546431e-01   2.5251796e-01   3.2816937e-01   5.7324170e-01   6.2538346e-01   3.7255734e-01   4.4535192e-01   5.7324170e-01   6.2482915e-01   5.3665999e-01   4.3691963e-01   6.2482915e-01   8.7420176e-01   3.2352160e-01   2.5651975e-01   1.4293465e+00   7.7360126e-01   2.5651975e-01   4.0147421e-01   7.1504098e-01   2.1269358e-01   6.2660376e-01   2.4837156e-01   4.1586001e-01   3.4011512e+00   3.1015495e+00   3.5629124e+00   2.6446848e+00   3.2242409e+00   3.0444970e+00   3.2854349e+00   1.9922212e+00   3.2208624e+00   2.4875432e+00   2.3235341e+00   2.7738624e+00   2.6761538e+00   3.2590031e+00   2.1770137e+00   3.0609726e+00   3.0488669e+00   2.6564689e+00   3.1671679e+00   2.4967542e+00   3.3778209e+00   2.5968629e+00   3.5012162e+00   3.2523394e+00   2.9093820e+00   3.0417674e+00   3.4541339e+00   3.6357801e+00   3.0695657e+00   2.1117686e+00   2.4265922e+00   2.3324013e+00   2.4794505e+00   3.6641654e+00   3.0465086e+00   3.0686943e+00   3.3395333e+00   3.0541897e+00   2.6428278e+00   2.6018930e+00   2.9558536e+00   3.1589081e+00   2.5867906e+00   2.0334278e+00   2.7624503e+00   2.7347909e+00   2.7481947e+00   2.8804789e+00   1.7294430e+00   2.6604040e+00   4.6390099e+00   3.6904099e+00   4.5721680e+00   4.1717456e+00   4.4201622e+00   5.3038306e+00   3.1101376e+00   4.9521292e+00   4.4131751e+00   4.8218478e+00   3.7351231e+00   3.9119157e+00   4.1593600e+00   3.6239123e+00   3.7806200e+00   3.9616940e+00   4.0893976e+00   5.4208566e+00   5.6225133e+00   3.6099406e+00   4.3833809e+00   3.5087649e+00   5.4106224e+00   3.5167400e+00   4.3287713e+00   4.6517741e+00   3.4091049e+00   3.4875352e+00   4.2154392e+00   4.4559638e+00   4.7948032e+00   5.1800729e+00   4.2298456e+00   3.6705550e+00   4.1464752e+00   4.8981257e+00   4.2482702e+00   4.0788778e+00   3.3871264e+00   4.0817153e+00   4.2852718e+00   3.8517038e+00   3.6904099e+00   4.5544260e+00   4.3933832e+00   3.9084933e+00   3.6377874e+00   3.8310704e+00   4.0381483e+00   3.6684338e+00   7.9016429e-01   9.0454394e-01   7.7553525e-01   6.5633874e-01   6.8961791e-01   6.5172743e-01   6.4755655e-01   6.9325418e-01   8.5690100e-01   7.5871717e-01   9.8800009e-01   6.3977563e-01   5.0503591e-01   9.0852141e-01   6.3977563e-01   6.2482915e-01   6.2605182e-01   4.4651726e-01   1.3039319e+00   4.5470518e-01   6.8801986e-01   9.4492923e-01   6.5223271e-01   6.9325418e-01   4.9674312e-01   7.6752131e-01   5.2574978e-01   3.9950977e+00   3.6682165e+00   4.1454421e+00   3.1171350e+00   3.7869887e+00   3.5623665e+00   3.8418637e+00   2.4093459e+00   3.7913585e+00   2.9787373e+00   2.6930133e+00   3.3123070e+00   3.1668302e+00   3.7985007e+00   2.6980573e+00   3.6472316e+00   3.5682291e+00   3.1756360e+00   3.6828708e+00   2.9891564e+00   3.9102500e+00   3.1455588e+00   4.0375495e+00   3.7881557e+00   3.4756264e+00   3.6204175e+00   4.0288578e+00   4.2043522e+00   3.6069560e+00   2.6127852e+00   2.9004348e+00   2.8010550e+00   3.0013391e+00   4.1901031e+00   
3.5584876e+00   3.6126340e+00   3.9170234e+00   3.5823448e+00   3.1646752e+00   3.0923554e+00   3.4563987e+00   3.7021702e+00   3.1018442e+00   2.4341346e+00   3.2724336e+00   3.2604423e+00   3.2715632e+00   3.4335342e+00   2.1375243e+00   3.1807243e+00   5.1732049e+00   4.2085267e+00   5.1402548e+00   4.7090394e+00   4.9640507e+00   5.8783901e+00   3.5970425e+00   5.5195747e+00   4.9586651e+00   5.3905797e+00   4.2919639e+00   4.4545908e+00   4.7214902e+00   4.1323720e+00   4.2962344e+00   4.5078390e+00   4.6379987e+00   5.9985593e+00   6.1927610e+00   4.1173292e+00   4.9467209e+00   4.0217355e+00   5.9855018e+00   4.0597680e+00   4.8845913e+00   5.2228901e+00   3.9505682e+00   4.0262006e+00   4.7557226e+00   5.0295898e+00   5.3695643e+00   5.7704875e+00   4.7697480e+00   4.2124808e+00   4.6690776e+00   5.4857949e+00   4.7867344e+00   4.6237863e+00   3.9220577e+00   4.6512574e+00   4.8397561e+00   4.4244727e+00   4.2085267e+00   5.1102370e+00   4.9464234e+00   4.4688286e+00   4.1733964e+00   4.3844239e+00   4.5752087e+00   4.1957914e+00   3.8934542e-01   3.7598397e-01   1.5422108e-01   3.4583729e-01   3.7598397e-01   4.4651726e-01   3.8934542e-01   3.2816937e-01   8.2929029e-01   9.3610001e-01   4.3691963e-01   5.3022554e-01   5.3309112e-01   4.3691963e-01   7.5976039e-01   3.2586371e-01   4.2667565e-01   1.0733200e+00   7.4777660e-01   2.1845981e-01   5.0905001e-01   4.3456114e-01   5.2942799e-01   5.5492130e-01   4.6472023e-01   3.7427929e-01   3.2191540e+00   2.9033203e+00   3.3725057e+00   2.3744738e+00   3.0162346e+00   2.8254861e+00   3.0850817e+00   1.6867642e+00   3.0206125e+00   2.2489449e+00   1.9859316e+00   2.5612285e+00   2.4037412e+00   3.0483897e+00   1.9482601e+00   2.8711841e+00   2.8361445e+00   2.4280197e+00   2.9151666e+00   2.2428341e+00   3.1709561e+00   2.3770599e+00   3.2772927e+00   3.0392407e+00   2.7044574e+00   2.8456337e+00   3.2543813e+00   3.4363773e+00   2.8560815e+00   1.8517858e+00   2.1570512e+00   2.0577182e+00   2.2449618e+00   3.4472201e+00   2.8333788e+00   2.8654874e+00   3.1455372e+00   2.8102985e+00   2.4278629e+00   2.3504126e+00   2.7240842e+00   2.9511072e+00   2.3468577e+00   1.7140774e+00   2.5325623e+00   2.5220817e+00   2.5303132e+00   2.6704164e+00   1.4096199e+00   2.4355523e+00   4.4339908e+00   3.4713427e+00   4.3742064e+00   3.9639546e+00   4.2144772e+00   5.1104621e+00   2.8721481e+00   4.7555981e+00   4.1997133e+00   4.6264002e+00   3.5337158e+00   3.6986235e+00   3.9580816e+00   3.3951420e+00   3.5651172e+00   3.7578933e+00   3.8852294e+00   5.2312618e+00   5.4232729e+00   3.3678461e+00   4.1846468e+00   3.2909043e+00   5.2164277e+00   3.3005331e+00   4.1286019e+00   4.4583488e+00   3.1948184e+00   3.2785487e+00   4.0052019e+00   4.2627019e+00   4.5989546e+00   4.9978258e+00   4.0193656e+00   3.4602150e+00   3.9315374e+00   4.7096962e+00   4.0441539e+00   3.8751788e+00   3.1769821e+00   3.8841455e+00   4.0829381e+00   3.6559398e+00   3.4713427e+00   4.3535851e+00   4.1922661e+00   3.7063225e+00   3.4135466e+00   3.6262912e+00   3.8337160e+00   3.4586921e+00   4.5470518e-01   3.4378533e-01   4.9766035e-01   5.6631629e-01   3.2586371e-01   3.7255734e-01   6.5172743e-01   7.6625946e-01   9.7356960e-01   4.4651726e-01   7.0784540e-01   8.1273630e-01   4.4651726e-01   6.8757066e-01   4.4417983e-01   6.0670504e-01   1.1521791e+00   6.5172743e-01   4.5581864e-01   4.5470518e-01   5.6700421e-01   4.8036801e-01   5.1607523e-01   5.8851328e-01   5.0905001e-01   3.1972361e+00   2.8360340e+00   3.3243092e+00   2.2696891e+00   2.9531300e+00   
2.6835197e+00   2.9981183e+00   1.5916843e+00   2.9546153e+00   2.1366783e+00   1.9099663e+00   2.4717637e+00   2.3219731e+00   2.9300203e+00   1.8705419e+00   2.8448793e+00   2.7044574e+00   2.2951567e+00   2.8430073e+00   2.1220080e+00   3.0654291e+00   2.3117864e+00   3.1749624e+00   2.9091307e+00   2.6433897e+00   2.8065044e+00   3.2000771e+00   3.3714688e+00   2.7511201e+00   1.7679293e+00   2.0426611e+00   1.9423536e+00   2.1457242e+00   3.3172489e+00   2.6940968e+00   2.7682296e+00   3.0935247e+00   2.7391698e+00   2.2996943e+00   2.2360451e+00   2.5729378e+00   2.8382774e+00   2.2413445e+00   1.6312555e+00   2.4031247e+00   2.3848740e+00   2.4037412e+00   2.5843380e+00   1.3803845e+00   2.3178393e+00   4.3373464e+00   3.3555449e+00   4.3029534e+00   3.8394246e+00   4.1166152e+00   5.0345534e+00   2.7564428e+00   4.6628709e+00   4.0929284e+00   4.5724955e+00   3.4657942e+00   3.6038441e+00   3.8912788e+00   3.2935105e+00   3.4985926e+00   3.6938082e+00   3.7771525e+00   5.1600819e+00   5.3477989e+00   3.2453592e+00   4.1245629e+00   3.1885527e+00   5.1389533e+00   3.2183480e+00   4.0411872e+00   4.3734530e+00   3.1116430e+00   3.1793624e+00   3.9065553e+00   4.1815080e+00   4.5288943e+00   4.9506208e+00   3.9281139e+00   3.3421539e+00   3.7789119e+00   4.6812838e+00   3.9623295e+00   3.7603559e+00   3.0782101e+00   3.8325036e+00   4.0241284e+00   3.6474081e+00   3.3555449e+00   4.2739171e+00   4.1339606e+00   3.6720159e+00   3.3349237e+00   3.5512382e+00   3.7511837e+00   3.3364095e+00   4.1312257e-01   5.0905001e-01   4.2538717e-01   3.2352160e-01   2.0656129e-01   5.0592043e-01   1.1017858e+00   1.2234738e+00   1.5422108e-01   4.1312257e-01   6.3924842e-01   1.5422108e-01   6.1968386e-01   4.0293660e-01   5.2942799e-01   7.7919451e-01   6.2482915e-01   5.6631629e-01   8.1385214e-01   2.5251796e-01   8.0032200e-01   4.2538717e-01   7.1462831e-01   3.2352160e-01   3.3601225e+00   3.0492285e+00   3.5130686e+00   2.4784234e+00   3.1574358e+00   2.9495683e+00   3.2305582e+00   1.7639552e+00   3.1543365e+00   2.3872777e+00   2.0066796e+00   2.7104259e+00   2.4866453e+00   3.1794134e+00   2.0999661e+00   3.0164205e+00   2.9733298e+00   2.5409084e+00   3.0313923e+00   2.3496997e+00   3.3211033e+00   2.5173832e+00   3.4035007e+00   3.1599783e+00   2.8424094e+00   2.9896107e+00   3.3894792e+00   3.5821206e+00   2.9960387e+00   1.9633030e+00   2.2546134e+00   2.1472921e+00   2.3729779e+00   3.5766918e+00   2.9692947e+00   3.0147040e+00   3.2887803e+00   2.9233984e+00   2.5628362e+00   2.4694536e+00   2.8371552e+00   3.0850817e+00   2.4681135e+00   1.7749726e+00   2.6585961e+00   2.6493446e+00   2.6623632e+00   2.8060305e+00   1.5124582e+00   2.5679553e+00   4.5912390e+00   3.6144502e+00   4.5212874e+00   4.0982070e+00   4.3637125e+00   5.2494547e+00   3.0086587e+00   4.8875515e+00   4.3294620e+00   4.7880147e+00   3.6912876e+00   3.8418637e+00   4.1117747e+00   3.5413188e+00   3.7389462e+00   3.9250135e+00   4.0233529e+00   5.3754694e+00   5.5627643e+00   3.4785161e+00   4.3436980e+00   3.4455964e+00   5.3512930e+00   3.4471566e+00   4.2776053e+00   4.5941143e+00   3.3449470e+00   3.4269051e+00   4.1524717e+00   4.3946634e+00   4.7366258e+00   5.1414650e+00   4.1713411e+00   3.5895201e+00   4.0467919e+00   4.8635964e+00   4.2075047e+00   4.0127353e+00   3.3273395e+00   4.0411872e+00   4.2480617e+00   3.8321687e+00   3.6144502e+00   4.5072985e+00   4.3598062e+00   3.8780834e+00   3.5586316e+00   3.7805671e+00   3.9971028e+00   3.6003219e+00   2.5651975e-01   2.8192292e-01   3.4378533e-01   
3.4378533e-01   4.0147421e-01   7.1840099e-01   8.5690100e-01   3.7598397e-01   4.2538717e-01   5.3665999e-01   3.7598397e-01   6.6827038e-01   2.1269358e-01   3.0546431e-01   1.1320702e+00   6.2988288e-01   2.0656129e-01   4.4535192e-01   4.2667565e-01   4.1449626e-01   4.3691963e-01   3.8934542e-01   2.5251796e-01   3.3428183e+00   3.0232018e+00   3.4944845e+00   2.4950353e+00   3.1381402e+00   2.9363801e+00   3.2027420e+00   1.8084630e+00   3.1409294e+00   2.3642733e+00   2.1113036e+00   2.6784604e+00   2.5283251e+00   3.1625374e+00   2.0667297e+00   2.9950041e+00   2.9473422e+00   2.5410503e+00   3.0407257e+00   2.3596992e+00   3.2858223e+00   2.4986337e+00   3.3960124e+00   3.1519721e+00   2.8255193e+00   2.9686973e+00   3.3765772e+00   3.5575681e+00   2.9720515e+00   1.9732878e+00   2.2758449e+00   2.1765379e+00   2.3630256e+00   3.5606213e+00   2.9432282e+00   2.9813163e+00   3.2672636e+00   2.9349850e+00   2.5393641e+00   2.4678343e+00   2.8348218e+00   3.0655803e+00   2.4649412e+00   1.8381372e+00   2.6459992e+00   2.6324803e+00   2.6428278e+00   2.7886501e+00   1.5384446e+00   2.5500177e+00   4.5509522e+00   3.5863729e+00   4.4953238e+00   4.0776551e+00   4.3319640e+00   5.2308062e+00   2.9880146e+00   4.8736353e+00   4.3174903e+00   4.7496159e+00   3.6545040e+00   3.8173510e+00   4.0797075e+00   3.5129177e+00   3.6850739e+00   3.8789949e+00   4.0011311e+00   5.3516635e+00   5.5447338e+00   3.4854203e+00   4.3070102e+00   3.4066692e+00   5.3366535e+00   3.4209331e+00   4.2472818e+00   4.5769661e+00   3.3144678e+00   3.3951450e+00   4.1229838e+00   4.3815083e+00   4.7200939e+00   5.1200990e+00   4.1381062e+00   3.5750015e+00   4.0416512e+00   4.8355880e+00   4.1628805e+00   3.9898962e+00   3.2934163e+00   4.0073765e+00   4.2053647e+00   3.7839552e+00   3.5863729e+00   4.4735121e+00   4.3145846e+00   3.8316735e+00   3.5355330e+00   3.7466071e+00   3.9521135e+00   3.5718733e+00   1.2418578e-01   5.2942799e-01   4.9766035e-01   2.5251796e-01   6.0060595e-01   7.1462831e-01   4.4535192e-01   3.8776762e-01   3.2352160e-01   4.4535192e-01   8.5434758e-01   1.2418578e-01   2.5251796e-01   1.2643026e+00   8.1354181e-01   4.1449626e-01   4.5581864e-01   5.6769031e-01   3.0546431e-01   6.2024833e-01   2.0656129e-01   2.5251796e-01   3.3898078e+00   3.1105347e+00   3.5575681e+00   2.6249526e+00   3.2229246e+00   3.0488669e+00   3.3004172e+00   1.9465831e+00   3.2118493e+00   2.4993166e+00   2.2473120e+00   2.7928582e+00   2.6296047e+00   3.2639046e+00   2.1933937e+00   3.0559188e+00   3.0673749e+00   2.6440180e+00   3.1486786e+00   2.4776707e+00   3.4056271e+00   2.5954147e+00   3.4958702e+00   3.2483608e+00   2.9041534e+00   3.0378828e+00   3.4425295e+00   3.6411503e+00   3.0811775e+00   2.0851901e+00   2.3997585e+00   2.2980893e+00   2.4741664e+00   3.6714798e+00   3.0661139e+00   3.0923102e+00   3.3390355e+00   3.0288896e+00   2.6566259e+00   2.5949561e+00   2.9511072e+00   3.1662394e+00   2.5768305e+00   1.9764051e+00   2.7650187e+00   2.7420671e+00   2.7570191e+00   2.8802219e+00   1.6913411e+00   2.6662783e+00   4.6723657e+00   3.7109297e+00   4.5802461e+00   4.1829734e+00   4.4414351e+00   5.3024507e+00   3.1246867e+00   4.9479218e+00   4.4123750e+00   4.8421359e+00   3.7582429e+00   3.9238367e+00   4.1750713e+00   3.6455011e+00   3.8262353e+00   3.9966275e+00   4.0997246e+00   5.4219259e+00   5.6210533e+00   3.5985833e+00   4.4045665e+00   3.5402439e+00   5.4041845e+00   3.5288526e+00   4.3467079e+00   4.6509698e+00   3.4261305e+00   3.5087649e+00   4.2340479e+00   4.4487072e+00   
4.7898591e+00   5.1726632e+00   4.2521321e+00   3.6728562e+00   4.1446028e+00   4.9002452e+00   4.2845042e+00   4.0915924e+00   3.4110551e+00   4.0971854e+00   4.3142588e+00   3.8789274e+00   3.7109297e+00   4.5753785e+00   4.4261431e+00   3.9377466e+00   3.6482464e+00   3.8509694e+00   4.0749843e+00   3.6894983e+00   5.1607523e-01   4.5470518e-01   2.5251796e-01   7.0086313e-01   8.1067767e-01   3.7598397e-01   2.8192292e-01   3.0546431e-01   3.7598397e-01   8.2654509e-01   1.2418578e-01   2.1845981e-01   1.1754055e+00   8.0326782e-01   4.2667565e-01   5.7324170e-01   4.9766035e-01   4.1449626e-01   6.0551856e-01   3.0546431e-01   2.0656129e-01   3.4780839e+00   3.2028668e+00   3.6478832e+00   2.7001131e+00   3.3123070e+00   3.1424188e+00   3.3940268e+00   2.0081113e+00   3.3027430e+00   2.5846998e+00   2.2899742e+00   2.8843735e+00   2.7014135e+00   3.3579678e+00   2.2804674e+00   3.1447330e+00   3.1614572e+00   2.7347909e+00   3.2266676e+00   2.5600441e+00   3.4989115e+00   2.6835197e+00   3.5848035e+00   3.3425343e+00   2.9944056e+00   3.1272113e+00   3.5315985e+00   3.7321938e+00   3.1736230e+00   2.1642821e+00   2.4763086e+00   2.3729779e+00   2.5613679e+00   3.7645411e+00   3.1602773e+00   3.1861493e+00   3.4298218e+00   3.1090174e+00   2.7503801e+00   2.6773131e+00   3.0414782e+00   3.2606191e+00   2.6626548e+00   2.0308266e+00   2.8549070e+00   2.8371552e+00   2.8500790e+00   2.9720515e+00   1.7439430e+00   2.7570191e+00   4.7646254e+00   3.8019108e+00   4.6714768e+00   4.2777154e+00   4.5341669e+00   5.3940437e+00   3.2096520e+00   5.0408889e+00   4.5037829e+00   4.9318274e+00   3.8493108e+00   4.0147814e+00   4.2656755e+00   3.7323432e+00   3.9122146e+00   4.0862794e+00   4.1939019e+00   5.5136120e+00   5.7113100e+00   3.6836404e+00   4.4947801e+00   3.6298047e+00   5.4953655e+00   3.6181784e+00   4.4396174e+00   4.7440323e+00   3.5161021e+00   3.6013159e+00   4.3259034e+00   4.5411167e+00   4.8804165e+00   5.2620015e+00   4.3431589e+00   3.7666196e+00   4.2394195e+00   4.9871255e+00   4.3755778e+00   4.1864815e+00   3.5032323e+00   4.1868319e+00   4.4036244e+00   3.9638383e+00   3.8019108e+00   4.6672392e+00   4.5155386e+00   4.0245873e+00   3.7349501e+00   3.9420153e+00   4.1660972e+00   3.7835217e+00   1.2418578e-01   7.0826681e-01   9.4125538e-01   1.1340084e+00   2.1845981e-01   4.4417983e-01   8.2105460e-01   2.1845981e-01   3.8776762e-01   4.1449626e-01   4.2418962e-01   9.1075311e-01   3.7255734e-01   4.8036801e-01   6.6827038e-01   2.5651975e-01   6.4704320e-01   2.0656129e-01   6.8961791e-01   3.2586371e-01   3.4686627e+00   3.1154621e+00   3.6024772e+00   2.5108475e+00   3.2290313e+00   2.9704704e+00   3.2809332e+00   1.7903952e+00   3.2349033e+00   2.3960250e+00   2.0600195e+00   2.7476411e+00   2.5610667e+00   3.2181149e+00   2.1323638e+00   3.1154281e+00   2.9881598e+00   2.5786309e+00   3.0947136e+00   2.3839455e+00   3.3448197e+00   2.5821869e+00   3.4532666e+00   3.1998457e+00   2.9200439e+00   3.0794219e+00   3.4771054e+00   3.6510733e+00   3.0328831e+00   2.0197525e+00   2.2893157e+00   2.1858172e+00   2.4166535e+00   3.6030381e+00   2.9770247e+00   3.0492285e+00   3.3713329e+00   2.9974031e+00   2.5833316e+00   2.4944673e+00   2.8535197e+00   3.1260028e+00   2.5104998e+00   1.8109223e+00   2.6804452e+00   2.6742360e+00   2.6875169e+00   2.8656044e+00   1.5447938e+00   2.5961850e+00   4.6147552e+00   3.6320149e+00   4.5850999e+00   4.1288948e+00   4.3989089e+00   5.3208661e+00   3.0146208e+00   4.9525575e+00   4.3778283e+00   4.8485483e+00   3.7416375e+00   
3.8836706e+00   4.1692394e+00   3.5584719e+00   3.7545764e+00   3.9635178e+00   4.0653249e+00   5.4465238e+00   5.6318041e+00   3.5148434e+00   4.4004449e+00   3.4571917e+00   5.4256316e+00   3.4931266e+00   4.3243671e+00   4.6617210e+00   3.3863989e+00   3.4595139e+00   4.1872611e+00   4.4691596e+00   4.8126955e+00   5.2323771e+00   4.2057897e+00   3.6309135e+00   4.0714444e+00   4.9544547e+00   4.2355968e+00   4.0495222e+00   3.3563815e+00   4.1075398e+00   4.2957899e+00   3.9065020e+00   3.6320149e+00   4.5543025e+00   4.4046818e+00   3.9362563e+00   3.6038441e+00   3.8285790e+00   4.0238937e+00   3.6204282e+00   6.2538346e-01   1.0167353e+00   1.1763980e+00   1.4096146e-01   4.1449626e-01   7.4740267e-01   1.4096146e-01   4.4535192e-01   3.7427929e-01   4.5581864e-01   8.2135873e-01   4.4535192e-01   5.0503591e-01   7.3145860e-01   2.1269358e-01   7.1421512e-01   2.5251796e-01   6.8961791e-01   2.8192292e-01   3.4295980e+00   3.0905489e+00   3.5700123e+00   2.4944673e+00   3.2020292e+00   2.9613737e+00   3.2617087e+00   1.7750284e+00   3.2049793e+00   2.3909366e+00   2.0308266e+00   2.7326472e+00   2.5286673e+00   3.2028668e+00   2.1181082e+00   3.0792693e+00   2.9816970e+00   2.5624932e+00   3.0681391e+00   2.3677174e+00   3.3352475e+00   2.5566444e+00   3.4334259e+00   3.1839972e+00   2.8907702e+00   3.0462904e+00   3.4448498e+00   3.6256155e+00   3.0181476e+00   1.9946242e+00   2.2719663e+00   2.1665831e+00   2.3980102e+00   3.5922217e+00   2.9733478e+00   3.0355056e+00   3.3410264e+00   2.9673667e+00   2.5744164e+00   2.4820750e+00   2.8455141e+00   3.1100045e+00   2.4920874e+00   1.7903952e+00   2.6704164e+00   2.6637326e+00   2.6767607e+00   2.8425716e+00   1.5257596e+00   2.5839288e+00   4.6057175e+00   3.6244539e+00   4.5619285e+00   4.1170543e+00   4.3856360e+00   5.2953657e+00   3.0110441e+00   4.9290739e+00   4.3593510e+00   4.8266994e+00   3.7227460e+00   3.8675033e+00   4.1480696e+00   3.5505922e+00   3.7479505e+00   3.9489183e+00   4.0495222e+00   5.4213759e+00   5.6069704e+00   3.4988409e+00   4.3796539e+00   3.4519560e+00   5.3990720e+00   3.4751739e+00   4.3070102e+00   4.6372963e+00   3.3701472e+00   3.4467337e+00   4.1738909e+00   4.4422690e+00   4.7852959e+00   5.2004339e+00   4.1925494e+00   3.6148707e+00   4.0613681e+00   4.9222119e+00   4.2248103e+00   4.0355816e+00   3.3448336e+00   4.0832977e+00   4.2781022e+00   3.8793995e+00   3.6244539e+00   4.5369609e+00   4.3880177e+00   3.9147164e+00   3.5857963e+00   3.8105301e+00   4.0134966e+00   3.6122845e+00   7.1799256e-01   8.0358695e-01   5.5419992e-01   4.6472023e-01   2.5651975e-01   5.5419992e-01   1.0198386e+00   3.2352160e-01   4.1586001e-01   1.2565757e+00   1.0054037e+00   4.1586001e-01   5.2574978e-01   6.4806901e-01   4.5581864e-01   8.0619006e-01   3.2586371e-01   4.1586001e-01   3.3274681e+00   3.0643176e+00   3.5031390e+00   2.5831315e+00   3.1734856e+00   3.0256804e+00   3.2593968e+00   1.9049236e+00   3.1662394e+00   2.4587503e+00   2.1948123e+00   2.7522526e+00   2.5874301e+00   3.2341367e+00   2.1493214e+00   2.9966141e+00   3.0389019e+00   2.6214821e+00   3.0978571e+00   2.4463233e+00   3.3673968e+00   2.5500177e+00   3.4578354e+00   3.2240818e+00   2.8578051e+00   2.9828212e+00   3.3905763e+00   3.5904118e+00   3.0455175e+00   2.0467937e+00   2.3640301e+00   2.2638728e+00   2.4385827e+00   3.6424937e+00   3.0387448e+00   3.0543001e+00   3.2867025e+00   2.9810914e+00   2.6291524e+00   2.5579619e+00   2.9291061e+00   3.1351569e+00   2.5421955e+00   1.9274228e+00   2.7359835e+00   2.7196686e+00   
2.7293044e+00   2.8418790e+00   1.6234861e+00   2.6349941e+00   4.6268918e+00   3.6735954e+00   4.5276539e+00   4.1519514e+00   4.3981255e+00   5.2510005e+00   3.0847983e+00   4.9049143e+00   4.3738893e+00   4.7804645e+00   3.7059360e+00   3.8806135e+00   4.1210200e+00   3.6007179e+00   3.7674167e+00   3.9401211e+00   4.0632074e+00   5.3683319e+00   5.5675337e+00   3.5650560e+00   4.3467079e+00   3.4964385e+00   5.3534247e+00   3.4817971e+00   4.3007421e+00   4.6057628e+00   3.3796707e+00   3.4684743e+00   4.1910954e+00   4.4033511e+00   4.7374076e+00   5.1107389e+00   4.2056854e+00   3.6417452e+00   4.1251907e+00   4.8290847e+00   4.2339606e+00   4.0576409e+00   3.3702841e+00   4.0376849e+00   4.2550416e+00   3.8015574e+00   3.6735954e+00   4.5249462e+00   4.3662620e+00   3.8699102e+00   3.5979726e+00   3.8007870e+00   4.0252278e+00   3.6568095e+00   3.0811765e-01   1.0065841e+00   9.1075311e-01   6.2538346e-01   1.0065841e+00   1.2125198e+00   7.0086313e-01   6.1623531e-01   1.8279039e+00   1.0613462e+00   6.9369532e-01   4.8135521e-01   1.1149070e+00   3.0811765e-01   9.7098574e-01   4.0293660e-01   8.0358695e-01   3.4154940e+00   3.1439160e+00   3.5872997e+00   2.8054691e+00   3.2839149e+00   3.1127876e+00   3.3274872e+00   2.2159139e+00   3.2598167e+00   2.6167778e+00   2.5758644e+00   2.8523754e+00   2.8256291e+00   3.3122081e+00   2.3003824e+00   3.0947136e+00   3.1160876e+00   2.7384939e+00   3.2940867e+00   2.6281170e+00   3.4401593e+00   2.6851662e+00   3.5758474e+00   3.3024121e+00   2.9636544e+00   3.0850893e+00   3.4935692e+00   3.6787177e+00   3.1382691e+00   2.2643860e+00   2.5838312e+00   2.4957147e+00   2.5876691e+00   3.7272092e+00   3.1148696e+00   3.1192689e+00   3.3731473e+00   3.1641238e+00   2.7164367e+00   2.7371177e+00   3.0433470e+00   3.2094276e+00   2.6989752e+00   2.2729593e+00   2.8576425e+00   2.7954161e+00   2.8234677e+00   2.9407805e+00   1.9980146e+00   2.7511201e+00   4.6993349e+00   3.7716613e+00   4.6090409e+00   4.2170163e+00   4.4740971e+00   5.3233695e+00   3.2312218e+00   4.9715358e+00   4.4649192e+00   4.8634712e+00   3.7907269e+00   3.9777091e+00   4.2102908e+00   3.7293867e+00   3.8887827e+00   4.0321207e+00   4.1304736e+00   5.4336694e+00   5.6535520e+00   3.7088191e+00   4.4333012e+00   3.6017014e+00   5.4287623e+00   3.5939935e+00   4.3696884e+00   4.6684884e+00   3.4865359e+00   3.5518450e+00   4.2777882e+00   4.4716883e+00   4.8204359e+00   5.1830573e+00   4.2975164e+00   3.7189281e+00   4.1915116e+00   4.9282164e+00   4.3126073e+00   4.1182993e+00   3.4568393e+00   4.1297239e+00   4.3496867e+00   3.9207320e+00   3.7716613e+00   4.6018276e+00   4.4564872e+00   3.9827168e+00   3.7295388e+00   3.8905364e+00   4.1037903e+00   3.7281480e+00   1.1474460e+00   1.0344911e+00   7.0043186e-01   1.1474460e+00   1.4311891e+00   8.2654509e-01   7.6787403e-01   1.9730918e+00   1.3073038e+00   7.9878917e-01   6.2407309e-01   1.2632199e+00   5.0503591e-01   1.1833480e+00   5.0905001e-01   9.4080461e-01   3.4392518e+00   3.2008338e+00   3.6261197e+00   2.9076510e+00   3.3439089e+00   3.2073032e+00   3.3907558e+00   2.3329978e+00   3.3168472e+00   2.7081478e+00   2.6929215e+00   2.9283888e+00   2.9264154e+00   3.3942456e+00   2.3848677e+00   3.1312883e+00   3.2027420e+00   2.8385969e+00   3.3781905e+00   2.7324845e+00   3.5133135e+00   2.7602023e+00   3.6563535e+00   3.3905763e+00   3.0256209e+00   3.1312883e+00   3.5423793e+00   3.7299679e+00   3.2179031e+00   2.3661539e+00   2.6906231e+00   2.6054995e+00   2.6801752e+00   3.8138933e+00   3.2027420e+00   
3.1888037e+00   3.4187265e+00   3.2459231e+00   2.8060305e+00   2.8359967e+00   3.1453916e+00   3.2886661e+00   2.7943622e+00   2.3874574e+00   2.9533314e+00   2.8877105e+00   2.9141136e+00   3.0147886e+00   2.0992326e+00   2.8425716e+00   4.7630756e+00   3.8535531e+00   4.6550014e+00   4.2947468e+00   4.5389196e+00   5.3632740e+00   3.3234239e+00   5.0232338e+00   4.5363757e+00   4.8985515e+00   3.8441304e+00   4.0476997e+00   4.2599433e+00   3.8106152e+00   3.9516603e+00   4.0850903e+00   4.1995425e+00   5.4671477e+00   5.6957748e+00   3.8037734e+00   4.4771379e+00   3.6783395e+00   5.4690202e+00   3.6632177e+00   4.4259826e+00   4.7160634e+00   3.5560806e+00   3.6239123e+00   4.3464973e+00   4.5187406e+00   4.8619146e+00   5.2001625e+00   4.3635746e+00   3.7979761e+00   4.2843668e+00   4.9451734e+00   4.3709882e+00   4.1899199e+00   3.5299194e+00   4.1707976e+00   4.3972344e+00   3.9457376e+00   3.8535531e+00   4.6546556e+00   4.5024055e+00   4.0227020e+00   3.8004969e+00   3.9484773e+00   4.1635108e+00   3.8079860e+00   3.0811765e-01   6.5223271e-01   0.0000000e+00   5.0991930e-01   3.2586371e-01   4.2667565e-01   8.3172002e-01   5.0991930e-01   5.6769031e-01   7.5082357e-01   2.1845981e-01   7.0479928e-01   3.0811765e-01   6.4755655e-01   2.1845981e-01   3.4865562e+00   3.1726595e+00   3.6377960e+00   2.5987470e+00   3.2814045e+00   3.0627375e+00   3.3515846e+00   1.8841865e+00   3.2769379e+00   2.5038079e+00   2.1311468e+00   2.8311678e+00   2.6104387e+00   3.2962520e+00   2.2214438e+00   3.1433122e+00   3.0878634e+00   2.6552472e+00   3.1570103e+00   2.4668912e+00   3.4394878e+00   2.6411293e+00   3.5233648e+00   3.2747247e+00   2.9659871e+00   3.1154783e+00   3.5134741e+00   3.7059620e+00   3.1148696e+00   2.0851901e+00   2.3731428e+00   2.2655571e+00   2.4927109e+00   3.6920087e+00   3.0823446e+00   3.1337459e+00   3.4135200e+00   3.0481703e+00   2.6780487e+00   2.5874301e+00   2.9489507e+00   3.2027420e+00   2.5873149e+00   1.8973383e+00   2.7738355e+00   2.7632614e+00   2.7778954e+00   2.9269923e+00   1.6390769e+00   2.6848587e+00   4.7106706e+00   3.7313856e+00   4.6446321e+00   4.2142736e+00   4.4836580e+00   5.3716885e+00   3.1250284e+00   5.0074019e+00   4.4485220e+00   4.9128219e+00   3.8150636e+00   3.9624529e+00   4.2358121e+00   3.6602286e+00   3.8605980e+00   4.0488387e+00   4.1418643e+00   5.4970002e+00   5.6855224e+00   3.5964347e+00   4.4685630e+00   3.5634461e+00   5.4730406e+00   3.5693950e+00   4.3989089e+00   4.7150659e+00   3.4668130e+00   3.5464993e+00   4.2723380e+00   4.5155386e+00   4.8594290e+00   5.2647079e+00   4.2921213e+00   3.7064459e+00   4.1581964e+00   4.9913682e+00   4.3286007e+00   4.1303097e+00   3.4468286e+00   4.1669742e+00   4.3729308e+00   3.9624170e+00   3.7313856e+00   4.6297577e+00   4.4844827e+00   4.0056359e+00   3.6817961e+00   3.9035218e+00   4.1179678e+00   3.7164366e+00   5.2942799e-01   3.0811765e-01   6.0611244e-01   3.2586371e-01   3.0546431e-01   9.4125538e-01   6.0060595e-01   5.2574978e-01   8.1558458e-01   2.8507955e-01   6.4755655e-01   4.1312257e-01   5.5419992e-01   2.0656129e-01   3.7045940e+00   3.4142500e+00   3.8690719e+00   2.8688189e+00   3.5226542e+00   3.3385842e+00   3.6010215e+00   2.1580776e+00   3.5196916e+00   2.7652601e+00   2.4083873e+00   3.0820950e+00   2.8783309e+00   3.5617386e+00   2.4693940e+00   3.3658240e+00   3.3558214e+00   2.9322808e+00   3.4112518e+00   2.7424260e+00   3.6945405e+00   2.8867565e+00   3.7848217e+00   3.5471494e+00   3.2076743e+00   3.3449470e+00   3.7498842e+00   3.9450818e+00   
3.3735875e+00   2.3490515e+00   2.6490839e+00   2.5444216e+00   2.7549369e+00   3.9621873e+00   3.3527310e+00   3.3865418e+00   3.6475099e+00   3.3024121e+00   2.9459653e+00   2.8566322e+00   3.2311957e+00   3.4653703e+00   2.8535197e+00   2.1708533e+00   3.0455175e+00   3.0364473e+00   3.0465086e+00   3.1799184e+00   1.8842354e+00   2.9509414e+00   4.9594922e+00   3.9924292e+00   4.8844442e+00   4.4804205e+00   4.7354764e+00   5.6130421e+00   3.3873806e+00   5.2583931e+00   4.7090394e+00   5.1413739e+00   4.0532097e+00   4.2156327e+00   4.4735121e+00   3.9153764e+00   4.0928954e+00   4.2826383e+00   4.4004808e+00   5.7341534e+00   5.9271927e+00   3.8700742e+00   4.7016733e+00   3.8156003e+00   5.7155096e+00   3.8175051e+00   4.6462020e+00   4.9620553e+00   3.7143727e+00   3.8000004e+00   4.5254820e+00   4.7613782e+00   5.0991856e+00   5.4888822e+00   4.5411167e+00   3.9718373e+00   4.4397363e+00   5.2074442e+00   4.5701358e+00   4.3916992e+00   3.6996802e+00   4.3970196e+00   4.6045465e+00   4.1691696e+00   3.9924292e+00   4.8725396e+00   4.7150659e+00   4.2256400e+00   3.9292780e+00   4.1454529e+00   4.3599285e+00   3.9798670e+00   6.5223271e-01   1.1268457e+00   4.1449626e-01   5.0090417e-01   1.3784393e+00   1.1053488e+00   5.8851328e-01   6.6827038e-01   7.6787403e-01   4.8036801e-01   9.0852141e-01   2.8192292e-01   5.0905001e-01   3.5117473e+00   3.2719724e+00   3.6961290e+00   2.8060101e+00   3.3799936e+00   3.2401159e+00   3.4709594e+00   2.1293854e+00   3.3643376e+00   2.6848587e+00   2.4115946e+00   2.9723937e+00   2.7984009e+00   3.4455106e+00   2.3749088e+00   3.1913397e+00   3.2574960e+00   2.8320532e+00   3.3150921e+00   2.6637326e+00   3.5883493e+00   2.7640668e+00   3.6695251e+00   3.4317240e+00   3.0617355e+00   3.1820648e+00   3.5855784e+00   3.7950050e+00   3.2620096e+00   2.2645766e+00   2.5831070e+00   2.4810276e+00   2.6563403e+00   3.8575846e+00   3.2574960e+00   3.2718360e+00   3.4856613e+00   3.1913594e+00   2.8466990e+00   2.7801709e+00   3.1437608e+00   3.3466419e+00   2.7595194e+00   2.1498672e+00   2.9543365e+00   2.9330570e+00   2.9459653e+00   3.0511838e+00   1.8555964e+00   2.8534301e+00   4.8490742e+00   3.8962785e+00   4.7308926e+00   4.3643842e+00   4.6144072e+00   5.4442665e+00   3.3129652e+00   5.0997539e+00   4.5816539e+00   4.9883395e+00   3.9213002e+00   4.0961125e+00   4.3313403e+00   3.8279951e+00   4.0003537e+00   4.1625074e+00   4.2731561e+00   5.5604940e+00   5.7635900e+00   3.7812981e+00   4.5580745e+00   3.7236571e+00   5.5437155e+00   3.6992145e+00   4.5120086e+00   4.8012240e+00   3.5989213e+00   3.6872980e+00   4.4084575e+00   4.5949523e+00   4.9306472e+00   5.2918657e+00   4.4250143e+00   3.8510143e+00   4.3338058e+00   5.0198276e+00   4.4571663e+00   4.2687771e+00   3.5910375e+00   4.2454653e+00   4.4729144e+00   4.0139676e+00   3.8962785e+00   4.7377574e+00   4.5853438e+00   4.0877338e+00   3.8179781e+00   4.0161568e+00   4.2490407e+00   3.8755800e+00   5.0991930e-01   3.2586371e-01   4.2667565e-01   8.3172002e-01   5.0991930e-01   5.6769031e-01   7.5082357e-01   2.1845981e-01   7.0479928e-01   3.0811765e-01   6.4755655e-01   2.1845981e-01   3.4865562e+00   3.1726595e+00   3.6377960e+00   2.5987470e+00   3.2814045e+00   3.0627375e+00   3.3515846e+00   1.8841865e+00   3.2769379e+00   2.5038079e+00   2.1311468e+00   2.8311678e+00   2.6104387e+00   3.2962520e+00   2.2214438e+00   3.1433122e+00   3.0878634e+00   2.6552472e+00   3.1570103e+00   2.4668912e+00   3.4394878e+00   2.6411293e+00   3.5233648e+00   3.2747247e+00   2.9659871e+00   
3.1154783e+00   3.5134741e+00   3.7059620e+00   3.1148696e+00   2.0851901e+00   2.3731428e+00   2.2655571e+00   2.4927109e+00   3.6920087e+00   3.0823446e+00   3.1337459e+00   3.4135200e+00   3.0481703e+00   2.6780487e+00   2.5874301e+00   2.9489507e+00   3.2027420e+00   2.5873149e+00   1.8973383e+00   2.7738355e+00   2.7632614e+00   2.7778954e+00   2.9269923e+00   1.6390769e+00   2.6848587e+00   4.7106706e+00   3.7313856e+00   4.6446321e+00   4.2142736e+00   4.4836580e+00   5.3716885e+00   3.1250284e+00   5.0074019e+00   4.4485220e+00   4.9128219e+00   3.8150636e+00   3.9624529e+00   4.2358121e+00   3.6602286e+00   3.8605980e+00   4.0488387e+00   4.1418643e+00   5.4970002e+00   5.6855224e+00   3.5964347e+00   4.4685630e+00   3.5634461e+00   5.4730406e+00   3.5693950e+00   4.3989089e+00   4.7150659e+00   3.4668130e+00   3.5464993e+00   4.2723380e+00   4.5155386e+00   4.8594290e+00   5.2647079e+00   4.2921213e+00   3.7064459e+00   4.1581964e+00   4.9913682e+00   4.3286007e+00   4.1303097e+00   3.4468286e+00   4.1669742e+00   4.3729308e+00   3.9624170e+00   3.7313856e+00   4.6297577e+00   4.4844827e+00   4.0056359e+00   3.6817961e+00   3.9035218e+00   4.1179678e+00   3.7164366e+00   7.3813096e-01   6.8961791e-01   7.0086313e-01   2.0000000e-01   7.3805807e-01   1.0030700e+00   4.0293660e-01   9.4352681e-01   2.5251796e-01   1.0120221e+00   6.2024833e-01   3.8265307e+00   3.4552560e+00   3.9537404e+00   2.8022534e+00   3.5701225e+00   3.2863508e+00   3.6126198e+00   2.0524386e+00   3.5845127e+00   2.6848587e+00   2.2888897e+00   3.0684384e+00   2.8791325e+00   3.5464993e+00   2.4469125e+00   3.4686755e+00   3.2961206e+00   2.9072057e+00   3.4107902e+00   2.6959009e+00   3.6545040e+00   2.9195876e+00   3.7806187e+00   3.5312474e+00   3.2669151e+00   3.4295849e+00   3.8277021e+00   3.9909030e+00   3.3562690e+00   2.3465937e+00   2.5906376e+00   2.4892531e+00   2.7423171e+00   3.9193693e+00   3.2780620e+00   3.3708389e+00   3.7190229e+00   3.3268984e+00   2.8983020e+00   2.7954161e+00   3.1611584e+00   3.4557351e+00   2.8328337e+00   2.0658700e+00   2.9919517e+00   2.9960210e+00   3.0059712e+00   3.2048547e+00   1.8038968e+00   2.9141136e+00   4.9189452e+00   3.9342203e+00   4.9208723e+00   4.4494735e+00   4.7160542e+00   5.6635213e+00   3.2909043e+00   5.2946188e+00   4.7062325e+00   5.1779277e+00   4.0657707e+00   4.2053647e+00   4.4986279e+00   3.8509694e+00   4.0384339e+00   4.2739171e+00   4.3927415e+00   5.7909894e+00   5.9706827e+00   3.8237409e+00   4.7267599e+00   3.7490739e+00   5.7704875e+00   3.8154018e+00   4.6503268e+00   5.0044958e+00   3.7060524e+00   3.7762575e+00   4.5037232e+00   4.8164539e+00   5.1574084e+00   5.5860081e+00   4.5195237e+00   3.9601358e+00   4.3901702e+00   5.2992055e+00   4.5412859e+00   4.3739561e+00   3.6694918e+00   4.4403343e+00   4.6130415e+00   4.2323959e+00   3.9342203e+00   4.8773871e+00   4.7190595e+00   4.2559866e+00   3.9201797e+00   4.1523445e+00   4.3289315e+00   3.9302319e+00   2.1845981e-01   1.1486378e+00   7.0784540e-01   4.0438741e-01   5.0503591e-01   4.4651726e-01   4.0147421e-01   5.0905001e-01   3.2352160e-01   1.4096146e-01   3.4156574e+00   3.1235447e+00   3.5778307e+00   2.6097685e+00   3.2346686e+00   3.0478400e+00   3.3101102e+00   1.9193093e+00   3.2270948e+00   2.4922287e+00   2.2081369e+00   2.7965957e+00   2.6184141e+00   3.2685282e+00   2.1916898e+00   3.0773654e+00   3.0673749e+00   2.6423274e+00   3.1444983e+00   2.4678343e+00   3.4088888e+00   2.6016025e+00   3.4988409e+00   3.2521427e+00   2.9171710e+00   3.0559188e+00   
3.4597125e+00   3.6553612e+00   3.0847983e+00   2.0765921e+00   2.3848740e+00   2.2817008e+00   2.4722095e+00   3.6724425e+00   3.0650478e+00   3.0981264e+00   3.3567173e+00   3.0288896e+00   2.6566259e+00   2.5851693e+00   2.9455446e+00   3.1719381e+00   2.5729378e+00   1.9450955e+00   2.7611864e+00   2.7431084e+00   2.7570191e+00   2.8884401e+00   1.6625998e+00   2.6648989e+00   4.6768871e+00   3.7101281e+00   4.5948688e+00   4.1876541e+00   4.4480565e+00   5.3202405e+00   3.1169790e+00   4.9630576e+00   4.4189653e+00   4.8572313e+00   3.7684708e+00   3.9292780e+00   4.1872611e+00   3.6418662e+00   3.8262353e+00   4.0041417e+00   4.1076175e+00   5.4411191e+00   5.6370076e+00   3.5929878e+00   4.4174698e+00   3.5389106e+00   5.4223305e+00   3.5340177e+00   4.3569684e+00   4.6672392e+00   3.4309563e+00   3.5133135e+00   4.2392500e+00   4.4661721e+00   4.8075242e+00   5.1977119e+00   4.2572858e+00   3.6784045e+00   4.1454521e+00   4.9233665e+00   4.2900309e+00   4.0984960e+00   3.4145942e+00   4.1120703e+00   4.3243538e+00   3.8957047e+00   3.7101281e+00   4.5857922e+00   4.4360042e+00   3.9497198e+00   3.6509512e+00   3.8600234e+00   4.0800357e+00   3.6915258e+00   1.2223099e+00   6.2024833e-01   3.7255734e-01   6.2081167e-01   5.0905001e-01   3.7598397e-01   4.4651726e-01   3.4583729e-01   2.1269358e-01   3.6091470e+00   3.3105724e+00   3.7708932e+00   2.7979838e+00   3.4251430e+00   3.2391031e+00   3.4951642e+00   2.1075335e+00   3.4235403e+00   2.6687055e+00   2.3989464e+00   2.9762945e+00   2.8216056e+00   3.4603628e+00   2.3673218e+00   3.2680041e+00   3.2498302e+00   2.8414525e+00   3.3361912e+00   2.6626548e+00   3.5849794e+00   2.7906520e+00   3.6926714e+00   3.4497714e+00   3.1105675e+00   3.2468925e+00   3.6557661e+00   3.8432801e+00   3.2705166e+00   2.2719663e+00   2.5786309e+00   2.4784234e+00   2.6629602e+00   3.8619321e+00   3.2465133e+00   3.2780765e+00   3.5475145e+00   3.2266676e+00   2.8416143e+00   2.7719788e+00   3.1392934e+00   3.3624692e+00   2.7656089e+00   2.1335398e+00   2.9496515e+00   2.9338590e+00   2.9447165e+00   3.0808399e+00   1.8330979e+00   2.8520904e+00   4.8482992e+00   3.8884996e+00   4.7814945e+00   4.3763964e+00   4.6280797e+00   5.5131379e+00   3.2922206e+00   5.1594703e+00   4.6125121e+00   5.0342965e+00   3.9453171e+00   4.1137002e+00   4.3683295e+00   3.8151836e+00   3.9816719e+00   4.1715493e+00   4.2963195e+00   5.6322635e+00   5.8289682e+00   3.7874513e+00   4.5945190e+00   3.7079546e+00   5.6180203e+00   3.7164611e+00   4.5394421e+00   4.8614468e+00   3.6107037e+00   3.6929889e+00   4.4201622e+00   4.6634723e+00   5.0015266e+00   5.3906681e+00   4.4348302e+00   3.8718490e+00   4.3427947e+00   5.1078309e+00   4.4583488e+00   4.2864208e+00   3.5920050e+00   4.2919639e+00   4.4953238e+00   4.0619633e+00   3.8884996e+00   4.7650352e+00   4.6046026e+00   4.1173867e+00   3.8320624e+00   4.0389546e+00   4.2480032e+00   3.8727359e+00   9.0049692e-01   1.2307737e+00   1.5483011e+00   7.1462831e-01   1.5272277e+00   9.0074515e-01   1.4700179e+00   1.0331736e+00   3.7896333e+00   3.4304205e+00   3.9179666e+00   2.7683644e+00   3.5321772e+00   3.2685282e+00   3.5962433e+00   2.0250421e+00   3.5485736e+00   2.6642702e+00   2.2244833e+00   3.0435803e+00   2.8325748e+00   3.5230967e+00   2.4205131e+00   3.4307077e+00   3.2815626e+00   2.8843735e+00   3.3658240e+00   2.6687055e+00   3.6389628e+00   2.8830783e+00   3.7490739e+00   3.5087649e+00   3.2305801e+00   3.3908902e+00   3.7876239e+00   3.9561876e+00   3.3306114e+00   2.3112968e+00   2.5604155e+00   
2.4585271e+00   2.7122813e+00   3.8971376e+00   3.2667813e+00   3.3674720e+00   3.6849783e+00   3.2837729e+00   2.8840079e+00   2.7685572e+00   3.1442943e+00   3.4335342e+00   2.8028287e+00   2.0286682e+00   2.9704704e+00   2.9822537e+00   2.9868163e+00   3.1741954e+00   1.7644184e+00   2.8908296e+00   4.8985114e+00   3.9102500e+00   4.8863496e+00   4.4272873e+00   4.6888407e+00   5.6294495e+00   3.2706756e+00   5.2636641e+00   4.6765452e+00   5.1547576e+00   4.0378383e+00   4.1743230e+00   4.4638518e+00   3.8230269e+00   4.0056625e+00   4.2450961e+00   4.3676124e+00   5.7745449e+00   5.9344852e+00   3.7932062e+00   4.6942925e+00   3.7245053e+00   5.7354045e+00   3.7814598e+00   4.6271849e+00   4.9763401e+00   3.6736483e+00   3.7511498e+00   4.4747039e+00   4.7841150e+00   5.1205765e+00   5.5660104e+00   4.4889834e+00   3.9348528e+00   4.3728356e+00   5.2548517e+00   4.5226167e+00   4.3526639e+00   3.6449787e+00   4.4041536e+00   4.5787890e+00   4.1879339e+00   3.9102500e+00   4.8490587e+00   4.6897155e+00   4.2150171e+00   3.8841455e+00   4.1203091e+00   4.3121165e+00   3.9109678e+00   6.7975091e-01   9.0056222e-01   4.1586001e-01   8.2275389e-01   2.0656129e-01   9.4287188e-01   6.0121055e-01   3.8264361e+00   3.4551376e+00   3.9537404e+00   2.8149627e+00   3.5710248e+00   3.2874334e+00   3.6122384e+00   2.0711789e+00   3.5849006e+00   2.6879715e+00   2.3281827e+00   3.0685922e+00   2.8946126e+00   3.5468964e+00   2.4478108e+00   3.4686755e+00   3.2962520e+00   2.9098194e+00   3.4214793e+00   2.7033034e+00   3.6543993e+00   2.9209919e+00   3.7841436e+00   3.5321717e+00   3.2673909e+00   3.4297053e+00   3.8284763e+00   3.9909892e+00   3.3567173e+00   2.3533545e+00   2.6019240e+00   2.5015675e+00   2.7452885e+00   3.9207248e+00   3.2781950e+00   3.3698144e+00   3.7190229e+00   3.3356294e+00   2.8984764e+00   2.8022534e+00   3.1646752e+00   3.4558535e+00   2.8373077e+00   2.0905239e+00   2.9944056e+00   2.9961831e+00   3.0065426e+00   3.2053509e+00   1.8216743e+00   2.9155237e+00   4.9187518e+00   3.9355645e+00   4.9209267e+00   4.4497146e+00   4.7161140e+00   5.6635613e+00   3.2956846e+00   5.2947833e+00   4.7084108e+00   5.1767384e+00   4.0656879e+00   4.2065257e+00   4.4986941e+00   3.8543544e+00   4.0391220e+00   4.2738429e+00   4.3928114e+00   5.7890564e+00   5.9715517e+00   3.8320624e+00   4.7267005e+00   3.7498842e+00   5.7708014e+00   3.8168398e+00   4.6501080e+00   5.0044433e+00   3.7068836e+00   3.7763550e+00   4.5042646e+00   4.8165110e+00   5.1578102e+00   5.5839155e+00   4.5200609e+00   3.9608542e+00   4.3918790e+00   5.2992517e+00   4.5407542e+00   4.3739561e+00   3.6695956e+00   4.4403343e+00   4.6130415e+00   4.2323959e+00   3.9355645e+00   4.8773316e+00   4.7188476e+00   4.2560614e+00   3.9234348e+00   4.1524235e+00   4.3283407e+00   3.9303212e+00   3.8934542e-01   5.4292906e-01   4.4535192e-01   5.3309112e-01   4.5581864e-01   4.2538717e-01   3.3319064e+00   3.0058998e+00   3.4822680e+00   2.4967542e+00   3.1249915e+00   2.9284660e+00   3.1836200e+00   1.8265014e+00   3.1331426e+00   2.3481462e+00   2.1458939e+00   2.6572703e+00   2.5437119e+00   3.1519721e+00   2.0470220e+00   2.9815576e+00   2.9302134e+00   2.5421955e+00   3.0374850e+00   2.3632684e+00   3.2598750e+00   2.4873223e+00   3.3884269e+00   3.1477087e+00   2.8156804e+00   2.9556616e+00   3.3682605e+00   3.5401725e+00   2.9561205e+00   1.9790422e+00   2.2832934e+00   2.1885968e+00   2.3571494e+00   3.5486864e+00   2.9260462e+00   2.9587612e+00   3.2530866e+00   2.9361879e+00   2.5256527e+00   2.4631898e+00   
2.8325946e+00   3.0534395e+00   2.4619105e+00   1.8618589e+00   2.6377354e+00   2.6235630e+00   2.6314198e+00   2.7785210e+00   1.5475473e+00   2.5392051e+00   4.5179415e+00   3.5641186e+00   4.4752183e+00   4.0626016e+00   4.3069105e+00   5.2164277e+00   2.9689697e+00   4.8634846e+00   4.3067423e+00   4.7194915e+00   3.6262912e+00   3.7979761e+00   4.0547736e+00   3.4875352e+00   3.6400497e+00   3.8418637e+00   3.9849753e+00   5.3352888e+00   5.5294519e+00   3.4830211e+00   4.2776053e+00   3.3760149e+00   5.3253207e+00   3.4004918e+00   4.2238184e+00   4.5645220e+00   3.2914954e+00   3.3718862e+00   4.0995928e+00   4.3725648e+00   4.7075069e+00   5.1063282e+00   4.1113292e+00   3.5651462e+00   4.0375056e+00   4.8132427e+00   4.1268905e+00   3.9732869e+00   3.2685282e+00   3.9810803e+00   4.1706050e+00   3.7449914e+00   3.5641186e+00   4.4464848e+00   4.2774083e+00   3.7941767e+00   3.5148434e+00   3.7206115e+00   3.9162695e+00   3.5510950e+00   8.6137722e-01   3.2352160e-01   7.6166891e-01   4.2667565e-01   6.2660376e-01   3.0634640e+00   2.7392828e+00   3.2125175e+00   2.3391296e+00   2.8734025e+00   2.6707502e+00   2.9145708e+00   1.7569738e+00   2.8671376e+00   2.1479276e+00   2.1308063e+00   2.4108292e+00   2.3854031e+00   2.8851564e+00   1.8368900e+00   2.7201546e+00   2.6724144e+00   2.2982662e+00   2.8483417e+00   2.1701312e+00   3.0045018e+00   2.2531409e+00   3.1444983e+00   2.8783309e+00   2.5586145e+00   2.6967931e+00   3.1073497e+00   3.2779401e+00   2.7018605e+00   1.8104298e+00   2.1219691e+00   2.0368741e+00   2.1367260e+00   3.2905600e+00   2.6692615e+00   2.6939411e+00   2.9863438e+00   2.7320931e+00   2.2698938e+00   2.2722516e+00   2.5933163e+00   2.7845473e+00   2.2472326e+00   1.8195937e+00   2.4037412e+00   2.3571494e+00   2.3781826e+00   2.5200525e+00   1.5411691e+00   2.3001580e+00   4.2694227e+00   3.3237039e+00   4.2115652e+00   3.7930080e+00   4.0498216e+00   4.9425633e+00   2.7652100e+00   4.5847429e+00   4.0464464e+00   4.4666326e+00   3.3738930e+00   3.5479683e+00   3.8004969e+00   3.2711669e+00   3.4346585e+00   3.6043418e+00   3.7146255e+00   5.0600559e+00   5.2639977e+00   3.2609948e+00   4.0260071e+00   3.1481222e+00   5.0506841e+00   3.1599876e+00   3.9594081e+00   4.2851208e+00   3.0501817e+00   3.1181310e+00   3.8476631e+00   4.0931004e+00   4.4378867e+00   4.8317916e+00   3.8652515e+00   3.2970551e+00   3.7653550e+00   4.5583070e+00   3.8836720e+00   3.7008091e+00   3.0188386e+00   3.7282629e+00   3.9300286e+00   3.5186510e+00   3.3237039e+00   4.1891573e+00   4.0376133e+00   3.5648733e+00   3.2885200e+00   3.4693398e+00   3.6732275e+00   3.2917360e+00   8.1385214e-01   2.5251796e-01   7.6787403e-01   3.2586371e-01   3.5846101e+00   3.2546607e+00   3.7315988e+00   2.6609901e+00   3.3659358e+00   3.1439065e+00   3.4298218e+00   1.9383545e+00   3.3723944e+00   2.5580763e+00   2.1777421e+00   2.8983020e+00   2.6954219e+00   3.3808052e+00   2.2790175e+00   3.2344170e+00   3.1579733e+00   2.7462372e+00   3.2292608e+00   2.5444216e+00   3.5028333e+00   2.7205595e+00   3.6067948e+00   3.3670797e+00   3.0557792e+00   3.2048395e+00   3.6088607e+00   3.7891577e+00   3.1900581e+00   2.1641873e+00   2.4447974e+00   2.3408917e+00   2.5700421e+00   3.7710363e+00   3.1506190e+00   3.2040124e+00   3.5027314e+00   3.1322650e+00   2.7512730e+00   2.6533250e+00   3.0299528e+00   3.2862185e+00   2.6656374e+00   1.9477421e+00   2.8481000e+00   2.8454952e+00   2.8544453e+00   3.0132514e+00   1.6658308e+00   2.7590026e+00   4.7688362e+00   3.7943375e+00   4.7263320e+00   
4.2949506e+00   4.5532150e+00   5.4636648e+00   3.1768366e+00   5.1030228e+00   4.5342701e+00   4.9831843e+00   3.8820426e+00   4.0358897e+00   4.3088496e+00   3.7133362e+00   3.8949006e+00   4.1025163e+00   4.2237423e+00   5.5888590e+00   5.7742361e+00   3.6743314e+00   4.5370189e+00   3.6141649e+00   5.5687868e+00   3.6395551e+00   4.4736906e+00   4.8085584e+00   3.5338163e+00   3.6144782e+00   4.3417782e+00   4.6138035e+00   4.9524148e+00   5.3625968e+00   4.3570317e+00   3.7932922e+00   4.2488997e+00   5.0747397e+00   4.3832471e+00   4.2110583e+00   3.5113289e+00   4.2399031e+00   4.4320962e+00   4.0189525e+00   3.7943375e+00   4.7000551e+00   4.5409269e+00   4.0612006e+00   3.7475562e+00   3.9722979e+00   4.1719819e+00   3.7859347e+00   6.9325418e-01   2.1269358e-01   5.0905001e-01   3.3337914e+00   3.0376046e+00   3.4948415e+00   2.6099670e+00   3.1641230e+00   2.9745020e+00   3.2202056e+00   1.9796375e+00   3.1511769e+00   2.4469125e+00   2.3235342e+00   2.7196435e+00   2.6337042e+00   3.1881331e+00   2.1375243e+00   2.9985516e+00   2.9847499e+00   2.5867906e+00   3.1255136e+00   2.4464131e+00   3.3203249e+00   2.5432298e+00   3.4386456e+00   3.1757436e+00   2.8453413e+00   2.9797458e+00   3.3872988e+00   3.5731577e+00   3.0079222e+00   2.0710306e+00   2.3862284e+00   2.2923690e+00   2.4260574e+00   3.5964347e+00   2.9822786e+00   3.0067893e+00   3.2740296e+00   3.0040546e+00   2.5786309e+00   2.5579141e+00   2.8891491e+00   3.0885642e+00   2.5333642e+00   2.0283816e+00   2.7031874e+00   2.6626548e+00   2.6835197e+00   2.8149627e+00   1.7472675e+00   2.6016025e+00   4.5864727e+00   3.6358644e+00   4.5092329e+00   4.1008711e+00   4.3608329e+00   5.2329560e+00   3.0685922e+00   4.8761643e+00   4.3448624e+00   4.7688607e+00   3.6817961e+00   3.8534030e+00   4.1033021e+00   3.5811154e+00   3.7530357e+00   3.9183591e+00   4.0199092e+00   5.3504583e+00   5.5556442e+00   3.5517562e+00   4.3306939e+00   3.4641481e+00   5.3377017e+00   3.4637450e+00   4.2663890e+00   4.5772917e+00   3.3571533e+00   3.4296698e+00   4.1575709e+00   4.3797497e+00   4.7253427e+00   5.1097682e+00   4.1763898e+00   3.5983434e+00   4.0666282e+00   4.8419079e+00   4.2005634e+00   4.0083071e+00   3.3318481e+00   4.0278210e+00   4.2396851e+00   3.8171357e+00   3.6358644e+00   4.4969460e+00   4.3490518e+00   3.8705614e+00   3.5905415e+00   3.7766172e+00   3.9906340e+00   3.6052686e+00   7.6752131e-01   4.0147421e-01   3.6660608e+00   3.3135273e+00   3.8017611e+00   2.7018605e+00   3.4275679e+00   3.1699903e+00   3.4789136e+00   1.9730403e+00   3.4358679e+00   2.5840983e+00   2.2331309e+00   2.9422991e+00   2.7581254e+00   3.4189217e+00   2.3233512e+00   3.3121677e+00   3.1836200e+00   2.7812918e+00   3.2896000e+00   2.5816639e+00   3.5379744e+00   2.7795823e+00   3.6532797e+00   3.4029480e+00   3.1195329e+00   3.2770643e+00   3.6772238e+00   3.8493740e+00   3.2305582e+00   2.2173292e+00   2.4840285e+00   2.3814525e+00   2.6148507e+00   3.8019334e+00   3.1710836e+00   3.2447990e+00   3.5700242e+00   3.1955870e+00   2.7803619e+00   2.6879415e+00   3.0521985e+00   3.3264150e+00   2.7089627e+00   1.9908167e+00   2.8775912e+00   2.8744403e+00   2.8857836e+00   3.0658390e+00   1.7171798e+00   2.7936066e+00   4.8056033e+00   3.8247106e+00   4.7834029e+00   4.3283723e+00   4.5943507e+00   5.5219241e+00   3.2001457e+00   5.1552806e+00   4.5786596e+00   5.0423887e+00   3.9353963e+00   4.0804944e+00   4.3648743e+00   3.7469906e+00   3.9346929e+00   4.1523445e+00   4.2650653e+00   5.6468786e+00   5.8321617e+00   3.7127205e+00   
4.5943003e+00   3.6444925e+00   5.6275915e+00   3.6885686e+00   4.5212945e+00   4.8635596e+00   3.5807904e+00   3.6545040e+00   4.3827087e+00   4.6717461e+00   5.0136174e+00   5.4320857e+00   4.3994790e+00   3.8323332e+00   4.2736523e+00   5.1501184e+00   4.4249262e+00   4.2490074e+00   3.5500567e+00   4.3022895e+00   4.4864974e+00   4.0933482e+00   3.8247106e+00   4.7495571e+00   4.5943072e+00   4.1245629e+00   3.7976741e+00   4.0232438e+00   4.2129603e+00   3.8158144e+00   4.4535192e-01   3.3681634e+00   3.1015495e+00   3.5417515e+00   2.6663854e+00   3.2198557e+00   3.0579528e+00   3.2934163e+00   2.0153916e+00   3.2040320e+00   2.5204039e+00   2.3388377e+00   2.7956674e+00   2.6725726e+00   3.2655357e+00   2.2078644e+00   3.0402181e+00   3.0721050e+00   2.6595270e+00   3.1749624e+00   2.5094912e+00   3.4048516e+00   2.6019240e+00   3.5045178e+00   3.2523394e+00   2.8999277e+00   3.0267460e+00   3.4333346e+00   3.6317584e+00   3.0844423e+00   2.1209506e+00   2.4419061e+00   2.3443191e+00   2.4920874e+00   3.6775470e+00   3.0715602e+00   3.0884019e+00   3.3261336e+00   3.0501817e+00   2.6631094e+00   2.6243758e+00   2.9691171e+00   3.1659032e+00   2.5983106e+00   2.0538718e+00   2.7808700e+00   2.7473221e+00   2.7650187e+00   2.8804789e+00   1.7660585e+00   2.6784604e+00   4.6691123e+00   3.7183181e+00   4.5689156e+00   4.1821417e+00   4.4377562e+00   5.2873851e+00   3.1455384e+00   4.9362269e+00   4.4131751e+00   4.8285697e+00   3.7508315e+00   3.9249912e+00   4.1665786e+00   3.6588207e+00   3.8312584e+00   3.9914600e+00   4.0953360e+00   5.4042844e+00   5.6094349e+00   3.6203759e+00   4.3940519e+00   3.5472449e+00   5.3896044e+00   3.5318477e+00   4.3383367e+00   4.6370771e+00   3.4283809e+00   3.5084967e+00   4.2335104e+00   4.4348302e+00   4.7765764e+00   5.1494118e+00   4.2515997e+00   3.6735311e+00   4.1503255e+00   4.8803856e+00   4.2802235e+00   4.0874500e+00   3.4119017e+00   4.0856134e+00   4.3069340e+00   3.8658667e+00   3.7183181e+00   4.5670800e+00   4.4180998e+00   3.9298460e+00   3.6561219e+00   3.8459316e+00   4.0712063e+00   3.6910219e+00   3.5300704e+00   3.2300705e+00   3.6894189e+00   2.6906230e+00   3.3402581e+00   3.1455455e+00   3.4142500e+00   1.9871921e+00   3.3364095e+00   2.5801041e+00   2.2579027e+00   2.8953397e+00   2.7040361e+00   3.3706887e+00   2.2848507e+00   3.1889632e+00   3.1641787e+00   2.7405950e+00   3.2351115e+00   2.5567836e+00   3.5066271e+00   2.7031874e+00   3.5985833e+00   3.3547156e+00   3.0245025e+00   3.1656773e+00   3.5695690e+00   3.7624530e+00   3.1847809e+00   2.1665831e+00   2.4678343e+00   2.3636654e+00   2.5680682e+00   3.7710578e+00   3.1606607e+00   3.1985717e+00   3.4665002e+00   3.1244487e+00   2.7540755e+00   2.6724144e+00   3.0392407e+00   3.2747247e+00   2.6671530e+00   2.0066796e+00   2.8554471e+00   2.8427684e+00   2.8549070e+00   2.9928504e+00   1.7230625e+00   2.7611864e+00   4.7742557e+00   3.8047268e+00   4.7018935e+00   4.2892152e+00   4.5488617e+00   5.4303909e+00   3.2037606e+00   5.0724791e+00   4.5217060e+00   4.9623634e+00   3.8707551e+00   4.0296740e+00   4.2915574e+00   3.7321091e+00   3.9154512e+00   4.1022778e+00   4.2113304e+00   5.5520135e+00   5.7453700e+00   3.6849669e+00   4.5212945e+00   3.6308222e+00   5.5330176e+00   3.6335075e+00   4.4607162e+00   4.7770551e+00   3.5299194e+00   3.6126659e+00   4.3390241e+00   4.5771358e+00   4.9175276e+00   5.3118739e+00   4.3561658e+00   3.7812981e+00   4.2455646e+00   5.0340960e+00   4.3872001e+00   4.2015182e+00   3.5126820e+00   4.2176412e+00   4.4249262e+00   
3.9985367e+00   3.8047268e+00   4.6887893e+00   4.5358705e+00   4.0502685e+00   3.7475470e+00   3.9619683e+00   4.1767972e+00   3.7895730e+00   6.0611244e-01   2.1845981e-01   1.6212669e+00   5.6769031e-01   1.3103855e+00   7.0437330e-01   2.2923690e+00   4.4651726e-01   1.8497891e+00   2.2196852e+00   1.1283882e+00   1.3099706e+00   9.0827783e-01   1.5790055e+00   3.7427929e-01   1.4018200e+00   1.2701139e+00   1.1341579e+00   1.5133392e+00   1.1134787e+00   1.0264409e+00   8.7202528e-01   9.2264612e-01   6.6432544e-01   4.5470518e-01   4.1449626e-01   4.3456114e-01   1.0085601e+00   1.5838351e+00   1.6415861e+00   1.6742876e+00   1.3140585e+00   1.0496979e+00   1.6013574e+00   1.0054037e+00   3.0546431e-01   1.0168833e+00   1.4293465e+00   1.5774037e+00   1.5278635e+00   9.0252542e-01   1.2994764e+00   2.2231652e+00   1.4317371e+00   1.3207609e+00   1.3224963e+00   8.3649708e-01   2.2607507e+00   1.3421549e+00   1.5412452e+00   1.2539702e+00   1.2643026e+00   1.0324775e+00   1.2342162e+00   1.9387309e+00   2.1209313e+00   1.6105602e+00   1.1912106e+00   1.5832517e+00   7.2486328e-01   8.5585239e-01   9.4009473e-01   1.3873503e+00   1.3945703e+00   1.0313560e+00   8.7720955e-01   2.0658700e+00   2.2655571e+00   1.2460824e+00   1.1834841e+00   1.4368020e+00   2.0378171e+00   7.9878917e-01   1.0960883e+00   1.3102767e+00   8.5105559e-01   9.2480363e-01   1.0805899e+00   1.1043883e+00   1.4313279e+00   1.8006336e+00   1.1235486e+00   7.6625946e-01   1.1633029e+00   1.5390703e+00   1.2493717e+00   9.0965328e-01   1.0182895e+00   8.6983677e-01   1.1880428e+00   9.2095040e-01   1.2539702e+00   1.3335022e+00   1.3109705e+00   9.5035453e-01   9.2112464e-01   7.6166891e-01   1.1418127e+00   1.1276971e+00   5.6700421e-01   1.1449732e+00   4.0293660e-01   7.3813096e-01   2.1845981e-01   1.7551534e+00   3.4583729e-01   1.2603076e+00   1.7354460e+00   5.3588338e-01   1.0777972e+00   3.8934542e-01   1.0669582e+00   3.0811765e-01   8.0294841e-01   7.8768770e-01   1.0018083e+00   1.0175773e+00   5.5419992e-01   5.9426792e-01   7.3496673e-01   4.8927739e-01   3.4378533e-01   2.5651975e-01   5.2655962e-01   5.4292906e-01   4.4417983e-01   1.1634384e+00   1.1527805e+00   1.2020363e+00   8.1521713e-01   7.2526325e-01   1.0018083e+00   4.1449626e-01   3.2586371e-01   9.0277242e-01   8.3172002e-01   1.0440187e+00   9.7779835e-01   3.2816937e-01   8.1521713e-01   1.7083888e+00   8.6361309e-01   7.3145860e-01   7.3145860e-01   3.6171588e-01   1.7816674e+00   7.6914805e-01   1.6177449e+00   8.3060013e-01   1.4732400e+00   1.1107977e+00   1.3546017e+00   2.2147080e+00   1.5404344e+00   1.8624350e+00   1.3603471e+00   1.7544191e+00   6.8961791e-01   8.7478495e-01   1.0733200e+00   9.5271386e-01   1.0466623e+00   9.9348625e-01   1.0085601e+00   2.3452277e+00   2.5288464e+00   1.0480665e+00   1.3130641e+00   8.9653332e-01   2.3282127e+00   5.8914551e-01   1.2436109e+00   1.5625142e+00   4.8927739e-01   4.8927739e-01   1.1593224e+00   1.3813076e+00   1.7138020e+00   2.1603815e+00   1.1866786e+00   6.4755655e-01   1.1521791e+00   1.8620175e+00   1.2565757e+00   1.0067784e+00   4.8927739e-01   1.0056742e+00   1.2594846e+00   9.3049742e-01   8.3060013e-01   1.4762619e+00   1.3817041e+00   9.4558103e-01   7.9613242e-01   7.7074935e-01   1.0627606e+00   7.0776547e-01   1.5593809e+00   4.8036801e-01   1.2165505e+00   6.1151102e-01   2.2871743e+00   4.0176783e-01   1.7963441e+00   2.1851225e+00   1.0906388e+00   1.2884575e+00   8.0619006e-01   1.6156775e+00   5.0905001e-01   1.3093850e+00   1.2434795e+00   1.0262547e+00   
1.4875372e+00   1.0069726e+00   1.0669582e+00   7.4511469e-01   8.2384013e-01   6.9728513e-01   5.3022554e-01   3.0811765e-01   2.5651975e-01   9.2264612e-01   1.6478667e+00   1.6180636e+00   1.6694817e+00   1.3199714e+00   9.2288144e-01   1.5068702e+00   9.2859317e-01   2.4837156e-01   9.3238528e-01   1.3813076e+00   1.5238543e+00   1.4346522e+00   8.1130291e-01   1.2794849e+00   2.2234347e+00   1.3629833e+00   1.2671726e+00   1.2652657e+00   8.1810461e-01   2.3116343e+00   1.2988558e+00   1.3410314e+00   1.1276971e+00   1.0590298e+00   8.2552685e-01   1.0264409e+00   1.7485421e+00   2.0171203e+00   1.4118594e+00   9.7949166e-01   1.3980896e+00   5.7324170e-01   6.6317860e-01   7.4586719e-01   1.2603076e+00   1.2604558e+00   8.7504951e-01   6.6432544e-01   1.8915404e+00   2.0711789e+00   1.1178264e+00   9.9368623e-01   1.3223897e+00   1.8515012e+00   6.6384020e-01   8.9303452e-01   1.1107977e+00   7.2823007e-01   8.1099042e-01   8.7170815e-01   9.0876485e-01   1.2370832e+00   1.6626615e+00   9.2112464e-01   6.2482915e-01   9.7377870e-01   1.3752391e+00   1.0720678e+00   7.0386584e-01   9.0876485e-01   6.8917100e-01   1.0120221e+00   8.0294841e-01   1.1276971e+00   1.1329323e+00   1.1353806e+00   8.1385214e-01   7.7588000e-01   5.8914551e-01   9.8054887e-01   1.0085601e+00   1.0879524e+00   6.2605182e-01   1.2079117e+00   8.2305664e-01   1.1903922e+00   4.4651726e-01   6.5648056e-01   7.4164639e-01   5.2942799e-01   8.9852394e-01   6.4755655e-01   1.3035649e+00   7.7074935e-01   4.8135521e-01   7.7074935e-01   2.5651975e-01   1.1022599e+00   6.8917100e-01   1.0627606e+00   8.6290690e-01   9.7759114e-01   1.1868139e+00   1.3969297e+00   1.4333755e+00   7.6166891e-01   5.6075294e-01   2.5251796e-01   3.7427929e-01   4.4651726e-01   1.1444449e+00   7.7074935e-01   1.1571858e+00   1.3491011e+00   8.2624515e-01   7.0086313e-01   2.0000000e-01   4.4535192e-01   8.9852394e-01   3.7427929e-01   7.7885297e-01   4.1449626e-01   7.0826681e-01   6.1092863e-01   8.2275389e-01   1.0198386e+00   5.0905001e-01   2.2002582e+00   1.1640914e+00   2.2347161e+00   1.6833015e+00   1.9584639e+00   2.9773446e+00   7.2852070e-01   2.5984158e+00   1.9494155e+00   2.5625921e+00   1.4644662e+00   1.4491244e+00   1.8190688e+00   1.0934620e+00   1.3861754e+00   1.6265426e+00   1.6626615e+00   3.1878246e+00   3.2549253e+00   1.0346741e+00   2.0606771e+00   1.0425476e+00   3.0882196e+00   1.1022599e+00   1.9692383e+00   2.3537589e+00   1.0082605e+00   1.0950112e+00   1.7332099e+00   2.1942739e+00   2.5032087e+00   3.0886055e+00   1.7538274e+00   1.2342162e+00   1.6237100e+00   2.7181432e+00   1.8926658e+00   1.6507294e+00   1.0082605e+00   1.8244836e+00   1.9254808e+00   1.7303440e+00   1.1640914e+00   2.1641182e+00   2.0565627e+00   1.6435752e+00   1.1782910e+00   1.4699978e+00   1.7142546e+00   1.2095267e+00   8.0326782e-01   5.0991930e-01   1.8350577e+00   2.1269358e-01   1.3537729e+00   1.7146525e+00   6.5172743e-01   8.5585239e-01   4.0438741e-01   1.1847335e+00   3.4583729e-01   9.0252542e-01   8.2372435e-01   6.2024833e-01   1.0324775e+00   6.6827038e-01   6.5172743e-01   3.8776762e-01   4.4535192e-01   3.2816937e-01   2.5651975e-01   3.2586371e-01   4.3691963e-01   5.0180477e-01   1.2342162e+00   1.1573546e+00   1.2172454e+00   8.7848692e-01   6.2205176e-01   1.1016264e+00   6.9006418e-01   3.2586371e-01   5.2371571e-01   9.4492923e-01   1.0646687e+00   1.0101422e+00   4.1449626e-01   8.2552685e-01   1.7679545e+00   9.2288144e-01   8.3888121e-01   8.2929029e-01   3.8934542e-01   1.8776878e+00   8.5434758e-01   
1.5481649e+00   7.9613242e-01   1.3657247e+00   1.0085601e+00   1.2641849e+00   2.1002817e+00   1.6030661e+00   1.7482192e+00   1.2100024e+00   1.7005893e+00   6.6491075e-01   7.3535471e-01   9.7949166e-01   8.8358844e-01   1.0423677e+00   9.5498315e-01   9.1051084e-01   2.2737459e+00   2.4086493e+00   7.2486328e-01   1.2326306e+00   9.4832302e-01   2.2096958e+00   3.8934542e-01   1.1729895e+00   1.4561933e+00   3.8776762e-01   4.8927739e-01   1.0574300e+00   1.2643026e+00   1.5918956e+00   2.0914667e+00   1.0906388e+00   5.0817745e-01   1.0182895e+00   1.7457596e+00   1.2250414e+00   9.1663180e-01   5.4292906e-01   9.1750357e-01   1.1891470e+00   8.8358844e-01   7.9613242e-01   1.3916739e+00   1.3267389e+00   8.9303452e-01   5.3309112e-01   6.9325418e-01   1.0574013e+00   7.0776547e-01   7.0776547e-01   1.3071453e+00   9.0049692e-01   6.9006418e-01   1.2079117e+00   3.6171588e-01   7.1791510e-01   4.1586001e-01   9.0049692e-01   1.0069726e+00   2.5251796e-01   4.4651726e-01   6.9325418e-01   6.2538346e-01   5.9426792e-01   5.6631629e-01   6.6827038e-01   4.1449626e-01   7.0437330e-01   9.0277242e-01   1.1055069e+00   1.0496979e+00   3.2586371e-01   1.0083666e+00   7.4164639e-01   8.3888121e-01   6.0181382e-01   6.3861009e-01   3.4378533e-01   6.3808075e-01   1.0101422e+00   6.8961791e-01   4.1449626e-01   5.3588338e-01   2.5651975e-01   4.1586001e-01   5.0991930e-01   1.2869134e+00   3.0546431e-01   3.2586371e-01   3.0275928e-01   5.0905001e-01   1.5278635e+00   4.0000000e-01   1.7279861e+00   7.4586719e-01   1.7831878e+00   1.1718516e+00   1.4824233e+00   2.5111349e+00   8.3620494e-01   2.1256928e+00   1.4719311e+00   2.0814452e+00   1.0175773e+00   1.0014633e+00   1.3875139e+00   7.7885297e-01   1.1473003e+00   1.2144845e+00   1.1591754e+00   2.6773585e+00   2.7900071e+00   7.0776547e-01   1.6151673e+00   7.3496673e-01   2.6263773e+00   7.2526325e-01   1.4645804e+00   1.8755806e+00   6.3924842e-01   6.2407309e-01   1.2731262e+00   1.7507664e+00   2.0635966e+00   2.6116811e+00   1.3129189e+00   7.4855857e-01   1.1149070e+00   2.3160147e+00   1.4246028e+00   1.1229843e+00   5.6075294e-01   1.4120836e+00   1.5094575e+00   1.4108494e+00   7.4586719e-01   1.6884234e+00   1.6211869e+00   1.3017208e+00   8.1521713e-01   1.0401425e+00   1.2452704e+00   6.9728513e-01   1.8185955e+00   4.8135521e-01   1.2509218e+00   1.8049926e+00   5.8914551e-01   1.2205493e+00   4.2538717e-01   1.1912106e+00   4.6472023e-01   7.1840099e-01   8.9207714e-01   1.1017858e+00   1.1127329e+00   4.1586001e-01   7.8197925e-01   8.0326782e-01   5.7257017e-01   5.2655962e-01   4.3456114e-01   6.2660376e-01   4.8135521e-01   4.5581864e-01   1.3318128e+00   1.2468939e+00   1.3144065e+00   9.4935318e-01   6.6384020e-01   9.1075311e-01   3.2586371e-01   4.1449626e-01   1.0130748e+00   8.3280511e-01   1.0906119e+00   9.6204649e-01   3.4583729e-01   9.3296062e-01   1.7901543e+00   8.7170815e-01   7.3805807e-01   7.3805807e-01   5.2655962e-01   1.9041928e+00   8.1521713e-01   1.4138821e+00   7.3805807e-01   1.3166957e+00   9.2264612e-01   1.1533602e+00   2.0690479e+00   1.4700179e+00   1.7092525e+00   1.2231847e+00   1.5870088e+00   5.0592043e-01   7.5791688e-01   9.0575661e-01   9.1750357e-01   9.1802948e-01   8.1304731e-01   8.1638392e-01   2.1978861e+00   2.3802944e+00   1.1107977e+00   1.1386292e+00   7.9878917e-01   2.1900222e+00   6.1092863e-01   1.0480665e+00   1.4148192e+00   5.0991930e-01   3.6171588e-01   9.7825559e-01   1.2593659e+00   1.5912764e+00   2.0615043e+00   1.0056742e+00   5.6700421e-01   1.0137836e+00   
1.7695175e+00   1.0597541e+00   8.0619006e-01   3.8934542e-01   8.6513410e-01   1.0755693e+00   8.4050231e-01   7.3805807e-01   1.2832075e+00   1.1947245e+00   8.0660588e-01   8.2105460e-01   5.9426792e-01   8.6983677e-01   5.3309112e-01   1.9083318e+00   6.7975091e-01   4.1449626e-01   1.2452704e+00   1.1763980e+00   1.6420607e+00   7.9016429e-01   1.9365498e+00   1.3172979e+00   1.0653845e+00   1.5684812e+00   8.1304731e-01   1.7169601e+00   1.2768639e+00   1.8804140e+00   1.6311692e+00   1.6315809e+00   1.8424891e+00   2.1489929e+00   2.2038673e+00   1.4613032e+00   8.0587320e-01   6.8961791e-01   6.4704320e-01   9.7949166e-01   1.9250543e+00   1.2802798e+00   1.5824669e+00   2.0485534e+00   1.5790055e+00   1.0078327e+00   8.2305664e-01   1.1498269e+00   1.5838351e+00   1.0137836e+00   1.2418578e-01   1.0230441e+00   1.1119327e+00   1.0941064e+00   1.4719311e+00   3.2816937e-01   1.0165138e+00   2.9338155e+00   1.9158303e+00   3.0455280e+00   2.4635485e+00   2.7430309e+00   3.7921012e+00   1.2632199e+00   3.4105293e+00   2.7619926e+00   3.3261421e+00   2.2045198e+00   2.2598424e+00   2.6204307e+00   1.8330979e+00   2.0701646e+00   2.3622531e+00   2.4525409e+00   3.9619101e+00   4.0743074e+00   1.8315269e+00   2.8475224e+00   1.7388184e+00   3.9054939e+00   1.9111264e+00   2.7377517e+00   3.1510494e+00   1.7964653e+00   1.8350071e+00   2.5277506e+00   2.9970778e+00   3.3196868e+00   3.8532018e+00   2.5453122e+00   2.0312250e+00   2.3887539e+00   3.5269824e+00   2.5986705e+00   2.4210417e+00   1.7228354e+00   2.6125646e+00   2.7062349e+00   2.4839132e+00   1.9158303e+00   2.9502077e+00   2.8139128e+00   2.4180244e+00   1.9947426e+00   2.2550764e+00   2.3956104e+00   1.9332869e+00   1.4468211e+00   1.8027242e+00   7.3851529e-01   9.0658670e-01   5.0180477e-01   1.2418578e+00   2.5651975e-01   1.0022010e+00   8.6361309e-01   7.3851529e-01   1.1055064e+00   7.8197925e-01   6.8961791e-01   4.8927739e-01   5.0270183e-01   3.2352160e-01   2.1269358e-01   2.5651975e-01   4.9857388e-01   6.0611244e-01   1.2633451e+00   1.2342162e+00   1.2794849e+00   9.3824087e-01   7.0776547e-01   1.2014753e+00   7.0429250e-01   2.5651975e-01   6.2482915e-01   1.0329901e+00   1.1593224e+00   1.1069580e+00   5.0180477e-01   8.9712482e-01   1.8394959e+00   1.0181000e+00   9.2095040e-01   9.2047746e-01   4.4417983e-01   1.9314297e+00   9.4103005e-01   1.6328100e+00   9.3238528e-01   1.3969297e+00   1.0389435e+00   1.3327491e+00   2.0961718e+00   1.7103548e+00   1.7405652e+00   1.2330392e+00   1.7474965e+00   7.7919451e-01   8.1810461e-01   1.0613462e+00   1.0417249e+00   1.2331989e+00   1.0974061e+00   9.4125538e-01   2.2568188e+00   2.4127176e+00   8.4050231e-01   1.3145067e+00   1.0960883e+00   2.1973666e+00   5.6075294e-01   1.2221471e+00   1.4467170e+00   5.7324170e-01   6.3977563e-01   1.1341579e+00   1.2436109e+00   1.5826476e+00   2.0476065e+00   1.1847335e+00   5.3665999e-01   1.0391247e+00   1.7521201e+00   1.3293211e+00   9.4492923e-01   6.9369532e-01   1.0019724e+00   1.3083079e+00   1.0406064e+00   9.3238528e-01   1.4580335e+00   1.4387122e+00   1.0576043e+00   7.0233835e-01   8.1304731e-01   1.1697902e+00   8.2372435e-01   7.6914805e-01   7.2823007e-01   8.7504951e-01   1.0611732e+00   4.5581864e-01   1.5204521e+00   6.6432544e-01   6.5172743e-01   1.0866092e+00   4.5470518e-01   1.0573285e+00   9.0074515e-01   1.3083079e+00   1.0613462e+00   1.2123540e+00   1.4190961e+00   1.6754036e+00   1.6596797e+00   8.9095811e-01   6.1947990e-01   4.2418962e-01   4.8927739e-01   6.0551856e-01   1.2951131e+00   
6.2538346e-01   1.0030700e+00   1.5663312e+00   1.1396406e+00   4.5581864e-01   3.2816937e-01   5.3665999e-01   1.0166932e+00   6.0670504e-01   6.9167458e-01   4.4535192e-01   5.6075294e-01   5.3665999e-01   1.0182895e+00   9.1075311e-01   5.0991930e-01   2.2632657e+00   1.2601890e+00   2.4384530e+00   1.8269304e+00   2.0927845e+00   3.1870761e+00   6.4290921e-01   2.8096725e+00   2.1462316e+00   2.6900593e+00   1.5901181e+00   1.6364474e+00   2.0101738e+00   1.1729895e+00   1.4078246e+00   1.7083888e+00   1.8278268e+00   3.3435703e+00   3.4604677e+00   1.2331989e+00   2.2206574e+00   1.0719360e+00   3.3068858e+00   1.3163598e+00   2.0994872e+00   2.5539296e+00   1.1948578e+00   1.1991899e+00   1.8828324e+00   2.4245766e+00   2.7358293e+00   3.2702869e+00   1.8959565e+00   1.4312787e+00   1.7665622e+00   2.9527671e+00   1.9255490e+00   1.7860690e+00   1.0796583e+00   2.0205937e+00   2.0647798e+00   1.9168750e+00   1.2601890e+00   2.3069539e+00   2.1608869e+00   1.8128438e+00   1.3838212e+00   1.6376058e+00   1.7220696e+00   1.2768639e+00   1.2687651e+00   1.0344911e+00   1.5320003e+00   9.7779835e-01   1.8837258e+00   1.2979752e+00   1.0012667e+00   1.3957794e+00   7.2526325e-01   1.6850672e+00   1.2372418e+00   1.7004805e+00   1.4979666e+00   1.5611922e+00   1.7745022e+00   2.0153916e+00   2.0815027e+00   1.3830210e+00   8.1242502e-01   5.8914551e-01   5.7257017e-01   9.5676647e-01   1.7518264e+00   1.2724737e+00   1.6669115e+00   1.9675324e+00   1.4200435e+00   1.1136605e+00   7.1881659e-01   1.0072663e+00   1.5134954e+00   9.3238528e-01   3.2352160e-01   9.5222919e-01   1.1681971e+00   1.1043332e+00   1.4120836e+00   6.2205176e-01   1.0078327e+00   2.8028143e+00   1.7525933e+00   2.8815987e+00   2.2944257e+00   2.5825907e+00   3.6132031e+00   1.1179743e+00   3.2286633e+00   2.5673494e+00   3.2133201e+00   2.1127170e+00   2.0867931e+00   2.4721080e+00   1.6626615e+00   1.9456450e+00   2.2607446e+00   2.2983453e+00   3.8335668e+00   3.8818411e+00   1.6299374e+00   2.7127377e+00   1.6132118e+00   3.7182722e+00   1.7559391e+00   2.6123294e+00   2.9966900e+00   1.6625128e+00   1.7303440e+00   2.3560577e+00   2.8340159e+00   3.1407514e+00   3.7366777e+00   2.3765195e+00   1.8701780e+00   2.1933937e+00   3.3657099e+00   2.5066503e+00   2.2797241e+00   1.6331631e+00   2.4808010e+00   2.5698271e+00   2.3770285e+00   1.7525933e+00   2.8053367e+00   2.6963680e+00   2.2934334e+00   1.8202060e+00   2.1229819e+00   2.3231793e+00   1.8122257e+00   8.5462626e-01   5.0991930e-01   6.2538346e-01   8.0358695e-01   3.7255734e-01   5.3022554e-01   8.2105460e-01   6.0900723e-01   6.2482915e-01   3.0844217e-01   7.9580667e-01   5.4292906e-01   5.0991930e-01   7.0437330e-01   9.7270522e-01   9.9532071e-01   3.0546431e-01   7.9878917e-01   7.2343175e-01   7.8768770e-01   4.2418962e-01   9.0876485e-01   5.2862779e-01   4.4651726e-01   8.5205778e-01   7.4164639e-01   3.2586371e-01   5.7867728e-01   5.3309112e-01   4.1449626e-01   4.5581864e-01   1.2131545e+00   3.8776762e-01   3.2352160e-01   2.5251796e-01   3.2816937e-01   1.3221405e+00   2.8507955e-01   1.8873850e+00   9.2859317e-01   1.8730683e+00   1.4111029e+00   1.6550480e+00   2.6320302e+00   1.0406064e+00   2.2657813e+00   1.6658308e+00   2.1339968e+00   1.0072663e+00   1.1444449e+00   1.4417207e+00   8.9971984e-01   1.1192426e+00   1.2342162e+00   1.3367840e+00   2.7726042e+00   2.9277580e+00   9.9368623e-01   1.6694974e+00   7.8197925e-01   2.7493700e+00   7.5976039e-01   1.5849874e+00   1.9802196e+00   6.4290921e-01   7.1799256e-01   1.4445746e+00   
1.8216794e+00   2.1462316e+00   2.6367554e+00   1.4616539e+00   9.2264612e-01   1.4088394e+00   2.3235034e+00   1.5120955e+00   1.3224963e+00   6.2024833e-01   1.4078246e+00   1.5587730e+00   1.2801437e+00   9.2859317e-01   1.8096161e+00   1.6710566e+00   1.2378278e+00   8.9653332e-01   1.0864449e+00   1.3071453e+00   9.0827783e-01   8.9159388e-01   7.7763126e-01   1.0417249e+00   9.1802948e-01   5.0905001e-01   6.2605182e-01   4.4651726e-01   1.2379511e+00   6.2024833e-01   9.5571254e-01   8.1558458e-01   7.5976039e-01   9.2944046e-01   1.0661822e+00   1.2662457e+00   8.2342214e-01   5.8851328e-01   5.1691876e-01   5.3588338e-01   5.1691876e-01   1.1717125e+00   9.6838716e-01   1.2601890e+00   1.1258723e+00   4.8135521e-01   8.3649708e-01   5.5419992e-01   6.2407309e-01   9.0965328e-01   4.2538717e-01   1.0906388e+00   5.9426792e-01   8.1638392e-01   7.3145860e-01   7.3145860e-01   1.1880428e+00   6.3861009e-01   2.2927296e+00   1.2766755e+00   2.1156916e+00   1.6869465e+00   1.9835684e+00   2.8209672e+00   1.2028939e+00   2.4458200e+00   1.8683030e+00   2.5149752e+00   1.4746001e+00   1.4371043e+00   1.7501772e+00   1.2500343e+00   1.5991931e+00   1.7211928e+00   1.6271057e+00   3.0580852e+00   3.1168283e+00   1.0328064e+00   2.0203543e+00   1.2344562e+00   2.9146252e+00   1.0941064e+00   1.9515265e+00   2.2002582e+00   1.0531192e+00   1.1790011e+00   1.7600233e+00   1.9895190e+00   2.3106402e+00   2.8876509e+00   1.7983401e+00   1.1763719e+00   1.6118154e+00   2.5100676e+00   2.0039716e+00   1.6449456e+00   1.1276917e+00   1.7245185e+00   1.9495298e+00   1.6638124e+00   1.2766755e+00   2.1512175e+00   2.1034605e+00   1.6449189e+00   1.1924295e+00   1.4645804e+00   1.8408873e+00   1.3037063e+00   1.1269972e+00   6.2482915e-01   5.0991930e-01   6.6827038e-01   7.0479928e-01   8.8358844e-01   4.5581864e-01   7.0086313e-01   4.2667565e-01   2.0656129e-01   4.4535192e-01   5.2942799e-01   7.0086313e-01   6.3861009e-01   2.1269358e-01   1.2261087e+00   1.0119857e+00   1.1001291e+00   8.1638392e-01   4.2667565e-01   7.0479928e-01   5.1691876e-01   6.0611244e-01   6.2538346e-01   6.9006418e-01   8.3812833e-01   6.4290921e-01   1.2418578e-01   7.3145860e-01   1.6044563e+00   6.2660376e-01   5.7324170e-01   5.6700421e-01   4.0293660e-01   1.7981158e+00   6.4806901e-01   1.5090287e+00   5.9426792e-01   1.4258804e+00   9.2264612e-01   1.2221471e+00   2.1613095e+00   1.2165505e+00   1.7814077e+00   1.1712156e+00   1.7475837e+00   7.0233835e-01   7.0776547e-01   1.0386594e+00   7.0233835e-01   1.0228981e+00   9.8450810e-01   8.5105559e-01   2.3257048e+00   2.4547248e+00   7.1504098e-01   1.2838690e+00   6.9369532e-01   2.2753334e+00   4.3691963e-01   1.1508502e+00   1.5109753e+00   4.0438741e-01   4.1449626e-01   1.0168833e+00   1.3670543e+00   1.6898941e+00   2.2253038e+00   1.0655560e+00   4.1586001e-01   9.0827783e-01   1.9262937e+00   1.2075315e+00   8.3888121e-01   4.0438741e-01   1.0401425e+00   1.2250414e+00   1.0755693e+00   5.9426792e-01   1.3866792e+00   1.3487634e+00   1.0056742e+00   5.9426792e-01   7.2526325e-01   1.0425476e+00   5.0592043e-01   1.2125198e+00   9.0252542e-01   5.4292906e-01   1.0679144e+00   4.5470518e-01   1.2307737e+00   5.6700421e-01   1.3629833e+00   1.1271488e+00   9.3592296e-01   1.1329323e+00   1.4903933e+00   1.5826638e+00   9.2264612e-01   3.7598397e-01   5.1691876e-01   5.3022554e-01   3.4583729e-01   1.5102079e+00   9.0478973e-01   9.6664346e-01   1.3678655e+00   1.0012667e+00   5.0090417e-01   4.9766035e-01   8.1130291e-01   1.0331736e+00   4.5581864e-01   
7.6955924e-01   6.0551856e-01   6.0181382e-01   6.0060595e-01   8.1242502e-01   7.2852070e-01   5.0180477e-01   2.4944334e+00   1.5259640e+00   2.4897635e+00   2.0286682e+00   2.2758462e+00   3.2467454e+00   1.0417249e+00   2.8819633e+00   2.2804674e+00   2.7472449e+00   1.6234861e+00   1.7644184e+00   2.0587064e+00   1.4533724e+00   1.6559784e+00   1.8347926e+00   1.9605308e+00   3.3855928e+00   3.5452222e+00   1.4540815e+00   2.2852232e+00   1.3536716e+00   3.3617477e+00   1.3717027e+00   2.2096171e+00   2.5931780e+00   1.2603076e+00   1.3371180e+00   2.0643410e+00   2.4233813e+00   2.7521037e+00   3.2246201e+00   2.0784533e+00   1.5405106e+00   2.0088441e+00   2.9099860e+00   2.1140685e+00   1.9448322e+00   1.2330392e+00   2.0122105e+00   2.1687358e+00   1.8353933e+00   1.5259640e+00   2.4321505e+00   2.2783778e+00   1.8251179e+00   1.4795374e+00   1.7068208e+00   1.9049236e+00   1.5165339e+00   1.1004794e+00   9.4753140e-01   9.4125538e-01   1.1763719e+00   8.5105559e-01   6.6432544e-01   7.2526325e-01   6.4290921e-01   3.2816937e-01   1.2418578e-01   4.4535192e-01   6.2024833e-01   7.0479928e-01   1.2172454e+00   1.3021788e+00   1.3289150e+00   9.6141901e-01   8.9366705e-01   1.3003320e+00   7.1840099e-01   3.0275928e-01   8.2654509e-01   1.1056650e+00   1.2497790e+00   1.2234738e+00   6.0611244e-01   9.6141901e-01   1.8661545e+00   1.1149070e+00   1.0038051e+00   1.0038051e+00   5.0991930e-01   1.8886923e+00   1.0132664e+00   1.7427900e+00   1.0573285e+00   1.5462225e+00   1.2230220e+00   1.4700179e+00   2.2554582e+00   1.8183902e+00   1.9191337e+00   1.4359851e+00   1.8399871e+00   8.1558458e-01   9.6664346e-01   1.1754055e+00   1.1548215e+00   1.2523175e+00   1.1229906e+00   1.1149070e+00   2.3868096e+00   2.5734684e+00   1.0665149e+00   1.4148192e+00   1.1763719e+00   2.3591336e+00   6.6317860e-01   1.3545005e+00   1.6178623e+00   6.3735887e-01   7.2526325e-01   1.2709820e+00   1.4169523e+00   1.7425222e+00   2.1449779e+00   1.3015611e+00   7.4777660e-01   1.2601890e+00   1.8513630e+00   1.3897316e+00   1.1185330e+00   7.6625946e-01   1.0919712e+00   1.3783420e+00   1.0120221e+00   1.0573285e+00   1.5860263e+00   1.5022608e+00   1.0597541e+00   8.3060013e-01   8.9095811e-01   1.2107055e+00   9.5498315e-01   5.9426792e-01   8.8835966e-01   7.2486328e-01   4.3456114e-01   6.3108414e-01   7.9580667e-01   5.4292906e-01   8.0619006e-01   1.0003942e+00   1.2057554e+00   1.1282371e+00   4.0147421e-01   1.0482443e+00   8.3812833e-01   9.3049742e-01   6.4290921e-01   6.6432544e-01   2.0000000e-01   4.9766035e-01   1.1016264e+00   8.7202528e-01   4.1312257e-01   6.2660376e-01   4.4651726e-01   5.0180477e-01   5.9426792e-01   1.3172979e+00   3.8776762e-01   3.7427929e-01   3.2816937e-01   6.1151102e-01   1.5338492e+00   4.2667565e-01   1.6536633e+00   6.6827038e-01   1.8195408e+00   1.1798960e+00   1.4588731e+00   2.5552364e+00   7.7039952e-01   2.1764356e+00   1.5179392e+00   2.0659196e+00   1.0072663e+00   1.0165138e+00   1.4077317e+00   7.0523271e-01   9.7441804e-01   1.1290808e+00   1.1879078e+00   2.7003420e+00   2.8251568e+00   8.7478495e-01   1.6113870e+00   5.7257017e-01   2.6756977e+00   7.5976039e-01   1.4611141e+00   1.9290721e+00   6.4290921e-01   5.8851328e-01   1.2509218e+00   1.8216794e+00   2.1226924e+00   2.6556584e+00   1.2741904e+00   8.1527569e-01   1.1396406e+00   2.3658814e+00   1.3219975e+00   1.1377990e+00   4.8036801e-01   1.4418088e+00   1.4695582e+00   1.4097125e+00   6.6827038e-01   1.6762567e+00   1.5624022e+00   1.2731262e+00   8.4812820e-01   1.0423677e+00   
1.1235486e+00   6.3808075e-01   7.0328431e-01   2.8507955e-01   9.7377870e-01   3.7598397e-01   8.9971984e-01   6.2538346e-01   6.2988288e-01   8.4591037e-01   1.1042097e+00   1.1949615e+00   5.7867728e-01   6.0121055e-01   4.2418962e-01   4.8036801e-01   2.4837156e-01   1.0588560e+00   6.3735887e-01   8.4050231e-01   1.0216438e+00   6.0900723e-01   3.8776762e-01   3.8934542e-01   3.8934542e-01   6.0900723e-01   2.1269358e-01   1.0100718e+00   3.2586371e-01   3.2816937e-01   3.2816937e-01   4.6472023e-01   1.1765359e+00   3.0546431e-01   2.1603815e+00   1.1833480e+00   2.0696037e+00   1.5733646e+00   1.8844302e+00   2.7913211e+00   1.0279631e+00   2.4069427e+00   1.8096161e+00   2.4013270e+00   1.3194762e+00   1.3641156e+00   1.6852518e+00   1.1847335e+00   1.5344133e+00   1.5901181e+00   1.5133392e+00   2.9601125e+00   3.0929882e+00   9.7994716e-01   1.9359434e+00   1.1341579e+00   2.8969791e+00   1.0267435e+00   1.8174459e+00   2.1353579e+00   9.5498315e-01   1.0067464e+00   1.6752254e+00   1.9600024e+00   2.3015655e+00   2.8161147e+00   1.7177705e+00   1.0636401e+00   1.5095556e+00   2.5229584e+00   1.8388413e+00   1.5016471e+00   9.4558103e-01   1.6620056e+00   1.8661202e+00   1.6246433e+00   1.1833480e+00   2.0524784e+00   1.9917352e+00   1.5896248e+00   1.1449732e+00   1.3635198e+00   1.6539414e+00   1.1377990e+00   7.8695083e-01   1.0194752e+00   6.9369532e-01   4.4535192e-01   6.2538346e-01   7.1169738e-01   8.2684479e-01   7.5791688e-01   8.9971984e-01   7.0394675e-01   1.0777972e+00   8.9366705e-01   9.7548738e-01   7.3805807e-01   6.9369532e-01   9.9348625e-01   1.2013436e+00   9.4287188e-01   2.1845981e-01   9.1163729e-01   7.8197925e-01   7.4777660e-01   8.0096515e-01   6.3735887e-01   1.5043029e+00   7.0776547e-01   8.7021234e-01   7.8197925e-01   7.0784540e-01   1.6629594e+00   7.2852070e-01   1.7521201e+00   7.5705927e-01   1.5812904e+00   1.1798960e+00   1.4306494e+00   2.2994849e+00   1.3047221e+00   1.9342059e+00   1.3259654e+00   2.0165210e+00   1.0919404e+00   8.7720955e-01   1.2180145e+00   7.1881659e-01   1.0466623e+00   1.2389598e+00   1.1426203e+00   2.5872805e+00   2.5766735e+00   5.0817745e-01   1.4924169e+00   8.3060013e-01   2.3982377e+00   5.8914551e-01   1.4728952e+00   1.7209381e+00   6.3808075e-01   8.3649708e-01   1.1916257e+00   1.5183917e+00   1.7983401e+00   2.4620092e+00   1.2174316e+00   7.4549115e-01   1.1136343e+00   1.9965599e+00   1.5255331e+00   1.1891470e+00   8.2384013e-01   1.2304904e+00   1.3937115e+00   1.1842231e+00   7.5705927e-01   1.6132118e+00   1.5725854e+00   1.1127329e+00   5.8914551e-01   9.8054887e-01   1.4089719e+00   9.0521488e-01   1.1043332e+00   5.3665999e-01   1.1040512e+00   8.6137722e-01   8.5335130e-01   1.0692258e+00   1.3395518e+00   1.4121163e+00   7.2343175e-01   4.0438741e-01   1.4096146e-01   2.1845981e-01   2.5251796e-01   1.2340567e+00   7.2852070e-01   1.0216438e+00   1.2599182e+00   7.7360126e-01   5.1607523e-01   2.1269358e-01   5.0270183e-01   8.3345577e-01   2.1845981e-01   7.4893123e-01   3.4378533e-01   5.3022554e-01   4.5581864e-01   6.9167458e-01   9.4080461e-01   3.4583729e-01   2.3056888e+00   1.2961380e+00   2.2790932e+00   1.7645599e+00   2.0520955e+00   3.0186066e+00   8.9827435e-01   2.6386155e+00   2.0191749e+00   2.5992685e+00   1.4843324e+00   1.5325189e+00   1.8691652e+00   1.2554784e+00   1.5582387e+00   1.7070813e+00   1.7171798e+00   3.2008583e+00   3.3121677e+00   1.1313840e+00   2.1147926e+00   1.1879078e+00   3.1280700e+00   1.1681971e+00   2.0145868e+00   2.3728666e+00   1.0720678e+00   
1.1437669e+00   1.8347926e+00   2.2027051e+00   2.5315934e+00   3.0688850e+00   1.8636112e+00   1.2768639e+00   1.7126039e+00   2.7381221e+00   1.9739212e+00   1.7039473e+00   1.0573285e+00   1.8507968e+00   2.0095044e+00   1.7591313e+00   1.2961380e+00   2.2339736e+00   2.1358764e+00   1.7106141e+00   1.2731262e+00   1.5241199e+00   1.7828037e+00   1.2869134e+00   8.7720955e-01   7.4777660e-01   6.5223271e-01   7.1881659e-01   7.6914805e-01   9.3999899e-01   8.0619006e-01   4.2418962e-01   1.4104707e+00   1.2144845e+00   1.3128167e+00   1.0056742e+00   5.3665999e-01   5.6075294e-01   3.4583729e-01   8.1130291e-01   9.7730901e-01   7.8197925e-01   9.9089002e-01   8.0353565e-01   4.3691963e-01   9.6095130e-01   1.7110336e+00   7.7033318e-01   7.5196795e-01   7.0776547e-01   6.5648056e-01   1.8915404e+00   7.9878917e-01   1.2730931e+00   5.3022554e-01   1.4349259e+00   8.3620494e-01   1.0733200e+00   2.1767273e+00   1.0960883e+00   1.8048569e+00   1.2035173e+00   1.6539414e+00   6.2482915e-01   7.0523271e-01   1.0184370e+00   7.1169738e-01   6.6432544e-01   7.0480730e-01   8.1527569e-01   2.3116343e+00   2.4505705e+00   1.0085601e+00   1.2063335e+00   4.5581864e-01   2.3022338e+00   5.6700421e-01   1.0655560e+00   1.5550492e+00   4.4417983e-01   2.5251796e-01   8.8358844e-01   1.4559276e+00   1.7532140e+00   2.2755980e+00   8.9653332e-01   5.5160819e-01   9.1163729e-01   1.9862884e+00   9.1163729e-01   7.6752131e-01   2.0656129e-01   1.0632598e+00   1.0516761e+00   1.0391247e+00   5.3022554e-01   1.2756158e+00   1.1406052e+00   8.7720955e-01   7.3851529e-01   6.5633874e-01   7.0776547e-01   3.2352160e-01   9.1273187e-01   7.0043186e-01   3.7427929e-01   5.7324170e-01   9.3615100e-01   1.0733200e+00   5.0991930e-01   5.9426792e-01   6.5633874e-01   6.7975091e-01   3.0811765e-01   1.1056650e+00   7.7360126e-01   7.0429250e-01   8.2552685e-01   5.7257017e-01   5.0905001e-01   6.1968386e-01   6.5223271e-01   6.0611244e-01   3.2586371e-01   1.2028939e+00   5.0905001e-01   4.2667565e-01   4.1449626e-01   3.0546431e-01   1.2470767e+00   4.0147421e-01   2.1213832e+00   1.1521791e+00   2.0070710e+00   1.6126950e+00   1.8637576e+00   2.7490677e+00   1.2370832e+00   2.3910690e+00   1.8274132e+00   2.3004229e+00   1.1979861e+00   1.3368881e+00   1.5972416e+00   1.1093572e+00   1.3693737e+00   1.4644753e+00   1.5211725e+00   2.9013543e+00   3.0559175e+00   1.0590298e+00   1.8374244e+00   1.0423677e+00   2.8588399e+00   9.4309624e-01   1.7735968e+00   2.0979142e+00   8.5205778e-01   9.4287188e-01   1.6546836e+00   1.9109434e+00   2.2424413e+00   2.7118839e+00   1.6774684e+00   1.1029298e+00   1.6007141e+00   2.3898698e+00   1.7557336e+00   1.5191033e+00   8.5462626e-01   1.5344007e+00   1.7571295e+00   1.3898545e+00   1.1521791e+00   1.9987470e+00   1.8815752e+00   1.4085850e+00   1.0646687e+00   1.2740417e+00   1.5577803e+00   1.1296247e+00   4.0176783e-01   6.5223271e-01   6.3977563e-01   5.3022554e-01   5.7324170e-01   5.2574978e-01   1.4438552e+00   1.2221471e+00   1.3131724e+00   1.0406064e+00   3.4583729e-01   9.5943875e-01   9.2859317e-01   6.5172743e-01   5.1607523e-01   9.7548738e-01   1.0611732e+00   8.6137722e-01   5.3665999e-01   9.4854455e-01   1.8311457e+00   8.7420176e-01   8.7170815e-01   8.4050231e-01   6.5223271e-01   2.0303919e+00   8.9917007e-01   1.3866318e+00   5.7867728e-01   1.2002762e+00   7.4740267e-01   1.0440187e+00   1.9213397e+00   1.4087466e+00   1.5433565e+00   9.2836103e-01   1.6392533e+00   7.7360126e-01   5.0592043e-01   8.5585239e-01   6.8961791e-01   9.5035453e-01   
9.5498315e-01   7.0776547e-01   2.1812146e+00   2.2081369e+00   3.7427929e-01   1.1335961e+00   7.7885297e-01   2.0291151e+00   3.2352160e-01   1.0661822e+00   1.3165513e+00   3.7598397e-01   5.3588338e-01   8.2305664e-01   1.1437730e+00   1.4415965e+00   2.0902718e+00   8.7848692e-01   3.2352160e-01   7.0479928e-01   1.6865203e+00   1.1904611e+00   7.5791688e-01   5.5492130e-01   8.9207714e-01   1.0805899e+00   9.6271042e-01   5.7867728e-01   1.2256881e+00   1.2481462e+00   8.8358844e-01   4.0147421e-01   6.4405773e-01   1.0887986e+00   5.9426792e-01   4.4651726e-01   5.4292906e-01   7.0437330e-01   7.0776547e-01   3.2816937e-01   1.2134101e+00   9.8820253e-01   1.0733200e+00   8.1099042e-01   4.9857388e-01   7.2172678e-01   6.5223271e-01   6.3808075e-01   5.3665999e-01   6.9369532e-01   8.2305664e-01   6.2482915e-01   2.5251796e-01   7.1799256e-01   1.5895397e+00   6.2205176e-01   5.7257017e-01   5.6769031e-01   4.0438741e-01   1.7935777e+00   6.4755655e-01   1.6267976e+00   7.4777660e-01   1.4807336e+00   9.7270522e-01   1.3173487e+00   2.1839601e+00   1.2277129e+00   1.7938033e+00   1.1948932e+00   1.8448199e+00   8.7383925e-01   8.2305664e-01   1.1418127e+00   8.4591037e-01   1.2153720e+00   1.1640914e+00   9.1163729e-01   2.3638833e+00   2.4815883e+00   6.3861009e-01   1.3946921e+00   8.5434758e-01   2.2902807e+00   6.1151102e-01   1.2452704e+00   1.5325394e+00   6.0121055e-01   6.1092863e-01   1.1228379e+00   1.3752705e+00   1.7103060e+00   2.2560685e+00   1.1879078e+00   4.5470518e-01   9.0454394e-01   1.9729066e+00   1.3650300e+00   9.0521488e-01   6.0670504e-01   1.1454006e+00   1.3674559e+00   1.2262704e+00   7.4777660e-01   1.4820085e+00   1.4947429e+00   1.1729895e+00   7.3145860e-01   8.7720955e-01   1.2163831e+00   6.5633874e-01   2.1845981e-01   5.6769031e-01   7.4777660e-01   4.2538717e-01   9.5099818e-01   9.7994716e-01   1.0119857e+00   6.5223271e-01   8.3888121e-01   1.0038051e+00   5.9426792e-01   4.6472023e-01   6.0121055e-01   8.0326782e-01   9.2836103e-01   9.0876485e-01   3.7598397e-01   6.3861009e-01   1.5602029e+00   8.0326782e-01   7.0129382e-01   7.0043186e-01   2.0000000e-01   1.6208239e+00   7.0437330e-01   1.8619092e+00   9.6271042e-01   1.6849072e+00   1.3188999e+00   1.5860263e+00   2.4084158e+00   1.5142414e+00   2.0543079e+00   1.5230852e+00   1.9980352e+00   9.4375082e-01   1.0588560e+00   1.3035649e+00   1.0035600e+00   1.2499342e+00   1.2459608e+00   1.2225634e+00   2.5586145e+00   2.7210925e+00   8.9366705e-01   1.5500052e+00   1.0014633e+00   2.5139485e+00   6.9369532e-01   1.4790710e+00   1.7580510e+00   6.2660376e-01   7.0429250e-01   1.3804167e+00   1.5625881e+00   1.8966943e+00   2.3518757e+00   1.4139741e+00   8.0358695e-01   1.3075101e+00   2.0455018e+00   1.5153654e+00   1.2234738e+00   6.6539428e-01   1.2342162e+00   1.5049644e+00   1.1591754e+00   9.6271042e-01   1.7107332e+00   1.6328100e+00   1.1880428e+00   8.3812833e-01   1.0106392e+00   1.3267389e+00   8.9767734e-01   4.2538717e-01   6.2024833e-01   6.0181382e-01   1.1431021e+00   1.1948932e+00   1.2269747e+00   8.6361309e-01   8.2552685e-01   1.2002640e+00   6.5223271e-01   3.0811765e-01   7.1462831e-01   1.0067784e+00   1.1396406e+00   1.1147518e+00   5.0817745e-01   8.5335130e-01   1.7711504e+00   1.0085601e+00   9.0454394e-01   9.0277242e-01   4.0438741e-01   1.8140813e+00   9.1075311e-01   1.7412567e+00   9.8054887e-01   1.5527694e+00   1.2155004e+00   1.4692412e+00   2.2702600e+00   1.7126039e+00   1.9279661e+00   1.4238090e+00   1.8539569e+00   8.1558458e-01   9.5035453e-01   
1.1763980e+00   1.0621081e+00   1.2047214e+00   1.1205013e+00   1.1134787e+00   2.4108292e+00   2.5851693e+00   9.6095130e-01   1.4178113e+00   1.0879524e+00   2.3751496e+00   6.0900723e-01   1.3570688e+00   1.6277043e+00   5.7015910e-01   6.6491075e-01   1.2652657e+00   1.4292566e+00   1.7566567e+00   2.1846001e+00   1.2961380e+00   7.1840099e-01   1.2329148e+00   1.8795815e+00   1.3897316e+00   1.1149070e+00   6.8757066e-01   1.0960883e+00   1.3785366e+00   1.0168833e+00   9.8054887e-01   1.5871961e+00   1.5043071e+00   1.0597541e+00   7.7033318e-01   8.8861541e-01   1.2058675e+00   8.9134001e-01   3.4583729e-01   8.1130291e-01   1.5090287e+00   1.4644753e+00   1.5150043e+00   1.1847335e+00   8.1385214e-01   1.4041085e+00   8.9917007e-01   3.0811765e-01   6.6539428e-01   1.2643026e+00   1.3836712e+00   1.3112758e+00   7.0784540e-01   1.1353806e+00   2.0777059e+00   1.2396136e+00   1.1498269e+00   1.1474460e+00   6.9006418e-01   2.1775976e+00   1.1752673e+00   1.4613032e+00   1.0391247e+00   1.1810170e+00   8.7504951e-01   1.1390131e+00   1.8670836e+00   1.9048338e+00   1.5205305e+00   1.0228981e+00   1.5676403e+00   6.7975091e-01   6.6539428e-01   8.7175869e-01   1.1533602e+00   1.2459608e+00   9.7730901e-01   7.5082357e-01   2.0536508e+00   2.1838261e+00   8.9095811e-01   1.1306949e+00   1.2394907e+00   1.9665910e+00   5.6769031e-01   1.0425476e+00   1.2324706e+00   6.4704320e-01   7.3851529e-01   9.5476489e-01   1.0198386e+00   1.3510699e+00   1.8411319e+00   1.0100718e+00   5.2942799e-01   9.3801395e-01   1.5112621e+00   1.2002762e+00   7.7763126e-01   8.2899253e-01   8.2305664e-01   1.1377990e+00   9.1663180e-01   1.0391247e+00   1.2653669e+00   1.2757312e+00   9.2288144e-01   6.4405773e-01   6.6827038e-01   1.0851476e+00   9.3048953e-01   7.7074935e-01   1.6569692e+00   1.5391185e+00   1.6134578e+00   1.2794849e+00   7.1504098e-01   1.3197776e+00   7.9613242e-01   3.2586371e-01   8.6165877e-01   1.2653669e+00   1.4028652e+00   1.2701139e+00   6.6384020e-01   1.2172454e+00   2.1489775e+00   1.2262704e+00   1.1578646e+00   1.1452867e+00   7.9613242e-01   2.2808589e+00   1.1959482e+00   1.1500393e+00   9.1075311e-01   9.3999899e-01   6.4806901e-01   8.5434758e-01   1.6806723e+00   1.8184542e+00   1.3334814e+00   8.5205778e-01   1.2702636e+00   3.4583729e-01   4.3456114e-01   5.6700421e-01   1.0389435e+00   1.0122141e+00   6.4290921e-01   5.0905001e-01   1.8419636e+00   1.9902374e+00   9.3801395e-01   8.1810461e-01   1.1069580e+00   1.7940242e+00   4.4651726e-01   7.4740267e-01   1.0346741e+00   5.1691876e-01   6.0121055e-01   6.6827038e-01   8.5205778e-01   1.1776640e+00   1.6778021e+00   7.0776547e-01   4.2667565e-01   7.8695083e-01   1.3400806e+00   8.6165877e-01   5.3022554e-01   7.0437330e-01   5.0592043e-01   8.1273630e-01   6.0670504e-01   9.1075311e-01   9.7270522e-01   9.4352681e-01   6.0551856e-01   5.7257017e-01   3.4378533e-01   7.5705927e-01   8.0064372e-01   1.0450018e+00   8.4812820e-01   9.3847194e-01   6.2988288e-01   6.0611244e-01   6.0060595e-01   5.0090417e-01   7.0784540e-01   6.2538346e-01   5.0592043e-01   6.6932542e-01   5.5492130e-01   1.5422108e-01   5.6075294e-01   1.4235605e+00   4.6472023e-01   4.2418962e-01   3.8776762e-01   2.8192292e-01   1.5978583e+00   4.5581864e-01   1.6256459e+00   6.5633874e-01   1.5986180e+00   1.1106412e+00   1.3708966e+00   2.3519748e+00   1.1147518e+00   1.9798165e+00   1.3654173e+00   1.8856245e+00   7.7033318e-01   8.5335130e-01   1.1771643e+00   6.8076724e-01   9.7270522e-01   1.0165138e+00   1.0391247e+00   2.5081056e+00   
2.6437138e+00   7.6716823e-01   1.4120836e+00   6.1947990e-01   2.4692682e+00   4.8927739e-01   1.3077572e+00   1.7030709e+00   3.8934542e-01   4.4651726e-01   1.1594648e+00   1.5551984e+00   1.8760773e+00   2.3977345e+00   1.1868139e+00   6.2024833e-01   1.1056650e+00   2.0821572e+00   1.2794849e+00   1.0244319e+00   3.7427929e-01   1.1646003e+00   1.3139135e+00   1.1119327e+00   6.5633874e-01   1.5344007e+00   1.4333755e+00   1.0386594e+00   6.3735887e-01   8.2372435e-01   1.0901359e+00   6.2081167e-01   3.4583729e-01   2.8192292e-01   4.1586001e-01   1.6237100e+00   1.0540105e+00   1.1816401e+00   1.4110536e+00   9.8450810e-01   6.6432544e-01   5.3665999e-01   9.0454394e-01   1.1389644e+00   5.0905001e-01   7.1799256e-01   7.1504098e-01   7.3813096e-01   7.2783368e-01   8.7021234e-01   6.9006418e-01   6.2482915e-01   2.6619364e+00   1.6754669e+00   2.5821869e+00   2.1421834e+00   2.4107926e+00   3.3209792e+00   1.2036484e+00   2.9531363e+00   2.3720162e+00   2.8824828e+00   1.7671769e+00   1.8842354e+00   2.1714345e+00   1.6176764e+00   1.8723516e+00   2.0134817e+00   2.0676751e+00   3.4808493e+00   3.6264553e+00   1.5230852e+00   2.4135751e+00   1.5351194e+00   3.4281547e+00   1.4948868e+00   2.3378347e+00   2.6680945e+00   1.3977032e+00   1.4832928e+00   2.1976523e+00   2.4795532e+00   2.8157449e+00   3.2956170e+00   2.2214438e+00   1.6336229e+00   2.1064881e+00   2.9775369e+00   2.2994849e+00   2.0603946e+00   1.3916739e+00   2.1189748e+00   2.3204945e+00   1.9672068e+00   1.6754669e+00   2.5635110e+00   2.4436082e+00   1.9753271e+00   1.6077195e+00   1.8374244e+00   2.0981613e+00   1.6585806e+00   1.2418578e-01   3.7598397e-01   1.3405045e+00   8.3812833e-01   1.1437669e+00   1.3915412e+00   8.9095811e-01   6.2538346e-01   2.5251796e-01   6.0611244e-01   9.6791960e-01   3.4583729e-01   6.2205176e-01   4.5581864e-01   6.5223271e-01   5.7867728e-01   8.2619017e-01   8.2654509e-01   4.6472023e-01   2.4061696e+00   1.3868130e+00   2.3985329e+00   1.8751958e+00   2.1591630e+00   3.1397173e+00   8.9852394e-01   2.7599154e+00   2.1335771e+00   2.7193241e+00   1.6021202e+00   1.6415861e+00   1.9851797e+00   1.3336069e+00   1.6224878e+00   1.8082080e+00   1.8350829e+00   3.3292261e+00   3.4297053e+00   1.2340567e+00   2.2298076e+00   1.2654843e+00   3.2489933e+00   1.2770118e+00   2.1337366e+00   2.4988032e+00   1.1786349e+00   1.2545301e+00   1.9393053e+00   2.3288114e+00   2.6536325e+00   3.2010862e+00   1.9648876e+00   1.3964978e+00   1.8188234e+00   2.8588023e+00   2.0766308e+00   1.8216743e+00   1.1634384e+00   1.9698860e+00   2.1147926e+00   1.8660999e+00   1.3868130e+00   2.3472906e+00   2.2417376e+00   1.8131734e+00   1.3752391e+00   1.6372749e+00   1.8856245e+00   1.3922071e+00   4.0176783e-01   1.4467170e+00   9.3049742e-01   1.2002762e+00   1.4411886e+00   9.4375082e-01   6.6432544e-01   3.7427929e-01   7.0784540e-01   1.0466623e+00   4.0176783e-01   5.6700421e-01   5.5492130e-01   6.9728513e-01   6.4405773e-01   8.7170815e-01   7.3535471e-01   5.3309112e-01   2.5193321e+00   1.5039793e+00   2.4895662e+00   1.9791576e+00   2.2673478e+00   3.2268480e+00   1.0014633e+00   2.8466240e+00   2.2303173e+00   2.8133325e+00   1.6962564e+00   1.7458338e+00   2.0805039e+00   1.4552205e+00   1.7454671e+00   1.9165907e+00   1.9332869e+00   3.4131779e+00   3.5208177e+00   1.3379696e+00   2.3275025e+00   1.3866044e+00   3.3340853e+00   1.3784233e+00   2.2317775e+00   2.5822073e+00   1.2828332e+00   1.3595018e+00   2.0485193e+00   2.4057315e+00   2.7355566e+00   3.2722484e+00   2.0762069e+00   
1.4905436e+00   1.9191337e+00   2.9375895e+00   2.1864773e+00   1.9213461e+00   1.2702636e+00   2.0582667e+00   2.2206505e+00   1.9521784e+00   1.5039793e+00   2.4497583e+00   2.3480580e+00   1.9124077e+00   1.4817438e+00   1.7372199e+00   1.9937367e+00   1.5016471e+00   1.2122249e+00   6.7975091e-01   8.4050231e-01   1.0796583e+00   6.6539428e-01   3.4583729e-01   3.2816937e-01   5.2942799e-01   7.3145860e-01   1.2418578e-01   9.1163729e-01   3.2586371e-01   3.7427929e-01   3.2816937e-01   5.0592043e-01   1.0122141e+00   2.1845981e-01   2.2481791e+00   1.2631020e+00   2.1874158e+00   1.7295419e+00   1.9965608e+00   2.9333950e+00   1.0072663e+00   2.5632712e+00   1.9670002e+00   2.4858980e+00   1.3655398e+00   1.4724669e+00   1.7716601e+00   1.2125198e+00   1.4903113e+00   1.6105641e+00   1.6572339e+00   3.0940465e+00   3.2344170e+00   1.1332978e+00   2.0129643e+00   1.1341579e+00   3.0445772e+00   1.0864449e+00   1.9287276e+00   2.2802541e+00   9.8820253e-01   1.0688498e+00   1.7838044e+00   2.1039999e+00   2.4365934e+00   2.9349538e+00   1.8084630e+00   1.2266837e+00   1.7026843e+00   2.6138892e+00   1.8911946e+00   1.6476803e+00   9.7949166e-01   1.7305789e+00   1.9168750e+00   1.6065247e+00   1.2631020e+00   2.1541966e+00   2.0395401e+00   1.5896248e+00   1.1996741e+00   1.4306494e+00   1.6928004e+00   1.2436109e+00   7.5791688e-01   8.1242502e-01   7.6625946e-01   7.5976039e-01   1.0289803e+00   1.1332978e+00   7.9613242e-01   5.3665999e-01   1.1149070e+00   1.9007091e+00   9.2836103e-01   9.3610001e-01   9.1858284e-01   8.1638392e-01   2.1493214e+00   1.0132664e+00   1.1680362e+00   3.2352160e-01   1.2372418e+00   5.4292906e-01   8.7170815e-01   1.9366254e+00   1.1486378e+00   1.5564198e+00   8.7420176e-01   1.5682049e+00   6.6491075e-01   4.5470518e-01   8.8358844e-01   4.5581864e-01   8.0326782e-01   7.9878917e-01   5.9426792e-01   2.1460410e+00   2.1931311e+00   5.0180477e-01   1.0950112e+00   5.0592043e-01   2.0545952e+00   3.4378533e-01   9.3923979e-01   1.3512935e+00   3.4583729e-01   3.4583729e-01   6.6539428e-01   1.2670555e+00   1.5369942e+00   2.1465859e+00   7.2526325e-01   3.0546431e-01   5.0991930e-01   1.8206746e+00   9.8054887e-01   5.7015910e-01   3.8776762e-01   9.6664346e-01   9.9089002e-01   1.0262547e+00   3.2352160e-01   1.1127329e+00   1.1166017e+00   8.7848692e-01   3.8934542e-01   5.8914551e-01   8.8062848e-01   3.2586371e-01   6.4755655e-01   1.3011270e+00   1.0122141e+00   4.2538717e-01   6.2660376e-01   4.4651726e-01   7.0086313e-01   6.3735887e-01   1.2926374e+00   4.0176783e-01   4.2288438e-01   3.8934542e-01   8.0619006e-01   1.5230852e+00   4.6472023e-01   1.6933635e+00   7.0233835e-01   1.9584922e+00   1.2594846e+00   1.5411691e+00   2.6785768e+00   6.2605182e-01   2.3003744e+00   1.6284481e+00   2.1882128e+00   1.1729895e+00   1.1500393e+00   1.5577059e+00   7.1881659e-01   9.8985697e-01   1.2389598e+00   1.3108618e+00   2.8217641e+00   2.9357847e+00   9.3026633e-01   1.7457596e+00   5.7867728e-01   2.7994225e+00   9.3610001e-01   1.5801828e+00   2.0692197e+00   8.2384013e-01   7.4740267e-01   1.3410314e+00   1.9783833e+00   2.2683159e+00   2.8062463e+00   1.3610783e+00   9.7249562e-01   1.1868139e+00   2.5244698e+00   1.3852951e+00   1.2460824e+00   6.3808075e-01   1.6077195e+00   1.5873000e+00   1.5826476e+00   7.0233835e-01   1.7831878e+00   1.6667819e+00   1.4276261e+00   9.9519977e-01   1.1984588e+00   1.1903343e+00   7.0386584e-01   7.1840099e-01   1.1107977e+00   5.8624446e-01   9.8495853e-01   8.7504951e-01   4.1586001e-01   8.7720955e-01   
1.5824669e+00   7.5976039e-01   5.5160819e-01   5.7741073e-01   5.4292906e-01   1.6736318e+00   6.7975091e-01   1.5883552e+00   8.2552685e-01   1.5948732e+00   1.1332978e+00   1.3595997e+00   2.3503762e+00   1.2554784e+00   1.9862884e+00   1.4596621e+00   1.8378727e+00   7.2852070e-01   9.6204649e-01   1.1697902e+00   9.6664346e-01   9.6271042e-01   9.5676647e-01   1.0496979e+00   2.4751922e+00   2.6546397e+00   1.2224367e+00   1.3843268e+00   7.2343175e-01   2.4751922e+00   7.5082357e-01   1.2832075e+00   1.7000773e+00   6.2988288e-01   5.0592043e-01   1.1833351e+00   1.5613251e+00   1.8886923e+00   2.3645560e+00   1.2016233e+00   7.5791688e-01   1.2125198e+00   2.0748074e+00   1.2155370e+00   1.0244319e+00   4.5470518e-01   1.1485394e+00   1.2770118e+00   1.0720678e+00   8.2552685e-01   1.5113992e+00   1.3835368e+00   1.0035600e+00   9.5571254e-01   8.2234151e-01   1.0120221e+00   6.5223271e-01   8.3888121e-01   1.1486378e+00   1.2994764e+00   1.2307737e+00   6.0181382e-01   1.0483827e+00   1.9867752e+00   1.1408504e+00   1.0391247e+00   1.0361698e+00   5.7867728e-01   2.0669733e+00   1.0646687e+00   1.4623898e+00   9.5866719e-01   1.2497790e+00   9.3048953e-01   1.1765359e+00   1.9666356e+00   1.8175297e+00   1.6242657e+00   1.1520347e+00   1.5603665e+00   5.7324170e-01   7.0233835e-01   8.8887100e-01   1.0919404e+00   1.1355826e+00   8.9712482e-01   8.1385214e-01   2.1052360e+00   2.2845234e+00   1.0166932e+00   1.1341579e+00   1.1332978e+00   2.0738150e+00   5.3309112e-01   1.0588560e+00   1.3224963e+00   5.5492130e-01   6.2538346e-01   9.8450810e-01   1.1271488e+00   1.4561933e+00   1.8911946e+00   1.0230441e+00   5.2574978e-01   1.0056742e+00   1.5916843e+00   1.1355826e+00   8.2105460e-01   7.1504098e-01   8.1527569e-01   1.1176720e+00   8.2899253e-01   9.5866719e-01   1.2943100e+00   1.2429818e+00   8.5205778e-01   7.0233835e-01   6.2660376e-01   9.8054887e-01   8.3649708e-01   8.7822463e-01   8.2899253e-01   8.1099042e-01   7.0826681e-01   5.8914551e-01   1.5042268e+00   7.3813096e-01   8.1558458e-01   7.4855857e-01   6.0121055e-01   1.6260946e+00   7.0386584e-01   1.8605327e+00   8.8503502e-01   1.6493191e+00   1.2601890e+00   1.5390703e+00   2.3592515e+00   1.4088394e+00   1.9940473e+00   1.4245508e+00   2.0716002e+00   1.1004436e+00   9.8820253e-01   1.2927814e+00   9.0056222e-01   1.2208301e+00   1.3194807e+00   1.1996741e+00   2.6153308e+00   2.6539963e+00   6.2538346e-01   1.5687169e+00   9.5271386e-01   2.4562038e+00   6.6491075e-01   1.5249255e+00   1.7538274e+00   6.6539428e-01   8.2619017e-01   1.3131724e+00   1.5409345e+00   1.8470010e+00   2.4533073e+00   1.3504603e+00   7.7039952e-01   1.2057554e+00   2.0352149e+00   1.6006330e+00   1.2331989e+00   8.0660588e-01   1.2747177e+00   1.5040391e+00   1.2426449e+00   8.8503502e-01   1.7019078e+00   1.6700310e+00   1.2144845e+00   7.4855857e-01   1.0401425e+00   1.4600567e+00   9.3296062e-01   5.0180477e-01   4.4651726e-01   6.2149089e-01   4.1586001e-01   1.0078327e+00   3.0275928e-01   1.4096146e-01   1.4096146e-01   6.0611244e-01   1.1536782e+00   2.0656129e-01   2.0491051e+00   1.0646687e+00   2.0979729e+00   1.5528443e+00   1.8279176e+00   2.8469870e+00   8.2234151e-01   2.4692682e+00   1.8399871e+00   2.3632803e+00   1.2493717e+00   1.3312249e+00   1.6756749e+00   1.0425476e+00   1.3092012e+00   1.4505265e+00   1.5123788e+00   2.9884772e+00   3.1361386e+00   1.0755693e+00   1.9017004e+00   9.3801395e-01   2.9635613e+00   9.8054887e-01   1.7833384e+00   2.1970231e+00   8.6513410e-01   8.9742724e-01   1.6159903e+00   
2.0524973e+00   2.3760856e+00   2.8811560e+00   1.6399646e+00   1.0934620e+00   1.5205305e+00   2.5867433e+00   1.6928004e+00   1.4836711e+00   7.9580667e-01   1.6659943e+00   1.7839298e+00   1.5792930e+00   1.0646687e+00   2.0113485e+00   1.8901379e+00   1.5067717e+00   1.0950112e+00   1.3129189e+00   1.4875372e+00   1.0389435e+00   4.0293660e-01   8.0499049e-01   3.0546431e-01   7.8197925e-01   2.5251796e-01   5.1691876e-01   4.2538717e-01   7.4740267e-01   1.0181000e+00   3.2586371e-01   2.1717162e+00   1.1533602e+00   2.2234347e+00   1.6690840e+00   1.9433381e+00   2.9713636e+00   7.2486328e-01   2.5929835e+00   1.9489982e+00   2.5241649e+00   1.4089364e+00   1.4425304e+00   1.8012344e+00   1.0919712e+00   1.3726860e+00   1.5830057e+00   1.6408468e+00   3.1546522e+00   3.2544456e+00   1.0406064e+00   2.0352149e+00   1.0168833e+00   3.0859269e+00   1.0901359e+00   1.9325796e+00   2.3348454e+00   9.8054887e-01   1.0379132e+00   1.7250039e+00   2.1825260e+00   2.4995667e+00   3.0529994e+00   1.7458338e+00   1.2167151e+00   1.6214915e+00   2.7108297e+00   1.8418195e+00   1.6195190e+00   9.3847194e-01   1.7995863e+00   1.9034198e+00   1.7022897e+00   1.1533602e+00   2.1413027e+00   2.0233319e+00   1.6211869e+00   1.1770266e+00   1.4411886e+00   1.6502968e+00   1.1644030e+00   6.5633874e-01   4.4417983e-01   1.1332978e+00   2.1845981e-01   4.2538717e-01   3.4583729e-01   7.1504098e-01   1.4080793e+00   3.4583729e-01   1.8866180e+00   8.7848692e-01   1.9819543e+00   1.3312249e+00   1.6523803e+00   2.6988671e+00   6.9006418e-01   2.3100474e+00   1.6455737e+00   2.2934334e+00   1.2426449e+00   1.1905954e+00   1.5932297e+00   8.9095811e-01   1.2681309e+00   1.4065584e+00   1.3487634e+00   2.8835141e+00   2.9705897e+00   7.3805807e-01   1.8205354e+00   8.5462626e-01   2.8117234e+00   9.3049742e-01   1.6679957e+00   2.0758969e+00   8.4050231e-01   8.3060013e-01   1.4419145e+00   1.9521697e+00   2.2599493e+00   2.8310619e+00   1.4807336e+00   9.4558103e-01   1.2404967e+00   2.5235709e+00   1.6070713e+00   1.3102444e+00   7.5705927e-01   1.6281130e+00   1.7021627e+00   1.6240596e+00   8.7848692e-01   1.8790831e+00   1.8155245e+00   1.5040391e+00   1.0014633e+00   1.2481462e+00   1.4334902e+00   8.6165877e-01   6.6827038e-01   1.5475692e+00   5.8914551e-01   5.0503591e-01   4.9857388e-01   3.0811765e-01   1.7160413e+00   5.7324170e-01   1.5795964e+00   6.5648056e-01   1.4967461e+00   1.0182895e+00   1.3034549e+00   2.2380042e+00   1.2266837e+00   1.8619092e+00   1.2701139e+00   1.8007564e+00   7.2852070e-01   7.9016429e-01   1.0989735e+00   7.5705927e-01   1.0406064e+00   1.0184370e+00   9.3999899e-01   2.3886514e+00   2.5357185e+00   8.2684479e-01   1.3424112e+00   7.0776547e-01   2.3522207e+00   4.8927739e-01   1.2205493e+00   1.5832517e+00   4.2667565e-01   4.4417983e-01   1.0974061e+00   1.4319225e+00   1.7587110e+00   2.2711652e+00   1.1390131e+00   5.1691876e-01   1.0163549e+00   1.9782498e+00   1.2532075e+00   9.2859317e-01   4.1449626e-01   1.0852663e+00   1.2786117e+00   1.0887986e+00   6.5648056e-01   1.4596621e+00   1.3991741e+00   1.0313560e+00   6.6932542e-01   7.7553525e-01   1.0741917e+00   5.7257017e-01   9.4558103e-01   2.5651975e-01   4.1449626e-01   3.2816937e-01   4.8135521e-01   1.0908017e+00   2.1845981e-01   2.1701312e+00   1.1752673e+00   2.1084262e+00   1.6352583e+00   1.9101184e+00   2.8518881e+00   9.7825559e-01   2.4781934e+00   1.8745369e+00   2.4235816e+00   1.3078976e+00   1.3842113e+00   1.6965018e+00   1.1329323e+00   1.4319225e+00   1.5496439e+00   1.5691346e+00   
3.0266197e+00   3.1503439e+00   1.0244319e+00   1.9425540e+00   1.0627606e+00   2.9623467e+00   1.0056742e+00   1.8539828e+00   2.2026387e+00   9.1163729e-01   9.9475949e-01   1.6952454e+00   2.0275673e+00   2.3581240e+00   2.8793190e+00   1.7227544e+00   1.1332978e+00   1.6029963e+00   2.5482247e+00   1.8278913e+00   1.5611067e+00   9.1163729e-01   1.6653066e+00   1.8462692e+00   1.5634147e+00   1.1752673e+00   2.0757295e+00   1.9739212e+00   1.5320003e+00   1.1179743e+00   1.3567326e+00   1.6362950e+00   1.1594648e+00   9.9475949e-01   1.1004436e+00   1.0720678e+00   1.4108494e+00   3.2816937e-01   9.8054887e-01   2.9240179e+00   1.9013501e+00   3.0016960e+00   2.4403742e+00   2.7185134e+00   3.7481490e+00   1.2643026e+00   3.3676733e+00   2.7249658e+00   3.2951180e+00   2.1701459e+00   2.2231652e+00   2.5778372e+00   1.8193838e+00   2.0594742e+00   2.3383666e+00   2.4210417e+00   3.9277558e+00   4.0319248e+00   1.8011138e+00   2.8105150e+00   1.7324239e+00   3.8595400e+00   1.8657887e+00   2.7098209e+00   3.1083486e+00   1.7551534e+00   1.8090259e+00   2.5002193e+00   2.9463843e+00   3.2688068e+00   3.8087204e+00   2.5182043e+00   1.9933741e+00   2.3692479e+00   3.4705738e+00   2.5885717e+00   2.3959721e+00   1.7005893e+00   2.5655126e+00   2.6734142e+00   2.4311441e+00   1.9013501e+00   2.9205331e+00   2.7876433e+00   2.3735872e+00   1.9516947e+00   2.2170194e+00   2.3879674e+00   1.9213461e+00   3.0546431e-01   2.0656129e-01   6.0611244e-01   1.2246352e+00   1.4096146e-01   1.9777274e+00   9.7249562e-01   2.0297383e+00   1.4616539e+00   1.7458338e+00   2.7737198e+00   7.5082357e-01   2.3931826e+00   1.7478866e+00   2.3213742e+00   1.2131545e+00   1.2498134e+00   1.6130724e+00   9.3824087e-01   1.2565757e+00   1.4029855e+00   1.4325768e+00   2.9414941e+00   3.0577671e+00   8.7720955e-01   1.8438146e+00   8.6956871e-01   2.8895427e+00   9.1310225e-01   1.7228354e+00   2.1321061e+00   8.0499049e-01   8.3345577e-01   1.5318874e+00   1.9898963e+00   2.3090270e+00   2.8491218e+00   1.5587730e+00   1.0122141e+00   1.4162017e+00   2.5326059e+00   1.6463627e+00   1.4047678e+00   7.3805807e-01   1.6165635e+00   1.7228488e+00   1.5520745e+00   9.7249562e-01   1.9420274e+00   1.8372522e+00   1.4628493e+00   1.0030700e+00   1.2523175e+00   1.4531349e+00   9.5571254e-01   1.2418578e-01   5.0270183e-01   1.2603076e+00   2.1269358e-01   1.9932786e+00   1.0168833e+00   1.9946994e+00   1.4557537e+00   1.7495699e+00   2.7337358e+00   9.0575661e-01   2.3519748e+00   1.7324239e+00   2.2796281e+00   1.1801240e+00   1.2450709e+00   1.5872286e+00   1.0267435e+00   1.3336069e+00   1.4152303e+00   1.4096199e+00   2.8778917e+00   3.0268604e+00   1.0067464e+00   1.8215944e+00   9.3824087e-01   2.8471683e+00   9.0658670e-01   1.6933635e+00   2.0801243e+00   8.0758367e-01   8.3783744e-01   1.5390703e+00   1.9310038e+00   2.2599493e+00   2.7651778e+00   1.5728839e+00   9.7949166e-01   1.4165336e+00   2.4822593e+00   1.6510537e+00   1.3842113e+00   7.5755387e-01   1.5773217e+00   1.7253276e+00   1.5255331e+00   1.0168833e+00   1.9287244e+00   1.8366596e+00   1.4599710e+00   1.0339865e+00   1.2378278e+00   1.4537266e+00   9.7249562e-01   5.0090417e-01   1.2507669e+00   1.2418578e-01   1.9589833e+00   9.7270522e-01   1.9792779e+00   1.4437673e+00   1.7230625e+00   2.7260686e+00   8.6012420e-01   2.3478326e+00   1.7190893e+00   2.2590861e+00   1.1454006e+00   1.2174316e+00   1.5614941e+00   9.5476489e-01   1.2555979e+00   1.3635198e+00   1.3969297e+00   2.8759951e+00   3.0161959e+00   9.4558103e-01   1.7925890e+00   
8.6983677e-01   2.8416706e+00   8.6513410e-01   1.6742876e+00   2.0756986e+00   7.5871717e-01   7.9613242e-01   1.5107481e+00   1.9286915e+00   2.2531942e+00   2.7669732e+00   1.5384446e+00   9.7270522e-01   1.4111029e+00   2.4671050e+00   1.6105641e+00   1.3717027e+00   7.0429250e-01   1.5517600e+00   1.6837214e+00   1.4807336e+00   9.7270522e-01   1.9032219e+00   1.7953587e+00   1.4097072e+00   9.7855477e-01   1.2036484e+00   1.4110536e+00   9.4309624e-01   1.5090287e+00   5.0905001e-01   1.8619092e+00   9.1163729e-01   1.7230625e+00   1.3188999e+00   1.5883552e+00   2.4588872e+00   1.3193952e+00   2.0946464e+00   1.5338492e+00   2.0321740e+00   9.5099818e-01   1.0604511e+00   1.3277861e+00   9.3296062e-01   1.2221471e+00   1.2470767e+00   1.2266837e+00   2.6106370e+00   2.7667028e+00   8.7420176e-01   1.5746612e+00   8.9852394e-01   2.5679581e+00   6.9369532e-01   1.4905436e+00   1.8028753e+00   6.2149089e-01   6.9006418e-01   1.3813076e+00   1.6199747e+00   1.9552274e+00   2.4344864e+00   1.4148192e+00   8.0358695e-01   1.3039319e+00   2.1287551e+00   1.5153654e+00   1.2246352e+00   6.2660376e-01   1.2741904e+00   1.5160122e+00   1.2047214e+00   9.1163729e-01   1.7242097e+00   1.6420607e+00   1.2064640e+00   8.3812833e-01   1.0168833e+00   1.3257654e+00   8.6137722e-01   1.1533602e+00   3.1382691e+00   2.1485328e+00   3.1786790e+00   2.6799312e+00   2.9354140e+00   3.9353144e+00   1.5252485e+00   3.5658557e+00   2.9475994e+00   3.4455976e+00   2.3167786e+00   2.4320363e+00   2.7459122e+00   2.0598189e+00   2.2501104e+00   2.5013206e+00   2.6321552e+00   4.0910078e+00   4.2293986e+00   2.0521052e+00   2.9731112e+00   1.9619929e+00   4.0491656e+00   2.0481101e+00   2.8946126e+00   3.2860707e+00   1.9342059e+00   2.0025214e+00   2.7210925e+00   3.1148548e+00   3.4424959e+00   3.9360563e+00   2.7333517e+00   2.2078200e+00   2.6383936e+00   3.6047462e+00   2.7713578e+00   2.6121617e+00   1.8925840e+00   2.7075477e+00   2.8419886e+00   2.5215069e+00   2.1485328e+00   3.1102248e+00   2.9517129e+00   2.5041493e+00   2.1435335e+00   2.3887866e+00   2.5633372e+00   2.1544995e+00   2.0467316e+00   1.0576043e+00   2.0528819e+00   1.5379283e+00   1.8096161e+00   2.8029161e+00   8.6012420e-01   2.4272793e+00   1.8028753e+00   2.3372930e+00   1.2144845e+00   1.2985682e+00   1.6312555e+00   1.0166932e+00   1.3073038e+00   1.4333755e+00   1.4843487e+00   2.9588514e+00   3.0950947e+00   9.7949166e-01   1.8647706e+00   9.3615100e-01   2.9181741e+00   9.3049742e-01   1.7594421e+00   2.1522124e+00   8.2342214e-01   8.7720955e-01   1.5965946e+00   1.9973159e+00   2.3235032e+00   2.8379387e+00   1.6211988e+00   1.0588560e+00   1.5076049e+00   2.5254157e+00   1.6945041e+00   1.4637418e+00   7.8197925e-01   1.6130724e+00   1.7539916e+00   1.5193574e+00   1.0576043e+00   1.9849009e+00   1.8691652e+00   1.4607586e+00   1.0375119e+00   1.2741904e+00   1.4947429e+00   1.0361698e+00   1.0621081e+00   8.3649708e-01   7.6590510e-01   4.0176783e-01   1.3455136e+00   1.8827665e+00   1.1093572e+00   9.5676647e-01   9.0852141e-01   9.4309624e-01   8.9852394e-01   6.8076724e-01   1.2002762e+00   9.7825559e-01   7.0479928e-01   7.8197925e-01   1.4637418e+00   1.5390703e+00   1.4628493e+00   6.2538346e-01   1.2208301e+00   1.4754770e+00   1.2162549e+00   5.2574978e-01   1.0104465e+00   1.2832075e+00   1.1810170e+00   6.1947990e-01   1.1242402e+00   1.1718516e+00   1.6295015e+00   5.8914551e-01   1.2063335e+00   1.1879206e+00   1.4041085e+00   4.0293660e-01   7.7074935e-01   1.2709820e+00   7.7869083e-01   5.0592043e-01   
9.7441804e-01   1.0621081e+00   5.0991930e-01   4.4417983e-01   8.3888121e-01   1.1770266e+00   8.6361309e-01   6.0670504e-01   1.0324775e+00   1.3844611e+00   6.2660376e-01   8.8695363e-01   2.0692197e+00   9.7441804e-01   1.6995747e+00   1.0122141e+00   1.6372749e+00   7.6752131e-01   6.0551856e-01   1.0244319e+00   2.1845981e-01   5.0090417e-01   7.2852070e-01   7.4777660e-01   2.2645802e+00   2.3019759e+00   5.7324170e-01   1.1833351e+00   2.5651975e-01   2.1907335e+00   5.0905001e-01   1.0330459e+00   1.5124582e+00   4.4651726e-01   3.8934542e-01   6.9369532e-01   1.4517959e+00   1.7036156e+00   2.3021295e+00   7.0429250e-01   5.6700421e-01   6.3977563e-01   1.9782093e+00   8.7229670e-01   6.8801986e-01   3.8934542e-01   1.1199472e+00   9.9519977e-01   1.1263042e+00   0.0000000e+00   1.1697902e+00   1.0851476e+00   9.2859317e-01   5.0905001e-01   7.1504098e-01   7.7763126e-01   3.0546431e-01   8.2135873e-01   6.0121055e-01   7.6716823e-01   2.3579605e+00   4.5581864e-01   5.8914551e-01   6.5223271e-01   8.9095811e-01   8.2552685e-01   4.4417983e-01   1.5124582e+00   1.3844611e+00   8.1810461e-01   6.6384020e-01   1.0516761e+00   1.0733200e+00   1.3725949e+00   3.0844217e-01   1.6183051e+00   8.9095811e-01   1.1426203e+00   4.5470518e-01   3.2816937e-01   1.2604558e+00   1.2459608e+00   7.1799256e-01   5.0180477e-01   3.6171588e-01   1.0269295e+00   7.1840099e-01   1.0531192e+00   1.1093572e+00   6.1092863e-01   8.4591037e-01   7.4777660e-01   1.3693737e+00   5.0905001e-01   4.8135521e-01   8.0619006e-01   1.3844611e+00   3.4378533e-01   5.3309112e-01   7.3813096e-01   1.0901359e+00   8.1273630e-01   9.6141901e-01   1.2978356e+00   4.2667565e-01   1.4573287e+00   1.5826638e+00   1.0904758e+00   5.0503591e-01   1.1258723e+00   5.4292906e-01   3.2816937e-01   5.3022554e-01   7.7869083e-01   7.5871717e-01   5.5492130e-01   2.1269358e-01   1.6596342e+00   1.6919202e+00   8.3280511e-01   7.0429250e-01   8.7202528e-01   1.5772389e+00   7.0394675e-01   5.2655962e-01   9.2836103e-01   8.0064372e-01   7.0437330e-01   3.0546431e-01   9.0478973e-01   1.1271488e+00   1.7235501e+00   4.0293660e-01   5.2942799e-01   4.5470518e-01   1.4317371e+00   6.8917100e-01   2.1269358e-01   8.1099042e-01   6.2988288e-01   6.5172743e-01   7.6166891e-01   6.2660376e-01   6.5648056e-01   7.6625946e-01   6.1947990e-01   6.4755655e-01   4.2667565e-01   6.2660376e-01   5.6700421e-01   1.2113327e+00   1.8396098e+00   8.7504951e-01   5.7257017e-01   8.3280511e-01   7.0784540e-01   5.5492130e-01   3.7427929e-01   1.0284501e+00   8.7420176e-01   5.0991930e-01   4.4417983e-01   1.4089719e+00   1.4387122e+00   1.1127329e+00   4.1586001e-01   1.1205013e+00   1.3344634e+00   9.3048953e-01   3.2816937e-01   7.4164639e-01   1.0244319e+00   9.3999899e-01   2.5651975e-01   8.1242502e-01   9.1858284e-01   1.4955532e+00   2.5251796e-01   8.7420176e-01   8.5335130e-01   1.2045536e+00   4.3691963e-01   4.4651726e-01   1.0480665e+00   4.9857388e-01   2.8507955e-01   7.3535471e-01   8.8695363e-01   3.2816937e-01   3.8934542e-01   6.0611244e-01   8.6361309e-01   6.0551856e-01   5.2655962e-01   8.3783744e-01   3.0351721e+00   4.2418962e-01   1.0941064e+00   7.5705927e-01   1.6559784e+00   1.5582387e+00   1.2112034e+00   2.1967372e+00   2.0692197e+00   1.5564198e+00   1.3693737e+00   8.0096515e-01   4.5581864e-01   2.0330276e+00   1.0137836e+00   2.3142399e+00   2.1845981e-01   1.9017011e+00   1.1228379e+00   6.6827038e-01   2.0223026e+00   1.9969203e+00   1.3792358e+00   8.7478495e-01   5.2371571e-01   8.1385214e-01   1.3793330e+00   
1.7664528e+00   1.6569692e+00   5.0905001e-01   1.4644753e+00   1.4341959e+00   2.1204309e+00   1.2632199e+00   1.1880428e+00   1.5405106e+00   2.0692197e+00   9.4009473e-01   1.1355826e+00   1.4992973e+00   1.8311457e+00   1.5765737e+00   1.6311692e+00   1.9969203e+00   2.6670272e+00   1.9783833e+00   2.5784641e+00   1.6572339e+00   1.5613865e+00   1.9842916e+00   8.6110333e-01   1.0720678e+00   1.6180482e+00   1.7140774e+00   3.2123303e+00   3.2542669e+00   1.1332978e+00   2.1449779e+00   7.5976039e-01   3.1540626e+00   1.4088394e+00   1.9797139e+00   2.4825886e+00   1.3075101e+00   1.2330392e+00   1.6629594e+00   2.4148300e+00   2.6746409e+00   3.2573703e+00   1.6686069e+00   1.4322723e+00   1.4341959e+00   2.9471490e+00   1.6864366e+00   1.6385322e+00   1.1320702e+00   2.0632091e+00   1.9468380e+00   2.0389505e+00   9.7441804e-01   2.1303950e+00   2.0095672e+00   1.8517858e+00   1.4168607e+00   1.6483152e+00   1.5346983e+00   1.0866092e+00   7.2486328e-01   8.7202528e-01   1.2988558e+00   1.1847335e+00   8.6137722e-01   1.8265471e+00   1.7177705e+00   1.2107055e+00   9.9368623e-01   9.5866719e-01   7.3805807e-01   1.6506221e+00   7.3805807e-01   1.9449573e+00   5.0592043e-01   1.5350426e+00   7.8695083e-01   3.7427929e-01   1.6553809e+00   1.6249178e+00   1.0168833e+00   5.0991930e-01   2.1845981e-01   9.7270522e-01   1.0264409e+00   1.3817041e+00   1.2768639e+00   5.7324170e-01   1.1634384e+00   1.0611732e+00   1.7483574e+00   9.3048953e-01   9.0056222e-01   1.2340567e+00   1.6995747e+00   6.8076724e-01   9.1883539e-01   1.1718516e+00   1.4616896e+00   1.2125198e+00   1.2951888e+00   1.6249178e+00   1.2028939e+00   8.7420176e-01   5.3665999e-01   5.5492130e-01   1.1340084e+00   1.0720678e+00   8.3345577e-01   5.3588338e-01   1.5520745e+00   1.3258714e+00   9.5099818e-01   7.7074935e-01   1.2604558e+00   1.1891470e+00   9.2264612e-01   8.1099042e-01   7.7039952e-01   1.0389435e+00   1.0054794e+00   4.3456114e-01   6.2605182e-01   7.2823007e-01   1.5784191e+00   4.8927739e-01   7.5976039e-01   6.5223271e-01   1.0692258e+00   9.8985697e-01   6.3808075e-01   1.1178200e+00   6.6827038e-01   7.4855857e-01   8.6513410e-01   1.0122141e+00   7.6787403e-01   9.3615100e-01   7.5835500e-01   8.2654509e-01   6.9728513e-01   9.9519977e-01   9.7356960e-01   1.1306887e+00   1.2197188e+00   8.0353565e-01   1.7933375e+00   1.5916843e+00   1.0118409e+00   1.0089164e+00   7.0776547e-01   1.1591754e+00   1.8437762e+00   5.3309112e-01   1.8278913e+00   9.6838716e-01   1.4843324e+00   6.3735887e-01   7.3496673e-01   1.5570415e+00   1.5003972e+00   1.0421979e+00   9.7759114e-01   8.9070384e-01   7.8197925e-01   1.0329598e+00   1.4388174e+00   1.5204340e+00   6.9325418e-01   9.4309624e-01   1.0339865e+00   1.6134578e+00   8.0660588e-01   7.0523271e-01   1.0406064e+00   1.6372749e+00   5.1303949e-01   5.8851328e-01   1.0072663e+00   1.4951106e+00   1.0950112e+00   1.0934620e+00   1.5213929e+00   5.0991930e-01   4.5581864e-01   9.3615100e-01   7.6590510e-01   3.2586371e-01   4.2538717e-01   1.7942496e+00   1.9566981e+00   1.0636401e+00   6.6384020e-01   9.2264612e-01   1.7814077e+00   5.2371571e-01   6.0670504e-01   1.0120221e+00   4.8927739e-01   4.3691963e-01   5.6769031e-01   8.9366705e-01   1.1948578e+00   1.6982795e+00   5.7324170e-01   5.7257017e-01   8.3060013e-01   1.3824965e+00   5.7867728e-01   4.1586001e-01   5.4292906e-01   4.4651726e-01   5.7324170e-01   4.4535192e-01   7.6752131e-01   8.2105460e-01   6.9369532e-01   3.4583729e-01   7.0479928e-01   2.0656129e-01   4.3456114e-01   6.1092863e-01   
4.6472023e-01   7.1840099e-01   6.9369532e-01   5.6631629e-01   3.2816937e-01   1.8058693e+00   1.8261179e+00   6.3735887e-01   7.0328431e-01   8.2684479e-01   1.6791597e+00   4.0293660e-01   6.6827038e-01   9.7377870e-01   5.0991930e-01   4.8135521e-01   3.2586371e-01   8.7021234e-01   1.1327825e+00   1.7839298e+00   3.7427929e-01   4.1586001e-01   5.5492130e-01   1.3916739e+00   7.7919451e-01   4.1449626e-01   5.8914551e-01   5.7324170e-01   6.0900723e-01   6.2407309e-01   6.0551856e-01   7.5705927e-01   7.8695083e-01   4.8135521e-01   3.2586371e-01   3.0811765e-01   7.3851529e-01   5.3665999e-01   1.1524979e+00   1.0244319e+00   4.3691963e-01   3.7255734e-01   1.4090646e+00   1.5060944e+00   1.0810263e+00   2.8507955e-01   1.2406194e+00   1.3336069e+00   7.1791510e-01   3.2586371e-01   5.9426792e-01   8.2552685e-01   8.2275389e-01   4.1449626e-01   5.8851328e-01   7.5196795e-01   1.3415658e+00   4.1586001e-01   7.2852070e-01   8.9159388e-01   9.7249562e-01   5.8914551e-01   4.4535192e-01   9.4352681e-01   1.4096146e-01   3.0811765e-01   4.1586001e-01   1.0244319e+00   4.2538717e-01   4.5581864e-01   3.2586371e-01   7.0869559e-01   3.7427929e-01   6.5223271e-01   9.2836103e-01   4.4651726e-01   8.8695363e-01   8.9971984e-01   2.4227359e+00   2.4243464e+00   5.5419992e-01   1.3235313e+00   3.0546431e-01   2.3149695e+00   6.1151102e-01   1.2036484e+00   1.6520677e+00   5.4292906e-01   5.7324170e-01   8.2305664e-01   1.5788188e+00   1.8238348e+00   2.4554026e+00   8.2552685e-01   7.0429250e-01   7.7588000e-01   2.0959492e+00   1.0466623e+00   8.6513410e-01   5.4292906e-01   1.2497790e+00   1.1215059e+00   1.2436109e+00   2.1845981e-01   1.3165513e+00   1.2256881e+00   1.0406064e+00   6.0060595e-01   8.5434758e-01   9.6664346e-01   5.1691876e-01   6.5223271e-01   8.4050231e-01   2.2451458e+00   2.2996030e+00   9.7270522e-01   1.1594648e+00   4.2538717e-01   2.1936248e+00   6.9369532e-01   1.0119857e+00   1.5297036e+00   6.6384020e-01   6.2988288e-01   7.0386584e-01   1.5113992e+00   1.7140171e+00   2.2868482e+00   6.9325418e-01   9.4080461e-01   1.0406064e+00   1.9734538e+00   7.5835500e-01   7.8695083e-01   6.2988288e-01   1.1158787e+00   9.4832302e-01   1.1055069e+00   5.0090417e-01   1.1452867e+00   1.0056742e+00   9.0277242e-01   6.3977563e-01   7.3851529e-01   6.6432544e-01   6.0611244e-01   5.1691876e-01   1.6983410e+00   1.8377590e+00   1.1500393e+00   5.6631629e-01   8.6012420e-01   1.6864433e+00   6.6539428e-01   4.5581864e-01   9.7356960e-01   6.6932542e-01   5.9426792e-01   4.5470518e-01   9.7548738e-01   1.1573546e+00   1.6772907e+00   4.4535192e-01   8.2929029e-01   9.8450810e-01   1.3812107e+00   3.2816937e-01   5.0905001e-01   6.6932542e-01   5.0991930e-01   3.7598397e-01   5.0905001e-01   7.2852070e-01   6.4704320e-01   4.5581864e-01   3.2586371e-01   7.4777660e-01   3.2816937e-01   2.5251796e-01   6.3108414e-01   1.5573817e+00   1.6420607e+00   9.0575661e-01   5.7867728e-01   9.7441804e-01   1.4917344e+00   6.2482915e-01   4.0176783e-01   7.7039952e-01   7.1799256e-01   6.4704320e-01   3.2816937e-01   7.1799256e-01   9.7270522e-01   1.5593809e+00   4.1586001e-01   4.6472023e-01   5.6454040e-01   1.2601890e+00   6.5223271e-01   1.2418578e-01   7.6716823e-01   4.4651726e-01   6.0670504e-01   6.1947990e-01   7.4777660e-01   5.9426792e-01   7.2172678e-01   5.3588338e-01   6.2660376e-01   3.2352160e-01   5.8914551e-01   6.4704320e-01   1.2013436e+00   2.3665136e+00   1.1771643e+00   2.4806944e+00   1.0018083e+00   2.1098467e+00   1.2627078e+00   8.8503502e-01   2.2024869e+00   
2.1511385e+00   1.6189643e+00   1.1368070e+00   1.0688498e+00   3.4378533e-01   1.6188960e+00   1.9698860e+00   1.9254808e+00   8.8861541e-01   1.5832517e+00   1.5978297e+00   2.2712062e+00   1.4277162e+00   1.3610783e+00   1.6849072e+00   2.2645802e+00   1.1106525e+00   1.2665468e+00   1.6689743e+00   2.0995265e+00   1.7457596e+00   1.7532140e+00   2.1511385e+00   2.2712062e+00   1.3276804e+00   2.5485519e+00   3.4378533e-01   2.1870851e+00   1.4266198e+00   1.0379132e+00   2.3072128e+00   2.2736138e+00   1.6156775e+00   1.2095267e+00   8.3888121e-01   1.2277129e+00   1.6151153e+00   2.0528819e+00   1.8792214e+00   8.2624515e-01   1.7265353e+00   1.7004805e+00   2.3953564e+00   1.5733646e+00   1.4691764e+00   1.8497891e+00   2.3019759e+00   1.2238809e+00   1.4266198e+00   1.7962897e+00   2.1027465e+00   1.8635467e+00   1.9008621e+00   2.2439391e+00   1.3353353e+00   7.2526325e-01   2.1293320e+00   5.5492130e-01   1.2776560e+00   1.5193574e+00   6.2988288e-01   8.1130291e-01   8.6912228e-01   1.3752391e+00   1.6044563e+00   2.3474075e+00   9.1883539e-01   6.2024833e-01   6.4806901e-01   1.9000365e+00   1.3674559e+00   9.6664346e-01   8.1354181e-01   1.1751082e+00   1.2304904e+00   1.2256933e+00   5.7324170e-01   1.3628690e+00   1.4089364e+00   1.0866132e+00   4.8036801e-01   8.9971984e-01   1.3044654e+00   8.1130291e-01   1.3916739e+00   1.1500393e+00   9.6838716e-01   2.5251796e-01   5.5419992e-01   1.0573285e+00   1.0284501e+00   5.7324170e-01   7.1840099e-01   6.6317860e-01   1.1434428e+00   5.6769031e-01   9.7855477e-01   1.1106525e+00   8.2899253e-01   6.0670504e-01   6.2660376e-01   1.1449732e+00   3.2586371e-01   2.1845981e-01   6.0060595e-01   1.1833351e+00   2.0656129e-01   2.5251796e-01   5.1607523e-01   9.6324667e-01   5.9426792e-01   7.1799256e-01   1.0879524e+00   2.4372751e+00   7.0437330e-01   1.2331989e+00   1.7427900e+00   6.0611244e-01   5.1607523e-01   9.3615100e-01   1.6812503e+00   1.9411754e+00   2.5109747e+00   9.3801395e-01   7.7039952e-01   8.6513410e-01   2.2052183e+00   9.6324667e-01   8.9917007e-01   4.2667565e-01   1.3224963e+00   1.1912106e+00   1.3084046e+00   2.5651975e-01   1.3897316e+00   1.2541242e+00   1.1120775e+00   7.1504098e-01   9.1051084e-01   8.1521713e-01   3.6171588e-01   2.0209349e+00   1.2627078e+00   7.9878917e-01   2.1431239e+00   2.1198551e+00   1.5016009e+00   9.6141901e-01   6.2024833e-01   1.0083666e+00   1.5022608e+00   1.8803649e+00   1.7557336e+00   6.2482915e-01   1.6044563e+00   1.5582387e+00   2.2435182e+00   1.3836712e+00   1.3199714e+00   1.6568705e+00   2.1907335e+00   1.0796583e+00   1.2825987e+00   1.6205332e+00   1.9460721e+00   1.6995133e+00   1.7678302e+00   2.1198551e+00   9.1750357e-01   1.2756158e+00   1.4096146e-01   3.2352160e-01   7.1504098e-01   1.1242402e+00   1.4312787e+00   2.0223464e+00   7.3535471e-01   3.2586371e-01   7.3851529e-01   1.6386882e+00   9.4477932e-01   6.4755655e-01   3.7427929e-01   7.3805807e-01   8.6165877e-01   7.2852070e-01   5.0905001e-01   1.0922991e+00   1.0175773e+00   6.0900723e-01   2.1269358e-01   4.0176783e-01   8.2372435e-01   4.5470518e-01   5.5492130e-01   9.8495853e-01   9.0521488e-01   5.2942799e-01   6.3977563e-01   7.9878917e-01   1.2832075e+00   5.3022554e-01   8.3060013e-01   9.4500268e-01   1.0244319e+00   4.4651726e-01   4.0176783e-01   1.0230441e+00   3.4378533e-01   3.2586371e-01   6.1623531e-01   1.0330459e+00   2.5651975e-01   4.0000000e-01   5.3588338e-01   9.5676647e-01   5.3665999e-01   5.3665999e-01   9.0521488e-01   1.3865084e+00   1.3669552e+00   8.6012420e-01   
2.8192292e-01   4.1586001e-01   8.4050231e-01   8.7383925e-01   1.1355826e+00   1.1712156e+00   6.2660376e-01   9.8985697e-01   8.5205778e-01   1.4909823e+00   6.3861009e-01   7.2526325e-01   9.4854455e-01   1.5124582e+00   5.6700421e-01   7.7919451e-01   8.9971984e-01   1.2483814e+00   9.4009473e-01   1.0879524e+00   1.4147273e+00   2.1269358e-01   8.1354181e-01   1.2441035e+00   1.5551238e+00   2.1137172e+00   8.2899253e-01   3.7427929e-01   8.2929029e-01   1.7587110e+00   9.6095130e-01   7.1799256e-01   2.4837156e-01   8.3280511e-01   9.3797093e-01   7.9016429e-01   4.4651726e-01   1.1833351e+00   1.0724413e+00   6.6932542e-01   3.2816937e-01   4.6472023e-01   8.0467258e-01   3.8776762e-01   7.3145860e-01   1.2564564e+00   1.5558094e+00   2.0983278e+00   7.5082357e-01   3.6171588e-01   7.6590510e-01   1.7862655e+00   8.4050231e-01   6.2024833e-01   1.2418578e-01   8.6137722e-01   8.9852394e-01   8.5462626e-01   3.8934542e-01   1.1192362e+00   1.0078327e+00   7.0386584e-01   5.0991930e-01   4.5470518e-01   6.6539428e-01   2.4837156e-01   8.5690100e-01   1.0344911e+00   1.6689743e+00   1.0000000e-01   6.8961791e-01   7.1799256e-01   1.3207609e+00   6.2024833e-01   3.7427929e-01   8.3888121e-01   5.3588338e-01   4.2288438e-01   6.4405773e-01   6.9369532e-01   5.3309112e-01   5.8914551e-01   4.6472023e-01   6.2538346e-01   4.1586001e-01   6.1623531e-01   6.4405773e-01   4.0176783e-01   1.0175773e+00   8.9303452e-01   1.0122141e+00   1.1161766e+00   7.7885297e-01   1.0755693e+00   8.1385214e-01   1.3792358e+00   5.8914551e-01   8.5462626e-01   8.7848692e-01   1.4517959e+00   7.3851529e-01   9.4854455e-01   8.6263408e-01   1.0941064e+00   8.3783744e-01   1.1172689e+00   1.3545005e+00   1.0391247e+00   1.0389435e+00   1.3163598e+00   1.3379696e+00   4.5470518e-01   1.1951875e+00   1.0632598e+00   1.6796759e+00   7.8197925e-01   8.3345577e-01   1.0540105e+00   1.7036156e+00   6.9167458e-01   8.8503502e-01   1.0279631e+00   1.3693737e+00   1.1192426e+00   1.3077572e+00   1.6183051e+00   1.6694974e+00   1.9094934e+00   1.9895190e+00   8.2384013e-01   1.6634400e+00   1.6218244e+00   2.2178691e+00   1.3008161e+00   1.3567326e+00   1.4994715e+00   2.3021295e+00   1.1763719e+00   1.3024224e+00   1.5535909e+00   2.0373882e+00   1.6756749e+00   1.7981158e+00   2.1739455e+00   7.6752131e-01   8.1354181e-01   1.3198846e+00   6.0611244e-01   4.4535192e-01   8.5335130e-01   5.3665999e-01   3.8776762e-01   6.3977563e-01   7.0429250e-01   5.2655962e-01   5.5492130e-01   4.5581864e-01   6.3861009e-01   4.2667565e-01   6.1151102e-01   6.6932542e-01   5.1691876e-01   1.5922648e+00   1.0054794e+00   4.8135521e-01   4.3456114e-01   7.6955924e-01   9.6664346e-01   8.9687438e-01   5.6700421e-01   1.0421979e+00   1.1001291e+00   8.2929029e-01   4.4535192e-01   5.1691876e-01   8.9712482e-01   4.5470518e-01   1.6914476e+00   1.1340084e+00   5.8914551e-01   8.5105559e-01   9.7548738e-01   1.0864449e+00   1.1160770e+00   6.3977563e-01   1.0720678e+00   1.2163831e+00   1.0047836e+00   6.9369532e-01   7.2343175e-01   1.0613462e+00   6.2407309e-01   1.4238090e+00   1.3511716e+00   1.9067300e+00   9.3824087e-01   1.0331736e+00   1.1327825e+00   1.9782093e+00   9.0454394e-01   1.0244319e+00   1.1833480e+00   1.5948732e+00   1.3360558e+00   1.5461469e+00   1.8900319e+00   6.2081167e-01   9.1750357e-01   6.4290921e-01   4.4417983e-01   7.0429250e-01   8.7229670e-01   5.3665999e-01   4.0438741e-01   5.6454040e-01   1.0054794e+00   5.7015910e-01   2.1269358e-01   7.5705927e-01   7.3496673e-01   5.2942799e-01   6.2024833e-01   
6.6491075e-01   6.8801986e-01   6.1947990e-01   7.2172678e-01   5.5492130e-01   6.9006418e-01   3.2816937e-01   5.3665999e-01   5.6700421e-01   9.7779835e-01   1.0014633e+00   9.4854455e-01   3.8934542e-01   1.2342162e+00   1.1043332e+00   7.9580667e-01   5.3665999e-01   5.7257017e-01   7.2852070e-01   3.0275928e-01   3.4378533e-01   3.2352160e-01   1.1199472e+00   5.0991930e-01   4.6472023e-01   2.8507955e-01   7.7869083e-01   4.1586001e-01   7.1799256e-01   1.0132664e+00   5.0905001e-01   9.9519977e-01   3.0811765e-01   2.1269358e-01   4.0293660e-01   8.3060013e-01   5.0592043e-01   5.3665999e-01   9.3049742e-01   1.1263042e+00   8.0064372e-01   6.1623531e-01   2.1269358e-01   7.7588000e-01   4.4651726e-01   7.2783368e-01   1.0329901e+00   1.1697902e+00   1.0851476e+00   9.2859317e-01   5.0905001e-01   7.1504098e-01   7.7763126e-01   3.0546431e-01   2.5651975e-01   7.0437330e-01   1.0573285e+00   7.3145860e-01   6.9325418e-01   1.0901359e+00   5.3588338e-01   1.0175773e+00   6.4405773e-01   5.3665999e-01   1.0078327e+00   6.2407309e-01   3.2352160e-01   5.7257017e-01   8.5205778e-01   5.1691876e-01   9.4022486e-01   5.6769031e-01   4.8927739e-01   6.0611244e-01   6.0900723e-01
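Each pdist-*.txt fixture under scipy/spatial/tests/data/ stores a condensed distance vector: the n*(n-1)/2 upper-triangle entries of an n x n pairwise distance matrix, flattened row by row into one line of whitespace-separated floats. Such a vector can be expanded back to the square matrix with scipy.spatial.distance.squareform; a minimal sketch, where the file name merely stands in for whichever fixture is being inspected:

    import numpy as np
    from scipy.spatial.distance import squareform

    # A pdist fixture is one line of whitespace-separated floats:
    # the condensed (upper-triangle) form of a symmetric distance matrix.
    condensed = np.loadtxt("pdist-minkowski-3.2-ml.txt")

    # Expand to the full n x n symmetric matrix (zeros on the diagonal).
    square = squareform(condensed)
    assert square.shape[0] == square.shape[1]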
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-minkowski-3.2-ml.txt b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-minkowski-3.2-ml.txt
new file mode 100644
index 00000000..daa81110
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-minkowski-3.2-ml.txt
@@ -0,0 +1 @@
+   2.0215050e+00   2.0988154e+00   1.8614681e+00   2.0510161e+00   1.9210911e+00   2.1323516e+00   1.9565454e+00   2.1029889e+00   1.9617871e+00   2.0544792e+00   2.0357408e+00   1.8811414e+00   2.0694693e+00   2.1245977e+00   2.0632165e+00   2.0452823e+00   2.0249330e+00   1.9635489e+00   2.0508580e+00   2.0838578e+00   1.9324052e+00   1.8224609e+00   1.9795343e+00   1.9536534e+00   1.9694910e+00   1.9075569e+00   1.9590397e+00   2.0022087e+00   1.8814000e+00   1.8884208e+00   1.9961121e+00   2.0215351e+00   1.7515769e+00   2.0756437e+00   2.0109476e+00   1.9234849e+00   1.9160076e+00   1.8550862e+00   1.7733640e+00   2.0071906e+00   2.0209542e+00   2.0616569e+00   2.0565503e+00   1.9083573e+00   2.2732431e+00   1.9975503e+00   1.9080072e+00   2.1437809e+00   2.1296295e+00   1.9739085e+00   1.9834166e+00   2.1078664e+00   2.2016840e+00   2.2080962e+00   1.7340579e+00   2.0549287e+00   1.7331748e+00   1.9559688e+00   2.0343364e+00   1.8736929e+00   1.9730416e+00   1.5308944e+00   1.8421831e+00   2.0174240e+00   2.0137378e+00   1.7956151e+00   1.9606596e+00   1.9074857e+00   2.0413879e+00   2.0070305e+00   1.9584677e+00   1.8977851e+00   1.9176239e+00   1.7067419e+00   1.9461927e+00   1.8431700e+00   1.8284576e+00   1.7778704e+00   1.8350329e+00   2.0175415e+00   1.7459063e+00   1.9242505e+00   1.8757370e+00   1.9312506e+00   2.0574808e+00   2.0894636e+00   1.9780203e+00   2.1374036e+00   1.8900436e+00   2.0273032e+00   2.0681953e+00   2.0234699e+00   2.0666449e+00   2.0663485e+00   1.9281402e+00   1.7846314e+00   2.0372479e+00   1.8831230e+00   2.0186015e+00   2.0193231e+00   2.2022665e+00   1.8145737e+00   2.0466545e+00   1.8092421e+00   1.9600687e+00   2.0322961e+00   1.9556364e+00   1.8266422e+00   1.9950345e+00   2.1038429e+00   2.1164145e+00   2.0188062e+00   1.8863331e+00   2.0006971e+00   1.9971068e+00   1.8771862e+00   2.1148855e+00   1.9570638e+00   1.9859615e+00   2.0030854e+00   2.0737344e+00   1.9739259e+00   1.9266524e+00   1.9200535e+00   2.1376689e+00   1.8944425e+00   1.9330553e+00   1.8561590e+00   1.9422954e+00   1.8874178e+00   1.8624808e+00   1.8265563e+00   1.8840519e+00   2.0515092e+00   2.0174226e+00   1.9771196e+00   2.0635988e+00   1.7334466e+00   1.9912604e+00   1.8915711e+00   1.8262636e+00   1.9369173e+00   1.9560446e+00   1.9549934e+00   1.9279230e+00   1.9021073e+00   2.0113391e+00   2.0305786e+00   1.8066806e+00   1.9656739e+00   2.1219217e+00   1.8820250e+00   1.8936826e+00   2.0565131e+00   1.9839441e+00   1.8553479e+00   1.9923760e+00   1.6393276e+00   1.9786440e+00   1.8274394e+00   1.9322611e+00   2.0404318e+00   1.9216532e+00   1.9361171e+00   1.8401373e+00   1.9908059e+00   1.9495117e+00   2.1975655e+00   1.8413913e+00   2.1528773e+00   1.8434374e+00   2.1668863e+00   2.0429273e+00   1.9980016e+00   1.9790129e+00   2.0264829e+00   2.1478843e+00   2.0899600e+00   2.0280670e+00   2.1210881e+00   1.9993891e+00   1.8646871e+00   1.9099983e+00   1.9263353e+00   2.0042495e+00   2.1365919e+00   2.1830279e+00   1.9631961e+00   2.0880004e+00   1.8348369e+00
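The pdist-minkowski-3.2-ml.txt fixture added above holds pairwise Minkowski distances of order p = 3.2, where d(u, v) = (sum_i |u_i - v_i|^p)^(1/p); the hunk below adds the p = 5.8 variant computed on the iris observations. A minimal sketch of checking such a fixture against a freshly recomputed result; the companion input file name and the tolerance are assumptions for illustration, not taken from this diff:

    import numpy as np
    from scipy.spatial.distance import pdist

    # Reference condensed distance vector added by this diff (1-D array).
    expected = np.loadtxt("pdist-minkowski-3.2-ml.txt")

    # Assumed companion input holding the observation matrix, one row per
    # point; the actual input/generator is not shown in this diff.
    X = np.loadtxt("pdist-double-inp.txt")

    # Recompute Minkowski (p = 3.2) pairwise distances and compare.
    actual = pdist(X, metric="minkowski", p=3.2)
    np.testing.assert_allclose(actual, expected, rtol=1e-6)  # assumed tolerance

The same check applies to pdist-minkowski-5.8-ml-iris.txt with the iris data and p = 5.8.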
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-minkowski-5.8-ml-iris.txt b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-minkowski-5.8-ml-iris.txt
new file mode 100644
index 00000000..aa26b043
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-minkowski-5.8-ml-iris.txt
@@ -0,0 +1 @@
+   5.0042326e-01   4.1210927e-01   5.2133179e-01   1.1269424e-01   4.2362917e-01   5.0001522e-01   1.2085435e-01   7.4262850e-01   4.0127250e-01   3.0482299e-01   3.0482299e-01   5.0436965e-01   8.0923926e-01   7.1629168e-01   9.1424701e-01   4.1317535e-01   1.0000000e-01   6.0366256e-01   3.0017653e-01   3.3813251e-01   2.2573593e-01   5.2133179e-01   3.4080442e-01   5.0436965e-01   5.0043084e-01   2.2608083e-01   1.1269424e-01   1.1269424e-01   4.1315633e-01   4.1315633e-01   3.0490481e-01   6.0000952e-01   7.0462550e-01   4.0127250e-01   3.0482299e-01   4.0002221e-01   4.0127250e-01   7.1621748e-01   1.1269424e-01   1.2085435e-01   1.2036864e+00   7.0088477e-01   4.0125062e-01   5.0476836e-01   5.0436965e-01   3.0474106e-01   5.0436235e-01   2.2573593e-01   2.0061436e-01   3.3243227e+00   3.1068812e+00   3.5145413e+00   2.6080595e+00   3.2075731e+00   3.1014454e+00   3.3055260e+00   1.9156198e+00   3.2079238e+00   2.5066441e+00   2.1498493e+00   2.8059664e+00   2.6093989e+00   3.3021953e+00   2.2070266e+00   3.0158454e+00   3.1034764e+00   2.7009878e+00   3.1081779e+00   2.5032992e+00   3.4074959e+00   2.6050088e+00   3.5035589e+00   3.3011884e+00   2.9065890e+00   3.0117336e+00   3.4118782e+00   3.6094426e+00   3.1038958e+00   2.1042326e+00   2.4058620e+00   2.3063407e+00   2.5029614e+00   3.7025335e+00   3.1034636e+00   3.1057006e+00   3.3110189e+00   3.0065909e+00   2.7025941e+00   2.6047974e+00   3.0013665e+00   3.2025221e+00   2.6029242e+00   1.9242109e+00   2.8024935e+00   2.8013151e+00   2.8022622e+00   2.9036582e+00   1.6267693e+00   2.7028014e+00   4.6144526e+00   3.7071079e+00   4.5121787e+00   4.2031939e+00   4.4087839e+00   5.2153194e+00   3.1086291e+00   4.9093646e+00   4.4044245e+00   4.7202040e+00   3.7119486e+00   3.9066365e+00   4.1123628e+00   3.6114402e+00   3.7307413e+00   3.9194642e+00   4.1043951e+00   5.3177489e+00   5.5157728e+00   3.6035661e+00   4.3162097e+00   3.5127031e+00   5.3163123e+00   3.5077296e+00   4.3088507e+00   4.6100803e+00   3.4082578e+00   3.5068380e+00   4.2080636e+00   4.4113183e+00   4.7149608e+00   5.0316727e+00   4.2105572e+00   3.7024462e+00   4.2007769e+00   4.7331529e+00   4.2173557e+00   4.1039096e+00   3.4076329e+00   4.0157626e+00   4.2194897e+00   3.7329396e+00   3.7071079e+00   4.5119962e+00   4.3218071e+00   3.8249612e+00   3.6093673e+00   3.8105293e+00   4.0166459e+00   3.7050109e+00   2.2573593e-01   3.0017653e-01   6.0000317e-01   9.0534502e-01   4.1210927e-01   4.0004442e-01   5.0000761e-01   1.2085435e-01   7.1621748e-01   4.0125062e-01   1.1269424e-01   6.0184622e-01   1.0776294e+00   1.4092540e+00   9.0508756e-01   5.0043084e-01   9.0181717e-01   8.0004602e-01   5.2491131e-01   7.0017011e-01   6.1119267e-01   3.6452132e-01   5.2133179e-01   2.0061436e-01   4.0246123e-01   5.0436965e-01   4.1209001e-01   2.4170870e-01   2.0121983e-01   5.2167829e-01   1.1001015e+00   1.2036862e+00   1.2085435e-01   2.2573593e-01   6.3164977e-01   1.2085435e-01   5.0000761e-01   4.0125062e-01   5.0002283e-01   7.0462844e-01   5.0043084e-01   5.2167829e-01   8.0888055e-01   1.1269424e-01   8.0008884e-01   3.0474106e-01   7.0462697e-01   3.0008832e-01   3.3416860e+00   3.1112912e+00   3.5249966e+00   2.6033557e+00   3.2127499e+00   3.1015178e+00   3.3078313e+00   1.9025708e+00   3.2150318e+00   2.5060738e+00   2.1061951e+00   2.8068283e+00   2.6040016e+00   3.3032134e+00   2.2072454e+00   3.0286102e+00   3.1035443e+00   2.7011973e+00   3.1070853e+00   2.5014549e+00   3.4078435e+00   2.6080511e+00   3.5048916e+00   3.3021665e+00   2.9125999e+00  
 3.0213627e+00   3.4211337e+00   3.6148618e+00   3.1047537e+00   2.1027003e+00   2.4016639e+00   2.3011929e+00   2.5032633e+00   3.7028303e+00   3.1034629e+00   3.1065984e+00   3.3192072e+00   3.0078209e+00   2.7027260e+00   2.6031664e+00   3.0009332e+00   3.2037232e+00   2.6027120e+00   1.9031578e+00   2.8022915e+00   2.8015662e+00   2.8024715e+00   2.9065359e+00   1.6099792e+00   2.7029416e+00   4.6149181e+00   3.7071538e+00   4.5172866e+00   4.2039132e+00   4.4099272e+00   5.2224057e+00   3.1078968e+00   4.9146298e+00   4.4063795e+00   4.7253524e+00   3.7145622e+00   3.9080413e+00   4.1161770e+00   3.6111646e+00   3.7308314e+00   3.9209137e+00   4.1060063e+00   5.3254977e+00   5.5222404e+00   3.6024247e+00   4.3201293e+00   3.5126957e+00   5.3240486e+00   3.5093499e+00   4.3111749e+00   4.6158382e+00   3.4095576e+00   3.5076152e+00   4.2090727e+00   4.4184242e+00   4.7227808e+00   5.0458491e+00   4.2115634e+00   3.7037441e+00   4.2010125e+00   4.7466313e+00   4.2180733e+00   4.1050714e+00   3.4081972e+00   4.0212972e+00   4.2220584e+00   3.7407842e+00   3.7071538e+00   4.5144444e+00   4.3240980e+00   3.8290678e+00   3.6105228e+00   3.8128297e+00   4.0172657e+00   3.7052380e+00   2.0121983e-01   4.1210927e-01   7.9153339e-01   2.0181667e-01   3.0915245e-01   3.3813251e-01   2.2608083e-01   7.1629168e-01   3.0482299e-01   2.0181667e-01   4.0246123e-01   1.1281267e+00   1.2633045e+00   7.8890721e-01   4.1212852e-01   1.0095370e+00   6.0964891e-01   7.0470720e-01   5.2201750e-01   4.1210927e-01   4.5784410e-01   6.0017982e-01   3.4080442e-01   3.4342562e-01   5.0476836e-01   5.0043084e-01   3.0000000e-01   3.0017653e-01   7.0025283e-01   9.0508756e-01   1.0426513e+00   2.2608083e-01   3.0008832e-01   8.0046605e-01   2.2608083e-01   3.0474106e-01   4.0243965e-01   3.3813251e-01   9.0002570e-01   3.0000000e-01   4.3213914e-01   6.8170466e-01   2.0181667e-01   6.1119267e-01   1.1269424e-01   6.3178534e-01   3.0017653e-01   3.4595765e+00   3.2168311e+00   3.6364650e+00   2.7037323e+00   3.3192099e+00   3.2017763e+00   3.4107328e+00   2.0033798e+00   3.3237063e+00   2.6050967e+00   2.2121910e+00   2.9077087e+00   2.7085154e+00   3.4047917e+00   2.3071665e+00   3.1428042e+00   3.2033135e+00   2.8024935e+00   3.2103481e+00   2.6021247e+00   3.5076152e+00   2.7127272e+00   3.6073242e+00   3.4038884e+00   3.0203881e+00   3.1325879e+00   3.5317021e+00   3.7210979e+00   3.2059139e+00   2.2051638e+00   2.5023084e+00   2.4021168e+00   2.6048201e+00   3.8033004e+00   3.2030448e+00   3.2074921e+00   3.4286399e+00   3.1131211e+00   2.8028008e+00   2.7031257e+00   3.1010004e+00   3.3055260e+00   2.7040740e+00   2.0050309e+00   2.9023862e+00   2.9020767e+00   2.9028421e+00   3.0107283e+00   1.7089863e+00   2.8033666e+00   4.7142986e+00   3.8066401e+00   4.6226512e+00   4.3047830e+00   4.5107876e+00   5.3296471e+00   3.2068572e+00   5.0203871e+00   4.5089338e+00   4.8299744e+00   3.8170042e+00   4.0095939e+00   4.2200398e+00   3.7100654e+00   3.8275330e+00   4.0209836e+00   4.2079639e+00   5.4332277e+00   5.6287689e+00   3.7032748e+00   4.4237036e+00   3.6112573e+00   5.4319232e+00   3.6111754e+00   4.4135512e+00   4.7221364e+00   3.5107924e+00   3.6081749e+00   4.3098514e+00   4.5261773e+00   4.8309399e+00   5.1593152e+00   4.3120751e+00   3.8056232e+00   4.3015640e+00   4.8592534e+00   4.3174320e+00   4.2064763e+00   3.5083248e+00   4.1268500e+00   4.3236383e+00   3.8471097e+00   3.8066401e+00   4.6166518e+00   4.4251081e+00   3.9318948e+00   3.7118930e+00   3.9150333e+00   4.1165034e+00   3.8051417e+00   
5.2133179e-01   9.0160400e-01   3.0017653e-01   4.1209001e-01   2.2573593e-01   3.0008832e-01   8.2418002e-01   3.0482299e-01   2.0181667e-01   4.1212852e-01   1.2363278e+00   1.3741498e+00   9.0160400e-01   5.2133802e-01   1.1133986e+00   7.1621748e-01   8.0051036e-01   6.3178534e-01   5.6347121e-01   5.0517282e-01   4.1315633e-01   4.0004442e-01   4.1317535e-01   6.0948212e-01   6.0184622e-01   1.2085435e-01   2.0061436e-01   8.0051036e-01   1.0087250e+00   1.1527669e+00   3.0008832e-01   4.1210927e-01   9.0142636e-01   3.0008832e-01   2.2573593e-01   5.0436235e-01   4.5148429e-01   8.0004602e-01   2.2573593e-01   4.8342635e-01   7.2044167e-01   2.0181667e-01   7.1621748e-01   1.1269424e-01   7.4262850e-01   4.0125062e-01   3.2983364e+00   3.0300451e+00   3.4603347e+00   2.5053901e+00   3.1338090e+00   3.0030658e+00   3.2183845e+00   1.8040969e+00   3.1419971e+00   2.4075162e+00   2.0123013e+00   2.7132680e+00   2.5163999e+00   3.2086215e+00   2.1132077e+00   2.9750754e+00   3.0049127e+00   2.6055197e+00   3.0177719e+00   2.4040962e+00   3.3110162e+00   2.5253371e+00   3.4126529e+00   3.2074182e+00   2.8380954e+00   2.9580787e+00   3.3536443e+00   3.5347730e+00   3.0101869e+00   2.0123796e+00   2.3038195e+00   2.2036797e+00   2.4099203e+00   3.6051707e+00   3.0042758e+00   3.0123228e+00   3.2490712e+00   2.9241808e+00   2.6047889e+00   2.5049231e+00   2.9016211e+00   3.1100277e+00   2.5081992e+00   1.8056342e+00   2.7040060e+00   2.7039988e+00   2.7050721e+00   2.8205713e+00   1.5147271e+00   2.6060742e+00   4.5183778e+00   3.6090052e+00   4.4337691e+00   4.1072664e+00   4.3151164e+00   5.1425125e+00   3.0092613e+00   4.8303615e+00   4.3139066e+00   4.6422789e+00   3.6259317e+00   3.8146285e+00   4.0301568e+00   3.5133848e+00   3.6358680e+00   3.8290678e+00   4.0124919e+00   5.2471177e+00   5.4403962e+00   3.5051114e+00   4.2343452e+00   3.4149831e+00   5.2455706e+00   3.4177035e+00   4.2200398e+00   4.5335328e+00   3.3168776e+00   3.4123846e+00   4.1140176e+00   4.3402553e+00   4.6459028e+00   4.9843016e+00   4.1167964e+00   3.6096226e+00   4.1026403e+00   4.6849407e+00   4.1230798e+00   4.0100505e+00   3.3123688e+00   3.9407837e+00   4.1330547e+00   3.6700537e+00   3.6090052e+00   4.4237036e+00   4.2343452e+00   3.7463488e+00   3.5181052e+00   3.7227931e+00   3.9220791e+00   3.6072781e+00   4.2362917e-01   4.0125062e-01   2.0061436e-01   7.4262850e-01   5.0002283e-01   4.0004442e-01   2.4170870e-01   6.0017982e-01   7.4329527e-01   8.0250123e-01   8.5406674e-01   4.1317535e-01   1.2085435e-01   7.0096858e-01   2.0181667e-01   4.1315633e-01   2.0181667e-01   4.5077696e-01   3.6259865e-01   5.0084481e-01   6.0017665e-01   2.4170870e-01   2.0121983e-01   2.2538848e-01   4.1315633e-01   5.0084481e-01   4.0246123e-01   5.0043842e-01   6.3164729e-01   5.0002283e-01   4.0122873e-01   5.0001522e-01   5.0002283e-01   6.7616723e-01   2.0121983e-01   1.2085435e-01   1.3008771e+00   6.0948506e-01   4.0125062e-01   5.0085236e-01   6.0017982e-01   2.2573593e-01   4.5077696e-01   3.0017653e-01   3.0000000e-01   3.3320240e+00   3.1087192e+00   3.5191371e+00   2.6110181e+00   3.2098845e+00   3.1016129e+00   3.3064697e+00   1.9242109e+00   3.2110200e+00   2.5072065e+00   2.1702438e+00   2.8063347e+00   2.6144115e+00   3.3026483e+00   2.2074446e+00   3.0213781e+00   3.1035271e+00   2.7015967e+00   3.1108570e+00   2.5049231e+00   3.4076266e+00   2.6065485e+00   3.5045818e+00   3.3016829e+00   2.9091905e+00   3.0158857e+00   3.4160038e+00   3.6117923e+00   3.1042949e+00   2.1068047e+00   2.4087956e+00   
2.3099309e+00   2.5038387e+00   3.7027671e+00   3.1034919e+00   3.1060428e+00   3.3145595e+00   3.0095593e+00   2.7026925e+00   2.6061038e+00   3.0017811e+00   3.2030205e+00   2.6039803e+00   1.9366876e+00   2.8028640e+00   2.8014482e+00   2.8024453e+00   2.9049136e+00   1.6388635e+00   2.7031257e+00   4.6146430e+00   3.7072412e+00   4.5144508e+00   4.2035048e+00   4.4092709e+00   5.2185448e+00   3.1091788e+00   4.9117351e+00   4.4054277e+00   4.7224997e+00   3.7130507e+00   3.9073151e+00   4.1140274e+00   3.6117351e+00   3.7308330e+00   3.9200674e+00   4.1050815e+00   5.3212796e+00   5.5187578e+00   3.6046347e+00   4.3179262e+00   3.5127783e+00   5.3198559e+00   3.5085510e+00   4.3098508e+00   4.6126513e+00   3.4088749e+00   3.5071604e+00   4.2085176e+00   4.4144980e+00   4.7185095e+00   5.0381903e+00   4.2110099e+00   3.7030413e+00   4.2009868e+00   4.7393218e+00   4.2176488e+00   4.1043951e+00   3.4078683e+00   4.0181902e+00   4.2205976e+00   3.7363838e+00   3.7072412e+00   4.5130595e+00   4.3227928e+00   3.8267408e+00   3.6102542e+00   3.8115096e+00   4.0168944e+00   3.7051079e+00   8.0923926e-01   5.2201750e-01   1.1270411e+00   8.0928056e-01   2.4170870e-01   6.3178782e-01   9.1471442e-01   1.1573074e+00   5.2167829e-01   5.0476836e-01   4.0000000e-01   4.2270142e-01   3.0017653e-01   3.0490481e-01   5.0042326e-01   3.0915245e-01   8.5440680e-01   6.0184622e-01   6.3192325e-01   9.0142681e-01   5.2133179e-01   4.0363334e-01   5.0517282e-01   7.8890806e-01   8.2421923e-01   5.0042326e-01   3.1328089e-01   3.4085233e-01   8.0928056e-01   7.2044167e-01   4.5148429e-01   8.0928056e-01   1.0782211e+00   5.0517282e-01   4.8342635e-01   1.6097492e+00   1.0215068e+00   4.5148429e-01   3.0482299e-01   9.1446938e-01   3.0490481e-01   8.5440680e-01   2.4195741e-01   6.1135434e-01   3.0143288e+00   2.8035152e+00   3.2080663e+00   2.3476141e+00   2.9053991e+00   2.8028019e+00   3.0030626e+00   1.7519158e+00   2.9045816e+00   2.2149484e+00   2.0887699e+00   2.5048522e+00   2.3645147e+00   3.0018766e+00   1.9120303e+00   2.7085154e+00   2.8028008e+00   2.4075162e+00   2.8284908e+00   2.2272457e+00   3.1054022e+00   2.3075573e+00   3.2060163e+00   3.0018874e+00   2.6044486e+00   2.7064438e+00   3.1073418e+00   3.3054063e+00   2.8034238e+00   1.8447840e+00   2.1492024e+00   2.0607272e+00   2.2122063e+00   3.4028104e+00   2.8028007e+00   2.8036182e+00   3.0057998e+00   2.7234787e+00   2.4027927e+00   2.3234132e+00   2.7070699e+00   2.9017335e+00   2.3151346e+00   1.8036834e+00   2.5072065e+00   2.5017313e+00   2.5032633e+00   2.6031823e+00   1.5292174e+00   2.4058519e+00   4.3116266e+00   3.4064593e+00   4.2076930e+00   3.9021503e+00   4.1063936e+00   4.9099401e+00   2.8141516e+00   4.6055969e+00   4.1036742e+00   4.4145324e+00   3.4082578e+00   3.6052799e+00   3.8082804e+00   3.3123693e+00   3.4273179e+00   3.6154977e+00   3.8026444e+00   5.0117750e+00   5.2107474e+00   3.3130198e+00   4.0114753e+00   3.2109395e+00   5.0107787e+00   3.2067490e+00   4.0058313e+00   4.3058539e+00   3.1067996e+00   3.2049797e+00   3.9061098e+00   4.1066170e+00   4.4095056e+00   4.7221364e+00   3.9082316e+00   3.4019453e+00   3.9014304e+00   4.4232188e+00   3.9139973e+00   3.8023591e+00   3.1057392e+00   3.7104219e+00   3.9150553e+00   3.4248402e+00   3.4064593e+00   4.2084919e+00   4.0172759e+00   3.5193527e+00   3.3100431e+00   3.5073655e+00   3.7133435e+00   3.4036743e+00   4.0004442e-01   5.0043084e-01   3.4085233e-01   8.0046764e-01   2.2573593e-01   4.0243965e-01   4.2362917e-01   1.2036925e+00   1.1896595e+00   
8.0879776e-01   5.0000761e-01   1.1006371e+00   5.2133179e-01   8.0046685e-01   5.0437695e-01   4.0125062e-01   5.0477564e-01   5.0043084e-01   4.5148429e-01   4.0125062e-01   6.0000952e-01   6.0000317e-01   2.2608083e-01   3.0922892e-01   8.0000160e-01   7.4269314e-01   9.6572569e-01   3.4085233e-01   4.0246123e-01   9.0000136e-01   3.4085233e-01   4.0127250e-01   5.0001522e-01   4.0004442e-01   1.1000003e+00   2.2608083e-01   4.1317535e-01   5.7609230e-01   4.0122873e-01   5.2167829e-01   2.0061436e-01   7.0088627e-01   4.0004442e-01   3.3852404e+00   3.1245391e+00   3.5521657e+00   2.6057331e+00   3.2281303e+00   3.1021033e+00   3.3145497e+00   1.9088256e+00   3.2358110e+00   2.5040476e+00   2.1337832e+00   2.8091158e+00   2.6173653e+00   3.3068237e+00   2.2078368e+00   3.0635687e+00   3.1029264e+00   2.7045714e+00   3.1156892e+00   2.5038387e+00   3.4072735e+00   2.6199287e+00   3.5105217e+00   3.3061800e+00   2.9316687e+00   3.0488379e+00   3.4462681e+00   3.6292576e+00   3.1074604e+00   2.1103491e+00   2.4046650e+00   2.3052527e+00   2.5074705e+00   3.7037846e+00   3.1023805e+00   3.1087156e+00   3.3416864e+00   3.0212423e+00   2.7029308e+00   2.6036513e+00   3.0012006e+00   3.2078939e+00   2.6064541e+00   1.9145304e+00   2.8026114e+00   2.8028068e+00   2.8033825e+00   2.9167099e+00   1.6147493e+00   2.7040740e+00   4.6133719e+00   3.7058811e+00   4.5290217e+00   4.2056470e+00   4.4115634e+00   5.2381327e+00   3.1057013e+00   4.9271590e+00   4.4118721e+00   4.7354168e+00   3.7201124e+00   3.9113698e+00   4.1247181e+00   3.6087856e+00   3.7244383e+00   3.9212835e+00   4.1101783e+00   5.3422962e+00   5.5362181e+00   3.6046999e+00   4.3279835e+00   3.5095358e+00   5.3412086e+00   3.5135120e+00   4.3162096e+00   4.6297141e+00   3.4124092e+00   3.5088081e+00   4.2105763e+00   4.4358170e+00   4.7408876e+00   5.0762364e+00   4.2125085e+00   3.7079173e+00   4.2021973e+00   4.7752666e+00   4.2166536e+00   4.1080028e+00   3.4084548e+00   4.0338654e+00   4.2256165e+00   3.7563734e+00   3.7058811e+00   4.5190617e+00   4.3264209e+00   3.8360186e+00   3.6136974e+00   3.8177300e+00   4.0156240e+00   3.7048582e+00   6.3164977e-01   3.0017653e-01   4.1209001e-01   2.0061436e-01   4.0127250e-01   7.0911112e-01   8.2458409e-01   1.0207396e+00   5.2201750e-01   1.2699992e-01   7.0470867e-01   4.0004442e-01   4.0122873e-01   3.0482299e-01   5.2167208e-01   3.0490481e-01   4.0122873e-01   4.0002221e-01   2.0061436e-01   2.0061436e-01   2.0061436e-01   3.0482299e-01   3.0482299e-01   4.0122873e-01   7.0008584e-01   8.0879701e-01   3.0017653e-01   3.0474106e-01   5.0043084e-01   3.0017653e-01   6.0964597e-01   1.0000000e-01   2.0121983e-01   1.1019599e+00   6.0035305e-01   4.0004442e-01   4.5148429e-01   4.0127250e-01   4.0004442e-01   4.0125062e-01   3.3808272e-01   1.1269424e-01   3.2369541e+00   3.0101869e+00   3.4219340e+00   2.5073576e+00   3.1113295e+00   3.0016913e+00   3.2074921e+00   1.8128536e+00   3.1127326e+00   2.4076937e+00   2.0429861e+00   2.7074657e+00   2.5087337e+00   3.2029987e+00   2.1087640e+00   2.9250474e+00   3.0040848e+00   2.6011837e+00   3.0090716e+00   2.4029250e+00   3.3087901e+00   2.5074281e+00   3.4046875e+00   3.2018065e+00   2.8107271e+00   2.9185950e+00   3.3183094e+00   3.5134617e+00   3.0049285e+00   2.0041542e+00   2.3049133e+00   2.2050331e+00   2.4035997e+00   3.6030023e+00   3.0040438e+00   3.0070658e+00   3.2168317e+00   2.9083216e+00   2.6031436e+00   2.5048522e+00   2.9013423e+00   3.1034810e+00   2.5032729e+00   1.8201043e+00   2.7028014e+00   2.7016556e+00   
2.7027522e+00   2.8056775e+00   1.5256523e+00   2.6033557e+00   4.5162553e+00   3.6081006e+00   4.4160732e+00   4.1039121e+00   4.3103378e+00   5.1203327e+00   3.0096880e+00   4.8129366e+00   4.3058720e+00   4.6249088e+00   3.6148619e+00   3.8081633e+00   4.0157626e+00   3.5129206e+00   3.6349703e+00   3.8226858e+00   4.0057080e+00   5.2232912e+00   5.4204287e+00   3.5035589e+00   4.2200398e+00   3.4145570e+00   5.2217206e+00   3.4096180e+00   4.2110197e+00   4.5140458e+00   3.3101076e+00   3.4081996e+00   4.1095117e+00   4.3161641e+00   4.6204721e+00   4.9419857e+00   4.1123051e+00   3.6033860e+00   4.1009647e+00   4.6434791e+00   4.1197833e+00   4.0049425e+00   3.3090452e+00   3.9205015e+00   4.1230798e+00   3.6413278e+00   3.6081006e+00   4.4145323e+00   4.2254713e+00   3.7302938e+00   3.5112285e+00   3.7130507e+00   3.9190472e+00   3.6058055e+00   5.0043842e-01   1.0426513e+00   5.2167208e-01   4.0004442e-01   3.0026460e-01   1.4542931e+00   1.5965783e+00   1.1269511e+00   7.4262964e-01   1.3253871e+00   9.3306807e-01   1.0032293e+00   8.5406674e-01   7.0470720e-01   7.0633229e-01   5.7608844e-01   6.0017982e-01   6.3192325e-01   8.2418071e-01   8.0879625e-01   3.4080442e-01   4.0243965e-01   1.0030871e+00   1.2189645e+00   1.3741465e+00   5.0043842e-01   6.0201716e-01   1.1055705e+00   5.0043842e-01   1.1269424e-01   7.1621748e-01   6.7616902e-01   6.0000952e-01   3.0008832e-01   6.8170466e-01   9.3735629e-01   4.0004442e-01   9.3308853e-01   3.0474106e-01   9.6572569e-01   6.0948212e-01   3.4311880e+00   3.1440065e+00   3.5828092e+00   2.6061623e+00   3.2490712e+00   3.1047537e+00   3.3265679e+00   1.9024467e+00   3.2610547e+00   2.5066443e+00   2.1042326e+00   2.8182771e+00   2.6268573e+00   3.3136174e+00   2.2177383e+00   3.1042292e+00   3.1056084e+00   2.7106185e+00   3.1258664e+00   2.5072166e+00   3.4123850e+00   2.6397031e+00   3.5191318e+00   3.3125861e+00   2.9569988e+00   3.0825000e+00   3.4750557e+00   3.6484459e+00   3.1148203e+00   2.1231535e+00   2.4058952e+00   2.3063814e+00   2.5167763e+00   3.7071732e+00   3.1042001e+00   3.1166462e+00   3.3690976e+00   3.0370401e+00   2.7067267e+00   2.6060811e+00   3.0024163e+00   3.2157547e+00   2.6139440e+00   1.9029771e+00   2.8056558e+00   2.8068283e+00   2.8077255e+00   2.9323793e+00   1.6119586e+00   2.7091848e+00   4.6187512e+00   3.7092298e+00   4.5442576e+00   4.2099019e+00   4.4180513e+00   5.2548586e+00   3.1079005e+00   4.9407795e+00   4.4195588e+00   4.7516511e+00   3.7329384e+00   3.9191954e+00   4.1389224e+00   3.6127211e+00   3.7328453e+00   3.9318950e+00   4.1174259e+00   5.3601143e+00   5.5513974e+00   3.6073242e+00   4.3425018e+00   3.5138357e+00   5.3586950e+00   3.5235095e+00   4.3257995e+00   4.6452056e+00   3.4217238e+00   3.5154314e+00   4.2169032e+00   4.4544908e+00   4.7602896e+00   5.1057502e+00   4.2193718e+00   3.7147036e+00   4.2043114e+00   4.8058872e+00   4.2239687e+00   4.1138950e+00   3.4146523e+00   4.0526593e+00   4.2382079e+00   3.7847403e+00   3.7092298e+00   4.5291248e+00   4.3385161e+00   3.8547029e+00   3.6228903e+00   3.8290678e+00   4.0228342e+00   3.7082809e+00   6.3164977e-01   3.0026460e-01   1.2085435e-01   6.0948506e-01   1.0143978e+00   1.3131369e+00   8.0928056e-01   4.0246123e-01   8.5409862e-01   7.0016860e-01   5.0477564e-01   6.0201716e-01   5.6595908e-01   4.0363334e-01   4.1212852e-01   1.2699992e-01   3.3818226e-01   4.1210927e-01   3.3818226e-01   2.0181667e-01   1.2085435e-01   5.0855077e-01   1.0001598e+00   1.1055707e+00   0.0000000e+00   3.0026460e-01   6.0964891e-01   
0.0000000e+00   5.0043842e-01   3.0482299e-01   4.0246123e-01   8.0254500e-01   5.0043842e-01   5.2133802e-01   7.0556260e-01   2.0181667e-01   7.0008735e-01   3.0026460e-01   6.0948506e-01   2.0181667e-01   3.2490712e+00   3.0153168e+00   3.4297841e+00   2.5067523e+00   3.1166337e+00   3.0027816e+00   3.2112793e+00   1.8068048e+00   3.1183051e+00   2.4116924e+00   2.0138832e+00   2.7116615e+00   2.5059537e+00   3.2048192e+00   2.1144760e+00   2.9351753e+00   3.0063019e+00   2.6019122e+00   3.0106587e+00   2.4030297e+00   3.3125861e+00   2.5120719e+00   3.4068163e+00   3.2029877e+00   2.8162444e+00   2.9267417e+00   3.3252407e+00   3.5189464e+00   3.0077107e+00   2.0051350e+00   2.3037132e+00   2.2028146e+00   2.4058620e+00   3.6044981e+00   3.0062070e+00   3.0107283e+00   3.2237456e+00   2.9105093e+00   2.6052541e+00   2.5062865e+00   2.9018772e+00   3.1056084e+00   2.5048522e+00   1.8082911e+00   2.7043948e+00   2.7029415e+00   2.7046027e+00   2.8091099e+00   1.5248852e+00   2.6055127e+00   4.5209020e+00   3.6112573e+00   4.4212031e+00   4.1056541e+00   4.3138986e+00   5.1255338e+00   3.0133997e+00   4.8167235e+00   4.3081273e+00   4.6319211e+00   3.6205854e+00   3.8114965e+00   4.0212972e+00   3.5173798e+00   3.6449970e+00   3.8299342e+00   4.0081754e+00   5.2290121e+00   5.4254411e+00   3.5039202e+00   4.2264145e+00   3.4198378e+00   5.2270034e+00   3.4138008e+00   4.2149806e+00   4.5183778e+00   3.3145502e+00   3.4118179e+00   4.1129687e+00   4.3210760e+00   4.6261633e+00   4.9512603e+00   4.1165035e+00   3.6051692e+00   4.1014742e+00   4.6540056e+00   4.1257291e+00   4.0071257e+00   3.3129914e+00   3.9274863e+00   4.1301604e+00   3.6542046e+00   3.6112573e+00   4.4192311e+00   4.2328883e+00   3.7399948e+00   3.5155767e+00   3.7180846e+00   3.9250546e+00   3.6083191e+00   6.0184622e-01   7.4263078e-01   1.1138955e+00   4.2268438e-01   7.0096708e-01   2.4170870e-01   3.0490481e-01   3.0490481e-01   3.0017653e-01   3.0474106e-01   3.0474106e-01   8.0879701e-01   4.2362917e-01   6.1119267e-01   7.0462697e-01   4.1317535e-01   2.2538848e-01   3.0482299e-01   7.1621748e-01   6.7616723e-01   3.0474106e-01   4.0125062e-01   5.0001522e-01   6.3164977e-01   5.2491131e-01   2.2573593e-01   6.3164977e-01   1.0207396e+00   3.3808272e-01   4.0246123e-01   1.4180463e+00   1.0030868e+00   4.5148429e-01   4.1317535e-01   7.4263078e-01   3.0017653e-01   8.0879701e-01   1.0000000e-01   4.5078948e-01   3.2116783e+00   3.0049285e+00   3.4072983e+00   2.5182898e+00   3.1051604e+00   3.0020136e+00   3.2049016e+00   1.8469618e+00   3.1036832e+00   2.4099081e+00   2.1180493e+00   2.7068820e+00   2.5224740e+00   3.2021231e+00   2.1097449e+00   2.9077617e+00   3.0041462e+00   2.6022422e+00   3.0134290e+00   2.4087504e+00   3.3085101e+00   2.5050799e+00   3.4038679e+00   3.2010814e+00   2.8036959e+00   2.9060895e+00   3.3058271e+00   3.5063866e+00   3.0043212e+00   2.0122773e+00   2.3159426e+00   2.2186306e+00   2.4051454e+00   3.6029749e+00   3.0041461e+00   3.0062373e+00   3.2059465e+00   2.9096170e+00   2.6032656e+00   2.5097004e+00   2.9028411e+00   3.1023606e+00   2.5057847e+00   1.8685354e+00   2.7039990e+00   2.7016498e+00   2.7029428e+00   2.8028074e+00   1.5747520e+00   2.6039937e+00   4.5157550e+00   3.6083209e+00   4.4088451e+00   4.1031691e+00   4.3089952e+00   5.1095334e+00   3.0117336e+00   4.8052574e+00   4.3035619e+00   4.6175091e+00   3.6116958e+00   3.8067089e+00   4.0107159e+00   3.5138361e+00   3.6350483e+00   3.8210210e+00   4.0037985e+00   5.2113565e+00   5.4105254e+00   3.5063553e+00   
4.2147222e+00   3.4147657e+00   5.2097995e+00   3.4081036e+00   4.2080425e+00   4.5057296e+00   3.3089414e+00   3.4074852e+00   4.1084282e+00   4.3058539e+00   4.6088153e+00   4.9193995e+00   4.1112251e+00   3.6020843e+00   4.1009356e+00   4.6223848e+00   4.1190046e+00   4.0036188e+00   3.3085886e+00   3.9129256e+00   4.1197933e+00   3.6305006e+00   3.6083209e+00   4.4113183e+00   4.2225427e+00   3.7249938e+00   3.5105217e+00   3.7103007e+00   3.9184088e+00   3.6056580e+00   4.0125062e-01   5.7609230e-01   1.0095367e+00   1.0776296e+00   6.3322667e-01   3.0490481e-01   9.0140221e-01   4.1212852e-01   6.0000317e-01   3.4085233e-01   6.0035305e-01   3.3818226e-01   3.0000000e-01   4.0122873e-01   2.2538848e-01   4.0004442e-01   4.0122873e-01   2.0061436e-01   3.0000000e-01   6.0017982e-01   7.0462844e-01   8.5406616e-01   3.0026460e-01   4.0243965e-01   7.0088477e-01   3.0026460e-01   4.5783248e-01   3.0008832e-01   3.0490481e-01   1.1002025e+00   4.1315633e-01   4.0125062e-01   4.2362917e-01   4.0125062e-01   4.1209001e-01   2.4170870e-01   5.0436965e-01   2.2573593e-01   3.1712557e+00   2.9203034e+00   3.3425817e+00   2.4092081e+00   3.0228582e+00   2.9024211e+00   3.1131137e+00   1.7168003e+00   3.0276611e+00   2.3094323e+00   1.9540727e+00   2.6109956e+00   2.4153242e+00   3.1056218e+00   2.0123796e+00   2.8520945e+00   2.9050328e+00   2.5029614e+00   2.9148948e+00   2.3042831e+00   3.2109395e+00   2.4161682e+00   3.3086859e+00   3.1042389e+00   2.7244207e+00   2.8394157e+00   3.2369857e+00   3.4247142e+00   2.9077271e+00   1.9085444e+00   2.2064916e+00   2.1068047e+00   2.3066817e+00   3.5042241e+00   2.9048033e+00   2.9102290e+00   3.1338090e+00   2.8169587e+00   2.5042601e+00   2.4061715e+00   2.8017212e+00   3.0065627e+00   2.4058322e+00   1.7261843e+00   2.6037439e+00   2.6027120e+00   2.6040234e+00   2.7127458e+00   1.4350761e+00   2.5049231e+00   4.4189015e+00   3.5095669e+00   4.3257995e+00   4.0057109e+00   4.2135057e+00   5.0324952e+00   2.9113810e+00   4.7221382e+00   4.2099962e+00   4.5353918e+00   3.5215862e+00   3.7118930e+00   3.9240025e+00   3.4150232e+00   3.5401623e+00   3.7282910e+00   3.9092259e+00   5.1365012e+00   5.3314853e+00   3.4049933e+00   4.1286955e+00   3.3168890e+00   5.1347989e+00   3.3143385e+00   4.1161770e+00   4.4243750e+00   3.2143454e+00   3.3110189e+00   4.0125032e+00   4.2289520e+00   4.5343227e+00   4.8661173e+00   4.0156353e+00   3.5063553e+00   4.0017163e+00   4.5675364e+00   4.0235140e+00   3.9076272e+00   3.2116700e+00   3.8321139e+00   4.0301570e+00   3.5598557e+00   3.5095669e+00   4.3201293e+00   4.1322798e+00   3.6413292e+00   3.4157005e+00   3.6188994e+00   3.8226858e+00   3.5071409e+00   5.0436235e-01   1.1269511e+00   1.4180734e+00   9.1446938e-01   5.0476836e-01   9.6593231e-01   8.0051115e-01   6.1119558e-01   7.0176271e-01   6.0964891e-01   4.3213914e-01   5.2133179e-01   2.2573593e-01   4.1420960e-01   5.2133802e-01   4.5078948e-01   2.2608083e-01   2.0121983e-01   6.1119558e-01   1.1005364e+00   1.2089192e+00   1.2085435e-01   2.4195741e-01   7.1621884e-01   1.2085435e-01   4.0004442e-01   4.1212852e-01   5.0085236e-01   7.0096858e-01   4.0127250e-01   5.6394820e-01   8.0967961e-01   2.0000000e-01   8.0051115e-01   2.2573593e-01   7.1621884e-01   3.0482299e-01   3.3545239e+00   3.1166331e+00   3.5333785e+00   2.6054739e+00   3.2183845e+00   3.1025789e+00   3.3116521e+00   1.9046783e+00   3.2211369e+00   2.5096353e+00   2.1074907e+00   2.8107054e+00   2.6064541e+00   3.3051050e+00   2.2121875e+00   3.0393610e+00   3.1054994e+00   
2.7022579e+00   3.1107490e+00   2.5027328e+00   3.4112739e+00   2.6129479e+00   3.5073688e+00   3.3035252e+00   2.9185900e+00   3.0300451e+00   3.4286400e+00   3.6205854e+00   3.1074470e+00   2.1053074e+00   2.4030297e+00   2.3022754e+00   2.5057763e+00   3.7043108e+00   3.1053329e+00   3.1100313e+00   3.3265652e+00   3.0118276e+00   2.7046025e+00   2.6052853e+00   3.0016501e+00   3.2059133e+00   2.6047974e+00   1.9052628e+00   2.8038694e+00   2.8028007e+00   2.8041967e+00   2.9102290e+00   1.6179159e+00   2.7049931e+00   4.6192199e+00   3.7100254e+00   4.5225779e+00   4.2056438e+00   4.4133506e+00   5.2278849e+00   3.1114444e+00   4.9186970e+00   4.4088300e+00   4.7323336e+00   3.7201124e+00   3.9113387e+00   4.1217116e+00   3.6152935e+00   3.7397620e+00   3.9276515e+00   4.1085246e+00   5.3314853e+00   5.5274937e+00   3.6037456e+00   4.3264210e+00   3.5173586e+00   5.3296471e+00   3.5134601e+00   4.3151165e+00   4.6204664e+00   3.4137985e+00   3.5110031e+00   4.2123903e+00   4.4237218e+00   4.7288133e+00   5.0555470e+00   4.2155430e+00   3.7056457e+00   4.2016096e+00   4.7574592e+00   4.2235569e+00   4.1072664e+00   3.4118179e+00   4.0283196e+00   4.2288238e+00   3.7532858e+00   3.7100254e+00   4.5190617e+00   4.3311362e+00   3.8383398e+00   3.6148683e+00   3.8177286e+00   4.0227665e+00   3.7075359e+00   1.5237054e+00   1.5778323e+00   1.1528553e+00   8.0928056e-01   1.4109657e+00   9.0296858e-01   1.1060939e+00   8.5617086e-01   6.0184934e-01   8.2671175e-01   8.1112984e-01   7.1621748e-01   7.2113820e-01   9.0642722e-01   9.0166476e-01   5.2167829e-01   5.6347978e-01   1.1011719e+00   1.1531951e+00   1.3523685e+00   6.0948506e-01   7.0008735e-01   1.2012929e+00   6.0948506e-01   2.0121983e-01   8.0488008e-01   7.1636719e-01   7.0025283e-01   2.2608083e-01   7.4418186e-01   9.6702272e-01   5.0476836e-01   9.0657583e-01   3.4085233e-01   1.0214933e+00   7.0176271e-01   3.7102713e+00   3.4382051e+00   3.8712380e+00   2.9060895e+00   3.5425492e+00   3.4047913e+00   3.6240078e+00   2.2025238e+00   3.5521653e+00   2.8062729e+00   2.4042873e+00   3.1166330e+00   2.9229849e+00   3.6127194e+00   2.5155829e+00   3.3866455e+00   3.4056098e+00   3.0096888e+00   3.4232171e+00   2.8068501e+00   3.7118528e+00   2.9335034e+00   3.8176417e+00   3.6116893e+00   3.2480452e+00   3.3690976e+00   3.7643838e+00   3.9429243e+00   3.4137984e+00   2.4191998e+00   2.7057317e+00   2.6060678e+00   2.8148779e+00   4.0071259e+00   3.4042418e+00   3.4154455e+00   3.6592943e+00   3.3320889e+00   3.0065584e+00   2.9059779e+00   3.3025806e+00   3.5145393e+00   2.9126049e+00   2.2031052e+00   3.1056091e+00   3.1065947e+00   3.1074470e+00   3.2280982e+00   1.9100994e+00   3.0087060e+00   4.9179684e+00   4.0090033e+00   4.8406432e+00   4.5097222e+00   4.7173415e+00   5.5505575e+00   3.4073974e+00   5.2376127e+00   4.7184838e+00   5.0477042e+00   4.0301568e+00   4.2181242e+00   4.4357103e+00   3.9120615e+00   4.0296437e+00   4.2295175e+00   4.4165186e+00   5.6553854e+00   5.8478137e+00   3.9072483e+00   4.6391758e+00   3.8129545e+00   5.6540008e+00   3.8217034e+00   4.6242406e+00   4.9412981e+00   3.7201124e+00   3.8146271e+00   4.5162248e+00   4.7490801e+00   5.0547228e+00   5.3951639e+00   4.5184830e+00   4.0138270e+00   4.5043856e+00   5.0949990e+00   4.5225786e+00   4.4133506e+00   3.7138970e+00   4.3475342e+00   4.5353918e+00   4.0747727e+00   4.0090033e+00   4.8274110e+00   4.6357670e+00   4.1493188e+00   3.9212879e+00   4.1268500e+00   4.3214438e+00   4.0081754e+00   4.1317535e-01   4.0127250e-01   7.1629303e-01   
5.0043842e-01   7.0096858e-01   6.3912709e-01   7.0184453e-01   1.2003596e+00   7.9871893e-01   1.0286506e+00   1.0433442e+00   8.2635069e-01   6.3309012e-01   6.7626502e-01   1.1286016e+00   1.0782105e+00   6.1135434e-01   6.0184934e-01   3.0915245e-01   1.0143978e+00   9.0155393e-01   5.0436965e-01   1.0143978e+00   1.4324323e+00   7.4329414e-01   8.0879776e-01   1.7570482e+00   1.4092511e+00   8.1343016e-01   7.8895472e-01   1.1269511e+00   7.0470720e-01   1.2189701e+00   5.0855077e-01   8.5406616e-01   3.5025396e+00   3.3027388e+00   3.7022129e+00   2.8281704e+00   3.4036672e+00   3.3025779e+00   3.5030234e+00   2.1725076e+00   3.4018155e+00   2.7109019e+00   2.4518621e+00   3.0049127e+00   2.8364042e+00   3.5019450e+00   2.4088882e+00   3.2025657e+00   3.3031143e+00   2.9050277e+00   3.3192099e+00   2.7159616e+00   3.6057054e+00   2.8056568e+00   3.7048624e+00   3.5016345e+00   3.1026586e+00   3.2026875e+00   3.6024856e+00   3.8034160e+00   3.3035252e+00   2.3226028e+00   2.6270968e+00   2.5319718e+00   2.7081195e+00   3.9029099e+00   3.3031170e+00   3.3039553e+00   3.5023855e+00   3.2150432e+00   2.9028412e+00   2.8148779e+00   3.2051933e+00   3.4018781e+00   2.8098127e+00   2.1974660e+00   3.0055598e+00   3.0017653e+00   3.0030650e+00   3.1026235e+00   1.9002712e+00   2.9047832e+00   4.8115512e+00   3.9065683e+00   4.7047990e+00   4.4023911e+00   4.6064349e+00   5.4038141e+00   3.3119500e+00   5.1019028e+00   4.6029848e+00   4.9110422e+00   3.9076509e+00   4.1051819e+00   4.3067850e+00   3.8114951e+00   3.9246405e+00   4.1145310e+00   4.3025710e+00   5.5046681e+00   5.7049556e+00   3.8098343e+00   4.5095384e+00   3.7106240e+00   5.5035834e+00   3.7063915e+00   4.5052925e+00   4.8020893e+00   3.6066590e+00   3.7052383e+00   4.4062090e+00   4.6017113e+00   4.9033376e+00   5.2065497e+00   4.4082090e+00   3.9018737e+00   4.4013937e+00   4.9097097e+00   4.4135256e+00   4.3024877e+00   3.6059708e+00   4.2076410e+00   4.4136664e+00   3.9189006e+00   3.9065683e+00   4.7076752e+00   4.5157700e+00   4.0166034e+00   3.8091065e+00   4.0069397e+00   4.2129114e+00   3.9040721e+00   5.0476836e-01   9.1422402e-01   6.0017982e-01   6.7616723e-01   1.0001903e+00   7.4262850e-01   1.1298636e+00   1.1055799e+00   1.0782211e+00   1.4043036e+00   1.0207260e+00   9.0508712e-01   1.0030871e+00   1.2632996e+00   1.3253497e+00   1.0001598e+00   5.0855077e-01   2.4195741e-01   1.3131369e+00   1.2089895e+00   9.0007572e-01   1.3131369e+00   1.5263518e+00   1.0087393e+00   9.3308891e-01   2.1138769e+00   1.4140515e+00   9.3308891e-01   6.8160885e-01   1.4180436e+00   6.7626681e-01   1.3018145e+00   7.0470720e-01   1.1133986e+00   3.2054626e+00   3.0041787e+00   3.4044439e+00   2.6382589e+00   3.1129223e+00   3.0138245e+00   3.2030205e+00   2.1566438e+00   3.1086927e+00   2.4554557e+00   2.5269479e+00   2.7127202e+00   2.6737930e+00   3.2074207e+00   2.1510232e+00   2.9068053e+00   3.0077106e+00   2.6369414e+00   3.0816280e+00   2.4971338e+00   3.3055260e+00   2.5325155e+00   3.4206210e+00   3.2100081e+00   2.8135926e+00   2.9088609e+00   3.3100014e+00   3.5053029e+00   3.0107283e+00   2.1554609e+00   2.4508792e+00   2.3794578e+00   2.4537333e+00   3.6090037e+00   3.0077114e+00   3.0034200e+00   3.2047284e+00   2.9729700e+00   2.6131590e+00   2.5821442e+00   2.9309340e+00   3.1060464e+00   2.5610212e+00   2.2285414e+00   2.7317035e+00   2.7106184e+00   2.7159615e+00   2.8134622e+00   1.9767345e+00   2.6270952e+00   4.5095106e+00   3.6117736e+00   4.4050179e+00   4.1034650e+00   4.3058769e+00   5.1048430e+00   
3.0395885e+00   4.8030341e+00   4.3077256e+00   4.6095748e+00   3.6067573e+00   3.8091370e+00   4.0067454e+00   3.5235094e+00   3.6257071e+00   3.8125141e+00   4.0031827e+00   5.2054101e+00   5.4066794e+00   3.5404367e+00   4.2082468e+00   3.4146523e+00   5.2054259e+00   3.4138230e+00   4.2042865e+00   4.5025743e+00   3.3123786e+00   3.4067938e+00   4.1072911e+00   4.3032007e+00   4.6053784e+00   4.9093646e+00   4.1089590e+00   3.6062594e+00   4.1061436e+00   4.6117560e+00   4.1111295e+00   4.0026055e+00   3.3078313e+00   3.9072844e+00   4.1120111e+00   3.6177796e+00   3.6117736e+00   4.4064445e+00   4.2133758e+00   3.7157992e+00   3.5215805e+00   3.7072609e+00   3.9105673e+00   3.6051689e+00   4.1212852e-01   4.1212852e-01   3.0490481e-01   5.2167208e-01   3.0915245e-01   8.0097499e-01   6.1119558e-01   6.9518117e-01   9.0168933e-01   5.2491131e-01   4.0363334e-01   5.0085236e-01   7.8940551e-01   8.2462252e-01   5.0042326e-01   3.1328089e-01   3.0490481e-01   8.0928056e-01   7.0470867e-01   4.0125062e-01   8.0928056e-01   1.0776294e+00   5.0517282e-01   4.5078948e-01   1.6096629e+00   1.0207396e+00   4.5847767e-01   6.0184622e-01   9.1422402e-01   3.4085233e-01   8.5406674e-01   2.4195741e-01   6.0964891e-01   3.4079041e+00   3.2018548e+00   3.6045966e+00   2.7227151e+00   3.3029106e+00   3.2014779e+00   3.4016816e+00   2.0608234e+00   3.3024690e+00   2.6067721e+00   2.3393392e+00   2.9023862e+00   2.7310942e+00   3.4010299e+00   2.3048574e+00   3.1044057e+00   3.2014774e+00   2.8036023e+00   3.2152055e+00   2.6124447e+00   3.5030234e+00   2.7035171e+00   3.6034258e+00   3.4010358e+00   3.0022434e+00   3.1033306e+00   3.5041120e+00   3.7031277e+00   3.2018065e+00   2.2177966e+00   2.5220687e+00   2.4265152e+00   2.6055197e+00   3.8016494e+00   3.2014773e+00   3.2019092e+00   3.4031881e+00   3.1122360e+00   2.8013347e+00   2.7110046e+00   3.1036553e+00   3.3009330e+00   2.7070770e+00   2.0855888e+00   2.9035486e+00   2.9008500e+00   2.9016034e+00   3.0016038e+00   1.7857124e+00   2.8028019e+00   4.7076061e+00   3.8037955e+00   4.6049801e+00   4.3013465e+00   4.5040960e+00   5.3068325e+00   3.2075036e+00   5.0037550e+00   4.5023523e+00   4.8096009e+00   3.8048551e+00   4.0031892e+00   4.2051335e+00   3.7071735e+00   3.8161632e+00   4.0093901e+00   4.2016368e+00   5.4081547e+00   5.6075434e+00   3.7075525e+00   4.4072835e+00   3.6062405e+00   5.4074634e+00   3.6038441e+00   4.4036959e+00   4.7038247e+00   3.5038075e+00   3.6028345e+00   4.3038299e+00   4.5042394e+00   4.8062731e+00   5.1150203e+00   4.3051629e+00   3.8011413e+00   4.3008955e+00   4.8153681e+00   4.3087925e+00   4.2014602e+00   3.5032125e+00   4.1063865e+00   4.3094598e+00   3.8146853e+00   3.8037955e+00   4.6054982e+00   4.4109814e+00   3.9115832e+00   3.7058197e+00   3.9043917e+00   4.1081838e+00   3.8021570e+00   6.0365948e-01   3.0008832e-01   3.3818226e-01   2.0121983e-01   5.2133802e-01   3.0915245e-01   5.0437695e-01   5.0043842e-01   2.0181667e-01   1.2085435e-01   1.2085435e-01   4.1317535e-01   4.1317535e-01   3.0026460e-01   6.0018299e-01   7.0462697e-01   4.0246123e-01   3.0490481e-01   4.0004442e-01   4.0246123e-01   7.1621884e-01   1.2085435e-01   1.1269424e-01   1.2036863e+00   7.0088627e-01   3.0482299e-01   5.0436965e-01   5.0436235e-01   3.0482299e-01   5.0436965e-01   2.2608083e-01   2.0121983e-01   3.3237063e+00   3.1056091e+00   3.5138377e+00   2.6067805e+00   3.2064817e+00   3.1008890e+00   3.3041599e+00   1.9144935e+00   3.2074507e+00   2.5042498e+00   2.1492024e+00   2.8038903e+00   2.6091437e+00   
3.3015588e+00   2.2041706e+00   3.0148613e+00   3.1021975e+00   2.7007716e+00   3.1069083e+00   2.5027328e+00   3.4052051e+00   2.6037226e+00   3.5028446e+00   3.3009330e+00   2.9058292e+00   3.0107430e+00   3.4113341e+00   3.6081814e+00   3.1026177e+00   2.1035154e+00   2.4051766e+00   2.3058791e+00   2.5019964e+00   3.7017412e+00   3.1021847e+00   3.1038570e+00   3.3100818e+00   3.0059450e+00   2.7015162e+00   2.6035107e+00   3.0009631e+00   3.2017847e+00   2.6021247e+00   1.9231085e+00   2.8015882e+00   2.8007533e+00   2.8013565e+00   2.9028946e+00   1.6222582e+00   2.7017239e+00   4.6112605e+00   3.7050457e+00   4.5107898e+00   4.2023583e+00   4.4067851e+00   5.2146266e+00   3.1060428e+00   4.9089682e+00   4.4037570e+00   4.7173415e+00   3.7092301e+00   3.9050336e+00   4.1101936e+00   3.6083379e+00   3.7235997e+00   3.9149881e+00   4.1034584e+00   5.3169366e+00   5.5149065e+00   3.6029421e+00   4.3133953e+00   3.5091580e+00   5.3158283e+00   3.5057369e+00   4.3071174e+00   4.6095441e+00   3.4059695e+00   3.5048429e+00   4.2061215e+00   4.4109767e+00   4.7143113e+00   5.0310424e+00   4.2080636e+00   3.7018984e+00   4.2005765e+00   4.7313463e+00   4.2134002e+00   4.1029723e+00   3.4053426e+00   4.0133308e+00   4.2155438e+00   3.7272780e+00   3.7050457e+00   4.5097224e+00   4.3174320e+00   3.8199266e+00   3.6070223e+00   3.8081328e+00   4.0126677e+00   3.7034791e+00   6.0017665e-01   4.1210927e-01   6.0018299e-01   1.1133986e+00   6.3178534e-01   9.0142681e-01   8.5403486e-01   7.0462844e-01   5.0477564e-01   5.2491734e-01   1.0087252e+00   9.3306807e-01   4.1317535e-01   5.0517282e-01   4.1317535e-01   8.5409862e-01   7.5503094e-01   4.1317535e-01   8.5409862e-01   1.3133231e+00   6.0964891e-01   7.0548138e-01   1.5640758e+00   1.3027556e+00   7.0176271e-01   6.0017982e-01   9.6591433e-01   6.0000635e-01   1.1056693e+00   4.0127250e-01   7.1700909e-01   3.0056049e+00   2.8037508e+00   3.2038047e+00   2.3350905e+00   2.9043042e+00   2.8024566e+00   3.0040963e+00   1.7133143e+00   2.9021652e+00   2.2134864e+00   2.0299551e+00   2.5066443e+00   2.3464211e+00   3.0020171e+00   1.9120296e+00   2.7041827e+00   2.8038683e+00   2.4047867e+00   2.8219468e+00   2.2186306e+00   3.1079220e+00   2.3063026e+00   3.2048524e+00   3.0013665e+00   2.6029242e+00   2.7037323e+00   3.1033714e+00   3.3046348e+00   2.8041978e+00   1.8296471e+00   2.1344310e+00   2.0421601e+00   2.2088481e+00   3.4030563e+00   2.8038694e+00   2.8056176e+00   3.0035303e+00   2.7166861e+00   2.4032760e+00   2.3173405e+00   2.7049931e+00   2.9020943e+00   2.3107074e+00   1.7540491e+00   2.5057744e+00   2.5017294e+00   2.5032614e+00   2.6027348e+00   1.4738792e+00   2.4051328e+00   4.3150879e+00   3.4081972e+00   4.2065765e+00   3.9027794e+00   4.1082339e+00   4.9060122e+00   2.8144592e+00   4.6029848e+00   4.1031680e+00   4.4149701e+00   3.4105998e+00   3.6062867e+00   3.8091132e+00   3.3145497e+00   3.4354252e+00   3.6203140e+00   3.8031346e+00   5.0073650e+00   5.2071845e+00   3.3100796e+00   4.0129286e+00   3.2145637e+00   5.0059527e+00   3.2079238e+00   4.0069158e+00   4.3033003e+00   3.1086424e+00   3.2069551e+00   3.9078313e+00   4.1030253e+00   4.4053246e+00   4.7120702e+00   3.9105941e+00   3.4019027e+00   3.9011588e+00   4.4155733e+00   3.9183552e+00   3.8030509e+00   3.1080886e+00   3.7106642e+00   3.9186175e+00   3.4279064e+00   3.4081972e+00   4.2100505e+00   4.0214626e+00   3.5236480e+00   3.3110446e+00   3.5093262e+00   3.7177968e+00   3.4052047e+00   4.1317535e-01   1.1269424e-01   5.6371422e-01   5.0084481e-01   
4.5784410e-01   8.0000239e-01   4.0006662e-01   3.0017653e-01   4.0006662e-01   6.0948800e-01   7.0088627e-01   4.1210927e-01   3.0482299e-01   4.5080200e-01   7.0016860e-01   6.0184934e-01   4.1317535e-01   7.0016860e-01   8.5406674e-01   4.0002221e-01   3.0482299e-01   1.5012719e+00   7.4269314e-01   3.3818226e-01   4.0002221e-01   8.0046685e-01   1.1269424e-01   6.3165225e-01   2.0121983e-01   5.0002283e-01   3.2274206e+00   3.0066036e+00   3.4159337e+00   2.5238518e+00   3.1081931e+00   3.0018108e+00   3.2048307e+00   1.8672243e+00   3.1090333e+00   2.4088880e+00   2.1557835e+00   2.7050009e+00   2.5327574e+00   3.2021240e+00   2.1075767e+00   2.9175658e+00   3.0027966e+00   2.6034860e+00   3.0173373e+00   2.4124132e+00   3.3060311e+00   2.5063233e+00   3.4049933e+00   3.2016467e+00   2.8074882e+00   2.9128790e+00   3.3135401e+00   3.5094636e+00   3.0034944e+00   2.0185049e+00   2.3226176e+00   2.2272636e+00   2.4061715e+00   3.6025233e+00   3.0027816e+00   3.0045159e+00   3.2117473e+00   2.9147768e+00   2.6022650e+00   2.5117111e+00   2.9035536e+00   3.1022707e+00   2.5074705e+00   1.8959538e+00   2.7040250e+00   2.7012715e+00   2.7023320e+00   2.8040244e+00   1.6015419e+00   2.6035923e+00   4.5125057e+00   3.6062867e+00   4.4120448e+00   4.1027436e+00   4.3076126e+00   5.1160601e+00   3.0101869e+00   4.8099407e+00   4.3047534e+00   4.6192050e+00   3.6105351e+00   3.8061121e+00   4.0115220e+00   3.5110245e+00   3.6271576e+00   3.8169672e+00   4.0039490e+00   5.2185416e+00   5.4163883e+00   3.5078417e+00   4.2149895e+00   3.4109302e+00   5.2173835e+00   3.4072913e+00   4.2079670e+00   4.5106052e+00   3.3073674e+00   3.4056857e+00   4.1070396e+00   4.3122866e+00   4.6159499e+00   4.9341410e+00   4.1092167e+00   3.6024856e+00   4.1011078e+00   4.6347105e+00   4.1150272e+00   4.0033723e+00   3.3063033e+00   3.9150644e+00   4.1174503e+00   3.6310621e+00   3.6062867e+00   4.4108290e+00   4.2194920e+00   3.7226812e+00   3.5095242e+00   3.7093173e+00   3.9142888e+00   3.6040604e+00   3.4342562e-01   8.5406616e-01   3.3813251e-01   6.0017665e-01   4.5078948e-01   4.0125062e-01   2.2573593e-01   3.0474106e-01   7.0008584e-01   6.0184622e-01   2.2538848e-01   7.0017011e-01   8.0046685e-01   5.0477564e-01   5.2167208e-01   4.0004442e-01   5.0477564e-01   1.0016896e+00   3.0474106e-01   4.5080200e-01   1.1531953e+00   1.0008617e+00   4.5080200e-01   4.1420960e-01   6.1119558e-01   4.1210927e-01   8.0051036e-01   3.0482299e-01   4.1210927e-01   3.0158412e+00   2.8068284e+00   3.2097099e+00   2.3108757e+00   2.9065890e+00   2.8022000e+00   3.0066661e+00   1.6225614e+00   2.9048017e+00   2.2116239e+00   1.8685354e+00   2.5096708e+00   2.3100354e+00   3.0026666e+00   1.9136652e+00   2.7108290e+00   2.8056165e+00   2.4010446e+00   2.8094419e+00   2.2042326e+00   3.1114445e+00   2.3060252e+00   3.2036636e+00   3.0010404e+00   2.6048201e+00   2.7083838e+00   3.1074856e+00   3.3083888e+00   2.8056953e+00   1.8055938e+00   2.1074903e+00   2.0078120e+00   2.2044098e+00   3.4034897e+00   2.8056164e+00   2.8086638e+00   3.0080453e+00   2.7058598e+00   2.4044765e+00   2.3071636e+00   2.7018643e+00   2.9031229e+00   2.3040302e+00   1.6345914e+00   2.5039390e+00   2.5021287e+00   2.5037126e+00   2.6035547e+00   1.3485963e+00   2.4045982e+00   4.3195471e+00   3.4105069e+00   4.2110205e+00   3.9039624e+00   4.1112641e+00   4.9115281e+00   2.8134622e+00   4.6064153e+00   4.1040152e+00   4.4216160e+00   3.4153329e+00   3.6083651e+00   3.8136432e+00   3.3170079e+00   3.4454804e+00   3.6271127e+00   3.8048208e+00   
5.0136911e+00   5.2125102e+00   3.3041886e+00   4.0185527e+00   3.2193583e+00   5.0117789e+00   3.2102572e+00   4.0101485e+00   4.3071174e+00   3.1116727e+00   3.2099137e+00   3.9105756e+00   4.1073261e+00   4.4108290e+00   4.7236350e+00   3.9141183e+00   3.4025037e+00   3.9008223e+00   4.4275986e+00   3.9240716e+00   3.8046112e+00   3.1114731e+00   3.7165788e+00   3.9250546e+00   3.4397950e+00   3.4105069e+00   4.2141201e+00   4.0283725e+00   3.5323888e+00   3.3126331e+00   3.5133654e+00   3.7236064e+00   3.4073749e+00   5.6371422e-01   4.0125062e-01   4.2362917e-01   7.0008735e-01   3.0017653e-01   2.2573593e-01   3.0490481e-01   5.2167829e-01   6.0202028e-01   3.3808272e-01   4.1210927e-01   5.2167829e-01   6.0201716e-01   5.0477564e-01   4.0363334e-01   6.0201716e-01   7.8895472e-01   3.0474106e-01   2.2608083e-01   1.4017696e+00   7.1636719e-01   2.2608083e-01   4.0002221e-01   7.0088627e-01   2.0121983e-01   5.6371422e-01   2.2538848e-01   4.0127250e-01   3.2269400e+00   3.0055754e+00   3.4153574e+00   2.5158464e+00   3.1070003e+00   3.0010043e+00   3.2037264e+00   1.8447764e+00   3.1084925e+00   2.4051329e+00   2.1169795e+00   2.7031257e+00   2.5230194e+00   3.2014729e+00   2.1040690e+00   2.9167440e+00   3.0016616e+00   2.6020655e+00   3.0122358e+00   2.4077390e+00   3.3040889e+00   2.5044039e+00   3.4036241e+00   3.2011770e+00   2.8066054e+00   2.9119764e+00   3.3128842e+00   3.5083774e+00   3.0022542e+00   2.0112442e+00   2.3146813e+00   2.2178117e+00   2.4035997e+00   3.6016272e+00   3.0016466e+00   3.0030180e+00   3.2109724e+00   2.9107838e+00   2.6012056e+00   2.5072166e+00   2.9020943e+00   3.1016037e+00   2.5045154e+00   1.8665804e+00   2.7022828e+00   2.7006623e+00   2.7012715e+00   2.8031364e+00   1.5665127e+00   2.6019940e+00   4.5096473e+00   3.6042722e+00   4.4108394e+00   4.1020090e+00   4.3058539e+00   5.1154681e+00   3.0065585e+00   4.8095984e+00   4.3039464e+00   4.6166518e+00   3.6081814e+00   3.8045576e+00   4.0096177e+00   3.5076367e+00   3.6204901e+00   3.8129595e+00   4.0031500e+00   5.2178509e+00   5.4155855e+00   3.5053718e+00   4.2125027e+00   3.4076329e+00   5.2169557e+00   3.4052726e+00   4.2064771e+00   4.5101686e+00   3.3051940e+00   3.4039470e+00   4.1052782e+00   4.3120002e+00   4.6153660e+00   4.9336188e+00   4.1069501e+00   3.6018985e+00   4.1007374e+00   4.6331222e+00   4.1114855e+00   4.0025890e+00   3.3042986e+00   3.9129418e+00   4.1139050e+00   3.6259501e+00   3.6042722e+00   4.4088300e+00   4.2155438e+00   3.7181244e+00   3.5068261e+00   3.7072136e+00   3.9107458e+00   3.6027359e+00   7.1779518e-01   9.0005048e-01   6.8160885e-01   6.0980961e-01   6.3164977e-01   6.0964597e-01   6.0948506e-01   6.3178534e-01   8.0888055e-01   6.5712813e-01   9.1552331e-01   5.6595908e-01   4.5147187e-01   9.0026543e-01   5.6595908e-01   6.0201716e-01   5.6370994e-01   4.1212852e-01   1.3000455e+00   4.1315633e-01   6.1830764e-01   9.0511169e-01   6.0964891e-01   6.3178534e-01   4.5077696e-01   7.1621748e-01   4.5783248e-01   3.7510248e+00   3.5145413e+00   3.9319585e+00   3.0060350e+00   3.6168418e+00   3.5015800e+00   3.7092301e+00   2.3098782e+00   3.6209185e+00   2.9036019e+00   2.5319798e+00   3.2059465e+00   3.0125595e+00   3.7043511e+00   2.6050088e+00   3.4362995e+00   3.5023713e+00   3.1027850e+00   3.5112541e+00   2.9034027e+00   3.8056232e+00   3.0109656e+00   3.9070007e+00   3.7037948e+00   3.3177283e+00   3.4278716e+00   3.8279200e+00   4.0185639e+00   3.5049370e+00   2.5063489e+00   2.8048596e+00   2.7053910e+00   2.9045816e+00   4.1028806e+00   
3.5020661e+00   3.5059137e+00   3.7249599e+00   3.4134555e+00   3.1021033e+00   3.0035422e+00   3.4012314e+00   3.6049329e+00   3.0042977e+00   2.3151346e+00   3.2021240e+00   3.2018065e+00   3.2023319e+00   3.3095196e+00   2.0139907e+00   3.1028259e+00   5.0111326e+00   4.1049446e+00   4.9203200e+00   4.6042055e+00   4.8089555e+00   5.6273643e+00   3.5051449e+00   5.3190012e+00   4.8083867e+00   5.1260178e+00   4.1140184e+00   4.3082037e+00   4.5172930e+00   4.0074572e+00   4.1195081e+00   4.3162103e+00   4.5071301e+00   5.7305902e+00   5.9266306e+00   4.0041352e+00   4.7202045e+00   3.9078680e+00   5.7295930e+00   3.9093593e+00   4.7117434e+00   5.0203874e+00   3.8087102e+00   3.9064537e+00   4.6081315e+00   4.8239967e+00   5.1282911e+00   5.4538016e+00   4.6097450e+00   4.1052255e+00   4.6016323e+00   5.1527860e+00   4.6133719e+00   4.5057296e+00   3.8063301e+00   4.4231469e+00   4.6192070e+00   4.1384491e+00   4.1049446e+00   4.9142249e+00   4.7202041e+00   4.2256252e+00   4.0099717e+00   4.2125085e+00   4.4124585e+00   4.1039188e+00   3.4085233e-01   3.3818226e-01   1.2699992e-01   3.0922892e-01   3.3818226e-01   4.1212852e-01   3.4085233e-01   3.0490481e-01   8.0250202e-01   9.0192695e-01   4.0363334e-01   5.0437695e-01   4.5847767e-01   4.0363334e-01   7.0633229e-01   3.0482299e-01   4.0246123e-01   1.0095513e+00   7.0548283e-01   2.0181667e-01   5.0043084e-01   3.6452132e-01   5.0436965e-01   5.0855778e-01   4.1420960e-01   3.3813251e-01   3.0360001e+00   2.8068283e+00   3.2199554e+00   2.3040302e+00   2.9083228e+00   2.8004229e+00   3.0040677e+00   1.6099792e+00   2.9111118e+00   2.2023225e+00   1.8444678e+00   2.5026969e+00   2.3072196e+00   3.0013665e+00   1.9023442e+00   2.7227152e+00   2.8012528e+00   2.4005053e+00   2.8054838e+00   2.2013444e+00   3.1036553e+00   2.3040711e+00   3.2026875e+00   3.0010106e+00   2.6084695e+00   2.7159628e+00   3.1166009e+00   3.3100796e+00   2.8019019e+00   1.8020058e+00   2.1029252e+00   2.0034861e+00   2.2011906e+00   3.4011297e+00   2.8012319e+00   2.8028007e+00   3.0142197e+00   2.7060544e+00   2.4007552e+00   2.3017471e+00   2.7003774e+00   2.9016034e+00   2.3011979e+00   1.6179000e+00   2.5007284e+00   2.5003793e+00   2.5007008e+00   2.6035320e+00   1.3154932e+00   2.4008859e+00   4.3091529e+00   3.4034897e+00   4.2123904e+00   3.9018704e+00   4.1056542e+00   4.9181670e+00   2.8038684e+00   4.6114520e+00   4.1039624e+00   4.4180514e+00   3.4084525e+00   3.6042874e+00   3.8104403e+00   3.3060311e+00   3.4198458e+00   3.6127194e+00   3.8032956e+00   5.0208687e+00   5.2178587e+00   3.3018328e+00   4.0133297e+00   3.2067991e+00   5.0200323e+00   3.2048524e+00   4.0067585e+00   4.3122441e+00   3.1047671e+00   3.2036090e+00   3.9049697e+00   4.1148052e+00   4.4184267e+00   4.7404155e+00   3.9065727e+00   3.4018864e+00   3.9004186e+00   4.4392802e+00   3.9110280e+00   3.8025990e+00   3.1038577e+00   3.7145622e+00   3.9140895e+00   3.4287259e+00   3.4034897e+00   4.2090841e+00   4.0156240e+00   3.5189468e+00   3.3056781e+00   3.5073617e+00   3.7102604e+00   3.4023494e+00   4.1315633e-01   3.0915245e-01   4.5078948e-01   5.2132556e-01   3.0482299e-01   3.3808272e-01   6.0964597e-01   7.0911112e-01   8.6051414e-01   4.1212852e-01   7.0016860e-01   7.4262964e-01   4.1212852e-01   6.1830489e-01   4.1209001e-01   6.0018299e-01   1.1056693e+00   6.0964597e-01   4.1317535e-01   4.1315633e-01   5.2133179e-01   4.2268438e-01   5.0084481e-01   5.2491131e-01   5.0043084e-01   2.9115264e+00   2.6338022e+00   3.0658361e+00   2.1172959e+00   2.7373419e+00   
2.6040822e+00   2.8212040e+00   1.4407364e+00   2.7450517e+00   2.0182279e+00   1.7116683e+00   2.3196029e+00   2.1285888e+00   2.8091316e+00   1.7264084e+00   2.5863714e+00   2.6084695e+00   2.2054529e+00   2.6248910e+00   2.0083311e+00   2.9174382e+00   2.1301354e+00   3.0136614e+00   2.8068911e+00   2.4421128e+00   2.5659281e+00   2.9581275e+00   3.1380442e+00   2.6129786e+00   1.6191482e+00   1.9129988e+00   1.8141046e+00   2.0129571e+00   3.2064817e+00   2.6080847e+00   2.6171503e+00   2.8540078e+00   2.5288353e+00   2.2078337e+00   2.1116321e+00   2.5029614e+00   2.7108347e+00   2.1109969e+00   1.4620239e+00   2.3067198e+00   2.3048725e+00   2.3072196e+00   2.4221911e+00   1.1960519e+00   2.2090466e+00   4.1263920e+00   3.2146438e+00   4.0362393e+00   3.7082866e+00   3.9191966e+00   4.7434452e+00   2.6190662e+00   4.4302264e+00   3.9142233e+00   4.2488425e+00   3.2328627e+00   3.4177609e+00   3.6349468e+00   3.1232336e+00   3.2606481e+00   3.4419771e+00   3.6135028e+00   4.8484858e+00   5.0414126e+00   3.1077602e+00   3.8409517e+00   3.0264502e+00   4.8462395e+00   3.0224855e+00   3.8231767e+00   4.1339850e+00   2.9228238e+00   3.0173096e+00   3.7181009e+00   3.9409635e+00   4.2473796e+00   4.5888422e+00   3.7226114e+00   3.2097424e+00   3.7024938e+00   4.2924779e+00   3.7339169e+00   3.6111693e+00   2.9185950e+00   3.5470876e+00   3.7434043e+00   3.2896416e+00   3.2146438e+00   4.0283196e+00   3.8460162e+00   3.3616756e+00   3.1242731e+00   3.3284650e+00   3.5333785e+00   3.2109426e+00   4.0122873e-01   5.0043084e-01   4.0243965e-01   3.0474106e-01   2.0061436e-01   4.5148429e-01   1.1000100e+00   1.2012928e+00   1.2699992e-01   4.0122873e-01   5.6595488e-01   1.2699992e-01   6.0184309e-01   4.0004442e-01   5.0436965e-01   7.1700909e-01   6.0201716e-01   5.2132556e-01   8.0051115e-01   2.2573593e-01   8.0000080e-01   4.0243965e-01   7.0088477e-01   3.0474106e-01   3.1428043e+00   2.9119661e+00   3.3252402e+00   2.4048325e+00   3.0131919e+00   2.9019362e+00   3.1087162e+00   1.7043719e+00   3.0148571e+00   2.3090280e+00   1.9099608e+00   2.6089255e+00   2.4039780e+00   3.1034773e+00   2.0109328e+00   2.8295063e+00   2.9047982e+00   2.5011632e+00   2.9079901e+00   2.3019339e+00   3.2101766e+00   2.4088882e+00   3.3051149e+00   3.1020645e+00   2.7127201e+00   2.8219255e+00   3.2211370e+00   3.4154432e+00   2.9057760e+00   1.9031964e+00   2.2023924e+00   2.1016800e+00   2.3040177e+00   3.5033831e+00   2.9047500e+00   2.9083094e+00   3.1195526e+00   2.8078790e+00   2.5037817e+00   2.4045555e+00   2.8012577e+00   3.0040677e+00   2.4032886e+00   1.7053664e+00   2.6031367e+00   2.6019751e+00   2.6032655e+00   2.7067267e+00   1.4186217e+00   2.5039390e+00   4.4180854e+00   3.5092124e+00   4.3179254e+00   4.0043989e+00   4.2115634e+00   5.0223341e+00   2.9108451e+00   4.7142986e+00   4.2064794e+00   4.5276385e+00   3.5169841e+00   3.7092301e+00   3.9177719e+00   3.4145764e+00   3.5398518e+00   3.7257231e+00   3.9064409e+00   5.1255523e+00   5.3223081e+00   3.4028319e+00   4.1224590e+00   3.3167394e+00   5.1238108e+00   3.3110167e+00   4.1123595e+00   4.4156295e+00   3.2116669e+00   3.3094499e+00   4.0106882e+00   4.2180724e+00   4.5227110e+00   4.8462592e+00   4.0138270e+00   3.5038530e+00   4.0010263e+00   4.5481710e+00   4.0222346e+00   3.9055783e+00   3.2104685e+00   3.8231767e+00   4.0259306e+00   3.5470873e+00   3.5092124e+00   4.3162096e+00   4.1285334e+00   3.6344358e+00   3.4126369e+00   3.6148618e+00   3.8215373e+00   3.5066394e+00   2.2608083e-01   2.4170870e-01   3.0915245e-01   
3.0915245e-01   4.0002221e-01   7.0096858e-01   8.0888055e-01   3.3818226e-01   4.0243965e-01   5.0477564e-01   3.3818226e-01   6.1135434e-01   2.0121983e-01   3.0017653e-01   1.1020506e+00   6.0219099e-01   2.0061436e-01   4.1210927e-01   4.0246123e-01   4.0125062e-01   4.0363334e-01   3.4085233e-01   2.2573593e-01   3.1414744e+00   2.9090612e+00   3.3237063e+00   2.4058952e+00   3.0107723e+00   2.9007492e+00   3.1056084e+00   1.7139238e+00   3.0138400e+00   2.3035512e+00   1.9525301e+00   2.6040007e+00   2.4100386e+00   3.1020779e+00   2.0037730e+00   2.8273039e+00   2.9018637e+00   2.5009579e+00   2.9077468e+00   2.3022754e+00   3.2048985e+00   2.4059812e+00   3.3038272e+00   3.1015567e+00   2.7110304e+00   2.8196992e+00   3.2199879e+00   3.4126307e+00   2.9028597e+00   1.9035634e+00   2.2044603e+00   2.1052067e+00   2.3021298e+00   3.5016873e+00   2.9018153e+00   2.9040211e+00   3.1174676e+00   2.8083845e+00   2.5012694e+00   2.4028385e+00   2.8006965e+00   3.0024198e+00   2.4021168e+00   1.7233814e+00   2.6012647e+00   2.6007117e+00   2.6012056e+00   2.7050190e+00   1.4220898e+00   2.5015263e+00   4.4109742e+00   3.5045842e+00   4.3148937e+00   4.0025815e+00   4.2071337e+00   5.0208677e+00   2.9053047e+00   4.7134688e+00   4.2051335e+00   4.5213131e+00   3.5107904e+00   3.7056862e+00   3.9129303e+00   3.4076849e+00   3.5232606e+00   3.7154827e+00   3.9043905e+00   5.1238111e+00   5.3204854e+00   3.4027174e+00   4.1161770e+00   3.3085473e+00   5.1228006e+00   3.3065392e+00   4.1085246e+00   4.4144908e+00   3.2064337e+00   3.3048953e+00   4.0063737e+00   4.2173565e+00   4.5213359e+00   4.8449108e+00   4.0082529e+00   3.5026815e+00   4.0006690e+00   4.5442582e+00   4.0132894e+00   3.9035247e+00   3.2051958e+00   3.8177289e+00   4.0170262e+00   3.5340950e+00   3.5045842e+00   4.3111747e+00   4.1186693e+00   3.6228903e+00   3.4075338e+00   3.6094379e+00   3.8124787e+00   3.5031928e+00   1.1269424e-01   5.0436965e-01   4.5078948e-01   2.2573593e-01   6.0000317e-01   7.0088477e-01   4.1210927e-01   3.4080442e-01   3.0474106e-01   4.1210927e-01   8.0883841e-01   1.1269424e-01   2.2573593e-01   1.2089253e+00   8.0051036e-01   4.0125062e-01   4.1317535e-01   5.2133802e-01   3.0017653e-01   6.0184622e-01   2.0061436e-01   2.2573593e-01   3.2211375e+00   3.0065592e+00   3.4126307e+00   2.5097024e+00   3.1069750e+00   3.0016616e+00   3.2056674e+00   1.8201133e+00   3.1066333e+00   2.4080686e+00   2.0619186e+00   2.7068820e+00   2.5107632e+00   3.2022485e+00   2.1087015e+00   2.9137660e+00   3.0040552e+00   2.6010528e+00   3.0089164e+00   2.4039766e+00   3.3085605e+00   2.5050799e+00   3.4035383e+00   3.2010814e+00   2.8057187e+00   2.9102474e+00   3.3101488e+00   3.5088135e+00   3.0043212e+00   2.0051350e+00   2.3071665e+00   2.2078183e+00   2.4034084e+00   3.6027901e+00   3.0040510e+00   3.0064311e+00   3.2097125e+00   2.9065741e+00   2.6030847e+00   2.5057763e+00   2.9016034e+00   3.1025924e+00   2.5033702e+00   1.8310475e+00   2.7029487e+00   2.7015162e+00   2.7026433e+00   2.8034238e+00   1.5358653e+00   2.6032968e+00   4.5159027e+00   3.6080734e+00   4.4115652e+00   4.1033603e+00   4.3094200e+00   5.1138780e+00   3.0100866e+00   4.8082318e+00   4.3041941e+00   4.6203464e+00   3.6127197e+00   3.8070338e+00   4.0124958e+00   3.5130651e+00   3.6349183e+00   3.8215362e+00   4.0044029e+00   5.2162094e+00   5.4145184e+00   3.5039680e+00   4.2166537e+00   3.4145702e+00   5.2146334e+00   3.4083285e+00   4.2090727e+00   4.5089181e+00   3.3091123e+00   3.4076329e+00   4.1087140e+00   4.3098021e+00   
4.6133860e+00   4.9287684e+00   4.1115100e+00   3.6023704e+00   4.1007820e+00   4.6309835e+00   4.1192351e+00   4.0040240e+00   3.3086516e+00   3.9156759e+00   4.1209257e+00   3.6344375e+00   3.6080734e+00   4.4124586e+00   4.2235561e+00   3.7268101e+00   3.5102374e+00   3.7111714e+00   3.9185866e+00   3.6056580e+00   5.0084481e-01   4.1315633e-01   2.2573593e-01   7.0000303e-01   8.0046605e-01   3.3818226e-01   2.4170870e-01   3.0017653e-01   3.3818226e-01   8.0245824e-01   1.1269424e-01   2.0181667e-01   1.1133897e+00   8.0004523e-01   4.0246123e-01   5.2167829e-01   4.5078948e-01   4.0125062e-01   6.0017665e-01   3.0017653e-01   2.0061436e-01   3.3182813e+00   3.1056085e+00   3.5110034e+00   2.6060742e+00   3.2059465e+00   3.1013637e+00   3.3048926e+00   1.9099680e+00   3.2056789e+00   2.5063346e+00   2.1344310e+00   2.8057704e+00   2.6059875e+00   3.3019214e+00   2.2068463e+00   3.0117188e+00   3.1034568e+00   2.7006623e+00   3.1063566e+00   2.5023073e+00   3.4074246e+00   2.6040822e+00   3.5028878e+00   3.3008913e+00   2.9048017e+00   3.0087102e+00   3.4087684e+00   3.6077011e+00   3.1036688e+00   2.1027637e+00   2.4039664e+00   2.3040177e+00   2.5024922e+00   3.7023994e+00   3.1034532e+00   3.1054994e+00   3.3083865e+00   3.0045919e+00   2.7025560e+00   2.6039937e+00   3.0011260e+00   3.2022183e+00   2.6023240e+00   1.9156198e+00   2.8022964e+00   2.8012577e+00   2.8021795e+00   2.9028597e+00   1.6190551e+00   2.7026433e+00   4.6143249e+00   3.7070367e+00   4.5103890e+00   4.2029881e+00   4.4084395e+00   5.2126509e+00   3.1082889e+00   4.9074567e+00   4.4036930e+00   4.7183734e+00   3.7111657e+00   3.9061763e+00   4.1111077e+00   3.6112621e+00   3.7306950e+00   3.9190472e+00   4.1039096e+00   5.3148048e+00   5.5132902e+00   3.6028435e+00   4.3148929e+00   3.5126667e+00   5.3133599e+00   3.5071916e+00   4.3081091e+00   4.6080296e+00   3.4078683e+00   3.5066415e+00   4.2077543e+00   4.4087821e+00   4.7120754e+00   5.0261502e+00   4.2102487e+00   3.7020547e+00   4.2006494e+00   4.7279956e+00   4.2171591e+00   4.1035746e+00   3.4074978e+00   4.0138994e+00   4.2186688e+00   3.7302926e+00   3.7070367e+00   4.5111938e+00   4.3210760e+00   3.8236433e+00   3.6087856e+00   3.8098356e+00   4.0164852e+00   3.7049593e+00   1.1269424e-01   7.0017011e-01   9.0506343e-01   1.0426636e+00   2.0181667e-01   4.1209001e-01   8.0093081e-01   2.0181667e-01   3.4080442e-01   4.0125062e-01   3.6259865e-01   9.0029064e-01   3.3808272e-01   4.2268438e-01   6.1135434e-01   2.2608083e-01   6.0948212e-01   2.0061436e-01   6.3164977e-01   3.0482299e-01   3.1902650e+00   2.9267417e+00   3.3545239e+00   2.4065479e+00   3.0300492e+00   2.9028462e+00   3.1166330e+00   1.7073273e+00   3.0369978e+00   2.3091363e+00   1.9242178e+00   2.6129479e+00   2.4148785e+00   3.1074477e+00   2.0138888e+00   2.8680310e+00   2.9053048e+00   2.5042875e+00   2.9165009e+00   2.3038195e+00   3.2116668e+00   2.4221589e+00   3.3110857e+00   3.1060464e+00   2.7333589e+00   2.8520935e+00   3.2480481e+00   3.4313924e+00   2.9094538e+00   1.9103596e+00   2.2042535e+00   2.1040084e+00   2.3086427e+00   3.5048916e+00   2.9048754e+00   2.9119661e+00   3.1440058e+00   2.8212158e+00   2.5048147e+00   2.4054865e+00   2.8016296e+00   3.0087060e+00   2.4071454e+00   1.7108740e+00   2.6040234e+00   2.6035022e+00   2.6047905e+00   2.7176633e+00   1.4222462e+00   2.5057847e+00   4.4195581e+00   3.5098289e+00   4.3311360e+00   4.0067587e+00   4.2149806e+00   5.0390074e+00   2.9109559e+00   4.7273232e+00   4.2124122e+00   4.5405876e+00   3.5250718e+00   
3.7139027e+00   3.9284285e+00   3.4150426e+00   3.5404383e+00   3.7302923e+00   3.9113385e+00   5.1434691e+00   5.3373009e+00   3.4049076e+00   4.1330547e+00   3.3170101e+00   5.1417717e+00   3.3168869e+00   4.1189487e+00   4.4302240e+00   3.2165105e+00   3.3123688e+00   4.0139003e+00   4.2362095e+00   4.5418862e+00   4.8784553e+00   4.0170271e+00   3.5083268e+00   4.0022121e+00   4.5797332e+00   4.0245435e+00   3.9092247e+00   3.2127499e+00   3.8383398e+00   4.0332236e+00   3.5687055e+00   3.5098289e+00   4.3229228e+00   4.1350002e+00   3.6463113e+00   3.4177609e+00   3.6219569e+00   3.8236419e+00   3.5076152e+00   6.0202028e-01   1.0008471e+00   1.1133984e+00   1.2085435e-01   4.0125062e-01   7.0548138e-01   1.2085435e-01   4.1210927e-01   3.3813251e-01   4.1317535e-01   8.0093160e-01   4.1210927e-01   4.5147187e-01   7.0184453e-01   2.0121983e-01   7.0088326e-01   2.2573593e-01   6.3164977e-01   2.4170870e-01   3.1712556e+00   2.9203033e+00   3.3425812e+00   2.4054865e+00   3.0228150e+00   2.9023686e+00   3.1131138e+00   1.7053664e+00   3.0276460e+00   2.3090554e+00   1.9156198e+00   2.6109872e+00   2.4094445e+00   3.1056085e+00   2.0122722e+00   2.8520934e+00   2.9050277e+00   2.5027053e+00   2.9125192e+00   2.3027405e+00   3.2109394e+00   2.4160415e+00   3.3084146e+00   3.1042008e+00   2.7243956e+00   2.8394100e+00   3.2369546e+00   3.4247119e+00   2.9077087e+00   1.9065546e+00   2.2031052e+00   2.1025721e+00   2.3063026e+00   3.5041732e+00   2.9047982e+00   2.9102300e+00   3.1338083e+00   2.8152056e+00   2.5042498e+00   2.4049187e+00   2.8014068e+00   3.0065584e+00   2.4051787e+00   1.7073273e+00   2.6035320e+00   2.6027034e+00   2.6039922e+00   2.7127202e+00   1.4197348e+00   2.5048165e+00   4.4189015e+00   3.5095163e+00   4.3257988e+00   4.0057069e+00   4.2135049e+00   5.0324949e+00   2.9108796e+00   4.7221364e+00   4.2099109e+00   4.5353940e+00   3.5215862e+00   3.7118544e+00   3.9240013e+00   3.4147901e+00   3.5401421e+00   3.7282909e+00   3.9092247e+00   5.1365094e+00   5.3314710e+00   3.4038679e+00   4.1286955e+00   3.3168613e+00   5.1347955e+00   3.3142720e+00   4.1161770e+00   4.4243750e+00   3.2143132e+00   3.3110162e+00   4.0124921e+00   4.2289511e+00   4.5343165e+00   4.8661278e+00   4.0156242e+00   3.5063341e+00   4.0016595e+00   4.5675358e+00   4.0235142e+00   3.9076269e+00   3.2116668e+00   3.8321137e+00   4.0301568e+00   3.5598554e+00   3.5095163e+00   4.3201293e+00   4.1322798e+00   3.6413274e+00   3.4154677e+00   3.6188977e+00   3.8226861e+00   3.5071388e+00   7.0096708e-01   8.0004602e-01   5.0855077e-01   4.1420960e-01   2.2608083e-01   5.0855077e-01   1.0008768e+00   3.0474106e-01   4.0127250e-01   1.1527746e+00   1.0000457e+00   4.0127250e-01   4.5783248e-01   6.0948800e-01   4.1317535e-01   8.0008964e-01   3.0482299e-01   4.0127250e-01   3.2104685e+00   3.0024155e+00   3.4059092e+00   2.5048145e+00   3.1026586e+00   3.0005260e+00   3.2022151e+00   1.8108200e+00   3.1025924e+00   2.4028974e+00   2.0417688e+00   2.7025751e+00   2.5062865e+00   3.2007416e+00   2.1027376e+00   2.9057769e+00   3.0015388e+00   2.6003213e+00   3.0043084e+00   2.4017233e+00   3.3039365e+00   2.5015263e+00   3.4013673e+00   3.2002931e+00   2.8019179e+00   2.9040262e+00   3.3045112e+00   3.5038551e+00   3.0015958e+00   2.0020172e+00   2.3035509e+00   2.2041009e+00   2.4010446e+00   3.6011254e+00   3.0015387e+00   3.0025850e+00   3.2040849e+00   2.9029297e+00   2.6009614e+00   2.5022969e+00   2.9005699e+00   3.1008537e+00   2.5011717e+00   1.8179820e+00   2.7009799e+00   2.7004102e+00   
2.7008225e+00   2.8010266e+00   1.5160787e+00   2.6010449e+00   4.5093548e+00   3.6039076e+00   4.4060847e+00   4.1014985e+00   4.3050076e+00   5.1081747e+00   3.0045274e+00   4.8044751e+00   4.3019076e+00   4.6117611e+00   3.6062405e+00   3.8032982e+00   4.0063634e+00   3.5066398e+00   3.6202705e+00   3.8119529e+00   4.0019489e+00   5.2097659e+00   5.4087506e+00   3.5019664e+00   4.2090727e+00   3.4073888e+00   5.2088330e+00   3.4037266e+00   4.2046087e+00   4.5046939e+00   3.3041077e+00   3.4034672e+00   4.1044794e+00   4.3051857e+00   4.6074982e+00   4.9181673e+00   4.1061528e+00   3.6008588e+00   4.1002763e+00   4.6187512e+00   4.1110303e+00   4.0017844e+00   3.3039580e+00   3.9080413e+00   4.1118167e+00   3.6188744e+00   3.6039076e+00   4.4067826e+00   4.2136946e+00   3.7147052e+00   3.5048712e+00   3.7054764e+00   3.9103830e+00   3.6025973e+00   3.0026460e-01   1.0001598e+00   9.0029064e-01   6.0202028e-01   1.0001598e+00   1.1281352e+00   7.0000303e-01   6.0052920e-01   1.8012962e+00   9.6574369e-01   6.3178782e-01   4.2270142e-01   1.1005460e+00   3.0026460e-01   9.1422402e-01   4.0004442e-01   8.0004602e-01   3.2225450e+00   3.0091788e+00   3.4142737e+00   2.5659206e+00   3.1121190e+00   3.0065741e+00   3.2080663e+00   1.9795481e+00   3.1095927e+00   2.4291135e+00   2.3151880e+00   2.7129011e+00   2.5826416e+00   3.2051685e+00   2.1273517e+00   2.9165009e+00   3.0077149e+00   2.6132476e+00   3.0422268e+00   2.4403272e+00   3.3124044e+00   2.5166989e+00   3.4115621e+00   3.2044340e+00   2.8105357e+00   2.9137358e+00   3.3135329e+00   3.5115123e+00   3.0089448e+00   2.0634477e+00   2.3669955e+00   2.2798676e+00   2.4222743e+00   3.6065353e+00   3.0077107e+00   3.0095641e+00   3.2119165e+00   2.9352117e+00   2.6080595e+00   2.5371436e+00   2.9126008e+00   3.1051604e+00   2.5254396e+00   2.0316239e+00   2.7143597e+00   2.7050980e+00   2.7084026e+00   2.8082597e+00   1.7626307e+00   2.6129786e+00   4.5202811e+00   3.6136284e+00   4.4137920e+00   4.1051791e+00   4.3125133e+00   5.1149745e+00   3.0264326e+00   4.8090821e+00   4.3074243e+00   4.6242424e+00   3.6169385e+00   3.8113313e+00   4.0160029e+00   3.5235168e+00   3.6464145e+00   3.8279938e+00   4.0062032e+00   5.2173409e+00   5.4162239e+00   3.5202520e+00   4.2206877e+00   3.4219359e+00   5.2156030e+00   3.4146242e+00   4.2116109e+00   4.5097900e+00   3.3150972e+00   3.4115327e+00   4.1123767e+00   4.3106068e+00   4.6148396e+00   4.9296764e+00   4.1159139e+00   3.6049049e+00   4.1030781e+00   4.6336876e+00   4.1247374e+00   4.0056652e+00   3.3131419e+00   3.9194404e+00   4.1265834e+00   3.6428011e+00   3.6136284e+00   4.4157045e+00   4.2295866e+00   3.7344540e+00   3.5196603e+00   3.7152542e+00   3.9242137e+00   3.6086340e+00   1.1055707e+00   1.0030868e+00   7.0000151e-01   1.1055707e+00   1.3018102e+00   8.0245824e-01   7.1621884e-01   1.9078389e+00   1.1896594e+00   7.2044167e-01   5.3943256e-01   1.2089192e+00   4.5147187e-01   1.0776188e+00   5.0043084e-01   9.0506254e-01   3.3079985e+00   3.1046072e+00   3.5056118e+00   2.6709345e+00   3.2081329e+00   3.1065948e+00   3.3043807e+00   2.0901204e+00   3.2052035e+00   2.5276373e+00   2.4267777e+00   2.8091158e+00   2.6904888e+00   3.3041886e+00   2.2241050e+00   3.0065909e+00   3.1056084e+00   2.7155799e+00   3.1440950e+00   2.5451785e+00   3.4078458e+00   2.6152930e+00   3.5111169e+00   3.3045112e+00   2.9070941e+00   3.0065909e+00   3.4069929e+00   3.6059670e+00   3.1068940e+00   2.1702441e+00   2.4737523e+00   2.3880607e+00   2.5238437e+00   3.7056514e+00   3.1056084e+00   
3.1055129e+00   3.3051256e+00   3.0372254e+00   2.7067267e+00   2.6397031e+00   3.0142197e+00   3.2037566e+00   2.6278605e+00   2.1424915e+00   2.8148768e+00   2.8047553e+00   2.8077256e+00   2.9066659e+00   1.8682644e+00   2.7127202e+00   4.6142218e+00   3.7103348e+00   4.5074842e+00   4.2035307e+00   4.4083412e+00   5.2074280e+00   3.1239158e+00   4.9041928e+00   4.4055872e+00   4.7149533e+00   3.7103439e+00   3.9081758e+00   4.1095835e+00   3.6188977e+00   3.7328455e+00   3.9187196e+00   4.1037709e+00   5.3087451e+00   5.5089283e+00   3.6218973e+00   4.3127798e+00   3.5155554e+00   5.3076917e+00   3.5109043e+00   4.3070065e+00   4.6043043e+00   3.4107931e+00   3.5076367e+00   4.2085644e+00   4.4044360e+00   4.7071579e+00   5.0144130e+00   4.2110565e+00   3.7038321e+00   4.2031939e+00   4.7176313e+00   4.2169539e+00   4.1034568e+00   3.4087523e+00   4.0110693e+00   4.2176552e+00   3.7262659e+00   3.7103348e+00   4.5099841e+00   4.3199893e+00   3.8223318e+00   3.6159248e+00   3.8096375e+00   4.0163546e+00   3.7058422e+00   3.0026460e-01   6.0964891e-01   0.0000000e+00   5.0043842e-01   3.0482299e-01   4.0246123e-01   8.0254500e-01   5.0043842e-01   5.2133802e-01   7.0556260e-01   2.0181667e-01   7.0008735e-01   3.0026460e-01   6.0948506e-01   2.0181667e-01   3.2490712e+00   3.0153168e+00   3.4297841e+00   2.5067523e+00   3.1166337e+00   3.0027816e+00   3.2112793e+00   1.8068048e+00   3.1183051e+00   2.4116924e+00   2.0138832e+00   2.7116615e+00   2.5059537e+00   3.2048192e+00   2.1144760e+00   2.9351753e+00   3.0063019e+00   2.6019122e+00   3.0106587e+00   2.4030297e+00   3.3125861e+00   2.5120719e+00   3.4068163e+00   3.2029877e+00   2.8162444e+00   2.9267417e+00   3.3252407e+00   3.5189464e+00   3.0077107e+00   2.0051350e+00   2.3037132e+00   2.2028146e+00   2.4058620e+00   3.6044981e+00   3.0062070e+00   3.0107283e+00   3.2237456e+00   2.9105093e+00   2.6052541e+00   2.5062865e+00   2.9018772e+00   3.1056084e+00   2.5048522e+00   1.8082911e+00   2.7043948e+00   2.7029415e+00   2.7046027e+00   2.8091099e+00   1.5248852e+00   2.6055127e+00   4.5209020e+00   3.6112573e+00   4.4212031e+00   4.1056541e+00   4.3138986e+00   5.1255338e+00   3.0133997e+00   4.8167235e+00   4.3081273e+00   4.6319211e+00   3.6205854e+00   3.8114965e+00   4.0212972e+00   3.5173798e+00   3.6449970e+00   3.8299342e+00   4.0081754e+00   5.2290121e+00   5.4254411e+00   3.5039202e+00   4.2264145e+00   3.4198378e+00   5.2270034e+00   3.4138008e+00   4.2149806e+00   4.5183778e+00   3.3145502e+00   3.4118179e+00   4.1129687e+00   4.3210760e+00   4.6261633e+00   4.9512603e+00   4.1165035e+00   3.6051692e+00   4.1014742e+00   4.6540056e+00   4.1257291e+00   4.0071257e+00   3.3129914e+00   3.9274863e+00   4.1301604e+00   3.6542046e+00   3.6112573e+00   4.4192311e+00   4.2328883e+00   3.7399948e+00   3.5155767e+00   3.7180846e+00   3.9250546e+00   3.6083191e+00   5.0436965e-01   3.0026460e-01   6.0017982e-01   3.0482299e-01   3.0017653e-01   9.0506343e-01   6.0000317e-01   4.5783248e-01   7.4269314e-01   2.4195741e-01   6.0948506e-01   4.0122873e-01   5.0855077e-01   2.0061436e-01   3.5243030e+00   3.3064692e+00   3.7147036e+00   2.8028227e+00   3.4072759e+00   3.3010449e+00   3.5048841e+00   2.1026764e+00   3.4081977e+00   2.7042302e+00   2.3098753e+00   3.0045117e+00   2.8027924e+00   3.5019450e+00   2.4045982e+00   3.2157547e+00   3.3025861e+00   2.9005886e+00   3.3047156e+00   2.7010554e+00   3.6058037e+00   2.8042692e+00   3.7029937e+00   3.5011559e+00   3.1065954e+00   3.2116669e+00   3.6121048e+00   3.8091015e+00   
3.3031148e+00   2.3014285e+00   2.6014645e+00   2.5011992e+00   2.7018905e+00   3.9020189e+00   3.3025600e+00   3.3044825e+00   3.5110031e+00   3.2044340e+00   2.9018587e+00   2.8023124e+00   3.2006932e+00   3.4022345e+00   2.8016296e+00   2.1039819e+00   3.0015958e+00   3.0009948e+00   3.0016466e+00   3.1034780e+00   1.8068049e+00   2.9019412e+00   4.8119571e+00   3.9055005e+00   4.7117433e+00   4.4027872e+00   4.6074923e+00   5.4154953e+00   3.3059205e+00   5.1096877e+00   4.6042055e+00   4.9184662e+00   3.9101579e+00   4.1056576e+00   4.3111747e+00   3.8086187e+00   3.9240074e+00   4.1158326e+00   4.3040379e+00   5.5178488e+00   5.7157885e+00   3.8018678e+00   4.5144444e+00   3.7097243e+00   5.5166376e+00   3.7063915e+00   4.5079295e+00   4.8103281e+00   3.6066590e+00   3.7054748e+00   4.4067833e+00   4.6117275e+00   4.9151622e+00   5.2317566e+00   4.4087821e+00   3.9022961e+00   4.4006562e+00   4.9323259e+00   4.4141504e+00   4.3034963e+00   3.6059708e+00   4.2144276e+00   4.4165186e+00   3.9284285e+00   3.9055005e+00   4.7106152e+00   4.5183778e+00   4.0209837e+00   3.8074712e+00   4.0090032e+00   4.2134002e+00   3.9039579e+00   6.0964891e-01   1.1019501e+00   4.0125062e-01   5.0000761e-01   1.2632947e+00   1.1001012e+00   5.2491131e-01   6.1135434e-01   7.1621884e-01   4.2268438e-01   9.0026543e-01   2.4170870e-01   5.0043084e-01   3.4064574e+00   3.2033141e+00   3.6042703e+00   2.7067267e+00   3.3031847e+00   3.2012079e+00   3.4035361e+00   2.0125822e+00   3.3019706e+00   2.6055127e+00   2.2404570e+00   2.9047685e+00   2.7070958e+00   3.4014441e+00   2.3056303e+00   3.1043374e+00   3.2029748e+00   2.8006755e+00   3.2059945e+00   2.6027034e+00   3.5064150e+00   2.7028014e+00   3.6021536e+00   3.4005708e+00   3.0020583e+00   3.1034908e+00   3.5031921e+00   3.7043162e+00   3.2030081e+00   2.2031888e+00   2.5048145e+00   2.4051640e+00   2.6022353e+00   3.8020808e+00   3.2029748e+00   3.2045605e+00   3.4036086e+00   3.1036832e+00   2.8021575e+00   2.7039988e+00   3.1011640e+00   3.3016475e+00   2.7022579e+00   2.0191796e+00   2.9020892e+00   2.9010579e+00   2.9018587e+00   3.0016913e+00   1.7203066e+00   2.8022905e+00   4.7127831e+00   3.8062227e+00   4.6064153e+00   4.3024456e+00   4.5071341e+00   5.3066177e+00   3.2074507e+00   5.0034627e+00   4.5024143e+00   4.8135231e+00   3.8088290e+00   4.0049892e+00   4.2080447e+00   3.7100251e+00   3.8270894e+00   4.0163858e+00   4.2028588e+00   5.4079977e+00   5.6075453e+00   3.7029588e+00   4.4113160e+00   3.6111044e+00   5.4066727e+00   3.6058057e+00   4.4062018e+00   4.7037812e+00   3.5065182e+00   3.6056307e+00   4.3065776e+00   4.5036239e+00   4.8058364e+00   5.1131156e+00   4.3088094e+00   3.8014142e+00   4.3005452e+00   4.8156922e+00   4.3151195e+00   4.2027764e+00   3.5064276e+00   4.1095026e+00   4.3155223e+00   3.8226872e+00   3.8062227e+00   4.6088777e+00   4.4178507e+00   3.9190516e+00   3.7073875e+00   3.9078043e+00   4.1144923e+00   3.8043348e+00   5.0043842e-01   3.0482299e-01   4.0246123e-01   8.0254500e-01   5.0043842e-01   5.2133802e-01   7.0556260e-01   2.0181667e-01   7.0008735e-01   3.0026460e-01   6.0948506e-01   2.0181667e-01   3.2490712e+00   3.0153168e+00   3.4297841e+00   2.5067523e+00   3.1166337e+00   3.0027816e+00   3.2112793e+00   1.8068048e+00   3.1183051e+00   2.4116924e+00   2.0138832e+00   2.7116615e+00   2.5059537e+00   3.2048192e+00   2.1144760e+00   2.9351753e+00   3.0063019e+00   2.6019122e+00   3.0106587e+00   2.4030297e+00   3.3125861e+00   2.5120719e+00   3.4068163e+00   3.2029877e+00   2.8162444e+00   
2.9267417e+00   3.3252407e+00   3.5189464e+00   3.0077107e+00   2.0051350e+00   2.3037132e+00   2.2028146e+00   2.4058620e+00   3.6044981e+00   3.0062070e+00   3.0107283e+00   3.2237456e+00   2.9105093e+00   2.6052541e+00   2.5062865e+00   2.9018772e+00   3.1056084e+00   2.5048522e+00   1.8082911e+00   2.7043948e+00   2.7029415e+00   2.7046027e+00   2.8091099e+00   1.5248852e+00   2.6055127e+00   4.5209020e+00   3.6112573e+00   4.4212031e+00   4.1056541e+00   4.3138986e+00   5.1255338e+00   3.0133997e+00   4.8167235e+00   4.3081273e+00   4.6319211e+00   3.6205854e+00   3.8114965e+00   4.0212972e+00   3.5173798e+00   3.6449970e+00   3.8299342e+00   4.0081754e+00   5.2290121e+00   5.4254411e+00   3.5039202e+00   4.2264145e+00   3.4198378e+00   5.2270034e+00   3.4138008e+00   4.2149806e+00   4.5183778e+00   3.3145502e+00   3.4118179e+00   4.1129687e+00   4.3210760e+00   4.6261633e+00   4.9512603e+00   4.1165035e+00   3.6051692e+00   4.1014742e+00   4.6540056e+00   4.1257291e+00   4.0071257e+00   3.3129914e+00   3.9274863e+00   4.1301604e+00   3.6542046e+00   3.6112573e+00   4.4192311e+00   4.2328883e+00   3.7399948e+00   3.5155767e+00   3.7180846e+00   3.9250546e+00   3.6083191e+00   7.0470720e-01   6.3164977e-01   7.0000303e-01   2.0000000e-01   6.4049114e-01   8.7212232e-01   4.0004442e-01   8.5437440e-01   2.2573593e-01   9.3308853e-01   6.0184622e-01   3.5152865e+00   3.2379971e+00   3.6729302e+00   2.7052554e+00   3.3425813e+00   3.2040843e+00   3.4230889e+00   2.0021220e+00   3.3530528e+00   2.6055127e+00   2.2051638e+00   2.9154879e+00   2.7227228e+00   3.4118179e+00   2.3143923e+00   3.1902650e+00   3.2048191e+00   2.8089346e+00   3.2223766e+00   2.6060092e+00   3.5107904e+00   2.7333577e+00   3.6167495e+00   3.4109217e+00   3.0488339e+00   3.1712556e+00   3.5658221e+00   3.7426726e+00   3.2127498e+00   2.2186491e+00   2.5049231e+00   2.4052960e+00   2.6139440e+00   3.8063158e+00   3.2036084e+00   3.2143157e+00   3.4603347e+00   3.1318587e+00   2.8056557e+00   2.7050980e+00   3.1020681e+00   3.3136174e+00   2.7116685e+00   2.0027889e+00   2.9047842e+00   2.9057760e+00   2.9065359e+00   3.0276459e+00   1.7091578e+00   2.8077256e+00   4.7169309e+00   3.8081280e+00   4.6399369e+00   4.3088507e+00   4.5162248e+00   5.3501952e+00   3.2067991e+00   5.0370912e+00   4.5175831e+00   4.8468186e+00   3.8290678e+00   4.0170262e+00   4.2347722e+00   3.7111714e+00   3.8289857e+00   4.0283196e+00   4.2155430e+00   5.4550926e+00   5.6472422e+00   3.7064735e+00   4.4381718e+00   3.6121030e+00   5.4538016e+00   3.6205857e+00   4.4231445e+00   4.7408825e+00   3.5189464e+00   3.6135011e+00   4.3151164e+00   4.5490925e+00   4.8546830e+00   5.1966534e+00   4.3173271e+00   3.8129545e+00   4.3038527e+00   4.8962803e+00   4.3214438e+00   4.2123903e+00   3.5127693e+00   4.1469662e+00   4.3342207e+00   3.8751227e+00   3.8081280e+00   4.6262568e+00   4.4345825e+00   3.9485180e+00   3.7201181e+00   3.9257254e+00   4.1203162e+00   3.8072915e+00   2.0181667e-01   1.1055799e+00   7.0016860e-01   4.0006662e-01   4.5147187e-01   4.1212852e-01   4.0002221e-01   5.0043084e-01   3.0474106e-01   1.2085435e-01   3.2280983e+00   3.0080445e+00   3.4166801e+00   2.5073304e+00   3.1087541e+00   3.0016255e+00   3.2064005e+00   1.8128544e+00   3.1091921e+00   2.4076934e+00   2.0429861e+00   2.7070770e+00   2.5077793e+00   3.2025221e+00   2.1086021e+00   2.9185909e+00   3.0040552e+00   2.6009247e+00   3.0080768e+00   2.4028385e+00   3.3086417e+00   2.5058827e+00   3.4038679e+00   3.2013290e+00   2.8077473e+00   2.9137660e+00   
3.3136458e+00   3.5107924e+00   3.0045274e+00   2.0036964e+00   2.3048725e+00   2.2049827e+00   2.4032211e+00   3.6028345e+00   3.0040403e+00   3.0066661e+00   3.2127504e+00   2.9065741e+00   2.6030847e+00   2.5048249e+00   2.9013288e+00   3.1029264e+00   2.5029614e+00   1.8201043e+00   2.7027522e+00   2.7015465e+00   2.7026433e+00   2.8042851e+00   1.5256523e+00   2.6032253e+00   4.5160443e+00   3.6080467e+00   4.4135519e+00   4.1035779e+00   4.3098000e+00   5.1168009e+00   3.0096881e+00   4.8103297e+00   4.3048676e+00   4.6223708e+00   3.6136097e+00   3.8074712e+00   4.0139003e+00   3.5128896e+00   3.6349183e+00   3.8220063e+00   4.0049433e+00   5.2194303e+00   5.4171979e+00   3.5033669e+00   4.2181241e+00   3.4145410e+00   5.2178541e+00   3.4088042e+00   4.2099019e+00   4.5111938e+00   3.3094784e+00   3.4078458e+00   4.1090316e+00   4.3126257e+00   4.6165627e+00   4.9348334e+00   4.1118265e+00   3.6027619e+00   4.1008192e+00   4.6366757e+00   4.1194553e+00   4.0043991e+00   3.3087928e+00   3.9177721e+00   4.1218428e+00   3.6374332e+00   3.6080467e+00   4.4133507e+00   4.2243717e+00   3.7282925e+00   3.5105217e+00   3.7119499e+00   3.9187675e+00   3.6057072e+00   1.2012865e+00   6.0184622e-01   3.3808272e-01   6.0184934e-01   5.0043084e-01   3.3818226e-01   4.1212852e-01   3.0922892e-01   2.0121983e-01   3.4273160e+00   3.2064011e+00   3.6161317e+00   2.7056828e+00   3.3075153e+00   3.2008118e+00   3.4044261e+00   2.0113827e+00   3.3090710e+00   2.6035236e+00   2.2398619e+00   2.9035670e+00   2.7083020e+00   3.4017079e+00   2.3034787e+00   3.1174706e+00   3.2019092e+00   2.8008297e+00   3.2066700e+00   2.6023240e+00   3.5046442e+00   2.7041827e+00   3.6031099e+00   3.4011658e+00   3.0071110e+00   3.1127326e+00   3.5134157e+00   3.7092355e+00   3.2025439e+00   2.2031052e+00   2.5042875e+00   2.4048325e+00   2.6019131e+00   3.8016620e+00   3.2018790e+00   3.2036084e+00   3.4118203e+00   3.1063566e+00   2.8013151e+00   2.7029498e+00   3.1008327e+00   3.3019518e+00   2.7019892e+00   2.0181988e+00   2.9013773e+00   2.9007142e+00   2.9012240e+00   3.0034647e+00   1.7168003e+00   2.8015399e+00   4.7103355e+00   3.8044833e+00   4.6117631e+00   4.3023732e+00   4.5065283e+00   5.3163061e+00   3.2051927e+00   5.0102923e+00   4.5041827e+00   4.8177760e+00   3.8091017e+00   4.0050028e+00   4.2105704e+00   3.7073402e+00   3.8208501e+00   4.0138272e+00   4.2036880e+00   5.4187310e+00   5.6164039e+00   3.7027275e+00   4.4135513e+00   3.6080195e+00   5.4177192e+00   3.6056365e+00   4.4072751e+00   4.7109359e+00   3.5056751e+00   3.6045028e+00   4.3058539e+00   4.5127175e+00   4.8161488e+00   5.1342237e+00   4.3075896e+00   3.8021528e+00   4.3006308e+00   4.8339900e+00   4.3122441e+00   4.2030792e+00   3.5048429e+00   4.1140184e+00   4.3148937e+00   3.8271256e+00   3.8044833e+00   4.6097143e+00   4.4165186e+00   3.9191998e+00   3.7067060e+00   3.9080455e+00   4.1114854e+00   3.8031381e+00   9.0000091e-01   1.2014191e+00   1.5025345e+00   7.0088477e-01   1.5012926e+00   9.0000136e-01   1.4092540e+00   1.0030724e+00   3.4932678e+00   3.2284356e+00   3.6579694e+00   2.7029237e+00   3.3320310e+00   3.2025221e+00   3.4171529e+00   2.0008116e+00   3.3407229e+00   2.6032740e+00   2.2005685e+00   2.9103582e+00   2.7153679e+00   3.4082220e+00   2.3087475e+00   3.1706684e+00   3.2030687e+00   2.8057704e+00   3.2157547e+00   2.6035236e+00   3.5075879e+00   2.7233837e+00   3.6121030e+00   3.4076329e+00   3.0364247e+00   3.1548602e+00   3.5517240e+00   3.7328844e+00   3.2086545e+00   2.2116271e+00   2.5026949e+00   
2.4028972e+00   2.6089340e+00   3.8042764e+00   3.2022967e+00   3.2108192e+00   3.4468872e+00   3.1231713e+00   2.8035152e+00   2.7029238e+00   3.1011647e+00   3.3095196e+00   2.7074599e+00   2.0008921e+00   2.9028462e+00   2.9036791e+00   2.9040745e+00   3.0197997e+00   1.7043730e+00   2.8047771e+00   4.7130344e+00   3.8056232e+00   4.6318991e+00   4.3063824e+00   4.5121922e+00   5.3416858e+00   3.2045522e+00   5.0302179e+00   4.5134527e+00   4.8380714e+00   3.8218532e+00   4.0124930e+00   4.2269486e+00   3.7079008e+00   3.8220112e+00   4.0214147e+00   4.2115853e+00   5.4465234e+00   5.6393868e+00   3.7043105e+00   4.4299700e+00   3.6085945e+00   5.4449998e+00   3.6148636e+00   4.4178341e+00   4.7331065e+00   3.5134671e+00   3.6094821e+00   4.3111775e+00   4.5398308e+00   4.8449119e+00   5.1826640e+00   4.3129030e+00   3.8093617e+00   4.3026669e+00   4.8806417e+00   4.3164776e+00   4.2091204e+00   3.5088587e+00   4.1368711e+00   4.3264627e+00   3.8592376e+00   3.8056232e+00   4.6204067e+00   4.4269727e+00   3.9374314e+00   3.7145622e+00   3.9192263e+00   4.1154804e+00   3.8050056e+00   6.1288055e-01   7.7603846e-01   4.0127250e-01   7.4329414e-01   2.0061436e-01   9.0508712e-01   6.0000635e-01   3.5152864e+00   3.2379970e+00   3.6729302e+00   2.7058598e+00   3.3425838e+00   3.2040874e+00   3.4230885e+00   2.0034894e+00   3.3530533e+00   2.6055423e+00   2.2123846e+00   2.9154880e+00   2.7237438e+00   3.4118184e+00   2.3143951e+00   3.1902650e+00   3.2048192e+00   2.8089552e+00   3.2228316e+00   2.6061975e+00   3.5107904e+00   2.7333644e+00   3.6167885e+00   3.4109240e+00   3.0488346e+00   3.1712557e+00   3.5658240e+00   3.7426726e+00   3.2127504e+00   2.2188249e+00   2.5053901e+00   2.4058634e+00   2.6139732e+00   3.8063206e+00   3.2036085e+00   3.2143126e+00   3.4603347e+00   3.1321581e+00   2.8056558e+00   2.7052554e+00   3.1021033e+00   3.3136175e+00   2.7117356e+00   2.0053411e+00   2.9048017e+00   2.9057761e+00   2.9065368e+00   3.0276467e+00   1.7105814e+00   2.8077315e+00   4.7169308e+00   3.8081328e+00   4.6399369e+00   4.3088509e+00   4.5162248e+00   5.3501952e+00   3.2068686e+00   5.0370912e+00   4.5175965e+00   4.8468145e+00   3.8290678e+00   4.0170299e+00   4.2347722e+00   3.7112059e+00   3.8289870e+00   4.0283196e+00   4.2155430e+00   5.4550814e+00   5.6472442e+00   3.7067060e+00   4.4381718e+00   3.6121048e+00   5.4538018e+00   3.6205918e+00   4.4231444e+00   4.7408825e+00   3.5189484e+00   3.6135011e+00   4.3151171e+00   4.5490925e+00   4.8546834e+00   5.1966394e+00   4.3173279e+00   3.8129558e+00   4.3038600e+00   4.8962803e+00   4.3214431e+00   4.2123903e+00   3.5127694e+00   4.1469662e+00   4.3342207e+00   3.8751227e+00   3.8081328e+00   4.6262568e+00   4.4345823e+00   3.9485180e+00   3.7201522e+00   3.9257254e+00   4.1203153e+00   3.8072915e+00   3.4085233e-01   5.0517282e-01   4.1210927e-01   4.5847767e-01   4.1317535e-01   4.0243965e-01   3.1409605e+00   2.9078360e+00   3.3230621e+00   2.4077390e+00   3.0097979e+00   2.9003941e+00   3.1042002e+00   1.7227897e+00   3.0135090e+00   2.3017319e+00   1.9755996e+00   2.6019350e+00   2.4142039e+00   3.1015567e+00   2.0014192e+00   2.8264551e+00   2.9006366e+00   2.5011717e+00   2.9082658e+00   2.3033727e+00   3.2022157e+00   2.4051094e+00   3.3034165e+00   3.1014454e+00   2.7104802e+00   2.8188502e+00   3.2195779e+00   3.4112824e+00   2.9016560e+00   1.9053006e+00   2.2068965e+00   2.1085395e+00   2.3018945e+00   3.5009585e+00   2.9005880e+00   2.9020766e+00   3.1165913e+00   2.8092629e+00   2.5004154e+00   2.4029432e+00   
2.8007533e+00   3.0017918e+00   2.4022355e+00   1.7369589e+00   2.6007937e+00   2.6003442e+00   2.6005344e+00   2.7044628e+00   1.4329832e+00   2.5008032e+00   4.4064393e+00   3.5021596e+00   4.3131639e+00   4.0016670e+00   4.2045223e+00   5.0200323e+00   2.9028411e+00   4.7130517e+00   4.2044861e+00   4.5172866e+00   3.5073617e+00   3.7038321e+00   3.9101623e+00   3.4039470e+00   3.5128093e+00   3.7092301e+00   3.9033549e+00   5.1227972e+00   5.3193887e+00   3.4029615e+00   4.1123595e+00   3.3040255e+00   5.1222477e+00   3.3043123e+00   4.1063328e+00   4.4139149e+00   3.2038047e+00   3.3025879e+00   4.0039164e+00   4.2170349e+00   4.5206139e+00   4.8441812e+00   4.0049702e+00   3.5022104e+00   4.0005674e+00   4.5418878e+00   4.0077003e+00   3.9024858e+00   3.2025221e+00   3.8146102e+00   4.0114630e+00   3.5261349e+00   3.5021596e+00   4.3081194e+00   4.1123594e+00   3.6158324e+00   3.4049076e+00   3.6064417e+00   3.8069566e+00   3.5014492e+00   8.0923926e-01   3.0474106e-01   6.5724028e-01   4.0246123e-01   5.6371422e-01   2.8500310e+00   2.6110761e+00   3.0277528e+00   2.1510447e+00   2.7141512e+00   2.6027937e+00   2.8070667e+00   1.5787180e+00   2.7167342e+00   2.0166118e+00   1.9318287e+00   2.3071806e+00   2.1715812e+00   2.8031215e+00   1.7145956e+00   2.5336675e+00   2.6035547e+00   2.2074446e+00   2.6319765e+00   2.0282636e+00   2.9076123e+00   2.1122780e+00   3.0080768e+00   2.8027924e+00   2.4144060e+00   2.5243969e+00   2.9241685e+00   3.1150226e+00   2.6049377e+00   1.6499744e+00   1.9530826e+00   1.8666228e+00   2.0130304e+00   3.2033374e+00   2.6035250e+00   2.6059866e+00   2.8207213e+00   2.5287261e+00   2.2032583e+00   2.1244184e+00   2.5066544e+00   2.7033232e+00   2.1157724e+00   1.6395342e+00   2.3072196e+00   2.3018945e+00   2.3035850e+00   2.4072314e+00   1.3788456e+00   2.2062031e+00   4.1150297e+00   3.2079717e+00   4.0170857e+00   3.7033716e+00   3.9093672e+00   4.7228082e+00   2.6158579e+00   4.4145659e+00   3.9067187e+00   4.2256164e+00   3.2143454e+00   3.4081069e+00   3.6159248e+00   3.1148584e+00   3.2358776e+00   3.4219579e+00   3.6052692e+00   4.8260874e+00   5.0225486e+00   3.1131211e+00   3.8201108e+00   3.0142352e+00   4.8248264e+00   3.0102191e+00   3.8104451e+00   4.1158426e+00   2.9100849e+00   3.0073054e+00   3.7087638e+00   3.9191144e+00   4.2237272e+00   4.5500784e+00   3.7114840e+00   3.2036323e+00   3.7015743e+00   4.2506870e+00   3.7186993e+00   3.6043148e+00   2.9081165e+00   3.5216379e+00   3.7226348e+00   3.2449757e+00   3.2079717e+00   4.0139105e+00   3.8249612e+00   3.3311289e+00   3.1134242e+00   3.3125194e+00   3.5179632e+00   3.2049019e+00   8.0051115e-01   2.2573593e-01   7.1621884e-01   3.0482299e-01   3.3530529e+00   3.1135637e+00   3.5317001e+00   2.6021962e+00   3.2157548e+00   3.1011640e+00   3.3083865e+00   1.9014069e+00   3.2199554e+00   2.5036852e+00   2.1054816e+00   2.8056557e+00   2.6057308e+00   3.3035252e+00   2.2049636e+00   3.0369970e+00   3.1023768e+00   2.7016498e+00   3.1076516e+00   2.5011992e+00   3.4059088e+00   2.6097152e+00   3.5056297e+00   3.3028597e+00   2.9166917e+00   3.0276459e+00   3.4273156e+00   3.6176286e+00   3.1043337e+00   2.1032882e+00   2.4011650e+00   2.3009622e+00   2.5032633e+00   3.7024058e+00   3.1022094e+00   3.1056121e+00   3.3243222e+00   3.0101957e+00   2.7018643e+00   2.6020065e+00   3.0005955e+00   3.2040843e+00   2.6027120e+00   1.9019962e+00   2.8015673e+00   2.8013346e+00   2.8018959e+00   2.9083044e+00   1.6052507e+00   2.7022567e+00   4.6121162e+00   3.7052383e+00   4.5194334e+00   
4.2036849e+00   4.4088275e+00   5.2263180e+00   3.1053076e+00   4.9177739e+00   4.4072684e+00   4.7260117e+00   3.7138970e+00   3.9076272e+00   4.1167962e+00   3.6081590e+00   3.7238338e+00   3.9176170e+00   4.1063328e+00   5.3296625e+00   5.5255577e+00   3.6022190e+00   4.3201293e+00   3.5092121e+00   5.3285444e+00   3.5088064e+00   4.3111749e+00   4.6192198e+00   3.4084525e+00   3.5063337e+00   4.2079639e+00   4.4229098e+00   4.7273231e+00   5.0541259e+00   4.2099019e+00   3.7043105e+00   4.2011108e+00   4.7534769e+00   4.2147199e+00   4.1050714e+00   3.4064570e+00   4.0228303e+00   4.2200398e+00   3.7407842e+00   3.7052383e+00   4.5139612e+00   4.3214432e+00   3.8271242e+00   3.6094426e+00   3.8122429e+00   4.0138280e+00   3.7039439e+00   6.3178534e-01   2.0121983e-01   5.0043084e-01   3.1326249e+00   2.9095058e+00   3.3192760e+00   2.4306373e+00   3.0110559e+00   2.9028946e+00   3.1074604e+00   1.7872757e+00   3.0111989e+00   2.3143923e+00   2.0898615e+00   2.6089349e+00   2.4398792e+00   3.1033306e+00   2.0139907e+00   2.8220753e+00   2.9050462e+00   2.5045154e+00   2.9220490e+00   2.3159976e+00   3.2100379e+00   2.4095513e+00   3.3067017e+00   3.1022615e+00   2.7099669e+00   2.8165723e+00   3.2163877e+00   3.4125151e+00   2.9058641e+00   1.9245956e+00   2.2287984e+00   2.1344529e+00   2.3089795e+00   3.5039202e+00   2.9050287e+00   2.9078401e+00   3.1149135e+00   2.8183214e+00   2.5042875e+00   2.4160514e+00   2.8047612e+00   3.0036601e+00   2.4102273e+00   1.8223604e+00   2.6061038e+00   2.6023240e+00   2.6040822e+00   2.7058598e+00   1.5384093e+00   2.5058827e+00   4.4178532e+00   3.5098740e+00   4.3151587e+00   4.0041429e+00   4.2110099e+00   5.0184788e+00   2.9154880e+00   4.7114737e+00   4.2061525e+00   4.5248210e+00   3.5155767e+00   3.7089995e+00   3.9157422e+00   3.4167041e+00   3.5401921e+00   3.7249704e+00   3.9056466e+00   5.1213055e+00   5.3189433e+00   3.4098171e+00   4.1203252e+00   3.3172667e+00   5.1196436e+00   3.3110366e+00   4.1111102e+00   4.4124656e+00   3.2115770e+00   3.3091938e+00   4.0103680e+00   4.2141675e+00   4.5185015e+00   4.8383751e+00   4.0135080e+00   3.5035589e+00   4.0015001e+00   4.5406852e+00   4.0218666e+00   3.9049967e+00   3.2103515e+00   3.8201314e+00   4.0245707e+00   3.5427188e+00   3.5098740e+00   4.3149009e+00   4.1273075e+00   3.6322643e+00   3.4139979e+00   3.6137088e+00   3.8212216e+00   3.5066417e+00   7.1621748e-01   4.0002221e-01   3.3858048e+00   3.1257747e+00   3.5528332e+00   2.6049377e+00   3.2291581e+00   3.1026235e+00   3.3158954e+00   1.9043238e+00   3.2362541e+00   2.5062158e+00   2.1151984e+00   2.8111676e+00   2.6144115e+00   3.3074459e+00   2.2106051e+00   3.0644792e+00   3.1042002e+00   2.7046286e+00   3.1155579e+00   2.5035275e+00   3.4095576e+00   2.6210983e+00   3.5110555e+00   3.3064076e+00   2.9323802e+00   3.0497665e+00   3.4467650e+00   3.6304824e+00   3.1087162e+00   2.1099919e+00   2.4034951e+00   2.3034398e+00   2.5081992e+00   3.7045399e+00   3.1036554e+00   3.1105454e+00   3.3425812e+00   3.0208515e+00   2.7039990e+00   2.6042124e+00   3.0014077e+00   3.2086215e+00   2.6068616e+00   1.9064532e+00   2.8033825e+00   2.8033607e+00   2.8042643e+00   2.9174390e+00   1.6121856e+00   2.7050791e+00   4.6165570e+00   3.7079065e+00   4.5303834e+00   4.2064764e+00   4.4135512e+00   5.2388103e+00   3.1079784e+00   4.9275471e+00   4.4124760e+00   4.7382279e+00   3.7227931e+00   3.9129335e+00   4.1268500e+00   3.6117351e+00   3.7315577e+00   3.9257254e+00   4.1111068e+00   5.3430927e+00   5.5370582e+00   3.6046347e+00   
4.3307527e+00   3.5130597e+00   5.3416790e+00   3.5154388e+00   4.3179254e+00   4.6302391e+00   3.4146546e+00   3.5107904e+00   4.2125004e+00   4.4361489e+00   4.7415152e+00   5.0768435e+00   4.2149814e+00   3.7084460e+00   4.2023583e+00   4.7769945e+00   4.2205945e+00   4.1089343e+00   3.4107328e+00   4.0362383e+00   4.2295174e+00   3.7618281e+00   3.7079065e+00   4.5213131e+00   4.3307527e+00   3.8409517e+00   3.6158715e+00   3.8200965e+00   4.0195882e+00   3.7063858e+00   4.1210927e-01   3.2157661e+00   3.0055754e+00   3.4095823e+00   2.5182899e+00   3.1060146e+00   3.0020171e+00   3.2051958e+00   1.8468437e+00   3.1049591e+00   2.4099079e+00   2.1180305e+00   2.7069308e+00   2.5226256e+00   3.2022186e+00   2.1097489e+00   2.9102819e+00   3.0041469e+00   2.6022650e+00   3.0136614e+00   2.4087525e+00   3.3085287e+00   2.5053901e+00   3.4040883e+00   3.2011770e+00   2.8045980e+00   2.9078384e+00   3.3077457e+00   3.5074140e+00   3.0043867e+00   2.0123013e+00   2.3159429e+00   2.2186309e+00   2.4051787e+00   3.6030023e+00   3.0041461e+00   3.0063027e+00   3.2075252e+00   2.9100849e+00   2.6032671e+00   2.5097006e+00   2.9028412e+00   3.1024718e+00   2.5058120e+00   1.8685011e+00   2.7040002e+00   2.7016556e+00   2.7029487e+00   2.8031364e+00   1.5747356e+00   2.6040007e+00   4.5158117e+00   3.6083256e+00   4.4100325e+00   4.1032589e+00   4.3091726e+00   5.1114855e+00   3.0117223e+00   4.8065772e+00   4.3039464e+00   4.6187506e+00   3.6121095e+00   3.8069169e+00   4.0114753e+00   3.5138377e+00   3.6350529e+00   3.8212252e+00   4.0040508e+00   5.2135438e+00   5.4123528e+00   3.5063866e+00   4.2155461e+00   3.4147661e+00   5.2119900e+00   3.4083227e+00   4.2084707e+00   4.5071259e+00   3.3090897e+00   3.4075560e+00   4.1085724e+00   4.3075896e+00   4.6108642e+00   4.9236635e+00   4.1113689e+00   3.6022523e+00   4.1009647e+00   4.6262707e+00   4.1190929e+00   4.0037820e+00   3.3086298e+00   3.9141023e+00   4.1202674e+00   3.6321858e+00   3.6083256e+00   4.4117993e+00   4.2229640e+00   3.7257625e+00   3.5107118e+00   3.7106642e+00   3.9184747e+00   3.6056703e+00   3.3320214e+00   3.1087156e+00   3.5191298e+00   2.6048201e+00   3.2097208e+00   3.1014199e+00   3.3064692e+00   1.9064149e+00   3.2109426e+00   2.5061784e+00   2.1231492e+00   2.8062729e+00   2.6052659e+00   3.3025806e+00   2.2069764e+00   3.0213628e+00   3.1034889e+00   2.7008785e+00   3.1069083e+00   2.5018386e+00   3.4076243e+00   2.6061038e+00   3.5039680e+00   3.3015400e+00   2.9090661e+00   3.0158419e+00   3.4158824e+00   3.6117739e+00   3.1042038e+00   2.1025721e+00   2.4028385e+00   2.3026344e+00   2.5028039e+00   3.7026090e+00   3.1034538e+00   3.1060427e+00   3.3145497e+00   3.0064351e+00   2.7026184e+00   2.6035547e+00   3.0010106e+00   3.2029877e+00   2.6024546e+00   1.9099608e+00   2.8022622e+00   2.8013859e+00   2.8022964e+00   2.9047883e+00   1.6144390e+00   2.7027522e+00   4.6146429e+00   3.7070840e+00   4.5144445e+00   4.2034836e+00   4.4092638e+00   5.2185417e+00   3.1080879e+00   4.9117250e+00   4.4052231e+00   4.7224998e+00   3.7130492e+00   3.9071930e+00   4.1140176e+00   3.6112039e+00   3.7307535e+00   3.9200662e+00   4.1050716e+00   5.3212806e+00   5.5187164e+00   3.6026912e+00   4.3179254e+00   3.5126721e+00   5.3198415e+00   3.5083463e+00   4.3098506e+00   4.6126508e+00   3.4087523e+00   3.5071392e+00   4.2084730e+00   4.4144909e+00   4.7184838e+00   5.0381916e+00   4.2109654e+00   3.7029588e+00   4.2008334e+00   4.7393168e+00   4.2176488e+00   4.1043916e+00   3.4078439e+00   4.0181863e+00   4.2205945e+00   
3.7363783e+00   3.7070840e+00   4.5130589e+00   4.3227927e+00   3.8267268e+00   3.6097220e+00   3.8114954e+00   4.0168944e+00   3.7050916e+00   6.0017982e-01   2.0181667e-01   1.5160570e+00   5.2133802e-01   1.3002451e+00   7.0008584e-01   2.1344529e+00   4.1212852e-01   1.8029854e+00   2.0342311e+00   1.1019599e+00   1.1393620e+00   9.0026497e-01   1.4543172e+00   3.3813251e-01   1.4000061e+00   1.2053003e+00   1.0426638e+00   1.4134492e+00   1.1005364e+00   9.3424697e-01   7.8890806e-01   9.0142636e-01   6.1119558e-01   4.1315633e-01   4.0125062e-01   3.6452132e-01   1.0001753e+00   1.4158897e+00   1.5195166e+00   1.5300146e+00   1.2201636e+00   1.0039209e+00   1.6000032e+00   1.0000457e+00   3.0017653e-01   9.3329055e-01   1.4017696e+00   1.5061610e+00   1.5012947e+00   9.0002570e-01   1.2124837e+00   2.0445123e+00   1.4012283e+00   1.3008855e+00   1.3009222e+00   8.0291749e-01   2.0440118e+00   1.3027556e+00   1.3788457e+00   1.2029161e+00   1.2089253e+00   9.3446811e-01   1.1298636e+00   1.9014076e+00   2.1006232e+00   1.6001224e+00   1.1139906e+00   1.4544336e+00   6.3912709e-01   7.1183012e-01   8.5409862e-01   1.3086191e+00   1.2638465e+00   9.2745734e-01   8.1117067e-01   2.0027889e+00   2.2028146e+00   1.1270327e+00   1.0776190e+00   1.4019372e+00   2.0011307e+00   7.2044167e-01   1.0208709e+00   1.3002450e+00   8.0488008e-01   9.0145141e-01   9.4622126e-01   1.1000289e+00   1.4009513e+00   1.7086186e+00   9.7694377e-01   7.0911112e-01   1.0224133e+00   1.4220925e+00   1.0923537e+00   8.2631334e-01   1.0008620e+00   7.8886139e-01   1.0777307e+00   9.0140221e-01   1.2029161e+00   1.2362755e+00   1.1897289e+00   9.0534502e-01   7.9871893e-01   6.5724028e-01   9.8998705e-01   1.1010807e+00   5.2133179e-01   1.0171340e+00   4.0004442e-01   7.0470720e-01   2.0181667e-01   1.5698091e+00   3.0922892e-01   1.2049541e+00   1.5104875e+00   5.0476836e-01   1.0069214e+00   3.4085233e-01   9.6593231e-01   3.0026460e-01   8.0004443e-01   6.6334810e-01   1.0000152e+00   8.7372177e-01   5.0855077e-01   5.2524663e-01   7.0462697e-01   4.2362917e-01   3.0915245e-01   2.2608083e-01   4.5784410e-01   5.0517282e-01   4.1209001e-01   1.0313359e+00   9.9085945e-01   1.0179856e+00   6.9600743e-01   6.3912943e-01   1.0000152e+00   4.0125062e-01   3.0482299e-01   9.0002615e-01   8.0254500e-01   9.3735629e-01   9.1446938e-01   3.0490481e-01   6.9600743e-01   1.4994060e+00   8.0928056e-01   7.0184453e-01   7.0184453e-01   3.1328089e-01   1.5989637e+00   7.0918894e-01   1.5237054e+00   6.9987517e-01   1.4060443e+00   1.1002025e+00   1.3061181e+00   2.1141220e+00   1.5030978e+00   1.8055480e+00   1.3062025e+00   1.6223413e+00   6.3164977e-01   8.1112909e-01   1.0095513e+00   8.0713433e-01   9.2867113e-01   9.0155393e-01   1.0001753e+00   2.2182690e+00   2.4124980e+00   1.0039060e+00   1.2201577e+00   8.1343016e-01   2.2176846e+00   5.2491734e-01   1.2037520e+00   1.5066999e+00   4.2362917e-01   4.2362917e-01   1.1060937e+00   1.3130978e+00   1.6177611e+00   1.9760242e+00   1.1138953e+00   6.0948506e-01   1.1056693e+00   1.6779798e+00   1.1527746e+00   1.0001601e+00   4.2362917e-01   9.1892454e-01   1.1528477e+00   8.3183672e-01   6.9987517e-01   1.4094144e+00   1.2633467e+00   8.5440680e-01   7.2036951e-01   7.1629303e-01   9.6576136e-01   6.3322667e-01   1.4267554e+00   4.2268438e-01   1.2004262e+00   6.0035621e-01   2.0860325e+00   3.4342562e-01   1.7133162e+00   1.9641993e+00   1.0207260e+00   1.0897469e+00   8.0008964e-01   1.4650300e+00   5.0043084e-01   1.3002407e+00   1.1303267e+00   9.3424659e-01   
1.3473688e+00   1.0001604e+00   9.6593231e-01   6.7616545e-01   8.0097499e-01   6.3192325e-01   5.0437695e-01   3.0026460e-01   2.2608083e-01   9.0142636e-01   1.4861824e+00   1.4580174e+00   1.4889602e+00   1.1900969e+00   9.0142681e-01   1.5001212e+00   9.0166476e-01   2.2538848e-01   8.3187290e-01   1.3130978e+00   1.4197078e+00   1.4012600e+00   8.0046764e-01   1.1544060e+00   2.0075255e+00   1.3063533e+00   1.2089835e+00   1.2089313e+00   7.4275547e-01   2.0887699e+00   1.2190319e+00   1.1935069e+00   1.1010807e+00   1.0087396e+00   7.4335736e-01   9.3424697e-01   1.7023957e+00   2.0003507e+00   1.4002035e+00   9.1449234e-01   1.2643523e+00   5.2167829e-01   5.5450500e-01   6.7616902e-01   1.2049541e+00   1.1528553e+00   8.1112984e-01   6.1119558e-01   1.8053679e+00   2.0034894e+00   1.0142484e+00   9.0155438e-01   1.3009222e+00   1.8029948e+00   6.1119267e-01   8.2425704e-01   1.1002025e+00   7.0176271e-01   8.0046685e-01   7.5564478e-01   9.0026588e-01   1.2017042e+00   1.5269837e+00   7.9871893e-01   6.0201716e-01   8.6051471e-01   1.2366099e+00   9.4532171e-01   6.3309012e-01   9.0026588e-01   6.3164729e-01   9.3308853e-01   8.0004443e-01   1.1010807e+00   1.0426516e+00   1.0426760e+00   8.0051115e-01   6.8161057e-01   5.2491734e-01   8.6084272e-01   1.0001753e+00   1.0116865e+00   5.6370994e-01   1.0599087e+00   7.4329527e-01   1.1110092e+00   4.1212852e-01   5.6838732e-01   7.0478886e-01   5.0436965e-01   7.7598796e-01   6.0948506e-01   1.2192920e+00   7.1629303e-01   4.2270142e-01   7.1629303e-01   2.2608083e-01   9.7033357e-01   6.3164729e-01   9.6576136e-01   7.5503094e-01   9.1446896e-01   1.1138955e+00   1.3139296e+00   1.2705641e+00   6.5724028e-01   5.0894102e-01   2.2573593e-01   3.3813251e-01   4.1212852e-01   1.1025819e+00   7.1629303e-01   1.1039833e+00   1.2272550e+00   8.0245746e-01   7.0000303e-01   2.0000000e-01   4.1210927e-01   7.7598796e-01   3.3813251e-01   7.1700774e-01   4.0125062e-01   7.0017011e-01   6.0035305e-01   7.4329414e-01   1.0008768e+00   5.0043084e-01   2.0249458e+00   1.1061923e+00   2.0081838e+00   1.6061519e+00   1.8167511e+00   2.7171724e+00   6.3925756e-01   2.3875202e+00   1.8286180e+00   2.2239101e+00   1.2353587e+00   1.3278587e+00   1.6038059e+00   1.0207533e+00   1.2407946e+00   1.3868868e+00   1.5269837e+00   2.8396169e+00   2.9941208e+00   1.0030871e+00   1.8005082e+00   9.3733589e-01   2.8269381e+00   9.7033357e-01   1.7520952e+00   2.1193712e+00   8.6676847e-01   9.4912864e-01   1.6147493e+00   1.9768316e+00   2.2674825e+00   2.7199050e+00   1.6193612e+00   1.1298636e+00   1.6009488e+00   2.4288142e+00   1.6617386e+00   1.5199103e+00   8.6676847e-01   1.5881447e+00   1.6785116e+00   1.4886759e+00   1.1061923e+00   1.9457481e+00   1.7813291e+00   1.3946348e+00   1.0498347e+00   1.2771155e+00   1.4848797e+00   1.1157320e+00   8.0004523e-01   5.0043842e-01   1.6743483e+00   2.0121983e-01   1.3061139e+00   1.5464046e+00   6.0964597e-01   7.1183012e-01   4.0006662e-01   1.0776296e+00   3.0922892e-01   9.0002570e-01   7.3084171e-01   6.0184622e-01   9.3446811e-01   6.1135434e-01   6.0964597e-01   3.4080442e-01   4.1210927e-01   3.0490481e-01   2.2608083e-01   3.0482299e-01   4.0363334e-01   5.0001522e-01   1.1298636e+00   1.0440350e+00   1.0803561e+00   7.8935898e-01   5.6347978e-01   1.1000098e+00   6.3165225e-01   3.0482299e-01   5.0126466e-01   9.0511169e-01   1.0088926e+00   1.0001903e+00   4.0125062e-01   7.4335736e-01   1.5972311e+00   9.0142681e-01   8.0296037e-01   8.0250202e-01   3.4085233e-01   1.7081446e+00   8.0883841e-01   
1.4329858e+00   7.2036951e-01   1.3050153e+00   1.0001753e+00   1.2089252e+00   2.0109333e+00   1.6000184e+00   1.7036944e+00   1.2001396e+00   1.5327217e+00   5.7608844e-01   7.0462844e-01   9.1449234e-01   8.1156529e-01   9.3733552e-01   8.5583415e-01   9.0029018e-01   2.1191883e+00   2.3098756e+00   6.3912709e-01   1.1290757e+00   9.0532049e-01   2.1139617e+00   3.4085233e-01   1.1074834e+00   1.4044980e+00   3.4080442e-01   4.2362917e-01   1.0087250e+00   1.2089253e+00   1.5132032e+00   1.8748226e+00   1.0207260e+00   5.0042326e-01   1.0008620e+00   1.5694554e+00   1.0837679e+00   9.0053003e-01   5.0517282e-01   8.2671175e-01   1.0777411e+00   8.1156529e-01   7.2036951e-01   1.3133662e+00   1.1910068e+00   8.2425704e-01   4.5847767e-01   6.3178534e-01   9.1590889e-01   6.3322667e-01   6.3322667e-01   1.2193537e+00   9.0000091e-01   6.3165225e-01   1.0599087e+00   3.1328089e-01   6.3451734e-01   4.0127250e-01   9.0000091e-01   1.0001604e+00   2.2573593e-01   4.1212852e-01   6.3178534e-01   6.0202028e-01   5.2524663e-01   5.2132556e-01   6.1135434e-01   4.0125062e-01   7.0008584e-01   9.0002615e-01   1.1001014e+00   1.0039209e+00   3.0482299e-01   1.0001751e+00   7.0478886e-01   8.0296037e-01   6.0000952e-01   6.0366256e-01   3.0915245e-01   6.0365948e-01   1.0001903e+00   6.3164977e-01   4.0125062e-01   5.0476836e-01   2.2608083e-01   4.0127250e-01   5.0043842e-01   1.2102248e+00   3.0017653e-01   3.0482299e-01   3.0008832e-01   5.0043084e-01   1.5012947e+00   4.0000000e-01   1.5653766e+00   6.7616902e-01   1.5829749e+00   1.1074742e+00   1.3373141e+00   2.2681751e+00   8.0291671e-01   1.9315820e+00   1.3458100e+00   1.7862938e+00   8.7372177e-01   8.7209348e-01   1.2093969e+00   7.1700774e-01   1.1055705e+00   1.0604287e+00   1.0451812e+00   2.3834499e+00   2.5286011e+00   6.3322667e-01   1.3903623e+00   7.0462697e-01   2.3796582e+00   6.3912943e-01   1.2792049e+00   1.6907308e+00   5.6595488e-01   5.3943256e-01   1.1400339e+00   1.5965952e+00   1.8639835e+00   2.3424496e+00   1.1635325e+00   6.7626502e-01   1.1005460e+00   2.0903382e+00   1.2459141e+00   1.0236548e+00   5.0894102e-01   1.2528590e+00   1.2949162e+00   1.2662318e+00   6.7616902e-01   1.4817248e+00   1.3908238e+00   1.1389163e+00   6.9600743e-01   8.9540816e-01   1.0858512e+00   6.3192325e-01   1.5890088e+00   4.2270142e-01   1.1330776e+00   1.5368468e+00   5.2491734e-01   1.1187430e+00   4.0243965e-01   1.1139906e+00   4.1420960e-01   7.0096858e-01   7.3895268e-01   1.1000100e+00   9.3861512e-01   4.0127250e-01   7.1708289e-01   8.0004523e-01   5.2167208e-01   4.5784410e-01   3.6452132e-01   5.6371422e-01   4.2270142e-01   4.1317535e-01   1.2159868e+00   1.0567817e+00   1.1138092e+00   8.3387677e-01   6.1119267e-01   9.0029064e-01   3.0482299e-01   4.0125062e-01   1.0003196e+00   7.4395693e-01   9.3459651e-01   8.5617086e-01   3.0922892e-01   8.0073117e-01   1.5493206e+00   7.5564478e-01   6.4049114e-01   6.4049114e-01   4.5784410e-01   1.7404389e+00   6.9600743e-01   1.3253457e+00   6.4049114e-01   1.2202193e+00   9.0142636e-01   1.1056785e+00   1.9348400e+00   1.4092540e+00   1.6176783e+00   1.1286101e+00   1.4350761e+00   4.5148429e-01   6.7720957e-01   8.1757693e-01   8.2671175e-01   8.1937731e-01   7.4263078e-01   8.0055465e-01   2.0418418e+00   2.2277117e+00   1.1002025e+00   1.0286508e+00   7.2044167e-01   2.0415798e+00   6.0035305e-01   1.0039060e+00   1.3253497e+00   5.0043842e-01   3.1328089e-01   9.0999313e-01   1.1528476e+00   1.4548293e+00   1.8637334e+00   9.1892454e-01   5.2133179e-01   9.3310976e-01   
1.5801693e+00   9.6572569e-01   8.0008964e-01   3.4085233e-01   7.5508853e-01   9.6674360e-01   7.4618926e-01   6.4049114e-01   1.2101609e+00   1.0782105e+00   7.2113820e-01   8.0093081e-01   5.2524663e-01   7.8886139e-01   4.5847767e-01   1.7572657e+00   6.1288055e-01   4.0125062e-01   1.0858512e+00   1.1133984e+00   1.4858469e+00   7.1779518e-01   1.8187119e+00   1.2137020e+00   9.6591433e-01   1.4146346e+00   7.4263078e-01   1.5359852e+00   1.2093243e+00   1.7083042e+00   1.4853863e+00   1.5241361e+00   1.7234436e+00   1.9756319e+00   1.9771636e+00   1.3035495e+00   8.0008884e-01   6.3164977e-01   6.0948212e-01   9.1449234e-01   1.8179429e+00   1.2062153e+00   1.3486924e+00   1.8673780e+00   1.4543172e+00   8.7240114e-01   7.4329527e-01   1.1055892e+00   1.4158897e+00   9.3310976e-01   1.1269424e-01   9.3351278e-01   9.7600992e-01   9.6953662e-01   1.3458100e+00   3.0490481e-01   9.0320459e-01   2.7259033e+00   1.8109877e+00   2.7506971e+00   2.3226569e+00   2.5372438e+00   3.4590995e+00   1.2089192e+00   3.1281646e+00   2.5610212e+00   2.9500632e+00   1.9406671e+00   2.0633570e+00   2.3443688e+00   1.7168003e+00   1.8708330e+00   2.0857354e+00   2.2573821e+00   3.5725062e+00   3.7336869e+00   1.7229558e+00   2.5362836e+00   1.6198349e+00   3.5690915e+00   1.7116793e+00   2.4776152e+00   2.8599555e+00   1.6016303e+00   1.6534235e+00   2.3372717e+00   2.7159844e+00   3.0094888e+00   3.4430661e+00   2.3406025e+00   1.8663319e+00   2.3090404e+00   3.1586433e+00   2.3454995e+00   2.2408459e+00   1.5471213e+00   2.3192230e+00   2.4059451e+00   2.1751377e+00   1.8109877e+00   2.6757243e+00   2.4966678e+00   2.1111734e+00   1.7901165e+00   2.0121175e+00   2.1471399e+00   1.8133657e+00   1.4043036e+00   1.6388784e+00   7.0470867e-01   7.7652636e-01   5.0001522e-01   1.1269424e+00   2.2608083e-01   1.0000158e+00   8.0928056e-01   7.0470867e-01   1.0215068e+00   7.1708289e-01   6.3164977e-01   4.2362917e-01   5.0002283e-01   3.0474106e-01   2.0121983e-01   2.2608083e-01   4.5080200e-01   6.0017982e-01   1.1529284e+00   1.1298636e+00   1.1544060e+00   8.5406674e-01   6.3322667e-01   1.2000066e+00   6.3309258e-01   2.2608083e-01   6.0201716e-01   1.0030721e+00   1.1060937e+00   1.1001110e+00   5.0001522e-01   8.2458478e-01   1.6747799e+00   1.0008617e+00   9.0140221e-01   9.0140131e-01   4.1209001e-01   1.7511598e+00   9.0506299e-01   1.4854079e+00   8.3187290e-01   1.3139296e+00   1.0032293e+00   1.2362702e+00   2.0078120e+00   1.7001329e+00   1.7019430e+00   1.2016381e+00   1.5675442e+00   7.1700909e-01   7.4275547e-01   9.6574369e-01   9.3541878e-01   1.1298552e+00   1.0208844e+00   9.0506343e-01   2.1136134e+00   2.3085898e+00   7.4618926e-01   1.1897982e+00   1.0208709e+00   2.1090362e+00   5.0894102e-01   1.1286018e+00   1.4024091e+00   5.2167829e-01   5.6595908e-01   1.0426638e+00   1.2037520e+00   1.5079206e+00   1.8503663e+00   1.0776296e+00   5.0477564e-01   1.0032296e+00   1.5611241e+00   1.1910693e+00   9.0511169e-01   6.3178782e-01   9.0184172e-01   1.1896660e+00   1.0032443e+00   8.3187290e-01   1.3450688e+00   1.3020492e+00   1.0087252e+00   6.1990228e-01   7.4263078e-01   1.0458540e+00   7.3084171e-01   7.0918894e-01   7.0176271e-01   8.1112984e-01   9.6574336e-01   4.1317535e-01   1.5005626e+00   6.1119558e-01   6.0964597e-01   1.0116724e+00   4.1315633e-01   9.3848935e-01   9.0000136e-01   1.1896660e+00   9.6574369e-01   1.2003597e+00   1.4006465e+00   1.6096629e+00   1.5401713e+00   8.2421923e-01   5.3914287e-01   3.6259865e-01   4.2362917e-01   6.0017665e-01   1.2189701e+00   
6.0202028e-01   8.7212232e-01   1.5067961e+00   1.1024820e+00   4.1317535e-01   3.0490481e-01   5.0477564e-01   9.3329017e-01   6.0018299e-01   6.1845783e-01   4.1210927e-01   5.0894102e-01   5.0477564e-01   1.0008620e+00   9.0029064e-01   5.0043842e-01   2.1169442e+00   1.2049539e+00   2.2014913e+00   1.7227908e+00   1.9366943e+00   2.8973091e+00   6.0383105e-01   2.5621105e+00   1.9756002e+00   2.3854144e+00   1.4163126e+00   1.4857201e+00   1.8044011e+00   1.1074834e+00   1.2661803e+00   1.4994060e+00   1.6741010e+00   3.0107625e+00   3.1586112e+00   1.1298552e+00   1.9796588e+00   1.0095370e+00   3.0090568e+00   1.1900276e+00   1.8963668e+00   2.3135911e+00   1.0782107e+00   1.0783219e+00   1.7384347e+00   2.2009981e+00   2.4793129e+00   2.9421182e+00   1.7402224e+00   1.3018103e+00   1.7072540e+00   2.6745468e+00   1.7367211e+00   1.6485141e+00   9.6691372e-01   1.8209763e+00   1.8293641e+00   1.7435092e+00   1.2049539e+00   2.0881329e+00   1.9090398e+00   1.6063540e+00   1.2407432e+00   1.4664722e+00   1.5386307e+00   1.2093243e+00   1.0943600e+00   1.0030868e+00   1.3272142e+00   9.1446938e-01   1.7295996e+00   1.1336109e+00   8.7209296e-01   1.2643054e+00   6.3912943e-01   1.4396100e+00   1.1299441e+00   1.5271597e+00   1.3148232e+00   1.4267817e+00   1.6268514e+00   1.8468437e+00   1.8305154e+00   1.1759988e+00   7.4262850e-01   5.2491734e-01   5.2167208e-01   8.5586571e-01   1.6206300e+00   1.1291536e+00   1.4631182e+00   1.7576822e+00   1.3254284e+00   1.0172489e+00   6.0605366e-01   9.1894698e-01   1.2951152e+00   8.3187290e-01   3.0474106e-01   8.1500329e-01   1.0396215e+00   9.6150595e-01   1.2528590e+00   5.6347978e-01   8.7240114e-01   2.5401214e+00   1.6166178e+00   2.5672376e+00   2.1256636e+00   2.3434890e+00   3.2706021e+00   1.0235120e+00   2.9379053e+00   2.3650373e+00   2.7819820e+00   1.7939428e+00   1.8718670e+00   2.1669217e+00   1.5269837e+00   1.7152309e+00   1.9257519e+00   2.0672316e+00   3.3962101e+00   3.5413320e+00   1.5241169e+00   2.3604685e+00   1.4422764e+00   3.3805191e+00   1.5352907e+00   2.2985560e+00   2.6785349e+00   1.4314424e+00   1.4886759e+00   2.1422340e+00   2.5406863e+00   2.8290490e+00   3.2861997e+00   2.1472832e+00   1.6782365e+00   2.1087015e+00   2.9939447e+00   2.1830772e+00   2.0525906e+00   1.3935744e+00   2.1564350e+00   2.2287876e+00   2.0426476e+00   1.6166178e+00   2.4889554e+00   2.3256006e+00   1.9561612e+00   1.6066555e+00   1.8386981e+00   2.0009986e+00   1.6313110e+00   8.0883916e-01   5.0043842e-01   6.0202028e-01   8.0004602e-01   3.3808272e-01   5.0437695e-01   8.0093081e-01   5.2838320e-01   6.0201716e-01   2.5399984e-01   7.2036819e-01   5.0517282e-01   5.0043842e-01   7.0008584e-01   9.1424701e-01   9.0157896e-01   3.0017653e-01   7.2044167e-01   6.2656178e-01   6.6334810e-01   3.6259865e-01   9.0026588e-01   5.0436235e-01   4.1212852e-01   8.0879701e-01   7.0478886e-01   3.0482299e-01   5.2201750e-01   4.5847767e-01   4.0125062e-01   4.1317535e-01   1.0363096e+00   3.4080442e-01   3.0474106e-01   2.2573593e-01   3.0490481e-01   1.2204839e+00   2.4195741e-01   1.8101835e+00   9.0166476e-01   1.7375279e+00   1.4002005e+00   1.6032003e+00   2.4532171e+00   1.0032443e+00   2.1331916e+00   1.6052507e+00   1.9422649e+00   9.1894698e-01   1.1025819e+00   1.3276411e+00   8.1719606e-01   1.0142626e+00   1.1298636e+00   1.3025621e+00   2.5612597e+00   2.7430487e+00   9.0155438e-01   1.5299064e+00   7.1708289e-01   2.5605373e+00   7.0633229e-01   1.5079428e+00   1.8443132e+00   6.0383105e-01   7.0096708e-01   1.4023806e+00   
1.6740160e+00   1.9756002e+00   2.3801033e+00   1.4049093e+00   9.0142636e-01   1.4001717e+00   2.0898615e+00   1.4183606e+00   1.3009222e+00   6.0184622e-01   1.2661803e+00   1.4267527e+00   1.1084368e+00   9.0166476e-01   1.7108631e+00   1.5299252e+00   1.0782751e+00   8.1343016e-01   1.0116721e+00   1.2193537e+00   9.0026497e-01   7.9148746e-01   7.0993998e-01   9.3541878e-01   8.1937731e-01   5.0043084e-01   5.6370994e-01   4.1212852e-01   1.0782753e+00   6.0184622e-01   9.0557807e-01   7.4269314e-01   7.0633229e-01   8.2841920e-01   9.1695534e-01   1.0756891e+00   7.3084048e-01   5.2491131e-01   5.0085236e-01   5.0476836e-01   5.0085236e-01   1.1074740e+00   8.3916809e-01   1.2049539e+00   9.6501813e-01   4.2270142e-01   8.0291749e-01   5.0855077e-01   5.3943256e-01   8.2631334e-01   4.0243965e-01   1.0207260e+00   5.2524663e-01   8.0055465e-01   7.0184453e-01   7.0184453e-01   1.0777307e+00   6.0366256e-01   2.0696799e+00   1.1543334e+00   1.9286352e+00   1.6071727e+00   1.8312163e+00   2.6295459e+00   1.1153247e+00   2.3155068e+00   1.8040969e+00   2.1901871e+00   1.2562955e+00   1.3263639e+00   1.5518083e+00   1.1271226e+00   1.4557657e+00   1.4915559e+00   1.5136393e+00   2.7555974e+00   2.9267466e+00   1.0030718e+00   1.7744103e+00   1.0843333e+00   2.7324083e+00   9.6953662e-01   1.7456060e+00   2.0249458e+00   9.1568820e-01   1.0151258e+00   1.6309461e+00   1.8315348e+00   2.1358791e+00   2.5304748e+00   1.6492450e+00   1.1075720e+00   1.6001777e+00   2.2141198e+00   1.7441970e+00   1.5196090e+00   9.6683480e-01   1.4838225e+00   1.7167914e+00   1.4122282e+00   1.1543334e+00   1.9438463e+00   1.8374400e+00   1.4268530e+00   1.0778421e+00   1.2792049e+00   1.5857302e+00   1.1532421e+00   1.1019503e+00   6.0201716e-01   5.0043842e-01   6.1135434e-01   7.0008735e-01   8.1156529e-01   4.1317535e-01   7.0000303e-01   4.0246123e-01   2.0061436e-01   4.1210927e-01   5.0436965e-01   7.0000303e-01   6.0366256e-01   2.0121983e-01   1.2007726e+00   9.1916394e-01   1.0124729e+00   8.0055465e-01   4.0246123e-01   7.0008735e-01   5.0085236e-01   6.0017982e-01   6.0202028e-01   6.3165225e-01   7.4612830e-01   6.0383105e-01   1.1269424e-01   7.0184453e-01   1.4559030e+00   5.6371422e-01   5.2167829e-01   5.2133179e-01   4.0004442e-01   1.7133283e+00   6.0948800e-01   1.3743342e+00   5.2524663e-01   1.2702954e+00   9.0142636e-01   1.1286018e+00   1.9763960e+00   1.2004262e+00   1.6484371e+00   1.1066159e+00   1.5033966e+00   6.1990228e-01   6.3322667e-01   8.9538275e-01   6.1990228e-01   1.0010060e+00   9.1471442e-01   8.0488008e-01   2.0894199e+00   2.2581358e+00   7.0088627e-01   1.1085342e+00   6.3178782e-01   2.0855639e+00   4.0363334e-01   1.0293900e+00   1.3743657e+00   4.0006662e-01   4.0125062e-01   9.3329055e-01   1.2396422e+00   1.5267540e+00   1.9798779e+00   9.6591465e-01   4.0127250e-01   9.0026497e-01   1.7151603e+00   1.0797805e+00   8.0296037e-01   4.0006662e-01   8.9540816e-01   1.0837679e+00   9.6674360e-01   5.2524663e-01   1.2440789e+00   1.1938630e+00   9.1892454e-01   5.2524663e-01   6.3912943e-01   9.3733589e-01   4.5148429e-01   1.1281352e+00   9.0002570e-01   5.0517282e-01   9.4513210e-01   4.1315633e-01   1.2014191e+00   5.2133179e-01   1.3063533e+00   1.1019505e+00   8.5403370e-01   1.0426516e+00   1.3523310e+00   1.4544312e+00   9.0142636e-01   3.3818226e-01   5.0085236e-01   5.0437695e-01   3.0922892e-01   1.5001461e+00   9.0005094e-01   9.0668287e-01   1.2396475e+00   8.7209296e-01   5.0000761e-01   4.5078948e-01   8.0046764e-01   1.0030724e+00   4.1317535e-01   
6.7824250e-01   6.0017665e-01   6.0000952e-01   6.0000317e-01   7.4262850e-01   6.3925756e-01   5.0001522e-01   2.4077059e+00   1.5012741e+00   2.3329496e+00   2.0008921e+00   2.2042323e+00   3.0476353e+00   9.3541878e-01   2.7309758e+00   2.2068463e+00   2.5373913e+00   1.5160787e+00   1.7043730e+00   1.9242109e+00   1.4044668e+00   1.5401330e+00   1.7168122e+00   1.9044144e+00   3.1543192e+00   3.3406961e+00   1.4044697e+00   2.1265131e+00   1.3061138e+00   3.1536525e+00   1.3069754e+00   2.1097680e+00   2.4379736e+00   1.2049541e+00   1.3017511e+00   2.0033792e+00   2.2562563e+00   2.5606009e+00   2.9377608e+00   2.0050253e+00   1.5030978e+00   2.0001168e+00   2.6390071e+00   2.0114908e+00   1.9023062e+00   1.2016381e+00   1.8467998e+00   2.0209800e+00   1.6143467e+00   1.5012741e+00   2.3121231e+00   2.1220697e+00   1.6461460e+00   1.4062065e+00   1.6118728e+00   1.8108200e+00   1.5004645e+00   1.1000005e+00   9.0305330e-01   9.0506343e-01   1.1075720e+00   8.0488008e-01   6.1119558e-01   6.3912943e-01   6.0383105e-01   3.0490481e-01   1.1269424e-01   4.1210927e-01   6.0184622e-01   7.0008735e-01   1.0803561e+00   1.2125410e+00   1.2178626e+00   9.0645118e-01   7.9153339e-01   1.3000002e+00   7.0096858e-01   3.0008832e-01   8.0245824e-01   1.1001015e+00   1.2040344e+00   1.2012928e+00   6.0017982e-01   9.0645118e-01   1.7262450e+00   1.1005460e+00   1.0000307e+00   1.0000307e+00   5.0043842e-01   1.7087610e+00   1.0003198e+00   1.6300950e+00   9.3848935e-01   1.5032156e+00   1.2007124e+00   1.4092540e+00   2.2026134e+00   1.8005395e+00   1.9004485e+00   1.4019342e+00   1.7231818e+00   7.4269314e-01   9.0668287e-01   1.1133897e+00   1.0251597e+00   1.0924484e+00   1.0143978e+00   1.1005460e+00   2.3044111e+00   2.5032992e+00   9.4511250e-01   1.3253497e+00   1.1075720e+00   2.3033192e+00   5.5450500e-01   1.3061180e+00   1.6004128e+00   5.4219811e-01   6.3912943e-01   1.2090477e+00   1.4006179e+00   1.7019555e+00   2.0185049e+00   1.2190878e+00   7.0548283e-01   1.2049539e+00   1.7202439e+00   1.2636227e+00   1.1006371e+00   7.0911112e-01   1.0207396e+00   1.2632946e+00   9.3308853e-01   9.3848935e-01   1.5130871e+00   1.3741498e+00   9.6572569e-01   6.9987517e-01   8.2421923e-01   1.0798806e+00   8.5583415e-01   5.2524663e-01   8.2418002e-01   6.3912709e-01   3.6452132e-01   5.6394820e-01   7.2036819e-01   5.0517282e-01   8.0008964e-01   1.0000005e+00   1.2000731e+00   1.1019597e+00   4.0002221e-01   1.0039063e+00   7.4612830e-01   8.3183672e-01   6.0383105e-01   6.1119558e-01   2.0000000e-01   4.5078948e-01   1.1000098e+00   7.8890806e-01   4.0122873e-01   5.6371422e-01   4.1212852e-01   5.0001522e-01   5.2524663e-01   1.2137020e+00   3.4080442e-01   3.3813251e-01   3.0490481e-01   6.0035621e-01   1.5010034e+00   4.0246123e-01   1.5265987e+00   6.1135434e-01   1.6395342e+00   1.1134850e+00   1.3309249e+00   2.3136797e+00   7.1629168e-01   1.9760038e+00   1.3748534e+00   1.8136626e+00   9.1894698e-01   9.0320459e-01   1.2661802e+00   6.0427481e-01   9.1427000e-01   9.6685270e-01   1.0777305e+00   2.4270428e+00   2.5626268e+00   8.1112909e-01   1.4228744e+00   5.2167208e-01   2.4261071e+00   7.0633229e-01   1.3043552e+00   1.7511131e+00   6.0383105e-01   5.2491131e-01   1.1330776e+00   1.6740160e+00   1.9314874e+00   2.4166999e+00   1.1400420e+00   7.4269200e-01   1.1024820e+00   2.1702438e+00   1.1639421e+00   1.0427822e+00   4.2268438e-01   1.3276412e+00   1.2710363e+00   1.3154933e+00   6.1135434e-01   1.4922566e+00   1.3466030e+00   1.1400339e+00   7.3461436e-01   9.3733552e-01   
9.7694377e-01   6.0365948e-01   5.8750389e-01   2.4195741e-01   8.6051471e-01   3.3818226e-01   8.1719606e-01   6.0202028e-01   6.0219099e-01   8.0337471e-01   1.0214933e+00   1.0338224e+00   5.2201750e-01   6.0000635e-01   3.6259865e-01   4.2268438e-01   2.2538848e-01   1.0087393e+00   5.4219811e-01   7.4618926e-01   9.2019277e-01   5.2838320e-01   3.4080442e-01   3.4085233e-01   3.4085233e-01   5.2838320e-01   2.0121983e-01   9.0294373e-01   3.0482299e-01   3.0490481e-01   3.0490481e-01   4.1420960e-01   1.1133986e+00   3.0017653e-01   1.9760242e+00   1.0776188e+00   1.8598666e+00   1.5071120e+00   1.7384459e+00   2.5637810e+00   9.3426769e-01   2.2403929e+00   1.7108631e+00   2.0993246e+00   1.1405598e+00   1.2394690e+00   1.4816205e+00   1.0776296e+00   1.4324323e+00   1.4163126e+00   1.4134492e+00   2.6753615e+00   2.8540068e+00   9.1001664e-01   1.6986597e+00   1.0426638e+00   2.6697938e+00   9.0657539e-01   1.6396943e+00   1.9541963e+00   8.5583415e-01   9.0207914e-01   1.5412500e+00   1.7849153e+00   2.0880425e+00   2.4973149e+00   1.5650163e+00   1.0060994e+00   1.5001440e+00   2.2185588e+00   1.6410190e+00   1.4111252e+00   8.5440680e-01   1.4330979e+00   1.6474117e+00   1.4095656e+00   1.0776188e+00   1.8534896e+00   1.7579984e+00   1.3938438e+00   1.0171340e+00   1.1990152e+00   1.4686236e+00   1.0427822e+00   6.8261201e-01   1.0004792e+00   6.3178782e-01   4.1210927e-01   6.0202028e-01   7.0025283e-01   8.0245903e-01   6.7720957e-01   8.1719606e-01   7.0008432e-01   1.0069214e+00   7.9153339e-01   8.6054545e-01   6.4049114e-01   6.3178782e-01   9.0155393e-01   1.2000065e+00   9.0508712e-01   2.0181667e-01   8.2635069e-01   7.1708289e-01   7.0548283e-01   8.0000239e-01   5.4219811e-01   1.3530568e+00   6.3322667e-01   8.0967961e-01   7.1708289e-01   7.0016860e-01   1.5402579e+00   6.3925756e-01   1.5611241e+00   6.4620889e-01   1.4283663e+00   1.1134850e+00   1.3189663e+00   2.1346646e+00   1.3000497e+00   1.8186729e+00   1.3009674e+00   1.7335369e+00   1.0118233e+00   8.1117067e-01   1.0567664e+00   6.0605366e-01   9.2867113e-01   1.0782857e+00   1.0429127e+00   2.2917679e+00   2.4270713e+00   5.0042326e-01   1.2848760e+00   6.9987517e-01   2.2396581e+00   5.2491734e-01   1.3051737e+00   1.5457820e+00   6.0365948e-01   8.0291749e-01   1.1110184e+00   1.3561934e+00   1.6492450e+00   2.1212048e+00   1.1186586e+00   6.7616723e-01   1.1005365e+00   1.7574668e+00   1.3269962e+00   1.0777411e+00   8.0097499e-01   1.0411548e+00   1.1972915e+00   9.9911696e-01   6.4620889e-01   1.4422764e+00   1.3473056e+00   9.3861512e-01   5.2491734e-01   8.6084272e-01   1.2528048e+00   8.2498722e-01   9.6150595e-01   5.0477564e-01   1.0214931e+00   8.0923926e-01   8.0492246e-01   1.0062544e+00   1.2363856e+00   1.2438823e+00   6.2656178e-01   4.0006662e-01   1.2085435e-01   2.0181667e-01   2.2573593e-01   1.2016443e+00   6.3925756e-01   9.2019277e-01   1.1335345e+00   7.1636719e-01   5.0084481e-01   2.0121983e-01   5.0002283e-01   7.3155911e-01   2.0181667e-01   6.7626681e-01   3.0915245e-01   5.0437695e-01   4.1317535e-01   6.1845783e-01   9.0506254e-01   3.0922892e-01   2.1350025e+00   1.2189760e+00   2.0658767e+00   1.7034615e+00   1.9177947e+00   2.7775988e+00   7.7598704e-01   2.4534018e+00   1.9144928e+00   2.2857680e+00   1.2749306e+00   1.4182222e+00   1.6639408e+00   1.1527669e+00   1.4140789e+00   1.4954274e+00   1.6121856e+00   2.8913337e+00   3.0644792e+00   1.1011719e+00   1.8708183e+00   1.0777305e+00   2.8852099e+00   1.0396215e+00   1.8297117e+00   2.1701542e+00   9.4532171e-01   
1.0262619e+00   1.7168122e+00   2.0059655e+00   2.3063931e+00   2.7230908e+00   1.7261949e+00   1.2093243e+00   1.7002548e+00   2.4331092e+00   1.7646791e+00   1.6080687e+00   9.3848935e-01   1.6152383e+00   1.7771159e+00   1.4971922e+00   1.2189760e+00   2.0349233e+00   1.8831259e+00   1.4665397e+00   1.1400339e+00   1.3492939e+00   1.5757399e+00   1.2102248e+00   8.1117067e-01   7.0548283e-01   6.0964891e-01   6.0605366e-01   7.0918894e-01   9.0279223e-01   8.0008964e-01   3.6259865e-01   1.3154973e+00   1.0604287e+00   1.1536694e+00   9.1892454e-01   5.0477564e-01   5.0894102e-01   3.0922892e-01   8.0046764e-01   9.0778124e-01   7.1708289e-01   8.6225026e-01   6.8685125e-01   4.0363334e-01   8.4536936e-01   1.5318139e+00   6.5832080e-01   6.7636452e-01   6.3322667e-01   5.6838732e-01   1.8053679e+00   7.2044167e-01   1.2092602e+00   5.0437695e-01   1.3018595e+00   8.0291671e-01   1.0095513e+00   1.9760044e+00   1.0208709e+00   1.6387179e+00   1.0597877e+00   1.4686236e+00   6.0201716e-01   6.0427481e-01   9.3331138e-01   7.0025283e-01   6.1119558e-01   6.0427175e-01   7.4269200e-01   2.0887699e+00   2.2281421e+00   1.0001753e+00   1.0797700e+00   4.1317535e-01   2.0885107e+00   5.2133179e-01   9.6591465e-01   1.4140457e+00   4.1209001e-01   2.2573593e-01   8.1156529e-01   1.3450340e+00   1.5966664e+00   2.0855643e+00   8.1343016e-01   4.6440171e-01   8.2635069e-01   1.8444686e+00   8.2635069e-01   7.1621748e-01   2.0061436e-01   1.0088783e+00   9.1566538e-01   1.0032296e+00   5.0437695e-01   1.1543257e+00   9.8997136e-01   8.1117067e-01   7.0470867e-01   6.0980961e-01   6.3322667e-01   3.0474106e-01   9.0031539e-01   7.0000151e-01   3.3813251e-01   5.2167829e-01   8.5403428e-01   1.0095513e+00   5.0043842e-01   5.2524663e-01   6.0980961e-01   6.1288055e-01   3.0026460e-01   1.1001015e+00   7.1636719e-01   6.3309258e-01   7.4335736e-01   5.2167208e-01   5.0043084e-01   6.0184309e-01   6.0964891e-01   6.0017982e-01   3.0482299e-01   1.1153247e+00   5.0043084e-01   4.0246123e-01   4.0125062e-01   3.0017653e-01   1.1270411e+00   4.0002221e-01   2.0175565e+00   1.1056693e+00   1.9099615e+00   1.6003257e+00   1.8055799e+00   2.6186105e+00   1.2017042e+00   2.3090806e+00   1.8007233e+00   2.1233216e+00   1.1144002e+00   1.3025622e+00   1.5097103e+00   1.0216374e+00   1.2396937e+00   1.3452695e+00   1.5005647e+00   2.7241212e+00   2.9166918e+00   1.0087396e+00   1.7168636e+00   9.3733552e-01   2.7221286e+00   9.0508756e-01   1.7046111e+00   2.0107590e+00   8.0879701e-01   9.0508712e-01   1.6049314e+00   1.8174378e+00   2.1221146e+00   2.4750525e+00   1.6096791e+00   1.1000193e+00   1.6000016e+00   2.1732693e+00   1.6308665e+00   1.5004872e+00   8.0883916e-01   1.4182493e+00   1.6308803e+00   1.2094550e+00   1.1056693e+00   1.9088565e+00   1.7377460e+00   1.2661852e+00   1.0088926e+00   1.2092662e+00   1.4340155e+00   1.1019692e+00   3.4342562e-01   6.0964891e-01   5.6595908e-01   5.0437695e-01   5.2167829e-01   4.5783248e-01   1.4023777e+00   1.1286018e+00   1.2201578e+00   1.0032443e+00   3.0922892e-01   9.0642679e-01   9.0166476e-01   6.0964597e-01   5.0084481e-01   8.6054545e-01   9.6574336e-01   8.0923926e-01   5.0477564e-01   9.0532093e-01   1.6742781e+00   7.8895472e-01   7.5564478e-01   7.4618926e-01   6.0964891e-01   1.9222003e+00   8.2462252e-01   1.2093908e+00   5.2201750e-01   1.0522594e+00   7.0548138e-01   9.3735629e-01   1.7578497e+00   1.4001717e+00   1.4326118e+00   9.0166431e-01   1.3681502e+00   7.1636719e-01   4.5148429e-01   7.1183012e-01   6.3164977e-01   9.0534502e-01   
8.5583415e-01   6.3322667e-01   1.9047821e+00   2.0429861e+00   3.3813251e-01   9.4634218e-01   7.1700774e-01   1.8662975e+00   3.0474106e-01   9.1695534e-01   1.1636098e+00   3.3818226e-01   5.0476836e-01   7.4329527e-01   1.0171202e+00   1.3020942e+00   1.8013674e+00   7.8935898e-01   3.0474106e-01   7.0008735e-01   1.4927071e+00   1.0336860e+00   6.7720957e-01   5.0855778e-01   7.3895268e-01   9.4622126e-01   8.4540285e-01   5.2201750e-01   1.0621172e+00   1.0788651e+00   8.1156529e-01   4.0002221e-01   5.6618864e-01   9.6935134e-01   5.2524663e-01   4.1212852e-01   5.0517282e-01   7.0008584e-01   6.3322667e-01   3.0490481e-01   1.2003660e+00   9.1552373e-01   1.0095513e+00   8.0046685e-01   4.5080200e-01   7.0105084e-01   6.0964891e-01   6.0365948e-01   5.0477564e-01   6.3178782e-01   7.4329527e-01   6.0201716e-01   2.2573593e-01   7.0096708e-01   1.4548054e+00   5.6347978e-01   5.2167208e-01   5.2133802e-01   4.0006662e-01   1.7132643e+00   6.0948506e-01   1.4655221e+00   7.0548283e-01   1.2921474e+00   9.1424701e-01   1.1900342e+00   1.9791159e+00   1.2013591e+00   1.6491682e+00   1.1111057e+00   1.5689626e+00   8.0726668e-01   7.4329527e-01   9.8998705e-01   8.0337471e-01   1.2004198e+00   1.1061923e+00   8.2635069e-01   2.0953157e+00   2.2622460e+00   6.0366256e-01   1.2097311e+00   8.0883841e-01   2.0866883e+00   6.0035621e-01   1.0858512e+00   1.3762609e+00   6.0000635e-01   6.0035305e-01   1.0143975e+00   1.2399444e+00   1.5291965e+00   1.9842703e+00   1.0777305e+00   4.1315633e-01   9.0005048e-01   1.7303039e+00   1.2394744e+00   8.2498722e-01   6.0018299e-01   9.9013884e-01   1.2395260e+00   1.1286911e+00   7.0548283e-01   1.3081175e+00   1.3479052e+00   1.1074834e+00   7.0184453e-01   8.1117067e-01   1.1186499e+00   6.0980961e-01   2.0181667e-01   5.2133802e-01   7.0548283e-01   4.0243965e-01   8.5471446e-01   9.1001664e-01   9.1916394e-01   6.0964891e-01   8.0296037e-01   1.0000307e+00   5.2524663e-01   4.1420960e-01   6.0000635e-01   8.0004523e-01   9.0166431e-01   9.0026588e-01   3.3818226e-01   6.0366256e-01   1.4340438e+00   8.0004523e-01   7.0000454e-01   7.0000151e-01   2.0000000e-01   1.4651632e+00   7.0008584e-01   1.7369589e+00   8.4540285e-01   1.6071563e+00   1.3008770e+00   1.5130871e+00   2.3098753e+00   1.5002444e+00   2.0034559e+00   1.5005854e+00   1.8322392e+00   8.5437498e-01   1.0087393e+00   1.2192920e+00   8.4786353e-01   1.1330694e+00   1.1270325e+00   1.2012867e+00   2.4144060e+00   2.6097166e+00   7.9153339e-01   1.4330116e+00   8.7209348e-01   2.4119960e+00   6.3178782e-01   1.4094452e+00   1.7039341e+00   5.6371422e-01   6.3309258e-01   1.3130937e+00   1.5066999e+00   1.8106410e+00   2.1515742e+00   1.3253458e+00   8.0004602e-01   1.3000908e+00   1.8533295e+00   1.3748188e+00   1.2012928e+00   5.7609230e-01   1.1298636e+00   1.3741846e+00   1.0451812e+00   8.4540285e-01   1.6176927e+00   1.4854079e+00   1.0777307e+00   7.4612830e-01   9.3306807e-01   1.1910068e+00   8.1715665e-01   4.0243965e-01   6.0184622e-01   6.0000952e-01   1.0158274e+00   1.1111057e+00   1.1191444e+00   8.0928056e-01   7.4335736e-01   1.2000002e+00   6.0964891e-01   3.0026460e-01   7.0088477e-01   1.0001601e+00   1.1024820e+00   1.1005458e+00   5.0042326e-01   8.0492246e-01   1.6321742e+00   1.0001753e+00   9.0005048e-01   9.0002615e-01   4.0006662e-01   1.6390068e+00   9.0029064e-01   1.6300430e+00   8.6084272e-01   1.5035329e+00   1.2004200e+00   1.4092511e+00   2.2043907e+00   1.7002548e+00   1.9010379e+00   1.4007831e+00   1.7240342e+00   7.4269314e-01   9.0534502e-01   
1.1133984e+00   9.3184922e-01   1.0597992e+00   1.0142766e+00   1.1005364e+00   2.3071806e+00   2.5048249e+00   8.4536936e-01   1.3253910e+00   1.0116865e+00   2.3056305e+00   5.2838320e-01   1.3061582e+00   1.6010223e+00   4.8391482e-01   5.7608844e-01   1.2089313e+00   1.4017695e+00   1.7039229e+00   2.0293124e+00   1.2189760e+00   7.0096858e-01   1.2016380e+00   1.7295384e+00   1.2636227e+00   1.1005460e+00   6.1830489e-01   1.0208709e+00   1.2632948e+00   9.3329055e-01   8.6084272e-01   1.5130912e+00   1.3741813e+00   9.6572569e-01   6.5832080e-01   8.2418071e-01   1.0788007e+00   7.9148662e-01   3.0922892e-01   8.0046764e-01   1.3743342e+00   1.3452695e+00   1.3745152e+00   1.0776296e+00   8.0051115e-01   1.4000349e+00   8.2462252e-01   3.0026460e-01   5.7609230e-01   1.2089253e+00   1.3131370e+00   1.3002493e+00   7.0016860e-01   1.0426760e+00   1.8951252e+00   1.2036864e+00   1.1055892e+00   1.1055707e+00   6.3165225e-01   1.9760099e+00   1.1133895e+00   1.3035495e+00   1.0032296e+00   1.1134939e+00   8.1112984e-01   1.0427944e+00   1.8040883e+00   1.9000220e+00   1.5005626e+00   1.0010060e+00   1.3844234e+00   6.1288055e-01   5.7609230e-01   7.8890721e-01   1.1056785e+00   1.1270325e+00   9.0778124e-01   7.0556260e-01   1.9141294e+00   2.1052841e+00   8.2421923e-01   1.0150395e+00   1.2036863e+00   1.9046783e+00   5.2133802e-01   9.3733589e-01   1.2010584e+00   6.0948212e-01   7.0470867e-01   8.5583357e-01   1.0008768e+00   1.3033860e+00   1.6469593e+00   9.0294373e-01   5.0436965e-01   8.5406616e-01   1.3485619e+00   1.0522594e+00   7.0993998e-01   8.0250123e-01   7.4329527e-01   1.0427822e+00   9.0053003e-01   1.0032296e+00   1.1531951e+00   1.1543259e+00   9.0142681e-01   5.6618864e-01   6.1135434e-01   9.3984267e-01   9.0168933e-01   7.1629303e-01   1.5266891e+00   1.3564850e+00   1.4198077e+00   1.1544060e+00   7.0088627e-01   1.3008812e+00   7.2036951e-01   3.0482299e-01   7.4954884e-01   1.1531951e+00   1.2645755e+00   1.2053003e+00   6.1119267e-01   1.0803561e+00   1.9177201e+00   1.1286911e+00   1.0451689e+00   1.0433444e+00   7.2036951e-01   2.0856547e+00   1.0782211e+00   1.0434746e+00   9.0029064e-01   9.0279223e-01   6.0948800e-01   8.0883841e-01   1.6097492e+00   1.8003682e+00   1.3025173e+00   8.0879701e-01   1.1347620e+00   3.0922892e-01   3.6452132e-01   5.2133179e-01   1.0032293e+00   9.3308891e-01   6.0383105e-01   5.0043084e-01   1.7170314e+00   1.9082779e+00   8.5406616e-01   7.4275547e-01   1.1001110e+00   1.7132654e+00   4.1212852e-01   7.0548138e-01   1.0030871e+00   5.0085236e-01   6.0000635e-01   6.1135434e-01   8.0879701e-01   1.1134075e+00   1.4922778e+00   6.3322667e-01   4.0246123e-01   6.8261201e-01   1.1935004e+00   7.4954884e-01   5.0437695e-01   7.0008584e-01   4.5148429e-01   7.4262964e-01   6.0018299e-01   9.0029064e-01   9.1424701e-01   8.5437440e-01   6.0017665e-01   5.2167208e-01   3.0915245e-01   6.4620889e-01   8.0000160e-01   1.0033867e+00   7.3461436e-01   8.2512420e-01   6.0219099e-01   6.0017982e-01   6.0000317e-01   5.0000761e-01   7.0016860e-01   6.0202028e-01   4.5148429e-01   5.7630313e-01   5.0855778e-01   1.2699992e-01   5.0894102e-01   1.2671752e+00   4.1420960e-01   3.6259865e-01   3.4080442e-01   2.4170870e-01   1.5133193e+00   4.1317535e-01   1.5238388e+00   6.0980961e-01   1.4557632e+00   1.1002023e+00   1.3069713e+00   2.1693127e+00   1.1005458e+00   1.8443124e+00   1.3063934e+00   1.6655594e+00   6.5832080e-01   8.0492246e-01   1.0498228e+00   5.7832449e-01   9.1424701e-01   9.0320459e-01   1.0032296e+00   2.2802814e+00   
2.4537354e+00   7.1621613e-01   1.2528590e+00   5.3914287e-01   2.2781292e+00   4.2362917e-01   1.2128138e+00   1.5640141e+00   3.4085233e-01   4.1212852e-01   1.1060939e+00   1.4140458e+00   1.7081323e+00   2.1436849e+00   1.1138955e+00   6.0184622e-01   1.1001015e+00   1.8659091e+00   1.1544060e+00   1.0010209e+00   3.3813251e-01   1.0224270e+00   1.1635398e+00   9.7600992e-01   6.0980961e-01   1.4182493e+00   1.2705641e+00   8.9538275e-01   5.4219811e-01   7.3084171e-01   9.6936870e-01   6.0184934e-01   3.0922892e-01   2.4170870e-01   4.0127250e-01   1.6009488e+00   1.0040629e+00   1.0499492e+00   1.2653025e+00   9.1471442e-01   6.1119558e-01   5.0477564e-01   9.0005048e-01   1.1016049e+00   5.0043084e-01   7.0096708e-01   7.0088627e-01   7.0470720e-01   7.0176121e-01   8.0967961e-01   6.3165225e-01   6.0201716e-01   2.5221737e+00   1.6096629e+00   2.4221589e+00   2.1015969e+00   2.3098905e+00   3.1317714e+00   1.0597879e+00   2.8188299e+00   2.3040148e+00   2.6373482e+00   1.6231306e+00   1.8068049e+00   2.0210084e+00   1.5237053e+00   1.7080686e+00   1.8459262e+00   2.0034094e+00   3.2387184e+00   3.4286399e+00   1.5005854e+00   2.2285172e+00   1.4324350e+00   3.2358000e+00   1.4109628e+00   2.2110849e+00   2.5224740e+00   1.3139336e+00   1.4095777e+00   2.1090366e+00   2.3323064e+00   2.6377472e+00   2.9966835e+00   2.1144760e+00   1.6012568e+00   2.1000482e+00   2.6968519e+00   2.1346646e+00   2.0025815e+00   1.3133662e+00   1.9351024e+00   2.1377869e+00   1.7137965e+00   1.6096629e+00   2.4161682e+00   2.2434416e+00   1.7684500e+00   1.5143051e+00   1.7168636e+00   1.9368172e+00   1.6050040e+00   1.1269424e-01   3.3818226e-01   1.3017961e+00   7.4612830e-01   1.0262619e+00   1.2443200e+00   8.2421923e-01   6.0202028e-01   2.2573593e-01   6.0017982e-01   8.4572653e-01   3.0922892e-01   5.6347978e-01   4.1317535e-01   6.0964891e-01   5.2201750e-01   7.3090905e-01   8.0245824e-01   4.1420960e-01   2.2297880e+00   1.3131802e+00   2.1734835e+00   1.8042696e+00   2.0169191e+00   2.8857511e+00   7.7598796e-01   2.5607759e+00   2.0181988e+00   2.3909895e+00   1.3770846e+00   1.5195166e+00   1.7689720e+00   1.2362756e+00   1.4651863e+00   1.5800353e+00   1.7155605e+00   3.0010211e+00   3.1712557e+00   1.2016443e+00   1.9735224e+00   1.1531953e+00   2.9937162e+00   1.1401191e+00   1.9335528e+00   2.2793947e+00   1.0403116e+00   1.1237940e+00   1.8155572e+00   2.1170640e+00   2.4166673e+00   2.8369211e+00   1.8227568e+00   1.3135522e+00   1.8005404e+00   2.5443612e+00   1.8557670e+00   1.7105814e+00   1.0313359e+00   1.7226330e+00   1.8708183e+00   1.5881025e+00   1.3131802e+00   2.1363271e+00   1.9752912e+00   1.5530527e+00   1.2366099e+00   1.4501583e+00   1.6655594e+00   1.3088083e+00   3.4342562e-01   1.4024091e+00   8.3183672e-01   1.0522594e+00   1.2712749e+00   8.5437498e-01   6.1119558e-01   3.3813251e-01   7.0016860e-01   9.2867113e-01   3.4342562e-01   5.2133179e-01   5.0855778e-01   6.3192325e-01   5.6618864e-01   7.5564478e-01   7.0462844e-01   4.5847767e-01   2.3345511e+00   1.4181033e+00   2.2624227e+00   1.9044571e+00   2.1188381e+00   2.9740725e+00   8.7209348e-01   2.6511588e+00   2.1151751e+00   2.4832720e+00   1.4692287e+00   1.6190709e+00   1.8603115e+00   1.3450304e+00   1.5778323e+00   1.6856949e+00   1.8133657e+00   3.0879393e+00   3.2627356e+00   1.3017553e+00   2.0678448e+00   1.2635708e+00   3.0810441e+00   1.2366675e+00   2.0307764e+00   2.3657459e+00   1.1404856e+00   1.2257611e+00   1.9176961e+00   2.1957105e+00   2.4980314e+00   2.9055191e+00   1.9262438e+00   
1.4100098e+00   1.9004485e+00   2.6116431e+00   1.9609333e+00   1.8095574e+00   1.1347620e+00   1.8037909e+00   1.9725464e+00   1.6581521e+00   1.4181033e+00   2.2355461e+00   2.0784269e+00   1.6462102e+00   1.3373104e+00   1.5468618e+00   1.7698041e+00   1.4111252e+00   1.2003596e+00   6.1288055e-01   7.4618926e-01   9.6691372e-01   5.7609230e-01   3.0922892e-01   3.0490481e-01   5.0436965e-01   7.0184453e-01   1.1269424e-01   8.2635069e-01   3.0482299e-01   3.3813251e-01   3.0490481e-01   4.5148429e-01   9.3308891e-01   2.0181667e-01   2.1221982e+00   1.2089191e+00   2.0305682e+00   1.7009400e+00   1.9088256e+00   2.7434081e+00   9.1894698e-01   2.4265154e+00   1.9046790e+00   2.2453370e+00   1.2284047e+00   1.4060413e+00   1.6267848e+00   1.1281352e+00   1.3523310e+00   1.4562730e+00   1.6032169e+00   2.8519346e+00   3.0369970e+00   1.1020600e+00   1.8342569e+00   1.0426638e+00   2.8491513e+00   1.0116721e+00   1.8114932e+00   2.1335035e+00   9.1552373e-01   1.0090312e+00   1.7079369e+00   1.9522117e+00   2.2566913e+00   2.6406601e+00   1.7139238e+00   1.2013529e+00   1.7000137e+00   2.3442222e+00   1.7386523e+00   1.6019500e+00   9.1449234e-01   1.5517970e+00   1.7435092e+00   1.3757183e+00   1.2089191e+00   2.0167186e+00   1.8496945e+00   1.3938438e+00   1.1152390e+00   1.3189663e+00   1.5429659e+00   1.2037520e+00   6.7720957e-01   7.4262850e-01   7.0911112e-01   7.0633229e-01   1.0011648e+00   1.1020600e+00   7.2036951e-01   5.0477564e-01   1.1005460e+00   1.8106900e+00   9.0166431e-01   9.0192695e-01   9.0055475e-01   8.0055465e-01   2.1027376e+00   1.0003198e+00   1.0225570e+00   3.0474106e-01   1.1299441e+00   5.0517282e-01   7.5564478e-01   1.7513222e+00   1.1055799e+00   1.4140515e+00   7.8895472e-01   1.3181953e+00   5.7608844e-01   4.1315633e-01   8.1156529e-01   4.1317535e-01   8.0004523e-01   7.2044167e-01   5.2524663e-01   1.8787830e+00   1.9768256e+00   5.0001522e-01   9.4912864e-01   4.5148429e-01   1.8635775e+00   3.0915245e-01   7.8611860e-01   1.2373911e+00   3.0922892e-01   3.0922892e-01   5.7609230e-01   1.2089834e+00   1.4324608e+00   1.9471600e+00   6.3912943e-01   3.0017653e-01   5.0043842e-01   1.7149040e+00   8.6084272e-01   4.8391482e-01   3.4080442e-01   9.0668287e-01   8.6225026e-01   9.3424659e-01   3.0474106e-01   9.3861512e-01   9.5646231e-01   7.8935898e-01   3.4085233e-01   5.2491734e-01   7.8940551e-01   3.0482299e-01   6.0948506e-01   1.3000044e+00   9.3308891e-01   4.0243965e-01   5.6371422e-01   4.1212852e-01   7.0000303e-01   5.4219811e-01   1.2105001e+00   3.4342562e-01   3.6256305e-01   3.4085233e-01   8.0008964e-01   1.5005854e+00   4.1420960e-01   1.5358856e+00   6.1990228e-01   1.7849054e+00   1.1528477e+00   1.3788456e+00   2.4261894e+00   5.6370994e-01   2.0884901e+00   1.4655452e+00   1.9390732e+00   1.1074834e+00   1.0434746e+00   1.4340155e+00   6.0605366e-01   9.1554656e-01   1.0782857e+00   1.1897288e+00   2.5394068e+00   2.6516318e+00   8.3183606e-01   1.5694554e+00   5.2201750e-01   2.5386539e+00   9.0192695e-01   1.4157600e+00   1.8949500e+00   8.0097499e-01   7.0548138e-01   1.1935069e+00   1.8443040e+00   2.0853276e+00   2.5816887e+00   1.1989547e+00   9.1424659e-01   1.1138955e+00   2.3468430e+00   1.1963432e+00   1.1270327e+00   6.0365948e-01   1.5143051e+00   1.3938114e+00   1.5079206e+00   6.1990228e-01   1.5829749e+00   1.4450801e+00   1.3189240e+00   9.1132198e-01   1.1152300e+00   1.0159134e+00   6.3309012e-01   7.0096858e-01   1.1002025e+00   4.8852375e-01   9.1024401e-01   8.1112984e-01   4.0127250e-01   8.1117067e-01   
1.3486924e+00   7.0633229e-01   4.6440171e-01   5.1257987e-01   5.0517282e-01   1.5260594e+00   6.1288055e-01   1.5131090e+00   7.4335736e-01   1.4549432e+00   1.1020600e+00   1.3036236e+00   2.1691920e+00   1.1527669e+00   1.8444686e+00   1.3309288e+00   1.6567564e+00   6.3925756e-01   8.5617086e-01   1.0458540e+00   9.0668287e-01   8.4540285e-01   8.5586571e-01   1.0039209e+00   2.2782572e+00   2.4540263e+00   1.2012866e+00   1.2440282e+00   6.2656178e-01   2.2782572e+00   7.0556260e-01   1.2101609e+00   1.5639802e+00   6.0219099e-01   4.5148429e-01   1.1079931e+00   1.4142064e+00   1.7087610e+00   2.1412345e+00   1.1115204e+00   6.7720957e-01   1.1281352e+00   1.8646736e+00   1.1282162e+00   1.0010209e+00   4.1315633e-01   1.0172673e+00   1.1401191e+00   9.4532171e-01   7.4335736e-01   1.4134218e+00   1.2440229e+00   8.4786353e-01   9.0557807e-01   7.2440846e-01   9.3308853e-01   6.0964891e-01   8.0296037e-01   1.1055799e+00   1.2124837e+00   1.2014191e+00   6.0000952e-01   9.3755356e-01   1.7874653e+00   1.1024913e+00   1.0032296e+00   1.0031018e+00   5.2201750e-01   1.8640262e+00   1.0088926e+00   1.3452347e+00   9.0417295e-01   1.2040344e+00   9.0168933e-01   1.1133986e+00   1.9046783e+00   1.8005318e+00   1.6009504e+00   1.1056691e+00   1.4335330e+00   5.2167829e-01   6.1990228e-01   8.2418141e-01   1.0118233e+00   1.0151880e+00   8.2458478e-01   8.0051115e-01   2.0076819e+00   2.2050331e+00   9.3329017e-01   1.0426638e+00   1.1020600e+00   2.0062587e+00   4.5847767e-01   1.0087393e+00   1.3009222e+00   5.0855778e-01   6.0202028e-01   9.1471442e-01   1.1019505e+00   1.4044980e+00   1.7386523e+00   9.3351278e-01   4.5783248e-01   9.1892454e-01   1.4407364e+00   1.0151880e+00   8.0093081e-01   7.0088627e-01   7.4269200e-01   1.0142482e+00   8.0250123e-01   9.0417295e-01   1.2189645e+00   1.1269510e+00   8.0879701e-01   6.1990228e-01   5.6371422e-01   8.6084272e-01   8.0291749e-01   7.8935813e-01   8.0250123e-01   8.0046685e-01   7.0017011e-01   5.2491734e-01   1.3741813e+00   7.0470720e-01   7.4269314e-01   6.7626502e-01   6.0000635e-01   1.4852616e+00   6.3309012e-01   1.6636721e+00   7.5826453e-01   1.5161847e+00   1.2049539e+00   1.4220925e+00   2.2191056e+00   1.4001717e+00   1.9083789e+00   1.4007861e+00   1.7945122e+00   9.6133119e-01   9.1552373e-01   1.1416778e+00   7.7603846e-01   1.1170561e+00   1.1351073e+00   1.1152390e+00   2.3540839e+00   2.5167781e+00   6.0202028e-01   1.3687074e+00   8.0713433e-01   2.3222107e+00   5.7608844e-01   1.3563898e+00   1.6193612e+00   5.7609230e-01   7.3090905e-01   1.2201578e+00   1.4221192e+00   1.7236083e+00   2.1360791e+00   1.2373857e+00   7.1629168e-01   1.2000731e+00   1.7962160e+00   1.3755348e+00   1.1298552e+00   7.2113820e-01   1.0843962e+00   1.3150470e+00   1.0664292e+00   7.5826453e-01   1.5362595e+00   1.4451976e+00   1.0604287e+00   6.7626502e-01   8.9540816e-01   1.2552585e+00   8.0073117e-01   5.0001522e-01   4.1212852e-01   5.6347549e-01   4.0127250e-01   8.7240114e-01   3.0008832e-01   1.2085435e-01   1.2085435e-01   6.0017982e-01   1.1038933e+00   2.0061436e-01   1.9231154e+00   1.0088926e+00   1.8971338e+00   1.5035330e+00   1.7143629e+00   2.6071033e+00   7.2440846e-01   2.2781292e+00   1.7231818e+00   2.0998984e+00   1.0923537e+00   1.2224463e+00   1.4922544e+00   9.3733589e-01   1.1896725e+00   1.2782589e+00   1.4186216e+00   2.7177641e+00   2.8857013e+00   9.6674360e-01   1.6882618e+00   8.5406616e-01   2.7167827e+00   8.6084272e-01   1.6345263e+00   2.0058565e+00   7.5508853e-01   8.1715593e-01   1.5132180e+00   
1.8635428e+00   2.1554613e+00   2.5926811e+00   1.5194972e+00   1.0207533e+00   1.5005626e+00   2.3165875e+00   1.5429659e+00   1.4098467e+00   7.2036819e-01   1.4724918e+00   1.5757929e+00   1.3838027e+00   1.0088926e+00   1.8378491e+00   1.6745686e+00   1.2948699e+00   9.4912864e-01   1.1635325e+00   1.3473688e+00   1.0032293e+00   4.0004442e-01   6.9509552e-01   3.0017653e-01   7.1708289e-01   2.2573593e-01   5.0085236e-01   4.0243965e-01   7.0548138e-01   1.0008617e+00   3.0482299e-01   2.0206913e+00   1.1056785e+00   2.0075255e+00   1.6053217e+00   1.8156855e+00   2.7170183e+00   6.3912709e-01   2.3873965e+00   1.8286172e+00   2.2132187e+00   1.2079042e+00   1.3276450e+00   1.6018644e+00   1.0207396e+00   1.2397507e+00   1.3715471e+00   1.5245240e+00   2.8327619e+00   2.9941199e+00   1.0032443e+00   1.7962160e+00   9.3329055e-01   2.8269181e+00   9.6936870e-01   1.7435156e+00   2.1174156e+00   8.6084272e-01   9.2351241e-01   1.6144550e+00   1.9761215e+00   2.2674248e+00   2.7114616e+00   1.6190709e+00   1.1282247e+00   1.6009322e+00   2.4285500e+00   1.6432478e+00   1.5147271e+00   8.2512420e-01   1.5839544e+00   1.6753044e+00   1.4829434e+00   1.1056785e+00   1.9427965e+00   1.7734131e+00   1.3908238e+00   1.0498226e+00   1.2712749e+00   1.4523130e+00   1.1044111e+00   6.0980961e-01   4.1209001e-01   1.1020600e+00   2.0181667e-01   4.0243965e-01   3.0922892e-01   7.0088627e-01   1.4001688e+00   3.0922892e-01   1.6797901e+00   7.8935898e-01   1.7574606e+00   1.2224463e+00   1.4618181e+00   2.4274025e+00   6.3165225e-01   2.0887499e+00   1.4865883e+00   1.9561612e+00   1.0664292e+00   1.0336863e+00   1.3939836e+00   8.2421923e-01   1.2089895e+00   1.1997296e+00   1.1938630e+00   2.5462062e+00   2.6763758e+00   6.4049114e-01   1.5645185e+00   8.0883916e-01   2.5391583e+00   8.3183672e-01   1.4351453e+00   1.8644317e+00   7.4618926e-01   6.9987517e-01   1.2680580e+00   1.7844580e+00   2.0440071e+00   2.5329067e+00   1.2921474e+00   8.5440680e-01   1.2036924e+00   2.2838099e+00   1.3737025e+00   1.1587585e+00   6.4620889e-01   1.4491800e+00   1.4507713e+00   1.4583848e+00   7.8935898e-01   1.6277433e+00   1.5384791e+00   1.3150470e+00   8.7209348e-01   1.0788651e+00   1.2179890e+00   7.4954884e-01   6.1135434e-01   1.3790270e+00   5.2491734e-01   4.5147187e-01   4.5080200e-01   3.0026460e-01   1.6179159e+00   5.2167829e-01   1.4543196e+00   5.6838732e-01   1.3502290e+00   1.0008620e+00   1.2192919e+00   2.0611274e+00   1.2013529e+00   1.7369589e+00   1.2053003e+00   1.5767956e+00   6.3925756e-01   7.1779518e-01   9.6131279e-01   6.4620889e-01   1.0032443e+00   9.3331138e-01   9.0279223e-01   2.1713885e+00   2.3476282e+00   8.0245903e-01   1.1755517e+00   6.3322667e-01   2.1693131e+00   4.2362917e-01   1.1187430e+00   1.4544336e+00   4.0246123e-01   4.1209001e-01   1.0208844e+00   1.3018144e+00   1.5969056e+00   2.0303761e+00   1.0427944e+00   5.0085236e-01   1.0008465e+00   1.7574039e+00   1.1274284e+00   9.0166476e-01   4.0125062e-01   9.3437551e-01   1.1319099e+00   9.6935134e-01   5.6838732e-01   1.3309288e+00   1.2428507e+00   9.2745734e-01   5.7630313e-01   6.8160885e-01   9.6672602e-01   5.2167208e-01   8.5440680e-01   2.2608083e-01   4.0125062e-01   3.0490481e-01   4.2270142e-01   1.0207262e+00   2.0181667e-01   2.0282636e+00   1.1133895e+00   1.9386586e+00   1.6012719e+00   1.8114342e+00   2.6515677e+00   9.0999313e-01   2.3322945e+00   1.8060515e+00   2.1578375e+00   1.1447365e+00   1.3085752e+00   1.5359734e+00   1.0426516e+00   1.3018144e+00   1.3779960e+00   1.5044724e+00   
2.7627283e+00   2.9432651e+00   1.0010209e+00   1.7447170e+00   9.6576136e-01   2.7579768e+00   9.1892454e-01   1.7159977e+00   2.0420308e+00   8.2635069e-01   9.1576742e-01   1.6105700e+00   1.8662195e+00   2.1696258e+00   2.5680120e+00   1.6184785e+00   1.1020600e+00   1.6000183e+00   2.2731185e+00   1.6529028e+00   1.5029725e+00   8.2635069e-01   1.4699000e+00   1.6570292e+00   1.3301857e+00   1.1133895e+00   1.9214944e+00   1.7646791e+00   1.3272142e+00   1.0235120e+00   1.2275665e+00   1.4621584e+00   1.1060939e+00   9.1576742e-01   9.6133119e-01   9.4532171e-01   1.2662318e+00   3.0490481e-01   8.6084272e-01   2.7230933e+00   1.8083405e+00   2.7192500e+00   2.3152780e+00   2.5278895e+00   3.4307167e+00   1.2089253e+00   3.1023107e+00   2.5446557e+00   2.9238544e+00   1.9071235e+00   2.0445123e+00   2.3113306e+00   1.7148932e+00   1.8686471e+00   2.0692591e+00   2.2408459e+00   3.5448121e+00   3.7102716e+00   1.7134856e+00   2.5076804e+00   1.6187837e+00   3.5398901e+00   1.6780493e+00   2.4594169e+00   2.8277283e+00   1.5698091e+00   1.6365650e+00   2.3270565e+00   2.6739856e+00   2.9710335e+00   3.3954286e+00   2.3304578e+00   1.8446316e+00   2.3054869e+00   3.1050823e+00   2.3405162e+00   2.2288004e+00   1.5327217e+00   2.2740189e+00   2.3840998e+00   2.1122339e+00   1.8083405e+00   2.6588338e+00   2.4791402e+00   2.0688078e+00   1.7632513e+00   1.9828965e+00   2.1428811e+00   1.8095574e+00   3.0017653e-01   2.0061436e-01   6.0017982e-01   1.2012991e+00   1.2085435e-01   1.8301371e+00   9.1424659e-01   1.8223693e+00   1.4049093e+00   1.6190709e+00   2.5271432e+00   7.0556260e-01   2.1953731e+00   1.6303102e+00   2.0261311e+00   1.0363096e+00   1.1330692e+00   1.4229011e+00   8.5406674e-01   1.1527746e+00   1.2106302e+00   1.3261864e+00   2.6410980e+00   2.8004332e+00   8.1117067e-01   1.6146557e+00   7.8886054e-01   2.6375763e+00   7.9824795e-01   1.5471213e+00   1.9317133e+00   6.9509552e-01   7.3155911e-01   1.4182194e+00   1.8031267e+00   2.0887452e+00   2.5422790e+00   1.4267527e+00   9.3308891e-01   1.4006149e+00   2.2706274e+00   1.4614246e+00   1.3141581e+00   6.4049114e-01   1.4230277e+00   1.5004249e+00   1.3669148e+00   9.1424659e-01   1.7490906e+00   1.5981930e+00   1.2553121e+00   8.7212232e-01   1.0924484e+00   1.2731059e+00   9.0557807e-01   1.1269424e-01   5.0002283e-01   1.2049541e+00   2.0121983e-01   1.8447840e+00   9.3329055e-01   1.7901165e+00   1.4035225e+00   1.6222582e+00   2.4980210e+00   8.1757693e-01   2.1693127e+00   1.6187837e+00   2.0049100e+00   1.0151397e+00   1.1261381e+00   1.3938113e+00   9.0657539e-01   1.2362756e+00   1.2472959e+00   1.3154932e+00   2.6088344e+00   2.7785257e+00   9.0207914e-01   1.5972548e+00   8.5406674e-01   2.6071034e+00   7.7652636e-01   1.5358856e+00   1.8953567e+00   6.9518117e-01   7.4612718e-01   1.4220925e+00   1.7511588e+00   2.0440071e+00   2.4804818e+00   1.4362913e+00   9.1449234e-01   1.4003402e+00   2.2077377e+00   1.4867147e+00   1.3085752e+00   6.7720780e-01   1.3734975e+00   1.5100598e+00   1.3269962e+00   9.3329055e-01   1.7441015e+00   1.6143613e+00   1.2552584e+00   8.7796615e-01   1.0782751e+00   1.3029195e+00   9.1424659e-01   5.0000761e-01   1.2040406e+00   1.1269424e-01   1.8289847e+00   9.1424701e-01   1.7872748e+00   1.4023776e+00   1.6144390e+00   2.4974488e+00   8.0533198e-01   2.1691714e+00   1.6179842e+00   1.9948417e+00   9.9013884e-01   1.1186586e+00   1.3842454e+00   8.5583357e-01   1.1527671e+00   1.1990152e+00   1.3139296e+00   2.6085083e+00   2.7775771e+00   8.5440680e-01   1.5835478e+00   
7.8886139e-01   2.6068470e+00   7.5508853e-01   1.5300146e+00   1.8950932e+00   6.5712813e-01   7.2036951e-01   1.4134189e+00   1.7511120e+00   2.0435901e+00   2.4807480e+00   1.4220898e+00   9.1424701e-01   1.4002005e+00   2.2048860e+00   1.4562730e+00   1.3069754e+00   6.3309258e-01   1.3632211e+00   1.4815986e+00   1.2921474e+00   9.1424701e-01   1.7351894e+00   1.5836236e+00   1.2085436e+00   8.4725834e-01   1.0597879e+00   1.2653025e+00   9.0508756e-01   1.3743342e+00   5.0043084e-01   1.7369589e+00   8.2635069e-01   1.6144390e+00   1.3008770e+00   1.5131090e+00   2.3226028e+00   1.3004854e+00   2.0107294e+00   1.5010034e+00   1.8390199e+00   8.5471446e-01   1.0087539e+00   1.2223855e+00   8.0073117e-01   1.1286018e+00   1.1270411e+00   1.2013529e+00   2.4290388e+00   2.6198428e+00   7.8895472e-01   1.4363167e+00   7.7598796e-01   2.4266979e+00   6.3178782e-01   1.4100098e+00   1.7134977e+00   5.6347549e-01   6.3165225e-01   1.3130978e+00   1.5237265e+00   1.8289379e+00   2.1979419e+00   1.3253497e+00   8.0004602e-01   1.3000455e+00   1.9028805e+00   1.3748188e+00   1.2012991e+00   5.6371422e-01   1.1400420e+00   1.3748220e+00   1.0597992e+00   8.2635069e-01   1.6184929e+00   1.4858469e+00   1.0797702e+00   7.4612830e-01   9.3329055e-01   1.1910002e+00   8.0923926e-01   1.1056785e+00   3.0089448e+00   2.1019570e+00   2.9563162e+00   2.6052626e+00   2.8107271e+00   3.6717374e+00   1.5012719e+00   3.3522198e+00   2.8186527e+00   3.1596417e+00   2.1362161e+00   2.3151198e+00   2.5461024e+00   2.0036629e+00   2.1224665e+00   2.3234228e+00   2.5150159e+00   3.7801779e+00   3.9623034e+00   2.0033816e+00   2.7467405e+00   1.9044216e+00   3.7784933e+00   1.9231092e+00   2.7237438e+00   3.0623796e+00   1.8186729e+00   1.9089573e+00   2.6097166e+00   2.8846684e+00   3.1885494e+00   3.5706709e+00   2.6109888e+00   2.1139043e+00   2.6017555e+00   3.2706949e+00   2.6138780e+00   2.5099937e+00   1.8069857e+00   2.4748863e+00   2.6338874e+00   2.2385678e+00   2.1019570e+00   2.9251726e+00   2.7321685e+00   2.2661994e+00   2.0190735e+00   2.2288464e+00   2.4131370e+00   2.1020441e+00   1.9226845e+00   1.0087252e+00   1.8684939e+00   1.5017097e+00   1.7108631e+00   2.5816562e+00   8.0533198e-01   2.2563154e+00   1.7134977e+00   2.0770423e+00   1.0604287e+00   1.2124777e+00   1.4620239e+00   9.3329017e-01   1.1896594e+00   1.2705641e+00   1.4098496e+00   2.6923501e+00   2.8659663e+00   9.1449234e-01   1.6637458e+00   8.5403428e-01   2.6902417e+00   8.3183672e-01   1.6225614e+00   1.9757175e+00   7.3084048e-01   8.1117067e-01   1.5097082e+00   1.8197097e+00   2.1169795e+00   2.5408329e+00   1.5160570e+00   1.0087393e+00   1.5001233e+00   2.2573596e+00   1.5423651e+00   1.4049376e+00   7.1708289e-01   1.4229011e+00   1.5611429e+00   1.3142952e+00   1.0087252e+00   1.8271493e+00   1.6639408e+00   1.2552635e+00   9.2768675e-01   1.1400420e+00   1.3479052e+00   1.0031018e+00   9.3184922e-01   8.0291749e-01   7.0910969e-01   3.4342562e-01   1.3028005e+00   1.6474201e+00   1.0216374e+00   8.5586571e-01   9.0026543e-01   9.0508756e-01   7.7598796e-01   5.7832449e-01   1.0522594e+00   9.0999313e-01   7.0008735e-01   7.1708289e-01   1.4049376e+00   1.4220925e+00   1.2553121e+00   6.0202028e-01   1.1170561e+00   1.4055109e+00   1.1186497e+00   4.5783248e-01   9.3306769e-01   1.2101609e+00   1.1134939e+00   5.3914287e-01   1.0144117e+00   1.1074742e+00   1.6007365e+00   5.2491734e-01   1.0797700e+00   1.1139044e+00   1.4000349e+00   4.0004442e-01   7.1629303e-01   1.2090477e+00   6.8170466e-01   4.5148429e-01   
9.1427000e-01   9.3184922e-01   5.0043842e-01   4.1209001e-01   8.0296037e-01   1.0498226e+00   8.0928056e-01   6.0018299e-01   9.3446811e-01   1.3131410e+00   5.6371422e-01   7.8985507e-01   1.8949500e+00   9.1427000e-01   1.5639785e+00   9.3308891e-01   1.4501583e+00   7.1621748e-01   6.0017665e-01   1.0010209e+00   2.0181667e-01   5.0000761e-01   6.3925756e-01   7.0548283e-01   2.0162299e+00   2.0885102e+00   5.2167829e-01   1.1079931e+00   2.2608083e-01   2.0057464e+00   5.0043084e-01   9.2747919e-01   1.4186217e+00   4.1212852e-01   3.4085233e-01   6.3178782e-01   1.4043632e+00   1.6175925e+00   2.1298991e+00   6.3309258e-01   5.2133179e-01   5.6595908e-01   1.9078843e+00   7.4418186e-01   6.1830764e-01   3.4085233e-01   1.1006468e+00   9.1132198e-01   1.1010711e+00   0.0000000e+00   1.0458540e+00   9.3984267e-01   9.0166476e-01   5.0043084e-01   7.0088627e-01   7.0993998e-01   3.0017653e-01   8.0093160e-01   6.0000635e-01   7.1621613e-01   2.2268632e+00   4.1317535e-01   5.2491734e-01   6.0964891e-01   8.2421923e-01   7.4335736e-01   4.1209001e-01   1.4186217e+00   1.3131410e+00   7.4275547e-01   6.1119267e-01   9.1566538e-01   1.0095513e+00   1.1796101e+00   2.5399984e-01   1.5237074e+00   8.2421923e-01   1.0429127e+00   4.1315633e-01   3.0490481e-01   1.1528553e+00   1.1270325e+00   7.0096708e-01   5.0001522e-01   3.1328089e-01   9.0657583e-01   7.0096858e-01   9.1568820e-01   1.0216374e+00   6.0035305e-01   8.0337471e-01   7.0548283e-01   1.2396937e+00   5.0043084e-01   4.2270142e-01   8.0008964e-01   1.3131410e+00   3.0915245e-01   4.5847767e-01   7.0470720e-01   9.6936870e-01   7.4262964e-01   9.0645118e-01   1.2190260e+00   4.0246123e-01   1.3450652e+00   1.4544312e+00   1.0207258e+00   4.5147187e-01   9.6501813e-01   5.0517282e-01   3.0490481e-01   5.0437695e-01   6.8170466e-01   6.5712813e-01   5.0855778e-01   2.0121983e-01   1.4695463e+00   1.5267750e+00   7.4395693e-01   6.3309258e-01   7.8890806e-01   1.4542932e+00   7.0008432e-01   4.5784410e-01   9.0166431e-01   8.0000160e-01   7.0008584e-01   3.0017653e-01   9.0005094e-01   1.1019505e+00   1.6144405e+00   4.0004442e-01   5.0436965e-01   4.1315633e-01   1.4012283e+00   6.3164729e-01   2.0121983e-01   8.0046685e-01   6.0219099e-01   6.0964597e-01   6.5724028e-01   5.6371422e-01   5.6838732e-01   7.0911112e-01   5.3914287e-01   6.0948506e-01   4.0246123e-01   5.6371422e-01   5.2133179e-01   1.1281267e+00   1.6745375e+00   8.1112984e-01   5.2167208e-01   7.4395693e-01   7.0016860e-01   5.0855778e-01   3.3813251e-01   9.0659977e-01   7.8895472e-01   5.0043842e-01   4.1209001e-01   1.2528048e+00   1.3020492e+00   9.3861512e-01   4.0127250e-01   1.0142766e+00   1.2362810e+00   9.0168933e-01   3.0490481e-01   7.0478886e-01   1.0010209e+00   9.0279223e-01   2.2608083e-01   7.4262850e-01   9.0055475e-01   1.4109657e+00   2.2573593e-01   7.8895472e-01   8.0492246e-01   1.2000668e+00   4.0363334e-01   4.1212852e-01   1.0039060e+00   4.5080200e-01   2.4195741e-01   7.0462844e-01   7.8985507e-01   3.0490481e-01   3.4085233e-01   6.0017982e-01   8.0928056e-01   6.0017665e-01   4.5784410e-01   7.4612718e-01   2.7992301e+00   3.6259865e-01   9.6953662e-01   6.4620889e-01   1.5401330e+00   1.4140789e+00   1.1281265e+00   2.0058560e+00   1.8949500e+00   1.4140515e+00   1.2396937e+00   8.0000239e-01   4.1317535e-01   1.8064092e+00   9.3310976e-01   2.1167364e+00   2.0181667e-01   1.7570696e+00   1.0143975e+00   6.1135434e-01   1.8661434e+00   1.8197089e+00   1.2632995e+00   8.1112909e-01   5.0126466e-01   8.0051115e-01   1.2632996e+00   
1.5975200e+00   1.5266891e+00   5.0043084e-01   1.3452695e+00   1.3018553e+00   1.9314575e+00   1.2089192e+00   1.0777307e+00   1.5030978e+00   1.8949500e+00   8.5409862e-01   1.0151880e+00   1.4180463e+00   1.6742781e+00   1.4542907e+00   1.4853863e+00   1.8197089e+00   2.4725511e+00   1.8443040e+00   2.3518103e+00   1.6032169e+00   1.5066818e+00   1.9080163e+00   8.0923851e-01   9.4532171e-01   1.5109394e+00   1.6179000e+00   2.9132779e+00   2.9705619e+00   1.1020600e+00   2.0185049e+00   7.0633229e-01   2.9085831e+00   1.4001717e+00   1.8310931e+00   2.3325127e+00   1.3000908e+00   1.2016381e+00   1.5402579e+00   2.3143334e+00   2.5314248e+00   3.0393618e+00   1.5405402e+00   1.4018011e+00   1.3018553e+00   2.8185850e+00   1.4728279e+00   1.5248833e+00   1.1020506e+00   2.0036931e+00   1.8191683e+00   2.0009584e+00   9.1427000e-01   1.9534067e+00   1.8336293e+00   1.8020058e+00   1.4006178e+00   1.6026130e+00   1.3506710e+00   1.0116724e+00   6.3912709e-01   7.8890806e-01   1.2190319e+00   1.0776296e+00   8.0923926e-01   1.6740888e+00   1.5650163e+00   1.0798806e+00   9.0155438e-01   9.0417295e-01   6.4049114e-01   1.4685147e+00   6.4049114e-01   1.7843537e+00   4.5148429e-01   1.4324350e+00   6.8261201e-01   3.3813251e-01   1.5401311e+00   1.4852570e+00   9.3329055e-01   5.0043842e-01   2.0181667e-01   9.1424701e-01   9.3424697e-01   1.2633467e+00   1.2093243e+00   5.2167829e-01   1.0313359e+00   9.6574336e-01   1.5965767e+00   9.0168933e-01   7.7603846e-01   1.2016443e+00   1.5639785e+00   5.7832449e-01   7.7882758e-01   1.1074742e+00   1.3452311e+00   1.1281352e+00   1.1558746e+00   1.4852570e+00   1.1153247e+00   7.8895472e-01   5.0477564e-01   5.0855778e-01   1.0426636e+00   9.4532171e-01   7.3155911e-01   5.0476836e-01   1.3669148e+00   1.1910003e+00   8.5471446e-01   7.1629303e-01   1.1528553e+00   1.0777411e+00   9.0142636e-01   8.0046685e-01   7.1629168e-01   1.0032293e+00   9.1892413e-01   3.6452132e-01   5.6370994e-01   7.0176271e-01   1.4157327e+00   4.2362917e-01   7.0633229e-01   6.0964891e-01   1.0062544e+00   9.1554656e-01   6.0365948e-01   1.0235118e+00   6.1135434e-01   6.7626502e-01   7.5508853e-01   9.3308891e-01   7.1621884e-01   8.5403428e-01   6.5712608e-01   8.0245824e-01   6.3192325e-01   9.1132198e-01   8.6051414e-01   1.0242692e+00   1.0232576e+00   6.8685125e-01   1.5761415e+00   1.4407364e+00   9.0296858e-01   8.3689956e-01   6.3322667e-01   1.0451812e+00   1.5490134e+00   4.5847767e-01   1.6529028e+00   8.3916809e-01   1.2749306e+00   5.4219811e-01   7.0462697e-01   1.3611955e+00   1.3103292e+00   9.0792879e-01   9.1446896e-01   8.2421853e-01   7.1708289e-01   9.0683128e-01   1.1954899e+00   1.2957636e+00   6.3178534e-01   9.0508756e-01   8.7796615e-01   1.4198077e+00   7.2113820e-01   6.0427481e-01   1.0032443e+00   1.4501583e+00   4.5216167e-01   5.2491131e-01   9.1894698e-01   1.2738390e+00   9.4912864e-01   1.0207533e+00   1.3523292e+00   5.0043842e-01   4.1317535e-01   8.5403428e-01   7.0910969e-01   3.0482299e-01   4.0243965e-01   1.6491696e+00   1.8289467e+00   1.0060994e+00   6.1119267e-01   9.0142636e-01   1.6484371e+00   5.0126466e-01   6.0018299e-01   9.3308853e-01   4.2362917e-01   4.0363334e-01   5.2133802e-01   7.9153339e-01   1.0782107e+00   1.5275160e+00   5.2167829e-01   5.2167208e-01   6.9987517e-01   1.2633516e+00   5.2201750e-01   4.0127250e-01   5.0517282e-01   4.1212852e-01   5.2167829e-01   4.1210927e-01   7.1621748e-01   8.0093081e-01   6.3178782e-01   3.0922892e-01   7.0008735e-01   2.0061436e-01   3.6452132e-01   6.0035305e-01   
4.1420960e-01   7.0096858e-01   6.3178782e-01   5.2132556e-01   3.0490481e-01   1.5634961e+00   1.6740875e+00   5.4219811e-01   5.8750389e-01   8.0245903e-01   1.5263478e+00   4.0004442e-01   6.1135434e-01   8.6051471e-01   5.0043842e-01   4.2270142e-01   3.0482299e-01   8.0967961e-01   1.0426513e+00   1.5757929e+00   3.3813251e-01   4.0127250e-01   5.0855778e-01   1.3133662e+00   7.1700909e-01   4.0125062e-01   5.2491734e-01   5.2167829e-01   5.2838320e-01   5.3943256e-01   6.0017665e-01   6.4620889e-01   6.8261201e-01   4.2270142e-01   3.0482299e-01   3.0026460e-01   7.0470867e-01   5.0477564e-01   1.1038840e+00   1.0010209e+00   4.0363334e-01   3.3808272e-01   1.2528049e+00   1.4182049e+00   9.2033101e-01   2.4195741e-01   1.2036925e+00   1.2362756e+00   6.3451734e-01   3.0482299e-01   5.2524663e-01   7.4335736e-01   7.4329414e-01   4.0125062e-01   5.2491131e-01   6.7636452e-01   1.1755449e+00   4.0127250e-01   6.3925756e-01   7.9148746e-01   9.1424659e-01   5.2491734e-01   4.1210927e-01   8.5437440e-01   1.2085435e-01   3.0026460e-01   4.0127250e-01   1.0010209e+00   4.0243965e-01   4.1317535e-01   3.0482299e-01   6.0444249e-01   3.3813251e-01   6.0964891e-01   9.0166431e-01   4.1212852e-01   7.8985507e-01   8.1719606e-01   2.1378157e+00   2.2009978e+00   5.0855077e-01   1.2175952e+00   3.0017653e-01   2.1167404e+00   6.0035621e-01   1.0597879e+00   1.5265797e+00   5.0517282e-01   5.2167829e-01   7.4329527e-01   1.5072282e+00   1.7227391e+00   2.2434054e+00   7.4335736e-01   6.3309258e-01   6.8161057e-01   2.0107350e+00   9.2867113e-01   7.5508853e-01   5.0517282e-01   1.2040344e+00   1.0178820e+00   1.2037520e+00   2.0181667e-01   1.1636098e+00   1.0621172e+00   1.0032443e+00   6.0000317e-01   8.0883841e-01   9.0668287e-01   5.0085236e-01   6.0964891e-01   7.4618926e-01   2.0118073e+00   2.0884859e+00   9.1424701e-01   1.1060939e+00   4.0243965e-01   2.0057764e+00   6.3178782e-01   9.1916394e-01   1.4198627e+00   6.1119267e-01   6.0219099e-01   6.3309012e-01   1.4134218e+00   1.6179000e+00   2.1265315e+00   6.3178534e-01   9.0506254e-01   1.0032443e+00   1.9078396e+00   6.5712608e-01   6.8261201e-01   6.0219099e-01   1.1003034e+00   9.0532049e-01   1.1001014e+00   5.0000761e-01   1.0433444e+00   9.1892454e-01   9.0002615e-01   5.6595908e-01   7.0470867e-01   6.1119558e-01   6.0017982e-01   5.0085236e-01   1.5275160e+00   1.6747664e+00   1.0434746e+00   5.2132556e-01   8.0533198e-01   1.5264802e+00   5.7609230e-01   4.1317535e-01   8.6051414e-01   5.7630313e-01   5.2524663e-01   4.1315633e-01   8.6054545e-01   1.0440350e+00   1.5412701e+00   4.1210927e-01   8.0250202e-01   9.1471442e-01   1.3130978e+00   3.0490481e-01   5.0043084e-01   5.7630313e-01   5.0043842e-01   3.3818226e-01   5.0043084e-01   6.3925756e-01   6.0948212e-01   4.1317535e-01   3.0482299e-01   7.0548283e-01   3.0490481e-01   2.2573593e-01   5.6394820e-01   1.3634093e+00   1.4858469e+00   8.1757693e-01   5.2201750e-01   9.1427000e-01   1.3523380e+00   6.0201716e-01   3.4342562e-01   7.1629168e-01   7.0096708e-01   6.0948212e-01   3.0490481e-01   7.0096708e-01   9.1424701e-01   1.4267554e+00   4.0127250e-01   4.1420960e-01   4.8342635e-01   1.2049539e+00   6.0964891e-01   1.1269424e-01   7.1621613e-01   4.1212852e-01   6.0018299e-01   5.3914287e-01   7.0548283e-01   5.2524663e-01   7.0105084e-01   5.0476836e-01   5.6371422e-01   3.0474106e-01   5.2491734e-01   6.0948212e-01   1.2000065e+00   2.0187441e+00   1.0498228e+00   2.2315584e+00   1.0000152e+00   1.8808952e+00   1.1286798e+00   7.5826453e-01   1.9821126e+00   
1.9334872e+00   1.4094023e+00   9.7944085e-01   1.0090312e+00   3.0915245e-01   1.4094022e+00   1.7226330e+00   1.6785116e+00   8.2418071e-01   1.4544336e+00   1.4183053e+00   2.0448570e+00   1.3189240e+00   1.1989547e+00   1.6071563e+00   2.0162299e+00   9.7599312e-01   1.1287691e+00   1.5299044e+00   1.8304280e+00   1.5694554e+00   1.5966664e+00   1.9334872e+00   2.0448570e+00   1.2223853e+00   2.3135239e+00   3.0915245e-01   2.0415522e+00   1.2703001e+00   9.2351241e-01   2.1487275e+00   2.0854181e+00   1.4650300e+00   1.1157320e+00   8.0296037e-01   1.2013591e+00   1.4650276e+00   1.8684939e+00   1.6818191e+00   8.0245746e-01   1.5324961e+00   1.5271597e+00   2.1953922e+00   1.5071120e+00   1.3457716e+00   1.8029854e+00   2.0885102e+00   1.0837575e+00   1.2703001e+00   1.7133162e+00   1.9522053e+00   1.7369702e+00   1.6941963e+00   2.0286286e+00   1.1213597e+00   6.3912943e-01   1.9163315e+00   5.0855778e-01   1.1310337e+00   1.3142952e+00   6.0219099e-01   8.0046764e-01   7.2904264e-01   1.2366099e+00   1.4559030e+00   2.0467625e+00   7.7882758e-01   6.0184622e-01   6.0948800e-01   1.7296063e+00   1.2395260e+00   9.0668287e-01   8.0051036e-01   1.0231745e+00   1.0411548e+00   1.0543951e+00   5.2167829e-01   1.1356187e+00   1.2079042e+00   9.3439622e-01   4.2268438e-01   8.1719606e-01   1.2192978e+00   8.0046764e-01   1.3133662e+00   1.0434746e+00   8.3916809e-01   2.2573593e-01   5.0855077e-01   9.3848935e-01   9.0659977e-01   5.2167829e-01   7.0096858e-01   5.5450500e-01   1.0287902e+00   5.2133802e-01   8.4725834e-01   9.7599312e-01   8.0250123e-01   6.0018299e-01   5.6371422e-01   1.0171340e+00   3.0482299e-01   2.0181667e-01   6.0000317e-01   1.1079931e+00   2.0061436e-01   2.2573593e-01   5.0084481e-01   8.1683095e-01   5.2524663e-01   7.0096708e-01   1.0116865e+00   2.2278855e+00   7.0008584e-01   1.1298552e+00   1.6300950e+00   6.0017982e-01   5.0084481e-01   8.5403428e-01   1.6097507e+00   1.8284464e+00   2.3350903e+00   8.5406616e-01   7.1629168e-01   7.5508853e-01   2.1138813e+00   8.1683095e-01   8.2462252e-01   4.0246123e-01   1.3009222e+00   1.1139906e+00   1.3000951e+00   2.2608083e-01   1.2636227e+00   1.1315710e+00   1.1002119e+00   7.0088627e-01   9.0029018e-01   6.9600743e-01   3.1328089e-01   1.8661354e+00   1.1286798e+00   7.2044167e-01   1.9755679e+00   1.9314520e+00   1.3741466e+00   9.0645118e-01   6.0184622e-01   1.0001751e+00   1.3741498e+00   1.7083042e+00   1.6308665e+00   6.0201716e-01   1.4559030e+00   1.4140789e+00   2.0433026e+00   1.3131370e+00   1.1900969e+00   1.6049479e+00   2.0057464e+00   9.6691372e-01   1.1304042e+00   1.5237285e+00   1.7843627e+00   1.5639785e+00   1.5975352e+00   1.9314520e+00   8.2671175e-01   1.1543257e+00   1.2085435e-01   3.0474106e-01   7.0088627e-01   1.0144117e+00   1.3018103e+00   1.7709153e+00   7.0462844e-01   3.0482299e-01   7.0470867e-01   1.4857440e+00   8.1457587e-01   6.0948506e-01   3.3813251e-01   6.4049114e-01   7.4954884e-01   6.3925756e-01   5.0043084e-01   1.0090834e+00   8.7372177e-01   5.2838320e-01   2.0121983e-01   3.4342562e-01   7.3084171e-01   4.1315633e-01   5.0855778e-01   9.1024401e-01   8.2498722e-01   5.0436965e-01   5.6595908e-01   7.2044167e-01   1.2101609e+00   5.0437695e-01   6.9987517e-01   8.1457660e-01   1.0010209e+00   4.1212852e-01   3.4342562e-01   9.3351278e-01   3.0915245e-01   3.0482299e-01   6.0052920e-01   9.2747919e-01   2.2608083e-01   4.0000000e-01   5.0476836e-01   8.5586571e-01   5.0477564e-01   5.0477564e-01   8.2498722e-01   1.2635707e+00   1.2396421e+00   8.0533198e-01   
2.4170870e-01   4.0127250e-01   7.4618926e-01   8.0726668e-01   1.0151880e+00   1.1066159e+00   5.6371422e-01   9.1554656e-01   8.0879701e-01   1.3523345e+00   6.0366256e-01   6.3912943e-01   9.0532093e-01   1.4186217e+00   5.2133179e-01   7.1700909e-01   8.1719606e-01   1.0923439e+00   8.5409862e-01   1.0116865e+00   1.3253496e+00   2.0121983e-01   8.0051036e-01   1.1269596e+00   1.4140457e+00   1.8721285e+00   8.0250123e-01   3.3813251e-01   8.0250202e-01   1.5969056e+00   8.4536936e-01   7.0096708e-01   2.2538848e-01   7.4395693e-01   8.3222261e-01   7.1779518e-01   4.1212852e-01   1.1079931e+00   9.4151244e-01   5.7630313e-01   3.0490481e-01   4.1420960e-01   6.9509395e-01   3.4080442e-01   7.0184453e-01   1.1527745e+00   1.4140486e+00   1.8971345e+00   7.0556260e-01   3.1328089e-01   7.0910969e-01   1.6486410e+00   7.4618926e-01   6.0184622e-01   1.1269424e-01   8.0923926e-01   7.7598796e-01   8.0883916e-01   3.4085233e-01   1.0235254e+00   8.7240114e-01   6.3309012e-01   5.0043842e-01   4.1315633e-01   5.7609230e-01   2.2538848e-01   8.0888055e-01   1.0030868e+00   1.5299044e+00   1.0000000e-01   6.3164977e-01   7.0096708e-01   1.3008855e+00   6.0184622e-01   3.3813251e-01   8.0296037e-01   5.0476836e-01   3.6256305e-01   5.6618864e-01   6.3178782e-01   4.5847767e-01   5.2491734e-01   4.1420960e-01   6.0202028e-01   4.0127250e-01   6.0052920e-01   5.6618864e-01   3.4342562e-01   8.7372177e-01   8.2425704e-01   9.3308891e-01   1.1005554e+00   7.1700774e-01   9.6674360e-01   8.0051115e-01   1.2632995e+00   5.2491734e-01   8.0883916e-01   7.8935898e-01   1.4043632e+00   7.0470867e-01   9.0532093e-01   7.5502989e-01   9.6953662e-01   7.4612718e-01   1.0222576e+00   1.3061180e+00   1.0032296e+00   1.0032293e+00   1.1900276e+00   1.3017553e+00   4.1315633e-01   1.1093621e+00   1.0088783e+00   1.5263498e+00   7.1708289e-01   7.3155911e-01   1.0040629e+00   1.6175925e+00   6.1845783e-01   7.5826453e-01   9.3426769e-01   1.2396937e+00   1.0142626e+00   1.2128138e+00   1.5237074e+00   1.5299064e+00   1.6885111e+00   1.8315348e+00   8.0097499e-01   1.6050900e+00   1.5160591e+00   2.0074169e+00   1.1389082e+00   1.2275665e+00   1.3502668e+00   2.1298991e+00   1.1075720e+00   1.2113964e+00   1.3632538e+00   1.7639471e+00   1.4922544e+00   1.7133283e+00   2.0290146e+00   7.1621748e-01   8.0051036e-01   1.3008813e+00   6.0017982e-01   4.1210927e-01   8.0492246e-01   5.0477564e-01   3.4080442e-01   5.6595908e-01   6.3309258e-01   4.5784410e-01   5.0855778e-01   4.1317535e-01   6.0366256e-01   4.0246123e-01   6.0035621e-01   5.7630313e-01   5.0085236e-01   1.4407390e+00   9.1892413e-01   4.2270142e-01   3.6452132e-01   6.7824250e-01   9.0668287e-01   8.2458409e-01   5.2133179e-01   9.0792879e-01   1.0124729e+00   8.0250202e-01   4.1210927e-01   5.0085236e-01   8.2458478e-01   4.1315633e-01   1.6100639e+00   1.0426636e+00   5.2491734e-01   8.0488008e-01   8.6054545e-01   1.0116721e+00   9.7291273e-01   5.6595908e-01   9.4532171e-01   1.1186499e+00   9.1681464e-01   6.3178782e-01   6.2656178e-01   9.6574369e-01   5.3943256e-01   1.4007831e+00   1.3033860e+00   1.7572550e+00   8.5406674e-01   1.0030724e+00   1.0426513e+00   1.9078843e+00   9.0005048e-01   1.0010209e+00   1.0776188e+00   1.4549432e+00   1.2363278e+00   1.5032156e+00   1.8103044e+00   6.0184934e-01   8.2671175e-01   6.0383105e-01   4.1209001e-01   6.3309258e-01   7.4418186e-01   5.0477564e-01   4.0006662e-01   4.8342635e-01   9.1892413e-01   4.8391482e-01   2.0121983e-01   6.4620889e-01   7.0462697e-01   5.0436965e-01   6.0184622e-01   
5.7608844e-01   6.1830764e-01   5.3914287e-01   7.0105084e-01   5.0855778e-01   6.3165225e-01   3.0490481e-01   5.0477564e-01   5.2133179e-01   9.1446938e-01   8.7209348e-01   9.0532093e-01   3.4085233e-01   1.1298636e+00   9.6150595e-01   7.2036819e-01   5.0477564e-01   5.2167208e-01   6.3925756e-01   3.0008832e-01   3.0915245e-01   3.0474106e-01   1.1006468e+00   5.0043842e-01   4.1420960e-01   2.4195741e-01   6.8170466e-01   4.0127250e-01   7.0096708e-01   1.0003198e+00   5.0043084e-01   9.1132198e-01   3.0026460e-01   2.0121983e-01   4.0004442e-01   6.9987517e-01   4.5148429e-01   5.0477564e-01   8.3183672e-01   1.1010711e+00   8.0000160e-01   6.0052920e-01   2.0121983e-01   6.8161057e-01   4.1212852e-01   7.0176121e-01   1.0030721e+00   1.0458540e+00   9.3984267e-01   9.0166476e-01   5.0043084e-01   7.0088627e-01   7.0993998e-01   3.0017653e-01   2.2608083e-01   7.0008584e-01   9.3848935e-01   7.0184453e-01   6.3178534e-01   9.6936870e-01   5.0476836e-01   8.7372177e-01   5.6618864e-01   5.0477564e-01   8.7240114e-01   5.3943256e-01   3.0474106e-01   5.2167208e-01   8.0879701e-01   5.0085236e-01   9.0279268e-01   5.2133802e-01   4.2362917e-01   6.0017982e-01   5.2838320e-01
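[Note on the fixture added below: judging only from its path, pdist-seuclidean-ml-iris.txt appears to hold the condensed standardized-Euclidean ("seuclidean") distance vector for the 150x4 iris feature matrix, written as a single line of %.7e-formatted values. A minimal sketch of how such a fixture could be consumed or regenerated is given here; the input file name "iris.txt" and the regeneration step are assumptions for illustration, not scipy's actual test tooling.]

    # Sketch only: load an iris-like feature matrix, compute the condensed
    # seuclidean pdist vector, and write it in the one-line format seen below.
    import numpy as np
    from scipy.spatial.distance import pdist, squareform

    X = np.loadtxt("iris.txt")           # hypothetical 150x4 iris feature matrix
    d = pdist(X, metric="seuclidean")    # condensed 1-D vector, length n*(n-1)/2
    assert d.shape == (X.shape[0] * (X.shape[0] - 1) // 2,)
    D = squareform(d)                    # optional: expand to the full n x n matrix
    np.savetxt("pdist-seuclidean-ml-iris.txt", d[None, :], fmt="%.7e")

[The `d[None, :]` reshape writes the whole condensed vector as one row, which matches the single-line (`@@ -0,0 +1 @@`) form of the fixture; the rendered diff merely wraps that one long line for display.]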
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-seuclidean-ml-iris.txt b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-seuclidean-ml-iris.txt
new file mode 100644
index 00000000..3e2759df
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-seuclidean-ml-iris.txt
@@ -0,0 +1 @@
+   1.1781739e+00   8.4573383e-01   1.1040164e+00   2.6033464e-01   1.0391769e+00   6.5951091e-01   2.6643250e-01   1.6215602e+00   9.6424206e-01   5.8926015e-01   4.4417668e-01   1.2158053e+00   1.5196051e+00   1.4342986e+00   2.2147971e+00   1.0267382e+00   1.3103399e-01   1.0246015e+00   7.0646671e-01   4.6190224e-01   5.3352899e-01   6.8496652e-01   6.2944414e-01   5.1453676e-01   1.1649855e+00   3.8639663e-01   1.3340137e-01   2.6033464e-01   8.5141186e-01   9.9757114e-01   5.0629646e-01   1.3963590e+00   1.6851313e+00   9.6424206e-01   7.1143905e-01   4.8636669e-01   9.6424206e-01   1.4309353e+00   2.3749211e-01   1.8699153e-01   2.8644037e+00   1.0938603e+00   5.4968254e-01   7.9227302e-01   1.2158053e+00   7.0111465e-01   9.1831777e-01   5.2374483e-01   4.7680727e-01   3.4225655e+00   2.9886388e+00   3.5231786e+00   3.4844976e+00   3.4140450e+00   2.8802393e+00   3.0292175e+00   2.9585178e+00   3.2500773e+00   2.8104851e+00   3.8076036e+00   2.7718515e+00   3.6661618e+00   3.0567513e+00   2.4313960e+00   3.1540282e+00   2.7718124e+00   2.7494216e+00   4.0917474e+00   3.0136333e+00   3.0855821e+00   2.8833448e+00   3.7756717e+00   3.0462644e+00   3.0262994e+00   3.1582448e+00   3.6064873e+00   3.6179250e+00   3.0140884e+00   2.7108793e+00   3.1480683e+00   3.0769261e+00   2.8006021e+00   3.5140011e+00   2.7293962e+00   2.7724816e+00   3.3142477e+00   3.8377035e+00   2.4725633e+00   3.1307104e+00   3.0248446e+00   2.9240118e+00   2.9852014e+00   3.1515797e+00   2.8921724e+00   2.4678110e+00   2.6524994e+00   2.9083443e+00   2.7444686e+00   2.7478281e+00   4.2652803e+00   3.6712837e+00   4.4571526e+00   3.7518856e+00   4.1563042e+00   5.0327536e+00   3.5110505e+00   4.5914343e+00   4.4347154e+00   4.7605836e+00   3.6465897e+00   3.9644198e+00   4.1403419e+00   3.9458900e+00   4.0035735e+00   3.9244083e+00   3.7394250e+00   5.1213512e+00   5.6085412e+00   4.1515197e+00   4.3260896e+00   3.5311267e+00   5.2010517e+00   3.7194928e+00   4.0104626e+00   4.2547108e+00   3.5326627e+00   3.3344440e+00   4.1152831e+00   4.1647603e+00   4.7306329e+00   5.0503286e+00   4.1958529e+00   3.4649010e+00   3.7290073e+00   5.0848756e+00   4.0161825e+00   3.6208873e+00   3.2588014e+00   4.1126592e+00   4.3082433e+00   4.1887411e+00   3.6712837e+00   4.3324309e+00   4.3552675e+00   4.1561376e+00   4.0674499e+00   3.7933592e+00   3.8117183e+00   3.3250640e+00   5.2374483e-01   4.3319335e-01   1.3890415e+00   2.1841707e+00   9.9973471e-01   9.3211669e-01   6.4636269e-01   2.7124234e-01   1.7245677e+00   9.3727156e-01   1.7819563e-01   7.5570839e-01   2.5520912e+00   3.3809114e+00   2.1782802e+00   1.1854382e+00   2.0937104e+00   1.8662528e+00   1.1155937e+00   1.6542533e+00   1.4482752e+00   8.4881492e-01   9.7259094e-01   1.6562722e-01   9.7322023e-01   1.2100516e+00   9.9111027e-01   5.3286499e-01   2.8394141e-01   1.1346946e+00   2.5666454e+00   2.8608436e+00   2.7124234e-01   4.9009568e-01   1.3630799e+00   2.7124234e-01   6.0647055e-01   9.5529726e-01   1.1682143e+00   1.6911681e+00   7.6195008e-01   1.2774622e+00   1.9003949e+00   1.7819563e-01   1.8642334e+00   5.8652824e-01   1.6860841e+00   7.0235100e-01   3.5517185e+00   3.0793995e+00   3.5669733e+00   2.7166736e+00   3.1838913e+00   2.5120824e+00   3.1938169e+00   2.0428688e+00   3.1039816e+00   2.2561090e+00   2.8016158e+00   2.6226738e+00   2.9050141e+00   2.8502197e+00   2.0976260e+00   3.1846091e+00   2.5890531e+00   2.2584354e+00   3.4434621e+00   2.3329638e+00   3.1272801e+00   2.5616006e+00   3.3203580e+00   2.7436923e+00   2.8484236e+00  
 3.0948526e+00   3.4151452e+00   3.5708989e+00   2.7939968e+00   2.0736054e+00   2.3834491e+00   2.2886612e+00   2.3204706e+00   3.1632397e+00   2.5205525e+00   3.0112889e+00   3.3433635e+00   3.2300528e+00   2.2657938e+00   2.4705763e+00   2.4462190e+00   2.8038852e+00   2.4332562e+00   2.2089299e+00   2.4060762e+00   2.2734727e+00   2.3627180e+00   2.7012637e+00   1.8976741e+00   2.3590971e+00   4.3837112e+00   3.3195686e+00   4.4453895e+00   3.6018522e+00   4.1012355e+00   5.0512932e+00   2.8774753e+00   4.5344585e+00   4.0827835e+00   5.0801762e+00   3.7291677e+00   3.6888814e+00   4.1064223e+00   3.4625304e+00   3.7552252e+00   3.9939605e+00   3.6781201e+00   5.5433523e+00   5.4381439e+00   3.4976392e+00   4.4223824e+00   3.2288234e+00   5.1217596e+00   3.4157742e+00   4.1643077e+00   4.3726405e+00   3.2842292e+00   3.2296198e+00   3.9190152e+00   4.1591876e+00   4.6244312e+00   5.4884429e+00   4.0035368e+00   3.2202996e+00   3.3301365e+00   5.1089381e+00   4.2054648e+00   3.6234874e+00   3.1421933e+00   4.1502382e+00   4.3306814e+00   4.2256435e+00   3.3195686e+00   4.4219948e+00   4.4973329e+00   4.1152664e+00   3.6487297e+00   3.7329401e+00   4.0033827e+00   3.2017663e+00   2.8394141e-01   9.9272943e-01   1.8549949e+00   4.9772204e-01   5.9738093e-01   7.8305765e-01   3.7622328e-01   1.4342986e+00   5.0621589e-01   4.9772204e-01   6.9001472e-01   2.2742100e+00   3.0330374e+00   1.8410898e+00   8.5582452e-01   1.8552074e+00   1.4758765e+00   9.8932340e-01   1.2824303e+00   9.4580058e-01   7.0175090e-01   5.8564715e-01   6.1067563e-01   6.6453319e-01   9.2528705e-01   7.6195008e-01   1.7002750e-01   3.1093967e-01   1.0044375e+00   2.1686473e+00   2.5011215e+00   3.7622328e-01   3.6669623e-01   1.1883075e+00   3.7622328e-01   5.8652824e-01   6.7745878e-01   7.9191984e-01   2.0937821e+00   3.6228991e-01   9.5582164e-01   1.5272557e+00   4.9772204e-01   1.4755007e+00   1.3340137e-01   1.3666101e+00   4.3319335e-01   3.7283414e+00   3.2257816e+00   3.7651559e+00   3.1082143e+00   3.4606263e+00   2.7705999e+00   3.2962379e+00   2.4179023e+00   3.3643791e+00   2.5175848e+00   3.2317517e+00   2.8135312e+00   3.3502574e+00   3.0859108e+00   2.3316915e+00   3.3832002e+00   2.7540885e+00   2.5906747e+00   3.8459512e+00   2.7110494e+00   3.2296198e+00   2.8510843e+00   3.6612067e+00   3.0231940e+00   3.1083626e+00   3.3221751e+00   3.6999785e+00   3.7824508e+00   3.0223050e+00   2.4549511e+00   2.7813487e+00   2.6993734e+00   2.6424988e+00   3.4348309e+00   2.6680185e+00   3.0548262e+00   3.5357687e+00   3.6340473e+00   2.4474337e+00   2.8211532e+00   2.7662396e+00   3.0069386e+00   2.7817508e+00   2.6121651e+00   2.7000040e+00   2.4677010e+00   2.5915377e+00   2.9544131e+00   2.2712863e+00   2.6277952e+00   4.4682388e+00   3.5629824e+00   4.6484688e+00   3.8140427e+00   4.2790737e+00   5.2629823e+00   3.1332305e+00   4.7710813e+00   4.3977196e+00   5.1429155e+00   3.8634878e+00   3.9555042e+00   4.3021827e+00   3.7450218e+00   3.9451569e+00   4.1141318e+00   3.8729361e+00   5.5923916e+00   5.7171219e+00   3.8836634e+00   4.5660923e+00   3.4290419e+00   5.3764415e+00   3.6907517e+00   4.2782894e+00   4.5393827e+00   3.5302656e+00   3.4102235e+00   4.1476932e+00   4.3814983e+00   4.8831870e+00   5.5467550e+00   4.2276454e+00   3.4820326e+00   3.6311162e+00   5.3207972e+00   4.2656428e+00   3.7854499e+00   3.3178004e+00   4.3254709e+00   4.4873379e+00   4.3956810e+00   3.5629824e+00   4.5607328e+00   4.6030755e+00   4.3016138e+00   3.9622355e+00   3.9225802e+00   4.0577906e+00   3.3684813e+00   
1.2515236e+00   2.1021589e+00   7.0646671e-01   8.4383266e-01   5.2374483e-01   3.8525820e-01   1.6876653e+00   7.3502408e-01   3.6319073e-01   5.0299964e-01   2.5372015e+00   3.2897546e+00   2.1021589e+00   1.1117653e+00   2.0978519e+00   1.7286097e+00   1.1937015e+00   1.5323598e+00   1.1874605e+00   8.6297946e-01   7.6710016e-01   5.3827772e-01   8.8540687e-01   1.1730565e+00   1.0034646e+00   2.6643250e-01   2.4808718e-01   1.2168625e+00   2.4209958e+00   2.7605308e+00   3.8525820e-01   5.6164055e-01   1.4300979e+00   3.8525820e-01   3.5266705e-01   9.1831777e-01   1.0556536e+00   1.8570904e+00   3.5266705e-01   1.1671832e+00   1.7581227e+00   3.6319073e-01   1.7245677e+00   2.3749211e-01   1.6215602e+00   6.7030885e-01   3.7702988e+00   3.2513048e+00   3.7854692e+00   2.9445918e+00   3.4252075e+00   2.6854877e+00   3.3289662e+00   2.2084365e+00   3.3482393e+00   2.3872006e+00   3.0088381e+00   2.7858967e+00   3.2051999e+00   3.0423378e+00   2.2727200e+00   3.4066596e+00   2.7026316e+00   2.4942728e+00   3.7194777e+00   2.5718070e+00   3.2266664e+00   2.8009311e+00   3.5699473e+00   2.9607930e+00   3.0876835e+00   3.3257458e+00   3.6752903e+00   3.7792524e+00   2.9772187e+00   2.3405405e+00   2.6225184e+00   2.5379456e+00   2.5530959e+00   3.3522700e+00   2.6036900e+00   3.0973166e+00   3.5528019e+00   3.5210610e+00   2.4001125e+00   2.6797931e+00   2.6323855e+00   2.9822614e+00   2.6747723e+00   2.4035668e+00   2.5939621e+00   2.4241443e+00   2.5291529e+00   2.9226858e+00   2.0959349e+00   2.5480036e+00   4.4738080e+00   3.4750769e+00   4.6459778e+00   3.7712854e+00   4.2573656e+00   5.2660922e+00   2.9665251e+00   4.7582165e+00   4.3221591e+00   5.2027091e+00   3.8786505e+00   3.8957203e+00   4.2952898e+00   3.6300721e+00   3.8796854e+00   4.1217239e+00   3.8539421e+00   5.6722969e+00   5.6818418e+00   3.7421166e+00   4.5832488e+00   3.3486395e+00   5.3611963e+00   3.6296692e+00   4.3021827e+00   4.5620086e+00   3.4793229e+00   3.3827920e+00   4.0990020e+00   4.3836503e+00   4.8653269e+00   5.6359100e+00   4.1798856e+00   3.4290065e+00   3.5331571e+00   5.3326389e+00   4.2899049e+00   3.7762521e+00   3.2871170e+00   4.3357622e+00   4.4879065e+00   4.4101806e+00   3.4750769e+00   4.5719131e+00   4.6252914e+00   4.2958117e+00   3.8764095e+00   3.9087616e+00   4.0828629e+00   3.3281063e+00   8.9980139e-01   6.8064066e-01   4.6472955e-01   1.7695601e+00   1.1682143e+00   5.3827772e-01   5.3286499e-01   1.4108003e+00   1.6357068e+00   1.3406177e+00   2.0471148e+00   8.8540687e-01   2.9145160e-01   9.8663349e-01   4.9772204e-01   6.8921053e-01   3.7371902e-01   5.3360548e-01   8.2263932e-01   5.9279023e-01   1.3884168e+00   5.4248468e-01   3.3872939e-01   5.2066928e-01   9.9757114e-01   1.1836141e+00   7.1971771e-01   1.1867923e+00   1.5097838e+00   1.1682143e+00   9.2945909e-01   6.4884272e-01   1.1682143e+00   1.5630357e+00   4.8016385e-01   2.7124234e-01   3.0617227e+00   1.1744248e+00   5.8374436e-01   6.1345624e-01   1.4108003e+00   4.9009568e-01   1.0413386e+00   4.3319335e-01   6.9189100e-01   3.5573943e+00   3.1141702e+00   3.6648465e+00   3.6881887e+00   3.5883824e+00   3.0468381e+00   3.1315658e+00   3.1515797e+00   3.4214871e+00   2.9743594e+00   4.0164863e+00   2.9182492e+00   3.8928105e+00   3.2158144e+00   2.6006889e+00   3.3027067e+00   2.9031809e+00   2.9465762e+00   4.3027855e+00   3.2186029e+00   3.1845052e+00   3.0688421e+00   3.9670252e+00   3.2223969e+00   3.2005819e+00   3.3183883e+00   3.7835219e+00   3.7624114e+00   3.1706932e+00   2.9238797e+00   3.3563322e+00   
3.2896970e+00   2.9943889e+00   3.6782511e+00   2.8525048e+00   2.8501434e+00   3.4560405e+00   4.0524463e+00   2.6189855e+00   3.3240937e+00   3.2080454e+00   3.0726532e+00   3.1844624e+00   3.3537486e+00   3.0707196e+00   2.6200714e+00   2.8136838e+00   3.0798325e+00   2.9434145e+00   2.9219863e+00   4.3385668e+00   3.8211670e+00   4.5879449e+00   3.8900790e+00   4.2758494e+00   5.1630878e+00   3.6606996e+00   4.7359279e+00   4.6113949e+00   4.8204842e+00   3.7540483e+00   4.1248784e+00   4.2705921e+00   4.1081181e+00   4.1285849e+00   4.0208302e+00   3.8718630e+00   5.1706118e+00   5.7653526e+00   4.3529699e+00   4.4302351e+00   3.6643052e+00   5.3499285e+00   3.8863166e+00   4.1025634e+00   4.3705834e+00   3.6895783e+00   3.4655259e+00   4.2576017e+00   4.3078329e+00   4.8848930e+00   5.1059908e+00   4.3355275e+00   3.6287723e+00   3.9015858e+00   5.2167033e+00   4.0809175e+00   3.7394250e+00   3.3885059e+00   4.2346521e+00   4.4182506e+00   4.3085803e+00   3.8211670e+00   4.4331391e+00   4.4402220e+00   4.2825037e+00   4.2456731e+00   3.9239772e+00   3.8761056e+00   3.4480528e+00   1.5196051e+00   1.2824303e+00   2.6220224e+00   1.9839744e+00   5.4248468e-01   1.3880441e+00   2.2398377e+00   2.5185753e+00   6.5993495e-01   1.2140269e+00   2.2670334e-01   1.0140902e+00   4.4901474e-01   4.6310132e-01   1.1825559e+00   5.9738093e-01   1.2799022e+00   1.4364110e+00   1.3915110e+00   2.1479410e+00   1.2515236e+00   9.9544409e-01   1.2188859e+00   1.8419620e+00   2.0002725e+00   1.1587093e+00   6.6217390e-01   7.6869104e-01   1.9839744e+00   1.7287715e+00   9.9282597e-01   1.9839744e+00   2.4262873e+00   1.2419907e+00   1.0737552e+00   3.8557204e+00   2.0456732e+00   1.0753036e+00   4.4417668e-01   2.2089621e+00   5.0629646e-01   1.9071648e+00   5.5576380e-01   1.4985933e+00   3.3087308e+00   2.9428879e+00   3.4716469e+00   4.0891690e+00   3.6027276e+00   3.2367228e+00   2.9085289e+00   3.7111791e+00   3.3928277e+00   3.3150290e+00   4.5928107e+00   2.9594198e+00   4.2678298e+00   3.2621413e+00   2.8156205e+00   3.1507918e+00   2.9937665e+00   3.2188610e+00   4.5717893e+00   3.5888229e+00   3.0697068e+00   3.2000980e+00   4.1198780e+00   3.3377308e+00   3.2155231e+00   3.2352945e+00   3.7547729e+00   3.6294383e+00   3.2310889e+00   3.2831808e+00   3.7736317e+00   3.7263104e+00   3.2475076e+00   3.7907966e+00   2.9840079e+00   2.6164035e+00   3.2920107e+00   4.3046992e+00   2.7582086e+00   3.6782988e+00   3.5276458e+00   3.0726914e+00   3.4670753e+00   3.9103065e+00   3.3340819e+00   2.7470239e+00   2.9746673e+00   3.1328218e+00   3.4555378e+00   3.1318122e+00   4.0752096e+00   3.9330937e+00   4.3762388e+00   3.8407425e+00   4.1274362e+00   4.9032073e+00   4.0261574e+00   4.5547766e+00   4.6534818e+00   4.3582690e+00   3.5326627e+00   4.1405270e+00   4.0947877e+00   4.2953569e+00   4.1533819e+00   3.7981559e+00   3.7518931e+00   4.6218430e+00   5.6203182e+00   4.6338556e+00   4.1503558e+00   3.7655153e+00   5.1552615e+00   3.9363994e+00   3.8053980e+00   4.0787299e+00   3.7177375e+00   3.4172455e+00   4.2121483e+00   4.1116484e+00   4.7277367e+00   4.5452377e+00   4.2828893e+00   3.6617103e+00   4.0381240e+00   4.9437127e+00   3.7768622e+00   3.5869495e+00   3.3594066e+00   4.0056296e+00   4.1979142e+00   4.0739556e+00   3.9330937e+00   4.1628496e+00   4.1341118e+00   4.1117268e+00   4.3552101e+00   3.7951858e+00   3.5859295e+00   3.4280549e+00   5.0370871e-01   1.1854382e+00   8.2574748e-01   1.1968529e+00   2.9724335e-01   9.8896933e-01   1.0391769e+00   2.0112023e+00   2.6653431e+00   
1.5111262e+00   6.4636269e-01   1.6262201e+00   1.1040164e+00   9.8966705e-01   9.2934901e-01   5.3040146e-01   7.1789533e-01   3.9472619e-01   1.0556536e+00   5.1318506e-01   7.7368489e-01   7.3633268e-01   5.0731024e-01   7.5303835e-01   9.7659801e-01   1.7897583e+00   2.1453760e+00   8.2574748e-01   6.9001472e-01   1.1202045e+00   8.2574748e-01   9.6424206e-01   6.2046469e-01   5.3827772e-01   2.5404386e+00   5.3988754e-01   6.7372733e-01   1.1459117e+00   9.5361455e-01   1.1160907e+00   4.7951153e-01   1.1016806e+00   5.5109043e-01   3.7667766e+00   3.2399456e+00   3.8211099e+00   3.3920086e+00   3.5974024e+00   2.9126202e+00   3.2661365e+00   2.7296888e+00   3.4884812e+00   2.6863536e+00   3.5939578e+00   2.8820992e+00   3.6783922e+00   3.1916608e+00   2.4616677e+00   3.4465420e+00   2.8051321e+00   2.8088029e+00   4.1173049e+00   2.9788024e+00   3.2021702e+00   3.0140681e+00   3.8639979e+00   3.1756883e+00   3.2362495e+00   3.4136564e+00   3.8424217e+00   3.8484723e+00   3.1221021e+00   2.7251978e+00   3.0739866e+00   3.0068046e+00   2.8468838e+00   3.5726594e+00   2.7099355e+00   2.9743925e+00   3.5889631e+00   3.9062347e+00   2.5235038e+00   3.0623697e+00   2.9777787e+00   3.0820765e+00   3.0110500e+00   2.9445347e+00   2.8809757e+00   2.5543631e+00   2.7073441e+00   3.0792230e+00   2.5679169e+00   2.7817508e+00   4.4017095e+00   3.6741422e+00   4.6939918e+00   3.8825162e+00   4.3049794e+00   5.3127345e+00   3.3002804e+00   4.8514877e+00   4.5630927e+00   5.0475012e+00   3.8518880e+00   4.0758615e+00   4.3442223e+00   3.8984747e+00   3.9980344e+00   4.0855291e+00   3.9215612e+00   5.4851910e+00   5.8312870e+00   4.1416475e+00   4.5535489e+00   3.5028870e+00   5.4694510e+00   3.8234999e+00   4.2411026e+00   4.5531893e+00   3.6365882e+00   3.4540568e+00   4.2272070e+00   4.4531017e+00   4.9839411e+00   5.4520873e+00   4.3016933e+00   3.6054769e+00   3.7985950e+00   5.3693255e+00   4.1776636e+00   3.8035129e+00   3.3594554e+00   4.3469553e+00   4.4886880e+00   4.4112274e+00   3.6741422e+00   4.5435532e+00   4.5534752e+00   4.3346048e+00   4.1329859e+00   3.9643709e+00   3.9674740e+00   3.4024060e+00   1.3630799e+00   7.1446962e-01   8.4383266e-01   2.4808718e-01   9.6424206e-01   1.2783641e+00   1.6962086e+00   2.4702874e+00   1.2824303e+00   2.9691107e-01   1.2631980e+00   9.3957399e-01   4.9617437e-01   7.4965096e-01   7.2553812e-01   4.8492463e-01   3.3125444e-01   9.2426065e-01   2.6812643e-01   3.3395426e-01   2.4808718e-01   5.8926015e-01   7.3502408e-01   5.4956349e-01   1.6376300e+00   1.9421609e+00   7.1446962e-01   4.9160020e-01   6.5622658e-01   7.1446962e-01   1.1785203e+00   1.2076330e-01   2.8845946e-01   2.6135503e+00   8.6638670e-01   5.7543116e-01   9.9282597e-01   9.6424206e-01   9.3211669e-01   6.7030885e-01   7.8100392e-01   2.3749211e-01   3.4362741e+00   2.9772187e+00   3.5154539e+00   3.2993605e+00   3.3443673e+00   2.7564382e+00   3.0285957e+00   2.7337208e+00   3.1980682e+00   2.6433553e+00   3.5789723e+00   2.6973512e+00   3.4963203e+00   2.9759207e+00   2.3127671e+00   3.1412273e+00   2.6774449e+00   2.6095931e+00   3.9436181e+00   2.8415480e+00   3.0475523e+00   2.7865107e+00   3.6589666e+00   2.9471549e+00   2.9637920e+00   3.1238401e+00   3.5511257e+00   3.5866237e+00   2.9292978e+00   2.5500042e+00   2.9620296e+00   2.8874183e+00   2.6658729e+00   3.4048426e+00   2.6224103e+00   2.7775195e+00   3.2991484e+00   3.6986035e+00   2.3717154e+00   2.9594198e+00   2.8613259e+00   2.8592001e+00   2.8393888e+00   2.9284199e+00   2.7478281e+00   2.3715604e+00   
2.5423572e+00   2.8329679e+00   2.5370255e+00   2.6226761e+00   4.2550363e+00   3.5587552e+00   4.4384175e+00   3.6863990e+00   4.1157774e+00   5.0262130e+00   3.3282378e+00   4.5651798e+00   4.3425674e+00   4.8115590e+00   3.6359466e+00   3.8813909e+00   4.1126592e+00   3.8106374e+00   3.9142555e+00   3.9091502e+00   3.6969354e+00   5.1996380e+00   5.5654572e+00   3.9942942e+00   4.3261607e+00   3.4228883e+00   5.1764012e+00   3.6303894e+00   4.0165247e+00   4.2627936e+00   3.4508614e+00   3.2748167e+00   4.0461381e+00   4.1489951e+00   4.6983104e+00   5.1372583e+00   4.1280577e+00   3.3829235e+00   3.6112201e+00   5.0844328e+00   4.0217556e+00   3.5877667e+00   3.1942059e+00   4.1021303e+00   4.2899049e+00   4.1807095e+00   3.5587552e+00   4.3276502e+00   4.3608503e+00   4.1273623e+00   3.9585431e+00   3.7540483e+00   3.8154519e+00   3.2543470e+00   7.7313507e-01   2.2058495e+00   1.2553676e+00   5.5109043e-01   3.3742167e-01   3.0507869e+00   3.8084614e+00   2.6171176e+00   1.6268459e+00   2.6113513e+00   2.2457527e+00   1.6784057e+00   2.0471148e+00   1.6480463e+00   1.3225313e+00   1.2819528e+00   7.6880092e-01   1.3915110e+00   1.6886167e+00   1.5043671e+00   7.8918675e-01   6.7745878e-01   1.6911617e+00   2.9348176e+00   3.2792996e+00   7.7313507e-01   1.0082548e+00   1.9190366e+00   7.7313507e-01   2.3749211e-01   1.4309353e+00   1.5685186e+00   1.3963590e+00   6.9420840e-01   1.6514950e+00   2.2742046e+00   5.5109043e-01   2.2440749e+00   7.3283576e-01   2.1421205e+00   1.1730565e+00   4.0382971e+00   3.5072516e+00   4.0204750e+00   2.8157524e+00   3.5602797e+00   2.7716933e+00   3.6026548e+00   1.9881684e+00   3.5249607e+00   2.3719578e+00   2.7108793e+00   2.9588139e+00   3.1000099e+00   3.1914275e+00   2.3942229e+00   3.6456797e+00   2.8533918e+00   2.5518058e+00   3.6496659e+00   2.5198146e+00   3.4451131e+00   2.9183683e+00   3.5989425e+00   3.0794356e+00   3.2576813e+00   3.5320163e+00   3.8261152e+00   3.9741896e+00   3.1180182e+00   2.3364082e+00   2.5170134e+00   2.4274466e+00   2.6068690e+00   3.4218333e+00   2.7386417e+00   3.3934324e+00   3.7851452e+00   3.4854109e+00   2.5636830e+00   2.6200486e+00   2.6174942e+00   3.1669559e+00   2.6880360e+00   2.1675629e+00   2.6284424e+00   2.5986852e+00   2.6571682e+00   3.0828753e+00   1.9438939e+00   2.6338308e+00   4.6899445e+00   3.5257224e+00   4.8360835e+00   3.9149039e+00   4.4239486e+00   5.4654338e+00   2.8575769e+00   4.9368840e+00   4.3795070e+00   5.4971381e+00   4.1073887e+00   3.9867349e+00   4.4778796e+00   3.6113294e+00   3.9521234e+00   4.3324165e+00   4.0348180e+00   6.0067497e+00   5.8007868e+00   3.6612067e+00   4.8067419e+00   3.4133837e+00   5.5245725e+00   3.7158962e+00   4.5501061e+00   4.8067452e+00   3.5898573e+00   3.5494058e+00   4.2132256e+00   4.5903044e+00   5.0235775e+00   5.9805492e+00   4.2919572e+00   3.5520531e+00   3.5821954e+00   5.5319518e+00   4.5355231e+00   3.9801247e+00   3.4489678e+00   4.5459210e+00   4.6801759e+00   4.6148655e+00   3.5257224e+00   4.7911899e+00   4.8567489e+00   4.4697071e+00   3.9039516e+00   4.0848538e+00   4.3320055e+00   3.4824517e+00   1.5154593e+00   7.1671402e-01   2.6643250e-01   7.9347379e-01   2.3528246e+00   3.1744385e+00   1.9839744e+00   9.9059199e-01   1.9029496e+00   1.6532822e+00   9.3451915e-01   1.4586696e+00   1.2483935e+00   7.4743804e-01   7.4957404e-01   2.9691107e-01   8.0686941e-01   9.9973471e-01   7.9394533e-01   3.6319073e-01   1.8699153e-01   9.9891776e-01   2.3345854e+00   2.6422396e+00   0.0000000e+00   3.3742167e-01   1.1857824e+00   
0.0000000e+00   6.6918102e-01   7.4445830e-01   9.7322023e-01   1.9284841e+00   6.6918102e-01   1.1393372e+00   1.6942803e+00   3.7371902e-01   1.6386105e+00   4.5257749e-01   1.4715172e+00   4.9772204e-01   3.5602797e+00   3.0968979e+00   3.5933352e+00   2.8998722e+00   3.2656298e+00   2.6029746e+00   3.1974447e+00   2.2445103e+00   3.1601923e+00   2.3946216e+00   3.0209665e+00   2.6867317e+00   3.0775658e+00   2.9161244e+00   2.1946278e+00   3.2137635e+00   2.6502891e+00   2.3652711e+00   3.6096140e+00   2.4893065e+00   3.1578003e+00   2.6568473e+00   3.4426472e+00   2.8187901e+00   2.9128857e+00   3.1418202e+00   3.4847097e+00   3.6205958e+00   2.8694312e+00   2.2223283e+00   2.5588203e+00   2.4651138e+00   2.4413293e+00   3.2621861e+00   2.5834128e+00   2.9995857e+00   3.3733791e+00   3.3817876e+00   2.3263008e+00   2.6305758e+00   2.5756071e+00   2.8533918e+00   2.5683063e+00   2.4187322e+00   2.5258216e+00   2.3250308e+00   2.4413618e+00   2.7691536e+00   2.1006933e+00   2.4608850e+00   4.4119896e+00   3.4290419e+00   4.4942656e+00   3.6650933e+00   4.1590662e+00   5.0899438e+00   3.0333619e+00   4.5799469e+00   4.1882413e+00   5.0726081e+00   3.7613721e+00   3.7859992e+00   4.1623715e+00   3.6029758e+00   3.8608065e+00   4.0352348e+00   3.7266849e+00   5.5043247e+00   5.5172732e+00   3.6569442e+00   4.4568115e+00   3.3324016e+00   5.1765224e+00   3.5192065e+00   4.1799642e+00   4.3857400e+00   3.3769078e+00   3.2906843e+00   4.0034548e+00   4.1917183e+00   4.6854597e+00   5.4444866e+00   4.0904298e+00   3.2962678e+00   3.4250783e+00   5.1569386e+00   4.2213314e+00   3.6582637e+00   3.2059261e+00   4.1937040e+00   4.3826532e+00   4.2786320e+00   3.4290419e+00   4.4549850e+00   4.5270304e+00   4.1816268e+00   3.7777251e+00   3.7924144e+00   4.0173731e+00   3.2613828e+00   1.0034646e+00   1.7753099e+00   2.1070188e+00   8.6079202e-01   1.6751898e+00   5.4248468e-01   6.0365341e-01   4.6310132e-01   4.4901474e-01   7.0111465e-01   4.4713936e-01   1.0328871e+00   1.0722301e+00   1.0271920e+00   1.6860841e+00   8.8540687e-01   5.2066928e-01   7.3502408e-01   1.4309353e+00   1.5630357e+00   7.3985997e-01   9.6257499e-01   1.1608422e+00   1.5154593e+00   1.2617482e+00   4.9009568e-01   1.5154593e+00   2.0192952e+00   7.8100392e-01   6.9001472e-01   3.4112480e+00   1.6736143e+00   8.5090098e-01   5.5183182e-01   1.7753099e+00   4.3319335e-01   1.5054343e+00   1.2076330e-01   1.0428797e+00   3.2901237e+00   2.9292978e+00   3.4367371e+00   3.8111737e+00   3.4729880e+00   3.0672734e+00   2.9473505e+00   3.3901879e+00   3.2662947e+00   3.1144880e+00   4.2413542e+00   2.8660589e+00   3.9495965e+00   3.1433256e+00   2.6375433e+00   3.0908568e+00   2.9081458e+00   2.9702968e+00   4.3236374e+00   3.3103937e+00   3.0964304e+00   3.0179755e+00   3.9313682e+00   3.1669001e+00   3.0754584e+00   3.1432906e+00   3.6245464e+00   3.5873526e+00   3.1179878e+00   2.9918256e+00   3.4776059e+00   3.4142800e+00   3.0198617e+00   3.6568155e+00   2.8980988e+00   2.6944324e+00   3.2512254e+00   4.0479095e+00   2.6293791e+00   3.4291613e+00   3.2968205e+00   2.9799791e+00   3.2239661e+00   3.5774656e+00   3.1299499e+00   2.6069579e+00   2.8203994e+00   2.9888842e+00   3.1470877e+00   2.9476507e+00   4.1975965e+00   3.8311128e+00   4.3861256e+00   3.7921747e+00   4.1446583e+00   4.9211801e+00   3.8442384e+00   4.5238497e+00   4.5231439e+00   4.5451210e+00   3.5805575e+00   4.0469570e+00   4.0990882e+00   4.1579560e+00   4.1249171e+00   3.8727781e+00   3.7290616e+00   4.8292468e+00   5.5757877e+00   4.3965263e+00   
4.2248397e+00   3.6936498e+00   5.1256160e+00   3.8221804e+00   3.8961870e+00   4.1176453e+00   3.6242665e+00   3.3807801e+00   4.1671042e+00   4.0787299e+00   4.6798442e+00   4.7374542e+00   4.2466909e+00   3.5432140e+00   3.8759165e+00   4.9689016e+00   3.9204411e+00   3.5927937e+00   3.3203721e+00   4.0348754e+00   4.2531600e+00   4.1147391e+00   3.8311128e+00   4.2401451e+00   4.2502727e+00   4.1279956e+00   4.2116129e+00   3.7856899e+00   3.7242025e+00   3.3954918e+00   9.3865015e-01   1.1459117e+00   1.8505741e+00   2.5636327e+00   1.3972701e+00   4.6310132e-01   1.4327294e+00   1.0013399e+00   7.2679299e-01   8.2574748e-01   6.2187934e-01   5.8496636e-01   1.7002750e-01   9.5361455e-01   3.5639126e-01   5.3827772e-01   4.9617437e-01   4.7680727e-01   6.9189100e-01   7.7259801e-01   1.6911681e+00   2.0326426e+00   7.1671402e-01   5.6788283e-01   8.9258315e-01   7.1671402e-01   1.0551281e+00   3.6669623e-01   3.9699460e-01   2.5716465e+00   6.8921053e-01   6.2148529e-01   1.0391769e+00   9.3865015e-01   9.9111027e-01   5.3286499e-01   9.2006504e-01   3.5266705e-01   3.5819899e+00   3.0902007e+00   3.6482741e+00   3.3284223e+00   3.4528558e+00   2.8062636e+00   3.1283731e+00   2.7130802e+00   3.3201500e+00   2.6478976e+00   3.5696084e+00   2.7728704e+00   3.5649049e+00   3.0583917e+00   2.3718219e+00   3.2763163e+00   2.7180029e+00   2.6779045e+00   4.0150919e+00   2.8864805e+00   3.1083977e+00   2.8822332e+00   3.7402558e+00   3.0304088e+00   3.0793541e+00   3.2506893e+00   3.6756049e+00   3.7003058e+00   3.0054875e+00   2.6160903e+00   2.9965118e+00   2.9238797e+00   2.7351276e+00   3.4650507e+00   2.6418165e+00   2.8577585e+00   3.4252075e+00   3.7832878e+00   2.4227175e+00   2.9917855e+00   2.8903467e+00   2.9460322e+00   2.9034030e+00   2.9191699e+00   2.7908173e+00   2.4332562e+00   2.6000033e+00   2.9338362e+00   2.5416562e+00   2.6797931e+00   4.3169600e+00   3.6002350e+00   4.5501061e+00   3.7611228e+00   4.1952298e+00   5.1491201e+00   3.2996439e+00   4.6835622e+00   4.4311275e+00   4.9194005e+00   3.7316761e+00   3.9622355e+00   4.2152785e+00   3.8426553e+00   3.9520057e+00   3.9894324e+00   3.7877346e+00   5.3234168e+00   5.6801405e+00   4.0465336e+00   4.4289683e+00   3.4509848e+00   5.3007226e+00   3.7123045e+00   4.1128937e+00   4.3848871e+00   3.5295907e+00   3.3480190e+00   4.1214168e+00   4.2758432e+00   4.8208082e+00   5.2754050e+00   4.2018689e+00   3.4688327e+00   3.6716137e+00   5.2146461e+00   4.0903576e+00   3.6733277e+00   3.2612647e+00   4.2126998e+00   4.3809966e+00   4.2914999e+00   3.6002350e+00   4.4223824e+00   4.4497684e+00   4.2250047e+00   4.0330034e+00   3.8460049e+00   3.8818416e+00   3.3084835e+00   6.2729876e-01   2.6091054e+00   3.4299177e+00   2.2340939e+00   1.2368073e+00   2.1640372e+00   1.8992968e+00   1.1925354e+00   1.7015647e+00   1.4288989e+00   9.5582164e-01   9.7391954e-01   2.9724335e-01   1.0376697e+00   1.2583645e+00   1.0495503e+00   5.0731024e-01   2.8845946e-01   1.2384679e+00   2.5831347e+00   2.8967543e+00   2.6643250e-01   5.4873947e-01   1.4369223e+00   2.6643250e-01   5.0370871e-01   1.0013399e+00   1.2082987e+00   1.6761482e+00   6.8299624e-01   1.3528452e+00   1.9417181e+00   2.6206799e-01   1.8882412e+00   5.3690447e-01   1.7295385e+00   7.4445830e-01   3.6974389e+00   3.2246529e+00   3.7127916e+00   2.8221999e+00   3.3289662e+00   2.6369282e+00   3.3348648e+00   2.1165503e+00   3.2465431e+00   2.3709411e+00   2.8608899e+00   2.7655496e+00   3.0110500e+00   2.9862341e+00   2.2391292e+00   3.3332541e+00   2.7176350e+00   
2.3810734e+00   3.5657791e+00   2.4469788e+00   3.2638546e+00   2.7057900e+00   3.4512743e+00   2.8728052e+00   2.9934131e+00   3.2431147e+00   3.5582624e+00   3.7179545e+00   2.9335017e+00   2.1999209e+00   2.4893065e+00   2.3915367e+00   2.4540260e+00   3.2923304e+00   2.6414379e+00   3.1466196e+00   3.4901671e+00   3.3542627e+00   2.3973915e+00   2.5861640e+00   2.5561973e+00   2.9420425e+00   2.5609364e+00   2.2836399e+00   2.5303888e+00   2.4035745e+00   2.4950488e+00   2.8435005e+00   2.0000785e+00   2.4916202e+00   4.5218181e+00   3.4492861e+00   4.5921002e+00   3.7366932e+00   4.2432727e+00   5.1949299e+00   2.9709788e+00   4.6735988e+00   4.2160797e+00   5.2249989e+00   3.8759828e+00   3.8289542e+00   4.2545385e+00   3.5878027e+00   3.8924868e+00   4.1403048e+00   3.8179103e+00   5.6801405e+00   5.5805905e+00   3.6100547e+00   4.5709635e+00   3.3584734e+00   5.2629823e+00   3.5576748e+00   4.3070506e+00   4.5135384e+00   3.4273212e+00   3.3707040e+00   4.0596064e+00   4.2990937e+00   4.7676077e+00   5.6256469e+00   4.1454035e+00   3.3551224e+00   3.4472672e+00   5.2603070e+00   4.3452859e+00   3.7614312e+00   3.2825924e+00   4.3002370e+00   4.4796258e+00   4.3809022e+00   3.4492861e+00   4.5673965e+00   4.6446301e+00   4.2677070e+00   3.7864370e+00   3.8796124e+00   4.1423594e+00   3.3352922e+00   2.9361142e+00   3.6728262e+00   2.4980859e+00   1.5364600e+00   2.5390784e+00   2.1113072e+00   1.6578570e+00   1.9353585e+00   1.4375287e+00   1.3425463e+00   1.1993279e+00   9.0115406e-01   1.3418210e+00   1.6061161e+00   1.4416694e+00   7.3727571e-01   7.1781501e-01   1.6797637e+00   2.7692440e+00   3.1313820e+00   7.9347379e-01   9.7352372e-01   1.8600648e+00   7.9347379e-01   2.1119253e-01   1.3612390e+00   1.4580439e+00   1.6571634e+00   5.0731024e-01   1.5980974e+00   2.1674065e+00   6.7984069e-01   2.1059482e+00   6.2457556e-01   2.0330443e+00   1.1132823e+00   4.2319020e+00   3.7044235e+00   4.2326669e+00   3.1432906e+00   3.8172627e+00   3.0425144e+00   3.7866079e+00   2.3206274e+00   3.7650177e+00   2.6608343e+00   3.0454230e+00   3.1914925e+00   3.4221447e+00   3.4413652e+00   2.6453561e+00   3.8539838e+00   3.0892079e+00   2.8357998e+00   3.9683086e+00   2.8336784e+00   3.6477040e+00   3.1799040e+00   3.8944724e+00   3.3434129e+00   3.4994776e+00   3.7569353e+00   4.0775935e+00   4.2049294e+00   3.3684490e+00   2.6363662e+00   2.8414019e+00   2.7526519e+00   2.8906655e+00   3.7008234e+00   2.9737492e+00   3.5555916e+00   3.9977110e+00   3.7960948e+00   2.7978670e+00   2.9332077e+00   2.9200513e+00   3.4002561e+00   2.9851921e+00   2.5032729e+00   2.9159414e+00   2.8324645e+00   2.9104902e+00   3.3286096e+00   2.2670901e+00   2.9042354e+00   4.8902416e+00   3.8029663e+00   5.0697571e+00   4.1657422e+00   4.6611282e+00   5.6979337e+00   3.1565039e+00   5.1794156e+00   4.6677357e+00   5.6656906e+00   4.3138249e+00   4.2590423e+00   4.7118516e+00   3.9079657e+00   4.2090892e+00   4.5409993e+00   4.2707579e+00   6.1569683e+00   6.0684263e+00   3.9837013e+00   5.0178221e+00   3.6761530e+00   5.7743610e+00   3.9890690e+00   4.7480354e+00   5.0151961e+00   3.8518880e+00   3.7849163e+00   4.4740109e+00   4.8191103e+00   5.2745801e+00   6.1258486e+00   4.5520039e+00   3.8145791e+00   3.8707244e+00   5.7618968e+00   4.7193263e+00   4.2030300e+00   3.6843246e+00   4.7664506e+00   4.9031551e+00   4.8333734e+00   3.8029663e+00   5.0038635e+00   5.0562579e+00   4.7021393e+00   4.1966653e+00   4.3193180e+00   4.5127918e+00   3.7195417e+00   9.8143688e-01   5.9868400e-01   1.4402716e+00   
5.6992880e-01   9.8663349e-01   1.4928150e+00   1.1361809e+00   1.7216149e+00   1.8856736e+00   1.8789959e+00   2.5107352e+00   1.7228721e+00   1.3724737e+00   1.5661153e+00   2.2847787e+00   2.4120925e+00   1.4985933e+00   7.9011741e-01   5.9738093e-01   2.3528246e+00   2.0826771e+00   1.2100516e+00   2.3528246e+00   2.8601865e+00   1.6304499e+00   1.5111262e+00   4.2257604e+00   2.5031609e+00   1.6091095e+00   1.0739839e+00   2.6091054e+00   9.8932340e-01   2.3488496e+00   9.3392552e-01   1.8848176e+00   3.4513181e+00   3.2138675e+00   3.6568023e+00   4.4832075e+00   3.8715598e+00   3.6399979e+00   3.2048569e+00   4.1609431e+00   3.6276988e+00   3.7852753e+00   5.0007603e+00   3.3356061e+00   4.5726588e+00   3.6020324e+00   3.2283316e+00   3.3543125e+00   3.4317803e+00   3.5762357e+00   4.8853618e+00   3.9697083e+00   3.4608105e+00   3.5194529e+00   4.4307529e+00   3.6664068e+00   3.4821665e+00   3.4661369e+00   3.9690303e+00   3.8732280e+00   3.5908374e+00   3.6384054e+00   4.1605480e+00   4.1054173e+00   3.6121763e+00   4.1591449e+00   3.4571840e+00   2.9726287e+00   3.5108834e+00   4.5938444e+00   3.1869025e+00   4.0859475e+00   3.9449709e+00   3.4111584e+00   3.8289196e+00   4.3382952e+00   3.7437946e+00   3.1530215e+00   3.3792174e+00   3.4400302e+00   3.8876642e+00   3.5288768e+00   4.4107365e+00   4.3401559e+00   4.5910423e+00   4.1731099e+00   4.4383007e+00   5.0605479e+00   4.5288382e+00   4.7400085e+00   4.9337127e+00   4.5282137e+00   3.8167418e+00   4.4582410e+00   4.3491396e+00   4.7099690e+00   4.5667632e+00   4.1110524e+00   4.0457882e+00   4.6970439e+00   5.8050200e+00   4.9831785e+00   4.3869525e+00   4.2045486e+00   5.3107398e+00   4.2598936e+00   4.0608563e+00   4.2495756e+00   4.0560071e+00   3.7740189e+00   4.5388860e+00   4.2824837e+00   4.9058468e+00   4.5708763e+00   4.6120616e+00   3.9763551e+00   4.3872260e+00   5.0860672e+00   4.0998055e+00   3.8946369e+00   3.7330702e+00   4.2352833e+00   4.4742220e+00   4.3047259e+00   4.3401559e+00   4.4192906e+00   4.4017154e+00   4.3831142e+00   4.6832544e+00   4.0909816e+00   3.9225445e+00   3.8229302e+00   1.2140269e+00   2.2031378e+00   1.3945864e+00   1.5674943e+00   2.3519816e+00   1.7695601e+00   2.3060361e+00   2.6440626e+00   2.5730128e+00   3.3484034e+00   2.4570006e+00   2.1775427e+00   2.3990667e+00   3.0314484e+00   3.2003667e+00   2.3345854e+00   9.9891776e-01   5.8565201e-01   3.1744385e+00   2.9106021e+00   2.1090950e+00   3.1744385e+00   3.6015962e+00   2.4316107e+00   2.2478972e+00   5.0583620e+00   3.1946200e+00   2.2571919e+00   1.5783735e+00   3.4098353e+00   1.5848534e+00   3.0815481e+00   1.7053877e+00   2.6874763e+00   3.8897688e+00   3.6527400e+00   4.1085323e+00   5.1878354e+00   4.4401043e+00   4.2306533e+00   3.5668974e+00   4.8855250e+00   4.1984227e+00   4.3936084e+00   5.7667342e+00   3.8604223e+00   5.3386416e+00   4.1481805e+00   3.8457422e+00   3.8556395e+00   3.9253518e+00   4.2633467e+00   5.5746944e+00   4.6805795e+00   3.8185179e+00   4.1531225e+00   5.0514886e+00   4.2706189e+00   4.0732692e+00   4.0031242e+00   4.5383244e+00   4.3266944e+00   4.1312924e+00   4.3745457e+00   4.8862079e+00   4.8484299e+00   4.2820168e+00   4.7051757e+00   3.9401848e+00   3.2884177e+00   3.9767257e+00   5.2985037e+00   3.7419338e+00   4.7600849e+00   4.5926355e+00   3.9322406e+00   4.5116211e+00   5.0823619e+00   4.3725296e+00   3.7236864e+00   3.9623539e+00   4.0300759e+00   4.6141946e+00   4.1447443e+00   4.5866744e+00   4.8386745e+00   4.9461657e+00   4.6106150e+00   4.7813164e+00   5.3858108e+00   
5.0919277e+00   5.1446449e+00   5.4739987e+00   4.5885042e+00   4.1414025e+00   4.9586480e+00   4.7213864e+00   5.2471037e+00   4.9661074e+00   4.3830009e+00   4.4568388e+00   4.6901031e+00   6.2154767e+00   5.6469307e+00   4.6501660e+00   4.6626232e+00   5.7036296e+00   4.7932843e+00   4.3038060e+00   4.5618709e+00   4.5655950e+00   4.2115551e+00   4.9692114e+00   4.7030193e+00   5.3377504e+00   4.5914343e+00   5.0293150e+00   4.5146706e+00   4.9581881e+00   5.4087027e+00   4.2557771e+00   4.2671438e+00   4.1737149e+00   4.5756985e+00   4.7660397e+00   4.6314702e+00   4.8386745e+00   4.6734470e+00   4.5970177e+00   4.7412505e+00   5.2464126e+00   4.4890534e+00   4.0948317e+00   4.2440420e+00   1.0013399e+00   5.0299964e-01   4.6310132e-01   1.2040900e+00   5.9738093e-01   1.2286837e+00   1.4541908e+00   1.4279677e+00   2.1539145e+00   1.2617482e+00   9.9544409e-01   1.2082987e+00   1.8489243e+00   2.0066856e+00   1.1587093e+00   6.6217390e-01   7.5179033e-01   1.9839744e+00   1.7063292e+00   9.6659661e-01   1.9839744e+00   2.4156729e+00   1.2419907e+00   1.0495503e+00   3.8490499e+00   2.0330726e+00   1.0871867e+00   5.4779717e-01   2.2031378e+00   5.3106808e-01   1.9004159e+00   5.5576380e-01   1.4899949e+00   3.4307448e+00   3.0710756e+00   3.5952799e+00   4.1669813e+00   3.7116384e+00   3.3536981e+00   3.0466130e+00   3.7729829e+00   3.5082606e+00   3.4067800e+00   4.6484249e+00   3.0744089e+00   4.3424419e+00   3.3858347e+00   2.9098729e+00   3.2669110e+00   3.1198644e+00   3.3210229e+00   4.6553382e+00   3.6737423e+00   3.2048569e+00   3.2989479e+00   4.2245828e+00   3.4587219e+00   3.3255241e+00   3.3484846e+00   3.8660480e+00   3.7512964e+00   3.3482610e+00   3.3605387e+00   3.8511468e+00   3.8014113e+00   3.3411134e+00   3.9109127e+00   3.1105014e+00   2.7597977e+00   3.4146222e+00   4.3904048e+00   2.8767763e+00   3.7646132e+00   3.6317356e+00   3.1996946e+00   3.5585167e+00   3.9690108e+00   3.4365573e+00   2.8705339e+00   3.0890888e+00   3.2456270e+00   3.5108688e+00   3.2367228e+00   4.2147014e+00   4.0489906e+00   4.5035700e+00   3.9755362e+00   4.2591912e+00   5.0350768e+00   4.1207838e+00   4.6882252e+00   4.7707308e+00   4.4918348e+00   3.6612573e+00   4.2568131e+00   4.2184327e+00   4.3988058e+00   4.2632946e+00   3.9245996e+00   3.8864624e+00   4.7642090e+00   5.7424408e+00   4.7299069e+00   4.2784034e+00   3.8797952e+00   5.2832732e+00   4.0458553e+00   3.9446593e+00   4.2181053e+00   3.8300888e+00   3.5427774e+00   4.3354099e+00   4.2438935e+00   4.8511407e+00   4.6817036e+00   4.4041714e+00   3.7859242e+00   4.1665370e+00   5.0618540e+00   3.9138566e+00   3.7274783e+00   3.4833346e+00   4.1288327e+00   4.3215818e+00   4.1859543e+00   4.0489906e+00   4.2965095e+00   4.2626474e+00   4.2257654e+00   4.4572702e+00   3.9184477e+00   3.7230473e+00   3.5604297e+00   1.0161882e+00   6.9420840e-01   4.8012872e-01   4.8284931e-01   6.9738730e-01   5.5709100e-01   5.3095950e-01   1.1723315e+00   3.1271814e-01   1.8699153e-01   2.9145160e-01   8.6143605e-01   1.0061402e+00   4.5257749e-01   1.4146831e+00   1.6902182e+00   9.9059199e-01   7.2340544e-01   5.0370871e-01   9.9059199e-01   1.4369223e+00   2.7124234e-01   1.3340137e-01   2.8614050e+00   1.1016806e+00   4.2656951e-01   7.5906970e-01   1.2087236e+00   7.1325427e-01   9.2761923e-01   5.3988754e-01   4.9448466e-01   3.3643791e+00   2.9159414e+00   3.4617249e+00   3.4323687e+00   3.3505903e+00   2.8169505e+00   2.9517065e+00   2.9146662e+00   3.1941250e+00   2.7393282e+00   3.7736317e+00   2.6933089e+00   3.6308668e+00   
2.9914579e+00   2.3560812e+00   3.0907905e+00   2.6932687e+00   2.7021788e+00   4.0389540e+00   2.9648098e+00   2.9980910e+00   2.8201257e+00   3.7183934e+00   2.9922398e+00   2.9661287e+00   3.0950932e+00   3.5513156e+00   3.5484439e+00   2.9420200e+00   2.6629529e+00   3.1013619e+00   3.0347859e+00   2.7417410e+00   3.4474072e+00   2.6495954e+00   2.6875764e+00   3.2488445e+00   3.7904353e+00   2.3985415e+00   3.0725852e+00   2.9704304e+00   2.8556850e+00   2.9300511e+00   3.1104511e+00   2.8291506e+00   2.4008046e+00   2.5836379e+00   2.8456807e+00   2.6907656e+00   2.6814159e+00   4.1737238e+00   3.5932878e+00   4.3853076e+00   3.6802688e+00   4.0749525e+00   4.9692376e+00   3.4394110e+00   4.5331007e+00   4.3742923e+00   4.6787296e+00   3.5632387e+00   3.8923023e+00   4.0628985e+00   3.8689922e+00   3.9102807e+00   3.8336686e+00   3.6675649e+00   5.0555526e+00   5.5454277e+00   4.0994961e+00   4.2439469e+00   3.4449831e+00   5.1429556e+00   3.6472400e+00   3.9304610e+00   4.1916938e+00   3.4565067e+00   3.2536517e+00   4.0373591e+00   4.1087274e+00   4.6703619e+00   4.9904781e+00   4.1152831e+00   3.4023949e+00   3.6756751e+00   5.0151763e+00   3.9231895e+00   3.5466262e+00   3.1760855e+00   4.0346845e+00   4.2216886e+00   4.1038501e+00   3.5932878e+00   4.2504107e+00   4.2656428e+00   4.0705667e+00   3.9971917e+00   3.7133040e+00   3.7182295e+00   3.2440381e+00   7.3339246e-01   9.9973471e-01   7.7988766e-01   1.4669572e+00   1.3868865e+00   1.4360884e+00   2.0344949e+00   1.2593779e+00   9.3451915e-01   1.1232628e+00   1.8421759e+00   1.9514085e+00   1.0061402e+00   9.6168382e-01   9.7747632e-01   1.9029496e+00   1.6513423e+00   7.7821113e-01   1.9029496e+00   2.4366790e+00   1.1857824e+00   1.1156669e+00   3.7575639e+00   2.1090460e+00   1.1623508e+00   7.4500632e-01   2.1481102e+00   7.3851064e-01   1.9301732e+00   5.6262711e-01   1.4458364e+00   3.0574507e+00   2.7604800e+00   3.2354442e+00   3.9296796e+00   3.3802783e+00   3.0910114e+00   2.7653977e+00   3.6086433e+00   3.1477156e+00   3.2299948e+00   4.4531271e+00   2.8182580e+00   4.0359059e+00   3.0838698e+00   2.6832031e+00   2.9127171e+00   2.8999239e+00   3.0235972e+00   4.3556993e+00   3.4142800e+00   2.9871882e+00   2.9947610e+00   3.9084388e+00   3.1359326e+00   2.9852014e+00   3.0007807e+00   3.4997296e+00   3.4243092e+00   3.0709062e+00   3.0889274e+00   3.6054231e+00   3.5510321e+00   3.0652992e+00   3.6307363e+00   2.9199707e+00   2.5302845e+00   3.0705222e+00   4.0683526e+00   2.6430958e+00   3.5303998e+00   3.3838093e+00   2.9011205e+00   3.2808511e+00   3.7876206e+00   3.1898591e+00   2.6081677e+00   2.8342553e+00   2.9259899e+00   3.3400468e+00   2.9809771e+00   4.0130133e+00   3.8156727e+00   4.1823301e+00   3.6854232e+00   3.9919300e+00   4.6844795e+00   3.9756960e+00   4.3245814e+00   4.4396397e+00   4.2453585e+00   3.3946374e+00   3.9634682e+00   3.9204865e+00   4.1772364e+00   4.0766155e+00   3.6959934e+00   3.5831715e+00   4.4790872e+00   5.3894840e+00   4.4405497e+00   4.0027890e+00   3.6857786e+00   4.9137586e+00   3.7567964e+00   3.6729588e+00   3.8728151e+00   3.5543966e+00   3.2848126e+00   4.0598486e+00   3.8712880e+00   4.4886485e+00   4.3722180e+00   4.1373490e+00   3.4683949e+00   3.8543469e+00   4.7248681e+00   3.7193644e+00   3.4383872e+00   3.2381387e+00   3.8297356e+00   4.0647650e+00   3.9099360e+00   3.8156727e+00   4.0266221e+00   4.0296168e+00   3.9579549e+00   4.1722551e+00   3.6379295e+00   3.5328511e+00   3.3224979e+00   1.0061402e+00   2.6525508e-01   8.2148003e-01   1.1879760e+00   
1.0251165e+00   1.8544941e+00   9.4128180e-01   7.1446962e-01   9.4128180e-01   1.4726083e+00   1.6607117e+00   9.9973471e-01   7.4965096e-01   1.0510795e+00   1.6532822e+00   1.4055304e+00   8.6143605e-01   1.6532822e+00   2.0368618e+00   9.3178083e-01   7.1143905e-01   3.5363390e+00   1.6307900e+00   8.0686941e-01   2.6184788e-01   1.8811296e+00   1.4276574e-01   1.5165187e+00   3.5874135e-01   1.1682143e+00   3.5420892e+00   3.1213639e+00   3.6765721e+00   3.9907084e+00   3.7063187e+00   3.2329517e+00   3.1017380e+00   3.5164906e+00   3.5204595e+00   3.2215483e+00   4.4016409e+00   3.0251724e+00   4.2008255e+00   3.3367044e+00   2.7940225e+00   3.3344791e+00   3.0219495e+00   3.1880051e+00   4.5546425e+00   3.5075399e+00   3.1952269e+00   3.2406785e+00   4.1563139e+00   3.3848806e+00   3.3178898e+00   3.3859281e+00   3.8870730e+00   3.7997125e+00   3.2944053e+00   3.2110142e+00   3.6683444e+00   3.6131226e+00   3.2236002e+00   3.8317071e+00   2.9830921e+00   2.7973167e+00   3.4787185e+00   4.2995699e+00   2.7671612e+00   3.5982071e+00   3.4619260e+00   3.1665431e+00   3.4310890e+00   3.7235030e+00   3.2953414e+00   2.7679629e+00   2.9819595e+00   3.2106653e+00   3.2879746e+00   3.1196883e+00   4.2713770e+00   3.9634682e+00   4.5846990e+00   3.9587038e+00   4.2895460e+00   5.1416821e+00   3.9119997e+00   4.7572039e+00   4.7460626e+00   4.6638344e+00   3.7280454e+00   4.2349181e+00   4.2803486e+00   4.2908242e+00   4.2152718e+00   3.9857018e+00   3.9070797e+00   4.9741957e+00   5.8097018e+00   4.6049285e+00   4.3788345e+00   3.7893243e+00   5.3689314e+00   4.0140467e+00   4.0363955e+00   4.3259832e+00   3.8006490e+00   3.5269016e+00   4.3297076e+00   4.3216441e+00   4.9220177e+00   4.9100057e+00   4.4024600e+00   3.7489348e+00   4.0736926e+00   5.1891895e+00   3.9903212e+00   3.7514870e+00   3.4564041e+00   4.2166575e+00   4.3944655e+00   4.2851348e+00   3.9634682e+00   4.3836343e+00   4.3634474e+00   4.2898748e+00   4.4067690e+00   3.9524852e+00   3.7906894e+00   3.5162082e+00   8.3156200e-01   1.1417173e+00   5.8221430e-01   7.3339246e-01   1.0428797e+00   5.5247822e-01   3.5266705e-01   2.9537172e-01   9.6466498e-01   1.0034646e+00   2.8553149e-01   1.6415483e+00   1.8567917e+00   9.3451915e-01   7.2553812e-01   3.4520795e-01   9.3451915e-01   1.5364952e+00   3.7960845e-01   5.9589853e-01   2.7723424e+00   1.3124532e+00   7.5130648e-01   1.0314203e+00   1.1925354e+00   9.9272943e-01   1.0839891e+00   7.1143905e-01   5.6164055e-01   3.0511653e+00   2.6629268e+00   3.1545235e+00   3.1980310e+00   3.0467396e+00   2.5772061e+00   2.7369167e+00   2.7576827e+00   2.8651003e+00   2.5868532e+00   3.5774656e+00   2.4748633e+00   3.3139897e+00   2.7217211e+00   2.1506369e+00   2.7852281e+00   2.5158340e+00   2.4059801e+00   3.7433691e+00   2.7041074e+00   2.8389661e+00   2.5310559e+00   3.4176981e+00   2.6902386e+00   2.6527549e+00   2.7866168e+00   3.2144386e+00   3.2675625e+00   2.6971865e+00   2.3822357e+00   2.8532332e+00   2.7780124e+00   2.4721123e+00   3.1952919e+00   2.5042136e+00   2.5315299e+00   2.9556760e+00   3.4693709e+00   2.1993495e+00   2.8460127e+00   2.7344861e+00   2.5960616e+00   2.6558880e+00   2.9309653e+00   2.5980406e+00   2.1695355e+00   2.3550298e+00   2.5518802e+00   2.5245372e+00   2.4441489e+00   4.0319503e+00   3.3933783e+00   4.1146478e+00   3.4339804e+00   3.8578841e+00   4.6712206e+00   3.3248410e+00   4.2174528e+00   4.0704050e+00   4.4988341e+00   3.3546526e+00   3.6317702e+00   3.8139412e+00   3.6743372e+00   3.7645284e+00   3.6614225e+00   3.4131399e+00   
4.8439861e+00   5.2322637e+00   3.8189229e+00   4.0256031e+00   3.2902097e+00   4.8190347e+00   3.3870859e+00   3.7223145e+00   3.9080259e+00   3.2141230e+00   3.0414452e+00   3.8022687e+00   3.7869665e+00   4.3507688e+00   4.7565318e+00   3.8893282e+00   3.1162635e+00   3.3877625e+00   4.7282676e+00   3.7917280e+00   3.3122397e+00   2.9763122e+00   3.7889092e+00   4.0173731e+00   3.8788191e+00   3.3933783e+00   4.0384828e+00   4.0914753e+00   3.8500022e+00   3.7349483e+00   3.4804621e+00   3.5920363e+00   3.0535851e+00   7.5284003e-01   9.3865015e-01   8.5442446e-01   1.6409761e+00   7.0463400e-01   5.4408162e-01   7.5179033e-01   1.2786676e+00   1.4553344e+00   7.8100392e-01   1.0100290e+00   1.2786676e+00   1.4586696e+00   1.2008045e+00   7.2638147e-01   1.4586696e+00   1.8445759e+00   7.3985997e-01   5.0731024e-01   3.3136601e+00   1.4580439e+00   5.4702555e-01   3.2339566e-01   1.6607117e+00   3.5366952e-01   1.3290015e+00   3.5639126e-01   9.6825676e-01   3.4059851e+00   2.9602214e+00   3.5257340e+00   3.7492673e+00   3.5115913e+00   3.0191277e+00   2.9517483e+00   3.2720571e+00   3.3411331e+00   2.9834222e+00   4.1580731e+00   2.8211532e+00   3.9717534e+00   3.1414634e+00   2.5643903e+00   3.1728076e+00   2.8176969e+00   2.9703822e+00   4.3244606e+00   3.2734582e+00   3.0210021e+00   3.0274003e+00   3.9433841e+00   3.1866772e+00   3.1269680e+00   3.2103093e+00   3.7065013e+00   3.6299273e+00   3.0909484e+00   2.9772513e+00   3.4297321e+00   3.3756936e+00   2.9971174e+00   3.6243253e+00   2.7759818e+00   2.6501700e+00   3.3189004e+00   4.0764633e+00   2.5559925e+00   3.3602243e+00   3.2356862e+00   2.9780147e+00   3.2026716e+00   3.4783251e+00   3.0685582e+00   2.5635668e+00   2.7679629e+00   3.0129566e+00   3.0370166e+00   2.8975180e+00   4.1264564e+00   3.7496421e+00   4.4295218e+00   3.7774558e+00   4.1191095e+00   5.0038078e+00   3.6756489e+00   4.6074750e+00   4.5494422e+00   4.5665604e+00   3.5702411e+00   4.0355009e+00   4.1137066e+00   4.0638414e+00   4.0067360e+00   3.8250619e+00   3.7375780e+00   4.9153359e+00   5.6444336e+00   4.3773916e+00   4.2331397e+00   3.5751580e+00   5.2199809e+00   3.8075775e+00   3.9003626e+00   4.1989415e+00   3.5967191e+00   3.3381510e+00   4.1394208e+00   4.1772604e+00   4.7627066e+00   4.8574416e+00   4.2113835e+00   3.5565415e+00   3.8744064e+00   5.0458107e+00   3.8530980e+00   3.5894641e+00   3.2635788e+00   4.0605147e+00   4.2327162e+00   4.1232607e+00   3.7496421e+00   4.2381044e+00   4.2216886e+00   4.1152818e+00   4.1901775e+00   3.7759339e+00   3.6506667e+00   3.3268510e+00   1.0748172e+00   7.2889003e-01   1.5046031e+00   7.9398919e-01   8.1148630e-01   8.8835337e-01   9.9058911e-01   1.2262672e+00   1.1380274e+00   1.3972288e+00   1.7741287e+00   1.2483935e+00   1.0474897e+00   1.1240042e+00   1.2483935e+00   1.4149548e+00   8.1096210e-01   5.7672351e-01   3.0082939e+00   9.6865373e-01   8.2273123e-01   9.5195566e-01   1.4288989e+00   8.3246212e-01   9.4996842e-01   9.2092295e-01   8.7375509e-01   4.0151215e+00   3.5231786e+00   4.1026785e+00   3.8908802e+00   3.9665585e+00   3.3438394e+00   3.5293285e+00   3.2540384e+00   3.8314935e+00   3.1634347e+00   4.1178317e+00   3.2512254e+00   4.1561446e+00   3.5717750e+00   2.8833448e+00   3.7345570e+00   3.1952822e+00   3.2548783e+00   4.5820687e+00   3.4621656e+00   3.5141919e+00   3.4137993e+00   4.2939653e+00   3.5777026e+00   3.5926398e+00   3.7328374e+00   4.1920808e+00   4.1652091e+00   3.5073274e+00   3.1921998e+00   3.5706840e+00   3.5044583e+00   3.2904241e+00   3.9914615e+00   
3.1120432e+00   3.2204607e+00   3.8807669e+00   4.3582892e+00   2.9219284e+00   3.5476488e+00   3.4540639e+00   3.4397115e+00   3.4680001e+00   3.4670753e+00   3.3367044e+00   2.9471549e+00   3.1205392e+00   3.4518638e+00   3.0783517e+00   3.2145380e+00   4.6697624e+00   4.0951447e+00   4.9940386e+00   4.2442248e+00   4.6312366e+00   5.5957028e+00   3.7901714e+00   5.1629764e+00   4.9662629e+00   5.2245876e+00   4.1326097e+00   4.4648550e+00   4.6557857e+00   4.3477764e+00   4.3833898e+00   4.3689111e+00   4.2520226e+00   5.6153369e+00   6.1715044e+00   4.6178873e+00   4.8201070e+00   3.9129644e+00   5.7808751e+00   4.2195149e+00   4.4949009e+00   4.8099428e+00   4.0213766e+00   3.8049151e+00   4.5961475e+00   4.7475868e+00   5.3061067e+00   5.5699347e+00   4.6684257e+00   3.9900166e+00   4.2272640e+00   5.6441645e+00   4.4197984e+00   4.1176453e+00   3.7157925e+00   4.6326704e+00   4.7820862e+00   4.6921349e+00   4.0951447e+00   4.8160040e+00   4.8050680e+00   4.6459078e+00   4.5554678e+00   4.2905569e+00   4.2115152e+00   3.7649212e+00   5.9314593e-01   8.0686941e-01   2.9691107e-01   6.2826980e-01   5.0121118e-01   6.6653737e-01   7.0834786e-01   4.6310132e-01   1.9251840e+00   2.1737519e+00   7.4743804e-01   5.5009731e-01   8.0748088e-01   7.4743804e-01   1.1828955e+00   4.6964680e-01   5.8942278e-01   2.4421558e+00   9.8677196e-01   4.9772204e-01   1.1660949e+00   8.4116354e-01   1.2196311e+00   7.7538587e-01   1.0376697e+00   4.4499696e-01   3.0983271e+00   2.5986852e+00   3.1534326e+00   2.8897192e+00   2.9336987e+00   2.3392252e+00   2.6586759e+00   2.3702978e+00   2.8165027e+00   2.2079130e+00   3.2363154e+00   2.2664200e+00   3.1218253e+00   2.5673178e+00   1.8638939e+00   2.7710337e+00   2.2535803e+00   2.2156046e+00   3.5264693e+00   2.4375344e+00   2.6410495e+00   2.3635223e+00   3.2419868e+00   2.5535068e+00   2.5663186e+00   2.7372400e+00   3.1657714e+00   3.1910277e+00   2.5035271e+00   2.1450705e+00   2.5644558e+00   2.5011730e+00   2.2417546e+00   2.9810976e+00   2.2012005e+00   2.4146140e+00   2.9247451e+00   3.2953953e+00   1.9474034e+00   2.5368532e+00   2.4541089e+00   2.4554575e+00   2.4210504e+00   2.5661600e+00   2.3207574e+00   1.9628165e+00   2.1171985e+00   2.4261018e+00   2.1366217e+00   2.1917681e+00   3.8609963e+00   3.1157672e+00   4.0464741e+00   3.2769657e+00   3.7011972e+00   4.6584806e+00   2.9074576e+00   4.1962146e+00   3.9292452e+00   4.4717831e+00   3.2385313e+00   3.4507621e+00   3.7050324e+00   3.3601278e+00   3.4577371e+00   3.4991206e+00   3.2980589e+00   4.9174048e+00   5.1685262e+00   3.5822256e+00   3.9345678e+00   2.9743611e+00   4.8043725e+00   3.1946631e+00   3.6425792e+00   3.9147944e+00   3.0137993e+00   2.8509730e+00   3.6160190e+00   3.7930649e+00   4.3160863e+00   4.8705552e+00   3.6935350e+00   2.9765851e+00   3.2157655e+00   4.7030966e+00   3.6383061e+00   3.1964791e+00   2.7656084e+00   3.7055141e+00   3.8768834e+00   3.7701724e+00   3.1157672e+00   3.9366463e+00   3.9674740e+00   3.7027144e+00   3.5167570e+00   3.3369517e+00   3.4319544e+00   2.8332022e+00   9.6865373e-01   3.9487224e-01   5.8131330e-01   5.6003943e-01   5.0621589e-01   7.1247632e-01   8.0317491e-01   1.7053539e+00   2.0491684e+00   7.4957404e-01   6.5459290e-01   9.3991103e-01   7.4957404e-01   1.0954558e+00   4.2737382e-01   4.9430028e-01   2.5884539e+00   7.4949264e-01   6.4432393e-01   1.0251728e+00   9.7391954e-01   1.0055888e+00   5.9279023e-01   9.4588685e-01   4.3798311e-01   3.5017283e+00   3.0032209e+00   3.5640998e+00   3.2626300e+00   3.3723783e+00   
2.7101865e+00   3.0361435e+00   2.6574564e+00   3.2363742e+00   2.5684615e+00   3.5220489e+00   2.6863775e+00   3.5035562e+00   2.9639854e+00   2.2954282e+00   3.1974234e+00   2.6186896e+00   2.5919605e+00   3.9485388e+00   2.8137879e+00   3.0123600e+00   2.8059985e+00   3.6581986e+00   2.9351026e+00   2.9984934e+00   3.1711590e+00   3.5947528e+00   3.6146776e+00   2.9159819e+00   2.5508141e+00   2.9298445e+00   2.8588898e+00   2.6582994e+00   3.3705985e+00   2.5395254e+00   2.7634723e+00   3.3411818e+00   3.7151762e+00   2.3273691e+00   2.9184140e+00   2.8006021e+00   2.8512853e+00   2.8277392e+00   2.8675465e+00   2.7048983e+00   2.3342128e+00   2.5075548e+00   2.8488482e+00   2.4938133e+00   2.5939117e+00   4.2210242e+00   3.5094230e+00   4.4613495e+00   3.6611526e+00   4.1011462e+00   5.0575391e+00   3.2183295e+00   4.5889909e+00   4.3421582e+00   4.8334387e+00   3.6441411e+00   3.8749352e+00   4.1286607e+00   3.7602700e+00   3.8694583e+00   3.9027404e+00   3.6910974e+00   5.2330448e+00   5.5920875e+00   3.9683831e+00   4.3421746e+00   3.3618744e+00   5.2099570e+00   3.6296154e+00   4.0192804e+00   4.2904705e+00   3.4453138e+00   3.2560920e+00   4.0303932e+00   4.1835729e+00   4.7330562e+00   5.1897695e+00   4.1126264e+00   3.3744864e+00   3.5691373e+00   5.1336306e+00   3.9986271e+00   3.5735980e+00   3.1698618e+00   4.1283627e+00   4.2954773e+00   4.2156054e+00   3.5094230e+00   4.3310092e+00   4.3633885e+00   4.1455698e+00   3.9545856e+00   3.7585686e+00   3.7901495e+00   3.2094268e+00   9.5902306e-01   1.1795364e+00   9.6032771e-01   5.8652824e-01   3.3395426e-01   1.0753036e+00   2.5524007e+00   2.8349345e+00   2.9691107e-01   5.1396090e-01   1.3127309e+00   2.9691107e-01   7.4426155e-01   9.3211669e-01   1.1729612e+00   1.7369516e+00   8.7560645e-01   1.2666796e+00   1.8751947e+00   2.9724335e-01   1.8489906e+00   6.7745878e-01   1.6555341e+00   7.0111465e-01   3.4067014e+00   2.9452188e+00   3.4231096e+00   2.6265336e+00   3.0474186e+00   2.3887954e+00   3.0652160e+00   1.9891259e+00   2.9589070e+00   2.1699637e+00   2.7527251e+00   2.5008826e+00   2.7949298e+00   2.7160947e+00   1.9851008e+00   3.0428102e+00   2.4755098e+00   2.1256864e+00   3.3327734e+00   2.2236827e+00   3.0131023e+00   2.4300526e+00   3.1928299e+00   2.6040863e+00   2.7075499e+00   2.9536823e+00   3.2710263e+00   3.4338296e+00   2.6673396e+00   1.9555334e+00   2.2858019e+00   2.1897213e+00   2.1973377e+00   3.0392888e+00   2.4158794e+00   2.8941568e+00   3.2025759e+00   3.1091590e+00   2.1471303e+00   2.3710990e+00   2.3347284e+00   2.6698387e+00   2.3133518e+00   2.1525596e+00   2.2918773e+00   2.1454625e+00   2.2398142e+00   2.5636830e+00   1.8343082e+00   2.2388656e+00   4.2714137e+00   3.2107728e+00   4.3091817e+00   3.4717121e+00   3.9768763e+00   4.9078859e+00   2.8122927e+00   4.3885241e+00   3.9504682e+00   4.9558940e+00   3.6044479e+00   3.5632387e+00   3.9760735e+00   3.3646188e+00   3.6594046e+00   3.8782141e+00   3.5443654e+00   5.4091146e+00   5.2988184e+00   3.3878489e+00   4.2952367e+00   3.1303129e+00   4.9761618e+00   3.2919446e+00   4.0362588e+00   4.2291285e+00   3.1618923e+00   3.1077588e+00   3.7959134e+00   4.0112445e+00   4.4810404e+00   5.3509795e+00   3.8831153e+00   3.0844802e+00   3.1980603e+00   4.9707249e+00   4.0945548e+00   3.4918172e+00   3.0237585e+00   4.0192804e+00   4.2092252e+00   4.1017980e+00   3.2107728e+00   4.2952415e+00   4.3790330e+00   3.9936934e+00   3.5312555e+00   3.6065699e+00   3.8937621e+00   3.0840989e+00   4.2827238e-01   3.7398306e-01   6.4241342e-01   
7.7828522e-01   4.8636669e-01   1.6800011e+00   1.9622194e+00   8.0686941e-01   5.7691891e-01   7.1789533e-01   8.0686941e-01   1.2139401e+00   2.9406726e-01   3.1507080e-01   2.6166211e+00   9.1398375e-01   3.4909881e-01   9.4580058e-01   9.6922609e-01   9.6659661e-01   7.2638147e-01   8.2574748e-01   3.6704030e-01   3.2939549e+00   2.8018134e+00   3.3643791e+00   3.1688464e+00   3.1882120e+00   2.5926123e+00   2.8420400e+00   2.6229843e+00   3.0569434e+00   2.4659441e+00   3.4932809e+00   2.5062529e+00   3.4038365e+00   2.8103848e+00   2.1284730e+00   2.9880998e+00   2.4809350e+00   2.4830222e+00   3.8129321e+00   2.7155086e+00   2.8370039e+00   2.6306749e+00   3.5140670e+00   2.8045035e+00   2.8143558e+00   2.9698163e+00   3.4126571e+00   3.4177063e+00   2.7508387e+00   2.4282690e+00   2.8424700e+00   2.7781836e+00   2.5174968e+00   3.2360555e+00   2.4214382e+00   2.5753180e+00   3.1397228e+00   3.5790751e+00   2.1850441e+00   2.8131786e+00   2.7177153e+00   2.6876771e+00   2.6993734e+00   2.8253248e+00   2.5871836e+00   2.1990767e+00   2.3678133e+00   2.6762366e+00   2.4070540e+00   2.4551607e+00   4.0383784e+00   3.3671653e+00   4.2642537e+00   3.5070157e+00   3.9193977e+00   4.8684861e+00   3.1505769e+00   4.4165051e+00   4.1898287e+00   4.6203725e+00   3.4386758e+00   3.7047820e+00   3.9273366e+00   3.6237778e+00   3.6947343e+00   3.6968840e+00   3.5190023e+00   5.0398879e+00   5.4089760e+00   3.8611646e+00   4.1322470e+00   3.2145601e+00   5.0295849e+00   3.4546083e+00   3.8248698e+00   4.1055247e+00   3.2664139e+00   3.0788010e+00   3.8567883e+00   4.0060320e+00   4.5478501e+00   4.9912214e+00   3.9339246e+00   3.2236553e+00   3.4677443e+00   4.9178815e+00   3.8042163e+00   3.4041321e+00   2.9939885e+00   3.9171296e+00   4.0866646e+00   3.9845548e+00   3.3671653e+00   4.1322520e+00   4.1520423e+00   3.9277271e+00   3.7880802e+00   3.5624203e+00   3.5967687e+00   3.0549169e+00   2.3749211e-01   9.2006504e-01   1.0428797e+00   4.2450569e-01   1.3899721e+00   1.6555341e+00   9.9973471e-01   7.5230154e-01   3.7960845e-01   9.9973471e-01   1.5086315e+00   2.6033464e-01   2.9724335e-01   2.8989712e+00   1.1937015e+00   5.7988427e-01   7.8318003e-01   1.2583645e+00   7.0463400e-01   1.0034646e+00   4.7680727e-01   5.2374483e-01   3.3114294e+00   2.8933417e+00   3.4177063e+00   3.4461307e+00   3.3255941e+00   2.8176969e+00   2.9380167e+00   2.9507452e+00   3.1524130e+00   2.7797208e+00   3.7960371e+00   2.6995806e+00   3.6095704e+00   2.9762135e+00   2.3753547e+00   3.0506196e+00   2.7121484e+00   2.6831858e+00   4.0299127e+00   2.9653560e+00   3.0144396e+00   2.8058449e+00   3.7011663e+00   2.9654419e+00   2.9344166e+00   3.0597490e+00   3.5085997e+00   3.5226725e+00   2.9395346e+00   2.6564538e+00   3.1076159e+00   3.0365838e+00   2.7379532e+00   3.4446760e+00   2.6796909e+00   2.6912430e+00   3.2129972e+00   3.7686900e+00   2.4108238e+00   3.0879511e+00   2.9762529e+00   2.8408428e+00   2.9254092e+00   3.1396427e+00   2.8384396e+00   2.3985415e+00   2.5881776e+00   2.8229620e+00   2.7289403e+00   2.6869860e+00   4.1910480e+00   3.6130663e+00   4.3602249e+00   3.6707779e+00   4.0745117e+00   4.9277939e+00   3.4934873e+00   4.4880495e+00   4.3514534e+00   4.6654573e+00   3.5594056e+00   3.8864758e+00   4.0498126e+00   3.8963526e+00   3.9502563e+00   3.8456535e+00   3.6509387e+00   5.0146974e+00   5.5101576e+00   4.0937915e+00   4.2345704e+00   3.4807992e+00   5.0960662e+00   3.6438388e+00   3.9190152e+00   4.1487738e+00   3.4580678e+00   3.2588014e+00   4.0378648e+00   4.0580582e+00   
4.6285945e+00   4.9381887e+00   4.1199488e+00   3.3816601e+00   3.6553790e+00   4.9813108e+00   3.9405186e+00   3.5335600e+00   3.1869497e+00   4.0186781e+00   4.2240093e+00   4.0988575e+00   3.6130663e+00   4.2429720e+00   4.2712088e+00   4.0719125e+00   3.9975817e+00   3.7087600e+00   3.7375363e+00   3.2561951e+00   7.6824760e-01   8.5141186e-01   3.6086962e-01   1.6207126e+00   1.8802756e+00   7.9394533e-01   5.3286499e-01   4.3319335e-01   7.9394533e-01   1.3370188e+00   1.3340137e-01   3.6319073e-01   2.6778759e+00   1.0720705e+00   6.3173774e-01   1.0072799e+00   1.0495503e+00   9.3727156e-01   8.5893964e-01   7.0463400e-01   3.3395426e-01   3.3027871e+00   2.8812178e+00   3.3955887e+00   3.2888081e+00   3.2512254e+00   2.7283479e+00   2.9463809e+00   2.7764635e+00   3.0911130e+00   2.6620270e+00   3.6054231e+00   2.6430457e+00   3.4442793e+00   2.9123088e+00   2.2793286e+00   3.0205073e+00   2.6595069e+00   2.5635668e+00   3.8866925e+00   2.8178210e+00   3.0060120e+00   2.7101865e+00   3.5930006e+00   2.8829084e+00   2.8651003e+00   3.0121201e+00   3.4400598e+00   3.4869142e+00   2.8725792e+00   2.5068325e+00   2.9480926e+00   2.8720011e+00   2.6183827e+00   3.3619078e+00   2.6263990e+00   2.7176350e+00   3.1874455e+00   3.6289342e+00   2.3459758e+00   2.9476507e+00   2.8536577e+00   2.7917808e+00   2.7959976e+00   2.9585178e+00   2.7268209e+00   2.3347284e+00   2.5080346e+00   2.7508387e+00   2.5565749e+00   2.5881776e+00   4.2068537e+00   3.5342439e+00   4.3380559e+00   3.6271373e+00   4.0499864e+00   4.9127682e+00   3.3748745e+00   4.4574738e+00   4.2666131e+00   4.7143178e+00   3.5549829e+00   3.8149934e+00   4.0227420e+00   3.7946027e+00   3.8919837e+00   3.8432323e+00   3.6208873e+00   5.0849580e+00   5.4596452e+00   3.9569474e+00   4.2354064e+00   3.4126422e+00   5.0611947e+00   3.5638945e+00   3.9334644e+00   4.1519487e+00   3.3885059e+00   3.2191166e+00   3.9849073e+00   4.0334328e+00   4.5859724e+00   5.0075986e+00   4.0680600e+00   3.3134027e+00   3.5670952e+00   4.9632121e+00   3.9675062e+00   3.5176551e+00   3.1453377e+00   4.0038982e+00   4.2114761e+00   4.0820077e+00   3.5342439e+00   4.2453199e+00   4.2844704e+00   4.0426067e+00   3.8984747e+00   3.6765607e+00   3.7642725e+00   3.2184749e+00   2.6033464e-01   9.9962901e-01   2.1664244e+00   2.5030472e+00   3.6319073e-01   4.2737382e-01   1.2004100e+00   3.6319073e-01   6.1067563e-01   6.7030885e-01   8.0996690e-01   2.1006743e+00   4.0020411e-01   9.4057729e-01   1.4985933e+00   5.0731024e-01   1.4656715e+00   1.6562722e-01   1.3630799e+00   4.4417668e-01   3.6433721e+00   3.1333439e+00   3.6757970e+00   3.0281244e+00   3.3717708e+00   2.6624050e+00   3.1998148e+00   2.3430115e+00   3.2729115e+00   2.4219924e+00   3.1700354e+00   2.7177110e+00   3.2760900e+00   2.9826961e+00   2.2410752e+00   3.2981044e+00   2.6452183e+00   2.4901533e+00   3.7687554e+00   2.6225184e+00   3.1280668e+00   2.7635526e+00   3.5692464e+00   2.9177616e+00   3.0187150e+00   3.2354747e+00   3.6116754e+00   3.6909005e+00   2.9234404e+00   2.3731183e+00   2.6987010e+00   2.6178190e+00   2.5515904e+00   3.3308561e+00   2.5554841e+00   2.9570491e+00   3.4460544e+00   3.5549612e+00   2.3407691e+00   2.7326628e+00   2.6614903e+00   2.9042354e+00   2.6919656e+00   2.5430017e+00   2.6000033e+00   2.3578684e+00   2.4871797e+00   2.8599439e+00   2.2045434e+00   2.5287499e+00   4.3690091e+00   3.4628576e+00   4.5552847e+00   3.7077077e+00   4.1799642e+00   5.1678263e+00   3.0379779e+00   4.6720960e+00   4.3013447e+00   5.0550361e+00   3.7713495e+00   
3.8605708e+00   4.2104897e+00   3.6525334e+00   3.8549710e+00   4.0229434e+00   3.7708197e+00   5.5011831e+00   5.6245097e+00   3.7945557e+00   4.4755001e+00   3.3306776e+00   5.2815051e+00   3.5995462e+00   4.1814664e+00   4.4417427e+00   3.4376057e+00   3.3113002e+00   4.0501276e+00   4.2847585e+00   4.7905454e+00   5.4600808e+00   4.1319681e+00   3.3795104e+00   3.5192584e+00   5.2359042e+00   4.1708373e+00   3.6809073e+00   3.2190305e+00   4.2365575e+00   4.3973146e+00   4.3149219e+00   3.4628576e+00   4.4657183e+00   4.5132257e+00   4.2167698e+00   3.8749352e+00   3.8293474e+00   3.9628758e+00   3.2623926e+00   1.0371214e+00   2.3606689e+00   2.6764689e+00   1.8699153e-01   4.0363332e-01   1.2627589e+00   1.8699153e-01   5.6164055e-01   7.8305765e-01   9.7747632e-01   1.8924893e+00   5.6164055e-01   1.0881632e+00   1.6837963e+00   2.8845946e-01   1.6545637e+00   3.5266705e-01   1.5108472e+00   5.3286499e-01   3.5596461e+00   3.0642731e+00   3.5820652e+00   2.8366432e+00   3.2382208e+00   2.5375138e+00   3.1537738e+00   2.1559444e+00   3.1474432e+00   2.2926143e+00   2.9585178e+00   2.6250629e+00   3.0590120e+00   2.8699760e+00   2.1233327e+00   3.2024265e+00   2.5670381e+00   2.3272067e+00   3.5735096e+00   2.4368431e+00   3.0826231e+00   2.6212838e+00   3.4052824e+00   2.7833861e+00   2.8923043e+00   3.1255601e+00   3.4747564e+00   3.5908785e+00   2.8135312e+00   2.1839196e+00   2.5032729e+00   2.4158569e+00   2.3928312e+00   3.2017644e+00   2.4862299e+00   2.9403226e+00   3.3545989e+00   3.3587820e+00   2.2520446e+00   2.5607059e+00   2.5059290e+00   2.8073565e+00   2.5209771e+00   2.3430115e+00   2.4562939e+00   2.2633781e+00   2.3755041e+00   2.7368591e+00   2.0165442e+00   2.3969046e+00   4.3354025e+00   3.3475977e+00   4.4615703e+00   3.6095772e+00   4.0990362e+00   5.0710534e+00   2.9144612e+00   4.5627576e+00   4.1522674e+00   5.0316497e+00   3.7102339e+00   3.7341705e+00   4.1195531e+00   3.5174471e+00   3.7659275e+00   3.9693828e+00   3.6809073e+00   5.4858042e+00   5.4945039e+00   3.6088006e+00   4.4109170e+00   3.2362256e+00   5.1634795e+00   3.4678413e+00   4.1322470e+00   4.3666536e+00   3.3199204e+00   3.2266664e+00   3.9433408e+00   4.1815045e+00   4.6694810e+00   5.4392260e+00   4.0273519e+00   3.2552513e+00   3.3773249e+00   5.1375752e+00   4.1484621e+00   3.6075786e+00   3.1365574e+00   4.1554935e+00   4.3260165e+00   4.2353581e+00   3.3475977e+00   4.4043042e+00   4.4676627e+00   4.1295047e+00   3.7244531e+00   3.7408419e+00   3.9430201e+00   3.1856251e+00   1.6790448e+00   1.8683303e+00   9.9891776e-01   7.3735391e-01   3.8639663e-01   9.9891776e-01   1.5462701e+00   4.4713936e-01   5.6262711e-01   2.7653818e+00   1.3238834e+00   5.9868400e-01   1.0167074e+00   1.1817121e+00   1.0267382e+00   1.1036371e+00   7.4965096e-01   5.9868400e-01   2.9920629e+00   2.5767485e+00   3.0904477e+00   3.1383073e+00   2.9738737e+00   2.5155127e+00   2.6450302e+00   2.7097016e+00   2.8120208e+00   2.4963677e+00   3.5442384e+00   2.3737852e+00   3.2878773e+00   2.6552959e+00   2.0482711e+00   2.7132601e+00   2.4244329e+00   2.3725931e+00   3.6825624e+00   2.6567419e+00   2.7277627e+00   2.4551607e+00   3.3586468e+00   2.6490703e+00   2.5878996e+00   2.7146857e+00   3.1604264e+00   3.1862677e+00   2.6121387e+00   2.3320406e+00   2.8060955e+00   2.7397840e+00   2.4059801e+00   3.1251810e+00   2.4123722e+00   2.4266062e+00   2.8827369e+00   3.4219145e+00   2.1146056e+00   2.7787333e+00   2.6868306e+00   2.5237903e+00   2.5969195e+00   2.8858666e+00   2.5292454e+00   2.1030528e+00   
2.2789104e+00   2.4843930e+00   2.4502513e+00   2.3681813e+00   3.9129285e+00   3.2963379e+00   4.0307050e+00   3.3579713e+00   3.7573925e+00   4.6072225e+00   3.2350676e+00   4.1666050e+00   4.0096938e+00   4.3939440e+00   3.2458961e+00   3.5448948e+00   3.7163166e+00   3.5735211e+00   3.6303034e+00   3.5366397e+00   3.3347301e+00   4.7764597e+00   5.1656525e+00   3.7678733e+00   3.9190152e+00   3.1752055e+00   4.7655871e+00   3.2963860e+00   3.6257668e+00   3.8480918e+00   3.1163356e+00   2.9401017e+00   3.7060704e+00   3.7400429e+00   4.2905130e+00   4.6982735e+00   3.7862785e+00   3.0555922e+00   3.3519252e+00   4.6433941e+00   3.6672706e+00   3.2313825e+00   2.8704346e+00   3.6888814e+00   3.9001229e+00   3.7578379e+00   3.2963379e+00   3.9355102e+00   3.9693842e+00   3.7298088e+00   3.6452459e+00   3.3776637e+00   3.4666091e+00   2.9570067e+00   4.5257749e-01   2.3345854e+00   2.1006743e+00   1.4408765e+00   2.3345854e+00   2.7201861e+00   1.6242170e+00   1.4334280e+00   4.2461520e+00   2.2960397e+00   1.5510150e+00   8.3619405e-01   2.5963945e+00   7.1671402e-01   2.2031378e+00   9.3957399e-01   1.8662528e+00   3.9018608e+00   3.5587525e+00   4.0758181e+00   4.6738622e+00   4.2315488e+00   3.8362958e+00   3.5101695e+00   4.2349456e+00   4.0096351e+00   3.8957954e+00   5.1177048e+00   3.5857491e+00   4.8511271e+00   3.8770675e+00   3.4324591e+00   3.7687554e+00   3.5952204e+00   3.8095276e+00   5.1880951e+00   4.1733990e+00   3.6719420e+00   3.8275992e+00   4.7391892e+00   3.9417326e+00   3.8406076e+00   3.8597390e+00   4.3729123e+00   4.2482658e+00   3.8534412e+00   3.8740219e+00   4.3496532e+00   4.2951960e+00   3.8572117e+00   4.4028225e+00   3.5707989e+00   3.2084035e+00   3.9057558e+00   4.9165227e+00   3.3635180e+00   4.2694314e+00   4.1082916e+00   3.6886188e+00   4.0716087e+00   4.4411156e+00   3.9335446e+00   3.3496034e+00   3.5830335e+00   3.7561390e+00   4.0088699e+00   3.7413425e+00   4.6436290e+00   4.5471223e+00   4.9787007e+00   4.4481204e+00   4.7341193e+00   5.4826137e+00   4.5863292e+00   5.1433212e+00   5.2725182e+00   4.8836508e+00   4.1393671e+00   4.7672599e+00   4.7092337e+00   4.9106562e+00   4.7707455e+00   4.3996661e+00   4.3591552e+00   5.0844032e+00   6.2257171e+00   5.2378683e+00   4.7433741e+00   4.3742533e+00   5.7435198e+00   4.5678545e+00   4.3840310e+00   4.6485090e+00   4.3482964e+00   4.0364176e+00   4.8328894e+00   4.6980887e+00   5.3298852e+00   5.0020990e+00   4.9051796e+00   4.2757519e+00   4.6314634e+00   5.5369700e+00   4.3420582e+00   4.1857666e+00   3.9786340e+00   4.6138259e+00   4.8044656e+00   4.6911542e+00   4.5471223e+00   4.7508760e+00   4.7161034e+00   4.7355095e+00   4.9879154e+00   4.4154797e+00   4.1545903e+00   4.0343137e+00   2.6422396e+00   2.3867296e+00   1.6154069e+00   2.6422396e+00   3.0703842e+00   1.9080710e+00   1.7295385e+00   4.5475791e+00   2.6621202e+00   1.8051284e+00   1.1105716e+00   2.8967543e+00   1.0474897e+00   2.5495727e+00   1.1795364e+00   2.1617152e+00   3.8171826e+00   3.5339654e+00   4.0163479e+00   4.8425911e+00   4.2514283e+00   3.9557653e+00   3.4792404e+00   4.4740529e+00   4.0150475e+00   4.0717494e+00   5.3501548e+00   3.6486698e+00   4.9910943e+00   3.9350582e+00   3.5547141e+00   3.7282030e+00   3.6962934e+00   3.9420318e+00   5.2895497e+00   4.3341610e+00   3.6960949e+00   3.8986276e+00   4.8106103e+00   4.0206152e+00   3.8664495e+00   3.8454452e+00   4.3675713e+00   4.2173036e+00   3.9169317e+00   4.0237417e+00   4.5248905e+00   4.4756871e+00   3.9778974e+00   4.4827624e+00   3.6962934e+00   
3.1970228e+00   3.8646917e+00   5.0103465e+00   3.4775294e+00   4.4295579e+00   4.2690345e+00   3.7344524e+00   4.1995700e+00   4.6716989e+00   4.0716455e+00   3.4573201e+00   3.6936958e+00   3.8056210e+00   4.2211876e+00   3.8604223e+00   4.5958210e+00   4.6323449e+00   4.9087471e+00   4.4703758e+00   4.7121623e+00   5.3828288e+00   4.7798674e+00   5.0815530e+00   5.2996504e+00   4.7231435e+00   4.0911974e+00   4.7955081e+00   4.6606898e+00   5.0156099e+00   4.8233009e+00   4.3540706e+00   4.3488974e+00   4.8785742e+00   6.1614901e+00   5.3577416e+00   4.6571075e+00   4.4651793e+00   5.6630236e+00   4.6077729e+00   4.3065165e+00   4.5525984e+00   4.3873290e+00   4.0638414e+00   4.8447047e+00   4.6322938e+00   5.2676175e+00   4.7796156e+00   4.9133278e+00   4.3194697e+00   4.7202167e+00   5.4208419e+00   4.2794874e+00   4.1728215e+00   4.0165591e+00   4.5422714e+00   4.7447407e+00   4.6112705e+00   4.6323449e+00   4.6754921e+00   4.6293227e+00   4.6871898e+00   5.0428586e+00   4.3953592e+00   4.1024574e+00   4.0848110e+00   3.3742167e-01   1.1857824e+00   0.0000000e+00   6.6918102e-01   7.4445830e-01   9.7322023e-01   1.9284841e+00   6.6918102e-01   1.1393372e+00   1.6942803e+00   3.7371902e-01   1.6386105e+00   4.5257749e-01   1.4715172e+00   4.9772204e-01   3.5602797e+00   3.0968979e+00   3.5933352e+00   2.8998722e+00   3.2656298e+00   2.6029746e+00   3.1974447e+00   2.2445103e+00   3.1601923e+00   2.3946216e+00   3.0209665e+00   2.6867317e+00   3.0775658e+00   2.9161244e+00   2.1946278e+00   3.2137635e+00   2.6502891e+00   2.3652711e+00   3.6096140e+00   2.4893065e+00   3.1578003e+00   2.6568473e+00   3.4426472e+00   2.8187901e+00   2.9128857e+00   3.1418202e+00   3.4847097e+00   3.6205958e+00   2.8694312e+00   2.2223283e+00   2.5588203e+00   2.4651138e+00   2.4413293e+00   3.2621861e+00   2.5834128e+00   2.9995857e+00   3.3733791e+00   3.3817876e+00   2.3263008e+00   2.6305758e+00   2.5756071e+00   2.8533918e+00   2.5683063e+00   2.4187322e+00   2.5258216e+00   2.3250308e+00   2.4413618e+00   2.7691536e+00   2.1006933e+00   2.4608850e+00   4.4119896e+00   3.4290419e+00   4.4942656e+00   3.6650933e+00   4.1590662e+00   5.0899438e+00   3.0333619e+00   4.5799469e+00   4.1882413e+00   5.0726081e+00   3.7613721e+00   3.7859992e+00   4.1623715e+00   3.6029758e+00   3.8608065e+00   4.0352348e+00   3.7266849e+00   5.5043247e+00   5.5172732e+00   3.6569442e+00   4.4568115e+00   3.3324016e+00   5.1765224e+00   3.5192065e+00   4.1799642e+00   4.3857400e+00   3.3769078e+00   3.2906843e+00   4.0034548e+00   4.1917183e+00   4.6854597e+00   5.4444866e+00   4.0904298e+00   3.2962678e+00   3.4250783e+00   5.1569386e+00   4.2213314e+00   3.6582637e+00   3.2059261e+00   4.1937040e+00   4.3826532e+00   4.2786320e+00   3.4290419e+00   4.4549850e+00   4.5270304e+00   4.1816268e+00   3.7777251e+00   3.7924144e+00   4.0173731e+00   3.2613828e+00   9.2006504e-01   3.3742167e-01   8.6080744e-01   5.0621589e-01   7.0646671e-01   2.1664244e+00   7.2679299e-01   8.9712099e-01   1.4681660e+00   5.4873947e-01   1.4074199e+00   4.9617437e-01   1.2206236e+00   2.5698045e-01   3.4986942e+00   3.0427234e+00   3.5520531e+00   3.0444864e+00   3.2783159e+00   2.6723101e+00   3.1333743e+00   2.4360210e+00   3.1627463e+00   2.4904253e+00   3.2338077e+00   2.6808015e+00   3.2240677e+00   2.9412073e+00   2.2206950e+00   3.1669559e+00   2.6716144e+00   2.4623998e+00   3.7173707e+00   2.6198784e+00   3.1208539e+00   2.6854361e+00   3.5171200e+00   2.8753360e+00   2.9157449e+00   3.1157529e+00   3.4945103e+00   3.5956983e+00   
2.8873581e+00   2.3297122e+00   2.7075732e+00   2.6220688e+00   2.5143128e+00   3.3225169e+00   2.6164571e+00   2.9213819e+00   3.3323415e+00   3.4842326e+00   2.3487772e+00   2.7507828e+00   2.6991998e+00   2.8571158e+00   2.6614903e+00   2.6122501e+00   2.6121387e+00   2.3527202e+00   2.4822998e+00   2.7826627e+00   2.2477567e+00   2.5188545e+00   4.3590737e+00   3.4800724e+00   4.4652192e+00   3.6820633e+00   4.1423404e+00   5.0632361e+00   3.1594575e+00   4.5764429e+00   4.2442248e+00   4.9703970e+00   3.7054123e+00   3.8144340e+00   4.1322520e+00   3.6772717e+00   3.8704422e+00   3.9786899e+00   3.7187193e+00   5.3973273e+00   5.5276243e+00   3.7838435e+00   4.3978718e+00   3.3669786e+00   5.1732410e+00   3.5478651e+00   4.1195682e+00   4.3422158e+00   3.3925731e+00   3.2818178e+00   4.0157845e+00   4.1753467e+00   4.6824968e+00   5.3318394e+00   4.0983108e+00   3.3321312e+00   3.5171976e+00   5.1116177e+00   4.1480571e+00   3.6395566e+00   3.1983718e+00   4.1451783e+00   4.3355345e+00   4.2161052e+00   3.4800724e+00   4.4037157e+00   4.4559384e+00   4.1399085e+00   3.8303307e+00   3.7678377e+00   3.9434740e+00   3.2672961e+00   1.1857824e+00   1.7590894e+00   5.4715569e-01   6.1787077e-01   3.0224093e+00   1.4977817e+00   8.1744862e-01   9.4676850e-01   1.4369223e+00   8.6079202e-01   1.2896554e+00   5.3286499e-01   7.6195008e-01   3.1536923e+00   2.8019556e+00   3.2823965e+00   3.4754319e+00   3.2348803e+00   2.8339836e+00   2.8678686e+00   3.0569237e+00   3.0422163e+00   2.8599505e+00   3.8711727e+00   2.6769819e+00   3.5769114e+00   2.9369339e+00   2.3887701e+00   2.9172679e+00   2.7450499e+00   2.6744413e+00   3.9868196e+00   2.9825819e+00   3.0070640e+00   2.7478281e+00   3.6492544e+00   2.9260177e+00   2.8398296e+00   2.9417237e+00   3.3879693e+00   3.4191352e+00   2.9103957e+00   2.6495864e+00   3.1359829e+00   3.0635119e+00   2.7246726e+00   3.4310966e+00   2.7450499e+00   2.6593850e+00   3.0929063e+00   3.7090709e+00   2.4372581e+00   3.1206171e+00   3.0186562e+00   2.7973689e+00   2.9151879e+00   3.2261028e+00   2.8631702e+00   2.4096686e+00   2.5984928e+00   2.7564382e+00   2.8056103e+00   2.6945402e+00   4.1622883e+00   3.6243461e+00   4.2495237e+00   3.6308369e+00   4.0200378e+00   4.7940036e+00   3.6050689e+00   4.3664479e+00   4.2800934e+00   4.5553898e+00   3.4840330e+00   3.8323637e+00   3.9571437e+00   3.9163572e+00   3.9605759e+00   3.7909588e+00   3.5846709e+00   4.8756388e+00   5.3862972e+00   4.0807979e+00   4.1385728e+00   3.5138167e+00   4.9592897e+00   3.5910983e+00   3.8379532e+00   4.0230039e+00   3.4134019e+00   3.2269518e+00   3.9906410e+00   3.9261146e+00   4.4982182e+00   4.7746016e+00   4.0736767e+00   3.3286256e+00   3.6393910e+00   4.8333249e+00   3.9033387e+00   3.4776516e+00   3.1661860e+00   3.9124707e+00   4.1473618e+00   3.9899548e+00   3.6243461e+00   4.1607944e+00   4.1969547e+00   3.9859042e+00   3.9511939e+00   3.6382505e+00   3.7066628e+00   3.2552942e+00   6.6918102e-01   7.4445830e-01   9.7322023e-01   1.9284841e+00   6.6918102e-01   1.1393372e+00   1.6942803e+00   3.7371902e-01   1.6386105e+00   4.5257749e-01   1.4715172e+00   4.9772204e-01   3.5602797e+00   3.0968979e+00   3.5933352e+00   2.8998722e+00   3.2656298e+00   2.6029746e+00   3.1974447e+00   2.2445103e+00   3.1601923e+00   2.3946216e+00   3.0209665e+00   2.6867317e+00   3.0775658e+00   2.9161244e+00   2.1946278e+00   3.2137635e+00   2.6502891e+00   2.3652711e+00   3.6096140e+00   2.4893065e+00   3.1578003e+00   2.6568473e+00   3.4426472e+00   2.8187901e+00   2.9128857e+00   
3.1418202e+00   3.4847097e+00   3.6205958e+00   2.8694312e+00   2.2223283e+00   2.5588203e+00   2.4651138e+00   2.4413293e+00   3.2621861e+00   2.5834128e+00   2.9995857e+00   3.3733791e+00   3.3817876e+00   2.3263008e+00   2.6305758e+00   2.5756071e+00   2.8533918e+00   2.5683063e+00   2.4187322e+00   2.5258216e+00   2.3250308e+00   2.4413618e+00   2.7691536e+00   2.1006933e+00   2.4608850e+00   4.4119896e+00   3.4290419e+00   4.4942656e+00   3.6650933e+00   4.1590662e+00   5.0899438e+00   3.0333619e+00   4.5799469e+00   4.1882413e+00   5.0726081e+00   3.7613721e+00   3.7859992e+00   4.1623715e+00   3.6029758e+00   3.8608065e+00   4.0352348e+00   3.7266849e+00   5.5043247e+00   5.5172732e+00   3.6569442e+00   4.4568115e+00   3.3324016e+00   5.1765224e+00   3.5192065e+00   4.1799642e+00   4.3857400e+00   3.3769078e+00   3.2906843e+00   4.0034548e+00   4.1917183e+00   4.6854597e+00   5.4444866e+00   4.0904298e+00   3.2962678e+00   3.4250783e+00   5.1569386e+00   4.2213314e+00   3.6582637e+00   3.2059261e+00   4.1937040e+00   4.3826532e+00   4.2786320e+00   3.4290419e+00   4.4549850e+00   4.5270304e+00   4.1816268e+00   3.7777251e+00   3.7924144e+00   4.0173731e+00   3.2613828e+00   1.2563834e+00   1.3681903e+00   1.6242170e+00   4.6126066e-01   1.4691503e+00   2.0743925e+00   5.0370871e-01   2.0365895e+00   5.2374483e-01   1.9494772e+00   1.0034646e+00   4.0320101e+00   3.4981749e+00   4.0289839e+00   2.9648238e+00   3.6116412e+00   2.8362334e+00   3.5807825e+00   2.1594400e+00   3.5619276e+00   2.4608850e+00   2.9150653e+00   2.9806848e+00   3.2524084e+00   3.2332049e+00   2.4351674e+00   3.6506644e+00   2.8794131e+00   2.6371069e+00   3.7842148e+00   2.6442387e+00   3.4386758e+00   2.9743384e+00   3.6958304e+00   3.1396988e+00   3.2947222e+00   3.5521670e+00   3.8756118e+00   3.9969338e+00   3.1587328e+00   2.4432066e+00   2.6604221e+00   2.5745994e+00   2.6880360e+00   3.4951118e+00   2.7657429e+00   3.3524671e+00   3.7924883e+00   3.6104716e+00   2.5876530e+00   2.7410968e+00   2.7238850e+00   3.1914275e+00   2.7871336e+00   2.3484202e+00   2.7125180e+00   2.6235600e+00   2.7012637e+00   3.1219910e+00   2.0888843e+00   2.6969064e+00   4.6820911e+00   3.5968849e+00   4.8607426e+00   3.9563487e+00   4.4501699e+00   5.4913616e+00   2.9743611e+00   4.9743359e+00   4.4659463e+00   5.4618868e+00   4.1043393e+00   4.0513907e+00   4.5016468e+00   3.7087600e+00   4.0024694e+00   4.3310092e+00   4.0611789e+00   5.9599083e+00   5.8632763e+00   3.7995759e+00   4.8081465e+00   3.4697005e+00   5.5699347e+00   3.7817852e+00   4.5398887e+00   4.8101536e+00   3.6425657e+00   3.5739550e+00   4.2642554e+00   4.6155808e+00   5.0696209e+00   5.9318766e+00   4.3420618e+00   3.6079861e+00   3.6711700e+00   5.5546786e+00   4.5127918e+00   3.9935485e+00   3.4733020e+00   4.5569739e+00   4.6922818e+00   4.6236700e+00   3.5968849e+00   4.7939394e+00   4.8471780e+00   4.4913725e+00   3.9942507e+00   4.1085491e+00   4.3067090e+00   3.5093006e+00   3.1271814e-01   2.6440626e+00   9.6964683e-01   5.8796666e-01   9.8545402e-01   1.0013399e+00   9.2426065e-01   7.6195008e-01   7.3283576e-01   2.6643250e-01   3.3524935e+00   2.9103383e+00   3.4378505e+00   3.2794093e+00   3.2805279e+00   2.7218307e+00   2.9677934e+00   2.7417113e+00   3.1265865e+00   2.6350666e+00   3.5810092e+00   2.6509959e+00   3.4564670e+00   2.9240118e+00   2.2778214e+00   3.0636652e+00   2.6473174e+00   2.5673371e+00   3.9008585e+00   2.8131786e+00   3.0066012e+00   2.7310040e+00   3.6088006e+00   2.8947302e+00   2.8966017e+00   3.0506196e+00   
3.4785143e+00   3.5188926e+00   2.8816131e+00   2.5125550e+00   2.9397900e+00   2.8645995e+00   2.6245231e+00   3.3639057e+00   2.6028733e+00   2.7271822e+00   3.2253861e+00   3.6489825e+00   2.3376510e+00   2.9371604e+00   2.8382974e+00   2.8051321e+00   2.8006021e+00   2.9309089e+00   2.7184808e+00   2.3312464e+00   2.5047936e+00   2.7731354e+00   2.5341497e+00   2.5862794e+00   4.2119757e+00   3.5278864e+00   4.3705395e+00   3.6366115e+00   4.0640736e+00   4.9516709e+00   3.3348040e+00   4.4927271e+00   4.2867968e+00   4.7459453e+00   3.5773144e+00   3.8303307e+00   4.0501276e+00   3.7856794e+00   3.8862115e+00   3.8584574e+00   3.6392860e+00   5.1247727e+00   5.4955777e+00   3.9594564e+00   4.2633399e+00   3.3993739e+00   5.1011948e+00   3.5798230e+00   3.9561606e+00   4.1885925e+00   3.4019138e+00   3.2277183e+00   3.9971830e+00   4.0727212e+00   4.6247900e+00   5.0557045e+00   4.0800856e+00   3.3285999e+00   3.5685643e+00   5.0078455e+00   3.9761694e+00   3.5324648e+00   3.1505332e+00   4.0358238e+00   4.2334406e+00   4.1156691e+00   3.5278864e+00   4.2682695e+00   4.3053166e+00   4.0686429e+00   3.9122205e+00   3.6972894e+00   3.7712394e+00   3.2160302e+00   2.8326674e+00   1.0103954e+00   4.2829723e-01   7.9126749e-01   1.1795364e+00   7.3442235e-01   8.5582452e-01   6.1158310e-01   4.8284931e-01   3.4789406e+00   3.0164286e+00   3.5708825e+00   3.4760111e+00   3.4435701e+00   2.8856827e+00   3.0483404e+00   2.9286177e+00   3.2959553e+00   2.7769569e+00   3.7899650e+00   2.7721706e+00   3.6919547e+00   3.0773843e+00   2.4199353e+00   3.1984671e+00   2.7597977e+00   2.7743820e+00   4.1049898e+00   3.0189963e+00   3.0754044e+00   2.9033794e+00   3.7972498e+00   3.0781443e+00   3.0628742e+00   3.1980682e+00   3.6529321e+00   3.6479042e+00   3.0224061e+00   2.7237896e+00   3.1475538e+00   3.0809334e+00   2.8106441e+00   3.5217353e+00   2.7064382e+00   2.7753422e+00   3.3543209e+00   3.8636687e+00   2.4678110e+00   3.1212622e+00   3.0250044e+00   2.9444840e+00   2.9956969e+00   3.1281937e+00   2.8892226e+00   2.4772050e+00   2.6547819e+00   2.9364676e+00   2.7130802e+00   2.7488632e+00   4.2524457e+00   3.6566905e+00   4.4856620e+00   3.7659004e+00   4.1610154e+00   5.0768465e+00   3.4623926e+00   4.6393183e+00   4.4611186e+00   4.7773180e+00   3.6552032e+00   3.9746118e+00   4.1574253e+00   3.9234139e+00   3.9686223e+00   3.9172103e+00   3.7603950e+00   5.1648090e+00   5.6463490e+00   4.1614237e+00   4.3393711e+00   3.5009132e+00   5.2503936e+00   3.7276020e+00   4.0260707e+00   4.3007127e+00   3.5361708e+00   3.3347521e+00   4.1191095e+00   4.2183675e+00   4.7752353e+00   5.1049558e+00   4.1955154e+00   3.4902431e+00   3.7536489e+00   5.1215318e+00   4.0036288e+00   3.6385337e+00   3.2536517e+00   4.1326097e+00   4.3100988e+00   4.1978681e+00   3.6566905e+00   4.3438151e+00   4.3538983e+00   4.1591001e+00   4.0714399e+00   3.8024850e+00   3.7974783e+00   3.3185266e+00   2.0833080e+00   2.8648636e+00   3.5532593e+00   1.6555341e+00   3.5410343e+00   2.0840787e+00   3.3747131e+00   2.3883072e+00   4.3833871e+00   3.9159762e+00   4.2941647e+00   2.3488350e+00   3.6240540e+00   2.9044888e+00   4.0815608e+00   1.5532921e+00   3.6825697e+00   2.4113529e+00   1.7998094e+00   3.2616916e+00   2.5529439e+00   3.3821744e+00   2.6637769e+00   3.9531209e+00   3.1831859e+00   2.5836708e+00   3.1669559e+00   2.2907828e+00   3.8684560e+00   3.0202406e+00   3.4019580e+00   3.1886068e+00   3.4332960e+00   3.7685816e+00   3.8803374e+00   4.1746389e+00   3.3102735e+00   2.2304221e+00   2.1489616e+00   
2.0501444e+00   2.6225712e+00   3.4164974e+00   3.0901976e+00   3.9885258e+00   4.0802502e+00   3.0869095e+00   2.9336463e+00   2.3936974e+00   2.5327316e+00   3.4518638e+00   2.5837552e+00   1.5782205e+00   2.6521862e+00   2.9662386e+00   2.9040188e+00   3.2768109e+00   1.6628177e+00   2.7685987e+00   5.0448046e+00   3.5141919e+00   4.9824611e+00   4.0549341e+00   4.5981277e+00   5.5863508e+00   2.6647036e+00   5.0241554e+00   4.1998979e+00   5.9440534e+00   4.4432394e+00   3.9560997e+00   4.6422438e+00   3.4164839e+00   4.0005863e+00   4.6454886e+00   4.2390210e+00   6.5166388e+00   5.6880370e+00   3.1944389e+00   5.0789131e+00   3.4956324e+00   5.5310286e+00   3.6881384e+00   4.9152167e+00   5.0890922e+00   3.6527501e+00   3.7902440e+00   4.2540354e+00   4.7585941e+00   5.0389487e+00   6.4918086e+00   4.3280601e+00   3.6284588e+00   3.4969965e+00   5.6399353e+00   4.9671290e+00   4.2659568e+00   3.6994310e+00   4.7714894e+00   4.8963175e+00   4.8281202e+00   3.5141919e+00   5.0683437e+00   5.1871515e+00   4.6280145e+00   3.7055141e+00   4.2764028e+00   4.7873094e+00   3.7371542e+00   1.1433971e+00   1.6774310e+00   6.8299624e-01   1.6304499e+00   2.4808718e-01   1.5886765e+00   7.6250797e-01   4.0055392e+00   3.4676312e+00   4.0289839e+00   3.2391776e+00   3.6989507e+00   2.9466089e+00   3.5208636e+00   2.4804256e+00   3.6211670e+00   2.6281173e+00   3.2921089e+00   3.0161637e+00   3.5345457e+00   3.2983536e+00   2.5210242e+00   3.6506644e+00   2.9161244e+00   2.7938108e+00   4.0292846e+00   2.8755116e+00   3.4075988e+00   3.0797683e+00   3.8646774e+00   3.2397520e+00   3.3586779e+00   3.5819899e+00   3.9571013e+00   4.0234614e+00   3.2253861e+00   2.6519927e+00   2.9269738e+00   2.8491914e+00   2.8419330e+00   3.6148101e+00   2.8039428e+00   3.2558795e+00   3.7924883e+00   3.8389577e+00   2.6284424e+00   2.9648238e+00   2.9126202e+00   3.2245885e+00   2.9718548e+00   2.6864788e+00   2.8651003e+00   2.6637996e+00   2.7789114e+00   3.1894122e+00   2.3748697e+00   2.8127546e+00   4.6364269e+00   3.7133040e+00   4.8825792e+00   4.0097653e+00   4.4740109e+00   5.5106999e+00   3.1817279e+00   5.0169254e+00   4.6066522e+00   5.3636182e+00   4.0783379e+00   4.1550948e+00   4.5252166e+00   3.8770438e+00   4.0814269e+00   4.3063766e+00   4.0872895e+00   5.8336247e+00   5.9533030e+00   4.0437148e+00   4.7859703e+00   3.5604924e+00   5.6269403e+00   3.8926783e+00   4.4927794e+00   4.7879866e+00   3.7291513e+00   3.6035976e+00   4.3384511e+00   4.6385717e+00   5.1321867e+00   5.8049832e+00   4.4149501e+00   3.6953819e+00   3.8133051e+00   5.5737973e+00   4.4415093e+00   3.9935485e+00   3.5037963e+00   4.5569739e+00   4.6922818e+00   4.6236700e+00   3.7133040e+00   4.7716971e+00   4.8030835e+00   4.5149959e+00   4.1509766e+00   4.1343605e+00   4.2319568e+00   3.5394847e+00   7.6869104e-01   1.2471855e+00   8.7636491e-01   9.9981032e-01   7.8863556e-01   7.0733904e-01   3.2400577e+00   2.7256768e+00   3.3173156e+00   3.2734582e+00   3.1889456e+00   2.6198618e+00   2.7351941e+00   2.7665224e+00   3.0627699e+00   2.5021229e+00   3.6608924e+00   2.4643905e+00   3.5457670e+00   2.8045035e+00   2.1368327e+00   2.9466857e+00   2.4386380e+00   2.5729082e+00   3.8963334e+00   2.8235662e+00   2.7242811e+00   2.6575342e+00   3.5598437e+00   2.8418228e+00   2.8206835e+00   2.9462527e+00   3.4233881e+00   3.3667899e+00   2.7322904e+00   2.5411273e+00   2.9638750e+00   2.9140871e+00   2.5797070e+00   3.2425969e+00   2.3780833e+00   2.4351544e+00   3.0892387e+00   3.6720185e+00   2.1688001e+00   2.8939857e+00   
2.7945402e+00   2.6616170e+00   2.7767058e+00   2.9769851e+00   2.6347557e+00   2.1986118e+00   2.3753308e+00   2.6828901e+00   2.5283291e+00   2.4839186e+00   3.8851614e+00   3.3427747e+00   4.1909067e+00   3.4628626e+00   3.8305140e+00   4.8043725e+00   3.1800308e+00   4.3815267e+00   4.2038608e+00   4.4513681e+00   3.3256952e+00   3.6826282e+00   3.8475722e+00   3.6210755e+00   3.6107649e+00   3.5632387e+00   3.4596612e+00   4.8847297e+00   5.3781989e+00   3.9435460e+00   4.0131264e+00   3.1614358e+00   4.9957986e+00   3.4408340e+00   3.7000441e+00   4.0284550e+00   3.2354442e+00   3.0107962e+00   3.8036057e+00   3.9713385e+00   4.5180638e+00   4.8486868e+00   3.8729425e+00   3.2243808e+00   3.5087561e+00   4.8402519e+00   3.6359784e+00   3.3268022e+00   2.9240118e+00   3.8232660e+00   3.9709252e+00   3.8746323e+00   3.3427747e+00   4.0131316e+00   4.0031779e+00   3.8300809e+00   3.7945557e+00   3.4841580e+00   3.4283673e+00   2.9863682e+00   1.9060194e+00   3.1239235e-01   1.5583422e+00   4.8124784e-01   1.2220171e+00   3.3785962e+00   2.9374280e+00   3.5071305e+00   3.8740792e+00   3.5491790e+00   3.0669573e+00   2.9018296e+00   3.4251046e+00   3.3648459e+00   3.0744865e+00   4.3230411e+00   2.8485664e+00   4.1027662e+00   3.1626116e+00   2.6442554e+00   3.1724373e+00   2.8315630e+00   3.0534300e+00   4.4306138e+00   3.3882074e+00   2.9857887e+00   3.0959220e+00   4.0072094e+00   3.2240677e+00   3.1644964e+00   3.2264712e+00   3.7352584e+00   3.6230125e+00   3.1206853e+00   3.1023948e+00   3.5580276e+00   3.5096299e+00   3.0877778e+00   3.6577352e+00   2.7900553e+00   2.5838366e+00   3.3069107e+00   4.1792641e+00   2.5911812e+00   3.4684046e+00   3.3165070e+00   2.9868392e+00   3.2999163e+00   3.6373219e+00   3.1449351e+00   2.5937039e+00   2.8148578e+00   3.0518873e+00   3.1967428e+00   2.9647081e+00   4.0498613e+00   3.7819451e+00   4.3976398e+00   3.7644678e+00   4.0879497e+00   4.9574979e+00   3.7577430e+00   4.5772252e+00   4.5796941e+00   4.4589650e+00   3.5295907e+00   4.0592075e+00   4.0919364e+00   4.1226882e+00   4.0237849e+00   3.7803562e+00   3.7136035e+00   4.7772875e+00   5.6344258e+00   4.4679358e+00   4.1805116e+00   3.6013971e+00   5.1936459e+00   3.8460802e+00   3.8293150e+00   4.1365715e+00   3.6263469e+00   3.3344860e+00   4.1404384e+00   4.1465376e+00   4.7500857e+00   4.7258629e+00   4.2123837e+00   3.5757376e+00   3.9028467e+00   5.0127222e+00   3.7704782e+00   3.5495399e+00   3.2637691e+00   4.0284560e+00   4.1958516e+00   4.1011034e+00   3.7819451e+00   4.1793948e+00   4.1561376e+00   4.1029254e+00   4.2472742e+00   3.7624633e+00   3.5705606e+00   3.3154318e+00   1.8882412e+00   5.3690447e-01   1.7295385e+00   7.4445830e-01   3.5842571e+00   3.0831073e+00   3.5905412e+00   2.6850208e+00   3.1920496e+00   2.4895611e+00   3.1874455e+00   1.9825106e+00   3.1280291e+00   2.1902526e+00   2.7631963e+00   2.5991209e+00   2.9183874e+00   2.8448970e+00   2.0635463e+00   3.2072460e+00   2.5480786e+00   2.2627580e+00   3.4383056e+00   2.3172372e+00   3.0909340e+00   2.5623863e+00   3.3194064e+00   2.7506751e+00   2.8644451e+00   3.1134606e+00   3.4405052e+00   3.5767292e+00   2.7771563e+00   2.0712834e+00   2.3618912e+00   2.2737657e+00   2.3098587e+00   3.1429166e+00   2.4666493e+00   2.9899331e+00   3.3598261e+00   3.2396917e+00   2.2342806e+00   2.4357274e+00   2.4181291e+00   2.7984743e+00   2.4231383e+00   2.1599940e+00   2.3764241e+00   2.2561857e+00   2.3387588e+00   2.7074009e+00   1.8390751e+00   2.3351007e+00   4.3436399e+00   3.2756710e+00   4.4477490e+00   
3.5866422e+00   4.0782068e+00   5.0677790e+00   2.7922251e+00   4.5545199e+00   4.0836814e+00   5.0715858e+00   3.7130862e+00   3.6733277e+00   4.0983149e+00   3.4111720e+00   3.6933050e+00   3.9623038e+00   3.6711803e+00   5.5579136e+00   5.4498365e+00   3.4842014e+00   4.4103781e+00   3.1690867e+00   5.1441957e+00   3.3997317e+00   4.1528029e+00   4.3901202e+00   3.2630747e+00   3.2035560e+00   3.8955732e+00   4.1857725e+00   4.6435471e+00   5.5146776e+00   3.9762768e+00   3.2193184e+00   3.3255820e+00   5.1213824e+00   4.1678001e+00   3.6124079e+00   3.1107135e+00   4.1457358e+00   4.3076787e+00   4.2130787e+00   3.2756710e+00   4.4066811e+00   4.4713485e+00   4.0952473e+00   3.6289876e+00   3.7168749e+00   3.9644506e+00   3.1662754e+00   1.5140329e+00   3.3872939e-01   1.1649855e+00   3.5691650e+00   3.1595321e+00   3.7055656e+00   4.0160835e+00   3.7376603e+00   3.2592987e+00   3.1435650e+00   3.5370651e+00   3.5437638e+00   3.2591886e+00   4.4166410e+00   3.0676819e+00   4.2127293e+00   3.3654329e+00   2.8346837e+00   3.3660906e+00   3.0613575e+00   3.2026716e+00   4.5808841e+00   3.5275705e+00   3.2454510e+00   3.2718756e+00   4.1819826e+00   3.4031279e+00   3.3454884e+00   3.4170637e+00   3.9109404e+00   3.8358967e+00   3.3305911e+00   3.2315455e+00   3.6883726e+00   3.6296117e+00   3.2506700e+00   3.8623185e+00   3.0230066e+00   2.8458833e+00   3.5111771e+00   4.3201594e+00   2.8024863e+00   3.6263297e+00   3.4825375e+00   3.1978058e+00   3.4556048e+00   3.7429398e+00   3.3240937e+00   2.7959976e+00   3.0137031e+00   3.2391776e+00   3.3180586e+00   3.1510639e+00   4.3279818e+00   4.0059487e+00   4.6233424e+00   3.9929211e+00   4.3355275e+00   5.1718231e+00   3.9512216e+00   4.7810147e+00   4.7732950e+00   4.7150495e+00   3.7777251e+00   4.2731987e+00   4.3246862e+00   4.3347988e+00   4.2753666e+00   4.0433740e+00   3.9425600e+00   5.0081332e+00   5.8406251e+00   4.6274156e+00   4.4284929e+00   3.8398842e+00   5.3940263e+00   4.0533472e+00   4.0818092e+00   4.3543675e+00   3.8429689e+00   3.5715666e+00   4.3728103e+00   4.3436347e+00   4.9498040e+00   4.9393850e+00   4.4487185e+00   3.7756717e+00   4.0901950e+00   5.2287042e+00   4.0497882e+00   3.7884247e+00   3.5028854e+00   4.2624115e+00   4.4485334e+00   4.3403092e+00   4.0059487e+00   4.4317896e+00   4.4210531e+00   4.3442496e+00   4.4457375e+00   3.9985746e+00   3.8504489e+00   3.5592028e+00   1.4309353e+00   5.3528567e-01   3.7908776e+00   3.2731841e+00   3.8215973e+00   3.1206853e+00   3.5080970e+00   2.7892862e+00   3.3363505e+00   2.4070515e+00   3.4174587e+00   2.5169099e+00   3.2261716e+00   2.8456035e+00   3.3834513e+00   3.1193847e+00   2.3599428e+00   3.4420978e+00   2.7676217e+00   2.6211360e+00   3.8782821e+00   2.7318604e+00   3.2516765e+00   2.8950591e+00   3.6956240e+00   3.0573546e+00   3.1595622e+00   3.3778208e+00   3.7543715e+00   3.8301935e+00   3.0538048e+00   2.4889600e+00   2.7975756e+00   2.7172725e+00   2.6747723e+00   3.4570094e+00   2.6710885e+00   3.0859941e+00   3.5894820e+00   3.6730945e+00   2.4678645e+00   2.8348872e+00   2.7756196e+00   3.0423378e+00   2.8112845e+00   2.6077230e+00   2.7173555e+00   2.4925318e+00   2.6151930e+00   2.9985224e+00   2.2768387e+00   2.6523384e+00   4.4886181e+00   3.5762214e+00   4.6936725e+00   3.8412437e+00   4.3086181e+00   5.3124523e+00   3.1125049e+00   4.8185220e+00   4.4330566e+00   5.1853832e+00   3.9019516e+00   3.9878172e+00   4.3438773e+00   3.7545919e+00   3.9571174e+00   4.1452084e+00   3.9080206e+00   5.6409887e+00   5.7635531e+00   3.9041153e+00   
4.6071696e+00   3.4361835e+00   5.4269728e+00   3.7248960e+00   4.3153491e+00   4.5881410e+00   3.5627565e+00   3.4386758e+00   4.1762134e+00   4.4334431e+00   4.9338087e+00   5.6026788e+00   4.2556298e+00   3.5163766e+00   3.6516985e+00   5.3754384e+00   4.2899814e+00   3.8175194e+00   3.3436392e+00   4.3710164e+00   4.5233951e+00   4.4426760e+00   3.5762214e+00   4.5972906e+00   4.6375405e+00   4.3421746e+00   3.9932553e+00   3.9596590e+00   4.0813696e+00   3.3867904e+00   9.9272943e-01   3.3624661e+00   2.9811147e+00   3.5018936e+00   3.8169093e+00   3.5209477e+00   3.0838698e+00   2.9939885e+00   3.3707744e+00   3.3216375e+00   3.1074562e+00   4.2293025e+00   2.8939100e+00   3.9735251e+00   3.1779321e+00   2.6513305e+00   3.1539115e+00   2.9206559e+00   2.9923096e+00   4.3522137e+00   3.3213891e+00   3.1222272e+00   3.0540027e+00   3.9664528e+00   3.2012517e+00   3.1248526e+00   3.2007609e+00   3.6824267e+00   3.6418210e+00   3.1482435e+00   3.0088381e+00   3.4838907e+00   3.4206811e+00   3.0415159e+00   3.6826470e+00   2.9006138e+00   2.7293873e+00   3.3112277e+00   4.0819926e+00   2.6432089e+00   3.4355346e+00   3.3034492e+00   3.0164601e+00   3.2442582e+00   3.5631690e+00   3.1415769e+00   2.6264645e+00   2.8384396e+00   3.0300747e+00   3.1354811e+00   2.9649167e+00   4.2304738e+00   3.8482047e+00   4.4439318e+00   3.8285351e+00   4.1849277e+00   4.9874125e+00   3.8271288e+00   4.5862820e+00   4.5664639e+00   4.6040986e+00   3.6270951e+00   4.0846199e+00   4.1503558e+00   4.1702140e+00   4.1407964e+00   3.9121183e+00   3.7737683e+00   4.8997002e+00   5.6369181e+00   4.4180349e+00   4.2780098e+00   3.7035075e+00   5.1920494e+00   3.8582580e+00   3.9463952e+00   4.1826549e+00   3.6583100e+00   3.4129797e+00   4.2036908e+00   4.1443501e+00   4.7432980e+00   4.8153136e+00   4.2825977e+00   3.5821023e+00   3.9040345e+00   5.0374022e+00   3.9556226e+00   3.6351652e+00   3.3487998e+00   4.0905154e+00   4.2992014e+00   4.1693135e+00   3.8482047e+00   4.2897273e+00   4.2963449e+00   4.1754173e+00   4.2443816e+00   3.8297356e+00   3.7573406e+00   3.4190329e+00   3.4434283e+00   2.9833205e+00   3.5091456e+00   3.1516030e+00   3.2866494e+00   2.6849207e+00   3.0541761e+00   2.5654361e+00   3.1545670e+00   2.5403244e+00   3.3918434e+00   2.6608343e+00   3.3413616e+00   2.9302185e+00   2.2379234e+00   3.1290373e+00   2.6442995e+00   2.5077372e+00   3.8111267e+00   2.7069456e+00   3.0566678e+00   2.7098649e+00   3.5644689e+00   2.8826061e+00   2.9134932e+00   3.0944404e+00   3.4986589e+00   3.5664548e+00   2.8806273e+00   2.4158569e+00   2.8131786e+00   2.7333339e+00   2.5637470e+00   3.3370594e+00   2.5885603e+00   2.8220110e+00   3.2904739e+00   3.5710204e+00   2.3287218e+00   2.8315630e+00   2.7529707e+00   2.8293212e+00   2.7254528e+00   2.7527251e+00   2.6524994e+00   2.3299431e+00   2.4822440e+00   2.7803033e+00   2.3731496e+00   2.5423572e+00   4.2830420e+00   3.4939592e+00   4.4286678e+00   3.6575173e+00   4.1044791e+00   5.0220848e+00   3.2200733e+00   4.5468375e+00   4.2700249e+00   4.8698851e+00   3.6462342e+00   3.8237490e+00   4.0990020e+00   3.7208580e+00   3.8692104e+00   3.9203597e+00   3.6817365e+00   5.2775206e+00   5.5250867e+00   3.8676958e+00   4.3392543e+00   3.3693781e+00   5.1524083e+00   3.5650934e+00   4.0437994e+00   4.2783342e+00   3.3968462e+00   3.2517273e+00   4.0065881e+00   4.1377875e+00   4.6677357e+00   5.2142247e+00   4.0893000e+00   3.3307003e+00   3.5369003e+00   5.0771896e+00   4.0613196e+00   3.5869628e+00   3.1695162e+00   4.1006440e+00   4.2899814e+00   
4.1769447e+00   3.4939592e+00   4.3422191e+00   4.3859843e+00   4.1114107e+00   3.8721945e+00   3.7365034e+00   3.8554667e+00   3.2330990e+00   7.4500632e-01   3.1271814e-01   2.7864553e+00   1.1117653e+00   1.8291315e+00   9.1459005e-01   3.2771828e+00   8.5582452e-01   2.5020950e+00   3.7722922e+00   1.4404415e+00   2.6850561e+00   1.2884095e+00   1.9346765e+00   4.6190224e-01   1.7610224e+00   1.9545276e+00   2.5064746e+00   2.4134734e+00   1.4291842e+00   1.4855627e+00   1.8305602e+00   1.4494865e+00   1.0355160e+00   6.8921053e-01   9.5529726e-01   7.2626021e-01   1.4025367e+00   2.2620298e+00   2.6646285e+00   2.6984190e+00   1.9245986e+00   1.7053476e+00   1.9940477e+00   1.3238834e+00   4.4901474e-01   2.2514668e+00   1.7899689e+00   2.4621620e+00   2.3008240e+00   1.1820572e+00   2.0593667e+00   3.3235867e+00   2.0701817e+00   1.6811909e+00   1.7438018e+00   1.2168151e+00   2.9923086e+00   1.8570167e+00   1.8407084e+00   1.9774894e+00   1.2374249e+00   1.3146181e+00   1.4369760e+00   1.6548985e+00   3.0339990e+00   1.3065206e+00   1.8441719e+00   1.9017153e+00   1.0169098e+00   1.5490835e+00   1.1480416e+00   2.3912363e+00   2.1724397e+00   1.4252775e+00   1.0284221e+00   2.2390158e+00   2.3611228e+00   2.6121814e+00   1.3139868e+00   2.0833701e+00   1.8624251e+00   1.5270661e+00   1.1605968e+00   9.3589904e-01   1.4360842e+00   1.2967707e+00   1.5740302e+00   8.5349066e-01   1.4639724e+00   2.1546616e+00   1.6538197e+00   1.2783641e+00   1.8320269e+00   1.7168897e+00   1.7042715e+00   1.0288355e+00   1.3960908e+00   1.0327124e+00   1.4702446e+00   1.2287925e+00   1.9774894e+00   1.3826233e+00   1.6072393e+00   1.3472497e+00   1.9439880e+00   1.1295026e+00   1.6414265e+00   1.5177322e+00   6.8496652e-01   2.3745921e+00   9.3211669e-01   1.2784093e+00   3.1271814e-01   2.7526949e+00   7.8034610e-01   1.8874930e+00   3.3568278e+00   7.7863029e-01   2.4620981e+00   7.9999102e-01   1.3194463e+00   4.5257749e-01   1.0705713e+00   1.5282070e+00   2.3189157e+00   1.9824340e+00   7.4029244e-01   1.0636179e+00   1.6347187e+00   1.0722301e+00   7.4849274e-01   5.3988754e-01   1.0632334e+00   7.0213871e-01   8.4383266e-01   1.8384560e+00   2.2399960e+00   2.2847962e+00   1.4577178e+00   1.3022697e+00   1.2927254e+00   6.8064066e-01   4.4417668e-01   2.0964002e+00   1.1252542e+00   1.9840858e+00   1.8038514e+00   6.0365341e-01   1.6354514e+00   2.8387736e+00   1.5364600e+00   1.0539473e+00   1.1361809e+00   7.8649633e-01   2.4634199e+00   1.2983546e+00   1.5835083e+00   1.4983760e+00   1.4748100e+00   1.0180846e+00   1.2694582e+00   2.0850659e+00   2.4405647e+00   1.6897529e+00   1.8533655e+00   2.0793529e+00   7.4797652e-01   1.3453828e+00   1.1770444e+00   1.9571621e+00   1.6977813e+00   1.1421260e+00   8.3850424e-01   2.6029823e+00   2.7071356e+00   2.3733266e+00   1.3878105e+00   1.5050081e+00   2.3020930e+00   1.2450968e+00   1.1247714e+00   1.3455945e+00   1.0453799e+00   7.4157869e-01   1.3630233e+00   1.3061954e+00   1.8456576e+00   2.6048102e+00   1.4425815e+00   9.9058911e-01   1.5658693e+00   2.1444356e+00   1.4166079e+00   7.2727886e-01   7.9343577e-01   1.1384575e+00   1.4013840e+00   1.2776135e+00   1.4983760e+00   1.4006413e+00   1.5375255e+00   1.2650236e+00   1.7250893e+00   9.0221296e-01   1.2767751e+00   9.2060977e-01   2.5673851e+00   8.6079202e-01   1.6428179e+00   8.7623959e-01   3.1131006e+00   6.6453319e-01   2.3246810e+00   3.5720588e+00   1.2918836e+00   2.4857868e+00   1.0845006e+00   1.8135469e+00   3.9472619e-01   1.6028858e+00   1.8029164e+00   2.2526468e+00   
2.2305704e+00   1.2920175e+00   1.3194463e+00   1.5620078e+00   1.2567627e+00   8.7273869e-01   5.3095950e-01   7.1671402e-01   4.2827238e-01   1.2022652e+00   2.1186438e+00   2.4755072e+00   2.5212188e+00   1.7582453e+00   1.4360884e+00   1.8400908e+00   1.3147484e+00   2.6680274e-01   2.0194508e+00   1.6709595e+00   2.2587909e+00   2.1030957e+00   1.0161846e+00   1.8732616e+00   3.1496799e+00   1.8819614e+00   1.5700887e+00   1.5933926e+00   1.0543640e+00   2.8415314e+00   1.6890927e+00   1.6862498e+00   1.7038925e+00   1.0251132e+00   1.0245496e+00   1.1781513e+00   1.5212572e+00   2.8050734e+00   1.1091494e+00   1.5452835e+00   1.9080234e+00   8.5359653e-01   1.2416734e+00   8.9528108e-01   2.1088803e+00   1.9097018e+00   1.2522193e+00   7.4612152e-01   2.3284654e+00   2.1556564e+00   2.3436971e+00   1.1651790e+00   1.8364691e+00   1.6976628e+00   1.2371704e+00   1.0463225e+00   8.5302032e-01   1.1623508e+00   1.0682140e+00   1.2723284e+00   6.7955751e-01   1.2572095e+00   2.2840066e+00   1.3572135e+00   1.0082548e+00   1.5613089e+00   1.5962380e+00   1.5974627e+00   7.9671887e-01   1.1799226e+00   8.3571552e-01   1.2674750e+00   1.0543826e+00   1.7038925e+00   1.2197800e+00   1.4811026e+00   1.1132425e+00   1.6485749e+00   8.6295295e-01   1.5402909e+00   1.2957413e+00   1.7240804e+00   1.2117746e+00   2.5620931e+00   9.4346743e-01   1.9481085e+00   1.0013399e+00   1.0383354e+00   1.7091506e+00   7.5651431e-01   1.6169211e+00   1.4074199e+00   2.3606801e+00   1.6642999e+00   1.0677270e+00   9.5748562e-01   5.4702555e-01   2.2752108e+00   1.3619011e+00   1.2144903e+00   1.4245490e+00   1.7677805e+00   2.1070188e+00   2.0042865e+00   2.3026776e+00   1.5583422e+00   8.7856768e-01   3.6704030e-01   4.8644514e-01   1.0013399e+00   1.3262124e+00   1.6642999e+00   2.6524441e+00   2.3938089e+00   9.9234874e-01   1.6199145e+00   4.6126066e-01   7.3978204e-01   1.8066960e+00   7.9191984e-01   8.2250769e-01   9.3727156e-01   1.6415483e+00   1.4092680e+00   1.6304499e+00   9.1432842e-01   1.1795364e+00   3.1638147e+00   1.4103498e+00   2.9322745e+00   2.0247895e+00   2.5487652e+00   3.5082844e+00   1.0453705e+00   2.9611604e+00   1.9449446e+00   4.1343567e+00   2.6451449e+00   1.7869811e+00   2.6253748e+00   1.1973458e+00   1.9817270e+00   2.7838011e+00   2.2840066e+00   4.7706180e+00   3.4576971e+00   8.9870984e-01   3.1324336e+00   1.5639220e+00   3.4016597e+00   1.5728443e+00   3.0734808e+00   3.1995683e+00   1.6368228e+00   1.9546803e+00   2.1052860e+00   2.8313078e+00   2.9375460e+00   4.8020420e+00   2.1735035e+00   1.6493848e+00   1.3576485e+00   3.5774884e+00   3.2045691e+00   2.3952974e+00   1.8988804e+00   2.8268459e+00   2.8989852e+00   2.8927951e+00   1.4103498e+00   3.1063891e+00   3.2893581e+00   2.6241058e+00   1.4441104e+00   2.3170196e+00   3.0817543e+00   1.9124815e+00   1.0026233e+00   1.1867923e+00   2.3572427e+00   3.6939647e-01   1.6408576e+00   2.7392425e+00   8.8835337e-01   1.6805749e+00   5.5399712e-01   1.2745081e+00   7.5303835e-01   1.1820572e+00   1.1301977e+00   1.4315442e+00   1.4464138e+00   1.2423523e+00   6.4626422e-01   7.5230154e-01   6.2536527e-01   4.0664863e-01   5.0731024e-01   4.0158746e-01   6.2543628e-01   6.4884272e-01   1.4014424e+00   1.6702453e+00   1.7317202e+00   1.0390957e+00   7.1781501e-01   1.4073416e+00   1.5165187e+00   7.3502408e-01   1.2122797e+00   1.2421878e+00   1.4565053e+00   1.3559191e+00   6.8064066e-01   1.0943185e+00   2.3628816e+00   1.1638514e+00   1.1627754e+00   1.0519629e+00   5.3106808e-01   2.1057450e+00   1.0403581e+00   
[Omitted: a large block of raw whitespace-separated floating-point values in scientific notation (on the order of a few thousand entries, roughly 0.13 to 5.80, including one exact 0.0000000e+00), apparently the numeric payload of a packaged data file carried along in this diff. No prose, code, captions, or table structure is recoverable from this span.]
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-seuclidean-ml.txt b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-seuclidean-ml.txt
new file mode 100644
index 00000000..ce80cb1e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-seuclidean-ml.txt
@@ -0,0 +1 @@
+   1.4330520e+01   1.4635426e+01   1.3450855e+01   1.4761140e+01   1.3508642e+01   1.5434417e+01   1.3887693e+01   1.5166776e+01   1.3966038e+01   1.4950451e+01   1.4564587e+01   1.3834201e+01   1.4347008e+01   1.5641962e+01   1.4689053e+01   1.4418720e+01   1.4545856e+01   1.4151822e+01   1.4669017e+01   1.5150750e+01   1.3770166e+01   1.3288969e+01   1.4048191e+01   1.4049959e+01   1.4164158e+01   1.3727834e+01   1.4074687e+01   1.4321303e+01   1.2497330e+01   1.3820273e+01   1.4441030e+01   1.4780222e+01   1.2504339e+01   1.5022245e+01   1.4263650e+01   1.3704507e+01   1.3694385e+01   1.3667517e+01   1.3177468e+01   1.4391931e+01   1.4893903e+01   1.4475753e+01   1.4440707e+01   1.3603096e+01   1.6889651e+01   1.4731174e+01   1.3337775e+01   1.5187532e+01   1.5667271e+01   1.4226037e+01   1.4203554e+01   1.5272898e+01   1.6031460e+01   1.5991549e+01   1.1855060e+01   1.4844776e+01   1.2475182e+01   1.4408126e+01   1.4836870e+01   1.3472986e+01   1.4089281e+01   1.1018298e+01   1.3183296e+01   1.4590802e+01   1.4404230e+01   1.2717623e+01   1.3983283e+01   1.4017133e+01   1.4608005e+01   1.4402553e+01   1.3977803e+01   1.4091040e+01   1.3977459e+01   1.2630449e+01   1.4160109e+01   1.3029417e+01   1.2654432e+01   1.2794946e+01   1.3194978e+01   1.4378745e+01   1.2431908e+01   1.3852651e+01   1.3748358e+01   1.4003568e+01   1.5066681e+01   1.5192826e+01   1.4370013e+01   1.5792545e+01   1.3547546e+01   1.4411543e+01   1.4794215e+01   1.4924312e+01   1.4789153e+01   1.4875055e+01   1.4208537e+01   1.2786148e+01   1.4882476e+01   1.3302010e+01   1.4354774e+01   1.4542129e+01   1.5889633e+01   1.2928185e+01   1.4877868e+01   1.2890902e+01   1.4406165e+01   1.4498123e+01   1.4303273e+01   1.3207002e+01   1.3954732e+01   1.4841248e+01   1.5427799e+01   1.4363463e+01   1.3976277e+01   1.4284878e+01   1.4457991e+01   1.3369469e+01   1.5246610e+01   1.4487573e+01   1.4525176e+01   1.4505865e+01   1.5037347e+01   1.3834927e+01   1.3758988e+01   1.3424987e+01   1.4914766e+01   1.3783923e+01   1.3434291e+01   1.2895927e+01   1.3870360e+01   1.3342977e+01   1.3094322e+01   1.3057847e+01   1.3322375e+01   1.4940650e+01   1.4476829e+01   1.4197503e+01   1.4597035e+01   1.2963234e+01   1.4011414e+01   1.3181409e+01   1.3339615e+01   1.3928735e+01   1.3508015e+01   1.3170749e+01   1.3529133e+01   1.3454724e+01   1.4883437e+01   1.4564565e+01   1.2474313e+01   1.4435790e+01   1.5285703e+01   1.3701736e+01   1.3578312e+01   1.4807311e+01   1.4281072e+01   1.2920213e+01   1.4427803e+01   1.1408611e+01   1.4097334e+01   1.2868115e+01   1.3903683e+01   1.3800332e+01   1.3439339e+01   1.4062651e+01   1.3242107e+01   1.4400424e+01   1.3826132e+01   1.5991146e+01   1.3118258e+01   1.5377390e+01   1.2858378e+01   1.5249567e+01   1.4081585e+01   1.4458052e+01   1.4175623e+01   1.4850069e+01   1.5506668e+01   1.5014770e+01   1.4337030e+01   1.5214705e+01   1.4803729e+01   1.3188675e+01   1.3437739e+01   1.3409394e+01   1.4607386e+01   1.5394271e+01   1.5946451e+01   1.3769364e+01   1.4181208e+01   1.2551765e+01
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-spearman-ml.txt b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-spearman-ml.txt
new file mode 100644
index 00000000..b50fe3af
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/pdist-spearman-ml.txt
@@ -0,0 +1 @@
+   9.3540954e-01   9.7904590e-01   8.6703870e-01   1.1569997e+00   8.7174317e-01   1.0627183e+00   9.1272727e-01   1.1593999e+00   9.7573357e-01   1.0072127e+00   1.0536814e+00   9.6276028e-01   9.7700570e-01   1.1513951e+00   1.0719592e+00   9.2178818e-01   1.0004680e+00   9.3689769e-01   9.8205821e-01   1.0332673e+00   9.4517852e-01   8.9437744e-01   9.7556556e-01   9.0460246e-01   9.7210921e-01   9.2230423e-01   9.9605161e-01   9.6852085e-01   8.4162016e-01   9.6667267e-01   9.7759376e-01   9.9757576e-01   7.6992499e-01   1.0151695e+00   9.8691869e-01   9.0325833e-01   8.6665467e-01   8.8844884e-01   8.4553255e-01   9.7700570e-01   9.5159916e-01   9.8906691e-01   1.0551935e+00   9.1973597e-01   1.3266247e+00   1.0982778e+00   8.4531653e-01   1.0887369e+00   1.0984938e+00   9.9851185e-01   9.0701470e-01   1.0639304e+00   1.2392919e+00   1.1422502e+00   8.1725773e-01   1.1844944e+00   7.8219022e-01   1.0817162e+00   1.2196100e+00   1.0003120e+00   1.0164536e+00   7.0724272e-01   9.7981398e-01   1.1134953e+00   1.0671107e+00   9.3600960e-01   9.9984398e-01   1.0356916e+00   1.1248005e+00   1.0696310e+00   1.0634263e+00   9.6472847e-01   9.9365137e-01   8.5724572e-01   1.1257846e+00   8.9930993e-01   9.4903090e-01   9.0667867e-01   9.1231923e-01   1.0573777e+00   9.0105011e-01   9.5255926e-01   1.0177978e+00   1.0606901e+00   1.1966997e+00   1.0891929e+00   1.0085089e+00   1.2640264e+00   9.3246925e-01   1.0198020e+00   1.2055806e+00   1.1237924e+00   1.1060666e+00   1.0517252e+00   1.0684668e+00   7.6844884e-01   1.0572697e+00   8.7373537e-01   9.6283228e-01   9.9350735e-01   1.2412601e+00   7.6322832e-01   1.0298950e+00   8.6148215e-01   1.0042724e+00   9.7012901e-01   9.3712571e-01   8.5845785e-01   8.5862586e-01   1.0336634e+00   1.0955536e+00   9.5302730e-01   9.8696670e-01   1.0633063e+00   1.0026643e+00   9.6380438e-01   1.1711251e+00   9.9273927e-01   1.0260906e+00   1.0863966e+00   1.0482808e+00   9.0361836e-01   9.2358836e-01   8.7794779e-01   1.2461206e+00   9.2985299e-01   1.0418962e+00   9.4660666e-01   9.5636364e-01   9.0646265e-01   9.9113111e-01   8.3027903e-01   9.3341734e-01   1.1378938e+00   1.0548215e+00   1.0086889e+00   1.1998920e+00   8.6063006e-01   1.0255506e+00   8.4786079e-01   1.0090729e+00   9.2542454e-01   9.5176718e-01   9.3477348e-01   9.0091809e-01   9.6404440e-01   1.1158716e+00   9.9614761e-01   7.7682568e-01   1.0605461e+00   1.0895650e+00   9.0065407e-01   8.7173117e-01   9.9821182e-01   1.2165617e+00   8.6127813e-01   1.1111071e+00   7.9015902e-01   1.0433843e+00   8.6510651e-01   1.0019202e+00   1.0154815e+00   9.4381038e-01   9.8646265e-01   1.0062526e+00   9.7426943e-01   9.8191419e-01   1.3038944e+00   8.6277828e-01   1.0830243e+00   8.6851485e-01   1.1192559e+00   9.9120312e-01   9.6540054e-01   9.1072307e-01   1.1775698e+00   1.1139154e+00   1.1083468e+00   9.9593159e-01   1.0825923e+00   1.1115032e+00   9.7430543e-01   9.5605161e-01   9.2800480e-01   9.4369037e-01   1.1136034e+00   1.1382898e+00   9.5937594e-01   9.8843084e-01   7.4563456e-01
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/random-bool-data.txt b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/random-bool-data.txt
new file mode 100644
index 00000000..df0d838f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/random-bool-data.txt
@@ -0,0 +1,100 @@
+0 1 1 0 1 0 0 1 0 0 0 1 0 0 0 0 0 1 0 0 1 0 0 0 1 1 0 0 1 1
+1 1 1 1 1 1 1 0 0 1 1 1 0 0 0 0 1 0 1 0 1 1 1 0 1 0 1 1 1 1
+0 1 0 1 1 0 0 1 1 1 1 0 1 0 0 1 1 0 1 1 1 0 1 1 0 1 1 1 0 1
+1 1 1 0 0 1 1 0 0 1 1 1 0 0 1 1 0 1 1 1 0 1 1 0 0 0 0 1 0 0
+1 0 0 0 0 1 1 0 1 1 0 1 0 0 0 0 1 0 0 1 0 1 0 0 1 1 1 1 0 0
+1 0 1 1 0 0 0 1 1 1 1 1 0 1 1 0 1 0 1 0 1 0 0 0 0 0 0 0 1 1
+0 1 0 0 1 0 0 0 1 0 0 1 1 0 0 0 0 1 1 0 0 1 0 1 1 1 1 0 1 0
+1 0 1 1 1 0 0 0 0 1 1 0 0 0 0 1 0 1 0 0 0 1 1 1 0 1 0 0 1 0
+1 1 1 0 0 1 1 0 0 1 0 0 1 0 0 1 0 1 1 0 1 1 0 1 1 1 0 0 1 1
+1 1 0 1 0 0 1 1 1 1 1 1 1 0 1 0 1 1 1 1 0 0 0 0 0 0 1 1 0 0
+1 0 1 0 1 1 0 1 1 0 1 1 0 1 1 1 1 0 0 1 1 0 0 0 0 1 0 0 0 0
+1 1 1 1 0 1 0 0 0 0 0 1 0 1 1 1 1 0 1 1 1 1 1 1 0 1 0 1 1 1
+1 1 1 1 1 1 1 1 1 0 1 1 0 0 1 0 1 0 1 0 1 0 0 0 1 0 0 1 0 1
+0 1 1 0 0 1 1 0 0 0 0 1 0 1 1 0 1 0 1 0 1 1 0 1 0 0 1 1 1 1
+1 0 0 1 0 0 1 0 1 0 0 1 0 0 0 1 1 0 0 0 1 0 1 0 0 1 1 0 1 1
+1 0 0 1 1 0 0 1 1 0 0 1 1 1 1 1 1 1 1 0 1 1 1 1 0 1 1 1 0 0
+1 1 0 0 1 0 0 0 1 0 0 1 0 1 0 0 1 0 1 1 0 1 0 0 0 1 1 1 1 1
+0 0 0 1 1 1 1 1 0 1 0 1 1 1 1 0 0 1 1 1 1 1 0 0 1 0 1 0 0 0
+1 0 1 1 0 1 0 0 1 0 0 1 0 0 1 0 0 0 1 0 0 1 1 1 0 1 1 0 1 1
+0 0 0 0 1 0 1 0 1 1 0 0 1 0 0 0 0 0 0 0 0 1 1 1 0 0 0 1 1 1
+0 1 0 0 1 1 0 0 1 1 1 0 0 0 1 0 0 0 0 1 1 0 0 1 0 1 1 0 1 0
+1 0 1 0 1 1 1 0 0 0 1 0 1 1 0 0 0 0 0 0 0 1 0 0 1 1 1 0 1 1
+0 0 1 0 0 0 0 0 1 1 0 0 1 1 1 1 1 1 1 1 0 1 0 0 0 0 0 0 1 0
+0 1 0 1 1 1 0 1 1 1 0 1 0 1 1 1 0 0 0 0 1 1 1 0 0 1 1 0 0 1
+0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 1 1 0 0 0 0 1 0 1 1 0 1 1 0 0
+1 0 0 0 1 0 1 0 0 1 0 1 1 0 1 0 1 0 1 0 1 1 1 0 0 0 1 1 1 0
+1 0 0 0 1 1 1 0 0 1 0 1 1 1 0 0 0 1 1 1 0 0 0 0 1 0 0 0 1 1
+0 1 0 0 0 1 1 1 0 1 1 1 0 1 0 0 1 1 1 1 0 1 0 1 0 1 1 0 1 1
+0 0 1 0 0 0 0 0 0 0 0 1 1 0 1 0 0 1 0 0 0 1 0 1 0 1 0 1 0 1
+0 0 1 0 1 0 1 1 1 1 0 0 1 1 1 1 0 0 1 1 1 1 0 0 1 0 1 0 1 0
+1 1 0 1 1 1 1 1 0 1 0 0 0 1 1 1 0 1 0 0 0 1 1 0 1 0 0 0 0 1
+0 1 0 0 0 1 0 1 1 0 0 1 0 0 0 1 1 1 0 0 1 1 0 1 1 0 0 1 0 1
+1 1 0 0 0 0 0 1 1 0 1 1 0 0 1 0 1 1 0 0 0 1 0 1 0 1 0 1 0 1
+1 1 1 0 1 0 0 1 1 0 1 1 1 0 1 0 1 1 0 0 0 1 1 0 0 1 1 1 1 1
+0 1 0 0 1 1 0 0 1 1 1 1 0 1 0 1 0 1 1 1 0 1 1 0 1 1 0 0 1 0
+1 1 1 1 0 1 0 1 0 1 1 1 1 0 1 0 1 0 1 0 1 1 0 0 1 0 1 0 0 0
+0 0 0 0 1 1 1 0 1 1 0 0 1 1 1 1 0 1 0 1 1 1 1 1 1 0 0 0 0 0
+0 1 1 1 0 0 0 1 1 1 0 1 0 0 1 1 1 1 1 0 1 0 0 1 0 0 0 0 1 1
+0 1 0 0 1 1 1 1 0 0 1 0 1 0 1 1 0 0 1 0 0 1 1 0 0 0 0 1 0 0
+1 1 0 1 0 0 1 1 0 0 1 1 1 0 0 1 1 1 0 0 0 0 1 1 1 0 1 0 0 1
+0 1 1 0 1 0 1 1 0 0 0 1 1 0 0 0 0 0 0 1 0 0 1 1 0 1 0 0 1 1
+0 0 1 1 1 0 1 0 0 1 1 0 0 0 1 1 1 0 1 0 0 0 0 1 1 0 1 1 0 0
+1 0 1 1 1 1 1 1 1 1 0 1 0 0 0 1 0 1 0 0 0 1 1 0 0 1 0 0 0 0
+1 0 1 1 1 0 1 1 1 1 0 0 1 0 1 1 1 0 0 0 0 1 1 1 1 1 0 1 0 0
+1 0 0 0 1 1 1 0 1 1 0 0 1 1 1 0 1 0 0 1 0 1 0 1 1 1 0 0 0 1
+1 0 1 0 1 0 0 0 1 0 0 1 1 0 1 1 0 0 0 1 0 1 1 0 1 0 0 1 0 0
+0 1 1 0 1 0 1 1 1 1 1 0 0 0 0 1 0 1 0 0 1 1 1 1 0 1 0 1 1 1
+0 1 0 1 1 0 1 0 0 1 0 0 1 0 0 1 1 0 1 0 0 0 1 1 1 0 0 1 0 1
+1 0 1 1 1 0 1 0 1 0 1 1 0 1 0 0 0 1 0 0 0 0 0 0 0 0 1 1 0 1
+1 1 1 1 1 1 1 1 1 1 0 0 1 0 0 1 0 0 1 1 0 0 1 1 1 1 0 1 0 1
+1 1 1 1 0 0 0 1 0 1 1 0 0 0 1 1 0 0 1 1 1 1 0 0 0 1 0 1 0 0
+1 0 1 0 0 1 1 1 1 0 1 1 0 0 1 0 0 1 1 0 1 1 1 1 1 1 0 0 0 0
+0 1 1 0 0 1 0 0 0 0 0 1 0 1 0 0 1 1 0 1 0 1 0 0 0 1 0 0 1 0
+0 0 0 1 0 0 0 1 1 1 1 1 0 0 0 1 1 0 0 0 1 1 1 0 1 0 1 1 1 0
+1 1 0 0 0 0 1 1 1 0 1 0 1 1 1 0 0 1 0 0 0 0 0 0 1 1 1 0 0 0
+1 0 1 1 1 0 1 0 1 0 0 1 1 1 1 1 0 0 1 1 0 1 1 1 1 0 0 0 0 1
+0 0 1 1 1 0 0 0 0 1 0 0 0 0 0 0 1 0 0 1 0 0 1 1 1 0 0 1 0 0
+0 0 1 1 1 1 1 0 1 0 1 0 0 1 1 1 1 0 0 0 1 0 1 1 0 1 1 1 0 0
+0 0 0 0 0 1 0 0 1 1 0 1 1 0 0 0 0 1 0 1 1 0 0 1 0 0 1 0 1 0
+1 0 0 1 0 1 1 1 0 1 0 1 1 0 0 1 1 0 1 1 1 0 1 0 0 0 1 1 1 1
+0 0 0 1 0 0 0 0 0 0 1 1 0 0 0 0 0 1 0 0 0 1 1 0 0 1 1 0 0 0
+1 0 0 1 1 0 1 1 1 1 1 1 1 1 0 0 0 0 0 0 1 0 1 1 0 0 1 0 1 0
+0 1 0 1 1 1 1 1 0 1 0 1 1 0 0 1 1 0 1 1 0 1 1 0 1 1 0 0 0 1
+1 0 1 1 1 0 0 0 1 0 0 1 0 0 0 1 0 1 1 1 0 0 1 1 1 1 0 0 0 1
+0 1 0 0 1 1 1 1 1 1 0 0 1 0 0 1 1 0 1 0 1 0 1 1 1 0 1 1 0 1
+0 0 1 0 1 1 1 0 0 0 1 0 1 0 1 1 0 0 1 1 0 1 0 1 1 0 0 1 0 1
+0 1 1 1 1 1 0 0 0 0 0 1 0 1 1 1 1 1 0 1 1 1 0 0 1 0 0 1 1 1
+1 1 1 1 0 1 1 1 1 1 1 0 0 1 1 0 1 1 0 1 0 1 0 1 0 1 1 0 0 0
+1 0 0 0 1 0 1 0 0 0 1 0 1 0 0 1 0 1 1 1 1 1 0 0 1 1 1 1 1 0
+0 0 0 0 1 1 1 0 1 0 0 1 1 0 0 1 1 1 1 0 0 1 0 1 0 0 0 1 0 0
+1 1 1 1 1 0 0 0 1 1 0 0 1 1 1 1 0 1 0 1 0 0 0 0 1 1 0 1 1 0
+1 0 1 1 0 1 0 1 0 1 1 0 1 1 1 0 0 1 0 0 1 1 0 0 1 1 0 1 0 1
+1 1 1 1 1 0 0 0 0 1 0 0 1 0 0 0 1 0 0 0 0 0 1 0 0 0 0 1 1 1
+0 1 1 0 0 1 0 0 0 0 0 0 0 0 1 1 1 0 0 1 0 1 1 1 0 1 1 1 1 1
+1 1 1 0 1 1 1 1 1 0 0 0 0 1 0 0 1 0 1 0 1 1 1 0 0 1 0 0 1 1
+1 1 0 1 0 1 0 1 0 0 1 0 0 0 1 0 1 1 0 1 1 0 1 0 0 1 0 0 1 0
+1 0 1 1 0 0 1 1 0 0 1 1 0 0 0 1 1 0 0 1 0 0 0 0 0 1 0 1 1 0
+1 1 1 1 1 0 0 1 0 0 1 1 1 0 1 0 0 1 1 1 0 1 1 1 1 1 1 1 1 1
+1 0 1 1 0 0 1 1 0 1 1 1 0 0 0 1 0 1 0 0 0 1 1 1 1 1 0 0 1 0
+0 0 0 0 0 1 1 1 0 0 0 0 0 1 1 1 1 1 0 0 1 1 0 0 1 0 0 1 0 0
+1 1 1 0 0 0 0 1 0 1 1 1 1 1 1 1 0 1 0 1 1 1 1 0 1 1 1 0 1 0
+1 0 0 1 0 1 0 0 0 0 0 0 1 0 1 0 1 1 0 1 0 1 1 0 0 1 0 1 0 1
+1 0 0 0 1 0 1 1 0 1 0 0 0 1 0 1 0 0 0 0 1 1 1 0 1 0 1 1 0 1
+0 1 0 0 0 0 1 0 1 1 1 0 1 1 0 1 0 1 0 1 1 0 0 0 0 0 0 1 1 1
+0 1 0 0 1 0 1 1 0 0 0 0 1 1 0 1 1 1 0 0 1 1 0 0 1 0 1 0 0 0
+0 1 0 1 1 1 1 1 1 1 0 0 1 0 1 0 0 0 0 0 0 1 0 0 1 0 0 1 1 0
+0 0 0 1 0 0 1 0 0 1 1 0 0 0 0 0 0 0 1 0 0 1 0 0 0 1 0 1 0 0
+1 0 0 0 1 0 1 1 1 1 1 1 1 0 1 0 1 1 1 0 0 1 0 1 0 1 0 1 0 0
+1 0 0 0 1 0 1 0 0 0 1 1 0 0 0 1 1 0 0 1 1 1 1 1 1 0 1 1 1 0
+0 0 0 1 0 0 1 0 0 0 0 1 1 0 0 0 0 0 0 1 1 1 0 1 0 0 0 1 1 0
+1 0 0 0 0 0 1 0 1 0 1 0 0 1 1 1 0 1 1 1 0 0 1 0 1 1 1 0 1 0
+0 1 0 0 1 1 1 0 0 1 0 0 0 1 0 1 0 0 0 1 0 0 1 0 0 1 1 1 0 1
+0 0 0 1 1 0 1 0 1 0 1 0 0 0 1 1 1 0 1 1 0 0 0 1 1 0 0 1 0 1
+1 1 1 1 1 1 1 1 0 0 1 1 0 0 0 1 0 1 0 1 0 0 0 1 1 0 1 0 1 0
+0 1 1 0 0 0 1 1 0 0 1 1 0 1 1 1 1 1 0 1 0 0 0 0 1 0 1 0 0 0
+1 1 1 0 1 1 1 0 0 0 0 0 0 0 0 1 1 1 0 0 0 0 1 1 0 1 1 0 0 1
+0 0 1 0 0 1 0 0 1 0 0 0 1 0 0 1 1 1 1 1 1 1 0 1 0 0 0 1 1 0
+1 1 1 0 1 1 0 1 1 0 1 1 0 1 0 0 1 0 0 0 1 1 1 1 0 1 1 0 1 1
+0 0 1 1 1 0 0 0 0 1 1 0 0 1 1 0 1 0 1 0 0 1 0 0 0 1 1 0 0 1
+0 0 0 1 0 0 1 1 1 1 1 1 0 0 1 0 0 1 0 0 0 0 1 1 1 1 1 1 0 0
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/random-double-data.txt b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/random-double-data.txt
new file mode 100644
index 00000000..039ac506
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/random-double-data.txt
@@ -0,0 +1,100 @@
+1.172993630434470589e+02 1.905532343119886605e+02 2.613653823499444115e+02 1.570270816248337269e+02 2.373767637129340642e+02 2.175366144750510671e+02 2.609909144757107242e+02 2.086671686166440622e+02 2.674986450118991002e+02 1.395992762090408235e+02 1.115453060949917159e+02 1.531034842395609701e+02 2.621042034264289668e+02 2.958729454449504033e+02 2.137960368830719062e+02 2.606436280968571282e+02 2.492136530687155869e+02 2.770806237064748530e+02 2.667325121892417883e+02 2.909243437665674037e+02 1.570328417294508085e+02 1.738762543815240917e+02 1.514157955792608163e+02 2.264748814818163396e+02 1.911869834397498380e+02 2.083706054660671043e+02 2.778115921852293013e+02 1.330374814391803397e+02 2.988697222234711717e+02 2.534167825404447001e+02
+2.090964891529778242e+02 1.322006104643973003e+02 1.443415945355371832e+02 2.991388772264980389e+02 1.649302344777914868e+02 2.839528641910463875e+02 1.677159709681393736e+02 2.597553832458208944e+02 2.861055547321268477e+02 1.866431671806918189e+02 2.131812134614140177e+02 1.881465139477683124e+02 1.271865139985419262e+02 1.821608865941132649e+02 2.793653846657656459e+02 1.745982981552271838e+02 2.440893905635109888e+02 1.926469151980868446e+02 2.752453852984189098e+02 1.333479229516146347e+02 1.756311805755703404e+02 2.039367615619088383e+02 2.441861159155101575e+02 2.136111324500645594e+02 2.893808960992043922e+02 2.723220466017930335e+02 2.367879096909125565e+02 2.831541206793258425e+02 2.017643187924728068e+02 1.293072046241175030e+02
+2.311242818257193221e+02 2.180694109009666306e+02 2.728791416531455525e+02 1.239345918565636993e+02 2.885729762050686418e+02 2.082619393005260804e+02 2.331416004257805525e+02 1.003112528445347778e+02 2.796331120515330895e+02 2.804679740148056339e+02 2.466936828597247597e+02 1.422398585800914361e+02 1.312115029632765015e+02 1.324417143647877708e+02 2.161716508991076466e+02 1.791489656100356171e+02 2.239038785146145472e+02 2.456511993086799919e+02 2.885023077068626662e+02 2.127338775308419940e+02 2.468090724782538246e+02 2.704135008577740109e+02 1.144148504575758665e+02 1.641571759150080538e+02 2.473349551308716343e+02 2.366620528761779667e+02 1.208143167141831498e+02 1.403705034199327599e+02 2.061073908129479548e+02 1.482034962693051057e+02
+1.938319500339997035e+02 2.000523826243218650e+02 1.356134735235139317e+02 1.224357428573656250e+02 1.262840705282213918e+02 1.112797762573139977e+02 1.727826315738305993e+02 2.199559683100150664e+02 1.817290208723558180e+02 2.185579898773881951e+02 1.772844462934412491e+02 1.589145011846130728e+02 1.017520743541414703e+02 2.836990856171782980e+02 1.265544072638776640e+02 2.503473341476423855e+02 2.178539278172635534e+02 2.063574432066289432e+02 1.473169457524925861e+02 1.112719632489760784e+02 1.195996070145015722e+02 1.345099678548529312e+02 2.992645259487585463e+02 2.692242364540683752e+02 2.139649193607747861e+02 2.313659165106297451e+02 2.524185025119667785e+02 2.678714004815313388e+02 1.111457754393238702e+02 1.296443575800298902e+02
+1.183944097426736306e+02 2.750477277868330930e+02 1.688558971333346790e+02 1.432283295687057034e+02 2.226043174503911359e+02 1.825124733235978169e+02 1.806485153578007612e+02 2.270256019866706936e+02 2.852913053786990076e+02 2.867562520175486043e+02 2.795056496733417362e+02 1.142488895870292822e+02 1.502985045661773427e+02 2.246907359526948937e+02 2.051158858061974115e+02 2.663351441156772808e+02 2.864853431806749882e+02 2.276548949573071070e+02 2.678087640355958001e+02 2.266463576941352187e+02 1.886763304826383774e+02 1.150603609957262563e+02 1.596187994714221929e+02 1.844565420383776484e+02 1.730173420200940768e+02 1.427940137102308995e+02 1.774757620992130001e+02 2.563086691508434001e+02 1.666317348809653822e+02 1.878143419608473437e+02
+1.642344698640436036e+02 1.591648429561690818e+02 1.561851029939521140e+02 1.854367091922420059e+02 1.494951311500319093e+02 2.443780767043579942e+02 2.741090240793212160e+02 1.519200656263381006e+02 1.391711947382712538e+02 1.482414334940778815e+02 2.574425018646875287e+02 1.455120022089010945e+02 1.620904376421240727e+02 2.098493186451893848e+02 2.377904829227144887e+02 2.881187570801528750e+02 1.785609418793050054e+02 1.500483139796714340e+02 1.697371065898091729e+02 1.824143324642365087e+02 2.329862749140337712e+02 1.372006180078979298e+02 2.250666134242961789e+02 1.760894707637434067e+02 1.874161150869196035e+02 2.860410495381969440e+02 1.539271628213176086e+02 1.051658254213322152e+02 1.501619097950496666e+02 1.205717364486104515e+02
+1.275638286377957371e+02 2.620802183565458563e+02 2.290828196339760723e+02 2.591630015014513333e+02 2.102568650793322149e+02 2.385080320420775593e+02 2.683788150825365619e+02 1.808700201925492763e+02 1.972184450648797451e+02 2.382313686117472287e+02 1.733526990293641177e+02 2.369802981553972074e+02 1.835652530901061823e+02 1.274084560526275141e+02 2.403488205519001326e+02 2.713515297463850402e+02 1.455311801633137065e+02 1.889430214806582171e+02 1.676324321357484735e+02 2.327799977696781184e+02 2.846419393176552148e+02 1.510702433968490936e+02 1.361559014852734606e+02 1.732199851325496525e+02 2.451323003571785364e+02 1.833444866660036894e+02 2.451280287301300405e+02 1.669088211440060832e+02 2.768492228383354359e+02 2.445882168033535038e+02
+2.905092787520428601e+02 2.948076984760371033e+02 1.731080208454208673e+02 2.825532355845657548e+02 1.108820315678514845e+02 2.862013985457700755e+02 2.111453776876104769e+02 2.614428154999528147e+02 1.461523265575596042e+02 2.304914832379158156e+02 2.502987607420118934e+02 2.474276046141548875e+02 1.739607960146905725e+02 2.098700376203710789e+02 2.373226438948917121e+02 1.258493219462072119e+02 2.692932028872633055e+02 2.819145908444669999e+02 1.941653933285864468e+02 1.666395497972145847e+02 2.371919109091950588e+02 1.978302896313488191e+02 1.951483674191611613e+02 2.694357972099330141e+02 2.387068160427941450e+02 2.826084316255729618e+02 1.350954172043159929e+02 1.414479610501084039e+02 1.407657276334374501e+02 2.725513503737778365e+02
+2.055761393809777360e+02 1.070553196069381556e+02 1.045726024365074096e+02 1.611577217417760153e+02 1.258091705742062629e+02 1.038769334534844120e+02 2.956016304760584035e+02 1.586570076132481972e+02 1.636816353299032585e+02 2.375674325770941095e+02 2.085436646116971531e+02 2.088922128397473443e+02 2.316234644183506930e+02 2.623581653234684268e+02 1.714245300492981698e+02 2.844387943099641234e+02 1.469270259610659650e+02 1.157700922187784727e+02 2.367694595159086361e+02 1.548671738744121740e+02 2.013687686570863207e+02 1.860374943080277887e+02 1.733446602950305930e+02 2.488507085609763010e+02 2.929099979257852056e+02 1.825615338506695480e+02 1.338575452835397925e+02 1.491478381149757979e+02 1.116052925520655066e+02 2.341983606431906537e+02
+1.014445800974648222e+02 2.539987638010908597e+02 1.871788778457793399e+02 1.454231386314719998e+02 2.284640297096368045e+02 1.174773591296971915e+02 1.395683165851895637e+02 1.137193571402578414e+02 2.370662356797280950e+02 1.767292649815032064e+02 2.688513591587910696e+02 2.913902923086397436e+02 1.122392290694582897e+02 1.366157623619356229e+02 2.667409125457835444e+02 1.834435599491967537e+02 1.437174343391732236e+02 1.130622879516462120e+02 2.898543289046954214e+02 1.559795378531963479e+02 1.765577834073310157e+02 2.422955620302867885e+02 2.384835032255701321e+02 1.708163174135501094e+02 2.012159081107001839e+02 2.825663186839160517e+02 2.627299211659199045e+02 2.173916205317264883e+02 1.878835852278120910e+02 2.578733373077019451e+02
+2.843897417914848802e+02 2.685865547709703378e+02 2.810255710736182664e+02 2.572690897085278152e+02 2.416998564827035523e+02 1.770932574976374099e+02 2.021652319180342943e+02 1.414744641219446351e+02 1.464677002516696405e+02 1.831165552459343644e+02 1.157177632931430651e+02 2.625289386264841482e+02 2.972225480003540952e+02 1.024156386789293265e+02 2.305099741095138768e+02 2.241903749843916671e+02 1.157222019118702292e+02 1.533205318359311775e+02 1.179505454242311799e+02 2.666741766563739020e+02 2.792728900733587238e+02 1.222170248460037811e+02 2.573772727215269924e+02 1.535874607134987286e+02 1.231830862844115728e+02 2.584552954023608891e+02 2.541883057030129862e+02 1.001259630352790566e+02 2.332879439260797767e+02 2.240027888381033563e+02
+1.537092645679641123e+02 1.737278083620151392e+02 1.736358049797527201e+02 2.251608985235982630e+02 1.812387130195175473e+02 1.605621432944637377e+02 1.880655312831545700e+02 2.234500385148787700e+02 1.156918728696038272e+02 2.243685096423413654e+02 1.934342626327970720e+02 1.850952349553267027e+02 2.629944548485545965e+02 1.410418270562070973e+02 1.442479234012843960e+02 2.244518961458842909e+02 1.350755563946989923e+02 1.207094763037939913e+02 1.849900977633715797e+02 1.712315707730903398e+02 1.136025349108833495e+02 2.266901327137990734e+02 2.049289406654929735e+02 2.168279721613268407e+02 2.802488024880154285e+02 2.288593244920211873e+02 2.512942787545493957e+02 1.605416563468323261e+02 1.449848598254574483e+02 1.444073785399158396e+02
+1.576600406756634243e+02 1.316580100950168912e+02 2.530050469343043460e+02 1.319013133578224028e+02 2.708693079386434306e+02 1.256852413190491689e+02 1.471714019119002046e+02 1.119112141125198576e+02 1.482405279774543772e+02 2.151504825709631064e+02 1.449998801809978488e+02 2.163638771503673581e+02 1.272949254250747657e+02 2.476027791419436141e+02 2.891208457332292028e+02 2.642744540427622724e+02 1.972643066216432999e+02 2.480891057982425423e+02 1.265454595896786003e+02 2.957735252703171227e+02 1.831389323451852533e+02 2.674516147697771657e+02 1.404389674972707667e+02 1.350952754772052913e+02 2.169062951790871807e+02 2.445227715623778408e+02 1.771545655819627427e+02 2.729961759152714649e+02 2.655105689521545855e+02 1.887977700062222084e+02
+1.336462666694000632e+02 1.333709897858500995e+02 2.263366393511863350e+02 1.847175439991091821e+02 1.121699721143812383e+02 1.985314153845103533e+02 2.097626398761568396e+02 1.994292542548276970e+02 2.119822099620050722e+02 1.121578896112172430e+02 2.285640262135607372e+02 1.530452060058861719e+02 2.280757825791220625e+02 1.002584314437652893e+02 1.549763597162410349e+02 1.962603185897801836e+02 1.520023734031539107e+02 2.188357004065238129e+02 2.078620274892635678e+02 2.253215106546470281e+02 1.707542413836397373e+02 2.818584030117174279e+02 2.256862624833151472e+02 1.123882683852972377e+02 2.188298604829752776e+02 1.623779544769217296e+02 2.272253780943444212e+02 1.236449568833132560e+02 1.456708971140968174e+02 2.173334506159979753e+02
+1.355111076933105210e+02 2.882277378633141325e+02 1.458332953325788139e+02 2.038461345794760007e+02 2.077052275373579278e+02 2.430957456359013804e+02 2.398926697516154150e+02 1.861334604823129553e+02 1.056851094080089695e+02 1.250491536199931772e+02 1.475324860190441427e+02 2.446126161547439324e+02 2.283994822545897705e+02 1.411463500178549850e+02 1.017206978570942510e+02 2.805514386584911790e+02 1.128847993259780083e+02 2.326583828053989862e+02 1.968387029218569069e+02 2.013375618903088480e+02 2.981010702857409456e+02 1.018614681114941902e+02 1.799507821883679526e+02 1.133741465580100396e+02 1.235533581072856038e+02 1.980629645203880500e+02 2.289642287691829097e+02 1.596082722591768288e+02 1.905110471998515322e+02 1.789448781159623820e+02
+2.588286452268601465e+02 1.978130463173739599e+02 1.052689337312009599e+02 1.316763830509305251e+02 2.659236586726388509e+02 1.637014132384438767e+02 1.416031833329826668e+02 2.638665530652568236e+02 1.007257384115875425e+02 1.143900271701907769e+02 2.977834670475828602e+02 1.589765734727692745e+02 1.903975572290986520e+02 2.371635535037608804e+02 1.840341975670916668e+02 2.047003785265828242e+02 2.798969769773281655e+02 2.731706896262927557e+02 1.266878907904394254e+02 1.882415083052244427e+02 2.273996647906652129e+02 1.051754139634791869e+02 1.949647447346334843e+02 2.153583447980240919e+02 2.763468452623635585e+02 1.126493843527773322e+02 1.566047572050934491e+02 1.655928523150526246e+02 1.733528322945315949e+02 1.292815908595541146e+02
+1.453195062153936874e+02 1.443849872704900008e+02 2.393030362110915519e+02 2.203850914291498668e+02 2.628192548589183275e+02 1.142161203389242132e+02 2.954875947743198594e+02 1.914138981839176950e+02 1.956478457154231023e+02 1.282875398486639824e+02 2.801001077571227142e+02 2.478095646281364566e+02 2.467477848581343949e+02 2.819656424464902784e+02 2.951823714077539194e+02 1.777239847229775478e+02 1.197979896746704185e+02 1.481181033052623661e+02 1.906710229153984528e+02 2.142395628283543658e+02 2.300980272040501973e+02 2.228884003748859186e+02 2.473330601440014220e+02 1.391193242835927322e+02 2.836257563055140736e+02 1.510096324299383923e+02 2.202302141125946946e+02 1.931468179284185851e+02 1.332427495686727639e+02 2.591048546650930575e+02
+1.878681542531208208e+02 1.576240359584147654e+02 2.653849736815447500e+02 2.963544993865212973e+02 2.044592436730770828e+02 2.022626486161902903e+02 2.692262675681025144e+02 2.660999355751699227e+02 2.275843495473382347e+02 1.090849337992742818e+02 2.095602584555617227e+02 1.896271059113536808e+02 1.103822849104477513e+02 2.916911739044173260e+02 1.131212278363718582e+02 2.998892666268029643e+02 2.476782245756396605e+02 2.259689579913920738e+02 1.853942231198421950e+02 1.358270117521841200e+02 1.538630682720535674e+02 1.002148317174243601e+02 2.538393939061405433e+02 1.631649956267838206e+02 2.086654853664906000e+02 2.065167771482954322e+02 2.184161808630845485e+02 2.204789814939956045e+02 2.876785893506615821e+02 2.415299687386639675e+02
+2.578989465605797591e+02 2.309888943086805853e+02 2.139372792253111584e+02 1.438019921733897775e+02 2.686852572045135616e+02 1.347038004304963579e+02 2.662658866335509060e+02 2.378358170108797367e+02 2.901455078003721155e+02 2.653867524737770509e+02 1.011162296015096302e+02 1.236447329941733528e+02 2.440241295351771669e+02 1.285889645706482725e+02 1.234088480316093808e+02 2.765916670935633874e+02 1.132915304101479421e+02 2.967043774237617413e+02 2.960414394814537786e+02 1.923965028192617410e+02 2.177448618307050765e+02 2.328047369831131732e+02 1.702256773965170282e+02 2.320080409490440729e+02 2.962065584958517093e+02 1.421971909775941185e+02 1.416181340866144183e+02 2.318260414882616374e+02 1.990521696869427046e+02 1.291045564046920333e+02
+1.562042774178686386e+02 1.004265446278790392e+02 2.987714610921041185e+02 1.843637355858842284e+02 1.975513718825063165e+02 2.869996482942455032e+02 1.598134132589713943e+02 1.814921031876193638e+02 2.433389905907341983e+02 2.220363745053336970e+02 1.548306942100590504e+02 2.274512269554506361e+02 2.173006200058655963e+02 2.139515436667214772e+02 1.820439741095771353e+02 2.954110718222074183e+02 2.706126458816278273e+02 2.546812106115172583e+02 1.499899738326257363e+02 1.498010641912065921e+02 1.897725780579399668e+02 2.531561160917130167e+02 2.568891780637028432e+02 2.223136077092870551e+02 1.518604819103856585e+02 1.610422120589223027e+02 1.090455809489133259e+02 1.950503873748027388e+02 1.235704160644129388e+02 2.711492093024702967e+02
+2.039597038432034424e+02 2.026680584622021684e+02 1.365818873512059213e+02 2.909476552420245525e+02 1.721994194158640425e+02 1.854386667051114443e+02 2.287109571295530372e+02 1.912591665763447963e+02 1.607322994166321450e+02 2.949516230628389053e+02 2.522065912002103403e+02 1.869433122585654701e+02 1.235797649248940644e+02 1.522422059501078024e+02 2.738245135411146975e+02 1.059681837441489307e+02 1.013027238331489173e+02 1.660100598156148237e+02 2.454471731623151243e+02 2.467503196183328100e+02 2.584564749953993896e+02 2.079587352810677316e+02 1.650926041957846451e+02 2.269719270682073784e+02 2.376254891983122093e+02 1.510146656008620596e+02 2.672848371954185041e+02 2.692845974117340688e+02 2.180714754246087921e+02 2.186797802447831884e+02
+1.704231257711912519e+02 1.993416036368699906e+02 2.293703655438095268e+02 1.494582642918422266e+02 1.988970317734676030e+02 2.329763291241497711e+02 2.594871448385057420e+02 2.168089936885102134e+02 1.825320854593447280e+02 1.816754553181755796e+02 2.164740515812325725e+02 2.676208645391697019e+02 1.298365075936954725e+02 1.802664596093496243e+02 1.015344620621038132e+02 1.955048336384612639e+02 1.938953913674110083e+02 2.716932071347151805e+02 2.391085978949223829e+02 1.852300387899809380e+02 2.933293185307651356e+02 2.502753353909542966e+02 1.326128348575908262e+02 1.132638325194699433e+02 1.382024010322260494e+02 1.899310337488860796e+02 2.577639546186944699e+02 2.130234590296898887e+02 2.056292296528304746e+02 2.070746044453983927e+02
+2.712524956603344890e+02 1.103212761114690750e+02 1.501201791543782917e+02 1.588084859702673555e+02 1.780379814134324192e+02 1.938691258391782810e+02 1.322057441019641146e+02 1.105823874551086590e+02 2.879365916037821194e+02 2.457617763012990224e+02 1.036189749330240488e+02 1.682919366264929124e+02 2.271749409116763161e+02 2.468308259697249127e+02 2.530034131464132088e+02 2.481420904342841709e+02 1.546080547019561209e+02 1.278414739842506265e+02 2.234886960240669111e+02 2.535365186455997843e+02 1.599130733896959669e+02 1.151371295028686035e+02 2.378656188176093451e+02 2.901072209563180877e+02 2.524076257924749882e+02 2.849501171254129304e+02 1.802791659856764568e+02 1.527418387706650833e+02 2.578820596338672431e+02 1.208856989199291263e+02
+1.884906470590645711e+02 2.304295185581007672e+02 1.035923344330140736e+02 1.647061655195892627e+02 1.910201770870304472e+02 1.752788518438422614e+02 2.763014227316762117e+02 2.545709641405486252e+02 1.642694881393259152e+02 1.850698110761380804e+02 2.423689469305483328e+02 2.821007056776016384e+02 1.440765548977453250e+02 1.082195827231368952e+02 1.292487205530619008e+02 2.136496853657876613e+02 2.268509220579896635e+02 2.999629735037570981e+02 2.135306905316524535e+02 2.807718279523737692e+02 1.079256111018183759e+02 2.233050677333321801e+02 1.960571416898615951e+02 2.930642308139058514e+02 1.350490077967585307e+02 2.626074042719769750e+02 2.812196827814445328e+02 2.812753678081913336e+02 1.893738913514469004e+02 1.237248675858835725e+02
+2.024005284879252144e+02 2.663611407988397559e+02 2.687079844301063076e+02 1.583164038086077312e+02 1.451019436850150441e+02 1.100558451420041450e+02 2.083655450975085159e+02 2.034012033819327598e+02 2.745375932717230398e+02 1.454718097055225599e+02 1.519068131933423729e+02 2.522666952972969625e+02 2.409340029943109300e+02 1.697386944425205115e+02 1.092659514648129289e+02 2.785598218078254149e+02 1.404092026094307357e+02 2.152301424167146990e+02 1.170396027347833723e+02 2.495323893679063474e+02 2.070836095469416591e+02 2.187978925167305135e+02 1.478606128149070855e+02 1.189323178954538207e+02 2.012925160284665651e+02 2.080878545398990127e+02 1.510128433840351647e+02 1.657302151838663065e+02 2.177026636795220043e+02 1.221198981216710422e+02
+1.411258561955272341e+02 1.419717097672817374e+02 2.247481951315160984e+02 2.805973971111802712e+02 2.755562061324142178e+02 2.039769327420251557e+02 2.994080883760036045e+02 2.417843309736466040e+02 1.023751441731232319e+02 1.491356884971497152e+02 2.542464200475323821e+02 1.496044144381669128e+02 2.829129207809560285e+02 2.479316882407134699e+02 2.441205876677642550e+02 2.045492313770996020e+02 2.855582203360229414e+02 2.884005586284110336e+02 2.039668453101600676e+02 1.690279206477617890e+02 2.136822090795746760e+02 1.254275901194574772e+02 1.084851042192170922e+02 1.656011685190305229e+02 1.415195951026897774e+02 1.578115814760412263e+02 2.619737257057257693e+02 1.492347147839753347e+02 1.627213988646173561e+02 1.343297485726322691e+02
+2.544675070683062756e+02 1.367461330002975899e+02 2.928364121110963652e+02 2.024865028281971036e+02 2.758937379397792142e+02 1.293527538914390220e+02 1.003170531204512059e+02 1.514803620238746760e+02 2.603616046431354789e+02 1.790387290949859960e+02 1.954717187769221027e+02 1.325226280128280223e+02 1.522166198122710625e+02 1.162911821325583048e+02 2.798489406348742250e+02 2.521718932296424498e+02 2.622327475379161115e+02 1.027798265388270949e+02 2.437256510683693023e+02 1.911771820917219884e+02 2.722604457055863350e+02 2.850557929858495640e+02 1.953760157441756746e+02 2.473572905253965644e+02 1.891404804097296051e+02 1.514672503279451803e+02 2.213565012031598940e+02 2.253356064978207769e+02 2.044629345029305227e+02 2.805872739342098612e+02
+2.859142434488251183e+02 1.016009480575973356e+02 1.779351649172412522e+02 2.205171340775500539e+02 2.104472905774927369e+02 1.755755724600441567e+02 2.751836189782782185e+02 2.820692049982218350e+02 1.337557428916256015e+02 1.569761138230965969e+02 1.991757527032745543e+02 2.615974376894962461e+02 1.944849272958306017e+02 1.868411694165790777e+02 2.994394032068257729e+02 2.802783326794233290e+02 2.693871918204162625e+02 1.750293298802730249e+02 1.468161278725061720e+02 1.272003326865558108e+02 2.233103517167062932e+02 2.103066399402185027e+02 2.720825853079193735e+02 2.728915492341989193e+02 2.160004538807991992e+02 1.325145501710478015e+02 2.549827549782140466e+02 2.921469675413995901e+02 1.846231529604695822e+02 1.391152989663993651e+02
+2.538717579982014456e+02 1.450483481068324352e+02 2.720200816305956550e+02 1.120834821105324011e+02 1.703801876168104741e+02 1.091293661435919233e+02 1.410263490040598526e+02 1.910022197757120352e+02 2.505223413771657022e+02 2.069613533172621374e+02 1.367200764291426935e+02 1.269156762039037574e+02 1.459486945063737267e+02 1.585863332989725905e+02 1.433846106215619329e+02 2.893202513225785424e+02 1.754070497414596730e+02 1.678900237854272746e+02 2.363821059303507752e+02 1.088858921730617908e+02 1.962435837543239927e+02 2.151311182954276831e+02 1.943029551670006754e+02 1.670799798236046172e+02 1.348235227224938910e+02 2.005836112104490212e+02 2.601588534628079969e+02 1.194827586439497935e+02 2.131891535893303740e+02 1.835674362703964277e+02
+2.872207377280434457e+02 1.680389491751975299e+02 2.268072198735419533e+02 1.324343035526375729e+02 2.746241572770433095e+02 2.142161570690199710e+02 1.852290440736100550e+02 1.772431485621305285e+02 1.144750125154023266e+02 2.162070901557998468e+02 1.490690769171257557e+02 2.904041493178549445e+02 2.673617561413327621e+02 2.904362235840736730e+02 1.438791831406123833e+02 2.596893065528289526e+02 2.617155941751458386e+02 2.388486986717779246e+02 2.718819501315180105e+02 1.265484539827731680e+02 2.508989305854047700e+02 1.677208481362706323e+02 1.527665277518251230e+02 2.069026506407369084e+02 2.223100964495413336e+02 2.859845330217733022e+02 1.430291068893224349e+02 1.186508486537613436e+02 2.043257492072551713e+02 2.909823892985461953e+02
+2.385945641230763670e+02 2.011887933217761031e+02 1.622448188725907983e+02 1.738874847453056987e+02 1.669498482708885376e+02 1.853462372214463016e+02 1.514500885098960907e+02 1.569159134451362547e+02 2.521399095730983504e+02 1.246878140446721659e+02 1.758330561641313352e+02 2.722601647479554003e+02 1.679012078705679869e+02 1.710944469563905272e+02 2.012619557548435978e+02 2.130692925302264200e+02 2.489118511754019778e+02 1.553758318484749452e+02 2.531318516516165857e+02 1.895498740333992487e+02 2.010265603399928409e+02 1.805605111948569856e+02 2.471772127430102159e+02 2.822665908577009759e+02 1.256656757093761314e+02 1.218957078832023626e+02 2.851942693987446660e+02 2.434079459678487751e+02 2.183256665756584312e+02 1.881473862468819220e+02
+2.878274557836845133e+02 1.654481949983921254e+02 1.215681808546938214e+02 2.567820905945674781e+02 2.104106688330284101e+02 2.960796083414018085e+02 2.020680111052573693e+02 2.328934707961639106e+02 1.081575190462602336e+02 1.003340046261853189e+02 2.009697278729638299e+02 2.231963192062537757e+02 1.203849639323555323e+02 1.187994179134823156e+02 2.211937485225296030e+02 1.667300587261732119e+02 1.727379541915926211e+02 2.085029285798690353e+02 2.440827389167183981e+02 2.864522928573259151e+02 2.974890568790378893e+02 2.102945085846974393e+02 1.972598274048171447e+02 1.762889209976547136e+02 1.346946323322499666e+02 1.554434255958064170e+02 2.915634104756007901e+02 1.434053307556222876e+02 1.055800565037633163e+02 2.043924431141962259e+02
+1.494596010135965116e+02 1.369114048625681335e+02 1.414146701131132886e+02 1.383970135097982848e+02 1.734304788623498155e+02 1.594301265610334610e+02 1.040146208229407137e+02 2.208381597698417806e+02 2.904998286250861383e+02 1.300157615397056929e+02 2.667076669416877621e+02 1.062418844419948556e+02 2.717657999079561364e+02 1.054097765488278640e+02 2.401074677516734823e+02 1.045408432466875297e+02 1.330046749931937882e+02 2.297648034226271534e+02 1.488059718063634307e+02 1.725671935994615183e+02 1.330818497812682608e+02 2.341687919103425770e+02 2.983144736799429211e+02 2.798846823197050071e+02 2.218705077010061473e+02 2.681931695329894865e+02 2.339384973461015420e+02 2.893058480095726281e+02 1.539801301873031321e+02 2.746688360458649640e+02
+1.330701439354522222e+02 1.727884450558678395e+02 2.309082669627648272e+02 2.027633892073664299e+02 2.725503026364725656e+02 1.999882667367585896e+02 1.904108867169430255e+02 2.952458047945178805e+02 2.903769421220866320e+02 1.593020200554085477e+02 1.236139458806368623e+02 2.670862420061573062e+02 2.910830183895285472e+02 1.860711175093342149e+02 2.161724988935532963e+02 2.564488756979296795e+02 1.231566645138573648e+02 1.554206254375235403e+02 1.148558104746345521e+02 1.512714227454516163e+02 1.953024826710307025e+02 1.296022137194406127e+02 1.500450396815122076e+02 2.611742573447975246e+02 1.601671705158374550e+02 2.391666762859087214e+02 2.566415095930981352e+02 1.923304801412870404e+02 1.194174883996373353e+02 1.970722090829630986e+02
+1.912113734453868688e+02 1.498407015577022605e+02 2.038188614169363007e+02 1.315017316695561647e+02 2.564290419741012101e+02 1.890015309531812022e+02 2.451565642315005960e+02 2.794356592632736920e+02 2.286941218755985972e+02 1.959549984609147941e+02 1.183834182035568716e+02 2.102820643179567242e+02 1.748108698585573393e+02 1.534379248653211221e+02 1.919662859034699522e+02 1.273611408042816464e+02 1.848163823983119585e+02 1.719445827292381637e+02 1.098466009889928898e+02 2.781108902268393877e+02 2.089286134506138524e+02 2.324518337977864348e+02 1.983840049195213169e+02 1.897881971862217370e+02 1.057077761008814605e+02 2.693629461665184408e+02 1.359710117509105487e+02 2.191184409971657487e+02 1.295811391257115304e+02 1.272165218667991553e+02
+1.987244486959793903e+02 1.516360617950651317e+02 2.198509518241761498e+02 2.494181713303175911e+02 2.903223989223247372e+02 2.847249789220907132e+02 1.747037051964282171e+02 1.610307305098726829e+02 1.866621867053561061e+02 1.016530888490581503e+02 2.606194448419089440e+02 1.820037020201941402e+02 2.650669443765450524e+02 1.137210849453726098e+02 1.329244106101075715e+02 1.741312140090854257e+02 2.301425980066611885e+02 1.051708772384664030e+02 1.994040172335078864e+02 1.874773290907829733e+02 2.745616984783777070e+02 2.354781865911449756e+02 1.598287033335407159e+02 2.650689470710170212e+02 1.643692352330562017e+02 2.991199217036622713e+02 2.713535332162406348e+02 2.516280148665988463e+02 1.124367393830256532e+02 1.725070309959049837e+02
+1.637875882282461077e+02 1.407642428016634426e+02 2.759741260511348173e+02 1.982469453863400304e+02 2.966736241669494802e+02 2.756530253528777052e+02 1.426661371226006167e+02 1.585144634205103102e+02 2.836415355000413001e+02 2.468213340046699784e+02 2.898204535963063790e+02 1.711408259966125343e+02 1.900542569026269177e+02 1.112151031999617032e+02 2.679918109779015936e+02 2.737346364036235400e+02 2.597479311885246602e+02 1.719445390286030886e+02 2.361360157374418236e+02 1.123330408578339785e+02 1.214203690485689719e+02 2.552722899309185891e+02 2.436705678248840456e+02 1.596697357728296254e+02 2.533254006866929444e+02 2.066863222258713790e+02 1.194370826184286329e+02 2.943584774485435673e+02 1.636272134478143130e+02 1.191267138602315185e+02
+2.350924626651462006e+02 2.263138093076711357e+02 2.206572605284771385e+02 1.704171521239532296e+02 2.000250897638135257e+02 2.966317084215347109e+02 1.350543763227695138e+02 1.248113195978286285e+02 1.480602782771696297e+02 2.391913401309390679e+02 1.908758915801345779e+02 2.476074601271855045e+02 2.408834383325319095e+02 1.009169451940341560e+02 2.567526834523320645e+02 1.791854948779896688e+02 1.412277552146151152e+02 2.660711025781407670e+02 2.073940326990519054e+02 2.509760072499196610e+02 1.358593750308925223e+02 2.127422683140523532e+02 1.874643773621423293e+02 2.844455725631112273e+02 2.197223292953194118e+02 2.049519862750077266e+02 1.674367936692717365e+02 2.806316262053937294e+02 2.040091003350897836e+02 2.675290975004411962e+02
+1.483513543637005796e+02 2.384008274111940011e+02 2.834409911154408519e+02 1.344593118283445392e+02 2.346883831968173979e+02 1.381882879805813218e+02 1.241165074750676638e+02 2.186327911062819567e+02 2.466602279029802673e+02 1.573094529523951906e+02 1.568918412618390903e+02 2.289205163045023710e+02 1.170165333644822283e+02 1.742406104080407658e+02 2.082974381484526702e+02 1.600869123712819260e+02 2.399160913983472199e+02 2.877189278027444743e+02 2.845252294036096146e+02 2.342337907657317544e+02 1.496264758341107779e+02 2.905797831387872066e+02 2.824703799011629144e+02 1.047015685176013307e+02 1.056531628249932169e+02 2.778559625738202499e+02 1.693549799118289343e+02 1.654193764711911570e+02 1.062077606699500762e+02 1.159643419206647792e+02
+2.694780377267857716e+02 2.229138360502907403e+02 2.407432883969363218e+02 1.240072643521201741e+02 2.128611568148922970e+02 2.114050669978733481e+02 1.042337934877265297e+02 1.044783539591350490e+02 2.706611056394938259e+02 1.972285130309975898e+02 1.959046941044780681e+02 2.915493579522836853e+02 1.131994346897827342e+02 1.197362406389762839e+02 2.877593780027675621e+02 1.089470964294721824e+02 1.996015695685267417e+02 2.185569019121031999e+02 2.102686704320404374e+02 2.955299037924150980e+02 2.987478446256551479e+02 2.517129931888254646e+02 1.552463625479420557e+02 2.295020326441428153e+02 2.886454895961533111e+02 1.869792800456660871e+02 2.703426621835664037e+02 1.873514421416134326e+02 2.714620374401066556e+02 1.623625260081516331e+02
+1.457420078291350194e+02 1.926195242081234369e+02 1.841639049563959247e+02 1.397830290030836125e+02 1.287503203163068406e+02 1.684614546803193775e+02 2.820658047345126533e+02 2.986548244924653090e+02 2.631399932039782925e+02 2.870930868530864473e+02 1.141938207690214426e+02 2.868552010662050407e+02 2.019110175402121286e+02 2.840219745246005232e+02 2.848478851173646262e+02 1.902287203163165259e+02 2.696968940302964484e+02 1.690355482825476656e+02 2.171695948786692725e+02 1.960363641465239652e+02 2.930566891688549731e+02 1.380341365242818483e+02 1.769912313914243214e+02 1.164985277343077996e+02 2.079184380436491324e+02 2.871364788135472850e+02 1.796231479741346391e+02 1.115892945700443875e+02 1.922852518794877028e+02 1.851500906627327083e+02
+2.894943401361737187e+02 1.972990286414578804e+02 2.801948561309920933e+02 1.993490085147259947e+02 2.539099743775018112e+02 2.972486389690005240e+02 1.162404922698449354e+02 1.801898545246462504e+02 1.283416456049016858e+02 2.289248555429664407e+02 2.419505668531598985e+02 2.755101537543703216e+02 2.786083442131507013e+02 2.461931811431258552e+02 2.699066237266536064e+02 1.088542193903703179e+02 2.302113104476973149e+02 2.158136503417114227e+02 2.797451432348925096e+02 2.832754349673875822e+02 2.207567008139471909e+02 2.920947868166995249e+02 1.300092217647513735e+02 2.953259288980694350e+02 2.539624465668687492e+02 1.304833679125420645e+02 1.051395153781939484e+02 1.855592224876973830e+02 2.160289702497469477e+02 1.227895712666205981e+02
+1.029685235386965587e+02 1.410297052380113882e+02 1.832105986621241982e+02 1.016727951098498579e+02 2.130361696974732126e+02 1.817578553203918830e+02 2.644724203174304193e+02 1.713346250427240420e+02 1.297164370175517547e+02 1.072810924841072193e+02 1.083932811014470161e+02 2.860684171745337494e+02 2.893854146138399983e+02 1.677808320623732925e+02 2.343535290724524600e+02 1.209564642240636090e+02 1.329537830609780542e+02 2.924542956964438645e+02 2.733376468658280487e+02 1.397146179999238598e+02 1.103570089598620285e+02 2.231457082965310690e+02 1.056672424832338635e+02 2.887779644840117612e+02 1.127167878193751704e+02 1.387640376146708263e+02 1.791595456124304633e+02 2.709107895779202408e+02 2.238624693992912569e+02 1.773395240564728397e+02
+2.317578772498348769e+02 1.294950944138938667e+02 1.126253428029936572e+02 1.371351849575549693e+02 1.785990678455200964e+02 1.021081186758702444e+02 1.471984209931611360e+02 2.907355141803875540e+02 1.881128962816476644e+02 2.776434621780599628e+02 2.231668573818950279e+02 1.905362514139340817e+02 1.921875823712000226e+02 1.027725913116546792e+02 2.939602582690168902e+02 1.776540079128602656e+02 2.761214484196684111e+02 1.042033722248946646e+02 1.812858538041361385e+02 1.739774673118114663e+02 2.626640185867897799e+02 1.702975408841979288e+02 2.558138050153142729e+02 1.733257751657050392e+02 2.918973111180089859e+02 2.499103812623473857e+02 1.210050998380505973e+02 2.819910650801346605e+02 1.887952629909842699e+02 1.910084514453274380e+02
+2.212539479167726029e+02 2.774434360961662378e+02 2.337566454731646104e+02 2.345785537275947661e+02 2.365459264006348405e+02 1.983982238092833086e+02 2.030822332599765332e+02 1.995891111618029186e+02 2.834365683300363798e+02 1.036872616932399609e+02 2.192093181482490252e+02 2.601252995545215754e+02 2.498786393235831724e+02 2.102914196276636858e+02 1.344974807588668000e+02 2.319076536245909210e+02 2.769341510052834110e+02 2.705990780330756138e+02 1.679097240924248240e+02 2.394521666103182724e+02 2.042111123157340842e+02 1.679545908808316028e+02 1.638112120198904051e+02 2.498667640522866407e+02 1.298749690282424183e+02 2.953546510122243944e+02 2.420377599473625025e+02 1.972281420856064642e+02 1.511153679243939223e+02 1.785899871179086063e+02
+2.568297621323404201e+02 2.469847896802298237e+02 2.766623631158322496e+02 2.476135901735717937e+02 1.788596740963971570e+02 1.849716544556056874e+02 2.568516536462929594e+02 1.692762419184084877e+02 1.468834240718183537e+02 2.716053370235183593e+02 1.674083895790932957e+02 2.340636951853666687e+02 1.637725360284847227e+02 1.316562872243186177e+02 2.850086566701365882e+02 2.066513343106022944e+02 2.990778363456342390e+02 1.780020440519503495e+02 2.906711993591478631e+02 2.149926413975278479e+02 2.151504627144789765e+02 1.458362697904619836e+02 2.339644011324822657e+02 1.740513991402896181e+02 1.804876886135730842e+02 1.706585538790989176e+02 1.113370339871644603e+02 2.032819788543359039e+02 1.225434838619497526e+02 1.558188197132453183e+02
+2.752385657001058803e+02 1.704994416021052643e+02 1.607090409105587696e+02 2.031247490318933444e+02 1.333383797740430339e+02 1.922643047184382112e+02 2.665685682619526915e+02 2.611043497447243453e+02 2.444450591022788615e+02 1.012899678037660181e+02 2.236752860048796947e+02 1.164606756896235993e+02 1.768812782093617955e+02 2.532808672341815850e+02 1.308823477633827395e+02 1.683394957344131626e+02 1.787390150786144716e+02 1.962681762314343530e+02 1.178176219749694980e+02 2.151624908275416885e+02 2.951256579216935734e+02 2.058583926262361388e+02 2.348769662163374790e+02 2.500118096543036472e+02 2.065978549387351109e+02 1.732426267043477139e+02 2.575950640438621804e+02 1.826939497339359946e+02 1.586062531006688801e+02 1.141086110094916819e+02
+2.107478059550890066e+02 1.212326460542207940e+02 2.154852140069355073e+02 2.624147598788578648e+02 1.169795422214265699e+02 1.682202484364929660e+02 2.987700686247625299e+02 2.259973608163532504e+02 1.912690930240648015e+02 1.896338093439390775e+02 2.747727757049322008e+02 2.388804299971102978e+02 2.538821160842531128e+02 1.839990833334872491e+02 2.839611350159472067e+02 2.953225980324958755e+02 1.674336071760058076e+02 1.609172697163818953e+02 2.902596210806400450e+02 1.513824951234124114e+02 1.873458283487339600e+02 1.695960935104061491e+02 2.116215526550050470e+02 1.849422962892989233e+02 1.434256749723924713e+02 1.304784783123307079e+02 2.632948417544853328e+02 1.656472047377057777e+02 2.303125851744007377e+02 1.681993961373014486e+02
+1.104191565760665128e+02 1.750924257030650040e+02 1.242494131306669090e+02 1.541741282893887899e+02 2.585460716706878657e+02 2.286423505464783261e+02 1.890990979891397501e+02 2.707781238779197679e+02 2.619171833457787670e+02 2.695823002806438353e+02 1.941989480397771786e+02 1.389058748786196134e+02 1.283479072532797431e+02 2.347481590897206729e+02 1.518985431591505630e+02 1.757095590143896402e+02 2.225334593093496096e+02 2.231309387578290568e+02 1.039310896134069395e+02 2.614149485334186238e+02 2.212890027388380076e+02 1.425609106790709859e+02 1.376620423520403733e+02 2.403640719649376933e+02 1.152284694789922526e+02 2.108068210397188409e+02 2.526640691383259991e+02 2.323633859683563969e+02 2.720522122905912283e+02 2.498034621012949685e+02
+2.223449436042899947e+02 2.823923482876032267e+02 1.728419664392092727e+02 1.542710015610415724e+02 2.699062389875002737e+02 1.776741825057288793e+02 1.800001384193664080e+02 1.819433000632012636e+02 1.436484983468620840e+02 2.344086094824976954e+02 2.824459866922626361e+02 1.860318500101035681e+02 1.749968777772715498e+02 2.792448396035428004e+02 2.134719239619671498e+02 2.649346822194891047e+02 2.535109715864082602e+02 1.651109960016319178e+02 2.407385671793928736e+02 2.276937454871455770e+02 2.965404491761371446e+02 1.771850291606413634e+02 2.317902380753697855e+02 2.233400563607936817e+02 2.471010629200553694e+02 2.999085009765063319e+02 1.263611681933084725e+02 2.954593528043474180e+02 2.279026703099021915e+02 2.630592311905735414e+02
+1.662671322607742752e+02 1.600442354914371208e+02 2.476541290397616137e+02 1.471310870365195740e+02 2.302232198157895198e+02 2.833854716762933776e+02 1.464787719165046553e+02 1.913553080525503560e+02 1.014594285276723156e+02 2.182963956218923158e+02 1.629807715448000636e+02 2.692152036144454428e+02 2.287521686048013976e+02 2.982465613581407524e+02 1.646080094271899839e+02 1.685350412843276899e+02 2.638506951547767585e+02 2.931520510309920837e+02 1.395453733045734168e+02 2.192750645467382355e+02 1.118562057344099543e+02 2.210439168983162972e+02 1.977199388190010438e+02 2.248771354041466566e+02 2.967583759675493411e+02 1.144799677712354793e+02 2.877369511761256149e+02 2.831237961244747225e+02 2.909105411130262269e+02 2.550977837950437390e+02
+1.519738194711488006e+02 1.042788193386050608e+02 1.298121344332743377e+02 1.827398187867084971e+02 2.371985543371917800e+02 1.647119082252074236e+02 2.792046599520904238e+02 1.737333830141970452e+02 2.019611337599129968e+02 2.402390448779260623e+02 2.107045415433176174e+02 2.447101973248666411e+02 1.584507446746840174e+02 2.877533155913679366e+02 1.209142860803932251e+02 1.903846717728129931e+02 1.485923447895592631e+02 1.040627746119376695e+02 2.329784390325348795e+02 1.136264746597146882e+02 1.019818146651219024e+02 2.395077159260278847e+02 2.571474008697522322e+02 2.507839876514990465e+02 2.649762964978717719e+02 1.398370322453145889e+02 1.116668292809188614e+02 1.262068209877756289e+02 2.561228606182183967e+02 1.019925993853918413e+02
+2.525550526067758881e+02 2.649927164229666232e+02 1.457764901336312846e+02 1.519121804298574148e+02 1.112983565335166247e+02 2.979018464293943680e+02 2.517559946611144142e+02 1.257251989750113239e+02 2.377842966816966737e+02 2.692916709774201536e+02 1.558791612193160745e+02 2.988101508442036334e+02 1.264682305510686575e+02 2.586186621657187743e+02 2.397705732393993969e+02 1.799773948514575750e+02 2.289212202830902072e+02 2.551439950194432242e+02 2.270410183155361210e+02 2.624250216967006395e+02 2.894508375480465361e+02 1.106681053253299183e+02 1.696755343387707171e+02 2.302155275158106917e+02 1.445113211107399138e+02 1.886794441144848236e+02 2.129906512422033131e+02 2.340704769023953986e+02 1.082933010325512981e+02 1.977265970892881626e+02
+2.874406426475449052e+02 1.913451373833616742e+02 2.647704607931181044e+02 1.881279366057496532e+02 2.840067538093052804e+02 2.179159896935567247e+02 1.839859875309309132e+02 1.189702187115672132e+02 2.794517441847542614e+02 2.815599370853284427e+02 1.258259904677427699e+02 1.428483537633051412e+02 2.541426109645265967e+02 1.338781623221585164e+02 2.877181693280556374e+02 2.041742222547631513e+02 2.429167887622087392e+02 1.861891141000048435e+02 2.815058357304060337e+02 2.932279451804108703e+02 1.428092602118218792e+02 1.129541128601477595e+02 1.104970415865426503e+02 1.361068733124779726e+02 1.702082770497633533e+02 1.583852379729134157e+02 1.614070717213254511e+02 1.054529192214523476e+02 1.116913943762218366e+02 1.806474879921846366e+02
+1.904583320230821926e+02 1.477903225290235980e+02 2.926623631581093150e+02 2.267002240281469199e+02 1.643763662729302268e+02 2.199235242233247902e+02 1.853923849032223359e+02 2.941726936508506469e+02 2.665966841434134835e+02 1.199566433868006357e+02 2.951991052054676175e+02 1.594510101065885124e+02 1.458298791153635534e+02 1.532145001211049475e+02 1.411023254500616133e+02 2.140513226665028128e+02 1.678784758049908419e+02 1.708308530430679184e+02 2.099440033407245778e+02 2.664570659333852518e+02 2.959905162222905801e+02 2.829445582187913715e+02 2.588706049990775000e+02 1.722199615074994483e+02 2.869184560072056343e+02 1.681559218785307053e+02 1.503240659973911306e+02 2.588597461006905291e+02 2.678295026364270939e+02 2.154561503934444886e+02
+2.071927904539387839e+02 2.171736003654224305e+02 1.593735315924418785e+02 2.947356579175152547e+02 1.742775794491871011e+02 2.184611101357660914e+02 2.225198306238390842e+02 2.168369296352294668e+02 1.755672175076374231e+02 2.252214925755263835e+02 1.563369877784152209e+02 2.085332604119019209e+02 2.572482649031854862e+02 2.951800051631508950e+02 1.079183556031880329e+02 1.218838648771928774e+02 2.685371616407055626e+02 2.419162624723466877e+02 1.022244855205179022e+02 1.101224552326326602e+02 2.597819405832950679e+02 1.134555412120959517e+02 2.870491931154815575e+02 1.374365654160442318e+02 2.645641258978021142e+02 2.531141673781916666e+02 2.361747183362105886e+02 1.893108861581111171e+02 1.539026912190118139e+02 2.501170032332128415e+02
+2.547888423116186232e+02 1.853670755857669974e+02 1.389074705955763420e+02 2.709929622842061008e+02 1.228800068832790515e+02 2.778321736112652616e+02 1.309641642706778555e+02 1.156980811627219055e+02 1.431313378740429982e+02 1.646591400066212714e+02 1.920182917083556049e+02 2.178001706163468043e+02 2.235489712948179886e+02 1.079088316874027242e+02 2.447091545393394370e+02 2.320303973549428065e+02 2.359105911115680101e+02 2.382951907588607128e+02 1.062067779247245483e+02 2.905379355334102911e+02 2.023335418134440715e+02 2.128348219019524095e+02 2.865957710750057004e+02 1.782427960783044796e+02 2.856139874187100531e+02 1.139905905655008098e+02 2.264676166669663360e+02 2.479179013019825675e+02 1.746165350218777803e+02 2.255842464851874070e+02
+1.883869033800616819e+02 1.965817072065136699e+02 1.890868666652849015e+02 1.898737766004000491e+02 2.779218373710688184e+02 2.134628932560298722e+02 1.100835458783813436e+02 2.768750976313177148e+02 2.547073561014202880e+02 2.728160162818061281e+02 1.733645011505617504e+02 1.625036971255624394e+02 2.977754324167240156e+02 1.632372616873928450e+02 2.174045665187836107e+02 2.606964806055048030e+02 1.625508452643421720e+02 1.715067940576683441e+02 1.218481476549646629e+02 2.842560845538128547e+02 1.928678337146606623e+02 2.708765321293922739e+02 2.077020047066411621e+02 2.923591890868326004e+02 2.230876482822842206e+02 2.689925468225608256e+02 1.036588336737814586e+02 2.052618530546818363e+02 2.648220111560104897e+02 1.868396012623422280e+02
+1.785937212608853315e+02 2.973454718025594161e+02 2.368986004504845084e+02 1.146953890760472348e+02 1.265905165006724644e+02 2.255973396401841455e+02 2.163675674740596264e+02 1.527913853500098185e+02 2.283358642424602465e+02 2.759303134283557597e+02 2.876072117803540777e+02 2.029362495845153944e+02 1.212425121544320490e+02 1.100001317370093830e+02 2.335268996183764330e+02 2.375268130741384027e+02 2.336339660612213436e+02 2.462747325703657282e+02 2.841981652294566061e+02 1.081959034831858446e+02 1.291296469376330833e+02 2.602425849072438950e+02 2.575669438145553727e+02 2.135342654708205714e+02 2.294373105308322067e+02 2.706502840281193016e+02 2.928412927772634475e+02 1.330151104176747765e+02 1.533759962548247131e+02 2.744006234275867655e+02
+2.257735103076358882e+02 2.728385269717355186e+02 2.290872800510813363e+02 2.330934692803050154e+02 1.037274604992595215e+02 2.674079561164307961e+02 1.195755645916240866e+02 1.402804464035359047e+02 2.170516922702277611e+02 2.744725918691634661e+02 2.930458735600458908e+02 1.496408395971007224e+02 1.595562419103408729e+02 2.835538666488008630e+02 1.780163567793609332e+02 2.906408145890961237e+02 1.133853019218590248e+02 1.494630592331960770e+02 1.214592101712915451e+02 2.263015460193574881e+02 2.598100406717117608e+02 1.963383361449393192e+02 2.235083985338561376e+02 2.946475410923074492e+02 1.758055989844200724e+02 2.637780439251395137e+02 2.875400021086666698e+02 1.577781508415756662e+02 2.146553072676672684e+02 1.798181279868336446e+02
+2.620574340171276617e+02 2.153711882285265915e+02 2.245961661539886904e+02 2.054509343172356921e+02 2.926008719008261210e+02 2.432564531143420652e+02 2.303655720936658611e+02 1.615953803481287991e+02 2.918921003884012748e+02 2.760746977013722017e+02 1.909442200188182710e+02 1.596536528765051060e+02 2.491411570718119037e+02 2.924629085319008936e+02 2.587604848561293807e+02 1.524605619386706792e+02 2.737599884275671798e+02 2.090365453766356723e+02 1.610548024559351461e+02 1.018774121963877803e+02 2.410901898572944049e+02 1.875862586601133444e+02 2.588626077539996686e+02 2.579873618626863845e+02 2.838744453525392828e+02 2.580071516854936817e+02 2.114887112935771256e+02 2.675506009048368696e+02 1.260391751775616029e+02 1.858866479221875920e+02
+1.963224789638335892e+02 2.444908535968891954e+02 1.962779352478895589e+02 1.553096436749702889e+02 2.483662294276224429e+02 1.067992874414757978e+02 2.633849667942634483e+02 2.454321751613854588e+02 1.854433418739394028e+02 2.562889653665436072e+02 2.506342746416453622e+02 1.900819942764665598e+02 1.704565979131312474e+02 2.916979173024495822e+02 1.898592592817412310e+02 2.687872145548625440e+02 1.525347862509104004e+02 2.786582104923993484e+02 2.310813531087783872e+02 1.166208530157265386e+02 2.602471623613457723e+02 2.102772607982462034e+02 2.183751071150112466e+02 1.065011561509572999e+02 2.813176394708128782e+02 1.792292558016025623e+02 2.804083600455996361e+02 1.557890480883644102e+02 2.439522159916458861e+02 2.652201783594097719e+02
+1.425266334964659904e+02 2.075049705342416928e+02 1.704914602333145126e+02 1.886474594627911756e+02 1.252313163849750595e+02 2.836097447326676502e+02 1.406399617929505439e+02 2.414245225193989768e+02 2.576349788827002385e+02 1.486724691707949262e+02 1.092388214497626961e+02 1.685935770192617724e+02 2.033388664740227227e+02 1.390809359458484948e+02 1.056188661648174758e+02 2.350581131530574055e+02 1.964295662906907012e+02 2.578831766420791496e+02 1.109952979966328144e+02 2.027546721440710940e+02 2.501377690830167637e+02 2.111868593440530617e+02 2.324728205186171692e+02 2.453971856382445935e+02 1.723822394524685819e+02 2.872924628066301693e+02 1.140766727214026446e+02 2.221345013854892159e+02 1.728173248741775296e+02 2.676400838220500873e+02
+1.711571121866394947e+02 1.085759247733173396e+02 2.001753766691515750e+02 2.760446855018309407e+02 2.056587091496190567e+02 1.121827347031253197e+02 2.274644480946081444e+02 2.571858980756533128e+02 2.945439217283808375e+02 1.913312305877045674e+02 1.500446430731354894e+02 1.650397772114545489e+02 2.581660073502400792e+02 2.094009769144933273e+02 1.731816092302842094e+02 2.727903589313663133e+02 2.606648610353666982e+02 1.460656197586831695e+02 2.016951883706858268e+02 1.247477859691891240e+02 1.732157361502286221e+02 1.195560196858487245e+02 1.253893910664414904e+02 2.455457677441618216e+02 1.778732818035962850e+02 2.490436815297808266e+02 1.487573988963908960e+02 1.937302250034929898e+02 1.502426775501600389e+02 1.110841009912817583e+02
+2.382535443835092508e+02 1.972031918916456732e+02 2.576267295349729807e+02 1.730194312205534288e+02 1.301593684828995094e+02 1.624008376323430127e+02 2.060036399923972681e+02 1.233366573394677630e+02 2.194763391620297739e+02 1.701495187616251314e+02 1.223397596968992218e+02 1.987622577877627350e+02 2.511738650001373117e+02 2.130204435763062634e+02 1.993899817227978133e+02 1.597764561560970265e+02 1.205224890815559604e+02 2.184250491898233690e+02 1.755709834516516139e+02 2.741081010321077542e+02 2.104755291992826187e+02 2.698148014221883386e+02 1.299106544858947814e+02 2.008369880697999292e+02 2.938716155581552130e+02 2.671516623028076083e+02 1.332347035771324215e+02 1.291435420390463378e+02 1.835021202063177554e+02 2.002866194329941720e+02
+2.554906544300547182e+02 2.365682876454178540e+02 2.924004211094360244e+02 1.662852505275750730e+02 1.123350814405425808e+02 1.910015128879867632e+02 1.341551373493250594e+02 1.313122940860927770e+02 2.397311819484906152e+02 1.559268654058377024e+02 1.407120959783594003e+02 2.371419051640040152e+02 2.217591327496910480e+02 1.881187811266301537e+02 1.632462641154496907e+02 2.970940639140721373e+02 2.422917505999918433e+02 1.356966040631749593e+02 1.702398486895437486e+02 2.608644720933497183e+02 2.783751927848827563e+02 2.951746624002826138e+02 1.720706565846523688e+02 1.275268866601749096e+02 1.880990845238362681e+02 1.129502795714700625e+02 2.919985401845127626e+02 2.747497807112307555e+02 2.667734033775608395e+02 1.373740617490475699e+02
+2.115416415080857746e+02 1.431719947715498336e+02 1.718744824503889674e+02 1.075365968452523902e+02 2.220100335193473029e+02 1.965127222891928795e+02 1.062726056237197838e+02 2.631794488147562561e+02 1.658640190278337627e+02 1.169182569761068464e+02 1.645780782039788619e+02 2.940728738870184316e+02 2.979920277570993790e+02 2.125849825405138631e+02 1.533327700316632161e+02 2.655551337415409421e+02 1.329075684859120088e+02 2.686536376777100941e+02 2.299223677315555676e+02 2.123135030200585334e+02 1.474417961566917654e+02 2.899688778344954017e+02 1.439992490259426461e+02 1.606165457016644780e+02 2.854253601360321682e+02 2.837928223954166924e+02 1.868865943198568402e+02 1.809928275876523571e+02 1.583918020284682484e+02 2.384217495701244331e+02
+1.181670050605631417e+02 1.525653020190297582e+02 2.615084872177121724e+02 1.755024420886775829e+02 2.989795566898581001e+02 1.573585789513378188e+02 1.903575226478752711e+02 1.641861715477102166e+02 2.943146494922903003e+02 2.038802368327418719e+02 2.581560000437879694e+02 1.504995935930718076e+02 1.095655891680627008e+02 2.628623226127134558e+02 1.069018430130149255e+02 2.750818506761686422e+02 1.121786007219489818e+02 1.106710601660877415e+02 1.217291564359016149e+02 2.915199334459504144e+02 1.325859381653097557e+02 1.737237090326784141e+02 1.036075961875061751e+02 2.392327113385031510e+02 2.486092083099548233e+02 1.259492139939950306e+02 2.665249241620523435e+02 2.103119814995928039e+02 2.718465347096271216e+02 2.018653364759854298e+02
+2.085808638159350608e+02 2.977621083099649582e+02 1.394173606621695285e+02 2.232898484647512873e+02 1.347812725162832521e+02 1.574683348766579627e+02 1.827258429860655724e+02 2.827887224427595356e+02 2.608349632236463549e+02 2.370910079389979046e+02 2.033290260845359398e+02 1.566531500677691042e+02 2.982287288081304837e+02 2.998057140577807900e+02 1.906108269451214596e+02 2.023344526730545851e+02 1.717672594576409040e+02 2.093320563180507747e+02 2.649028095061802333e+02 2.840422446800275793e+02 2.111868958418739908e+02 1.803076798272542760e+02 2.311954915496957312e+02 1.563425451766251513e+02 2.610066662710300989e+02 1.855286443040786537e+02 1.478912573842241045e+02 2.544380211258828410e+02 2.799416317427427430e+02 2.238937193404353252e+02
+1.269470316997365131e+02 1.895539822645488357e+02 2.443421824114378467e+02 2.632321641240823737e+02 2.164919638664115951e+02 1.042697198382110884e+02 2.896061632271033659e+02 2.068164163046922681e+02 2.059671371408958294e+02 2.352532326493898722e+02 1.046233655847859296e+02 2.755187319279126541e+02 2.344641322699609987e+02 1.434858288567621969e+02 1.255438908126368176e+02 2.548141480364848803e+02 1.466719626681152704e+02 2.020892715394597872e+02 1.195107046056347713e+02 2.012968701954913797e+02 1.996902768982717191e+02 1.560547951636197013e+02 2.162555170020900164e+02 1.483278604161245084e+02 2.615607136845001151e+02 2.424344777210258997e+02 2.524090919470299070e+02 1.726167614603126026e+02 2.199373130240069258e+02 2.318614758097714912e+02
+1.590143031424979370e+02 1.933970326403360502e+02 1.227042846200323112e+02 2.107086401017011781e+02 2.844049872407889552e+02 1.420899421875644464e+02 1.736571760246831673e+02 1.130876049831349661e+02 1.470306210908964317e+02 2.959723384067232246e+02 1.438030965279091049e+02 1.685928342779160403e+02 1.351720793691902713e+02 1.909711091249450590e+02 1.477005416416634205e+02 1.010528808923594681e+02 2.205493627613245167e+02 2.367352422049318079e+02 1.224997665062844305e+02 1.620949451166091251e+02 1.270634404764108467e+02 2.673321646154778932e+02 1.618882934467209225e+02 1.208967331765591524e+02 2.073956586593529607e+02 1.223277950209799059e+02 2.625820210851194361e+02 2.262632377752408672e+02 2.222881433937307349e+02 1.716205611551696961e+02
+2.376094214038359667e+02 2.287867757784330820e+02 2.035778067022395703e+02 2.546588007138803391e+02 1.514832565507949198e+02 1.736683542684334327e+02 1.991020520349750598e+02 1.873563480883249213e+02 1.589186331386689801e+02 1.042563150975229149e+02 2.019924784676414902e+02 1.136537158101241971e+02 1.091264020137841158e+02 1.352770409719844054e+02 2.178414513482917414e+02 1.831380105899948489e+02 1.114225947990316570e+02 1.736029819106907439e+02 1.354612112967272424e+02 1.996055424300992627e+02 2.905125217944571432e+02 2.980326934372309893e+02 1.560898949881966473e+02 1.943286005606112212e+02 2.429797193518882636e+02 2.652714760000731076e+02 2.863852813340179182e+02 1.838252831614893239e+02 1.814799327205894315e+02 2.338290144642930954e+02
+2.526381992552952340e+02 2.089745531365245483e+02 1.869938021147821701e+02 2.864405091884094645e+02 1.736924996547539877e+02 1.479914815134324613e+02 2.132537252074255321e+02 1.830098172980584934e+02 2.476607236946428827e+02 1.066503395377639265e+02 1.405219898965278276e+02 2.743866427972425299e+02 2.269305408710248173e+02 2.791638036143738191e+02 1.824422387811073634e+02 1.852994662516045423e+02 2.777032940597408128e+02 2.109153407914434126e+02 2.214759900082639490e+02 1.857033490029854761e+02 1.302118293337227328e+02 1.889562709124264188e+02 1.844813915245081546e+02 2.875482403705134402e+02 2.022892465111445404e+02 2.230217175841083872e+02 2.843056043891419904e+02 2.350834055358549222e+02 2.080929758762673032e+02 2.770814576487081240e+02
+2.389430507965955428e+02 2.463651891862864147e+02 2.369578462650186452e+02 1.902366989508459199e+02 2.003468797600664004e+02 2.681735461841141728e+02 2.362787745532336601e+02 2.323782975776413480e+02 2.525302892415198812e+02 2.828059530799229151e+02 2.840327053185673662e+02 1.223941816187275435e+02 1.056255174412387134e+02 1.386503050117574105e+02 1.384325506562210535e+02 1.176641636239777426e+02 1.670688688422628161e+02 2.506322552784647826e+02 1.181229702988334083e+02 2.607048520072489737e+02 1.667476448166365515e+02 1.310085831735554223e+02 1.553111545647699927e+02 2.907454039462255651e+02 2.844644695877585718e+02 1.989933906493695019e+02 2.662036190025202131e+02 1.792754658114438371e+02 1.073664330563030944e+02 2.793141822468826376e+02
+2.640306978448612654e+02 2.458161373226257069e+02 1.015510894380497575e+02 1.527048938693112916e+02 2.893334394723561900e+02 2.994916089563248534e+02 1.054055716033572452e+02 2.278819528330843127e+02 1.890909183007994443e+02 2.134436011261824433e+02 2.654189934957544210e+02 1.780852604264427725e+02 2.222277079756825628e+02 2.689688042831336361e+02 2.232046857529678050e+02 1.778434593737022169e+02 1.336418515516146783e+02 2.739064893378349552e+02 2.065065746675076355e+02 1.329712924393647313e+02 2.176938186185978736e+02 1.918043587714230114e+02 2.280421349429639122e+02 1.182282112372680842e+02 1.370131137248831692e+02 1.716251366233928195e+02 2.412427837766657888e+02 2.738208811966829899e+02 1.471415247536169488e+02 1.638288393831292353e+02
+2.669085627842696908e+02 2.477147782526785136e+02 1.718200513884793565e+02 2.299346472745743597e+02 2.016242169414389309e+02 1.631378839470685307e+02 1.859938403107781255e+02 1.609729169019194330e+02 1.536303039404505171e+02 2.234728543554556950e+02 1.953401084257108096e+02 2.920381588589057174e+02 2.034966688752892310e+02 1.019427894404581139e+02 2.980736970140829953e+02 1.738263823108001418e+02 1.531314323312329293e+02 1.400030133312995702e+02 1.802287961283190043e+02 1.719909696301723443e+02 1.974918793689569725e+02 1.666882741246514001e+02 2.879569025675030502e+02 1.334044307903087088e+02 1.016937569869423896e+02 1.660343944328368764e+02 2.214967229035601974e+02 2.539424882366704992e+02 1.211914878013190133e+02 2.835892388637473687e+02
+1.704109091340931741e+02 1.337843054639438378e+02 1.570106251098002588e+02 2.123587857442842335e+02 2.788290802167920219e+02 2.795601449888932848e+02 1.220747715539721696e+02 1.179984498565524405e+02 1.552783750686872963e+02 1.257256444039083192e+02 2.312614004137946893e+02 1.971625968209403084e+02 1.208837070227885135e+02 2.231693789143681386e+02 2.332576722664892941e+02 1.659208209363902711e+02 1.979623049620595907e+02 2.497459328714609512e+02 2.540243570817084446e+02 1.309045902221261599e+02 2.376613837929333499e+02 2.140333351750954023e+02 2.231625169053620539e+02 2.869160136215916737e+02 1.282002159167354023e+02 1.029173927424986488e+02 2.432034421383394545e+02 1.495648010251883306e+02 1.971910657968611247e+02 1.358409247687675361e+02
+1.833826243837603442e+02 2.960483510370855811e+02 2.343723986770386318e+02 1.560358896543934293e+02 2.499669478251469172e+02 1.762005778153444169e+02 1.918050503412152921e+02 2.089352602085182866e+02 2.770127170480132008e+02 1.268157216157417224e+02 2.670673189640755822e+02 1.547628252866769287e+02 2.602514896343354849e+02 1.557532905756793866e+02 2.574076233589491949e+02 2.646855654359934533e+02 1.749681240869035719e+02 2.465698370051858035e+02 1.076897610845538082e+02 2.337637497458482301e+02 1.791847918196868932e+02 1.967068388721293104e+02 2.340964493346380095e+02 2.762770912600988140e+02 1.174465260954359564e+02 2.950490567997024982e+02 1.354710376622284116e+02 2.342233227246520642e+02 1.617966271393036379e+02 2.107879984327653915e+02
+2.493754578342164336e+02 2.275093847135933061e+02 1.466148442335522191e+02 2.261697123059220189e+02 1.213252451599347950e+02 1.628949300801819504e+02 2.100466501082228206e+02 1.508908296808102989e+02 1.488199564735201079e+02 1.727131563468088302e+02 2.306747713688439205e+02 2.570279850661015644e+02 2.309125192178541113e+02 2.422081718543400370e+02 1.769407234272878782e+02 2.688532243604371956e+02 2.276780878660686085e+02 1.065345319601523641e+02 1.535069430280279050e+02 1.717902253122074967e+02 2.876755354986605084e+02 1.683056100689713332e+02 1.120105413679224569e+02 1.755508096146901664e+02 2.095863991316655870e+02 1.523590730880595174e+02 2.944635547123552897e+02 1.444697311944634066e+02 2.165062978405008494e+02 1.410128743297030098e+02
+1.434402193906418006e+02 2.368914090178307106e+02 1.963465933374949941e+02 1.914557752364961516e+02 2.870767419320768568e+02 2.044699144835463187e+02 1.223520556576680036e+02 2.352284247043744472e+02 2.917945011866975165e+02 2.225925999946875322e+02 2.240309397680480288e+02 2.048455962243571093e+02 1.188048963943729035e+02 2.200553599997707579e+02 1.885605934416515765e+02 2.863412817843446874e+02 2.913876692311304737e+02 2.446563674684449552e+02 2.981153955140326843e+02 1.111775924383378253e+02 2.239868361016714857e+02 2.540473271011064469e+02 1.343930974769885438e+02 2.368686732696482409e+02 1.175691554116390591e+02 1.014879352562223715e+02 1.330784448687188046e+02 2.045426156006566885e+02 1.168174380391246245e+02 1.704438548713551995e+02
+2.696784010384477597e+02 2.991318545155386346e+02 2.120364825583467336e+02 1.950895785161033018e+02 1.216112431291165592e+02 2.438998438799096391e+02 1.588292735755803733e+02 2.347670069791354024e+02 1.862846309471772770e+02 2.258642611266068343e+02 1.423367506635381119e+02 2.692888471853933083e+02 2.950212092401994255e+02 2.331327670110776467e+02 1.542291422318579635e+02 2.809064569107727038e+02 2.358857646534314654e+02 2.378124255062788563e+02 2.664164586086786812e+02 1.387157904298663880e+02 2.297158046581682243e+02 2.386372312695162634e+02 1.246509391338716171e+02 2.338956320284196408e+02 1.820257170558419944e+02 1.957425768708682767e+02 1.680974560138464540e+02 1.288235048549348676e+02 1.483029350020115089e+02 1.744880718659300669e+02
+2.512494238114035738e+02 1.112846425403449615e+02 2.472643304237797395e+02 1.241745840646870818e+02 1.808849124644312099e+02 2.524760780760417731e+02 1.836118621524309447e+02 1.408362492891266982e+02 1.099623406752946693e+02 2.383967522197594064e+02 2.436606913384966049e+02 2.770699525768120566e+02 2.597573569531676867e+02 2.935649366424795517e+02 2.702790297508025219e+02 2.563597369995835606e+02 2.279477293752616447e+02 2.477470305460766440e+02 1.962131167814513333e+02 2.859744526791636190e+02 2.703401534622389590e+02 2.763052603711840902e+02 2.934416645125817809e+02 2.193475948646207030e+02 2.822891098008749395e+02 1.085391177109117820e+02 1.782208012387337703e+02 2.335496863699061976e+02 1.715066387390946829e+02 1.948062204233656303e+02
+2.879262290016004613e+02 1.676743911135137068e+02 1.403503828589753937e+02 2.744454339345198832e+02 2.935124358491533485e+02 2.920282649929100671e+02 1.390240222956847447e+02 2.426642861805074745e+02 1.217336684570653489e+02 1.311823750440439085e+02 1.647679902066092836e+02 2.962811279981685288e+02 2.945746172932865647e+02 2.005257587949587332e+02 2.072045953580022228e+02 2.893049469033056766e+02 1.913962360581630833e+02 1.823675529874825543e+02 1.830342103129283373e+02 1.222396004373517400e+02 2.248239872372262482e+02 1.170253438297526429e+02 2.853825568202013301e+02 2.214973458763422514e+02 2.563932510909227176e+02 2.144837192650675206e+02 1.793062298958048473e+02 2.920176466690815005e+02 1.515607839109829627e+02 1.981203765908239802e+02
+1.733053660232129403e+02 1.312183264386245583e+02 1.276233157677672807e+02 2.020942572504836789e+02 2.314817368496994732e+02 2.242589617101967008e+02 2.160504620978007893e+02 2.360595788588375399e+02 2.952977074031120992e+02 2.334652590044975682e+02 1.243453875174208747e+02 1.916144242306085630e+02 1.092365115042800596e+02 1.478765005471206280e+02 2.191946613400726278e+02 2.879274886834762697e+02 2.733443652356662597e+02 1.858481832262083344e+02 2.193747651131673706e+02 2.695165737089945424e+02 2.960753121523491700e+02 1.890691006834304631e+02 2.638343907584013550e+02 1.510492177865631334e+02 1.878288206285384661e+02 2.726561149875388992e+02 1.704246795027074199e+02 1.006381753343381718e+02 2.153734239260733148e+02 2.551451126036402854e+02
+1.591849792872858984e+02 1.304671215023752779e+02 1.427456440770346831e+02 2.882324895344759170e+02 1.680635293254793510e+02 1.205800311663507642e+02 2.861305963205076637e+02 1.219224106654408928e+02 2.467003871618023538e+02 2.830287806498602095e+02 1.445950870572595193e+02 2.496562286252286640e+02 1.464987579205844099e+02 2.848280464142704318e+02 2.785616857190397013e+02 1.837468579783306950e+02 1.246964377230690673e+02 1.251791080124520050e+02 1.496399061799681363e+02 1.375936265087168522e+02 2.547928467777094852e+02 2.554856419260690927e+02 1.285559318166884850e+02 2.092144446410586909e+02 2.868951534942014518e+02 1.178319347908447270e+02 1.347784205269015274e+02 2.851299399919766984e+02 1.754694686670390809e+02 1.016886128619324694e+02
+2.606618423405234353e+02 2.125366732076933545e+02 2.822772640751277322e+02 1.096405633955119185e+02 2.437561663288932721e+02 2.129146561548243994e+02 1.148823764090175530e+02 1.516868774610028368e+02 2.090025176018670265e+02 1.817684320186263562e+02 1.584667226055155709e+02 1.501973711988126468e+02 2.530199923706828713e+02 1.847948752811591930e+02 1.778871618489498303e+02 1.664551902511519188e+02 1.100020157933824265e+02 1.352000835393275509e+02 1.710981737682794801e+02 1.530513645967782566e+02 2.588476693974693035e+02 1.775587245068043956e+02 2.006331886716666588e+02 1.389709403689849694e+02 2.489553638298030194e+02 1.673604491791948021e+02 1.991154502489720812e+02 2.423848982654565418e+02 2.882603768001737308e+02 1.620650086718309240e+02
+2.723642490909132903e+02 1.680927290528325670e+02 1.005734627393615455e+02 1.598916606218045047e+02 1.672547346703738071e+02 2.361420151042074451e+02 2.741857058408131707e+02 2.533004150866734392e+02 2.036092771261417340e+02 1.091915011443997230e+02 1.145604210422382323e+02 1.209982156413156247e+02 2.749595368914399387e+02 2.177794513808643160e+02 2.054163746311436967e+02 2.185860861470465579e+02 1.504022045473846845e+02 1.713704456854883347e+02 2.175221629008602804e+02 1.230663148243889253e+02 2.419648244223723168e+02 1.383010418990747326e+02 2.040260833828849059e+02 2.966316994044250919e+02 1.630596872908637351e+02 2.562534082821714492e+02 2.549425872735235998e+02 1.983522705781282127e+02 1.524860865223137694e+02 2.736848821358530586e+02
+1.277021385004174192e+02 2.448445434866889343e+02 1.296687360965440803e+02 1.874271582575348702e+02 1.145742775945452792e+02 1.884744688522491742e+02 1.336298647132909423e+02 1.523816963142488419e+02 2.658270705367647224e+02 1.781637174983711134e+02 1.154610011723892171e+02 2.005342781476718415e+02 1.303166615041172918e+02 2.397284110571510496e+02 1.612912854182502542e+02 2.821645080329541315e+02 2.544831471501324813e+02 2.622237400581972224e+02 1.417212269902922230e+02 2.054005404298748658e+02 1.092142219674599062e+02 1.652051184306486107e+02 2.825679563619778492e+02 2.056286073102957630e+02 1.772062144904277545e+02 1.163520479257007310e+02 1.006186351926139366e+02 1.734025793931427586e+02 1.446958902579306709e+02 2.025820689614877779e+02
+1.798382687901162740e+02 1.604629760861514001e+02 2.668981169240885265e+02 2.763242846779806996e+02 1.318105471716862098e+02 2.191362245125996537e+02 2.770758446308884686e+02 2.308910816293108326e+02 2.956895796828827656e+02 1.566426856848869988e+02 2.326210561246332418e+02 1.206555816723871715e+02 2.603144096756907970e+02 1.172571782204154829e+02 2.219493974369055991e+02 2.385109304229506790e+02 2.599678734377965839e+02 2.850516346518521686e+02 1.472948582444382168e+02 2.234296740595885922e+02 1.427895312415343199e+02 2.848238578369252423e+02 2.260232767550441508e+02 1.544648385858973541e+02 1.163971462755376791e+02 1.762731012775239492e+02 1.089523563056807660e+02 1.663966154222005116e+02 1.342495772836978745e+02 2.922401077696804350e+02
+2.806557294060240224e+02 1.077657131130299604e+02 1.622983596366119059e+02 1.723469481204717795e+02 2.678046848873893850e+02 1.442059922525422451e+02 2.629931208031973711e+02 2.741083495447689415e+02 1.194142462414748707e+02 1.688961325073638022e+02 2.967954354880449728e+02 1.822107331135221671e+02 1.292333403080546645e+02 1.856814508383810391e+02 2.103923137448445573e+02 2.517859299913771451e+02 2.551152596962431574e+02 2.077883190793959898e+02 2.986930461834413677e+02 1.196764061335889551e+02 2.378823960447958257e+02 1.692017967083341432e+02 1.471250494556689432e+02 2.608355254883699672e+02 1.757172426071724942e+02 2.629426236813185369e+02 1.040244734248400533e+02 1.533558690719498827e+02 2.011860465194789072e+02 1.720545334339216765e+02
+2.966488050331527688e+02 1.809989340563203086e+02 1.871527370563514978e+02 2.315558973515319394e+02 2.657682292004950000e+02 2.237816732699509998e+02 2.282045922056215090e+02 1.846236325909775928e+02 1.644827554373339353e+02 2.760250360653360531e+02 2.492622345937652995e+02 1.483432536002697191e+02 1.527550390024584601e+02 1.573429964258168070e+02 2.090721206423400247e+02 2.535819867756219708e+02 2.420536340362719159e+02 1.691914404667937788e+02 2.388696721384086459e+02 2.593840245957078423e+02 1.331872961625781500e+02 1.116342264469163581e+02 1.680964276125217793e+02 1.555020753508222526e+02 2.422052215908822177e+02 2.626184375196450560e+02 2.674230788003709449e+02 1.948146659156083729e+02 2.663681889818526543e+02 2.795342087705012659e+02
+1.674728956867265310e+02 2.635505920196726493e+02 1.395353777027027604e+02 1.883233466008314565e+02 1.249441512057495913e+02 2.512189370435067417e+02 2.719913755602378842e+02 1.237326636617429614e+02 2.939951219495833357e+02 1.686366002602222807e+02 1.800181056076297068e+02 2.288525977776352818e+02 2.717306800175948638e+02 1.565292507387619594e+02 1.445460932655216766e+02 2.092313282690445249e+02 2.370375511382032698e+02 2.880525812713749474e+02 1.172567175017127141e+02 1.112412797274302250e+02 2.246954385922853135e+02 2.812359340959551446e+02 1.004168603505609241e+02 1.005387863078678805e+02 1.815971195408835683e+02 2.811251817522295937e+02 2.605765849402707772e+02 2.298114360271968621e+02 2.557293814584297706e+02 2.542416589790913122e+02
+2.943583269632734414e+02 1.442274778682184717e+02 2.700917391987959491e+02 2.527420049761408904e+02 1.527279900348522688e+02 1.841979337126335281e+02 2.902442440856567600e+02 2.889101481258517765e+02 1.828125218264408716e+02 1.133179379993730862e+02 1.484787634874768116e+02 2.676352293304336740e+02 1.452118425579454311e+02 2.636966617786087568e+02 1.313546620759107100e+02 1.834019443937838787e+02 2.892465421328221282e+02 2.575015388377624959e+02 1.970702343003932242e+02 2.507528167727347181e+02 1.724897096143170074e+02 2.664268628760375464e+02 1.365257050051324370e+02 1.198011035974838308e+02 1.176831988053894520e+02 1.070946883963453899e+02 1.964638491125322446e+02 2.570844982939356100e+02 1.593905150913052466e+02 1.202569936867807598e+02
+2.734271498156417692e+02 2.352133531486530842e+02 2.590835237087205769e+02 2.260994493040042528e+02 1.805421354394846105e+02 2.728408805160995598e+02 2.367263522625478913e+02 2.580210451062748689e+02 1.204524877415260562e+02 2.946465680607327613e+02 1.547220269335912803e+02 1.186203172746691337e+02 1.923878728892914864e+02 1.094127410697402354e+02 2.222837240826847278e+02 1.529333599077602628e+02 1.861450256630199647e+02 2.125583079944122176e+02 1.527591657960447264e+02 2.694001797345342766e+02 1.986063989766776388e+02 2.192493126389772442e+02 2.986827335637019587e+02 2.790660387254000625e+02 2.781487003899754313e+02 2.564198676846006606e+02 2.597551240338123648e+02 2.358970425952163907e+02 1.951628676328612357e+02 1.078208269500064347e+02
+1.190762776130697205e+02 2.951075493308472346e+02 1.091043363430719069e+02 2.824365312299846664e+02 2.445811468414383398e+02 2.538090805786315514e+02 1.230092364266577363e+02 2.633887649939744051e+02 1.865216093980499181e+02 1.540388898662323243e+02 2.047343894245035756e+02 1.431412534309083640e+02 2.857794001060171922e+02 1.492366175285521592e+02 1.380934567887849198e+02 1.331831467466375898e+02 1.149412013934811796e+02 2.205070844660474734e+02 2.939252657951740844e+02 2.049464694042562769e+02 2.047902832862141054e+02 1.810793422252176015e+02 2.005356992447976836e+02 1.381400138775680375e+02 2.582445444487385657e+02 1.698212931623984616e+02 2.252085951830697468e+02 1.808378144669676999e+02 1.307311344108444473e+02 1.050024101356033697e+02
+1.722314120162143354e+02 2.530014253763471856e+02 1.298340795948372772e+02 2.948664870226410812e+02 2.383106068289312702e+02 1.822969205106659558e+02 2.285226769051377005e+02 2.759417691711663565e+02 2.120970517474504220e+02 2.831046044310812704e+02 2.320579821788242612e+02 1.286125039667014960e+02 1.609837368065715282e+02 2.931112965353385107e+02 1.441758663366052531e+02 2.810263276191118962e+02 1.239857273771131077e+02 2.399447548605567988e+02 1.460208836055017514e+02 1.205325462037979491e+02 2.112513935912650993e+02 1.036793750016967692e+02 1.113202625217208777e+02 1.646612561683649574e+02 1.018350908838390581e+02 1.263835026124204859e+02 2.766683711501553944e+02 1.682407929561517506e+02 2.677103056024840271e+02 2.147294480454548307e+02
+2.763536852866382105e+02 1.511976958084401872e+02 1.026794659371155944e+02 1.805990415690671398e+02 2.442493962549426385e+02 1.881796213041043018e+02 1.028768312506858535e+02 2.787706953534510603e+02 2.589640601731795755e+02 1.730107396932538677e+02 2.218419822849910190e+02 2.651646152747807719e+02 1.476149140151474342e+02 1.986450675254654072e+02 1.050693447352362853e+02 1.819666738706916931e+02 2.873544952103893593e+02 1.472060704631180954e+02 1.297023844405691761e+02 2.824778443572924971e+02 2.918073394139615289e+02 2.128134400148996974e+02 2.223096450508596149e+02 2.761940547406351811e+02 1.348708672340777639e+02 1.857009592938832441e+02 1.062906640064134649e+02 2.104442283262811202e+02 2.812954268214299418e+02 2.739038950945439979e+02
+1.837264129055918147e+02 2.399207190527903322e+02 2.843910623120511900e+02 1.773207161532972975e+02 2.056581469496123873e+02 1.558029517788254168e+02 1.458438122541016924e+02 1.893030782939712253e+02 1.139027557376393673e+02 2.228775749423569437e+02 1.367670384452707140e+02 2.854480456674787092e+02 2.424985140340279202e+02 2.940521113211518696e+02 1.330693282221190259e+02 1.212599008475133076e+02 2.754747741586869552e+02 1.062856492128348549e+02 1.212724485003486166e+02 2.100514698158626743e+02 2.547262582240854272e+02 1.999488755181088777e+02 2.578561029518564283e+02 2.784200494851090752e+02 2.728829168298310606e+02 2.071711407548560544e+02 1.708729380756020362e+02 2.726254883308487251e+02 1.104364015278258364e+02 1.175773277008901090e+02
+2.554381337818412305e+02 1.634513906120204183e+02 2.309962436793083214e+02 2.460443770945291249e+02 1.618890365991254896e+02 1.046310291743186980e+02 2.772116654811295575e+02 2.098555252827713957e+02 2.309383801112169863e+02 2.845300950466865402e+02 1.268119123926061320e+02 1.697885006171669602e+02 1.901887742560337529e+02 2.605757830463372215e+02 2.755463791239279772e+02 1.771647294768940810e+02 2.403902735905423356e+02 1.774352552408031443e+02 1.796883744424403631e+02 2.736192366006921475e+02 2.118505050785533967e+02 1.873353967662169453e+02 1.802980863638028950e+02 1.869858546159753132e+02 1.200946851663063342e+02 2.350811068219035178e+02 2.018941614745772313e+02 1.010158706413519525e+02 1.661546933057649937e+02 2.570882207683835077e+02
+2.856134023048114159e+02 1.356279054667102741e+02 1.225310201562991494e+02 1.529777144242077327e+02 2.936506440162480658e+02 2.589580133771784176e+02 1.864782805190425279e+02 1.931182124516369640e+02 2.913608028278327993e+02 1.555662042949096531e+02 1.173676742008071301e+02 2.242990267171766732e+02 2.651338851871976203e+02 1.128980005738893482e+02 1.283582653966309408e+02 2.071495534530326097e+02 1.241509031508031740e+02 2.393403040292282640e+02 2.829812266966206380e+02 2.294799861563923287e+02 2.129576840814710295e+02 2.165539860914115877e+02 1.357366103660294243e+02 2.396252028023287153e+02 1.395106368224716107e+02 1.700689743264745744e+02 1.253435651632085950e+02 1.508112259783626428e+02 2.310267786371028933e+02 2.311667616985857876e+02
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/random-int-data.txt b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/random-int-data.txt
new file mode 100644
index 00000000..4fd11b75
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/random-int-data.txt
@@ -0,0 +1,100 @@
+-67 65 82 64 51 1 -12 2 -84 -52 12 82 -45 -84 -41 31 -49 36 -70 40 -74 -99 32 64 -6 43 -53 -43 43 96
+-58 20 25 99 -25 78 -6 59 -23 30 36 25 -8 83 -43 -7 -8 42 -90 96 46 88 31 12 68 -21 -6 7 78 -19
+-66 -51 0 13 42 -43 -30 -29 20 10 -24 -5 -42 38 -56 6 1 -80 -65 -91 89 64 -21 49 -84 41 6 -78 71 -2
+-50 -84 -50 -66 46 -88 -10 -28 -25 6 -7 10 -35 86 41 -17 72 -67 13 -67 -76 -84 -15 35 67 40 90 38 -1 -47
+-51 27 -48 26 -73 -46 -68 -56 -38 -4 49 -64 57 -86 -80 70 50 34 84 97 -76 3 -54 -89 -7 -53 15 36 -28 85
+2 -59 4 30 70 -42 -26 -1 27 -90 -18 95 -10 -36 43 24 86 -8 -100 92 80 -40 17 -93 -81 54 -8 84 -53 38
+-80 0 -71 -41 -33 9 -61 0 26 80 64 67 74 68 -72 78 -72 -52 -19 37 -33 -24 -11 -71 -53 -16 25 56 -74 0
+71 -23 49 -36 -43 -70 82 69 -100 -27 50 20 30 84 -33 90 49 39 -52 -51 -86 -76 -72 -88 12 91 -96 -61 -87 -47
+21 39 1 78 68 -80 -54 71 17 -94 34 -20 14 -5 -24 55 -84 -50 -90 -24 -79 -81 53 -50 22 -13 -92 78 -22 -50
+-47 -73 77 -93 -20 51 -37 -14 -37 -18 -8 -14 -71 29 -27 -5 54 77 -7 -2 15 -23 98 -34 -65 -78 -77 -90 -5 -35
+92 -33 71 24 43 -19 50 -40 -48 -33 -51 -14 23 40 -78 -14 -76 1 52 69 93 5 -13 30 -60 -20 -54 49 -52 93
+32 -86 21 -41 -86 -38 97 -35 -37 -89 -15 -18 -46 -37 8 63 -63 -61 57 50 43 -27 -45 98 -56 -81 16 -38 -25 -28
+-18 19 -52 -86 92 -72 23 35 20 57 69 -22 52 -66 -74 -29 -1 -10 -97 22 -97 -93 -70 87 85 -31 42 -29 -10 -36
+78 80 -93 68 41 84 -37 -62 38 -9 99 -60 90 47 -33 -40 -59 97 -28 9 35 -6 -60 -83 -39 -97 -25 -78 95 40
+79 -35 -45 -46 69 10 29 -88 98 -44 66 11 45 -58 -11 -25 51 -44 54 30 59 98 35 -28 93 86 99 19 -27 -83
+80 77 -72 57 -35 -27 86 -67 11 77 -28 -89 -30 -31 -72 64 -95 -75 92 -32 -96 -14 6 -83 -66 -58 71 -17 58 -53
+-1 17 -72 82 -57 -48 -7 -44 -80 85 -99 -9 27 -11 24 13 86 18 67 -9 12 77 98 49 49 12 -82 45 31 -68
+-13 -75 -26 17 91 12 -95 -62 -54 -60 22 50 86 58 -11 -11 -21 31 16 -15 67 90 1 80 -57 -98 35 -54 51 91
+28 -75 -31 49 0 73 75 -66 50 -77 -20 82 -40 -90 -28 32 -44 89 -75 -33 -11 -19 -55 79 18 2 -39 -49 78 -72
+14 56 78 69 -40 -20 -39 71 99 -89 60 -82 -1 -77 -42 94 -41 35 72 11 -13 89 -52 -41 -93 43 -39 -61 68 -4
+88 18 -90 -75 -49 46 -28 -48 -69 -64 77 -8 91 -65 62 -27 -19 34 10 78 82 49 -34 63 78 -88 -17 -37 -85 91
+4 36 -77 -75 -12 70 42 8 7 -31 -69 -74 -65 18 85 -92 91 16 -15 24 -74 -56 71 -70 -90 20 13 73 -68 -65
+92 22 -31 -73 -59 -78 -20 -11 -61 36 -40 34 -96 -12 51 -45 -12 12 -3 -42 -71 68 -8 -91 50 -73 -96 -46 -38 -4
+-87 44 -58 -83 70 -81 32 29 -79 45 -64 -52 57 73 -80 69 7 -22 31 -71 -34 -33 47 79 -17 6 -77 -89 3 50
+85 2 73 -88 -99 -13 -76 1 -90 51 30 -52 75 -2 -8 10 -83 -40 -5 -79 82 19 79 94 49 4 66 -76 6 -48
+29 -34 66 -93 45 -1 -98 92 -92 29 -10 64 -23 -81 -73 -62 -18 37 -29 -50 -52 90 -28 24 -4 -67 -33 25 -78 93
+57 -46 36 -16 34 -59 -96 -86 64 2 28 42 -32 6 -17 37 38 -40 -92 55 -22 -42 11 -77 12 81 -89 -39 -30 -39
+-72 -68 -41 -5 93 55 24 -6 84 77 30 33 -51 -62 6 -5 -83 60 -1 -64 7 -7 -92 31 5 -21 -34 -14 21 -33
+26 -75 -36 -54 -21 -38 -49 -20 82 73 -84 -5 -69 84 -87 12 7 -67 -40 -50 -35 -65 80 -83 -2 1 34 -16 91 82
+61 -21 1 -64 -56 -61 74 16 0 38 51 34 -35 37 -28 -52 -14 61 14 58 50 27 -43 -27 14 56 -16 -78 50 -89
+45 -47 -61 68 -41 -70 14 -51 49 -84 64 -65 88 -39 -88 28 -55 -18 81 -2 -1 -45 65 -6 62 16 71 71 -1 47
+47 60 22 -42 -5 -74 12 66 89 -82 -85 65 74 0 -18 56 -39 84 -65 -42 -33 -60 23 33 -8 -72 3 -64 -3 -25
+-70 11 -19 -12 -1 -50 -89 -61 78 28 55 92 -17 86 -17 -45 -31 68 -24 -99 -59 27 79 -2 21 -80 54 9 14 -70
+-38 52 -99 50 -46 -63 -74 -41 -43 -62 -81 38 -99 17 -94 -6 44 -20 -13 -30 71 -43 43 -28 -8 57 -93 98 4 42
+-17 -27 -60 -22 86 -49 39 -83 72 -16 82 74 73 -29 16 -59 81 -60 -96 51 -62 -55 -79 -31 -15 -67 -18 -83 -61 -86
+28 37 -44 7 -17 -10 -65 8 -78 -17 -46 -5 -35 -86 13 -16 27 24 60 -12 -48 -45 16 -33 70 -45 -63 -60 21 70
+-75 -89 -93 -93 62 -44 -39 46 31 57 72 30 -65 29 66 -53 2 -2 71 -90 -73 -40 -63 32 68 30 25 98 38 92
+88 3 5 73 -2 -61 -94 79 99 94 71 -83 -40 80 -79 -14 -34 -99 -52 27 23 13 13 -35 -74 13 43 -19 2 -62
+92 -47 -27 9 -68 -86 -57 43 9 -81 -9 69 52 -28 80 -13 -6 -44 -81 -89 -10 30 -64 86 -76 -11 -100 15 12 -62
+76 -42 39 70 74 79 84 -52 18 -58 78 53 89 58 -32 20 -51 35 12 37 -70 -21 5 97 67 -25 -25 -10 2 30
+-84 26 -60 -34 11 -27 47 85 -89 29 54 -53 66 -9 12 4 92 70 2 -12 -55 72 -62 -79 -8 68 -19 12 -8 -100
+78 -97 -76 86 -47 42 99 -3 9 49 -84 86 26 43 -26 90 23 -66 -37 -35 25 -12 -42 -12 96 -15 48 87 -95 -12
+-60 57 -30 -4 -84 24 -82 -5 34 56 76 81 -64 23 32 34 -41 -48 -6 77 -42 64 87 92 82 59 9 -71 -56 -45
+-74 -90 -27 93 33 15 -35 -73 78 23 17 -28 9 63 9 35 15 32 0 -4 -32 54 -76 14 -14 -8 16 -43 -81 57
+-2 22 85 -33 -48 74 64 -59 -27 17 -65 27 -50 -81 41 -69 -26 -29 -83 48 -81 51 58 -62 -63 -55 -63 39 32 -34
+98 -99 13 25 -10 43 -62 50 82 -90 -51 40 -71 82 27 -73 19 -62 37 10 -21 45 -94 -45 -41 -3 44 86 -2 27
+-80 -89 -57 87 -42 19 32 -49 37 -4 -30 54 46 -3 -92 89 60 37 -86 38 61 93 45 -45 -86 54 21 45 50 -53
+7 -68 71 -6 41 -72 67 45 15 46 85 59 82 19 65 75 -62 -35 47 -51 23 41 -54 27 -99 14 9 69 60 62
+99 -51 83 -47 -19 -57 -22 51 -52 52 92 80 69 1 -31 0 -19 -54 73 -5 3 82 -86 -84 -95 -83 -92 -52 -90 -79
+43 -75 62 99 66 -43 -38 -21 23 35 -63 -61 -46 5 3 -90 -28 55 87 89 -29 -46 23 -61 -5 10 -70 -63 50 -14
+39 38 10 66 -24 -45 55 -33 31 29 44 31 73 44 6 69 -21 -58 -3 93 -51 86 -16 -88 88 -30 75 78 -20 -12
+-11 11 -19 40 82 6 10 22 90 -78 -88 -49 72 69 -62 42 -23 22 -38 -98 0 -3 -43 20 9 18 -67 -7 22 21
+99 80 -55 74 43 -31 60 -26 -29 -6 75 60 92 -42 85 18 1 1 -74 -44 -12 72 -57 -98 99 62 45 -40 -39 -75
+50 30 -18 -29 -80 -59 -96 46 -99 -76 -13 -75 -93 -95 -45 62 -37 53 -96 57 -40 3 14 -45 -84 58 75 16 37 -6
+1 -47 87 -99 -22 -22 -20 71 -91 13 35 -80 75 65 -87 16 -37 99 -60 49 52 18 55 -11 18 24 -65 -80 8 -79
+-8 -87 86 -9 -64 -76 59 -52 -89 18 13 70 44 93 99 62 39 49 83 28 72 -71 -13 -71 -22 44 -87 73 -68 80
+41 -26 44 -63 -26 -83 -44 63 -51 -48 52 -8 55 73 -45 84 40 45 32 -34 -78 -46 -79 57 -40 11 34 -75 -20 91
+94 9 -35 -5 3 59 -63 2 -7 -72 -34 -70 78 99 -29 37 11 91 61 29 85 -15 59 79 47 41 19 -18 -92 47
+-59 -89 57 -72 -79 88 -85 18 -35 -96 -57 33 83 70 -55 -16 -21 72 -53 89 -44 -86 9 -44 -26 78 2 -93 -75 6
+55 73 89 80 -69 -93 -39 -88 62 49 91 -68 87 -26 40 16 -49 -53 -57 23 -97 39 -78 44 -15 1 60 -87 43 -42
+-2 -23 -74 -80 -59 52 -58 68 64 97 -86 -41 -88 35 49 3 -40 90 34 -2 3 13 -95 8 -1 6 75 92 19 -31
+57 76 65 3 37 -72 -43 57 64 -23 41 87 26 76 -18 -32 28 47 11 47 -33 -12 4 81 -92 -47 -81 43 -2 5
+68 74 66 -89 -95 -40 -78 -58 -54 -20 2 20 94 -35 58 -20 41 77 0 95 39 14 36 -40 -85 -60 -63 82 0 58
+-61 -99 61 10 -2 -31 -70 37 -77 -10 85 95 -28 70 -81 -78 -68 -33 -77 77 -6 42 -100 -68 -59 -86 -42 -74 35 -32
+64 -1 -1 -64 51 11 -65 47 -87 -8 5 58 22 -80 68 -25 24 59 -25 -75 95 -22 -73 27 86 -39 -98 -1 -17 -32
+94 -50 -53 -62 -53 46 50 38 -95 -77 40 -38 -23 -14 -68 -20 -47 23 -8 -12 -92 -69 -97 30 94 -45 47 -81 82 -60
+28 67 -48 4 74 27 -30 12 -32 35 91 -83 30 -55 -7 79 97 11 93 -45 -79 31 78 65 84 -23 -26 17 -61 43
+44 60 -88 72 31 98 55 -4 66 -14 10 -81 -40 66 -15 21 69 -98 34 3 75 18 98 -6 47 -39 31 -19 30 -51
+-6 18 -93 31 51 -20 -16 -33 -38 -19 71 4 -53 23 97 1 -28 -72 -44 -48 45 33 -76 86 64 49 -45 -34 -9 -76
+-19 8 28 -27 -51 -58 -36 63 -92 -95 70 41 -38 -49 -95 -100 43 97 -60 -5 -56 45 -13 -3 20 -10 -21 -85 -5 63
+-74 -74 -74 -39 -57 -12 51 11 -11 -22 -26 -54 71 24 -37 77 -90 77 75 86 -53 3 69 -99 -82 -59 30 81 -21 -86
+67 63 87 -15 60 -82 87 51 -39 -49 -16 74 51 17 6 47 98 89 -20 -98 97 -61 18 34 37 -36 37 -96 90 44
+53 -8 37 -76 -61 70 -77 -11 98 -80 12 -80 6 -89 8 -59 -69 -100 -52 -30 95 -58 61 29 52 -64 -51 10 16 -58
+54 -10 49 62 76 -25 80 36 13 5 59 -65 14 41 26 -78 23 -45 -51 -85 91 -43 -61 -37 94 27 -11 49 98 48
+53 -51 27 34 28 -53 18 17 31 -31 59 71 -34 25 54 -84 -34 -24 76 38 -36 15 -1 56 2 -12 0 26 -38 -62
+4 -94 -63 -21 -95 -42 -12 86 14 -86 -1 80 -48 62 -47 -52 3 91 -86 11 79 32 -24 -33 -54 19 -17 28 -33 -97
+-18 41 84 1 -83 48 -99 -64 26 -52 3 -64 68 -98 93 -79 -97 11 88 74 41 -31 -42 -35 -66 18 97 -30 19 -93
+-19 42 61 -91 -20 59 -11 -64 -60 85 -6 -71 33 -52 46 51 -86 -77 74 -4 74 -81 1 -39 -30 12 -12 20 66 60
+86 1 -67 -91 -92 -22 91 -90 -45 26 53 -6 99 46 -29 -40 -99 57 -45 -47 -3 -86 90 -78 -33 73 90 -51 -75 2
+88 -34 -2 30 -18 35 -23 90 99 -49 90 -79 94 -38 48 67 -35 -58 81 -24 18 -54 83 65 -58 -12 13 89 -59 57
+92 -99 94 -73 97 -78 -93 98 -78 95 -21 -17 -11 -92 69 -60 86 9 -36 -18 -33 -39 -65 74 -65 37 -49 87 -28 -81
+-95 2 -18 20 93 54 86 -63 -5 -89 17 -9 75 -66 -64 -82 -46 -48 82 5 -89 19 -32 -45 53 -47 21 -9 40 34
+86 87 55 -41 49 -10 -6 -7 -99 23 90 -50 -9 -81 77 65 29 -21 22 -82 19 48 -24 -72 75 -66 -69 -17 72 6
+13 37 96 31 -65 -54 -91 -27 84 52 -9 -28 85 96 14 63 -34 -29 -85 78 -75 -44 -30 -5 4 72 -45 6 13 71
+96 -69 67 59 69 46 80 42 81 30 89 -45 -10 -44 25 31 89 16 -36 86 31 92 1 5 -2 92 -11 77 20 40
+-48 98 -100 30 54 9 84 -88 5 48 93 56 -94 -89 81 33 44 -30 -95 -98 29 -33 13 -26 -59 -80 -68 -40 12 11
+82 -63 -30 -67 54 -68 50 -63 -91 -68 -45 -66 -58 16 -25 9 -50 -59 -55 4 -2 0 -63 67 30 -21 -8 55 21 -68
+9 -8 56 -6 84 81 -63 -35 81 56 -50 -54 96 -51 86 0 66 -4 -18 65 -26 -57 8 78 -54 17 18 86 21 68
+9 38 33 16 3 86 -57 28 -6 -44 -42 -2 3 -71 -86 23 34 -29 33 -30 67 63 -11 76 -65 92 30 -66 61 1
+-72 -85 -1 64 -79 -78 -1 15 -35 -32 80 33 -36 -82 24 -65 -23 29 38 -31 87 55 -18 -52 -77 -22 -11 54 62 -48
+65 -77 50 16 41 -94 -21 16 85 24 60 86 -78 -13 69 46 55 5 -27 -18 -6 -1 59 -62 -58 -99 -49 -84 89 18
+-21 -15 -55 60 78 98 67 94 58 -5 -36 42 36 73 13 72 -78 -68 41 -37 -33 -46 -80 40 13 -44 -71 -8 15 -77
+16 -93 -42 -10 14 57 -54 -3 -44 -21 30 -93 71 25 -60 -94 93 5 -94 -84 -72 1 -50 -34 23 -15 15 18 72 -29
+-22 -82 -30 -87 -88 -25 46 32 -30 -55 -79 -85 71 -89 -57 -88 21 53 -100 -64 -92 -97 56 -51 -17 -34 -31 6 -68 84
+-53 -51 90 -38 -61 57 -63 67 22 22 70 44 43 97 20 -62 -74 72 83 -32 35 -66 -29 5 -88 55 -94 94 -19 55
+57 51 29 -42 -21 63 -57 7 -48 -87 -60 -55 -77 -53 -1 -85 64 60 53 71 41 59 -61 -73 -12 86 90 10 -60 -38
+2 -9 14 67 -2 70 11 -78 26 -55 -86 -25 99 66 63 64 46 59 66 -37 -78 -70 63 1 -20 2 46 50 34 19
+-87 -40 75 -11 -88 -80 -95 -20 -92 -28 83 24 88 -39 83 -36 -61 56 99 -73 -59 -85 -49 -10 91 12 -79 -18 -15 6
+35 -74 -4 -15 40 -87 81 -22 -12 -46 14 9 98 -35 -2 -12 57 -74 -52 71 70 -70 -61 -47 89 44 33 -100 54 42
+-4 -34 80 -12 -15 -9 -8 -29 89 -55 -33 89 16 -33 -73 -82 98 27 88 59 48 20 -67 -21 -86 11 -50 46 64 -8
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/random-uint-data.txt b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/random-uint-data.txt
new file mode 100644
index 00000000..c1ec7a5d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/random-uint-data.txt
@@ -0,0 +1,100 @@
+52 34 59 34 64 20 89 69 26 93 95 32 17 93 77 49 51 60 51 27 60 10 61 2 16 30 41 68 65 0
+43 74 11 37 32 61 72 29 47 21 7 47 68 58 22 33 29 37 14 45 71 1 67 79 69 9 6 6 95 78
+86 20 68 67 43 5 77 70 96 37 79 71 35 30 22 4 56 28 33 50 97 17 85 52 21 5 57 19 35 97
+15 21 99 4 54 39 15 29 68 21 50 76 64 51 79 0 24 5 65 95 90 51 99 82 9 80 61 32 2 38
+46 97 53 96 51 84 18 42 30 52 82 77 72 59 1 67 72 16 14 63 70 94 20 27 38 70 86 95 41 75
+2 35 45 63 92 76 81 60 62 72 90 46 47 33 1 30 54 22 50 85 63 61 22 79 45 53 45 33 8 28
+43 41 14 79 2 77 95 16 74 19 17 78 47 12 68 55 3 2 77 10 35 86 52 33 47 26 98 42 48 86
+18 32 85 4 91 10 69 68 15 42 58 77 88 64 91 43 56 30 92 11 52 23 43 92 65 50 68 8 80 81
+20 57 38 44 62 10 80 25 32 11 70 32 13 50 41 55 44 0 28 83 5 1 34 94 55 52 56 24 76 21
+36 43 59 28 10 59 4 41 64 98 54 66 44 3 37 41 67 10 85 23 58 35 58 34 35 79 46 18 1 51
+72 63 85 51 23 91 3 56 35 72 38 26 91 0 68 98 27 10 12 71 30 1 14 47 47 88 17 68 78 46
+53 47 1 89 95 53 11 45 46 6 91 20 57 35 58 79 60 3 21 45 4 18 59 96 36 12 13 83 52 46
+33 91 82 24 97 28 50 43 65 22 14 44 32 57 33 10 34 77 58 6 27 90 26 77 62 81 87 96 0 32
+96 44 59 3 47 18 0 91 83 68 48 26 67 82 39 18 88 47 80 0 57 40 30 7 57 74 49 37 57 65
+18 44 0 46 47 30 65 79 53 8 26 42 80 76 30 61 82 93 78 25 89 49 55 15 86 63 35 74 41 11
+18 14 40 90 91 79 80 36 33 72 25 56 73 28 65 27 62 17 60 84 23 70 32 26 77 97 47 94 72 1
+82 36 68 10 83 83 40 42 51 55 82 6 37 69 93 82 64 13 54 30 45 36 87 59 1 80 39 93 11 61
+78 34 53 39 64 52 52 22 33 69 71 82 57 37 78 52 62 31 87 68 70 5 85 94 41 75 38 45 84 22
+36 23 51 15 61 76 88 85 36 96 21 60 34 61 72 60 69 81 5 17 16 82 30 61 39 96 40 70 42 71
+45 30 60 50 78 90 36 40 11 85 42 14 61 3 66 53 68 14 41 30 97 74 79 91 64 8 1 53 52 33
+55 24 35 4 49 51 44 70 93 78 25 65 1 29 96 12 93 94 13 65 4 47 84 10 90 12 36 48 21 36
+17 74 61 54 21 83 35 97 47 90 57 11 16 39 95 78 23 40 23 55 17 51 20 73 98 93 50 32 58 4
+84 76 78 33 50 29 11 20 5 93 63 22 91 92 44 85 62 25 63 92 36 26 57 33 8 74 69 64 78 91
+58 34 91 71 37 84 28 90 28 37 97 7 26 44 59 18 58 64 31 83 16 17 50 36 65 81 19 63 66 64
+20 71 1 35 87 5 47 27 6 95 86 75 74 9 94 93 26 5 61 3 97 88 0 57 21 64 46 24 86 12
+23 53 31 39 37 77 29 51 85 10 41 91 67 82 50 91 53 72 75 81 50 63 52 92 83 49 92 50 26 9
+38 43 13 87 11 45 28 16 27 61 70 52 77 9 57 42 73 22 32 95 23 91 93 63 16 44 26 9 93 83
+77 68 21 96 44 45 9 2 14 2 67 90 55 82 67 21 18 64 31 16 2 27 86 42 34 72 22 98 91 33
+89 66 87 76 0 32 81 39 55 76 23 56 51 53 75 79 30 86 1 66 64 14 46 84 92 19 95 47 77 97
+88 79 61 26 66 92 54 22 15 25 26 0 76 27 17 59 48 4 42 61 65 91 0 62 55 79 29 88 10 11
+24 89 91 39 56 36 16 86 41 31 14 35 7 71 77 74 33 11 49 7 96 83 31 63 90 49 96 22 58 86
+45 7 93 44 50 54 83 80 3 36 11 38 14 17 10 84 96 94 26 34 26 75 72 0 41 89 96 47 39 88
+0 95 2 22 68 38 0 3 51 6 13 10 14 49 75 69 25 39 63 67 12 80 37 77 10 90 60 35 84 37
+98 56 99 75 49 66 3 33 65 86 1 79 91 23 69 98 91 73 95 45 64 26 99 75 49 77 71 55 42 18
+80 39 26 94 85 42 91 27 14 57 36 34 10 44 38 77 23 39 54 25 32 5 17 9 66 3 67 94 20 11
+88 80 30 77 72 67 16 75 84 87 60 89 21 94 24 11 63 8 79 89 37 18 6 82 76 70 81 95 67 95
+92 36 55 55 43 18 76 94 30 74 95 38 45 95 54 87 22 57 4 65 15 90 90 38 73 24 67 24 36 25
+98 30 34 68 11 48 42 38 80 23 12 91 77 22 65 2 88 31 70 12 46 63 17 63 27 76 21 71 70 7
+76 29 56 12 41 66 22 96 8 6 7 13 27 10 77 90 2 76 30 24 81 88 19 16 93 13 30 24 98 96
+45 94 89 41 52 14 71 88 80 74 7 85 44 69 65 88 4 15 84 97 86 5 53 15 39 34 9 10 45 20
+95 47 45 96 71 10 36 10 90 49 7 68 14 46 97 89 82 58 69 34 93 77 90 9 27 91 29 27 22 17
+80 6 29 26 34 59 10 55 32 53 18 72 39 40 29 35 52 64 2 64 38 83 16 46 53 20 19 8 10 67
+47 44 79 32 58 82 26 69 0 26 4 73 95 98 61 96 20 38 3 92 6 5 25 24 42 49 15 92 80 16
+74 37 86 84 47 15 56 36 43 59 72 72 74 73 49 54 26 5 40 80 78 48 4 65 31 70 14 91 88 72
+91 45 73 62 83 40 49 3 27 79 80 90 3 3 58 44 7 66 77 42 37 25 20 91 47 63 71 7 72 22
+51 3 36 90 45 84 18 55 75 78 42 62 86 63 65 67 46 75 1 79 2 85 85 60 36 92 34 89 66 99
+36 99 0 63 89 65 54 58 52 28 98 27 67 1 45 71 35 52 55 55 44 23 46 89 83 37 8 2 92 75
+51 13 71 2 9 95 23 60 24 98 86 43 32 16 75 70 92 78 26 84 29 14 35 55 61 89 73 59 76 44
+59 57 28 92 33 50 70 94 89 67 70 38 53 16 35 70 35 92 39 78 88 80 71 1 93 21 87 64 49 84
+29 6 17 45 38 65 41 48 81 69 34 12 2 14 41 71 16 92 69 27 61 74 58 20 75 19 39 66 57 82
+12 8 14 85 97 31 58 31 20 76 6 42 29 95 60 94 15 84 86 69 73 52 73 57 12 66 89 65 60 84
+20 74 96 34 83 41 8 37 22 36 30 25 20 8 58 73 9 75 76 73 84 38 16 24 95 95 68 66 43 19
+33 15 25 80 48 69 63 39 16 45 6 77 14 46 38 15 64 85 49 5 59 28 9 4 23 68 59 26 1 75
+35 45 3 6 34 59 55 51 81 59 59 93 18 41 8 44 88 7 86 4 88 90 24 54 73 62 89 13 44 92
+72 60 68 83 39 32 30 15 98 92 69 94 51 48 9 0 4 1 30 92 40 1 61 82 66 4 39 10 93 87
+12 20 34 72 33 31 67 71 67 47 98 76 53 29 17 17 13 31 43 76 25 37 8 39 9 5 96 41 87 66
+96 30 2 57 57 10 14 17 86 76 35 94 42 54 18 24 19 34 12 42 18 11 83 65 86 38 45 17 60 70
+19 62 71 99 35 60 96 30 44 80 78 15 14 5 32 43 10 26 81 72 41 98 30 87 75 8 53 33 25 95
+22 0 38 57 88 7 47 83 49 41 52 1 14 93 41 3 18 42 15 57 28 74 97 2 18 48 64 25 77 69
+36 95 65 81 44 41 6 74 62 16 72 81 15 72 31 5 22 17 19 6 7 15 82 10 31 93 11 45 41 11
+22 76 14 62 34 65 82 5 57 51 51 5 1 6 17 43 28 31 90 99 48 14 96 49 95 40 87 85 40 51
+95 13 99 46 52 80 4 18 95 94 0 46 10 80 3 34 60 15 86 10 28 59 6 35 14 93 18 8 3 65
+57 37 6 31 45 85 42 34 47 92 48 40 7 17 5 74 67 62 0 74 58 21 23 3 5 24 50 54 99 19
+24 14 10 4 36 33 88 51 40 66 40 56 65 23 43 13 82 62 27 88 89 91 36 37 19 11 50 39 96 68
+82 7 39 80 52 90 57 17 61 15 51 71 82 15 21 44 4 46 75 50 78 18 63 75 98 45 6 16 57 25
+0 26 56 74 62 84 71 42 25 86 68 10 73 0 71 6 15 99 1 51 45 42 5 49 3 35 84 29 15 36
+60 78 76 3 95 73 36 57 35 44 50 42 85 57 18 69 37 42 75 79 15 12 74 72 51 36 79 3 58 71
+69 24 16 96 17 25 21 94 71 78 74 39 7 96 3 12 13 16 7 99 65 72 12 28 75 44 55 8 75 67
+3 13 92 9 92 83 69 91 65 92 29 63 46 1 4 62 29 85 47 93 81 3 15 23 63 50 17 9 13 13
+9 18 46 53 0 86 10 41 87 89 24 25 70 73 8 23 27 76 66 46 58 39 28 1 99 64 59 13 7 68
+72 57 90 50 47 57 34 27 94 39 23 31 74 77 45 74 18 49 96 8 95 50 20 81 73 55 72 2 32 15
+87 77 74 5 99 86 5 65 97 39 17 74 48 87 20 66 28 2 18 58 49 22 79 23 36 30 64 20 71 32
+35 43 66 96 63 77 18 90 47 86 94 19 88 79 23 12 38 4 56 42 36 2 77 1 3 17 64 52 31 24
+80 2 4 39 61 60 74 83 28 28 61 10 71 82 44 29 55 30 1 58 81 79 34 41 85 82 84 55 22 12
+76 77 58 92 90 0 54 28 77 68 58 12 1 81 37 28 19 60 71 59 25 83 8 49 52 11 28 65 59 70
+14 1 92 90 5 48 28 78 1 42 54 43 60 83 72 19 28 33 12 52 18 15 56 95 39 33 37 70 53 23
+53 76 26 31 18 81 83 79 25 1 82 43 50 24 63 49 5 23 66 37 80 41 63 77 2 28 15 21 32 93
+80 41 81 7 37 95 19 42 57 30 12 25 29 34 41 45 87 8 20 95 63 16 99 55 16 61 16 36 81 25
+32 30 2 81 23 25 88 30 37 76 52 77 79 58 21 58 10 0 13 32 72 80 3 75 75 25 21 9 79 18
+26 13 36 63 43 2 50 41 65 18 88 44 82 75 73 24 1 30 54 68 15 18 22 50 41 99 27 96 51 53
+22 4 76 11 85 88 28 75 1 2 92 66 63 3 58 43 53 5 1 24 99 90 87 87 41 1 85 37 98 92
+16 39 13 88 60 55 35 11 34 23 23 85 79 41 79 87 65 78 47 83 88 78 35 84 30 61 37 58 25 55
+27 33 15 76 82 79 73 92 93 78 18 38 22 96 63 92 41 9 50 96 14 55 8 60 15 61 97 56 43 22
+42 34 94 11 35 70 50 49 36 34 59 14 87 84 88 83 4 69 29 99 35 24 2 18 97 97 74 88 91 49
+33 25 71 12 60 2 48 22 81 33 27 95 54 25 53 14 20 43 26 96 98 37 64 27 72 33 78 45 22 61
+61 21 91 38 92 47 26 90 78 96 58 41 21 72 81 61 55 9 55 60 28 25 25 74 73 81 64 16 49 39
+90 89 12 93 91 23 82 36 63 58 73 81 49 32 60 39 4 84 73 16 18 26 58 85 46 28 82 91 72 7
+79 41 28 76 33 70 47 6 18 64 40 54 45 61 28 63 87 83 38 9 65 68 62 45 80 63 89 29 20 40
+20 59 58 23 61 79 35 19 78 2 26 48 90 34 69 31 31 42 92 33 18 74 28 47 45 52 36 89 19 40
+58 13 72 24 31 26 73 72 84 29 85 99 20 32 54 92 8 80 86 58 23 80 59 21 76 75 90 76 92 57
+74 53 80 51 8 88 84 63 82 99 97 77 38 9 51 61 37 20 68 47 65 21 53 82 85 96 62 65 35 4
+71 82 14 18 88 79 38 76 66 27 10 10 62 54 80 21 6 57 83 33 52 10 97 37 6 38 12 51 0 84
+95 30 75 92 84 30 55 57 32 44 53 24 77 81 34 84 69 85 91 33 50 72 62 79 62 12 59 75 99 81
+38 42 47 1 11 34 27 77 70 85 89 84 79 15 14 54 78 93 72 68 63 39 98 72 55 32 93 0 13 21
+3 15 10 15 3 31 84 89 53 5 60 41 66 77 45 12 68 68 50 68 99 64 46 54 30 56 2 90 99 78
+66 10 27 89 42 16 9 98 16 2 68 51 0 22 73 60 69 96 37 69 30 36 20 21 51 26 65 13 74 86
+94 58 34 97 77 88 90 75 47 30 6 36 89 66 48 9 20 6 52 45 0 37 99 46 11 53 53 72 94 40
+5 71 50 96 89 71 80 43 27 95 49 9 74 28 62 65 64 97 2 55 58 11 69 0 31 22 73 20 66 11
+63 39 84 62 64 5 56 92 26 86 19 20 56 85 42 48 56 51 54 29 26 95 72 38 70 61 16 54 57 19
+76 97 40 99 73 68 98 92 97 62 73 1 29 72 18 70 90 4 98 95 70 36 65 45 86 36 88 38 64 54
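A brief aside on consumption (an assumption, not stated in the diff): both random
tables above are plain whitespace-delimited text, so the scipy.spatial tests can
read them directly with NumPy. A minimal sketch:

    import numpy as np

    # 100 rows x 30 columns per the "@@ -0,0 +1,100 @@" hunk headers above.
    ints = np.loadtxt("random-int-data.txt", dtype=np.int64)
    uints = np.loadtxt("random-uint-data.txt", dtype=np.uint64)
    assert ints.shape == uints.shape == (100, 30)
    assert ints.min() < 0 <= uints.min()  # signed vs. unsigned value ranges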
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/selfdual-4d-polytope.txt b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/selfdual-4d-polytope.txt
new file mode 100644
index 00000000..47ce4a7a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/data/selfdual-4d-polytope.txt
@@ -0,0 +1,27 @@
+# The facets of a self-dual 4-dim regular polytope
+# with 24 octahedron facets. Taken from cddlib.
+# Format b + Ax >= 0
+ 1  1  1  1  1
+ 1  1  1  1 -1
+ 1  1  1 -1  1
+ 1  1  1 -1 -1
+ 1  1 -1  1  1
+ 1  1 -1  1 -1
+ 1  1 -1 -1  1
+ 1  1 -1 -1 -1
+ 1 -1  1  1  1
+ 1 -1  1  1 -1
+ 1 -1  1 -1  1
+ 1 -1  1 -1 -1
+ 1 -1 -1  1  1
+ 1 -1 -1  1 -1
+ 1 -1 -1 -1  1
+ 1 -1 -1 -1 -1
+ 1  2  0  0  0
+ 1  0  2  0  0
+ 1  0  0  2  0
+ 1  0  0  0  2
+ 1 -2  0  0  0
+ 1  0 -2  0  0
+ 1  0  0 -2  0
+ 1  0  0  0 -2
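The comment header above defines the facet format: each row stores (b, a1, a2, a3, a4)
for one inequality b + a.x >= 0. A minimal sketch of checking the 24 facets with NumPy
(the relative file path is an assumption, matching the name added in this diff):

    import numpy as np

    facets = np.loadtxt("selfdual-4d-polytope.txt", comments="#")
    b, A = facets[:, 0], facets[:, 1:]          # offsets and facet normals
    assert facets.shape == (24, 5)              # 24 octahedron facets in R^4
    assert np.all(b + A @ np.zeros(4) >= 0)     # the origin is interior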
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/test__plotutils.py b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/test__plotutils.py
new file mode 100644
index 00000000..c85d2187
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/test__plotutils.py
@@ -0,0 +1,54 @@
+import pytest
+from numpy.testing import assert_, assert_array_equal, suppress_warnings
+try:
+    import matplotlib
+    matplotlib.rcParams['backend'] = 'Agg'
+    import matplotlib.pyplot as plt
+    has_matplotlib = True
+except Exception:
+    has_matplotlib = False
+
+from scipy.spatial import (
+    delaunay_plot_2d, voronoi_plot_2d, convex_hull_plot_2d,
+    Delaunay, Voronoi, ConvexHull)
+
+
+@pytest.mark.skipif(not has_matplotlib, reason="Matplotlib not available")
+class TestPlotting:
+    points = [(0,0), (0,1), (1,0), (1,1)]
+
+    def test_delaunay(self):
+        # Smoke test
+        fig = plt.figure()
+        obj = Delaunay(self.points)
+        s_before = obj.simplices.copy()
+        with suppress_warnings() as sup:
+            # filter can be removed when matplotlib 1.x is dropped
+            sup.filter(message="The ishold function was deprecated in version")
+            r = delaunay_plot_2d(obj, ax=fig.gca())
+        assert_array_equal(obj.simplices, s_before)  # shouldn't modify
+        assert_(r is fig)
+        delaunay_plot_2d(obj, ax=fig.gca())
+
+    def test_voronoi(self):
+        # Smoke test
+        fig = plt.figure()
+        obj = Voronoi(self.points)
+        with suppress_warnings() as sup:
+            # filter can be removed when matplotlib 1.x is dropped
+            sup.filter(message="The ishold function was deprecated in version")
+            r = voronoi_plot_2d(obj, ax=fig.gca())
+        assert_(r is fig)
+        voronoi_plot_2d(obj)
+        voronoi_plot_2d(obj, show_vertices=False)
+
+    def test_convex_hull(self):
+        # Smoke test
+        fig = plt.figure()
+        tri = ConvexHull(self.points)
+        with suppress_warnings() as sup:
+            # filter can be removed when matplotlib 1.x is dropped
+            sup.filter(message="The ishold function was deprecated in version")
+            r = convex_hull_plot_2d(tri, ax=fig.gca())
+        assert_(r is fig)
+        convex_hull_plot_2d(tri)
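The smoke tests above only assert that each plotting helper runs, leaves its input unmodified, and returns the Figure it drew into. A minimal standalone sketch of the same API, assuming a headless Agg backend and a hypothetical output path:

```python
import matplotlib
matplotlib.use("Agg")                   # headless backend, as in the tests
import matplotlib.pyplot as plt
import numpy as np
from scipy.spatial import Voronoi, voronoi_plot_2d

rng = np.random.default_rng(0)
vor = Voronoi(rng.random((10, 2)))
fig = voronoi_plot_2d(vor, show_vertices=False)  # returns the Figure it drew into
fig.savefig("voronoi.png")              # hypothetical output path
plt.close(fig)
```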
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/test__procrustes.py b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/test__procrustes.py
new file mode 100644
index 00000000..42a3c4d3
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/test__procrustes.py
@@ -0,0 +1,116 @@
+import numpy as np
+from numpy.testing import assert_allclose, assert_equal, assert_almost_equal
+from pytest import raises as assert_raises
+
+from scipy.spatial import procrustes
+
+
+class TestProcrustes:
+    def setup_method(self):
+        """creates inputs"""
+        # an L
+        self.data1 = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], 'd')
+
+        # a larger, shifted, mirrored L
+        self.data2 = np.array([[4, -2], [4, -4], [4, -6], [2, -6]], 'd')
+
+        # an L shifted up 1, right 1, and with point 4 shifted an extra
+        # .5 up
+        # pointwise squared-distance disparity with data1: 3*(2) + (1 + 1.5^2)
+        self.data3 = np.array([[2, 4], [2, 3], [2, 2], [3, 2.5]], 'd')
+
+        # data4, data5 are standardized (trace(A*A') = 1).
+        # procrustes should return an identical copy if they are used
+        # as the first matrix argument.
+        shiftangle = np.pi / 8
+        self.data4 = np.array([[1, 0], [0, 1], [-1, 0],
+                              [0, -1]], 'd') / np.sqrt(4)
+        self.data5 = np.array([[np.cos(shiftangle), np.sin(shiftangle)],
+                              [np.cos(np.pi / 2 - shiftangle),
+                               np.sin(np.pi / 2 - shiftangle)],
+                              [-np.cos(shiftangle),
+                               -np.sin(shiftangle)],
+                              [-np.cos(np.pi / 2 - shiftangle),
+                               -np.sin(np.pi / 2 - shiftangle)]],
+                              'd') / np.sqrt(4)
+
+    def test_procrustes(self):
+        # tests procrustes' ability to match two matrices.
+        #
+        # the second matrix is a rotated, shifted, scaled, and mirrored version
+        # of the first, in two dimensions only
+        #
+        # can procrustes shift, mirror, and scale an 'L'?
+        a, b, disparity = procrustes(self.data1, self.data2)
+        assert_allclose(b, a)
+        assert_almost_equal(disparity, 0.)
+
+        # if the first matrix is standardized, is it left unchanged?
+        m4, m5, disp45 = procrustes(self.data4, self.data5)
+        assert_equal(m4, self.data4)
+
+        # at worst, data3 is an 'L' with one point off by .5
+        m1, m3, disp13 = procrustes(self.data1, self.data3)
+        #assert_(disp13 < 0.5 ** 2)
+
+    def test_procrustes2(self):
+        # procrustes disparity should not depend on order of matrices
+        m1, m3, disp13 = procrustes(self.data1, self.data3)
+        m3_2, m1_2, disp31 = procrustes(self.data3, self.data1)
+        assert_almost_equal(disp13, disp31)
+
+        # try with 3d, 8 pts per
+        rand1 = np.array([[2.61955202, 0.30522265, 0.55515826],
+                         [0.41124708, -0.03966978, -0.31854548],
+                         [0.91910318, 1.39451809, -0.15295084],
+                         [2.00452023, 0.50150048, 0.29485268],
+                         [0.09453595, 0.67528885, 0.03283872],
+                         [0.07015232, 2.18892599, -1.67266852],
+                         [0.65029688, 1.60551637, 0.80013549],
+                         [-0.6607528, 0.53644208, 0.17033891]])
+
+        rand3 = np.array([[0.0809969, 0.09731461, -0.173442],
+                         [-1.84888465, -0.92589646, -1.29335743],
+                         [0.67031855, -1.35957463, 0.41938621],
+                         [0.73967209, -0.20230757, 0.52418027],
+                         [0.17752796, 0.09065607, 0.29827466],
+                         [0.47999368, -0.88455717, -0.57547934],
+                         [-0.11486344, -0.12608506, -0.3395779],
+                         [-0.86106154, -0.28687488, 0.9644429]])
+        res1, res3, disp13 = procrustes(rand1, rand3)
+        res3_2, res1_2, disp31 = procrustes(rand3, rand1)
+        assert_almost_equal(disp13, disp31)
+
+    def test_procrustes_shape_mismatch(self):
+        assert_raises(ValueError, procrustes,
+                      np.array([[1, 2], [3, 4]]),
+                      np.array([[5, 6, 7], [8, 9, 10]]))
+
+    def test_procrustes_empty_rows_or_cols(self):
+        empty = np.array([[]])
+        assert_raises(ValueError, procrustes, empty, empty)
+
+    def test_procrustes_no_variation(self):
+        assert_raises(ValueError, procrustes,
+                      np.array([[42, 42], [42, 42]]),
+                      np.array([[45, 45], [45, 45]]))
+
+    def test_procrustes_bad_number_of_dimensions(self):
+        # fewer dimensions in one dataset
+        assert_raises(ValueError, procrustes,
+                      np.array([1, 1, 2, 3, 5, 8]),
+                      np.array([[1, 2], [3, 4]]))
+
+        # fewer dimensions in both datasets
+        assert_raises(ValueError, procrustes,
+                      np.array([1, 1, 2, 3, 5, 8]),
+                      np.array([1, 1, 2, 3, 5, 8]))
+
+        # zero dimensions
+        assert_raises(ValueError, procrustes, np.array(7), np.array(11))
+
+        # extra dimensions
+        assert_raises(ValueError, procrustes,
+                      np.array([[[11], [7]]]),
+                      np.array([[[5, 13]]]))
+
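These tests pin down procrustes' contract: both returned matrices are standardized (centered, trace(AA') = 1), the second is optimally rotated, scaled, and possibly reflected onto the first, and the disparity is the residual sum of squares, symmetric in its arguments. A small sketch of that contract:

```python
import numpy as np
from scipy.spatial import procrustes

a = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], dtype=float)
b = 2.5 * a @ np.array([[0, 1], [1, 0]]) + 7.0   # mirrored, scaled, shifted copy

mtx1, mtx2, disparity = procrustes(a, b)
print(np.allclose(mtx1, mtx2))           # True: b maps exactly onto a
print(np.isclose(disparity, 0.0))        # True: no residual
print(np.isclose((mtx1 ** 2).sum(), 1))  # True: outputs are standardized
```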
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/test_distance.py b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/test_distance.py
new file mode 100644
index 00000000..42152c2d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/test_distance.py
@@ -0,0 +1,2186 @@
+#
+# Author: Damian Eads
+# Date: April 17, 2008
+#
+# Copyright (C) 2008 Damian Eads
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above
+#    copyright notice, this list of conditions and the following
+#    disclaimer in the documentation and/or other materials provided
+#    with the distribution.
+#
+# 3. The name of the author may not be used to endorse or promote
+#    products derived from this software without specific prior
+#    written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+import os.path
+
+from functools import wraps, partial
+import weakref
+
+import numpy as np
+import warnings
+from numpy.linalg import norm
+from numpy.testing import (verbose, assert_,
+                           assert_array_equal, assert_equal,
+                           assert_almost_equal, assert_allclose,
+                           break_cycles, IS_PYPY)
+import pytest
+from pytest import raises as assert_raises
+
+from scipy.spatial.distance import (
+    squareform, pdist, cdist, num_obs_y, num_obs_dm, is_valid_dm, is_valid_y,
+    _validate_vector, _METRICS_NAMES)
+
+# chebyshev, cityblock and kulsinski were missing from this explicit import;
+# jensenshannon and seuclidean are referenced by string name.
+from scipy.spatial.distance import (braycurtis, canberra, chebyshev, cityblock,
+                                    correlation, cosine, dice, euclidean,
+                                    hamming, jaccard, jensenshannon,
+                                    kulsinski, kulczynski1, mahalanobis,
+                                    minkowski, rogerstanimoto,
+                                    russellrao, seuclidean, sokalmichener,
+                                    sokalsneath, sqeuclidean, yule)
+
+_filenames = [
+              "cdist-X1.txt",
+              "cdist-X2.txt",
+              "iris.txt",
+              "pdist-boolean-inp.txt",
+              "pdist-chebyshev-ml-iris.txt",
+              "pdist-chebyshev-ml.txt",
+              "pdist-cityblock-ml-iris.txt",
+              "pdist-cityblock-ml.txt",
+              "pdist-correlation-ml-iris.txt",
+              "pdist-correlation-ml.txt",
+              "pdist-cosine-ml-iris.txt",
+              "pdist-cosine-ml.txt",
+              "pdist-double-inp.txt",
+              "pdist-euclidean-ml-iris.txt",
+              "pdist-euclidean-ml.txt",
+              "pdist-hamming-ml.txt",
+              "pdist-jaccard-ml.txt",
+              "pdist-jensenshannon-ml-iris.txt",
+              "pdist-jensenshannon-ml.txt",
+              "pdist-minkowski-3.2-ml-iris.txt",
+              "pdist-minkowski-3.2-ml.txt",
+              "pdist-minkowski-5.8-ml-iris.txt",
+              "pdist-seuclidean-ml-iris.txt",
+              "pdist-seuclidean-ml.txt",
+              "pdist-spearman-ml.txt",
+              "random-bool-data.txt",
+              "random-double-data.txt",
+              "random-int-data.txt",
+              "random-uint-data.txt",
+              ]
+
+_tdist = np.array([[0, 662, 877, 255, 412, 996],
+                      [662, 0, 295, 468, 268, 400],
+                      [877, 295, 0, 754, 564, 138],
+                      [255, 468, 754, 0, 219, 869],
+                      [412, 268, 564, 219, 0, 669],
+                      [996, 400, 138, 869, 669, 0]], dtype='double')
+
+_ytdist = squareform(_tdist)
+
+# A dictionary of expected output arrays for the tests. These arrays
+# come from a list of text files, which are read prior to testing.
+# Each test loads inputs and outputs from this dictionary.
+eo = {}
+
+
+def load_testing_files():
+    for fn in _filenames:
+        name = fn.replace(".txt", "").replace("-ml", "")
+        fqfn = os.path.join(os.path.dirname(__file__), 'data', fn)
+        fp = open(fqfn)
+        eo[name] = np.loadtxt(fp)
+        fp.close()
+    eo['pdist-boolean-inp'] = np.bool_(eo['pdist-boolean-inp'])
+    eo['random-bool-data'] = np.bool_(eo['random-bool-data'])
+    eo['random-float32-data'] = np.float32(eo['random-double-data'])
+    eo['random-int-data'] = np.int_(eo['random-int-data'])
+    eo['random-uint-data'] = np.uint(eo['random-uint-data'])
+
+
+load_testing_files()
+
+
+def _is_32bit():
+    return np.intp(0).itemsize < 8
+
+
+def _chk_asarrays(arrays, axis=None):
+    arrays = [np.asanyarray(a) for a in arrays]
+    if axis is None:
+        # np < 1.10 ravel removes subclass from arrays
+        arrays = [np.ravel(a) if a.ndim != 1 else a
+                  for a in arrays]
+        axis = 0
+    arrays = tuple(np.atleast_1d(a) for a in arrays)
+    if axis < 0:
+        if not all(a.ndim == arrays[0].ndim for a in arrays):
+            raise ValueError("array ndim must be the same for neg axis")
+        axis = range(arrays[0].ndim)[axis]
+    return arrays + (axis,)
+
+
+def _chk_weights(arrays, weights=None, axis=None,
+                 force_weights=False, simplify_weights=True,
+                 pos_only=False, neg_check=False,
+                 nan_screen=False, mask_screen=False,
+                 ddof=None):
+    chked = _chk_asarrays(arrays, axis=axis)
+    arrays, axis = chked[:-1], chked[-1]
+
+    simplify_weights = simplify_weights and not force_weights
+    if not force_weights and mask_screen:
+        force_weights = any(np.ma.getmask(a) is not np.ma.nomask for a in arrays)
+
+    if nan_screen:
+        has_nans = [np.isnan(np.sum(a)) for a in arrays]
+        if any(has_nans):
+            mask_screen = True
+            force_weights = True
+            arrays = tuple(np.ma.masked_invalid(a) if has_nan else a
+                           for a, has_nan in zip(arrays, has_nans))
+
+    if weights is not None:
+        weights = np.asanyarray(weights)
+    elif force_weights:
+        weights = np.ones(arrays[0].shape[axis])
+    else:
+        return arrays + (weights, axis)
+
+    if ddof:
+        weights = _freq_weights(weights)
+
+    if mask_screen:
+        weights = _weight_masked(arrays, weights, axis)
+
+    if not all(weights.shape == (a.shape[axis],) for a in arrays):
+        raise ValueError("weights shape must match arrays along axis")
+    if neg_check and (weights < 0).any():
+        raise ValueError("weights cannot be negative")
+
+    if pos_only:
+        pos_weights = np.nonzero(weights > 0)[0]
+        if pos_weights.size < weights.size:
+            arrays = tuple(np.take(a, pos_weights, axis=axis) for a in arrays)
+            weights = weights[pos_weights]
+    if simplify_weights and (weights == 1).all():
+        weights = None
+    return arrays + (weights, axis)
+
+
+def _freq_weights(weights):
+    if weights is None:
+        return weights
+    int_weights = weights.astype(int)
+    if (weights != int_weights).any():
+        raise ValueError("frequency (integer count-type) weights required, got %s" % weights)
+    return int_weights
+
+
+def _weight_masked(arrays, weights, axis):
+    if axis is None:
+        axis = 0
+    weights = np.asanyarray(weights)
+    for a in arrays:
+        axis_mask = np.ma.getmask(a)
+        if axis_mask is np.ma.nomask:
+            continue
+        if a.ndim > 1:
+            not_axes = tuple(i for i in range(a.ndim) if i != axis)
+            axis_mask = axis_mask.any(axis=not_axes)
+        weights *= 1 - axis_mask.astype(int)
+    return weights
+
+
+def _rand_split(arrays, weights, axis, split_per, seed=None):
+    # Coerce `arrays` to float64 if integer, to avoid nan-to-integer issues
+    arrays = [arr.astype(np.float64) if np.issubdtype(arr.dtype, np.integer)
+              else arr for arr in arrays]
+
+    # inverse operation for stats.collapse_weights
+    weights = np.array(weights, dtype=np.float64)  # modified inplace; need a copy
+    seeded_rand = np.random.RandomState(seed)
+
+    def mytake(a, ix, axis):
+        record = np.asanyarray(np.take(a, ix, axis=axis))
+        return record.reshape([a.shape[i] if i != axis else 1
+                               for i in range(a.ndim)])
+
+    n_obs = arrays[0].shape[axis]
+    assert all(a.shape[axis] == n_obs for a in arrays), "data must be aligned on sample axis"
+    for i in range(int(split_per) * n_obs):
+        split_ix = seeded_rand.randint(n_obs + i)
+        prev_w = weights[split_ix]
+        q = seeded_rand.rand()
+        weights[split_ix] = q * prev_w
+        weights = np.append(weights, (1. - q) * prev_w)
+        arrays = [np.append(a, mytake(a, split_ix, axis=axis),
+                            axis=axis) for a in arrays]
+    return arrays, weights
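`_rand_split` leans on the invariant that splitting an observation of weight w into two duplicates with weights q*w and (1-q)*w leaves any correctly weighted statistic unchanged. A toy check of that invariant with a plain weighted mean (standalone, not using the helper):

```python
import numpy as np

x = np.array([1.0, 4.0, 9.0])
w = np.array([2.0, 1.0, 3.0])
before = np.average(x, weights=w)

# split observation 0: weight 2.0 becomes 0.7*2.0 plus 0.3*2.0 on a duplicate row
q = 0.7
x2 = np.append(x, x[0])
w2 = np.append(w, (1 - q) * w[0])
w2[0] = q * w[0]
assert np.isclose(before, np.average(x2, weights=w2))  # weighted mean preserved
```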
+
+
+def _rough_check(a, b, compare_assert=partial(assert_allclose, atol=1e-5),
+                  key=lambda x: x, w=None):
+    check_a = key(a)
+    check_b = key(b)
+    try:
+        if np.array(check_a != check_b).any():  # try strict equality for string types
+            compare_assert(check_a, check_b)
+    except AttributeError:  # masked array
+        compare_assert(check_a, check_b)
+    except (TypeError, ValueError):  # nested data structure
+        for a_i, b_i in zip(check_a, check_b):
+            _rough_check(a_i, b_i, compare_assert=compare_assert)
+
+# diff from test_stats:
+#  n_args=2, weight_arg='w', default_axis=None
+#  ma_safe = False, nan_safe = False
+def _weight_checked(fn, n_args=2, default_axis=None, key=lambda x: x, weight_arg='w',
+                    squeeze=True, silent=False,
+                    ones_test=True, const_test=True, dup_test=True,
+                    split_test=True, dud_test=True, ma_safe=False, ma_very_safe=False, nan_safe=False,
+                    split_per=1.0, seed=0, compare_assert=partial(assert_allclose, atol=1e-5)):
+    """runs fn on its arguments 2 or 3 ways, checks that the results are the same,
+       then returns the same thing it would have returned before"""
+    @wraps(fn)
+    def wrapped(*args, **kwargs):
+        result = fn(*args, **kwargs)
+
+        arrays = args[:n_args]
+        rest = args[n_args:]
+        weights = kwargs.get(weight_arg, None)
+        axis = kwargs.get('axis', default_axis)
+
+        chked = _chk_weights(arrays, weights=weights, axis=axis, force_weights=True, mask_screen=True)
+        arrays, weights, axis = chked[:-2], chked[-2], chked[-1]
+        if squeeze:
+            arrays = [np.atleast_1d(a.squeeze()) for a in arrays]
+
+        try:
+            # WEIGHTS CHECK 1: EQUALLY WEIGHTED OBSERVATIONS
+            args = tuple(arrays) + rest
+            if ones_test:
+                kwargs[weight_arg] = weights
+                _rough_check(result, fn(*args, **kwargs), key=key)
+            if const_test:
+                kwargs[weight_arg] = weights * 101.0
+                _rough_check(result, fn(*args, **kwargs), key=key)
+                kwargs[weight_arg] = weights * 0.101
+                try:
+                    _rough_check(result, fn(*args, **kwargs), key=key)
+                except Exception as e:
+                    raise type(e)((e, arrays, weights)) from e
+
+            # WEIGHTS CHECK 2: ADDITIONAL 0-WEIGHTED OBSERVATIONS
+            if dud_test:
+                # add randomly resampled rows, weighted at 0
+                dud_arrays, dud_weights = _rand_split(arrays, weights, axis, split_per=split_per, seed=seed)
+                dud_weights[:weights.size] = weights  # not exactly 1 everywhere: masking may have zeroed some
+                dud_weights[weights.size:] = 0
+                dud_args = tuple(dud_arrays) + rest
+                kwargs[weight_arg] = dud_weights
+                _rough_check(result, fn(*dud_args, **kwargs), key=key)
+                # increase the value of those 0-weighted rows
+                for a in dud_arrays:
+                    indexer = [slice(None)] * a.ndim
+                    indexer[axis] = slice(weights.size, None)
+                    indexer = tuple(indexer)
+                    a[indexer] = a[indexer] * 101
+                dud_args = tuple(dud_arrays) + rest
+                _rough_check(result, fn(*dud_args, **kwargs), key=key)
+                # set those 0-weighted rows to NaNs
+                for a in dud_arrays:
+                    indexer = [slice(None)] * a.ndim
+                    indexer[axis] = slice(weights.size, None)
+                    indexer = tuple(indexer)
+                    a[indexer] = a[indexer] * np.nan
+                if kwargs.get("nan_policy", None) == "omit" and nan_safe:
+                    dud_args = tuple(dud_arrays) + rest
+                    _rough_check(result, fn(*dud_args, **kwargs), key=key)
+                # mask out those nan values
+                if ma_safe:
+                    dud_arrays = [np.ma.masked_invalid(a) for a in dud_arrays]
+                    dud_args = tuple(dud_arrays) + rest
+                    _rough_check(result, fn(*dud_args, **kwargs), key=key)
+                    if ma_very_safe:
+                        kwargs[weight_arg] = None
+                        _rough_check(result, fn(*dud_args, **kwargs), key=key)
+                del dud_arrays, dud_args, dud_weights
+
+            # WEIGHTS CHECK 3: DUPLICATE DATA (DUMB SPLITTING)
+            if dup_test:
+                dup_arrays = [np.append(a, a, axis=axis) for a in arrays]
+                dup_weights = np.append(weights, weights) / 2.0
+                dup_args = tuple(dup_arrays) + rest
+                kwargs[weight_arg] = dup_weights
+                _rough_check(result, fn(*dup_args, **kwargs), key=key)
+                del dup_args, dup_arrays, dup_weights
+
+            # WEIGHTS CHECK 4: RANDOM SPLITTING
+            if split_test and split_per > 0:
+                split_arrays, split_weights = _rand_split(arrays, weights, axis, split_per=split_per, seed=seed)
+                split_args = tuple(split_arrays) + rest
+                kwargs[weight_arg] = split_weights
+                _rough_check(result, fn(*split_args, **kwargs), key=key)
+        except NotImplementedError as e:
+            # when some combination of arguments makes weighting impossible,
+            #  this is the desired response
+            if not silent:
+                warnings.warn("%s NotImplemented weights: %s" % (fn.__name__, e))
+        return result
+    return wrapped
+
+
+wcdist = _weight_checked(cdist, default_axis=1, squeeze=False)
+wcdist_no_const = _weight_checked(cdist, default_axis=1, squeeze=False, const_test=False)
+wpdist = _weight_checked(pdist, default_axis=1, squeeze=False, n_args=1)
+wpdist_no_const = _weight_checked(pdist, default_axis=1, squeeze=False, const_test=False, n_args=1)
+wrogerstanimoto = _weight_checked(rogerstanimoto)
+wmatching = whamming = _weight_checked(hamming, dud_test=False)
+wyule = _weight_checked(yule)
+wdice = _weight_checked(dice)
+wcityblock = _weight_checked(cityblock)
+wchebyshev = _weight_checked(chebyshev)
+wcosine = _weight_checked(cosine)
+wcorrelation = _weight_checked(correlation)
+wkulsinski = _weight_checked(kulsinski)
+wkulczynski1 = _weight_checked(kulczynski1)
+wjaccard = _weight_checked(jaccard)
+weuclidean = _weight_checked(euclidean, const_test=False)
+wsqeuclidean = _weight_checked(sqeuclidean, const_test=False)
+wbraycurtis = _weight_checked(braycurtis)
+wcanberra = _weight_checked(canberra, const_test=False)
+wsokalsneath = _weight_checked(sokalsneath)
+wsokalmichener = _weight_checked(sokalmichener)
+wrussellrao = _weight_checked(russellrao)
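The dup_test branch of `_weight_checked` above encodes a related invariant: duplicating every observation while halving its weight must leave a weighted metric unchanged. A standalone sketch with the public weighted hamming distance (only the documented `w` parameter is assumed):

```python
import numpy as np
from scipy.spatial.distance import hamming

u = np.array([1, 0, 1, 1, 0], dtype=bool)
v = np.array([1, 1, 0, 1, 1], dtype=bool)
w = np.array([1.0, 2.0, 1.0, 1.0, 3.0])

plain = hamming(u, v, w=w)
dup = hamming(np.tile(u, 2), np.tile(v, 2), w=np.tile(w / 2.0, 2))
assert np.isclose(plain, dup)   # duplicated data with halved weights is equivalent
```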
+
+
+class TestCdist:
+
+    def setup_method(self):
+        self.rnd_eo_names = ['random-float32-data', 'random-int-data',
+                             'random-uint-data', 'random-double-data',
+                             'random-bool-data']
+        self.valid_upcasts = {'bool': [np.uint, np.int_, np.float32, np.double],
+                              'uint': [np.int_, np.float32, np.double],
+                              'int': [np.float32, np.double],
+                              'float32': [np.double]}
+
+    def test_cdist_extra_args(self):
+        # Tests that args and kwargs are correctly handled
+        def _my_metric(x, y, arg, kwarg=1, kwarg2=2):
+            return arg + kwarg + kwarg2
+
+        X1 = [[1., 2., 3.], [1.2, 2.3, 3.4], [2.2, 2.3, 4.4]]
+        X2 = [[7., 5., 8.], [7.5, 5.8, 8.4], [5.5, 5.8, 4.4]]
+        kwargs = {'N0tV4l1D_p4raM': 3.14, "w":np.arange(3)}
+        args = [3.14] * 200
+        for metric in _METRICS_NAMES:
+            with np.testing.suppress_warnings() as sup:
+                if metric == "kulsinski":
+                    sup.filter(DeprecationWarning,
+                               "Kulsinski has been deprecated from")
+                assert_raises(TypeError, cdist, X1, X2,
+                              metric=metric, **kwargs)
+                assert_raises(TypeError, cdist, X1, X2,
+                              metric=eval(metric), **kwargs)
+                assert_raises(TypeError, cdist, X1, X2,
+                              metric="test_" + metric, **kwargs)
+                assert_raises(TypeError, cdist, X1, X2,
+                              metric=metric, *args)
+                assert_raises(TypeError, cdist, X1, X2,
+                              metric=eval(metric), *args)
+                assert_raises(TypeError, cdist, X1, X2,
+                              metric="test_" + metric, *args)
+
+        assert_raises(TypeError, cdist, X1, X2, _my_metric)
+        assert_raises(TypeError, cdist, X1, X2, _my_metric, *args)
+        assert_raises(TypeError, cdist, X1, X2, _my_metric, **kwargs)
+        assert_raises(TypeError, cdist, X1, X2, _my_metric,
+                      kwarg=2.2, kwarg2=3.3)
+        assert_raises(TypeError, cdist, X1, X2, _my_metric, 1, 2, kwarg=2.2)
+
+        assert_raises(TypeError, cdist, X1, X2, _my_metric, 1.1, 2.2, 3.3)
+        assert_raises(TypeError, cdist, X1, X2, _my_metric, 1.1, 2.2)
+        assert_raises(TypeError, cdist, X1, X2, _my_metric, 1.1)
+        assert_raises(TypeError, cdist, X1, X2, _my_metric, 1.1,
+                      kwarg=2.2, kwarg2=3.3)
+
+        # this should work
+        assert_allclose(cdist(X1, X2, metric=_my_metric,
+                              arg=1.1, kwarg2=3.3), 5.4)
+
+    def test_cdist_euclidean_random_unicode(self):
+        eps = 1e-15
+        X1 = eo['cdist-X1']
+        X2 = eo['cdist-X2']
+        Y1 = wcdist_no_const(X1, X2, 'euclidean')
+        Y2 = wcdist_no_const(X1, X2, 'test_euclidean')
+        assert_allclose(Y1, Y2, rtol=eps, verbose=verbose > 2)
+
+    @pytest.mark.parametrize("p", [0.1, 0.25, 1.0, 1.23,
+                                   2.0, 3.8, 4.6, np.inf])
+    def test_cdist_minkowski_random(self, p):
+        eps = 1e-13
+        X1 = eo['cdist-X1']
+        X2 = eo['cdist-X2']
+        Y1 = wcdist_no_const(X1, X2, 'minkowski', p=p)
+        Y2 = wcdist_no_const(X1, X2, 'test_minkowski', p=p)
+        assert_allclose(Y1, Y2, atol=0, rtol=eps, verbose=verbose > 2)
+
+    def test_cdist_cosine_random(self):
+        eps = 1e-14
+        X1 = eo['cdist-X1']
+        X2 = eo['cdist-X2']
+        Y1 = wcdist(X1, X2, 'cosine')
+
+        # Naive implementation
+        def norms(X):
+            return np.linalg.norm(X, axis=1).reshape(-1, 1)
+
+        Y2 = 1 - np.dot((X1 / norms(X1)), (X2 / norms(X2)).T)
+
+        assert_allclose(Y1, Y2, rtol=eps, verbose=verbose > 2)
+
+    def test_cdist_mahalanobis(self):
+        # 1-dimensional observations
+        x1 = np.array([[2], [3]])
+        x2 = np.array([[2], [5]])
+        dist = cdist(x1, x2, metric='mahalanobis')
+        assert_allclose(dist, [[0.0, np.sqrt(4.5)], [np.sqrt(0.5), np.sqrt(2)]])
+
+        # 2-dimensional observations
+        x1 = np.array([[0, 0], [-1, 0]])
+        x2 = np.array([[0, 2], [1, 0], [0, -2]])
+        dist = cdist(x1, x2, metric='mahalanobis')
+        rt2 = np.sqrt(2)
+        assert_allclose(dist, [[rt2, rt2, rt2], [2, 2 * rt2, 2]])
+
+        # Too few observations
+        assert_raises(ValueError,
+                      cdist, [[0, 1]], [[2, 3]], metric='mahalanobis')
+
+    def test_cdist_custom_notdouble(self):
+        class myclass:
+            pass
+
+        def _my_metric(x, y):
+            if not isinstance(x[0], myclass) or not isinstance(y[0], myclass):
+                raise ValueError("Type has been changed")
+            return 1.123
+        data = np.array([[myclass()]], dtype=object)
+        cdist_y = cdist(data, data, metric=_my_metric)
+        right_y = 1.123
+        assert_equal(cdist_y, right_y, verbose=verbose > 2)
+
+    def _check_calling_conventions(self, X1, X2, metric, eps=1e-07, **kwargs):
+        # helper function for test_cdist_calling_conventions
+        try:
+            y1 = cdist(X1, X2, metric=metric, **kwargs)
+            y2 = cdist(X1, X2, metric=eval(metric), **kwargs)
+            y3 = cdist(X1, X2, metric="test_" + metric, **kwargs)
+        except Exception as e:
+            e_cls = e.__class__
+            if verbose > 2:
+                print(e_cls.__name__)
+                print(e)
+            assert_raises(e_cls, cdist, X1, X2, metric=metric, **kwargs)
+            assert_raises(e_cls, cdist, X1, X2, metric=eval(metric), **kwargs)
+            assert_raises(e_cls, cdist, X1, X2, metric="test_" + metric, **kwargs)
+        else:
+            assert_allclose(y1, y2, rtol=eps, verbose=verbose > 2)
+            assert_allclose(y1, y3, rtol=eps, verbose=verbose > 2)
+
+    def test_cdist_calling_conventions(self):
+        # Ensures that specifying the metric with a str or scipy function
+        # gives the same behaviour (i.e. same result or same exception).
+        # NOTE: Correctness is checked within each metric's own tests.
+        for eo_name in self.rnd_eo_names:
+            # subsampling input data to speed-up tests
+            # NOTE: the number of samples needs to exceed the number of dimensions for mahalanobis
+            X1 = eo[eo_name][::5, ::-2]
+            X2 = eo[eo_name][1::5, ::2]
+            for metric in _METRICS_NAMES:
+                if verbose > 2:
+                    print("testing: ", metric, " with: ", eo_name)
+                if metric in {'dice', 'yule', 'kulsinski',
+                              'rogerstanimoto',
+                              'russellrao', 'sokalmichener',
+                              'sokalsneath',
+                              'kulczynski1'} and 'bool' not in eo_name:
+                    # python version permits non-bools e.g. for fuzzy logic
+                    continue
+                with np.testing.suppress_warnings() as sup:
+                    if metric == "kulsinski":
+                        sup.filter(DeprecationWarning,
+                                   "Kulsinski has been deprecated from")
+                    self._check_calling_conventions(X1, X2, metric)
+
+                # Testing built-in metrics with extra args
+                if metric == "seuclidean":
+                    X12 = np.vstack([X1, X2]).astype(np.double)
+                    V = np.var(X12, axis=0, ddof=1)
+                    self._check_calling_conventions(X1, X2, metric, V=V)
+                elif metric == "mahalanobis":
+                    X12 = np.vstack([X1, X2]).astype(np.double)
+                    V = np.atleast_2d(np.cov(X12.T))
+                    VI = np.array(np.linalg.inv(V).T)
+                    self._check_calling_conventions(X1, X2, metric, VI=VI)
+
+    def test_cdist_dtype_equivalence(self):
+        # Tests that the result is not affected by type up-casting
+        eps = 1e-07
+        tests = [(eo['random-bool-data'], self.valid_upcasts['bool']),
+                 (eo['random-uint-data'], self.valid_upcasts['uint']),
+                 (eo['random-int-data'], self.valid_upcasts['int']),
+                 (eo['random-float32-data'], self.valid_upcasts['float32'])]
+        for metric in _METRICS_NAMES:
+            for test in tests:
+                X1 = test[0][::5, ::-2]
+                X2 = test[0][1::5, ::2]
+                try:
+                    y1 = cdist(X1, X2, metric=metric)
+                except Exception as e:
+                    e_cls = e.__class__
+                    if verbose > 2:
+                        print(e_cls.__name__)
+                        print(e)
+                    for new_type in test[1]:
+                        X1new = new_type(X1)
+                        X2new = new_type(X2)
+                        assert_raises(e_cls, cdist, X1new, X2new, metric=metric)
+                else:
+                    for new_type in test[1]:
+                        y2 = cdist(new_type(X1), new_type(X2), metric=metric)
+                        assert_allclose(y1, y2, rtol=eps, verbose=verbose > 2)
+
+    def test_cdist_out(self):
+        # Test that out parameter works properly
+        eps = 1e-15
+        X1 = eo['cdist-X1']
+        X2 = eo['cdist-X2']
+        out_r, out_c = X1.shape[0], X2.shape[0]
+
+        for metric in _METRICS_NAMES:
+            kwargs = dict()
+            if metric == 'minkowski':
+                kwargs['p'] = 1.23
+            out1 = np.empty((out_r, out_c), dtype=np.double)
+            Y1 = cdist(X1, X2, metric, **kwargs)
+            Y2 = cdist(X1, X2, metric, out=out1, **kwargs)
+            # test that output is numerically equivalent
+            assert_allclose(Y1, Y2, rtol=eps, verbose=verbose > 2)
+            # test that Y2 and out1 are the same object
+            assert_(Y2 is out1)
+            # test for incorrect shape
+            out2 = np.empty((out_r-1, out_c+1), dtype=np.double)
+            assert_raises(ValueError,
+                          cdist, X1, X2, metric, out=out2, **kwargs)
+            # test for C-contiguous order
+            out3 = np.empty(
+                (2 * out_r, 2 * out_c), dtype=np.double)[::2, ::2]
+            out4 = np.empty((out_r, out_c), dtype=np.double, order='F')
+            assert_raises(ValueError,
+                          cdist, X1, X2, metric, out=out3, **kwargs)
+            assert_raises(ValueError,
+                          cdist, X1, X2, metric, out=out4, **kwargs)
+
+            # test for incorrect dtype
+            out5 = np.empty((out_r, out_c), dtype=np.int64)
+            assert_raises(ValueError,
+                          cdist, X1, X2, metric, out=out5, **kwargs)
+
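As the test enforces, `out` must be a preallocated C-contiguous float64 array of shape (len(XA), len(XB)); anything else raises ValueError. A minimal usage sketch:

```python
import numpy as np
from scipy.spatial.distance import cdist

X1 = np.random.rand(5, 3)
X2 = np.random.rand(7, 3)

buf = np.empty((5, 7), dtype=np.double)   # C-contiguous, float64, correct shape
res = cdist(X1, X2, 'euclidean', out=buf)
assert res is buf                         # written in place, no new allocation
```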
+    def test_striding(self):
+        # test that striding is handled correctly with calls to
+        # _copy_array_if_base_present
+        eps = 1e-15
+        X1 = eo['cdist-X1'][::2, ::2]
+        X2 = eo['cdist-X2'][::2, ::2]
+        X1_copy = X1.copy()
+        X2_copy = X2.copy()
+
+        # confirm equivalence
+        assert_equal(X1, X1_copy)
+        assert_equal(X2, X2_copy)
+        # confirm contiguity
+        assert_(not X1.flags.c_contiguous)
+        assert_(not X2.flags.c_contiguous)
+        assert_(X1_copy.flags.c_contiguous)
+        assert_(X2_copy.flags.c_contiguous)
+
+        for metric in _METRICS_NAMES:
+            kwargs = dict()
+            if metric == 'minkowski':
+                kwargs['p'] = 1.23
+            Y1 = cdist(X1, X2, metric, **kwargs)
+            Y2 = cdist(X1_copy, X2_copy, metric, **kwargs)
+            # test that output is numerically equivalent
+            assert_allclose(Y1, Y2, rtol=eps, verbose=verbose > 2)
+
+    def test_cdist_refcount(self):
+        for metric in _METRICS_NAMES:
+            x1 = np.random.rand(10, 10)
+            x2 = np.random.rand(10, 10)
+
+            kwargs = dict()
+            if metric == 'minkowski':
+                kwargs['p'] = 1.23
+
+            out = cdist(x1, x2, metric=metric, **kwargs)
+
+            # Check reference counts aren't messed up. If we only hold weak
+            # references, the arrays should be deallocated.
+            weak_refs = [weakref.ref(v) for v in (x1, x2, out)]
+            del x1, x2, out
+
+            if IS_PYPY:
+                break_cycles()
+            assert all(weak_ref() is None for weak_ref in weak_refs)
+
+
+class TestPdist:
+
+    def setup_method(self):
+        self.rnd_eo_names = ['random-float32-data', 'random-int-data',
+                             'random-uint-data', 'random-double-data',
+                             'random-bool-data']
+        self.valid_upcasts = {'bool': [np.uint, np.int_, np.float32, np.double],
+                              'uint': [np.int_, np.float32, np.double],
+                              'int': [np.float32, np.double],
+                              'float32': [np.double]}
+
+    def test_pdist_extra_args(self):
+        # Tests that args and kwargs are correctly handled
+        def _my_metric(x, y, arg, kwarg=1, kwarg2=2):
+            return arg + kwarg + kwarg2
+
+        X1 = [[1., 2.], [1.2, 2.3], [2.2, 2.3]]
+        kwargs = {'N0tV4l1D_p4raM': 3.14, "w":np.arange(2)}
+        args = [3.14] * 200
+        for metric in _METRICS_NAMES:
+            with np.testing.suppress_warnings() as sup:
+                if metric == "kulsinski":
+                    sup.filter(DeprecationWarning,
+                               "Kulsinski has been deprecated from")
+                assert_raises(TypeError, pdist, X1, metric=metric, **kwargs)
+                assert_raises(TypeError, pdist, X1,
+                              metric=eval(metric), **kwargs)
+                assert_raises(TypeError, pdist, X1,
+                              metric="test_" + metric, **kwargs)
+                assert_raises(TypeError, pdist, X1, metric=metric, *args)
+                assert_raises(TypeError, pdist, X1, metric=eval(metric), *args)
+                assert_raises(TypeError, pdist, X1,
+                              metric="test_" + metric, *args)
+
+        assert_raises(TypeError, pdist, X1, _my_metric)
+        assert_raises(TypeError, pdist, X1, _my_metric, *args)
+        assert_raises(TypeError, pdist, X1, _my_metric, **kwargs)
+        assert_raises(TypeError, pdist, X1, _my_metric,
+                      kwarg=2.2, kwarg2=3.3)
+        assert_raises(TypeError, pdist, X1, _my_metric, 1, 2, kwarg=2.2)
+
+        assert_raises(TypeError, pdist, X1, _my_metric, 1.1, 2.2, 3.3)
+        assert_raises(TypeError, pdist, X1, _my_metric, 1.1, 2.2)
+        assert_raises(TypeError, pdist, X1, _my_metric, 1.1)
+        assert_raises(TypeError, pdist, X1, _my_metric, 1.1,
+                      kwarg=2.2, kwarg2=3.3)
+
+        # this should work
+        assert_allclose(pdist(X1, metric=_my_metric,
+                              arg=1.1, kwarg2=3.3), 5.4)
+
+    def test_pdist_euclidean_random(self):
+        eps = 1e-07
+        X = eo['pdist-double-inp']
+        Y_right = eo['pdist-euclidean']
+        Y_test1 = wpdist_no_const(X, 'euclidean')
+        assert_allclose(Y_test1, Y_right, rtol=eps)
+
+    def test_pdist_euclidean_random_u(self):
+        eps = 1e-07
+        X = eo['pdist-double-inp']
+        Y_right = eo['pdist-euclidean']
+        Y_test1 = wpdist_no_const(X, 'euclidean')
+        assert_allclose(Y_test1, Y_right, rtol=eps)
+
+    def test_pdist_euclidean_random_float32(self):
+        eps = 1e-07
+        X = np.float32(eo['pdist-double-inp'])
+        Y_right = eo['pdist-euclidean']
+        Y_test1 = wpdist_no_const(X, 'euclidean')
+        assert_allclose(Y_test1, Y_right, rtol=eps)
+
+    def test_pdist_euclidean_random_nonC(self):
+        eps = 1e-07
+        X = eo['pdist-double-inp']
+        Y_right = eo['pdist-euclidean']
+        Y_test2 = wpdist_no_const(X, 'test_euclidean')
+        assert_allclose(Y_test2, Y_right, rtol=eps)
+
+    @pytest.mark.slow
+    def test_pdist_euclidean_iris_double(self):
+        eps = 1e-7
+        X = eo['iris']
+        Y_right = eo['pdist-euclidean-iris']
+        Y_test1 = wpdist_no_const(X, 'euclidean')
+        assert_allclose(Y_test1, Y_right, rtol=eps)
+
+    @pytest.mark.slow
+    def test_pdist_euclidean_iris_float32(self):
+        eps = 1e-5
+        X = np.float32(eo['iris'])
+        Y_right = eo['pdist-euclidean-iris']
+        Y_test1 = wpdist_no_const(X, 'euclidean')
+        assert_allclose(Y_test1, Y_right, rtol=eps, verbose=verbose > 2)
+
+    @pytest.mark.slow
+    def test_pdist_euclidean_iris_nonC(self):
+        # Test pdist(X, 'test_euclidean') [the non-C implementation] on the
+        # Iris data set.
+        eps = 1e-7
+        X = eo['iris']
+        Y_right = eo['pdist-euclidean-iris']
+        Y_test2 = wpdist_no_const(X, 'test_euclidean')
+        assert_allclose(Y_test2, Y_right, rtol=eps)
+
+    def test_pdist_seuclidean_random(self):
+        eps = 1e-7
+        X = eo['pdist-double-inp']
+        Y_right = eo['pdist-seuclidean']
+        Y_test1 = pdist(X, 'seuclidean')
+        assert_allclose(Y_test1, Y_right, rtol=eps)
+
+    def test_pdist_seuclidean_random_float32(self):
+        eps = 1e-7
+        X = np.float32(eo['pdist-double-inp'])
+        Y_right = eo['pdist-seuclidean']
+        Y_test1 = pdist(X, 'seuclidean')
+        assert_allclose(Y_test1, Y_right, rtol=eps)
+
+        # Check no error is raised when V has float32 dtype (#11171).
+        V = np.var(X, axis=0, ddof=1)
+        Y_test2 = pdist(X, 'seuclidean', V=V)
+        assert_allclose(Y_test2, Y_right, rtol=eps)
+
+    def test_pdist_seuclidean_random_nonC(self):
+        # Test pdist(X, 'test_seuclidean') [the non-C implementation]
+        eps = 1e-07
+        X = eo['pdist-double-inp']
+        Y_right = eo['pdist-seuclidean']
+        Y_test2 = pdist(X, 'test_seuclidean')
+        assert_allclose(Y_test2, Y_right, rtol=eps)
+
+    def test_pdist_seuclidean_iris(self):
+        eps = 1e-7
+        X = eo['iris']
+        Y_right = eo['pdist-seuclidean-iris']
+        Y_test1 = pdist(X, 'seuclidean')
+        assert_allclose(Y_test1, Y_right, rtol=eps)
+
+    def test_pdist_seuclidean_iris_float32(self):
+        # Tests pdist(X, 'seuclidean') on the Iris data set (float32).
+        eps = 1e-5
+        X = np.float32(eo['iris'])
+        Y_right = eo['pdist-seuclidean-iris']
+        Y_test1 = pdist(X, 'seuclidean')
+        assert_allclose(Y_test1, Y_right, rtol=eps)
+
+    def test_pdist_seuclidean_iris_nonC(self):
+        # Test pdist(X, 'test_seuclidean') [the non-C implementation] on the
+        # Iris data set.
+        eps = 1e-7
+        X = eo['iris']
+        Y_right = eo['pdist-seuclidean-iris']
+        Y_test2 = pdist(X, 'test_seuclidean')
+        assert_allclose(Y_test2, Y_right, rtol=eps)
+
+    def test_pdist_cosine_random(self):
+        eps = 1e-7
+        X = eo['pdist-double-inp']
+        Y_right = eo['pdist-cosine']
+        Y_test1 = wpdist(X, 'cosine')
+        assert_allclose(Y_test1, Y_right, rtol=eps)
+
+    def test_pdist_cosine_random_float32(self):
+        eps = 1e-7
+        X = np.float32(eo['pdist-double-inp'])
+        Y_right = eo['pdist-cosine']
+        Y_test1 = wpdist(X, 'cosine')
+        assert_allclose(Y_test1, Y_right, rtol=eps)
+
+    def test_pdist_cosine_random_nonC(self):
+        # Test pdist(X, 'test_cosine') [the non-C implementation]
+        eps = 1e-7
+        X = eo['pdist-double-inp']
+        Y_right = eo['pdist-cosine']
+        Y_test2 = wpdist(X, 'test_cosine')
+        assert_allclose(Y_test2, Y_right, rtol=eps)
+
+    @pytest.mark.slow
+    def test_pdist_cosine_iris(self):
+        eps = 1e-05
+        X = eo['iris']
+        Y_right = eo['pdist-cosine-iris']
+        Y_test1 = wpdist(X, 'cosine')
+        assert_allclose(Y_test1, Y_right, atol=eps)
+
+    @pytest.mark.slow
+    def test_pdist_cosine_iris_float32(self):
+        eps = 1e-05
+        X = np.float32(eo['iris'])
+        Y_right = eo['pdist-cosine-iris']
+        Y_test1 = wpdist(X, 'cosine')
+        assert_allclose(Y_test1, Y_right, atol=eps, verbose=verbose > 2)
+
+    @pytest.mark.slow
+    def test_pdist_cosine_iris_nonC(self):
+        eps = 1e-05
+        X = eo['iris']
+        Y_right = eo['pdist-cosine-iris']
+        Y_test2 = wpdist(X, 'test_cosine')
+        assert_allclose(Y_test2, Y_right, atol=eps)
+
+    def test_pdist_cosine_bounds(self):
+        # Test adapted from @joernhees's example at gh-5208: case where
+        # cosine distance used to be negative. XXX: very sensitive to the
+        # specific norm computation.
+        x = np.abs(np.random.RandomState(1337).rand(91))
+        X = np.vstack([x, x])
+        assert_(wpdist(X, 'cosine')[0] >= 0,
+                msg='cosine distance should be non-negative')
+
+    def test_pdist_cityblock_random(self):
+        eps = 1e-7
+        X = eo['pdist-double-inp']
+        Y_right = eo['pdist-cityblock']
+        Y_test1 = wpdist_no_const(X, 'cityblock')
+        assert_allclose(Y_test1, Y_right, rtol=eps)
+
+    def test_pdist_cityblock_random_float32(self):
+        eps = 1e-7
+        X = np.float32(eo['pdist-double-inp'])
+        Y_right = eo['pdist-cityblock']
+        Y_test1 = wpdist_no_const(X, 'cityblock')
+        assert_allclose(Y_test1, Y_right, rtol=eps)
+
+    def test_pdist_cityblock_random_nonC(self):
+        eps = 1e-7
+        X = eo['pdist-double-inp']
+        Y_right = eo['pdist-cityblock']
+        Y_test2 = wpdist_no_const(X, 'test_cityblock')
+        assert_allclose(Y_test2, Y_right, rtol=eps)
+
+    @pytest.mark.slow
+    def test_pdist_cityblock_iris(self):
+        eps = 1e-14
+        X = eo['iris']
+        Y_right = eo['pdist-cityblock-iris']
+        Y_test1 = wpdist_no_const(X, 'cityblock')
+        assert_allclose(Y_test1, Y_right, rtol=eps)
+
+    @pytest.mark.slow
+    def test_pdist_cityblock_iris_float32(self):
+        eps = 1e-5
+        X = np.float32(eo['iris'])
+        Y_right = eo['pdist-cityblock-iris']
+        Y_test1 = wpdist_no_const(X, 'cityblock')
+        assert_allclose(Y_test1, Y_right, rtol=eps, verbose=verbose > 2)
+
+    @pytest.mark.slow
+    def test_pdist_cityblock_iris_nonC(self):
+        # Test pdist(X, 'test_cityblock') [the non-C implementation] on the
+        # Iris data set.
+        eps = 1e-14
+        X = eo['iris']
+        Y_right = eo['pdist-cityblock-iris']
+        Y_test2 = wpdist_no_const(X, 'test_cityblock')
+        assert_allclose(Y_test2, Y_right, rtol=eps)
+
+    def test_pdist_correlation_random(self):
+        eps = 1e-7
+        X = eo['pdist-double-inp']
+        Y_right = eo['pdist-correlation']
+        Y_test1 = wpdist(X, 'correlation')
+        assert_allclose(Y_test1, Y_right, rtol=eps)
+
+    def test_pdist_correlation_random_float32(self):
+        eps = 1e-7
+        X = np.float32(eo['pdist-double-inp'])
+        Y_right = eo['pdist-correlation']
+        Y_test1 = wpdist(X, 'correlation')
+        assert_allclose(Y_test1, Y_right, rtol=eps)
+
+    def test_pdist_correlation_random_nonC(self):
+        eps = 1e-7
+        X = eo['pdist-double-inp']
+        Y_right = eo['pdist-correlation']
+        Y_test2 = wpdist(X, 'test_correlation')
+        assert_allclose(Y_test2, Y_right, rtol=eps)
+
+    @pytest.mark.slow
+    def test_pdist_correlation_iris(self):
+        eps = 1e-7
+        X = eo['iris']
+        Y_right = eo['pdist-correlation-iris']
+        Y_test1 = wpdist(X, 'correlation')
+        assert_allclose(Y_test1, Y_right, rtol=eps)
+
+    @pytest.mark.slow
+    def test_pdist_correlation_iris_float32(self):
+        eps = 1e-7
+        X = eo['iris']
+        Y_right = np.float32(eo['pdist-correlation-iris'])
+        Y_test1 = wpdist(X, 'correlation')
+        assert_allclose(Y_test1, Y_right, rtol=eps, verbose=verbose > 2)
+
+    @pytest.mark.slow
+    def test_pdist_correlation_iris_nonC(self):
+        if sys.maxsize > 2**32:
+            eps = 1e-7
+        else:
+            pytest.skip("see gh-16456")
+        X = eo['iris']
+        Y_right = eo['pdist-correlation-iris']
+        Y_test2 = wpdist(X, 'test_correlation')
+        assert_allclose(Y_test2, Y_right, rtol=eps)
+
+    @pytest.mark.parametrize("p", [0.1, 0.25, 1.0, 2.0, 3.2, np.inf])
+    def test_pdist_minkowski_random_p(self, p):
+        eps = 1e-13
+        X = eo['pdist-double-inp']
+        Y1 = wpdist_no_const(X, 'minkowski', p=p)
+        Y2 = wpdist_no_const(X, 'test_minkowski', p=p)
+        assert_allclose(Y1, Y2, atol=0, rtol=eps)
+
+    def test_pdist_minkowski_random(self):
+        eps = 1e-7
+        X = eo['pdist-double-inp']
+        Y_right = eo['pdist-minkowski-3.2']
+        Y_test1 = wpdist_no_const(X, 'minkowski', p=3.2)
+        assert_allclose(Y_test1, Y_right, rtol=eps)
+
+    def test_pdist_minkowski_random_float32(self):
+        eps = 1e-7
+        X = np.float32(eo['pdist-double-inp'])
+        Y_right = eo['pdist-minkowski-3.2']
+        Y_test1 = wpdist_no_const(X, 'minkowski', p=3.2)
+        assert_allclose(Y_test1, Y_right, rtol=eps)
+
+    def test_pdist_minkowski_random_nonC(self):
+        eps = 1e-7
+        X = eo['pdist-double-inp']
+        Y_right = eo['pdist-minkowski-3.2']
+        Y_test2 = wpdist_no_const(X, 'test_minkowski', p=3.2)
+        assert_allclose(Y_test2, Y_right, rtol=eps)
+
+    @pytest.mark.slow
+    def test_pdist_minkowski_3_2_iris(self):
+        eps = 1e-7
+        X = eo['iris']
+        Y_right = eo['pdist-minkowski-3.2-iris']
+        Y_test1 = wpdist_no_const(X, 'minkowski', p=3.2)
+        assert_allclose(Y_test1, Y_right, rtol=eps)
+
+    @pytest.mark.slow
+    def test_pdist_minkowski_3_2_iris_float32(self):
+        eps = 1e-5
+        X = np.float32(eo['iris'])
+        Y_right = eo['pdist-minkowski-3.2-iris']
+        Y_test1 = wpdist_no_const(X, 'minkowski', p=3.2)
+        assert_allclose(Y_test1, Y_right, rtol=eps)
+
+    @pytest.mark.slow
+    def test_pdist_minkowski_3_2_iris_nonC(self):
+        eps = 1e-7
+        X = eo['iris']
+        Y_right = eo['pdist-minkowski-3.2-iris']
+        Y_test2 = wpdist_no_const(X, 'test_minkowski', p=3.2)
+        assert_allclose(Y_test2, Y_right, rtol=eps)
+
+    @pytest.mark.slow
+    def test_pdist_minkowski_5_8_iris(self):
+        eps = 1e-7
+        X = eo['iris']
+        Y_right = eo['pdist-minkowski-5.8-iris']
+        Y_test1 = wpdist_no_const(X, 'minkowski', p=5.8)
+        assert_allclose(Y_test1, Y_right, rtol=eps)
+
+    @pytest.mark.slow
+    def test_pdist_minkowski_5_8_iris_float32(self):
+        eps = 1e-5
+        X = np.float32(eo['iris'])
+        Y_right = eo['pdist-minkowski-5.8-iris']
+        Y_test1 = wpdist_no_const(X, 'minkowski', p=5.8)
+        assert_allclose(Y_test1, Y_right, rtol=eps, verbose=verbose > 2)
+
+    @pytest.mark.slow
+    def test_pdist_minkowski_5_8_iris_nonC(self):
+        eps = 1e-7
+        X = eo['iris']
+        Y_right = eo['pdist-minkowski-5.8-iris']
+        Y_test2 = wpdist_no_const(X, 'test_minkowski', p=5.8)
+        assert_allclose(Y_test2, Y_right, rtol=eps)
+
+    def test_pdist_mahalanobis(self):
+        # 1-dimensional observations
+        x = np.array([2.0, 2.0, 3.0, 5.0]).reshape(-1, 1)
+        dist = pdist(x, metric='mahalanobis')
+        assert_allclose(dist, [0.0, np.sqrt(0.5), np.sqrt(4.5),
+                               np.sqrt(0.5), np.sqrt(4.5), np.sqrt(2.0)])
+
+        # 2-dimensional observations
+        x = np.array([[0, 0], [-1, 0], [0, 2], [1, 0], [0, -2]])
+        dist = pdist(x, metric='mahalanobis')
+        rt2 = np.sqrt(2)
+        assert_allclose(dist, [rt2, rt2, rt2, rt2, 2, 2 * rt2, 2, 2, 2 * rt2, 2])
+
+        # Too few observations
+        assert_raises(ValueError,
+                      wpdist, [[0, 1], [2, 3]], metric='mahalanobis')
+
+    def test_pdist_hamming_random(self):
+        eps = 1e-15
+        X = eo['pdist-boolean-inp']
+        Y_right = eo['pdist-hamming']
+        Y_test1 = wpdist(X, 'hamming')
+        assert_allclose(Y_test1, Y_right, rtol=eps)
+
+    def test_pdist_hamming_random_float32(self):
+        eps = 1e-15
+        X = np.float32(eo['pdist-boolean-inp'])
+        Y_right = eo['pdist-hamming']
+        Y_test1 = wpdist(X, 'hamming')
+        assert_allclose(Y_test1, Y_right, rtol=eps)
+
+    def test_pdist_hamming_random_nonC(self):
+        eps = 1e-15
+        X = eo['pdist-boolean-inp']
+        Y_right = eo['pdist-hamming']
+        Y_test2 = wpdist(X, 'test_hamming')
+        assert_allclose(Y_test2, Y_right, rtol=eps)
+
+    def test_pdist_dhamming_random(self):
+        eps = 1e-15
+        X = np.float64(eo['pdist-boolean-inp'])
+        Y_right = eo['pdist-hamming']
+        Y_test1 = wpdist(X, 'hamming')
+        assert_allclose(Y_test1, Y_right, rtol=eps)
+
+    def test_pdist_dhamming_random_float32(self):
+        eps = 1e-15
+        X = np.float32(eo['pdist-boolean-inp'])
+        Y_right = eo['pdist-hamming']
+        Y_test1 = wpdist(X, 'hamming')
+        assert_allclose(Y_test1, Y_right, rtol=eps)
+
+    def test_pdist_dhamming_random_nonC(self):
+        eps = 1e-15
+        X = np.float64(eo['pdist-boolean-inp'])
+        Y_right = eo['pdist-hamming']
+        Y_test2 = wpdist(X, 'test_hamming')
+        assert_allclose(Y_test2, Y_right, rtol=eps)
+
+    def test_pdist_jaccard_random(self):
+        eps = 1e-8
+        X = eo['pdist-boolean-inp']
+        Y_right = eo['pdist-jaccard']
+        Y_test1 = wpdist(X, 'jaccard')
+        assert_allclose(Y_test1, Y_right, rtol=eps)
+
+    def test_pdist_jaccard_random_float32(self):
+        eps = 1e-8
+        X = np.float32(eo['pdist-boolean-inp'])
+        Y_right = eo['pdist-jaccard']
+        Y_test1 = wpdist(X, 'jaccard')
+        assert_allclose(Y_test1, Y_right, rtol=eps)
+
+    def test_pdist_jaccard_random_nonC(self):
+        eps = 1e-8
+        X = eo['pdist-boolean-inp']
+        Y_right = eo['pdist-jaccard']
+        Y_test2 = wpdist(X, 'test_jaccard')
+        assert_allclose(Y_test2, Y_right, rtol=eps)
+
+    def test_pdist_djaccard_random(self):
+        eps = 1e-8
+        X = np.float64(eo['pdist-boolean-inp'])
+        Y_right = eo['pdist-jaccard']
+        Y_test1 = wpdist(X, 'jaccard')
+        assert_allclose(Y_test1, Y_right, rtol=eps)
+
+    def test_pdist_djaccard_random_float32(self):
+        eps = 1e-8
+        X = np.float32(eo['pdist-boolean-inp'])
+        Y_right = eo['pdist-jaccard']
+        Y_test1 = wpdist(X, 'jaccard')
+        assert_allclose(Y_test1, Y_right, rtol=eps)
+
+    def test_pdist_djaccard_allzeros(self):
+        eps = 1e-15
+        Y = pdist(np.zeros((5, 3)), 'jaccard')
+        assert_allclose(np.zeros(10), Y, rtol=eps)
+
+    def test_pdist_djaccard_random_nonC(self):
+        eps = 1e-8
+        X = np.float64(eo['pdist-boolean-inp'])
+        Y_right = eo['pdist-jaccard']
+        Y_test2 = wpdist(X, 'test_jaccard')
+        assert_allclose(Y_test2, Y_right, rtol=eps)
+
+    def test_pdist_jensenshannon_random(self):
+        eps = 1e-11
+        X = eo['pdist-double-inp']
+        Y_right = eo['pdist-jensenshannon']
+        Y_test1 = pdist(X, 'jensenshannon')
+        assert_allclose(Y_test1, Y_right, rtol=eps)
+
+    def test_pdist_jensenshannon_random_float32(self):
+        eps = 1e-8
+        X = np.float32(eo['pdist-double-inp'])
+        Y_right = eo['pdist-jensenshannon']
+        Y_test1 = pdist(X, 'jensenshannon')
+        assert_allclose(Y_test1, Y_right, rtol=eps, verbose=verbose > 2)
+
+    def test_pdist_jensenshannon_random_nonC(self):
+        eps = 1e-11
+        X = eo['pdist-double-inp']
+        Y_right = eo['pdist-jensenshannon']
+        Y_test2 = pdist(X, 'test_jensenshannon')
+        assert_allclose(Y_test2, Y_right, rtol=eps)
+
+    def test_pdist_jensenshannon_iris(self):
+        if _is_32bit():
+            # Test failing on 32-bit Linux on Azure otherwise, see gh-12810
+            eps = 2.5e-10
+        else:
+            eps = 1e-12
+
+        X = eo['iris']
+        Y_right = eo['pdist-jensenshannon-iris']
+        Y_test1 = pdist(X, 'jensenshannon')
+        assert_allclose(Y_test1, Y_right, atol=eps)
+
+    def test_pdist_jensenshannon_iris_float32(self):
+        eps = 1e-06
+        X = np.float32(eo['iris'])
+        Y_right = eo['pdist-jensenshannon-iris']
+        Y_test1 = pdist(X, 'jensenshannon')
+        assert_allclose(Y_test1, Y_right, atol=eps, verbose=verbose > 2)
+
+    def test_pdist_jensenshannon_iris_nonC(self):
+        eps = 5e-5
+        X = eo['iris']
+        Y_right = eo['pdist-jensenshannon-iris']
+        Y_test2 = pdist(X, 'test_jensenshannon')
+        assert_allclose(Y_test2, Y_right, rtol=eps)
+
+    def test_pdist_djaccard_allzeros_nonC(self):
+        eps = 1e-15
+        Y = pdist(np.zeros((5, 3)), 'test_jaccard')
+        assert_allclose(np.zeros(10), Y, rtol=eps)
+
+    def test_pdist_chebyshev_random(self):
+        eps = 1e-8
+        X = eo['pdist-double-inp']
+        Y_right = eo['pdist-chebyshev']
+        Y_test1 = pdist(X, 'chebyshev')
+        assert_allclose(Y_test1, Y_right, rtol=eps)
+
+    def test_pdist_chebyshev_random_float32(self):
+        eps = 1e-7
+        X = np.float32(eo['pdist-double-inp'])
+        Y_right = eo['pdist-chebyshev']
+        Y_test1 = pdist(X, 'chebyshev')
+        assert_allclose(Y_test1, Y_right, rtol=eps, verbose=verbose > 2)
+
+    def test_pdist_chebyshev_random_nonC(self):
+        eps = 1e-8
+        X = eo['pdist-double-inp']
+        Y_right = eo['pdist-chebyshev']
+        Y_test2 = pdist(X, 'test_chebyshev')
+        assert_allclose(Y_test2, Y_right, rtol=eps)
+
+    def test_pdist_chebyshev_iris(self):
+        eps = 1e-14
+        X = eo['iris']
+        Y_right = eo['pdist-chebyshev-iris']
+        Y_test1 = pdist(X, 'chebyshev')
+        assert_allclose(Y_test1, Y_right, rtol=eps)
+
+    def test_pdist_chebyshev_iris_float32(self):
+        eps = 1e-5
+        X = np.float32(eo['iris'])
+        Y_right = eo['pdist-chebyshev-iris']
+        Y_test1 = pdist(X, 'chebyshev')
+        assert_allclose(Y_test1, Y_right, rtol=eps, verbose=verbose > 2)
+
+    def test_pdist_chebyshev_iris_nonC(self):
+        eps = 1e-14
+        X = eo['iris']
+        Y_right = eo['pdist-chebyshev-iris']
+        Y_test2 = pdist(X, 'test_chebyshev')
+        assert_allclose(Y_test2, Y_right, rtol=eps)
+
+    def test_pdist_matching_mtica1(self):
+        # Test matching(*,*) with mtica example #1 (nums).
+        m = wmatching(np.array([1, 0, 1, 1, 0]),
+                      np.array([1, 1, 0, 1, 1]))
+        m2 = wmatching(np.array([1, 0, 1, 1, 0], dtype=bool),
+                       np.array([1, 1, 0, 1, 1], dtype=bool))
+        assert_allclose(m, 0.6, rtol=0, atol=1e-10)
+        assert_allclose(m2, 0.6, rtol=0, atol=1e-10)
+
+    def test_pdist_matching_mtica2(self):
+        # Test matching(*,*) with mtica example #2.
+        m = wmatching(np.array([1, 0, 1]),
+                      np.array([1, 1, 0]))
+        m2 = wmatching(np.array([1, 0, 1], dtype=bool),
+                       np.array([1, 1, 0], dtype=bool))
+        assert_allclose(m, 2 / 3, rtol=0, atol=1e-10)
+        assert_allclose(m2, 2 / 3, rtol=0, atol=1e-10)
+
+    def test_pdist_jaccard_mtica1(self):
+        m = wjaccard(np.array([1, 0, 1, 1, 0]),
+                     np.array([1, 1, 0, 1, 1]))
+        m2 = wjaccard(np.array([1, 0, 1, 1, 0], dtype=bool),
+                      np.array([1, 1, 0, 1, 1], dtype=bool))
+        assert_allclose(m, 0.6, rtol=0, atol=1e-10)
+        assert_allclose(m2, 0.6, rtol=0, atol=1e-10)
+
+    def test_pdist_jaccard_mtica2(self):
+        m = wjaccard(np.array([1, 0, 1]),
+                     np.array([1, 1, 0]))
+        m2 = wjaccard(np.array([1, 0, 1], dtype=bool),
+                      np.array([1, 1, 0], dtype=bool))
+        assert_allclose(m, 2 / 3, rtol=0, atol=1e-10)
+        assert_allclose(m2, 2 / 3, rtol=0, atol=1e-10)
+
+    def test_pdist_yule_mtica1(self):
+        m = wyule(np.array([1, 0, 1, 1, 0]),
+                  np.array([1, 1, 0, 1, 1]))
+        m2 = wyule(np.array([1, 0, 1, 1, 0], dtype=bool),
+                   np.array([1, 1, 0, 1, 1], dtype=bool))
+        if verbose > 2:
+            print(m)
+        assert_allclose(m, 2, rtol=0, atol=1e-10)
+        assert_allclose(m2, 2, rtol=0, atol=1e-10)
+
+    def test_pdist_yule_mtica2(self):
+        m = wyule(np.array([1, 0, 1]),
+                  np.array([1, 1, 0]))
+        m2 = wyule(np.array([1, 0, 1], dtype=bool),
+                   np.array([1, 1, 0], dtype=bool))
+        if verbose > 2:
+            print(m)
+        assert_allclose(m, 2, rtol=0, atol=1e-10)
+        assert_allclose(m2, 2, rtol=0, atol=1e-10)
+
+    def test_pdist_dice_mtica1(self):
+        m = wdice(np.array([1, 0, 1, 1, 0]),
+                  np.array([1, 1, 0, 1, 1]))
+        m2 = wdice(np.array([1, 0, 1, 1, 0], dtype=bool),
+                   np.array([1, 1, 0, 1, 1], dtype=bool))
+        if verbose > 2:
+            print(m)
+        assert_allclose(m, 3 / 7, rtol=0, atol=1e-10)
+        assert_allclose(m2, 3 / 7, rtol=0, atol=1e-10)
+
+    def test_pdist_dice_mtica2(self):
+        m = wdice(np.array([1, 0, 1]),
+                  np.array([1, 1, 0]))
+        m2 = wdice(np.array([1, 0, 1], dtype=bool),
+                   np.array([1, 1, 0], dtype=bool))
+        if verbose > 2:
+            print(m)
+        assert_allclose(m, 0.5, rtol=0, atol=1e-10)
+        assert_allclose(m2, 0.5, rtol=0, atol=1e-10)
+
+    def test_pdist_sokalsneath_mtica1(self):
+        m = sokalsneath(np.array([1, 0, 1, 1, 0]),
+                        np.array([1, 1, 0, 1, 1]))
+        m2 = sokalsneath(np.array([1, 0, 1, 1, 0], dtype=bool),
+                         np.array([1, 1, 0, 1, 1], dtype=bool))
+        if verbose > 2:
+            print(m)
+        assert_allclose(m, 3 / 4, rtol=0, atol=1e-10)
+        assert_allclose(m2, 3 / 4, rtol=0, atol=1e-10)
+
+    def test_pdist_sokalsneath_mtica2(self):
+        m = wsokalsneath(np.array([1, 0, 1]),
+                         np.array([1, 1, 0]))
+        m2 = wsokalsneath(np.array([1, 0, 1], dtype=bool),
+                          np.array([1, 1, 0], dtype=bool))
+        if verbose > 2:
+            print(m)
+        assert_allclose(m, 4 / 5, rtol=0, atol=1e-10)
+        assert_allclose(m2, 4 / 5, rtol=0, atol=1e-10)
+
+    def test_pdist_rogerstanimoto_mtica1(self):
+        m = wrogerstanimoto(np.array([1, 0, 1, 1, 0]),
+                            np.array([1, 1, 0, 1, 1]))
+        m2 = wrogerstanimoto(np.array([1, 0, 1, 1, 0], dtype=bool),
+                             np.array([1, 1, 0, 1, 1], dtype=bool))
+        if verbose > 2:
+            print(m)
+        assert_allclose(m, 3 / 4, rtol=0, atol=1e-10)
+        assert_allclose(m2, 3 / 4, rtol=0, atol=1e-10)
+
+    def test_pdist_rogerstanimoto_mtica2(self):
+        m = wrogerstanimoto(np.array([1, 0, 1]),
+                            np.array([1, 1, 0]))
+        m2 = wrogerstanimoto(np.array([1, 0, 1], dtype=bool),
+                             np.array([1, 1, 0], dtype=bool))
+        if verbose > 2:
+            print(m)
+        assert_allclose(m, 4 / 5, rtol=0, atol=1e-10)
+        assert_allclose(m2, 4 / 5, rtol=0, atol=1e-10)
+
+    def test_pdist_russellrao_mtica1(self):
+        m = wrussellrao(np.array([1, 0, 1, 1, 0]),
+                        np.array([1, 1, 0, 1, 1]))
+        m2 = wrussellrao(np.array([1, 0, 1, 1, 0], dtype=bool),
+                         np.array([1, 1, 0, 1, 1], dtype=bool))
+        if verbose > 2:
+            print(m)
+        assert_allclose(m, 3 / 5, rtol=0, atol=1e-10)
+        assert_allclose(m2, 3 / 5, rtol=0, atol=1e-10)
+
+    def test_pdist_russellrao_mtica2(self):
+        m = wrussellrao(np.array([1, 0, 1]),
+                        np.array([1, 1, 0]))
+        m2 = wrussellrao(np.array([1, 0, 1], dtype=bool),
+                         np.array([1, 1, 0], dtype=bool))
+        if verbose > 2:
+            print(m)
+        assert_allclose(m, 2 / 3, rtol=0, atol=1e-10)
+        assert_allclose(m2, 2 / 3, rtol=0, atol=1e-10)
+
+    @pytest.mark.slow
+    def test_pdist_canberra_match(self):
+        D = eo['iris']
+        if verbose > 2:
+            print(D.shape, D.dtype)
+        eps = 1e-15
+        y1 = wpdist_no_const(D, "canberra")
+        y2 = wpdist_no_const(D, "test_canberra")
+        assert_allclose(y1, y2, rtol=eps, verbose=verbose > 2)
+
+    def test_pdist_canberra_ticket_711(self):
+        # Test pdist(X, 'canberra') to see if Canberra gives the right result
+        # as reported on gh-1238.
+        eps = 1e-8
+        pdist_y = wpdist_no_const(([3.3], [3.4]), "canberra")
+        right_y = 0.01492537
+        assert_allclose(pdist_y, right_y, atol=eps, verbose=verbose > 2)
+
+    def test_pdist_custom_notdouble(self):
+        # tests that when using a custom metric the data type is not altered
+        class myclass:
+            pass
+
+        def _my_metric(x, y):
+            if not isinstance(x[0], myclass) or not isinstance(y[0], myclass):
+                raise ValueError("Type has been changed")
+            return 1.123
+        data = np.array([[myclass()], [myclass()]], dtype=object)
+        pdist_y = pdist(data, metric=_my_metric)
+        right_y = 1.123
+        assert_equal(pdist_y, right_y, verbose=verbose > 2)
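+        # Note on the callable-metric API exercised here: pdist invokes the
+        # callable once per pair of rows, as metric(X[i], X[j]) for i < j,
+        # and expects a scalar back; for object-dtype input the rows reach
+        # the callable with their original dtype, which is what the check
+        # above relies on.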
+
+    def _check_calling_conventions(self, X, metric, eps=1e-07, **kwargs):
+        # helper function for test_pdist_calling_conventions
+        try:
+            y1 = pdist(X, metric=metric, **kwargs)
+            y2 = pdist(X, metric=eval(metric), **kwargs)
+            y3 = pdist(X, metric="test_" + metric, **kwargs)
+        except Exception as e:
+            e_cls = e.__class__
+            if verbose > 2:
+                print(e_cls.__name__)
+                print(e)
+            assert_raises(e_cls, pdist, X, metric=metric, **kwargs)
+            assert_raises(e_cls, pdist, X, metric=eval(metric), **kwargs)
+            assert_raises(e_cls, pdist, X, metric="test_" + metric, **kwargs)
+        else:
+            assert_allclose(y1, y2, rtol=eps, verbose=verbose > 2)
+            assert_allclose(y1, y3, rtol=eps, verbose=verbose > 2)
+
+    def test_pdist_calling_conventions(self):
+        # Ensures that specifying the metric with a str or scipy function
+        # gives the same behaviour (i.e. same result or same exception).
+        # NOTE: The correctness should be checked within each metric's tests.
+        # NOTE: Extra args should be checked with a dedicated test
+        for eo_name in self.rnd_eo_names:
+            # subsample the input data to speed up the tests
+            # NOTE: the number of samples must exceed the number of
+            # dimensions for mahalanobis
+            X = eo[eo_name][::5, ::2]
+            for metric in _METRICS_NAMES:
+                if verbose > 2:
+                    print("testing: ", metric, " with: ", eo_name)
+                if metric in {'dice', 'yule', 'kulsinski', 'matching',
+                              'rogerstanimoto', 'russellrao', 'sokalmichener',
+                              'sokalsneath',
+                              'kulczynski1'} and 'bool' not in eo_name:
+                    # the Python version permits non-bools, e.g. for fuzzy logic
+                    continue
+                with np.testing.suppress_warnings() as sup:
+                    if metric == "kulsinski":
+                        sup.filter(DeprecationWarning,
+                                   "Kulsinski has been deprecated from")
+                    self._check_calling_conventions(X, metric)
+
+                # Testing built-in metrics with extra args
+                if metric == "seuclidean":
+                    V = np.var(X.astype(np.double), axis=0, ddof=1)
+                    self._check_calling_conventions(X, metric, V=V)
+                elif metric == "mahalanobis":
+                    V = np.atleast_2d(np.cov(X.astype(np.double).T))
+                    VI = np.array(np.linalg.inv(V).T)
+                    self._check_calling_conventions(X, metric, VI=VI)
+
+    def test_pdist_dtype_equivalence(self):
+        # Tests that the result is not affected by type up-casting
+        eps = 1e-07
+        tests = [(eo['random-bool-data'], self.valid_upcasts['bool']),
+                 (eo['random-uint-data'], self.valid_upcasts['uint']),
+                 (eo['random-int-data'], self.valid_upcasts['int']),
+                 (eo['random-float32-data'], self.valid_upcasts['float32'])]
+        for metric in _METRICS_NAMES:
+            for test in tests:
+                X1 = test[0][::5, ::2]
+                try:
+                    y1 = pdist(X1, metric=metric)
+                except Exception as e:
+                    e_cls = e.__class__
+                    if verbose > 2:
+                        print(e_cls.__name__)
+                        print(e)
+                    for new_type in test[1]:
+                        X2 = new_type(X1)
+                        assert_raises(e_cls, pdist, X2, metric=metric)
+                else:
+                    for new_type in test[1]:
+                        y2 = pdist(new_type(X1), metric=metric)
+                        assert_allclose(y1, y2, rtol=eps, verbose=verbose > 2)
+
+    def test_pdist_out(self):
+        # Test that the out parameter works properly
+        eps = 1e-15
+        X = eo['random-float32-data'][::5, ::2]
+        out_size = int((X.shape[0] * (X.shape[0] - 1)) / 2)
+        for metric in _METRICS_NAMES:
+            kwargs = dict()
+            if metric == 'minkowski':
+                kwargs['p'] = 1.23
+            out1 = np.empty(out_size, dtype=np.double)
+            Y_right = pdist(X, metric, **kwargs)
+            Y_test1 = pdist(X, metric, out=out1, **kwargs)
+            # test that output is numerically equivalent
+            assert_allclose(Y_test1, Y_right, rtol=eps)
+            # test that Y_test1 and out1 are the same object
+            assert_(Y_test1 is out1)
+            # test for incorrect shape
+            out2 = np.empty(out_size + 3, dtype=np.double)
+            assert_raises(ValueError, pdist, X, metric, out=out2, **kwargs)
+            # test for (C-)contiguous output
+            out3 = np.empty(2 * out_size, dtype=np.double)[::2]
+            assert_raises(ValueError, pdist, X, metric, out=out3, **kwargs)
+            # test for incorrect dtype
+            out5 = np.empty(out_size, dtype=np.int64)
+            assert_raises(ValueError, pdist, X, metric, out=out5, **kwargs)
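+        # Summary of the ``out`` contract exercised above: the buffer must
+        # be a C-contiguous float64 array of length n*(n-1)/2; pdist writes
+        # the condensed distances into it and returns that same buffer
+        # rather than a fresh allocation.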
+
+    def test_striding(self):
+        # test that striding is handled correctly with calls to
+        # _copy_array_if_base_present
+        eps = 1e-15
+        X = eo['random-float32-data'][::5, ::2]
+        X_copy = X.copy()
+
+        # confirm contiguity
+        assert_(not X.flags.c_contiguous)
+        assert_(X_copy.flags.c_contiguous)
+
+        for metric in _METRICS_NAMES:
+            kwargs = dict()
+            if metric == 'minkowski':
+                kwargs['p'] = 1.23
+            Y1 = pdist(X, metric, **kwargs)
+            Y2 = pdist(X_copy, metric, **kwargs)
+            # test that output is numerically equivalent
+            assert_allclose(Y1, Y2, rtol=eps, verbose=verbose > 2)
+
+class TestSomeDistanceFunctions:
+
+    def setup_method(self):
+        # 1D arrays
+        x = np.array([1.0, 2.0, 3.0])
+        y = np.array([1.0, 1.0, 5.0])
+
+        self.cases = [(x, y)]
+
+    def test_minkowski(self):
+        for x, y in self.cases:
+            dist1 = minkowski(x, y, p=1)
+            assert_almost_equal(dist1, 3.0)
+            dist1p5 = minkowski(x, y, p=1.5)
+            assert_almost_equal(dist1p5, (1.0 + 2.0**1.5)**(2. / 3))
+            dist2 = minkowski(x, y, p=2)
+            assert_almost_equal(dist2, 5.0 ** 0.5)
+            dist0p25 = minkowski(x, y, p=0.25)
+            assert_almost_equal(dist0p25, (1.0 + 2.0 ** 0.25) ** 4)
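+        # Worked arithmetic for the p = 1.5 case: |x - y| = (0, 1, 2), so
+        # sum(|x - y|**1.5) = 1 + 2**1.5 and the distance is that sum raised
+        # to 1/1.5 = 2/3. The p = 0.25 case is analogous with exponent
+        # 1/0.25 = 4.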
+
+        # Check that casting input to minimum scalar type doesn't affect result
+        # (issue #10262). This could be extended to more test inputs with
+        # np.min_scalar_type(np.max(input_matrix)).
+        a = np.array([352, 916])
+        b = np.array([350, 660])
+        assert_equal(minkowski(a, b),
+                     minkowski(a.astype('uint16'), b.astype('uint16')))
+
+    def test_euclidean(self):
+        for x, y in self.cases:
+            dist = weuclidean(x, y)
+            assert_almost_equal(dist, np.sqrt(5))
+
+    def test_sqeuclidean(self):
+        for x, y in self.cases:
+            dist = wsqeuclidean(x, y)
+            assert_almost_equal(dist, 5.0)
+
+    def test_cosine(self):
+        for x, y in self.cases:
+            dist = wcosine(x, y)
+            assert_almost_equal(dist, 1.0 - 18.0 / (np.sqrt(14) * np.sqrt(27)))
+
+    def test_correlation(self):
+        xm = np.array([-1.0, 0, 1.0])
+        ym = np.array([-4.0 / 3, -4.0 / 3, 5.0 - 7.0 / 3])
+        for x, y in self.cases:
+            dist = wcorrelation(x, y)
+            assert_almost_equal(dist, 1.0 - np.dot(xm, ym) / (norm(xm) * norm(ym)))
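+        # xm and ym are x and y with their means (2 and 7/3) subtracted, so
+        # the reference value is the textbook definition: correlation
+        # distance = 1 - cosine of the angle between the centered vectors.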
+
+    def test_correlation_positive(self):
+        # Regression test for gh-12320 (negative return value due to rounding).
+        x = np.array([0., 0., 0., 0., 0., 0., -2., 0., 0., 0., -2., -2., -2.,
+                      0., -2., 0., -2., 0., 0., -1., -2., 0., 1., 0., 0., -2.,
+                      0., 0., -2., 0., -2., -2., -2., -2., -2., -2., 0.])
+        y = np.array([1., 1., 1., 1., 1., 1., -1., 1., 1., 1., -1., -1., -1.,
+                      1., -1., 1., -1., 1., 1., 0., -1., 1., 2., 1., 1., -1.,
+                      1., 1., -1., 1., -1., -1., -1., -1., -1., -1., 1.])
+        dist = correlation(x, y)
+        assert 0 <= dist <= 10 * np.finfo(np.float64).eps
+
+    def test_mahalanobis(self):
+        vi = np.array([[2.0, 1.0, 0.0], [1.0, 2.0, 1.0], [0.0, 1.0, 2.0]])
+        for x, y in self.cases:
+            dist = mahalanobis(x, y, vi)
+            assert_almost_equal(dist, np.sqrt(6.0))
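+        # Worked arithmetic: delta = x - y = (0, 1, -2), vi @ delta =
+        # (1, 0, -3), and delta . (vi @ delta) = 6, hence sqrt(6).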
+
+
+class TestSquareForm:
+    checked_dtypes = [np.float64, np.float32, np.int32, np.int8, bool]
+
+    def test_squareform_matrix(self):
+        for dtype in self.checked_dtypes:
+            self.check_squareform_matrix(dtype)
+
+    def test_squareform_vector(self):
+        for dtype in self.checked_dtypes:
+            self.check_squareform_vector(dtype)
+
+    def check_squareform_matrix(self, dtype):
+        A = np.zeros((0, 0), dtype=dtype)
+        rA = squareform(A)
+        assert_equal(rA.shape, (0,))
+        assert_equal(rA.dtype, dtype)
+
+        A = np.zeros((1, 1), dtype=dtype)
+        rA = squareform(A)
+        assert_equal(rA.shape, (0,))
+        assert_equal(rA.dtype, dtype)
+
+        A = np.array([[0, 4.2], [4.2, 0]], dtype=dtype)
+        rA = squareform(A)
+        assert_equal(rA.shape, (1,))
+        assert_equal(rA.dtype, dtype)
+        assert_array_equal(rA, np.array([4.2], dtype=dtype))
+
+    def check_squareform_vector(self, dtype):
+        v = np.zeros((0,), dtype=dtype)
+        rv = squareform(v)
+        assert_equal(rv.shape, (1, 1))
+        assert_equal(rv.dtype, dtype)
+        assert_array_equal(rv, [[0]])
+
+        v = np.array([8.3], dtype=dtype)
+        rv = squareform(v)
+        assert_equal(rv.shape, (2, 2))
+        assert_equal(rv.dtype, dtype)
+        assert_array_equal(rv, np.array([[0, 8.3], [8.3, 0]], dtype=dtype))
+
+    def test_squareform_multi_matrix(self):
+        for n in range(2, 5):
+            self.check_squareform_multi_matrix(n)
+
+    def check_squareform_multi_matrix(self, n):
+        X = np.random.rand(n, 4)
+        Y = wpdist_no_const(X)
+        assert_equal(len(Y.shape), 1)
+        A = squareform(Y)
+        Yr = squareform(A)
+        s = A.shape
+        k = 0
+        if verbose >= 3:
+            print(A.shape, Y.shape, Yr.shape)
+        assert_equal(len(s), 2)
+        assert_equal(len(Yr.shape), 1)
+        assert_equal(s[0], s[1])
+        for i in range(0, s[0]):
+            for j in range(i + 1, s[1]):
+                if i != j:
+                    assert_equal(A[i, j], Y[k])
+                    k += 1
+                else:
+                    assert_equal(A[i, j], 0)
+
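+
+# Illustrative sketch (helper name is hypothetical; not part of the upstream
+# scipy test suite): squareform converts between a condensed distance vector
+# of length n*(n-1)/2 and the corresponding n-by-n symmetric matrix with a
+# zero diagonal, as this round trip shows for n = 3.
+def _squareform_roundtrip_demo():
+    import numpy as np
+    from scipy.spatial.distance import squareform
+    y = np.array([1.0, 2.0, 3.0])   # condensed: d(0,1), d(0,2), d(1,2)
+    D = squareform(y)               # 3x3 symmetric matrix, zero diagonal
+    assert D.shape == (3, 3) and D[0, 1] == 1.0 and D[1, 2] == 3.0
+    assert np.array_equal(squareform(D), y)  # the round trip recovers y
+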
+
+class TestNumObsY:
+
+    def test_num_obs_y_multi_matrix(self):
+        for n in range(2, 10):
+            X = np.random.rand(n, 4)
+            Y = wpdist_no_const(X)
+            assert_equal(num_obs_y(Y), n)
+
+    def test_num_obs_y_1(self):
+        # Tests num_obs_y(y) on a condensed distance matrix over 1
+        # observation. Expecting an exception.
+        assert_raises(ValueError, self.check_y, 1)
+
+    def test_num_obs_y_2(self):
+        # Tests num_obs_y(y) on a condensed distance matrix over 2
+        # observations.
+        assert_(self.check_y(2))
+
+    def test_num_obs_y_3(self):
+        assert_(self.check_y(3))
+
+    def test_num_obs_y_4(self):
+        assert_(self.check_y(4))
+
+    def test_num_obs_y_5_10(self):
+        for i in range(5, 16):
+            self.minit(i)
+
+    def test_num_obs_y_2_100(self):
+        # Tests num_obs_y(y) on improper condensed distance matrix lengths
+        # in range(5, 105), expecting an exception for each.
+        a = set()
+        for n in range(2, 16):
+            a.add(n * (n - 1) / 2)
+        for i in range(5, 105):
+            if i not in a:
+                assert_raises(ValueError, self.bad_y, i)
+
+    def minit(self, n):
+        assert_(self.check_y(n))
+
+    def bad_y(self, n):
+        y = np.random.rand(n)
+        return num_obs_y(y)
+
+    def check_y(self, n):
+        return num_obs_y(self.make_y(n)) == n
+
+    def make_y(self, n):
+        return np.random.rand((n * (n - 1)) // 2)
+
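+
+# Illustrative sketch (hypothetical helper, not part of the upstream tests):
+# a condensed vector over n observations has m = n*(n-1)/2 entries, so
+# num_obs_y must invert that relation, n = (1 + sqrt(1 + 8*m)) / 2, and
+# reject any length m for which that n is not a whole number.
+def _num_obs_y_inverse_demo():
+    import math
+    import numpy as np
+    from scipy.spatial.distance import num_obs_y
+    for n in range(2, 8):
+        m = n * (n - 1) // 2
+        assert num_obs_y(np.zeros(m)) == n
+        # closed-form inverse: 1 + 8*m == (2*n - 1)**2
+        assert n == (1 + math.isqrt(1 + 8 * m)) // 2
+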
+
+class TestNumObsDM:
+
+    def test_num_obs_dm_multi_matrix(self):
+        for n in range(1, 10):
+            X = np.random.rand(n, 4)
+            Y = wpdist_no_const(X)
+            A = squareform(Y)
+            if verbose >= 3:
+                print(A.shape, Y.shape)
+            assert_equal(num_obs_dm(A), n)
+
+    def test_num_obs_dm_0(self):
+        # Tests num_obs_dm(D) on a 0x0 distance matrix, which is accepted
+        # (num_obs_dm returns 0), so no exception is expected.
+        assert_(self.check_D(0))
+
+    def test_num_obs_dm_1(self):
+        # Tests num_obs_dm(D) on a 1x1 distance matrix.
+        assert_(self.check_D(1))
+
+    def test_num_obs_dm_2(self):
+        assert_(self.check_D(2))
+
+    def test_num_obs_dm_3(self):
+        assert_(self.check_D(3))
+
+    def test_num_obs_dm_4(self):
+        assert_(self.check_D(4))
+
+    def check_D(self, n):
+        return num_obs_dm(self.make_D(n)) == n
+
+    def make_D(self, n):
+        return np.random.rand(n, n)
+
+
+def is_valid_dm_throw(D):
+    return is_valid_dm(D, throw=True)
+
+
+class TestIsValidDM:
+
+    def test_is_valid_dm_improper_shape_1D_E(self):
+        D = np.zeros((5,), dtype=np.double)
+        assert_raises(ValueError, is_valid_dm_throw, D)
+
+    def test_is_valid_dm_improper_shape_1D_F(self):
+        D = np.zeros((5,), dtype=np.double)
+        assert_equal(is_valid_dm(D), False)
+
+    def test_is_valid_dm_improper_shape_3D_E(self):
+        D = np.zeros((3, 3, 3), dtype=np.double)
+        assert_raises(ValueError, is_valid_dm_throw, D)
+
+    def test_is_valid_dm_improper_shape_3D_F(self):
+        D = np.zeros((3, 3, 3), dtype=np.double)
+        assert_equal(is_valid_dm(D), False)
+
+    def test_is_valid_dm_nonzero_diagonal_E(self):
+        y = np.random.rand(10)
+        D = squareform(y)
+        for i in range(0, 5):
+            D[i, i] = 2.0
+        assert_raises(ValueError, is_valid_dm_throw, D)
+
+    def test_is_valid_dm_nonzero_diagonal_F(self):
+        y = np.random.rand(10)
+        D = squareform(y)
+        for i in range(0, 5):
+            D[i, i] = 2.0
+        assert_equal(is_valid_dm(D), False)
+
+    def test_is_valid_dm_asymmetric_E(self):
+        y = np.random.rand(10)
+        D = squareform(y)
+        D[1, 3] = D[3, 1] + 1
+        assert_raises(ValueError, is_valid_dm_throw, D)
+
+    def test_is_valid_dm_asymmetric_F(self):
+        y = np.random.rand(10)
+        D = squareform(y)
+        D[1, 3] = D[3, 1] + 1
+        assert_equal(is_valid_dm(D), False)
+
+    def test_is_valid_dm_correct_1_by_1(self):
+        D = np.zeros((1, 1), dtype=np.double)
+        assert_equal(is_valid_dm(D), True)
+
+    def test_is_valid_dm_correct_2_by_2(self):
+        y = np.random.rand(1)
+        D = squareform(y)
+        assert_equal(is_valid_dm(D), True)
+
+    def test_is_valid_dm_correct_3_by_3(self):
+        y = np.random.rand(3)
+        D = squareform(y)
+        assert_equal(is_valid_dm(D), True)
+
+    def test_is_valid_dm_correct_4_by_4(self):
+        y = np.random.rand(6)
+        D = squareform(y)
+        assert_equal(is_valid_dm(D), True)
+
+    def test_is_valid_dm_correct_5_by_5(self):
+        y = np.random.rand(10)
+        D = squareform(y)
+        assert_equal(is_valid_dm(D), True)
+
+
+def is_valid_y_throw(y):
+    return is_valid_y(y, throw=True)
+
+
+class TestIsValidY:
+    # If a test case name ends in "_E" then an exception is expected for the
+    # given input; if it ends in "_F" then False is expected from the
+    # is_valid_y check. Otherwise the input is expected to be valid.
+
+    def test_is_valid_y_improper_shape_2D_E(self):
+        y = np.zeros((3, 3,), dtype=np.double)
+        assert_raises(ValueError, is_valid_y_throw, y)
+
+    def test_is_valid_y_improper_shape_2D_F(self):
+        y = np.zeros((3, 3,), dtype=np.double)
+        assert_equal(is_valid_y(y), False)
+
+    def test_is_valid_y_improper_shape_3D_E(self):
+        y = np.zeros((3, 3, 3), dtype=np.double)
+        assert_raises(ValueError, is_valid_y_throw, y)
+
+    def test_is_valid_y_improper_shape_3D_F(self):
+        y = np.zeros((3, 3, 3), dtype=np.double)
+        assert_equal(is_valid_y(y), False)
+
+    def test_is_valid_y_correct_2_by_2(self):
+        y = self.correct_n_by_n(2)
+        assert_equal(is_valid_y(y), True)
+
+    def test_is_valid_y_correct_3_by_3(self):
+        y = self.correct_n_by_n(3)
+        assert_equal(is_valid_y(y), True)
+
+    def test_is_valid_y_correct_4_by_4(self):
+        y = self.correct_n_by_n(4)
+        assert_equal(is_valid_y(y), True)
+
+    def test_is_valid_y_correct_5_by_5(self):
+        y = self.correct_n_by_n(5)
+        assert_equal(is_valid_y(y), True)
+
+    def test_is_valid_y_2_100(self):
+        a = set()
+        for n in range(2, 16):
+            a.add(n * (n - 1) / 2)
+        for i in range(5, 105):
+            if i not in a:
+                assert_raises(ValueError, self.bad_y, i)
+
+    def bad_y(self, n):
+        y = np.random.rand(n)
+        return is_valid_y(y, throw=True)
+
+    def correct_n_by_n(self, n):
+        y = np.random.rand((n * (n - 1)) // 2)
+        return y
+
+
+@pytest.mark.parametrize("p", [-10.0, -0.5, 0.0])
+def test_bad_p(p):
+    # Raise a ValueError if p <= 0.
+    assert_raises(ValueError, minkowski, [1, 2], [3, 4], p)
+    assert_raises(ValueError, minkowski, [1, 2], [3, 4], p, [1, 1])
+
+
+def test_sokalsneath_all_false():
+    # Regression test for ticket #876
+    assert_raises(ValueError, sokalsneath, [False, False, False], [False, False, False])
+
+
+def test_canberra():
+    # Regression test for ticket #1430.
+    assert_equal(wcanberra([1, 2, 3], [2, 4, 6]), 1)
+    assert_equal(wcanberra([1, 1, 0, 0], [1, 0, 1, 0]), 2)
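+    # Worked arithmetic: canberra sums |u_i - v_i| / (|u_i| + |v_i|) per
+    # coordinate, so the first case gives 1/3 + 2/6 + 3/9 = 1; in the second
+    # case the 0/0 coordinate contributes 0, leaving 0 + 1 + 1 + 0 = 2.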
+
+
+def test_braycurtis():
+    # Regression test for ticket #1430.
+    assert_almost_equal(wbraycurtis([1, 2, 3], [2, 4, 6]), 1. / 3, decimal=15)
+    assert_almost_equal(wbraycurtis([1, 1, 0, 0], [1, 0, 1, 0]), 0.5, decimal=15)
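+    # Worked arithmetic: braycurtis is sum(|u - v|) / sum(|u + v|), so the
+    # first pair gives (1 + 2 + 3) / (3 + 6 + 9) = 1/3 and the second
+    # (0 + 1 + 1 + 0) / (2 + 1 + 1 + 0) = 1/2.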
+
+
+def test_euclideans():
+    # Regression test for ticket #1328.
+    x1 = np.array([1, 1, 1])
+    x2 = np.array([0, 0, 0])
+
+    # Basic test of the calculation.
+    assert_almost_equal(wsqeuclidean(x1, x2), 3.0, decimal=14)
+    assert_almost_equal(weuclidean(x1, x2), np.sqrt(3), decimal=14)
+
+    # Check flattening for (1, N) or (N, 1) inputs
+    with assert_raises(ValueError,
+                       match="Input vector should be 1-D"):
+        weuclidean(x1[np.newaxis, :], x2[np.newaxis, :])
+    with assert_raises(ValueError,
+                       match="Input vector should be 1-D"):
+        wsqeuclidean(x1[np.newaxis, :], x2[np.newaxis, :])
+    with assert_raises(ValueError,
+                       match="Input vector should be 1-D"):
+        wsqeuclidean(x1[:, np.newaxis], x2[:, np.newaxis])
+
+    # Distance metrics only defined for vectors (= 1-D)
+    x = np.arange(4).reshape(2, 2)
+    assert_raises(ValueError, weuclidean, x, x)
+    assert_raises(ValueError, wsqeuclidean, x, x)
+
+    # Another check, with random data.
+    rs = np.random.RandomState(1234567890)
+    x = rs.rand(10)
+    y = rs.rand(10)
+    d1 = weuclidean(x, y)
+    d2 = wsqeuclidean(x, y)
+    assert_almost_equal(d1**2, d2, decimal=14)
+
+
+def test_hamming_unequal_length():
+    # Regression test for gh-4290.
+    x = [0, 0, 1]
+    y = [1, 0, 1, 0]
+    # Used to give an AttributeError from ndarray.mean called on bool
+    assert_raises(ValueError, whamming, x, y)
+
+
+def test_hamming_string_array():
+    # https://github.com/scikit-learn/scikit-learn/issues/4014
+    a = np.array(['eggs', 'spam', 'spam', 'eggs', 'spam', 'spam', 'spam',
+                  'spam', 'spam', 'spam', 'spam', 'eggs', 'eggs', 'spam',
+                  'eggs', 'eggs', 'eggs', 'eggs', 'eggs', 'spam'],
+                  dtype='|S4')
+    b = np.array(['eggs', 'spam', 'spam', 'eggs', 'eggs', 'spam', 'spam',
+                  'spam', 'spam', 'eggs', 'spam', 'eggs', 'spam', 'eggs',
+                  'spam', 'spam', 'eggs', 'spam', 'spam', 'eggs'],
+                  dtype='|S4')
+    desired = 0.45
+    assert_allclose(whamming(a, b), desired)
+
+
+def test_minkowski_w():
+    # Regression test for gh-8142.
+    arr_in = np.array([[83.33333333, 100., 83.33333333, 100., 36.,
+                        60., 90., 150., 24., 48.],
+                       [83.33333333, 100., 83.33333333, 100., 36.,
+                        60., 90., 150., 24., 48.]])
+    p0 = pdist(arr_in, metric='minkowski', p=1, w=None)
+    c0 = cdist(arr_in, arr_in, metric='minkowski', p=1, w=None)
+    p1 = pdist(arr_in, metric='minkowski', p=1)
+    c1 = cdist(arr_in, arr_in, metric='minkowski', p=1)
+
+    assert_allclose(p0, p1, rtol=1e-15)
+    assert_allclose(c0, c1, rtol=1e-15)
+
+
+def test_sqeuclidean_dtypes():
+    # Assert that sqeuclidean returns the right types of values.
+    # Integer types should be converted to floating for stability.
+    # Floating point types should be the same as the input.
+    x = [1, 2, 3]
+    y = [4, 5, 6]
+
+    for dtype in [np.int8, np.int16, np.int32, np.int64]:
+        d = wsqeuclidean(np.asarray(x, dtype=dtype), np.asarray(y, dtype=dtype))
+        assert_(np.issubdtype(d.dtype, np.floating))
+
+    for dtype in [np.uint8, np.uint16, np.uint32, np.uint64]:
+        umax = np.iinfo(dtype).max
+        d1 = wsqeuclidean([0], np.asarray([umax], dtype=dtype))
+        d2 = wsqeuclidean(np.asarray([umax], dtype=dtype), [0])
+
+        assert_equal(d1, d2)
+        assert_equal(d1, np.float64(umax)**2)
+
+    dtypes = [np.float32, np.float64, np.complex64, np.complex128]
+    for dtype in ['float16', 'float128']:
+        # These aren't present in older numpy versions; float128 may also not
+        # be present on all platforms.
+        if hasattr(np, dtype):
+            dtypes.append(getattr(np, dtype))
+
+    for dtype in dtypes:
+        d = wsqeuclidean(np.asarray(x, dtype=dtype), np.asarray(y, dtype=dtype))
+        assert_equal(d.dtype, dtype)
+
+
+def test_sokalmichener():
+    # Test that sokalmichener has the same result for bool and int inputs.
+    p = [True, True, False]
+    q = [True, False, True]
+    x = [int(b) for b in p]
+    y = [int(b) for b in q]
+    dist1 = sokalmichener(p, q)
+    dist2 = sokalmichener(x, y)
+    # These should be exactly the same.
+    assert_equal(dist1, dist2)
+
+
+def test_sokalmichener_with_weight():
+    # Hand-computed weighted counts for u = [1, 0] vs. v = [1, 1] with
+    # weights w = [1, 0.2]: position 0 (weight 1.0) agrees as true/true;
+    # position 1 (weight 0.2) disagrees as false/true.
+    ntf = 0 * 1 + 0 * 0.2
+    nft = 0 * 1 + 1 * 0.2
+    ntt = 1 * 1 + 0 * 0.2
+    nff = 0 * 1 + 0 * 0.2
+    expected = 2 * (nft + ntf) / (ntt + nff + 2 * (nft + ntf))
+    assert_almost_equal(expected, 0.2857143)
+    actual = sokalmichener([1, 0], [1, 1], w=[1, 0.2])
+    assert_almost_equal(expected, actual)
+
+    a1 = [False, False, True, True, True, False, False, True, True, True, True,
+          True, True, False, True, False, False, False, True, True]
+    a2 = [True, True, True, False, False, True, True, True, False, True,
+          True, True, True, True, False, False, False, True, True, True]
+
+    for w in [0.05, 0.1, 1.0, 20.0]:
+        assert_almost_equal(sokalmichener(a2, a1, [w]), 0.6666666666666666)
+
+
+def test_modifies_input():
+    # test whether cdist or pdist modifies input arrays
+    X1 = np.asarray([[1., 2., 3.],
+                     [1.2, 2.3, 3.4],
+                     [2.2, 2.3, 4.4],
+                     [22.2, 23.3, 44.4]])
+    X1_copy = X1.copy()
+    for metric in _METRICS_NAMES:
+        cdist(X1, X1, metric)
+        pdist(X1, metric)
+        assert_array_equal(X1, X1_copy)
+
+
+def test_Xdist_deprecated_args():
+    # test that both cdist and pdist raise a TypeError for deprecated extra
+    # arguments
+    X1 = np.asarray([[1., 2., 3.],
+                     [1.2, 2.3, 3.4],
+                     [2.2, 2.3, 4.4],
+                     [22.2, 23.3, 44.4]])
+    weights = np.arange(3)
+    for metric in _METRICS_NAMES:
+        with pytest.raises(TypeError):
+            cdist(X1, X1, metric, 2.)
+
+        with pytest.raises(TypeError):
+            pdist(X1, metric, 2.)
+
+        for arg in ["p", "V", "VI"]:
+            kwargs = {arg:"foo"}
+
+            if ((arg == "V" and metric == "seuclidean") or
+            (arg == "VI" and metric == "mahalanobis") or
+            (arg == "p" and metric == "minkowski")):
+                continue
+
+            with pytest.raises(TypeError):
+                cdist(X1, X1, metric, **kwargs)
+
+            with pytest.raises(TypeError):
+                pdist(X1, metric, **kwargs)
+
+
+def test_Xdist_non_negative_weights():
+    X = eo['random-float32-data'][::5, ::2]
+    w = np.ones(X.shape[1])
+    w[::5] = -w[::5]
+    for metric in _METRICS_NAMES:
+        if metric in ['seuclidean', 'mahalanobis', 'jensenshannon']:
+            continue
+        with np.testing.suppress_warnings() as sup:
+            if metric == "kulsinski":
+                sup.filter(DeprecationWarning,
+                           "Kulsinski has been deprecated from")
+            for m in [metric, eval(metric), "test_" + metric]:
+                assert_raises(ValueError, pdist, X, m, w=w)
+                assert_raises(ValueError, cdist, X, X, m, w=w)
+
+
+def test__validate_vector():
+    x = [1, 2, 3]
+    y = _validate_vector(x)
+    assert_array_equal(y, x)
+
+    y = _validate_vector(x, dtype=np.float64)
+    assert_array_equal(y, x)
+    assert_equal(y.dtype, np.float64)
+
+    x = [1]
+    y = _validate_vector(x)
+    assert_equal(y.ndim, 1)
+    assert_equal(y, x)
+
+    x = 1
+    with assert_raises(ValueError,
+                       match="Input vector should be 1-D"):
+        _validate_vector(x)
+
+    x = np.arange(5).reshape(1, -1, 1)
+    with assert_raises(ValueError,
+                       match="Input vector should be 1-D"):
+        _validate_vector(x)
+
+    x = [[1, 2], [3, 4]]
+    with assert_raises(ValueError,
+                       match="Input vector should be 1-D"):
+        _validate_vector(x)
+
+def test_yule_all_same():
+    # Test that yule avoids a divide by zero when the inputs are exactly equal
+    x = np.ones((2, 6), dtype=bool)
+    d = wyule(x[0], x[0])
+    assert d == 0.0
+
+    d = pdist(x, 'yule')
+    assert_equal(d, [0.0])
+
+    d = cdist(x[:1], x[:1], 'yule')
+    assert_equal(d, [[0.0]])
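+    # Background: yule is 2*ntf*nft / (ntt*nff + ntf*nft); for identical
+    # boolean inputs ntf = nft = 0 (and here nff = 0 as well), so the
+    # expression is the indeterminate 0/0, which scipy defines to be 0.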
+
+
+def test_jensenshannon():
+    assert_almost_equal(jensenshannon([1.0, 0.0, 0.0], [0.0, 1.0, 0.0], 2.0),
+                        1.0)
+    assert_almost_equal(jensenshannon([1.0, 0.0], [0.5, 0.5]),
+                        0.46450140402245893)
+    assert_almost_equal(jensenshannon([1.0, 0.0, 0.0], [1.0, 0.0, 0.0]), 0.0)
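+    # The scalar cases follow from the definition: the distance is the
+    # square root of the Jensen-Shannon divergence (KL(p||m) + KL(q||m)) / 2
+    # with m = (p + q) / 2; with base=2.0 two disjoint distributions have
+    # divergence exactly 1, hence the 1.0 in the first assertion.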
+
+    assert_almost_equal(jensenshannon([[1.0, 2.0]], [[0.5, 1.5]], axis=0),
+                        [0.0, 0.0])
+    assert_almost_equal(jensenshannon([[1.0, 2.0]], [[0.5, 1.5]], axis=1),
+                        [0.0649045])
+    assert_almost_equal(jensenshannon([[1.0, 2.0]], [[0.5, 1.5]], axis=0,
+                                      keepdims=True), [[0.0, 0.0]])
+    assert_almost_equal(jensenshannon([[1.0, 2.0]], [[0.5, 1.5]], axis=1,
+                                      keepdims=True), [[0.0649045]])
+
+    a = np.array([[1, 2, 3, 4],
+                  [5, 6, 7, 8],
+                  [9, 10, 11, 12]])
+    b = np.array([[13, 14, 15, 16],
+                  [17, 18, 19, 20],
+                  [21, 22, 23, 24]])
+
+    assert_almost_equal(jensenshannon(a, b, axis=0),
+                        [0.1954288, 0.1447697, 0.1138377, 0.0927636])
+    assert_almost_equal(jensenshannon(a, b, axis=1),
+                        [0.1402339, 0.0399106, 0.0201815])
+
+
+def test_kulsinski_deprecation():
+    msg = ("Kulsinski has been deprecated from scipy.spatial.distance"
+           " in SciPy 1.9.0 and it will be removed in SciPy 1.11.0."
+           " It is superseded by scipy.spatial.distance.kulczynski1.")
+    with pytest.warns(DeprecationWarning, match=msg):
+        kulsinski([], [])
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/test_hausdorff.py b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/test_hausdorff.py
new file mode 100644
index 00000000..c5a5255a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/test_hausdorff.py
@@ -0,0 +1,172 @@
+import numpy as np
+from numpy.testing import (assert_allclose,
+                           assert_array_equal,
+                           assert_equal)
+import pytest
+from scipy.spatial.distance import directed_hausdorff
+from scipy.spatial import distance
+from scipy._lib._util import check_random_state
+
+
+class TestHausdorff:
+    # Test various properties of the directed Hausdorff code.
+
+    def setup_method(self):
+        np.random.seed(1234)
+        random_angles = np.random.random(100) * np.pi * 2
+        random_columns = np.column_stack(
+            (random_angles, random_angles, np.zeros(100)))
+        random_columns[..., 0] = np.cos(random_columns[..., 0])
+        random_columns[..., 1] = np.sin(random_columns[..., 1])
+        random_columns_2 = np.column_stack(
+            (random_angles, random_angles, np.zeros(100)))
+        random_columns_2[1:, 0] = np.cos(random_columns_2[1:, 0]) * 2.0
+        random_columns_2[1:, 1] = np.sin(random_columns_2[1:, 1]) * 2.0
+        # move one point farther out so we don't have two perfect circles
+        random_columns_2[0, 0] = np.cos(random_columns_2[0, 0]) * 3.3
+        random_columns_2[0, 1] = np.sin(random_columns_2[0, 1]) * 3.3
+        self.path_1 = random_columns
+        self.path_2 = random_columns_2
+        self.path_1_4d = np.insert(self.path_1, 3, 5, axis=1)
+        self.path_2_4d = np.insert(self.path_2, 3, 27, axis=1)
+
+    def test_symmetry(self):
+        # Ensure that the directed (asymmetric) Hausdorff distance is
+        # actually asymmetric
+
+        forward = directed_hausdorff(self.path_1, self.path_2)[0]
+        reverse = directed_hausdorff(self.path_2, self.path_1)[0]
+        assert forward != reverse
+
+    def test_brute_force_comparison_forward(self):
+        # Ensure that the algorithm for directed_hausdorff gives the
+        # same result as the simple / brute force approach in the
+        # forward direction.
+        actual = directed_hausdorff(self.path_1, self.path_2)[0]
+        # brute force over rows:
+        expected = max(np.amin(distance.cdist(self.path_1, self.path_2),
+                               axis=1))
+        assert_allclose(actual, expected)
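+        # In symbols this checks h(A, B) = max over a in A of the min over
+        # b in B of d(a, b), which is exactly what the row-wise amin of the
+        # cdist matrix computes.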
+
+    def test_brute_force_comparison_reverse(self):
+        # Ensure that the algorithm for directed_hausdorff gives the
+        # same result as the simple / brute force approach in the
+        # reverse direction.
+        actual = directed_hausdorff(self.path_2, self.path_1)[0]
+        # brute force over columns:
+        expected = max(np.amin(distance.cdist(self.path_1, self.path_2),
+                               axis=0))
+        assert_allclose(actual, expected)
+
+    def test_degenerate_case(self):
+        # The directed Hausdorff distance must be zero if both input
+        # data arrays match.
+        actual = directed_hausdorff(self.path_1, self.path_1)[0]
+        assert_allclose(actual, 0.0)
+
+    def test_2d_data_forward(self):
+        # Ensure that 2D data is handled properly for a simple case
+        # relative to brute force approach.
+        actual = directed_hausdorff(self.path_1[..., :2],
+                                    self.path_2[..., :2])[0]
+        expected = max(np.amin(distance.cdist(self.path_1[..., :2],
+                                              self.path_2[..., :2]),
+                               axis=1))
+        assert_allclose(actual, expected)
+
+    def test_4d_data_reverse(self):
+        # Ensure that 4D data is handled properly for a simple case
+        # relative to brute force approach.
+        actual = directed_hausdorff(self.path_2_4d, self.path_1_4d)[0]
+        # brute force over columns:
+        expected = max(np.amin(distance.cdist(self.path_1_4d, self.path_2_4d),
+                               axis=0))
+        assert_allclose(actual, expected)
+
+    def test_indices(self):
+        # Ensure that correct point indices are returned -- they should
+        # correspond to the Hausdorff pair
+        path_simple_1 = np.array([[-1, -12], [0, 0], [1, 1], [3, 7], [1, 2]])
+        path_simple_2 = np.array([[0, 0], [1, 1], [4, 100], [10, 9]])
+        actual = directed_hausdorff(path_simple_2, path_simple_1)[1:]
+        expected = (2, 3)
+        assert_array_equal(actual, expected)
+
+    def test_random_state(self):
+        # ensure that the global random state is not modified because
+        # the directed Hausdorff algorithm uses randomization
+        rs = check_random_state(None)
+        old_global_state = rs.get_state()
+        directed_hausdorff(self.path_1, self.path_2)
+        rs2 = check_random_state(None)
+        new_global_state = rs2.get_state()
+        assert_equal(new_global_state, old_global_state)
+
+    @pytest.mark.parametrize("seed", [None, 27870671])
+    def test_random_state_None_int(self, seed):
+        # check that seed values of None or int do not alter global
+        # random state
+        rs = check_random_state(None)
+        old_global_state = rs.get_state()
+        directed_hausdorff(self.path_1, self.path_2, seed)
+        rs2 = check_random_state(None)
+        new_global_state = rs2.get_state()
+        assert_equal(new_global_state, old_global_state)
+
+    def test_invalid_dimensions(self):
+        # Ensure that a ValueError is raised when the number of columns
+        # is not the same
+        rng = np.random.default_rng(189048172503940875434364128139223470523)
+        A = rng.random((3, 2))
+        B = rng.random((3, 5))
+        msg = r"need to have the same number of columns"
+        with pytest.raises(ValueError, match=msg):
+            directed_hausdorff(A, B)
+
+    @pytest.mark.parametrize("A, B, seed, expected", [
+        # the two cases from gh-11332
+        ([(0,0)],
+         [(0,1), (0,0)],
+         0,
+         (0.0, 0, 1)),
+        ([(0,0)],
+         [(0,1), (0,0)],
+         1,
+         (0.0, 0, 1)),
+        # slightly more complex case
+        ([(-5, 3), (0,0)],
+         [(0,1), (0,0), (-5, 3)],
+         77098,
+         # the maximum minimum distance will
+         # be the last one found, but a unique
+         # solution is not guaranteed more broadly
+         (0.0, 1, 1)),
+    ])
+    def test_subsets(self, A, B, seed, expected):
+        # verify fix for gh-11332
+        actual = directed_hausdorff(u=A, v=B, seed=seed)
+        # check distance
+        assert_allclose(actual[0], expected[0])
+        # check indices
+        assert actual[1:] == expected[1:]
+
+
+@pytest.mark.xslow
+def test_massive_arr_overflow():
+    # on 64-bit systems we should be able to
+    # handle arrays that exceed the indexing
+    # size of a 32-bit signed integer
+    try:
+        import psutil
+    except ModuleNotFoundError:
+        pytest.skip("psutil required to check available memory")
+    if psutil.virtual_memory().available < 80*2**30:
+        # Don't run the test if less than 80 GiB of RAM is available.
+        pytest.skip('insufficient memory available to run this test')
+    size = int(3e9)
+    arr1 = np.zeros(shape=(size, 2))
+    arr2 = np.zeros(shape=(3, 2))
+    arr1[size - 1] = [5, 5]
+    actual = directed_hausdorff(u=arr1, v=arr2)
+    assert_allclose(actual[0], 7.0710678118654755)
+    assert_allclose(actual[1], size - 1)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/test_kdtree.py b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/test_kdtree.py
new file mode 100644
index 00000000..d7e1f82f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/test_kdtree.py
@@ -0,0 +1,1470 @@
+# Copyright Anne M. Archibald 2008
+# Released under the scipy license
+
+import os
+from numpy.testing import (assert_equal, assert_array_equal, assert_,
+                           assert_almost_equal, assert_array_almost_equal,
+                           assert_allclose)
+from pytest import raises as assert_raises
+import pytest
+from platform import python_implementation
+import numpy as np
+from scipy.spatial import KDTree, Rectangle, distance_matrix, cKDTree
+from scipy.spatial._ckdtree import cKDTreeNode
+from scipy.spatial import minkowski_distance
+
+import itertools
+
+@pytest.fixture(params=[KDTree, cKDTree])
+def kdtree_type(request):
+    return request.param
+
+
+def KDTreeTest(kls):
+    """Class decorator to create test cases for KDTree and cKDTree
+
+    Tests use the class variable ``kdtree_type`` as the tree constructor.
+    """
+    if not kls.__name__.startswith('_Test'):
+        raise RuntimeError("Expected a class name starting with _Test")
+
+    for tree in (KDTree, cKDTree):
+        test_name = kls.__name__[1:] + '_' + tree.__name__
+
+        if test_name in globals():
+            raise RuntimeError("Duplicated test name: " + test_name)
+
+        # Create a new sub-class with kdtree_type defined
+        test_case = type(test_name, (kls,), {'kdtree_type': tree})
+        globals()[test_name] = test_case
+    return kls
+
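+# For example, decorating a class named ``_Test_small`` publishes
+# ``Test_small_KDTree`` and ``Test_small_cKDTree`` into this module's
+# globals, each inheriting the same test bodies with ``kdtree_type`` bound
+# to the corresponding tree class.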
+
+def distance_box(a, b, p, boxsize):
+    diff = a - b
+    diff[diff > 0.5 * boxsize] -= boxsize
+    diff[diff < -0.5 * boxsize] += boxsize
+    d = minkowski_distance(diff, 0, p)
+    return d
+
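+# distance_box applies the minimum-image convention before measuring: any
+# per-coordinate difference larger than half the box is wrapped by one
+# period. For example, with boxsize = 1.0 the points 0.9 and 0.1 differ by
+# 0.8, which wraps to -0.2, so their periodic distance is 0.2.
+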
+class ConsistencyTests:
+    def distance(self, a, b, p):
+        return minkowski_distance(a, b, p)
+
+    def test_nearest(self):
+        x = self.x
+        d, i = self.kdtree.query(x, 1)
+        assert_almost_equal(d**2, np.sum((x-self.data[i])**2))
+        eps = 1e-8
+        assert_(np.all(np.sum((self.data-x[np.newaxis, :])**2, axis=1) > d**2-eps))
+
+    def test_m_nearest(self):
+        x = self.x
+        m = self.m
+        dd, ii = self.kdtree.query(x, m)
+        d = np.amax(dd)
+        i = ii[np.argmax(dd)]
+        assert_almost_equal(d**2, np.sum((x-self.data[i])**2))
+        eps = 1e-8
+        assert_equal(np.sum(np.sum((self.data-x[np.newaxis, :])**2, axis=1) < d**2+eps), m)
+
+    def test_points_near(self):
+        x = self.x
+        d = self.d
+        dd, ii = self.kdtree.query(x, k=self.kdtree.n, distance_upper_bound=d)
+        eps = 1e-8
+        hits = 0
+        for near_d, near_i in zip(dd, ii):
+            if near_d == np.inf:
+                continue
+            hits += 1
+            assert_almost_equal(near_d**2, np.sum((x-self.data[near_i])**2))
+            assert_(near_d < d+eps, "near_d=%g should be less than %g" % (near_d, d))
+        assert_equal(np.sum(self.distance(self.data, x, 2) < d**2+eps), hits)
+
+    def test_points_near_l1(self):
+        x = self.x
+        d = self.d
+        dd, ii = self.kdtree.query(x, k=self.kdtree.n, p=1, distance_upper_bound=d)
+        eps = 1e-8
+        hits = 0
+        for near_d, near_i in zip(dd, ii):
+            if near_d == np.inf:
+                continue
+            hits += 1
+            assert_almost_equal(near_d, self.distance(x, self.data[near_i], 1))
+            assert_(near_d < d+eps, "near_d=%g should be less than %g" % (near_d, d))
+        assert_equal(np.sum(self.distance(self.data, x, 1) < d+eps), hits)
+
+    def test_points_near_linf(self):
+        x = self.x
+        d = self.d
+        dd, ii = self.kdtree.query(x, k=self.kdtree.n, p=np.inf, distance_upper_bound=d)
+        eps = 1e-8
+        hits = 0
+        for near_d, near_i in zip(dd, ii):
+            if near_d == np.inf:
+                continue
+            hits += 1
+            assert_almost_equal(near_d, self.distance(x, self.data[near_i], np.inf))
+            assert_(near_d < d+eps, "near_d=%g should be less than %g" % (near_d, d))
+        assert_equal(np.sum(self.distance(self.data, x, np.inf) < d+eps), hits)
+
+    def test_approx(self):
+        x = self.x
+        k = self.k
+        eps = 0.1
+        d_real, i_real = self.kdtree.query(x, k)
+        d, i = self.kdtree.query(x, k, eps=eps)
+        assert_(np.all(d <= d_real*(1+eps)))
+
+
+@KDTreeTest
+class _Test_random(ConsistencyTests):
+    def setup_method(self):
+        self.n = 100
+        self.m = 4
+        np.random.seed(1234)
+        self.data = np.random.randn(self.n, self.m)
+        self.kdtree = self.kdtree_type(self.data, leafsize=2)
+        self.x = np.random.randn(self.m)
+        self.d = 0.2
+        self.k = 10
+
+
+@KDTreeTest
+class _Test_random_far(_Test_random):
+    def setup_method(self):
+        super().setup_method()
+        self.x = np.random.randn(self.m)+10
+
+
+@KDTreeTest
+class _Test_small(ConsistencyTests):
+    def setup_method(self):
+        self.data = np.array([[0, 0, 0],
+                              [0, 0, 1],
+                              [0, 1, 0],
+                              [0, 1, 1],
+                              [1, 0, 0],
+                              [1, 0, 1],
+                              [1, 1, 0],
+                              [1, 1, 1]])
+        self.kdtree = self.kdtree_type(self.data)
+        self.n = self.kdtree.n
+        self.m = self.kdtree.m
+        np.random.seed(1234)
+        self.x = np.random.randn(3)
+        self.d = 0.5
+        self.k = 4
+
+    def test_nearest(self):
+        assert_array_equal(
+                self.kdtree.query((0, 0, 0.1), 1),
+                (0.1, 0))
+
+    def test_nearest_two(self):
+        assert_array_equal(
+                self.kdtree.query((0, 0, 0.1), 2),
+                ([0.1, 0.9], [0, 1]))
+
+
+@KDTreeTest
+class _Test_small_nonleaf(_Test_small):
+    def setup_method(self):
+        super().setup_method()
+        self.kdtree = self.kdtree_type(self.data, leafsize=1)
+
+
+class Test_vectorization_KDTree:
+    def setup_method(self):
+        self.data = np.array([[0, 0, 0],
+                              [0, 0, 1],
+                              [0, 1, 0],
+                              [0, 1, 1],
+                              [1, 0, 0],
+                              [1, 0, 1],
+                              [1, 1, 0],
+                              [1, 1, 1]])
+        self.kdtree = KDTree(self.data)
+
+    def test_single_query(self):
+        d, i = self.kdtree.query(np.array([0, 0, 0]))
+        assert_(isinstance(d, float))
+        assert_(np.issubdtype(i, np.signedinteger))
+
+    def test_vectorized_query(self):
+        d, i = self.kdtree.query(np.zeros((2, 4, 3)))
+        assert_equal(np.shape(d), (2, 4))
+        assert_equal(np.shape(i), (2, 4))
+
+    def test_single_query_multiple_neighbors(self):
+        s = 23
+        kk = self.kdtree.n+s
+        d, i = self.kdtree.query(np.array([0, 0, 0]), k=kk)
+        assert_equal(np.shape(d), (kk,))
+        assert_equal(np.shape(i), (kk,))
+        assert_(np.all(~np.isfinite(d[-s:])))
+        assert_(np.all(i[-s:] == self.kdtree.n))
+
+    def test_vectorized_query_multiple_neighbors(self):
+        s = 23
+        kk = self.kdtree.n+s
+        d, i = self.kdtree.query(np.zeros((2, 4, 3)), k=kk)
+        assert_equal(np.shape(d), (2, 4, kk))
+        assert_equal(np.shape(i), (2, 4, kk))
+        assert_(np.all(~np.isfinite(d[:, :, -s:])))
+        assert_(np.all(i[:, :, -s:] == self.kdtree.n))
+
+    def test_query_raises_for_k_none(self):
+        x = 1.0
+        with pytest.raises(ValueError, match="k must be an integer or*"):
+            self.kdtree.query(x, k=None)
+
+class Test_vectorization_cKDTree:
+    def setup_method(self):
+        self.data = np.array([[0, 0, 0],
+                              [0, 0, 1],
+                              [0, 1, 0],
+                              [0, 1, 1],
+                              [1, 0, 0],
+                              [1, 0, 1],
+                              [1, 1, 0],
+                              [1, 1, 1]])
+        self.kdtree = cKDTree(self.data)
+
+    def test_single_query(self):
+        d, i = self.kdtree.query([0, 0, 0])
+        assert_(isinstance(d, float))
+        assert_(isinstance(i, int))
+
+    def test_vectorized_query(self):
+        d, i = self.kdtree.query(np.zeros((2, 4, 3)))
+        assert_equal(np.shape(d), (2, 4))
+        assert_equal(np.shape(i), (2, 4))
+
+    def test_vectorized_query_noncontiguous_values(self):
+        np.random.seed(1234)
+        qs = np.random.randn(3, 1000).T
+        ds, i_s = self.kdtree.query(qs)
+        for q, d, i in zip(qs, ds, i_s):
+            assert_equal(self.kdtree.query(q), (d, i))
+
+    def test_single_query_multiple_neighbors(self):
+        s = 23
+        kk = self.kdtree.n+s
+        d, i = self.kdtree.query([0, 0, 0], k=kk)
+        assert_equal(np.shape(d), (kk,))
+        assert_equal(np.shape(i), (kk,))
+        assert_(np.all(~np.isfinite(d[-s:])))
+        assert_(np.all(i[-s:] == self.kdtree.n))
+
+    def test_vectorized_query_multiple_neighbors(self):
+        s = 23
+        kk = self.kdtree.n+s
+        d, i = self.kdtree.query(np.zeros((2, 4, 3)), k=kk)
+        assert_equal(np.shape(d), (2, 4, kk))
+        assert_equal(np.shape(i), (2, 4, kk))
+        assert_(np.all(~np.isfinite(d[:, :, -s:])))
+        assert_(np.all(i[:, :, -s:] == self.kdtree.n))
+
+class ball_consistency:
+    tol = 0.0
+
+    def distance(self, a, b, p):
+        return minkowski_distance(a * 1.0, b * 1.0, p)
+
+    def test_in_ball(self):
+        x = np.atleast_2d(self.x)
+        d = np.broadcast_to(self.d, x.shape[:-1])
+        l = self.T.query_ball_point(x, self.d, p=self.p, eps=self.eps)
+        for i, ind in enumerate(l):
+            dist = self.distance(self.data[ind], x[i], self.p) - d[i]*(1.+self.eps)
+            norm = self.distance(self.data[ind], x[i], self.p) + d[i]*(1.+self.eps)
+            assert_array_equal(dist < self.tol * norm, True)
+
+    def test_found_all(self):
+        x = np.atleast_2d(self.x)
+        d = np.broadcast_to(self.d, x.shape[:-1])
+        l = self.T.query_ball_point(x, self.d, p=self.p, eps=self.eps)
+        for i, ind in enumerate(l):
+            c = np.ones(self.T.n, dtype=bool)
+            c[ind] = False
+            dist = self.distance(self.data[c], x[i], self.p) - d[i]/(1.+self.eps)
+            norm = self.distance(self.data[c], x[i], self.p) + d[i]/(1.+self.eps)
+            assert_array_equal(dist > -self.tol * norm, True)
+
+@KDTreeTest
+class _Test_random_ball(ball_consistency):
+    def setup_method(self):
+        n = 100
+        m = 4
+        np.random.seed(1234)
+        self.data = np.random.randn(n, m)
+        self.T = self.kdtree_type(self.data, leafsize=2)
+        self.x = np.random.randn(m)
+        self.p = 2.
+        self.eps = 0
+        self.d = 0.2
+
+
+@KDTreeTest
+class _Test_random_ball_periodic(ball_consistency):
+    def distance(self, a, b, p):
+        return distance_box(a, b, p, 1.0)
+
+    def setup_method(self):
+        n = 10000
+        m = 4
+        np.random.seed(1234)
+        self.data = np.random.uniform(size=(n, m))
+        self.T = self.kdtree_type(self.data, leafsize=2, boxsize=1)
+        self.x = np.full(m, 0.1)
+        self.p = 2.
+        self.eps = 0
+        self.d = 0.2
+
+    def test_in_ball_outside(self):
+        l = self.T.query_ball_point(self.x + 1.0, self.d, p=self.p, eps=self.eps)
+        for i in l:
+            assert_(self.distance(self.data[i], self.x, self.p) <= self.d*(1.+self.eps))
+        l = self.T.query_ball_point(self.x - 1.0, self.d, p=self.p, eps=self.eps)
+        for i in l:
+            assert_(self.distance(self.data[i], self.x, self.p) <= self.d*(1.+self.eps))
+
+    def test_found_all_outside(self):
+        c = np.ones(self.T.n, dtype=bool)
+        l = self.T.query_ball_point(self.x + 1.0, self.d, p=self.p, eps=self.eps)
+        c[l] = False
+        assert_(np.all(self.distance(self.data[c], self.x, self.p) >= self.d/(1.+self.eps)))
+
+        l = self.T.query_ball_point(self.x - 1.0, self.d, p=self.p, eps=self.eps)
+        c[l] = False
+        assert_(np.all(self.distance(self.data[c], self.x, self.p) >= self.d/(1.+self.eps)))
+
+
+@KDTreeTest
+class _Test_random_ball_largep_issue9890(ball_consistency):
+
+    # allow some tolerance for roundoff error at large p (gh-9890)
+    tol = 1e-13
+
+    def setup_method(self):
+        n = 1000
+        m = 2
+        np.random.seed(123)
+        self.data = np.random.randint(100, 1000, size=(n, m))
+        self.T = self.kdtree_type(self.data)
+        self.x = self.data
+        self.p = 100
+        self.eps = 0
+        self.d = 10
+
+
+@KDTreeTest
+class _Test_random_ball_approx(_Test_random_ball):
+
+    def setup_method(self):
+        super().setup_method()
+        self.eps = 0.1
+
+
+@KDTreeTest
+class _Test_random_ball_approx_periodic(_Test_random_ball_periodic):
+
+    def setup_method(self):
+        super().setup_method()
+        self.eps = 0.1
+
+
+@KDTreeTest
+class _Test_random_ball_far(_Test_random_ball):
+
+    def setup_method(self):
+        super().setup_method()
+        self.d = 2.
+
+@KDTreeTest
+class _Test_random_ball_far_periodic(_Test_random_ball_periodic):
+
+    def setup_method(self):
+        super().setup_method()
+        self.d = 2.
+
+
+@KDTreeTest
+class _Test_random_ball_l1(_Test_random_ball):
+
+    def setup_method(self):
+        super().setup_method()
+        self.p = 1
+
+
+@KDTreeTest
+class _Test_random_ball_linf(_Test_random_ball):
+
+    def setup_method(self):
+        super().setup_method()
+        self.p = np.inf
+
+
+def test_random_ball_vectorized(kdtree_type):
+    n = 20
+    m = 5
+    np.random.seed(1234)
+    T = kdtree_type(np.random.randn(n, m))
+
+    r = T.query_ball_point(np.random.randn(2, 3, m), 1)
+    assert_equal(r.shape, (2, 3))
+    assert_(isinstance(r[0, 0], list))
+
+
+def test_query_ball_point_multithreading(kdtree_type):
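+    # workers=1 runs serially, larger values use a worker pool, and
+    # workers=-1 uses all available CPUs; the results must be identical
+    # regardless of the worker count (editor's note)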
+    np.random.seed(0)
+    n = 5000
+    k = 2
+    points = np.random.randn(n, k)
+    T = kdtree_type(points)
+    l1 = T.query_ball_point(points, 0.003, workers=1)
+    l2 = T.query_ball_point(points, 0.003, workers=64)
+    l3 = T.query_ball_point(points, 0.003, workers=-1)
+
+    for i in range(n):
+        if l1[i] or l2[i]:
+            assert_array_equal(l1[i], l2[i])
+
+    for i in range(n):
+        if l1[i] or l3[i]:
+            assert_array_equal(l1[i], l3[i])
+
+
+class two_trees_consistency:
+
+    def distance(self, a, b, p):
+        return minkowski_distance(a, b, p)
+
+    def test_all_in_ball(self):
+        r = self.T1.query_ball_tree(self.T2, self.d, p=self.p, eps=self.eps)
+        for i, l in enumerate(r):
+            for j in l:
+                assert_(self.distance(self.data1[i], self.data2[j], self.p) <= self.d*(1.+self.eps))
+
+    def test_found_all(self):
+        r = self.T1.query_ball_tree(self.T2, self.d, p=self.p, eps=self.eps)
+        for i, l in enumerate(r):
+            c = np.ones(self.T2.n, dtype=bool)
+            c[l] = False
+            assert_(np.all(self.distance(self.data2[c], self.data1[i], self.p) >= self.d/(1.+self.eps)))
+
+
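+# Editor's sketch (not part of the upstream suite; assumes this module's
+# imports): what two_trees_consistency verifies -- query_ball_tree is the
+# tree-against-tree analogue of mapping query_ball_point over the points.
+def _sketch_query_ball_tree():
+    rng = np.random.default_rng(1)
+    a, b = rng.random((20, 2)), rng.random((30, 2))
+    T1, T2 = KDTree(a), KDTree(b)
+    per_tree = T1.query_ball_tree(T2, 0.25)
+    per_point = [T2.query_ball_point(p, 0.25) for p in a]
+    assert all(sorted(u) == sorted(v) for u, v in zip(per_tree, per_point))
+
+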
+@KDTreeTest
+class _Test_two_random_trees(two_trees_consistency):
+
+    def setup_method(self):
+        n = 50
+        m = 4
+        np.random.seed(1234)
+        self.data1 = np.random.randn(n, m)
+        self.T1 = self.kdtree_type(self.data1, leafsize=2)
+        self.data2 = np.random.randn(n, m)
+        self.T2 = self.kdtree_type(self.data2, leafsize=2)
+        self.p = 2.
+        self.eps = 0
+        self.d = 0.2
+
+
+@KDTreeTest
+class _Test_two_random_trees_periodic(two_trees_consistency):
+    def distance(self, a, b, p):
+        return distance_box(a, b, p, 1.0)
+
+    def setup_method(self):
+        n = 50
+        m = 4
+        np.random.seed(1234)
+        self.data1 = np.random.uniform(size=(n, m))
+        self.T1 = self.kdtree_type(self.data1, leafsize=2, boxsize=1.0)
+        self.data2 = np.random.uniform(size=(n, m))
+        self.T2 = self.kdtree_type(self.data2, leafsize=2, boxsize=1.0)
+        self.p = 2.
+        self.eps = 0
+        self.d = 0.2
+
+
+@KDTreeTest
+class _Test_two_random_trees_far(_Test_two_random_trees):
+
+    def setup_method(self):
+        super().setup_method()
+        self.d = 2
+
+
+@KDTreeTest
+class _Test_two_random_trees_far_periodic(_Test_two_random_trees_periodic):
+
+    def setup_method(self):
+        super().setup_method()
+        self.d = 2
+
+
+@KDTreeTest
+class _Test_two_random_trees_linf(_Test_two_random_trees):
+
+    def setup_method(self):
+        super().setup_method()
+        self.p = np.inf
+
+
+@KDTreeTest
+class _Test_two_random_trees_linf_periodic(_Test_two_random_trees_periodic):
+
+    def setup_method(self):
+        super().setup_method()
+        self.p = np.inf
+
+
+class Test_rectangle:
+
+    def setup_method(self):
+        self.rect = Rectangle([0, 0], [1, 1])
+
+    def test_min_inside(self):
+        assert_almost_equal(self.rect.min_distance_point([0.5, 0.5]), 0)
+
+    def test_min_one_side(self):
+        assert_almost_equal(self.rect.min_distance_point([0.5, 1.5]), 0.5)
+
+    def test_min_two_sides(self):
+        assert_almost_equal(self.rect.min_distance_point([2, 2]), np.sqrt(2))
+
+    def test_max_inside(self):
+        assert_almost_equal(self.rect.max_distance_point([0.5, 0.5]), 1/np.sqrt(2))
+
+    def test_max_one_side(self):
+        assert_almost_equal(self.rect.max_distance_point([0.5, 1.5]), np.hypot(0.5, 1.5))
+
+    def test_max_two_sides(self):
+        assert_almost_equal(self.rect.max_distance_point([2, 2]), 2*np.sqrt(2))
+
+    def test_split(self):
+        less, greater = self.rect.split(0, 0.1)
+        assert_array_equal(less.maxes, [0.1, 1])
+        assert_array_equal(less.mins, [0, 0])
+        assert_array_equal(greater.maxes, [1, 1])
+        assert_array_equal(greater.mins, [0.1, 0])
+
+
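+# Editor's sketch (assumes this module's Rectangle import; not part of the
+# upstream suite): min_distance_point measures to the nearest point of the
+# box, max_distance_point to the farthest corner, as the tests above assert.
+def _sketch_rectangle_distances():
+    rect = Rectangle([0, 0], [1, 1])
+    # (2, 2) is sqrt(2) from the nearest corner (1, 1) ...
+    assert np.isclose(rect.min_distance_point([2, 2]), np.sqrt(2))
+    # ... and 2*sqrt(2) from the farthest corner (0, 0)
+    assert np.isclose(rect.max_distance_point([2, 2]), 2 * np.sqrt(2))
+
+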
+def test_distance_l2():
+    assert_almost_equal(minkowski_distance([0, 0], [1, 1], 2), np.sqrt(2))
+
+
+def test_distance_l1():
+    assert_almost_equal(minkowski_distance([0, 0], [1, 1], 1), 2)
+
+
+def test_distance_linf():
+    assert_almost_equal(minkowski_distance([0, 0], [1, 1], np.inf), 1)
+
+
+def test_distance_vectorization():
+    np.random.seed(1234)
+    x = np.random.randn(10, 1, 3)
+    y = np.random.randn(1, 7, 3)
+    assert_equal(minkowski_distance(x, y).shape, (10, 7))
+
+
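+# Editor's note: minkowski_distance(x, y, p) computes
+# (sum_i |x_i - y_i|**p)**(1/p) over the last axis and broadcasts all leading
+# axes, which is what the tests above rely on. A minimal sketch (editor's
+# addition, using this module's imports):
+def _sketch_minkowski_broadcast():
+    x = np.zeros((10, 1, 3))
+    y = np.ones((1, 7, 3))
+    d = minkowski_distance(x, y, 2)
+    assert d.shape == (10, 7)
+    assert np.allclose(d, np.sqrt(3.0))
+
+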
+class count_neighbors_consistency:
+    def test_one_radius(self):
+        r = 0.2
+        assert_equal(self.T1.count_neighbors(self.T2, r),
+                np.sum([len(l) for l in self.T1.query_ball_tree(self.T2, r)]))
+
+    def test_large_radius(self):
+        r = 1000
+        assert_equal(self.T1.count_neighbors(self.T2, r),
+                np.sum([len(l) for l in self.T1.query_ball_tree(self.T2, r)]))
+
+    def test_multiple_radius(self):
+        rs = np.exp(np.linspace(np.log(0.01), np.log(10), 3))
+        results = self.T1.count_neighbors(self.T2, rs)
+        assert_(np.all(np.diff(results) >= 0))
+        for r, result in zip(rs, results):
+            assert_equal(self.T1.count_neighbors(self.T2, r), result)
+
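+# Editor's sketch (not part of the upstream suite; assumes this module's
+# imports): count_neighbors(other, r) counts the pairs (p1 in self, p2 in
+# other) with distance <= r -- the identity the class above relies on.
+def _sketch_count_neighbors_identity():
+    rng = np.random.default_rng(2)
+    T1 = KDTree(rng.standard_normal((40, 2)))
+    T2 = KDTree(rng.standard_normal((40, 2)))
+    n_pairs = T1.count_neighbors(T2, 0.3)
+    assert n_pairs == sum(len(l) for l in T1.query_ball_tree(T2, 0.3))
+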
+@KDTreeTest
+class _Test_count_neighbors(count_neighbors_consistency):
+    def setup_method(self):
+        n = 50
+        m = 2
+        np.random.seed(1234)
+        self.T1 = self.kdtree_type(np.random.randn(n, m), leafsize=2)
+        self.T2 = self.kdtree_type(np.random.randn(n, m), leafsize=2)
+
+
+class sparse_distance_matrix_consistency:
+
+    def distance(self, a, b, p):
+        return minkowski_distance(a, b, p)
+
+    def test_consistency_with_neighbors(self):
+        M = self.T1.sparse_distance_matrix(self.T2, self.r)
+        r = self.T1.query_ball_tree(self.T2, self.r)
+        for i, l in enumerate(r):
+            for j in l:
+                assert_almost_equal(M[i, j],
+                                    self.distance(self.T1.data[i], self.T2.data[j], self.p),
+                                    decimal=14)
+        for ((i, j), d) in M.items():
+            assert_(j in r[i])
+
+    def test_zero_distance(self):
+        # regression check for bug 870: building the sparse distance matrix
+        # of a tree against itself should not raise an exception
+        self.T1.sparse_distance_matrix(self.T1, self.r)
+
+    def test_consistency(self):
+        # Test consistency with a distance_matrix
+        M1 = self.T1.sparse_distance_matrix(self.T2, self.r)
+        expected = distance_matrix(self.T1.data, self.T2.data)
+        expected[expected > self.r] = 0
+        assert_array_almost_equal(M1.toarray(), expected, decimal=14)
+
+    def test_against_logic_error_regression(self):
+        # regression test for gh-5077 logic error
+        np.random.seed(0)
+        too_many = np.array(np.random.randn(18, 2), dtype=int)
+        tree = self.kdtree_type(
+            too_many, balanced_tree=False, compact_nodes=False)
+        d = tree.sparse_distance_matrix(tree, 3).toarray()
+        assert_array_almost_equal(d, d.T, decimal=14)
+
+    def test_ckdtree_return_types(self):
+        # brute-force reference
+        ref = np.zeros((self.n, self.n))
+        for i in range(self.n):
+            for j in range(self.n):
+                v = self.data1[i, :] - self.data2[j, :]
+                ref[i, j] = np.dot(v, v)
+        ref = np.sqrt(ref)
+        ref[ref > self.r] = 0.
+        # test return type 'dict'
+        dist = np.zeros((self.n, self.n))
+        r = self.T1.sparse_distance_matrix(self.T2, self.r, output_type='dict')
+        for i, j in r.keys():
+            dist[i, j] = r[(i, j)]
+        assert_array_almost_equal(ref, dist, decimal=14)
+        # test return type 'ndarray'
+        dist = np.zeros((self.n, self.n))
+        r = self.T1.sparse_distance_matrix(self.T2, self.r,
+            output_type='ndarray')
+        for k in range(r.shape[0]):
+            i = r['i'][k]
+            j = r['j'][k]
+            v = r['v'][k]
+            dist[i, j] = v
+        assert_array_almost_equal(ref, dist, decimal=14)
+        # test return type 'dok_matrix'
+        r = self.T1.sparse_distance_matrix(self.T2, self.r,
+            output_type='dok_matrix')
+        assert_array_almost_equal(ref, r.toarray(), decimal=14)
+        # test return type 'coo_matrix'
+        r = self.T1.sparse_distance_matrix(self.T2, self.r,
+            output_type='coo_matrix')
+        assert_array_almost_equal(ref, r.toarray(), decimal=14)
+
+
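+# Editor's sketch (not part of the upstream suite; assumes this module's
+# imports): sparse_distance_matrix stores only the pairs whose distance is
+# at most max_distance, so every stored value is bounded by it.
+def _sketch_sparse_distance_matrix():
+    rng = np.random.default_rng(3)
+    T1 = KDTree(rng.random((15, 2)))
+    T2 = KDTree(rng.random((15, 2)))
+    M = T1.sparse_distance_matrix(T2, 0.2, output_type='coo_matrix')
+    assert M.shape == (15, 15)
+    assert np.all(M.data <= 0.2)
+
+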
+@KDTreeTest
+class _Test_sparse_distance_matrix(sparse_distance_matrix_consistency):
+    def setup_method(self):
+        n = 50
+        m = 4
+        np.random.seed(1234)
+        data1 = np.random.randn(n, m)
+        data2 = np.random.randn(n, m)
+        self.T1 = self.kdtree_type(data1, leafsize=2)
+        self.T2 = self.kdtree_type(data2, leafsize=2)
+        self.r = 0.5
+        self.p = 2
+        self.data1 = data1
+        self.data2 = data2
+        self.n = n
+        self.m = m
+
+
+def test_distance_matrix():
+    m = 10
+    n = 11
+    k = 4
+    np.random.seed(1234)
+    xs = np.random.randn(m, k)
+    ys = np.random.randn(n, k)
+    ds = distance_matrix(xs, ys)
+    assert_equal(ds.shape, (m, n))
+    for i in range(m):
+        for j in range(n):
+            assert_almost_equal(minkowski_distance(xs[i], ys[j]), ds[i, j])
+
+
+def test_distance_matrix_looping():
+    m = 10
+    n = 11
+    k = 4
+    np.random.seed(1234)
+    xs = np.random.randn(m, k)
+    ys = np.random.randn(n, k)
+    ds = distance_matrix(xs, ys)
+    dsl = distance_matrix(xs, ys, threshold=1)
+    assert_equal(ds, dsl)
+
+
+def check_onetree_query(T, d):
+    r = T.query_ball_tree(T, d)
+    s = set()
+    for i, l in enumerate(r):
+        for j in l:
+            if i < j:
+                s.add((i, j))
+
+    assert_(s == T.query_pairs(d))
+
+def test_onetree_query(kdtree_type):
+    np.random.seed(0)
+    n = 50
+    k = 4
+    points = np.random.randn(n, k)
+    T = kdtree_type(points)
+    check_onetree_query(T, 0.1)
+
+    points = np.random.randn(3*n, k)
+    points[:n] *= 0.001
+    points[n:2*n] += 2
+    T = kdtree_type(points)
+    check_onetree_query(T, 0.1)
+    check_onetree_query(T, 0.001)
+    check_onetree_query(T, 0.00001)
+    check_onetree_query(T, 1e-6)
+
+
+def test_query_pairs_single_node(kdtree_type):
+    tree = kdtree_type([[0, 1]])
+    assert_equal(tree.query_pairs(0.5), set())
+
+
+def test_kdtree_query_pairs(kdtree_type):
+    np.random.seed(0)
+    n = 50
+    k = 2
+    r = 0.1
+    r2 = r**2
+    points = np.random.randn(n, k)
+    T = kdtree_type(points)
+    # brute force reference
+    brute = set()
+    for i in range(n):
+        for j in range(i+1, n):
+            v = points[i, :] - points[j, :]
+            if np.dot(v, v) <= r2:
+                brute.add((i, j))
+    l0 = sorted(brute)
+    # test default return type
+    s = T.query_pairs(r)
+    l1 = sorted(s)
+    assert_array_equal(l0, l1)
+    # test return type 'set'
+    s = T.query_pairs(r, output_type='set')
+    l1 = sorted(s)
+    assert_array_equal(l0, l1)
+    # test return type 'ndarray'
+    s = set()
+    arr = T.query_pairs(r, output_type='ndarray')
+    for i in range(arr.shape[0]):
+        s.add((int(arr[i, 0]), int(arr[i, 1])))
+    l2 = sorted(s)
+    assert_array_equal(l0, l2)
+
+
+def test_query_pairs_eps(kdtree_type):
+    spacing = np.sqrt(2)
+    # irrational spacing makes rounding errors likely at the boundary
+    x_range = np.linspace(0, 3 * spacing, 4)
+    y_range = np.linspace(0, 3 * spacing, 4)
+    xy_array = [(xi, yi) for xi in x_range for yi in y_range]
+    tree = kdtree_type(xy_array)
+    pairs_eps = tree.query_pairs(r=spacing, eps=.1)
+    # result: 24 with eps, 16 without due to rounding
+    pairs = tree.query_pairs(r=spacing * 1.01)
+    # result: 24
+    assert_equal(pairs, pairs_eps)
+
+
+def test_ball_point_ints(kdtree_type):
+    # Regression test for #1373.
+    x, y = np.mgrid[0:4, 0:4]
+    points = list(zip(x.ravel(), y.ravel()))
+    tree = kdtree_type(points)
+    assert_equal(sorted([4, 8, 9, 12]),
+                 sorted(tree.query_ball_point((2, 0), 1)))
+    points = np.asarray(points, dtype=float)
+    tree = kdtree_type(points)
+    assert_equal(sorted([4, 8, 9, 12]),
+                 sorted(tree.query_ball_point((2, 0), 1)))
+
+
+def test_kdtree_comparisons():
+    # Regression test: node comparisons were done wrong in 0.12 w/Py3.
+    nodes = [KDTree.node() for _ in range(3)]
+    assert_equal(sorted(nodes), sorted(nodes[::-1]))
+
+
+def test_kdtree_build_modes(kdtree_type):
+    # check that the different build modes for KDTree give identical query results
+    np.random.seed(0)
+    n = 5000
+    k = 4
+    points = np.random.randn(n, k)
+    T1 = kdtree_type(points).query(points, k=5)[-1]
+    T2 = kdtree_type(points, compact_nodes=False).query(points, k=5)[-1]
+    T3 = kdtree_type(points, balanced_tree=False).query(points, k=5)[-1]
+    T4 = kdtree_type(points, compact_nodes=False,
+                     balanced_tree=False).query(points, k=5)[-1]
+    assert_array_equal(T1, T2)
+    assert_array_equal(T1, T3)
+    assert_array_equal(T1, T4)
+
+def test_kdtree_pickle(kdtree_type):
+    # test if it is possible to pickle a KDTree
+    import pickle
+    np.random.seed(0)
+    n = 50
+    k = 4
+    points = np.random.randn(n, k)
+    T1 = kdtree_type(points)
+    tmp = pickle.dumps(T1)
+    T2 = pickle.loads(tmp)
+    T1 = T1.query(points, k=5)[-1]
+    T2 = T2.query(points, k=5)[-1]
+    assert_array_equal(T1, T2)
+
+def test_kdtree_pickle_boxsize(kdtree_type):
+    # test if it is possible to pickle a periodic KDTree
+    import pickle
+    np.random.seed(0)
+    n = 50
+    k = 4
+    points = np.random.uniform(size=(n, k))
+    T1 = kdtree_type(points, boxsize=1.0)
+    tmp = pickle.dumps(T1)
+    T2 = pickle.loads(tmp)
+    T1 = T1.query(points, k=5)[-1]
+    T2 = T2.query(points, k=5)[-1]
+    assert_array_equal(T1, T2)
+
+def test_kdtree_copy_data(kdtree_type):
+    # check that copy_data=True makes the kd-tree immune to corruption
+    # caused by modifying the original data array afterwards
+    np.random.seed(0)
+    n = 5000
+    k = 4
+    points = np.random.randn(n, k)
+    T = kdtree_type(points, copy_data=True)
+    q = points.copy()
+    T1 = T.query(q, k=5)[-1]
+    points[...] = np.random.randn(n, k)
+    T2 = T.query(q, k=5)[-1]
+    assert_array_equal(T1, T2)
+
+def test_ckdtree_parallel(kdtree_type, monkeypatch):
+    # check that multi-worker (parallel) queries generate the same results
+    # as serial ones
+    np.random.seed(0)
+    n = 5000
+    k = 4
+    points = np.random.randn(n, k)
+    T = kdtree_type(points)
+    T1 = T.query(points, k=5, workers=64)[-1]
+    T2 = T.query(points, k=5, workers=-1)[-1]
+    T3 = T.query(points, k=5)[-1]
+    assert_array_equal(T1, T2)
+    assert_array_equal(T1, T3)
+
+    monkeypatch.setattr(os, 'cpu_count', lambda: None)
+    with pytest.raises(NotImplementedError, match="Cannot determine the"):
+        T.query(points, 1, workers=-1)
+
+
+def test_ckdtree_view():
+    # Check that the nodes can be correctly viewed from Python.
+    # This test also sanity checks each node in the cKDTree, and
+    # thus verifies the internal structure of the kd-tree.
+    np.random.seed(0)
+    n = 100
+    k = 4
+    points = np.random.randn(n, k)
+    kdtree = cKDTree(points)
+
+    # walk the whole kd-tree and sanity check each node
+    def recurse_tree(n):
+        assert_(isinstance(n, cKDTreeNode))
+        if n.split_dim == -1:
+            assert_(n.lesser is None)
+            assert_(n.greater is None)
+            assert_(n.indices.shape[0] <= kdtree.leafsize)
+        else:
+            recurse_tree(n.lesser)
+            recurse_tree(n.greater)
+            x = n.lesser.data_points[:, n.split_dim]
+            y = n.greater.data_points[:, n.split_dim]
+            assert_(x.max() < y.min())
+
+    recurse_tree(kdtree.tree)
+    # check that indices are correctly retrieved
+    n = kdtree.tree
+    assert_array_equal(np.sort(n.indices), range(100))
+    # check that data_points are correctly retrieved
+    assert_array_equal(kdtree.data[n.indices, :], n.data_points)
+
+# KDTree is specialized to type double points, so no need to make
+# a unit test corresponding to test_ball_point_ints()
+
+def test_kdtree_list_k(kdtree_type):
+    # check querying the kdtree with a list of k values
+    n = 200
+    m = 2
+    klist = [1, 2, 3]
+    kint = 3
+
+    np.random.seed(1234)
+    data = np.random.uniform(size=(n, m))
+    kdtree = kdtree_type(data, leafsize=1)
+
+    # check agreement between arange(1, k+1) and k
+    dd, ii = kdtree.query(data, klist)
+    dd1, ii1 = kdtree.query(data, kint)
+    assert_equal(dd, dd1)
+    assert_equal(ii, ii1)
+
+    # now check skipping one element
+    klist = np.array([1, 3])
+    kint = 3
+    dd, ii = kdtree.query(data, kint)
+    dd1, ii1 = kdtree.query(data, klist)
+    assert_equal(dd1, dd[..., klist - 1])
+    assert_equal(ii1, ii[..., klist - 1])
+
+    # check k == 1 special case
+    # and k == [1] non-special case
+    dd, ii = kdtree.query(data, 1)
+    dd1, ii1 = kdtree.query(data, [1])
+    assert_equal(len(dd.shape), 1)
+    assert_equal(len(dd1.shape), 2)
+    assert_equal(dd, np.ravel(dd1))
+    assert_equal(ii, np.ravel(ii1))
+
+def test_kdtree_box(kdtree_type):
+    # check ckdtree periodic boundary
+    n = 2000
+    m = 3
+    k = 3
+    np.random.seed(1234)
+    data = np.random.uniform(size=(n, m))
+    kdtree = kdtree_type(data, leafsize=1, boxsize=1.0)
+
+    # build a non-periodic tree of the same type as the reference for the
+    # simulated periodic box
+    kdtree2 = kdtree_type(data, leafsize=1)
+
+    for p in [1, 2, 3.0, np.inf]:
+        dd, ii = kdtree.query(data, k, p=p)
+
+        dd1, ii1 = kdtree.query(data + 1.0, k, p=p)
+        assert_almost_equal(dd, dd1)
+        assert_equal(ii, ii1)
+
+        dd1, ii1 = kdtree.query(data - 1.0, k, p=p)
+        assert_almost_equal(dd, dd1)
+        assert_equal(ii, ii1)
+
+        dd2, ii2 = simulate_periodic_box(kdtree2, data, k, boxsize=1.0, p=p)
+        assert_almost_equal(dd, dd2)
+        assert_equal(ii, ii2)
+
+def test_kdtree_box_0boxsize(kdtree_type):
+    # check that a periodic kdtree with boxsize=0 mimics a non-periodic one
+    n = 2000
+    m = 2
+    k = 3
+    np.random.seed(1234)
+    data = np.random.uniform(size=(n, m))
+    kdtree = kdtree_type(data, leafsize=1, boxsize=0.0)
+
+    # a plain non-periodic tree provides the reference results
+    kdtree2 = kdtree_type(data, leafsize=1)
+
+    for p in [1, 2, np.inf]:
+        dd, ii = kdtree.query(data, k, p=p)
+
+        dd1, ii1 = kdtree2.query(data, k, p=p)
+        assert_almost_equal(dd, dd1)
+        assert_equal(ii, ii1)
+
+def test_kdtree_box_upper_bounds(kdtree_type):
+    data = np.linspace(0, 2, 10).reshape(-1, 2)
+    data[:, 1] += 10
+    with pytest.raises(ValueError):
+        kdtree_type(data, leafsize=1, boxsize=1.0)
+    with pytest.raises(ValueError):
+        kdtree_type(data, leafsize=1, boxsize=(0.0, 2.0))
+    # a boxsize entry of 0.0 disables periodicity in that dimension, so this succeeds
+    kdtree_type(data, leafsize=1, boxsize=(2.0, 0.0))
+
+def test_kdtree_box_lower_bounds(kdtree_type):
+    data = np.linspace(-1, 1, 10)
+    assert_raises(ValueError, kdtree_type, data, leafsize=1, boxsize=1.0)
+
+def simulate_periodic_box(kdtree, data, k, boxsize, p):
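+    # Editor's note: brute-force reference for a periodic box -- query all
+    # 3**m shifted images of the data (offsets -1, 0, +1 per dimension)
+    # against a non-periodic tree, then keep the k smallest distances per
+    # query point.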
+    dd = []
+    ii = []
+    x = np.arange(3 ** data.shape[1])
+    nn = np.array(np.unravel_index(x, [3] * data.shape[1])).T
+    nn = nn - 1.0
+    for n in nn:
+        image = data + n * 1.0 * boxsize
+        dd2, ii2 = kdtree.query(image, k, p=p)
+        dd2 = dd2.reshape(-1, k)
+        ii2 = ii2.reshape(-1, k)
+        dd.append(dd2)
+        ii.append(ii2)
+    dd = np.concatenate(dd, axis=-1)
+    ii = np.concatenate(ii, axis=-1)
+
+    result = np.empty([len(data), len(nn) * k], dtype=[
+            ('ii', 'i8'),
+            ('dd', 'f8')])
+    result['ii'][:] = ii
+    result['dd'][:] = dd
+    result.sort(order='dd')
+    return result['dd'][:, :k], result['ii'][:, :k]
+
+
+@pytest.mark.skipif(python_implementation() == 'PyPy',
+                    reason="Fails on PyPy CI runs. See #9507")
+def test_ckdtree_memuse():
+    # unit test adaptation of gh-5630
+
+    # NOTE: this will fail when run via valgrind,
+    # because rss is no longer a reliable memory usage indicator.
+
+    try:
+        import resource
+    except ImportError:
+        # resource is not available on Windows
+        return
+    # Make some data
+    dx, dy = 0.05, 0.05
+    y, x = np.mgrid[slice(1, 5 + dy, dy),
+                    slice(1, 5 + dx, dx)]
+    z = np.sin(x)**10 + np.cos(10 + y*x) * np.cos(x)
+    z_copy = np.empty_like(z)
+    z_copy[:] = z
+    # Place FILLVAL in z_copy at a random number of random locations
+    FILLVAL = 99.
+    mask = np.random.randint(0, z.size, np.random.randint(50) + 5)
+    z_copy.flat[mask] = FILLVAL
+    igood = np.vstack(np.nonzero(z_copy != FILLVAL)).T
+    ibad = np.vstack(np.nonzero(z_copy == FILLVAL)).T
+    mem_use = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
+    # burn-in
+    for i in range(10):
+        tree = cKDTree(igood)
+    # count memleaks while constructing and querying cKDTree
+    num_leaks = 0
+    for i in range(100):
+        mem_use = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
+        tree = cKDTree(igood)
+        dist, iquery = tree.query(ibad, k=4, p=2)
+        new_mem_use = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
+        if new_mem_use > mem_use:
+            num_leaks += 1
+    # ideally zero leaks, but errors might accidentally happen
+    # outside cKDTree
+    assert_(num_leaks < 10)
+
+def test_kdtree_weights(kdtree_type):
+
+    data = np.linspace(0, 1, 4).reshape(-1, 1)
+    tree1 = kdtree_type(data, leafsize=1)
+    weights = np.ones(len(data), dtype='f4')
+
+    nw = tree1._build_weights(weights)
+    assert_array_equal(nw, [4, 2, 1, 1, 2, 1, 1])
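+    # the node weights follow a depth-first walk of the tree: the root
+    # carries the total weight (4) and every inner node the sum of its
+    # children (editor's note)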
+
+    assert_raises(ValueError, tree1._build_weights, weights[:-1])
+
+    for i in range(10):
+        # since weights are uniform, these shall agree:
+        c1 = tree1.count_neighbors(tree1, np.linspace(0, 10, i))
+        c2 = tree1.count_neighbors(tree1, np.linspace(0, 10, i),
+                weights=(weights, weights))
+        c3 = tree1.count_neighbors(tree1, np.linspace(0, 10, i),
+                weights=(weights, None))
+        c4 = tree1.count_neighbors(tree1, np.linspace(0, 10, i),
+                weights=(None, weights))
+        tree1.count_neighbors(tree1, np.linspace(0, 10, i),
+                weights=weights)
+
+        assert_array_equal(c1, c2)
+        assert_array_equal(c1, c3)
+        assert_array_equal(c1, c4)
+
+    for i in range(len(data)):
+        # this tests removal of one data point by setting weight to 0
+        w1 = weights.copy()
+        w1[i] = 0
+        data2 = data[w1 != 0]
+        tree2 = kdtree_type(data2)
+
+        c1 = tree1.count_neighbors(tree1, np.linspace(0, 10, 100),
+                weights=(w1, w1))
+        # "c2 is correct"
+        c2 = tree2.count_neighbors(tree2, np.linspace(0, 10, 100))
+
+        assert_array_equal(c1, c2)
+
+        # passing a single weights array for two different trees is
+        # ambiguous and must raise ValueError
+        assert_raises(ValueError, tree1.count_neighbors,
+            tree2, np.linspace(0, 10, 100), weights=w1)
+
+def test_kdtree_count_neighbors_multiple_r(kdtree_type):
+    n = 2000
+    m = 2
+    np.random.seed(1234)
+    data = np.random.normal(size=(n, m))
+    kdtree = kdtree_type(data, leafsize=1)
+    r0 = [0, 0.01, 0.01, 0.02, 0.05]
+    i0 = np.arange(len(r0))
+    n0 = kdtree.count_neighbors(kdtree, r0)
+    nnc = kdtree.count_neighbors(kdtree, r0, cumulative=False)
+    assert_equal(n0, nnc.cumsum())
+
+    for i, r in zip(itertools.permutations(i0),
+                    itertools.permutations(r0)):
+        # permute n0 by i and it shall agree
+        n = kdtree.count_neighbors(kdtree, r)
+        assert_array_equal(n, n0[list(i)])
+
+def test_len0_arrays(kdtree_type):
+    # make sure len-0 arrays are handled correctly
+    # in range queries (gh-5639)
+    np.random.seed(1234)
+    X = np.random.rand(10, 2)
+    Y = np.random.rand(10, 2)
+    tree = kdtree_type(X)
+    # query_ball_point (single)
+    d, i = tree.query([.5, .5], k=1)
+    z = tree.query_ball_point([.5, .5], 0.1*d)
+    assert_array_equal(z, [])
+    # query_ball_point (multiple)
+    d, i = tree.query(Y, k=1)
+    mind = d.min()
+    z = tree.query_ball_point(Y, 0.1*mind)
+    y = np.empty(shape=(10, ), dtype=object)
+    y.fill([])
+    assert_array_equal(y, z)
+    # query_ball_tree
+    other = kdtree_type(Y)
+    y = tree.query_ball_tree(other, 0.1*mind)
+    assert_array_equal(10*[[]], y)
+    # count_neighbors
+    y = tree.count_neighbors(other, 0.1*mind)
+    assert_(y == 0)
+    # sparse_distance_matrix
+    y = tree.sparse_distance_matrix(other, 0.1*mind, output_type='dok_matrix')
+    assert_array_equal(y == np.zeros((10, 10)), True)
+    y = tree.sparse_distance_matrix(other, 0.1*mind, output_type='coo_matrix')
+    assert_array_equal(y == np.zeros((10, 10)), True)
+    y = tree.sparse_distance_matrix(other, 0.1*mind, output_type='dict')
+    assert_equal(y, {})
+    y = tree.sparse_distance_matrix(other, 0.1*mind, output_type='ndarray')
+    _dtype = [('i', np.intp), ('j', np.intp), ('v', np.float64)]
+    res_dtype = np.dtype(_dtype, align=True)
+    z = np.empty(shape=(0, ), dtype=res_dtype)
+    assert_array_equal(y, z)
+    # query_pairs
+    d, i = tree.query(X, k=2)
+    mind = d[:, -1].min()
+    y = tree.query_pairs(0.1*mind, output_type='set')
+    assert_equal(y, set())
+    y = tree.query_pairs(0.1*mind, output_type='ndarray')
+    z = np.empty(shape=(0, 2), dtype=np.intp)
+    assert_array_equal(y, z)
+
+def test_kdtree_duplicated_inputs(kdtree_type):
+    # check kdtree with duplicated inputs
+    n = 1024
+    for m in range(1, 8):
+        data = np.ones((n, m))
+        data[n//2:] = 2
+
+        for balanced, compact in itertools.product((False, True), repeat=2):
+            kdtree = kdtree_type(data, balanced_tree=balanced,
+                                 compact_nodes=compact, leafsize=1)
+            assert kdtree.size == 3
+
+            tree = (kdtree.tree if kdtree_type is cKDTree else
+                    kdtree.tree._node)
+
+            assert_equal(
+                np.sort(tree.lesser.indices),
+                np.arange(0, n // 2))
+            assert_equal(
+                np.sort(tree.greater.indices),
+                np.arange(n // 2, n))
+
+
+def test_kdtree_noncumulative_nondecreasing(kdtree_type):
+    # count_neighbors with cumulative=False requires the radii to be
+    # non-decreasing; a decreasing sequence shall raise ValueError
+    kdtree = kdtree_type([[0]], leafsize=1)
+
+    assert_raises(ValueError, kdtree.count_neighbors,
+        kdtree, [0.1, 0], cumulative=False)
+
+def test_short_knn(kdtree_type):
+
+    # The test case is based on GitHub issue gh-6425, reported by @SteveDoyle2
+
+    xyz = np.array([
+        [0., 0., 0.],
+        [1.01, 0., 0.],
+        [0., 1., 0.],
+        [0., 1.01, 0.],
+        [1., 0., 0.],
+        [1., 1., 0.]],
+    dtype='float64')
+
+    ckdt = kdtree_type(xyz)
+
+    deq, ieq = ckdt.query(xyz, k=4, distance_upper_bound=0.2)
+
+    assert_array_almost_equal(deq,
+            [[0., np.inf, np.inf, np.inf],
+            [0., 0.01, np.inf, np.inf],
+            [0., 0.01, np.inf, np.inf],
+            [0., 0.01, np.inf, np.inf],
+            [0., 0.01, np.inf, np.inf],
+            [0., np.inf, np.inf, np.inf]])
+
+def test_query_ball_point_vector_r(kdtree_type):
+
+    np.random.seed(1234)
+    data = np.random.normal(size=(100, 3))
+    query = np.random.normal(size=(100, 3))
+    tree = kdtree_type(data)
+    d = np.random.uniform(0, 0.3, size=len(query))
+
+    rvector = tree.query_ball_point(query, d)
+    rscalar = [tree.query_ball_point(qi, di) for qi, di in zip(query, d)]
+    for a, b in zip(rvector, rscalar):
+        assert_array_equal(sorted(a), sorted(b))
+
+def test_query_ball_point_length(kdtree_type):
+
+    np.random.seed(1234)
+    data = np.random.normal(size=(100, 3))
+    query = np.random.normal(size=(100, 3))
+    tree = kdtree_type(data)
+    d = 0.3
+
+    length = tree.query_ball_point(query, d, return_length=True)
+    length2 = [len(ind) for ind in tree.query_ball_point(query, d, return_length=False)]
+    length3 = [len(tree.query_ball_point(qi, d)) for qi in query]
+    length4 = [tree.query_ball_point(qi, d, return_length=True) for qi in query]
+    assert_array_equal(length, length2)
+    assert_array_equal(length, length3)
+    assert_array_equal(length, length4)
+
+def test_discontiguous(kdtree_type):
+
+    np.random.seed(1234)
+    data = np.random.normal(size=(100, 3))
+    d_contiguous = np.arange(100) * 0.04
+    d_discontiguous = np.ascontiguousarray(
+                          np.arange(100)[::-1] * 0.04)[::-1]
+    query_contiguous = np.random.normal(size=(100, 3))
+    query_discontiguous = np.ascontiguousarray(query_contiguous.T).T
+    assert query_discontiguous.strides[-1] != query_contiguous.strides[-1]
+    assert d_discontiguous.strides[-1] != d_contiguous.strides[-1]
+
+    tree = kdtree_type(data)
+
+    length1 = tree.query_ball_point(query_contiguous,
+                                    d_contiguous, return_length=True)
+    length2 = tree.query_ball_point(query_discontiguous,
+                                    d_discontiguous, return_length=True)
+
+    assert_array_equal(length1, length2)
+
+    d1, i1 = tree.query(query_contiguous, 1)
+    d2, i2 = tree.query(query_discontiguous, 1)
+
+    assert_array_equal(d1, d2)
+    assert_array_equal(i1, i2)
+
+
+@pytest.mark.parametrize("balanced_tree, compact_nodes",
+    [(True, False),
+     (True, True),
+     (False, False),
+     (False, True)])
+def test_kdtree_empty_input(kdtree_type, balanced_tree, compact_nodes):
+    # https://github.com/scipy/scipy/issues/5040
+    np.random.seed(1234)
+    empty_v3 = np.empty(shape=(0, 3))
+    query_v3 = np.ones(shape=(1, 3))
+    query_v2 = np.ones(shape=(2, 3))
+
+    tree = kdtree_type(empty_v3, balanced_tree=balanced_tree,
+                       compact_nodes=compact_nodes)
+    length = tree.query_ball_point(query_v3, 0.3, return_length=True)
+    assert length == 0
+
+    dd, ii = tree.query(query_v2, 2)
+    assert ii.shape == (2, 2)
+    assert dd.shape == (2, 2)
+    assert np.isinf(dd).all()
+
+    N = tree.count_neighbors(tree, [0, 1])
+    assert_array_equal(N, [0, 0])
+
+    M = tree.sparse_distance_matrix(tree, 0.3)
+    assert M.shape == (0, 0)
+
+@KDTreeTest
+class _Test_sorted_query_ball_point:
+    def setup_method(self):
+        np.random.seed(1234)
+        self.x = np.random.randn(100, 1)
+        self.ckdt = self.kdtree_type(self.x)
+
+    def test_return_sorted_True(self):
+        idxs_list = self.ckdt.query_ball_point(self.x, 1., return_sorted=True)
+        for idxs in idxs_list:
+            assert_array_equal(idxs, sorted(idxs))
+
+        for xi in self.x:
+            idxs = self.ckdt.query_ball_point(xi, 1., return_sorted=True)
+            assert_array_equal(idxs, sorted(idxs))
+
+    def test_return_sorted_None(self):
+        """Previous behavior was to sort the returned indices if there were
+        multiple points per query but not sort them if there was a single point
+        per query."""
+        idxs_list = self.ckdt.query_ball_point(self.x, 1.)
+        for idxs in idxs_list:
+            assert_array_equal(idxs, sorted(idxs))
+
+        idxs_list_single = [self.ckdt.query_ball_point(xi, 1.) for xi in self.x]
+        idxs_list_False = self.ckdt.query_ball_point(self.x, 1., return_sorted=False)
+        for idxs0, idxs1 in zip(idxs_list_False, idxs_list_single):
+            assert_array_equal(idxs0, idxs1)
+
+
+def test_kdtree_complex_data():
+    # Test that KDTree rejects complex input points (gh-9108)
+    points = np.random.rand(10, 2).view(complex)
+
+    with pytest.raises(TypeError, match="complex data"):
+        t = KDTree(points)
+
+    t = KDTree(points.real)
+
+    with pytest.raises(TypeError, match="complex data"):
+        t.query(points)
+
+    with pytest.raises(TypeError, match="complex data"):
+        t.query_ball_point(points, r=1)
+
+
+def test_kdtree_tree_access():
+    # Test KDTree.tree can be used to traverse the KDTree
+    np.random.seed(1234)
+    points = np.random.rand(100, 4)
+    t = KDTree(points)
+    root = t.tree
+
+    assert isinstance(root, KDTree.innernode)
+    assert root.children == points.shape[0]
+
+    # Visit the tree and assert some basic properties for each node
+    nodes = [root]
+    while nodes:
+        n = nodes.pop(-1)
+
+        if isinstance(n, KDTree.leafnode):
+            assert isinstance(n.children, int)
+            assert n.children == len(n.idx)
+            assert_array_equal(points[n.idx], n._node.data_points)
+        else:
+            assert isinstance(n, KDTree.innernode)
+            assert isinstance(n.split_dim, int)
+            assert 0 <= n.split_dim < t.m
+            assert isinstance(n.split, float)
+            assert isinstance(n.children, int)
+            assert n.children == n.less.children + n.greater.children
+            nodes.append(n.greater)
+            nodes.append(n.less)
+
+
+def test_kdtree_attributes():
+    # Test KDTree's attributes are available
+    np.random.seed(1234)
+    points = np.random.rand(100, 4)
+    t = KDTree(points)
+
+    assert isinstance(t.m, int)
+    assert t.n == points.shape[0]
+
+    assert isinstance(t.n, int)
+    assert t.m == points.shape[1]
+
+    assert isinstance(t.leafsize, int)
+    assert t.leafsize == 10
+
+    assert_array_equal(t.maxes, np.amax(points, axis=0))
+    assert_array_equal(t.mins, np.amin(points, axis=0))
+    assert t.data is points
+
+
+@pytest.mark.parametrize("kdtree_class", [KDTree, cKDTree])
+def test_kdtree_count_neighbors_weighted(kdtree_class):
+    np.random.seed(1234)
+    r = np.arange(0.05, 1, 0.05)
+
+    A = np.random.random(21).reshape((7,3))
+    B = np.random.random(45).reshape((15,3))
+
+    wA = np.random.random(7)
+    wB = np.random.random(15)
+
+    kdA = kdtree_class(A)
+    kdB = kdtree_class(B)
+
+    nAB = kdA.count_neighbors(kdB, r, cumulative=False, weights=(wA,wB))
+
+    # Compare against brute-force
+    weights = wA[None, :] * wB[:, None]
+    dist = np.linalg.norm(A[None, :, :] - B[:, None, :], axis=-1)
+    expect = [np.sum(weights[(prev_radius < dist) & (dist <= radius)])
+              for prev_radius, radius in zip(itertools.chain([0], r[:-1]), r)]
+    assert_allclose(nAB, expect)
+
+
+def test_kdtree_nan():
+    vals = [1, 5, -10, 7, -4, -16, -6, 6, 3, -11]
+    n = len(vals)
+    data = np.concatenate([vals, np.full(n, np.nan)])[:, None]
+
+    query_with_nans = KDTree(data).query_pairs(2)
+    query_without_nans = KDTree(data[:n]).query_pairs(2)
+    assert query_with_nans == query_without_nans
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/test_qhull.py b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/test_qhull.py
new file mode 100644
index 00000000..b571eea2
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/test_qhull.py
@@ -0,0 +1,1178 @@
+import os
+import copy
+
+import numpy as np
+from numpy.testing import (assert_equal, assert_almost_equal,
+                           assert_, assert_allclose, assert_array_equal)
+import pytest
+from pytest import raises as assert_raises
+
+import scipy.spatial._qhull as qhull
+from scipy.spatial import cKDTree as KDTree
+from scipy.spatial import Voronoi
+
+import itertools
+
+def sorted_tuple(x):
+    return tuple(sorted(x))
+
+
+def sorted_unique_tuple(x):
+    return tuple(np.unique(x))
+
+
+def assert_unordered_tuple_list_equal(a, b, tpl=tuple):
+    if isinstance(a, np.ndarray):
+        a = a.tolist()
+    if isinstance(b, np.ndarray):
+        b = b.tolist()
+    a = list(map(tpl, a))
+    a.sort()
+    b = list(map(tpl, b))
+    b.sort()
+    assert_equal(a, b)
+
+
+np.random.seed(1234)
+
+points = [(0,0), (0,1), (1,0), (1,1), (0.5, 0.5), (0.5, 1.5)]
+
+pathological_data_1 = np.array([
+    [-3.14,-3.14], [-3.14,-2.36], [-3.14,-1.57], [-3.14,-0.79],
+    [-3.14,0.0], [-3.14,0.79], [-3.14,1.57], [-3.14,2.36],
+    [-3.14,3.14], [-2.36,-3.14], [-2.36,-2.36], [-2.36,-1.57],
+    [-2.36,-0.79], [-2.36,0.0], [-2.36,0.79], [-2.36,1.57],
+    [-2.36,2.36], [-2.36,3.14], [-1.57,-0.79], [-1.57,0.79],
+    [-1.57,-1.57], [-1.57,0.0], [-1.57,1.57], [-1.57,-3.14],
+    [-1.57,-2.36], [-1.57,2.36], [-1.57,3.14], [-0.79,-1.57],
+    [-0.79,1.57], [-0.79,-3.14], [-0.79,-2.36], [-0.79,-0.79],
+    [-0.79,0.0], [-0.79,0.79], [-0.79,2.36], [-0.79,3.14],
+    [0.0,-3.14], [0.0,-2.36], [0.0,-1.57], [0.0,-0.79], [0.0,0.0],
+    [0.0,0.79], [0.0,1.57], [0.0,2.36], [0.0,3.14], [0.79,-3.14],
+    [0.79,-2.36], [0.79,-0.79], [0.79,0.0], [0.79,0.79],
+    [0.79,2.36], [0.79,3.14], [0.79,-1.57], [0.79,1.57],
+    [1.57,-3.14], [1.57,-2.36], [1.57,2.36], [1.57,3.14],
+    [1.57,-1.57], [1.57,0.0], [1.57,1.57], [1.57,-0.79],
+    [1.57,0.79], [2.36,-3.14], [2.36,-2.36], [2.36,-1.57],
+    [2.36,-0.79], [2.36,0.0], [2.36,0.79], [2.36,1.57],
+    [2.36,2.36], [2.36,3.14], [3.14,-3.14], [3.14,-2.36],
+    [3.14,-1.57], [3.14,-0.79], [3.14,0.0], [3.14,0.79],
+    [3.14,1.57], [3.14,2.36], [3.14,3.14],
+])
+
+pathological_data_2 = np.array([
+    [-1, -1], [-1, 0], [-1, 1],
+    [0, -1], [0, 0], [0, 1],
+    [1, -1 - np.finfo(np.float_).eps], [1, 0], [1, 1],
+])
+
+bug_2850_chunks = [np.random.rand(10, 2),
+                   np.array([[0,0], [0,1], [1,0], [1,1]])  # add corners
+                   ]
+
+# same with some additional chunks
+bug_2850_chunks_2 = (bug_2850_chunks +
+                     [np.random.rand(10, 2),
+                      0.25 + np.array([[0,0], [0,1], [1,0], [1,1]])])
+
+DATASETS = {
+    'some-points': np.asarray(points),
+    'random-2d': np.random.rand(30, 2),
+    'random-3d': np.random.rand(30, 3),
+    'random-4d': np.random.rand(30, 4),
+    'random-5d': np.random.rand(30, 5),
+    'random-6d': np.random.rand(10, 6),
+    'random-7d': np.random.rand(10, 7),
+    'random-8d': np.random.rand(10, 8),
+    'pathological-1': pathological_data_1,
+    'pathological-2': pathological_data_2
+}
+
+INCREMENTAL_DATASETS = {
+    'bug-2850': (bug_2850_chunks, None),
+    'bug-2850-2': (bug_2850_chunks_2, None),
+}
+
+
+def _add_inc_data(name, chunksize):
+    """
+    Generate incremental datasets from basic data sets
+    """
+    points = DATASETS[name]
+    ndim = points.shape[1]
+
+    opts = None
+    nmin = ndim + 2
+
+    if name == 'some-points':
+        # since Qz is not allowed, use QJ
+        opts = 'QJ Pp'
+    elif name == 'pathological-1':
+        # include enough points so that we get different x-coordinates
+        nmin = 12
+
+    chunks = [points[:nmin]]
+    for j in range(nmin, len(points), chunksize):
+        chunks.append(points[j:j+chunksize])
+
+    new_name = "%s-chunk-%d" % (name, chunksize)
+    assert new_name not in INCREMENTAL_DATASETS
+    INCREMENTAL_DATASETS[new_name] = (chunks, opts)
+
+
+for name in DATASETS:
+    for chunksize in 1, 4, 16:
+        _add_inc_data(name, chunksize)
+
+
+class Test_Qhull:
+    def test_swapping(self):
+        # Check that Qhull state swapping works
+
+        x = qhull._Qhull(b'v',
+                         np.array([[0,0],[0,1],[1,0],[1,1.],[0.5,0.5]]),
+                         b'Qz')
+        xd = copy.deepcopy(x.get_voronoi_diagram())
+
+        y = qhull._Qhull(b'v',
+                         np.array([[0,0],[0,1],[1,0],[1,2.]]),
+                         b'Qz')
+        yd = copy.deepcopy(y.get_voronoi_diagram())
+
+        xd2 = copy.deepcopy(x.get_voronoi_diagram())
+        x.close()
+        yd2 = copy.deepcopy(y.get_voronoi_diagram())
+        y.close()
+
+        assert_raises(RuntimeError, x.get_voronoi_diagram)
+        assert_raises(RuntimeError, y.get_voronoi_diagram)
+
+        assert_allclose(xd[0], xd2[0])
+        assert_unordered_tuple_list_equal(xd[1], xd2[1], tpl=sorted_tuple)
+        assert_unordered_tuple_list_equal(xd[2], xd2[2], tpl=sorted_tuple)
+        assert_unordered_tuple_list_equal(xd[3], xd2[3], tpl=sorted_tuple)
+        assert_array_equal(xd[4], xd2[4])
+
+        assert_allclose(yd[0], yd2[0])
+        assert_unordered_tuple_list_equal(yd[1], yd2[1], tpl=sorted_tuple)
+        assert_unordered_tuple_list_equal(yd[2], yd2[2], tpl=sorted_tuple)
+        assert_unordered_tuple_list_equal(yd[3], yd2[3], tpl=sorted_tuple)
+        assert_array_equal(yd[4], yd2[4])
+
+        x.close()
+        assert_raises(RuntimeError, x.get_voronoi_diagram)
+        y.close()
+        assert_raises(RuntimeError, y.get_voronoi_diagram)
+
+    def test_issue_8051(self):
+        points = np.array([[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2],[2, 0], [2, 1], [2, 2]])
+        Voronoi(points)
+
+
+class TestUtilities:
+    """
+    Check that utility functions work.
+
+    """
+
+    def test_find_simplex(self):
+        # Simple check that simplex finding works
+        points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.double)
+        tri = qhull.Delaunay(points)
+
+        # +---+
+        # |\ 0|
+        # | \ |
+        # |1 \|
+        # +---+
+
+        assert_equal(tri.simplices, [[1, 3, 2], [3, 1, 0]])
+
+        for p in [(0.25, 0.25, 1),
+                  (0.75, 0.75, 0),
+                  (0.3, 0.2, 1)]:
+            i = tri.find_simplex(p[:2])
+            assert_equal(i, p[2], err_msg='%r' % (p,))
+            j = qhull.tsearch(tri, p[:2])
+            assert_equal(i, j)
+
+    def test_plane_distance(self):
+        # Compare plane distance from hyperplane equations obtained from Qhull
+        # to manually computed plane equations
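+        # (editor's note: lift_points maps x onto the paraboloid used in the
+        # Delaunay construction, with the last coordinate proportional to
+        # |x|**2; plane_distance then measures against each simplex's lifted
+        # hyperplane)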
+        x = np.array([(0,0), (1, 1), (1, 0), (0.99189033, 0.37674127),
+                      (0.99440079, 0.45182168)], dtype=np.double)
+        p = np.array([0.99966555, 0.15685619], dtype=np.double)
+
+        tri = qhull.Delaunay(x)
+
+        z = tri.lift_points(x)
+        pz = tri.lift_points(p)
+
+        dist = tri.plane_distance(p)
+
+        for j, v in enumerate(tri.simplices):
+            x1 = z[v[0]]
+            x2 = z[v[1]]
+            x3 = z[v[2]]
+
+            n = np.cross(x1 - x3, x2 - x3)
+            n /= np.sqrt(np.dot(n, n))
+            n *= -np.sign(n[2])
+
+            d = np.dot(n, pz - x3)
+
+            assert_almost_equal(dist[j], d)
+
+    def test_convex_hull(self):
+        # Simple check that the convex hull seems to work
+        points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.double)
+        tri = qhull.Delaunay(points)
+
+        # +---+
+        # |\ 0|
+        # | \ |
+        # |1 \|
+        # +---+
+
+        assert_equal(tri.convex_hull, [[3, 2], [1, 2], [1, 0], [3, 0]])
+
+    def test_volume_area(self):
+        # Basic check that we get back the correct volume and area for a cube
+        points = np.array([(0, 0, 0), (0, 1, 0), (1, 0, 0), (1, 1, 0),
+                           (0, 0, 1), (0, 1, 1), (1, 0, 1), (1, 1, 1)])
+        hull = qhull.ConvexHull(points)
+
+        assert_allclose(hull.volume, 1., rtol=1e-14,
+                        err_msg="Volume of cube is incorrect")
+        assert_allclose(hull.area, 6., rtol=1e-14,
+                        err_msg="Area of cube is incorrect")
+
+    def test_random_volume_area(self):
+        # Test that the volume and area of a random 10-point convex hull
+        # are coherent with the output of qconvex Qt s FA
+        points = np.array([(0.362568364506, 0.472712355305, 0.347003084477),
+                           (0.733731893414, 0.634480295684, 0.950513180209),
+                           (0.511239955611, 0.876839441267, 0.418047827863),
+                           (0.0765906233393, 0.527373281342, 0.6509863541),
+                           (0.146694972056, 0.596725793348, 0.894860986685),
+                           (0.513808585741, 0.069576205858, 0.530890338876),
+                           (0.512343805118, 0.663537132612, 0.037689295973),
+                           (0.47282965018, 0.462176697655, 0.14061843691),
+                           (0.240584597123, 0.778660020591, 0.722913476339),
+                           (0.951271745935, 0.967000673944, 0.890661319684)])
+
+        hull = qhull.ConvexHull(points)
+        assert_allclose(hull.volume, 0.14562013, rtol=1e-07,
+                        err_msg="Volume of random polyhedron is incorrect")
+        assert_allclose(hull.area, 1.6670425, rtol=1e-07,
+                        err_msg="Area of random polyhedron is incorrect")
+
+    def test_incremental_volume_area_random_input(self):
+        """Test that incremental mode gives the same volume/area as
+        non-incremental mode and incremental mode with restart"""
+        nr_points = 20
+        dim = 3
+        points = np.random.random((nr_points, dim))
+        inc_hull = qhull.ConvexHull(points[:dim+1, :], incremental=True)
+        inc_restart_hull = qhull.ConvexHull(points[:dim+1, :], incremental=True)
+        for i in range(dim+1, nr_points):
+            hull = qhull.ConvexHull(points[:i+1, :])
+            inc_hull.add_points(points[i:i+1, :])
+            inc_restart_hull.add_points(points[i:i+1, :], restart=True)
+            assert_allclose(hull.volume, inc_hull.volume, rtol=1e-7)
+            assert_allclose(hull.volume, inc_restart_hull.volume, rtol=1e-7)
+            assert_allclose(hull.area, inc_hull.area, rtol=1e-7)
+            assert_allclose(hull.area, inc_restart_hull.area, rtol=1e-7)
+
+    def _check_barycentric_transforms(self, tri, err_msg="",
+                                      unit_cube=False,
+                                      unit_cube_tol=0):
+        """Check that a triangulation has reasonable barycentric transforms"""
+        vertices = tri.points[tri.simplices]
+        sc = 1/(tri.ndim + 1.0)
+        centroids = vertices.sum(axis=1) * sc
+
+        # Either: (i) the simplex has a `nan` barycentric transform,
+        # or (ii) the centroid lies inside the simplex
+
+        def barycentric_transform(tr, x):
+            r = tr[:,-1,:]
+            Tinv = tr[:,:-1,:]
+            return np.einsum('ijk,ik->ij', Tinv, x - r)
+
+        eps = np.finfo(float).eps
+
+        c = barycentric_transform(tri.transform, centroids)
+        with np.errstate(invalid="ignore"):
+            ok = np.isnan(c).all(axis=1) | (abs(c - sc)/sc < 0.1).all(axis=1)
+
+        assert_(ok.all(), "%s %s" % (err_msg, np.nonzero(~ok)))
+
+        # Invalid simplices must be (nearly) zero volume
+        q = vertices[:,:-1,:] - vertices[:,-1,None,:]
+        volume = np.array([np.linalg.det(q[k,:,:])
+                           for k in range(tri.nsimplex)])
+        ok = np.isfinite(tri.transform[:,0,0]) | (volume < np.sqrt(eps))
+        assert_(ok.all(), "%s %s" % (err_msg, np.nonzero(~ok)))
+
+        # Also, find_simplex for the centroid should end up in some
+        # simplex for the non-degenerate cases
+        j = tri.find_simplex(centroids)
+        ok = (j != -1) | np.isnan(tri.transform[:,0,0])
+        assert_(ok.all(), "%s %s" % (err_msg, np.nonzero(~ok)))
+
+        if unit_cube:
+            # If in unit cube, no interior point should be marked out of hull
+            at_boundary = (centroids <= unit_cube_tol).any(axis=1)
+            at_boundary |= (centroids >= 1 - unit_cube_tol).any(axis=1)
+
+            ok = (j != -1) | at_boundary
+            assert_(ok.all(), "%s %s" % (err_msg, np.nonzero(~ok)))
+
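+    # Editor's sketch (not part of the upstream suite): Delaunay.transform
+    # packs, per simplex, the affine map to barycentric coordinates --
+    # tr[:-1] holds T^-1 and tr[-1] the offset r, so c = T^-1 @ (x - r)
+    # gives the first ndim barycentric coordinates and the last one is
+    # 1 - sum(c).
+    def _sketch_barycentric_transform(self):
+        tri = qhull.Delaunay(np.array([(0, 0), (1, 0), (0, 1)], dtype=np.double))
+        tr = tri.transform[0]
+        x = np.array([0.25, 0.25])
+        c = tr[:-1].dot(x - tr[-1])
+        c = np.r_[c, 1 - c.sum()]
+        verts = tri.points[tri.simplices[0]]
+        # barycentric reconstruction recovers the original point
+        assert np.allclose(c @ verts, x)
+        assert np.all((c >= 0) & (c <= 1))
+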
+    def test_degenerate_barycentric_transforms(self):
+        # The triangulation should not produce invalid barycentric
+        # transforms that stump the simplex finding
+        data = np.load(os.path.join(os.path.dirname(__file__), 'data',
+                                    'degenerate_pointset.npz'))
+        points = data['c']
+        data.close()
+
+        tri = qhull.Delaunay(points)
+
+        # Check that there are not too many invalid simplices
+        bad_count = np.isnan(tri.transform[:,0,0]).sum()
+        assert_(bad_count < 23, bad_count)
+
+        # Check the transforms
+        self._check_barycentric_transforms(tri)
+
+    @pytest.mark.slow
+    def test_more_barycentric_transforms(self):
+        # Triangulate some "nasty" grids
+
+        eps = np.finfo(float).eps
+
+        npoints = {2: 70, 3: 11, 4: 5, 5: 3}
+
+        for ndim in range(2, 6):
+            # Generate a uniform grid in the n-d unit cube
+            x = np.linspace(0, 1, npoints[ndim])
+            grid = np.c_[list(map(np.ravel, np.broadcast_arrays(*np.ix_(*([x]*ndim)))))].T
+
+            err_msg = "ndim=%d" % ndim
+
+            # Check using regular grid
+            tri = qhull.Delaunay(grid)
+            self._check_barycentric_transforms(tri, err_msg=err_msg,
+                                               unit_cube=True)
+
+            # Check with eps-perturbations
+            np.random.seed(1234)
+            m = (np.random.rand(grid.shape[0]) < 0.2)
+            grid[m,:] += 2*eps*(np.random.rand(*grid[m,:].shape) - 0.5)
+
+            tri = qhull.Delaunay(grid)
+            self._check_barycentric_transforms(tri, err_msg=err_msg,
+                                               unit_cube=True,
+                                               unit_cube_tol=2*eps)
+
+            # Check with duplicated data
+            tri = qhull.Delaunay(np.r_[grid, grid])
+            self._check_barycentric_transforms(tri, err_msg=err_msg,
+                                               unit_cube=True,
+                                               unit_cube_tol=2*eps)
+
+
+class TestVertexNeighborVertices:
+    def _check(self, tri):
+        expected = [set() for j in range(tri.points.shape[0])]
+        for s in tri.simplices:
+            for a in s:
+                for b in s:
+                    if a != b:
+                        expected[a].add(b)
+
+        indptr, indices = tri.vertex_neighbor_vertices
+
+        got = [set(map(int, indices[indptr[j]:indptr[j+1]]))
+               for j in range(tri.points.shape[0])]
+
+        assert_equal(got, expected, err_msg="%r != %r" % (got, expected))
+
+    def test_triangle(self):
+        points = np.array([(0,0), (0,1), (1,0)], dtype=np.double)
+        tri = qhull.Delaunay(points)
+        self._check(tri)
+
+    def test_rectangle(self):
+        points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.double)
+        tri = qhull.Delaunay(points)
+        self._check(tri)
+
+    def test_complicated(self):
+        points = np.array([(0,0), (0,1), (1,1), (1,0),
+                           (0.5, 0.5), (0.9, 0.5)], dtype=np.double)
+        tri = qhull.Delaunay(points)
+        self._check(tri)
+
+
+class TestDelaunay:
+    """
+    Check that triangulation works.
+
+    """
+    def test_masked_array_fails(self):
+        masked_array = np.ma.masked_all(1)
+        assert_raises(ValueError, qhull.Delaunay, masked_array)
+
+    def test_array_with_nans_fails(self):
+        points_with_nan = np.array([(0,0), (0,1), (1,1), (1,np.nan)], dtype=np.double)
+        assert_raises(ValueError, qhull.Delaunay, points_with_nan)
+
+    def test_nd_simplex(self):
+        # simple smoke test: triangulate an n-dimensional simplex
+        for nd in range(2, 8):
+            points = np.zeros((nd+1, nd))
+            for j in range(nd):
+                points[j,j] = 1.0
+            points[-1,:] = 1.0
+
+            tri = qhull.Delaunay(points)
+
+            tri.simplices.sort()
+
+            assert_equal(tri.simplices, np.arange(nd+1, dtype=int)[None, :])
+            assert_equal(tri.neighbors, -1 + np.zeros((nd+1), dtype=int)[None,:])
+
+    def test_2d_square(self):
+        # simple smoke test: 2d square
+        points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.double)
+        tri = qhull.Delaunay(points)
+
+        assert_equal(tri.simplices, [[1, 3, 2], [3, 1, 0]])
+        assert_equal(tri.neighbors, [[-1, -1, 1], [-1, -1, 0]])
+
+    def test_duplicate_points(self):
+        x = np.array([0, 1, 0, 1], dtype=np.float64)
+        y = np.array([0, 0, 1, 1], dtype=np.float64)
+
+        xp = np.r_[x, x]
+        yp = np.r_[y, y]
+
+        # shouldn't fail on duplicate points
+        qhull.Delaunay(np.c_[x, y])
+        qhull.Delaunay(np.c_[xp, yp])
+
+    def test_pathological(self):
+        # both should succeed
+        points = DATASETS['pathological-1']
+        tri = qhull.Delaunay(points)
+        assert_equal(tri.points[tri.simplices].max(), points.max())
+        assert_equal(tri.points[tri.simplices].min(), points.min())
+
+        points = DATASETS['pathological-2']
+        tri = qhull.Delaunay(points)
+        assert_equal(tri.points[tri.simplices].max(), points.max())
+        assert_equal(tri.points[tri.simplices].min(), points.min())
+
+    def test_joggle(self):
+        # Check that the option QJ indeed guarantees that all input points
+        # occur as vertices of the triangulation
+
+        points = np.random.rand(10, 2)
+        points = np.r_[points, points]  # duplicate input data
+
+        tri = qhull.Delaunay(points, qhull_options="QJ Qbb Pp")
+        assert_array_equal(np.unique(tri.simplices.ravel()),
+                           np.arange(len(points)))
+
+    def test_coplanar(self):
+        # Check that the coplanar point output option indeed works
+        points = np.random.rand(10, 2)
+        points = np.r_[points, points]  # duplicate input data
+
+        tri = qhull.Delaunay(points)
+
+        assert_(len(np.unique(tri.simplices.ravel())) == len(points)//2)
+        assert_(len(tri.coplanar) == len(points)//2)
+
+        assert_(len(np.unique(tri.coplanar[:,2])) == len(points)//2)
+
+        assert_(np.all(tri.vertex_to_simplex >= 0))
+
+    def test_furthest_site(self):
+        points = [(0, 0), (0, 1), (1, 0), (0.5, 0.5), (1.1, 1.1)]
+        tri = qhull.Delaunay(points, furthest_site=True)
+
+        expected = np.array([(1, 4, 0), (4, 2, 0)])  # from Qhull
+        assert_array_equal(tri.simplices, expected)
+
+    @pytest.mark.parametrize("name", sorted(INCREMENTAL_DATASETS))
+    def test_incremental(self, name):
+        # Test incremental construction of the triangulation
+
+        chunks, opts = INCREMENTAL_DATASETS[name]
+        points = np.concatenate(chunks, axis=0)
+
+        obj = qhull.Delaunay(chunks[0], incremental=True,
+                             qhull_options=opts)
+        for chunk in chunks[1:]:
+            obj.add_points(chunk)
+
+        obj2 = qhull.Delaunay(points)
+
+        obj3 = qhull.Delaunay(chunks[0], incremental=True,
+                              qhull_options=opts)
+        if len(chunks) > 1:
+            obj3.add_points(np.concatenate(chunks[1:], axis=0),
+                            restart=True)
+
+        # Check that the incremental mode agrees with upfront mode
+        if name.startswith('pathological'):
+            # XXX: These produce valid but different triangulations.
+            #      They look OK when plotted, but how to check them?
+
+            assert_array_equal(np.unique(obj.simplices.ravel()),
+                               np.arange(points.shape[0]))
+            assert_array_equal(np.unique(obj2.simplices.ravel()),
+                               np.arange(points.shape[0]))
+        else:
+            assert_unordered_tuple_list_equal(obj.simplices, obj2.simplices,
+                                              tpl=sorted_tuple)
+
+        assert_unordered_tuple_list_equal(obj2.simplices, obj3.simplices,
+                                          tpl=sorted_tuple)
+
+    def test_vertices_deprecation(self):
+        tri = qhull.Delaunay([(0, 0), (0, 1), (1, 0)])
+        msg = ("Delaunay attribute 'vertices' is deprecated in favour of "
+               "'simplices' and will be removed in Scipy 1.11.0.")
+        with pytest.warns(DeprecationWarning, match=msg):
+            tri.vertices
+
+
+def assert_hulls_equal(points, facets_1, facets_2):
+    # Check that two convex hulls constructed from the same point set
+    # are equal
+
+    facets_1 = set(map(sorted_tuple, facets_1))
+    facets_2 = set(map(sorted_tuple, facets_2))
+
+    if facets_1 != facets_2 and points.shape[1] == 2:
+        # The direct check fails for the pathological cases:
+        # there the convex hull computed from the Delaunay
+        # triangulation differs (due to rounding error etc.) from the
+        # directly computed hull, depending on whether (tricoplanar)
+        # points that lie almost exactly on the hull are counted as
+        # hull vertices or not.
+        #
+        # So we check the result, and accept it if the Delaunay
+        # hull line segments are a subset of the usual hull.
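+        # (For example, a Delaunay hull edge from (0, 0) to (1, 0) is
+        # accepted if the direct hull has an edge from (0, 0) to
+        # (2, 0): the former segment lies inside the latter.)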
+
+        eps = 1000 * np.finfo(float).eps
+
+        for a, b in facets_1:
+            for ap, bp in facets_2:
+                t = points[bp] - points[ap]
+                t /= np.linalg.norm(t)       # tangent
+                n = np.array([-t[1], t[0]])  # normal
+
+                # check that the two line segments are parallel
+                # to the same line
+                c1 = np.dot(n, points[b] - points[ap])
+                c2 = np.dot(n, points[a] - points[ap])
+                if not np.allclose(np.dot(c1, n), 0):
+                    continue
+                if not np.allclose(np.dot(c2, n), 0):
+                    continue
+
+                # Check that the segment (a, b) is contained in (ap, bp)
+                c1 = np.dot(t, points[a] - points[ap])
+                c2 = np.dot(t, points[b] - points[ap])
+                c3 = np.dot(t, points[bp] - points[ap])
+                if c1 < -eps or c1 > c3 + eps:
+                    continue
+                if c2 < -eps or c2 > c3 + eps:
+                    continue
+
+                # OK:
+                break
+            else:
+                raise AssertionError("comparison fails")
+
+        # it was OK
+        return
+
+    assert_equal(facets_1, facets_2)
+
+
+class TestConvexHull:
+    def test_masked_array_fails(self):
+        masked_array = np.ma.masked_all(1)
+        assert_raises(ValueError, qhull.ConvexHull, masked_array)
+
+    def test_array_with_nans_fails(self):
+        points_with_nan = np.array([(0,0), (1,1), (2,np.nan)], dtype=np.double)
+        assert_raises(ValueError, qhull.ConvexHull, points_with_nan)
+
+    @pytest.mark.parametrize("name", sorted(DATASETS))
+    def test_hull_consistency_tri(self, name):
+        # Check that a convex hull returned by qhull in ndim
+        # and the hull constructed from ndim delaunay agree
+        points = DATASETS[name]
+
+        tri = qhull.Delaunay(points)
+        hull = qhull.ConvexHull(points)
+
+        assert_hulls_equal(points, tri.convex_hull, hull.simplices)
+
+        # Check that the hull extremes are as expected
+        if points.shape[1] == 2:
+            assert_equal(np.unique(hull.simplices), np.sort(hull.vertices))
+        else:
+            assert_equal(np.unique(hull.simplices), hull.vertices)
+
+    @pytest.mark.parametrize("name", sorted(INCREMENTAL_DATASETS))
+    def test_incremental(self, name):
+        # Test incremental construction of the convex hull
+        chunks, _ = INCREMENTAL_DATASETS[name]
+        points = np.concatenate(chunks, axis=0)
+
+        obj = qhull.ConvexHull(chunks[0], incremental=True)
+        for chunk in chunks[1:]:
+            obj.add_points(chunk)
+
+        obj2 = qhull.ConvexHull(points)
+
+        obj3 = qhull.ConvexHull(chunks[0], incremental=True)
+        if len(chunks) > 1:
+            obj3.add_points(np.concatenate(chunks[1:], axis=0),
+                            restart=True)
+
+        # Check that the incremental mode agrees with upfront mode
+        assert_hulls_equal(points, obj.simplices, obj2.simplices)
+        assert_hulls_equal(points, obj.simplices, obj3.simplices)
+
+    def test_vertices_2d(self):
+        # The vertices should be in counterclockwise order in 2-D
+        np.random.seed(1234)
+        points = np.random.rand(30, 2)
+
+        hull = qhull.ConvexHull(points)
+        assert_equal(np.unique(hull.simplices), np.sort(hull.vertices))
+
+        # Check counterclockwiseness
+        x, y = hull.points[hull.vertices].T
+        angle = np.arctan2(y - y.mean(), x - x.mean())
+        assert_(np.all(np.diff(np.unwrap(angle)) > 0))
+
+    def test_volume_area(self):
+        # Basic check that we get back the correct volume and area for a cube
+        points = np.array([(0, 0, 0), (0, 1, 0), (1, 0, 0), (1, 1, 0),
+                           (0, 0, 1), (0, 1, 1), (1, 0, 1), (1, 1, 1)])
+        tri = qhull.ConvexHull(points)
+
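+        # unit cube: volume = 1**3 = 1, surface area = 6 * 1**2 = 6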
+        assert_allclose(tri.volume, 1., rtol=1e-14)
+        assert_allclose(tri.area, 6., rtol=1e-14)
+
+    @pytest.mark.parametrize("incremental", [False, True])
+    def test_good2d(self, incremental):
+        # Make sure the QGn option gives the correct value of "good".
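+        # With QG4, qhull builds the hull of the remaining generators
+        # (the square) and marks as "good" the facets visible from
+        # generator 4 at (0.3, 0.6); only the top edge qualifies.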
+        points = np.array([[0.2, 0.2],
+                           [0.2, 0.4],
+                           [0.4, 0.4],
+                           [0.4, 0.2],
+                           [0.3, 0.6]])
+        hull = qhull.ConvexHull(points=points,
+                                incremental=incremental,
+                                qhull_options='QG4')
+        expected = np.array([False, True, False, False], dtype=bool)
+        actual = hull.good
+        assert_equal(actual, expected)
+
+    @pytest.mark.parametrize("visibility", [
+                              "QG4",  # visible=True
+                              "QG-4",  # visible=False
+                              ])
+    @pytest.mark.parametrize("new_gen, expected", [
+        # add generator that places QG4 inside hull
+        # so all facets are invisible
+        (np.array([[0.3, 0.7]]),
+         np.array([False, False, False, False, False], dtype=bool)),
+        # adding a generator on the opposite side of the square
+        # should preserve the single visible facet & add one invisible
+        # facet
+        (np.array([[0.3, -0.7]]),
+         np.array([False, True, False, False, False], dtype=bool)),
+        # split the visible facet on top of the square into two
+        # visible facets, with visibility at the end of the array
+        # because add_points concatenates
+        (np.array([[0.3, 0.41]]),
+         np.array([False, False, False, True, True], dtype=bool)),
+        # with our current Qhull options, coplanarity will not count
+        # for visibility; this case shifts one visible & one invisible
+        # facet & adds a coplanar facet
+        # simplex at index position 2 is the shifted visible facet
+        # the final simplex is the coplanar facet
+        (np.array([[0.5, 0.6], [0.6, 0.6]]),
+         np.array([False, False, True, False, False], dtype=bool)),
+        # place the new generator such that it envelops the query
+        # point within the convex hull, but only just barely within
+        # the double precision limit
+        # NOTE: testing exact degeneracy is less predictable than this
+        # scenario, perhaps because of the default Qt option we have
+        # enabled for Qhull to handle precision matters
+        (np.array([[0.3, 0.6 + 1e-16]]),
+         np.array([False, False, False, False, False], dtype=bool)),
+        ])
+    def test_good2d_incremental_changes(self, new_gen, expected,
+                                        visibility):
+        # use the usual square convex hull
+        # generators from test_good2d
+        points = np.array([[0.2, 0.2],
+                           [0.2, 0.4],
+                           [0.4, 0.4],
+                           [0.4, 0.2],
+                           [0.3, 0.6]])
+        hull = qhull.ConvexHull(points=points,
+                                incremental=True,
+                                qhull_options=visibility)
+        hull.add_points(new_gen)
+        actual = hull.good
+        if '-' in visibility:
+            expected = np.invert(expected)
+        assert_equal(actual, expected)
+
+    @pytest.mark.parametrize("incremental", [False, True])
+    def test_good2d_no_option(self, incremental):
+        # handle the case where the "good" attribute doesn't exist
+        # because QGn or QG-n wasn't specified
+        points = np.array([[0.2, 0.2],
+                           [0.2, 0.4],
+                           [0.4, 0.4],
+                           [0.4, 0.2],
+                           [0.3, 0.6]])
+        hull = qhull.ConvexHull(points=points,
+                                incremental=incremental)
+        actual = hull.good
+        assert actual is None
+        # preserve None after incremental addition
+        if incremental:
+            hull.add_points(np.zeros((1, 2)))
+            actual = hull.good
+            assert actual is None
+
+    @pytest.mark.parametrize("incremental", [False, True])
+    def test_good2d_inside(self, incremental):
+        # Make sure the QGn option gives the correct value of "good".
+        # When point n is inside the convex hull of the rest, good is
+        # all False.
+        points = np.array([[0.2, 0.2],
+                           [0.2, 0.4],
+                           [0.4, 0.4],
+                           [0.4, 0.2],
+                           [0.3, 0.3]])
+        hull = qhull.ConvexHull(points=points,
+                                incremental=incremental,
+                                qhull_options='QG4')
+        expected = np.array([False, False, False, False], dtype=bool)
+        actual = hull.good
+        assert_equal(actual, expected)
+
+    @pytest.mark.parametrize("incremental", [False, True])
+    def test_good3d(self, incremental):
+        # Make sure the QGn option gives the correct value of "good"
+        # for a 3d figure
+        points = np.array([[0.0, 0.0, 0.0],
+                           [0.90029516, -0.39187448, 0.18948093],
+                           [0.48676420, -0.72627633, 0.48536925],
+                           [0.57651530, -0.81179274, -0.09285832],
+                           [0.67846893, -0.71119562, 0.18406710]])
+        hull = qhull.ConvexHull(points=points,
+                                incremental=incremental,
+                                qhull_options='QG0')
+        expected = np.array([True, False, False, False], dtype=bool)
+        assert_equal(hull.good, expected)
+
+
+class TestVoronoi:
+
+    @pytest.mark.parametrize("qhull_opts, extra_pts", [
+        # option Qz (default for SciPy) will add
+        # an extra point at infinity
+        ("Qbb Qc Qz", 1),
+        ("Qbb Qc", 0),
+    ])
+    @pytest.mark.parametrize("n_pts", [50, 100])
+    @pytest.mark.parametrize("ndim", [2, 3])
+    def test_point_region_structure(self,
+                                    qhull_opts,
+                                    n_pts,
+                                    extra_pts,
+                                    ndim):
+        # see gh-16773
+        rng = np.random.default_rng(7790)
+        points = rng.random((n_pts, ndim))
+        vor = Voronoi(points, qhull_options=qhull_opts)
+        pt_region = vor.point_region
+        assert pt_region.max() == n_pts - 1 + extra_pts
+        assert pt_region.size == len(vor.regions) - extra_pts
+        assert len(vor.regions) == n_pts + extra_pts
+        assert vor.points.shape[0] == n_pts
+        # if there is an empty sublist in the Voronoi
+        # regions data structure, it should never be
+        # indexed because it corresponds to an internally
+        # added point at infinity and is not a member of the
+        # generators (input points)
+        if extra_pts:
+            sublens = [len(x) for x in vor.regions]
+            # only one point at infinity (empty region)
+            # is allowed
+            assert sublens.count(0) == 1
+            assert sublens.index(0) not in pt_region
+
+    def test_masked_array_fails(self):
+        masked_array = np.ma.masked_all(1)
+        assert_raises(ValueError, qhull.Voronoi, masked_array)
+
+    def test_simple(self):
+        # Simple case with known Voronoi diagram
+        points = [(0, 0), (0, 1), (0, 2),
+                  (1, 0), (1, 1), (1, 2),
+                  (2, 0), (2, 1), (2, 2)]
+
+        # qhull v o Fv Qbb Qc Qz < dat
+        output = """
+        2
+        5 10 1
+        -10.101 -10.101
+           0.5    0.5
+           0.5    1.5
+           1.5    0.5
+           1.5    1.5
+        2 0 1
+        3 2 0 1
+        2 0 2
+        3 3 0 1
+        4 1 2 4 3
+        3 4 0 2
+        2 0 3
+        3 4 0 3
+        2 0 4
+        0
+        12
+        4 0 3 0 1
+        4 0 1 0 1
+        4 1 4 1 2
+        4 1 2 0 2
+        4 2 5 0 2
+        4 3 4 1 3
+        4 3 6 0 3
+        4 4 5 2 4
+        4 4 7 3 4
+        4 5 8 0 4
+        4 6 7 0 3
+        4 7 8 0 4
+        """
+        self._compare_qvoronoi(points, output)
+
+    def _compare_qvoronoi(self, points, output, **kw):
+        """Compare to output from 'qvoronoi o Fv < data' to Voronoi()"""
+
+        # Parse output
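+        # qvoronoi 'o' output: line 0 is the dimension, line 1 the
+        # counts (#vertices #regions 1).  The vertex list starts with a
+        # dummy point at infinity, hence the [3:...] slice below and
+        # the int(y)-1 shift that maps it to index -1.  Each region
+        # line and each 'Fv' ridge line is prefixed with its length;
+        # ridge lines then give two input-point indices followed by
+        # the Voronoi-vertex indices of the separating ridge.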
+        output = [list(map(float, x.split())) for x in output.strip().splitlines()]
+        nvertex = int(output[1][0])
+        vertices = list(map(tuple, output[3:2+nvertex]))  # exclude inf
+        nregion = int(output[1][1])
+        regions = [[int(y)-1 for y in x[1:]]
+                   for x in output[2+nvertex:2+nvertex+nregion]]
+        ridge_points = [[int(y) for y in x[1:3]]
+                        for x in output[3+nvertex+nregion:]]
+        ridge_vertices = [[int(y)-1 for y in x[3:]]
+                          for x in output[3+nvertex+nregion:]]
+
+        # Compare results
+        vor = qhull.Voronoi(points, **kw)
+
+        def sorttuple(x):
+            return tuple(sorted(x))
+
+        assert_allclose(vor.vertices, vertices)
+        assert_equal(set(map(tuple, vor.regions)),
+                     set(map(tuple, regions)))
+
+        p1 = list(zip(list(map(sorttuple, ridge_points)), list(map(sorttuple, ridge_vertices))))
+        p2 = list(zip(list(map(sorttuple, vor.ridge_points.tolist())),
+                 list(map(sorttuple, vor.ridge_vertices))))
+        p1.sort()
+        p2.sort()
+
+        assert_equal(p1, p2)
+
+    @pytest.mark.parametrize("name", sorted(DATASETS))
+    def test_ridges(self, name):
+        # Check that the ridges computed by Voronoi indeed separate
+        # the regions of nearest neighborhood, by comparing the result
+        # to KDTree.
+
+        points = DATASETS[name]
+
+        tree = KDTree(points)
+        vor = qhull.Voronoi(points)
+
+        for p, v in vor.ridge_dict.items():
+            # consider only finite ridges
+            if not np.all(np.asarray(v) >= 0):
+                continue
+
+            ridge_midpoint = vor.vertices[v].mean(axis=0)
+            d = 1e-6 * (points[p[0]] - ridge_midpoint)
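+            # the ridge midpoint is equidistant from the two generators
+            # it separates; nudging it toward one generator must make
+            # that generator the unique nearest neighbour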
+
+            dist, k = tree.query(ridge_midpoint + d, k=1)
+            assert_equal(k, p[0])
+
+            dist, k = tree.query(ridge_midpoint - d, k=1)
+            assert_equal(k, p[1])
+
+    def test_furthest_site(self):
+        points = [(0, 0), (0, 1), (1, 0), (0.5, 0.5), (1.1, 1.1)]
+
+        # qhull v o Fv Qbb Qc Qu < dat
+        output = """
+        2
+        3 5 1
+        -10.101 -10.101
+        0.6000000000000001    0.5
+           0.5 0.6000000000000001
+        3 0 2 1
+        2 0 1
+        2 0 2
+        0
+        3 0 2 1
+        5
+        4 0 2 0 2
+        4 0 4 1 2
+        4 0 1 0 1
+        4 1 4 0 1
+        4 2 4 0 2
+        """
+        self._compare_qvoronoi(points, output, furthest_site=True)
+
+    def test_furthest_site_flag(self):
+        points = [(0, 0), (0, 1), (1, 0), (0.5, 0.5), (1.1, 1.1)]
+
+        vor = Voronoi(points)
+        assert_equal(vor.furthest_site, False)
+        vor = Voronoi(points, furthest_site=True)
+        assert_equal(vor.furthest_site, True)
+
+    @pytest.mark.parametrize("name", sorted(INCREMENTAL_DATASETS))
+    def test_incremental(self, name):
+        # Test incremental construction of the triangulation
+
+        if INCREMENTAL_DATASETS[name][0][0].shape[1] > 3:
+            # checking the result is too slow (qhull itself is still fast)
+            return
+
+        chunks, opts = INCREMENTAL_DATASETS[name]
+        points = np.concatenate(chunks, axis=0)
+
+        obj = qhull.Voronoi(chunks[0], incremental=True,
+                             qhull_options=opts)
+        for chunk in chunks[1:]:
+            obj.add_points(chunk)
+
+        obj2 = qhull.Voronoi(points)
+
+        obj3 = qhull.Voronoi(chunks[0], incremental=True,
+                             qhull_options=opts)
+        if len(chunks) > 1:
+            obj3.add_points(np.concatenate(chunks[1:], axis=0),
+                            restart=True)
+
+        # -- Check that the incremental mode agrees with upfront mode
+        assert_equal(len(obj.point_region), len(obj2.point_region))
+        assert_equal(len(obj.point_region), len(obj3.point_region))
+
+        # The vertices may be in different order or duplicated in
+        # the incremental map
+        for objx in obj, obj3:
+            vertex_map = {-1: -1}
+            for i, v in enumerate(objx.vertices):
+                for j, v2 in enumerate(obj2.vertices):
+                    if np.allclose(v, v2):
+                        vertex_map[i] = j
+
+            def remap(x):
+                if hasattr(x, '__len__'):
+                    return tuple(set([remap(y) for y in x]))
+                try:
+                    return vertex_map[x]
+                except KeyError as e:
+                    raise AssertionError("incremental result has spurious vertex at %r"
+                                         % (objx.vertices[x],)) from e
+
+            def simplified(x):
+                items = set(map(sorted_tuple, x))
+                if () in items:
+                    items.remove(())
+                items = [x for x in items if len(x) > 1]
+                items.sort()
+                return items
+
+            assert_equal(
+                simplified(remap(objx.regions)),
+                simplified(obj2.regions)
+                )
+            assert_equal(
+                simplified(remap(objx.ridge_vertices)),
+                simplified(obj2.ridge_vertices)
+                )
+
+            # XXX: compare ridge_points --- not clear exactly how to do this
+
+
+class Test_HalfspaceIntersection:
+    def assert_unordered_allclose(self, arr1, arr2, rtol=1e-7):
+        """Check that every line in arr1 is only once in arr2"""
+        assert_equal(arr1.shape, arr2.shape)
+
+        truths = np.zeros((arr1.shape[0],), dtype=bool)
+        for l1 in arr1:
+            indexes = np.nonzero((abs(arr2 - l1) < rtol).all(axis=1))[0]
+            assert_equal(indexes.shape, (1,))
+            truths[indexes[0]] = True
+        assert_(truths.all())
+
+    @pytest.mark.parametrize("dt", [np.float64, int])
+    def test_cube_halfspace_intersection(self, dt):
+        halfspaces = np.array([[-1, 0, 0],
+                               [0, -1, 0],
+                               [1, 0, -2],
+                               [0, 1, -2]], dtype=dt)
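+        # each row [a1, a2, b] encodes a1*x + a2*y + b <= 0, so these
+        # four halfspaces bound the square 0 <= x <= 2, 0 <= y <= 2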
+        feasible_point = np.array([1, 1], dtype=dt)
+
+        points = np.array([[0.0, 0.0], [2.0, 0.0], [0.0, 2.0], [2.0, 2.0]])
+
+        hull = qhull.HalfspaceIntersection(halfspaces, feasible_point)
+
+        assert_allclose(hull.intersections, points)
+
+    def test_self_dual_polytope_intersection(self):
+        fname = os.path.join(os.path.dirname(__file__), 'data',
+                             'selfdual-4d-polytope.txt')
+        ineqs = np.genfromtxt(fname)
+        halfspaces = -np.hstack((ineqs[:, 1:], ineqs[:, :1]))
+
+        feas_point = np.array([0., 0., 0., 0.])
+        hs = qhull.HalfspaceIntersection(halfspaces, feas_point)
+
+        assert_equal(hs.intersections.shape, (24, 4))
+
+        assert_almost_equal(hs.dual_volume, 32.0)
+        assert_equal(len(hs.dual_facets), 24)
+        for facet in hs.dual_facets:
+            assert_equal(len(facet), 6)
+
+        dists = halfspaces[:, -1] + halfspaces[:, :-1].dot(feas_point)
+        self.assert_unordered_allclose((halfspaces[:, :-1].T/dists).T, hs.dual_points)
+
+        points = itertools.permutations([0., 0., 0.5, -0.5])
+        for point in points:
+            assert_equal(np.sum((hs.intersections == point).all(axis=1)), 1)
+
+    def test_wrong_feasible_point(self):
+        halfspaces = np.array([[-1.0, 0.0, 0.0],
+                               [0.0, -1.0, 0.0],
+                               [1.0, 0.0, -1.0],
+                               [0.0, 1.0, -1.0]])
+        feasible_point = np.array([0.5, 0.5, 0.5])
+        # Feasible point is (ndim,) instead of (ndim-1,)
+        assert_raises(ValueError, qhull.HalfspaceIntersection, halfspaces, feasible_point)
+        feasible_point = np.array([[0.5], [0.5]])
+        # Feasible point is (ndim-1, 1) instead of (ndim-1,)
+        assert_raises(ValueError, qhull.HalfspaceIntersection, halfspaces, feasible_point)
+        feasible_point = np.array([[0.5, 0.5]])
+        # Feasible point is (1, ndim-1) instead of (ndim-1,)
+        assert_raises(ValueError, qhull.HalfspaceIntersection, halfspaces, feasible_point)
+
+        feasible_point = np.array([-0.5, -0.5])
+        # Feasible point is outside the feasible region
+        assert_raises(qhull.QhullError, qhull.HalfspaceIntersection, halfspaces, feasible_point)
+
+    def test_incremental(self):
+        # Cube
+        halfspaces = np.array([[0., 0., -1., -0.5],
+                               [0., -1., 0., -0.5],
+                               [-1., 0., 0., -0.5],
+                               [1., 0., 0., -0.5],
+                               [0., 1., 0., -0.5],
+                               [0., 0., 1., -0.5]])
+        # Cut off each corner of the cube
+        extra_normals = np.array([[1., 1., 1.],
+                                  [1., 1., -1.],
+                                  [1., -1., 1.],
+                                  [1., -1., -1.]])
+        offsets = np.array([[-1.]]*8)
+        extra_halfspaces = np.hstack((np.vstack((extra_normals, -extra_normals)),
+                                      offsets))
+
+        feas_point = np.array([0., 0., 0.])
+
+        inc_hs = qhull.HalfspaceIntersection(halfspaces, feas_point, incremental=True)
+
+        inc_res_hs = qhull.HalfspaceIntersection(halfspaces, feas_point, incremental=True)
+
+        for i, ehs in enumerate(extra_halfspaces):
+            inc_hs.add_halfspaces(ehs[np.newaxis, :])
+
+            inc_res_hs.add_halfspaces(ehs[np.newaxis, :], restart=True)
+
+            total = np.vstack((halfspaces, extra_halfspaces[:i+1, :]))
+
+            hs = qhull.HalfspaceIntersection(total, feas_point)
+
+            assert_allclose(inc_hs.halfspaces, inc_res_hs.halfspaces)
+            assert_allclose(inc_hs.halfspaces, hs.halfspaces)
+
+            # Direct computation and restart should have the points in
+            # the same order
+            assert_allclose(hs.intersections, inc_res_hs.intersections)
+            # Incremental mode will have the points in a different
+            # order than the direct computation
+            self.assert_unordered_allclose(inc_hs.intersections, hs.intersections)
+
+        inc_hs.close()
+
+    def test_cube(self):
+        # Halfspaces of the cube:
+        halfspaces = np.array([[-1., 0., 0., 0.],  # x >= 0
+                               [1., 0., 0., -1.],  # x <= 1
+                               [0., -1., 0., 0.],  # y >= 0
+                               [0., 1., 0., -1.],  # y <= 1
+                               [0., 0., -1., 0.],  # z >= 0
+                               [0., 0., 1., -1.]])  # z <= 1
+        point = np.array([0.5, 0.5, 0.5])
+
+        hs = qhull.HalfspaceIntersection(halfspaces, point)
+
+        # qhalf H0.5,0.5,0.5 o < input.txt
+        qhalf_points = np.array([
+            [-2, 0, 0],
+            [2, 0, 0],
+            [0, -2, 0],
+            [0, 2, 0],
+            [0, 0, -2],
+            [0, 0, 2]])
+        qhalf_facets = [
+            [2, 4, 0],
+            [4, 2, 1],
+            [5, 2, 0],
+            [2, 5, 1],
+            [3, 4, 1],
+            [4, 3, 0],
+            [5, 3, 1],
+            [3, 5, 0]]
+
+        assert len(qhalf_facets) == len(hs.dual_facets)
+        for a, b in zip(qhalf_facets, hs.dual_facets):
+            assert set(a) == set(b)  # facet orientation can differ
+
+        assert_allclose(hs.dual_points, qhalf_points)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/test_slerp.py b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/test_slerp.py
new file mode 100644
index 00000000..4754d525
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/test_slerp.py
@@ -0,0 +1,416 @@
+import numpy as np
+from numpy.testing import assert_allclose
+
+import pytest
+from scipy.spatial import geometric_slerp
+
+
+def _generate_spherical_points(ndim=3, n_pts=2):
+    # generate uniform points on sphere
+    # see: https://stackoverflow.com/a/23785326
+    # tentatively extended to arbitrary dims
+    # for 0-sphere it will always produce antipodes
+    np.random.seed(123)
+    points = np.random.normal(size=(n_pts, ndim))
+    points /= np.linalg.norm(points, axis=1)[:, np.newaxis]
+    return points[0], points[1]
+
+
+class TestGeometricSlerp:
+    # Test various properties of the geometric slerp code
+
+    @pytest.mark.parametrize("n_dims", [2, 3, 5, 7, 9])
+    @pytest.mark.parametrize("n_pts", [0, 3, 17])
+    def test_shape_property(self, n_dims, n_pts):
+        # geometric_slerp output shape should match
+        # input dimensionality & requested number
+        # of interpolation points
+        start, end = _generate_spherical_points(n_dims, 2)
+
+        actual = geometric_slerp(start=start,
+                                 end=end,
+                                 t=np.linspace(0, 1, n_pts))
+
+        assert actual.shape == (n_pts, n_dims)
+
+    @pytest.mark.parametrize("n_dims", [2, 3, 5, 7, 9])
+    @pytest.mark.parametrize("n_pts", [3, 17])
+    def test_include_ends(self, n_dims, n_pts):
+        # geometric_slerp should return a data structure
+        # that includes the start and end coordinates
+        # when t includes 0 and 1 ends
+        # this is convenient for plotting surfaces represented
+        # by interpolations for example
+
+        # the point generator always produces antipodes on the
+        # 0-sphere, so that degenerate case is not exercised here
+        start, end = _generate_spherical_points(n_dims, 2)
+
+        actual = geometric_slerp(start=start,
+                                 end=end,
+                                 t=np.linspace(0, 1, n_pts))
+
+        assert_allclose(actual[0], start)
+        assert_allclose(actual[-1], end)
+
+    @pytest.mark.parametrize("start, end", [
+        # both arrays are not flat
+        (np.zeros((1, 3)), np.ones((1, 3))),
+        # only start array is not flat
+        (np.zeros((1, 3)), np.ones(3)),
+        # only end array is not flat
+        (np.zeros(1), np.ones((3, 1))),
+        ])
+    def test_input_shape_flat(self, start, end):
+        # geometric_slerp should handle input arrays that are
+        # not flat appropriately
+        with pytest.raises(ValueError, match='one-dimensional'):
+            geometric_slerp(start=start,
+                            end=end,
+                            t=np.linspace(0, 1, 10))
+
+    @pytest.mark.parametrize("start, end", [
+        # 7-D and 3-D ends
+        (np.zeros(7), np.ones(3)),
+        # 2-D and 1-D ends
+        (np.zeros(2), np.ones(1)),
+        # empty, "3D" will also get caught this way
+        (np.array([]), np.ones(3)),
+        ])
+    def test_input_dim_mismatch(self, start, end):
+        # geometric_slerp must appropriately handle cases where
+        # an interpolation is attempted across two different
+        # dimensionalities
+        with pytest.raises(ValueError, match='dimensions'):
+            geometric_slerp(start=start,
+                            end=end,
+                            t=np.linspace(0, 1, 10))
+
+    @pytest.mark.parametrize("start, end", [
+        # both empty
+        (np.array([]), np.array([])),
+        ])
+    def test_input_at_least1d(self, start, end):
+        # empty inputs to geometric_slerp must
+        # be handled appropriately when not detected
+        # by mismatch
+        with pytest.raises(ValueError, match='at least two-dim'):
+            geometric_slerp(start=start,
+                            end=end,
+                            t=np.linspace(0, 1, 10))
+
+    @pytest.mark.parametrize("start, end, expected", [
+        # North and South Poles are definitely antipodes
+        # but should be handled gracefully now
+        (np.array([0, 0, 1.0]), np.array([0, 0, -1.0]), "warning"),
+        # this case will issue a warning & be handled
+        # gracefully as well;
+        # North Pole was rotated very slightly
+        # using r = R.from_euler('x', 0.035, degrees=True)
+        # to achieve Euclidean distance offset from diameter by
+        # 9.328908379124812e-08, within the default tol
+        (np.array([0.00000000e+00,
+                  -6.10865200e-04,
+                  9.99999813e-01]), np.array([0, 0, -1.0]), "warning"),
+        # this case should succeed without warning because a
+        # sufficiently large
+        # rotation was applied to North Pole point to shift it
+        # to a Euclidean distance of 2.3036691931821451e-07
+        # from South Pole, which is larger than tol
+        (np.array([0.00000000e+00,
+                  -9.59930941e-04,
+                  9.99999539e-01]), np.array([0, 0, -1.0]), "success"),
+        ])
+    def test_handle_antipodes(self, start, end, expected):
+        # antipodal points must be handled appropriately;
+        # there are an infinite number of possible geodesic
+        # interpolations between them in higher dims
+        if expected == "warning":
+            with pytest.warns(UserWarning, match='antipodes'):
+                res = geometric_slerp(start=start,
+                                      end=end,
+                                      t=np.linspace(0, 1, 10))
+        else:
+            res = geometric_slerp(start=start,
+                                  end=end,
+                                  t=np.linspace(0, 1, 10))
+
+        # antipodes or near-antipodes should still produce
+        # slerp paths on the surface of the sphere (but they
+        # may be ambiguous):
+        assert_allclose(np.linalg.norm(res, axis=1), 1.0)
+
+    @pytest.mark.parametrize("start, end, expected", [
+        # 2-D with n_pts=4 (two new interpolation points)
+        # this is an actual circle
+        (np.array([1, 0]),
+         np.array([0, 1]),
+         np.array([[1, 0],
+                   [np.sqrt(3) / 2, 0.5],  # 30 deg on unit circle
+                   [0.5, np.sqrt(3) / 2],  # 60 deg on unit circle
+                   [0, 1]])),
+        # likewise for 3-D (add z = 0 plane)
+        # this is an ordinary sphere
+        (np.array([1, 0, 0]),
+         np.array([0, 1, 0]),
+         np.array([[1, 0, 0],
+                   [np.sqrt(3) / 2, 0.5, 0],
+                   [0.5, np.sqrt(3) / 2, 0],
+                   [0, 1, 0]])),
+        # for 5-D, pad more columns with constants
+        # zeros are easiest--non-zero values on unit
+        # circle are more difficult to reason about
+        # at higher dims
+        (np.array([1, 0, 0, 0, 0]),
+         np.array([0, 1, 0, 0, 0]),
+         np.array([[1, 0, 0, 0, 0],
+                   [np.sqrt(3) / 2, 0.5, 0, 0, 0],
+                   [0.5, np.sqrt(3) / 2, 0, 0, 0],
+                   [0, 1, 0, 0, 0]])),
+
+    ])
+    def test_straightforward_examples(self, start, end, expected):
+        # some straightforward interpolation tests, sufficiently
+        # simple to use the unit circle to deduce expected values;
+        # for larger dimensions, pad with constants so that the
+        # data is N-D but simpler to reason about
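+        #
+        # For reference, the expected rows follow from the standard
+        # slerp formula
+        #   slerp(p, q, t) = sin((1-t)*theta)/sin(theta) * p
+        #                  + sin(t*theta)/sin(theta) * q
+        # with theta = pi/2 here: t = 1/3 gives coefficients
+        # sin(pi/3) = sqrt(3)/2 and sin(pi/6) = 1/2.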
+        actual = geometric_slerp(start=start,
+                                 end=end,
+                                 t=np.linspace(0, 1, 4))
+        assert_allclose(actual, expected, atol=1e-16)
+
+    @pytest.mark.parametrize("t", [
+        # both interval ends clearly violate limits
+        np.linspace(-20, 20, 300),
+        # only one interval end violating limit slightly
+        np.linspace(-0.0001, 0.0001, 17),
+        ])
+    def test_t_values_limits(self, t):
+        # geometric_slerp() should appropriately handle
+        # interpolation parameters < 0 and > 1
+        with pytest.raises(ValueError, match='interpolation parameter'):
+            _ = geometric_slerp(start=np.array([1, 0]),
+                                end=np.array([0, 1]),
+                                t=t)
+
+    @pytest.mark.parametrize("start, end", [
+        (np.array([1]),
+         np.array([0])),
+        (np.array([0]),
+         np.array([1])),
+        (np.array([-17.7]),
+         np.array([165.9])),
+     ])
+    def test_0_sphere_handling(self, start, end):
+        # it does not make sense to interpolate the set of
+        # two points that is the 0-sphere
+        with pytest.raises(ValueError, match='at least two-dim'):
+            _ = geometric_slerp(start=start,
+                                end=end,
+                                t=np.linspace(0, 1, 4))
+
+    @pytest.mark.parametrize("tol", [
+        # an integer currently raises
+        5,
+        # string raises
+        "7",
+        # list and arrays also raise
+        [5, 6, 7], np.array(9.0),
+        ])
+    def test_tol_type(self, tol):
+        # geometric_slerp() should raise if tol is not
+        # a suitable float type
+        with pytest.raises(ValueError, match='must be a float'):
+            _ = geometric_slerp(start=np.array([1, 0]),
+                                end=np.array([0, 1]),
+                                t=np.linspace(0, 1, 5),
+                                tol=tol)
+
+    @pytest.mark.parametrize("tol", [
+        -5e-6,
+        -7e-10,
+        ])
+    def test_tol_sign(self, tol):
+        # geometric_slerp() currently handles negative
+        # tol values, as long as they are floats
+        _ = geometric_slerp(start=np.array([1, 0]),
+                            end=np.array([0, 1]),
+                            t=np.linspace(0, 1, 5),
+                            tol=tol)
+
+    @pytest.mark.parametrize("start, end", [
+        # 1-sphere (circle) with one point at origin
+        # and the other on the circle
+        (np.array([1, 0]), np.array([0, 0])),
+        # 2-sphere (normal sphere) with both points
+        # just slightly off sphere by the same amount
+        # in different directions
+        (np.array([1 + 1e-6, 0, 0]),
+         np.array([0, 1 - 1e-6, 0])),
+        # same thing in 4-D
+        (np.array([1 + 1e-6, 0, 0, 0]),
+         np.array([0, 1 - 1e-6, 0, 0])),
+        ])
+    def test_unit_sphere_enforcement(self, start, end):
+        # geometric_slerp() should raise on input that clearly
+        # cannot be on an n-sphere of radius 1
+        with pytest.raises(ValueError, match='unit n-sphere'):
+            geometric_slerp(start=start,
+                            end=end,
+                            t=np.linspace(0, 1, 5))
+
+    @pytest.mark.parametrize("start, end", [
+        # 1-sphere 45 degree case
+        (np.array([1, 0]),
+         np.array([np.sqrt(2) / 2.,
+                   np.sqrt(2) / 2.])),
+        # 2-sphere 135 degree case
+        (np.array([1, 0]),
+         np.array([-np.sqrt(2) / 2.,
+                   np.sqrt(2) / 2.])),
+        ])
+    @pytest.mark.parametrize("t_func", [
+        np.linspace, np.logspace])
+    def test_order_handling(self, start, end, t_func):
+        # geometric_slerp() should handle scenarios with
+        # ascending and descending t value arrays gracefully;
+        # results should simply be reversed
+
+        # for scrambled / unsorted parameters, the same values
+        # should be returned, just in scrambled order
+
+        num_t_vals = 20
+        np.random.seed(789)
+        forward_t_vals = t_func(0, 10, num_t_vals)
+        # normalize to max of 1
+        forward_t_vals /= forward_t_vals.max()
+        reverse_t_vals = np.flipud(forward_t_vals)
+        shuffled_indices = np.arange(num_t_vals)
+        np.random.shuffle(shuffled_indices)
+        scramble_t_vals = forward_t_vals.copy()[shuffled_indices]
+
+        forward_results = geometric_slerp(start=start,
+                                          end=end,
+                                          t=forward_t_vals)
+        reverse_results = geometric_slerp(start=start,
+                                          end=end,
+                                          t=reverse_t_vals)
+        scrambled_results = geometric_slerp(start=start,
+                                            end=end,
+                                            t=scramble_t_vals)
+
+        # check fidelity to input order
+        assert_allclose(forward_results, np.flipud(reverse_results))
+        assert_allclose(forward_results[shuffled_indices],
+                        scrambled_results)
+
+    @pytest.mark.parametrize("t", [
+        # string:
+        "15, 5, 7",
+        # complex numbers currently produce a warning
+        # but not sure we need to worry about it too much:
+        # [3 + 1j, 5 + 2j],
+        ])
+    def test_t_values_conversion(self, t):
+        with pytest.raises(ValueError):
+            _ = geometric_slerp(start=np.array([1]),
+                                end=np.array([0]),
+                                t=t)
+
+    def test_accept_arraylike(self):
+        # array-like support requested by reviewer
+        # in gh-10380
+        actual = geometric_slerp([1, 0], [0, 1], [0, 1/3, 0.5, 2/3, 1])
+
+        # expected values are based on visual inspection
+        # of the unit circle for the progressions along
+        # the circumference provided in t
+        expected = np.array([[1, 0],
+                             [np.sqrt(3) / 2, 0.5],
+                             [np.sqrt(2) / 2,
+                              np.sqrt(2) / 2],
+                             [0.5, np.sqrt(3) / 2],
+                             [0, 1]], dtype=np.float64)
+        # Tyler's original Cython implementation of geometric_slerp
+        # can pass at atol=0 here, but on balance we will accept
+        # 1e-16 for an implementation that avoids Cython and
+        # makes up accuracy ground elsewhere
+        assert_allclose(actual, expected, atol=1e-16)
+
+    def test_scalar_t(self):
+        # when t is a scalar, return value is a single
+        # interpolated point of the appropriate dimensionality
+        # requested by reviewer in gh-10380
+        actual = geometric_slerp([1, 0], [0, 1], 0.5)
+        expected = np.array([np.sqrt(2) / 2,
+                             np.sqrt(2) / 2], dtype=np.float64)
+        assert actual.shape == (2,)
+        assert_allclose(actual, expected)
+
+    @pytest.mark.parametrize('start', [
+        np.array([1, 0, 0]),
+        np.array([0, 1]),
+    ])
+    @pytest.mark.parametrize('t', [
+        np.array(1),
+        np.array([1]),
+        np.array([[1]]),
+        np.array([[[1]]]),
+        np.array([]),
+        np.linspace(0, 1, 5),
+    ])
+    def test_degenerate_input(self, start, t):
+        if np.asarray(t).ndim > 1:
+            with pytest.raises(ValueError):
+                geometric_slerp(start=start, end=start, t=t)
+        else:
+
+            shape = (t.size,) + start.shape
+            expected = np.full(shape, start)
+
+            actual = geometric_slerp(start=start, end=start, t=t)
+            assert_allclose(actual, expected)
+
+            # Check that degenerate and non-degenerate
+            # inputs yield the same size
+            non_degenerate = geometric_slerp(start=start, end=start[::-1], t=t)
+            assert actual.size == non_degenerate.size
+
+    @pytest.mark.parametrize('k', np.logspace(-10, -1, 10))
+    def test_numerical_stability_pi(self, k):
+        # geometric_slerp should have excellent numerical
+        # stability for angles approaching pi between
+        # the start and end points
+        angle = np.pi - k
+        ts = np.linspace(0, 1, 100)
+        P = np.array([1, 0, 0, 0])
+        Q = np.array([np.cos(angle), np.sin(angle), 0, 0])
+        # the test should only be enforced for cases where
+        # geometric_slerp determines that the input is actually
+        # on the unit sphere
+        with np.testing.suppress_warnings() as sup:
+            sup.filter(UserWarning)
+            result = geometric_slerp(P, Q, ts, 1e-18)
+            norms = np.linalg.norm(result, axis=1)
+            error = np.max(np.abs(norms - 1))
+            assert error < 4e-15
+
+    @pytest.mark.parametrize('t', [
+     [[0, 0.5]],
+     [[[[[[[[[0, 0.5]]]]]]]]],
+    ])
+    def test_interpolation_param_ndim(self, t):
+        # regression test for gh-14465
+        arr1 = np.array([0, 1])
+        arr2 = np.array([1, 0])
+
+        with pytest.raises(ValueError):
+            geometric_slerp(start=arr1,
+                            end=arr2,
+                            t=t)
+
+        with pytest.raises(ValueError):
+            geometric_slerp(start=arr1,
+                            end=arr1,
+                            t=t)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/test_spherical_voronoi.py b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/test_spherical_voronoi.py
new file mode 100644
index 00000000..dcc96210
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/tests/test_spherical_voronoi.py
@@ -0,0 +1,355 @@
+import numpy as np
+import itertools
+from numpy.testing import (assert_equal,
+                           assert_almost_equal,
+                           assert_array_equal,
+                           assert_array_almost_equal)
+import pytest
+from pytest import raises as assert_raises
+from scipy.spatial import SphericalVoronoi, distance
+from scipy.optimize import linear_sum_assignment
+from scipy.constants import golden as phi
+from scipy.special import gamma
+
+
+TOL = 1E-10
+
+
+def _generate_tetrahedron():
+    return np.array([[1, 1, 1], [1, -1, -1], [-1, 1, -1], [-1, -1, 1]])
+
+
+def _generate_cube():
+    return np.array(list(itertools.product([-1, 1.], repeat=3)))
+
+
+def _generate_octahedron():
+    return np.array([[-1, 0, 0], [+1, 0, 0], [0, -1, 0],
+                     [0, +1, 0], [0, 0, -1], [0, 0, +1]])
+
+
+def _generate_dodecahedron():
+
+    x1 = _generate_cube()
+    x2 = np.array([[0, -phi, -1 / phi],
+                   [0, -phi, +1 / phi],
+                   [0, +phi, -1 / phi],
+                   [0, +phi, +1 / phi]])
+    x3 = np.array([[-1 / phi, 0, -phi],
+                   [+1 / phi, 0, -phi],
+                   [-1 / phi, 0, +phi],
+                   [+1 / phi, 0, +phi]])
+    x4 = np.array([[-phi, -1 / phi, 0],
+                   [-phi, +1 / phi, 0],
+                   [+phi, -1 / phi, 0],
+                   [+phi, +1 / phi, 0]])
+    return np.concatenate((x1, x2, x3, x4))
+
+
+def _generate_icosahedron():
+    x = np.array([[0, -1, -phi],
+                  [0, -1, +phi],
+                  [0, +1, -phi],
+                  [0, +1, +phi]])
+    return np.concatenate([np.roll(x, i, axis=1) for i in range(3)])
+
+
+def _generate_polytope(name):
+    polygons = ["triangle", "square", "pentagon", "hexagon", "heptagon",
+                "octagon", "nonagon", "decagon", "undecagon", "dodecagon"]
+    polyhedra = ["tetrahedron", "cube", "octahedron", "dodecahedron",
+                 "icosahedron"]
+    if name not in polygons and name not in polyhedra:
+        raise ValueError("unrecognized polytope")
+
+    if name in polygons:
+        n = polygons.index(name) + 3
+        thetas = np.linspace(0, 2 * np.pi, n, endpoint=False)
+        p = np.vstack([np.cos(thetas), np.sin(thetas)]).T
+    elif name == "tetrahedron":
+        p = _generate_tetrahedron()
+    elif name == "cube":
+        p = _generate_cube()
+    elif name == "octahedron":
+        p = _generate_octahedron()
+    elif name == "dodecahedron":
+        p = _generate_dodecahedron()
+    elif name == "icosahedron":
+        p = _generate_icosahedron()
+
+    return p / np.linalg.norm(p, axis=1, keepdims=True)
+
+
+def _hypersphere_area(dim, radius):
+    # https://en.wikipedia.org/wiki/N-sphere#Closed_forms
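+    # e.g. dim=2 gives the circumference 2*pi*r and dim=3 the
+    # surface area 4*pi*r**2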
+    return 2 * np.pi**(dim / 2) / gamma(dim / 2) * radius**(dim - 1)
+
+
+def _sample_sphere(n, dim, seed=None):
+    # Sample points uniformly at random from the hypersphere
+    rng = np.random.RandomState(seed=seed)
+    points = rng.randn(n, dim)
+    points /= np.linalg.norm(points, axis=1, keepdims=True)
+    return points
+
+
+class TestSphericalVoronoi:
+
+    def setup_method(self):
+        self.points = np.array([
+            [-0.78928481, -0.16341094, 0.59188373],
+            [-0.66839141, 0.73309634, 0.12578818],
+            [0.32535778, -0.92476944, -0.19734181],
+            [-0.90177102, -0.03785291, -0.43055335],
+            [0.71781344, 0.68428936, 0.12842096],
+            [-0.96064876, 0.23492353, -0.14820556],
+            [0.73181537, -0.22025898, -0.6449281],
+            [0.79979205, 0.54555747, 0.25039913]]
+        )
+
+    def test_constructor(self):
+        center = np.array([1, 2, 3])
+        radius = 2
+        s1 = SphericalVoronoi(self.points)
+        # user input checks in SphericalVoronoi now require
+        # the radius / center to match the generators so adjust
+        # accordingly here
+        s2 = SphericalVoronoi(self.points * radius, radius)
+        s3 = SphericalVoronoi(self.points + center, center=center)
+        s4 = SphericalVoronoi(self.points * radius + center, radius, center)
+        assert_array_equal(s1.center, np.array([0, 0, 0]))
+        assert_equal(s1.radius, 1)
+        assert_array_equal(s2.center, np.array([0, 0, 0]))
+        assert_equal(s2.radius, 2)
+        assert_array_equal(s3.center, center)
+        assert_equal(s3.radius, 1)
+        assert_array_equal(s4.center, center)
+        assert_equal(s4.radius, radius)
+
+        # Test a non-sequence/-ndarray based array-like
+        s5 = SphericalVoronoi(memoryview(self.points))  # type: ignore[arg-type]
+        assert_array_equal(s5.center, np.array([0, 0, 0]))
+        assert_equal(s5.radius, 1)
+
+    def test_vertices_regions_translation_invariance(self):
+        sv_origin = SphericalVoronoi(self.points)
+        center = np.array([1, 1, 1])
+        sv_translated = SphericalVoronoi(self.points + center, center=center)
+        assert_equal(sv_origin.regions, sv_translated.regions)
+        assert_array_almost_equal(sv_origin.vertices + center,
+                                  sv_translated.vertices)
+
+    def test_vertices_regions_scaling_invariance(self):
+        sv_unit = SphericalVoronoi(self.points)
+        sv_scaled = SphericalVoronoi(self.points * 2, 2)
+        assert_equal(sv_unit.regions, sv_scaled.regions)
+        assert_array_almost_equal(sv_unit.vertices * 2,
+                                  sv_scaled.vertices)
+
+    def test_old_radius_api_error(self):
+        with pytest.raises(ValueError, match='`radius` is `None`. *'):
+            SphericalVoronoi(self.points, radius=None)
+
+    def test_sort_vertices_of_regions(self):
+        sv = SphericalVoronoi(self.points)
+        unsorted_regions = sv.regions
+        sv.sort_vertices_of_regions()
+        assert_equal(sorted(sv.regions), sorted(unsorted_regions))
+
+    def test_sort_vertices_of_regions_flattened(self):
+        expected = sorted([[0, 6, 5, 2, 3], [2, 3, 10, 11, 8, 7], [0, 6, 4, 1],
+                           [4, 8, 7, 5, 6], [9, 11, 10], [2, 7, 5],
+                           [1, 4, 8, 11, 9], [0, 3, 10, 9, 1]])
+        expected = list(itertools.chain(*sorted(expected)))  # type: ignore
+        sv = SphericalVoronoi(self.points)
+        sv.sort_vertices_of_regions()
+        actual = list(itertools.chain(*sorted(sv.regions)))
+        assert_array_equal(actual, expected)
+
+    def test_sort_vertices_of_regions_dimensionality(self):
+        points = np.array([[1, 0, 0, 0],
+                           [0, 1, 0, 0],
+                           [0, 0, 1, 0],
+                           [0, 0, 0, 1],
+                           [0.5, 0.5, 0.5, 0.5]])
+        with pytest.raises(TypeError, match="three-dimensional"):
+            sv = SphericalVoronoi(points)
+            sv.sort_vertices_of_regions()
+
+    def test_num_vertices(self):
+        # for any n >= 3, a spherical Voronoi diagram has 2n - 4
+        # vertices; this is a direct consequence of Euler's formula
+        # as explained by Dinis and Mamede (2010) Proceedings of the
+        # 2010 International Symposium on Voronoi Diagrams in Science
+        # and Engineering
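+        # e.g. the 8 generators from setup_method must produce
+        # 2 * 8 - 4 = 12 Voronoi vertices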
+        sv = SphericalVoronoi(self.points)
+        expected = self.points.shape[0] * 2 - 4
+        actual = sv.vertices.shape[0]
+        assert_equal(actual, expected)
+
+    def test_voronoi_circles(self):
+        sv = SphericalVoronoi(self.points)
+        for vertex in sv.vertices:
+            distances = distance.cdist(sv.points, np.array([vertex]))
+            closest = np.array(sorted(distances)[0:3])
+            assert_almost_equal(closest[0], closest[1], 7, str(vertex))
+            assert_almost_equal(closest[0], closest[2], 7, str(vertex))
+
+    def test_duplicate_point_handling(self):
+        # an exception should be raised for degenerate generators
+        # (see gh-7046)
+        self.degenerate = np.concatenate((self.points, self.points))
+        with assert_raises(ValueError):
+            SphericalVoronoi(self.degenerate)
+
+    def test_incorrect_radius_handling(self):
+        # an exception should be raised if the radius provided
+        # cannot possibly match the input generators
+        with assert_raises(ValueError):
+            SphericalVoronoi(self.points, radius=0.98)
+
+    def test_incorrect_center_handling(self):
+        # an exception should be raised if the center provided
+        # cannot possibly match the input generators
+        with assert_raises(ValueError):
+            SphericalVoronoi(self.points, center=[0.1, 0, 0])
+
+    @pytest.mark.parametrize("dim", range(2, 6))
+    @pytest.mark.parametrize("shift", [False, True])
+    def test_single_hemisphere_handling(self, dim, shift):
+        n = 10
+        points = _sample_sphere(n, dim, seed=0)
+        points[:, 0] = np.abs(points[:, 0])
+        center = (np.arange(dim) + 1) * shift
+        sv = SphericalVoronoi(points + center, center=center)
+        dots = np.einsum('ij,ij->i', sv.vertices - center,
+                                     sv.points[sv._simplices[:, 0]] - center)
+        circumradii = np.arccos(np.clip(dots, -1, 1))
+        assert np.max(circumradii) > np.pi / 2
+
+    @pytest.mark.parametrize("n", [1, 2, 10])
+    @pytest.mark.parametrize("dim", range(2, 6))
+    @pytest.mark.parametrize("shift", [False, True])
+    def test_rank_deficient(self, n, dim, shift):
+        center = (np.arange(dim) + 1) * shift
+        points = _sample_sphere(n, dim - 1, seed=0)
+        points = np.hstack([points, np.zeros((n, 1))])
+        with pytest.raises(ValueError, match="Rank of input points"):
+            SphericalVoronoi(points + center, center=center)
+
+    @pytest.mark.parametrize("dim", range(2, 6))
+    def test_higher_dimensions(self, dim):
+        n = 100
+        points = _sample_sphere(n, dim, seed=0)
+        sv = SphericalVoronoi(points)
+        assert sv.vertices.shape[1] == dim
+        assert len(sv.regions) == n
+
+        # verify Euler characteristic
+        cell_counts = []
+        simplices = np.sort(sv._simplices)
+        for i in range(1, dim + 1):
+            cells = []
+            for indices in itertools.combinations(range(dim), i):
+                cells.append(simplices[:, list(indices)])
+            cells = np.unique(np.concatenate(cells), axis=0)
+            cell_counts.append(len(cells))
+        expected_euler = 1 + (-1)**(dim-1)
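+        # chi(S^(dim-1)) = 1 + (-1)**(dim-1): 0 for the circle,
+        # 2 for the ordinary sphere, and so on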
+        actual_euler = sum([(-1)**i * e for i, e in enumerate(cell_counts)])
+        assert expected_euler == actual_euler
+
+    @pytest.mark.parametrize("dim", range(2, 6))
+    def test_cross_polytope_regions(self, dim):
+        # The hypercube is the dual of the cross-polytope, so the voronoi
+        # vertices of the cross-polytope lie on the points of the hypercube.
+
+        # generate points of the cross-polytope
+        points = np.concatenate((-np.eye(dim), np.eye(dim)))
+        sv = SphericalVoronoi(points)
+        assert all([len(e) == 2**(dim - 1) for e in sv.regions])
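+        # each generator +/-e_i is nearest to exactly the hypercube
+        # vertices whose i-th coordinate matches its sign: 2**(dim-1)
+        # of them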
+
+        # generate points of the hypercube
+        expected = np.vstack(list(itertools.product([-1, 1], repeat=dim)))
+        expected = expected.astype(np.float64) / np.sqrt(dim)
+
+        # test that Voronoi vertices are correctly placed
+        dist = distance.cdist(sv.vertices, expected)
+        res = linear_sum_assignment(dist)
+        assert dist[res].sum() < TOL
+
+    @pytest.mark.parametrize("dim", range(2, 6))
+    def test_hypercube_regions(self, dim):
+        # The cross-polytope is the dual of the hypercube, so the voronoi
+        # vertices of the hypercube lie on the points of the cross-polytope.
+
+        # generate points of the hypercube
+        points = np.vstack(list(itertools.product([-1, 1], repeat=dim)))
+        points = points.astype(np.float64) / np.sqrt(dim)
+        sv = SphericalVoronoi(points)
+
+        # generate points of the cross-polytope
+        expected = np.concatenate((-np.eye(dim), np.eye(dim)))
+
+        # test that Voronoi vertices are correctly placed
+        dist = distance.cdist(sv.vertices, expected)
+        res = linear_sum_assignment(dist)
+        assert dist[res].sum() < TOL
+
+    @pytest.mark.parametrize("n", [10, 500])
+    @pytest.mark.parametrize("dim", [2, 3])
+    @pytest.mark.parametrize("radius", [0.5, 1, 2])
+    @pytest.mark.parametrize("shift", [False, True])
+    @pytest.mark.parametrize("single_hemisphere", [False, True])
+    def test_area_reconstitution(self, n, dim, radius, shift,
+                                 single_hemisphere):
+        points = _sample_sphere(n, dim, seed=0)
+
+        # move all points to one side of the sphere for single-hemisphere test
+        if single_hemisphere:
+            points[:, 0] = np.abs(points[:, 0])
+
+        center = (np.arange(dim) + 1) * shift
+        points = radius * points + center
+
+        sv = SphericalVoronoi(points, radius=radius, center=center)
+        areas = sv.calculate_areas()
+        assert_almost_equal(areas.sum(), _hypersphere_area(dim, radius))
+
+    @pytest.mark.parametrize("poly", ["triangle", "dodecagon",
+                                      "tetrahedron", "cube", "octahedron",
+                                      "dodecahedron", "icosahedron"])
+    def test_equal_area_reconstitution(self, poly):
+        points = _generate_polytope(poly)
+        n, dim = points.shape
+        sv = SphericalVoronoi(points)
+        areas = sv.calculate_areas()
+        assert_almost_equal(areas, _hypersphere_area(dim, 1) / n)
+
+    def test_area_unsupported_dimension(self):
+        dim = 4
+        points = np.concatenate((-np.eye(dim), np.eye(dim)))
+        sv = SphericalVoronoi(points)
+        with pytest.raises(TypeError, match="Only supported"):
+            sv.calculate_areas()
+
+    @pytest.mark.parametrize("radius", [1, 1.])
+    @pytest.mark.parametrize("center", [None, (1, 2, 3), (1., 2., 3.)])
+    def test_attribute_types(self, radius, center):
+        points = radius * self.points
+        if center is not None:
+            points += center
+
+        sv = SphericalVoronoi(points, radius=radius, center=center)
+        assert sv.points.dtype is np.dtype(np.float64)
+        assert sv.center.dtype is np.dtype(np.float64)
+        assert isinstance(sv.radius, float)
+
+    def test_region_types(self):
+        # Tests that region integer type does not change
+        # See Issue #13412
+        sv = SphericalVoronoi(self.points)
+        dtype = type(sv.regions[0][0])
+        sv.sort_vertices_of_regions()
+        assert type(sv.regions[0][0]) == dtype
+        sv.sort_vertices_of_regions()
+        assert type(sv.regions[0][0]) == dtype
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/transform/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/spatial/transform/__init__.py
new file mode 100644
index 00000000..abe4a32f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/transform/__init__.py
@@ -0,0 +1,29 @@
+"""
+Spatial Transformations (:mod:`scipy.spatial.transform`)
+========================================================
+
+.. currentmodule:: scipy.spatial.transform
+
+This package implements various spatial transformations. For now,
+only rotations are supported.
+
+Rotations in 3 dimensions
+-------------------------
+.. autosummary::
+   :toctree: generated/
+
+   Rotation
+   Slerp
+   RotationSpline
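+
+For example (an illustrative sketch; see the `Rotation` docstring for the
+full API), a 90-degree rotation about the z-axis:
+
+>>> import numpy as np
+>>> from scipy.spatial.transform import Rotation
+>>> r = Rotation.from_euler('z', 90, degrees=True)
+>>> np.round(r.apply([1, 0, 0]), 12)
+array([0., 1., 0.])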
+"""
+from ._rotation import Rotation, Slerp
+from ._rotation_spline import RotationSpline
+
+# Deprecated namespaces, to be removed in v2.0.0
+from . import rotation
+
+__all__ = ['Rotation', 'Slerp', 'RotationSpline']
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/transform/_rotation.pyi b/__packaged__/coreml/.python_dependencies/scipy/spatial/transform/_rotation.pyi
new file mode 100644
index 00000000..ab36077c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/transform/_rotation.pyi
@@ -0,0 +1,57 @@
+from __future__ import annotations
+from typing import TYPE_CHECKING, Union, Tuple, Optional, Sequence
+import numpy as np
+
+if TYPE_CHECKING:
+    import numpy.typing as npt
+
+_IntegerType = Union[int, np.integer]
+
+
+class Rotation:
+    def __init__(self, quat: npt.ArrayLike, normalize: bool = ..., copy: bool = ...) -> None: ...
+    @property
+    def single(self) -> bool: ...
+    def __len__(self) -> int: ...
+    @classmethod
+    def from_quat(cls, quat: npt.ArrayLike) -> Rotation: ...
+    @classmethod
+    def from_matrix(cls, matrix: npt.ArrayLike) -> Rotation: ...
+    @classmethod
+    def from_rotvec(cls, rotvec: npt.ArrayLike) -> Rotation: ...
+    @classmethod
+    def from_euler(cls, seq: str, angles: Union[float, npt.ArrayLike], degrees: bool = ...) -> Rotation: ...
+    @classmethod
+    def from_mrp(cls, mrp: npt.ArrayLike) -> Rotation: ...
+    def as_quat(self) -> np.ndarray: ...
+    def as_matrix(self) -> np.ndarray: ...
+    def as_rotvec(self) -> np.ndarray: ...
+    def as_euler(self, seq: str, degrees: bool = ...) -> np.ndarray: ...
+    def as_mrp(self) -> np.ndarray: ...
+    @classmethod
+    def concatenate(cls, rotations: Sequence[Rotation]) -> Rotation: ...
+    def apply(self, vectors: npt.ArrayLike, inverse: bool = ...) -> np.ndarray: ...
+    def __mul__(self, other: Rotation) -> Rotation: ...
+    def inv(self) -> Rotation: ...
+    def magnitude(self) -> Union[np.ndarray, float]: ...
+    def mean(self, weights: Optional[npt.ArrayLike] = ...) -> Rotation: ...
+    def reduce(self, left: Optional[Rotation] = ..., right: Optional[Rotation] = ...,
+               return_indices: bool = ...) -> Union[Rotation, Tuple[Rotation, np.ndarray, np.ndarray]]: ...
+    @classmethod
+    def create_group(cls, group: str, axis: str = ...) -> Rotation: ...
+    def __getitem__(self, indexer: Union[int, slice, npt.ArrayLike]) -> Rotation: ...
+    @classmethod
+    def identity(cls, num: Optional[int] = ...) -> Rotation: ...
+    @classmethod
+    def random(cls, num: Optional[int] = ...,
+               random_state: Optional[Union[_IntegerType,
+                                            np.random.Generator,
+                                            np.random.RandomState]] = ...) -> Rotation: ...
+    @classmethod
+    def align_vectors(cls, a: npt.ArrayLike, b: npt.ArrayLike,
+                      weights: Optional[npt.ArrayLike] = ...,
+                      return_sensitivity: bool = ...) -> Union[Tuple[Rotation, float], Tuple[Rotation, float, np.ndarray]]: ...
+
+class Slerp:
+    def __init__(self, times: npt.ArrayLike, rotations: Rotation) -> None: ...
+    def __call__(self, times: npt.ArrayLike) -> Rotation: ...
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/transform/_rotation_groups.py b/__packaged__/coreml/.python_dependencies/scipy/spatial/transform/_rotation_groups.py
new file mode 100644
index 00000000..870e9b9e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/transform/_rotation_groups.py
@@ -0,0 +1,140 @@
+import numpy as np
+from scipy.constants import golden as phi
+
+
+def icosahedral(cls):
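+    # The icosahedral (I) rotation group has 60 elements: the 12 rotations
+    # of the tetrahedral group plus the 48 quaternions listed below.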
+    g1 = tetrahedral(cls).as_quat()
+    a = 0.5
+    b = 0.5 / phi
+    c = phi / 2
+    g2 = np.array([[+a, +b, +c, 0],
+                   [+a, +b, -c, 0],
+                   [+a, +c, 0, +b],
+                   [+a, +c, 0, -b],
+                   [+a, -b, +c, 0],
+                   [+a, -b, -c, 0],
+                   [+a, -c, 0, +b],
+                   [+a, -c, 0, -b],
+                   [+a, 0, +b, +c],
+                   [+a, 0, +b, -c],
+                   [+a, 0, -b, +c],
+                   [+a, 0, -b, -c],
+                   [+b, +a, 0, +c],
+                   [+b, +a, 0, -c],
+                   [+b, +c, +a, 0],
+                   [+b, +c, -a, 0],
+                   [+b, -a, 0, +c],
+                   [+b, -a, 0, -c],
+                   [+b, -c, +a, 0],
+                   [+b, -c, -a, 0],
+                   [+b, 0, +c, +a],
+                   [+b, 0, +c, -a],
+                   [+b, 0, -c, +a],
+                   [+b, 0, -c, -a],
+                   [+c, +a, +b, 0],
+                   [+c, +a, -b, 0],
+                   [+c, +b, 0, +a],
+                   [+c, +b, 0, -a],
+                   [+c, -a, +b, 0],
+                   [+c, -a, -b, 0],
+                   [+c, -b, 0, +a],
+                   [+c, -b, 0, -a],
+                   [+c, 0, +a, +b],
+                   [+c, 0, +a, -b],
+                   [+c, 0, -a, +b],
+                   [+c, 0, -a, -b],
+                   [0, +a, +c, +b],
+                   [0, +a, +c, -b],
+                   [0, +a, -c, +b],
+                   [0, +a, -c, -b],
+                   [0, +b, +a, +c],
+                   [0, +b, +a, -c],
+                   [0, +b, -a, +c],
+                   [0, +b, -a, -c],
+                   [0, +c, +b, +a],
+                   [0, +c, +b, -a],
+                   [0, +c, -b, +a],
+                   [0, +c, -b, -a]])
+    return cls.from_quat(np.concatenate((g1, g2)))
+
+
+def octahedral(cls):
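+    # The octahedral (O) rotation group has 24 elements: the 12 tetrahedral
+    # rotations plus the 12 quaternions below (the +-90-degree rotations
+    # about the coordinate axes and the 180-degree rotations about the
+    # cube's edge axes).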
+    g1 = tetrahedral(cls).as_quat()
+    c = np.sqrt(2) / 2
+    g2 = np.array([[+c, 0, 0, +c],
+                   [0, +c, 0, +c],
+                   [0, 0, +c, +c],
+                   [0, 0, -c, +c],
+                   [0, -c, 0, +c],
+                   [-c, 0, 0, +c],
+                   [0, +c, +c, 0],
+                   [0, -c, +c, 0],
+                   [+c, 0, +c, 0],
+                   [-c, 0, +c, 0],
+                   [+c, +c, 0, 0],
+                   [-c, +c, 0, 0]])
+    return cls.from_quat(np.concatenate((g1, g2)))
+
+
+def tetrahedral(cls):
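+    # The tetrahedral (T) rotation group has 12 elements: np.eye(4) encodes
+    # the identity and the three 180-degree rotations about the coordinate
+    # axes, and the 8 quaternions below are the +-120-degree rotations about
+    # the cube diagonals.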
+    g1 = np.eye(4)
+    c = 0.5
+    g2 = np.array([[c, -c, -c, +c],
+                   [c, -c, +c, +c],
+                   [c, +c, -c, +c],
+                   [c, +c, +c, +c],
+                   [c, -c, -c, -c],
+                   [c, -c, +c, -c],
+                   [c, +c, -c, -c],
+                   [c, +c, +c, -c]])
+    return cls.from_quat(np.concatenate((g1, g2)))
+
+
+def dicyclic(cls, n, axis=2):
+    g1 = cyclic(cls, n, axis).as_rotvec()
+
+    thetas = np.linspace(0, np.pi, n, endpoint=False)
+    rv = np.pi * np.vstack([np.zeros(n), np.cos(thetas), np.sin(thetas)]).T
+    g2 = np.roll(rv, axis, axis=1)
+    return cls.from_rotvec(np.concatenate((g1, g2)))
+
+
+def cyclic(cls, n, axis=2):
+    thetas = np.linspace(0, 2 * np.pi, n, endpoint=False)
+    rv = np.vstack([thetas, np.zeros(n), np.zeros(n)]).T
+    return cls.from_rotvec(np.roll(rv, axis, axis=1))
+
+
+def create_group(cls, group, axis='Z'):
+    if not isinstance(group, str):
+        raise ValueError("`group` argument must be a string")
+
+    permitted_axes = ['x', 'y', 'z', 'X', 'Y', 'Z']
+    if axis not in permitted_axes:
+        raise ValueError("`axis` must be one of " + ", ".join(permitted_axes))
+
+    if group in ['I', 'O', 'T']:
+        symbol = group
+        order = 1
+    elif group[:1] in ['C', 'D'] and group[1:].isdigit():
+        symbol = group[:1]
+        order = int(group[1:])
+    else:
+        raise ValueError("`group` must be one of 'I', 'O', 'T', 'Dn', 'Cn'")
+
+    if order < 1:
+        raise ValueError("Group order must be positive")
+
+    axis = 'xyz'.index(axis.lower())
+    if symbol == 'I':
+        return icosahedral(cls)
+    elif symbol == 'O':
+        return octahedral(cls)
+    elif symbol == 'T':
+        return tetrahedral(cls)
+    elif symbol == 'D':
+        return dicyclic(cls, order, axis=axis)
+    elif symbol == 'C':
+        return cyclic(cls, order, axis=axis)
+    else:
+        assert False
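+
+
+if __name__ == "__main__":
+    # Informal sanity check (an illustrative sketch, not part of the
+    # module's public behaviour): the octahedral group should contain 24
+    # rotations and be closed under composition, with product quaternions
+    # matching a table entry up to sign.
+    from scipy.spatial.transform import Rotation
+
+    group = Rotation.create_group('O')
+    assert len(group) == 24
+    quats = group.as_quat()
+    for p in group:
+        for q in group:
+            prod = (p * q).as_quat()
+            dist = np.minimum(np.abs(quats - prod).max(axis=1),
+                              np.abs(quats + prod).max(axis=1))
+            assert dist.min() < 1e-9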
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/transform/_rotation_spline.py b/__packaged__/coreml/.python_dependencies/scipy/spatial/transform/_rotation_spline.py
new file mode 100644
index 00000000..1c071297
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/transform/_rotation_spline.py
@@ -0,0 +1,460 @@
+import numpy as np
+from scipy.linalg import solve_banded
+from ._rotation import Rotation
+
+
+def _create_skew_matrix(x):
+    """Create skew-symmetric matrices corresponding to vectors.
+
+    Parameters
+    ----------
+    x : ndarray, shape (n, 3)
+        Set of vectors.
+
+    Returns
+    -------
+    ndarray, shape (n, 3, 3)
+    """
+    result = np.zeros((len(x), 3, 3))
+    result[:, 0, 1] = -x[:, 2]
+    result[:, 0, 2] = x[:, 1]
+    result[:, 1, 0] = x[:, 2]
+    result[:, 1, 2] = -x[:, 0]
+    result[:, 2, 0] = -x[:, 1]
+    result[:, 2, 1] = x[:, 0]
+    return result
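+
+# Note: for a stack of vectors x and any vector v, the matrices returned by
+# _create_skew_matrix implement the cross product,
+# _create_skew_matrix(x)[i] @ v == np.cross(x[i], v), which is the identity
+# the rate/acceleration helpers below rely on.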
+
+
+def _matrix_vector_product_of_stacks(A, b):
+    """Compute the product of stack of matrices and vectors."""
+    return np.einsum("ijk,ik->ij", A, b)
+
+
+def _angular_rate_to_rotvec_dot_matrix(rotvecs):
+    """Compute matrices to transform angular rates to rot. vector derivatives.
+
+    The matrices depend on the current attitude represented as a rotation
+    vector.
+
+    Parameters
+    ----------
+    rotvecs : ndarray, shape (n, 3)
+        Set of rotation vectors.
+
+    Returns
+    -------
+    ndarray, shape (n, 3, 3)
+    """
+    norm = np.linalg.norm(rotvecs, axis=1)
+    k = np.empty_like(norm)
+
+    mask = norm > 1e-4
+    nm = norm[mask]
+    k[mask] = (1 - 0.5 * nm / np.tan(0.5 * nm)) / nm**2
+    mask = ~mask
+    nm = norm[mask]
+    k[mask] = 1/12 + 1/720 * nm**2
+
+    skew = _create_skew_matrix(rotvecs)
+
+    result = np.empty((len(rotvecs), 3, 3))
+    result[:] = np.identity(3)
+    result[:] += 0.5 * skew
+    result[:] += k[:, None, None] * np.matmul(skew, skew)
+
+    return result
+
+
+def _rotvec_dot_to_angular_rate_matrix(rotvecs):
+    """Compute matrices to transform rot. vector derivatives to angular rates.
+
+    The matrices depend on the current attitude represented as a rotation
+    vector.
+
+    Parameters
+    ----------
+    rotvecs : ndarray, shape (n, 3)
+        Set of rotation vectors.
+
+    Returns
+    -------
+    ndarray, shape (n, 3, 3)
+    """
+    norm = np.linalg.norm(rotvecs, axis=1)
+    k1 = np.empty_like(norm)
+    k2 = np.empty_like(norm)
+
+    mask = norm > 1e-4
+    nm = norm[mask]
+    k1[mask] = (1 - np.cos(nm)) / nm ** 2
+    k2[mask] = (nm - np.sin(nm)) / nm ** 3
+
+    mask = ~mask
+    nm = norm[mask]
+    k1[mask] = 0.5 - nm ** 2 / 24
+    k2[mask] = 1 / 6 - nm ** 2 / 120
+
+    skew = _create_skew_matrix(rotvecs)
+
+    result = np.empty((len(rotvecs), 3, 3))
+    result[:] = np.identity(3)
+    result[:] -= k1[:, None, None] * skew
+    result[:] += k2[:, None, None] * np.matmul(skew, skew)
+
+    return result
+
+
+def _angular_acceleration_nonlinear_term(rotvecs, rotvecs_dot):
+    """Compute the non-linear term in angular acceleration.
+
+    The angular acceleration contains a quadratic term with respect to
+    the derivative of the rotation vector. This function computes that.
+
+    Parameters
+    ----------
+    rotvecs : ndarray, shape (n, 3)
+        Set of rotation vectors.
+    rotvecs_dot : ndarray, shape (n, 3)
+        Set of rotation vector derivatives.
+
+    Returns
+    -------
+    ndarray, shape (n, 3)
+    """
+    norm = np.linalg.norm(rotvecs, axis=1)
+    dp = np.sum(rotvecs * rotvecs_dot, axis=1)
+    cp = np.cross(rotvecs, rotvecs_dot)
+    ccp = np.cross(rotvecs, cp)
+    dccp = np.cross(rotvecs_dot, cp)
+
+    k1 = np.empty_like(norm)
+    k2 = np.empty_like(norm)
+    k3 = np.empty_like(norm)
+
+    mask = norm > 1e-4
+    nm = norm[mask]
+    k1[mask] = (-nm * np.sin(nm) - 2 * (np.cos(nm) - 1)) / nm ** 4
+    k2[mask] = (-2 * nm + 3 * np.sin(nm) - nm * np.cos(nm)) / nm ** 5
+    k3[mask] = (nm - np.sin(nm)) / nm ** 3
+
+    mask = ~mask
+    nm = norm[mask]
+    k1[mask] = 1/12 - nm ** 2 / 180
+    # x**2 coefficient of the series of (-2x + 3*sin(x) - x*cos(x))/x**5
+    k2[mask] = -1/60 + nm ** 2 / 1260
+    k3[mask] = 1/6 - nm ** 2 / 120
+
+    dp = dp[:, None]
+    k1 = k1[:, None]
+    k2 = k2[:, None]
+    k3 = k3[:, None]
+
+    return dp * (k1 * cp + k2 * ccp) + k3 * dccp
+
+
+def _compute_angular_rate(rotvecs, rotvecs_dot):
+    """Compute angular rates given rotation vectors and its derivatives.
+
+    Parameters
+    ----------
+    rotvecs : ndarray, shape (n, 3)
+        Set of rotation vectors.
+    rotvecs_dot : ndarray, shape (n, 3)
+        Set of rotation vector derivatives.
+
+    Returns
+    -------
+    ndarray, shape (n, 3)
+    """
+    return _matrix_vector_product_of_stacks(
+        _rotvec_dot_to_angular_rate_matrix(rotvecs), rotvecs_dot)
+
+
+def _compute_angular_acceleration(rotvecs, rotvecs_dot, rotvecs_dot_dot):
+    """Compute angular acceleration given rotation vector and its derivatives.
+
+    Parameters
+    ----------
+    rotvecs : ndarray, shape (n, 3)
+        Set of rotation vectors.
+    rotvecs_dot : ndarray, shape (n, 3)
+        Set of rotation vector derivatives.
+    rotvecs_dot_dot : ndarray, shape (n, 3)
+        Set of rotation vector second derivatives.
+
+    Returns
+    -------
+    ndarray, shape (n, 3)
+    """
+    return (_compute_angular_rate(rotvecs, rotvecs_dot_dot) +
+            _angular_acceleration_nonlinear_term(rotvecs, rotvecs_dot))
+
+
+def _create_block_3_diagonal_matrix(A, B, d):
+    """Create a 3-diagonal block matrix as banded.
+
+    The matrix has the following structure:
+
+        DB...
+        ADB..
+        .ADB.
+        ..ADB
+        ...AD
+
+    The blocks A, B and D are 3-by-3 matrices. The D matrices have the form
+    d * I.
+
+    Parameters
+    ----------
+    A : ndarray, shape (n, 3, 3)
+        Stack of A blocks.
+    B : ndarray, shape (n, 3, 3)
+        Stack of B blocks.
+    d : ndarray, shape (n + 1,)
+        Values for diagonal blocks.
+
+    Returns
+    -------
+    ndarray, shape (11, 3 * (n + 1))
+        Matrix in the banded form as used by `scipy.linalg.solve_banded`.
+    """
+    ind = np.arange(3)
+    ind_blocks = np.arange(len(A))
+
+    A_i = np.empty_like(A, dtype=int)
+    A_i[:] = ind[:, None]
+    A_i += 3 * (1 + ind_blocks[:, None, None])
+
+    A_j = np.empty_like(A, dtype=int)
+    A_j[:] = ind
+    A_j += 3 * ind_blocks[:, None, None]
+
+    B_i = np.empty_like(B, dtype=int)
+    B_i[:] = ind[:, None]
+    B_i += 3 * ind_blocks[:, None, None]
+
+    B_j = np.empty_like(B, dtype=int)
+    B_j[:] = ind
+    B_j += 3 * (1 + ind_blocks[:, None, None])
+
+    diag_i = diag_j = np.arange(3 * len(d))
+    i = np.hstack((A_i.ravel(), B_i.ravel(), diag_i))
+    j = np.hstack((A_j.ravel(), B_j.ravel(), diag_j))
+    values = np.hstack((A.ravel(), B.ravel(), np.repeat(d, 3)))
+
+    u = 5
+    l = 5
+    result = np.zeros((u + l + 1, 3 * len(d)))
+    result[u + i - j, j] = values
+    return result
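+
+# Note on the index mapping above: `scipy.linalg.solve_banded` stores the
+# dense entry (i, j) at [u + i - j, j] of the banded array; with 3x3 blocks
+# on adjacent block diagonals the bandwidths are u = l = 5.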
+
+
+class RotationSpline:
+    """Interpolate rotations with continuous angular rate and acceleration.
+
+    The rotation vectors between each consecutive orientation are cubic
+    functions of time and it is guaranteed that angular rate and acceleration
+    are continuous. Such interpolation is analogous to cubic spline
+    interpolation.
+
+    Refer to [1]_ for math and implementation details.
+
+    Parameters
+    ----------
+    times : array_like, shape (N,)
+        Times of the known rotations. At least 2 times must be specified.
+    rotations : `Rotation` instance
+        Rotations to perform the interpolation between. Must contain N
+        rotations.
+
+    Methods
+    -------
+    __call__
+
+    References
+    ----------
+    .. [1] `Smooth Attitude Interpolation
+            `_
+
+    Examples
+    --------
+    >>> from scipy.spatial.transform import Rotation, RotationSpline
+    >>> import numpy as np
+
+    Define the sequence of times and rotations from the Euler angles:
+
+    >>> times = [0, 10, 20, 40]
+    >>> angles = [[-10, 20, 30], [0, 15, 40], [-30, 45, 30], [20, 45, 90]]
+    >>> rotations = Rotation.from_euler('XYZ', angles, degrees=True)
+
+    Create the interpolator object:
+
+    >>> spline = RotationSpline(times, rotations)
+
+    Interpolate the Euler angles, angular rate and acceleration:
+
+    >>> angular_rate = np.rad2deg(spline(times, 1))
+    >>> angular_acceleration = np.rad2deg(spline(times, 2))
+    >>> times_plot = np.linspace(times[0], times[-1], 100)
+    >>> angles_plot = spline(times_plot).as_euler('XYZ', degrees=True)
+    >>> angular_rate_plot = np.rad2deg(spline(times_plot, 1))
+    >>> angular_acceleration_plot = np.rad2deg(spline(times_plot, 2))
+
+    On this plot you see that Euler angles are continuous and smooth:
+
+    >>> import matplotlib.pyplot as plt
+    >>> plt.plot(times_plot, angles_plot)
+    >>> plt.plot(times, angles, 'x')
+    >>> plt.title("Euler angles")
+    >>> plt.show()
+
+    The angular rate is also smooth:
+
+    >>> plt.plot(times_plot, angular_rate_plot)
+    >>> plt.plot(times, angular_rate, 'x')
+    >>> plt.title("Angular rate")
+    >>> plt.show()
+
+    The angular acceleration is continuous, but not smooth. Also note that
+    the angular acceleration is not a piecewise-linear function, because
+    it is different from the second derivative of the rotation vector (which
+    is a piecewise-linear function as in the cubic spline).
+
+    >>> plt.plot(times_plot, angular_acceleration_plot)
+    >>> plt.plot(times, angular_acceleration, 'x')
+    >>> plt.title("Angular acceleration")
+    >>> plt.show()
+    """
+    # Parameters for the solver for angular rate.
+    MAX_ITER = 10
+    TOL = 1e-9
+
+    def _solve_for_angular_rates(self, dt, angular_rates, rotvecs):
+        angular_rate_first = angular_rates[0].copy()
+
+        A = _angular_rate_to_rotvec_dot_matrix(rotvecs)
+        A_inv = _rotvec_dot_to_angular_rate_matrix(rotvecs)
+        M = _create_block_3_diagonal_matrix(
+            2 * A_inv[1:-1] / dt[1:-1, None, None],
+            2 * A[1:-1] / dt[1:-1, None, None],
+            4 * (1 / dt[:-1] + 1 / dt[1:]))
+
+        b0 = 6 * (rotvecs[:-1] * dt[:-1, None] ** -2 +
+                  rotvecs[1:] * dt[1:, None] ** -2)
+        b0[0] -= 2 / dt[0] * A_inv[0].dot(angular_rate_first)
+        b0[-1] -= 2 / dt[-1] * A[-1].dot(angular_rates[-1])
+
+        for iteration in range(self.MAX_ITER):
+            rotvecs_dot = _matrix_vector_product_of_stacks(A, angular_rates)
+            delta_beta = _angular_acceleration_nonlinear_term(
+                rotvecs[:-1], rotvecs_dot[:-1])
+            b = b0 - delta_beta
+            angular_rates_new = solve_banded((5, 5), M, b.ravel())
+            angular_rates_new = angular_rates_new.reshape((-1, 3))
+
+            delta = np.abs(angular_rates_new - angular_rates[:-1])
+            angular_rates[:-1] = angular_rates_new
+            if np.all(delta < self.TOL * (1 + np.abs(angular_rates_new))):
+                break
+
+        rotvecs_dot = _matrix_vector_product_of_stacks(A, angular_rates)
+        angular_rates = np.vstack((angular_rate_first, angular_rates[:-1]))
+
+        return angular_rates, rotvecs_dot
+
+    def __init__(self, times, rotations):
+        from scipy.interpolate import PPoly
+
+        if rotations.single:
+            raise ValueError("`rotations` must be a sequence of rotations.")
+
+        if len(rotations) == 1:
+            raise ValueError("`rotations` must contain at least 2 rotations.")
+
+        times = np.asarray(times, dtype=float)
+        if times.ndim != 1:
+            raise ValueError("`times` must be 1-dimensional.")
+
+        if len(times) != len(rotations):
+            raise ValueError("Expected number of rotations to be equal to "
+                             "number of timestamps given, got {} rotations "
+                             "and {} timestamps."
+                             .format(len(rotations), len(times)))
+
+        dt = np.diff(times)
+        if np.any(dt <= 0):
+            raise ValueError("Values in `times` must be in a strictly "
+                             "increasing order.")
+
+        rotvecs = (rotations[:-1].inv() * rotations[1:]).as_rotvec()
+        angular_rates = rotvecs / dt[:, None]
+
+        if len(rotations) == 2:
+            rotvecs_dot = angular_rates
+        else:
+            angular_rates, rotvecs_dot = self._solve_for_angular_rates(
+                dt, angular_rates, rotvecs)
+
+        dt = dt[:, None]
+        coeff = np.empty((4, len(times) - 1, 3))
+        coeff[0] = (-2 * rotvecs + dt * angular_rates
+                    + dt * rotvecs_dot) / dt ** 3
+        coeff[1] = (3 * rotvecs - 2 * dt * angular_rates
+                    - dt * rotvecs_dot) / dt ** 2
+        coeff[2] = angular_rates
+        coeff[3] = 0
+
+        self.times = times
+        self.rotations = rotations
+        self.interpolator = PPoly(coeff, times)
+
+    def __call__(self, times, order=0):
+        """Compute interpolated values.
+
+        Parameters
+        ----------
+        times : float or array_like
+            Times of interest.
+        order : {0, 1, 2}, optional
+            Order of differentiation:
+
+                * 0 (default) : return Rotation
+                * 1 : return the angular rate in rad/sec
+                * 2 : return the angular acceleration in rad/sec/sec
+
+        Returns
+        -------
+        Interpolated Rotation, angular rate or acceleration.
+        """
+        if order not in [0, 1, 2]:
+            raise ValueError("`order` must be 0, 1 or 2.")
+
+        times = np.asarray(times, dtype=float)
+        if times.ndim > 1:
+            raise ValueError("`times` must be at most 1-dimensional.")
+
+        single_time = times.ndim == 0
+        times = np.atleast_1d(times)
+
+        rotvecs = self.interpolator(times)
+        if order == 0:
+            index = np.searchsorted(self.times, times, side='right')
+            index -= 1
+            index[index < 0] = 0
+            n_segments = len(self.times) - 1
+            index[index > n_segments - 1] = n_segments - 1
+            result = self.rotations[index] * Rotation.from_rotvec(rotvecs)
+        elif order == 1:
+            rotvecs_dot = self.interpolator(times, 1)
+            result = _compute_angular_rate(rotvecs, rotvecs_dot)
+        elif order == 2:
+            rotvecs_dot = self.interpolator(times, 1)
+            rotvecs_dot_dot = self.interpolator(times, 2)
+            result = _compute_angular_acceleration(rotvecs, rotvecs_dot,
+                                                   rotvecs_dot_dot)
+        else:
+            assert False
+
+        if single_time:
+            result = result[0]
+
+        return result
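+
+
+if __name__ == "__main__":
+    # Informal round-trip check (an illustrative sketch; run it with
+    # ``python -m scipy.spatial.transform._rotation_spline`` so that the
+    # relative import above resolves): by construction the spline must
+    # reproduce the key rotations at the given times.
+    times = [0.0, 1.0, 2.5, 4.0]
+    angles = [[0, 0, 0], [45, 0, 30], [90, 45, 0], [10, 20, 30]]
+    key_rotations = Rotation.from_euler('xyz', angles, degrees=True)
+    spline = RotationSpline(times, key_rotations)
+    errors = (spline(times).inv() * key_rotations).magnitude()
+    assert np.allclose(errors, 0, atol=1e-9)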
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/transform/rotation.py b/__packaged__/coreml/.python_dependencies/scipy/spatial/transform/rotation.py
new file mode 100644
index 00000000..63eef48a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/transform/rotation.py
@@ -0,0 +1,33 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.spatial` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _rotation
+
+
+__all__ = [  # noqa: F822
+    'Rotation',
+    'Slerp',
+    'check_random_state',
+    'create_group',
+    're',
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.spatial.transform.rotation is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.spatial instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.spatial.transform` "
+                  "namespace, the `scipy.spatial.transform.rotation` namespace"
+                  " is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_rotation, name)
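+
+
+# Illustrative usage of this shim (a sketch, not executed at import time):
+#
+#     >>> import warnings
+#     >>> from scipy.spatial.transform import rotation
+#     >>> with warnings.catch_warnings(record=True) as w:
+#     ...     warnings.simplefilter("always")
+#     ...     cls = rotation.Rotation   # same class as in `_rotation`
+#     >>> issubclass(w[0].category, DeprecationWarning)
+#     True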
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/transform/tests/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/spatial/transform/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/transform/tests/test_rotation.py b/__packaged__/coreml/.python_dependencies/scipy/spatial/transform/tests/test_rotation.py
new file mode 100644
index 00000000..c5d530cd
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/transform/tests/test_rotation.py
@@ -0,0 +1,1370 @@
+import pytest
+
+import numpy as np
+from numpy.testing import assert_equal, assert_array_almost_equal
+from numpy.testing import assert_allclose
+from scipy.spatial.transform import Rotation, Slerp
+from scipy.stats import special_ortho_group
+from itertools import permutations
+
+import pickle
+import copy
+
+def test_generic_quat_matrix():
+    x = np.array([[3, 4, 0, 0], [5, 12, 0, 0]])
+    r = Rotation.from_quat(x)
+    expected_quat = x / np.array([[5], [13]])
+    assert_array_almost_equal(r.as_quat(), expected_quat)
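+    # Note: scipy stores quaternions scalar-last, [x, y, z, w], and
+    # `from_quat` normalizes its input, hence the division by the row
+    # norms 5 and 13 above.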
+
+
+def test_from_single_1d_quaternion():
+    x = np.array([3, 4, 0, 0])
+    r = Rotation.from_quat(x)
+    expected_quat = x / 5
+    assert_array_almost_equal(r.as_quat(), expected_quat)
+
+
+def test_from_single_2d_quaternion():
+    x = np.array([[3, 4, 0, 0]])
+    r = Rotation.from_quat(x)
+    expected_quat = x / 5
+    assert_array_almost_equal(r.as_quat(), expected_quat)
+
+
+def test_from_square_quat_matrix():
+    # Ensure proper norm array broadcasting
+    x = np.array([
+        [3, 0, 0, 4],
+        [5, 0, 12, 0],
+        [0, 0, 0, 1],
+        [0, 0, 0, -1]
+        ])
+    r = Rotation.from_quat(x)
+    expected_quat = x / np.array([[5], [13], [1], [1]])
+    assert_array_almost_equal(r.as_quat(), expected_quat)
+
+
+def test_malformed_1d_from_quat():
+    with pytest.raises(ValueError):
+        Rotation.from_quat(np.array([1, 2, 3]))
+
+
+def test_malformed_2d_from_quat():
+    with pytest.raises(ValueError):
+        Rotation.from_quat(np.array([
+            [1, 2, 3, 4, 5],
+            [4, 5, 6, 7, 8]
+            ]))
+
+
+def test_zero_norms_from_quat():
+    x = np.array([
+            [3, 4, 0, 0],
+            [0, 0, 0, 0],
+            [5, 0, 12, 0]
+            ])
+    with pytest.raises(ValueError):
+        Rotation.from_quat(x)
+
+
+def test_as_matrix_single_1d_quaternion():
+    quat = [0, 0, 0, 1]
+    mat = Rotation.from_quat(quat).as_matrix()
+    # mat.shape == (3,3) due to 1d input
+    assert_array_almost_equal(mat, np.eye(3))
+
+
+def test_as_matrix_single_2d_quaternion():
+    quat = [[0, 0, 1, 1]]
+    mat = Rotation.from_quat(quat).as_matrix()
+    assert_equal(mat.shape, (1, 3, 3))
+    expected_mat = np.array([
+        [0, -1, 0],
+        [1, 0, 0],
+        [0, 0, 1]
+        ])
+    assert_array_almost_equal(mat[0], expected_mat)
+
+
+def test_as_matrix_from_square_input():
+    quats = [
+            [0, 0, 1, 1],
+            [0, 1, 0, 1],
+            [0, 0, 0, 1],
+            [0, 0, 0, -1]
+            ]
+    mat = Rotation.from_quat(quats).as_matrix()
+    assert_equal(mat.shape, (4, 3, 3))
+
+    expected0 = np.array([
+        [0, -1, 0],
+        [1, 0, 0],
+        [0, 0, 1]
+        ])
+    assert_array_almost_equal(mat[0], expected0)
+
+    expected1 = np.array([
+        [0, 0, 1],
+        [0, 1, 0],
+        [-1, 0, 0]
+        ])
+    assert_array_almost_equal(mat[1], expected1)
+
+    assert_array_almost_equal(mat[2], np.eye(3))
+    assert_array_almost_equal(mat[3], np.eye(3))
+
+
+def test_as_matrix_from_generic_input():
+    quats = [
+            [0, 0, 1, 1],
+            [0, 1, 0, 1],
+            [1, 2, 3, 4]
+            ]
+    mat = Rotation.from_quat(quats).as_matrix()
+    assert_equal(mat.shape, (3, 3, 3))
+
+    expected0 = np.array([
+        [0, -1, 0],
+        [1, 0, 0],
+        [0, 0, 1]
+        ])
+    assert_array_almost_equal(mat[0], expected0)
+
+    expected1 = np.array([
+        [0, 0, 1],
+        [0, 1, 0],
+        [-1, 0, 0]
+        ])
+    assert_array_almost_equal(mat[1], expected1)
+
+    expected2 = np.array([
+        [0.4, -2, 2.2],
+        [2.8, 1, 0.4],
+        [-1, 2, 2]
+        ]) / 3
+    assert_array_almost_equal(mat[2], expected2)
+
+
+def test_from_single_2d_matrix():
+    mat = [
+            [0, 0, 1],
+            [1, 0, 0],
+            [0, 1, 0]
+            ]
+    expected_quat = [0.5, 0.5, 0.5, 0.5]
+    assert_array_almost_equal(
+            Rotation.from_matrix(mat).as_quat(),
+            expected_quat)
+
+
+def test_from_single_3d_matrix():
+    mat = np.array([
+        [0, 0, 1],
+        [1, 0, 0],
+        [0, 1, 0]
+        ]).reshape((1, 3, 3))
+    expected_quat = np.array([0.5, 0.5, 0.5, 0.5]).reshape((1, 4))
+    assert_array_almost_equal(
+            Rotation.from_matrix(mat).as_quat(),
+            expected_quat)
+
+
+def test_from_matrix_calculation():
+    expected_quat = np.array([1, 1, 6, 1]) / np.sqrt(39)
+    mat = np.array([
+            [-0.8974359, -0.2564103, 0.3589744],
+            [0.3589744, -0.8974359, 0.2564103],
+            [0.2564103, 0.3589744, 0.8974359]
+            ])
+    assert_array_almost_equal(
+            Rotation.from_matrix(mat).as_quat(),
+            expected_quat)
+    assert_array_almost_equal(
+            Rotation.from_matrix(mat.reshape((1, 3, 3))).as_quat(),
+            expected_quat.reshape((1, 4)))
+
+
+def test_matrix_calculation_pipeline():
+    mat = special_ortho_group.rvs(3, size=10, random_state=0)
+    assert_array_almost_equal(Rotation.from_matrix(mat).as_matrix(), mat)
+
+
+def test_from_matrix_ortho_output():
+    rnd = np.random.RandomState(0)
+    mat = rnd.random_sample((100, 3, 3))
+    ortho_mat = Rotation.from_matrix(mat).as_matrix()
+
+    mult_result = np.einsum('...ij,...jk->...ik', ortho_mat,
+                            ortho_mat.transpose((0, 2, 1)))
+
+    eye3d = np.zeros((100, 3, 3))
+    for i in range(3):
+        eye3d[:, i, i] = 1.0
+
+    assert_array_almost_equal(mult_result, eye3d)
+
+
+def test_from_1d_single_rotvec():
+    rotvec = [1, 0, 0]
+    expected_quat = np.array([0.4794255, 0, 0, 0.8775826])
+    result = Rotation.from_rotvec(rotvec)
+    assert_array_almost_equal(result.as_quat(), expected_quat)
+
+
+def test_from_2d_single_rotvec():
+    rotvec = [[1, 0, 0]]
+    expected_quat = np.array([[0.4794255, 0, 0, 0.8775826]])
+    result = Rotation.from_rotvec(rotvec)
+    assert_array_almost_equal(result.as_quat(), expected_quat)
+
+
+def test_from_generic_rotvec():
+    rotvec = [
+            [1, 2, 2],
+            [1, -1, 0.5],
+            [0, 0, 0]
+            ]
+    expected_quat = np.array([
+        [0.3324983, 0.6649967, 0.6649967, 0.0707372],
+        [0.4544258, -0.4544258, 0.2272129, 0.7316889],
+        [0, 0, 0, 1]
+        ])
+    assert_array_almost_equal(
+            Rotation.from_rotvec(rotvec).as_quat(),
+            expected_quat)
+
+
+def test_from_rotvec_small_angle():
+    rotvec = np.array([
+        [5e-4 / np.sqrt(3), -5e-4 / np.sqrt(3), 5e-4 / np.sqrt(3)],
+        [0.2, 0.3, 0.4],
+        [0, 0, 0]
+        ])
+
+    quat = Rotation.from_rotvec(rotvec).as_quat()
+    # cos(theta/2) ~~ 1 for small theta
+    assert_allclose(quat[0, 3], 1)
+    # sin(theta/2) / theta ~~ 0.5 for small theta
+    assert_allclose(quat[0, :3], rotvec[0] * 0.5)
+
+    assert_allclose(quat[1, 3], 0.9639685)
+    assert_allclose(
+            quat[1, :3],
+            np.array([
+                0.09879603932153465,
+                0.14819405898230198,
+                0.19759207864306931
+                ]))
+
+    assert_equal(quat[2], np.array([0, 0, 0, 1]))
+
+
+def test_degrees_from_rotvec():
+    rotvec1 = [1.0 / np.cbrt(3), 1.0 / np.cbrt(3), 1.0 / np.cbrt(3)]
+    rot1 = Rotation.from_rotvec(rotvec1, degrees=True)
+    quat1 = rot1.as_quat()
+
+    rotvec2 = np.deg2rad(rotvec1)
+    rot2 = Rotation.from_rotvec(rotvec2)
+    quat2 = rot2.as_quat()
+
+    assert_allclose(quat1, quat2)
+
+
+def test_malformed_1d_from_rotvec():
+    with pytest.raises(ValueError, match='Expected `rot_vec` to have shape'):
+        Rotation.from_rotvec([1, 2])
+
+
+def test_malformed_2d_from_rotvec():
+    with pytest.raises(ValueError, match='Expected `rot_vec` to have shape'):
+        Rotation.from_rotvec([
+            [1, 2, 3, 4],
+            [5, 6, 7, 8]
+            ])
+
+
+def test_as_generic_rotvec():
+    quat = np.array([
+            [1, 2, -1, 0.5],
+            [1, -1, 1, 0.0003],
+            [0, 0, 0, 1]
+            ])
+    quat /= np.linalg.norm(quat, axis=1)[:, None]
+
+    rotvec = Rotation.from_quat(quat).as_rotvec()
+    angle = np.linalg.norm(rotvec, axis=1)
+
+    assert_allclose(quat[:, 3], np.cos(angle/2))
+    assert_allclose(np.cross(rotvec, quat[:, :3]), np.zeros((3, 3)))
+
+
+def test_as_rotvec_single_1d_input():
+    quat = np.array([1, 2, -3, 2])
+    expected_rotvec = np.array([0.5772381, 1.1544763, -1.7317144])
+
+    actual_rotvec = Rotation.from_quat(quat).as_rotvec()
+
+    assert_equal(actual_rotvec.shape, (3,))
+    assert_allclose(actual_rotvec, expected_rotvec)
+
+
+def test_as_rotvec_single_2d_input():
+    quat = np.array([[1, 2, -3, 2]])
+    expected_rotvec = np.array([[0.5772381, 1.1544763, -1.7317144]])
+
+    actual_rotvec = Rotation.from_quat(quat).as_rotvec()
+
+    assert_equal(actual_rotvec.shape, (1, 3))
+    assert_allclose(actual_rotvec, expected_rotvec)
+
+
+def test_as_rotvec_degrees():
+    # x->y, y->z, z->x
+    mat = [[0, 0, 1], [1, 0, 0], [0, 1, 0]]
+    rot = Rotation.from_matrix(mat)
+    rotvec = rot.as_rotvec(degrees=True)
+    angle = np.linalg.norm(rotvec)
+    assert_allclose(angle, 120.0)
+    assert_allclose(rotvec[0], rotvec[1])
+    assert_allclose(rotvec[1], rotvec[2])
+
+
+def test_rotvec_calc_pipeline():
+    # Include small angles
+    rotvec = np.array([
+        [0, 0, 0],
+        [1, -1, 2],
+        [-3e-4, 3.5e-4, 7.5e-5]
+        ])
+    assert_allclose(Rotation.from_rotvec(rotvec).as_rotvec(), rotvec)
+    assert_allclose(Rotation.from_rotvec(rotvec, degrees=True).as_rotvec(degrees=True), rotvec)
+
+
+def test_from_1d_single_mrp():
+    mrp = [0, 0, 1.0]
+    expected_quat = np.array([0, 0, 1, 0])
+    result = Rotation.from_mrp(mrp)
+    assert_array_almost_equal(result.as_quat(), expected_quat)
+
+
+def test_from_2d_single_mrp():
+    mrp = [[0, 0, 1.0]]
+    expected_quat = np.array([[0, 0, 1, 0]])
+    result = Rotation.from_mrp(mrp)
+    assert_array_almost_equal(result.as_quat(), expected_quat)
+
+
+def test_from_generic_mrp():
+    mrp = np.array([
+        [1, 2, 2],
+        [1, -1, 0.5],
+        [0, 0, 0]])
+    expected_quat = np.array([
+        [0.2, 0.4, 0.4, -0.8],
+        [0.61538462, -0.61538462, 0.30769231, -0.38461538],
+        [0, 0, 0, 1]])
+    assert_array_almost_equal(Rotation.from_mrp(mrp).as_quat(), expected_quat)
+
+
+def test_malformed_1d_from_mrp():
+    with pytest.raises(ValueError, match='Expected `mrp` to have shape'):
+        Rotation.from_mrp([1, 2])
+
+
+def test_malformed_2d_from_mrp():
+    with pytest.raises(ValueError, match='Expected `mrp` to have shape'):
+        Rotation.from_mrp([
+            [1, 2, 3, 4],
+            [5, 6, 7, 8]
+            ])
+
+
+def test_as_generic_mrp():
+    quat = np.array([
+        [1, 2, -1, 0.5],
+        [1, -1, 1, 0.0003],
+        [0, 0, 0, 1]])
+    quat /= np.linalg.norm(quat, axis=1)[:, None]
+
+    expected_mrp = np.array([
+        [0.33333333, 0.66666667, -0.33333333],
+        [0.57725028, -0.57725028, 0.57725028],
+        [0, 0, 0]])
+    assert_array_almost_equal(Rotation.from_quat(quat).as_mrp(), expected_mrp)
+
+def test_past_180_degree_rotation():
+    # Ensure that a > 180 degree rotation is returned as a < 180 degree
+    # rotation in MRPs; in this case 270 degrees should come back as -90.
+    expected_mrp = np.array([-np.tan(np.pi/2/4), 0.0, 0])
+    assert_array_almost_equal(Rotation.from_euler('xyz', [270, 0, 0], degrees=True).as_mrp(), expected_mrp)
+
+
+def test_as_mrp_single_1d_input():
+    quat = np.array([1, 2, -3, 2])
+    expected_mrp = np.array([0.16018862, 0.32037724, -0.48056586])
+
+    actual_mrp = Rotation.from_quat(quat).as_mrp()
+
+    assert_equal(actual_mrp.shape, (3,))
+    assert_allclose(actual_mrp, expected_mrp)
+
+
+def test_as_mrp_single_2d_input():
+    quat = np.array([[1, 2, -3, 2]])
+    expected_mrp = np.array([[0.16018862, 0.32037724, -0.48056586]])
+
+    actual_mrp = Rotation.from_quat(quat).as_mrp()
+
+    assert_equal(actual_mrp.shape, (1, 3))
+    assert_allclose(actual_mrp, expected_mrp)
+
+
+def test_mrp_calc_pipeline():
+    actual_mrp = np.array([
+        [0, 0, 0],
+        [1, -1, 2],
+        [0.41421356, 0, 0],
+        [0.1, 0.2, 0.1]])
+    expected_mrp = np.array([
+        [0, 0, 0],
+        [-0.16666667, 0.16666667, -0.33333333],
+        [0.41421356, 0, 0],
+        [0.1, 0.2, 0.1]])
+    assert_allclose(Rotation.from_mrp(actual_mrp).as_mrp(), expected_mrp)
+
+
+def test_from_euler_single_rotation():
+    quat = Rotation.from_euler('z', 90, degrees=True).as_quat()
+    expected_quat = np.array([0, 0, 1, 1]) / np.sqrt(2)
+    assert_allclose(quat, expected_quat)
+
+
+def test_single_intrinsic_extrinsic_rotation():
+    extrinsic = Rotation.from_euler('z', 90, degrees=True).as_matrix()
+    intrinsic = Rotation.from_euler('Z', 90, degrees=True).as_matrix()
+    assert_allclose(extrinsic, intrinsic)
+
+
+def test_from_euler_rotation_order():
+    # Intrinsic rotation is same as extrinsic with order reversed
+    rnd = np.random.RandomState(0)
+    a = rnd.randint(low=0, high=180, size=(6, 3))
+    b = a[:, ::-1]
+    x = Rotation.from_euler('xyz', a, degrees=True).as_quat()
+    y = Rotation.from_euler('ZYX', b, degrees=True).as_quat()
+    assert_allclose(x, y)
+
+
+def test_from_euler_elementary_extrinsic_rotation():
+    # Simple test to check if extrinsic rotations are implemented correctly
+    mat = Rotation.from_euler('zx', [90, 90], degrees=True).as_matrix()
+    expected_mat = np.array([
+        [0, -1, 0],
+        [0, 0, -1],
+        [1, 0, 0]
+    ])
+    assert_array_almost_equal(mat, expected_mat)
+
+
+def test_from_euler_intrinsic_rotation_312():
+    angles = [
+        [30, 60, 45],
+        [30, 60, 30],
+        [45, 30, 60]
+        ]
+    mat = Rotation.from_euler('ZXY', angles, degrees=True).as_matrix()
+
+    assert_array_almost_equal(mat[0], np.array([
+        [0.3061862, -0.2500000, 0.9185587],
+        [0.8838835, 0.4330127, -0.1767767],
+        [-0.3535534, 0.8660254, 0.3535534]
+    ]))
+
+    assert_array_almost_equal(mat[1], np.array([
+        [0.5334936, -0.2500000, 0.8080127],
+        [0.8080127, 0.4330127, -0.3995191],
+        [-0.2500000, 0.8660254, 0.4330127]
+    ]))
+
+    assert_array_almost_equal(mat[2], np.array([
+        [0.0473672, -0.6123725, 0.7891491],
+        [0.6597396, 0.6123725, 0.4355958],
+        [-0.7500000, 0.5000000, 0.4330127]
+    ]))
+
+
+def test_from_euler_intrinsic_rotation_313():
+    angles = [
+        [30, 60, 45],
+        [30, 60, 30],
+        [45, 30, 60]
+        ]
+    mat = Rotation.from_euler('ZXZ', angles, degrees=True).as_matrix()
+
+    assert_array_almost_equal(mat[0], np.array([
+        [0.43559574, -0.78914913, 0.4330127],
+        [0.65973961, -0.04736717, -0.750000],
+        [0.61237244, 0.61237244, 0.500000]
+    ]))
+
+    assert_array_almost_equal(mat[1], np.array([
+        [0.6250000, -0.64951905, 0.4330127],
+        [0.64951905, 0.1250000, -0.750000],
+        [0.4330127, 0.750000, 0.500000]
+    ]))
+
+    assert_array_almost_equal(mat[2], np.array([
+        [-0.1767767, -0.91855865, 0.35355339],
+        [0.88388348, -0.30618622, -0.35355339],
+        [0.4330127, 0.25000000, 0.8660254]
+    ]))
+
+
+def test_from_euler_extrinsic_rotation_312():
+    angles = [
+        [30, 60, 45],
+        [30, 60, 30],
+        [45, 30, 60]
+        ]
+    mat = Rotation.from_euler('zxy', angles, degrees=True).as_matrix()
+
+    assert_array_almost_equal(mat[0], np.array([
+        [0.91855865, 0.1767767, 0.35355339],
+        [0.25000000, 0.4330127, -0.8660254],
+        [-0.30618622, 0.88388348, 0.35355339]
+    ]))
+
+    assert_array_almost_equal(mat[1], np.array([
+        [0.96650635, -0.0580127, 0.2500000],
+        [0.25000000, 0.4330127, -0.8660254],
+        [-0.0580127, 0.89951905, 0.4330127]
+    ]))
+
+    assert_array_almost_equal(mat[2], np.array([
+        [0.65973961, -0.04736717, 0.7500000],
+        [0.61237244, 0.61237244, -0.5000000],
+        [-0.43559574, 0.78914913, 0.4330127]
+    ]))
+
+
+def test_from_euler_extrinsic_rotation_313():
+    angles = [
+        [30, 60, 45],
+        [30, 60, 30],
+        [45, 30, 60]
+        ]
+    mat = Rotation.from_euler('zxz', angles, degrees=True).as_matrix()
+
+    assert_array_almost_equal(mat[0], np.array([
+        [0.43559574, -0.65973961, 0.61237244],
+        [0.78914913, -0.04736717, -0.61237244],
+        [0.4330127, 0.75000000, 0.500000]
+    ]))
+
+    assert_array_almost_equal(mat[1], np.array([
+        [0.62500000, -0.64951905, 0.4330127],
+        [0.64951905, 0.12500000, -0.750000],
+        [0.4330127, 0.75000000, 0.500000]
+    ]))
+
+    assert_array_almost_equal(mat[2], np.array([
+        [-0.1767767, -0.88388348, 0.4330127],
+        [0.91855865, -0.30618622, -0.250000],
+        [0.35355339, 0.35355339, 0.8660254]
+    ]))
+
+
+def test_as_euler_asymmetric_axes():
+    rnd = np.random.RandomState(0)
+    n = 10
+    angles = np.empty((n, 3))
+    angles[:, 0] = rnd.uniform(low=-np.pi, high=np.pi, size=(n,))
+    angles[:, 1] = rnd.uniform(low=-np.pi / 2, high=np.pi / 2, size=(n,))
+    angles[:, 2] = rnd.uniform(low=-np.pi, high=np.pi, size=(n,))
+
+    for seq_tuple in permutations('xyz'):
+        # Extrinsic rotations
+        seq = ''.join(seq_tuple)
+        assert_allclose(angles, Rotation.from_euler(seq, angles).as_euler(seq))
+        # Intrinsic rotations
+        seq = seq.upper()
+        assert_allclose(angles, Rotation.from_euler(seq, angles).as_euler(seq))
+
+
+def test_as_euler_symmetric_axes():
+    rnd = np.random.RandomState(0)
+    n = 10
+    angles = np.empty((n, 3))
+    angles[:, 0] = rnd.uniform(low=-np.pi, high=np.pi, size=(n,))
+    angles[:, 1] = rnd.uniform(low=0, high=np.pi, size=(n,))
+    angles[:, 2] = rnd.uniform(low=-np.pi, high=np.pi, size=(n,))
+
+    for axis1 in ['x', 'y', 'z']:
+        for axis2 in ['x', 'y', 'z']:
+            if axis1 == axis2:
+                continue
+            # Extrinsic rotations
+            seq = axis1 + axis2 + axis1
+            assert_allclose(
+                angles, Rotation.from_euler(seq, angles).as_euler(seq))
+            # Intrinsic rotations
+            seq = seq.upper()
+            assert_allclose(
+                angles, Rotation.from_euler(seq, angles).as_euler(seq))
+
+
+def test_as_euler_degenerate_asymmetric_axes():
+    # Since we cannot check for angle equality, we check for rotation matrix
+    # equality
+    angles = np.array([
+        [45, 90, 35],
+        [35, -90, 20],
+        [35, 90, 25],
+        [25, -90, 15]
+        ])
+
+    with pytest.warns(UserWarning, match="Gimbal lock"):
+        for seq_tuple in permutations('xyz'):
+            # Extrinsic rotations
+            seq = ''.join(seq_tuple)
+            rotation = Rotation.from_euler(seq, angles, degrees=True)
+            mat_expected = rotation.as_matrix()
+
+            angle_estimates = rotation.as_euler(seq, degrees=True)
+            mat_estimated = Rotation.from_euler(
+                seq, angle_estimates, degrees=True
+                ).as_matrix()
+
+            assert_array_almost_equal(mat_expected, mat_estimated)
+
+            # Intrinsic rotations
+            seq = seq.upper()
+            rotation = Rotation.from_euler(seq, angles, degrees=True)
+            mat_expected = rotation.as_matrix()
+
+            angle_estimates = rotation.as_euler(seq, degrees=True)
+            mat_estimated = Rotation.from_euler(
+                seq, angle_estimates, degrees=True
+                ).as_matrix()
+
+            assert_array_almost_equal(mat_expected, mat_estimated)
+
+
+def test_as_euler_degenerate_symmetric_axes():
+    # Since we cannot check for angle equality, we check for rotation matrix
+    # equality
+    angles = np.array([
+        [15, 0, 60],
+        [35, 0, 75],
+        [60, 180, 35],
+        [15, -180, 25],
+        ])
+
+    with pytest.warns(UserWarning, match="Gimbal lock"):
+        for axis1 in ['x', 'y', 'z']:
+            for axis2 in ['x', 'y', 'z']:
+                if axis1 == axis2:
+                    continue
+
+                # Extrinsic rotations
+                seq = axis1 + axis2 + axis1
+                rotation = Rotation.from_euler(seq, angles, degrees=True)
+                mat_expected = rotation.as_matrix()
+
+                angle_estimates = rotation.as_euler(seq, degrees=True)
+                mat_estimated = Rotation.from_euler(
+                    seq, angle_estimates, degrees=True
+                    ).as_matrix()
+
+                assert_array_almost_equal(mat_expected, mat_estimated)
+
+                # Intrinsic rotations
+                seq = seq.upper()
+                rotation = Rotation.from_euler(seq, angles, degrees=True)
+                mat_expected = rotation.as_matrix()
+
+                angle_estimates = rotation.as_euler(seq, degrees=True)
+                mat_estimated = Rotation.from_euler(
+                    seq, angle_estimates, degrees=True
+                    ).as_matrix()
+
+                assert_array_almost_equal(mat_expected, mat_estimated)
+
+
+def test_inv():
+    rnd = np.random.RandomState(0)
+    n = 10
+    p = Rotation.random(num=n, random_state=rnd)
+    q = p.inv()
+
+    p_mat = p.as_matrix()
+    q_mat = q.as_matrix()
+    result1 = np.einsum('...ij,...jk->...ik', p_mat, q_mat)
+    result2 = np.einsum('...ij,...jk->...ik', q_mat, p_mat)
+
+    eye3d = np.empty((n, 3, 3))
+    eye3d[:] = np.eye(3)
+
+    assert_array_almost_equal(result1, eye3d)
+    assert_array_almost_equal(result2, eye3d)
+
+
+def test_inv_single_rotation():
+    rnd = np.random.RandomState(0)
+    p = Rotation.random(random_state=rnd)
+    q = p.inv()
+
+    p_mat = p.as_matrix()
+    q_mat = q.as_matrix()
+    res1 = np.dot(p_mat, q_mat)
+    res2 = np.dot(q_mat, p_mat)
+
+    eye = np.eye(3)
+
+    assert_array_almost_equal(res1, eye)
+    assert_array_almost_equal(res2, eye)
+
+    x = Rotation.random(num=1, random_state=rnd)
+    y = x.inv()
+
+    x_matrix = x.as_matrix()
+    y_matrix = y.as_matrix()
+    result1 = np.einsum('...ij,...jk->...ik', x_matrix, y_matrix)
+    result2 = np.einsum('...ij,...jk->...ik', y_matrix, x_matrix)
+
+    eye3d = np.empty((1, 3, 3))
+    eye3d[:] = np.eye(3)
+
+    assert_array_almost_equal(result1, eye3d)
+    assert_array_almost_equal(result2, eye3d)
+
+
+def test_identity_magnitude():
+    n = 10
+    assert_allclose(Rotation.identity(n).magnitude(), 0)
+    assert_allclose(Rotation.identity(n).inv().magnitude(), 0)
+
+
+def test_single_identity_magnitude():
+    assert Rotation.identity().magnitude() == 0
+    assert Rotation.identity().inv().magnitude() == 0
+
+
+def test_identity_invariance():
+    n = 10
+    p = Rotation.random(n, random_state=0)
+
+    result = p * Rotation.identity(n)
+    assert_array_almost_equal(p.as_quat(), result.as_quat())
+
+    result = result * p.inv()
+    assert_array_almost_equal(result.magnitude(), np.zeros(n))
+
+
+def test_single_identity_invariance():
+    n = 10
+    p = Rotation.random(n, random_state=0)
+
+    result = p * Rotation.identity()
+    assert_array_almost_equal(p.as_quat(), result.as_quat())
+
+    result = result * p.inv()
+    assert_array_almost_equal(result.magnitude(), np.zeros(n))
+
+
+def test_magnitude():
+    r = Rotation.from_quat(np.eye(4))
+    result = r.magnitude()
+    assert_array_almost_equal(result, [np.pi, np.pi, np.pi, 0])
+
+    r = Rotation.from_quat(-np.eye(4))
+    result = r.magnitude()
+    assert_array_almost_equal(result, [np.pi, np.pi, np.pi, 0])
+
+
+def test_magnitude_single_rotation():
+    r = Rotation.from_quat(np.eye(4))
+    result1 = r[0].magnitude()
+    assert_allclose(result1, np.pi)
+
+    result2 = r[3].magnitude()
+    assert_allclose(result2, 0)
+
+
+def test_mean():
+    axes = np.concatenate((-np.eye(3), np.eye(3)))
+    thetas = np.linspace(0, np.pi / 2, 100)
+    for t in thetas:
+        r = Rotation.from_rotvec(t * axes)
+        assert_allclose(r.mean().magnitude(), 0, atol=1E-10)
+
+
+def test_weighted_mean():
+    # test that doubling a weight is equivalent to including a rotation twice.
+    axes = np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0]])
+    thetas = np.linspace(0, np.pi / 2, 100)
+    for t in thetas:
+        rw = Rotation.from_rotvec(t * axes[:2])
+        mw = rw.mean(weights=[1, 2])
+
+        r = Rotation.from_rotvec(t * axes)
+        m = r.mean()
+        assert_allclose((m * mw.inv()).magnitude(), 0, atol=1E-10)
+
+
+def test_mean_invalid_weights():
+    with pytest.raises(ValueError, match="non-negative"):
+        r = Rotation.from_quat(np.eye(4))
+        r.mean(weights=-np.ones(4))
+
+
+def test_reduction_no_indices():
+    result = Rotation.identity().reduce(return_indices=False)
+    assert isinstance(result, Rotation)
+
+
+def test_reduction_none_indices():
+    result = Rotation.identity().reduce(return_indices=True)
+    assert type(result) == tuple
+    assert len(result) == 3
+
+    reduced, left_best, right_best = result
+    assert left_best is None
+    assert right_best is None
+
+
+def test_reduction_scalar_calculation():
+    rng = np.random.RandomState(0)
+    l = Rotation.random(5, random_state=rng)
+    r = Rotation.random(10, random_state=rng)
+    p = Rotation.random(7, random_state=rng)
+    reduced, left_best, right_best = p.reduce(l, r, return_indices=True)
+
+    # Loop implementation of the vectorized calculation in Rotation.reduce
+    scalars = np.zeros((len(l), len(p), len(r)))
+    for i, li in enumerate(l):
+        for j, pj in enumerate(p):
+            for k, rk in enumerate(r):
+                scalars[i, j, k] = np.abs((li * pj * rk).as_quat()[3])
+    scalars = np.reshape(np.moveaxis(scalars, 1, 0), (scalars.shape[1], -1))
+
+    max_ind = np.argmax(np.reshape(scalars, (len(p), -1)), axis=1)
+    left_best_check = max_ind // len(r)
+    right_best_check = max_ind % len(r)
+    assert (left_best == left_best_check).all()
+    assert (right_best == right_best_check).all()
+
+    reduced_check = l[left_best_check] * p * r[right_best_check]
+    mag = (reduced.inv() * reduced_check).magnitude()
+    assert_array_almost_equal(mag, np.zeros(len(p)))
+
+
+def test_apply_single_rotation_single_point():
+    mat = np.array([
+        [0, -1, 0],
+        [1, 0, 0],
+        [0, 0, 1]
+    ])
+    r_1d = Rotation.from_matrix(mat)
+    r_2d = Rotation.from_matrix(np.expand_dims(mat, axis=0))
+
+    v_1d = np.array([1, 2, 3])
+    v_2d = np.expand_dims(v_1d, axis=0)
+    v1d_rotated = np.array([-2, 1, 3])
+    v2d_rotated = np.expand_dims(v1d_rotated, axis=0)
+
+    assert_allclose(r_1d.apply(v_1d), v1d_rotated)
+    assert_allclose(r_1d.apply(v_2d), v2d_rotated)
+    assert_allclose(r_2d.apply(v_1d), v2d_rotated)
+    assert_allclose(r_2d.apply(v_2d), v2d_rotated)
+
+    v1d_inverse = np.array([2, -1, 3])
+    v2d_inverse = np.expand_dims(v1d_inverse, axis=0)
+
+    assert_allclose(r_1d.apply(v_1d, inverse=True), v1d_inverse)
+    assert_allclose(r_1d.apply(v_2d, inverse=True), v2d_inverse)
+    assert_allclose(r_2d.apply(v_1d, inverse=True), v2d_inverse)
+    assert_allclose(r_2d.apply(v_2d, inverse=True), v2d_inverse)
+
+
+def test_apply_single_rotation_multiple_points():
+    mat = np.array([
+        [0, -1, 0],
+        [1, 0, 0],
+        [0, 0, 1]
+    ])
+    r1 = Rotation.from_matrix(mat)
+    r2 = Rotation.from_matrix(np.expand_dims(mat, axis=0))
+
+    v = np.array([[1, 2, 3], [4, 5, 6]])
+    v_rotated = np.array([[-2, 1, 3], [-5, 4, 6]])
+
+    assert_allclose(r1.apply(v), v_rotated)
+    assert_allclose(r2.apply(v), v_rotated)
+
+    v_inverse = np.array([[2, -1, 3], [5, -4, 6]])
+
+    assert_allclose(r1.apply(v, inverse=True), v_inverse)
+    assert_allclose(r2.apply(v, inverse=True), v_inverse)
+
+
+def test_apply_multiple_rotations_single_point():
+    mat = np.empty((2, 3, 3))
+    mat[0] = np.array([
+        [0, -1, 0],
+        [1, 0, 0],
+        [0, 0, 1]
+    ])
+    mat[1] = np.array([
+        [1, 0, 0],
+        [0, 0, -1],
+        [0, 1, 0]
+    ])
+    r = Rotation.from_matrix(mat)
+
+    v1 = np.array([1, 2, 3])
+    v2 = np.expand_dims(v1, axis=0)
+
+    v_rotated = np.array([[-2, 1, 3], [1, -3, 2]])
+
+    assert_allclose(r.apply(v1), v_rotated)
+    assert_allclose(r.apply(v2), v_rotated)
+
+    v_inverse = np.array([[2, -1, 3], [1, 3, -2]])
+
+    assert_allclose(r.apply(v1, inverse=True), v_inverse)
+    assert_allclose(r.apply(v2, inverse=True), v_inverse)
+
+
+def test_apply_multiple_rotations_multiple_points():
+    mat = np.empty((2, 3, 3))
+    mat[0] = np.array([
+        [0, -1, 0],
+        [1, 0, 0],
+        [0, 0, 1]
+    ])
+    mat[1] = np.array([
+        [1, 0, 0],
+        [0, 0, -1],
+        [0, 1, 0]
+    ])
+    r = Rotation.from_matrix(mat)
+
+    v = np.array([[1, 2, 3], [4, 5, 6]])
+    v_rotated = np.array([[-2, 1, 3], [4, -6, 5]])
+    assert_allclose(r.apply(v), v_rotated)
+
+    v_inverse = np.array([[2, -1, 3], [4, 6, -5]])
+    assert_allclose(r.apply(v, inverse=True), v_inverse)
+
+
+def test_getitem():
+    mat = np.empty((2, 3, 3))
+    mat[0] = np.array([
+        [0, -1, 0],
+        [1, 0, 0],
+        [0, 0, 1]
+    ])
+    mat[1] = np.array([
+        [1, 0, 0],
+        [0, 0, -1],
+        [0, 1, 0]
+    ])
+    r = Rotation.from_matrix(mat)
+
+    assert_allclose(r[0].as_matrix(), mat[0], atol=1e-15)
+    assert_allclose(r[1].as_matrix(), mat[1], atol=1e-15)
+    assert_allclose(r[:-1].as_matrix(), np.expand_dims(mat[0], axis=0), atol=1e-15)
+
+
+def test_getitem_single():
+    with pytest.raises(TypeError, match='not subscriptable'):
+        Rotation.identity()[0]
+
+
+def test_setitem_single():
+    r = Rotation.identity()
+    with pytest.raises(TypeError, match='not subscriptable'):
+        r[0] = Rotation.identity()
+
+
+def test_setitem_slice():
+    rng = np.random.RandomState(seed=0)
+    r1 = Rotation.random(10, random_state=rng)
+    r2 = Rotation.random(5, random_state=rng)
+    r1[1:6] = r2
+    assert_equal(r1[1:6].as_quat(), r2.as_quat())
+
+
+def test_setitem_integer():
+    rng = np.random.RandomState(seed=0)
+    r1 = Rotation.random(10, random_state=rng)
+    r2 = Rotation.random(random_state=rng)
+    r1[1] = r2
+    assert_equal(r1[1].as_quat(), r2.as_quat())
+
+
+def test_setitem_wrong_type():
+    r = Rotation.random(10, random_state=0)
+    with pytest.raises(TypeError, match='Rotation object'):
+        r[0] = 1
+
+
+def test_n_rotations():
+    mat = np.empty((2, 3, 3))
+    mat[0] = np.array([
+        [0, -1, 0],
+        [1, 0, 0],
+        [0, 0, 1]
+    ])
+    mat[1] = np.array([
+        [1, 0, 0],
+        [0, 0, -1],
+        [0, 1, 0]
+    ])
+    r = Rotation.from_matrix(mat)
+
+    assert_equal(len(r), 2)
+    assert_equal(len(r[:-1]), 1)
+
+
+def test_align_vectors_no_rotation():
+    x = np.array([[1, 2, 3], [4, 5, 6]])
+    y = x.copy()
+
+    r, rmsd = Rotation.align_vectors(x, y)
+    assert_array_almost_equal(r.as_matrix(), np.eye(3))
+    assert_allclose(rmsd, 0, atol=1e-6)
+
+
+def test_align_vectors_no_noise():
+    rnd = np.random.RandomState(0)
+    c = Rotation.random(random_state=rnd)
+    b = rnd.normal(size=(5, 3))
+    a = c.apply(b)
+
+    est, rmsd = Rotation.align_vectors(a, b)
+    assert_allclose(c.as_quat(), est.as_quat())
+    assert_allclose(rmsd, 0, atol=1e-7)
+
+
+def test_align_vectors_improper_rotation():
+    # Tests correct logic for issue #10444
+    x = np.array([[0.89299824, -0.44372674, 0.0752378],
+                  [0.60221789, -0.47564102, -0.6411702]])
+    y = np.array([[0.02386536, -0.82176463, 0.5693271],
+                  [-0.27654929, -0.95191427, -0.1318321]])
+
+    est, rmsd = Rotation.align_vectors(x, y)
+    assert_allclose(x, est.apply(y), atol=1e-6)
+    assert_allclose(rmsd, 0, atol=1e-7)
+
+
+def test_align_vectors_scaled_weights():
+    rng = np.random.RandomState(0)
+    c = Rotation.random(random_state=rng)
+    b = rng.normal(size=(5, 3))
+    a = c.apply(b)
+
+    est1, rmsd1, cov1 = Rotation.align_vectors(a, b, np.ones(5), True)
+    est2, rmsd2, cov2 = Rotation.align_vectors(a, b, 2 * np.ones(5), True)
+
+    assert_allclose(est1.as_matrix(), est2.as_matrix())
+    assert_allclose(np.sqrt(2) * rmsd1, rmsd2)
+    assert_allclose(cov1, cov2)
+
+
+def test_align_vectors_noise():
+    rnd = np.random.RandomState(0)
+    n_vectors = 100
+    rot = Rotation.random(random_state=rnd)
+    vectors = rnd.normal(size=(n_vectors, 3))
+    result = rot.apply(vectors)
+
+    # The paper adds noise as independently distributed angular errors
+    sigma = np.deg2rad(1)
+    tolerance = 1.5 * sigma
+    noise = Rotation.from_rotvec(
+        rnd.normal(
+            size=(n_vectors, 3),
+            scale=sigma
+        )
+    )
+
+    # Attitude errors must preserve norm. Hence apply individual random
+    # rotations to each vector.
+    noisy_result = noise.apply(result)
+
+    est, rmsd, cov = Rotation.align_vectors(noisy_result, vectors,
+                                            return_sensitivity=True)
+
+    # Use rotation composition to measure how close the estimate is to the truth
+    error_vector = (rot * est.inv()).as_rotvec()
+    assert_allclose(error_vector[0], 0, atol=tolerance)
+    assert_allclose(error_vector[1], 0, atol=tolerance)
+    assert_allclose(error_vector[2], 0, atol=tolerance)
+
+    # Check error bounds using covariance matrix
+    cov *= sigma
+    assert_allclose(cov[0, 0], 0, atol=tolerance)
+    assert_allclose(cov[1, 1], 0, atol=tolerance)
+    assert_allclose(cov[2, 2], 0, atol=tolerance)
+
+    assert_allclose(rmsd, np.sum((noisy_result - est.apply(vectors))**2)**0.5)
+
+
+def test_align_vectors_single_vector():
+    with pytest.warns(UserWarning, match="Optimal rotation is not"):
+        r_estimate, rmsd = Rotation.align_vectors([[1, -1, 1]], [[1, 1, -1]])
+        assert_allclose(rmsd, 0, atol=1e-16)
+
+
+def test_align_vectors_invalid_input():
+    with pytest.raises(ValueError, match="Expected input `a` to have shape"):
+        Rotation.align_vectors([1, 2, 3], [[1, 2, 3]])
+
+    with pytest.raises(ValueError, match="Expected input `b` to have shape"):
+        Rotation.align_vectors([[1, 2, 3]], [1, 2, 3])
+
+    with pytest.raises(ValueError, match="Expected inputs `a` and `b` "
+                                         "to have same shapes"):
+        Rotation.align_vectors([[1, 2, 3],[4, 5, 6]], [[1, 2, 3]])
+
+    with pytest.raises(ValueError,
+                       match="Expected `weights` to be 1 dimensional"):
+        Rotation.align_vectors([[1, 2, 3]], [[1, 2, 3]], weights=[[1]])
+
+    with pytest.raises(ValueError,
+                       match="Expected `weights` to have number of values"):
+        Rotation.align_vectors([[1, 2, 3]], [[1, 2, 3]], weights=[1, 2])
+
+    with pytest.raises(ValueError,
+                       match="`weights` may not contain negative values"):
+        Rotation.align_vectors([[1, 2, 3]], [[1, 2, 3]], weights=[-1])
+
+
+def test_random_rotation_shape():
+    rnd = np.random.RandomState(0)
+    assert_equal(Rotation.random(random_state=rnd).as_quat().shape, (4,))
+    assert_equal(Rotation.random(None, random_state=rnd).as_quat().shape, (4,))
+
+    assert_equal(Rotation.random(1, random_state=rnd).as_quat().shape, (1, 4))
+    assert_equal(Rotation.random(5, random_state=rnd).as_quat().shape, (5, 4))
+
+
+def test_slerp():
+    rnd = np.random.RandomState(0)
+
+    key_rots = Rotation.from_quat(rnd.uniform(size=(5, 4)))
+    key_quats = key_rots.as_quat()
+
+    key_times = [0, 1, 2, 3, 4]
+    interpolator = Slerp(key_times, key_rots)
+
+    times = [0, 0.5, 0.25, 1, 1.5, 2, 2.75, 3, 3.25, 3.60, 4]
+    interp_rots = interpolator(times)
+    interp_quats = interp_rots.as_quat()
+
+    # Dot products are affected by the sign of quaternions
+    interp_quats[interp_quats[:, -1] < 0] *= -1
+    # To check quaternion equality, apply the same sign fix to the keyframes
+    key_quats[key_quats[:, -1] < 0] *= -1
+
+    # Equality at keyframes, including both endpoints
+    assert_allclose(interp_quats[0], key_quats[0])
+    assert_allclose(interp_quats[3], key_quats[1])
+    assert_allclose(interp_quats[5], key_quats[2])
+    assert_allclose(interp_quats[7], key_quats[3])
+    assert_allclose(interp_quats[10], key_quats[4])
+
+    # Constant angular velocity between keyframes. Check by equating
+    # cos(theta) between quaternion pairs with equal time difference.
+    cos_theta1 = np.sum(interp_quats[0] * interp_quats[2])
+    cos_theta2 = np.sum(interp_quats[2] * interp_quats[1])
+    assert_allclose(cos_theta1, cos_theta2)
+
+    cos_theta4 = np.sum(interp_quats[3] * interp_quats[4])
+    cos_theta5 = np.sum(interp_quats[4] * interp_quats[5])
+    assert_allclose(cos_theta4, cos_theta5)
+
+    # theta1: 0 -> 0.25, theta3 : 0.5 -> 1
+    # Use double angle formula for double the time difference
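+    # (cos(2*theta) = 2*cos(theta)**2 - 1): the step from interp_quats[1]
+    # (t = 0.5) to interp_quats[3] (t = 1) is twice the step behind
+    # cos_theta1, so the subtended angle doubles.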
+    cos_theta3 = np.sum(interp_quats[1] * interp_quats[3])
+    assert_allclose(cos_theta3, 2 * (cos_theta1**2) - 1)
+
+    # Miscellaneous checks
+    assert_equal(len(interp_rots), len(times))
+
+
+def test_slerp_single_rot():
+    with pytest.raises(ValueError, match="must be a sequence of rotations"):
+        r = Rotation.from_quat([1, 2, 3, 4])
+        Slerp([1], r)
+
+
+def test_slerp_time_dim_mismatch():
+    with pytest.raises(ValueError,
+                       match="times to be specified in a 1 dimensional array"):
+        rnd = np.random.RandomState(0)
+        r = Rotation.from_quat(rnd.uniform(size=(2, 4)))
+        t = np.array([[1],
+                      [2]])
+        Slerp(t, r)
+
+
+def test_slerp_num_rotations_mismatch():
+    with pytest.raises(ValueError, match="number of rotations to be equal to "
+                                         "number of timestamps"):
+        rnd = np.random.RandomState(0)
+        r = Rotation.from_quat(rnd.uniform(size=(5, 4)))
+        t = np.arange(7)
+        Slerp(t, r)
+
+
+def test_slerp_equal_times():
+    with pytest.raises(ValueError, match="strictly increasing order"):
+        rnd = np.random.RandomState(0)
+        r = Rotation.from_quat(rnd.uniform(size=(5, 4)))
+        t = [0, 1, 2, 2, 4]
+        Slerp(t, r)
+
+
+def test_slerp_decreasing_times():
+    with pytest.raises(ValueError, match="strictly increasing order"):
+        rnd = np.random.RandomState(0)
+        r = Rotation.from_quat(rnd.uniform(size=(5, 4)))
+        t = [0, 1, 3, 2, 4]
+        Slerp(t, r)
+
+
+def test_slerp_call_time_dim_mismatch():
+    rnd = np.random.RandomState(0)
+    r = Rotation.from_quat(rnd.uniform(size=(5, 4)))
+    t = np.arange(5)
+    s = Slerp(t, r)
+
+    with pytest.raises(ValueError,
+                       match="`times` must be at most 1-dimensional."):
+        interp_times = np.array([[3.5],
+                                 [4.2]])
+        s(interp_times)
+
+
+def test_slerp_call_time_out_of_range():
+    rnd = np.random.RandomState(0)
+    r = Rotation.from_quat(rnd.uniform(size=(5, 4)))
+    t = np.arange(5) + 1
+    s = Slerp(t, r)
+
+    with pytest.raises(ValueError, match="times must be within the range"):
+        s([0, 1, 2])
+    with pytest.raises(ValueError, match="times must be within the range"):
+        s([1, 2, 6])
+
+
+def test_slerp_call_scalar_time():
+    r = Rotation.from_euler('X', [0, 80], degrees=True)
+    s = Slerp([0, 1], r)
+
+    r_interpolated = s(0.25)
+    r_interpolated_expected = Rotation.from_euler('X', 20, degrees=True)
+
+    delta = r_interpolated * r_interpolated_expected.inv()
+
+    assert_allclose(delta.magnitude(), 0, atol=1e-16)
+
+
+def test_multiplication_stability():
+    qs = Rotation.random(50, random_state=0)
+    rs = Rotation.random(1000, random_state=1)
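+    # Long chains of compositions must not let quaternion norms drift
+    # from 1; products are expected to be renormalized internally.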
+    for q in qs:
+        rs *= q * rs
+        assert_allclose(np.linalg.norm(rs.as_quat(), axis=1), 1)
+
+
+def test_rotation_within_numpy_array():
+    single = Rotation.random(random_state=0)
+    multiple = Rotation.random(2, random_state=1)
+
+    array = np.array(single)
+    assert_equal(array.shape, ())
+
+    array = np.array(multiple)
+    assert_equal(array.shape, (2,))
+    assert_allclose(array[0].as_matrix(), multiple[0].as_matrix())
+    assert_allclose(array[1].as_matrix(), multiple[1].as_matrix())
+
+    array = np.array([single])
+    assert_equal(array.shape, (1,))
+    assert_equal(array[0], single)
+
+    array = np.array([multiple])
+    assert_equal(array.shape, (1, 2))
+    assert_allclose(array[0, 0].as_matrix(), multiple[0].as_matrix())
+    assert_allclose(array[0, 1].as_matrix(), multiple[1].as_matrix())
+
+    array = np.array([single, multiple], dtype=object)
+    assert_equal(array.shape, (2,))
+    assert_equal(array[0], single)
+    assert_equal(array[1], multiple)
+
+    array = np.array([multiple, multiple, multiple])
+    assert_equal(array.shape, (3, 2))
+
+
+def test_pickling():
+    r = Rotation.from_quat([0, 0, np.sin(np.pi/4), np.cos(np.pi/4)])
+    pkl = pickle.dumps(r)
+    unpickled = pickle.loads(pkl)
+    assert_allclose(r.as_matrix(), unpickled.as_matrix(), atol=1e-15)
+
+
+def test_deepcopy():
+    r = Rotation.from_quat([0, 0, np.sin(np.pi/4), np.cos(np.pi/4)])
+    r1 = copy.deepcopy(r)
+    assert_allclose(r.as_matrix(), r1.as_matrix(), atol=1e-15)
+
+
+def test_as_euler_contiguous():
+    r = Rotation.from_quat([0, 0, 0, 1])
+    e1 = r.as_euler('xyz')  # extrinsic euler rotation
+    e2 = r.as_euler('XYZ')  # intrinsic
+    assert e1.flags['C_CONTIGUOUS'] is True
+    assert e2.flags['C_CONTIGUOUS'] is True
+    assert all(i >= 0 for i in e1.strides)
+    assert all(i >= 0 for i in e2.strides)
+
+
+def test_concatenate():
+    rotation = Rotation.random(10, random_state=0)
+    sizes = [1, 2, 3, 1, 3]
+    starts = [0] + list(np.cumsum(sizes))
+    split = [rotation[i:i + n] for i, n in zip(starts, sizes)]
+    result = Rotation.concatenate(split)
+    assert_equal(rotation.as_quat(), result.as_quat())
+
+
+def test_concatenate_wrong_type():
+    with pytest.raises(TypeError, match='Rotation objects only'):
+        Rotation.concatenate([Rotation.identity(), 1, None])
+
+
+# Regression test for gh-16663
+def test_len_and_bool():
+    rotation_multi_empty = Rotation(np.empty((0, 4)))
+    rotation_multi_one = Rotation([[0, 0, 0, 1]])
+    rotation_multi = Rotation([[0, 0, 0, 1], [0, 0, 0, 1]])
+    rotation_single = Rotation([0, 0, 0, 1])
+
+    assert len(rotation_multi_empty) == 0
+    assert len(rotation_multi_one) == 1
+    assert len(rotation_multi) == 2
+    with pytest.raises(TypeError, match="Single rotation has no len()."):
+        len(rotation_single)
+
+    # Rotation should always be truthy. See gh-16663
+    assert rotation_multi_empty
+    assert rotation_multi_one
+    assert rotation_multi
+    assert rotation_single
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/transform/tests/test_rotation_groups.py b/__packaged__/coreml/.python_dependencies/scipy/spatial/transform/tests/test_rotation_groups.py
new file mode 100644
index 00000000..befe60c1
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/transform/tests/test_rotation_groups.py
@@ -0,0 +1,169 @@
+import pytest
+
+import numpy as np
+from numpy.testing import assert_array_almost_equal
+from scipy.spatial.transform import Rotation
+from scipy.optimize import linear_sum_assignment
+from scipy.spatial.distance import cdist
+from scipy.constants import golden as phi
+from scipy.spatial import cKDTree
+
+
+TOL = 1E-12
+NS = range(1, 13)
+NAMES = ["I", "O", "T"] + ["C%d" % n for n in NS] + ["D%d" % n for n in NS]
+SIZES = [60, 24, 12] + list(NS) + [2 * n for n in NS]
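+# Expected group orders: |I| = 60, |O| = 24, |T| = 12, |Cn| = n, |Dn| = 2n.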
+
+
+def _calculate_rmsd(P, Q):
+    """Calculates the root-mean-square distance between the points of P and Q.
+    The distance is taken as the minimum over all possible matchings. It is
+    zero if P and Q are identical and non-zero if not.
+    """
+    distance_matrix = cdist(P, Q, metric='sqeuclidean')
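+    # linear_sum_assignment solves the assignment problem, returning the
+    # row/column matching that minimizes the total squared distance.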
+    matching = linear_sum_assignment(distance_matrix)
+    return np.sqrt(distance_matrix[matching].sum())
+
+
+def _generate_pyramid(n, axis):
+    thetas = np.linspace(0, 2 * np.pi, n + 1)[:-1]
+    P = np.vstack([np.zeros(n), np.cos(thetas), np.sin(thetas)]).T
+    P = np.concatenate((P, [[1, 0, 0]]))
+    return np.roll(P, axis, axis=1)
+
+
+def _generate_prism(n, axis):
+    thetas = np.linspace(0, 2 * np.pi, n + 1)[:-1]
+    bottom = np.vstack([-np.ones(n), np.cos(thetas), np.sin(thetas)]).T
+    top = np.vstack([+np.ones(n), np.cos(thetas), np.sin(thetas)]).T
+    P = np.concatenate((bottom, top))
+    return np.roll(P, axis, axis=1)
+
+
+def _generate_icosahedron():
+    x = np.array([[0, -1, -phi],
+                  [0, -1, +phi],
+                  [0, +1, -phi],
+                  [0, +1, +phi]])
+    return np.concatenate([np.roll(x, i, axis=1) for i in range(3)])
+
+
+def _generate_octahedron():
+    return np.array([[-1, 0, 0], [+1, 0, 0], [0, -1, 0],
+                     [0, +1, 0], [0, 0, -1], [0, 0, +1]])
+
+
+def _generate_tetrahedron():
+    return np.array([[1, 1, 1], [1, -1, -1], [-1, 1, -1], [-1, -1, 1]])
+
+
+@pytest.mark.parametrize("name", [-1, None, True, np.array(['C3'])])
+def test_group_type(name):
+    with pytest.raises(ValueError,
+                       match="must be a string"):
+        Rotation.create_group(name)
+
+
+@pytest.mark.parametrize("name", ["Q", " ", "CA", "C ", "DA", "D ", "I2", ""])
+def test_group_name(name):
+    with pytest.raises(ValueError,
+                       match="must be one of 'I', 'O', 'T', 'Dn', 'Cn'"):
+        Rotation.create_group(name)
+
+
+@pytest.mark.parametrize("name", ["C0", "D0"])
+def test_group_order_positive(name):
+    with pytest.raises(ValueError,
+                       match="Group order must be positive"):
+        Rotation.create_group(name)
+
+
+@pytest.mark.parametrize("axis", ['A', 'b', 0, 1, 2, 4, False, None])
+def test_axis_valid(axis):
+    with pytest.raises(ValueError,
+                       match="`axis` must be one of"):
+        Rotation.create_group("C1", axis)
+
+
+def test_icosahedral():
+    """The icosahedral group fixes the rotations of an icosahedron. Here we
+    test that the icosahedron is invariant after application of the elements
+    of the rotation group."""
+    P = _generate_icosahedron()
+    for g in Rotation.create_group("I"):
+        g = Rotation.from_quat(g.as_quat())
+        assert _calculate_rmsd(P, g.apply(P)) < TOL
+
+
+def test_octahedral():
+    """Test that the octahedral group correctly fixes the rotations of an
+    octahedron."""
+    P = _generate_octahedron()
+    for g in Rotation.create_group("O"):
+        assert _calculate_rmsd(P, g.apply(P)) < TOL
+
+
+def test_tetrahedral():
+    """Test that the tetrahedral group correctly fixes the rotations of a
+    tetrahedron."""
+    P = _generate_tetrahedron()
+    for g in Rotation.create_group("T"):
+        assert _calculate_rmsd(P, g.apply(P)) < TOL
+
+
+@pytest.mark.parametrize("n", NS)
+@pytest.mark.parametrize("axis", 'XYZ')
+def test_dicyclic(n, axis):
+    """Test that the dicyclic group correctly fixes the rotations of a
+    prism."""
+    P = _generate_prism(n, axis='XYZ'.index(axis))
+    for g in Rotation.create_group("D%d" % n, axis=axis):
+        assert _calculate_rmsd(P, g.apply(P)) < TOL
+
+
+@pytest.mark.parametrize("n", NS)
+@pytest.mark.parametrize("axis", 'XYZ')
+def test_cyclic(n, axis):
+    """Test that the cyclic group correctly fixes the rotations of a
+    pyramid."""
+    P = _generate_pyramid(n, axis='XYZ'.index(axis))
+    for g in Rotation.create_group("C%d" % n, axis=axis):
+        assert _calculate_rmsd(P, g.apply(P)) < TOL
+
+
+@pytest.mark.parametrize("name, size", zip(NAMES, SIZES))
+def test_group_sizes(name, size):
+    assert len(Rotation.create_group(name)) == size
+
+
+@pytest.mark.parametrize("name, size", zip(NAMES, SIZES))
+def test_group_no_duplicates(name, size):
+    g = Rotation.create_group(name)
+    kdtree = cKDTree(g.as_quat())
+    assert len(kdtree.query_pairs(1E-3)) == 0
+
+
+@pytest.mark.parametrize("name, size", zip(NAMES, SIZES))
+def test_group_symmetry(name, size):
+    g = Rotation.create_group(name)
+    q = np.concatenate((-g.as_quat(), g.as_quat()))
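+    # A rotation group is homogeneous: with both quaternion signs
+    # included, the sorted distance profile from each element to all
+    # others must be the same for every row.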
+    distance = np.sort(cdist(q, q))
+    deltas = np.max(distance, axis=0) - np.min(distance, axis=0)
+    assert (deltas < TOL).all()
+
+
+@pytest.mark.parametrize("name", NAMES)
+def test_reduction(name):
+    """Test that the elements of the rotation group are correctly
+    mapped onto the identity rotation."""
+    g = Rotation.create_group(name)
+    f = g.reduce(g)
+    assert_array_almost_equal(f.magnitude(), np.zeros(len(g)))
+
+
+@pytest.mark.parametrize("name", NAMES)
+def test_single_reduction(name):
+    g = Rotation.create_group(name)
+    f = g[-1].reduce(g)
+    assert_array_almost_equal(f.magnitude(), 0)
+    assert f.as_quat().shape == (4,)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/spatial/transform/tests/test_rotation_spline.py b/__packaged__/coreml/.python_dependencies/scipy/spatial/transform/tests/test_rotation_spline.py
new file mode 100644
index 00000000..e39eccb8
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/spatial/transform/tests/test_rotation_spline.py
@@ -0,0 +1,161 @@
+from itertools import product
+import numpy as np
+from numpy.testing import assert_allclose
+from pytest import raises
+from scipy.spatial.transform import Rotation, RotationSpline
+from scipy.spatial.transform._rotation_spline import (
+    _angular_rate_to_rotvec_dot_matrix,
+    _rotvec_dot_to_angular_rate_matrix,
+    _matrix_vector_product_of_stacks,
+    _angular_acceleration_nonlinear_term,
+    _create_block_3_diagonal_matrix)
+
+
+def test_angular_rate_to_rotvec_conversions():
+    np.random.seed(0)
+    rv = np.random.randn(4, 3)
+    A = _angular_rate_to_rotvec_dot_matrix(rv)
+    A_inv = _rotvec_dot_to_angular_rate_matrix(rv)
+
+    # When the rotation vector is aligned with the angular rate, then
+    # the rotation vector rate and angular rate are the same.
+    assert_allclose(_matrix_vector_product_of_stacks(A, rv), rv)
+    assert_allclose(_matrix_vector_product_of_stacks(A_inv, rv), rv)
+
+    # A and A_inv must be reciprocal to each other.
+    I_stack = np.empty((4, 3, 3))
+    I_stack[:] = np.eye(3)
+    assert_allclose(np.matmul(A, A_inv), I_stack, atol=1e-15)
+
+
+def test_angular_rate_nonlinear_term():
+    # The only simple test is to check that the term is zero when the
+    # rotation vector and the angular rate are aligned.
+    np.random.seed(0)
+    rv = np.random.rand(4, 3)
+    assert_allclose(_angular_acceleration_nonlinear_term(rv, rv), 0,
+                    atol=1e-19)
+
+
+def test_create_block_3_diagonal_matrix():
+    np.random.seed(0)
+    A = np.empty((4, 3, 3))
+    A[:] = np.arange(1, 5)[:, None, None]
+
+    B = np.empty((4, 3, 3))
+    B[:] = -np.arange(1, 5)[:, None, None]
+    d = 10 * np.arange(10, 15)
+
+    banded = _create_block_3_diagonal_matrix(A, B, d)
+
+    # Convert the banded matrix to the full matrix.
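+    # In this banded layout (the one used by scipy.linalg.solve_banded
+    # with 5 sub- and 5 super-diagonals), entry banded[k, l] holds full
+    # matrix element (k - 5 + l, l); rows mapped outside the matrix are
+    # padding and are masked out below.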
+    k, l = list(zip(*product(np.arange(banded.shape[0]),
+                             np.arange(banded.shape[1]))))
+    k = np.asarray(k)
+    l = np.asarray(l)
+
+    i = k - 5 + l
+    j = l
+    values = banded.ravel()
+    mask = (i >= 0) & (i < 15)
+    i = i[mask]
+    j = j[mask]
+    values = values[mask]
+    full = np.zeros((15, 15))
+    full[i, j] = values
+
+    zero = np.zeros((3, 3))
+    eye = np.eye(3)
+
+    # Create the reference full matrix in the most straightforward manner.
+    ref = np.block([
+        [d[0] * eye, B[0], zero, zero, zero],
+        [A[0], d[1] * eye, B[1], zero, zero],
+        [zero, A[1], d[2] * eye, B[2], zero],
+        [zero, zero, A[2], d[3] * eye, B[3]],
+        [zero, zero, zero, A[3], d[4] * eye],
+    ])
+
+    assert_allclose(full, ref, atol=1e-19)
+
+
+def test_spline_2_rotations():
+    times = [0, 10]
+    rotations = Rotation.from_euler('xyz', [[0, 0, 0], [10, -20, 30]],
+                                    degrees=True)
+    spline = RotationSpline(times, rotations)
+
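+    # With only two keyframes the spline degenerates to a constant
+    # angular rate: rotation vectors grow linearly in time (including
+    # extrapolation beyond the keyframes) and angular acceleration is 0.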
+    rv = (rotations[0].inv() * rotations[1]).as_rotvec()
+    rate = rv / (times[1] - times[0])
+    times_check = np.array([-1, 5, 12])
+    dt = times_check - times[0]
+    rv_ref = rate * dt[:, None]
+
+    assert_allclose(spline(times_check).as_rotvec(), rv_ref)
+    assert_allclose(spline(times_check, 1), np.resize(rate, (3, 3)))
+    assert_allclose(spline(times_check, 2), 0, atol=1e-16)
+
+
+def test_constant_attitude():
+    times = np.arange(10)
+    rotations = Rotation.from_rotvec(np.ones((10, 3)))
+    spline = RotationSpline(times, rotations)
+
+    times_check = np.linspace(-1, 11)
+    assert_allclose(spline(times_check).as_rotvec(), 1, rtol=1e-15)
+    assert_allclose(spline(times_check, 1), 0, atol=1e-17)
+    assert_allclose(spline(times_check, 2), 0, atol=1e-17)
+
+    assert_allclose(spline(5.5).as_rotvec(), 1, rtol=1e-15)
+    assert_allclose(spline(5.5, 1), 0, atol=1e-17)
+    assert_allclose(spline(5.5, 2), 0, atol=1e-17)
+
+
+def test_spline_properties():
+    times = np.array([0, 5, 15, 27])
+    angles = [[-5, 10, 27], [3, 5, 38], [-12, 10, 25], [-15, 20, 11]]
+
+    rotations = Rotation.from_euler('xyz', angles, degrees=True)
+    spline = RotationSpline(times, rotations)
+
+    assert_allclose(spline(times).as_euler('xyz', degrees=True), angles)
+    assert_allclose(spline(0).as_euler('xyz', degrees=True), angles[0])
+
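+    # Smoothness checks with symmetric finite differences: the spline's
+    # value and angular rate should equal the average of samples taken at
+    # t - h and t + h; angular acceleration is compared one-sidedly with
+    # a looser tolerance below.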
+    h = 1e-8
+    rv0 = spline(times).as_rotvec()
+    rvm = spline(times - h).as_rotvec()
+    rvp = spline(times + h).as_rotvec()
+    assert_allclose(rv0, 0.5 * (rvp + rvm), rtol=1e-15)
+
+    r0 = spline(times, 1)
+    rm = spline(times - h, 1)
+    rp = spline(times + h, 1)
+    assert_allclose(r0, 0.5 * (rm + rp), rtol=1e-14)
+
+    a0 = spline(times, 2)
+    am = spline(times - h, 2)
+    ap = spline(times + h, 2)
+    assert_allclose(a0, am, rtol=1e-7)
+    assert_allclose(a0, ap, rtol=1e-7)
+
+
+def test_error_handling():
+    raises(ValueError, RotationSpline, [1.0], Rotation.random())
+
+    r = Rotation.random(10)
+    t = np.arange(10).reshape(5, 2)
+    raises(ValueError, RotationSpline, t, r)
+
+    t = np.arange(9)
+    raises(ValueError, RotationSpline, t, r)
+
+    t = np.arange(10)
+    t[5] = 0
+    raises(ValueError, RotationSpline, t, r)
+
+    t = np.arange(10)
+
+    s = RotationSpline(t, r)
+    raises(ValueError, s, 10, -1)
+
+    raises(ValueError, s, np.arange(10).reshape(5, 2))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special.pxd b/__packaged__/coreml/.python_dependencies/scipy/special.pxd
new file mode 100644
index 00000000..62cb8280
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special.pxd
@@ -0,0 +1 @@
+from .special cimport cython_special
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/special/__init__.py
new file mode 100644
index 00000000..b713d8b8
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/__init__.py
@@ -0,0 +1,710 @@
+"""
+========================================
+Special functions (:mod:`scipy.special`)
+========================================
+
+.. currentmodule:: scipy.special
+
+Almost all of the functions below accept NumPy arrays as input
+arguments as well as single numbers. This means they follow
+broadcasting and automatic array-looping rules. Technically,
+they are `NumPy universal functions
+`_.
+Functions which do not accept NumPy arrays are marked by a warning
+in the section description.
+
+.. seealso::
+
+   `scipy.special.cython_special` -- Typed Cython versions of special functions
+
+
+Error handling
+==============
+
+Errors are handled by returning NaNs or other appropriate values.
+Some of the special function routines can emit warnings or raise
+exceptions when an error occurs. By default this is disabled; to
+query and control the current error handling state the following
+functions are provided.
+
+.. autosummary::
+   :toctree: generated/
+
+   geterr                 -- Get the current way of handling special-function errors.
+   seterr                 -- Set how special-function errors are handled.
+   errstate               -- Context manager for special-function error handling.
+   SpecialFunctionWarning -- Warning that can be emitted by special functions.
+   SpecialFunctionError   -- Exception that can be raised by special functions.
+
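+For example, `errstate` can be used as a context manager to turn a class
+of special-function errors into exceptions (a minimal sketch; the
+``singular`` category and `gammaln` are chosen purely for illustration)::
+
+    >>> import scipy.special as sc
+    >>> sc.gammaln(0)  # the pole at 0 silently yields inf by default
+    inf
+    >>> with sc.errstate(singular='raise'):
+    ...     sc.gammaln(0)  # now raises SpecialFunctionError
+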
+Available functions
+===================
+
+Airy functions
+--------------
+
+.. autosummary::
+   :toctree: generated/
+
+   airy     -- Airy functions and their derivatives.
+   airye    -- Exponentially scaled Airy functions and their derivatives.
+   ai_zeros -- Compute `nt` zeros and values of the Airy function Ai and its derivative.
+   bi_zeros -- Compute `nt` zeros and values of the Airy function Bi and its derivative.
+   itairy   -- Integrals of Airy functions
+
+
+Elliptic functions and integrals
+--------------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   ellipj    -- Jacobian elliptic functions.
+   ellipk    -- Complete elliptic integral of the first kind.
+   ellipkm1  -- Complete elliptic integral of the first kind around `m` = 1.
+   ellipkinc -- Incomplete elliptic integral of the first kind.
+   ellipe    -- Complete elliptic integral of the second kind.
+   ellipeinc -- Incomplete elliptic integral of the second kind.
+   elliprc   -- Degenerate symmetric integral RC.
+   elliprd   -- Symmetric elliptic integral of the second kind.
+   elliprf   -- Completely-symmetric elliptic integral of the first kind.
+   elliprg   -- Completely-symmetric elliptic integral of the second kind.
+   elliprj   -- Symmetric elliptic integral of the third kind.
+
+Bessel functions
+----------------
+
+.. autosummary::
+   :toctree: generated/
+
+   jv            -- Bessel function of the first kind of real order and \
+                    complex argument.
+   jve           -- Exponentially scaled Bessel function of order `v`.
+   yn            -- Bessel function of the second kind of integer order and \
+                    real argument.
+   yv            -- Bessel function of the second kind of real order and \
+                    complex argument.
+   yve           -- Exponentially scaled Bessel function of the second kind \
+                    of real order.
+   kn            -- Modified Bessel function of the second kind of integer \
+                    order `n`.
+   kv            -- Modified Bessel function of the second kind of real order \
+                    `v`.
+   kve           -- Exponentially scaled modified Bessel function of the \
+                    second kind.
+   iv            -- Modified Bessel function of the first kind of real order.
+   ive           -- Exponentially scaled modified Bessel function of the \
+                    first kind.
+   hankel1       -- Hankel function of the first kind.
+   hankel1e      -- Exponentially scaled Hankel function of the first kind.
+   hankel2       -- Hankel function of the second kind.
+   hankel2e      -- Exponentially scaled Hankel function of the second kind.
+   wright_bessel -- Wright's generalized Bessel function.
+
+The following function does not accept NumPy arrays (it is not a
+universal function):
+
+.. autosummary::
+   :toctree: generated/
+
+   lmbda -- Jahnke-Emden Lambda function, Lambdav(x).
+
+Zeros of Bessel functions
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The following functions do not accept NumPy arrays (they are not
+universal functions):
+
+.. autosummary::
+   :toctree: generated/
+
+   jnjnp_zeros -- Compute zeros of integer-order Bessel functions Jn and Jn'.
+   jnyn_zeros  -- Compute nt zeros of Bessel functions Jn(x), Jn'(x), Yn(x), and Yn'(x).
+   jn_zeros    -- Compute zeros of integer-order Bessel function Jn(x).
+   jnp_zeros   -- Compute zeros of integer-order Bessel function derivative Jn'(x).
+   yn_zeros    -- Compute zeros of integer-order Bessel function Yn(x).
+   ynp_zeros   -- Compute zeros of integer-order Bessel function derivative Yn'(x).
+   y0_zeros    -- Compute nt zeros of Bessel function Y0(z), and derivative at each zero.
+   y1_zeros    -- Compute nt zeros of Bessel function Y1(z), and derivative at each zero.
+   y1p_zeros   -- Compute nt zeros of Bessel derivative Y1'(z), and value at each zero.
+
+Faster versions of common Bessel functions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. autosummary::
+   :toctree: generated/
+
+   j0  -- Bessel function of the first kind of order 0.
+   j1  -- Bessel function of the first kind of order 1.
+   y0  -- Bessel function of the second kind of order 0.
+   y1  -- Bessel function of the second kind of order 1.
+   i0  -- Modified Bessel function of order 0.
+   i0e -- Exponentially scaled modified Bessel function of order 0.
+   i1  -- Modified Bessel function of order 1.
+   i1e -- Exponentially scaled modified Bessel function of order 1.
+   k0  -- Modified Bessel function of the second kind of order 0, :math:`K_0`.
+   k0e -- Exponentially scaled modified Bessel function K of order 0.
+   k1  -- Modified Bessel function of the second kind of order 1, :math:`K_1(x)`.
+   k1e -- Exponentially scaled modified Bessel function K of order 1.
+
+Integrals of Bessel functions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. autosummary::
+   :toctree: generated/
+
+   itj0y0     -- Integrals of Bessel functions of order 0.
+   it2j0y0    -- Integrals related to Bessel functions of order 0.
+   iti0k0     -- Integrals of modified Bessel functions of order 0.
+   it2i0k0    -- Integrals related to modified Bessel functions of order 0.
+   besselpoly -- Weighted integral of a Bessel function.
+
+Derivatives of Bessel functions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. autosummary::
+   :toctree: generated/
+
+   jvp  -- Compute nth derivative of Bessel function Jv(z) with respect to `z`.
+   yvp  -- Compute nth derivative of Bessel function Yv(z) with respect to `z`.
+   kvp  -- Compute nth derivative of real-order modified Bessel function Kv(z)
+   ivp  -- Compute nth derivative of modified Bessel function Iv(z) with respect to `z`.
+   h1vp -- Compute nth derivative of Hankel function H1v(z) with respect to `z`.
+   h2vp -- Compute nth derivative of Hankel function H2v(z) with respect to `z`.
+
+Spherical Bessel functions
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. autosummary::
+   :toctree: generated/
+
+   spherical_jn -- Spherical Bessel function of the first kind or its derivative.
+   spherical_yn -- Spherical Bessel function of the second kind or its derivative.
+   spherical_in -- Modified spherical Bessel function of the first kind or its derivative.
+   spherical_kn -- Modified spherical Bessel function of the second kind or its derivative.
+
+Riccati-Bessel functions
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+The following functions do not accept NumPy arrays (they are not
+universal functions):
+
+.. autosummary::
+   :toctree: generated/
+
+   riccati_jn -- Compute Riccati-Bessel function of the first kind and its derivative.
+   riccati_yn -- Compute Riccati-Bessel function of the second kind and its derivative.
+
+Struve functions
+----------------
+
+.. autosummary::
+   :toctree: generated/
+
+   struve       -- Struve function.
+   modstruve    -- Modified Struve function.
+   itstruve0    -- Integral of the Struve function of order 0.
+   it2struve0   -- Integral related to the Struve function of order 0.
+   itmodstruve0 -- Integral of the modified Struve function of order 0.
+
+
+Raw statistical functions
+-------------------------
+
+.. seealso:: :mod:`scipy.stats`: Friendly versions of these functions.
+
+.. autosummary::
+   :toctree: generated/
+
+   bdtr         -- Binomial distribution cumulative distribution function.
+   bdtrc        -- Binomial distribution survival function.
+   bdtri        -- Inverse function to `bdtr` with respect to `p`.
+   bdtrik       -- Inverse function to `bdtr` with respect to `k`.
+   bdtrin       -- Inverse function to `bdtr` with respect to `n`.
+   btdtr        -- Cumulative distribution function of the beta distribution.
+   btdtri       -- The `p`-th quantile of the beta distribution.
+   btdtria      -- Inverse of `btdtr` with respect to `a`.
+   btdtrib      -- Inverse of `btdtr` with respect to `b`.
+   fdtr         -- F cumulative distribution function.
+   fdtrc        -- F survival function.
+   fdtri        -- The `p`-th quantile of the F-distribution.
+   fdtridfd     -- Inverse to `fdtr` vs dfd.
+   gdtr         -- Gamma distribution cumulative distribution function.
+   gdtrc        -- Gamma distribution survival function.
+   gdtria       -- Inverse of `gdtr` vs a.
+   gdtrib       -- Inverse of `gdtr` vs b.
+   gdtrix       -- Inverse of `gdtr` vs x.
+   nbdtr        -- Negative binomial cumulative distribution function.
+   nbdtrc       -- Negative binomial survival function.
+   nbdtri       -- Inverse of `nbdtr` vs `p`.
+   nbdtrik      -- Inverse of `nbdtr` vs `k`.
+   nbdtrin      -- Inverse of `nbdtr` vs `n`.
+   ncfdtr       -- Cumulative distribution function of the non-central F distribution.
+   ncfdtridfd   -- Calculate degrees of freedom (denominator) for the noncentral F-distribution.
+   ncfdtridfn   -- Calculate degrees of freedom (numerator) for the noncentral F-distribution.
+   ncfdtri      -- Inverse cumulative distribution function of the non-central F distribution.
+   ncfdtrinc    -- Calculate non-centrality parameter for non-central F distribution.
+   nctdtr       -- Cumulative distribution function of the non-central `t` distribution.
+   nctdtridf    -- Calculate degrees of freedom for non-central t distribution.
+   nctdtrit     -- Inverse cumulative distribution function of the non-central t distribution.
+   nctdtrinc    -- Calculate non-centrality parameter for non-central t distribution.
+   nrdtrimn     -- Calculate mean of normal distribution given other params.
+   nrdtrisd     -- Calculate standard deviation of normal distribution given other params.
+   pdtr         -- Poisson cumulative distribution function.
+   pdtrc        -- Poisson survival function.
+   pdtri        -- Inverse to `pdtr` vs m.
+   pdtrik       -- Inverse to `pdtr` vs k.
+   stdtr        -- Student t distribution cumulative distribution function.
+   stdtridf     -- Inverse of `stdtr` vs df.
+   stdtrit      -- Inverse of `stdtr` vs `t`.
+   chdtr        -- Chi square cumulative distribution function.
+   chdtrc       -- Chi square survival function.
+   chdtri       -- Inverse to `chdtrc`.
+   chdtriv      -- Inverse to `chdtr` vs `v`.
+   ndtr         -- Gaussian cumulative distribution function.
+   log_ndtr     -- Logarithm of Gaussian cumulative distribution function.
+   ndtri        -- Inverse of `ndtr` vs x.
+   ndtri_exp    -- Inverse of `log_ndtr` vs x.
+   chndtr       -- Non-central chi square cumulative distribution function.
+   chndtridf    -- Inverse to `chndtr` vs `df`.
+   chndtrinc    -- Inverse to `chndtr` vs `nc`.
+   chndtrix     -- Inverse to `chndtr` vs `x`.
+   smirnov      -- Kolmogorov-Smirnov complementary cumulative distribution function.
+   smirnovi     -- Inverse to `smirnov`.
+   kolmogorov   -- Complementary cumulative distribution function of Kolmogorov distribution.
+   kolmogi      -- Inverse function to `kolmogorov`.
+   tklmbda      -- Tukey-Lambda cumulative distribution function.
+   logit        -- Logit ufunc for ndarrays.
+   expit        -- Logistic sigmoid function.
+   log_expit    -- Logarithm of the logistic sigmoid function.
+   boxcox       -- Compute the Box-Cox transformation.
+   boxcox1p     -- Compute the Box-Cox transformation of 1 + `x`.
+   inv_boxcox   -- Compute the inverse of the Box-Cox transformation.
+   inv_boxcox1p -- Compute the inverse of the Box-Cox transformation.
+   owens_t      -- Owen's T Function.
+
+
+Information Theory functions
+----------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   entr         -- Elementwise function for computing entropy.
+   rel_entr     -- Elementwise function for computing relative entropy.
+   kl_div       -- Elementwise function for computing Kullback-Leibler divergence.
+   huber        -- Huber loss function.
+   pseudo_huber -- Pseudo-Huber loss function.
+
+
+Gamma and related functions
+---------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   gamma        -- Gamma function.
+   gammaln      -- Logarithm of the absolute value of the Gamma function for real inputs.
+   loggamma     -- Principal branch of the logarithm of the Gamma function.
+   gammasgn     -- Sign of the gamma function.
+   gammainc     -- Regularized lower incomplete gamma function.
+   gammaincinv  -- Inverse to `gammainc`.
+   gammaincc    -- Regularized upper incomplete gamma function.
+   gammainccinv -- Inverse to `gammaincc`.
+   beta         -- Beta function.
+   betaln       -- Natural logarithm of absolute value of beta function.
+   betainc      -- Incomplete beta integral.
+   betaincinv   -- Inverse function to beta integral.
+   psi          -- The digamma function.
+   rgamma       -- Reciprocal of the gamma function, ``1/gamma(x)``.
+   polygamma    -- Polygamma function n.
+   multigammaln -- Returns the log of multivariate gamma, also sometimes called the generalized gamma.
+   digamma      -- The digamma function (an alias of `psi`).
+   poch         -- Rising factorial (z)_m.
+
+
+Error function and Fresnel integrals
+------------------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   erf           -- Returns the error function of complex argument.
+   erfc          -- Complementary error function, ``1 - erf(x)``.
+   erfcx         -- Scaled complementary error function, ``exp(x**2) * erfc(x)``.
+   erfi          -- Imaginary error function, ``-i erf(i z)``.
+   erfinv        -- Inverse function for erf.
+   erfcinv       -- Inverse function for erfc.
+   wofz          -- Faddeeva function.
+   dawsn         -- Dawson's integral.
+   fresnel       -- Fresnel sin and cos integrals.
+   fresnel_zeros -- Compute nt complex zeros of sine and cosine Fresnel integrals S(z) and C(z).
+   modfresnelp   -- Modified Fresnel positive integrals.
+   modfresnelm   -- Modified Fresnel negative integrals.
+   voigt_profile -- Voigt profile.
+
+The following functions do not accept NumPy arrays (they are not
+universal functions):
+
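+    # Uniformly scaling the weights must leave the optimal rotation and
+    # the sensitivity matrix unchanged, while the weighted rmsd scales
+    # with the square root of the scale factor.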
+.. autosummary::
+   :toctree: generated/
+
+   erf_zeros      -- Compute nt complex zeros of error function erf(z).
+   fresnelc_zeros -- Compute nt complex zeros of cosine Fresnel integral C(z).
+   fresnels_zeros -- Compute nt complex zeros of sine Fresnel integral S(z).
+
+Legendre functions
+------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   lpmv     -- Associated Legendre function of integer order and real degree.
+   sph_harm -- Compute spherical harmonics.
+
+The following functions do not accept NumPy arrays (they are not
+universal functions):
+
+.. autosummary::
+   :toctree: generated/
+
+   clpmn -- Associated Legendre function of the first kind for complex arguments.
+   lpn   -- Legendre function of the first kind.
+   lqn   -- Legendre function of the second kind.
+   lpmn  -- Sequence of associated Legendre functions of the first kind.
+   lqmn  -- Sequence of associated Legendre functions of the second kind.
+
+Ellipsoidal harmonics
+---------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   ellip_harm   -- Ellipsoidal harmonic functions E^p_n(l).
+   ellip_harm_2 -- Ellipsoidal harmonic functions F^p_n(l).
+   ellip_normal -- Ellipsoidal harmonic normalization constants gamma^p_n.
+
+Orthogonal polynomials
+----------------------
+
+The following functions evaluate values of orthogonal polynomials:
+
+.. autosummary::
+   :toctree: generated/
+
+   assoc_laguerre   -- Compute the generalized (associated) Laguerre polynomial of degree n and order k.
+   eval_legendre    -- Evaluate Legendre polynomial at a point.
+   eval_chebyt      -- Evaluate Chebyshev polynomial of the first kind at a point.
+   eval_chebyu      -- Evaluate Chebyshev polynomial of the second kind at a point.
+   eval_chebyc      -- Evaluate Chebyshev polynomial of the first kind on [-2, 2] at a point.
+   eval_chebys      -- Evaluate Chebyshev polynomial of the second kind on [-2, 2] at a point.
+   eval_jacobi      -- Evaluate Jacobi polynomial at a point.
+   eval_laguerre    -- Evaluate Laguerre polynomial at a point.
+   eval_genlaguerre -- Evaluate generalized Laguerre polynomial at a point.
+   eval_hermite     -- Evaluate physicist's Hermite polynomial at a point.
+   eval_hermitenorm -- Evaluate probabilist's (normalized) Hermite polynomial at a point.
+   eval_gegenbauer  -- Evaluate Gegenbauer polynomial at a point.
+   eval_sh_legendre -- Evaluate shifted Legendre polynomial at a point.
+   eval_sh_chebyt   -- Evaluate shifted Chebyshev polynomial of the first kind at a point.
+   eval_sh_chebyu   -- Evaluate shifted Chebyshev polynomial of the second kind at a point.
+   eval_sh_jacobi   -- Evaluate shifted Jacobi polynomial at a point.
+
+The following functions compute roots and quadrature weights for
+orthogonal polynomials:
+
+.. autosummary::
+   :toctree: generated/
+
+   roots_legendre    -- Gauss-Legendre quadrature.
+   roots_chebyt      -- Gauss-Chebyshev (first kind) quadrature.
+   roots_chebyu      -- Gauss-Chebyshev (second kind) quadrature.
+   roots_chebyc      -- Gauss-Chebyshev (first kind) quadrature.
+   roots_chebys      -- Gauss-Chebyshev (second kind) quadrature.
+   roots_jacobi      -- Gauss-Jacobi quadrature.
+   roots_laguerre    -- Gauss-Laguerre quadrature.
+   roots_genlaguerre -- Gauss-generalized Laguerre quadrature.
+   roots_hermite     -- Gauss-Hermite (physicst's) quadrature.
+   roots_hermitenorm -- Gauss-Hermite (statistician's) quadrature.
+   roots_gegenbauer  -- Gauss-Gegenbauer quadrature.
+   roots_sh_legendre -- Gauss-Legendre (shifted) quadrature.
+   roots_sh_chebyt   -- Gauss-Chebyshev (first kind, shifted) quadrature.
+   roots_sh_chebyu   -- Gauss-Chebyshev (second kind, shifted) quadrature.
+   roots_sh_jacobi   -- Gauss-Jacobi (shifted) quadrature.
+
+The functions below, in turn, return the polynomial coefficients in
+``orthopoly1d`` objects, which function similarly to `numpy.poly1d`.
+The ``orthopoly1d`` class also has an attribute ``weights``, which returns
+the roots, weights, and total weights for the appropriate form of Gaussian
+quadrature. These are returned in an ``n x 3`` array with roots in the first
+column, weights in the second column, and total weights in the final column.
+Note that ``orthopoly1d`` objects are converted to `~numpy.poly1d` when doing
+arithmetic, and lose information about the original orthogonal polynomial.
+
+.. autosummary::
+   :toctree: generated/
+
+   legendre    -- Legendre polynomial.
+   chebyt      -- Chebyshev polynomial of the first kind.
+   chebyu      -- Chebyshev polynomial of the second kind.
+   chebyc      -- Chebyshev polynomial of the first kind on :math:`[-2, 2]`.
+   chebys      -- Chebyshev polynomial of the second kind on :math:`[-2, 2]`.
+   jacobi      -- Jacobi polynomial.
+   laguerre    -- Laguerre polynomial.
+   genlaguerre -- Generalized (associated) Laguerre polynomial.
+   hermite     -- Physicist's Hermite polynomial.
+   hermitenorm -- Normalized (probabilist's) Hermite polynomial.
+   gegenbauer  -- Gegenbauer (ultraspherical) polynomial.
+   sh_legendre -- Shifted Legendre polynomial.
+   sh_chebyt   -- Shifted Chebyshev polynomial of the first kind.
+   sh_chebyu   -- Shifted Chebyshev polynomial of the second kind.
+   sh_jacobi   -- Shifted Jacobi polynomial.
+
+.. warning::
+
+   Computing values of high-order polynomials (around ``order > 20``) using
+   polynomial coefficients is numerically unstable. To evaluate polynomial
+   values, the ``eval_*`` functions should be used instead.
+
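+For example, :math:`P_3(x) = (5x^3 - 3x)/2`, so the stable evaluator should
+return :math:`P_3(0.5) = -0.4375` (a minimal check)::
+
+    >>> from scipy.special import eval_legendre
+    >>> eval_legendre(3, 0.5)
+    -0.4375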
+
+Hypergeometric functions
+------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   hyp2f1 -- Gauss hypergeometric function 2F1(a, b; c; z).
+   hyp1f1 -- Confluent hypergeometric function 1F1(a, b; x).
+   hyperu -- Confluent hypergeometric function U(a, b, x) of the second kind.
+   hyp0f1 -- Confluent hypergeometric limit function 0F1.
+
+
+Parabolic cylinder functions
+----------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   pbdv -- Parabolic cylinder function D.
+   pbvv -- Parabolic cylinder function V.
+   pbwa -- Parabolic cylinder function W.
+
+The following functions do not accept NumPy arrays (they are not
+universal functions):
+
+.. autosummary::
+   :toctree: generated/
+
+   pbdv_seq -- Parabolic cylinder functions Dv(x) and derivatives.
+   pbvv_seq -- Parabolic cylinder functions Vv(x) and derivatives.
+   pbdn_seq -- Parabolic cylinder functions Dn(z) and derivatives.
+
+Mathieu and related functions
+-----------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   mathieu_a -- Characteristic value of even Mathieu functions.
+   mathieu_b -- Characteristic value of odd Mathieu functions.
+
+The following functions do not accept NumPy arrays (they are not
+universal functions):
+
+.. autosummary::
+   :toctree: generated/
+
+   mathieu_even_coef -- Fourier coefficients for even Mathieu and modified Mathieu functions.
+   mathieu_odd_coef  -- Fourier coefficients for odd Mathieu and modified Mathieu functions.
+
+The following return both the function and its first derivative:
+
+.. autosummary::
+   :toctree: generated/
+
+   mathieu_cem     -- Even Mathieu function and its derivative.
+   mathieu_sem     -- Odd Mathieu function and its derivative.
+   mathieu_modcem1 -- Even modified Mathieu function of the first kind and its derivative.
+   mathieu_modcem2 -- Even modified Mathieu function of the second kind and its derivative.
+   mathieu_modsem1 -- Odd modified Mathieu function of the first kind and its derivative.
+   mathieu_modsem2 -- Odd modified Mathieu function of the second kind and its derivative.
+
+Spheroidal wave functions
+-------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   pro_ang1   -- Prolate spheroidal angular function of the first kind and its derivative.
+   pro_rad1   -- Prolate spheroidal radial function of the first kind and its derivative.
+   pro_rad2   -- Prolate spheroidal radial function of the second kind and its derivative.
+   obl_ang1   -- Oblate spheroidal angular function of the first kind and its derivative.
+   obl_rad1   -- Oblate spheroidal radial function of the first kind and its derivative.
+   obl_rad2   -- Oblate spheroidal radial function of the second kind and its derivative.
+   pro_cv     -- Characteristic value of prolate spheroidal function.
+   obl_cv     -- Characteristic value of oblate spheroidal function.
+   pro_cv_seq -- Characteristic values for prolate spheroidal wave functions.
+   obl_cv_seq -- Characteristic values for oblate spheroidal wave functions.
+
+The following functions require a pre-computed characteristic value:
+
+.. autosummary::
+   :toctree: generated/
+
+   pro_ang1_cv -- Prolate spheroidal angular function pro_ang1 for precomputed characteristic value.
+   pro_rad1_cv -- Prolate spheroidal radial function pro_rad1 for precomputed characteristic value.
+   pro_rad2_cv -- Prolate spheroidal radial function pro_rad2 for precomputed characteristic value.
+   obl_ang1_cv -- Oblate spheroidal angular function obl_ang1 for precomputed characteristic value.
+   obl_rad1_cv -- Oblate spheroidal radial function obl_rad1 for precomputed characteristic value.
+   obl_rad2_cv -- Oblate spheroidal radial function obl_rad2 for precomputed characteristic value.
+
+Kelvin functions
+----------------
+
+.. autosummary::
+   :toctree: generated/
+
+   kelvin       -- Kelvin functions as complex numbers.
+   kelvin_zeros -- Compute nt zeros of all Kelvin functions.
+   ber          -- Kelvin function ber.
+   bei          -- Kelvin function bei.
+   berp         -- Derivative of the Kelvin function `ber`.
+   beip         -- Derivative of the Kelvin function `bei`.
+   ker          -- Kelvin function ker.
+   kei          -- Kelvin function kei.
+   kerp         -- Derivative of the Kelvin function `ker`.
+   keip         -- Derivative of the Kelvin function `kei`.
+
+The following functions do not accept NumPy arrays (they are not
+universal functions):
+
+.. autosummary::
+   :toctree: generated/
+
+   ber_zeros  -- Compute nt zeros of the Kelvin function ber(x).
+   bei_zeros  -- Compute nt zeros of the Kelvin function bei(x).
+   berp_zeros -- Compute nt zeros of the Kelvin function ber'(x).
+   beip_zeros -- Compute nt zeros of the Kelvin function bei'(x).
+   ker_zeros  -- Compute nt zeros of the Kelvin function ker(x).
+   kei_zeros  -- Compute nt zeros of the Kelvin function kei(x).
+   kerp_zeros -- Compute nt zeros of the Kelvin function ker'(x).
+   keip_zeros -- Compute nt zeros of the Kelvin function kei'(x).
+
+Combinatorics
+-------------
+
+.. autosummary::
+   :toctree: generated/
+
+   comb -- The number of combinations of N things taken k at a time.
+   perm -- Permutations of N things taken k at a time, i.e., k-permutations of N.
+
+Lambert W and related functions
+-------------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   lambertw    -- Lambert W function.
+   wrightomega -- Wright Omega function.
+
+Other special functions
+-----------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   agm         -- Arithmetic, Geometric Mean.
+   bernoulli   -- Bernoulli numbers B0..Bn (inclusive).
+   binom       -- Binomial coefficient.
+   diric       -- Periodic sinc function, also called the Dirichlet function.
+   euler       -- Euler numbers E0..En (inclusive).
+   expn        -- Exponential integral E_n.
+   exp1        -- Exponential integral E_1 of complex argument z.
+   expi        -- Exponential integral Ei.
+   factorial   -- The factorial of a number or array of numbers.
+   factorial2  -- Double factorial.
+   factorialk  -- Multifactorial of n of order k, n(!!...!).
+   shichi      -- Hyperbolic sine and cosine integrals.
+   sici        -- Sine and cosine integrals.
+   softmax     -- Softmax function.
+   log_softmax -- Logarithm of softmax function.
+   spence      -- Spence's function, also known as the dilogarithm.
+   zeta        -- Riemann zeta function.
+   zetac       -- Riemann zeta function minus 1.
+
+Convenience functions
+---------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   cbrt      -- Cube root of `x`.
+   exp10     -- 10**x.
+   exp2      -- 2**x.
+   radian    -- Convert from degrees to radians.
+   cosdg     -- Cosine of the angle `x` given in degrees.
+   sindg     -- Sine of angle given in degrees.
+   tandg     -- Tangent of angle x given in degrees.
+   cotdg     -- Cotangent of the angle `x` given in degrees.
+   log1p     -- Calculates log(1+x) for use when `x` is near zero.
+   expm1     -- ``exp(x) - 1`` for use when `x` is near zero.
+   cosm1     -- ``cos(x) - 1`` for use when `x` is near zero.
+   powm1     -- ``x**y - 1`` for use when `y` is near zero or `x` is near 1.
+   round     -- Round to nearest integer.
+   xlogy     -- Compute ``x*log(y)`` so that the result is 0 if ``x = 0``.
+   xlog1py   -- Compute ``x*log1p(y)`` so that the result is 0 if ``x = 0``.
+   logsumexp -- Compute the log of the sum of exponentials of input elements.
+   exprel    -- Relative error exponential, (exp(x)-1)/x, for use when `x` is near zero.
+   sinc      -- Return the sinc function.
+
+"""
+
+from ._sf_error import SpecialFunctionWarning, SpecialFunctionError
+
+from . import _ufuncs
+from ._ufuncs import *
+
+from . import _basic
+from ._basic import *
+
+from ._logsumexp import logsumexp, softmax, log_softmax
+
+from . import _orthogonal
+from ._orthogonal import *
+
+from ._spfun_stats import multigammaln
+from ._ellip_harm import (
+    ellip_harm,
+    ellip_harm_2,
+    ellip_normal
+)
+from ._lambertw import lambertw
+from ._spherical_bessel import (
+    spherical_jn,
+    spherical_yn,
+    spherical_in,
+    spherical_kn
+)
+
+# Deprecated namespaces, to be removed in v2.0.0
+from . import add_newdocs, basic, orthogonal, specfun, sf_error, spfun_stats
+
+__all__ = _ufuncs.__all__ + _basic.__all__ + _orthogonal.__all__ + [
+    'SpecialFunctionWarning',
+    'SpecialFunctionError',
+    'logsumexp',
+    'softmax',
+    'log_softmax',
+    'multigammaln',
+    'ellip_harm',
+    'ellip_harm_2',
+    'ellip_normal',
+    'lambertw',
+    'spherical_jn',
+    'spherical_yn',
+    'spherical_in',
+    'spherical_kn',
+]
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/_add_newdocs.py b/__packaged__/coreml/.python_dependencies/scipy/special/_add_newdocs.py
new file mode 100644
index 00000000..67bc6af8
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/_add_newdocs.py
@@ -0,0 +1,13639 @@
+# Docstrings for generated ufuncs
+#
+# The syntax is designed to look like the function add_newdoc is being
+# called from numpy.lib, but in this file add_newdoc puts the
+# docstrings in a dictionary. This dictionary is used in
+# _generate_pyx.py to generate the docstrings for the ufuncs in
+# scipy.special at the C level when the ufuncs are created at compile
+# time.
+from typing import Dict
+
+docdict: Dict[str, str] = {}
+
+
+def get(name):
+    return docdict.get(name)
+
+
+def add_newdoc(name, doc):
+    docdict[name] = doc
+
+
+add_newdoc("_sf_error_test_function",
+    """
+    Private function; do not use.
+    """)
+
+
+add_newdoc("_cosine_cdf",
+    """
+    _cosine_cdf(x)
+
+    Cumulative distribution function (CDF) of the cosine distribution::
+
+                 {             0,              x < -pi
+        cdf(x) = { (pi + x + sin(x))/(2*pi),   -pi <= x <= pi
+                 {             1,              x > pi
+
+    Parameters
+    ----------
+    x : array_like
+        `x` must contain real numbers.
+
+    Returns
+    -------
+    scalar or ndarray
+        The cosine distribution CDF evaluated at `x`.
+
+    """)
+
+add_newdoc("_cosine_invcdf",
+    """
+    _cosine_invcdf(p)
+
+    Inverse of the cumulative distribution function (CDF) of the cosine
+    distribution.
+
+    The CDF of the cosine distribution is::
+
+        cdf(x) = (pi + x + sin(x))/(2*pi)
+
+    This function computes the inverse of cdf(x).
+
+    Parameters
+    ----------
+    p : array_like
+        `p` must contain real numbers in the interval ``0 <= p <= 1``.
+        `nan` is returned for values of `p` outside the interval [0, 1].
+
+    Returns
+    -------
+    scalar or ndarray
+        The inverse of the cosine distribution CDF evaluated at `p`.
+
+    """)
+
+add_newdoc("sph_harm",
+    r"""
+    sph_harm(m, n, theta, phi, out=None)
+
+    Compute spherical harmonics.
+
+    The spherical harmonics are defined as
+
+    .. math::
+
+        Y^m_n(\theta,\phi) = \sqrt{\frac{2n+1}{4\pi} \frac{(n-m)!}{(n+m)!}}
+          e^{i m \theta} P^m_n(\cos(\phi))
+
+    where :math:`P_n^m` are the associated Legendre functions; see `lpmv`.
+
+    Parameters
+    ----------
+    m : array_like
+        Order of the harmonic (int); must have ``|m| <= n``.
+    n : array_like
+       Degree of the harmonic (int); must have ``n >= 0``. This is
+       often denoted by ``l`` (lower case L) in descriptions of
+       spherical harmonics.
+    theta : array_like
+       Azimuthal (longitudinal) coordinate; must be in ``[0, 2*pi]``.
+    phi : array_like
+       Polar (colatitudinal) coordinate; must be in ``[0, pi]``.
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    y_mn : complex scalar or ndarray
+       The harmonic :math:`Y^m_n` sampled at ``theta`` and ``phi``.
+
+    Notes
+    -----
+    There are different conventions for the meanings of the input
+    arguments ``theta`` and ``phi``. In SciPy ``theta`` is the
+    azimuthal angle and ``phi`` is the polar angle. It is common to
+    see the opposite convention, that is, ``theta`` as the polar angle
+    and ``phi`` as the azimuthal angle.
+
+    Note that SciPy's spherical harmonics include the Condon-Shortley
+    phase [2]_ because it is part of `lpmv`.
+
+    With SciPy's conventions, the first several spherical harmonics
+    are
+
+    .. math::
+
+        Y_0^0(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{1}{\pi}} \\
+        Y_1^{-1}(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{3}{2\pi}}
+                                    e^{-i\theta} \sin(\phi) \\
+        Y_1^0(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{3}{\pi}}
+                                 \cos(\phi) \\
+        Y_1^1(\theta, \phi) &= -\frac{1}{2} \sqrt{\frac{3}{2\pi}}
+                                 e^{i\theta} \sin(\phi).
+
+    References
+    ----------
+    .. [1] Digital Library of Mathematical Functions, 14.30.
+           https://dlmf.nist.gov/14.30
+    .. [2] https://en.wikipedia.org/wiki/Spherical_harmonics#Condon.E2.80.93Shortley_phase
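+
+    Examples
+    --------
+    A minimal check against the closed form of :math:`Y_0^0` given in the
+    Notes, which is constant in both angles:
+
+    >>> import numpy as np
+    >>> from scipy.special import sph_harm
+    >>> np.allclose(sph_harm(0, 0, 0.5, 0.5), 0.5 * np.sqrt(1 / np.pi))
+    True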
+    """)
+
+add_newdoc("_ellip_harm",
+    """
+    Internal function, use `ellip_harm` instead.
+    """)
+
+add_newdoc("_ellip_norm",
+    """
+    Internal function, use `ellip_norm` instead.
+    """)
+
+add_newdoc("_lambertw",
+    """
+    Internal function, use `lambertw` instead.
+    """)
+
+add_newdoc("voigt_profile",
+    r"""
+    voigt_profile(x, sigma, gamma, out=None)
+
+    Voigt profile.
+
+    The Voigt profile is a convolution of a 1-D Normal distribution with
+    standard deviation ``sigma`` and a 1-D Cauchy distribution with half-width at
+    half-maximum ``gamma``.
+
+    If ``sigma = 0``, the PDF of the Cauchy distribution is returned.
+    Conversely, if ``gamma = 0``, the PDF of the Normal distribution is
+    returned. If ``sigma = gamma = 0``, the return value is ``Inf`` for
+    ``x = 0`` and ``0`` for all other ``x``.
+
+    Parameters
+    ----------
+    x : array_like
+        Real argument
+    sigma : array_like
+        The standard deviation of the Normal distribution part
+    gamma : array_like
+        The half-width at half-maximum of the Cauchy distribution part
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    scalar or ndarray
+        The Voigt profile at the given arguments
+
+    Notes
+    -----
+    It can be expressed in terms of the Faddeeva function
+
+    .. math:: V(x; \sigma, \gamma) = \frac{Re[w(z)]}{\sigma\sqrt{2\pi}},
+    .. math:: z = \frac{x + i\gamma}{\sqrt{2}\sigma}
+
+    where :math:`w(z)` is the Faddeeva function.
+
+    See Also
+    --------
+    wofz : Faddeeva function
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Voigt_profile
+
+    Examples
+    --------
+    Calculate the function at point 2 for ``sigma=1`` and ``gamma=1``.
+
+    >>> from scipy.special import voigt_profile
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> voigt_profile(2, 1., 1.)
+    0.09071519942627544
+
+    Calculate the function at several points by providing a NumPy array
+    for `x`.
+
+    >>> values = np.array([-2., 0., 5])
+    >>> voigt_profile(values, 1., 1.)
+    array([0.0907152 , 0.20870928, 0.01388492])
+
+    Plot the function for different parameter sets.
+
+    >>> fig, ax = plt.subplots(figsize=(8, 8))
+    >>> x = np.linspace(-10, 10, 500)
+    >>> parameters_list = [(1.5, 0., "solid"), (1.3, 0.5, "dashed"),
+    ...                    (0., 1.8, "dotted"), (1., 1., "dashdot")]
+    >>> for params in parameters_list:
+    ...     sigma, gamma, linestyle = params
+    ...     voigt = voigt_profile(x, sigma, gamma)
+    ...     ax.plot(x, voigt, label=rf"$\sigma={sigma},\, \gamma={gamma}$",
+    ...             ls=linestyle)
+    >>> ax.legend()
+    >>> plt.show()
+
+    Verify visually that the Voigt profile indeed arises as the convolution
+    of a normal and a Cauchy distribution.
+
+    >>> from scipy.signal import convolve
+    >>> x, dx = np.linspace(-10, 10, 500, retstep=True)
+    >>> def gaussian(x, sigma):
+    ...     return np.exp(-0.5 * x**2/sigma**2)/(sigma * np.sqrt(2*np.pi))
+    >>> def cauchy(x, gamma):
+    ...     return gamma/(np.pi * (np.square(x)+gamma**2))
+    >>> sigma = 2
+    >>> gamma = 1
+    >>> gauss_profile = gaussian(x, sigma)
+    >>> cauchy_profile = cauchy(x, gamma)
+    >>> convolved = dx * convolve(cauchy_profile, gauss_profile, mode="same")
+    >>> voigt = voigt_profile(x, sigma, gamma)
+    >>> fig, ax = plt.subplots(figsize=(8, 8))
+    >>> ax.plot(x, gauss_profile, label="Gauss: $G$", c='b')
+    >>> ax.plot(x, cauchy_profile, label="Cauchy: $C$", c='y', ls="dashed")
+    >>> xx = 0.5*(x[1:] + x[:-1])  # midpoints
+    >>> ax.plot(xx, convolved[1:], label="Convolution: $G * C$", ls='dashdot',
+    ...         c='k')
+    >>> ax.plot(x, voigt, label="Voigt", ls='dotted', c='r')
+    >>> ax.legend()
+    >>> plt.show()
+    """)
+
+add_newdoc("wrightomega",
+    r"""
+    wrightomega(z, out=None)
+
+    Wright Omega function.
+
+    Defined as the solution to
+
+    .. math::
+
+        \omega + \log(\omega) = z
+
+    where :math:`\log` is the principal branch of the complex logarithm.
+
+    Parameters
+    ----------
+    z : array_like
+        Points at which to evaluate the Wright Omega function
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    omega : scalar or ndarray
+        Values of the Wright Omega function
+
+    Notes
+    -----
+    .. versionadded:: 0.19.0
+
+    The function can also be defined as
+
+    .. math::
+
+        \omega(z) = W_{K(z)}(e^z)
+
+    where :math:`K(z) = \lceil (\Im(z) - \pi)/(2\pi) \rceil` is the
+    unwinding number and :math:`W` is the Lambert W function.
+
+    The implementation here is taken from [1]_.
+
+    See Also
+    --------
+    lambertw : The Lambert W function
+
+    References
+    ----------
+    .. [1] Lawrence, Corless, and Jeffrey, "Algorithm 917: Complex
+           Double-Precision Evaluation of the Wright :math:`\omega`
+           Function." ACM Transactions on Mathematical Software,
+           2012. :doi:`10.1145/2168773.2168779`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.special import wrightomega, lambertw
+
+    >>> wrightomega([-2, -1, 0, 1, 2])
+    array([0.12002824, 0.27846454, 0.56714329, 1.        , 1.5571456 ])
+
+    Complex input:
+
+    >>> wrightomega(3 + 5j)
+    (1.5804428632097158+3.8213626783287937j)
+
+    Verify that ``wrightomega(z)`` satisfies ``w + log(w) = z``:
+
+    >>> w = -5 + 4j
+    >>> wrightomega(w + np.log(w))
+    (-5+4j)
+
+    Verify the connection to ``lambertw``:
+
+    >>> z = 0.5 + 3j
+    >>> wrightomega(z)
+    (0.0966015889280649+1.4937828458191993j)
+    >>> lambertw(np.exp(z))
+    (0.09660158892806493+1.4937828458191993j)
+
+    >>> z = 0.5 + 4j
+    >>> wrightomega(z)
+    (-0.3362123489037213+2.282986001579032j)
+    >>> lambertw(np.exp(z), k=1)
+    (-0.33621234890372115+2.282986001579032j)
+    """)
+
+
+add_newdoc("agm",
+    """
+    agm(a, b, out=None)
+
+    Compute the arithmetic-geometric mean of `a` and `b`.
+
+    Start with a_0 = a and b_0 = b and iteratively compute::
+
+        a_{n+1} = (a_n + b_n)/2
+        b_{n+1} = sqrt(a_n*b_n)
+
+    a_n and b_n converge to the same limit as n increases; their common
+    limit is agm(a, b).
+
+    Parameters
+    ----------
+    a, b : array_like
+        Real values only. If the values are both negative, the result
+        is negative. If one value is negative and the other is positive,
+        `nan` is returned.
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    scalar or ndarray
+        The arithmetic-geometric mean of `a` and `b`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.special import agm
+    >>> a, b = 24.0, 6.0
+    >>> agm(a, b)
+    13.458171481725614
+
+    Compare that result to the iteration:
+
+    >>> while a != b:
+    ...     a, b = (a + b)/2, np.sqrt(a*b)
+    ...     print("a = %19.16f  b=%19.16f" % (a, b))
+    ...
+    a = 15.0000000000000000  b=12.0000000000000000
+    a = 13.5000000000000000  b=13.4164078649987388
+    a = 13.4582039324993694  b=13.4581390309909850
+    a = 13.4581714817451772  b=13.4581714817060547
+    a = 13.4581714817256159  b=13.4581714817256159
+
+    When array-like arguments are given, broadcasting applies:
+
+    >>> a = np.array([[1.5], [3], [6]])  # a has shape (3, 1).
+    >>> b = np.array([6, 12, 24, 48])    # b has shape (4,).
+    >>> agm(a, b)
+    array([[  3.36454287,   5.42363427,   9.05798751,  15.53650756],
+           [  4.37037309,   6.72908574,  10.84726853,  18.11597502],
+           [  6.        ,   8.74074619,  13.45817148,  21.69453707]])
+    """)
+
+add_newdoc("airy",
+    r"""
+    airy(z, out=None)
+
+    Airy functions and their derivatives.
+
+    Parameters
+    ----------
+    z : array_like
+        Real or complex argument.
+    out : tuple of ndarray, optional
+        Optional output arrays for the function values
+
+    Returns
+    -------
+    Ai, Aip, Bi, Bip : 4-tuple of scalar or ndarray
+        Airy functions Ai and Bi, and their derivatives Aip and Bip.
+
+    Notes
+    -----
+    The Airy functions Ai and Bi are two independent solutions of
+
+    .. math:: y''(x) = x y(x).
+
+    For real `z` in [-10, 10], the computation is carried out by calling
+    the Cephes [1]_ `airy` routine, which uses power series summation
+    for small `z` and rational minimax approximations for large `z`.
+
+    Outside this range, the AMOS [2]_ `zairy` and `zbiry` routines are
+    employed. These compute the functions using power series for
+    :math:`|z| < 1` and, for larger `z`, the following relations to
+    modified Bessel functions (where :math:`t \equiv 2 z^{3/2}/3`):
+
+    .. math::
+
+        Ai(z) = \frac{1}{\pi \sqrt{3}} K_{1/3}(t)
+
+        Ai'(z) = -\frac{z}{\pi \sqrt{3}} K_{2/3}(t)
+
+        Bi(z) = \sqrt{\frac{z}{3}} \left(I_{-1/3}(t) + I_{1/3}(t) \right)
+
+        Bi'(z) = \frac{z}{\sqrt{3}} \left(I_{-2/3}(t) + I_{2/3}(t)\right)
+
+    See also
+    --------
+    airye : exponentially scaled Airy functions.
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+    .. [2] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
+           of a Complex Argument and Nonnegative Order",
+           http://netlib.org/amos/
+
+    Examples
+    --------
+    Compute the Airy functions on the interval [-15, 5].
+
+    >>> import numpy as np
+    >>> from scipy import special
+    >>> x = np.linspace(-15, 5, 201)
+    >>> ai, aip, bi, bip = special.airy(x)
+
+    Plot Ai(x) and Bi(x).
+
+    >>> import matplotlib.pyplot as plt
+    >>> plt.plot(x, ai, 'r', label='Ai(x)')
+    >>> plt.plot(x, bi, 'b--', label='Bi(x)')
+    >>> plt.ylim(-0.5, 1.0)
+    >>> plt.grid()
+    >>> plt.legend(loc='upper left')
+    >>> plt.show()
+
+    """)
+
+add_newdoc("airye",
+    """
+    airye(z, out=None)
+
+    Exponentially scaled Airy functions and their derivatives.
+
+    Scaling::
+
+        eAi  = Ai  * exp(2.0/3.0*z*sqrt(z))
+        eAip = Aip * exp(2.0/3.0*z*sqrt(z))
+        eBi  = Bi  * exp(-abs(2.0/3.0*(z*sqrt(z)).real))
+        eBip = Bip * exp(-abs(2.0/3.0*(z*sqrt(z)).real))
+
+    Parameters
+    ----------
+    z : array_like
+        Real or complex argument.
+    out : tuple of ndarray, optional
+        Optional output arrays for the function values
+
+    Returns
+    -------
+    eAi, eAip, eBi, eBip : 4-tuple of scalar or ndarray
+        Exponentially scaled Airy functions eAi and eBi, and their derivatives
+        eAip and eBip
+
+    Notes
+    -----
+    Wrapper for the AMOS [1]_ routines `zairy` and `zbiry`.
+
+    See also
+    --------
+    airy
+
+    References
+    ----------
+    .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
+           of a Complex Argument and Nonnegative Order",
+           http://netlib.org/amos/
+
+    Examples
+    --------
+    We can compute exponentially scaled Airy functions and their derivatives:
+
+    >>> import numpy as np
+    >>> from scipy.special import airye
+    >>> import matplotlib.pyplot as plt
+    >>> z = np.linspace(0, 50, 500)
+    >>> eAi, eAip, eBi, eBip = airye(z)
+    >>> f, ax = plt.subplots(2, 1, sharex=True)
+    >>> for ind, data in enumerate([[eAi, eAip, ["eAi", "eAip"]],
+    ...                             [eBi, eBip, ["eBi", "eBip"]]]):
+    ...     ax[ind].plot(z, data[0], "-r", z, data[1], "-b")
+    ...     ax[ind].legend(data[2])
+    ...     ax[ind].grid(True)
+    >>> plt.show()
+
+    We can also compute these from the usual non-scaled Airy functions:
+
+    >>> from scipy.special import airy
+    >>> Ai, Aip, Bi, Bip = airy(z)
+    >>> np.allclose(eAi, Ai * np.exp(2.0 / 3.0 * z * np.sqrt(z)))
+    True
+    >>> np.allclose(eAip, Aip * np.exp(2.0 / 3.0 * z * np.sqrt(z)))
+    True
+    >>> np.allclose(eBi, Bi * np.exp(-abs(np.real(2.0 / 3.0 * z * np.sqrt(z)))))
+    True
+    >>> np.allclose(eBip, Bip * np.exp(-abs(np.real(2.0 / 3.0 * z * np.sqrt(z)))))
+    True
+
+    Comparing the non-scaled and exponentially scaled versions, the
+    non-scaled functions quickly underflow or overflow for large arguments,
+    whereas the exponentially scaled functions do not.
+
+    >>> airy(200)
+    (0.0, 0.0, nan, nan)
+    >>> airye(200)
+    (0.07501041684381093, -1.0609012305109042, 0.15003188417418148, 2.1215836725571093)
+
+    """)
+
+add_newdoc("bdtr",
+    r"""
+    bdtr(k, n, p, out=None)
+
+    Binomial distribution cumulative distribution function.
+
+    Sum of the terms 0 through `floor(k)` of the Binomial probability density.
+
+    .. math::
+        \mathrm{bdtr}(k, n, p) = \sum_{j=0}^{\lfloor k \rfloor} {{n}\choose{j}} p^j (1-p)^{n-j}
+
+    Parameters
+    ----------
+    k : array_like
+        Number of successes (double), rounded down to the nearest integer.
+    n : array_like
+        Number of events (int).
+    p : array_like
+        Probability of success in a single event (float).
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    y : scalar or ndarray
+        Probability of `floor(k)` or fewer successes in `n` independent events with
+        success probabilities of `p`.
+
+    Notes
+    -----
+    The terms are not summed directly; instead the regularized incomplete beta
+    function is employed, according to the formula,
+
+    .. math::
+        \mathrm{bdtr}(k, n, p) = I_{1 - p}(n - \lfloor k \rfloor, \lfloor k \rfloor + 1).
+
+    Wrapper for the Cephes [1]_ routine `bdtr`.
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+
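+    Examples
+    --------
+    A quick consistency sketch using the incomplete-beta identity from
+    the Notes:
+
+    >>> import numpy as np
+    >>> from scipy.special import bdtr, betainc
+    >>> k, n, p = 3, 10, 0.5
+    >>> np.allclose(bdtr(k, n, p), betainc(n - k, k + 1, 1 - p))
+    True
+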
+    """)
+
+add_newdoc("bdtrc",
+    r"""
+    bdtrc(k, n, p, out=None)
+
+    Binomial distribution survival function.
+
+    Sum of the terms `floor(k) + 1` through `n` of the binomial probability
+    density,
+
+    .. math::
+        \mathrm{bdtrc}(k, n, p) = \sum_{j=\lfloor k \rfloor +1}^n {{n}\choose{j}} p^j (1-p)^{n-j}
+
+    Parameters
+    ----------
+    k : array_like
+        Number of successes (double), rounded down to the nearest integer.
+    n : array_like
+        Number of events (int).
+    p : array_like
+        Probability of success in a single event.
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    y : scalar or ndarray
+        Probability of `floor(k) + 1` or more successes in `n` independent
+        events with success probabilities of `p`.
+
+    See also
+    --------
+    bdtr
+    betainc
+
+    Notes
+    -----
+    The terms are not summed directly; instead the regularized incomplete beta
+    function is employed, according to the formula,
+
+    .. math::
+        \mathrm{bdtrc}(k, n, p) = I_{p}(\lfloor k \rfloor + 1, n - \lfloor k \rfloor).
+
+    Wrapper for the Cephes [1]_ routine `bdtrc`.
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+
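+    Examples
+    --------
+    As a sketch, `bdtr` and `bdtrc` are complementary:
+
+    >>> import numpy as np
+    >>> from scipy.special import bdtr, bdtrc
+    >>> k, n, p = 3, 10, 0.5
+    >>> np.allclose(bdtr(k, n, p) + bdtrc(k, n, p), 1.0)
+    True
+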
+    """)
+
+add_newdoc("bdtri",
+    r"""
+    bdtri(k, n, y, out=None)
+
+    Inverse function to `bdtr` with respect to `p`.
+
+    Finds the event probability `p` such that the sum of the terms 0 through
+    `k` of the binomial probability density is equal to the given cumulative
+    probability `y`.
+
+    Parameters
+    ----------
+    k : array_like
+        Number of successes (float), rounded down to the nearest integer.
+    n : array_like
+        Number of events (float).
+    y : array_like
+        Cumulative probability (probability of `k` or fewer successes in `n`
+        events).
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    p : scalar or ndarray
+        The event probability such that ``bdtr(floor(k), n, p) = y``.
+
+    See also
+    --------
+    bdtr
+    betaincinv
+
+    Notes
+    -----
+    The computation is carried out using the inverse beta integral function
+    and the relation::
+
+        1 - p = betaincinv(n - k, k + 1, y).
+
+    Wrapper for the Cephes [1]_ routine `bdtri`.
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
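+
+    Examples
+    --------
+    A round-trip sketch: recover `p`, then feed it back through `bdtr`:
+
+    >>> import numpy as np
+    >>> from scipy.special import bdtr, bdtri
+    >>> k, n, y = 3, 10, 0.75
+    >>> p = bdtri(k, n, y)
+    >>> np.allclose(bdtr(k, n, p), y)
+    True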
+    """)
+
+add_newdoc("bdtrik",
+    """
+    bdtrik(y, n, p, out=None)
+
+    Inverse function to `bdtr` with respect to `k`.
+
+    Finds the number of successes `k` such that the sum of the terms 0 through
+    `k` of the Binomial probability density for `n` events with probability
+    `p` is equal to the given cumulative probability `y`.
+
+    Parameters
+    ----------
+    y : array_like
+        Cumulative probability (probability of `k` or fewer successes in `n`
+        events).
+    n : array_like
+        Number of events (float).
+    p : array_like
+        Success probability (float).
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    k : scalar or ndarray
+        The number of successes `k` such that `bdtr(k, n, p) = y`.
+
+    See also
+    --------
+    bdtr
+
+    Notes
+    -----
+    Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the
+    cumulative incomplete beta distribution.
+
+    Computation of `k` involves a search for a value that produces the desired
+    value of `y`. The search relies on the monotonicity of `y` with `k`.
+
+    Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`.
+
+    References
+    ----------
+    .. [1] Milton Abramowitz and Irene A. Stegun, eds.
+           Handbook of Mathematical Functions with Formulas,
+           Graphs, and Mathematical Tables. New York: Dover, 1972.
+    .. [2] Barry Brown, James Lovato, and Kathy Russell,
+           CDFLIB: Library of Fortran Routines for Cumulative Distribution
+           Functions, Inverses, and Other Parameters.
+
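+    Examples
+    --------
+    A round-trip sketch via the incomplete-beta reduction quoted in the
+    Notes (the returned `k` is real-valued, not rounded):
+
+    >>> import numpy as np
+    >>> from scipy.special import bdtrik, betainc
+    >>> n, p, y = 10.0, 0.5, 0.75
+    >>> k = bdtrik(y, n, p)
+    >>> np.allclose(betainc(n - k, k + 1, 1 - p), y)
+    True
+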
+    """)
+
+add_newdoc("bdtrin",
+    """
+    bdtrin(k, y, p, out=None)
+
+    Inverse function to `bdtr` with respect to `n`.
+
+    Finds the number of events `n` such that the sum of the terms 0 through
+    `k` of the Binomial probability density for events with probability `p` is
+    equal to the given cumulative probability `y`.
+
+    Parameters
+    ----------
+    k : array_like
+        Number of successes (float).
+    y : array_like
+        Cumulative probability (probability of `k` or fewer successes in `n`
+        events).
+    p : array_like
+        Success probability (float).
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    n : scalar or ndarray
+        The number of events `n` such that `bdtr(k, n, p) = y`.
+
+    See also
+    --------
+    bdtr
+
+    Notes
+    -----
+    Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the
+    cumulative incomplete beta distribution.
+
+    Computation of `n` involves a search for a value that produces the desired
+    value of `y`. The search relies on the monotonicity of `y` with `n`.
+
+    Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`.
+
+    References
+    ----------
+    .. [1] Milton Abramowitz and Irene A. Stegun, eds.
+           Handbook of Mathematical Functions with Formulas,
+           Graphs, and Mathematical Tables. New York: Dover, 1972.
+    .. [2] Barry Brown, James Lovato, and Kathy Russell,
+           CDFLIB: Library of Fortran Routines for Cumulative Distribution
+           Functions, Inverses, and Other Parameters.
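+
+    Examples
+    --------
+    A round-trip sketch via the incomplete-beta reduction quoted in the
+    Notes (the returned `n` is real-valued):
+
+    >>> import numpy as np
+    >>> from scipy.special import bdtrin, betainc
+    >>> k, p, y = 3.0, 0.5, 0.25
+    >>> n = bdtrin(k, y, p)
+    >>> np.allclose(betainc(n - k, k + 1, 1 - p), y)
+    True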
+    """)
+
+add_newdoc(
+    "binom",
+    r"""
+    binom(x, y, out=None)
+
+    Binomial coefficient considered as a function of two real variables.
+
+    For real arguments, the binomial coefficient is defined as
+
+    .. math::
+
+        \binom{x}{y} = \frac{\Gamma(x + 1)}{\Gamma(y + 1)\Gamma(x - y + 1)} =
+            \frac{1}{(x + 1)\mathrm{B}(x - y + 1, y + 1)}
+
+    Where :math:`\Gamma` is the Gamma function (`gamma`) and :math:`\mathrm{B}`
+    is the Beta function (`beta`) [1]_.
+
+    Parameters
+    ----------
+    x, y : array_like
+       Real arguments to :math:`\binom{x}{y}`.
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    scalar or ndarray
+        Value of binomial coefficient.
+
+    See Also
+    --------
+    comb : The number of combinations of N things taken k at a time.
+
+    Notes
+    -----
+    The Gamma function has poles at non-positive integers and tends to either
+    positive or negative infinity depending on the direction on the real line
+    from which a pole is approached. When considered as a function of two real
+    variables, :math:`\binom{x}{y}` is thus undefined when `x` is a negative
+    integer.  `binom` returns ``nan`` when ``x`` is a negative integer. This
+    is the case even when ``x`` is a negative integer and ``y`` an integer,
+    contrary to the usual convention for defining :math:`\binom{n}{k}` when it
+    is considered as a function of two integer variables.
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Binomial_coefficient
+
+    Examples
+    --------
+    The following examples illustrate the ways in which `binom` differs from
+    the function `comb`.
+
+    >>> from scipy.special import binom, comb
+
+    When ``exact=False`` and ``x`` and ``y`` are both positive, `comb` calls
+    `binom` internally.
+
+    >>> x, y = 3, 2
+    >>> (binom(x, y), comb(x, y), comb(x, y, exact=True))
+    (3.0, 3.0, 3)
+
+    For larger values, `comb` with ``exact=True`` no longer agrees
+    with `binom`.
+
+    >>> x, y = 43, 23
+    >>> (binom(x, y), comb(x, y), comb(x, y, exact=True))
+    (960566918219.9999, 960566918219.9999, 960566918220)
+
+    `binom` returns ``nan`` when ``x`` is a negative integer, but is otherwise
+    defined for negative arguments. `comb` returns 0 whenever one of ``x`` or
+    ``y`` is negative or ``x`` is less than ``y``.
+
+    >>> x, y = -3, 2
+    >>> (binom(x, y), comb(x, y), comb(x, y, exact=True))
+    (nan, 0.0, 0)
+
+    >>> x, y = -3.1, 2.2
+    >>> (binom(x, y), comb(x, y), comb(x, y, exact=True))
+    (18.714147876804432, 0.0, 0)
+
+    >>> x, y = 2.2, 3.1
+    >>> (binom(x, y), comb(x, y), comb(x, y, exact=True))
+    (0.037399983365134115, 0.0, 0)
+    """
+)
+
+add_newdoc("btdtria",
+    r"""
+    btdtria(p, b, x, out=None)
+
+    Inverse of `btdtr` with respect to `a`.
+
+    This is the inverse of the beta cumulative distribution function, `btdtr`,
+    considered as a function of `a`, returning the value of `a` for which
+    `btdtr(a, b, x) = p`, or
+
+    .. math::
+        p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
+
+    Parameters
+    ----------
+    p : array_like
+        Cumulative probability, in [0, 1].
+    b : array_like
+        Shape parameter (`b` > 0).
+    x : array_like
+        The quantile, in [0, 1].
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    a : scalar or ndarray
+        The value of the shape parameter `a` such that `btdtr(a, b, x) = p`.
+
+    See Also
+    --------
+    btdtr : Cumulative distribution function of the beta distribution.
+    btdtri : Inverse with respect to `x`.
+    btdtrib : Inverse with respect to `b`.
+
+    Notes
+    -----
+    Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.
+
+    The cumulative distribution function `p` is computed using a routine by
+    DiDinato and Morris [2]_. Computation of `a` involves a search for a value
+    that produces the desired value of `p`. The search relies on the
+    monotonicity of `p` with `a`.
+
+    References
+    ----------
+    .. [1] Barry Brown, James Lovato, and Kathy Russell,
+           CDFLIB: Library of Fortran Routines for Cumulative Distribution
+           Functions, Inverses, and Other Parameters.
+    .. [2] DiDinato, A. R. and Morris, A. H.,
+           Algorithm 708: Significant Digit Computation of the Incomplete Beta
+           Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373.
+
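+    Examples
+    --------
+    A round-trip sketch: the recovered `a` reproduces the input
+    probability through `btdtr`:
+
+    >>> import numpy as np
+    >>> from scipy.special import btdtr, btdtria
+    >>> b, x = 3.0, 0.4
+    >>> p = btdtr(2.0, b, x)
+    >>> a = btdtria(p, b, x)
+    >>> np.allclose(btdtr(a, b, x), p)
+    True
+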
+    """)
+
+add_newdoc("btdtrib",
+    r"""
+    btdtrib(a, p, x, out=None)
+
+    Inverse of `btdtr` with respect to `b`.
+
+    This is the inverse of the beta cumulative distribution function, `btdtr`,
+    considered as a function of `b`, returning the value of `b` for which
+    `btdtr(a, b, x) = p`, or
+
+    .. math::
+        p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
+
+    Parameters
+    ----------
+    a : array_like
+        Shape parameter (`a` > 0).
+    p : array_like
+        Cumulative probability, in [0, 1].
+    x : array_like
+        The quantile, in [0, 1].
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    b : scalar or ndarray
+        The value of the shape parameter `b` such that `btdtr(a, b, x) = p`.
+
+    See Also
+    --------
+    btdtr : Cumulative distribution function of the beta distribution.
+    btdtri : Inverse with respect to `x`.
+    btdtria : Inverse with respect to `a`.
+
+    Notes
+    -----
+    Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.
+
+    The cumulative distribution function `p` is computed using a routine by
+    DiDinato and Morris [2]_. Computation of `b` involves a search for a value
+    that produces the desired value of `p`. The search relies on the
+    monotonicity of `p` with `b`.
+
+    References
+    ----------
+    .. [1] Barry Brown, James Lovato, and Kathy Russell,
+           CDFLIB: Library of Fortran Routines for Cumulative Distribution
+           Functions, Inverses, and Other Parameters.
+    .. [2] DiDinato, A. R. and Morris, A. H.,
+           Algorithm 708: Significant Digit Computation of the Incomplete Beta
+           Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373.
+
+
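+    Examples
+    --------
+    A round-trip sketch analogous to `btdtria`:
+
+    >>> import numpy as np
+    >>> from scipy.special import btdtr, btdtrib
+    >>> a, x = 2.0, 0.4
+    >>> p = btdtr(a, 3.0, x)
+    >>> b = btdtrib(a, p, x)
+    >>> np.allclose(btdtr(a, b, x), p)
+    True
+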
+    """)
+
+add_newdoc("bei",
+    r"""
+    bei(x, out=None)
+
+    Kelvin function bei.
+
+    Defined as
+
+    .. math::
+
+        \mathrm{bei}(x) = \Im[J_0(x e^{3 \pi i / 4})]
+
+    where :math:`J_0` is the Bessel function of the first kind of
+    order zero (see `jv`). See [dlmf]_ for more details.
+
+    Parameters
+    ----------
+    x : array_like
+        Real argument.
+    out : ndarray, optional
+        Optional output array for the function results.
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the Kelvin function.
+
+    See Also
+    --------
+    ber : the corresponding real part
+    beip : the derivative of bei
+    jv : Bessel function of the first kind
+
+    References
+    ----------
+    .. [dlmf] NIST, Digital Library of Mathematical Functions,
+        https://dlmf.nist.gov/10.61
+
+    Examples
+    --------
+    It can be expressed using Bessel functions.
+
+    >>> import numpy as np
+    >>> import scipy.special as sc
+    >>> x = np.array([1.0, 2.0, 3.0, 4.0])
+    >>> sc.jv(0, x * np.exp(3 * np.pi * 1j / 4)).imag
+    array([0.24956604, 0.97229163, 1.93758679, 2.29269032])
+    >>> sc.bei(x)
+    array([0.24956604, 0.97229163, 1.93758679, 2.29269032])
+
+    """)
+
+add_newdoc("beip",
+    r"""
+    beip(x, out=None)
+
+    Derivative of the Kelvin function bei.
+
+    Parameters
+    ----------
+    x : array_like
+        Real argument.
+    out : ndarray, optional
+        Optional output array for the function results.
+
+    Returns
+    -------
+    scalar or ndarray
+        The values of the derivative of bei.
+
+    See Also
+    --------
+    bei
+
+    References
+    ----------
+    .. [dlmf] NIST, Digital Library of Mathematical Functions,
+        https://dlmf.nist.gov/10#PT5
+
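+    Examples
+    --------
+    A finite-difference sketch: `beip` should match a central-difference
+    approximation of the derivative of `bei` (step size illustrative):
+
+    >>> import numpy as np
+    >>> from scipy.special import bei, beip
+    >>> x, h = 2.0, 1e-6
+    >>> np.allclose((bei(x + h) - bei(x - h)) / (2 * h), beip(x))
+    True
+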
+    """)
+
+add_newdoc("ber",
+    r"""
+    ber(x, out=None)
+
+    Kelvin function ber.
+
+    Defined as
+
+    .. math::
+
+        \mathrm{ber}(x) = \Re[J_0(x e^{3 \pi i / 4})]
+
+    where :math:`J_0` is the Bessel function of the first kind of
+    order zero (see `jv`). See [dlmf]_ for more details.
+
+    Parameters
+    ----------
+    x : array_like
+        Real argument.
+    out : ndarray, optional
+        Optional output array for the function results.
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the Kelvin function.
+
+    See Also
+    --------
+    bei : the corresponding imaginary part
+    berp : the derivative of ber
+    jv : Bessel function of the first kind
+
+    References
+    ----------
+    .. [dlmf] NIST, Digital Library of Mathematical Functions,
+        https://dlmf.nist.gov/10.61
+
+    Examples
+    --------
+    It can be expressed using Bessel functions.
+
+    >>> import numpy as np
+    >>> import scipy.special as sc
+    >>> x = np.array([1.0, 2.0, 3.0, 4.0])
+    >>> sc.jv(0, x * np.exp(3 * np.pi * 1j / 4)).real
+    array([ 0.98438178,  0.75173418, -0.22138025, -2.56341656])
+    >>> sc.ber(x)
+    array([ 0.98438178,  0.75173418, -0.22138025, -2.56341656])
+
+    """)
+
+add_newdoc("berp",
+    r"""
+    berp(x, out=None)
+
+    Derivative of the Kelvin function ber.
+
+    Parameters
+    ----------
+    x : array_like
+        Real argument.
+    out : ndarray, optional
+        Optional output array for the function results.
+
+    Returns
+    -------
+    scalar or ndarray
+        The values of the derivative of ber.
+
+    See Also
+    --------
+    ber
+
+    References
+    ----------
+    .. [dlmf] NIST, Digital Library of Mathematical Functions,
+        https://dlmf.nist.gov/10#PT5
+
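+    Examples
+    --------
+    A finite-difference sketch: `berp` should match a central-difference
+    approximation of the derivative of `ber` (step size illustrative):
+
+    >>> import numpy as np
+    >>> from scipy.special import ber, berp
+    >>> x, h = 2.0, 1e-6
+    >>> np.allclose((ber(x + h) - ber(x - h)) / (2 * h), berp(x))
+    True
+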
+    """)
+
+add_newdoc("besselpoly",
+    r"""
+    besselpoly(a, lmb, nu, out=None)
+
+    Weighted integral of the Bessel function of the first kind.
+
+    Computes
+
+    .. math::
+
+       \int_0^1 x^\lambda J_\nu(2 a x) \, dx
+
+    where :math:`J_\nu` is a Bessel function and :math:`\lambda=lmb`,
+    :math:`\nu=nu`.
+
+    Parameters
+    ----------
+    a : array_like
+        Scale factor inside the Bessel function.
+    lmb : array_like
+        Power of `x`
+    nu : array_like
+        Order of the Bessel function.
+    out : ndarray, optional
+        Optional output array for the function results.
+
+    Returns
+    -------
+    scalar or ndarray
+        Value of the integral.
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+
+    Examples
+    --------
+    Evaluate the function for one parameter set.
+
+    >>> from scipy.special import besselpoly
+    >>> besselpoly(1, 1, 1)
+    0.24449718372863877
+
+    Evaluate the function for different scale factors.
+
+    >>> import numpy as np
+    >>> factors = np.array([0., 3., 6.])
+    >>> besselpoly(factors, 1, 1)
+    array([ 0.        , -0.00549029,  0.00140174])
+
+    Plot the function for varying powers, orders and scales.
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots()
+    >>> powers = np.linspace(0, 10, 100)
+    >>> orders = [1, 2, 3]
+    >>> scales = [1, 2]
+    >>> all_combinations = [(order, scale) for order in orders
+    ...                     for scale in scales]
+    >>> for order, scale in all_combinations:
+    ...     ax.plot(powers, besselpoly(scale, powers, order),
+    ...             label=rf"$\nu={order}, a={scale}$")
+    >>> ax.legend()
+    >>> ax.set_xlabel(r"$\lambda$")
+    >>> ax.set_ylabel(r"$\int_0^1 x^{\lambda} J_{\nu}(2ax)\,dx$")
+    >>> plt.show()
+    """)
+
+add_newdoc("beta",
+    r"""
+    beta(a, b, out=None)
+
+    Beta function.
+
+    This function is defined in [1]_ as
+
+    .. math::
+
+        B(a, b) = \int_0^1 t^{a-1}(1-t)^{b-1}dt
+                = \frac{\Gamma(a)\Gamma(b)}{\Gamma(a+b)},
+
+    where :math:`\Gamma` is the gamma function.
+
+    Parameters
+    ----------
+    a, b : array_like
+        Real-valued arguments
+    out : ndarray, optional
+        Optional output array for the function result
+
+    Returns
+    -------
+    scalar or ndarray
+        Value of the beta function
+
+    See Also
+    --------
+    gamma : the gamma function
+    betainc :  the regularized incomplete beta function
+    betaln : the natural logarithm of the absolute
+             value of the beta function
+
+    References
+    ----------
+    .. [1] NIST Digital Library of Mathematical Functions,
+           Eq. 5.12.1. https://dlmf.nist.gov/5.12
+
+    Examples
+    --------
+    >>> import scipy.special as sc
+
+    The beta function relates to the gamma function by the
+    definition given above:
+
+    >>> sc.beta(2, 3)
+    0.08333333333333333
+    >>> sc.gamma(2)*sc.gamma(3)/sc.gamma(2 + 3)
+    0.08333333333333333
+
+    As this relationship demonstrates, the beta function
+    is symmetric:
+
+    >>> sc.beta(1.7, 2.4)
+    0.16567527689031739
+    >>> sc.beta(2.4, 1.7)
+    0.16567527689031739
+
+    This function satisfies :math:`B(1, b) = 1/b`:
+
+    >>> sc.beta(1, 4)
+    0.25
+
+    """)
+
+add_newdoc("betainc",
+    r"""
+    betainc(a, b, x, out=None)
+
+    Regularized incomplete beta function.
+
+    Computes the regularized incomplete beta function, defined as [1]_:
+
+    .. math::
+
+        I_x(a, b) = \frac{\Gamma(a+b)}{\Gamma(a)\Gamma(b)} \int_0^x
+        t^{a-1}(1-t)^{b-1}dt,
+
+    for :math:`0 \leq x \leq 1`.
+
+    Parameters
+    ----------
+    a, b : array_like
+           Positive, real-valued parameters
+    x : array_like
+        Real-valued such that :math:`0 \leq x \leq 1`,
+        the upper limit of integration
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    scalar or ndarray
+        Value of the regularized incomplete beta function
+
+    See Also
+    --------
+    beta : beta function
+    betaincinv : inverse of the regularized incomplete beta function
+
+    Notes
+    -----
+    The term *regularized* in the name of this function refers to the
+    scaling of the function by the gamma function terms shown in the
+    formula.  When not qualified as *regularized*, the name *incomplete
+    beta function* often refers to just the integral expression,
+    without the gamma terms.  One can use the function `beta` from
+    `scipy.special` to get this "nonregularized" incomplete beta
+    function by multiplying the result of ``betainc(a, b, x)`` by
+    ``beta(a, b)``.
+
+    References
+    ----------
+    .. [1] NIST Digital Library of Mathematical Functions
+           https://dlmf.nist.gov/8.17
+
+    Examples
+    --------
+
+    Let :math:`B(a, b)` be the `beta` function.
+
+    >>> import scipy.special as sc
+
+    The coefficient in terms of `gamma` is equal to
+    :math:`1/B(a, b)`. Also, when :math:`x=1`
+    the integral is equal to :math:`B(a, b)`.
+    Therefore, :math:`I_{x=1}(a, b) = 1` for any :math:`a, b`.
+
+    >>> sc.betainc(0.2, 3.5, 1.0)
+    1.0
+
+    It satisfies
+    :math:`I_x(a, b) = x^a F(a, 1-b, a+1, x)/ (aB(a, b))`,
+    where :math:`F` is the hypergeometric function `hyp2f1`:
+
+    >>> a, b, x = 1.4, 3.1, 0.5
+    >>> x**a * sc.hyp2f1(a, 1 - b, a + 1, x)/(a * sc.beta(a, b))
+    0.8148904036225295
+    >>> sc.betainc(a, b, x)
+    0.8148904036225296
+
+    This function satisfies the relationship
+    :math:`I_x(a, b) = 1 - I_{1-x}(b, a)`:
+
+    >>> sc.betainc(2.2, 3.1, 0.4)
+    0.49339638807619446
+    >>> 1 - sc.betainc(3.1, 2.2, 1 - 0.4)
+    0.49339638807619446
+
+    """)
+
+add_newdoc("betaincinv",
+    r"""
+    betaincinv(a, b, y, out=None)
+
+    Inverse of the regularized incomplete beta function.
+
+    Computes :math:`x` such that:
+
+    .. math::
+
+        y = I_x(a, b) = \frac{\Gamma(a+b)}{\Gamma(a)\Gamma(b)}
+        \int_0^x t^{a-1}(1-t)^{b-1}dt,
+
+    where :math:`I_x` is the normalized incomplete beta
+    function `betainc` and
+    :math:`\Gamma` is the `gamma` function [1]_.
+
+    Parameters
+    ----------
+    a, b : array_like
+        Positive, real-valued parameters
+    y : array_like
+        Real-valued input
+    out : ndarray, optional
+        Optional output array for function values
+
+    Returns
+    -------
+    scalar or ndarray
+        Value of the inverse of the regularized incomplete beta function
+
+    See Also
+    --------
+    betainc : regularized incomplete beta function
+    gamma : gamma function
+
+    References
+    ----------
+    .. [1] NIST Digital Library of Mathematical Functions
+           https://dlmf.nist.gov/8.17
+
+    Examples
+    --------
+    >>> import scipy.special as sc
+
+    This function is the inverse of `betainc` for fixed
+    values of :math:`a` and :math:`b`.
+
+    >>> a, b = 1.2, 3.1
+    >>> y = sc.betainc(a, b, 0.2)
+    >>> sc.betaincinv(a, b, y)
+    0.2
+    >>>
+    >>> a, b = 7.5, 0.4
+    >>> x = sc.betaincinv(a, b, 0.5)
+    >>> sc.betainc(a, b, x)
+    0.5
+
+    """)
+
+add_newdoc("betaln",
+    """
+    betaln(a, b, out=None)
+
+    Natural logarithm of the absolute value of the beta function.
+
+    Computes ``ln(abs(beta(a, b)))``.
+
+    Parameters
+    ----------
+    a, b : array_like
+        Positive, real-valued parameters
+    out : ndarray, optional
+        Optional output array for function values
+
+    Returns
+    -------
+    scalar or ndarray
+        Value of the betaln function
+
+    See Also
+    --------
+    gamma : the gamma function
+    betainc :  the regularized incomplete beta function
+    beta : the beta function
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.special import betaln, beta
+
+    Verify that, for moderate values of ``a`` and ``b``, ``betaln(a, b)``
+    is the same as ``log(beta(a, b))``:
+
+    >>> betaln(3, 4)
+    -4.0943445622221
+
+    >>> np.log(beta(3, 4))
+    -4.0943445622221
+
+    In the following ``beta(a, b)`` underflows to 0, so we can't compute
+    the logarithm of the actual value.
+
+    >>> a = 400
+    >>> b = 900
+    >>> beta(a, b)
+    0.0
+
+    We can compute the logarithm of ``beta(a, b)`` by using `betaln`:
+
+    >>> betaln(a, b)
+    -804.3069951764146
+
+    """)
+
+add_newdoc("boxcox",
+    """
+    boxcox(x, lmbda, out=None)
+
+    Compute the Box-Cox transformation.
+
+    The Box-Cox transformation is::
+
+        y = (x**lmbda - 1) / lmbda  if lmbda != 0
+            log(x)                  if lmbda == 0
+
+    Returns `nan` if ``x < 0``.
+    Returns `-inf` if ``x == 0`` and ``lmbda < 0``.
+
+    Parameters
+    ----------
+    x : array_like
+        Data to be transformed.
+    lmbda : array_like
+        Power parameter of the Box-Cox transform.
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    y : scalar or ndarray
+        Transformed data.
+
+    Notes
+    -----
+
+    .. versionadded:: 0.14.0
+
+    Examples
+    --------
+    >>> from scipy.special import boxcox
+    >>> boxcox([1, 4, 10], 2.5)
+    array([   0.        ,   12.4       ,  126.09110641])
+    >>> boxcox(2, [0, 1, 2])
+    array([ 0.69314718,  1.        ,  1.5       ])
+    """)
+
+add_newdoc("boxcox1p",
+    """
+    boxcox1p(x, lmbda, out=None)
+
+    Compute the Box-Cox transformation of 1 + `x`.
+
+    The Box-Cox transformation computed by `boxcox1p` is::
+
+        y = ((1+x)**lmbda - 1) / lmbda  if lmbda != 0
+            log(1+x)                    if lmbda == 0
+
+    Returns `nan` if ``x < -1``.
+    Returns `-inf` if ``x == -1`` and ``lmbda < 0``.
+
+    Parameters
+    ----------
+    x : array_like
+        Data to be transformed.
+    lmbda : array_like
+        Power parameter of the Box-Cox transform.
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    y : scalar or ndarray
+        Transformed data.
+
+    Notes
+    -----
+
+    .. versionadded:: 0.14.0
+
+    Examples
+    --------
+    >>> from scipy.special import boxcox1p
+    >>> boxcox1p(1e-4, [0, 0.5, 1])
+    array([  9.99950003e-05,   9.99975001e-05,   1.00000000e-04])
+    >>> boxcox1p([0.01, 0.1], 0.25)
+    array([ 0.00996272,  0.09645476])
+    """)
+
+add_newdoc("inv_boxcox",
+    """
+    inv_boxcox(y, lmbda, out=None)
+
+    Compute the inverse of the Box-Cox transformation.
+
+    Find ``x`` such that::
+
+        y = (x**lmbda - 1) / lmbda  if lmbda != 0
+            log(x)                  if lmbda == 0
+
+    Parameters
+    ----------
+    y : array_like
+        Data to be transformed.
+    lmbda : array_like
+        Power parameter of the Box-Cox transform.
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    x : scalar or ndarray
+        Transformed data.
+
+    Notes
+    -----
+
+    .. versionadded:: 0.16.0
+
+    Examples
+    --------
+    >>> from scipy.special import boxcox, inv_boxcox
+    >>> y = boxcox([1, 4, 10], 2.5)
+    >>> inv_boxcox(y, 2.5)
+    array([1., 4., 10.])
+    """)
+
+add_newdoc("inv_boxcox1p",
+    """
+    inv_boxcox1p(y, lmbda, out=None)
+
+    Compute the inverse of the Box-Cox transformation of ``1 + x``.
+
+    Find ``x`` such that::
+
+        y = ((1+x)**lmbda - 1) / lmbda  if lmbda != 0
+            log(1+x)                    if lmbda == 0
+
+    Parameters
+    ----------
+    y : array_like
+        Data to be transformed.
+    lmbda : array_like
+        Power parameter of the Box-Cox transform.
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    x : scalar or ndarray
+        Transformed data.
+
+    Notes
+    -----
+
+    .. versionadded:: 0.16.0
+
+    Examples
+    --------
+    >>> from scipy.special import boxcox1p, inv_boxcox1p
+    >>> y = boxcox1p([1, 4, 10], 2.5)
+    >>> inv_boxcox1p(y, 2.5)
+    array([1., 4., 10.])
+    """)
+
+add_newdoc("btdtr",
+    r"""
+    btdtr(a, b, x, out=None)
+
+    Cumulative distribution function of the beta distribution.
+
+    Returns the integral from zero to `x` of the beta probability density
+    function,
+
+    .. math::
+        I = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
+
+    where :math:`\Gamma` is the gamma function.
+
+    Parameters
+    ----------
+    a : array_like
+        Shape parameter (a > 0).
+    b : array_like
+        Shape parameter (b > 0).
+    x : array_like
+        Upper limit of integration, in [0, 1].
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    I : scalar or ndarray
+        Cumulative distribution function of the beta distribution with
+        parameters `a` and `b` at `x`.
+
+    See Also
+    --------
+    betainc
+
+    Notes
+    -----
+    This function is identical to the incomplete beta integral function
+    `betainc`.
+
+    Wrapper for the Cephes [1]_ routine `btdtr`.
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+
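+    Examples
+    --------
+    As stated in the Notes, `btdtr` agrees with `betainc`:
+
+    >>> import numpy as np
+    >>> from scipy.special import btdtr, betainc
+    >>> np.allclose(btdtr(1.5, 2.0, 0.7), betainc(1.5, 2.0, 0.7))
+    True
+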
+    """)
+
+add_newdoc("btdtri",
+    r"""
+    btdtri(a, b, p, out=None)
+
+    The `p`-th quantile of the beta distribution.
+
+    This function is the inverse of the beta cumulative distribution function,
+    `btdtr`, returning the value of `x` for which `btdtr(a, b, x) = p`, or
+
+    .. math::
+        p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
+
+    Parameters
+    ----------
+    a : array_like
+        Shape parameter (`a` > 0).
+    b : array_like
+        Shape parameter (`b` > 0).
+    p : array_like
+        Cumulative probability, in [0, 1].
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    x : scalar or ndarray
+        The quantile corresponding to `p`.
+
+    See Also
+    --------
+    betaincinv
+    btdtr
+
+    Notes
+    -----
+    The value of `x` is found by interval halving or Newton iterations.
+
+    Wrapper for the Cephes [1]_ routine `incbi`, which solves the equivalent
+    problem of finding the inverse of the incomplete beta integral.
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+
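+    Examples
+    --------
+    A round-trip sketch through `btdtr`:
+
+    >>> import numpy as np
+    >>> from scipy.special import btdtr, btdtri
+    >>> a, b, p = 1.5, 2.0, 0.3
+    >>> x = btdtri(a, b, p)
+    >>> np.allclose(btdtr(a, b, x), p)
+    True
+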
+    """)
+
+add_newdoc("cbrt",
+    """
+    cbrt(x, out=None)
+
+    Element-wise cube root of `x`.
+
+    Parameters
+    ----------
+    x : array_like
+        `x` must contain real numbers.
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    scalar or ndarray
+        The cube root of each value in `x`.
+
+    Examples
+    --------
+    >>> from scipy.special import cbrt
+
+    >>> cbrt(8)
+    2.0
+    >>> cbrt([-8, -3, 0.125, 1.331])
+    array([-2.        , -1.44224957,  0.5       ,  1.1       ])
+
+    """)
+
+add_newdoc("chdtr",
+    r"""
+    chdtr(v, x, out=None)
+
+    Chi square cumulative distribution function.
+
+    Returns the area under the left tail (from 0 to `x`) of the Chi
+    square probability density function with `v` degrees of freedom:
+
+    .. math::
+
+        \frac{1}{2^{v/2} \Gamma(v/2)} \int_0^x t^{v/2 - 1} e^{-t/2} dt
+
+    Here :math:`\Gamma` is the Gamma function; see `gamma`. This
+    integral can be expressed in terms of the regularized lower
+    incomplete gamma function `gammainc` as
+    ``gammainc(v / 2, x / 2)``. [1]_
+
+    Parameters
+    ----------
+    v : array_like
+        Degrees of freedom.
+    x : array_like
+        Upper bound of the integral.
+    out : ndarray, optional
+        Optional output array for the function results.
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the cumulative distribution function.
+
+    See Also
+    --------
+    chdtrc, chdtri, chdtriv, gammainc
+
+    References
+    ----------
+    .. [1] Chi-Square distribution,
+        https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import scipy.special as sc
+
+    It can be expressed in terms of the regularized lower incomplete
+    gamma function.
+
+    >>> v = 1
+    >>> x = np.arange(4)
+    >>> sc.chdtr(v, x)
+    array([0.        , 0.68268949, 0.84270079, 0.91673548])
+    >>> sc.gammainc(v / 2, x / 2)
+    array([0.        , 0.68268949, 0.84270079, 0.91673548])
+
+    """)
+
+add_newdoc("chdtrc",
+    r"""
+    chdtrc(v, x, out=None)
+
+    Chi square survival function.
+
+    Returns the area under the right hand tail (from `x` to infinity)
+    of the Chi square probability density function with `v` degrees of
+    freedom:
+
+    .. math::
+
+        \frac{1}{2^{v/2} \Gamma(v/2)} \int_x^\infty t^{v/2 - 1} e^{-t/2} dt
+
+    Here :math:`\Gamma` is the Gamma function; see `gamma`. This
+    integral can be expressed in terms of the regularized upper
+    incomplete gamma function `gammaincc` as
+    ``gammaincc(v / 2, x / 2)``. [1]_
+
+    Parameters
+    ----------
+    v : array_like
+        Degrees of freedom.
+    x : array_like
+        Lower bound of the integral.
+    out : ndarray, optional
+        Optional output array for the function results.
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the survival function.
+
+    See Also
+    --------
+    chdtr, chdtri, chdtriv, gammaincc
+
+    References
+    ----------
+    .. [1] Chi-Square distribution,
+        https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import scipy.special as sc
+
+    It can be expressed in terms of the regularized upper incomplete
+    gamma function.
+
+    >>> v = 1
+    >>> x = np.arange(4)
+    >>> sc.chdtrc(v, x)
+    array([1.        , 0.31731051, 0.15729921, 0.08326452])
+    >>> sc.gammaincc(v / 2, x / 2)
+    array([1.        , 0.31731051, 0.15729921, 0.08326452])
+
+    """)
+
+add_newdoc("chdtri",
+    """
+    chdtri(v, p, out=None)
+
+    Inverse to `chdtrc` with respect to `x`.
+
+    Returns `x` such that ``chdtrc(v, x) == p``.
+
+    Parameters
+    ----------
+    v : array_like
+        Degrees of freedom.
+    p : array_like
+        Probability.
+    out : ndarray, optional
+        Optional output array for the function results.
+
+    Returns
+    -------
+    x : scalar or ndarray
+        Value so that the probability a Chi square random variable
+        with `v` degrees of freedom is greater than `x` equals `p`.
+
+    See Also
+    --------
+    chdtrc, chdtr, chdtriv
+
+    References
+    ----------
+    .. [1] Chi-Square distribution,
+        https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm
+
+    Examples
+    --------
+    >>> import scipy.special as sc
+
+    It inverts `chdtrc`.
+
+    >>> v, p = 1, 0.3
+    >>> sc.chdtrc(v, sc.chdtri(v, p))
+    0.3
+    >>> x = 1
+    >>> sc.chdtri(v, sc.chdtrc(v, x))
+    1.0
+
+    """)
+
+add_newdoc("chdtriv",
+    """
+    chdtriv(p, x, out=None)
+
+    Inverse to `chdtr` with respect to `v`.
+
+    Returns `v` such that ``chdtr(v, x) == p``.
+
+    Parameters
+    ----------
+    p : array_like
+        Probability that the Chi square random variable is less than
+        or equal to `x`.
+    x : array_like
+        Nonnegative input.
+    out : ndarray, optional
+        Optional output array for the function results.
+
+    Returns
+    -------
+    scalar or ndarray
+        Degrees of freedom.
+
+    See Also
+    --------
+    chdtr, chdtrc, chdtri
+
+    References
+    ----------
+    .. [1] Chi-Square distribution,
+        https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm
+
+    Examples
+    --------
+    >>> import scipy.special as sc
+
+    It inverts `chdtr`.
+
+    >>> p, x = 0.5, 1
+    >>> sc.chdtr(sc.chdtriv(p, x), x)
+    0.5000000000202172
+    >>> v = 1
+    >>> sc.chdtriv(sc.chdtr(v, x), x)
+    1.0000000000000013
+
+    """)
+
+add_newdoc("chndtr",
+    r"""
+    chndtr(x, df, nc, out=None)
+
+    Non-central chi square cumulative distribution function
+
+    The cumulative distribution function is given by:
+
+    .. math::
+
+        P(\chi^{\prime 2} \vert \nu, \lambda) =\sum_{j=0}^{\infty}
+        e^{-\lambda /2}
+        \frac{(\lambda /2)^j}{j!} P(\chi^{\prime 2} \vert \nu + 2j),
+
+    where :math:`\nu > 0` is the degrees of freedom (``df``) and
+    :math:`\lambda \geq 0` is the non-centrality parameter (``nc``).
+
+    Parameters
+    ----------
+    x : array_like
+        Upper bound of the integral; must satisfy ``x >= 0``
+    df : array_like
+        Degrees of freedom; must satisfy ``df > 0``
+    nc : array_like
+        Non-centrality parameter; must satisfy ``nc >= 0``
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    x : scalar or ndarray
+        Value of the non-central chi square cumulative distribution function.
+
+    See Also
+    --------
+    chndtrix, chndtridf, chndtrinc
+
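+    Examples
+    --------
+    A sketch: with ``nc = 0`` the distribution reduces to the central
+    chi square, so `chndtr` should agree with `chdtr`:
+
+    >>> import numpy as np
+    >>> from scipy.special import chndtr, chdtr
+    >>> np.allclose(chndtr(2.0, 3.0, 0.0), chdtr(3.0, 2.0))
+    True
+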
+    """)
+
+add_newdoc("chndtrix",
+    """
+    chndtrix(p, df, nc, out=None)
+
+    Inverse to `chndtr` vs `x`
+
+    Calculated using a search to find a value for `x` that produces the
+    desired value of `p`.
+
+    Parameters
+    ----------
+    p : array_like
+        Probability; must satisfy ``0 <= p < 1``
+    df : array_like
+        Degrees of freedom; must satisfy ``df > 0``
+    nc : array_like
+        Non-centrality parameter; must satisfy ``nc >= 0``
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    x : scalar or ndarray
+        Value so that the probability a non-central Chi square random
+        variable with `df` degrees of freedom and non-centrality `nc` is
+        less than or equal to `x` equals `p`.
+
+    See Also
+    --------
+    chndtr, chndtridf, chndtrinc
+
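+    Examples
+    --------
+    A round-trip sketch through `chndtr`:
+
+    >>> import numpy as np
+    >>> from scipy.special import chndtr, chndtrix
+    >>> p, df, nc = 0.25, 3.0, 1.5
+    >>> x = chndtrix(p, df, nc)
+    >>> np.allclose(chndtr(x, df, nc), p)
+    True
+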
+    """)
+
+add_newdoc("chndtridf",
+    """
+    chndtridf(x, p, nc, out=None)
+
+    Inverse to `chndtr` vs `df`
+
+    Calculated using a search to find a value for `df` that produces the
+    desired value of `p`.
+
+    Parameters
+    ----------
+    x : array_like
+        Upper bound of the integral; must satisfy ``x >= 0``
+    p : array_like
+        Probability; must satisfy ``0 <= p < 1``
+    nc : array_like
+        Non-centrality parameter; must satisfy ``nc >= 0``
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    df : scalar or ndarray
+        Degrees of freedom
+
+    See Also
+    --------
+    chndtr, chndtrix, chndtrinc
+
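+    Examples
+    --------
+    The same round-trip sketch, now recovering `df`:
+
+    >>> import numpy as np
+    >>> from scipy.special import chndtr, chndtridf
+    >>> x, p, nc = 4.0, 0.5, 1.0
+    >>> df = chndtridf(x, p, nc)
+    >>> np.allclose(chndtr(x, df, nc), p)
+    True
+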
+    """)
+
+add_newdoc("chndtrinc",
+    """
+    chndtrinc(x, df, p, out=None)
+
+    Inverse to `chndtr` vs `nc`
+
+    Calculated using a search to find a value for `nc` that produces the
+    desired value of `p`.
+
+    Parameters
+    ----------
+    x : array_like
+        Upper bound of the integral; must satisfy ``x >= 0``
+    df : array_like
+        Degrees of freedom; must satisfy ``df > 0``
+    p : array_like
+        Probability; must satisfy ``0 <= p < 1``
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    nc : scalar or ndarray
+        Non-centrality
+
+    See Also
+    --------
+    chndtr, chndtrix, chndtridf
+
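+    Examples
+    --------
+    The same round-trip sketch, recovering `nc`:
+
+    >>> import numpy as np
+    >>> from scipy.special import chndtr, chndtrinc
+    >>> x, df, p = 4.0, 2.0, 0.5
+    >>> nc = chndtrinc(x, df, p)
+    >>> np.allclose(chndtr(x, df, nc), p)
+    True
+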
+    """)
+
+add_newdoc("cosdg",
+    """
+    cosdg(x, out=None)
+
+    Cosine of the angle `x` given in degrees.
+
+    Parameters
+    ----------
+    x : array_like
+        Angle, given in degrees.
+    out : ndarray, optional
+        Optional output array for the function results.
+
+    Returns
+    -------
+    scalar or ndarray
+        Cosine of the input.
+
+    See Also
+    --------
+    sindg, tandg, cotdg
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import scipy.special as sc
+
+    It is more accurate than using cosine directly.
+
+    >>> x = 90 + 180 * np.arange(3)
+    >>> sc.cosdg(x)
+    array([-0.,  0., -0.])
+    >>> np.cos(x * np.pi / 180)
+    array([ 6.1232340e-17, -1.8369702e-16,  3.0616170e-16])
+
+    """)
+
+add_newdoc("cosm1",
+    """
+    cosm1(x, out=None)
+
+    cos(x) - 1 for use when `x` is near zero.
+
+    Parameters
+    ----------
+    x : array_like
+        Real valued argument.
+    out : ndarray, optional
+        Optional output array for the function results.
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of ``cos(x) - 1``.
+
+    See Also
+    --------
+    expm1, log1p
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import scipy.special as sc
+
+    It is more accurate than computing ``cos(x) - 1`` directly for
+    ``x`` around 0.
+
+    >>> x = 1e-30
+    >>> np.cos(x) - 1
+    0.0
+    >>> sc.cosm1(x)
+    -5.0000000000000005e-61
+
+    """)
+
+add_newdoc("cotdg",
+    """
+    cotdg(x, out=None)
+
+    Cotangent of the angle `x` given in degrees.
+
+    Parameters
+    ----------
+    x : array_like
+        Angle, given in degrees.
+    out : ndarray, optional
+        Optional output array for the function results.
+
+    Returns
+    -------
+    scalar or ndarray
+        Cotangent at the input.
+
+    See Also
+    --------
+    sindg, cosdg, tandg
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import scipy.special as sc
+
+    It is more accurate than using cotangent directly.
+
+    >>> x = 90 + 180 * np.arange(3)
+    >>> sc.cotdg(x)
+    array([0., 0., 0.])
+    >>> 1 / np.tan(x * np.pi / 180)
+    array([6.1232340e-17, 1.8369702e-16, 3.0616170e-16])
+
+    """)
+
+add_newdoc("dawsn",
+    """
+    dawsn(x, out=None)
+
+    Dawson's integral.
+
+    Computes::
+
+        exp(-x**2) * integral(exp(t**2), t=0..x).
+
+    Parameters
+    ----------
+    x : array_like
+        Function parameter.
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    y : scalar or ndarray
+        Value of the integral.
+
+    See Also
+    --------
+    wofz, erf, erfc, erfcx, erfi
+
+    References
+    ----------
+    .. [1] Steven G. Johnson, Faddeeva W function implementation.
+       http://ab-initio.mit.edu/Faddeeva
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import special
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.linspace(-15, 15, num=1000)
+    >>> plt.plot(x, special.dawsn(x))
+    >>> plt.xlabel('$x$')
+    >>> plt.ylabel('$dawsn(x)$')
+    >>> plt.show()
+
+    """)
+
+add_newdoc("ellipe",
+    r"""
+    ellipe(m, out=None)
+
+    Complete elliptic integral of the second kind
+
+    This function is defined as
+
+    .. math:: E(m) = \int_0^{\pi/2} [1 - m \sin(t)^2]^{1/2} dt
+
+    Parameters
+    ----------
+    m : array_like
+        Defines the parameter of the elliptic integral.
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    E : scalar or ndarray
+        Value of the elliptic integral.
+
+    Notes
+    -----
+    Wrapper for the Cephes [1]_ routine `ellpe`.
+
+    For `m > 0` the computation uses the approximation,
+
+    .. math:: E(m) \approx P(1-m) - (1-m) \log(1-m) Q(1-m),
+
+    where :math:`P` and :math:`Q` are tenth-order polynomials.  For
+    `m < 0`, the relation
+
+    .. math:: E(m) = E(m/(m - 1)) \sqrt{1-m}
+
+    is used.
+
+    The parameterization in terms of :math:`m` follows that of section
+    17.2 in [2]_. Other parameterizations in terms of the
+    complementary parameter :math:`1 - m`, modular angle
+    :math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also
+    used, so be careful that you choose the correct parameter.
+
+    The Legendre E integral is related to Carlson's symmetric R_D or R_G
+    functions in multiple ways [3]_. For example,
+
+    .. math:: E(m) = 2 R_G(0, 1-k^2, 1) .
+
+    See Also
+    --------
+    ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
+    ellipk : Complete elliptic integral of the first kind
+    ellipkinc : Incomplete elliptic integral of the first kind
+    ellipeinc : Incomplete elliptic integral of the second kind
+    elliprd : Symmetric elliptic integral of the second kind.
+    elliprg : Completely-symmetric elliptic integral of the second kind.
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+    .. [2] Milton Abramowitz and Irene A. Stegun, eds.
+           Handbook of Mathematical Functions with Formulas,
+           Graphs, and Mathematical Tables. New York: Dover, 1972.
+    .. [3] NIST Digital Library of Mathematical
+           Functions. http://dlmf.nist.gov/, Release 1.0.28 of
+           2020-09-15. See Sec. 19.25(i) https://dlmf.nist.gov/19.25#i
+
+    Examples
+    --------
+    This function is used in finding the circumference of an
+    ellipse with semi-major axis `a` and semi-minor axis `b`.
+
+    >>> import numpy as np
+    >>> from scipy import special
+
+    >>> a = 3.5
+    >>> b = 2.1
+    >>> e_sq = 1.0 - b**2/a**2  # eccentricity squared
+
+    Then the circumference is found using the following:
+
+    >>> C = 4*a*special.ellipe(e_sq)  # circumference formula
+    >>> C
+    17.868899204378693
+
+    When `a` and `b` are the same (meaning eccentricity is 0),
+    this reduces to the circumference of a circle.
+
+    >>> 4*a*special.ellipe(0.0)  # formula for ellipse with a = b
+    21.991148575128552
+    >>> 2*np.pi*a  # formula for circle of radius a
+    21.991148575128552
+
+    """)
+
+add_newdoc("ellipeinc",
+    r"""
+    ellipeinc(phi, m, out=None)
+
+    Incomplete elliptic integral of the second kind
+
+    This function is defined as
+
+    .. math:: E(\phi, m) = \int_0^{\phi} [1 - m \sin(t)^2]^{1/2} dt
+
+    Parameters
+    ----------
+    phi : array_like
+        amplitude of the elliptic integral.
+    m : array_like
+        parameter of the elliptic integral.
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    E : scalar or ndarray
+        Value of the elliptic integral.
+
+    Notes
+    -----
+    Wrapper for the Cephes [1]_ routine `ellie`.
+
+    Computation uses the arithmetic-geometric mean algorithm.
+
+    The parameterization in terms of :math:`m` follows that of section
+    17.2 in [2]_. Other parameterizations in terms of the
+    complementary parameter :math:`1 - m`, modular angle
+    :math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also
+    used, so be careful that you choose the correct parameter.
+
+    The Legendre E incomplete integral can be related to combinations
+    of Carlson's symmetric integrals R_D, R_F, and R_G in multiple
+    ways [3]_. For example, with :math:`c = \csc^2\phi`,
+
+    .. math::
+      E(\phi, m) = R_F(c-1, c-k^2, c)
+        - \frac{1}{3} k^2 R_D(c-1, c-k^2, c) .
+
+    See Also
+    --------
+    ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
+    ellipk : Complete elliptic integral of the first kind
+    ellipkinc : Incomplete elliptic integral of the first kind
+    ellipe : Complete elliptic integral of the second kind
+    elliprd : Symmetric elliptic integral of the second kind.
+    elliprf : Completely-symmetric elliptic integral of the first kind.
+    elliprg : Completely-symmetric elliptic integral of the second kind.
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+    .. [2] Milton Abramowitz and Irene A. Stegun, eds.
+           Handbook of Mathematical Functions with Formulas,
+           Graphs, and Mathematical Tables. New York: Dover, 1972.
+    .. [3] NIST Digital Library of Mathematical
+           Functions. http://dlmf.nist.gov/, Release 1.0.28 of
+           2020-09-15. See Sec. 19.25(i) https://dlmf.nist.gov/19.25#i
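+
+    Examples
+    --------
+    The incomplete integral reduces to the complete integral `ellipe` at
+    ``phi = pi/2`` and to ``phi`` itself for ``m = 0``:
+
+    >>> import numpy as np
+    >>> from scipy import special
+    >>> np.allclose(special.ellipeinc(np.pi/2, 0.5), special.ellipe(0.5))
+    True
+    >>> np.allclose(special.ellipeinc(0.3, 0), 0.3)
+    True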
+    """)
+
+add_newdoc("ellipj",
+    """
+    ellipj(u, m, out=None)
+
+    Jacobian elliptic functions
+
+    Calculates the Jacobian elliptic functions of parameter `m` between
+    0 and 1, and real argument `u`.
+
+    Parameters
+    ----------
+    u : array_like
+        Argument.
+    m : array_like
+        Parameter.
+    out : tuple of ndarray, optional
+        Optional output arrays for the function values
+
+    Returns
+    -------
+    sn, cn, dn, ph : 4-tuple of scalar or ndarray
+        The returned functions::
+
+            sn(u|m), cn(u|m), dn(u|m)
+
+        The value `ph` is such that if `u = ellipkinc(ph, m)`,
+        then `sn(u|m) = sin(ph)` and `cn(u|m) = cos(ph)`.
+
+    Notes
+    -----
+    Wrapper for the Cephes [1]_ routine `ellpj`.
+
+    These functions are periodic, with quarter-period on the real axis
+    equal to the complete elliptic integral `ellipk(m)`.
+
+    Relation to incomplete elliptic integral: If `u = ellipkinc(phi,m)`, then
+    `sn(u|m) = sin(phi)`, and `cn(u|m) = cos(phi)`. Here `phi` is called
+    the amplitude of `u`.
+
+    Computation is by means of the arithmetic-geometric mean algorithm,
+    except when `m` is within 1e-9 of 0 or 1. In the latter case with `m`
+    close to 1, the approximation applies only for `phi < pi/2`.
+
+    See Also
+    --------
+    ellipk : Complete elliptic integral of the first kind
+    ellipkinc : Incomplete elliptic integral of the first kind
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
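+
+    Examples
+    --------
+    For ``m = 0`` the functions reduce to circular functions:
+    ``sn = sin(u)``, ``cn = cos(u)``, ``dn = 1``, and ``ph = u``.
+
+    >>> import numpy as np
+    >>> from scipy import special
+    >>> u = 0.5
+    >>> sn, cn, dn, ph = special.ellipj(u, 0)
+    >>> np.allclose([sn, cn, dn, ph], [np.sin(u), np.cos(u), 1, u])
+    True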
+    """)
+
+add_newdoc("ellipkm1",
+    """
+    ellipkm1(p, out=None)
+
+    Complete elliptic integral of the first kind around `m` = 1
+
+    This function is defined as
+
+    .. math:: K(p) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt
+
+    where `m = 1 - p`.
+
+    Parameters
+    ----------
+    p : array_like
+        Defines the parameter of the elliptic integral as `m = 1 - p`.
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    K : scalar or ndarray
+        Value of the elliptic integral.
+
+    Notes
+    -----
+    Wrapper for the Cephes [1]_ routine `ellpk`.
+
+    For `p <= 1`, computation uses the approximation,
+
+    .. math:: K(p) \\approx P(p) - \\log(p) Q(p),
+
+    where :math:`P` and :math:`Q` are tenth-order polynomials.  The
+    argument `p` is used internally rather than `m` so that the logarithmic
+    singularity at `m = 1` will be shifted to the origin; this preserves
+    maximum accuracy.  For `p > 1`, the identity
+
+    .. math:: K(p) = K(1/p)/\\sqrt{p}
+
+    is used.
+
+    See Also
+    --------
+    ellipk : Complete elliptic integral of the first kind
+    ellipkinc : Incomplete elliptic integral of the first kind
+    ellipe : Complete elliptic integral of the second kind
+    ellipeinc : Incomplete elliptic integral of the second kind
+    elliprf : Completely-symmetric elliptic integral of the first kind.
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
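+
+    Examples
+    --------
+    At ``p = 1`` (that is, ``m = 0``) the integral equals ``ellipk(0)``,
+    and the function remains finite even for very small ``p``, where the
+    subtraction ``1 - p`` in ``ellipk(1 - p)`` would lose all precision:
+
+    >>> import numpy as np
+    >>> from scipy import special
+    >>> np.allclose(special.ellipkm1(1.0), special.ellipk(0.0))
+    True
+    >>> np.isfinite(special.ellipkm1(1e-300))
+    True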
+    """)
+
+add_newdoc("ellipk",
+    r"""
+    ellipk(m, out=None)
+
+    Complete elliptic integral of the first kind.
+
+    This function is defined as
+
+    .. math:: K(m) = \int_0^{\pi/2} [1 - m \sin(t)^2]^{-1/2} dt
+
+    Parameters
+    ----------
+    m : array_like
+        The parameter of the elliptic integral.
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    K : scalar or ndarray
+        Value of the elliptic integral.
+
+    Notes
+    -----
+    For more precision around point m = 1, use `ellipkm1`, which this
+    function calls.
+
+    The parameterization in terms of :math:`m` follows that of section
+    17.2 in [1]_. Other parameterizations in terms of the
+    complementary parameter :math:`1 - m`, modular angle
+    :math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also
+    used, so be careful that you choose the correct parameter.
+
+    The Legendre K integral is related to Carlson's symmetric R_F
+    function by [2]_:
+
+    .. math:: K(m) = R_F(0, 1-k^2, 1) .
+
+    See Also
+    --------
+    ellipkm1 : Complete elliptic integral of the first kind around m = 1
+    ellipkinc : Incomplete elliptic integral of the first kind
+    ellipe : Complete elliptic integral of the second kind
+    ellipeinc : Incomplete elliptic integral of the second kind
+    elliprf : Completely-symmetric elliptic integral of the first kind.
+
+    References
+    ----------
+    .. [1] Milton Abramowitz and Irene A. Stegun, eds.
+           Handbook of Mathematical Functions with Formulas,
+           Graphs, and Mathematical Tables. New York: Dover, 1972.
+    .. [2] NIST Digital Library of Mathematical
+           Functions. http://dlmf.nist.gov/, Release 1.0.28 of
+           2020-09-15. See Sec. 19.25(i) https://dlmf.nist.gov/19.25#i
+
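+    Examples
+    --------
+    ``K(0)`` equals ``pi/2``, and the relation to `elliprf` quoted above
+    can be checked numerically:
+
+    >>> import numpy as np
+    >>> from scipy import special
+    >>> special.ellipk(0)
+    1.5707963267948966
+    >>> m = 0.5
+    >>> np.allclose(special.ellipk(m), special.elliprf(0, 1 - m, 1))
+    True
+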
+    """)
+
+add_newdoc("ellipkinc",
+    r"""
+    ellipkinc(phi, m, out=None)
+
+    Incomplete elliptic integral of the first kind
+
+    This function is defined as
+
+    .. math:: K(\phi, m) = \int_0^{\phi} [1 - m \sin(t)^2]^{-1/2} dt
+
+    This function is also called :math:`F(\phi, m)`.
+
+    Parameters
+    ----------
+    phi : array_like
+        amplitude of the elliptic integral
+    m : array_like
+        parameter of the elliptic integral
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    K : scalar or ndarray
+        Value of the elliptic integral
+
+    Notes
+    -----
+    Wrapper for the Cephes [1]_ routine `ellik`.  The computation is
+    carried out using the arithmetic-geometric mean algorithm.
+
+    The parameterization in terms of :math:`m` follows that of section
+    17.2 in [2]_. Other parameterizations in terms of the
+    complementary parameter :math:`1 - m`, modular angle
+    :math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also
+    used, so be careful that you choose the correct parameter.
+
+    The Legendre K incomplete integral (or F integral) is related to
+    Carlson's symmetric R_F function [3]_.
+    Setting :math:`c = \csc^2\phi`,
+
+    .. math:: F(\phi, m) = R_F(c-1, c-k^2, c) .
+
+    See Also
+    --------
+    ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
+    ellipk : Complete elliptic integral of the first kind
+    ellipe : Complete elliptic integral of the second kind
+    ellipeinc : Incomplete elliptic integral of the second kind
+    elliprf : Completely-symmetric elliptic integral of the first kind.
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+    .. [2] Milton Abramowitz and Irene A. Stegun, eds.
+           Handbook of Mathematical Functions with Formulas,
+           Graphs, and Mathematical Tables. New York: Dover, 1972.
+    .. [3] NIST Digital Library of Mathematical
+           Functions. http://dlmf.nist.gov/, Release 1.0.28 of
+           2020-09-15. See Sec. 19.25(i) https://dlmf.nist.gov/19.25#i
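+
+    Examples
+    --------
+    At ``phi = pi/2`` the incomplete integral reduces to the complete
+    integral `ellipk`, and for ``m = 0`` it reduces to ``phi``:
+
+    >>> import numpy as np
+    >>> from scipy import special
+    >>> np.allclose(special.ellipkinc(np.pi/2, 0.5), special.ellipk(0.5))
+    True
+    >>> np.allclose(special.ellipkinc(0.3, 0), 0.3)
+    True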
+    """)
+
+add_newdoc(
+    "elliprc",
+    r"""
+    elliprc(x, y, out=None)
+
+    Degenerate symmetric elliptic integral.
+
+    The function RC is defined as [1]_
+
+    .. math::
+
+        R_{\mathrm{C}}(x, y) =
+           \frac{1}{2} \int_0^{+\infty} (t + x)^{-1/2} (t + y)^{-1} dt
+           = R_{\mathrm{F}}(x, y, y)
+
+    Parameters
+    ----------
+    x, y : array_like
+        Real or complex input parameters. `x` can be any number in the
+        complex plane cut along the negative real axis. `y` must be non-zero.
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    R : scalar or ndarray
+        Value of the integral. If `y` is real and negative, the Cauchy
+        principal value is returned. If both of `x` and `y` are real, the
+        return value is real. Otherwise, the return value is complex.
+
+    Notes
+    -----
+    RC is a degenerate case of the symmetric integral RF: ``elliprc(x, y) ==
+    elliprf(x, y, y)``. It is an elementary function rather than an elliptic
+    integral.
+
+    The code implements Carlson's algorithm based on the duplication theorems
+    and series expansion up to the 7th order. [2]_
+
+    .. versionadded:: 1.8.0
+
+    See Also
+    --------
+    elliprf : Completely-symmetric elliptic integral of the first kind.
+    elliprd : Symmetric elliptic integral of the second kind.
+    elliprg : Completely-symmetric elliptic integral of the second kind.
+    elliprj : Symmetric elliptic integral of the third kind.
+
+    References
+    ----------
+    .. [1] B. C. Carlson, ed., Chapter 19 in "Digital Library of Mathematical
+           Functions," NIST, US Dept. of Commerce.
+           https://dlmf.nist.gov/19.16.E6
+    .. [2] B. C. Carlson, "Numerical computation of real or complex elliptic
+           integrals," Numer. Algorithm, vol. 10, no. 1, pp. 13-26, 1995.
+           https://arxiv.org/abs/math/9409227
+           https://doi.org/10.1007/BF02198293
+
+    Examples
+    --------
+    Basic homogeneity property:
+
+    >>> import numpy as np
+    >>> from scipy.special import elliprc
+
+    >>> x = 1.2 + 3.4j
+    >>> y = 5.
+    >>> scale = 0.3 + 0.4j
+    >>> elliprc(scale*x, scale*y)
+    (0.5484493976710874-0.4169557678995833j)
+
+    >>> elliprc(x, y)/np.sqrt(scale)
+    (0.5484493976710874-0.41695576789958333j)
+
+    When the two arguments coincide, the integral is particularly
+    simple:
+
+    >>> x = 1.2 + 3.4j
+    >>> elliprc(x, x)
+    (0.4299173120614631-0.3041729818745595j)
+
+    >>> 1/np.sqrt(x)
+    (0.4299173120614631-0.30417298187455954j)
+
+    Another simple case: the first argument vanishes:
+
+    >>> y = 1.2 + 3.4j
+    >>> elliprc(0, y)
+    (0.6753125346116815-0.47779380263880866j)
+
+    >>> np.pi/2/np.sqrt(y)
+    (0.6753125346116815-0.4777938026388088j)
+
+    When `x` and `y` are both positive, we can express
+    :math:`R_C(x,y)` in terms of more elementary functions.  For the
+    case :math:`0 \le x < y`,
+
+    >>> x = 3.2
+    >>> y = 6.
+    >>> elliprc(x, y)
+    0.44942991498453444
+
+    >>> np.arctan(np.sqrt((y-x)/x))/np.sqrt(y-x)
+    0.44942991498453433
+
+    And for the case :math:`0 \le y < x`,
+
+    >>> x = 6.
+    >>> y = 3.2
+    >>> elliprc(x,y)
+    0.4989837501576147
+
+    >>> np.log((np.sqrt(x)+np.sqrt(x-y))/np.sqrt(y))/np.sqrt(x-y)
+    0.49898375015761476
+
+    """)
+
+add_newdoc(
+    "elliprd",
+    r"""
+    elliprd(x, y, z, out=None)
+
+    Symmetric elliptic integral of the second kind.
+
+    The function RD is defined as [1]_
+
+    .. math::
+
+        R_{\mathrm{D}}(x, y, z) =
+           \frac{3}{2} \int_0^{+\infty} [(t + x) (t + y)]^{-1/2} (t + z)^{-3/2}
+           dt
+
+    Parameters
+    ----------
+    x, y, z : array_like
+        Real or complex input parameters. `x` or `y` can be any number in the
+        complex plane cut along the negative real axis, but at most one of them
+        can be zero, while `z` must be non-zero.
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    R : scalar or ndarray
+        Value of the integral. If all of `x`, `y`, and `z` are real, the
+        return value is real. Otherwise, the return value is complex.
+
+    Notes
+    -----
+    RD is a degenerate case of the elliptic integral RJ: ``elliprd(x, y, z) ==
+    elliprj(x, y, z, z)``.
+
+    The code implements Carlson's algorithm based on the duplication theorems
+    and series expansion up to the 7th order. [2]_
+
+    .. versionadded:: 1.8.0
+
+    See Also
+    --------
+    elliprc : Degenerate symmetric elliptic integral.
+    elliprf : Completely-symmetric elliptic integral of the first kind.
+    elliprg : Completely-symmetric elliptic integral of the second kind.
+    elliprj : Symmetric elliptic integral of the third kind.
+
+    References
+    ----------
+    .. [1] B. C. Carlson, ed., Chapter 19 in "Digital Library of Mathematical
+           Functions," NIST, US Dept. of Commerce.
+           https://dlmf.nist.gov/19.16.E5
+    .. [2] B. C. Carlson, "Numerical computation of real or complex elliptic
+           integrals," Numer. Algorithm, vol. 10, no. 1, pp. 13-26, 1995.
+           https://arxiv.org/abs/math/9409227
+           https://doi.org/10.1007/BF02198293
+
+    Examples
+    --------
+    Basic homogeneity property:
+
+    >>> import numpy as np
+    >>> from scipy.special import elliprd
+
+    >>> x = 1.2 + 3.4j
+    >>> y = 5.
+    >>> z = 6.
+    >>> scale = 0.3 + 0.4j
+    >>> elliprd(scale*x, scale*y, scale*z)
+    (-0.03703043835680379-0.24500934665683802j)
+
+    >>> elliprd(x, y, z)*np.power(scale, -1.5)
+    (-0.0370304383568038-0.24500934665683805j)
+
+    All three arguments coincide:
+
+    >>> x = 1.2 + 3.4j
+    >>> elliprd(x, x, x)
+    (-0.03986825876151896-0.14051741840449586j)
+
+    >>> np.power(x, -1.5)
+    (-0.03986825876151894-0.14051741840449583j)
+
+    The so-called "second lemniscate constant":
+
+    >>> elliprd(0, 2, 1)/3
+    0.5990701173677961
+
+    >>> from scipy.special import gamma
+    >>> gamma(0.75)**2/np.sqrt(2*np.pi)
+    0.5990701173677959
+
+    """)
+
+add_newdoc(
+    "elliprf",
+    r"""
+    elliprf(x, y, z, out=None)
+
+    Completely-symmetric elliptic integral of the first kind.
+
+    The function RF is defined as [1]_
+
+    .. math::
+
+        R_{\mathrm{F}}(x, y, z) =
+           \frac{1}{2} \int_0^{+\infty} [(t + x) (t + y) (t + z)]^{-1/2} dt
+
+    Parameters
+    ----------
+    x, y, z : array_like
+        Real or complex input parameters. `x`, `y`, or `z` can be any number in
+        the complex plane cut along the negative real axis, but at most one of
+        them can be zero.
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    R : scalar or ndarray
+        Value of the integral. If all of `x`, `y`, and `z` are real, the return
+        value is real. Otherwise, the return value is complex.
+
+    Notes
+    -----
+    The code implements Carlson's algorithm based on the duplication theorems
+    and series expansion up to the 7th order (cf.:
+    https://dlmf.nist.gov/19.36.i) and the AGM algorithm for the complete
+    integral. [2]_
+
+    .. versionadded:: 1.8.0
+
+    See Also
+    --------
+    elliprc : Degenerate symmetric integral.
+    elliprd : Symmetric elliptic integral of the second kind.
+    elliprg : Completely-symmetric elliptic integral of the second kind.
+    elliprj : Symmetric elliptic integral of the third kind.
+
+    References
+    ----------
+    .. [1] B. C. Carlson, ed., Chapter 19 in "Digital Library of Mathematical
+           Functions," NIST, US Dept. of Commerce.
+           https://dlmf.nist.gov/19.16.E1
+    .. [2] B. C. Carlson, "Numerical computation of real or complex elliptic
+           integrals," Numer. Algorithm, vol. 10, no. 1, pp. 13-26, 1995.
+           https://arxiv.org/abs/math/9409227
+           https://doi.org/10.1007/BF02198293
+
+    Examples
+    --------
+    Basic homogeneity property:
+
+    >>> import numpy as np
+    >>> from scipy.special import elliprf
+
+    >>> x = 1.2 + 3.4j
+    >>> y = 5.
+    >>> z = 6.
+    >>> scale = 0.3 + 0.4j
+    >>> elliprf(scale*x, scale*y, scale*z)
+    (0.5328051227278146-0.4008623567957094j)
+
+    >>> elliprf(x, y, z)/np.sqrt(scale)
+    (0.5328051227278147-0.4008623567957095j)
+
+    All three arguments coincide:
+
+    >>> x = 1.2 + 3.4j
+    >>> elliprf(x, x, x)
+    (0.42991731206146316-0.30417298187455954j)
+
+    >>> 1/np.sqrt(x)
+    (0.4299173120614631-0.30417298187455954j)
+
+    The so-called "first lemniscate constant":
+
+    >>> elliprf(0, 1, 2)
+    1.3110287771460598
+
+    >>> from scipy.special import gamma
+    >>> gamma(0.25)**2/(4*np.sqrt(2*np.pi))
+    1.3110287771460598
+
+    """)
+
+add_newdoc(
+    "elliprg",
+    r"""
+    elliprg(x, y, z, out=None)
+
+    Completely-symmetric elliptic integral of the second kind.
+
+    The function RG is defined as [1]_
+
+    .. math::
+
+        R_{\mathrm{G}}(x, y, z) =
+           \frac{1}{4} \int_0^{+\infty} [(t + x) (t + y) (t + z)]^{-1/2}
+           \left(\frac{x}{t + x} + \frac{y}{t + y} + \frac{z}{t + z}\right) t
+           dt
+
+    Parameters
+    ----------
+    x, y, z : array_like
+        Real or complex input parameters. `x`, `y`, or `z` can be any number in
+        the complex plane cut along the negative real axis.
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    R : scalar or ndarray
+        Value of the integral. If all of `x`, `y`, and `z` are real, the return
+        value is real. Otherwise, the return value is complex.
+
+    Notes
+    -----
+    The implementation uses the relation [1]_
+
+    .. math::
+
+        2 R_{\mathrm{G}}(x, y, z) =
+           z R_{\mathrm{F}}(x, y, z) -
+           \frac{1}{3} (x - z) (y - z) R_{\mathrm{D}}(x, y, z) +
+           \sqrt{\frac{x y}{z}}
+
+    and the symmetry of `x`, `y`, `z` when at least one non-zero parameter can
+    be chosen as the pivot. When one of the arguments is close to zero, the AGM
+    method is applied instead. Other special cases are computed following Ref.
+    [2]_.
+
+    .. versionadded:: 1.8.0
+
+    See Also
+    --------
+    elliprc : Degenerate symmetric integral.
+    elliprd : Symmetric elliptic integral of the second kind.
+    elliprf : Completely-symmetric elliptic integral of the first kind.
+    elliprj : Symmetric elliptic integral of the third kind.
+
+    References
+    ----------
+    .. [1] B. C. Carlson, "Numerical computation of real or complex elliptic
+           integrals," Numer. Algorithm, vol. 10, no. 1, pp. 13-26, 1995.
+           https://arxiv.org/abs/math/9409227
+           https://doi.org/10.1007/BF02198293
+    .. [2] B. C. Carlson, ed., Chapter 19 in "Digital Library of Mathematical
+           Functions," NIST, US Dept. of Commerce.
+           https://dlmf.nist.gov/19.16.E1
+           https://dlmf.nist.gov/19.20.ii
+
+    Examples
+    --------
+    Basic homogeneity property:
+
+    >>> import numpy as np
+    >>> from scipy.special import elliprg
+
+    >>> x = 1.2 + 3.4j
+    >>> y = 5.
+    >>> z = 6.
+    >>> scale = 0.3 + 0.4j
+    >>> elliprg(scale*x, scale*y, scale*z)
+    (1.195936862005246+0.8470988320464167j)
+
+    >>> elliprg(x, y, z)*np.sqrt(scale)
+    (1.195936862005246+0.8470988320464165j)
+
+    Simplifications:
+
+    >>> elliprg(0, y, y)
+    1.756203682760182
+
+    >>> 0.25*np.pi*np.sqrt(y)
+    1.7562036827601817
+
+    >>> elliprg(0, 0, z)
+    1.224744871391589
+
+    >>> 0.5*np.sqrt(z)
+    1.224744871391589
+
+    The surface area of a triaxial ellipsoid with semiaxes ``a``, ``b``, and
+    ``c`` is given by
+
+    .. math::
+
+        S = 4 \pi a b c R_{\mathrm{G}}(1 / a^2, 1 / b^2, 1 / c^2).
+
+    >>> def ellipsoid_area(a, b, c):
+    ...     r = 4.0 * np.pi * a * b * c
+    ...     return r * elliprg(1.0 / (a * a), 1.0 / (b * b), 1.0 / (c * c))
+    >>> print(ellipsoid_area(1, 3, 5))
+    108.62688289491807
+    """)
+
+add_newdoc(
+    "elliprj",
+    r"""
+    elliprj(x, y, z, p, out=None)
+
+    Symmetric elliptic integral of the third kind.
+
+    The function RJ is defined as [1]_
+
+    .. math::
+
+        R_{\mathrm{J}}(x, y, z, p) =
+           \frac{3}{2} \int_0^{+\infty} [(t + x) (t + y) (t + z)]^{-1/2}
+           (t + p)^{-1} dt
+
+    .. warning::
+        This function should be considered experimental when the inputs are
+        unbalanced.  Check correctness with another independent implementation.
+
+    Parameters
+    ----------
+    x, y, z, p : array_like
+        Real or complex input parameters. `x`, `y`, or `z` are numbers in
+        the complex plane cut along the negative real axis (subject to further
+        constraints, see Notes), and at most one of them can be zero. `p` must
+        be non-zero.
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    R : scalar or ndarray
+        Value of the integral. If all of `x`, `y`, `z`, and `p` are real, the
+        return value is real. Otherwise, the return value is complex.
+
+        If `p` is real and negative, while `x`, `y`, and `z` are real,
+        non-negative, and at most one of them is zero, the Cauchy principal
+        value is returned. [1]_ [2]_
+
+    Notes
+    -----
+    The code implements Carlson's algorithm based on the duplication theorems
+    and series expansion up to the 7th order. [3]_ The algorithm is slightly
+    different from its earlier incarnation as it appears in [1]_, in that the
+    call to `elliprc` (or ``atan``/``atanh``, see [4]_) is no longer needed in
+    the inner loop. Asymptotic approximations are used where arguments differ
+    widely in the order of magnitude. [5]_
+
+    The input values are subject to certain sufficient but not necessary
+    constraints when the input arguments are complex. Notably, ``x``, ``y``, and
+    ``z`` must have non-negative real parts, unless two of them are
+    non-negative and complex-conjugates to each other while the other is a real
+    non-negative number. [1]_ If the inputs do not satisfy the sufficient
+    condition described in Ref. [1]_ they are rejected outright with the output
+    set to NaN.
+
+    In the case where one of ``x``, ``y``, and ``z`` is equal to ``p``, the
+    function ``elliprd`` should be preferred because of its less restrictive
+    domain.
+
+    .. versionadded:: 1.8.0
+
+    See Also
+    --------
+    elliprc : Degenerate symmetric integral.
+    elliprd : Symmetric elliptic integral of the second kind.
+    elliprf : Completely-symmetric elliptic integral of the first kind.
+    elliprg : Completely-symmetric elliptic integral of the second kind.
+
+    References
+    ----------
+    .. [1] B. C. Carlson, "Numerical computation of real or complex elliptic
+           integrals," Numer. Algorithm, vol. 10, no. 1, pp. 13-26, 1995.
+           https://arxiv.org/abs/math/9409227
+           https://doi.org/10.1007/BF02198293
+    .. [2] B. C. Carlson, ed., Chapter 19 in "Digital Library of Mathematical
+           Functions," NIST, US Dept. of Commerce.
+           https://dlmf.nist.gov/19.20.iii
+    .. [3] B. C. Carlson, J. FitzSimmons, "Reduction Theorems for Elliptic
+           Integrands with the Square Root of Two Quadratic Factors," J.
+           Comput. Appl. Math., vol. 118, nos. 1-2, pp. 71-85, 2000.
+           https://doi.org/10.1016/S0377-0427(00)00282-X
+    .. [4] F. Johansson, "Numerical Evaluation of Elliptic Functions, Elliptic
+           Integrals and Modular Forms," in J. Blumlein, C. Schneider, P.
+           Paule, eds., "Elliptic Integrals, Elliptic Functions and Modular
+           Forms in Quantum Field Theory," pp. 269-293, 2019 (Cham,
+           Switzerland: Springer Nature Switzerland)
+           https://arxiv.org/abs/1806.06725
+           https://doi.org/10.1007/978-3-030-04480-0
+    .. [5] B. C. Carlson, J. L. Gustafson, "Asymptotic Approximations for
+           Symmetric Elliptic Integrals," SIAM J. Math. Anls., vol. 25, no. 2,
+           pp. 288-303, 1994.
+           https://arxiv.org/abs/math/9310223
+           https://doi.org/10.1137/S0036141092228477
+
+    Examples
+    --------
+    Basic homogeneity property:
+
+    >>> import numpy as np
+    >>> from scipy.special import elliprj
+
+    >>> x = 1.2 + 3.4j
+    >>> y = 5.
+    >>> z = 6.
+    >>> p = 7.
+    >>> scale = 0.3 - 0.4j
+    >>> elliprj(scale*x, scale*y, scale*z, scale*p)
+    (0.10834905565679157+0.19694950747103812j)
+
+    >>> elliprj(x, y, z, p)*np.power(scale, -1.5)
+    (0.10834905565679556+0.19694950747103854j)
+
+    Reduction to simpler elliptic integral:
+
+    >>> elliprj(x, y, z, z)
+    (0.08288462362195129-0.028376809745123258j)
+
+    >>> from scipy.special import elliprd
+    >>> elliprd(x, y, z)
+    (0.08288462362195136-0.028376809745123296j)
+
+    All arguments coincide:
+
+    >>> elliprj(x, x, x, x)
+    (-0.03986825876151896-0.14051741840449586j)
+
+    >>> np.power(x, -1.5)
+    (-0.03986825876151894-0.14051741840449583j)
+
+    """)
+
+add_newdoc("entr",
+    r"""
+    entr(x, out=None)
+
+    Elementwise function for computing entropy.
+
+    .. math:: \text{entr}(x) = \begin{cases} - x \log(x) & x > 0  \\ 0 & x = 0 \\ -\infty & \text{otherwise} \end{cases}
+
+    Parameters
+    ----------
+    x : ndarray
+        Input array.
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    res : scalar or ndarray
+        The value of the elementwise entropy function at the given points `x`.
+
+    See Also
+    --------
+    kl_div, rel_entr, scipy.stats.entropy
+
+    Notes
+    -----
+    .. versionadded:: 0.15.0
+
+    This function is concave.
+
+    The origin of this function is in convex programming; see [1]_.
+    Given a probability distribution :math:`p_1, \ldots, p_n`,
+    the definition of entropy in the context of *information theory* is
+
+    .. math::
+
+        \sum_{i = 1}^n \mathrm{entr}(p_i).
+
+    To compute the latter quantity, use `scipy.stats.entropy`.
+
+    References
+    ----------
+    .. [1] Boyd, Stephen and Lieven Vandenberghe. *Convex optimization*.
+           Cambridge University Press, 2004.
+           :doi:`https://doi.org/10.1017/CBO9780511804441`
+
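+    Examples
+    --------
+    A quick check of the three branches of the definition:
+
+    >>> import numpy as np
+    >>> from scipy.special import entr
+    >>> np.allclose(entr([0.5, 0.0, -1.0]), [0.5*np.log(2), 0, -np.inf])
+    True
+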
+    """)
+
+add_newdoc("erf",
+    """
+    erf(z, out=None)
+
+    Returns the error function of complex argument.
+
+    It is defined as ``2/sqrt(pi)*integral(exp(-t**2), t=0..z)``.
+
+    Parameters
+    ----------
+    z : array_like
+        Real or complex valued argument.
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    res : scalar or ndarray
+        The values of the error function at the given points `z`.
+
+    See Also
+    --------
+    erfc, erfinv, erfcinv, wofz, erfcx, erfi
+
+    Notes
+    -----
+    The cumulative distribution function of the standard normal distribution
+    is given by ``Phi(z) = 1/2[1 + erf(z/sqrt(2))]``.
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Error_function
+    .. [2] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover,
+        1972. http://www.math.sfu.ca/~cbm/aands/page_297.htm
+    .. [3] Steven G. Johnson, Faddeeva W function implementation.
+       http://ab-initio.mit.edu/Faddeeva
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import special
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.linspace(-3, 3)
+    >>> plt.plot(x, special.erf(x))
+    >>> plt.xlabel('$x$')
+    >>> plt.ylabel('$erf(x)$')
+    >>> plt.show()
+
+    """)
+
+add_newdoc("erfc",
+    """
+    erfc(x, out=None)
+
+    Complementary error function, ``1 - erf(x)``.
+
+    Parameters
+    ----------
+    x : array_like
+        Real or complex valued argument
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the complementary error function
+
+    See Also
+    --------
+    erf, erfi, erfcx, dawsn, wofz
+
+    References
+    ----------
+    .. [1] Steven G. Johnson, Faddeeva W function implementation.
+       http://ab-initio.mit.edu/Faddeeva
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import special
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.linspace(-3, 3)
+    >>> plt.plot(x, special.erfc(x))
+    >>> plt.xlabel('$x$')
+    >>> plt.ylabel('$erfc(x)$')
+    >>> plt.show()
+
+    """)
+
+add_newdoc("erfi",
+    """
+    erfi(z, out=None)
+
+    Imaginary error function, ``-i erf(i z)``.
+
+    Parameters
+    ----------
+    z : array_like
+        Real or complex valued argument
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the imaginary error function
+
+    See Also
+    --------
+    erf, erfc, erfcx, dawsn, wofz
+
+    Notes
+    -----
+
+    .. versionadded:: 0.12.0
+
+    References
+    ----------
+    .. [1] Steven G. Johnson, Faddeeva W function implementation.
+       http://ab-initio.mit.edu/Faddeeva
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import special
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.linspace(-3, 3)
+    >>> plt.plot(x, special.erfi(x))
+    >>> plt.xlabel('$x$')
+    >>> plt.ylabel('$erfi(x)$')
+    >>> plt.show()
+
+    """)
+
+add_newdoc("erfcx",
+    """
+    erfcx(x, out=None)
+
+    Scaled complementary error function, ``exp(x**2) * erfc(x)``.
+
+    Parameters
+    ----------
+    x : array_like
+        Real or complex valued argument
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the scaled complementary error function
+
+    See Also
+    --------
+    erf, erfc, erfi, dawsn, wofz
+
+    Notes
+    -----
+
+    .. versionadded:: 0.12.0
+
+    References
+    ----------
+    .. [1] Steven G. Johnson, Faddeeva W function implementation.
+       http://ab-initio.mit.edu/Faddeeva
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import special
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.linspace(-3, 3)
+    >>> plt.plot(x, special.erfcx(x))
+    >>> plt.xlabel('$x$')
+    >>> plt.ylabel('$erfcx(x)$')
+    >>> plt.show()
+
+    """)
+
+add_newdoc(
+    "erfinv",
+    """
+    erfinv(y, out=None)
+
+    Inverse of the error function.
+
+    Computes the inverse of the error function.
+
+    In the complex domain, there is no unique complex number ``w`` satisfying
+    ``erf(w) = z``, so a true inverse function would be multivalued. When the
+    domain is restricted to the reals, ``-1 < x < 1``, there is a unique real
+    number satisfying ``erf(erfinv(x)) = x``.
+
+    Parameters
+    ----------
+    y : ndarray
+        Argument at which to evaluate. Domain: [-1, 1]
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    erfinv : scalar or ndarray
+        The inverse of erf of y, element-wise
+
+    See Also
+    --------
+    erf : Error function of a complex argument
+    erfc : Complementary error function, ``1 - erf(x)``
+    erfcinv : Inverse of the complementary error function
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.special import erfinv, erf
+
+    >>> erfinv(0.5)
+    0.4769362762044699
+
+    >>> y = np.linspace(-1.0, 1.0, num=9)
+    >>> x = erfinv(y)
+    >>> x
+    array([       -inf, -0.81341985, -0.47693628, -0.22531206,  0.        ,
+            0.22531206,  0.47693628,  0.81341985,         inf])
+
+    Verify that ``erf(erfinv(y))`` is ``y``.
+
+    >>> erf(x)
+    array([-1.  , -0.75, -0.5 , -0.25,  0.  ,  0.25,  0.5 ,  0.75,  1.  ])
+
+    Plot the function:
+
+    >>> y = np.linspace(-1, 1, 200)
+    >>> fig, ax = plt.subplots()
+    >>> ax.plot(y, erfinv(y))
+    >>> ax.grid(True)
+    >>> ax.set_xlabel('y')
+    >>> ax.set_title('erfinv(y)')
+    >>> plt.show()
+
+    """)
+
+add_newdoc(
+    "erfcinv",
+    """
+    erfcinv(y, out=None)
+
+    Inverse of the complementary error function.
+
+    Computes the inverse of the complementary error function.
+
+    In the complex domain, there is no unique complex number ``w`` satisfying
+    ``erfc(w) = z``, so a true inverse function would be multivalued. When the
+    domain is restricted to the reals, ``0 < x < 2``, there is a unique real
+    number satisfying ``erfc(erfcinv(x)) = x``.
+
+    It is related to the inverse of the error function by
+    ``erfcinv(1 - x) = erfinv(x)``.
+
+    Parameters
+    ----------
+    y : ndarray
+        Argument at which to evaluate. Domain: [0, 2]
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    erfcinv : scalar or ndarray
+        The inverse of erfc of y, element-wise
+
+    See Also
+    --------
+    erf : Error function of a complex argument
+    erfc : Complementary error function, ``1 - erf(x)``
+    erfinv : Inverse of the error function
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.special import erfcinv
+
+    >>> erfcinv(0.5)
+    0.4769362762044699
+
+    >>> y = np.linspace(0.0, 2.0, num=11)
+    >>> erfcinv(y)
+    array([        inf,  0.9061938 ,  0.59511608,  0.37080716,  0.17914345,
+           -0.        , -0.17914345, -0.37080716, -0.59511608, -0.9061938 ,
+                  -inf])
+
+    Plot the function:
+
+    >>> y = np.linspace(0, 2, 200)
+    >>> fig, ax = plt.subplots()
+    >>> ax.plot(y, erfcinv(y))
+    >>> ax.grid(True)
+    >>> ax.set_xlabel('y')
+    >>> ax.set_title('erfcinv(y)')
+    >>> plt.show()
+
+    """)
+
+add_newdoc("eval_jacobi",
+    r"""
+    eval_jacobi(n, alpha, beta, x, out=None)
+
+    Evaluate Jacobi polynomial at a point.
+
+    The Jacobi polynomials can be defined via the Gauss hypergeometric
+    function :math:`{}_2F_1` as
+
+    .. math::
+
+        P_n^{(\alpha, \beta)}(x) = \frac{(\alpha + 1)_n}{\Gamma(n + 1)}
+          {}_2F_1(-n, 1 + \alpha + \beta + n; \alpha + 1; (1 - x)/2)
+
+    where :math:`(\cdot)_n` is the Pochhammer symbol; see `poch`. When
+    :math:`n` is an integer the result is a polynomial of degree
+    :math:`n`. See 22.5.42 in [AS]_ for details.
+
+    Parameters
+    ----------
+    n : array_like
+        Degree of the polynomial. If not an integer the result is
+        determined via the relation to the Gauss hypergeometric
+        function.
+    alpha : array_like
+        Parameter
+    beta : array_like
+        Parameter
+    x : array_like
+        Points at which to evaluate the polynomial
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    P : scalar or ndarray
+        Values of the Jacobi polynomial
+
+    See Also
+    --------
+    roots_jacobi : roots and quadrature weights of Jacobi polynomials
+    jacobi : Jacobi polynomial object
+    hyp2f1 : Gauss hypergeometric function
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
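+    Examples
+    --------
+    For ``alpha = beta = 0`` the Jacobi polynomials reduce to the
+    Legendre polynomials:
+
+    >>> import numpy as np
+    >>> import scipy.special as sc
+    >>> x = np.linspace(-1, 1, 5)
+    >>> np.allclose(sc.eval_jacobi(3, 0, 0, x), sc.eval_legendre(3, x))
+    True
+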
+    """)
+
+add_newdoc("eval_sh_jacobi",
+    r"""
+    eval_sh_jacobi(n, p, q, x, out=None)
+
+    Evaluate shifted Jacobi polynomial at a point.
+
+    Defined by
+
+    .. math::
+
+        G_n^{(p, q)}(x)
+          = \binom{2n + p - 1}{n}^{-1} P_n^{(p - q, q - 1)}(2x - 1),
+
+    where :math:`P_n^{(\cdot, \cdot)}` is the n-th Jacobi
+    polynomial. See 22.5.2 in [AS]_ for details.
+
+    Parameters
+    ----------
+    n : int
+        Degree of the polynomial. If not an integer, the result is
+        determined via the relation to `binom` and `eval_jacobi`.
+    p : float
+        Parameter
+    q : float
+        Parameter
+    x : float
+        Points at which to evaluate the shifted Jacobi polynomial
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    G : scalar or ndarray
+        Values of the shifted Jacobi polynomial.
+
+    See Also
+    --------
+    roots_sh_jacobi : roots and quadrature weights of shifted Jacobi
+                      polynomials
+    sh_jacobi : shifted Jacobi polynomial object
+    eval_jacobi : evaluate Jacobi polynomials
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
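+    Examples
+    --------
+    A numerical check of the defining relation, using `binom` and
+    `eval_jacobi` with illustrative parameter values:
+
+    >>> import numpy as np
+    >>> import scipy.special as sc
+    >>> n, p, q, x = 2, 2.0, 1.5, 0.3
+    >>> G = sc.eval_jacobi(n, p - q, q - 1, 2*x - 1) / sc.binom(2*n + p - 1, n)
+    >>> np.allclose(sc.eval_sh_jacobi(n, p, q, x), G)
+    True
+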
+    """)
+
+add_newdoc("eval_gegenbauer",
+    r"""
+    eval_gegenbauer(n, alpha, x, out=None)
+
+    Evaluate Gegenbauer polynomial at a point.
+
+    The Gegenbauer polynomials can be defined via the Gauss
+    hypergeometric function :math:`{}_2F_1` as
+
+    .. math::
+
+        C_n^{(\alpha)}(x) = \frac{(2\alpha)_n}{\Gamma(n + 1)}
+          {}_2F_1(-n, 2\alpha + n; \alpha + 1/2; (1 - x)/2).
+
+    When :math:`n` is an integer the result is a polynomial of degree
+    :math:`n`. See 22.5.46 in [AS]_ for details.
+
+    Parameters
+    ----------
+    n : array_like
+        Degree of the polynomial. If not an integer, the result is
+        determined via the relation to the Gauss hypergeometric
+        function.
+    alpha : array_like
+        Parameter
+    x : array_like
+        Points at which to evaluate the Gegenbauer polynomial
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    C : scalar or ndarray
+        Values of the Gegenbauer polynomial
+
+    See Also
+    --------
+    roots_gegenbauer : roots and quadrature weights of Gegenbauer
+                       polynomials
+    gegenbauer : Gegenbauer polynomial object
+    hyp2f1 : Gauss hypergeometric function
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
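+    Examples
+    --------
+    For ``alpha = 1`` the Gegenbauer polynomials reduce to the Chebyshev
+    polynomials of the second kind:
+
+    >>> import numpy as np
+    >>> import scipy.special as sc
+    >>> x = np.linspace(-1, 1, 5)
+    >>> np.allclose(sc.eval_gegenbauer(3, 1, x), sc.eval_chebyu(3, x))
+    True
+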
+    """)
+
+add_newdoc("eval_chebyt",
+    r"""
+    eval_chebyt(n, x, out=None)
+
+    Evaluate Chebyshev polynomial of the first kind at a point.
+
+    The Chebyshev polynomials of the first kind can be defined via the
+    Gauss hypergeometric function :math:`{}_2F_1` as
+
+    .. math::
+
+        T_n(x) = {}_2F_1(n, -n; 1/2; (1 - x)/2).
+
+    When :math:`n` is an integer the result is a polynomial of degree
+    :math:`n`. See 22.5.47 in [AS]_ for details.
+
+    Parameters
+    ----------
+    n : array_like
+        Degree of the polynomial. If not an integer, the result is
+        determined via the relation to the Gauss hypergeometric
+        function.
+    x : array_like
+        Points at which to evaluate the Chebyshev polynomial
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    T : scalar or ndarray
+        Values of the Chebyshev polynomial
+
+    See Also
+    --------
+    roots_chebyt : roots and quadrature weights of Chebyshev
+                   polynomials of the first kind
+    chebyt : Chebyshev polynomial object
+    eval_chebyu : evaluate Chebyshev polynomials of the second kind
+    hyp2f1 : Gauss hypergeometric function
+    numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
+
+    Notes
+    -----
+    This routine is numerically stable for `x` in ``[-1, 1]`` at least
+    up to order ``10000``.
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
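+    Examples
+    --------
+    The identity ``T_n(cos(theta)) = cos(n*theta)`` provides a quick
+    sanity check:
+
+    >>> import numpy as np
+    >>> import scipy.special as sc
+    >>> theta = np.linspace(0, np.pi, 5)
+    >>> np.allclose(sc.eval_chebyt(3, np.cos(theta)), np.cos(3*theta))
+    True
+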
+    """)
+
+add_newdoc("eval_chebyu",
+    r"""
+    eval_chebyu(n, x, out=None)
+
+    Evaluate Chebyshev polynomial of the second kind at a point.
+
+    The Chebyshev polynomials of the second kind can be defined via
+    the Gauss hypergeometric function :math:`{}_2F_1` as
+
+    .. math::
+
+        U_n(x) = (n + 1) {}_2F_1(-n, n + 2; 3/2; (1 - x)/2).
+
+    When :math:`n` is an integer the result is a polynomial of degree
+    :math:`n`. See 22.5.48 in [AS]_ for details.
+
+    Parameters
+    ----------
+    n : array_like
+        Degree of the polynomial. If not an integer, the result is
+        determined via the relation to the Gauss hypergeometric
+        function.
+    x : array_like
+        Points at which to evaluate the Chebyshev polynomial
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    U : scalar or ndarray
+        Values of the Chebyshev polynomial
+
+    See Also
+    --------
+    roots_chebyu : roots and quadrature weights of Chebyshev
+                   polynomials of the second kind
+    chebyu : Chebyshev polynomial object
+    eval_chebyt : evaluate Chebyshev polynomials of the first kind
+    hyp2f1 : Gauss hypergeometric function
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
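+    Examples
+    --------
+    The identity ``U_n(cos(theta)) = sin((n + 1)*theta)/sin(theta)``
+    provides a quick sanity check (avoiding the endpoints where
+    ``sin(theta)`` vanishes):
+
+    >>> import numpy as np
+    >>> import scipy.special as sc
+    >>> theta = np.linspace(0.1, np.pi - 0.1, 5)
+    >>> U = np.sin(4*theta) / np.sin(theta)
+    >>> np.allclose(sc.eval_chebyu(3, np.cos(theta)), U)
+    True
+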
+    """)
+
+add_newdoc("eval_chebys",
+    r"""
+    eval_chebys(n, x, out=None)
+
+    Evaluate Chebyshev polynomial of the second kind on [-2, 2] at a
+    point.
+
+    These polynomials are defined as
+
+    .. math::
+
+        S_n(x) = U_n(x/2)
+
+    where :math:`U_n` is a Chebyshev polynomial of the second
+    kind. See 22.5.13 in [AS]_ for details.
+
+    Parameters
+    ----------
+    n : array_like
+        Degree of the polynomial. If not an integer, the result is
+        determined via the relation to `eval_chebyu`.
+    x : array_like
+        Points at which to evaluate the Chebyshev polynomial
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    S : scalar or ndarray
+        Values of the Chebyshev polynomial
+
+    See Also
+    --------
+    roots_chebys : roots and quadrature weights of Chebyshev
+                   polynomials of the second kind on [-2, 2]
+    chebys : Chebyshev polynomial object
+    eval_chebyu : evaluate Chebyshev polynomials of the second kind
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import scipy.special as sc
+
+    They are a scaled version of the Chebyshev polynomials of the
+    second kind.
+
+    >>> x = np.linspace(-2, 2, 6)
+    >>> sc.eval_chebys(3, x)
+    array([-4.   ,  0.672,  0.736, -0.736, -0.672,  4.   ])
+    >>> sc.eval_chebyu(3, x / 2)
+    array([-4.   ,  0.672,  0.736, -0.736, -0.672,  4.   ])
+
+    """)
+
+add_newdoc("eval_chebyc",
+    r"""
+    eval_chebyc(n, x, out=None)
+
+    Evaluate Chebyshev polynomial of the first kind on [-2, 2] at a
+    point.
+
+    These polynomials are defined as
+
+    .. math::
+
+        C_n(x) = 2 T_n(x/2)
+
+    where :math:`T_n` is a Chebyshev polynomial of the first kind. See
+    22.5.11 in [AS]_ for details.
+
+    Parameters
+    ----------
+    n : array_like
+        Degree of the polynomial. If not an integer, the result is
+        determined via the relation to `eval_chebyt`.
+    x : array_like
+        Points at which to evaluate the Chebyshev polynomial
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    C : scalar or ndarray
+        Values of the Chebyshev polynomial
+
+    See Also
+    --------
+    roots_chebyc : roots and quadrature weights of Chebyshev
+                   polynomials of the first kind on [-2, 2]
+    chebyc : Chebyshev polynomial object
+    numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
+    eval_chebyt : evaluate Chebyshev polynomials of the first kind
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import scipy.special as sc
+
+    They are a scaled version of the Chebyshev polynomials of the
+    first kind.
+
+    >>> x = np.linspace(-2, 2, 6)
+    >>> sc.eval_chebyc(3, x)
+    array([-2.   ,  1.872,  1.136, -1.136, -1.872,  2.   ])
+    >>> 2 * sc.eval_chebyt(3, x / 2)
+    array([-2.   ,  1.872,  1.136, -1.136, -1.872,  2.   ])
+
+    """)
+
+add_newdoc("eval_sh_chebyt",
+    r"""
+    eval_sh_chebyt(n, x, out=None)
+
+    Evaluate shifted Chebyshev polynomial of the first kind at a
+    point.
+
+    These polynomials are defined as
+
+    .. math::
+
+        T_n^*(x) = T_n(2x - 1)
+
+    where :math:`T_n` is a Chebyshev polynomial of the first kind. See
+    22.5.14 in [AS]_ for details.
+
+    Parameters
+    ----------
+    n : array_like
+        Degree of the polynomial. If not an integer, the result is
+        determined via the relation to `eval_chebyt`.
+    x : array_like
+        Points at which to evaluate the shifted Chebyshev polynomial
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    T : scalar or ndarray
+        Values of the shifted Chebyshev polynomial
+
+    See Also
+    --------
+    roots_sh_chebyt : roots and quadrature weights of shifted
+                      Chebyshev polynomials of the first kind
+    sh_chebyt : shifted Chebyshev polynomial object
+    eval_chebyt : evaluate Chebyshev polynomials of the first kind
+    numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
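+    Examples
+    --------
+    A numerical check of the defining relation ``T_n^*(x) = T_n(2x - 1)``:
+
+    >>> import numpy as np
+    >>> import scipy.special as sc
+    >>> x = np.linspace(0, 1, 5)
+    >>> np.allclose(sc.eval_sh_chebyt(3, x), sc.eval_chebyt(3, 2*x - 1))
+    True
+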
+    """)
+
+add_newdoc("eval_sh_chebyu",
+    r"""
+    eval_sh_chebyu(n, x, out=None)
+
+    Evaluate shifted Chebyshev polynomial of the second kind at a
+    point.
+
+    These polynomials are defined as
+
+    .. math::
+
+        U_n^*(x) = U_n(2x - 1)
+
+    where :math:`U_n` is a Chebyshev polynomial of the second kind. See
+    22.5.15 in [AS]_ for details.
+
+    Parameters
+    ----------
+    n : array_like
+        Degree of the polynomial. If not an integer, the result is
+        determined via the relation to `eval_chebyu`.
+    x : array_like
+        Points at which to evaluate the shifted Chebyshev polynomial
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    U : scalar or ndarray
+        Values of the shifted Chebyshev polynomial
+
+    See Also
+    --------
+    roots_sh_chebyu : roots and quadrature weights of shifted
+                      Chebyshev polynomials of the second kind
+    sh_chebyu : shifted Chebyshev polynomial object
+    eval_chebyu : evaluate Chebyshev polynomials of the second kind
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
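+    Examples
+    --------
+    A numerical check of the defining relation ``U_n^*(x) = U_n(2x - 1)``:
+
+    >>> import numpy as np
+    >>> import scipy.special as sc
+    >>> x = np.linspace(0, 1, 5)
+    >>> np.allclose(sc.eval_sh_chebyu(3, x), sc.eval_chebyu(3, 2*x - 1))
+    True
+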
+    """)
+
+add_newdoc("eval_legendre",
+    r"""
+    eval_legendre(n, x, out=None)
+
+    Evaluate Legendre polynomial at a point.
+
+    The Legendre polynomials can be defined via the Gauss
+    hypergeometric function :math:`{}_2F_1` as
+
+    .. math::
+
+        P_n(x) = {}_2F_1(-n, n + 1; 1; (1 - x)/2).
+
+    When :math:`n` is an integer the result is a polynomial of degree
+    :math:`n`. See 22.5.49 in [AS]_ for details.
+
+    Parameters
+    ----------
+    n : array_like
+        Degree of the polynomial. If not an integer, the result is
+        determined via the relation to the Gauss hypergeometric
+        function.
+    x : array_like
+        Points at which to evaluate the Legendre polynomial
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    P : scalar or ndarray
+        Values of the Legendre polynomial
+
+    See Also
+    --------
+    roots_legendre : roots and quadrature weights of Legendre
+                     polynomials
+    legendre : Legendre polynomial object
+    hyp2f1 : Gauss hypergeometric function
+    numpy.polynomial.legendre.Legendre : Legendre series
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.special import eval_legendre
+
+    Evaluate the zero-order Legendre polynomial at x = 0
+
+    >>> eval_legendre(0, 0)
+    1.0
+
+    Evaluate the first-order Legendre polynomial between -1 and 1
+
+    >>> X = np.linspace(-1, 1, 5)  # Domain of Legendre polynomials
+    >>> eval_legendre(1, X)
+    array([-1. , -0.5,  0. ,  0.5,  1. ])
+
+    Evaluate Legendre polynomials of order 0 through 4 at x = 0
+
+    >>> N = range(0, 5)
+    >>> eval_legendre(N, 0)
+    array([ 1.   ,  0.   , -0.5  ,  0.   ,  0.375])
+
+    Plot Legendre polynomials of order 0 through 4
+
+    >>> X = np.linspace(-1, 1)
+
+    >>> import matplotlib.pyplot as plt
+    >>> for n in range(0, 5):
+    ...     y = eval_legendre(n, X)
+    ...     plt.plot(X, y, label=r'$P_{}(x)$'.format(n))
+
+    >>> plt.title("Legendre Polynomials")
+    >>> plt.xlabel("x")
+    >>> plt.ylabel(r'$P_n(x)$')
+    >>> plt.legend(loc='lower right')
+    >>> plt.show()
+
+    """)
+
+add_newdoc("eval_sh_legendre",
+    r"""
+    eval_sh_legendre(n, x, out=None)
+
+    Evaluate shifted Legendre polynomial at a point.
+
+    These polynomials are defined as
+
+    .. math::
+
+        P_n^*(x) = P_n(2x - 1)
+
+    where :math:`P_n` is a Legendre polynomial. See 2.2.11 in [AS]_
+    for details.
+
+    Parameters
+    ----------
+    n : array_like
+        Degree of the polynomial. If not an integer, the value is
+        determined via the relation to `eval_legendre`.
+    x : array_like
+        Points at which to evaluate the shifted Legendre polynomial
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    P : scalar or ndarray
+        Values of the shifted Legendre polynomial
+
+    See Also
+    --------
+    roots_sh_legendre : roots and quadrature weights of shifted
+                        Legendre polynomials
+    sh_legendre : shifted Legendre polynomial object
+    eval_legendre : evaluate Legendre polynomials
+    numpy.polynomial.legendre.Legendre : Legendre series
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
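+    Examples
+    --------
+    A quick numerical check of the shift relation above against
+    `eval_legendre`, at a few arbitrarily chosen points:
+
+    >>> import numpy as np
+    >>> from scipy.special import eval_sh_legendre, eval_legendre
+    >>> x = np.linspace(0, 1, 5)
+    >>> np.allclose(eval_sh_legendre(2, x), eval_legendre(2, 2*x - 1))
+    True
+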
+    """)
+
+add_newdoc("eval_genlaguerre",
+    r"""
+    eval_genlaguerre(n, alpha, x, out=None)
+
+    Evaluate generalized Laguerre polynomial at a point.
+
+    The generalized Laguerre polynomials can be defined via the
+    confluent hypergeometric function :math:`{}_1F_1` as
+
+    .. math::
+
+        L_n^{(\alpha)}(x) = \binom{n + \alpha}{n}
+          {}_1F_1(-n, \alpha + 1, x).
+
+    When :math:`n` is an integer the result is a polynomial of degree
+    :math:`n`. See 22.5.54 in [AS]_ for details. The Laguerre
+    polynomials are the special case where :math:`\alpha = 0`.
+
+    Parameters
+    ----------
+    n : array_like
+        Degree of the polynomial. If not an integer, the result is
+        determined via the relation to the confluent hypergeometric
+        function.
+    alpha : array_like
+        Parameter; must have ``alpha > -1``
+    x : array_like
+        Points at which to evaluate the generalized Laguerre
+        polynomial
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    L : scalar or ndarray
+        Values of the generalized Laguerre polynomial
+
+    See Also
+    --------
+    roots_genlaguerre : roots and quadrature weights of generalized
+                        Laguerre polynomials
+    genlaguerre : generalized Laguerre polynomial object
+    hyp1f1 : confluent hypergeometric function
+    eval_laguerre : evaluate Laguerre polynomials
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
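+    Examples
+    --------
+    As a quick check of the special case noted above, ``alpha = 0``
+    recovers the ordinary Laguerre polynomials (points chosen
+    arbitrarily for illustration):
+
+    >>> import numpy as np
+    >>> from scipy.special import eval_genlaguerre, eval_laguerre
+    >>> x = np.linspace(0, 5, 5)
+    >>> np.allclose(eval_genlaguerre(3, 0, x), eval_laguerre(3, x))
+    True
+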
+    """)
+
+add_newdoc("eval_laguerre",
+    r"""
+    eval_laguerre(n, x, out=None)
+
+    Evaluate Laguerre polynomial at a point.
+
+    The Laguerre polynomials can be defined via the confluent
+    hypergeometric function :math:`{}_1F_1` as
+
+    .. math::
+
+        L_n(x) = {}_1F_1(-n, 1, x).
+
+    See 22.5.16 and 22.5.54 in [AS]_ for details. When :math:`n` is an
+    integer the result is a polynomial of degree :math:`n`.
+
+    Parameters
+    ----------
+    n : array_like
+        Degree of the polynomial. If not an integer, the result is
+        determined via the relation to the confluent hypergeometric
+        function.
+    x : array_like
+        Points at which to evaluate the Laguerre polynomial
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    L : scalar or ndarray
+        Values of the Laguerre polynomial
+
+    See Also
+    --------
+    roots_laguerre : roots and quadrature weights of Laguerre
+                     polynomials
+    laguerre : Laguerre polynomial object
+    numpy.polynomial.laguerre.Laguerre : Laguerre series
+    eval_genlaguerre : evaluate generalized Laguerre polynomials
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
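+    Examples
+    --------
+    A small sanity check at low degree, using the closed form
+    :math:`L_1(x) = 1 - x` (points chosen arbitrarily):
+
+    >>> import numpy as np
+    >>> from scipy.special import eval_laguerre
+    >>> x = np.array([0.0, 0.5, 2.0])
+    >>> np.allclose(eval_laguerre(1, x), 1 - x)
+    True
+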
+     """)
+
+add_newdoc("eval_hermite",
+    r"""
+    eval_hermite(n, x, out=None)
+
+    Evaluate physicist's Hermite polynomial at a point.
+
+    Defined by
+
+    .. math::
+
+        H_n(x) = (-1)^n e^{x^2} \frac{d^n}{dx^n} e^{-x^2};
+
+    :math:`H_n` is a polynomial of degree :math:`n`. See 22.11.7 in
+    [AS]_ for details.
+
+    Parameters
+    ----------
+    n : array_like
+        Degree of the polynomial
+    x : array_like
+        Points at which to evaluate the Hermite polynomial
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    H : scalar or ndarray
+        Values of the Hermite polynomial
+
+    See Also
+    --------
+    roots_hermite : roots and quadrature weights of physicist's
+                    Hermite polynomials
+    hermite : physicist's Hermite polynomial object
+    numpy.polynomial.hermite.Hermite : Physicist's Hermite series
+    eval_hermitenorm : evaluate Probabilist's Hermite polynomials
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
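+    Examples
+    --------
+    A quick check against the closed form :math:`H_2(x) = 4x^2 - 2`
+    (points chosen arbitrarily):
+
+    >>> import numpy as np
+    >>> from scipy.special import eval_hermite
+    >>> x = np.linspace(-1, 1, 5)
+    >>> np.allclose(eval_hermite(2, x), 4*x**2 - 2)
+    True
+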
+    """)
+
+add_newdoc("eval_hermitenorm",
+    r"""
+    eval_hermitenorm(n, x, out=None)
+
+    Evaluate probabilist's (normalized) Hermite polynomial at a
+    point.
+
+    Defined by
+
+    .. math::
+
+        He_n(x) = (-1)^n e^{x^2/2} \frac{d^n}{dx^n} e^{-x^2/2};
+
+    :math:`He_n` is a polynomial of degree :math:`n`. See 22.11.8 in
+    [AS]_ for details.
+
+    Parameters
+    ----------
+    n : array_like
+        Degree of the polynomial
+    x : array_like
+        Points at which to evaluate the Hermite polynomial
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    He : scalar or ndarray
+        Values of the Hermite polynomial
+
+    See Also
+    --------
+    roots_hermitenorm : roots and quadrature weights of probabilist's
+                        Hermite polynomials
+    hermitenorm : probabilist's Hermite polynomial object
+    numpy.polynomial.hermite_e.HermiteE : Probabilist's Hermite series
+    eval_hermite : evaluate physicist's Hermite polynomials
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
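+    Examples
+    --------
+    A quick check against the closed form :math:`He_2(x) = x^2 - 1`
+    (points chosen arbitrarily):
+
+    >>> import numpy as np
+    >>> from scipy.special import eval_hermitenorm
+    >>> x = np.linspace(-1, 1, 5)
+    >>> np.allclose(eval_hermitenorm(2, x), x**2 - 1)
+    True
+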
+    """)
+
+add_newdoc("exp1",
+    r"""
+    exp1(z, out=None)
+
+    Exponential integral E1.
+
+    For complex :math:`z \ne 0` the exponential integral can be defined as
+    [1]_
+
+    .. math::
+
+       E_1(z) = \int_z^\infty \frac{e^{-t}}{t} dt,
+
+    where the path of the integral does not cross the negative real
+    axis or pass through the origin.
+
+    Parameters
+    ----------
+    z : array_like
+        Real or complex argument.
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the exponential integral E1
+
+    See Also
+    --------
+    expi : exponential integral :math:`Ei`
+    expn : generalization of :math:`E_1`
+
+    Notes
+    -----
+    For :math:`x > 0` it is related to the exponential integral
+    :math:`Ei` (see `expi`) via the relation
+
+    .. math::
+
+       E_1(x) = -Ei(-x).
+
+    References
+    ----------
+    .. [1] Digital Library of Mathematical Functions, 6.2.1
+           https://dlmf.nist.gov/6.2#E1
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import scipy.special as sc
+
+    It has a pole at 0.
+
+    >>> sc.exp1(0)
+    inf
+
+    It has a branch cut on the negative real axis.
+
+    >>> sc.exp1(-1)
+    nan
+    >>> sc.exp1(complex(-1, 0))
+    (-1.8951178163559368-3.141592653589793j)
+    >>> sc.exp1(complex(-1, -0.0))
+    (-1.8951178163559368+3.141592653589793j)
+
+    It approaches 0 along the positive real axis.
+
+    >>> sc.exp1([1, 10, 100, 1000])
+    array([2.19383934e-01, 4.15696893e-06, 3.68359776e-46, 0.00000000e+00])
+
+    It is related to `expi`.
+
+    >>> x = np.array([1, 2, 3, 4])
+    >>> sc.exp1(x)
+    array([0.21938393, 0.04890051, 0.01304838, 0.00377935])
+    >>> -sc.expi(-x)
+    array([0.21938393, 0.04890051, 0.01304838, 0.00377935])
+
+    """)
+
+add_newdoc("exp10",
+    """
+    exp10(x, out=None)
+
+    Compute ``10**x`` element-wise.
+
+    Parameters
+    ----------
+    x : array_like
+        `x` must contain real numbers.
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    scalar or ndarray
+        ``10**x``, computed element-wise.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.special import exp10
+
+    >>> exp10(3)
+    1000.0
+    >>> x = np.array([[-1, -0.5, 0], [0.5, 1, 1.5]])
+    >>> exp10(x)
+    array([[  0.1       ,   0.31622777,   1.        ],
+           [  3.16227766,  10.        ,  31.6227766 ]])
+
+    """)
+
+add_newdoc("exp2",
+    """
+    exp2(x, out=None)
+
+    Compute ``2**x`` element-wise.
+
+    Parameters
+    ----------
+    x : array_like
+        `x` must contain real numbers.
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    scalar or ndarray
+        ``2**x``, computed element-wise.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.special import exp2
+
+    >>> exp2(3)
+    8.0
+    >>> x = np.array([[-1, -0.5, 0], [0.5, 1, 1.5]])
+    >>> exp2(x)
+    array([[ 0.5       ,  0.70710678,  1.        ],
+           [ 1.41421356,  2.        ,  2.82842712]])
+    """)
+
+add_newdoc("expi",
+    r"""
+    expi(x, out=None)
+
+    Exponential integral Ei.
+
+    For real :math:`x`, the exponential integral is defined as [1]_
+
+    .. math::
+
+        Ei(x) = \int_{-\infty}^x \frac{e^t}{t} dt.
+
+    For :math:`x > 0` the integral is understood as a Cauchy principal
+    value.
+
+    It is extended to the complex plane by analytic continuation of
+    the function on the interval :math:`(0, \infty)`. The complex
+    variant has a branch cut on the negative real axis.
+
+    Parameters
+    ----------
+    x : array_like
+        Real or complex valued argument
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the exponential integral
+
+    Notes
+    -----
+    The exponential integrals :math:`E_1` and :math:`Ei` satisfy the
+    relation
+
+    .. math::
+
+        E_1(x) = -Ei(-x)
+
+    for :math:`x > 0`.
+
+    See Also
+    --------
+    exp1 : Exponential integral :math:`E_1`
+    expn : Generalized exponential integral :math:`E_n`
+
+    References
+    ----------
+    .. [1] Digital Library of Mathematical Functions, 6.2.5
+           https://dlmf.nist.gov/6.2#E5
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import scipy.special as sc
+
+    It is related to `exp1`.
+
+    >>> x = np.array([1, 2, 3, 4])
+    >>> -sc.expi(-x)
+    array([0.21938393, 0.04890051, 0.01304838, 0.00377935])
+    >>> sc.exp1(x)
+    array([0.21938393, 0.04890051, 0.01304838, 0.00377935])
+
+    The complex variant has a branch cut on the negative real axis.
+
+    >>> sc.expi(-1 + 1e-12j)
+    (-0.21938393439552062+3.1415926535894254j)
+    >>> sc.expi(-1 - 1e-12j)
+    (-0.21938393439552062-3.1415926535894254j)
+
+    As the complex variant approaches the branch cut, the real parts
+    approach the value of the real variant.
+
+    >>> sc.expi(-1)
+    -0.21938393439552062
+
+    The SciPy implementation returns the real variant for complex
+    values on the branch cut.
+
+    >>> sc.expi(complex(-1, 0.0))
+    (-0.21938393439552062-0j)
+    >>> sc.expi(complex(-1, -0.0))
+    (-0.21938393439552062-0j)
+
+    """)
+
+add_newdoc('expit',
+    """
+    expit(x, out=None)
+
+    Expit (a.k.a. logistic sigmoid) ufunc for ndarrays.
+
+    The expit function, also known as the logistic sigmoid function, is
+    defined as ``expit(x) = 1/(1+exp(-x))``.  It is the inverse of the
+    logit function.
+
+    Parameters
+    ----------
+    x : ndarray
+        The ndarray to apply expit to element-wise.
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    scalar or ndarray
+        An ndarray of the same shape as `x`. Its entries are `expit`
+        evaluated at the corresponding entries of `x`.
+
+    See Also
+    --------
+    logit
+
+    Notes
+    -----
+    As a ufunc, `expit` takes a number of optional keyword arguments.
+    For more information see the NumPy documentation on ufuncs.
+
+    .. versionadded:: 0.10.0
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.special import expit, logit
+
+    >>> expit([-np.inf, -1.5, 0, 1.5, np.inf])
+    array([ 0.        ,  0.18242552,  0.5       ,  0.81757448,  1.        ])
+
+    `logit` is the inverse of `expit`:
+
+    >>> logit(expit([-2.5, 0, 3.1, 5.0]))
+    array([-2.5,  0. ,  3.1,  5. ])
+
+    Plot expit(x) for x in [-6, 6]:
+
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.linspace(-6, 6, 121)
+    >>> y = expit(x)
+    >>> plt.plot(x, y)
+    >>> plt.grid()
+    >>> plt.xlim(-6, 6)
+    >>> plt.xlabel('x')
+    >>> plt.title('expit(x)')
+    >>> plt.show()
+
+    """)
+
+add_newdoc("expm1",
+    """
+    expm1(x, out=None)
+
+    Compute ``exp(x) - 1``.
+
+    When `x` is near zero, ``exp(x)`` is near 1, so the numerical calculation
+    of ``exp(x) - 1`` can suffer from catastrophic loss of precision.
+    ``expm1(x)`` is implemented to avoid the loss of precision that occurs when
+    `x` is near zero.
+
+    Parameters
+    ----------
+    x : array_like
+        `x` must contain real numbers.
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    scalar or ndarray
+        ``exp(x) - 1`` computed element-wise.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.special import expm1
+
+    >>> expm1(1.0)
+    1.7182818284590451
+    >>> expm1([-0.2, -0.1, 0, 0.1, 0.2])
+    array([-0.18126925, -0.09516258,  0.        ,  0.10517092,  0.22140276])
+
+    The exact value of ``exp(7.5e-13) - 1`` is::
+
+        7.5000000000028125000000007031250000001318...*10**-13.
+
+    Here is what ``expm1(7.5e-13)`` gives:
+
+    >>> expm1(7.5e-13)
+    7.5000000000028135e-13
+
+    Compare that to ``exp(7.5e-13) - 1``, where the subtraction results in
+    a "catastrophic" loss of precision:
+
+    >>> np.exp(7.5e-13) - 1
+    7.5006667543675576e-13
+
+    """)
+
+add_newdoc("expn",
+    r"""
+    expn(n, x, out=None)
+
+    Generalized exponential integral En.
+
+    For integer :math:`n \geq 0` and real :math:`x \geq 0` the
+    generalized exponential integral is defined as [dlmf]_
+
+    .. math::
+
+        E_n(x) = x^{n - 1} \int_x^\infty \frac{e^{-t}}{t^n} dt.
+
+    Parameters
+    ----------
+    n : array_like
+        Non-negative integers
+    x : array_like
+        Real argument
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the generalized exponential integral
+
+    See Also
+    --------
+    exp1 : special case of :math:`E_n` for :math:`n = 1`
+    expi : related to :math:`E_n` when :math:`n = 1`
+
+    References
+    ----------
+    .. [dlmf] Digital Library of Mathematical Functions, 8.19.2
+              https://dlmf.nist.gov/8.19#E2
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import scipy.special as sc
+
+    Its domain is nonnegative n and x.
+
+    >>> sc.expn(-1, 1.0), sc.expn(1, -1.0)
+    (nan, nan)
+
+    It has a pole at ``x = 0`` for ``n = 0, 1``; for larger ``n`` it
+    is equal to ``1 / (n - 1)``.
+
+    >>> sc.expn([0, 1, 2, 3, 4], 0)
+    array([       inf,        inf, 1.        , 0.5       , 0.33333333])
+
+    For n equal to 0 it reduces to ``exp(-x) / x``.
+
+    >>> x = np.array([1, 2, 3, 4])
+    >>> sc.expn(0, x)
+    array([0.36787944, 0.06766764, 0.01659569, 0.00457891])
+    >>> np.exp(-x) / x
+    array([0.36787944, 0.06766764, 0.01659569, 0.00457891])
+
+    For n equal to 1 it reduces to `exp1`.
+
+    >>> sc.expn(1, x)
+    array([0.21938393, 0.04890051, 0.01304838, 0.00377935])
+    >>> sc.exp1(x)
+    array([0.21938393, 0.04890051, 0.01304838, 0.00377935])
+
+    """)
+
+add_newdoc("exprel",
+    r"""
+    exprel(x, out=None)
+
+    Relative error exponential, ``(exp(x) - 1)/x``.
+
+    When `x` is near zero, ``exp(x)`` is near 1, so the numerical calculation
+    of ``exp(x) - 1`` can suffer from catastrophic loss of precision.
+    ``exprel(x)`` is implemented to avoid the loss of precision that occurs when
+    `x` is near zero.
+
+    Parameters
+    ----------
+    x : ndarray
+        Input array.  `x` must contain real numbers.
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    scalar or ndarray
+        ``(exp(x) - 1)/x``, computed element-wise.
+
+    See Also
+    --------
+    expm1
+
+    Notes
+    -----
+    .. versionadded:: 0.17.0
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.special import exprel
+
+    >>> exprel(0.01)
+    1.0050167084168056
+    >>> exprel([-0.25, -0.1, 0, 0.1, 0.25])
+    array([ 0.88479687,  0.95162582,  1.        ,  1.05170918,  1.13610167])
+
+    Compare ``exprel(5e-9)`` to the naive calculation.  The exact value
+    is ``1.00000000250000000416...``.
+
+    >>> exprel(5e-9)
+    1.0000000025
+
+    >>> (np.exp(5e-9) - 1)/5e-9
+    0.99999999392252903
+    """)
+
+add_newdoc("fdtr",
+    r"""
+    fdtr(dfn, dfd, x, out=None)
+
+    F cumulative distribution function.
+
+    Returns the value of the cumulative distribution function of the
+    F-distribution, also known as Snedecor's F-distribution or the
+    Fisher-Snedecor distribution.
+
+    The F-distribution with parameters :math:`d_n` and :math:`d_d` is the
+    distribution of the random variable,
+
+    .. math::
+        X = \frac{U_n/d_n}{U_d/d_d},
+
+    where :math:`U_n` and :math:`U_d` are random variables distributed
+    :math:`\chi^2`, with :math:`d_n` and :math:`d_d` degrees of freedom,
+    respectively.
+
+    Parameters
+    ----------
+    dfn : array_like
+        First parameter (positive float).
+    dfd : array_like
+        Second parameter (positive float).
+    x : array_like
+        Argument (nonnegative float).
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    y : scalar or ndarray
+        The CDF of the F-distribution with parameters `dfn` and `dfd` at `x`.
+
+    See Also
+    --------
+    fdtrc : F distribution survival function
+    fdtri : F distribution inverse cumulative distribution
+    scipy.stats.f : F distribution
+
+    Notes
+    -----
+    The regularized incomplete beta function is used, according to the
+    formula,
+
+    .. math::
+        F(d_n, d_d; x) = I_{xd_n/(d_d + xd_n)}(d_n/2, d_d/2).
+
+    Wrapper for the Cephes [1]_ routine `fdtr`. The F distribution is also
+    available as `scipy.stats.f`. Calling `fdtr` directly can improve
+    performance compared to the ``cdf`` method of `scipy.stats.f` (see last
+    example below).
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+
+    Examples
+    --------
+    Calculate the function for ``dfn=1`` and ``dfd=2`` at ``x=1``.
+
+    >>> import numpy as np
+    >>> from scipy.special import fdtr
+    >>> fdtr(1, 2, 1)
+    0.5773502691896258
+
+    Calculate the function at several points by providing a NumPy array for
+    `x`.
+
+    >>> x = np.array([0.5, 2., 3.])
+    >>> fdtr(1, 2, x)
+    array([0.4472136 , 0.70710678, 0.77459667])
+
+    Plot the function for several parameter sets.
+
+    >>> import matplotlib.pyplot as plt
+    >>> dfn_parameters = [1, 5, 10, 50]
+    >>> dfd_parameters = [1, 1, 2, 3]
+    >>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']
+    >>> parameters_list = list(zip(dfn_parameters, dfd_parameters,
+    ...                            linestyles))
+    >>> x = np.linspace(0, 30, 1000)
+    >>> fig, ax = plt.subplots()
+    >>> for parameter_set in parameters_list:
+    ...     dfn, dfd, style = parameter_set
+    ...     fdtr_vals = fdtr(dfn, dfd, x)
+    ...     ax.plot(x, fdtr_vals, label=rf"$d_n={dfn},\, d_d={dfd}$",
+    ...             ls=style)
+    >>> ax.legend()
+    >>> ax.set_xlabel("$x$")
+    >>> ax.set_title("F distribution cumulative distribution function")
+    >>> plt.show()
+
+    The F distribution is also available as `scipy.stats.f`. Using `fdtr`
+    directly can be much faster than calling the ``cdf`` method of
+    `scipy.stats.f`, especially for small arrays or individual values.
+    To get the same results one must use the following parametrization:
+    ``stats.f(dfn, dfd).cdf(x)=fdtr(dfn, dfd, x)``.
+
+    >>> from scipy.stats import f
+    >>> dfn, dfd = 1, 2
+    >>> x = 1
+    >>> fdtr_res = fdtr(dfn, dfd, x)  # this will often be faster than below
+    >>> f_dist_res = f(dfn, dfd).cdf(x)
+    >>> fdtr_res == f_dist_res  # test that results are equal
+    True
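+
+    As an additional numerical check, the formula from the Notes relating
+    `fdtr` to the regularized incomplete beta function can be verified
+    with `scipy.special.betainc`, reusing the values from above:
+
+    >>> from scipy.special import betainc
+    >>> np.allclose(fdtr(dfn, dfd, x),
+    ...             betainc(dfn/2, dfd/2, dfn*x/(dfd + dfn*x)))
+    True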
+    """)
+
+add_newdoc("fdtrc",
+    r"""
+    fdtrc(dfn, dfd, x, out=None)
+
+    F survival function.
+
+    Returns the complemented F-distribution function (the integral of the
+    density from `x` to infinity).
+
+    Parameters
+    ----------
+    dfn : array_like
+        First parameter (positive float).
+    dfd : array_like
+        Second parameter (positive float).
+    x : array_like
+        Argument (nonnegative float).
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    y : scalar or ndarray
+        The complemented F-distribution function with parameters `dfn` and
+        `dfd` at `x`.
+
+    See Also
+    --------
+    fdtr : F distribution cumulative distribution function
+    fdtri : F distribution inverse cumulative distribution function
+    scipy.stats.f : F distribution
+
+    Notes
+    -----
+    The regularized incomplete beta function is used, according to the
+    formula,
+
+    .. math::
+        F(d_n, d_d; x) = I_{d_d/(d_d + xd_n)}(d_d/2, d_n/2).
+
+    Wrapper for the Cephes [1]_ routine `fdtrc`. The F distribution is also
+    available as `scipy.stats.f`. Calling `fdtrc` directly can improve
+    performance compared to the ``sf`` method of `scipy.stats.f` (see last
+    example below).
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+
+    Examples
+    --------
+    Calculate the function for ``dfn=1`` and ``dfd=2`` at ``x=1``.
+
+    >>> import numpy as np
+    >>> from scipy.special import fdtrc
+    >>> fdtrc(1, 2, 1)
+    0.42264973081037427
+
+    Calculate the function at several points by providing a NumPy array for
+    `x`.
+
+    >>> x = np.array([0.5, 2., 3.])
+    >>> fdtrc(1, 2, x)
+    array([0.5527864 , 0.29289322, 0.22540333])
+
+    Plot the function for several parameter sets.
+
+    >>> import matplotlib.pyplot as plt
+    >>> dfn_parameters = [1, 5, 10, 50]
+    >>> dfd_parameters = [1, 1, 2, 3]
+    >>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']
+    >>> parameters_list = list(zip(dfn_parameters, dfd_parameters,
+    ...                            linestyles))
+    >>> x = np.linspace(0, 30, 1000)
+    >>> fig, ax = plt.subplots()
+    >>> for parameter_set in parameters_list:
+    ...     dfn, dfd, style = parameter_set
+    ...     fdtrc_vals = fdtrc(dfn, dfd, x)
+    ...     ax.plot(x, fdtrc_vals, label=rf"$d_n={dfn},\, d_d={dfd}$",
+    ...             ls=style)
+    >>> ax.legend()
+    >>> ax.set_xlabel("$x$")
+    >>> ax.set_title("F distribution survival function")
+    >>> plt.show()
+
+    The F distribution is also available as `scipy.stats.f`. Using `fdtrc`
+    directly can be much faster than calling the ``sf`` method of
+    `scipy.stats.f`, especially for small arrays or individual values.
+    To get the same results one must use the following parametrization:
+    ``stats.f(dfn, dfd).sf(x)=fdtrc(dfn, dfd, x)``.
+
+    >>> from scipy.stats import f
+    >>> dfn, dfd = 1, 2
+    >>> x = 1
+    >>> fdtrc_res = fdtrc(dfn, dfd, x)  # this will often be faster than below
+    >>> f_dist_res = f(dfn, dfd).sf(x)
+    >>> f_dist_res == fdtrc_res  # test that results are equal
+    True
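+
+    As an additional numerical check, the formula from the Notes relating
+    `fdtrc` to the regularized incomplete beta function can be verified
+    with `scipy.special.betainc`, reusing the values from above:
+
+    >>> from scipy.special import betainc
+    >>> np.allclose(fdtrc(dfn, dfd, x),
+    ...             betainc(dfd/2, dfn/2, dfd/(dfd + dfn*x)))
+    True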
+    """)
+
+add_newdoc("fdtri",
+    r"""
+    fdtri(dfn, dfd, p, out=None)
+
+    The `p`-th quantile of the F-distribution.
+
+    This function is the inverse of the F-distribution CDF, `fdtr`, returning
+    the `x` such that `fdtr(dfn, dfd, x) = p`.
+
+    Parameters
+    ----------
+    dfn : array_like
+        First parameter (positive float).
+    dfd : array_like
+        Second parameter (positive float).
+    p : array_like
+        Cumulative probability, in [0, 1].
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    x : scalar or ndarray
+        The quantile corresponding to `p`.
+
+    See Also
+    --------
+    fdtr : F distribution cumulative distribution function
+    fdtrc : F distribution survival function
+    scipy.stats.f : F distribution
+
+    Notes
+    -----
+    The computation is carried out using the relation to the inverse
+    regularized beta function, :math:`I^{-1}_x(a, b)`.  Let
+    :math:`z = I^{-1}_{1 - p}(d_d/2, d_n/2).`  Then,
+
+    .. math::
+        x = \frac{d_d (1 - z)}{d_n z}.
+
+    If `p` is such that :math:`x < 0.5`, the following relation is used
+    instead for improved stability: let
+    :math:`z' = I^{-1}_p(d_n/2, d_d/2).` Then,
+
+    .. math::
+        x = \frac{d_d z'}{d_n (1 - z')}.
+
+    Wrapper for the Cephes [1]_ routine `fdtri`.
+
+    The F distribution is also available as `scipy.stats.f`. Calling
+    `fdtri` directly can improve performance compared to the ``ppf``
+    method of `scipy.stats.f` (see last example below).
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+
+    Examples
+    --------
+    `fdtri` represents the inverse of the F distribution CDF which is
+    available as `fdtr`. Here, we calculate the CDF for ``df1=1``, ``df2=2``
+    at ``x=3``. `fdtri` then returns ``3`` given the same values for `df1`,
+    `df2` and the computed CDF value.
+
+    >>> import numpy as np
+    >>> from scipy.special import fdtri, fdtr
+    >>> df1, df2 = 1, 2
+    >>> x = 3
+    >>> cdf_value =  fdtr(df1, df2, x)
+    >>> fdtri(df1, df2, cdf_value)
+    3.000000000000006
+
+    Calculate the function at several points by providing a NumPy array for
+    `x`.
+
+    >>> x = np.array([0.1, 0.4, 0.7])
+    >>> fdtri(1, 2, x)
+    array([0.02020202, 0.38095238, 1.92156863])
+
+    Plot the function for several parameter sets.
+
+    >>> import matplotlib.pyplot as plt
+    >>> dfn_parameters = [50, 10, 1, 50]
+    >>> dfd_parameters = [0.5, 1, 1, 5]
+    >>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']
+    >>> parameters_list = list(zip(dfn_parameters, dfd_parameters,
+    ...                            linestyles))
+    >>> x = np.linspace(0, 1, 1000)
+    >>> fig, ax = plt.subplots()
+    >>> for parameter_set in parameters_list:
+    ...     dfn, dfd, style = parameter_set
+    ...     fdtri_vals = fdtri(dfn, dfd, x)
+    ...     ax.plot(x, fdtri_vals, label=rf"$d_n={dfn},\, d_d={dfd}$",
+    ...             ls=style)
+    >>> ax.legend()
+    >>> ax.set_xlabel("$x$")
+    >>> title = "F distribution inverse cumulative distribution function"
+    >>> ax.set_title(title)
+    >>> ax.set_ylim(0, 30)
+    >>> plt.show()
+
+    The F distribution is also available as `scipy.stats.f`. Using `fdtri`
+    directly can be much faster than calling the ``ppf`` method of
+    `scipy.stats.f`, especially for small arrays or individual values.
+    To get the same results one must use the following parametrization:
+    ``stats.f(dfn, dfd).ppf(x)=fdtri(dfn, dfd, x)``.
+
+    >>> from scipy.stats import f
+    >>> dfn, dfd = 1, 2
+    >>> x = 0.7
+    >>> fdtri_res = fdtri(dfn, dfd, x)  # this will often be faster than below
+    >>> f_dist_res = f(dfn, dfd).ppf(x)
+    >>> f_dist_res == fdtri_res  # test that results are equal
+    True
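+
+    As a numerical check of the first relation in the Notes, the quantile
+    can also be recovered from the inverse regularized beta function
+    `scipy.special.betaincinv`, reusing the values from above (here `x`
+    plays the role of `p`):
+
+    >>> from scipy.special import betaincinv
+    >>> z = betaincinv(dfd/2, dfn/2, 1 - x)
+    >>> np.allclose(dfd*(1 - z)/(dfn*z), fdtri_res)
+    True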
+    """)
+
+add_newdoc("fdtridfd",
+    """
+    fdtridfd(dfn, p, x, out=None)
+
+    Inverse to `fdtr` vs dfd.
+
+    Finds the F density argument dfd such that ``fdtr(dfn, dfd, x) == p``.
+
+    Parameters
+    ----------
+    dfn : array_like
+        First parameter (positive float).
+    p : array_like
+        Cumulative probability, in [0, 1].
+    x : array_like
+        Argument (nonnegative float).
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    dfd : scalar or ndarray
+        `dfd` such that ``fdtr(dfn, dfd, x) == p``.
+
+    See Also
+    --------
+    fdtr, fdtrc, fdtri
+
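+    Examples
+    --------
+    A round-trip sanity check with `fdtr`; the result is rounded because
+    the inverse is found by a numerical search:
+
+    >>> from scipy.special import fdtr, fdtridfd
+    >>> dfd = fdtridfd(1.0, fdtr(1.0, 2.0, 3.0), 3.0)
+    >>> round(float(dfd), 6)
+    2.0
+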
+    """)
+
+'''
+commented out as fdtridfn seems to have bugs and is not in functions.json
+see: https://github.com/scipy/scipy/pull/15622#discussion_r811440983
+
+add_newdoc(
+    "fdtridfn",
+    """
+    fdtridfn(p, dfd, x, out=None)
+
+    Inverse to `fdtr` vs dfn
+
+    Finds the F density argument dfn such that ``fdtr(dfn, dfd, x) == p``.
+
+    Parameters
+    ----------
+    p : array_like
+        Cumulative probability, in [0, 1].
+    dfd : array_like
+        Second parameter (positive float).
+    x : array_like
+        Argument (nonnegative float).
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    dfn : scalar or ndarray
+        `dfn` such that ``fdtr(dfn, dfd, x) == p``.
+
+    See Also
+    --------
+    fdtr, fdtrc, fdtri, fdtridfd
+
+    """)
+'''
+
+add_newdoc("fresnel",
+    r"""
+    fresnel(z, out=None)
+
+    Fresnel integrals.
+
+    The Fresnel integrals are defined as
+
+    .. math::
+
+       S(z) &= \int_0^z \sin(\pi t^2 /2) dt \\
+       C(z) &= \int_0^z \cos(\pi t^2 /2) dt.
+
+    See [dlmf]_ for details.
+
+    Parameters
+    ----------
+    z : array_like
+        Real or complex valued argument
+    out : 2-tuple of ndarrays, optional
+        Optional output arrays for the function results
+
+    Returns
+    -------
+    S, C : 2-tuple of scalar or ndarray
+        Values of the Fresnel integrals
+
+    See Also
+    --------
+    fresnel_zeros : zeros of the Fresnel integrals
+
+    References
+    ----------
+    .. [dlmf] NIST Digital Library of Mathematical Functions
+              https://dlmf.nist.gov/7.2#iii
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import scipy.special as sc
+
+    As z goes to infinity along the real axis, S and C converge to 0.5.
+
+    >>> S, C = sc.fresnel([0.1, 1, 10, 100, np.inf])
+    >>> S
+    array([0.00052359, 0.43825915, 0.46816998, 0.4968169 , 0.5       ])
+    >>> C
+    array([0.09999753, 0.7798934 , 0.49989869, 0.4999999 , 0.5       ])
+
+    They are related to the error function `erf`.
+
+    >>> z = np.array([1, 2, 3, 4])
+    >>> zeta = 0.5 * np.sqrt(np.pi) * (1 - 1j) * z
+    >>> S, C = sc.fresnel(z)
+    >>> C + 1j*S
+    array([0.7798934 +0.43825915j, 0.48825341+0.34341568j,
+           0.60572079+0.496313j  , 0.49842603+0.42051575j])
+    >>> 0.5 * (1 + 1j) * sc.erf(zeta)
+    array([0.7798934 +0.43825915j, 0.48825341+0.34341568j,
+           0.60572079+0.496313j  , 0.49842603+0.42051575j])
+
+    """)
+
+add_newdoc("gamma",
+    r"""
+    gamma(z, out=None)
+
+    Gamma function.
+
+    The gamma function is defined as
+
+    .. math::
+
+       \Gamma(z) = \int_0^\infty t^{z-1} e^{-t} dt
+
+    for :math:`\Re(z) > 0` and is extended to the rest of the complex
+    plane by analytic continuation. See [dlmf]_ for more details.
+
+    Parameters
+    ----------
+    z : array_like
+        Real or complex valued argument
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the gamma function
+
+    Notes
+    -----
+    The gamma function is often referred to as the generalized
+    factorial since :math:`\Gamma(n + 1) = n!` for natural numbers
+    :math:`n`. More generally it satisfies the recurrence relation
+    :math:`\Gamma(z + 1) = z \cdot \Gamma(z)` for complex :math:`z`,
+    which, combined with the fact that :math:`\Gamma(1) = 1`, implies
+    the above identity for :math:`z = n`.
+
+    References
+    ----------
+    .. [dlmf] NIST Digital Library of Mathematical Functions
+              https://dlmf.nist.gov/5.2#E1
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.special import gamma, factorial
+
+    >>> gamma([0, 0.5, 1, 5])
+    array([         inf,   1.77245385,   1.        ,  24.        ])
+
+    >>> z = 2.5 + 1j
+    >>> gamma(z)
+    (0.77476210455108352+0.70763120437959293j)
+    >>> gamma(z+1), z*gamma(z)  # Recurrence property
+    ((1.2292740569981171+2.5438401155000685j),
+     (1.2292740569981158+2.5438401155000658j))
+
+    >>> gamma(0.5)**2  # gamma(0.5) = sqrt(pi)
+    3.1415926535897927
+
+    Plot gamma(x) for real x
+
+    >>> x = np.linspace(-3.5, 5.5, 2251)
+    >>> y = gamma(x)
+
+    >>> import matplotlib.pyplot as plt
+    >>> plt.plot(x, y, 'b', alpha=0.6, label='gamma(x)')
+    >>> k = np.arange(1, 7)
+    >>> plt.plot(k, factorial(k-1), 'k*', alpha=0.6,
+    ...          label='(x-1)!, x = 1, 2, ...')
+    >>> plt.xlim(-3.5, 5.5)
+    >>> plt.ylim(-10, 25)
+    >>> plt.grid()
+    >>> plt.xlabel('x')
+    >>> plt.legend(loc='lower right')
+    >>> plt.show()
+
+    """)
+
+add_newdoc("gammainc",
+    r"""
+    gammainc(a, x, out=None)
+
+    Regularized lower incomplete gamma function.
+
+    It is defined as
+
+    .. math::
+
+        P(a, x) = \frac{1}{\Gamma(a)} \int_0^x t^{a - 1}e^{-t} dt
+
+    for :math:`a > 0` and :math:`x \geq 0`. See [dlmf]_ for details.
+
+    Parameters
+    ----------
+    a : array_like
+        Positive parameter
+    x : array_like
+        Nonnegative argument
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the lower incomplete gamma function
+
+    Notes
+    -----
+    The function satisfies the relation ``gammainc(a, x) +
+    gammaincc(a, x) = 1`` where `gammaincc` is the regularized upper
+    incomplete gamma function.
+
+    The implementation largely follows that of [boost]_.
+
+    See also
+    --------
+    gammaincc : regularized upper incomplete gamma function
+    gammaincinv : inverse of the regularized lower incomplete gamma function
+    gammainccinv : inverse of the regularized upper incomplete gamma function
+
+    References
+    ----------
+    .. [dlmf] NIST Digital Library of Mathematical functions
+              https://dlmf.nist.gov/8.2#E4
+    .. [boost] Maddock et. al., "Incomplete Gamma Functions",
+       https://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html
+
+    Examples
+    --------
+    >>> import scipy.special as sc
+
+    It is the CDF of the gamma distribution, so it starts at 0 and
+    monotonically increases to 1.
+
+    >>> sc.gammainc(0.5, [0, 1, 10, 100])
+    array([0.        , 0.84270079, 0.99999226, 1.        ])
+
+    It is equal to one minus the upper incomplete gamma function.
+
+    >>> a, x = 0.5, 0.4
+    >>> sc.gammainc(a, x)
+    0.6289066304773024
+    >>> 1 - sc.gammaincc(a, x)
+    0.6289066304773024
+
+    """)
+
+add_newdoc("gammaincc",
+    r"""
+    gammaincc(a, x, out=None)
+
+    Regularized upper incomplete gamma function.
+
+    It is defined as
+
+    .. math::
+
+        Q(a, x) = \frac{1}{\Gamma(a)} \int_x^\infty t^{a - 1}e^{-t} dt
+
+    for :math:`a > 0` and :math:`x \geq 0`. See [dlmf]_ for details.
+
+    Parameters
+    ----------
+    a : array_like
+        Positive parameter
+    x : array_like
+        Nonnegative argument
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the upper incomplete gamma function
+
+    Notes
+    -----
+    The function satisfies the relation ``gammainc(a, x) +
+    gammaincc(a, x) = 1`` where `gammainc` is the regularized lower
+    incomplete gamma function.
+
+    The implementation largely follows that of [boost]_.
+
+    See also
+    --------
+    gammainc : regularized lower incomplete gamma function
+    gammaincinv : inverse of the regularized lower incomplete gamma function
+    gammainccinv : inverse of the regularized upper incomplete gamma function
+
+    References
+    ----------
+    .. [dlmf] NIST Digital Library of Mathematical functions
+              https://dlmf.nist.gov/8.2#E4
+    .. [boost] Maddock et. al., "Incomplete Gamma Functions",
+       https://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html
+
+    Examples
+    --------
+    >>> import scipy.special as sc
+
+    It is the survival function of the gamma distribution, so it
+    starts at 1 and monotonically decreases to 0.
+
+    >>> sc.gammaincc(0.5, [0, 1, 10, 100, 1000])
+    array([1.00000000e+00, 1.57299207e-01, 7.74421643e-06, 2.08848758e-45,
+           0.00000000e+00])
+
+    It is equal to one minus the lower incomplete gamma function.
+
+    >>> a, x = 0.5, 0.4
+    >>> sc.gammaincc(a, x)
+    0.37109336952269756
+    >>> 1 - sc.gammainc(a, x)
+    0.37109336952269756
+
+    """)
+
+add_newdoc("gammainccinv",
+    """
+    gammainccinv(a, y, out=None)
+
+    Inverse of the regularized upper incomplete gamma function.
+
+    Given an input :math:`y` between 0 and 1, returns :math:`x` such
+    that :math:`y = Q(a, x)`. Here :math:`Q` is the regularized upper
+    incomplete gamma function; see `gammaincc`. This is well-defined
+    because the upper incomplete gamma function is monotonic as can
+    be seen from its definition in [dlmf]_.
+
+    Parameters
+    ----------
+    a : array_like
+        Positive parameter
+    y : array_like
+        Argument between 0 and 1, inclusive
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the inverse of the upper incomplete gamma function
+
+    See Also
+    --------
+    gammaincc : regularized upper incomplete gamma function
+    gammainc : regularized lower incomplete gamma function
+    gammaincinv : inverse of the regularized lower incomplete gamma function
+
+    References
+    ----------
+    .. [dlmf] NIST Digital Library of Mathematical Functions
+              https://dlmf.nist.gov/8.2#E4
+
+    Examples
+    --------
+    >>> import scipy.special as sc
+
+    It starts at infinity and monotonically decreases to 0.
+
+    >>> sc.gammainccinv(0.5, [0, 0.1, 0.5, 1])
+    array([       inf, 1.35277173, 0.22746821, 0.        ])
+
+    It inverts the upper incomplete gamma function.
+
+    >>> a, x = 0.5, [0, 0.1, 0.5, 1]
+    >>> sc.gammaincc(a, sc.gammainccinv(a, x))
+    array([0. , 0.1, 0.5, 1. ])
+
+    >>> a, x = 0.5, [0, 10, 50]
+    >>> sc.gammainccinv(a, sc.gammaincc(a, x))
+    array([ 0., 10., 50.])
+
+    """)
+
+add_newdoc("gammaincinv",
+    """
+    gammaincinv(a, y, out=None)
+
+    Inverse of the regularized lower incomplete gamma function.
+
+    Given an input :math:`y` between 0 and 1, returns :math:`x` such
+    that :math:`y = P(a, x)`. Here :math:`P` is the regularized lower
+    incomplete gamma function; see `gammainc`. This is well-defined
+    because the lower incomplete gamma function is monotonic as can be
+    seen from its definition in [dlmf]_.
+
+    Parameters
+    ----------
+    a : array_like
+        Positive parameter
+    y : array_like
+        Parameter between 0 and 1, inclusive
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the inverse of the lower incomplete gamma function
+
+    See Also
+    --------
+    gammainc : regularized lower incomplete gamma function
+    gammaincc : regularized upper incomplete gamma function
+    gammainccinv : inverse of the regularized upper incomplete gamma function
+
+    References
+    ----------
+    .. [dlmf] NIST Digital Library of Mathematical Functions
+              https://dlmf.nist.gov/8.2#E4
+
+    Examples
+    --------
+    >>> import scipy.special as sc
+
+    It starts at 0 and monotonically increases to infinity.
+
+    >>> sc.gammaincinv(0.5, [0, 0.1, 0.5, 1])
+    array([0.        , 0.00789539, 0.22746821,        inf])
+
+    It inverts the lower incomplete gamma function.
+
+    >>> a, x = 0.5, [0, 0.1, 0.5, 1]
+    >>> sc.gammainc(a, sc.gammaincinv(a, x))
+    array([0. , 0.1, 0.5, 1. ])
+
+    >>> a, x = 0.5, [0, 10, 25]
+    >>> sc.gammaincinv(a, sc.gammainc(a, x))
+    array([ 0.        , 10.        , 25.00001465])
+
+    """)
+
+add_newdoc("gammaln",
+    r"""
+    gammaln(x, out=None)
+
+    Logarithm of the absolute value of the gamma function.
+
+    Defined as
+
+    .. math::
+
+       \ln(\lvert\Gamma(x)\rvert)
+
+    where :math:`\Gamma` is the gamma function. For more details on
+    the gamma function, see [dlmf]_.
+
+    Parameters
+    ----------
+    x : array_like
+        Real argument
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the log of the absolute value of gamma
+
+    See Also
+    --------
+    gammasgn : sign of the gamma function
+    loggamma : principal branch of the logarithm of the gamma function
+
+    Notes
+    -----
+    It is the same function as the Python standard library function
+    :func:`math.lgamma`.
+
+    When used in conjunction with `gammasgn`, this function is useful
+    for working in logspace on the real axis without having to deal
+    with complex numbers via the relation ``exp(gammaln(x)) =
+    gammasgn(x) * gamma(x)``.
+
+    For complex-valued log-gamma, use `loggamma` instead of `gammaln`.
+
+    References
+    ----------
+    .. [dlmf] NIST Digital Library of Mathematical Functions
+              https://dlmf.nist.gov/5
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import scipy.special as sc
+
+    It has two positive zeros.
+
+    >>> sc.gammaln([1, 2])
+    array([0., 0.])
+
+    It has poles at nonpositive integers.
+
+    >>> sc.gammaln([0, -1, -2, -3, -4])
+    array([inf, inf, inf, inf, inf])
+
+    It asymptotically approaches ``x * log(x)`` (Stirling's formula).
+
+    >>> x = np.array([1e10, 1e20, 1e40, 1e80])
+    >>> sc.gammaln(x)
+    array([2.20258509e+11, 4.50517019e+21, 9.11034037e+41, 1.83206807e+82])
+    >>> x * np.log(x)
+    array([2.30258509e+11, 4.60517019e+21, 9.21034037e+41, 1.84206807e+82])
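+
+    As noted above, combining `gammaln` with `gammasgn` recovers the
+    gamma function on the real axis; a quick illustrative check:
+
+    >>> x = [1.5, 0.5, -0.5]
+    >>> np.allclose(sc.gammasgn(x) * np.exp(sc.gammaln(x)), sc.gamma(x))
+    True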
+
+    """)
+
+add_newdoc("gammasgn",
+    r"""
+    gammasgn(x, out=None)
+
+    Sign of the gamma function.
+
+    It is defined as
+
+    .. math::
+
+       \text{gammasgn}(x) =
+       \begin{cases}
+         +1 & \Gamma(x) > 0 \\
+         -1 & \Gamma(x) < 0
+       \end{cases}
+
+    where :math:`\Gamma` is the gamma function; see `gamma`. This
+    definition is complete since the gamma function is never zero;
+    see the discussion after [dlmf]_.
+
+    Parameters
+    ----------
+    x : array_like
+        Real argument
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    scalar or ndarray
+        Sign of the gamma function
+
+    Notes
+    -----
+    The gamma function can be computed as ``gammasgn(x) *
+    np.exp(gammaln(x))``.
+
+    See Also
+    --------
+    gamma : the gamma function
+    gammaln : log of the absolute value of the gamma function
+    loggamma : analytic continuation of the log of the gamma function
+
+    References
+    ----------
+    .. [dlmf] NIST Digital Library of Mathematical Functions
+              https://dlmf.nist.gov/5.2#E1
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import scipy.special as sc
+
+    It is 1 for `x > 0`.
+
+    >>> sc.gammasgn([1, 2, 3, 4])
+    array([1., 1., 1., 1.])
+
+    It alternates between -1 and 1 on the intervals between negative
+    integers.
+
+    >>> sc.gammasgn([-0.5, -1.5, -2.5, -3.5])
+    array([-1.,  1., -1.,  1.])
+
+    It can be used to compute the gamma function.
+
+    >>> x = [1.5, 0.5, -0.5, -1.5]
+    >>> sc.gammasgn(x) * np.exp(sc.gammaln(x))
+    array([ 0.88622693,  1.77245385, -3.5449077 ,  2.3632718 ])
+    >>> sc.gamma(x)
+    array([ 0.88622693,  1.77245385, -3.5449077 ,  2.3632718 ])
+
+    """)
+
+add_newdoc("gdtr",
+    r"""
+    gdtr(a, b, x, out=None)
+
+    Gamma distribution cumulative distribution function.
+
+    Returns the integral from zero to `x` of the gamma probability density
+    function,
+
+    .. math::
+
+        F = \int_0^x \frac{a^b}{\Gamma(b)} t^{b-1} e^{-at}\,dt,
+
+    where :math:`\Gamma` is the gamma function.
+
+    Parameters
+    ----------
+    a : array_like
+        The rate parameter of the gamma distribution, sometimes denoted
+        :math:`\beta` (float).  It is also the reciprocal of the scale
+        parameter :math:`\theta`.
+    b : array_like
+        The shape parameter of the gamma distribution, sometimes denoted
+        :math:`\alpha` (float).
+    x : array_like
+        The quantile (upper limit of integration; float).
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    F : scalar or ndarray
+        The CDF of the gamma distribution with parameters `a` and `b`
+        evaluated at `x`.
+
+    See also
+    --------
+    gdtrc : 1 - CDF of the gamma distribution.
+    scipy.stats.gamma : Gamma distribution
+
+    Notes
+    -----
+    The evaluation is carried out using the relation to the incomplete gamma
+    integral (regularized gamma function).
+
+    Wrapper for the Cephes [1]_ routine `gdtr`. Calling `gdtr` directly can
+    improve performance compared to the ``cdf`` method of `scipy.stats.gamma`
+    (see last example below).
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+
+    Examples
+    --------
+    Compute the function for ``a=1``, ``b=2`` at ``x=5``.
+
+    >>> import numpy as np
+    >>> from scipy.special import gdtr
+    >>> import matplotlib.pyplot as plt
+    >>> gdtr(1., 2., 5.)
+    0.9595723180054873
+
+    Compute the function for ``a=1`` and ``b=1`` at several points by
+    providing a NumPy array for `x`.
+
+    >>> xvalues = np.array([1., 2., 3., 4])
+    >>> gdtr(1., 1., xvalues)
+    array([0.63212056, 0.86466472, 0.95021293, 0.98168436])
+
+    `gdtr` can evaluate different parameter sets by providing arrays with
+    broadcasting compatible shapes for `a`, `b` and `x`. Here we compute the
+    function for three different `a` at four positions `x` and ``b=3``,
+    resulting in a 3x4 array.
+
+    >>> a = np.array([[0.5], [1.5], [2.5]])
+    >>> x = np.array([1., 2., 3., 4])
+    >>> a.shape, x.shape
+    ((3, 1), (4,))
+
+    >>> gdtr(a, 3., x)
+    array([[0.01438768, 0.0803014 , 0.19115317, 0.32332358],
+           [0.19115317, 0.57680992, 0.82642193, 0.9380312 ],
+           [0.45618688, 0.87534798, 0.97974328, 0.9972306 ]])
+
+    Plot the function for four different parameter sets.
+
+    >>> a_parameters = [0.3, 1, 2, 6]
+    >>> b_parameters = [2, 10, 15, 20]
+    >>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']
+    >>> parameters_list = list(zip(a_parameters, b_parameters, linestyles))
+    >>> x = np.linspace(0, 30, 1000)
+    >>> fig, ax = plt.subplots()
+    >>> for parameter_set in parameters_list:
+    ...     a, b, style = parameter_set
+    ...     gdtr_vals = gdtr(a, b, x)
+    ...     ax.plot(x, gdtr_vals, label=rf"$a={a},\, b={b}$", ls=style)
+    >>> ax.legend()
+    >>> ax.set_xlabel("$x$")
+    >>> ax.set_title("Gamma distribution cumulative distribution function")
+    >>> plt.show()
+
+    The gamma distribution is also available as `scipy.stats.gamma`. Using
+    `gdtr` directly can be much faster than calling the ``cdf`` method of
+    `scipy.stats.gamma`, especially for small arrays or individual values.
+    To get the same results one must use the following parametrization:
+    ``stats.gamma(b, scale=1/a).cdf(x)=gdtr(a, b, x)``.
+
+    >>> from scipy.stats import gamma
+    >>> a = 2.
+    >>> b = 3
+    >>> x = 1.
+    >>> gdtr_result = gdtr(a, b, x)  # this will often be faster than below
+    >>> gamma_dist_result = gamma(b, scale=1/a).cdf(x)
+    >>> gdtr_result == gamma_dist_result  # test that results are equal
+    True
+    """)
+
+add_newdoc("gdtrc",
+    r"""
+    gdtrc(a, b, x, out=None)
+
+    Gamma distribution survival function.
+
+    Integral from `x` to infinity of the gamma probability density function,
+
+    .. math::
+
+        F = \int_x^\infty \frac{a^b}{\Gamma(b)} t^{b-1} e^{-at}\,dt,
+
+    where :math:`\Gamma` is the gamma function.
+
+    Parameters
+    ----------
+    a : array_like
+        The rate parameter of the gamma distribution, sometimes denoted
+        :math:`\beta` (float). It is also the reciprocal of the scale
+        parameter :math:`\theta`.
+    b : array_like
+        The shape parameter of the gamma distribution, sometimes denoted
+        :math:`\alpha` (float).
+    x : array_like
+        The quantile (lower limit of integration; float).
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    F : scalar or ndarray
+        The survival function of the gamma distribution with parameters `a`
+        and `b` evaluated at `x`.
+
+    See Also
+    --------
+    gdtr : Gamma distribution cumulative distribution function
+    scipy.stats.gamma : Gamma distribution
+    gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
+
+    Notes
+    -----
+    The evaluation is carried out using the relation to the incomplete gamma
+    integral (regularized gamma function).
+
+    Wrapper for the Cephes [1]_ routine `gdtrc`. Calling `gdtrc` directly can
+    improve performance compared to the ``sf`` method of `scipy.stats.gamma`
+    (see last example below).
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+
+    Examples
+    --------
+    Compute the function for ``a=1`` and ``b=2`` at ``x=5``.
+
+    >>> import numpy as np
+    >>> from scipy.special import gdtrc
+    >>> import matplotlib.pyplot as plt
+    >>> gdtrc(1., 2., 5.)
+    0.04042768199451279
+
+    Compute the function for ``a=1``, ``b=1`` at several points by providing
+    a NumPy array for `x`.
+
+    >>> xvalues = np.array([1., 2., 3., 4])
+    >>> gdtrc(1., 1., xvalues)
+    array([0.36787944, 0.13533528, 0.04978707, 0.01831564])
+
+    `gdtrc` can evaluate different parameter sets by providing arrays with
+    broadcasting compatible shapes for `a`, `b` and `x`. Here we compute the
+    function for three different `a` at four positions `x` and ``b=3``,
+    resulting in a 3x4 array.
+
+    >>> a = np.array([[0.5], [1.5], [2.5]])
+    >>> x = np.array([1., 2., 3., 4])
+    >>> a.shape, x.shape
+    ((3, 1), (4,))
+
+    >>> gdtrc(a, 3., x)
+    array([[0.98561232, 0.9196986 , 0.80884683, 0.67667642],
+           [0.80884683, 0.42319008, 0.17357807, 0.0619688 ],
+           [0.54381312, 0.12465202, 0.02025672, 0.0027694 ]])
+
+    Plot the function for four different parameter sets.
+
+    >>> a_parameters = [0.3, 1, 2, 6]
+    >>> b_parameters = [2, 10, 15, 20]
+    >>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']
+    >>> parameters_list = list(zip(a_parameters, b_parameters, linestyles))
+    >>> x = np.linspace(0, 30, 1000)
+    >>> fig, ax = plt.subplots()
+    >>> for parameter_set in parameters_list:
+    ...     a, b, style = parameter_set
+    ...     gdtrc_vals = gdtrc(a, b, x)
+    ...     ax.plot(x, gdtrc_vals, label=rf"$a={a},\, b={b}$", ls=style)
+    >>> ax.legend()
+    >>> ax.set_xlabel("$x$")
+    >>> ax.set_title("Gamma distribution survival function")
+    >>> plt.show()
+
+    The gamma distribution is also available as `scipy.stats.gamma`.
+    Using `gdtrc` directly can be much faster than calling the ``sf`` method
+    of `scipy.stats.gamma`, especially for small arrays or individual
+    values. To get the same results one must use the following parametrization:
+    ``stats.gamma(b, scale=1/a).sf(x)=gdtrc(a, b, x)``.
+
+    >>> from scipy.stats import gamma
+    >>> a = 2
+    >>> b = 3
+    >>> x = 1.
+    >>> gdtrc_result = gdtrc(a, b, x)  # this will often be faster than below
+    >>> gamma_dist_result = gamma(b, scale=1/a).sf(x)
+    >>> gdtrc_result == gamma_dist_result  # test that results are equal
+    True
+    """)
+
+add_newdoc("gdtria",
+    """
+    gdtria(p, b, x, out=None)
+
+    Inverse of `gdtr` vs a.
+
+    Returns the inverse with respect to the parameter `a` of ``p =
+    gdtr(a, b, x)``, the cumulative distribution function of the gamma
+    distribution.
+
+    Parameters
+    ----------
+    p : array_like
+        Probability values.
+    b : array_like
+        `b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
+        of the gamma distribution.
+    x : array_like
+        Nonnegative real values, from the domain of the gamma distribution.
+    out : ndarray, optional
+        If a fourth argument is given, it must be a numpy.ndarray whose size
+        matches the broadcast result of `p`, `b` and `x`.  `out` is then the
+        array returned by the function.
+
+    Returns
+    -------
+    a : scalar or ndarray
+        Values of the `a` parameter such that `p = gdtr(a, b, x)`.  `1/a`
+        is the "scale" parameter of the gamma distribution.
+
+    See Also
+    --------
+    gdtr : CDF of the gamma distribution.
+    gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
+    gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
+
+    Notes
+    -----
+    Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
+
+    The cumulative distribution function `p` is computed using a routine by
+    DiDinato and Morris [2]_. Computation of `a` involves a search for a value
+    that produces the desired value of `p`. The search relies on the
+    monotonicity of `p` with `a`.
+
+    References
+    ----------
+    .. [1] Barry Brown, James Lovato, and Kathy Russell,
+           CDFLIB: Library of Fortran Routines for Cumulative Distribution
+           Functions, Inverses, and Other Parameters.
+    .. [2] DiDinato, A. R. and Morris, A. H.,
+           Computation of the incomplete gamma function ratios and their
+           inverse.  ACM Trans. Math. Softw. 12 (1986), 377-393.
+
+    Examples
+    --------
+    First evaluate `gdtr`.
+
+    >>> from scipy.special import gdtr, gdtria
+    >>> p = gdtr(1.2, 3.4, 5.6)
+    >>> print(p)
+    0.94378087442
+
+    Verify the inverse.
+
+    >>> gdtria(p, 3.4, 5.6)
+    1.2
+    """)
+
+add_newdoc("gdtrib",
+    """
+    gdtrib(a, p, x, out=None)
+
+    Inverse of `gdtr` vs b.
+
+    Returns the inverse with respect to the parameter `b` of ``p =
+    gdtr(a, b, x)``, the cumulative distribution function of the gamma
+    distribution.
+
+    Parameters
+    ----------
+    a : array_like
+        `a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
+        parameter of the gamma distribution.
+    p : array_like
+        Probability values.
+    x : array_like
+        Nonnegative real values, from the domain of the gamma distribution.
+    out : ndarray, optional
+        If a fourth argument is given, it must be a numpy.ndarray whose size
+        matches the broadcast result of `a`, `p` and `x`.  `out` is then the
+        array returned by the function.
+
+    Returns
+    -------
+    b : scalar or ndarray
+        Values of the `b` parameter such that `p = gdtr(a, b, x)`.  `b` is
+        the "shape" parameter of the gamma distribution.
+
+    See Also
+    --------
+    gdtr : CDF of the gamma distribution.
+    gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
+    gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
+
+    Notes
+    -----
+    Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
+
+    The cumulative distribution function `p` is computed using a routine by
+    DiDinato and Morris [2]_. Computation of `b` involves a search for a value
+    that produces the desired value of `p`. The search relies on the
+    monotonicity of `p` with `b`.
+
+    References
+    ----------
+    .. [1] Barry Brown, James Lovato, and Kathy Russell,
+           CDFLIB: Library of Fortran Routines for Cumulative Distribution
+           Functions, Inverses, and Other Parameters.
+    .. [2] DiDinato, A. R. and Morris, A. H.,
+           Computation of the incomplete gamma function ratios and their
+           inverse.  ACM Trans. Math. Softw. 12 (1986), 377-393.
+
+    Examples
+    --------
+    First evaluate `gdtr`.
+
+    >>> from scipy.special import gdtr, gdtrib
+    >>> p = gdtr(1.2, 3.4, 5.6)
+    >>> print(p)
+    0.94378087442
+
+    Verify the inverse.
+
+    >>> gdtrib(1.2, p, 5.6)
+    3.3999999999723882
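+
+    The roundtrip also works elementwise when arrays are supplied (shown
+    as a consistency check rather than as printed reference values):
+
+    >>> import numpy as np
+    >>> x = np.array([0.5, 1., 5.6])
+    >>> p = gdtr(1.2, 3.4, x)
+    >>> np.allclose(gdtrib(1.2, p, x), 3.4)
+    True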
+    """)
+
+add_newdoc("gdtrix",
+    """
+    gdtrix(a, b, p, out=None)
+
+    Inverse of `gdtr` vs x.
+
+    Returns the inverse with respect to the parameter `x` of ``p =
+    gdtr(a, b, x)``, the cumulative distribution function of the gamma
+    distribution. This is also known as the pth quantile of the
+    distribution.
+
+    Parameters
+    ----------
+    a : array_like
+        `a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
+        parameter of the gamma distribution.
+    b : array_like
+        `b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
+        of the gamma distribution.
+    p : array_like
+        Probability values.
+    out : ndarray, optional
+        If a fourth argument is given, it must be a numpy.ndarray whose size
+        matches the broadcast result of `a`, `b` and `p`. `out` is then the
+        array returned by the function.
+
+    Returns
+    -------
+    x : scalar or ndarray
+        Values of the `x` parameter such that `p = gdtr(a, b, x)`.
+
+    See Also
+    --------
+    gdtr : CDF of the gamma distribution.
+    gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
+    gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
+
+    Notes
+    -----
+    Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
+
+    The cumulative distribution function `p` is computed using a routine by
+    DiDinato and Morris [2]_. Computation of `x` involves a search for a value
+    that produces the desired value of `p`. The search relies on the
+    monotonicity of `p` with `x`.
+
+    References
+    ----------
+    .. [1] Barry Brown, James Lovato, and Kathy Russell,
+           CDFLIB: Library of Fortran Routines for Cumulative Distribution
+           Functions, Inverses, and Other Parameters.
+    .. [2] DiDinato, A. R. and Morris, A. H.,
+           Computation of the incomplete gamma function ratios and their
+           inverse.  ACM Trans. Math. Softw. 12 (1986), 377-393.
+
+    Examples
+    --------
+    First evaluate `gdtr`.
+
+    >>> from scipy.special import gdtr, gdtrix
+    >>> p = gdtr(1.2, 3.4, 5.6)
+    >>> print(p)
+    0.94378087442
+
+    Verify the inverse.
+
+    >>> gdtrix(1.2, 3.4, p)
+    5.5999999999999996
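+
+    The roundtrip also works elementwise when arrays are supplied (shown
+    as a consistency check rather than as printed reference values):
+
+    >>> import numpy as np
+    >>> x = np.array([0.5, 1., 5.6])
+    >>> p = gdtr(1.2, 3.4, x)
+    >>> np.allclose(gdtrix(1.2, 3.4, p), x)
+    True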
+    """)
+
+add_newdoc("hankel1",
+    r"""
+    hankel1(v, z, out=None)
+
+    Hankel function of the first kind
+
+    Parameters
+    ----------
+    v : array_like
+        Order (float).
+    z : array_like
+        Argument (float or complex).
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the Hankel function of the first kind.
+
+    Notes
+    -----
+    A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
+    computation using the relation,
+
+    .. math:: H^{(1)}_v(z) = \frac{2}{\imath\pi} \exp(-\imath \pi v/2) K_v(z \exp(-\imath\pi/2))
+
+    where :math:`K_v` is the modified Bessel function of the second kind.
+    For negative orders, the relation
+
+    .. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \exp(\imath\pi v)
+
+    is used.
+
+    See also
+    --------
+    hankel1e : This function with leading exponential behavior stripped off.
+
+    References
+    ----------
+    .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
+           of a Complex Argument and Nonnegative Order",
+           http://netlib.org/amos/
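+
+    Examples
+    --------
+    For real arguments, the function can be cross-checked against the
+    identity :math:`H^{(1)}_v(z) = J_v(z) + \imath Y_v(z)` (shown here as
+    a consistency check rather than as stored reference values):
+
+    >>> import numpy as np
+    >>> from scipy.special import hankel1, jv, yv
+    >>> x = np.array([1., 2.5, 10.])
+    >>> np.allclose(hankel1(0, x), jv(0, x) + 1j*yv(0, x))
+    True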
+    """)
+
+add_newdoc("hankel1e",
+    r"""
+    hankel1e(v, z, out=None)
+
+    Exponentially scaled Hankel function of the first kind
+
+    Defined as::
+
+        hankel1e(v, z) = hankel1(v, z) * exp(-1j * z)
+
+    Parameters
+    ----------
+    v : array_like
+        Order (float).
+    z : array_like
+        Argument (float or complex).
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the exponentially scaled Hankel function.
+
+    Notes
+    -----
+    A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
+    computation using the relation,
+
+    .. math:: H^{(1)}_v(z) = \frac{2}{\imath\pi} \exp(-\imath \pi v/2) K_v(z \exp(-\imath\pi/2))
+
+    where :math:`K_v` is the modified Bessel function of the second kind.
+    For negative orders, the relation
+
+    .. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \exp(\imath\pi v)
+
+    is used.
+
+    References
+    ----------
+    .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
+           of a Complex Argument and Nonnegative Order",
+           http://netlib.org/amos/
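+
+    Examples
+    --------
+    A quick check of the defining relation above (the values are complex,
+    so they are compared rather than printed):
+
+    >>> import numpy as np
+    >>> from scipy.special import hankel1, hankel1e
+    >>> z = 3.0 + 4.0j
+    >>> np.allclose(hankel1e(1, z), hankel1(1, z) * np.exp(-1j * z))
+    True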
+    """)
+
+add_newdoc("hankel2",
+    r"""
+    hankel2(v, z, out=None)
+
+    Hankel function of the second kind
+
+    Parameters
+    ----------
+    v : array_like
+        Order (float).
+    z : array_like
+        Argument (float or complex).
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the Hankel function of the second kind.
+
+    Notes
+    -----
+    A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
+    computation using the relation,
+
+    .. math:: H^{(2)}_v(z) = -\frac{2}{\imath\pi} \exp(\imath \pi v/2) K_v(z \exp(\imath\pi/2))
+
+    where :math:`K_v` is the modified Bessel function of the second kind.
+    For negative orders, the relation
+
+    .. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \exp(-\imath\pi v)
+
+    is used.
+
+    See also
+    --------
+    hankel2e : this function with leading exponential behavior stripped off.
+
+    References
+    ----------
+    .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
+           of a Complex Argument and Nonnegative Order",
+           http://netlib.org/amos/
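+
+    Examples
+    --------
+    For real arguments, the function can be cross-checked against the
+    identity :math:`H^{(2)}_v(z) = J_v(z) - \imath Y_v(z)` (shown here as
+    a consistency check rather than as stored reference values):
+
+    >>> import numpy as np
+    >>> from scipy.special import hankel2, jv, yv
+    >>> x = np.array([1., 2.5, 10.])
+    >>> np.allclose(hankel2(0, x), jv(0, x) - 1j*yv(0, x))
+    True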
+    """)
+
+add_newdoc("hankel2e",
+    r"""
+    hankel2e(v, z, out=None)
+
+    Exponentially scaled Hankel function of the second kind
+
+    Defined as::
+
+        hankel2e(v, z) = hankel2(v, z) * exp(1j * z)
+
+    Parameters
+    ----------
+    v : array_like
+        Order (float).
+    z : array_like
+        Argument (float or complex).
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the exponentially scaled Hankel function of the second kind.
+
+    Notes
+    -----
+    A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
+    computation using the relation,
+
+    .. math:: H^{(2)}_v(z) = -\frac{2}{\imath\pi} \exp(\frac{\imath \pi v}{2}) K_v(z \exp(\frac{\imath\pi}{2}))
+
+    where :math:`K_v` is the modified Bessel function of the second kind.
+    For negative orders, the relation
+
+    .. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \exp(-\imath\pi v)
+
+    is used.
+
+    References
+    ----------
+    .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
+           of a Complex Argument and Nonnegative Order",
+           http://netlib.org/amos/
+
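+    Examples
+    --------
+    A quick check of the defining relation above (the values are complex,
+    so they are compared rather than printed):
+
+    >>> import numpy as np
+    >>> from scipy.special import hankel2, hankel2e
+    >>> z = 3.0 - 4.0j
+    >>> np.allclose(hankel2e(1, z), hankel2(1, z) * np.exp(1j * z))
+    True
+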
+    """)
+
+add_newdoc("huber",
+    r"""
+    huber(delta, r, out=None)
+
+    Huber loss function.
+
+    .. math:: \text{huber}(\delta, r) = \begin{cases} \infty & \delta < 0  \\ \frac{1}{2}r^2 & 0 \le \delta, | r | \le \delta \\ \delta ( |r| - \frac{1}{2}\delta ) & \text{otherwise} \end{cases}
+
+    Parameters
+    ----------
+    delta : ndarray
+        Input array, indicating the quadratic vs. linear loss changepoint.
+    r : ndarray
+        Input array, possibly representing residuals.
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    scalar or ndarray
+        The computed Huber loss function values.
+
+    See also
+    --------
+    pseudo_huber : smooth approximation of this function
+
+    Notes
+    -----
+    `huber` is useful as a loss function in robust statistics or machine
+    learning to reduce the influence of outliers as compared to the common
+    squared error loss: residuals with a magnitude higher than `delta` are
+    not squared [1]_.
+
+    Typically, `r` represents residuals, the difference
+    between a model prediction and data. Then, for :math:`|r|\leq\delta`,
+    `huber` resembles the squared error and for :math:`|r|>\delta` the
+    absolute error. This way, the Huber loss often achieves
+    a fast convergence in model fitting for small residuals like the squared
+    error loss function and still reduces the influence of outliers
+    (:math:`|r|>\delta`) like the absolute error loss. As :math:`\delta` is
+    the cutoff between squared and absolute error regimes, it has
+    to be tuned carefully for each problem. `huber` is also
+    convex, making it suitable for gradient based optimization.
+
+    .. versionadded:: 0.15.0
+
+    References
+    ----------
+    .. [1] Peter Huber. "Robust Estimation of a Location Parameter",
+           1964. Annals of Statistics. 53 (1): 73 - 101.
+
+    Examples
+    --------
+    Import all necessary modules.
+
+    >>> import numpy as np
+    >>> from scipy.special import huber
+    >>> import matplotlib.pyplot as plt
+
+    Compute the function for ``delta=1`` at ``r=2``.
+
+    >>> huber(1., 2.)
+    1.5
+
+    Compute the function for different `delta` by providing a NumPy array or
+    list for `delta`.
+
+    >>> huber([1., 3., 5.], 4.)
+    array([3.5, 7.5, 8. ])
+
+    Compute the function at different points by providing a NumPy array or
+    list for `r`.
+
+    >>> huber(2., np.array([1., 1.5, 3.]))
+    array([0.5  , 1.125, 4.   ])
+
+    The function can be calculated for different `delta` and `r` by
+    providing arrays for both with compatible shapes for broadcasting.
+
+    >>> r = np.array([1., 2.5, 8., 10.])
+    >>> deltas = np.array([[1.], [5.], [9.]])
+    >>> print(r.shape, deltas.shape)
+    (4,) (3, 1)
+
+    >>> huber(deltas, r)
+    array([[ 0.5  ,  2.   ,  7.5  ,  9.5  ],
+           [ 0.5  ,  3.125, 27.5  , 37.5  ],
+           [ 0.5  ,  3.125, 32.   , 49.5  ]])
+
+    Plot the function for different `delta`.
+
+    >>> x = np.linspace(-4, 4, 500)
+    >>> deltas = [1, 2, 3]
+    >>> linestyles = ["dashed", "dotted", "dashdot"]
+    >>> fig, ax = plt.subplots()
+    >>> combined_plot_parameters = list(zip(deltas, linestyles))
+    >>> for delta, style in combined_plot_parameters:
+    ...     ax.plot(x, huber(delta, x), label=rf"$\delta={delta}$", ls=style)
+    >>> ax.legend(loc="upper center")
+    >>> ax.set_xlabel("$x$")
+    >>> ax.set_title(r"Huber loss function $h_{\delta}(x)$")
+    >>> ax.set_xlim(-4, 4)
+    >>> ax.set_ylim(0, 8)
+    >>> plt.show()
+    """)
+
+add_newdoc("hyp0f1",
+    r"""
+    hyp0f1(v, z, out=None)
+
+    Confluent hypergeometric limit function 0F1.
+
+    Parameters
+    ----------
+    v : array_like
+        Real-valued parameter
+    z : array_like
+        Real- or complex-valued argument
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        The confluent hypergeometric limit function
+
+    Notes
+    -----
+    This function is defined as:
+
+    .. math:: _0F_1(v, z) = \sum_{k=0}^{\infty}\frac{z^k}{(v)_k k!}.
+
+    It's also the limit as :math:`q \to \infty` of :math:`_1F_1(q; v; z/q)`,
+    and satisfies the differential equation :math:`zf''(z) + vf'(z) =
+    f(z)`. See [1]_ for more information.
+
+    References
+    ----------
+    .. [1] Wolfram MathWorld, "Confluent Hypergeometric Limit Function",
+           http://mathworld.wolfram.com/ConfluentHypergeometricLimitFunction.html
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import scipy.special as sc
+
+    It is one when `z` is zero.
+
+    >>> sc.hyp0f1(1, 0)
+    1.0
+
+    It is the limit of the confluent hypergeometric function as `q`
+    goes to infinity.
+
+    >>> q = np.array([1, 10, 100, 1000])
+    >>> v = 1
+    >>> z = 1
+    >>> sc.hyp1f1(q, v, z / q)
+    array([2.71828183, 2.31481985, 2.28303778, 2.27992985])
+    >>> sc.hyp0f1(v, z)
+    2.2795853023360673
+
+    It is related to Bessel functions.
+
+    >>> n = 1
+    >>> x = np.linspace(0, 1, 5)
+    >>> sc.jv(n, x)
+    array([0.        , 0.12402598, 0.24226846, 0.3492436 , 0.44005059])
+    >>> (0.5 * x)**n / sc.factorial(n) * sc.hyp0f1(n + 1, -0.25 * x**2)
+    array([0.        , 0.12402598, 0.24226846, 0.3492436 , 0.44005059])
+
+    """)
+
+add_newdoc("hyp1f1",
+    r"""
+    hyp1f1(a, b, x, out=None)
+
+    Confluent hypergeometric function 1F1.
+
+    The confluent hypergeometric function is defined by the series
+
+    .. math::
+
+       {}_1F_1(a; b; x) = \sum_{k = 0}^\infty \frac{(a)_k}{(b)_k k!} x^k.
+
+    See [dlmf]_ for more details. Here :math:`(\cdot)_k` is the
+    Pochhammer symbol; see `poch`.
+
+    Parameters
+    ----------
+    a, b : array_like
+        Real parameters
+    x : array_like
+        Real or complex argument
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the confluent hypergeometric function
+
+    See also
+    --------
+    hyperu : another confluent hypergeometric function
+    hyp0f1 : confluent hypergeometric limit function
+    hyp2f1 : Gaussian hypergeometric function
+
+    References
+    ----------
+    .. [dlmf] NIST Digital Library of Mathematical Functions
+              https://dlmf.nist.gov/13.2#E2
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import scipy.special as sc
+
+    It is one when `x` is zero:
+
+    >>> sc.hyp1f1(0.5, 0.5, 0)
+    1.0
+
+    It is singular when `b` is a nonpositive integer.
+
+    >>> sc.hyp1f1(0.5, -1, 0)
+    inf
+
+    It is a polynomial when `a` is a nonpositive integer.
+
+    >>> a, b, x = -1, 0.5, np.array([1.0, 2.0, 3.0, 4.0])
+    >>> sc.hyp1f1(a, b, x)
+    array([-1., -3., -5., -7.])
+    >>> 1 + (a / b) * x
+    array([-1., -3., -5., -7.])
+
+    It reduces to the exponential function when `a = b`.
+
+    >>> sc.hyp1f1(2, 2, [1, 2, 3, 4])
+    array([ 2.71828183,  7.3890561 , 20.08553692, 54.59815003])
+    >>> np.exp([1, 2, 3, 4])
+    array([ 2.71828183,  7.3890561 , 20.08553692, 54.59815003])
+
+    """)
+
+add_newdoc("hyp2f1",
+    r"""
+    hyp2f1(a, b, c, z, out=None)
+
+    Gauss hypergeometric function 2F1(a, b; c; z)
+
+    Parameters
+    ----------
+    a, b, c : array_like
+        Arguments, should be real-valued.
+    z : array_like
+        Argument, real or complex.
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    hyp2f1 : scalar or ndarray
+        The values of the Gaussian hypergeometric function.
+
+    See also
+    --------
+    hyp0f1 : confluent hypergeometric limit function.
+    hyp1f1 : Kummer's (confluent hypergeometric) function.
+
+    Notes
+    -----
+    This function is defined for :math:`|z| < 1` as
+
+    .. math::
+
+       \mathrm{hyp2f1}(a, b, c, z) = \sum_{n=0}^\infty
+       \frac{(a)_n (b)_n}{(c)_n}\frac{z^n}{n!},
+
+    and defined on the rest of the complex z-plane by analytic
+    continuation [1]_.
+    Here :math:`(\cdot)_n` is the Pochhammer symbol; see `poch`. When
+    `a` or `b` is a nonpositive integer, the series terminates and the
+    result is a polynomial.
+
+    The implementation for complex values of ``z`` is described in [2]_,
+    except for ``z`` in the region defined by
+
+    .. math::
+
+         0.9 \leq \left|z\right| < 1.1, \quad
+         \left|1 - z\right| \geq 0.9, \quad
+         \mathrm{Re}(z) \geq 0
+
+    in which the implementation follows [4]_.
+
+    References
+    ----------
+    .. [1] NIST Digital Library of Mathematical Functions
+           https://dlmf.nist.gov/15.2
+    .. [2] S. Zhang and J.M. Jin, "Computation of Special Functions", Wiley 1996
+    .. [3] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+    .. [4] J.L. Lopez and N.M. Temme, "New series expansions of the Gauss
+           hypergeometric function", Adv Comput Math 39, 349-365 (2013).
+           https://doi.org/10.1007/s10444-012-9283-y
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import scipy.special as sc
+
+    It has poles when `c` is a negative integer.
+
+    >>> sc.hyp2f1(1, 1, -2, 1)
+    inf
+
+    It is a polynomial when `a` or `b` is a negative integer.
+
+    >>> a, b, c = -1, 1, 1.5
+    >>> z = np.linspace(0, 1, 5)
+    >>> sc.hyp2f1(a, b, c, z)
+    array([1.        , 0.83333333, 0.66666667, 0.5       , 0.33333333])
+    >>> 1 + a * b * z / c
+    array([1.        , 0.83333333, 0.66666667, 0.5       , 0.33333333])
+
+    It is symmetric in `a` and `b`.
+
+    >>> a = np.linspace(0, 1, 5)
+    >>> b = np.linspace(0, 1, 5)
+    >>> sc.hyp2f1(a, b, 1, 0.5)
+    array([1.        , 1.03997334, 1.1803406 , 1.47074441, 2.        ])
+    >>> sc.hyp2f1(b, a, 1, 0.5)
+    array([1.        , 1.03997334, 1.1803406 , 1.47074441, 2.        ])
+
+    It contains many other functions as special cases.
+
+    >>> z = 0.5
+    >>> sc.hyp2f1(1, 1, 2, z)
+    1.3862943611198901
+    >>> -np.log(1 - z) / z
+    1.3862943611198906
+
+    >>> sc.hyp2f1(0.5, 1, 1.5, z**2)
+    1.098612288668109
+    >>> np.log((1 + z) / (1 - z)) / (2 * z)
+    1.0986122886681098
+
+    >>> sc.hyp2f1(0.5, 1, 1.5, -z**2)
+    0.9272952180016117
+    >>> np.arctan(z) / z
+    0.9272952180016122
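+
+    It satisfies Euler's transformation, shown here as a consistency
+    check (the identity holds wherever both sides are defined):
+
+    >>> a, b, c, z = 0.5, 1.5, 2.5, 0.3
+    >>> lhs = sc.hyp2f1(a, b, c, z)
+    >>> rhs = (1 - z)**(c - a - b) * sc.hyp2f1(c - a, c - b, c, z)
+    >>> np.allclose(lhs, rhs)
+    True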
+
+    """)
+
+add_newdoc("hyperu",
+    r"""
+    hyperu(a, b, x, out=None)
+
+    Confluent hypergeometric function U
+
+    It is defined as the solution to the equation
+
+    .. math::
+
+       x \frac{d^2w}{dx^2} + (b - x) \frac{dw}{dx} - aw = 0
+
+    which satisfies the property
+
+    .. math::
+
+       U(a, b, x) \sim x^{-a}
+
+    as :math:`x \to \infty`. See [dlmf]_ for more details.
+
+    Parameters
+    ----------
+    a, b : array_like
+        Real-valued parameters
+    x : array_like
+        Real-valued argument
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of `U`
+
+    References
+    ----------
+    .. [dlmf] NIST Digital Library of Mathematical Functions
+              https://dlmf.nist.gov/13.2#E6
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import scipy.special as sc
+
+    It has a branch cut along the negative `x` axis.
+
+    >>> x = np.linspace(-0.1, -10, 5)
+    >>> sc.hyperu(1, 1, x)
+    array([nan, nan, nan, nan, nan])
+
+    It approaches zero as `x` goes to infinity.
+
+    >>> x = np.array([1, 10, 100])
+    >>> sc.hyperu(1, 1, x)
+    array([0.59634736, 0.09156333, 0.00990194])
+
+    It satisfies Kummer's transformation.
+
+    >>> a, b, x = 2, 1, 1
+    >>> sc.hyperu(a, b, x)
+    0.1926947246463881
+    >>> x**(1 - b) * sc.hyperu(a - b + 1, 2 - b, x)
+    0.1926947246463881
+
+    """)
+
+add_newdoc("i0",
+    r"""
+    i0(x, out=None)
+
+    Modified Bessel function of order 0.
+
+    Defined as,
+
+    .. math::
+        I_0(x) = \sum_{k=0}^\infty \frac{(x^2/4)^k}{(k!)^2} = J_0(\imath x),
+
+    where :math:`J_0` is the Bessel function of the first kind of order 0.
+
+    Parameters
+    ----------
+    x : array_like
+        Argument (float)
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    I : scalar or ndarray
+        Value of the modified Bessel function of order 0 at `x`.
+
+    Notes
+    -----
+    The range is partitioned into the two intervals [0, 8] and (8, infinity).
+    Chebyshev polynomial expansions are employed in each interval.
+
+    This function is a wrapper for the Cephes [1]_ routine `i0`.
+
+    See also
+    --------
+    iv: Modified Bessel function of any order
+    i0e: Exponentially scaled modified Bessel function of order 0
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+
+    Examples
+    --------
+    Calculate the function at one point:
+
+    >>> from scipy.special import i0
+    >>> i0(1.)
+    1.2660658777520082
+
+    Calculate at several points:
+
+    >>> import numpy as np
+    >>> i0(np.array([-2., 0., 3.5]))
+    array([2.2795853 , 1.        , 7.37820343])
+
+    Plot the function from -10 to 10.
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots()
+    >>> x = np.linspace(-10., 10., 1000)
+    >>> y = i0(x)
+    >>> ax.plot(x, y)
+    >>> plt.show()
+
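+    As a consistency check of the relation to :math:`J_0` stated above
+    (``jv`` accepts complex arguments):
+
+    >>> from scipy.special import jv
+    >>> x = np.array([0.5, 2., 6.])
+    >>> np.allclose(i0(x), jv(0, 1j*x).real)
+    True
+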
+    """)
+
+add_newdoc("i0e",
+    """
+    i0e(x, out=None)
+
+    Exponentially scaled modified Bessel function of order 0.
+
+    Defined as::
+
+        i0e(x) = exp(-abs(x)) * i0(x).
+
+    Parameters
+    ----------
+    x : array_like
+        Argument (float)
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    I : scalar or ndarray
+        Value of the exponentially scaled modified Bessel function of order 0
+        at `x`.
+
+    Notes
+    -----
+    The range is partitioned into the two intervals [0, 8] and (8, infinity).
+    Chebyshev polynomial expansions are employed in each interval. The
+    polynomial expansions used are the same as those in `i0`, but
+    they are not multiplied by the dominant exponential factor.
+
+    This function is a wrapper for the Cephes [1]_ routine `i0e`.
+
+    See also
+    --------
+    iv: Modified Bessel function of the first kind
+    i0: Modified Bessel function of order 0
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+
+    Examples
+    --------
+    Calculate the function at one point:
+
+    >>> from scipy.special import i0e
+    >>> i0e(1.)
+    0.46575960759364043
+
+    Calculate the function at several points:
+
+    >>> import numpy as np
+    >>> i0e(np.array([-2., 0., 3.]))
+    array([0.30850832, 1.        , 0.24300035])
+
+    Plot the function from -10 to 10.
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots()
+    >>> x = np.linspace(-10., 10., 1000)
+    >>> y = i0e(x)
+    >>> ax.plot(x, y)
+    >>> plt.show()
+
+    Exponentially scaled Bessel functions are useful for large arguments for
+    which the unscaled Bessel functions overflow or lose precision. In the
+    following example `i0` returns infinity whereas `i0e` still returns
+    a finite number.
+
+    >>> from scipy.special import i0
+    >>> i0(1000.), i0e(1000.)
+    (inf, 0.012617240455891257)
+    """)
+
+add_newdoc("i1",
+    r"""
+    i1(x, out=None)
+
+    Modified Bessel function of order 1.
+
+    Defined as,
+
+    .. math::
+        I_1(x) = \frac{1}{2}x \sum_{k=0}^\infty \frac{(x^2/4)^k}{k! (k + 1)!}
+               = -\imath J_1(\imath x),
+
+    where :math:`J_1` is the Bessel function of the first kind of order 1.
+
+    Parameters
+    ----------
+    x : array_like
+        Argument (float)
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    I : scalar or ndarray
+        Value of the modified Bessel function of order 1 at `x`.
+
+    Notes
+    -----
+    The range is partitioned into the two intervals [0, 8] and (8, infinity).
+    Chebyshev polynomial expansions are employed in each interval.
+
+    This function is a wrapper for the Cephes [1]_ routine `i1`.
+
+    See also
+    --------
+    iv: Modified Bessel function of the first kind
+    i1e: Exponentially scaled modified Bessel function of order 1
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+
+    Examples
+    --------
+    Calculate the function at one point:
+
+    >>> from scipy.special import i1
+    >>> i1(1.)
+    0.5651591039924851
+
+    Calculate the function at several points:
+
+    >>> import numpy as np
+    >>> i1(np.array([-2., 0., 6.]))
+    array([-1.59063685,  0.        , 61.34193678])
+
+    Plot the function between -10 and 10.
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots()
+    >>> x = np.linspace(-10., 10., 1000)
+    >>> y = i1(x)
+    >>> ax.plot(x, y)
+    >>> plt.show()
+
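+    As a consistency check of the relation to :math:`J_1` stated above
+    (``jv`` accepts complex arguments):
+
+    >>> from scipy.special import jv
+    >>> x = np.array([0.5, 2., 6.])
+    >>> np.allclose(i1(x), (-1j*jv(1, 1j*x)).real)
+    True
+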
+    """)
+
+add_newdoc("i1e",
+    """
+    i1e(x, out=None)
+
+    Exponentially scaled modified Bessel function of order 1.
+
+    Defined as::
+
+        i1e(x) = exp(-abs(x)) * i1(x)
+
+    Parameters
+    ----------
+    x : array_like
+        Argument (float)
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    I : scalar or ndarray
+        Value of the exponentially scaled modified Bessel function of order 1
+        at `x`.
+
+    Notes
+    -----
+    The range is partitioned into the two intervals [0, 8] and (8, infinity).
+    Chebyshev polynomial expansions are employed in each interval. The
+    polynomial expansions used are the same as those in `i1`, but
+    they are not multiplied by the dominant exponential factor.
+
+    This function is a wrapper for the Cephes [1]_ routine `i1e`.
+
+    See also
+    --------
+    iv: Modified Bessel function of the first kind
+    i1: Modified Bessel function of order 1
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+
+    Examples
+    --------
+    Calculate the function at one point:
+
+    >>> from scipy.special import i1e
+    >>> i1e(1.)
+    0.2079104153497085
+
+    Calculate the function at several points:
+
+    >>> import numpy as np
+    >>> i1e(np.array([-2., 0., 6.]))
+    array([-0.21526929,  0.        ,  0.15205146])
+
+    Plot the function between -10 and 10.
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots()
+    >>> x = np.linspace(-10., 10., 1000)
+    >>> y = i1e(x)
+    >>> ax.plot(x, y)
+    >>> plt.show()
+
+    Exponentially scaled Bessel functions are useful for large arguments for
+    which the unscaled Bessel functions overflow or lose precision. In the
+    following example `i1` returns infinity whereas `i1e` still returns a
+    finite number.
+
+    >>> from scipy.special import i1
+    >>> i1(1000.), i1e(1000.)
+    (inf, 0.01261093025692863)
+    """)
+
+add_newdoc("_igam_fac",
+    """
+    Internal function, do not use.
+    """)
+
+add_newdoc("it2i0k0",
+    r"""
+    it2i0k0(x, out=None)
+
+    Integrals related to modified Bessel functions of order 0.
+
+    Computes the integrals
+
+    .. math::
+
+        \int_0^x \frac{I_0(t) - 1}{t} dt \\
+        \int_x^\infty \frac{K_0(t)}{t} dt.
+
+    Parameters
+    ----------
+    x : array_like
+        Values at which to evaluate the integrals.
+    out : tuple of ndarrays, optional
+        Optional output arrays for the function results.
+
+    Returns
+    -------
+    ii0 : scalar or ndarray
+        The integral for `i0`
+    ik0 : scalar or ndarray
+        The integral for `k0`
+
+    References
+    ----------
+    .. [1] S. Zhang and J.M. Jin, "Computation of Special Functions",
+           Wiley 1996
+
+    Examples
+    --------
+    Evaluate the functions at one point.
+
+    >>> from scipy.special import it2i0k0
+    >>> int_i, int_k = it2i0k0(1.)
+    >>> int_i, int_k
+    (0.12897944249456852, 0.2085182909001295)
+
+    Evaluate the functions at several points.
+
+    >>> import numpy as np
+    >>> points = np.array([0.5, 1.5, 3.])
+    >>> int_i, int_k = it2i0k0(points)
+    >>> int_i, int_k
+    (array([0.03149527, 0.30187149, 1.50012461]),
+     array([0.66575102, 0.0823715 , 0.00823631]))
+
+    Plot the functions from 0 to 5.
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots()
+    >>> x = np.linspace(0., 5., 1000)
+    >>> int_i, int_k = it2i0k0(x)
+    >>> ax.plot(x, int_i, label=r"$\int_0^x \frac{I_0(t)-1}{t}\,dt$")
+    >>> ax.plot(x, int_k, label=r"$\int_x^{\infty} \frac{K_0(t)}{t}\,dt$")
+    >>> ax.legend()
+    >>> ax.set_ylim(0, 10)
+    >>> plt.show()
+    """)
+
+add_newdoc("it2j0y0",
+    r"""
+    it2j0y0(x, out=None)
+
+    Integrals related to Bessel functions of the first kind of order 0.
+
+    Computes the integrals
+
+    .. math::
+
+        \int_0^x \frac{1 - J_0(t)}{t} dt \\
+        \int_x^\infty \frac{Y_0(t)}{t} dt.
+
+    For more on :math:`J_0` and :math:`Y_0` see `j0` and `y0`.
+
+    Parameters
+    ----------
+    x : array_like
+        Values at which to evaluate the integrals.
+    out : tuple of ndarrays, optional
+        Optional output arrays for the function results.
+
+    Returns
+    -------
+    ij0 : scalar or ndarray
+        The integral for `j0`
+    iy0 : scalar or ndarray
+        The integral for `y0`
+
+    References
+    ----------
+    .. [1] S. Zhang and J.M. Jin, "Computation of Special Functions",
+           Wiley 1996
+
+    Examples
+    --------
+    Evaluate the functions at one point.
+
+    >>> from scipy.special import it2j0y0
+    >>> int_j, int_y = it2j0y0(1.)
+    >>> int_j, int_y
+    (0.12116524699506871, 0.39527290169929336)
+
+    Evaluate the functions at several points.
+
+    >>> import numpy as np
+    >>> points = np.array([0.5, 1.5, 3.])
+    >>> int_j, int_y = it2j0y0(points)
+    >>> int_j, int_y
+    (array([0.03100699, 0.26227724, 0.85614669]),
+     array([ 0.26968854,  0.29769696, -0.02987272]))
+
+    Plot the functions from 0 to 10.
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots()
+    >>> x = np.linspace(0., 10., 1000)
+    >>> int_j, int_y = it2j0y0(x)
+    >>> ax.plot(x, int_j, label=r"$\int_0^x \frac{1-J_0(t)}{t}\,dt$")
+    >>> ax.plot(x, int_y, label=r"$\int_x^{\infty} \frac{Y_0(t)}{t}\,dt$")
+    >>> ax.legend()
+    >>> ax.set_ylim(-2.5, 2.5)
+    >>> plt.show()
+    """)
+
+add_newdoc("it2struve0",
+    r"""
+    it2struve0(x, out=None)
+
+    Integral related to the Struve function of order 0.
+
+    Returns the integral,
+
+    .. math::
+        \int_x^\infty \frac{H_0(t)}{t}\,dt
+
+    where :math:`H_0` is the Struve function of order 0.
+
+    Parameters
+    ----------
+    x : array_like
+        Lower limit of integration.
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    I : scalar or ndarray
+        The value of the integral.
+
+    See also
+    --------
+    struve
+
+    Notes
+    -----
+    Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
+    Jin [1]_.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
+
+    Examples
+    --------
+    Evaluate the function at one point.
+
+    >>> import numpy as np
+    >>> from scipy.special import it2struve0
+    >>> it2struve0(1.)
+    0.9571973506383524
+
+    Evaluate the function at several points by supplying
+    an array for `x`.
+
+    >>> points = np.array([1., 2., 3.5])
+    >>> it2struve0(points)
+    array([0.95719735, 0.46909296, 0.10366042])
+
+    Plot the function from -10 to 10.
+
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.linspace(-10., 10., 1000)
+    >>> it2struve0_values = it2struve0(x)
+    >>> fig, ax = plt.subplots()
+    >>> ax.plot(x, it2struve0_values)
+    >>> ax.set_xlabel(r'$x$')
+    >>> ax.set_ylabel(r'$\int_x^{\infty}\frac{H_0(t)}{t}\,dt$')
+    >>> plt.show()
+    """)
+
+add_newdoc(
+    "itairy",
+    r"""
+    itairy(x, out=None)
+
+    Integrals of Airy functions
+
+    Calculates the integrals of Airy functions from 0 to `x`.
+
+    Parameters
+    ----------
+    x : array_like
+        Upper limit of integration (float).
+    out : tuple of ndarray, optional
+        Optional output arrays for the function values
+
+    Returns
+    -------
+    Apt : scalar or ndarray
+        Integral of Ai(t) from 0 to x.
+    Bpt : scalar or ndarray
+        Integral of Bi(t) from 0 to x.
+    Ant : scalar or ndarray
+        Integral of Ai(-t) from 0 to x.
+    Bnt : scalar or ndarray
+        Integral of Bi(-t) from 0 to x.
+
+    Notes
+    -----
+    Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
+    Jin [1]_.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
+
+    Examples
+    --------
+    Compute the functions at ``x=1.``.
+
+    >>> import numpy as np
+    >>> from scipy.special import itairy
+    >>> import matplotlib.pyplot as plt
+    >>> apt, bpt, ant, bnt = itairy(1.)
+    >>> apt, bpt, ant, bnt
+    (0.23631734191710949,
+     0.8727691167380077,
+     0.46567398346706845,
+     0.3730050096342943)
+
+    Compute the functions at several points by providing a NumPy array for `x`.
+
+    >>> x = np.array([1., 1.5, 2.5, 5])
+    >>> apt, bpt, ant, bnt = itairy(x)
+    >>> apt, bpt, ant, bnt
+    (array([0.23631734, 0.28678675, 0.324638  , 0.33328759]),
+     array([  0.87276912,   1.62470809,   5.20906691, 321.47831857]),
+     array([0.46567398, 0.72232876, 0.93187776, 0.7178822 ]),
+     array([ 0.37300501,  0.35038814, -0.02812939,  0.15873094]))
+
+    Plot the functions from -10 to 10.
+
+    >>> x = np.linspace(-10, 10, 500)
+    >>> apt, bpt, ant, bnt = itairy(x)
+    >>> fig, ax = plt.subplots(figsize=(6, 5))
+    >>> ax.plot(x, apt, label=r"$\int_0^x\, Ai(t)\, dt$")
+    >>> ax.plot(x, bpt, ls="dashed", label=r"$\int_0^x\, Bi(t)\, dt$")
+    >>> ax.plot(x, ant, ls="dashdot", label=r"$\int_0^x\, Ai(-t)\, dt$")
+    >>> ax.plot(x, bnt, ls="dotted", label=r"$\int_0^x\, Bi(-t)\, dt$")
+    >>> ax.set_ylim(-2, 1.5)
+    >>> ax.legend(loc="lower right")
+    >>> plt.show()
+    """)
+
+add_newdoc("iti0k0",
+    r"""
+    iti0k0(x, out=None)
+
+    Integrals of modified Bessel functions of order 0.
+
+    Computes the integrals
+
+    .. math::
+
+        \int_0^x I_0(t) dt \\
+        \int_0^x K_0(t) dt.
+
+    For more on :math:`I_0` and :math:`K_0` see `i0` and `k0`.
+
+    Parameters
+    ----------
+    x : array_like
+        Values at which to evaluate the integrals.
+    out : tuple of ndarrays, optional
+        Optional output arrays for the function results.
+
+    Returns
+    -------
+    ii0 : scalar or ndarray
+        The integral for `i0`
+    ik0 : scalar or ndarray
+        The integral for `k0`
+
+    References
+    ----------
+    .. [1] S. Zhang and J.M. Jin, "Computation of Special Functions",
+           Wiley 1996
+
+    Examples
+    --------
+    Evaluate the functions at one point.
+
+    >>> from scipy.special import iti0k0
+    >>> int_i, int_k = iti0k0(1.)
+    >>> int_i, int_k
+    (1.0865210970235892, 1.2425098486237771)
+
+    Evaluate the functions at several points.
+
+    >>> import numpy as np
+    >>> points = np.array([0., 1.5, 3.])
+    >>> int_i, int_k = iti0k0(points)
+    >>> int_i, int_k
+    (array([0.        , 1.80606937, 6.16096149]),
+     array([0.        , 1.39458246, 1.53994809]))
+
+    Plot the functions from 0 to 5.
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots()
+    >>> x = np.linspace(0., 5., 1000)
+    >>> int_i, int_k = iti0k0(x)
+    >>> ax.plot(x, int_i, label=r"$\int_0^x I_0(t)\,dt$")
+    >>> ax.plot(x, int_k, label=r"$\int_0^x K_0(t)\,dt$")
+    >>> ax.legend()
+    >>> plt.show()
+    """)
+
+add_newdoc("itj0y0",
+    r"""
+    itj0y0(x, out=None)
+
+    Integrals of Bessel functions of the first kind of order 0.
+
+    Computes the integrals
+
+    .. math::
+
+        \int_0^x J_0(t) dt \\
+        \int_0^x Y_0(t) dt.
+
+    For more on :math:`J_0` and :math:`Y_0` see `j0` and `y0`.
+
+    Parameters
+    ----------
+    x : array_like
+        Values at which to evaluate the integrals.
+    out : tuple of ndarrays, optional
+        Optional output arrays for the function results.
+
+    Returns
+    -------
+    ij0 : scalar or ndarray
+        The integral of `j0`
+    iy0 : scalar or ndarray
+        The integral of `y0`
+
+    References
+    ----------
+    .. [1] S. Zhang and J.M. Jin, "Computation of Special Functions",
+           Wiley 1996
+
+    Examples
+    --------
+    Evaluate the functions at one point.
+
+    >>> from scipy.special import itj0y0
+    >>> int_j, int_y = itj0y0(1.)
+    >>> int_j, int_y
+    (0.9197304100897596, -0.637069376607422)
+
+    Evaluate the functions at several points.
+
+    >>> import numpy as np
+    >>> points = np.array([0., 1.5, 3.])
+    >>> int_j, int_y = itj0y0(points)
+    >>> int_j, int_y
+    (array([0.        , 1.24144951, 1.38756725]),
+     array([ 0.        , -0.51175903,  0.19765826]))
+
+    Plot the functions from 0 to 10.
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots()
+    >>> x = np.linspace(0., 10., 1000)
+    >>> int_j, int_y = itj0y0(x)
+    >>> ax.plot(x, int_j, label=r"$\int_0^x J_0(t)\,dt$")
+    >>> ax.plot(x, int_y, label=r"$\int_0^x Y_0(t)\,dt$")
+    >>> ax.legend()
+    >>> plt.show()
+
+    """)
+
+add_newdoc("itmodstruve0",
+    r"""
+    itmodstruve0(x, out=None)
+
+    Integral of the modified Struve function of order 0.
+
+    .. math::
+        I = \int_0^x L_0(t)\,dt
+
+    Parameters
+    ----------
+    x : array_like
+        Upper limit of integration (float).
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    I : scalar or ndarray
+        The integral of :math:`L_0` from 0 to `x`.
+
+    Notes
+    -----
+    Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
+    Jin [1]_.
+
+    See Also
+    --------
+    modstruve: Modified Struve function which is integrated by this function
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
+
+    Examples
+    --------
+    Evaluate the function at one point.
+
+    >>> import numpy as np
+    >>> from scipy.special import itmodstruve0
+    >>> itmodstruve0(1.)
+    0.3364726286440384
+
+    Evaluate the function at several points by supplying
+    an array for `x`.
+
+    >>> points = np.array([1., 2., 3.5])
+    >>> itmodstruve0(points)
+    array([0.33647263, 1.588285  , 7.60382578])
+
+    Plot the function from -10 to 10.
+
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.linspace(-10., 10., 1000)
+    >>> itmodstruve0_values = itmodstruve0(x)
+    >>> fig, ax = plt.subplots()
+    >>> ax.plot(x, itmodstruve0_values)
+    >>> ax.set_xlabel(r'$x$')
+    >>> ax.set_ylabel(r'$\int_0^xL_0(t)\,dt$')
+    >>> plt.show()
+    """)
+
+add_newdoc("itstruve0",
+    r"""
+    itstruve0(x, out=None)
+
+    Integral of the Struve function of order 0.
+
+    .. math::
+        I = \int_0^x H_0(t)\,dt
+
+    Parameters
+    ----------
+    x : array_like
+        Upper limit of integration (float).
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    I : scalar or ndarray
+        The integral of :math:`H_0` from 0 to `x`.
+
+    See also
+    --------
+    struve: Function which is integrated by this function
+
+    Notes
+    -----
+    Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
+    Jin [1]_.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
+
+    Examples
+    --------
+    Evaluate the function at one point.
+
+    >>> import numpy as np
+    >>> from scipy.special import itstruve0
+    >>> itstruve0(1.)
+    0.30109042670805547
+
+    Evaluate the function at several points by supplying
+    an array for `x`.
+
+    >>> points = np.array([1., 2., 3.5])
+    >>> itstruve0(points)
+    array([0.30109043, 1.01870116, 1.96804581])
+
+    Plot the function from -20 to 20.
+
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.linspace(-20., 20., 1000)
+    >>> istruve0_values = itstruve0(x)
+    >>> fig, ax = plt.subplots()
+    >>> ax.plot(x, istruve0_values)
+    >>> ax.set_xlabel(r'$x$')
+    >>> ax.set_ylabel(r'$\int_0^{x}H_0(t)\,dt$')
+    >>> plt.show()
+    """)
+
+add_newdoc("iv",
+    r"""
+    iv(v, z, out=None)
+
+    Modified Bessel function of the first kind of real order.
+
+    Parameters
+    ----------
+    v : array_like
+        Order. If `z` is of real type and negative, `v` must be integer
+        valued.
+    z : array_like of float or complex
+        Argument.
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the modified Bessel function.
+
+    Notes
+    -----
+    For real `z` and :math:`v \in [-50, 50]`, the evaluation is carried out
+    using Temme's method [1]_.  For larger orders, uniform asymptotic
+    expansions are applied.
+
+    For complex `z` and positive `v`, the AMOS [2]_ `zbesi` routine is
+    called. It uses a power series for small `z`, the asymptotic expansion
+    for large `abs(z)`, the Miller algorithm normalized by the Wronskian
+    and a Neumann series for intermediate magnitudes, and the uniform
+    asymptotic expansions for :math:`I_v(z)` and :math:`J_v(z)` for large
+    orders. Backward recurrence is used to generate sequences or reduce
+    orders when necessary.
+
+    The calculations above are done in the right half plane and continued
+    into the left half plane by the formula,
+
+    .. math:: I_v(z \exp(\pm\imath\pi)) = \exp(\pm\pi v) I_v(z)
+
+    (valid when the real part of `z` is positive).  For negative `v`, the
+    formula
+
+    .. math:: I_{-v}(z) = I_v(z) + \frac{2}{\pi} \sin(\pi v) K_v(z)
+
+    is used, where :math:`K_v(z)` is the modified Bessel function of the
+    second kind, evaluated using the AMOS routine `zbesk`.
+
+    See also
+    --------
+    ive : This function with leading exponential behavior stripped off.
+    i0 : Faster version of this function for order 0.
+    i1 : Faster version of this function for order 1.
+
+    References
+    ----------
+    .. [1] Temme, Journal of Computational Physics, vol 21, 343 (1976)
+    .. [2] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
+           of a Complex Argument and Nonnegative Order",
+           http://netlib.org/amos/
+
+    Examples
+    --------
+    Evaluate the function of order 0 at one point.
+
+    >>> from scipy.special import iv
+    >>> iv(0, 1.)
+    1.2660658777520084
+
+    Evaluate the function at one point for different orders.
+
+    >>> iv(0, 1.), iv(1, 1.), iv(1.5, 1.)
+    (1.2660658777520084, 0.565159103992485, 0.2935253263474798)
+
+    The evaluation for different orders can be carried out in one call by
+    providing a list or NumPy array as argument for the `v` parameter:
+
+    >>> iv([0, 1, 1.5], 1.)
+    array([1.26606588, 0.5651591 , 0.29352533])
+
+    Evaluate the function at several points for order 0 by providing an
+    array for `z`.
+
+    >>> import numpy as np
+    >>> points = np.array([-2., 0., 3.])
+    >>> iv(0, points)
+    array([2.2795853 , 1.        , 4.88079259])
+
+    If `z` is an array, the order parameter `v` must be broadcastable to
+    the correct shape if different orders are to be computed in one call.
+    To calculate the orders 0 and 1 for a 1D array:
+
+    >>> orders = np.array([[0], [1]])
+    >>> orders.shape
+    (2, 1)
+
+    >>> iv(orders, points)
+    array([[ 2.2795853 ,  1.        ,  4.88079259],
+           [-1.59063685,  0.        ,  3.95337022]])
+
+    Plot the functions of order 0 to 3 from -5 to 5.
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots()
+    >>> x = np.linspace(-5., 5., 1000)
+    >>> for i in range(4):
+    ...     ax.plot(x, iv(i, x), label=f'$I_{i!r}$')
+    >>> ax.legend()
+    >>> plt.show()
+
+    """)
+
+add_newdoc("ive",
+    r"""
+    ive(v, z, out=None)
+
+    Exponentially scaled modified Bessel function of the first kind.
+
+    Defined as::
+
+        ive(v, z) = iv(v, z) * exp(-abs(z.real))
+
+    For purely imaginary `z` (zero real part), this returns the unscaled
+    modified Bessel function of the first kind `iv`.
+
+    Parameters
+    ----------
+    v : array_like of float
+        Order.
+    z : array_like of float or complex
+        Argument.
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the exponentially scaled modified Bessel function.
+
+    Notes
+    -----
+    For positive `v`, the AMOS [1]_ `zbesi` routine is called. It uses a
+    power series for small `z`, the asymptotic expansion for large
+    `abs(z)`, the Miller algorithm normalized by the Wronskian and a
+    Neumann series for intermediate magnitudes, and the uniform asymptotic
+    expansions for :math:`I_v(z)` and :math:`J_v(z)` for large orders.
+    Backward recurrence is used to generate sequences or reduce orders when
+    necessary.
+
+    The calculations above are done in the right half plane and continued
+    into the left half plane by the formula,
+
+    .. math:: I_v(z \exp(\pm\imath\pi)) = \exp(\pm\pi v) I_v(z)
+
+    (valid when the real part of `z` is positive).  For negative `v`, the
+    formula
+
+    .. math:: I_{-v}(z) = I_v(z) + \frac{2}{\pi} \sin(\pi v) K_v(z)
+
+    is used, where :math:`K_v(z)` is the modified Bessel function of the
+    second kind, evaluated using the AMOS routine `zbesk`.
+
+    See also
+    --------
+    iv: Modified Bessel function of the first kind
+    i0e: Faster implementation of this function for order 0
+    i1e: Faster implementation of this function for order 1
+
+    References
+    ----------
+    .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
+           of a Complex Argument and Nonnegative Order",
+           http://netlib.org/amos/
+
+    Examples
+    --------
+    Evaluate the function of order 0 at one point.
+
+    >>> import numpy as np
+    >>> from scipy.special import iv, ive
+    >>> import matplotlib.pyplot as plt
+    >>> ive(0, 1.)
+    0.4657596075936404
+
+    Evaluate the function at one point for different orders by
+    providing a list or NumPy array as argument for the `v` parameter:
+
+    >>> ive([0, 1, 1.5], 1.)
+    array([0.46575961, 0.20791042, 0.10798193])
+
+    Evaluate the function at several points for order 0 by providing an
+    array for `z`.
+
+    >>> points = np.array([-2., 0., 3.])
+    >>> ive(0, points)
+    array([0.30850832, 1.        , 0.24300035])
+
+    Evaluate the function at several points for different orders by
+    providing arrays for both `v` and `z`. Both arrays have to be
+    broadcastable to the correct shape. To calculate the orders 0, 1
+    and 2 for a 1D array of points:
+
+    >>> ive([[0], [1], [2]], points)
+    array([[ 0.30850832,  1.        ,  0.24300035],
+           [-0.21526929,  0.        ,  0.19682671],
+           [ 0.09323903,  0.        ,  0.11178255]])
+
+    Plot the functions of order 0 to 3 from -5 to 5.
+
+    >>> fig, ax = plt.subplots()
+    >>> x = np.linspace(-5., 5., 1000)
+    >>> for i in range(4):
+    ...     ax.plot(x, ive(i, x), label=rf'$I_{i!r}(z)\cdot e^{{-|z|}}$')
+    >>> ax.legend()
+    >>> ax.set_xlabel(r"$z$")
+    >>> plt.show()
+
+    Exponentially scaled Bessel functions are useful for large arguments for
+    which the unscaled Bessel functions over- or underflow. In the
+    following example `iv` returns infinity whereas `ive` still returns
+    a finite number.
+
+    >>> iv(3, 1000.), ive(3, 1000.)
+    (inf, 0.01256056218254712)
+    """)
+
+add_newdoc("j0",
+    r"""
+    j0(x, out=None)
+
+    Bessel function of the first kind of order 0.
+
+    Parameters
+    ----------
+    x : array_like
+        Argument (float).
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    J : scalar or ndarray
+        Value of the Bessel function of the first kind of order 0 at `x`.
+
+    Notes
+    -----
+    The domain is divided into the intervals [0, 5] and (5, infinity). In the
+    first interval the following rational approximation is used:
+
+    .. math::
+
+        J_0(x) \approx (w - r_1^2)(w - r_2^2) \frac{P_3(w)}{Q_8(w)},
+
+    where :math:`w = x^2` and :math:`r_1`, :math:`r_2` are the zeros of
+    :math:`J_0`, and :math:`P_3` and :math:`Q_8` are polynomials of degrees 3
+    and 8, respectively.
+
+    In the second interval, the Hankel asymptotic expansion is employed with
+    two rational functions of degree 6/6 and 7/7.
+
+    This function is a wrapper for the Cephes [1]_ routine `j0`.
+    It should not be confused with the spherical Bessel functions (see
+    `spherical_jn`).
+
+    See also
+    --------
+    jv : Bessel function of real order and complex argument.
+    spherical_jn : spherical Bessel functions.
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+
+    Examples
+    --------
+    Calculate the function at one point:
+
+    >>> from scipy.special import j0
+    >>> j0(1.)
+    0.7651976865579665
+
+    Calculate the function at several points:
+
+    >>> import numpy as np
+    >>> j0(np.array([-2., 0., 4.]))
+    array([ 0.22389078,  1.        , -0.39714981])
+
+    Plot the function from -20 to 20.
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots()
+    >>> x = np.linspace(-20., 20., 1000)
+    >>> y = j0(x)
+    >>> ax.plot(x, y)
+    >>> plt.show()
+
+    """)
+
+add_newdoc("j1",
+    """
+    j1(x, out=None)
+
+    Bessel function of the first kind of order 1.
+
+    Parameters
+    ----------
+    x : array_like
+        Argument (float).
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    J : scalar or ndarray
+        Value of the Bessel function of the first kind of order 1 at `x`.
+
+    Notes
+    -----
+    The domain is divided into the intervals [0, 8] and (8, infinity). In the
+    first interval a 24 term Chebyshev expansion is used. In the second, the
+    asymptotic trigonometric representation is employed using two rational
+    functions of degree 5/5.
+
+    This function is a wrapper for the Cephes [1]_ routine `j1`.
+    It should not be confused with the spherical Bessel functions (see
+    `spherical_jn`).
+
+    See also
+    --------
+    jv: Bessel function of the first kind
+    spherical_jn: spherical Bessel functions.
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+
+    Examples
+    --------
+    Calculate the function at one point:
+
+    >>> from scipy.special import j1
+    >>> j1(1.)
+    0.44005058574493355
+
+    Calculate the function at several points:
+
+    >>> import numpy as np
+    >>> j1(np.array([-2., 0., 4.]))
+    array([-0.57672481,  0.        , -0.06604333])
+
+    Plot the function from -20 to 20.
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots()
+    >>> x = np.linspace(-20., 20., 1000)
+    >>> y = j1(x)
+    >>> ax.plot(x, y)
+    >>> plt.show()
+
+    """)
+
+add_newdoc("jn",
+    """
+    jn(n, x, out=None)
+
+    Bessel function of the first kind of integer order and real argument.
+
+    Parameters
+    ----------
+    n : array_like
+        order of the Bessel function
+    x : array_like
+        argument of the Bessel function
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    scalar or ndarray
+        The value of the Bessel function
+
+    See also
+    --------
+    jv
+    spherical_jn : spherical Bessel functions.
+
+    Notes
+    -----
+    `jn` is an alias of `jv`.
+    Not to be confused with the spherical Bessel functions (see
+    `spherical_jn`).
+
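+    Examples
+    --------
+    Since `jn` is simply an alias of `jv`, the two agree exactly (a quick
+    check at a few points):
+
+    >>> import numpy as np
+    >>> from scipy.special import jn, jv
+    >>> x = np.linspace(0., 10., 5)
+    >>> np.allclose(jn(2, x), jv(2, x))
+    True
+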
+    """)
+
+add_newdoc("jv",
+    r"""
+    jv(v, z, out=None)
+
+    Bessel function of the first kind of real order and complex argument.
+
+    Parameters
+    ----------
+    v : array_like
+        Order (float).
+    z : array_like
+        Argument (float or complex).
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    J : scalar or ndarray
+        Value of the Bessel function, :math:`J_v(z)`.
+
+    See also
+    --------
+    jve : :math:`J_v` with leading exponential behavior stripped off.
+    spherical_jn : spherical Bessel functions.
+    j0 : faster version of this function for order 0.
+    j1 : faster version of this function for order 1.
+
+    Notes
+    -----
+    For positive `v` values, the computation is carried out using the AMOS
+    [1]_ `zbesj` routine, which exploits the connection to the modified
+    Bessel function :math:`I_v`,
+
+    .. math::
+        J_v(z) = \exp(v\pi\imath/2) I_v(-\imath z)\qquad (\Im z > 0)
+
+        J_v(z) = \exp(-v\pi\imath/2) I_v(\imath z)\qquad (\Im z < 0)
+
+    For negative `v` values the formula,
+
+    .. math:: J_{-v}(z) = J_v(z) \cos(\pi v) - Y_v(z) \sin(\pi v)
+
+    is used, where :math:`Y_v(z)` is the Bessel function of the second
+    kind, computed using the AMOS routine `zbesy`.  Note that the second
+    term is exactly zero for integer `v`; to improve accuracy the second
+    term is explicitly omitted for `v` values such that `v = floor(v)`.
+
+    Not to be confused with the spherical Bessel functions (see `spherical_jn`).
+
+    References
+    ----------
+    .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
+           of a Complex Argument and Nonnegative Order",
+           http://netlib.org/amos/
+
+    Examples
+    --------
+    Evaluate the function of order 0 at one point.
+
+    >>> from scipy.special import jv
+    >>> jv(0, 1.)
+    0.7651976865579666
+
+    Evaluate the function at one point for different orders.
+
+    >>> jv(0, 1.), jv(1, 1.), jv(1.5, 1.)
+    (0.7651976865579666, 0.44005058574493355, 0.24029783912342725)
+
+    The evaluation for different orders can be carried out in one call by
+    providing a list or NumPy array as argument for the `v` parameter:
+
+    >>> jv([0, 1, 1.5], 1.)
+    array([0.76519769, 0.44005059, 0.24029784])
+
+    Evaluate the function at several points for order 0 by providing an
+    array for `z`.
+
+    >>> import numpy as np
+    >>> points = np.array([-2., 0., 3.])
+    >>> jv(0, points)
+    array([ 0.22389078,  1.        , -0.26005195])
+
+    If `z` is an array, the order parameter `v` must be broadcastable to
+    the correct shape if different orders are to be computed in one call.
+    To calculate the orders 0 and 1 for a 1D array:
+
+    >>> orders = np.array([[0], [1]])
+    >>> orders.shape
+    (2, 1)
+
+    >>> jv(orders, points)
+    array([[ 0.22389078,  1.        , -0.26005195],
+           [-0.57672481,  0.        ,  0.33905896]])
+
+    Plot the functions of order 0 to 3 from -10 to 10.
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots()
+    >>> x = np.linspace(-10., 10., 1000)
+    >>> for i in range(4):
+    ...     ax.plot(x, jv(i, x), label=f'$J_{i!r}$')
+    >>> ax.legend()
+    >>> plt.show()
+
+    """)
+
+add_newdoc("jve",
+    r"""
+    jve(v, z, out=None)
+
+    Exponentially scaled Bessel function of the first kind of order `v`.
+
+    Defined as::
+
+        jve(v, z) = jv(v, z) * exp(-abs(z.imag))
+
+    Parameters
+    ----------
+    v : array_like
+        Order (float).
+    z : array_like
+        Argument (float or complex).
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    J : scalar or ndarray
+        Value of the exponentially scaled Bessel function.
+
+    See also
+    --------
+    jv: Unscaled Bessel function of the first kind
+
+    Notes
+    -----
+    For positive `v` values, the computation is carried out using the AMOS
+    [1]_ `zbesj` routine, which exploits the connection to the modified
+    Bessel function :math:`I_v`,
+
+    .. math::
+        J_v(z) = \exp(v\pi\imath/2) I_v(-\imath z)\qquad (\Im z > 0)
+
+        J_v(z) = \exp(-v\pi\imath/2) I_v(\imath z)\qquad (\Im z < 0)
+
+    For negative `v` values the formula,
+
+    .. math:: J_{-v}(z) = J_v(z) \cos(\pi v) - Y_v(z) \sin(\pi v)
+
+    is used, where :math:`Y_v(z)` is the Bessel function of the second
+    kind, computed using the AMOS routine `zbesy`.  Note that the second
+    term is exactly zero for integer `v`; to improve accuracy the second
+    term is explicitly omitted for `v` values such that `v = floor(v)`.
+
+    Exponentially scaled Bessel functions are useful for large arguments `z`:
+    for these, the unscaled Bessel functions can easily under- or overflow.
+
+    References
+    ----------
+    .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
+           of a Complex Argument and Nonnegative Order",
+           http://netlib.org/amos/
+
+    Examples
+    --------
+    Compare the output of `jv` and `jve` for large complex arguments for `z`
+    by computing their values for order ``v=1`` at ``z=1000j``. We see that
+    `jv` overflows but `jve` returns a finite number:
+
+    >>> import numpy as np
+    >>> from scipy.special import jv, jve
+    >>> v = 1
+    >>> z = 1000j
+    >>> jv(v, z), jve(v, z)
+    ((inf+infj), (7.721967686709077e-19+0.012610930256928629j))
+
+    For real arguments for `z`, `jve` returns the same as `jv`.
+
+    >>> v, z = 1, 1000
+    >>> jv(v, z), jve(v, z)
+    (0.004728311907089523, 0.004728311907089523)
+
+    The function can be evaluated for several orders at the same time by
+    providing a list or NumPy array for `v`:
+
+    >>> jve([1, 3, 5], 1j)
+    array([1.27304208e-17+2.07910415e-01j, -4.99352086e-19-8.15530777e-03j,
+           6.11480940e-21+9.98657141e-05j])
+
+    In the same way, the function can be evaluated at several points in one
+    call by providing a list or NumPy array for `z`:
+
+    >>> jve(1, np.array([1j, 2j, 3j]))
+    array([1.27308412e-17+0.20791042j, 1.31814423e-17+0.21526929j,
+           1.20521602e-17+0.19682671j])
+
+    It is also possible to evaluate several orders at several points
+    at the same time by providing arrays for `v` and `z` with
+    compatible shapes for broadcasting. Compute `jve` for two different orders
+    `v` and three points `z` resulting in a 2x3 array.
+
+    >>> v = np.array([[1], [3]])
+    >>> z = np.array([1j, 2j, 3j])
+    >>> v.shape, z.shape
+    ((2, 1), (3,))
+
+    >>> jve(v, z)
+    array([[1.27304208e-17+0.20791042j,  1.31810070e-17+0.21526929j,
+            1.20517622e-17+0.19682671j],
+           [-4.99352086e-19-0.00815531j, -1.76289571e-18-0.02879122j,
+            -2.92578784e-18-0.04778332j]])
+    """)
+
+add_newdoc("k0",
+    r"""
+    k0(x, out=None)
+
+    Modified Bessel function of the second kind of order 0, :math:`K_0`.
+
+    This function is also sometimes referred to as the modified Bessel
+    function of the third kind of order 0.
+
+    Parameters
+    ----------
+    x : array_like
+        Argument (float).
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    K : scalar or ndarray
+        Value of the modified Bessel function :math:`K_0` at `x`.
+
+    Notes
+    -----
+    The range is partitioned into the two intervals [0, 2] and (2, infinity).
+    Chebyshev polynomial expansions are employed in each interval.
+
+    This function is a wrapper for the Cephes [1]_ routine `k0`.
+
+    See Also
+    --------
+    kv: Modified Bessel function of the second kind of any order
+    k0e: Exponentially scaled modified Bessel function of the second kind
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+
+    Examples
+    --------
+    Calculate the function at one point:
+
+    >>> from scipy.special import k0
+    >>> k0(1.)
+    0.42102443824070823
+
+    Calculate the function at several points:
+
+    >>> import numpy as np
+    >>> k0(np.array([0.5, 2., 3.]))
+    array([0.92441907, 0.11389387, 0.0347395 ])
+
+    Plot the function from 0 to 10.
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots()
+    >>> x = np.linspace(0., 10., 1000)
+    >>> y = k0(x)
+    >>> ax.plot(x, y)
+    >>> plt.show()
+
+    """)
+
+add_newdoc("k0e",
+    """
+    k0e(x, out=None)
+
+    Exponentially scaled modified Bessel function K of order 0
+
+    Defined as::
+
+        k0e(x) = exp(x) * k0(x).
+
+    Parameters
+    ----------
+    x : array_like
+        Argument (float)
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    K : scalar or ndarray
+        Value of the exponentially scaled modified Bessel function K of order
+        0 at `x`.
+
+    Notes
+    -----
+    The range is partitioned into the two intervals [0, 2] and (2, infinity).
+    Chebyshev polynomial expansions are employed in each interval.
+
+    This function is a wrapper for the Cephes [1]_ routine `k0e`.
+
+    See Also
+    --------
+    kv: Modified Bessel function of the second kind of any order
+    k0: Modified Bessel function of the second kind of order 0
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+
+    Examples
+    --------
+    Calculate the function at one point:
+
+    >>> from scipy.special import k0e
+    >>> k0e(1.)
+    1.1444630798068947
+
+    Calculate the function at several points:
+
+    >>> import numpy as np
+    >>> k0e(np.array([0.5, 2., 3.]))
+    array([1.52410939, 0.84156822, 0.6977616 ])
+
+    Plot the function from 0 to 10.
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots()
+    >>> x = np.linspace(0., 10., 1000)
+    >>> y = k0e(x)
+    >>> ax.plot(x, y)
+    >>> plt.show()
+
+    Exponentially scaled Bessel functions are useful for large arguments for
+    which the unscaled Bessel functions are not precise enough.
+
+    >>> from scipy.special import k0
+    >>> k0(1000.)
+    0.0
+
+    While `k0` returns zero, `k0e` still returns a finite number:
+
+    >>> k0e(1000.)
+    0.03962832160075422
+
+    """)
+
+add_newdoc("k1",
+    """
+    k1(x, out=None)
+
+    Modified Bessel function of the second kind of order 1, :math:`K_1(x)`.
+
+    Parameters
+    ----------
+    x : array_like
+        Argument (float)
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    K : scalar or ndarray
+        Value of the modified Bessel function K of order 1 at `x`.
+
+    Notes
+    -----
+    The range is partitioned into the two intervals [0, 2] and (2, infinity).
+    Chebyshev polynomial expansions are employed in each interval.
+
+    This function is a wrapper for the Cephes [1]_ routine `k1`.
+
+    See Also
+    --------
+    kv: Modified Bessel function of the second kind of any order
+    k1e: Exponentially scaled modified Bessel function K of order 1
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+
+    Examples
+    --------
+    Calculate the function at one point:
+
+    >>> from scipy.special import k1
+    >>> k1(1.)
+    0.6019072301972346
+
+    Calculate the function at several points:
+
+    >>> import numpy as np
+    >>> k1(np.array([0.5, 2., 3.]))
+    array([1.65644112, 0.13986588, 0.04015643])
+
+    Plot the function from 0 to 10.
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots()
+    >>> x = np.linspace(0., 10., 1000)
+    >>> y = k1(x)
+    >>> ax.plot(x, y)
+    >>> plt.show()
+
+    """)
+
+add_newdoc("k1e",
+    """
+    k1e(x, out=None)
+
+    Exponentially scaled modified Bessel function K of order 1
+
+    Defined as::
+
+        k1e(x) = exp(x) * k1(x)
+
+    Parameters
+    ----------
+    x : array_like
+        Argument (float)
+    out : ndarray, optional
+        Optional output array for the function values
+
+    Returns
+    -------
+    K : scalar or ndarray
+        Value of the exponentially scaled modified Bessel function K of order
+        1 at `x`.
+
+    Notes
+    -----
+    The range is partitioned into the two intervals [0, 2] and (2, infinity).
+    Chebyshev polynomial expansions are employed in each interval.
+
+    This function is a wrapper for the Cephes [1]_ routine `k1e`.
+
+    See Also
+    --------
+    kv: Modified Bessel function of the second kind of any order
+    k1: Modified Bessel function of the second kind of order 1
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+
+    Examples
+    --------
+    Calculate the function at one point:
+
+    >>> from scipy.special import k1e
+    >>> k1e(1.)
+    1.636153486263258
+
+    Calculate the function at several points:
+
+    >>> import numpy as np
+    >>> k1e(np.array([0.5, 2., 3.]))
+    array([2.73100971, 1.03347685, 0.80656348])
+
+    Plot the function from 0 to 10.
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots()
+    >>> x = np.linspace(0., 10., 1000)
+    >>> y = k1e(x)
+    >>> ax.plot(x, y)
+    >>> plt.show()
+
+    Exponentially scaled Bessel functions are useful for large arguments for
+    which the unscaled Bessel functions are not precise enough. In the
+    following example `k1` returns zero whereas `k1e` still returns a
+    useful floating point number.
+
+    >>> from scipy.special import k1
+    >>> k1(1000.), k1e(1000.)
+    (0.0, 0.03964813081296021)
+    """)
+
+add_newdoc("kei",
+    r"""
+    kei(x, out=None)
+
+    Kelvin function kei.
+
+    Defined as
+
+    .. math::
+
+        \mathrm{kei}(x) = \Im[K_0(x e^{\pi i / 4})]
+
+    where :math:`K_0` is the modified Bessel function of the second
+    kind (see `kv`). See [dlmf]_ for more details.
+
+    Parameters
+    ----------
+    x : array_like
+        Real argument.
+    out : ndarray, optional
+        Optional output array for the function results.
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the Kelvin function.
+
+    See Also
+    --------
+    ker : the corresponding real part
+    keip : the derivative of kei
+    kv : modified Bessel function of the second kind
+
+    References
+    ----------
+    .. [dlmf] NIST, Digital Library of Mathematical Functions,
+        https://dlmf.nist.gov/10.61
+
+    Examples
+    --------
+    It can be expressed using the modified Bessel function of the
+    second kind.
+
+    >>> import numpy as np
+    >>> import scipy.special as sc
+    >>> x = np.array([1.0, 2.0, 3.0, 4.0])
+    >>> sc.kv(0, x * np.exp(np.pi * 1j / 4)).imag
+    array([-0.49499464, -0.20240007, -0.05112188,  0.0021984 ])
+    >>> sc.kei(x)
+    array([-0.49499464, -0.20240007, -0.05112188,  0.0021984 ])
+
+    """)
+
+add_newdoc("keip",
+    r"""
+    keip(x, out=None)
+
+    Derivative of the Kelvin function kei.
+
+    Parameters
+    ----------
+    x : array_like
+        Real argument.
+    out : ndarray, optional
+        Optional output array for the function results.
+
+    Returns
+    -------
+    scalar or ndarray
+        The values of the derivative of kei.
+
+    See Also
+    --------
+    kei
+
+    References
+    ----------
+    .. [dlmf] NIST, Digital Library of Mathematical Functions,
+        https://dlmf.nist.gov/10#PT5
+
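+    Examples
+    --------
+    As an illustrative sketch, `keip` can be compared against a central
+    difference of `kei` (step size chosen ad hoc):
+
+    >>> import numpy as np
+    >>> from scipy.special import kei, keip
+    >>> x, h = 2.0, 1e-6
+    >>> bool(np.isclose(keip(x), (kei(x + h) - kei(x - h)) / (2 * h)))
+    True
+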
+    """)
+
+add_newdoc("kelvin",
+    """
+    kelvin(x, out=None)
+
+    Kelvin functions as complex numbers
+
+    Parameters
+    ----------
+    x : array_like
+        Argument
+    out : tuple of ndarray, optional
+        Optional output arrays for the function values
+
+    Returns
+    -------
+    Be, Ke, Bep, Kep : 4-tuple of scalar or ndarray
+        The tuple (Be, Ke, Bep, Kep) contains complex numbers
+        representing the real and imaginary Kelvin functions and their
+        derivatives evaluated at `x`.  For example, kelvin(x)[0].real =
+        ber x and kelvin(x)[0].imag = bei x with similar relationships
+        for ker and kei.
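+
+    Examples
+    --------
+    An illustrative consistency check of the relationships stated above,
+    using `ber` and `bei`:
+
+    >>> import numpy as np
+    >>> from scipy.special import kelvin, ber, bei
+    >>> Be, Ke, Bep, Kep = kelvin(2.0)
+    >>> bool(np.isclose(Be.real, ber(2.0))), bool(np.isclose(Be.imag, bei(2.0)))
+    (True, True)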
+    """)
+
+add_newdoc("ker",
+    r"""
+    ker(x, out=None)
+
+    Kelvin function ker.
+
+    Defined as
+
+    .. math::
+
+        \mathrm{ker}(x) = \Re[K_0(x e^{\pi i / 4})]
+
+    Where :math:`K_0` is the modified Bessel function of the second
+    kind (see `kv`). See [dlmf]_ for more details.
+
+    Parameters
+    ----------
+    x : array_like
+        Real argument.
+    out : ndarray, optional
+        Optional output array for the function results.
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the Kelvin function.
+
+    See Also
+    --------
+    kei : the corresponding imaginary part
+    kerp : the derivative of ker
+    kv : modified Bessel function of the second kind
+
+    References
+    ----------
+    .. [dlmf] NIST, Digital Library of Mathematical Functions,
+        https://dlmf.nist.gov/10.61
+
+    Examples
+    --------
+    It can be expressed using the modified Bessel function of the
+    second kind.
+
+    >>> import numpy as np
+    >>> import scipy.special as sc
+    >>> x = np.array([1.0, 2.0, 3.0, 4.0])
+    >>> sc.kv(0, x * np.exp(np.pi * 1j / 4)).real
+    array([ 0.28670621, -0.04166451, -0.06702923, -0.03617885])
+    >>> sc.ker(x)
+    array([ 0.28670621, -0.04166451, -0.06702923, -0.03617885])
+
+    """)
+
+add_newdoc("kerp",
+    r"""
+    kerp(x, out=None)
+
+    Derivative of the Kelvin function ker.
+
+    Parameters
+    ----------
+    x : array_like
+        Real argument.
+    out : ndarray, optional
+        Optional output array for the function results.
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the derivative of ker.
+
+    See Also
+    --------
+    ker
+
+    References
+    ----------
+    .. [dlmf] NIST, Digital Library of Mathematical Functions,
+        https://dlmf.nist.gov/10#PT5
+
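+    Examples
+    --------
+    As an illustrative sketch, `kerp` can be compared against a central
+    difference of `ker` (step size chosen ad hoc):
+
+    >>> import numpy as np
+    >>> from scipy.special import ker, kerp
+    >>> x, h = 2.0, 1e-6
+    >>> bool(np.isclose(kerp(x), (ker(x + h) - ker(x - h)) / (2 * h)))
+    True
+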
+    """)
+
+add_newdoc("kl_div",
+    r"""
+    kl_div(x, y, out=None)
+
+    Elementwise function for computing Kullback-Leibler divergence.
+
+    .. math::
+
+        \mathrm{kl\_div}(x, y) =
+          \begin{cases}
+            x \log(x / y) - x + y & x > 0, y > 0 \\
+            y & x = 0, y \ge 0 \\
+            \infty & \text{otherwise}
+          \end{cases}
+
+    Parameters
+    ----------
+    x, y : array_like
+        Real arguments
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the Kullback-Leibler divergence.
+
+    See Also
+    --------
+    entr, rel_entr, scipy.stats.entropy
+
+    Notes
+    -----
+    .. versionadded:: 0.15.0
+
+    This function is non-negative and is jointly convex in `x` and `y`.
+
+    The origin of this function is in convex programming; see [1]_ for
+    details. This is why the function contains the extra :math:`-x
+    + y` terms over what might be expected from the Kullback-Leibler
+    divergence. For a version of the function without the extra terms,
+    see `rel_entr`.
+
+    References
+    ----------
+    .. [1] Boyd, Stephen and Lieven Vandenberghe. *Convex optimization*.
+           Cambridge University Press, 2004.
+           :doi:`10.1017/CBO9780511804441`
+
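+    Examples
+    --------
+    An illustrative check of the relation to `rel_entr` described in the
+    Notes (`kl_div` adds the extra ``-x + y`` terms):
+
+    >>> import numpy as np
+    >>> from scipy.special import kl_div, rel_entr
+    >>> x, y = 2.0, 3.0
+    >>> bool(np.isclose(kl_div(x, y), rel_entr(x, y) - x + y))
+    True
+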
+    """)
+
+add_newdoc("kn",
+    r"""
+    kn(n, x, out=None)
+
+    Modified Bessel function of the second kind of integer order `n`
+
+    Returns the modified Bessel function of the second kind for integer order
+    `n` at real `x`.
+
+    These are also sometimes called functions of the third kind, Basset
+    functions, or Macdonald functions.
+
+    Parameters
+    ----------
+    n : array_like of int
+        Order of Bessel functions (floats will truncate with a warning)
+    x : array_like of float
+        Argument at which to evaluate the Bessel functions
+    out : ndarray, optional
+        Optional output array for the function results.
+
+    Returns
+    -------
+    scalar or ndarray
+        Value of the Modified Bessel function of the second kind,
+        :math:`K_n(x)`.
+
+    Notes
+    -----
+    Wrapper for AMOS [1]_ routine `zbesk`.  For a discussion of the
+    algorithm used, see [2]_ and the references therein.
+
+    See Also
+    --------
+    kv : Same function, but accepts real order and complex argument
+    kvp : Derivative of this function
+
+    References
+    ----------
+    .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
+           of a Complex Argument and Nonnegative Order",
+           http://netlib.org/amos/
+    .. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
+           functions of a complex argument and nonnegative order", ACM
+           TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
+
+    Examples
+    --------
+    Plot the function of several orders for real input:
+
+    >>> import numpy as np
+    >>> from scipy.special import kn
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.linspace(0, 5, 1000)
+    >>> for N in range(6):
+    ...     plt.plot(x, kn(N, x), label='$K_{}(x)$'.format(N))
+    >>> plt.ylim(0, 10)
+    >>> plt.legend()
+    >>> plt.title(r'Modified Bessel function of the second kind $K_n(x)$')
+    >>> plt.show()
+
+    Calculate for a single value at multiple orders:
+
+    >>> kn([4, 5, 6], 1)
+    array([   44.23241585,   360.9605896 ,  3653.83831186])
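+
+    As a quick illustrative check, the integer-order values agree with
+    `kv`, which accepts real order:
+
+    >>> from scipy.special import kv
+    >>> bool(np.allclose(kn([4, 5, 6], 1), kv([4, 5, 6], 1)))
+    True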
+    """)
+
+add_newdoc("kolmogi",
+    """
+    kolmogi(p, out=None)
+
+    Inverse Survival Function of the Kolmogorov distribution
+
+    It is the inverse function to `kolmogorov`.
+    Returns y such that ``kolmogorov(y) == p``.
+
+    Parameters
+    ----------
+    p : float array_like
+        Probability
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        The value(s) of kolmogi(p)
+
+    Notes
+    -----
+    `kolmogorov` is used by `stats.kstest` in the application of the
+    Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this
+    function is exposed in `scipy.special`, but the recommended way to achieve
+    the most accurate CDF/SF/PDF/PPF/ISF computations is to use the
+    `stats.kstwobign` distribution.
+
+    See Also
+    --------
+    kolmogorov : The Survival Function for the distribution
+    scipy.stats.kstwobign : Provides the functionality as a continuous distribution
+    smirnov, smirnovi : Functions for the one-sided distribution
+
+    Examples
+    --------
+    >>> from scipy.special import kolmogi
+    >>> kolmogi([0, 0.1, 0.25, 0.5, 0.75, 0.9, 1.0])
+    array([        inf,  1.22384787,  1.01918472,  0.82757356,  0.67644769,
+            0.57117327,  0.        ])
+
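+    As an illustrative round trip, `kolmogorov` recovers the
+    probabilities:
+
+    >>> import numpy as np
+    >>> from scipy.special import kolmogorov
+    >>> p = [0.1, 0.25, 0.5]
+    >>> bool(np.allclose(kolmogorov(kolmogi(p)), p))
+    True
+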
+    """)
+
+add_newdoc("kolmogorov",
+    r"""
+    kolmogorov(y, out=None)
+
+    Complementary cumulative distribution function (Survival Function)
+    of the Kolmogorov distribution.
+
+    Returns the complementary cumulative distribution function of
+    Kolmogorov's limiting distribution (``D_n*\sqrt(n)`` as n goes to infinity)
+    of a two-sided test for equality between an empirical and a theoretical
+    distribution. It is equal to the (limit as n->infinity of the)
+    probability that ``sqrt(n) * max absolute deviation > y``.
+
+    Parameters
+    ----------
+    y : float array_like
+      Absolute deviation between the Empirical CDF (ECDF) and the target CDF,
+      multiplied by sqrt(n).
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        The value(s) of kolmogorov(y)
+
+    Notes
+    -----
+    `kolmogorov` is used by `stats.kstest` in the application of the
+    Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this
+    function is exposed in `scipy.special`, but the recommended way to achieve
+    the most accurate CDF/SF/PDF/PPF/ISF computations is to use the
+    `stats.kstwobign` distribution.
+
+    See Also
+    --------
+    kolmogi : The Inverse Survival Function for the distribution
+    scipy.stats.kstwobign : Provides the functionality as a continuous distribution
+    smirnov, smirnovi : Functions for the one-sided distribution
+
+    Examples
+    --------
+    Show the probability of a gap at least as big as 0, 0.5 and 1.0.
+
+    >>> import numpy as np
+    >>> from scipy.special import kolmogorov
+    >>> from scipy.stats import kstwobign
+    >>> kolmogorov([0, 0.5, 1.0])
+    array([ 1.        ,  0.96394524,  0.26999967])
+
+    Compare a sample of size 1000 drawn from a Laplace(0, 1) distribution against
+    the target distribution, a Normal(0, 1) distribution.
+
+    >>> from scipy.stats import norm, laplace
+    >>> rng = np.random.default_rng()
+    >>> n = 1000
+    >>> lap01 = laplace(0, 1)
+    >>> x = np.sort(lap01.rvs(n, random_state=rng))
+    >>> np.mean(x), np.std(x)
+    (-0.05841730131499543, 1.3968109101997568)
+
+    Construct the Empirical CDF and the K-S statistic Dn.
+
+    >>> target = norm(0,1)  # Normal mean 0, stddev 1
+    >>> cdfs = target.cdf(x)
+    >>> ecdfs = np.arange(n+1, dtype=float)/n
+    >>> gaps = np.column_stack([cdfs - ecdfs[:n], ecdfs[1:] - cdfs])
+    >>> Dn = np.max(gaps)
+    >>> Kn = np.sqrt(n) * Dn
+    >>> print('Dn=%f, sqrt(n)*Dn=%f' % (Dn, Kn))
+    Dn=0.043363, sqrt(n)*Dn=1.371265
+    >>> print(chr(10).join(['For a sample of size n drawn from a N(0, 1) distribution:',
+    ...   ' the approximate Kolmogorov probability that sqrt(n)*Dn>=%f is %f' %  (Kn, kolmogorov(Kn)),
+    ...   ' the approximate Kolmogorov probability that sqrt(n)*Dn<=%f is %f' %  (Kn, kstwobign.cdf(Kn))]))
+    For a sample of size n drawn from a N(0, 1) distribution:
+     the approximate Kolmogorov probability that sqrt(n)*Dn>=1.371265 is 0.046533
+     the approximate Kolmogorov probability that sqrt(n)*Dn<=1.371265 is 0.953467
+
+    Plot the Empirical CDF against the target N(0, 1) CDF.
+
+    >>> import matplotlib.pyplot as plt
+    >>> plt.step(np.concatenate([[-3], x]), ecdfs, where='post', label='Empirical CDF')
+    >>> x3 = np.linspace(-3, 3, 100)
+    >>> plt.plot(x3, target.cdf(x3), label='CDF for N(0, 1)')
+    >>> plt.ylim([0, 1]); plt.grid(True); plt.legend();
+    >>> # Add vertical lines marking Dn+ and Dn-
+    >>> iminus, iplus = np.argmax(gaps, axis=0)
+    >>> plt.vlines([x[iminus]], ecdfs[iminus], cdfs[iminus], color='r', linestyle='dashed', lw=4)
+    >>> plt.vlines([x[iplus]], cdfs[iplus], ecdfs[iplus+1], color='r', linestyle='dashed', lw=4)
+    >>> plt.show()
+    """)
+
+add_newdoc("_kolmogc",
+    r"""
+    Internal function, do not use.
+    """)
+
+add_newdoc("_kolmogci",
+    r"""
+    Internal function, do not use.
+    """)
+
+add_newdoc("_kolmogp",
+    r"""
+    Internal function, do not use.
+    """)
+
+add_newdoc("kv",
+    r"""
+    kv(v, z, out=None)
+
+    Modified Bessel function of the second kind of real order `v`
+
+    Returns the modified Bessel function of the second kind for real order
+    `v` at complex `z`.
+
+    These are also sometimes called functions of the third kind, Basset
+    functions, or Macdonald functions.  They are defined as those solutions
+    of the modified Bessel equation for which,
+
+    .. math::
+        K_v(x) \sim \sqrt{\pi/(2x)} \exp(-x)
+
+    as :math:`x \to \infty` [3]_.
+
+    Parameters
+    ----------
+    v : array_like of float
+        Order of Bessel functions
+    z : array_like of complex
+        Argument at which to evaluate the Bessel functions
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        The results. Note that input must be of complex type to get complex
+        output, e.g. ``kv(3, -2+0j)`` instead of ``kv(3, -2)``.
+
+    Notes
+    -----
+    Wrapper for AMOS [1]_ routine `zbesk`.  For a discussion of the
+    algorithm used, see [2]_ and the references therein.
+
+    See Also
+    --------
+    kve : This function with leading exponential behavior stripped off.
+    kvp : Derivative of this function
+
+    References
+    ----------
+    .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
+           of a Complex Argument and Nonnegative Order",
+           http://netlib.org/amos/
+    .. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
+           functions of a complex argument and nonnegative order", ACM
+           TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
+    .. [3] NIST Digital Library of Mathematical Functions,
+           Eq. 10.25.E3. https://dlmf.nist.gov/10.25.E3
+
+    Examples
+    --------
+    Plot the function of several orders for real input:
+
+    >>> import numpy as np
+    >>> from scipy.special import kv
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.linspace(0, 5, 1000)
+    >>> for N in np.linspace(0, 6, 5):
+    ...     plt.plot(x, kv(N, x), label='$K_{{{}}}(x)$'.format(N))
+    >>> plt.ylim(0, 10)
+    >>> plt.legend()
+    >>> plt.title(r'Modified Bessel function of the second kind $K_\nu(x)$')
+    >>> plt.show()
+
+    Calculate for a single value at multiple orders:
+
+    >>> kv([4, 4.5, 5], 1+2j)
+    array([ 0.1992+2.3892j,  2.3493+3.6j   ,  7.2827+3.8104j])
+
+    """)
+
+add_newdoc("kve",
+    r"""
+    kve(v, z, out=None)
+
+    Exponentially scaled modified Bessel function of the second kind.
+
+    Returns the exponentially scaled, modified Bessel function of the
+    second kind (sometimes called the third kind) for real order `v` at
+    complex `z`::
+
+        kve(v, z) = kv(v, z) * exp(z)
+
+    Parameters
+    ----------
+    v : array_like of float
+        Order of Bessel functions
+    z : array_like of complex
+        Argument at which to evaluate the Bessel functions
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        The exponentially scaled modified Bessel function of the second kind.
+
+    Notes
+    -----
+    Wrapper for AMOS [1]_ routine `zbesk`.  For a discussion of the
+    algorithm used, see [2]_ and the references therein.
+
+    See Also
+    --------
+    kv : This function without exponential scaling.
+    k0e : Faster version of this function for order 0.
+    k1e : Faster version of this function for order 1.
+
+    References
+    ----------
+    .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
+           of a Complex Argument and Nonnegative Order",
+           http://netlib.org/amos/
+    .. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
+           functions of a complex argument and nonnegative order", ACM
+           TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
+
+    Examples
+    --------
+    Evaluate the function of order 0 at one point.
+
+    >>> import numpy as np
+    >>> from scipy.special import kv, kve
+    >>> import matplotlib.pyplot as plt
+    >>> kve(0, 1.)
+    1.1444630798068949
+
+    Evaluate the function at one point for different orders by
+    providing a list or NumPy array as argument for the `v` parameter:
+
+    >>> kve([0, 1, 1.5], 1.)
+    array([1.14446308, 1.63615349, 2.50662827])
+
+    Evaluate the function at several points for order 0 by providing an
+    array for `z`.
+
+    >>> points = np.array([1., 3., 10.])
+    >>> kve(0, points)
+    array([1.14446308, 0.6977616 , 0.39163193])
+
+    Evaluate the function at several points for different orders by
+    providing arrays for both `v` and `z`. Both arrays have to be
+    broadcastable to the correct shape. To calculate the orders 0, 1
+    and 2 for a 1D array of points:
+
+    >>> kve([[0], [1], [2]], points)
+    array([[1.14446308, 0.6977616 , 0.39163193],
+           [1.63615349, 0.80656348, 0.41076657],
+           [4.41677005, 1.23547058, 0.47378525]])
+
+    Plot the functions of order 0 to 3 from 0 to 5.
+
+    >>> fig, ax = plt.subplots()
+    >>> x = np.linspace(0., 5., 1000)
+    >>> for i in range(4):
+    ...     ax.plot(x, kve(i, x), label=fr'$K_{i!r}(z)\cdot e^z$')
+    >>> ax.legend()
+    >>> ax.set_xlabel(r"$z$")
+    >>> ax.set_ylim(0, 4)
+    >>> ax.set_xlim(0, 5)
+    >>> plt.show()
+
+    Exponentially scaled Bessel functions are useful for large arguments for
+    which the unscaled Bessel functions over- or underflow. In the
+    following example `kv` returns 0 whereas `kve` still returns
+    a useful finite number.
+
+    >>> kv(3, 1000.), kve(3, 1000.)
+    (0.0, 0.03980696128440973)
+    """)
+
+add_newdoc("_lanczos_sum_expg_scaled",
+    """
+    Internal function, do not use.
+    """)
+
+add_newdoc("_lgam1p",
+    """
+    Internal function, do not use.
+    """)
+
+add_newdoc("log1p",
+    """
+    log1p(x, out=None)
+
+    Calculates log(1 + x) for use when `x` is near zero.
+
+    Parameters
+    ----------
+    x : array_like
+        Real or complex valued input.
+    out : ndarray, optional
+        Optional output array for the function results.
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of ``log(1 + x)``.
+
+    See Also
+    --------
+    expm1, cosm1
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import scipy.special as sc
+
+    It is more accurate than using ``log(1 + x)`` directly for ``x``
+    near 0. Note that in the example below ``1 + 1e-17 == 1`` to
+    double precision.
+
+    >>> sc.log1p(1e-17)
+    1e-17
+    >>> np.log(1 + 1e-17)
+    0.0
+
+    """)
+
+add_newdoc("_log1pmx",
+    """
+    Internal function, do not use.
+    """)
+
+add_newdoc('log_expit',
+    """
+    log_expit(x, out=None)
+
+    Logarithm of the logistic sigmoid function.
+
+    The SciPy implementation of the logistic sigmoid function is
+    `scipy.special.expit`, so this function is called ``log_expit``.
+
+    The function is mathematically equivalent to ``log(expit(x))``, but
+    is formulated to avoid loss of precision for inputs with large
+    (positive or negative) magnitude.
+
+    Parameters
+    ----------
+    x : array_like
+        The values to apply ``log_expit`` to element-wise.
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    out : scalar or ndarray
+        The computed values, an ndarray of the same shape as ``x``.
+
+    See Also
+    --------
+    expit
+
+    Notes
+    -----
+    As a ufunc, ``log_expit`` takes a number of optional keyword arguments.
+    For more information see
+    `ufuncs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
+
+    .. versionadded:: 1.8.0
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.special import log_expit, expit
+
+    >>> log_expit([-3.0, 0.25, 2.5, 5.0])
+    array([-3.04858735, -0.57593942, -0.07888973, -0.00671535])
+
+    Large negative values:
+
+    >>> log_expit([-100, -500, -1000])
+    array([ -100.,  -500., -1000.])
+
+    Note that ``expit(-1000)`` returns 0, so the naive implementation
+    ``log(expit(-1000))`` returns ``-inf``.
+
+    Large positive values:
+
+    >>> log_expit([29, 120, 400])
+    array([-2.54366565e-013, -7.66764807e-053, -1.91516960e-174])
+
+    Compare that to the naive implementation:
+
+    >>> np.log(expit([29, 120, 400]))
+    array([-2.54463117e-13,  0.00000000e+00,  0.00000000e+00])
+
+    The first value is accurate to only 3 digits, and the larger inputs
+    lose all precision and return 0.
+    """)
+
+add_newdoc('logit',
+    """
+    logit(x, out=None)
+
+    Logit ufunc for ndarrays.
+
+    The logit function is defined as logit(p) = log(p/(1-p)).
+    Note that logit(0) = -inf, logit(1) = inf, and logit(p)
+    for p<0 or p>1 yields nan.
+
+    Parameters
+    ----------
+    x : ndarray
+        The ndarray to apply logit to element-wise.
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        An ndarray of the same shape as x. Its entries
+        are logit of the corresponding entry of x.
+
+    See Also
+    --------
+    expit
+
+    Notes
+    -----
+    As a ufunc logit takes a number of optional
+    keyword arguments. For more information
+    see `ufuncs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
+
+    .. versionadded:: 0.10.0
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.special import logit, expit
+
+    >>> logit([0, 0.25, 0.5, 0.75, 1])
+    array([       -inf, -1.09861229,  0.        ,  1.09861229,         inf])
+
+    `expit` is the inverse of `logit`:
+
+    >>> expit(logit([0.1, 0.75, 0.999]))
+    array([ 0.1  ,  0.75 ,  0.999])
+
+    Plot logit(x) for x in [0, 1]:
+
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.linspace(0, 1, 501)
+    >>> y = logit(x)
+    >>> plt.plot(x, y)
+    >>> plt.grid()
+    >>> plt.ylim(-6, 6)
+    >>> plt.xlabel('x')
+    >>> plt.title('logit(x)')
+    >>> plt.show()
+
+    """)
+
+add_newdoc("lpmv",
+    r"""
+    lpmv(m, v, x, out=None)
+
+    Associated Legendre function of integer order and real degree.
+
+    Defined as
+
+    .. math::
+
+        P_v^m(x) = (-1)^m (1 - x^2)^{m/2} \frac{d^m}{dx^m} P_v(x)
+
+    where
+
+    .. math::
+
+        P_v(x) = \sum_{k = 0}^\infty \frac{(-v)_k (v + 1)_k}{(k!)^2}
+                \left(\frac{1 - x}{2}\right)^k
+
+    is the Legendre function of the first kind. Here :math:`(\cdot)_k`
+    is the Pochhammer symbol; see `poch`.
+
+    Parameters
+    ----------
+    m : array_like
+        Order (int or float). If passed a float not equal to an
+        integer the function returns NaN.
+    v : array_like
+        Degree (float).
+    x : array_like
+        Argument (float). Must have ``|x| <= 1``.
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    pmv : scalar or ndarray
+        Value of the associated Legendre function.
+
+    See Also
+    --------
+    lpmn : Compute the associated Legendre function for all orders
+           ``0, ..., m`` and degrees ``0, ..., n``.
+    clpmn : Compute the associated Legendre function at complex
+            arguments.
+
+    Notes
+    -----
+    Note that this implementation includes the Condon-Shortley phase.
+
+    References
+    ----------
+    .. [1] Zhang, Jin, "Computation of Special Functions", John Wiley
+           and Sons, Inc, 1996.
+
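+    Examples
+    --------
+    For order ``m = 0`` the function reduces to the Legendre polynomial
+    of the first kind; as an illustrative sketch, compare with
+    `eval_legendre`:
+
+    >>> import numpy as np
+    >>> from scipy.special import lpmv, eval_legendre
+    >>> bool(np.isclose(lpmv(0, 3, 0.5), eval_legendre(3, 0.5)))
+    True
+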
+    """)
+
+add_newdoc("mathieu_a",
+    """
+    mathieu_a(m, q, out=None)
+
+    Characteristic value of even Mathieu functions
+
+    Parameters
+    ----------
+    m : array_like
+        Order of the function
+    q : array_like
+        Parameter of the function
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        Characteristic value for the even solution, ``ce_m(z, q)``, of
+        Mathieu's equation.
+
+    See Also
+    --------
+    mathieu_b, mathieu_cem, mathieu_sem
+
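+    Examples
+    --------
+    For ``q = 0`` Mathieu's equation reduces to the harmonic equation,
+    whose characteristic value is ``m**2``; a quick illustrative check:
+
+    >>> import numpy as np
+    >>> from scipy.special import mathieu_a
+    >>> bool(np.isclose(mathieu_a(2, 0.0), 2**2))
+    True
+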
+    """)
+
+add_newdoc("mathieu_b",
+    """
+    mathieu_b(m, q, out=None)
+
+    Characteristic value of odd Mathieu functions
+
+    Parameters
+    ----------
+    m : array_like
+        Order of the function
+    q : array_like
+        Parameter of the function
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        Characteristic value for the odd solution, ``se_m(z, q)``, of Mathieu's
+        equation.
+
+    See Also
+    --------
+    mathieu_a, mathieu_cem, mathieu_sem
+
+    """)
+
+add_newdoc("mathieu_cem",
+    """
+    mathieu_cem(m, q, x, out=None)
+
+    Even Mathieu function and its derivative
+
+    Returns the even Mathieu function, ``ce_m(x, q)``, of order `m` and
+    parameter `q` evaluated at `x` (given in degrees).  Also returns the
+    derivative with respect to `x` of ``ce_m(x, q)``.
+
+    Parameters
+    ----------
+    m : array_like
+        Order of the function
+    q : array_like
+        Parameter of the function
+    x : array_like
+        Argument of the function, *given in degrees, not radians*
+    out : tuple of ndarray, optional
+        Optional output arrays for the function results
+
+    Returns
+    -------
+    y : scalar or ndarray
+        Value of the function
+    yp : scalar or ndarray
+        Value of the derivative vs x
+
+    See Also
+    --------
+    mathieu_a, mathieu_b, mathieu_sem
+
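+    Examples
+    --------
+    For ``q = 0`` the even Mathieu function reduces to a cosine; note
+    that the argument is given in degrees (an illustrative sketch):
+
+    >>> import numpy as np
+    >>> from scipy.special import mathieu_cem
+    >>> y, yp = mathieu_cem(1, 0.0, 60.0)
+    >>> bool(np.isclose(y, np.cos(np.deg2rad(60.0))))
+    True
+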
+    """)
+
+add_newdoc("mathieu_modcem1",
+    """
+    mathieu_modcem1(m, q, x, out=None)
+
+    Even modified Mathieu function of the first kind and its derivative
+
+    Evaluates the even modified Mathieu function of the first kind,
+    ``Mc1m(x, q)``, and its derivative at `x` for order `m` and parameter
+    `q`.
+
+    Parameters
+    ----------
+    m : array_like
+        Order of the function
+    q : array_like
+        Parameter of the function
+    x : array_like
+        Argument of the function, *given in degrees, not radians*
+    out : tuple of ndarray, optional
+        Optional output arrays for the function results
+
+    Returns
+    -------
+    y : scalar or ndarray
+        Value of the function
+    yp : scalar or ndarray
+        Value of the derivative vs x
+
+    See Also
+    --------
+    mathieu_modsem1
+
+    """)
+
+add_newdoc("mathieu_modcem2",
+    """
+    mathieu_modcem2(m, q, x, out=None)
+
+    Even modified Mathieu function of the second kind and its derivative
+
+    Evaluates the even modified Mathieu function of the second kind,
+    ``Mc2m(x, q)``, and its derivative at `x` (given in degrees) for order `m`
+    and parameter `q`.
+
+    Parameters
+    ----------
+    m : array_like
+        Order of the function
+    q : array_like
+        Parameter of the function
+    x : array_like
+        Argument of the function, *given in degrees, not radians*
+    out : tuple of ndarray, optional
+        Optional output arrays for the function results
+
+    Returns
+    -------
+    y : scalar or ndarray
+        Value of the function
+    yp : scalar or ndarray
+        Value of the derivative vs x
+
+    See Also
+    --------
+    mathieu_modsem2
+
+    """)
+
+add_newdoc("mathieu_modsem1",
+    """
+    mathieu_modsem1(m, q, x, out=None)
+
+    Odd modified Mathieu function of the first kind and its derivative
+
+    Evaluates the odd modified Mathieu function of the first kind,
+    ``Ms1m(x, q)``, and its derivative at `x` (given in degrees) for order `m`
+    and parameter `q`.
+
+    Parameters
+    ----------
+    m : array_like
+        Order of the function
+    q : array_like
+        Parameter of the function
+    x : array_like
+        Argument of the function, *given in degrees, not radians*
+    out : tuple of ndarray, optional
+        Optional output arrays for the function results
+
+    Returns
+    -------
+    y : scalar or ndarray
+        Value of the function
+    yp : scalar or ndarray
+        Value of the derivative vs x
+
+    See Also
+    --------
+    mathieu_modcem1
+
+    """)
+
+add_newdoc("mathieu_modsem2",
+    """
+    mathieu_modsem2(m, q, x, out=None)
+
+    Odd modified Mathieu function of the second kind and its derivative
+
+    Evaluates the odd modified Mathieu function of the second kind,
+    ``Ms2m(x, q)``, and its derivative at `x` (given in degrees) for order `m`
+    and parameter `q`.
+
+    Parameters
+    ----------
+    m : array_like
+        Order of the function
+    q : array_like
+        Parameter of the function
+    x : array_like
+        Argument of the function, *given in degrees, not radians*
+    out : tuple of ndarray, optional
+        Optional output arrays for the function results
+
+    Returns
+    -------
+    y : scalar or ndarray
+        Value of the function
+    yp : scalar or ndarray
+        Value of the derivative vs x
+
+    See Also
+    --------
+    mathieu_modcem2
+
+    """)
+
+add_newdoc("mathieu_sem",
+    """
+    mathieu_sem(m, q, x, out=None)
+
+    Odd Mathieu function and its derivative
+
+    Returns the odd Mathieu function, ``se_m(x, q)``, of order `m` and
+    parameter `q` evaluated at `x` (given in degrees).  Also returns the
+    derivative with respect to `x` of ``se_m(x, q)``.
+
+    Parameters
+    ----------
+    m : array_like
+        Order of the function
+    q : array_like
+        Parameter of the function
+    x : array_like
+        Argument of the function, *given in degrees, not radians*.
+    out : tuple of ndarray, optional
+        Optional output arrays for the function results
+
+    Returns
+    -------
+    y : scalar or ndarray
+        Value of the function
+    yp : scalar or ndarray
+        Value of the derivative vs x
+
+    See Also
+    --------
+    mathieu_a, mathieu_b, mathieu_cem
+
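+    Examples
+    --------
+    For ``q = 0`` the odd Mathieu function reduces to a sine; note that
+    the argument is given in degrees (an illustrative sketch):
+
+    >>> import numpy as np
+    >>> from scipy.special import mathieu_sem
+    >>> y, yp = mathieu_sem(1, 0.0, 90.0)
+    >>> bool(np.isclose(y, np.sin(np.deg2rad(90.0))))
+    True
+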
+    """)
+
+add_newdoc("modfresnelm",
+    """
+    modfresnelm(x, out=None)
+
+    Modified Fresnel negative integrals
+
+    Parameters
+    ----------
+    x : array_like
+        Function argument
+    out : tuple of ndarray, optional
+        Optional output arrays for the function results
+
+    Returns
+    -------
+    fm : scalar or ndarray
+        Integral ``F_-(x)``: ``integral(exp(-1j*t*t), t=x..inf)``
+    km : scalar or ndarray
+        Integral ``K_-(x)``: ``1/sqrt(pi)*exp(1j*(x*x+pi/4))*fm``
+
+    See Also
+    --------
+    modfresnelp
+
+    """)
+
+add_newdoc("modfresnelp",
+    """
+    modfresnelp(x, out=None)
+
+    Modified Fresnel positive integrals
+
+    Parameters
+    ----------
+    x : array_like
+        Function argument
+    out : tuple of ndarray, optional
+        Optional output arrays for the function results
+
+    Returns
+    -------
+    fp : scalar or ndarray
+        Integral ``F_+(x)``: ``integral(exp(1j*t*t), t=x..inf)``
+    kp : scalar or ndarray
+        Integral ``K_+(x)``: ``1/sqrt(pi)*exp(-1j*(x*x+pi/4))*fp``
+
+    See Also
+    --------
+    modfresnelm
+
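+    Examples
+    --------
+    An illustrative check of the documented relation between the two
+    returned integrals at a single point:
+
+    >>> import numpy as np
+    >>> from scipy.special import modfresnelp
+    >>> x = 1.0
+    >>> fp, kp = modfresnelp(x)
+    >>> expected = np.exp(-1j * (x * x + np.pi / 4)) / np.sqrt(np.pi) * fp
+    >>> bool(np.isclose(kp, expected))
+    True
+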
+    """)
+
+add_newdoc("modstruve",
+    r"""
+    modstruve(v, x, out=None)
+
+    Modified Struve function.
+
+    Return the value of the modified Struve function of order `v` at `x`.  The
+    modified Struve function is defined as,
+
+    .. math::
+        L_v(x) = -\imath \exp(-\pi\imath v/2) H_v(\imath x),
+
+    where :math:`H_v` is the Struve function.
+
+    Parameters
+    ----------
+    v : array_like
+        Order of the modified Struve function (float).
+    x : array_like
+        Argument of the Struve function (float; must be positive unless `v` is
+        an integer).
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    L : scalar or ndarray
+        Value of the modified Struve function of order `v` at `x`.
+
+    Notes
+    -----
+    Three methods discussed in [1]_ are used to evaluate the function:
+
+    - power series
+    - expansion in Bessel functions (if :math:`|x| < |v| + 20`)
+    - asymptotic large-x expansion (if :math:`x \geq 0.7v + 12`)
+
+    Rounding errors are estimated based on the largest terms in the sums, and
+    the result associated with the smallest error is returned.
+
+    See Also
+    --------
+    struve
+
+    References
+    ----------
+    .. [1] NIST Digital Library of Mathematical Functions
+           https://dlmf.nist.gov/11
+
+    Examples
+    --------
+    Calculate the modified Struve function of order 1 at 2.
+
+    >>> import numpy as np
+    >>> from scipy.special import modstruve
+    >>> import matplotlib.pyplot as plt
+    >>> modstruve(1, 2.)
+    1.102759787367716
+
+    Calculate the modified Struve function at 2 for orders 1, 2 and 3 by
+    providing a list for the order parameter `v`.
+
+    >>> modstruve([1, 2, 3], 2.)
+    array([1.10275979, 0.41026079, 0.11247294])
+
+    Calculate the modified Struve function of order 1 for several points
+    by providing an array for `x`.
+
+    >>> points = np.array([2., 5., 8.])
+    >>> modstruve(1, points)
+    array([  1.10275979,  23.72821578, 399.24709139])
+
+    Compute the modified Struve function for several orders at several
+    points by providing arrays for `v` and `x`. The arrays have to be
+    broadcastable to the correct shapes.
+
+    >>> orders = np.array([[1], [2], [3]])
+    >>> points.shape, orders.shape
+    ((3,), (3, 1))
+
+    >>> modstruve(orders, points)
+    array([[1.10275979e+00, 2.37282158e+01, 3.99247091e+02],
+           [4.10260789e-01, 1.65535979e+01, 3.25973609e+02],
+           [1.12472937e-01, 9.42430454e+00, 2.33544042e+02]])
+
+    Plot the modified Struve functions of order 0 to 3 from -5 to 5.
+
+    >>> fig, ax = plt.subplots()
+    >>> x = np.linspace(-5., 5., 1000)
+    >>> for i in range(4):
+    ...     ax.plot(x, modstruve(i, x), label=f'$L_{i!r}$')
+    >>> ax.legend(ncol=2)
+    >>> ax.set_xlim(-5, 5)
+    >>> ax.set_title(r"Modified Struve functions $L_{\nu}$")
+    >>> plt.show()
+    """)
+
+add_newdoc("nbdtr",
+    r"""
+    nbdtr(k, n, p, out=None)
+
+    Negative binomial cumulative distribution function.
+
+    Returns the sum of the terms 0 through `k` of the negative binomial
+    distribution probability mass function,
+
+    .. math::
+
+        F = \sum_{j=0}^k {{n + j - 1}\choose{j}} p^n (1 - p)^j.
+
+    In a sequence of Bernoulli trials with individual success probabilities
+    `p`, this is the probability that `k` or fewer failures precede the nth
+    success.
+
+    Parameters
+    ----------
+    k : array_like
+        The maximum number of allowed failures (nonnegative int).
+    n : array_like
+        The target number of successes (positive int).
+    p : array_like
+        Probability of success in a single event (float).
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    F : scalar or ndarray
+        The probability of `k` or fewer failures before `n` successes in a
+        sequence of events with individual success probability `p`.
+
+    See Also
+    --------
+    nbdtrc
+
+    Notes
+    -----
+    If floating point values are passed for `k` or `n`, they will be truncated
+    to integers.
+
+    The terms are not summed directly; instead the regularized incomplete beta
+    function is employed, according to the formula,
+
+    .. math::
+        \mathrm{nbdtr}(k, n, p) = I_{p}(n, k + 1).
+
+    Wrapper for the Cephes [1]_ routine `nbdtr`.
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+
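+    Examples
+    --------
+    An illustrative check of the incomplete-beta identity stated in the
+    Notes, using `betainc`:
+
+    >>> import numpy as np
+    >>> from scipy.special import nbdtr, betainc
+    >>> k, n, p = 5, 4, 0.25
+    >>> bool(np.isclose(nbdtr(k, n, p), betainc(n, k + 1, p)))
+    True
+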
+    """)
+
+add_newdoc("nbdtrc",
+    r"""
+    nbdtrc(k, n, p, out=None)
+
+    Negative binomial survival function.
+
+    Returns the sum of the terms `k + 1` to infinity of the negative binomial
+    distribution probability mass function,
+
+    .. math::
+
+        F = \sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j.
+
+    In a sequence of Bernoulli trials with individual success probabilities
+    `p`, this is the probability that more than `k` failures precede the nth
+    success.
+
+    Parameters
+    ----------
+    k : array_like
+        The maximum number of allowed failures (nonnegative int).
+    n : array_like
+        The target number of successes (positive int).
+    p : array_like
+        Probability of success in a single event (float).
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    F : scalar or ndarray
+        The probability of `k + 1` or more failures before `n` successes in a
+        sequence of events with individual success probability `p`.
+
+    Notes
+    -----
+    If floating point values are passed for `k` or `n`, they will be truncated
+    to integers.
+
+    The terms are not summed directly; instead the regularized incomplete beta
+    function is employed, according to the formula,
+
+    .. math::
+        \mathrm{nbdtrc}(k, n, p) = I_{1 - p}(k + 1, n).
+
+    Wrapper for the Cephes [1]_ routine `nbdtrc`.
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
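+
+    Examples
+    --------
+    As an illustrative sketch, `nbdtr` and `nbdtrc` are complementary,
+    and the incomplete-beta identity from the Notes holds:
+
+    >>> import numpy as np
+    >>> from scipy.special import nbdtr, nbdtrc, betainc
+    >>> k, n, p = 5, 4, 0.25
+    >>> bool(np.isclose(nbdtrc(k, n, p), 1 - nbdtr(k, n, p)))
+    True
+    >>> bool(np.isclose(nbdtrc(k, n, p), betainc(k + 1, n, 1 - p)))
+    True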
+    """)
+
+add_newdoc("nbdtri",
+    """
+    nbdtri(k, n, y, out=None)
+
+    Inverse of `nbdtr` vs `p`.
+
+    Returns the inverse with respect to the parameter `p` of
+    `y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
+    function.
+
+    Parameters
+    ----------
+    k : array_like
+        The maximum number of allowed failures (nonnegative int).
+    n : array_like
+        The target number of successes (positive int).
+    y : array_like
+        The probability of `k` or fewer failures before `n` successes (float).
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    p : scalar or ndarray
+        Probability of success in a single event (float) such that
+        `nbdtr(k, n, p) = y`.
+
+    See Also
+    --------
+    nbdtr : Cumulative distribution function of the negative binomial.
+    nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`.
+    nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`.
+
+    Notes
+    -----
+    Wrapper for the Cephes [1]_ routine `nbdtri`.
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+
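+    Examples
+    --------
+    An illustrative round trip: compute a CDF value with `nbdtr`, then
+    recover the success probability:
+
+    >>> import numpy as np
+    >>> from scipy.special import nbdtr, nbdtri
+    >>> k, n, p = 5, 4, 0.25
+    >>> y = nbdtr(k, n, p)
+    >>> bool(np.isclose(nbdtri(k, n, y), p))
+    True
+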
+    """)
+
+add_newdoc("nbdtrik",
+    r"""
+    nbdtrik(y, n, p, out=None)
+
+    Inverse of `nbdtr` vs `k`.
+
+    Returns the inverse with respect to the parameter `k` of
+    `y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
+    function.
+
+    Parameters
+    ----------
+    y : array_like
+        The probability of `k` or fewer failures before `n` successes (float).
+    n : array_like
+        The target number of successes (positive int).
+    p : array_like
+        Probability of success in a single event (float).
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    k : scalar or ndarray
+        The maximum number of allowed failures such that `nbdtr(k, n, p) = y`.
+
+    See Also
+    --------
+    nbdtr : Cumulative distribution function of the negative binomial.
+    nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`.
+    nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`.
+
+    Notes
+    -----
+    Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`.
+
+    Formula 26.5.26 of [2]_,
+
+    .. math::
+        \sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n),
+
+    is used to reduce calculation of the cumulative distribution function to
+    that of a regularized incomplete beta :math:`I`.
+
+    Computation of `k` involves a search for a value that produces the desired
+    value of `y`.  The search relies on the monotonicity of `y` with `k`.
+
+    References
+    ----------
+    .. [1] Barry Brown, James Lovato, and Kathy Russell,
+           CDFLIB: Library of Fortran Routines for Cumulative Distribution
+           Functions, Inverses, and Other Parameters.
+    .. [2] Milton Abramowitz and Irene A. Stegun, eds.
+           Handbook of Mathematical Functions with Formulas,
+           Graphs, and Mathematical Tables. New York: Dover, 1972.
+
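+    Examples
+    --------
+    An illustrative round trip: compute a CDF value with `nbdtr`, then
+    search for the `k` that reproduces it:
+
+    >>> import numpy as np
+    >>> from scipy.special import nbdtr, nbdtrik
+    >>> k, n, p = 5, 4, 0.25
+    >>> y = nbdtr(k, n, p)
+    >>> bool(np.isclose(nbdtrik(y, n, p), k))
+    True
+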
+    """)
+
+add_newdoc("nbdtrin",
+    r"""
+    nbdtrin(k, y, p, out=None)
+
+    Inverse of `nbdtr` vs `n`.
+
+    Returns the inverse with respect to the parameter `n` of
+    `y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
+    function.
+
+    Parameters
+    ----------
+    k : array_like
+        The maximum number of allowed failures (nonnegative int).
+    y : array_like
+        The probability of `k` or fewer failures before `n` successes (float).
+    p : array_like
+        Probability of success in a single event (float).
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    n : scalar or ndarray
+        The number of successes `n` such that `nbdtr(k, n, p) = y`.
+
+    See Also
+    --------
+    nbdtr : Cumulative distribution function of the negative binomial.
+    nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`.
+    nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`.
+
+    Notes
+    -----
+    Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`.
+
+    Formula 26.5.26 of [2]_,
+
+    .. math::
+        \sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n),
+
+    is used to reduce calculation of the cumulative distribution function to
+    that of a regularized incomplete beta :math:`I`.
+
+    Computation of `n` involves a search for a value that produces the desired
+    value of `y`.  The search relies on the monotonicity of `y` with `n`.
+
+    References
+    ----------
+    .. [1] Barry Brown, James Lovato, and Kathy Russell,
+           CDFLIB: Library of Fortran Routines for Cumulative Distribution
+           Functions, Inverses, and Other Parameters.
+    .. [2] Milton Abramowitz and Irene A. Stegun, eds.
+           Handbook of Mathematical Functions with Formulas,
+           Graphs, and Mathematical Tables. New York: Dover, 1972.
+
+    """)
+
+add_newdoc("ncfdtr",
+    r"""
+    ncfdtr(dfn, dfd, nc, f, out=None)
+
+    Cumulative distribution function of the non-central F distribution.
+
+    The non-central F describes the distribution of,
+
+    .. math::
+        Z = \frac{X/d_n}{Y/d_d}
+
+    where :math:`X` and :math:`Y` are independently distributed, with
+    :math:`X` distributed non-central :math:`\chi^2` with noncentrality
+    parameter `nc` and :math:`d_n` degrees of freedom, and :math:`Y`
+    distributed :math:`\chi^2` with :math:`d_d` degrees of freedom.
+
+    Parameters
+    ----------
+    dfn : array_like
+        Degrees of freedom of the numerator sum of squares.  Range (0, inf).
+    dfd : array_like
+        Degrees of freedom of the denominator sum of squares.  Range (0, inf).
+    nc : array_like
+        Noncentrality parameter.  Should be in range (0, 1e4).
+    f : array_like
+        Quantiles, i.e. the upper limit of integration.
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    cdf : scalar or ndarray
+        The calculated CDF.  If all inputs are scalar, the return will be a
+        float.  Otherwise it will be an array.
+
+    See Also
+    --------
+    ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.
+    ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.
+    ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.
+    ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.
+
+    Notes
+    -----
+    Wrapper for the CDFLIB [1]_ Fortran routine `cdffnc`.
+
+    The cumulative distribution function is computed using Formula 26.6.20 of
+    [2]_:
+
+    .. math::
+        F(d_n, d_d, n_c, f) = \sum_{j=0}^\infty e^{-n_c/2} \frac{(n_c/2)^j}{j!} I_{x}(\frac{d_n}{2} + j, \frac{d_d}{2}),
+
+    where :math:`I` is the regularized incomplete beta function, and
+    :math:`x = f d_n/(f d_n + d_d)`.
+
+    The computation time required for this routine is proportional to the
+    noncentrality parameter `nc`.  Very large values of this parameter can
+    consume immense computer resources.  This is why the search range is
+    bounded by 10,000.
+
+    References
+    ----------
+    .. [1] Barry Brown, James Lovato, and Kathy Russell,
+           CDFLIB: Library of Fortran Routines for Cumulative Distribution
+           Functions, Inverses, and Other Parameters.
+    .. [2] Milton Abramowitz and Irene A. Stegun, eds.
+           Handbook of Mathematical Functions with Formulas,
+           Graphs, and Mathematical Tables. New York: Dover, 1972.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import special
+    >>> from scipy import stats
+    >>> import matplotlib.pyplot as plt
+
+    Plot the CDF of the non-central F distribution, for nc=0.  Compare with the
+    F-distribution from scipy.stats:
+
+    >>> x = np.linspace(-1, 8, num=500)
+    >>> dfn = 3
+    >>> dfd = 2
+    >>> ncf_stats = stats.f.cdf(x, dfn, dfd)
+    >>> ncf_special = special.ncfdtr(dfn, dfd, 0, x)
+
+    >>> fig = plt.figure()
+    >>> ax = fig.add_subplot(111)
+    >>> ax.plot(x, ncf_stats, 'b-', lw=3)
+    >>> ax.plot(x, ncf_special, 'r-')
+    >>> plt.show()
+
+    """)
+
+add_newdoc("ncfdtri",
+    """
+    ncfdtri(dfn, dfd, nc, p, out=None)
+
+    Inverse with respect to `f` of the CDF of the non-central F distribution.
+
+    See `ncfdtr` for more details.
+
+    Parameters
+    ----------
+    dfn : array_like
+        Degrees of freedom of the numerator sum of squares.  Range (0, inf).
+    dfd : array_like
+        Degrees of freedom of the denominator sum of squares.  Range (0, inf).
+    nc : array_like
+        Noncentrality parameter.  Should be in range (0, 1e4).
+    p : array_like
+        Value of the cumulative distribution function.  Must be in the
+        range [0, 1].
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    f : scalar or ndarray
+        Quantiles, i.e., the upper limit of integration.
+
+    See Also
+    --------
+    ncfdtr : CDF of the non-central F distribution.
+    ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.
+    ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.
+    ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.
+
+    Examples
+    --------
+    >>> from scipy.special import ncfdtr, ncfdtri
+
+    Compute the CDF for several values of `f`:
+
+    >>> f = [0.5, 1, 1.5]
+    >>> p = ncfdtr(2, 3, 1.5, f)
+    >>> p
+    array([ 0.20782291,  0.36107392,  0.47345752])
+
+    Compute the inverse.  We recover the values of `f`, as expected:
+
+    >>> ncfdtri(2, 3, 1.5, p)
+    array([ 0.5,  1. ,  1.5])
+
+    """)
+
+add_newdoc("ncfdtridfd",
+    """
+    ncfdtridfd(dfn, p, nc, f, out=None)
+
+    Calculate degrees of freedom (denominator) for the noncentral F-distribution.
+
+    This is the inverse with respect to `dfd` of `ncfdtr`.
+    See `ncfdtr` for more details.
+
+    Parameters
+    ----------
+    dfn : array_like
+        Degrees of freedom of the numerator sum of squares.  Range (0, inf).
+    p : array_like
+        Value of the cumulative distribution function.  Must be in the
+        range [0, 1].
+    nc : array_like
+        Noncentrality parameter.  Should be in range (0, 1e4).
+    f : array_like
+        Quantiles, i.e., the upper limit of integration.
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    dfd : scalar or ndarray
+        Degrees of freedom of the denominator sum of squares.
+
+    See Also
+    --------
+    ncfdtr : CDF of the non-central F distribution.
+    ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.
+    ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.
+    ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.
+
+    Notes
+    -----
+    The value of the cumulative noncentral F distribution is not necessarily
+    monotone in either degrees of freedom. There thus may be two values that
+    provide a given CDF value. This routine assumes monotonicity and will
+    find an arbitrary one of the two values.
+
+    Examples
+    --------
+    >>> from scipy.special import ncfdtr, ncfdtridfd
+
+    Compute the CDF for several values of `dfd`:
+
+    >>> dfd = [1, 2, 3]
+    >>> p = ncfdtr(2, dfd, 0.25, 15)
+    >>> p
+    array([ 0.8097138 ,  0.93020416,  0.96787852])
+
+    Compute the inverse.  We recover the values of `dfd`, as expected:
+
+    >>> ncfdtridfd(2, p, 0.25, 15)
+    array([ 1.,  2.,  3.])
+
+    """)
+
+add_newdoc("ncfdtridfn",
+    """
+    ncfdtridfn(p, dfd, nc, f, out=None)
+
+    Calculate degrees of freedom (numerator) for the noncentral F-distribution.
+
+    This is the inverse with respect to `dfn` of `ncfdtr`.
+    See `ncfdtr` for more details.
+
+    Parameters
+    ----------
+    p : array_like
+        Value of the cumulative distribution function. Must be in the
+        range [0, 1].
+    dfd : array_like
+        Degrees of freedom of the denominator sum of squares. Range (0, inf).
+    nc : array_like
+        Noncentrality parameter.  Should be in range (0, 1e4).
+    f : float
+        Quantiles, i.e., the upper limit of integration.
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    dfn : scalar or ndarray
+        Degrees of freedom of the numerator sum of squares.
+
+    See Also
+    --------
+    ncfdtr : CDF of the non-central F distribution.
+    ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.
+    ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.
+    ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.
+
+    Notes
+    -----
+    The value of the cumulative noncentral F distribution is not necessarily
+    monotone in either degrees of freedom. There thus may be two values that
+    provide a given CDF value. This routine assumes monotonicity and will
+    find an arbitrary one of the two values.
+
+    Examples
+    --------
+    >>> from scipy.special import ncfdtr, ncfdtridfn
+
+    Compute the CDF for several values of `dfn`:
+
+    >>> dfn = [1, 2, 3]
+    >>> p = ncfdtr(dfn, 2, 0.25, 15)
+    >>> p
+    array([ 0.92562363,  0.93020416,  0.93188394])
+
+    Compute the inverse. We recover the values of `dfn`, as expected:
+
+    >>> ncfdtridfn(p, 2, 0.25, 15)
+    array([ 1.,  2.,  3.])
+
+    """)
+
+add_newdoc("ncfdtrinc",
+    """
+    ncfdtrinc(dfn, dfd, p, f, out=None)
+
+    Calculate non-centrality parameter for non-central F distribution.
+
+    This is the inverse with respect to `nc` of `ncfdtr`.
+    See `ncfdtr` for more details.
+
+    Parameters
+    ----------
+    dfn : array_like
+        Degrees of freedom of the numerator sum of squares. Range (0, inf).
+    dfd : array_like
+        Degrees of freedom of the denominator sum of squares. Range (0, inf).
+    p : array_like
+        Value of the cumulative distribution function. Must be in the
+        range [0, 1].
+    f : array_like
+        Quantiles, i.e., the upper limit of integration.
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    nc : scalar or ndarray
+        Noncentrality parameter.
+
+    See Also
+    --------
+    ncfdtr : CDF of the non-central F distribution.
+    ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.
+    ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.
+    ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.
+
+    Examples
+    --------
+    >>> from scipy.special import ncfdtr, ncfdtrinc
+
+    Compute the CDF for several values of `nc`:
+
+    >>> nc = [0.5, 1.5, 2.0]
+    >>> p = ncfdtr(2, 3, nc, 15)
+    >>> p
+    array([ 0.96309246,  0.94327955,  0.93304098])
+
+    Compute the inverse. We recover the values of `nc`, as expected:
+
+    >>> ncfdtrinc(2, 3, p, 15)
+    array([ 0.5,  1.5,  2. ])
+
+    """)
+
+add_newdoc("nctdtr",
+    """
+    nctdtr(df, nc, t, out=None)
+
+    Cumulative distribution function of the non-central `t` distribution.
+
+    Parameters
+    ----------
+    df : array_like
+        Degrees of freedom of the distribution. Should be in range (0, inf).
+    nc : array_like
+        Noncentrality parameter. Should be in range (-1e6, 1e6).
+    t : array_like
+        Quantiles, i.e., the upper limit of integration.
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    cdf : scalar or ndarray
+        The calculated CDF. If all inputs are scalar, the return will be a
+        float. Otherwise, it will be an array.
+
+    See Also
+    --------
+    nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.
+    nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.
+    nctdtrinc : Calculate non-centrality parameter, given CDF and iCDF values.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import special
+    >>> from scipy import stats
+    >>> import matplotlib.pyplot as plt
+
+    Plot the CDF of the non-central t distribution, for nc=0. Compare with the
+    t-distribution from scipy.stats:
+
+    >>> x = np.linspace(-5, 5, num=500)
+    >>> df = 3
+    >>> nct_stats = stats.t.cdf(x, df)
+    >>> nct_special = special.nctdtr(df, 0, x)
+
+    >>> fig = plt.figure()
+    >>> ax = fig.add_subplot(111)
+    >>> ax.plot(x, nct_stats, 'b-', lw=3)
+    >>> ax.plot(x, nct_special, 'r-')
+    >>> plt.show()
+
+    """)
+
+add_newdoc("nctdtridf",
+    """
+    nctdtridf(p, nc, t, out=None)
+
+    Calculate degrees of freedom for non-central t distribution.
+
+    See `nctdtr` for more details.
+
+    Parameters
+    ----------
+    p : array_like
+        CDF values, in range (0, 1].
+    nc : array_like
+        Noncentrality parameter. Should be in range (-1e6, 1e6).
+    t : array_like
+        Quantiles, i.e., the upper limit of integration.
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    df : scalar or ndarray
+        The calculated degrees of freedom. If all inputs are scalar, the
+        return will be a float. Otherwise, it will be an array.
+
+    See Also
+    --------
+    nctdtr :  CDF of the non-central `t` distribution.
+    nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.
+    nctdtrinc : Calculate non-centrality parameter, given CDF and iCDF values.
+
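+    Examples
+    --------
+    A minimal round-trip sketch; the recovered values are accurate only
+    to the tolerance of the underlying inversion routine:
+
+    >>> import numpy as np
+    >>> from scipy.special import nctdtr, nctdtridf
+    >>> df = [1, 2, 3]
+    >>> p = nctdtr(df, 0.25, 2)
+    >>> df_recovered = nctdtridf(p, 0.25, 2)
+    >>> bool(np.allclose(df_recovered, df))
+    True
+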
+    """)
+
+add_newdoc("nctdtrinc",
+    """
+    nctdtrinc(df, p, t, out=None)
+
+    Calculate non-centrality parameter for non-central t distribution.
+
+    See `nctdtr` for more details.
+
+    Parameters
+    ----------
+    df : array_like
+        Degrees of freedom of the distribution. Should be in range (0, inf).
+    p : array_like
+        CDF values, in range (0, 1].
+    t : array_like
+        Quantiles, i.e., the upper limit of integration.
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    nc : scalar or ndarray
+        Noncentrality parameter
+
+    See Also
+    --------
+    nctdtr :  CDF of the non-central `t` distribution.
+    nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.
+    nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.
+
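+    Examples
+    --------
+    A minimal round-trip sketch; accuracy is limited by the underlying
+    inversion routine:
+
+    >>> import numpy as np
+    >>> from scipy.special import nctdtr, nctdtrinc
+    >>> nc = [0.5, 1.5]
+    >>> p = nctdtr(3, nc, 2)
+    >>> nc_recovered = nctdtrinc(3, p, 2)
+    >>> bool(np.allclose(nc_recovered, nc))
+    True
+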
+    """)
+
+add_newdoc("nctdtrit",
+    """
+    nctdtrit(df, nc, p, out=None)
+
+    Inverse cumulative distribution function of the non-central t distribution.
+
+    See `nctdtr` for more details.
+
+    Parameters
+    ----------
+    df : array_like
+        Degrees of freedom of the distribution. Should be in range (0, inf).
+    nc : array_like
+        Noncentrality parameter. Should be in range (-1e6, 1e6).
+    p : array_like
+        CDF values, in range (0, 1].
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    t : scalar or ndarray
+        Quantiles
+
+    See Also
+    --------
+    nctdtr :  CDF of the non-central `t` distribution.
+    nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.
+    nctdtrinc : Calculate non-centrality parameter, given CDF and iCDF values.
+
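+    Examples
+    --------
+    A minimal round-trip sketch; accuracy is limited by the underlying
+    inversion routine:
+
+    >>> import numpy as np
+    >>> from scipy.special import nctdtr, nctdtrit
+    >>> t = [0.3, 1.0]
+    >>> p = nctdtr(3, 0.25, t)
+    >>> t_recovered = nctdtrit(3, 0.25, p)
+    >>> bool(np.allclose(t_recovered, t))
+    True
+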
+    """)
+
+add_newdoc("ndtr",
+    r"""
+    ndtr(x, out=None)
+
+    Gaussian cumulative distribution function.
+
+    Returns the area under the standard Gaussian probability
+    density function, integrated from minus infinity to `x`
+
+    .. math::
+
+       \frac{1}{\sqrt{2\pi}} \int_{-\infty}^x \exp(-t^2/2) dt
+
+    Parameters
+    ----------
+    x : array_like, real or complex
+        Argument
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        The value of the normal CDF evaluated at `x`
+
+    See Also
+    --------
+    erf, erfc, scipy.stats.norm, log_ndtr
+
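+    Examples
+    --------
+    Two quick checks; the second follows from the symmetry of the
+    standard normal density:
+
+    >>> import numpy as np
+    >>> from scipy.special import ndtr
+    >>> ndtr(0.0)
+    0.5
+    >>> bool(np.isclose(ndtr(1.0) + ndtr(-1.0), 1.0))
+    True
+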
+    """)
+
+
+add_newdoc("nrdtrimn",
+    """
+    nrdtrimn(p, x, std, out=None)
+
+    Calculate mean of normal distribution given other parameters.
+
+    Parameters
+    ----------
+    p : array_like
+        CDF values, in range (0, 1].
+    x : array_like
+        Quantiles, i.e. the upper limit of integration.
+    std : array_like
+        Standard deviation.
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    mn : scalar or ndarray
+        The mean of the normal distribution.
+
+    See Also
+    --------
+    nrdtrisd, ndtr
+
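+    Examples
+    --------
+    A minimal sketch, assuming the argument order of the signature above:
+    build a CDF value with `ndtr`, then recover the mean.
+
+    >>> from scipy.special import ndtr, nrdtrimn
+    >>> mn, std, x = 1.0, 2.0, 2.5
+    >>> p = ndtr((x - mn)/std)
+    >>> mn_recovered = nrdtrimn(p, x, std)  # should be close to mn
+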
+    """)
+
+add_newdoc("nrdtrisd",
+    """
+    nrdtrisd(p, x, mn, out=None)
+
+    Calculate standard deviation of normal distribution given other parameters.
+
+    Parameters
+    ----------
+    p : array_like
+        CDF values, in range (0, 1].
+    x : array_like
+        Quantiles, i.e. the upper limit of integration.
+    mn : array_like
+        The mean of the normal distribution.
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    std : scalar or ndarray
+        Standard deviation.
+
+    See Also
+    --------
+    nrdtrimn, ndtr
+
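+    Examples
+    --------
+    A minimal sketch, assuming the argument order of the signature above:
+    build a CDF value with `ndtr`, then recover the standard deviation.
+
+    >>> from scipy.special import ndtr, nrdtrisd
+    >>> mn, std, x = 1.0, 2.0, 2.5
+    >>> p = ndtr((x - mn)/std)
+    >>> std_recovered = nrdtrisd(p, x, mn)  # should be close to std
+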
+    """)
+
+add_newdoc("log_ndtr",
+    """
+    log_ndtr(x, out=None)
+
+    Logarithm of Gaussian cumulative distribution function.
+
+    Returns the log of the area under the standard Gaussian probability
+    density function, integrated from minus infinity to `x`::
+
+        log(1/sqrt(2*pi) * integral(exp(-t**2 / 2), t=-inf..x))
+
+    Parameters
+    ----------
+    x : array_like, real or complex
+        Argument
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        The value of the log of the normal CDF evaluated at `x`
+
+    See Also
+    --------
+    erf
+    erfc
+    scipy.stats.norm
+    ndtr
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.special import log_ndtr, ndtr
+
+    The benefit of ``log_ndtr(x)`` over the naive implementation
+    ``np.log(ndtr(x))`` is most evident with moderate to large positive
+    values of ``x``:
+
+    >>> x = np.array([6, 7, 9, 12, 15, 25])
+    >>> log_ndtr(x)
+    array([-9.86587646e-010, -1.27981254e-012, -1.12858841e-019,
+           -1.77648211e-033, -3.67096620e-051, -3.05669671e-138])
+
+    The results of the naive calculation for the moderate ``x`` values
+    have only 5 or 6 correct significant digits. For values of ``x``
+    greater than approximately 8.3, the naive expression returns 0:
+
+    >>> np.log(ndtr(x))
+    array([-9.86587701e-10, -1.27986510e-12,  0.00000000e+00,
+            0.00000000e+00,  0.00000000e+00,  0.00000000e+00])
+    """)
+
+add_newdoc("ndtri",
+    """
+    ndtri(y, out=None)
+
+    Inverse of `ndtr` vs x
+
+    Returns the argument x for which the area under the Gaussian
+    probability density function (integrated from minus infinity to `x`)
+    is equal to y.
+
+    Parameters
+    ----------
+    y : array_like
+        Probability
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    x : scalar or ndarray
+        Value of x such that ``ndtr(x) == y``.
+
+    See Also
+    --------
+    ndtr
+
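+    Examples
+    --------
+    `ndtri` inverts `ndtr`:
+
+    >>> import numpy as np
+    >>> from scipy.special import ndtr, ndtri
+    >>> ndtri(0.5)
+    0.0
+    >>> x = np.array([-1.0, 0.5, 2.0])
+    >>> bool(np.allclose(ndtri(ndtr(x)), x))
+    True
+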
+    """)
+
+add_newdoc("obl_ang1",
+    """
+    obl_ang1(m, n, c, x, out=None)
+
+    Oblate spheroidal angular function of the first kind and its derivative
+
+    Computes the oblate spheroidal angular function of the first kind
+    and its derivative (with respect to `x`) for mode parameters m>=0
+    and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
+
+    Parameters
+    ----------
+    m : array_like
+        Mode parameter m (nonnegative)
+    n : array_like
+        Mode parameter n (>= m)
+    c : array_like
+        Spheroidal parameter
+    x : array_like
+        Parameter x (``|x| < 1.0``)
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    s : scalar or ndarray
+        Value of the function
+    sp : scalar or ndarray
+        Value of the derivative vs x
+
+    See Also
+    --------
+    obl_ang1_cv
+
+    """)
+
+add_newdoc("obl_ang1_cv",
+    """
+    obl_ang1_cv(m, n, c, cv, x, out=None)
+
+    Oblate spheroidal angular function obl_ang1 for precomputed characteristic value
+
+    Computes the oblate spheroidal angular function of the first kind
+    and its derivative (with respect to `x`) for mode parameters m>=0
+    and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
+    pre-computed characteristic value.
+
+    Parameters
+    ----------
+    m : array_like
+        Mode parameter m (nonnegative)
+    n : array_like
+        Mode parameter n (>= m)
+    c : array_like
+        Spheroidal parameter
+    cv : array_like
+        Characteristic value
+    x : array_like
+        Parameter x (``|x| < 1.0``)
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    s : scalar or ndarray
+        Value of the function
+    sp : scalar or ndarray
+        Value of the derivative vs x
+
+    See Also
+    --------
+    obl_ang1
+
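+    Examples
+    --------
+    A minimal usage sketch with arbitrarily chosen arguments: compute the
+    characteristic value with `obl_cv` first, then pass it in.
+
+    >>> from scipy.special import obl_cv, obl_ang1_cv
+    >>> m, n, c, x = 0, 1, 0.5, 0.3
+    >>> cv = obl_cv(m, n, c)
+    >>> s, sp = obl_ang1_cv(m, n, c, cv, x)  # value and derivative
+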
+    """)
+
+add_newdoc("obl_cv",
+    """
+    obl_cv(m, n, c, out=None)
+
+    Characteristic value of oblate spheroidal function
+
+    Computes the characteristic value of oblate spheroidal wave
+    functions of order `m`, `n` (n>=m) and spheroidal parameter `c`.
+
+    Parameters
+    ----------
+    m : array_like
+        Mode parameter m (nonnegative)
+    n : array_like
+        Mode parameter n (>= m)
+    c : array_like
+        Spheroidal parameter
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    cv : scalar or ndarray
+        Characteristic value
+
+    """)
+
+add_newdoc("obl_rad1",
+    """
+    obl_rad1(m, n, c, x, out=None)
+
+    Oblate spheroidal radial function of the first kind and its derivative
+
+    Computes the oblate spheroidal radial function of the first kind
+    and its derivative (with respect to `x`) for mode parameters m>=0
+    and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
+
+    Parameters
+    ----------
+    m : array_like
+        Mode parameter m (nonnegative)
+    n : array_like
+        Mode parameter n (>= m)
+    c : array_like
+        Spheroidal parameter
+    x : array_like
+        Parameter x (``|x| < 1.0``)
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    s : scalar or ndarray
+        Value of the function
+    sp : scalar or ndarray
+        Value of the derivative vs x
+
+    See Also
+    --------
+    obl_rad1_cv
+
+    """)
+
+add_newdoc("obl_rad1_cv",
+    """
+    obl_rad1_cv(m, n, c, cv, x, out=None)
+
+    Oblate spheroidal radial function obl_rad1 for precomputed characteristic value
+
+    Computes the oblate spheroidal radial function of the first kind
+    and its derivative (with respect to `x`) for mode parameters m>=0
+    and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
+    pre-computed characteristic value.
+
+    Parameters
+    ----------
+    m : array_like
+        Mode parameter m (nonnegative)
+    n : array_like
+        Mode parameter n (>= m)
+    c : array_like
+        Spheroidal parameter
+    cv : array_like
+        Characteristic value
+    x : array_like
+        Parameter x (``|x| < 1.0``)
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    s : scalar or ndarray
+        Value of the function
+    sp : scalar or ndarray
+        Value of the derivative vs x
+
+    See Also
+    --------
+    obl_rad1
+
+    """)
+
+add_newdoc("obl_rad2",
+    """
+    obl_rad2(m, n, c, x, out=None)
+
+    Oblate spheroidal radial function of the second kind and its derivative.
+
+    Computes the oblate spheroidal radial function of the second kind
+    and its derivative (with respect to `x`) for mode parameters m>=0
+    and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
+
+    Parameters
+    ----------
+    m : array_like
+        Mode parameter m (nonnegative)
+    n : array_like
+        Mode parameter n (>= m)
+    c : array_like
+        Spheroidal parameter
+    x : array_like
+        Parameter x (``|x| < 1.0``)
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    s : scalar or ndarray
+        Value of the function
+    sp : scalar or ndarray
+        Value of the derivative vs x
+
+    See Also
+    --------
+    obl_rad2_cv
+
+    """)
+
+add_newdoc("obl_rad2_cv",
+    """
+    obl_rad2_cv(m, n, c, cv, x, out=None)
+
+    Oblate spheroidal radial function obl_rad2 for precomputed characteristic value
+
+    Computes the oblate spheroidal radial function of the second kind
+    and its derivative (with respect to `x`) for mode parameters m>=0
+    and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
+    pre-computed characteristic value.
+
+    Parameters
+    ----------
+    m : array_like
+        Mode parameter m (nonnegative)
+    n : array_like
+        Mode parameter n (>= m)
+    c : array_like
+        Spheroidal parameter
+    cv : array_like
+        Characteristic value
+    x : array_like
+        Parameter x (``|x| < 1.0``)
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    s : scalar or ndarray
+        Value of the function
+    sp : scalar or ndarray
+        Value of the derivative vs x
+
+    See Also
+    --------
+    obl_rad2
+    """)
+
+add_newdoc("pbdv",
+    """
+    pbdv(v, x, out=None)
+
+    Parabolic cylinder function D
+
+    Returns (d, dp) the parabolic cylinder function Dv(x) in d and the
+    derivative, Dv'(x) in dp.
+
+    Parameters
+    ----------
+    v : array_like
+        Real parameter
+    x : array_like
+        Real argument
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    d : scalar or ndarray
+        Value of the function
+    dp : scalar or ndarray
+        Value of the derivative vs x
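+
+    Examples
+    --------
+    For ``v = 0`` the function reduces to :math:`D_0(x) = e^{-x^2/4}`,
+    which gives a quick sanity check:
+
+    >>> import numpy as np
+    >>> from scipy.special import pbdv
+    >>> d, dp = pbdv(0, 1.0)
+    >>> bool(np.isclose(d, np.exp(-0.25)))
+    True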
+    """)
+
+add_newdoc("pbvv",
+    """
+    pbvv(v, x, out=None)
+
+    Parabolic cylinder function V
+
+    Returns the parabolic cylinder function Vv(x) in v and the
+    derivative, Vv'(x) in vp.
+
+    Parameters
+    ----------
+    v : array_like
+        Real parameter
+    x : array_like
+        Real argument
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    v : scalar or ndarray
+        Value of the function
+    vp : scalar or ndarray
+        Value of the derivative vs x
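+
+    Examples
+    --------
+    A minimal usage sketch with arbitrarily chosen arguments:
+
+    >>> from scipy.special import pbvv
+    >>> v, vp = pbvv(1, 0.5)  # V_1(0.5) and its derivative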
+    """)
+
+add_newdoc("pbwa",
+    r"""
+    pbwa(a, x, out=None)
+
+    Parabolic cylinder function W.
+
+    The function is a particular solution to the differential equation
+
+    .. math::
+
+        y'' + \left(\frac{1}{4}x^2 - a\right)y = 0,
+
+    for a full definition see section 12.14 in [1]_.
+
+    Parameters
+    ----------
+    a : array_like
+        Real parameter
+    x : array_like
+        Real argument
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    w : scalar or ndarray
+        Value of the function
+    wp : scalar or ndarray
+        Value of the derivative in x
+
+    Notes
+    -----
+    The function is a wrapper for a Fortran routine by Zhang and Jin
+    [2]_. The implementation is accurate only for ``|a|, |x| < 5`` and
+    returns NaN outside that range.
+
+    References
+    ----------
+    .. [1] Digital Library of Mathematical Functions, 12.14.
+           https://dlmf.nist.gov/12.14
+    .. [2] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
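+
+    Examples
+    --------
+    A minimal usage sketch, keeping the arguments inside the accurate
+    region ``|a|, |x| < 5`` noted above:
+
+    >>> from scipy.special import pbwa
+    >>> w, wp = pbwa(1.0, 2.0)  # W(1, 2) and its derivative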
+    """)
+
+add_newdoc("pdtr",
+    r"""
+    pdtr(k, m, out=None)
+
+    Poisson cumulative distribution function.
+
+    Defined as the probability that a Poisson-distributed random
+    variable with event rate :math:`m` is less than or equal to
+    :math:`k`. More concretely, this works out to be [1]_
+
+    .. math::
+
+       \exp(-m) \sum_{j = 0}^{\lfloor{k}\rfloor} \frac{m^j}{j!}.
+
+    Parameters
+    ----------
+    k : array_like
+        Number of occurrences (nonnegative, real)
+    m : array_like
+        Shape parameter (nonnegative, real)
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the Poisson cumulative distribution function
+
+    See Also
+    --------
+    pdtrc : Poisson survival function
+    pdtrik : inverse of `pdtr` with respect to `k`
+    pdtri : inverse of `pdtr` with respect to `m`
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Poisson_distribution
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import scipy.special as sc
+
+    It is a cumulative distribution function, so it converges to 1
+    monotonically as `k` goes to infinity.
+
+    >>> sc.pdtr([1, 10, 100, np.inf], 1)
+    array([0.73575888, 0.99999999, 1.        , 1.        ])
+
+    It is discontinuous at integers and constant between integers.
+
+    >>> sc.pdtr([1, 1.5, 1.9, 2], 1)
+    array([0.73575888, 0.73575888, 0.73575888, 0.9196986 ])
+
+    """)
+
+add_newdoc("pdtrc",
+    """
+    pdtrc(k, m, out=None)
+
+    Poisson survival function
+
+    Returns the sum of the terms from k+1 to infinity of the Poisson
+    distribution: ``sum(exp(-m) * m**j / j!, j=k+1..inf)``, which equals
+    ``gammainc(k + 1, m)``. Arguments must both be non-negative doubles.
+
+    Parameters
+    ----------
+    k : array_like
+        Number of occurrences (nonnegative, real)
+    m : array_like
+        Shape parameter (nonnegative, real)
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the Poisson survival function
+
+    See Also
+    --------
+    pdtr : Poisson cumulative distribution function
+    pdtrik : inverse of `pdtr` with respect to `k`
+    pdtri : inverse of `pdtr` with respect to `m`
+
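+    Examples
+    --------
+    The identity with the regularized lower incomplete gamma function
+    stated above can be checked directly:
+
+    >>> import numpy as np
+    >>> from scipy.special import pdtrc, gammainc
+    >>> bool(np.isclose(pdtrc(3, 2.5), gammainc(4, 2.5)))
+    True
+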
+    """)
+
+add_newdoc("pdtri",
+    """
+    pdtri(k, y, out=None)
+
+    Inverse to `pdtr` vs m
+
+    Returns the Poisson variable `m` such that the sum from 0 to `k` of
+    the Poisson density is equal to the given probability `y`:
+    calculated by ``gammainccinv(k + 1, y)``. `k` must be a nonnegative
+    integer and `y` between 0 and 1.
+
+    Parameters
+    ----------
+    k : array_like
+        Number of occurrences (nonnegative, real)
+    y : array_like
+        Probability
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the shape parameter `m` such that ``pdtr(k, m) = y``
+
+    See Also
+    --------
+    pdtr : Poisson cumulative distribution function
+    pdtrc : Poisson survival function
+    pdtrik : inverse of `pdtr` with respect to `k`
+
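+    Examples
+    --------
+    A minimal round-trip sketch: build a CDF value with `pdtr`, then
+    recover the shape parameter:
+
+    >>> import numpy as np
+    >>> from scipy.special import pdtr, pdtri
+    >>> m = 2.0
+    >>> y = pdtr(3, m)
+    >>> bool(np.isclose(pdtri(3, y), m))
+    True
+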
+    """)
+
+add_newdoc("pdtrik",
+    """
+    pdtrik(p, m, out=None)
+
+    Inverse to `pdtr` vs `k`.
+
+    Parameters
+    ----------
+    p : array_like
+        Probability
+    m : array_like
+        Shape parameter (nonnegative, real)
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        The number of occurrences `k` such that ``pdtr(k, m) = p``
+
+    See Also
+    --------
+    pdtr : Poisson cumulative distribution function
+    pdtrc : Poisson survival function
+    pdtri : inverse of `pdtr` with respect to `m`
+
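+    Examples
+    --------
+    A minimal usage sketch; the returned `k` is real-valued even though
+    `pdtr` only changes at integer `k`:
+
+    >>> from scipy.special import pdtrik
+    >>> k = pdtrik(0.8, 3.0)  # k with pdtr(k, 3.0) approximately 0.8
+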
+    """)
+
+add_newdoc("poch",
+    r"""
+    poch(z, m, out=None)
+
+    Pochhammer symbol.
+
+    The Pochhammer symbol (rising factorial) is defined as
+
+    .. math::
+
+        (z)_m = \frac{\Gamma(z + m)}{\Gamma(z)}
+
+    For positive integer `m` it reads
+
+    .. math::
+
+        (z)_m = z (z + 1) \cdots (z + m - 1)
+
+    See [dlmf]_ for more details.
+
+    Parameters
+    ----------
+    z, m : array_like
+        Real-valued arguments.
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        The value of the function.
+
+    References
+    ----------
+    .. [dlmf] NIST, Digital Library of Mathematical Functions
+        https://dlmf.nist.gov/5.2#iii
+
+    Examples
+    --------
+    >>> import scipy.special as sc
+
+    It is 1 when m is 0.
+
+    >>> sc.poch([1, 2, 3, 4], 0)
+    array([1., 1., 1., 1.])
+
+    For z equal to 1 it reduces to the factorial function.
+
+    >>> sc.poch(1, 5)
+    120.0
+    >>> 1 * 2 * 3 * 4 * 5
+    120
+
+    It can be expressed in terms of the gamma function.
+
+    >>> z, m = 3.7, 2.1
+    >>> sc.poch(z, m)
+    20.529581933776953
+    >>> sc.gamma(z + m) / sc.gamma(z)
+    20.52958193377696
+
+    """)
+
+add_newdoc("powm1", """
+    powm1(x, y, out=None)
+
+    Computes ``x**y - 1``.
+
+    This function is useful when `y` is near 0, or when `x` is near 1.
+
+    The function is implemented for real types only (unlike ``numpy.power``,
+    which accepts complex inputs).
+
+    Parameters
+    ----------
+    x : array_like
+        The base. Must be a real type (i.e. integer or float, not complex).
+    y : array_like
+        The exponent. Must be a real type (i.e. integer or float, not complex).
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        Result of the calculation
+
+    Notes
+    -----
+    .. versionadded:: 1.10.0
+
+    The underlying code is implemented for single precision and double
+    precision floats only.  Unlike `numpy.power`, integer inputs to
+    `powm1` are converted to floating point, and complex inputs are
+    not accepted.
+
+    Note the following edge cases:
+
+    * ``powm1(x, 0)`` returns 0 for any ``x``, including 0, ``inf``
+      and ``nan``.
+    * ``powm1(1, y)`` returns 0 for any ``y``, including ``nan``
+      and ``inf``.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.special import powm1
+
+    >>> x = np.array([1.2, 10.0, 0.9999999975])
+    >>> y = np.array([1e-9, 1e-11, 0.1875])
+    >>> powm1(x, y)
+    array([ 1.82321557e-10,  2.30258509e-11, -4.68749998e-10])
+
+    It can be verified that the relative errors in those results
+    are less than 2.5e-16.
+
+    Compare that to the result of ``x**y - 1``, where the
+    relative errors are all larger than 8e-8:
+
+    >>> x**y - 1
+    array([ 1.82321491e-10,  2.30258035e-11, -4.68750039e-10])
+
+    """)
+
+
+add_newdoc("pro_ang1",
+    """
+    pro_ang1(m, n, c, x, out=None)
+
+    Prolate spheroidal angular function of the first kind and its derivative
+
+    Computes the prolate spheroidal angular function of the first kind
+    and its derivative (with respect to `x`) for mode parameters m>=0
+    and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
+
+    Parameters
+    ----------
+    m : array_like
+        Nonnegative mode parameter m
+    n : array_like
+        Mode parameter n (>= m)
+    c : array_like
+        Spheroidal parameter
+    x : array_like
+        Real parameter (``|x| < 1.0``)
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    s : scalar or ndarray
+        Value of the function
+    sp : scalar or ndarray
+        Value of the derivative vs x
+    """)
+
+add_newdoc("pro_ang1_cv",
+    """
+    pro_ang1_cv(m, n, c, cv, x, out=None)
+
+    Prolate spheroidal angular function pro_ang1 for precomputed characteristic value
+
+    Computes the prolate spheroidal angular function of the first kind
+    and its derivative (with respect to `x`) for mode parameters m>=0
+    and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
+    pre-computed characteristic value.
+
+    Parameters
+    ----------
+    m : array_like
+        Nonnegative mode parameter m
+    n : array_like
+        Mode parameter n (>= m)
+    c : array_like
+        Spheroidal parameter
+    cv : array_like
+        Characteristic value
+    x : array_like
+        Real parameter (``|x| < 1.0``)
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    s : scalar or ndarray
+        Value of the function
+    sp : scalar or ndarray
+        Value of the derivative vs x
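+
+    Examples
+    --------
+    A minimal usage sketch with arbitrarily chosen arguments: compute the
+    characteristic value with `pro_cv` first, then pass it in.
+
+    >>> from scipy.special import pro_cv, pro_ang1_cv
+    >>> m, n, c, x = 0, 1, 0.5, 0.3
+    >>> cv = pro_cv(m, n, c)
+    >>> s, sp = pro_ang1_cv(m, n, c, cv, x)  # value and derivative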
+    """)
+
+add_newdoc("pro_cv",
+    """
+    pro_cv(m, n, c, out=None)
+
+    Characteristic value of prolate spheroidal function
+
+    Computes the characteristic value of prolate spheroidal wave
+    functions of order `m`, `n` (n>=m) and spheroidal parameter `c`.
+
+    Parameters
+    ----------
+    m : array_like
+        Nonnegative mode parameter m
+    n : array_like
+        Mode parameter n (>= m)
+    c : array_like
+        Spheroidal parameter
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    cv : scalar or ndarray
+        Characteristic value
+    """)
+
+add_newdoc("pro_rad1",
+    """
+    pro_rad1(m, n, c, x, out=None)
+
+    Prolate spheroidal radial function of the first kind and its derivative
+
+    Computes the prolate spheroidal radial function of the first kind
+    and its derivative (with respect to `x`) for mode parameters m>=0
+    and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
+
+    Parameters
+    ----------
+    m : array_like
+        Nonnegative mode parameter m
+    n : array_like
+        Mode parameter n (>= m)
+    c : array_like
+        Spheroidal parameter
+    x : array_like
+        Real parameter (``|x| < 1.0``)
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    s : scalar or ndarray
+        Value of the function
+    sp : scalar or ndarray
+        Value of the derivative vs x
+    """)
+
+add_newdoc("pro_rad1_cv",
+    """
+    pro_rad1_cv(m, n, c, cv, x, out=None)
+
+    Prolate spheroidal radial function pro_rad1 for precomputed characteristic value
+
+    Computes the prolate spheroidal radial function of the first kind
+    and its derivative (with respect to `x`) for mode parameters m>=0
+    and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
+    pre-computed characteristic value.
+
+    Parameters
+    ----------
+    m : array_like
+        Nonnegative mode parameter m
+    n : array_like
+        Mode parameter n (>= m)
+    c : array_like
+        Spheroidal parameter
+    cv : array_like
+        Characteristic value
+    x : array_like
+        Real parameter (``|x| < 1.0``)
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    s : scalar or ndarray
+        Value of the function
+    sp : scalar or ndarray
+        Value of the derivative vs x
+    """)
+
+add_newdoc("pro_rad2",
+    """
+    pro_rad2(m, n, c, x, out=None)
+
+    Prolate spheroidal radial function of the second kind and its derivative
+
+    Computes the prolate spheroidal radial function of the second kind
+    and its derivative (with respect to `x`) for mode parameters m>=0
+    and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
+
+    Parameters
+    ----------
+    m : array_like
+        Nonnegative mode parameter m
+    n : array_like
+        Mode parameter n (>= m)
+    c : array_like
+        Spheroidal parameter
+    x : array_like
+        Real parameter (``|x| < 1.0``)
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    s : scalar or ndarray
+        Value of the function
+    sp : scalar or ndarray
+        Value of the derivative vs x
+    """)
+
+add_newdoc("pro_rad2_cv",
+    """
+    pro_rad2_cv(m, n, c, cv, x, out=None)
+
+    Prolate spheroidal radial function pro_rad2 for precomputed characteristic value
+
+    Computes the prolate spheroidal radial function of the second kind
+    and its derivative (with respect to `x`) for mode parameters m>=0
+    and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
+    pre-computed characteristic value.
+
+    Parameters
+    ----------
+    m : array_like
+        Nonnegative mode parameter m
+    n : array_like
+        Mode parameter n (>= m)
+    c : array_like
+        Spheroidal parameter
+    cv : array_like
+        Characteristic value
+    x : array_like
+        Real parameter (``|x| < 1.0``)
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    s : scalar or ndarray
+        Value of the function
+    sp : scalar or ndarray
+        Value of the derivative vs x
+    """)
+
+add_newdoc("pseudo_huber",
+    r"""
+    pseudo_huber(delta, r, out=None)
+
+    Pseudo-Huber loss function.
+
+    .. math:: \mathrm{pseudo\_huber}(\delta, r) = \delta^2 \left( \sqrt{ 1 + \left( \frac{r}{\delta} \right)^2 } - 1 \right)
+
+    Parameters
+    ----------
+    delta : array_like
+        Input array, indicating the soft quadratic vs. linear loss changepoint.
+    r : array_like
+        Input array, possibly representing residuals.
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    res : scalar or ndarray
+        The computed Pseudo-Huber loss function values.
+
+    See also
+    --------
+    huber: Similar function which this function approximates
+
+    Notes
+    -----
+    Like `huber`, `pseudo_huber` often serves as a robust loss function
+    in statistics or machine learning to reduce the influence of outliers.
+    Unlike `huber`, `pseudo_huber` is smooth.
+
+    Typically, `r` represents residuals, the difference
+    between a model prediction and data. Then, for :math:`|r|\leq\delta`,
+    `pseudo_huber` resembles the squared error and for :math:`|r|>\delta` the
+    absolute error. This way, the Pseudo-Huber loss often achieves
+    a fast convergence in model fitting for small residuals like the squared
+    error loss function and still reduces the influence of outliers
+    (:math:`|r|>\delta`) like the absolute error loss. As :math:`\delta` is
+    the cutoff between squared and absolute error regimes, it has
+    to be tuned carefully for each problem. `pseudo_huber` is also
+    convex, making it suitable for gradient based optimization. [1]_ [2]_
+
+    .. versionadded:: 0.15.0
+
+    References
+    ----------
+    .. [1] Hartley, Zisserman, "Multiple View Geometry in Computer Vision".
+           2003. Cambridge University Press. p. 619
+    .. [2] Charbonnier et al. "Deterministic edge-preserving regularization
+           in computed imaging". 1997. IEEE Trans. Image Processing.
+           6 (2): 298 - 311.
+
+    Examples
+    --------
+    Import all necessary modules.
+
+    >>> import numpy as np
+    >>> from scipy.special import pseudo_huber, huber
+    >>> import matplotlib.pyplot as plt
+
+    Calculate the function for ``delta=1`` at ``r=2``.
+
+    >>> pseudo_huber(1., 2.)
+    1.2360679774997898
+
+    Calculate the function at ``r=2`` for different `delta` by providing
+    a list or NumPy array for `delta`.
+
+    >>> pseudo_huber([1., 2., 4.], 3.)
+    array([2.16227766, 3.21110255, 4.        ])
+
+    Calculate the function for ``delta=1`` at several points by providing
+    a list or NumPy array for `r`.
+
+    >>> pseudo_huber(2., np.array([1., 1.5, 3., 4.]))
+    array([0.47213595, 1.        , 3.21110255, 4.94427191])
+
+    The function can be calculated for different `delta` and `r` by
+    providing arrays for both with compatible shapes for broadcasting.
+
+    >>> r = np.array([1., 2.5, 8., 10.])
+    >>> deltas = np.array([[1.], [5.], [9.]])
+    >>> print(r.shape, deltas.shape)
+    (4,) (3, 1)
+
+    >>> pseudo_huber(deltas, r)
+    array([[ 0.41421356,  1.6925824 ,  7.06225775,  9.04987562],
+           [ 0.49509757,  2.95084972, 22.16990566, 30.90169944],
+           [ 0.49846624,  3.06693762, 27.37435121, 40.08261642]])
+
+    Plot the function for different `delta`.
+
+    >>> x = np.linspace(-4, 4, 500)
+    >>> deltas = [1, 2, 3]
+    >>> linestyles = ["dashed", "dotted", "dashdot"]
+    >>> fig, ax = plt.subplots()
+    >>> combined_plot_parameters = list(zip(deltas, linestyles))
+    >>> for delta, style in combined_plot_parameters:
+    ...     ax.plot(x, pseudo_huber(delta, x), label=rf"$\delta={delta}$",
+    ...             ls=style)
+    >>> ax.legend(loc="upper center")
+    >>> ax.set_xlabel("$x$")
+    >>> ax.set_title(r"Pseudo-Huber loss function $h_{\delta}(x)$")
+    >>> ax.set_xlim(-4, 4)
+    >>> ax.set_ylim(0, 8)
+    >>> plt.show()
+
+    Finally, illustrate the difference between `huber` and `pseudo_huber` by
+    plotting them and their gradients with respect to `r`. The plot shows
+    that `pseudo_huber` is continuously differentiable while `huber` is not
+    at the points :math:`\pm\delta`.
+
+    >>> def huber_grad(delta, x):
+    ...     grad = np.copy(x)
+    ...     linear_area = np.argwhere(np.abs(x) > delta)
+    ...     grad[linear_area]=delta*np.sign(x[linear_area])
+    ...     return grad
+    >>> def pseudo_huber_grad(delta, x):
+    ...     return x* (1+(x/delta)**2)**(-0.5)
+    >>> x=np.linspace(-3, 3, 500)
+    >>> delta = 1.
+    >>> fig, ax = plt.subplots(figsize=(7, 7))
+    >>> ax.plot(x, huber(delta, x), label="Huber", ls="dashed")
+    >>> ax.plot(x, huber_grad(delta, x), label="Huber Gradient", ls="dashdot")
+    >>> ax.plot(x, pseudo_huber(delta, x), label="Pseudo-Huber", ls="dotted")
+    >>> ax.plot(x, pseudo_huber_grad(delta, x), label="Pseudo-Huber Gradient",
+    ...         ls="solid")
+    >>> ax.legend(loc="upper center")
+    >>> plt.show()
+    """)
+
+add_newdoc("psi",
+    """
+    psi(z, out=None)
+
+    The digamma function.
+
+    The logarithmic derivative of the gamma function evaluated at ``z``.
+
+    Parameters
+    ----------
+    z : array_like
+        Real or complex argument.
+    out : ndarray, optional
+        Array for the computed values of ``psi``.
+
+    Returns
+    -------
+    digamma : scalar or ndarray
+        Computed values of ``psi``.
+
+    Notes
+    -----
+    For large values not close to the negative real axis, ``psi`` is
+    computed using the asymptotic series (5.11.2) from [1]_. For small
+    arguments not close to the negative real axis, the recurrence
+    relation (5.5.2) from [1]_ is used until the argument is large
+    enough to use the asymptotic series. For values close to the
+    negative real axis, the reflection formula (5.5.4) from [1]_ is
+    used first. Note that ``psi`` has a family of zeros on the
+    negative real axis which occur between the poles at nonpositive
+    integers. Around the zeros the reflection formula suffers from
+    cancellation and the implementation loses precision. The sole
+    positive zero and the first negative zero, however, are handled
+    separately by precomputing series expansions using [2]_, so the
+    function should maintain full accuracy around the origin.
+
+    References
+    ----------
+    .. [1] NIST Digital Library of Mathematical Functions
+           https://dlmf.nist.gov/5
+    .. [2] Fredrik Johansson and others.
+           "mpmath: a Python library for arbitrary-precision floating-point arithmetic"
+           (Version 0.19) http://mpmath.org/
+
+    Examples
+    --------
+    >>> from scipy.special import psi
+    >>> z = 3 + 4j
+    >>> psi(z)
+    (1.55035981733341+1.0105022091860445j)
+
+    Verify psi(z) = psi(z + 1) - 1/z:
+
+    >>> psi(z + 1) - 1/z
+    (1.55035981733341+1.0105022091860445j)
+    """)
+
+add_newdoc("radian",
+    """
+    radian(d, m, s, out=None)
+
+    Convert from degrees to radians.
+
+    Returns the angle given in (d)egrees, (m)inutes, and (s)econds in
+    radians.
+
+    Parameters
+    ----------
+    d : array_like
+        Degrees, can be real-valued.
+    m : array_like
+        Minutes, can be real-valued.
+    s : array_like
+        Seconds, can be real-valued.
+    out : ndarray, optional
+        Optional output array for the function results.
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the inputs in radians.
+
+    Examples
+    --------
+    >>> import scipy.special as sc
+
+    There are many ways to specify an angle.
+
+    >>> sc.radian(90, 0, 0)
+    1.5707963267948966
+    >>> sc.radian(0, 60 * 90, 0)
+    1.5707963267948966
+    >>> sc.radian(0, 0, 60**2 * 90)
+    1.5707963267948966
+
+    The inputs can be real-valued.
+
+    >>> sc.radian(1.5, 0, 0)
+    0.02617993877991494
+    >>> sc.radian(1, 30, 0)
+    0.02617993877991494
+
+    """)
+
+add_newdoc("rel_entr",
+    r"""
+    rel_entr(x, y, out=None)
+
+    Elementwise function for computing relative entropy.
+
+    .. math::
+
+        \mathrm{rel\_entr}(x, y) =
+            \begin{cases}
+                x \log(x / y) & x > 0, y > 0 \\
+                0 & x = 0, y \ge 0 \\
+                \infty & \text{otherwise}
+            \end{cases}
+
+    Parameters
+    ----------
+    x, y : array_like
+        Input arrays
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        Relative entropy of the inputs
+
+    See Also
+    --------
+    entr, kl_div, scipy.stats.entropy
+
+    Notes
+    -----
+    .. versionadded:: 0.15.0
+
+    This function is jointly convex in x and y.
+
+    The origin of this function is in convex programming; see
+    [1]_. Given two discrete probability distributions :math:`p_1,
+    \ldots, p_n` and :math:`q_1, \ldots, q_n`, the definition of relative
+    entropy in the context of *information theory* is
+
+    .. math::
+
+        \sum_{i = 1}^n \mathrm{rel\_entr}(p_i, q_i).
+
+    To compute the latter quantity, use `scipy.stats.entropy`.
+
+    See [2]_ for details.
+
+    References
+    ----------
+    .. [1] Boyd, Stephen and Lieven Vandenberghe. *Convex optimization*.
+           Cambridge University Press, 2004.
+           :doi:`10.1017/CBO9780511804441`
+    .. [2] Kullback-Leibler divergence,
+           https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence
+
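+    Examples
+    --------
+    Summing the elementwise relative entropies of two discrete
+    distributions gives their KL divergence, which `scipy.stats.entropy`
+    computes directly:
+
+    >>> import numpy as np
+    >>> from scipy.special import rel_entr
+    >>> from scipy.stats import entropy
+    >>> p = np.array([0.5, 0.3, 0.2])
+    >>> q = np.array([0.4, 0.4, 0.2])
+    >>> bool(np.isclose(rel_entr(p, q).sum(), entropy(p, q)))
+    True
+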
+    """)
+
+add_newdoc("rgamma",
+    r"""
+    rgamma(z, out=None)
+
+    Reciprocal of the gamma function.
+
+    Defined as :math:`1 / \Gamma(z)`, where :math:`\Gamma` is the
+    gamma function. For more on the gamma function see `gamma`.
+
+    Parameters
+    ----------
+    z : array_like
+        Real or complex valued input
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        Function results
+
+    Notes
+    -----
+    The gamma function has no zeros and has simple poles at
+    nonpositive integers, so `rgamma` is an entire function with zeros
+    at the nonpositive integers. See the discussion in [dlmf]_ for
+    more details.
+
+    See Also
+    --------
+    gamma, gammaln, loggamma
+
+    References
+    ----------
+    .. [dlmf] NIST, Digital Library of Mathematical Functions,
+        https://dlmf.nist.gov/5.2#i
+
+    Examples
+    --------
+    >>> import scipy.special as sc
+
+    It is the reciprocal of the gamma function.
+
+    >>> sc.rgamma([1, 2, 3, 4])
+    array([1.        , 1.        , 0.5       , 0.16666667])
+    >>> 1 / sc.gamma([1, 2, 3, 4])
+    array([1.        , 1.        , 0.5       , 0.16666667])
+
+    It is zero at nonpositive integers.
+
+    >>> sc.rgamma([0, -1, -2, -3])
+    array([0., 0., 0., 0.])
+
+    It rapidly underflows to zero along the positive real axis.
+
+    >>> sc.rgamma([10, 100, 179])
+    array([2.75573192e-006, 1.07151029e-156, 0.00000000e+000])
+
+    """)
+
+add_newdoc("round",
+    """
+    round(x, out=None)
+
+    Round to the nearest integer.
+
+    Returns the nearest integer to `x`.  If `x` ends in 0.5 exactly,
+    the nearest even integer is chosen.
+
+    Parameters
+    ----------
+    x : array_like
+        Real valued input.
+    out : ndarray, optional
+        Optional output array for the function results.
+
+    Returns
+    -------
+    scalar or ndarray
+        The nearest integers to the elements of `x`. The result is of
+        floating type, not integer type.
+
+    Examples
+    --------
+    >>> import scipy.special as sc
+
+    It rounds to even.
+
+    >>> sc.round([0.5, 1.5])
+    array([0., 2.])
+
+    """)
+
+add_newdoc("shichi",
+    r"""
+    shichi(x, out=None)
+
+    Hyperbolic sine and cosine integrals.
+
+    The hyperbolic sine integral is
+
+    .. math::
+
+      \int_0^x \frac{\sinh{t}}{t}dt
+
+    and the hyperbolic cosine integral is
+
+    .. math::
+
+      \gamma + \log(x) + \int_0^x \frac{\cosh{t} - 1}{t} dt
+
+    where :math:`\gamma` is Euler's constant and :math:`\log` is the
+    principal branch of the logarithm [1]_.
+
+    Parameters
+    ----------
+    x : array_like
+        Real or complex points at which to compute the hyperbolic sine
+        and cosine integrals.
+    out : tuple of ndarray, optional
+        Optional output arrays for the function results
+
+    Returns
+    -------
+    si : scalar or ndarray
+        Hyperbolic sine integral at ``x``
+    ci : scalar or ndarray
+        Hyperbolic cosine integral at ``x``
+
+    See Also
+    --------
+    sici : Sine and cosine integrals.
+    exp1 : Exponential integral E1.
+    expi : Exponential integral Ei.
+
+    Notes
+    -----
+    For real arguments with ``x < 0``, ``chi`` is the real part of the
+    hyperbolic cosine integral. For such points ``chi(x)`` and ``chi(x
+    + 0j)`` differ by the additive constant ``1j*pi``.
+
+    For real arguments the function is computed by calling Cephes'
+    [2]_ *shichi* routine. For complex arguments the algorithm is based
+    on Mpmath's [3]_ *shi* and *chi* routines.
+
+    References
+    ----------
+    .. [1] Milton Abramowitz and Irene A. Stegun, eds.
+           Handbook of Mathematical Functions with Formulas,
+           Graphs, and Mathematical Tables. New York: Dover, 1972.
+           (See Section 5.2.)
+    .. [2] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+    .. [3] Fredrik Johansson and others.
+           "mpmath: a Python library for arbitrary-precision floating-point
+           arithmetic" (Version 0.19) http://mpmath.org/
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.special import shichi, sici
+
+    `shichi` accepts real or complex input:
+
+    >>> shichi(0.5)
+    (0.5069967498196671, -0.05277684495649357)
+    >>> shichi(0.5 + 2.5j)
+    ((0.11772029666668238+1.831091777729851j),
+     (0.29912435887648825+1.7395351121166562j))
+
+    The hyperbolic sine and cosine integrals Shi(z) and Chi(z) are
+    related to the sine and cosine integrals Si(z) and Ci(z) by
+
+    * Shi(z) = -i*Si(i*z)
+    * Chi(z) = Ci(-i*z) + i*pi/2
+
+    >>> z = 0.25 + 5j
+    >>> shi, chi = shichi(z)
+    >>> shi, -1j*sici(1j*z)[0]            # Should be the same.
+    ((-0.04834719325101729+1.5469354086921228j),
+     (-0.04834719325101729+1.5469354086921228j))
+    >>> chi, sici(-1j*z)[1] + 1j*np.pi/2  # Should be the same.
+    ((-0.19568708973868087+1.556276312103824j),
+     (-0.19568708973868087+1.556276312103824j))
+
+    Plot the functions evaluated on the real axis:
+
+    >>> xp = np.geomspace(1e-8, 4.0, 250)
+    >>> x = np.concatenate((-xp[::-1], xp))
+    >>> shi, chi = shichi(x)
+
+    >>> fig, ax = plt.subplots()
+    >>> ax.plot(x, shi, label='Shi(x)')
+    >>> ax.plot(x, chi, '--', label='Chi(x)')
+    >>> ax.set_xlabel('x')
+    >>> ax.set_title('Hyperbolic Sine and Cosine Integrals')
+    >>> ax.legend(shadow=True, framealpha=1, loc='lower right')
+    >>> ax.grid(True)
+    >>> plt.show()
+
+    """)
+
+add_newdoc("sici",
+    r"""
+    sici(x, out=None)
+
+    Sine and cosine integrals.
+
+    The sine integral is
+
+    .. math::
+
+      \int_0^x \frac{\sin{t}}{t}dt
+
+    and the cosine integral is
+
+    .. math::
+
+      \gamma + \log(x) + \int_0^x \frac{\cos{t} - 1}{t}dt
+
+    where :math:`\gamma` is Euler's constant and :math:`\log` is the
+    principal branch of the logarithm [1]_.
+
+    Parameters
+    ----------
+    x : array_like
+        Real or complex points at which to compute the sine and cosine
+        integrals.
+    out : tuple of ndarray, optional
+        Optional output arrays for the function results
+
+    Returns
+    -------
+    si : scalar or ndarray
+        Sine integral at ``x``
+    ci : scalar or ndarray
+        Cosine integral at ``x``
+
+    See Also
+    --------
+    shichi : Hyperbolic sine and cosine integrals.
+    exp1 : Exponential integral E1.
+    expi : Exponential integral Ei.
+
+    Notes
+    -----
+    For real arguments with ``x < 0``, ``ci`` is the real part of the
+    cosine integral. For such points ``ci(x)`` and ``ci(x + 0j)``
+    differ by the additive constant ``1j*pi``.
+
+    For real arguments the function is computed by calling Cephes'
+    [2]_ *sici* routine. For complex arguments the algorithm is based
+    on Mpmath's [3]_ *si* and *ci* routines.
+
+    References
+    ----------
+    .. [1] Milton Abramowitz and Irene A. Stegun, eds.
+           Handbook of Mathematical Functions with Formulas,
+           Graphs, and Mathematical Tables. New York: Dover, 1972.
+           (See Section 5.2.)
+    .. [2] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+    .. [3] Fredrik Johansson and others.
+           "mpmath: a Python library for arbitrary-precision floating-point
+           arithmetic" (Version 0.19) http://mpmath.org/
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.special import sici, exp1
+
+    `sici` accepts real or complex input:
+
+    >>> sici(2.5)
+    (1.7785201734438267, 0.2858711963653835)
+    >>> sici(2.5 + 3j)
+    ((4.505735874563953+0.06863305018999577j),
+    (0.0793644206906966-2.935510262937543j))
+
+    For z in the right half plane, the sine and cosine integrals are
+    related to the exponential integral E1 (implemented in SciPy as
+    `scipy.special.exp1`) by
+
+    * Si(z) = (E1(i*z) - E1(-i*z))/2i + pi/2
+    * Ci(z) = -(E1(i*z) + E1(-i*z))/2
+
+    See [1]_ (equations 5.2.21 and 5.2.23).
+
+    We can verify these relations:
+
+    >>> z = 2 - 3j
+    >>> sici(z)
+    ((4.54751388956229-1.3991965806460565j),
+    (1.408292501520851+2.9836177420296055j))
+
+    >>> (exp1(1j*z) - exp1(-1j*z))/2j + np.pi/2  # Same as sine integral
+    (4.54751388956229-1.3991965806460565j)
+
+    >>> -(exp1(1j*z) + exp1(-1j*z))/2            # Same as cosine integral
+    (1.408292501520851+2.9836177420296055j)
+
+    Plot the functions evaluated on the real axis; the dotted horizontal
+    lines are at pi/2 and -pi/2:
+
+    >>> x = np.linspace(-16, 16, 150)
+    >>> si, ci = sici(x)
+
+    >>> fig, ax = plt.subplots()
+    >>> ax.plot(x, si, label='Si(x)')
+    >>> ax.plot(x, ci, '--', label='Ci(x)')
+    >>> ax.legend(shadow=True, framealpha=1, loc='upper left')
+    >>> ax.set_xlabel('x')
+    >>> ax.set_title('Sine and Cosine Integrals')
+    >>> ax.axhline(np.pi/2, linestyle=':', alpha=0.5, color='k')
+    >>> ax.axhline(-np.pi/2, linestyle=':', alpha=0.5, color='k')
+    >>> ax.grid(True)
+    >>> plt.show()
+
+    """)
+
+add_newdoc("sindg",
+    """
+    sindg(x, out=None)
+
+    Sine of the angle `x` given in degrees.
+
+    Parameters
+    ----------
+    x : array_like
+        Angle, given in degrees.
+    out : ndarray, optional
+        Optional output array for the function results.
+
+    Returns
+    -------
+    scalar or ndarray
+        Sine at the input.
+
+    See Also
+    --------
+    cosdg, tandg, cotdg
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import scipy.special as sc
+
+    It is more accurate than using sine directly.
+
+    >>> x = 180 * np.arange(3)
+    >>> sc.sindg(x)
+    array([ 0., -0.,  0.])
+    >>> np.sin(x * np.pi / 180)
+    array([ 0.0000000e+00,  1.2246468e-16, -2.4492936e-16])
+
+    """)
+
+add_newdoc("smirnov",
+    r"""
+    smirnov(n, d, out=None)
+
+    Kolmogorov-Smirnov complementary cumulative distribution function
+
+    Returns the exact Kolmogorov-Smirnov complementary cumulative
+    distribution function (aka the Survival Function) of Dn+ (or Dn-)
+    for a one-sided test of equality between an empirical and a
+    theoretical distribution. It is equal to the probability that the
+    maximum difference between a theoretical distribution and an empirical
+    one based on `n` samples is greater than d.
+
+    Parameters
+    ----------
+    n : int
+      Number of samples
+    d : float array_like
+      Deviation between the Empirical CDF (ECDF) and the target CDF.
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        The value(s) of smirnov(n, d), Prob(Dn+ >= d) (Also Prob(Dn- >= d))
+
+    See Also
+    --------
+    smirnovi : The Inverse Survival Function for the distribution
+    scipy.stats.ksone : Provides the functionality as a continuous distribution
+    kolmogorov, kolmogi : Functions for the two-sided distribution
+
+    Notes
+    -----
+    `smirnov` is used by `stats.kstest` in the application of the
+    Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this
+    function is exposed in `scipy.special`, but the recommended way to achieve
+    the most accurate CDF/SF/PDF/PPF/ISF computations is to use the
+    `stats.ksone` distribution.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.special import smirnov
+    >>> from scipy.stats import norm
+
+    Show the probability of a gap at least as big as 0, 0.5 and 1.0 for a
+    sample of size 5.
+
+    >>> smirnov(5, [0, 0.5, 1.0])
+    array([ 1.   ,  0.056,  0.   ])
+
+    Compare a sample of size 5 against N(0, 1), the standard normal
+    distribution with mean 0 and standard deviation 1.
+
+    `x` is the sample.
+
+    >>> x = np.array([-1.392, -0.135, 0.114, 0.190, 1.82])
+
+    >>> target = norm(0, 1)
+    >>> cdfs = target.cdf(x)
+    >>> cdfs
+    array([0.0819612 , 0.44630594, 0.5453811 , 0.57534543, 0.9656205 ])
+
+    Construct the empirical CDF and the K-S statistics (Dn+, Dn-, Dn).
+
+    >>> n = len(x)
+    >>> ecdfs = np.arange(n+1, dtype=float)/n
+    >>> cols = np.column_stack([x, ecdfs[1:], cdfs, cdfs - ecdfs[:n],
+    ...                        ecdfs[1:] - cdfs])
+    >>> with np.printoptions(precision=3):
+    ...    print(cols)
+    [[-1.392  0.2    0.082  0.082  0.118]
+     [-0.135  0.4    0.446  0.246 -0.046]
+     [ 0.114  0.6    0.545  0.145  0.055]
+     [ 0.19   0.8    0.575 -0.025  0.225]
+     [ 1.82   1.     0.966  0.166  0.034]]
+    >>> gaps = cols[:, -2:]
+    >>> Dnpm = np.max(gaps, axis=0)
+    >>> print(f'Dn-={Dnpm[0]:f}, Dn+={Dnpm[1]:f}')
+    Dn-=0.246306, Dn+=0.224655
+    >>> probs = smirnov(n, Dnpm)
+    >>> print(f'For a sample of size {n} drawn from N(0, 1):',
+    ...       f' Smirnov n={n}: Prob(Dn- >= {Dnpm[0]:f}) = {probs[0]:.4f}',
+    ...       f' Smirnov n={n}: Prob(Dn+ >= {Dnpm[1]:f}) = {probs[1]:.4f}',
+    ...       sep='\n')
+    For a sample of size 5 drawn from N(0, 1):
+     Smirnov n=5: Prob(Dn- >= 0.246306) = 0.4711
+     Smirnov n=5: Prob(Dn+ >= 0.224655) = 0.5245
+
+    Plot the empirical CDF and the standard normal CDF.
+
+    >>> import matplotlib.pyplot as plt
+    >>> plt.step(np.concatenate(([-2.5], x, [2.5])),
+    ...          np.concatenate((ecdfs, [1])),
+    ...          where='post', label='Empirical CDF')
+    >>> xx = np.linspace(-2.5, 2.5, 100)
+    >>> plt.plot(xx, target.cdf(xx), '--', label='CDF for N(0, 1)')
+
+    Add vertical lines marking Dn+ and Dn-.
+
+    >>> iminus, iplus = np.argmax(gaps, axis=0)
+    >>> plt.vlines([x[iminus]], ecdfs[iminus], cdfs[iminus], color='r',
+    ...            alpha=0.5, lw=4)
+    >>> plt.vlines([x[iplus]], cdfs[iplus], ecdfs[iplus+1], color='m',
+    ...            alpha=0.5, lw=4)
+
+    >>> plt.grid(True)
+    >>> plt.legend(framealpha=1, shadow=True)
+    >>> plt.show()
+    """)
+
+add_newdoc("smirnovi",
+    """
+    smirnovi(n, p, out=None)
+
+    Inverse to `smirnov`
+
+    Returns `d` such that ``smirnov(n, d) == p``, the critical value
+    corresponding to `p`.
+
+    Parameters
+    ----------
+    n : int
+      Number of samples
+    p : float array_like
+        Probability
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        The value(s) of smirnovi(n, p), the critical values.
+
+    See Also
+    --------
+    smirnov : The Survival Function (SF) for the distribution
+    scipy.stats.ksone : Provides the functionality as a continuous distribution
+    kolmogorov, kolmogi : Functions for the two-sided distribution
+    scipy.stats.kstwobign : Two-sided Kolmogorov-Smirnov distribution, large n
+
+    Notes
+    -----
+    `smirnov` is used by `stats.kstest` in the application of the
+    Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this
+    function is exposed in `scipy.special`, but the recommended way to achieve
+    the most accurate CDF/SF/PDF/PPF/ISF computations is to use the
+    `stats.ksone` distribution.
+
+    Examples
+    --------
+    >>> from scipy.special import smirnovi, smirnov
+
+    >>> n = 24
+    >>> deviations = [0.1, 0.2, 0.3]
+
+    Use `smirnov` to compute the complementary CDF of the Smirnov
+    distribution for the given number of samples and deviations.
+
+    >>> p = smirnov(n, deviations)
+    >>> p
+    array([0.58105083, 0.12826832, 0.01032231])
+
+    The inverse function ``smirnovi(n, p)`` returns ``deviations``.
+
+    >>> smirnovi(n, p)
+    array([0.1, 0.2, 0.3])
+
+    """)
+
+add_newdoc("_smirnovc",
+    """
+    _smirnovc(n, d)
+     Internal function, do not use.
+    """)
+
+add_newdoc("_smirnovci",
+    """
+     Internal function, do not use.
+    """)
+
+add_newdoc("_smirnovp",
+    """
+    _smirnovp(n, p)
+     Internal function, do not use.
+    """)
+
+add_newdoc("spence",
+    r"""
+    spence(z, out=None)
+
+    Spence's function, also known as the dilogarithm.
+
+    It is defined to be
+
+    .. math::
+      \int_1^z \frac{\log(t)}{1 - t}dt
+
+    for complex :math:`z`, where the contour of integration is taken
+    to avoid the branch cut of the logarithm. Spence's function is
+    analytic everywhere except the negative real axis where it has a
+    branch cut.
+
+    Parameters
+    ----------
+    z : array_like
+        Points at which to evaluate Spence's function
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    s : scalar or ndarray
+        Computed values of Spence's function
+
+    Notes
+    -----
+    There is a different convention which defines Spence's function by
+    the integral
+
+    .. math::
+      -\int_0^z \frac{\log(1 - t)}{t}dt;
+
+    this is our ``spence(1 - z)``.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.special import spence
+    >>> import matplotlib.pyplot as plt
+
+    The function is defined for complex inputs:
+
+    >>> spence([1-1j, 1.5+2j, 3j, -10-5j])
+    array([-0.20561676+0.91596559j, -0.86766909-1.39560134j,
+           -0.59422064-2.49129918j, -1.14044398+6.80075924j])
+
+    For complex inputs on the branch cut, which is the negative real axis,
+    the function returns the limit for ``z`` with positive imaginary part.
+    For example, in the following, note the sign change of the imaginary
+    part of the output for ``z = -2`` and ``z = -2 - 1e-8j``:
+
+    >>> spence([-2 + 1e-8j, -2, -2 - 1e-8j])
+    array([2.32018041-3.45139229j, 2.32018042-3.4513923j ,
+           2.32018041+3.45139229j])
+
+    The function returns ``nan`` for real inputs on the branch cut:
+
+    >>> spence(-1.5)
+    nan
+
+    Verify some particular values: ``spence(0) = pi**2/6``,
+    ``spence(1) = 0`` and ``spence(2) = -pi**2/12``.
+
+    >>> spence([0, 1, 2])
+    array([ 1.64493407,  0.        , -0.82246703])
+    >>> np.pi**2/6, -np.pi**2/12
+    (1.6449340668482264, -0.8224670334241132)
+
+    Verify the identity::
+
+        spence(z) + spence(1 - z) = pi**2/6 - log(z)*log(1 - z)
+
+    >>> z = 3 + 4j
+    >>> spence(z) + spence(1 - z)
+    (-2.6523186143876067+1.8853470951513935j)
+    >>> np.pi**2/6 - np.log(z)*np.log(1 - z)
+    (-2.652318614387606+1.885347095151394j)
+
+    Plot the function for positive real input.
+
+    >>> fig, ax = plt.subplots()
+    >>> x = np.linspace(0, 6, 400)
+    >>> ax.plot(x, spence(x))
+    >>> ax.grid()
+    >>> ax.set_xlabel('x')
+    >>> ax.set_title('spence(x)')
+    >>> plt.show()
+    """)
+
+add_newdoc("stdtr",
+    """
+    stdtr(df, t, out=None)
+
+    Student t distribution cumulative distribution function
+
+    Returns the integral from minus infinity to t of the Student t
+    distribution with df > 0 degrees of freedom::
+
+       gamma((df+1)/2)/(sqrt(df*pi)*gamma(df/2)) *
+       integral((1+x**2/df)**(-df/2-1/2), x=-inf..t)
+
+    Parameters
+    ----------
+    df : array_like
+        Degrees of freedom
+    t : array_like
+        Upper bound of the integral
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        Value of the Student t CDF at t
+
+    See Also
+    --------
+    stdtridf : inverse of stdtr with respect to `df`
+    stdtrit : inverse of stdtr with respect to `t`
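+
+    Examples
+    --------
+    A quick consistency check (an added sketch; agreement is up to
+    floating point rounding): for ``df = 2`` the Student t CDF has the
+    closed form ``1/2 + t/(2*sqrt(t**2 + 2))``.
+
+    >>> import numpy as np
+    >>> from scipy.special import stdtr
+    >>> t = 1.0
+    >>> bool(np.isclose(stdtr(2, t), 0.5 + t/(2*np.sqrt(t**2 + 2))))
+    True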
+    """)
+
+add_newdoc("stdtridf",
+    """
+    stdtridf(p, t, out=None)
+
+    Inverse of `stdtr` vs df
+
+    Returns the argument df such that stdtr(df, t) is equal to `p`.
+
+    Parameters
+    ----------
+    p : array_like
+        Probability
+    t : array_like
+        Upper bound of the integral
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    df : scalar or ndarray
+        Value of `df` such that ``stdtr(df, t) == p``
+
+    See Also
+    --------
+    stdtr : Student t CDF
+    stdtrit : inverse of stdtr with respect to `t`
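+
+    Examples
+    --------
+    A round-trip sketch (an added example, checked only to floating point
+    accuracy): recover the degrees of freedom from a probability computed
+    with `stdtr`.
+
+    >>> import numpy as np
+    >>> from scipy.special import stdtr, stdtridf
+    >>> df, t = 3.5, 1.2
+    >>> p = stdtr(df, t)
+    >>> bool(np.isclose(stdtridf(p, t), df))
+    True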
+    """)
+
+add_newdoc("stdtrit",
+    """
+    stdtrit(df, p, out=None)
+
+    Inverse of `stdtr` vs `t`
+
+    Returns the argument `t` such that stdtr(df, t) is equal to `p`.
+
+    Parameters
+    ----------
+    df : array_like
+        Degrees of freedom
+    p : array_like
+        Probability
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    t : scalar or ndarray
+        Value of `t` such that ``stdtr(df, t) == p``
+
+    See Also
+    --------
+    stdtr : Student t CDF
+    stdtridf : inverse of stdtr with respect to `df`
+
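+    Examples
+    --------
+    A round-trip sketch analogous to the one for `stdtridf`: recover `t`
+    from a probability computed with `stdtr` (agreement is up to floating
+    point rounding).
+
+    >>> import numpy as np
+    >>> from scipy.special import stdtr, stdtrit
+    >>> df, t = 3.5, 1.2
+    >>> p = stdtr(df, t)
+    >>> bool(np.isclose(stdtrit(df, p), t))
+    True
+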
+    """)
+
+add_newdoc("struve",
+    r"""
+    struve(v, x, out=None)
+
+    Struve function.
+
+    Return the value of the Struve function of order `v` at `x`.  The Struve
+    function is defined as,
+
+    .. math::
+        H_v(x) = (x/2)^{v + 1} \sum_{n=0}^\infty \frac{(-1)^n (x/2)^{2n}}{\Gamma(n + \frac{3}{2}) \Gamma(n + v + \frac{3}{2})},
+
+    where :math:`\Gamma` is the gamma function.
+
+    Parameters
+    ----------
+    v : array_like
+        Order of the Struve function (float).
+    x : array_like
+        Argument of the Struve function (float; must be positive unless `v` is
+        an integer).
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    H : scalar or ndarray
+        Value of the Struve function of order `v` at `x`.
+
+    Notes
+    -----
+    Three methods discussed in [1]_ are used to evaluate the Struve function:
+
+    - power series
+    - expansion in Bessel functions (if :math:`|x| < |v| + 20`)
+    - asymptotic large-x expansion (if :math:`x \geq 0.7v + 12`)
+
+    Rounding errors are estimated based on the largest terms in the sums, and
+    the result associated with the smallest error is returned.
+
+    See also
+    --------
+    modstruve: Modified Struve function
+
+    References
+    ----------
+    .. [1] NIST Digital Library of Mathematical Functions
+           https://dlmf.nist.gov/11
+
+    Examples
+    --------
+    Calculate the Struve function of order 1 at 2.
+
+    >>> import numpy as np
+    >>> from scipy.special import struve
+    >>> import matplotlib.pyplot as plt
+    >>> struve(1, 2.)
+    0.6467637282835622
+
+    Calculate the Struve function at 2 for orders 1, 2 and 3 by providing
+    a list for the order parameter `v`.
+
+    >>> struve([1, 2, 3], 2.)
+    array([0.64676373, 0.28031806, 0.08363767])
+
+    Calculate the Struve function of order 1 for several points by providing
+    an array for `x`.
+
+    >>> points = np.array([2., 5., 8.])
+    >>> struve(1, points)
+    array([0.64676373, 0.80781195, 0.48811605])
+
+    Compute the Struve function for several orders at several points by
+    providing arrays for `v` and `x`. The arrays have to be broadcastable
+    to the correct shapes.
+
+    >>> orders = np.array([[1], [2], [3]])
+    >>> points.shape, orders.shape
+    ((3,), (3, 1))
+
+    >>> struve(orders, points)
+    array([[0.64676373, 0.80781195, 0.48811605],
+           [0.28031806, 1.56937455, 1.51769363],
+           [0.08363767, 1.50872065, 2.98697513]])
+
+    Plot the Struve functions of order 0 to 3 from -10 to 10.
+
+    >>> fig, ax = plt.subplots()
+    >>> x = np.linspace(-10., 10., 1000)
+    >>> for i in range(4):
+    ...     ax.plot(x, struve(i, x), label=f'$H_{i!r}$')
+    >>> ax.legend(ncol=2)
+    >>> ax.set_xlim(-10, 10)
+    >>> ax.set_title(r"Struve functions $H_{\nu}$")
+    >>> plt.show()
+    """)
+
+add_newdoc("tandg",
+    """
+    tandg(x, out=None)
+
+    Tangent of angle `x` given in degrees.
+
+    Parameters
+    ----------
+    x : array_like
+        Angle, given in degrees.
+    out : ndarray, optional
+        Optional output array for the function results.
+
+    Returns
+    -------
+    scalar or ndarray
+        Tangent at the input.
+
+    See Also
+    --------
+    sindg, cosdg, cotdg
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import scipy.special as sc
+
+    It is more accurate than converting the angle to radians and using
+    ``np.tan`` directly.
+
+    >>> x = 180 * np.arange(3)
+    >>> sc.tandg(x)
+    array([0., 0., 0.])
+    >>> np.tan(x * np.pi / 180)
+    array([ 0.0000000e+00, -1.2246468e-16, -2.4492936e-16])
+
+    """)
+
+add_newdoc("tklmbda",
+    """
+    tklmbda(x, lmbda, out=None)
+
+    Tukey-Lambda cumulative distribution function
+
+    Parameters
+    ----------
+    x, lmbda : array_like
+        Parameters
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    cdf : scalar or ndarray
+        Value of the Tukey-Lambda CDF
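+
+    Examples
+    --------
+    A quick check (an added sketch): for ``lmbda = 0`` the Tukey-Lambda
+    distribution reduces to the logistic distribution, whose CDF is
+    ``1/(1 + exp(-x))``.
+
+    >>> import numpy as np
+    >>> from scipy.special import tklmbda
+    >>> x = 1.5
+    >>> bool(np.isclose(tklmbda(x, 0), 1/(1 + np.exp(-x))))
+    True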
+    """)
+
+add_newdoc("wofz",
+    """
+    wofz(z, out=None)
+
+    Faddeeva function
+
+    Returns the value of the Faddeeva function for complex argument::
+
+        exp(-z**2) * erfc(-i*z)
+
+    Parameters
+    ----------
+    z : array_like
+        complex argument
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        Value of the Faddeeva function
+
+    See Also
+    --------
+    dawsn, erf, erfc, erfcx, erfi
+
+    References
+    ----------
+    .. [1] Steven G. Johnson, Faddeeva W function implementation.
+       http://ab-initio.mit.edu/Faddeeva
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import special
+    >>> import matplotlib.pyplot as plt
+
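+    First verify the defining relation at a modest complex argument,
+    where neither factor under- or overflows (an added sketch; equality
+    holds to floating point rounding):
+
+    >>> zz = 0.5 + 0.5j
+    >>> bool(np.isclose(special.wofz(zz), np.exp(-zz**2) * special.erfc(-1j*zz)))
+    True
+
+    Plot the real and imaginary parts along the real axis:
+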
+    >>> x = np.linspace(-3, 3)
+    >>> z = special.wofz(x)
+
+    >>> plt.plot(x, z.real, label='wofz(x).real')
+    >>> plt.plot(x, z.imag, label='wofz(x).imag')
+    >>> plt.xlabel('$x$')
+    >>> plt.legend(framealpha=1, shadow=True)
+    >>> plt.grid(alpha=0.25)
+    >>> plt.show()
+
+    """)
+
+add_newdoc("xlogy",
+    """
+    xlogy(x, y, out=None)
+
+    Compute ``x*log(y)`` so that the result is 0 if ``x = 0``.
+
+    Parameters
+    ----------
+    x : array_like
+        Multiplier
+    y : array_like
+        Argument
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    z : scalar or ndarray
+        Computed x*log(y)
+
+    Notes
+    -----
+
+    .. versionadded:: 0.13.0
+
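+    Examples
+    --------
+    A short sketch of the special-case handling: a plain ``0*np.log(0)``
+    evaluates to ``nan``, while `xlogy` returns 0 by definition.
+
+    >>> from scipy.special import xlogy
+    >>> xlogy(0, 0)
+    0.0
+    >>> xlogy(2, 1)
+    0.0
+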
+    """)
+
+add_newdoc("xlog1py",
+    """
+    xlog1py(x, y, out=None)
+
+    Compute ``x*log1p(y)`` so that the result is 0 if ``x = 0``.
+
+    Parameters
+    ----------
+    x : array_like
+        Multiplier
+    y : array_like
+        Argument
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    z : scalar or ndarray
+        Computed x*log1p(y)
+
+    Notes
+    -----
+
+    .. versionadded:: 0.13.0
+
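+    Examples
+    --------
+    A short sketch mirroring `xlogy`: ``log1p(-1)`` is ``-inf``, so a
+    plain product would be ``nan``, while `xlog1py` returns 0 when the
+    multiplier is 0.
+
+    >>> from scipy.special import xlog1py
+    >>> xlog1py(0, -1)
+    0.0
+    >>> xlog1py(3, 0)
+    0.0
+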
+    """)
+
+add_newdoc("y0",
+    r"""
+    y0(x, out=None)
+
+    Bessel function of the second kind of order 0.
+
+    Parameters
+    ----------
+    x : array_like
+        Argument (float).
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    Y : scalar or ndarray
+        Value of the Bessel function of the second kind of order 0 at `x`.
+
+    Notes
+    -----
+
+    The domain is divided into the intervals [0, 5] and (5, infinity). In the
+    first interval a rational approximation :math:`R(x)` is employed to
+    compute,
+
+    .. math::
+
+        Y_0(x) = R(x) + \frac{2 \log(x) J_0(x)}{\pi},
+
+    where :math:`J_0` is the Bessel function of the first kind of order 0.
+
+    In the second interval, the Hankel asymptotic expansion is employed with
+    two rational functions of degree 6/6 and 7/7.
+
+    This function is a wrapper for the Cephes [1]_ routine `y0`.
+
+    See also
+    --------
+    j0: Bessel function of the first kind of order 0
+    yv: Bessel function of the second kind of real order
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+
+    Examples
+    --------
+    Calculate the function at one point:
+
+    >>> from scipy.special import y0
+    >>> y0(1.)
+    0.08825696421567697
+
+    Calculate at several points:
+
+    >>> import numpy as np
+    >>> y0(np.array([0.5, 2., 3.]))
+    array([-0.44451873,  0.51037567,  0.37685001])
+
+    Plot the function from 0 to 10.
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots()
+    >>> x = np.linspace(0., 10., 1000)
+    >>> y = y0(x)
+    >>> ax.plot(x, y)
+    >>> plt.show()
+
+    """)
+
+add_newdoc("y1",
+    """
+    y1(x, out=None)
+
+    Bessel function of the second kind of order 1.
+
+    Parameters
+    ----------
+    x : array_like
+        Argument (float).
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    Y : scalar or ndarray
+        Value of the Bessel function of the second kind of order 1 at `x`.
+
+    Notes
+    -----
+
+    The domain is divided into the intervals [0, 8] and (8, infinity). In the
+    first interval a 25-term Chebyshev expansion is used, and computing
+    :math:`J_1` (the Bessel function of the first kind) is required. In the
+    second, the asymptotic trigonometric representation is employed using two
+    rational functions of degree 5/5.
+
+    This function is a wrapper for the Cephes [1]_ routine `y1`.
+
+    See also
+    --------
+    j1: Bessel function of the first kind of order 1
+    yn: Bessel function of the second kind
+    yv: Bessel function of the second kind
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+
+    Examples
+    --------
+    Calculate the function at one point:
+
+    >>> from scipy.special import y1
+    >>> y1(1.)
+    -0.7812128213002888
+
+    Calculate at several points:
+
+    >>> import numpy as np
+    >>> y1(np.array([0.5, 2., 3.]))
+    array([-1.47147239, -0.10703243,  0.32467442])
+
+    Plot the function from 0 to 10.
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots()
+    >>> x = np.linspace(0., 10., 1000)
+    >>> y = y1(x)
+    >>> ax.plot(x, y)
+    >>> plt.show()
+
+    """)
+
+add_newdoc("yn",
+    r"""
+    yn(n, x, out=None)
+
+    Bessel function of the second kind of integer order and real argument.
+
+    Parameters
+    ----------
+    n : array_like
+        Order (integer).
+    x : array_like
+        Argument (float).
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    Y : scalar or ndarray
+        Value of the Bessel function, :math:`Y_n(x)`.
+
+    Notes
+    -----
+    Wrapper for the Cephes [1]_ routine `yn`.
+
+    The function is evaluated by forward recurrence on `n`, starting with
+    values computed by the Cephes routines `y0` and `y1`. If `n = 0` or 1,
+    the routine for `y0` or `y1` is called directly.
+
+    See also
+    --------
+    yv : For real order and real or complex argument.
+    y0: faster implementation of this function for order 0
+    y1: faster implementation of this function for order 1
+
+    References
+    ----------
+    .. [1] Cephes Mathematical Functions Library,
+           http://www.netlib.org/cephes/
+
+    Examples
+    --------
+    Evaluate the function of order 0 at one point.
+
+    >>> from scipy.special import yn
+    >>> yn(0, 1.)
+    0.08825696421567697
+
+    Evaluate the function at one point for different orders.
+
+    >>> yn(0, 1.), yn(1, 1.), yn(2, 1.)
+    (0.08825696421567697, -0.7812128213002888, -1.6506826068162546)
+
+    The evaluation for different orders can be carried out in one call by
+    providing a list or NumPy array as argument for the `n` parameter:
+
+    >>> yn([0, 1, 2], 1.)
+    array([ 0.08825696, -0.78121282, -1.65068261])
+
+    Evaluate the function at several points for order 0 by providing an
+    array for `x`.
+
+    >>> import numpy as np
+    >>> points = np.array([0.5, 3., 8.])
+    >>> yn(0, points)
+    array([-0.44451873,  0.37685001,  0.22352149])
+
+    If `x` is an array, the order parameter `n` must be broadcastable to
+    the correct shape if different orders are to be computed in one call.
+    To calculate the orders 0 and 1 for a 1D array:
+
+    >>> orders = np.array([[0], [1]])
+    >>> orders.shape
+    (2, 1)
+
+    >>> yn(orders, points)
+    array([[-0.44451873,  0.37685001,  0.22352149],
+           [-1.47147239,  0.32467442, -0.15806046]])
+
+    Plot the functions of order 0 to 3 from 0 to 10.
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots()
+    >>> x = np.linspace(0., 10., 1000)
+    >>> for i in range(4):
+    ...     ax.plot(x, yn(i, x), label=f'$Y_{i!r}$')
+    >>> ax.set_ylim(-3, 1)
+    >>> ax.legend()
+    >>> plt.show()
+    """)
+
+add_newdoc("yv",
+    r"""
+    yv(v, z, out=None)
+
+    Bessel function of the second kind of real order and complex argument.
+
+    Parameters
+    ----------
+    v : array_like
+        Order (float).
+    z : array_like
+        Argument (float or complex).
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    Y : scalar or ndarray
+        Value of the Bessel function of the second kind, :math:`Y_v(x)`.
+
+    Notes
+    -----
+    For positive `v` values, the computation is carried out using the
+    AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel
+    Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`,
+
+    .. math:: Y_v(z) = \frac{1}{2\imath} (H_v^{(1)} - H_v^{(2)}).
+
+    For negative `v` values the formula,
+
+    .. math:: Y_{-v}(z) = Y_v(z) \cos(\pi v) + J_v(z) \sin(\pi v)
+
+    is used, where :math:`J_v(z)` is the Bessel function of the first kind,
+    computed using the AMOS routine `zbesj`.  Note that the second term is
+    exactly zero for integer `v`; to improve accuracy the second term is
+    explicitly omitted for `v` values such that `v = floor(v)`.
+
+    See also
+    --------
+    yve : :math:`Y_v` with leading exponential behavior stripped off.
+    y0: faster implementation of this function for order 0
+    y1: faster implementation of this function for order 1
+
+    References
+    ----------
+    .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
+           of a Complex Argument and Nonnegative Order",
+           http://netlib.org/amos/
+
+    Examples
+    --------
+    Evaluate the function of order 0 at one point.
+
+    >>> from scipy.special import yv
+    >>> yv(0, 1.)
+    0.088256964215677
+
+    Evaluate the function at one point for different orders.
+
+    >>> yv(0, 1.), yv(1, 1.), yv(1.5, 1.)
+    (0.088256964215677, -0.7812128213002889, -1.102495575160179)
+
+    The evaluation for different orders can be carried out in one call by
+    providing a list or NumPy array as argument for the `v` parameter:
+
+    >>> yv([0, 1, 1.5], 1.)
+    array([ 0.08825696, -0.78121282, -1.10249558])
+
+    Evaluate the function at several points for order 0 by providing an
+    array for `z`.
+
+    >>> import numpy as np
+    >>> points = np.array([0.5, 3., 8.])
+    >>> yv(0, points)
+    array([-0.44451873,  0.37685001,  0.22352149])
+
+    If `z` is an array, the order parameter `v` must be broadcastable to
+    the correct shape if different orders are to be computed in one call.
+    To calculate the orders 0 and 1 for a 1D array:
+
+    >>> orders = np.array([[0], [1]])
+    >>> orders.shape
+    (2, 1)
+
+    >>> yv(orders, points)
+    array([[-0.44451873,  0.37685001,  0.22352149],
+           [-1.47147239,  0.32467442, -0.15806046]])
+
+    Plot the functions of order 0 to 3 from 0 to 10.
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots()
+    >>> x = np.linspace(0., 10., 1000)
+    >>> for i in range(4):
+    ...     ax.plot(x, yv(i, x), label=f'$Y_{i!r}$')
+    >>> ax.set_ylim(-3, 1)
+    >>> ax.legend()
+    >>> plt.show()
+
+    """)
+
+add_newdoc("yve",
+    r"""
+    yve(v, z, out=None)
+
+    Exponentially scaled Bessel function of the second kind of real order.
+
+    Returns the exponentially scaled Bessel function of the second
+    kind of real order `v` at complex `z`::
+
+        yve(v, z) = yv(v, z) * exp(-abs(z.imag))
+
+    Parameters
+    ----------
+    v : array_like
+        Order (float).
+    z : array_like
+        Argument (float or complex).
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    Y : scalar or ndarray
+        Value of the exponentially scaled Bessel function.
+
+    See Also
+    --------
+    yv: Unscaled Bessel function of the second kind of real order.
+
+    Notes
+    -----
+    For positive `v` values, the computation is carried out using the
+    AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel
+    Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`,
+
+    .. math:: Y_v(z) = \frac{1}{2\imath} (H_v^{(1)} - H_v^{(2)}).
+
+    For negative `v` values the formula,
+
+    .. math:: Y_{-v}(z) = Y_v(z) \cos(\pi v) + J_v(z) \sin(\pi v)
+
+    is used, where :math:`J_v(z)` is the Bessel function of the first kind,
+    computed using the AMOS routine `zbesj`.  Note that the second term is
+    exactly zero for integer `v`; to improve accuracy the second term is
+    explicitly omitted for `v` values such that `v = floor(v)`.
+
+    Exponentially scaled Bessel functions are useful for large `z`:
+    for these, the unscaled Bessel functions can easily under- or overflow.
+
+    References
+    ----------
+    .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
+           of a Complex Argument and Nonnegative Order",
+           http://netlib.org/amos/
+
+    Examples
+    --------
+    Compare the output of `yv` and `yve` for a large complex argument `z`
+    by computing their values for order ``v=1`` at ``z=1000j``. We see that
+    `yv` returns nan but `yve` returns a finite number:
+
+    >>> import numpy as np
+    >>> from scipy.special import yv, yve
+    >>> v = 1
+    >>> z = 1000j
+    >>> yv(v, z), yve(v, z)
+    ((nan+nanj), (-0.012610930256928629+7.721967686709076e-19j))
+
+    For real arguments `z`, `yve` returns the same as `yv` up to
+    floating point errors.
+
+    >>> v, z = 1, 1000
+    >>> yv(v, z), yve(v, z)
+    (-0.02478433129235178, -0.02478433129235179)
+
+    The function can be evaluated for several orders at the same time by
+    providing a list or NumPy array for `v`:
+
+    >>> yve([1, 2, 3], 1j)
+    array([-0.20791042+0.14096627j,  0.38053618-0.04993878j,
+           0.00815531-1.66311097j])
+
+    In the same way, the function can be evaluated at several points in one
+    call by providing a list or NumPy array for `z`:
+
+    >>> yve(1, np.array([1j, 2j, 3j]))
+    array([-0.20791042+0.14096627j, -0.21526929+0.01205044j,
+           -0.19682671+0.00127278j])
+
+    It is also possible to evaluate several orders at several points
+    at the same time by providing arrays for `v` and `z` with
+    broadcasting compatible shapes. Compute `yve` for two different orders
+    `v` and three points `z` resulting in a 2x3 array.
+
+    >>> v = np.array([[1], [2]])
+    >>> z = np.array([3j, 4j, 5j])
+    >>> v.shape, z.shape
+    ((2, 1), (3,))
+
+    >>> yve(v, z)
+    array([[-1.96826713e-01+1.27277544e-03j, -1.78750840e-01+1.45558819e-04j,
+            -1.63972267e-01+1.73494110e-05j],
+           [1.94960056e-03-1.11782545e-01j,  2.02902325e-04-1.17626501e-01j,
+            2.27727687e-05-1.17951906e-01j]])
+    """)
+
+add_newdoc("_zeta",
+    """
+    _zeta(x, q)
+
+    Internal function, Hurwitz zeta.
+
+    """)
+
+add_newdoc("zetac",
+    """
+    zetac(x, out=None)
+
+    Riemann zeta function minus 1.
+
+    This function is defined as
+
+    .. math:: \\zeta(x) - 1 = \\sum_{k=2}^{\\infty} 1 / k^x,
+
+    where ``x > 1``.  For ``x < 1`` the analytic continuation is
+    computed. For more information on the Riemann zeta function, see
+    [dlmf]_.
+
+    Parameters
+    ----------
+    x : array_like of float
+        Values at which to compute zeta(x) - 1 (must be real).
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of zeta(x) - 1.
+
+    See Also
+    --------
+    zeta
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.special import zetac, zeta
+
+    Some special values:
+
+    >>> zetac(2), np.pi**2/6 - 1
+    (0.64493406684822641, 0.6449340668482264)
+
+    >>> zetac(-1), -1.0/12 - 1
+    (-1.0833333333333333, -1.0833333333333333)
+
+    Compare ``zetac(x)`` to ``zeta(x) - 1`` for large `x`:
+
+    >>> zetac(60), zeta(60) - 1
+    (8.673617380119933e-19, 0.0)
+
+    References
+    ----------
+    .. [dlmf] NIST Digital Library of Mathematical Functions
+              https://dlmf.nist.gov/25
+
+    """)
+
+add_newdoc("_riemann_zeta",
+    """
+    Internal function, use `zeta` instead.
+    """)
+
+add_newdoc("_struve_asymp_large_z",
+    """
+    _struve_asymp_large_z(v, z, is_h)
+
+    Internal function for testing `struve` & `modstruve`
+
+    Evaluates using asymptotic expansion
+
+    Returns
+    -------
+    v, err
+    """)
+
+add_newdoc("_struve_power_series",
+    """
+    _struve_power_series(v, z, is_h)
+
+    Internal function for testing `struve` & `modstruve`
+
+    Evaluates using power series
+
+    Returns
+    -------
+    v, err
+    """)
+
+add_newdoc("_struve_bessel_series",
+    """
+    _struve_bessel_series(v, z, is_h)
+
+    Internal function for testing `struve` & `modstruve`
+
+    Evaluates using Bessel function series
+
+    Returns
+    -------
+    v, err
+    """)
+
+add_newdoc("_spherical_jn",
+    """
+    Internal function, use `spherical_jn` instead.
+    """)
+
+add_newdoc("_spherical_jn_d",
+    """
+    Internal function, use `spherical_jn` instead.
+    """)
+
+add_newdoc("_spherical_yn",
+    """
+    Internal function, use `spherical_yn` instead.
+    """)
+
+add_newdoc("_spherical_yn_d",
+    """
+    Internal function, use `spherical_yn` instead.
+    """)
+
+add_newdoc("_spherical_in",
+    """
+    Internal function, use `spherical_in` instead.
+    """)
+
+add_newdoc("_spherical_in_d",
+    """
+    Internal function, use `spherical_in` instead.
+    """)
+
+add_newdoc("_spherical_kn",
+    """
+    Internal function, use `spherical_kn` instead.
+    """)
+
+add_newdoc("_spherical_kn_d",
+    """
+    Internal function, use `spherical_kn` instead.
+    """)
+
+add_newdoc("loggamma",
+    r"""
+    loggamma(z, out=None)
+
+    Principal branch of the logarithm of the gamma function.
+
+    Defined to be :math:`\log(\Gamma(x))` for :math:`x > 0` and
+    extended to the complex plane by analytic continuation. The
+    function has a single branch cut on the negative real axis.
+
+    .. versionadded:: 0.18.0
+
+    Parameters
+    ----------
+    z : array_like
+        Values in the complex plane at which to compute ``loggamma``
+    out : ndarray, optional
+        Output array for computed values of ``loggamma``
+
+    Returns
+    -------
+    loggamma : scalar or ndarray
+        Values of ``loggamma`` at z.
+
+    Notes
+    -----
+    It is not generally true that :math:`\log\Gamma(z) =
+    \log(\Gamma(z))`, though the real parts of the functions do
+    agree. The benefit of not defining `loggamma` as
+    :math:`\log(\Gamma(z))` is that the latter function has a
+    complicated branch cut structure whereas `loggamma` is analytic
+    except for on the negative real axis.
+
+    The identities
+
+    .. math::
+      \exp(\log\Gamma(z)) &= \Gamma(z) \\
+      \log\Gamma(z + 1) &= \log(z) + \log\Gamma(z)
+
+    make `loggamma` useful for working in complex logspace.
+
+    On the real line `loggamma` is related to `gammaln` via
+    ``exp(loggamma(x + 0j)) = gammasgn(x)*exp(gammaln(x))``, up to
+    rounding error.
+
+    The implementation here is based on [hare1997]_.
+
+    See also
+    --------
+    gammaln : logarithm of the absolute value of the gamma function
+    gammasgn : sign of the gamma function
+
+    References
+    ----------
+    .. [hare1997] D.E.G. Hare,
+      *Computing the Principal Branch of log-Gamma*,
+      Journal of Algorithms, Volume 25, Issue 2, November 1997, pages 221-236.
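+
+    Examples
+    --------
+    A brief sketch of the identities above (agreement is up to floating
+    point rounding):
+
+    >>> import numpy as np
+    >>> from scipy.special import gamma, loggamma
+    >>> bool(np.isclose(loggamma(5.0), np.log(24.0)))  # Gamma(5) = 4! = 24
+    True
+
+    For a complex argument off the negative real axis,
+    ``exp(loggamma(z))`` recovers ``gamma(z)``:
+
+    >>> z = 2.5 + 1.0j
+    >>> bool(np.isclose(np.exp(loggamma(z)), gamma(z)))
+    True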
+    """)
+
+add_newdoc("_sinpi",
+    """
+    Internal function, do not use.
+    """)
+
+add_newdoc("_cospi",
+    """
+    Internal function, do not use.
+    """)
+
+add_newdoc("owens_t",
+    """
+    owens_t(h, a, out=None)
+
+    Owen's T Function.
+
+    The function T(h, a) gives the probability of the event
+    (X > h and 0 < Y < a * X) where X and Y are independent
+    standard normal random variables.
+
+    Parameters
+    ----------
+    h : array_like
+        Input value.
+    a : array_like
+        Input value.
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    t : scalar or ndarray
+        Probability of the event (X > h and 0 < Y < a * X),
+        where X and Y are independent standard normal random variables.
+
+    Examples
+    --------
+    >>> from scipy import special
+    >>> a = 3.5
+    >>> h = 0.78
+    >>> special.owens_t(h, a)
+    0.10877216734852274
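+
+    Owen's T function is odd in `a` and even in `h`, which gives a quick
+    consistency check (an added sketch; equality is up to floating point
+    rounding):
+
+    >>> import numpy as np
+    >>> bool(np.isclose(special.owens_t(h, -a), -special.owens_t(h, a)))
+    True
+    >>> bool(np.isclose(special.owens_t(-h, a), special.owens_t(h, a)))
+    True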
+
+    References
+    ----------
+    .. [1] M. Patefield and D. Tandy, "Fast and accurate calculation of
+           Owen's T Function", Statistical Software vol. 5, pp. 1-25, 2000.
+    """)
+
+add_newdoc("_factorial",
+    """
+    Internal function, do not use.
+    """)
+
+add_newdoc("wright_bessel",
+    r"""
+    wright_bessel(a, b, x, out=None)
+
+    Wright's generalized Bessel function.
+
+    Wright's generalized Bessel function is an entire function and defined as
+
+    .. math:: \Phi(a, b; x) = \sum_{k=0}^\infty \frac{x^k}{k! \Gamma(a k + b)}
+
+    See also [1]_.
+
+    Parameters
+    ----------
+    a : array_like of float
+        a >= 0
+    b : array_like of float
+        b >= 0
+    x : array_like of float
+        x >= 0
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        Value of the Wright's generalized Bessel function
+
+    Notes
+    -----
+    Due to the complexity of the function with its three parameters, only
+    non-negative arguments are implemented.
+
+    Examples
+    --------
+    >>> from scipy.special import wright_bessel
+    >>> a, b, x = 1.5, 1.1, 2.5
+    >>> wright_bessel(a, b-1, x)
+    4.5314465939443025
+
+    Now, let us verify the relation
+
+    .. math:: \Phi(a, b-1; x) = a x \Phi(a, b+a; x) + (b-1) \Phi(a, b; x)
+
+    >>> a * x * wright_bessel(a, b+a, x) + (b-1) * wright_bessel(a, b, x)
+    4.5314465939443025
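+
+    As a further sanity check (an added sketch using a known special
+    case): for ``a = b = 1`` the series collapses to
+    :math:`\sum_{k=0}^\infty x^k/(k!)^2 = I_0(2 \sqrt{x})`, the modified
+    Bessel function of the first kind of order 0.
+
+    >>> import numpy as np
+    >>> from scipy.special import i0
+    >>> bool(np.isclose(wright_bessel(1., 1., x), i0(2*np.sqrt(x))))
+    True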
+
+    References
+    ----------
+    .. [1] Digital Library of Mathematical Functions, 10.46.
+           https://dlmf.nist.gov/10.46.E1
+    """)
+
+
+add_newdoc("ndtri_exp",
+    r"""
+    ndtri_exp(y, out=None)
+
+    Inverse of `log_ndtr` vs x. Allows for greater precision than
+    `ndtri` composed with `numpy.exp` for very negative values of `y` and
+    for values of `y` close to 0.
+
+    Parameters
+    ----------
+    y : array_like of float
+        Function argument
+    out : ndarray, optional
+        Optional output array for the function results
+
+    Returns
+    -------
+    scalar or ndarray
+        Inverse of the log CDF of the standard normal distribution, evaluated
+        at y.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import scipy.special as sc
+
+    `ndtri_exp` agrees with the naive implementation when the latter does
+    not suffer from underflow.
+
+    >>> sc.ndtri_exp(-1)
+    -0.33747496376420244
+    >>> sc.ndtri(np.exp(-1))
+    -0.33747496376420244
+
+    For extreme values of y, the naive approach fails
+
+    >>> sc.ndtri(np.exp(-800))
+    -inf
+    >>> sc.ndtri(np.exp(-1e-20))
+    inf
+
+    whereas `ndtri_exp` is still able to compute the result to high precision.
+
+    >>> sc.ndtri_exp(-800)
+    -39.88469483825668
+    >>> sc.ndtri_exp(-1e-20)
+    9.262340089798409
+
+    See Also
+    --------
+    log_ndtr, ndtri, ndtr
+    """)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/_basic.py b/__packaged__/coreml/.python_dependencies/scipy/special/_basic.py
new file mode 100644
index 00000000..f852b9c1
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/_basic.py
@@ -0,0 +1,3020 @@
+#
+# Author:  Travis Oliphant, 2002
+#
+
+import operator
+import numpy as np
+import math
+import warnings
+from numpy import (pi, asarray, floor, isscalar, iscomplex, real,
+                   imag, sqrt, where, mgrid, sin, place, issubdtype,
+                   extract, inexact, nan, zeros, sinc)
+from . import _ufuncs
+from ._ufuncs import (mathieu_a, mathieu_b, iv, jv, gamma,
+                      psi, hankel1, hankel2, yv, kv, poch, binom)
+from . import _specfun
+from ._comb import _comb_int
+
+
+__all__ = [
+    'ai_zeros',
+    'assoc_laguerre',
+    'bei_zeros',
+    'beip_zeros',
+    'ber_zeros',
+    'bernoulli',
+    'berp_zeros',
+    'bi_zeros',
+    'clpmn',
+    'comb',
+    'digamma',
+    'diric',
+    'erf_zeros',
+    'euler',
+    'factorial',
+    'factorial2',
+    'factorialk',
+    'fresnel_zeros',
+    'fresnelc_zeros',
+    'fresnels_zeros',
+    'h1vp',
+    'h2vp',
+    'ivp',
+    'jn_zeros',
+    'jnjnp_zeros',
+    'jnp_zeros',
+    'jnyn_zeros',
+    'jvp',
+    'kei_zeros',
+    'keip_zeros',
+    'kelvin_zeros',
+    'ker_zeros',
+    'kerp_zeros',
+    'kvp',
+    'lmbda',
+    'lpmn',
+    'lpn',
+    'lqmn',
+    'lqn',
+    'mathieu_even_coef',
+    'mathieu_odd_coef',
+    'obl_cv_seq',
+    'pbdn_seq',
+    'pbdv_seq',
+    'pbvv_seq',
+    'perm',
+    'polygamma',
+    'pro_cv_seq',
+    'riccati_jn',
+    'riccati_yn',
+    'sinc',
+    'y0_zeros',
+    'y1_zeros',
+    'y1p_zeros',
+    'yn_zeros',
+    'ynp_zeros',
+    'yvp',
+    'zeta'
+]
+
+
+def _nonneg_int_or_fail(n, var_name, strict=True):
+    try:
+        if strict:
+            # Raises an exception if float
+            n = operator.index(n)
+        elif n == floor(n):
+            n = int(n)
+        else:
+            raise ValueError()
+        if n < 0:
+            raise ValueError()
+    except (ValueError, TypeError) as err:
+        raise err.__class__("{} must be a non-negative integer".format(var_name)) from err
+    return n
+
+
+def diric(x, n):
+    """Periodic sinc function, also called the Dirichlet function.
+
+    The Dirichlet function is defined as::
+
+        diric(x, n) = sin(x * n/2) / (n * sin(x / 2)),
+
+    where `n` is a positive integer.
+
+    Parameters
+    ----------
+    x : array_like
+        Input data
+    n : int
+        Integer defining the periodicity.
+
+    Returns
+    -------
+    diric : ndarray
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import special
+    >>> import matplotlib.pyplot as plt
+
+    >>> x = np.linspace(-8*np.pi, 8*np.pi, num=201)
+    >>> plt.figure(figsize=(8, 8));
+    >>> for idx, n in enumerate([2, 3, 4, 9]):
+    ...     plt.subplot(2, 2, idx+1)
+    ...     plt.plot(x, special.diric(x, n))
+    ...     plt.title('diric, n={}'.format(n))
+    >>> plt.show()
+
+    The following example demonstrates that `diric` gives the magnitudes
+    (modulo the sign and scaling) of the Fourier coefficients of a
+    rectangular pulse.
+
+    Suppress output of values that are effectively 0:
+
+    >>> np.set_printoptions(suppress=True)
+
+    Create a signal `x` of length `m` with `k` ones:
+
+    >>> m = 8
+    >>> k = 3
+    >>> x = np.zeros(m)
+    >>> x[:k] = 1
+
+    Use the FFT to compute the Fourier transform of `x`, and
+    inspect the magnitudes of the coefficients:
+
+    >>> np.abs(np.fft.fft(x))
+    array([ 3.        ,  2.41421356,  1.        ,  0.41421356,  1.        ,
+            0.41421356,  1.        ,  2.41421356])
+
+    Now find the same values (up to sign) using `diric`. We multiply
+    by `k` to account for the different scaling conventions of
+    `numpy.fft.fft` and `diric`:
+
+    >>> theta = np.linspace(0, 2*np.pi, m, endpoint=False)
+    >>> k * special.diric(theta, k)
+    array([ 3.        ,  2.41421356,  1.        , -0.41421356, -1.        ,
+           -0.41421356,  1.        ,  2.41421356])
+    """
+    x, n = asarray(x), asarray(n)
+    # Broadcast x and n against each other: adding (x - x) / (n - n)
+    # attaches the other argument's shape without changing any values.
+    n = asarray(n + (x-x))
+    x = asarray(x + (n-n))
+    if issubdtype(x.dtype, inexact):
+        ytype = x.dtype
+    else:
+        ytype = float
+    y = zeros(x.shape, ytype)
+
+    # empirical minval for 32, 64 or 128 bit float computations
+    # where sin(x/2) < minval, result is fixed at +1 or -1
+    if np.finfo(ytype).eps < 1e-18:
+        minval = 1e-11
+    elif np.finfo(ytype).eps < 1e-15:
+        minval = 1e-7
+    else:
+        minval = 1e-3
+
+    mask1 = (n <= 0) | (n != floor(n))
+    place(y, mask1, nan)
+
+    x = x / 2
+    denom = sin(x)
+    mask2 = (1-mask1) & (abs(denom) < minval)
+    xsub = extract(mask2, x)
+    nsub = extract(mask2, n)
+    zsub = xsub / pi
+    place(y, mask2, pow(-1, np.round(zsub)*(nsub-1)))
+
+    mask = (1-mask1) & (1-mask2)
+    xsub = extract(mask, x)
+    nsub = extract(mask, n)
+    dsub = extract(mask, denom)
+    place(y, mask, sin(nsub*xsub)/(nsub*dsub))
+    return y
+
+
+def jnjnp_zeros(nt):
+    """Compute zeros of integer-order Bessel functions Jn and Jn'.
+
+    Results are arranged in order of the magnitudes of the zeros.
+
+    Parameters
+    ----------
+    nt : int
+        Number (<=1200) of zeros to compute
+
+    Returns
+    -------
+    zo[l-1] : ndarray
+        Value of the lth zero of Jn(x) and Jn'(x). Of length `nt`.
+    n[l-1] : ndarray
+        Order of the Jn(x) or Jn'(x) associated with lth zero. Of length `nt`.
+    m[l-1] : ndarray
+        Serial number of the zeros of Jn(x) or Jn'(x) associated
+        with lth zero. Of length `nt`.
+    t[l-1] : ndarray
+        0 if lth zero in zo is zero of Jn(x), 1 if it is a zero of Jn'(x). Of
+        length `nt`.
+
+    See Also
+    --------
+    jn_zeros, jnp_zeros : to get separated arrays of zeros.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 5.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
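+
+    Examples
+    --------
+    A small sketch of the output layout (an added example): the zeros come
+    back sorted by magnitude, with `n`, `m` and `t` identifying which
+    function and which zero each entry belongs to.
+
+    >>> import numpy as np
+    >>> from scipy.special import jnjnp_zeros
+    >>> zo, n, m, t = jnjnp_zeros(5)
+    >>> bool(np.all(np.diff(zo) >= 0))  # ordered by magnitude
+    True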
+
+    """
+    if not isscalar(nt) or (floor(nt) != nt) or (nt > 1200):
+        raise ValueError("Number must be integer <= 1200.")
+    nt = int(nt)
+    n, m, t, zo = _specfun.jdzo(nt)
+    return zo[1:nt+1], n[:nt], m[:nt], t[:nt]
+
+
+def jnyn_zeros(n, nt):
+    """Compute nt zeros of Bessel functions Jn(x), Jn'(x), Yn(x), and Yn'(x).
+
+    Returns 4 arrays of length `nt`, corresponding to the first `nt`
+    zeros of Jn(x), Jn'(x), Yn(x), and Yn'(x), respectively. The zeros
+    are returned in ascending order.
+
+    Parameters
+    ----------
+    n : int
+        Order of the Bessel functions
+    nt : int
+        Number (<=1200) of zeros to compute
+
+    Returns
+    -------
+    Jn : ndarray
+        First `nt` zeros of Jn
+    Jnp : ndarray
+        First `nt` zeros of Jn'
+    Yn : ndarray
+        First `nt` zeros of Yn
+    Ynp : ndarray
+        First `nt` zeros of Yn'
+
+    See Also
+    --------
+    jn_zeros, jnp_zeros, yn_zeros, ynp_zeros
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 5.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    Examples
+    --------
+    Compute the first three roots of :math:`J_1`, :math:`J_1'`,
+    :math:`Y_1` and :math:`Y_1'`.
+
+    >>> from scipy.special import jnyn_zeros
+    >>> jn_roots, jnp_roots, yn_roots, ynp_roots = jnyn_zeros(1, 3)
+    >>> jn_roots, yn_roots
+    (array([ 3.83170597,  7.01558667, 10.17346814]),
+     array([2.19714133, 5.42968104, 8.59600587]))
+
+    Plot :math:`J_1`, :math:`J_1'`, :math:`Y_1`, :math:`Y_1'` and their roots.
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.special import jnyn_zeros, jvp, jn, yvp, yn
+    >>> jn_roots, jnp_roots, yn_roots, ynp_roots = jnyn_zeros(1, 3)
+    >>> fig, ax = plt.subplots()
+    >>> xmax = 11
+    >>> x = np.linspace(0, xmax)
+    >>> ax.plot(x, jn(1, x), label=r"$J_1$", c='r')
+    >>> ax.plot(x, jvp(1, x, 1), label=r"$J_1'$", c='b')
+    >>> ax.plot(x, yn(1, x), label=r"$Y_1$", c='y')
+    >>> ax.plot(x, yvp(1, x, 1), label=r"$Y_1'$", c='c')
+    >>> zeros = np.zeros((3, ))
+    >>> ax.scatter(jn_roots, zeros, s=30, c='r', zorder=5,
+    ...            label=r"$J_1$ roots")
+    >>> ax.scatter(jnp_roots, zeros, s=30, c='b', zorder=5,
+    ...            label=r"$J_1'$ roots")
+    >>> ax.scatter(yn_roots, zeros, s=30, c='y', zorder=5,
+    ...            label=r"$Y_1$ roots")
+    >>> ax.scatter(ynp_roots, zeros, s=30, c='c', zorder=5,
+    ...            label=r"$Y_1'$ roots")
+    >>> ax.hlines(0, 0, xmax, color='k')
+    >>> ax.set_ylim(-0.6, 0.6)
+    >>> ax.set_xlim(0, xmax)
+    >>> ax.legend(ncol=2, bbox_to_anchor=(1., 0.75))
+    >>> plt.tight_layout()
+    >>> plt.show()
+    """
+    if not (isscalar(nt) and isscalar(n)):
+        raise ValueError("Arguments must be scalars.")
+    if (floor(n) != n) or (floor(nt) != nt):
+        raise ValueError("Arguments must be integers.")
+    if (nt <= 0):
+        raise ValueError("nt must be a positive integer (nt > 0).")
+    return _specfun.jyzo(abs(n), nt)
+
+
+def jn_zeros(n, nt):
+    r"""Compute zeros of integer-order Bessel functions Jn.
+
+    Compute `nt` zeros of the Bessel functions :math:`J_n(x)` on the
+    interval :math:`(0, \infty)`. The zeros are returned in ascending
+    order. Note that this interval excludes the zero at :math:`x = 0`
+    that exists for :math:`n > 0`.
+
+    Parameters
+    ----------
+    n : int
+        Order of Bessel function
+    nt : int
+        Number of zeros to return
+
+    Returns
+    -------
+    ndarray
+        First `nt` zeros of the Bessel function.
+
+    See Also
+    --------
+    jv: Real-order Bessel functions of the first kind
+    jnp_zeros: Zeros of :math:`Jn'`
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 5.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    Examples
+    --------
+    Compute the first four positive roots of :math:`J_3`.
+
+    >>> from scipy.special import jn_zeros
+    >>> jn_zeros(3, 4)
+    array([ 6.3801619 ,  9.76102313, 13.01520072, 16.22346616])
+
+    Plot :math:`J_3` and its first four positive roots. Note
+    that the root located at 0 is not returned by `jn_zeros`.
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.special import jn, jn_zeros
+    >>> j3_roots = jn_zeros(3, 4)
+    >>> xmax = 18
+    >>> xmin = -1
+    >>> x = np.linspace(xmin, xmax, 500)
+    >>> fig, ax = plt.subplots()
+    >>> ax.plot(x, jn(3, x), label=r'$J_3$')
+    >>> ax.scatter(j3_roots, np.zeros((4, )), s=30, c='r',
+    ...            label=r"$J_3$_Zeros", zorder=5)
+    >>> ax.scatter(0, 0, s=30, c='k',
+    ...            label=r"Root at 0", zorder=5)
+    >>> ax.hlines(0, 0, xmax, color='k')
+    >>> ax.set_xlim(xmin, xmax)
+    >>> plt.legend()
+    >>> plt.show()
+    """
+    return jnyn_zeros(n, nt)[0]
+
+
+def jnp_zeros(n, nt):
+    r"""Compute zeros of integer-order Bessel function derivatives Jn'.
+
+    Compute `nt` zeros of the functions :math:`J_n'(x)` on the
+    interval :math:`(0, \infty)`. The zeros are returned in ascending
+    order. Note that this interval excludes the zero at :math:`x = 0`
+    that exists for :math:`n > 1`.
+
+    Parameters
+    ----------
+    n : int
+        Order of Bessel function
+    nt : int
+        Number of zeros to return
+
+    Returns
+    -------
+    ndarray
+        First `nt` zeros of the Bessel function.
+
+    See Also
+    --------
+    jvp: Derivatives of integer-order Bessel functions of the first kind
+    jv: Float-order Bessel functions of the first kind
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 5.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    Examples
+    --------
+    Compute the first four roots of :math:`J_2'`.
+
+    >>> from scipy.special import jnp_zeros
+    >>> jnp_zeros(2, 4)
+    array([ 3.05423693,  6.70613319,  9.96946782, 13.17037086])
+
+    As `jnp_zeros` yields the roots of :math:`J_n'`, it can be used to
+    compute the locations of the peaks of :math:`J_n`. Plot
+    :math:`J_2`, :math:`J_2'` and the locations of the roots of :math:`J_2'`.
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.special import jn, jnp_zeros, jvp
+    >>> j2_roots = jnp_zeros(2, 4)
+    >>> xmax = 15
+    >>> x = np.linspace(0, xmax, 500)
+    >>> fig, ax = plt.subplots()
+    >>> ax.plot(x, jn(2, x), label=r'$J_2$')
+    >>> ax.plot(x, jvp(2, x, 1), label=r"$J_2'$")
+    >>> ax.hlines(0, 0, xmax, color='k')
+    >>> ax.scatter(j2_roots, np.zeros((4, )), s=30, c='r',
+    ...            label=r"Roots of $J_2'$", zorder=5)
+    >>> ax.set_ylim(-0.4, 0.8)
+    >>> ax.set_xlim(0, xmax)
+    >>> plt.legend()
+    >>> plt.show()
+    """
+    return jnyn_zeros(n, nt)[1]
+
+
+def yn_zeros(n, nt):
+    r"""Compute zeros of integer-order Bessel function Yn(x).
+
+    Compute `nt` zeros of the functions :math:`Y_n(x)` on the interval
+    :math:`(0, \infty)`. The zeros are returned in ascending order.
+
+    Parameters
+    ----------
+    n : int
+        Order of Bessel function
+    nt : int
+        Number of zeros to return
+
+    Returns
+    -------
+    ndarray
+        First `nt` zeros of the Bessel function.
+
+    See Also
+    --------
+    yn: Bessel function of the second kind for integer order
+    yv: Bessel function of the second kind for real order
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 5.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    Examples
+    --------
+    Compute the first four roots of :math:`Y_2`.
+
+    >>> from scipy.special import yn_zeros
+    >>> yn_zeros(2, 4)
+    array([ 3.38424177,  6.79380751, 10.02347798, 13.20998671])
+
+    Plot :math:`Y_2` and its first four roots.
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.special import yn, yn_zeros
+    >>> xmin = 2
+    >>> xmax = 15
+    >>> x = np.linspace(xmin, xmax, 500)
+    >>> fig, ax = plt.subplots()
+    >>> ax.hlines(0, xmin, xmax, color='k')
+    >>> ax.plot(x, yn(2, x), label=r'$Y_2$')
+    >>> ax.scatter(yn_zeros(2, 4), np.zeros((4, )), s=30, c='r',
+    ...            label='Roots', zorder=5)
+    >>> ax.set_ylim(-0.4, 0.4)
+    >>> ax.set_xlim(xmin, xmax)
+    >>> plt.legend()
+    >>> plt.show()
+    """
+    return jnyn_zeros(n, nt)[2]
+
+
+def ynp_zeros(n, nt):
+    r"""Compute zeros of integer-order Bessel function derivatives Yn'(x).
+
+    Compute `nt` zeros of the functions :math:`Y_n'(x)` on the
+    interval :math:`(0, \infty)`. The zeros are returned in ascending
+    order.
+
+    Parameters
+    ----------
+    n : int
+        Order of Bessel function
+    nt : int
+        Number of zeros to return
+
+    Returns
+    -------
+    ndarray
+        First `nt` zeros of the Bessel derivative function.
+
+
+    See Also
+    --------
+    yvp
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 5.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    Examples
+    --------
+    Compute the first four roots of the first derivative of the
+    Bessel function of second kind for order 0 :math:`Y_0'`.
+
+    >>> from scipy.special import ynp_zeros
+    >>> ynp_zeros(0, 4)
+    array([ 2.19714133,  5.42968104,  8.59600587, 11.74915483])
+
+    Plot :math:`Y_0`, :math:`Y_0'` and confirm visually that the roots of
+    :math:`Y_0'` are located at local extrema of :math:`Y_0`.
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.special import yn, ynp_zeros, yvp
+    >>> zeros = ynp_zeros(0, 4)
+    >>> xmax = 13
+    >>> x = np.linspace(0, xmax, 500)
+    >>> fig, ax = plt.subplots()
+    >>> ax.plot(x, yn(0, x), label=r'$Y_0$')
+    >>> ax.plot(x, yvp(0, x, 1), label=r"$Y_0'$")
+    >>> ax.scatter(zeros, np.zeros((4, )), s=30, c='r',
+    ...            label=r"Roots of $Y_0'$", zorder=5)
+    >>> for root in zeros:
+    ...     y0_extremum =  yn(0, root)
+    ...     lower = min(0, y0_extremum)
+    ...     upper = max(0, y0_extremum)
+    ...     ax.vlines(root, lower, upper, color='r')
+    >>> ax.hlines(0, 0, xmax, color='k')
+    >>> ax.set_ylim(-0.6, 0.6)
+    >>> ax.set_xlim(0, xmax)
+    >>> plt.legend()
+    >>> plt.show()
+    """
+    return jnyn_zeros(n, nt)[3]
+
+
+def y0_zeros(nt, complex=False):
+    """Compute nt zeros of Bessel function Y0(z), and derivative at each zero.
+
+    The derivatives are given by Y0'(z0) = -Y1(z0) at each zero z0.
+
+    Parameters
+    ----------
+    nt : int
+        Number of zeros to return
+    complex : bool, default False
+        Set to False to return only the real zeros; set to True to return only
+        the complex zeros with negative real part and positive imaginary part.
+        Note that the complex conjugates of the latter are also zeros of the
+        function, but are not returned by this routine.
+
+    Returns
+    -------
+    z0n : ndarray
+        Location of nth zero of Y0(z)
+    y0pz0n : ndarray
+        Value of derivative Y0'(z0) for nth zero
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 5.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    Examples
+    --------
+    Compute the first 4 real roots and the derivatives at the roots of
+    :math:`Y_0`:
+
+    >>> import numpy as np
+    >>> from scipy.special import y0_zeros
+    >>> zeros, grads = y0_zeros(4)
+    >>> with np.printoptions(precision=5):
+    ...     print(f"Roots: {zeros}")
+    ...     print(f"Gradients: {grads}")
+    Roots: [ 0.89358+0.j  3.95768+0.j  7.08605+0.j 10.22235+0.j]
+    Gradients: [-0.87942+0.j  0.40254+0.j -0.3001 +0.j  0.2497 +0.j]
+
+    Plot the real part of :math:`Y_0` and the first four computed roots.
+
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.special import y0
+    >>> xmin = 0
+    >>> xmax = 11
+    >>> x = np.linspace(xmin, xmax, 500)
+    >>> fig, ax = plt.subplots()
+    >>> ax.hlines(0, xmin, xmax, color='k')
+    >>> ax.plot(x, y0(x), label=r'$Y_0$')
+    >>> zeros, grads = y0_zeros(4)
+    >>> ax.scatter(zeros.real, np.zeros((4, )), s=30, c='r',
+    ...            label=r'$Y_0$ zeros', zorder=5)
+    >>> ax.set_ylim(-0.5, 0.6)
+    >>> ax.set_xlim(xmin, xmax)
+    >>> plt.legend(ncol=2)
+    >>> plt.show()
+
+    Compute the first 4 complex roots and the derivatives at the roots of
+    :math:`Y_0` by setting ``complex=True``:
+
+    >>> y0_zeros(4, True)
+    (array([ -2.40301663+0.53988231j,  -5.5198767 +0.54718001j,
+             -8.6536724 +0.54841207j, -11.79151203+0.54881912j]),
+     array([ 0.10074769-0.88196771j, -0.02924642+0.5871695j ,
+             0.01490806-0.46945875j, -0.00937368+0.40230454j]))
+    """
+    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
+        raise ValueError("Arguments must be scalar positive integer.")
+    kf = 0
+    kc = not complex
+    return _specfun.cyzo(nt, kf, kc)
+
+
+def y1_zeros(nt, complex=False):
+    """Compute nt zeros of Bessel function Y1(z), and derivative at each zero.
+
+    The derivatives are given by Y1'(z1) = Y0(z1) at each zero z1.
+
+    Parameters
+    ----------
+    nt : int
+        Number of zeros to return
+    complex : bool, default False
+        Set to False to return only the real zeros; set to True to return only
+        the complex zeros with negative real part and positive imaginary part.
+        Note that the complex conjugates of the latter are also zeros of the
+        function, but are not returned by this routine.
+
+    Returns
+    -------
+    z1n : ndarray
+        Location of nth zero of Y1(z)
+    y1pz1n : ndarray
+        Value of derivative Y1'(z1) for nth zero
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 5.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    Examples
+    --------
+    Compute the first 4 real roots and the derivatives at the roots of
+    :math:`Y_1`:
+
+    >>> import numpy as np
+    >>> from scipy.special import y1_zeros
+    >>> zeros, grads = y1_zeros(4)
+    >>> with np.printoptions(precision=5):
+    ...     print(f"Roots: {zeros}")
+    ...     print(f"Gradients: {grads}")
+    Roots: [ 2.19714+0.j  5.42968+0.j  8.59601+0.j 11.74915+0.j]
+    Gradients: [ 0.52079+0.j -0.34032+0.j  0.27146+0.j -0.23246+0.j]
+
+    Extract the real parts:
+
+    >>> realzeros = zeros.real
+    >>> realzeros
+    array([ 2.19714133,  5.42968104,  8.59600587, 11.74915483])
+
+    Plot :math:`Y_1` and the first four computed roots.
+
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.special import y1
+    >>> xmin = 0
+    >>> xmax = 13
+    >>> x = np.linspace(xmin, xmax, 500)
+    >>> zeros, grads = y1_zeros(4)
+    >>> fig, ax = plt.subplots()
+    >>> ax.hlines(0, xmin, xmax, color='k')
+    >>> ax.plot(x, y1(x), label=r'$Y_1$')
+    >>> ax.scatter(zeros.real, np.zeros((4, )), s=30, c='r',
+    ...            label=r'Zeros of $Y_1$', zorder=5)
+    >>> ax.set_ylim(-0.5, 0.5)
+    >>> ax.set_xlim(xmin, xmax)
+    >>> plt.legend()
+    >>> plt.show()
+
+    Compute the first 4 complex roots and the derivatives at the roots of
+    :math:`Y_1` by setting ``complex=True``:
+
+    >>> y1_zeros(4, True)
+    (array([ -0.50274327+0.78624371j,  -3.83353519+0.56235654j,
+             -7.01590368+0.55339305j, -10.17357383+0.55127339j]),
+     array([-0.45952768+1.31710194j,  0.04830191-0.69251288j,
+            -0.02012695+0.51864253j,  0.011614  -0.43203296j]))
+    """
+    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
+        raise ValueError("Arguments must be scalar positive integer.")
+    kf = 1
+    kc = not complex
+    return _specfun.cyzo(nt, kf, kc)
+
+
+def y1p_zeros(nt, complex=False):
+    """Compute nt zeros of Bessel derivative Y1'(z), and value at each zero.
+
+    The values are given by Y1(z1) at each z1 where Y1'(z1)=0.
+
+    Parameters
+    ----------
+    nt : int
+        Number of zeros to return
+    complex : bool, default False
+        Set to False to return only the real zeros; set to True to return only
+        the complex zeros with negative real part and positive imaginary part.
+        Note that the complex conjugates of the latter are also zeros of the
+        function, but are not returned by this routine.
+
+    Returns
+    -------
+    z1pn : ndarray
+        Location of nth zero of Y1'(z)
+    y1z1pn : ndarray
+        Value of derivative Y1(z1) for nth zero
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 5.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    Examples
+    --------
+    Compute the first four roots of :math:`Y_1'` and the values of
+    :math:`Y_1` at these roots.
+
+    >>> import numpy as np
+    >>> from scipy.special import y1p_zeros
+    >>> y1grad_roots, y1_values = y1p_zeros(4)
+    >>> with np.printoptions(precision=5):
+    ...     print(f"Y1' Roots: {y1grad_roots}")
+    ...     print(f"Y1 values: {y1_values}")
+    Y1' Roots: [ 3.68302+0.j  6.9415 +0.j 10.1234 +0.j 13.28576+0.j]
+    Y1 values: [ 0.41673+0.j -0.30317+0.j  0.25091+0.j -0.21897+0.j]
+
+    `y1p_zeros` can be used to calculate the extremal points of :math:`Y_1`
+    directly. Here we plot :math:`Y_1` and the first four extrema.
+
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.special import y1, yvp
+    >>> y1_roots, y1_values_at_roots = y1p_zeros(4)
+    >>> real_roots = y1_roots.real
+    >>> xmax = 15
+    >>> x = np.linspace(0, xmax, 500)
+    >>> fig, ax = plt.subplots()
+    >>> ax.plot(x, y1(x), label=r'$Y_1$')
+    >>> ax.plot(x, yvp(1, x, 1), label=r"$Y_1'$")
+    >>> ax.scatter(real_roots, np.zeros((4, )), s=30, c='r',
+    ...            label=r"Roots of $Y_1'$", zorder=5)
+    >>> ax.scatter(real_roots, y1_values_at_roots.real, s=30, c='k',
+    ...            label=r"Extrema of $Y_1$", zorder=5)
+    >>> ax.hlines(0, 0, xmax, color='k')
+    >>> ax.set_ylim(-0.5, 0.5)
+    >>> ax.set_xlim(0, xmax)
+    >>> ax.legend(ncol=2, bbox_to_anchor=(1., 0.75))
+    >>> plt.tight_layout()
+    >>> plt.show()
+    """
+    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
+        raise ValueError("Arguments must be scalar positive integer.")
+    kf = 2
+    kc = not complex
+    return _specfun.cyzo(nt, kf, kc)
+
+
+def _bessel_diff_formula(v, z, n, L, phase):
+    # n-th derivative of the cylinder function L via the difference
+    # formula of Abramowitz & Stegun (AMS55), e.g. 9.1.31:
+    #   L(v, z) = J(v, z), Y(v, z), H1(v, z), H2(v, z) -> phase = -1
+    #   L(v, z) = I(v, z) or exp(v*pi*i)*K(v, z)       -> phase = 1
+    # For K, the factor exp((v-k)*pi*i) can be pulled out into the caller.
+    v = asarray(v)
+    p = 1.0
+    s = L(v-n, z)
+    for i in range(1, n+1):
+        p = phase * (p * (n-i+1)) / i   # p = phase**i * binom(n, i)
+        s += p*L(v-n + i*2, z)
+    return s / (2.**n)
+
+
+def jvp(v, z, n=1):
+    """Compute derivatives of Bessel functions of the first kind.
+
+    Compute the nth derivative of the Bessel function `Jv` with
+    respect to `z`.
+
+    Parameters
+    ----------
+    v : array_like or float
+        Order of Bessel function
+    z : complex
+        Argument at which to evaluate the derivative; can be real or
+        complex.
+    n : int, default 1
+        Order of derivative. For 0 returns the Bessel function `jv` itself.
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the derivative of the Bessel function.
+
+    Notes
+    -----
+    The derivative is computed using the relation DLMF 10.6.7 [2]_.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 5.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    .. [2] NIST Digital Library of Mathematical Functions.
+           https://dlmf.nist.gov/10.6.E7
+
+    Examples
+    --------
+    Compute the Bessel function of the first kind of order 0 and
+    its first two derivatives at 1.
+
+    >>> from scipy.special import jvp
+    >>> jvp(0, 1, 0), jvp(0, 1, 1), jvp(0, 1, 2)
+    (0.7651976865579666, -0.44005058574493355, -0.3251471008130331)
+
+    Compute the first derivative of the Bessel function of the first
+    kind for several orders at 1 by providing an array for `v`.
+
+    >>> jvp([0, 1, 2], 1, 1)
+    array([-0.44005059,  0.3251471 ,  0.21024362])
+
+    Compute the first derivative of the Bessel function of the first
+    kind of order 0 at several points by providing an array for `z`.
+
+    >>> import numpy as np
+    >>> points = np.array([0., 1.5, 3.])
+    >>> jvp(0, points, 1)
+    array([-0.        , -0.55793651, -0.33905896])
+
+    Plot the Bessel function of the first kind of order 1 and its
+    first three derivatives.
+
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.linspace(-10, 10, 1000)
+    >>> fig, ax = plt.subplots()
+    >>> ax.plot(x, jvp(1, x, 0), label=r"$J_1$")
+    >>> ax.plot(x, jvp(1, x, 1), label=r"$J_1'$")
+    >>> ax.plot(x, jvp(1, x, 2), label=r"$J_1''$")
+    >>> ax.plot(x, jvp(1, x, 3), label=r"$J_1'''$")
+    >>> plt.legend()
+    >>> plt.show()
+    """
+    n = _nonneg_int_or_fail(n, 'n')
+    if n == 0:
+        return jv(v, z)
+    else:
+        return _bessel_diff_formula(v, z, n, jv, -1)
+
+
+def yvp(v, z, n=1):
+    """Compute derivatives of Bessel functions of the second kind.
+
+    Compute the nth derivative of the Bessel function `Yv` with
+    respect to `z`.
+
+    Parameters
+    ----------
+    v : array_like of float
+        Order of Bessel function
+    z : complex
+        Argument at which to evaluate the derivative
+    n : int, default 1
+        Order of derivative. For 0 returns the Bessel function `yv` itself.
+
+    See Also
+    --------
+    yv
+
+    Returns
+    -------
+    scalar or ndarray
+        nth derivative of the Bessel function.
+
+    Notes
+    -----
+    The derivative is computed using the relation DLMF 10.6.7 [2]_.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 5.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    .. [2] NIST Digital Library of Mathematical Functions.
+           https://dlmf.nist.gov/10.6.E7
+
+    Examples
+    --------
+    Compute the Bessel function of the second kind of order 0 and
+    its first two derivatives at 1.
+
+    >>> from scipy.special import yvp
+    >>> yvp(0, 1, 0), yvp(0, 1, 1), yvp(0, 1, 2)
+    (0.088256964215677, 0.7812128213002889, -0.8694697855159659)
+
+    Compute the first derivative of the Bessel function of the second
+    kind for several orders at 1 by providing an array for `v`.
+
+    >>> yvp([0, 1, 2], 1, 1)
+    array([0.78121282, 0.86946979, 2.52015239])
+
+    Compute the first derivative of the Bessel function of the
+    second kind of order 0 at several points by providing an array for `z`.
+
+    >>> import numpy as np
+    >>> points = np.array([0.5, 1.5, 3.])
+    >>> yvp(0, points, 1)
+    array([ 1.47147239,  0.41230863, -0.32467442])
+
+    Plot the Bessel function of the second kind of order 1 and its
+    first three derivatives.
+
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.linspace(0, 5, 1000)
+    >>> fig, ax = plt.subplots()
+    >>> ax.plot(x, yvp(1, x, 0), label=r"$Y_1$")
+    >>> ax.plot(x, yvp(1, x, 1), label=r"$Y_1'$")
+    >>> ax.plot(x, yvp(1, x, 2), label=r"$Y_1''$")
+    >>> ax.plot(x, yvp(1, x, 3), label=r"$Y_1'''$")
+    >>> ax.set_ylim(-10, 10)
+    >>> plt.legend()
+    >>> plt.show()
+    """
+    n = _nonneg_int_or_fail(n, 'n')
+    if n == 0:
+        return yv(v, z)
+    else:
+        return _bessel_diff_formula(v, z, n, yv, -1)
+
+
+def kvp(v, z, n=1):
+    """Compute derivatives of real-order modified Bessel function Kv(z)
+
+    Kv(z) is the modified Bessel function of the second kind.
+    Derivative is calculated with respect to `z`.
+
+    Parameters
+    ----------
+    v : array_like of float
+        Order of Bessel function
+    z : array_like of complex
+        Argument at which to evaluate the derivative
+    n : int, default 1
+        Order of derivative. For 0 returns the Bessel function `kv` itself.
+
+    Returns
+    -------
+    out : ndarray
+        The results
+
+    See Also
+    --------
+    kv
+
+    Notes
+    -----
+    The derivative is computed using the relation DLMF 10.29.5 [2]_.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 6.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    .. [2] NIST Digital Library of Mathematical Functions.
+           https://dlmf.nist.gov/10.29.E5
+
+    Examples
+    --------
+    Compute the modified Bessel function of the second kind of order 0 and
+    its first two derivatives at 1.
+
+    >>> from scipy.special import kvp
+    >>> kvp(0, 1, 0), kvp(0, 1, 1), kvp(0, 1, 2)
+    (0.42102443824070834, -0.6019072301972346, 1.0229316684379428)
+
+    Compute the first derivative of the modified Bessel function of the second
+    kind for several orders at 1 by providing an array for `v`.
+
+    >>> kvp([0, 1, 2], 1, 1)
+    array([-0.60190723, -1.02293167, -3.85158503])
+
+    Compute the first derivative of the modified Bessel function of the
+    second kind of order 0 at several points by providing an array for `z`.
+
+    >>> import numpy as np
+    >>> points = np.array([0.5, 1.5, 3.])
+    >>> kvp(0, points, 1)
+    array([-1.65644112, -0.2773878 , -0.04015643])
+
+    Plot the modified Bessel function of the second kind and its
+    first three derivatives.
+
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.linspace(0, 5, 1000)
+    >>> fig, ax = plt.subplots()
+    >>> ax.plot(x, kvp(1, x, 0), label=r"$K_1$")
+    >>> ax.plot(x, kvp(1, x, 1), label=r"$K_1'$")
+    >>> ax.plot(x, kvp(1, x, 2), label=r"$K_1''$")
+    >>> ax.plot(x, kvp(1, x, 3), label=r"$K_1'''$")
+    >>> ax.set_ylim(-2.5, 2.5)
+    >>> plt.legend()
+    >>> plt.show()
+    """
+    n = _nonneg_int_or_fail(n, 'n')
+    if n == 0:
+        return kv(v, z)
+    else:
+        return (-1)**n * _bessel_diff_formula(v, z, n, kv, 1)
+
+
+def ivp(v, z, n=1):
+    """Compute derivatives of modified Bessel functions of the first kind.
+
+    Compute the nth derivative of the modified Bessel function `Iv`
+    with respect to `z`.
+
+    Parameters
+    ----------
+    v : array_like or float
+        Order of Bessel function
+    z : array_like
+        Argument at which to evaluate the derivative; can be real or
+        complex.
+    n : int, default 1
+        Order of derivative. For 0, returns the Bessel function `iv` itself.
+
+    Returns
+    -------
+    scalar or ndarray
+        nth derivative of the modified Bessel function.
+
+    See Also
+    --------
+    iv
+
+    Notes
+    -----
+    The derivative is computed using the relation DLMF 10.29.5 [2]_.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 6.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    .. [2] NIST Digital Library of Mathematical Functions.
+           https://dlmf.nist.gov/10.29.E5
+
+    Examples
+    --------
+    Compute the modified Bessel function of the first kind of order 0 and
+    its first two derivatives at 1.
+
+    >>> from scipy.special import ivp
+    >>> ivp(0, 1, 0), ivp(0, 1, 1), ivp(0, 1, 2)
+    (1.2660658777520084, 0.565159103992485, 0.7009067737595233)
+
+    Compute the first derivative of the modified Bessel function of the first
+    kind for several orders at 1 by providing an array for `v`.
+
+    >>> ivp([0, 1, 2], 1, 1)
+    array([0.5651591 , 0.70090677, 0.29366376])
+
+    Compute the first derivative of the modified Bessel function of the
+    first kind of order 0 at several points by providing an array for `z`.
+
+    >>> import numpy as np
+    >>> points = np.array([0., 1.5, 3.])
+    >>> ivp(0, points, 1)
+    array([0.        , 0.98166643, 3.95337022])
+
+    Plot the modified Bessel function of the first kind of order 1 and its
+    first three derivatives.
+
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.linspace(-5, 5, 1000)
+    >>> fig, ax = plt.subplots()
+    >>> ax.plot(x, ivp(1, x, 0), label=r"$I_1$")
+    >>> ax.plot(x, ivp(1, x, 1), label=r"$I_1'$")
+    >>> ax.plot(x, ivp(1, x, 2), label=r"$I_1''$")
+    >>> ax.plot(x, ivp(1, x, 3), label=r"$I_1'''$")
+    >>> plt.legend()
+    >>> plt.show()
+    """
+    n = _nonneg_int_or_fail(n, 'n')
+    if n == 0:
+        return iv(v, z)
+    else:
+        return _bessel_diff_formula(v, z, n, iv, 1)
+
+
+def h1vp(v, z, n=1):
+    """Compute derivatives of Hankel function H1v(z) with respect to `z`.
+
+    Parameters
+    ----------
+    v : array_like
+        Order of Hankel function
+    z : array_like
+        Argument at which to evaluate the derivative. Can be real or
+        complex.
+    n : int, default 1
+        Order of derivative. For 0 returns the Hankel function `hankel1` itself.
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the derivative of the Hankel function.
+
+    See Also
+    --------
+    hankel1
+
+    Notes
+    -----
+    The derivative is computed using the relation DLMF 10.6.7 [2]_.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 5.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    .. [2] NIST Digital Library of Mathematical Functions.
+           https://dlmf.nist.gov/10.6.E7
+
+    Examples
+    --------
+    Compute the Hankel function of the first kind of order 0 and
+    its first two derivatives at 1.
+
+    >>> from scipy.special import h1vp
+    >>> h1vp(0, 1, 0), h1vp(0, 1, 1), h1vp(0, 1, 2)
+    ((0.7651976865579664+0.088256964215677j),
+     (-0.44005058574493355+0.7812128213002889j),
+     (-0.3251471008130329-0.8694697855159659j))
+
+    Compute the first derivative of the Hankel function of the first kind
+    for several orders at 1 by providing an array for `v`.
+
+    >>> h1vp([0, 1, 2], 1, 1)
+    array([-0.44005059+0.78121282j,  0.3251471 +0.86946979j,
+           0.21024362+2.52015239j])
+
+    Compute the first derivative of the Hankel function of the first kind
+    of order 0 at several points by providing an array for `z`.
+
+    >>> import numpy as np
+    >>> points = np.array([0.5, 1.5, 3.])
+    >>> h1vp(0, points, 1)
+    array([-0.24226846+1.47147239j, -0.55793651+0.41230863j,
+           -0.33905896-0.32467442j])
+    """
+    n = _nonneg_int_or_fail(n, 'n')
+    if n == 0:
+        return hankel1(v, z)
+    else:
+        return _bessel_diff_formula(v, z, n, hankel1, -1)
+
+
+def h2vp(v, z, n=1):
+    """Compute derivatives of Hankel function H2v(z) with respect to `z`.
+
+    Parameters
+    ----------
+    v : array_like
+        Order of Hankel function
+    z : array_like
+        Argument at which to evaluate the derivative. Can be real or
+        complex.
+    n : int, default 1
+        Order of derivative. For 0 returns the Hankel function `hankel2` itself.
+
+    Returns
+    -------
+    scalar or ndarray
+        Values of the derivative of the Hankel function.
+
+    See Also
+    --------
+    hankel2
+
+    Notes
+    -----
+    The derivative is computed using the relation DLMF 10.6.7 [2]_.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 5.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    .. [2] NIST Digital Library of Mathematical Functions.
+           https://dlmf.nist.gov/10.6.E7
+
+    Examples
+    --------
+    Compute the Hankel function of the second kind of order 0 and
+    its first two derivatives at 1.
+
+    >>> from scipy.special import h2vp
+    >>> h2vp(0, 1, 0), h2vp(0, 1, 1), h2vp(0, 1, 2)
+    ((0.7651976865579664-0.088256964215677j),
+     (-0.44005058574493355-0.7812128213002889j),
+     (-0.3251471008130329+0.8694697855159659j))
+
+    Compute the first derivative of the Hankel function of the second kind
+    for several orders at 1 by providing an array for `v`.
+
+    >>> h2vp([0, 1, 2], 1, 1)
+    array([-0.44005059-0.78121282j,  0.3251471 -0.86946979j,
+           0.21024362-2.52015239j])
+
+    Compute the first derivative of the Hankel function of the second kind
+    of order 0 at several points by providing an array for `z`.
+
+    >>> import numpy as np
+    >>> points = np.array([0.5, 1.5, 3.])
+    >>> h2vp(0, points, 1)
+    array([-0.24226846-1.47147239j, -0.55793651-0.41230863j,
+           -0.33905896+0.32467442j])
+    """
+    n = _nonneg_int_or_fail(n, 'n')
+    if n == 0:
+        return hankel2(v, z)
+    else:
+        return _bessel_diff_formula(v, z, n, hankel2, -1)
+
+
+def riccati_jn(n, x):
+    r"""Compute Ricatti-Bessel function of the first kind and its derivative.
+
+    The Ricatti-Bessel function of the first kind is defined as :math:`x
+    j_n(x)`, where :math:`j_n` is the spherical Bessel function of the first
+    kind of order :math:`n`.
+
+    This function computes the value and first derivative of the
+    Ricatti-Bessel function for all orders up to and including `n`.
+
+    Parameters
+    ----------
+    n : int
+        Maximum order of function to compute
+    x : float
+        Argument at which to evaluate
+
+    Returns
+    -------
+    jn : ndarray
+        Value of j0(x), ..., jn(x)
+    jnp : ndarray
+        First derivative j0'(x), ..., jn'(x)
+
+    Notes
+    -----
+    The computation is carried out via backward recurrence, using the
+    relation DLMF 10.51.1 [2]_.
+
+    Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
+    Jin [1]_.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+    .. [2] NIST Digital Library of Mathematical Functions.
+           https://dlmf.nist.gov/10.51.E1
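+
+    Examples
+    --------
+    A minimal consistency check against `spherical_jn`, using the defining
+    relation :math:`x j_n(x)` (illustrative, not a strict test):
+
+    >>> import numpy as np
+    >>> from scipy.special import riccati_jn, spherical_jn
+    >>> x = 2.0
+    >>> jn, jnp = riccati_jn(3, x)
+    >>> np.allclose(jn, x * spherical_jn(np.arange(4), x))
+    True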
+
+    """
+    if not (isscalar(n) and isscalar(x)):
+        raise ValueError("arguments must be scalars.")
+    n = _nonneg_int_or_fail(n, 'n', strict=False)
+    if (n == 0):
+        n1 = 1
+    else:
+        n1 = n
+    nm, jn, jnp = _specfun.rctj(n1, x)
+    return jn[:(n+1)], jnp[:(n+1)]
+
+
+def riccati_yn(n, x):
+    """Compute Ricatti-Bessel function of the second kind and its derivative.
+
+    The Ricatti-Bessel function of the second kind is defined as :math:`x
+    y_n(x)`, where :math:`y_n` is the spherical Bessel function of the second
+    kind of order :math:`n`.
+
+    This function computes the value and first derivative of the function for
+    all orders up to and including `n`.
+
+    Parameters
+    ----------
+    n : int
+        Maximum order of function to compute
+    x : float
+        Argument at which to evaluate
+
+    Returns
+    -------
+    yn : ndarray
+        Value of y0(x), ..., yn(x)
+    ynp : ndarray
+        First derivative y0'(x), ..., yn'(x)
+
+    Notes
+    -----
+    The computation is carried out via ascending recurrence, using the
+    relation DLMF 10.51.1 [2]_.
+
+    Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
+    Jin [1]_.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+    .. [2] NIST Digital Library of Mathematical Functions.
+           https://dlmf.nist.gov/10.51.E1
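+
+    Examples
+    --------
+    A minimal consistency check against `spherical_yn`, using the defining
+    relation :math:`x y_n(x)` (illustrative, not a strict test):
+
+    >>> import numpy as np
+    >>> from scipy.special import riccati_yn, spherical_yn
+    >>> x = 2.0
+    >>> yn, ynp = riccati_yn(3, x)
+    >>> np.allclose(yn, x * spherical_yn(np.arange(4), x))
+    True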
+
+    """
+    if not (isscalar(n) and isscalar(x)):
+        raise ValueError("arguments must be scalars.")
+    n = _nonneg_int_or_fail(n, 'n', strict=False)
+    if (n == 0):
+        n1 = 1
+    else:
+        n1 = n
+    nm, jn, jnp = _specfun.rcty(n1, x)
+    return jn[:(n+1)], jnp[:(n+1)]
+
+
+def erf_zeros(nt):
+    """Compute the first nt zero in the first quadrant, ordered by absolute value.
+
+    Zeros in the other quadrants can be obtained by using the symmetries erf(-z) = erf(z) and
+    erf(conj(z)) = conj(erf(z)).
+
+
+    Parameters
+    ----------
+    nt : int
+        The number of zeros to compute
+
+    Returns
+    -------
+    zeros : ndarray (complex)
+        Locations of the zeros of erf(z) in the first quadrant.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    Examples
+    --------
+    >>> from scipy import special
+    >>> special.erf_zeros(1)
+    array([1.45061616+1.880943j])
+
+    Check that erf is (close to) zero for the values returned by erf_zeros:
+
+    >>> special.erf(special.erf_zeros(1))
+    array([4.95159469e-14-1.16407394e-16j])
+
+    """
+    if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
+        raise ValueError("Argument must be positive scalar integer.")
+    return _specfun.cerzo(nt)
+
+
+def fresnelc_zeros(nt):
+    """Compute nt complex zeros of cosine Fresnel integral C(z).
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
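+
+    Examples
+    --------
+    A quick numerical check that the cosine integral returned by `fresnel`
+    is (close to) zero at the computed points (illustrative only):
+
+    >>> import numpy as np
+    >>> from scipy.special import fresnel, fresnelc_zeros
+    >>> z = fresnelc_zeros(2)
+    >>> S, C = fresnel(z)
+    >>> np.allclose(C, 0, atol=1e-10)
+    True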
+
+    """
+    if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
+        raise ValueError("Argument must be positive scalar integer.")
+    return _specfun.fcszo(1, nt)
+
+
+def fresnels_zeros(nt):
+    """Compute nt complex zeros of sine Fresnel integral S(z).
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    """
+    if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
+        raise ValueError("Argument must be positive scalar integer.")
+    return _specfun.fcszo(2, nt)
+
+
+def fresnel_zeros(nt):
+    """Compute nt complex zeros of sine and cosine Fresnel integrals S(z) and C(z).
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    """
+    if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
+        raise ValueError("Argument must be positive scalar integer.")
+    return _specfun.fcszo(2, nt), _specfun.fcszo(1, nt)
+
+
+def assoc_laguerre(x, n, k=0.0):
+    """Compute the generalized (associated) Laguerre polynomial of degree n and order k.
+
+    The polynomial :math:`L^{(k)}_n(x)` is orthogonal over ``[0, inf)``,
+    with weighting function ``exp(-x) * x**k`` with ``k > -1``.
+
+    Notes
+    -----
+    `assoc_laguerre` is a simple wrapper around `eval_genlaguerre`, with
+    reversed argument order ``(x, n, k=0.0) --> (n, k, x)``.
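+
+    Examples
+    --------
+    A quick check of the argument reordering described above:
+
+    >>> from scipy.special import assoc_laguerre, eval_genlaguerre
+    >>> assoc_laguerre(0.5, 3, k=1.0) == eval_genlaguerre(3, 1.0, 0.5)
+    True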
+
+    """
+    return _ufuncs.eval_genlaguerre(n, k, x)
+
+
+digamma = psi
+
+
+def polygamma(n, x):
+    r"""Polygamma functions.
+
+    Defined as :math:`\psi^{(n)}(x)` where :math:`\psi` is the
+    `digamma` function. See [dlmf]_ for details.
+
+    Parameters
+    ----------
+    n : array_like
+        The order of the derivative of the digamma function; must be
+        integral
+    x : array_like
+        Real valued input
+
+    Returns
+    -------
+    ndarray
+        Function results
+
+    See Also
+    --------
+    digamma
+
+    References
+    ----------
+    .. [dlmf] NIST, Digital Library of Mathematical Functions,
+        https://dlmf.nist.gov/5.15
+
+    Examples
+    --------
+    >>> from scipy import special
+    >>> x = [2, 3, 25.5]
+    >>> special.polygamma(1, x)
+    array([ 0.64493407,  0.39493407,  0.03999467])
+    >>> special.polygamma(0, x) == special.psi(x)
+    array([ True,  True,  True])
+
+    """
+    n, x = asarray(n), asarray(x)
+    fac2 = (-1.0)**(n+1) * gamma(n+1.0) * zeta(n+1, x)
+    return where(n == 0, psi(x), fac2)
+
+
+def mathieu_even_coef(m, q):
+    r"""Fourier coefficients for even Mathieu and modified Mathieu functions.
+
+    The Fourier series of the even solutions of the Mathieu differential
+    equation are of the form
+
+    .. math:: \mathrm{ce}_{2n}(z, q) = \sum_{k=0}^{\infty} A_{(2n)}^{(2k)} \cos 2kz
+
+    .. math:: \mathrm{ce}_{2n+1}(z, q) = \sum_{k=0}^{\infty} A_{(2n+1)}^{(2k+1)} \cos (2k+1)z
+
+    This function returns the coefficients :math:`A_{(2n)}^{(2k)}` for even
+    input m=2n, and the coefficients :math:`A_{(2n+1)}^{(2k+1)}` for odd input
+    m=2n+1.
+
+    Parameters
+    ----------
+    m : int
+        Order of Mathieu functions.  Must be non-negative.
+    q : float (>=0)
+        Parameter of Mathieu functions.  Must be non-negative.
+
+    Returns
+    -------
+    Ak : ndarray
+        Even or odd Fourier coefficients, corresponding to even or odd m.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+    .. [2] NIST Digital Library of Mathematical Functions
+           https://dlmf.nist.gov/28.4#i
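+
+    Examples
+    --------
+    A rough sanity check, assuming the coefficients are normalized
+    consistently with `mathieu_cem` (both wrap the same Fortran library):
+    at :math:`z = 0` every cosine term equals 1, so the coefficients
+    should sum to :math:`\mathrm{ce}_2(0, q)`.
+
+    >>> import numpy as np
+    >>> from scipy.special import mathieu_cem, mathieu_even_coef
+    >>> q = 1.0
+    >>> A = mathieu_even_coef(2, q)
+    >>> ce, _ = mathieu_cem(2, q, 0)  # the angle is given in degrees
+    >>> np.isclose(A.sum(), ce)
+    True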
+
+    """
+    if not (isscalar(m) and isscalar(q)):
+        raise ValueError("m and q must be scalars.")
+    if (q < 0):
+        raise ValueError("q >=0")
+    if (m != floor(m)) or (m < 0):
+        raise ValueError("m must be an integer >=0.")
+
+    if (q <= 1):
+        qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q
+    else:
+        qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q
+    km = int(qm + 0.5*m)
+    if km > 251:
+        warnings.warn("Too many predicted coefficients.", RuntimeWarning, 2)
+    kd = 1
+    m = int(floor(m))
+    if m % 2:
+        kd = 2
+
+    a = mathieu_a(m, q)
+    fc = _specfun.fcoef(kd, m, q, a)
+    return fc[:km]
+
+
+def mathieu_odd_coef(m, q):
+    r"""Fourier coefficients for even Mathieu and modified Mathieu functions.
+
+    The Fourier series of the odd solutions of the Mathieu differential
+    equation are of the form
+
+    .. math:: \mathrm{se}_{2n+1}(z, q) = \sum_{k=0}^{\infty} B_{(2n+1)}^{(2k+1)} \sin (2k+1)z
+
+    .. math:: \mathrm{se}_{2n+2}(z, q) = \sum_{k=0}^{\infty} B_{(2n+2)}^{(2k+2)} \sin (2k+2)z
+
+    This function returns the coefficients :math:`B_{(2n+2)}^{(2k+2)}` for even
+    input m=2n+2, and the coefficients :math:`B_{(2n+1)}^{(2k+1)}` for odd
+    input m=2n+1.
+
+    Parameters
+    ----------
+    m : int
+        Order of Mathieu functions.  Must be non-negative.
+    q : float (>=0)
+        Parameter of Mathieu functions.  Must be non-negative.
+
+    Returns
+    -------
+    Bk : ndarray
+        Even or odd Fourier coefficients, corresponding to even or odd m.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    """
+    if not (isscalar(m) and isscalar(q)):
+        raise ValueError("m and q must be scalars.")
+    if (q < 0):
+        raise ValueError("q >=0")
+    if (m != floor(m)) or (m <= 0):
+        raise ValueError("m must be an integer > 0")
+
+    if (q <= 1):
+        qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q
+    else:
+        qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q
+    km = int(qm + 0.5*m)
+    if km > 251:
+        warnings.warn("Too many predicted coefficients.", RuntimeWarning, 2)
+    kd = 4
+    m = int(floor(m))
+    if m % 2:
+        kd = 3
+
+    b = mathieu_b(m, q)
+    fc = _specfun.fcoef(kd, m, q, b)
+    return fc[:km]
+
+
+def lpmn(m, n, z):
+    """Sequence of associated Legendre functions of the first kind.
+
+    Computes the associated Legendre function of the first kind of order m and
+    degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``.
+    Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and
+    ``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.
+
+    This function takes a real argument ``z``. For complex arguments ``z``
+    use `clpmn` instead.
+
+    Parameters
+    ----------
+    m : int
+       ``|m| <= n``; the order of the Legendre function.
+    n : int
+       where ``n >= 0``; the degree of the Legendre function.  Often
+       called ``l`` (lower case L) in descriptions of the associated
+       Legendre function
+    z : float
+        Input value.
+
+    Returns
+    -------
+    Pmn_z : (m+1, n+1) array
+       Values for all orders 0..m and degrees 0..n
+    Pmn_d_z : (m+1, n+1) array
+       Derivatives for all orders 0..m and degrees 0..n
+
+    See Also
+    --------
+    clpmn: associated Legendre functions of the first kind for complex z
+
+    Notes
+    -----
+    In the interval (-1, 1), Ferrer's function of the first kind is
+    returned. The phase convention used for the intervals (1, inf)
+    and (-inf, -1) is such that the result is always real.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+    .. [2] NIST Digital Library of Mathematical Functions
+           https://dlmf.nist.gov/14.3
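+
+    Examples
+    --------
+    A small check against the first few Legendre polynomials
+    (:math:`P_0 = 1`, :math:`P_1 = z`, :math:`P_2 = (3z^2 - 1)/2`):
+
+    >>> import numpy as np
+    >>> from scipy.special import lpmn
+    >>> z = 0.5
+    >>> p, pd = lpmn(0, 2, z)
+    >>> np.allclose(p[0], [1.0, z, 0.5*(3*z**2 - 1)])
+    True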
+
+    """
+    if not isscalar(m) or (abs(m) > n):
+        raise ValueError("m must be <= n.")
+    if not isscalar(n) or (n < 0):
+        raise ValueError("n must be a non-negative integer.")
+    if not isscalar(z):
+        raise ValueError("z must be scalar.")
+    if iscomplex(z):
+        raise ValueError("Argument must be real. Use clpmn instead.")
+    if (m < 0):
+        mp = -m
+        mf, nf = mgrid[0:mp+1, 0:n+1]
+        with _ufuncs.errstate(all='ignore'):
+            if abs(z) < 1:
+                # Ferrer function; DLMF 14.9.3
+                fixarr = where(mf > nf, 0.0,
+                               (-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1))
+            else:
+                # Match to clpmn; DLMF 14.9.13
+                fixarr = where(mf > nf, 0.0, gamma(nf-mf+1) / gamma(nf+mf+1))
+    else:
+        mp = m
+    p, pd = _specfun.lpmn(mp, n, z)
+    if (m < 0):
+        p = p * fixarr
+        pd = pd * fixarr
+    return p, pd
+
+
+def clpmn(m, n, z, type=3):
+    """Associated Legendre function of the first kind for complex arguments.
+
+    Computes the associated Legendre function of the first kind of order m and
+    degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``.
+    Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and
+    ``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.
+
+    Parameters
+    ----------
+    m : int
+       ``|m| <= n``; the order of the Legendre function.
+    n : int
+       where ``n >= 0``; the degree of the Legendre function.  Often
+       called ``l`` (lower case L) in descriptions of the associated
+       Legendre function
+    z : float or complex
+        Input value.
+    type : int, optional
+       takes values 2 or 3
+       2: cut on the real axis ``|x| > 1``
+       3: cut on the real axis ``-1 < x < 1`` (default)
+
+    Returns
+    -------
+    Pmn_z : (m+1, n+1) array
+       Values for all orders ``0..m`` and degrees ``0..n``
+    Pmn_d_z : (m+1, n+1) array
+       Derivatives for all orders ``0..m`` and degrees ``0..n``
+
+    See Also
+    --------
+    lpmn: associated Legendre functions of the first kind for real z
+
+    Notes
+    -----
+    By default, i.e. for ``type=3``, phase conventions are chosen according
+    to [1]_ such that the function is analytic. The cut lies on the interval
+    (-1, 1). Approaching the cut from above or below in general yields a phase
+    factor with respect to Ferrer's function of the first kind
+    (cf. `lpmn`).
+
+    For ``type=2`` a cut at ``|x| > 1`` is chosen. Approaching the real values
+    on the interval (-1, 1) in the complex plane yields Ferrer's function
+    of the first kind.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+    .. [2] NIST Digital Library of Mathematical Functions
+           https://dlmf.nist.gov/14.21
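+
+    Examples
+    --------
+    A small illustration of the ``type=2`` convention described above: on
+    the real interval (-1, 1) the values should match Ferrer's function as
+    computed by `lpmn` (illustrative only):
+
+    >>> import numpy as np
+    >>> from scipy.special import clpmn, lpmn
+    >>> p_c, _ = clpmn(2, 2, 0.5, type=2)
+    >>> p_r, _ = lpmn(2, 2, 0.5)
+    >>> np.allclose(p_c.real, p_r)
+    True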
+
+    """
+    if not isscalar(m) or (abs(m) > n):
+        raise ValueError("m must be <= n.")
+    if not isscalar(n) or (n < 0):
+        raise ValueError("n must be a non-negative integer.")
+    if not isscalar(z):
+        raise ValueError("z must be scalar.")
+    if not (type == 2 or type == 3):
+        raise ValueError("type must be either 2 or 3.")
+    if (m < 0):
+        mp = -m
+        mf, nf = mgrid[0:mp+1, 0:n+1]
+        with _ufuncs.errstate(all='ignore'):
+            if type == 2:
+                fixarr = where(mf > nf, 0.0,
+                               (-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1))
+            else:
+                fixarr = where(mf > nf, 0.0, gamma(nf-mf+1) / gamma(nf+mf+1))
+    else:
+        mp = m
+    p, pd = _specfun.clpmn(mp, n, real(z), imag(z), type)
+    if (m < 0):
+        p = p * fixarr
+        pd = pd * fixarr
+    return p, pd
+
+
+def lqmn(m, n, z):
+    """Sequence of associated Legendre functions of the second kind.
+
+    Computes the associated Legendre function of the second kind of order m and
+    degree n, ``Qmn(z)`` = :math:`Q_n^m(z)`, and its derivative, ``Qmn'(z)``.
+    Returns two arrays of size ``(m+1, n+1)`` containing ``Qmn(z)`` and
+    ``Qmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.
+
+    Parameters
+    ----------
+    m : int
+       ``|m| <= n``; the order of the Legendre function.
+    n : int
+       where ``n >= 0``; the degree of the Legendre function.  Often
+       called ``l`` (lower case L) in descriptions of the associated
+       Legendre function
+    z : complex
+        Input value.
+
+    Returns
+    -------
+    Qmn_z : (m+1, n+1) array
+       Values for all orders 0..m and degrees 0..n
+    Qmn_d_z : (m+1, n+1) array
+       Derivatives for all orders 0..m and degrees 0..n
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
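+
+    Examples
+    --------
+    A small check against the closed form ``Q_0(z) = arctanh(z)``, which
+    holds on the interval (-1, 1):
+
+    >>> import numpy as np
+    >>> from scipy.special import lqmn
+    >>> z = 0.5
+    >>> q, qd = lqmn(0, 1, z)
+    >>> np.allclose(q[0, 0], np.arctanh(z))
+    True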
+
+    """
+    if not isscalar(m) or (m < 0):
+        raise ValueError("m must be a non-negative integer.")
+    if not isscalar(n) or (n < 0):
+        raise ValueError("n must be a non-negative integer.")
+    if not isscalar(z):
+        raise ValueError("z must be scalar.")
+    m = int(m)
+    n = int(n)
+
+    # Ensure neither m nor n == 0
+    mm = max(1, m)
+    nn = max(1, n)
+
+    if iscomplex(z):
+        q, qd = _specfun.clqmn(mm, nn, z)
+    else:
+        q, qd = _specfun.lqmn(mm, nn, z)
+    return q[:(m+1), :(n+1)], qd[:(m+1), :(n+1)]
+
+
+def bernoulli(n):
+    """Bernoulli numbers B0..Bn (inclusive).
+
+    Parameters
+    ----------
+    n : int
+        The highest index of the Bernoulli number to be returned.
+
+    Returns
+    -------
+    ndarray
+        The Bernoulli numbers ``[B(0), B(1), ..., B(n)]``.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+    .. [2] "Bernoulli number", Wikipedia, https://en.wikipedia.org/wiki/Bernoulli_number
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.special import bernoulli, zeta
+    >>> bernoulli(4)
+    array([ 1.        , -0.5       ,  0.16666667,  0.        , -0.03333333])
+
+    The Wikipedia article ([2]_) points out the relationship between the
+    Bernoulli numbers and the zeta function, ``B_n^+ = -n * zeta(1 - n)``
+    for ``n > 0``:
+
+    >>> n = np.arange(1, 5)
+    >>> -n * zeta(1 - n)
+    array([ 0.5       ,  0.16666667, -0.        , -0.03333333])
+
+    Note that, in the notation used in the Wikipedia article,
+    `bernoulli` computes ``B_n^-`` (i.e., it uses the convention that
+    ``B_1`` is -1/2).  The relation given above is for ``B_n^+``, so the
+    sign of 0.5 does not match the output of ``bernoulli(4)``.
+
+    """
+    if not isscalar(n) or (n < 0):
+        raise ValueError("n must be a non-negative integer.")
+    n = int(n)
+    if (n < 2):
+        n1 = 2
+    else:
+        n1 = n
+    return _specfun.bernob(int(n1))[:(n+1)]
+
+
+def euler(n):
+    """Euler numbers E(0), E(1), ..., E(n).
+
+    The Euler numbers [1]_ are also known as the secant numbers.
+
+    Because ``euler(n)`` returns floating point values, it does not give
+    exact values for large `n`.  The first inexact value is E(22).
+
+    Parameters
+    ----------
+    n : int
+        The highest index of the Euler number to be returned.
+
+    Returns
+    -------
+    ndarray
+        The Euler numbers [E(0), E(1), ..., E(n)].
+        The odd Euler numbers, which are all zero, are included.
+
+    References
+    ----------
+    .. [1] Sequence A122045, The On-Line Encyclopedia of Integer Sequences,
+           https://oeis.org/A122045
+    .. [2] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.special import euler
+    >>> euler(6)
+    array([  1.,   0.,  -1.,   0.,   5.,   0., -61.])
+
+    >>> euler(13).astype(np.int64)
+    array([      1,       0,      -1,       0,       5,       0,     -61,
+                 0,    1385,       0,  -50521,       0, 2702765,       0])
+
+    >>> euler(22)[-1]  # Exact value of E(22) is -69348874393137901.
+    -69348874393137976.0
+
+    """
+    if not isscalar(n) or (n < 0):
+        raise ValueError("n must be a non-negative integer.")
+    n = int(n)
+    if (n < 2):
+        n1 = 2
+    else:
+        n1 = n
+    return _specfun.eulerb(n1)[:(n+1)]
+
+
+def lpn(n, z):
+    """Legendre function of the first kind.
+
+    Compute sequence of Legendre functions of the first kind (polynomials),
+    Pn(z) and derivatives for all degrees from 0 to n (inclusive).
+
+    See also special.legendre for polynomial class.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
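+
+    Examples
+    --------
+    A small check against the first few Legendre polynomials:
+
+    >>> import numpy as np
+    >>> from scipy.special import lpn
+    >>> z = 0.5
+    >>> pn, pd = lpn(2, z)
+    >>> np.allclose(pn, [1.0, z, 0.5*(3*z**2 - 1)])
+    True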
+
+    """
+    if not (isscalar(n) and isscalar(z)):
+        raise ValueError("arguments must be scalars.")
+    n = _nonneg_int_or_fail(n, 'n', strict=False)
+    if (n < 1):
+        n1 = 1
+    else:
+        n1 = n
+    if iscomplex(z):
+        pn, pd = _specfun.clpn(n1, z)
+    else:
+        pn, pd = _specfun.lpn(n1, z)
+    return pn[:(n+1)], pd[:(n+1)]
+
+
+def lqn(n, z):
+    """Legendre function of the second kind.
+
+    Compute sequence of Legendre functions of the second kind, Qn(z) and
+    derivatives for all degrees from 0 to n (inclusive).
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
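+
+    Examples
+    --------
+    A small check against the closed form ``Q_0(z) = arctanh(z)``, which
+    holds on the interval (-1, 1):
+
+    >>> import numpy as np
+    >>> from scipy.special import lqn
+    >>> qn, qd = lqn(1, 0.5)
+    >>> np.allclose(qn[0], np.arctanh(0.5))
+    True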
+
+    """
+    if not (isscalar(n) and isscalar(z)):
+        raise ValueError("arguments must be scalars.")
+    n = _nonneg_int_or_fail(n, 'n', strict=False)
+    if (n < 1):
+        n1 = 1
+    else:
+        n1 = n
+    if iscomplex(z):
+        qn, qd = _specfun.clqn(n1, z)
+    else:
+        qn, qd = _specfun.lqnb(n1, z)
+    return qn[:(n+1)], qd[:(n+1)]
+
+
+def ai_zeros(nt):
+    """
+    Compute `nt` zeros and values of the Airy function Ai and its derivative.
+
+    Computes the first `nt` zeros, `a`, of the Airy function Ai(x);
+    first `nt` zeros, `ap`, of the derivative of the Airy function Ai'(x);
+    the corresponding values Ai(a');
+    and the corresponding values Ai'(a).
+
+    Parameters
+    ----------
+    nt : int
+        Number of zeros to compute
+
+    Returns
+    -------
+    a : ndarray
+        First `nt` zeros of Ai(x)
+    ap : ndarray
+        First `nt` zeros of Ai'(x)
+    ai : ndarray
+        Values of Ai(x) evaluated at first `nt` zeros of Ai'(x)
+    aip : ndarray
+        Values of Ai'(x) evaluated at first `nt` zeros of Ai(x)
+
+    Examples
+    --------
+    >>> from scipy import special
+    >>> a, ap, ai, aip = special.ai_zeros(3)
+    >>> a
+    array([-2.33810741, -4.08794944, -5.52055983])
+    >>> ap
+    array([-1.01879297, -3.24819758, -4.82009921])
+    >>> ai
+    array([ 0.53565666, -0.41901548,  0.38040647])
+    >>> aip
+    array([ 0.70121082, -0.80311137,  0.86520403])
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    """
+    kf = 1
+    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
+        raise ValueError("nt must be a positive integer scalar.")
+    return _specfun.airyzo(nt, kf)
+
+
+def bi_zeros(nt):
+    """
+    Compute `nt` zeros and values of the Airy function Bi and its derivative.
+
+    Computes the first `nt` zeros, b, of the Airy function Bi(x);
+    first `nt` zeros, b', of the derivative of the Airy function Bi'(x);
+    the corresponding values Bi(b');
+    and the corresponding values Bi'(b).
+
+    Parameters
+    ----------
+    nt : int
+        Number of zeros to compute
+
+    Returns
+    -------
+    b : ndarray
+        First `nt` zeros of Bi(x)
+    bp : ndarray
+        First `nt` zeros of Bi'(x)
+    bi : ndarray
+        Values of Bi(x) evaluated at first `nt` zeros of Bi'(x)
+    bip : ndarray
+        Values of Bi'(x) evaluated at first `nt` zeros of Bi(x)
+
+    Examples
+    --------
+    >>> from scipy import special
+    >>> b, bp, bi, bip = special.bi_zeros(3)
+    >>> b
+    array([-1.17371322, -3.2710933 , -4.83073784])
+    >>> bp
+    array([-2.29443968, -4.07315509, -5.51239573])
+    >>> bi
+    array([-0.45494438,  0.39652284, -0.36796916])
+    >>> bip
+    array([ 0.60195789, -0.76031014,  0.83699101])
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    """
+    kf = 2
+    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
+        raise ValueError("nt must be a positive integer scalar.")
+    return _specfun.airyzo(nt, kf)
+
+
+def lmbda(v, x):
+    r"""Jahnke-Emden Lambda function, Lambdav(x).
+
+    This function is defined as [2]_,
+
+    .. math:: \Lambda_v(x) = \Gamma(v+1) \frac{J_v(x)}{(x/2)^v},
+
+    where :math:`\Gamma` is the gamma function and :math:`J_v` is the
+    Bessel function of the first kind.
+
+    Parameters
+    ----------
+    v : float
+        Order of the Lambda function
+    x : float
+        Value at which to evaluate the function and derivatives
+
+    Returns
+    -------
+    vl : ndarray
+        Values of Lambda_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
+    dl : ndarray
+        Derivatives Lambda_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+    .. [2] Jahnke, E. and Emde, F. "Tables of Functions with Formulae and
+           Curves" (4th ed.), Dover, 1945
+    """
+    if not (isscalar(v) and isscalar(x)):
+        raise ValueError("arguments must be scalars.")
+    if (v < 0):
+        raise ValueError("argument must be > 0.")
+    n = int(v)
+    v0 = v - n
+    if (n < 1):
+        n1 = 1
+    else:
+        n1 = n
+    v1 = n1 + v0
+    if (v != floor(v)):
+        vm, vl, dl = _specfun.lamv(v1, x)
+    else:
+        vm, vl, dl = _specfun.lamn(v1, x)
+    return vl[:(n+1)], dl[:(n+1)]
+
+
+def pbdv_seq(v, x):
+    """Parabolic cylinder functions Dv(x) and derivatives.
+
+    Parameters
+    ----------
+    v : float
+        Order of the parabolic cylinder function
+    x : float
+        Value at which to evaluate the function and derivatives
+
+    Returns
+    -------
+    dv : ndarray
+        Values of D_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
+    dp : ndarray
+        Derivatives D_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 13.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
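+
+    Examples
+    --------
+    A quick cross-check against `pbdv`, which evaluates a single order
+    (illustrative only): the last entry of the sequence is :math:`D_v(x)`.
+
+    >>> import numpy as np
+    >>> from scipy.special import pbdv, pbdv_seq
+    >>> dv, dp = pbdv_seq(1.5, 1.0)
+    >>> np.allclose(dv[-1], pbdv(1.5, 1.0)[0])
+    True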
+
+    """
+    if not (isscalar(v) and isscalar(x)):
+        raise ValueError("arguments must be scalars.")
+    n = int(v)
+    v0 = v-n
+    if (n < 1):
+        n1 = 1
+    else:
+        n1 = n
+    v1 = n1 + v0
+    dv, dp, pdf, pdd = _specfun.pbdv(v1, x)
+    return dv[:n1+1], dp[:n1+1]
+
+
+def pbvv_seq(v, x):
+    """Parabolic cylinder functions Vv(x) and derivatives.
+
+    Parameters
+    ----------
+    v : float
+        Order of the parabolic cylinder function
+    x : float
+        Value at which to evaluate the function and derivatives
+
+    Returns
+    -------
+    dv : ndarray
+        Values of V_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
+    dp : ndarray
+        Derivatives V_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 13.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
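+
+    Examples
+    --------
+    A quick cross-check against `pbvv`, which evaluates a single order
+    (illustrative only): the last entry of the sequence is :math:`V_v(x)`.
+
+    >>> import numpy as np
+    >>> from scipy.special import pbvv, pbvv_seq
+    >>> dv, dp = pbvv_seq(1.5, 1.0)
+    >>> np.allclose(dv[-1], pbvv(1.5, 1.0)[0])
+    True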
+
+    """
+    if not (isscalar(v) and isscalar(x)):
+        raise ValueError("arguments must be scalars.")
+    n = int(v)
+    v0 = v-n
+    if (n <= 1):
+        n1 = 1
+    else:
+        n1 = n
+    v1 = n1 + v0
+    dv, dp, pdf, pdd = _specfun.pbvv(v1, x)
+    return dv[:n1+1], dp[:n1+1]
+
+
+def pbdn_seq(n, z):
+    """Parabolic cylinder functions Dn(z) and derivatives.
+
+    Parameters
+    ----------
+    n : int
+        Order of the parabolic cylinder function
+    z : complex
+        Value at which to evaluate the function and derivatives
+
+    Returns
+    -------
+    dv : ndarray
+        Values of D_i(z), for i=0, ..., i=n.
+    dp : ndarray
+        Derivatives D_i'(z), for i=0, ..., i=n.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996, chapter 13.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
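+
+    Examples
+    --------
+    At a real point the values should agree with `pbdv` for integer
+    orders (a rough check, illustrative only):
+
+    >>> import numpy as np
+    >>> from scipy.special import pbdn_seq, pbdv
+    >>> dv, dp = pbdn_seq(2, 1 + 0j)
+    >>> np.allclose(dv[-1], pbdv(2, 1.0)[0])
+    True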
+
+    """
+    if not (isscalar(n) and isscalar(z)):
+        raise ValueError("arguments must be scalars.")
+    if (floor(n) != n):
+        raise ValueError("n must be an integer.")
+    if (abs(n) <= 1):
+        n1 = 1
+    else:
+        n1 = n
+    cpb, cpd = _specfun.cpbdn(n1, z)
+    return cpb[:n1+1], cpd[:n1+1]
+
+
+def ber_zeros(nt):
+    """Compute nt zeros of the Kelvin function ber.
+
+    Parameters
+    ----------
+    nt : int
+        Number of zeros to compute. Must be positive.
+
+    Returns
+    -------
+    ndarray
+        First `nt` zeros of the Kelvin function.
+
+    See Also
+    --------
+    ber
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
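+
+    Examples
+    --------
+    A quick check that `ber` is (close to) zero at the computed points
+    (illustrative only):
+
+    >>> import numpy as np
+    >>> from scipy.special import ber, ber_zeros
+    >>> z = ber_zeros(3)
+    >>> np.allclose(ber(z), 0, atol=1e-8)
+    True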
+
+    """
+    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
+        raise ValueError("nt must be positive integer scalar.")
+    return _specfun.klvnzo(nt, 1)
+
+
+def bei_zeros(nt):
+    """Compute nt zeros of the Kelvin function bei.
+
+    Parameters
+    ----------
+    nt : int
+        Number of zeros to compute. Must be positive.
+
+    Returns
+    -------
+    ndarray
+        First `nt` zeros of the Kelvin function.
+
+    See Also
+    --------
+    bei
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    """
+    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
+        raise ValueError("nt must be positive integer scalar.")
+    return _specfun.klvnzo(nt, 2)
+
+
+def ker_zeros(nt):
+    """Compute nt zeros of the Kelvin function ker.
+
+    Parameters
+    ----------
+    nt : int
+        Number of zeros to compute. Must be positive.
+
+    Returns
+    -------
+    ndarray
+        First `nt` zeros of the Kelvin function.
+
+    See Also
+    --------
+    ker
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    """
+    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
+        raise ValueError("nt must be positive integer scalar.")
+    return _specfun.klvnzo(nt, 3)
+
+
+def kei_zeros(nt):
+    """Compute nt zeros of the Kelvin function kei.
+
+    Parameters
+    ----------
+    nt : int
+        Number of zeros to compute. Must be positive.
+
+    Returns
+    -------
+    ndarray
+        First `nt` zeros of the Kelvin function.
+
+    See Also
+    --------
+    kei
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    """
+    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
+        raise ValueError("nt must be positive integer scalar.")
+    return _specfun.klvnzo(nt, 4)
+
+
+def berp_zeros(nt):
+    """Compute nt zeros of the derivative of the Kelvin function ber.
+
+    Parameters
+    ----------
+    nt : int
+        Number of zeros to compute. Must be positive.
+
+    Returns
+    -------
+    ndarray
+        First `nt` zeros of the derivative of the Kelvin function.
+
+    See Also
+    --------
+    ber, berp
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    """
+    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
+        raise ValueError("nt must be positive integer scalar.")
+    return _specfun.klvnzo(nt, 5)
+
+
+def beip_zeros(nt):
+    """Compute nt zeros of the derivative of the Kelvin function bei.
+
+    Parameters
+    ----------
+    nt : int
+        Number of zeros to compute. Must be positive.
+
+    Returns
+    -------
+    ndarray
+        First `nt` zeros of the derivative of the Kelvin function.
+
+    See Also
+    --------
+    bei, beip
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    """
+    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
+        raise ValueError("nt must be positive integer scalar.")
+    return _specfun.klvnzo(nt, 6)
+
+
+def kerp_zeros(nt):
+    """Compute nt zeros of the derivative of the Kelvin function ker.
+
+    Parameters
+    ----------
+    nt : int
+        Number of zeros to compute. Must be positive.
+
+    Returns
+    -------
+    ndarray
+        First `nt` zeros of the derivative of the Kelvin function.
+
+    See Also
+    --------
+    ker, kerp
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    """
+    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
+        raise ValueError("nt must be positive integer scalar.")
+    return _specfun.klvnzo(nt, 7)
+
+
+def keip_zeros(nt):
+    """Compute nt zeros of the derivative of the Kelvin function kei.
+
+    Parameters
+    ----------
+    nt : int
+        Number of zeros to compute. Must be positive.
+
+    Returns
+    -------
+    ndarray
+        First `nt` zeros of the derivative of the Kelvin function.
+
+    See Also
+    --------
+    kei, keip
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    """
+    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
+        raise ValueError("nt must be positive integer scalar.")
+    return _specfun.klvnzo(nt, 8)
+
+
+def kelvin_zeros(nt):
+    """Compute nt zeros of all Kelvin functions.
+
+    The zeros are returned in a length-8 tuple of arrays of length nt.
+    The tuple contains the arrays of zeros of (ber, bei, ker, kei, ber',
+    bei', ker', kei').
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
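+    Examples
+    --------
+    Each returned array contains genuine zeros of the corresponding
+    function; a quick illustrative check (the tolerance here is a loose
+    assumption):
+
+    >>> import numpy as np
+    >>> from scipy.special import ber, kelvin_zeros
+    >>> ber_z = kelvin_zeros(2)[0]
+    >>> np.allclose(ber(ber_z), 0, atol=1e-6)
+    True
+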
+    """
+    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
+        raise ValueError("nt must be positive integer scalar.")
+    return (_specfun.klvnzo(nt, 1),
+            _specfun.klvnzo(nt, 2),
+            _specfun.klvnzo(nt, 3),
+            _specfun.klvnzo(nt, 4),
+            _specfun.klvnzo(nt, 5),
+            _specfun.klvnzo(nt, 6),
+            _specfun.klvnzo(nt, 7),
+            _specfun.klvnzo(nt, 8))
+
+
+def pro_cv_seq(m, n, c):
+    """Characteristic values for prolate spheroidal wave functions.
+
+    Compute a sequence of characteristic values for the prolate
+    spheroidal wave functions for mode m, degrees n' = m..n, and
+    spheroidal parameter c.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
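+    Examples
+    --------
+    One characteristic value is returned per degree ``n' = m..n``, so the
+    result has length ``n - m + 1`` (illustrative usage):
+
+    >>> from scipy.special import pro_cv_seq
+    >>> pro_cv_seq(0, 3, 1.0).shape
+    (4,)
+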
+    """
+    if not (isscalar(m) and isscalar(n) and isscalar(c)):
+        raise ValueError("Arguments must be scalars.")
+    if (n != floor(n)) or (m != floor(m)):
+        raise ValueError("Modes must be integers.")
+    if (n-m > 199):
+        raise ValueError("Difference between n and m is too large.")
+    maxL = n-m+1
+    return _specfun.segv(m, n, c, 1)[1][:maxL]
+
+
+def obl_cv_seq(m, n, c):
+    """Characteristic values for oblate spheroidal wave functions.
+
+    Compute a sequence of characteristic values for the oblate
+    spheroidal wave functions for mode m, degrees n' = m..n, and
+    spheroidal parameter c.
+
+    References
+    ----------
+    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
+           Functions", John Wiley and Sons, 1996.
+           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
+
+    """
+    if not (isscalar(m) and isscalar(n) and isscalar(c)):
+        raise ValueError("Arguments must be scalars.")
+    if (n != floor(n)) or (m != floor(m)):
+        raise ValueError("Modes must be integers.")
+    if (n-m > 199):
+        raise ValueError("Difference between n and m is too large.")
+    maxL = n-m+1
+    return _specfun.segv(m, n, c, -1)[1][:maxL]
+
+
+def comb(N, k, exact=False, repetition=False, legacy=True):
+    """The number of combinations of N things taken k at a time.
+
+    This is often expressed as "N choose k".
+
+    Parameters
+    ----------
+    N : int, ndarray
+        Number of things.
+    k : int, ndarray
+        Number of elements taken.
+    exact : bool, optional
+        For integers, if `exact` is False, then floating point precision is
+        used, otherwise the result is computed exactly. For non-integers, if
+        `exact` is True, the inputs are currently cast to integers, though
+        this behavior is deprecated (see below).
+    repetition : bool, optional
+        If `repetition` is True, then the number of combinations with
+        repetition is computed.
+    legacy : bool, optional
+        If `legacy` is True and `exact` is True, then non-integral arguments
+        are cast to ints; if `legacy` is False, the result for non-integral
+        arguments is unaffected by the value of `exact`.
+
+        .. deprecated:: 1.9.0
+            Non-integer arguments are currently being cast to integers when
+            `exact=True`. This behaviour is deprecated and the default will
+            change to avoid the cast in SciPy 1.11.0. To opt into the future
+            behavior set `legacy=False`. If you want to keep the
+            argument-casting but silence this warning, cast your inputs
+            directly, e.g. ``comb(int(your_N), int(your_k), exact=True)``.
+
+    Returns
+    -------
+    val : int, float, ndarray
+        The total number of combinations.
+
+    See Also
+    --------
+    binom : Binomial coefficient considered as a function of two real
+            variables.
+
+    Notes
+    -----
+    - Array arguments are accepted only for the exact=False case.
+    - If N < 0 or k < 0, then 0 is returned.
+    - If k > N and repetition=False, then 0 is returned.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.special import comb
+    >>> k = np.array([3, 4])
+    >>> n = np.array([10, 10])
+    >>> comb(n, k, exact=False)
+    array([ 120.,  210.])
+    >>> comb(10, 3, exact=True)
+    120
+    >>> comb(10, 3, exact=True, repetition=True)
+    220
+
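+    With repetition, the count equals the ordinary binomial coefficient
+    ``C(N + k - 1, k)``, the identity used by the implementation:
+
+    >>> comb(10 + 3 - 1, 3, exact=True)
+    220
+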
+    """
+    if repetition:
+        return comb(N + k - 1, k, exact, legacy=legacy)
+    if exact:
+        if int(N) != N or int(k) != k:
+            if legacy:
+                warnings.warn(
+                    "Non-integer arguments are currently being cast to "
+                    "integers when exact=True. This behaviour is "
+                    "deprecated and the default will change to avoid the cast "
+                    "in SciPy 1.11.0. To opt into the future behavior set "
+                    "legacy=False. If you want to keep the argument-casting "
+                    "but silence this warning, cast your inputs directly, "
+                    "e.g. comb(int(your_N), int(your_k), exact=True).",
+                    DeprecationWarning, stacklevel=2
+                )
+            else:
+                return comb(N, k)
+        # _comb_int casts inputs to integers
+        return _comb_int(N, k)
+    else:
+        k, N = asarray(k), asarray(N)
+        cond = (k <= N) & (N >= 0) & (k >= 0)
+        vals = binom(N, k)
+        if isinstance(vals, np.ndarray):
+            vals[~cond] = 0
+        elif not cond:
+            vals = np.float64(0)
+        return vals
+
+
+def perm(N, k, exact=False):
+    """Permutations of N things taken k at a time, i.e., k-permutations of N.
+
+    It's also known as "partial permutations".
+
+    Parameters
+    ----------
+    N : int, ndarray
+        Number of things.
+    k : int, ndarray
+        Number of elements taken.
+    exact : bool, optional
+        If `exact` is False, then floating point precision is used, otherwise
+        exact long integer is computed.
+
+    Returns
+    -------
+    val : int, ndarray
+        The number of k-permutations of N.
+
+    Notes
+    -----
+    - Array arguments are accepted only for the exact=False case.
+    - If k > N, N < 0, or k < 0, then 0 is returned.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.special import perm
+    >>> k = np.array([3, 4])
+    >>> n = np.array([10, 10])
+    >>> perm(n, k)
+    array([  720.,  5040.])
+    >>> perm(10, 3, exact=True)
+    720
+
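+    For ``exact=False`` the count is computed with the Pochhammer symbol
+    ``poch(N - k + 1, k)``, which gives the same value (a quick check):
+
+    >>> perm(10, 3) == 10 * 9 * 8
+    True
+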
+    """
+    if exact:
+        if (k > N) or (N < 0) or (k < 0):
+            return 0
+        val = 1
+        for i in range(N - k + 1, N + 1):
+            val *= i
+        return val
+    else:
+        k, N = asarray(k), asarray(N)
+        cond = (k <= N) & (N >= 0) & (k >= 0)
+        vals = poch(N - k + 1, k)
+        if isinstance(vals, np.ndarray):
+            vals[~cond] = 0
+        elif not cond:
+            vals = np.float64(0)
+        return vals
+
+
+# https://stackoverflow.com/a/16327037
+def _range_prod(lo, hi):
+    """
+    Product of a range of numbers.
+
+    Returns the product of
+    lo * (lo+1) * (lo+2) * ... * (hi-2) * (hi-1) * hi
+    = hi! / (lo-1)!
+
+    Breaks into smaller products first for speed:
+    _range_prod(2, 9) = ((2*3)*(4*5))*((6*7)*(8*9))
+    """
+    if lo + 1 < hi:
+        mid = (hi + lo) // 2
+        return _range_prod(lo, mid) * _range_prod(mid + 1, hi)
+    if lo == hi:
+        return lo
+    return lo * hi
+
+
+def factorial(n, exact=False):
+    """
+    The factorial of a number or array of numbers.
+
+    The factorial of non-negative integer `n` is the product of all
+    positive integers less than or equal to `n`::
+
+        n! = n * (n - 1) * (n - 2) * ... * 1
+
+    Parameters
+    ----------
+    n : int or array_like of ints
+        Input values.  If ``n < 0``, the return value is 0.
+    exact : bool, optional
+        If True, calculate the answer exactly using long integer arithmetic.
+        If False, result is approximated in floating point rapidly using the
+        `gamma` function.
+        Default is False.
+
+    Returns
+    -------
+    nf : float or int or ndarray
+        Factorial of `n`, as integer or float depending on `exact`.
+
+    Notes
+    -----
+    For arrays with ``exact=True``, the factorial is computed only once, for
+    the largest input, with each other result computed in the process.
+    The output dtype is increased to ``int64`` or ``object`` if necessary.
+
+    With ``exact=False`` the factorial is approximated using the gamma
+    function:
+
+    .. math:: n! = \\Gamma(n+1)
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.special import factorial
+    >>> arr = np.array([3, 4, 5])
+    >>> factorial(arr, exact=False)
+    array([   6.,   24.,  120.])
+    >>> factorial(arr, exact=True)
+    array([  6,  24, 120])
+    >>> factorial(5, exact=True)
+    120
+
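+    As described in the Notes, the output dtype widens when the values no
+    longer fit in 64 bits (21! exceeds 2**63; illustrative check):
+
+    >>> factorial(np.array([5, 21]), exact=True).dtype
+    dtype('O')
+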
+    """
+    if exact:
+        if np.ndim(n) == 0:
+            if np.isnan(n):
+                return n
+            return 0 if n < 0 else math.factorial(n)
+        else:
+            n = asarray(n)
+            un = np.unique(n).astype(object)
+
+            # Convert to object array of long ints if np.int_ can't handle size
+            if np.isnan(n).any():
+                dt = float
+            elif un[-1] > 20:
+                dt = object
+            elif un[-1] > 12:
+                dt = np.int64
+            else:
+                dt = np.int_
+
+            out = np.empty_like(n, dtype=dt)
+
+            # Handle invalid/trivial values
+            # Ignore runtime warning when less operator used w/np.nan
+            with np.errstate(all='ignore'):
+                un = un[un > 1]
+                out[n < 2] = 1
+                out[n < 0] = 0
+
+            # Calculate products of each range of numbers
+            if un.size:
+                val = math.factorial(un[0])
+                out[n == un[0]] = val
+                for i in range(len(un) - 1):
+                    prev = un[i] + 1
+                    current = un[i + 1]
+                    val *= _range_prod(prev, current)
+                    out[n == current] = val
+
+            if np.isnan(n).any():
+                out = out.astype(np.float64)
+                out[np.isnan(n)] = n[np.isnan(n)]
+            return out
+    else:
+        out = _ufuncs._factorial(n)
+        return out
+
+
+def factorial2(n, exact=False):
+    """Double factorial.
+
+    This is the factorial with every second value skipped.  E.g., ``7!! = 7 * 5
+    * 3 * 1``.  It can be approximated numerically as::
+
+      n!! = special.gamma(n/2+1)*2**((n+1)/2)/sqrt(pi)  n odd
+          = 2**(n/2) * (n/2)!                           n even
+
+    Parameters
+    ----------
+    n : int or array_like
+        Calculate ``n!!``.  Arrays are only supported with `exact` set
+        to False. If ``n < -1``, the return value is 0.
+        Otherwise if ``n <= 0``, the return value is 1.
+    exact : bool, optional
+        The result can be approximated rapidly using the gamma-formula
+        above (default).  If `exact` is set to True, calculate the
+        answer exactly using integer arithmetic.
+
+    Returns
+    -------
+    nff : float or int
+        Double factorial of `n`, as an int or a float depending on
+        `exact`.
+
+    Examples
+    --------
+    >>> from scipy.special import factorial2
+    >>> factorial2(7, exact=False)
+    array(105.00000000000001)
+    >>> factorial2(7, exact=True)
+    105
+
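+    For an even argument every second value is skipped as well,
+    e.g. ``8!! = 8 * 6 * 4 * 2``:
+
+    >>> factorial2(8, exact=True)
+    384
+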
+    """
+    if exact:
+        if n < -1:
+            return 0
+        if n <= 0:
+            return 1
+        val = 1
+        for k in range(n, 0, -2):
+            val *= k
+        return val
+    else:
+        n = asarray(n)
+        vals = zeros(n.shape, 'd')
+        cond1 = (n % 2) & (n >= -1)
+        cond2 = (1-(n % 2)) & (n >= -1)
+        oddn = extract(cond1, n)
+        evenn = extract(cond2, n)
+        nd2o = oddn / 2.0
+        nd2e = evenn / 2.0
+        place(vals, cond1, gamma(nd2o + 1) / sqrt(pi) * pow(2.0, nd2o + 0.5))
+        place(vals, cond2, gamma(nd2e + 1) * pow(2.0, nd2e))
+        return vals
+
+
+def factorialk(n, k, exact=True):
+    """Multifactorial of n of order k, n(!!...!).
+
+    This is the multifactorial of n skipping k values.  For example,
+
+      factorialk(17, 4) = 17!!!! = 17 * 13 * 9 * 5 * 1
+
+    In particular, for any integer ``n``, we have
+
+      factorialk(n, 1) = factorial(n)
+
+      factorialk(n, 2) = factorial2(n)
+
+    Parameters
+    ----------
+    n : int
+        Calculate multifactorial. If ``n < 1 - k``, the return value is 0.
+        Otherwise if ``n <= 0``, the return value is 1.
+    k : int
+        Order of multifactorial.
+    exact : bool, optional
+        If exact is set to True, calculate the answer exactly using
+        integer arithmetic.
+
+    Returns
+    -------
+    val : int
+        Multifactorial of `n`.
+
+    Raises
+    ------
+    NotImplementedError
+        Raises when exact is False
+
+    Examples
+    --------
+    >>> from scipy.special import factorialk
+    >>> factorialk(5, 1, exact=True)
+    120
+    >>> factorialk(5, 3, exact=True)
+    10
+
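+    The example from the description, ``17!!!! = 17 * 13 * 9 * 5 * 1``:
+
+    >>> factorialk(17, 4)
+    9945
+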
+    """
+    if exact:
+        if n < 1-k:
+            return 0
+        if n <= 0:
+            return 1
+        val = 1
+        for j in range(n, 0, -k):
+            val = val*j
+        return val
+    else:
+        raise NotImplementedError
+
+
+def zeta(x, q=None, out=None):
+    r"""
+    Riemann or Hurwitz zeta function.
+
+    Parameters
+    ----------
+    x : array_like of float
+        Input data, must be real.
+    q : array_like of float, optional
+        Input data, must be real.  Defaults to Riemann zeta.
+    out : ndarray, optional
+        Output array for the computed values.
+
+    Returns
+    -------
+    out : array_like
+        Values of zeta(x).
+
+    Notes
+    -----
+    The two-argument version is the Hurwitz zeta function
+
+    .. math::
+
+        \zeta(x, q) = \sum_{k=0}^{\infty} \frac{1}{(k + q)^x};
+
+    see [dlmf]_ for details. The Riemann zeta function corresponds to
+    the case when ``q = 1``.
+
+    See Also
+    --------
+    zetac
+
+    References
+    ----------
+    .. [dlmf] NIST, Digital Library of Mathematical Functions,
+        https://dlmf.nist.gov/25.11#i
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.special import zeta, polygamma, factorial
+
+    Some specific values:
+
+    >>> zeta(2), np.pi**2/6
+    (1.6449340668482266, 1.6449340668482264)
+
+    >>> zeta(4), np.pi**4/90
+    (1.0823232337111381, 1.082323233711138)
+
+    Relation to the `polygamma` function:
+
+    >>> m = 3
+    >>> x = 1.25
+    >>> polygamma(m, x)
+    array(2.782144009188397)
+    >>> (-1)**(m+1) * factorial(m) * zeta(m+1, x)
+    2.7821440091883969
+
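+    The Hurwitz form only shifts the summation, so ``zeta(x, 2)`` drops
+    the first term of the Riemann series (a quick consistency check):
+
+    >>> np.allclose(zeta(2, 2), zeta(2) - 1)
+    True
+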
+    """
+    if q is None:
+        return _ufuncs._riemann_zeta(x, out)
+    else:
+        return _ufuncs._zeta(x, q, out)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/_ellip_harm.py b/__packaged__/coreml/.python_dependencies/scipy/special/_ellip_harm.py
new file mode 100644
index 00000000..baaac4c7
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/_ellip_harm.py
@@ -0,0 +1,208 @@
+import numpy as np
+
+from ._ufuncs import _ellip_harm
+from ._ellip_harm_2 import _ellipsoid, _ellipsoid_norm
+
+
+def ellip_harm(h2, k2, n, p, s, signm=1, signn=1):
+    r"""
+    Ellipsoidal harmonic functions E^p_n(l)
+
+    These are also known as Lame functions of the first kind, and are
+    solutions to the Lame equation:
+
+    .. math:: (s^2 - h^2)(s^2 - k^2)E''(s) + s(2s^2 - h^2 - k^2)E'(s) + (a - q s^2)E(s) = 0
+
+    where :math:`q = (n+1)n` and :math:`a` is the eigenvalue (not
+    returned) corresponding to the solutions.
+
+    Parameters
+    ----------
+    h2 : float
+        ``h**2``
+    k2 : float
+        ``k**2``; should be larger than ``h**2``
+    n : int
+        Degree.
+    p : int
+        Order, can range between [1,2n+1].
+    s : float
+        Coordinate.
+    signm : {1, -1}, optional
+        Sign of prefactor of functions. Can be +/-1. See Notes.
+    signn : {1, -1}, optional
+        Sign of prefactor of functions. Can be +/-1. See Notes.
+
+    Returns
+    -------
+    E : float
+        the harmonic :math:`E^p_n(s)`
+
+    See Also
+    --------
+    ellip_harm_2, ellip_normal
+
+    Notes
+    -----
+    The geometric interpretation of the ellipsoidal functions is
+    explained in [2]_, [3]_, [4]_. The `signm` and `signn` arguments control the
+    sign of prefactors for functions according to their type::
+
+        K : +1
+        L : signm
+        M : signn
+        N : signm*signn
+
+    .. versionadded:: 0.15.0
+
+    References
+    ----------
+    .. [1] Digital Library of Mathematical Functions 29.12
+       https://dlmf.nist.gov/29.12
+    .. [2] Bardhan and Knepley, "Computational science and
+       re-discovery: open-source implementations of
+       ellipsoidal harmonics for problems in potential theory",
+       Comput. Sci. Disc. 5, 014006 (2012)
+       :doi:`10.1088/1749-4699/5/1/014006`.
+    .. [3] David J. and Dechambre P., "Computation of Ellipsoidal
+       Gravity Field Harmonics for small solar system bodies"
+       pp. 30-36, 2000
+    .. [4] George Dassios, "Ellipsoidal Harmonics: Theory and Applications"
+       pp. 418, 2012
+
+    Examples
+    --------
+    >>> from scipy.special import ellip_harm
+    >>> w = ellip_harm(5,8,1,1,2.5)
+    >>> w
+    2.5
+
+    Check that the functions indeed are solutions to the Lame equation:
+
+    >>> import numpy as np
+    >>> from scipy.interpolate import UnivariateSpline
+    >>> def eigenvalue(f, df, ddf):
+    ...     r = ((s**2 - h**2)*(s**2 - k**2)*ddf + s*(2*s**2 - h**2 - k**2)*df - n*(n+1)*s**2*f)/f
+    ...     return -r.mean(), r.std()
+    >>> s = np.linspace(0.1, 10, 200)
+    >>> k, h, n, p = 8.0, 2.2, 3, 2
+    >>> E = ellip_harm(h**2, k**2, n, p, s)
+    >>> E_spl = UnivariateSpline(s, E)
+    >>> a, a_err = eigenvalue(E_spl(s), E_spl(s,1), E_spl(s,2))
+    >>> a, a_err
+    (583.44366156701483, 6.4580890640310646e-11)
+
+    """
+    return _ellip_harm(h2, k2, n, p, s, signm, signn)
+
+
+_ellip_harm_2_vec = np.vectorize(_ellipsoid, otypes='d')
+
+
+def ellip_harm_2(h2, k2, n, p, s):
+    r"""
+    Ellipsoidal harmonic functions F^p_n(l)
+
+    These are also known as Lame functions of the second kind, and are
+    solutions to the Lame equation:
+
+    .. math:: (s^2 - h^2)(s^2 - k^2)F''(s) + s(2s^2 - h^2 - k^2)F'(s) + (a - q s^2)F(s) = 0
+
+    where :math:`q = (n+1)n` and :math:`a` is the eigenvalue (not
+    returned) corresponding to the solutions.
+
+    Parameters
+    ----------
+    h2 : float
+        ``h**2``
+    k2 : float
+        ``k**2``; should be larger than ``h**2``
+    n : int
+        Degree.
+    p : int
+        Order, can range between [1,2n+1].
+    s : float
+        Coordinate
+
+    Returns
+    -------
+    F : float
+        The harmonic :math:`F^p_n(s)`
+
+    Notes
+    -----
+    Lame functions of the second kind are related to the functions of the first kind:
+
+    .. math::
+
+       F^p_n(s)=(2n + 1)E^p_n(s)\int_{0}^{1/s}\frac{du}{(E^p_n(1/u))^2\sqrt{(1-u^2k^2)(1-u^2h^2)}}
+
+    .. versionadded:: 0.15.0
+
+    See Also
+    --------
+    ellip_harm, ellip_normal
+
+    Examples
+    --------
+    >>> from scipy.special import ellip_harm_2
+    >>> w = ellip_harm_2(5,8,2,1,10)
+    >>> w
+    0.00108056853382
+
+    """
+    with np.errstate(all='ignore'):
+        return _ellip_harm_2_vec(h2, k2, n, p, s)
+
+
+def _ellip_normal_vec(h2, k2, n, p):
+    return _ellipsoid_norm(h2, k2, n, p)
+
+
+_ellip_normal_vec = np.vectorize(_ellip_normal_vec, otypes='d')
+
+
+def ellip_normal(h2, k2, n, p):
+    r"""
+    Ellipsoidal harmonic normalization constants gamma^p_n
+
+    The normalization constant is defined as
+
+    .. math::
+
+       \gamma^p_n=8\int_{0}^{h}dx\int_{h}^{k}dy\frac{(y^2-x^2)(E^p_n(y)E^p_n(x))^2}{\sqrt{(k^2-y^2)(y^2-h^2)(h^2-x^2)(k^2-x^2)}}
+
+    Parameters
+    ----------
+    h2 : float
+        ``h**2``
+    k2 : float
+        ``k**2``; should be larger than ``h**2``
+    n : int
+        Degree.
+    p : int
+        Order, can range between [1,2n+1].
+
+    Returns
+    -------
+    gamma : float
+        The normalization constant :math:`\gamma^p_n`
+
+    See Also
+    --------
+    ellip_harm, ellip_harm_2
+
+    Notes
+    -----
+    .. versionadded:: 0.15.0
+
+    Examples
+    --------
+    >>> from scipy.special import ellip_normal
+    >>> w = ellip_normal(5,8,3,7)
+    >>> w
+    1723.38796997
+
+    """
+    with np.errstate(all='ignore'):
+        return _ellip_normal_vec(h2, k2, n, p)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/_lambertw.py b/__packaged__/coreml/.python_dependencies/scipy/special/_lambertw.py
new file mode 100644
index 00000000..f82a462c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/_lambertw.py
@@ -0,0 +1,106 @@
+from ._ufuncs import _lambertw
+
+
+def lambertw(z, k=0, tol=1e-8):
+    r"""
+    lambertw(z, k=0, tol=1e-8)
+
+    Lambert W function.
+
+    The Lambert W function `W(z)` is defined as the inverse function
+    of ``w * exp(w)``. In other words, the value of ``W(z)`` is
+    such that ``z = W(z) * exp(W(z))`` for any complex number
+    ``z``.
+
+    The Lambert W function is a multivalued function with infinitely
+    many branches. Each branch gives a separate solution of the
+    equation ``z = w exp(w)``. Here, the branches are indexed by the
+    integer `k`.
+
+    Parameters
+    ----------
+    z : array_like
+        Input argument.
+    k : int, optional
+        Branch index.
+    tol : float, optional
+        Evaluation tolerance.
+
+    Returns
+    -------
+    w : array
+        `w` will have the same shape as `z`.
+
+    Notes
+    -----
+    All branches are supported by `lambertw`:
+
+    * ``lambertw(z)`` gives the principal solution (branch 0)
+    * ``lambertw(z, k)`` gives the solution on branch `k`
+
+    The Lambert W function has two partially real branches: the
+    principal branch (`k = 0`) is real for real ``z > -1/e``, and the
+    ``k = -1`` branch is real for ``-1/e < z < 0``. All branches except
+    ``k = 0`` have a logarithmic singularity at ``z = 0``.
+
+    **Possible issues**
+
+    The evaluation can become inaccurate very close to the branch point
+    at ``-1/e``. In some corner cases, `lambertw` might currently
+    fail to converge, or can end up on the wrong branch.
+
+    **Algorithm**
+
+    Halley's iteration is used to invert ``w * exp(w)``, using a first-order
+    asymptotic approximation (``O(log(w))`` or ``O(w)``) as the initial estimate.
+
+    The definition, implementation and choice of branches is based on [2]_.
+
+    See Also
+    --------
+    wrightomega : the Wright Omega function
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Lambert_W_function
+    .. [2] Corless et al, "On the Lambert W function", Adv. Comp. Math. 5
+       (1996) 329-359.
+       https://cs.uwaterloo.ca/research/tr/1993/03/W.pdf
+
+    Examples
+    --------
+    The Lambert W function is the inverse of ``w exp(w)``:
+
+    >>> import numpy as np
+    >>> from scipy.special import lambertw
+    >>> w = lambertw(1)
+    >>> w
+    (0.56714329040978384+0j)
+    >>> w * np.exp(w)
+    (1.0+0j)
+
+    Any branch gives a valid inverse:
+
+    >>> w = lambertw(1, k=3)
+    >>> w
+    (-2.8535817554090377+17.113535539412148j)
+    >>> w*np.exp(w)
+    (1.0000000000000002+1.609823385706477e-15j)
+
+    **Applications to equation-solving**
+
+    The Lambert W function may be used to solve various kinds of
+    equations, such as finding the value of the infinite power
+    tower :math:`z^{z^{z^{\ldots}}}`:
+
+    >>> def tower(z, n):
+    ...     if n == 0:
+    ...         return z
+    ...     return z ** tower(z, n-1)
+    ...
+    >>> tower(0.5, 100)
+    0.641185744504986
+    >>> -lambertw(-np.log(0.5)) / np.log(0.5)
+    (0.64118574450498589+0j)
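+
+    The ``k = -1`` branch is real on ``-1/e < z < 0`` and likewise inverts
+    ``w exp(w)`` (illustrative check):
+
+    >>> w = lambertw(-0.25, k=-1)
+    >>> np.allclose(w * np.exp(w), -0.25)
+    True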
+    """
+    return _lambertw(z, k, tol)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/_logsumexp.py b/__packaged__/coreml/.python_dependencies/scipy/special/_logsumexp.py
new file mode 100644
index 00000000..7e8e1108
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/_logsumexp.py
@@ -0,0 +1,298 @@
+import numpy as np
+from scipy._lib._util import _asarray_validated
+
+__all__ = ["logsumexp", "softmax", "log_softmax"]
+
+
+def logsumexp(a, axis=None, b=None, keepdims=False, return_sign=False):
+    """Compute the log of the sum of exponentials of input elements.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    axis : None or int or tuple of ints, optional
+        Axis or axes over which the sum is taken. By default `axis` is None,
+        and all elements are summed.
+
+        .. versionadded:: 0.11.0
+    b : array-like, optional
+        Scaling factor for exp(`a`); must be of the same shape as `a` or
+        broadcastable to `a`. These values may be negative in order to
+        implement subtraction.
+
+        .. versionadded:: 0.12.0
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left in the
+        result as dimensions with size one. With this option, the result
+        will broadcast correctly against the original array.
+
+        .. versionadded:: 0.15.0
+    return_sign : bool, optional
+        If this is set to True, the result will be a pair containing sign
+        information; if False, results that are negative will be returned
+        as NaN. Default is False (no sign information).
+
+        .. versionadded:: 0.16.0
+
+    Returns
+    -------
+    res : ndarray
+        The result, ``np.log(np.sum(np.exp(a)))`` calculated in a numerically
+        more stable way. If `b` is given then ``np.log(np.sum(b*np.exp(a)))``
+        is returned.
+    sgn : ndarray
+        If return_sign is True, this will be an array of floating-point
+        numbers matching res, containing +1, 0, or -1 depending on the sign
+        of the result. If False, only one result is returned.
+
+    See Also
+    --------
+    numpy.logaddexp, numpy.logaddexp2
+
+    Notes
+    -----
+    NumPy has a logaddexp function which is very similar to `logsumexp`, but
+    only handles two arguments. `logaddexp.reduce` is similar to this
+    function, but may be less stable.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.special import logsumexp
+    >>> a = np.arange(10)
+    >>> logsumexp(a)
+    9.4586297444267107
+    >>> np.log(np.sum(np.exp(a)))
+    9.4586297444267107
+
+    With weights
+
+    >>> a = np.arange(10)
+    >>> b = np.arange(10, 0, -1)
+    >>> logsumexp(a, b=b)
+    9.9170178533034665
+    >>> np.log(np.sum(b*np.exp(a)))
+    9.9170178533034647
+
+    Returning a sign flag
+
+    >>> logsumexp([1,2],b=[1,-1],return_sign=True)
+    (1.5413248546129181, -1.0)
+
+    Notice that `logsumexp` does not directly support masked arrays. To use it
+    on a masked array, convert the mask into zero weights:
+
+    >>> a = np.ma.array([np.log(2), 2, np.log(3)],
+    ...                  mask=[False, True, False])
+    >>> b = (~a.mask).astype(int)
+    >>> logsumexp(a.data, b=b), np.log(5)
+    (1.6094379124341005, 1.6094379124341005)
+
+    """
+    a = _asarray_validated(a, check_finite=False)
+    if b is not None:
+        a, b = np.broadcast_arrays(a, b)
+        if np.any(b == 0):
+            a = a + 0.  # promote to at least float
+            a[b == 0] = -np.inf
+
+    a_max = np.amax(a, axis=axis, keepdims=True)
+
+    if a_max.ndim > 0:
+        a_max[~np.isfinite(a_max)] = 0
+    elif not np.isfinite(a_max):
+        a_max = 0
+
+    if b is not None:
+        b = np.asarray(b)
+        tmp = b * np.exp(a - a_max)
+    else:
+        tmp = np.exp(a - a_max)
+
+    # suppress warnings about log of zero
+    with np.errstate(divide='ignore'):
+        s = np.sum(tmp, axis=axis, keepdims=keepdims)
+        if return_sign:
+            sgn = np.sign(s)
+            s *= sgn  # /= makes more sense but we need zero -> zero
+        out = np.log(s)
+
+    if not keepdims:
+        a_max = np.squeeze(a_max, axis=axis)
+    out += a_max
+
+    if return_sign:
+        return out, sgn
+    else:
+        return out
+
+
+def softmax(x, axis=None):
+    r"""Compute the softmax function.
+
+    The softmax function transforms each element of a collection by
+    computing the exponential of each element divided by the sum of the
+    exponentials of all the elements. That is, if `x` is a one-dimensional
+    numpy array::
+
+        softmax(x) = np.exp(x)/sum(np.exp(x))
+
+    Parameters
+    ----------
+    x : array_like
+        Input array.
+    axis : int or tuple of ints, optional
+        Axis to compute values along. Default is None and softmax will be
+        computed over the entire array `x`.
+
+    Returns
+    -------
+    s : ndarray
+        An array the same shape as `x`. The result will sum to 1 along the
+        specified axis.
+
+    Notes
+    -----
+    The formula for the softmax function :math:`\sigma(x)` for a vector
+    :math:`x = \{x_0, x_1, ..., x_{n-1}\}` is
+
+    .. math:: \sigma(x)_j = \frac{e^{x_j}}{\sum_k e^{x_k}}
+
+    The `softmax` function is the gradient of `logsumexp`.
+
+    The implementation uses shifting to avoid overflow. See [1]_ for more
+    details.
+
+    .. versionadded:: 1.2.0
+
+    References
+    ----------
+    .. [1] P. Blanchard, D.J. Higham, N.J. Higham, "Accurately computing the
+       log-sum-exp and softmax functions", IMA Journal of Numerical Analysis,
+       Vol.41(4), :doi:`10.1093/imanum/draa038`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.special import softmax
+    >>> np.set_printoptions(precision=5)
+
+    >>> x = np.array([[1, 0.5, 0.2, 3],
+    ...               [1,  -1,   7, 3],
+    ...               [2,  12,  13, 3]])
+    ...
+
+    Compute the softmax transformation over the entire array.
+
+    >>> m = softmax(x)
+    >>> m
+    array([[  4.48309e-06,   2.71913e-06,   2.01438e-06,   3.31258e-05],
+           [  4.48309e-06,   6.06720e-07,   1.80861e-03,   3.31258e-05],
+           [  1.21863e-05,   2.68421e-01,   7.29644e-01,   3.31258e-05]])
+
+    >>> m.sum()
+    1.0
+
+    Compute the softmax transformation along the first axis (i.e., the
+    columns).
+
+    >>> m = softmax(x, axis=0)
+
+    >>> m
+    array([[  2.11942e-01,   1.01300e-05,   2.75394e-06,   3.33333e-01],
+           [  2.11942e-01,   2.26030e-06,   2.47262e-03,   3.33333e-01],
+           [  5.76117e-01,   9.99988e-01,   9.97525e-01,   3.33333e-01]])
+
+    >>> m.sum(axis=0)
+    array([ 1.,  1.,  1.,  1.])
+
+    Compute the softmax transformation along the second axis (i.e., the rows).
+
+    >>> m = softmax(x, axis=1)
+    >>> m
+    array([[  1.05877e-01,   6.42177e-02,   4.75736e-02,   7.82332e-01],
+           [  2.42746e-03,   3.28521e-04,   9.79307e-01,   1.79366e-02],
+           [  1.22094e-05,   2.68929e-01,   7.31025e-01,   3.31885e-05]])
+
+    >>> m.sum(axis=1)
+    array([ 1.,  1.,  1.])
+
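+    As noted above, `softmax` is the gradient of `logsumexp`; a forward
+    finite difference recovers it (tolerances here are loose assumptions):
+
+    >>> from scipy.special import logsumexp
+    >>> xx = np.array([0.5, -1.0, 2.0])
+    >>> eps = 1e-6
+    >>> g = [(logsumexp(xx + eps * np.eye(3)[i]) - logsumexp(xx)) / eps
+    ...      for i in range(3)]
+    >>> np.allclose(g, softmax(xx), atol=1e-5)
+    True
+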
+    """
+    x = _asarray_validated(x, check_finite=False)
+    x_max = np.amax(x, axis=axis, keepdims=True)
+    exp_x_shifted = np.exp(x - x_max)
+    return exp_x_shifted / np.sum(exp_x_shifted, axis=axis, keepdims=True)
+
+
+def log_softmax(x, axis=None):
+    r"""Compute the logarithm of the softmax function.
+
+    In principle::
+
+        log_softmax(x) = log(softmax(x))
+
+    but using a more accurate implementation.
+
+    Parameters
+    ----------
+    x : array_like
+        Input array.
+    axis : int or tuple of ints, optional
+        Axis to compute values along. Default is None and softmax will be
+        computed over the entire array `x`.
+
+    Returns
+    -------
+    s : ndarray or scalar
+        An array with the same shape as `x`. Exponential of the result will
+        sum to 1 along the specified axis. If `x` is a scalar, a scalar is
+        returned.
+
+    Notes
+    -----
+    `log_softmax` is more accurate than ``np.log(softmax(x))`` with inputs that
+    make `softmax` saturate (see examples below).
+
+    .. versionadded:: 1.5.0
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.special import log_softmax
+    >>> from scipy.special import softmax
+    >>> np.set_printoptions(precision=5)
+
+    >>> x = np.array([1000.0, 1.0])
+
+    >>> y = log_softmax(x)
+    >>> y
+    array([   0., -999.])
+
+    >>> with np.errstate(divide='ignore'):
+    ...   y = np.log(softmax(x))
+    ...
+    >>> y
+    array([  0., -inf])
+
+    """
+
+    x = _asarray_validated(x, check_finite=False)
+
+    x_max = np.amax(x, axis=axis, keepdims=True)
+
+    if x_max.ndim > 0:
+        x_max[~np.isfinite(x_max)] = 0
+    elif not np.isfinite(x_max):
+        x_max = 0
+
+    tmp = x - x_max
+    exp_tmp = np.exp(tmp)
+
+    # suppress warnings about log of zero
+    with np.errstate(divide='ignore'):
+        s = np.sum(exp_tmp, axis=axis, keepdims=True)
+        out = np.log(s)
+
+    out = tmp - out
+    return out
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/_mptestutils.py b/__packaged__/coreml/.python_dependencies/scipy/special/_mptestutils.py
new file mode 100644
index 00000000..c7d287ea
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/_mptestutils.py
@@ -0,0 +1,447 @@
+import os
+import sys
+import time
+from itertools import zip_longest
+
+import numpy as np
+from numpy.testing import assert_
+import pytest
+
+from scipy.special._testutils import assert_func_equal
+
+try:
+    import mpmath
+except ImportError:
+    pass
+
+
+# ------------------------------------------------------------------------------
+# Machinery for systematic tests with mpmath
+# ------------------------------------------------------------------------------
+
+class Arg:
+    """Generate a set of numbers on the real axis, concentrating on
+    'interesting' regions and covering all orders of magnitude.
+
+    """
+
+    def __init__(self, a=-np.inf, b=np.inf, inclusive_a=True, inclusive_b=True):
+        if a > b:
+            raise ValueError("a should be less than or equal to b")
+        if a == -np.inf:
+            a = -0.5*np.finfo(float).max
+        if b == np.inf:
+            b = 0.5*np.finfo(float).max
+        self.a, self.b = a, b
+
+        self.inclusive_a, self.inclusive_b = inclusive_a, inclusive_b
+
+    def _positive_values(self, a, b, n):
+        if a < 0:
+            raise ValueError("a should be positive")
+
+        # Try to put half of the points into a linspace between a and
+        # 10, and the other half into a logspace.
+        if n % 2 == 0:
+            nlogpts = n//2
+            nlinpts = nlogpts
+        else:
+            nlogpts = n//2
+            nlinpts = nlogpts + 1
+
+        if a >= 10:
+            # Outside of linspace range; just return a logspace.
+            pts = np.logspace(np.log10(a), np.log10(b), n)
+        elif a > 0 and b < 10:
+            # Outside of logspace range; just return a linspace
+            pts = np.linspace(a, b, n)
+        elif a > 0:
+            # Linspace between a and 10 and a logspace between 10 and
+            # b.
+            linpts = np.linspace(a, 10, nlinpts, endpoint=False)
+            logpts = np.logspace(1, np.log10(b), nlogpts)
+            pts = np.hstack((linpts, logpts))
+        elif a == 0 and b <= 10:
+            # Linspace between 0 and b and a logspace between 0 and
+            # the smallest positive point of the linspace
+            linpts = np.linspace(0, b, nlinpts)
+            if linpts.size > 1:
+                right = np.log10(linpts[1])
+            else:
+                right = -30
+            logpts = np.logspace(-30, right, nlogpts, endpoint=False)
+            pts = np.hstack((logpts, linpts))
+        else:
+            # Linspace between 0 and 10, logspace between 0 and the
+            # smallest positive point of the linspace, and a logspace
+            # between 10 and b.
+            if nlogpts % 2 == 0:
+                nlogpts1 = nlogpts//2
+                nlogpts2 = nlogpts1
+            else:
+                nlogpts1 = nlogpts//2
+                nlogpts2 = nlogpts1 + 1
+            linpts = np.linspace(0, 10, nlinpts, endpoint=False)
+            if linpts.size > 1:
+                right = np.log10(linpts[1])
+            else:
+                right = -30
+            logpts1 = np.logspace(-30, right, nlogpts1, endpoint=False)
+            logpts2 = np.logspace(1, np.log10(b), nlogpts2)
+            pts = np.hstack((logpts1, linpts, logpts2))
+
+        return np.sort(pts)
+
+    def values(self, n):
+        """Return an array containing n numbers."""
+        a, b = self.a, self.b
+        if a == b:
+            return np.zeros(n)
+
+        if not self.inclusive_a:
+            n += 1
+        if not self.inclusive_b:
+            n += 1
+
+        if n % 2 == 0:
+            n1 = n//2
+            n2 = n1
+        else:
+            n1 = n//2
+            n2 = n1 + 1
+
+        if a >= 0:
+            pospts = self._positive_values(a, b, n)
+            negpts = []
+        elif b <= 0:
+            pospts = []
+            negpts = -self._positive_values(-b, -a, n)
+        else:
+            pospts = self._positive_values(0, b, n1)
+            negpts = -self._positive_values(0, -a, n2 + 1)
+            # Don't want to get zero twice
+            negpts = negpts[1:]
+        pts = np.hstack((negpts[::-1], pospts))
+
+        if not self.inclusive_a:
+            pts = pts[1:]
+        if not self.inclusive_b:
+            pts = pts[:-1]
+        return pts
+
+
+class FixedArg:
+    def __init__(self, values):
+        self._values = np.asarray(values)
+
+    def values(self, n):
+        return self._values
+
+
+class ComplexArg:
+    def __init__(self, a=complex(-np.inf, -np.inf), b=complex(np.inf, np.inf)):
+        self.real = Arg(a.real, b.real)
+        self.imag = Arg(a.imag, b.imag)
+
+    def values(self, n):
+        m = int(np.floor(np.sqrt(n)))
+        x = self.real.values(m)
+        y = self.imag.values(m + 1)
+        return (x[:,None] + 1j*y[None,:]).ravel()
+
+
+class IntArg:
+    def __init__(self, a=-1000, b=1000):
+        self.a = a
+        self.b = b
+
+    def values(self, n):
+        v1 = Arg(self.a, self.b).values(max(1 + n//2, n-5)).astype(int)
+        v2 = np.arange(-5, 5)
+        v = np.unique(np.r_[v1, v2])
+        v = v[(v >= self.a) & (v < self.b)]
+        return v
+
+
+def get_args(argspec, n):
+    if isinstance(argspec, np.ndarray):
+        args = argspec.copy()
+    else:
+        nargs = len(argspec)
+        ms = np.asarray([1.5 if isinstance(spec, ComplexArg) else 1.0 for spec in argspec])
+        ms = (n**(ms/sum(ms))).astype(int) + 1
+
+        args = [spec.values(m) for spec, m in zip(argspec, ms)]
+        args = np.array(np.broadcast_arrays(*np.ix_(*args))).reshape(nargs, -1).T
+
+    return args
+
+
+class MpmathData:
+    def __init__(self, scipy_func, mpmath_func, arg_spec, name=None,
+                 dps=None, prec=None, n=None, rtol=1e-7, atol=1e-300,
+                 ignore_inf_sign=False, distinguish_nan_and_inf=True,
+                 nan_ok=True, param_filter=None):
+
+        # mpmath tests are really slow (see gh-6989).  Use a small number of
+        # points by default, increase back to 5000 (old default) if XSLOW is
+        # set
+        if n is None:
+            try:
+                is_xslow = int(os.environ.get('SCIPY_XSLOW', '0'))
+            except ValueError:
+                is_xslow = False
+
+            n = 5000 if is_xslow else 500
+
+        self.scipy_func = scipy_func
+        self.mpmath_func = mpmath_func
+        self.arg_spec = arg_spec
+        self.dps = dps
+        self.prec = prec
+        self.n = n
+        self.rtol = rtol
+        self.atol = atol
+        self.ignore_inf_sign = ignore_inf_sign
+        self.nan_ok = nan_ok
+        if isinstance(self.arg_spec, np.ndarray):
+            self.is_complex = np.issubdtype(self.arg_spec.dtype, np.complexfloating)
+        else:
+            self.is_complex = any([isinstance(arg, ComplexArg) for arg in self.arg_spec])
+        self.distinguish_nan_and_inf = distinguish_nan_and_inf
+        if not name or name == '':
+            name = getattr(scipy_func, '__name__', None)
+        if not name or name == '':
+            name = getattr(mpmath_func, '__name__', None)
+        self.name = name
+        self.param_filter = param_filter
+
+    def check(self):
+        np.random.seed(1234)
+
+        # Generate values for the arguments
+        argarr = get_args(self.arg_spec, self.n)
+
+        # Check
+        old_dps, old_prec = mpmath.mp.dps, mpmath.mp.prec
+        try:
+            if self.dps is not None:
+                dps_list = [self.dps]
+            else:
+                dps_list = [20]
+            if self.prec is not None:
+                mpmath.mp.prec = self.prec
+
+            # Proper casting of mpmath input and output types. Using
+            # native mpmath types as inputs gives improved precision
+            # in some cases.
+            if np.issubdtype(argarr.dtype, np.complexfloating):
+                pytype = mpc2complex
+
+                def mptype(x):
+                    return mpmath.mpc(complex(x))
+            else:
+                def mptype(x):
+                    return mpmath.mpf(float(x))
+
+                def pytype(x):
+                    if abs(x.imag) > 1e-16*(1 + abs(x.real)):
+                        return np.nan
+                    else:
+                        return mpf2float(x.real)
+
+            # Try out different dps until one (or none) works
+            for j, dps in enumerate(dps_list):
+                mpmath.mp.dps = dps
+
+                try:
+                    assert_func_equal(self.scipy_func,
+                                      lambda *a: pytype(self.mpmath_func(*map(mptype, a))),
+                                      argarr,
+                                      vectorized=False,
+                                      rtol=self.rtol, atol=self.atol,
+                                      ignore_inf_sign=self.ignore_inf_sign,
+                                      distinguish_nan_and_inf=self.distinguish_nan_and_inf,
+                                      nan_ok=self.nan_ok,
+                                      param_filter=self.param_filter)
+                    break
+                except AssertionError:
+                    if j >= len(dps_list)-1:
+                        # reraise the Exception
+                        tp, value, tb = sys.exc_info()
+                        if value.__traceback__ is not tb:
+                            raise value.with_traceback(tb)
+                        raise value
+        finally:
+            mpmath.mp.dps, mpmath.mp.prec = old_dps, old_prec
+
+    def __repr__(self):
+        if self.is_complex:
+            return "" % (self.name,)
+        else:
+            return "" % (self.name,)
+
+
+def assert_mpmath_equal(*a, **kw):
+    d = MpmathData(*a, **kw)
+    d.check()
+
+
+def nonfunctional_tooslow(func):
+    return pytest.mark.skip(reason="    Test not yet functional (too slow), needs more work.")(func)
+
+
+# ------------------------------------------------------------------------------
+# Tools for dealing with mpmath quirks
+# ------------------------------------------------------------------------------
+
+def mpf2float(x):
+    """
+    Convert an mpf to the nearest floating point number. Just using
+    float directly doesn't work because of results like this:
+
+    with mp.workdps(50):
+        float(mpf("0.99999999999999999")) = 0.9999999999999999
+
+    """
+    return float(mpmath.nstr(x, 17, min_fixed=0, max_fixed=0))
+
+
+def mpc2complex(x):
+    return complex(mpf2float(x.real), mpf2float(x.imag))
+
+
+def trace_args(func):
+    def tofloat(x):
+        if isinstance(x, mpmath.mpc):
+            return complex(x)
+        else:
+            return float(x)
+
+    def wrap(*a, **kw):
+        sys.stderr.write("%r: " % (tuple(map(tofloat, a)),))
+        sys.stderr.flush()
+        try:
+            r = func(*a, **kw)
+            sys.stderr.write("-> %r" % r)
+        finally:
+            sys.stderr.write("\n")
+            sys.stderr.flush()
+        return r
+    return wrap
+
+
+try:
+    import posix
+    import signal
+    POSIX = ('setitimer' in dir(signal))
+except ImportError:
+    POSIX = False
+
+
+class TimeoutError(Exception):
+    pass
+
+
+def time_limited(timeout=0.5, return_val=np.nan, use_sigalrm=True):
+    """
+    Decorator for setting a timeout for pure-Python functions.
+
+    If the function does not return within `timeout` seconds, the
+    value `return_val` is returned instead.
+
+    On POSIX this uses SIGALRM by default. On non-POSIX, settrace is
+    used. Do not use this with threads: the SIGALRM implementation
+    probably does not work well with them. The settrace implementation only
+    traces the current thread.
+
+    The settrace implementation slows down execution speed; a slowdown
+    by a factor of around 10 is typical.
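+
+    A minimal usage sketch (illustrative)::
+
+        @time_limited(timeout=0.1, return_val=np.nan)
+        def spin():
+            while True:
+                pass
+
+    ``spin()`` then returns ``nan`` after roughly 0.1 seconds instead of
+    hanging.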
+    """
+    if POSIX and use_sigalrm:
+        def sigalrm_handler(signum, frame):
+            raise TimeoutError()
+
+        def deco(func):
+            def wrap(*a, **kw):
+                old_handler = signal.signal(signal.SIGALRM, sigalrm_handler)
+                signal.setitimer(signal.ITIMER_REAL, timeout)
+                try:
+                    return func(*a, **kw)
+                except TimeoutError:
+                    return return_val
+                finally:
+                    signal.setitimer(signal.ITIMER_REAL, 0)
+                    signal.signal(signal.SIGALRM, old_handler)
+            return wrap
+    else:
+        def deco(func):
+            def wrap(*a, **kw):
+                start_time = time.time()
+
+                def trace(frame, event, arg):
+                    if time.time() - start_time > timeout:
+                        raise TimeoutError()
+                    return trace
+                sys.settrace(trace)
+                try:
+                    return func(*a, **kw)
+                except TimeoutError:
+                    sys.settrace(None)
+                    return return_val
+                finally:
+                    sys.settrace(None)
+            return wrap
+    return deco
+
+
+def exception_to_nan(func):
+    """Decorate function to return nan if it raises an exception"""
+    def wrap(*a, **kw):
+        try:
+            return func(*a, **kw)
+        except Exception:
+            return np.nan
+    return wrap
+
+
+def inf_to_nan(func):
+    """Decorate function to return nan if it returns inf"""
+    def wrap(*a, **kw):
+        v = func(*a, **kw)
+        if not np.isfinite(v):
+            return np.nan
+        return v
+    return wrap
+
+
+def mp_assert_allclose(res, std, atol=0, rtol=1e-17):
+    """
+    Compare lists of mpmath.mpf's or mpmath.mpc's directly so that it
+    can be done to higher precision than double.
+    """
+    failures = []
+    for k, (resval, stdval) in enumerate(zip_longest(res, std)):
+        if resval is None or stdval is None:
+            raise ValueError('Lengths of inputs res and std are not equal.')
+        if mpmath.fabs(resval - stdval) > atol + rtol*mpmath.fabs(stdval):
+            failures.append((k, resval, stdval))
+
+    nfail = len(failures)
+    if nfail > 0:
+        ndigits = int(abs(np.log10(rtol)))
+        msg = [""]
+        msg.append("Bad results ({} out of {}) for the following points:"
+                   .format(nfail, k + 1))
+        for k, resval, stdval in failures:
+            resrep = mpmath.nstr(resval, ndigits, min_fixed=0, max_fixed=0)
+            stdrep = mpmath.nstr(stdval, ndigits, min_fixed=0, max_fixed=0)
+            if stdval == 0:
+                rdiff = "inf"
+            else:
+                rdiff = mpmath.fabs((resval - stdval)/stdval)
+                rdiff = mpmath.nstr(rdiff, 3)
+            msg.append("{}: {} != {} (rdiff {})".format(k, resrep, stdrep,
+                                                        rdiff))
+        assert_(False, "\n".join(msg))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/_orthogonal.py b/__packaged__/coreml/.python_dependencies/scipy/special/_orthogonal.py
new file mode 100644
index 00000000..76985102
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/_orthogonal.py
@@ -0,0 +1,2557 @@
+"""
+A collection of functions to find the weights and abscissas for
+Gaussian Quadrature.
+
+These calculations are done by finding the eigenvalues of a
+tridiagonal matrix whose entries are dependent on the coefficients
+in the recursion formula for the orthogonal polynomials with the
+corresponding weighting function over the interval.
+
+Many recursion relations for orthogonal polynomials are given:
+
+.. math::
+
+    a1n f_{n+1} (x) = (a2n + a3n x) f_n (x) - a4n f_{n-1} (x)
+
+The recursion relation of interest is
+
+.. math::
+
+    P_{n+1} (x) = (x - A_n) P_n (x) - B_n P_{n-1} (x)
+
+where :math:`P` has a different normalization than :math:`f`.
+
+The coefficients can be found as:
+
+.. math::
+
+    A_n = -a2n / a3n
+    \\qquad
+    B_n = \\left( \\frac{a4n}{a3n} \\sqrt{\\frac{h_{n-1}}{h_n}} \\right)^2
+
+where
+
+.. math::
+
+    h_n = \\int_a^b w(x) f_n(x)^2 \\, dx
+
+assume:
+
+.. math::
+
+    P_0 (x) = 1
+    \\qquad
+    P_{-1} (x) = 0
+
+For the mathematical background, see [golub.welsch-1969-mathcomp]_ and
+[abramowitz.stegun-1965]_.
+
+References
+----------
+.. [golub.welsch-1969-mathcomp]
+   Golub, Gene H, and John H Welsch. 1969. Calculation of Gauss
+   Quadrature Rules. *Mathematics of Computation* 23, 221-230+s1--s10.
+
+.. [abramowitz.stegun-1965]
+   Abramowitz, Milton, and Irene A Stegun. (1965) *Handbook of
+   Mathematical Functions: with Formulas, Graphs, and Mathematical
+   Tables*. Gaithersburg, MD: National Bureau of Standards.
+   http://www.math.sfu.ca/~cbm/aands/
+
+.. [townsend.trogdon.olver-2014]
+   Townsend, A. and Trogdon, T. and Olver, S. (2014)
+   *Fast computation of Gauss quadrature nodes and
+   weights on the whole real line*. :arXiv:`1410.5286`.
+
+.. [townsend.trogdon.olver-2015]
+   Townsend, A. and Trogdon, T. and Olver, S. (2015)
+   *Fast computation of Gauss quadrature nodes and
+   weights on the whole real line*.
+   IMA Journal of Numerical Analysis
+   :doi:`10.1093/imanum/drv002`.
+"""
+#
+# Author:  Travis Oliphant 2000
+# Updated Sep. 2003 (fixed bugs --- tested to be accurate)
+
+# SciPy imports.
+import numpy as np
+from numpy import (exp, inf, pi, sqrt, floor, sin, cos, around,
+                   hstack, arccos, arange)
+from scipy import linalg
+from scipy.special import airy
+
+# Local imports.
+from . import _ufuncs
+_gam = _ufuncs.gamma
+# There is no .pyi file for _specfun
+from . import _specfun  # type: ignore
+
+_polyfuns = ['legendre', 'chebyt', 'chebyu', 'chebyc', 'chebys',
+             'jacobi', 'laguerre', 'genlaguerre', 'hermite',
+             'hermitenorm', 'gegenbauer', 'sh_legendre', 'sh_chebyt',
+             'sh_chebyu', 'sh_jacobi']
+
+# Correspondence between new and old names of root functions
+_rootfuns_map = {'roots_legendre': 'p_roots',
+                 'roots_chebyt': 't_roots',
+                 'roots_chebyu': 'u_roots',
+                 'roots_chebyc': 'c_roots',
+                 'roots_chebys': 's_roots',
+                 'roots_jacobi': 'j_roots',
+                 'roots_laguerre': 'l_roots',
+                 'roots_genlaguerre': 'la_roots',
+                 'roots_hermite': 'h_roots',
+                 'roots_hermitenorm': 'he_roots',
+                 'roots_gegenbauer': 'cg_roots',
+                 'roots_sh_legendre': 'ps_roots',
+                 'roots_sh_chebyt': 'ts_roots',
+                 'roots_sh_chebyu': 'us_roots',
+                 'roots_sh_jacobi': 'js_roots'}
+
+__all__ = _polyfuns + list(_rootfuns_map.keys())
+
+
+class orthopoly1d(np.poly1d):
+
+    def __init__(self, roots, weights=None, hn=1.0, kn=1.0, wfunc=None,
+                 limits=None, monic=False, eval_func=None):
+        equiv_weights = [weights[k] / wfunc(roots[k]) for
+                         k in range(len(roots))]
+        mu = sqrt(hn)
+        if monic:
+            evf = eval_func
+            if evf:
+                knn = kn
+                eval_func = lambda x: evf(x) / knn
+            mu = mu / abs(kn)
+            kn = 1.0
+
+        # compute coefficients from roots, then scale
+        poly = np.poly1d(roots, r=True)
+        np.poly1d.__init__(self, poly.coeffs * float(kn))
+
+        self.weights = np.array(list(zip(roots, weights, equiv_weights)))
+        self.weight_func = wfunc
+        self.limits = limits
+        self.normcoef = mu
+
+        # Note: eval_func will be discarded on arithmetic
+        self._eval_func = eval_func
+
+    def __call__(self, v):
+        if self._eval_func and not isinstance(v, np.poly1d):
+            return self._eval_func(v)
+        else:
+            return np.poly1d.__call__(self, v)
+
+    def _scale(self, p):
+        if p == 1.0:
+            return
+        self._coeffs *= p
+
+        evf = self._eval_func
+        if evf:
+            self._eval_func = lambda x: evf(x) * p
+        self.normcoef *= p
+
+
+def _gen_roots_and_weights(n, mu0, an_func, bn_func, f, df, symmetrize, mu):
+    """[x,w] = gen_roots_and_weights(n,an_func,sqrt_bn_func,mu)
+
+    Returns the roots (x) of an nth order orthogonal polynomial,
+    and weights (w) to use in appropriate Gaussian quadrature with that
+    orthogonal polynomial.
+
+    The polynomials have the recurrence relation
+          P_n+1(x) = (x - A_n) P_n(x) - B_n P_n-1(x)
+
+    an_func(n)          should return A_n
+    bn_func(n)          should return sqrt(B_n)
+    mu ( = h_0 )        is the integral of the weight over the orthogonal
+                        interval
+    """
+    k = np.arange(n, dtype='d')
+    c = np.zeros((2, n))
+    c[0,1:] = bn_func(k[1:])
+    c[1,:] = an_func(k)
+    x = linalg.eigvals_banded(c, overwrite_a_band=True)
+
+    # improve roots by one application of Newton's method
+    y = f(n, x)
+    dy = df(n, x)
+    x -= y/dy
+
+    # fm and dy may contain very large/small values, so we
+    # log-normalize them to maintain precision in the product fm*dy
+    fm = f(n-1, x)
+    log_fm = np.log(np.abs(fm))
+    log_dy = np.log(np.abs(dy))
+    fm /= np.exp((log_fm.max() + log_fm.min()) / 2.)
+    dy /= np.exp((log_dy.max() + log_dy.min()) / 2.)
+    w = 1.0 / (fm * dy)
+
+    if symmetrize:
+        w = (w + w[::-1]) / 2
+        x = (x - x[::-1]) / 2
+
+    w *= mu0 / w.sum()
+
+    if mu:
+        return x, w, mu0
+    else:
+        return x, w
+
+# Jacobi Polynomials 1               P^(alpha,beta)_n(x)
+
+
+def roots_jacobi(n, alpha, beta, mu=False):
+    r"""Gauss-Jacobi quadrature.
+
+    Compute the sample points and weights for Gauss-Jacobi
+    quadrature. The sample points are the roots of the nth degree
+    Jacobi polynomial, :math:`P^{\alpha, \beta}_n(x)`. These sample
+    points and weights correctly integrate polynomials of degree
+    :math:`2n - 1` or less over the interval :math:`[-1, 1]` with
+    weight function :math:`w(x) = (1 - x)^{\alpha} (1 +
+    x)^{\beta}`. See 22.2.1 in [AS]_ for details.
+
+    Parameters
+    ----------
+    n : int
+        quadrature order
+    alpha : float
+        alpha must be > -1
+    beta : float
+        beta must be > -1
+    mu : bool, optional
+        If True, return the sum of the weights, optional.
+
+    Returns
+    -------
+    x : ndarray
+        Sample points
+    w : ndarray
+        Weights
+    mu : float
+        Sum of the weights
+
+    See Also
+    --------
+    scipy.integrate.quadrature
+    scipy.integrate.fixed_quad
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
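+    Examples
+    --------
+    As a quick illustrative check (parameter values chosen arbitrarily),
+    a 3-point rule must integrate a quartic exactly against the weight
+    :math:`w(x) = 1 - x` (``alpha=1``, ``beta=0``), where
+    :math:`\int_{-1}^1 (1 - x) x^4 \, dx = 2/5`:
+
+    >>> import numpy as np
+    >>> from scipy.special import roots_jacobi
+    >>> x, w = roots_jacobi(3, 1, 0)
+    >>> np.allclose(np.sum(w * x**4), 2/5)
+    True
+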
+    """
+    m = int(n)
+    if n < 1 or n != m:
+        raise ValueError("n must be a positive integer.")
+    if alpha <= -1 or beta <= -1:
+        raise ValueError("alpha and beta must be greater than -1.")
+
+    if alpha == 0.0 and beta == 0.0:
+        return roots_legendre(m, mu)
+    if alpha == beta:
+        return roots_gegenbauer(m, alpha+0.5, mu)
+
+    if (alpha + beta) <= 1000:
+        mu0 = 2.0**(alpha+beta+1) * _ufuncs.beta(alpha+1, beta+1)
+    else:
+        # Avoid overflows in pow and beta for very large parameters
+        mu0 = np.exp((alpha + beta + 1) * np.log(2.0)
+                     + _ufuncs.betaln(alpha+1, beta+1))
+    a = alpha
+    b = beta
+    if a + b == 0.0:
+        an_func = lambda k: np.where(k == 0, (b-a)/(2+a+b), 0.0)
+    else:
+        an_func = lambda k: np.where(k == 0, (b-a)/(2+a+b),
+                  (b*b - a*a) / ((2.0*k+a+b)*(2.0*k+a+b+2)))
+
+    bn_func = lambda k: 2.0 / (2.0*k+a+b)*np.sqrt((k+a)*(k+b) / (2*k+a+b+1)) \
+              * np.where(k == 1, 1.0, np.sqrt(k*(k+a+b) / (2.0*k+a+b-1)))
+
+    f = lambda n, x: _ufuncs.eval_jacobi(n, a, b, x)
+    df = lambda n, x: (0.5 * (n + a + b + 1)
+                       * _ufuncs.eval_jacobi(n-1, a+1, b+1, x))
+    return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, False, mu)
+
+
+def jacobi(n, alpha, beta, monic=False):
+    r"""Jacobi polynomial.
+
+    Defined to be the solution of
+
+    .. math::
+        (1 - x^2)\frac{d^2}{dx^2}P_n^{(\alpha, \beta)}
+          + (\beta - \alpha - (\alpha + \beta + 2)x)
+            \frac{d}{dx}P_n^{(\alpha, \beta)}
+          + n(n + \alpha + \beta + 1)P_n^{(\alpha, \beta)} = 0
+
+    for :math:`\alpha, \beta > -1`; :math:`P_n^{(\alpha, \beta)}` is a
+    polynomial of degree :math:`n`.
+
+    Parameters
+    ----------
+    n : int
+        Degree of the polynomial.
+    alpha : float
+        Parameter, must be greater than -1.
+    beta : float
+        Parameter, must be greater than -1.
+    monic : bool, optional
+        If `True`, scale the leading coefficient to be 1. Default is
+        `False`.
+
+    Returns
+    -------
+    P : orthopoly1d
+        Jacobi polynomial.
+
+    Notes
+    -----
+    For fixed :math:`\alpha, \beta`, the polynomials
+    :math:`P_n^{(\alpha, \beta)}` are orthogonal over :math:`[-1, 1]`
+    with weight function :math:`(1 - x)^\alpha(1 + x)^\beta`.
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
+    Examples
+    --------
+    The Jacobi polynomials satisfy the recurrence relation:
+
+    .. math::
+        P_n^{(\alpha, \beta-1)}(x) - P_n^{(\alpha-1, \beta)}(x)
+          = P_{n-1}^{(\alpha, \beta)}(x)
+
+    This can be verified, for example, for :math:`\alpha = \beta = 2`
+    and :math:`n = 1` over the interval :math:`[-1, 1]`:
+
+    >>> import numpy as np
+    >>> from scipy.special import jacobi
+    >>> x = np.arange(-1.0, 1.0, 0.01)
+    >>> np.allclose(jacobi(0, 2, 2)(x),
+    ...             jacobi(1, 2, 1)(x) - jacobi(1, 1, 2)(x))
+    True
+
+    Plot of the Jacobi polynomial :math:`P_5^{(\alpha, -0.5)}` for
+    different values of :math:`\alpha`:
+
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.arange(-1.0, 1.0, 0.01)
+    >>> fig, ax = plt.subplots()
+    >>> ax.set_ylim(-2.0, 2.0)
+    >>> ax.set_title(r'Jacobi polynomials $P_5^{(\alpha, -0.5)}$')
+    >>> for alpha in np.arange(0, 4, 1):
+    ...     ax.plot(x, jacobi(5, alpha, -0.5)(x), label=rf'$\alpha={alpha}$')
+    >>> plt.legend(loc='best')
+    >>> plt.show()
+
+    """
+    if n < 0:
+        raise ValueError("n must be nonnegative.")
+
+    wfunc = lambda x: (1 - x)**alpha * (1 + x)**beta
+    if n == 0:
+        return orthopoly1d([], [], 1.0, 1.0, wfunc, (-1, 1), monic,
+                           eval_func=np.ones_like)
+    x, w, mu = roots_jacobi(n, alpha, beta, mu=True)
+    ab1 = alpha + beta + 1.0
+    hn = 2**ab1 / (2 * n + ab1) * _gam(n + alpha + 1)
+    hn *= _gam(n + beta + 1.0) / _gam(n + 1) / _gam(n + ab1)
+    kn = _gam(2 * n + ab1) / 2.0**n / _gam(n + 1) / _gam(n + ab1)
+    # here kn = coefficient on x^n term
+    p = orthopoly1d(x, w, hn, kn, wfunc, (-1, 1), monic,
+                    lambda x: _ufuncs.eval_jacobi(n, alpha, beta, x))
+    return p
+
+# Jacobi Polynomials shifted         G_n(p,q,x)
+
+
+def roots_sh_jacobi(n, p1, q1, mu=False):
+    """Gauss-Jacobi (shifted) quadrature.
+
+    Compute the sample points and weights for Gauss-Jacobi (shifted)
+    quadrature. The sample points are the roots of the nth degree
+    shifted Jacobi polynomial, :math:`G^{p,q}_n(x)`. These sample
+    points and weights correctly integrate polynomials of degree
+    :math:`2n - 1` or less over the interval :math:`[0, 1]` with
+    weight function :math:`w(x) = (1 - x)^{p-q} x^{q-1}`. See 22.2.2
+    in [AS]_ for details.
+
+    Parameters
+    ----------
+    n : int
+        quadrature order
+    p1 : float
+        (p1 - q1) must be > -1
+    q1 : float
+        q1 must be > 0
+    mu : bool, optional
+        If True, return the sum of the weights, optional.
+
+    Returns
+    -------
+    x : ndarray
+        Sample points
+    w : ndarray
+        Weights
+    mu : float
+        Sum of the weights
+
+    See Also
+    --------
+    scipy.integrate.quadrature
+    scipy.integrate.fixed_quad
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
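+    Examples
+    --------
+    As a quick illustrative check (parameter values chosen arbitrarily),
+    with ``p1 = q1 = 2`` the weight is :math:`w(x) = x`, and a 2-point
+    rule must integrate ``f(x) = x`` exactly; the integral of ``x * x``
+    over :math:`[0, 1]` is ``1/3``:
+
+    >>> import numpy as np
+    >>> from scipy.special import roots_sh_jacobi
+    >>> x, w = roots_sh_jacobi(2, 2, 2)
+    >>> np.allclose(np.sum(w * x), 1/3)
+    True
+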
+    """
+    if (p1-q1) <= -1 or q1 <= 0:
+        raise ValueError("(p - q) must be greater than -1, and q must be greater than 0.")
+    x, w, m = roots_jacobi(n, p1-q1, q1-1, True)
+    x = (x + 1) / 2
+    scale = 2.0**p1
+    w /= scale
+    m /= scale
+    if mu:
+        return x, w, m
+    else:
+        return x, w
+
+
+def sh_jacobi(n, p, q, monic=False):
+    r"""Shifted Jacobi polynomial.
+
+    Defined by
+
+    .. math::
+
+        G_n^{(p, q)}(x)
+          = \binom{2n + p - 1}{n}^{-1}P_n^{(p - q, q - 1)}(2x - 1),
+
+    where :math:`P_n^{(\cdot, \cdot)}` is the nth Jacobi polynomial.
+
+    Parameters
+    ----------
+    n : int
+        Degree of the polynomial.
+    p : float
+        Parameter, must have :math:`p > q - 1`.
+    q : float
+        Parameter, must be greater than 0.
+    monic : bool, optional
+        If `True`, scale the leading coefficient to be 1. Default is
+        `False`.
+
+    Returns
+    -------
+    G : orthopoly1d
+        Shifted Jacobi polynomial.
+
+    Notes
+    -----
+    For fixed :math:`p, q`, the polynomials :math:`G_n^{(p, q)}` are
+    orthogonal over :math:`[0, 1]` with weight function :math:`(1 -
+    x)^{p - q}x^{q - 1}`.
+
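+    Examples
+    --------
+    A minimal sketch verifying the defining relation above for one
+    arbitrary choice of parameters (:math:`n = 2`, :math:`p = 3`,
+    :math:`q = 2`):
+
+    >>> import numpy as np
+    >>> from scipy.special import sh_jacobi, jacobi, binom
+    >>> n, p, q = 2, 3, 2
+    >>> x = np.linspace(0, 1, 5)
+    >>> np.allclose(sh_jacobi(n, p, q)(x),
+    ...             jacobi(n, p - q, q - 1)(2*x - 1) / binom(2*n + p - 1, n))
+    True
+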
+    """
+    if n < 0:
+        raise ValueError("n must be nonnegative.")
+
+    wfunc = lambda x: (1.0 - x)**(p - q) * (x)**(q - 1.)
+    if n == 0:
+        return orthopoly1d([], [], 1.0, 1.0, wfunc, (0, 1), monic,
+                           eval_func=np.ones_like)
+    n1 = n
+    x, w = roots_sh_jacobi(n1, p, q)
+    hn = _gam(n + 1) * _gam(n + q) * _gam(n + p) * _gam(n + p - q + 1)
+    hn /= (2 * n + p) * (_gam(2 * n + p)**2)
+    # kn = 1.0 in standard form so monic is redundant. Kept for compatibility.
+    kn = 1.0
+    pp = orthopoly1d(x, w, hn, kn, wfunc=wfunc, limits=(0, 1), monic=monic,
+                     eval_func=lambda x: _ufuncs.eval_sh_jacobi(n, p, q, x))
+    return pp
+
+# Generalized Laguerre               L^(alpha)_n(x)
+
+
+def roots_genlaguerre(n, alpha, mu=False):
+    r"""Gauss-generalized Laguerre quadrature.
+
+    Compute the sample points and weights for Gauss-generalized
+    Laguerre quadrature. The sample points are the roots of the nth
+    degree generalized Laguerre polynomial, :math:`L^{\alpha}_n(x)`.
+    These sample points and weights correctly integrate polynomials of
+    degree :math:`2n - 1` or less over the interval :math:`[0,
+    \infty]` with weight function :math:`w(x) = x^{\alpha}
+    e^{-x}`. See 22.3.9 in [AS]_ for details.
+
+    Parameters
+    ----------
+    n : int
+        quadrature order
+    alpha : float
+        alpha must be > -1
+    mu : bool, optional
+        If True, return the sum of the weights, optional.
+
+    Returns
+    -------
+    x : ndarray
+        Sample points
+    w : ndarray
+        Weights
+    mu : float
+        Sum of the weights
+
+    See Also
+    --------
+    scipy.integrate.quadrature
+    scipy.integrate.fixed_quad
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
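+    Examples
+    --------
+    As a quick illustrative check (parameter values chosen arbitrarily),
+    with :math:`\alpha = 1` a 2-point rule must integrate
+    :math:`f(x) = x^2` exactly, where
+    :math:`\int_0^\infty x e^{-x} x^2 \, dx = \Gamma(4) = 6`:
+
+    >>> import numpy as np
+    >>> from scipy.special import roots_genlaguerre
+    >>> x, w = roots_genlaguerre(2, 1)
+    >>> np.allclose(np.sum(w * x**2), 6.0)
+    True
+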
+    """
+    m = int(n)
+    if n < 1 or n != m:
+        raise ValueError("n must be a positive integer.")
+    if alpha <= -1:
+        raise ValueError("alpha must be greater than -1.")
+
+    mu0 = _ufuncs.gamma(alpha + 1)
+
+    if m == 1:
+        x = np.array([alpha+1.0], 'd')
+        w = np.array([mu0], 'd')
+        if mu:
+            return x, w, mu0
+        else:
+            return x, w
+
+    an_func = lambda k: 2 * k + alpha + 1
+    bn_func = lambda k: -np.sqrt(k * (k + alpha))
+    f = lambda n, x: _ufuncs.eval_genlaguerre(n, alpha, x)
+    df = lambda n, x: (n*_ufuncs.eval_genlaguerre(n, alpha, x)
+                       - (n + alpha)*_ufuncs.eval_genlaguerre(n-1, alpha, x))/x
+    return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, False, mu)
+
+
+def genlaguerre(n, alpha, monic=False):
+    r"""Generalized (associated) Laguerre polynomial.
+
+    Defined to be the solution of
+
+    .. math::
+        x\frac{d^2}{dx^2}L_n^{(\alpha)}
+          + (\alpha + 1 - x)\frac{d}{dx}L_n^{(\alpha)}
+          + nL_n^{(\alpha)} = 0,
+
+    where :math:`\alpha > -1`; :math:`L_n^{(\alpha)}` is a polynomial
+    of degree :math:`n`.
+
+    Parameters
+    ----------
+    n : int
+        Degree of the polynomial.
+    alpha : float
+        Parameter, must be greater than -1.
+    monic : bool, optional
+        If `True`, scale the leading coefficient to be 1. Default is
+        `False`.
+
+    Returns
+    -------
+    L : orthopoly1d
+        Generalized Laguerre polynomial.
+
+    Notes
+    -----
+    For fixed :math:`\alpha`, the polynomials :math:`L_n^{(\alpha)}`
+    are orthogonal over :math:`[0, \infty)` with weight function
+    :math:`e^{-x}x^\alpha`.
+
+    The Laguerre polynomials are the special case where :math:`\alpha
+    = 0`.
+
+    See Also
+    --------
+    laguerre : Laguerre polynomial.
+    hyp1f1 : confluent hypergeometric function
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
+    Examples
+    --------
+    The generalized Laguerre polynomials are closely related to the confluent
+    hypergeometric function :math:`{}_1F_1`:
+
+        .. math::
+            L_n^{(\alpha)} = \binom{n + \alpha}{n} {}_1F_1(-n, \alpha +1, x)
+
+    This can be verified, for example, for :math:`n = \alpha = 3` over the
+    interval :math:`[-1, 1]`:
+
+    >>> import numpy as np
+    >>> from scipy.special import binom
+    >>> from scipy.special import genlaguerre
+    >>> from scipy.special import hyp1f1
+    >>> x = np.arange(-1.0, 1.0, 0.01)
+    >>> np.allclose(genlaguerre(3, 3)(x), binom(6, 3) * hyp1f1(-3, 4, x))
+    True
+
+    This is the plot of the generalized Laguerre polynomials
+    :math:`L_3^{(\alpha)}` for some values of :math:`\alpha`:
+
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.arange(-4.0, 12.0, 0.01)
+    >>> fig, ax = plt.subplots()
+    >>> ax.set_ylim(-5.0, 10.0)
+    >>> ax.set_title(r'Generalized Laguerre polynomials $L_3^{\alpha}$')
+    >>> for alpha in np.arange(0, 5):
+    ...     ax.plot(x, genlaguerre(3, alpha)(x), label=rf'$L_3^{{({alpha})}}$')
+    >>> plt.legend(loc='best')
+    >>> plt.show()
+
+    """
+    if alpha <= -1:
+        raise ValueError("alpha must be > -1")
+    if n < 0:
+        raise ValueError("n must be nonnegative.")
+
+    if n == 0:
+        n1 = n + 1
+    else:
+        n1 = n
+    x, w = roots_genlaguerre(n1, alpha)
+    wfunc = lambda x: exp(-x) * x**alpha
+    if n == 0:
+        x, w = [], []
+    hn = _gam(n + alpha + 1) / _gam(n + 1)
+    kn = (-1)**n / _gam(n + 1)
+    p = orthopoly1d(x, w, hn, kn, wfunc, (0, inf), monic,
+                    lambda x: _ufuncs.eval_genlaguerre(n, alpha, x))
+    return p
+
+# Laguerre                      L_n(x)
+
+
+def roots_laguerre(n, mu=False):
+    r"""Gauss-Laguerre quadrature.
+
+    Compute the sample points and weights for Gauss-Laguerre
+    quadrature. The sample points are the roots of the nth degree
+    Laguerre polynomial, :math:`L_n(x)`. These sample points and
+    weights correctly integrate polynomials of degree :math:`2n - 1`
+    or less over the interval :math:`[0, \infty]` with weight function
+    :math:`w(x) = e^{-x}`. See 22.2.13 in [AS]_ for details.
+
+    Parameters
+    ----------
+    n : int
+        quadrature order
+    mu : bool, optional
+        If True, return the sum of the weights, optional.
+
+    Returns
+    -------
+    x : ndarray
+        Sample points
+    w : ndarray
+        Weights
+    mu : float
+        Sum of the weights
+
+    See Also
+    --------
+    scipy.integrate.quadrature
+    scipy.integrate.fixed_quad
+    numpy.polynomial.laguerre.laggauss
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
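+    Examples
+    --------
+    As a quick illustrative check, a 2-point rule must integrate cubics
+    exactly, where :math:`\int_0^\infty e^{-x} x^3 \, dx = 3! = 6`:
+
+    >>> import numpy as np
+    >>> from scipy.special import roots_laguerre
+    >>> x, w = roots_laguerre(2)
+    >>> np.allclose(np.sum(w * x**3), 6.0)
+    True
+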
+    """
+    return roots_genlaguerre(n, 0.0, mu=mu)
+
+
+def laguerre(n, monic=False):
+    r"""Laguerre polynomial.
+
+    Defined to be the solution of
+
+    .. math::
+        x\frac{d^2}{dx^2}L_n + (1 - x)\frac{d}{dx}L_n + nL_n = 0;
+
+    :math:`L_n` is a polynomial of degree :math:`n`.
+
+    Parameters
+    ----------
+    n : int
+        Degree of the polynomial.
+    monic : bool, optional
+        If `True`, scale the leading coefficient to be 1. Default is
+        `False`.
+
+    Returns
+    -------
+    L : orthopoly1d
+        Laguerre Polynomial.
+
+    Notes
+    -----
+    The polynomials :math:`L_n` are orthogonal over :math:`[0,
+    \infty)` with weight function :math:`e^{-x}`.
+
+    See Also
+    --------
+    genlaguerre : Generalized (associated) Laguerre polynomial.
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
+    Examples
+    --------
+    The Laguerre polynomials :math:`L_n` are the special case
+    :math:`\alpha = 0` of the generalized Laguerre polynomials
+    :math:`L_n^{(\alpha)}`.
+    Let's verify it on the interval :math:`[-1, 1]`:
+
+    >>> import numpy as np
+    >>> from scipy.special import genlaguerre
+    >>> from scipy.special import laguerre
+    >>> x = np.arange(-1.0, 1.0, 0.01)
+    >>> np.allclose(genlaguerre(3, 0)(x), laguerre(3)(x))
+    True
+
+    The polynomials :math:`L_n` also satisfy the recurrence relation:
+
+    .. math::
+        (n + 1)L_{n+1}(x) = (2n + 1 - x)L_n(x) - nL_{n-1}(x)
+
+    This can be easily checked on :math:`[0, 1]` for :math:`n = 3`:
+
+    >>> x = np.arange(0.0, 1.0, 0.01)
+    >>> np.allclose(4 * laguerre(4)(x),
+    ...             (7 - x) * laguerre(3)(x) - 3 * laguerre(2)(x))
+    True
+
+    This is the plot of the first few Laguerre polynomials :math:`L_n`:
+
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.arange(-1.0, 5.0, 0.01)
+    >>> fig, ax = plt.subplots()
+    >>> ax.set_ylim(-5.0, 5.0)
+    >>> ax.set_title(r'Laguerre polynomials $L_n$')
+    >>> for n in np.arange(0, 5):
+    ...     ax.plot(x, laguerre(n)(x), label=rf'$L_{n}$')
+    >>> plt.legend(loc='best')
+    >>> plt.show()
+
+    """
+    if n < 0:
+        raise ValueError("n must be nonnegative.")
+
+    if n == 0:
+        n1 = n + 1
+    else:
+        n1 = n
+    x, w = roots_laguerre(n1)
+    if n == 0:
+        x, w = [], []
+    hn = 1.0
+    kn = (-1)**n / _gam(n + 1)
+    p = orthopoly1d(x, w, hn, kn, lambda x: exp(-x), (0, inf), monic,
+                    lambda x: _ufuncs.eval_laguerre(n, x))
+    return p
+
+# Hermite  1                         H_n(x)
+
+
+def roots_hermite(n, mu=False):
+    r"""Gauss-Hermite (physicist's) quadrature.
+
+    Compute the sample points and weights for Gauss-Hermite
+    quadrature. The sample points are the roots of the nth degree
+    Hermite polynomial, :math:`H_n(x)`. These sample points and
+    weights correctly integrate polynomials of degree :math:`2n - 1`
+    or less over the interval :math:`[-\infty, \infty]` with weight
+    function :math:`w(x) = e^{-x^2}`. See 22.2.14 in [AS]_ for
+    details.
+
+    Parameters
+    ----------
+    n : int
+        quadrature order
+    mu : bool, optional
+        If True, return the sum of the weights, optional.
+
+    Returns
+    -------
+    x : ndarray
+        Sample points
+    w : ndarray
+        Weights
+    mu : float
+        Sum of the weights
+
+    Notes
+    -----
+    For small n up to 150 a modified version of the Golub-Welsch
+    algorithm is used. Nodes are computed from the eigenvalue
+    problem and improved by one step of a Newton iteration.
+    The weights are computed from the well-known analytical formula.
+
+    For n larger than 150 an optimal asymptotic algorithm is applied
+    which computes nodes and weights in a numerically stable manner.
+    The algorithm has linear runtime making computation for very
+    large n (several thousand or more) feasible.
+
+    See Also
+    --------
+    scipy.integrate.quadrature
+    scipy.integrate.fixed_quad
+    numpy.polynomial.hermite.hermgauss
+    roots_hermitenorm
+
+    References
+    ----------
+    .. [townsend.trogdon.olver-2014]
+        Townsend, A. and Trogdon, T. and Olver, S. (2014)
+        *Fast computation of Gauss quadrature nodes and
+        weights on the whole real line*. :arXiv:`1410.5286`.
+    .. [townsend.trogdon.olver-2015]
+        Townsend, A. and Trogdon, T. and Olver, S. (2015)
+        *Fast computation of Gauss quadrature nodes and
+        weights on the whole real line*.
+        IMA Journal of Numerical Analysis
+        :doi:`10.1093/imanum/drv002`.
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
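+    Examples
+    --------
+    As a quick illustrative check, a 2-point rule must integrate
+    :math:`f(x) = x^2` exactly, where
+    :math:`\int_{-\infty}^\infty e^{-x^2} x^2 \, dx = \sqrt{\pi}/2`:
+
+    >>> import numpy as np
+    >>> from scipy.special import roots_hermite
+    >>> x, w = roots_hermite(2)
+    >>> np.allclose(np.sum(w * x**2), np.sqrt(np.pi) / 2)
+    True
+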
+    """
+    m = int(n)
+    if n < 1 or n != m:
+        raise ValueError("n must be a positive integer.")
+
+    mu0 = np.sqrt(np.pi)
+    if n <= 150:
+        an_func = lambda k: 0.0*k
+        bn_func = lambda k: np.sqrt(k/2.0)
+        f = _ufuncs.eval_hermite
+        df = lambda n, x: 2.0 * n * _ufuncs.eval_hermite(n-1, x)
+        return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu)
+    else:
+        nodes, weights = _roots_hermite_asy(m)
+        if mu:
+            return nodes, weights, mu0
+        else:
+            return nodes, weights
+
+
+def _compute_tauk(n, k, maxit=5):
+    """Helper function for Tricomi initial guesses
+
+    For details, see equation (3.1) in lemma 3.1 of the
+    original paper.
+
+    Parameters
+    ----------
+    n : int
+        Quadrature order
+    k : ndarray of type int
+        Index of roots :math:`\tau_k` to compute
+    maxit : int
+        Maximal number of Newton iterations performed; the default
+        value of 5 is sufficient.
+
+    Returns
+    -------
+    tauk : ndarray
+        Roots of equation 3.1
+
+    See Also
+    --------
+    initial_nodes_a
+    roots_hermite_asy
+    """
+    a = n % 2 - 0.5
+    c = (4.0*floor(n/2.0) - 4.0*k + 3.0)*pi / (4.0*floor(n/2.0) + 2.0*a + 2.0)
+    f = lambda x: x - sin(x) - c
+    df = lambda x: 1.0 - cos(x)
+    xi = 0.5*pi
+    for i in range(maxit):
+        xi = xi - f(xi)/df(xi)
+    return xi
+
+
+def _initial_nodes_a(n, k):
+    r"""Tricomi initial guesses
+
+    Computes an initial approximation to the square of the `k`-th
+    (positive) root :math:`x_k` of the Hermite polynomial :math:`H_n`
+    of order :math:`n`. The formula is the one from lemma 3.1 in the
+    original paper. The guesses are accurate except in the region
+    near :math:`\sqrt{2n + 1}`.
+
+    Parameters
+    ----------
+    n : int
+        Quadrature order
+    k : ndarray of type int
+        Index of roots to compute
+
+    Returns
+    -------
+    xksq : ndarray
+        Square of the approximate roots
+
+    See Also
+    --------
+    initial_nodes
+    roots_hermite_asy
+    """
+    tauk = _compute_tauk(n, k)
+    sigk = cos(0.5*tauk)**2
+    a = n % 2 - 0.5
+    nu = 4.0*floor(n/2.0) + 2.0*a + 2.0
+    # Initial approximation of Hermite roots (square)
+    xksq = nu*sigk - 1.0/(3.0*nu) * (5.0/(4.0*(1.0-sigk)**2) - 1.0/(1.0-sigk) - 0.25)
+    return xksq
+
+
+def _initial_nodes_b(n, k):
+    r"""Gatteschi initial guesses
+
+    Computes an initial approximation to the square of the kth
+    (positive) root :math:`x_k` of the Hermite polynomial :math:`H_n`
+    of order :math:`n`. The formula is the one from lemma 3.2 in the
+    original paper. The guesses are accurate in the region just
+    below :math:`\sqrt{2n + 1}`.
+
+    Parameters
+    ----------
+    n : int
+        Quadrature order
+    k : ndarray of type int
+        Index of roots to compute
+
+    Returns
+    -------
+    xksq : ndarray
+        Square of the approximate root
+
+    See Also
+    --------
+    initial_nodes
+    roots_hermite_asy
+    """
+    a = n % 2 - 0.5
+    nu = 4.0*floor(n/2.0) + 2.0*a + 2.0
+    # Airy roots by approximation
+    ak = _specfun.airyzo(k.max(), 1)[0][::-1]
+    # Initial approximation of Hermite roots (square)
+    xksq = (nu +
+            2.0**(2.0/3.0) * ak * nu**(1.0/3.0) +
+            1.0/5.0 * 2.0**(4.0/3.0) * ak**2 * nu**(-1.0/3.0) +
+            (9.0/140.0 - 12.0/175.0 * ak**3) * nu**(-1.0) +
+            (16.0/1575.0 * ak + 92.0/7875.0 * ak**4) * 2.0**(2.0/3.0) * nu**(-5.0/3.0) -
+            (15152.0/3031875.0 * ak**5 + 1088.0/121275.0 * ak**2) * 2.0**(1.0/3.0) * nu**(-7.0/3.0))
+    return xksq
+
+
+def _initial_nodes(n):
+    """Initial guesses for the Hermite roots
+
+    Computes an initial approximation to the non-negative
+    roots :math:`x_k` of the Hermite polynomial :math:`H_n`
+    of order :math:`n`. The Tricomi and Gatteschi initial
+    guesses are used in the region where they are accurate.
+
+    Parameters
+    ----------
+    n : int
+        Quadrature order
+
+    Returns
+    -------
+    xk : ndarray
+        Approximate roots
+
+    See Also
+    --------
+    roots_hermite_asy
+    """
+    # Turnover point
+    # linear polynomial fit to error of 10, 25, 40, ..., 1000 point rules
+    fit = 0.49082003*n - 4.37859653
+    turnover = around(fit).astype(int)
+    # Compute all approximations
+    ia = arange(1, int(floor(n*0.5)+1))
+    ib = ia[::-1]
+    xasq = _initial_nodes_a(n, ia[:turnover+1])
+    xbsq = _initial_nodes_b(n, ib[turnover+1:])
+    # Combine
+    iv = sqrt(hstack([xasq, xbsq]))
+    # Central node is always zero
+    if n % 2 == 1:
+        iv = hstack([0.0, iv])
+    return iv
+
+
+def _pbcf(n, theta):
+    r"""Asymptotic series expansion of parabolic cylinder function
+
+    The implementation is based on sections 3.2 and 3.3 from the
+    original paper. Compared to the published version this code
+    adds one more term to the asymptotic series. The detailed
+    formulas can be found at [parabolic-asymptotics]_. The evaluation
+    is done in a transformed variable :math:`\theta := \arccos(t)`
+    where :math:`t := x / \mu` and :math:`\mu := \sqrt{2n + 1}`.
+
+    Parameters
+    ----------
+    n : int
+        Quadrature order
+    theta : ndarray
+        Transformed position variable
+
+    Returns
+    -------
+    U : ndarray
+        Value of the parabolic cylinder function :math:`U(a, \theta)`.
+    Ud : ndarray
+        Value of the derivative :math:`U^{\prime}(a, \theta)` of
+        the parabolic cylinder function.
+
+    See Also
+    --------
+    roots_hermite_asy
+
+    References
+    ----------
+    .. [parabolic-asymptotics]
+       https://dlmf.nist.gov/12.10#vii
+    """
+    st = sin(theta)
+    ct = cos(theta)
+    # https://dlmf.nist.gov/12.10#vii
+    mu = 2.0*n + 1.0
+    # https://dlmf.nist.gov/12.10#E23
+    eta = 0.5*theta - 0.5*st*ct
+    # https://dlmf.nist.gov/12.10#E39
+    zeta = -(3.0*eta/2.0) ** (2.0/3.0)
+    # https://dlmf.nist.gov/12.10#E40
+    phi = (-zeta / st**2) ** (0.25)
+    # Coefficients
+    # https://dlmf.nist.gov/12.10#E43
+    a0 = 1.0
+    a1 = 0.10416666666666666667
+    a2 = 0.08355034722222222222
+    a3 = 0.12822657455632716049
+    a4 = 0.29184902646414046425
+    a5 = 0.88162726744375765242
+    b0 = 1.0
+    b1 = -0.14583333333333333333
+    b2 = -0.09874131944444444444
+    b3 = -0.14331205391589506173
+    b4 = -0.31722720267841354810
+    b5 = -0.94242914795712024914
+    # Polynomials
+    # https://dlmf.nist.gov/12.10#E9
+    # https://dlmf.nist.gov/12.10#E10
+    ctp = ct ** arange(16).reshape((-1,1))
+    u0 = 1.0
+    u1 = (1.0*ctp[3,:] - 6.0*ct) / 24.0
+    u2 = (-9.0*ctp[4,:] + 249.0*ctp[2,:] + 145.0) / 1152.0
+    u3 = (-4042.0*ctp[9,:] + 18189.0*ctp[7,:] - 28287.0*ctp[5,:] - 151995.0*ctp[3,:] - 259290.0*ct) / 414720.0
+    u4 = (72756.0*ctp[10,:] - 321339.0*ctp[8,:] - 154982.0*ctp[6,:] + 50938215.0*ctp[4,:] + 122602962.0*ctp[2,:] + 12773113.0) / 39813120.0
+    u5 = (82393456.0*ctp[15,:] - 617950920.0*ctp[13,:] + 1994971575.0*ctp[11,:] - 3630137104.0*ctp[9,:] + 4433574213.0*ctp[7,:]
+          - 37370295816.0*ctp[5,:] - 119582875013.0*ctp[3,:] - 34009066266.0*ct) / 6688604160.0
+    v0 = 1.0
+    v1 = (1.0*ctp[3,:] + 6.0*ct) / 24.0
+    v2 = (15.0*ctp[4,:] - 327.0*ctp[2,:] - 143.0) / 1152.0
+    v3 = (-4042.0*ctp[9,:] + 18189.0*ctp[7,:] - 36387.0*ctp[5,:] + 238425.0*ctp[3,:] + 259290.0*ct) / 414720.0
+    v4 = (-121260.0*ctp[10,:] + 551733.0*ctp[8,:] - 151958.0*ctp[6,:] - 57484425.0*ctp[4,:] - 132752238.0*ctp[2,:] - 12118727) / 39813120.0
+    v5 = (82393456.0*ctp[15,:] - 617950920.0*ctp[13,:] + 2025529095.0*ctp[11,:] - 3750839308.0*ctp[9,:] + 3832454253.0*ctp[7,:]
+          + 35213253348.0*ctp[5,:] + 130919230435.0*ctp[3,:] + 34009066266*ct) / 6688604160.0
+    # Airy Evaluation (Bi and Bip unused)
+    Ai, Aip, Bi, Bip = airy(mu**(4.0/6.0) * zeta)
+    # Prefactor for U
+    P = 2.0*sqrt(pi) * mu**(1.0/6.0) * phi
+    # Terms for U
+    # https://dlmf.nist.gov/12.10#E42
+    phip = phi ** arange(6, 31, 6).reshape((-1,1))
+    A0 = b0*u0
+    A1 = (b2*u0 + phip[0,:]*b1*u1 + phip[1,:]*b0*u2) / zeta**3
+    A2 = (b4*u0 + phip[0,:]*b3*u1 + phip[1,:]*b2*u2 + phip[2,:]*b1*u3 + phip[3,:]*b0*u4) / zeta**6
+    B0 = -(a1*u0 + phip[0,:]*a0*u1) / zeta**2
+    B1 = -(a3*u0 + phip[0,:]*a2*u1 + phip[1,:]*a1*u2 + phip[2,:]*a0*u3) / zeta**5
+    B2 = -(a5*u0 + phip[0,:]*a4*u1 + phip[1,:]*a3*u2 + phip[2,:]*a2*u3 + phip[3,:]*a1*u4 + phip[4,:]*a0*u5) / zeta**8
+    # U
+    # https://dlmf.nist.gov/12.10#E35
+    U = P * (Ai * (A0 + A1/mu**2.0 + A2/mu**4.0) +
+             Aip * (B0 + B1/mu**2.0 + B2/mu**4.0) / mu**(8.0/6.0))
+    # Prefactor for derivative of U
+    Pd = sqrt(2.0*pi) * mu**(2.0/6.0) / phi
+    # Terms for derivative of U
+    # https://dlmf.nist.gov/12.10#E46
+    C0 = -(b1*v0 + phip[0,:]*b0*v1) / zeta
+    C1 = -(b3*v0 + phip[0,:]*b2*v1 + phip[1,:]*b1*v2 + phip[2,:]*b0*v3) / zeta**4
+    C2 = -(b5*v0 + phip[0,:]*b4*v1 + phip[1,:]*b3*v2 + phip[2,:]*b2*v3 + phip[3,:]*b1*v4 + phip[4,:]*b0*v5) / zeta**7
+    D0 = a0*v0
+    D1 = (a2*v0 + phip[0,:]*a1*v1 + phip[1,:]*a0*v2) / zeta**3
+    D2 = (a4*v0 + phip[0,:]*a3*v1 + phip[1,:]*a2*v2 + phip[2,:]*a1*v3 + phip[3,:]*a0*v4) / zeta**6
+    # Derivative of U
+    # https://dlmf.nist.gov/12.10#E36
+    Ud = Pd * (Ai * (C0 + C1/mu**2.0 + C2/mu**4.0) / mu**(4.0/6.0) +
+               Aip * (D0 + D1/mu**2.0 + D2/mu**4.0))
+    return U, Ud
+
+
+def _newton(n, x_initial, maxit=5):
+    """Newton iteration for polishing the asymptotic approximation
+    to the zeros of the Hermite polynomials.
+
+    Parameters
+    ----------
+    n : int
+        Quadrature order
+    x_initial : ndarray
+        Initial guesses for the roots
+    maxit : int
+        Maximal number of Newton iterations.
+        The default 5 is sufficient, usually
+        only one or two steps are needed.
+
+    Returns
+    -------
+    nodes : ndarray
+        Quadrature nodes
+    weights : ndarray
+        Quadrature weights
+
+    See Also
+    --------
+    roots_hermite_asy
+    """
+    # Variable transformation
+    mu = sqrt(2.0*n + 1.0)
+    t = x_initial / mu
+    theta = arccos(t)
+    # Newton iteration
+    for i in range(maxit):
+        u, ud = _pbcf(n, theta)
+        dtheta = u / (sqrt(2.0) * mu * sin(theta) * ud)
+        theta = theta + dtheta
+        if max(abs(dtheta)) < 1e-14:
+            break
+    # Undo variable transformation
+    x = mu * cos(theta)
+    # Central node is always zero
+    if n % 2 == 1:
+        x[0] = 0.0
+    # Compute weights
+    w = exp(-x**2) / (2.0*ud**2)
+    return x, w
+
+
+def _roots_hermite_asy(n):
+    r"""Gauss-Hermite (physicist's) quadrature for large n.
+
+    Computes the sample points and weights for Gauss-Hermite quadrature.
+    The sample points are the roots of the nth degree Hermite polynomial,
+    :math:`H_n(x)`. These sample points and weights correctly integrate
+    polynomials of degree :math:`2n - 1` or less over the interval
+    :math:`[-\infty, \infty]` with weight function :math:`f(x) = e^{-x^2}`.
+
+    This method relies on asymptotic expansions which work best for n > 150.
+    The algorithm has linear runtime making computation for very large n
+    feasible.
+
+    Parameters
+    ----------
+    n : int
+        quadrature order
+
+    Returns
+    -------
+    nodes : ndarray
+        Quadrature nodes
+    weights : ndarray
+        Quadrature weights
+
+    See Also
+    --------
+    roots_hermite
+
+    References
+    ----------
+    .. [townsend.trogdon.olver-2014]
+       Townsend, A. and Trogdon, T. and Olver, S. (2014)
+       *Fast computation of Gauss quadrature nodes and
+       weights on the whole real line*. :arXiv:`1410.5286`.
+
+    .. [townsend.trogdon.olver-2015]
+       Townsend, A. and Trogdon, T. and Olver, S. (2015)
+       *Fast computation of Gauss quadrature nodes and
+       weights on the whole real line*.
+       IMA Journal of Numerical Analysis
+       :doi:`10.1093/imanum/drv002`.
+    """
+    iv = _initial_nodes(n)
+    nodes, weights = _newton(n, iv)
+    # Combine with negative parts
+    if n % 2 == 0:
+        nodes = hstack([-nodes[::-1], nodes])
+        weights = hstack([weights[::-1], weights])
+    else:
+        nodes = hstack([-nodes[-1:0:-1], nodes])
+        weights = hstack([weights[-1:0:-1], weights])
+    # Scale weights
+    weights *= sqrt(pi) / sum(weights)
+    return nodes, weights
+
+
+def hermite(n, monic=False):
+    r"""Physicist's Hermite polynomial.
+
+    Defined by
+
+    .. math::
+
+        H_n(x) = (-1)^ne^{x^2}\frac{d^n}{dx^n}e^{-x^2};
+
+    :math:`H_n` is a polynomial of degree :math:`n`.
+
+    Parameters
+    ----------
+    n : int
+        Degree of the polynomial.
+    monic : bool, optional
+        If `True`, scale the leading coefficient to be 1. Default is
+        `False`.
+
+    Returns
+    -------
+    H : orthopoly1d
+        Hermite polynomial.
+
+    Notes
+    -----
+    The polynomials :math:`H_n` are orthogonal over :math:`(-\infty,
+    \infty)` with weight function :math:`e^{-x^2}`.
+
+    Examples
+    --------
+    >>> from scipy import special
+    >>> import matplotlib.pyplot as plt
+    >>> import numpy as np
+
+    >>> p_monic = special.hermite(3, monic=True)
+    >>> p_monic
+    poly1d([ 1. ,  0. , -1.5,  0. ])
+    >>> p_monic(1)
+    -0.49999999999999983
+    >>> x = np.linspace(-3, 3, 400)
+    >>> y = p_monic(x)
+    >>> plt.plot(x, y)
+    >>> plt.title("Monic Hermite polynomial of degree 3")
+    >>> plt.xlabel("x")
+    >>> plt.ylabel("H_3(x)")
+    >>> plt.show()
+
+    """
+    if n < 0:
+        raise ValueError("n must be nonnegative.")
+
+    if n == 0:
+        n1 = n + 1
+    else:
+        n1 = n
+    x, w = roots_hermite(n1)
+    wfunc = lambda x: exp(-x * x)
+    if n == 0:
+        x, w = [], []
+    hn = 2**n * _gam(n + 1) * sqrt(pi)
+    kn = 2**n
+    p = orthopoly1d(x, w, hn, kn, wfunc, (-inf, inf), monic,
+                    lambda x: _ufuncs.eval_hermite(n, x))
+    return p
+
+# Hermite  2                         He_n(x)
+
+
+def roots_hermitenorm(n, mu=False):
+    r"""Gauss-Hermite (statistician's) quadrature.
+
+    Compute the sample points and weights for Gauss-Hermite
+    quadrature. The sample points are the roots of the nth degree
+    Hermite polynomial, :math:`He_n(x)`. These sample points and
+    weights correctly integrate polynomials of degree :math:`2n - 1`
+    or less over the interval :math:`[-\infty, \infty]` with weight
+    function :math:`w(x) = e^{-x^2/2}`. See 22.2.15 in [AS]_ for more
+    details.
+
+    Parameters
+    ----------
+    n : int
+        quadrature order
+    mu : bool, optional
+        If True, return the sum of the weights, optional.
+
+    Returns
+    -------
+    x : ndarray
+        Sample points
+    w : ndarray
+        Weights
+    mu : float
+        Sum of the weights
+
+    Notes
+    -----
+    For small n up to 150 a modified version of the Golub-Welsch
+    algorithm is used. Nodes are computed from the eigenvalue
+    problem and improved by one step of a Newton iteration.
+    The weights are computed from the well-known analytical formula.
+
+    For n larger than 150 an optimal asymptotic algorithm is used
+    which computes nodes and weights in a numerically stable manner.
+    The algorithm has linear runtime making computation for very
+    large n (several thousand or more) feasible.
+
+    See Also
+    --------
+    scipy.integrate.quadrature
+    scipy.integrate.fixed_quad
+    numpy.polynomial.hermite_e.hermegauss
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
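+    Examples
+    --------
+    As a quick illustrative check, a 2-point rule must integrate
+    :math:`f(x) = x^2` exactly, where
+    :math:`\int_{-\infty}^\infty e^{-x^2/2} x^2 \, dx = \sqrt{2\pi}`:
+
+    >>> import numpy as np
+    >>> from scipy.special import roots_hermitenorm
+    >>> x, w = roots_hermitenorm(2)
+    >>> np.allclose(np.sum(w * x**2), np.sqrt(2 * np.pi))
+    True
+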
+    """
+    m = int(n)
+    if n < 1 or n != m:
+        raise ValueError("n must be a positive integer.")
+
+    mu0 = np.sqrt(2.0*np.pi)
+    if n <= 150:
+        an_func = lambda k: 0.0*k
+        bn_func = lambda k: np.sqrt(k)
+        f = _ufuncs.eval_hermitenorm
+        df = lambda n, x: n * _ufuncs.eval_hermitenorm(n-1, x)
+        return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu)
+    else:
+        nodes, weights = _roots_hermite_asy(m)
+        # Transform
+        nodes *= sqrt(2)
+        weights *= sqrt(2)
+        if mu:
+            return nodes, weights, mu0
+        else:
+            return nodes, weights
+
+
+def hermitenorm(n, monic=False):
+    r"""Normalized (probabilist's) Hermite polynomial.
+
+    Defined by
+
+    .. math::
+
+        He_n(x) = (-1)^ne^{x^2/2}\frac{d^n}{dx^n}e^{-x^2/2};
+
+    :math:`He_n` is a polynomial of degree :math:`n`.
+
+    Parameters
+    ----------
+    n : int
+        Degree of the polynomial.
+    monic : bool, optional
+        If `True`, scale the leading coefficient to be 1. Default is
+        `False`.
+
+    Returns
+    -------
+    He : orthopoly1d
+        Hermite polynomial.
+
+    Notes
+    -----
+    The polynomials :math:`He_n` are orthogonal over :math:`(-\infty,
+    \infty)` with weight function :math:`e^{-x^2/2}`.
+
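+    Examples
+    --------
+    A minimal sketch checking the coefficients against the well-known
+    closed form :math:`He_3(x) = x^3 - 3x`:
+
+    >>> import numpy as np
+    >>> from scipy.special import hermitenorm
+    >>> np.allclose(hermitenorm(3).coeffs, [1.0, 0.0, -3.0, 0.0])
+    True
+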
+    """
+    if n < 0:
+        raise ValueError("n must be nonnegative.")
+
+    if n == 0:
+        n1 = n + 1
+    else:
+        n1 = n
+    x, w = roots_hermitenorm(n1)
+    wfunc = lambda x: exp(-x * x / 2.0)
+    if n == 0:
+        x, w = [], []
+    hn = sqrt(2 * pi) * _gam(n + 1)
+    kn = 1.0
+    p = orthopoly1d(x, w, hn, kn, wfunc=wfunc, limits=(-inf, inf), monic=monic,
+                    eval_func=lambda x: _ufuncs.eval_hermitenorm(n, x))
+    return p
+
+# The remainder of the polynomials can be derived from the ones above.
+
+# Ultraspherical (Gegenbauer)        C^(alpha)_n(x)
+
+
+def roots_gegenbauer(n, alpha, mu=False):
+    r"""Gauss-Gegenbauer quadrature.
+
+    Compute the sample points and weights for Gauss-Gegenbauer
+    quadrature. The sample points are the roots of the nth degree
+    Gegenbauer polynomial, :math:`C^{\alpha}_n(x)`. These sample
+    points and weights correctly integrate polynomials of degree
+    :math:`2n - 1` or less over the interval :math:`[-1, 1]` with
+    weight function :math:`w(x) = (1 - x^2)^{\alpha - 1/2}`. See
+    22.2.3 in [AS]_ for more details.
+
+    Parameters
+    ----------
+    n : int
+        quadrature order
+    alpha : float
+        alpha must be > -0.5
+    mu : bool, optional
+        If True, return the sum of the weights, optional.
+
+    Returns
+    -------
+    x : ndarray
+        Sample points
+    w : ndarray
+        Weights
+    mu : float
+        Sum of the weights
+
+    See Also
+    --------
+    scipy.integrate.quadrature
+    scipy.integrate.fixed_quad
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
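+    Examples
+    --------
+    As a quick illustrative check (parameter value chosen arbitrarily),
+    with :math:`\alpha = 1` a 3-point rule must integrate
+    :math:`f(x) = x^2` exactly, where
+    :math:`\int_{-1}^1 \sqrt{1 - x^2} \, x^2 \, dx = \pi/8`:
+
+    >>> import numpy as np
+    >>> from scipy.special import roots_gegenbauer
+    >>> x, w = roots_gegenbauer(3, 1)
+    >>> np.allclose(np.sum(w * x**2), np.pi / 8)
+    True
+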
+    """
+    m = int(n)
+    if n < 1 or n != m:
+        raise ValueError("n must be a positive integer.")
+    if alpha < -0.5:
+        raise ValueError("alpha must be greater than -0.5.")
+    elif alpha == 0.0:
+        # C(n, 0, x) == 0 uniformly for n >= 1; however, as alpha -> 0 the
+        # roots of C(n, alpha, x) tend to those of T(n, x). Strictly, we
+        # should just error out here, since the roots are not really
+        # defined, but we used to return something useful, so let's
+        # keep doing so.
+        return roots_chebyt(n, mu)
+
+    if alpha <= 170:
+        mu0 = (np.sqrt(np.pi) * _ufuncs.gamma(alpha + 0.5)) \
+              / _ufuncs.gamma(alpha + 1)
+    else:
+        # For large alpha we use a Taylor series expansion around inf,
+        # expressed as a 6th order polynomial of a^-1 and using Horner's
+        # method to minimize computation and maximize precision
+        inv_alpha = 1. / alpha
+        coeffs = np.array([0.000207186, -0.00152206, -0.000640869,
+                           0.00488281, 0.0078125, -0.125, 1.])
+        mu0 = coeffs[0]
+        for term in range(1, len(coeffs)):
+            mu0 = mu0 * inv_alpha + coeffs[term]
+        mu0 = mu0 * np.sqrt(np.pi / alpha)
+    an_func = lambda k: 0.0 * k
+    bn_func = lambda k: np.sqrt(k * (k + 2 * alpha - 1)
+                        / (4 * (k + alpha) * (k + alpha - 1)))
+    f = lambda n, x: _ufuncs.eval_gegenbauer(n, alpha, x)
+    df = lambda n, x: ((-n*x*_ufuncs.eval_gegenbauer(n, alpha, x)
+                        + ((n + 2*alpha - 1)
+                           * _ufuncs.eval_gegenbauer(n - 1, alpha, x)))
+                       / (1 - x**2))
+    return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu)
+
+
+def gegenbauer(n, alpha, monic=False):
+    r"""Gegenbauer (ultraspherical) polynomial.
+
+    Defined to be the solution of
+
+    .. math::
+        (1 - x^2)\frac{d^2}{dx^2}C_n^{(\alpha)}
+          - (2\alpha + 1)x\frac{d}{dx}C_n^{(\alpha)}
+          + n(n + 2\alpha)C_n^{(\alpha)} = 0
+
+    for :math:`\alpha > -1/2`; :math:`C_n^{(\alpha)}` is a polynomial
+    of degree :math:`n`.
+
+    Parameters
+    ----------
+    n : int
+        Degree of the polynomial.
+    alpha : float
+        Parameter, must be greater than -0.5.
+    monic : bool, optional
+        If `True`, scale the leading coefficient to be 1. Default is
+        `False`.
+
+    Returns
+    -------
+    C : orthopoly1d
+        Gegenbauer polynomial.
+
+    Notes
+    -----
+    The polynomials :math:`C_n^{(\alpha)}` are orthogonal over
+    :math:`[-1,1]` with weight function :math:`(1 - x^2)^{(\alpha -
+    1/2)}`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import special
+    >>> import matplotlib.pyplot as plt
+
+    We can initialize a variable ``p`` as a Gegenbauer polynomial using the
+    `gegenbauer` function and evaluate at a point ``x = 1``.
+
+    >>> p = special.gegenbauer(3, 0.5, monic=False)
+    >>> p
+    poly1d([ 2.5,  0. , -1.5,  0. ])
+    >>> p(1)
+    1.0
+
+    To evaluate ``p`` at various points ``x`` in the interval ``(-3, 3)``,
+    simply pass an array ``x`` to ``p`` as follows:
+
+    >>> x = np.linspace(-3, 3, 400)
+    >>> y = p(x)
+
+    We can then visualize ``x, y`` using `matplotlib.pyplot`.
+
+    >>> fig, ax = plt.subplots()
+    >>> ax.plot(x, y)
+    >>> ax.set_title("Gegenbauer (ultraspherical) polynomial of degree 3")
+    >>> ax.set_xlabel("x")
+    >>> ax.set_ylabel("G_3(x)")
+    >>> plt.show()
+
+    """
+    base = jacobi(n, alpha - 0.5, alpha - 0.5, monic=monic)
+    if monic:
+        return base
+    # Abramowitz and Stegun 22.5.20
+    factor = (_gam(2*alpha + n) * _gam(alpha + 0.5) /
+              _gam(2*alpha) / _gam(alpha + 0.5 + n))
+    base._scale(factor)
+    base.__dict__['_eval_func'] = lambda x: _ufuncs.eval_gegenbauer(float(n),
+                                                                    alpha, x)
+    return base
+
+# Chebyshev of the first kind: T_n(x) =
+#     n! sqrt(pi) / _gam(n+1./2)* P^(-1/2,-1/2)_n(x)
+# Computed anew.
+
+
+def roots_chebyt(n, mu=False):
+    r"""Gauss-Chebyshev (first kind) quadrature.
+
+    Computes the sample points and weights for Gauss-Chebyshev
+    quadrature. The sample points are the roots of the nth degree
+    Chebyshev polynomial of the first kind, :math:`T_n(x)`. These
+    sample points and weights correctly integrate polynomials of
+    degree :math:`2n - 1` or less over the interval :math:`[-1, 1]`
+    with weight function :math:`w(x) = 1/\sqrt{1 - x^2}`. See 22.2.4
+    in [AS]_ for more details.
+
+    Parameters
+    ----------
+    n : int
+        quadrature order
+    mu : bool, optional
+        If True, return the sum of the weights, optional.
+
+    Returns
+    -------
+    x : ndarray
+        Sample points
+    w : ndarray
+        Weights
+    mu : float
+        Sum of the weights
+
+    See Also
+    --------
+    scipy.integrate.quadrature
+    scipy.integrate.fixed_quad
+    numpy.polynomial.chebyshev.chebgauss
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
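+    Examples
+    --------
+    As a quick illustrative check, a 3-point rule must integrate
+    :math:`f(x) = x^2` exactly, where
+    :math:`\int_{-1}^1 x^2 / \sqrt{1 - x^2} \, dx = \pi/2`:
+
+    >>> import numpy as np
+    >>> from scipy.special import roots_chebyt
+    >>> x, w = roots_chebyt(3)
+    >>> np.allclose(np.sum(w * x**2), np.pi / 2)
+    True
+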
+    """
+    m = int(n)
+    if n < 1 or n != m:
+        raise ValueError('n must be a positive integer.')
+    x = _ufuncs._sinpi(np.arange(-m + 1, m, 2) / (2*m))
+    w = np.full_like(x, pi/m)
+    if mu:
+        return x, w, pi
+    else:
+        return x, w
+
+
+def chebyt(n, monic=False):
+    r"""Chebyshev polynomial of the first kind.
+
+    Defined to be the solution of
+
+    .. math::
+        (1 - x^2)\frac{d^2}{dx^2}T_n - x\frac{d}{dx}T_n + n^2T_n = 0;
+
+    :math:`T_n` is a polynomial of degree :math:`n`.
+
+    Parameters
+    ----------
+    n : int
+        Degree of the polynomial.
+    monic : bool, optional
+        If `True`, scale the leading coefficient to be 1. Default is
+        `False`.
+
+    Returns
+    -------
+    T : orthopoly1d
+        Chebyshev polynomial of the first kind.
+
+    Notes
+    -----
+    The polynomials :math:`T_n` are orthogonal over :math:`[-1, 1]`
+    with weight function :math:`(1 - x^2)^{-1/2}`.
+
+    See Also
+    --------
+    chebyu : Chebyshev polynomial of the second kind.
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
+    Examples
+    --------
+    Chebyshev polynomials of the first kind of order :math:`n` can
+    be obtained as the determinant of specific :math:`n \times n`
+    matrices. As an example we can check how the points obtained from
+    the determinant of the following :math:`3 \times 3` matrix
+    lie exactly on :math:`T_3`:
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.linalg import det
+    >>> from scipy.special import chebyt
+    >>> x = np.arange(-1.0, 1.0, 0.01)
+    >>> fig, ax = plt.subplots()
+    >>> ax.set_ylim(-2.0, 2.0)
+    >>> ax.set_title(r'Chebyshev polynomial $T_3$')
+    >>> ax.plot(x, chebyt(3)(x), label=rf'$T_3$')
+    >>> for p in np.arange(-1.0, 1.0, 0.1):
+    ...     ax.plot(p,
+    ...             det(np.array([[p, 1, 0], [1, 2*p, 1], [0, 1, 2*p]])),
+    ...             'rx')
+    >>> plt.legend(loc='best')
+    >>> plt.show()
+
+    They are also related to the Jacobi Polynomials
+    :math:`P_n^{(-0.5, -0.5)}` through the relation:
+
+    .. math::
+        P_n^{(-0.5, -0.5)}(x) = \frac{1}{4^n} \binom{2n}{n} T_n(x)
+
+    Let's verify it for :math:`n = 3`:
+
+    >>> from scipy.special import binom
+    >>> from scipy.special import jacobi
+    >>> x = np.arange(-1.0, 1.0, 0.01)
+    >>> np.allclose(jacobi(3, -0.5, -0.5)(x),
+    ...             1/64 * binom(6, 3) * chebyt(3)(x))
+    True
+
+    We can plot the Chebyshev polynomials :math:`T_n` for some values
+    of :math:`n`:
+
+    >>> x = np.arange(-1.5, 1.5, 0.01)
+    >>> fig, ax = plt.subplots()
+    >>> ax.set_ylim(-4.0, 4.0)
+    >>> ax.set_title(r'Chebyshev polynomials $T_n$')
+    >>> for n in np.arange(2,5):
+    ...     ax.plot(x, chebyt(n)(x), label=rf'$T_{n}$')
+    >>> plt.legend(loc='best')
+    >>> plt.show()
+
+    """
+    if n < 0:
+        raise ValueError("n must be nonnegative.")
+
+    wfunc = lambda x: 1.0 / sqrt(1 - x * x)
+    if n == 0:
+        return orthopoly1d([], [], pi, 1.0, wfunc, (-1, 1), monic,
+                           lambda x: _ufuncs.eval_chebyt(n, x))
+    n1 = n
+    x, w, mu = roots_chebyt(n1, mu=True)
+    hn = pi / 2
+    kn = 2**(n - 1)
+    p = orthopoly1d(x, w, hn, kn, wfunc, (-1, 1), monic,
+                    lambda x: _ufuncs.eval_chebyt(n, x))
+    return p
+
+# Chebyshev of the second kind
+#    U_n(x) = (n+1)! sqrt(pi) / (2*_gam(n+3./2)) * P^(1/2,1/2)_n(x)
+
+
+def roots_chebyu(n, mu=False):
+    r"""Gauss-Chebyshev (second kind) quadrature.
+
+    Computes the sample points and weights for Gauss-Chebyshev
+    quadrature. The sample points are the roots of the nth degree
+    Chebyshev polynomial of the second kind, :math:`U_n(x)`. These
+    sample points and weights correctly integrate polynomials of
+    degree :math:`2n - 1` or less over the interval :math:`[-1, 1]`
+    with weight function :math:`w(x) = \sqrt{1 - x^2}`. See 22.2.5 in
+    [AS]_ for details.
+
+    Parameters
+    ----------
+    n : int
+        quadrature order
+    mu : bool, optional
+        If True, return the sum of the weights, optional.
+
+    Returns
+    -------
+    x : ndarray
+        Sample points
+    w : ndarray
+        Weights
+    mu : float
+        Sum of the weights
+
+    See Also
+    --------
+    scipy.integrate.quadrature
+    scipy.integrate.fixed_quad
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
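+    Examples
+    --------
+    As a quick illustrative check, a 3-point rule must integrate
+    :math:`f(x) = x^2` exactly, where
+    :math:`\int_{-1}^1 x^2 \sqrt{1 - x^2} \, dx = \pi/8`:
+
+    >>> import numpy as np
+    >>> from scipy.special import roots_chebyu
+    >>> x, w = roots_chebyu(3)
+    >>> np.allclose(np.sum(w * x**2), np.pi / 8)
+    True
+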
+    """
+    m = int(n)
+    if n < 1 or n != m:
+        raise ValueError('n must be a positive integer.')
+    t = np.arange(m, 0, -1) * pi / (m + 1)
+    x = np.cos(t)
+    w = pi * np.sin(t)**2 / (m + 1)
+    if mu:
+        return x, w, pi / 2
+    else:
+        return x, w
+
+
+def chebyu(n, monic=False):
+    r"""Chebyshev polynomial of the second kind.
+
+    Defined to be the solution of
+
+    .. math::
+        (1 - x^2)\frac{d^2}{dx^2}U_n - 3x\frac{d}{dx}U_n
+          + n(n + 2)U_n = 0;
+
+    :math:`U_n` is a polynomial of degree :math:`n`.
+
+    Parameters
+    ----------
+    n : int
+        Degree of the polynomial.
+    monic : bool, optional
+        If `True`, scale the leading coefficient to be 1. Default is
+        `False`.
+
+    Returns
+    -------
+    U : orthopoly1d
+        Chebyshev polynomial of the second kind.
+
+    Notes
+    -----
+    The polynomials :math:`U_n` are orthogonal over :math:`[-1, 1]`
+    with weight function :math:`(1 - x^2)^{1/2}`.
+
+    See Also
+    --------
+    chebyt : Chebyshev polynomial of the first kind.
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
+    Examples
+    --------
+    Chebyshev polynomials of the second kind of order :math:`n` can
+    be obtained as the determinant of specific :math:`n \times n`
+    matrices. As an example we can check how the points obtained from
+    the determinant of the following :math:`3 \times 3` matrix
+    lie exactly on :math:`U_3`:
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.linalg import det
+    >>> from scipy.special import chebyu
+    >>> x = np.arange(-1.0, 1.0, 0.01)
+    >>> fig, ax = plt.subplots()
+    >>> ax.set_ylim(-2.0, 2.0)
+    >>> ax.set_title(r'Chebyshev polynomial $U_3$')
+    >>> ax.plot(x, chebyu(3)(x), label=rf'$U_3$')
+    >>> for p in np.arange(-1.0, 1.0, 0.1):
+    ...     ax.plot(p,
+    ...             det(np.array([[2*p, 1, 0], [1, 2*p, 1], [0, 1, 2*p]])),
+    ...             'rx')
+    >>> plt.legend(loc='best')
+    >>> plt.show()
+
+    They also satisfy the relation:
+
+    .. math::
+        U_{2n-1}(x) = 2 T_n(x)U_{n-1}(x)
+
+    where the :math:`T_n` are the Chebyshev polynomials of the first kind.
+    Let's verify it for :math:`n = 2`:
+
+    >>> from scipy.special import chebyt
+    >>> x = np.arange(-1.0, 1.0, 0.01)
+    >>> np.allclose(chebyu(3)(x), 2 * chebyt(2)(x) * chebyu(1)(x))
+    True
+
+    We can plot the Chebyshev polynomials :math:`U_n` for some values
+    of :math:`n`:
+
+    >>> x = np.arange(-1.0, 1.0, 0.01)
+    >>> fig, ax = plt.subplots()
+    >>> ax.set_ylim(-1.5, 1.5)
+    >>> ax.set_title(r'Chebyshev polynomials $U_n$')
+    >>> for n in np.arange(1,5):
+    ...     ax.plot(x, chebyu(n)(x), label=rf'$U_{n}$')
+    >>> plt.legend(loc='best')
+    >>> plt.show()
+
+    """
+    base = jacobi(n, 0.5, 0.5, monic=monic)
+    if monic:
+        return base
+    factor = sqrt(pi) / 2.0 * _gam(n + 2) / _gam(n + 1.5)
+    base._scale(factor)
+    return base
+
+# Chebyshev of the first kind        C_n(x)
+
+
+def roots_chebyc(n, mu=False):
+    r"""Gauss-Chebyshev (first kind) quadrature.
+
+    Compute the sample points and weights for Gauss-Chebyshev
+    quadrature. The sample points are the roots of the nth degree
+    Chebyshev polynomial of the first kind, :math:`C_n(x)`. These
+    sample points and weights correctly integrate polynomials of
+    degree :math:`2n - 1` or less over the interval :math:`[-2, 2]`
+    with weight function :math:`w(x) = 1 / \sqrt{1 - (x/2)^2}`. See
+    22.2.6 in [AS]_ for more details.
+
+    Parameters
+    ----------
+    n : int
+        quadrature order
+    mu : bool, optional
+        If True, return the sum of the weights, optional.
+
+    Returns
+    -------
+    x : ndarray
+        Sample points
+    w : ndarray
+        Weights
+    mu : float
+        Sum of the weights
+
+    See Also
+    --------
+    scipy.integrate.quadrature
+    scipy.integrate.fixed_quad
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
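+    Examples
+    --------
+    As a quick illustrative check, a 3-point rule must integrate
+    :math:`f(x) = x^2` exactly, where
+    :math:`\int_{-2}^2 x^2 / \sqrt{1 - (x/2)^2} \, dx = 4\pi`:
+
+    >>> import numpy as np
+    >>> from scipy.special import roots_chebyc
+    >>> x, w = roots_chebyc(3)
+    >>> np.allclose(np.sum(w * x**2), 4 * np.pi)
+    True
+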
+    """
+    x, w, m = roots_chebyt(n, True)
+    x *= 2
+    w *= 2
+    m *= 2
+    if mu:
+        return x, w, m
+    else:
+        return x, w
+
+
+def chebyc(n, monic=False):
+    r"""Chebyshev polynomial of the first kind on :math:`[-2, 2]`.
+
+    Defined as :math:`C_n(x) = 2T_n(x/2)`, where :math:`T_n` is the
+    nth Chebyshev polynomial of the first kind.
+
+    Parameters
+    ----------
+    n : int
+        Degree of the polynomial.
+    monic : bool, optional
+        If `True`, scale the leading coefficient to be 1. Default is
+        `False`.
+
+    Returns
+    -------
+    C : orthopoly1d
+        Chebyshev polynomial of the first kind on :math:`[-2, 2]`.
+
+    Notes
+    -----
+    The polynomials :math:`C_n(x)` are orthogonal over :math:`[-2, 2]`
+    with weight function :math:`1/\sqrt{1 - (x/2)^2}`.
+
+    See Also
+    --------
+    chebyt : Chebyshev polynomial of the first kind.
+
+    References
+    ----------
+    .. [1] Abramowitz and Stegun, "Handbook of Mathematical Functions"
+           Section 22. National Bureau of Standards, 1972.
+
+    """
+    if n < 0:
+        raise ValueError("n must be nonnegative.")
+
+    if n == 0:
+        n1 = n + 1
+    else:
+        n1 = n
+    x, w = roots_chebyc(n1)
+    if n == 0:
+        x, w = [], []
+    hn = 4 * pi * ((n == 0) + 1)
+    kn = 1.0
+    p = orthopoly1d(x, w, hn, kn,
+                    wfunc=lambda x: 1.0 / sqrt(1 - x * x / 4.0),
+                    limits=(-2, 2), monic=monic)
+    if not monic:
+        p._scale(2.0 / p(2))
+        p.__dict__['_eval_func'] = lambda x: _ufuncs.eval_chebyc(n, x)
+    return p
+
+# Chebyshev of the second kind       S_n(x)
+
+
+def roots_chebys(n, mu=False):
+    r"""Gauss-Chebyshev (second kind) quadrature.
+
+    Compute the sample points and weights for Gauss-Chebyshev
+    quadrature. The sample points are the roots of the nth degree
+    Chebyshev polynomial of the second kind, :math:`S_n(x)`. These
+    sample points and weights correctly integrate polynomials of
+    degree :math:`2n - 1` or less over the interval :math:`[-2, 2]`
+    with weight function :math:`w(x) = \sqrt{1 - (x/2)^2}`. See 22.2.7
+    in [AS]_ for more details.
+
+    Parameters
+    ----------
+    n : int
+        quadrature order
+    mu : bool, optional
+        If True, return the sum of the weights, optional.
+
+    Returns
+    -------
+    x : ndarray
+        Sample points
+    w : ndarray
+        Weights
+    mu : float
+        Sum of the weights
+
+    See Also
+    --------
+    scipy.integrate.quadrature
+    scipy.integrate.fixed_quad
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
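+    Examples
+    --------
+    As a quick illustrative check, a 3-point rule must integrate
+    :math:`f(x) = x^2` exactly, where
+    :math:`\int_{-2}^2 x^2 \sqrt{1 - (x/2)^2} \, dx = \pi`:
+
+    >>> import numpy as np
+    >>> from scipy.special import roots_chebys
+    >>> x, w = roots_chebys(3)
+    >>> np.allclose(np.sum(w * x**2), np.pi)
+    True
+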
+    """
+    x, w, m = roots_chebyu(n, True)
+    x *= 2
+    w *= 2
+    m *= 2
+    if mu:
+        return x, w, m
+    else:
+        return x, w
+
+
+def chebys(n, monic=False):
+    r"""Chebyshev polynomial of the second kind on :math:`[-2, 2]`.
+
+    Defined as :math:`S_n(x) = U_n(x/2)` where :math:`U_n` is the
+    nth Chebyshev polynomial of the second kind.
+
+    Parameters
+    ----------
+    n : int
+        Degree of the polynomial.
+    monic : bool, optional
+        If `True`, scale the leading coefficient to be 1. Default is
+        `False`.
+
+    Returns
+    -------
+    S : orthopoly1d
+        Chebyshev polynomial of the second kind on :math:`[-2, 2]`.
+
+    Notes
+    -----
+    The polynomials :math:`S_n(x)` are orthogonal over :math:`[-2, 2]`
+    with weight function :math:`\sqrt{1 - (x/2)^2}`.
+
+    See Also
+    --------
+    chebyu : Chebyshev polynomial of the second kind
+
+    References
+    ----------
+    .. [1] Abramowitz and Stegun, "Handbook of Mathematical Functions"
+           Section 22. National Bureau of Standards, 1972.
+
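+    Examples
+    --------
+    Verify, as a quick numerical check, the defining relation
+    :math:`S_n(x) = U_n(x/2)` on a few points:
+
+    >>> import numpy as np
+    >>> from scipy.special import chebys, chebyu
+    >>> x = np.linspace(-2, 2, 5)
+    >>> np.allclose(chebys(4)(x), chebyu(4)(x / 2))
+    True
+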
+    """
+    if n < 0:
+        raise ValueError("n must be nonnegative.")
+
+    if n == 0:
+        n1 = n + 1
+    else:
+        n1 = n
+    x, w = roots_chebys(n1)
+    if n == 0:
+        x, w = [], []
+    hn = pi
+    kn = 1.0
+    p = orthopoly1d(x, w, hn, kn,
+                    wfunc=lambda x: sqrt(1 - x * x / 4.0),
+                    limits=(-2, 2), monic=monic)
+    if not monic:
+        factor = (n + 1.0) / p(2)
+        p._scale(factor)
+        p.__dict__['_eval_func'] = lambda x: _ufuncs.eval_chebys(n, x)
+    return p
+
+# Shifted Chebyshev of the first kind     T^*_n(x)
+
+
+def roots_sh_chebyt(n, mu=False):
+    r"""Gauss-Chebyshev (first kind, shifted) quadrature.
+
+    Compute the sample points and weights for Gauss-Chebyshev
+    quadrature. The sample points are the roots of the nth degree
+    shifted Chebyshev polynomial of the first kind, :math:`T^*_n(x)`.
+    These sample points and weights correctly integrate polynomials of
+    degree :math:`2n - 1` or less over the interval :math:`[0, 1]`
+    with weight function :math:`w(x) = 1/\sqrt{x - x^2}`. See 22.2.8
+    in [AS]_ for more details.
+
+    Parameters
+    ----------
+    n : int
+        quadrature order
+    mu : bool, optional
+        If True, also return the sum of the weights.
+
+    Returns
+    -------
+    x : ndarray
+        Sample points
+    w : ndarray
+        Weights
+    mu : float
+        Sum of the weights
+
+    See Also
+    --------
+    scipy.integrate.quadrature
+    scipy.integrate.fixed_quad
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
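+    Examples
+    --------
+    As a quick sanity check, the weights sum to the mass of the weight
+    function, :math:`\int_0^1 (x - x^2)^{-1/2}\,dx = \pi`:
+
+    >>> import numpy as np
+    >>> from scipy.special import roots_sh_chebyt
+    >>> x, w = roots_sh_chebyt(5)
+    >>> np.allclose(w.sum(), np.pi)
+    True
+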
+    """
+    xw = roots_chebyt(n, mu)
+    return ((xw[0] + 1) / 2,) + xw[1:]
+
+
+def sh_chebyt(n, monic=False):
+    r"""Shifted Chebyshev polynomial of the first kind.
+
+    Defined as :math:`T^*_n(x) = T_n(2x - 1)` for :math:`T_n` the nth
+    Chebyshev polynomial of the first kind.
+
+    Parameters
+    ----------
+    n : int
+        Degree of the polynomial.
+    monic : bool, optional
+        If `True`, scale the leading coefficient to be 1. Default is
+        `False`.
+
+    Returns
+    -------
+    T : orthopoly1d
+        Shifted Chebyshev polynomial of the first kind.
+
+    Notes
+    -----
+    The polynomials :math:`T^*_n` are orthogonal over :math:`[0, 1]`
+    with weight function :math:`(x - x^2)^{-1/2}`.
+
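+    Examples
+    --------
+    Verify, as a quick numerical check, the defining relation
+    :math:`T^*_n(x) = T_n(2x - 1)` on a few points:
+
+    >>> import numpy as np
+    >>> from scipy.special import sh_chebyt, chebyt
+    >>> x = np.linspace(0, 1, 5)
+    >>> np.allclose(sh_chebyt(3)(x), chebyt(3)(2*x - 1))
+    True
+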
+    """
+    base = sh_jacobi(n, 0.0, 0.5, monic=monic)
+    if monic:
+        return base
+    if n > 0:
+        factor = 4**n / 2.0
+    else:
+        factor = 1.0
+    base._scale(factor)
+    return base
+
+
+# Shifted Chebyshev of the second kind    U^*_n(x)
+def roots_sh_chebyu(n, mu=False):
+    r"""Gauss-Chebyshev (second kind, shifted) quadrature.
+
+    Computes the sample points and weights for Gauss-Chebyshev
+    quadrature. The sample points are the roots of the nth degree
+    shifted Chebyshev polynomial of the second kind, :math:`U^*_n(x)`.
+    These sample points and weights correctly integrate polynomials of
+    degree :math:`2n - 1` or less over the interval :math:`[0, 1]`
+    with weight function :math:`w(x) = \sqrt{x - x^2}`. See 22.2.9 in
+    [AS]_ for more details.
+
+    Parameters
+    ----------
+    n : int
+        quadrature order
+    mu : bool, optional
+        If True, also return the sum of the weights.
+
+    Returns
+    -------
+    x : ndarray
+        Sample points
+    w : ndarray
+        Weights
+    mu : float
+        Sum of the weights
+
+    See Also
+    --------
+    scipy.integrate.quadrature
+    scipy.integrate.fixed_quad
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
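+    Examples
+    --------
+    As a quick sanity check, the weights sum to the mass of the weight
+    function, :math:`\int_0^1 \sqrt{x - x^2}\,dx = \pi/8`:
+
+    >>> import numpy as np
+    >>> from scipy.special import roots_sh_chebyu
+    >>> x, w = roots_sh_chebyu(5)
+    >>> np.allclose(w.sum(), np.pi / 8)
+    True
+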
+    """
+    x, w, m = roots_chebyu(n, True)
+    x = (x + 1) / 2
+    m_us = _ufuncs.beta(1.5, 1.5)
+    w *= m_us / m
+    if mu:
+        return x, w, m_us
+    else:
+        return x, w
+
+
+def sh_chebyu(n, monic=False):
+    r"""Shifted Chebyshev polynomial of the second kind.
+
+    Defined as :math:`U^*_n(x) = U_n(2x - 1)` for :math:`U_n` the nth
+    Chebyshev polynomial of the second kind.
+
+    Parameters
+    ----------
+    n : int
+        Degree of the polynomial.
+    monic : bool, optional
+        If `True`, scale the leading coefficient to be 1. Default is
+        `False`.
+
+    Returns
+    -------
+    U : orthopoly1d
+        Shifted Chebyshev polynomial of the second kind.
+
+    Notes
+    -----
+    The polynomials :math:`U^*_n` are orthogonal over :math:`[0, 1]`
+    with weight function :math:`(x - x^2)^{1/2}`.
+
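+    Examples
+    --------
+    Verify, as a quick numerical check, the defining relation
+    :math:`U^*_n(x) = U_n(2x - 1)` on a few points:
+
+    >>> import numpy as np
+    >>> from scipy.special import sh_chebyu, chebyu
+    >>> x = np.linspace(0, 1, 5)
+    >>> np.allclose(sh_chebyu(3)(x), chebyu(3)(2*x - 1))
+    True
+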
+    """
+    base = sh_jacobi(n, 2.0, 1.5, monic=monic)
+    if monic:
+        return base
+    factor = 4**n
+    base._scale(factor)
+    return base
+
+# Legendre
+
+
+def roots_legendre(n, mu=False):
+    r"""Gauss-Legendre quadrature.
+
+    Compute the sample points and weights for Gauss-Legendre
+    quadrature [GL]_. The sample points are the roots of the nth degree
+    Legendre polynomial :math:`P_n(x)`. These sample points and
+    weights correctly integrate polynomials of degree :math:`2n - 1`
+    or less over the interval :math:`[-1, 1]` with weight function
+    :math:`w(x) = 1`. See 22.2.10 in [AS]_ for more details.
+
+    Parameters
+    ----------
+    n : int
+        quadrature order
+    mu : bool, optional
+        If True, also return the sum of the weights.
+
+    Returns
+    -------
+    x : ndarray
+        Sample points
+    w : ndarray
+        Weights
+    mu : float
+        Sum of the weights
+
+    See Also
+    --------
+    scipy.integrate.quadrature
+    scipy.integrate.fixed_quad
+    numpy.polynomial.legendre.leggauss
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+    .. [GL] Gauss-Legendre quadrature, Wikipedia,
+        https://en.wikipedia.org/wiki/Gauss%E2%80%93Legendre_quadrature
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.special import roots_legendre, eval_legendre
+    >>> roots, weights = roots_legendre(9)
+
+    ``roots`` holds the roots, and ``weights`` holds the weights for
+    Gauss-Legendre quadrature.
+
+    >>> roots
+    array([-0.96816024, -0.83603111, -0.61337143, -0.32425342,  0.        ,
+            0.32425342,  0.61337143,  0.83603111,  0.96816024])
+    >>> weights
+    array([0.08127439, 0.18064816, 0.2606107 , 0.31234708, 0.33023936,
+           0.31234708, 0.2606107 , 0.18064816, 0.08127439])
+
+    Verify that we have the roots by evaluating the degree 9 Legendre
+    polynomial at ``roots``.  All the values are approximately zero:
+
+    >>> eval_legendre(9, roots)
+    array([-8.88178420e-16, -2.22044605e-16,  1.11022302e-16,  1.11022302e-16,
+            0.00000000e+00, -5.55111512e-17, -1.94289029e-16,  1.38777878e-16,
+           -8.32667268e-17])
+
+    Here we'll show how the above values can be used to estimate the
+    integral from 1 to 2 of f(t) = t + 1/t with Gauss-Legendre
+    quadrature [GL]_.  First define the function and the integration
+    limits.
+
+    >>> def f(t):
+    ...    return t + 1/t
+    ...
+    >>> a = 1
+    >>> b = 2
+
+    We'll use ``integral(f(t), t=a, t=b)`` to denote the definite integral
+    of f from t=a to t=b.  The sample points in ``roots`` are from the
+    interval [-1, 1], so we'll rewrite the integral with the simple change
+    of variable::
+
+        x = 2/(b - a) * t - (a + b)/(b - a)
+
+    with inverse::
+
+        t = (b - a)/2 * x + (a + b)/2
+
+    Then::
+
+        integral(f(t), a, b) =
+            (b - a)/2 * integral(f((b-a)/2*x + (a+b)/2), x=-1, x=1)
+
+    We can approximate the latter integral with the values returned
+    by `roots_legendre`.
+
+    Map the roots computed above from [-1, 1] to [a, b].
+
+    >>> t = (b - a)/2 * roots + (a + b)/2
+
+    Approximate the integral as the weighted sum of the function values.
+
+    >>> (b - a)/2 * f(t).dot(weights)
+    2.1931471805599276
+
+    Compare that to the exact result, which is 3/2 + log(2):
+
+    >>> 1.5 + np.log(2)
+    2.1931471805599454
+
+    """
+    m = int(n)
+    if n < 1 or n != m:
+        raise ValueError("n must be a positive integer.")
+
+    mu0 = 2.0
+    an_func = lambda k: 0.0 * k
+    bn_func = lambda k: k * np.sqrt(1.0 / (4 * k * k - 1))
+    f = _ufuncs.eval_legendre
+    df = lambda n, x: (-n*x*_ufuncs.eval_legendre(n, x)
+                       + n*_ufuncs.eval_legendre(n-1, x))/(1-x**2)
+    return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu)
+
+
+def legendre(n, monic=False):
+    r"""Legendre polynomial.
+
+    Defined to be the solution of
+
+    .. math::
+        \frac{d}{dx}\left[(1 - x^2)\frac{d}{dx}P_n(x)\right]
+          + n(n + 1)P_n(x) = 0;
+
+    :math:`P_n(x)` is a polynomial of degree :math:`n`.
+
+    Parameters
+    ----------
+    n : int
+        Degree of the polynomial.
+    monic : bool, optional
+        If `True`, scale the leading coefficient to be 1. Default is
+        `False`.
+
+    Returns
+    -------
+    P : orthopoly1d
+        Legendre polynomial.
+
+    Notes
+    -----
+    The polynomials :math:`P_n` are orthogonal over :math:`[-1, 1]`
+    with weight function 1.
+
+    Examples
+    --------
+    Generate the 3rd-order Legendre polynomial 1/2*(5x^3 + 0x^2 - 3x + 0):
+
+    >>> from scipy.special import legendre
+    >>> legendre(3)
+    poly1d([ 2.5,  0. , -1.5,  0. ])
+
+    """
+    if n < 0:
+        raise ValueError("n must be nonnegative.")
+
+    if n == 0:
+        n1 = n + 1
+    else:
+        n1 = n
+    x, w = roots_legendre(n1)
+    if n == 0:
+        x, w = [], []
+    hn = 2.0 / (2 * n + 1)
+    kn = _gam(2 * n + 1) / _gam(n + 1)**2 / 2.0**n
+    p = orthopoly1d(x, w, hn, kn, wfunc=lambda x: 1.0, limits=(-1, 1),
+                    monic=monic,
+                    eval_func=lambda x: _ufuncs.eval_legendre(n, x))
+    return p
+
+# Shifted Legendre              P^*_n(x)
+
+
+def roots_sh_legendre(n, mu=False):
+    r"""Gauss-Legendre (shifted) quadrature.
+
+    Compute the sample points and weights for Gauss-Legendre
+    quadrature. The sample points are the roots of the nth degree
+    shifted Legendre polynomial :math:`P^*_n(x)`. These sample points
+    and weights correctly integrate polynomials of degree :math:`2n -
+    1` or less over the interval :math:`[0, 1]` with weight function
+    :math:`w(x) = 1`. See 22.2.11 in [AS]_ for details.
+
+    Parameters
+    ----------
+    n : int
+        quadrature order
+    mu : bool, optional
+        If True, also return the sum of the weights.
+
+    Returns
+    -------
+    x : ndarray
+        Sample points
+    w : ndarray
+        Weights
+    mu : float
+        Sum of the weights
+
+    See Also
+    --------
+    scipy.integrate.quadrature
+    scipy.integrate.fixed_quad
+
+    References
+    ----------
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
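+    Examples
+    --------
+    A rule of order 5 integrates polynomials of degree up to 9 exactly
+    on :math:`[0, 1]`; for instance, :math:`\int_0^1 x^3\,dx = 1/4`:
+
+    >>> import numpy as np
+    >>> from scipy.special import roots_sh_legendre
+    >>> x, w = roots_sh_legendre(5)
+    >>> np.allclose(np.sum(w * x**3), 0.25)
+    True
+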
+    """
+    x, w = roots_legendre(n)
+    x = (x + 1) / 2
+    w /= 2
+    if mu:
+        return x, w, 1.0
+    else:
+        return x, w
+
+
+def sh_legendre(n, monic=False):
+    r"""Shifted Legendre polynomial.
+
+    Defined as :math:`P^*_n(x) = P_n(2x - 1)` for :math:`P_n` the nth
+    Legendre polynomial.
+
+    Parameters
+    ----------
+    n : int
+        Degree of the polynomial.
+    monic : bool, optional
+        If `True`, scale the leading coefficient to be 1. Default is
+        `False`.
+
+    Returns
+    -------
+    P : orthopoly1d
+        Shifted Legendre polynomial.
+
+    Notes
+    -----
+    The polynomials :math:`P^*_n` are orthogonal over :math:`[0, 1]`
+    with weight function 1.
+
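+    Examples
+    --------
+    Verify, as a quick numerical check, the defining relation
+    :math:`P^*_n(x) = P_n(2x - 1)` on a few points:
+
+    >>> import numpy as np
+    >>> from scipy.special import sh_legendre, legendre
+    >>> x = np.linspace(0, 1, 5)
+    >>> np.allclose(sh_legendre(3)(x), legendre(3)(2*x - 1))
+    True
+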
+    """
+    if n < 0:
+        raise ValueError("n must be nonnegative.")
+
+    wfunc = lambda x: 0.0 * x + 1.0
+    if n == 0:
+        return orthopoly1d([], [], 1.0, 1.0, wfunc, (0, 1), monic,
+                           lambda x: _ufuncs.eval_sh_legendre(n, x))
+    x, w = roots_sh_legendre(n)
+    hn = 1.0 / (2 * n + 1.0)
+    kn = _gam(2 * n + 1) / _gam(n + 1)**2
+    p = orthopoly1d(x, w, hn, kn, wfunc, limits=(0, 1), monic=monic,
+                    eval_func=lambda x: _ufuncs.eval_sh_legendre(n, x))
+    return p
+
+
+# Make the old root function names an alias for the new ones
+_modattrs = globals()
+for newfun, oldfun in _rootfuns_map.items():
+    _modattrs[oldfun] = _modattrs[newfun]
+    __all__.append(oldfun)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/_orthogonal.pyi b/__packaged__/coreml/.python_dependencies/scipy/special/_orthogonal.pyi
new file mode 100644
index 00000000..75f6de52
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/_orthogonal.pyi
@@ -0,0 +1,341 @@
+from typing import (
+    Any,
+    Callable,
+    List,
+    Literal,
+    Optional,
+    overload,
+    Tuple,
+    Union,
+)
+
+import numpy
+
+_IntegerType = Union[int, numpy.integer]
+_FloatingType = Union[float, numpy.floating]
+_PointsAndWeights = Tuple[numpy.ndarray, numpy.ndarray]
+_PointsAndWeightsAndMu = Tuple[numpy.ndarray, numpy.ndarray, float]
+
+_ArrayLike0D = Union[
+    bool,
+    int,
+    float,
+    complex,
+    str,
+    bytes,
+    numpy.generic,
+]
+
+__all__ = [
+    'legendre',
+    'chebyt',
+    'chebyu',
+    'chebyc',
+    'chebys',
+    'jacobi',
+    'laguerre',
+    'genlaguerre',
+    'hermite',
+    'hermitenorm',
+    'gegenbauer',
+    'sh_legendre',
+    'sh_chebyt',
+    'sh_chebyu',
+    'sh_jacobi',
+    'roots_legendre',
+    'roots_chebyt',
+    'roots_chebyu',
+    'roots_chebyc',
+    'roots_chebys',
+    'roots_jacobi',
+    'roots_laguerre',
+    'roots_genlaguerre',
+    'roots_hermite',
+    'roots_hermitenorm',
+    'roots_gegenbauer',
+    'roots_sh_legendre',
+    'roots_sh_chebyt',
+    'roots_sh_chebyu',
+    'roots_sh_jacobi',
+]
+
+@overload
+def roots_jacobi(
+        n: _IntegerType,
+        alpha: _FloatingType,
+        beta: _FloatingType,
+) -> _PointsAndWeights: ...
+@overload
+def roots_jacobi(
+        n: _IntegerType,
+        alpha: _FloatingType,
+        beta: _FloatingType,
+        mu: Literal[False],
+) -> _PointsAndWeights: ...
+@overload
+def roots_jacobi(
+        n: _IntegerType,
+        alpha: _FloatingType,
+        beta: _FloatingType,
+        mu: Literal[True],
+) -> _PointsAndWeightsAndMu: ...
+
+@overload
+def roots_sh_jacobi(
+        n: _IntegerType,
+        p1: _FloatingType,
+        q1: _FloatingType,
+) -> _PointsAndWeights: ...
+@overload
+def roots_sh_jacobi(
+        n: _IntegerType,
+        p1: _FloatingType,
+        q1: _FloatingType,
+        mu: Literal[False],
+) -> _PointsAndWeights: ...
+@overload
+def roots_sh_jacobi(
+        n: _IntegerType,
+        p1: _FloatingType,
+        q1: _FloatingType,
+        mu: Literal[True],
+) -> _PointsAndWeightsAndMu: ...
+
+@overload
+def roots_genlaguerre(
+        n: _IntegerType,
+        alpha: _FloatingType,
+) -> _PointsAndWeights: ...
+@overload
+def roots_genlaguerre(
+        n: _IntegerType,
+        alpha: _FloatingType,
+        mu: Literal[False],
+) -> _PointsAndWeights: ...
+@overload
+def roots_genlaguerre(
+        n: _IntegerType,
+        alpha: _FloatingType,
+        mu: Literal[True],
+) -> _PointsAndWeightsAndMu: ...
+
+@overload
+def roots_laguerre(n: _IntegerType) -> _PointsAndWeights: ...
+@overload
+def roots_laguerre(
+        n: _IntegerType,
+        mu: Literal[False],
+) -> _PointsAndWeights: ...
+@overload
+def roots_laguerre(
+        n: _IntegerType,
+        mu: Literal[True],
+) -> _PointsAndWeightsAndMu: ...
+
+@overload
+def roots_hermite(n: _IntegerType) -> _PointsAndWeights: ...
+@overload
+def roots_hermite(
+        n: _IntegerType,
+        mu: Literal[False],
+) -> _PointsAndWeights: ...
+@overload
+def roots_hermite(
+        n: _IntegerType,
+        mu: Literal[True],
+) -> _PointsAndWeightsAndMu: ...
+
+@overload
+def roots_hermitenorm(n: _IntegerType) -> _PointsAndWeights: ...
+@overload
+def roots_hermitenorm(
+        n: _IntegerType,
+        mu: Literal[False],
+) -> _PointsAndWeights: ...
+@overload
+def roots_hermitenorm(
+        n: _IntegerType,
+        mu: Literal[True],
+) -> _PointsAndWeightsAndMu: ...
+
+@overload
+def roots_gegenbauer(
+        n: _IntegerType,
+        alpha: _FloatingType,
+) -> _PointsAndWeights: ...
+@overload
+def roots_gegenbauer(
+        n: _IntegerType,
+        alpha: _FloatingType,
+        mu: Literal[False],
+) -> _PointsAndWeights: ...
+@overload
+def roots_gegenbauer(
+        n: _IntegerType,
+        alpha: _FloatingType,
+        mu: Literal[True],
+) -> _PointsAndWeightsAndMu: ...
+
+@overload
+def roots_chebyt(n: _IntegerType) -> _PointsAndWeights: ...
+@overload
+def roots_chebyt(
+        n: _IntegerType,
+        mu: Literal[False],
+) -> _PointsAndWeights: ...
+@overload
+def roots_chebyt(
+        n: _IntegerType,
+        mu: Literal[True],
+) -> _PointsAndWeightsAndMu: ...
+
+@overload
+def roots_chebyu(n: _IntegerType) -> _PointsAndWeights: ...
+@overload
+def roots_chebyu(
+        n: _IntegerType,
+        mu: Literal[False],
+) -> _PointsAndWeights: ...
+@overload
+def roots_chebyu(
+        n: _IntegerType,
+        mu: Literal[True],
+) -> _PointsAndWeightsAndMu: ...
+
+@overload
+def roots_chebyc(n: _IntegerType) -> _PointsAndWeights: ...
+@overload
+def roots_chebyc(
+        n: _IntegerType,
+        mu: Literal[False],
+) -> _PointsAndWeights: ...
+@overload
+def roots_chebyc(
+        n: _IntegerType,
+        mu: Literal[True],
+) -> _PointsAndWeightsAndMu: ...
+
+@overload
+def roots_chebys(n: _IntegerType) -> _PointsAndWeights: ...
+@overload
+def roots_chebys(
+        n: _IntegerType,
+        mu: Literal[False],
+) -> _PointsAndWeights: ...
+@overload
+def roots_chebys(
+        n: _IntegerType,
+        mu: Literal[True],
+) -> _PointsAndWeightsAndMu: ...
+
+@overload
+def roots_sh_chebyt(n: _IntegerType) -> _PointsAndWeights: ...
+@overload
+def roots_sh_chebyt(
+        n: _IntegerType,
+        mu: Literal[False],
+) -> _PointsAndWeights: ...
+@overload
+def roots_sh_chebyt(
+        n: _IntegerType,
+        mu: Literal[True],
+) -> _PointsAndWeightsAndMu: ...
+
+@overload
+def roots_sh_chebyu(n: _IntegerType) -> _PointsAndWeights: ...
+@overload
+def roots_sh_chebyu(
+        n: _IntegerType,
+        mu: Literal[False],
+) -> _PointsAndWeights: ...
+@overload
+def roots_sh_chebyu(
+        n: _IntegerType,
+        mu: Literal[True],
+) -> _PointsAndWeightsAndMu: ...
+
+@overload
+def roots_legendre(n: _IntegerType) -> _PointsAndWeights: ...
+@overload
+def roots_legendre(
+        n: _IntegerType,
+        mu: Literal[False],
+) -> _PointsAndWeights: ...
+@overload
+def roots_legendre(
+        n: _IntegerType,
+        mu: Literal[True],
+) -> _PointsAndWeightsAndMu: ...
+
+@overload
+def roots_sh_legendre(n: _IntegerType) -> _PointsAndWeights: ...
+@overload
+def roots_sh_legendre(
+        n: _IntegerType,
+        mu: Literal[False],
+) -> _PointsAndWeights: ...
+@overload
+def roots_sh_legendre(
+        n: _IntegerType,
+        mu: Literal[True],
+) -> _PointsAndWeightsAndMu: ...
+
+class orthopoly1d(numpy.poly1d):
+    def __init__(
+            self,
+            roots: numpy.typing.ArrayLike,
+            weights: Optional[numpy.typing.ArrayLike],
+            hn: float = ...,
+            kn: float = ...,
+            wfunc: Optional[Callable[[float], float]] = ...,
+            limits: Optional[Tuple[float, float]] = ...,
+            monic: bool = ...,
+            eval_func: numpy.ufunc = ...,
+    ) -> None: ...
+    @property
+    def limits(self) -> Tuple[float, float]: ...
+    def weight_func(self, x: float) -> float: ...
+    @overload
+    def __call__(self, x: _ArrayLike0D) -> Any: ...
+    @overload
+    def __call__(self, x: numpy.poly1d) -> numpy.poly1d: ...  # type: ignore[misc]
+    @overload
+    def __call__(self, x: numpy.typing.ArrayLike) -> numpy.ndarray: ...
+
+def legendre(n: _IntegerType, monic: bool = ...) -> orthopoly1d: ...
+def chebyt(n: _IntegerType, monic: bool = ...) -> orthopoly1d: ...
+def chebyu(n: _IntegerType, monic: bool = ...) -> orthopoly1d: ...
+def chebyc(n: _IntegerType, monic: bool = ...) -> orthopoly1d: ...
+def chebys(n: _IntegerType, monic: bool = ...) -> orthopoly1d: ...
+def jacobi(
+        n: _IntegerType,
+        alpha: _FloatingType,
+        beta: _FloatingType,
+        monic: bool = ...,
+) -> orthopoly1d: ...
+def laguerre(n: _IntegerType, monic: bool = ...) -> orthopoly1d: ...
+def genlaguerre(
+        n: _IntegerType,
+        alpha: _FloatingType,
+        monic: bool = ...,
+) -> orthopoly1d: ...
+def hermite(n: _IntegerType, monic: bool = ...) -> orthopoly1d: ...
+def hermitenorm(n: _IntegerType, monic: bool = ...) -> orthopoly1d: ...
+def gegenbauer(
+        n: _IntegerType,
+        alpha: _FloatingType,
+        monic: bool = ...,
+) -> orthopoly1d: ...
+def sh_legendre(n: _IntegerType, monic: bool = ...) -> orthopoly1d: ...
+def sh_chebyt(n: _IntegerType, monic: bool = ...) -> orthopoly1d: ...
+def sh_chebyu(n: _IntegerType, monic: bool = ...) -> orthopoly1d: ...
+def sh_jacobi(
+        n: _IntegerType,
+        p: _FloatingType,
+        q: _FloatingType,
+        monic: bool = ...,
+) -> orthopoly1d: ...
+
+# These functions are not public, but still need stubs because they
+# get checked in the tests.
+def _roots_hermite_asy(n: _IntegerType) -> _PointsAndWeights: ...
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/cosine_cdf.py b/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/cosine_cdf.py
new file mode 100644
index 00000000..705138fc
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/cosine_cdf.py
@@ -0,0 +1,18 @@
+
+import mpmath
+
+
+def f(x):
+    return (mpmath.pi + x + mpmath.sin(x)) / (2*mpmath.pi)
+
+
+# Note: 40 digits might be overkill; a few more digits than the default
+# might be sufficient.
+mpmath.mp.dps = 40
+ts = mpmath.taylor(f, -mpmath.pi, 20)
+p, q = mpmath.pade(ts, 9, 10)
+
+p = [float(c) for c in p]
+q = [float(c) for c in q]
+print('p =', p)
+print('q =', q)
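+
+# The printed coefficients give a rational approximation of the CDF near the
+# left end of the support. Since the Taylor expansion is centered at -pi, the
+# approximant is evaluated in t = x + pi; for example, with numpy (which
+# expects coefficients in descending order):
+#     t = x + np.pi
+#     cdf_approx = np.polyval(p[::-1], t) / np.polyval(q[::-1], t)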
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/expn_asy.py b/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/expn_asy.py
new file mode 100644
index 00000000..f3cace9d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/expn_asy.py
@@ -0,0 +1,54 @@
+"""Precompute the polynomials for the asymptotic expansion of the
+generalized exponential integral.
+
+Sources
+-------
+[1] NIST, Digital Library of Mathematical Functions,
+    https://dlmf.nist.gov/8.20#ii
+
+"""
+import os
+
+try:
+    import sympy
+    from sympy import Poly
+    x = sympy.symbols('x')
+except ImportError:
+    pass
+
+
+def generate_A(K):
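+    """Generate the polynomials A_k(x) used in the expansion.
+
+    The recurrence implemented below (cf. DLMF 8.20(ii)) is
+    A_0(x) = 1 and A_{k+1}(x) = (1 - 2*k*x)*A_k(x) + x*(x + 1)*A_k'(x).
+    """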
+    A = [Poly(1, x)]
+    for k in range(K):
+        A.append(Poly(1 - 2*k*x, x)*A[k] + Poly(x*(x + 1))*A[k].diff())
+    return A
+
+
+WARNING = """\
+/* This file was automatically generated by _precompute/expn_asy.py.
+ * Do not edit it manually!
+ */
+"""
+
+
+def main():
+    print(__doc__)
+    fn = os.path.join('..', 'cephes', 'expn.h')
+
+    K = 12
+    A = generate_A(K)
+    with open(fn + '.new', 'w') as f:
+        f.write(WARNING)
+        f.write("#define nA {}\n".format(len(A)))
+        for k, Ak in enumerate(A):
+            tmp = ', '.join([str(x.evalf(18)) for x in Ak.coeffs()])
+            f.write("static const double A{}[] = {{{}}};\n".format(k, tmp))
+        tmp = ", ".join(["A{}".format(k) for k in range(K + 1)])
+        f.write("static const double *A[] = {{{}}};\n".format(tmp))
+        tmp = ", ".join([str(Ak.degree()) for Ak in A])
+        f.write("static const int Adegs[] = {{{}}};\n".format(tmp))
+    os.rename(fn + '.new', fn)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/gammainc_asy.py b/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/gammainc_asy.py
new file mode 100644
index 00000000..98035457
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/gammainc_asy.py
@@ -0,0 +1,116 @@
+"""
+Precompute coefficients of Temme's asymptotic expansion for gammainc.
+
+This takes about 8 hours to run on a 2.3 GHz MacBook Pro with 4 GB of RAM.
+
+Sources:
+[1] NIST, "Digital Library of Mathematical Functions",
+    https://dlmf.nist.gov/
+
+"""
+import os
+from scipy.special._precompute.utils import lagrange_inversion
+
+try:
+    import mpmath as mp
+except ImportError:
+    pass
+
+
+def compute_a(n):
+    """a_k from DLMF 5.11.6"""
+    a = [mp.sqrt(2)/2]
+    for k in range(1, n):
+        ak = a[-1]/k
+        for j in range(1, len(a)):
+            ak -= a[j]*a[-j]/(j + 1)
+        ak /= a[0]*(1 + mp.mpf(1)/(k + 1))
+        a.append(ak)
+    return a
+
+
+def compute_g(n):
+    """g_k from DLMF 5.11.3/5.11.5"""
+    a = compute_a(2*n)
+    g = [mp.sqrt(2)*mp.rf(0.5, k)*a[2*k] for k in range(n)]
+    return g
+
+
+def eta(lam):
+    """Function from DLMF 8.12.1 shifted to be centered at 0."""
+    if lam > 0:
+        return mp.sqrt(2*(lam - mp.log(lam + 1)))
+    elif lam < 0:
+        return -mp.sqrt(2*(lam - mp.log(lam + 1)))
+    else:
+        return 0
+
+
+def compute_alpha(n):
+    """alpha_n from DLMF 8.12.13"""
+    coeffs = mp.taylor(eta, 0, n - 1)
+    return lagrange_inversion(coeffs)
+
+
+def compute_d(K, N):
+    """d_{k, n} from DLMF 8.12.12"""
+    M = N + 2*K
+    d0 = [-mp.mpf(1)/3]
+    alpha = compute_alpha(M + 2)
+    for n in range(1, M):
+        d0.append((n + 2)*alpha[n+2])
+    d = [d0]
+    g = compute_g(K)
+    for k in range(1, K):
+        dk = []
+        for n in range(M - 2*k):
+            dk.append((-1)**k*g[k]*d[0][n] + (n + 2)*d[k-1][n+2])
+        d.append(dk)
+    for k in range(K):
+        d[k] = d[k][:N]
+    return d
+
+
+header = \
+r"""/* This file was automatically generated by _precomp/gammainc.py.
+ * Do not edit it manually!
+ */
+
+#ifndef IGAM_H
+#define IGAM_H
+
+#define K {}
+#define N {}
+
+static const double d[K][N] =
+{{"""
+
+footer = \
+r"""
+#endif
+"""
+
+
+def main():
+    print(__doc__)
+    K = 25
+    N = 25
+    with mp.workdps(50):
+        d = compute_d(K, N)
+    fn = os.path.join(os.path.dirname(__file__), '..', 'cephes', 'igam.h')
+    with open(fn + '.new', 'w') as f:
+        f.write(header.format(K, N))
+        for k, row in enumerate(d):
+            row = [mp.nstr(x, 17, min_fixed=0, max_fixed=0) for x in row]
+            f.write('{')
+            f.write(", ".join(row))
+            if k < K - 1:
+                f.write('},\n')
+            else:
+                f.write('}};\n')
+        f.write(footer)
+    os.rename(fn + '.new', fn)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/gammainc_data.py b/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/gammainc_data.py
new file mode 100644
index 00000000..b3f23cf4
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/gammainc_data.py
@@ -0,0 +1,124 @@
+"""Compute gammainc and gammaincc for large arguments and parameters
+and save the values to data files for use in tests. We can't just
+compare to mpmath's gammainc in test_mpmath.TestSystematic because it
+would take too long.
+
+Note that mpmath's gammainc is computed using hypercomb, but since it
+doesn't allow the user to increase the maximum number of terms used in
+the series it doesn't converge for many arguments. To get around this
+we copy the mpmath implementation but use more terms.
+
+This takes about 17 minutes to run on a 2.3 GHz MacBook Pro with 4 GB
+of RAM.
+
+Sources:
+[1] Fredrik Johansson and others. mpmath: a Python library for
+    arbitrary-precision floating-point arithmetic (version 0.19),
+    December 2013. http://mpmath.org/.
+
+"""
+import os
+from time import time
+import numpy as np
+from numpy import pi
+
+from scipy.special._mptestutils import mpf2float
+
+try:
+    import mpmath as mp
+except ImportError:
+    pass
+
+
+def gammainc(a, x, dps=50, maxterms=10**8):
+    """Compute gammainc exactly like mpmath does but allow for more
+    summands in hypercomb. See
+
+    mpmath/functions/expintegrals.py#L134
+
+    in the mpmath github repository.
+
+    """
+    with mp.workdps(dps):
+        z, a, b = mp.mpf(a), mp.mpf(x), mp.mpf(x)
+        G = [z]
+        negb = mp.fneg(b, exact=True)
+
+        def h(z):
+            T1 = [mp.exp(negb), b, z], [1, z, -1], [], G, [1], [1+z], b
+            return (T1,)
+
+        res = mp.hypercomb(h, [z], maxterms=maxterms)
+        return mpf2float(res)
+
+
+def gammaincc(a, x, dps=50, maxterms=10**8):
+    """Compute gammaincc exactly like mpmath does but allow for more
+    terms in hypercomb. See
+
+    mpmath/functions/expintegrals.py#L187
+
+    in the mpmath github repository.
+
+    """
+    with mp.workdps(dps):
+        z, a = a, x
+
+        if mp.isint(z):
+            try:
+                # mpmath has a fast integer path
+                return mpf2float(mp.gammainc(z, a=a, regularized=True))
+            except mp.libmp.NoConvergence:
+                pass
+        nega = mp.fneg(a, exact=True)
+        G = [z]
+        # Use 2F0 series when possible; fall back to lower gamma representation
+        try:
+            def h(z):
+                r = z-1
+                return [([mp.exp(nega), a], [1, r], [], G, [1, -r], [], 1/nega)]
+            return mpf2float(mp.hypercomb(h, [z], force_series=True))
+        except mp.libmp.NoConvergence:
+            def h(z):
+                T1 = [], [1, z-1], [z], G, [], [], 0
+                T2 = [-mp.exp(nega), a, z], [1, z, -1], [], G, [1], [1+z], a
+                return T1, T2
+            return mpf2float(mp.hypercomb(h, [z], maxterms=maxterms))
+
+
+def main():
+    t0 = time()
+    # It would be nice to have data for larger values, but either this
+    # requires prohibitively large precision (dps > 800) or mpmath has
+    # a bug. For example, gammainc(1e20, 1e20, dps=800) returns a
+    # value around 0.03, while the true value should be close to 0.5
+    # (DLMF 8.12.15).
+    print(__doc__)
+    pwd = os.path.dirname(__file__)
+    r = np.logspace(4, 14, 30)
+    ltheta = np.logspace(np.log10(pi/4), np.log10(np.arctan(0.6)), 30)
+    utheta = np.logspace(np.log10(pi/4), np.log10(np.arctan(1.4)), 30)
+
+    regimes = [(gammainc, ltheta), (gammaincc, utheta)]
+    for func, theta in regimes:
+        rg, thetag = np.meshgrid(r, theta)
+        a, x = rg*np.cos(thetag), rg*np.sin(thetag)
+        a, x = a.flatten(), x.flatten()
+        dataset = []
+        for i, (a0, x0) in enumerate(zip(a, x)):
+            if func == gammaincc:
+                # Exploit the fast integer path in gammaincc whenever
+                # possible so that the computation doesn't take too
+                # long
+                a0, x0 = np.floor(a0), np.floor(x0)
+            dataset.append((a0, x0, func(a0, x0)))
+        dataset = np.array(dataset)
+        filename = os.path.join(pwd, '..', 'tests', 'data', 'local',
+                                '{}.txt'.format(func.__name__))
+        np.savetxt(filename, dataset)
+
+    print("{} minutes elapsed".format((time() - t0)/60))
+
+
+if __name__ == "__main__":
+    main()
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/lambertw.py b/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/lambertw.py
new file mode 100644
index 00000000..2f62fa79
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/lambertw.py
@@ -0,0 +1,68 @@
+"""Compute a Pade approximation for the principal branch of the
+Lambert W function around 0 and compare it to various other
+approximations.
+
+"""
+import numpy as np
+
+try:
+    import mpmath
+    import matplotlib.pyplot as plt
+except ImportError:
+    pass
+
+
+def lambertw_pade():
+    derivs = [mpmath.diff(mpmath.lambertw, 0, n=n) for n in range(6)]
+    p, q = mpmath.pade(derivs, 3, 2)
+    return p, q
+
+
+def main():
+    print(__doc__)
+    with mpmath.workdps(50):
+        p, q = lambertw_pade()
+        p, q = p[::-1], q[::-1]
+        print("p = {}".format(p))
+        print("q = {}".format(q))
+
+    x, y = np.linspace(-1.5, 1.5, 75), np.linspace(-1.5, 1.5, 75)
+    x, y = np.meshgrid(x, y)
+    z = x + 1j*y
+    lambertw_std = []
+    for z0 in z.flatten():
+        lambertw_std.append(complex(mpmath.lambertw(z0)))
+    lambertw_std = np.array(lambertw_std).reshape(x.shape)
+
+    fig, axes = plt.subplots(nrows=3, ncols=1)
+    # Compare Pade approximation to true result
+    p = np.array([float(p0) for p0 in p])
+    q = np.array([float(q0) for q0 in q])
+    pade_approx = np.polyval(p, z)/np.polyval(q, z)
+    pade_err = abs(pade_approx - lambertw_std)
+    axes[0].pcolormesh(x, y, pade_err)
+    # Compare two terms of asymptotic series to true result
+    asy_approx = np.log(z) - np.log(np.log(z))
+    asy_err = abs(asy_approx - lambertw_std)
+    axes[1].pcolormesh(x, y, asy_err)
+    # Compare two terms of the series around the branch point to the
+    # true result
+    p = np.sqrt(2*(np.exp(1)*z + 1))
+    series_approx = -1 + p - p**2/3
+    series_err = abs(series_approx - lambertw_std)
+    im = axes[2].pcolormesh(x, y, series_err)
+
+    fig.colorbar(im, ax=axes.ravel().tolist())
+    plt.show()
+
+    fig, ax = plt.subplots(nrows=1, ncols=1)
+    pade_better = pade_err < asy_err
+    im = ax.pcolormesh(x, y, pade_better)
+    t = np.linspace(-0.3, 0.3)
+    ax.plot(-2.5*abs(t) - 0.2, t, 'r')
+    fig.colorbar(im, ax=ax)
+    plt.show()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/loggamma.py b/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/loggamma.py
new file mode 100644
index 00000000..74051ac7
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/loggamma.py
@@ -0,0 +1,43 @@
+"""Precompute series coefficients for log-Gamma."""
+
+try:
+    import mpmath
+except ImportError:
+    pass
+
+
+def stirling_series(N):
+    with mpmath.workdps(100):
+        coeffs = [mpmath.bernoulli(2*n)/(2*n*(2*n - 1))
+                  for n in range(1, N + 1)]
+    return coeffs
+
+
+def taylor_series_at_1(N):
+    coeffs = []
+    with mpmath.workdps(100):
+        coeffs.append(-mpmath.euler)
+        for n in range(2, N + 1):
+            coeffs.append((-1)**n*mpmath.zeta(n)/n)
+    return coeffs
+
+
+def main():
+    print(__doc__)
+    print()
+    stirling_coeffs = [mpmath.nstr(x, 20, min_fixed=0, max_fixed=0)
+                       for x in stirling_series(8)[::-1]]
+    taylor_coeffs = [mpmath.nstr(x, 20, min_fixed=0, max_fixed=0)
+                     for x in taylor_series_at_1(23)[::-1]]
+    print("Stirling series coefficients")
+    print("----------------------------")
+    print("\n".join(stirling_coeffs))
+    print()
+    print("Taylor series coefficients")
+    print("--------------------------")
+    print("\n".join(taylor_coeffs))
+    print()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/struve_convergence.py b/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/struve_convergence.py
new file mode 100644
index 00000000..26c2ff75
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/struve_convergence.py
@@ -0,0 +1,120 @@
+"""
+Convergence regions of the expansions used in ``struve.c``
+
+Note that for v >> z both functions tend rapidly to 0,
+and for v << -z, they tend to infinity.
+
+The floating-point functions over/underflow in the lower left and right
+corners of the figure.
+
+
+Figure legend
+=============
+
+Red region
+    Power series is close (1e-12) to the mpmath result
+
+Blue region
+    Asymptotic series is close to the mpmath result
+
+Green region
+    Bessel series is close to the mpmath result
+
+Dotted colored lines
+    Boundaries of the regions
+
+Solid colored lines
+    Boundaries estimated by the routine itself. These will be used
+    for determining which of the results to use.
+
+Black dashed line
+    The line z = 0.7*|v| + 12
+
+"""
+import numpy as np
+import matplotlib.pyplot as plt
+
+import mpmath
+
+
+def err_metric(a, b, atol=1e-290):
+    m = abs(a - b) / (atol + abs(b))
+    m[np.isinf(b) & (a == b)] = 0
+    return m
+
+
+def do_plot(is_h=True):
+    from scipy.special._ufuncs import (_struve_power_series,
+                                       _struve_asymp_large_z,
+                                       _struve_bessel_series)
+
+    vs = np.linspace(-1000, 1000, 91)
+    zs = np.sort(np.r_[1e-5, 1.0, np.linspace(0, 700, 91)[1:]])
+
+    rp = _struve_power_series(vs[:,None], zs[None,:], is_h)
+    ra = _struve_asymp_large_z(vs[:,None], zs[None,:], is_h)
+    rb = _struve_bessel_series(vs[:,None], zs[None,:], is_h)
+
+    mpmath.mp.dps = 50
+    if is_h:
+        sh = lambda v, z: float(mpmath.struveh(mpmath.mpf(v), mpmath.mpf(z)))
+    else:
+        sh = lambda v, z: float(mpmath.struvel(mpmath.mpf(v), mpmath.mpf(z)))
+    ex = np.vectorize(sh, otypes='d')(vs[:,None], zs[None,:])
+
+    err_a = err_metric(ra[0], ex) + 1e-300
+    err_p = err_metric(rp[0], ex) + 1e-300
+    err_b = err_metric(rb[0], ex) + 1e-300
+
+    err_est_a = abs(ra[1]/ra[0])
+    err_est_p = abs(rp[1]/rp[0])
+    err_est_b = abs(rb[1]/rb[0])
+
+    z_cutoff = 0.7*abs(vs) + 12
+
+    levels = [-1000, -12]
+
+    plt.cla()
+
+    plt.contourf(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], alpha=0.1)
+    plt.contourf(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], alpha=0.1)
+    plt.contourf(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], alpha=0.1)
+
+    plt.contour(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], linestyles=[':', ':'])
+    plt.contour(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], linestyles=[':', ':'])
+    plt.contour(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], linestyles=[':', ':'])
+
+    lp = plt.contour(vs, zs, np.log10(err_est_p).T, levels=levels, colors=['r', 'r'], linestyles=['-', '-'])
+    la = plt.contour(vs, zs, np.log10(err_est_a).T, levels=levels, colors=['b', 'b'], linestyles=['-', '-'])
+    lb = plt.contour(vs, zs, np.log10(err_est_b).T, levels=levels, colors=['g', 'g'], linestyles=['-', '-'])
+
+    plt.clabel(lp, fmt={-1000: 'P', -12: 'P'})
+    plt.clabel(la, fmt={-1000: 'A', -12: 'A'})
+    plt.clabel(lb, fmt={-1000: 'B', -12: 'B'})
+
+    plt.plot(vs, z_cutoff, 'k--')
+
+    plt.xlim(vs.min(), vs.max())
+    plt.ylim(zs.min(), zs.max())
+
+    plt.xlabel('v')
+    plt.ylabel('z')
+
+
+def main():
+    plt.clf()
+    plt.subplot(121)
+    do_plot(True)
+    plt.title('Struve H')
+
+    plt.subplot(122)
+    do_plot(False)
+    plt.title('Struve L')
+
+    plt.savefig('struve_convergence.png')
+    plt.show()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/utils.py b/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/utils.py
new file mode 100644
index 00000000..55cf4083
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/utils.py
@@ -0,0 +1,38 @@
+try:
+    import mpmath as mp
+except ImportError:
+    pass
+
+try:
+    from sympy.abc import x
+except ImportError:
+    pass
+
+
+def lagrange_inversion(a):
+    """Given a series
+
+    f(x) = a[1]*x + a[2]*x**2 + ... + a[n-1]*x**(n - 1),
+
+    use the Lagrange inversion formula to compute a series
+
+    g(x) = b[1]*x + b[2]*x**2 + ... + b[n-1]*x**(n - 1)
+
+    so that f(g(x)) = g(f(x)) = x mod x**n. We must have a[0] = 0, so
+    necessarily b[0] = 0 too.
+
+    The algorithm is naive and could be improved, but speed isn't an
+    issue here and it's easy to read.
+
+    """
+    n = len(a)
+    f = sum(a[i]*x**i for i in range(n))
+    h = (x/f).series(x, 0, n).removeO()
+    hpower = [h**0]
+    for k in range(n):
+        hpower.append((hpower[-1]*h).expand())
+    b = [mp.mpf(0)]
+    for k in range(1, n):
+        b.append(hpower[k].coeff(x, k - 1)/k)
+    b = [mp.mpf(x) for x in b]
+    return b
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/wright_bessel.py b/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/wright_bessel.py
new file mode 100644
index 00000000..c3d82a82
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/wright_bessel.py
@@ -0,0 +1,342 @@
+"""Precompute coefficients of several series expansions
+of Wright's generalized Bessel function Phi(a, b, x).
+
+See https://dlmf.nist.gov/10.46.E1 with rho=a, beta=b, z=x.
+"""
+from argparse import ArgumentParser, RawTextHelpFormatter
+import numpy as np
+from scipy.integrate import quad
+from scipy.optimize import minimize_scalar, curve_fit
+from time import time
+
+try:
+    import sympy
+    from sympy import EulerGamma, Rational, S, Sum, \
+        factorial, gamma, gammasimp, pi, polygamma, symbols, zeta
+    from sympy.polys.polyfuncs import horner
+except ImportError:
+    pass
+
+
+def series_small_a():
+    """Tylor series expansion of Phi(a, b, x) in a=0 up to order 5.
+    """
+    order = 5
+    a, b, x, k = symbols("a b x k")
+    A = []  # terms with a
+    X = []  # terms with x
+    B = []  # terms with b (polygammas)
+    # Phi(a, b, x) = exp(x)/gamma(b) * sum(A[i] * X[i] * B[i])
+    expression = Sum(x**k/factorial(k)/gamma(a*k+b), (k, 0, S.Infinity))
+    expression = gamma(b)/sympy.exp(x) * expression
+
+    # nth term of taylor series in a=0: a^n/n! * (d^n Phi(a, b, x)/da^n at a=0)
+    for n in range(0, order+1):
+        term = expression.diff(a, n).subs(a, 0).simplify().doit()
+        # set the whole bracket involving polygammas to 1
+        x_part = (term.subs(polygamma(0, b), 1)
+                  .replace(polygamma, lambda *args: 0))
+        # sign convention: x part always positive
+        x_part *= (-1)**n
+
+        A.append(a**n/factorial(n))
+        X.append(horner(x_part))
+        B.append(horner((term/x_part).simplify()))
+
+    s = "Tylor series expansion of Phi(a, b, x) in a=0 up to order 5.\n"
+    s += "Phi(a, b, x) = exp(x)/gamma(b) * sum(A[i] * X[i] * B[i], i=0..5)\n"
+    for name, c in zip(['A', 'X', 'B'], [A, X, B]):
+        for i in range(len(c)):
+            s += f"\n{name}[{i}] = " + str(c[i])
+    return s
+
+
+# expansion of digamma
+def dg_series(z, n):
+    """Symbolic expansion of digamma(z) in z=0 to order n.
+
+    See https://dlmf.nist.gov/5.7.E4 and with https://dlmf.nist.gov/5.5.E2
+    """
+    k = symbols("k")
+    return -1/z - EulerGamma + \
+        sympy.summation((-1)**k * zeta(k) * z**(k-1), (k, 2, n+1))
+
+
+def pg_series(k, z, n):
+    """Symbolic expansion of polygamma(k, z) in z=0 to order n."""
+    return sympy.diff(dg_series(z, n+k), z, k)
+
+
+def series_small_a_small_b():
+    """Tylor series expansion of Phi(a, b, x) in a=0 and b=0 up to order 5.
+
+    Be aware of cancellation of poles in b=0 of digamma(b)/Gamma(b) and
+    polygamma functions.
+
+    digamma(b)/Gamma(b) = -1 - 2*M_EG*b + O(b^2)
+    digamma(b)^2/Gamma(b) = 1/b + 3*M_EG + b*(-5/12*PI^2+7/2*M_EG^2) + O(b^2)
+    polygamma(1, b)/Gamma(b) = 1/b + M_EG + b*(1/12*PI^2 + 1/2*M_EG^2) + O(b^2)
+    and so on.
+    """
+    order = 5
+    a, b, x, k = symbols("a b x k")
+    M_PI, M_EG, M_Z3 = symbols("M_PI M_EG M_Z3")
+    c_subs = {pi: M_PI, EulerGamma: M_EG, zeta(3): M_Z3}
+    A = []  # terms with a
+    X = []  # terms with x
+    B = []  # terms with b (polygammas expanded)
+    C = []  # terms that generate B
+    # Phi(a, b, x) = exp(x) * sum(A[i] * X[i] * B[i])
+    # B[0] = 1
+    # B[k] = sum(C[k] * b**k/k!, k=0..)
+    # Note: C[k] can be obtained from a series expansion of 1/gamma(b).
+    expression = gamma(b)/sympy.exp(x) * \
+        Sum(x**k/factorial(k)/gamma(a*k+b), (k, 0, S.Infinity))
+
+    # nth term of taylor series in a=0: a^n/n! * (d^n Phi(a, b, x)/da^n at a=0)
+    for n in range(0, order+1):
+        term = expression.diff(a, n).subs(a, 0).simplify().doit()
+        # set the whole bracket involving polygammas to 1
+        x_part = (term.subs(polygamma(0, b), 1)
+                  .replace(polygamma, lambda *args: 0))
+        # sign convention: x part always positive
+        x_part *= (-1)**n
+        # expansion of polygamma part with 1/gamma(b)
+        pg_part = term/x_part/gamma(b)
+        if n >= 1:
+            # Note: highest term is digamma^n
+            pg_part = pg_part.replace(polygamma,
+                                      lambda k, x: pg_series(k, x, order+1+n))
+            pg_part = (pg_part.series(b, 0, n=order+1-n)
+                       .removeO()
+                       .subs(polygamma(2, 1), -2*zeta(3))
+                       .simplify()
+                       )
+
+        A.append(a**n/factorial(n))
+        X.append(horner(x_part))
+        B.append(pg_part)
+
+    # Calculate C and put in the k!
+    C = sympy.Poly(B[1].subs(c_subs), b).coeffs()
+    C.reverse()
+    for i in range(len(C)):
+        C[i] = (C[i] * factorial(i)).simplify()
+
+    s = "Tylor series expansion of Phi(a, b, x) in a=0 and b=0 up to order 5."
+    s += "\nPhi(a, b, x) = exp(x) * sum(A[i] * X[i] * B[i], i=0..5)\n"
+    s += "B[0] = 1\n"
+    s += "B[i] = sum(C[k+i-1] * b**k/k!, k=0..)\n"
+    s += "\nM_PI = pi"
+    s += "\nM_EG = EulerGamma"
+    s += "\nM_Z3 = zeta(3)"
+    for name, c in zip(['A', 'X'], [A, X]):
+        for i in range(len(c)):
+            s += f"\n{name}[{i}] = "
+            s += str(c[i])
+    # For C, do also compute the values numerically
+    for i in range(len(C)):
+        s += f"\n# C[{i}] = "
+        s += str(C[i])
+        s += f"\nC[{i}] = "
+        s += str(C[i].subs({M_EG: EulerGamma, M_PI: pi, M_Z3: zeta(3)})
+                 .evalf(17))
+
+    # Does B have the assumed structure?
+    s += "\n\nTest if B[i] does have the assumed structure."
+    s += "\nC[i] are derived from B[1] allone."
+    s += "\nTest B[2] == C[1] + b*C[2] + b^2/2*C[3] + b^3/6*C[4] + .."
+    test = sum([b**k/factorial(k) * C[k+1] for k in range(order-1)])
+    test = (test - B[2].subs(c_subs)).simplify()
+    s += f"\ntest successful = {test==S(0)}"
+    s += "\nTest B[3] == C[2] + b*C[3] + b^2/2*C[4] + .."
+    test = sum([b**k/factorial(k) * C[k+2] for k in range(order-2)])
+    test = (test - B[3].subs(c_subs)).simplify()
+    s += f"\ntest successful = {test==S(0)}"
+    return s
+
+
+def asymptotic_series():
+    """Asymptotic expansion for large x.
+
+    Phi(a, b, x) ~ Z^(1/2-b) * exp((1+a)/a * Z) * sum_k (-1)^k * C_k / Z^k
+    Z = (a*x)^(1/(1+a))
+
+    Wright (1935) lists the coefficients C_0 and C_1 (he calls them a_0 and
+    a_1). With slightly different notation, Paris (2017) lists coefficients
+    c_k up to order k=3.
+    Paris (2017) uses ZP = (1+a)/a * Z  (ZP = Z of Paris) and
+    C_k = C_0 * (-a/(1+a))^k * c_k
+    """
+    order = 8
+
+    class g(sympy.Function):
+        """Helper function g according to Wright (1935)
+
+        g(n, rho, v) = (1 + (rho+2)/3 * v + (rho+2)*(rho+3)/(2*3) * v^2 + ...)
+
+        Note: Wright (1935) uses square root of above definition.
+        """
+        nargs = 3
+
+        @classmethod
+        def eval(cls, n, rho, v):
+            if not n >= 0:
+                raise ValueError("must have n >= 0")
+            elif n == 0:
+                return 1
+            else:
+                return g(n-1, rho, v) \
+                    + gammasimp(gamma(rho+2+n)/gamma(rho+2)) \
+                    / gammasimp(gamma(3+n)/gamma(3))*v**n
+
+    class coef_C(sympy.Function):
+        """Calculate coefficients C_m for integer m.
+
+        C_m is the coefficient of v^(2*m) in the Taylor expansion in v=0 of
+        Gamma(m+1/2)/(2*pi) * (2/(rho+1))^(m+1/2) * (1-v)^(-b)
+            * g(rho, v)^(-m-1/2)
+        """
+        nargs = 3
+
+        @classmethod
+        def eval(cls, m, rho, beta):
+            if not m >= 0:
+                raise ValueError("must have m >= 0")
+
+            v = symbols("v")
+            expression = (1-v)**(-beta) * g(2*m, rho, v)**(-m-Rational(1, 2))
+            res = expression.diff(v, 2*m).subs(v, 0) / factorial(2*m)
+            res = res * (gamma(m + Rational(1, 2)) / (2*pi)
+                         * (2/(rho+1))**(m + Rational(1, 2)))
+            return res
+
+    # in order to have nice ordering/sorting of expressions, we set a = xa.
+    xa, b, xap1 = symbols("xa b xap1")
+    C0 = coef_C(0, xa, b)
+    # a1 = a(1, rho, beta)
+    s = "Asymptotic expansion for large x\n"
+    s += "Phi(a, b, x) = Z**(1/2-b) * exp((1+a)/a * Z) \n"
+    s += "               * sum((-1)**k * C[k]/Z**k, k=0..6)\n\n"
+    s += "Z      = pow(a * x, 1/(1+a))\n"
+    s += "A[k]   = pow(a, k)\n"
+    s += "B[k]   = pow(b, k)\n"
+    s += "Ap1[k] = pow(1+a, k)\n\n"
+    s += "C[0] = 1./sqrt(2. * M_PI * Ap1[1])\n"
+    for i in range(1, order+1):
+        expr = (coef_C(i, xa, b) / (C0/(1+xa)**i)).simplify()
+        factor = [x.denominator() for x in sympy.Poly(expr).coeffs()]
+        factor = sympy.lcm(factor)
+        expr = (expr * factor).simplify().collect(b, sympy.factor)
+        expr = expr.xreplace({xa+1: xap1})
+        s += f"C[{i}] = C[0] / ({factor} * Ap1[{i}])\n"
+        s += f"C[{i}] *= {str(expr)}\n\n"
+    import re
+    re_a = re.compile(r'xa\*\*(\d+)')
+    s = re_a.sub(r'A[\1]', s)
+    re_b = re.compile(r'b\*\*(\d+)')
+    s = re_b.sub(r'B[\1]', s)
+    s = s.replace('xap1', 'Ap1[1]')
+    s = s.replace('xa', 'a')
+    # max integer = 2^31-1 = 2,147,483,647. Solution: Put a point after 10
+    # or more digits.
+    re_digits = re.compile(r'(\d{10,})')
+    s = re_digits.sub(r'\1.', s)
+    return s
+
+
+def optimal_epsilon_integral():
+    """Fit optimal choice of epsilon for integral representation.
+
+    The integrand of
+        int_0^pi P(eps, a, b, x, phi) * dphi
+    can exhibit oscillatory behaviour. It stems from the cosine of P and can be
+    minimized by minimizing the arc length of the argument
+        f(phi) = eps * sin(phi) - x * eps^(-a) * sin(a * phi) + (1 - b) * phi
+    of cos(f(phi)).
+    We minimize the arc length in eps for a grid of values (a, b, x) and fit a
+    parametric function to it.
+    """
+    def fp(eps, a, b, x, phi):
+        """Derivative of f w.r.t. phi."""
+        eps_a = np.power(1. * eps, -a)
+        return eps * np.cos(phi) - a * x * eps_a * np.cos(a * phi) + 1 - b
+
+    def arclength(eps, a, b, x, epsrel=1e-2, limit=100):
+        """Compute Arc length of f.
+
+        Note that the arg length of a function f fro t0 to t1 is given by
+            int_t0^t1 sqrt(1 + f'(t)^2) dt
+        """
+        return quad(lambda phi: np.sqrt(1 + fp(eps, a, b, x, phi)**2),
+                    0, np.pi,
+                    epsrel=epsrel, limit=limit)[0]
+
+    # grid of minimal arc length values
+    data_a = [1e-3, 0.1, 0.5, 0.9, 1, 2, 4, 5, 6, 8]
+    data_b = [0, 1, 4, 7, 10]
+    data_x = [1, 1.5, 2, 4, 10, 20, 50, 100, 200, 500, 1e3, 5e3, 1e4]
+    data_a, data_b, data_x = np.meshgrid(data_a, data_b, data_x)
+    data_a, data_b, data_x = (data_a.flatten(), data_b.flatten(),
+                              data_x.flatten())
+    best_eps = []
+    for i in range(data_x.size):
+        best_eps.append(
+            minimize_scalar(lambda eps: arclength(eps, data_a[i], data_b[i],
+                                                  data_x[i]),
+                            bounds=(1e-3, 1000),
+                            method='Bounded', options={'xatol': 1e-3}).x
+        )
+    best_eps = np.array(best_eps)
+    # pandas would be nice, but here a dictionary is enough
+    df = {'a': data_a,
+          'b': data_b,
+          'x': data_x,
+          'eps': best_eps,
+          }
+
+    def func(data, A0, A1, A2, A3, A4, A5):
+        """Compute parametric function to fit."""
+        a = data['a']
+        b = data['b']
+        x = data['x']
+        return (A0 * b * np.exp(-0.5 * a)
+                + np.exp(A1 + 1 / (1 + a) * np.log(x) - A2 * np.exp(-A3 * a)
+                         + A4 / (1 + np.exp(A5 * a))))
+
+    func_params = list(curve_fit(func, df, df['eps'], method='trf')[0])
+
+    s = "Fit optimal eps for integrand P via minimal arc length\n"
+    s += "with parametric function:\n"
+    s += "optimal_eps = (A0 * b * exp(-a/2) + exp(A1 + 1 / (1 + a) * log(x)\n"
+    s += "              - A2 * exp(-A3 * a) + A4 / (1 + exp(A5 * a)))\n\n"
+    s += "Fitted parameters A0 to A5 are:\n"
+    s += ', '.join(['{:.5g}'.format(x) for x in func_params])
+    return s
+
+
+def main():
+    t0 = time()
+    parser = ArgumentParser(description=__doc__,
+                            formatter_class=RawTextHelpFormatter)
+    parser.add_argument('action', type=int, choices=[1, 2, 3, 4],
+                        help='choose which expansion to precompute\n'
+                             '1 : Series for small a\n'
+                             '2 : Series for small a and small b\n'
+                             '3 : Asymptotic series for large x\n'
+                             '    This may take some time (>4h).\n'
+                             '4 : Fit optimal eps for integral representation.'
+                        )
+    args = parser.parse_args()
+
+    switch = {1: lambda: print(series_small_a()),
+              2: lambda: print(series_small_a_small_b()),
+              3: lambda: print(asymptotic_series()),
+              4: lambda: print(optimal_epsilon_integral())
+              }
+    switch.get(args.action, lambda: print("Invalid input."))()
+    print("\n{:.1f} minutes elapsed.\n".format((time() - t0)/60))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/wright_bessel_data.py b/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/wright_bessel_data.py
new file mode 100644
index 00000000..434874a6
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/wright_bessel_data.py
@@ -0,0 +1,152 @@
+"""Compute a grid of values for Wright's generalized Bessel function
+and save the values to data files for use in tests. Using mpmath directly in
+tests would take too long.
+
+This takes about 10 minutes to run on a 2.7 GHz i7 MacBook Pro.
+"""
+from functools import lru_cache
+import os
+from time import time
+
+import numpy as np
+from scipy.special._mptestutils import mpf2float
+
+try:
+    import mpmath as mp
+except ImportError:
+    pass
+
+# exp_inf: smallest value x for which exp(x) == inf
+exp_inf = 709.78271289338403
+
+
+# 64 Byte per value
+@lru_cache(maxsize=100_000)
+def rgamma_cached(x, dps):
+    with mp.workdps(dps):
+        return mp.rgamma(x)
+
+
+def mp_wright_bessel(a, b, x, dps=50, maxterms=2000):
+    """Compute Wright's generalized Bessel function as Series with mpmath.
+    """
+    with mp.workdps(dps):
+        a, b, x = mp.mpf(a), mp.mpf(b), mp.mpf(x)
+        res = mp.nsum(lambda k: x**k / mp.fac(k)
+                      * rgamma_cached(a * k + b, dps=dps),
+                      [0, mp.inf],
+                      tol=dps, method='s', steps=[maxterms]
+                      )
+        return mpf2float(res)
+
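+# Quick sanity check, assuming mpmath is installed: for a = 0 and b = 1 every
+# term has Gamma(a*k + b) = Gamma(1) = 1, so the series collapses to exp(x),
+# and mp_wright_bessel(0, 1, 1.0) should be close to e = 2.71828...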
+
+def main():
+    t0 = time()
+    print(__doc__)
+    pwd = os.path.dirname(__file__)
+    eps = np.finfo(float).eps * 100
+
+    a_range = np.array([eps,
+                        1e-4 * (1 - eps), 1e-4, 1e-4 * (1 + eps),
+                        1e-3 * (1 - eps), 1e-3, 1e-3 * (1 + eps),
+                        0.1, 0.5,
+                        1 * (1 - eps), 1, 1 * (1 + eps),
+                        1.5, 2, 4.999, 5, 10])
+    b_range = np.array([0, eps, 1e-10, 1e-5, 0.1, 1, 2, 10, 20, 100])
+    x_range = np.array([0, eps, 1 - eps, 1, 1 + eps,
+                        1.5,
+                        2 - eps, 2, 2 + eps,
+                        9 - eps, 9, 9 + eps,
+                        10 * (1 - eps), 10, 10 * (1 + eps),
+                        100 * (1 - eps), 100, 100 * (1 + eps),
+                        500, exp_inf, 1e3, 1e5, 1e10, 1e20])
+
+    a_range, b_range, x_range = np.meshgrid(a_range, b_range, x_range,
+                                            indexing='ij')
+    a_range = a_range.flatten()
+    b_range = b_range.flatten()
+    x_range = x_range.flatten()
+
+    # filter out some values, especially too large x
+    bool_filter = ~((a_range < 5e-3) & (x_range >= exp_inf))
+    bool_filter = bool_filter & ~((a_range < 0.2) & (x_range > exp_inf))
+    bool_filter = bool_filter & ~((a_range < 0.5) & (x_range > 1e3))
+    bool_filter = bool_filter & ~((a_range < 0.56) & (x_range > 5e3))
+    bool_filter = bool_filter & ~((a_range < 1) & (x_range > 1e4))
+    bool_filter = bool_filter & ~((a_range < 1.4) & (x_range > 1e5))
+    bool_filter = bool_filter & ~((a_range < 1.8) & (x_range > 1e6))
+    bool_filter = bool_filter & ~((a_range < 2.2) & (x_range > 1e7))
+    bool_filter = bool_filter & ~((a_range < 2.5) & (x_range > 1e8))
+    bool_filter = bool_filter & ~((a_range < 2.9) & (x_range > 1e9))
+    bool_filter = bool_filter & ~((a_range < 3.3) & (x_range > 1e10))
+    bool_filter = bool_filter & ~((a_range < 3.7) & (x_range > 1e11))
+    bool_filter = bool_filter & ~((a_range < 4) & (x_range > 1e12))
+    bool_filter = bool_filter & ~((a_range < 4.4) & (x_range > 1e13))
+    bool_filter = bool_filter & ~((a_range < 4.7) & (x_range > 1e14))
+    bool_filter = bool_filter & ~((a_range < 5.1) & (x_range > 1e15))
+    bool_filter = bool_filter & ~((a_range < 5.4) & (x_range > 1e16))
+    bool_filter = bool_filter & ~((a_range < 5.8) & (x_range > 1e17))
+    bool_filter = bool_filter & ~((a_range < 6.2) & (x_range > 1e18))
+    bool_filter = bool_filter & ~((a_range < 6.5) & (x_range > 1e19))
+    bool_filter = bool_filter & ~((a_range < 6.9) & (x_range > 1e20))
+
+    # filter out known values that do not meet the required numerical accuracy
+    # see test test_wright_data_grid_failures
+    failing = np.array([
+        [0.1, 100, 709.7827128933841],
+        [0.5, 10, 709.7827128933841],
+        [0.5, 10, 1000],
+        [0.5, 100, 1000],
+        [1, 20, 100000],
+        [1, 100, 100000],
+        [1.0000000000000222, 20, 100000],
+        [1.0000000000000222, 100, 100000],
+        [1.5, 0, 500],
+        [1.5, 2.220446049250313e-14, 500],
+        [1.5, 1.e-10, 500],
+        [1.5, 1.e-05, 500],
+        [1.5, 0.1, 500],
+        [1.5, 20, 100000],
+        [1.5, 100, 100000],
+        ]).tolist()
+
+    does_fail = np.full_like(a_range, False, dtype=bool)
+    for i in range(x_range.size):
+        if [a_range[i], b_range[i], x_range[i]] in failing:
+            does_fail[i] = True
+
+    # filter and flatten
+    a_range = a_range[bool_filter]
+    b_range = b_range[bool_filter]
+    x_range = x_range[bool_filter]
+    does_fail = does_fail[bool_filter]
+
+    dataset = []
+    print(f"Computing {x_range.size} single points.")
+    print("Tests will fail for the following data points:")
+    for i in range(x_range.size):
+        a = a_range[i]
+        b = b_range[i]
+        x = x_range[i]
+        # take care of difficult corner cases
+        maxterms = 1000
+        if a < 1e-6 and x >= exp_inf/10:
+            maxterms = 2000
+        f = mp_wright_bessel(a, b, x, maxterms=maxterms)
+        if does_fail[i]:
+            print("failing data point a, b, x, value = "
+                  f"[{a}, {b}, {x}, {f}]")
+        else:
+            dataset.append((a, b, x, f))
+    dataset = np.array(dataset)
+
+    filename = os.path.join(pwd, '..', 'tests', 'data', 'local',
+                            'wright_bessel.txt')
+    np.savetxt(filename, dataset)
+
+    print("{:.1f} minutes elapsed".format((time() - t0)/60))
+
+
+if __name__ == "__main__":
+    main()
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/wrightomega.py b/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/wrightomega.py
new file mode 100644
index 00000000..0bcd0345
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/wrightomega.py
@@ -0,0 +1,41 @@
+import numpy as np
+
+try:
+    import mpmath
+except ImportError:
+    pass
+
+
+def mpmath_wrightomega(x):
+    return mpmath.lambertw(mpmath.exp(x), mpmath.mpf('-0.5'))
+
+
+def wrightomega_series_error(x):
+    series = x
+    desired = mpmath_wrightomega(x)
+    return abs(series - desired) / desired
+
+
+def wrightomega_exp_error(x):
+    exponential_approx = mpmath.exp(x)
+    desired = mpmath_wrightomega(x)
+    return abs(exponential_approx - desired) / desired
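+
+
+# Editorial note: the script reports, for each probe point, whether the cheap
+# approximation (wrightomega(x) ~ x for large positive x, wrightomega(x) ~
+# exp(x) for very negative x) is already within 2 * eps of the mpmath
+# reference; this is presumably how the switchover thresholds of the C
+# implementation were chosen.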
+
+
+def main():
+    desired_error = 2 * np.finfo(float).eps
+    print('Series Error')
+    for x in [1e5, 1e10, 1e15, 1e20]:
+        with mpmath.workdps(100):
+            error = wrightomega_series_error(x)
+        print(x, error, error < desired_error)
+
+    print('Exp error')
+    for x in [-10, -25, -50, -100, -200, -400, -700, -740]:
+        with mpmath.workdps(100):
+            error = wrightomega_exp_error(x)
+        print(x, error, error < desired_error)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/zetac.py b/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/zetac.py
new file mode 100644
index 00000000..d408b1a2
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/_precompute/zetac.py
@@ -0,0 +1,27 @@
+"""Compute the Taylor series for zeta(x) - 1 around x = 0."""
+try:
+    import mpmath
+except ImportError:
+    pass
+
+
+def zetac_series(N):
+    coeffs = []
+    with mpmath.workdps(100):
+        coeffs.append(-1.5)
+        for n in range(1, N):
+            coeff = mpmath.diff(mpmath.zeta, 0, n)/mpmath.factorial(n)
+            coeffs.append(coeff)
+    return coeffs
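+
+
+# Editorial note: coeffs[0] is hard-coded because zetac(0) = zeta(0) - 1
+# = -1.5; subtracting 1 only shifts the constant term, so all higher
+# coefficients are the Taylor coefficients zeta^(n)(0) / n! of zeta itself.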
+
+
+def main():
+    print(__doc__)
+    coeffs = zetac_series(10)
+    coeffs = [mpmath.nstr(x, 20, min_fixed=0, max_fixed=0)
+              for x in coeffs]
+    print("\n".join(coeffs[::-1]))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/_sf_error.py b/__packaged__/coreml/.python_dependencies/scipy/special/_sf_error.py
new file mode 100644
index 00000000..e1edc980
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/_sf_error.py
@@ -0,0 +1,15 @@
+"""Warnings and Exceptions that can be raised by special functions."""
+import warnings
+
+
+class SpecialFunctionWarning(Warning):
+    """Warning that can be emitted by special functions."""
+    pass
+
+
+warnings.simplefilter("always", category=SpecialFunctionWarning)
+
+
+class SpecialFunctionError(Exception):
+    """Exception that can be raised by special functions."""
+    pass
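+
+
+# Usage sketch (editorial, hypothetical call site): extension modules emit
+# SpecialFunctionWarning via warnings.warn, and callers can escalate it to an
+# error with the standard warnings machinery:
+#
+#     warnings.simplefilter("error", SpecialFunctionWarning)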
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/_spfun_stats.py b/__packaged__/coreml/.python_dependencies/scipy/special/_spfun_stats.py
new file mode 100644
index 00000000..98e767f7
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/_spfun_stats.py
@@ -0,0 +1,107 @@
+# Last Change: Sat Mar 21 02:00 PM 2009 J
+
+# Copyright (c) 2001, 2002 Enthought, Inc.
+#
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#   a. Redistributions of source code must retain the above copyright notice,
+#      this list of conditions and the following disclaimer.
+#   b. Redistributions in binary form must reproduce the above copyright
+#      notice, this list of conditions and the following disclaimer in the
+#      documentation and/or other materials provided with the distribution.
+#   c. Neither the name of the Enthought nor the names of its contributors
+#      may be used to endorse or promote products derived from this software
+#      without specific prior written permission.
+#
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+# DAMAGE.
+
+"""Some more special functions which may be useful for multivariate statistical
+analysis."""
+
+import numpy as np
+from scipy.special import gammaln as loggam
+
+
+__all__ = ['multigammaln']
+
+
+def multigammaln(a, d):
+    r"""Returns the log of multivariate gamma, also sometimes called the
+    generalized gamma.
+
+    Parameters
+    ----------
+    a : ndarray
+        The multivariate gamma is computed for each item of `a`.
+    d : int
+        The dimension of the space of integration.
+
+    Returns
+    -------
+    res : ndarray
+        The values of the log multivariate gamma at the given points `a`.
+
+    Notes
+    -----
+    The formal definition of the multivariate gamma of dimension d for a real
+    `a` is
+
+    .. math::
+
+        \Gamma_d(a) = \int_{A>0} e^{-tr(A)} |A|^{a - (d+1)/2} dA
+
+    with the condition :math:`a > (d-1)/2`, and :math:`A > 0` being the set of
+    all the positive definite matrices of dimension `d`.  Note that `a` is a
+    scalar: only the integrand is multivariate, the argument is not (the
+    function is defined over a subset of the real line).
+
+    This can be proven to be equal to the much friendlier equation
+
+    .. math::
+
+        \Gamma_d(a) = \pi^{d(d-1)/4} \prod_{i=1}^{d} \Gamma(a - (i-1)/2).
+
+    References
+    ----------
+    R. J. Muirhead, Aspects of multivariate statistical theory (Wiley Series in
+    probability and mathematical statistics).
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.special import multigammaln, gammaln
+    >>> a = 23.5
+    >>> d = 10
+    >>> multigammaln(a, d)
+    454.1488605074416
+
+    Verify that the result agrees with the logarithm of the equation
+    shown above:
+
+    >>> d*(d-1)/4*np.log(np.pi) + gammaln(a - 0.5*np.arange(0, d)).sum()
+    454.1488605074416
+    """
+    a = np.asarray(a)
+    if not np.isscalar(d) or (np.floor(d) != d):
+        raise ValueError("d should be a positive integer (dimension)")
+    if np.any(a <= 0.5 * (d - 1)):
+        raise ValueError("condition a (%f) > 0.5 * (d-1) (%f) not met"
+                         % (a, 0.5 * (d-1)))
+
+    res = (d * (d-1) * 0.25) * np.log(np.pi)
+    res += np.sum(loggam([(a - (j - 1.)/2) for j in range(1, d+1)]), axis=0)
+    return res
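+
+
+# Editorial sanity check (not in the upstream module): for d = 1 the product
+# formula collapses to a single term, so multigammaln(a, 1) should equal
+# gammaln(a), e.g. multigammaln(23.5, 1) == loggam(23.5).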
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/_spherical_bessel.py b/__packaged__/coreml/.python_dependencies/scipy/special/_spherical_bessel.py
new file mode 100644
index 00000000..4dbd5d3f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/_spherical_bessel.py
@@ -0,0 +1,349 @@
+from ._ufuncs import (_spherical_jn, _spherical_yn, _spherical_in,
+                      _spherical_kn, _spherical_jn_d, _spherical_yn_d,
+                      _spherical_in_d, _spherical_kn_d)
+
+def spherical_jn(n, z, derivative=False):
+    r"""Spherical Bessel function of the first kind or its derivative.
+
+    Defined as [1]_,
+
+    .. math:: j_n(z) = \sqrt{\frac{\pi}{2z}} J_{n + 1/2}(z),
+
+    where :math:`J_n` is the Bessel function of the first kind.
+
+    Parameters
+    ----------
+    n : int, array_like
+        Order of the Bessel function (n >= 0).
+    z : complex or float, array_like
+        Argument of the Bessel function.
+    derivative : bool, optional
+        If True, the value of the derivative (rather than the function
+        itself) is returned.
+
+    Returns
+    -------
+    jn : ndarray
+
+    Notes
+    -----
+    For real arguments greater than the order, the function is computed
+    using the ascending recurrence [2]_. For small real or complex
+    arguments, the definitional relation to the cylindrical Bessel function
+    of the first kind is used.
+
+    The derivative is computed using the relations [3]_,
+
+    .. math::
+        j_n'(z) = j_{n-1}(z) - \frac{n + 1}{z} j_n(z).
+
+        j_0'(z) = -j_1(z)
+
+
+    .. versionadded:: 0.18.0
+
+    References
+    ----------
+    .. [1] https://dlmf.nist.gov/10.47.E3
+    .. [2] https://dlmf.nist.gov/10.51.E1
+    .. [3] https://dlmf.nist.gov/10.51.E2
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
+    Examples
+    --------
+    The spherical Bessel functions of the first kind :math:`j_n` accept
+    both real and complex second argument. They can return a complex type:
+
+    >>> from scipy.special import spherical_jn
+    >>> spherical_jn(0, 3+5j)
+    (-9.878987731663194-8.021894345786002j)
+    >>> type(spherical_jn(0, 3+5j))
+    <class 'numpy.complex128'>
+
+    We can verify the relation for the derivative from the Notes
+    for :math:`n=3` in the interval :math:`[1, 2]`:
+
+    >>> import numpy as np
+    >>> x = np.arange(1.0, 2.0, 0.01)
+    >>> np.allclose(spherical_jn(3, x, True),
+    ...             spherical_jn(2, x) - 4/x * spherical_jn(3, x))
+    True
+
+    The first few :math:`j_n` with real argument:
+
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.arange(0.0, 10.0, 0.01)
+    >>> fig, ax = plt.subplots()
+    >>> ax.set_ylim(-0.5, 1.5)
+    >>> ax.set_title(r'Spherical Bessel functions $j_n$')
+    >>> for n in np.arange(0, 4):
+    ...     ax.plot(x, spherical_jn(n, x), label=rf'$j_{n}$')
+    >>> plt.legend(loc='best')
+    >>> plt.show()
+
+    """
+    if derivative:
+        return _spherical_jn_d(n, z)
+    else:
+        return _spherical_jn(n, z)
+
+
+def spherical_yn(n, z, derivative=False):
+    r"""Spherical Bessel function of the second kind or its derivative.
+
+    Defined as [1]_,
+
+    .. math:: y_n(z) = \sqrt{\frac{\pi}{2z}} Y_{n + 1/2}(z),
+
+    where :math:`Y_n` is the Bessel function of the second kind.
+
+    Parameters
+    ----------
+    n : int, array_like
+        Order of the Bessel function (n >= 0).
+    z : complex or float, array_like
+        Argument of the Bessel function.
+    derivative : bool, optional
+        If True, the value of the derivative (rather than the function
+        itself) is returned.
+
+    Returns
+    -------
+    yn : ndarray
+
+    Notes
+    -----
+    For real arguments, the function is computed using the ascending
+    recurrence [2]_.  For complex arguments, the definitional relation to
+    the cylindrical Bessel function of the second kind is used.
+
+    The derivative is computed using the relations [3]_,
+
+    .. math::
+        y_n' = y_{n-1} - \frac{n + 1}{z} y_n.
+
+        y_0' = -y_1
+
+
+    .. versionadded:: 0.18.0
+
+    References
+    ----------
+    .. [1] https://dlmf.nist.gov/10.47.E4
+    .. [2] https://dlmf.nist.gov/10.51.E1
+    .. [3] https://dlmf.nist.gov/10.51.E2
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
+    Examples
+    --------
+    The spherical Bessel functions of the second kind :math:`y_n` accept
+    both real and complex second argument. They can return a complex type:
+
+    >>> from scipy.special import spherical_yn
+    >>> spherical_yn(0, 3+5j)
+    (8.022343088587197-9.880052589376795j)
+    >>> type(spherical_yn(0, 3+5j))
+    <class 'numpy.complex128'>
+
+    We can verify the relation for the derivative from the Notes
+    for :math:`n=3` in the interval :math:`[1, 2]`:
+
+    >>> import numpy as np
+    >>> x = np.arange(1.0, 2.0, 0.01)
+    >>> np.allclose(spherical_yn(3, x, True),
+    ...             spherical_yn(2, x) - 4/x * spherical_yn(3, x))
+    True
+
+    The first few :math:`y_n` with real argument:
+
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.arange(0.0, 10.0, 0.01)
+    >>> fig, ax = plt.subplots()
+    >>> ax.set_ylim(-2.0, 1.0)
+    >>> ax.set_title(r'Spherical Bessel functions $y_n$')
+    >>> for n in np.arange(0, 4):
+    ...     ax.plot(x, spherical_yn(n, x), label=rf'$y_{n}$')
+    >>> plt.legend(loc='best')
+    >>> plt.show()
+
+    """
+    if derivative:
+        return _spherical_yn_d(n, z)
+    else:
+        return _spherical_yn(n, z)
+
+
+def spherical_in(n, z, derivative=False):
+    r"""Modified spherical Bessel function of the first kind or its derivative.
+
+    Defined as [1]_,
+
+    .. math:: i_n(z) = \sqrt{\frac{\pi}{2z}} I_{n + 1/2}(z),
+
+    where :math:`I_n` is the modified Bessel function of the first kind.
+
+    Parameters
+    ----------
+    n : int, array_like
+        Order of the Bessel function (n >= 0).
+    z : complex or float, array_like
+        Argument of the Bessel function.
+    derivative : bool, optional
+        If True, the value of the derivative (rather than the function
+        itself) is returned.
+
+    Returns
+    -------
+    in : ndarray
+
+    Notes
+    -----
+    The function is computed using its definitional relation to the
+    modified cylindrical Bessel function of the first kind.
+
+    The derivative is computed using the relations [2]_,
+
+    .. math::
+        i_n' = i_{n-1} - \frac{n + 1}{z} i_n.
+
+        i_1' = i_0
+
+
+    .. versionadded:: 0.18.0
+
+    References
+    ----------
+    .. [1] https://dlmf.nist.gov/10.47.E7
+    .. [2] https://dlmf.nist.gov/10.51.E5
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
+    Examples
+    --------
+    The modified spherical Bessel functions of the first kind :math:`i_n`
+    accept both real and complex second argument.
+    They can return a complex type:
+
+    >>> from scipy.special import spherical_in
+    >>> spherical_in(0, 3+5j)
+    (-1.1689867793369182-1.2697305267234222j)
+    >>> type(spherical_in(0, 3+5j))
+    <class 'numpy.complex128'>
+
+    We can verify the relation for the derivative from the Notes
+    for :math:`n=3` in the interval :math:`[1, 2]`:
+
+    >>> import numpy as np
+    >>> x = np.arange(1.0, 2.0, 0.01)
+    >>> np.allclose(spherical_in(3, x, True),
+    ...             spherical_in(2, x) - 4/x * spherical_in(3, x))
+    True
+
+    The first few :math:`i_n` with real argument:
+
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.arange(0.0, 6.0, 0.01)
+    >>> fig, ax = plt.subplots()
+    >>> ax.set_ylim(-0.5, 5.0)
+    >>> ax.set_title(r'Modified spherical Bessel functions $i_n$')
+    >>> for n in np.arange(0, 4):
+    ...     ax.plot(x, spherical_in(n, x), label=rf'$i_{n}$')
+    >>> plt.legend(loc='best')
+    >>> plt.show()
+
+    """
+    if derivative:
+        return _spherical_in_d(n, z)
+    else:
+        return _spherical_in(n, z)
+
+
+def spherical_kn(n, z, derivative=False):
+    r"""Modified spherical Bessel function of the second kind or its derivative.
+
+    Defined as [1]_,
+
+    .. math:: k_n(z) = \sqrt{\frac{\pi}{2z}} K_{n + 1/2}(z),
+
+    where :math:`K_n` is the modified Bessel function of the second kind.
+
+    Parameters
+    ----------
+    n : int, array_like
+        Order of the Bessel function (n >= 0).
+    z : complex or float, array_like
+        Argument of the Bessel function.
+    derivative : bool, optional
+        If True, the value of the derivative (rather than the function
+        itself) is returned.
+
+    Returns
+    -------
+    kn : ndarray
+
+    Notes
+    -----
+    The function is computed using its definitional relation to the
+    modified cylindrical Bessel function of the second kind.
+
+    The derivative is computed using the relations [2]_,
+
+    .. math::
+        k_n' = -k_{n-1} - \frac{n + 1}{z} k_n.
+
+        k_0' = -k_1
+
+
+    .. versionadded:: 0.18.0
+
+    References
+    ----------
+    .. [1] https://dlmf.nist.gov/10.47.E9
+    .. [2] https://dlmf.nist.gov/10.51.E5
+    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
+        Handbook of Mathematical Functions with Formulas,
+        Graphs, and Mathematical Tables. New York: Dover, 1972.
+
+    Examples
+    --------
+    The modified spherical Bessel functions of the second kind :math:`k_n`
+    accept both real and complex second argument.
+    They can return a complex type:
+
+    >>> from scipy.special import spherical_kn
+    >>> spherical_kn(0, 3+5j)
+    (0.012985785614001561+0.003354691603137546j)
+    >>> type(spherical_kn(0, 3+5j))
+    <class 'numpy.complex128'>
+
+    We can verify the relation for the derivative from the Notes
+    for :math:`n=3` in the interval :math:`[1, 2]`:
+
+    >>> import numpy as np
+    >>> x = np.arange(1.0, 2.0, 0.01)
+    >>> np.allclose(spherical_kn(3, x, True),
+    ...             - 4/x * spherical_kn(3, x) - spherical_kn(2, x))
+    True
+
+    The first few :math:`k_n` with real argument:
+
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.arange(0.0, 4.0, 0.01)
+    >>> fig, ax = plt.subplots()
+    >>> ax.set_ylim(0.0, 5.0)
+    >>> ax.set_title(r'Modified spherical Bessel functions $k_n$')
+    >>> for n in np.arange(0, 4):
+    ...     ax.plot(x, spherical_kn(n, x), label=rf'$k_{n}$')
+    >>> plt.legend(loc='best')
+    >>> plt.show()
+
+    """
+    if derivative:
+        return _spherical_kn_d(n, z)
+    else:
+        return _spherical_kn(n, z)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/_test_internal.pyi b/__packaged__/coreml/.python_dependencies/scipy/special/_test_internal.pyi
new file mode 100644
index 00000000..3ce492e2
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/_test_internal.pyi
@@ -0,0 +1,10 @@
+from typing import Tuple
+import numpy as np
+
+def have_fenv() -> bool: ...
+def random_double(size: int) -> np.float64: ...
+def test_add_round(size: int, mode: str): ...
+
+def _dd_exp(xhi: float, xlo: float) -> Tuple[float, float]: ...
+def _dd_log(xhi: float, xlo: float) -> Tuple[float, float]: ...
+def _dd_expm1(xhi: float, xlo: float) -> Tuple[float, float]: ...
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/_testutils.py b/__packaged__/coreml/.python_dependencies/scipy/special/_testutils.py
new file mode 100644
index 00000000..57bdf7b7
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/_testutils.py
@@ -0,0 +1,316 @@
+import os
+import functools
+import operator
+from scipy._lib import _pep440
+
+import numpy as np
+from numpy.testing import assert_
+import pytest
+
+import scipy.special as sc
+
+__all__ = ['with_special_errors', 'assert_func_equal', 'FuncData']
+
+
+#------------------------------------------------------------------------------
+# Check if a module is present to be used in tests
+#------------------------------------------------------------------------------
+
+class MissingModule:
+    def __init__(self, name):
+        self.name = name
+
+
+def check_version(module, min_ver):
+    if type(module) == MissingModule:
+        return pytest.mark.skip(reason="{} is not installed".format(module.name))
+    return pytest.mark.skipif(_pep440.parse(module.__version__) < _pep440.Version(min_ver),
+                              reason="{} version >= {} required".format(module.__name__, min_ver))
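+
+
+# Usage sketch (editorial, hypothetical test module): import the optional
+# dependency defensively, then gate tests on its version:
+#
+#     try:
+#         import mpmath
+#     except ImportError:
+#         mpmath = MissingModule('mpmath')
+#
+#     @check_version(mpmath, '1.0.0')
+#     def test_needs_recent_mpmath():
+#         ...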
+
+
+#------------------------------------------------------------------------------
+# Enable convergence and loss of precision warnings -- turn off one by one
+#------------------------------------------------------------------------------
+
+def with_special_errors(func):
+    """
+    Enable special function errors (such as underflow, overflow,
+    loss of precision, etc.)
+    """
+    @functools.wraps(func)
+    def wrapper(*a, **kw):
+        with sc.errstate(all='raise'):
+            res = func(*a, **kw)
+        return res
+    return wrapper
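+
+
+# Usage sketch (editorial, hypothetical test): under the decorator, any
+# special-function warning raises instead, so silent overflow or loss of
+# precision fails the test:
+#
+#     @with_special_errors
+#     def test_no_silent_overflow():
+#         sc.gamma(1e3)  # overflows; raises under sc.errstate(all='raise')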
+
+
+#------------------------------------------------------------------------------
+# Comparing function values at many data points at once, with helpful
+# error reports
+#------------------------------------------------------------------------------
+
+def assert_func_equal(func, results, points, rtol=None, atol=None,
+                      param_filter=None, knownfailure=None,
+                      vectorized=True, dtype=None, nan_ok=False,
+                      ignore_inf_sign=False, distinguish_nan_and_inf=True):
+    if hasattr(points, 'next'):
+        # it's a generator
+        points = list(points)
+
+    points = np.asarray(points)
+    if points.ndim == 1:
+        points = points[:,None]
+    nparams = points.shape[1]
+
+    if hasattr(results, '__name__'):
+        # function
+        data = points
+        result_columns = None
+        result_func = results
+    else:
+        # dataset
+        data = np.c_[points, results]
+        result_columns = list(range(nparams, data.shape[1]))
+        result_func = None
+
+    fdata = FuncData(func, data, list(range(nparams)),
+                     result_columns=result_columns, result_func=result_func,
+                     rtol=rtol, atol=atol, param_filter=param_filter,
+                     knownfailure=knownfailure, nan_ok=nan_ok, vectorized=vectorized,
+                     ignore_inf_sign=ignore_inf_sign,
+                     distinguish_nan_and_inf=distinguish_nan_and_inf)
+    fdata.check()
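+
+
+# Usage sketch (editorial, illustrative values): compare a vectorized ufunc
+# against tabulated reference values; tolerances default to 5*eps and 5*tiny.
+#
+#     points = [0.0, 0.5, 1.0]
+#     results = [0.0, 0.5204998778130465, 0.8427007929497149]
+#     assert_func_equal(sc.erf, results, points, rtol=1e-14)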
+
+
+class FuncData:
+    """
+    Data set for checking a special function.
+
+    Parameters
+    ----------
+    func : function
+        Function to test
+    data : numpy array
+        Columnar data to use for testing.
+    param_columns : int or tuple of ints
+        Column indices in which the parameters to `func` lie.
+        Can be imaginary integers to indicate that the parameter
+        should be cast to complex.
+    result_columns : int or tuple of ints, optional
+        Column indices for expected results from `func`.
+    result_func : callable, optional
+        Function to call to obtain results.
+    rtol : float, optional
+        Required relative tolerance. Default is 5*eps.
+    atol : float, optional
+        Required absolute tolerance. Default is 5*tiny.
+    param_filter : function, or tuple of functions/Nones, optional
+        Filter functions to exclude some parameter ranges.
+        If omitted, no filtering is done.
+    knownfailure : str, optional
+        Known failure error message to raise when the test is run.
+        If omitted, no exception is raised.
+    nan_ok : bool, optional
+        If nan is always an accepted result.
+    vectorized : bool, optional
+        Whether all functions passed in are vectorized.
+    ignore_inf_sign : bool, optional
+        Whether to ignore signs of infinities.
+        (Doesn't matter for complex-valued functions.)
+    distinguish_nan_and_inf : bool, optional
+        If False, treat a nan in one result and an inf in the other as
+        equal, and set `ignore_inf_sign` to True.
+
+    """
+
+    def __init__(self, func, data, param_columns, result_columns=None,
+                 result_func=None, rtol=None, atol=None, param_filter=None,
+                 knownfailure=None, dataname=None, nan_ok=False, vectorized=True,
+                 ignore_inf_sign=False, distinguish_nan_and_inf=True):
+        self.func = func
+        self.data = data
+        self.dataname = dataname
+        if not hasattr(param_columns, '__len__'):
+            param_columns = (param_columns,)
+        self.param_columns = tuple(param_columns)
+        if result_columns is not None:
+            if not hasattr(result_columns, '__len__'):
+                result_columns = (result_columns,)
+            self.result_columns = tuple(result_columns)
+            if result_func is not None:
+                raise ValueError("Only result_func or result_columns should be provided")
+        elif result_func is not None:
+            self.result_columns = None
+        else:
+            raise ValueError("Either result_func or result_columns should be provided")
+        self.result_func = result_func
+        self.rtol = rtol
+        self.atol = atol
+        if not hasattr(param_filter, '__len__'):
+            param_filter = (param_filter,)
+        self.param_filter = param_filter
+        self.knownfailure = knownfailure
+        self.nan_ok = nan_ok
+        self.vectorized = vectorized
+        self.ignore_inf_sign = ignore_inf_sign
+        self.distinguish_nan_and_inf = distinguish_nan_and_inf
+        if not self.distinguish_nan_and_inf:
+            self.ignore_inf_sign = True
+
+    def get_tolerances(self, dtype):
+        if not np.issubdtype(dtype, np.inexact):
+            dtype = np.dtype(float)
+        info = np.finfo(dtype)
+        rtol, atol = self.rtol, self.atol
+        if rtol is None:
+            rtol = 5*info.eps
+        if atol is None:
+            atol = 5*info.tiny
+        return rtol, atol
+
+    def check(self, data=None, dtype=None, dtypes=None):
+        """Check the special function against the data."""
+        __tracebackhide__ = operator.methodcaller(
+            'errisinstance', AssertionError
+        )
+
+        if self.knownfailure:
+            pytest.xfail(reason=self.knownfailure)
+
+        if data is None:
+            data = self.data
+
+        if dtype is None:
+            dtype = data.dtype
+        else:
+            data = data.astype(dtype)
+
+        rtol, atol = self.get_tolerances(dtype)
+
+        # Apply given filter functions
+        if self.param_filter:
+            param_mask = np.ones((data.shape[0],), np.bool_)
+            for j, filter in zip(self.param_columns, self.param_filter):
+                if filter:
+                    param_mask &= list(filter(data[:,j]))
+            data = data[param_mask]
+
+        # Pick parameters from the correct columns
+        params = []
+        for idx, j in enumerate(self.param_columns):
+            if np.iscomplexobj(j):
+                j = int(j.imag)
+                params.append(data[:,j].astype(complex))
+            elif dtypes and idx < len(dtypes):
+                params.append(data[:, j].astype(dtypes[idx]))
+            else:
+                params.append(data[:,j])
+
+        # Helper for evaluating results
+        def eval_func_at_params(func, skip_mask=None):
+            if self.vectorized:
+                got = func(*params)
+            else:
+                got = []
+                for j in range(len(params[0])):
+                    if skip_mask is not None and skip_mask[j]:
+                        got.append(np.nan)
+                        continue
+                    got.append(func(*tuple([params[i][j] for i in range(len(params))])))
+                got = np.asarray(got)
+            if not isinstance(got, tuple):
+                got = (got,)
+            return got
+
+        # Evaluate function to be tested
+        got = eval_func_at_params(self.func)
+
+        # Grab the correct results
+        if self.result_columns is not None:
+            # Correct results passed in with the data
+            wanted = tuple([data[:,icol] for icol in self.result_columns])
+        else:
+            # Function producing correct results passed in
+            skip_mask = None
+            if self.nan_ok and len(got) == 1:
+                # Don't spend time evaluating what doesn't need to be evaluated
+                skip_mask = np.isnan(got[0])
+            wanted = eval_func_at_params(self.result_func, skip_mask=skip_mask)
+
+        # Check the validity of each output returned
+        assert_(len(got) == len(wanted))
+
+        for output_num, (x, y) in enumerate(zip(got, wanted)):
+            if np.issubdtype(x.dtype, np.complexfloating) or self.ignore_inf_sign:
+                pinf_x = np.isinf(x)
+                pinf_y = np.isinf(y)
+                minf_x = np.isinf(x)
+                minf_y = np.isinf(y)
+            else:
+                pinf_x = np.isposinf(x)
+                pinf_y = np.isposinf(y)
+                minf_x = np.isneginf(x)
+                minf_y = np.isneginf(y)
+            nan_x = np.isnan(x)
+            nan_y = np.isnan(y)
+
+            with np.errstate(all='ignore'):
+                abs_y = np.absolute(y)
+                abs_y[~np.isfinite(abs_y)] = 0
+                diff = np.absolute(x - y)
+                diff[~np.isfinite(diff)] = 0
+
+                rdiff = diff / np.absolute(y)
+                rdiff[~np.isfinite(rdiff)] = 0
+
+            tol_mask = (diff <= atol + rtol*abs_y)
+            pinf_mask = (pinf_x == pinf_y)
+            minf_mask = (minf_x == minf_y)
+
+            nan_mask = (nan_x == nan_y)
+
+            bad_j = ~(tol_mask & pinf_mask & minf_mask & nan_mask)
+
+            point_count = bad_j.size
+            if self.nan_ok:
+                bad_j &= ~nan_x
+                bad_j &= ~nan_y
+                point_count -= (nan_x | nan_y).sum()
+
+            if not self.distinguish_nan_and_inf and not self.nan_ok:
+                # If nan's are okay we've already covered all these cases
+                inf_x = np.isinf(x)
+                inf_y = np.isinf(y)
+                both_nonfinite = (inf_x & nan_y) | (nan_x & inf_y)
+                bad_j &= ~both_nonfinite
+                point_count -= both_nonfinite.sum()
+
+            if np.any(bad_j):
+                # Some bad results: inform what, where, and how bad
+                msg = [""]
+                msg.append("Max |adiff|: %g" % diff[bad_j].max())
+                msg.append("Max |rdiff|: %g" % rdiff[bad_j].max())
+                msg.append("Bad results (%d out of %d) for the following points (in output %d):"
+                           % (np.sum(bad_j), point_count, output_num,))
+                for j in np.nonzero(bad_j)[0]:
+                    j = int(j)
+                    fmt = lambda x: "%30s" % np.array2string(x[j], precision=18)
+                    a = "  ".join(map(fmt, params))
+                    b = "  ".join(map(fmt, got))
+                    c = "  ".join(map(fmt, wanted))
+                    d = fmt(rdiff)
+                    msg.append("%s => %s != %s  (rdiff %s)" % (a, b, c, d))
+                assert_(False, "\n".join(msg))
+
+    def __repr__(self):
+        """Pretty-printing, esp. for Nose output"""
+        if np.any(list(map(np.iscomplexobj, self.param_columns))):
+            is_complex = " (complex)"
+        else:
+            is_complex = ""
+        if self.dataname:
+            return "" % (self.func.__name__, is_complex,
+                                            os.path.basename(self.dataname))
+        else:
+            return "" % (self.func.__name__, is_complex)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/_ufuncs.pyi b/__packaged__/coreml/.python_dependencies/scipy/special/_ufuncs.pyi
new file mode 100644
index 00000000..3e9e3b50
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/_ufuncs.pyi
@@ -0,0 +1,520 @@
+# This file is automatically generated by _generate_pyx.py.
+# Do not edit manually!
+
+from typing import Any, Dict
+
+import numpy as np
+
+__all__ = [
+    'geterr',
+    'seterr',
+    'errstate',
+    'agm',
+    'airy',
+    'airye',
+    'bdtr',
+    'bdtrc',
+    'bdtri',
+    'bdtrik',
+    'bdtrin',
+    'bei',
+    'beip',
+    'ber',
+    'berp',
+    'besselpoly',
+    'beta',
+    'betainc',
+    'betaincinv',
+    'betaln',
+    'binom',
+    'boxcox',
+    'boxcox1p',
+    'btdtr',
+    'btdtri',
+    'btdtria',
+    'btdtrib',
+    'cbrt',
+    'chdtr',
+    'chdtrc',
+    'chdtri',
+    'chdtriv',
+    'chndtr',
+    'chndtridf',
+    'chndtrinc',
+    'chndtrix',
+    'cosdg',
+    'cosm1',
+    'cotdg',
+    'dawsn',
+    'ellipe',
+    'ellipeinc',
+    'ellipj',
+    'ellipk',
+    'ellipkinc',
+    'ellipkm1',
+    'elliprc',
+    'elliprd',
+    'elliprf',
+    'elliprg',
+    'elliprj',
+    'entr',
+    'erf',
+    'erfc',
+    'erfcinv',
+    'erfcx',
+    'erfi',
+    'erfinv',
+    'eval_chebyc',
+    'eval_chebys',
+    'eval_chebyt',
+    'eval_chebyu',
+    'eval_gegenbauer',
+    'eval_genlaguerre',
+    'eval_hermite',
+    'eval_hermitenorm',
+    'eval_jacobi',
+    'eval_laguerre',
+    'eval_legendre',
+    'eval_sh_chebyt',
+    'eval_sh_chebyu',
+    'eval_sh_jacobi',
+    'eval_sh_legendre',
+    'exp1',
+    'exp10',
+    'exp2',
+    'expi',
+    'expit',
+    'expm1',
+    'expn',
+    'exprel',
+    'fdtr',
+    'fdtrc',
+    'fdtri',
+    'fdtridfd',
+    'fresnel',
+    'gamma',
+    'gammainc',
+    'gammaincc',
+    'gammainccinv',
+    'gammaincinv',
+    'gammaln',
+    'gammasgn',
+    'gdtr',
+    'gdtrc',
+    'gdtria',
+    'gdtrib',
+    'gdtrix',
+    'hankel1',
+    'hankel1e',
+    'hankel2',
+    'hankel2e',
+    'huber',
+    'hyp0f1',
+    'hyp1f1',
+    'hyp2f1',
+    'hyperu',
+    'i0',
+    'i0e',
+    'i1',
+    'i1e',
+    'inv_boxcox',
+    'inv_boxcox1p',
+    'it2i0k0',
+    'it2j0y0',
+    'it2struve0',
+    'itairy',
+    'iti0k0',
+    'itj0y0',
+    'itmodstruve0',
+    'itstruve0',
+    'iv',
+    'ive',
+    'j0',
+    'j1',
+    'jn',
+    'jv',
+    'jve',
+    'k0',
+    'k0e',
+    'k1',
+    'k1e',
+    'kei',
+    'keip',
+    'kelvin',
+    'ker',
+    'kerp',
+    'kl_div',
+    'kn',
+    'kolmogi',
+    'kolmogorov',
+    'kv',
+    'kve',
+    'log1p',
+    'log_expit',
+    'log_ndtr',
+    'loggamma',
+    'logit',
+    'lpmv',
+    'mathieu_a',
+    'mathieu_b',
+    'mathieu_cem',
+    'mathieu_modcem1',
+    'mathieu_modcem2',
+    'mathieu_modsem1',
+    'mathieu_modsem2',
+    'mathieu_sem',
+    'modfresnelm',
+    'modfresnelp',
+    'modstruve',
+    'nbdtr',
+    'nbdtrc',
+    'nbdtri',
+    'nbdtrik',
+    'nbdtrin',
+    'ncfdtr',
+    'ncfdtri',
+    'ncfdtridfd',
+    'ncfdtridfn',
+    'ncfdtrinc',
+    'nctdtr',
+    'nctdtridf',
+    'nctdtrinc',
+    'nctdtrit',
+    'ndtr',
+    'ndtri',
+    'ndtri_exp',
+    'nrdtrimn',
+    'nrdtrisd',
+    'obl_ang1',
+    'obl_ang1_cv',
+    'obl_cv',
+    'obl_rad1',
+    'obl_rad1_cv',
+    'obl_rad2',
+    'obl_rad2_cv',
+    'owens_t',
+    'pbdv',
+    'pbvv',
+    'pbwa',
+    'pdtr',
+    'pdtrc',
+    'pdtri',
+    'pdtrik',
+    'poch',
+    'powm1',
+    'pro_ang1',
+    'pro_ang1_cv',
+    'pro_cv',
+    'pro_rad1',
+    'pro_rad1_cv',
+    'pro_rad2',
+    'pro_rad2_cv',
+    'pseudo_huber',
+    'psi',
+    'radian',
+    'rel_entr',
+    'rgamma',
+    'round',
+    'shichi',
+    'sici',
+    'sindg',
+    'smirnov',
+    'smirnovi',
+    'spence',
+    'sph_harm',
+    'stdtr',
+    'stdtridf',
+    'stdtrit',
+    'struve',
+    'tandg',
+    'tklmbda',
+    'voigt_profile',
+    'wofz',
+    'wright_bessel',
+    'wrightomega',
+    'xlog1py',
+    'xlogy',
+    'y0',
+    'y1',
+    'yn',
+    'yv',
+    'yve',
+    'zetac'
+]
+
+def geterr() -> Dict[str, str]: ...
+def seterr(**kwargs: str) -> Dict[str, str]: ...
+
+class errstate:
+    def __init__(self, **kargs: str) -> None: ...
+    def __enter__(self) -> None: ...
+    def __exit__(
+        self,
+        exc_type: Any,  # Unused
+        exc_value: Any,  # Unused
+        traceback: Any,  # Unused
+    ) -> None: ...
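+
+# Usage sketch (editorial): errstate mirrors numpy.errstate for the special
+# function error categories:
+#
+#     with errstate(all='raise'):
+#         ...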
+
+_cosine_cdf: np.ufunc
+_cosine_invcdf: np.ufunc
+_cospi: np.ufunc
+_ellip_harm: np.ufunc
+_factorial: np.ufunc
+_igam_fac: np.ufunc
+_kolmogc: np.ufunc
+_kolmogci: np.ufunc
+_kolmogp: np.ufunc
+_lambertw: np.ufunc
+_lanczos_sum_expg_scaled: np.ufunc
+_lgam1p: np.ufunc
+_log1pmx: np.ufunc
+_riemann_zeta: np.ufunc
+_sf_error_test_function: np.ufunc
+_sinpi: np.ufunc
+_smirnovc: np.ufunc
+_smirnovci: np.ufunc
+_smirnovp: np.ufunc
+_spherical_in: np.ufunc
+_spherical_in_d: np.ufunc
+_spherical_jn: np.ufunc
+_spherical_jn_d: np.ufunc
+_spherical_kn: np.ufunc
+_spherical_kn_d: np.ufunc
+_spherical_yn: np.ufunc
+_spherical_yn_d: np.ufunc
+_struve_asymp_large_z: np.ufunc
+_struve_bessel_series: np.ufunc
+_struve_power_series: np.ufunc
+_zeta: np.ufunc
+agm: np.ufunc
+airy: np.ufunc
+airye: np.ufunc
+bdtr: np.ufunc
+bdtrc: np.ufunc
+bdtri: np.ufunc
+bdtrik: np.ufunc
+bdtrin: np.ufunc
+bei: np.ufunc
+beip: np.ufunc
+ber: np.ufunc
+berp: np.ufunc
+besselpoly: np.ufunc
+beta: np.ufunc
+betainc: np.ufunc
+betaincinv: np.ufunc
+betaln: np.ufunc
+binom: np.ufunc
+boxcox1p: np.ufunc
+boxcox: np.ufunc
+btdtr: np.ufunc
+btdtri: np.ufunc
+btdtria: np.ufunc
+btdtrib: np.ufunc
+cbrt: np.ufunc
+chdtr: np.ufunc
+chdtrc: np.ufunc
+chdtri: np.ufunc
+chdtriv: np.ufunc
+chndtr: np.ufunc
+chndtridf: np.ufunc
+chndtrinc: np.ufunc
+chndtrix: np.ufunc
+cosdg: np.ufunc
+cosm1: np.ufunc
+cotdg: np.ufunc
+dawsn: np.ufunc
+ellipe: np.ufunc
+ellipeinc: np.ufunc
+ellipj: np.ufunc
+ellipk: np.ufunc
+ellipkinc: np.ufunc
+ellipkm1: np.ufunc
+elliprc: np.ufunc
+elliprd: np.ufunc
+elliprf: np.ufunc
+elliprg: np.ufunc
+elliprj: np.ufunc
+entr: np.ufunc
+erf: np.ufunc
+erfc: np.ufunc
+erfcinv: np.ufunc
+erfcx: np.ufunc
+erfi: np.ufunc
+erfinv: np.ufunc
+eval_chebyc: np.ufunc
+eval_chebys: np.ufunc
+eval_chebyt: np.ufunc
+eval_chebyu: np.ufunc
+eval_gegenbauer: np.ufunc
+eval_genlaguerre: np.ufunc
+eval_hermite: np.ufunc
+eval_hermitenorm: np.ufunc
+eval_jacobi: np.ufunc
+eval_laguerre: np.ufunc
+eval_legendre: np.ufunc
+eval_sh_chebyt: np.ufunc
+eval_sh_chebyu: np.ufunc
+eval_sh_jacobi: np.ufunc
+eval_sh_legendre: np.ufunc
+exp10: np.ufunc
+exp1: np.ufunc
+exp2: np.ufunc
+expi: np.ufunc
+expit: np.ufunc
+expm1: np.ufunc
+expn: np.ufunc
+exprel: np.ufunc
+fdtr: np.ufunc
+fdtrc: np.ufunc
+fdtri: np.ufunc
+fdtridfd: np.ufunc
+fresnel: np.ufunc
+gamma: np.ufunc
+gammainc: np.ufunc
+gammaincc: np.ufunc
+gammainccinv: np.ufunc
+gammaincinv: np.ufunc
+gammaln: np.ufunc
+gammasgn: np.ufunc
+gdtr: np.ufunc
+gdtrc: np.ufunc
+gdtria: np.ufunc
+gdtrib: np.ufunc
+gdtrix: np.ufunc
+hankel1: np.ufunc
+hankel1e: np.ufunc
+hankel2: np.ufunc
+hankel2e: np.ufunc
+huber: np.ufunc
+hyp0f1: np.ufunc
+hyp1f1: np.ufunc
+hyp2f1: np.ufunc
+hyperu: np.ufunc
+i0: np.ufunc
+i0e: np.ufunc
+i1: np.ufunc
+i1e: np.ufunc
+inv_boxcox1p: np.ufunc
+inv_boxcox: np.ufunc
+it2i0k0: np.ufunc
+it2j0y0: np.ufunc
+it2struve0: np.ufunc
+itairy: np.ufunc
+iti0k0: np.ufunc
+itj0y0: np.ufunc
+itmodstruve0: np.ufunc
+itstruve0: np.ufunc
+iv: np.ufunc
+ive: np.ufunc
+j0: np.ufunc
+j1: np.ufunc
+jn: np.ufunc
+jv: np.ufunc
+jve: np.ufunc
+k0: np.ufunc
+k0e: np.ufunc
+k1: np.ufunc
+k1e: np.ufunc
+kei: np.ufunc
+keip: np.ufunc
+kelvin: np.ufunc
+ker: np.ufunc
+kerp: np.ufunc
+kl_div: np.ufunc
+kn: np.ufunc
+kolmogi: np.ufunc
+kolmogorov: np.ufunc
+kv: np.ufunc
+kve: np.ufunc
+log1p: np.ufunc
+log_expit: np.ufunc
+log_ndtr: np.ufunc
+loggamma: np.ufunc
+logit: np.ufunc
+lpmv: np.ufunc
+mathieu_a: np.ufunc
+mathieu_b: np.ufunc
+mathieu_cem: np.ufunc
+mathieu_modcem1: np.ufunc
+mathieu_modcem2: np.ufunc
+mathieu_modsem1: np.ufunc
+mathieu_modsem2: np.ufunc
+mathieu_sem: np.ufunc
+modfresnelm: np.ufunc
+modfresnelp: np.ufunc
+modstruve: np.ufunc
+nbdtr: np.ufunc
+nbdtrc: np.ufunc
+nbdtri: np.ufunc
+nbdtrik: np.ufunc
+nbdtrin: np.ufunc
+ncfdtr: np.ufunc
+ncfdtri: np.ufunc
+ncfdtridfd: np.ufunc
+ncfdtridfn: np.ufunc
+ncfdtrinc: np.ufunc
+nctdtr: np.ufunc
+nctdtridf: np.ufunc
+nctdtrinc: np.ufunc
+nctdtrit: np.ufunc
+ndtr: np.ufunc
+ndtri: np.ufunc
+ndtri_exp: np.ufunc
+nrdtrimn: np.ufunc
+nrdtrisd: np.ufunc
+obl_ang1: np.ufunc
+obl_ang1_cv: np.ufunc
+obl_cv: np.ufunc
+obl_rad1: np.ufunc
+obl_rad1_cv: np.ufunc
+obl_rad2: np.ufunc
+obl_rad2_cv: np.ufunc
+owens_t: np.ufunc
+pbdv: np.ufunc
+pbvv: np.ufunc
+pbwa: np.ufunc
+pdtr: np.ufunc
+pdtrc: np.ufunc
+pdtri: np.ufunc
+pdtrik: np.ufunc
+poch: np.ufunc
+powm1: np.ufunc
+pro_ang1: np.ufunc
+pro_ang1_cv: np.ufunc
+pro_cv: np.ufunc
+pro_rad1: np.ufunc
+pro_rad1_cv: np.ufunc
+pro_rad2: np.ufunc
+pro_rad2_cv: np.ufunc
+pseudo_huber: np.ufunc
+psi: np.ufunc
+radian: np.ufunc
+rel_entr: np.ufunc
+rgamma: np.ufunc
+round: np.ufunc
+shichi: np.ufunc
+sici: np.ufunc
+sindg: np.ufunc
+smirnov: np.ufunc
+smirnovi: np.ufunc
+spence: np.ufunc
+sph_harm: np.ufunc
+stdtr: np.ufunc
+stdtridf: np.ufunc
+stdtrit: np.ufunc
+struve: np.ufunc
+tandg: np.ufunc
+tklmbda: np.ufunc
+voigt_profile: np.ufunc
+wofz: np.ufunc
+wright_bessel: np.ufunc
+wrightomega: np.ufunc
+xlog1py: np.ufunc
+xlogy: np.ufunc
+y0: np.ufunc
+y1: np.ufunc
+yn: np.ufunc
+yv: np.ufunc
+yve: np.ufunc
+zetac: np.ufunc
+
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/_ufuncs.pyx b/__packaged__/coreml/.python_dependencies/scipy/special/_ufuncs.pyx
new file mode 100644
index 00000000..841ac457
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/_ufuncs.pyx
@@ -0,0 +1,20949 @@
+# This file is automatically generated by _generate_pyx.py.
+# Do not edit manually!
+
+from libc.math cimport NAN
+
+include "_ufuncs_extra_code_common.pxi"
+include "_ufuncs_extra_code.pxi"
+__all__ = ['agm', 'airy', 'airye', 'bdtr', 'bdtrc', 'bdtri', 'bdtrik', 'bdtrin', 'bei', 'beip', 'ber', 'berp', 'besselpoly', 'beta', 'betainc', 'betaincinv', 'betaln', 'binom', 'boxcox', 'boxcox1p', 'btdtr', 'btdtri', 'btdtria', 'btdtrib', 'cbrt', 'chdtr', 'chdtrc', 'chdtri', 'chdtriv', 'chndtr', 'chndtridf', 'chndtrinc', 'chndtrix', 'cosdg', 'cosm1', 'cotdg', 'dawsn', 'ellipe', 'ellipeinc', 'ellipj', 'ellipk', 'ellipkinc', 'ellipkm1', 'elliprc', 'elliprd', 'elliprf', 'elliprg', 'elliprj', 'entr', 'erf', 'erfc', 'erfcinv', 'erfcx', 'erfi', 'erfinv', 'eval_chebyc', 'eval_chebys', 'eval_chebyt', 'eval_chebyu', 'eval_gegenbauer', 'eval_genlaguerre', 'eval_hermite', 'eval_hermitenorm', 'eval_jacobi', 'eval_laguerre', 'eval_legendre', 'eval_sh_chebyt', 'eval_sh_chebyu', 'eval_sh_jacobi', 'eval_sh_legendre', 'exp1', 'exp10', 'exp2', 'expi', 'expit', 'expm1', 'expn', 'exprel', 'fdtr', 'fdtrc', 'fdtri', 'fdtridfd', 'fresnel', 'gamma', 'gammainc', 'gammaincc', 'gammainccinv', 'gammaincinv', 'gammaln', 'gammasgn', 'gdtr', 'gdtrc', 'gdtria', 'gdtrib', 'gdtrix', 'hankel1', 'hankel1e', 'hankel2', 'hankel2e', 'huber', 'hyp0f1', 'hyp1f1', 'hyp2f1', 'hyperu', 'i0', 'i0e', 'i1', 'i1e', 'inv_boxcox', 'inv_boxcox1p', 'it2i0k0', 'it2j0y0', 'it2struve0', 'itairy', 'iti0k0', 'itj0y0', 'itmodstruve0', 'itstruve0', 'iv', 'ive', 'j0', 'j1', 'jv', 'jve', 'k0', 'k0e', 'k1', 'k1e', 'kei', 'keip', 'kelvin', 'ker', 'kerp', 'kl_div', 'kn', 'kolmogi', 'kolmogorov', 'kv', 'kve', 'log1p', 'log_expit', 'log_ndtr', 'loggamma', 'logit', 'lpmv', 'mathieu_a', 'mathieu_b', 'mathieu_cem', 'mathieu_modcem1', 'mathieu_modcem2', 'mathieu_modsem1', 'mathieu_modsem2', 'mathieu_sem', 'modfresnelm', 'modfresnelp', 'modstruve', 'nbdtr', 'nbdtrc', 'nbdtri', 'nbdtrik', 'nbdtrin', 'ncfdtr', 'ncfdtri', 'ncfdtridfd', 'ncfdtridfn', 'ncfdtrinc', 'nctdtr', 'nctdtridf', 'nctdtrinc', 'nctdtrit', 'ndtr', 'ndtri', 'ndtri_exp', 'nrdtrimn', 'nrdtrisd', 'obl_ang1', 'obl_ang1_cv', 'obl_cv', 'obl_rad1', 'obl_rad1_cv', 'obl_rad2', 'obl_rad2_cv', 'owens_t', 'pbdv', 'pbvv', 'pbwa', 'pdtr', 'pdtrc', 'pdtri', 'pdtrik', 'poch', 'powm1', 'pro_ang1', 'pro_ang1_cv', 'pro_cv', 'pro_rad1', 'pro_rad1_cv', 'pro_rad2', 'pro_rad2_cv', 'pseudo_huber', 'psi', 'radian', 'rel_entr', 'rgamma', 'round', 'shichi', 'sici', 'sindg', 'smirnov', 'smirnovi', 'spence', 'sph_harm', 'stdtr', 'stdtridf', 'stdtrit', 'struve', 'tandg', 'tklmbda', 'voigt_profile', 'wofz', 'wright_bessel', 'wrightomega', 'xlog1py', 'xlogy', 'y0', 'y1', 'yn', 'yv', 'yve', 'zetac', 'geterr', 'seterr', 'errstate', 'jn']
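+# Editorial note on the generated loop names below:
+# loop_<ret>_<args>__As_<in>_<out> encodes the C kernel signature and the
+# array dtypes the inner loop is registered for, with d = double, f = float,
+# D = double complex, F = float complex, i = int, l = long; e.g.
+# loop_D_dD__As_fF_F evaluates a "double complex f(double, double complex)"
+# kernel on float32/complex64 arrays, writing complex64 output.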
+cdef void loop_D_DDDD__As_DDDD_D(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *ip2 = args[2]
+    cdef char *ip3 = args[3]
+    cdef char *op0 = args[4]
+    cdef double complex ov0
+    for i in range(n):
+        ov0 = (<double complex(*)(double complex, double complex, double complex, double complex) nogil>func)((<double complex*>ip0)[0], (<double complex*>ip1)[0], (<double complex*>ip2)[0], (<double complex*>ip3)[0])
+        (<double complex*>op0)[0] = ov0
+        ip0 += steps[0]
+        ip1 += steps[1]
+        ip2 += steps[2]
+        ip3 += steps[3]
+        op0 += steps[4]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_D_DDDD__As_FFFF_F(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *ip2 = args[2]
+    cdef char *ip3 = args[3]
+    cdef char *op0 = args[4]
+    cdef double complex ov0
+    for i in range(n):
+        ov0 = (<double complex(*)(double complex, double complex, double complex, double complex) nogil>func)(<double complex>(<float complex*>ip0)[0], <double complex>(<float complex*>ip1)[0], <double complex>(<float complex*>ip2)[0], <double complex>(<float complex*>ip3)[0])
+        (<float complex*>op0)[0] = <float complex>ov0
+        ip0 += steps[0]
+        ip1 += steps[1]
+        ip2 += steps[2]
+        ip3 += steps[3]
+        op0 += steps[4]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_D_DDD__As_DDD_D(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *ip2 = args[2]
+    cdef char *op0 = args[3]
+    cdef double complex ov0
+    for i in range(n):
+        ov0 = (<double complex(*)(double complex, double complex, double complex) nogil>func)((<double complex*>ip0)[0], (<double complex*>ip1)[0], (<double complex*>ip2)[0])
+        (<double complex*>op0)[0] = ov0
+        ip0 += steps[0]
+        ip1 += steps[1]
+        ip2 += steps[2]
+        op0 += steps[3]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_D_DDD__As_FFF_F(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *ip2 = args[2]
+    cdef char *op0 = args[3]
+    cdef double complex ov0
+    for i in range(n):
+        ov0 = (<double complex(*)(double complex, double complex, double complex) nogil>func)(<double complex>(<float complex*>ip0)[0], <double complex>(<float complex*>ip1)[0], <double complex>(<float complex*>ip2)[0])
+        (<float complex*>op0)[0] = <float complex>ov0
+        ip0 += steps[0]
+        ip1 += steps[1]
+        ip2 += steps[2]
+        op0 += steps[3]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_D_DD__As_DD_D(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *op0 = args[2]
+    cdef double complex ov0
+    for i in range(n):
+        ov0 = (<double complex(*)(double complex, double complex) nogil>func)((<double complex*>ip0)[0], (<double complex*>ip1)[0])
+        (<double complex*>op0)[0] = ov0
+        ip0 += steps[0]
+        ip1 += steps[1]
+        op0 += steps[2]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_D_DD__As_FF_F(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *op0 = args[2]
+    cdef double complex ov0
+    for i in range(n):
+        ov0 = (<double complex(*)(double complex, double complex) nogil>func)(<double complex>(<float complex*>ip0)[0], <double complex>(<float complex*>ip1)[0])
+        (<float complex*>op0)[0] = <float complex>ov0
+        ip0 += steps[0]
+        ip1 += steps[1]
+        op0 += steps[2]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_D_D__As_D_D(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *op0 = args[1]
+    cdef double complex ov0
+    for i in range(n):
+        ov0 = (<double complex(*)(double complex) nogil>func)((<double complex*>ip0)[0])
+        (<double complex*>op0)[0] = ov0
+        ip0 += steps[0]
+        op0 += steps[1]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_D_D__As_F_F(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *op0 = args[1]
+    cdef double complex ov0
+    for i in range(n):
+        ov0 = (<double complex(*)(double complex) nogil>func)(<double complex>(<float complex*>ip0)[0])
+        (<float complex*>op0)[0] = <float complex>ov0
+        ip0 += steps[0]
+        op0 += steps[1]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_D_Dld__As_Dld_D(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *ip2 = args[2]
+    cdef char *op0 = args[3]
+    cdef double complex ov0
+    for i in range(n):
+        ov0 = (<double complex(*)(double complex, long, double) nogil>func)((<double complex*>ip0)[0], (<long*>ip1)[0], (<double*>ip2)[0])
+        (<double complex*>op0)[0] = ov0
+        ip0 += steps[0]
+        ip1 += steps[1]
+        ip2 += steps[2]
+        op0 += steps[3]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_D_dD__As_dD_D(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *op0 = args[2]
+    cdef double complex ov0
+    for i in range(n):
+        ov0 = (<double complex(*)(double, double complex) nogil>func)((<double*>ip0)[0], (<double complex*>ip1)[0])
+        (<double complex*>op0)[0] = ov0
+        ip0 += steps[0]
+        ip1 += steps[1]
+        op0 += steps[2]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_D_dD__As_fF_F(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *op0 = args[2]
+    cdef double complex ov0
+    for i in range(n):
+        ov0 = (<double complex(*)(double, double complex) nogil>func)(<double>(<float*>ip0)[0], <double complex>(<float complex*>ip1)[0])
+        (<float complex*>op0)[0] = <float complex>ov0
+        ip0 += steps[0]
+        ip1 += steps[1]
+        op0 += steps[2]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_D_ddD__As_ddD_D(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *ip2 = args[2]
+    cdef char *op0 = args[3]
+    cdef double complex ov0
+    for i in range(n):
+        ov0 = (<double complex(*)(double, double, double complex) nogil>func)((<double*>ip0)[0], (<double*>ip1)[0], (<double complex*>ip2)[0])
+        (<double complex*>op0)[0] = ov0
+        ip0 += steps[0]
+        ip1 += steps[1]
+        ip2 += steps[2]
+        op0 += steps[3]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_D_ddD__As_ffF_F(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *ip2 = args[2]
+    cdef char *op0 = args[3]
+    cdef double complex ov0
+    for i in range(n):
+        ov0 = (<double complex(*)(double, double, double complex) nogil>func)(<double>(<float*>ip0)[0], <double>(<float*>ip1)[0], <double complex>(<float complex*>ip2)[0])
+        (<float complex*>op0)[0] = <float complex>ov0
+        ip0 += steps[0]
+        ip1 += steps[1]
+        ip2 += steps[2]
+        op0 += steps[3]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_D_dddD__As_dddD_D(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *ip2 = args[2]
+    cdef char *ip3 = args[3]
+    cdef char *op0 = args[4]
+    cdef double complex ov0
+    for i in range(n):
+        ov0 = (<double complex(*)(double, double, double, double complex) nogil>func)((<double*>ip0)[0], (<double*>ip1)[0], (<double*>ip2)[0], (<double complex*>ip3)[0])
+        (<double complex*>op0)[0] = ov0
+        ip0 += steps[0]
+        ip1 += steps[1]
+        ip2 += steps[2]
+        ip3 += steps[3]
+        op0 += steps[4]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_D_dddD__As_fffF_F(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *ip2 = args[2]
+    cdef char *ip3 = args[3]
+    cdef char *op0 = args[4]
+    cdef double complex ov0
+    for i in range(n):
+        ov0 = (<double complex(*)(double, double, double, double complex) nogil>func)(<double>(<float*>ip0)[0], <double>(<float*>ip1)[0], <double>(<float*>ip2)[0], <double complex>(<float complex*>ip3)[0])
+        (<float complex*>op0)[0] = <float complex>ov0
+        ip0 += steps[0]
+        ip1 += steps[1]
+        ip2 += steps[2]
+        ip3 += steps[3]
+        op0 += steps[4]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_D_dddd__As_dddd_D(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *ip2 = args[2]
+    cdef char *ip3 = args[3]
+    cdef char *op0 = args[4]
+    cdef double complex ov0
+    for i in range(n):
+        ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0], (ip3)[0])
+        (op0)[0] = ov0
+        ip0 += steps[0]
+        ip1 += steps[1]
+        ip2 += steps[2]
+        ip3 += steps[3]
+        op0 += steps[4]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_D_dddd__As_ffff_F(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (data)[0]
+    cdef char *func_name = (data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *ip2 = args[2]
+    cdef char *ip3 = args[3]
+    cdef char *op0 = args[4]
+    cdef double complex ov0
+    for i in range(n):
+        ov0 = (func)((ip0)[0], (ip1)[0], (ip2)[0], (ip3)[0])
+        (op0)[0] = ov0
+        ip0 += steps[0]
+        ip1 += steps[1]
+        ip2 += steps[2]
+        ip3 += steps[3]
+        op0 += steps[4]
+    sf_error.check_fpe(func_name)
+
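+# Loops that feed long (l) array elements into an int (i) kernel argument
+# guard each call: a value is used only if it round-trips through a C int
+# unchanged; otherwise sf_error reports a DOMAIN error and the output is set
+# to NaN rather than silently truncating the argument.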
+cdef void loop_D_iidd__As_lldd_D(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *ip2 = args[2]
+    cdef char *ip3 = args[3]
+    cdef char *op0 = args[4]
+    cdef double complex ov0
+    for i in range(n):
+        if (<long*>ip0)[0] == <int>(<long*>ip0)[0] and (<long*>ip1)[0] == <int>(<long*>ip1)[0]:
+            ov0 = (<double complex(*)(int, int, double, double) nogil>func)(<int>(<long*>ip0)[0], <int>(<long*>ip1)[0], <double>(<double*>ip2)[0], <double>(<double*>ip3)[0])
+        else:
+            sf_error.error(func_name, sf_error.DOMAIN, "invalid input argument")
+            ov0 = NAN
+        (<double complex*>op0)[0] = <double complex>ov0
+        ip0 += steps[0]
+        ip1 += steps[1]
+        ip2 += steps[2]
+        ip3 += steps[3]
+        op0 += steps[4]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_D_lD__As_lD_D(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *op0 = args[2]
+    cdef double complex ov0
+    for i in range(n):
+        ov0 = (<double complex(*)(long, double complex) nogil>func)(<long>(<long*>ip0)[0], <double complex>(<double complex*>ip1)[0])
+        (<double complex*>op0)[0] = <double complex>ov0
+        ip0 += steps[0]
+        ip1 += steps[1]
+        op0 += steps[2]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_d_d__As_d_d(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *op0 = args[1]
+    cdef double ov0
+    for i in range(n):
+        ov0 = (<double(*)(double) nogil>func)(<double>(<double*>ip0)[0])
+        (<double*>op0)[0] = <double>ov0
+        ip0 += steps[0]
+        op0 += steps[1]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_d_d__As_f_f(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *op0 = args[1]
+    cdef double ov0
+    for i in range(n):
+        ov0 = (<double(*)(double) nogil>func)(<double>(<float*>ip0)[0])
+        (<float*>op0)[0] = <float>ov0
+        ip0 += steps[0]
+        op0 += steps[1]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_d_dd__As_dd_d(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *op0 = args[2]
+    cdef double ov0
+    for i in range(n):
+        ov0 = (<double(*)(double, double) nogil>func)(<double>(<double*>ip0)[0], <double>(<double*>ip1)[0])
+        (<double*>op0)[0] = <double>ov0
+        ip0 += steps[0]
+        ip1 += steps[1]
+        op0 += steps[2]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_d_dd__As_ff_f(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *op0 = args[2]
+    cdef double ov0
+    for i in range(n):
+        ov0 = (<double(*)(double, double) nogil>func)(<double>(<float*>ip0)[0], <double>(<float*>ip1)[0])
+        (<float*>op0)[0] = <float>ov0
+        ip0 += steps[0]
+        ip1 += steps[1]
+        op0 += steps[2]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_d_ddd__As_ddd_d(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *ip2 = args[2]
+    cdef char *op0 = args[3]
+    cdef double ov0
+    for i in range(n):
+        ov0 = (<double(*)(double, double, double) nogil>func)(<double>(<double*>ip0)[0], <double>(<double*>ip1)[0], <double>(<double*>ip2)[0])
+        (<double*>op0)[0] = <double>ov0
+        ip0 += steps[0]
+        ip1 += steps[1]
+        ip2 += steps[2]
+        op0 += steps[3]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_d_ddd__As_fff_f(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *ip2 = args[2]
+    cdef char *op0 = args[3]
+    cdef double ov0
+    for i in range(n):
+        ov0 = (<double(*)(double, double, double) nogil>func)(<double>(<float*>ip0)[0], <double>(<float*>ip1)[0], <double>(<float*>ip2)[0])
+        (<float*>op0)[0] = <float>ov0
+        ip0 += steps[0]
+        ip1 += steps[1]
+        ip2 += steps[2]
+        op0 += steps[3]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_d_dddd__As_dddd_d(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *ip2 = args[2]
+    cdef char *ip3 = args[3]
+    cdef char *op0 = args[4]
+    cdef double ov0
+    for i in range(n):
+        ov0 = (<double(*)(double, double, double, double) nogil>func)(<double>(<double*>ip0)[0], <double>(<double*>ip1)[0], <double>(<double*>ip2)[0], <double>(<double*>ip3)[0])
+        (<double*>op0)[0] = <double>ov0
+        ip0 += steps[0]
+        ip1 += steps[1]
+        ip2 += steps[2]
+        ip3 += steps[3]
+        op0 += steps[4]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_d_dddd__As_ffff_f(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *ip2 = args[2]
+    cdef char *ip3 = args[3]
+    cdef char *op0 = args[4]
+    cdef double ov0
+    for i in range(n):
+        ov0 = (<double(*)(double, double, double, double) nogil>func)(<double>(<float*>ip0)[0], <double>(<float*>ip1)[0], <double>(<float*>ip2)[0], <double>(<float*>ip3)[0])
+        (<float*>op0)[0] = <float>ov0
+        ip0 += steps[0]
+        ip1 += steps[1]
+        ip2 += steps[2]
+        ip3 += steps[3]
+        op0 += steps[4]
+    sf_error.check_fpe(func_name)
+
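+# Kernels with more than one result return the first value directly and
+# write the remaining ones through pointer arguments: the loop passes the
+# addresses of its local ov* temporaries (e.g. &ov1) and then copies each
+# temporary into the corresponding output array slot.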
+cdef void loop_d_dddd_d_As_dddd_dd(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *ip2 = args[2]
+    cdef char *ip3 = args[3]
+    cdef char *op0 = args[4]
+    cdef char *op1 = args[5]
+    cdef double ov0
+    cdef double ov1
+    for i in range(n):
+        ov0 = (<double(*)(double, double, double, double, double *) nogil>func)(<double>(<double*>ip0)[0], <double>(<double*>ip1)[0], <double>(<double*>ip2)[0], <double>(<double*>ip3)[0], &ov1)
+        (<double*>op0)[0] = <double>ov0
+        (<double*>op1)[0] = <double>ov1
+        ip0 += steps[0]
+        ip1 += steps[1]
+        ip2 += steps[2]
+        ip3 += steps[3]
+        op0 += steps[4]
+        op1 += steps[5]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_d_dddd_d_As_ffff_ff(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *ip2 = args[2]
+    cdef char *ip3 = args[3]
+    cdef char *op0 = args[4]
+    cdef char *op1 = args[5]
+    cdef double ov0
+    cdef double ov1
+    for i in range(n):
+        ov0 = (<double(*)(double, double, double, double, double *) nogil>func)(<double>(<float*>ip0)[0], <double>(<float*>ip1)[0], <double>(<float*>ip2)[0], <double>(<float*>ip3)[0], &ov1)
+        (<float*>op0)[0] = <float>ov0
+        (<float*>op1)[0] = <float>ov1
+        ip0 += steps[0]
+        ip1 += steps[1]
+        ip2 += steps[2]
+        ip3 += steps[3]
+        op0 += steps[4]
+        op1 += steps[5]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_d_ddddddd__As_ddddddd_d(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *ip2 = args[2]
+    cdef char *ip3 = args[3]
+    cdef char *ip4 = args[4]
+    cdef char *ip5 = args[5]
+    cdef char *ip6 = args[6]
+    cdef char *op0 = args[7]
+    cdef double ov0
+    for i in range(n):
+        ov0 = (<double(*)(double, double, double, double, double, double, double) nogil>func)(<double>(<double*>ip0)[0], <double>(<double*>ip1)[0], <double>(<double*>ip2)[0], <double>(<double*>ip3)[0], <double>(<double*>ip4)[0], <double>(<double*>ip5)[0], <double>(<double*>ip6)[0])
+        (<double*>op0)[0] = <double>ov0
+        ip0 += steps[0]
+        ip1 += steps[1]
+        ip2 += steps[2]
+        ip3 += steps[3]
+        ip4 += steps[4]
+        ip5 += steps[5]
+        ip6 += steps[6]
+        op0 += steps[7]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_d_ddddddd__As_fffffff_f(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *ip2 = args[2]
+    cdef char *ip3 = args[3]
+    cdef char *ip4 = args[4]
+    cdef char *ip5 = args[5]
+    cdef char *ip6 = args[6]
+    cdef char *op0 = args[7]
+    cdef double ov0
+    for i in range(n):
+        ov0 = (<double(*)(double, double, double, double, double, double, double) nogil>func)(<double>(<float*>ip0)[0], <double>(<float*>ip1)[0], <double>(<float*>ip2)[0], <double>(<float*>ip3)[0], <double>(<float*>ip4)[0], <double>(<float*>ip5)[0], <double>(<float*>ip6)[0])
+        (<float*>op0)[0] = <float>ov0
+        ip0 += steps[0]
+        ip1 += steps[1]
+        ip2 += steps[2]
+        ip3 += steps[3]
+        ip4 += steps[4]
+        ip5 += steps[5]
+        ip6 += steps[6]
+        op0 += steps[7]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_d_ddi_d_As_ddl_dd(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *ip2 = args[2]
+    cdef char *op0 = args[3]
+    cdef char *op1 = args[4]
+    cdef double ov0
+    cdef double ov1
+    for i in range(n):
+        if (<long*>ip2)[0] == <int>(<long*>ip2)[0]:
+            ov0 = (<double(*)(double, double, int, double *) nogil>func)(<double>(<double*>ip0)[0], <double>(<double*>ip1)[0], <int>(<long*>ip2)[0], &ov1)
+        else:
+            sf_error.error(func_name, sf_error.DOMAIN, "invalid input argument")
+            ov0 = NAN
+            ov1 = NAN
+        (<double*>op0)[0] = <double>ov0
+        (<double*>op1)[0] = <double>ov1
+        ip0 += steps[0]
+        ip1 += steps[1]
+        ip2 += steps[2]
+        op0 += steps[3]
+        op1 += steps[4]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_d_ddiiddd__As_ddllddd_d(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *ip2 = args[2]
+    cdef char *ip3 = args[3]
+    cdef char *ip4 = args[4]
+    cdef char *ip5 = args[5]
+    cdef char *ip6 = args[6]
+    cdef char *op0 = args[7]
+    cdef double ov0
+    for i in range(n):
+        if (<long*>ip2)[0] == <int>(<long*>ip2)[0] and (<long*>ip3)[0] == <int>(<long*>ip3)[0]:
+            ov0 = (<double(*)(double, double, int, int, double, double, double) nogil>func)(<double>(<double*>ip0)[0], <double>(<double*>ip1)[0], <int>(<long*>ip2)[0], <int>(<long*>ip3)[0], <double>(<double*>ip4)[0], <double>(<double*>ip5)[0], <double>(<double*>ip6)[0])
+        else:
+            sf_error.error(func_name, sf_error.DOMAIN, "invalid input argument")
+            ov0 = NAN
+        (<double*>op0)[0] = <double>ov0
+        ip0 += steps[0]
+        ip1 += steps[1]
+        ip2 += steps[2]
+        ip3 += steps[3]
+        ip4 += steps[4]
+        ip5 += steps[5]
+        ip6 += steps[6]
+        op0 += steps[7]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_d_did__As_dld_d(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *ip2 = args[2]
+    cdef char *op0 = args[3]
+    cdef double ov0
+    for i in range(n):
+        if (<long*>ip1)[0] == <int>(<long*>ip1)[0]:
+            ov0 = (<double(*)(double, int, double) nogil>func)(<double>(<double*>ip0)[0], <int>(<long*>ip1)[0], <double>(<double*>ip2)[0])
+        else:
+            sf_error.error(func_name, sf_error.DOMAIN, "invalid input argument")
+            ov0 = NAN
+        (<double*>op0)[0] = <double>ov0
+        ip0 += steps[0]
+        ip1 += steps[1]
+        ip2 += steps[2]
+        op0 += steps[3]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_d_id__As_ld_d(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *op0 = args[2]
+    cdef double ov0
+    for i in range(n):
+        if (<long*>ip0)[0] == <int>(<long*>ip0)[0]:
+            ov0 = (<double(*)(int, double) nogil>func)(<int>(<long*>ip0)[0], <double>(<double*>ip1)[0])
+        else:
+            sf_error.error(func_name, sf_error.DOMAIN, "invalid input argument")
+            ov0 = NAN
+        (<double*>op0)[0] = <double>ov0
+        ip0 += steps[0]
+        ip1 += steps[1]
+        op0 += steps[2]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_d_iid__As_lld_d(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *ip2 = args[2]
+    cdef char *op0 = args[3]
+    cdef double ov0
+    for i in range(n):
+        if (<long*>ip0)[0] == <int>(<long*>ip0)[0] and (<long*>ip1)[0] == <int>(<long*>ip1)[0]:
+            ov0 = (<double(*)(int, int, double) nogil>func)(<int>(<long*>ip0)[0], <int>(<long*>ip1)[0], <double>(<double*>ip2)[0])
+        else:
+            sf_error.error(func_name, sf_error.DOMAIN, "invalid input argument")
+            ov0 = NAN
+        (<double*>op0)[0] = <double>ov0
+        ip0 += steps[0]
+        ip1 += steps[1]
+        ip2 += steps[2]
+        op0 += steps[3]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_d_ld__As_ld_d(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *op0 = args[2]
+    cdef double ov0
+    for i in range(n):
+        ov0 = (<double(*)(long, double) nogil>func)(<long>(<long*>ip0)[0], <double>(<double*>ip1)[0])
+        (<double*>op0)[0] = <double>ov0
+        ip0 += steps[0]
+        ip1 += steps[1]
+        op0 += steps[2]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_d_ldd__As_ldd_d(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *ip2 = args[2]
+    cdef char *op0 = args[3]
+    cdef double ov0
+    for i in range(n):
+        ov0 = (<double(*)(long, double, double) nogil>func)(<long>(<long*>ip0)[0], <double>(<double*>ip1)[0], <double>(<double*>ip2)[0])
+        (<double*>op0)[0] = <double>ov0
+        ip0 += steps[0]
+        ip1 += steps[1]
+        ip2 += steps[2]
+        op0 += steps[3]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_d_lddd__As_lddd_d(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *ip2 = args[2]
+    cdef char *ip3 = args[3]
+    cdef char *op0 = args[4]
+    cdef double ov0
+    for i in range(n):
+        ov0 = (<double(*)(long, double, double, double) nogil>func)(<long>(<long*>ip0)[0], <double>(<double*>ip1)[0], <double>(<double*>ip2)[0], <double>(<double*>ip3)[0])
+        (<double*>op0)[0] = <double>ov0
+        ip0 += steps[0]
+        ip1 += steps[1]
+        ip2 += steps[2]
+        ip3 += steps[3]
+        op0 += steps[4]
+    sf_error.check_fpe(func_name)
+
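+# The f and g loops wrap kernels that work natively in single and extended
+# (long double) precision, so the array dtype matches the kernel type and no
+# widening or narrowing is needed.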
+cdef void loop_f_f__As_f_f(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *op0 = args[1]
+    cdef float ov0
+    for i in range(n):
+        ov0 = (<float(*)(float) nogil>func)(<float>(<float*>ip0)[0])
+        (<float*>op0)[0] = <float>ov0
+        ip0 += steps[0]
+        op0 += steps[1]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_f_ff__As_ff_f(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *op0 = args[2]
+    cdef float ov0
+    for i in range(n):
+        ov0 = (<float(*)(float, float) nogil>func)(<float>(<float*>ip0)[0], <float>(<float*>ip1)[0])
+        (<float*>op0)[0] = <float>ov0
+        ip0 += steps[0]
+        ip1 += steps[1]
+        op0 += steps[2]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_g_g__As_g_g(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *op0 = args[1]
+    cdef long double ov0
+    for i in range(n):
+        ov0 = (<long double(*)(long double) nogil>func)(<long double>(<long double*>ip0)[0])
+        (<long double*>op0)[0] = <long double>ov0
+        ip0 += steps[0]
+        op0 += steps[1]
+    sf_error.check_fpe(func_name)
+
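+# Loops prefixed i_ wrap kernels that return an int status code and deliver
+# all numerical results through output pointers (the Airy wrappers declared
+# further down have this shape); the status itself is discarded here.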
+cdef void loop_i_D_DDDD_As_D_DDDD(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *op0 = args[1]
+    cdef char *op1 = args[2]
+    cdef char *op2 = args[3]
+    cdef char *op3 = args[4]
+    cdef double complex ov0
+    cdef double complex ov1
+    cdef double complex ov2
+    cdef double complex ov3
+    for i in range(n):
+        (<int(*)(double complex, double complex *, double complex *, double complex *, double complex *) nogil>func)(<double complex>(<double complex*>ip0)[0], &ov0, &ov1, &ov2, &ov3)
+        (<double complex*>op0)[0] = <double complex>ov0
+        (<double complex*>op1)[0] = <double complex>ov1
+        (<double complex*>op2)[0] = <double complex>ov2
+        (<double complex*>op3)[0] = <double complex>ov3
+        ip0 += steps[0]
+        op0 += steps[1]
+        op1 += steps[2]
+        op2 += steps[3]
+        op3 += steps[4]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_i_D_DDDD_As_F_FFFF(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *op0 = args[1]
+    cdef char *op1 = args[2]
+    cdef char *op2 = args[3]
+    cdef char *op3 = args[4]
+    cdef double complex ov0
+    cdef double complex ov1
+    cdef double complex ov2
+    cdef double complex ov3
+    for i in range(n):
+        (<int(*)(double complex, double complex *, double complex *, double complex *, double complex *) nogil>func)(<double complex>(<float complex*>ip0)[0], &ov0, &ov1, &ov2, &ov3)
+        (<float complex*>op0)[0] = <float complex>ov0
+        (<float complex*>op1)[0] = <float complex>ov1
+        (<float complex*>op2)[0] = <float complex>ov2
+        (<float complex*>op3)[0] = <float complex>ov3
+        ip0 += steps[0]
+        op0 += steps[1]
+        op1 += steps[2]
+        op2 += steps[3]
+        op3 += steps[4]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_i_D_DD_As_D_DD(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *op0 = args[1]
+    cdef char *op1 = args[2]
+    cdef double complex ov0
+    cdef double complex ov1
+    for i in range(n):
+        (<int(*)(double complex, double complex *, double complex *) nogil>func)(<double complex>(<double complex*>ip0)[0], &ov0, &ov1)
+        (<double complex*>op0)[0] = <double complex>ov0
+        (<double complex*>op1)[0] = <double complex>ov1
+        ip0 += steps[0]
+        op0 += steps[1]
+        op1 += steps[2]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_i_D_DD_As_F_FF(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *op0 = args[1]
+    cdef char *op1 = args[2]
+    cdef double complex ov0
+    cdef double complex ov1
+    for i in range(n):
+        (<int(*)(double complex, double complex *, double complex *) nogil>func)(<double complex>(<float complex*>ip0)[0], &ov0, &ov1)
+        (<float complex*>op0)[0] = <float complex>ov0
+        (<float complex*>op1)[0] = <float complex>ov1
+        ip0 += steps[0]
+        op0 += steps[1]
+        op1 += steps[2]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_i_d_DDDD_As_d_DDDD(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *op0 = args[1]
+    cdef char *op1 = args[2]
+    cdef char *op2 = args[3]
+    cdef char *op3 = args[4]
+    cdef double complex ov0
+    cdef double complex ov1
+    cdef double complex ov2
+    cdef double complex ov3
+    for i in range(n):
+        (<int(*)(double, double complex *, double complex *, double complex *, double complex *) nogil>func)(<double>(<double*>ip0)[0], &ov0, &ov1, &ov2, &ov3)
+        (<double complex*>op0)[0] = <double complex>ov0
+        (<double complex*>op1)[0] = <double complex>ov1
+        (<double complex*>op2)[0] = <double complex>ov2
+        (<double complex*>op3)[0] = <double complex>ov3
+        ip0 += steps[0]
+        op0 += steps[1]
+        op1 += steps[2]
+        op2 += steps[3]
+        op3 += steps[4]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_i_d_DDDD_As_f_FFFF(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *op0 = args[1]
+    cdef char *op1 = args[2]
+    cdef char *op2 = args[3]
+    cdef char *op3 = args[4]
+    cdef double complex ov0
+    cdef double complex ov1
+    cdef double complex ov2
+    cdef double complex ov3
+    for i in range(n):
+        (<int(*)(double, double complex *, double complex *, double complex *, double complex *) nogil>func)(<double>(<float*>ip0)[0], &ov0, &ov1, &ov2, &ov3)
+        (<float complex*>op0)[0] = <float complex>ov0
+        (<float complex*>op1)[0] = <float complex>ov1
+        (<float complex*>op2)[0] = <float complex>ov2
+        (<float complex*>op3)[0] = <float complex>ov3
+        ip0 += steps[0]
+        op0 += steps[1]
+        op1 += steps[2]
+        op2 += steps[3]
+        op3 += steps[4]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_i_d_DD_As_d_DD(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *op0 = args[1]
+    cdef char *op1 = args[2]
+    cdef double complex ov0
+    cdef double complex ov1
+    for i in range(n):
+        (<int(*)(double, double complex *, double complex *) nogil>func)(<double>(<double*>ip0)[0], &ov0, &ov1)
+        (<double complex*>op0)[0] = <double complex>ov0
+        (<double complex*>op1)[0] = <double complex>ov1
+        ip0 += steps[0]
+        op0 += steps[1]
+        op1 += steps[2]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_i_d_DD_As_f_FF(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *op0 = args[1]
+    cdef char *op1 = args[2]
+    cdef double complex ov0
+    cdef double complex ov1
+    for i in range(n):
+        (<int(*)(double, double complex *, double complex *) nogil>func)(<double>(<float*>ip0)[0], &ov0, &ov1)
+        (<float complex*>op0)[0] = <float complex>ov0
+        (<float complex*>op1)[0] = <float complex>ov1
+        ip0 += steps[0]
+        op0 += steps[1]
+        op1 += steps[2]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_i_d_dd_As_d_dd(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *op0 = args[1]
+    cdef char *op1 = args[2]
+    cdef double ov0
+    cdef double ov1
+    for i in range(n):
+        (<int(*)(double, double *, double *) nogil>func)(<double>(<double*>ip0)[0], &ov0, &ov1)
+        (<double*>op0)[0] = <double>ov0
+        (<double*>op1)[0] = <double>ov1
+        ip0 += steps[0]
+        op0 += steps[1]
+        op1 += steps[2]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_i_d_dd_As_f_ff(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *op0 = args[1]
+    cdef char *op1 = args[2]
+    cdef double ov0
+    cdef double ov1
+    for i in range(n):
+        (<int(*)(double, double *, double *) nogil>func)(<double>(<float*>ip0)[0], &ov0, &ov1)
+        (<float*>op0)[0] = <float>ov0
+        (<float*>op1)[0] = <float>ov1
+        ip0 += steps[0]
+        op0 += steps[1]
+        op1 += steps[2]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_i_d_dddd_As_d_dddd(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *op0 = args[1]
+    cdef char *op1 = args[2]
+    cdef char *op2 = args[3]
+    cdef char *op3 = args[4]
+    cdef double ov0
+    cdef double ov1
+    cdef double ov2
+    cdef double ov3
+    for i in range(n):
+        (<int(*)(double, double *, double *, double *, double *) nogil>func)(<double>(<double*>ip0)[0], &ov0, &ov1, &ov2, &ov3)
+        (<double*>op0)[0] = <double>ov0
+        (<double*>op1)[0] = <double>ov1
+        (<double*>op2)[0] = <double>ov2
+        (<double*>op3)[0] = <double>ov3
+        ip0 += steps[0]
+        op0 += steps[1]
+        op1 += steps[2]
+        op2 += steps[3]
+        op3 += steps[4]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_i_d_dddd_As_f_ffff(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *op0 = args[1]
+    cdef char *op1 = args[2]
+    cdef char *op2 = args[3]
+    cdef char *op3 = args[4]
+    cdef double ov0
+    cdef double ov1
+    cdef double ov2
+    cdef double ov3
+    for i in range(n):
+        (<int(*)(double, double *, double *, double *, double *) nogil>func)(<double>(<float*>ip0)[0], &ov0, &ov1, &ov2, &ov3)
+        (<float*>op0)[0] = <float>ov0
+        (<float*>op1)[0] = <float>ov1
+        (<float*>op2)[0] = <float>ov2
+        (<float*>op3)[0] = <float>ov3
+        ip0 += steps[0]
+        op0 += steps[1]
+        op1 += steps[2]
+        op2 += steps[3]
+        op3 += steps[4]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_i_dd_dd_As_dd_dd(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *op0 = args[2]
+    cdef char *op1 = args[3]
+    cdef double ov0
+    cdef double ov1
+    for i in range(n):
+        (<int(*)(double, double, double *, double *) nogil>func)(<double>(<double*>ip0)[0], <double>(<double*>ip1)[0], &ov0, &ov1)
+        (<double*>op0)[0] = <double>ov0
+        (<double*>op1)[0] = <double>ov1
+        ip0 += steps[0]
+        ip1 += steps[1]
+        op0 += steps[2]
+        op1 += steps[3]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_i_dd_dd_As_ff_ff(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *op0 = args[2]
+    cdef char *op1 = args[3]
+    cdef double ov0
+    cdef double ov1
+    for i in range(n):
+        (<int(*)(double, double, double *, double *) nogil>func)(<double>(<float*>ip0)[0], <double>(<float*>ip1)[0], &ov0, &ov1)
+        (<float*>op0)[0] = <float>ov0
+        (<float*>op1)[0] = <float>ov1
+        ip0 += steps[0]
+        ip1 += steps[1]
+        op0 += steps[2]
+        op1 += steps[3]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_i_dd_dddd_As_dd_dddd(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *op0 = args[2]
+    cdef char *op1 = args[3]
+    cdef char *op2 = args[4]
+    cdef char *op3 = args[5]
+    cdef double ov0
+    cdef double ov1
+    cdef double ov2
+    cdef double ov3
+    for i in range(n):
+        (<int(*)(double, double, double *, double *, double *, double *) nogil>func)(<double>(<double*>ip0)[0], <double>(<double*>ip1)[0], &ov0, &ov1, &ov2, &ov3)
+        (<double*>op0)[0] = <double>ov0
+        (<double*>op1)[0] = <double>ov1
+        (<double*>op2)[0] = <double>ov2
+        (<double*>op3)[0] = <double>ov3
+        ip0 += steps[0]
+        ip1 += steps[1]
+        op0 += steps[2]
+        op1 += steps[3]
+        op2 += steps[4]
+        op3 += steps[5]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_i_dd_dddd_As_ff_ffff(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *op0 = args[2]
+    cdef char *op1 = args[3]
+    cdef char *op2 = args[4]
+    cdef char *op3 = args[5]
+    cdef double ov0
+    cdef double ov1
+    cdef double ov2
+    cdef double ov3
+    for i in range(n):
+        (<int(*)(double, double, double *, double *, double *, double *) nogil>func)(<double>(<float*>ip0)[0], <double>(<float*>ip1)[0], &ov0, &ov1, &ov2, &ov3)
+        (<float*>op0)[0] = <float>ov0
+        (<float*>op1)[0] = <float>ov1
+        (<float*>op2)[0] = <float>ov2
+        (<float*>op3)[0] = <float>ov3
+        ip0 += steps[0]
+        ip1 += steps[1]
+        op0 += steps[2]
+        op1 += steps[3]
+        op2 += steps[4]
+        op3 += steps[5]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_i_ddd_dd_As_ddd_dd(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *ip2 = args[2]
+    cdef char *op0 = args[3]
+    cdef char *op1 = args[4]
+    cdef double ov0
+    cdef double ov1
+    for i in range(n):
+        (<int(*)(double, double, double, double *, double *) nogil>func)(<double>(<double*>ip0)[0], <double>(<double*>ip1)[0], <double>(<double*>ip2)[0], &ov0, &ov1)
+        (<double*>op0)[0] = <double>ov0
+        (<double*>op1)[0] = <double>ov1
+        ip0 += steps[0]
+        ip1 += steps[1]
+        ip2 += steps[2]
+        op0 += steps[3]
+        op1 += steps[4]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_i_ddd_dd_As_fff_ff(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *ip2 = args[2]
+    cdef char *op0 = args[3]
+    cdef char *op1 = args[4]
+    cdef double ov0
+    cdef double ov1
+    for i in range(n):
+        (<int(*)(double, double, double, double *, double *) nogil>func)(<double>(<float*>ip0)[0], <double>(<float*>ip1)[0], <double>(<float*>ip2)[0], &ov0, &ov1)
+        (<float*>op0)[0] = <float>ov0
+        (<float*>op1)[0] = <float>ov1
+        ip0 += steps[0]
+        ip1 += steps[1]
+        ip2 += steps[2]
+        op0 += steps[3]
+        op1 += steps[4]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_i_ddddd_dd_As_ddddd_dd(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *ip2 = args[2]
+    cdef char *ip3 = args[3]
+    cdef char *ip4 = args[4]
+    cdef char *op0 = args[5]
+    cdef char *op1 = args[6]
+    cdef double ov0
+    cdef double ov1
+    for i in range(n):
+        (<int(*)(double, double, double, double, double, double *, double *) nogil>func)(<double>(<double*>ip0)[0], <double>(<double*>ip1)[0], <double>(<double*>ip2)[0], <double>(<double*>ip3)[0], <double>(<double*>ip4)[0], &ov0, &ov1)
+        (<double*>op0)[0] = <double>ov0
+        (<double*>op1)[0] = <double>ov1
+        ip0 += steps[0]
+        ip1 += steps[1]
+        ip2 += steps[2]
+        ip3 += steps[3]
+        ip4 += steps[4]
+        op0 += steps[5]
+        op1 += steps[6]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_i_ddddd_dd_As_fffff_ff(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *ip1 = args[1]
+    cdef char *ip2 = args[2]
+    cdef char *ip3 = args[3]
+    cdef char *ip4 = args[4]
+    cdef char *op0 = args[5]
+    cdef char *op1 = args[6]
+    cdef double ov0
+    cdef double ov1
+    for i in range(n):
+        (<int(*)(double, double, double, double, double, double *, double *) nogil>func)(<double>(<float*>ip0)[0], <double>(<float*>ip1)[0], <double>(<float*>ip2)[0], <double>(<float*>ip3)[0], <double>(<float*>ip4)[0], &ov0, &ov1)
+        (<float*>op0)[0] = <float>ov0
+        (<float*>op1)[0] = <float>ov1
+        ip0 += steps[0]
+        ip1 += steps[1]
+        ip2 += steps[2]
+        ip3 += steps[3]
+        ip4 += steps[4]
+        op0 += steps[5]
+        op1 += steps[6]
+    sf_error.check_fpe(func_name)
+
+cdef void loop_i_i__As_l_l(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:
+    cdef np.npy_intp i, n = dims[0]
+    cdef void *func = (<void**>data)[0]
+    cdef char *func_name = <char*>(<void**>data)[1]
+    cdef char *ip0 = args[0]
+    cdef char *op0 = args[1]
+    cdef int ov0
+    for i in range(n):
+        if (<long*>ip0)[0] == <int>(<long*>ip0)[0]:
+            ov0 = (<int(*)(int) nogil>func)(<int>(<long*>ip0)[0])
+        else:
+            sf_error.error(func_name, sf_error.DOMAIN, "invalid input argument")
+            ov0 = <int>0xbad0bad0
+        (<long*>op0)[0] = <long>ov0
+        ip0 += steps[0]
+        op0 += steps[1]
+    sf_error.check_fpe(func_name)
+
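+# The block below binds each scalar kernel the ufuncs dispatch to: either a
+# cimport from a Cython support module together with a ctypedef of its
+# prototype and a pointer variable holding its address, or an extern
+# declaration pulled from the generated _ufuncs_defs.h header.  The absolute
+# /private/var/... paths are an artifact of the CI machine on which this
+# vendored file was generated.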
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cosine_cdf "cosine_cdf"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cosine_invcdf "cosine_invcdf"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cospi "cospi"(double) nogil
+from ._trig cimport ccospi as _func_ccospi
+ctypedef double complex _proto_ccospi_t(double complex) nogil
+cdef _proto_ccospi_t *_proto_ccospi_t_var = &_func_ccospi
+from ._ellip_harm cimport ellip_harmonic as _func_ellip_harmonic
+ctypedef double _proto_ellip_harmonic_t(double, double, int, int, double, double, double) nogil
+cdef _proto_ellip_harmonic_t *_proto_ellip_harmonic_t_var = &_func_ellip_harmonic
+from ._legacy cimport ellip_harmonic_unsafe as _func_ellip_harmonic_unsafe
+ctypedef double _proto_ellip_harmonic_unsafe_t(double, double, double, double, double, double, double) nogil
+cdef _proto_ellip_harmonic_unsafe_t *_proto_ellip_harmonic_unsafe_t_var = &_func_ellip_harmonic_unsafe
+from ._factorial cimport _factorial as _func__factorial
+ctypedef double _proto__factorial_t(double) nogil
+cdef _proto__factorial_t *_proto__factorial_t_var = &_func__factorial
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_igam_fac "igam_fac"(double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_kolmogc "kolmogc"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_kolmogci "kolmogci"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_kolmogp "kolmogp"(double) nogil
+from ._lambertw cimport lambertw_scalar as _func_lambertw_scalar
+ctypedef double complex _proto_lambertw_scalar_t(double complex, long, double) nogil
+cdef _proto_lambertw_scalar_t *_proto_lambertw_scalar_t_var = &_func_lambertw_scalar
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_lanczos_sum_expg_scaled "lanczos_sum_expg_scaled"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_lgam1p "lgam1p"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_log1pmx "log1pmx"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_riemann_zeta "riemann_zeta"(double) nogil
+from .sf_error cimport _sf_error_test_function as _func__sf_error_test_function
+ctypedef int _proto__sf_error_test_function_t(int) nogil
+cdef _proto__sf_error_test_function_t *_proto__sf_error_test_function_t_var = &_func__sf_error_test_function
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_sinpi "sinpi"(double) nogil
+from ._trig cimport csinpi as _func_csinpi
+ctypedef double complex _proto_csinpi_t(double complex) nogil
+cdef _proto_csinpi_t *_proto_csinpi_t_var = &_func_csinpi
+from ._legacy cimport smirnovc_unsafe as _func_smirnovc_unsafe
+ctypedef double _proto_smirnovc_unsafe_t(double, double) nogil
+cdef _proto_smirnovc_unsafe_t *_proto_smirnovc_unsafe_t_var = &_func_smirnovc_unsafe
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_smirnovc "smirnovc"(int, double) nogil
+from ._legacy cimport smirnovci_unsafe as _func_smirnovci_unsafe
+ctypedef double _proto_smirnovci_unsafe_t(double, double) nogil
+cdef _proto_smirnovci_unsafe_t *_proto_smirnovci_unsafe_t_var = &_func_smirnovci_unsafe
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_smirnovci "smirnovci"(int, double) nogil
+from ._legacy cimport smirnovp_unsafe as _func_smirnovp_unsafe
+ctypedef double _proto_smirnovp_unsafe_t(double, double) nogil
+cdef _proto_smirnovp_unsafe_t *_proto_smirnovp_unsafe_t_var = &_func_smirnovp_unsafe
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_smirnovp "smirnovp"(int, double) nogil
+from ._spherical_bessel cimport spherical_in_complex as _func_spherical_in_complex
+ctypedef double complex _proto_spherical_in_complex_t(long, double complex) nogil
+cdef _proto_spherical_in_complex_t *_proto_spherical_in_complex_t_var = &_func_spherical_in_complex
+from ._spherical_bessel cimport spherical_in_real as _func_spherical_in_real
+ctypedef double _proto_spherical_in_real_t(long, double) nogil
+cdef _proto_spherical_in_real_t *_proto_spherical_in_real_t_var = &_func_spherical_in_real
+from ._spherical_bessel cimport spherical_in_d_complex as _func_spherical_in_d_complex
+ctypedef double complex _proto_spherical_in_d_complex_t(long, double complex) nogil
+cdef _proto_spherical_in_d_complex_t *_proto_spherical_in_d_complex_t_var = &_func_spherical_in_d_complex
+from ._spherical_bessel cimport spherical_in_d_real as _func_spherical_in_d_real
+ctypedef double _proto_spherical_in_d_real_t(long, double) nogil
+cdef _proto_spherical_in_d_real_t *_proto_spherical_in_d_real_t_var = &_func_spherical_in_d_real
+from ._spherical_bessel cimport spherical_jn_complex as _func_spherical_jn_complex
+ctypedef double complex _proto_spherical_jn_complex_t(long, double complex) nogil
+cdef _proto_spherical_jn_complex_t *_proto_spherical_jn_complex_t_var = &_func_spherical_jn_complex
+from ._spherical_bessel cimport spherical_jn_real as _func_spherical_jn_real
+ctypedef double _proto_spherical_jn_real_t(long, double) nogil
+cdef _proto_spherical_jn_real_t *_proto_spherical_jn_real_t_var = &_func_spherical_jn_real
+from ._spherical_bessel cimport spherical_jn_d_complex as _func_spherical_jn_d_complex
+ctypedef double complex _proto_spherical_jn_d_complex_t(long, double complex) nogil
+cdef _proto_spherical_jn_d_complex_t *_proto_spherical_jn_d_complex_t_var = &_func_spherical_jn_d_complex
+from ._spherical_bessel cimport spherical_jn_d_real as _func_spherical_jn_d_real
+ctypedef double _proto_spherical_jn_d_real_t(long, double) nogil
+cdef _proto_spherical_jn_d_real_t *_proto_spherical_jn_d_real_t_var = &_func_spherical_jn_d_real
+from ._spherical_bessel cimport spherical_kn_complex as _func_spherical_kn_complex
+ctypedef double complex _proto_spherical_kn_complex_t(long, double complex) nogil
+cdef _proto_spherical_kn_complex_t *_proto_spherical_kn_complex_t_var = &_func_spherical_kn_complex
+from ._spherical_bessel cimport spherical_kn_real as _func_spherical_kn_real
+ctypedef double _proto_spherical_kn_real_t(long, double) nogil
+cdef _proto_spherical_kn_real_t *_proto_spherical_kn_real_t_var = &_func_spherical_kn_real
+from ._spherical_bessel cimport spherical_kn_d_complex as _func_spherical_kn_d_complex
+ctypedef double complex _proto_spherical_kn_d_complex_t(long, double complex) nogil
+cdef _proto_spherical_kn_d_complex_t *_proto_spherical_kn_d_complex_t_var = &_func_spherical_kn_d_complex
+from ._spherical_bessel cimport spherical_kn_d_real as _func_spherical_kn_d_real
+ctypedef double _proto_spherical_kn_d_real_t(long, double) nogil
+cdef _proto_spherical_kn_d_real_t *_proto_spherical_kn_d_real_t_var = &_func_spherical_kn_d_real
+from ._spherical_bessel cimport spherical_yn_complex as _func_spherical_yn_complex
+ctypedef double complex _proto_spherical_yn_complex_t(long, double complex) nogil
+cdef _proto_spherical_yn_complex_t *_proto_spherical_yn_complex_t_var = &_func_spherical_yn_complex
+from ._spherical_bessel cimport spherical_yn_real as _func_spherical_yn_real
+ctypedef double _proto_spherical_yn_real_t(long, double) nogil
+cdef _proto_spherical_yn_real_t *_proto_spherical_yn_real_t_var = &_func_spherical_yn_real
+from ._spherical_bessel cimport spherical_yn_d_complex as _func_spherical_yn_d_complex
+ctypedef double complex _proto_spherical_yn_d_complex_t(long, double complex) nogil
+cdef _proto_spherical_yn_d_complex_t *_proto_spherical_yn_d_complex_t_var = &_func_spherical_yn_d_complex
+from ._spherical_bessel cimport spherical_yn_d_real as _func_spherical_yn_d_real
+ctypedef double _proto_spherical_yn_d_real_t(long, double) nogil
+cdef _proto_spherical_yn_d_real_t *_proto_spherical_yn_d_real_t_var = &_func_spherical_yn_d_real
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_struve_asymp_large_z "struve_asymp_large_z"(double, double, int, double *) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_struve_bessel_series "struve_bessel_series"(double, double, int, double *) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_struve_power_series "struve_power_series"(double, double, int, double *) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_zeta "zeta"(double, double) nogil
+from ._agm cimport agm as _func_agm
+ctypedef double _proto_agm_t(double, double) nogil
+cdef _proto_agm_t *_proto_agm_t_var = &_func_agm
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef int _func_airy_wrap "airy_wrap"(double, double *, double *, double *, double *) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef int _func_cairy_wrap "cairy_wrap"(double complex, double complex *, double complex *, double complex *, double complex *) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef int _func_cairy_wrap_e "cairy_wrap_e"(double complex, double complex *, double complex *, double complex *, double complex *) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef int _func_cairy_wrap_e_real "cairy_wrap_e_real"(double, double *, double *, double *, double *) nogil
+from ._legacy cimport bdtr_unsafe as _func_bdtr_unsafe
+ctypedef double _proto_bdtr_unsafe_t(double, double, double) nogil
+cdef _proto_bdtr_unsafe_t *_proto_bdtr_unsafe_t_var = &_func_bdtr_unsafe
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_bdtr "bdtr"(double, int, double) nogil
+from ._legacy cimport bdtrc_unsafe as _func_bdtrc_unsafe
+ctypedef double _proto_bdtrc_unsafe_t(double, double, double) nogil
+cdef _proto_bdtrc_unsafe_t *_proto_bdtrc_unsafe_t_var = &_func_bdtrc_unsafe
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_bdtrc "bdtrc"(double, int, double) nogil
+from ._legacy cimport bdtri_unsafe as _func_bdtri_unsafe
+ctypedef double _proto_bdtri_unsafe_t(double, double, double) nogil
+cdef _proto_bdtri_unsafe_t *_proto_bdtri_unsafe_t_var = &_func_bdtri_unsafe
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_bdtri "bdtri"(double, int, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cdfbin2_wrap "cdfbin2_wrap"(double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cdfbin3_wrap "cdfbin3_wrap"(double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_bei_wrap "bei_wrap"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_beip_wrap "beip_wrap"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_ber_wrap "ber_wrap"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_berp_wrap "berp_wrap"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_besselpoly "besselpoly"(double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_beta "beta"(double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_incbet "incbet"(double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_incbi "incbi"(double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_lbeta "lbeta"(double, double) nogil
+from .orthogonal_eval cimport binom as _func_binom
+ctypedef double _proto_binom_t(double, double) nogil
+cdef _proto_binom_t *_proto_binom_t_var = &_func_binom
+from ._boxcox cimport boxcox as _func_boxcox
+ctypedef double _proto_boxcox_t(double, double) nogil
+cdef _proto_boxcox_t *_proto_boxcox_t_var = &_func_boxcox
+from ._boxcox cimport boxcox1p as _func_boxcox1p
+ctypedef double _proto_boxcox1p_t(double, double) nogil
+cdef _proto_boxcox1p_t *_proto_boxcox1p_t_var = &_func_boxcox1p
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_btdtr "btdtr"(double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_incbi "incbi"(double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cdfbet3_wrap "cdfbet3_wrap"(double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cdfbet4_wrap "cdfbet4_wrap"(double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cbrt "cbrt"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_chdtr "chdtr"(double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_chdtrc "chdtrc"(double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_chdtri "chdtri"(double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cdfchi3_wrap "cdfchi3_wrap"(double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cdfchn1_wrap "cdfchn1_wrap"(double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cdfchn3_wrap "cdfchn3_wrap"(double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cdfchn4_wrap "cdfchn4_wrap"(double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cdfchn2_wrap "cdfchn2_wrap"(double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cosdg "cosdg"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cosm1 "cosm1"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cotdg "cotdg"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_ellpe "ellpe"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_ellie "ellie"(double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef int _func_ellpj "ellpj"(double, double, double *, double *, double *, double *) nogil
+from ._ellipk cimport ellipk as _func_ellipk
+ctypedef double _proto_ellipk_t(double) nogil
+cdef _proto_ellipk_t *_proto_ellipk_t_var = &_func_ellipk
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_ellik "ellik"(double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_ellpk "ellpk"(double) nogil
+from ._convex_analysis cimport entr as _func_entr
+ctypedef double _proto_entr_t(double) nogil
+cdef _proto_entr_t *_proto_entr_t_var = &_func_entr
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_erf "erf"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_erfc "erfc"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_erfcinv "erfcinv"(double) nogil
+from .orthogonal_eval cimport eval_chebyc as _func_eval_chebyc
+ctypedef double complex _proto_eval_chebyc_double_complex__t(double, double complex) nogil
+cdef _proto_eval_chebyc_double_complex__t *_proto_eval_chebyc_double_complex__t_var = &_func_eval_chebyc[double_complex]
+from .orthogonal_eval cimport eval_chebyc as _func_eval_chebyc
+ctypedef double _proto_eval_chebyc_double__t(double, double) nogil
+cdef _proto_eval_chebyc_double__t *_proto_eval_chebyc_double__t_var = &_func_eval_chebyc[double]
+from .orthogonal_eval cimport eval_chebyc_l as _func_eval_chebyc_l
+ctypedef double _proto_eval_chebyc_l_t(long, double) nogil
+cdef _proto_eval_chebyc_l_t *_proto_eval_chebyc_l_t_var = &_func_eval_chebyc_l
+from .orthogonal_eval cimport eval_chebys as _func_eval_chebys
+ctypedef double complex _proto_eval_chebys_double_complex__t(double, double complex) nogil
+cdef _proto_eval_chebys_double_complex__t *_proto_eval_chebys_double_complex__t_var = &_func_eval_chebys[double_complex]
+from .orthogonal_eval cimport eval_chebys as _func_eval_chebys
+ctypedef double _proto_eval_chebys_double__t(double, double) nogil
+cdef _proto_eval_chebys_double__t *_proto_eval_chebys_double__t_var = &_func_eval_chebys[double]
+from .orthogonal_eval cimport eval_chebys_l as _func_eval_chebys_l
+ctypedef double _proto_eval_chebys_l_t(long, double) nogil
+cdef _proto_eval_chebys_l_t *_proto_eval_chebys_l_t_var = &_func_eval_chebys_l
+from .orthogonal_eval cimport eval_chebyt as _func_eval_chebyt
+ctypedef double complex _proto_eval_chebyt_double_complex__t(double, double complex) nogil
+cdef _proto_eval_chebyt_double_complex__t *_proto_eval_chebyt_double_complex__t_var = &_func_eval_chebyt[double_complex]
+from .orthogonal_eval cimport eval_chebyt as _func_eval_chebyt
+ctypedef double _proto_eval_chebyt_double__t(double, double) nogil
+cdef _proto_eval_chebyt_double__t *_proto_eval_chebyt_double__t_var = &_func_eval_chebyt[double]
+from .orthogonal_eval cimport eval_chebyt_l as _func_eval_chebyt_l
+ctypedef double _proto_eval_chebyt_l_t(long, double) nogil
+cdef _proto_eval_chebyt_l_t *_proto_eval_chebyt_l_t_var = &_func_eval_chebyt_l
+from .orthogonal_eval cimport eval_chebyu as _func_eval_chebyu
+ctypedef double complex _proto_eval_chebyu_double_complex__t(double, double complex) nogil
+cdef _proto_eval_chebyu_double_complex__t *_proto_eval_chebyu_double_complex__t_var = &_func_eval_chebyu[double_complex]
+from .orthogonal_eval cimport eval_chebyu as _func_eval_chebyu
+ctypedef double _proto_eval_chebyu_double__t(double, double) nogil
+cdef _proto_eval_chebyu_double__t *_proto_eval_chebyu_double__t_var = &_func_eval_chebyu[double]
+from .orthogonal_eval cimport eval_chebyu_l as _func_eval_chebyu_l
+ctypedef double _proto_eval_chebyu_l_t(long, double) nogil
+cdef _proto_eval_chebyu_l_t *_proto_eval_chebyu_l_t_var = &_func_eval_chebyu_l
+from .orthogonal_eval cimport eval_gegenbauer as _func_eval_gegenbauer
+ctypedef double complex _proto_eval_gegenbauer_double_complex__t(double, double, double complex) nogil
+cdef _proto_eval_gegenbauer_double_complex__t *_proto_eval_gegenbauer_double_complex__t_var = &_func_eval_gegenbauer[double_complex]
+from .orthogonal_eval cimport eval_gegenbauer as _func_eval_gegenbauer
+ctypedef double _proto_eval_gegenbauer_double__t(double, double, double) nogil
+cdef _proto_eval_gegenbauer_double__t *_proto_eval_gegenbauer_double__t_var = &_func_eval_gegenbauer[double]
+from .orthogonal_eval cimport eval_gegenbauer_l as _func_eval_gegenbauer_l
+ctypedef double _proto_eval_gegenbauer_l_t(long, double, double) nogil
+cdef _proto_eval_gegenbauer_l_t *_proto_eval_gegenbauer_l_t_var = &_func_eval_gegenbauer_l
+from .orthogonal_eval cimport eval_genlaguerre as _func_eval_genlaguerre
+ctypedef double complex _proto_eval_genlaguerre_double_complex__t(double, double, double complex) nogil
+cdef _proto_eval_genlaguerre_double_complex__t *_proto_eval_genlaguerre_double_complex__t_var = &_func_eval_genlaguerre[double_complex]
+from .orthogonal_eval cimport eval_genlaguerre as _func_eval_genlaguerre
+ctypedef double _proto_eval_genlaguerre_double__t(double, double, double) nogil
+cdef _proto_eval_genlaguerre_double__t *_proto_eval_genlaguerre_double__t_var = &_func_eval_genlaguerre[double]
+from .orthogonal_eval cimport eval_genlaguerre_l as _func_eval_genlaguerre_l
+ctypedef double _proto_eval_genlaguerre_l_t(long, double, double) nogil
+cdef _proto_eval_genlaguerre_l_t *_proto_eval_genlaguerre_l_t_var = &_func_eval_genlaguerre_l
+from .orthogonal_eval cimport eval_hermite as _func_eval_hermite
+ctypedef double _proto_eval_hermite_t(long, double) nogil
+cdef _proto_eval_hermite_t *_proto_eval_hermite_t_var = &_func_eval_hermite
+from .orthogonal_eval cimport eval_hermitenorm as _func_eval_hermitenorm
+ctypedef double _proto_eval_hermitenorm_t(long, double) nogil
+cdef _proto_eval_hermitenorm_t *_proto_eval_hermitenorm_t_var = &_func_eval_hermitenorm
+from .orthogonal_eval cimport eval_jacobi as _func_eval_jacobi
+ctypedef double complex _proto_eval_jacobi_double_complex__t(double, double, double, double complex) nogil
+cdef _proto_eval_jacobi_double_complex__t *_proto_eval_jacobi_double_complex__t_var = &_func_eval_jacobi[double_complex]
+from .orthogonal_eval cimport eval_jacobi as _func_eval_jacobi
+ctypedef double _proto_eval_jacobi_double__t(double, double, double, double) nogil
+cdef _proto_eval_jacobi_double__t *_proto_eval_jacobi_double__t_var = &_func_eval_jacobi[double]
+from .orthogonal_eval cimport eval_jacobi_l as _func_eval_jacobi_l
+ctypedef double _proto_eval_jacobi_l_t(long, double, double, double) nogil
+cdef _proto_eval_jacobi_l_t *_proto_eval_jacobi_l_t_var = &_func_eval_jacobi_l
+from .orthogonal_eval cimport eval_laguerre as _func_eval_laguerre
+ctypedef double complex _proto_eval_laguerre_double_complex__t(double, double complex) nogil
+cdef _proto_eval_laguerre_double_complex__t *_proto_eval_laguerre_double_complex__t_var = &_func_eval_laguerre[double_complex]
+from .orthogonal_eval cimport eval_laguerre as _func_eval_laguerre
+ctypedef double _proto_eval_laguerre_double__t(double, double) nogil
+cdef _proto_eval_laguerre_double__t *_proto_eval_laguerre_double__t_var = &_func_eval_laguerre[double]
+from .orthogonal_eval cimport eval_laguerre_l as _func_eval_laguerre_l
+ctypedef double _proto_eval_laguerre_l_t(long, double) nogil
+cdef _proto_eval_laguerre_l_t *_proto_eval_laguerre_l_t_var = &_func_eval_laguerre_l
+from .orthogonal_eval cimport eval_legendre as _func_eval_legendre
+ctypedef double complex _proto_eval_legendre_double_complex__t(double, double complex) nogil
+cdef _proto_eval_legendre_double_complex__t *_proto_eval_legendre_double_complex__t_var = &_func_eval_legendre[double_complex]
+from .orthogonal_eval cimport eval_legendre as _func_eval_legendre
+ctypedef double _proto_eval_legendre_double__t(double, double) nogil
+cdef _proto_eval_legendre_double__t *_proto_eval_legendre_double__t_var = &_func_eval_legendre[double]
+from .orthogonal_eval cimport eval_legendre_l as _func_eval_legendre_l
+ctypedef double _proto_eval_legendre_l_t(long, double) nogil
+cdef _proto_eval_legendre_l_t *_proto_eval_legendre_l_t_var = &_func_eval_legendre_l
+from .orthogonal_eval cimport eval_sh_chebyt as _func_eval_sh_chebyt
+ctypedef double complex _proto_eval_sh_chebyt_double_complex__t(double, double complex) nogil
+cdef _proto_eval_sh_chebyt_double_complex__t *_proto_eval_sh_chebyt_double_complex__t_var = &_func_eval_sh_chebyt[double_complex]
+from .orthogonal_eval cimport eval_sh_chebyt as _func_eval_sh_chebyt
+ctypedef double _proto_eval_sh_chebyt_double__t(double, double) nogil
+cdef _proto_eval_sh_chebyt_double__t *_proto_eval_sh_chebyt_double__t_var = &_func_eval_sh_chebyt[double]
+from .orthogonal_eval cimport eval_sh_chebyt_l as _func_eval_sh_chebyt_l
+ctypedef double _proto_eval_sh_chebyt_l_t(long, double) nogil
+cdef _proto_eval_sh_chebyt_l_t *_proto_eval_sh_chebyt_l_t_var = &_func_eval_sh_chebyt_l
+from .orthogonal_eval cimport eval_sh_chebyu as _func_eval_sh_chebyu
+ctypedef double complex _proto_eval_sh_chebyu_double_complex__t(double, double complex) nogil
+cdef _proto_eval_sh_chebyu_double_complex__t *_proto_eval_sh_chebyu_double_complex__t_var = &_func_eval_sh_chebyu[double_complex]
+from .orthogonal_eval cimport eval_sh_chebyu as _func_eval_sh_chebyu
+ctypedef double _proto_eval_sh_chebyu_double__t(double, double) nogil
+cdef _proto_eval_sh_chebyu_double__t *_proto_eval_sh_chebyu_double__t_var = &_func_eval_sh_chebyu[double]
+from .orthogonal_eval cimport eval_sh_chebyu_l as _func_eval_sh_chebyu_l
+ctypedef double _proto_eval_sh_chebyu_l_t(long, double) nogil
+cdef _proto_eval_sh_chebyu_l_t *_proto_eval_sh_chebyu_l_t_var = &_func_eval_sh_chebyu_l
+from .orthogonal_eval cimport eval_sh_jacobi as _func_eval_sh_jacobi
+ctypedef double complex _proto_eval_sh_jacobi_double_complex__t(double, double, double, double complex) nogil
+cdef _proto_eval_sh_jacobi_double_complex__t *_proto_eval_sh_jacobi_double_complex__t_var = &_func_eval_sh_jacobi[double_complex]
+from .orthogonal_eval cimport eval_sh_jacobi as _func_eval_sh_jacobi
+ctypedef double _proto_eval_sh_jacobi_double__t(double, double, double, double) nogil
+cdef _proto_eval_sh_jacobi_double__t *_proto_eval_sh_jacobi_double__t_var = &_func_eval_sh_jacobi[double]
+from .orthogonal_eval cimport eval_sh_jacobi_l as _func_eval_sh_jacobi_l
+ctypedef double _proto_eval_sh_jacobi_l_t(long, double, double, double) nogil
+cdef _proto_eval_sh_jacobi_l_t *_proto_eval_sh_jacobi_l_t_var = &_func_eval_sh_jacobi_l
+from .orthogonal_eval cimport eval_sh_legendre as _func_eval_sh_legendre
+ctypedef double complex _proto_eval_sh_legendre_double_complex__t(double, double complex) nogil
+cdef _proto_eval_sh_legendre_double_complex__t *_proto_eval_sh_legendre_double_complex__t_var = &_func_eval_sh_legendre[double_complex]
+from .orthogonal_eval cimport eval_sh_legendre as _func_eval_sh_legendre
+ctypedef double _proto_eval_sh_legendre_double__t(double, double) nogil
+cdef _proto_eval_sh_legendre_double__t *_proto_eval_sh_legendre_double__t_var = &_func_eval_sh_legendre[double]
+from .orthogonal_eval cimport eval_sh_legendre_l as _func_eval_sh_legendre_l
+ctypedef double _proto_eval_sh_legendre_l_t(long, double) nogil
+cdef _proto_eval_sh_legendre_l_t *_proto_eval_sh_legendre_l_t_var = &_func_eval_sh_legendre_l
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double complex _func_cexp1_wrap "cexp1_wrap"(double complex) nogil
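+# The raw-string paths in these extern blocks record the Cirrus CI temp tree
+# where this wheel was built; cdef extern from simply emits an #include, so
+# the paths only matter if this generated .pyx is ever re-cythonized and
+# recompiled.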
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_exp1_wrap "exp1_wrap"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_exp10 "exp10"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_exp2 "exp2"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double complex _func_cexpi_wrap "cexpi_wrap"(double complex) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_expi_wrap "expi_wrap"(double) nogil
+from ._cunity cimport cexpm1 as _func_cexpm1
+ctypedef double complex _proto_cexpm1_t(double complex) nogil
+cdef _proto_cexpm1_t *_proto_cexpm1_t_var = &_func_cexpm1
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_expm1 "expm1"(double) nogil
+from ._legacy cimport expn_unsafe as _func_expn_unsafe
+ctypedef double _proto_expn_unsafe_t(double, double) nogil
+cdef _proto_expn_unsafe_t *_proto_expn_unsafe_t_var = &_func_expn_unsafe
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_expn "expn"(int, double) nogil
+from ._exprel cimport exprel as _func_exprel
+ctypedef double _proto_exprel_t(double) nogil
+cdef _proto_exprel_t *_proto_exprel_t_var = &_func_exprel
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_fdtr "fdtr"(double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_fdtrc "fdtrc"(double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_fdtri "fdtri"(double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cdff4_wrap "cdff4_wrap"(double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef int _func_fresnl "fresnl"(double, double *, double *) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef int _func_cfresnl_wrap "cfresnl_wrap"(double complex, double complex *, double complex *) nogil
+from ._loggamma cimport cgamma as _func_cgamma
+ctypedef double complex _proto_cgamma_t(double complex) nogil
+cdef _proto_cgamma_t *_proto_cgamma_t_var = &_func_cgamma
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_Gamma "Gamma"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_igam "igam"(double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_igamc "igamc"(double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_igamci "igamci"(double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_igami "igami"(double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_lgam "lgam"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_gammasgn "gammasgn"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_gdtr "gdtr"(double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_gdtrc "gdtrc"(double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cdfgam4_wrap "cdfgam4_wrap"(double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cdfgam3_wrap "cdfgam3_wrap"(double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cdfgam2_wrap "cdfgam2_wrap"(double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double complex _func_cbesh_wrap1 "cbesh_wrap1"(double, double complex) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double complex _func_cbesh_wrap1_e "cbesh_wrap1_e"(double, double complex) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double complex _func_cbesh_wrap2 "cbesh_wrap2"(double, double complex) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double complex _func_cbesh_wrap2_e "cbesh_wrap2_e"(double, double complex) nogil
+from ._convex_analysis cimport huber as _func_huber
+ctypedef double _proto_huber_t(double, double) nogil
+cdef _proto_huber_t *_proto_huber_t_var = &_func_huber
+from ._hyp0f1 cimport _hyp0f1_cmplx as _func__hyp0f1_cmplx
+ctypedef double complex _proto__hyp0f1_cmplx_t(double, double complex) nogil
+cdef _proto__hyp0f1_cmplx_t *_proto__hyp0f1_cmplx_t_var = &_func__hyp0f1_cmplx
+from ._hyp0f1 cimport _hyp0f1_real as _func__hyp0f1_real
+ctypedef double _proto__hyp0f1_real_t(double, double) nogil
+cdef _proto__hyp0f1_real_t *_proto__hyp0f1_real_t_var = &_func__hyp0f1_real
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double complex _func_chyp1f1_wrap "chyp1f1_wrap"(double, double, double complex) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_hyp2f1 "hyp2f1"(double, double, double, double) nogil
+from ._hyp2f1 cimport hyp2f1_complex as _func_hyp2f1_complex
+ctypedef double complex _proto_hyp2f1_complex_t(double, double, double, double complex) nogil
+cdef _proto_hyp2f1_complex_t *_proto_hyp2f1_complex_t_var = &_func_hyp2f1_complex
+from ._hypergeometric cimport hyperu as _func_hyperu
+ctypedef double _proto_hyperu_t(double, double, double) nogil
+cdef _proto_hyperu_t *_proto_hyperu_t_var = &_func_hyperu
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_i0 "i0"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_i0e "i0e"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_i1 "i1"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_i1e "i1e"(double) nogil
+from ._boxcox cimport inv_boxcox as _func_inv_boxcox
+ctypedef double _proto_inv_boxcox_t(double, double) nogil
+cdef _proto_inv_boxcox_t *_proto_inv_boxcox_t_var = &_func_inv_boxcox
+from ._boxcox cimport inv_boxcox1p as _func_inv_boxcox1p
+ctypedef double _proto_inv_boxcox1p_t(double, double) nogil
+cdef _proto_inv_boxcox1p_t *_proto_inv_boxcox1p_t_var = &_func_inv_boxcox1p
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef int _func_it2i0k0_wrap "it2i0k0_wrap"(double, double *, double *) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef int _func_it2j0y0_wrap "it2j0y0_wrap"(double, double *, double *) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_it2struve0_wrap "it2struve0_wrap"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef int _func_itairy_wrap "itairy_wrap"(double, double *, double *, double *, double *) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef int _func_it1i0k0_wrap "it1i0k0_wrap"(double, double *, double *) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef int _func_it1j0y0_wrap "it1j0y0_wrap"(double, double *, double *) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_itmodstruve0_wrap "itmodstruve0_wrap"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_itstruve0_wrap "itstruve0_wrap"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double complex _func_cbesi_wrap "cbesi_wrap"(double, double complex) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_iv "iv"(double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double complex _func_cbesi_wrap_e "cbesi_wrap_e"(double, double complex) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cbesi_wrap_e_real "cbesi_wrap_e_real"(double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_j0 "j0"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_j1 "j1"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double complex _func_cbesj_wrap "cbesj_wrap"(double, double complex) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cbesj_wrap_real "cbesj_wrap_real"(double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double complex _func_cbesj_wrap_e "cbesj_wrap_e"(double, double complex) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cbesj_wrap_e_real "cbesj_wrap_e_real"(double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_k0 "k0"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_k0e "k0e"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_k1 "k1"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_k1e "k1e"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_kei_wrap "kei_wrap"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_keip_wrap "keip_wrap"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef int _func_kelvin_wrap "kelvin_wrap"(double, double complex *, double complex *, double complex *, double complex *) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_ker_wrap "ker_wrap"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_kerp_wrap "kerp_wrap"(double) nogil
+from ._convex_analysis cimport kl_div as _func_kl_div
+ctypedef double _proto_kl_div_t(double, double) nogil
+cdef _proto_kl_div_t *_proto_kl_div_t_var = &_func_kl_div
+from ._legacy cimport kn_unsafe as _func_kn_unsafe
+ctypedef double _proto_kn_unsafe_t(double, double) nogil
+cdef _proto_kn_unsafe_t *_proto_kn_unsafe_t_var = &_func_kn_unsafe
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cbesk_wrap_real_int "cbesk_wrap_real_int"(int, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_kolmogi "kolmogi"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_kolmogorov "kolmogorov"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double complex _func_cbesk_wrap "cbesk_wrap"(double, double complex) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cbesk_wrap_real "cbesk_wrap_real"(double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double complex _func_cbesk_wrap_e "cbesk_wrap_e"(double, double complex) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cbesk_wrap_e_real "cbesk_wrap_e_real"(double, double) nogil
+from ._cunity cimport clog1p as _func_clog1p
+ctypedef double complex _proto_clog1p_t(double complex) nogil
+cdef _proto_clog1p_t *_proto_clog1p_t_var = &_func_clog1p
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_log1p "log1p"(double) nogil
+from ._loggamma cimport loggamma_real as _func_loggamma_real
+ctypedef double _proto_loggamma_real_t(double) nogil
+cdef _proto_loggamma_real_t *_proto_loggamma_real_t_var = &_func_loggamma_real
+from ._loggamma cimport loggamma as _func_loggamma
+ctypedef double complex _proto_loggamma_t(double complex) nogil
+cdef _proto_loggamma_t *_proto_loggamma_t_var = &_func_loggamma
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_pmv_wrap "pmv_wrap"(double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cem_cva_wrap "cem_cva_wrap"(double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_sem_cva_wrap "sem_cva_wrap"(double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef int _func_cem_wrap "cem_wrap"(double, double, double, double *, double *) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef int _func_mcm1_wrap "mcm1_wrap"(double, double, double, double *, double *) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef int _func_mcm2_wrap "mcm2_wrap"(double, double, double, double *, double *) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef int _func_msm1_wrap "msm1_wrap"(double, double, double, double *, double *) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef int _func_msm2_wrap "msm2_wrap"(double, double, double, double *, double *) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef int _func_sem_wrap "sem_wrap"(double, double, double, double *, double *) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef int _func_modified_fresnel_minus_wrap "modified_fresnel_minus_wrap"(double, double complex *, double complex *) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef int _func_modified_fresnel_plus_wrap "modified_fresnel_plus_wrap"(double, double complex *, double complex *) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_struve_l "struve_l"(double, double) nogil
+from ._legacy cimport nbdtr_unsafe as _func_nbdtr_unsafe
+ctypedef double _proto_nbdtr_unsafe_t(double, double, double) nogil
+cdef _proto_nbdtr_unsafe_t *_proto_nbdtr_unsafe_t_var = &_func_nbdtr_unsafe
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_nbdtr "nbdtr"(int, int, double) nogil
+from ._legacy cimport nbdtrc_unsafe as _func_nbdtrc_unsafe
+ctypedef double _proto_nbdtrc_unsafe_t(double, double, double) nogil
+cdef _proto_nbdtrc_unsafe_t *_proto_nbdtrc_unsafe_t_var = &_func_nbdtrc_unsafe
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_nbdtrc "nbdtrc"(int, int, double) nogil
+from ._legacy cimport nbdtri_unsafe as _func_nbdtri_unsafe
+ctypedef double _proto_nbdtri_unsafe_t(double, double, double) nogil
+cdef _proto_nbdtri_unsafe_t *_proto_nbdtri_unsafe_t_var = &_func_nbdtri_unsafe
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_nbdtri "nbdtri"(int, int, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cdfnbn2_wrap "cdfnbn2_wrap"(double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cdfnbn3_wrap "cdfnbn3_wrap"(double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cdffnc1_wrap "cdffnc1_wrap"(double, double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cdffnc2_wrap "cdffnc2_wrap"(double, double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cdffnc4_wrap "cdffnc4_wrap"(double, double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cdffnc3_wrap "cdffnc3_wrap"(double, double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cdffnc5_wrap "cdffnc5_wrap"(double, double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cdftnc1_wrap "cdftnc1_wrap"(double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cdftnc3_wrap "cdftnc3_wrap"(double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cdftnc4_wrap "cdftnc4_wrap"(double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cdftnc2_wrap "cdftnc2_wrap"(double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_ndtr "ndtr"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_ndtri "ndtri"(double) nogil
+from ._ndtri_exp cimport ndtri_exp as _func_ndtri_exp
+ctypedef double _proto_ndtri_exp_t(double) nogil
+cdef _proto_ndtri_exp_t *_proto_ndtri_exp_t_var = &_func_ndtri_exp
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cdfnor3_wrap "cdfnor3_wrap"(double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cdfnor4_wrap "cdfnor4_wrap"(double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_oblate_aswfa_nocv_wrap "oblate_aswfa_nocv_wrap"(double, double, double, double, double *) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef int _func_oblate_aswfa_wrap "oblate_aswfa_wrap"(double, double, double, double, double, double *, double *) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_oblate_segv_wrap "oblate_segv_wrap"(double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_oblate_radial1_nocv_wrap "oblate_radial1_nocv_wrap"(double, double, double, double, double *) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef int _func_oblate_radial1_wrap "oblate_radial1_wrap"(double, double, double, double, double, double *, double *) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_oblate_radial2_nocv_wrap "oblate_radial2_nocv_wrap"(double, double, double, double, double *) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef int _func_oblate_radial2_wrap "oblate_radial2_wrap"(double, double, double, double, double, double *, double *) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_owens_t "owens_t"(double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef int _func_pbdv_wrap "pbdv_wrap"(double, double, double *, double *) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef int _func_pbvv_wrap "pbvv_wrap"(double, double, double *, double *) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef int _func_pbwa_wrap "pbwa_wrap"(double, double, double *, double *) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_pdtr "pdtr"(double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_pdtrc "pdtrc"(double, double) nogil
+from ._legacy cimport pdtri_unsafe as _func_pdtri_unsafe
+ctypedef double _proto_pdtri_unsafe_t(double, double) nogil
+cdef _proto_pdtri_unsafe_t *_proto_pdtri_unsafe_t_var = &_func_pdtri_unsafe
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_pdtri "pdtri"(int, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cdfpoi2_wrap "cdfpoi2_wrap"(double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_poch "poch"(double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_prolate_aswfa_nocv_wrap "prolate_aswfa_nocv_wrap"(double, double, double, double, double *) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef int _func_prolate_aswfa_wrap "prolate_aswfa_wrap"(double, double, double, double, double, double *, double *) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_prolate_segv_wrap "prolate_segv_wrap"(double, double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_prolate_radial1_nocv_wrap "prolate_radial1_nocv_wrap"(double, double, double, double, double *) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef int _func_prolate_radial1_wrap "prolate_radial1_wrap"(double, double, double, double, double, double *, double *) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_prolate_radial2_nocv_wrap "prolate_radial2_nocv_wrap"(double, double, double, double, double *) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef int _func_prolate_radial2_wrap "prolate_radial2_wrap"(double, double, double, double, double, double *, double *) nogil
+from ._convex_analysis cimport pseudo_huber as _func_pseudo_huber
+ctypedef double _proto_pseudo_huber_t(double, double) nogil
+cdef _proto_pseudo_huber_t *_proto_pseudo_huber_t_var = &_func_pseudo_huber
+from ._digamma cimport cdigamma as _func_cdigamma
+ctypedef double complex _proto_cdigamma_t(double complex) nogil
+cdef _proto_cdigamma_t *_proto_cdigamma_t_var = &_func_cdigamma
+from ._digamma cimport digamma as _func_digamma
+ctypedef double _proto_digamma_t(double) nogil
+cdef _proto_digamma_t *_proto_digamma_t_var = &_func_digamma
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_radian "radian"(double, double, double) nogil
+from ._convex_analysis cimport rel_entr as _func_rel_entr
+ctypedef double _proto_rel_entr_t(double, double) nogil
+cdef _proto_rel_entr_t *_proto_rel_entr_t_var = &_func_rel_entr
+from ._loggamma cimport crgamma as _func_crgamma
+ctypedef double complex _proto_crgamma_t(double complex) nogil
+cdef _proto_crgamma_t *_proto_crgamma_t_var = &_func_crgamma
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_rgamma "rgamma"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_round "round"(double) nogil
+from ._sici cimport cshichi as _func_cshichi
+ctypedef int _proto_cshichi_t(double complex, double complex *, double complex *) nogil
+cdef _proto_cshichi_t *_proto_cshichi_t_var = &_func_cshichi
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef int _func_shichi "shichi"(double, double *, double *) nogil
+from ._sici cimport csici as _func_csici
+ctypedef int _proto_csici_t(double complex, double complex *, double complex *) nogil
+cdef _proto_csici_t *_proto_csici_t_var = &_func_csici
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef int _func_sici "sici"(double, double *, double *) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_sindg "sindg"(double) nogil
+from ._legacy cimport smirnov_unsafe as _func_smirnov_unsafe
+ctypedef double _proto_smirnov_unsafe_t(double, double) nogil
+cdef _proto_smirnov_unsafe_t *_proto_smirnov_unsafe_t_var = &_func_smirnov_unsafe
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_smirnov "smirnov"(int, double) nogil
+from ._legacy cimport smirnovi_unsafe as _func_smirnovi_unsafe
+ctypedef double _proto_smirnovi_unsafe_t(double, double) nogil
+cdef _proto_smirnovi_unsafe_t *_proto_smirnovi_unsafe_t_var = &_func_smirnovi_unsafe
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_smirnovi "smirnovi"(int, double) nogil
+from ._spence cimport cspence as _func_cspence
+ctypedef double complex _proto_cspence_t(double complex) nogil
+cdef _proto_cspence_t *_proto_cspence_t_var = &_func_cspence
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_spence "spence"(double) nogil
+from ._legacy cimport sph_harmonic_unsafe as _func_sph_harmonic_unsafe
+ctypedef double complex _proto_sph_harmonic_unsafe_t(double, double, double, double) nogil
+cdef _proto_sph_harmonic_unsafe_t *_proto_sph_harmonic_unsafe_t_var = &_func_sph_harmonic_unsafe
+from .sph_harm cimport sph_harmonic as _func_sph_harmonic
+ctypedef double complex _proto_sph_harmonic_t(int, int, double, double) nogil
+cdef _proto_sph_harmonic_t *_proto_sph_harmonic_t_var = &_func_sph_harmonic
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cdft1_wrap "cdft1_wrap"(double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cdft3_wrap "cdft3_wrap"(double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cdft2_wrap "cdft2_wrap"(double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_struve_h "struve_h"(double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_tandg "tandg"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_tukeylambdacdf "tukeylambdacdf"(double, double) nogil
+from ._wright_bessel cimport wright_bessel_scalar as _func_wright_bessel_scalar
+ctypedef double _proto_wright_bessel_scalar_t(double, double, double) nogil
+cdef _proto_wright_bessel_scalar_t *_proto_wright_bessel_scalar_t_var = &_func_wright_bessel_scalar
+from ._xlogy cimport xlog1py as _func_xlog1py
+ctypedef double _proto_xlog1py_double__t(double, double) nogil
+cdef _proto_xlog1py_double__t *_proto_xlog1py_double__t_var = &_func_xlog1py[double]
+from ._xlogy cimport xlog1py as _func_xlog1py
+ctypedef double complex _proto_xlog1py_double_complex__t(double complex, double complex) nogil
+cdef _proto_xlog1py_double_complex__t *_proto_xlog1py_double_complex__t_var = &_func_xlog1py[double_complex]
+from ._xlogy cimport xlogy as _func_xlogy
+ctypedef double _proto_xlogy_double__t(double, double) nogil
+cdef _proto_xlogy_double__t *_proto_xlogy_double__t_var = &_func_xlogy[double]
+from ._xlogy cimport xlogy as _func_xlogy
+ctypedef double complex _proto_xlogy_double_complex__t(double complex, double complex) nogil
+cdef _proto_xlogy_double_complex__t *_proto_xlogy_double_complex__t_var = &_func_xlogy[double_complex]
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_y0 "y0"(double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_y1 "y1"(double) nogil
+from ._legacy cimport yn_unsafe as _func_yn_unsafe
+ctypedef double _proto_yn_unsafe_t(double, double) nogil
+cdef _proto_yn_unsafe_t *_proto_yn_unsafe_t_var = &_func_yn_unsafe
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_yn "yn"(int, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double complex _func_cbesy_wrap "cbesy_wrap"(double, double complex) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cbesy_wrap_real "cbesy_wrap_real"(double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double complex _func_cbesy_wrap_e "cbesy_wrap_e"(double, double complex) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_cbesy_wrap_e_real "cbesy_wrap_e_real"(double, double) nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef double _func_zetac "zetac"(double) nogil
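+# Each ufunc below is assembled from four parallel tables that feed a single
+# np.PyUFunc_FromFuncAndData call:
+#   loops[i]             - generic inner-loop function for type signature i
+#   types[]              - flattened (nin + nout) NumPy type codes per signature
+#   ptr[2*i], ptr[2*i+1] - scalar kernel plus its name (used in error messages)
+#   data[i]              - pointer handed to loop i, i.e. &ptr[2*i]
+# A minimal sketch of the same pattern for a hypothetical one-argument kernel
+# (my_kernel, my_ufunc and my_doc are illustrative names, not part of this
+# file):
+#
+#     cdef np.PyUFuncGenericFunction my_loops[1]
+#     cdef void *my_ptr[2]
+#     cdef void *my_data[1]
+#     cdef char my_types[2]
+#     my_loops[0] = <np.PyUFuncGenericFunction>loop_d_d__As_d_d
+#     my_types[0] = <char>NPY_DOUBLE
+#     my_types[1] = <char>NPY_DOUBLE
+#     my_ptr[2*0] = <void*>_func_my_kernel
+#     my_ptr[2*0+1] = <void*>(<char*>"my_ufunc")
+#     my_data[0] = &my_ptr[2*0]
+#     my_ufunc = np.PyUFunc_FromFuncAndData(my_loops, my_data, my_types,
+#                                           1, 1, 1, 0, "my_ufunc", my_doc, 0)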
+cdef np.PyUFuncGenericFunction ufunc__cosine_cdf_loops[2]
+cdef void *ufunc__cosine_cdf_ptr[4]
+cdef void *ufunc__cosine_cdf_data[2]
+cdef char ufunc__cosine_cdf_types[4]
+cdef char *ufunc__cosine_cdf_doc = (
+    "_cosine_cdf(x)\n"
+    "\n"
+    "Cumulative distribution function (CDF) of the cosine distribution::\n"
+    "\n"
+    "             {             0,              x < -pi\n"
+    "    cdf(x) = { (pi + x + sin(x))/(2*pi),   -pi <= x <= pi\n"
+    "             {             1,              x > pi\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    `x` must contain real numbers.\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    The cosine distribution CDF evaluated at `x`.")
+ufunc__cosine_cdf_loops[0] = <np.PyUFuncGenericFunction>loop_d_d__As_f_f
+ufunc__cosine_cdf_loops[1] = <np.PyUFuncGenericFunction>loop_d_d__As_d_d
+ufunc__cosine_cdf_types[0] = <char>NPY_FLOAT
+ufunc__cosine_cdf_types[1] = <char>NPY_FLOAT
+ufunc__cosine_cdf_types[2] = <char>NPY_DOUBLE
+ufunc__cosine_cdf_types[3] = <char>NPY_DOUBLE
+ufunc__cosine_cdf_ptr[2*0] = <void*>_func_cosine_cdf
+ufunc__cosine_cdf_ptr[2*0+1] = <void*>(<char*>"_cosine_cdf")
+ufunc__cosine_cdf_ptr[2*1] = <void*>_func_cosine_cdf
+ufunc__cosine_cdf_ptr[2*1+1] = <void*>(<char*>"_cosine_cdf")
+ufunc__cosine_cdf_data[0] = &ufunc__cosine_cdf_ptr[2*0]
+ufunc__cosine_cdf_data[1] = &ufunc__cosine_cdf_ptr[2*1]
+_cosine_cdf = np.PyUFunc_FromFuncAndData(ufunc__cosine_cdf_loops, ufunc__cosine_cdf_data, ufunc__cosine_cdf_types, 2, 1, 1, 0, "_cosine_cdf", ufunc__cosine_cdf_doc, 0)
+
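+# Usage sketch (assuming this module is importable as scipy.special._ufuncs;
+# the exact import path is an assumption): the object created above behaves
+# like any NumPy ufunc and broadcasts elementwise, e.g.
+#
+#     >>> import numpy as np
+#     >>> from scipy.special._ufuncs import _cosine_cdf
+#     >>> _cosine_cdf(np.array([-np.pi, 0.0, np.pi]))
+#     array([0. , 0.5, 1. ])
+#
+# which matches the piecewise formula in the docstring: at x = 0 it gives
+# (pi + 0 + sin 0)/(2*pi) = 0.5.
+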
+cdef np.PyUFuncGenericFunction ufunc__cosine_invcdf_loops[2]
+cdef void *ufunc__cosine_invcdf_ptr[4]
+cdef void *ufunc__cosine_invcdf_data[2]
+cdef char ufunc__cosine_invcdf_types[4]
+cdef char *ufunc__cosine_invcdf_doc = (
+    "_cosine_invcdf(p)\n"
+    "\n"
+    "Inverse of the cumulative distribution function (CDF) of the cosine\n"
+    "distribution.\n"
+    "\n"
+    "The CDF of the cosine distribution is::\n"
+    "\n"
+    "    cdf(x) = (pi + x + sin(x))/(2*pi)\n"
+    "\n"
+    "This function computes the inverse of cdf(x).\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "p : array_like\n"
+    "    `p` must contain real numbers in the interval ``0 <= p <= 1``.\n"
+    "    `nan` is returned for values of `p` outside the interval [0, 1].\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    The inverse of the cosine distribution CDF evaluated at `p`.")
+ufunc__cosine_invcdf_loops[0] = <np.PyUFuncGenericFunction>loop_d_d__As_f_f
+ufunc__cosine_invcdf_loops[1] = <np.PyUFuncGenericFunction>loop_d_d__As_d_d
+ufunc__cosine_invcdf_types[0] = <char>NPY_FLOAT
+ufunc__cosine_invcdf_types[1] = <char>NPY_FLOAT
+ufunc__cosine_invcdf_types[2] = <char>NPY_DOUBLE
+ufunc__cosine_invcdf_types[3] = <char>NPY_DOUBLE
+ufunc__cosine_invcdf_ptr[2*0] = <void*>_func_cosine_invcdf
+ufunc__cosine_invcdf_ptr[2*0+1] = <void*>(<char*>"_cosine_invcdf")
+ufunc__cosine_invcdf_ptr[2*1] = <void*>_func_cosine_invcdf
+ufunc__cosine_invcdf_ptr[2*1+1] = <void*>(<char*>"_cosine_invcdf")
+ufunc__cosine_invcdf_data[0] = &ufunc__cosine_invcdf_ptr[2*0]
+ufunc__cosine_invcdf_data[1] = &ufunc__cosine_invcdf_ptr[2*1]
+_cosine_invcdf = np.PyUFunc_FromFuncAndData(ufunc__cosine_invcdf_loops, ufunc__cosine_invcdf_data, ufunc__cosine_invcdf_types, 2, 1, 1, 0, "_cosine_invcdf", ufunc__cosine_invcdf_doc, 0)
+
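+# Round-trip sketch (same import-path assumption as for _cosine_cdf): since
+# cdf(0) = 0.5, the inverse should satisfy
+#
+#     >>> from scipy.special._ufuncs import _cosine_invcdf
+#     >>> float(_cosine_invcdf(0.5))
+#     0.0
+#
+# and, per the docstring, any p outside [0, 1] yields nan.
+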
+cdef np.PyUFuncGenericFunction ufunc__cospi_loops[4]
+cdef void *ufunc__cospi_ptr[8]
+cdef void *ufunc__cospi_data[4]
+cdef char ufunc__cospi_types[8]
+cdef char *ufunc__cospi_doc = (
+    "Internal function, do not use.")
+ufunc__cospi_loops[0] = <np.PyUFuncGenericFunction>loop_d_d__As_f_f
+ufunc__cospi_loops[1] = <np.PyUFuncGenericFunction>loop_d_d__As_d_d
+ufunc__cospi_loops[2] = <np.PyUFuncGenericFunction>loop_D_D__As_F_F
+ufunc__cospi_loops[3] = <np.PyUFuncGenericFunction>loop_D_D__As_D_D
+ufunc__cospi_types[0] = <char>NPY_FLOAT
+ufunc__cospi_types[1] = <char>NPY_FLOAT
+ufunc__cospi_types[2] = <char>NPY_DOUBLE
+ufunc__cospi_types[3] = <char>NPY_DOUBLE
+ufunc__cospi_types[4] = <char>NPY_CFLOAT
+ufunc__cospi_types[5] = <char>NPY_CFLOAT
+ufunc__cospi_types[6] = <char>NPY_CDOUBLE
+ufunc__cospi_types[7] = <char>NPY_CDOUBLE
+ufunc__cospi_ptr[2*0] = <void*>_func_cospi
+ufunc__cospi_ptr[2*0+1] = <void*>(<char*>"_cospi")
+ufunc__cospi_ptr[2*1] = <void*>_func_cospi
+ufunc__cospi_ptr[2*1+1] = <void*>(<char*>"_cospi")
+ufunc__cospi_ptr[2*2] = <void*>_func_ccospi
+ufunc__cospi_ptr[2*2+1] = <void*>(<char*>"_cospi")
+ufunc__cospi_ptr[2*3] = <void*>_func_ccospi
+ufunc__cospi_ptr[2*3+1] = <void*>(<char*>"_cospi")
+ufunc__cospi_data[0] = &ufunc__cospi_ptr[2*0]
+ufunc__cospi_data[1] = &ufunc__cospi_ptr[2*1]
+ufunc__cospi_data[2] = &ufunc__cospi_ptr[2*2]
+ufunc__cospi_data[3] = &ufunc__cospi_ptr[2*3]
+_cospi = np.PyUFunc_FromFuncAndData(ufunc__cospi_loops, ufunc__cospi_data, ufunc__cospi_types, 4, 1, 1, 0, "_cospi", ufunc__cospi_doc, 0)
+
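+# _cospi registers four loops; NumPy selects one by matching the (input,
+# output) pairs in ufunc__cospi_types. float32/float64 inputs dispatch to the
+# real kernel _func_cospi (the float32 loop computes in double precision and
+# casts back, per the loop name loop_d_d__As_f_f), while complex inputs
+# dispatch to _func_ccospi.
+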
+cdef np.PyUFuncGenericFunction ufunc__ellip_harm_loops[3]
+cdef void *ufunc__ellip_harm_ptr[6]
+cdef void *ufunc__ellip_harm_data[3]
+cdef char ufunc__ellip_harm_types[24]
+cdef char *ufunc__ellip_harm_doc = (
+    "Internal function, use `ellip_harm` instead.")
+ufunc__ellip_harm_loops[0] = <np.PyUFuncGenericFunction>loop_d_ddddddd__As_fffffff_f
+ufunc__ellip_harm_loops[1] = <np.PyUFuncGenericFunction>loop_d_ddiiddd__As_ddllddd_d
+ufunc__ellip_harm_loops[2] = <np.PyUFuncGenericFunction>loop_d_ddddddd__As_ddddddd_d
+ufunc__ellip_harm_types[0] = <char>NPY_FLOAT
+ufunc__ellip_harm_types[1] = <char>NPY_FLOAT
+ufunc__ellip_harm_types[2] = <char>NPY_FLOAT
+ufunc__ellip_harm_types[3] = <char>NPY_FLOAT
+ufunc__ellip_harm_types[4] = <char>NPY_FLOAT
+ufunc__ellip_harm_types[5] = <char>NPY_FLOAT
+ufunc__ellip_harm_types[6] = <char>NPY_FLOAT
+ufunc__ellip_harm_types[7] = <char>NPY_FLOAT
+ufunc__ellip_harm_types[8] = <char>NPY_DOUBLE
+ufunc__ellip_harm_types[9] = <char>NPY_DOUBLE
+ufunc__ellip_harm_types[10] = <char>NPY_LONG
+ufunc__ellip_harm_types[11] = <char>NPY_LONG
+ufunc__ellip_harm_types[12] = <char>NPY_DOUBLE
+ufunc__ellip_harm_types[13] = <char>NPY_DOUBLE
+ufunc__ellip_harm_types[14] = <char>NPY_DOUBLE
+ufunc__ellip_harm_types[15] = <char>NPY_DOUBLE
+ufunc__ellip_harm_types[16] = <char>NPY_DOUBLE
+ufunc__ellip_harm_types[17] = <char>NPY_DOUBLE
+ufunc__ellip_harm_types[18] = <char>NPY_DOUBLE
+ufunc__ellip_harm_types[19] = <char>NPY_DOUBLE
+ufunc__ellip_harm_types[20] = <char>NPY_DOUBLE
+ufunc__ellip_harm_types[21] = <char>NPY_DOUBLE
+ufunc__ellip_harm_types[22] = <char>NPY_DOUBLE
+ufunc__ellip_harm_types[23] = <char>NPY_DOUBLE
+ufunc__ellip_harm_ptr[2*0] = <void*>_func_ellip_harmonic_unsafe
+ufunc__ellip_harm_ptr[2*0+1] = <void*>(<char*>"_ellip_harm")
+ufunc__ellip_harm_ptr[2*1] = <void*>_func_ellip_harmonic
+ufunc__ellip_harm_ptr[2*1+1] = <void*>(<char*>"_ellip_harm")
+ufunc__ellip_harm_ptr[2*2] = <void*>_func_ellip_harmonic_unsafe
+ufunc__ellip_harm_ptr[2*2+1] = <void*>(<char*>"_ellip_harm")
+ufunc__ellip_harm_data[0] = &ufunc__ellip_harm_ptr[2*0]
+ufunc__ellip_harm_data[1] = &ufunc__ellip_harm_ptr[2*1]
+ufunc__ellip_harm_data[2] = &ufunc__ellip_harm_ptr[2*2]
+_ellip_harm = np.PyUFunc_FromFuncAndData(ufunc__ellip_harm_loops, ufunc__ellip_harm_data, ufunc__ellip_harm_types, 3, 7, 1, 0, "_ellip_harm", ufunc__ellip_harm_doc, 0)
+
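+# _ellip_harm is a 7-input, 1-output ufunc (the "3, 7, 1" arguments above are
+# the number of loops, nin and nout). Loop 1 takes the two order parameters as
+# NPY_LONG and calls the typed kernel _func_ellip_harmonic directly; the
+# all-float loops route through _func_ellip_harmonic_unsafe, which receives
+# the orders as doubles and converts them internally.
+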
+cdef np.PyUFuncGenericFunction ufunc__factorial_loops[2]
+cdef void *ufunc__factorial_ptr[4]
+cdef void *ufunc__factorial_data[2]
+cdef char ufunc__factorial_types[4]
+cdef char *ufunc__factorial_doc = (
+    "Internal function, do not use.")
+ufunc__factorial_loops[0] = <np.PyUFuncGenericFunction>loop_d_d__As_f_f
+ufunc__factorial_loops[1] = <np.PyUFuncGenericFunction>loop_d_d__As_d_d
+ufunc__factorial_types[0] = <char>NPY_FLOAT
+ufunc__factorial_types[1] = <char>NPY_FLOAT
+ufunc__factorial_types[2] = <char>NPY_DOUBLE
+ufunc__factorial_types[3] = <char>NPY_DOUBLE
+ufunc__factorial_ptr[2*0] = <void*>_func__factorial
+ufunc__factorial_ptr[2*0+1] = <void*>(<char*>"_factorial")
+ufunc__factorial_ptr[2*1] = <void*>_func__factorial
+ufunc__factorial_ptr[2*1+1] = <void*>(<char*>"_factorial")
+ufunc__factorial_data[0] = &ufunc__factorial_ptr[2*0]
+ufunc__factorial_data[1] = &ufunc__factorial_ptr[2*1]
+_factorial = np.PyUFunc_FromFuncAndData(ufunc__factorial_loops, ufunc__factorial_data, ufunc__factorial_types, 2, 1, 1, 0, "_factorial", ufunc__factorial_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc__igam_fac_loops[2]
+cdef void *ufunc__igam_fac_ptr[4]
+cdef void *ufunc__igam_fac_data[2]
+cdef char ufunc__igam_fac_types[6]
+cdef char *ufunc__igam_fac_doc = (
+    "Internal function, do not use.")
+ufunc__igam_fac_loops[0] = <np.PyUFuncGenericFunction>loop_d_dd__As_ff_f
+ufunc__igam_fac_loops[1] = <np.PyUFuncGenericFunction>loop_d_dd__As_dd_d
+ufunc__igam_fac_types[0] = <char>NPY_FLOAT
+ufunc__igam_fac_types[1] = <char>NPY_FLOAT
+ufunc__igam_fac_types[2] = <char>NPY_FLOAT
+ufunc__igam_fac_types[3] = <char>NPY_DOUBLE
+ufunc__igam_fac_types[4] = <char>NPY_DOUBLE
+ufunc__igam_fac_types[5] = <char>NPY_DOUBLE
+ufunc__igam_fac_ptr[2*0] = <void*>_func_igam_fac
+ufunc__igam_fac_ptr[2*0+1] = <void*>(<char*>"_igam_fac")
+ufunc__igam_fac_ptr[2*1] = <void*>_func_igam_fac
+ufunc__igam_fac_ptr[2*1+1] = <void*>(<char*>"_igam_fac")
+ufunc__igam_fac_data[0] = &ufunc__igam_fac_ptr[2*0]
+ufunc__igam_fac_data[1] = &ufunc__igam_fac_ptr[2*1]
+_igam_fac = np.PyUFunc_FromFuncAndData(ufunc__igam_fac_loops, ufunc__igam_fac_data, ufunc__igam_fac_types, 2, 2, 1, 0, "_igam_fac", ufunc__igam_fac_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc__kolmogc_loops[2]
+cdef void *ufunc__kolmogc_ptr[4]
+cdef void *ufunc__kolmogc_data[2]
+cdef char ufunc__kolmogc_types[4]
+cdef char *ufunc__kolmogc_doc = (
+    "Internal function, do not use.")
+ufunc__kolmogc_loops[0] = <np.PyUFuncGenericFunction>loop_d_d__As_f_f
+ufunc__kolmogc_loops[1] = <np.PyUFuncGenericFunction>loop_d_d__As_d_d
+ufunc__kolmogc_types[0] = <char>NPY_FLOAT
+ufunc__kolmogc_types[1] = <char>NPY_FLOAT
+ufunc__kolmogc_types[2] = <char>NPY_DOUBLE
+ufunc__kolmogc_types[3] = <char>NPY_DOUBLE
+ufunc__kolmogc_ptr[2*0] = <void*>_func_kolmogc
+ufunc__kolmogc_ptr[2*0+1] = <void*>(<char*>"_kolmogc")
+ufunc__kolmogc_ptr[2*1] = <void*>_func_kolmogc
+ufunc__kolmogc_ptr[2*1+1] = <void*>(<char*>"_kolmogc")
+ufunc__kolmogc_data[0] = &ufunc__kolmogc_ptr[2*0]
+ufunc__kolmogc_data[1] = &ufunc__kolmogc_ptr[2*1]
+_kolmogc = np.PyUFunc_FromFuncAndData(ufunc__kolmogc_loops, ufunc__kolmogc_data, ufunc__kolmogc_types, 2, 1, 1, 0, "_kolmogc", ufunc__kolmogc_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc__kolmogci_loops[2]
+cdef void *ufunc__kolmogci_ptr[4]
+cdef void *ufunc__kolmogci_data[2]
+cdef char ufunc__kolmogci_types[4]
+cdef char *ufunc__kolmogci_doc = (
+    "Internal function, do not use.")
+ufunc__kolmogci_loops[0] = loop_d_d__As_f_f
+ufunc__kolmogci_loops[1] = loop_d_d__As_d_d
+ufunc__kolmogci_types[0] = NPY_FLOAT
+ufunc__kolmogci_types[1] = NPY_FLOAT
+ufunc__kolmogci_types[2] = NPY_DOUBLE
+ufunc__kolmogci_types[3] = NPY_DOUBLE
+ufunc__kolmogci_ptr[2*0] = _func_kolmogci
+ufunc__kolmogci_ptr[2*0+1] = ("_kolmogci")
+ufunc__kolmogci_ptr[2*1] = _func_kolmogci
+ufunc__kolmogci_ptr[2*1+1] = ("_kolmogci")
+ufunc__kolmogci_data[0] = &ufunc__kolmogci_ptr[2*0]
+ufunc__kolmogci_data[1] = &ufunc__kolmogci_ptr[2*1]
+_kolmogci = np.PyUFunc_FromFuncAndData(ufunc__kolmogci_loops, ufunc__kolmogci_data, ufunc__kolmogci_types, 2, 1, 1, 0, "_kolmogci", ufunc__kolmogci_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc__kolmogp_loops[2]
+cdef void *ufunc__kolmogp_ptr[4]
+cdef void *ufunc__kolmogp_data[2]
+cdef char ufunc__kolmogp_types[4]
+cdef char *ufunc__kolmogp_doc = (
+    "Internal function, do not use.")
+ufunc__kolmogp_loops[0] = loop_d_d__As_f_f
+ufunc__kolmogp_loops[1] = loop_d_d__As_d_d
+ufunc__kolmogp_types[0] = NPY_FLOAT
+ufunc__kolmogp_types[1] = NPY_FLOAT
+ufunc__kolmogp_types[2] = NPY_DOUBLE
+ufunc__kolmogp_types[3] = NPY_DOUBLE
+ufunc__kolmogp_ptr[2*0] = _func_kolmogp
+ufunc__kolmogp_ptr[2*0+1] = ("_kolmogp")
+ufunc__kolmogp_ptr[2*1] = _func_kolmogp
+ufunc__kolmogp_ptr[2*1+1] = ("_kolmogp")
+ufunc__kolmogp_data[0] = &ufunc__kolmogp_ptr[2*0]
+ufunc__kolmogp_data[1] = &ufunc__kolmogp_ptr[2*1]
+_kolmogp = np.PyUFunc_FromFuncAndData(ufunc__kolmogp_loops, ufunc__kolmogp_data, ufunc__kolmogp_types, 2, 1, 1, 0, "_kolmogp", ufunc__kolmogp_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc__lambertw_loops[1]
+cdef void *ufunc__lambertw_ptr[2]
+cdef void *ufunc__lambertw_data[1]
+cdef char ufunc__lambertw_types[4]
+cdef char *ufunc__lambertw_doc = (
+    "Internal function, use `lambertw` instead.")
+ufunc__lambertw_loops[0] = loop_D_Dld__As_Dld_D
+ufunc__lambertw_types[0] = NPY_CDOUBLE
+ufunc__lambertw_types[1] = NPY_LONG
+ufunc__lambertw_types[2] = NPY_DOUBLE
+ufunc__lambertw_types[3] = NPY_CDOUBLE
+ufunc__lambertw_ptr[2*0] = _func_lambertw_scalar
+ufunc__lambertw_ptr[2*0+1] = ("_lambertw")
+ufunc__lambertw_data[0] = &ufunc__lambertw_ptr[2*0]
+_lambertw = np.PyUFunc_FromFuncAndData(ufunc__lambertw_loops, ufunc__lambertw_data, ufunc__lambertw_types, 1, 3, 1, 0, "_lambertw", ufunc__lambertw_doc, 0)
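+# Note: _lambertw is the only ufunc in this stretch with a mixed-type
+# signature, (cdouble, long, double) -> cdouble. Per the loop name
+# loop_D_Dld__As_Dld_D, the three inputs are presumably the argument z, the
+# integer branch index k, and a tolerance, matching the Python-level
+# `lambertw(z, k, tol)` wrapper.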
+
+cdef np.PyUFuncGenericFunction ufunc__lanczos_sum_expg_scaled_loops[2]
+cdef void *ufunc__lanczos_sum_expg_scaled_ptr[4]
+cdef void *ufunc__lanczos_sum_expg_scaled_data[2]
+cdef char ufunc__lanczos_sum_expg_scaled_types[4]
+cdef char *ufunc__lanczos_sum_expg_scaled_doc = (
+    "Internal function, do not use.")
+ufunc__lanczos_sum_expg_scaled_loops[0] = loop_d_d__As_f_f
+ufunc__lanczos_sum_expg_scaled_loops[1] = loop_d_d__As_d_d
+ufunc__lanczos_sum_expg_scaled_types[0] = NPY_FLOAT
+ufunc__lanczos_sum_expg_scaled_types[1] = NPY_FLOAT
+ufunc__lanczos_sum_expg_scaled_types[2] = NPY_DOUBLE
+ufunc__lanczos_sum_expg_scaled_types[3] = NPY_DOUBLE
+ufunc__lanczos_sum_expg_scaled_ptr[2*0] = _func_lanczos_sum_expg_scaled
+ufunc__lanczos_sum_expg_scaled_ptr[2*0+1] = ("_lanczos_sum_expg_scaled")
+ufunc__lanczos_sum_expg_scaled_ptr[2*1] = _func_lanczos_sum_expg_scaled
+ufunc__lanczos_sum_expg_scaled_ptr[2*1+1] = ("_lanczos_sum_expg_scaled")
+ufunc__lanczos_sum_expg_scaled_data[0] = &ufunc__lanczos_sum_expg_scaled_ptr[2*0]
+ufunc__lanczos_sum_expg_scaled_data[1] = &ufunc__lanczos_sum_expg_scaled_ptr[2*1]
+_lanczos_sum_expg_scaled = np.PyUFunc_FromFuncAndData(ufunc__lanczos_sum_expg_scaled_loops, ufunc__lanczos_sum_expg_scaled_data, ufunc__lanczos_sum_expg_scaled_types, 2, 1, 1, 0, "_lanczos_sum_expg_scaled", ufunc__lanczos_sum_expg_scaled_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc__lgam1p_loops[2]
+cdef void *ufunc__lgam1p_ptr[4]
+cdef void *ufunc__lgam1p_data[2]
+cdef char ufunc__lgam1p_types[4]
+cdef char *ufunc__lgam1p_doc = (
+    "Internal function, do not use.")
+ufunc__lgam1p_loops[0] = loop_d_d__As_f_f
+ufunc__lgam1p_loops[1] = loop_d_d__As_d_d
+ufunc__lgam1p_types[0] = NPY_FLOAT
+ufunc__lgam1p_types[1] = NPY_FLOAT
+ufunc__lgam1p_types[2] = NPY_DOUBLE
+ufunc__lgam1p_types[3] = NPY_DOUBLE
+ufunc__lgam1p_ptr[2*0] = _func_lgam1p
+ufunc__lgam1p_ptr[2*0+1] = ("_lgam1p")
+ufunc__lgam1p_ptr[2*1] = _func_lgam1p
+ufunc__lgam1p_ptr[2*1+1] = ("_lgam1p")
+ufunc__lgam1p_data[0] = &ufunc__lgam1p_ptr[2*0]
+ufunc__lgam1p_data[1] = &ufunc__lgam1p_ptr[2*1]
+_lgam1p = np.PyUFunc_FromFuncAndData(ufunc__lgam1p_loops, ufunc__lgam1p_data, ufunc__lgam1p_types, 2, 1, 1, 0, "_lgam1p", ufunc__lgam1p_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc__log1pmx_loops[2]
+cdef void *ufunc__log1pmx_ptr[4]
+cdef void *ufunc__log1pmx_data[2]
+cdef char ufunc__log1pmx_types[4]
+cdef char *ufunc__log1pmx_doc = (
+    "Internal function, do not use.")
+ufunc__log1pmx_loops[0] = loop_d_d__As_f_f
+ufunc__log1pmx_loops[1] = loop_d_d__As_d_d
+ufunc__log1pmx_types[0] = NPY_FLOAT
+ufunc__log1pmx_types[1] = NPY_FLOAT
+ufunc__log1pmx_types[2] = NPY_DOUBLE
+ufunc__log1pmx_types[3] = NPY_DOUBLE
+ufunc__log1pmx_ptr[2*0] = _func_log1pmx
+ufunc__log1pmx_ptr[2*0+1] = ("_log1pmx")
+ufunc__log1pmx_ptr[2*1] = _func_log1pmx
+ufunc__log1pmx_ptr[2*1+1] = ("_log1pmx")
+ufunc__log1pmx_data[0] = &ufunc__log1pmx_ptr[2*0]
+ufunc__log1pmx_data[1] = &ufunc__log1pmx_ptr[2*1]
+_log1pmx = np.PyUFunc_FromFuncAndData(ufunc__log1pmx_loops, ufunc__log1pmx_data, ufunc__log1pmx_types, 2, 1, 1, 0, "_log1pmx", ufunc__log1pmx_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc__riemann_zeta_loops[2]
+cdef void *ufunc__riemann_zeta_ptr[4]
+cdef void *ufunc__riemann_zeta_data[2]
+cdef char ufunc__riemann_zeta_types[4]
+cdef char *ufunc__riemann_zeta_doc = (
+    "Internal function, use `zeta` instead.")
+ufunc__riemann_zeta_loops[0] = loop_d_d__As_f_f
+ufunc__riemann_zeta_loops[1] = loop_d_d__As_d_d
+ufunc__riemann_zeta_types[0] = NPY_FLOAT
+ufunc__riemann_zeta_types[1] = NPY_FLOAT
+ufunc__riemann_zeta_types[2] = NPY_DOUBLE
+ufunc__riemann_zeta_types[3] = NPY_DOUBLE
+ufunc__riemann_zeta_ptr[2*0] = _func_riemann_zeta
+ufunc__riemann_zeta_ptr[2*0+1] = ("_riemann_zeta")
+ufunc__riemann_zeta_ptr[2*1] = _func_riemann_zeta
+ufunc__riemann_zeta_ptr[2*1+1] = ("_riemann_zeta")
+ufunc__riemann_zeta_data[0] = &ufunc__riemann_zeta_ptr[2*0]
+ufunc__riemann_zeta_data[1] = &ufunc__riemann_zeta_ptr[2*1]
+_riemann_zeta = np.PyUFunc_FromFuncAndData(ufunc__riemann_zeta_loops, ufunc__riemann_zeta_data, ufunc__riemann_zeta_types, 2, 1, 1, 0, "_riemann_zeta", ufunc__riemann_zeta_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc__sf_error_test_function_loops[1]
+cdef void *ufunc__sf_error_test_function_ptr[2]
+cdef void *ufunc__sf_error_test_function_data[1]
+cdef char ufunc__sf_error_test_function_types[2]
+cdef char *ufunc__sf_error_test_function_doc = (
+    "Private function; do not use.")
+ufunc__sf_error_test_function_loops[0] = loop_i_i__As_l_l
+ufunc__sf_error_test_function_types[0] = NPY_LONG
+ufunc__sf_error_test_function_types[1] = NPY_LONG
+ufunc__sf_error_test_function_ptr[2*0] = _func__sf_error_test_function
+ufunc__sf_error_test_function_ptr[2*0+1] = ("_sf_error_test_function")
+ufunc__sf_error_test_function_data[0] = &ufunc__sf_error_test_function_ptr[2*0]
+_sf_error_test_function = np.PyUFunc_FromFuncAndData(ufunc__sf_error_test_function_loops, ufunc__sf_error_test_function_data, ufunc__sf_error_test_function_types, 1, 1, 1, 0, "_sf_error_test_function", ufunc__sf_error_test_function_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc__sinpi_loops[4]
+cdef void *ufunc__sinpi_ptr[8]
+cdef void *ufunc__sinpi_data[4]
+cdef char ufunc__sinpi_types[8]
+cdef char *ufunc__sinpi_doc = (
+    "Internal function, do not use.")
+ufunc__sinpi_loops[0] = loop_d_d__As_f_f
+ufunc__sinpi_loops[1] = loop_d_d__As_d_d
+ufunc__sinpi_loops[2] = loop_D_D__As_F_F
+ufunc__sinpi_loops[3] = loop_D_D__As_D_D
+ufunc__sinpi_types[0] = NPY_FLOAT
+ufunc__sinpi_types[1] = NPY_FLOAT
+ufunc__sinpi_types[2] = NPY_DOUBLE
+ufunc__sinpi_types[3] = NPY_DOUBLE
+ufunc__sinpi_types[4] = NPY_CFLOAT
+ufunc__sinpi_types[5] = NPY_CFLOAT
+ufunc__sinpi_types[6] = NPY_CDOUBLE
+ufunc__sinpi_types[7] = NPY_CDOUBLE
+ufunc__sinpi_ptr[2*0] = _func_sinpi
+ufunc__sinpi_ptr[2*0+1] = ("_sinpi")
+ufunc__sinpi_ptr[2*1] = _func_sinpi
+ufunc__sinpi_ptr[2*1+1] = ("_sinpi")
+ufunc__sinpi_ptr[2*2] = _func_csinpi
+ufunc__sinpi_ptr[2*2+1] = ("_sinpi")
+ufunc__sinpi_ptr[2*3] = _func_csinpi
+ufunc__sinpi_ptr[2*3+1] = ("_sinpi")
+ufunc__sinpi_data[0] = &ufunc__sinpi_ptr[2*0]
+ufunc__sinpi_data[1] = &ufunc__sinpi_ptr[2*1]
+ufunc__sinpi_data[2] = &ufunc__sinpi_ptr[2*2]
+ufunc__sinpi_data[3] = &ufunc__sinpi_ptr[2*3]
+_sinpi = np.PyUFunc_FromFuncAndData(ufunc__sinpi_loops, ufunc__sinpi_data, ufunc__sinpi_types, 4, 1, 1, 0, "_sinpi", ufunc__sinpi_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc__smirnovc_loops[3]
+cdef void *ufunc__smirnovc_ptr[6]
+cdef void *ufunc__smirnovc_data[3]
+cdef char ufunc__smirnovc_types[9]
+cdef char *ufunc__smirnovc_doc = (
+    "_smirnovc(n, d)\n"
+    " Internal function, do not use.")
+ufunc__smirnovc_loops[0] = loop_d_id__As_ld_d
+ufunc__smirnovc_loops[1] = loop_d_dd__As_ff_f
+ufunc__smirnovc_loops[2] = loop_d_dd__As_dd_d
+ufunc__smirnovc_types[0] = NPY_LONG
+ufunc__smirnovc_types[1] = NPY_DOUBLE
+ufunc__smirnovc_types[2] = NPY_DOUBLE
+ufunc__smirnovc_types[3] = NPY_FLOAT
+ufunc__smirnovc_types[4] = NPY_FLOAT
+ufunc__smirnovc_types[5] = NPY_FLOAT
+ufunc__smirnovc_types[6] = NPY_DOUBLE
+ufunc__smirnovc_types[7] = NPY_DOUBLE
+ufunc__smirnovc_types[8] = NPY_DOUBLE
+ufunc__smirnovc_ptr[2*0] = _func_smirnovc
+ufunc__smirnovc_ptr[2*0+1] = ("_smirnovc")
+ufunc__smirnovc_ptr[2*1] = _func_smirnovc_unsafe
+ufunc__smirnovc_ptr[2*1+1] = ("_smirnovc")
+ufunc__smirnovc_ptr[2*2] = _func_smirnovc_unsafe
+ufunc__smirnovc_ptr[2*2+1] = ("_smirnovc")
+ufunc__smirnovc_data[0] = &ufunc__smirnovc_ptr[2*0]
+ufunc__smirnovc_data[1] = &ufunc__smirnovc_ptr[2*1]
+ufunc__smirnovc_data[2] = &ufunc__smirnovc_ptr[2*2]
+_smirnovc = np.PyUFunc_FromFuncAndData(ufunc__smirnovc_loops, ufunc__smirnovc_data, ufunc__smirnovc_types, 3, 2, 1, 0, "_smirnovc", ufunc__smirnovc_doc, 0)
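+# Note: the `_unsafe` kernels back the float/double loops of ufuncs whose
+# first argument is logically an integer (here, in _smirnovci/_smirnovp below,
+# and in the bdtr family further down). The loop_d_id__As_ld_d loop takes a
+# true NPY_LONG, while the `_unsafe` variants presumably accept the argument
+# as a double and cast internally, so that plain float inputs still dispatch.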
+
+cdef np.PyUFuncGenericFunction ufunc__smirnovci_loops[3]
+cdef void *ufunc__smirnovci_ptr[6]
+cdef void *ufunc__smirnovci_data[3]
+cdef char ufunc__smirnovci_types[9]
+cdef char *ufunc__smirnovci_doc = (
+    "Internal function, do not use.")
+ufunc__smirnovci_loops[0] = loop_d_id__As_ld_d
+ufunc__smirnovci_loops[1] = loop_d_dd__As_ff_f
+ufunc__smirnovci_loops[2] = loop_d_dd__As_dd_d
+ufunc__smirnovci_types[0] = NPY_LONG
+ufunc__smirnovci_types[1] = NPY_DOUBLE
+ufunc__smirnovci_types[2] = NPY_DOUBLE
+ufunc__smirnovci_types[3] = NPY_FLOAT
+ufunc__smirnovci_types[4] = NPY_FLOAT
+ufunc__smirnovci_types[5] = NPY_FLOAT
+ufunc__smirnovci_types[6] = NPY_DOUBLE
+ufunc__smirnovci_types[7] = NPY_DOUBLE
+ufunc__smirnovci_types[8] = NPY_DOUBLE
+ufunc__smirnovci_ptr[2*0] = _func_smirnovci
+ufunc__smirnovci_ptr[2*0+1] = ("_smirnovci")
+ufunc__smirnovci_ptr[2*1] = _func_smirnovci_unsafe
+ufunc__smirnovci_ptr[2*1+1] = ("_smirnovci")
+ufunc__smirnovci_ptr[2*2] = _func_smirnovci_unsafe
+ufunc__smirnovci_ptr[2*2+1] = ("_smirnovci")
+ufunc__smirnovci_data[0] = &ufunc__smirnovci_ptr[2*0]
+ufunc__smirnovci_data[1] = &ufunc__smirnovci_ptr[2*1]
+ufunc__smirnovci_data[2] = &ufunc__smirnovci_ptr[2*2]
+_smirnovci = np.PyUFunc_FromFuncAndData(ufunc__smirnovci_loops, ufunc__smirnovci_data, ufunc__smirnovci_types, 3, 2, 1, 0, "_smirnovci", ufunc__smirnovci_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc__smirnovp_loops[3]
+cdef void *ufunc__smirnovp_ptr[6]
+cdef void *ufunc__smirnovp_data[3]
+cdef char ufunc__smirnovp_types[9]
+cdef char *ufunc__smirnovp_doc = (
+    "_smirnovp(n, p)\n"
+    " Internal function, do not use.")
+ufunc__smirnovp_loops[0] = loop_d_id__As_ld_d
+ufunc__smirnovp_loops[1] = loop_d_dd__As_ff_f
+ufunc__smirnovp_loops[2] = loop_d_dd__As_dd_d
+ufunc__smirnovp_types[0] = NPY_LONG
+ufunc__smirnovp_types[1] = NPY_DOUBLE
+ufunc__smirnovp_types[2] = NPY_DOUBLE
+ufunc__smirnovp_types[3] = NPY_FLOAT
+ufunc__smirnovp_types[4] = NPY_FLOAT
+ufunc__smirnovp_types[5] = NPY_FLOAT
+ufunc__smirnovp_types[6] = NPY_DOUBLE
+ufunc__smirnovp_types[7] = NPY_DOUBLE
+ufunc__smirnovp_types[8] = NPY_DOUBLE
+ufunc__smirnovp_ptr[2*0] = _func_smirnovp
+ufunc__smirnovp_ptr[2*0+1] = ("_smirnovp")
+ufunc__smirnovp_ptr[2*1] = _func_smirnovp_unsafe
+ufunc__smirnovp_ptr[2*1+1] = ("_smirnovp")
+ufunc__smirnovp_ptr[2*2] = _func_smirnovp_unsafe
+ufunc__smirnovp_ptr[2*2+1] = ("_smirnovp")
+ufunc__smirnovp_data[0] = &ufunc__smirnovp_ptr[2*0]
+ufunc__smirnovp_data[1] = &ufunc__smirnovp_ptr[2*1]
+ufunc__smirnovp_data[2] = &ufunc__smirnovp_ptr[2*2]
+_smirnovp = np.PyUFunc_FromFuncAndData(ufunc__smirnovp_loops, ufunc__smirnovp_data, ufunc__smirnovp_types, 3, 2, 1, 0, "_smirnovp", ufunc__smirnovp_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc__spherical_in_loops[2]
+cdef void *ufunc__spherical_in_ptr[4]
+cdef void *ufunc__spherical_in_data[2]
+cdef char ufunc__spherical_in_types[6]
+cdef char *ufunc__spherical_in_doc = (
+    "Internal function, use `spherical_in` instead.")
+ufunc__spherical_in_loops[0] = loop_d_ld__As_ld_d
+ufunc__spherical_in_loops[1] = loop_D_lD__As_lD_D
+ufunc__spherical_in_types[0] = NPY_LONG
+ufunc__spherical_in_types[1] = NPY_DOUBLE
+ufunc__spherical_in_types[2] = NPY_DOUBLE
+ufunc__spherical_in_types[3] = NPY_LONG
+ufunc__spherical_in_types[4] = NPY_CDOUBLE
+ufunc__spherical_in_types[5] = NPY_CDOUBLE
+ufunc__spherical_in_ptr[2*0] = _func_spherical_in_real
+ufunc__spherical_in_ptr[2*0+1] = ("_spherical_in")
+ufunc__spherical_in_ptr[2*1] = _func_spherical_in_complex
+ufunc__spherical_in_ptr[2*1+1] = ("_spherical_in")
+ufunc__spherical_in_data[0] = &ufunc__spherical_in_ptr[2*0]
+ufunc__spherical_in_data[1] = &ufunc__spherical_in_ptr[2*1]
+_spherical_in = np.PyUFunc_FromFuncAndData(ufunc__spherical_in_loops, ufunc__spherical_in_data, ufunc__spherical_in_types, 2, 2, 1, 0, "_spherical_in", ufunc__spherical_in_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc__spherical_in_d_loops[2]
+cdef void *ufunc__spherical_in_d_ptr[4]
+cdef void *ufunc__spherical_in_d_data[2]
+cdef char ufunc__spherical_in_d_types[6]
+cdef char *ufunc__spherical_in_d_doc = (
+    "Internal function, use `spherical_in` instead.")
+ufunc__spherical_in_d_loops[0] = loop_d_ld__As_ld_d
+ufunc__spherical_in_d_loops[1] = loop_D_lD__As_lD_D
+ufunc__spherical_in_d_types[0] = NPY_LONG
+ufunc__spherical_in_d_types[1] = NPY_DOUBLE
+ufunc__spherical_in_d_types[2] = NPY_DOUBLE
+ufunc__spherical_in_d_types[3] = NPY_LONG
+ufunc__spherical_in_d_types[4] = NPY_CDOUBLE
+ufunc__spherical_in_d_types[5] = NPY_CDOUBLE
+ufunc__spherical_in_d_ptr[2*0] = _func_spherical_in_d_real
+ufunc__spherical_in_d_ptr[2*0+1] = ("_spherical_in_d")
+ufunc__spherical_in_d_ptr[2*1] = _func_spherical_in_d_complex
+ufunc__spherical_in_d_ptr[2*1+1] = ("_spherical_in_d")
+ufunc__spherical_in_d_data[0] = &ufunc__spherical_in_d_ptr[2*0]
+ufunc__spherical_in_d_data[1] = &ufunc__spherical_in_d_ptr[2*1]
+_spherical_in_d = np.PyUFunc_FromFuncAndData(ufunc__spherical_in_d_loops, ufunc__spherical_in_d_data, ufunc__spherical_in_d_types, 2, 2, 1, 0, "_spherical_in_d", ufunc__spherical_in_d_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc__spherical_jn_loops[2]
+cdef void *ufunc__spherical_jn_ptr[4]
+cdef void *ufunc__spherical_jn_data[2]
+cdef char ufunc__spherical_jn_types[6]
+cdef char *ufunc__spherical_jn_doc = (
+    "Internal function, use `spherical_jn` instead.")
+ufunc__spherical_jn_loops[0] = loop_d_ld__As_ld_d
+ufunc__spherical_jn_loops[1] = loop_D_lD__As_lD_D
+ufunc__spherical_jn_types[0] = NPY_LONG
+ufunc__spherical_jn_types[1] = NPY_DOUBLE
+ufunc__spherical_jn_types[2] = NPY_DOUBLE
+ufunc__spherical_jn_types[3] = NPY_LONG
+ufunc__spherical_jn_types[4] = NPY_CDOUBLE
+ufunc__spherical_jn_types[5] = NPY_CDOUBLE
+ufunc__spherical_jn_ptr[2*0] = _func_spherical_jn_real
+ufunc__spherical_jn_ptr[2*0+1] = ("_spherical_jn")
+ufunc__spherical_jn_ptr[2*1] = _func_spherical_jn_complex
+ufunc__spherical_jn_ptr[2*1+1] = ("_spherical_jn")
+ufunc__spherical_jn_data[0] = &ufunc__spherical_jn_ptr[2*0]
+ufunc__spherical_jn_data[1] = &ufunc__spherical_jn_ptr[2*1]
+_spherical_jn = np.PyUFunc_FromFuncAndData(ufunc__spherical_jn_loops, ufunc__spherical_jn_data, ufunc__spherical_jn_types, 2, 2, 1, 0, "_spherical_jn", ufunc__spherical_jn_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc__spherical_jn_d_loops[2]
+cdef void *ufunc__spherical_jn_d_ptr[4]
+cdef void *ufunc__spherical_jn_d_data[2]
+cdef char ufunc__spherical_jn_d_types[6]
+cdef char *ufunc__spherical_jn_d_doc = (
+    "Internal function, use `spherical_jn` instead.")
+ufunc__spherical_jn_d_loops[0] = loop_d_ld__As_ld_d
+ufunc__spherical_jn_d_loops[1] = loop_D_lD__As_lD_D
+ufunc__spherical_jn_d_types[0] = NPY_LONG
+ufunc__spherical_jn_d_types[1] = NPY_DOUBLE
+ufunc__spherical_jn_d_types[2] = NPY_DOUBLE
+ufunc__spherical_jn_d_types[3] = NPY_LONG
+ufunc__spherical_jn_d_types[4] = NPY_CDOUBLE
+ufunc__spherical_jn_d_types[5] = NPY_CDOUBLE
+ufunc__spherical_jn_d_ptr[2*0] = _func_spherical_jn_d_real
+ufunc__spherical_jn_d_ptr[2*0+1] = ("_spherical_jn_d")
+ufunc__spherical_jn_d_ptr[2*1] = _func_spherical_jn_d_complex
+ufunc__spherical_jn_d_ptr[2*1+1] = ("_spherical_jn_d")
+ufunc__spherical_jn_d_data[0] = &ufunc__spherical_jn_d_ptr[2*0]
+ufunc__spherical_jn_d_data[1] = &ufunc__spherical_jn_d_ptr[2*1]
+_spherical_jn_d = np.PyUFunc_FromFuncAndData(ufunc__spherical_jn_d_loops, ufunc__spherical_jn_d_data, ufunc__spherical_jn_d_types, 2, 2, 1, 0, "_spherical_jn_d", ufunc__spherical_jn_d_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc__spherical_kn_loops[2]
+cdef void *ufunc__spherical_kn_ptr[4]
+cdef void *ufunc__spherical_kn_data[2]
+cdef char ufunc__spherical_kn_types[6]
+cdef char *ufunc__spherical_kn_doc = (
+    "Internal function, use `spherical_kn` instead.")
+ufunc__spherical_kn_loops[0] = loop_d_ld__As_ld_d
+ufunc__spherical_kn_loops[1] = loop_D_lD__As_lD_D
+ufunc__spherical_kn_types[0] = NPY_LONG
+ufunc__spherical_kn_types[1] = NPY_DOUBLE
+ufunc__spherical_kn_types[2] = NPY_DOUBLE
+ufunc__spherical_kn_types[3] = NPY_LONG
+ufunc__spherical_kn_types[4] = NPY_CDOUBLE
+ufunc__spherical_kn_types[5] = NPY_CDOUBLE
+ufunc__spherical_kn_ptr[2*0] = _func_spherical_kn_real
+ufunc__spherical_kn_ptr[2*0+1] = ("_spherical_kn")
+ufunc__spherical_kn_ptr[2*1] = _func_spherical_kn_complex
+ufunc__spherical_kn_ptr[2*1+1] = ("_spherical_kn")
+ufunc__spherical_kn_data[0] = &ufunc__spherical_kn_ptr[2*0]
+ufunc__spherical_kn_data[1] = &ufunc__spherical_kn_ptr[2*1]
+_spherical_kn = np.PyUFunc_FromFuncAndData(ufunc__spherical_kn_loops, ufunc__spherical_kn_data, ufunc__spherical_kn_types, 2, 2, 1, 0, "_spherical_kn", ufunc__spherical_kn_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc__spherical_kn_d_loops[2]
+cdef void *ufunc__spherical_kn_d_ptr[4]
+cdef void *ufunc__spherical_kn_d_data[2]
+cdef char ufunc__spherical_kn_d_types[6]
+cdef char *ufunc__spherical_kn_d_doc = (
+    "Internal function, use `spherical_kn` instead.")
+ufunc__spherical_kn_d_loops[0] = loop_d_ld__As_ld_d
+ufunc__spherical_kn_d_loops[1] = loop_D_lD__As_lD_D
+ufunc__spherical_kn_d_types[0] = NPY_LONG
+ufunc__spherical_kn_d_types[1] = NPY_DOUBLE
+ufunc__spherical_kn_d_types[2] = NPY_DOUBLE
+ufunc__spherical_kn_d_types[3] = NPY_LONG
+ufunc__spherical_kn_d_types[4] = NPY_CDOUBLE
+ufunc__spherical_kn_d_types[5] = NPY_CDOUBLE
+ufunc__spherical_kn_d_ptr[2*0] = _func_spherical_kn_d_real
+ufunc__spherical_kn_d_ptr[2*0+1] = ("_spherical_kn_d")
+ufunc__spherical_kn_d_ptr[2*1] = _func_spherical_kn_d_complex
+ufunc__spherical_kn_d_ptr[2*1+1] = ("_spherical_kn_d")
+ufunc__spherical_kn_d_data[0] = &ufunc__spherical_kn_d_ptr[2*0]
+ufunc__spherical_kn_d_data[1] = &ufunc__spherical_kn_d_ptr[2*1]
+_spherical_kn_d = np.PyUFunc_FromFuncAndData(ufunc__spherical_kn_d_loops, ufunc__spherical_kn_d_data, ufunc__spherical_kn_d_types, 2, 2, 1, 0, "_spherical_kn_d", ufunc__spherical_kn_d_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc__spherical_yn_loops[2]
+cdef void *ufunc__spherical_yn_ptr[4]
+cdef void *ufunc__spherical_yn_data[2]
+cdef char ufunc__spherical_yn_types[6]
+cdef char *ufunc__spherical_yn_doc = (
+    "Internal function, use `spherical_yn` instead.")
+ufunc__spherical_yn_loops[0] = loop_d_ld__As_ld_d
+ufunc__spherical_yn_loops[1] = loop_D_lD__As_lD_D
+ufunc__spherical_yn_types[0] = NPY_LONG
+ufunc__spherical_yn_types[1] = NPY_DOUBLE
+ufunc__spherical_yn_types[2] = NPY_DOUBLE
+ufunc__spherical_yn_types[3] = NPY_LONG
+ufunc__spherical_yn_types[4] = NPY_CDOUBLE
+ufunc__spherical_yn_types[5] = NPY_CDOUBLE
+ufunc__spherical_yn_ptr[2*0] = _func_spherical_yn_real
+ufunc__spherical_yn_ptr[2*0+1] = ("_spherical_yn")
+ufunc__spherical_yn_ptr[2*1] = _func_spherical_yn_complex
+ufunc__spherical_yn_ptr[2*1+1] = ("_spherical_yn")
+ufunc__spherical_yn_data[0] = &ufunc__spherical_yn_ptr[2*0]
+ufunc__spherical_yn_data[1] = &ufunc__spherical_yn_ptr[2*1]
+_spherical_yn = np.PyUFunc_FromFuncAndData(ufunc__spherical_yn_loops, ufunc__spherical_yn_data, ufunc__spherical_yn_types, 2, 2, 1, 0, "_spherical_yn", ufunc__spherical_yn_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc__spherical_yn_d_loops[2]
+cdef void *ufunc__spherical_yn_d_ptr[4]
+cdef void *ufunc__spherical_yn_d_data[2]
+cdef char ufunc__spherical_yn_d_types[6]
+cdef char *ufunc__spherical_yn_d_doc = (
+    "Internal function, use `spherical_yn` instead.")
+ufunc__spherical_yn_d_loops[0] = loop_d_ld__As_ld_d
+ufunc__spherical_yn_d_loops[1] = loop_D_lD__As_lD_D
+ufunc__spherical_yn_d_types[0] = NPY_LONG
+ufunc__spherical_yn_d_types[1] = NPY_DOUBLE
+ufunc__spherical_yn_d_types[2] = NPY_DOUBLE
+ufunc__spherical_yn_d_types[3] = NPY_LONG
+ufunc__spherical_yn_d_types[4] = NPY_CDOUBLE
+ufunc__spherical_yn_d_types[5] = NPY_CDOUBLE
+ufunc__spherical_yn_d_ptr[2*0] = _func_spherical_yn_d_real
+ufunc__spherical_yn_d_ptr[2*0+1] = ("_spherical_yn_d")
+ufunc__spherical_yn_d_ptr[2*1] = _func_spherical_yn_d_complex
+ufunc__spherical_yn_d_ptr[2*1+1] = ("_spherical_yn_d")
+ufunc__spherical_yn_d_data[0] = &ufunc__spherical_yn_d_ptr[2*0]
+ufunc__spherical_yn_d_data[1] = &ufunc__spherical_yn_d_ptr[2*1]
+_spherical_yn_d = np.PyUFunc_FromFuncAndData(ufunc__spherical_yn_d_loops, ufunc__spherical_yn_d_data, ufunc__spherical_yn_d_types, 2, 2, 1, 0, "_spherical_yn_d", ufunc__spherical_yn_d_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc__struve_asymp_large_z_loops[1]
+cdef void *ufunc__struve_asymp_large_z_ptr[2]
+cdef void *ufunc__struve_asymp_large_z_data[1]
+cdef char ufunc__struve_asymp_large_z_types[5]
+cdef char *ufunc__struve_asymp_large_z_doc = (
+    "_struve_asymp_large_z(v, z, is_h)\n"
+    "\n"
+    "Internal function for testing `struve` & `modstruve`\n"
+    "\n"
+    "Evaluates using asymptotic expansion\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "v, err")
+ufunc__struve_asymp_large_z_loops[0] = loop_d_ddi_d_As_ddl_dd
+ufunc__struve_asymp_large_z_types[0] = NPY_DOUBLE
+ufunc__struve_asymp_large_z_types[1] = NPY_DOUBLE
+ufunc__struve_asymp_large_z_types[2] = NPY_LONG
+ufunc__struve_asymp_large_z_types[3] = NPY_DOUBLE
+ufunc__struve_asymp_large_z_types[4] = NPY_DOUBLE
+ufunc__struve_asymp_large_z_ptr[2*0] = _func_struve_asymp_large_z
+ufunc__struve_asymp_large_z_ptr[2*0+1] = ("_struve_asymp_large_z")
+ufunc__struve_asymp_large_z_data[0] = &ufunc__struve_asymp_large_z_ptr[2*0]
+_struve_asymp_large_z = np.PyUFunc_FromFuncAndData(ufunc__struve_asymp_large_z_loops, ufunc__struve_asymp_large_z_data, ufunc__struve_asymp_large_z_types, 1, 3, 2, 0, "_struve_asymp_large_z", ufunc__struve_asymp_large_z_doc, 0)
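+# Note: ntypes=1, nin=3, nout=2 in the call above -- the two outputs are the
+# function value and an error estimate, matching the "Returns: v, err"
+# docstring. A sketch of expected test-only use (assuming this module builds
+# as scipy.special._ufuncs):
+#     >>> from scipy.special._ufuncs import _struve_asymp_large_z
+#     >>> v, err = _struve_asymp_large_z(0.0, 50.0, 1)  # order 0, z=50, is_h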
+
+cdef np.PyUFuncGenericFunction ufunc__struve_bessel_series_loops[1]
+cdef void *ufunc__struve_bessel_series_ptr[2]
+cdef void *ufunc__struve_bessel_series_data[1]
+cdef char ufunc__struve_bessel_series_types[5]
+cdef char *ufunc__struve_bessel_series_doc = (
+    "_struve_bessel_series(v, z, is_h)\n"
+    "\n"
+    "Internal function for testing `struve` & `modstruve`\n"
+    "\n"
+    "Evaluates using Bessel function series\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "v, err")
+ufunc__struve_bessel_series_loops[0] = loop_d_ddi_d_As_ddl_dd
+ufunc__struve_bessel_series_types[0] = NPY_DOUBLE
+ufunc__struve_bessel_series_types[1] = NPY_DOUBLE
+ufunc__struve_bessel_series_types[2] = NPY_LONG
+ufunc__struve_bessel_series_types[3] = NPY_DOUBLE
+ufunc__struve_bessel_series_types[4] = NPY_DOUBLE
+ufunc__struve_bessel_series_ptr[2*0] = _func_struve_bessel_series
+ufunc__struve_bessel_series_ptr[2*0+1] = ("_struve_bessel_series")
+ufunc__struve_bessel_series_data[0] = &ufunc__struve_bessel_series_ptr[2*0]
+_struve_bessel_series = np.PyUFunc_FromFuncAndData(ufunc__struve_bessel_series_loops, ufunc__struve_bessel_series_data, ufunc__struve_bessel_series_types, 1, 3, 2, 0, "_struve_bessel_series", ufunc__struve_bessel_series_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc__struve_power_series_loops[1]
+cdef void *ufunc__struve_power_series_ptr[2]
+cdef void *ufunc__struve_power_series_data[1]
+cdef char ufunc__struve_power_series_types[5]
+cdef char *ufunc__struve_power_series_doc = (
+    "_struve_power_series(v, z, is_h)\n"
+    "\n"
+    "Internal function for testing `struve` & `modstruve`\n"
+    "\n"
+    "Evaluates using power series\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "v, err")
+ufunc__struve_power_series_loops[0] = loop_d_ddi_d_As_ddl_dd
+ufunc__struve_power_series_types[0] = NPY_DOUBLE
+ufunc__struve_power_series_types[1] = NPY_DOUBLE
+ufunc__struve_power_series_types[2] = NPY_LONG
+ufunc__struve_power_series_types[3] = NPY_DOUBLE
+ufunc__struve_power_series_types[4] = NPY_DOUBLE
+ufunc__struve_power_series_ptr[2*0] = _func_struve_power_series
+ufunc__struve_power_series_ptr[2*0+1] = ("_struve_power_series")
+ufunc__struve_power_series_data[0] = &ufunc__struve_power_series_ptr[2*0]
+_struve_power_series = np.PyUFunc_FromFuncAndData(ufunc__struve_power_series_loops, ufunc__struve_power_series_data, ufunc__struve_power_series_types, 1, 3, 2, 0, "_struve_power_series", ufunc__struve_power_series_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc__zeta_loops[2]
+cdef void *ufunc__zeta_ptr[4]
+cdef void *ufunc__zeta_data[2]
+cdef char ufunc__zeta_types[6]
+cdef char *ufunc__zeta_doc = (
+    "_zeta(x, q)\n"
+    "\n"
+    "Internal function, Hurwitz zeta.")
+ufunc__zeta_loops[0] = loop_d_dd__As_ff_f
+ufunc__zeta_loops[1] = loop_d_dd__As_dd_d
+ufunc__zeta_types[0] = NPY_FLOAT
+ufunc__zeta_types[1] = NPY_FLOAT
+ufunc__zeta_types[2] = NPY_FLOAT
+ufunc__zeta_types[3] = NPY_DOUBLE
+ufunc__zeta_types[4] = NPY_DOUBLE
+ufunc__zeta_types[5] = NPY_DOUBLE
+ufunc__zeta_ptr[2*0] = _func_zeta
+ufunc__zeta_ptr[2*0+1] = ("_zeta")
+ufunc__zeta_ptr[2*1] = _func_zeta
+ufunc__zeta_ptr[2*1+1] = ("_zeta")
+ufunc__zeta_data[0] = &ufunc__zeta_ptr[2*0]
+ufunc__zeta_data[1] = &ufunc__zeta_ptr[2*1]
+_zeta = np.PyUFunc_FromFuncAndData(ufunc__zeta_loops, ufunc__zeta_data, ufunc__zeta_types, 2, 2, 1, 0, "_zeta", ufunc__zeta_doc, 0)
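+# Note: _zeta(x, q) is the two-argument (Hurwitz) form; the public
+# scipy.special.zeta wrapper presumably dispatches to _riemann_zeta (defined
+# earlier) when q is omitted and to this ufunc when q is given.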
+
+cdef np.PyUFuncGenericFunction ufunc_agm_loops[2]
+cdef void *ufunc_agm_ptr[4]
+cdef void *ufunc_agm_data[2]
+cdef char ufunc_agm_types[6]
+cdef char *ufunc_agm_doc = (
+    "agm(a, b, out=None)\n"
+    "\n"
+    "Compute the arithmetic-geometric mean of `a` and `b`.\n"
+    "\n"
+    "Start with a_0 = a and b_0 = b and iteratively compute::\n"
+    "\n"
+    "    a_{n+1} = (a_n + b_n)/2\n"
+    "    b_{n+1} = sqrt(a_n*b_n)\n"
+    "\n"
+    "a_n and b_n converge to the same limit as n increases; their common\n"
+    "limit is agm(a, b).\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "a, b : array_like\n"
+    "    Real values only. If the values are both negative, the result\n"
+    "    is negative. If one value is negative and the other is positive,\n"
+    "    `nan` is returned.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    The arithmetic-geometric mean of `a` and `b`.\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import agm\n"
+    ">>> a, b = 24.0, 6.0\n"
+    ">>> agm(a, b)\n"
+    "13.458171481725614\n"
+    "\n"
+    "Compare that result to the iteration:\n"
+    "\n"
+    ">>> while a != b:\n"
+    "...     a, b = (a + b)/2, np.sqrt(a*b)\n"
+    "...     print(\"a = %19.16f  b=%19.16f\" % (a, b))\n"
+    "...\n"
+    "a = 15.0000000000000000  b=12.0000000000000000\n"
+    "a = 13.5000000000000000  b=13.4164078649987388\n"
+    "a = 13.4582039324993694  b=13.4581390309909850\n"
+    "a = 13.4581714817451772  b=13.4581714817060547\n"
+    "a = 13.4581714817256159  b=13.4581714817256159\n"
+    "\n"
+    "When array-like arguments are given, broadcasting applies:\n"
+    "\n"
+    ">>> a = np.array([[1.5], [3], [6]])  # a has shape (3, 1).\n"
+    ">>> b = np.array([6, 12, 24, 48])    # b has shape (4,).\n"
+    ">>> agm(a, b)\n"
+    "array([[  3.36454287,   5.42363427,   9.05798751,  15.53650756],\n"
+    "       [  4.37037309,   6.72908574,  10.84726853,  18.11597502],\n"
+    "       [  6.        ,   8.74074619,  13.45817148,  21.69453707]])")
+ufunc_agm_loops[0] = loop_d_dd__As_ff_f
+ufunc_agm_loops[1] = loop_d_dd__As_dd_d
+ufunc_agm_types[0] = NPY_FLOAT
+ufunc_agm_types[1] = NPY_FLOAT
+ufunc_agm_types[2] = NPY_FLOAT
+ufunc_agm_types[3] = NPY_DOUBLE
+ufunc_agm_types[4] = NPY_DOUBLE
+ufunc_agm_types[5] = NPY_DOUBLE
+ufunc_agm_ptr[2*0] = _func_agm
+ufunc_agm_ptr[2*0+1] = ("agm")
+ufunc_agm_ptr[2*1] = _func_agm
+ufunc_agm_ptr[2*1+1] = ("agm")
+ufunc_agm_data[0] = &ufunc_agm_ptr[2*0]
+ufunc_agm_data[1] = &ufunc_agm_ptr[2*1]
+agm = np.PyUFunc_FromFuncAndData(ufunc_agm_loops, ufunc_agm_data, ufunc_agm_types, 2, 2, 1, 0, "agm", ufunc_agm_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_airy_loops[4]
+cdef void *ufunc_airy_ptr[8]
+cdef void *ufunc_airy_data[4]
+cdef char ufunc_airy_types[20]
+cdef char *ufunc_airy_doc = (
+    "airy(z, out=None)\n"
+    "\n"
+    "Airy functions and their derivatives.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "z : array_like\n"
+    "    Real or complex argument.\n"
+    "out : tuple of ndarray, optional\n"
+    "    Optional output arrays for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "Ai, Aip, Bi, Bip : 4-tuple of scalar or ndarray\n"
+    "    Airy functions Ai and Bi, and their derivatives Aip and Bip.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "The Airy functions Ai and Bi are two independent solutions of\n"
+    "\n"
+    ".. math:: y''(x) = x y(x).\n"
+    "\n"
+    "For real `z` in [-10, 10], the computation is carried out by calling\n"
+    "the Cephes [1]_ `airy` routine, which uses power series summation\n"
+    "for small `z` and rational minimax approximations for large `z`.\n"
+    "\n"
+    "Outside this range, the AMOS [2]_ `zairy` and `zbiry` routines are\n"
+    "employed.  They are computed using power series for :math:`|z| < 1` and\n"
+    "the following relations to modified Bessel functions for larger `z`\n"
+    "(where :math:`t \\equiv 2 z^{3/2}/3`):\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    Ai(z) = \\frac{1}{\\pi \\sqrt{3}} K_{1/3}(t)\n"
+    "\n"
+    "    Ai'(z) = -\\frac{z}{\\pi \\sqrt{3}} K_{2/3}(t)\n"
+    "\n"
+    "    Bi(z) = \\sqrt{\\frac{z}{3}} \\left(I_{-1/3}(t) + I_{1/3}(t) \\right)\n"
+    "\n"
+    "    Bi'(z) = \\frac{z}{\\sqrt{3}} \\left(I_{-2/3}(t) + I_{2/3}(t)\\right)\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "airye : exponentially scaled Airy functions.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Cephes Mathematical Functions Library,\n"
+    "       http://www.netlib.org/cephes/\n"
+    ".. [2] Donald E. Amos, \"AMOS, A Portable Package for Bessel Functions\n"
+    "       of a Complex Argument and Nonnegative Order\",\n"
+    "       http://netlib.org/amos/\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Compute the Airy functions on the interval [-15, 5].\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy import special\n"
+    ">>> x = np.linspace(-15, 5, 201)\n"
+    ">>> ai, aip, bi, bip = special.airy(x)\n"
+    "\n"
+    "Plot Ai(x) and Bi(x).\n"
+    "\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> plt.plot(x, ai, 'r', label='Ai(x)')\n"
+    ">>> plt.plot(x, bi, 'b--', label='Bi(x)')\n"
+    ">>> plt.ylim(-0.5, 1.0)\n"
+    ">>> plt.grid()\n"
+    ">>> plt.legend(loc='upper left')\n"
+    ">>> plt.show()")
+ufunc_airy_loops[0] = loop_i_d_dddd_As_f_ffff
+ufunc_airy_loops[1] = loop_i_d_dddd_As_d_dddd
+ufunc_airy_loops[2] = loop_i_D_DDDD_As_F_FFFF
+ufunc_airy_loops[3] = loop_i_D_DDDD_As_D_DDDD
+ufunc_airy_types[0] = NPY_FLOAT
+ufunc_airy_types[1] = NPY_FLOAT
+ufunc_airy_types[2] = NPY_FLOAT
+ufunc_airy_types[3] = NPY_FLOAT
+ufunc_airy_types[4] = NPY_FLOAT
+ufunc_airy_types[5] = NPY_DOUBLE
+ufunc_airy_types[6] = NPY_DOUBLE
+ufunc_airy_types[7] = NPY_DOUBLE
+ufunc_airy_types[8] = NPY_DOUBLE
+ufunc_airy_types[9] = NPY_DOUBLE
+ufunc_airy_types[10] = NPY_CFLOAT
+ufunc_airy_types[11] = NPY_CFLOAT
+ufunc_airy_types[12] = NPY_CFLOAT
+ufunc_airy_types[13] = NPY_CFLOAT
+ufunc_airy_types[14] = NPY_CFLOAT
+ufunc_airy_types[15] = NPY_CDOUBLE
+ufunc_airy_types[16] = NPY_CDOUBLE
+ufunc_airy_types[17] = NPY_CDOUBLE
+ufunc_airy_types[18] = NPY_CDOUBLE
+ufunc_airy_types[19] = NPY_CDOUBLE
+ufunc_airy_ptr[2*0] = _func_airy_wrap
+ufunc_airy_ptr[2*0+1] = ("airy")
+ufunc_airy_ptr[2*1] = _func_airy_wrap
+ufunc_airy_ptr[2*1+1] = ("airy")
+ufunc_airy_ptr[2*2] = _func_cairy_wrap
+ufunc_airy_ptr[2*2+1] = ("airy")
+ufunc_airy_ptr[2*3] = _func_cairy_wrap
+ufunc_airy_ptr[2*3+1] = ("airy")
+ufunc_airy_data[0] = &ufunc_airy_ptr[2*0]
+ufunc_airy_data[1] = &ufunc_airy_ptr[2*1]
+ufunc_airy_data[2] = &ufunc_airy_ptr[2*2]
+ufunc_airy_data[3] = &ufunc_airy_ptr[2*3]
+airy = np.PyUFunc_FromFuncAndData(ufunc_airy_loops, ufunc_airy_data, ufunc_airy_types, 4, 1, 4, 0, "airy", ufunc_airy_doc, 0)
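+# Note: airy registers 4 loops (float, double, cfloat, cdouble) with nin=1 and
+# nout=4, so its types array needs 4 * (1 + 4) = 20 entries: one input code
+# followed by four output codes per loop. The same layout applies to airye
+# below.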
+
+cdef np.PyUFuncGenericFunction ufunc_airye_loops[4]
+cdef void *ufunc_airye_ptr[8]
+cdef void *ufunc_airye_data[4]
+cdef char ufunc_airye_types[20]
+cdef char *ufunc_airye_doc = (
+    "airye(z, out=None)\n"
+    "\n"
+    "Exponentially scaled Airy functions and their derivatives.\n"
+    "\n"
+    "Scaling::\n"
+    "\n"
+    "    eAi  = Ai  * exp(2.0/3.0*z*sqrt(z))\n"
+    "    eAip = Aip * exp(2.0/3.0*z*sqrt(z))\n"
+    "    eBi  = Bi  * exp(-abs(2.0/3.0*(z*sqrt(z)).real))\n"
+    "    eBip = Bip * exp(-abs(2.0/3.0*(z*sqrt(z)).real))\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "z : array_like\n"
+    "    Real or complex argument.\n"
+    "out : tuple of ndarray, optional\n"
+    "    Optional output arrays for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "eAi, eAip, eBi, eBip : 4-tuple of scalar or ndarray\n"
+    "    Exponentially scaled Airy functions eAi and eBi, and their derivatives\n"
+    "    eAip and eBip\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "Wrapper for the AMOS [1]_ routines `zairy` and `zbiry`.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "airy\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Donald E. Amos, \"AMOS, A Portable Package for Bessel Functions\n"
+    "       of a Complex Argument and Nonnegative Order\",\n"
+    "       http://netlib.org/amos/\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "We can compute exponentially scaled Airy functions and their derivatives:\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import airye\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> z = np.linspace(0, 50, 500)\n"
+    ">>> eAi, eAip, eBi, eBip = airye(z)\n"
+    ">>> f, ax = plt.subplots(2, 1, sharex=True)\n"
+    ">>> for ind, data in enumerate([[eAi, eAip, [\"eAi\", \"eAip\"]],\n"
+    "...                             [eBi, eBip, [\"eBi\", \"eBip\"]]]):\n"
+    "...     ax[ind].plot(z, data[0], \"-r\", z, data[1], \"-b\")\n"
+    "...     ax[ind].legend(data[2])\n"
+    "...     ax[ind].grid(True)\n"
+    ">>> plt.show()\n"
+    "\n"
+    "We can compute these using usual non-scaled Airy functions by:\n"
+    "\n"
+    ">>> from scipy.special import airy\n"
+    ">>> Ai, Aip, Bi, Bip = airy(z)\n"
+    ">>> np.allclose(eAi, Ai * np.exp(2.0 / 3.0 * z * np.sqrt(z)))\n"
+    "True\n"
+    ">>> np.allclose(eAip, Aip * np.exp(2.0 / 3.0 * z * np.sqrt(z)))\n"
+    "True\n"
+    ">>> np.allclose(eBi, Bi * np.exp(-abs(np.real(2.0 / 3.0 * z * np.sqrt(z)))))\n"
+    "True\n"
+    ">>> np.allclose(eBip, Bip * np.exp(-abs(np.real(2.0 / 3.0 * z * np.sqrt(z)))))\n"
+    "True\n"
+    "\n"
+    "Comparing non-scaled and exponentially scaled ones, the usual non-scaled\n"
+    "function quickly underflows for large values, whereas the exponentially\n"
+    "scaled function does not.\n"
+    "\n"
+    ">>> airy(200)\n"
+    "(0.0, 0.0, nan, nan)\n"
+    ">>> airye(200)\n"
+    "(0.07501041684381093, -1.0609012305109042, 0.15003188417418148, 2.1215836725571093)")
+ufunc_airye_loops[0] = loop_i_d_dddd_As_f_ffff
+ufunc_airye_loops[1] = loop_i_d_dddd_As_d_dddd
+ufunc_airye_loops[2] = loop_i_D_DDDD_As_F_FFFF
+ufunc_airye_loops[3] = loop_i_D_DDDD_As_D_DDDD
+ufunc_airye_types[0] = NPY_FLOAT
+ufunc_airye_types[1] = NPY_FLOAT
+ufunc_airye_types[2] = NPY_FLOAT
+ufunc_airye_types[3] = NPY_FLOAT
+ufunc_airye_types[4] = NPY_FLOAT
+ufunc_airye_types[5] = NPY_DOUBLE
+ufunc_airye_types[6] = NPY_DOUBLE
+ufunc_airye_types[7] = NPY_DOUBLE
+ufunc_airye_types[8] = NPY_DOUBLE
+ufunc_airye_types[9] = NPY_DOUBLE
+ufunc_airye_types[10] = NPY_CFLOAT
+ufunc_airye_types[11] = NPY_CFLOAT
+ufunc_airye_types[12] = NPY_CFLOAT
+ufunc_airye_types[13] = NPY_CFLOAT
+ufunc_airye_types[14] = NPY_CFLOAT
+ufunc_airye_types[15] = NPY_CDOUBLE
+ufunc_airye_types[16] = NPY_CDOUBLE
+ufunc_airye_types[17] = NPY_CDOUBLE
+ufunc_airye_types[18] = NPY_CDOUBLE
+ufunc_airye_types[19] = NPY_CDOUBLE
+ufunc_airye_ptr[2*0] = _func_cairy_wrap_e_real
+ufunc_airye_ptr[2*0+1] = ("airye")
+ufunc_airye_ptr[2*1] = _func_cairy_wrap_e_real
+ufunc_airye_ptr[2*1+1] = ("airye")
+ufunc_airye_ptr[2*2] = _func_cairy_wrap_e
+ufunc_airye_ptr[2*2+1] = ("airye")
+ufunc_airye_ptr[2*3] = _func_cairy_wrap_e
+ufunc_airye_ptr[2*3+1] = ("airye")
+ufunc_airye_data[0] = &ufunc_airye_ptr[2*0]
+ufunc_airye_data[1] = &ufunc_airye_ptr[2*1]
+ufunc_airye_data[2] = &ufunc_airye_ptr[2*2]
+ufunc_airye_data[3] = &ufunc_airye_ptr[2*3]
+airye = np.PyUFunc_FromFuncAndData(ufunc_airye_loops, ufunc_airye_data, ufunc_airye_types, 4, 1, 4, 0, "airye", ufunc_airye_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_bdtr_loops[3]
+cdef void *ufunc_bdtr_ptr[6]
+cdef void *ufunc_bdtr_data[3]
+cdef char ufunc_bdtr_types[12]
+cdef char *ufunc_bdtr_doc = (
+    "bdtr(k, n, p, out=None)\n"
+    "\n"
+    "Binomial distribution cumulative distribution function.\n"
+    "\n"
+    "Sum of the terms 0 through `floor(k)` of the Binomial probability density.\n"
+    "\n"
+    ".. math::\n"
+    "    \\mathrm{bdtr}(k, n, p) = \\sum_{j=0}^{\\lfloor k \\rfloor} {{n}\\choose{j}} p^j (1-p)^{n-j}\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "k : array_like\n"
+    "    Number of successes (double), rounded down to the nearest integer.\n"
+    "n : array_like\n"
+    "    Number of events (int).\n"
+    "p : array_like\n"
+    "    Probability of success in a single event (float).\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "y : scalar or ndarray\n"
+    "    Probability of `floor(k)` or fewer successes in `n` independent events with\n"
+    "    success probabilities of `p`.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "The terms are not summed directly; instead the regularized incomplete beta\n"
+    "function is employed, according to the formula,\n"
+    "\n"
+    ".. math::\n"
+    "    \\mathrm{bdtr}(k, n, p) = I_{1 - p}(n - \\lfloor k \\rfloor, \\lfloor k \\rfloor + 1).\n"
+    "\n"
+    "Wrapper for the Cephes [1]_ routine `bdtr`.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Cephes Mathematical Functions Library,\n"
+    "       http://www.netlib.org/cephes/")
+ufunc_bdtr_loops[0] = loop_d_ddd__As_fff_f
+ufunc_bdtr_loops[1] = loop_d_did__As_dld_d
+ufunc_bdtr_loops[2] = loop_d_ddd__As_ddd_d
+ufunc_bdtr_types[0] = NPY_FLOAT
+ufunc_bdtr_types[1] = NPY_FLOAT
+ufunc_bdtr_types[2] = NPY_FLOAT
+ufunc_bdtr_types[3] = NPY_FLOAT
+ufunc_bdtr_types[4] = NPY_DOUBLE
+ufunc_bdtr_types[5] = NPY_LONG
+ufunc_bdtr_types[6] = NPY_DOUBLE
+ufunc_bdtr_types[7] = NPY_DOUBLE
+ufunc_bdtr_types[8] = NPY_DOUBLE
+ufunc_bdtr_types[9] = NPY_DOUBLE
+ufunc_bdtr_types[10] = NPY_DOUBLE
+ufunc_bdtr_types[11] = NPY_DOUBLE
+ufunc_bdtr_ptr[2*0] = _func_bdtr_unsafe
+ufunc_bdtr_ptr[2*0+1] = ("bdtr")
+ufunc_bdtr_ptr[2*1] = _func_bdtr
+ufunc_bdtr_ptr[2*1+1] = ("bdtr")
+ufunc_bdtr_ptr[2*2] = _func_bdtr_unsafe
+ufunc_bdtr_ptr[2*2+1] = ("bdtr")
+ufunc_bdtr_data[0] = &ufunc_bdtr_ptr[2*0]
+ufunc_bdtr_data[1] = &ufunc_bdtr_ptr[2*1]
+ufunc_bdtr_data[2] = &ufunc_bdtr_ptr[2*2]
+bdtr = np.PyUFunc_FromFuncAndData(ufunc_bdtr_loops, ufunc_bdtr_data, ufunc_bdtr_types, 3, 3, 1, 0, "bdtr", ufunc_bdtr_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_bdtrc_loops[3]
+cdef void *ufunc_bdtrc_ptr[6]
+cdef void *ufunc_bdtrc_data[3]
+cdef char ufunc_bdtrc_types[12]
+cdef char *ufunc_bdtrc_doc = (
+    "bdtrc(k, n, p, out=None)\n"
+    "\n"
+    "Binomial distribution survival function.\n"
+    "\n"
+    "Sum of the terms `floor(k) + 1` through `n` of the binomial probability\n"
+    "density,\n"
+    "\n"
+    ".. math::\n"
+    "    \\mathrm{bdtrc}(k, n, p) = \\sum_{j=\\lfloor k \\rfloor +1}^n {{n}\\choose{j}} p^j (1-p)^{n-j}\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "k : array_like\n"
+    "    Number of successes (double), rounded down to nearest integer.\n"
+    "n : array_like\n"
+    "    Number of events (int)\n"
+    "p : array_like\n"
+    "    Probability of success in a single event.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "y : scalar or ndarray\n"
+    "    Probability of `floor(k) + 1` or more successes in `n` independent\n"
+    "    events with success probabilities of `p`.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "bdtr\n"
+    "betainc\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "The terms are not summed directly; instead the regularized incomplete beta\n"
+    "function is employed, according to the formula,\n"
+    "\n"
+    ".. math::\n"
+    "    \\mathrm{bdtrc}(k, n, p) = I_{p}(\\lfloor k \\rfloor + 1, n - \\lfloor k \\rfloor).\n"
+    "\n"
+    "Wrapper for the Cephes [1]_ routine `bdtrc`.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Cephes Mathematical Functions Library,\n"
+    "       http://www.netlib.org/cephes/")
+ufunc_bdtrc_loops[0] = loop_d_ddd__As_fff_f
+ufunc_bdtrc_loops[1] = loop_d_did__As_dld_d
+ufunc_bdtrc_loops[2] = loop_d_ddd__As_ddd_d
+ufunc_bdtrc_types[0] = NPY_FLOAT
+ufunc_bdtrc_types[1] = NPY_FLOAT
+ufunc_bdtrc_types[2] = NPY_FLOAT
+ufunc_bdtrc_types[3] = NPY_FLOAT
+ufunc_bdtrc_types[4] = NPY_DOUBLE
+ufunc_bdtrc_types[5] = NPY_LONG
+ufunc_bdtrc_types[6] = NPY_DOUBLE
+ufunc_bdtrc_types[7] = NPY_DOUBLE
+ufunc_bdtrc_types[8] = NPY_DOUBLE
+ufunc_bdtrc_types[9] = NPY_DOUBLE
+ufunc_bdtrc_types[10] = NPY_DOUBLE
+ufunc_bdtrc_types[11] = NPY_DOUBLE
+ufunc_bdtrc_ptr[2*0] = _func_bdtrc_unsafe
+ufunc_bdtrc_ptr[2*0+1] = ("bdtrc")
+ufunc_bdtrc_ptr[2*1] = _func_bdtrc
+ufunc_bdtrc_ptr[2*1+1] = ("bdtrc")
+ufunc_bdtrc_ptr[2*2] = _func_bdtrc_unsafe
+ufunc_bdtrc_ptr[2*2+1] = ("bdtrc")
+ufunc_bdtrc_data[0] = &ufunc_bdtrc_ptr[2*0]
+ufunc_bdtrc_data[1] = &ufunc_bdtrc_ptr[2*1]
+ufunc_bdtrc_data[2] = &ufunc_bdtrc_ptr[2*2]
+bdtrc = np.PyUFunc_FromFuncAndData(ufunc_bdtrc_loops, ufunc_bdtrc_data, ufunc_bdtrc_types, 3, 3, 1, 0, "bdtrc", ufunc_bdtrc_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_bdtri_loops[3]
+cdef void *ufunc_bdtri_ptr[6]
+cdef void *ufunc_bdtri_data[3]
+cdef char ufunc_bdtri_types[12]
+cdef char *ufunc_bdtri_doc = (
+    "bdtri(k, n, y, out=None)\n"
+    "\n"
+    "Inverse function to `bdtr` with respect to `p`.\n"
+    "\n"
+    "Finds the event probability `p` such that the sum of the terms 0 through\n"
+    "`k` of the binomial probability density is equal to the given cumulative\n"
+    "probability `y`.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "k : array_like\n"
+    "    Number of successes (float), rounded down to the nearest integer.\n"
+    "n : array_like\n"
+    "    Number of events (float)\n"
+    "y : array_like\n"
+    "    Cumulative probability (probability of `k` or fewer successes in `n`\n"
+    "    events).\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "p : scalar or ndarray\n"
+    "    The event probability such that `bdtr(\\lfloor k \\rfloor, n, p) = y`.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "bdtr\n"
+    "betaincinv\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "The computation is carried out using the inverse beta integral function\n"
+    "and the relation,::\n"
+    "\n"
+    "    1 - p = betaincinv(n - k, k + 1, y).\n"
+    "\n"
+    "Wrapper for the Cephes [1]_ routine `bdtri`.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Cephes Mathematical Functions Library,\n"
+    "       http://www.netlib.org/cephes/")
+ufunc_bdtri_loops[0] = loop_d_ddd__As_fff_f
+ufunc_bdtri_loops[1] = loop_d_did__As_dld_d
+ufunc_bdtri_loops[2] = loop_d_ddd__As_ddd_d
+ufunc_bdtri_types[0] = NPY_FLOAT
+ufunc_bdtri_types[1] = NPY_FLOAT
+ufunc_bdtri_types[2] = NPY_FLOAT
+ufunc_bdtri_types[3] = NPY_FLOAT
+ufunc_bdtri_types[4] = NPY_DOUBLE
+ufunc_bdtri_types[5] = NPY_LONG
+ufunc_bdtri_types[6] = NPY_DOUBLE
+ufunc_bdtri_types[7] = NPY_DOUBLE
+ufunc_bdtri_types[8] = NPY_DOUBLE
+ufunc_bdtri_types[9] = NPY_DOUBLE
+ufunc_bdtri_types[10] = NPY_DOUBLE
+ufunc_bdtri_types[11] = NPY_DOUBLE
+ufunc_bdtri_ptr[2*0] = _func_bdtri_unsafe
+ufunc_bdtri_ptr[2*0+1] = ("bdtri")
+ufunc_bdtri_ptr[2*1] = _func_bdtri
+ufunc_bdtri_ptr[2*1+1] = ("bdtri")
+ufunc_bdtri_ptr[2*2] = _func_bdtri_unsafe
+ufunc_bdtri_ptr[2*2+1] = ("bdtri")
+ufunc_bdtri_data[0] = &ufunc_bdtri_ptr[2*0]
+ufunc_bdtri_data[1] = &ufunc_bdtri_ptr[2*1]
+ufunc_bdtri_data[2] = &ufunc_bdtri_ptr[2*2]
+bdtri = np.PyUFunc_FromFuncAndData(ufunc_bdtri_loops, ufunc_bdtri_data, ufunc_bdtri_types, 3, 3, 1, 0, "bdtri", ufunc_bdtri_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_bdtrik_loops[2]
+cdef void *ufunc_bdtrik_ptr[4]
+cdef void *ufunc_bdtrik_data[2]
+cdef char ufunc_bdtrik_types[8]
+cdef char *ufunc_bdtrik_doc = (
+    "bdtrik(y, n, p, out=None)\n"
+    "\n"
+    "Inverse function to `bdtr` with respect to `k`.\n"
+    "\n"
+    "Finds the number of successes `k` such that the sum of the terms 0 through\n"
+    "`k` of the Binomial probability density for `n` events with probability\n"
+    "`p` is equal to the given cumulative probability `y`.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "y : array_like\n"
+    "    Cumulative probability (probability of `k` or fewer successes in `n`\n"
+    "    events).\n"
+    "n : array_like\n"
+    "    Number of events (float).\n"
+    "p : array_like\n"
+    "    Success probability (float).\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "k : scalar or ndarray\n"
+    "    The number of successes `k` such that `bdtr(k, n, p) = y`.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "bdtr\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the\n"
+    "cumulative incomplete beta distribution.\n"
+    "\n"
+    "Computation of `k` involves a search for a value that produces the desired\n"
+    "value of `y`. The search relies on the monotonicity of `y` with `k`.\n"
+    "\n"
+    "Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Milton Abramowitz and Irene A. Stegun, eds.\n"
+    "       Handbook of Mathematical Functions with Formulas,\n"
+    "       Graphs, and Mathematical Tables. New York: Dover, 1972.\n"
+    ".. [2] Barry Brown, James Lovato, and Kathy Russell,\n"
+    "       CDFLIB: Library of Fortran Routines for Cumulative Distribution\n"
+    "       Functions, Inverses, and Other Parameters.")
+ufunc_bdtrik_loops[0] = loop_d_ddd__As_fff_f
+ufunc_bdtrik_loops[1] = loop_d_ddd__As_ddd_d
+ufunc_bdtrik_types[0] = NPY_FLOAT
+ufunc_bdtrik_types[1] = NPY_FLOAT
+ufunc_bdtrik_types[2] = NPY_FLOAT
+ufunc_bdtrik_types[3] = NPY_FLOAT
+ufunc_bdtrik_types[4] = NPY_DOUBLE
+ufunc_bdtrik_types[5] = NPY_DOUBLE
+ufunc_bdtrik_types[6] = NPY_DOUBLE
+ufunc_bdtrik_types[7] = NPY_DOUBLE
+ufunc_bdtrik_ptr[2*0] = _func_cdfbin2_wrap
+ufunc_bdtrik_ptr[2*0+1] = ("bdtrik")
+ufunc_bdtrik_ptr[2*1] = _func_cdfbin2_wrap
+ufunc_bdtrik_ptr[2*1+1] = ("bdtrik")
+ufunc_bdtrik_data[0] = &ufunc_bdtrik_ptr[2*0]
+ufunc_bdtrik_data[1] = &ufunc_bdtrik_ptr[2*1]
+bdtrik = np.PyUFunc_FromFuncAndData(ufunc_bdtrik_loops, ufunc_bdtrik_data, ufunc_bdtrik_types, 2, 3, 1, 0, "bdtrik", ufunc_bdtrik_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_bdtrin_loops[2]
+cdef void *ufunc_bdtrin_ptr[4]
+cdef void *ufunc_bdtrin_data[2]
+cdef char ufunc_bdtrin_types[8]
+cdef char *ufunc_bdtrin_doc = (
+    "bdtrin(k, y, p, out=None)\n"
+    "\n"
+    "Inverse function to `bdtr` with respect to `n`.\n"
+    "\n"
+    "Finds the number of events `n` such that the sum of the terms 0 through\n"
+    "`k` of the Binomial probability density for events with probability `p` is\n"
+    "equal to the given cumulative probability `y`.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "k : array_like\n"
+    "    Number of successes (float).\n"
+    "y : array_like\n"
+    "    Cumulative probability (probability of `k` or fewer successes in `n`\n"
+    "    events).\n"
+    "p : array_like\n"
+    "    Success probability (float).\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "n : scalar or ndarray\n"
+    "    The number of events `n` such that `bdtr(k, n, p) = y`.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "bdtr\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the\n"
+    "cumulative incomplete beta distribution.\n"
+    "\n"
+    "Computation of `n` involves a search for a value that produces the desired\n"
+    "value of `y`. The search relies on the monotonicity of `y` with `n`.\n"
+    "\n"
+    "Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Milton Abramowitz and Irene A. Stegun, eds.\n"
+    "       Handbook of Mathematical Functions with Formulas,\n"
+    "       Graphs, and Mathematical Tables. New York: Dover, 1972.\n"
+    ".. [2] Barry Brown, James Lovato, and Kathy Russell,\n"
+    "       CDFLIB: Library of Fortran Routines for Cumulative Distribution\n"
+    "       Functions, Inverses, and Other Parameters.")
+ufunc_bdtrin_loops[0] = loop_d_ddd__As_fff_f
+ufunc_bdtrin_loops[1] = loop_d_ddd__As_ddd_d
+ufunc_bdtrin_types[0] = NPY_FLOAT
+ufunc_bdtrin_types[1] = NPY_FLOAT
+ufunc_bdtrin_types[2] = NPY_FLOAT
+ufunc_bdtrin_types[3] = NPY_FLOAT
+ufunc_bdtrin_types[4] = NPY_DOUBLE
+ufunc_bdtrin_types[5] = NPY_DOUBLE
+ufunc_bdtrin_types[6] = NPY_DOUBLE
+ufunc_bdtrin_types[7] = NPY_DOUBLE
+ufunc_bdtrin_ptr[2*0] = _func_cdfbin3_wrap
+ufunc_bdtrin_ptr[2*0+1] = ("bdtrin")
+ufunc_bdtrin_ptr[2*1] = _func_cdfbin3_wrap
+ufunc_bdtrin_ptr[2*1+1] = ("bdtrin")
+ufunc_bdtrin_data[0] = &ufunc_bdtrin_ptr[2*0]
+ufunc_bdtrin_data[1] = &ufunc_bdtrin_ptr[2*1]
+bdtrin = np.PyUFunc_FromFuncAndData(ufunc_bdtrin_loops, ufunc_bdtrin_data, ufunc_bdtrin_types, 2, 3, 1, 0, "bdtrin", ufunc_bdtrin_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_bei_loops[2]
+cdef void *ufunc_bei_ptr[4]
+cdef void *ufunc_bei_data[2]
+cdef char ufunc_bei_types[4]
+cdef char *ufunc_bei_doc = (
+    "bei(x, out=None)\n"
+    "\n"
+    "Kelvin function bei.\n"
+    "\n"
+    "Defined as\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    \\mathrm{bei}(x) = \\Im[J_0(x e^{3 \\pi i / 4})]\n"
+    "\n"
+    "where :math:`J_0` is the Bessel function of the first kind of\n"
+    "order zero (see `jv`). See [dlmf]_ for more details.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Real argument.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results.\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Values of the Kelvin function.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "ber : the corresponding real part\n"
+    "beip : the derivative of bei\n"
+    "jv : Bessel function of the first kind\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [dlmf] NIST, Digital Library of Mathematical Functions,\n"
+    "    https://dlmf.nist.gov/10.61\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "It can be expressed using Bessel functions.\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> import scipy.special as sc\n"
+    ">>> x = np.array([1.0, 2.0, 3.0, 4.0])\n"
+    ">>> sc.jv(0, x * np.exp(3 * np.pi * 1j / 4)).imag\n"
+    "array([0.24956604, 0.97229163, 1.93758679, 2.29269032])\n"
+    ">>> sc.bei(x)\n"
+    "array([0.24956604, 0.97229163, 1.93758679, 2.29269032])")
+ufunc_bei_loops[0] = loop_d_d__As_f_f
+ufunc_bei_loops[1] = loop_d_d__As_d_d
+ufunc_bei_types[0] = NPY_FLOAT
+ufunc_bei_types[1] = NPY_FLOAT
+ufunc_bei_types[2] = NPY_DOUBLE
+ufunc_bei_types[3] = NPY_DOUBLE
+ufunc_bei_ptr[2*0] = _func_bei_wrap
+ufunc_bei_ptr[2*0+1] = ("bei")
+ufunc_bei_ptr[2*1] = _func_bei_wrap
+ufunc_bei_ptr[2*1+1] = ("bei")
+ufunc_bei_data[0] = &ufunc_bei_ptr[2*0]
+ufunc_bei_data[1] = &ufunc_bei_ptr[2*1]
+bei = np.PyUFunc_FromFuncAndData(ufunc_bei_loops, ufunc_bei_data, ufunc_bei_types, 2, 1, 1, 0, "bei", ufunc_bei_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_beip_loops[2]
+cdef void *ufunc_beip_ptr[4]
+cdef void *ufunc_beip_data[2]
+cdef char ufunc_beip_types[4]
+cdef char *ufunc_beip_doc = (
+    "beip(x, out=None)\n"
+    "\n"
+    "Derivative of the Kelvin function bei.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Real argument.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results.\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    The values of the derivative of bei.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "bei\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [dlmf] NIST, Digital Library of Mathematical Functions,\n"
+    "    https://dlmf.nist.gov/10#PT5")
+ufunc_beip_loops[0] = loop_d_d__As_f_f
+ufunc_beip_loops[1] = loop_d_d__As_d_d
+ufunc_beip_types[0] = NPY_FLOAT
+ufunc_beip_types[1] = NPY_FLOAT
+ufunc_beip_types[2] = NPY_DOUBLE
+ufunc_beip_types[3] = NPY_DOUBLE
+ufunc_beip_ptr[2*0] = _func_beip_wrap
+ufunc_beip_ptr[2*0+1] = ("beip")
+ufunc_beip_ptr[2*1] = _func_beip_wrap
+ufunc_beip_ptr[2*1+1] = ("beip")
+ufunc_beip_data[0] = &ufunc_beip_ptr[2*0]
+ufunc_beip_data[1] = &ufunc_beip_ptr[2*1]
+beip = np.PyUFunc_FromFuncAndData(ufunc_beip_loops, ufunc_beip_data, ufunc_beip_types, 2, 1, 1, 0, "beip", ufunc_beip_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_ber_loops[2]
+cdef void *ufunc_ber_ptr[4]
+cdef void *ufunc_ber_data[2]
+cdef char ufunc_ber_types[4]
+cdef char *ufunc_ber_doc = (
+    "ber(x, out=None)\n"
+    "\n"
+    "Kelvin function ber.\n"
+    "\n"
+    "Defined as\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    \\mathrm{ber}(x) = \\Re[J_0(x e^{3 \\pi i / 4})]\n"
+    "\n"
+    "where :math:`J_0` is the Bessel function of the first kind of\n"
+    "order zero (see `jv`). See [dlmf]_ for more details.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Real argument.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results.\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Values of the Kelvin function.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "bei : the corresponding real part\n"
+    "berp : the derivative of bei\n"
+    "jv : Bessel function of the first kind\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [dlmf] NIST, Digital Library of Mathematical Functions,\n"
+    "    https://dlmf.nist.gov/10.61\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "It can be expressed using Bessel functions.\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> import scipy.special as sc\n"
+    ">>> x = np.array([1.0, 2.0, 3.0, 4.0])\n"
+    ">>> sc.jv(0, x * np.exp(3 * np.pi * 1j / 4)).real\n"
+    "array([ 0.98438178,  0.75173418, -0.22138025, -2.56341656])\n"
+    ">>> sc.ber(x)\n"
+    "array([ 0.98438178,  0.75173418, -0.22138025, -2.56341656])")
+ufunc_ber_loops[0] = loop_d_d__As_f_f
+ufunc_ber_loops[1] = loop_d_d__As_d_d
+ufunc_ber_types[0] = NPY_FLOAT
+ufunc_ber_types[1] = NPY_FLOAT
+ufunc_ber_types[2] = NPY_DOUBLE
+ufunc_ber_types[3] = NPY_DOUBLE
+ufunc_ber_ptr[2*0] = _func_ber_wrap
+ufunc_ber_ptr[2*0+1] = ("ber")
+ufunc_ber_ptr[2*1] = _func_ber_wrap
+ufunc_ber_ptr[2*1+1] = ("ber")
+ufunc_ber_data[0] = &ufunc_ber_ptr[2*0]
+ufunc_ber_data[1] = &ufunc_ber_ptr[2*1]
+ber = np.PyUFunc_FromFuncAndData(ufunc_ber_loops, ufunc_ber_data, ufunc_ber_types, 2, 1, 1, 0, "ber", ufunc_ber_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_berp_loops[2]
+cdef void *ufunc_berp_ptr[4]
+cdef void *ufunc_berp_data[2]
+cdef char ufunc_berp_types[4]
+cdef char *ufunc_berp_doc = (
+    "berp(x, out=None)\n"
+    "\n"
+    "Derivative of the Kelvin function ber.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Real argument.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results.\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    The values of the derivative of ber.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "ber\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [dlmf] NIST, Digital Library of Mathematical Functions,\n"
+    "    https://dlmf.nist.gov/10#PT5")
+ufunc_berp_loops[0] = loop_d_d__As_f_f
+ufunc_berp_loops[1] = loop_d_d__As_d_d
+ufunc_berp_types[0] = NPY_FLOAT
+ufunc_berp_types[1] = NPY_FLOAT
+ufunc_berp_types[2] = NPY_DOUBLE
+ufunc_berp_types[3] = NPY_DOUBLE
+ufunc_berp_ptr[2*0] = _func_berp_wrap
+ufunc_berp_ptr[2*0+1] = ("berp")
+ufunc_berp_ptr[2*1] = _func_berp_wrap
+ufunc_berp_ptr[2*1+1] = ("berp")
+ufunc_berp_data[0] = &ufunc_berp_ptr[2*0]
+ufunc_berp_data[1] = &ufunc_berp_ptr[2*1]
+berp = np.PyUFunc_FromFuncAndData(ufunc_berp_loops, ufunc_berp_data, ufunc_berp_types, 2, 1, 1, 0, "berp", ufunc_berp_doc, 0)
+
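+# The same style of check for ``berp``, which also ships without Examples:
+# under the documented relationship, berp is the derivative of ber, so a
+# central finite difference of ``ber`` should reproduce it.
+#
+#     >>> import numpy as np
+#     >>> import scipy.special as sc
+#     >>> x, h = 2.0, 1e-6
+#     >>> np.isclose((sc.ber(x + h) - sc.ber(x - h)) / (2 * h), sc.berp(x))
+#     True
+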
+cdef np.PyUFuncGenericFunction ufunc_besselpoly_loops[2]
+cdef void *ufunc_besselpoly_ptr[4]
+cdef void *ufunc_besselpoly_data[2]
+cdef char ufunc_besselpoly_types[8]
+cdef char *ufunc_besselpoly_doc = (
+    "besselpoly(a, lmb, nu, out=None)\n"
+    "\n"
+    "Weighted integral of the Bessel function of the first kind.\n"
+    "\n"
+    "Computes\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "   \\int_0^1 x^\\lambda J_\\nu(2 a x) \\, dx\n"
+    "\n"
+    "where :math:`J_\\nu` is a Bessel function and :math:`\\lambda=lmb`,\n"
+    ":math:`\\nu=nu`.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "a : array_like\n"
+    "    Scale factor inside the Bessel function.\n"
+    "lmb : array_like\n"
+    "    Power of `x`\n"
+    "nu : array_like\n"
+    "    Order of the Bessel function.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results.\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Value of the integral.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Cephes Mathematical Functions Library,\n"
+    "       http://www.netlib.org/cephes/\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Evaluate the function for one parameter set.\n"
+    "\n"
+    ">>> from scipy.special import besselpoly\n"
+    ">>> besselpoly(1, 1, 1)\n"
+    "0.24449718372863877\n"
+    "\n"
+    "Evaluate the function for different scale factors.\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> factors = np.array([0., 3., 6.])\n"
+    ">>> besselpoly(factors, 1, 1)\n"
+    "array([ 0.        , -0.00549029,  0.00140174])\n"
+    "\n"
+    "Plot the function for varying powers, orders and scales.\n"
+    "\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> powers = np.linspace(0, 10, 100)\n"
+    ">>> orders = [1, 2, 3]\n"
+    ">>> scales = [1, 2]\n"
+    ">>> all_combinations = [(order, scale) for order in orders\n"
+    "...                     for scale in scales]\n"
+    ">>> for order, scale in all_combinations:\n"
+    "...     ax.plot(powers, besselpoly(scale, powers, order),\n"
+    "...             label=rf\"$\\nu={order}, a={scale}$\")\n"
+    ">>> ax.legend()\n"
+    ">>> ax.set_xlabel(r\"$\\lambda$\")\n"
+    ">>> ax.set_ylabel(r\"$\\int_0^1 x^{\\lambda} J_{\\nu}(2ax)\\,dx$\")\n"
+    ">>> plt.show()")
+ufunc_besselpoly_loops[0] = loop_d_ddd__As_fff_f
+ufunc_besselpoly_loops[1] = loop_d_ddd__As_ddd_d
+ufunc_besselpoly_types[0] = NPY_FLOAT
+ufunc_besselpoly_types[1] = NPY_FLOAT
+ufunc_besselpoly_types[2] = NPY_FLOAT
+ufunc_besselpoly_types[3] = NPY_FLOAT
+ufunc_besselpoly_types[4] = NPY_DOUBLE
+ufunc_besselpoly_types[5] = NPY_DOUBLE
+ufunc_besselpoly_types[6] = NPY_DOUBLE
+ufunc_besselpoly_types[7] = NPY_DOUBLE
+ufunc_besselpoly_ptr[2*0] = _func_besselpoly
+ufunc_besselpoly_ptr[2*0+1] = ("besselpoly")
+ufunc_besselpoly_ptr[2*1] = _func_besselpoly
+ufunc_besselpoly_ptr[2*1+1] = ("besselpoly")
+ufunc_besselpoly_data[0] = &ufunc_besselpoly_ptr[2*0]
+ufunc_besselpoly_data[1] = &ufunc_besselpoly_ptr[2*1]
+besselpoly = np.PyUFunc_FromFuncAndData(ufunc_besselpoly_loops, ufunc_besselpoly_data, ufunc_besselpoly_types, 2, 3, 1, 0, "besselpoly", ufunc_besselpoly_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_beta_loops[2]
+cdef void *ufunc_beta_ptr[4]
+cdef void *ufunc_beta_data[2]
+cdef char ufunc_beta_types[6]
+cdef char *ufunc_beta_doc = (
+    "beta(a, b, out=None)\n"
+    "\n"
+    "Beta function.\n"
+    "\n"
+    "This function is defined in [1]_ as\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    B(a, b) = \\int_0^1 t^{a-1}(1-t)^{b-1}dt\n"
+    "            = \\frac{\\Gamma(a)\\Gamma(b)}{\\Gamma(a+b)},\n"
+    "\n"
+    "where :math:`\\Gamma` is the gamma function.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "a, b : array_like\n"
+    "    Real-valued arguments\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function result\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Value of the beta function\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "gamma : the gamma function\n"
+    "betainc :  the regularized incomplete beta function\n"
+    "betaln : the natural logarithm of the absolute\n"
+    "         value of the beta function\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] NIST Digital Library of Mathematical Functions,\n"
+    "       Eq. 5.12.1. https://dlmf.nist.gov/5.12\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import scipy.special as sc\n"
+    "\n"
+    "The beta function relates to the gamma function by the\n"
+    "definition given above:\n"
+    "\n"
+    ">>> sc.beta(2, 3)\n"
+    "0.08333333333333333\n"
+    ">>> sc.gamma(2)*sc.gamma(3)/sc.gamma(2 + 3)\n"
+    "0.08333333333333333\n"
+    "\n"
+    "As this relationship demonstrates, the beta function\n"
+    "is symmetric:\n"
+    "\n"
+    ">>> sc.beta(1.7, 2.4)\n"
+    "0.16567527689031739\n"
+    ">>> sc.beta(2.4, 1.7)\n"
+    "0.16567527689031739\n"
+    "\n"
+    "This function satisfies :math:`B(1, b) = 1/b`:\n"
+    "\n"
+    ">>> sc.beta(1, 4)\n"
+    "0.25")
+ufunc_beta_loops[0] = loop_d_dd__As_ff_f
+ufunc_beta_loops[1] = loop_d_dd__As_dd_d
+ufunc_beta_types[0] = NPY_FLOAT
+ufunc_beta_types[1] = NPY_FLOAT
+ufunc_beta_types[2] = NPY_FLOAT
+ufunc_beta_types[3] = NPY_DOUBLE
+ufunc_beta_types[4] = NPY_DOUBLE
+ufunc_beta_types[5] = NPY_DOUBLE
+ufunc_beta_ptr[2*0] = _func_beta
+ufunc_beta_ptr[2*0+1] = ("beta")
+ufunc_beta_ptr[2*1] = _func_beta
+ufunc_beta_ptr[2*1+1] = ("beta")
+ufunc_beta_data[0] = &ufunc_beta_ptr[2*0]
+ufunc_beta_data[1] = &ufunc_beta_ptr[2*1]
+beta = np.PyUFunc_FromFuncAndData(ufunc_beta_loops, ufunc_beta_data, ufunc_beta_types, 2, 2, 1, 0, "beta", ufunc_beta_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_betainc_loops[2]
+cdef void *ufunc_betainc_ptr[4]
+cdef void *ufunc_betainc_data[2]
+cdef char ufunc_betainc_types[8]
+cdef char *ufunc_betainc_doc = (
+    "betainc(a, b, x, out=None)\n"
+    "\n"
+    "Regularized incomplete beta function.\n"
+    "\n"
+    "Computes the regularized incomplete beta function, defined as [1]_:\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    I_x(a, b) = \\frac{\\Gamma(a+b)}{\\Gamma(a)\\Gamma(b)} \\int_0^x\n"
+    "    t^{a-1}(1-t)^{b-1}dt,\n"
+    "\n"
+    "for :math:`0 \\leq x \\leq 1`.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "a, b : array_like\n"
+    "       Positive, real-valued parameters\n"
+    "x : array_like\n"
+    "    Real-valued such that :math:`0 \\leq x \\leq 1`,\n"
+    "    the upper limit of integration\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Value of the regularized incomplete beta function\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "beta : beta function\n"
+    "betaincinv : inverse of the regularized incomplete beta function\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "The term *regularized* in the name of this function refers to the\n"
+    "scaling of the function by the gamma function terms shown in the\n"
+    "formula.  When not qualified as *regularized*, the name *incomplete\n"
+    "beta function* often refers to just the integral expression,\n"
+    "without the gamma terms.  One can use the function `beta` from\n"
+    "`scipy.special` to get this \"nonregularized\" incomplete beta\n"
+    "function by multiplying the result of ``betainc(a, b, x)`` by\n"
+    "``beta(a, b)``.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] NIST Digital Library of Mathematical Functions\n"
+    "       https://dlmf.nist.gov/8.17\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "\n"
+    "Let :math:`B(a, b)` be the `beta` function.\n"
+    "\n"
+    ">>> import scipy.special as sc\n"
+    "\n"
+    "The coefficient in terms of `gamma` is equal to\n"
+    ":math:`1/B(a, b)`. Also, when :math:`x=1`\n"
+    "the integral is equal to :math:`B(a, b)`.\n"
+    "Therefore, :math:`I_{x=1}(a, b) = 1` for any :math:`a, b`.\n"
+    "\n"
+    ">>> sc.betainc(0.2, 3.5, 1.0)\n"
+    "1.0\n"
+    "\n"
+    "It satisfies\n"
+    ":math:`I_x(a, b) = x^a F(a, 1-b, a+1, x)/ (aB(a, b))`,\n"
+    "where :math:`F` is the hypergeometric function `hyp2f1`:\n"
+    "\n"
+    ">>> a, b, x = 1.4, 3.1, 0.5\n"
+    ">>> x**a * sc.hyp2f1(a, 1 - b, a + 1, x)/(a * sc.beta(a, b))\n"
+    "0.8148904036225295\n"
+    ">>> sc.betainc(a, b, x)\n"
+    "0.8148904036225296\n"
+    "\n"
+    "This functions satisfies the relationship\n"
+    ":math:`I_x(a, b) = 1 - I_{1-x}(b, a)`:\n"
+    "\n"
+    ">>> sc.betainc(2.2, 3.1, 0.4)\n"
+    "0.49339638807619446\n"
+    ">>> 1 - sc.betainc(3.1, 2.2, 1 - 0.4)\n"
+    "0.49339638807619446")
+ufunc_betainc_loops[0] = loop_d_ddd__As_fff_f
+ufunc_betainc_loops[1] = loop_d_ddd__As_ddd_d
+ufunc_betainc_types[0] = NPY_FLOAT
+ufunc_betainc_types[1] = NPY_FLOAT
+ufunc_betainc_types[2] = NPY_FLOAT
+ufunc_betainc_types[3] = NPY_FLOAT
+ufunc_betainc_types[4] = NPY_DOUBLE
+ufunc_betainc_types[5] = NPY_DOUBLE
+ufunc_betainc_types[6] = NPY_DOUBLE
+ufunc_betainc_types[7] = NPY_DOUBLE
+ufunc_betainc_ptr[2*0] = _func_incbet
+ufunc_betainc_ptr[2*0+1] = ("betainc")
+ufunc_betainc_ptr[2*1] = _func_incbet
+ufunc_betainc_ptr[2*1+1] = ("betainc")
+ufunc_betainc_data[0] = &ufunc_betainc_ptr[2*0]
+ufunc_betainc_data[1] = &ufunc_betainc_ptr[2*1]
+betainc = np.PyUFunc_FromFuncAndData(ufunc_betainc_loops, ufunc_betainc_data, ufunc_betainc_types, 2, 3, 1, 0, "betainc", ufunc_betainc_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_betaincinv_loops[2]
+cdef void *ufunc_betaincinv_ptr[4]
+cdef void *ufunc_betaincinv_data[2]
+cdef char ufunc_betaincinv_types[8]
+cdef char *ufunc_betaincinv_doc = (
+    "betaincinv(a, b, y, out=None)\n"
+    "\n"
+    "Inverse of the regularized incomplete beta function.\n"
+    "\n"
+    "Computes :math:`x` such that:\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    y = I_x(a, b) = \\frac{\\Gamma(a+b)}{\\Gamma(a)\\Gamma(b)}\n"
+    "    \\int_0^x t^{a-1}(1-t)^{b-1}dt,\n"
+    "\n"
+    "where :math:`I_x` is the normalized incomplete beta\n"
+    "function `betainc` and\n"
+    ":math:`\\Gamma` is the `gamma` function [1]_.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "a, b : array_like\n"
+    "    Positive, real-valued parameters\n"
+    "y : array_like\n"
+    "    Real-valued input\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Value of the inverse of the regularized incomplete beta function\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "betainc : regularized incomplete beta function\n"
+    "gamma : gamma function\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] NIST Digital Library of Mathematical Functions\n"
+    "       https://dlmf.nist.gov/8.17\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import scipy.special as sc\n"
+    "\n"
+    "This function is the inverse of `betainc` for fixed\n"
+    "values of :math:`a` and :math:`b`.\n"
+    "\n"
+    ">>> a, b = 1.2, 3.1\n"
+    ">>> y = sc.betainc(a, b, 0.2)\n"
+    ">>> sc.betaincinv(a, b, y)\n"
+    "0.2\n"
+    ">>>\n"
+    ">>> a, b = 7.5, 0.4\n"
+    ">>> x = sc.betaincinv(a, b, 0.5)\n"
+    ">>> sc.betainc(a, b, x)\n"
+    "0.5")
+ufunc_betaincinv_loops[0] = loop_d_ddd__As_fff_f
+ufunc_betaincinv_loops[1] = loop_d_ddd__As_ddd_d
+ufunc_betaincinv_types[0] = NPY_FLOAT
+ufunc_betaincinv_types[1] = NPY_FLOAT
+ufunc_betaincinv_types[2] = NPY_FLOAT
+ufunc_betaincinv_types[3] = NPY_FLOAT
+ufunc_betaincinv_types[4] = NPY_DOUBLE
+ufunc_betaincinv_types[5] = NPY_DOUBLE
+ufunc_betaincinv_types[6] = NPY_DOUBLE
+ufunc_betaincinv_types[7] = NPY_DOUBLE
+ufunc_betaincinv_ptr[2*0] = _func_incbi
+ufunc_betaincinv_ptr[2*0+1] = ("betaincinv")
+ufunc_betaincinv_ptr[2*1] = _func_incbi
+ufunc_betaincinv_ptr[2*1+1] = ("betaincinv")
+ufunc_betaincinv_data[0] = &ufunc_betaincinv_ptr[2*0]
+ufunc_betaincinv_data[1] = &ufunc_betaincinv_ptr[2*1]
+betaincinv = np.PyUFunc_FromFuncAndData(ufunc_betaincinv_loops, ufunc_betaincinv_data, ufunc_betaincinv_types, 2, 3, 1, 0, "betaincinv", ufunc_betaincinv_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_betaln_loops[2]
+cdef void *ufunc_betaln_ptr[4]
+cdef void *ufunc_betaln_data[2]
+cdef char ufunc_betaln_types[6]
+cdef char *ufunc_betaln_doc = (
+    "betaln(a, b, out=None)\n"
+    "\n"
+    "Natural logarithm of absolute value of beta function.\n"
+    "\n"
+    "Computes ``ln(abs(beta(a, b)))``.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "a, b : array_like\n"
+    "    Positive, real-valued parameters\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Value of the betaln function\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "gamma : the gamma function\n"
+    "betainc :  the regularized incomplete beta function\n"
+    "beta : the beta function\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import betaln, beta\n"
+    "\n"
+    "Verify that, for moderate values of ``a`` and ``b``, ``betaln(a, b)``\n"
+    "is the same as ``log(beta(a, b))``:\n"
+    "\n"
+    ">>> betaln(3, 4)\n"
+    "-4.0943445622221\n"
+    "\n"
+    ">>> np.log(beta(3, 4))\n"
+    "-4.0943445622221\n"
+    "\n"
+    "In the following ``beta(a, b)`` underflows to 0, so we can't compute\n"
+    "the logarithm of the actual value.\n"
+    "\n"
+    ">>> a = 400\n"
+    ">>> b = 900\n"
+    ">>> beta(a, b)\n"
+    "0.0\n"
+    "\n"
+    "We can compute the logarithm of ``beta(a, b)`` by using `betaln`:\n"
+    "\n"
+    ">>> betaln(a, b)\n"
+    "-804.3069951764146")
+ufunc_betaln_loops[0] = loop_d_dd__As_ff_f
+ufunc_betaln_loops[1] = loop_d_dd__As_dd_d
+ufunc_betaln_types[0] = NPY_FLOAT
+ufunc_betaln_types[1] = NPY_FLOAT
+ufunc_betaln_types[2] = NPY_FLOAT
+ufunc_betaln_types[3] = NPY_DOUBLE
+ufunc_betaln_types[4] = NPY_DOUBLE
+ufunc_betaln_types[5] = NPY_DOUBLE
+ufunc_betaln_ptr[2*0] = _func_lbeta
+ufunc_betaln_ptr[2*0+1] = ("betaln")
+ufunc_betaln_ptr[2*1] = _func_lbeta
+ufunc_betaln_ptr[2*1+1] = ("betaln")
+ufunc_betaln_data[0] = &ufunc_betaln_ptr[2*0]
+ufunc_betaln_data[1] = &ufunc_betaln_ptr[2*1]
+betaln = np.PyUFunc_FromFuncAndData(ufunc_betaln_loops, ufunc_betaln_data, ufunc_betaln_types, 2, 2, 1, 0, "betaln", ufunc_betaln_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_binom_loops[2]
+cdef void *ufunc_binom_ptr[4]
+cdef void *ufunc_binom_data[2]
+cdef char ufunc_binom_types[6]
+cdef char *ufunc_binom_doc = (
+    "binom(x, y, out=None)\n"
+    "\n"
+    "Binomial coefficient considered as a function of two real variables.\n"
+    "\n"
+    "For real arguments, the binomial coefficient is defined as\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    \\binom{x}{y} = \\frac{\\Gamma(x + 1)}{\\Gamma(y + 1)\\Gamma(x - y + 1)} =\n"
+    "        \\frac{1}{(x + 1)\\mathrm{B}(x - y + 1, y + 1)}\n"
+    "\n"
+    "Where :math:`\\Gamma` is the Gamma function (`gamma`) and :math:`\\mathrm{B}`\n"
+    "is the Beta function (`beta`) [1]_.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x, y: array_like\n"
+    "   Real arguments to :math:`\\binom{x}{y}`.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Value of binomial coefficient.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "comb : The number of combinations of N things taken k at a time.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "The Gamma function has poles at non-positive integers and tends to either\n"
+    "positive or negative infinity depending on the direction on the real line\n"
+    "from which a pole is approached. When considered as a function of two real\n"
+    "variables, :math:`\\binom{x}{y}` is thus undefined when `x` is a negative\n"
+    "integer.  `binom` returns ``nan`` when ``x`` is a negative integer. This\n"
+    "is the case even when ``x`` is a negative integer and ``y`` an integer,\n"
+    "contrary to the usual convention for defining :math:`\\binom{n}{k}` when it\n"
+    "is considered as a function of two integer variables.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] https://en.wikipedia.org/wiki/Binomial_coefficient\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "The following examples illustrate the ways in which `binom` differs from\n"
+    "the function `comb`.\n"
+    "\n"
+    ">>> from scipy.special import binom, comb\n"
+    "\n"
+    "When ``exact=False`` and ``x`` and ``y`` are both positive, `comb` calls\n"
+    "`binom` internally.\n"
+    "\n"
+    ">>> x, y = 3, 2\n"
+    ">>> (binom(x, y), comb(x, y), comb(x, y, exact=True))\n"
+    "(3.0, 3.0, 3)\n"
+    "\n"
+    "For larger values, `comb` with ``exact=True`` no longer agrees\n"
+    "with `binom`.\n"
+    "\n"
+    ">>> x, y = 43, 23\n"
+    ">>> (binom(x, y), comb(x, y), comb(x, y, exact=True))\n"
+    "(960566918219.9999, 960566918219.9999, 960566918220)\n"
+    "\n"
+    "`binom` returns ``nan`` when ``x`` is a negative integer, but is otherwise\n"
+    "defined for negative arguments. `comb` returns 0 whenever one of ``x`` or\n"
+    "``y`` is negative or ``x`` is less than ``y``.\n"
+    "\n"
+    ">>> x, y = -3, 2\n"
+    ">>> (binom(x, y), comb(x, y), comb(x, y, exact=True))\n"
+    "(nan, 0.0, 0)\n"
+    "\n"
+    ">>> x, y = -3.1, 2.2\n"
+    ">>> (binom(x, y), comb(x, y), comb(x, y, exact=True))\n"
+    "(18.714147876804432, 0.0, 0)\n"
+    "\n"
+    ">>> x, y = 2.2, 3.1\n"
+    ">>> (binom(x, y), comb(x, y), comb(x, y, exact=True))\n"
+    "(0.037399983365134115, 0.0, 0)")
+ufunc_binom_loops[0] = loop_d_dd__As_ff_f
+ufunc_binom_loops[1] = loop_d_dd__As_dd_d
+ufunc_binom_types[0] = NPY_FLOAT
+ufunc_binom_types[1] = NPY_FLOAT
+ufunc_binom_types[2] = NPY_FLOAT
+ufunc_binom_types[3] = NPY_DOUBLE
+ufunc_binom_types[4] = NPY_DOUBLE
+ufunc_binom_types[5] = NPY_DOUBLE
+ufunc_binom_ptr[2*0] = _func_binom
+ufunc_binom_ptr[2*0+1] = ("binom")
+ufunc_binom_ptr[2*1] = _func_binom
+ufunc_binom_ptr[2*1+1] = ("binom")
+ufunc_binom_data[0] = &ufunc_binom_ptr[2*0]
+ufunc_binom_data[1] = &ufunc_binom_ptr[2*1]
+binom = np.PyUFunc_FromFuncAndData(ufunc_binom_loops, ufunc_binom_data, ufunc_binom_types, 2, 2, 1, 0, "binom", ufunc_binom_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_boxcox_loops[2]
+cdef void *ufunc_boxcox_ptr[4]
+cdef void *ufunc_boxcox_data[2]
+cdef char ufunc_boxcox_types[6]
+cdef char *ufunc_boxcox_doc = (
+    "boxcox(x, lmbda, out=None)\n"
+    "\n"
+    "Compute the Box-Cox transformation.\n"
+    "\n"
+    "The Box-Cox transformation is::\n"
+    "\n"
+    "    y = (x**lmbda - 1) / lmbda  if lmbda != 0\n"
+    "        log(x)                  if lmbda == 0\n"
+    "\n"
+    "Returns `nan` if ``x < 0``.\n"
+    "Returns `-inf` if ``x == 0`` and ``lmbda < 0``.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Data to be transformed.\n"
+    "lmbda : array_like\n"
+    "    Power parameter of the Box-Cox transform.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "y : scalar or ndarray\n"
+    "    Transformed data.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "\n"
+    ".. versionadded:: 0.14.0\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> from scipy.special import boxcox\n"
+    ">>> boxcox([1, 4, 10], 2.5)\n"
+    "array([   0.        ,   12.4       ,  126.09110641])\n"
+    ">>> boxcox(2, [0, 1, 2])\n"
+    "array([ 0.69314718,  1.        ,  1.5       ])")
+ufunc_boxcox_loops[0] = loop_d_dd__As_ff_f
+ufunc_boxcox_loops[1] = loop_d_dd__As_dd_d
+ufunc_boxcox_types[0] = NPY_FLOAT
+ufunc_boxcox_types[1] = NPY_FLOAT
+ufunc_boxcox_types[2] = NPY_FLOAT
+ufunc_boxcox_types[3] = NPY_DOUBLE
+ufunc_boxcox_types[4] = NPY_DOUBLE
+ufunc_boxcox_types[5] = NPY_DOUBLE
+ufunc_boxcox_ptr[2*0] = _func_boxcox
+ufunc_boxcox_ptr[2*0+1] = ("boxcox")
+ufunc_boxcox_ptr[2*1] = _func_boxcox
+ufunc_boxcox_ptr[2*1+1] = ("boxcox")
+ufunc_boxcox_data[0] = &ufunc_boxcox_ptr[2*0]
+ufunc_boxcox_data[1] = &ufunc_boxcox_ptr[2*1]
+boxcox = np.PyUFunc_FromFuncAndData(ufunc_boxcox_loops, ufunc_boxcox_data, ufunc_boxcox_types, 2, 2, 1, 0, "boxcox", ufunc_boxcox_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_boxcox1p_loops[2]
+cdef void *ufunc_boxcox1p_ptr[4]
+cdef void *ufunc_boxcox1p_data[2]
+cdef char ufunc_boxcox1p_types[6]
+cdef char *ufunc_boxcox1p_doc = (
+    "boxcox1p(x, lmbda, out=None)\n"
+    "\n"
+    "Compute the Box-Cox transformation of 1 + `x`.\n"
+    "\n"
+    "The Box-Cox transformation computed by `boxcox1p` is::\n"
+    "\n"
+    "    y = ((1+x)**lmbda - 1) / lmbda  if lmbda != 0\n"
+    "        log(1+x)                    if lmbda == 0\n"
+    "\n"
+    "Returns `nan` if ``x < -1``.\n"
+    "Returns `-inf` if ``x == -1`` and ``lmbda < 0``.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Data to be transformed.\n"
+    "lmbda : array_like\n"
+    "    Power parameter of the Box-Cox transform.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "y : scalar or ndarray\n"
+    "    Transformed data.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "\n"
+    ".. versionadded:: 0.14.0\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> from scipy.special import boxcox1p\n"
+    ">>> boxcox1p(1e-4, [0, 0.5, 1])\n"
+    "array([  9.99950003e-05,   9.99975001e-05,   1.00000000e-04])\n"
+    ">>> boxcox1p([0.01, 0.1], 0.25)\n"
+    "array([ 0.00996272,  0.09645476])")
+ufunc_boxcox1p_loops[0] = loop_d_dd__As_ff_f
+ufunc_boxcox1p_loops[1] = loop_d_dd__As_dd_d
+ufunc_boxcox1p_types[0] = NPY_FLOAT
+ufunc_boxcox1p_types[1] = NPY_FLOAT
+ufunc_boxcox1p_types[2] = NPY_FLOAT
+ufunc_boxcox1p_types[3] = NPY_DOUBLE
+ufunc_boxcox1p_types[4] = NPY_DOUBLE
+ufunc_boxcox1p_types[5] = NPY_DOUBLE
+ufunc_boxcox1p_ptr[2*0] = _func_boxcox1p
+ufunc_boxcox1p_ptr[2*0+1] = ("boxcox1p")
+ufunc_boxcox1p_ptr[2*1] = _func_boxcox1p
+ufunc_boxcox1p_ptr[2*1+1] = ("boxcox1p")
+ufunc_boxcox1p_data[0] = &ufunc_boxcox1p_ptr[2*0]
+ufunc_boxcox1p_data[1] = &ufunc_boxcox1p_ptr[2*1]
+boxcox1p = np.PyUFunc_FromFuncAndData(ufunc_boxcox1p_loops, ufunc_boxcox1p_data, ufunc_boxcox1p_types, 2, 2, 1, 0, "boxcox1p", ufunc_boxcox1p_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_btdtr_loops[2]
+cdef void *ufunc_btdtr_ptr[4]
+cdef void *ufunc_btdtr_data[2]
+cdef char ufunc_btdtr_types[8]
+cdef char *ufunc_btdtr_doc = (
+    "btdtr(a, b, x, out=None)\n"
+    "\n"
+    "Cumulative distribution function of the beta distribution.\n"
+    "\n"
+    "Returns the integral from zero to `x` of the beta probability density\n"
+    "function,\n"
+    "\n"
+    ".. math::\n"
+    "    I = \\int_0^x \\frac{\\Gamma(a + b)}{\\Gamma(a)\\Gamma(b)} t^{a-1} (1-t)^{b-1}\\,dt\n"
+    "\n"
+    "where :math:`\\Gamma` is the gamma function.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "a : array_like\n"
+    "    Shape parameter (a > 0).\n"
+    "b : array_like\n"
+    "    Shape parameter (b > 0).\n"
+    "x : array_like\n"
+    "    Upper limit of integration, in [0, 1].\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "I : scalar or ndarray\n"
+    "    Cumulative distribution function of the beta distribution with\n"
+    "    parameters `a` and `b` at `x`.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "betainc\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "This function is identical to the incomplete beta integral function\n"
+    "`betainc`.\n"
+    "\n"
+    "Wrapper for the Cephes [1]_ routine `btdtr`.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Cephes Mathematical Functions Library,\n"
+    "       http://www.netlib.org/cephes/")
+ufunc_btdtr_loops[0] = loop_d_ddd__As_fff_f
+ufunc_btdtr_loops[1] = loop_d_ddd__As_ddd_d
+ufunc_btdtr_types[0] = NPY_FLOAT
+ufunc_btdtr_types[1] = NPY_FLOAT
+ufunc_btdtr_types[2] = NPY_FLOAT
+ufunc_btdtr_types[3] = NPY_FLOAT
+ufunc_btdtr_types[4] = NPY_DOUBLE
+ufunc_btdtr_types[5] = NPY_DOUBLE
+ufunc_btdtr_types[6] = NPY_DOUBLE
+ufunc_btdtr_types[7] = NPY_DOUBLE
+ufunc_btdtr_ptr[2*0] = _func_btdtr
+ufunc_btdtr_ptr[2*0+1] = ("btdtr")
+ufunc_btdtr_ptr[2*1] = _func_btdtr
+ufunc_btdtr_ptr[2*1+1] = ("btdtr")
+ufunc_btdtr_data[0] = &ufunc_btdtr_ptr[2*0]
+ufunc_btdtr_data[1] = &ufunc_btdtr_ptr[2*1]
+btdtr = np.PyUFunc_FromFuncAndData(ufunc_btdtr_loops, ufunc_btdtr_data, ufunc_btdtr_types, 2, 3, 1, 0, "btdtr", ufunc_btdtr_doc, 0)
+
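+# The Notes above state that ``btdtr`` is identical to ``betainc``; a short
+# doctest-style sketch of that identity (parameter values are arbitrary):
+#
+#     >>> import numpy as np
+#     >>> import scipy.special as sc
+#     >>> np.isclose(sc.btdtr(2.0, 3.0, 0.25), sc.betainc(2.0, 3.0, 0.25))
+#     True
+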
+cdef np.PyUFuncGenericFunction ufunc_btdtri_loops[2]
+cdef void *ufunc_btdtri_ptr[4]
+cdef void *ufunc_btdtri_data[2]
+cdef char ufunc_btdtri_types[8]
+cdef char *ufunc_btdtri_doc = (
+    "btdtri(a, b, p, out=None)\n"
+    "\n"
+    "The `p`-th quantile of the beta distribution.\n"
+    "\n"
+    "This function is the inverse of the beta cumulative distribution function,\n"
+    "`btdtr`, returning the value of `x` for which `btdtr(a, b, x) = p`, or\n"
+    "\n"
+    ".. math::\n"
+    "    p = \\int_0^x \\frac{\\Gamma(a + b)}{\\Gamma(a)\\Gamma(b)} t^{a-1} (1-t)^{b-1}\\,dt\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "a : array_like\n"
+    "    Shape parameter (`a` > 0).\n"
+    "b : array_like\n"
+    "    Shape parameter (`b` > 0).\n"
+    "p : array_like\n"
+    "    Cumulative probability, in [0, 1].\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "x : scalar or ndarray\n"
+    "    The quantile corresponding to `p`.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "betaincinv\n"
+    "btdtr\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "The value of `x` is found by interval halving or Newton iterations.\n"
+    "\n"
+    "Wrapper for the Cephes [1]_ routine `incbi`, which solves the equivalent\n"
+    "problem of finding the inverse of the incomplete beta integral.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Cephes Mathematical Functions Library,\n"
+    "       http://www.netlib.org/cephes/")
+ufunc_btdtri_loops[0] = loop_d_ddd__As_fff_f
+ufunc_btdtri_loops[1] = loop_d_ddd__As_ddd_d
+ufunc_btdtri_types[0] = NPY_FLOAT
+ufunc_btdtri_types[1] = NPY_FLOAT
+ufunc_btdtri_types[2] = NPY_FLOAT
+ufunc_btdtri_types[3] = NPY_FLOAT
+ufunc_btdtri_types[4] = NPY_DOUBLE
+ufunc_btdtri_types[5] = NPY_DOUBLE
+ufunc_btdtri_types[6] = NPY_DOUBLE
+ufunc_btdtri_types[7] = NPY_DOUBLE
+ufunc_btdtri_ptr[2*0] = _func_incbi
+ufunc_btdtri_ptr[2*0+1] = ("btdtri")
+ufunc_btdtri_ptr[2*1] = _func_incbi
+ufunc_btdtri_ptr[2*1+1] = ("btdtri")
+ufunc_btdtri_data[0] = &ufunc_btdtri_ptr[2*0]
+ufunc_btdtri_data[1] = &ufunc_btdtri_ptr[2*1]
+btdtri = np.PyUFunc_FromFuncAndData(ufunc_btdtri_loops, ufunc_btdtri_data, ufunc_btdtri_types, 2, 3, 1, 0, "btdtri", ufunc_btdtri_doc, 0)
+
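+# Since ``btdtri`` inverts ``btdtr`` in ``x``, a round trip should recover
+# the probability it started from; a hedged sketch with arbitrary values:
+#
+#     >>> import numpy as np
+#     >>> import scipy.special as sc
+#     >>> a, b, p = 2.0, 3.0, 0.7
+#     >>> np.isclose(sc.btdtr(a, b, sc.btdtri(a, b, p)), p)
+#     True
+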
+cdef np.PyUFuncGenericFunction ufunc_btdtria_loops[2]
+cdef void *ufunc_btdtria_ptr[4]
+cdef void *ufunc_btdtria_data[2]
+cdef char ufunc_btdtria_types[8]
+cdef char *ufunc_btdtria_doc = (
+    "btdtria(p, b, x, out=None)\n"
+    "\n"
+    "Inverse of `btdtr` with respect to `a`.\n"
+    "\n"
+    "This is the inverse of the beta cumulative distribution function, `btdtr`,\n"
+    "considered as a function of `a`, returning the value of `a` for which\n"
+    "`btdtr(a, b, x) = p`, or\n"
+    "\n"
+    ".. math::\n"
+    "    p = \\int_0^x \\frac{\\Gamma(a + b)}{\\Gamma(a)\\Gamma(b)} t^{a-1} (1-t)^{b-1}\\,dt\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "p : array_like\n"
+    "    Cumulative probability, in [0, 1].\n"
+    "b : array_like\n"
+    "    Shape parameter (`b` > 0).\n"
+    "x : array_like\n"
+    "    The quantile, in [0, 1].\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "a : scalar or ndarray\n"
+    "    The value of the shape parameter `a` such that `btdtr(a, b, x) = p`.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "btdtr : Cumulative distribution function of the beta distribution.\n"
+    "btdtri : Inverse with respect to `x`.\n"
+    "btdtrib : Inverse with respect to `b`.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.\n"
+    "\n"
+    "The cumulative distribution function `p` is computed using a routine by\n"
+    "DiDinato and Morris [2]_. Computation of `a` involves a search for a value\n"
+    "that produces the desired value of `p`. The search relies on the\n"
+    "monotonicity of `p` with `a`.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Barry Brown, James Lovato, and Kathy Russell,\n"
+    "       CDFLIB: Library of Fortran Routines for Cumulative Distribution\n"
+    "       Functions, Inverses, and Other Parameters.\n"
+    ".. [2] DiDinato, A. R. and Morris, A. H.,\n"
+    "       Algorithm 708: Significant Digit Computation of the Incomplete Beta\n"
+    "       Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373.")
+ufunc_btdtria_loops[0] = loop_d_ddd__As_fff_f
+ufunc_btdtria_loops[1] = loop_d_ddd__As_ddd_d
+ufunc_btdtria_types[0] = NPY_FLOAT
+ufunc_btdtria_types[1] = NPY_FLOAT
+ufunc_btdtria_types[2] = NPY_FLOAT
+ufunc_btdtria_types[3] = NPY_FLOAT
+ufunc_btdtria_types[4] = NPY_DOUBLE
+ufunc_btdtria_types[5] = NPY_DOUBLE
+ufunc_btdtria_types[6] = NPY_DOUBLE
+ufunc_btdtria_types[7] = NPY_DOUBLE
+ufunc_btdtria_ptr[2*0] = _func_cdfbet3_wrap
+ufunc_btdtria_ptr[2*0+1] = ("btdtria")
+ufunc_btdtria_ptr[2*1] = _func_cdfbet3_wrap
+ufunc_btdtria_ptr[2*1+1] = ("btdtria")
+ufunc_btdtria_data[0] = &ufunc_btdtria_ptr[2*0]
+ufunc_btdtria_data[1] = &ufunc_btdtria_ptr[2*1]
+btdtria = np.PyUFunc_FromFuncAndData(ufunc_btdtria_loops, ufunc_btdtria_data, ufunc_btdtria_types, 2, 3, 1, 0, "btdtria", ufunc_btdtria_doc, 0)
+
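+# ``btdtria`` also lacks an Examples section. Because it inverts ``btdtr``
+# with respect to ``a``, feeding it a probability produced by ``btdtr``
+# should return the original shape parameter (arbitrary values):
+#
+#     >>> import numpy as np
+#     >>> import scipy.special as sc
+#     >>> a, b, x = 2.0, 3.0, 0.4
+#     >>> np.isclose(sc.btdtria(sc.btdtr(a, b, x), b, x), a)
+#     True
+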
+cdef np.PyUFuncGenericFunction ufunc_btdtrib_loops[2]
+cdef void *ufunc_btdtrib_ptr[4]
+cdef void *ufunc_btdtrib_data[2]
+cdef char ufunc_btdtrib_types[8]
+cdef char *ufunc_btdtrib_doc = (
+    "btdtria(a, p, x, out=None)\n"
+    "\n"
+    "Inverse of `btdtr` with respect to `b`.\n"
+    "\n"
+    "This is the inverse of the beta cumulative distribution function, `btdtr`,\n"
+    "considered as a function of `b`, returning the value of `b` for which\n"
+    "`btdtr(a, b, x) = p`, or\n"
+    "\n"
+    ".. math::\n"
+    "    p = \\int_0^x \\frac{\\Gamma(a + b)}{\\Gamma(a)\\Gamma(b)} t^{a-1} (1-t)^{b-1}\\,dt\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "a : array_like\n"
+    "    Shape parameter (`a` > 0).\n"
+    "p : array_like\n"
+    "    Cumulative probability, in [0, 1].\n"
+    "x : array_like\n"
+    "    The quantile, in [0, 1].\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "b : scalar or ndarray\n"
+    "    The value of the shape parameter `b` such that `btdtr(a, b, x) = p`.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "btdtr : Cumulative distribution function of the beta distribution.\n"
+    "btdtri : Inverse with respect to `x`.\n"
+    "btdtria : Inverse with respect to `a`.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.\n"
+    "\n"
+    "The cumulative distribution function `p` is computed using a routine by\n"
+    "DiDinato and Morris [2]_. Computation of `b` involves a search for a value\n"
+    "that produces the desired value of `p`. The search relies on the\n"
+    "monotonicity of `p` with `b`.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Barry Brown, James Lovato, and Kathy Russell,\n"
+    "       CDFLIB: Library of Fortran Routines for Cumulative Distribution\n"
+    "       Functions, Inverses, and Other Parameters.\n"
+    ".. [2] DiDinato, A. R. and Morris, A. H.,\n"
+    "       Algorithm 708: Significant Digit Computation of the Incomplete Beta\n"
+    "       Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373.")
+ufunc_btdtrib_loops[0] = loop_d_ddd__As_fff_f
+ufunc_btdtrib_loops[1] = loop_d_ddd__As_ddd_d
+ufunc_btdtrib_types[0] = NPY_FLOAT
+ufunc_btdtrib_types[1] = NPY_FLOAT
+ufunc_btdtrib_types[2] = NPY_FLOAT
+ufunc_btdtrib_types[3] = NPY_FLOAT
+ufunc_btdtrib_types[4] = NPY_DOUBLE
+ufunc_btdtrib_types[5] = NPY_DOUBLE
+ufunc_btdtrib_types[6] = NPY_DOUBLE
+ufunc_btdtrib_types[7] = NPY_DOUBLE
+ufunc_btdtrib_ptr[2*0] = _func_cdfbet4_wrap
+ufunc_btdtrib_ptr[2*0+1] = ("btdtrib")
+ufunc_btdtrib_ptr[2*1] = _func_cdfbet4_wrap
+ufunc_btdtrib_ptr[2*1+1] = ("btdtrib")
+ufunc_btdtrib_data[0] = &ufunc_btdtrib_ptr[2*0]
+ufunc_btdtrib_data[1] = &ufunc_btdtrib_ptr[2*1]
+btdtrib = np.PyUFunc_FromFuncAndData(ufunc_btdtrib_loops, ufunc_btdtrib_data, ufunc_btdtrib_types, 2, 3, 1, 0, "btdtrib", ufunc_btdtrib_doc, 0)
+
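+# The matching round trip for ``btdtrib``, the inverse of ``btdtr`` with
+# respect to ``b`` (same caveats as the btdtria sketch above):
+#
+#     >>> import numpy as np
+#     >>> import scipy.special as sc
+#     >>> a, b, x = 2.0, 3.0, 0.4
+#     >>> np.isclose(sc.btdtrib(a, sc.btdtr(a, b, x), x), b)
+#     True
+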
+cdef np.PyUFuncGenericFunction ufunc_cbrt_loops[2]
+cdef void *ufunc_cbrt_ptr[4]
+cdef void *ufunc_cbrt_data[2]
+cdef char ufunc_cbrt_types[4]
+cdef char *ufunc_cbrt_doc = (
+    "cbrt(x, out=None)\n"
+    "\n"
+    "Element-wise cube root of `x`.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    `x` must contain real numbers.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    The cube root of each value in `x`.\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> from scipy.special import cbrt\n"
+    "\n"
+    ">>> cbrt(8)\n"
+    "2.0\n"
+    ">>> cbrt([-8, -3, 0.125, 1.331])\n"
+    "array([-2.        , -1.44224957,  0.5       ,  1.1       ])")
+ufunc_cbrt_loops[0] = loop_d_d__As_f_f
+ufunc_cbrt_loops[1] = loop_d_d__As_d_d
+ufunc_cbrt_types[0] = NPY_FLOAT
+ufunc_cbrt_types[1] = NPY_FLOAT
+ufunc_cbrt_types[2] = NPY_DOUBLE
+ufunc_cbrt_types[3] = NPY_DOUBLE
+ufunc_cbrt_ptr[2*0] = _func_cbrt
+ufunc_cbrt_ptr[2*0+1] = ("cbrt")
+ufunc_cbrt_ptr[2*1] = _func_cbrt
+ufunc_cbrt_ptr[2*1+1] = ("cbrt")
+ufunc_cbrt_data[0] = &ufunc_cbrt_ptr[2*0]
+ufunc_cbrt_data[1] = &ufunc_cbrt_ptr[2*1]
+cbrt = np.PyUFunc_FromFuncAndData(ufunc_cbrt_loops, ufunc_cbrt_data, ufunc_cbrt_types, 2, 1, 1, 0, "cbrt", ufunc_cbrt_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_chdtr_loops[2]
+cdef void *ufunc_chdtr_ptr[4]
+cdef void *ufunc_chdtr_data[2]
+cdef char ufunc_chdtr_types[6]
+cdef char *ufunc_chdtr_doc = (
+    "chdtr(v, x, out=None)\n"
+    "\n"
+    "Chi square cumulative distribution function.\n"
+    "\n"
+    "Returns the area under the left tail (from 0 to `x`) of the Chi\n"
+    "square probability density function with `v` degrees of freedom:\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    \\frac{1}{2^{v/2} \\Gamma(v/2)} \\int_0^x t^{v/2 - 1} e^{-t/2} dt\n"
+    "\n"
+    "Here :math:`\\Gamma` is the Gamma function; see `gamma`. This\n"
+    "integral can be expressed in terms of the regularized lower\n"
+    "incomplete gamma function `gammainc` as\n"
+    "``gammainc(v / 2, x / 2)``. [1]_\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "v : array_like\n"
+    "    Degrees of freedom.\n"
+    "x : array_like\n"
+    "    Upper bound of the integral.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results.\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Values of the cumulative distribution function.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "chdtrc, chdtri, chdtriv, gammainc\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Chi-Square distribution,\n"
+    "    https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> import scipy.special as sc\n"
+    "\n"
+    "It can be expressed in terms of the regularized lower incomplete\n"
+    "gamma function.\n"
+    "\n"
+    ">>> v = 1\n"
+    ">>> x = np.arange(4)\n"
+    ">>> sc.chdtr(v, x)\n"
+    "array([0.        , 0.68268949, 0.84270079, 0.91673548])\n"
+    ">>> sc.gammainc(v / 2, x / 2)\n"
+    "array([0.        , 0.68268949, 0.84270079, 0.91673548])")
+ufunc_chdtr_loops[0] = loop_d_dd__As_ff_f
+ufunc_chdtr_loops[1] = loop_d_dd__As_dd_d
+ufunc_chdtr_types[0] = NPY_FLOAT
+ufunc_chdtr_types[1] = NPY_FLOAT
+ufunc_chdtr_types[2] = NPY_FLOAT
+ufunc_chdtr_types[3] = NPY_DOUBLE
+ufunc_chdtr_types[4] = NPY_DOUBLE
+ufunc_chdtr_types[5] = NPY_DOUBLE
+ufunc_chdtr_ptr[2*0] = _func_chdtr
+ufunc_chdtr_ptr[2*0+1] = ("chdtr")
+ufunc_chdtr_ptr[2*1] = _func_chdtr
+ufunc_chdtr_ptr[2*1+1] = ("chdtr")
+ufunc_chdtr_data[0] = &ufunc_chdtr_ptr[2*0]
+ufunc_chdtr_data[1] = &ufunc_chdtr_ptr[2*1]
+chdtr = np.PyUFunc_FromFuncAndData(ufunc_chdtr_loops, ufunc_chdtr_data, ufunc_chdtr_types, 2, 2, 1, 0, "chdtr", ufunc_chdtr_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_chdtrc_loops[2]
+cdef void *ufunc_chdtrc_ptr[4]
+cdef void *ufunc_chdtrc_data[2]
+cdef char ufunc_chdtrc_types[6]
+cdef char *ufunc_chdtrc_doc = (
+    "chdtrc(v, x, out=None)\n"
+    "\n"
+    "Chi square survival function.\n"
+    "\n"
+    "Returns the area under the right hand tail (from `x` to infinity)\n"
+    "of the Chi square probability density function with `v` degrees of\n"
+    "freedom:\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    \\frac{1}{2^{v/2} \\Gamma(v/2)} \\int_x^\\infty t^{v/2 - 1} e^{-t/2} dt\n"
+    "\n"
+    "Here :math:`\\Gamma` is the Gamma function; see `gamma`. This\n"
+    "integral can be expressed in terms of the regularized upper\n"
+    "incomplete gamma function `gammaincc` as\n"
+    "``gammaincc(v / 2, x / 2)``. [1]_\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "v : array_like\n"
+    "    Degrees of freedom.\n"
+    "x : array_like\n"
+    "    Lower bound of the integral.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results.\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Values of the survival function.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "chdtr, chdtri, chdtriv, gammaincc\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Chi-Square distribution,\n"
+    "    https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> import scipy.special as sc\n"
+    "\n"
+    "It can be expressed in terms of the regularized upper incomplete\n"
+    "gamma function.\n"
+    "\n"
+    ">>> v = 1\n"
+    ">>> x = np.arange(4)\n"
+    ">>> sc.chdtrc(v, x)\n"
+    "array([1.        , 0.31731051, 0.15729921, 0.08326452])\n"
+    ">>> sc.gammaincc(v / 2, x / 2)\n"
+    "array([1.        , 0.31731051, 0.15729921, 0.08326452])")
+ufunc_chdtrc_loops[0] = loop_d_dd__As_ff_f
+ufunc_chdtrc_loops[1] = loop_d_dd__As_dd_d
+ufunc_chdtrc_types[0] = NPY_FLOAT
+ufunc_chdtrc_types[1] = NPY_FLOAT
+ufunc_chdtrc_types[2] = NPY_FLOAT
+ufunc_chdtrc_types[3] = NPY_DOUBLE
+ufunc_chdtrc_types[4] = NPY_DOUBLE
+ufunc_chdtrc_types[5] = NPY_DOUBLE
+ufunc_chdtrc_ptr[2*0] = _func_chdtrc
+ufunc_chdtrc_ptr[2*0+1] = ("chdtrc")
+ufunc_chdtrc_ptr[2*1] = _func_chdtrc
+ufunc_chdtrc_ptr[2*1+1] = ("chdtrc")
+ufunc_chdtrc_data[0] = &ufunc_chdtrc_ptr[2*0]
+ufunc_chdtrc_data[1] = &ufunc_chdtrc_ptr[2*1]
+chdtrc = np.PyUFunc_FromFuncAndData(ufunc_chdtrc_loops, ufunc_chdtrc_data, ufunc_chdtrc_types, 2, 2, 1, 0, "chdtrc", ufunc_chdtrc_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_chdtri_loops[2]
+cdef void *ufunc_chdtri_ptr[4]
+cdef void *ufunc_chdtri_data[2]
+cdef char ufunc_chdtri_types[6]
+cdef char *ufunc_chdtri_doc = (
+    "chdtri(v, p, out=None)\n"
+    "\n"
+    "Inverse to `chdtrc` with respect to `x`.\n"
+    "\n"
+    "Returns `x` such that ``chdtrc(v, x) == p``.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "v : array_like\n"
+    "    Degrees of freedom.\n"
+    "p : array_like\n"
+    "    Probability.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results.\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "x : scalar or ndarray\n"
+    "    Value so that the probability a Chi square random variable\n"
+    "    with `v` degrees of freedom is greater than `x` equals `p`.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "chdtrc, chdtr, chdtriv\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Chi-Square distribution,\n"
+    "    https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import scipy.special as sc\n"
+    "\n"
+    "It inverts `chdtrc`.\n"
+    "\n"
+    ">>> v, p = 1, 0.3\n"
+    ">>> sc.chdtrc(v, sc.chdtri(v, p))\n"
+    "0.3\n"
+    ">>> x = 1\n"
+    ">>> sc.chdtri(v, sc.chdtrc(v, x))\n"
+    "1.0")
+ufunc_chdtri_loops[0] = loop_d_dd__As_ff_f
+ufunc_chdtri_loops[1] = loop_d_dd__As_dd_d
+ufunc_chdtri_types[0] = NPY_FLOAT
+ufunc_chdtri_types[1] = NPY_FLOAT
+ufunc_chdtri_types[2] = NPY_FLOAT
+ufunc_chdtri_types[3] = NPY_DOUBLE
+ufunc_chdtri_types[4] = NPY_DOUBLE
+ufunc_chdtri_types[5] = NPY_DOUBLE
+ufunc_chdtri_ptr[2*0] = _func_chdtri
+ufunc_chdtri_ptr[2*0+1] = ("chdtri")
+ufunc_chdtri_ptr[2*1] = _func_chdtri
+ufunc_chdtri_ptr[2*1+1] = ("chdtri")
+ufunc_chdtri_data[0] = &ufunc_chdtri_ptr[2*0]
+ufunc_chdtri_data[1] = &ufunc_chdtri_ptr[2*1]
+chdtri = np.PyUFunc_FromFuncAndData(ufunc_chdtri_loops, ufunc_chdtri_data, ufunc_chdtri_types, 2, 2, 1, 0, "chdtri", ufunc_chdtri_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_chdtriv_loops[2]
+cdef void *ufunc_chdtriv_ptr[4]
+cdef void *ufunc_chdtriv_data[2]
+cdef char ufunc_chdtriv_types[6]
+cdef char *ufunc_chdtriv_doc = (
+    "chdtriv(p, x, out=None)\n"
+    "\n"
+    "Inverse to `chdtr` with respect to `v`.\n"
+    "\n"
+    "Returns `v` such that ``chdtr(v, x) == p``.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "p : array_like\n"
+    "    Probability that the Chi square random variable is less than\n"
+    "    or equal to `x`.\n"
+    "x : array_like\n"
+    "    Nonnegative input.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results.\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Degrees of freedom.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "chdtr, chdtrc, chdtri\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Chi-Square distribution,\n"
+    "    https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import scipy.special as sc\n"
+    "\n"
+    "It inverts `chdtr`.\n"
+    "\n"
+    ">>> p, x = 0.5, 1\n"
+    ">>> sc.chdtr(sc.chdtriv(p, x), x)\n"
+    "0.5000000000202172\n"
+    ">>> v = 1\n"
+    ">>> sc.chdtriv(sc.chdtr(v, x), v)\n"
+    "1.0000000000000013")
+ufunc_chdtriv_loops[0] = loop_d_dd__As_ff_f
+ufunc_chdtriv_loops[1] = loop_d_dd__As_dd_d
+ufunc_chdtriv_types[0] = NPY_FLOAT
+ufunc_chdtriv_types[1] = NPY_FLOAT
+ufunc_chdtriv_types[2] = NPY_FLOAT
+ufunc_chdtriv_types[3] = NPY_DOUBLE
+ufunc_chdtriv_types[4] = NPY_DOUBLE
+ufunc_chdtriv_types[5] = NPY_DOUBLE
+ufunc_chdtriv_ptr[2*0] = _func_cdfchi3_wrap
+ufunc_chdtriv_ptr[2*0+1] = ("chdtriv")
+ufunc_chdtriv_ptr[2*1] = _func_cdfchi3_wrap
+ufunc_chdtriv_ptr[2*1+1] = ("chdtriv")
+ufunc_chdtriv_data[0] = &ufunc_chdtriv_ptr[2*0]
+ufunc_chdtriv_data[1] = &ufunc_chdtriv_ptr[2*1]
+chdtriv = np.PyUFunc_FromFuncAndData(ufunc_chdtriv_loops, ufunc_chdtriv_data, ufunc_chdtriv_types, 2, 2, 1, 0, "chdtriv", ufunc_chdtriv_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_chndtr_loops[2]
+cdef void *ufunc_chndtr_ptr[4]
+cdef void *ufunc_chndtr_data[2]
+cdef char ufunc_chndtr_types[8]
+cdef char *ufunc_chndtr_doc = (
+    "chndtr(x, df, nc, out=None)\n"
+    "\n"
+    "Non-central chi square cumulative distribution function\n"
+    "\n"
+    "The cumulative distribution function is given by:\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    P(\\chi^{\\prime 2} \\vert \\nu, \\lambda) =\\sum_{j=0}^{\\infty}\n"
+    "    e^{-\\lambda /2}\n"
+    "    \\frac{(\\lambda /2)^j}{j!} P(\\chi^{\\prime 2} \\vert \\nu + 2j),\n"
+    "\n"
+    "where :math:`\\nu > 0` is the degrees of freedom (``df``) and\n"
+    ":math:`\\lambda \\geq 0` is the non-centrality parameter (``nc``).\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Upper bound of the integral; must satisfy ``x >= 0``\n"
+    "df : array_like\n"
+    "    Degrees of freedom; must satisfy ``df > 0``\n"
+    "nc : array_like\n"
+    "    Non-centrality parameter; must satisfy ``nc >= 0``\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "x : scalar or ndarray\n"
+    "    Value of the non-central chi square cumulative distribution function.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "chndtrix, chndtridf, chndtrinc")
+ufunc_chndtr_loops[0] = loop_d_ddd__As_fff_f
+ufunc_chndtr_loops[1] = loop_d_ddd__As_ddd_d
+ufunc_chndtr_types[0] = NPY_FLOAT
+ufunc_chndtr_types[1] = NPY_FLOAT
+ufunc_chndtr_types[2] = NPY_FLOAT
+ufunc_chndtr_types[3] = NPY_FLOAT
+ufunc_chndtr_types[4] = NPY_DOUBLE
+ufunc_chndtr_types[5] = NPY_DOUBLE
+ufunc_chndtr_types[6] = NPY_DOUBLE
+ufunc_chndtr_types[7] = NPY_DOUBLE
+ufunc_chndtr_ptr[2*0] = _func_cdfchn1_wrap
+ufunc_chndtr_ptr[2*0+1] = ("chndtr")
+ufunc_chndtr_ptr[2*1] = _func_cdfchn1_wrap
+ufunc_chndtr_ptr[2*1+1] = ("chndtr")
+ufunc_chndtr_data[0] = &ufunc_chndtr_ptr[2*0]
+ufunc_chndtr_data[1] = &ufunc_chndtr_ptr[2*1]
+chndtr = np.PyUFunc_FromFuncAndData(ufunc_chndtr_loops, ufunc_chndtr_data, ufunc_chndtr_types, 2, 3, 1, 0, "chndtr", ufunc_chndtr_doc, 0)
+
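+# One consequence of the series above that makes a compact doctest: with
+# ``nc = 0`` only the ``j = 0`` term survives, so ``chndtr`` reduces to the
+# central chi square CDF ``chdtr`` (note the different argument order):
+#
+#     >>> import numpy as np
+#     >>> import scipy.special as sc
+#     >>> np.isclose(sc.chndtr(3.0, 2.0, 0.0), sc.chdtr(2.0, 3.0))
+#     True
+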
+cdef np.PyUFuncGenericFunction ufunc_chndtridf_loops[2]
+cdef void *ufunc_chndtridf_ptr[4]
+cdef void *ufunc_chndtridf_data[2]
+cdef char ufunc_chndtridf_types[8]
+cdef char *ufunc_chndtridf_doc = (
+    "chndtridf(x, p, nc, out=None)\n"
+    "\n"
+    "Inverse to `chndtr` vs `df`\n"
+    "\n"
+    "Calculated using a search to find a value for `df` that produces the\n"
+    "desired value of `p`.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Upper bound of the integral; must satisfy ``x >= 0``\n"
+    "p : array_like\n"
+    "    Probability; must satisfy ``0 <= p < 1``\n"
+    "nc : array_like\n"
+    "    Non-centrality parameter; must satisfy ``nc >= 0``\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "df : scalar or ndarray\n"
+    "    Degrees of freedom\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "chndtr, chndtrix, chndtrinc")
+ufunc_chndtridf_loops[0] = loop_d_ddd__As_fff_f
+ufunc_chndtridf_loops[1] = loop_d_ddd__As_ddd_d
+ufunc_chndtridf_types[0] = NPY_FLOAT
+ufunc_chndtridf_types[1] = NPY_FLOAT
+ufunc_chndtridf_types[2] = NPY_FLOAT
+ufunc_chndtridf_types[3] = NPY_FLOAT
+ufunc_chndtridf_types[4] = NPY_DOUBLE
+ufunc_chndtridf_types[5] = NPY_DOUBLE
+ufunc_chndtridf_types[6] = NPY_DOUBLE
+ufunc_chndtridf_types[7] = NPY_DOUBLE
+ufunc_chndtridf_ptr[2*0] = _func_cdfchn3_wrap
+ufunc_chndtridf_ptr[2*0+1] = ("chndtridf")
+ufunc_chndtridf_ptr[2*1] = _func_cdfchn3_wrap
+ufunc_chndtridf_ptr[2*1+1] = ("chndtridf")
+ufunc_chndtridf_data[0] = &ufunc_chndtridf_ptr[2*0]
+ufunc_chndtridf_data[1] = &ufunc_chndtridf_ptr[2*1]
+chndtridf = np.PyUFunc_FromFuncAndData(ufunc_chndtridf_loops, ufunc_chndtridf_data, ufunc_chndtridf_types, 2, 3, 1, 0, "chndtridf", ufunc_chndtridf_doc, 0)
+
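+# A round-trip sketch for ``chndtridf``, relying only on the inverse
+# relationship documented above (values arbitrary):
+#
+#     >>> import numpy as np
+#     >>> import scipy.special as sc
+#     >>> x, df, nc = 4.0, 3.0, 1.5
+#     >>> np.isclose(sc.chndtridf(x, sc.chndtr(x, df, nc), nc), df)
+#     True
+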
+cdef np.PyUFuncGenericFunction ufunc_chndtrinc_loops[2]
+cdef void *ufunc_chndtrinc_ptr[4]
+cdef void *ufunc_chndtrinc_data[2]
+cdef char ufunc_chndtrinc_types[8]
+cdef char *ufunc_chndtrinc_doc = (
+    "chndtrinc(x, df, p, out=None)\n"
+    "\n"
+    "Inverse to `chndtr` vs `nc`\n"
+    "\n"
+    "Calculated using a search to find a value for `df` that produces the\n"
+    "desired value of `p`.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Upper bound of the integral; must satisfy ``x >= 0``\n"
+    "df : array_like\n"
+    "    Degrees of freedom; must satisfy ``df > 0``\n"
+    "p : array_like\n"
+    "    Probability; must satisfy ``0 <= p < 1``\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "nc : scalar or ndarray\n"
+    "    Non-centrality\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "chndtr, chndtrix, chndtrinc")
+ufunc_chndtrinc_loops[0] = loop_d_ddd__As_fff_f
+ufunc_chndtrinc_loops[1] = loop_d_ddd__As_ddd_d
+ufunc_chndtrinc_types[0] = NPY_FLOAT
+ufunc_chndtrinc_types[1] = NPY_FLOAT
+ufunc_chndtrinc_types[2] = NPY_FLOAT
+ufunc_chndtrinc_types[3] = NPY_FLOAT
+ufunc_chndtrinc_types[4] = NPY_DOUBLE
+ufunc_chndtrinc_types[5] = NPY_DOUBLE
+ufunc_chndtrinc_types[6] = NPY_DOUBLE
+ufunc_chndtrinc_types[7] = NPY_DOUBLE
+ufunc_chndtrinc_ptr[2*0] = _func_cdfchn4_wrap
+ufunc_chndtrinc_ptr[2*0+1] = ("chndtrinc")
+ufunc_chndtrinc_ptr[2*1] = _func_cdfchn4_wrap
+ufunc_chndtrinc_ptr[2*1+1] = ("chndtrinc")
+ufunc_chndtrinc_data[0] = &ufunc_chndtrinc_ptr[2*0]
+ufunc_chndtrinc_data[1] = &ufunc_chndtrinc_ptr[2*1]
+chndtrinc = np.PyUFunc_FromFuncAndData(ufunc_chndtrinc_loops, ufunc_chndtrinc_data, ufunc_chndtrinc_types, 2, 3, 1, 0, "chndtrinc", ufunc_chndtrinc_doc, 0)
+
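+# The analogous round trip for ``chndtrinc``, recovering the non-centrality
+# parameter:
+#
+#     >>> import numpy as np
+#     >>> import scipy.special as sc
+#     >>> x, df, nc = 4.0, 3.0, 1.5
+#     >>> np.isclose(sc.chndtrinc(x, df, sc.chndtr(x, df, nc)), nc)
+#     True
+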
+cdef np.PyUFuncGenericFunction ufunc_chndtrix_loops[2]
+cdef void *ufunc_chndtrix_ptr[4]
+cdef void *ufunc_chndtrix_data[2]
+cdef char ufunc_chndtrix_types[8]
+cdef char *ufunc_chndtrix_doc = (
+    "chndtrix(p, df, nc, out=None)\n"
+    "\n"
+    "Inverse to `chndtr` vs `x`\n"
+    "\n"
+    "Calculated using a search to find a value for `x` that produces the\n"
+    "desired value of `p`.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "p : array_like\n"
+    "    Probability; must satisfy ``0 <= p < 1``\n"
+    "df : array_like\n"
+    "    Degrees of freedom; must satisfy ``df > 0``\n"
+    "nc : array_like\n"
+    "    Non-centrality parameter; must satisfy ``nc >= 0``\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "x : scalar or ndarray\n"
+    "    Value so that the probability a non-central Chi square random variable\n"
+    "    with `df` degrees of freedom and non-centrality, `nc`, is greater than\n"
+    "    `x` equals `p`.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "chndtr, chndtridf, chndtrinc")
+ufunc_chndtrix_loops[0] = loop_d_ddd__As_fff_f
+ufunc_chndtrix_loops[1] = loop_d_ddd__As_ddd_d
+ufunc_chndtrix_types[0] = NPY_FLOAT
+ufunc_chndtrix_types[1] = NPY_FLOAT
+ufunc_chndtrix_types[2] = NPY_FLOAT
+ufunc_chndtrix_types[3] = NPY_FLOAT
+ufunc_chndtrix_types[4] = NPY_DOUBLE
+ufunc_chndtrix_types[5] = NPY_DOUBLE
+ufunc_chndtrix_types[6] = NPY_DOUBLE
+ufunc_chndtrix_types[7] = NPY_DOUBLE
+ufunc_chndtrix_ptr[2*0] = _func_cdfchn2_wrap
+ufunc_chndtrix_ptr[2*0+1] = ("chndtrix")
+ufunc_chndtrix_ptr[2*1] = _func_cdfchn2_wrap
+ufunc_chndtrix_ptr[2*1+1] = ("chndtrix")
+ufunc_chndtrix_data[0] = &ufunc_chndtrix_ptr[2*0]
+ufunc_chndtrix_data[1] = &ufunc_chndtrix_ptr[2*1]
+chndtrix = np.PyUFunc_FromFuncAndData(ufunc_chndtrix_loops, ufunc_chndtrix_data, ufunc_chndtrix_types, 2, 3, 1, 0, "chndtrix", ufunc_chndtrix_doc, 0)
+
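+# And for ``chndtrix``, which should map the CDF value back to the quantile:
+#
+#     >>> import numpy as np
+#     >>> import scipy.special as sc
+#     >>> x, df, nc = 4.0, 3.0, 1.5
+#     >>> np.isclose(sc.chndtrix(sc.chndtr(x, df, nc), df, nc), x)
+#     True
+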
+cdef np.PyUFuncGenericFunction ufunc_cosdg_loops[2]
+cdef void *ufunc_cosdg_ptr[4]
+cdef void *ufunc_cosdg_data[2]
+cdef char ufunc_cosdg_types[4]
+cdef char *ufunc_cosdg_doc = (
+    "cosdg(x, out=None)\n"
+    "\n"
+    "Cosine of the angle `x` given in degrees.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Angle, given in degrees.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results.\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Cosine of the input.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "sindg, tandg, cotdg\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> import scipy.special as sc\n"
+    "\n"
+    "It is more accurate than using cosine directly.\n"
+    "\n"
+    ">>> x = 90 + 180 * np.arange(3)\n"
+    ">>> sc.cosdg(x)\n"
+    "array([-0.,  0., -0.])\n"
+    ">>> np.cos(x * np.pi / 180)\n"
+    "array([ 6.1232340e-17, -1.8369702e-16,  3.0616170e-16])")
+ufunc_cosdg_loops[0] = loop_d_d__As_f_f
+ufunc_cosdg_loops[1] = loop_d_d__As_d_d
+ufunc_cosdg_types[0] = NPY_FLOAT
+ufunc_cosdg_types[1] = NPY_FLOAT
+ufunc_cosdg_types[2] = NPY_DOUBLE
+ufunc_cosdg_types[3] = NPY_DOUBLE
+ufunc_cosdg_ptr[2*0] = _func_cosdg
+ufunc_cosdg_ptr[2*0+1] = ("cosdg")
+ufunc_cosdg_ptr[2*1] = _func_cosdg
+ufunc_cosdg_ptr[2*1+1] = ("cosdg")
+ufunc_cosdg_data[0] = &ufunc_cosdg_ptr[2*0]
+ufunc_cosdg_data[1] = &ufunc_cosdg_ptr[2*1]
+cosdg = np.PyUFunc_FromFuncAndData(ufunc_cosdg_loops, ufunc_cosdg_data, ufunc_cosdg_types, 2, 1, 1, 0, "cosdg", ufunc_cosdg_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_cosm1_loops[2]
+cdef void *ufunc_cosm1_ptr[4]
+cdef void *ufunc_cosm1_data[2]
+cdef char ufunc_cosm1_types[4]
+cdef char *ufunc_cosm1_doc = (
+    "cosm1(x, out=None)\n"
+    "\n"
+    "cos(x) - 1 for use when `x` is near zero.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Real valued argument.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results.\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Values of ``cos(x) - 1``.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "expm1, log1p\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> import scipy.special as sc\n"
+    "\n"
+    "It is more accurate than computing ``cos(x) - 1`` directly for\n"
+    "``x`` around 0.\n"
+    "\n"
+    ">>> x = 1e-30\n"
+    ">>> np.cos(x) - 1\n"
+    "0.0\n"
+    ">>> sc.cosm1(x)\n"
+    "-5.0000000000000005e-61")
+ufunc_cosm1_loops[0] = <np.PyUFuncGenericFunction>loop_d_d__As_f_f
+ufunc_cosm1_loops[1] = <np.PyUFuncGenericFunction>loop_d_d__As_d_d
+ufunc_cosm1_types[0] = <char>NPY_FLOAT
+ufunc_cosm1_types[1] = <char>NPY_FLOAT
+ufunc_cosm1_types[2] = <char>NPY_DOUBLE
+ufunc_cosm1_types[3] = <char>NPY_DOUBLE
+ufunc_cosm1_ptr[2*0] = <void*>_func_cosm1
+ufunc_cosm1_ptr[2*0+1] = <void*>(<char*>"cosm1")
+ufunc_cosm1_ptr[2*1] = <void*>_func_cosm1
+ufunc_cosm1_ptr[2*1+1] = <void*>(<char*>"cosm1")
+ufunc_cosm1_data[0] = &ufunc_cosm1_ptr[2*0]
+ufunc_cosm1_data[1] = &ufunc_cosm1_ptr[2*1]
+cosm1 = np.PyUFunc_FromFuncAndData(ufunc_cosm1_loops, ufunc_cosm1_data, ufunc_cosm1_types, 2, 1, 1, 0, "cosm1", ufunc_cosm1_doc, 0)
+
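+# A doctest-style check that `cosm1` keeps precision where naive
+# ``cos(x) - 1`` underflows to zero (a sketch; assumes the built module):
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import cosm1
+#     >>> x = 1e-10
+#     >>> float(np.cos(x) - 1.0) == 0.0  # cancellation loses the answer
+#     True
+#     >>> float(cosm1(x)) < 0.0          # correct tiny value ~ -x**2/2
+#     True
+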
+cdef np.PyUFuncGenericFunction ufunc_cotdg_loops[2]
+cdef void *ufunc_cotdg_ptr[4]
+cdef void *ufunc_cotdg_data[2]
+cdef char ufunc_cotdg_types[4]
+cdef char *ufunc_cotdg_doc = (
+    "cotdg(x, out=None)\n"
+    "\n"
+    "Cotangent of the angle `x` given in degrees.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Angle, given in degrees.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results.\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Cotangent at the input.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "sindg, cosdg, tandg\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> import scipy.special as sc\n"
+    "\n"
+    "It is more accurate than using cotangent directly.\n"
+    "\n"
+    ">>> x = 90 + 180 * np.arange(3)\n"
+    ">>> sc.cotdg(x)\n"
+    "array([0., 0., 0.])\n"
+    ">>> 1 / np.tan(x * np.pi / 180)\n"
+    "array([6.1232340e-17, 1.8369702e-16, 3.0616170e-16])")
+ufunc_cotdg_loops[0] = <np.PyUFuncGenericFunction>loop_d_d__As_f_f
+ufunc_cotdg_loops[1] = <np.PyUFuncGenericFunction>loop_d_d__As_d_d
+ufunc_cotdg_types[0] = <char>NPY_FLOAT
+ufunc_cotdg_types[1] = <char>NPY_FLOAT
+ufunc_cotdg_types[2] = <char>NPY_DOUBLE
+ufunc_cotdg_types[3] = <char>NPY_DOUBLE
+ufunc_cotdg_ptr[2*0] = <void*>_func_cotdg
+ufunc_cotdg_ptr[2*0+1] = <void*>(<char*>"cotdg")
+ufunc_cotdg_ptr[2*1] = <void*>_func_cotdg
+ufunc_cotdg_ptr[2*1+1] = <void*>(<char*>"cotdg")
+ufunc_cotdg_data[0] = &ufunc_cotdg_ptr[2*0]
+ufunc_cotdg_data[1] = &ufunc_cotdg_ptr[2*1]
+cotdg = np.PyUFunc_FromFuncAndData(ufunc_cotdg_loops, ufunc_cotdg_data, ufunc_cotdg_types, 2, 1, 1, 0, "cotdg", ufunc_cotdg_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_dawsn_loops[4]
+cdef void *ufunc_dawsn_ptr[8]
+cdef void *ufunc_dawsn_data[4]
+cdef char ufunc_dawsn_types[8]
+cdef char *ufunc_dawsn_doc = (
+    "dawsn(x, out=None)\n"
+    "\n"
+    "Dawson's integral.\n"
+    "\n"
+    "Computes::\n"
+    "\n"
+    "    exp(-x**2) * integral(exp(t**2), t=0..x).\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Function parameter.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "y : scalar or ndarray\n"
+    "    Value of the integral.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "wofz, erf, erfc, erfcx, erfi\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Steven G. Johnson, Faddeeva W function implementation.\n"
+    "   http://ab-initio.mit.edu/Faddeeva\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy import special\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> x = np.linspace(-15, 15, num=1000)\n"
+    ">>> plt.plot(x, special.dawsn(x))\n"
+    ">>> plt.xlabel('$x$')\n"
+    ">>> plt.ylabel('$dawsn(x)$')\n"
+    ">>> plt.show()")
+ufunc_dawsn_loops[0] = <np.PyUFuncGenericFunction>loop_d_d__As_f_f
+ufunc_dawsn_loops[1] = <np.PyUFuncGenericFunction>loop_d_d__As_d_d
+ufunc_dawsn_loops[2] = <np.PyUFuncGenericFunction>loop_D_D__As_F_F
+ufunc_dawsn_loops[3] = <np.PyUFuncGenericFunction>loop_D_D__As_D_D
+ufunc_dawsn_types[0] = <char>NPY_FLOAT
+ufunc_dawsn_types[1] = <char>NPY_FLOAT
+ufunc_dawsn_types[2] = <char>NPY_DOUBLE
+ufunc_dawsn_types[3] = <char>NPY_DOUBLE
+ufunc_dawsn_types[4] = <char>NPY_CFLOAT
+ufunc_dawsn_types[5] = <char>NPY_CFLOAT
+ufunc_dawsn_types[6] = <char>NPY_CDOUBLE
+ufunc_dawsn_types[7] = <char>NPY_CDOUBLE
+ufunc_dawsn_ptr[2*0] = scipy.special._ufuncs_cxx._export_faddeeva_dawsn
+ufunc_dawsn_ptr[2*0+1] = <void*>(<char*>"dawsn")
+ufunc_dawsn_ptr[2*1] = scipy.special._ufuncs_cxx._export_faddeeva_dawsn
+ufunc_dawsn_ptr[2*1+1] = <void*>(<char*>"dawsn")
+ufunc_dawsn_ptr[2*2] = scipy.special._ufuncs_cxx._export_faddeeva_dawsn_complex
+ufunc_dawsn_ptr[2*2+1] = <void*>(<char*>"dawsn")
+ufunc_dawsn_ptr[2*3] = scipy.special._ufuncs_cxx._export_faddeeva_dawsn_complex
+ufunc_dawsn_ptr[2*3+1] = <void*>(<char*>"dawsn")
+ufunc_dawsn_data[0] = &ufunc_dawsn_ptr[2*0]
+ufunc_dawsn_data[1] = &ufunc_dawsn_ptr[2*1]
+ufunc_dawsn_data[2] = &ufunc_dawsn_ptr[2*2]
+ufunc_dawsn_data[3] = &ufunc_dawsn_ptr[2*3]
+dawsn = np.PyUFunc_FromFuncAndData(ufunc_dawsn_loops, ufunc_dawsn_data, ufunc_dawsn_types, 4, 1, 1, 0, "dawsn", ufunc_dawsn_doc, 0)
+
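+# A doctest-style check of the standard identity relating Dawson's integral
+# to `erfi` (a sketch; `erfi` is listed in the See Also section above):
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import dawsn, erfi
+#     >>> x = 0.7
+#     >>> lhs = dawsn(x)
+#     >>> rhs = 0.5 * np.sqrt(np.pi) * np.exp(-x**2) * erfi(x)
+#     >>> bool(np.isclose(lhs, rhs))
+#     True
+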
+cdef np.PyUFuncGenericFunction ufunc_ellipe_loops[2]
+cdef void *ufunc_ellipe_ptr[4]
+cdef void *ufunc_ellipe_data[2]
+cdef char ufunc_ellipe_types[4]
+cdef char *ufunc_ellipe_doc = (
+    "ellipe(m, out=None)\n"
+    "\n"
+    "Complete elliptic integral of the second kind\n"
+    "\n"
+    "This function is defined as\n"
+    "\n"
+    ".. math:: E(m) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{1/2} dt\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "m : array_like\n"
+    "    Defines the parameter of the elliptic integral.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "E : scalar or ndarray\n"
+    "    Value of the elliptic integral.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "Wrapper for the Cephes [1]_ routine `ellpe`.\n"
+    "\n"
+    "For `m > 0` the computation uses the approximation,\n"
+    "\n"
+    ".. math:: E(m) \\approx P(1-m) - (1-m) \\log(1-m) Q(1-m),\n"
+    "\n"
+    "where :math:`P` and :math:`Q` are tenth-order polynomials.  For\n"
+    "`m < 0`, the relation\n"
+    "\n"
+    ".. math:: E(m) = E(m/(m - 1)) \\sqrt(1-m)\n"
+    "\n"
+    "is used.\n"
+    "\n"
+    "The parameterization in terms of :math:`m` follows that of section\n"
+    "17.2 in [2]_. Other parameterizations in terms of the\n"
+    "complementary parameter :math:`1 - m`, modular angle\n"
+    ":math:`\\sin^2(\\alpha) = m`, or modulus :math:`k^2 = m` are also\n"
+    "used, so be careful that you choose the correct parameter.\n"
+    "\n"
+    "The Legendre E integral is related to Carlson's symmetric R_D or R_G\n"
+    "functions in multiple ways [3]_. For example,\n"
+    "\n"
+    ".. math:: E(m) = 2 R_G(0, 1-k^2, 1) .\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1\n"
+    "ellipk : Complete elliptic integral of the first kind\n"
+    "ellipkinc : Incomplete elliptic integral of the first kind\n"
+    "ellipeinc : Incomplete elliptic integral of the second kind\n"
+    "elliprd : Symmetric elliptic integral of the second kind.\n"
+    "elliprg : Completely-symmetric elliptic integral of the second kind.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Cephes Mathematical Functions Library,\n"
+    "       http://www.netlib.org/cephes/\n"
+    ".. [2] Milton Abramowitz and Irene A. Stegun, eds.\n"
+    "       Handbook of Mathematical Functions with Formulas,\n"
+    "       Graphs, and Mathematical Tables. New York: Dover, 1972.\n"
+    ".. [3] NIST Digital Library of Mathematical\n"
+    "       Functions. http://dlmf.nist.gov/, Release 1.0.28 of\n"
+    "       2020-09-15. See Sec. 19.25(i) https://dlmf.nist.gov/19.25#i\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "This function is used in finding the circumference of an\n"
+    "ellipse with semi-major axis `a` and semi-minor axis `b`.\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy import special\n"
+    "\n"
+    ">>> a = 3.5\n"
+    ">>> b = 2.1\n"
+    ">>> e_sq = 1.0 - b**2/a**2  # eccentricity squared\n"
+    "\n"
+    "Then the circumference is found using the following:\n"
+    "\n"
+    ">>> C = 4*a*special.ellipe(e_sq)  # circumference formula\n"
+    ">>> C\n"
+    "17.868899204378693\n"
+    "\n"
+    "When `a` and `b` are the same (meaning eccentricity is 0),\n"
+    "this reduces to the circumference of a circle.\n"
+    "\n"
+    ">>> 4*a*special.ellipe(0.0)  # formula for ellipse with a = b\n"
+    "21.991148575128552\n"
+    ">>> 2*np.pi*a  # formula for circle of radius a\n"
+    "21.991148575128552")
+ufunc_ellipe_loops[0] = <np.PyUFuncGenericFunction>loop_d_d__As_f_f
+ufunc_ellipe_loops[1] = <np.PyUFuncGenericFunction>loop_d_d__As_d_d
+ufunc_ellipe_types[0] = <char>NPY_FLOAT
+ufunc_ellipe_types[1] = <char>NPY_FLOAT
+ufunc_ellipe_types[2] = <char>NPY_DOUBLE
+ufunc_ellipe_types[3] = <char>NPY_DOUBLE
+ufunc_ellipe_ptr[2*0] = <void*>_func_ellpe
+ufunc_ellipe_ptr[2*0+1] = <void*>(<char*>"ellipe")
+ufunc_ellipe_ptr[2*1] = <void*>_func_ellpe
+ufunc_ellipe_ptr[2*1+1] = <void*>(<char*>"ellipe")
+ufunc_ellipe_data[0] = &ufunc_ellipe_ptr[2*0]
+ufunc_ellipe_data[1] = &ufunc_ellipe_ptr[2*1]
+ellipe = np.PyUFunc_FromFuncAndData(ufunc_ellipe_loops, ufunc_ellipe_data, ufunc_ellipe_types, 2, 1, 1, 0, "ellipe", ufunc_ellipe_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_ellipeinc_loops[2]
+cdef void *ufunc_ellipeinc_ptr[4]
+cdef void *ufunc_ellipeinc_data[2]
+cdef char ufunc_ellipeinc_types[6]
+cdef char *ufunc_ellipeinc_doc = (
+    "ellipeinc(phi, m, out=None)\n"
+    "\n"
+    "Incomplete elliptic integral of the second kind\n"
+    "\n"
+    "This function is defined as\n"
+    "\n"
+    ".. math:: E(\\phi, m) = \\int_0^{\\phi} [1 - m \\sin(t)^2]^{1/2} dt\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "phi : array_like\n"
+    "    amplitude of the elliptic integral.\n"
+    "m : array_like\n"
+    "    parameter of the elliptic integral.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "E : scalar or ndarray\n"
+    "    Value of the elliptic integral.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "Wrapper for the Cephes [1]_ routine `ellie`.\n"
+    "\n"
+    "Computation uses arithmetic-geometric means algorithm.\n"
+    "\n"
+    "The parameterization in terms of :math:`m` follows that of section\n"
+    "17.2 in [2]_. Other parameterizations in terms of the\n"
+    "complementary parameter :math:`1 - m`, modular angle\n"
+    ":math:`\\sin^2(\\alpha) = m`, or modulus :math:`k^2 = m` are also\n"
+    "used, so be careful that you choose the correct parameter.\n"
+    "\n"
+    "The Legendre E incomplete integral can be related to combinations\n"
+    "of Carlson's symmetric integrals R_D, R_F, and R_G in multiple\n"
+    "ways [3]_. For example, with :math:`c = \\csc^2\\phi`,\n"
+    "\n"
+    ".. math::\n"
+    "  E(\\phi, m) = R_F(c-1, c-k^2, c)\n"
+    "    - \\frac{1}{3} k^2 R_D(c-1, c-k^2, c) .\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1\n"
+    "ellipk : Complete elliptic integral of the first kind\n"
+    "ellipkinc : Incomplete elliptic integral of the first kind\n"
+    "ellipe : Complete elliptic integral of the second kind\n"
+    "elliprd : Symmetric elliptic integral of the second kind.\n"
+    "elliprf : Completely-symmetric elliptic integral of the first kind.\n"
+    "elliprg : Completely-symmetric elliptic integral of the second kind.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Cephes Mathematical Functions Library,\n"
+    "       http://www.netlib.org/cephes/\n"
+    ".. [2] Milton Abramowitz and Irene A. Stegun, eds.\n"
+    "       Handbook of Mathematical Functions with Formulas,\n"
+    "       Graphs, and Mathematical Tables. New York: Dover, 1972.\n"
+    ".. [3] NIST Digital Library of Mathematical\n"
+    "       Functions. http://dlmf.nist.gov/, Release 1.0.28 of\n"
+    "       2020-09-15. See Sec. 19.25(i) https://dlmf.nist.gov/19.25#i")
+ufunc_ellipeinc_loops[0] = <np.PyUFuncGenericFunction>loop_d_dd__As_ff_f
+ufunc_ellipeinc_loops[1] = <np.PyUFuncGenericFunction>loop_d_dd__As_dd_d
+ufunc_ellipeinc_types[0] = <char>NPY_FLOAT
+ufunc_ellipeinc_types[1] = <char>NPY_FLOAT
+ufunc_ellipeinc_types[2] = <char>NPY_FLOAT
+ufunc_ellipeinc_types[3] = <char>NPY_DOUBLE
+ufunc_ellipeinc_types[4] = <char>NPY_DOUBLE
+ufunc_ellipeinc_types[5] = <char>NPY_DOUBLE
+ufunc_ellipeinc_ptr[2*0] = <void*>_func_ellie
+ufunc_ellipeinc_ptr[2*0+1] = <void*>(<char*>"ellipeinc")
+ufunc_ellipeinc_ptr[2*1] = <void*>_func_ellie
+ufunc_ellipeinc_ptr[2*1+1] = <void*>(<char*>"ellipeinc")
+ufunc_ellipeinc_data[0] = &ufunc_ellipeinc_ptr[2*0]
+ufunc_ellipeinc_data[1] = &ufunc_ellipeinc_ptr[2*1]
+ellipeinc = np.PyUFunc_FromFuncAndData(ufunc_ellipeinc_loops, ufunc_ellipeinc_data, ufunc_ellipeinc_types, 2, 2, 1, 0, "ellipeinc", ufunc_ellipeinc_doc, 0)
+
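+# A doctest-style consistency check: the incomplete integral at phi = pi/2
+# reduces to the complete integral `ellipe` (a sketch; assumes the built
+# module):
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import ellipe, ellipeinc
+#     >>> m = 0.4
+#     >>> bool(np.isclose(ellipeinc(np.pi/2, m), ellipe(m)))
+#     True
+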
+cdef np.PyUFuncGenericFunction ufunc_ellipj_loops[2]
+cdef void *ufunc_ellipj_ptr[4]
+cdef void *ufunc_ellipj_data[2]
+cdef char ufunc_ellipj_types[12]
+cdef char *ufunc_ellipj_doc = (
+    "ellipj(u, m, out=None)\n"
+    "\n"
+    "Jacobian elliptic functions\n"
+    "\n"
+    "Calculates the Jacobian elliptic functions of parameter `m` between\n"
+    "0 and 1, and real argument `u`.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "m : array_like\n"
+    "    Parameter.\n"
+    "u : array_like\n"
+    "    Argument.\n"
+    "out : tuple of ndarray, optional\n"
+    "    Optional output arrays for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "sn, cn, dn, ph : 4-tuple of scalar or ndarray\n"
+    "    The returned functions::\n"
+    "\n"
+    "        sn(u|m), cn(u|m), dn(u|m)\n"
+    "\n"
+    "    The value `ph` is such that if `u = ellipkinc(ph, m)`,\n"
+    "    then `sn(u|m) = sin(ph)` and `cn(u|m) = cos(ph)`.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "Wrapper for the Cephes [1]_ routine `ellpj`.\n"
+    "\n"
+    "These functions are periodic, with quarter-period on the real axis\n"
+    "equal to the complete elliptic integral `ellipk(m)`.\n"
+    "\n"
+    "Relation to incomplete elliptic integral: If `u = ellipkinc(phi,m)`, then\n"
+    "`sn(u|m) = sin(phi)`, and `cn(u|m) = cos(phi)`. The `phi` is called\n"
+    "the amplitude of `u`.\n"
+    "\n"
+    "Computation is by means of the arithmetic-geometric mean algorithm,\n"
+    "except when `m` is within 1e-9 of 0 or 1. In the latter case with `m`\n"
+    "close to 1, the approximation applies only for `phi < pi/2`.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "ellipk : Complete elliptic integral of the first kind\n"
+    "ellipkinc : Incomplete elliptic integral of the first kind\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Cephes Mathematical Functions Library,\n"
+    "       http://www.netlib.org/cephes/")
+ufunc_ellipj_loops[0] = <np.PyUFuncGenericFunction>loop_i_dd_dddd_As_ff_ffff
+ufunc_ellipj_loops[1] = <np.PyUFuncGenericFunction>loop_i_dd_dddd_As_dd_dddd
+ufunc_ellipj_types[0] = <char>NPY_FLOAT
+ufunc_ellipj_types[1] = <char>NPY_FLOAT
+ufunc_ellipj_types[2] = <char>NPY_FLOAT
+ufunc_ellipj_types[3] = <char>NPY_FLOAT
+ufunc_ellipj_types[4] = <char>NPY_FLOAT
+ufunc_ellipj_types[5] = <char>NPY_FLOAT
+ufunc_ellipj_types[6] = <char>NPY_DOUBLE
+ufunc_ellipj_types[7] = <char>NPY_DOUBLE
+ufunc_ellipj_types[8] = <char>NPY_DOUBLE
+ufunc_ellipj_types[9] = <char>NPY_DOUBLE
+ufunc_ellipj_types[10] = <char>NPY_DOUBLE
+ufunc_ellipj_types[11] = <char>NPY_DOUBLE
+ufunc_ellipj_ptr[2*0] = <void*>_func_ellpj
+ufunc_ellipj_ptr[2*0+1] = <void*>(<char*>"ellipj")
+ufunc_ellipj_ptr[2*1] = <void*>_func_ellpj
+ufunc_ellipj_ptr[2*1+1] = <void*>(<char*>"ellipj")
+ufunc_ellipj_data[0] = &ufunc_ellipj_ptr[2*0]
+ufunc_ellipj_data[1] = &ufunc_ellipj_ptr[2*1]
+ellipj = np.PyUFunc_FromFuncAndData(ufunc_ellipj_loops, ufunc_ellipj_data, ufunc_ellipj_types, 2, 2, 4, 0, "ellipj", ufunc_ellipj_doc, 0)
+
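+# A doctest-style check of the fundamental Jacobi identities
+# sn**2 + cn**2 = 1 and dn**2 + m*sn**2 = 1 (a sketch; assumes the built
+# module):
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import ellipj
+#     >>> sn, cn, dn, ph = ellipj(0.8, 0.5)
+#     >>> bool(np.isclose(sn**2 + cn**2, 1.0))
+#     True
+#     >>> bool(np.isclose(dn**2 + 0.5 * sn**2, 1.0))
+#     True
+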
+cdef np.PyUFuncGenericFunction ufunc_ellipk_loops[2]
+cdef void *ufunc_ellipk_ptr[4]
+cdef void *ufunc_ellipk_data[2]
+cdef char ufunc_ellipk_types[4]
+cdef char *ufunc_ellipk_doc = (
+    "ellipk(m, out=None)\n"
+    "\n"
+    "Complete elliptic integral of the first kind.\n"
+    "\n"
+    "This function is defined as\n"
+    "\n"
+    ".. math:: K(m) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "m : array_like\n"
+    "    The parameter of the elliptic integral.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "K : scalar or ndarray\n"
+    "    Value of the elliptic integral.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "For more precision around point m = 1, use `ellipkm1`, which this\n"
+    "function calls.\n"
+    "\n"
+    "The parameterization in terms of :math:`m` follows that of section\n"
+    "17.2 in [1]_. Other parameterizations in terms of the\n"
+    "complementary parameter :math:`1 - m`, modular angle\n"
+    ":math:`\\sin^2(\\alpha) = m`, or modulus :math:`k^2 = m` are also\n"
+    "used, so be careful that you choose the correct parameter.\n"
+    "\n"
+    "The Legendre K integral is related to Carlson's symmetric R_F\n"
+    "function by [2]_:\n"
+    "\n"
+    ".. math:: K(m) = R_F(0, 1-k^2, 1) .\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "ellipkm1 : Complete elliptic integral of the first kind around m = 1\n"
+    "ellipkinc : Incomplete elliptic integral of the first kind\n"
+    "ellipe : Complete elliptic integral of the second kind\n"
+    "ellipeinc : Incomplete elliptic integral of the second kind\n"
+    "elliprf : Completely-symmetric elliptic integral of the first kind.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Milton Abramowitz and Irene A. Stegun, eds.\n"
+    "       Handbook of Mathematical Functions with Formulas,\n"
+    "       Graphs, and Mathematical Tables. New York: Dover, 1972.\n"
+    ".. [2] NIST Digital Library of Mathematical\n"
+    "       Functions. http://dlmf.nist.gov/, Release 1.0.28 of\n"
+    "       2020-09-15. See Sec. 19.25(i) https://dlmf.nist.gov/19.25#i")
+ufunc_ellipk_loops[0] = <np.PyUFuncGenericFunction>loop_d_d__As_f_f
+ufunc_ellipk_loops[1] = <np.PyUFuncGenericFunction>loop_d_d__As_d_d
+ufunc_ellipk_types[0] = <char>NPY_FLOAT
+ufunc_ellipk_types[1] = <char>NPY_FLOAT
+ufunc_ellipk_types[2] = <char>NPY_DOUBLE
+ufunc_ellipk_types[3] = <char>NPY_DOUBLE
+ufunc_ellipk_ptr[2*0] = <void*>_func_ellipk
+ufunc_ellipk_ptr[2*0+1] = <void*>(<char*>"ellipk")
+ufunc_ellipk_ptr[2*1] = <void*>_func_ellipk
+ufunc_ellipk_ptr[2*1+1] = <void*>(<char*>"ellipk")
+ufunc_ellipk_data[0] = &ufunc_ellipk_ptr[2*0]
+ufunc_ellipk_data[1] = &ufunc_ellipk_ptr[2*1]
+ellipk = np.PyUFunc_FromFuncAndData(ufunc_ellipk_loops, ufunc_ellipk_data, ufunc_ellipk_types, 2, 1, 1, 0, "ellipk", ufunc_ellipk_doc, 0)
+
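+# A doctest-style check of the Carlson R_F relation quoted in the Notes
+# above, K(m) = R_F(0, 1-m, 1) (a sketch; assumes the built module):
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import ellipk, elliprf
+#     >>> m = 0.5
+#     >>> bool(np.isclose(ellipk(m), elliprf(0.0, 1.0 - m, 1.0)))
+#     True
+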
+cdef np.PyUFuncGenericFunction ufunc_ellipkinc_loops[2]
+cdef void *ufunc_ellipkinc_ptr[4]
+cdef void *ufunc_ellipkinc_data[2]
+cdef char ufunc_ellipkinc_types[6]
+cdef char *ufunc_ellipkinc_doc = (
+    "ellipkinc(phi, m, out=None)\n"
+    "\n"
+    "Incomplete elliptic integral of the first kind\n"
+    "\n"
+    "This function is defined as\n"
+    "\n"
+    ".. math:: K(\\phi, m) = \\int_0^{\\phi} [1 - m \\sin(t)^2]^{-1/2} dt\n"
+    "\n"
+    "This function is also called :math:`F(\\phi, m)`.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "phi : array_like\n"
+    "    amplitude of the elliptic integral\n"
+    "m : array_like\n"
+    "    parameter of the elliptic integral\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "K : scalar or ndarray\n"
+    "    Value of the elliptic integral\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "Wrapper for the Cephes [1]_ routine `ellik`.  The computation is\n"
+    "carried out using the arithmetic-geometric mean algorithm.\n"
+    "\n"
+    "The parameterization in terms of :math:`m` follows that of section\n"
+    "17.2 in [2]_. Other parameterizations in terms of the\n"
+    "complementary parameter :math:`1 - m`, modular angle\n"
+    ":math:`\\sin^2(\\alpha) = m`, or modulus :math:`k^2 = m` are also\n"
+    "used, so be careful that you choose the correct parameter.\n"
+    "\n"
+    "The Legendre K incomplete integral (or F integral) is related to\n"
+    "Carlson's symmetric R_F function [3]_.\n"
+    "Setting :math:`c = \\csc^2\\phi`,\n"
+    "\n"
+    ".. math:: F(\\phi, m) = R_F(c-1, c-k^2, c) .\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1\n"
+    "ellipk : Complete elliptic integral of the first kind\n"
+    "ellipe : Complete elliptic integral of the second kind\n"
+    "ellipeinc : Incomplete elliptic integral of the second kind\n"
+    "elliprf : Completely-symmetric elliptic integral of the first kind.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Cephes Mathematical Functions Library,\n"
+    "       http://www.netlib.org/cephes/\n"
+    ".. [2] Milton Abramowitz and Irene A. Stegun, eds.\n"
+    "       Handbook of Mathematical Functions with Formulas,\n"
+    "       Graphs, and Mathematical Tables. New York: Dover, 1972.\n"
+    ".. [3] NIST Digital Library of Mathematical\n"
+    "       Functions. http://dlmf.nist.gov/, Release 1.0.28 of\n"
+    "       2020-09-15. See Sec. 19.25(i) https://dlmf.nist.gov/19.25#i")
+ufunc_ellipkinc_loops[0] = <np.PyUFuncGenericFunction>loop_d_dd__As_ff_f
+ufunc_ellipkinc_loops[1] = <np.PyUFuncGenericFunction>loop_d_dd__As_dd_d
+ufunc_ellipkinc_types[0] = <char>NPY_FLOAT
+ufunc_ellipkinc_types[1] = <char>NPY_FLOAT
+ufunc_ellipkinc_types[2] = <char>NPY_FLOAT
+ufunc_ellipkinc_types[3] = <char>NPY_DOUBLE
+ufunc_ellipkinc_types[4] = <char>NPY_DOUBLE
+ufunc_ellipkinc_types[5] = <char>NPY_DOUBLE
+ufunc_ellipkinc_ptr[2*0] = <void*>_func_ellik
+ufunc_ellipkinc_ptr[2*0+1] = <void*>(<char*>"ellipkinc")
+ufunc_ellipkinc_ptr[2*1] = <void*>_func_ellik
+ufunc_ellipkinc_ptr[2*1+1] = <void*>(<char*>"ellipkinc")
+ufunc_ellipkinc_data[0] = &ufunc_ellipkinc_ptr[2*0]
+ufunc_ellipkinc_data[1] = &ufunc_ellipkinc_ptr[2*1]
+ellipkinc = np.PyUFunc_FromFuncAndData(ufunc_ellipkinc_loops, ufunc_ellipkinc_data, ufunc_ellipkinc_types, 2, 2, 1, 0, "ellipkinc", ufunc_ellipkinc_doc, 0)
+
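+# A doctest-style consistency check: F(pi/2, m) equals the complete
+# integral K(m) (a sketch; assumes the built module):
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import ellipk, ellipkinc
+#     >>> m = 0.3
+#     >>> bool(np.isclose(ellipkinc(np.pi/2, m), ellipk(m)))
+#     True
+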
+cdef np.PyUFuncGenericFunction ufunc_ellipkm1_loops[2]
+cdef void *ufunc_ellipkm1_ptr[4]
+cdef void *ufunc_ellipkm1_data[2]
+cdef char ufunc_ellipkm1_types[4]
+cdef char *ufunc_ellipkm1_doc = (
+    "ellipkm1(p, out=None)\n"
+    "\n"
+    "Complete elliptic integral of the first kind around `m` = 1\n"
+    "\n"
+    "This function is defined as\n"
+    "\n"
+    ".. math:: K(p) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt\n"
+    "\n"
+    "where `m = 1 - p`.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "p : array_like\n"
+    "    Defines the parameter of the elliptic integral as `m = 1 - p`.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "K : scalar or ndarray\n"
+    "    Value of the elliptic integral.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "Wrapper for the Cephes [1]_ routine `ellpk`.\n"
+    "\n"
+    "For `p <= 1`, computation uses the approximation,\n"
+    "\n"
+    ".. math:: K(p) \\approx P(p) - \\log(p) Q(p),\n"
+    "\n"
+    "where :math:`P` and :math:`Q` are tenth-order polynomials.  The\n"
+    "argument `p` is used internally rather than `m` so that the logarithmic\n"
+    "singularity at `m = 1` will be shifted to the origin; this preserves\n"
+    "maximum accuracy.  For `p > 1`, the identity\n"
+    "\n"
+    ".. math:: K(p) = K(1/p)/\\sqrt(p)\n"
+    "\n"
+    "is used.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "ellipk : Complete elliptic integral of the first kind\n"
+    "ellipkinc : Incomplete elliptic integral of the first kind\n"
+    "ellipe : Complete elliptic integral of the second kind\n"
+    "ellipeinc : Incomplete elliptic integral of the second kind\n"
+    "elliprf : Completely-symmetric elliptic integral of the first kind.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Cephes Mathematical Functions Library,\n"
+    "       http://www.netlib.org/cephes/")
+ufunc_ellipkm1_loops[0] = <np.PyUFuncGenericFunction>loop_d_d__As_f_f
+ufunc_ellipkm1_loops[1] = <np.PyUFuncGenericFunction>loop_d_d__As_d_d
+ufunc_ellipkm1_types[0] = <char>NPY_FLOAT
+ufunc_ellipkm1_types[1] = <char>NPY_FLOAT
+ufunc_ellipkm1_types[2] = <char>NPY_DOUBLE
+ufunc_ellipkm1_types[3] = <char>NPY_DOUBLE
+ufunc_ellipkm1_ptr[2*0] = <void*>_func_ellpk
+ufunc_ellipkm1_ptr[2*0+1] = <void*>(<char*>"ellipkm1")
+ufunc_ellipkm1_ptr[2*1] = <void*>_func_ellpk
+ufunc_ellipkm1_ptr[2*1+1] = <void*>(<char*>"ellipkm1")
+ufunc_ellipkm1_data[0] = &ufunc_ellipkm1_ptr[2*0]
+ufunc_ellipkm1_data[1] = &ufunc_ellipkm1_ptr[2*1]
+ellipkm1 = np.PyUFunc_FromFuncAndData(ufunc_ellipkm1_loops, ufunc_ellipkm1_data, ufunc_ellipkm1_types, 2, 1, 1, 0, "ellipkm1", ufunc_ellipkm1_doc, 0)
+
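+# A doctest-style check of the `p = 1 - m` convention described above
+# (a sketch; assumes the built module):
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import ellipk, ellipkm1
+#     >>> bool(np.isclose(ellipkm1(1.0), np.pi / 2))  # p = 1, so m = 0
+#     True
+#     >>> bool(np.isclose(ellipkm1(0.25), ellipk(0.75)))
+#     True
+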
+cdef np.PyUFuncGenericFunction ufunc_elliprc_loops[4]
+cdef void *ufunc_elliprc_ptr[8]
+cdef void *ufunc_elliprc_data[4]
+cdef char ufunc_elliprc_types[12]
+cdef char *ufunc_elliprc_doc = (
+    "elliprc(x, y, out=None)\n"
+    "\n"
+    "Degenerate symmetric elliptic integral.\n"
+    "\n"
+    "The function RC is defined as [1]_\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    R_{\\mathrm{C}}(x, y) =\n"
+    "       \\frac{1}{2} \\int_0^{+\\infty} (t + x)^{-1/2} (t + y)^{-1} dt\n"
+    "       = R_{\\mathrm{F}}(x, y, y)\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x, y : array_like\n"
+    "    Real or complex input parameters. `x` can be any number in the\n"
+    "    complex plane cut along the negative real axis. `y` must be non-zero.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "R : scalar or ndarray\n"
+    "    Value of the integral. If `y` is real and negative, the Cauchy\n"
+    "    principal value is returned. If both of `x` and `y` are real, the\n"
+    "    return value is real. Otherwise, the return value is complex.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "RC is a degenerate case of the symmetric integral RF: ``elliprc(x, y) ==\n"
+    "elliprf(x, y, y)``. It is an elementary function rather than an elliptic\n"
+    "integral.\n"
+    "\n"
+    "The code implements Carlson's algorithm based on the duplication theorems\n"
+    "and series expansion up to the 7th order. [2]_\n"
+    "\n"
+    ".. versionadded:: 1.8.0\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "elliprf : Completely-symmetric elliptic integral of the first kind.\n"
+    "elliprd : Symmetric elliptic integral of the second kind.\n"
+    "elliprg : Completely-symmetric elliptic integral of the second kind.\n"
+    "elliprj : Symmetric elliptic integral of the third kind.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] B. C. Carlson, ed., Chapter 19 in \"Digital Library of Mathematical\n"
+    "       Functions,\" NIST, US Dept. of Commerce.\n"
+    "       https://dlmf.nist.gov/19.16.E6\n"
+    ".. [2] B. C. Carlson, \"Numerical computation of real or complex elliptic\n"
+    "       integrals,\" Numer. Algorithm, vol. 10, no. 1, pp. 13-26, 1995.\n"
+    "       https://arxiv.org/abs/math/9409227\n"
+    "       https://doi.org/10.1007/BF02198293\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Basic homogeneity property:\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import elliprc\n"
+    "\n"
+    ">>> x = 1.2 + 3.4j\n"
+    ">>> y = 5.\n"
+    ">>> scale = 0.3 + 0.4j\n"
+    ">>> elliprc(scale*x, scale*y)\n"
+    "(0.5484493976710874-0.4169557678995833j)\n"
+    "\n"
+    ">>> elliprc(x, y)/np.sqrt(scale)\n"
+    "(0.5484493976710874-0.41695576789958333j)\n"
+    "\n"
+    "When the two arguments coincide, the integral is particularly\n"
+    "simple:\n"
+    "\n"
+    ">>> x = 1.2 + 3.4j\n"
+    ">>> elliprc(x, x)\n"
+    "(0.4299173120614631-0.3041729818745595j)\n"
+    "\n"
+    ">>> 1/np.sqrt(x)\n"
+    "(0.4299173120614631-0.30417298187455954j)\n"
+    "\n"
+    "Another simple case: the first argument vanishes:\n"
+    "\n"
+    ">>> y = 1.2 + 3.4j\n"
+    ">>> elliprc(0, y)\n"
+    "(0.6753125346116815-0.47779380263880866j)\n"
+    "\n"
+    ">>> np.pi/2/np.sqrt(y)\n"
+    "(0.6753125346116815-0.4777938026388088j)\n"
+    "\n"
+    "When `x` and `y` are both positive, we can express\n"
+    ":math:`R_C(x,y)` in terms of more elementary functions.  For the\n"
+    "case :math:`0 \\le x < y`,\n"
+    "\n"
+    ">>> x = 3.2\n"
+    ">>> y = 6.\n"
+    ">>> elliprc(x, y)\n"
+    "0.44942991498453444\n"
+    "\n"
+    ">>> np.arctan(np.sqrt((y-x)/x))/np.sqrt(y-x)\n"
+    "0.44942991498453433\n"
+    "\n"
+    "And for the case :math:`0 \\le y < x`,\n"
+    "\n"
+    ">>> x = 6.\n"
+    ">>> y = 3.2\n"
+    ">>> elliprc(x,y)\n"
+    "0.4989837501576147\n"
+    "\n"
+    ">>> np.log((np.sqrt(x)+np.sqrt(x-y))/np.sqrt(y))/np.sqrt(x-y)\n"
+    "0.49898375015761476")
+ufunc_elliprc_loops[0] = <np.PyUFuncGenericFunction>loop_d_dd__As_ff_f
+ufunc_elliprc_loops[1] = <np.PyUFuncGenericFunction>loop_d_dd__As_dd_d
+ufunc_elliprc_loops[2] = <np.PyUFuncGenericFunction>loop_D_DD__As_FF_F
+ufunc_elliprc_loops[3] = <np.PyUFuncGenericFunction>loop_D_DD__As_DD_D
+ufunc_elliprc_types[0] = <char>NPY_FLOAT
+ufunc_elliprc_types[1] = <char>NPY_FLOAT
+ufunc_elliprc_types[2] = <char>NPY_FLOAT
+ufunc_elliprc_types[3] = <char>NPY_DOUBLE
+ufunc_elliprc_types[4] = <char>NPY_DOUBLE
+ufunc_elliprc_types[5] = <char>NPY_DOUBLE
+ufunc_elliprc_types[6] = <char>NPY_CFLOAT
+ufunc_elliprc_types[7] = <char>NPY_CFLOAT
+ufunc_elliprc_types[8] = <char>NPY_CFLOAT
+ufunc_elliprc_types[9] = <char>NPY_CDOUBLE
+ufunc_elliprc_types[10] = <char>NPY_CDOUBLE
+ufunc_elliprc_types[11] = <char>NPY_CDOUBLE
+ufunc_elliprc_ptr[2*0] = scipy.special._ufuncs_cxx._export_fellint_RC
+ufunc_elliprc_ptr[2*0+1] = <void*>(<char*>"elliprc")
+ufunc_elliprc_ptr[2*1] = scipy.special._ufuncs_cxx._export_fellint_RC
+ufunc_elliprc_ptr[2*1+1] = <void*>(<char*>"elliprc")
+ufunc_elliprc_ptr[2*2] = scipy.special._ufuncs_cxx._export_cellint_RC
+ufunc_elliprc_ptr[2*2+1] = <void*>(<char*>"elliprc")
+ufunc_elliprc_ptr[2*3] = scipy.special._ufuncs_cxx._export_cellint_RC
+ufunc_elliprc_ptr[2*3+1] = <void*>(<char*>"elliprc")
+ufunc_elliprc_data[0] = &ufunc_elliprc_ptr[2*0]
+ufunc_elliprc_data[1] = &ufunc_elliprc_ptr[2*1]
+ufunc_elliprc_data[2] = &ufunc_elliprc_ptr[2*2]
+ufunc_elliprc_data[3] = &ufunc_elliprc_ptr[2*3]
+elliprc = np.PyUFunc_FromFuncAndData(ufunc_elliprc_loops, ufunc_elliprc_data, ufunc_elliprc_types, 4, 2, 1, 0, "elliprc", ufunc_elliprc_doc, 0)
+
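+# A doctest-style check of the degeneracy noted above,
+# elliprc(x, y) == elliprf(x, y, y) (a sketch; assumes the built module):
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import elliprc, elliprf
+#     >>> bool(np.isclose(elliprc(2.0, 3.0), elliprf(2.0, 3.0, 3.0)))
+#     True
+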
+cdef np.PyUFuncGenericFunction ufunc_elliprd_loops[4]
+cdef void *ufunc_elliprd_ptr[8]
+cdef void *ufunc_elliprd_data[4]
+cdef char ufunc_elliprd_types[16]
+cdef char *ufunc_elliprd_doc = (
+    "elliprd(x, y, z, out=None)\n"
+    "\n"
+    "Symmetric elliptic integral of the second kind.\n"
+    "\n"
+    "The function RD is defined as [1]_\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    R_{\\mathrm{D}}(x, y, z) =\n"
+    "       \\frac{3}{2} \\int_0^{+\\infty} [(t + x) (t + y)]^{-1/2} (t + z)^{-3/2}\n"
+    "       dt\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x, y, z : array_like\n"
+    "    Real or complex input parameters. `x` or `y` can be any number in the\n"
+    "    complex plane cut along the negative real axis, but at most one of them\n"
+    "    can be zero, while `z` must be non-zero.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "R : scalar or ndarray\n"
+    "    Value of the integral. If all of `x`, `y`, and `z` are real, the\n"
+    "    return value is real. Otherwise, the return value is complex.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "RD is a degenerate case of the elliptic integral RJ: ``elliprd(x, y, z) ==\n"
+    "elliprj(x, y, z, z)``.\n"
+    "\n"
+    "The code implements Carlson's algorithm based on the duplication theorems\n"
+    "and series expansion up to the 7th order. [2]_\n"
+    "\n"
+    ".. versionadded:: 1.8.0\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "elliprc : Degenerate symmetric elliptic integral.\n"
+    "elliprf : Completely-symmetric elliptic integral of the first kind.\n"
+    "elliprg : Completely-symmetric elliptic integral of the second kind.\n"
+    "elliprj : Symmetric elliptic integral of the third kind.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] B. C. Carlson, ed., Chapter 19 in \"Digital Library of Mathematical\n"
+    "       Functions,\" NIST, US Dept. of Commerce.\n"
+    "       https://dlmf.nist.gov/19.16.E5\n"
+    ".. [2] B. C. Carlson, \"Numerical computation of real or complex elliptic\n"
+    "       integrals,\" Numer. Algorithm, vol. 10, no. 1, pp. 13-26, 1995.\n"
+    "       https://arxiv.org/abs/math/9409227\n"
+    "       https://doi.org/10.1007/BF02198293\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Basic homogeneity property:\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import elliprd\n"
+    "\n"
+    ">>> x = 1.2 + 3.4j\n"
+    ">>> y = 5.\n"
+    ">>> z = 6.\n"
+    ">>> scale = 0.3 + 0.4j\n"
+    ">>> elliprd(scale*x, scale*y, scale*z)\n"
+    "(-0.03703043835680379-0.24500934665683802j)\n"
+    "\n"
+    ">>> elliprd(x, y, z)*np.power(scale, -1.5)\n"
+    "(-0.0370304383568038-0.24500934665683805j)\n"
+    "\n"
+    "All three arguments coincide:\n"
+    "\n"
+    ">>> x = 1.2 + 3.4j\n"
+    ">>> elliprd(x, x, x)\n"
+    "(-0.03986825876151896-0.14051741840449586j)\n"
+    "\n"
+    ">>> np.power(x, -1.5)\n"
+    "(-0.03986825876151894-0.14051741840449583j)\n"
+    "\n"
+    "The so-called \"second lemniscate constant\":\n"
+    "\n"
+    ">>> elliprd(0, 2, 1)/3\n"
+    "0.5990701173677961\n"
+    "\n"
+    ">>> from scipy.special import gamma\n"
+    ">>> gamma(0.75)**2/np.sqrt(2*np.pi)\n"
+    "0.5990701173677959")
+ufunc_elliprd_loops[0] = <np.PyUFuncGenericFunction>loop_d_ddd__As_fff_f
+ufunc_elliprd_loops[1] = <np.PyUFuncGenericFunction>loop_d_ddd__As_ddd_d
+ufunc_elliprd_loops[2] = <np.PyUFuncGenericFunction>loop_D_DDD__As_FFF_F
+ufunc_elliprd_loops[3] = <np.PyUFuncGenericFunction>loop_D_DDD__As_DDD_D
+ufunc_elliprd_types[0] = <char>NPY_FLOAT
+ufunc_elliprd_types[1] = <char>NPY_FLOAT
+ufunc_elliprd_types[2] = <char>NPY_FLOAT
+ufunc_elliprd_types[3] = <char>NPY_FLOAT
+ufunc_elliprd_types[4] = <char>NPY_DOUBLE
+ufunc_elliprd_types[5] = <char>NPY_DOUBLE
+ufunc_elliprd_types[6] = <char>NPY_DOUBLE
+ufunc_elliprd_types[7] = <char>NPY_DOUBLE
+ufunc_elliprd_types[8] = <char>NPY_CFLOAT
+ufunc_elliprd_types[9] = <char>NPY_CFLOAT
+ufunc_elliprd_types[10] = <char>NPY_CFLOAT
+ufunc_elliprd_types[11] = <char>NPY_CFLOAT
+ufunc_elliprd_types[12] = <char>NPY_CDOUBLE
+ufunc_elliprd_types[13] = <char>NPY_CDOUBLE
+ufunc_elliprd_types[14] = <char>NPY_CDOUBLE
+ufunc_elliprd_types[15] = <char>NPY_CDOUBLE
+ufunc_elliprd_ptr[2*0] = scipy.special._ufuncs_cxx._export_fellint_RD
+ufunc_elliprd_ptr[2*0+1] = <void*>(<char*>"elliprd")
+ufunc_elliprd_ptr[2*1] = scipy.special._ufuncs_cxx._export_fellint_RD
+ufunc_elliprd_ptr[2*1+1] = <void*>(<char*>"elliprd")
+ufunc_elliprd_ptr[2*2] = scipy.special._ufuncs_cxx._export_cellint_RD
+ufunc_elliprd_ptr[2*2+1] = <void*>(<char*>"elliprd")
+ufunc_elliprd_ptr[2*3] = scipy.special._ufuncs_cxx._export_cellint_RD
+ufunc_elliprd_ptr[2*3+1] = <void*>(<char*>"elliprd")
+ufunc_elliprd_data[0] = &ufunc_elliprd_ptr[2*0]
+ufunc_elliprd_data[1] = &ufunc_elliprd_ptr[2*1]
+ufunc_elliprd_data[2] = &ufunc_elliprd_ptr[2*2]
+ufunc_elliprd_data[3] = &ufunc_elliprd_ptr[2*3]
+elliprd = np.PyUFunc_FromFuncAndData(ufunc_elliprd_loops, ufunc_elliprd_data, ufunc_elliprd_types, 4, 3, 1, 0, "elliprd", ufunc_elliprd_doc, 0)
+
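+# A doctest-style check of the degeneracy noted above,
+# elliprd(x, y, z) == elliprj(x, y, z, z) (a sketch; assumes the built
+# module):
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import elliprd, elliprj
+#     >>> bool(np.isclose(elliprd(1.0, 2.0, 3.0), elliprj(1.0, 2.0, 3.0, 3.0)))
+#     True
+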
+cdef np.PyUFuncGenericFunction ufunc_elliprf_loops[4]
+cdef void *ufunc_elliprf_ptr[8]
+cdef void *ufunc_elliprf_data[4]
+cdef char ufunc_elliprf_types[16]
+cdef char *ufunc_elliprf_doc = (
+    "elliprf(x, y, z, out=None)\n"
+    "\n"
+    "Completely-symmetric elliptic integral of the first kind.\n"
+    "\n"
+    "The function RF is defined as [1]_\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    R_{\\mathrm{F}}(x, y, z) =\n"
+    "       \\frac{1}{2} \\int_0^{+\\infty} [(t + x) (t + y) (t + z)]^{-1/2} dt\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x, y, z : array_like\n"
+    "    Real or complex input parameters. `x`, `y`, or `z` can be any number in\n"
+    "    the complex plane cut along the negative real axis, but at most one of\n"
+    "    them can be zero.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "R : scalar or ndarray\n"
+    "    Value of the integral. If all of `x`, `y`, and `z` are real, the return\n"
+    "    value is real. Otherwise, the return value is complex.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "The code implements Carlson's algorithm based on the duplication theorems\n"
+    "and series expansion up to the 7th order (cf.:\n"
+    "https://dlmf.nist.gov/19.36.i) and the AGM algorithm for the complete\n"
+    "integral. [2]_\n"
+    "\n"
+    ".. versionadded:: 1.8.0\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "elliprc : Degenerate symmetric integral.\n"
+    "elliprd : Symmetric elliptic integral of the second kind.\n"
+    "elliprg : Completely-symmetric elliptic integral of the second kind.\n"
+    "elliprj : Symmetric elliptic integral of the third kind.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] B. C. Carlson, ed., Chapter 19 in \"Digital Library of Mathematical\n"
+    "       Functions,\" NIST, US Dept. of Commerce.\n"
+    "       https://dlmf.nist.gov/19.16.E1\n"
+    ".. [2] B. C. Carlson, \"Numerical computation of real or complex elliptic\n"
+    "       integrals,\" Numer. Algorithm, vol. 10, no. 1, pp. 13-26, 1995.\n"
+    "       https://arxiv.org/abs/math/9409227\n"
+    "       https://doi.org/10.1007/BF02198293\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Basic homogeneity property:\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import elliprf\n"
+    "\n"
+    ">>> x = 1.2 + 3.4j\n"
+    ">>> y = 5.\n"
+    ">>> z = 6.\n"
+    ">>> scale = 0.3 + 0.4j\n"
+    ">>> elliprf(scale*x, scale*y, scale*z)\n"
+    "(0.5328051227278146-0.4008623567957094j)\n"
+    "\n"
+    ">>> elliprf(x, y, z)/np.sqrt(scale)\n"
+    "(0.5328051227278147-0.4008623567957095j)\n"
+    "\n"
+    "All three arguments coincide:\n"
+    "\n"
+    ">>> x = 1.2 + 3.4j\n"
+    ">>> elliprf(x, x, x)\n"
+    "(0.42991731206146316-0.30417298187455954j)\n"
+    "\n"
+    ">>> 1/np.sqrt(x)\n"
+    "(0.4299173120614631-0.30417298187455954j)\n"
+    "\n"
+    "The so-called \"first lemniscate constant\":\n"
+    "\n"
+    ">>> elliprf(0, 1, 2)\n"
+    "1.3110287771460598\n"
+    "\n"
+    ">>> from scipy.special import gamma\n"
+    ">>> gamma(0.25)**2/(4*np.sqrt(2*np.pi))\n"
+    "1.3110287771460598")
+ufunc_elliprf_loops[0] = <np.PyUFuncGenericFunction>loop_d_ddd__As_fff_f
+ufunc_elliprf_loops[1] = <np.PyUFuncGenericFunction>loop_d_ddd__As_ddd_d
+ufunc_elliprf_loops[2] = <np.PyUFuncGenericFunction>loop_D_DDD__As_FFF_F
+ufunc_elliprf_loops[3] = <np.PyUFuncGenericFunction>loop_D_DDD__As_DDD_D
+ufunc_elliprf_types[0] = <char>NPY_FLOAT
+ufunc_elliprf_types[1] = <char>NPY_FLOAT
+ufunc_elliprf_types[2] = <char>NPY_FLOAT
+ufunc_elliprf_types[3] = <char>NPY_FLOAT
+ufunc_elliprf_types[4] = <char>NPY_DOUBLE
+ufunc_elliprf_types[5] = <char>NPY_DOUBLE
+ufunc_elliprf_types[6] = <char>NPY_DOUBLE
+ufunc_elliprf_types[7] = <char>NPY_DOUBLE
+ufunc_elliprf_types[8] = <char>NPY_CFLOAT
+ufunc_elliprf_types[9] = <char>NPY_CFLOAT
+ufunc_elliprf_types[10] = <char>NPY_CFLOAT
+ufunc_elliprf_types[11] = <char>NPY_CFLOAT
+ufunc_elliprf_types[12] = <char>NPY_CDOUBLE
+ufunc_elliprf_types[13] = <char>NPY_CDOUBLE
+ufunc_elliprf_types[14] = <char>NPY_CDOUBLE
+ufunc_elliprf_types[15] = <char>NPY_CDOUBLE
+ufunc_elliprf_ptr[2*0] = scipy.special._ufuncs_cxx._export_fellint_RF
+ufunc_elliprf_ptr[2*0+1] = <void*>(<char*>"elliprf")
+ufunc_elliprf_ptr[2*1] = scipy.special._ufuncs_cxx._export_fellint_RF
+ufunc_elliprf_ptr[2*1+1] = <void*>(<char*>"elliprf")
+ufunc_elliprf_ptr[2*2] = scipy.special._ufuncs_cxx._export_cellint_RF
+ufunc_elliprf_ptr[2*2+1] = <void*>(<char*>"elliprf")
+ufunc_elliprf_ptr[2*3] = scipy.special._ufuncs_cxx._export_cellint_RF
+ufunc_elliprf_ptr[2*3+1] = <void*>(<char*>"elliprf")
+ufunc_elliprf_data[0] = &ufunc_elliprf_ptr[2*0]
+ufunc_elliprf_data[1] = &ufunc_elliprf_ptr[2*1]
+ufunc_elliprf_data[2] = &ufunc_elliprf_ptr[2*2]
+ufunc_elliprf_data[3] = &ufunc_elliprf_ptr[2*3]
+elliprf = np.PyUFunc_FromFuncAndData(ufunc_elliprf_loops, ufunc_elliprf_data, ufunc_elliprf_types, 4, 3, 1, 0, "elliprf", ufunc_elliprf_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_elliprg_loops[4]
+cdef void *ufunc_elliprg_ptr[8]
+cdef void *ufunc_elliprg_data[4]
+cdef char ufunc_elliprg_types[16]
+cdef char *ufunc_elliprg_doc = (
+    "elliprg(x, y, z, out=None)\n"
+    "\n"
+    "Completely-symmetric elliptic integral of the second kind.\n"
+    "\n"
+    "The function RG is defined as [1]_\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    R_{\\mathrm{G}}(x, y, z) =\n"
+    "       \\frac{1}{4} \\int_0^{+\\infty} [(t + x) (t + y) (t + z)]^{-1/2}\n"
+    "       \\left(\\frac{x}{t + x} + \\frac{y}{t + y} + \\frac{z}{t + z}\\right) t\n"
+    "       dt\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x, y, z : array_like\n"
+    "    Real or complex input parameters. `x`, `y`, or `z` can be any number in\n"
+    "    the complex plane cut along the negative real axis.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "R : scalar or ndarray\n"
+    "    Value of the integral. If all of `x`, `y`, and `z` are real, the return\n"
+    "    value is real. Otherwise, the return value is complex.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "The implementation uses the relation [1]_\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    2 R_{\\mathrm{G}}(x, y, z) =\n"
+    "       z R_{\\mathrm{F}}(x, y, z) -\n"
+    "       \\frac{1}{3} (x - z) (y - z) R_{\\mathrm{D}}(x, y, z) +\n"
+    "       \\sqrt{\\frac{x y}{z}}\n"
+    "\n"
+    "and the symmetry of `x`, `y`, `z` when at least one non-zero parameter can\n"
+    "be chosen as the pivot. When one of the arguments is close to zero, the AGM\n"
+    "method is applied instead. Other special cases are computed following Ref.\n"
+    "[2]_\n"
+    "\n"
+    ".. versionadded:: 1.8.0\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "elliprc : Degenerate symmetric integral.\n"
+    "elliprd : Symmetric elliptic integral of the second kind.\n"
+    "elliprf : Completely-symmetric elliptic integral of the first kind.\n"
+    "elliprj : Symmetric elliptic integral of the third kind.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] B. C. Carlson, \"Numerical computation of real or complex elliptic\n"
+    "       integrals,\" Numer. Algorithm, vol. 10, no. 1, pp. 13-26, 1995.\n"
+    "       https://arxiv.org/abs/math/9409227\n"
+    "       https://doi.org/10.1007/BF02198293\n"
+    ".. [2] B. C. Carlson, ed., Chapter 19 in \"Digital Library of Mathematical\n"
+    "       Functions,\" NIST, US Dept. of Commerce.\n"
+    "       https://dlmf.nist.gov/19.16.E1\n"
+    "       https://dlmf.nist.gov/19.20.ii\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Basic homogeneity property:\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import elliprg\n"
+    "\n"
+    ">>> x = 1.2 + 3.4j\n"
+    ">>> y = 5.\n"
+    ">>> z = 6.\n"
+    ">>> scale = 0.3 + 0.4j\n"
+    ">>> elliprg(scale*x, scale*y, scale*z)\n"
+    "(1.195936862005246+0.8470988320464167j)\n"
+    "\n"
+    ">>> elliprg(x, y, z)*np.sqrt(scale)\n"
+    "(1.195936862005246+0.8470988320464165j)\n"
+    "\n"
+    "Simplifications:\n"
+    "\n"
+    ">>> elliprg(0, y, y)\n"
+    "1.756203682760182\n"
+    "\n"
+    ">>> 0.25*np.pi*np.sqrt(y)\n"
+    "1.7562036827601817\n"
+    "\n"
+    ">>> elliprg(0, 0, z)\n"
+    "1.224744871391589\n"
+    "\n"
+    ">>> 0.5*np.sqrt(z)\n"
+    "1.224744871391589\n"
+    "\n"
+    "The surface area of a triaxial ellipsoid with semiaxes ``a``, ``b``, and\n"
+    "``c`` is given by\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    S = 4 \\pi a b c R_{\\mathrm{G}}(1 / a^2, 1 / b^2, 1 / c^2).\n"
+    "\n"
+    ">>> def ellipsoid_area(a, b, c):\n"
+    "...     r = 4.0 * np.pi * a * b * c\n"
+    "...     return r * elliprg(1.0 / (a * a), 1.0 / (b * b), 1.0 / (c * c))\n"
+    ">>> print(ellipsoid_area(1, 3, 5))\n"
+    "108.62688289491807")
+ufunc_elliprg_loops[0] = <np.PyUFuncGenericFunction>loop_d_ddd__As_fff_f
+ufunc_elliprg_loops[1] = <np.PyUFuncGenericFunction>loop_d_ddd__As_ddd_d
+ufunc_elliprg_loops[2] = <np.PyUFuncGenericFunction>loop_D_DDD__As_FFF_F
+ufunc_elliprg_loops[3] = <np.PyUFuncGenericFunction>loop_D_DDD__As_DDD_D
+ufunc_elliprg_types[0] = <char>NPY_FLOAT
+ufunc_elliprg_types[1] = <char>NPY_FLOAT
+ufunc_elliprg_types[2] = <char>NPY_FLOAT
+ufunc_elliprg_types[3] = <char>NPY_FLOAT
+ufunc_elliprg_types[4] = <char>NPY_DOUBLE
+ufunc_elliprg_types[5] = <char>NPY_DOUBLE
+ufunc_elliprg_types[6] = <char>NPY_DOUBLE
+ufunc_elliprg_types[7] = <char>NPY_DOUBLE
+ufunc_elliprg_types[8] = <char>NPY_CFLOAT
+ufunc_elliprg_types[9] = <char>NPY_CFLOAT
+ufunc_elliprg_types[10] = <char>NPY_CFLOAT
+ufunc_elliprg_types[11] = <char>NPY_CFLOAT
+ufunc_elliprg_types[12] = <char>NPY_CDOUBLE
+ufunc_elliprg_types[13] = <char>NPY_CDOUBLE
+ufunc_elliprg_types[14] = <char>NPY_CDOUBLE
+ufunc_elliprg_types[15] = <char>NPY_CDOUBLE
+ufunc_elliprg_ptr[2*0] = scipy.special._ufuncs_cxx._export_fellint_RG
+ufunc_elliprg_ptr[2*0+1] = <void*>(<char*>"elliprg")
+ufunc_elliprg_ptr[2*1] = scipy.special._ufuncs_cxx._export_fellint_RG
+ufunc_elliprg_ptr[2*1+1] = <void*>(<char*>"elliprg")
+ufunc_elliprg_ptr[2*2] = scipy.special._ufuncs_cxx._export_cellint_RG
+ufunc_elliprg_ptr[2*2+1] = <void*>(<char*>"elliprg")
+ufunc_elliprg_ptr[2*3] = scipy.special._ufuncs_cxx._export_cellint_RG
+ufunc_elliprg_ptr[2*3+1] = <void*>(<char*>"elliprg")
+ufunc_elliprg_data[0] = &ufunc_elliprg_ptr[2*0]
+ufunc_elliprg_data[1] = &ufunc_elliprg_ptr[2*1]
+ufunc_elliprg_data[2] = &ufunc_elliprg_ptr[2*2]
+ufunc_elliprg_data[3] = &ufunc_elliprg_ptr[2*3]
+elliprg = np.PyUFunc_FromFuncAndData(ufunc_elliprg_loops, ufunc_elliprg_data, ufunc_elliprg_types, 4, 3, 1, 0, "elliprg", ufunc_elliprg_doc, 0)
+
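+# A doctest-style check against the complete integral of the second kind,
+# using the relation E(m) = 2 R_G(0, 1-m, 1) from the `ellipe` notes
+# (a sketch; assumes the built module):
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import ellipe, elliprg
+#     >>> m = 0.6
+#     >>> bool(np.isclose(ellipe(m), 2.0 * elliprg(0.0, 1.0 - m, 1.0)))
+#     True
+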
+cdef np.PyUFuncGenericFunction ufunc_elliprj_loops[4]
+cdef void *ufunc_elliprj_ptr[8]
+cdef void *ufunc_elliprj_data[4]
+cdef char ufunc_elliprj_types[20]
+cdef char *ufunc_elliprj_doc = (
+    "elliprj(x, y, z, p, out=None)\n"
+    "\n"
+    "Symmetric elliptic integral of the third kind.\n"
+    "\n"
+    "The function RJ is defined as [1]_\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    R_{\\mathrm{J}}(x, y, z, p) =\n"
+    "       \\frac{3}{2} \\int_0^{+\\infty} [(t + x) (t + y) (t + z)]^{-1/2}\n"
+    "       (t + p)^{-1} dt\n"
+    "\n"
+    ".. warning::\n"
+    "    This function should be considered experimental when the inputs are\n"
+    "    unbalanced.  Check correctness with another independent implementation.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x, y, z, p : array_like\n"
+    "    Real or complex input parameters. `x`, `y`, or `z` are numbers in\n"
+    "    the complex plane cut along the negative real axis (subject to further\n"
+    "    constraints, see Notes), and at most one of them can be zero. `p` must\n"
+    "    be non-zero.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "R : scalar or ndarray\n"
+    "    Value of the integral. If all of `x`, `y`, `z`, and `p` are real, the\n"
+    "    return value is real. Otherwise, the return value is complex.\n"
+    "\n"
+    "    If `p` is real and negative, while `x`, `y`, and `z` are real,\n"
+    "    non-negative, and at most one of them is zero, the Cauchy principal\n"
+    "    value is returned. [1]_ [2]_\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "The code implements Carlson's algorithm based on the duplication theorems\n"
+    "and series expansion up to the 7th order. [3]_ The algorithm is slightly\n"
+    "different from its earlier incarnation as it appears in [1]_, in that the\n"
+    "call to `elliprc` (or ``atan``/``atanh``, see [4]_) is no longer needed in\n"
+    "the inner loop. Asymptotic approximations are used where arguments differ\n"
+    "widely in the order of magnitude. [5]_\n"
+    "\n"
+    "The input values are subject to certain sufficient but not necessary\n"
+    "constaints when input arguments are complex. Notably, ``x``, ``y``, and\n"
+    "``z`` must have non-negative real parts, unless two of them are\n"
+    "non-negative and complex-conjugates to each other while the other is a real\n"
+    "non-negative number. [1]_ If the inputs do not satisfy the sufficient\n"
+    "condition described in Ref. [1]_ they are rejected outright with the output\n"
+    "set to NaN.\n"
+    "\n"
+    "In the case where one of ``x``, ``y``, and ``z`` is equal to ``p``, the\n"
+    "function ``elliprd`` should be preferred because of its less restrictive\n"
+    "domain.\n"
+    "\n"
+    ".. versionadded:: 1.8.0\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "elliprc : Degenerate symmetric integral.\n"
+    "elliprd : Symmetric elliptic integral of the second kind.\n"
+    "elliprf : Completely-symmetric elliptic integral of the first kind.\n"
+    "elliprg : Completely-symmetric elliptic integral of the second kind.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] B. C. Carlson, \"Numerical computation of real or complex elliptic\n"
+    "       integrals,\" Numer. Algorithm, vol. 10, no. 1, pp. 13-26, 1995.\n"
+    "       https://arxiv.org/abs/math/9409227\n"
+    "       https://doi.org/10.1007/BF02198293\n"
+    ".. [2] B. C. Carlson, ed., Chapter 19 in \"Digital Library of Mathematical\n"
+    "       Functions,\" NIST, US Dept. of Commerce.\n"
+    "       https://dlmf.nist.gov/19.20.iii\n"
+    ".. [3] B. C. Carlson, J. FitzSimmons, \"Reduction Theorems for Elliptic\n"
+    "       Integrands with the Square Root of Two Quadratic Factors,\" J.\n"
+    "       Comput. Appl. Math., vol. 118, nos. 1-2, pp. 71-85, 2000.\n"
+    "       https://doi.org/10.1016/S0377-0427(00)00282-X\n"
+    ".. [4] F. Johansson, \"Numerical Evaluation of Elliptic Functions, Elliptic\n"
+    "       Integrals and Modular Forms,\" in J. Blumlein, C. Schneider, P.\n"
+    "       Paule, eds., \"Elliptic Integrals, Elliptic Functions and Modular\n"
+    "       Forms in Quantum Field Theory,\" pp. 269-293, 2019 (Cham,\n"
+    "       Switzerland: Springer Nature Switzerland)\n"
+    "       https://arxiv.org/abs/1806.06725\n"
+    "       https://doi.org/10.1007/978-3-030-04480-0\n"
+    ".. [5] B. C. Carlson, J. L. Gustafson, \"Asymptotic Approximations for\n"
+    "       Symmetric Elliptic Integrals,\" SIAM J. Math. Anls., vol. 25, no. 2,\n"
+    "       pp. 288-303, 1994.\n"
+    "       https://arxiv.org/abs/math/9310223\n"
+    "       https://doi.org/10.1137/S0036141092228477\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Basic homogeneity property:\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import elliprj\n"
+    "\n"
+    ">>> x = 1.2 + 3.4j\n"
+    ">>> y = 5.\n"
+    ">>> z = 6.\n"
+    ">>> p = 7.\n"
+    ">>> scale = 0.3 - 0.4j\n"
+    ">>> elliprj(scale*x, scale*y, scale*z, scale*p)\n"
+    "(0.10834905565679157+0.19694950747103812j)\n"
+    "\n"
+    ">>> elliprj(x, y, z, p)*np.power(scale, -1.5)\n"
+    "(0.10834905565679556+0.19694950747103854j)\n"
+    "\n"
+    "Reduction to simpler elliptic integral:\n"
+    "\n"
+    ">>> elliprj(x, y, z, z)\n"
+    "(0.08288462362195129-0.028376809745123258j)\n"
+    "\n"
+    ">>> from scipy.special import elliprd\n"
+    ">>> elliprd(x, y, z)\n"
+    "(0.08288462362195136-0.028376809745123296j)\n"
+    "\n"
+    "All arguments coincide:\n"
+    "\n"
+    ">>> elliprj(x, x, x, x)\n"
+    "(-0.03986825876151896-0.14051741840449586j)\n"
+    "\n"
+    ">>> np.power(x, -1.5)\n"
+    "(-0.03986825876151894-0.14051741840449583j)")
+ufunc_elliprj_loops[0] = <np.PyUFuncGenericFunction>loop_d_dddd__As_ffff_f
+ufunc_elliprj_loops[1] = <np.PyUFuncGenericFunction>loop_d_dddd__As_dddd_d
+ufunc_elliprj_loops[2] = <np.PyUFuncGenericFunction>loop_D_DDDD__As_FFFF_F
+ufunc_elliprj_loops[3] = <np.PyUFuncGenericFunction>loop_D_DDDD__As_DDDD_D
+ufunc_elliprj_types[0] = <char>NPY_FLOAT
+ufunc_elliprj_types[1] = <char>NPY_FLOAT
+ufunc_elliprj_types[2] = <char>NPY_FLOAT
+ufunc_elliprj_types[3] = <char>NPY_FLOAT
+ufunc_elliprj_types[4] = <char>NPY_FLOAT
+ufunc_elliprj_types[5] = <char>NPY_DOUBLE
+ufunc_elliprj_types[6] = <char>NPY_DOUBLE
+ufunc_elliprj_types[7] = <char>NPY_DOUBLE
+ufunc_elliprj_types[8] = <char>NPY_DOUBLE
+ufunc_elliprj_types[9] = <char>NPY_DOUBLE
+ufunc_elliprj_types[10] = <char>NPY_CFLOAT
+ufunc_elliprj_types[11] = <char>NPY_CFLOAT
+ufunc_elliprj_types[12] = <char>NPY_CFLOAT
+ufunc_elliprj_types[13] = <char>NPY_CFLOAT
+ufunc_elliprj_types[14] = <char>NPY_CFLOAT
+ufunc_elliprj_types[15] = <char>NPY_CDOUBLE
+ufunc_elliprj_types[16] = <char>NPY_CDOUBLE
+ufunc_elliprj_types[17] = <char>NPY_CDOUBLE
+ufunc_elliprj_types[18] = <char>NPY_CDOUBLE
+ufunc_elliprj_types[19] = <char>NPY_CDOUBLE
+ufunc_elliprj_ptr[2*0] = scipy.special._ufuncs_cxx._export_fellint_RJ
+ufunc_elliprj_ptr[2*0+1] = <void*>(<char*>"elliprj")
+ufunc_elliprj_ptr[2*1] = scipy.special._ufuncs_cxx._export_fellint_RJ
+ufunc_elliprj_ptr[2*1+1] = <void*>(<char*>"elliprj")
+ufunc_elliprj_ptr[2*2] = scipy.special._ufuncs_cxx._export_cellint_RJ
+ufunc_elliprj_ptr[2*2+1] = <void*>(<char*>"elliprj")
+ufunc_elliprj_ptr[2*3] = scipy.special._ufuncs_cxx._export_cellint_RJ
+ufunc_elliprj_ptr[2*3+1] = <void*>(<char*>"elliprj")
+ufunc_elliprj_data[0] = &ufunc_elliprj_ptr[2*0]
+ufunc_elliprj_data[1] = &ufunc_elliprj_ptr[2*1]
+ufunc_elliprj_data[2] = &ufunc_elliprj_ptr[2*2]
+ufunc_elliprj_data[3] = &ufunc_elliprj_ptr[2*3]
+elliprj = np.PyUFunc_FromFuncAndData(ufunc_elliprj_loops, ufunc_elliprj_data, ufunc_elliprj_types, 4, 4, 1, 0, "elliprj", ufunc_elliprj_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_entr_loops[2]
+cdef void *ufunc_entr_ptr[4]
+cdef void *ufunc_entr_data[2]
+cdef char ufunc_entr_types[4]
+cdef char *ufunc_entr_doc = (
+    "entr(x, out=None)\n"
+    "\n"
+    "Elementwise function for computing entropy.\n"
+    "\n"
+    ".. math:: \\text{entr}(x) = \\begin{cases} - x \\log(x) & x > 0  \\\\ 0 & x = 0 \\\\ -\\infty & \\text{otherwise} \\end{cases}\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : ndarray\n"
+    "    Input array.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "res : scalar or ndarray\n"
+    "    The value of the elementwise entropy function at the given points `x`.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "kl_div, rel_entr, scipy.stats.entropy\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    ".. versionadded:: 0.15.0\n"
+    "\n"
+    "This function is concave.\n"
+    "\n"
+    "The origin of this function is in convex programming; see [1]_.\n"
+    "Given a probability distribution :math:`p_1, \\ldots, p_n`,\n"
+    "the definition of entropy in the context of *information theory* is\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    \\sum_{i = 1}^n \\mathrm{entr}(p_i).\n"
+    "\n"
+    "To compute the latter quantity, use `scipy.stats.entropy`.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Boyd, Stephen and Lieven Vandenberghe. *Convex optimization*.\n"
+    "       Cambridge University Press, 2004.\n"
+    "       :doi:`https://doi.org/10.1017/CBO9780511804441`")
+ufunc_entr_loops[0] = <np.PyUFuncGenericFunction>loop_d_d__As_f_f
+ufunc_entr_loops[1] = <np.PyUFuncGenericFunction>loop_d_d__As_d_d
+ufunc_entr_types[0] = <char>NPY_FLOAT
+ufunc_entr_types[1] = <char>NPY_FLOAT
+ufunc_entr_types[2] = <char>NPY_DOUBLE
+ufunc_entr_types[3] = <char>NPY_DOUBLE
+ufunc_entr_ptr[2*0] = <void*>_func_entr
+ufunc_entr_ptr[2*0+1] = <void*>(<char*>"entr")
+ufunc_entr_ptr[2*1] = <void*>_func_entr
+ufunc_entr_ptr[2*1+1] = <void*>(<char*>"entr")
+ufunc_entr_data[0] = &ufunc_entr_ptr[2*0]
+ufunc_entr_data[1] = &ufunc_entr_ptr[2*1]
+entr = np.PyUFunc_FromFuncAndData(ufunc_entr_loops, ufunc_entr_data, ufunc_entr_types, 2, 1, 1, 0, "entr", ufunc_entr_doc, 0)
+
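+# A doctest-style check that summing `entr` over a probability vector
+# matches `scipy.stats.entropy`, as the docstring suggests (a sketch;
+# assumes both modules are importable):
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import entr
+#     >>> from scipy.stats import entropy
+#     >>> p = np.array([0.2, 0.3, 0.5])
+#     >>> bool(np.isclose(entr(p).sum(), entropy(p)))
+#     True
+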
+cdef np.PyUFuncGenericFunction ufunc_erf_loops[4]
+cdef void *ufunc_erf_ptr[8]
+cdef void *ufunc_erf_data[4]
+cdef char ufunc_erf_types[8]
+cdef char *ufunc_erf_doc = (
+    "erf(z, out=None)\n"
+    "\n"
+    "Returns the error function of complex argument.\n"
+    "\n"
+    "It is defined as ``2/sqrt(pi)*integral(exp(-t**2), t=0..z)``.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : ndarray\n"
+    "    Input array.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "res : scalar or ndarray\n"
+    "    The values of the error function at the given points `x`.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "erfc, erfinv, erfcinv, wofz, erfcx, erfi\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "The cumulative of the unit normal distribution is given by\n"
+    "``Phi(z) = 1/2[1 + erf(z/sqrt(2))]``.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] https://en.wikipedia.org/wiki/Error_function\n"
+    ".. [2] Milton Abramowitz and Irene A. Stegun, eds.\n"
+    "    Handbook of Mathematical Functions with Formulas,\n"
+    "    Graphs, and Mathematical Tables. New York: Dover,\n"
+    "    1972. http://www.math.sfu.ca/~cbm/aands/page_297.htm\n"
+    ".. [3] Steven G. Johnson, Faddeeva W function implementation.\n"
+    "   http://ab-initio.mit.edu/Faddeeva\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy import special\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> x = np.linspace(-3, 3)\n"
+    ">>> plt.plot(x, special.erf(x))\n"
+    ">>> plt.xlabel('$x$')\n"
+    ">>> plt.ylabel('$erf(x)$')\n"
+    ">>> plt.show()")
+ufunc_erf_loops[0] = <np.PyUFuncGenericFunction>loop_d_d__As_f_f
+ufunc_erf_loops[1] = <np.PyUFuncGenericFunction>loop_d_d__As_d_d
+ufunc_erf_loops[2] = <np.PyUFuncGenericFunction>loop_D_D__As_F_F
+ufunc_erf_loops[3] = <np.PyUFuncGenericFunction>loop_D_D__As_D_D
+ufunc_erf_types[0] = <char>NPY_FLOAT
+ufunc_erf_types[1] = <char>NPY_FLOAT
+ufunc_erf_types[2] = <char>NPY_DOUBLE
+ufunc_erf_types[3] = <char>NPY_DOUBLE
+ufunc_erf_types[4] = <char>NPY_CFLOAT
+ufunc_erf_types[5] = <char>NPY_CFLOAT
+ufunc_erf_types[6] = <char>NPY_CDOUBLE
+ufunc_erf_types[7] = <char>NPY_CDOUBLE
+ufunc_erf_ptr[2*0] = <void*>_func_erf
+ufunc_erf_ptr[2*0+1] = <void*>(<char*>"erf")
+ufunc_erf_ptr[2*1] = <void*>_func_erf
+ufunc_erf_ptr[2*1+1] = <void*>(<char*>"erf")
+ufunc_erf_ptr[2*2] = scipy.special._ufuncs_cxx._export_faddeeva_erf
+ufunc_erf_ptr[2*2+1] = <void*>(<char*>"erf")
+ufunc_erf_ptr[2*3] = scipy.special._ufuncs_cxx._export_faddeeva_erf
+ufunc_erf_ptr[2*3+1] = <void*>(<char*>"erf")
+ufunc_erf_data[0] = &ufunc_erf_ptr[2*0]
+ufunc_erf_data[1] = &ufunc_erf_ptr[2*1]
+ufunc_erf_data[2] = &ufunc_erf_ptr[2*2]
+ufunc_erf_data[3] = &ufunc_erf_ptr[2*3]
+erf = np.PyUFunc_FromFuncAndData(ufunc_erf_loops, ufunc_erf_data, ufunc_erf_types, 4, 1, 1, 0, "erf", ufunc_erf_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_erfc_loops[4]
+cdef void *ufunc_erfc_ptr[8]
+cdef void *ufunc_erfc_data[4]
+cdef char ufunc_erfc_types[8]
+cdef char *ufunc_erfc_doc = (
+    "erfc(x, out=None)\n"
+    "\n"
+    "Complementary error function, ``1 - erf(x)``.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Real or complex valued argument\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Values of the complementary error function\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "erf, erfi, erfcx, dawsn, wofz\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Steven G. Johnson, Faddeeva W function implementation.\n"
+    "   http://ab-initio.mit.edu/Faddeeva\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy import special\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> x = np.linspace(-3, 3)\n"
+    ">>> plt.plot(x, special.erfc(x))\n"
+    ">>> plt.xlabel('$x$')\n"
+    ">>> plt.ylabel('$erfc(x)$')\n"
+    ">>> plt.show()")
+ufunc_erfc_loops[0] = <np.PyUFuncGenericFunction>loop_d_d__As_f_f
+ufunc_erfc_loops[1] = <np.PyUFuncGenericFunction>loop_d_d__As_d_d
+ufunc_erfc_loops[2] = <np.PyUFuncGenericFunction>loop_D_D__As_F_F
+ufunc_erfc_loops[3] = <np.PyUFuncGenericFunction>loop_D_D__As_D_D
+ufunc_erfc_types[0] = <char>NPY_FLOAT
+ufunc_erfc_types[1] = <char>NPY_FLOAT
+ufunc_erfc_types[2] = <char>NPY_DOUBLE
+ufunc_erfc_types[3] = <char>NPY_DOUBLE
+ufunc_erfc_types[4] = <char>NPY_CFLOAT
+ufunc_erfc_types[5] = <char>NPY_CFLOAT
+ufunc_erfc_types[6] = <char>NPY_CDOUBLE
+ufunc_erfc_types[7] = <char>NPY_CDOUBLE
+ufunc_erfc_ptr[2*0] = <void*>_func_erfc
+ufunc_erfc_ptr[2*0+1] = <void*>(<char*>"erfc")
+ufunc_erfc_ptr[2*1] = <void*>_func_erfc
+ufunc_erfc_ptr[2*1+1] = <void*>(<char*>"erfc")
+ufunc_erfc_ptr[2*2] = <void*>scipy.special._ufuncs_cxx._export_faddeeva_erfc_complex
+ufunc_erfc_ptr[2*2+1] = <void*>(<char*>"erfc")
+ufunc_erfc_ptr[2*3] = <void*>scipy.special._ufuncs_cxx._export_faddeeva_erfc_complex
+ufunc_erfc_ptr[2*3+1] = <void*>(<char*>"erfc")
+ufunc_erfc_data[0] = &ufunc_erfc_ptr[2*0]
+ufunc_erfc_data[1] = &ufunc_erfc_ptr[2*1]
+ufunc_erfc_data[2] = &ufunc_erfc_ptr[2*2]
+ufunc_erfc_data[3] = &ufunc_erfc_ptr[2*3]
+erfc = np.PyUFunc_FromFuncAndData(ufunc_erfc_loops, ufunc_erfc_data, ufunc_erfc_types, 4, 1, 1, 0, "erfc", ufunc_erfc_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_erfcinv_loops[2]
+cdef void *ufunc_erfcinv_ptr[4]
+cdef void *ufunc_erfcinv_data[2]
+cdef char ufunc_erfcinv_types[4]
+cdef char *ufunc_erfcinv_doc = (
+    "erfcinv(y, out=None)\n"
+    "\n"
+    "Inverse of the complementary error function.\n"
+    "\n"
+    "Computes the inverse of the complementary error function.\n"
+    "\n"
+    "In the complex domain, there is no unique complex number w satisfying\n"
+    "erfc(w)=z. This indicates a true inverse function would be multivalued.\n"
+    "When the domain restricts to the real, 0 < x < 2, there is a unique real\n"
+    "number satisfying erfc(erfcinv(x)) = erfcinv(erfc(x)).\n"
+    "\n"
+    "It is related to inverse of the error function by erfcinv(1-x) = erfinv(x)\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "y : ndarray\n"
+    "    Argument at which to evaluate. Domain: [0, 2]\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "erfcinv : scalar or ndarray\n"
+    "    The inverse of erfc of y, element-wise\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "erf : Error function of a complex argument\n"
+    "erfc : Complementary error function, ``1 - erf(x)``\n"
+    "erfinv : Inverse of the error function\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> from scipy.special import erfcinv\n"
+    "\n"
+    ">>> erfcinv(0.5)\n"
+    "0.4769362762044699\n"
+    "\n"
+    ">>> y = np.linspace(0.0, 2.0, num=11)\n"
+    ">>> erfcinv(y)\n"
+    "array([        inf,  0.9061938 ,  0.59511608,  0.37080716,  0.17914345,\n"
+    "       -0.        , -0.17914345, -0.37080716, -0.59511608, -0.9061938 ,\n"
+    "              -inf])\n"
+    "\n"
+    "Plot the function:\n"
+    "\n"
+    ">>> y = np.linspace(0, 2, 200)\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> ax.plot(y, erfcinv(y))\n"
+    ">>> ax.grid(True)\n"
+    ">>> ax.set_xlabel('y')\n"
+    ">>> ax.set_title('erfcinv(y)')\n"
+    ">>> plt.show()")
+ufunc_erfcinv_loops[0] = <np.PyUFuncGenericFunction>loop_d_d__As_f_f
+ufunc_erfcinv_loops[1] = <np.PyUFuncGenericFunction>loop_d_d__As_d_d
+ufunc_erfcinv_types[0] = <char>NPY_FLOAT
+ufunc_erfcinv_types[1] = <char>NPY_FLOAT
+ufunc_erfcinv_types[2] = <char>NPY_DOUBLE
+ufunc_erfcinv_types[3] = <char>NPY_DOUBLE
+ufunc_erfcinv_ptr[2*0] = <void*>_func_erfcinv
+ufunc_erfcinv_ptr[2*0+1] = <void*>(<char*>"erfcinv")
+ufunc_erfcinv_ptr[2*1] = <void*>_func_erfcinv
+ufunc_erfcinv_ptr[2*1+1] = <void*>(<char*>"erfcinv")
+ufunc_erfcinv_data[0] = &ufunc_erfcinv_ptr[2*0]
+ufunc_erfcinv_data[1] = &ufunc_erfcinv_ptr[2*1]
+erfcinv = np.PyUFunc_FromFuncAndData(ufunc_erfcinv_loops, ufunc_erfcinv_data, ufunc_erfcinv_types, 2, 1, 1, 0, "erfcinv", ufunc_erfcinv_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_erfcx_loops[4]
+cdef void *ufunc_erfcx_ptr[8]
+cdef void *ufunc_erfcx_data[4]
+cdef char ufunc_erfcx_types[8]
+cdef char *ufunc_erfcx_doc = (
+    "erfcx(x, out=None)\n"
+    "\n"
+    "Scaled complementary error function, ``exp(x**2) * erfc(x)``.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Real or complex valued argument\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Values of the scaled complementary error function\n"
+    "\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "erf, erfc, erfi, dawsn, wofz\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "\n"
+    ".. versionadded:: 0.12.0\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Steven G. Johnson, Faddeeva W function implementation.\n"
+    "   http://ab-initio.mit.edu/Faddeeva\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy import special\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> x = np.linspace(-3, 3)\n"
+    ">>> plt.plot(x, special.erfcx(x))\n"
+    ">>> plt.xlabel('$x$')\n"
+    ">>> plt.ylabel('$erfcx(x)$')\n"
+    ">>> plt.show()")
+ufunc_erfcx_loops[0] = <np.PyUFuncGenericFunction>loop_d_d__As_f_f
+ufunc_erfcx_loops[1] = <np.PyUFuncGenericFunction>loop_d_d__As_d_d
+ufunc_erfcx_loops[2] = <np.PyUFuncGenericFunction>loop_D_D__As_F_F
+ufunc_erfcx_loops[3] = <np.PyUFuncGenericFunction>loop_D_D__As_D_D
+ufunc_erfcx_types[0] = <char>NPY_FLOAT
+ufunc_erfcx_types[1] = <char>NPY_FLOAT
+ufunc_erfcx_types[2] = <char>NPY_DOUBLE
+ufunc_erfcx_types[3] = <char>NPY_DOUBLE
+ufunc_erfcx_types[4] = <char>NPY_CFLOAT
+ufunc_erfcx_types[5] = <char>NPY_CFLOAT
+ufunc_erfcx_types[6] = <char>NPY_CDOUBLE
+ufunc_erfcx_types[7] = <char>NPY_CDOUBLE
+ufunc_erfcx_ptr[2*0] = <void*>scipy.special._ufuncs_cxx._export_faddeeva_erfcx
+ufunc_erfcx_ptr[2*0+1] = <void*>(<char*>"erfcx")
+ufunc_erfcx_ptr[2*1] = <void*>scipy.special._ufuncs_cxx._export_faddeeva_erfcx
+ufunc_erfcx_ptr[2*1+1] = <void*>(<char*>"erfcx")
+ufunc_erfcx_ptr[2*2] = <void*>scipy.special._ufuncs_cxx._export_faddeeva_erfcx_complex
+ufunc_erfcx_ptr[2*2+1] = <void*>(<char*>"erfcx")
+ufunc_erfcx_ptr[2*3] = <void*>scipy.special._ufuncs_cxx._export_faddeeva_erfcx_complex
+ufunc_erfcx_ptr[2*3+1] = <void*>(<char*>"erfcx")
+ufunc_erfcx_data[0] = &ufunc_erfcx_ptr[2*0]
+ufunc_erfcx_data[1] = &ufunc_erfcx_ptr[2*1]
+ufunc_erfcx_data[2] = &ufunc_erfcx_ptr[2*2]
+ufunc_erfcx_data[3] = &ufunc_erfcx_ptr[2*3]
+erfcx = np.PyUFunc_FromFuncAndData(ufunc_erfcx_loops, ufunc_erfcx_data, ufunc_erfcx_types, 4, 1, 1, 0, "erfcx", ufunc_erfcx_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_erfi_loops[4]
+cdef void *ufunc_erfi_ptr[8]
+cdef void *ufunc_erfi_data[4]
+cdef char ufunc_erfi_types[8]
+cdef char *ufunc_erfi_doc = (
+    "erfi(z, out=None)\n"
+    "\n"
+    "Imaginary error function, ``-i erf(i z)``.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "z : array_like\n"
+    "    Real or complex valued argument\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Values of the imaginary error function\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "erf, erfc, erfcx, dawsn, wofz\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "\n"
+    ".. versionadded:: 0.12.0\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Steven G. Johnson, Faddeeva W function implementation.\n"
+    "   http://ab-initio.mit.edu/Faddeeva\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy import special\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> x = np.linspace(-3, 3)\n"
+    ">>> plt.plot(x, special.erfi(x))\n"
+    ">>> plt.xlabel('$x$')\n"
+    ">>> plt.ylabel('$erfi(x)$')\n"
+    ">>> plt.show()")
+ufunc_erfi_loops[0] = <np.PyUFuncGenericFunction>loop_d_d__As_f_f
+ufunc_erfi_loops[1] = <np.PyUFuncGenericFunction>loop_d_d__As_d_d
+ufunc_erfi_loops[2] = <np.PyUFuncGenericFunction>loop_D_D__As_F_F
+ufunc_erfi_loops[3] = <np.PyUFuncGenericFunction>loop_D_D__As_D_D
+ufunc_erfi_types[0] = <char>NPY_FLOAT
+ufunc_erfi_types[1] = <char>NPY_FLOAT
+ufunc_erfi_types[2] = <char>NPY_DOUBLE
+ufunc_erfi_types[3] = <char>NPY_DOUBLE
+ufunc_erfi_types[4] = <char>NPY_CFLOAT
+ufunc_erfi_types[5] = <char>NPY_CFLOAT
+ufunc_erfi_types[6] = <char>NPY_CDOUBLE
+ufunc_erfi_types[7] = <char>NPY_CDOUBLE
+ufunc_erfi_ptr[2*0] = <void*>scipy.special._ufuncs_cxx._export_faddeeva_erfi
+ufunc_erfi_ptr[2*0+1] = <void*>(<char*>"erfi")
+ufunc_erfi_ptr[2*1] = <void*>scipy.special._ufuncs_cxx._export_faddeeva_erfi
+ufunc_erfi_ptr[2*1+1] = <void*>(<char*>"erfi")
+ufunc_erfi_ptr[2*2] = <void*>scipy.special._ufuncs_cxx._export_faddeeva_erfi_complex
+ufunc_erfi_ptr[2*2+1] = <void*>(<char*>"erfi")
+ufunc_erfi_ptr[2*3] = <void*>scipy.special._ufuncs_cxx._export_faddeeva_erfi_complex
+ufunc_erfi_ptr[2*3+1] = <void*>(<char*>"erfi")
+ufunc_erfi_data[0] = &ufunc_erfi_ptr[2*0]
+ufunc_erfi_data[1] = &ufunc_erfi_ptr[2*1]
+ufunc_erfi_data[2] = &ufunc_erfi_ptr[2*2]
+ufunc_erfi_data[3] = &ufunc_erfi_ptr[2*3]
+erfi = np.PyUFunc_FromFuncAndData(ufunc_erfi_loops, ufunc_erfi_data, ufunc_erfi_types, 4, 1, 1, 0, "erfi", ufunc_erfi_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_erfinv_loops[2]
+cdef void *ufunc_erfinv_ptr[4]
+cdef void *ufunc_erfinv_data[2]
+cdef char ufunc_erfinv_types[4]
+cdef char *ufunc_erfinv_doc = (
+    "erfinv(y, out=None)\n"
+    "\n"
+    "Inverse of the error function.\n"
+    "\n"
+    "Computes the inverse of the error function.\n"
+    "\n"
+    "In the complex domain, there is no unique complex number w satisfying\n"
+    "erf(w)=z. This indicates a true inverse function would be multivalued.\n"
+    "When the domain restricts to the real, -1 < x < 1, there is a unique real\n"
+    "number satisfying erf(erfinv(x)) = x.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "y : ndarray\n"
+    "    Argument at which to evaluate. Domain: [-1, 1]\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "erfinv : scalar or ndarray\n"
+    "    The inverse of erf of y, element-wise\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "erf : Error function of a complex argument\n"
+    "erfc : Complementary error function, ``1 - erf(x)``\n"
+    "erfcinv : Inverse of the complementary error function\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> from scipy.special import erfinv, erf\n"
+    "\n"
+    ">>> erfinv(0.5)\n"
+    "0.4769362762044699\n"
+    "\n"
+    ">>> y = np.linspace(-1.0, 1.0, num=9)\n"
+    ">>> x = erfinv(y)\n"
+    ">>> x\n"
+    "array([       -inf, -0.81341985, -0.47693628, -0.22531206,  0.        ,\n"
+    "        0.22531206,  0.47693628,  0.81341985,         inf])\n"
+    "\n"
+    "Verify that ``erf(erfinv(y))`` is ``y``.\n"
+    "\n"
+    ">>> erf(x)\n"
+    "array([-1.  , -0.75, -0.5 , -0.25,  0.  ,  0.25,  0.5 ,  0.75,  1.  ])\n"
+    "\n"
+    "Plot the function:\n"
+    "\n"
+    ">>> y = np.linspace(-1, 1, 200)\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> ax.plot(y, erfinv(y))\n"
+    ">>> ax.grid(True)\n"
+    ">>> ax.set_xlabel('y')\n"
+    ">>> ax.set_title('erfinv(y)')\n"
+    ">>> plt.show()")
+ufunc_erfinv_loops[0] = <np.PyUFuncGenericFunction>loop_f_f__As_f_f
+ufunc_erfinv_loops[1] = <np.PyUFuncGenericFunction>loop_d_d__As_d_d
+ufunc_erfinv_types[0] = <char>NPY_FLOAT
+ufunc_erfinv_types[1] = <char>NPY_FLOAT
+ufunc_erfinv_types[2] = <char>NPY_DOUBLE
+ufunc_erfinv_types[3] = <char>NPY_DOUBLE
+ufunc_erfinv_ptr[2*0] = <void*>scipy.special._ufuncs_cxx._export_erfinv_float
+ufunc_erfinv_ptr[2*0+1] = <void*>(<char*>"erfinv")
+ufunc_erfinv_ptr[2*1] = <void*>scipy.special._ufuncs_cxx._export_erfinv_double
+ufunc_erfinv_ptr[2*1+1] = <void*>(<char*>"erfinv")
+ufunc_erfinv_data[0] = &ufunc_erfinv_ptr[2*0]
+ufunc_erfinv_data[1] = &ufunc_erfinv_ptr[2*1]
+erfinv = np.PyUFunc_FromFuncAndData(ufunc_erfinv_loops, ufunc_erfinv_data, ufunc_erfinv_types, 2, 1, 1, 0, "erfinv", ufunc_erfinv_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_eval_chebyc_loops[5]
+cdef void *ufunc_eval_chebyc_ptr[10]
+cdef void *ufunc_eval_chebyc_data[5]
+cdef char ufunc_eval_chebyc_types[15]
+cdef char *ufunc_eval_chebyc_doc = (
+    "eval_chebyc(n, x, out=None)\n"
+    "\n"
+    "Evaluate Chebyshev polynomial of the first kind on [-2, 2] at a\n"
+    "point.\n"
+    "\n"
+    "These polynomials are defined as\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    C_n(x) = 2 T_n(x/2)\n"
+    "\n"
+    "where :math:`T_n` is a Chebyshev polynomial of the first kind. See\n"
+    "22.5.11 in [AS]_ for details.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "n : array_like\n"
+    "    Degree of the polynomial. If not an integer, the result is\n"
+    "    determined via the relation to `eval_chebyt`.\n"
+    "x : array_like\n"
+    "    Points at which to evaluate the Chebyshev polynomial\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "C : scalar or ndarray\n"
+    "    Values of the Chebyshev polynomial\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "roots_chebyc : roots and quadrature weights of Chebyshev\n"
+    "               polynomials of the first kind on [-2, 2]\n"
+    "chebyc : Chebyshev polynomial object\n"
+    "numpy.polynomial.chebyshev.Chebyshev : Chebyshev series\n"
+    "eval_chebyt : evaluate Chebycshev polynomials of the first kind\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n"
+    "    Handbook of Mathematical Functions with Formulas,\n"
+    "    Graphs, and Mathematical Tables. New York: Dover, 1972.\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> import scipy.special as sc\n"
+    "\n"
+    "They are a scaled version of the Chebyshev polynomials of the\n"
+    "first kind.\n"
+    "\n"
+    ">>> x = np.linspace(-2, 2, 6)\n"
+    ">>> sc.eval_chebyc(3, x)\n"
+    "array([-2.   ,  1.872,  1.136, -1.136, -1.872,  2.   ])\n"
+    ">>> 2 * sc.eval_chebyt(3, x / 2)\n"
+    "array([-2.   ,  1.872,  1.136, -1.136, -1.872,  2.   ])")
+ufunc_eval_chebyc_loops[0] = <np.PyUFuncGenericFunction>loop_d_ld__As_ld_d
+ufunc_eval_chebyc_loops[1] = <np.PyUFuncGenericFunction>loop_d_dd__As_ff_f
+ufunc_eval_chebyc_loops[2] = <np.PyUFuncGenericFunction>loop_D_dD__As_fF_F
+ufunc_eval_chebyc_loops[3] = <np.PyUFuncGenericFunction>loop_d_dd__As_dd_d
+ufunc_eval_chebyc_loops[4] = <np.PyUFuncGenericFunction>loop_D_dD__As_dD_D
+ufunc_eval_chebyc_types[0] = <char>NPY_LONG
+ufunc_eval_chebyc_types[1] = <char>NPY_DOUBLE
+ufunc_eval_chebyc_types[2] = <char>NPY_DOUBLE
+ufunc_eval_chebyc_types[3] = <char>NPY_FLOAT
+ufunc_eval_chebyc_types[4] = <char>NPY_FLOAT
+ufunc_eval_chebyc_types[5] = <char>NPY_FLOAT
+ufunc_eval_chebyc_types[6] = <char>NPY_FLOAT
+ufunc_eval_chebyc_types[7] = <char>NPY_CFLOAT
+ufunc_eval_chebyc_types[8] = <char>NPY_CFLOAT
+ufunc_eval_chebyc_types[9] = <char>NPY_DOUBLE
+ufunc_eval_chebyc_types[10] = <char>NPY_DOUBLE
+ufunc_eval_chebyc_types[11] = <char>NPY_DOUBLE
+ufunc_eval_chebyc_types[12] = <char>NPY_DOUBLE
+ufunc_eval_chebyc_types[13] = <char>NPY_CDOUBLE
+ufunc_eval_chebyc_types[14] = <char>NPY_CDOUBLE
+ufunc_eval_chebyc_ptr[2*0] = <void*>_func_eval_chebyc_l
+ufunc_eval_chebyc_ptr[2*0+1] = <void*>(<char*>"eval_chebyc")
+ufunc_eval_chebyc_ptr[2*1] = <void*>_func_eval_chebyc[double]
+ufunc_eval_chebyc_ptr[2*1+1] = <void*>(<char*>"eval_chebyc")
+ufunc_eval_chebyc_ptr[2*2] = <void*>_func_eval_chebyc[double_complex]
+ufunc_eval_chebyc_ptr[2*2+1] = <void*>(<char*>"eval_chebyc")
+ufunc_eval_chebyc_ptr[2*3] = <void*>_func_eval_chebyc[double]
+ufunc_eval_chebyc_ptr[2*3+1] = <void*>(<char*>"eval_chebyc")
+ufunc_eval_chebyc_ptr[2*4] = <void*>_func_eval_chebyc[double_complex]
+ufunc_eval_chebyc_ptr[2*4+1] = <void*>(<char*>"eval_chebyc")
+ufunc_eval_chebyc_data[0] = &ufunc_eval_chebyc_ptr[2*0]
+ufunc_eval_chebyc_data[1] = &ufunc_eval_chebyc_ptr[2*1]
+ufunc_eval_chebyc_data[2] = &ufunc_eval_chebyc_ptr[2*2]
+ufunc_eval_chebyc_data[3] = &ufunc_eval_chebyc_ptr[2*3]
+ufunc_eval_chebyc_data[4] = &ufunc_eval_chebyc_ptr[2*4]
+eval_chebyc = np.PyUFunc_FromFuncAndData(ufunc_eval_chebyc_loops, ufunc_eval_chebyc_data, ufunc_eval_chebyc_types, 5, 2, 1, 0, "eval_chebyc", ufunc_eval_chebyc_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_eval_chebys_loops[5]
+cdef void *ufunc_eval_chebys_ptr[10]
+cdef void *ufunc_eval_chebys_data[5]
+cdef char ufunc_eval_chebys_types[15]
+cdef char *ufunc_eval_chebys_doc = (
+    "eval_chebys(n, x, out=None)\n"
+    "\n"
+    "Evaluate Chebyshev polynomial of the second kind on [-2, 2] at a\n"
+    "point.\n"
+    "\n"
+    "These polynomials are defined as\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    S_n(x) = U_n(x/2)\n"
+    "\n"
+    "where :math:`U_n` is a Chebyshev polynomial of the second\n"
+    "kind. See 22.5.13 in [AS]_ for details.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "n : array_like\n"
+    "    Degree of the polynomial. If not an integer, the result is\n"
+    "    determined via the relation to `eval_chebyu`.\n"
+    "x : array_like\n"
+    "    Points at which to evaluate the Chebyshev polynomial\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "S : scalar or ndarray\n"
+    "    Values of the Chebyshev polynomial\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "roots_chebys : roots and quadrature weights of Chebyshev\n"
+    "               polynomials of the second kind on [-2, 2]\n"
+    "chebys : Chebyshev polynomial object\n"
+    "eval_chebyu : evaluate Chebyshev polynomials of the second kind\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n"
+    "    Handbook of Mathematical Functions with Formulas,\n"
+    "    Graphs, and Mathematical Tables. New York: Dover, 1972.\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> import scipy.special as sc\n"
+    "\n"
+    "They are a scaled version of the Chebyshev polynomials of the\n"
+    "second kind.\n"
+    "\n"
+    ">>> x = np.linspace(-2, 2, 6)\n"
+    ">>> sc.eval_chebys(3, x)\n"
+    "array([-4.   ,  0.672,  0.736, -0.736, -0.672,  4.   ])\n"
+    ">>> sc.eval_chebyu(3, x / 2)\n"
+    "array([-4.   ,  0.672,  0.736, -0.736, -0.672,  4.   ])")
+ufunc_eval_chebys_loops[0] = <np.PyUFuncGenericFunction>loop_d_ld__As_ld_d
+ufunc_eval_chebys_loops[1] = <np.PyUFuncGenericFunction>loop_d_dd__As_ff_f
+ufunc_eval_chebys_loops[2] = <np.PyUFuncGenericFunction>loop_D_dD__As_fF_F
+ufunc_eval_chebys_loops[3] = <np.PyUFuncGenericFunction>loop_d_dd__As_dd_d
+ufunc_eval_chebys_loops[4] = <np.PyUFuncGenericFunction>loop_D_dD__As_dD_D
+ufunc_eval_chebys_types[0] = <char>NPY_LONG
+ufunc_eval_chebys_types[1] = <char>NPY_DOUBLE
+ufunc_eval_chebys_types[2] = <char>NPY_DOUBLE
+ufunc_eval_chebys_types[3] = <char>NPY_FLOAT
+ufunc_eval_chebys_types[4] = <char>NPY_FLOAT
+ufunc_eval_chebys_types[5] = <char>NPY_FLOAT
+ufunc_eval_chebys_types[6] = <char>NPY_FLOAT
+ufunc_eval_chebys_types[7] = <char>NPY_CFLOAT
+ufunc_eval_chebys_types[8] = <char>NPY_CFLOAT
+ufunc_eval_chebys_types[9] = <char>NPY_DOUBLE
+ufunc_eval_chebys_types[10] = <char>NPY_DOUBLE
+ufunc_eval_chebys_types[11] = <char>NPY_DOUBLE
+ufunc_eval_chebys_types[12] = <char>NPY_DOUBLE
+ufunc_eval_chebys_types[13] = <char>NPY_CDOUBLE
+ufunc_eval_chebys_types[14] = <char>NPY_CDOUBLE
+ufunc_eval_chebys_ptr[2*0] = <void*>_func_eval_chebys_l
+ufunc_eval_chebys_ptr[2*0+1] = <void*>(<char*>"eval_chebys")
+ufunc_eval_chebys_ptr[2*1] = <void*>_func_eval_chebys[double]
+ufunc_eval_chebys_ptr[2*1+1] = <void*>(<char*>"eval_chebys")
+ufunc_eval_chebys_ptr[2*2] = <void*>_func_eval_chebys[double_complex]
+ufunc_eval_chebys_ptr[2*2+1] = <void*>(<char*>"eval_chebys")
+ufunc_eval_chebys_ptr[2*3] = <void*>_func_eval_chebys[double]
+ufunc_eval_chebys_ptr[2*3+1] = <void*>(<char*>"eval_chebys")
+ufunc_eval_chebys_ptr[2*4] = <void*>_func_eval_chebys[double_complex]
+ufunc_eval_chebys_ptr[2*4+1] = <void*>(<char*>"eval_chebys")
+ufunc_eval_chebys_data[0] = &ufunc_eval_chebys_ptr[2*0]
+ufunc_eval_chebys_data[1] = &ufunc_eval_chebys_ptr[2*1]
+ufunc_eval_chebys_data[2] = &ufunc_eval_chebys_ptr[2*2]
+ufunc_eval_chebys_data[3] = &ufunc_eval_chebys_ptr[2*3]
+ufunc_eval_chebys_data[4] = &ufunc_eval_chebys_ptr[2*4]
+eval_chebys = np.PyUFunc_FromFuncAndData(ufunc_eval_chebys_loops, ufunc_eval_chebys_data, ufunc_eval_chebys_types, 5, 2, 1, 0, "eval_chebys", ufunc_eval_chebys_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_eval_chebyt_loops[5]
+cdef void *ufunc_eval_chebyt_ptr[10]
+cdef void *ufunc_eval_chebyt_data[5]
+cdef char ufunc_eval_chebyt_types[15]
+cdef char *ufunc_eval_chebyt_doc = (
+    "eval_chebyt(n, x, out=None)\n"
+    "\n"
+    "Evaluate Chebyshev polynomial of the first kind at a point.\n"
+    "\n"
+    "The Chebyshev polynomials of the first kind can be defined via the\n"
+    "Gauss hypergeometric function :math:`{}_2F_1` as\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    T_n(x) = {}_2F_1(n, -n; 1/2; (1 - x)/2).\n"
+    "\n"
+    "When :math:`n` is an integer the result is a polynomial of degree\n"
+    ":math:`n`. See 22.5.47 in [AS]_ for details.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "n : array_like\n"
+    "    Degree of the polynomial. If not an integer, the result is\n"
+    "    determined via the relation to the Gauss hypergeometric\n"
+    "    function.\n"
+    "x : array_like\n"
+    "    Points at which to evaluate the Chebyshev polynomial\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "T : scalar or ndarray\n"
+    "    Values of the Chebyshev polynomial\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "roots_chebyt : roots and quadrature weights of Chebyshev\n"
+    "               polynomials of the first kind\n"
+    "chebyu : Chebychev polynomial object\n"
+    "eval_chebyu : evaluate Chebyshev polynomials of the second kind\n"
+    "hyp2f1 : Gauss hypergeometric function\n"
+    "numpy.polynomial.chebyshev.Chebyshev : Chebyshev series\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "This routine is numerically stable for `x` in ``[-1, 1]`` at least\n"
+    "up to order ``10000``.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n"
+    "    Handbook of Mathematical Functions with Formulas,\n"
+    "    Graphs, and Mathematical Tables. New York: Dover, 1972.")
+ufunc_eval_chebyt_loops[0] = <np.PyUFuncGenericFunction>loop_d_ld__As_ld_d
+ufunc_eval_chebyt_loops[1] = <np.PyUFuncGenericFunction>loop_d_dd__As_ff_f
+ufunc_eval_chebyt_loops[2] = <np.PyUFuncGenericFunction>loop_D_dD__As_fF_F
+ufunc_eval_chebyt_loops[3] = <np.PyUFuncGenericFunction>loop_d_dd__As_dd_d
+ufunc_eval_chebyt_loops[4] = <np.PyUFuncGenericFunction>loop_D_dD__As_dD_D
+ufunc_eval_chebyt_types[0] = <char>NPY_LONG
+ufunc_eval_chebyt_types[1] = <char>NPY_DOUBLE
+ufunc_eval_chebyt_types[2] = <char>NPY_DOUBLE
+ufunc_eval_chebyt_types[3] = <char>NPY_FLOAT
+ufunc_eval_chebyt_types[4] = <char>NPY_FLOAT
+ufunc_eval_chebyt_types[5] = <char>NPY_FLOAT
+ufunc_eval_chebyt_types[6] = <char>NPY_FLOAT
+ufunc_eval_chebyt_types[7] = <char>NPY_CFLOAT
+ufunc_eval_chebyt_types[8] = <char>NPY_CFLOAT
+ufunc_eval_chebyt_types[9] = <char>NPY_DOUBLE
+ufunc_eval_chebyt_types[10] = <char>NPY_DOUBLE
+ufunc_eval_chebyt_types[11] = <char>NPY_DOUBLE
+ufunc_eval_chebyt_types[12] = <char>NPY_DOUBLE
+ufunc_eval_chebyt_types[13] = <char>NPY_CDOUBLE
+ufunc_eval_chebyt_types[14] = <char>NPY_CDOUBLE
+ufunc_eval_chebyt_ptr[2*0] = <void*>_func_eval_chebyt_l
+ufunc_eval_chebyt_ptr[2*0+1] = <void*>(<char*>"eval_chebyt")
+ufunc_eval_chebyt_ptr[2*1] = <void*>_func_eval_chebyt[double]
+ufunc_eval_chebyt_ptr[2*1+1] = <void*>(<char*>"eval_chebyt")
+ufunc_eval_chebyt_ptr[2*2] = <void*>_func_eval_chebyt[double_complex]
+ufunc_eval_chebyt_ptr[2*2+1] = <void*>(<char*>"eval_chebyt")
+ufunc_eval_chebyt_ptr[2*3] = <void*>_func_eval_chebyt[double]
+ufunc_eval_chebyt_ptr[2*3+1] = <void*>(<char*>"eval_chebyt")
+ufunc_eval_chebyt_ptr[2*4] = <void*>_func_eval_chebyt[double_complex]
+ufunc_eval_chebyt_ptr[2*4+1] = <void*>(<char*>"eval_chebyt")
+ufunc_eval_chebyt_data[0] = &ufunc_eval_chebyt_ptr[2*0]
+ufunc_eval_chebyt_data[1] = &ufunc_eval_chebyt_ptr[2*1]
+ufunc_eval_chebyt_data[2] = &ufunc_eval_chebyt_ptr[2*2]
+ufunc_eval_chebyt_data[3] = &ufunc_eval_chebyt_ptr[2*3]
+ufunc_eval_chebyt_data[4] = &ufunc_eval_chebyt_ptr[2*4]
+eval_chebyt = np.PyUFunc_FromFuncAndData(ufunc_eval_chebyt_loops, ufunc_eval_chebyt_data, ufunc_eval_chebyt_types, 5, 2, 1, 0, "eval_chebyt", ufunc_eval_chebyt_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_eval_chebyu_loops[5]
+cdef void *ufunc_eval_chebyu_ptr[10]
+cdef void *ufunc_eval_chebyu_data[5]
+cdef char ufunc_eval_chebyu_types[15]
+cdef char *ufunc_eval_chebyu_doc = (
+    "eval_chebyu(n, x, out=None)\n"
+    "\n"
+    "Evaluate Chebyshev polynomial of the second kind at a point.\n"
+    "\n"
+    "The Chebyshev polynomials of the second kind can be defined via\n"
+    "the Gauss hypergeometric function :math:`{}_2F_1` as\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    U_n(x) = (n + 1) {}_2F_1(-n, n + 2; 3/2; (1 - x)/2).\n"
+    "\n"
+    "When :math:`n` is an integer the result is a polynomial of degree\n"
+    ":math:`n`. See 22.5.48 in [AS]_ for details.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "n : array_like\n"
+    "    Degree of the polynomial. If not an integer, the result is\n"
+    "    determined via the relation to the Gauss hypergeometric\n"
+    "    function.\n"
+    "x : array_like\n"
+    "    Points at which to evaluate the Chebyshev polynomial\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "U : scalar or ndarray\n"
+    "    Values of the Chebyshev polynomial\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "roots_chebyu : roots and quadrature weights of Chebyshev\n"
+    "               polynomials of the second kind\n"
+    "chebyu : Chebyshev polynomial object\n"
+    "eval_chebyt : evaluate Chebyshev polynomials of the first kind\n"
+    "hyp2f1 : Gauss hypergeometric function\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n"
+    "    Handbook of Mathematical Functions with Formulas,\n"
+    "    Graphs, and Mathematical Tables. New York: Dover, 1972.")
+ufunc_eval_chebyu_loops[0] = <np.PyUFuncGenericFunction>loop_d_ld__As_ld_d
+ufunc_eval_chebyu_loops[1] = <np.PyUFuncGenericFunction>loop_d_dd__As_ff_f
+ufunc_eval_chebyu_loops[2] = <np.PyUFuncGenericFunction>loop_D_dD__As_fF_F
+ufunc_eval_chebyu_loops[3] = <np.PyUFuncGenericFunction>loop_d_dd__As_dd_d
+ufunc_eval_chebyu_loops[4] = <np.PyUFuncGenericFunction>loop_D_dD__As_dD_D
+ufunc_eval_chebyu_types[0] = <char>NPY_LONG
+ufunc_eval_chebyu_types[1] = <char>NPY_DOUBLE
+ufunc_eval_chebyu_types[2] = <char>NPY_DOUBLE
+ufunc_eval_chebyu_types[3] = <char>NPY_FLOAT
+ufunc_eval_chebyu_types[4] = <char>NPY_FLOAT
+ufunc_eval_chebyu_types[5] = <char>NPY_FLOAT
+ufunc_eval_chebyu_types[6] = <char>NPY_FLOAT
+ufunc_eval_chebyu_types[7] = <char>NPY_CFLOAT
+ufunc_eval_chebyu_types[8] = <char>NPY_CFLOAT
+ufunc_eval_chebyu_types[9] = <char>NPY_DOUBLE
+ufunc_eval_chebyu_types[10] = <char>NPY_DOUBLE
+ufunc_eval_chebyu_types[11] = <char>NPY_DOUBLE
+ufunc_eval_chebyu_types[12] = <char>NPY_DOUBLE
+ufunc_eval_chebyu_types[13] = <char>NPY_CDOUBLE
+ufunc_eval_chebyu_types[14] = <char>NPY_CDOUBLE
+ufunc_eval_chebyu_ptr[2*0] = <void*>_func_eval_chebyu_l
+ufunc_eval_chebyu_ptr[2*0+1] = <void*>(<char*>"eval_chebyu")
+ufunc_eval_chebyu_ptr[2*1] = <void*>_func_eval_chebyu[double]
+ufunc_eval_chebyu_ptr[2*1+1] = <void*>(<char*>"eval_chebyu")
+ufunc_eval_chebyu_ptr[2*2] = <void*>_func_eval_chebyu[double_complex]
+ufunc_eval_chebyu_ptr[2*2+1] = <void*>(<char*>"eval_chebyu")
+ufunc_eval_chebyu_ptr[2*3] = <void*>_func_eval_chebyu[double]
+ufunc_eval_chebyu_ptr[2*3+1] = <void*>(<char*>"eval_chebyu")
+ufunc_eval_chebyu_ptr[2*4] = <void*>_func_eval_chebyu[double_complex]
+ufunc_eval_chebyu_ptr[2*4+1] = <void*>(<char*>"eval_chebyu")
+ufunc_eval_chebyu_data[0] = &ufunc_eval_chebyu_ptr[2*0]
+ufunc_eval_chebyu_data[1] = &ufunc_eval_chebyu_ptr[2*1]
+ufunc_eval_chebyu_data[2] = &ufunc_eval_chebyu_ptr[2*2]
+ufunc_eval_chebyu_data[3] = &ufunc_eval_chebyu_ptr[2*3]
+ufunc_eval_chebyu_data[4] = &ufunc_eval_chebyu_ptr[2*4]
+eval_chebyu = np.PyUFunc_FromFuncAndData(ufunc_eval_chebyu_loops, ufunc_eval_chebyu_data, ufunc_eval_chebyu_types, 5, 2, 1, 0, "eval_chebyu", ufunc_eval_chebyu_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_eval_gegenbauer_loops[5]
+cdef void *ufunc_eval_gegenbauer_ptr[10]
+cdef void *ufunc_eval_gegenbauer_data[5]
+cdef char ufunc_eval_gegenbauer_types[20]
+cdef char *ufunc_eval_gegenbauer_doc = (
+    "eval_gegenbauer(n, alpha, x, out=None)\n"
+    "\n"
+    "Evaluate Gegenbauer polynomial at a point.\n"
+    "\n"
+    "The Gegenbauer polynomials can be defined via the Gauss\n"
+    "hypergeometric function :math:`{}_2F_1` as\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    C_n^{(\\alpha)} = \\frac{(2\\alpha)_n}{\\Gamma(n + 1)}\n"
+    "      {}_2F_1(-n, 2\\alpha + n; \\alpha + 1/2; (1 - z)/2).\n"
+    "\n"
+    "When :math:`n` is an integer the result is a polynomial of degree\n"
+    ":math:`n`. See 22.5.46 in [AS]_ for details.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "n : array_like\n"
+    "    Degree of the polynomial. If not an integer, the result is\n"
+    "    determined via the relation to the Gauss hypergeometric\n"
+    "    function.\n"
+    "alpha : array_like\n"
+    "    Parameter\n"
+    "x : array_like\n"
+    "    Points at which to evaluate the Gegenbauer polynomial\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "C : scalar or ndarray\n"
+    "    Values of the Gegenbauer polynomial\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "roots_gegenbauer : roots and quadrature weights of Gegenbauer\n"
+    "                   polynomials\n"
+    "gegenbauer : Gegenbauer polynomial object\n"
+    "hyp2f1 : Gauss hypergeometric function\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n"
+    "    Handbook of Mathematical Functions with Formulas,\n"
+    "    Graphs, and Mathematical Tables. New York: Dover, 1972.")
+ufunc_eval_gegenbauer_loops[0] = <np.PyUFuncGenericFunction>loop_d_ldd__As_ldd_d
+ufunc_eval_gegenbauer_loops[1] = <np.PyUFuncGenericFunction>loop_d_ddd__As_fff_f
+ufunc_eval_gegenbauer_loops[2] = <np.PyUFuncGenericFunction>loop_D_ddD__As_ffF_F
+ufunc_eval_gegenbauer_loops[3] = <np.PyUFuncGenericFunction>loop_d_ddd__As_ddd_d
+ufunc_eval_gegenbauer_loops[4] = <np.PyUFuncGenericFunction>loop_D_ddD__As_ddD_D
+ufunc_eval_gegenbauer_types[0] = <char>NPY_LONG
+ufunc_eval_gegenbauer_types[1] = <char>NPY_DOUBLE
+ufunc_eval_gegenbauer_types[2] = <char>NPY_DOUBLE
+ufunc_eval_gegenbauer_types[3] = <char>NPY_DOUBLE
+ufunc_eval_gegenbauer_types[4] = <char>NPY_FLOAT
+ufunc_eval_gegenbauer_types[5] = <char>NPY_FLOAT
+ufunc_eval_gegenbauer_types[6] = <char>NPY_FLOAT
+ufunc_eval_gegenbauer_types[7] = <char>NPY_FLOAT
+ufunc_eval_gegenbauer_types[8] = <char>NPY_FLOAT
+ufunc_eval_gegenbauer_types[9] = <char>NPY_FLOAT
+ufunc_eval_gegenbauer_types[10] = <char>NPY_CFLOAT
+ufunc_eval_gegenbauer_types[11] = <char>NPY_CFLOAT
+ufunc_eval_gegenbauer_types[12] = <char>NPY_DOUBLE
+ufunc_eval_gegenbauer_types[13] = <char>NPY_DOUBLE
+ufunc_eval_gegenbauer_types[14] = <char>NPY_DOUBLE
+ufunc_eval_gegenbauer_types[15] = <char>NPY_DOUBLE
+ufunc_eval_gegenbauer_types[16] = <char>NPY_DOUBLE
+ufunc_eval_gegenbauer_types[17] = <char>NPY_DOUBLE
+ufunc_eval_gegenbauer_types[18] = <char>NPY_CDOUBLE
+ufunc_eval_gegenbauer_types[19] = <char>NPY_CDOUBLE
+ufunc_eval_gegenbauer_ptr[2*0] = <void*>_func_eval_gegenbauer_l
+ufunc_eval_gegenbauer_ptr[2*0+1] = <void*>(<char*>"eval_gegenbauer")
+ufunc_eval_gegenbauer_ptr[2*1] = <void*>_func_eval_gegenbauer[double]
+ufunc_eval_gegenbauer_ptr[2*1+1] = <void*>(<char*>"eval_gegenbauer")
+ufunc_eval_gegenbauer_ptr[2*2] = <void*>_func_eval_gegenbauer[double_complex]
+ufunc_eval_gegenbauer_ptr[2*2+1] = <void*>(<char*>"eval_gegenbauer")
+ufunc_eval_gegenbauer_ptr[2*3] = <void*>_func_eval_gegenbauer[double]
+ufunc_eval_gegenbauer_ptr[2*3+1] = <void*>(<char*>"eval_gegenbauer")
+ufunc_eval_gegenbauer_ptr[2*4] = <void*>_func_eval_gegenbauer[double_complex]
+ufunc_eval_gegenbauer_ptr[2*4+1] = <void*>(<char*>"eval_gegenbauer")
+ufunc_eval_gegenbauer_data[0] = &ufunc_eval_gegenbauer_ptr[2*0]
+ufunc_eval_gegenbauer_data[1] = &ufunc_eval_gegenbauer_ptr[2*1]
+ufunc_eval_gegenbauer_data[2] = &ufunc_eval_gegenbauer_ptr[2*2]
+ufunc_eval_gegenbauer_data[3] = &ufunc_eval_gegenbauer_ptr[2*3]
+ufunc_eval_gegenbauer_data[4] = &ufunc_eval_gegenbauer_ptr[2*4]
+eval_gegenbauer = np.PyUFunc_FromFuncAndData(ufunc_eval_gegenbauer_loops, ufunc_eval_gegenbauer_data, ufunc_eval_gegenbauer_types, 5, 3, 1, 0, "eval_gegenbauer", ufunc_eval_gegenbauer_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_eval_genlaguerre_loops[5]
+cdef void *ufunc_eval_genlaguerre_ptr[10]
+cdef void *ufunc_eval_genlaguerre_data[5]
+cdef char ufunc_eval_genlaguerre_types[20]
+cdef char *ufunc_eval_genlaguerre_doc = (
+    "eval_genlaguerre(n, alpha, x, out=None)\n"
+    "\n"
+    "Evaluate generalized Laguerre polynomial at a point.\n"
+    "\n"
+    "The generalized Laguerre polynomials can be defined via the\n"
+    "confluent hypergeometric function :math:`{}_1F_1` as\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    L_n^{(\\alpha)}(x) = \\binom{n + \\alpha}{n}\n"
+    "      {}_1F_1(-n, \\alpha + 1, x).\n"
+    "\n"
+    "When :math:`n` is an integer the result is a polynomial of degree\n"
+    ":math:`n`. See 22.5.54 in [AS]_ for details. The Laguerre\n"
+    "polynomials are the special case where :math:`\\alpha = 0`.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "n : array_like\n"
+    "    Degree of the polynomial. If not an integer, the result is\n"
+    "    determined via the relation to the confluent hypergeometric\n"
+    "    function.\n"
+    "alpha : array_like\n"
+    "    Parameter; must have ``alpha > -1``\n"
+    "x : array_like\n"
+    "    Points at which to evaluate the generalized Laguerre\n"
+    "    polynomial\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "L : scalar or ndarray\n"
+    "    Values of the generalized Laguerre polynomial\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "roots_genlaguerre : roots and quadrature weights of generalized\n"
+    "                    Laguerre polynomials\n"
+    "genlaguerre : generalized Laguerre polynomial object\n"
+    "hyp1f1 : confluent hypergeometric function\n"
+    "eval_laguerre : evaluate Laguerre polynomials\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n"
+    "    Handbook of Mathematical Functions with Formulas,\n"
+    "    Graphs, and Mathematical Tables. New York: Dover, 1972.")
+ufunc_eval_genlaguerre_loops[0] = <np.PyUFuncGenericFunction>loop_d_ldd__As_ldd_d
+ufunc_eval_genlaguerre_loops[1] = <np.PyUFuncGenericFunction>loop_d_ddd__As_fff_f
+ufunc_eval_genlaguerre_loops[2] = <np.PyUFuncGenericFunction>loop_D_ddD__As_ffF_F
+ufunc_eval_genlaguerre_loops[3] = <np.PyUFuncGenericFunction>loop_d_ddd__As_ddd_d
+ufunc_eval_genlaguerre_loops[4] = <np.PyUFuncGenericFunction>loop_D_ddD__As_ddD_D
+ufunc_eval_genlaguerre_types[0] = <char>NPY_LONG
+ufunc_eval_genlaguerre_types[1] = <char>NPY_DOUBLE
+ufunc_eval_genlaguerre_types[2] = <char>NPY_DOUBLE
+ufunc_eval_genlaguerre_types[3] = <char>NPY_DOUBLE
+ufunc_eval_genlaguerre_types[4] = <char>NPY_FLOAT
+ufunc_eval_genlaguerre_types[5] = <char>NPY_FLOAT
+ufunc_eval_genlaguerre_types[6] = <char>NPY_FLOAT
+ufunc_eval_genlaguerre_types[7] = <char>NPY_FLOAT
+ufunc_eval_genlaguerre_types[8] = <char>NPY_FLOAT
+ufunc_eval_genlaguerre_types[9] = <char>NPY_FLOAT
+ufunc_eval_genlaguerre_types[10] = <char>NPY_CFLOAT
+ufunc_eval_genlaguerre_types[11] = <char>NPY_CFLOAT
+ufunc_eval_genlaguerre_types[12] = <char>NPY_DOUBLE
+ufunc_eval_genlaguerre_types[13] = <char>NPY_DOUBLE
+ufunc_eval_genlaguerre_types[14] = <char>NPY_DOUBLE
+ufunc_eval_genlaguerre_types[15] = <char>NPY_DOUBLE
+ufunc_eval_genlaguerre_types[16] = <char>NPY_DOUBLE
+ufunc_eval_genlaguerre_types[17] = <char>NPY_DOUBLE
+ufunc_eval_genlaguerre_types[18] = <char>NPY_CDOUBLE
+ufunc_eval_genlaguerre_types[19] = <char>NPY_CDOUBLE
+ufunc_eval_genlaguerre_ptr[2*0] = <void*>_func_eval_genlaguerre_l
+ufunc_eval_genlaguerre_ptr[2*0+1] = <void*>(<char*>"eval_genlaguerre")
+ufunc_eval_genlaguerre_ptr[2*1] = <void*>_func_eval_genlaguerre[double]
+ufunc_eval_genlaguerre_ptr[2*1+1] = <void*>(<char*>"eval_genlaguerre")
+ufunc_eval_genlaguerre_ptr[2*2] = <void*>_func_eval_genlaguerre[double_complex]
+ufunc_eval_genlaguerre_ptr[2*2+1] = <void*>(<char*>"eval_genlaguerre")
+ufunc_eval_genlaguerre_ptr[2*3] = <void*>_func_eval_genlaguerre[double]
+ufunc_eval_genlaguerre_ptr[2*3+1] = <void*>(<char*>"eval_genlaguerre")
+ufunc_eval_genlaguerre_ptr[2*4] = <void*>_func_eval_genlaguerre[double_complex]
+ufunc_eval_genlaguerre_ptr[2*4+1] = <void*>(<char*>"eval_genlaguerre")
+ufunc_eval_genlaguerre_data[0] = &ufunc_eval_genlaguerre_ptr[2*0]
+ufunc_eval_genlaguerre_data[1] = &ufunc_eval_genlaguerre_ptr[2*1]
+ufunc_eval_genlaguerre_data[2] = &ufunc_eval_genlaguerre_ptr[2*2]
+ufunc_eval_genlaguerre_data[3] = &ufunc_eval_genlaguerre_ptr[2*3]
+ufunc_eval_genlaguerre_data[4] = &ufunc_eval_genlaguerre_ptr[2*4]
+eval_genlaguerre = np.PyUFunc_FromFuncAndData(ufunc_eval_genlaguerre_loops, ufunc_eval_genlaguerre_data, ufunc_eval_genlaguerre_types, 5, 3, 1, 0, "eval_genlaguerre", ufunc_eval_genlaguerre_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_eval_hermite_loops[1]
+cdef void *ufunc_eval_hermite_ptr[2]
+cdef void *ufunc_eval_hermite_data[1]
+cdef char ufunc_eval_hermite_types[3]
+cdef char *ufunc_eval_hermite_doc = (
+    "eval_hermite(n, x, out=None)\n"
+    "\n"
+    "Evaluate physicist's Hermite polynomial at a point.\n"
+    "\n"
+    "Defined by\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    H_n(x) = (-1)^n e^{x^2} \\frac{d^n}{dx^n} e^{-x^2};\n"
+    "\n"
+    ":math:`H_n` is a polynomial of degree :math:`n`. See 22.11.7 in\n"
+    "[AS]_ for details.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "n : array_like\n"
+    "    Degree of the polynomial\n"
+    "x : array_like\n"
+    "    Points at which to evaluate the Hermite polynomial\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "H : scalar or ndarray\n"
+    "    Values of the Hermite polynomial\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "roots_hermite : roots and quadrature weights of physicist's\n"
+    "                Hermite polynomials\n"
+    "hermite : physicist's Hermite polynomial object\n"
+    "numpy.polynomial.hermite.Hermite : Physicist's Hermite series\n"
+    "eval_hermitenorm : evaluate Probabilist's Hermite polynomials\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n"
+    "    Handbook of Mathematical Functions with Formulas,\n"
+    "    Graphs, and Mathematical Tables. New York: Dover, 1972.")
+ufunc_eval_hermite_loops[0] = <np.PyUFuncGenericFunction>loop_d_ld__As_ld_d
+ufunc_eval_hermite_types[0] = <char>NPY_LONG
+ufunc_eval_hermite_types[1] = <char>NPY_DOUBLE
+ufunc_eval_hermite_types[2] = <char>NPY_DOUBLE
+ufunc_eval_hermite_ptr[2*0] = <void*>_func_eval_hermite
+ufunc_eval_hermite_ptr[2*0+1] = <void*>(<char*>"eval_hermite")
+ufunc_eval_hermite_data[0] = &ufunc_eval_hermite_ptr[2*0]
+eval_hermite = np.PyUFunc_FromFuncAndData(ufunc_eval_hermite_loops, ufunc_eval_hermite_data, ufunc_eval_hermite_types, 1, 2, 1, 0, "eval_hermite", ufunc_eval_hermite_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_eval_hermitenorm_loops[1]
+cdef void *ufunc_eval_hermitenorm_ptr[2]
+cdef void *ufunc_eval_hermitenorm_data[1]
+cdef char ufunc_eval_hermitenorm_types[3]
+cdef char *ufunc_eval_hermitenorm_doc = (
+    "eval_hermitenorm(n, x, out=None)\n"
+    "\n"
+    "Evaluate probabilist's (normalized) Hermite polynomial at a\n"
+    "point.\n"
+    "\n"
+    "Defined by\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    He_n(x) = (-1)^n e^{x^2/2} \\frac{d^n}{dx^n} e^{-x^2/2};\n"
+    "\n"
+    ":math:`He_n` is a polynomial of degree :math:`n`. See 22.11.8 in\n"
+    "[AS]_ for details.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "n : array_like\n"
+    "    Degree of the polynomial\n"
+    "x : array_like\n"
+    "    Points at which to evaluate the Hermite polynomial\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "He : scalar or ndarray\n"
+    "    Values of the Hermite polynomial\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "roots_hermitenorm : roots and quadrature weights of probabilist's\n"
+    "                    Hermite polynomials\n"
+    "hermitenorm : probabilist's Hermite polynomial object\n"
+    "numpy.polynomial.hermite_e.HermiteE : Probabilist's Hermite series\n"
+    "eval_hermite : evaluate physicist's Hermite polynomials\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n"
+    "    Handbook of Mathematical Functions with Formulas,\n"
+    "    Graphs, and Mathematical Tables. New York: Dover, 1972.")
+ufunc_eval_hermitenorm_loops[0] = <np.PyUFuncGenericFunction>loop_d_ld__As_ld_d
+ufunc_eval_hermitenorm_types[0] = <char>NPY_LONG
+ufunc_eval_hermitenorm_types[1] = <char>NPY_DOUBLE
+ufunc_eval_hermitenorm_types[2] = <char>NPY_DOUBLE
+ufunc_eval_hermitenorm_ptr[2*0] = <void*>_func_eval_hermitenorm
+ufunc_eval_hermitenorm_ptr[2*0+1] = <void*>(<char*>"eval_hermitenorm")
+ufunc_eval_hermitenorm_data[0] = &ufunc_eval_hermitenorm_ptr[2*0]
+eval_hermitenorm = np.PyUFunc_FromFuncAndData(ufunc_eval_hermitenorm_loops, ufunc_eval_hermitenorm_data, ufunc_eval_hermitenorm_types, 1, 2, 1, 0, "eval_hermitenorm", ufunc_eval_hermitenorm_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_eval_jacobi_loops[5]
+cdef void *ufunc_eval_jacobi_ptr[10]
+cdef void *ufunc_eval_jacobi_data[5]
+cdef char ufunc_eval_jacobi_types[25]
+cdef char *ufunc_eval_jacobi_doc = (
+    "eval_jacobi(n, alpha, beta, x, out=None)\n"
+    "\n"
+    "Evaluate Jacobi polynomial at a point.\n"
+    "\n"
+    "The Jacobi polynomials can be defined via the Gauss hypergeometric\n"
+    "function :math:`{}_2F_1` as\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    P_n^{(\\alpha, \\beta)}(x) = \\frac{(\\alpha + 1)_n}{\\Gamma(n + 1)}\n"
+    "      {}_2F_1(-n, 1 + \\alpha + \\beta + n; \\alpha + 1; (1 - z)/2)\n"
+    "\n"
+    "where :math:`(\\cdot)_n` is the Pochhammer symbol; see `poch`. When\n"
+    ":math:`n` is an integer the result is a polynomial of degree\n"
+    ":math:`n`. See 22.5.42 in [AS]_ for details.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "n : array_like\n"
+    "    Degree of the polynomial. If not an integer the result is\n"
+    "    determined via the relation to the Gauss hypergeometric\n"
+    "    function.\n"
+    "alpha : array_like\n"
+    "    Parameter\n"
+    "beta : array_like\n"
+    "    Parameter\n"
+    "x : array_like\n"
+    "    Points at which to evaluate the polynomial\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "P : scalar or ndarray\n"
+    "    Values of the Jacobi polynomial\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "roots_jacobi : roots and quadrature weights of Jacobi polynomials\n"
+    "jacobi : Jacobi polynomial object\n"
+    "hyp2f1 : Gauss hypergeometric function\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n"
+    "    Handbook of Mathematical Functions with Formulas,\n"
+    "    Graphs, and Mathematical Tables. New York: Dover, 1972.")
+ufunc_eval_jacobi_loops[0] = <np.PyUFuncGenericFunction>loop_d_lddd__As_lddd_d
+ufunc_eval_jacobi_loops[1] = <np.PyUFuncGenericFunction>loop_d_dddd__As_ffff_f
+ufunc_eval_jacobi_loops[2] = <np.PyUFuncGenericFunction>loop_D_dddD__As_fffF_F
+ufunc_eval_jacobi_loops[3] = <np.PyUFuncGenericFunction>loop_d_dddd__As_dddd_d
+ufunc_eval_jacobi_loops[4] = <np.PyUFuncGenericFunction>loop_D_dddD__As_dddD_D
+ufunc_eval_jacobi_types[0] = <char>NPY_LONG
+ufunc_eval_jacobi_types[1] = <char>NPY_DOUBLE
+ufunc_eval_jacobi_types[2] = <char>NPY_DOUBLE
+ufunc_eval_jacobi_types[3] = <char>NPY_DOUBLE
+ufunc_eval_jacobi_types[4] = <char>NPY_DOUBLE
+ufunc_eval_jacobi_types[5] = <char>NPY_FLOAT
+ufunc_eval_jacobi_types[6] = <char>NPY_FLOAT
+ufunc_eval_jacobi_types[7] = <char>NPY_FLOAT
+ufunc_eval_jacobi_types[8] = <char>NPY_FLOAT
+ufunc_eval_jacobi_types[9] = <char>NPY_FLOAT
+ufunc_eval_jacobi_types[10] = <char>NPY_FLOAT
+ufunc_eval_jacobi_types[11] = <char>NPY_FLOAT
+ufunc_eval_jacobi_types[12] = <char>NPY_FLOAT
+ufunc_eval_jacobi_types[13] = <char>NPY_CFLOAT
+ufunc_eval_jacobi_types[14] = <char>NPY_CFLOAT
+ufunc_eval_jacobi_types[15] = <char>NPY_DOUBLE
+ufunc_eval_jacobi_types[16] = <char>NPY_DOUBLE
+ufunc_eval_jacobi_types[17] = <char>NPY_DOUBLE
+ufunc_eval_jacobi_types[18] = <char>NPY_DOUBLE
+ufunc_eval_jacobi_types[19] = <char>NPY_DOUBLE
+ufunc_eval_jacobi_types[20] = <char>NPY_DOUBLE
+ufunc_eval_jacobi_types[21] = <char>NPY_DOUBLE
+ufunc_eval_jacobi_types[22] = <char>NPY_DOUBLE
+ufunc_eval_jacobi_types[23] = <char>NPY_CDOUBLE
+ufunc_eval_jacobi_types[24] = <char>NPY_CDOUBLE
+ufunc_eval_jacobi_ptr[2*0] = <void*>_func_eval_jacobi_l
+ufunc_eval_jacobi_ptr[2*0+1] = <void*>(<char*>"eval_jacobi")
+ufunc_eval_jacobi_ptr[2*1] = <void*>_func_eval_jacobi[double]
+ufunc_eval_jacobi_ptr[2*1+1] = <void*>(<char*>"eval_jacobi")
+ufunc_eval_jacobi_ptr[2*2] = <void*>_func_eval_jacobi[double_complex]
+ufunc_eval_jacobi_ptr[2*2+1] = <void*>(<char*>"eval_jacobi")
+ufunc_eval_jacobi_ptr[2*3] = <void*>_func_eval_jacobi[double]
+ufunc_eval_jacobi_ptr[2*3+1] = <void*>(<char*>"eval_jacobi")
+ufunc_eval_jacobi_ptr[2*4] = <void*>_func_eval_jacobi[double_complex]
+ufunc_eval_jacobi_ptr[2*4+1] = <void*>(<char*>"eval_jacobi")
+ufunc_eval_jacobi_data[0] = &ufunc_eval_jacobi_ptr[2*0]
+ufunc_eval_jacobi_data[1] = &ufunc_eval_jacobi_ptr[2*1]
+ufunc_eval_jacobi_data[2] = &ufunc_eval_jacobi_ptr[2*2]
+ufunc_eval_jacobi_data[3] = &ufunc_eval_jacobi_ptr[2*3]
+ufunc_eval_jacobi_data[4] = &ufunc_eval_jacobi_ptr[2*4]
+eval_jacobi = np.PyUFunc_FromFuncAndData(ufunc_eval_jacobi_loops, ufunc_eval_jacobi_data, ufunc_eval_jacobi_types, 5, 4, 1, 0, "eval_jacobi", ufunc_eval_jacobi_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_eval_laguerre_loops[5]
+cdef void *ufunc_eval_laguerre_ptr[10]
+cdef void *ufunc_eval_laguerre_data[5]
+cdef char ufunc_eval_laguerre_types[15]
+cdef char *ufunc_eval_laguerre_doc = (
+    "eval_laguerre(n, x, out=None)\n"
+    "\n"
+    "Evaluate Laguerre polynomial at a point.\n"
+    "\n"
+    "The Laguerre polynomials can be defined via the confluent\n"
+    "hypergeometric function :math:`{}_1F_1` as\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    L_n(x) = {}_1F_1(-n, 1, x).\n"
+    "\n"
+    "See 22.5.16 and 22.5.54 in [AS]_ for details. When :math:`n` is an\n"
+    "integer the result is a polynomial of degree :math:`n`.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "n : array_like\n"
+    "    Degree of the polynomial. If not an integer the result is\n"
+    "    determined via the relation to the confluent hypergeometric\n"
+    "    function.\n"
+    "x : array_like\n"
+    "    Points at which to evaluate the Laguerre polynomial\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "L : scalar or ndarray\n"
+    "    Values of the Laguerre polynomial\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "roots_laguerre : roots and quadrature weights of Laguerre\n"
+    "                 polynomials\n"
+    "laguerre : Laguerre polynomial object\n"
+    "numpy.polynomial.laguerre.Laguerre : Laguerre series\n"
+    "eval_genlaguerre : evaluate generalized Laguerre polynomials\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n"
+    "    Handbook of Mathematical Functions with Formulas,\n"
+    "    Graphs, and Mathematical Tables. New York: Dover, 1972.")
+ufunc_eval_laguerre_loops[0] = <np.PyUFuncGenericFunction>loop_d_ld__As_ld_d
+ufunc_eval_laguerre_loops[1] = <np.PyUFuncGenericFunction>loop_d_dd__As_ff_f
+ufunc_eval_laguerre_loops[2] = <np.PyUFuncGenericFunction>loop_D_dD__As_fF_F
+ufunc_eval_laguerre_loops[3] = <np.PyUFuncGenericFunction>loop_d_dd__As_dd_d
+ufunc_eval_laguerre_loops[4] = <np.PyUFuncGenericFunction>loop_D_dD__As_dD_D
+ufunc_eval_laguerre_types[0] = <char>NPY_LONG
+ufunc_eval_laguerre_types[1] = <char>NPY_DOUBLE
+ufunc_eval_laguerre_types[2] = <char>NPY_DOUBLE
+ufunc_eval_laguerre_types[3] = <char>NPY_FLOAT
+ufunc_eval_laguerre_types[4] = <char>NPY_FLOAT
+ufunc_eval_laguerre_types[5] = <char>NPY_FLOAT
+ufunc_eval_laguerre_types[6] = <char>NPY_FLOAT
+ufunc_eval_laguerre_types[7] = <char>NPY_CFLOAT
+ufunc_eval_laguerre_types[8] = <char>NPY_CFLOAT
+ufunc_eval_laguerre_types[9] = <char>NPY_DOUBLE
+ufunc_eval_laguerre_types[10] = <char>NPY_DOUBLE
+ufunc_eval_laguerre_types[11] = <char>NPY_DOUBLE
+ufunc_eval_laguerre_types[12] = <char>NPY_DOUBLE
+ufunc_eval_laguerre_types[13] = <char>NPY_CDOUBLE
+ufunc_eval_laguerre_types[14] = <char>NPY_CDOUBLE
+ufunc_eval_laguerre_ptr[2*0] = <void*>_func_eval_laguerre_l
+ufunc_eval_laguerre_ptr[2*0+1] = <void*>(<char*>"eval_laguerre")
+ufunc_eval_laguerre_ptr[2*1] = <void*>_func_eval_laguerre[double]
+ufunc_eval_laguerre_ptr[2*1+1] = <void*>(<char*>"eval_laguerre")
+ufunc_eval_laguerre_ptr[2*2] = <void*>_func_eval_laguerre[double_complex]
+ufunc_eval_laguerre_ptr[2*2+1] = <void*>(<char*>"eval_laguerre")
+ufunc_eval_laguerre_ptr[2*3] = <void*>_func_eval_laguerre[double]
+ufunc_eval_laguerre_ptr[2*3+1] = <void*>(<char*>"eval_laguerre")
+ufunc_eval_laguerre_ptr[2*4] = <void*>_func_eval_laguerre[double_complex]
+ufunc_eval_laguerre_ptr[2*4+1] = <void*>(<char*>"eval_laguerre")
+ufunc_eval_laguerre_data[0] = &ufunc_eval_laguerre_ptr[2*0]
+ufunc_eval_laguerre_data[1] = &ufunc_eval_laguerre_ptr[2*1]
+ufunc_eval_laguerre_data[2] = &ufunc_eval_laguerre_ptr[2*2]
+ufunc_eval_laguerre_data[3] = &ufunc_eval_laguerre_ptr[2*3]
+ufunc_eval_laguerre_data[4] = &ufunc_eval_laguerre_ptr[2*4]
+eval_laguerre = np.PyUFunc_FromFuncAndData(ufunc_eval_laguerre_loops, ufunc_eval_laguerre_data, ufunc_eval_laguerre_types, 5, 2, 1, 0, "eval_laguerre", ufunc_eval_laguerre_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_eval_legendre_loops[5]
+cdef void *ufunc_eval_legendre_ptr[10]
+cdef void *ufunc_eval_legendre_data[5]
+cdef char ufunc_eval_legendre_types[15]
+cdef char *ufunc_eval_legendre_doc = (
+    "eval_legendre(n, x, out=None)\n"
+    "\n"
+    "Evaluate Legendre polynomial at a point.\n"
+    "\n"
+    "The Legendre polynomials can be defined via the Gauss\n"
+    "hypergeometric function :math:`{}_2F_1` as\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    P_n(x) = {}_2F_1(-n, n + 1; 1; (1 - x)/2).\n"
+    "\n"
+    "When :math:`n` is an integer the result is a polynomial of degree\n"
+    ":math:`n`. See 22.5.49 in [AS]_ for details.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "n : array_like\n"
+    "    Degree of the polynomial. If not an integer, the result is\n"
+    "    determined via the relation to the Gauss hypergeometric\n"
+    "    function.\n"
+    "x : array_like\n"
+    "    Points at which to evaluate the Legendre polynomial\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "P : scalar or ndarray\n"
+    "    Values of the Legendre polynomial\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "roots_legendre : roots and quadrature weights of Legendre\n"
+    "                 polynomials\n"
+    "legendre : Legendre polynomial object\n"
+    "hyp2f1 : Gauss hypergeometric function\n"
+    "numpy.polynomial.legendre.Legendre : Legendre series\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n"
+    "    Handbook of Mathematical Functions with Formulas,\n"
+    "    Graphs, and Mathematical Tables. New York: Dover, 1972.\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import eval_legendre\n"
+    "\n"
+    "Evaluate the zero-order Legendre polynomial at x = 0\n"
+    "\n"
+    ">>> eval_legendre(0, 0)\n"
+    "1.0\n"
+    "\n"
+    "Evaluate the first-order Legendre polynomial between -1 and 1\n"
+    "\n"
+    ">>> X = np.linspace(-1, 1, 5)  # Domain of Legendre polynomials\n"
+    ">>> eval_legendre(1, X)\n"
+    "array([-1. , -0.5,  0. ,  0.5,  1. ])\n"
+    "\n"
+    "Evaluate Legendre polynomials of order 0 through 4 at x = 0\n"
+    "\n"
+    ">>> N = range(0, 5)\n"
+    ">>> eval_legendre(N, 0)\n"
+    "array([ 1.   ,  0.   , -0.5  ,  0.   ,  0.375])\n"
+    "\n"
+    "Plot Legendre polynomials of order 0 through 4\n"
+    "\n"
+    ">>> X = np.linspace(-1, 1)\n"
+    "\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> for n in range(0, 5):\n"
+    "...     y = eval_legendre(n, X)\n"
+    "...     plt.plot(X, y, label=r'$P_{}(x)$'.format(n))\n"
+    "\n"
+    ">>> plt.title(\"Legendre Polynomials\")\n"
+    ">>> plt.xlabel(\"x\")\n"
+    ">>> plt.ylabel(r'$P_n(x)$')\n"
+    ">>> plt.legend(loc='lower right')\n"
+    ">>> plt.show()")
+ufunc_eval_legendre_loops[0] = <np.PyUFuncGenericFunction>loop_d_ld__As_ld_d
+ufunc_eval_legendre_loops[1] = <np.PyUFuncGenericFunction>loop_d_dd__As_ff_f
+ufunc_eval_legendre_loops[2] = <np.PyUFuncGenericFunction>loop_D_dD__As_fF_F
+ufunc_eval_legendre_loops[3] = <np.PyUFuncGenericFunction>loop_d_dd__As_dd_d
+ufunc_eval_legendre_loops[4] = <np.PyUFuncGenericFunction>loop_D_dD__As_dD_D
+ufunc_eval_legendre_types[0] = <char>NPY_LONG
+ufunc_eval_legendre_types[1] = <char>NPY_DOUBLE
+ufunc_eval_legendre_types[2] = <char>NPY_DOUBLE
+ufunc_eval_legendre_types[3] = <char>NPY_FLOAT
+ufunc_eval_legendre_types[4] = <char>NPY_FLOAT
+ufunc_eval_legendre_types[5] = <char>NPY_FLOAT
+ufunc_eval_legendre_types[6] = <char>NPY_FLOAT
+ufunc_eval_legendre_types[7] = <char>NPY_CFLOAT
+ufunc_eval_legendre_types[8] = <char>NPY_CFLOAT
+ufunc_eval_legendre_types[9] = <char>NPY_DOUBLE
+ufunc_eval_legendre_types[10] = <char>NPY_DOUBLE
+ufunc_eval_legendre_types[11] = <char>NPY_DOUBLE
+ufunc_eval_legendre_types[12] = <char>NPY_DOUBLE
+ufunc_eval_legendre_types[13] = <char>NPY_CDOUBLE
+ufunc_eval_legendre_types[14] = <char>NPY_CDOUBLE
+ufunc_eval_legendre_ptr[2*0] = <void*>_func_eval_legendre_l
+ufunc_eval_legendre_ptr[2*0+1] = <void*>(<char*>"eval_legendre")
+ufunc_eval_legendre_ptr[2*1] = <void*>_func_eval_legendre[double]
+ufunc_eval_legendre_ptr[2*1+1] = <void*>(<char*>"eval_legendre")
+ufunc_eval_legendre_ptr[2*2] = <void*>_func_eval_legendre[double_complex]
+ufunc_eval_legendre_ptr[2*2+1] = <void*>(<char*>"eval_legendre")
+ufunc_eval_legendre_ptr[2*3] = <void*>_func_eval_legendre[double]
+ufunc_eval_legendre_ptr[2*3+1] = <void*>(<char*>"eval_legendre")
+ufunc_eval_legendre_ptr[2*4] = <void*>_func_eval_legendre[double_complex]
+ufunc_eval_legendre_ptr[2*4+1] = <void*>(<char*>"eval_legendre")
+ufunc_eval_legendre_data[0] = &ufunc_eval_legendre_ptr[2*0]
+ufunc_eval_legendre_data[1] = &ufunc_eval_legendre_ptr[2*1]
+ufunc_eval_legendre_data[2] = &ufunc_eval_legendre_ptr[2*2]
+ufunc_eval_legendre_data[3] = &ufunc_eval_legendre_ptr[2*3]
+ufunc_eval_legendre_data[4] = &ufunc_eval_legendre_ptr[2*4]
+eval_legendre = np.PyUFunc_FromFuncAndData(ufunc_eval_legendre_loops, ufunc_eval_legendre_data, ufunc_eval_legendre_types, 5, 2, 1, 0, "eval_legendre", ufunc_eval_legendre_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_eval_sh_chebyt_loops[5]
+cdef void *ufunc_eval_sh_chebyt_ptr[10]
+cdef void *ufunc_eval_sh_chebyt_data[5]
+cdef char ufunc_eval_sh_chebyt_types[15]
+cdef char *ufunc_eval_sh_chebyt_doc = (
+    "eval_sh_chebyt(n, x, out=None)\n"
+    "\n"
+    "Evaluate shifted Chebyshev polynomial of the first kind at a\n"
+    "point.\n"
+    "\n"
+    "These polynomials are defined as\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    T_n^*(x) = T_n(2x - 1)\n"
+    "\n"
+    "where :math:`T_n` is a Chebyshev polynomial of the first kind. See\n"
+    "22.5.14 in [AS]_ for details.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "n : array_like\n"
+    "    Degree of the polynomial. If not an integer, the result is\n"
+    "    determined via the relation to `eval_chebyt`.\n"
+    "x : array_like\n"
+    "    Points at which to evaluate the shifted Chebyshev polynomial\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "T : scalar or ndarray\n"
+    "    Values of the shifted Chebyshev polynomial\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "roots_sh_chebyt : roots and quadrature weights of shifted\n"
+    "                  Chebyshev polynomials of the first kind\n"
+    "sh_chebyt : shifted Chebyshev polynomial object\n"
+    "eval_chebyt : evaluate Chebyshev polynomials of the first kind\n"
+    "numpy.polynomial.chebyshev.Chebyshev : Chebyshev series\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n"
+    "    Handbook of Mathematical Functions with Formulas,\n"
+    "    Graphs, and Mathematical Tables. New York: Dover, 1972.")
+ufunc_eval_sh_chebyt_loops[0] = <np.PyUFuncGenericFunction>loop_d_ld__As_ld_d
+ufunc_eval_sh_chebyt_loops[1] = <np.PyUFuncGenericFunction>loop_d_dd__As_ff_f
+ufunc_eval_sh_chebyt_loops[2] = <np.PyUFuncGenericFunction>loop_D_dD__As_fF_F
+ufunc_eval_sh_chebyt_loops[3] = <np.PyUFuncGenericFunction>loop_d_dd__As_dd_d
+ufunc_eval_sh_chebyt_loops[4] = <np.PyUFuncGenericFunction>loop_D_dD__As_dD_D
+ufunc_eval_sh_chebyt_types[0] = <char>NPY_LONG
+ufunc_eval_sh_chebyt_types[1] = <char>NPY_DOUBLE
+ufunc_eval_sh_chebyt_types[2] = <char>NPY_DOUBLE
+ufunc_eval_sh_chebyt_types[3] = <char>NPY_FLOAT
+ufunc_eval_sh_chebyt_types[4] = <char>NPY_FLOAT
+ufunc_eval_sh_chebyt_types[5] = <char>NPY_FLOAT
+ufunc_eval_sh_chebyt_types[6] = <char>NPY_FLOAT
+ufunc_eval_sh_chebyt_types[7] = <char>NPY_CFLOAT
+ufunc_eval_sh_chebyt_types[8] = <char>NPY_CFLOAT
+ufunc_eval_sh_chebyt_types[9] = <char>NPY_DOUBLE
+ufunc_eval_sh_chebyt_types[10] = <char>NPY_DOUBLE
+ufunc_eval_sh_chebyt_types[11] = <char>NPY_DOUBLE
+ufunc_eval_sh_chebyt_types[12] = <char>NPY_DOUBLE
+ufunc_eval_sh_chebyt_types[13] = <char>NPY_CDOUBLE
+ufunc_eval_sh_chebyt_types[14] = <char>NPY_CDOUBLE
+ufunc_eval_sh_chebyt_ptr[2*0] = <void*>_func_eval_sh_chebyt_l
+ufunc_eval_sh_chebyt_ptr[2*0+1] = <void*>(<char*>"eval_sh_chebyt")
+ufunc_eval_sh_chebyt_ptr[2*1] = <void*>_func_eval_sh_chebyt[double]
+ufunc_eval_sh_chebyt_ptr[2*1+1] = <void*>(<char*>"eval_sh_chebyt")
+ufunc_eval_sh_chebyt_ptr[2*2] = <void*>_func_eval_sh_chebyt[double_complex]
+ufunc_eval_sh_chebyt_ptr[2*2+1] = <void*>(<char*>"eval_sh_chebyt")
+ufunc_eval_sh_chebyt_ptr[2*3] = <void*>_func_eval_sh_chebyt[double]
+ufunc_eval_sh_chebyt_ptr[2*3+1] = <void*>(<char*>"eval_sh_chebyt")
+ufunc_eval_sh_chebyt_ptr[2*4] = <void*>_func_eval_sh_chebyt[double_complex]
+ufunc_eval_sh_chebyt_ptr[2*4+1] = <void*>(<char*>"eval_sh_chebyt")
+ufunc_eval_sh_chebyt_data[0] = &ufunc_eval_sh_chebyt_ptr[2*0]
+ufunc_eval_sh_chebyt_data[1] = &ufunc_eval_sh_chebyt_ptr[2*1]
+ufunc_eval_sh_chebyt_data[2] = &ufunc_eval_sh_chebyt_ptr[2*2]
+ufunc_eval_sh_chebyt_data[3] = &ufunc_eval_sh_chebyt_ptr[2*3]
+ufunc_eval_sh_chebyt_data[4] = &ufunc_eval_sh_chebyt_ptr[2*4]
+eval_sh_chebyt = np.PyUFunc_FromFuncAndData(ufunc_eval_sh_chebyt_loops, ufunc_eval_sh_chebyt_data, ufunc_eval_sh_chebyt_types, 5, 2, 1, 0, "eval_sh_chebyt", ufunc_eval_sh_chebyt_doc, 0)
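+# Hedged usage sketch (assumes the built scipy.special package): the
+# defining identity T_n^*(x) = T_n(2x - 1) from the docstring can be
+# checked against eval_chebyt:
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import eval_sh_chebyt, eval_chebyt
+#     >>> x = np.linspace(0, 1, 5)
+#     >>> np.allclose(eval_sh_chebyt(3, x), eval_chebyt(3, 2*x - 1))
+#     True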
+
+cdef np.PyUFuncGenericFunction ufunc_eval_sh_chebyu_loops[5]
+cdef void *ufunc_eval_sh_chebyu_ptr[10]
+cdef void *ufunc_eval_sh_chebyu_data[5]
+cdef char ufunc_eval_sh_chebyu_types[15]
+cdef char *ufunc_eval_sh_chebyu_doc = (
+    "eval_sh_chebyu(n, x, out=None)\n"
+    "\n"
+    "Evaluate shifted Chebyshev polynomial of the second kind at a\n"
+    "point.\n"
+    "\n"
+    "These polynomials are defined as\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    U_n^*(x) = U_n(2x - 1)\n"
+    "\n"
+    "where :math:`U_n` is a Chebyshev polynomial of the first kind. See\n"
+    "22.5.15 in [AS]_ for details.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "n : array_like\n"
+    "    Degree of the polynomial. If not an integer, the result is\n"
+    "    determined via the relation to `eval_chebyu`.\n"
+    "x : array_like\n"
+    "    Points at which to evaluate the shifted Chebyshev polynomial\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "U : scalar or ndarray\n"
+    "    Values of the shifted Chebyshev polynomial\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "roots_sh_chebyu : roots and quadrature weights of shifted\n"
+    "                  Chebychev polynomials of the second kind\n"
+    "sh_chebyu : shifted Chebyshev polynomial object\n"
+    "eval_chebyu : evaluate Chebyshev polynomials of the second kind\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n"
+    "    Handbook of Mathematical Functions with Formulas,\n"
+    "    Graphs, and Mathematical Tables. New York: Dover, 1972.")
+ufunc_eval_sh_chebyu_loops[0] = loop_d_ld__As_ld_d
+ufunc_eval_sh_chebyu_loops[1] = loop_d_dd__As_ff_f
+ufunc_eval_sh_chebyu_loops[2] = loop_D_dD__As_fF_F
+ufunc_eval_sh_chebyu_loops[3] = loop_d_dd__As_dd_d
+ufunc_eval_sh_chebyu_loops[4] = loop_D_dD__As_dD_D
+ufunc_eval_sh_chebyu_types[0] = NPY_LONG
+ufunc_eval_sh_chebyu_types[1] = NPY_DOUBLE
+ufunc_eval_sh_chebyu_types[2] = NPY_DOUBLE
+ufunc_eval_sh_chebyu_types[3] = NPY_FLOAT
+ufunc_eval_sh_chebyu_types[4] = NPY_FLOAT
+ufunc_eval_sh_chebyu_types[5] = NPY_FLOAT
+ufunc_eval_sh_chebyu_types[6] = NPY_FLOAT
+ufunc_eval_sh_chebyu_types[7] = NPY_CFLOAT
+ufunc_eval_sh_chebyu_types[8] = NPY_CFLOAT
+ufunc_eval_sh_chebyu_types[9] = NPY_DOUBLE
+ufunc_eval_sh_chebyu_types[10] = NPY_DOUBLE
+ufunc_eval_sh_chebyu_types[11] = NPY_DOUBLE
+ufunc_eval_sh_chebyu_types[12] = NPY_DOUBLE
+ufunc_eval_sh_chebyu_types[13] = NPY_CDOUBLE
+ufunc_eval_sh_chebyu_types[14] = NPY_CDOUBLE
+ufunc_eval_sh_chebyu_ptr[2*0] = _func_eval_sh_chebyu_l
+ufunc_eval_sh_chebyu_ptr[2*0+1] = ("eval_sh_chebyu")
+ufunc_eval_sh_chebyu_ptr[2*1] = _func_eval_sh_chebyu[double]
+ufunc_eval_sh_chebyu_ptr[2*1+1] = ("eval_sh_chebyu")
+ufunc_eval_sh_chebyu_ptr[2*2] = _func_eval_sh_chebyu[double_complex]
+ufunc_eval_sh_chebyu_ptr[2*2+1] = ("eval_sh_chebyu")
+ufunc_eval_sh_chebyu_ptr[2*3] = _func_eval_sh_chebyu[double]
+ufunc_eval_sh_chebyu_ptr[2*3+1] = ("eval_sh_chebyu")
+ufunc_eval_sh_chebyu_ptr[2*4] = _func_eval_sh_chebyu[double_complex]
+ufunc_eval_sh_chebyu_ptr[2*4+1] = ("eval_sh_chebyu")
+ufunc_eval_sh_chebyu_data[0] = &ufunc_eval_sh_chebyu_ptr[2*0]
+ufunc_eval_sh_chebyu_data[1] = &ufunc_eval_sh_chebyu_ptr[2*1]
+ufunc_eval_sh_chebyu_data[2] = &ufunc_eval_sh_chebyu_ptr[2*2]
+ufunc_eval_sh_chebyu_data[3] = &ufunc_eval_sh_chebyu_ptr[2*3]
+ufunc_eval_sh_chebyu_data[4] = &ufunc_eval_sh_chebyu_ptr[2*4]
+eval_sh_chebyu = np.PyUFunc_FromFuncAndData(ufunc_eval_sh_chebyu_loops, ufunc_eval_sh_chebyu_data, ufunc_eval_sh_chebyu_types, 5, 2, 1, 0, "eval_sh_chebyu", ufunc_eval_sh_chebyu_doc, 0)
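+# Hedged usage sketch (assumes the built scipy.special package): the
+# defining identity U_n^*(x) = U_n(2x - 1) from the docstring can be
+# checked against eval_chebyu:
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import eval_sh_chebyu, eval_chebyu
+#     >>> x = np.linspace(0, 1, 5)
+#     >>> np.allclose(eval_sh_chebyu(3, x), eval_chebyu(3, 2*x - 1))
+#     True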
+
+cdef np.PyUFuncGenericFunction ufunc_eval_sh_jacobi_loops[5]
+cdef void *ufunc_eval_sh_jacobi_ptr[10]
+cdef void *ufunc_eval_sh_jacobi_data[5]
+cdef char ufunc_eval_sh_jacobi_types[25]
+cdef char *ufunc_eval_sh_jacobi_doc = (
+    "eval_sh_jacobi(n, p, q, x, out=None)\n"
+    "\n"
+    "Evaluate shifted Jacobi polynomial at a point.\n"
+    "\n"
+    "Defined by\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    G_n^{(p, q)}(x)\n"
+    "      = \\binom{2n + p - 1}{n}^{-1} P_n^{(p - q, q - 1)}(2x - 1),\n"
+    "\n"
+    "where :math:`P_n^{(\\cdot, \\cdot)}` is the n-th Jacobi\n"
+    "polynomial. See 22.5.2 in [AS]_ for details.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "n : int\n"
+    "    Degree of the polynomial. If not an integer, the result is\n"
+    "    determined via the relation to `binom` and `eval_jacobi`.\n"
+    "p : float\n"
+    "    Parameter\n"
+    "q : float\n"
+    "    Parameter\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "G : scalar or ndarray\n"
+    "    Values of the shifted Jacobi polynomial.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "roots_sh_jacobi : roots and quadrature weights of shifted Jacobi\n"
+    "                  polynomials\n"
+    "sh_jacobi : shifted Jacobi polynomial object\n"
+    "eval_jacobi : evaluate Jacobi polynomials\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n"
+    "    Handbook of Mathematical Functions with Formulas,\n"
+    "    Graphs, and Mathematical Tables. New York: Dover, 1972.")
+ufunc_eval_sh_jacobi_loops[0] = loop_d_lddd__As_lddd_d
+ufunc_eval_sh_jacobi_loops[1] = loop_d_dddd__As_ffff_f
+ufunc_eval_sh_jacobi_loops[2] = loop_D_dddD__As_fffF_F
+ufunc_eval_sh_jacobi_loops[3] = loop_d_dddd__As_dddd_d
+ufunc_eval_sh_jacobi_loops[4] = loop_D_dddD__As_dddD_D
+ufunc_eval_sh_jacobi_types[0] = NPY_LONG
+ufunc_eval_sh_jacobi_types[1] = NPY_DOUBLE
+ufunc_eval_sh_jacobi_types[2] = NPY_DOUBLE
+ufunc_eval_sh_jacobi_types[3] = NPY_DOUBLE
+ufunc_eval_sh_jacobi_types[4] = NPY_DOUBLE
+ufunc_eval_sh_jacobi_types[5] = NPY_FLOAT
+ufunc_eval_sh_jacobi_types[6] = NPY_FLOAT
+ufunc_eval_sh_jacobi_types[7] = NPY_FLOAT
+ufunc_eval_sh_jacobi_types[8] = NPY_FLOAT
+ufunc_eval_sh_jacobi_types[9] = NPY_FLOAT
+ufunc_eval_sh_jacobi_types[10] = NPY_FLOAT
+ufunc_eval_sh_jacobi_types[11] = NPY_FLOAT
+ufunc_eval_sh_jacobi_types[12] = NPY_FLOAT
+ufunc_eval_sh_jacobi_types[13] = NPY_CFLOAT
+ufunc_eval_sh_jacobi_types[14] = NPY_CFLOAT
+ufunc_eval_sh_jacobi_types[15] = NPY_DOUBLE
+ufunc_eval_sh_jacobi_types[16] = NPY_DOUBLE
+ufunc_eval_sh_jacobi_types[17] = NPY_DOUBLE
+ufunc_eval_sh_jacobi_types[18] = NPY_DOUBLE
+ufunc_eval_sh_jacobi_types[19] = NPY_DOUBLE
+ufunc_eval_sh_jacobi_types[20] = NPY_DOUBLE
+ufunc_eval_sh_jacobi_types[21] = NPY_DOUBLE
+ufunc_eval_sh_jacobi_types[22] = NPY_DOUBLE
+ufunc_eval_sh_jacobi_types[23] = NPY_CDOUBLE
+ufunc_eval_sh_jacobi_types[24] = NPY_CDOUBLE
+ufunc_eval_sh_jacobi_ptr[2*0] = _func_eval_sh_jacobi_l
+ufunc_eval_sh_jacobi_ptr[2*0+1] = ("eval_sh_jacobi")
+ufunc_eval_sh_jacobi_ptr[2*1] = _func_eval_sh_jacobi[double]
+ufunc_eval_sh_jacobi_ptr[2*1+1] = ("eval_sh_jacobi")
+ufunc_eval_sh_jacobi_ptr[2*2] = _func_eval_sh_jacobi[double_complex]
+ufunc_eval_sh_jacobi_ptr[2*2+1] = ("eval_sh_jacobi")
+ufunc_eval_sh_jacobi_ptr[2*3] = _func_eval_sh_jacobi[double]
+ufunc_eval_sh_jacobi_ptr[2*3+1] = ("eval_sh_jacobi")
+ufunc_eval_sh_jacobi_ptr[2*4] = _func_eval_sh_jacobi[double_complex]
+ufunc_eval_sh_jacobi_ptr[2*4+1] = ("eval_sh_jacobi")
+ufunc_eval_sh_jacobi_data[0] = &ufunc_eval_sh_jacobi_ptr[2*0]
+ufunc_eval_sh_jacobi_data[1] = &ufunc_eval_sh_jacobi_ptr[2*1]
+ufunc_eval_sh_jacobi_data[2] = &ufunc_eval_sh_jacobi_ptr[2*2]
+ufunc_eval_sh_jacobi_data[3] = &ufunc_eval_sh_jacobi_ptr[2*3]
+ufunc_eval_sh_jacobi_data[4] = &ufunc_eval_sh_jacobi_ptr[2*4]
+eval_sh_jacobi = np.PyUFunc_FromFuncAndData(ufunc_eval_sh_jacobi_loops, ufunc_eval_sh_jacobi_data, ufunc_eval_sh_jacobi_types, 5, 4, 1, 0, "eval_sh_jacobi", ufunc_eval_sh_jacobi_doc, 0)
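+# Hedged usage sketch (assumes the built scipy.special package): the
+# documented relation to `binom` and `eval_jacobi` can be checked
+# directly for sample parameters:
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import eval_sh_jacobi, eval_jacobi, binom
+#     >>> n, p, q, x = 2, 2.0, 1.5, 0.3
+#     >>> lhs = eval_sh_jacobi(n, p, q, x)
+#     >>> rhs = eval_jacobi(n, p - q, q - 1, 2*x - 1) / binom(2*n + p - 1, n)
+#     >>> np.allclose(lhs, rhs)
+#     True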
+
+cdef np.PyUFuncGenericFunction ufunc_eval_sh_legendre_loops[5]
+cdef void *ufunc_eval_sh_legendre_ptr[10]
+cdef void *ufunc_eval_sh_legendre_data[5]
+cdef char ufunc_eval_sh_legendre_types[15]
+cdef char *ufunc_eval_sh_legendre_doc = (
+    "eval_sh_legendre(n, x, out=None)\n"
+    "\n"
+    "Evaluate shifted Legendre polynomial at a point.\n"
+    "\n"
+    "These polynomials are defined as\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    P_n^*(x) = P_n(2x - 1)\n"
+    "\n"
+    "where :math:`P_n` is a Legendre polynomial. See 2.2.11 in [AS]_\n"
+    "for details.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "n : array_like\n"
+    "    Degree of the polynomial. If not an integer, the value is\n"
+    "    determined via the relation to `eval_legendre`.\n"
+    "x : array_like\n"
+    "    Points at which to evaluate the shifted Legendre polynomial\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "P : scalar or ndarray\n"
+    "    Values of the shifted Legendre polynomial\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "roots_sh_legendre : roots and quadrature weights of shifted\n"
+    "                    Legendre polynomials\n"
+    "sh_legendre : shifted Legendre polynomial object\n"
+    "eval_legendre : evaluate Legendre polynomials\n"
+    "numpy.polynomial.legendre.Legendre : Legendre series\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [AS] Milton Abramowitz and Irene A. Stegun, eds.\n"
+    "    Handbook of Mathematical Functions with Formulas,\n"
+    "    Graphs, and Mathematical Tables. New York: Dover, 1972.")
+ufunc_eval_sh_legendre_loops[0] = loop_d_ld__As_ld_d
+ufunc_eval_sh_legendre_loops[1] = loop_d_dd__As_ff_f
+ufunc_eval_sh_legendre_loops[2] = loop_D_dD__As_fF_F
+ufunc_eval_sh_legendre_loops[3] = loop_d_dd__As_dd_d
+ufunc_eval_sh_legendre_loops[4] = loop_D_dD__As_dD_D
+ufunc_eval_sh_legendre_types[0] = NPY_LONG
+ufunc_eval_sh_legendre_types[1] = NPY_DOUBLE
+ufunc_eval_sh_legendre_types[2] = NPY_DOUBLE
+ufunc_eval_sh_legendre_types[3] = NPY_FLOAT
+ufunc_eval_sh_legendre_types[4] = NPY_FLOAT
+ufunc_eval_sh_legendre_types[5] = NPY_FLOAT
+ufunc_eval_sh_legendre_types[6] = NPY_FLOAT
+ufunc_eval_sh_legendre_types[7] = NPY_CFLOAT
+ufunc_eval_sh_legendre_types[8] = NPY_CFLOAT
+ufunc_eval_sh_legendre_types[9] = NPY_DOUBLE
+ufunc_eval_sh_legendre_types[10] = NPY_DOUBLE
+ufunc_eval_sh_legendre_types[11] = NPY_DOUBLE
+ufunc_eval_sh_legendre_types[12] = NPY_DOUBLE
+ufunc_eval_sh_legendre_types[13] = NPY_CDOUBLE
+ufunc_eval_sh_legendre_types[14] = NPY_CDOUBLE
+ufunc_eval_sh_legendre_ptr[2*0] = _func_eval_sh_legendre_l
+ufunc_eval_sh_legendre_ptr[2*0+1] = ("eval_sh_legendre")
+ufunc_eval_sh_legendre_ptr[2*1] = _func_eval_sh_legendre[double]
+ufunc_eval_sh_legendre_ptr[2*1+1] = ("eval_sh_legendre")
+ufunc_eval_sh_legendre_ptr[2*2] = _func_eval_sh_legendre[double_complex]
+ufunc_eval_sh_legendre_ptr[2*2+1] = ("eval_sh_legendre")
+ufunc_eval_sh_legendre_ptr[2*3] = _func_eval_sh_legendre[double]
+ufunc_eval_sh_legendre_ptr[2*3+1] = ("eval_sh_legendre")
+ufunc_eval_sh_legendre_ptr[2*4] = _func_eval_sh_legendre[double_complex]
+ufunc_eval_sh_legendre_ptr[2*4+1] = ("eval_sh_legendre")
+ufunc_eval_sh_legendre_data[0] = &ufunc_eval_sh_legendre_ptr[2*0]
+ufunc_eval_sh_legendre_data[1] = &ufunc_eval_sh_legendre_ptr[2*1]
+ufunc_eval_sh_legendre_data[2] = &ufunc_eval_sh_legendre_ptr[2*2]
+ufunc_eval_sh_legendre_data[3] = &ufunc_eval_sh_legendre_ptr[2*3]
+ufunc_eval_sh_legendre_data[4] = &ufunc_eval_sh_legendre_ptr[2*4]
+eval_sh_legendre = np.PyUFunc_FromFuncAndData(ufunc_eval_sh_legendre_loops, ufunc_eval_sh_legendre_data, ufunc_eval_sh_legendre_types, 5, 2, 1, 0, "eval_sh_legendre", ufunc_eval_sh_legendre_doc, 0)
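+# Hedged usage sketch (assumes the built scipy.special package): the
+# defining identity P_n^*(x) = P_n(2x - 1) from the docstring can be
+# checked against eval_legendre:
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import eval_sh_legendre, eval_legendre
+#     >>> x = np.linspace(0, 1, 5)
+#     >>> np.allclose(eval_sh_legendre(3, x), eval_legendre(3, 2*x - 1))
+#     True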
+
+cdef np.PyUFuncGenericFunction ufunc_exp1_loops[4]
+cdef void *ufunc_exp1_ptr[8]
+cdef void *ufunc_exp1_data[4]
+cdef char ufunc_exp1_types[8]
+cdef char *ufunc_exp1_doc = (
+    "exp1(z, out=None)\n"
+    "\n"
+    "Exponential integral E1.\n"
+    "\n"
+    "For complex :math:`z \\ne 0` the exponential integral can be defined as\n"
+    "[1]_\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "   E_1(z) = \\int_z^\\infty \\frac{e^{-t}}{t} dt,\n"
+    "\n"
+    "where the path of the integral does not cross the negative real\n"
+    "axis or pass through the origin.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "z: array_like\n"
+    "    Real or complex argument.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Values of the exponential integral E1\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "expi : exponential integral :math:`Ei`\n"
+    "expn : generalization of :math:`E_1`\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "For :math:`x > 0` it is related to the exponential integral\n"
+    ":math:`Ei` (see `expi`) via the relation\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "   E_1(x) = -Ei(-x).\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Digital Library of Mathematical Functions, 6.2.1\n"
+    "       https://dlmf.nist.gov/6.2#E1\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> import scipy.special as sc\n"
+    "\n"
+    "It has a pole at 0.\n"
+    "\n"
+    ">>> sc.exp1(0)\n"
+    "inf\n"
+    "\n"
+    "It has a branch cut on the negative real axis.\n"
+    "\n"
+    ">>> sc.exp1(-1)\n"
+    "nan\n"
+    ">>> sc.exp1(complex(-1, 0))\n"
+    "(-1.8951178163559368-3.141592653589793j)\n"
+    ">>> sc.exp1(complex(-1, -0.0))\n"
+    "(-1.8951178163559368+3.141592653589793j)\n"
+    "\n"
+    "It approaches 0 along the positive real axis.\n"
+    "\n"
+    ">>> sc.exp1([1, 10, 100, 1000])\n"
+    "array([2.19383934e-01, 4.15696893e-06, 3.68359776e-46, 0.00000000e+00])\n"
+    "\n"
+    "It is related to `expi`.\n"
+    "\n"
+    ">>> x = np.array([1, 2, 3, 4])\n"
+    ">>> sc.exp1(x)\n"
+    "array([0.21938393, 0.04890051, 0.01304838, 0.00377935])\n"
+    ">>> -sc.expi(-x)\n"
+    "array([0.21938393, 0.04890051, 0.01304838, 0.00377935])")
+ufunc_exp1_loops[0] = loop_d_d__As_f_f
+ufunc_exp1_loops[1] = loop_d_d__As_d_d
+ufunc_exp1_loops[2] = loop_D_D__As_F_F
+ufunc_exp1_loops[3] = loop_D_D__As_D_D
+ufunc_exp1_types[0] = NPY_FLOAT
+ufunc_exp1_types[1] = NPY_FLOAT
+ufunc_exp1_types[2] = NPY_DOUBLE
+ufunc_exp1_types[3] = NPY_DOUBLE
+ufunc_exp1_types[4] = NPY_CFLOAT
+ufunc_exp1_types[5] = NPY_CFLOAT
+ufunc_exp1_types[6] = NPY_CDOUBLE
+ufunc_exp1_types[7] = NPY_CDOUBLE
+ufunc_exp1_ptr[2*0] = _func_exp1_wrap
+ufunc_exp1_ptr[2*0+1] = ("exp1")
+ufunc_exp1_ptr[2*1] = _func_exp1_wrap
+ufunc_exp1_ptr[2*1+1] = ("exp1")
+ufunc_exp1_ptr[2*2] = _func_cexp1_wrap
+ufunc_exp1_ptr[2*2+1] = ("exp1")
+ufunc_exp1_ptr[2*3] = _func_cexp1_wrap
+ufunc_exp1_ptr[2*3+1] = ("exp1")
+ufunc_exp1_data[0] = &ufunc_exp1_ptr[2*0]
+ufunc_exp1_data[1] = &ufunc_exp1_ptr[2*1]
+ufunc_exp1_data[2] = &ufunc_exp1_ptr[2*2]
+ufunc_exp1_data[3] = &ufunc_exp1_ptr[2*3]
+exp1 = np.PyUFunc_FromFuncAndData(ufunc_exp1_loops, ufunc_exp1_data, ufunc_exp1_types, 4, 1, 1, 0, "exp1", ufunc_exp1_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_exp10_loops[2]
+cdef void *ufunc_exp10_ptr[4]
+cdef void *ufunc_exp10_data[2]
+cdef char ufunc_exp10_types[4]
+cdef char *ufunc_exp10_doc = (
+    "exp10(x, out=None)\n"
+    "\n"
+    "Compute ``10**x`` element-wise.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    `x` must contain real numbers.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    ``10**x``, computed element-wise.\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import exp10\n"
+    "\n"
+    ">>> exp10(3)\n"
+    "1000.0\n"
+    ">>> x = np.array([[-1, -0.5, 0], [0.5, 1, 1.5]])\n"
+    ">>> exp10(x)\n"
+    "array([[  0.1       ,   0.31622777,   1.        ],\n"
+    "       [  3.16227766,  10.        ,  31.6227766 ]])")
+ufunc_exp10_loops[0] = loop_d_d__As_f_f
+ufunc_exp10_loops[1] = loop_d_d__As_d_d
+ufunc_exp10_types[0] = NPY_FLOAT
+ufunc_exp10_types[1] = NPY_FLOAT
+ufunc_exp10_types[2] = NPY_DOUBLE
+ufunc_exp10_types[3] = NPY_DOUBLE
+ufunc_exp10_ptr[2*0] = _func_exp10
+ufunc_exp10_ptr[2*0+1] = ("exp10")
+ufunc_exp10_ptr[2*1] = _func_exp10
+ufunc_exp10_ptr[2*1+1] = ("exp10")
+ufunc_exp10_data[0] = &ufunc_exp10_ptr[2*0]
+ufunc_exp10_data[1] = &ufunc_exp10_ptr[2*1]
+exp10 = np.PyUFunc_FromFuncAndData(ufunc_exp10_loops, ufunc_exp10_data, ufunc_exp10_types, 2, 1, 1, 0, "exp10", ufunc_exp10_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_exp2_loops[2]
+cdef void *ufunc_exp2_ptr[4]
+cdef void *ufunc_exp2_data[2]
+cdef char ufunc_exp2_types[4]
+cdef char *ufunc_exp2_doc = (
+    "exp2(x, out=None)\n"
+    "\n"
+    "Compute ``2**x`` element-wise.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    `x` must contain real numbers.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    ``2**x``, computed element-wise.\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import exp2\n"
+    "\n"
+    ">>> exp2(3)\n"
+    "8.0\n"
+    ">>> x = np.array([[-1, -0.5, 0], [0.5, 1, 1.5]])\n"
+    ">>> exp2(x)\n"
+    "array([[ 0.5       ,  0.70710678,  1.        ],\n"
+    "       [ 1.41421356,  2.        ,  2.82842712]])")
+ufunc_exp2_loops[0] = loop_d_d__As_f_f
+ufunc_exp2_loops[1] = loop_d_d__As_d_d
+ufunc_exp2_types[0] = NPY_FLOAT
+ufunc_exp2_types[1] = NPY_FLOAT
+ufunc_exp2_types[2] = NPY_DOUBLE
+ufunc_exp2_types[3] = NPY_DOUBLE
+ufunc_exp2_ptr[2*0] = _func_exp2
+ufunc_exp2_ptr[2*0+1] = ("exp2")
+ufunc_exp2_ptr[2*1] = _func_exp2
+ufunc_exp2_ptr[2*1+1] = ("exp2")
+ufunc_exp2_data[0] = &ufunc_exp2_ptr[2*0]
+ufunc_exp2_data[1] = &ufunc_exp2_ptr[2*1]
+exp2 = np.PyUFunc_FromFuncAndData(ufunc_exp2_loops, ufunc_exp2_data, ufunc_exp2_types, 2, 1, 1, 0, "exp2", ufunc_exp2_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_expi_loops[4]
+cdef void *ufunc_expi_ptr[8]
+cdef void *ufunc_expi_data[4]
+cdef char ufunc_expi_types[8]
+cdef char *ufunc_expi_doc = (
+    "expi(x, out=None)\n"
+    "\n"
+    "Exponential integral Ei.\n"
+    "\n"
+    "For real :math:`x`, the exponential integral is defined as [1]_\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    Ei(x) = \\int_{-\\infty}^x \\frac{e^t}{t} dt.\n"
+    "\n"
+    "For :math:`x > 0` the integral is understood as a Cauchy principal\n"
+    "value.\n"
+    "\n"
+    "It is extended to the complex plane by analytic continuation of\n"
+    "the function on the interval :math:`(0, \\infty)`. The complex\n"
+    "variant has a branch cut on the negative real axis.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Real or complex valued argument\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Values of the exponential integral\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "The exponential integrals :math:`E_1` and :math:`Ei` satisfy the\n"
+    "relation\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    E_1(x) = -Ei(-x)\n"
+    "\n"
+    "for :math:`x > 0`.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "exp1 : Exponential integral :math:`E_1`\n"
+    "expn : Generalized exponential integral :math:`E_n`\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Digital Library of Mathematical Functions, 6.2.5\n"
+    "       https://dlmf.nist.gov/6.2#E5\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> import scipy.special as sc\n"
+    "\n"
+    "It is related to `exp1`.\n"
+    "\n"
+    ">>> x = np.array([1, 2, 3, 4])\n"
+    ">>> -sc.expi(-x)\n"
+    "array([0.21938393, 0.04890051, 0.01304838, 0.00377935])\n"
+    ">>> sc.exp1(x)\n"
+    "array([0.21938393, 0.04890051, 0.01304838, 0.00377935])\n"
+    "\n"
+    "The complex variant has a branch cut on the negative real axis.\n"
+    "\n"
+    ">>> sc.expi(-1 + 1e-12j)\n"
+    "(-0.21938393439552062+3.1415926535894254j)\n"
+    ">>> sc.expi(-1 - 1e-12j)\n"
+    "(-0.21938393439552062-3.1415926535894254j)\n"
+    "\n"
+    "As the complex variant approaches the branch cut, the real parts\n"
+    "approach the value of the real variant.\n"
+    "\n"
+    ">>> sc.expi(-1)\n"
+    "-0.21938393439552062\n"
+    "\n"
+    "The SciPy implementation returns the real variant for complex\n"
+    "values on the branch cut.\n"
+    "\n"
+    ">>> sc.expi(complex(-1, 0.0))\n"
+    "(-0.21938393439552062-0j)\n"
+    ">>> sc.expi(complex(-1, -0.0))\n"
+    "(-0.21938393439552062-0j)")
+ufunc_expi_loops[0] = loop_d_d__As_f_f
+ufunc_expi_loops[1] = loop_d_d__As_d_d
+ufunc_expi_loops[2] = loop_D_D__As_F_F
+ufunc_expi_loops[3] = loop_D_D__As_D_D
+ufunc_expi_types[0] = NPY_FLOAT
+ufunc_expi_types[1] = NPY_FLOAT
+ufunc_expi_types[2] = NPY_DOUBLE
+ufunc_expi_types[3] = NPY_DOUBLE
+ufunc_expi_types[4] = NPY_CFLOAT
+ufunc_expi_types[5] = NPY_CFLOAT
+ufunc_expi_types[6] = NPY_CDOUBLE
+ufunc_expi_types[7] = NPY_CDOUBLE
+ufunc_expi_ptr[2*0] = _func_expi_wrap
+ufunc_expi_ptr[2*0+1] = ("expi")
+ufunc_expi_ptr[2*1] = _func_expi_wrap
+ufunc_expi_ptr[2*1+1] = ("expi")
+ufunc_expi_ptr[2*2] = _func_cexpi_wrap
+ufunc_expi_ptr[2*2+1] = ("expi")
+ufunc_expi_ptr[2*3] = _func_cexpi_wrap
+ufunc_expi_ptr[2*3+1] = ("expi")
+ufunc_expi_data[0] = &ufunc_expi_ptr[2*0]
+ufunc_expi_data[1] = &ufunc_expi_ptr[2*1]
+ufunc_expi_data[2] = &ufunc_expi_ptr[2*2]
+ufunc_expi_data[3] = &ufunc_expi_ptr[2*3]
+expi = np.PyUFunc_FromFuncAndData(ufunc_expi_loops, ufunc_expi_data, ufunc_expi_types, 4, 1, 1, 0, "expi", ufunc_expi_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_expit_loops[3]
+cdef void *ufunc_expit_ptr[6]
+cdef void *ufunc_expit_data[3]
+cdef char ufunc_expit_types[6]
+cdef char *ufunc_expit_doc = (
+    "expit(x, out=None)\n"
+    "\n"
+    "Expit (a.k.a. logistic sigmoid) ufunc for ndarrays.\n"
+    "\n"
+    "The expit function, also known as the logistic sigmoid function, is\n"
+    "defined as ``expit(x) = 1/(1+exp(-x))``.  It is the inverse of the\n"
+    "logit function.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : ndarray\n"
+    "    The ndarray to apply expit to element-wise.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    An ndarray of the same shape as x. Its entries\n"
+    "    are `expit` of the corresponding entry of x.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "logit\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "As a ufunc expit takes a number of optional\n"
+    "keyword arguments. For more information\n"
+    "see `ufuncs `_\n"
+    "\n"
+    ".. versionadded:: 0.10.0\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import expit, logit\n"
+    "\n"
+    ">>> expit([-np.inf, -1.5, 0, 1.5, np.inf])\n"
+    "array([ 0.        ,  0.18242552,  0.5       ,  0.81757448,  1.        ])\n"
+    "\n"
+    "`logit` is the inverse of `expit`:\n"
+    "\n"
+    ">>> logit(expit([-2.5, 0, 3.1, 5.0]))\n"
+    "array([-2.5,  0. ,  3.1,  5. ])\n"
+    "\n"
+    "Plot expit(x) for x in [-6, 6]:\n"
+    "\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> x = np.linspace(-6, 6, 121)\n"
+    ">>> y = expit(x)\n"
+    ">>> plt.plot(x, y)\n"
+    ">>> plt.grid()\n"
+    ">>> plt.xlim(-6, 6)\n"
+    ">>> plt.xlabel('x')\n"
+    ">>> plt.title('expit(x)')\n"
+    ">>> plt.show()")
+ufunc_expit_loops[0] = loop_f_f__As_f_f
+ufunc_expit_loops[1] = loop_d_d__As_d_d
+ufunc_expit_loops[2] = loop_g_g__As_g_g
+ufunc_expit_types[0] = NPY_FLOAT
+ufunc_expit_types[1] = NPY_FLOAT
+ufunc_expit_types[2] = NPY_DOUBLE
+ufunc_expit_types[3] = NPY_DOUBLE
+ufunc_expit_types[4] = NPY_LONGDOUBLE
+ufunc_expit_types[5] = NPY_LONGDOUBLE
+ufunc_expit_ptr[2*0] = scipy.special._ufuncs_cxx._export_expitf
+ufunc_expit_ptr[2*0+1] = ("expit")
+ufunc_expit_ptr[2*1] = scipy.special._ufuncs_cxx._export_expit
+ufunc_expit_ptr[2*1+1] = ("expit")
+ufunc_expit_ptr[2*2] = scipy.special._ufuncs_cxx._export_expitl
+ufunc_expit_ptr[2*2+1] = ("expit")
+ufunc_expit_data[0] = &ufunc_expit_ptr[2*0]
+ufunc_expit_data[1] = &ufunc_expit_ptr[2*1]
+ufunc_expit_data[2] = &ufunc_expit_ptr[2*2]
+expit = np.PyUFunc_FromFuncAndData(ufunc_expit_loops, ufunc_expit_data, ufunc_expit_types, 3, 1, 1, 0, "expit", ufunc_expit_doc, 0)
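+# Illustrative note (not part of the generated file): unlike most ufuncs
+# here, expit registers a native long-double loop alongside the float and
+# double ones, which shows up in its dtype signatures:
+#
+#     >>> from scipy.special import expit
+#     >>> expit.types  # doctest: +SKIP
+#     ['f->f', 'd->d', 'g->g']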
+
+cdef np.PyUFuncGenericFunction ufunc_expm1_loops[4]
+cdef void *ufunc_expm1_ptr[8]
+cdef void *ufunc_expm1_data[4]
+cdef char ufunc_expm1_types[8]
+cdef char *ufunc_expm1_doc = (
+    "expm1(x, out=None)\n"
+    "\n"
+    "Compute ``exp(x) - 1``.\n"
+    "\n"
+    "When `x` is near zero, ``exp(x)`` is near 1, so the numerical calculation\n"
+    "of ``exp(x) - 1`` can suffer from catastrophic loss of precision.\n"
+    "``expm1(x)`` is implemented to avoid the loss of precision that occurs when\n"
+    "`x` is near zero.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    `x` must contain real numbers.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    ``exp(x) - 1`` computed element-wise.\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import expm1\n"
+    "\n"
+    ">>> expm1(1.0)\n"
+    "1.7182818284590451\n"
+    ">>> expm1([-0.2, -0.1, 0, 0.1, 0.2])\n"
+    "array([-0.18126925, -0.09516258,  0.        ,  0.10517092,  0.22140276])\n"
+    "\n"
+    "The exact value of ``exp(7.5e-13) - 1`` is::\n"
+    "\n"
+    "    7.5000000000028125000000007031250000001318...*10**-13.\n"
+    "\n"
+    "Here is what ``expm1(7.5e-13)`` gives:\n"
+    "\n"
+    ">>> expm1(7.5e-13)\n"
+    "7.5000000000028135e-13\n"
+    "\n"
+    "Compare that to ``exp(7.5e-13) - 1``, where the subtraction results in\n"
+    "a \"catastrophic\" loss of precision:\n"
+    "\n"
+    ">>> np.exp(7.5e-13) - 1\n"
+    "7.5006667543675576e-13")
+ufunc_expm1_loops[0] = loop_d_d__As_f_f
+ufunc_expm1_loops[1] = loop_d_d__As_d_d
+ufunc_expm1_loops[2] = loop_D_D__As_F_F
+ufunc_expm1_loops[3] = loop_D_D__As_D_D
+ufunc_expm1_types[0] = NPY_FLOAT
+ufunc_expm1_types[1] = NPY_FLOAT
+ufunc_expm1_types[2] = NPY_DOUBLE
+ufunc_expm1_types[3] = NPY_DOUBLE
+ufunc_expm1_types[4] = NPY_CFLOAT
+ufunc_expm1_types[5] = NPY_CFLOAT
+ufunc_expm1_types[6] = NPY_CDOUBLE
+ufunc_expm1_types[7] = NPY_CDOUBLE
+ufunc_expm1_ptr[2*0] = _func_expm1
+ufunc_expm1_ptr[2*0+1] = ("expm1")
+ufunc_expm1_ptr[2*1] = _func_expm1
+ufunc_expm1_ptr[2*1+1] = ("expm1")
+ufunc_expm1_ptr[2*2] = _func_cexpm1
+ufunc_expm1_ptr[2*2+1] = ("expm1")
+ufunc_expm1_ptr[2*3] = _func_cexpm1
+ufunc_expm1_ptr[2*3+1] = ("expm1")
+ufunc_expm1_data[0] = &ufunc_expm1_ptr[2*0]
+ufunc_expm1_data[1] = &ufunc_expm1_ptr[2*1]
+ufunc_expm1_data[2] = &ufunc_expm1_ptr[2*2]
+ufunc_expm1_data[3] = &ufunc_expm1_ptr[2*3]
+expm1 = np.PyUFunc_FromFuncAndData(ufunc_expm1_loops, ufunc_expm1_data, ufunc_expm1_types, 4, 1, 1, 0, "expm1", ufunc_expm1_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_expn_loops[3]
+cdef void *ufunc_expn_ptr[6]
+cdef void *ufunc_expn_data[3]
+cdef char ufunc_expn_types[9]
+cdef char *ufunc_expn_doc = (
+    "expn(n, x, out=None)\n"
+    "\n"
+    "Generalized exponential integral En.\n"
+    "\n"
+    "For integer :math:`n \\geq 0` and real :math:`x \\geq 0` the\n"
+    "generalized exponential integral is defined as [dlmf]_\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    E_n(x) = x^{n - 1} \\int_x^\\infty \\frac{e^{-t}}{t^n} dt.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "n : array_like\n"
+    "    Non-negative integers\n"
+    "x : array_like\n"
+    "    Real argument\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Values of the generalized exponential integral\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "exp1 : special case of :math:`E_n` for :math:`n = 1`\n"
+    "expi : related to :math:`E_n` when :math:`n = 1`\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [dlmf] Digital Library of Mathematical Functions, 8.19.2\n"
+    "          https://dlmf.nist.gov/8.19#E2\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> import scipy.special as sc\n"
+    "\n"
+    "Its domain is nonnegative n and x.\n"
+    "\n"
+    ">>> sc.expn(-1, 1.0), sc.expn(1, -1.0)\n"
+    "(nan, nan)\n"
+    "\n"
+    "It has a pole at ``x = 0`` for ``n = 1, 2``; for larger ``n`` it\n"
+    "is equal to ``1 / (n - 1)``.\n"
+    "\n"
+    ">>> sc.expn([0, 1, 2, 3, 4], 0)\n"
+    "array([       inf,        inf, 1.        , 0.5       , 0.33333333])\n"
+    "\n"
+    "For n equal to 0 it reduces to ``exp(-x) / x``.\n"
+    "\n"
+    ">>> x = np.array([1, 2, 3, 4])\n"
+    ">>> sc.expn(0, x)\n"
+    "array([0.36787944, 0.06766764, 0.01659569, 0.00457891])\n"
+    ">>> np.exp(-x) / x\n"
+    "array([0.36787944, 0.06766764, 0.01659569, 0.00457891])\n"
+    "\n"
+    "For n equal to 1 it reduces to `exp1`.\n"
+    "\n"
+    ">>> sc.expn(1, x)\n"
+    "array([0.21938393, 0.04890051, 0.01304838, 0.00377935])\n"
+    ">>> sc.exp1(x)\n"
+    "array([0.21938393, 0.04890051, 0.01304838, 0.00377935])")
+ufunc_expn_loops[0] = loop_d_id__As_ld_d
+ufunc_expn_loops[1] = loop_d_dd__As_ff_f
+ufunc_expn_loops[2] = loop_d_dd__As_dd_d
+ufunc_expn_types[0] = NPY_LONG
+ufunc_expn_types[1] = NPY_DOUBLE
+ufunc_expn_types[2] = NPY_DOUBLE
+ufunc_expn_types[3] = NPY_FLOAT
+ufunc_expn_types[4] = NPY_FLOAT
+ufunc_expn_types[5] = NPY_FLOAT
+ufunc_expn_types[6] = NPY_DOUBLE
+ufunc_expn_types[7] = NPY_DOUBLE
+ufunc_expn_types[8] = NPY_DOUBLE
+ufunc_expn_ptr[2*0] = _func_expn
+ufunc_expn_ptr[2*0+1] = ("expn")
+ufunc_expn_ptr[2*1] = _func_expn_unsafe
+ufunc_expn_ptr[2*1+1] = ("expn")
+ufunc_expn_ptr[2*2] = _func_expn_unsafe
+ufunc_expn_ptr[2*2+1] = ("expn")
+ufunc_expn_data[0] = &ufunc_expn_ptr[2*0]
+ufunc_expn_data[1] = &ufunc_expn_ptr[2*1]
+ufunc_expn_data[2] = &ufunc_expn_ptr[2*2]
+expn = np.PyUFunc_FromFuncAndData(ufunc_expn_loops, ufunc_expn_data, ufunc_expn_types, 3, 2, 1, 0, "expn", ufunc_expn_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_exprel_loops[2]
+cdef void *ufunc_exprel_ptr[4]
+cdef void *ufunc_exprel_data[2]
+cdef char ufunc_exprel_types[4]
+cdef char *ufunc_exprel_doc = (
+    "exprel(x, out=None)\n"
+    "\n"
+    "Relative error exponential, ``(exp(x) - 1)/x``.\n"
+    "\n"
+    "When `x` is near zero, ``exp(x)`` is near 1, so the numerical calculation\n"
+    "of ``exp(x) - 1`` can suffer from catastrophic loss of precision.\n"
+    "``exprel(x)`` is implemented to avoid the loss of precision that occurs when\n"
+    "`x` is near zero.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : ndarray\n"
+    "    Input array.  `x` must contain real numbers.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    ``(exp(x) - 1)/x``, computed element-wise.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "expm1\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    ".. versionadded:: 0.17.0\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import exprel\n"
+    "\n"
+    ">>> exprel(0.01)\n"
+    "1.0050167084168056\n"
+    ">>> exprel([-0.25, -0.1, 0, 0.1, 0.25])\n"
+    "array([ 0.88479687,  0.95162582,  1.        ,  1.05170918,  1.13610167])\n"
+    "\n"
+    "Compare ``exprel(5e-9)`` to the naive calculation.  The exact value\n"
+    "is ``1.00000000250000000416...``.\n"
+    "\n"
+    ">>> exprel(5e-9)\n"
+    "1.0000000025\n"
+    "\n"
+    ">>> (np.exp(5e-9) - 1)/5e-9\n"
+    "0.99999999392252903")
+ufunc_exprel_loops[0] = loop_d_d__As_f_f
+ufunc_exprel_loops[1] = loop_d_d__As_d_d
+ufunc_exprel_types[0] = NPY_FLOAT
+ufunc_exprel_types[1] = NPY_FLOAT
+ufunc_exprel_types[2] = NPY_DOUBLE
+ufunc_exprel_types[3] = NPY_DOUBLE
+ufunc_exprel_ptr[2*0] = _func_exprel
+ufunc_exprel_ptr[2*0+1] = ("exprel")
+ufunc_exprel_ptr[2*1] = _func_exprel
+ufunc_exprel_ptr[2*1+1] = ("exprel")
+ufunc_exprel_data[0] = &ufunc_exprel_ptr[2*0]
+ufunc_exprel_data[1] = &ufunc_exprel_ptr[2*1]
+exprel = np.PyUFunc_FromFuncAndData(ufunc_exprel_loops, ufunc_exprel_data, ufunc_exprel_types, 2, 1, 1, 0, "exprel", ufunc_exprel_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_fdtr_loops[2]
+cdef void *ufunc_fdtr_ptr[4]
+cdef void *ufunc_fdtr_data[2]
+cdef char ufunc_fdtr_types[8]
+cdef char *ufunc_fdtr_doc = (
+    "fdtr(dfn, dfd, x, out=None)\n"
+    "\n"
+    "F cumulative distribution function.\n"
+    "\n"
+    "Returns the value of the cumulative distribution function of the\n"
+    "F-distribution, also known as Snedecor's F-distribution or the\n"
+    "Fisher-Snedecor distribution.\n"
+    "\n"
+    "The F-distribution with parameters :math:`d_n` and :math:`d_d` is the\n"
+    "distribution of the random variable,\n"
+    "\n"
+    ".. math::\n"
+    "    X = \\frac{U_n/d_n}{U_d/d_d},\n"
+    "\n"
+    "where :math:`U_n` and :math:`U_d` are random variables distributed\n"
+    ":math:`\\chi^2`, with :math:`d_n` and :math:`d_d` degrees of freedom,\n"
+    "respectively.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "dfn : array_like\n"
+    "    First parameter (positive float).\n"
+    "dfd : array_like\n"
+    "    Second parameter (positive float).\n"
+    "x : array_like\n"
+    "    Argument (nonnegative float).\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "y : scalar or ndarray\n"
+    "    The CDF of the F-distribution with parameters `dfn` and `dfd` at `x`.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "fdtrc : F distribution survival function\n"
+    "fdtri : F distribution inverse cumulative distribution\n"
+    "scipy.stats.f : F distribution\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "The regularized incomplete beta function is used, according to the\n"
+    "formula,\n"
+    "\n"
+    ".. math::\n"
+    "    F(d_n, d_d; x) = I_{xd_n/(d_d + xd_n)}(d_n/2, d_d/2).\n"
+    "\n"
+    "Wrapper for the Cephes [1]_ routine `fdtr`. The F distribution is also\n"
+    "available as `scipy.stats.f`. Calling `fdtr` directly can improve\n"
+    "performance compared to the ``cdf`` method of `scipy.stats.f` (see last\n"
+    "example below).\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Cephes Mathematical Functions Library,\n"
+    "       http://www.netlib.org/cephes/\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Calculate the function for ``dfn=1`` and ``dfd=2`` at ``x=1``.\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import fdtr\n"
+    ">>> fdtr(1, 2, 1)\n"
+    "0.5773502691896258\n"
+    "\n"
+    "Calculate the function at several points by providing a NumPy array for\n"
+    "`x`.\n"
+    "\n"
+    ">>> x = np.array([0.5, 2., 3.])\n"
+    ">>> fdtr(1, 2, x)\n"
+    "array([0.4472136 , 0.70710678, 0.77459667])\n"
+    "\n"
+    "Plot the function for several parameter sets.\n"
+    "\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> dfn_parameters = [1, 5, 10, 50]\n"
+    ">>> dfd_parameters = [1, 1, 2, 3]\n"
+    ">>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']\n"
+    ">>> parameters_list = list(zip(dfn_parameters, dfd_parameters,\n"
+    "...                            linestyles))\n"
+    ">>> x = np.linspace(0, 30, 1000)\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> for parameter_set in parameters_list:\n"
+    "...     dfn, dfd, style = parameter_set\n"
+    "...     fdtr_vals = fdtr(dfn, dfd, x)\n"
+    "...     ax.plot(x, fdtr_vals, label=rf\"$d_n={dfn},\\, d_d={dfd}$\",\n"
+    "...             ls=style)\n"
+    ">>> ax.legend()\n"
+    ">>> ax.set_xlabel(\"$x$\")\n"
+    ">>> ax.set_title(\"F distribution cumulative distribution function\")\n"
+    ">>> plt.show()\n"
+    "\n"
+    "The F distribution is also available as `scipy.stats.f`. Using `fdtr`\n"
+    "directly can be much faster than calling the ``cdf`` method of\n"
+    "`scipy.stats.f`, especially for small arrays or individual values.\n"
+    "To get the same results one must use the following parametrization:\n"
+    "``stats.f(dfn, dfd).cdf(x)=fdtr(dfn, dfd, x)``.\n"
+    "\n"
+    ">>> from scipy.stats import f\n"
+    ">>> dfn, dfd = 1, 2\n"
+    ">>> x = 1\n"
+    ">>> fdtr_res = fdtr(dfn, dfd, x)  # this will often be faster than below\n"
+    ">>> f_dist_res = f(dfn, dfd).cdf(x)\n"
+    ">>> fdtr_res == f_dist_res  # test that results are equal\n"
+    "True")
+ufunc_fdtr_loops[0] = loop_d_ddd__As_fff_f
+ufunc_fdtr_loops[1] = loop_d_ddd__As_ddd_d
+ufunc_fdtr_types[0] = NPY_FLOAT
+ufunc_fdtr_types[1] = NPY_FLOAT
+ufunc_fdtr_types[2] = NPY_FLOAT
+ufunc_fdtr_types[3] = NPY_FLOAT
+ufunc_fdtr_types[4] = NPY_DOUBLE
+ufunc_fdtr_types[5] = NPY_DOUBLE
+ufunc_fdtr_types[6] = NPY_DOUBLE
+ufunc_fdtr_types[7] = NPY_DOUBLE
+ufunc_fdtr_ptr[2*0] = _func_fdtr
+ufunc_fdtr_ptr[2*0+1] = ("fdtr")
+ufunc_fdtr_ptr[2*1] = _func_fdtr
+ufunc_fdtr_ptr[2*1+1] = ("fdtr")
+ufunc_fdtr_data[0] = &ufunc_fdtr_ptr[2*0]
+ufunc_fdtr_data[1] = &ufunc_fdtr_ptr[2*1]
+fdtr = np.PyUFunc_FromFuncAndData(ufunc_fdtr_loops, ufunc_fdtr_data, ufunc_fdtr_types, 2, 3, 1, 0, "fdtr", ufunc_fdtr_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_fdtrc_loops[2]
+cdef void *ufunc_fdtrc_ptr[4]
+cdef void *ufunc_fdtrc_data[2]
+cdef char ufunc_fdtrc_types[8]
+cdef char *ufunc_fdtrc_doc = (
+    "fdtrc(dfn, dfd, x, out=None)\n"
+    "\n"
+    "F survival function.\n"
+    "\n"
+    "Returns the complemented F-distribution function (the integral of the\n"
+    "density from `x` to infinity).\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "dfn : array_like\n"
+    "    First parameter (positive float).\n"
+    "dfd : array_like\n"
+    "    Second parameter (positive float).\n"
+    "x : array_like\n"
+    "    Argument (nonnegative float).\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "y : scalar or ndarray\n"
+    "    The complemented F-distribution function with parameters `dfn` and\n"
+    "    `dfd` at `x`.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "fdtr : F distribution cumulative distribution function\n"
+    "fdtri : F distribution inverse cumulative distribution function\n"
+    "scipy.stats.f : F distribution\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "The regularized incomplete beta function is used, according to the\n"
+    "formula,\n"
+    "\n"
+    ".. math::\n"
+    "    F(d_n, d_d; x) = I_{d_d/(d_d + xd_n)}(d_d/2, d_n/2).\n"
+    "\n"
+    "Wrapper for the Cephes [1]_ routine `fdtrc`. The F distribution is also\n"
+    "available as `scipy.stats.f`. Calling `fdtrc` directly can improve\n"
+    "performance compared to the ``sf`` method of `scipy.stats.f` (see last\n"
+    "example below).\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Cephes Mathematical Functions Library,\n"
+    "       http://www.netlib.org/cephes/\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Calculate the function for ``dfn=1`` and ``dfd=2`` at ``x=1``.\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import fdtrc\n"
+    ">>> fdtrc(1, 2, 1)\n"
+    "0.42264973081037427\n"
+    "\n"
+    "Calculate the function at several points by providing a NumPy array for\n"
+    "`x`.\n"
+    "\n"
+    ">>> x = np.array([0.5, 2., 3.])\n"
+    ">>> fdtrc(1, 2, x)\n"
+    "array([0.5527864 , 0.29289322, 0.22540333])\n"
+    "\n"
+    "Plot the function for several parameter sets.\n"
+    "\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> dfn_parameters = [1, 5, 10, 50]\n"
+    ">>> dfd_parameters = [1, 1, 2, 3]\n"
+    ">>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']\n"
+    ">>> parameters_list = list(zip(dfn_parameters, dfd_parameters,\n"
+    "...                            linestyles))\n"
+    ">>> x = np.linspace(0, 30, 1000)\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> for parameter_set in parameters_list:\n"
+    "...     dfn, dfd, style = parameter_set\n"
+    "...     fdtrc_vals = fdtrc(dfn, dfd, x)\n"
+    "...     ax.plot(x, fdtrc_vals, label=rf\"$d_n={dfn},\\, d_d={dfd}$\",\n"
+    "...             ls=style)\n"
+    ">>> ax.legend()\n"
+    ">>> ax.set_xlabel(\"$x$\")\n"
+    ">>> ax.set_title(\"F distribution survival function\")\n"
+    ">>> plt.show()\n"
+    "\n"
+    "The F distribution is also available as `scipy.stats.f`. Using `fdtrc`\n"
+    "directly can be much faster than calling the ``sf`` method of\n"
+    "`scipy.stats.f`, especially for small arrays or individual values.\n"
+    "To get the same results one must use the following parametrization:\n"
+    "``stats.f(dfn, dfd).sf(x)=fdtrc(dfn, dfd, x)``.\n"
+    "\n"
+    ">>> from scipy.stats import f\n"
+    ">>> dfn, dfd = 1, 2\n"
+    ">>> x = 1\n"
+    ">>> fdtrc_res = fdtrc(dfn, dfd, x)  # this will often be faster than below\n"
+    ">>> f_dist_res = f(dfn, dfd).sf(x)\n"
+    ">>> f_dist_res == fdtrc_res  # test that results are equal\n"
+    "True")
+ufunc_fdtrc_loops[0] = loop_d_ddd__As_fff_f
+ufunc_fdtrc_loops[1] = loop_d_ddd__As_ddd_d
+ufunc_fdtrc_types[0] = NPY_FLOAT
+ufunc_fdtrc_types[1] = NPY_FLOAT
+ufunc_fdtrc_types[2] = NPY_FLOAT
+ufunc_fdtrc_types[3] = NPY_FLOAT
+ufunc_fdtrc_types[4] = NPY_DOUBLE
+ufunc_fdtrc_types[5] = NPY_DOUBLE
+ufunc_fdtrc_types[6] = NPY_DOUBLE
+ufunc_fdtrc_types[7] = NPY_DOUBLE
+ufunc_fdtrc_ptr[2*0] = _func_fdtrc
+ufunc_fdtrc_ptr[2*0+1] = ("fdtrc")
+ufunc_fdtrc_ptr[2*1] = _func_fdtrc
+ufunc_fdtrc_ptr[2*1+1] = ("fdtrc")
+ufunc_fdtrc_data[0] = &ufunc_fdtrc_ptr[2*0]
+ufunc_fdtrc_data[1] = &ufunc_fdtrc_ptr[2*1]
+fdtrc = np.PyUFunc_FromFuncAndData(ufunc_fdtrc_loops, ufunc_fdtrc_data, ufunc_fdtrc_types, 2, 3, 1, 0, "fdtrc", ufunc_fdtrc_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_fdtri_loops[2]
+cdef void *ufunc_fdtri_ptr[4]
+cdef void *ufunc_fdtri_data[2]
+cdef char ufunc_fdtri_types[8]
+cdef char *ufunc_fdtri_doc = (
+    "fdtri(dfn, dfd, p, out=None)\n"
+    "\n"
+    "The `p`-th quantile of the F-distribution.\n"
+    "\n"
+    "This function is the inverse of the F-distribution CDF, `fdtr`, returning\n"
+    "the `x` such that `fdtr(dfn, dfd, x) = p`.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "dfn : array_like\n"
+    "    First parameter (positive float).\n"
+    "dfd : array_like\n"
+    "    Second parameter (positive float).\n"
+    "p : array_like\n"
+    "    Cumulative probability, in [0, 1].\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "x : scalar or ndarray\n"
+    "    The quantile corresponding to `p`.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "fdtr : F distribution cumulative distribution function\n"
+    "fdtrc : F distribution survival function\n"
+    "scipy.stats.f : F distribution\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "The computation is carried out using the relation to the inverse\n"
+    "regularized beta function, :math:`I^{-1}_x(a, b)`.  Let\n"
+    ":math:`z = I^{-1}_p(d_d/2, d_n/2).`  Then,\n"
+    "\n"
+    ".. math::\n"
+    "    x = \\frac{d_d (1 - z)}{d_n z}.\n"
+    "\n"
+    "If `p` is such that :math:`x < 0.5`, the following relation is used\n"
+    "instead for improved stability: let\n"
+    ":math:`z' = I^{-1}_{1 - p}(d_n/2, d_d/2).` Then,\n"
+    "\n"
+    ".. math::\n"
+    "    x = \\frac{d_d z'}{d_n (1 - z')}.\n"
+    "\n"
+    "Wrapper for the Cephes [1]_ routine `fdtri`.\n"
+    "\n"
+    "The F distribution is also available as `scipy.stats.f`. Calling\n"
+    "`fdtri` directly can improve performance compared to the ``ppf``\n"
+    "method of `scipy.stats.f` (see last example below).\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Cephes Mathematical Functions Library,\n"
+    "       http://www.netlib.org/cephes/\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "`fdtri` represents the inverse of the F distribution CDF which is\n"
+    "available as `fdtr`. Here, we calculate the CDF for ``df1=1``, ``df2=2``\n"
+    "at ``x=3``. `fdtri` then returns ``3`` given the same values for `df1`,\n"
+    "`df2` and the computed CDF value.\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import fdtri, fdtr\n"
+    ">>> df1, df2 = 1, 2\n"
+    ">>> x = 3\n"
+    ">>> cdf_value =  fdtr(df1, df2, x)\n"
+    ">>> fdtri(df1, df2, cdf_value)\n"
+    "3.000000000000006\n"
+    "\n"
+    "Calculate the function at several points by providing a NumPy array for\n"
+    "`x`.\n"
+    "\n"
+    ">>> x = np.array([0.1, 0.4, 0.7])\n"
+    ">>> fdtri(1, 2, x)\n"
+    "array([0.02020202, 0.38095238, 1.92156863])\n"
+    "\n"
+    "Plot the function for several parameter sets.\n"
+    "\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> dfn_parameters = [50, 10, 1, 50]\n"
+    ">>> dfd_parameters = [0.5, 1, 1, 5]\n"
+    ">>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']\n"
+    ">>> parameters_list = list(zip(dfn_parameters, dfd_parameters,\n"
+    "...                            linestyles))\n"
+    ">>> x = np.linspace(0, 1, 1000)\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> for parameter_set in parameters_list:\n"
+    "...     dfn, dfd, style = parameter_set\n"
+    "...     fdtri_vals = fdtri(dfn, dfd, x)\n"
+    "...     ax.plot(x, fdtri_vals, label=rf\"$d_n={dfn},\\, d_d={dfd}$\",\n"
+    "...             ls=style)\n"
+    ">>> ax.legend()\n"
+    ">>> ax.set_xlabel(\"$x$\")\n"
+    ">>> title = \"F distribution inverse cumulative distribution function\"\n"
+    ">>> ax.set_title(title)\n"
+    ">>> ax.set_ylim(0, 30)\n"
+    ">>> plt.show()\n"
+    "\n"
+    "The F distribution is also available as `scipy.stats.f`. Using `fdtri`\n"
+    "directly can be much faster than calling the ``ppf`` method of\n"
+    "`scipy.stats.f`, especially for small arrays or individual values.\n"
+    "To get the same results one must use the following parametrization:\n"
+    "``stats.f(dfn, dfd).ppf(x)=fdtri(dfn, dfd, x)``.\n"
+    "\n"
+    ">>> from scipy.stats import f\n"
+    ">>> dfn, dfd = 1, 2\n"
+    ">>> x = 0.7\n"
+    ">>> fdtri_res = fdtri(dfn, dfd, x)  # this will often be faster than below\n"
+    ">>> f_dist_res = f(dfn, dfd).ppf(x)\n"
+    ">>> f_dist_res == fdtri_res  # test that results are equal\n"
+    "True")
+ufunc_fdtri_loops[0] = loop_d_ddd__As_fff_f
+ufunc_fdtri_loops[1] = loop_d_ddd__As_ddd_d
+ufunc_fdtri_types[0] = NPY_FLOAT
+ufunc_fdtri_types[1] = NPY_FLOAT
+ufunc_fdtri_types[2] = NPY_FLOAT
+ufunc_fdtri_types[3] = NPY_FLOAT
+ufunc_fdtri_types[4] = NPY_DOUBLE
+ufunc_fdtri_types[5] = NPY_DOUBLE
+ufunc_fdtri_types[6] = NPY_DOUBLE
+ufunc_fdtri_types[7] = NPY_DOUBLE
+ufunc_fdtri_ptr[2*0] = _func_fdtri
+ufunc_fdtri_ptr[2*0+1] = ("fdtri")
+ufunc_fdtri_ptr[2*1] = _func_fdtri
+ufunc_fdtri_ptr[2*1+1] = ("fdtri")
+ufunc_fdtri_data[0] = &ufunc_fdtri_ptr[2*0]
+ufunc_fdtri_data[1] = &ufunc_fdtri_ptr[2*1]
+fdtri = np.PyUFunc_FromFuncAndData(ufunc_fdtri_loops, ufunc_fdtri_data, ufunc_fdtri_types, 2, 3, 1, 0, "fdtri", ufunc_fdtri_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_fdtridfd_loops[2]
+cdef void *ufunc_fdtridfd_ptr[4]
+cdef void *ufunc_fdtridfd_data[2]
+cdef char ufunc_fdtridfd_types[8]
+cdef char *ufunc_fdtridfd_doc = (
+    "fdtridfd(dfn, p, x, out=None)\n"
+    "\n"
+    "Inverse to `fdtr` vs dfd\n"
+    "\n"
+    "Finds the F density argument dfd such that ``fdtr(dfn, dfd, x) == p``.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "dfn : array_like\n"
+    "    First parameter (positive float).\n"
+    "p : array_like\n"
+    "    Cumulative probability, in [0, 1].\n"
+    "x : array_like\n"
+    "    Argument (nonnegative float).\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "dfd : scalar or ndarray\n"
+    "    `dfd` such that ``fdtr(dfn, dfd, x) == p``.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "fdtr, fdtrc, fdtri")
+ufunc_fdtridfd_loops[0] = loop_d_ddd__As_fff_f
+ufunc_fdtridfd_loops[1] = loop_d_ddd__As_ddd_d
+ufunc_fdtridfd_types[0] = NPY_FLOAT
+ufunc_fdtridfd_types[1] = NPY_FLOAT
+ufunc_fdtridfd_types[2] = NPY_FLOAT
+ufunc_fdtridfd_types[3] = NPY_FLOAT
+ufunc_fdtridfd_types[4] = NPY_DOUBLE
+ufunc_fdtridfd_types[5] = NPY_DOUBLE
+ufunc_fdtridfd_types[6] = NPY_DOUBLE
+ufunc_fdtridfd_types[7] = NPY_DOUBLE
+ufunc_fdtridfd_ptr[2*0] = _func_cdff4_wrap
+ufunc_fdtridfd_ptr[2*0+1] = ("fdtridfd")
+ufunc_fdtridfd_ptr[2*1] = _func_cdff4_wrap
+ufunc_fdtridfd_ptr[2*1+1] = ("fdtridfd")
+ufunc_fdtridfd_data[0] = &ufunc_fdtridfd_ptr[2*0]
+ufunc_fdtridfd_data[1] = &ufunc_fdtridfd_ptr[2*1]
+fdtridfd = np.PyUFunc_FromFuncAndData(ufunc_fdtridfd_loops, ufunc_fdtridfd_data, ufunc_fdtridfd_types, 2, 3, 1, 0, "fdtridfd", ufunc_fdtridfd_doc, 0)
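+# Hedged usage sketch (assumes the built scipy.special package): since
+# fdtridfd inverts fdtr with respect to dfd, round-tripping through fdtr
+# should recover the probability:
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import fdtr, fdtridfd
+#     >>> dfn, p, x = 1.0, 0.7, 2.0
+#     >>> dfd = fdtridfd(dfn, p, x)
+#     >>> np.allclose(fdtr(dfn, dfd, x), p)
+#     True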
+
+cdef np.PyUFuncGenericFunction ufunc_fresnel_loops[4]
+cdef void *ufunc_fresnel_ptr[8]
+cdef void *ufunc_fresnel_data[4]
+cdef char ufunc_fresnel_types[12]
+cdef char *ufunc_fresnel_doc = (
+    "fresnel(z, out=None)\n"
+    "\n"
+    "Fresnel integrals.\n"
+    "\n"
+    "The Fresnel integrals are defined as\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "   S(z) &= \\int_0^z \\sin(\\pi t^2 /2) dt \\\\\n"
+    "   C(z) &= \\int_0^z \\cos(\\pi t^2 /2) dt.\n"
+    "\n"
+    "See [dlmf]_ for details.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "z : array_like\n"
+    "    Real or complex valued argument\n"
+    "out : 2-tuple of ndarrays, optional\n"
+    "    Optional output arrays for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "S, C : 2-tuple of scalar or ndarray\n"
+    "    Values of the Fresnel integrals\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "fresnel_zeros : zeros of the Fresnel integrals\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [dlmf] NIST Digital Library of Mathematical Functions\n"
+    "          https://dlmf.nist.gov/7.2#iii\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> import scipy.special as sc\n"
+    "\n"
+    "As z goes to infinity along the real axis, S and C converge to 0.5.\n"
+    "\n"
+    ">>> S, C = sc.fresnel([0.1, 1, 10, 100, np.inf])\n"
+    ">>> S\n"
+    "array([0.00052359, 0.43825915, 0.46816998, 0.4968169 , 0.5       ])\n"
+    ">>> C\n"
+    "array([0.09999753, 0.7798934 , 0.49989869, 0.4999999 , 0.5       ])\n"
+    "\n"
+    "They are related to the error function `erf`.\n"
+    "\n"
+    ">>> z = np.array([1, 2, 3, 4])\n"
+    ">>> zeta = 0.5 * np.sqrt(np.pi) * (1 - 1j) * z\n"
+    ">>> S, C = sc.fresnel(z)\n"
+    ">>> C + 1j*S\n"
+    "array([0.7798934 +0.43825915j, 0.48825341+0.34341568j,\n"
+    "       0.60572079+0.496313j  , 0.49842603+0.42051575j])\n"
+    ">>> 0.5 * (1 + 1j) * sc.erf(zeta)\n"
+    "array([0.7798934 +0.43825915j, 0.48825341+0.34341568j,\n"
+    "       0.60572079+0.496313j  , 0.49842603+0.42051575j])")
+ufunc_fresnel_loops[0] = loop_i_d_dd_As_f_ff
+ufunc_fresnel_loops[1] = loop_i_d_dd_As_d_dd
+ufunc_fresnel_loops[2] = loop_i_D_DD_As_F_FF
+ufunc_fresnel_loops[3] = loop_i_D_DD_As_D_DD
+ufunc_fresnel_types[0] = NPY_FLOAT
+ufunc_fresnel_types[1] = NPY_FLOAT
+ufunc_fresnel_types[2] = NPY_FLOAT
+ufunc_fresnel_types[3] = NPY_DOUBLE
+ufunc_fresnel_types[4] = NPY_DOUBLE
+ufunc_fresnel_types[5] = NPY_DOUBLE
+ufunc_fresnel_types[6] = NPY_CFLOAT
+ufunc_fresnel_types[7] = NPY_CFLOAT
+ufunc_fresnel_types[8] = NPY_CFLOAT
+ufunc_fresnel_types[9] = NPY_CDOUBLE
+ufunc_fresnel_types[10] = NPY_CDOUBLE
+ufunc_fresnel_types[11] = NPY_CDOUBLE
+ufunc_fresnel_ptr[2*0] = _func_fresnl
+ufunc_fresnel_ptr[2*0+1] = ("fresnel")
+ufunc_fresnel_ptr[2*1] = _func_fresnl
+ufunc_fresnel_ptr[2*1+1] = ("fresnel")
+ufunc_fresnel_ptr[2*2] = _func_cfresnl_wrap
+ufunc_fresnel_ptr[2*2+1] = ("fresnel")
+ufunc_fresnel_ptr[2*3] = _func_cfresnl_wrap
+ufunc_fresnel_ptr[2*3+1] = ("fresnel")
+ufunc_fresnel_data[0] = &ufunc_fresnel_ptr[2*0]
+ufunc_fresnel_data[1] = &ufunc_fresnel_ptr[2*1]
+ufunc_fresnel_data[2] = &ufunc_fresnel_ptr[2*2]
+ufunc_fresnel_data[3] = &ufunc_fresnel_ptr[2*3]
+fresnel = np.PyUFunc_FromFuncAndData(ufunc_fresnel_loops, ufunc_fresnel_data, ufunc_fresnel_types, 4, 1, 2, 0, "fresnel", ufunc_fresnel_doc, 0)
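+# Illustrative note (not part of the generated file): fresnel is
+# registered with one input and two outputs (the 1 and 2 arguments to
+# PyUFunc_FromFuncAndData above), so a single call yields both S and C:
+#
+#     >>> from scipy.special import fresnel
+#     >>> fresnel.nin, fresnel.nout
+#     (1, 2)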
+
+cdef np.PyUFuncGenericFunction ufunc_gamma_loops[4]
+cdef void *ufunc_gamma_ptr[8]
+cdef void *ufunc_gamma_data[4]
+cdef char ufunc_gamma_types[8]
+cdef char *ufunc_gamma_doc = (
+    "gamma(z, out=None)\n"
+    "\n"
+    "gamma function.\n"
+    "\n"
+    "The gamma function is defined as\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "   \\Gamma(z) = \\int_0^\\infty t^{z-1} e^{-t} dt\n"
+    "\n"
+    "for :math:`\\Re(z) > 0` and is extended to the rest of the complex\n"
+    "plane by analytic continuation. See [dlmf]_ for more details.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "z : array_like\n"
+    "    Real or complex valued argument\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Values of the gamma function\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "The gamma function is often referred to as the generalized\n"
+    "factorial since :math:`\\Gamma(n + 1) = n!` for natural numbers\n"
+    ":math:`n`. More generally it satisfies the recurrence relation\n"
+    ":math:`\\Gamma(z + 1) = z \\cdot \\Gamma(z)` for complex :math:`z`,\n"
+    "which, combined with the fact that :math:`\\Gamma(1) = 1`, implies\n"
+    "the above identity for :math:`z = n`.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [dlmf] NIST Digital Library of Mathematical Functions\n"
+    "          https://dlmf.nist.gov/5.2#E1\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import gamma, factorial\n"
+    "\n"
+    ">>> gamma([0, 0.5, 1, 5])\n"
+    "array([         inf,   1.77245385,   1.        ,  24.        ])\n"
+    "\n"
+    ">>> z = 2.5 + 1j\n"
+    ">>> gamma(z)\n"
+    "(0.77476210455108352+0.70763120437959293j)\n"
+    ">>> gamma(z+1), z*gamma(z)  # Recurrence property\n"
+    "((1.2292740569981171+2.5438401155000685j),\n"
+    " (1.2292740569981158+2.5438401155000658j))\n"
+    "\n"
+    ">>> gamma(0.5)**2  # gamma(0.5) = sqrt(pi)\n"
+    "3.1415926535897927\n"
+    "\n"
+    "Plot gamma(x) for real x\n"
+    "\n"
+    ">>> x = np.linspace(-3.5, 5.5, 2251)\n"
+    ">>> y = gamma(x)\n"
+    "\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> plt.plot(x, y, 'b', alpha=0.6, label='gamma(x)')\n"
+    ">>> k = np.arange(1, 7)\n"
+    ">>> plt.plot(k, factorial(k-1), 'k*', alpha=0.6,\n"
+    "...          label='(x-1)!, x = 1, 2, ...')\n"
+    ">>> plt.xlim(-3.5, 5.5)\n"
+    ">>> plt.ylim(-10, 25)\n"
+    ">>> plt.grid()\n"
+    ">>> plt.xlabel('x')\n"
+    ">>> plt.legend(loc='lower right')\n"
+    ">>> plt.show()")
+ufunc_gamma_loops[0] = <np.PyUFuncGenericFunction>loop_d_d__As_f_f
+ufunc_gamma_loops[1] = <np.PyUFuncGenericFunction>loop_d_d__As_d_d
+ufunc_gamma_loops[2] = <np.PyUFuncGenericFunction>loop_D_D__As_F_F
+ufunc_gamma_loops[3] = <np.PyUFuncGenericFunction>loop_D_D__As_D_D
+ufunc_gamma_types[0] = <char>NPY_FLOAT
+ufunc_gamma_types[1] = <char>NPY_FLOAT
+ufunc_gamma_types[2] = <char>NPY_DOUBLE
+ufunc_gamma_types[3] = <char>NPY_DOUBLE
+ufunc_gamma_types[4] = <char>NPY_CFLOAT
+ufunc_gamma_types[5] = <char>NPY_CFLOAT
+ufunc_gamma_types[6] = <char>NPY_CDOUBLE
+ufunc_gamma_types[7] = <char>NPY_CDOUBLE
+ufunc_gamma_ptr[2*0] = <void*>_func_Gamma
+ufunc_gamma_ptr[2*0+1] = <void*>(<char*>"gamma")
+ufunc_gamma_ptr[2*1] = <void*>_func_Gamma
+ufunc_gamma_ptr[2*1+1] = <void*>(<char*>"gamma")
+ufunc_gamma_ptr[2*2] = <void*>_func_cgamma
+ufunc_gamma_ptr[2*2+1] = <void*>(<char*>"gamma")
+ufunc_gamma_ptr[2*3] = <void*>_func_cgamma
+ufunc_gamma_ptr[2*3+1] = <void*>(<char*>"gamma")
+ufunc_gamma_data[0] = &ufunc_gamma_ptr[2*0]
+ufunc_gamma_data[1] = &ufunc_gamma_ptr[2*1]
+ufunc_gamma_data[2] = &ufunc_gamma_ptr[2*2]
+ufunc_gamma_data[3] = &ufunc_gamma_ptr[2*3]
+gamma = np.PyUFunc_FromFuncAndData(ufunc_gamma_loops, ufunc_gamma_data, ufunc_gamma_types, 4, 1, 1, 0, "gamma", ufunc_gamma_doc, 0)
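+# The single-precision "f->f" loop reuses the double kernel and casts at the
+# boundaries; schematically (a sketch, not the generated loop body, which
+# also handles strides and error reporting):
+#
+#     for i in range(n):
+#         out_f[i] = <float>_func_Gamma(<double>in_f[i])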
+
+cdef np.PyUFuncGenericFunction ufunc_gammainc_loops[2]
+cdef void *ufunc_gammainc_ptr[4]
+cdef void *ufunc_gammainc_data[2]
+cdef char ufunc_gammainc_types[6]
+cdef char *ufunc_gammainc_doc = (
+    "gammainc(a, x, out=None)\n"
+    "\n"
+    "Regularized lower incomplete gamma function.\n"
+    "\n"
+    "It is defined as\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    P(a, x) = \\frac{1}{\\Gamma(a)} \\int_0^x t^{a - 1}e^{-t} dt\n"
+    "\n"
+    "for :math:`a > 0` and :math:`x \\geq 0`. See [dlmf]_ for details.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "a : array_like\n"
+    "    Positive parameter\n"
+    "x : array_like\n"
+    "    Nonnegative argument\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Values of the lower incomplete gamma function\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "The function satisfies the relation ``gammainc(a, x) +\n"
+    "gammaincc(a, x) = 1`` where `gammaincc` is the regularized upper\n"
+    "incomplete gamma function.\n"
+    "\n"
+    "The implementation largely follows that of [boost]_.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "gammaincc : regularized upper incomplete gamma function\n"
+    "gammaincinv : inverse of the regularized lower incomplete gamma function\n"
+    "gammainccinv : inverse of the regularized upper incomplete gamma function\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [dlmf] NIST Digital Library of Mathematical functions\n"
+    "          https://dlmf.nist.gov/8.2#E4\n"
+    ".. [boost] Maddock et. al., \"Incomplete Gamma Functions\",\n"
+    "   https://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import scipy.special as sc\n"
+    "\n"
+    "It is the CDF of the gamma distribution, so it starts at 0 and\n"
+    "monotonically increases to 1.\n"
+    "\n"
+    ">>> sc.gammainc(0.5, [0, 1, 10, 100])\n"
+    "array([0.        , 0.84270079, 0.99999226, 1.        ])\n"
+    "\n"
+    "It is equal to one minus the upper incomplete gamma function.\n"
+    "\n"
+    ">>> a, x = 0.5, 0.4\n"
+    ">>> sc.gammainc(a, x)\n"
+    "0.6289066304773024\n"
+    ">>> 1 - sc.gammaincc(a, x)\n"
+    "0.6289066304773024")
+ufunc_gammainc_loops[0] = <np.PyUFuncGenericFunction>loop_d_dd__As_ff_f
+ufunc_gammainc_loops[1] = <np.PyUFuncGenericFunction>loop_d_dd__As_dd_d
+ufunc_gammainc_types[0] = <char>NPY_FLOAT
+ufunc_gammainc_types[1] = <char>NPY_FLOAT
+ufunc_gammainc_types[2] = <char>NPY_FLOAT
+ufunc_gammainc_types[3] = <char>NPY_DOUBLE
+ufunc_gammainc_types[4] = <char>NPY_DOUBLE
+ufunc_gammainc_types[5] = <char>NPY_DOUBLE
+ufunc_gammainc_ptr[2*0] = <void*>_func_igam
+ufunc_gammainc_ptr[2*0+1] = <void*>(<char*>"gammainc")
+ufunc_gammainc_ptr[2*1] = <void*>_func_igam
+ufunc_gammainc_ptr[2*1+1] = <void*>(<char*>"gammainc")
+ufunc_gammainc_data[0] = &ufunc_gammainc_ptr[2*0]
+ufunc_gammainc_data[1] = &ufunc_gammainc_ptr[2*1]
+gammainc = np.PyUFunc_FromFuncAndData(ufunc_gammainc_loops, ufunc_gammainc_data, ufunc_gammainc_types, 2, 2, 1, 0, "gammainc", ufunc_gammainc_doc, 0)
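+# Each ufunc_gammainc_data[i] points at a (kernel, name) pair inside
+# ufunc_gammainc_ptr -- hence the 2*i / 2*i+1 indexing above. The loop
+# bodies defined earlier in this file unpack that pair roughly as:
+#
+#     cdef void *func = (<void**>data)[0]
+#     cdef char *func_name = <char*>(<void**>data)[1]  # used in error messages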
+
+cdef np.PyUFuncGenericFunction ufunc_gammaincc_loops[2]
+cdef void *ufunc_gammaincc_ptr[4]
+cdef void *ufunc_gammaincc_data[2]
+cdef char ufunc_gammaincc_types[6]
+cdef char *ufunc_gammaincc_doc = (
+    "gammaincc(a, x, out=None)\n"
+    "\n"
+    "Regularized upper incomplete gamma function.\n"
+    "\n"
+    "It is defined as\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    Q(a, x) = \\frac{1}{\\Gamma(a)} \\int_x^\\infty t^{a - 1}e^{-t} dt\n"
+    "\n"
+    "for :math:`a > 0` and :math:`x \\geq 0`. See [dlmf]_ for details.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "a : array_like\n"
+    "    Positive parameter\n"
+    "x : array_like\n"
+    "    Nonnegative argument\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Values of the upper incomplete gamma function\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "The function satisfies the relation ``gammainc(a, x) +\n"
+    "gammaincc(a, x) = 1`` where `gammainc` is the regularized lower\n"
+    "incomplete gamma function.\n"
+    "\n"
+    "The implementation largely follows that of [boost]_.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "gammainc : regularized lower incomplete gamma function\n"
+    "gammaincinv : inverse of the regularized lower incomplete gamma function\n"
+    "gammainccinv : inverse of the regularized upper incomplete gamma function\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [dlmf] NIST Digital Library of Mathematical functions\n"
+    "          https://dlmf.nist.gov/8.2#E4\n"
+    ".. [boost] Maddock et. al., \"Incomplete Gamma Functions\",\n"
+    "   https://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import scipy.special as sc\n"
+    "\n"
+    "It is the survival function of the gamma distribution, so it\n"
+    "starts at 1 and monotonically decreases to 0.\n"
+    "\n"
+    ">>> sc.gammaincc(0.5, [0, 1, 10, 100, 1000])\n"
+    "array([1.00000000e+00, 1.57299207e-01, 7.74421643e-06, 2.08848758e-45,\n"
+    "       0.00000000e+00])\n"
+    "\n"
+    "It is equal to one minus the lower incomplete gamma function.\n"
+    "\n"
+    ">>> a, x = 0.5, 0.4\n"
+    ">>> sc.gammaincc(a, x)\n"
+    "0.37109336952269756\n"
+    ">>> 1 - sc.gammainc(a, x)\n"
+    "0.37109336952269756")
+ufunc_gammaincc_loops[0] = <np.PyUFuncGenericFunction>loop_d_dd__As_ff_f
+ufunc_gammaincc_loops[1] = <np.PyUFuncGenericFunction>loop_d_dd__As_dd_d
+ufunc_gammaincc_types[0] = <char>NPY_FLOAT
+ufunc_gammaincc_types[1] = <char>NPY_FLOAT
+ufunc_gammaincc_types[2] = <char>NPY_FLOAT
+ufunc_gammaincc_types[3] = <char>NPY_DOUBLE
+ufunc_gammaincc_types[4] = <char>NPY_DOUBLE
+ufunc_gammaincc_types[5] = <char>NPY_DOUBLE
+ufunc_gammaincc_ptr[2*0] = <void*>_func_igamc
+ufunc_gammaincc_ptr[2*0+1] = <void*>(<char*>"gammaincc")
+ufunc_gammaincc_ptr[2*1] = <void*>_func_igamc
+ufunc_gammaincc_ptr[2*1+1] = <void*>(<char*>"gammaincc")
+ufunc_gammaincc_data[0] = &ufunc_gammaincc_ptr[2*0]
+ufunc_gammaincc_data[1] = &ufunc_gammaincc_ptr[2*1]
+gammaincc = np.PyUFunc_FromFuncAndData(ufunc_gammaincc_loops, ufunc_gammaincc_data, ufunc_gammaincc_types, 2, 2, 1, 0, "gammaincc", ufunc_gammaincc_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_gammainccinv_loops[2]
+cdef void *ufunc_gammainccinv_ptr[4]
+cdef void *ufunc_gammainccinv_data[2]
+cdef char ufunc_gammainccinv_types[6]
+cdef char *ufunc_gammainccinv_doc = (
+    "gammainccinv(a, y, out=None)\n"
+    "\n"
+    "Inverse of the regularized upper incomplete gamma function.\n"
+    "\n"
+    "Given an input :math:`y` between 0 and 1, returns :math:`x` such\n"
+    "that :math:`y = Q(a, x)`. Here :math:`Q` is the regularized upper\n"
+    "incomplete gamma function; see `gammaincc`. This is well-defined\n"
+    "because the upper incomplete gamma function is monotonic as can\n"
+    "be seen from its definition in [dlmf]_.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "a : array_like\n"
+    "    Positive parameter\n"
+    "y : array_like\n"
+    "    Argument between 0 and 1, inclusive\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Values of the inverse of the upper incomplete gamma function\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "gammaincc : regularized upper incomplete gamma function\n"
+    "gammainc : regularized lower incomplete gamma function\n"
+    "gammaincinv : inverse of the regularized lower incomplete gamma function\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [dlmf] NIST Digital Library of Mathematical Functions\n"
+    "          https://dlmf.nist.gov/8.2#E4\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import scipy.special as sc\n"
+    "\n"
+    "It starts at infinity and monotonically decreases to 0.\n"
+    "\n"
+    ">>> sc.gammainccinv(0.5, [0, 0.1, 0.5, 1])\n"
+    "array([       inf, 1.35277173, 0.22746821, 0.        ])\n"
+    "\n"
+    "It inverts the upper incomplete gamma function.\n"
+    "\n"
+    ">>> a, x = 0.5, [0, 0.1, 0.5, 1]\n"
+    ">>> sc.gammaincc(a, sc.gammainccinv(a, x))\n"
+    "array([0. , 0.1, 0.5, 1. ])\n"
+    "\n"
+    ">>> a, x = 0.5, [0, 10, 50]\n"
+    ">>> sc.gammainccinv(a, sc.gammaincc(a, x))\n"
+    "array([ 0., 10., 50.])")
+ufunc_gammainccinv_loops[0] = <np.PyUFuncGenericFunction>loop_d_dd__As_ff_f
+ufunc_gammainccinv_loops[1] = <np.PyUFuncGenericFunction>loop_d_dd__As_dd_d
+ufunc_gammainccinv_types[0] = <char>NPY_FLOAT
+ufunc_gammainccinv_types[1] = <char>NPY_FLOAT
+ufunc_gammainccinv_types[2] = <char>NPY_FLOAT
+ufunc_gammainccinv_types[3] = <char>NPY_DOUBLE
+ufunc_gammainccinv_types[4] = <char>NPY_DOUBLE
+ufunc_gammainccinv_types[5] = <char>NPY_DOUBLE
+ufunc_gammainccinv_ptr[2*0] = <void*>_func_igamci
+ufunc_gammainccinv_ptr[2*0+1] = <void*>(<char*>"gammainccinv")
+ufunc_gammainccinv_ptr[2*1] = <void*>_func_igamci
+ufunc_gammainccinv_ptr[2*1+1] = <void*>(<char*>"gammainccinv")
+ufunc_gammainccinv_data[0] = &ufunc_gammainccinv_ptr[2*0]
+ufunc_gammainccinv_data[1] = &ufunc_gammainccinv_ptr[2*1]
+gammainccinv = np.PyUFunc_FromFuncAndData(ufunc_gammainccinv_loops, ufunc_gammainccinv_data, ufunc_gammainccinv_types, 2, 2, 1, 0, "gammainccinv", ufunc_gammainccinv_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_gammaincinv_loops[2]
+cdef void *ufunc_gammaincinv_ptr[4]
+cdef void *ufunc_gammaincinv_data[2]
+cdef char ufunc_gammaincinv_types[6]
+cdef char *ufunc_gammaincinv_doc = (
+    "gammaincinv(a, y, out=None)\n"
+    "\n"
+    "Inverse to the regularized lower incomplete gamma function.\n"
+    "\n"
+    "Given an input :math:`y` between 0 and 1, returns :math:`x` such\n"
+    "that :math:`y = P(a, x)`. Here :math:`P` is the regularized lower\n"
+    "incomplete gamma function; see `gammainc`. This is well-defined\n"
+    "because the lower incomplete gamma function is monotonic as can be\n"
+    "seen from its definition in [dlmf]_.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "a : array_like\n"
+    "    Positive parameter\n"
+    "y : array_like\n"
+    "    Parameter between 0 and 1, inclusive\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Values of the inverse of the lower incomplete gamma function\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "gammainc : regularized lower incomplete gamma function\n"
+    "gammaincc : regularized upper incomplete gamma function\n"
+    "gammainccinv : inverse of the regularized upper incomplete gamma function\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [dlmf] NIST Digital Library of Mathematical Functions\n"
+    "          https://dlmf.nist.gov/8.2#E4\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import scipy.special as sc\n"
+    "\n"
+    "It starts at 0 and monotonically increases to infinity.\n"
+    "\n"
+    ">>> sc.gammaincinv(0.5, [0, 0.1 ,0.5, 1])\n"
+    "array([0.        , 0.00789539, 0.22746821,        inf])\n"
+    "\n"
+    "It inverts the lower incomplete gamma function.\n"
+    "\n"
+    ">>> a, x = 0.5, [0, 0.1, 0.5, 1]\n"
+    ">>> sc.gammainc(a, sc.gammaincinv(a, x))\n"
+    "array([0. , 0.1, 0.5, 1. ])\n"
+    "\n"
+    ">>> a, x = 0.5, [0, 10, 25]\n"
+    ">>> sc.gammaincinv(a, sc.gammainc(a, x))\n"
+    "array([ 0.        , 10.        , 25.00001465])")
+ufunc_gammaincinv_loops[0] = <np.PyUFuncGenericFunction>loop_d_dd__As_ff_f
+ufunc_gammaincinv_loops[1] = <np.PyUFuncGenericFunction>loop_d_dd__As_dd_d
+ufunc_gammaincinv_types[0] = <char>NPY_FLOAT
+ufunc_gammaincinv_types[1] = <char>NPY_FLOAT
+ufunc_gammaincinv_types[2] = <char>NPY_FLOAT
+ufunc_gammaincinv_types[3] = <char>NPY_DOUBLE
+ufunc_gammaincinv_types[4] = <char>NPY_DOUBLE
+ufunc_gammaincinv_types[5] = <char>NPY_DOUBLE
+ufunc_gammaincinv_ptr[2*0] = <void*>_func_igami
+ufunc_gammaincinv_ptr[2*0+1] = <void*>(<char*>"gammaincinv")
+ufunc_gammaincinv_ptr[2*1] = <void*>_func_igami
+ufunc_gammaincinv_ptr[2*1+1] = <void*>(<char*>"gammaincinv")
+ufunc_gammaincinv_data[0] = &ufunc_gammaincinv_ptr[2*0]
+ufunc_gammaincinv_data[1] = &ufunc_gammaincinv_ptr[2*1]
+gammaincinv = np.PyUFunc_FromFuncAndData(ufunc_gammaincinv_loops, ufunc_gammaincinv_data, ufunc_gammaincinv_types, 2, 2, 1, 0, "gammaincinv", ufunc_gammaincinv_doc, 0)
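+# Usage caveat: the inverse is found by a numerical search, so round trips
+# through gammainc are exact only to search tolerance (note the 25.00001465
+# in the docstring example). An illustrative check:
+#
+#     >>> import numpy as np, scipy.special as sc
+#     >>> x = np.array([0.5, 5.0, 25.0])
+#     >>> np.allclose(sc.gammaincinv(0.5, sc.gammainc(0.5, x)), x)
+#     True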
+
+cdef np.PyUFuncGenericFunction ufunc_gammaln_loops[2]
+cdef void *ufunc_gammaln_ptr[4]
+cdef void *ufunc_gammaln_data[2]
+cdef char ufunc_gammaln_types[4]
+cdef char *ufunc_gammaln_doc = (
+    "gammaln(x, out=None)\n"
+    "\n"
+    "Logarithm of the absolute value of the gamma function.\n"
+    "\n"
+    "Defined as\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "   \\ln(\\lvert\\Gamma(x)\\rvert)\n"
+    "\n"
+    "where :math:`\\Gamma` is the gamma function. For more details on\n"
+    "the gamma function, see [dlmf]_.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Real argument\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Values of the log of the absolute value of gamma\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "gammasgn : sign of the gamma function\n"
+    "loggamma : principal branch of the logarithm of the gamma function\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "It is the same function as the Python standard library function\n"
+    ":func:`math.lgamma`.\n"
+    "\n"
+    "When used in conjunction with `gammasgn`, this function is useful\n"
+    "for working in logspace on the real axis without having to deal\n"
+    "with complex numbers via the relation ``exp(gammaln(x)) =\n"
+    "gammasgn(x) * gamma(x)``.\n"
+    "\n"
+    "For complex-valued log-gamma, use `loggamma` instead of `gammaln`.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [dlmf] NIST Digital Library of Mathematical Functions\n"
+    "          https://dlmf.nist.gov/5\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> import scipy.special as sc\n"
+    "\n"
+    "It has two positive zeros.\n"
+    "\n"
+    ">>> sc.gammaln([1, 2])\n"
+    "array([0., 0.])\n"
+    "\n"
+    "It has poles at nonpositive integers.\n"
+    "\n"
+    ">>> sc.gammaln([0, -1, -2, -3, -4])\n"
+    "array([inf, inf, inf, inf, inf])\n"
+    "\n"
+    "It asymptotically approaches ``x * log(x)`` (Stirling's formula).\n"
+    "\n"
+    ">>> x = np.array([1e10, 1e20, 1e40, 1e80])\n"
+    ">>> sc.gammaln(x)\n"
+    "array([2.20258509e+11, 4.50517019e+21, 9.11034037e+41, 1.83206807e+82])\n"
+    ">>> x * np.log(x)\n"
+    "array([2.30258509e+11, 4.60517019e+21, 9.21034037e+41, 1.84206807e+82])")
+ufunc_gammaln_loops[0] = <np.PyUFuncGenericFunction>loop_d_d__As_f_f
+ufunc_gammaln_loops[1] = <np.PyUFuncGenericFunction>loop_d_d__As_d_d
+ufunc_gammaln_types[0] = <char>NPY_FLOAT
+ufunc_gammaln_types[1] = <char>NPY_FLOAT
+ufunc_gammaln_types[2] = <char>NPY_DOUBLE
+ufunc_gammaln_types[3] = <char>NPY_DOUBLE
+ufunc_gammaln_ptr[2*0] = <void*>_func_lgam
+ufunc_gammaln_ptr[2*0+1] = <void*>(<char*>"gammaln")
+ufunc_gammaln_ptr[2*1] = <void*>_func_lgam
+ufunc_gammaln_ptr[2*1+1] = <void*>(<char*>"gammaln")
+ufunc_gammaln_data[0] = &ufunc_gammaln_ptr[2*0]
+ufunc_gammaln_data[1] = &ufunc_gammaln_ptr[2*1]
+gammaln = np.PyUFunc_FromFuncAndData(ufunc_gammaln_loops, ufunc_gammaln_data, ufunc_gammaln_types, 2, 1, 1, 0, "gammaln", ufunc_gammaln_doc, 0)
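+# A common pattern with gammaln is stable log-space combinatorics, e.g.
+# log C(n, k) = gammaln(n+1) - gammaln(k+1) - gammaln(n-k+1). Illustrative
+# sketch (C(1000, 500) itself overflows a float64):
+#
+#     >>> import scipy.special as sc
+#     >>> sc.gammaln(1001) - 2 * sc.gammaln(501)  # log C(1000, 500), about 689.47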
+
+cdef np.PyUFuncGenericFunction ufunc_gammasgn_loops[2]
+cdef void *ufunc_gammasgn_ptr[4]
+cdef void *ufunc_gammasgn_data[2]
+cdef char ufunc_gammasgn_types[4]
+cdef char *ufunc_gammasgn_doc = (
+    "gammasgn(x, out=None)\n"
+    "\n"
+    "Sign of the gamma function.\n"
+    "\n"
+    "It is defined as\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "   \\text{gammasgn}(x) =\n"
+    "   \\begin{cases}\n"
+    "     +1 & \\Gamma(x) > 0 \\\\\n"
+    "     -1 & \\Gamma(x) < 0\n"
+    "   \\end{cases}\n"
+    "\n"
+    "where :math:`\\Gamma` is the gamma function; see `gamma`. This\n"
+    "definition is complete since the gamma function is never zero;\n"
+    "see the discussion after [dlmf]_.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Real argument\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Sign of the gamma function\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "The gamma function can be computed as ``gammasgn(x) *\n"
+    "np.exp(gammaln(x))``.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "gamma : the gamma function\n"
+    "gammaln : log of the absolute value of the gamma function\n"
+    "loggamma : analytic continuation of the log of the gamma function\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [dlmf] NIST Digital Library of Mathematical Functions\n"
+    "          https://dlmf.nist.gov/5.2#E1\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> import scipy.special as sc\n"
+    "\n"
+    "It is 1 for `x > 0`.\n"
+    "\n"
+    ">>> sc.gammasgn([1, 2, 3, 4])\n"
+    "array([1., 1., 1., 1.])\n"
+    "\n"
+    "It alternates between -1 and 1 for negative integers.\n"
+    "\n"
+    ">>> sc.gammasgn([-0.5, -1.5, -2.5, -3.5])\n"
+    "array([-1.,  1., -1.,  1.])\n"
+    "\n"
+    "It can be used to compute the gamma function.\n"
+    "\n"
+    ">>> x = [1.5, 0.5, -0.5, -1.5]\n"
+    ">>> sc.gammasgn(x) * np.exp(sc.gammaln(x))\n"
+    "array([ 0.88622693,  1.77245385, -3.5449077 ,  2.3632718 ])\n"
+    ">>> sc.gamma(x)\n"
+    "array([ 0.88622693,  1.77245385, -3.5449077 ,  2.3632718 ])")
+ufunc_gammasgn_loops[0] = <np.PyUFuncGenericFunction>loop_d_d__As_f_f
+ufunc_gammasgn_loops[1] = <np.PyUFuncGenericFunction>loop_d_d__As_d_d
+ufunc_gammasgn_types[0] = <char>NPY_FLOAT
+ufunc_gammasgn_types[1] = <char>NPY_FLOAT
+ufunc_gammasgn_types[2] = <char>NPY_DOUBLE
+ufunc_gammasgn_types[3] = <char>NPY_DOUBLE
+ufunc_gammasgn_ptr[2*0] = <void*>_func_gammasgn
+ufunc_gammasgn_ptr[2*0+1] = <void*>(<char*>"gammasgn")
+ufunc_gammasgn_ptr[2*1] = <void*>_func_gammasgn
+ufunc_gammasgn_ptr[2*1+1] = <void*>(<char*>"gammasgn")
+ufunc_gammasgn_data[0] = &ufunc_gammasgn_ptr[2*0]
+ufunc_gammasgn_data[1] = &ufunc_gammasgn_ptr[2*1]
+gammasgn = np.PyUFunc_FromFuncAndData(ufunc_gammasgn_loops, ufunc_gammasgn_data, ufunc_gammasgn_types, 2, 1, 1, 0, "gammasgn", ufunc_gammasgn_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_gdtr_loops[2]
+cdef void *ufunc_gdtr_ptr[4]
+cdef void *ufunc_gdtr_data[2]
+cdef char ufunc_gdtr_types[8]
+cdef char *ufunc_gdtr_doc = (
+    "gdtr(a, b, x, out=None)\n"
+    "\n"
+    "Gamma distribution cumulative distribution function.\n"
+    "\n"
+    "Returns the integral from zero to `x` of the gamma probability density\n"
+    "function,\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    F = \\int_0^x \\frac{a^b}{\\Gamma(b)} t^{b-1} e^{-at}\\,dt,\n"
+    "\n"
+    "where :math:`\\Gamma` is the gamma function.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "a : array_like\n"
+    "    The rate parameter of the gamma distribution, sometimes denoted\n"
+    "    :math:`\\beta` (float).  It is also the reciprocal of the scale\n"
+    "    parameter :math:`\\theta`.\n"
+    "b : array_like\n"
+    "    The shape parameter of the gamma distribution, sometimes denoted\n"
+    "    :math:`\\alpha` (float).\n"
+    "x : array_like\n"
+    "    The quantile (upper limit of integration; float).\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "gdtrc : 1 - CDF of the gamma distribution.\n"
+    "scipy.stats.gamma: Gamma distribution\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "F : scalar or ndarray\n"
+    "    The CDF of the gamma distribution with parameters `a` and `b`\n"
+    "    evaluated at `x`.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "The evaluation is carried out using the relation to the incomplete gamma\n"
+    "integral (regularized gamma function).\n"
+    "\n"
+    "Wrapper for the Cephes [1]_ routine `gdtr`. Calling `gdtr` directly can\n"
+    "improve performance compared to the ``cdf`` method of `scipy.stats.gamma`\n"
+    "(see last example below).\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Cephes Mathematical Functions Library,\n"
+    "       http://www.netlib.org/cephes/\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Compute the function for ``a=1``, ``b=2`` at ``x=5``.\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import gdtr\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> gdtr(1., 2., 5.)\n"
+    "0.9595723180054873\n"
+    "\n"
+    "Compute the function for ``a=1`` and ``b=2`` at several points by\n"
+    "providing a NumPy array for `x`.\n"
+    "\n"
+    ">>> xvalues = np.array([1., 2., 3., 4])\n"
+    ">>> gdtr(1., 1., xvalues)\n"
+    "array([0.63212056, 0.86466472, 0.95021293, 0.98168436])\n"
+    "\n"
+    "`gdtr` can evaluate different parameter sets by providing arrays with\n"
+    "broadcasting compatible shapes for `a`, `b` and `x`. Here we compute the\n"
+    "function for three different `a` at four positions `x` and ``b=3``,\n"
+    "resulting in a 3x4 array.\n"
+    "\n"
+    ">>> a = np.array([[0.5], [1.5], [2.5]])\n"
+    ">>> x = np.array([1., 2., 3., 4])\n"
+    ">>> a.shape, x.shape\n"
+    "((3, 1), (4,))\n"
+    "\n"
+    ">>> gdtr(a, 3., x)\n"
+    "array([[0.01438768, 0.0803014 , 0.19115317, 0.32332358],\n"
+    "       [0.19115317, 0.57680992, 0.82642193, 0.9380312 ],\n"
+    "       [0.45618688, 0.87534798, 0.97974328, 0.9972306 ]])\n"
+    "\n"
+    "Plot the function for four different parameter sets.\n"
+    "\n"
+    ">>> a_parameters = [0.3, 1, 2, 6]\n"
+    ">>> b_parameters = [2, 10, 15, 20]\n"
+    ">>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']\n"
+    ">>> parameters_list = list(zip(a_parameters, b_parameters, linestyles))\n"
+    ">>> x = np.linspace(0, 30, 1000)\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> for parameter_set in parameters_list:\n"
+    "...     a, b, style = parameter_set\n"
+    "...     gdtr_vals = gdtr(a, b, x)\n"
+    "...     ax.plot(x, gdtr_vals, label=f\"$a= {a},\\, b={b}$\", ls=style)\n"
+    ">>> ax.legend()\n"
+    ">>> ax.set_xlabel(\"$x$\")\n"
+    ">>> ax.set_title(\"Gamma distribution cumulative distribution function\")\n"
+    ">>> plt.show()\n"
+    "\n"
+    "The gamma distribution is also available as `scipy.stats.gamma`. Using\n"
+    "`gdtr` directly can be much faster than calling the ``cdf`` method of\n"
+    "`scipy.stats.gamma`, especially for small arrays or individual values.\n"
+    "To get the same results one must use the following parametrization:\n"
+    "``stats.gamma(b, scale=1/a).cdf(x)=gdtr(a, b, x)``.\n"
+    "\n"
+    ">>> from scipy.stats import gamma\n"
+    ">>> a = 2.\n"
+    ">>> b = 3\n"
+    ">>> x = 1.\n"
+    ">>> gdtr_result = gdtr(a, b, x)  # this will often be faster than below\n"
+    ">>> gamma_dist_result = gamma(b, scale=1/a).cdf(x)\n"
+    ">>> gdtr_result == gamma_dist_result  # test that results are equal\n"
+    "True")
+ufunc_gdtr_loops[0] = <np.PyUFuncGenericFunction>loop_d_ddd__As_fff_f
+ufunc_gdtr_loops[1] = <np.PyUFuncGenericFunction>loop_d_ddd__As_ddd_d
+ufunc_gdtr_types[0] = <char>NPY_FLOAT
+ufunc_gdtr_types[1] = <char>NPY_FLOAT
+ufunc_gdtr_types[2] = <char>NPY_FLOAT
+ufunc_gdtr_types[3] = <char>NPY_FLOAT
+ufunc_gdtr_types[4] = <char>NPY_DOUBLE
+ufunc_gdtr_types[5] = <char>NPY_DOUBLE
+ufunc_gdtr_types[6] = <char>NPY_DOUBLE
+ufunc_gdtr_types[7] = <char>NPY_DOUBLE
+ufunc_gdtr_ptr[2*0] = <void*>_func_gdtr
+ufunc_gdtr_ptr[2*0+1] = <void*>(<char*>"gdtr")
+ufunc_gdtr_ptr[2*1] = <void*>_func_gdtr
+ufunc_gdtr_ptr[2*1+1] = <void*>(<char*>"gdtr")
+ufunc_gdtr_data[0] = &ufunc_gdtr_ptr[2*0]
+ufunc_gdtr_data[1] = &ufunc_gdtr_ptr[2*1]
+gdtr = np.PyUFunc_FromFuncAndData(ufunc_gdtr_loops, ufunc_gdtr_data, ufunc_gdtr_types, 2, 3, 1, 0, "gdtr", ufunc_gdtr_doc, 0)
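+# The Notes above reduce gdtr to the regularized incomplete gamma integral;
+# concretely, substituting u = a*t in the integral gives
+# gdtr(a, b, x) = gammainc(b, a*x). Illustrative check:
+#
+#     >>> import numpy as np, scipy.special as sc
+#     >>> np.isclose(sc.gdtr(1.2, 3.4, 5.6), sc.gammainc(3.4, 1.2 * 5.6))
+#     True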
+
+cdef np.PyUFuncGenericFunction ufunc_gdtrc_loops[2]
+cdef void *ufunc_gdtrc_ptr[4]
+cdef void *ufunc_gdtrc_data[2]
+cdef char ufunc_gdtrc_types[8]
+cdef char *ufunc_gdtrc_doc = (
+    "gdtrc(a, b, x, out=None)\n"
+    "\n"
+    "Gamma distribution survival function.\n"
+    "\n"
+    "Integral from `x` to infinity of the gamma probability density function,\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    F = \\int_x^\\infty \\frac{a^b}{\\Gamma(b)} t^{b-1} e^{-at}\\,dt,\n"
+    "\n"
+    "where :math:`\\Gamma` is the gamma function.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "a : array_like\n"
+    "    The rate parameter of the gamma distribution, sometimes denoted\n"
+    "    :math:`\\beta` (float). It is also the reciprocal of the scale\n"
+    "    parameter :math:`\\theta`.\n"
+    "b : array_like\n"
+    "    The shape parameter of the gamma distribution, sometimes denoted\n"
+    "    :math:`\\alpha` (float).\n"
+    "x : array_like\n"
+    "    The quantile (lower limit of integration; float).\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "F : scalar or ndarray\n"
+    "    The survival function of the gamma distribution with parameters `a`\n"
+    "    and `b` evaluated at `x`.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "gdtr: Gamma distribution cumulative distribution function\n"
+    "scipy.stats.gamma: Gamma distribution\n"
+    "gdtrix\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "The evaluation is carried out using the relation to the incomplete gamma\n"
+    "integral (regularized gamma function).\n"
+    "\n"
+    "Wrapper for the Cephes [1]_ routine `gdtrc`. Calling `gdtrc` directly can\n"
+    "improve performance compared to the ``sf`` method of `scipy.stats.gamma`\n"
+    "(see last example below).\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Cephes Mathematical Functions Library,\n"
+    "       http://www.netlib.org/cephes/\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Compute the function for ``a=1`` and ``b=2`` at ``x=5``.\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import gdtrc\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> gdtrc(1., 2., 5.)\n"
+    "0.04042768199451279\n"
+    "\n"
+    "Compute the function for ``a=1``, ``b=2`` at several points by providing\n"
+    "a NumPy array for `x`.\n"
+    "\n"
+    ">>> xvalues = np.array([1., 2., 3., 4])\n"
+    ">>> gdtrc(1., 1., xvalues)\n"
+    "array([0.36787944, 0.13533528, 0.04978707, 0.01831564])\n"
+    "\n"
+    "`gdtrc` can evaluate different parameter sets by providing arrays with\n"
+    "broadcasting compatible shapes for `a`, `b` and `x`. Here we compute the\n"
+    "function for three different `a` at four positions `x` and ``b=3``,\n"
+    "resulting in a 3x4 array.\n"
+    "\n"
+    ">>> a = np.array([[0.5], [1.5], [2.5]])\n"
+    ">>> x = np.array([1., 2., 3., 4])\n"
+    ">>> a.shape, x.shape\n"
+    "((3, 1), (4,))\n"
+    "\n"
+    ">>> gdtrc(a, 3., x)\n"
+    "array([[0.98561232, 0.9196986 , 0.80884683, 0.67667642],\n"
+    "       [0.80884683, 0.42319008, 0.17357807, 0.0619688 ],\n"
+    "       [0.54381312, 0.12465202, 0.02025672, 0.0027694 ]])\n"
+    "\n"
+    "Plot the function for four different parameter sets.\n"
+    "\n"
+    ">>> a_parameters = [0.3, 1, 2, 6]\n"
+    ">>> b_parameters = [2, 10, 15, 20]\n"
+    ">>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']\n"
+    ">>> parameters_list = list(zip(a_parameters, b_parameters, linestyles))\n"
+    ">>> x = np.linspace(0, 30, 1000)\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> for parameter_set in parameters_list:\n"
+    "...     a, b, style = parameter_set\n"
+    "...     gdtrc_vals = gdtrc(a, b, x)\n"
+    "...     ax.plot(x, gdtrc_vals, label=f\"$a= {a},\\, b={b}$\", ls=style)\n"
+    ">>> ax.legend()\n"
+    ">>> ax.set_xlabel(\"$x$\")\n"
+    ">>> ax.set_title(\"Gamma distribution survival function\")\n"
+    ">>> plt.show()\n"
+    "\n"
+    "The gamma distribution is also available as `scipy.stats.gamma`.\n"
+    "Using `gdtrc` directly can be much faster than calling the ``sf`` method\n"
+    "of `scipy.stats.gamma`, especially for small arrays or individual\n"
+    "values. To get the same results one must use the following parametrization:\n"
+    "``stats.gamma(b, scale=1/a).sf(x)=gdtrc(a, b, x)``.\n"
+    "\n"
+    ">>> from scipy.stats import gamma\n"
+    ">>> a = 2\n"
+    ">>> b = 3\n"
+    ">>> x = 1.\n"
+    ">>> gdtrc_result = gdtrc(a, b, x)  # this will often be faster than below\n"
+    ">>> gamma_dist_result = gamma(b, scale=1/a).sf(x)\n"
+    ">>> gdtrc_result == gamma_dist_result  # test that results are equal\n"
+    "True")
+ufunc_gdtrc_loops[0] = <np.PyUFuncGenericFunction>loop_d_ddd__As_fff_f
+ufunc_gdtrc_loops[1] = <np.PyUFuncGenericFunction>loop_d_ddd__As_ddd_d
+ufunc_gdtrc_types[0] = <char>NPY_FLOAT
+ufunc_gdtrc_types[1] = <char>NPY_FLOAT
+ufunc_gdtrc_types[2] = <char>NPY_FLOAT
+ufunc_gdtrc_types[3] = <char>NPY_FLOAT
+ufunc_gdtrc_types[4] = <char>NPY_DOUBLE
+ufunc_gdtrc_types[5] = <char>NPY_DOUBLE
+ufunc_gdtrc_types[6] = <char>NPY_DOUBLE
+ufunc_gdtrc_types[7] = <char>NPY_DOUBLE
+ufunc_gdtrc_ptr[2*0] = <void*>_func_gdtrc
+ufunc_gdtrc_ptr[2*0+1] = <void*>(<char*>"gdtrc")
+ufunc_gdtrc_ptr[2*1] = <void*>_func_gdtrc
+ufunc_gdtrc_ptr[2*1+1] = <void*>(<char*>"gdtrc")
+ufunc_gdtrc_data[0] = &ufunc_gdtrc_ptr[2*0]
+ufunc_gdtrc_data[1] = &ufunc_gdtrc_ptr[2*1]
+gdtrc = np.PyUFunc_FromFuncAndData(ufunc_gdtrc_loops, ufunc_gdtrc_data, ufunc_gdtrc_types, 2, 3, 1, 0, "gdtrc", ufunc_gdtrc_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_gdtria_loops[2]
+cdef void *ufunc_gdtria_ptr[4]
+cdef void *ufunc_gdtria_data[2]
+cdef char ufunc_gdtria_types[8]
+cdef char *ufunc_gdtria_doc = (
+    "gdtria(p, b, x, out=None)\n"
+    "\n"
+    "Inverse of `gdtr` vs a.\n"
+    "\n"
+    "Returns the inverse with respect to the parameter `a` of ``p =\n"
+    "gdtr(a, b, x)``, the cumulative distribution function of the gamma\n"
+    "distribution.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "p : array_like\n"
+    "    Probability values.\n"
+    "b : array_like\n"
+    "    `b` parameter values of `gdtr(a, b, x)`. `b` is the \"shape\" parameter\n"
+    "    of the gamma distribution.\n"
+    "x : array_like\n"
+    "    Nonnegative real values, from the domain of the gamma distribution.\n"
+    "out : ndarray, optional\n"
+    "    If a fourth argument is given, it must be a numpy.ndarray whose size\n"
+    "    matches the broadcast result of `a`, `b` and `x`.  `out` is then the\n"
+    "    array returned by the function.\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "a : scalar or ndarray\n"
+    "    Values of the `a` parameter such that `p = gdtr(a, b, x)`.  `1/a`\n"
+    "    is the \"scale\" parameter of the gamma distribution.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "gdtr : CDF of the gamma distribution.\n"
+    "gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.\n"
+    "gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.\n"
+    "\n"
+    "The cumulative distribution function `p` is computed using a routine by\n"
+    "DiDinato and Morris [2]_. Computation of `a` involves a search for a value\n"
+    "that produces the desired value of `p`. The search relies on the\n"
+    "monotonicity of `p` with `a`.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Barry Brown, James Lovato, and Kathy Russell,\n"
+    "       CDFLIB: Library of Fortran Routines for Cumulative Distribution\n"
+    "       Functions, Inverses, and Other Parameters.\n"
+    ".. [2] DiDinato, A. R. and Morris, A. H.,\n"
+    "       Computation of the incomplete gamma function ratios and their\n"
+    "       inverse.  ACM Trans. Math. Softw. 12 (1986), 377-393.\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "First evaluate `gdtr`.\n"
+    "\n"
+    ">>> from scipy.special import gdtr, gdtria\n"
+    ">>> p = gdtr(1.2, 3.4, 5.6)\n"
+    ">>> print(p)\n"
+    "0.94378087442\n"
+    "\n"
+    "Verify the inverse.\n"
+    "\n"
+    ">>> gdtria(p, 3.4, 5.6)\n"
+    "1.2")
+ufunc_gdtria_loops[0] = <np.PyUFuncGenericFunction>loop_d_ddd__As_fff_f
+ufunc_gdtria_loops[1] = <np.PyUFuncGenericFunction>loop_d_ddd__As_ddd_d
+ufunc_gdtria_types[0] = <char>NPY_FLOAT
+ufunc_gdtria_types[1] = <char>NPY_FLOAT
+ufunc_gdtria_types[2] = <char>NPY_FLOAT
+ufunc_gdtria_types[3] = <char>NPY_FLOAT
+ufunc_gdtria_types[4] = <char>NPY_DOUBLE
+ufunc_gdtria_types[5] = <char>NPY_DOUBLE
+ufunc_gdtria_types[6] = <char>NPY_DOUBLE
+ufunc_gdtria_types[7] = <char>NPY_DOUBLE
+ufunc_gdtria_ptr[2*0] = <void*>_func_cdfgam4_wrap
+ufunc_gdtria_ptr[2*0+1] = <void*>(<char*>"gdtria")
+ufunc_gdtria_ptr[2*1] = <void*>_func_cdfgam4_wrap
+ufunc_gdtria_ptr[2*1+1] = <void*>(<char*>"gdtria")
+ufunc_gdtria_data[0] = &ufunc_gdtria_ptr[2*0]
+ufunc_gdtria_data[1] = &ufunc_gdtria_ptr[2*1]
+gdtria = np.PyUFunc_FromFuncAndData(ufunc_gdtria_loops, ufunc_gdtria_data, ufunc_gdtria_types, 2, 3, 1, 0, "gdtria", ufunc_gdtria_doc, 0)
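+# The CDFLIB search described in the Notes is conceptually a monotone root
+# find in `a`. A rough (and much slower) equivalent using a generic root
+# finder, for intuition only:
+#
+#     >>> from scipy.optimize import brentq
+#     >>> from scipy.special import gdtr
+#     >>> p, b, x = 0.94378087442, 3.4, 5.6
+#     >>> brentq(lambda a: gdtr(a, b, x) - p, 1e-8, 100.0)  # -> about 1.2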
+
+cdef np.PyUFuncGenericFunction ufunc_gdtrib_loops[2]
+cdef void *ufunc_gdtrib_ptr[4]
+cdef void *ufunc_gdtrib_data[2]
+cdef char ufunc_gdtrib_types[8]
+cdef char *ufunc_gdtrib_doc = (
+    "gdtrib(a, p, x, out=None)\n"
+    "\n"
+    "Inverse of `gdtr` vs b.\n"
+    "\n"
+    "Returns the inverse with respect to the parameter `b` of ``p =\n"
+    "gdtr(a, b, x)``, the cumulative distribution function of the gamma\n"
+    "distribution.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "a : array_like\n"
+    "    `a` parameter values of `gdtr(a, b, x)`. `1/a` is the \"scale\"\n"
+    "    parameter of the gamma distribution.\n"
+    "p : array_like\n"
+    "    Probability values.\n"
+    "x : array_like\n"
+    "    Nonnegative real values, from the domain of the gamma distribution.\n"
+    "out : ndarray, optional\n"
+    "    If a fourth argument is given, it must be a numpy.ndarray whose size\n"
+    "    matches the broadcast result of `a`, `b` and `x`.  `out` is then the\n"
+    "    array returned by the function.\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "b : scalar or ndarray\n"
+    "    Values of the `b` parameter such that `p = gdtr(a, b, x)`.  `b` is\n"
+    "    the \"shape\" parameter of the gamma distribution.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "gdtr : CDF of the gamma distribution.\n"
+    "gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.\n"
+    "gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.\n"
+    "\n"
+    "The cumulative distribution function `p` is computed using a routine by\n"
+    "DiDinato and Morris [2]_. Computation of `b` involves a search for a value\n"
+    "that produces the desired value of `p`. The search relies on the\n"
+    "monotonicity of `p` with `b`.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Barry Brown, James Lovato, and Kathy Russell,\n"
+    "       CDFLIB: Library of Fortran Routines for Cumulative Distribution\n"
+    "       Functions, Inverses, and Other Parameters.\n"
+    ".. [2] DiDinato, A. R. and Morris, A. H.,\n"
+    "       Computation of the incomplete gamma function ratios and their\n"
+    "       inverse.  ACM Trans. Math. Softw. 12 (1986), 377-393.\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "First evaluate `gdtr`.\n"
+    "\n"
+    ">>> from scipy.special import gdtr, gdtrib\n"
+    ">>> p = gdtr(1.2, 3.4, 5.6)\n"
+    ">>> print(p)\n"
+    "0.94378087442\n"
+    "\n"
+    "Verify the inverse.\n"
+    "\n"
+    ">>> gdtrib(1.2, p, 5.6)\n"
+    "3.3999999999723882")
+ufunc_gdtrib_loops[0] = <np.PyUFuncGenericFunction>loop_d_ddd__As_fff_f
+ufunc_gdtrib_loops[1] = <np.PyUFuncGenericFunction>loop_d_ddd__As_ddd_d
+ufunc_gdtrib_types[0] = <char>NPY_FLOAT
+ufunc_gdtrib_types[1] = <char>NPY_FLOAT
+ufunc_gdtrib_types[2] = <char>NPY_FLOAT
+ufunc_gdtrib_types[3] = <char>NPY_FLOAT
+ufunc_gdtrib_types[4] = <char>NPY_DOUBLE
+ufunc_gdtrib_types[5] = <char>NPY_DOUBLE
+ufunc_gdtrib_types[6] = <char>NPY_DOUBLE
+ufunc_gdtrib_types[7] = <char>NPY_DOUBLE
+ufunc_gdtrib_ptr[2*0] = <void*>_func_cdfgam3_wrap
+ufunc_gdtrib_ptr[2*0+1] = <void*>(<char*>"gdtrib")
+ufunc_gdtrib_ptr[2*1] = <void*>_func_cdfgam3_wrap
+ufunc_gdtrib_ptr[2*1+1] = <void*>(<char*>"gdtrib")
+ufunc_gdtrib_data[0] = &ufunc_gdtrib_ptr[2*0]
+ufunc_gdtrib_data[1] = &ufunc_gdtrib_ptr[2*1]
+gdtrib = np.PyUFunc_FromFuncAndData(ufunc_gdtrib_loops, ufunc_gdtrib_data, ufunc_gdtrib_types, 2, 3, 1, 0, "gdtrib", ufunc_gdtrib_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_gdtrix_loops[2]
+cdef void *ufunc_gdtrix_ptr[4]
+cdef void *ufunc_gdtrix_data[2]
+cdef char ufunc_gdtrix_types[8]
+cdef char *ufunc_gdtrix_doc = (
+    "gdtrix(a, b, p, out=None)\n"
+    "\n"
+    "Inverse of `gdtr` vs x.\n"
+    "\n"
+    "Returns the inverse with respect to the parameter `x` of ``p =\n"
+    "gdtr(a, b, x)``, the cumulative distribution function of the gamma\n"
+    "distribution. This is also known as the pth quantile of the\n"
+    "distribution.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "a : array_like\n"
+    "    `a` parameter values of `gdtr(a, b, x)`. `1/a` is the \"scale\"\n"
+    "    parameter of the gamma distribution.\n"
+    "b : array_like\n"
+    "    `b` parameter values of `gdtr(a, b, x)`. `b` is the \"shape\" parameter\n"
+    "    of the gamma distribution.\n"
+    "p : array_like\n"
+    "    Probability values.\n"
+    "out : ndarray, optional\n"
+    "    If a fourth argument is given, it must be a numpy.ndarray whose size\n"
+    "    matches the broadcast result of `a`, `b` and `x`. `out` is then the\n"
+    "    array returned by the function.\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "x : scalar or ndarray\n"
+    "    Values of the `x` parameter such that `p = gdtr(a, b, x)`.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "gdtr : CDF of the gamma distribution.\n"
+    "gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.\n"
+    "gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.\n"
+    "\n"
+    "The cumulative distribution function `p` is computed using a routine by\n"
+    "DiDinato and Morris [2]_. Computation of `x` involves a search for a value\n"
+    "that produces the desired value of `p`. The search relies on the\n"
+    "monotonicity of `p` with `x`.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Barry Brown, James Lovato, and Kathy Russell,\n"
+    "       CDFLIB: Library of Fortran Routines for Cumulative Distribution\n"
+    "       Functions, Inverses, and Other Parameters.\n"
+    ".. [2] DiDinato, A. R. and Morris, A. H.,\n"
+    "       Computation of the incomplete gamma function ratios and their\n"
+    "       inverse.  ACM Trans. Math. Softw. 12 (1986), 377-393.\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "First evaluate `gdtr`.\n"
+    "\n"
+    ">>> from scipy.special import gdtr, gdtrix\n"
+    ">>> p = gdtr(1.2, 3.4, 5.6)\n"
+    ">>> print(p)\n"
+    "0.94378087442\n"
+    "\n"
+    "Verify the inverse.\n"
+    "\n"
+    ">>> gdtrix(1.2, 3.4, p)\n"
+    "5.5999999999999996")
+ufunc_gdtrix_loops[0] = <np.PyUFuncGenericFunction>loop_d_ddd__As_fff_f
+ufunc_gdtrix_loops[1] = <np.PyUFuncGenericFunction>loop_d_ddd__As_ddd_d
+ufunc_gdtrix_types[0] = <char>NPY_FLOAT
+ufunc_gdtrix_types[1] = <char>NPY_FLOAT
+ufunc_gdtrix_types[2] = <char>NPY_FLOAT
+ufunc_gdtrix_types[3] = <char>NPY_FLOAT
+ufunc_gdtrix_types[4] = <char>NPY_DOUBLE
+ufunc_gdtrix_types[5] = <char>NPY_DOUBLE
+ufunc_gdtrix_types[6] = <char>NPY_DOUBLE
+ufunc_gdtrix_types[7] = <char>NPY_DOUBLE
+ufunc_gdtrix_ptr[2*0] = <void*>_func_cdfgam2_wrap
+ufunc_gdtrix_ptr[2*0+1] = <void*>(<char*>"gdtrix")
+ufunc_gdtrix_ptr[2*1] = <void*>_func_cdfgam2_wrap
+ufunc_gdtrix_ptr[2*1+1] = <void*>(<char*>"gdtrix")
+ufunc_gdtrix_data[0] = &ufunc_gdtrix_ptr[2*0]
+ufunc_gdtrix_data[1] = &ufunc_gdtrix_ptr[2*1]
+gdtrix = np.PyUFunc_FromFuncAndData(ufunc_gdtrix_loops, ufunc_gdtrix_data, ufunc_gdtrix_types, 2, 3, 1, 0, "gdtrix", ufunc_gdtrix_doc, 0)
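+# Since gdtrix inverts the CDF in x, it is the quantile function; under the
+# parametrization noted in the gdtr docstring it should match the ppf of
+# scipy.stats.gamma (illustrative check):
+#
+#     >>> import numpy as np
+#     >>> from scipy.stats import gamma
+#     >>> from scipy.special import gdtrix
+#     >>> a, b, p = 1.2, 3.4, 0.94378087442
+#     >>> np.isclose(gdtrix(a, b, p), gamma(b, scale=1/a).ppf(p))
+#     True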
+
+cdef np.PyUFuncGenericFunction ufunc_hankel1_loops[2]
+cdef void *ufunc_hankel1_ptr[4]
+cdef void *ufunc_hankel1_data[2]
+cdef char ufunc_hankel1_types[6]
+cdef char *ufunc_hankel1_doc = (
+    "hankel1(v, z, out=None)\n"
+    "\n"
+    "Hankel function of the first kind\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "v : array_like\n"
+    "    Order (float).\n"
+    "z : array_like\n"
+    "    Argument (float or complex).\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Values of the Hankel function of the first kind.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the\n"
+    "computation using the relation,\n"
+    "\n"
+    ".. math:: H^{(1)}_v(z) = \\frac{2}{\\imath\\pi} \\exp(-\\imath \\pi v/2) K_v(z \\exp(-\\imath\\pi/2))\n"
+    "\n"
+    "where :math:`K_v` is the modified Bessel function of the second kind.\n"
+    "For negative orders, the relation\n"
+    "\n"
+    ".. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \\exp(\\imath\\pi v)\n"
+    "\n"
+    "is used.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "hankel1e : ndarray\n"
+    "    This function with leading exponential behavior stripped off.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Donald E. Amos, \"AMOS, A Portable Package for Bessel Functions\n"
+    "       of a Complex Argument and Nonnegative Order\",\n"
+    "       http://netlib.org/amos/")
+ufunc_hankel1_loops[0] = <np.PyUFuncGenericFunction>loop_D_dD__As_fF_F
+ufunc_hankel1_loops[1] = <np.PyUFuncGenericFunction>loop_D_dD__As_dD_D
+ufunc_hankel1_types[0] = <char>NPY_FLOAT
+ufunc_hankel1_types[1] = <char>NPY_CFLOAT
+ufunc_hankel1_types[2] = <char>NPY_CFLOAT
+ufunc_hankel1_types[3] = <char>NPY_DOUBLE
+ufunc_hankel1_types[4] = <char>NPY_CDOUBLE
+ufunc_hankel1_types[5] = <char>NPY_CDOUBLE
+ufunc_hankel1_ptr[2*0] = <void*>_func_cbesh_wrap1
+ufunc_hankel1_ptr[2*0+1] = <void*>(<char*>"hankel1")
+ufunc_hankel1_ptr[2*1] = <void*>_func_cbesh_wrap1
+ufunc_hankel1_ptr[2*1+1] = <void*>(<char*>"hankel1")
+ufunc_hankel1_data[0] = &ufunc_hankel1_ptr[2*0]
+ufunc_hankel1_data[1] = &ufunc_hankel1_ptr[2*1]
+hankel1 = np.PyUFunc_FromFuncAndData(ufunc_hankel1_loops, ufunc_hankel1_data, ufunc_hankel1_types, 2, 2, 1, 0, "hankel1", ufunc_hankel1_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_hankel1e_loops[2]
+cdef void *ufunc_hankel1e_ptr[4]
+cdef void *ufunc_hankel1e_data[2]
+cdef char ufunc_hankel1e_types[6]
+cdef char *ufunc_hankel1e_doc = (
+    "hankel1e(v, z, out=None)\n"
+    "\n"
+    "Exponentially scaled Hankel function of the first kind\n"
+    "\n"
+    "Defined as::\n"
+    "\n"
+    "    hankel1e(v, z) = hankel1(v, z) * exp(-1j * z)\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "v : array_like\n"
+    "    Order (float).\n"
+    "z : array_like\n"
+    "    Argument (float or complex).\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Values of the exponentially scaled Hankel function.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the\n"
+    "computation using the relation,\n"
+    "\n"
+    ".. math:: H^{(1)}_v(z) = \\frac{2}{\\imath\\pi} \\exp(-\\imath \\pi v/2) K_v(z \\exp(-\\imath\\pi/2))\n"
+    "\n"
+    "where :math:`K_v` is the modified Bessel function of the second kind.\n"
+    "For negative orders, the relation\n"
+    "\n"
+    ".. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \\exp(\\imath\\pi v)\n"
+    "\n"
+    "is used.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Donald E. Amos, \"AMOS, A Portable Package for Bessel Functions\n"
+    "       of a Complex Argument and Nonnegative Order\",\n"
+    "       http://netlib.org/amos/")
+ufunc_hankel1e_loops[0] = <np.PyUFuncGenericFunction>loop_D_dD__As_fF_F
+ufunc_hankel1e_loops[1] = <np.PyUFuncGenericFunction>loop_D_dD__As_dD_D
+ufunc_hankel1e_types[0] = <char>NPY_FLOAT
+ufunc_hankel1e_types[1] = <char>NPY_CFLOAT
+ufunc_hankel1e_types[2] = <char>NPY_CFLOAT
+ufunc_hankel1e_types[3] = <char>NPY_DOUBLE
+ufunc_hankel1e_types[4] = <char>NPY_CDOUBLE
+ufunc_hankel1e_types[5] = <char>NPY_CDOUBLE
+ufunc_hankel1e_ptr[2*0] = <void*>_func_cbesh_wrap1_e
+ufunc_hankel1e_ptr[2*0+1] = <void*>(<char*>"hankel1e")
+ufunc_hankel1e_ptr[2*1] = <void*>_func_cbesh_wrap1_e
+ufunc_hankel1e_ptr[2*1+1] = <void*>(<char*>"hankel1e")
+ufunc_hankel1e_data[0] = &ufunc_hankel1e_ptr[2*0]
+ufunc_hankel1e_data[1] = &ufunc_hankel1e_ptr[2*1]
+hankel1e = np.PyUFunc_FromFuncAndData(ufunc_hankel1e_loops, ufunc_hankel1e_data, ufunc_hankel1e_types, 2, 2, 1, 0, "hankel1e", ufunc_hankel1e_doc, 0)
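+# The scaling in the docstring (hankel1e = hankel1 * exp(-1j*z)) removes the
+# exponential factor that under- or overflows for arguments with large
+# imaginary part. Illustrative consistency check at a moderate argument:
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import hankel1, hankel1e
+#     >>> v, z = 1.0, 2.0 + 3.0j
+#     >>> np.isclose(hankel1e(v, z), hankel1(v, z) * np.exp(-1j * z))
+#     True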
+
+cdef np.PyUFuncGenericFunction ufunc_hankel2_loops[2]
+cdef void *ufunc_hankel2_ptr[4]
+cdef void *ufunc_hankel2_data[2]
+cdef char ufunc_hankel2_types[6]
+cdef char *ufunc_hankel2_doc = (
+    "hankel2(v, z, out=None)\n"
+    "\n"
+    "Hankel function of the second kind\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "v : array_like\n"
+    "    Order (float).\n"
+    "z : array_like\n"
+    "    Argument (float or complex).\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Values of the Hankel function of the second kind.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the\n"
+    "computation using the relation,\n"
+    "\n"
+    ".. math:: H^{(2)}_v(z) = -\\frac{2}{\\imath\\pi} \\exp(\\imath \\pi v/2) K_v(z \\exp(\\imath\\pi/2))\n"
+    "\n"
+    "where :math:`K_v` is the modified Bessel function of the second kind.\n"
+    "For negative orders, the relation\n"
+    "\n"
+    ".. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \\exp(-\\imath\\pi v)\n"
+    "\n"
+    "is used.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "hankel2e : this function with leading exponential behavior stripped off.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Donald E. Amos, \"AMOS, A Portable Package for Bessel Functions\n"
+    "       of a Complex Argument and Nonnegative Order\",\n"
+    "       http://netlib.org/amos/")
+ufunc_hankel2_loops[0] = <np.PyUFuncGenericFunction>loop_D_dD__As_fF_F
+ufunc_hankel2_loops[1] = <np.PyUFuncGenericFunction>loop_D_dD__As_dD_D
+ufunc_hankel2_types[0] = <char>NPY_FLOAT
+ufunc_hankel2_types[1] = <char>NPY_CFLOAT
+ufunc_hankel2_types[2] = <char>NPY_CFLOAT
+ufunc_hankel2_types[3] = <char>NPY_DOUBLE
+ufunc_hankel2_types[4] = <char>NPY_CDOUBLE
+ufunc_hankel2_types[5] = <char>NPY_CDOUBLE
+ufunc_hankel2_ptr[2*0] = <void*>_func_cbesh_wrap2
+ufunc_hankel2_ptr[2*0+1] = <void*>(<char*>"hankel2")
+ufunc_hankel2_ptr[2*1] = <void*>_func_cbesh_wrap2
+ufunc_hankel2_ptr[2*1+1] = <void*>(<char*>"hankel2")
+ufunc_hankel2_data[0] = &ufunc_hankel2_ptr[2*0]
+ufunc_hankel2_data[1] = &ufunc_hankel2_ptr[2*1]
+hankel2 = np.PyUFunc_FromFuncAndData(ufunc_hankel2_loops, ufunc_hankel2_data, ufunc_hankel2_types, 2, 2, 1, 0, "hankel2", ufunc_hankel2_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_hankel2e_loops[2]
+cdef void *ufunc_hankel2e_ptr[4]
+cdef void *ufunc_hankel2e_data[2]
+cdef char ufunc_hankel2e_types[6]
+cdef char *ufunc_hankel2e_doc = (
+    "hankel2e(v, z, out=None)\n"
+    "\n"
+    "Exponentially scaled Hankel function of the second kind\n"
+    "\n"
+    "Defined as::\n"
+    "\n"
+    "    hankel2e(v, z) = hankel2(v, z) * exp(1j * z)\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "v : array_like\n"
+    "    Order (float).\n"
+    "z : array_like\n"
+    "    Argument (float or complex).\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Values of the exponentially scaled Hankel function of the second kind.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the\n"
+    "computation using the relation,\n"
+    "\n"
+    ".. math:: H^{(2)}_v(z) = -\\frac{2}{\\imath\\pi} \\exp(\\frac{\\imath \\pi v}{2}) K_v(z exp(\\frac{\\imath\\pi}{2}))\n"
+    "\n"
+    "where :math:`K_v` is the modified Bessel function of the second kind.\n"
+    "For negative orders, the relation\n"
+    "\n"
+    ".. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \\exp(-\\imath\\pi v)\n"
+    "\n"
+    "is used.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Donald E. Amos, \"AMOS, A Portable Package for Bessel Functions\n"
+    "       of a Complex Argument and Nonnegative Order\",\n"
+    "       http://netlib.org/amos/")
+ufunc_hankel2e_loops[0] = <np.PyUFuncGenericFunction>loop_D_dD__As_fF_F
+ufunc_hankel2e_loops[1] = <np.PyUFuncGenericFunction>loop_D_dD__As_dD_D
+ufunc_hankel2e_types[0] = <char>NPY_FLOAT
+ufunc_hankel2e_types[1] = <char>NPY_CFLOAT
+ufunc_hankel2e_types[2] = <char>NPY_CFLOAT
+ufunc_hankel2e_types[3] = <char>NPY_DOUBLE
+ufunc_hankel2e_types[4] = <char>NPY_CDOUBLE
+ufunc_hankel2e_types[5] = <char>NPY_CDOUBLE
+ufunc_hankel2e_ptr[2*0] = <void*>_func_cbesh_wrap2_e
+ufunc_hankel2e_ptr[2*0+1] = <void*>(<char*>"hankel2e")
+ufunc_hankel2e_ptr[2*1] = <void*>_func_cbesh_wrap2_e
+ufunc_hankel2e_ptr[2*1+1] = <void*>(<char*>"hankel2e")
+ufunc_hankel2e_data[0] = &ufunc_hankel2e_ptr[2*0]
+ufunc_hankel2e_data[1] = &ufunc_hankel2e_ptr[2*1]
+hankel2e = np.PyUFunc_FromFuncAndData(ufunc_hankel2e_loops, ufunc_hankel2e_data, ufunc_hankel2e_types, 2, 2, 1, 0, "hankel2e", ufunc_hankel2e_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_huber_loops[2]
+cdef void *ufunc_huber_ptr[4]
+cdef void *ufunc_huber_data[2]
+cdef char ufunc_huber_types[6]
+cdef char *ufunc_huber_doc = (
+    "huber(delta, r, out=None)\n"
+    "\n"
+    "Huber loss function.\n"
+    "\n"
+    ".. math:: \\text{huber}(\\delta, r) = \\begin{cases} \\infty & \\delta < 0  \\\\ \\frac{1}{2}r^2 & 0 \\le \\delta, | r | \\le \\delta \\\\ \\delta ( |r| - \\frac{1}{2}\\delta ) & \\text{otherwise} \\end{cases}\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "delta : ndarray\n"
+    "    Input array, indicating the quadratic vs. linear loss changepoint.\n"
+    "r : ndarray\n"
+    "    Input array, possibly representing residuals.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    The computed Huber loss function values.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "pseudo_huber : smooth approximation of this function\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "`huber` is useful as a loss function in robust statistics or machine\n"
+    "learning to reduce the influence of outliers as compared to the common\n"
+    "squared error loss, residuals with a magnitude higher than `delta` are\n"
+    "not squared [1]_.\n"
+    "\n"
+    "Typically, `r` represents residuals, the difference\n"
+    "between a model prediction and data. Then, for :math:`|r|\\leq\\delta`,\n"
+    "`huber` resembles the squared error and for :math:`|r|>\\delta` the\n"
+    "absolute error. This way, the Huber loss often achieves\n"
+    "a fast convergence in model fitting for small residuals like the squared\n"
+    "error loss function and still reduces the influence of outliers\n"
+    "(:math:`|r|>\\delta`) like the absolute error loss. As :math:`\\delta` is\n"
+    "the cutoff between squared and absolute error regimes, it has\n"
+    "to be tuned carefully for each problem. `huber` is also\n"
+    "convex, making it suitable for gradient based optimization.\n"
+    "\n"
+    ".. versionadded:: 0.15.0\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Peter Huber. \"Robust Estimation of a Location Parameter\",\n"
+    "       1964. Annals of Statistics. 53 (1): 73 - 101.\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Import all necessary modules.\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import huber\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    "\n"
+    "Compute the function for ``delta=1`` at ``r=2``\n"
+    "\n"
+    ">>> huber(1., 2.)\n"
+    "1.5\n"
+    "\n"
+    "Compute the function for different `delta` by providing a NumPy array or\n"
+    "list for `delta`.\n"
+    "\n"
+    ">>> huber([1., 3., 5.], 4.)\n"
+    "array([3.5, 7.5, 8. ])\n"
+    "\n"
+    "Compute the function at different points by providing a NumPy array or\n"
+    "list for `r`.\n"
+    "\n"
+    ">>> huber(2., np.array([1., 1.5, 3.]))\n"
+    "array([0.5  , 1.125, 4.   ])\n"
+    "\n"
+    "The function can be calculated for different `delta` and `r` by\n"
+    "providing arrays for both with compatible shapes for broadcasting.\n"
+    "\n"
+    ">>> r = np.array([1., 2.5, 8., 10.])\n"
+    ">>> deltas = np.array([[1.], [5.], [9.]])\n"
+    ">>> print(r.shape, deltas.shape)\n"
+    "(4,) (3, 1)\n"
+    "\n"
+    ">>> huber(deltas, r)\n"
+    "array([[ 0.5  ,  2.   ,  7.5  ,  9.5  ],\n"
+    "       [ 0.5  ,  3.125, 27.5  , 37.5  ],\n"
+    "       [ 0.5  ,  3.125, 32.   , 49.5  ]])\n"
+    "\n"
+    "Plot the function for different `delta`.\n"
+    "\n"
+    ">>> x = np.linspace(-4, 4, 500)\n"
+    ">>> deltas = [1, 2, 3]\n"
+    ">>> linestyles = [\"dashed\", \"dotted\", \"dashdot\"]\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> combined_plot_parameters = list(zip(deltas, linestyles))\n"
+    ">>> for delta, style in combined_plot_parameters:\n"
+    "...     ax.plot(x, huber(delta, x), label=f\"$\\delta={delta}$\", ls=style)\n"
+    ">>> ax.legend(loc=\"upper center\")\n"
+    ">>> ax.set_xlabel(\"$x$\")\n"
+    ">>> ax.set_title(\"Huber loss function $h_{\\delta}(x)$\")\n"
+    ">>> ax.set_xlim(-4, 4)\n"
+    ">>> ax.set_ylim(0, 8)\n"
+    ">>> plt.show()")
+ufunc_huber_loops[0] = <np.PyUFuncGenericFunction>loop_d_dd__As_ff_f
+ufunc_huber_loops[1] = <np.PyUFuncGenericFunction>loop_d_dd__As_dd_d
+ufunc_huber_types[0] = <char>NPY_FLOAT
+ufunc_huber_types[1] = <char>NPY_FLOAT
+ufunc_huber_types[2] = <char>NPY_FLOAT
+ufunc_huber_types[3] = <char>NPY_DOUBLE
+ufunc_huber_types[4] = <char>NPY_DOUBLE
+ufunc_huber_types[5] = <char>NPY_DOUBLE
+ufunc_huber_ptr[2*0] = <void*>_func_huber
+ufunc_huber_ptr[2*0+1] = <void*>(<char*>"huber")
+ufunc_huber_ptr[2*1] = <void*>_func_huber
+ufunc_huber_ptr[2*1+1] = <void*>(<char*>"huber")
+ufunc_huber_data[0] = &ufunc_huber_ptr[2*0]
+ufunc_huber_data[1] = &ufunc_huber_ptr[2*1]
+huber = np.PyUFunc_FromFuncAndData(ufunc_huber_loops, ufunc_huber_data, ufunc_huber_types, 2, 2, 1, 0, "huber", ufunc_huber_doc, 0)
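+# A NumPy transcription of the piecewise definition in the docstring
+# (illustrative reference only; the compiled _func_huber kernel is what the
+# ufunc actually calls):
+#
+#     >>> import numpy as np
+#     >>> def huber_ref(delta, r):
+#     ...     delta, r = np.broadcast_arrays(delta, r)
+#     ...     quad = 0.5 * r**2
+#     ...     lin = delta * (np.abs(r) - 0.5 * delta)
+#     ...     out = np.where(np.abs(r) <= delta, quad, lin)
+#     ...     return np.where(delta < 0, np.inf, out)
+#     >>> huber_ref(2., np.array([1., 1.5, 3.]))
+#     array([0.5  , 1.125, 4.   ])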
+
+cdef np.PyUFuncGenericFunction ufunc_hyp0f1_loops[4]
+cdef void *ufunc_hyp0f1_ptr[8]
+cdef void *ufunc_hyp0f1_data[4]
+cdef char ufunc_hyp0f1_types[12]
+cdef char *ufunc_hyp0f1_doc = (
+    "hyp0f1(v, z, out=None)\n"
+    "\n"
+    "Confluent hypergeometric limit function 0F1.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "v : array_like\n"
+    "    Real-valued parameter\n"
+    "z : array_like\n"
+    "    Real- or complex-valued argument\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    The confluent hypergeometric limit function\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "This function is defined as:\n"
+    "\n"
+    ".. math:: _0F_1(v, z) = \\sum_{k=0}^{\\infty}\\frac{z^k}{(v)_k k!}.\n"
+    "\n"
+    "It's also the limit as :math:`q \\to \\infty` of :math:`_1F_1(q; v; z/q)`,\n"
+    "and satisfies the differential equation :math:`f''(z) + vf'(z) =\n"
+    "f(z)`. See [1]_ for more information.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Wolfram MathWorld, \"Confluent Hypergeometric Limit Function\",\n"
+    "       http://mathworld.wolfram.com/ConfluentHypergeometricLimitFunction.html\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> import scipy.special as sc\n"
+    "\n"
+    "It is one when `z` is zero.\n"
+    "\n"
+    ">>> sc.hyp0f1(1, 0)\n"
+    "1.0\n"
+    "\n"
+    "It is the limit of the confluent hypergeometric function as `q`\n"
+    "goes to infinity.\n"
+    "\n"
+    ">>> q = np.array([1, 10, 100, 1000])\n"
+    ">>> v = 1\n"
+    ">>> z = 1\n"
+    ">>> sc.hyp1f1(q, v, z / q)\n"
+    "array([2.71828183, 2.31481985, 2.28303778, 2.27992985])\n"
+    ">>> sc.hyp0f1(v, z)\n"
+    "2.2795853023360673\n"
+    "\n"
+    "It is related to Bessel functions.\n"
+    "\n"
+    ">>> n = 1\n"
+    ">>> x = np.linspace(0, 1, 5)\n"
+    ">>> sc.jv(n, x)\n"
+    "array([0.        , 0.12402598, 0.24226846, 0.3492436 , 0.44005059])\n"
+    ">>> (0.5 * x)**n / sc.factorial(n) * sc.hyp0f1(n + 1, -0.25 * x**2)\n"
+    "array([0.        , 0.12402598, 0.24226846, 0.3492436 , 0.44005059])")
+ufunc_hyp0f1_loops[0] = loop_d_dd__As_ff_f
+ufunc_hyp0f1_loops[1] = loop_D_dD__As_fF_F
+ufunc_hyp0f1_loops[2] = loop_d_dd__As_dd_d
+ufunc_hyp0f1_loops[3] = loop_D_dD__As_dD_D
+ufunc_hyp0f1_types[0] = NPY_FLOAT
+ufunc_hyp0f1_types[1] = NPY_FLOAT
+ufunc_hyp0f1_types[2] = NPY_FLOAT
+ufunc_hyp0f1_types[3] = NPY_FLOAT
+ufunc_hyp0f1_types[4] = NPY_CFLOAT
+ufunc_hyp0f1_types[5] = NPY_CFLOAT
+ufunc_hyp0f1_types[6] = NPY_DOUBLE
+ufunc_hyp0f1_types[7] = NPY_DOUBLE
+ufunc_hyp0f1_types[8] = NPY_DOUBLE
+ufunc_hyp0f1_types[9] = NPY_DOUBLE
+ufunc_hyp0f1_types[10] = NPY_CDOUBLE
+ufunc_hyp0f1_types[11] = NPY_CDOUBLE
+ufunc_hyp0f1_ptr[2*0] = <void*>_func__hyp0f1_real
+ufunc_hyp0f1_ptr[2*0+1] = <void*>(<char*>"hyp0f1")
+ufunc_hyp0f1_ptr[2*1] = <void*>_func__hyp0f1_cmplx
+ufunc_hyp0f1_ptr[2*1+1] = <void*>(<char*>"hyp0f1")
+ufunc_hyp0f1_ptr[2*2] = <void*>_func__hyp0f1_real
+ufunc_hyp0f1_ptr[2*2+1] = <void*>(<char*>"hyp0f1")
+ufunc_hyp0f1_ptr[2*3] = <void*>_func__hyp0f1_cmplx
+ufunc_hyp0f1_ptr[2*3+1] = <void*>(<char*>"hyp0f1")
+ufunc_hyp0f1_data[0] = &ufunc_hyp0f1_ptr[2*0]
+ufunc_hyp0f1_data[1] = &ufunc_hyp0f1_ptr[2*1]
+ufunc_hyp0f1_data[2] = &ufunc_hyp0f1_ptr[2*2]
+ufunc_hyp0f1_data[3] = &ufunc_hyp0f1_ptr[2*3]
+hyp0f1 = np.PyUFunc_FromFuncAndData(ufunc_hyp0f1_loops, ufunc_hyp0f1_data, ufunc_hyp0f1_types, 4, 2, 1, 0, "hyp0f1", ufunc_hyp0f1_doc, 0)
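+# A hedged cross-check (not part of the generated module): the 0F1 series
+# from the docstring can be summed directly for moderate |z| using the real
+# scipy.special helpers `poch` and `factorial`; `hyp0f1_series` is a
+# hypothetical name:
+#
+#     import numpy as np
+#     from scipy.special import factorial, poch
+#
+#     def hyp0f1_series(v, z, terms=60):
+#         k = np.arange(terms)
+#         return np.sum(z**k / (poch(v, k) * factorial(k)))
+#
+# hyp0f1_series(1, 1) ~ 2.2795853, matching sc.hyp0f1(v, z) in the examples.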
+
+cdef np.PyUFuncGenericFunction ufunc_hyp1f1_loops[4]
+cdef void *ufunc_hyp1f1_ptr[8]
+cdef void *ufunc_hyp1f1_data[4]
+cdef char ufunc_hyp1f1_types[16]
+cdef char *ufunc_hyp1f1_doc = (
+    "hyp1f1(a, b, x, out=None)\n"
+    "\n"
+    "Confluent hypergeometric function 1F1.\n"
+    "\n"
+    "The confluent hypergeometric function is defined by the series\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "   {}_1F_1(a; b; x) = \\sum_{k = 0}^\\infty \\frac{(a)_k}{(b)_k k!} x^k.\n"
+    "\n"
+    "See [dlmf]_ for more details. Here :math:`(\\cdot)_k` is the\n"
+    "Pochhammer symbol; see `poch`.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "a, b : array_like\n"
+    "    Real parameters\n"
+    "x : array_like\n"
+    "    Real or complex argument\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Values of the confluent hypergeometric function\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "hyperu : another confluent hypergeometric function\n"
+    "hyp0f1 : confluent hypergeometric limit function\n"
+    "hyp2f1 : Gaussian hypergeometric function\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [dlmf] NIST Digital Library of Mathematical Functions\n"
+    "          https://dlmf.nist.gov/13.2#E2\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> import scipy.special as sc\n"
+    "\n"
+    "It is one when `x` is zero:\n"
+    "\n"
+    ">>> sc.hyp1f1(0.5, 0.5, 0)\n"
+    "1.0\n"
+    "\n"
+    "It is singular when `b` is a nonpositive integer.\n"
+    "\n"
+    ">>> sc.hyp1f1(0.5, -1, 0)\n"
+    "inf\n"
+    "\n"
+    "It is a polynomial when `a` is a nonpositive integer.\n"
+    "\n"
+    ">>> a, b, x = -1, 0.5, np.array([1.0, 2.0, 3.0, 4.0])\n"
+    ">>> sc.hyp1f1(a, b, x)\n"
+    "array([-1., -3., -5., -7.])\n"
+    ">>> 1 + (a / b) * x\n"
+    "array([-1., -3., -5., -7.])\n"
+    "\n"
+    "It reduces to the exponential function when `a = b`.\n"
+    "\n"
+    ">>> sc.hyp1f1(2, 2, [1, 2, 3, 4])\n"
+    "array([ 2.71828183,  7.3890561 , 20.08553692, 54.59815003])\n"
+    ">>> np.exp([1, 2, 3, 4])\n"
+    "array([ 2.71828183,  7.3890561 , 20.08553692, 54.59815003])")
+ufunc_hyp1f1_loops[0] = loop_d_ddd__As_fff_f
+ufunc_hyp1f1_loops[1] = loop_D_ddD__As_ffF_F
+ufunc_hyp1f1_loops[2] = loop_d_ddd__As_ddd_d
+ufunc_hyp1f1_loops[3] = loop_D_ddD__As_ddD_D
+ufunc_hyp1f1_types[0] = NPY_FLOAT
+ufunc_hyp1f1_types[1] = NPY_FLOAT
+ufunc_hyp1f1_types[2] = NPY_FLOAT
+ufunc_hyp1f1_types[3] = NPY_FLOAT
+ufunc_hyp1f1_types[4] = NPY_FLOAT
+ufunc_hyp1f1_types[5] = NPY_FLOAT
+ufunc_hyp1f1_types[6] = NPY_CFLOAT
+ufunc_hyp1f1_types[7] = NPY_CFLOAT
+ufunc_hyp1f1_types[8] = NPY_DOUBLE
+ufunc_hyp1f1_types[9] = NPY_DOUBLE
+ufunc_hyp1f1_types[10] = NPY_DOUBLE
+ufunc_hyp1f1_types[11] = NPY_DOUBLE
+ufunc_hyp1f1_types[12] = NPY_DOUBLE
+ufunc_hyp1f1_types[13] = NPY_DOUBLE
+ufunc_hyp1f1_types[14] = NPY_CDOUBLE
+ufunc_hyp1f1_types[15] = NPY_CDOUBLE
+ufunc_hyp1f1_ptr[2*0] = <void*>scipy.special._ufuncs_cxx._export_hyp1f1_double
+ufunc_hyp1f1_ptr[2*0+1] = <void*>(<char*>"hyp1f1")
+ufunc_hyp1f1_ptr[2*1] = <void*>_func_chyp1f1_wrap
+ufunc_hyp1f1_ptr[2*1+1] = <void*>(<char*>"hyp1f1")
+ufunc_hyp1f1_ptr[2*2] = <void*>scipy.special._ufuncs_cxx._export_hyp1f1_double
+ufunc_hyp1f1_ptr[2*2+1] = <void*>(<char*>"hyp1f1")
+ufunc_hyp1f1_ptr[2*3] = <void*>_func_chyp1f1_wrap
+ufunc_hyp1f1_ptr[2*3+1] = <void*>(<char*>"hyp1f1")
+ufunc_hyp1f1_data[0] = &ufunc_hyp1f1_ptr[2*0]
+ufunc_hyp1f1_data[1] = &ufunc_hyp1f1_ptr[2*1]
+ufunc_hyp1f1_data[2] = &ufunc_hyp1f1_ptr[2*2]
+ufunc_hyp1f1_data[3] = &ufunc_hyp1f1_ptr[2*3]
+hyp1f1 = np.PyUFunc_FromFuncAndData(ufunc_hyp1f1_loops, ufunc_hyp1f1_data, ufunc_hyp1f1_types, 4, 3, 1, 0, "hyp1f1", ufunc_hyp1f1_doc, 0)
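+# A hedged series-based cross-check (not part of the generated module),
+# usable when `b` is not a nonpositive integer (there poch(b, k) vanishes
+# and the series is singular); `hyp1f1_series` is a hypothetical name:
+#
+#     import numpy as np
+#     from scipy.special import factorial, poch
+#
+#     def hyp1f1_series(a, b, x, terms=80):
+#         k = np.arange(terms)
+#         return np.sum(poch(a, k) / poch(b, k) * x**k / factorial(k))
+#
+# hyp1f1_series(2, 2, 1) ~ 2.71828183 = e, matching the exponential
+# reduction shown in the docstring examples.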
+
+cdef np.PyUFuncGenericFunction ufunc_hyp2f1_loops[4]
+cdef void *ufunc_hyp2f1_ptr[8]
+cdef void *ufunc_hyp2f1_data[4]
+cdef char ufunc_hyp2f1_types[20]
+cdef char *ufunc_hyp2f1_doc = (
+    "hyp2f1(a, b, c, z, out=None)\n"
+    "\n"
+    "Gauss hypergeometric function 2F1(a, b; c; z)\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "a, b, c : array_like\n"
+    "    Arguments, should be real-valued.\n"
+    "z : array_like\n"
+    "    Argument, real or complex.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "hyp2f1 : scalar or ndarray\n"
+    "    The values of the gaussian hypergeometric function.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "hyp0f1 : confluent hypergeometric limit function.\n"
+    "hyp1f1 : Kummer's (confluent hypergeometric) function.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "This function is defined for :math:`|z| < 1` as\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "   \\mathrm{hyp2f1}(a, b, c, z) = \\sum_{n=0}^\\infty\n"
+    "   \\frac{(a)_n (b)_n}{(c)_n}\\frac{z^n}{n!},\n"
+    "\n"
+    "and defined on the rest of the complex z-plane by analytic\n"
+    "continuation [1]_.\n"
+    "Here :math:`(\\cdot)_n` is the Pochhammer symbol; see `poch`. When\n"
+    ":math:`n` is an integer the result is a polynomial of degree :math:`n`.\n"
+    "\n"
+    "The implementation for complex values of ``z`` is described in [2]_,\n"
+    "except for ``z`` in the region defined by\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "     0.9 <= \\left|z\\right| < 1.1,\n"
+    "     \\left|1 - z\\right| >= 0.9,\n"
+    "     \\mathrm{real}(z) >= 0\n"
+    "\n"
+    "in which the implementation follows [4]_.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] NIST Digital Library of Mathematical Functions\n"
+    "       https://dlmf.nist.gov/15.2\n"
+    ".. [2] S. Zhang and J.M. Jin, \"Computation of Special Functions\", Wiley 1996\n"
+    ".. [3] Cephes Mathematical Functions Library,\n"
+    "       http://www.netlib.org/cephes/\n"
+    ".. [4] J.L. Lopez and N.M. Temme, \"New series expansions of the Gauss\n"
+    "       hypergeometric function\", Adv Comput Math 39, 349-365 (2013).\n"
+    "       https://doi.org/10.1007/s10444-012-9283-y\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> import scipy.special as sc\n"
+    "\n"
+    "It has poles when `c` is a negative integer.\n"
+    "\n"
+    ">>> sc.hyp2f1(1, 1, -2, 1)\n"
+    "inf\n"
+    "\n"
+    "It is a polynomial when `a` or `b` is a negative integer.\n"
+    "\n"
+    ">>> a, b, c = -1, 1, 1.5\n"
+    ">>> z = np.linspace(0, 1, 5)\n"
+    ">>> sc.hyp2f1(a, b, c, z)\n"
+    "array([1.        , 0.83333333, 0.66666667, 0.5       , 0.33333333])\n"
+    ">>> 1 + a * b * z / c\n"
+    "array([1.        , 0.83333333, 0.66666667, 0.5       , 0.33333333])\n"
+    "\n"
+    "It is symmetric in `a` and `b`.\n"
+    "\n"
+    ">>> a = np.linspace(0, 1, 5)\n"
+    ">>> b = np.linspace(0, 1, 5)\n"
+    ">>> sc.hyp2f1(a, b, 1, 0.5)\n"
+    "array([1.        , 1.03997334, 1.1803406 , 1.47074441, 2.        ])\n"
+    ">>> sc.hyp2f1(b, a, 1, 0.5)\n"
+    "array([1.        , 1.03997334, 1.1803406 , 1.47074441, 2.        ])\n"
+    "\n"
+    "It contains many other functions as special cases.\n"
+    "\n"
+    ">>> z = 0.5\n"
+    ">>> sc.hyp2f1(1, 1, 2, z)\n"
+    "1.3862943611198901\n"
+    ">>> -np.log(1 - z) / z\n"
+    "1.3862943611198906\n"
+    "\n"
+    ">>> sc.hyp2f1(0.5, 1, 1.5, z**2)\n"
+    "1.098612288668109\n"
+    ">>> np.log((1 + z) / (1 - z)) / (2 * z)\n"
+    "1.0986122886681098\n"
+    "\n"
+    ">>> sc.hyp2f1(0.5, 1, 1.5, -z**2)\n"
+    "0.9272952180016117\n"
+    ">>> np.arctan(z) / z\n"
+    "0.9272952180016122")
+ufunc_hyp2f1_loops[0] = loop_d_dddd__As_ffff_f
+ufunc_hyp2f1_loops[1] = loop_D_dddD__As_fffF_F
+ufunc_hyp2f1_loops[2] = loop_d_dddd__As_dddd_d
+ufunc_hyp2f1_loops[3] = loop_D_dddD__As_dddD_D
+ufunc_hyp2f1_types[0] = NPY_FLOAT
+ufunc_hyp2f1_types[1] = NPY_FLOAT
+ufunc_hyp2f1_types[2] = NPY_FLOAT
+ufunc_hyp2f1_types[3] = NPY_FLOAT
+ufunc_hyp2f1_types[4] = NPY_FLOAT
+ufunc_hyp2f1_types[5] = NPY_FLOAT
+ufunc_hyp2f1_types[6] = NPY_FLOAT
+ufunc_hyp2f1_types[7] = NPY_FLOAT
+ufunc_hyp2f1_types[8] = NPY_CFLOAT
+ufunc_hyp2f1_types[9] = NPY_CFLOAT
+ufunc_hyp2f1_types[10] = NPY_DOUBLE
+ufunc_hyp2f1_types[11] = NPY_DOUBLE
+ufunc_hyp2f1_types[12] = NPY_DOUBLE
+ufunc_hyp2f1_types[13] = NPY_DOUBLE
+ufunc_hyp2f1_types[14] = NPY_DOUBLE
+ufunc_hyp2f1_types[15] = NPY_DOUBLE
+ufunc_hyp2f1_types[16] = NPY_DOUBLE
+ufunc_hyp2f1_types[17] = NPY_DOUBLE
+ufunc_hyp2f1_types[18] = NPY_CDOUBLE
+ufunc_hyp2f1_types[19] = NPY_CDOUBLE
+ufunc_hyp2f1_ptr[2*0] = <void*>_func_hyp2f1
+ufunc_hyp2f1_ptr[2*0+1] = <void*>(<char*>"hyp2f1")
+ufunc_hyp2f1_ptr[2*1] = <void*>_func_hyp2f1_complex
+ufunc_hyp2f1_ptr[2*1+1] = <void*>(<char*>"hyp2f1")
+ufunc_hyp2f1_ptr[2*2] = <void*>_func_hyp2f1
+ufunc_hyp2f1_ptr[2*2+1] = <void*>(<char*>"hyp2f1")
+ufunc_hyp2f1_ptr[2*3] = <void*>_func_hyp2f1_complex
+ufunc_hyp2f1_ptr[2*3+1] = <void*>(<char*>"hyp2f1")
+ufunc_hyp2f1_data[0] = &ufunc_hyp2f1_ptr[2*0]
+ufunc_hyp2f1_data[1] = &ufunc_hyp2f1_ptr[2*1]
+ufunc_hyp2f1_data[2] = &ufunc_hyp2f1_ptr[2*2]
+ufunc_hyp2f1_data[3] = &ufunc_hyp2f1_ptr[2*3]
+hyp2f1 = np.PyUFunc_FromFuncAndData(ufunc_hyp2f1_loops, ufunc_hyp2f1_data, ufunc_hyp2f1_types, 4, 4, 1, 0, "hyp2f1", ufunc_hyp2f1_doc, 0)
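+# A hedged cross-check (not part of the generated module): for |z| < 1 the
+# defining series can be accumulated with the term recurrence
+# t_{n+1} = t_n * (a+n)(b+n) / ((c+n)(n+1)) * z, which avoids the factorial
+# overflow a naive vectorized sum would hit. `hyp2f1_series` is a
+# hypothetical sketch; `c` must not be a nonpositive integer:
+#
+#     def hyp2f1_series(a, b, c, z, terms=200):
+#         total, term = 0.0, 1.0
+#         for n in range(terms):
+#             total += term
+#             term *= (a + n) * (b + n) / (c + n) * z / (n + 1)
+#         return total
+#
+# hyp2f1_series(1, 1, 2, 0.5) ~ 1.3862944 = -log(0.5)/0.5, as in the
+# special-case examples above.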
+
+cdef np.PyUFuncGenericFunction ufunc_hyperu_loops[2]
+cdef void *ufunc_hyperu_ptr[4]
+cdef void *ufunc_hyperu_data[2]
+cdef char ufunc_hyperu_types[8]
+cdef char *ufunc_hyperu_doc = (
+    "hyperu(a, b, x, out=None)\n"
+    "\n"
+    "Confluent hypergeometric function U\n"
+    "\n"
+    "It is defined as the solution to the equation\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "   x \\frac{d^2w}{dx^2} + (b - x) \\frac{dw}{dx} - aw = 0\n"
+    "\n"
+    "which satisfies the property\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "   U(a, b, x) \\sim x^{-a}\n"
+    "\n"
+    "as :math:`x \\to \\infty`. See [dlmf]_ for more details.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "a, b : array_like\n"
+    "    Real-valued parameters\n"
+    "x : array_like\n"
+    "    Real-valued argument\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Values of `U`\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [dlmf] NIST Digital Library of Mathematics Functions\n"
+    "          https://dlmf.nist.gov/13.2#E6\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> import scipy.special as sc\n"
+    "\n"
+    "It has a branch cut along the negative `x` axis.\n"
+    "\n"
+    ">>> x = np.linspace(-0.1, -10, 5)\n"
+    ">>> sc.hyperu(1, 1, x)\n"
+    "array([nan, nan, nan, nan, nan])\n"
+    "\n"
+    "It approaches zero as `x` goes to infinity.\n"
+    "\n"
+    ">>> x = np.array([1, 10, 100])\n"
+    ">>> sc.hyperu(1, 1, x)\n"
+    "array([0.59634736, 0.09156333, 0.00990194])\n"
+    "\n"
+    "It satisfies Kummer's transformation.\n"
+    "\n"
+    ">>> a, b, x = 2, 1, 1\n"
+    ">>> sc.hyperu(a, b, x)\n"
+    "0.1926947246463881\n"
+    ">>> x**(1 - b) * sc.hyperu(a - b + 1, 2 - b, x)\n"
+    "0.1926947246463881")
+ufunc_hyperu_loops[0] = loop_d_ddd__As_fff_f
+ufunc_hyperu_loops[1] = loop_d_ddd__As_ddd_d
+ufunc_hyperu_types[0] = NPY_FLOAT
+ufunc_hyperu_types[1] = NPY_FLOAT
+ufunc_hyperu_types[2] = NPY_FLOAT
+ufunc_hyperu_types[3] = NPY_FLOAT
+ufunc_hyperu_types[4] = NPY_DOUBLE
+ufunc_hyperu_types[5] = NPY_DOUBLE
+ufunc_hyperu_types[6] = NPY_DOUBLE
+ufunc_hyperu_types[7] = NPY_DOUBLE
+ufunc_hyperu_ptr[2*0] = <void*>_func_hyperu
+ufunc_hyperu_ptr[2*0+1] = <void*>(<char*>"hyperu")
+ufunc_hyperu_ptr[2*1] = <void*>_func_hyperu
+ufunc_hyperu_ptr[2*1+1] = <void*>(<char*>"hyperu")
+ufunc_hyperu_data[0] = &ufunc_hyperu_ptr[2*0]
+ufunc_hyperu_data[1] = &ufunc_hyperu_ptr[2*1]
+hyperu = np.PyUFunc_FromFuncAndData(ufunc_hyperu_loops, ufunc_hyperu_data, ufunc_hyperu_types, 2, 3, 1, 0, "hyperu", ufunc_hyperu_doc, 0)
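+# A hedged cross-check (not part of the generated module): for a > 0 and
+# x > 0, U has the integral representation
+# U(a, b, x) = (1/Gamma(a)) * integral_0^inf exp(-x*t) t**(a-1) (1+t)**(b-a-1) dt,
+# which can be evaluated by quadrature; `hyperu_int` is a hypothetical helper:
+#
+#     import numpy as np
+#     from scipy.integrate import quad
+#     from scipy.special import gamma
+#
+#     def hyperu_int(a, b, x):
+#         f = lambda t: np.exp(-x * t) * t**(a - 1.0) * (1.0 + t)**(b - a - 1.0)
+#         return quad(f, 0.0, np.inf)[0] / gamma(a)
+#
+# hyperu_int(1, 1, 1) ~ 0.5963474, matching sc.hyperu(1, 1, x) at x = 1 in
+# the examples above.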
+
+cdef np.PyUFuncGenericFunction ufunc_i0_loops[2]
+cdef void *ufunc_i0_ptr[4]
+cdef void *ufunc_i0_data[2]
+cdef char ufunc_i0_types[4]
+cdef char *ufunc_i0_doc = (
+    "i0(x, out=None)\n"
+    "\n"
+    "Modified Bessel function of order 0.\n"
+    "\n"
+    "Defined as,\n"
+    "\n"
+    ".. math::\n"
+    "    I_0(x) = \\sum_{k=0}^\\infty \\frac{(x^2/4)^k}{(k!)^2} = J_0(\\imath x),\n"
+    "\n"
+    "where :math:`J_0` is the Bessel function of the first kind of order 0.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Argument (float)\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "I : scalar or ndarray\n"
+    "    Value of the modified Bessel function of order 0 at `x`.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "The range is partitioned into the two intervals [0, 8] and (8, infinity).\n"
+    "Chebyshev polynomial expansions are employed in each interval.\n"
+    "\n"
+    "This function is a wrapper for the Cephes [1]_ routine `i0`.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "iv: Modified Bessel function of any order\n"
+    "i0e: Exponentially scaled modified Bessel function of order 0\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Cephes Mathematical Functions Library,\n"
+    "       http://www.netlib.org/cephes/\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Calculate the function at one point:\n"
+    "\n"
+    ">>> from scipy.special import i0\n"
+    ">>> i0(1.)\n"
+    "1.2660658777520082\n"
+    "\n"
+    "Calculate at several points:\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> i0(np.array([-2., 0., 3.5]))\n"
+    "array([2.2795853 , 1.        , 7.37820343])\n"
+    "\n"
+    "Plot the function from -10 to 10.\n"
+    "\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> x = np.linspace(-10., 10., 1000)\n"
+    ">>> y = i0(x)\n"
+    ">>> ax.plot(x, y)\n"
+    ">>> plt.show()")
+ufunc_i0_loops[0] = loop_d_d__As_f_f
+ufunc_i0_loops[1] = loop_d_d__As_d_d
+ufunc_i0_types[0] = NPY_FLOAT
+ufunc_i0_types[1] = NPY_FLOAT
+ufunc_i0_types[2] = NPY_DOUBLE
+ufunc_i0_types[3] = NPY_DOUBLE
+ufunc_i0_ptr[2*0] = <void*>_func_i0
+ufunc_i0_ptr[2*0+1] = <void*>(<char*>"i0")
+ufunc_i0_ptr[2*1] = <void*>_func_i0
+ufunc_i0_ptr[2*1+1] = <void*>(<char*>"i0")
+ufunc_i0_data[0] = &ufunc_i0_ptr[2*0]
+ufunc_i0_data[1] = &ufunc_i0_ptr[2*1]
+i0 = np.PyUFunc_FromFuncAndData(ufunc_i0_loops, ufunc_i0_data, ufunc_i0_types, 2, 1, 1, 0, "i0", ufunc_i0_doc, 0)
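+# A hedged cross-check (not part of the generated module): I_0 also
+# satisfies the standard integral representation
+# I_0(x) = (1/pi) * integral_0^pi exp(x*cos(t)) dt, which checks the
+# Chebyshev-based Cephes routine independently:
+#
+#     import numpy as np
+#     from scipy.integrate import quad
+#
+#     x = 1.0
+#     val = quad(lambda t: np.exp(x * np.cos(t)), 0.0, np.pi)[0] / np.pi
+#     # val ~ 1.2660659, matching i0(1.) in the examples above.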
+
+cdef np.PyUFuncGenericFunction ufunc_i0e_loops[2]
+cdef void *ufunc_i0e_ptr[4]
+cdef void *ufunc_i0e_data[2]
+cdef char ufunc_i0e_types[4]
+cdef char *ufunc_i0e_doc = (
+    "i0e(x, out=None)\n"
+    "\n"
+    "Exponentially scaled modified Bessel function of order 0.\n"
+    "\n"
+    "Defined as::\n"
+    "\n"
+    "    i0e(x) = exp(-abs(x)) * i0(x).\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Argument (float)\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "I : scalar or ndarray\n"
+    "    Value of the exponentially scaled modified Bessel function of order 0\n"
+    "    at `x`.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "The range is partitioned into the two intervals [0, 8] and (8, infinity).\n"
+    "Chebyshev polynomial expansions are employed in each interval. The\n"
+    "polynomial expansions used are the same as those in `i0`, but\n"
+    "they are not multiplied by the dominant exponential factor.\n"
+    "\n"
+    "This function is a wrapper for the Cephes [1]_ routine `i0e`.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "iv: Modified Bessel function of the first kind\n"
+    "i0: Modified Bessel function of order 0\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Cephes Mathematical Functions Library,\n"
+    "       http://www.netlib.org/cephes/\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Calculate the function at one point:\n"
+    "\n"
+    ">>> from scipy.special import i0e\n"
+    ">>> i0e(1.)\n"
+    "0.46575960759364043\n"
+    "\n"
+    "Calculate the function at several points:\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> i0e(np.array([-2., 0., 3.]))\n"
+    "array([0.30850832, 1.        , 0.24300035])\n"
+    "\n"
+    "Plot the function from -10 to 10.\n"
+    "\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> x = np.linspace(-10., 10., 1000)\n"
+    ">>> y = i0e(x)\n"
+    ">>> ax.plot(x, y)\n"
+    ">>> plt.show()\n"
+    "\n"
+    "Exponentially scaled Bessel functions are useful for large arguments for\n"
+    "which the unscaled Bessel functions overflow or lose precision. In the\n"
+    "following example `i0` returns infinity whereas `i0e` still returns\n"
+    "a finite number.\n"
+    "\n"
+    ">>> from scipy.special import i0\n"
+    ">>> i0(1000.), i0e(1000.)\n"
+    "(inf, 0.012617240455891257)")
+ufunc_i0e_loops[0] = loop_d_d__As_f_f
+ufunc_i0e_loops[1] = loop_d_d__As_d_d
+ufunc_i0e_types[0] = NPY_FLOAT
+ufunc_i0e_types[1] = NPY_FLOAT
+ufunc_i0e_types[2] = NPY_DOUBLE
+ufunc_i0e_types[3] = NPY_DOUBLE
+ufunc_i0e_ptr[2*0] = <void*>_func_i0e
+ufunc_i0e_ptr[2*0+1] = <void*>(<char*>"i0e")
+ufunc_i0e_ptr[2*1] = <void*>_func_i0e
+ufunc_i0e_ptr[2*1+1] = <void*>(<char*>"i0e")
+ufunc_i0e_data[0] = &ufunc_i0e_ptr[2*0]
+ufunc_i0e_data[1] = &ufunc_i0e_ptr[2*1]
+i0e = np.PyUFunc_FromFuncAndData(ufunc_i0e_loops, ufunc_i0e_data, ufunc_i0e_types, 2, 1, 1, 0, "i0e", ufunc_i0e_doc, 0)
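+# A hedged demonstration (not part of the generated module): the scaling
+# identity i0e(x) = exp(-|x|) * i0(x) can be confirmed wherever i0 does not
+# overflow:
+#
+#     import numpy as np
+#     from scipy.special import i0, i0e
+#
+#     x = 50.0
+#     np.allclose(i0e(x), np.exp(-abs(x)) * i0(x))   # True
+#
+# At x = 1000. the right-hand side degenerates to inf * 0 = nan in float64,
+# which is precisely why the scaled variant exists (see the last docstring
+# example above).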
+
+cdef np.PyUFuncGenericFunction ufunc_i1_loops[2]
+cdef void *ufunc_i1_ptr[4]
+cdef void *ufunc_i1_data[2]
+cdef char ufunc_i1_types[4]
+cdef char *ufunc_i1_doc = (
+    "i1(x, out=None)\n"
+    "\n"
+    "Modified Bessel function of order 1.\n"
+    "\n"
+    "Defined as,\n"
+    "\n"
+    ".. math::\n"
+    "    I_1(x) = \\frac{1}{2}x \\sum_{k=0}^\\infty \\frac{(x^2/4)^k}{k! (k + 1)!}\n"
+    "           = -\\imath J_1(\\imath x),\n"
+    "\n"
+    "where :math:`J_1` is the Bessel function of the first kind of order 1.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Argument (float)\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "I : scalar or ndarray\n"
+    "    Value of the modified Bessel function of order 1 at `x`.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "The range is partitioned into the two intervals [0, 8] and (8, infinity).\n"
+    "Chebyshev polynomial expansions are employed in each interval.\n"
+    "\n"
+    "This function is a wrapper for the Cephes [1]_ routine `i1`.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "iv: Modified Bessel function of the first kind\n"
+    "i1e: Exponentially scaled modified Bessel function of order 1\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Cephes Mathematical Functions Library,\n"
+    "       http://www.netlib.org/cephes/\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Calculate the function at one point:\n"
+    "\n"
+    ">>> from scipy.special import i1\n"
+    ">>> i1(1.)\n"
+    "0.5651591039924851\n"
+    "\n"
+    "Calculate the function at several points:\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> i1(np.array([-2., 0., 6.]))\n"
+    "array([-1.59063685,  0.        , 61.34193678])\n"
+    "\n"
+    "Plot the function between -10 and 10.\n"
+    "\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> x = np.linspace(-10., 10., 1000)\n"
+    ">>> y = i1(x)\n"
+    ">>> ax.plot(x, y)\n"
+    ">>> plt.show()")
+ufunc_i1_loops[0] = loop_d_d__As_f_f
+ufunc_i1_loops[1] = loop_d_d__As_d_d
+ufunc_i1_types[0] = NPY_FLOAT
+ufunc_i1_types[1] = NPY_FLOAT
+ufunc_i1_types[2] = NPY_DOUBLE
+ufunc_i1_types[3] = NPY_DOUBLE
+ufunc_i1_ptr[2*0] = <void*>_func_i1
+ufunc_i1_ptr[2*0+1] = <void*>(<char*>"i1")
+ufunc_i1_ptr[2*1] = <void*>_func_i1
+ufunc_i1_ptr[2*1+1] = <void*>(<char*>"i1")
+ufunc_i1_data[0] = &ufunc_i1_ptr[2*0]
+ufunc_i1_data[1] = &ufunc_i1_ptr[2*1]
+i1 = np.PyUFunc_FromFuncAndData(ufunc_i1_loops, ufunc_i1_data, ufunc_i1_types, 2, 1, 1, 0, "i1", ufunc_i1_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_i1e_loops[2]
+cdef void *ufunc_i1e_ptr[4]
+cdef void *ufunc_i1e_data[2]
+cdef char ufunc_i1e_types[4]
+cdef char *ufunc_i1e_doc = (
+    "i1e(x, out=None)\n"
+    "\n"
+    "Exponentially scaled modified Bessel function of order 1.\n"
+    "\n"
+    "Defined as::\n"
+    "\n"
+    "    i1e(x) = exp(-abs(x)) * i1(x)\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Argument (float)\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "I : scalar or ndarray\n"
+    "    Value of the exponentially scaled modified Bessel function of order 1\n"
+    "    at `x`.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "The range is partitioned into the two intervals [0, 8] and (8, infinity).\n"
+    "Chebyshev polynomial expansions are employed in each interval. The\n"
+    "polynomial expansions used are the same as those in `i1`, but\n"
+    "they are not multiplied by the dominant exponential factor.\n"
+    "\n"
+    "This function is a wrapper for the Cephes [1]_ routine `i1e`.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "iv: Modified Bessel function of the first kind\n"
+    "i1: Modified Bessel function of order 1\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Cephes Mathematical Functions Library,\n"
+    "       http://www.netlib.org/cephes/\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Calculate the function at one point:\n"
+    "\n"
+    ">>> from scipy.special import i1e\n"
+    ">>> i1e(1.)\n"
+    "0.2079104153497085\n"
+    "\n"
+    "Calculate the function at several points:\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> i1e(np.array([-2., 0., 6.]))\n"
+    "array([-0.21526929,  0.        ,  0.15205146])\n"
+    "\n"
+    "Plot the function between -10 and 10.\n"
+    "\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> x = np.linspace(-10., 10., 1000)\n"
+    ">>> y = i1e(x)\n"
+    ">>> ax.plot(x, y)\n"
+    ">>> plt.show()\n"
+    "\n"
+    "Exponentially scaled Bessel functions are useful for large arguments for\n"
+    "which the unscaled Bessel functions overflow or lose precision. In the\n"
+    "following example `i1` returns infinity whereas `i1e` still returns a\n"
+    "finite number.\n"
+    "\n"
+    ">>> from scipy.special import i1\n"
+    ">>> i1(1000.), i1e(1000.)\n"
+    "(inf, 0.01261093025692863)")
+ufunc_i1e_loops[0] = loop_d_d__As_f_f
+ufunc_i1e_loops[1] = loop_d_d__As_d_d
+ufunc_i1e_types[0] = NPY_FLOAT
+ufunc_i1e_types[1] = NPY_FLOAT
+ufunc_i1e_types[2] = NPY_DOUBLE
+ufunc_i1e_types[3] = NPY_DOUBLE
+ufunc_i1e_ptr[2*0] = <void*>_func_i1e
+ufunc_i1e_ptr[2*0+1] = <void*>(<char*>"i1e")
+ufunc_i1e_ptr[2*1] = <void*>_func_i1e
+ufunc_i1e_ptr[2*1+1] = <void*>(<char*>"i1e")
+ufunc_i1e_data[0] = &ufunc_i1e_ptr[2*0]
+ufunc_i1e_data[1] = &ufunc_i1e_ptr[2*1]
+i1e = np.PyUFunc_FromFuncAndData(ufunc_i1e_loops, ufunc_i1e_data, ufunc_i1e_types, 2, 1, 1, 0, "i1e", ufunc_i1e_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_inv_boxcox_loops[2]
+cdef void *ufunc_inv_boxcox_ptr[4]
+cdef void *ufunc_inv_boxcox_data[2]
+cdef char ufunc_inv_boxcox_types[6]
+cdef char *ufunc_inv_boxcox_doc = (
+    "inv_boxcox(y, lmbda, out=None)\n"
+    "\n"
+    "Compute the inverse of the Box-Cox transformation.\n"
+    "\n"
+    "Find ``x`` such that::\n"
+    "\n"
+    "    y = (x**lmbda - 1) / lmbda  if lmbda != 0\n"
+    "        log(x)                  if lmbda == 0\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "y : array_like\n"
+    "    Data to be transformed.\n"
+    "lmbda : array_like\n"
+    "    Power parameter of the Box-Cox transform.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "x : scalar or ndarray\n"
+    "    Transformed data.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "\n"
+    ".. versionadded:: 0.16.0\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> from scipy.special import boxcox, inv_boxcox\n"
+    ">>> y = boxcox([1, 4, 10], 2.5)\n"
+    ">>> inv_boxcox(y, 2.5)\n"
+    "array([1., 4., 10.])")
+ufunc_inv_boxcox_loops[0] = loop_d_dd__As_ff_f
+ufunc_inv_boxcox_loops[1] = loop_d_dd__As_dd_d
+ufunc_inv_boxcox_types[0] = NPY_FLOAT
+ufunc_inv_boxcox_types[1] = NPY_FLOAT
+ufunc_inv_boxcox_types[2] = NPY_FLOAT
+ufunc_inv_boxcox_types[3] = NPY_DOUBLE
+ufunc_inv_boxcox_types[4] = NPY_DOUBLE
+ufunc_inv_boxcox_types[5] = NPY_DOUBLE
+ufunc_inv_boxcox_ptr[2*0] = <void*>_func_inv_boxcox
+ufunc_inv_boxcox_ptr[2*0+1] = <void*>(<char*>"inv_boxcox")
+ufunc_inv_boxcox_ptr[2*1] = <void*>_func_inv_boxcox
+ufunc_inv_boxcox_ptr[2*1+1] = <void*>(<char*>"inv_boxcox")
+ufunc_inv_boxcox_data[0] = &ufunc_inv_boxcox_ptr[2*0]
+ufunc_inv_boxcox_data[1] = &ufunc_inv_boxcox_ptr[2*1]
+inv_boxcox = np.PyUFunc_FromFuncAndData(ufunc_inv_boxcox_loops, ufunc_inv_boxcox_data, ufunc_inv_boxcox_types, 2, 2, 1, 0, "inv_boxcox", ufunc_inv_boxcox_doc, 0)
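+# A hedged sketch (not part of the generated module): the inverse has the
+# closed form x = (lmbda*y + 1)**(1/lmbda) for lmbda != 0 and x = exp(y)
+# for lmbda == 0; `inv_boxcox_ref` is a hypothetical scalar-lmbda reference:
+#
+#     import numpy as np
+#
+#     def inv_boxcox_ref(y, lmbda):
+#         y = np.asarray(y, dtype=float)
+#         if lmbda == 0:
+#             return np.exp(y)
+#         return np.power(lmbda * y + 1.0, 1.0 / lmbda)
+#
+# Round trip: inv_boxcox_ref(boxcox([1, 4, 10], 2.5), 2.5) recovers
+# array([ 1.,  4., 10.]), as in the docstring example.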
+
+cdef np.PyUFuncGenericFunction ufunc_inv_boxcox1p_loops[2]
+cdef void *ufunc_inv_boxcox1p_ptr[4]
+cdef void *ufunc_inv_boxcox1p_data[2]
+cdef char ufunc_inv_boxcox1p_types[6]
+cdef char *ufunc_inv_boxcox1p_doc = (
+    "inv_boxcox1p(y, lmbda, out=None)\n"
+    "\n"
+    "Compute the inverse of the Box-Cox transformation.\n"
+    "\n"
+    "Find ``x`` such that::\n"
+    "\n"
+    "    y = ((1+x)**lmbda - 1) / lmbda  if lmbda != 0\n"
+    "        log(1+x)                    if lmbda == 0\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "y : array_like\n"
+    "    Data to be transformed.\n"
+    "lmbda : array_like\n"
+    "    Power parameter of the Box-Cox transform.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "x : scalar or ndarray\n"
+    "    Transformed data.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "\n"
+    ".. versionadded:: 0.16.0\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> from scipy.special import boxcox1p, inv_boxcox1p\n"
+    ">>> y = boxcox1p([1, 4, 10], 2.5)\n"
+    ">>> inv_boxcox1p(y, 2.5)\n"
+    "array([1., 4., 10.])")
+ufunc_inv_boxcox1p_loops[0] = loop_d_dd__As_ff_f
+ufunc_inv_boxcox1p_loops[1] = loop_d_dd__As_dd_d
+ufunc_inv_boxcox1p_types[0] = NPY_FLOAT
+ufunc_inv_boxcox1p_types[1] = NPY_FLOAT
+ufunc_inv_boxcox1p_types[2] = NPY_FLOAT
+ufunc_inv_boxcox1p_types[3] = NPY_DOUBLE
+ufunc_inv_boxcox1p_types[4] = NPY_DOUBLE
+ufunc_inv_boxcox1p_types[5] = NPY_DOUBLE
+ufunc_inv_boxcox1p_ptr[2*0] = <void*>_func_inv_boxcox1p
+ufunc_inv_boxcox1p_ptr[2*0+1] = <void*>(<char*>"inv_boxcox1p")
+ufunc_inv_boxcox1p_ptr[2*1] = <void*>_func_inv_boxcox1p
+ufunc_inv_boxcox1p_ptr[2*1+1] = <void*>(<char*>"inv_boxcox1p")
+ufunc_inv_boxcox1p_data[0] = &ufunc_inv_boxcox1p_ptr[2*0]
+ufunc_inv_boxcox1p_data[1] = &ufunc_inv_boxcox1p_ptr[2*1]
+inv_boxcox1p = np.PyUFunc_FromFuncAndData(ufunc_inv_boxcox1p_loops, ufunc_inv_boxcox1p_data, ufunc_inv_boxcox1p_types, 2, 2, 1, 0, "inv_boxcox1p", ufunc_inv_boxcox1p_doc, 0)
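+# A hedged sketch (not part of the generated module): analogously,
+# x = (lmbda*y + 1)**(1/lmbda) - 1 for lmbda != 0 and x = expm1(y) for
+# lmbda == 0. The naive power form below loses precision when lmbda*y is
+# tiny; a careful implementation would evaluate
+# expm1(log1p(lmbda*y)/lmbda) in that regime instead:
+#
+#     import numpy as np
+#
+#     def inv_boxcox1p_ref(y, lmbda):   # hypothetical reference helper
+#         y = np.asarray(y, dtype=float)
+#         if lmbda == 0:
+#             return np.expm1(y)
+#         return np.power(lmbda * y + 1.0, 1.0 / lmbda) - 1.0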
+
+cdef np.PyUFuncGenericFunction ufunc_it2i0k0_loops[2]
+cdef void *ufunc_it2i0k0_ptr[4]
+cdef void *ufunc_it2i0k0_data[2]
+cdef char ufunc_it2i0k0_types[6]
+cdef char *ufunc_it2i0k0_doc = (
+    "it2i0k0(x, out=None)\n"
+    "\n"
+    "Integrals related to modified Bessel functions of order 0.\n"
+    "\n"
+    "Computes the integrals\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    \\int_0^x \\frac{I_0(t) - 1}{t} dt \\\\\n"
+    "    \\int_x^\\infty \\frac{K_0(t)}{t} dt.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Values at which to evaluate the integrals.\n"
+    "out : tuple of ndarrays, optional\n"
+    "    Optional output arrays for the function results.\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "ii0 : scalar or ndarray\n"
+    "    The integral for `i0`\n"
+    "ik0 : scalar or ndarray\n"
+    "    The integral for `k0`\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] S. Zhang and J.M. Jin, \"Computation of Special Functions\",\n"
+    "       Wiley 1996\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Evaluate the functions at one point.\n"
+    "\n"
+    ">>> from scipy.special import it2i0k0\n"
+    ">>> int_i, int_k = it2i0k0(1.)\n"
+    ">>> int_i, int_k\n"
+    "(0.12897944249456852, 0.2085182909001295)\n"
+    "\n"
+    "Evaluate the functions at several points.\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> points = np.array([0.5, 1.5, 3.])\n"
+    ">>> int_i, int_k = it2i0k0(points)\n"
+    ">>> int_i, int_k\n"
+    "(array([0.03149527, 0.30187149, 1.50012461]),\n"
+    " array([0.66575102, 0.0823715 , 0.00823631]))\n"
+    "\n"
+    "Plot the functions from 0 to 5.\n"
+    "\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> x = np.linspace(0., 5., 1000)\n"
+    ">>> int_i, int_k = it2i0k0(x)\n"
+    ">>> ax.plot(x, int_i, label=r\"$\\int_0^x \\frac{I_0(t)-1}{t}\\,dt$\")\n"
+    ">>> ax.plot(x, int_k, label=r\"$\\int_x^{\\infty} \\frac{K_0(t)}{t}\\,dt$\")\n"
+    ">>> ax.legend()\n"
+    ">>> ax.set_ylim(0, 10)\n"
+    ">>> plt.show()")
+ufunc_it2i0k0_loops[0] = loop_i_d_dd_As_f_ff
+ufunc_it2i0k0_loops[1] = loop_i_d_dd_As_d_dd
+ufunc_it2i0k0_types[0] = NPY_FLOAT
+ufunc_it2i0k0_types[1] = NPY_FLOAT
+ufunc_it2i0k0_types[2] = NPY_FLOAT
+ufunc_it2i0k0_types[3] = NPY_DOUBLE
+ufunc_it2i0k0_types[4] = NPY_DOUBLE
+ufunc_it2i0k0_types[5] = NPY_DOUBLE
+ufunc_it2i0k0_ptr[2*0] = <void*>_func_it2i0k0_wrap
+ufunc_it2i0k0_ptr[2*0+1] = <void*>(<char*>"it2i0k0")
+ufunc_it2i0k0_ptr[2*1] = <void*>_func_it2i0k0_wrap
+ufunc_it2i0k0_ptr[2*1+1] = <void*>(<char*>"it2i0k0")
+ufunc_it2i0k0_data[0] = &ufunc_it2i0k0_ptr[2*0]
+ufunc_it2i0k0_data[1] = &ufunc_it2i0k0_ptr[2*1]
+it2i0k0 = np.PyUFunc_FromFuncAndData(ufunc_it2i0k0_loops, ufunc_it2i0k0_data, ufunc_it2i0k0_types, 2, 1, 2, 0, "it2i0k0", ufunc_it2i0k0_doc, 0)
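+# A hedged cross-check (not part of the generated module): this ufunc has
+# one input and two outputs (nin=1, nout=2 above), so it returns a pair.
+# Both outputs can be checked by direct quadrature of the docstring
+# integrands:
+#
+#     import numpy as np
+#     from scipy.integrate import quad
+#     from scipy.special import i0, k0
+#
+#     ii0 = quad(lambda t: (i0(t) - 1.0) / t, 0.0, 1.0)[0]   # ~ 0.1289794
+#     ik0 = quad(lambda t: k0(t) / t, 1.0, np.inf)[0]        # ~ 0.2085183
+#
+# matching it2i0k0(1.) in the examples above.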
+
+cdef np.PyUFuncGenericFunction ufunc_it2j0y0_loops[2]
+cdef void *ufunc_it2j0y0_ptr[4]
+cdef void *ufunc_it2j0y0_data[2]
+cdef char ufunc_it2j0y0_types[6]
+cdef char *ufunc_it2j0y0_doc = (
+    "it2j0y0(x, out=None)\n"
+    "\n"
+    "Integrals related to Bessel functions of the first kind of order 0.\n"
+    "\n"
+    "Computes the integrals\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    \\int_0^x \\frac{1 - J_0(t)}{t} dt \\\\\n"
+    "    \\int_x^\\infty \\frac{Y_0(t)}{t} dt.\n"
+    "\n"
+    "For more on :math:`J_0` and :math:`Y_0` see `j0` and `y0`.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Values at which to evaluate the integrals.\n"
+    "out : tuple of ndarrays, optional\n"
+    "    Optional output arrays for the function results.\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "ij0 : scalar or ndarray\n"
+    "    The integral for `j0`\n"
+    "iy0 : scalar or ndarray\n"
+    "    The integral for `y0`\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] S. Zhang and J.M. Jin, \"Computation of Special Functions\",\n"
+    "       Wiley 1996\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Evaluate the functions at one point.\n"
+    "\n"
+    ">>> from scipy.special import it2j0y0\n"
+    ">>> int_j, int_y = it2j0y0(1.)\n"
+    ">>> int_j, int_y\n"
+    "(0.12116524699506871, 0.39527290169929336)\n"
+    "\n"
+    "Evaluate the functions at several points.\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> points = np.array([0.5, 1.5, 3.])\n"
+    ">>> int_j, int_y = it2j0y0(points)\n"
+    ">>> int_j, int_y\n"
+    "(array([0.03100699, 0.26227724, 0.85614669]),\n"
+    " array([ 0.26968854,  0.29769696, -0.02987272]))\n"
+    "\n"
+    "Plot the functions from 0 to 10.\n"
+    "\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> x = np.linspace(0., 10., 1000)\n"
+    ">>> int_j, int_y = it2j0y0(x)\n"
+    ">>> ax.plot(x, int_j, label=r\"$\\int_0^x \\frac{1-J_0(t)}{t}\\,dt$\")\n"
+    ">>> ax.plot(x, int_y, label=r\"$\\int_x^{\\infty} \\frac{Y_0(t)}{t}\\,dt$\")\n"
+    ">>> ax.legend()\n"
+    ">>> ax.set_ylim(-2.5, 2.5)\n"
+    ">>> plt.show()")
+ufunc_it2j0y0_loops[0] = loop_i_d_dd_As_f_ff
+ufunc_it2j0y0_loops[1] = loop_i_d_dd_As_d_dd
+ufunc_it2j0y0_types[0] = NPY_FLOAT
+ufunc_it2j0y0_types[1] = NPY_FLOAT
+ufunc_it2j0y0_types[2] = NPY_FLOAT
+ufunc_it2j0y0_types[3] = NPY_DOUBLE
+ufunc_it2j0y0_types[4] = NPY_DOUBLE
+ufunc_it2j0y0_types[5] = NPY_DOUBLE
+ufunc_it2j0y0_ptr[2*0] = <void*>_func_it2j0y0_wrap
+ufunc_it2j0y0_ptr[2*0+1] = <void*>(<char*>"it2j0y0")
+ufunc_it2j0y0_ptr[2*1] = <void*>_func_it2j0y0_wrap
+ufunc_it2j0y0_ptr[2*1+1] = <void*>(<char*>"it2j0y0")
+ufunc_it2j0y0_data[0] = &ufunc_it2j0y0_ptr[2*0]
+ufunc_it2j0y0_data[1] = &ufunc_it2j0y0_ptr[2*1]
+it2j0y0 = np.PyUFunc_FromFuncAndData(ufunc_it2j0y0_loops, ufunc_it2j0y0_data, ufunc_it2j0y0_types, 2, 1, 2, 0, "it2j0y0", ufunc_it2j0y0_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_it2struve0_loops[2]
+cdef void *ufunc_it2struve0_ptr[4]
+cdef void *ufunc_it2struve0_data[2]
+cdef char ufunc_it2struve0_types[4]
+cdef char *ufunc_it2struve0_doc = (
+    "it2struve0(x, out=None)\n"
+    "\n"
+    "Integral related to the Struve function of order 0.\n"
+    "\n"
+    "Returns the integral,\n"
+    "\n"
+    ".. math::\n"
+    "    \\int_x^\\infty \\frac{H_0(t)}{t}\\,dt\n"
+    "\n"
+    "where :math:`H_0` is the Struve function of order 0.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Lower limit of integration.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "I : scalar or ndarray\n"
+    "    The value of the integral.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "struve\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "Wrapper for a Fortran routine created by Shanjie Zhang and Jianming\n"
+    "Jin [1]_.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n"
+    "       Functions\", John Wiley and Sons, 1996.\n"
+    "       https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Evaluate the function at one point.\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import it2struve0\n"
+    ">>> it2struve0(1.)\n"
+    "0.9571973506383524\n"
+    "\n"
+    "Evaluate the function at several points by supplying\n"
+    "an array for `x`.\n"
+    "\n"
+    ">>> points = np.array([1., 2., 3.5])\n"
+    ">>> it2struve0(points)\n"
+    "array([0.95719735, 0.46909296, 0.10366042])\n"
+    "\n"
+    "Plot the function from -10 to 10.\n"
+    "\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> x = np.linspace(-10., 10., 1000)\n"
+    ">>> it2struve0_values = it2struve0(x)\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> ax.plot(x, it2struve0_values)\n"
+    ">>> ax.set_xlabel(r'$x$')\n"
+    ">>> ax.set_ylabel(r'$\\int_x^{\\infty}\\frac{H_0(t)}{t}\\,dt$')\n"
+    ">>> plt.show()")
+ufunc_it2struve0_loops[0] = loop_d_d__As_f_f
+ufunc_it2struve0_loops[1] = loop_d_d__As_d_d
+ufunc_it2struve0_types[0] = NPY_FLOAT
+ufunc_it2struve0_types[1] = NPY_FLOAT
+ufunc_it2struve0_types[2] = NPY_DOUBLE
+ufunc_it2struve0_types[3] = NPY_DOUBLE
+ufunc_it2struve0_ptr[2*0] = <void*>_func_it2struve0_wrap
+ufunc_it2struve0_ptr[2*0+1] = <void*>(<char*>"it2struve0")
+ufunc_it2struve0_ptr[2*1] = <void*>_func_it2struve0_wrap
+ufunc_it2struve0_ptr[2*1+1] = <void*>(<char*>"it2struve0")
+ufunc_it2struve0_data[0] = &ufunc_it2struve0_ptr[2*0]
+ufunc_it2struve0_data[1] = &ufunc_it2struve0_ptr[2*1]
+it2struve0 = np.PyUFunc_FromFuncAndData(ufunc_it2struve0_loops, ufunc_it2struve0_data, ufunc_it2struve0_types, 2, 1, 1, 0, "it2struve0", ufunc_it2struve0_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_itairy_loops[2]
+cdef void *ufunc_itairy_ptr[4]
+cdef void *ufunc_itairy_data[2]
+cdef char ufunc_itairy_types[10]
+cdef char *ufunc_itairy_doc = (
+    "itairy(x, out=None)\n"
+    "\n"
+    "Integrals of Airy functions\n"
+    "\n"
+    "Calculates the integrals of Airy functions from 0 to `x`.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "\n"
+    "x : array_like\n"
+    "    Upper limit of integration (float).\n"
+    "out : tuple of ndarray, optional\n"
+    "    Optional output arrays for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "Apt : scalar or ndarray\n"
+    "    Integral of Ai(t) from 0 to x.\n"
+    "Bpt : scalar or ndarray\n"
+    "    Integral of Bi(t) from 0 to x.\n"
+    "Ant : scalar or ndarray\n"
+    "    Integral of Ai(-t) from 0 to x.\n"
+    "Bnt : scalar or ndarray\n"
+    "    Integral of Bi(-t) from 0 to x.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "\n"
+    "Wrapper for a Fortran routine created by Shanjie Zhang and Jianming\n"
+    "Jin [1]_.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    "\n"
+    ".. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n"
+    "       Functions\", John Wiley and Sons, 1996.\n"
+    "       https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Compute the functions at ``x=1.``.\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import itairy\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> apt, bpt, ant, bnt = itairy(1.)\n"
+    ">>> apt, bpt, ant, bnt\n"
+    "(0.23631734191710949,\n"
+    " 0.8727691167380077,\n"
+    " 0.46567398346706845,\n"
+    " 0.3730050096342943)\n"
+    "\n"
+    "Compute the functions at several points by providing a NumPy array for `x`.\n"
+    "\n"
+    ">>> x = np.array([1., 1.5, 2.5, 5])\n"
+    ">>> apt, bpt, ant, bnt = itairy(x)\n"
+    ">>> apt, bpt, ant, bnt\n"
+    "(array([0.23631734, 0.28678675, 0.324638  , 0.33328759]),\n"
+    " array([  0.87276912,   1.62470809,   5.20906691, 321.47831857]),\n"
+    " array([0.46567398, 0.72232876, 0.93187776, 0.7178822 ]),\n"
+    " array([ 0.37300501,  0.35038814, -0.02812939,  0.15873094]))\n"
+    "\n"
+    "Plot the functions from -10 to 10.\n"
+    "\n"
+    ">>> x = np.linspace(-10, 10, 500)\n"
+    ">>> apt, bpt, ant, bnt = itairy(x)\n"
+    ">>> fig, ax = plt.subplots(figsize=(6, 5))\n"
+    ">>> ax.plot(x, apt, label=\"$\\int_0^x\\, Ai(t)\\, dt$\")\n"
+    ">>> ax.plot(x, bpt, ls=\"dashed\", label=\"$\\int_0^x\\, Bi(t)\\, dt$\")\n"
+    ">>> ax.plot(x, ant, ls=\"dashdot\", label=\"$\\int_0^x\\, Ai(-t)\\, dt$\")\n"
+    ">>> ax.plot(x, bnt, ls=\"dotted\", label=\"$\\int_0^x\\, Bi(-t)\\, dt$\")\n"
+    ">>> ax.set_ylim(-2, 1.5)\n"
+    ">>> ax.legend(loc=\"lower right\")\n"
+    ">>> plt.show()")
+ufunc_itairy_loops[0] = loop_i_d_dddd_As_f_ffff
+ufunc_itairy_loops[1] = loop_i_d_dddd_As_d_dddd
+ufunc_itairy_types[0] = NPY_FLOAT
+ufunc_itairy_types[1] = NPY_FLOAT
+ufunc_itairy_types[2] = NPY_FLOAT
+ufunc_itairy_types[3] = NPY_FLOAT
+ufunc_itairy_types[4] = NPY_FLOAT
+ufunc_itairy_types[5] = NPY_DOUBLE
+ufunc_itairy_types[6] = NPY_DOUBLE
+ufunc_itairy_types[7] = NPY_DOUBLE
+ufunc_itairy_types[8] = NPY_DOUBLE
+ufunc_itairy_types[9] = NPY_DOUBLE
+ufunc_itairy_ptr[2*0] = <void*>_func_itairy_wrap
+ufunc_itairy_ptr[2*0+1] = <void*>(<char*>"itairy")
+ufunc_itairy_ptr[2*1] = <void*>_func_itairy_wrap
+ufunc_itairy_ptr[2*1+1] = <void*>(<char*>"itairy")
+ufunc_itairy_data[0] = &ufunc_itairy_ptr[2*0]
+ufunc_itairy_data[1] = &ufunc_itairy_ptr[2*1]
+itairy = np.PyUFunc_FromFuncAndData(ufunc_itairy_loops, ufunc_itairy_data, ufunc_itairy_types, 2, 1, 4, 0, "itairy", ufunc_itairy_doc, 0)
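+# A hedged cross-check (not part of the generated module): the real
+# scipy.special.airy returns the tuple (Ai, Ai', Bi, Bi'), so all four
+# outputs registered above can be checked by quadrature:
+#
+#     from scipy.integrate import quad
+#     from scipy.special import airy
+#
+#     apt = quad(lambda t: airy(t)[0], 0.0, 1.0)[0]    # ~ 0.2363173
+#     bpt = quad(lambda t: airy(t)[2], 0.0, 1.0)[0]    # ~ 0.8727691
+#     ant = quad(lambda t: airy(-t)[0], 0.0, 1.0)[0]   # ~ 0.4656740
+#     bnt = quad(lambda t: airy(-t)[2], 0.0, 1.0)[0]   # ~ 0.3730050
+#
+# matching itairy(1.) in the examples above.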
+
+cdef np.PyUFuncGenericFunction ufunc_iti0k0_loops[2]
+cdef void *ufunc_iti0k0_ptr[4]
+cdef void *ufunc_iti0k0_data[2]
+cdef char ufunc_iti0k0_types[6]
+cdef char *ufunc_iti0k0_doc = (
+    "iti0k0(x, out=None)\n"
+    "\n"
+    "Integrals of modified Bessel functions of order 0.\n"
+    "\n"
+    "Computes the integrals\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    \\int_0^x I_0(t) dt \\\\\n"
+    "    \\int_0^x K_0(t) dt.\n"
+    "\n"
+    "For more on :math:`I_0` and :math:`K_0` see `i0` and `k0`.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Values at which to evaluate the integrals.\n"
+    "out : tuple of ndarrays, optional\n"
+    "    Optional output arrays for the function results.\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "ii0 : scalar or ndarray\n"
+    "    The integral for `i0`\n"
+    "ik0 : scalar or ndarray\n"
+    "    The integral for `k0`\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] S. Zhang and J.M. Jin, \"Computation of Special Functions\",\n"
+    "       Wiley 1996\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Evaluate the functions at one point.\n"
+    "\n"
+    ">>> from scipy.special import iti0k0\n"
+    ">>> int_i, int_k = iti0k0(1.)\n"
+    ">>> int_i, int_k\n"
+    "(1.0865210970235892, 1.2425098486237771)\n"
+    "\n"
+    "Evaluate the functions at several points.\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> points = np.array([0., 1.5, 3.])\n"
+    ">>> int_i, int_k = iti0k0(points)\n"
+    ">>> int_i, int_k\n"
+    "(array([0.        , 1.80606937, 6.16096149]),\n"
+    " array([0.        , 1.39458246, 1.53994809]))\n"
+    "\n"
+    "Plot the functions from 0 to 5.\n"
+    "\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> x = np.linspace(0., 5., 1000)\n"
+    ">>> int_i, int_k = iti0k0(x)\n"
+    ">>> ax.plot(x, int_i, label=\"$\\int_0^x I_0(t)\\,dt$\")\n"
+    ">>> ax.plot(x, int_k, label=\"$\\int_0^x K_0(t)\\,dt$\")\n"
+    ">>> ax.legend()\n"
+    ">>> plt.show()")
+ufunc_iti0k0_loops[0] = loop_i_d_dd_As_f_ff
+ufunc_iti0k0_loops[1] = loop_i_d_dd_As_d_dd
+ufunc_iti0k0_types[0] = NPY_FLOAT
+ufunc_iti0k0_types[1] = NPY_FLOAT
+ufunc_iti0k0_types[2] = NPY_FLOAT
+ufunc_iti0k0_types[3] = NPY_DOUBLE
+ufunc_iti0k0_types[4] = NPY_DOUBLE
+ufunc_iti0k0_types[5] = NPY_DOUBLE
+ufunc_iti0k0_ptr[2*0] = <void*>_func_it1i0k0_wrap
+ufunc_iti0k0_ptr[2*0+1] = <void*>(<char*>"iti0k0")
+ufunc_iti0k0_ptr[2*1] = <void*>_func_it1i0k0_wrap
+ufunc_iti0k0_ptr[2*1+1] = <void*>(<char*>"iti0k0")
+ufunc_iti0k0_data[0] = &ufunc_iti0k0_ptr[2*0]
+ufunc_iti0k0_data[1] = &ufunc_iti0k0_ptr[2*1]
+iti0k0 = np.PyUFunc_FromFuncAndData(ufunc_iti0k0_loops, ufunc_iti0k0_data, ufunc_iti0k0_types, 2, 1, 2, 0, "iti0k0", ufunc_iti0k0_doc, 0)
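+# A hedged quadrature cross-check (not part of the generated module) of
+# both integrals at x = 1; k0 has an integrable logarithmic singularity at
+# 0, which quad handles:
+#
+#     from scipy.integrate import quad
+#     from scipy.special import i0, k0
+#
+#     quad(i0, 0.0, 1.0)[0]   # ~ 1.0865211
+#     quad(k0, 0.0, 1.0)[0]   # ~ 1.2425098
+#
+# matching iti0k0(1.) in the examples above.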
+
+cdef np.PyUFuncGenericFunction ufunc_itj0y0_loops[2]
+cdef void *ufunc_itj0y0_ptr[4]
+cdef void *ufunc_itj0y0_data[2]
+cdef char ufunc_itj0y0_types[6]
+cdef char *ufunc_itj0y0_doc = (
+    "itj0y0(x, out=None)\n"
+    "\n"
+    "Integrals of Bessel functions of the first kind of order 0.\n"
+    "\n"
+    "Computes the integrals\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    \\int_0^x J_0(t) dt \\\\\n"
+    "    \\int_0^x Y_0(t) dt.\n"
+    "\n"
+    "For more on :math:`J_0` and :math:`Y_0` see `j0` and `y0`.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Values at which to evaluate the integrals.\n"
+    "out : tuple of ndarrays, optional\n"
+    "    Optional output arrays for the function results.\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "ij0 : scalar or ndarray\n"
+    "    The integral of `j0`\n"
+    "iy0 : scalar or ndarray\n"
+    "    The integral of `y0`\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] S. Zhang and J.M. Jin, \"Computation of Special Functions\",\n"
+    "       Wiley 1996\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Evaluate the functions at one point.\n"
+    "\n"
+    ">>> from scipy.special import itj0y0\n"
+    ">>> int_j, int_y = itj0y0(1.)\n"
+    ">>> int_j, int_y\n"
+    "(0.9197304100897596, -0.637069376607422)\n"
+    "\n"
+    "Evaluate the functions at several points.\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> points = np.array([0., 1.5, 3.])\n"
+    ">>> int_j, int_y = itj0y0(points)\n"
+    ">>> int_j, int_y\n"
+    "(array([0.        , 1.24144951, 1.38756725]),\n"
+    " array([ 0.        , -0.51175903,  0.19765826]))\n"
+    "\n"
+    "Plot the functions from 0 to 10.\n"
+    "\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> x = np.linspace(0., 10., 1000)\n"
+    ">>> int_j, int_y = itj0y0(x)\n"
+    ">>> ax.plot(x, int_j, label=\"$\\int_0^x J_0(t)\\,dt$\")\n"
+    ">>> ax.plot(x, int_y, label=\"$\\int_0^x Y_0(t)\\,dt$\")\n"
+    ">>> ax.legend()\n"
+    ">>> plt.show()")
+ufunc_itj0y0_loops[0] = loop_i_d_dd_As_f_ff
+ufunc_itj0y0_loops[1] = loop_i_d_dd_As_d_dd
+ufunc_itj0y0_types[0] = NPY_FLOAT
+ufunc_itj0y0_types[1] = NPY_FLOAT
+ufunc_itj0y0_types[2] = NPY_FLOAT
+ufunc_itj0y0_types[3] = NPY_DOUBLE
+ufunc_itj0y0_types[4] = NPY_DOUBLE
+ufunc_itj0y0_types[5] = NPY_DOUBLE
+ufunc_itj0y0_ptr[2*0] = <void*>_func_it1j0y0_wrap
+ufunc_itj0y0_ptr[2*0+1] = <void*>(<char*>"itj0y0")
+ufunc_itj0y0_ptr[2*1] = <void*>_func_it1j0y0_wrap
+ufunc_itj0y0_ptr[2*1+1] = <void*>(<char*>"itj0y0")
+ufunc_itj0y0_data[0] = &ufunc_itj0y0_ptr[2*0]
+ufunc_itj0y0_data[1] = &ufunc_itj0y0_ptr[2*1]
+itj0y0 = np.PyUFunc_FromFuncAndData(ufunc_itj0y0_loops, ufunc_itj0y0_data, ufunc_itj0y0_types, 2, 1, 2, 0, "itj0y0", ufunc_itj0y0_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_itmodstruve0_loops[2]
+cdef void *ufunc_itmodstruve0_ptr[4]
+cdef void *ufunc_itmodstruve0_data[2]
+cdef char ufunc_itmodstruve0_types[4]
+cdef char *ufunc_itmodstruve0_doc = (
+    "itmodstruve0(x, out=None)\n"
+    "\n"
+    "Integral of the modified Struve function of order 0.\n"
+    "\n"
+    ".. math::\n"
+    "    I = \\int_0^x L_0(t)\\,dt\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Upper limit of integration (float).\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "I : scalar or ndarray\n"
+    "    The integral of :math:`L_0` from 0 to `x`.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "Wrapper for a Fortran routine created by Shanjie Zhang and Jianming\n"
+    "Jin [1]_.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "modstruve: Modified Struve function which is integrated by this function\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n"
+    "       Functions\", John Wiley and Sons, 1996.\n"
+    "       https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Evaluate the function at one point.\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import itmodstruve0\n"
+    ">>> itmodstruve0(1.)\n"
+    "0.3364726286440384\n"
+    "\n"
+    "Evaluate the function at several points by supplying\n"
+    "an array for `x`.\n"
+    "\n"
+    ">>> points = np.array([1., 2., 3.5])\n"
+    ">>> itmodstruve0(points)\n"
+    "array([0.33647263, 1.588285  , 7.60382578])\n"
+    "\n"
+    "Plot the function from -10 to 10.\n"
+    "\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> x = np.linspace(-10., 10., 1000)\n"
+    ">>> itmodstruve0_values = itmodstruve0(x)\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> ax.plot(x, itmodstruve0_values)\n"
+    ">>> ax.set_xlabel(r'$x$')\n"
+    ">>> ax.set_ylabel(r'$\\int_0^xL_0(t)\\,dt$')\n"
+    ">>> plt.show()")
+ufunc_itmodstruve0_loops[0] = loop_d_d__As_f_f
+ufunc_itmodstruve0_loops[1] = loop_d_d__As_d_d
+ufunc_itmodstruve0_types[0] = NPY_FLOAT
+ufunc_itmodstruve0_types[1] = NPY_FLOAT
+ufunc_itmodstruve0_types[2] = NPY_DOUBLE
+ufunc_itmodstruve0_types[3] = NPY_DOUBLE
+ufunc_itmodstruve0_ptr[2*0] = <void*>_func_itmodstruve0_wrap
+ufunc_itmodstruve0_ptr[2*0+1] = <void*>(<char*>"itmodstruve0")
+ufunc_itmodstruve0_ptr[2*1] = <void*>_func_itmodstruve0_wrap
+ufunc_itmodstruve0_ptr[2*1+1] = <void*>(<char*>"itmodstruve0")
+ufunc_itmodstruve0_data[0] = &ufunc_itmodstruve0_ptr[2*0]
+ufunc_itmodstruve0_data[1] = &ufunc_itmodstruve0_ptr[2*1]
+itmodstruve0 = np.PyUFunc_FromFuncAndData(ufunc_itmodstruve0_loops, ufunc_itmodstruve0_data, ufunc_itmodstruve0_types, 2, 1, 1, 0, "itmodstruve0", ufunc_itmodstruve0_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_itstruve0_loops[2]
+cdef void *ufunc_itstruve0_ptr[4]
+cdef void *ufunc_itstruve0_data[2]
+cdef char ufunc_itstruve0_types[4]
+cdef char *ufunc_itstruve0_doc = (
+    "itstruve0(x, out=None)\n"
+    "\n"
+    "Integral of the Struve function of order 0.\n"
+    "\n"
+    ".. math::\n"
+    "    I = \\int_0^x H_0(t)\\,dt\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Upper limit of integration (float).\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "I : scalar or ndarray\n"
+    "    The integral of :math:`H_0` from 0 to `x`.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "struve: Function which is integrated by this function\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "Wrapper for a Fortran routine created by Shanjie Zhang and Jianming\n"
+    "Jin [1]_.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n"
+    "       Functions\", John Wiley and Sons, 1996.\n"
+    "       https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Evaluate the function at one point.\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import itstruve0\n"
+    ">>> itstruve0(1.)\n"
+    "0.30109042670805547\n"
+    "\n"
+    "Evaluate the function at several points by supplying\n"
+    "an array for `x`.\n"
+    "\n"
+    ">>> points = np.array([1., 2., 3.5])\n"
+    ">>> itstruve0(points)\n"
+    "array([0.30109043, 1.01870116, 1.96804581])\n"
+    "\n"
+    "Plot the function from -20 to 20.\n"
+    "\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> x = np.linspace(-20., 20., 1000)\n"
+    ">>> istruve0_values = itstruve0(x)\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> ax.plot(x, istruve0_values)\n"
+    ">>> ax.set_xlabel(r'$x$')\n"
+    ">>> ax.set_ylabel(r'$\\int_0^{x}H_0(t)\\,dt$')\n"
+    ">>> plt.show()")
+ufunc_itstruve0_loops[0] = loop_d_d__As_f_f
+ufunc_itstruve0_loops[1] = loop_d_d__As_d_d
+ufunc_itstruve0_types[0] = NPY_FLOAT
+ufunc_itstruve0_types[1] = NPY_FLOAT
+ufunc_itstruve0_types[2] = NPY_DOUBLE
+ufunc_itstruve0_types[3] = NPY_DOUBLE
+ufunc_itstruve0_ptr[2*0] = <void*>_func_itstruve0_wrap
+ufunc_itstruve0_ptr[2*0+1] = <void*>(<char*>"itstruve0")
+ufunc_itstruve0_ptr[2*1] = <void*>_func_itstruve0_wrap
+ufunc_itstruve0_ptr[2*1+1] = <void*>(<char*>"itstruve0")
+ufunc_itstruve0_data[0] = &ufunc_itstruve0_ptr[2*0]
+ufunc_itstruve0_data[1] = &ufunc_itstruve0_ptr[2*1]
+itstruve0 = np.PyUFunc_FromFuncAndData(ufunc_itstruve0_loops, ufunc_itstruve0_data, ufunc_itstruve0_types, 2, 1, 1, 0, "itstruve0", ufunc_itstruve0_doc, 0)
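+# A hedged cross-check (not part of the generated module): quadrature of
+# the real scipy.special.struve reproduces the docstring value at x = 1:
+#
+#     from scipy.integrate import quad
+#     from scipy.special import struve
+#
+#     quad(lambda t: struve(0, t), 0.0, 1.0)[0]   # ~ 0.3010904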
+
+cdef np.PyUFuncGenericFunction ufunc_iv_loops[4]
+cdef void *ufunc_iv_ptr[8]
+cdef void *ufunc_iv_data[4]
+cdef char ufunc_iv_types[12]
+cdef char *ufunc_iv_doc = (
+    "iv(v, z, out=None)\n"
+    "\n"
+    "Modified Bessel function of the first kind of real order.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "v : array_like\n"
+    "    Order. If `z` is of real type and negative, `v` must be integer\n"
+    "    valued.\n"
+    "z : array_like of float or complex\n"
+    "    Argument.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Values of the modified Bessel function.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "For real `z` and :math:`v \\in [-50, 50]`, the evaluation is carried out\n"
+    "using Temme's method [1]_.  For larger orders, uniform asymptotic\n"
+    "expansions are applied.\n"
+    "\n"
+    "For complex `z` and positive `v`, the AMOS [2]_ `zbesi` routine is\n"
+    "called. It uses a power series for small `z`, the asymptotic expansion\n"
+    "for large `abs(z)`, the Miller algorithm normalized by the Wronskian\n"
+    "and a Neumann series for intermediate magnitudes, and the uniform\n"
+    "asymptotic expansions for :math:`I_v(z)` and :math:`J_v(z)` for large\n"
+    "orders. Backward recurrence is used to generate sequences or reduce\n"
+    "orders when necessary.\n"
+    "\n"
+    "The calculations above are done in the right half plane and continued\n"
+    "into the left half plane by the formula,\n"
+    "\n"
+    ".. math:: I_v(z \\exp(\\pm\\imath\\pi)) = \\exp(\\pm\\pi v) I_v(z)\n"
+    "\n"
+    "(valid when the real part of `z` is positive).  For negative `v`, the\n"
+    "formula\n"
+    "\n"
+    ".. math:: I_{-v}(z) = I_v(z) + \\frac{2}{\\pi} \\sin(\\pi v) K_v(z)\n"
+    "\n"
+    "is used, where :math:`K_v(z)` is the modified Bessel function of the\n"
+    "second kind, evaluated using the AMOS routine `zbesk`.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "ive : This function with leading exponential behavior stripped off.\n"
+    "i0 : Faster version of this function for order 0.\n"
+    "i1 : Faster version of this function for order 1.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Temme, Journal of Computational Physics, vol 21, 343 (1976)\n"
+    ".. [2] Donald E. Amos, \"AMOS, A Portable Package for Bessel Functions\n"
+    "       of a Complex Argument and Nonnegative Order\",\n"
+    "       http://netlib.org/amos/\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Evaluate the function of order 0 at one point.\n"
+    "\n"
+    ">>> from scipy.special import iv\n"
+    ">>> iv(0, 1.)\n"
+    "1.2660658777520084\n"
+    "\n"
+    "Evaluate the function at one point for different orders.\n"
+    "\n"
+    ">>> iv(0, 1.), iv(1, 1.), iv(1.5, 1.)\n"
+    "(1.2660658777520084, 0.565159103992485, 0.2935253263474798)\n"
+    "\n"
+    "The evaluation for different orders can be carried out in one call by\n"
+    "providing a list or NumPy array as argument for the `v` parameter:\n"
+    "\n"
+    ">>> iv([0, 1, 1.5], 1.)\n"
+    "array([1.26606588, 0.5651591 , 0.29352533])\n"
+    "\n"
+    "Evaluate the function at several points for order 0 by providing an\n"
+    "array for `z`.\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> points = np.array([-2., 0., 3.])\n"
+    ">>> iv(0, points)\n"
+    "array([2.2795853 , 1.        , 4.88079259])\n"
+    "\n"
+    "If `z` is an array, the order parameter `v` must be broadcastable to\n"
+    "the correct shape if different orders shall be computed in one call.\n"
+    "To calculate the orders 0 and 1 for an 1D array:\n"
+    "\n"
+    ">>> orders = np.array([[0], [1]])\n"
+    ">>> orders.shape\n"
+    "(2, 1)\n"
+    "\n"
+    ">>> iv(orders, points)\n"
+    "array([[ 2.2795853 ,  1.        ,  4.88079259],\n"
+    "       [-1.59063685,  0.        ,  3.95337022]])\n"
+    "\n"
+    "Plot the functions of order 0 to 3 from -5 to 5.\n"
+    "\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> x = np.linspace(-5., 5., 1000)\n"
+    ">>> for i in range(4):\n"
+    "...     ax.plot(x, iv(i, x), label=f'$I_{i!r}$')\n"
+    ">>> ax.legend()\n"
+    ">>> plt.show()")
+ufunc_iv_loops[0] = loop_d_dd__As_ff_f
+ufunc_iv_loops[1] = loop_D_dD__As_fF_F
+ufunc_iv_loops[2] = loop_d_dd__As_dd_d
+ufunc_iv_loops[3] = loop_D_dD__As_dD_D
+ufunc_iv_types[0] = NPY_FLOAT
+ufunc_iv_types[1] = NPY_FLOAT
+ufunc_iv_types[2] = NPY_FLOAT
+ufunc_iv_types[3] = NPY_FLOAT
+ufunc_iv_types[4] = NPY_CFLOAT
+ufunc_iv_types[5] = NPY_CFLOAT
+ufunc_iv_types[6] = NPY_DOUBLE
+ufunc_iv_types[7] = NPY_DOUBLE
+ufunc_iv_types[8] = NPY_DOUBLE
+ufunc_iv_types[9] = NPY_DOUBLE
+ufunc_iv_types[10] = NPY_CDOUBLE
+ufunc_iv_types[11] = NPY_CDOUBLE
+ufunc_iv_ptr[2*0] = _func_iv
+ufunc_iv_ptr[2*0+1] = ("iv")
+ufunc_iv_ptr[2*1] = _func_cbesi_wrap
+ufunc_iv_ptr[2*1+1] = ("iv")
+ufunc_iv_ptr[2*2] = _func_iv
+ufunc_iv_ptr[2*2+1] = ("iv")
+ufunc_iv_ptr[2*3] = _func_cbesi_wrap
+ufunc_iv_ptr[2*3+1] = ("iv")
+ufunc_iv_data[0] = &ufunc_iv_ptr[2*0]
+ufunc_iv_data[1] = &ufunc_iv_ptr[2*1]
+ufunc_iv_data[2] = &ufunc_iv_ptr[2*2]
+ufunc_iv_data[3] = &ufunc_iv_ptr[2*3]
+iv = np.PyUFunc_FromFuncAndData(ufunc_iv_loops, ufunc_iv_data, ufunc_iv_types, 4, 2, 1, 0, "iv", ufunc_iv_doc, 0)
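+# A hedged numerical check, not part of the generated ufunc table: the
+# negative-order reflection quoted in the `iv` docstring can be verified
+# against `kv` once the module is importable, e.g. as a doctest:
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import iv, kv
+#     >>> v, z = 0.7, 2.0
+#     >>> np.allclose(iv(-v, z), iv(v, z) + 2/np.pi*np.sin(np.pi*v)*kv(v, z))
+#     True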
+
+cdef np.PyUFuncGenericFunction ufunc_ive_loops[4]
+cdef void *ufunc_ive_ptr[8]
+cdef void *ufunc_ive_data[4]
+cdef char ufunc_ive_types[12]
+cdef char *ufunc_ive_doc = (
+    "ive(v, z, out=None)\n"
+    "\n"
+    "Exponentially scaled modified Bessel function of the first kind.\n"
+    "\n"
+    "Defined as::\n"
+    "\n"
+    "    ive(v, z) = iv(v, z) * exp(-abs(z.real))\n"
+    "\n"
+    "For imaginary numbers without a real part, returns the unscaled\n"
+    "Bessel function of the first kind `iv`.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "v : array_like of float\n"
+    "    Order.\n"
+    "z : array_like of float or complex\n"
+    "    Argument.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Values of the exponentially scaled modified Bessel function.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "For positive `v`, the AMOS [1]_ `zbesi` routine is called. It uses a\n"
+    "power series for small `z`, the asymptotic expansion for large\n"
+    "`abs(z)`, the Miller algorithm normalized by the Wronskian and a\n"
+    "Neumann series for intermediate magnitudes, and the uniform asymptotic\n"
+    "expansions for :math:`I_v(z)` and :math:`J_v(z)` for large orders.\n"
+    "Backward recurrence is used to generate sequences or reduce orders when\n"
+    "necessary.\n"
+    "\n"
+    "The calculations above are done in the right half plane and continued\n"
+    "into the left half plane by the formula,\n"
+    "\n"
+    ".. math:: I_v(z \\exp(\\pm\\imath\\pi)) = \\exp(\\pm\\pi v) I_v(z)\n"
+    "\n"
+    "(valid when the real part of `z` is positive).  For negative `v`, the\n"
+    "formula\n"
+    "\n"
+    ".. math:: I_{-v}(z) = I_v(z) + \\frac{2}{\\pi} \\sin(\\pi v) K_v(z)\n"
+    "\n"
+    "is used, where :math:`K_v(z)` is the modified Bessel function of the\n"
+    "second kind, evaluated using the AMOS routine `zbesk`.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "iv: Modified Bessel function of the first kind\n"
+    "i0e: Faster implementation of this function for order 0\n"
+    "i1e: Faster implementation of this function for order 1\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Donald E. Amos, \"AMOS, A Portable Package for Bessel Functions\n"
+    "       of a Complex Argument and Nonnegative Order\",\n"
+    "       http://netlib.org/amos/\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Evaluate the function of order 0 at one point.\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import iv, ive\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> ive(0, 1.)\n"
+    "0.4657596075936404\n"
+    "\n"
+    "Evaluate the function at one point for different orders by\n"
+    "providing a list or NumPy array as argument for the `v` parameter:\n"
+    "\n"
+    ">>> ive([0, 1, 1.5], 1.)\n"
+    "array([0.46575961, 0.20791042, 0.10798193])\n"
+    "\n"
+    "Evaluate the function at several points for order 0 by providing an\n"
+    "array for `z`.\n"
+    "\n"
+    ">>> points = np.array([-2., 0., 3.])\n"
+    ">>> ive(0, points)\n"
+    "array([0.30850832, 1.        , 0.24300035])\n"
+    "\n"
+    "Evaluate the function at several points for different orders by\n"
+    "providing arrays for both `v` for `z`. Both arrays have to be\n"
+    "broadcastable to the correct shape. To calculate the orders 0, 1\n"
+    "and 2 for a 1D array of points:\n"
+    "\n"
+    ">>> ive([[0], [1], [2]], points)\n"
+    "array([[ 0.30850832,  1.        ,  0.24300035],\n"
+    "       [-0.21526929,  0.        ,  0.19682671],\n"
+    "       [ 0.09323903,  0.        ,  0.11178255]])\n"
+    "\n"
+    "Plot the functions of order 0 to 3 from -5 to 5.\n"
+    "\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> x = np.linspace(-5., 5., 1000)\n"
+    ">>> for i in range(4):\n"
+    "...     ax.plot(x, ive(i, x), label=f'$I_{i!r}(z)\\cdot e^{{-|z|}}$')\n"
+    ">>> ax.legend()\n"
+    ">>> ax.set_xlabel(r\"$z$\")\n"
+    ">>> plt.show()\n"
+    "\n"
+    "Exponentially scaled Bessel functions are useful for large arguments for\n"
+    "which the unscaled Bessel functions over- or underflow. In the\n"
+    "following example `iv` returns infinity whereas `ive` still returns\n"
+    "a finite number.\n"
+    "\n"
+    ">>> iv(3, 1000.), ive(3, 1000.)\n"
+    "(inf, 0.01256056218254712)")
+ufunc_ive_loops[0] = loop_d_dd__As_ff_f
+ufunc_ive_loops[1] = loop_D_dD__As_fF_F
+ufunc_ive_loops[2] = loop_d_dd__As_dd_d
+ufunc_ive_loops[3] = loop_D_dD__As_dD_D
+ufunc_ive_types[0] = NPY_FLOAT
+ufunc_ive_types[1] = NPY_FLOAT
+ufunc_ive_types[2] = NPY_FLOAT
+ufunc_ive_types[3] = NPY_FLOAT
+ufunc_ive_types[4] = NPY_CFLOAT
+ufunc_ive_types[5] = NPY_CFLOAT
+ufunc_ive_types[6] = NPY_DOUBLE
+ufunc_ive_types[7] = NPY_DOUBLE
+ufunc_ive_types[8] = NPY_DOUBLE
+ufunc_ive_types[9] = NPY_DOUBLE
+ufunc_ive_types[10] = NPY_CDOUBLE
+ufunc_ive_types[11] = NPY_CDOUBLE
+ufunc_ive_ptr[2*0] = _func_cbesi_wrap_e_real
+ufunc_ive_ptr[2*0+1] = ("ive")
+ufunc_ive_ptr[2*1] = _func_cbesi_wrap_e
+ufunc_ive_ptr[2*1+1] = ("ive")
+ufunc_ive_ptr[2*2] = _func_cbesi_wrap_e_real
+ufunc_ive_ptr[2*2+1] = ("ive")
+ufunc_ive_ptr[2*3] = _func_cbesi_wrap_e
+ufunc_ive_ptr[2*3+1] = ("ive")
+ufunc_ive_data[0] = &ufunc_ive_ptr[2*0]
+ufunc_ive_data[1] = &ufunc_ive_ptr[2*1]
+ufunc_ive_data[2] = &ufunc_ive_ptr[2*2]
+ufunc_ive_data[3] = &ufunc_ive_ptr[2*3]
+ive = np.PyUFunc_FromFuncAndData(ufunc_ive_loops, ufunc_ive_data, ufunc_ive_types, 4, 2, 1, 0, "ive", ufunc_ive_doc, 0)
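+# Hedged doctest for the scaling relation in the `ive` docstring ("Defined
+# as"), checkable once scipy.special is importable:
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import iv, ive
+#     >>> z = 2.0 + 3.0j
+#     >>> np.allclose(ive(1, z), iv(1, z) * np.exp(-abs(z.real)))
+#     True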
+
+cdef np.PyUFuncGenericFunction ufunc_j0_loops[2]
+cdef void *ufunc_j0_ptr[4]
+cdef void *ufunc_j0_data[2]
+cdef char ufunc_j0_types[4]
+cdef char *ufunc_j0_doc = (
+    "j0(x, out=None)\n"
+    "\n"
+    "Bessel function of the first kind of order 0.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Argument (float).\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "J : scalar or ndarray\n"
+    "    Value of the Bessel function of the first kind of order 0 at `x`.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "The domain is divided into the intervals [0, 5] and (5, infinity). In the\n"
+    "first interval the following rational approximation is used:\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    J_0(x) \\approx (w - r_1^2)(w - r_2^2) \\frac{P_3(w)}{Q_8(w)},\n"
+    "\n"
+    "where :math:`w = x^2` and :math:`r_1`, :math:`r_2` are the zeros of\n"
+    ":math:`J_0`, and :math:`P_3` and :math:`Q_8` are polynomials of degrees 3\n"
+    "and 8, respectively.\n"
+    "\n"
+    "In the second interval, the Hankel asymptotic expansion is employed with\n"
+    "two rational functions of degree 6/6 and 7/7.\n"
+    "\n"
+    "This function is a wrapper for the Cephes [1]_ routine `j0`.\n"
+    "It should not be confused with the spherical Bessel functions (see\n"
+    "`spherical_jn`).\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "jv : Bessel function of real order and complex argument.\n"
+    "spherical_jn : spherical Bessel functions.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Cephes Mathematical Functions Library,\n"
+    "       http://www.netlib.org/cephes/\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Calculate the function at one point:\n"
+    "\n"
+    ">>> from scipy.special import j0\n"
+    ">>> j0(1.)\n"
+    "0.7651976865579665\n"
+    "\n"
+    "Calculate the function at several points:\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> j0(np.array([-2., 0., 4.]))\n"
+    "array([ 0.22389078,  1.        , -0.39714981])\n"
+    "\n"
+    "Plot the function from -20 to 20.\n"
+    "\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> x = np.linspace(-20., 20., 1000)\n"
+    ">>> y = j0(x)\n"
+    ">>> ax.plot(x, y)\n"
+    ">>> plt.show()")
+ufunc_j0_loops[0] = loop_d_d__As_f_f
+ufunc_j0_loops[1] = loop_d_d__As_d_d
+ufunc_j0_types[0] = NPY_FLOAT
+ufunc_j0_types[1] = NPY_FLOAT
+ufunc_j0_types[2] = NPY_DOUBLE
+ufunc_j0_types[3] = NPY_DOUBLE
+ufunc_j0_ptr[2*0] = _func_j0
+ufunc_j0_ptr[2*0+1] = ("j0")
+ufunc_j0_ptr[2*1] = _func_j0
+ufunc_j0_ptr[2*1+1] = ("j0")
+ufunc_j0_data[0] = &ufunc_j0_ptr[2*0]
+ufunc_j0_data[1] = &ufunc_j0_ptr[2*1]
+j0 = np.PyUFunc_FromFuncAndData(ufunc_j0_loops, ufunc_j0_data, ufunc_j0_types, 2, 1, 1, 0, "j0", ufunc_j0_doc, 0)
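+# Hedged cross-check: per the `j0` docstring's "See also", `jv` with order 0
+# computes the same function through a different backend:
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import j0, jv
+#     >>> x = np.linspace(0., 10., 5)
+#     >>> np.allclose(j0(x), jv(0, x))
+#     True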
+
+cdef np.PyUFuncGenericFunction ufunc_j1_loops[2]
+cdef void *ufunc_j1_ptr[4]
+cdef void *ufunc_j1_data[2]
+cdef char ufunc_j1_types[4]
+cdef char *ufunc_j1_doc = (
+    "j1(x, out=None)\n"
+    "\n"
+    "Bessel function of the first kind of order 1.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Argument (float).\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "J : scalar or ndarray\n"
+    "    Value of the Bessel function of the first kind of order 1 at `x`.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "The domain is divided into the intervals [0, 8] and (8, infinity). In the\n"
+    "first interval a 24 term Chebyshev expansion is used. In the second, the\n"
+    "asymptotic trigonometric representation is employed using two rational\n"
+    "functions of degree 5/5.\n"
+    "\n"
+    "This function is a wrapper for the Cephes [1]_ routine `j1`.\n"
+    "It should not be confused with the spherical Bessel functions (see\n"
+    "`spherical_jn`).\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "jv: Bessel function of the first kind\n"
+    "spherical_jn: spherical Bessel functions.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Cephes Mathematical Functions Library,\n"
+    "       http://www.netlib.org/cephes/\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Calculate the function at one point:\n"
+    "\n"
+    ">>> from scipy.special import j1\n"
+    ">>> j1(1.)\n"
+    "0.44005058574493355\n"
+    "\n"
+    "Calculate the function at several points:\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> j1(np.array([-2., 0., 4.]))\n"
+    "array([-0.57672481,  0.        , -0.06604333])\n"
+    "\n"
+    "Plot the function from -20 to 20.\n"
+    "\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> x = np.linspace(-20., 20., 1000)\n"
+    ">>> y = j1(x)\n"
+    ">>> ax.plot(x, y)\n"
+    ">>> plt.show()")
+ufunc_j1_loops[0] = loop_d_d__As_f_f
+ufunc_j1_loops[1] = loop_d_d__As_d_d
+ufunc_j1_types[0] = NPY_FLOAT
+ufunc_j1_types[1] = NPY_FLOAT
+ufunc_j1_types[2] = NPY_DOUBLE
+ufunc_j1_types[3] = NPY_DOUBLE
+ufunc_j1_ptr[2*0] = _func_j1
+ufunc_j1_ptr[2*0+1] = ("j1")
+ufunc_j1_ptr[2*1] = _func_j1
+ufunc_j1_ptr[2*1+1] = ("j1")
+ufunc_j1_data[0] = &ufunc_j1_ptr[2*0]
+ufunc_j1_data[1] = &ufunc_j1_ptr[2*1]
+j1 = np.PyUFunc_FromFuncAndData(ufunc_j1_loops, ufunc_j1_data, ufunc_j1_types, 2, 1, 1, 0, "j1", ufunc_j1_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_jv_loops[4]
+cdef void *ufunc_jv_ptr[8]
+cdef void *ufunc_jv_data[4]
+cdef char ufunc_jv_types[12]
+cdef char *ufunc_jv_doc = (
+    "jv(v, z, out=None)\n"
+    "\n"
+    "Bessel function of the first kind of real order and complex argument.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "v : array_like\n"
+    "    Order (float).\n"
+    "z : array_like\n"
+    "    Argument (float or complex).\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "J : scalar or ndarray\n"
+    "    Value of the Bessel function, :math:`J_v(z)`.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "jve : :math:`J_v` with leading exponential behavior stripped off.\n"
+    "spherical_jn : spherical Bessel functions.\n"
+    "j0 : faster version of this function for order 0.\n"
+    "j1 : faster version of this function for order 1.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "For positive `v` values, the computation is carried out using the AMOS\n"
+    "[1]_ `zbesj` routine, which exploits the connection to the modified\n"
+    "Bessel function :math:`I_v`,\n"
+    "\n"
+    ".. math::\n"
+    "    J_v(z) = \\exp(v\\pi\\imath/2) I_v(-\\imath z)\\qquad (\\Im z > 0)\n"
+    "\n"
+    "    J_v(z) = \\exp(-v\\pi\\imath/2) I_v(\\imath z)\\qquad (\\Im z < 0)\n"
+    "\n"
+    "For negative `v` values the formula,\n"
+    "\n"
+    ".. math:: J_{-v}(z) = J_v(z) \\cos(\\pi v) - Y_v(z) \\sin(\\pi v)\n"
+    "\n"
+    "is used, where :math:`Y_v(z)` is the Bessel function of the second\n"
+    "kind, computed using the AMOS routine `zbesy`.  Note that the second\n"
+    "term is exactly zero for integer `v`; to improve accuracy the second\n"
+    "term is explicitly omitted for `v` values such that `v = floor(v)`.\n"
+    "\n"
+    "Not to be confused with the spherical Bessel functions (see `spherical_jn`).\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Donald E. Amos, \"AMOS, A Portable Package for Bessel Functions\n"
+    "       of a Complex Argument and Nonnegative Order\",\n"
+    "       http://netlib.org/amos/\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Evaluate the function of order 0 at one point.\n"
+    "\n"
+    ">>> from scipy.special import jv\n"
+    ">>> jv(0, 1.)\n"
+    "0.7651976865579666\n"
+    "\n"
+    "Evaluate the function at one point for different orders.\n"
+    "\n"
+    ">>> jv(0, 1.), jv(1, 1.), jv(1.5, 1.)\n"
+    "(0.7651976865579666, 0.44005058574493355, 0.24029783912342725)\n"
+    "\n"
+    "The evaluation for different orders can be carried out in one call by\n"
+    "providing a list or NumPy array as argument for the `v` parameter:\n"
+    "\n"
+    ">>> jv([0, 1, 1.5], 1.)\n"
+    "array([0.76519769, 0.44005059, 0.24029784])\n"
+    "\n"
+    "Evaluate the function at several points for order 0 by providing an\n"
+    "array for `z`.\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> points = np.array([-2., 0., 3.])\n"
+    ">>> jv(0, points)\n"
+    "array([ 0.22389078,  1.        , -0.26005195])\n"
+    "\n"
+    "If `z` is an array, the order parameter `v` must be broadcastable to\n"
+    "the correct shape if different orders shall be computed in one call.\n"
+    "To calculate the orders 0 and 1 for an 1D array:\n"
+    "\n"
+    ">>> orders = np.array([[0], [1]])\n"
+    ">>> orders.shape\n"
+    "(2, 1)\n"
+    "\n"
+    ">>> jv(orders, points)\n"
+    "array([[ 0.22389078,  1.        , -0.26005195],\n"
+    "       [-0.57672481,  0.        ,  0.33905896]])\n"
+    "\n"
+    "Plot the functions of order 0 to 3 from -10 to 10.\n"
+    "\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> x = np.linspace(-10., 10., 1000)\n"
+    ">>> for i in range(4):\n"
+    "...     ax.plot(x, jv(i, x), label=f'$J_{i!r}$')\n"
+    ">>> ax.legend()\n"
+    ">>> plt.show()")
+ufunc_jv_loops[0] = loop_d_dd__As_ff_f
+ufunc_jv_loops[1] = loop_D_dD__As_fF_F
+ufunc_jv_loops[2] = loop_d_dd__As_dd_d
+ufunc_jv_loops[3] = loop_D_dD__As_dD_D
+ufunc_jv_types[0] = NPY_FLOAT
+ufunc_jv_types[1] = NPY_FLOAT
+ufunc_jv_types[2] = NPY_FLOAT
+ufunc_jv_types[3] = NPY_FLOAT
+ufunc_jv_types[4] = NPY_CFLOAT
+ufunc_jv_types[5] = NPY_CFLOAT
+ufunc_jv_types[6] = NPY_DOUBLE
+ufunc_jv_types[7] = NPY_DOUBLE
+ufunc_jv_types[8] = NPY_DOUBLE
+ufunc_jv_types[9] = NPY_DOUBLE
+ufunc_jv_types[10] = NPY_CDOUBLE
+ufunc_jv_types[11] = NPY_CDOUBLE
+ufunc_jv_ptr[2*0] = _func_cbesj_wrap_real
+ufunc_jv_ptr[2*0+1] = ("jv")
+ufunc_jv_ptr[2*1] = _func_cbesj_wrap
+ufunc_jv_ptr[2*1+1] = ("jv")
+ufunc_jv_ptr[2*2] = _func_cbesj_wrap_real
+ufunc_jv_ptr[2*2+1] = ("jv")
+ufunc_jv_ptr[2*3] = _func_cbesj_wrap
+ufunc_jv_ptr[2*3+1] = ("jv")
+ufunc_jv_data[0] = &ufunc_jv_ptr[2*0]
+ufunc_jv_data[1] = &ufunc_jv_ptr[2*1]
+ufunc_jv_data[2] = &ufunc_jv_ptr[2*2]
+ufunc_jv_data[3] = &ufunc_jv_ptr[2*3]
+jv = np.PyUFunc_FromFuncAndData(ufunc_jv_loops, ufunc_jv_data, ufunc_jv_types, 4, 2, 1, 0, "jv", ufunc_jv_doc, 0)
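+# Hedged doctest for the negative-order connection formula in the `jv`
+# docstring, using `yv` (Bessel function of the second kind) from
+# scipy.special:
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import jv, yv
+#     >>> v, z = 0.7, 2.0
+#     >>> np.allclose(jv(-v, z),
+#     ...             jv(v, z)*np.cos(np.pi*v) - yv(v, z)*np.sin(np.pi*v))
+#     True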
+
+cdef np.PyUFuncGenericFunction ufunc_jve_loops[4]
+cdef void *ufunc_jve_ptr[8]
+cdef void *ufunc_jve_data[4]
+cdef char ufunc_jve_types[12]
+cdef char *ufunc_jve_doc = (
+    "jve(v, z, out=None)\n"
+    "\n"
+    "Exponentially scaled Bessel function of the first kind of order `v`.\n"
+    "\n"
+    "Defined as::\n"
+    "\n"
+    "    jve(v, z) = jv(v, z) * exp(-abs(z.imag))\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "v : array_like\n"
+    "    Order (float).\n"
+    "z : array_like\n"
+    "    Argument (float or complex).\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "J : scalar or ndarray\n"
+    "    Value of the exponentially scaled Bessel function.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "jv: Unscaled Bessel function of the first kind\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "For positive `v` values, the computation is carried out using the AMOS\n"
+    "[1]_ `zbesj` routine, which exploits the connection to the modified\n"
+    "Bessel function :math:`I_v`,\n"
+    "\n"
+    ".. math::\n"
+    "    J_v(z) = \\exp(v\\pi\\imath/2) I_v(-\\imath z)\\qquad (\\Im z > 0)\n"
+    "\n"
+    "    J_v(z) = \\exp(-v\\pi\\imath/2) I_v(\\imath z)\\qquad (\\Im z < 0)\n"
+    "\n"
+    "For negative `v` values the formula,\n"
+    "\n"
+    ".. math:: J_{-v}(z) = J_v(z) \\cos(\\pi v) - Y_v(z) \\sin(\\pi v)\n"
+    "\n"
+    "is used, where :math:`Y_v(z)` is the Bessel function of the second\n"
+    "kind, computed using the AMOS routine `zbesy`.  Note that the second\n"
+    "term is exactly zero for integer `v`; to improve accuracy the second\n"
+    "term is explicitly omitted for `v` values such that `v = floor(v)`.\n"
+    "\n"
+    "Exponentially scaled Bessel functions are useful for large arguments `z`:\n"
+    "for these, the unscaled Bessel functions can easily under-or overflow.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Donald E. Amos, \"AMOS, A Portable Package for Bessel Functions\n"
+    "       of a Complex Argument and Nonnegative Order\",\n"
+    "       http://netlib.org/amos/\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Compare the output of `jv` and `jve` for large complex arguments for `z`\n"
+    "by computing their values for order ``v=1`` at ``z=1000j``. We see that\n"
+    "`jv` overflows but `jve` returns a finite number:\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import jv, jve\n"
+    ">>> v = 1\n"
+    ">>> z = 1000j\n"
+    ">>> jv(v, z), jve(v, z)\n"
+    "((inf+infj), (7.721967686709077e-19+0.012610930256928629j))\n"
+    "\n"
+    "For real arguments for `z`, `jve` returns the same as `jv`.\n"
+    "\n"
+    ">>> v, z = 1, 1000\n"
+    ">>> jv(v, z), jve(v, z)\n"
+    "(0.004728311907089523, 0.004728311907089523)\n"
+    "\n"
+    "The function can be evaluated for several orders at the same time by\n"
+    "providing a list or NumPy array for `v`:\n"
+    "\n"
+    ">>> jve([1, 3, 5], 1j)\n"
+    "array([1.27304208e-17+2.07910415e-01j, -4.99352086e-19-8.15530777e-03j,\n"
+    "       6.11480940e-21+9.98657141e-05j])\n"
+    "\n"
+    "In the same way, the function can be evaluated at several points in one\n"
+    "call by providing a list or NumPy array for `z`:\n"
+    "\n"
+    ">>> jve(1, np.array([1j, 2j, 3j]))\n"
+    "array([1.27308412e-17+0.20791042j, 1.31814423e-17+0.21526929j,\n"
+    "       1.20521602e-17+0.19682671j])\n"
+    "\n"
+    "It is also possible to evaluate several orders at several points\n"
+    "at the same time by providing arrays for `v` and `z` with\n"
+    "compatible shapes for broadcasting. Compute `jve` for two different orders\n"
+    "`v` and three points `z` resulting in a 2x3 array.\n"
+    "\n"
+    ">>> v = np.array([[1], [3]])\n"
+    ">>> z = np.array([1j, 2j, 3j])\n"
+    ">>> v.shape, z.shape\n"
+    "((2, 1), (3,))\n"
+    "\n"
+    ">>> jve(v, z)\n"
+    "array([[1.27304208e-17+0.20791042j,  1.31810070e-17+0.21526929j,\n"
+    "        1.20517622e-17+0.19682671j],\n"
+    "       [-4.99352086e-19-0.00815531j, -1.76289571e-18-0.02879122j,\n"
+    "        -2.92578784e-18-0.04778332j]])")
+ufunc_jve_loops[0] = loop_d_dd__As_ff_f
+ufunc_jve_loops[1] = loop_D_dD__As_fF_F
+ufunc_jve_loops[2] = loop_d_dd__As_dd_d
+ufunc_jve_loops[3] = loop_D_dD__As_dD_D
+ufunc_jve_types[0] = NPY_FLOAT
+ufunc_jve_types[1] = NPY_FLOAT
+ufunc_jve_types[2] = NPY_FLOAT
+ufunc_jve_types[3] = NPY_FLOAT
+ufunc_jve_types[4] = NPY_CFLOAT
+ufunc_jve_types[5] = NPY_CFLOAT
+ufunc_jve_types[6] = NPY_DOUBLE
+ufunc_jve_types[7] = NPY_DOUBLE
+ufunc_jve_types[8] = NPY_DOUBLE
+ufunc_jve_types[9] = NPY_DOUBLE
+ufunc_jve_types[10] = NPY_CDOUBLE
+ufunc_jve_types[11] = NPY_CDOUBLE
+ufunc_jve_ptr[2*0] = _func_cbesj_wrap_e_real
+ufunc_jve_ptr[2*0+1] = ("jve")
+ufunc_jve_ptr[2*1] = _func_cbesj_wrap_e
+ufunc_jve_ptr[2*1+1] = ("jve")
+ufunc_jve_ptr[2*2] = _func_cbesj_wrap_e_real
+ufunc_jve_ptr[2*2+1] = ("jve")
+ufunc_jve_ptr[2*3] = _func_cbesj_wrap_e
+ufunc_jve_ptr[2*3+1] = ("jve")
+ufunc_jve_data[0] = &ufunc_jve_ptr[2*0]
+ufunc_jve_data[1] = &ufunc_jve_ptr[2*1]
+ufunc_jve_data[2] = &ufunc_jve_ptr[2*2]
+ufunc_jve_data[3] = &ufunc_jve_ptr[2*3]
+jve = np.PyUFunc_FromFuncAndData(ufunc_jve_loops, ufunc_jve_data, ufunc_jve_types, 4, 2, 1, 0, "jve", ufunc_jve_doc, 0)
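+# Hedged doctest for the `jve` scaling definition quoted in its docstring,
+# jve(v, z) = jv(v, z) * exp(-abs(z.imag)):
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import jv, jve
+#     >>> z = 1.0 + 2.0j
+#     >>> np.allclose(jve(1, z), jv(1, z) * np.exp(-abs(z.imag)))
+#     True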
+
+cdef np.PyUFuncGenericFunction ufunc_k0_loops[2]
+cdef void *ufunc_k0_ptr[4]
+cdef void *ufunc_k0_data[2]
+cdef char ufunc_k0_types[4]
+cdef char *ufunc_k0_doc = (
+    "k0(x, out=None)\n"
+    "\n"
+    "Modified Bessel function of the second kind of order 0, :math:`K_0`.\n"
+    "\n"
+    "This function is also sometimes referred to as the modified Bessel\n"
+    "function of the third kind of order 0.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Argument (float).\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "K : scalar or ndarray\n"
+    "    Value of the modified Bessel function :math:`K_0` at `x`.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "The range is partitioned into the two intervals [0, 2] and (2, infinity).\n"
+    "Chebyshev polynomial expansions are employed in each interval.\n"
+    "\n"
+    "This function is a wrapper for the Cephes [1]_ routine `k0`.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "kv: Modified Bessel function of the second kind of any order\n"
+    "k0e: Exponentially scaled modified Bessel function of the second kind\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Cephes Mathematical Functions Library,\n"
+    "       http://www.netlib.org/cephes/\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Calculate the function at one point:\n"
+    "\n"
+    ">>> from scipy.special import k0\n"
+    ">>> k0(1.)\n"
+    "0.42102443824070823\n"
+    "\n"
+    "Calculate the function at several points:\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> k0(np.array([0.5, 2., 3.]))\n"
+    "array([0.92441907, 0.11389387, 0.0347395 ])\n"
+    "\n"
+    "Plot the function from 0 to 10.\n"
+    "\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> x = np.linspace(0., 10., 1000)\n"
+    ">>> y = k0(x)\n"
+    ">>> ax.plot(x, y)\n"
+    ">>> plt.show()")
+ufunc_k0_loops[0] = loop_d_d__As_f_f
+ufunc_k0_loops[1] = loop_d_d__As_d_d
+ufunc_k0_types[0] = NPY_FLOAT
+ufunc_k0_types[1] = NPY_FLOAT
+ufunc_k0_types[2] = NPY_DOUBLE
+ufunc_k0_types[3] = NPY_DOUBLE
+ufunc_k0_ptr[2*0] = _func_k0
+ufunc_k0_ptr[2*0+1] = ("k0")
+ufunc_k0_ptr[2*1] = _func_k0
+ufunc_k0_ptr[2*1+1] = ("k0")
+ufunc_k0_data[0] = &ufunc_k0_ptr[2*0]
+ufunc_k0_data[1] = &ufunc_k0_ptr[2*1]
+k0 = np.PyUFunc_FromFuncAndData(ufunc_k0_loops, ufunc_k0_data, ufunc_k0_types, 2, 1, 1, 0, "k0", ufunc_k0_doc, 0)
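+# Hedged cross-check against the general-order `kv` (see the `k0`
+# docstring's "See also"):
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import k0, kv
+#     >>> x = np.array([0.5, 2., 3.])
+#     >>> np.allclose(k0(x), kv(0, x))
+#     True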
+
+cdef np.PyUFuncGenericFunction ufunc_k0e_loops[2]
+cdef void *ufunc_k0e_ptr[4]
+cdef void *ufunc_k0e_data[2]
+cdef char ufunc_k0e_types[4]
+cdef char *ufunc_k0e_doc = (
+    "k0e(x, out=None)\n"
+    "\n"
+    "Exponentially scaled modified Bessel function K of order 0\n"
+    "\n"
+    "Defined as::\n"
+    "\n"
+    "    k0e(x) = exp(x) * k0(x).\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Argument (float)\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "K : scalar or ndarray\n"
+    "    Value of the exponentially scaled modified Bessel function K of order\n"
+    "    0 at `x`.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "The range is partitioned into the two intervals [0, 2] and (2, infinity).\n"
+    "Chebyshev polynomial expansions are employed in each interval.\n"
+    "\n"
+    "This function is a wrapper for the Cephes [1]_ routine `k0e`.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "kv: Modified Bessel function of the second kind of any order\n"
+    "k0: Modified Bessel function of the second kind\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Cephes Mathematical Functions Library,\n"
+    "       http://www.netlib.org/cephes/\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Calculate the function at one point:\n"
+    "\n"
+    ">>> from scipy.special import k0e\n"
+    ">>> k0e(1.)\n"
+    "1.1444630798068947\n"
+    "\n"
+    "Calculate the function at several points:\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> k0e(np.array([0.5, 2., 3.]))\n"
+    "array([1.52410939, 0.84156822, 0.6977616 ])\n"
+    "\n"
+    "Plot the function from 0 to 10.\n"
+    "\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> x = np.linspace(0., 10., 1000)\n"
+    ">>> y = k0e(x)\n"
+    ">>> ax.plot(x, y)\n"
+    ">>> plt.show()\n"
+    "\n"
+    "Exponentially scaled Bessel functions are useful for large arguments for\n"
+    "which the unscaled Bessel functions are not precise enough.\n"
+    "\n"
+    ">>> from scipy.special import k0\n"
+    ">>> k0(1000.)\n"
+    "0.\n"
+    "\n"
+    "While `k0` returns zero, `k0e` still returns a finite number:\n"
+    "\n"
+    ">>> k0e(1000.)\n"
+    "0.03962832160075422")
+ufunc_k0e_loops[0] = loop_d_d__As_f_f
+ufunc_k0e_loops[1] = loop_d_d__As_d_d
+ufunc_k0e_types[0] = NPY_FLOAT
+ufunc_k0e_types[1] = NPY_FLOAT
+ufunc_k0e_types[2] = NPY_DOUBLE
+ufunc_k0e_types[3] = NPY_DOUBLE
+ufunc_k0e_ptr[2*0] = _func_k0e
+ufunc_k0e_ptr[2*0+1] = ("k0e")
+ufunc_k0e_ptr[2*1] = _func_k0e
+ufunc_k0e_ptr[2*1+1] = ("k0e")
+ufunc_k0e_data[0] = &ufunc_k0e_ptr[2*0]
+ufunc_k0e_data[1] = &ufunc_k0e_ptr[2*1]
+k0e = np.PyUFunc_FromFuncAndData(ufunc_k0e_loops, ufunc_k0e_data, ufunc_k0e_types, 2, 1, 1, 0, "k0e", ufunc_k0e_doc, 0)
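+# Hedged doctest for the definition quoted above, k0e(x) = exp(x) * k0(x):
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import k0, k0e
+#     >>> x = np.array([0.5, 2., 3.])
+#     >>> np.allclose(k0e(x), np.exp(x) * k0(x))
+#     True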
+
+cdef np.PyUFuncGenericFunction ufunc_k1_loops[2]
+cdef void *ufunc_k1_ptr[4]
+cdef void *ufunc_k1_data[2]
+cdef char ufunc_k1_types[4]
+cdef char *ufunc_k1_doc = (
+    "k1(x, out=None)\n"
+    "\n"
+    "Modified Bessel function of the second kind of order 1, :math:`K_1(x)`.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Argument (float)\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "K : scalar or ndarray\n"
+    "    Value of the modified Bessel function K of order 1 at `x`.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "The range is partitioned into the two intervals [0, 2] and (2, infinity).\n"
+    "Chebyshev polynomial expansions are employed in each interval.\n"
+    "\n"
+    "This function is a wrapper for the Cephes [1]_ routine `k1`.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "kv: Modified Bessel function of the second kind of any order\n"
+    "k1e: Exponentially scaled modified Bessel function K of order 1\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Cephes Mathematical Functions Library,\n"
+    "       http://www.netlib.org/cephes/\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Calculate the function at one point:\n"
+    "\n"
+    ">>> from scipy.special import k1\n"
+    ">>> k1(1.)\n"
+    "0.6019072301972346\n"
+    "\n"
+    "Calculate the function at several points:\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> k1(np.array([0.5, 2., 3.]))\n"
+    "array([1.65644112, 0.13986588, 0.04015643])\n"
+    "\n"
+    "Plot the function from 0 to 10.\n"
+    "\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> x = np.linspace(0., 10., 1000)\n"
+    ">>> y = k1(x)\n"
+    ">>> ax.plot(x, y)\n"
+    ">>> plt.show()")
+ufunc_k1_loops[0] = loop_d_d__As_f_f
+ufunc_k1_loops[1] = loop_d_d__As_d_d
+ufunc_k1_types[0] = NPY_FLOAT
+ufunc_k1_types[1] = NPY_FLOAT
+ufunc_k1_types[2] = NPY_DOUBLE
+ufunc_k1_types[3] = NPY_DOUBLE
+ufunc_k1_ptr[2*0] = _func_k1
+ufunc_k1_ptr[2*0+1] = ("k1")
+ufunc_k1_ptr[2*1] = _func_k1
+ufunc_k1_ptr[2*1+1] = ("k1")
+ufunc_k1_data[0] = &ufunc_k1_ptr[2*0]
+ufunc_k1_data[1] = &ufunc_k1_ptr[2*1]
+k1 = np.PyUFunc_FromFuncAndData(ufunc_k1_loops, ufunc_k1_data, ufunc_k1_types, 2, 1, 1, 0, "k1", ufunc_k1_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_k1e_loops[2]
+cdef void *ufunc_k1e_ptr[4]
+cdef void *ufunc_k1e_data[2]
+cdef char ufunc_k1e_types[4]
+cdef char *ufunc_k1e_doc = (
+    "k1e(x, out=None)\n"
+    "\n"
+    "Exponentially scaled modified Bessel function K of order 1\n"
+    "\n"
+    "Defined as::\n"
+    "\n"
+    "    k1e(x) = exp(x) * k1(x)\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Argument (float)\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "K : scalar or ndarray\n"
+    "    Value of the exponentially scaled modified Bessel function K of order\n"
+    "    1 at `x`.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "The range is partitioned into the two intervals [0, 2] and (2, infinity).\n"
+    "Chebyshev polynomial expansions are employed in each interval.\n"
+    "\n"
+    "This function is a wrapper for the Cephes [1]_ routine `k1e`.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "kv: Modified Bessel function of the second kind of any order\n"
+    "k1: Modified Bessel function of the second kind of order 1\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Cephes Mathematical Functions Library,\n"
+    "       http://www.netlib.org/cephes/\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Calculate the function at one point:\n"
+    "\n"
+    ">>> from scipy.special import k1e\n"
+    ">>> k1e(1.)\n"
+    "1.636153486263258\n"
+    "\n"
+    "Calculate the function at several points:\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> k1e(np.array([0.5, 2., 3.]))\n"
+    "array([2.73100971, 1.03347685, 0.80656348])\n"
+    "\n"
+    "Plot the function from 0 to 10.\n"
+    "\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> x = np.linspace(0., 10., 1000)\n"
+    ">>> y = k1e(x)\n"
+    ">>> ax.plot(x, y)\n"
+    ">>> plt.show()\n"
+    "\n"
+    "Exponentially scaled Bessel functions are useful for large arguments for\n"
+    "which the unscaled Bessel functions are not precise enough. In the\n"
+    "following example `k1` returns zero whereas `k1e` still returns a\n"
+    "useful floating point number.\n"
+    "\n"
+    ">>> from scipy.special import k1\n"
+    ">>> k1(1000.), k1e(1000.)\n"
+    "(0., 0.03964813081296021)")
+ufunc_k1e_loops[0] = loop_d_d__As_f_f
+ufunc_k1e_loops[1] = loop_d_d__As_d_d
+ufunc_k1e_types[0] = NPY_FLOAT
+ufunc_k1e_types[1] = NPY_FLOAT
+ufunc_k1e_types[2] = NPY_DOUBLE
+ufunc_k1e_types[3] = NPY_DOUBLE
+ufunc_k1e_ptr[2*0] = _func_k1e
+ufunc_k1e_ptr[2*0+1] = ("k1e")
+ufunc_k1e_ptr[2*1] = _func_k1e
+ufunc_k1e_ptr[2*1+1] = ("k1e")
+ufunc_k1e_data[0] = &ufunc_k1e_ptr[2*0]
+ufunc_k1e_data[1] = &ufunc_k1e_ptr[2*1]
+k1e = np.PyUFunc_FromFuncAndData(ufunc_k1e_loops, ufunc_k1e_data, ufunc_k1e_types, 2, 1, 1, 0, "k1e", ufunc_k1e_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_kei_loops[2]
+cdef void *ufunc_kei_ptr[4]
+cdef void *ufunc_kei_data[2]
+cdef char ufunc_kei_types[4]
+cdef char *ufunc_kei_doc = (
+    "kei(x, out=None)\n"
+    "\n"
+    "Kelvin function kei.\n"
+    "\n"
+    "Defined as\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    \\mathrm{kei}(x) = \\Im[K_0(x e^{\\pi i / 4})]\n"
+    "\n"
+    "where :math:`K_0` is the modified Bessel function of the second\n"
+    "kind (see `kv`). See [dlmf]_ for more details.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Real argument.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results.\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Values of the Kelvin function.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "ker : the corresponding real part\n"
+    "keip : the derivative of kei\n"
+    "kv : modified Bessel function of the second kind\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [dlmf] NIST, Digital Library of Mathematical Functions,\n"
+    "    https://dlmf.nist.gov/10.61\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "It can be expressed using the modified Bessel function of the\n"
+    "second kind.\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> import scipy.special as sc\n"
+    ">>> x = np.array([1.0, 2.0, 3.0, 4.0])\n"
+    ">>> sc.kv(0, x * np.exp(np.pi * 1j / 4)).imag\n"
+    "array([-0.49499464, -0.20240007, -0.05112188,  0.0021984 ])\n"
+    ">>> sc.kei(x)\n"
+    "array([-0.49499464, -0.20240007, -0.05112188,  0.0021984 ])")
+ufunc_kei_loops[0] = loop_d_d__As_f_f
+ufunc_kei_loops[1] = loop_d_d__As_d_d
+ufunc_kei_types[0] = NPY_FLOAT
+ufunc_kei_types[1] = NPY_FLOAT
+ufunc_kei_types[2] = NPY_DOUBLE
+ufunc_kei_types[3] = NPY_DOUBLE
+ufunc_kei_ptr[2*0] = _func_kei_wrap
+ufunc_kei_ptr[2*0+1] = ("kei")
+ufunc_kei_ptr[2*1] = _func_kei_wrap
+ufunc_kei_ptr[2*1+1] = ("kei")
+ufunc_kei_data[0] = &ufunc_kei_ptr[2*0]
+ufunc_kei_data[1] = &ufunc_kei_ptr[2*1]
+kei = np.PyUFunc_FromFuncAndData(ufunc_kei_loops, ufunc_kei_data, ufunc_kei_types, 2, 1, 1, 0, "kei", ufunc_kei_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_keip_loops[2]
+cdef void *ufunc_keip_ptr[4]
+cdef void *ufunc_keip_data[2]
+cdef char ufunc_keip_types[4]
+cdef char *ufunc_keip_doc = (
+    "keip(x, out=None)\n"
+    "\n"
+    "Derivative of the Kelvin function kei.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Real argument.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results.\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    The values of the derivative of kei.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "kei\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [dlmf] NIST, Digital Library of Mathematical Functions,\n"
+    "    https://dlmf.nist.gov/10#PT5")
+ufunc_keip_loops[0] = loop_d_d__As_f_f
+ufunc_keip_loops[1] = loop_d_d__As_d_d
+ufunc_keip_types[0] = NPY_FLOAT
+ufunc_keip_types[1] = NPY_FLOAT
+ufunc_keip_types[2] = NPY_DOUBLE
+ufunc_keip_types[3] = NPY_DOUBLE
+ufunc_keip_ptr[2*0] = _func_keip_wrap
+ufunc_keip_ptr[2*0+1] = ("keip")
+ufunc_keip_ptr[2*1] = _func_keip_wrap
+ufunc_keip_ptr[2*1+1] = ("keip")
+ufunc_keip_data[0] = &ufunc_keip_ptr[2*0]
+ufunc_keip_data[1] = &ufunc_keip_ptr[2*1]
+keip = np.PyUFunc_FromFuncAndData(ufunc_keip_loops, ufunc_keip_data, ufunc_keip_types, 2, 1, 1, 0, "keip", ufunc_keip_doc, 0)
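+# The `keip` docstring carries no Examples section; a hedged finite-difference
+# sanity check of "derivative of kei" (the step size is an arbitrary choice):
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import kei, keip
+#     >>> x, h = 1.5, 1e-6
+#     >>> np.allclose(keip(x), (kei(x + h) - kei(x - h)) / (2*h))
+#     True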
+
+cdef np.PyUFuncGenericFunction ufunc_kelvin_loops[2]
+cdef void *ufunc_kelvin_ptr[4]
+cdef void *ufunc_kelvin_data[2]
+cdef char ufunc_kelvin_types[10]
+cdef char *ufunc_kelvin_doc = (
+    "kelvin(x, out=None)\n"
+    "\n"
+    "Kelvin functions as complex numbers\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Argument\n"
+    "out : tuple of ndarray, optional\n"
+    "    Optional output arrays for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "Be, Ke, Bep, Kep : 4-tuple of scalar or ndarray\n"
+    "    The tuple (Be, Ke, Bep, Kep) contains complex numbers\n"
+    "    representing the real and imaginary Kelvin functions and their\n"
+    "    derivatives evaluated at `x`.  For example, kelvin(x)[0].real =\n"
+    "    ber x and kelvin(x)[0].imag = bei x with similar relationships\n"
+    "    for ker and kei.")
+ufunc_kelvin_loops[0] = loop_i_d_DDDD_As_f_FFFF
+ufunc_kelvin_loops[1] = loop_i_d_DDDD_As_d_DDDD
+ufunc_kelvin_types[0] = NPY_FLOAT
+ufunc_kelvin_types[1] = NPY_CFLOAT
+ufunc_kelvin_types[2] = NPY_CFLOAT
+ufunc_kelvin_types[3] = NPY_CFLOAT
+ufunc_kelvin_types[4] = NPY_CFLOAT
+ufunc_kelvin_types[5] = NPY_DOUBLE
+ufunc_kelvin_types[6] = NPY_CDOUBLE
+ufunc_kelvin_types[7] = NPY_CDOUBLE
+ufunc_kelvin_types[8] = NPY_CDOUBLE
+ufunc_kelvin_types[9] = NPY_CDOUBLE
+ufunc_kelvin_ptr[2*0] = _func_kelvin_wrap
+ufunc_kelvin_ptr[2*0+1] = ("kelvin")
+ufunc_kelvin_ptr[2*1] = _func_kelvin_wrap
+ufunc_kelvin_ptr[2*1+1] = ("kelvin")
+ufunc_kelvin_data[0] = &ufunc_kelvin_ptr[2*0]
+ufunc_kelvin_data[1] = &ufunc_kelvin_ptr[2*1]
+kelvin = np.PyUFunc_FromFuncAndData(ufunc_kelvin_loops, ufunc_kelvin_data, ufunc_kelvin_types, 2, 1, 4, 0, "kelvin", ufunc_kelvin_doc, 0)
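+# Hedged doctest for the tuple layout described in the `kelvin` docstring
+# (kelvin(x)[0].real should match `ber`):
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import kelvin, ber
+#     >>> Be, Ke, Bep, Kep = kelvin(2.0)
+#     >>> np.allclose(Be.real, ber(2.0))
+#     True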
+
+cdef np.PyUFuncGenericFunction ufunc_ker_loops[2]
+cdef void *ufunc_ker_ptr[4]
+cdef void *ufunc_ker_data[2]
+cdef char ufunc_ker_types[4]
+cdef char *ufunc_ker_doc = (
+    "ker(x, out=None)\n"
+    "\n"
+    "Kelvin function ker.\n"
+    "\n"
+    "Defined as\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    \\mathrm{ker}(x) = \\Re[K_0(x e^{\\pi i / 4})]\n"
+    "\n"
+    "Where :math:`K_0` is the modified Bessel function of the second\n"
+    "kind (see `kv`). See [dlmf]_ for more details.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Real argument.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results.\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Values of the Kelvin function.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "kei : the corresponding imaginary part\n"
+    "kerp : the derivative of ker\n"
+    "kv : modified Bessel function of the second kind\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [dlmf] NIST, Digital Library of Mathematical Functions,\n"
+    "    https://dlmf.nist.gov/10.61\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "It can be expressed using the modified Bessel function of the\n"
+    "second kind.\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> import scipy.special as sc\n"
+    ">>> x = np.array([1.0, 2.0, 3.0, 4.0])\n"
+    ">>> sc.kv(0, x * np.exp(np.pi * 1j / 4)).real\n"
+    "array([ 0.28670621, -0.04166451, -0.06702923, -0.03617885])\n"
+    ">>> sc.ker(x)\n"
+    "array([ 0.28670621, -0.04166451, -0.06702923, -0.03617885])")
+ufunc_ker_loops[0] = loop_d_d__As_f_f
+ufunc_ker_loops[1] = loop_d_d__As_d_d
+ufunc_ker_types[0] = NPY_FLOAT
+ufunc_ker_types[1] = NPY_FLOAT
+ufunc_ker_types[2] = NPY_DOUBLE
+ufunc_ker_types[3] = NPY_DOUBLE
+ufunc_ker_ptr[2*0] = _func_ker_wrap
+ufunc_ker_ptr[2*0+1] = ("ker")
+ufunc_ker_ptr[2*1] = _func_ker_wrap
+ufunc_ker_ptr[2*1+1] = ("ker")
+ufunc_ker_data[0] = &ufunc_ker_ptr[2*0]
+ufunc_ker_data[1] = &ufunc_ker_ptr[2*1]
+ker = np.PyUFunc_FromFuncAndData(ufunc_ker_loops, ufunc_ker_data, ufunc_ker_types, 2, 1, 1, 0, "ker", ufunc_ker_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_kerp_loops[2]
+cdef void *ufunc_kerp_ptr[4]
+cdef void *ufunc_kerp_data[2]
+cdef char ufunc_kerp_types[4]
+cdef char *ufunc_kerp_doc = (
+    "kerp(x, out=None)\n"
+    "\n"
+    "Derivative of the Kelvin function ker.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Real argument.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results.\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Values of the derivative of ker.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "ker\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [dlmf] NIST, Digital Library of Mathematical Functions,\n"
+    "    https://dlmf.nist.gov/10#PT5")
+ufunc_kerp_loops[0] = loop_d_d__As_f_f
+ufunc_kerp_loops[1] = loop_d_d__As_d_d
+ufunc_kerp_types[0] = NPY_FLOAT
+ufunc_kerp_types[1] = NPY_FLOAT
+ufunc_kerp_types[2] = NPY_DOUBLE
+ufunc_kerp_types[3] = NPY_DOUBLE
+ufunc_kerp_ptr[2*0] = _func_kerp_wrap
+ufunc_kerp_ptr[2*0+1] = ("kerp")
+ufunc_kerp_ptr[2*1] = _func_kerp_wrap
+ufunc_kerp_ptr[2*1+1] = ("kerp")
+ufunc_kerp_data[0] = &ufunc_kerp_ptr[2*0]
+ufunc_kerp_data[1] = &ufunc_kerp_ptr[2*1]
+kerp = np.PyUFunc_FromFuncAndData(ufunc_kerp_loops, ufunc_kerp_data, ufunc_kerp_types, 2, 1, 1, 0, "kerp", ufunc_kerp_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_kl_div_loops[2]
+cdef void *ufunc_kl_div_ptr[4]
+cdef void *ufunc_kl_div_data[2]
+cdef char ufunc_kl_div_types[6]
+cdef char *ufunc_kl_div_doc = (
+    "kl_div(x, y, out=None)\n"
+    "\n"
+    "Elementwise function for computing Kullback-Leibler divergence.\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    \\mathrm{kl\\_div}(x, y) =\n"
+    "      \\begin{cases}\n"
+    "        x \\log(x / y) - x + y & x > 0, y > 0 \\\\\n"
+    "        y & x = 0, y \\ge 0 \\\\\n"
+    "        \\infty & \\text{otherwise}\n"
+    "      \\end{cases}\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x, y : array_like\n"
+    "    Real arguments\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Values of the Kullback-Liebler divergence.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "entr, rel_entr, scipy.stats.entropy\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    ".. versionadded:: 0.15.0\n"
+    "\n"
+    "This function is non-negative and is jointly convex in `x` and `y`.\n"
+    "\n"
+    "The origin of this function is in convex programming; see [1]_ for\n"
+    "details. This is why the function contains the extra :math:`-x\n"
+    "+ y` terms over what might be expected from the Kullback-Leibler\n"
+    "divergence. For a version of the function without the extra terms,\n"
+    "see `rel_entr`.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Boyd, Stephen and Lieven Vandenberghe. *Convex optimization*.\n"
+    "       Cambridge University Press, 2004.\n"
+    "       :doi:`https://doi.org/10.1017/CBO9780511804441`")
+ufunc_kl_div_loops[0] = loop_d_dd__As_ff_f
+ufunc_kl_div_loops[1] = loop_d_dd__As_dd_d
+ufunc_kl_div_types[0] = NPY_FLOAT
+ufunc_kl_div_types[1] = NPY_FLOAT
+ufunc_kl_div_types[2] = NPY_FLOAT
+ufunc_kl_div_types[3] = NPY_DOUBLE
+ufunc_kl_div_types[4] = NPY_DOUBLE
+ufunc_kl_div_types[5] = NPY_DOUBLE
+ufunc_kl_div_ptr[2*0] = _func_kl_div
+ufunc_kl_div_ptr[2*0+1] = ("kl_div")
+ufunc_kl_div_ptr[2*1] = _func_kl_div
+ufunc_kl_div_ptr[2*1+1] = ("kl_div")
+ufunc_kl_div_data[0] = &ufunc_kl_div_ptr[2*0]
+ufunc_kl_div_data[1] = &ufunc_kl_div_ptr[2*1]
+kl_div = np.PyUFunc_FromFuncAndData(ufunc_kl_div_loops, ufunc_kl_div_data, ufunc_kl_div_types, 2, 2, 1, 0, "kl_div", ufunc_kl_div_doc, 0)
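+# Hedged doctest for the relation to `rel_entr` noted in the `kl_div`
+# docstring (`kl_div` adds the extra -x + y terms):
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import kl_div, rel_entr
+#     >>> x, y = 2.0, 3.0
+#     >>> np.allclose(kl_div(x, y), rel_entr(x, y) - x + y)
+#     True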
+
+cdef np.PyUFuncGenericFunction ufunc_kn_loops[3]
+cdef void *ufunc_kn_ptr[6]
+cdef void *ufunc_kn_data[3]
+cdef char ufunc_kn_types[9]
+cdef char *ufunc_kn_doc = (
+    "kn(n, x, out=None)\n"
+    "\n"
+    "Modified Bessel function of the second kind of integer order `n`\n"
+    "\n"
+    "Returns the modified Bessel function of the second kind for integer order\n"
+    "`n` at real `z`.\n"
+    "\n"
+    "These are also sometimes called functions of the third kind, Basset\n"
+    "functions, or Macdonald functions.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "n : array_like of int\n"
+    "    Order of Bessel functions (floats will truncate with a warning)\n"
+    "x : array_like of float\n"
+    "    Argument at which to evaluate the Bessel functions\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results.\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Value of the Modified Bessel function of the second kind,\n"
+    "    :math:`K_n(x)`.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "Wrapper for AMOS [1]_ routine `zbesk`.  For a discussion of the\n"
+    "algorithm used, see [2]_ and the references therein.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "kv : Same function, but accepts real order and complex argument\n"
+    "kvp : Derivative of this function\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Donald E. Amos, \"AMOS, A Portable Package for Bessel Functions\n"
+    "       of a Complex Argument and Nonnegative Order\",\n"
+    "       http://netlib.org/amos/\n"
+    ".. [2] Donald E. Amos, \"Algorithm 644: A portable package for Bessel\n"
+    "       functions of a complex argument and nonnegative order\", ACM\n"
+    "       TOMS Vol. 12 Issue 3, Sept. 1986, p. 265\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Plot the function of several orders for real input:\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import kn\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> x = np.linspace(0, 5, 1000)\n"
+    ">>> for N in range(6):\n"
+    "...     plt.plot(x, kn(N, x), label='$K_{}(x)$'.format(N))\n"
+    ">>> plt.ylim(0, 10)\n"
+    ">>> plt.legend()\n"
+    ">>> plt.title(r'Modified Bessel function of the second kind $K_n(x)$')\n"
+    ">>> plt.show()\n"
+    "\n"
+    "Calculate for a single value at multiple orders:\n"
+    "\n"
+    ">>> kn([4, 5, 6], 1)\n"
+    "array([   44.23241585,   360.9605896 ,  3653.83831186])")
+ufunc_kn_loops[0] = loop_d_id__As_ld_d
+ufunc_kn_loops[1] = loop_d_dd__As_ff_f
+ufunc_kn_loops[2] = loop_d_dd__As_dd_d
+ufunc_kn_types[0] = NPY_LONG
+ufunc_kn_types[1] = NPY_DOUBLE
+ufunc_kn_types[2] = NPY_DOUBLE
+ufunc_kn_types[3] = NPY_FLOAT
+ufunc_kn_types[4] = NPY_FLOAT
+ufunc_kn_types[5] = NPY_FLOAT
+ufunc_kn_types[6] = NPY_DOUBLE
+ufunc_kn_types[7] = NPY_DOUBLE
+ufunc_kn_types[8] = NPY_DOUBLE
+ufunc_kn_ptr[2*0] = _func_cbesk_wrap_real_int
+ufunc_kn_ptr[2*0+1] = ("kn")
+ufunc_kn_ptr[2*1] = _func_kn_unsafe
+ufunc_kn_ptr[2*1+1] = ("kn")
+ufunc_kn_ptr[2*2] = _func_kn_unsafe
+ufunc_kn_ptr[2*2+1] = ("kn")
+ufunc_kn_data[0] = &ufunc_kn_ptr[2*0]
+ufunc_kn_data[1] = &ufunc_kn_ptr[2*1]
+ufunc_kn_data[2] = &ufunc_kn_ptr[2*2]
+kn = np.PyUFunc_FromFuncAndData(ufunc_kn_loops, ufunc_kn_data, ufunc_kn_types, 3, 2, 1, 0, "kn", ufunc_kn_doc, 0)
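+# Hedged cross-check: for integer orders `kn` should agree with the
+# real-order `kv` (see the `kn` docstring's "See Also"):
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import kn, kv
+#     >>> np.allclose(kn(2, 3.5), kv(2, 3.5))
+#     True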
+
+cdef np.PyUFuncGenericFunction ufunc_kolmogi_loops[2]
+cdef void *ufunc_kolmogi_ptr[4]
+cdef void *ufunc_kolmogi_data[2]
+cdef char ufunc_kolmogi_types[4]
+cdef char *ufunc_kolmogi_doc = (
+    "kolmogi(p, out=None)\n"
+    "\n"
+    "Inverse Survival Function of Kolmogorov distribution\n"
+    "\n"
+    "It is the inverse function to `kolmogorov`.\n"
+    "Returns y such that ``kolmogorov(y) == p``.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "p : float array_like\n"
+    "    Probability\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    The value(s) of kolmogi(p)\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "`kolmogorov` is used by `stats.kstest` in the application of the\n"
+    "Kolmogorov-Smirnov Goodness of Fit test. For historial reasons this\n"
+    "function is exposed in `scpy.special`, but the recommended way to achieve\n"
+    "the most accurate CDF/SF/PDF/PPF/ISF computations is to use the\n"
+    "`stats.kstwobign` distribution.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "kolmogorov : The Survival Function for the distribution\n"
+    "scipy.stats.kstwobign : Provides the functionality as a continuous distribution\n"
+    "smirnov, smirnovi : Functions for the one-sided distribution\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> from scipy.special import kolmogi\n"
+    ">>> kolmogi([0, 0.1, 0.25, 0.5, 0.75, 0.9, 1.0])\n"
+    "array([        inf,  1.22384787,  1.01918472,  0.82757356,  0.67644769,\n"
+    "        0.57117327,  0.        ])")
+ufunc_kolmogi_loops[0] = loop_d_d__As_f_f
+ufunc_kolmogi_loops[1] = loop_d_d__As_d_d
+ufunc_kolmogi_types[0] = NPY_FLOAT
+ufunc_kolmogi_types[1] = NPY_FLOAT
+ufunc_kolmogi_types[2] = NPY_DOUBLE
+ufunc_kolmogi_types[3] = NPY_DOUBLE
+ufunc_kolmogi_ptr[2*0] = _func_kolmogi
+ufunc_kolmogi_ptr[2*0+1] = ("kolmogi")
+ufunc_kolmogi_ptr[2*1] = _func_kolmogi
+ufunc_kolmogi_ptr[2*1+1] = ("kolmogi")
+ufunc_kolmogi_data[0] = &ufunc_kolmogi_ptr[2*0]
+ufunc_kolmogi_data[1] = &ufunc_kolmogi_ptr[2*1]
+kolmogi = np.PyUFunc_FromFuncAndData(ufunc_kolmogi_loops, ufunc_kolmogi_data, ufunc_kolmogi_types, 2, 1, 1, 0, "kolmogi", ufunc_kolmogi_doc, 0)
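+# Hedged round-trip doctest for the inverse relation stated in the `kolmogi`
+# docstring (kolmogorov(kolmogi(p)) == p):
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import kolmogi, kolmogorov
+#     >>> p = np.array([0.1, 0.5, 0.9])
+#     >>> np.allclose(kolmogorov(kolmogi(p)), p)
+#     True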
+
+cdef np.PyUFuncGenericFunction ufunc_kolmogorov_loops[2]
+cdef void *ufunc_kolmogorov_ptr[4]
+cdef void *ufunc_kolmogorov_data[2]
+cdef char ufunc_kolmogorov_types[4]
+cdef char *ufunc_kolmogorov_doc = (
+    "kolmogorov(y, out=None)\n"
+    "\n"
+    "Complementary cumulative distribution (Survival Function) function of\n"
+    "Kolmogorov distribution.\n"
+    "\n"
+    "Returns the complementary cumulative distribution function of\n"
+    "Kolmogorov's limiting distribution (``D_n*\\sqrt(n)`` as n goes to infinity)\n"
+    "of a two-sided test for equality between an empirical and a theoretical\n"
+    "distribution. It is equal to the (limit as n->infinity of the)\n"
+    "probability that ``sqrt(n) * max absolute deviation > y``.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "y : float array_like\n"
+    "  Absolute deviation between the Empirical CDF (ECDF) and the target CDF,\n"
+    "  multiplied by sqrt(n).\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    The value(s) of kolmogorov(y)\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "`kolmogorov` is used by `stats.kstest` in the application of the\n"
+    "Kolmogorov-Smirnov Goodness of Fit test. For historial reasons this\n"
+    "function is exposed in `scpy.special`, but the recommended way to achieve\n"
+    "the most accurate CDF/SF/PDF/PPF/ISF computations is to use the\n"
+    "`stats.kstwobign` distribution.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "kolmogi : The Inverse Survival Function for the distribution\n"
+    "scipy.stats.kstwobign : Provides the functionality as a continuous distribution\n"
+    "smirnov, smirnovi : Functions for the one-sided distribution\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Show the probability of a gap at least as big as 0, 0.5 and 1.0.\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import kolmogorov\n"
+    ">>> from scipy.stats import kstwobign\n"
+    ">>> kolmogorov([0, 0.5, 1.0])\n"
+    "array([ 1.        ,  0.96394524,  0.26999967])\n"
+    "\n"
+    "Compare a sample of size 1000 drawn from a Laplace(0, 1) distribution against\n"
+    "the target distribution, a Normal(0, 1) distribution.\n"
+    "\n"
+    ">>> from scipy.stats import norm, laplace\n"
+    ">>> rng = np.random.default_rng()\n"
+    ">>> n = 1000\n"
+    ">>> lap01 = laplace(0, 1)\n"
+    ">>> x = np.sort(lap01.rvs(n, random_state=rng))\n"
+    ">>> np.mean(x), np.std(x)\n"
+    "(-0.05841730131499543, 1.3968109101997568)\n"
+    "\n"
+    "Construct the Empirical CDF and the K-S statistic Dn.\n"
+    "\n"
+    ">>> target = norm(0,1)  # Normal mean 0, stddev 1\n"
+    ">>> cdfs = target.cdf(x)\n"
+    ">>> ecdfs = np.arange(n+1, dtype=float)/n\n"
+    ">>> gaps = np.column_stack([cdfs - ecdfs[:n], ecdfs[1:] - cdfs])\n"
+    ">>> Dn = np.max(gaps)\n"
+    ">>> Kn = np.sqrt(n) * Dn\n"
+    ">>> print('Dn=%f, sqrt(n)*Dn=%f' % (Dn, Kn))\n"
+    "Dn=0.043363, sqrt(n)*Dn=1.371265\n"
+    ">>> print(chr(10).join(['For a sample of size n drawn from a N(0, 1) distribution:',\n"
+    "...   ' the approximate Kolmogorov probability that sqrt(n)*Dn>=%f is %f' %  (Kn, kolmogorov(Kn)),\n"
+    "...   ' the approximate Kolmogorov probability that sqrt(n)*Dn<=%f is %f' %  (Kn, kstwobign.cdf(Kn))]))\n"
+    "For a sample of size n drawn from a N(0, 1) distribution:\n"
+    " the approximate Kolmogorov probability that sqrt(n)*Dn>=1.371265 is 0.046533\n"
+    " the approximate Kolmogorov probability that sqrt(n)*Dn<=1.371265 is 0.953467\n"
+    "\n"
+    "Plot the Empirical CDF against the target N(0, 1) CDF.\n"
+    "\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> plt.step(np.concatenate([[-3], x]), ecdfs, where='post', label='Empirical CDF')\n"
+    ">>> x3 = np.linspace(-3, 3, 100)\n"
+    ">>> plt.plot(x3, target.cdf(x3), label='CDF for N(0, 1)')\n"
+    ">>> plt.ylim([0, 1]); plt.grid(True); plt.legend();\n"
+    ">>> # Add vertical lines marking Dn+ and Dn-\n"
+    ">>> iminus, iplus = np.argmax(gaps, axis=0)\n"
+    ">>> plt.vlines([x[iminus]], ecdfs[iminus], cdfs[iminus], color='r', linestyle='dashed', lw=4)\n"
+    ">>> plt.vlines([x[iplus]], cdfs[iplus], ecdfs[iplus+1], color='r', linestyle='dashed', lw=4)\n"
+    ">>> plt.show()")
+ufunc_kolmogorov_loops[0] = loop_d_d__As_f_f
+ufunc_kolmogorov_loops[1] = loop_d_d__As_d_d
+ufunc_kolmogorov_types[0] = NPY_FLOAT
+ufunc_kolmogorov_types[1] = NPY_FLOAT
+ufunc_kolmogorov_types[2] = NPY_DOUBLE
+ufunc_kolmogorov_types[3] = NPY_DOUBLE
+ufunc_kolmogorov_ptr[2*0] = _func_kolmogorov
+ufunc_kolmogorov_ptr[2*0+1] = ("kolmogorov")
+ufunc_kolmogorov_ptr[2*1] = _func_kolmogorov
+ufunc_kolmogorov_ptr[2*1+1] = ("kolmogorov")
+ufunc_kolmogorov_data[0] = &ufunc_kolmogorov_ptr[2*0]
+ufunc_kolmogorov_data[1] = &ufunc_kolmogorov_ptr[2*1]
+kolmogorov = np.PyUFunc_FromFuncAndData(ufunc_kolmogorov_loops, ufunc_kolmogorov_data, ufunc_kolmogorov_types, 2, 1, 1, 0, "kolmogorov", ufunc_kolmogorov_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_kv_loops[4]
+cdef void *ufunc_kv_ptr[8]
+cdef void *ufunc_kv_data[4]
+cdef char ufunc_kv_types[12]
+cdef char *ufunc_kv_doc = (
+    "kv(v, z, out=None)\n"
+    "\n"
+    "Modified Bessel function of the second kind of real order `v`\n"
+    "\n"
+    "Returns the modified Bessel function of the second kind for real order\n"
+    "`v` at complex `z`.\n"
+    "\n"
+    "These are also sometimes called functions of the third kind, Basset\n"
+    "functions, or Macdonald functions.  They are defined as those solutions\n"
+    "of the modified Bessel equation for which,\n"
+    "\n"
+    ".. math::\n"
+    "    K_v(x) \\sim \\sqrt{\\pi/(2x)} \\exp(-x)\n"
+    "\n"
+    "as :math:`x \\to \\infty` [3]_.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "v : array_like of float\n"
+    "    Order of Bessel functions\n"
+    "z : array_like of complex\n"
+    "    Argument at which to evaluate the Bessel functions\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    The results. Note that input must be of complex type to get complex\n"
+    "    output, e.g. ``kv(3, -2+0j)`` instead of ``kv(3, -2)``.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "Wrapper for AMOS [1]_ routine `zbesk`.  For a discussion of the\n"
+    "algorithm used, see [2]_ and the references therein.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "kve : This function with leading exponential behavior stripped off.\n"
+    "kvp : Derivative of this function\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Donald E. Amos, \"AMOS, A Portable Package for Bessel Functions\n"
+    "       of a Complex Argument and Nonnegative Order\",\n"
+    "       http://netlib.org/amos/\n"
+    ".. [2] Donald E. Amos, \"Algorithm 644: A portable package for Bessel\n"
+    "       functions of a complex argument and nonnegative order\", ACM\n"
+    "       TOMS Vol. 12 Issue 3, Sept. 1986, p. 265\n"
+    ".. [3] NIST Digital Library of Mathematical Functions,\n"
+    "       Eq. 10.25.E3. https://dlmf.nist.gov/10.25.E3\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Plot the function of several orders for real input:\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import kv\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> x = np.linspace(0, 5, 1000)\n"
+    ">>> for N in np.linspace(0, 6, 5):\n"
+    "...     plt.plot(x, kv(N, x), label='$K_{{{}}}(x)$'.format(N))\n"
+    ">>> plt.ylim(0, 10)\n"
+    ">>> plt.legend()\n"
+    ">>> plt.title(r'Modified Bessel function of the second kind $K_\\nu(x)$')\n"
+    ">>> plt.show()\n"
+    "\n"
+    "Calculate for a single value at multiple orders:\n"
+    "\n"
+    ">>> kv([4, 4.5, 5], 1+2j)\n"
+    "array([ 0.1992+2.3892j,  2.3493+3.6j   ,  7.2827+3.8104j])")
+ufunc_kv_loops[0] = loop_d_dd__As_ff_f
+ufunc_kv_loops[1] = loop_D_dD__As_fF_F
+ufunc_kv_loops[2] = loop_d_dd__As_dd_d
+ufunc_kv_loops[3] = loop_D_dD__As_dD_D
+ufunc_kv_types[0] = NPY_FLOAT
+ufunc_kv_types[1] = NPY_FLOAT
+ufunc_kv_types[2] = NPY_FLOAT
+ufunc_kv_types[3] = NPY_FLOAT
+ufunc_kv_types[4] = NPY_CFLOAT
+ufunc_kv_types[5] = NPY_CFLOAT
+ufunc_kv_types[6] = NPY_DOUBLE
+ufunc_kv_types[7] = NPY_DOUBLE
+ufunc_kv_types[8] = NPY_DOUBLE
+ufunc_kv_types[9] = NPY_DOUBLE
+ufunc_kv_types[10] = NPY_CDOUBLE
+ufunc_kv_types[11] = NPY_CDOUBLE
+ufunc_kv_ptr[2*0] = _func_cbesk_wrap_real
+ufunc_kv_ptr[2*0+1] = ("kv")
+ufunc_kv_ptr[2*1] = _func_cbesk_wrap
+ufunc_kv_ptr[2*1+1] = ("kv")
+ufunc_kv_ptr[2*2] = _func_cbesk_wrap_real
+ufunc_kv_ptr[2*2+1] = ("kv")
+ufunc_kv_ptr[2*3] = _func_cbesk_wrap
+ufunc_kv_ptr[2*3+1] = ("kv")
+ufunc_kv_data[0] = &ufunc_kv_ptr[2*0]
+ufunc_kv_data[1] = &ufunc_kv_ptr[2*1]
+ufunc_kv_data[2] = &ufunc_kv_ptr[2*2]
+ufunc_kv_data[3] = &ufunc_kv_ptr[2*3]
+kv = np.PyUFunc_FromFuncAndData(ufunc_kv_loops, ufunc_kv_data, ufunc_kv_types, 4, 2, 1, 0, "kv", ufunc_kv_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_kve_loops[4]
+cdef void *ufunc_kve_ptr[8]
+cdef void *ufunc_kve_data[4]
+cdef char ufunc_kve_types[12]
+cdef char *ufunc_kve_doc = (
+    "kve(v, z, out=None)\n"
+    "\n"
+    "Exponentially scaled modified Bessel function of the second kind.\n"
+    "\n"
+    "Returns the exponentially scaled, modified Bessel function of the\n"
+    "second kind (sometimes called the third kind) for real order `v` at\n"
+    "complex `z`::\n"
+    "\n"
+    "    kve(v, z) = kv(v, z) * exp(z)\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "v : array_like of float\n"
+    "    Order of Bessel functions\n"
+    "z : array_like of complex\n"
+    "    Argument at which to evaluate the Bessel functions\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    The exponentially scaled modified Bessel function of the second kind.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "Wrapper for AMOS [1]_ routine `zbesk`.  For a discussion of the\n"
+    "algorithm used, see [2]_ and the references therein.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "kv : This function without exponential scaling.\n"
+    "k0e : Faster version of this function for order 0.\n"
+    "k1e : Faster version of this function for order 1.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Donald E. Amos, \"AMOS, A Portable Package for Bessel Functions\n"
+    "       of a Complex Argument and Nonnegative Order\",\n"
+    "       http://netlib.org/amos/\n"
+    ".. [2] Donald E. Amos, \"Algorithm 644: A portable package for Bessel\n"
+    "       functions of a complex argument and nonnegative order\", ACM\n"
+    "       TOMS Vol. 12 Issue 3, Sept. 1986, p. 265\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Evaluate the function of order 0 at one point.\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import kv, kve\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> kve(0, 1.)\n"
+    "1.1444630798068949\n"
+    "\n"
+    "Evaluate the function at one point for different orders by\n"
+    "providing a list or NumPy array as argument for the `v` parameter:\n"
+    "\n"
+    ">>> kve([0, 1, 1.5], 1.)\n"
+    "array([1.14446308, 1.63615349, 2.50662827])\n"
+    "\n"
+    "Evaluate the function at several points for order 0 by providing an\n"
+    "array for `z`.\n"
+    "\n"
+    ">>> points = np.array([1., 3., 10.])\n"
+    ">>> kve(0, points)\n"
+    "array([1.14446308, 0.6977616 , 0.39163193])\n"
+    "\n"
+    "Evaluate the function at several points for different orders by\n"
+    "providing arrays for both `v` for `z`. Both arrays have to be\n"
+    "broadcastable to the correct shape. To calculate the orders 0, 1\n"
+    "and 2 for a 1D array of points:\n"
+    "\n"
+    ">>> kve([[0], [1], [2]], points)\n"
+    "array([[1.14446308, 0.6977616 , 0.39163193],\n"
+    "       [1.63615349, 0.80656348, 0.41076657],\n"
+    "       [4.41677005, 1.23547058, 0.47378525]])\n"
+    "\n"
+    "Plot the functions of order 0 to 3 from 0 to 5.\n"
+    "\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> x = np.linspace(0., 5., 1000)\n"
+    ">>> for i in range(4):\n"
+    "...     ax.plot(x, kve(i, x), label=f'$K_{i!r}(z)\\cdot e^z$')\n"
+    ">>> ax.legend()\n"
+    ">>> ax.set_xlabel(r\"$z$\")\n"
+    ">>> ax.set_ylim(0, 4)\n"
+    ">>> ax.set_xlim(0, 5)\n"
+    ">>> plt.show()\n"
+    "\n"
+    "Exponentially scaled Bessel functions are useful for large arguments for\n"
+    "which the unscaled Bessel functions over- or underflow. In the\n"
+    "following example `kv` returns 0 whereas `kve` still returns\n"
+    "a useful finite number.\n"
+    "\n"
+    ">>> kv(3, 1000.), kve(3, 1000.)\n"
+    "(0.0, 0.03980696128440973)")
+ufunc_kve_loops[0] = loop_d_dd__As_ff_f
+ufunc_kve_loops[1] = loop_D_dD__As_fF_F
+ufunc_kve_loops[2] = loop_d_dd__As_dd_d
+ufunc_kve_loops[3] = loop_D_dD__As_dD_D
+ufunc_kve_types[0] = NPY_FLOAT
+ufunc_kve_types[1] = NPY_FLOAT
+ufunc_kve_types[2] = NPY_FLOAT
+ufunc_kve_types[3] = NPY_FLOAT
+ufunc_kve_types[4] = NPY_CFLOAT
+ufunc_kve_types[5] = NPY_CFLOAT
+ufunc_kve_types[6] = NPY_DOUBLE
+ufunc_kve_types[7] = NPY_DOUBLE
+ufunc_kve_types[8] = NPY_DOUBLE
+ufunc_kve_types[9] = NPY_DOUBLE
+ufunc_kve_types[10] = NPY_CDOUBLE
+ufunc_kve_types[11] = NPY_CDOUBLE
+ufunc_kve_ptr[2*0] = _func_cbesk_wrap_e_real
+ufunc_kve_ptr[2*0+1] = ("kve")
+ufunc_kve_ptr[2*1] = _func_cbesk_wrap_e
+ufunc_kve_ptr[2*1+1] = ("kve")
+ufunc_kve_ptr[2*2] = _func_cbesk_wrap_e_real
+ufunc_kve_ptr[2*2+1] = ("kve")
+ufunc_kve_ptr[2*3] = _func_cbesk_wrap_e
+ufunc_kve_ptr[2*3+1] = ("kve")
+ufunc_kve_data[0] = &ufunc_kve_ptr[2*0]
+ufunc_kve_data[1] = &ufunc_kve_ptr[2*1]
+ufunc_kve_data[2] = &ufunc_kve_ptr[2*2]
+ufunc_kve_data[3] = &ufunc_kve_ptr[2*3]
+kve = np.PyUFunc_FromFuncAndData(ufunc_kve_loops, ufunc_kve_data, ufunc_kve_types, 4, 2, 1, 0, "kve", ufunc_kve_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_log1p_loops[4]
+cdef void *ufunc_log1p_ptr[8]
+cdef void *ufunc_log1p_data[4]
+cdef char ufunc_log1p_types[8]
+cdef char *ufunc_log1p_doc = (
+    "log1p(x, out=None)\n"
+    "\n"
+    "Calculates log(1 + x) for use when `x` is near zero.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Real or complex valued input.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results.\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Values of ``log(1 + x)``.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "expm1, cosm1\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> import scipy.special as sc\n"
+    "\n"
+    "It is more accurate than using ``log(1 + x)`` directly for ``x``\n"
+    "near 0. Note that in the below example ``1 + 1e-17 == 1`` to\n"
+    "double precision.\n"
+    "\n"
+    ">>> sc.log1p(1e-17)\n"
+    "1e-17\n"
+    ">>> np.log(1 + 1e-17)\n"
+    "0.0")
+ufunc_log1p_loops[0] = loop_d_d__As_f_f
+ufunc_log1p_loops[1] = loop_d_d__As_d_d
+ufunc_log1p_loops[2] = loop_D_D__As_F_F
+ufunc_log1p_loops[3] = loop_D_D__As_D_D
+ufunc_log1p_types[0] = NPY_FLOAT
+ufunc_log1p_types[1] = NPY_FLOAT
+ufunc_log1p_types[2] = NPY_DOUBLE
+ufunc_log1p_types[3] = NPY_DOUBLE
+ufunc_log1p_types[4] = NPY_CFLOAT
+ufunc_log1p_types[5] = NPY_CFLOAT
+ufunc_log1p_types[6] = NPY_CDOUBLE
+ufunc_log1p_types[7] = NPY_CDOUBLE
+ufunc_log1p_ptr[2*0] = _func_log1p
+ufunc_log1p_ptr[2*0+1] = ("log1p")
+ufunc_log1p_ptr[2*1] = _func_log1p
+ufunc_log1p_ptr[2*1+1] = ("log1p")
+ufunc_log1p_ptr[2*2] = _func_clog1p
+ufunc_log1p_ptr[2*2+1] = ("log1p")
+ufunc_log1p_ptr[2*3] = _func_clog1p
+ufunc_log1p_ptr[2*3+1] = ("log1p")
+ufunc_log1p_data[0] = &ufunc_log1p_ptr[2*0]
+ufunc_log1p_data[1] = &ufunc_log1p_ptr[2*1]
+ufunc_log1p_data[2] = &ufunc_log1p_ptr[2*2]
+ufunc_log1p_data[3] = &ufunc_log1p_ptr[2*3]
+log1p = np.PyUFunc_FromFuncAndData(ufunc_log1p_loops, ufunc_log1p_data, ufunc_log1p_types, 4, 1, 1, 0, "log1p", ufunc_log1p_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_log_expit_loops[3]
+cdef void *ufunc_log_expit_ptr[6]
+cdef void *ufunc_log_expit_data[3]
+cdef char ufunc_log_expit_types[6]
+cdef char *ufunc_log_expit_doc = (
+    "log_expit(x, out=None)\n"
+    "\n"
+    "Logarithm of the logistic sigmoid function.\n"
+    "\n"
+    "The SciPy implementation of the logistic sigmoid function is\n"
+    "`scipy.special.expit`, so this function is called ``log_expit``.\n"
+    "\n"
+    "The function is mathematically equivalent to ``log(expit(x))``, but\n"
+    "is formulated to avoid loss of precision for inputs with large\n"
+    "(positive or negative) magnitude.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    The values to apply ``log_expit`` to element-wise.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "out : scalar or ndarray\n"
+    "    The computed values, an ndarray of the same shape as ``x``.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "expit\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "As a ufunc, ``log_expit`` takes a number of optional keyword arguments.\n"
+    "For more information see\n"
+    "`ufuncs `_\n"
+    "\n"
+    ".. versionadded:: 1.8.0\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import log_expit, expit\n"
+    "\n"
+    ">>> log_expit([-3.0, 0.25, 2.5, 5.0])\n"
+    "array([-3.04858735, -0.57593942, -0.07888973, -0.00671535])\n"
+    "\n"
+    "Large negative values:\n"
+    "\n"
+    ">>> log_expit([-100, -500, -1000])\n"
+    "array([ -100.,  -500., -1000.])\n"
+    "\n"
+    "Note that ``expit(-1000)`` returns 0, so the naive implementation\n"
+    "``log(expit(-1000))`` return ``-inf``.\n"
+    "\n"
+    "Large positive values:\n"
+    "\n"
+    ">>> log_expit([29, 120, 400])\n"
+    "array([-2.54366565e-013, -7.66764807e-053, -1.91516960e-174])\n"
+    "\n"
+    "Compare that to the naive implementation:\n"
+    "\n"
+    ">>> np.log(expit([29, 120, 400]))\n"
+    "array([-2.54463117e-13,  0.00000000e+00,  0.00000000e+00])\n"
+    "\n"
+    "The first value is accurate to only 3 digits, and the larger inputs\n"
+    "lose all precision and return 0.")
+ufunc_log_expit_loops[0] = loop_f_f__As_f_f
+ufunc_log_expit_loops[1] = loop_d_d__As_d_d
+ufunc_log_expit_loops[2] = loop_g_g__As_g_g
+ufunc_log_expit_types[0] = NPY_FLOAT
+ufunc_log_expit_types[1] = NPY_FLOAT
+ufunc_log_expit_types[2] = NPY_DOUBLE
+ufunc_log_expit_types[3] = NPY_DOUBLE
+ufunc_log_expit_types[4] = NPY_LONGDOUBLE
+ufunc_log_expit_types[5] = NPY_LONGDOUBLE
+ufunc_log_expit_ptr[2*0] = scipy.special._ufuncs_cxx._export_log_expitf
+ufunc_log_expit_ptr[2*0+1] = ("log_expit")
+ufunc_log_expit_ptr[2*1] = scipy.special._ufuncs_cxx._export_log_expit
+ufunc_log_expit_ptr[2*1+1] = ("log_expit")
+ufunc_log_expit_ptr[2*2] = scipy.special._ufuncs_cxx._export_log_expitl
+ufunc_log_expit_ptr[2*2+1] = ("log_expit")
+ufunc_log_expit_data[0] = &ufunc_log_expit_ptr[2*0]
+ufunc_log_expit_data[1] = &ufunc_log_expit_ptr[2*1]
+ufunc_log_expit_data[2] = &ufunc_log_expit_ptr[2*2]
+log_expit = np.PyUFunc_FromFuncAndData(ufunc_log_expit_loops, ufunc_log_expit_data, ufunc_log_expit_types, 3, 1, 1, 0, "log_expit", ufunc_log_expit_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_log_ndtr_loops[4]
+cdef void *ufunc_log_ndtr_ptr[8]
+cdef void *ufunc_log_ndtr_data[4]
+cdef char ufunc_log_ndtr_types[8]
+cdef char *ufunc_log_ndtr_doc = (
+    "log_ndtr(x, out=None)\n"
+    "\n"
+    "Logarithm of Gaussian cumulative distribution function.\n"
+    "\n"
+    "Returns the log of the area under the standard Gaussian probability\n"
+    "density function, integrated from minus infinity to `x`::\n"
+    "\n"
+    "    log(1/sqrt(2*pi) * integral(exp(-t**2 / 2), t=-inf..x))\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like, real or complex\n"
+    "    Argument\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    The value of the log of the normal CDF evaluated at `x`\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "erf\n"
+    "erfc\n"
+    "scipy.stats.norm\n"
+    "ndtr\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import log_ndtr, ndtr\n"
+    "\n"
+    "The benefit of ``log_ndtr(x)`` over the naive implementation\n"
+    "``np.log(ndtr(x))`` is most evident with moderate to large positive\n"
+    "values of ``x``:\n"
+    "\n"
+    ">>> x = np.array([6, 7, 9, 12, 15, 25])\n"
+    ">>> log_ndtr(x)\n"
+    "array([-9.86587646e-010, -1.27981254e-012, -1.12858841e-019,\n"
+    "       -1.77648211e-033, -3.67096620e-051, -3.05669671e-138])\n"
+    "\n"
+    "The results of the naive calculation for the moderate ``x`` values\n"
+    "have only 5 or 6 correct significant digits. For values of ``x``\n"
+    "greater than approximately 8.3, the naive expression returns 0:\n"
+    "\n"
+    ">>> np.log(ndtr(x))\n"
+    "array([-9.86587701e-10, -1.27986510e-12,  0.00000000e+00,\n"
+    "        0.00000000e+00,  0.00000000e+00,  0.00000000e+00])")
+ufunc_log_ndtr_loops[0] = loop_d_d__As_f_f
+ufunc_log_ndtr_loops[1] = loop_d_d__As_d_d
+ufunc_log_ndtr_loops[2] = loop_D_D__As_F_F
+ufunc_log_ndtr_loops[3] = loop_D_D__As_D_D
+ufunc_log_ndtr_types[0] = NPY_FLOAT
+ufunc_log_ndtr_types[1] = NPY_FLOAT
+ufunc_log_ndtr_types[2] = NPY_DOUBLE
+ufunc_log_ndtr_types[3] = NPY_DOUBLE
+ufunc_log_ndtr_types[4] = NPY_CFLOAT
+ufunc_log_ndtr_types[5] = NPY_CFLOAT
+ufunc_log_ndtr_types[6] = NPY_CDOUBLE
+ufunc_log_ndtr_types[7] = NPY_CDOUBLE
+ufunc_log_ndtr_ptr[2*0] = scipy.special._ufuncs_cxx._export_faddeeva_log_ndtr
+ufunc_log_ndtr_ptr[2*0+1] = ("log_ndtr")
+ufunc_log_ndtr_ptr[2*1] = scipy.special._ufuncs_cxx._export_faddeeva_log_ndtr
+ufunc_log_ndtr_ptr[2*1+1] = ("log_ndtr")
+ufunc_log_ndtr_ptr[2*2] = scipy.special._ufuncs_cxx._export_faddeeva_log_ndtr_complex
+ufunc_log_ndtr_ptr[2*2+1] = ("log_ndtr")
+ufunc_log_ndtr_ptr[2*3] = scipy.special._ufuncs_cxx._export_faddeeva_log_ndtr_complex
+ufunc_log_ndtr_ptr[2*3+1] = ("log_ndtr")
+ufunc_log_ndtr_data[0] = &ufunc_log_ndtr_ptr[2*0]
+ufunc_log_ndtr_data[1] = &ufunc_log_ndtr_ptr[2*1]
+ufunc_log_ndtr_data[2] = &ufunc_log_ndtr_ptr[2*2]
+ufunc_log_ndtr_data[3] = &ufunc_log_ndtr_ptr[2*3]
+log_ndtr = np.PyUFunc_FromFuncAndData(ufunc_log_ndtr_loops, ufunc_log_ndtr_data, ufunc_log_ndtr_types, 4, 1, 1, 0, "log_ndtr", ufunc_log_ndtr_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_loggamma_loops[4]
+cdef void *ufunc_loggamma_ptr[8]
+cdef void *ufunc_loggamma_data[4]
+cdef char ufunc_loggamma_types[8]
+cdef char *ufunc_loggamma_doc = (
+    "loggamma(z, out=None)\n"
+    "\n"
+    "Principal branch of the logarithm of the gamma function.\n"
+    "\n"
+    "Defined to be :math:`\\log(\\Gamma(x))` for :math:`x > 0` and\n"
+    "extended to the complex plane by analytic continuation. The\n"
+    "function has a single branch cut on the negative real axis.\n"
+    "\n"
+    ".. versionadded:: 0.18.0\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "z : array_like\n"
+    "    Values in the complex plain at which to compute ``loggamma``\n"
+    "out : ndarray, optional\n"
+    "    Output array for computed values of ``loggamma``\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "loggamma : scalar or ndarray\n"
+    "    Values of ``loggamma`` at z.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "It is not generally true that :math:`\\log\\Gamma(z) =\n"
+    "\\log(\\Gamma(z))`, though the real parts of the functions do\n"
+    "agree. The benefit of not defining `loggamma` as\n"
+    ":math:`\\log(\\Gamma(z))` is that the latter function has a\n"
+    "complicated branch cut structure whereas `loggamma` is analytic\n"
+    "except for on the negative real axis.\n"
+    "\n"
+    "The identities\n"
+    "\n"
+    ".. math::\n"
+    "  \\exp(\\log\\Gamma(z)) &= \\Gamma(z) \\\\\n"
+    "  \\log\\Gamma(z + 1) &= \\log(z) + \\log\\Gamma(z)\n"
+    "\n"
+    "make `loggamma` useful for working in complex logspace.\n"
+    "\n"
+    "On the real line `loggamma` is related to `gammaln` via\n"
+    "``exp(loggamma(x + 0j)) = gammasgn(x)*exp(gammaln(x))``, up to\n"
+    "rounding error.\n"
+    "\n"
+    "The implementation here is based on [hare1997]_.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "gammaln : logarithm of the absolute value of the gamma function\n"
+    "gammasgn : sign of the gamma function\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [hare1997] D.E.G. Hare,\n"
+    "  *Computing the Principal Branch of log-Gamma*,\n"
+    "  Journal of Algorithms, Volume 25, Issue 2, November 1997, pages 221-236.")
+ufunc_loggamma_loops[0] = loop_d_d__As_f_f
+ufunc_loggamma_loops[1] = loop_d_d__As_d_d
+ufunc_loggamma_loops[2] = loop_D_D__As_F_F
+ufunc_loggamma_loops[3] = loop_D_D__As_D_D
+ufunc_loggamma_types[0] = NPY_FLOAT
+ufunc_loggamma_types[1] = NPY_FLOAT
+ufunc_loggamma_types[2] = NPY_DOUBLE
+ufunc_loggamma_types[3] = NPY_DOUBLE
+ufunc_loggamma_types[4] = NPY_CFLOAT
+ufunc_loggamma_types[5] = NPY_CFLOAT
+ufunc_loggamma_types[6] = NPY_CDOUBLE
+ufunc_loggamma_types[7] = NPY_CDOUBLE
+ufunc_loggamma_ptr[2*0] = _func_loggamma_real
+ufunc_loggamma_ptr[2*0+1] = ("loggamma")
+ufunc_loggamma_ptr[2*1] = _func_loggamma_real
+ufunc_loggamma_ptr[2*1+1] = ("loggamma")
+ufunc_loggamma_ptr[2*2] = _func_loggamma
+ufunc_loggamma_ptr[2*2+1] = ("loggamma")
+ufunc_loggamma_ptr[2*3] = _func_loggamma
+ufunc_loggamma_ptr[2*3+1] = ("loggamma")
+ufunc_loggamma_data[0] = &ufunc_loggamma_ptr[2*0]
+ufunc_loggamma_data[1] = &ufunc_loggamma_ptr[2*1]
+ufunc_loggamma_data[2] = &ufunc_loggamma_ptr[2*2]
+ufunc_loggamma_data[3] = &ufunc_loggamma_ptr[2*3]
+loggamma = np.PyUFunc_FromFuncAndData(ufunc_loggamma_loops, ufunc_loggamma_data, ufunc_loggamma_types, 4, 1, 1, 0, "loggamma", ufunc_loggamma_doc, 0)
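+
+# A minimal doctest-style sketch of the identities above (hedged: it assumes
+# scipy.special is importable at runtime and exposes `loggamma` as registered
+# here; it is a sanity check, not part of the generated bindings).
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import loggamma
+#     >>> np.isclose(np.exp(loggamma(5.0)), 24.0)  # exp(loggamma(x)) recovers Gamma(5) = 4! = 24
+#     True
+#     >>> z = 1.5 + 2.5j
+#     >>> np.allclose(loggamma(z + 1), np.log(z) + loggamma(z))  # recurrence identity
+#     True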
+
+cdef np.PyUFuncGenericFunction ufunc_logit_loops[3]
+cdef void *ufunc_logit_ptr[6]
+cdef void *ufunc_logit_data[3]
+cdef char ufunc_logit_types[6]
+cdef char *ufunc_logit_doc = (
+    "logit(x, out=None)\n"
+    "\n"
+    "Logit ufunc for ndarrays.\n"
+    "\n"
+    "The logit function is defined as logit(p) = log(p/(1-p)).\n"
+    "Note that logit(0) = -inf, logit(1) = inf, and logit(p)\n"
+    "for p<0 or p>1 yields nan.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : ndarray\n"
+    "    The ndarray to apply logit to element-wise.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    An ndarray of the same shape as x. Its entries\n"
+    "    are logit of the corresponding entry of x.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "expit\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "As a ufunc logit takes a number of optional\n"
+    "keyword arguments. For more information\n"
+    "see `ufuncs `_\n"
+    "\n"
+    ".. versionadded:: 0.10.0\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import logit, expit\n"
+    "\n"
+    ">>> logit([0, 0.25, 0.5, 0.75, 1])\n"
+    "array([       -inf, -1.09861229,  0.        ,  1.09861229,         inf])\n"
+    "\n"
+    "`expit` is the inverse of `logit`:\n"
+    "\n"
+    ">>> expit(logit([0.1, 0.75, 0.999]))\n"
+    "array([ 0.1  ,  0.75 ,  0.999])\n"
+    "\n"
+    "Plot logit(x) for x in [0, 1]:\n"
+    "\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> x = np.linspace(0, 1, 501)\n"
+    ">>> y = logit(x)\n"
+    ">>> plt.plot(x, y)\n"
+    ">>> plt.grid()\n"
+    ">>> plt.ylim(-6, 6)\n"
+    ">>> plt.xlabel('x')\n"
+    ">>> plt.title('logit(x)')\n"
+    ">>> plt.show()")
+ufunc_logit_loops[0] = loop_f_f__As_f_f
+ufunc_logit_loops[1] = loop_d_d__As_d_d
+ufunc_logit_loops[2] = loop_g_g__As_g_g
+ufunc_logit_types[0] = NPY_FLOAT
+ufunc_logit_types[1] = NPY_FLOAT
+ufunc_logit_types[2] = NPY_DOUBLE
+ufunc_logit_types[3] = NPY_DOUBLE
+ufunc_logit_types[4] = NPY_LONGDOUBLE
+ufunc_logit_types[5] = NPY_LONGDOUBLE
+ufunc_logit_ptr[2*0] = scipy.special._ufuncs_cxx._export_logitf
+ufunc_logit_ptr[2*0+1] = ("logit")
+ufunc_logit_ptr[2*1] = scipy.special._ufuncs_cxx._export_logit
+ufunc_logit_ptr[2*1+1] = ("logit")
+ufunc_logit_ptr[2*2] = scipy.special._ufuncs_cxx._export_logitl
+ufunc_logit_ptr[2*2+1] = ("logit")
+ufunc_logit_data[0] = &ufunc_logit_ptr[2*0]
+ufunc_logit_data[1] = &ufunc_logit_ptr[2*1]
+ufunc_logit_data[2] = &ufunc_logit_ptr[2*2]
+logit = np.PyUFunc_FromFuncAndData(ufunc_logit_loops, ufunc_logit_data, ufunc_logit_types, 3, 1, 1, 0, "logit", ufunc_logit_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_lpmv_loops[2]
+cdef void *ufunc_lpmv_ptr[4]
+cdef void *ufunc_lpmv_data[2]
+cdef char ufunc_lpmv_types[8]
+cdef char *ufunc_lpmv_doc = (
+    "lpmv(m, v, x, out=None)\n"
+    "\n"
+    "Associated Legendre function of integer order and real degree.\n"
+    "\n"
+    "Defined as\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    P_v^m = (-1)^m (1 - x^2)^{m/2} \\frac{d^m}{dx^m} P_v(x)\n"
+    "\n"
+    "where\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    P_v = \\sum_{k = 0}^\\infty \\frac{(-v)_k (v + 1)_k}{(k!)^2}\n"
+    "            \\left(\\frac{1 - x}{2}\\right)^k\n"
+    "\n"
+    "is the Legendre function of the first kind. Here :math:`(\\cdot)_k`\n"
+    "is the Pochhammer symbol; see `poch`.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "m : array_like\n"
+    "    Order (int or float). If passed a float not equal to an\n"
+    "    integer the function returns NaN.\n"
+    "v : array_like\n"
+    "    Degree (float).\n"
+    "x : array_like\n"
+    "    Argument (float). Must have ``|x| <= 1``.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "pmv : scalar or ndarray\n"
+    "    Value of the associated Legendre function.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "lpmn : Compute the associated Legendre function for all orders\n"
+    "       ``0, ..., m`` and degrees ``0, ..., n``.\n"
+    "clpmn : Compute the associated Legendre function at complex\n"
+    "        arguments.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "Note that this implementation includes the Condon-Shortley phase.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Zhang, Jin, \"Computation of Special Functions\", John Wiley\n"
+    "       and Sons, Inc, 1996.")
+ufunc_lpmv_loops[0] = loop_d_ddd__As_fff_f
+ufunc_lpmv_loops[1] = loop_d_ddd__As_ddd_d
+ufunc_lpmv_types[0] = NPY_FLOAT
+ufunc_lpmv_types[1] = NPY_FLOAT
+ufunc_lpmv_types[2] = NPY_FLOAT
+ufunc_lpmv_types[3] = NPY_FLOAT
+ufunc_lpmv_types[4] = NPY_DOUBLE
+ufunc_lpmv_types[5] = NPY_DOUBLE
+ufunc_lpmv_types[6] = NPY_DOUBLE
+ufunc_lpmv_types[7] = NPY_DOUBLE
+ufunc_lpmv_ptr[2*0] = _func_pmv_wrap
+ufunc_lpmv_ptr[2*0+1] = ("lpmv")
+ufunc_lpmv_ptr[2*1] = _func_pmv_wrap
+ufunc_lpmv_ptr[2*1+1] = ("lpmv")
+ufunc_lpmv_data[0] = &ufunc_lpmv_ptr[2*0]
+ufunc_lpmv_data[1] = &ufunc_lpmv_ptr[2*1]
+lpmv = np.PyUFunc_FromFuncAndData(ufunc_lpmv_loops, ufunc_lpmv_data, ufunc_lpmv_types, 2, 3, 1, 0, "lpmv", ufunc_lpmv_doc, 0)
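+
+# A small doctest-style sketch (hedged: assumes scipy.special exposes `lpmv`
+# as registered here). For m = 0 the function reduces to the Legendre
+# function P_v(x); for integer degree that is the Legendre polynomial,
+# e.g. P_2(x) = (3*x**2 - 1)/2.
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import lpmv
+#     >>> np.isclose(lpmv(0, 2, 0.5), (3*0.5**2 - 1)/2)       # P_2(0.5) = -0.125
+#     True
+#     >>> np.isclose(lpmv(1, 1, 0.5), -np.sqrt(1 - 0.5**2))   # includes the Condon-Shortley phase
+#     True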
+
+cdef np.PyUFuncGenericFunction ufunc_mathieu_a_loops[2]
+cdef void *ufunc_mathieu_a_ptr[4]
+cdef void *ufunc_mathieu_a_data[2]
+cdef char ufunc_mathieu_a_types[6]
+cdef char *ufunc_mathieu_a_doc = (
+    "mathieu_a(m, q, out=None)\n"
+    "\n"
+    "Characteristic value of even Mathieu functions\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "m : array_like\n"
+    "    Order of the function\n"
+    "q : array_like\n"
+    "    Parameter of the function\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Characteristic value for the even solution, ``ce_m(z, q)``, of\n"
+    "    Mathieu's equation.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "mathieu_b, mathieu_cem, mathieu_sem")
+ufunc_mathieu_a_loops[0] = loop_d_dd__As_ff_f
+ufunc_mathieu_a_loops[1] = loop_d_dd__As_dd_d
+ufunc_mathieu_a_types[0] = NPY_FLOAT
+ufunc_mathieu_a_types[1] = NPY_FLOAT
+ufunc_mathieu_a_types[2] = NPY_FLOAT
+ufunc_mathieu_a_types[3] = NPY_DOUBLE
+ufunc_mathieu_a_types[4] = NPY_DOUBLE
+ufunc_mathieu_a_types[5] = NPY_DOUBLE
+ufunc_mathieu_a_ptr[2*0] = _func_cem_cva_wrap
+ufunc_mathieu_a_ptr[2*0+1] = ("mathieu_a")
+ufunc_mathieu_a_ptr[2*1] = _func_cem_cva_wrap
+ufunc_mathieu_a_ptr[2*1+1] = ("mathieu_a")
+ufunc_mathieu_a_data[0] = &ufunc_mathieu_a_ptr[2*0]
+ufunc_mathieu_a_data[1] = &ufunc_mathieu_a_ptr[2*1]
+mathieu_a = np.PyUFunc_FromFuncAndData(ufunc_mathieu_a_loops, ufunc_mathieu_a_data, ufunc_mathieu_a_types, 2, 2, 1, 0, "mathieu_a", ufunc_mathieu_a_doc, 0)
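+
+# Hedged sanity-check sketch (assumes scipy.special exposes `mathieu_a` as
+# registered here): at q = 0 Mathieu's equation reduces to y'' + a*y = 0, so
+# the characteristic value of the even solution of order m should be m**2.
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import mathieu_a
+#     >>> np.isclose(mathieu_a(2, 0), 2**2)
+#     True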
+
+cdef np.PyUFuncGenericFunction ufunc_mathieu_b_loops[2]
+cdef void *ufunc_mathieu_b_ptr[4]
+cdef void *ufunc_mathieu_b_data[2]
+cdef char ufunc_mathieu_b_types[6]
+cdef char *ufunc_mathieu_b_doc = (
+    "mathieu_b(m, q, out=None)\n"
+    "\n"
+    "Characteristic value of odd Mathieu functions\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "m : array_like\n"
+    "    Order of the function\n"
+    "q : array_like\n"
+    "    Parameter of the function\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Characteristic value for the odd solution, ``se_m(z, q)``, of Mathieu's\n"
+    "    equation.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "mathieu_a, mathieu_cem, mathieu_sem")
+ufunc_mathieu_b_loops[0] = loop_d_dd__As_ff_f
+ufunc_mathieu_b_loops[1] = loop_d_dd__As_dd_d
+ufunc_mathieu_b_types[0] = NPY_FLOAT
+ufunc_mathieu_b_types[1] = NPY_FLOAT
+ufunc_mathieu_b_types[2] = NPY_FLOAT
+ufunc_mathieu_b_types[3] = NPY_DOUBLE
+ufunc_mathieu_b_types[4] = NPY_DOUBLE
+ufunc_mathieu_b_types[5] = NPY_DOUBLE
+ufunc_mathieu_b_ptr[2*0] = _func_sem_cva_wrap
+ufunc_mathieu_b_ptr[2*0+1] = ("mathieu_b")
+ufunc_mathieu_b_ptr[2*1] = _func_sem_cva_wrap
+ufunc_mathieu_b_ptr[2*1+1] = ("mathieu_b")
+ufunc_mathieu_b_data[0] = &ufunc_mathieu_b_ptr[2*0]
+ufunc_mathieu_b_data[1] = &ufunc_mathieu_b_ptr[2*1]
+mathieu_b = np.PyUFunc_FromFuncAndData(ufunc_mathieu_b_loops, ufunc_mathieu_b_data, ufunc_mathieu_b_types, 2, 2, 1, 0, "mathieu_b", ufunc_mathieu_b_doc, 0)
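+
+# The analogous hedged check for the odd solutions (assumes scipy.special
+# exposes `mathieu_b` as registered here): for m >= 1 the q = 0
+# characteristic value should again be m**2.
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import mathieu_b
+#     >>> np.isclose(mathieu_b(3, 0), 3**2)
+#     True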
+
+cdef np.PyUFuncGenericFunction ufunc_mathieu_cem_loops[2]
+cdef void *ufunc_mathieu_cem_ptr[4]
+cdef void *ufunc_mathieu_cem_data[2]
+cdef char ufunc_mathieu_cem_types[10]
+cdef char *ufunc_mathieu_cem_doc = (
+    "mathieu_cem(m, q, x, out=None)\n"
+    "\n"
+    "Even Mathieu function and its derivative\n"
+    "\n"
+    "Returns the even Mathieu function, ``ce_m(x, q)``, of order `m` and\n"
+    "parameter `q` evaluated at `x` (given in degrees).  Also returns the\n"
+    "derivative with respect to `x` of ce_m(x, q)\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "m : array_like\n"
+    "    Order of the function\n"
+    "q : array_like\n"
+    "    Parameter of the function\n"
+    "x : array_like\n"
+    "    Argument of the function, *given in degrees, not radians*\n"
+    "out : tuple of ndarray, optional\n"
+    "    Optional output arrays for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "y : scalar or ndarray\n"
+    "    Value of the function\n"
+    "yp : scalar or ndarray\n"
+    "    Value of the derivative vs x\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "mathieu_a, mathieu_b, mathieu_sem")
+ufunc_mathieu_cem_loops[0] = loop_i_ddd_dd_As_fff_ff
+ufunc_mathieu_cem_loops[1] = loop_i_ddd_dd_As_ddd_dd
+ufunc_mathieu_cem_types[0] = NPY_FLOAT
+ufunc_mathieu_cem_types[1] = NPY_FLOAT
+ufunc_mathieu_cem_types[2] = NPY_FLOAT
+ufunc_mathieu_cem_types[3] = NPY_FLOAT
+ufunc_mathieu_cem_types[4] = NPY_FLOAT
+ufunc_mathieu_cem_types[5] = NPY_DOUBLE
+ufunc_mathieu_cem_types[6] = NPY_DOUBLE
+ufunc_mathieu_cem_types[7] = NPY_DOUBLE
+ufunc_mathieu_cem_types[8] = NPY_DOUBLE
+ufunc_mathieu_cem_types[9] = NPY_DOUBLE
+ufunc_mathieu_cem_ptr[2*0] = _func_cem_wrap
+ufunc_mathieu_cem_ptr[2*0+1] = ("mathieu_cem")
+ufunc_mathieu_cem_ptr[2*1] = _func_cem_wrap
+ufunc_mathieu_cem_ptr[2*1+1] = ("mathieu_cem")
+ufunc_mathieu_cem_data[0] = &ufunc_mathieu_cem_ptr[2*0]
+ufunc_mathieu_cem_data[1] = &ufunc_mathieu_cem_ptr[2*1]
+mathieu_cem = np.PyUFunc_FromFuncAndData(ufunc_mathieu_cem_loops, ufunc_mathieu_cem_data, ufunc_mathieu_cem_types, 2, 3, 2, 0, "mathieu_cem", ufunc_mathieu_cem_doc, 0)
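+
+# Hedged usage sketch (assumes scipy.special exposes `mathieu_cem` as
+# registered here): the ufunc has two outputs, so one call returns both the
+# function value and its derivative, with the argument given in degrees. The
+# odd and modified Mathieu ufuncs below follow the same (y, yp) convention.
+#
+#     >>> from scipy.special import mathieu_cem
+#     >>> y, yp = mathieu_cem(1, 1.0, 30.0)   # ce_1 at 30 degrees for q = 1, plus its derivative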
+
+cdef np.PyUFuncGenericFunction ufunc_mathieu_modcem1_loops[2]
+cdef void *ufunc_mathieu_modcem1_ptr[4]
+cdef void *ufunc_mathieu_modcem1_data[2]
+cdef char ufunc_mathieu_modcem1_types[10]
+cdef char *ufunc_mathieu_modcem1_doc = (
+    "mathieu_modcem1(m, q, x, out=None)\n"
+    "\n"
+    "Even modified Mathieu function of the first kind and its derivative\n"
+    "\n"
+    "Evaluates the even modified Mathieu function of the first kind,\n"
+    "``Mc1m(x, q)``, and its derivative at `x` for order `m` and parameter\n"
+    "`q`.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "m : array_like\n"
+    "    Order of the function\n"
+    "q : array_like\n"
+    "    Parameter of the function\n"
+    "x : array_like\n"
+    "    Argument of the function, *given in degrees, not radians*\n"
+    "out : tuple of ndarray, optional\n"
+    "    Optional output arrays for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "y : scalar or ndarray\n"
+    "    Value of the function\n"
+    "yp : scalar or ndarray\n"
+    "    Value of the derivative vs x\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "mathieu_modsem1")
+ufunc_mathieu_modcem1_loops[0] = loop_i_ddd_dd_As_fff_ff
+ufunc_mathieu_modcem1_loops[1] = loop_i_ddd_dd_As_ddd_dd
+ufunc_mathieu_modcem1_types[0] = NPY_FLOAT
+ufunc_mathieu_modcem1_types[1] = NPY_FLOAT
+ufunc_mathieu_modcem1_types[2] = NPY_FLOAT
+ufunc_mathieu_modcem1_types[3] = NPY_FLOAT
+ufunc_mathieu_modcem1_types[4] = NPY_FLOAT
+ufunc_mathieu_modcem1_types[5] = NPY_DOUBLE
+ufunc_mathieu_modcem1_types[6] = NPY_DOUBLE
+ufunc_mathieu_modcem1_types[7] = NPY_DOUBLE
+ufunc_mathieu_modcem1_types[8] = NPY_DOUBLE
+ufunc_mathieu_modcem1_types[9] = NPY_DOUBLE
+ufunc_mathieu_modcem1_ptr[2*0] = _func_mcm1_wrap
+ufunc_mathieu_modcem1_ptr[2*0+1] = ("mathieu_modcem1")
+ufunc_mathieu_modcem1_ptr[2*1] = _func_mcm1_wrap
+ufunc_mathieu_modcem1_ptr[2*1+1] = ("mathieu_modcem1")
+ufunc_mathieu_modcem1_data[0] = &ufunc_mathieu_modcem1_ptr[2*0]
+ufunc_mathieu_modcem1_data[1] = &ufunc_mathieu_modcem1_ptr[2*1]
+mathieu_modcem1 = np.PyUFunc_FromFuncAndData(ufunc_mathieu_modcem1_loops, ufunc_mathieu_modcem1_data, ufunc_mathieu_modcem1_types, 2, 3, 2, 0, "mathieu_modcem1", ufunc_mathieu_modcem1_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_mathieu_modcem2_loops[2]
+cdef void *ufunc_mathieu_modcem2_ptr[4]
+cdef void *ufunc_mathieu_modcem2_data[2]
+cdef char ufunc_mathieu_modcem2_types[10]
+cdef char *ufunc_mathieu_modcem2_doc = (
+    "mathieu_modcem2(m, q, x, out=None)\n"
+    "\n"
+    "Even modified Mathieu function of the second kind and its derivative\n"
+    "\n"
+    "Evaluates the even modified Mathieu function of the second kind,\n"
+    "Mc2m(x, q), and its derivative at `x` (given in degrees) for order `m`\n"
+    "and parameter `q`.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "m : array_like\n"
+    "    Order of the function\n"
+    "q : array_like\n"
+    "    Parameter of the function\n"
+    "x : array_like\n"
+    "    Argument of the function, *given in degrees, not radians*\n"
+    "out : tuple of ndarray, optional\n"
+    "    Optional output arrays for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "y : scalar or ndarray\n"
+    "    Value of the function\n"
+    "yp : scalar or ndarray\n"
+    "    Value of the derivative vs x\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "mathieu_modsem2")
+ufunc_mathieu_modcem2_loops[0] = loop_i_ddd_dd_As_fff_ff
+ufunc_mathieu_modcem2_loops[1] = loop_i_ddd_dd_As_ddd_dd
+ufunc_mathieu_modcem2_types[0] = NPY_FLOAT
+ufunc_mathieu_modcem2_types[1] = NPY_FLOAT
+ufunc_mathieu_modcem2_types[2] = NPY_FLOAT
+ufunc_mathieu_modcem2_types[3] = NPY_FLOAT
+ufunc_mathieu_modcem2_types[4] = NPY_FLOAT
+ufunc_mathieu_modcem2_types[5] = NPY_DOUBLE
+ufunc_mathieu_modcem2_types[6] = NPY_DOUBLE
+ufunc_mathieu_modcem2_types[7] = NPY_DOUBLE
+ufunc_mathieu_modcem2_types[8] = NPY_DOUBLE
+ufunc_mathieu_modcem2_types[9] = NPY_DOUBLE
+ufunc_mathieu_modcem2_ptr[2*0] = _func_mcm2_wrap
+ufunc_mathieu_modcem2_ptr[2*0+1] = ("mathieu_modcem2")
+ufunc_mathieu_modcem2_ptr[2*1] = _func_mcm2_wrap
+ufunc_mathieu_modcem2_ptr[2*1+1] = ("mathieu_modcem2")
+ufunc_mathieu_modcem2_data[0] = &ufunc_mathieu_modcem2_ptr[2*0]
+ufunc_mathieu_modcem2_data[1] = &ufunc_mathieu_modcem2_ptr[2*1]
+mathieu_modcem2 = np.PyUFunc_FromFuncAndData(ufunc_mathieu_modcem2_loops, ufunc_mathieu_modcem2_data, ufunc_mathieu_modcem2_types, 2, 3, 2, 0, "mathieu_modcem2", ufunc_mathieu_modcem2_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_mathieu_modsem1_loops[2]
+cdef void *ufunc_mathieu_modsem1_ptr[4]
+cdef void *ufunc_mathieu_modsem1_data[2]
+cdef char ufunc_mathieu_modsem1_types[10]
+cdef char *ufunc_mathieu_modsem1_doc = (
+    "mathieu_modsem1(m, q, x, out=None)\n"
+    "\n"
+    "Odd modified Mathieu function of the first kind and its derivative\n"
+    "\n"
+    "Evaluates the odd modified Mathieu function of the first kind,\n"
+    "Ms1m(x, q), and its derivative at `x` (given in degrees) for order `m`\n"
+    "and parameter `q`.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "m : array_like\n"
+    "    Order of the function\n"
+    "q : array_like\n"
+    "    Parameter of the function\n"
+    "x : array_like\n"
+    "    Argument of the function, *given in degrees, not radians*\n"
+    "out : tuple of ndarray, optional\n"
+    "    Optional output arrays for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "y : scalar or ndarray\n"
+    "    Value of the function\n"
+    "yp : scalar or ndarray\n"
+    "    Value of the derivative vs x\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "mathieu_modcem1")
+ufunc_mathieu_modsem1_loops[0] = loop_i_ddd_dd_As_fff_ff
+ufunc_mathieu_modsem1_loops[1] = loop_i_ddd_dd_As_ddd_dd
+ufunc_mathieu_modsem1_types[0] = NPY_FLOAT
+ufunc_mathieu_modsem1_types[1] = NPY_FLOAT
+ufunc_mathieu_modsem1_types[2] = NPY_FLOAT
+ufunc_mathieu_modsem1_types[3] = NPY_FLOAT
+ufunc_mathieu_modsem1_types[4] = NPY_FLOAT
+ufunc_mathieu_modsem1_types[5] = NPY_DOUBLE
+ufunc_mathieu_modsem1_types[6] = NPY_DOUBLE
+ufunc_mathieu_modsem1_types[7] = NPY_DOUBLE
+ufunc_mathieu_modsem1_types[8] = NPY_DOUBLE
+ufunc_mathieu_modsem1_types[9] = NPY_DOUBLE
+ufunc_mathieu_modsem1_ptr[2*0] = _func_msm1_wrap
+ufunc_mathieu_modsem1_ptr[2*0+1] = ("mathieu_modsem1")
+ufunc_mathieu_modsem1_ptr[2*1] = _func_msm1_wrap
+ufunc_mathieu_modsem1_ptr[2*1+1] = ("mathieu_modsem1")
+ufunc_mathieu_modsem1_data[0] = &ufunc_mathieu_modsem1_ptr[2*0]
+ufunc_mathieu_modsem1_data[1] = &ufunc_mathieu_modsem1_ptr[2*1]
+mathieu_modsem1 = np.PyUFunc_FromFuncAndData(ufunc_mathieu_modsem1_loops, ufunc_mathieu_modsem1_data, ufunc_mathieu_modsem1_types, 2, 3, 2, 0, "mathieu_modsem1", ufunc_mathieu_modsem1_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_mathieu_modsem2_loops[2]
+cdef void *ufunc_mathieu_modsem2_ptr[4]
+cdef void *ufunc_mathieu_modsem2_data[2]
+cdef char ufunc_mathieu_modsem2_types[10]
+cdef char *ufunc_mathieu_modsem2_doc = (
+    "mathieu_modsem2(m, q, x, out=None)\n"
+    "\n"
+    "Odd modified Mathieu function of the second kind and its derivative\n"
+    "\n"
+    "Evaluates the odd modified Mathieu function of the second kind,\n"
+    "Ms2m(x, q), and its derivative at `x` (given in degrees) for order `m`\n"
+    "and parameter q.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "m : array_like\n"
+    "    Order of the function\n"
+    "q : array_like\n"
+    "    Parameter of the function\n"
+    "x : array_like\n"
+    "    Argument of the function, *given in degrees, not radians*\n"
+    "out : tuple of ndarray, optional\n"
+    "    Optional output arrays for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "y : scalar or ndarray\n"
+    "    Value of the function\n"
+    "yp : scalar or ndarray\n"
+    "    Value of the derivative vs x\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "mathieu_modcem2")
+ufunc_mathieu_modsem2_loops[0] = loop_i_ddd_dd_As_fff_ff
+ufunc_mathieu_modsem2_loops[1] = loop_i_ddd_dd_As_ddd_dd
+ufunc_mathieu_modsem2_types[0] = NPY_FLOAT
+ufunc_mathieu_modsem2_types[1] = NPY_FLOAT
+ufunc_mathieu_modsem2_types[2] = NPY_FLOAT
+ufunc_mathieu_modsem2_types[3] = NPY_FLOAT
+ufunc_mathieu_modsem2_types[4] = NPY_FLOAT
+ufunc_mathieu_modsem2_types[5] = NPY_DOUBLE
+ufunc_mathieu_modsem2_types[6] = NPY_DOUBLE
+ufunc_mathieu_modsem2_types[7] = NPY_DOUBLE
+ufunc_mathieu_modsem2_types[8] = NPY_DOUBLE
+ufunc_mathieu_modsem2_types[9] = NPY_DOUBLE
+ufunc_mathieu_modsem2_ptr[2*0] = _func_msm2_wrap
+ufunc_mathieu_modsem2_ptr[2*0+1] = ("mathieu_modsem2")
+ufunc_mathieu_modsem2_ptr[2*1] = _func_msm2_wrap
+ufunc_mathieu_modsem2_ptr[2*1+1] = ("mathieu_modsem2")
+ufunc_mathieu_modsem2_data[0] = &ufunc_mathieu_modsem2_ptr[2*0]
+ufunc_mathieu_modsem2_data[1] = &ufunc_mathieu_modsem2_ptr[2*1]
+mathieu_modsem2 = np.PyUFunc_FromFuncAndData(ufunc_mathieu_modsem2_loops, ufunc_mathieu_modsem2_data, ufunc_mathieu_modsem2_types, 2, 3, 2, 0, "mathieu_modsem2", ufunc_mathieu_modsem2_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_mathieu_sem_loops[2]
+cdef void *ufunc_mathieu_sem_ptr[4]
+cdef void *ufunc_mathieu_sem_data[2]
+cdef char ufunc_mathieu_sem_types[10]
+cdef char *ufunc_mathieu_sem_doc = (
+    "mathieu_sem(m, q, x, out=None)\n"
+    "\n"
+    "Odd Mathieu function and its derivative\n"
+    "\n"
+    "Returns the odd Mathieu function, se_m(x, q), of order `m` and\n"
+    "parameter `q` evaluated at `x` (given in degrees).  Also returns the\n"
+    "derivative with respect to `x` of se_m(x, q).\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "m : array_like\n"
+    "    Order of the function\n"
+    "q : array_like\n"
+    "    Parameter of the function\n"
+    "x : array_like\n"
+    "    Argument of the function, *given in degrees, not radians*.\n"
+    "out : tuple of ndarray, optional\n"
+    "    Optional output arrays for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "y : scalar or ndarray\n"
+    "    Value of the function\n"
+    "yp : scalar or ndarray\n"
+    "    Value of the derivative vs x\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "mathieu_a, mathieu_b, mathieu_cem")
+ufunc_mathieu_sem_loops[0] = loop_i_ddd_dd_As_fff_ff
+ufunc_mathieu_sem_loops[1] = loop_i_ddd_dd_As_ddd_dd
+ufunc_mathieu_sem_types[0] = NPY_FLOAT
+ufunc_mathieu_sem_types[1] = NPY_FLOAT
+ufunc_mathieu_sem_types[2] = NPY_FLOAT
+ufunc_mathieu_sem_types[3] = NPY_FLOAT
+ufunc_mathieu_sem_types[4] = NPY_FLOAT
+ufunc_mathieu_sem_types[5] = NPY_DOUBLE
+ufunc_mathieu_sem_types[6] = NPY_DOUBLE
+ufunc_mathieu_sem_types[7] = NPY_DOUBLE
+ufunc_mathieu_sem_types[8] = NPY_DOUBLE
+ufunc_mathieu_sem_types[9] = NPY_DOUBLE
+ufunc_mathieu_sem_ptr[2*0] = _func_sem_wrap
+ufunc_mathieu_sem_ptr[2*0+1] = ("mathieu_sem")
+ufunc_mathieu_sem_ptr[2*1] = _func_sem_wrap
+ufunc_mathieu_sem_ptr[2*1+1] = ("mathieu_sem")
+ufunc_mathieu_sem_data[0] = &ufunc_mathieu_sem_ptr[2*0]
+ufunc_mathieu_sem_data[1] = &ufunc_mathieu_sem_ptr[2*1]
+mathieu_sem = np.PyUFunc_FromFuncAndData(ufunc_mathieu_sem_loops, ufunc_mathieu_sem_data, ufunc_mathieu_sem_types, 2, 3, 2, 0, "mathieu_sem", ufunc_mathieu_sem_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_modfresnelm_loops[2]
+cdef void *ufunc_modfresnelm_ptr[4]
+cdef void *ufunc_modfresnelm_data[2]
+cdef char ufunc_modfresnelm_types[6]
+cdef char *ufunc_modfresnelm_doc = (
+    "modfresnelm(x, out=None)\n"
+    "\n"
+    "Modified Fresnel negative integrals\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Function argument\n"
+    "out : tuple of ndarray, optional\n"
+    "    Optional output arrays for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "fm : scalar or ndarray\n"
+    "    Integral ``F_-(x)``: ``integral(exp(-1j*t*t), t=x..inf)``\n"
+    "km : scalar or ndarray\n"
+    "    Integral ``K_-(x)``: ``1/sqrt(pi)*exp(1j*(x*x+pi/4))*fp``\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "modfresnelp")
+ufunc_modfresnelm_loops[0] = loop_i_d_DD_As_f_FF
+ufunc_modfresnelm_loops[1] = loop_i_d_DD_As_d_DD
+ufunc_modfresnelm_types[0] = NPY_FLOAT
+ufunc_modfresnelm_types[1] = NPY_CFLOAT
+ufunc_modfresnelm_types[2] = NPY_CFLOAT
+ufunc_modfresnelm_types[3] = NPY_DOUBLE
+ufunc_modfresnelm_types[4] = NPY_CDOUBLE
+ufunc_modfresnelm_types[5] = NPY_CDOUBLE
+ufunc_modfresnelm_ptr[2*0] = _func_modified_fresnel_minus_wrap
+ufunc_modfresnelm_ptr[2*0+1] = ("modfresnelm")
+ufunc_modfresnelm_ptr[2*1] = _func_modified_fresnel_minus_wrap
+ufunc_modfresnelm_ptr[2*1+1] = ("modfresnelm")
+ufunc_modfresnelm_data[0] = &ufunc_modfresnelm_ptr[2*0]
+ufunc_modfresnelm_data[1] = &ufunc_modfresnelm_ptr[2*1]
+modfresnelm = np.PyUFunc_FromFuncAndData(ufunc_modfresnelm_loops, ufunc_modfresnelm_data, ufunc_modfresnelm_types, 2, 1, 2, 0, "modfresnelm", ufunc_modfresnelm_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_modfresnelp_loops[2]
+cdef void *ufunc_modfresnelp_ptr[4]
+cdef void *ufunc_modfresnelp_data[2]
+cdef char ufunc_modfresnelp_types[6]
+cdef char *ufunc_modfresnelp_doc = (
+    "modfresnelp(x, out=None)\n"
+    "\n"
+    "Modified Fresnel positive integrals\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Function argument\n"
+    "out : tuple of ndarray, optional\n"
+    "    Optional output arrays for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "fp : scalar or ndarray\n"
+    "    Integral ``F_+(x)``: ``integral(exp(1j*t*t), t=x..inf)``\n"
+    "kp : scalar or ndarray\n"
+    "    Integral ``K_+(x)``: ``1/sqrt(pi)*exp(-1j*(x*x+pi/4))*fp``\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "modfresnelm")
+ufunc_modfresnelp_loops[0] = loop_i_d_DD_As_f_FF
+ufunc_modfresnelp_loops[1] = loop_i_d_DD_As_d_DD
+ufunc_modfresnelp_types[0] = NPY_FLOAT
+ufunc_modfresnelp_types[1] = NPY_CFLOAT
+ufunc_modfresnelp_types[2] = NPY_CFLOAT
+ufunc_modfresnelp_types[3] = NPY_DOUBLE
+ufunc_modfresnelp_types[4] = NPY_CDOUBLE
+ufunc_modfresnelp_types[5] = NPY_CDOUBLE
+ufunc_modfresnelp_ptr[2*0] = _func_modified_fresnel_plus_wrap
+ufunc_modfresnelp_ptr[2*0+1] = ("modfresnelp")
+ufunc_modfresnelp_ptr[2*1] = _func_modified_fresnel_plus_wrap
+ufunc_modfresnelp_ptr[2*1+1] = ("modfresnelp")
+ufunc_modfresnelp_data[0] = &ufunc_modfresnelp_ptr[2*0]
+ufunc_modfresnelp_data[1] = &ufunc_modfresnelp_ptr[2*1]
+modfresnelp = np.PyUFunc_FromFuncAndData(ufunc_modfresnelp_loops, ufunc_modfresnelp_data, ufunc_modfresnelp_types, 2, 1, 2, 0, "modfresnelp", ufunc_modfresnelp_doc, 0)
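+
+# Hedged doctest-style sketch covering `modfresnelm` and `modfresnelp`
+# together (assumes both are importable from scipy.special): for real x the
+# two integrands are complex conjugates, so F_-(x) = conj(F_+(x)) and
+# K_-(x) = conj(K_+(x)).
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import modfresnelm, modfresnelp
+#     >>> fm, km = modfresnelm(1.5)
+#     >>> fp, kp = modfresnelp(1.5)
+#     >>> np.allclose([fm, km], [np.conj(fp), np.conj(kp)])
+#     True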
+
+cdef np.PyUFuncGenericFunction ufunc_modstruve_loops[2]
+cdef void *ufunc_modstruve_ptr[4]
+cdef void *ufunc_modstruve_data[2]
+cdef char ufunc_modstruve_types[6]
+cdef char *ufunc_modstruve_doc = (
+    "modstruve(v, x, out=None)\n"
+    "\n"
+    "Modified Struve function.\n"
+    "\n"
+    "Return the value of the modified Struve function of order `v` at `x`.  The\n"
+    "modified Struve function is defined as,\n"
+    "\n"
+    ".. math::\n"
+    "    L_v(x) = -\\imath \\exp(-\\pi\\imath v/2) H_v(\\imath x),\n"
+    "\n"
+    "where :math:`H_v` is the Struve function.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "v : array_like\n"
+    "    Order of the modified Struve function (float).\n"
+    "x : array_like\n"
+    "    Argument of the Struve function (float; must be positive unless `v` is\n"
+    "    an integer).\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "L : scalar or ndarray\n"
+    "    Value of the modified Struve function of order `v` at `x`.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "Three methods discussed in [1]_ are used to evaluate the function:\n"
+    "\n"
+    "- power series\n"
+    "- expansion in Bessel functions (if :math:`|x| < |v| + 20`)\n"
+    "- asymptotic large-x expansion (if :math:`x \\geq 0.7v + 12`)\n"
+    "\n"
+    "Rounding errors are estimated based on the largest terms in the sums, and\n"
+    "the result associated with the smallest error is returned.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "struve\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] NIST Digital Library of Mathematical Functions\n"
+    "       https://dlmf.nist.gov/11\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Calculate the modified Struve function of order 1 at 2.\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import modstruve\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> modstruve(1, 2.)\n"
+    "1.102759787367716\n"
+    "\n"
+    "Calculate the modified Struve function at 2 for orders 1, 2 and 3 by\n"
+    "providing a list for the order parameter `v`.\n"
+    "\n"
+    ">>> modstruve([1, 2, 3], 2.)\n"
+    "array([1.10275979, 0.41026079, 0.11247294])\n"
+    "\n"
+    "Calculate the modified Struve function of order 1 for several points\n"
+    "by providing an array for `x`.\n"
+    "\n"
+    ">>> points = np.array([2., 5., 8.])\n"
+    ">>> modstruve(1, points)\n"
+    "array([  1.10275979,  23.72821578, 399.24709139])\n"
+    "\n"
+    "Compute the modified Struve function for several orders at several\n"
+    "points by providing arrays for `v` and `z`. The arrays have to be\n"
+    "broadcastable to the correct shapes.\n"
+    "\n"
+    ">>> orders = np.array([[1], [2], [3]])\n"
+    ">>> points.shape, orders.shape\n"
+    "((3,), (3, 1))\n"
+    "\n"
+    ">>> modstruve(orders, points)\n"
+    "array([[1.10275979e+00, 2.37282158e+01, 3.99247091e+02],\n"
+    "       [4.10260789e-01, 1.65535979e+01, 3.25973609e+02],\n"
+    "       [1.12472937e-01, 9.42430454e+00, 2.33544042e+02]])\n"
+    "\n"
+    "Plot the modified Struve functions of order 0 to 3 from -5 to 5.\n"
+    "\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> x = np.linspace(-5., 5., 1000)\n"
+    ">>> for i in range(4):\n"
+    "...     ax.plot(x, modstruve(i, x), label=f'$L_{i!r}$')\n"
+    ">>> ax.legend(ncol=2)\n"
+    ">>> ax.set_xlim(-5, 5)\n"
+    ">>> ax.set_title(r\"Modified Struve functions $L_{\\nu}$\")\n"
+    ">>> plt.show()")
+ufunc_modstruve_loops[0] = loop_d_dd__As_ff_f
+ufunc_modstruve_loops[1] = loop_d_dd__As_dd_d
+ufunc_modstruve_types[0] = NPY_FLOAT
+ufunc_modstruve_types[1] = NPY_FLOAT
+ufunc_modstruve_types[2] = NPY_FLOAT
+ufunc_modstruve_types[3] = NPY_DOUBLE
+ufunc_modstruve_types[4] = NPY_DOUBLE
+ufunc_modstruve_types[5] = NPY_DOUBLE
+ufunc_modstruve_ptr[2*0] = _func_struve_l
+ufunc_modstruve_ptr[2*0+1] = ("modstruve")
+ufunc_modstruve_ptr[2*1] = _func_struve_l
+ufunc_modstruve_ptr[2*1+1] = ("modstruve")
+ufunc_modstruve_data[0] = &ufunc_modstruve_ptr[2*0]
+ufunc_modstruve_data[1] = &ufunc_modstruve_ptr[2*1]
+modstruve = np.PyUFunc_FromFuncAndData(ufunc_modstruve_loops, ufunc_modstruve_data, ufunc_modstruve_types, 2, 2, 1, 0, "modstruve", ufunc_modstruve_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_nbdtr_loops[3]
+cdef void *ufunc_nbdtr_ptr[6]
+cdef void *ufunc_nbdtr_data[3]
+cdef char ufunc_nbdtr_types[12]
+cdef char *ufunc_nbdtr_doc = (
+    "nbdtr(k, n, p, out=None)\n"
+    "\n"
+    "Negative binomial cumulative distribution function.\n"
+    "\n"
+    "Returns the sum of the terms 0 through `k` of the negative binomial\n"
+    "distribution probability mass function,\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    F = \\sum_{j=0}^k {{n + j - 1}\\choose{j}} p^n (1 - p)^j.\n"
+    "\n"
+    "In a sequence of Bernoulli trials with individual success probabilities\n"
+    "`p`, this is the probability that `k` or fewer failures precede the nth\n"
+    "success.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "k : array_like\n"
+    "    The maximum number of allowed failures (nonnegative int).\n"
+    "n : array_like\n"
+    "    The target number of successes (positive int).\n"
+    "p : array_like\n"
+    "    Probability of success in a single event (float).\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "F : scalar or ndarray\n"
+    "    The probability of `k` or fewer failures before `n` successes in a\n"
+    "    sequence of events with individual success probability `p`.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "nbdtrc\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "If floating point values are passed for `k` or `n`, they will be truncated\n"
+    "to integers.\n"
+    "\n"
+    "The terms are not summed directly; instead the regularized incomplete beta\n"
+    "function is employed, according to the formula,\n"
+    "\n"
+    ".. math::\n"
+    "    \\mathrm{nbdtr}(k, n, p) = I_{p}(n, k + 1).\n"
+    "\n"
+    "Wrapper for the Cephes [1]_ routine `nbdtr`.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Cephes Mathematical Functions Library,\n"
+    "       http://www.netlib.org/cephes/")
+ufunc_nbdtr_loops[0] = loop_d_iid__As_lld_d
+ufunc_nbdtr_loops[1] = loop_d_ddd__As_fff_f
+ufunc_nbdtr_loops[2] = loop_d_ddd__As_ddd_d
+ufunc_nbdtr_types[0] = NPY_LONG
+ufunc_nbdtr_types[1] = NPY_LONG
+ufunc_nbdtr_types[2] = NPY_DOUBLE
+ufunc_nbdtr_types[3] = NPY_DOUBLE
+ufunc_nbdtr_types[4] = NPY_FLOAT
+ufunc_nbdtr_types[5] = NPY_FLOAT
+ufunc_nbdtr_types[6] = NPY_FLOAT
+ufunc_nbdtr_types[7] = NPY_FLOAT
+ufunc_nbdtr_types[8] = NPY_DOUBLE
+ufunc_nbdtr_types[9] = NPY_DOUBLE
+ufunc_nbdtr_types[10] = NPY_DOUBLE
+ufunc_nbdtr_types[11] = NPY_DOUBLE
+ufunc_nbdtr_ptr[2*0] = _func_nbdtr
+ufunc_nbdtr_ptr[2*0+1] = ("nbdtr")
+ufunc_nbdtr_ptr[2*1] = _func_nbdtr_unsafe
+ufunc_nbdtr_ptr[2*1+1] = ("nbdtr")
+ufunc_nbdtr_ptr[2*2] = _func_nbdtr_unsafe
+ufunc_nbdtr_ptr[2*2+1] = ("nbdtr")
+ufunc_nbdtr_data[0] = &ufunc_nbdtr_ptr[2*0]
+ufunc_nbdtr_data[1] = &ufunc_nbdtr_ptr[2*1]
+ufunc_nbdtr_data[2] = &ufunc_nbdtr_ptr[2*2]
+nbdtr = np.PyUFunc_FromFuncAndData(ufunc_nbdtr_loops, ufunc_nbdtr_data, ufunc_nbdtr_types, 3, 3, 1, 0, "nbdtr", ufunc_nbdtr_doc, 0)
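+
+# Hedged doctest-style check of the incomplete-beta formula quoted above
+# (assumes scipy.special exposes `nbdtr` and `betainc`, where betainc(a, b, x)
+# is the regularized incomplete beta function I_x(a, b)):
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import nbdtr, betainc
+#     >>> k, n, p = 5, 3, 0.4
+#     >>> np.isclose(nbdtr(k, n, p), betainc(n, k + 1, p))   # nbdtr(k, n, p) = I_p(n, k+1)
+#     True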
+
+cdef np.PyUFuncGenericFunction ufunc_nbdtrc_loops[3]
+cdef void *ufunc_nbdtrc_ptr[6]
+cdef void *ufunc_nbdtrc_data[3]
+cdef char ufunc_nbdtrc_types[12]
+cdef char *ufunc_nbdtrc_doc = (
+    "nbdtrc(k, n, p, out=None)\n"
+    "\n"
+    "Negative binomial survival function.\n"
+    "\n"
+    "Returns the sum of the terms `k + 1` to infinity of the negative binomial\n"
+    "distribution probability mass function,\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    F = \\sum_{j=k + 1}^\\infty {{n + j - 1}\\choose{j}} p^n (1 - p)^j.\n"
+    "\n"
+    "In a sequence of Bernoulli trials with individual success probabilities\n"
+    "`p`, this is the probability that more than `k` failures precede the nth\n"
+    "success.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "k : array_like\n"
+    "    The maximum number of allowed failures (nonnegative int).\n"
+    "n : array_like\n"
+    "    The target number of successes (positive int).\n"
+    "p : array_like\n"
+    "    Probability of success in a single event (float).\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "F : scalar or ndarray\n"
+    "    The probability of `k + 1` or more failures before `n` successes in a\n"
+    "    sequence of events with individual success probability `p`.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "If floating point values are passed for `k` or `n`, they will be truncated\n"
+    "to integers.\n"
+    "\n"
+    "The terms are not summed directly; instead the regularized incomplete beta\n"
+    "function is employed, according to the formula,\n"
+    "\n"
+    ".. math::\n"
+    "    \\mathrm{nbdtrc}(k, n, p) = I_{1 - p}(k + 1, n).\n"
+    "\n"
+    "Wrapper for the Cephes [1]_ routine `nbdtrc`.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Cephes Mathematical Functions Library,\n"
+    "       http://www.netlib.org/cephes/")
+ufunc_nbdtrc_loops[0] = loop_d_iid__As_lld_d
+ufunc_nbdtrc_loops[1] = loop_d_ddd__As_fff_f
+ufunc_nbdtrc_loops[2] = loop_d_ddd__As_ddd_d
+ufunc_nbdtrc_types[0] = NPY_LONG
+ufunc_nbdtrc_types[1] = NPY_LONG
+ufunc_nbdtrc_types[2] = NPY_DOUBLE
+ufunc_nbdtrc_types[3] = NPY_DOUBLE
+ufunc_nbdtrc_types[4] = NPY_FLOAT
+ufunc_nbdtrc_types[5] = NPY_FLOAT
+ufunc_nbdtrc_types[6] = NPY_FLOAT
+ufunc_nbdtrc_types[7] = NPY_FLOAT
+ufunc_nbdtrc_types[8] = NPY_DOUBLE
+ufunc_nbdtrc_types[9] = NPY_DOUBLE
+ufunc_nbdtrc_types[10] = NPY_DOUBLE
+ufunc_nbdtrc_types[11] = NPY_DOUBLE
+ufunc_nbdtrc_ptr[2*0] = _func_nbdtrc
+ufunc_nbdtrc_ptr[2*0+1] = ("nbdtrc")
+ufunc_nbdtrc_ptr[2*1] = _func_nbdtrc_unsafe
+ufunc_nbdtrc_ptr[2*1+1] = ("nbdtrc")
+ufunc_nbdtrc_ptr[2*2] = _func_nbdtrc_unsafe
+ufunc_nbdtrc_ptr[2*2+1] = ("nbdtrc")
+ufunc_nbdtrc_data[0] = &ufunc_nbdtrc_ptr[2*0]
+ufunc_nbdtrc_data[1] = &ufunc_nbdtrc_ptr[2*1]
+ufunc_nbdtrc_data[2] = &ufunc_nbdtrc_ptr[2*2]
+nbdtrc = np.PyUFunc_FromFuncAndData(ufunc_nbdtrc_loops, ufunc_nbdtrc_data, ufunc_nbdtrc_types, 3, 3, 1, 0, "nbdtrc", ufunc_nbdtrc_doc, 0)
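+
+# Hedged sketch: `nbdtrc` is the complement of `nbdtr`, so for matching
+# arguments the two should sum to 1 (assumes both ufuncs are importable from
+# scipy.special).
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import nbdtr, nbdtrc
+#     >>> k, n, p = 5, 3, 0.4
+#     >>> np.isclose(nbdtr(k, n, p) + nbdtrc(k, n, p), 1.0)
+#     True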
+
+cdef np.PyUFuncGenericFunction ufunc_nbdtri_loops[3]
+cdef void *ufunc_nbdtri_ptr[6]
+cdef void *ufunc_nbdtri_data[3]
+cdef char ufunc_nbdtri_types[12]
+cdef char *ufunc_nbdtri_doc = (
+    "nbdtri(k, n, y, out=None)\n"
+    "\n"
+    "Inverse of `nbdtr` vs `p`.\n"
+    "\n"
+    "Returns the inverse with respect to the parameter `p` of\n"
+    "`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution\n"
+    "function.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "k : array_like\n"
+    "    The maximum number of allowed failures (nonnegative int).\n"
+    "n : array_like\n"
+    "    The target number of successes (positive int).\n"
+    "y : array_like\n"
+    "    The probability of `k` or fewer failures before `n` successes (float).\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "p : scalar or ndarray\n"
+    "    Probability of success in a single event (float) such that\n"
+    "    `nbdtr(k, n, p) = y`.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "nbdtr : Cumulative distribution function of the negative binomial.\n"
+    "nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`.\n"
+    "nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "Wrapper for the Cephes [1]_ routine `nbdtri`.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Cephes Mathematical Functions Library,\n"
+    "       http://www.netlib.org/cephes/")
+ufunc_nbdtri_loops[0] = loop_d_iid__As_lld_d
+ufunc_nbdtri_loops[1] = loop_d_ddd__As_fff_f
+ufunc_nbdtri_loops[2] = loop_d_ddd__As_ddd_d
+ufunc_nbdtri_types[0] = NPY_LONG
+ufunc_nbdtri_types[1] = NPY_LONG
+ufunc_nbdtri_types[2] = NPY_DOUBLE
+ufunc_nbdtri_types[3] = NPY_DOUBLE
+ufunc_nbdtri_types[4] = NPY_FLOAT
+ufunc_nbdtri_types[5] = NPY_FLOAT
+ufunc_nbdtri_types[6] = NPY_FLOAT
+ufunc_nbdtri_types[7] = NPY_FLOAT
+ufunc_nbdtri_types[8] = NPY_DOUBLE
+ufunc_nbdtri_types[9] = NPY_DOUBLE
+ufunc_nbdtri_types[10] = NPY_DOUBLE
+ufunc_nbdtri_types[11] = NPY_DOUBLE
+ufunc_nbdtri_ptr[2*0] = _func_nbdtri
+ufunc_nbdtri_ptr[2*0+1] = ("nbdtri")
+ufunc_nbdtri_ptr[2*1] = _func_nbdtri_unsafe
+ufunc_nbdtri_ptr[2*1+1] = ("nbdtri")
+ufunc_nbdtri_ptr[2*2] = _func_nbdtri_unsafe
+ufunc_nbdtri_ptr[2*2+1] = ("nbdtri")
+ufunc_nbdtri_data[0] = &ufunc_nbdtri_ptr[2*0]
+ufunc_nbdtri_data[1] = &ufunc_nbdtri_ptr[2*1]
+ufunc_nbdtri_data[2] = &ufunc_nbdtri_ptr[2*2]
+nbdtri = np.PyUFunc_FromFuncAndData(ufunc_nbdtri_loops, ufunc_nbdtri_data, ufunc_nbdtri_types, 3, 3, 1, 0, "nbdtri", ufunc_nbdtri_doc, 0)
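+
+# A round-trip sketch (comment only, illustrative): nbdtri inverts nbdtr
+# with respect to `p`, so a CDF value should map back to the success
+# probability it came from:
+#
+#     from scipy.special import nbdtr, nbdtri
+#     k, n, p = 5, 4, 0.25
+#     y = nbdtr(k, n, p)
+#     assert abs(nbdtri(k, n, y) - p) < 1e-10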
+
+cdef np.PyUFuncGenericFunction ufunc_nbdtrik_loops[2]
+cdef void *ufunc_nbdtrik_ptr[4]
+cdef void *ufunc_nbdtrik_data[2]
+cdef char ufunc_nbdtrik_types[8]
+cdef char *ufunc_nbdtrik_doc = (
+    "nbdtrik(y, n, p, out=None)\n"
+    "\n"
+    "Inverse of `nbdtr` vs `k`.\n"
+    "\n"
+    "Returns the inverse with respect to the parameter `k` of\n"
+    "`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution\n"
+    "function.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "y : array_like\n"
+    "    The probability of `k` or fewer failures before `n` successes (float).\n"
+    "n : array_like\n"
+    "    The target number of successes (positive int).\n"
+    "p : array_like\n"
+    "    Probability of success in a single event (float).\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "k : scalar or ndarray\n"
+    "    The maximum number of allowed failures such that `nbdtr(k, n, p) = y`.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "nbdtr : Cumulative distribution function of the negative binomial.\n"
+    "nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`.\n"
+    "nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`.\n"
+    "\n"
+    "Formula 26.5.26 of [2]_,\n"
+    "\n"
+    ".. math::\n"
+    "    \\sum_{j=k + 1}^\\infty {{n + j - 1}\\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n),\n"
+    "\n"
+    "is used to reduce calculation of the cumulative distribution function to\n"
+    "that of a regularized incomplete beta :math:`I`.\n"
+    "\n"
+    "Computation of `k` involves a search for a value that produces the desired\n"
+    "value of `y`.  The search relies on the monotonicity of `y` with `k`.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Barry Brown, James Lovato, and Kathy Russell,\n"
+    "       CDFLIB: Library of Fortran Routines for Cumulative Distribution\n"
+    "       Functions, Inverses, and Other Parameters.\n"
+    ".. [2] Milton Abramowitz and Irene A. Stegun, eds.\n"
+    "       Handbook of Mathematical Functions with Formulas,\n"
+    "       Graphs, and Mathematical Tables. New York: Dover, 1972.")
+ufunc_nbdtrik_loops[0] = loop_d_ddd__As_fff_f
+ufunc_nbdtrik_loops[1] = loop_d_ddd__As_ddd_d
+ufunc_nbdtrik_types[0] = NPY_FLOAT
+ufunc_nbdtrik_types[1] = NPY_FLOAT
+ufunc_nbdtrik_types[2] = NPY_FLOAT
+ufunc_nbdtrik_types[3] = NPY_FLOAT
+ufunc_nbdtrik_types[4] = NPY_DOUBLE
+ufunc_nbdtrik_types[5] = NPY_DOUBLE
+ufunc_nbdtrik_types[6] = NPY_DOUBLE
+ufunc_nbdtrik_types[7] = NPY_DOUBLE
+ufunc_nbdtrik_ptr[2*0] = _func_cdfnbn2_wrap
+ufunc_nbdtrik_ptr[2*0+1] = ("nbdtrik")
+ufunc_nbdtrik_ptr[2*1] = _func_cdfnbn2_wrap
+ufunc_nbdtrik_ptr[2*1+1] = ("nbdtrik")
+ufunc_nbdtrik_data[0] = &ufunc_nbdtrik_ptr[2*0]
+ufunc_nbdtrik_data[1] = &ufunc_nbdtrik_ptr[2*1]
+nbdtrik = np.PyUFunc_FromFuncAndData(ufunc_nbdtrik_loops, ufunc_nbdtrik_data, ufunc_nbdtrik_types, 2, 3, 1, 0, "nbdtrik", ufunc_nbdtrik_doc, 0)
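+
+# A round-trip sketch (comment only, illustrative): the CDFLIB search
+# described in the Notes treats `k` as continuous and should recover it
+# from a CDF value to within the search tolerance:
+#
+#     from scipy.special import nbdtr, nbdtrik
+#     n, p = 4, 0.25
+#     y = nbdtr(5, n, p)
+#     assert abs(nbdtrik(y, n, p) - 5.0) < 1e-6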
+
+cdef np.PyUFuncGenericFunction ufunc_nbdtrin_loops[2]
+cdef void *ufunc_nbdtrin_ptr[4]
+cdef void *ufunc_nbdtrin_data[2]
+cdef char ufunc_nbdtrin_types[8]
+cdef char *ufunc_nbdtrin_doc = (
+    "nbdtrin(k, y, p, out=None)\n"
+    "\n"
+    "Inverse of `nbdtr` vs `n`.\n"
+    "\n"
+    "Returns the inverse with respect to the parameter `n` of\n"
+    "`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution\n"
+    "function.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "k : array_like\n"
+    "    The maximum number of allowed failures (nonnegative int).\n"
+    "y : array_like\n"
+    "    The probability of `k` or fewer failures before `n` successes (float).\n"
+    "p : array_like\n"
+    "    Probability of success in a single event (float).\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "n : scalar or ndarray\n"
+    "    The number of successes `n` such that `nbdtr(k, n, p) = y`.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "nbdtr : Cumulative distribution function of the negative binomial.\n"
+    "nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`.\n"
+    "nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`.\n"
+    "\n"
+    "Formula 26.5.26 of [2]_,\n"
+    "\n"
+    ".. math::\n"
+    "    \\sum_{j=k + 1}^\\infty {{n + j - 1}\\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n),\n"
+    "\n"
+    "is used to reduce calculation of the cumulative distribution function to\n"
+    "that of a regularized incomplete beta :math:`I`.\n"
+    "\n"
+    "Computation of `n` involves a search for a value that produces the desired\n"
+    "value of `y`.  The search relies on the monotonicity of `y` with `n`.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Barry Brown, James Lovato, and Kathy Russell,\n"
+    "       CDFLIB: Library of Fortran Routines for Cumulative Distribution\n"
+    "       Functions, Inverses, and Other Parameters.\n"
+    ".. [2] Milton Abramowitz and Irene A. Stegun, eds.\n"
+    "       Handbook of Mathematical Functions with Formulas,\n"
+    "       Graphs, and Mathematical Tables. New York: Dover, 1972.")
+ufunc_nbdtrin_loops[0] = loop_d_ddd__As_fff_f
+ufunc_nbdtrin_loops[1] = loop_d_ddd__As_ddd_d
+ufunc_nbdtrin_types[0] = NPY_FLOAT
+ufunc_nbdtrin_types[1] = NPY_FLOAT
+ufunc_nbdtrin_types[2] = NPY_FLOAT
+ufunc_nbdtrin_types[3] = NPY_FLOAT
+ufunc_nbdtrin_types[4] = NPY_DOUBLE
+ufunc_nbdtrin_types[5] = NPY_DOUBLE
+ufunc_nbdtrin_types[6] = NPY_DOUBLE
+ufunc_nbdtrin_types[7] = NPY_DOUBLE
+ufunc_nbdtrin_ptr[2*0] = _func_cdfnbn3_wrap
+ufunc_nbdtrin_ptr[2*0+1] = ("nbdtrin")
+ufunc_nbdtrin_ptr[2*1] = _func_cdfnbn3_wrap
+ufunc_nbdtrin_ptr[2*1+1] = ("nbdtrin")
+ufunc_nbdtrin_data[0] = &ufunc_nbdtrin_ptr[2*0]
+ufunc_nbdtrin_data[1] = &ufunc_nbdtrin_ptr[2*1]
+nbdtrin = np.PyUFunc_FromFuncAndData(ufunc_nbdtrin_loops, ufunc_nbdtrin_data, ufunc_nbdtrin_types, 2, 3, 1, 0, "nbdtrin", ufunc_nbdtrin_doc, 0)
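+
+# The analogous sketch for the inverse in `n` (comment only, illustrative):
+# the monotone search should recover `n` from a CDF value:
+#
+#     from scipy.special import nbdtr, nbdtrin
+#     k, p = 5, 0.25
+#     y = nbdtr(k, 4, p)
+#     assert abs(nbdtrin(k, y, p) - 4.0) < 1e-6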
+
+cdef np.PyUFuncGenericFunction ufunc_ncfdtr_loops[2]
+cdef void *ufunc_ncfdtr_ptr[4]
+cdef void *ufunc_ncfdtr_data[2]
+cdef char ufunc_ncfdtr_types[10]
+cdef char *ufunc_ncfdtr_doc = (
+    "ncfdtr(dfn, dfd, nc, f, out=None)\n"
+    "\n"
+    "Cumulative distribution function of the non-central F distribution.\n"
+    "\n"
+    "The non-central F describes the distribution of,\n"
+    "\n"
+    ".. math::\n"
+    "    Z = \\frac{X/d_n}{Y/d_d}\n"
+    "\n"
+    "where :math:`X` and :math:`Y` are independently distributed, with\n"
+    ":math:`X` distributed non-central :math:`\\chi^2` with noncentrality\n"
+    "parameter `nc` and :math:`d_n` degrees of freedom, and :math:`Y`\n"
+    "distributed :math:`\\chi^2` with :math:`d_d` degrees of freedom.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "dfn : array_like\n"
+    "    Degrees of freedom of the numerator sum of squares.  Range (0, inf).\n"
+    "dfd : array_like\n"
+    "    Degrees of freedom of the denominator sum of squares.  Range (0, inf).\n"
+    "nc : array_like\n"
+    "    Noncentrality parameter.  Should be in range (0, 1e4).\n"
+    "f : array_like\n"
+    "    Quantiles, i.e. the upper limit of integration.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "cdf : scalar or ndarray\n"
+    "    The calculated CDF.  If all inputs are scalar, the return will be a\n"
+    "    float.  Otherwise it will be an array.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.\n"
+    "ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.\n"
+    "ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.\n"
+    "ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "Wrapper for the CDFLIB [1]_ Fortran routine `cdffnc`.\n"
+    "\n"
+    "The cumulative distribution function is computed using Formula 26.6.20 of\n"
+    "[2]_:\n"
+    "\n"
+    ".. math::\n"
+    "    F(d_n, d_d, n_c, f) = \\sum_{j=0}^\\infty e^{-n_c/2} \\frac{(n_c/2)^j}{j!} I_{x}(\\frac{d_n}{2} + j, \\frac{d_d}{2}),\n"
+    "\n"
+    "where :math:`I` is the regularized incomplete beta function, and\n"
+    ":math:`x = f d_n/(f d_n + d_d)`.\n"
+    "\n"
+    "The computation time required for this routine is proportional to the\n"
+    "noncentrality parameter `nc`.  Very large values of this parameter can\n"
+    "consume immense computer resources.  This is why the search range is\n"
+    "bounded by 10,000.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Barry Brown, James Lovato, and Kathy Russell,\n"
+    "       CDFLIB: Library of Fortran Routines for Cumulative Distribution\n"
+    "       Functions, Inverses, and Other Parameters.\n"
+    ".. [2] Milton Abramowitz and Irene A. Stegun, eds.\n"
+    "       Handbook of Mathematical Functions with Formulas,\n"
+    "       Graphs, and Mathematical Tables. New York: Dover, 1972.\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy import special\n"
+    ">>> from scipy import stats\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    "\n"
+    "Plot the CDF of the non-central F distribution, for nc=0.  Compare with the\n"
+    "F-distribution from scipy.stats:\n"
+    "\n"
+    ">>> x = np.linspace(-1, 8, num=500)\n"
+    ">>> dfn = 3\n"
+    ">>> dfd = 2\n"
+    ">>> ncf_stats = stats.f.cdf(x, dfn, dfd)\n"
+    ">>> ncf_special = special.ncfdtr(dfn, dfd, 0, x)\n"
+    "\n"
+    ">>> fig = plt.figure()\n"
+    ">>> ax = fig.add_subplot(111)\n"
+    ">>> ax.plot(x, ncf_stats, 'b-', lw=3)\n"
+    ">>> ax.plot(x, ncf_special, 'r-')\n"
+    ">>> plt.show()")
+ufunc_ncfdtr_loops[0] = loop_d_dddd__As_ffff_f
+ufunc_ncfdtr_loops[1] = loop_d_dddd__As_dddd_d
+ufunc_ncfdtr_types[0] = NPY_FLOAT
+ufunc_ncfdtr_types[1] = NPY_FLOAT
+ufunc_ncfdtr_types[2] = NPY_FLOAT
+ufunc_ncfdtr_types[3] = NPY_FLOAT
+ufunc_ncfdtr_types[4] = NPY_FLOAT
+ufunc_ncfdtr_types[5] = NPY_DOUBLE
+ufunc_ncfdtr_types[6] = NPY_DOUBLE
+ufunc_ncfdtr_types[7] = NPY_DOUBLE
+ufunc_ncfdtr_types[8] = NPY_DOUBLE
+ufunc_ncfdtr_types[9] = NPY_DOUBLE
+ufunc_ncfdtr_ptr[2*0] = _func_cdffnc1_wrap
+ufunc_ncfdtr_ptr[2*0+1] = ("ncfdtr")
+ufunc_ncfdtr_ptr[2*1] = _func_cdffnc1_wrap
+ufunc_ncfdtr_ptr[2*1+1] = ("ncfdtr")
+ufunc_ncfdtr_data[0] = &ufunc_ncfdtr_ptr[2*0]
+ufunc_ncfdtr_data[1] = &ufunc_ncfdtr_ptr[2*1]
+ncfdtr = np.PyUFunc_FromFuncAndData(ufunc_ncfdtr_loops, ufunc_ncfdtr_data, ufunc_ncfdtr_types, 2, 4, 1, 0, "ncfdtr", ufunc_ncfdtr_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_ncfdtri_loops[2]
+cdef void *ufunc_ncfdtri_ptr[4]
+cdef void *ufunc_ncfdtri_data[2]
+cdef char ufunc_ncfdtri_types[10]
+cdef char *ufunc_ncfdtri_doc = (
+    "ncfdtri(dfn, dfd, nc, p, out=None)\n"
+    "\n"
+    "Inverse with respect to `f` of the CDF of the non-central F distribution.\n"
+    "\n"
+    "See `ncfdtr` for more details.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "dfn : array_like\n"
+    "    Degrees of freedom of the numerator sum of squares.  Range (0, inf).\n"
+    "dfd : array_like\n"
+    "    Degrees of freedom of the denominator sum of squares.  Range (0, inf).\n"
+    "nc : array_like\n"
+    "    Noncentrality parameter.  Should be in range (0, 1e4).\n"
+    "p : array_like\n"
+    "    Value of the cumulative distribution function.  Must be in the\n"
+    "    range [0, 1].\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "f : scalar or ndarray\n"
+    "    Quantiles, i.e., the upper limit of integration.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "ncfdtr : CDF of the non-central F distribution.\n"
+    "ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.\n"
+    "ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.\n"
+    "ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> from scipy.special import ncfdtr, ncfdtri\n"
+    "\n"
+    "Compute the CDF for several values of `f`:\n"
+    "\n"
+    ">>> f = [0.5, 1, 1.5]\n"
+    ">>> p = ncfdtr(2, 3, 1.5, f)\n"
+    ">>> p\n"
+    "array([ 0.20782291,  0.36107392,  0.47345752])\n"
+    "\n"
+    "Compute the inverse.  We recover the values of `f`, as expected:\n"
+    "\n"
+    ">>> ncfdtri(2, 3, 1.5, p)\n"
+    "array([ 0.5,  1. ,  1.5])")
+ufunc_ncfdtri_loops[0] = loop_d_dddd__As_ffff_f
+ufunc_ncfdtri_loops[1] = loop_d_dddd__As_dddd_d
+ufunc_ncfdtri_types[0] = NPY_FLOAT
+ufunc_ncfdtri_types[1] = NPY_FLOAT
+ufunc_ncfdtri_types[2] = NPY_FLOAT
+ufunc_ncfdtri_types[3] = NPY_FLOAT
+ufunc_ncfdtri_types[4] = NPY_FLOAT
+ufunc_ncfdtri_types[5] = NPY_DOUBLE
+ufunc_ncfdtri_types[6] = NPY_DOUBLE
+ufunc_ncfdtri_types[7] = NPY_DOUBLE
+ufunc_ncfdtri_types[8] = NPY_DOUBLE
+ufunc_ncfdtri_types[9] = NPY_DOUBLE
+ufunc_ncfdtri_ptr[2*0] = _func_cdffnc2_wrap
+ufunc_ncfdtri_ptr[2*0+1] = ("ncfdtri")
+ufunc_ncfdtri_ptr[2*1] = _func_cdffnc2_wrap
+ufunc_ncfdtri_ptr[2*1+1] = ("ncfdtri")
+ufunc_ncfdtri_data[0] = &ufunc_ncfdtri_ptr[2*0]
+ufunc_ncfdtri_data[1] = &ufunc_ncfdtri_ptr[2*1]
+ncfdtri = np.PyUFunc_FromFuncAndData(ufunc_ncfdtri_loops, ufunc_ncfdtri_data, ufunc_ncfdtri_types, 2, 4, 1, 0, "ncfdtri", ufunc_ncfdtri_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_ncfdtridfd_loops[2]
+cdef void *ufunc_ncfdtridfd_ptr[4]
+cdef void *ufunc_ncfdtridfd_data[2]
+cdef char ufunc_ncfdtridfd_types[10]
+cdef char *ufunc_ncfdtridfd_doc = (
+    "ncfdtridfd(dfn, p, nc, f, out=None)\n"
+    "\n"
+    "Calculate degrees of freedom (denominator) for the noncentral F-distribution.\n"
+    "\n"
+    "This is the inverse with respect to `dfd` of `ncfdtr`.\n"
+    "See `ncfdtr` for more details.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "dfn : array_like\n"
+    "    Degrees of freedom of the numerator sum of squares.  Range (0, inf).\n"
+    "p : array_like\n"
+    "    Value of the cumulative distribution function.  Must be in the\n"
+    "    range [0, 1].\n"
+    "nc : array_like\n"
+    "    Noncentrality parameter.  Should be in range (0, 1e4).\n"
+    "f : array_like\n"
+    "    Quantiles, i.e., the upper limit of integration.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "dfd : scalar or ndarray\n"
+    "    Degrees of freedom of the denominator sum of squares.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "ncfdtr : CDF of the non-central F distribution.\n"
+    "ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.\n"
+    "ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.\n"
+    "ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "The value of the cumulative noncentral F distribution is not necessarily\n"
+    "monotone in either degrees of freedom. There thus may be two values that\n"
+    "provide a given CDF value. This routine assumes monotonicity and will\n"
+    "find an arbitrary one of the two values.\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> from scipy.special import ncfdtr, ncfdtridfd\n"
+    "\n"
+    "Compute the CDF for several values of `dfd`:\n"
+    "\n"
+    ">>> dfd = [1, 2, 3]\n"
+    ">>> p = ncfdtr(2, dfd, 0.25, 15)\n"
+    ">>> p\n"
+    "array([ 0.8097138 ,  0.93020416,  0.96787852])\n"
+    "\n"
+    "Compute the inverse.  We recover the values of `dfd`, as expected:\n"
+    "\n"
+    ">>> ncfdtridfd(2, p, 0.25, 15)\n"
+    "array([ 1.,  2.,  3.])")
+ufunc_ncfdtridfd_loops[0] = loop_d_dddd__As_ffff_f
+ufunc_ncfdtridfd_loops[1] = loop_d_dddd__As_dddd_d
+ufunc_ncfdtridfd_types[0] = NPY_FLOAT
+ufunc_ncfdtridfd_types[1] = NPY_FLOAT
+ufunc_ncfdtridfd_types[2] = NPY_FLOAT
+ufunc_ncfdtridfd_types[3] = NPY_FLOAT
+ufunc_ncfdtridfd_types[4] = NPY_FLOAT
+ufunc_ncfdtridfd_types[5] = NPY_DOUBLE
+ufunc_ncfdtridfd_types[6] = NPY_DOUBLE
+ufunc_ncfdtridfd_types[7] = NPY_DOUBLE
+ufunc_ncfdtridfd_types[8] = NPY_DOUBLE
+ufunc_ncfdtridfd_types[9] = NPY_DOUBLE
+ufunc_ncfdtridfd_ptr[2*0] = _func_cdffnc4_wrap
+ufunc_ncfdtridfd_ptr[2*0+1] = ("ncfdtridfd")
+ufunc_ncfdtridfd_ptr[2*1] = _func_cdffnc4_wrap
+ufunc_ncfdtridfd_ptr[2*1+1] = ("ncfdtridfd")
+ufunc_ncfdtridfd_data[0] = &ufunc_ncfdtridfd_ptr[2*0]
+ufunc_ncfdtridfd_data[1] = &ufunc_ncfdtridfd_ptr[2*1]
+ncfdtridfd = np.PyUFunc_FromFuncAndData(ufunc_ncfdtridfd_loops, ufunc_ncfdtridfd_data, ufunc_ncfdtridfd_types, 2, 4, 1, 0, "ncfdtridfd", ufunc_ncfdtridfd_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_ncfdtridfn_loops[2]
+cdef void *ufunc_ncfdtridfn_ptr[4]
+cdef void *ufunc_ncfdtridfn_data[2]
+cdef char ufunc_ncfdtridfn_types[10]
+cdef char *ufunc_ncfdtridfn_doc = (
+    "ncfdtridfn(p, dfd, nc, f, out=None)\n"
+    "\n"
+    "Calculate degrees of freedom (numerator) for the noncentral F-distribution.\n"
+    "\n"
+    "This is the inverse with respect to `dfn` of `ncfdtr`.\n"
+    "See `ncfdtr` for more details.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "p : array_like\n"
+    "    Value of the cumulative distribution function. Must be in the\n"
+    "    range [0, 1].\n"
+    "dfd : array_like\n"
+    "    Degrees of freedom of the denominator sum of squares. Range (0, inf).\n"
+    "nc : array_like\n"
+    "    Noncentrality parameter.  Should be in range (0, 1e4).\n"
+    "f : float\n"
+    "    Quantiles, i.e., the upper limit of integration.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "dfn : scalar or ndarray\n"
+    "    Degrees of freedom of the numerator sum of squares.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "ncfdtr : CDF of the non-central F distribution.\n"
+    "ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.\n"
+    "ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.\n"
+    "ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "The value of the cumulative noncentral F distribution is not necessarily\n"
+    "monotone in either degrees of freedom. There thus may be two values that\n"
+    "provide a given CDF value. This routine assumes monotonicity and will\n"
+    "find an arbitrary one of the two values.\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> from scipy.special import ncfdtr, ncfdtridfn\n"
+    "\n"
+    "Compute the CDF for several values of `dfn`:\n"
+    "\n"
+    ">>> dfn = [1, 2, 3]\n"
+    ">>> p = ncfdtr(dfn, 2, 0.25, 15)\n"
+    ">>> p\n"
+    "array([ 0.92562363,  0.93020416,  0.93188394])\n"
+    "\n"
+    "Compute the inverse. We recover the values of `dfn`, as expected:\n"
+    "\n"
+    ">>> ncfdtridfn(p, 2, 0.25, 15)\n"
+    "array([ 1.,  2.,  3.])")
+ufunc_ncfdtridfn_loops[0] = loop_d_dddd__As_ffff_f
+ufunc_ncfdtridfn_loops[1] = loop_d_dddd__As_dddd_d
+ufunc_ncfdtridfn_types[0] = NPY_FLOAT
+ufunc_ncfdtridfn_types[1] = NPY_FLOAT
+ufunc_ncfdtridfn_types[2] = NPY_FLOAT
+ufunc_ncfdtridfn_types[3] = NPY_FLOAT
+ufunc_ncfdtridfn_types[4] = NPY_FLOAT
+ufunc_ncfdtridfn_types[5] = NPY_DOUBLE
+ufunc_ncfdtridfn_types[6] = NPY_DOUBLE
+ufunc_ncfdtridfn_types[7] = NPY_DOUBLE
+ufunc_ncfdtridfn_types[8] = NPY_DOUBLE
+ufunc_ncfdtridfn_types[9] = NPY_DOUBLE
+ufunc_ncfdtridfn_ptr[2*0] = _func_cdffnc3_wrap
+ufunc_ncfdtridfn_ptr[2*0+1] = ("ncfdtridfn")
+ufunc_ncfdtridfn_ptr[2*1] = _func_cdffnc3_wrap
+ufunc_ncfdtridfn_ptr[2*1+1] = ("ncfdtridfn")
+ufunc_ncfdtridfn_data[0] = &ufunc_ncfdtridfn_ptr[2*0]
+ufunc_ncfdtridfn_data[1] = &ufunc_ncfdtridfn_ptr[2*1]
+ncfdtridfn = np.PyUFunc_FromFuncAndData(ufunc_ncfdtridfn_loops, ufunc_ncfdtridfn_data, ufunc_ncfdtridfn_types, 2, 4, 1, 0, "ncfdtridfn", ufunc_ncfdtridfn_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_ncfdtrinc_loops[2]
+cdef void *ufunc_ncfdtrinc_ptr[4]
+cdef void *ufunc_ncfdtrinc_data[2]
+cdef char ufunc_ncfdtrinc_types[10]
+cdef char *ufunc_ncfdtrinc_doc = (
+    "ncfdtrinc(dfn, dfd, p, f, out=None)\n"
+    "\n"
+    "Calculate non-centrality parameter for non-central F distribution.\n"
+    "\n"
+    "This is the inverse with respect to `nc` of `ncfdtr`.\n"
+    "See `ncfdtr` for more details.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "dfn : array_like\n"
+    "    Degrees of freedom of the numerator sum of squares. Range (0, inf).\n"
+    "dfd : array_like\n"
+    "    Degrees of freedom of the denominator sum of squares. Range (0, inf).\n"
+    "p : array_like\n"
+    "    Value of the cumulative distribution function. Must be in the\n"
+    "    range [0, 1].\n"
+    "f : array_like\n"
+    "    Quantiles, i.e., the upper limit of integration.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "nc : scalar or ndarray\n"
+    "    Noncentrality parameter.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "ncfdtr : CDF of the non-central F distribution.\n"
+    "ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.\n"
+    "ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.\n"
+    "ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> from scipy.special import ncfdtr, ncfdtrinc\n"
+    "\n"
+    "Compute the CDF for several values of `nc`:\n"
+    "\n"
+    ">>> nc = [0.5, 1.5, 2.0]\n"
+    ">>> p = ncfdtr(2, 3, nc, 15)\n"
+    ">>> p\n"
+    "array([ 0.96309246,  0.94327955,  0.93304098])\n"
+    "\n"
+    "Compute the inverse. We recover the values of `nc`, as expected:\n"
+    "\n"
+    ">>> ncfdtrinc(2, 3, p, 15)\n"
+    "array([ 0.5,  1.5,  2. ])")
+ufunc_ncfdtrinc_loops[0] = loop_d_dddd__As_ffff_f
+ufunc_ncfdtrinc_loops[1] = loop_d_dddd__As_dddd_d
+ufunc_ncfdtrinc_types[0] = NPY_FLOAT
+ufunc_ncfdtrinc_types[1] = NPY_FLOAT
+ufunc_ncfdtrinc_types[2] = NPY_FLOAT
+ufunc_ncfdtrinc_types[3] = NPY_FLOAT
+ufunc_ncfdtrinc_types[4] = NPY_FLOAT
+ufunc_ncfdtrinc_types[5] = NPY_DOUBLE
+ufunc_ncfdtrinc_types[6] = NPY_DOUBLE
+ufunc_ncfdtrinc_types[7] = NPY_DOUBLE
+ufunc_ncfdtrinc_types[8] = NPY_DOUBLE
+ufunc_ncfdtrinc_types[9] = NPY_DOUBLE
+ufunc_ncfdtrinc_ptr[2*0] = _func_cdffnc5_wrap
+ufunc_ncfdtrinc_ptr[2*0+1] = ("ncfdtrinc")
+ufunc_ncfdtrinc_ptr[2*1] = _func_cdffnc5_wrap
+ufunc_ncfdtrinc_ptr[2*1+1] = ("ncfdtrinc")
+ufunc_ncfdtrinc_data[0] = &ufunc_ncfdtrinc_ptr[2*0]
+ufunc_ncfdtrinc_data[1] = &ufunc_ncfdtrinc_ptr[2*1]
+ncfdtrinc = np.PyUFunc_FromFuncAndData(ufunc_ncfdtrinc_loops, ufunc_ncfdtrinc_data, ufunc_ncfdtrinc_types, 2, 4, 1, 0, "ncfdtrinc", ufunc_ncfdtrinc_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_nctdtr_loops[2]
+cdef void *ufunc_nctdtr_ptr[4]
+cdef void *ufunc_nctdtr_data[2]
+cdef char ufunc_nctdtr_types[8]
+cdef char *ufunc_nctdtr_doc = (
+    "nctdtr(df, nc, t, out=None)\n"
+    "\n"
+    "Cumulative distribution function of the non-central `t` distribution.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "df : array_like\n"
+    "    Degrees of freedom of the distribution. Should be in range (0, inf).\n"
+    "nc : array_like\n"
+    "    Noncentrality parameter. Should be in range (-1e6, 1e6).\n"
+    "t : array_like\n"
+    "    Quantiles, i.e., the upper limit of integration.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "cdf : scalar or ndarray\n"
+    "    The calculated CDF. If all inputs are scalar, the return will be a\n"
+    "    float. Otherwise, it will be an array.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.\n"
+    "nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.\n"
+    "nctdtrinc : Calculate non-centrality parameter, given CDF iCDF values.\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy import special\n"
+    ">>> from scipy import stats\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    "\n"
+    "Plot the CDF of the non-central t distribution, for nc=0. Compare with the\n"
+    "t-distribution from scipy.stats:\n"
+    "\n"
+    ">>> x = np.linspace(-5, 5, num=500)\n"
+    ">>> df = 3\n"
+    ">>> nct_stats = stats.t.cdf(x, df)\n"
+    ">>> nct_special = special.nctdtr(df, 0, x)\n"
+    "\n"
+    ">>> fig = plt.figure()\n"
+    ">>> ax = fig.add_subplot(111)\n"
+    ">>> ax.plot(x, nct_stats, 'b-', lw=3)\n"
+    ">>> ax.plot(x, nct_special, 'r-')\n"
+    ">>> plt.show()")
+ufunc_nctdtr_loops[0] = loop_d_ddd__As_fff_f
+ufunc_nctdtr_loops[1] = loop_d_ddd__As_ddd_d
+ufunc_nctdtr_types[0] = NPY_FLOAT
+ufunc_nctdtr_types[1] = NPY_FLOAT
+ufunc_nctdtr_types[2] = NPY_FLOAT
+ufunc_nctdtr_types[3] = NPY_FLOAT
+ufunc_nctdtr_types[4] = NPY_DOUBLE
+ufunc_nctdtr_types[5] = NPY_DOUBLE
+ufunc_nctdtr_types[6] = NPY_DOUBLE
+ufunc_nctdtr_types[7] = NPY_DOUBLE
+ufunc_nctdtr_ptr[2*0] = _func_cdftnc1_wrap
+ufunc_nctdtr_ptr[2*0+1] = ("nctdtr")
+ufunc_nctdtr_ptr[2*1] = _func_cdftnc1_wrap
+ufunc_nctdtr_ptr[2*1+1] = ("nctdtr")
+ufunc_nctdtr_data[0] = &ufunc_nctdtr_ptr[2*0]
+ufunc_nctdtr_data[1] = &ufunc_nctdtr_ptr[2*1]
+nctdtr = np.PyUFunc_FromFuncAndData(ufunc_nctdtr_loops, ufunc_nctdtr_data, ufunc_nctdtr_types, 2, 3, 1, 0, "nctdtr", ufunc_nctdtr_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_nctdtridf_loops[2]
+cdef void *ufunc_nctdtridf_ptr[4]
+cdef void *ufunc_nctdtridf_data[2]
+cdef char ufunc_nctdtridf_types[8]
+cdef char *ufunc_nctdtridf_doc = (
+    "nctdtridf(p, nc, t, out=None)\n"
+    "\n"
+    "Calculate degrees of freedom for non-central t distribution.\n"
+    "\n"
+    "See `nctdtr` for more details.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "p : array_like\n"
+    "    CDF values, in range (0, 1].\n"
+    "nc : array_like\n"
+    "    Noncentrality parameter. Should be in range (-1e6, 1e6).\n"
+    "t : array_like\n"
+    "    Quantiles, i.e., the upper limit of integration.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "cdf : scalar or ndarray\n"
+    "    The calculated CDF. If all inputs are scalar, the return will be a\n"
+    "    float. Otherwise, it will be an array.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "nctdtr :  CDF of the non-central `t` distribution.\n"
+    "nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.\n"
+    "nctdtrinc : Calculate non-centrality parameter, given CDF iCDF values.")
+ufunc_nctdtridf_loops[0] = loop_d_ddd__As_fff_f
+ufunc_nctdtridf_loops[1] = loop_d_ddd__As_ddd_d
+ufunc_nctdtridf_types[0] = NPY_FLOAT
+ufunc_nctdtridf_types[1] = NPY_FLOAT
+ufunc_nctdtridf_types[2] = NPY_FLOAT
+ufunc_nctdtridf_types[3] = NPY_FLOAT
+ufunc_nctdtridf_types[4] = NPY_DOUBLE
+ufunc_nctdtridf_types[5] = NPY_DOUBLE
+ufunc_nctdtridf_types[6] = NPY_DOUBLE
+ufunc_nctdtridf_types[7] = NPY_DOUBLE
+ufunc_nctdtridf_ptr[2*0] = _func_cdftnc3_wrap
+ufunc_nctdtridf_ptr[2*0+1] = ("nctdtridf")
+ufunc_nctdtridf_ptr[2*1] = _func_cdftnc3_wrap
+ufunc_nctdtridf_ptr[2*1+1] = ("nctdtridf")
+ufunc_nctdtridf_data[0] = &ufunc_nctdtridf_ptr[2*0]
+ufunc_nctdtridf_data[1] = &ufunc_nctdtridf_ptr[2*1]
+nctdtridf = np.PyUFunc_FromFuncAndData(ufunc_nctdtridf_loops, ufunc_nctdtridf_data, ufunc_nctdtridf_types, 2, 3, 1, 0, "nctdtridf", ufunc_nctdtridf_doc, 0)
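+
+# A round-trip sketch (comment only, illustrative): nctdtridf should recover
+# the degrees of freedom from a CDF value produced by nctdtr, to within the
+# CDFLIB search tolerance (hence the loose bound):
+#
+#     from scipy.special import nctdtr, nctdtridf
+#     df, nc, t = 10.0, 0.5, 1.5
+#     p = nctdtr(df, nc, t)
+#     assert abs(nctdtridf(p, nc, t) - df) < 1e-5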
+
+cdef np.PyUFuncGenericFunction ufunc_nctdtrinc_loops[2]
+cdef void *ufunc_nctdtrinc_ptr[4]
+cdef void *ufunc_nctdtrinc_data[2]
+cdef char ufunc_nctdtrinc_types[8]
+cdef char *ufunc_nctdtrinc_doc = (
+    "nctdtrinc(df, p, t, out=None)\n"
+    "\n"
+    "Calculate non-centrality parameter for non-central t distribution.\n"
+    "\n"
+    "See `nctdtr` for more details.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "df : array_like\n"
+    "    Degrees of freedom of the distribution. Should be in range (0, inf).\n"
+    "p : array_like\n"
+    "    CDF values, in range (0, 1].\n"
+    "t : array_like\n"
+    "    Quantiles, i.e., the upper limit of integration.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "nc : scalar or ndarray\n"
+    "    Noncentrality parameter\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "nctdtr :  CDF of the non-central `t` distribution.\n"
+    "nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.\n"
+    "nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.")
+ufunc_nctdtrinc_loops[0] = loop_d_ddd__As_fff_f
+ufunc_nctdtrinc_loops[1] = loop_d_ddd__As_ddd_d
+ufunc_nctdtrinc_types[0] = NPY_FLOAT
+ufunc_nctdtrinc_types[1] = NPY_FLOAT
+ufunc_nctdtrinc_types[2] = NPY_FLOAT
+ufunc_nctdtrinc_types[3] = NPY_FLOAT
+ufunc_nctdtrinc_types[4] = NPY_DOUBLE
+ufunc_nctdtrinc_types[5] = NPY_DOUBLE
+ufunc_nctdtrinc_types[6] = NPY_DOUBLE
+ufunc_nctdtrinc_types[7] = NPY_DOUBLE
+ufunc_nctdtrinc_ptr[2*0] = _func_cdftnc4_wrap
+ufunc_nctdtrinc_ptr[2*0+1] = ("nctdtrinc")
+ufunc_nctdtrinc_ptr[2*1] = _func_cdftnc4_wrap
+ufunc_nctdtrinc_ptr[2*1+1] = ("nctdtrinc")
+ufunc_nctdtrinc_data[0] = &ufunc_nctdtrinc_ptr[2*0]
+ufunc_nctdtrinc_data[1] = &ufunc_nctdtrinc_ptr[2*1]
+nctdtrinc = np.PyUFunc_FromFuncAndData(ufunc_nctdtrinc_loops, ufunc_nctdtrinc_data, ufunc_nctdtrinc_types, 2, 3, 1, 0, "nctdtrinc", ufunc_nctdtrinc_doc, 0)
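+
+# The same pattern for the noncentrality parameter (comment only,
+# illustrative): nctdtrinc should invert nctdtr with respect to `nc`:
+#
+#     from scipy.special import nctdtr, nctdtrinc
+#     df, nc, t = 10.0, 0.5, 1.5
+#     p = nctdtr(df, nc, t)
+#     assert abs(nctdtrinc(df, p, t) - nc) < 1e-5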
+
+cdef np.PyUFuncGenericFunction ufunc_nctdtrit_loops[2]
+cdef void *ufunc_nctdtrit_ptr[4]
+cdef void *ufunc_nctdtrit_data[2]
+cdef char ufunc_nctdtrit_types[8]
+cdef char *ufunc_nctdtrit_doc = (
+    "nctdtrit(df, nc, p, out=None)\n"
+    "\n"
+    "Inverse cumulative distribution function of the non-central t distribution.\n"
+    "\n"
+    "See `nctdtr` for more details.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "df : array_like\n"
+    "    Degrees of freedom of the distribution. Should be in range (0, inf).\n"
+    "nc : array_like\n"
+    "    Noncentrality parameter. Should be in range (-1e6, 1e6).\n"
+    "p : array_like\n"
+    "    CDF values, in range (0, 1].\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "t : scalar or ndarray\n"
+    "    Quantiles\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "nctdtr :  CDF of the non-central `t` distribution.\n"
+    "nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.\n"
+    "nctdtrinc : Calculate non-centrality parameter, given CDF iCDF values.")
+ufunc_nctdtrit_loops[0] = loop_d_ddd__As_fff_f
+ufunc_nctdtrit_loops[1] = loop_d_ddd__As_ddd_d
+ufunc_nctdtrit_types[0] = NPY_FLOAT
+ufunc_nctdtrit_types[1] = NPY_FLOAT
+ufunc_nctdtrit_types[2] = NPY_FLOAT
+ufunc_nctdtrit_types[3] = NPY_FLOAT
+ufunc_nctdtrit_types[4] = NPY_DOUBLE
+ufunc_nctdtrit_types[5] = NPY_DOUBLE
+ufunc_nctdtrit_types[6] = NPY_DOUBLE
+ufunc_nctdtrit_types[7] = NPY_DOUBLE
+ufunc_nctdtrit_ptr[2*0] = _func_cdftnc2_wrap
+ufunc_nctdtrit_ptr[2*0+1] = ("nctdtrit")
+ufunc_nctdtrit_ptr[2*1] = _func_cdftnc2_wrap
+ufunc_nctdtrit_ptr[2*1+1] = ("nctdtrit")
+ufunc_nctdtrit_data[0] = &ufunc_nctdtrit_ptr[2*0]
+ufunc_nctdtrit_data[1] = &ufunc_nctdtrit_ptr[2*1]
+nctdtrit = np.PyUFunc_FromFuncAndData(ufunc_nctdtrit_loops, ufunc_nctdtrit_data, ufunc_nctdtrit_types, 2, 3, 1, 0, "nctdtrit", ufunc_nctdtrit_doc, 0)
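+
+# And for the quantile (comment only, illustrative): nctdtrit should map a
+# CDF value back to the `t` it came from:
+#
+#     from scipy.special import nctdtr, nctdtrit
+#     df, nc, t = 10.0, 0.5, 1.5
+#     p = nctdtr(df, nc, t)
+#     assert abs(nctdtrit(df, nc, p) - t) < 1e-5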
+
+cdef np.PyUFuncGenericFunction ufunc_ndtr_loops[4]
+cdef void *ufunc_ndtr_ptr[8]
+cdef void *ufunc_ndtr_data[4]
+cdef char ufunc_ndtr_types[8]
+cdef char *ufunc_ndtr_doc = (
+    "ndtr(x, out=None)\n"
+    "\n"
+    "Gaussian cumulative distribution function.\n"
+    "\n"
+    "Returns the area under the standard Gaussian probability\n"
+    "density function, integrated from minus infinity to `x`\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "   \\frac{1}{\\sqrt{2\\pi}} \\int_{-\\infty}^x \\exp(-t^2/2) dt\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like, real or complex\n"
+    "    Argument\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    The value of the normal CDF evaluated at `x`\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "erf, erfc, scipy.stats.norm, log_ndtr")
+ufunc_ndtr_loops[0] = loop_d_d__As_f_f
+ufunc_ndtr_loops[1] = loop_d_d__As_d_d
+ufunc_ndtr_loops[2] = loop_D_D__As_F_F
+ufunc_ndtr_loops[3] = loop_D_D__As_D_D
+ufunc_ndtr_types[0] = NPY_FLOAT
+ufunc_ndtr_types[1] = NPY_FLOAT
+ufunc_ndtr_types[2] = NPY_DOUBLE
+ufunc_ndtr_types[3] = NPY_DOUBLE
+ufunc_ndtr_types[4] = NPY_CFLOAT
+ufunc_ndtr_types[5] = NPY_CFLOAT
+ufunc_ndtr_types[6] = NPY_CDOUBLE
+ufunc_ndtr_types[7] = NPY_CDOUBLE
+ufunc_ndtr_ptr[2*0] = _func_ndtr
+ufunc_ndtr_ptr[2*0+1] = ("ndtr")
+ufunc_ndtr_ptr[2*1] = _func_ndtr
+ufunc_ndtr_ptr[2*1+1] = ("ndtr")
+ufunc_ndtr_ptr[2*2] = scipy.special._ufuncs_cxx._export_faddeeva_ndtr
+ufunc_ndtr_ptr[2*2+1] = ("ndtr")
+ufunc_ndtr_ptr[2*3] = scipy.special._ufuncs_cxx._export_faddeeva_ndtr
+ufunc_ndtr_ptr[2*3+1] = ("ndtr")
+ufunc_ndtr_data[0] = &ufunc_ndtr_ptr[2*0]
+ufunc_ndtr_data[1] = &ufunc_ndtr_ptr[2*1]
+ufunc_ndtr_data[2] = &ufunc_ndtr_ptr[2*2]
+ufunc_ndtr_data[3] = &ufunc_ndtr_ptr[2*3]
+ndtr = np.PyUFunc_FromFuncAndData(ufunc_ndtr_loops, ufunc_ndtr_data, ufunc_ndtr_types, 4, 1, 1, 0, "ndtr", ufunc_ndtr_doc, 0)
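+
+# A minimal numerical sketch (comment only, illustrative): the integral
+# above gives ndtr(0) == 0.5, and ndtr relates to the error function
+# through ndtr(x) = (1 + erf(x / sqrt(2))) / 2:
+#
+#     import numpy as np
+#     from scipy.special import erf, ndtr
+#     assert ndtr(0.0) == 0.5
+#     x = 1.3
+#     assert abs(ndtr(x) - 0.5 * (1.0 + erf(x / np.sqrt(2.0)))) < 1e-13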
+
+cdef np.PyUFuncGenericFunction ufunc_ndtri_loops[2]
+cdef void *ufunc_ndtri_ptr[4]
+cdef void *ufunc_ndtri_data[2]
+cdef char ufunc_ndtri_types[4]
+cdef char *ufunc_ndtri_doc = (
+    "ndtri(y, out=None)\n"
+    "\n"
+    "Inverse of `ndtr` vs x\n"
+    "\n"
+    "Returns the argument x for which the area under the Gaussian\n"
+    "probability density function (integrated from minus infinity to `x`)\n"
+    "is equal to y.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "p : array_like\n"
+    "    Probability\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "x : scalar or ndarray\n"
+    "    Value of x such that ``ndtr(x) == p``.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "ndtr")
+ufunc_ndtri_loops[0] = loop_d_d__As_f_f
+ufunc_ndtri_loops[1] = loop_d_d__As_d_d
+ufunc_ndtri_types[0] = NPY_FLOAT
+ufunc_ndtri_types[1] = NPY_FLOAT
+ufunc_ndtri_types[2] = NPY_DOUBLE
+ufunc_ndtri_types[3] = NPY_DOUBLE
+ufunc_ndtri_ptr[2*0] = _func_ndtri
+ufunc_ndtri_ptr[2*0+1] = ("ndtri")
+ufunc_ndtri_ptr[2*1] = _func_ndtri
+ufunc_ndtri_ptr[2*1+1] = ("ndtri")
+ufunc_ndtri_data[0] = &ufunc_ndtri_ptr[2*0]
+ufunc_ndtri_data[1] = &ufunc_ndtri_ptr[2*1]
+ndtri = np.PyUFunc_FromFuncAndData(ufunc_ndtri_loops, ufunc_ndtri_data, ufunc_ndtri_types, 2, 1, 1, 0, "ndtri", ufunc_ndtri_doc, 0)
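+
+# A round-trip sketch (comment only, illustrative): ndtri maps a
+# probability back to the corresponding normal quantile, e.g. the familiar
+# 97.5% point near 1.96:
+#
+#     from scipy.special import ndtr, ndtri
+#     p = 0.975
+#     x = ndtri(p)               # approximately 1.96
+#     assert abs(ndtr(x) - p) < 1e-12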
+
+cdef np.PyUFuncGenericFunction ufunc_ndtri_exp_loops[2]
+cdef void *ufunc_ndtri_exp_ptr[4]
+cdef void *ufunc_ndtri_exp_data[2]
+cdef char ufunc_ndtri_exp_types[4]
+cdef char *ufunc_ndtri_exp_doc = (
+    "ndtri_exp(y, out=None)\n"
+    "\n"
+    "Inverse of `log_ndtr` vs x. Allows for greater precision than\n"
+    "`ndtri` composed with `numpy.exp` for very small values of y and for\n"
+    "y close to 0.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "y : array_like of float\n"
+    "    Function argument\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Inverse of the log CDF of the standard normal distribution, evaluated\n"
+    "    at y.\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> import scipy.special as sc\n"
+    "\n"
+    "`ndtri_exp` agrees with the naive implementation when the latter does\n"
+    "not suffer from underflow.\n"
+    "\n"
+    ">>> sc.ndtri_exp(-1)\n"
+    "-0.33747496376420244\n"
+    ">>> sc.ndtri(np.exp(-1))\n"
+    "-0.33747496376420244\n"
+    "\n"
+    "For extreme values of y, the naive approach fails\n"
+    "\n"
+    ">>> sc.ndtri(np.exp(-800))\n"
+    "-inf\n"
+    ">>> sc.ndtri(np.exp(-1e-20))\n"
+    "inf\n"
+    "\n"
+    "whereas `ndtri_exp` is still able to compute the result to high precision.\n"
+    "\n"
+    ">>> sc.ndtri_exp(-800)\n"
+    "-39.88469483825668\n"
+    ">>> sc.ndtri_exp(-1e-20)\n"
+    "9.262340089798409\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "log_ndtr, ndtri, ndtr")
+ufunc_ndtri_exp_loops[0] = loop_d_d__As_f_f
+ufunc_ndtri_exp_loops[1] = loop_d_d__As_d_d
+ufunc_ndtri_exp_types[0] = NPY_FLOAT
+ufunc_ndtri_exp_types[1] = NPY_FLOAT
+ufunc_ndtri_exp_types[2] = NPY_DOUBLE
+ufunc_ndtri_exp_types[3] = NPY_DOUBLE
+ufunc_ndtri_exp_ptr[2*0] = _func_ndtri_exp
+ufunc_ndtri_exp_ptr[2*0+1] = ("ndtri_exp")
+ufunc_ndtri_exp_ptr[2*1] = _func_ndtri_exp
+ufunc_ndtri_exp_ptr[2*1+1] = ("ndtri_exp")
+ufunc_ndtri_exp_data[0] = &ufunc_ndtri_exp_ptr[2*0]
+ufunc_ndtri_exp_data[1] = &ufunc_ndtri_exp_ptr[2*1]
+ndtri_exp = np.PyUFunc_FromFuncAndData(ufunc_ndtri_exp_loops, ufunc_ndtri_exp_data, ufunc_ndtri_exp_types, 2, 1, 1, 0, "ndtri_exp", ufunc_ndtri_exp_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_nrdtrimn_loops[2]
+cdef void *ufunc_nrdtrimn_ptr[4]
+cdef void *ufunc_nrdtrimn_data[2]
+cdef char ufunc_nrdtrimn_types[8]
+cdef char *ufunc_nrdtrimn_doc = (
+    "nrdtrimn(p, x, std, out=None)\n"
+    "\n"
+    "Calculate mean of normal distribution given other params.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "p : array_like\n"
+    "    CDF values, in range (0, 1].\n"
+    "x : array_like\n"
+    "    Quantiles, i.e. the upper limit of integration.\n"
+    "std : array_like\n"
+    "    Standard deviation.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "mn : scalar or ndarray\n"
+    "    The mean of the normal distribution.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "nrdtrimn, ndtr")
+ufunc_nrdtrimn_loops[0] = loop_d_ddd__As_fff_f
+ufunc_nrdtrimn_loops[1] = loop_d_ddd__As_ddd_d
+ufunc_nrdtrimn_types[0] = NPY_FLOAT
+ufunc_nrdtrimn_types[1] = NPY_FLOAT
+ufunc_nrdtrimn_types[2] = NPY_FLOAT
+ufunc_nrdtrimn_types[3] = NPY_FLOAT
+ufunc_nrdtrimn_types[4] = NPY_DOUBLE
+ufunc_nrdtrimn_types[5] = NPY_DOUBLE
+ufunc_nrdtrimn_types[6] = NPY_DOUBLE
+ufunc_nrdtrimn_types[7] = NPY_DOUBLE
+ufunc_nrdtrimn_ptr[2*0] = _func_cdfnor3_wrap
+ufunc_nrdtrimn_ptr[2*0+1] = ("nrdtrimn")
+ufunc_nrdtrimn_ptr[2*1] = _func_cdfnor3_wrap
+ufunc_nrdtrimn_ptr[2*1+1] = ("nrdtrimn")
+ufunc_nrdtrimn_data[0] = &ufunc_nrdtrimn_ptr[2*0]
+ufunc_nrdtrimn_data[1] = &ufunc_nrdtrimn_ptr[2*1]
+nrdtrimn = np.PyUFunc_FromFuncAndData(ufunc_nrdtrimn_loops, ufunc_nrdtrimn_data, ufunc_nrdtrimn_types, 2, 3, 1, 0, "nrdtrimn", ufunc_nrdtrimn_doc, 0)
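+
+# A consistency sketch (comment only, illustrative): with
+# p = ndtr((x - mn) / std), nrdtrimn should solve that relation for the
+# mean:
+#
+#     from scipy.special import ndtr, nrdtrimn
+#     mn, std, x = 1.0, 2.0, 2.5
+#     p = ndtr((x - mn) / std)
+#     assert abs(nrdtrimn(p, x, std) - mn) < 1e-8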
+
+cdef np.PyUFuncGenericFunction ufunc_nrdtrisd_loops[2]
+cdef void *ufunc_nrdtrisd_ptr[4]
+cdef void *ufunc_nrdtrisd_data[2]
+cdef char ufunc_nrdtrisd_types[8]
+cdef char *ufunc_nrdtrisd_doc = (
+    "nrdtrisd(p, x, mn, out=None)\n"
+    "\n"
+    "Calculate standard deviation of normal distribution given other params.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "p : array_like\n"
+    "    CDF values, in range (0, 1].\n"
+    "x : array_like\n"
+    "    Quantiles, i.e. the upper limit of integration.\n"
+    "mn : scalar or ndarray\n"
+    "    The mean of the normal distribution.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "std : scalar or ndarray\n"
+    "    Standard deviation.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "ndtr")
+ufunc_nrdtrisd_loops[0] = loop_d_ddd__As_fff_f
+ufunc_nrdtrisd_loops[1] = loop_d_ddd__As_ddd_d
+ufunc_nrdtrisd_types[0] = NPY_FLOAT
+ufunc_nrdtrisd_types[1] = NPY_FLOAT
+ufunc_nrdtrisd_types[2] = NPY_FLOAT
+ufunc_nrdtrisd_types[3] = NPY_FLOAT
+ufunc_nrdtrisd_types[4] = NPY_DOUBLE
+ufunc_nrdtrisd_types[5] = NPY_DOUBLE
+ufunc_nrdtrisd_types[6] = NPY_DOUBLE
+ufunc_nrdtrisd_types[7] = NPY_DOUBLE
+ufunc_nrdtrisd_ptr[2*0] = _func_cdfnor4_wrap
+ufunc_nrdtrisd_ptr[2*0+1] = ("nrdtrisd")
+ufunc_nrdtrisd_ptr[2*1] = _func_cdfnor4_wrap
+ufunc_nrdtrisd_ptr[2*1+1] = ("nrdtrisd")
+ufunc_nrdtrisd_data[0] = &ufunc_nrdtrisd_ptr[2*0]
+ufunc_nrdtrisd_data[1] = &ufunc_nrdtrisd_ptr[2*1]
+nrdtrisd = np.PyUFunc_FromFuncAndData(ufunc_nrdtrisd_loops, ufunc_nrdtrisd_data, ufunc_nrdtrisd_types, 2, 3, 1, 0, "nrdtrisd", ufunc_nrdtrisd_doc, 0)
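+
+# The companion sketch for the standard deviation (comment only,
+# illustrative), inverting the same relation p = ndtr((x - mn) / std):
+#
+#     from scipy.special import ndtr, nrdtrisd
+#     mn, std, x = 1.0, 2.0, 2.5
+#     p = ndtr((x - mn) / std)
+#     assert abs(nrdtrisd(p, x, mn) - std) < 1e-8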
+
+cdef np.PyUFuncGenericFunction ufunc_obl_ang1_loops[2]
+cdef void *ufunc_obl_ang1_ptr[4]
+cdef void *ufunc_obl_ang1_data[2]
+cdef char ufunc_obl_ang1_types[12]
+cdef char *ufunc_obl_ang1_doc = (
+    "obl_ang1(m, n, c, x, out=None)\n"
+    "\n"
+    "Oblate spheroidal angular function of the first kind and its derivative\n"
+    "\n"
+    "Computes the oblate spheroidal angular function of the first kind\n"
+    "and its derivative (with respect to `x`) for mode parameters m>=0\n"
+    "and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "m : array_like\n"
+    "    Mode parameter m (nonnegative)\n"
+    "n : array_like\n"
+    "    Mode parameter n (>= m)\n"
+    "c : array_like\n"
+    "    Spheroidal parameter\n"
+    "x : array_like\n"
+    "    Parameter x (``|x| < 1.0``)\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "s : scalar or ndarray\n"
+    "    Value of the function\n"
+    "sp : scalar or ndarray\n"
+    "    Value of the derivative vs x\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "obl_ang1_cv")
+ufunc_obl_ang1_loops[0] = loop_d_dddd_d_As_ffff_ff
+ufunc_obl_ang1_loops[1] = loop_d_dddd_d_As_dddd_dd
+ufunc_obl_ang1_types[0] = NPY_FLOAT
+ufunc_obl_ang1_types[1] = NPY_FLOAT
+ufunc_obl_ang1_types[2] = NPY_FLOAT
+ufunc_obl_ang1_types[3] = NPY_FLOAT
+ufunc_obl_ang1_types[4] = NPY_FLOAT
+ufunc_obl_ang1_types[5] = NPY_FLOAT
+ufunc_obl_ang1_types[6] = NPY_DOUBLE
+ufunc_obl_ang1_types[7] = NPY_DOUBLE
+ufunc_obl_ang1_types[8] = NPY_DOUBLE
+ufunc_obl_ang1_types[9] = NPY_DOUBLE
+ufunc_obl_ang1_types[10] = NPY_DOUBLE
+ufunc_obl_ang1_types[11] = NPY_DOUBLE
+ufunc_obl_ang1_ptr[2*0] = _func_oblate_aswfa_nocv_wrap
+ufunc_obl_ang1_ptr[2*0+1] = ("obl_ang1")
+ufunc_obl_ang1_ptr[2*1] = _func_oblate_aswfa_nocv_wrap
+ufunc_obl_ang1_ptr[2*1+1] = ("obl_ang1")
+ufunc_obl_ang1_data[0] = &ufunc_obl_ang1_ptr[2*0]
+ufunc_obl_ang1_data[1] = &ufunc_obl_ang1_ptr[2*1]
+obl_ang1 = np.PyUFunc_FromFuncAndData(ufunc_obl_ang1_loops, ufunc_obl_ang1_data, ufunc_obl_ang1_types, 2, 4, 2, 0, "obl_ang1", ufunc_obl_ang1_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_obl_ang1_cv_loops[2]
+cdef void *ufunc_obl_ang1_cv_ptr[4]
+cdef void *ufunc_obl_ang1_cv_data[2]
+cdef char ufunc_obl_ang1_cv_types[14]
+cdef char *ufunc_obl_ang1_cv_doc = (
+    "obl_ang1_cv(m, n, c, cv, x, out=None)\n"
+    "\n"
+    "Oblate spheroidal angular function obl_ang1 for precomputed characteristic value\n"
+    "\n"
+    "Computes the oblate spheroidal angular function of the first kind\n"
+    "and its derivative (with respect to `x`) for mode parameters m>=0\n"
+    "and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires\n"
+    "pre-computed characteristic value.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "m : array_like\n"
+    "    Mode parameter m (nonnegative)\n"
+    "n : array_like\n"
+    "    Mode parameter n (>= m)\n"
+    "c : array_like\n"
+    "    Spheroidal parameter\n"
+    "cv : array_like\n"
+    "    Characteristic value\n"
+    "x : array_like\n"
+    "    Parameter x (``|x| < 1.0``)\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "s : scalar or ndarray\n"
+    "    Value of the function\n"
+    "sp : scalar or ndarray\n"
+    "    Value of the derivative vs x\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "obl_ang1")
+ufunc_obl_ang1_cv_loops[0] = loop_i_ddddd_dd_As_fffff_ff
+ufunc_obl_ang1_cv_loops[1] = loop_i_ddddd_dd_As_ddddd_dd
+ufunc_obl_ang1_cv_types[0] = NPY_FLOAT
+ufunc_obl_ang1_cv_types[1] = NPY_FLOAT
+ufunc_obl_ang1_cv_types[2] = NPY_FLOAT
+ufunc_obl_ang1_cv_types[3] = NPY_FLOAT
+ufunc_obl_ang1_cv_types[4] = NPY_FLOAT
+ufunc_obl_ang1_cv_types[5] = NPY_FLOAT
+ufunc_obl_ang1_cv_types[6] = NPY_FLOAT
+ufunc_obl_ang1_cv_types[7] = NPY_DOUBLE
+ufunc_obl_ang1_cv_types[8] = NPY_DOUBLE
+ufunc_obl_ang1_cv_types[9] = NPY_DOUBLE
+ufunc_obl_ang1_cv_types[10] = NPY_DOUBLE
+ufunc_obl_ang1_cv_types[11] = NPY_DOUBLE
+ufunc_obl_ang1_cv_types[12] = NPY_DOUBLE
+ufunc_obl_ang1_cv_types[13] = NPY_DOUBLE
+ufunc_obl_ang1_cv_ptr[2*0] = _func_oblate_aswfa_wrap
+ufunc_obl_ang1_cv_ptr[2*0+1] = ("obl_ang1_cv")
+ufunc_obl_ang1_cv_ptr[2*1] = _func_oblate_aswfa_wrap
+ufunc_obl_ang1_cv_ptr[2*1+1] = ("obl_ang1_cv")
+ufunc_obl_ang1_cv_data[0] = &ufunc_obl_ang1_cv_ptr[2*0]
+ufunc_obl_ang1_cv_data[1] = &ufunc_obl_ang1_cv_ptr[2*1]
+obl_ang1_cv = np.PyUFunc_FromFuncAndData(ufunc_obl_ang1_cv_loops, ufunc_obl_ang1_cv_data, ufunc_obl_ang1_cv_types, 2, 5, 2, 0, "obl_ang1_cv", ufunc_obl_ang1_cv_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_obl_cv_loops[2]
+cdef void *ufunc_obl_cv_ptr[4]
+cdef void *ufunc_obl_cv_data[2]
+cdef char ufunc_obl_cv_types[8]
+cdef char *ufunc_obl_cv_doc = (
+    "obl_cv(m, n, c, out=None)\n"
+    "\n"
+    "Characteristic value of oblate spheroidal function\n"
+    "\n"
+    "Computes the characteristic value of oblate spheroidal wave\n"
+    "functions of order `m`, `n` (n>=m) and spheroidal parameter `c`.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "m : array_like\n"
+    "    Mode parameter m (nonnegative)\n"
+    "n : array_like\n"
+    "    Mode parameter n (>= m)\n"
+    "c : array_like\n"
+    "    Spheroidal parameter\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "cv : scalar or ndarray\n"
+    "    Characteristic value")
+ufunc_obl_cv_loops[0] = loop_d_ddd__As_fff_f
+ufunc_obl_cv_loops[1] = loop_d_ddd__As_ddd_d
+ufunc_obl_cv_types[0] = NPY_FLOAT
+ufunc_obl_cv_types[1] = NPY_FLOAT
+ufunc_obl_cv_types[2] = NPY_FLOAT
+ufunc_obl_cv_types[3] = NPY_FLOAT
+ufunc_obl_cv_types[4] = NPY_DOUBLE
+ufunc_obl_cv_types[5] = NPY_DOUBLE
+ufunc_obl_cv_types[6] = NPY_DOUBLE
+ufunc_obl_cv_types[7] = NPY_DOUBLE
+ufunc_obl_cv_ptr[2*0] = _func_oblate_segv_wrap
+ufunc_obl_cv_ptr[2*0+1] = ("obl_cv")
+ufunc_obl_cv_ptr[2*1] = _func_oblate_segv_wrap
+ufunc_obl_cv_ptr[2*1+1] = ("obl_cv")
+ufunc_obl_cv_data[0] = &ufunc_obl_cv_ptr[2*0]
+ufunc_obl_cv_data[1] = &ufunc_obl_cv_ptr[2*1]
+obl_cv = np.PyUFunc_FromFuncAndData(ufunc_obl_cv_loops, ufunc_obl_cv_data, ufunc_obl_cv_types, 2, 3, 1, 0, "obl_cv", ufunc_obl_cv_doc, 0)
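+
+# A consistency sketch (comment only, illustrative): the characteristic
+# value from obl_cv feeds the *_cv variants, which should then agree with
+# the self-contained forms such as obl_ang1:
+#
+#     from scipy.special import obl_ang1, obl_ang1_cv, obl_cv
+#     m, n, c, x = 1, 2, 0.5, 0.3
+#     cv = obl_cv(m, n, c)
+#     s1, sp1 = obl_ang1(m, n, c, x)
+#     s2, sp2 = obl_ang1_cv(m, n, c, cv, x)
+#     assert abs(s1 - s2) < 1e-8 and abs(sp1 - sp2) < 1e-8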
+
+cdef np.PyUFuncGenericFunction ufunc_obl_rad1_loops[2]
+cdef void *ufunc_obl_rad1_ptr[4]
+cdef void *ufunc_obl_rad1_data[2]
+cdef char ufunc_obl_rad1_types[12]
+cdef char *ufunc_obl_rad1_doc = (
+    "obl_rad1(m, n, c, x, out=None)\n"
+    "\n"
+    "Oblate spheroidal radial function of the first kind and its derivative\n"
+    "\n"
+    "Computes the oblate spheroidal radial function of the first kind\n"
+    "and its derivative (with respect to `x`) for mode parameters m>=0\n"
+    "and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "m : array_like\n"
+    "    Mode parameter m (nonnegative)\n"
+    "n : array_like\n"
+    "    Mode parameter n (>= m)\n"
+    "c : array_like\n"
+    "    Spheroidal parameter\n"
+    "x : array_like\n"
+    "    Parameter x (``|x| < 1.0``)\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "s : scalar or ndarray\n"
+    "    Value of the function\n"
+    "sp : scalar or ndarray\n"
+    "    Value of the derivative vs x\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "obl_rad1_cv")
+ufunc_obl_rad1_loops[0] = loop_d_dddd_d_As_ffff_ff
+ufunc_obl_rad1_loops[1] = loop_d_dddd_d_As_dddd_dd
+ufunc_obl_rad1_types[0] = NPY_FLOAT
+ufunc_obl_rad1_types[1] = NPY_FLOAT
+ufunc_obl_rad1_types[2] = NPY_FLOAT
+ufunc_obl_rad1_types[3] = NPY_FLOAT
+ufunc_obl_rad1_types[4] = NPY_FLOAT
+ufunc_obl_rad1_types[5] = NPY_FLOAT
+ufunc_obl_rad1_types[6] = NPY_DOUBLE
+ufunc_obl_rad1_types[7] = NPY_DOUBLE
+ufunc_obl_rad1_types[8] = NPY_DOUBLE
+ufunc_obl_rad1_types[9] = NPY_DOUBLE
+ufunc_obl_rad1_types[10] = NPY_DOUBLE
+ufunc_obl_rad1_types[11] = NPY_DOUBLE
+ufunc_obl_rad1_ptr[2*0] = _func_oblate_radial1_nocv_wrap
+ufunc_obl_rad1_ptr[2*0+1] = ("obl_rad1")
+ufunc_obl_rad1_ptr[2*1] = _func_oblate_radial1_nocv_wrap
+ufunc_obl_rad1_ptr[2*1+1] = ("obl_rad1")
+ufunc_obl_rad1_data[0] = &ufunc_obl_rad1_ptr[2*0]
+ufunc_obl_rad1_data[1] = &ufunc_obl_rad1_ptr[2*1]
+obl_rad1 = np.PyUFunc_FromFuncAndData(ufunc_obl_rad1_loops, ufunc_obl_rad1_data, ufunc_obl_rad1_types, 2, 4, 2, 0, "obl_rad1", ufunc_obl_rad1_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_obl_rad1_cv_loops[2]
+cdef void *ufunc_obl_rad1_cv_ptr[4]
+cdef void *ufunc_obl_rad1_cv_data[2]
+cdef char ufunc_obl_rad1_cv_types[14]
+cdef char *ufunc_obl_rad1_cv_doc = (
+    "obl_rad1_cv(m, n, c, cv, x, out=None)\n"
+    "\n"
+    "Oblate spheroidal radial function obl_rad1 for precomputed characteristic value\n"
+    "\n"
+    "Computes the oblate spheroidal radial function of the first kind\n"
+    "and its derivative (with respect to `x`) for mode parameters m>=0\n"
+    "and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires\n"
+    "pre-computed characteristic value.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "m : array_like\n"
+    "    Mode parameter m (nonnegative)\n"
+    "n : array_like\n"
+    "    Mode parameter n (>= m)\n"
+    "c : array_like\n"
+    "    Spheroidal parameter\n"
+    "cv : array_like\n"
+    "    Characteristic value\n"
+    "x : array_like\n"
+    "    Parameter x (``|x| < 1.0``)\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "s : scalar or ndarray\n"
+    "    Value of the function\n"
+    "sp : scalar or ndarray\n"
+    "    Value of the derivative vs x\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "obl_rad1")
+ufunc_obl_rad1_cv_loops[0] = loop_i_ddddd_dd_As_fffff_ff
+ufunc_obl_rad1_cv_loops[1] = loop_i_ddddd_dd_As_ddddd_dd
+ufunc_obl_rad1_cv_types[0] = NPY_FLOAT
+ufunc_obl_rad1_cv_types[1] = NPY_FLOAT
+ufunc_obl_rad1_cv_types[2] = NPY_FLOAT
+ufunc_obl_rad1_cv_types[3] = NPY_FLOAT
+ufunc_obl_rad1_cv_types[4] = NPY_FLOAT
+ufunc_obl_rad1_cv_types[5] = NPY_FLOAT
+ufunc_obl_rad1_cv_types[6] = NPY_FLOAT
+ufunc_obl_rad1_cv_types[7] = NPY_DOUBLE
+ufunc_obl_rad1_cv_types[8] = NPY_DOUBLE
+ufunc_obl_rad1_cv_types[9] = NPY_DOUBLE
+ufunc_obl_rad1_cv_types[10] = NPY_DOUBLE
+ufunc_obl_rad1_cv_types[11] = NPY_DOUBLE
+ufunc_obl_rad1_cv_types[12] = NPY_DOUBLE
+ufunc_obl_rad1_cv_types[13] = NPY_DOUBLE
+ufunc_obl_rad1_cv_ptr[2*0] = _func_oblate_radial1_wrap
+ufunc_obl_rad1_cv_ptr[2*0+1] = ("obl_rad1_cv")
+ufunc_obl_rad1_cv_ptr[2*1] = _func_oblate_radial1_wrap
+ufunc_obl_rad1_cv_ptr[2*1+1] = ("obl_rad1_cv")
+ufunc_obl_rad1_cv_data[0] = &ufunc_obl_rad1_cv_ptr[2*0]
+ufunc_obl_rad1_cv_data[1] = &ufunc_obl_rad1_cv_ptr[2*1]
+obl_rad1_cv = np.PyUFunc_FromFuncAndData(ufunc_obl_rad1_cv_loops, ufunc_obl_rad1_cv_data, ufunc_obl_rad1_cv_types, 2, 5, 2, 0, "obl_rad1_cv", ufunc_obl_rad1_cv_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_obl_rad2_loops[2]
+cdef void *ufunc_obl_rad2_ptr[4]
+cdef void *ufunc_obl_rad2_data[2]
+cdef char ufunc_obl_rad2_types[12]
+cdef char *ufunc_obl_rad2_doc = (
+    "obl_rad2(m, n, c, x, out=None)\n"
+    "\n"
+    "Oblate spheroidal radial function of the second kind and its derivative.\n"
+    "\n"
+    "Computes the oblate spheroidal radial function of the second kind\n"
+    "and its derivative (with respect to `x`) for mode parameters m>=0\n"
+    "and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "m : array_like\n"
+    "    Mode parameter m (nonnegative)\n"
+    "n : array_like\n"
+    "    Mode parameter n (>= m)\n"
+    "c : array_like\n"
+    "    Spheroidal parameter\n"
+    "x : array_like\n"
+    "    Parameter x (``|x| < 1.0``)\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "s : scalar or ndarray\n"
+    "    Value of the function\n"
+    "sp : scalar or ndarray\n"
+    "    Value of the derivative vs x\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "obl_rad2_cv")
+ufunc_obl_rad2_loops[0] = loop_d_dddd_d_As_ffff_ff
+ufunc_obl_rad2_loops[1] = loop_d_dddd_d_As_dddd_dd
+ufunc_obl_rad2_types[0] = NPY_FLOAT
+ufunc_obl_rad2_types[1] = NPY_FLOAT
+ufunc_obl_rad2_types[2] = NPY_FLOAT
+ufunc_obl_rad2_types[3] = NPY_FLOAT
+ufunc_obl_rad2_types[4] = NPY_FLOAT
+ufunc_obl_rad2_types[5] = NPY_FLOAT
+ufunc_obl_rad2_types[6] = NPY_DOUBLE
+ufunc_obl_rad2_types[7] = NPY_DOUBLE
+ufunc_obl_rad2_types[8] = NPY_DOUBLE
+ufunc_obl_rad2_types[9] = NPY_DOUBLE
+ufunc_obl_rad2_types[10] = NPY_DOUBLE
+ufunc_obl_rad2_types[11] = NPY_DOUBLE
+ufunc_obl_rad2_ptr[2*0] = _func_oblate_radial2_nocv_wrap
+ufunc_obl_rad2_ptr[2*0+1] = ("obl_rad2")
+ufunc_obl_rad2_ptr[2*1] = _func_oblate_radial2_nocv_wrap
+ufunc_obl_rad2_ptr[2*1+1] = ("obl_rad2")
+ufunc_obl_rad2_data[0] = &ufunc_obl_rad2_ptr[2*0]
+ufunc_obl_rad2_data[1] = &ufunc_obl_rad2_ptr[2*1]
+obl_rad2 = np.PyUFunc_FromFuncAndData(ufunc_obl_rad2_loops, ufunc_obl_rad2_data, ufunc_obl_rad2_types, 2, 4, 2, 0, "obl_rad2", ufunc_obl_rad2_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_obl_rad2_cv_loops[2]
+cdef void *ufunc_obl_rad2_cv_ptr[4]
+cdef void *ufunc_obl_rad2_cv_data[2]
+cdef char ufunc_obl_rad2_cv_types[14]
+cdef char *ufunc_obl_rad2_cv_doc = (
+    "obl_rad2_cv(m, n, c, cv, x, out=None)\n"
+    "\n"
+    "Oblate spheroidal radial function obl_rad2 for precomputed characteristic value\n"
+    "\n"
+    "Computes the oblate spheroidal radial function of the second kind\n"
+    "and its derivative (with respect to `x`) for mode parameters m>=0\n"
+    "and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires\n"
+    "pre-computed characteristic value.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "m : array_like\n"
+    "    Mode parameter m (nonnegative)\n"
+    "n : array_like\n"
+    "    Mode parameter n (>= m)\n"
+    "c : array_like\n"
+    "    Spheroidal parameter\n"
+    "cv : array_like\n"
+    "    Characteristic value\n"
+    "x : array_like\n"
+    "    Parameter x (``|x| < 1.0``)\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "s : scalar or ndarray\n"
+    "    Value of the function\n"
+    "sp : scalar or ndarray\n"
+    "    Value of the derivative vs x\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "obl_rad2")
+ufunc_obl_rad2_cv_loops[0] = loop_i_ddddd_dd_As_fffff_ff
+ufunc_obl_rad2_cv_loops[1] = loop_i_ddddd_dd_As_ddddd_dd
+ufunc_obl_rad2_cv_types[0] = NPY_FLOAT
+ufunc_obl_rad2_cv_types[1] = NPY_FLOAT
+ufunc_obl_rad2_cv_types[2] = NPY_FLOAT
+ufunc_obl_rad2_cv_types[3] = NPY_FLOAT
+ufunc_obl_rad2_cv_types[4] = NPY_FLOAT
+ufunc_obl_rad2_cv_types[5] = NPY_FLOAT
+ufunc_obl_rad2_cv_types[6] = NPY_FLOAT
+ufunc_obl_rad2_cv_types[7] = NPY_DOUBLE
+ufunc_obl_rad2_cv_types[8] = NPY_DOUBLE
+ufunc_obl_rad2_cv_types[9] = NPY_DOUBLE
+ufunc_obl_rad2_cv_types[10] = NPY_DOUBLE
+ufunc_obl_rad2_cv_types[11] = NPY_DOUBLE
+ufunc_obl_rad2_cv_types[12] = NPY_DOUBLE
+ufunc_obl_rad2_cv_types[13] = NPY_DOUBLE
+ufunc_obl_rad2_cv_ptr[2*0] = _func_oblate_radial2_wrap
+ufunc_obl_rad2_cv_ptr[2*0+1] = ("obl_rad2_cv")
+ufunc_obl_rad2_cv_ptr[2*1] = _func_oblate_radial2_wrap
+ufunc_obl_rad2_cv_ptr[2*1+1] = ("obl_rad2_cv")
+ufunc_obl_rad2_cv_data[0] = &ufunc_obl_rad2_cv_ptr[2*0]
+ufunc_obl_rad2_cv_data[1] = &ufunc_obl_rad2_cv_ptr[2*1]
+obl_rad2_cv = np.PyUFunc_FromFuncAndData(ufunc_obl_rad2_cv_loops, ufunc_obl_rad2_cv_data, ufunc_obl_rad2_cv_types, 2, 5, 2, 0, "obl_rad2_cv", ufunc_obl_rad2_cv_doc, 0)
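+
+# Hedged usage sketch (comment only, not part of the generated ufunc table):
+# obl_rad2_cv should agree with obl_rad2 when the characteristic value comes
+# from scipy.special.obl_cv.  Assumes scipy.special is importable as `sc`.
+#
+#     import scipy.special as sc
+#     m, n, c, x = 1, 2, 0.5, 0.3
+#     cv = sc.obl_cv(m, n, c)
+#     sc.obl_rad2_cv(m, n, c, cv, x)  # expected to match sc.obl_rad2(m, n, c, x)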
+
+cdef np.PyUFuncGenericFunction ufunc_owens_t_loops[2]
+cdef void *ufunc_owens_t_ptr[4]
+cdef void *ufunc_owens_t_data[2]
+cdef char ufunc_owens_t_types[6]
+cdef char *ufunc_owens_t_doc = (
+    "owens_t(h, a, out=None)\n"
+    "\n"
+    "Owen's T Function.\n"
+    "\n"
+    "The function T(h, a) gives the probability of the event\n"
+    "(X > h and 0 < Y < a * X) where X and Y are independent\n"
+    "standard normal random variables.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "h: array_like\n"
+    "    Input value.\n"
+    "a: array_like\n"
+    "    Input value.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "t: scalar or ndarray\n"
+    "    Probability of the event (X > h and 0 < Y < a * X),\n"
+    "    where X and Y are independent standard normal random variables.\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> from scipy import special\n"
+    ">>> a = 3.5\n"
+    ">>> h = 0.78\n"
+    ">>> special.owens_t(h, a)\n"
+    "0.10877216734852274\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] M. Patefield and D. Tandy, \"Fast and accurate calculation of\n"
+    "       Owen's T Function\", Statistical Software vol. 5, pp. 1-25, 2000.")
+ufunc_owens_t_loops[0] = loop_d_dd__As_ff_f
+ufunc_owens_t_loops[1] = loop_d_dd__As_dd_d
+ufunc_owens_t_types[0] = NPY_FLOAT
+ufunc_owens_t_types[1] = NPY_FLOAT
+ufunc_owens_t_types[2] = NPY_FLOAT
+ufunc_owens_t_types[3] = NPY_DOUBLE
+ufunc_owens_t_types[4] = NPY_DOUBLE
+ufunc_owens_t_types[5] = NPY_DOUBLE
+ufunc_owens_t_ptr[2*0] = _func_owens_t
+ufunc_owens_t_ptr[2*0+1] = ("owens_t")
+ufunc_owens_t_ptr[2*1] = _func_owens_t
+ufunc_owens_t_ptr[2*1+1] = ("owens_t")
+ufunc_owens_t_data[0] = &ufunc_owens_t_ptr[2*0]
+ufunc_owens_t_data[1] = &ufunc_owens_t_ptr[2*1]
+owens_t = np.PyUFunc_FromFuncAndData(ufunc_owens_t_loops, ufunc_owens_t_data, ufunc_owens_t_types, 2, 2, 1, 0, "owens_t", ufunc_owens_t_doc, 0)
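+
+# Hedged check (comment only): a classical identity is T(0, a) = arctan(a)/(2*pi),
+# so T(0, 1) should be 1/8.  Assumes scipy.special is importable as `sc`.
+#
+#     import numpy as np
+#     import scipy.special as sc
+#     sc.owens_t(0.0, 1.0)  # expected ~ 0.125 == np.arctan(1.0) / (2 * np.pi)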
+
+cdef np.PyUFuncGenericFunction ufunc_pbdv_loops[2]
+cdef void *ufunc_pbdv_ptr[4]
+cdef void *ufunc_pbdv_data[2]
+cdef char ufunc_pbdv_types[8]
+cdef char *ufunc_pbdv_doc = (
+    "pbdv(v, x, out=None)\n"
+    "\n"
+    "Parabolic cylinder function D\n"
+    "\n"
+    "Returns (d, dp) the parabolic cylinder function Dv(x) in d and the\n"
+    "derivative, Dv'(x) in dp.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "v : array_like\n"
+    "    Real parameter\n"
+    "x : array_like\n"
+    "    Real argument\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "d : scalar or ndarray\n"
+    "    Value of the function\n"
+    "dp : scalar or ndarray\n"
+    "    Value of the derivative vs x")
+ufunc_pbdv_loops[0] = loop_i_dd_dd_As_ff_ff
+ufunc_pbdv_loops[1] = loop_i_dd_dd_As_dd_dd
+ufunc_pbdv_types[0] = NPY_FLOAT
+ufunc_pbdv_types[1] = NPY_FLOAT
+ufunc_pbdv_types[2] = NPY_FLOAT
+ufunc_pbdv_types[3] = NPY_FLOAT
+ufunc_pbdv_types[4] = NPY_DOUBLE
+ufunc_pbdv_types[5] = NPY_DOUBLE
+ufunc_pbdv_types[6] = NPY_DOUBLE
+ufunc_pbdv_types[7] = NPY_DOUBLE
+ufunc_pbdv_ptr[2*0] = _func_pbdv_wrap
+ufunc_pbdv_ptr[2*0+1] = ("pbdv")
+ufunc_pbdv_ptr[2*1] = _func_pbdv_wrap
+ufunc_pbdv_ptr[2*1+1] = ("pbdv")
+ufunc_pbdv_data[0] = &ufunc_pbdv_ptr[2*0]
+ufunc_pbdv_data[1] = &ufunc_pbdv_ptr[2*1]
+pbdv = np.PyUFunc_FromFuncAndData(ufunc_pbdv_loops, ufunc_pbdv_data, ufunc_pbdv_types, 2, 2, 2, 0, "pbdv", ufunc_pbdv_doc, 0)
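+
+# Hedged check (comment only): for order v = 0 the parabolic cylinder function
+# reduces to D_0(x) = exp(-x**2 / 4), which the first return value should match.
+#
+#     import numpy as np
+#     import scipy.special as sc
+#     x = 1.5
+#     d, dp = sc.pbdv(0.0, x)
+#     d  # expected ~ np.exp(-x**2 / 4)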
+
+cdef np.PyUFuncGenericFunction ufunc_pbvv_loops[2]
+cdef void *ufunc_pbvv_ptr[4]
+cdef void *ufunc_pbvv_data[2]
+cdef char ufunc_pbvv_types[8]
+cdef char *ufunc_pbvv_doc = (
+    "pbvv(v, x, out=None)\n"
+    "\n"
+    "Parabolic cylinder function V\n"
+    "\n"
+    "Returns the parabolic cylinder function Vv(x) in v and the\n"
+    "derivative, Vv'(x) in vp.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "v : array_like\n"
+    "    Real parameter\n"
+    "x : array_like\n"
+    "    Real argument\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "v : scalar or ndarray\n"
+    "    Value of the function\n"
+    "vp : scalar or ndarray\n"
+    "    Value of the derivative vs x")
+ufunc_pbvv_loops[0] = loop_i_dd_dd_As_ff_ff
+ufunc_pbvv_loops[1] = loop_i_dd_dd_As_dd_dd
+ufunc_pbvv_types[0] = NPY_FLOAT
+ufunc_pbvv_types[1] = NPY_FLOAT
+ufunc_pbvv_types[2] = NPY_FLOAT
+ufunc_pbvv_types[3] = NPY_FLOAT
+ufunc_pbvv_types[4] = NPY_DOUBLE
+ufunc_pbvv_types[5] = NPY_DOUBLE
+ufunc_pbvv_types[6] = NPY_DOUBLE
+ufunc_pbvv_types[7] = NPY_DOUBLE
+ufunc_pbvv_ptr[2*0] = _func_pbvv_wrap
+ufunc_pbvv_ptr[2*0+1] = ("pbvv")
+ufunc_pbvv_ptr[2*1] = _func_pbvv_wrap
+ufunc_pbvv_ptr[2*1+1] = ("pbvv")
+ufunc_pbvv_data[0] = &ufunc_pbvv_ptr[2*0]
+ufunc_pbvv_data[1] = &ufunc_pbvv_ptr[2*1]
+pbvv = np.PyUFunc_FromFuncAndData(ufunc_pbvv_loops, ufunc_pbvv_data, ufunc_pbvv_types, 2, 2, 2, 0, "pbvv", ufunc_pbvv_doc, 0)
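+
+# Hedged usage sketch (comment only): pbvv returns the pair (Vv(x), Vv'(x)).
+# Assumes scipy.special is importable as `sc`.
+#
+#     import scipy.special as sc
+#     val, vp = sc.pbvv(1.0, 0.5)  # function value and derivative at x = 0.5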
+
+cdef np.PyUFuncGenericFunction ufunc_pbwa_loops[2]
+cdef void *ufunc_pbwa_ptr[4]
+cdef void *ufunc_pbwa_data[2]
+cdef char ufunc_pbwa_types[8]
+cdef char *ufunc_pbwa_doc = (
+    "pbwa(a, x, out=None)\n"
+    "\n"
+    "Parabolic cylinder function W.\n"
+    "\n"
+    "The function is a particular solution to the differential equation\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    y'' + \\left(\\frac{1}{4}x^2 - a\\right)y = 0,\n"
+    "\n"
+    "for a full definition see section 12.14 in [1]_.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "a : array_like\n"
+    "    Real parameter\n"
+    "x : array_like\n"
+    "    Real argument\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "w : scalar or ndarray\n"
+    "    Value of the function\n"
+    "wp : scalar or ndarray\n"
+    "    Value of the derivative in x\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "The function is a wrapper for a Fortran routine by Zhang and Jin\n"
+    "[2]_. The implementation is accurate only for ``|a|, |x| < 5`` and\n"
+    "returns NaN outside that range.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Digital Library of Mathematical Functions, 14.30.\n"
+    "       https://dlmf.nist.gov/14.30\n"
+    ".. [2] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n"
+    "       Functions\", John Wiley and Sons, 1996.\n"
+    "       https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html")
+ufunc_pbwa_loops[0] = loop_i_dd_dd_As_ff_ff
+ufunc_pbwa_loops[1] = loop_i_dd_dd_As_dd_dd
+ufunc_pbwa_types[0] = NPY_FLOAT
+ufunc_pbwa_types[1] = NPY_FLOAT
+ufunc_pbwa_types[2] = NPY_FLOAT
+ufunc_pbwa_types[3] = NPY_FLOAT
+ufunc_pbwa_types[4] = NPY_DOUBLE
+ufunc_pbwa_types[5] = NPY_DOUBLE
+ufunc_pbwa_types[6] = NPY_DOUBLE
+ufunc_pbwa_types[7] = NPY_DOUBLE
+ufunc_pbwa_ptr[2*0] = _func_pbwa_wrap
+ufunc_pbwa_ptr[2*0+1] = ("pbwa")
+ufunc_pbwa_ptr[2*1] = _func_pbwa_wrap
+ufunc_pbwa_ptr[2*1+1] = ("pbwa")
+ufunc_pbwa_data[0] = &ufunc_pbwa_ptr[2*0]
+ufunc_pbwa_data[1] = &ufunc_pbwa_ptr[2*1]
+pbwa = np.PyUFunc_FromFuncAndData(ufunc_pbwa_loops, ufunc_pbwa_data, ufunc_pbwa_types, 2, 2, 2, 0, "pbwa", ufunc_pbwa_doc, 0)
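+
+# Hedged sketch (comment only), restating the accuracy note in the docstring:
+# the wrapped routine returns NaN outside ``|a|, |x| < 5``.
+#
+#     import scipy.special as sc
+#     sc.pbwa(1.0, 2.0)  # finite (w, wp) pair inside the accurate domain
+#     sc.pbwa(6.0, 0.0)  # expected (nan, nan) outside that domain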
+
+cdef np.PyUFuncGenericFunction ufunc_pdtr_loops[2]
+cdef void *ufunc_pdtr_ptr[4]
+cdef void *ufunc_pdtr_data[2]
+cdef char ufunc_pdtr_types[6]
+cdef char *ufunc_pdtr_doc = (
+    "pdtr(k, m, out=None)\n"
+    "\n"
+    "Poisson cumulative distribution function.\n"
+    "\n"
+    "Defined as the probability that a Poisson-distributed random\n"
+    "variable with event rate :math:`m` is less than or equal to\n"
+    ":math:`k`. More concretely, this works out to be [1]_\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "   \\exp(-m) \\sum_{j = 0}^{\\lfloor{k}\\rfloor} \\frac{m^j}{j!}.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "k : array_like\n"
+    "    Number of occurrences (nonnegative, real)\n"
+    "m : array_like\n"
+    "    Shape parameter (nonnegative, real)\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Values of the Poisson cumulative distribution function\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "pdtrc : Poisson survival function\n"
+    "pdtrik : inverse of `pdtr` with respect to `k`\n"
+    "pdtri : inverse of `pdtr` with respect to `m`\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] https://en.wikipedia.org/wiki/Poisson_distribution\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> import scipy.special as sc\n"
+    "\n"
+    "It is a cumulative distribution function, so it converges to 1\n"
+    "monotonically as `k` goes to infinity.\n"
+    "\n"
+    ">>> sc.pdtr([1, 10, 100, np.inf], 1)\n"
+    "array([0.73575888, 0.99999999, 1.        , 1.        ])\n"
+    "\n"
+    "It is discontinuous at integers and constant between integers.\n"
+    "\n"
+    ">>> sc.pdtr([1, 1.5, 1.9, 2], 1)\n"
+    "array([0.73575888, 0.73575888, 0.73575888, 0.9196986 ])")
+ufunc_pdtr_loops[0] = loop_d_dd__As_ff_f
+ufunc_pdtr_loops[1] = loop_d_dd__As_dd_d
+ufunc_pdtr_types[0] = NPY_FLOAT
+ufunc_pdtr_types[1] = NPY_FLOAT
+ufunc_pdtr_types[2] = NPY_FLOAT
+ufunc_pdtr_types[3] = NPY_DOUBLE
+ufunc_pdtr_types[4] = NPY_DOUBLE
+ufunc_pdtr_types[5] = NPY_DOUBLE
+ufunc_pdtr_ptr[2*0] = _func_pdtr
+ufunc_pdtr_ptr[2*0+1] = ("pdtr")
+ufunc_pdtr_ptr[2*1] = _func_pdtr
+ufunc_pdtr_ptr[2*1+1] = ("pdtr")
+ufunc_pdtr_data[0] = &ufunc_pdtr_ptr[2*0]
+ufunc_pdtr_data[1] = &ufunc_pdtr_ptr[2*1]
+pdtr = np.PyUFunc_FromFuncAndData(ufunc_pdtr_loops, ufunc_pdtr_data, ufunc_pdtr_types, 2, 2, 1, 0, "pdtr", ufunc_pdtr_doc, 0)
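+
+# Hedged cross-check (comment only): pdtrc below is documented as
+# ``gammainc(k + 1, m)``, so the CDF should equal the complementary
+# regularized incomplete gamma function.
+#
+#     import scipy.special as sc
+#     k, m = 3, 2.5
+#     sc.pdtr(k, m)  # expected ~ sc.gammaincc(k + 1, m)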
+
+cdef np.PyUFuncGenericFunction ufunc_pdtrc_loops[2]
+cdef void *ufunc_pdtrc_ptr[4]
+cdef void *ufunc_pdtrc_data[2]
+cdef char ufunc_pdtrc_types[6]
+cdef char *ufunc_pdtrc_doc = (
+    "pdtrc(k, m, out=None)\n"
+    "\n"
+    "Poisson survival function\n"
+    "\n"
+    "Returns the sum of the terms from k+1 to infinity of the Poisson\n"
+    "distribution: sum(exp(-m) * m**j / j!, j=k+1..inf) = gammainc(\n"
+    "k+1, m). Arguments must both be non-negative doubles.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "k : array_like\n"
+    "    Number of occurrences (nonnegative, real)\n"
+    "m : array_like\n"
+    "    Shape parameter (nonnegative, real)\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Values of the Poisson survival function\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "pdtr : Poisson cumulative distribution function\n"
+    "pdtrik : inverse of `pdtr` with respect to `k`\n"
+    "pdtri : inverse of `pdtr` with respect to `m`")
+ufunc_pdtrc_loops[0] = loop_d_dd__As_ff_f
+ufunc_pdtrc_loops[1] = loop_d_dd__As_dd_d
+ufunc_pdtrc_types[0] = NPY_FLOAT
+ufunc_pdtrc_types[1] = NPY_FLOAT
+ufunc_pdtrc_types[2] = NPY_FLOAT
+ufunc_pdtrc_types[3] = NPY_DOUBLE
+ufunc_pdtrc_types[4] = NPY_DOUBLE
+ufunc_pdtrc_types[5] = NPY_DOUBLE
+ufunc_pdtrc_ptr[2*0] = _func_pdtrc
+ufunc_pdtrc_ptr[2*0+1] = ("pdtrc")
+ufunc_pdtrc_ptr[2*1] = _func_pdtrc
+ufunc_pdtrc_ptr[2*1+1] = ("pdtrc")
+ufunc_pdtrc_data[0] = &ufunc_pdtrc_ptr[2*0]
+ufunc_pdtrc_data[1] = &ufunc_pdtrc_ptr[2*1]
+pdtrc = np.PyUFunc_FromFuncAndData(ufunc_pdtrc_loops, ufunc_pdtrc_data, ufunc_pdtrc_types, 2, 2, 1, 0, "pdtrc", ufunc_pdtrc_doc, 0)
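+
+# Hedged cross-check (comment only): the CDF and the survival function
+# should sum to one for any valid (k, m).
+#
+#     import scipy.special as sc
+#     k, m = 3, 2.5
+#     sc.pdtr(k, m) + sc.pdtrc(k, m)  # expected ~ 1.0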
+
+cdef np.PyUFuncGenericFunction ufunc_pdtri_loops[3]
+cdef void *ufunc_pdtri_ptr[6]
+cdef void *ufunc_pdtri_data[3]
+cdef char ufunc_pdtri_types[9]
+cdef char *ufunc_pdtri_doc = (
+    "pdtri(k, y, out=None)\n"
+    "\n"
+    "Inverse to `pdtr` vs m\n"
+    "\n"
+    "Returns the Poisson variable `m` such that the sum from 0 to `k` of\n"
+    "the Poisson density is equal to the given probability `y`:\n"
+    "calculated by ``gammaincinv(k + 1, y)``. `k` must be a nonnegative\n"
+    "integer and `y` between 0 and 1.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "k : array_like\n"
+    "    Number of occurrences (nonnegative, real)\n"
+    "y : array_like\n"
+    "    Probability\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Values of the shape paramter `m` such that ``pdtr(k, m) = p``\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "pdtr : Poisson cumulative distribution function\n"
+    "pdtrc : Poisson survival function\n"
+    "pdtrik : inverse of `pdtr` with respect to `k`")
+ufunc_pdtri_loops[0] = loop_d_id__As_ld_d
+ufunc_pdtri_loops[1] = loop_d_dd__As_ff_f
+ufunc_pdtri_loops[2] = loop_d_dd__As_dd_d
+ufunc_pdtri_types[0] = NPY_LONG
+ufunc_pdtri_types[1] = NPY_DOUBLE
+ufunc_pdtri_types[2] = NPY_DOUBLE
+ufunc_pdtri_types[3] = NPY_FLOAT
+ufunc_pdtri_types[4] = NPY_FLOAT
+ufunc_pdtri_types[5] = NPY_FLOAT
+ufunc_pdtri_types[6] = NPY_DOUBLE
+ufunc_pdtri_types[7] = NPY_DOUBLE
+ufunc_pdtri_types[8] = NPY_DOUBLE
+ufunc_pdtri_ptr[2*0] = _func_pdtri
+ufunc_pdtri_ptr[2*0+1] = ("pdtri")
+ufunc_pdtri_ptr[2*1] = _func_pdtri_unsafe
+ufunc_pdtri_ptr[2*1+1] = ("pdtri")
+ufunc_pdtri_ptr[2*2] = _func_pdtri_unsafe
+ufunc_pdtri_ptr[2*2+1] = ("pdtri")
+ufunc_pdtri_data[0] = &ufunc_pdtri_ptr[2*0]
+ufunc_pdtri_data[1] = &ufunc_pdtri_ptr[2*1]
+ufunc_pdtri_data[2] = &ufunc_pdtri_ptr[2*2]
+pdtri = np.PyUFunc_FromFuncAndData(ufunc_pdtri_loops, ufunc_pdtri_data, ufunc_pdtri_types, 3, 2, 1, 0, "pdtri", ufunc_pdtri_doc, 0)
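+
+# Hedged round-trip sketch (comment only): pdtri inverts pdtr with respect
+# to `m`, so feeding the CDF value back should recover the shape parameter.
+#
+#     import scipy.special as sc
+#     k, m = 3, 2.5
+#     sc.pdtri(k, sc.pdtr(k, m))  # expected ~ m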
+
+cdef np.PyUFuncGenericFunction ufunc_pdtrik_loops[2]
+cdef void *ufunc_pdtrik_ptr[4]
+cdef void *ufunc_pdtrik_data[2]
+cdef char ufunc_pdtrik_types[6]
+cdef char *ufunc_pdtrik_doc = (
+    "pdtrik(p, m, out=None)\n"
+    "\n"
+    "Inverse to `pdtr` vs `m`.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "m : array_like\n"
+    "    Shape parameter (nonnegative, real)\n"
+    "p : array_like\n"
+    "    Probability\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    The number of occurrences `k` such that ``pdtr(k, m) = p``\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "pdtr : Poisson cumulative distribution function\n"
+    "pdtrc : Poisson survival function\n"
+    "pdtri : inverse of `pdtr` with respect to `m`")
+ufunc_pdtrik_loops[0] = loop_d_dd__As_ff_f
+ufunc_pdtrik_loops[1] = loop_d_dd__As_dd_d
+ufunc_pdtrik_types[0] = NPY_FLOAT
+ufunc_pdtrik_types[1] = NPY_FLOAT
+ufunc_pdtrik_types[2] = NPY_FLOAT
+ufunc_pdtrik_types[3] = NPY_DOUBLE
+ufunc_pdtrik_types[4] = NPY_DOUBLE
+ufunc_pdtrik_types[5] = NPY_DOUBLE
+ufunc_pdtrik_ptr[2*0] = _func_cdfpoi2_wrap
+ufunc_pdtrik_ptr[2*0+1] = ("pdtrik")
+ufunc_pdtrik_ptr[2*1] = _func_cdfpoi2_wrap
+ufunc_pdtrik_ptr[2*1+1] = ("pdtrik")
+ufunc_pdtrik_data[0] = &ufunc_pdtrik_ptr[2*0]
+ufunc_pdtrik_data[1] = &ufunc_pdtrik_ptr[2*1]
+pdtrik = np.PyUFunc_FromFuncAndData(ufunc_pdtrik_loops, ufunc_pdtrik_data, ufunc_pdtrik_types, 2, 2, 1, 0, "pdtrik", ufunc_pdtrik_doc, 0)
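+
+# Hedged round-trip sketch (comment only): pdtrik inverts pdtr with respect
+# to `k`.
+#
+#     import scipy.special as sc
+#     k, m = 3, 2.5
+#     sc.pdtrik(sc.pdtr(k, m), m)  # expected ~ k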
+
+cdef np.PyUFuncGenericFunction ufunc_poch_loops[2]
+cdef void *ufunc_poch_ptr[4]
+cdef void *ufunc_poch_data[2]
+cdef char ufunc_poch_types[6]
+cdef char *ufunc_poch_doc = (
+    "poch(z, m, out=None)\n"
+    "\n"
+    "Pochhammer symbol.\n"
+    "\n"
+    "The Pochhammer symbol (rising factorial) is defined as\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    (z)_m = \\frac{\\Gamma(z + m)}{\\Gamma(z)}\n"
+    "\n"
+    "For positive integer `m` it reads\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    (z)_m = z (z + 1) ... (z + m - 1)\n"
+    "\n"
+    "See [dlmf]_ for more details.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "z, m : array_like\n"
+    "    Real-valued arguments.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    The value of the function.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [dlmf] Nist, Digital Library of Mathematical Functions\n"
+    "    https://dlmf.nist.gov/5.2#iii\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import scipy.special as sc\n"
+    "\n"
+    "It is 1 when m is 0.\n"
+    "\n"
+    ">>> sc.poch([1, 2, 3, 4], 0)\n"
+    "array([1., 1., 1., 1.])\n"
+    "\n"
+    "For z equal to 1 it reduces to the factorial function.\n"
+    "\n"
+    ">>> sc.poch(1, 5)\n"
+    "120.0\n"
+    ">>> 1 * 2 * 3 * 4 * 5\n"
+    "120\n"
+    "\n"
+    "It can be expressed in terms of the gamma function.\n"
+    "\n"
+    ">>> z, m = 3.7, 2.1\n"
+    ">>> sc.poch(z, m)\n"
+    "20.529581933776953\n"
+    ">>> sc.gamma(z + m) / sc.gamma(z)\n"
+    "20.52958193377696")
+ufunc_poch_loops[0] = loop_d_dd__As_ff_f
+ufunc_poch_loops[1] = loop_d_dd__As_dd_d
+ufunc_poch_types[0] = NPY_FLOAT
+ufunc_poch_types[1] = NPY_FLOAT
+ufunc_poch_types[2] = NPY_FLOAT
+ufunc_poch_types[3] = NPY_DOUBLE
+ufunc_poch_types[4] = NPY_DOUBLE
+ufunc_poch_types[5] = NPY_DOUBLE
+ufunc_poch_ptr[2*0] = _func_poch
+ufunc_poch_ptr[2*0+1] = ("poch")
+ufunc_poch_ptr[2*1] = _func_poch
+ufunc_poch_ptr[2*1+1] = ("poch")
+ufunc_poch_data[0] = &ufunc_poch_ptr[2*0]
+ufunc_poch_data[1] = &ufunc_poch_ptr[2*1]
+poch = np.PyUFunc_FromFuncAndData(ufunc_poch_loops, ufunc_poch_data, ufunc_poch_types, 2, 2, 1, 0, "poch", ufunc_poch_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_powm1_loops[2]
+cdef void *ufunc_powm1_ptr[4]
+cdef void *ufunc_powm1_data[2]
+cdef char ufunc_powm1_types[6]
+cdef char *ufunc_powm1_doc = (
+    "powm1(x, y, out=None)\n"
+    "\n"
+    "Computes ``x**y - 1``.\n"
+    "\n"
+    "This function is useful when `y` is near 0, or when `x` is near 1.\n"
+    "\n"
+    "The function is implemented for real types only (unlike ``numpy.power``,\n"
+    "which accepts complex inputs).\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    The base. Must be a real type (i.e. integer or float, not complex).\n"
+    "y : array_like\n"
+    "    The exponent. Must be a real type (i.e. integer or float, not complex).\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "array_like\n"
+    "    Result of the calculation\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    ".. versionadded:: 1.10.0\n"
+    "\n"
+    "The underlying code is implemented for single precision and double\n"
+    "precision floats only.  Unlike `numpy.power`, integer inputs to\n"
+    "`powm1` are converted to floating point, and complex inputs are\n"
+    "not accepted.\n"
+    "\n"
+    "Note the following edge cases:\n"
+    "\n"
+    "* ``powm1(x, 0)`` returns 0 for any ``x``, including 0, ``inf``\n"
+    "  and ``nan``.\n"
+    "* ``powm1(1, y)`` returns 0 for any ``y``, including ``nan``\n"
+    "  and ``inf``.\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import powm1\n"
+    "\n"
+    ">>> x = np.array([1.2, 10.0, 0.9999999975])\n"
+    ">>> y = np.array([1e-9, 1e-11, 0.1875])\n"
+    ">>> powm1(x, y)\n"
+    "array([ 1.82321557e-10,  2.30258509e-11, -4.68749998e-10])\n"
+    "\n"
+    "It can be verified that the relative errors in those results\n"
+    "are less than 2.5e-16.\n"
+    "\n"
+    "Compare that to the result of ``x**y - 1``, where the\n"
+    "relative errors are all larger than 8e-8:\n"
+    "\n"
+    ">>> x**y - 1\n"
+    "array([ 1.82321491e-10,  2.30258035e-11, -4.68750039e-10])")
+ufunc_powm1_loops[0] = loop_f_ff__As_ff_f
+ufunc_powm1_loops[1] = loop_d_dd__As_dd_d
+ufunc_powm1_types[0] = NPY_FLOAT
+ufunc_powm1_types[1] = NPY_FLOAT
+ufunc_powm1_types[2] = NPY_FLOAT
+ufunc_powm1_types[3] = NPY_DOUBLE
+ufunc_powm1_types[4] = NPY_DOUBLE
+ufunc_powm1_types[5] = NPY_DOUBLE
+ufunc_powm1_ptr[2*0] = scipy.special._ufuncs_cxx._export_powm1_float
+ufunc_powm1_ptr[2*0+1] = ("powm1")
+ufunc_powm1_ptr[2*1] = scipy.special._ufuncs_cxx._export_powm1_double
+ufunc_powm1_ptr[2*1+1] = ("powm1")
+ufunc_powm1_data[0] = &ufunc_powm1_ptr[2*0]
+ufunc_powm1_data[1] = &ufunc_powm1_ptr[2*1]
+powm1 = np.PyUFunc_FromFuncAndData(ufunc_powm1_loops, ufunc_powm1_data, ufunc_powm1_types, 2, 2, 1, 0, "powm1", ufunc_powm1_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_pro_ang1_loops[2]
+cdef void *ufunc_pro_ang1_ptr[4]
+cdef void *ufunc_pro_ang1_data[2]
+cdef char ufunc_pro_ang1_types[12]
+cdef char *ufunc_pro_ang1_doc = (
+    "pro_ang1(m, n, c, x, out=None)\n"
+    "\n"
+    "Prolate spheroidal angular function of the first kind and its derivative\n"
+    "\n"
+    "Computes the prolate spheroidal angular function of the first kind\n"
+    "and its derivative (with respect to `x`) for mode parameters m>=0\n"
+    "and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "m : array_like\n"
+    "    Nonnegative mode parameter m\n"
+    "n : array_like\n"
+    "    Mode parameter n (>= m)\n"
+    "c : array_like\n"
+    "    Spheroidal parameter\n"
+    "x : array_like\n"
+    "    Real parameter (``|x| < 1.0``)\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "s : scalar or ndarray\n"
+    "    Value of the function\n"
+    "sp : scalar or ndarray\n"
+    "    Value of the derivative vs x")
+ufunc_pro_ang1_loops[0] = loop_d_dddd_d_As_ffff_ff
+ufunc_pro_ang1_loops[1] = loop_d_dddd_d_As_dddd_dd
+ufunc_pro_ang1_types[0] = NPY_FLOAT
+ufunc_pro_ang1_types[1] = NPY_FLOAT
+ufunc_pro_ang1_types[2] = NPY_FLOAT
+ufunc_pro_ang1_types[3] = NPY_FLOAT
+ufunc_pro_ang1_types[4] = NPY_FLOAT
+ufunc_pro_ang1_types[5] = NPY_FLOAT
+ufunc_pro_ang1_types[6] = NPY_DOUBLE
+ufunc_pro_ang1_types[7] = NPY_DOUBLE
+ufunc_pro_ang1_types[8] = NPY_DOUBLE
+ufunc_pro_ang1_types[9] = NPY_DOUBLE
+ufunc_pro_ang1_types[10] = NPY_DOUBLE
+ufunc_pro_ang1_types[11] = NPY_DOUBLE
+ufunc_pro_ang1_ptr[2*0] = _func_prolate_aswfa_nocv_wrap
+ufunc_pro_ang1_ptr[2*0+1] = ("pro_ang1")
+ufunc_pro_ang1_ptr[2*1] = _func_prolate_aswfa_nocv_wrap
+ufunc_pro_ang1_ptr[2*1+1] = ("pro_ang1")
+ufunc_pro_ang1_data[0] = &ufunc_pro_ang1_ptr[2*0]
+ufunc_pro_ang1_data[1] = &ufunc_pro_ang1_ptr[2*1]
+pro_ang1 = np.PyUFunc_FromFuncAndData(ufunc_pro_ang1_loops, ufunc_pro_ang1_data, ufunc_pro_ang1_types, 2, 4, 2, 0, "pro_ang1", ufunc_pro_ang1_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_pro_ang1_cv_loops[2]
+cdef void *ufunc_pro_ang1_cv_ptr[4]
+cdef void *ufunc_pro_ang1_cv_data[2]
+cdef char ufunc_pro_ang1_cv_types[14]
+cdef char *ufunc_pro_ang1_cv_doc = (
+    "pro_ang1_cv(m, n, c, cv, x, out=None)\n"
+    "\n"
+    "Prolate spheroidal angular function pro_ang1 for precomputed characteristic value\n"
+    "\n"
+    "Computes the prolate spheroidal angular function of the first kind\n"
+    "and its derivative (with respect to `x`) for mode parameters m>=0\n"
+    "and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires\n"
+    "pre-computed characteristic value.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "m : array_like\n"
+    "    Nonnegative mode parameter m\n"
+    "n : array_like\n"
+    "    Mode parameter n (>= m)\n"
+    "c : array_like\n"
+    "    Spheroidal parameter\n"
+    "cv : array_like\n"
+    "    Characteristic value\n"
+    "x : array_like\n"
+    "    Real parameter (``|x| < 1.0``)\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "s : scalar or ndarray\n"
+    "    Value of the function\n"
+    "sp : scalar or ndarray\n"
+    "    Value of the derivative vs x")
+ufunc_pro_ang1_cv_loops[0] = loop_i_ddddd_dd_As_fffff_ff
+ufunc_pro_ang1_cv_loops[1] = loop_i_ddddd_dd_As_ddddd_dd
+ufunc_pro_ang1_cv_types[0] = NPY_FLOAT
+ufunc_pro_ang1_cv_types[1] = NPY_FLOAT
+ufunc_pro_ang1_cv_types[2] = NPY_FLOAT
+ufunc_pro_ang1_cv_types[3] = NPY_FLOAT
+ufunc_pro_ang1_cv_types[4] = NPY_FLOAT
+ufunc_pro_ang1_cv_types[5] = NPY_FLOAT
+ufunc_pro_ang1_cv_types[6] = NPY_FLOAT
+ufunc_pro_ang1_cv_types[7] = NPY_DOUBLE
+ufunc_pro_ang1_cv_types[8] = NPY_DOUBLE
+ufunc_pro_ang1_cv_types[9] = NPY_DOUBLE
+ufunc_pro_ang1_cv_types[10] = NPY_DOUBLE
+ufunc_pro_ang1_cv_types[11] = NPY_DOUBLE
+ufunc_pro_ang1_cv_types[12] = NPY_DOUBLE
+ufunc_pro_ang1_cv_types[13] = NPY_DOUBLE
+ufunc_pro_ang1_cv_ptr[2*0] = _func_prolate_aswfa_wrap
+ufunc_pro_ang1_cv_ptr[2*0+1] = ("pro_ang1_cv")
+ufunc_pro_ang1_cv_ptr[2*1] = _func_prolate_aswfa_wrap
+ufunc_pro_ang1_cv_ptr[2*1+1] = ("pro_ang1_cv")
+ufunc_pro_ang1_cv_data[0] = &ufunc_pro_ang1_cv_ptr[2*0]
+ufunc_pro_ang1_cv_data[1] = &ufunc_pro_ang1_cv_ptr[2*1]
+pro_ang1_cv = np.PyUFunc_FromFuncAndData(ufunc_pro_ang1_cv_loops, ufunc_pro_ang1_cv_data, ufunc_pro_ang1_cv_types, 2, 5, 2, 0, "pro_ang1_cv", ufunc_pro_ang1_cv_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_pro_cv_loops[2]
+cdef void *ufunc_pro_cv_ptr[4]
+cdef void *ufunc_pro_cv_data[2]
+cdef char ufunc_pro_cv_types[8]
+cdef char *ufunc_pro_cv_doc = (
+    "pro_cv(m, n, c, out=None)\n"
+    "\n"
+    "Characteristic value of prolate spheroidal function\n"
+    "\n"
+    "Computes the characteristic value of prolate spheroidal wave\n"
+    "functions of order `m`, `n` (n>=m) and spheroidal parameter `c`.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "m : array_like\n"
+    "    Nonnegative mode parameter m\n"
+    "n : array_like\n"
+    "    Mode parameter n (>= m)\n"
+    "c : array_like\n"
+    "    Spheroidal parameter\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "cv : scalar or ndarray\n"
+    "    Characteristic value")
+ufunc_pro_cv_loops[0] = loop_d_ddd__As_fff_f
+ufunc_pro_cv_loops[1] = loop_d_ddd__As_ddd_d
+ufunc_pro_cv_types[0] = NPY_FLOAT
+ufunc_pro_cv_types[1] = NPY_FLOAT
+ufunc_pro_cv_types[2] = NPY_FLOAT
+ufunc_pro_cv_types[3] = NPY_FLOAT
+ufunc_pro_cv_types[4] = NPY_DOUBLE
+ufunc_pro_cv_types[5] = NPY_DOUBLE
+ufunc_pro_cv_types[6] = NPY_DOUBLE
+ufunc_pro_cv_types[7] = NPY_DOUBLE
+ufunc_pro_cv_ptr[2*0] = _func_prolate_segv_wrap
+ufunc_pro_cv_ptr[2*0+1] = ("pro_cv")
+ufunc_pro_cv_ptr[2*1] = _func_prolate_segv_wrap
+ufunc_pro_cv_ptr[2*1+1] = ("pro_cv")
+ufunc_pro_cv_data[0] = &ufunc_pro_cv_ptr[2*0]
+ufunc_pro_cv_data[1] = &ufunc_pro_cv_ptr[2*1]
+pro_cv = np.PyUFunc_FromFuncAndData(ufunc_pro_cv_loops, ufunc_pro_cv_data, ufunc_pro_cv_types, 2, 3, 1, 0, "pro_cv", ufunc_pro_cv_doc, 0)
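+
+# Hedged workflow sketch (comment only): pro_cv supplies the characteristic
+# value that the *_cv prolate variants expect, so the cv and no-cv forms
+# should agree.  Assumes scipy.special is importable as `sc`.
+#
+#     import scipy.special as sc
+#     m, n, c, x = 1, 2, 0.5, 0.3
+#     cv = sc.pro_cv(m, n, c)
+#     sc.pro_ang1_cv(m, n, c, cv, x)  # expected to match sc.pro_ang1(m, n, c, x)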
+
+cdef np.PyUFuncGenericFunction ufunc_pro_rad1_loops[2]
+cdef void *ufunc_pro_rad1_ptr[4]
+cdef void *ufunc_pro_rad1_data[2]
+cdef char ufunc_pro_rad1_types[12]
+cdef char *ufunc_pro_rad1_doc = (
+    "pro_rad1(m, n, c, x, out=None)\n"
+    "\n"
+    "Prolate spheroidal radial function of the first kind and its derivative\n"
+    "\n"
+    "Computes the prolate spheroidal radial function of the first kind\n"
+    "and its derivative (with respect to `x`) for mode parameters m>=0\n"
+    "and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "m : array_like\n"
+    "    Nonnegative mode parameter m\n"
+    "n : array_like\n"
+    "    Mode parameter n (>= m)\n"
+    "c : array_like\n"
+    "    Spheroidal parameter\n"
+    "x : array_like\n"
+    "    Real parameter (``|x| < 1.0``)\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "s : scalar or ndarray\n"
+    "    Value of the function\n"
+    "sp : scalar or ndarray\n"
+    "    Value of the derivative vs x")
+ufunc_pro_rad1_loops[0] = loop_d_dddd_d_As_ffff_ff
+ufunc_pro_rad1_loops[1] = loop_d_dddd_d_As_dddd_dd
+ufunc_pro_rad1_types[0] = NPY_FLOAT
+ufunc_pro_rad1_types[1] = NPY_FLOAT
+ufunc_pro_rad1_types[2] = NPY_FLOAT
+ufunc_pro_rad1_types[3] = NPY_FLOAT
+ufunc_pro_rad1_types[4] = NPY_FLOAT
+ufunc_pro_rad1_types[5] = NPY_FLOAT
+ufunc_pro_rad1_types[6] = NPY_DOUBLE
+ufunc_pro_rad1_types[7] = NPY_DOUBLE
+ufunc_pro_rad1_types[8] = NPY_DOUBLE
+ufunc_pro_rad1_types[9] = NPY_DOUBLE
+ufunc_pro_rad1_types[10] = NPY_DOUBLE
+ufunc_pro_rad1_types[11] = NPY_DOUBLE
+ufunc_pro_rad1_ptr[2*0] = _func_prolate_radial1_nocv_wrap
+ufunc_pro_rad1_ptr[2*0+1] = ("pro_rad1")
+ufunc_pro_rad1_ptr[2*1] = _func_prolate_radial1_nocv_wrap
+ufunc_pro_rad1_ptr[2*1+1] = ("pro_rad1")
+ufunc_pro_rad1_data[0] = &ufunc_pro_rad1_ptr[2*0]
+ufunc_pro_rad1_data[1] = &ufunc_pro_rad1_ptr[2*1]
+pro_rad1 = np.PyUFunc_FromFuncAndData(ufunc_pro_rad1_loops, ufunc_pro_rad1_data, ufunc_pro_rad1_types, 2, 4, 2, 0, "pro_rad1", ufunc_pro_rad1_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_pro_rad1_cv_loops[2]
+cdef void *ufunc_pro_rad1_cv_ptr[4]
+cdef void *ufunc_pro_rad1_cv_data[2]
+cdef char ufunc_pro_rad1_cv_types[14]
+cdef char *ufunc_pro_rad1_cv_doc = (
+    "pro_rad1_cv(m, n, c, cv, x, out=None)\n"
+    "\n"
+    "Prolate spheroidal radial function pro_rad1 for precomputed characteristic value\n"
+    "\n"
+    "Computes the prolate spheroidal radial function of the first kind\n"
+    "and its derivative (with respect to `x`) for mode parameters m>=0\n"
+    "and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires\n"
+    "pre-computed characteristic value.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "m : array_like\n"
+    "    Nonnegative mode parameter m\n"
+    "n : array_like\n"
+    "    Mode parameter n (>= m)\n"
+    "c : array_like\n"
+    "    Spheroidal parameter\n"
+    "cv : array_like\n"
+    "    Characteristic value\n"
+    "x : array_like\n"
+    "    Real parameter (``|x| < 1.0``)\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "s : scalar or ndarray\n"
+    "    Value of the function\n"
+    "sp : scalar or ndarray\n"
+    "    Value of the derivative vs x")
+ufunc_pro_rad1_cv_loops[0] = loop_i_ddddd_dd_As_fffff_ff
+ufunc_pro_rad1_cv_loops[1] = loop_i_ddddd_dd_As_ddddd_dd
+ufunc_pro_rad1_cv_types[0] = NPY_FLOAT
+ufunc_pro_rad1_cv_types[1] = NPY_FLOAT
+ufunc_pro_rad1_cv_types[2] = NPY_FLOAT
+ufunc_pro_rad1_cv_types[3] = NPY_FLOAT
+ufunc_pro_rad1_cv_types[4] = NPY_FLOAT
+ufunc_pro_rad1_cv_types[5] = NPY_FLOAT
+ufunc_pro_rad1_cv_types[6] = NPY_FLOAT
+ufunc_pro_rad1_cv_types[7] = NPY_DOUBLE
+ufunc_pro_rad1_cv_types[8] = NPY_DOUBLE
+ufunc_pro_rad1_cv_types[9] = NPY_DOUBLE
+ufunc_pro_rad1_cv_types[10] = NPY_DOUBLE
+ufunc_pro_rad1_cv_types[11] = NPY_DOUBLE
+ufunc_pro_rad1_cv_types[12] = NPY_DOUBLE
+ufunc_pro_rad1_cv_types[13] = NPY_DOUBLE
+ufunc_pro_rad1_cv_ptr[2*0] = _func_prolate_radial1_wrap
+ufunc_pro_rad1_cv_ptr[2*0+1] = ("pro_rad1_cv")
+ufunc_pro_rad1_cv_ptr[2*1] = _func_prolate_radial1_wrap
+ufunc_pro_rad1_cv_ptr[2*1+1] = ("pro_rad1_cv")
+ufunc_pro_rad1_cv_data[0] = &ufunc_pro_rad1_cv_ptr[2*0]
+ufunc_pro_rad1_cv_data[1] = &ufunc_pro_rad1_cv_ptr[2*1]
+pro_rad1_cv = np.PyUFunc_FromFuncAndData(ufunc_pro_rad1_cv_loops, ufunc_pro_rad1_cv_data, ufunc_pro_rad1_cv_types, 2, 5, 2, 0, "pro_rad1_cv", ufunc_pro_rad1_cv_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_pro_rad2_loops[2]
+cdef void *ufunc_pro_rad2_ptr[4]
+cdef void *ufunc_pro_rad2_data[2]
+cdef char ufunc_pro_rad2_types[12]
+cdef char *ufunc_pro_rad2_doc = (
+    "pro_rad2(m, n, c, x, out=None)\n"
+    "\n"
+    "Prolate spheroidal radial function of the second kind and its derivative\n"
+    "\n"
+    "Computes the prolate spheroidal radial function of the second kind\n"
+    "and its derivative (with respect to `x`) for mode parameters m>=0\n"
+    "and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "m : array_like\n"
+    "    Nonnegative mode parameter m\n"
+    "n : array_like\n"
+    "    Mode parameter n (>= m)\n"
+    "c : array_like\n"
+    "    Spheroidal parameter\n"
+    "cv : array_like\n"
+    "    Characteristic value\n"
+    "x : array_like\n"
+    "    Real parameter (``|x| < 1.0``)\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "s : scalar or ndarray\n"
+    "    Value of the function\n"
+    "sp : scalar or ndarray\n"
+    "    Value of the derivative vs x")
+ufunc_pro_rad2_loops[0] = loop_d_dddd_d_As_ffff_ff
+ufunc_pro_rad2_loops[1] = loop_d_dddd_d_As_dddd_dd
+ufunc_pro_rad2_types[0] = NPY_FLOAT
+ufunc_pro_rad2_types[1] = NPY_FLOAT
+ufunc_pro_rad2_types[2] = NPY_FLOAT
+ufunc_pro_rad2_types[3] = NPY_FLOAT
+ufunc_pro_rad2_types[4] = NPY_FLOAT
+ufunc_pro_rad2_types[5] = NPY_FLOAT
+ufunc_pro_rad2_types[6] = NPY_DOUBLE
+ufunc_pro_rad2_types[7] = NPY_DOUBLE
+ufunc_pro_rad2_types[8] = NPY_DOUBLE
+ufunc_pro_rad2_types[9] = NPY_DOUBLE
+ufunc_pro_rad2_types[10] = NPY_DOUBLE
+ufunc_pro_rad2_types[11] = NPY_DOUBLE
+ufunc_pro_rad2_ptr[2*0] = _func_prolate_radial2_nocv_wrap
+ufunc_pro_rad2_ptr[2*0+1] = ("pro_rad2")
+ufunc_pro_rad2_ptr[2*1] = _func_prolate_radial2_nocv_wrap
+ufunc_pro_rad2_ptr[2*1+1] = ("pro_rad2")
+ufunc_pro_rad2_data[0] = &ufunc_pro_rad2_ptr[2*0]
+ufunc_pro_rad2_data[1] = &ufunc_pro_rad2_ptr[2*1]
+pro_rad2 = np.PyUFunc_FromFuncAndData(ufunc_pro_rad2_loops, ufunc_pro_rad2_data, ufunc_pro_rad2_types, 2, 4, 2, 0, "pro_rad2", ufunc_pro_rad2_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_pro_rad2_cv_loops[2]
+cdef void *ufunc_pro_rad2_cv_ptr[4]
+cdef void *ufunc_pro_rad2_cv_data[2]
+cdef char ufunc_pro_rad2_cv_types[14]
+cdef char *ufunc_pro_rad2_cv_doc = (
+    "pro_rad2_cv(m, n, c, cv, x, out=None)\n"
+    "\n"
+    "Prolate spheroidal radial function pro_rad2 for precomputed characteristic value\n"
+    "\n"
+    "Computes the prolate spheroidal radial function of the second kind\n"
+    "and its derivative (with respect to `x`) for mode parameters m>=0\n"
+    "and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires\n"
+    "pre-computed characteristic value.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "m : array_like\n"
+    "    Nonnegative mode parameter m\n"
+    "n : array_like\n"
+    "    Mode parameter n (>= m)\n"
+    "c : array_like\n"
+    "    Spheroidal parameter\n"
+    "cv : array_like\n"
+    "    Characteristic value\n"
+    "x : array_like\n"
+    "    Real parameter (``|x| < 1.0``)\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "s : scalar or ndarray\n"
+    "    Value of the function\n"
+    "sp : scalar or ndarray\n"
+    "    Value of the derivative vs x")
+ufunc_pro_rad2_cv_loops[0] = loop_i_ddddd_dd_As_fffff_ff
+ufunc_pro_rad2_cv_loops[1] = loop_i_ddddd_dd_As_ddddd_dd
+ufunc_pro_rad2_cv_types[0] = NPY_FLOAT
+ufunc_pro_rad2_cv_types[1] = NPY_FLOAT
+ufunc_pro_rad2_cv_types[2] = NPY_FLOAT
+ufunc_pro_rad2_cv_types[3] = NPY_FLOAT
+ufunc_pro_rad2_cv_types[4] = NPY_FLOAT
+ufunc_pro_rad2_cv_types[5] = NPY_FLOAT
+ufunc_pro_rad2_cv_types[6] = NPY_FLOAT
+ufunc_pro_rad2_cv_types[7] = NPY_DOUBLE
+ufunc_pro_rad2_cv_types[8] = NPY_DOUBLE
+ufunc_pro_rad2_cv_types[9] = NPY_DOUBLE
+ufunc_pro_rad2_cv_types[10] = NPY_DOUBLE
+ufunc_pro_rad2_cv_types[11] = NPY_DOUBLE
+ufunc_pro_rad2_cv_types[12] = NPY_DOUBLE
+ufunc_pro_rad2_cv_types[13] = NPY_DOUBLE
+ufunc_pro_rad2_cv_ptr[2*0] = _func_prolate_radial2_wrap
+ufunc_pro_rad2_cv_ptr[2*0+1] = ("pro_rad2_cv")
+ufunc_pro_rad2_cv_ptr[2*1] = _func_prolate_radial2_wrap
+ufunc_pro_rad2_cv_ptr[2*1+1] = ("pro_rad2_cv")
+ufunc_pro_rad2_cv_data[0] = &ufunc_pro_rad2_cv_ptr[2*0]
+ufunc_pro_rad2_cv_data[1] = &ufunc_pro_rad2_cv_ptr[2*1]
+pro_rad2_cv = np.PyUFunc_FromFuncAndData(ufunc_pro_rad2_cv_loops, ufunc_pro_rad2_cv_data, ufunc_pro_rad2_cv_types, 2, 5, 2, 0, "pro_rad2_cv", ufunc_pro_rad2_cv_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_pseudo_huber_loops[2]
+cdef void *ufunc_pseudo_huber_ptr[4]
+cdef void *ufunc_pseudo_huber_data[2]
+cdef char ufunc_pseudo_huber_types[6]
+cdef char *ufunc_pseudo_huber_doc = (
+    "pseudo_huber(delta, r, out=None)\n"
+    "\n"
+    "Pseudo-Huber loss function.\n"
+    "\n"
+    ".. math:: \\mathrm{pseudo\\_huber}(\\delta, r) = \\delta^2 \\left( \\sqrt{ 1 + \\left( \\frac{r}{\\delta} \\right)^2 } - 1 \\right)\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "delta : array_like\n"
+    "    Input array, indicating the soft quadratic vs. linear loss changepoint.\n"
+    "r : array_like\n"
+    "    Input array, possibly representing residuals.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "res : scalar or ndarray\n"
+    "    The computed Pseudo-Huber loss function values.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "huber: Similar function which this function approximates\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "Like `huber`, `pseudo_huber` often serves as a robust loss function\n"
+    "in statistics or machine learning to reduce the influence of outliers.\n"
+    "Unlike `huber`, `pseudo_huber` is smooth.\n"
+    "\n"
+    "Typically, `r` represents residuals, the difference\n"
+    "between a model prediction and data. Then, for :math:`|r|\\leq\\delta`,\n"
+    "`pseudo_huber` resembles the squared error and for :math:`|r|>\\delta` the\n"
+    "absolute error. This way, the Pseudo-Huber loss often achieves\n"
+    "a fast convergence in model fitting for small residuals like the squared\n"
+    "error loss function and still reduces the influence of outliers\n"
+    "(:math:`|r|>\\delta`) like the absolute error loss. As :math:`\\delta` is\n"
+    "the cutoff between squared and absolute error regimes, it has\n"
+    "to be tuned carefully for each problem. `pseudo_huber` is also\n"
+    "convex, making it suitable for gradient based optimization. [1]_ [2]_\n"
+    "\n"
+    ".. versionadded:: 0.15.0\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Hartley, Zisserman, \"Multiple View Geometry in Computer Vision\".\n"
+    "       2003. Cambridge University Press. p. 619\n"
+    ".. [2] Charbonnier et al. \"Deterministic edge-preserving regularization\n"
+    "       in computed imaging\". 1997. IEEE Trans. Image Processing.\n"
+    "       6 (2): 298 - 311.\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Import all necessary modules.\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import pseudo_huber, huber\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    "\n"
+    "Calculate the function for ``delta=1`` at ``r=2``.\n"
+    "\n"
+    ">>> pseudo_huber(1., 2.)\n"
+    "1.2360679774997898\n"
+    "\n"
+    "Calculate the function at ``r=2`` for different `delta` by providing\n"
+    "a list or NumPy array for `delta`.\n"
+    "\n"
+    ">>> pseudo_huber([1., 2., 4.], 3.)\n"
+    "array([2.16227766, 3.21110255, 4.        ])\n"
+    "\n"
+    "Calculate the function for ``delta=1`` at several points by providing\n"
+    "a list or NumPy array for `r`.\n"
+    "\n"
+    ">>> pseudo_huber(2., np.array([1., 1.5, 3., 4.]))\n"
+    "array([0.47213595, 1.        , 3.21110255, 4.94427191])\n"
+    "\n"
+    "The function can be calculated for different `delta` and `r` by\n"
+    "providing arrays for both with compatible shapes for broadcasting.\n"
+    "\n"
+    ">>> r = np.array([1., 2.5, 8., 10.])\n"
+    ">>> deltas = np.array([[1.], [5.], [9.]])\n"
+    ">>> print(r.shape, deltas.shape)\n"
+    "(4,) (3, 1)\n"
+    "\n"
+    ">>> pseudo_huber(deltas, r)\n"
+    "array([[ 0.41421356,  1.6925824 ,  7.06225775,  9.04987562],\n"
+    "       [ 0.49509757,  2.95084972, 22.16990566, 30.90169944],\n"
+    "       [ 0.49846624,  3.06693762, 27.37435121, 40.08261642]])\n"
+    "\n"
+    "Plot the function for different `delta`.\n"
+    "\n"
+    ">>> x = np.linspace(-4, 4, 500)\n"
+    ">>> deltas = [1, 2, 3]\n"
+    ">>> linestyles = [\"dashed\", \"dotted\", \"dashdot\"]\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> combined_plot_parameters = list(zip(deltas, linestyles))\n"
+    ">>> for delta, style in combined_plot_parameters:\n"
+    "...     ax.plot(x, pseudo_huber(delta, x), label=f\"$\\delta={delta}$\",\n"
+    "...             ls=style)\n"
+    ">>> ax.legend(loc=\"upper center\")\n"
+    ">>> ax.set_xlabel(\"$x$\")\n"
+    ">>> ax.set_title(\"Pseudo-Huber loss function $h_{\\delta}(x)$\")\n"
+    ">>> ax.set_xlim(-4, 4)\n"
+    ">>> ax.set_ylim(0, 8)\n"
+    ">>> plt.show()\n"
+    "\n"
+    "Finally, illustrate the difference between `huber` and `pseudo_huber` by\n"
+    "plotting them and their gradients with respect to `r`. The plot shows\n"
+    "that `pseudo_huber` is continuously differentiable while `huber` is not\n"
+    "at the points :math:`\\pm\\delta`.\n"
+    "\n"
+    ">>> def huber_grad(delta, x):\n"
+    "...     grad = np.copy(x)\n"
+    "...     linear_area = np.argwhere(np.abs(x) > delta)\n"
+    "...     grad[linear_area]=delta*np.sign(x[linear_area])\n"
+    "...     return grad\n"
+    ">>> def pseudo_huber_grad(delta, x):\n"
+    "...     return x* (1+(x/delta)**2)**(-0.5)\n"
+    ">>> x=np.linspace(-3, 3, 500)\n"
+    ">>> delta = 1.\n"
+    ">>> fig, ax = plt.subplots(figsize=(7, 7))\n"
+    ">>> ax.plot(x, huber(delta, x), label=\"Huber\", ls=\"dashed\")\n"
+    ">>> ax.plot(x, huber_grad(delta, x), label=\"Huber Gradient\", ls=\"dashdot\")\n"
+    ">>> ax.plot(x, pseudo_huber(delta, x), label=\"Pseudo-Huber\", ls=\"dotted\")\n"
+    ">>> ax.plot(x, pseudo_huber_grad(delta, x), label=\"Pseudo-Huber Gradient\",\n"
+    "...         ls=\"solid\")\n"
+    ">>> ax.legend(loc=\"upper center\")\n"
+    ">>> plt.show()")
+ufunc_pseudo_huber_loops[0] = loop_d_dd__As_ff_f
+ufunc_pseudo_huber_loops[1] = loop_d_dd__As_dd_d
+ufunc_pseudo_huber_types[0] = NPY_FLOAT
+ufunc_pseudo_huber_types[1] = NPY_FLOAT
+ufunc_pseudo_huber_types[2] = NPY_FLOAT
+ufunc_pseudo_huber_types[3] = NPY_DOUBLE
+ufunc_pseudo_huber_types[4] = NPY_DOUBLE
+ufunc_pseudo_huber_types[5] = NPY_DOUBLE
+ufunc_pseudo_huber_ptr[2*0] = _func_pseudo_huber
+ufunc_pseudo_huber_ptr[2*0+1] = ("pseudo_huber")
+ufunc_pseudo_huber_ptr[2*1] = _func_pseudo_huber
+ufunc_pseudo_huber_ptr[2*1+1] = ("pseudo_huber")
+ufunc_pseudo_huber_data[0] = &ufunc_pseudo_huber_ptr[2*0]
+ufunc_pseudo_huber_data[1] = &ufunc_pseudo_huber_ptr[2*1]
+pseudo_huber = np.PyUFunc_FromFuncAndData(ufunc_pseudo_huber_loops, ufunc_pseudo_huber_data, ufunc_pseudo_huber_types, 2, 2, 1, 0, "pseudo_huber", ufunc_pseudo_huber_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_psi_loops[4]
+cdef void *ufunc_psi_ptr[8]
+cdef void *ufunc_psi_data[4]
+cdef char ufunc_psi_types[8]
+cdef char *ufunc_psi_doc = (
+    "psi(z, out=None)\n"
+    "\n"
+    "The digamma function.\n"
+    "\n"
+    "The logarithmic derivative of the gamma function evaluated at ``z``.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "z : array_like\n"
+    "    Real or complex argument.\n"
+    "out : ndarray, optional\n"
+    "    Array for the computed values of ``psi``.\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "digamma : scalar or ndarray\n"
+    "    Computed values of ``psi``.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "For large values not close to the negative real axis, ``psi`` is\n"
+    "computed using the asymptotic series (5.11.2) from [1]_. For small\n"
+    "arguments not close to the negative real axis, the recurrence\n"
+    "relation (5.5.2) from [1]_ is used until the argument is large\n"
+    "enough to use the asymptotic series. For values close to the\n"
+    "negative real axis, the reflection formula (5.5.4) from [1]_ is\n"
+    "used first. Note that ``psi`` has a family of zeros on the\n"
+    "negative real axis which occur between the poles at nonpositive\n"
+    "integers. Around the zeros the reflection formula suffers from\n"
+    "cancellation and the implementation loses precision. The sole\n"
+    "positive zero and the first negative zero, however, are handled\n"
+    "separately by precomputing series expansions using [2]_, so the\n"
+    "function should maintain full accuracy around the origin.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] NIST Digital Library of Mathematical Functions\n"
+    "       https://dlmf.nist.gov/5\n"
+    ".. [2] Fredrik Johansson and others.\n"
+    "       \"mpmath: a Python library for arbitrary-precision floating-point arithmetic\"\n"
+    "       (Version 0.19) http://mpmath.org/\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> from scipy.special import psi\n"
+    ">>> z = 3 + 4j\n"
+    ">>> psi(z)\n"
+    "(1.55035981733341+1.0105022091860445j)\n"
+    "\n"
+    "Verify psi(z) = psi(z + 1) - 1/z:\n"
+    "\n"
+    ">>> psi(z + 1) - 1/z\n"
+    "(1.55035981733341+1.0105022091860445j)")
+ufunc_psi_loops[0] = loop_d_d__As_f_f
+ufunc_psi_loops[1] = loop_d_d__As_d_d
+ufunc_psi_loops[2] = loop_D_D__As_F_F
+ufunc_psi_loops[3] = loop_D_D__As_D_D
+ufunc_psi_types[0] = NPY_FLOAT
+ufunc_psi_types[1] = NPY_FLOAT
+ufunc_psi_types[2] = NPY_DOUBLE
+ufunc_psi_types[3] = NPY_DOUBLE
+ufunc_psi_types[4] = NPY_CFLOAT
+ufunc_psi_types[5] = NPY_CFLOAT
+ufunc_psi_types[6] = NPY_CDOUBLE
+ufunc_psi_types[7] = NPY_CDOUBLE
+ufunc_psi_ptr[2*0] = _func_digamma
+ufunc_psi_ptr[2*0+1] = ("psi")
+ufunc_psi_ptr[2*1] = _func_digamma
+ufunc_psi_ptr[2*1+1] = ("psi")
+ufunc_psi_ptr[2*2] = _func_cdigamma
+ufunc_psi_ptr[2*2+1] = ("psi")
+ufunc_psi_ptr[2*3] = _func_cdigamma
+ufunc_psi_ptr[2*3+1] = ("psi")
+ufunc_psi_data[0] = &ufunc_psi_ptr[2*0]
+ufunc_psi_data[1] = &ufunc_psi_ptr[2*1]
+ufunc_psi_data[2] = &ufunc_psi_ptr[2*2]
+ufunc_psi_data[3] = &ufunc_psi_ptr[2*3]
+psi = np.PyUFunc_FromFuncAndData(ufunc_psi_loops, ufunc_psi_data, ufunc_psi_types, 4, 1, 1, 0, "psi", ufunc_psi_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_radian_loops[2]
+cdef void *ufunc_radian_ptr[4]
+cdef void *ufunc_radian_data[2]
+cdef char ufunc_radian_types[8]
+cdef char *ufunc_radian_doc = (
+    "radian(d, m, s, out=None)\n"
+    "\n"
+    "Convert from degrees to radians.\n"
+    "\n"
+    "Returns the angle given in (d)egrees, (m)inutes, and (s)econds in\n"
+    "radians.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "d : array_like\n"
+    "    Degrees, can be real-valued.\n"
+    "m : array_like\n"
+    "    Minutes, can be real-valued.\n"
+    "s : array_like\n"
+    "    Seconds, can be real-valued.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results.\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Values of the inputs in radians.\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import scipy.special as sc\n"
+    "\n"
+    "There are many ways to specify an angle.\n"
+    "\n"
+    ">>> sc.radian(90, 0, 0)\n"
+    "1.5707963267948966\n"
+    ">>> sc.radian(0, 60 * 90, 0)\n"
+    "1.5707963267948966\n"
+    ">>> sc.radian(0, 0, 60**2 * 90)\n"
+    "1.5707963267948966\n"
+    "\n"
+    "The inputs can be real-valued.\n"
+    "\n"
+    ">>> sc.radian(1.5, 0, 0)\n"
+    "0.02617993877991494\n"
+    ">>> sc.radian(1, 30, 0)\n"
+    "0.02617993877991494")
+ufunc_radian_loops[0] = loop_d_ddd__As_fff_f
+ufunc_radian_loops[1] = loop_d_ddd__As_ddd_d
+ufunc_radian_types[0] = NPY_FLOAT
+ufunc_radian_types[1] = NPY_FLOAT
+ufunc_radian_types[2] = NPY_FLOAT
+ufunc_radian_types[3] = NPY_FLOAT
+ufunc_radian_types[4] = NPY_DOUBLE
+ufunc_radian_types[5] = NPY_DOUBLE
+ufunc_radian_types[6] = NPY_DOUBLE
+ufunc_radian_types[7] = NPY_DOUBLE
+ufunc_radian_ptr[2*0] = _func_radian
+ufunc_radian_ptr[2*0+1] = ("radian")
+ufunc_radian_ptr[2*1] = _func_radian
+ufunc_radian_ptr[2*1+1] = ("radian")
+ufunc_radian_data[0] = &ufunc_radian_ptr[2*0]
+ufunc_radian_data[1] = &ufunc_radian_ptr[2*1]
+radian = np.PyUFunc_FromFuncAndData(ufunc_radian_loops, ufunc_radian_data, ufunc_radian_types, 2, 3, 1, 0, "radian", ufunc_radian_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_rel_entr_loops[2]
+cdef void *ufunc_rel_entr_ptr[4]
+cdef void *ufunc_rel_entr_data[2]
+cdef char ufunc_rel_entr_types[6]
+cdef char *ufunc_rel_entr_doc = (
+    "rel_entr(x, y, out=None)\n"
+    "\n"
+    "Elementwise function for computing relative entropy.\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    \\mathrm{rel\\_entr}(x, y) =\n"
+    "        \\begin{cases}\n"
+    "            x \\log(x / y) & x > 0, y > 0 \\\\\n"
+    "            0 & x = 0, y \\ge 0 \\\\\n"
+    "            \\infty & \\text{otherwise}\n"
+    "        \\end{cases}\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x, y : array_like\n"
+    "    Input arrays\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Relative entropy of the inputs\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "entr, kl_div, scipy.stats.entropy\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    ".. versionadded:: 0.15.0\n"
+    "\n"
+    "This function is jointly convex in x and y.\n"
+    "\n"
+    "The origin of this function is in convex programming; see\n"
+    "[1]_. Given two discrete probability distributions :math:`p_1,\n"
+    "\\ldots, p_n` and :math:`q_1, \\ldots, q_n`, the definition of relative\n"
+    "entropy in the context of *information theory* is\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    \\sum_{i = 1}^n \\mathrm{rel\\_entr}(p_i, q_i).\n"
+    "\n"
+    "To compute the latter quantity, use `scipy.stats.entropy`.\n"
+    "\n"
+    "See [2]_ for details.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Boyd, Stephen and Lieven Vandenberghe. *Convex optimization*.\n"
+    "       Cambridge University Press, 2004.\n"
+    "       :doi:`https://doi.org/10.1017/CBO9780511804441`\n"
+    ".. [2] Kullback-Leibler divergence,\n"
+    "       https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence")
+ufunc_rel_entr_loops[0] = loop_d_dd__As_ff_f
+ufunc_rel_entr_loops[1] = loop_d_dd__As_dd_d
+ufunc_rel_entr_types[0] = NPY_FLOAT
+ufunc_rel_entr_types[1] = NPY_FLOAT
+ufunc_rel_entr_types[2] = NPY_FLOAT
+ufunc_rel_entr_types[3] = NPY_DOUBLE
+ufunc_rel_entr_types[4] = NPY_DOUBLE
+ufunc_rel_entr_types[5] = NPY_DOUBLE
+ufunc_rel_entr_ptr[2*0] = _func_rel_entr
+ufunc_rel_entr_ptr[2*0+1] = ("rel_entr")
+ufunc_rel_entr_ptr[2*1] = _func_rel_entr
+ufunc_rel_entr_ptr[2*1+1] = ("rel_entr")
+ufunc_rel_entr_data[0] = &ufunc_rel_entr_ptr[2*0]
+ufunc_rel_entr_data[1] = &ufunc_rel_entr_ptr[2*1]
+rel_entr = np.PyUFunc_FromFuncAndData(ufunc_rel_entr_loops, ufunc_rel_entr_data, ufunc_rel_entr_types, 2, 2, 1, 0, "rel_entr", ufunc_rel_entr_doc, 0)
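+
+# Hedged sketch (comment only): summing elementwise rel_entr over two discrete
+# distributions gives the KL divergence, matching scipy.stats.entropy(p, q).
+#
+#     import numpy as np
+#     import scipy.special as sc
+#     from scipy.stats import entropy
+#     p = np.array([0.5, 0.3, 0.2])
+#     q = np.array([0.4, 0.4, 0.2])
+#     np.sum(sc.rel_entr(p, q))  # expected ~ entropy(p, q)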
+
+cdef np.PyUFuncGenericFunction ufunc_rgamma_loops[4]
+cdef void *ufunc_rgamma_ptr[8]
+cdef void *ufunc_rgamma_data[4]
+cdef char ufunc_rgamma_types[8]
+cdef char *ufunc_rgamma_doc = (
+    "rgamma(z, out=None)\n"
+    "\n"
+    "Reciprocal of the gamma function.\n"
+    "\n"
+    "Defined as :math:`1 / \\Gamma(z)`, where :math:`\\Gamma` is the\n"
+    "gamma function. For more on the gamma function see `gamma`.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "z : array_like\n"
+    "    Real or complex valued input\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Function results\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "The gamma function has no zeros and has simple poles at\n"
+    "nonpositive integers, so `rgamma` is an entire function with zeros\n"
+    "at the nonpositive integers. See the discussion in [dlmf]_ for\n"
+    "more details.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "gamma, gammaln, loggamma\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [dlmf] Nist, Digital Library of Mathematical functions,\n"
+    "    https://dlmf.nist.gov/5.2#i\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import scipy.special as sc\n"
+    "\n"
+    "It is the reciprocal of the gamma function.\n"
+    "\n"
+    ">>> sc.rgamma([1, 2, 3, 4])\n"
+    "array([1.        , 1.        , 0.5       , 0.16666667])\n"
+    ">>> 1 / sc.gamma([1, 2, 3, 4])\n"
+    "array([1.        , 1.        , 0.5       , 0.16666667])\n"
+    "\n"
+    "It is zero at nonpositive integers.\n"
+    "\n"
+    ">>> sc.rgamma([0, -1, -2, -3])\n"
+    "array([0., 0., 0., 0.])\n"
+    "\n"
+    "It rapidly underflows to zero along the positive real axis.\n"
+    "\n"
+    ">>> sc.rgamma([10, 100, 179])\n"
+    "array([2.75573192e-006, 1.07151029e-156, 0.00000000e+000])")
+ufunc_rgamma_loops[0] = loop_d_d__As_f_f
+ufunc_rgamma_loops[1] = loop_d_d__As_d_d
+ufunc_rgamma_loops[2] = loop_D_D__As_F_F
+ufunc_rgamma_loops[3] = loop_D_D__As_D_D
+ufunc_rgamma_types[0] = NPY_FLOAT
+ufunc_rgamma_types[1] = NPY_FLOAT
+ufunc_rgamma_types[2] = NPY_DOUBLE
+ufunc_rgamma_types[3] = NPY_DOUBLE
+ufunc_rgamma_types[4] = NPY_CFLOAT
+ufunc_rgamma_types[5] = NPY_CFLOAT
+ufunc_rgamma_types[6] = NPY_CDOUBLE
+ufunc_rgamma_types[7] = NPY_CDOUBLE
+ufunc_rgamma_ptr[2*0] = _func_rgamma
+ufunc_rgamma_ptr[2*0+1] = ("rgamma")
+ufunc_rgamma_ptr[2*1] = _func_rgamma
+ufunc_rgamma_ptr[2*1+1] = ("rgamma")
+ufunc_rgamma_ptr[2*2] = _func_crgamma
+ufunc_rgamma_ptr[2*2+1] = ("rgamma")
+ufunc_rgamma_ptr[2*3] = _func_crgamma
+ufunc_rgamma_ptr[2*3+1] = ("rgamma")
+ufunc_rgamma_data[0] = &ufunc_rgamma_ptr[2*0]
+ufunc_rgamma_data[1] = &ufunc_rgamma_ptr[2*1]
+ufunc_rgamma_data[2] = &ufunc_rgamma_ptr[2*2]
+ufunc_rgamma_data[3] = &ufunc_rgamma_ptr[2*3]
+rgamma = np.PyUFunc_FromFuncAndData(ufunc_rgamma_loops, ufunc_rgamma_data, ufunc_rgamma_types, 4, 1, 1, 0, "rgamma", ufunc_rgamma_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_round_loops[2]
+cdef void *ufunc_round_ptr[4]
+cdef void *ufunc_round_data[2]
+cdef char ufunc_round_types[4]
+cdef char *ufunc_round_doc = (
+    "round(x, out=None)\n"
+    "\n"
+    "Round to the nearest integer.\n"
+    "\n"
+    "Returns the nearest integer to `x`.  If `x` ends in 0.5 exactly,\n"
+    "the nearest even integer is chosen.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Real valued input.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results.\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    The nearest integers to the elements of `x`. The result is of\n"
+    "    floating type, not integer type.\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import scipy.special as sc\n"
+    "\n"
+    "It rounds to even.\n"
+    "\n"
+    ">>> sc.round([0.5, 1.5])\n"
+    "array([0., 2.])")
+ufunc_round_loops[0] = loop_d_d__As_f_f
+ufunc_round_loops[1] = loop_d_d__As_d_d
+ufunc_round_types[0] = NPY_FLOAT
+ufunc_round_types[1] = NPY_FLOAT
+ufunc_round_types[2] = NPY_DOUBLE
+ufunc_round_types[3] = NPY_DOUBLE
+ufunc_round_ptr[2*0] = _func_round
+ufunc_round_ptr[2*0+1] = ("round")
+ufunc_round_ptr[2*1] = _func_round
+ufunc_round_ptr[2*1+1] = ("round")
+ufunc_round_data[0] = &ufunc_round_ptr[2*0]
+ufunc_round_data[1] = &ufunc_round_ptr[2*1]
+round = np.PyUFunc_FromFuncAndData(ufunc_round_loops, ufunc_round_data, ufunc_round_types, 2, 1, 1, 0, "round", ufunc_round_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_shichi_loops[4]
+cdef void *ufunc_shichi_ptr[8]
+cdef void *ufunc_shichi_data[4]
+cdef char ufunc_shichi_types[12]
+cdef char *ufunc_shichi_doc = (
+    "shichi(x, out=None)\n"
+    "\n"
+    "Hyperbolic sine and cosine integrals.\n"
+    "\n"
+    "The hyperbolic sine integral is\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "  \\int_0^x \\frac{\\sinh{t}}{t}dt\n"
+    "\n"
+    "and the hyperbolic cosine integral is\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "  \\gamma + \\log(x) + \\int_0^x \\frac{\\cosh{t} - 1}{t} dt\n"
+    "\n"
+    "where :math:`\\gamma` is Euler's constant and :math:`\\log` is the\n"
+    "principal branch of the logarithm [1]_.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Real or complex points at which to compute the hyperbolic sine\n"
+    "    and cosine integrals.\n"
+    "out : tuple of ndarray, optional\n"
+    "    Optional output arrays for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "si : scalar or ndarray\n"
+    "    Hyperbolic sine integral at ``x``\n"
+    "ci : scalar or ndarray\n"
+    "    Hyperbolic cosine integral at ``x``\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "sici : Sine and cosine integrals.\n"
+    "exp1 : Exponential integral E1.\n"
+    "expi : Exponential integral Ei.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "For real arguments with ``x < 0``, ``chi`` is the real part of the\n"
+    "hyperbolic cosine integral. For such points ``chi(x)`` and ``chi(x\n"
+    "+ 0j)`` differ by a factor of ``1j*pi``.\n"
+    "\n"
+    "For real arguments the function is computed by calling Cephes'\n"
+    "[2]_ *shichi* routine. For complex arguments the algorithm is based\n"
+    "on Mpmath's [3]_ *shi* and *chi* routines.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Milton Abramowitz and Irene A. Stegun, eds.\n"
+    "       Handbook of Mathematical Functions with Formulas,\n"
+    "       Graphs, and Mathematical Tables. New York: Dover, 1972.\n"
+    "       (See Section 5.2.)\n"
+    ".. [2] Cephes Mathematical Functions Library,\n"
+    "       http://www.netlib.org/cephes/\n"
+    ".. [3] Fredrik Johansson and others.\n"
+    "       \"mpmath: a Python library for arbitrary-precision floating-point\n"
+    "       arithmetic\" (Version 0.19) http://mpmath.org/\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> from scipy.special import shichi, sici\n"
+    "\n"
+    "`shichi` accepts real or complex input:\n"
+    "\n"
+    ">>> shichi(0.5)\n"
+    "(0.5069967498196671, -0.05277684495649357)\n"
+    ">>> shichi(0.5 + 2.5j)\n"
+    "((0.11772029666668238+1.831091777729851j),\n"
+    " (0.29912435887648825+1.7395351121166562j))\n"
+    "\n"
+    "The hyperbolic sine and cosine integrals Shi(z) and Chi(z) are\n"
+    "related to the sine and cosine integrals Si(z) and Ci(z) by\n"
+    "\n"
+    "* Shi(z) = -i*Si(i*z)\n"
+    "* Chi(z) = Ci(-i*z) + i*pi/2\n"
+    "\n"
+    ">>> z = 0.25 + 5j\n"
+    ">>> shi, chi = shichi(z)\n"
+    ">>> shi, -1j*sici(1j*z)[0]            # Should be the same.\n"
+    "((-0.04834719325101729+1.5469354086921228j),\n"
+    " (-0.04834719325101729+1.5469354086921228j))\n"
+    ">>> chi, sici(-1j*z)[1] + 1j*np.pi/2  # Should be the same.\n"
+    "((-0.19568708973868087+1.556276312103824j),\n"
+    " (-0.19568708973868087+1.556276312103824j))\n"
+    "\n"
+    "Plot the functions evaluated on the real axis:\n"
+    "\n"
+    ">>> xp = np.geomspace(1e-8, 4.0, 250)\n"
+    ">>> x = np.concatenate((-xp[::-1], xp))\n"
+    ">>> shi, chi = shichi(x)\n"
+    "\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> ax.plot(x, shi, label='Shi(x)')\n"
+    ">>> ax.plot(x, chi, '--', label='Chi(x)')\n"
+    ">>> ax.set_xlabel('x')\n"
+    ">>> ax.set_title('Hyperbolic Sine and Cosine Integrals')\n"
+    ">>> ax.legend(shadow=True, framealpha=1, loc='lower right')\n"
+    ">>> ax.grid(True)\n"
+    ">>> plt.show()")
+ufunc_shichi_loops[0] = loop_i_d_dd_As_f_ff
+ufunc_shichi_loops[1] = loop_i_d_dd_As_d_dd
+ufunc_shichi_loops[2] = loop_i_D_DD_As_F_FF
+ufunc_shichi_loops[3] = loop_i_D_DD_As_D_DD
+ufunc_shichi_types[0] = NPY_FLOAT
+ufunc_shichi_types[1] = NPY_FLOAT
+ufunc_shichi_types[2] = NPY_FLOAT
+ufunc_shichi_types[3] = NPY_DOUBLE
+ufunc_shichi_types[4] = NPY_DOUBLE
+ufunc_shichi_types[5] = NPY_DOUBLE
+ufunc_shichi_types[6] = NPY_CFLOAT
+ufunc_shichi_types[7] = NPY_CFLOAT
+ufunc_shichi_types[8] = NPY_CFLOAT
+ufunc_shichi_types[9] = NPY_CDOUBLE
+ufunc_shichi_types[10] = NPY_CDOUBLE
+ufunc_shichi_types[11] = NPY_CDOUBLE
+ufunc_shichi_ptr[2*0] = _func_shichi
+ufunc_shichi_ptr[2*0+1] = ("shichi")
+ufunc_shichi_ptr[2*1] = _func_shichi
+ufunc_shichi_ptr[2*1+1] = ("shichi")
+ufunc_shichi_ptr[2*2] = _func_cshichi
+ufunc_shichi_ptr[2*2+1] = ("shichi")
+ufunc_shichi_ptr[2*3] = _func_cshichi
+ufunc_shichi_ptr[2*3+1] = ("shichi")
+ufunc_shichi_data[0] = &ufunc_shichi_ptr[2*0]
+ufunc_shichi_data[1] = &ufunc_shichi_ptr[2*1]
+ufunc_shichi_data[2] = &ufunc_shichi_ptr[2*2]
+ufunc_shichi_data[3] = &ufunc_shichi_ptr[2*3]
+shichi = np.PyUFunc_FromFuncAndData(ufunc_shichi_loops, ufunc_shichi_data, ufunc_shichi_types, 4, 1, 2, 0, "shichi", ufunc_shichi_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_sici_loops[4]
+cdef void *ufunc_sici_ptr[8]
+cdef void *ufunc_sici_data[4]
+cdef char ufunc_sici_types[12]
+cdef char *ufunc_sici_doc = (
+    "sici(x, out=None)\n"
+    "\n"
+    "Sine and cosine integrals.\n"
+    "\n"
+    "The sine integral is\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "  \\int_0^x \\frac{\\sin{t}}{t}dt\n"
+    "\n"
+    "and the cosine integral is\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "  \\gamma + \\log(x) + \\int_0^x \\frac{\\cos{t} - 1}{t}dt\n"
+    "\n"
+    "where :math:`\\gamma` is Euler's constant and :math:`\\log` is the\n"
+    "principal branch of the logarithm [1]_.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Real or complex points at which to compute the sine and cosine\n"
+    "    integrals.\n"
+    "out : tuple of ndarray, optional\n"
+    "    Optional output arrays for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "si : scalar or ndarray\n"
+    "    Sine integral at ``x``\n"
+    "ci : scalar or ndarray\n"
+    "    Cosine integral at ``x``\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "shichi : Hyperbolic sine and cosine integrals.\n"
+    "exp1 : Exponential integral E1.\n"
+    "expi : Exponential integral Ei.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "For real arguments with ``x < 0``, ``ci`` is the real part of the\n"
+    "cosine integral. For such points ``ci(x)`` and ``ci(x + 0j)``\n"
+    "differ by a factor of ``1j*pi``.\n"
+    "\n"
+    "For real arguments the function is computed by calling Cephes'\n"
+    "[2]_ *sici* routine. For complex arguments the algorithm is based\n"
+    "on Mpmath's [3]_ *si* and *ci* routines.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Milton Abramowitz and Irene A. Stegun, eds.\n"
+    "       Handbook of Mathematical Functions with Formulas,\n"
+    "       Graphs, and Mathematical Tables. New York: Dover, 1972.\n"
+    "       (See Section 5.2.)\n"
+    ".. [2] Cephes Mathematical Functions Library,\n"
+    "       http://www.netlib.org/cephes/\n"
+    ".. [3] Fredrik Johansson and others.\n"
+    "       \"mpmath: a Python library for arbitrary-precision floating-point\n"
+    "       arithmetic\" (Version 0.19) http://mpmath.org/\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> from scipy.special import sici, exp1\n"
+    "\n"
+    "`sici` accepts real or complex input:\n"
+    "\n"
+    ">>> sici(2.5)\n"
+    "(1.7785201734438267, 0.2858711963653835)\n"
+    ">>> sici(2.5 + 3j)\n"
+    "((4.505735874563953+0.06863305018999577j),\n"
+    "(0.0793644206906966-2.935510262937543j))\n"
+    "\n"
+    "For z in the right half plane, the sine and cosine integrals are\n"
+    "related to the exponential integral E1 (implemented in SciPy as\n"
+    "`scipy.special.exp1`) by\n"
+    "\n"
+    "* Si(z) = (E1(i*z) - E1(-i*z))/2i + pi/2\n"
+    "* Ci(z) = -(E1(i*z) + E1(-i*z))/2\n"
+    "\n"
+    "See [1]_ (equations 5.2.21 and 5.2.23).\n"
+    "\n"
+    "We can verify these relations:\n"
+    "\n"
+    ">>> z = 2 - 3j\n"
+    ">>> sici(z)\n"
+    "((4.54751388956229-1.3991965806460565j),\n"
+    "(1.408292501520851+2.9836177420296055j))\n"
+    "\n"
+    ">>> (exp1(1j*z) - exp1(-1j*z))/2j + np.pi/2  # Same as sine integral\n"
+    "(4.54751388956229-1.3991965806460565j)\n"
+    "\n"
+    ">>> -(exp1(1j*z) + exp1(-1j*z))/2            # Same as cosine integral\n"
+    "(1.408292501520851+2.9836177420296055j)\n"
+    "\n"
+    "Plot the functions evaluated on the real axis; the dotted horizontal\n"
+    "lines are at pi/2 and -pi/2:\n"
+    "\n"
+    ">>> x = np.linspace(-16, 16, 150)\n"
+    ">>> si, ci = sici(x)\n"
+    "\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> ax.plot(x, si, label='Si(x)')\n"
+    ">>> ax.plot(x, ci, '--', label='Ci(x)')\n"
+    ">>> ax.legend(shadow=True, framealpha=1, loc='upper left')\n"
+    ">>> ax.set_xlabel('x')\n"
+    ">>> ax.set_title('Sine and Cosine Integrals')\n"
+    ">>> ax.axhline(np.pi/2, linestyle=':', alpha=0.5, color='k')\n"
+    ">>> ax.axhline(-np.pi/2, linestyle=':', alpha=0.5, color='k')\n"
+    ">>> ax.grid(True)\n"
+    ">>> plt.show()")
+ufunc_sici_loops[0] = loop_i_d_dd_As_f_ff
+ufunc_sici_loops[1] = loop_i_d_dd_As_d_dd
+ufunc_sici_loops[2] = loop_i_D_DD_As_F_FF
+ufunc_sici_loops[3] = loop_i_D_DD_As_D_DD
+ufunc_sici_types[0] = NPY_FLOAT
+ufunc_sici_types[1] = NPY_FLOAT
+ufunc_sici_types[2] = NPY_FLOAT
+ufunc_sici_types[3] = NPY_DOUBLE
+ufunc_sici_types[4] = NPY_DOUBLE
+ufunc_sici_types[5] = NPY_DOUBLE
+ufunc_sici_types[6] = NPY_CFLOAT
+ufunc_sici_types[7] = NPY_CFLOAT
+ufunc_sici_types[8] = NPY_CFLOAT
+ufunc_sici_types[9] = NPY_CDOUBLE
+ufunc_sici_types[10] = NPY_CDOUBLE
+ufunc_sici_types[11] = NPY_CDOUBLE
+ufunc_sici_ptr[2*0] = _func_sici
+ufunc_sici_ptr[2*0+1] = ("sici")
+ufunc_sici_ptr[2*1] = _func_sici
+ufunc_sici_ptr[2*1+1] = ("sici")
+ufunc_sici_ptr[2*2] = _func_csici
+ufunc_sici_ptr[2*2+1] = ("sici")
+ufunc_sici_ptr[2*3] = _func_csici
+ufunc_sici_ptr[2*3+1] = ("sici")
+ufunc_sici_data[0] = &ufunc_sici_ptr[2*0]
+ufunc_sici_data[1] = &ufunc_sici_ptr[2*1]
+ufunc_sici_data[2] = &ufunc_sici_ptr[2*2]
+ufunc_sici_data[3] = &ufunc_sici_ptr[2*3]
+sici = np.PyUFunc_FromFuncAndData(ufunc_sici_loops, ufunc_sici_data, ufunc_sici_types, 4, 1, 2, 0, "sici", ufunc_sici_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_sindg_loops[2]
+cdef void *ufunc_sindg_ptr[4]
+cdef void *ufunc_sindg_data[2]
+cdef char ufunc_sindg_types[4]
+cdef char *ufunc_sindg_doc = (
+    "sindg(x, out=None)\n"
+    "\n"
+    "Sine of the angle `x` given in degrees.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Angle, given in degrees.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results.\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Sine at the input.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "cosdg, tandg, cotdg\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> import scipy.special as sc\n"
+    "\n"
+    "It is more accurate than using sine directly.\n"
+    "\n"
+    ">>> x = 180 * np.arange(3)\n"
+    ">>> sc.sindg(x)\n"
+    "array([ 0., -0.,  0.])\n"
+    ">>> np.sin(x * np.pi / 180)\n"
+    "array([ 0.0000000e+00,  1.2246468e-16, -2.4492936e-16])")
+ufunc_sindg_loops[0] = loop_d_d__As_f_f
+ufunc_sindg_loops[1] = loop_d_d__As_d_d
+ufunc_sindg_types[0] = NPY_FLOAT
+ufunc_sindg_types[1] = NPY_FLOAT
+ufunc_sindg_types[2] = NPY_DOUBLE
+ufunc_sindg_types[3] = NPY_DOUBLE
+ufunc_sindg_ptr[2*0] = _func_sindg
+ufunc_sindg_ptr[2*0+1] = ("sindg")
+ufunc_sindg_ptr[2*1] = _func_sindg
+ufunc_sindg_ptr[2*1+1] = ("sindg")
+ufunc_sindg_data[0] = &ufunc_sindg_ptr[2*0]
+ufunc_sindg_data[1] = &ufunc_sindg_ptr[2*1]
+sindg = np.PyUFunc_FromFuncAndData(ufunc_sindg_loops, ufunc_sindg_data, ufunc_sindg_types, 2, 1, 1, 0, "sindg", ufunc_sindg_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_smirnov_loops[3]
+cdef void *ufunc_smirnov_ptr[6]
+cdef void *ufunc_smirnov_data[3]
+cdef char ufunc_smirnov_types[9]
+cdef char *ufunc_smirnov_doc = (
+    "smirnov(n, d, out=None)\n"
+    "\n"
+    "Kolmogorov-Smirnov complementary cumulative distribution function\n"
+    "\n"
+    "Returns the exact Kolmogorov-Smirnov complementary cumulative\n"
+    "distribution function,(aka the Survival Function) of Dn+ (or Dn-)\n"
+    "for a one-sided test of equality between an empirical and a\n"
+    "theoretical distribution. It is equal to the probability that the\n"
+    "maximum difference between a theoretical distribution and an empirical\n"
+    "one based on `n` samples is greater than d.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "n : int\n"
+    "  Number of samples\n"
+    "d : float array_like\n"
+    "  Deviation between the Empirical CDF (ECDF) and the target CDF.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    The value(s) of smirnov(n, d), Prob(Dn+ >= d) (Also Prob(Dn- >= d))\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "smirnovi : The Inverse Survival Function for the distribution\n"
+    "scipy.stats.ksone : Provides the functionality as a continuous distribution\n"
+    "kolmogorov, kolmogi : Functions for the two-sided distribution\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "`smirnov` is used by `stats.kstest` in the application of the\n"
+    "Kolmogorov-Smirnov Goodness of Fit test. For historial reasons this\n"
+    "function is exposed in `scpy.special`, but the recommended way to achieve\n"
+    "the most accurate CDF/SF/PDF/PPF/ISF computations is to use the\n"
+    "`stats.ksone` distribution.\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import smirnov\n"
+    ">>> from scipy.stats import norm\n"
+    "\n"
+    "Show the probability of a gap at least as big as 0, 0.5 and 1.0 for a\n"
+    "sample of size 5.\n"
+    "\n"
+    ">>> smirnov(5, [0, 0.5, 1.0])\n"
+    "array([ 1.   ,  0.056,  0.   ])\n"
+    "\n"
+    "Compare a sample of size 5 against N(0, 1), the standard normal\n"
+    "distribution with mean 0 and standard deviation 1.\n"
+    "\n"
+    "`x` is the sample.\n"
+    "\n"
+    ">>> x = np.array([-1.392, -0.135, 0.114, 0.190, 1.82])\n"
+    "\n"
+    ">>> target = norm(0, 1)\n"
+    ">>> cdfs = target.cdf(x)\n"
+    ">>> cdfs\n"
+    "array([0.0819612 , 0.44630594, 0.5453811 , 0.57534543, 0.9656205 ])\n"
+    "\n"
+    "Construct the empirical CDF and the K-S statistics (Dn+, Dn-, Dn).\n"
+    "\n"
+    ">>> n = len(x)\n"
+    ">>> ecdfs = np.arange(n+1, dtype=float)/n\n"
+    ">>> cols = np.column_stack([x, ecdfs[1:], cdfs, cdfs - ecdfs[:n],\n"
+    "...                        ecdfs[1:] - cdfs])\n"
+    ">>> with np.printoptions(precision=3):\n"
+    "...    print(cols)\n"
+    "[[-1.392  0.2    0.082  0.082  0.118]\n"
+    " [-0.135  0.4    0.446  0.246 -0.046]\n"
+    " [ 0.114  0.6    0.545  0.145  0.055]\n"
+    " [ 0.19   0.8    0.575 -0.025  0.225]\n"
+    " [ 1.82   1.     0.966  0.166  0.034]]\n"
+    ">>> gaps = cols[:, -2:]\n"
+    ">>> Dnpm = np.max(gaps, axis=0)\n"
+    ">>> print(f'Dn-={Dnpm[0]:f}, Dn+={Dnpm[1]:f}')\n"
+    "Dn-=0.246306, Dn+=0.224655\n"
+    ">>> probs = smirnov(n, Dnpm)\n"
+    ">>> print(f'For a sample of size {n} drawn from N(0, 1):',\n"
+    "...       f' Smirnov n={n}: Prob(Dn- >= {Dnpm[0]:f}) = {probs[0]:.4f}',\n"
+    "...       f' Smirnov n={n}: Prob(Dn+ >= {Dnpm[1]:f}) = {probs[1]:.4f}',\n"
+    "...       sep='\\n')\n"
+    "For a sample of size 5 drawn from N(0, 1):\n"
+    " Smirnov n=5: Prob(Dn- >= 0.246306) = 0.4711\n"
+    " Smirnov n=5: Prob(Dn+ >= 0.224655) = 0.5245\n"
+    "\n"
+    "Plot the empirical CDF and the standard normal CDF.\n"
+    "\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> plt.step(np.concatenate(([-2.5], x, [2.5])),\n"
+    "...          np.concatenate((ecdfs, [1])),\n"
+    "...          where='post', label='Empirical CDF')\n"
+    ">>> xx = np.linspace(-2.5, 2.5, 100)\n"
+    ">>> plt.plot(xx, target.cdf(xx), '--', label='CDF for N(0, 1)')\n"
+    "\n"
+    "Add vertical lines marking Dn+ and Dn-.\n"
+    "\n"
+    ">>> iminus, iplus = np.argmax(gaps, axis=0)\n"
+    ">>> plt.vlines([x[iminus]], ecdfs[iminus], cdfs[iminus], color='r',\n"
+    "...            alpha=0.5, lw=4)\n"
+    ">>> plt.vlines([x[iplus]], cdfs[iplus], ecdfs[iplus+1], color='m',\n"
+    "...            alpha=0.5, lw=4)\n"
+    "\n"
+    ">>> plt.grid(True)\n"
+    ">>> plt.legend(framealpha=1, shadow=True)\n"
+    ">>> plt.show()")
+ufunc_smirnov_loops[0] = loop_d_id__As_ld_d
+ufunc_smirnov_loops[1] = loop_d_dd__As_ff_f
+ufunc_smirnov_loops[2] = loop_d_dd__As_dd_d
+ufunc_smirnov_types[0] = NPY_LONG
+ufunc_smirnov_types[1] = NPY_DOUBLE
+ufunc_smirnov_types[2] = NPY_DOUBLE
+ufunc_smirnov_types[3] = NPY_FLOAT
+ufunc_smirnov_types[4] = NPY_FLOAT
+ufunc_smirnov_types[5] = NPY_FLOAT
+ufunc_smirnov_types[6] = NPY_DOUBLE
+ufunc_smirnov_types[7] = NPY_DOUBLE
+ufunc_smirnov_types[8] = NPY_DOUBLE
+ufunc_smirnov_ptr[2*0] = _func_smirnov
+ufunc_smirnov_ptr[2*0+1] = ("smirnov")
+ufunc_smirnov_ptr[2*1] = _func_smirnov_unsafe
+ufunc_smirnov_ptr[2*1+1] = ("smirnov")
+ufunc_smirnov_ptr[2*2] = _func_smirnov_unsafe
+ufunc_smirnov_ptr[2*2+1] = ("smirnov")
+ufunc_smirnov_data[0] = &ufunc_smirnov_ptr[2*0]
+ufunc_smirnov_data[1] = &ufunc_smirnov_ptr[2*1]
+ufunc_smirnov_data[2] = &ufunc_smirnov_ptr[2*2]
+smirnov = np.PyUFunc_FromFuncAndData(ufunc_smirnov_loops, ufunc_smirnov_data, ufunc_smirnov_types, 3, 2, 1, 0, "smirnov", ufunc_smirnov_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_smirnovi_loops[3]
+cdef void *ufunc_smirnovi_ptr[6]
+cdef void *ufunc_smirnovi_data[3]
+cdef char ufunc_smirnovi_types[9]
+cdef char *ufunc_smirnovi_doc = (
+    "smirnovi(n, p, out=None)\n"
+    "\n"
+    "Inverse to `smirnov`\n"
+    "\n"
+    "Returns `d` such that ``smirnov(n, d) == p``, the critical value\n"
+    "corresponding to `p`.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "n : int\n"
+    "  Number of samples\n"
+    "p : float array_like\n"
+    "    Probability\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    The value(s) of smirnovi(n, p), the critical values.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "smirnov : The Survival Function (SF) for the distribution\n"
+    "scipy.stats.ksone : Provides the functionality as a continuous distribution\n"
+    "kolmogorov, kolmogi : Functions for the two-sided distribution\n"
+    "scipy.stats.kstwobign : Two-sided Kolmogorov-Smirnov distribution, large n\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "`smirnov` is used by `stats.kstest` in the application of the\n"
+    "Kolmogorov-Smirnov Goodness of Fit test. For historial reasons this\n"
+    "function is exposed in `scpy.special`, but the recommended way to achieve\n"
+    "the most accurate CDF/SF/PDF/PPF/ISF computations is to use the\n"
+    "`stats.ksone` distribution.\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> from scipy.special import smirnovi, smirnov\n"
+    "\n"
+    ">>> n = 24\n"
+    ">>> deviations = [0.1, 0.2, 0.3]\n"
+    "\n"
+    "Use `smirnov` to compute the complementary CDF of the Smirnov\n"
+    "distribution for the given number of samples and deviations.\n"
+    "\n"
+    ">>> p = smirnov(n, deviations)\n"
+    ">>> p\n"
+    "array([0.58105083, 0.12826832, 0.01032231])\n"
+    "\n"
+    "The inverse function ``smirnovi(n, p)`` returns ``deviations``.\n"
+    "\n"
+    ">>> smirnovi(n, p)\n"
+    "array([0.1, 0.2, 0.3])")
+ufunc_smirnovi_loops[0] = loop_d_id__As_ld_d
+ufunc_smirnovi_loops[1] = loop_d_dd__As_ff_f
+ufunc_smirnovi_loops[2] = loop_d_dd__As_dd_d
+ufunc_smirnovi_types[0] = NPY_LONG
+ufunc_smirnovi_types[1] = NPY_DOUBLE
+ufunc_smirnovi_types[2] = NPY_DOUBLE
+ufunc_smirnovi_types[3] = NPY_FLOAT
+ufunc_smirnovi_types[4] = NPY_FLOAT
+ufunc_smirnovi_types[5] = NPY_FLOAT
+ufunc_smirnovi_types[6] = NPY_DOUBLE
+ufunc_smirnovi_types[7] = NPY_DOUBLE
+ufunc_smirnovi_types[8] = NPY_DOUBLE
+ufunc_smirnovi_ptr[2*0] = _func_smirnovi
+ufunc_smirnovi_ptr[2*0+1] = ("smirnovi")
+ufunc_smirnovi_ptr[2*1] = _func_smirnovi_unsafe
+ufunc_smirnovi_ptr[2*1+1] = ("smirnovi")
+ufunc_smirnovi_ptr[2*2] = _func_smirnovi_unsafe
+ufunc_smirnovi_ptr[2*2+1] = ("smirnovi")
+ufunc_smirnovi_data[0] = &ufunc_smirnovi_ptr[2*0]
+ufunc_smirnovi_data[1] = &ufunc_smirnovi_ptr[2*1]
+ufunc_smirnovi_data[2] = &ufunc_smirnovi_ptr[2*2]
+smirnovi = np.PyUFunc_FromFuncAndData(ufunc_smirnovi_loops, ufunc_smirnovi_data, ufunc_smirnovi_types, 3, 2, 1, 0, "smirnovi", ufunc_smirnovi_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_spence_loops[4]
+cdef void *ufunc_spence_ptr[8]
+cdef void *ufunc_spence_data[4]
+cdef char ufunc_spence_types[8]
+cdef char *ufunc_spence_doc = (
+    "spence(z, out=None)\n"
+    "\n"
+    "Spence's function, also known as the dilogarithm.\n"
+    "\n"
+    "It is defined to be\n"
+    "\n"
+    ".. math::\n"
+    "  \\int_1^z \\frac{\\log(t)}{1 - t}dt\n"
+    "\n"
+    "for complex :math:`z`, where the contour of integration is taken\n"
+    "to avoid the branch cut of the logarithm. Spence's function is\n"
+    "analytic everywhere except the negative real axis where it has a\n"
+    "branch cut.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "z : array_like\n"
+    "    Points at which to evaluate Spence's function\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "s : scalar or ndarray\n"
+    "    Computed values of Spence's function\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "There is a different convention which defines Spence's function by\n"
+    "the integral\n"
+    "\n"
+    ".. math::\n"
+    "  -\\int_0^z \\frac{\\log(1 - t)}{t}dt;\n"
+    "\n"
+    "this is our ``spence(1 - z)``.\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import spence\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    "\n"
+    "The function is defined for complex inputs:\n"
+    "\n"
+    ">>> spence([1-1j, 1.5+2j, 3j, -10-5j])\n"
+    "array([-0.20561676+0.91596559j, -0.86766909-1.39560134j,\n"
+    "       -0.59422064-2.49129918j, -1.14044398+6.80075924j])\n"
+    "\n"
+    "For complex inputs on the branch cut, which is the negative real axis,\n"
+    "the function returns the limit for ``z`` with positive imaginary part.\n"
+    "For example, in the following, note the sign change of the imaginary\n"
+    "part of the output for ``z = -2`` and ``z = -2 - 1e-8j``:\n"
+    "\n"
+    ">>> spence([-2 + 1e-8j, -2, -2 - 1e-8j])\n"
+    "array([2.32018041-3.45139229j, 2.32018042-3.4513923j ,\n"
+    "       2.32018041+3.45139229j])\n"
+    "\n"
+    "The function returns ``nan`` for real inputs on the branch cut:\n"
+    "\n"
+    ">>> spence(-1.5)\n"
+    "nan\n"
+    "\n"
+    "Verify some particular values: ``spence(0) = pi**2/6``,\n"
+    "``spence(1) = 0`` and ``spence(2) = -pi**2/12``.\n"
+    "\n"
+    ">>> spence([0, 1, 2])\n"
+    "array([ 1.64493407,  0.        , -0.82246703])\n"
+    ">>> np.pi**2/6, -np.pi**2/12\n"
+    "(1.6449340668482264, -0.8224670334241132)\n"
+    "\n"
+    "Verify the identity::\n"
+    "\n"
+    "    spence(z) + spence(1 - z) = pi**2/6 - log(z)*log(1 - z)\n"
+    "\n"
+    ">>> z = 3 + 4j\n"
+    ">>> spence(z) + spence(1 - z)\n"
+    "(-2.6523186143876067+1.8853470951513935j)\n"
+    ">>> np.pi**2/6 - np.log(z)*np.log(1 - z)\n"
+    "(-2.652318614387606+1.885347095151394j)\n"
+    "\n"
+    "Plot the function for positive real input.\n"
+    "\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> x = np.linspace(0, 6, 400)\n"
+    ">>> ax.plot(x, spence(x))\n"
+    ">>> ax.grid()\n"
+    ">>> ax.set_xlabel('x')\n"
+    ">>> ax.set_title('spence(x)')\n"
+    ">>> plt.show()")
+ufunc_spence_loops[0] = loop_d_d__As_f_f
+ufunc_spence_loops[1] = loop_d_d__As_d_d
+ufunc_spence_loops[2] = loop_D_D__As_F_F
+ufunc_spence_loops[3] = loop_D_D__As_D_D
+ufunc_spence_types[0] = NPY_FLOAT
+ufunc_spence_types[1] = NPY_FLOAT
+ufunc_spence_types[2] = NPY_DOUBLE
+ufunc_spence_types[3] = NPY_DOUBLE
+ufunc_spence_types[4] = NPY_CFLOAT
+ufunc_spence_types[5] = NPY_CFLOAT
+ufunc_spence_types[6] = NPY_CDOUBLE
+ufunc_spence_types[7] = NPY_CDOUBLE
+ufunc_spence_ptr[2*0] = _func_spence
+ufunc_spence_ptr[2*0+1] = ("spence")
+ufunc_spence_ptr[2*1] = _func_spence
+ufunc_spence_ptr[2*1+1] = ("spence")
+ufunc_spence_ptr[2*2] = _func_cspence
+ufunc_spence_ptr[2*2+1] = ("spence")
+ufunc_spence_ptr[2*3] = _func_cspence
+ufunc_spence_ptr[2*3+1] = ("spence")
+ufunc_spence_data[0] = &ufunc_spence_ptr[2*0]
+ufunc_spence_data[1] = &ufunc_spence_ptr[2*1]
+ufunc_spence_data[2] = &ufunc_spence_ptr[2*2]
+ufunc_spence_data[3] = &ufunc_spence_ptr[2*3]
+spence = np.PyUFunc_FromFuncAndData(ufunc_spence_loops, ufunc_spence_data, ufunc_spence_types, 4, 1, 1, 0, "spence", ufunc_spence_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_sph_harm_loops[3]
+cdef void *ufunc_sph_harm_ptr[6]
+cdef void *ufunc_sph_harm_data[3]
+cdef char ufunc_sph_harm_types[15]
+cdef char *ufunc_sph_harm_doc = (
+    "sph_harm(m, n, theta, phi, out=None)\n"
+    "\n"
+    "Compute spherical harmonics.\n"
+    "\n"
+    "The spherical harmonics are defined as\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    Y^m_n(\\theta,\\phi) = \\sqrt{\\frac{2n+1}{4\\pi} \\frac{(n-m)!}{(n+m)!}}\n"
+    "      e^{i m \\theta} P^m_n(\\cos(\\phi))\n"
+    "\n"
+    "where :math:`P_n^m` are the associated Legendre functions; see `lpmv`.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "m : array_like\n"
+    "    Order of the harmonic (int); must have ``|m| <= n``.\n"
+    "n : array_like\n"
+    "   Degree of the harmonic (int); must have ``n >= 0``. This is\n"
+    "   often denoted by ``l`` (lower case L) in descriptions of\n"
+    "   spherical harmonics.\n"
+    "theta : array_like\n"
+    "   Azimuthal (longitudinal) coordinate; must be in ``[0, 2*pi]``.\n"
+    "phi : array_like\n"
+    "   Polar (colatitudinal) coordinate; must be in ``[0, pi]``.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "y_mn : complex scalar or ndarray\n"
+    "   The harmonic :math:`Y^m_n` sampled at ``theta`` and ``phi``.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "There are different conventions for the meanings of the input\n"
+    "arguments ``theta`` and ``phi``. In SciPy ``theta`` is the\n"
+    "azimuthal angle and ``phi`` is the polar angle. It is common to\n"
+    "see the opposite convention, that is, ``theta`` as the polar angle\n"
+    "and ``phi`` as the azimuthal angle.\n"
+    "\n"
+    "Note that SciPy's spherical harmonics include the Condon-Shortley\n"
+    "phase [2]_ because it is part of `lpmv`.\n"
+    "\n"
+    "With SciPy's conventions, the first several spherical harmonics\n"
+    "are\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    Y_0^0(\\theta, \\phi) &= \\frac{1}{2} \\sqrt{\\frac{1}{\\pi}} \\\\\n"
+    "    Y_1^{-1}(\\theta, \\phi) &= \\frac{1}{2} \\sqrt{\\frac{3}{2\\pi}}\n"
+    "                                e^{-i\\theta} \\sin(\\phi) \\\\\n"
+    "    Y_1^0(\\theta, \\phi) &= \\frac{1}{2} \\sqrt{\\frac{3}{\\pi}}\n"
+    "                             \\cos(\\phi) \\\\\n"
+    "    Y_1^1(\\theta, \\phi) &= -\\frac{1}{2} \\sqrt{\\frac{3}{2\\pi}}\n"
+    "                             e^{i\\theta} \\sin(\\phi).\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Digital Library of Mathematical Functions, 14.30.\n"
+    "       https://dlmf.nist.gov/14.30\n"
+    ".. [2] https://en.wikipedia.org/wiki/Spherical_harmonics#Condon.E2.80.93Shortley_phase")
+ufunc_sph_harm_loops[0] = loop_D_iidd__As_lldd_D
+ufunc_sph_harm_loops[1] = loop_D_dddd__As_ffff_F
+ufunc_sph_harm_loops[2] = loop_D_dddd__As_dddd_D
+ufunc_sph_harm_types[0] = NPY_LONG
+ufunc_sph_harm_types[1] = NPY_LONG
+ufunc_sph_harm_types[2] = NPY_DOUBLE
+ufunc_sph_harm_types[3] = NPY_DOUBLE
+ufunc_sph_harm_types[4] = NPY_CDOUBLE
+ufunc_sph_harm_types[5] = NPY_FLOAT
+ufunc_sph_harm_types[6] = NPY_FLOAT
+ufunc_sph_harm_types[7] = NPY_FLOAT
+ufunc_sph_harm_types[8] = NPY_FLOAT
+ufunc_sph_harm_types[9] = NPY_CFLOAT
+ufunc_sph_harm_types[10] = NPY_DOUBLE
+ufunc_sph_harm_types[11] = NPY_DOUBLE
+ufunc_sph_harm_types[12] = NPY_DOUBLE
+ufunc_sph_harm_types[13] = NPY_DOUBLE
+ufunc_sph_harm_types[14] = NPY_CDOUBLE
+ufunc_sph_harm_ptr[2*0] = _func_sph_harmonic
+ufunc_sph_harm_ptr[2*0+1] = ("sph_harm")
+ufunc_sph_harm_ptr[2*1] = _func_sph_harmonic_unsafe
+ufunc_sph_harm_ptr[2*1+1] = ("sph_harm")
+ufunc_sph_harm_ptr[2*2] = _func_sph_harmonic_unsafe
+ufunc_sph_harm_ptr[2*2+1] = ("sph_harm")
+ufunc_sph_harm_data[0] = &ufunc_sph_harm_ptr[2*0]
+ufunc_sph_harm_data[1] = &ufunc_sph_harm_ptr[2*1]
+ufunc_sph_harm_data[2] = &ufunc_sph_harm_ptr[2*2]
+sph_harm = np.PyUFunc_FromFuncAndData(ufunc_sph_harm_loops, ufunc_sph_harm_data, ufunc_sph_harm_types, 3, 4, 1, 0, "sph_harm", ufunc_sph_harm_doc, 0)
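+
+# The docstring above gives closed forms for the first few harmonics; a
+# doctest-style sketch checking Y_1^0 against its closed form (a
+# consistency check, not captured output):
+#
+#     >>> import numpy as np
+#     >>> theta, phi = 0.5, 1.0
+#     >>> sph_harm(0, 1, theta, phi)                 # Y_1^0(theta, phi)
+#     >>> 0.5 * np.sqrt(3 / np.pi) * np.cos(phi)     # should agree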
+
+cdef np.PyUFuncGenericFunction ufunc_stdtr_loops[2]
+cdef void *ufunc_stdtr_ptr[4]
+cdef void *ufunc_stdtr_data[2]
+cdef char ufunc_stdtr_types[6]
+cdef char *ufunc_stdtr_doc = (
+    "stdtr(df, t, out=None)\n"
+    "\n"
+    "Student t distribution cumulative distribution function\n"
+    "\n"
+    "Returns the integral from minus infinity to t of the Student t\n"
+    "distribution with df > 0 degrees of freedom::\n"
+    "\n"
+    "   gamma((df+1)/2)/(sqrt(df*pi)*gamma(df/2)) *\n"
+    "   integral((1+x**2/df)**(-df/2-1/2), x=-inf..t)\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "df : array_like\n"
+    "    Degrees of freedom\n"
+    "t : array_like\n"
+    "    Upper bound of the integral\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Value of the Student t CDF at t\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "stdtridf : inverse of stdtr with respect to `df`\n"
+    "stdtrit : inverse of stdtr with respect to `t`")
+ufunc_stdtr_loops[0] = loop_d_dd__As_ff_f
+ufunc_stdtr_loops[1] = loop_d_dd__As_dd_d
+ufunc_stdtr_types[0] = NPY_FLOAT
+ufunc_stdtr_types[1] = NPY_FLOAT
+ufunc_stdtr_types[2] = NPY_FLOAT
+ufunc_stdtr_types[3] = NPY_DOUBLE
+ufunc_stdtr_types[4] = NPY_DOUBLE
+ufunc_stdtr_types[5] = NPY_DOUBLE
+ufunc_stdtr_ptr[2*0] = _func_cdft1_wrap
+ufunc_stdtr_ptr[2*0+1] = ("stdtr")
+ufunc_stdtr_ptr[2*1] = _func_cdft1_wrap
+ufunc_stdtr_ptr[2*1+1] = ("stdtr")
+ufunc_stdtr_data[0] = &ufunc_stdtr_ptr[2*0]
+ufunc_stdtr_data[1] = &ufunc_stdtr_ptr[2*1]
+stdtr = np.PyUFunc_FromFuncAndData(ufunc_stdtr_loops, ufunc_stdtr_data, ufunc_stdtr_types, 2, 2, 1, 0, "stdtr", ufunc_stdtr_doc, 0)
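+
+# `stdtr` carries no Examples section; a small usage sketch (expected
+# values follow from the symmetry of the t distribution, not from
+# captured output):
+#
+#     >>> stdtr(3, 0.0)                 # CDF at 0 of a symmetric law
+#     0.5
+#     >>> stdtr(3, [-1.0, 0.0, 1.0])    # broadcasts over `t`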
+
+cdef np.PyUFuncGenericFunction ufunc_stdtridf_loops[2]
+cdef void *ufunc_stdtridf_ptr[4]
+cdef void *ufunc_stdtridf_data[2]
+cdef char ufunc_stdtridf_types[6]
+cdef char *ufunc_stdtridf_doc = (
+    "stdtridf(p, t, out=None)\n"
+    "\n"
+    "Inverse of `stdtr` vs df\n"
+    "\n"
+    "Returns the argument df such that stdtr(df, t) is equal to `p`.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "p : array_like\n"
+    "    Probability\n"
+    "t : array_like\n"
+    "    Upper bound of the integral\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "df : scalar or ndarray\n"
+    "    Value of `df` such that ``stdtr(df, t) == p``\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "stdtr : Student t CDF\n"
+    "stdtrit : inverse of stdtr with respect to `t`")
+ufunc_stdtridf_loops[0] = loop_d_dd__As_ff_f
+ufunc_stdtridf_loops[1] = loop_d_dd__As_dd_d
+ufunc_stdtridf_types[0] = NPY_FLOAT
+ufunc_stdtridf_types[1] = NPY_FLOAT
+ufunc_stdtridf_types[2] = NPY_FLOAT
+ufunc_stdtridf_types[3] = NPY_DOUBLE
+ufunc_stdtridf_types[4] = NPY_DOUBLE
+ufunc_stdtridf_types[5] = NPY_DOUBLE
+ufunc_stdtridf_ptr[2*0] = _func_cdft3_wrap
+ufunc_stdtridf_ptr[2*0+1] = ("stdtridf")
+ufunc_stdtridf_ptr[2*1] = _func_cdft3_wrap
+ufunc_stdtridf_ptr[2*1+1] = ("stdtridf")
+ufunc_stdtridf_data[0] = &ufunc_stdtridf_ptr[2*0]
+ufunc_stdtridf_data[1] = &ufunc_stdtridf_ptr[2*1]
+stdtridf = np.PyUFunc_FromFuncAndData(ufunc_stdtridf_loops, ufunc_stdtridf_data, ufunc_stdtridf_types, 2, 2, 1, 0, "stdtridf", ufunc_stdtridf_doc, 0)
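+
+# Usage sketch for the `df` inverse: round-tripping through `stdtr`
+# should recover the degrees of freedom (a consistency check, not
+# captured output):
+#
+#     >>> p = stdtr(4.0, 1.5)
+#     >>> stdtridf(p, 1.5)              # approximately 4.0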
+
+cdef np.PyUFuncGenericFunction ufunc_stdtrit_loops[2]
+cdef void *ufunc_stdtrit_ptr[4]
+cdef void *ufunc_stdtrit_data[2]
+cdef char ufunc_stdtrit_types[6]
+cdef char *ufunc_stdtrit_doc = (
+    "stdtrit(df, p, out=None)\n"
+    "\n"
+    "Inverse of `stdtr` vs `t`\n"
+    "\n"
+    "Returns the argument `t` such that stdtr(df, t) is equal to `p`.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "df : array_like\n"
+    "    Degrees of freedom\n"
+    "p : array_like\n"
+    "    Probability\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "t : scalar or ndarray\n"
+    "    Value of `t` such that ``stdtr(df, t) == p``\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "stdtr : Student t CDF\n"
+    "stdtridf : inverse of stdtr with respect to `df`")
+ufunc_stdtrit_loops[0] = loop_d_dd__As_ff_f
+ufunc_stdtrit_loops[1] = loop_d_dd__As_dd_d
+ufunc_stdtrit_types[0] = NPY_FLOAT
+ufunc_stdtrit_types[1] = NPY_FLOAT
+ufunc_stdtrit_types[2] = NPY_FLOAT
+ufunc_stdtrit_types[3] = NPY_DOUBLE
+ufunc_stdtrit_types[4] = NPY_DOUBLE
+ufunc_stdtrit_types[5] = NPY_DOUBLE
+ufunc_stdtrit_ptr[2*0] = _func_cdft2_wrap
+ufunc_stdtrit_ptr[2*0+1] = ("stdtrit")
+ufunc_stdtrit_ptr[2*1] = _func_cdft2_wrap
+ufunc_stdtrit_ptr[2*1+1] = ("stdtrit")
+ufunc_stdtrit_data[0] = &ufunc_stdtrit_ptr[2*0]
+ufunc_stdtrit_data[1] = &ufunc_stdtrit_ptr[2*1]
+stdtrit = np.PyUFunc_FromFuncAndData(ufunc_stdtrit_loops, ufunc_stdtrit_data, ufunc_stdtrit_types, 2, 2, 1, 0, "stdtrit", ufunc_stdtrit_doc, 0)
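+
+# Usage sketch for the `t` inverse (a consistency check, not captured
+# output):
+#
+#     >>> stdtrit(3, 0.5)               # median of the t distribution: 0
+#     >>> t = stdtrit(3, 0.9)
+#     >>> stdtr(3, t)                   # round-trips to 0.9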
+
+cdef np.PyUFuncGenericFunction ufunc_struve_loops[2]
+cdef void *ufunc_struve_ptr[4]
+cdef void *ufunc_struve_data[2]
+cdef char ufunc_struve_types[6]
+cdef char *ufunc_struve_doc = (
+    "struve(v, x, out=None)\n"
+    "\n"
+    "Struve function.\n"
+    "\n"
+    "Return the value of the Struve function of order `v` at `x`.  The Struve\n"
+    "function is defined as,\n"
+    "\n"
+    ".. math::\n"
+    "    H_v(x) = (z/2)^{v + 1} \\sum_{n=0}^\\infty \\frac{(-1)^n (z/2)^{2n}}{\\Gamma(n + \\frac{3}{2}) \\Gamma(n + v + \\frac{3}{2})},\n"
+    "\n"
+    "where :math:`\\Gamma` is the gamma function.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "v : array_like\n"
+    "    Order of the Struve function (float).\n"
+    "x : array_like\n"
+    "    Argument of the Struve function (float; must be positive unless `v` is\n"
+    "    an integer).\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "H : scalar or ndarray\n"
+    "    Value of the Struve function of order `v` at `x`.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "Three methods discussed in [1]_ are used to evaluate the Struve function:\n"
+    "\n"
+    "- power series\n"
+    "- expansion in Bessel functions (if :math:`|z| < |v| + 20`)\n"
+    "- asymptotic large-z expansion (if :math:`z \\geq 0.7v + 12`)\n"
+    "\n"
+    "Rounding errors are estimated based on the largest terms in the sums, and\n"
+    "the result associated with the smallest error is returned.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "modstruve: Modified Struve function\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] NIST Digital Library of Mathematical Functions\n"
+    "       https://dlmf.nist.gov/11\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Calculate the Struve function of order 1 at 2.\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import struve\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> struve(1, 2.)\n"
+    "0.6467637282835622\n"
+    "\n"
+    "Calculate the Struve function at 2 for orders 1, 2 and 3 by providing\n"
+    "a list for the order parameter `v`.\n"
+    "\n"
+    ">>> struve([1, 2, 3], 2.)\n"
+    "array([0.64676373, 0.28031806, 0.08363767])\n"
+    "\n"
+    "Calculate the Struve function of order 1 for several points by providing\n"
+    "an array for `x`.\n"
+    "\n"
+    ">>> points = np.array([2., 5., 8.])\n"
+    ">>> struve(1, points)\n"
+    "array([0.64676373, 0.80781195, 0.48811605])\n"
+    "\n"
+    "Compute the Struve function for several orders at several points by\n"
+    "providing arrays for `v` and `z`. The arrays have to be broadcastable\n"
+    "to the correct shapes.\n"
+    "\n"
+    ">>> orders = np.array([[1], [2], [3]])\n"
+    ">>> points.shape, orders.shape\n"
+    "((3,), (3, 1))\n"
+    "\n"
+    ">>> struve(orders, points)\n"
+    "array([[0.64676373, 0.80781195, 0.48811605],\n"
+    "       [0.28031806, 1.56937455, 1.51769363],\n"
+    "       [0.08363767, 1.50872065, 2.98697513]])\n"
+    "\n"
+    "Plot the Struve functions of order 0 to 3 from -10 to 10.\n"
+    "\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> x = np.linspace(-10., 10., 1000)\n"
+    ">>> for i in range(4):\n"
+    "...     ax.plot(x, struve(i, x), label=f'$H_{i!r}$')\n"
+    ">>> ax.legend(ncol=2)\n"
+    ">>> ax.set_xlim(-10, 10)\n"
+    ">>> ax.set_title(r\"Struve functions $H_{\\nu}$\")\n"
+    ">>> plt.show()")
+ufunc_struve_loops[0] = loop_d_dd__As_ff_f
+ufunc_struve_loops[1] = loop_d_dd__As_dd_d
+ufunc_struve_types[0] = NPY_FLOAT
+ufunc_struve_types[1] = NPY_FLOAT
+ufunc_struve_types[2] = NPY_FLOAT
+ufunc_struve_types[3] = NPY_DOUBLE
+ufunc_struve_types[4] = NPY_DOUBLE
+ufunc_struve_types[5] = NPY_DOUBLE
+ufunc_struve_ptr[2*0] = _func_struve_h
+ufunc_struve_ptr[2*0+1] = ("struve")
+ufunc_struve_ptr[2*1] = _func_struve_h
+ufunc_struve_ptr[2*1+1] = ("struve")
+ufunc_struve_data[0] = &ufunc_struve_ptr[2*0]
+ufunc_struve_data[1] = &ufunc_struve_ptr[2*1]
+struve = np.PyUFunc_FromFuncAndData(ufunc_struve_loops, ufunc_struve_data, ufunc_struve_types, 2, 2, 1, 0, "struve", ufunc_struve_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_tandg_loops[2]
+cdef void *ufunc_tandg_ptr[4]
+cdef void *ufunc_tandg_data[2]
+cdef char ufunc_tandg_types[4]
+cdef char *ufunc_tandg_doc = (
+    "tandg(x, out=None)\n"
+    "\n"
+    "Tangent of angle `x` given in degrees.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Angle, given in degrees.\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results.\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Tangent at the input.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "sindg, cosdg, cotdg\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> import scipy.special as sc\n"
+    "\n"
+    "It is more accurate than using tangent directly.\n"
+    "\n"
+    ">>> x = 180 * np.arange(3)\n"
+    ">>> sc.tandg(x)\n"
+    "array([0., 0., 0.])\n"
+    ">>> np.tan(x * np.pi / 180)\n"
+    "array([ 0.0000000e+00, -1.2246468e-16, -2.4492936e-16])")
+ufunc_tandg_loops[0] = loop_d_d__As_f_f
+ufunc_tandg_loops[1] = loop_d_d__As_d_d
+ufunc_tandg_types[0] = NPY_FLOAT
+ufunc_tandg_types[1] = NPY_FLOAT
+ufunc_tandg_types[2] = NPY_DOUBLE
+ufunc_tandg_types[3] = NPY_DOUBLE
+ufunc_tandg_ptr[2*0] = _func_tandg
+ufunc_tandg_ptr[2*0+1] = ("tandg")
+ufunc_tandg_ptr[2*1] = _func_tandg
+ufunc_tandg_ptr[2*1+1] = ("tandg")
+ufunc_tandg_data[0] = &ufunc_tandg_ptr[2*0]
+ufunc_tandg_data[1] = &ufunc_tandg_ptr[2*1]
+tandg = np.PyUFunc_FromFuncAndData(ufunc_tandg_loops, ufunc_tandg_data, ufunc_tandg_types, 2, 1, 1, 0, "tandg", ufunc_tandg_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_tklmbda_loops[2]
+cdef void *ufunc_tklmbda_ptr[4]
+cdef void *ufunc_tklmbda_data[2]
+cdef char ufunc_tklmbda_types[6]
+cdef char *ufunc_tklmbda_doc = (
+    "tklmbda(x, lmbda, out=None)\n"
+    "\n"
+    "Tukey-Lambda cumulative distribution function\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x, lmbda : array_like\n"
+    "    Parameters\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "cdf : scalar or ndarray\n"
+    "    Value of the Tukey-Lambda CDF")
+ufunc_tklmbda_loops[0] = loop_d_dd__As_ff_f
+ufunc_tklmbda_loops[1] = loop_d_dd__As_dd_d
+ufunc_tklmbda_types[0] = NPY_FLOAT
+ufunc_tklmbda_types[1] = NPY_FLOAT
+ufunc_tklmbda_types[2] = NPY_FLOAT
+ufunc_tklmbda_types[3] = NPY_DOUBLE
+ufunc_tklmbda_types[4] = NPY_DOUBLE
+ufunc_tklmbda_types[5] = NPY_DOUBLE
+ufunc_tklmbda_ptr[2*0] = _func_tukeylambdacdf
+ufunc_tklmbda_ptr[2*0+1] = ("tklmbda")
+ufunc_tklmbda_ptr[2*1] = _func_tukeylambdacdf
+ufunc_tklmbda_ptr[2*1+1] = ("tklmbda")
+ufunc_tklmbda_data[0] = &ufunc_tklmbda_ptr[2*0]
+ufunc_tklmbda_data[1] = &ufunc_tklmbda_ptr[2*1]
+tklmbda = np.PyUFunc_FromFuncAndData(ufunc_tklmbda_loops, ufunc_tklmbda_data, ufunc_tklmbda_types, 2, 2, 1, 0, "tklmbda", ufunc_tklmbda_doc, 0)
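+
+# The docstring above is terse; for ``lmbda = 0`` the Tukey-Lambda
+# distribution reduces to the logistic distribution, which gives a cheap
+# consistency check (a sketch, not captured output):
+#
+#     >>> import numpy as np
+#     >>> x = np.array([-1.0, 0.0, 2.0])
+#     >>> tklmbda(x, 0)
+#     >>> 1 / (1 + np.exp(-x))          # logistic CDF; should agree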
+
+cdef np.PyUFuncGenericFunction ufunc_voigt_profile_loops[2]
+cdef void *ufunc_voigt_profile_ptr[4]
+cdef void *ufunc_voigt_profile_data[2]
+cdef char ufunc_voigt_profile_types[8]
+cdef char *ufunc_voigt_profile_doc = (
+    "voigt_profile(x, sigma, gamma, out=None)\n"
+    "\n"
+    "Voigt profile.\n"
+    "\n"
+    "The Voigt profile is a convolution of a 1-D Normal distribution with\n"
+    "standard deviation ``sigma`` and a 1-D Cauchy distribution with half-width at\n"
+    "half-maximum ``gamma``.\n"
+    "\n"
+    "If ``sigma = 0``, PDF of Cauchy distribution is returned.\n"
+    "Conversely, if ``gamma = 0``, PDF of Normal distribution is returned.\n"
+    "If ``sigma = gamma = 0``, the return value is ``Inf`` for ``x = 0``, and ``0`` for all other ``x``.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Real argument\n"
+    "sigma : array_like\n"
+    "    The standard deviation of the Normal distribution part\n"
+    "gamma : array_like\n"
+    "    The half-width at half-maximum of the Cauchy distribution part\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    The Voigt profile at the given arguments\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "It can be expressed in terms of Faddeeva function\n"
+    "\n"
+    ".. math:: V(x; \\sigma, \\gamma) = \\frac{Re[w(z)]}{\\sigma\\sqrt{2\\pi}},\n"
+    ".. math:: z = \\frac{x + i\\gamma}{\\sqrt{2}\\sigma}\n"
+    "\n"
+    "where :math:`w(z)` is the Faddeeva function.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "wofz : Faddeeva function\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] https://en.wikipedia.org/wiki/Voigt_profile\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Calculate the function at point 2 for ``sigma=1`` and ``gamma=1``.\n"
+    "\n"
+    ">>> from scipy.special import voigt_profile\n"
+    ">>> import numpy as np\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> voigt_profile(2, 1., 1.)\n"
+    "0.09071519942627544\n"
+    "\n"
+    "Calculate the function at several points by providing a NumPy array\n"
+    "for `x`.\n"
+    "\n"
+    ">>> values = np.array([-2., 0., 5])\n"
+    ">>> voigt_profile(values, 1., 1.)\n"
+    "array([0.0907152 , 0.20870928, 0.01388492])\n"
+    "\n"
+    "Plot the function for different parameter sets.\n"
+    "\n"
+    ">>> fig, ax = plt.subplots(figsize=(8, 8))\n"
+    ">>> x = np.linspace(-10, 10, 500)\n"
+    ">>> parameters_list = [(1.5, 0., \"solid\"), (1.3, 0.5, \"dashed\"),\n"
+    "...                    (0., 1.8, \"dotted\"), (1., 1., \"dashdot\")]\n"
+    ">>> for params in parameters_list:\n"
+    "...     sigma, gamma, linestyle = params\n"
+    "...     voigt = voigt_profile(x, sigma, gamma)\n"
+    "...     ax.plot(x, voigt, label=rf\"$\\sigma={sigma},\\, \\gamma={gamma}$\",\n"
+    "...             ls=linestyle)\n"
+    ">>> ax.legend()\n"
+    ">>> plt.show()\n"
+    "\n"
+    "Verify visually that the Voigt profile indeed arises as the convolution\n"
+    "of a normal and a Cauchy distribution.\n"
+    "\n"
+    ">>> from scipy.signal import convolve\n"
+    ">>> x, dx = np.linspace(-10, 10, 500, retstep=True)\n"
+    ">>> def gaussian(x, sigma):\n"
+    "...     return np.exp(-0.5 * x**2/sigma**2)/(sigma * np.sqrt(2*np.pi))\n"
+    ">>> def cauchy(x, gamma):\n"
+    "...     return gamma/(np.pi * (np.square(x)+gamma**2))\n"
+    ">>> sigma = 2\n"
+    ">>> gamma = 1\n"
+    ">>> gauss_profile = gaussian(x, sigma)\n"
+    ">>> cauchy_profile = cauchy(x, gamma)\n"
+    ">>> convolved = dx * convolve(cauchy_profile, gauss_profile, mode=\"same\")\n"
+    ">>> voigt = voigt_profile(x, sigma, gamma)\n"
+    ">>> fig, ax = plt.subplots(figsize=(8, 8))\n"
+    ">>> ax.plot(x, gauss_profile, label=\"Gauss: $G$\", c='b')\n"
+    ">>> ax.plot(x, cauchy_profile, label=\"Cauchy: $C$\", c='y', ls=\"dashed\")\n"
+    ">>> xx = 0.5*(x[1:] + x[:-1])  # midpoints\n"
+    ">>> ax.plot(xx, convolved[1:], label=\"Convolution: $G * C$\", ls='dashdot',\n"
+    "...         c='k')\n"
+    ">>> ax.plot(x, voigt, label=\"Voigt\", ls='dotted', c='r')\n"
+    ">>> ax.legend()\n"
+    ">>> plt.show()")
+ufunc_voigt_profile_loops[0] = loop_d_ddd__As_fff_f
+ufunc_voigt_profile_loops[1] = loop_d_ddd__As_ddd_d
+ufunc_voigt_profile_types[0] = NPY_FLOAT
+ufunc_voigt_profile_types[1] = NPY_FLOAT
+ufunc_voigt_profile_types[2] = NPY_FLOAT
+ufunc_voigt_profile_types[3] = NPY_FLOAT
+ufunc_voigt_profile_types[4] = NPY_DOUBLE
+ufunc_voigt_profile_types[5] = NPY_DOUBLE
+ufunc_voigt_profile_types[6] = NPY_DOUBLE
+ufunc_voigt_profile_types[7] = NPY_DOUBLE
+ufunc_voigt_profile_ptr[2*0] = scipy.special._ufuncs_cxx._export_faddeeva_voigt_profile
+ufunc_voigt_profile_ptr[2*0+1] = ("voigt_profile")
+ufunc_voigt_profile_ptr[2*1] = scipy.special._ufuncs_cxx._export_faddeeva_voigt_profile
+ufunc_voigt_profile_ptr[2*1+1] = ("voigt_profile")
+ufunc_voigt_profile_data[0] = &ufunc_voigt_profile_ptr[2*0]
+ufunc_voigt_profile_data[1] = &ufunc_voigt_profile_ptr[2*1]
+voigt_profile = np.PyUFunc_FromFuncAndData(ufunc_voigt_profile_loops, ufunc_voigt_profile_data, ufunc_voigt_profile_types, 2, 3, 1, 0, "voigt_profile", ufunc_voigt_profile_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_wofz_loops[2]
+cdef void *ufunc_wofz_ptr[4]
+cdef void *ufunc_wofz_data[2]
+cdef char ufunc_wofz_types[4]
+cdef char *ufunc_wofz_doc = (
+    "wofz(z, out=None)\n"
+    "\n"
+    "Faddeeva function\n"
+    "\n"
+    "Returns the value of the Faddeeva function for complex argument::\n"
+    "\n"
+    "    exp(-z**2) * erfc(-i*z)\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "z : array_like\n"
+    "    complex argument\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Value of the Faddeeva function\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "dawsn, erf, erfc, erfcx, erfi\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Steven G. Johnson, Faddeeva W function implementation.\n"
+    "   http://ab-initio.mit.edu/Faddeeva\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy import special\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    "\n"
+    ">>> x = np.linspace(-3, 3)\n"
+    ">>> z = special.wofz(x)\n"
+    "\n"
+    ">>> plt.plot(x, z.real, label='wofz(x).real')\n"
+    ">>> plt.plot(x, z.imag, label='wofz(x).imag')\n"
+    ">>> plt.xlabel('$x$')\n"
+    ">>> plt.legend(framealpha=1, shadow=True)\n"
+    ">>> plt.grid(alpha=0.25)\n"
+    ">>> plt.show()")
+ufunc_wofz_loops[0] = loop_D_D__As_F_F
+ufunc_wofz_loops[1] = loop_D_D__As_D_D
+ufunc_wofz_types[0] = NPY_CFLOAT
+ufunc_wofz_types[1] = NPY_CFLOAT
+ufunc_wofz_types[2] = NPY_CDOUBLE
+ufunc_wofz_types[3] = NPY_CDOUBLE
+ufunc_wofz_ptr[2*0] = scipy.special._ufuncs_cxx._export_faddeeva_w
+ufunc_wofz_ptr[2*0+1] = ("wofz")
+ufunc_wofz_ptr[2*1] = scipy.special._ufuncs_cxx._export_faddeeva_w
+ufunc_wofz_ptr[2*1+1] = ("wofz")
+ufunc_wofz_data[0] = &ufunc_wofz_ptr[2*0]
+ufunc_wofz_data[1] = &ufunc_wofz_ptr[2*1]
+wofz = np.PyUFunc_FromFuncAndData(ufunc_wofz_loops, ufunc_wofz_data, ufunc_wofz_types, 2, 1, 1, 0, "wofz", ufunc_wofz_doc, 0)
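+
+# The defining relation quoted in the docstring can be checked directly
+# against `erfc` (a sketch, not captured output):
+#
+#     >>> import numpy as np
+#     >>> from scipy import special
+#     >>> z = 0.5 + 0.5j
+#     >>> special.wofz(z)
+#     >>> np.exp(-z**2) * special.erfc(-1j * z)     # should agree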
+
+cdef np.PyUFuncGenericFunction ufunc_wright_bessel_loops[2]
+cdef void *ufunc_wright_bessel_ptr[4]
+cdef void *ufunc_wright_bessel_data[2]
+cdef char ufunc_wright_bessel_types[8]
+cdef char *ufunc_wright_bessel_doc = (
+    "wright_bessel(a, b, x, out=None)\n"
+    "\n"
+    "Wright's generalized Bessel function.\n"
+    "\n"
+    "Wright's generalized Bessel function is an entire function and defined as\n"
+    "\n"
+    ".. math:: \\Phi(a, b; x) = \\sum_{k=0}^\\infty \\frac{x^k}{k! \\Gamma(a k + b)}\n"
+    "\n"
+    "See also [1].\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "a : array_like of float\n"
+    "    a >= 0\n"
+    "b : array_like of float\n"
+    "    b >= 0\n"
+    "x : array_like of float\n"
+    "    x >= 0\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Value of the Wright's generalized Bessel function\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "Due to the compexity of the function with its three parameters, only\n"
+    "non-negative arguments are implemented.\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> from scipy.special import wright_bessel\n"
+    ">>> a, b, x = 1.5, 1.1, 2.5\n"
+    ">>> wright_bessel(a, b-1, x)\n"
+    "4.5314465939443025\n"
+    "\n"
+    "Now, let us verify the relation\n"
+    "\n"
+    ".. math:: \\Phi(a, b-1; x) = a x \\Phi(a, b+a; x) + (b-1) \\Phi(a, b; x)\n"
+    "\n"
+    ">>> a * x * wright_bessel(a, b+a, x) + (b-1) * wright_bessel(a, b, x)\n"
+    "4.5314465939443025\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Digital Library of Mathematical Functions, 10.46.\n"
+    "       https://dlmf.nist.gov/10.46.E1")
+ufunc_wright_bessel_loops[0] = loop_d_ddd__As_fff_f
+ufunc_wright_bessel_loops[1] = loop_d_ddd__As_ddd_d
+ufunc_wright_bessel_types[0] = NPY_FLOAT
+ufunc_wright_bessel_types[1] = NPY_FLOAT
+ufunc_wright_bessel_types[2] = NPY_FLOAT
+ufunc_wright_bessel_types[3] = NPY_FLOAT
+ufunc_wright_bessel_types[4] = NPY_DOUBLE
+ufunc_wright_bessel_types[5] = NPY_DOUBLE
+ufunc_wright_bessel_types[6] = NPY_DOUBLE
+ufunc_wright_bessel_types[7] = NPY_DOUBLE
+ufunc_wright_bessel_ptr[2*0] = _func_wright_bessel_scalar
+ufunc_wright_bessel_ptr[2*0+1] = ("wright_bessel")
+ufunc_wright_bessel_ptr[2*1] = _func_wright_bessel_scalar
+ufunc_wright_bessel_ptr[2*1+1] = ("wright_bessel")
+ufunc_wright_bessel_data[0] = &ufunc_wright_bessel_ptr[2*0]
+ufunc_wright_bessel_data[1] = &ufunc_wright_bessel_ptr[2*1]
+wright_bessel = np.PyUFunc_FromFuncAndData(ufunc_wright_bessel_loops, ufunc_wright_bessel_data, ufunc_wright_bessel_types, 2, 3, 1, 0, "wright_bessel", ufunc_wright_bessel_doc, 0)
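+
+# The defining series converges quickly for small x, so a truncated
+# partial sum gives an independent check (a sketch; 50 terms is an
+# illustrative choice, not captured output):
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import gamma
+#     >>> a, b, x = 1.5, 1.1, 2.5
+#     >>> k = np.arange(50)
+#     >>> np.sum(x**k / (gamma(k + 1) * gamma(a*k + b)))  # ~ wright_bessel(a, b, x)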
+
+cdef np.PyUFuncGenericFunction ufunc_wrightomega_loops[4]
+cdef void *ufunc_wrightomega_ptr[8]
+cdef void *ufunc_wrightomega_data[4]
+cdef char ufunc_wrightomega_types[8]
+cdef char *ufunc_wrightomega_doc = (
+    "wrightomega(z, out=None)\n"
+    "\n"
+    "Wright Omega function.\n"
+    "\n"
+    "Defined as the solution to\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    \\omega + \\log(\\omega) = z\n"
+    "\n"
+    "where :math:`\\log` is the principal branch of the complex logarithm.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "z : array_like\n"
+    "    Points at which to evaluate the Wright Omega function\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function values\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "omega : scalar or ndarray\n"
+    "    Values of the Wright Omega function\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    ".. versionadded:: 0.19.0\n"
+    "\n"
+    "The function can also be defined as\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    \\omega(z) = W_{K(z)}(e^z)\n"
+    "\n"
+    "where :math:`K(z) = \\lceil (\\Im(z) - \\pi)/(2\\pi) \\rceil` is the\n"
+    "unwinding number and :math:`W` is the Lambert W function.\n"
+    "\n"
+    "The implementation here is taken from [1]_.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "lambertw : The Lambert W function\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Lawrence, Corless, and Jeffrey, \"Algorithm 917: Complex\n"
+    "       Double-Precision Evaluation of the Wright :math:`\\omega`\n"
+    "       Function.\" ACM Transactions on Mathematical Software,\n"
+    "       2012. :doi:`10.1145/2168773.2168779`.\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import wrightomega, lambertw\n"
+    "\n"
+    ">>> wrightomega([-2, -1, 0, 1, 2])\n"
+    "array([0.12002824, 0.27846454, 0.56714329, 1.        , 1.5571456 ])\n"
+    "\n"
+    "Complex input:\n"
+    "\n"
+    ">>> wrightomega(3 + 5j)\n"
+    "(1.5804428632097158+3.8213626783287937j)\n"
+    "\n"
+    "Verify that ``wrightomega(z)`` satisfies ``w + log(w) = z``:\n"
+    "\n"
+    ">>> w = -5 + 4j\n"
+    ">>> wrightomega(w + np.log(w))\n"
+    "(-5+4j)\n"
+    "\n"
+    "Verify the connection to ``lambertw``:\n"
+    "\n"
+    ">>> z = 0.5 + 3j\n"
+    ">>> wrightomega(z)\n"
+    "(0.0966015889280649+1.4937828458191993j)\n"
+    ">>> lambertw(np.exp(z))\n"
+    "(0.09660158892806493+1.4937828458191993j)\n"
+    "\n"
+    ">>> z = 0.5 + 4j\n"
+    ">>> wrightomega(z)\n"
+    "(-0.3362123489037213+2.282986001579032j)\n"
+    ">>> lambertw(np.exp(z), k=1)\n"
+    "(-0.33621234890372115+2.282986001579032j)")
+ufunc_wrightomega_loops[0] = loop_d_d__As_f_f
+ufunc_wrightomega_loops[1] = loop_d_d__As_d_d
+ufunc_wrightomega_loops[2] = loop_D_D__As_F_F
+ufunc_wrightomega_loops[3] = loop_D_D__As_D_D
+ufunc_wrightomega_types[0] = NPY_FLOAT
+ufunc_wrightomega_types[1] = NPY_FLOAT
+ufunc_wrightomega_types[2] = NPY_DOUBLE
+ufunc_wrightomega_types[3] = NPY_DOUBLE
+ufunc_wrightomega_types[4] = NPY_CFLOAT
+ufunc_wrightomega_types[5] = NPY_CFLOAT
+ufunc_wrightomega_types[6] = NPY_CDOUBLE
+ufunc_wrightomega_types[7] = NPY_CDOUBLE
+ufunc_wrightomega_ptr[2*0] = scipy.special._ufuncs_cxx._export_wrightomega_real
+ufunc_wrightomega_ptr[2*0+1] = ("wrightomega")
+ufunc_wrightomega_ptr[2*1] = scipy.special._ufuncs_cxx._export_wrightomega_real
+ufunc_wrightomega_ptr[2*1+1] = ("wrightomega")
+ufunc_wrightomega_ptr[2*2] = scipy.special._ufuncs_cxx._export_wrightomega
+ufunc_wrightomega_ptr[2*2+1] = ("wrightomega")
+ufunc_wrightomega_ptr[2*3] = scipy.special._ufuncs_cxx._export_wrightomega
+ufunc_wrightomega_ptr[2*3+1] = ("wrightomega")
+ufunc_wrightomega_data[0] = &ufunc_wrightomega_ptr[2*0]
+ufunc_wrightomega_data[1] = &ufunc_wrightomega_ptr[2*1]
+ufunc_wrightomega_data[2] = &ufunc_wrightomega_ptr[2*2]
+ufunc_wrightomega_data[3] = &ufunc_wrightomega_ptr[2*3]
+wrightomega = np.PyUFunc_FromFuncAndData(ufunc_wrightomega_loops, ufunc_wrightomega_data, ufunc_wrightomega_types, 4, 1, 1, 0, "wrightomega", ufunc_wrightomega_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_xlog1py_loops[4]
+cdef void *ufunc_xlog1py_ptr[8]
+cdef void *ufunc_xlog1py_data[4]
+cdef char ufunc_xlog1py_types[12]
+cdef char *ufunc_xlog1py_doc = (
+    "xlog1py(x, y, out=None)\n"
+    "\n"
+    "Compute ``x*log1p(y)`` so that the result is 0 if ``x = 0``.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Multiplier\n"
+    "y : array_like\n"
+    "    Argument\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "z : scalar or ndarray\n"
+    "    Computed x*log1p(y)\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "\n"
+    ".. versionadded:: 0.13.0")
+ufunc_xlog1py_loops[0] = loop_d_dd__As_ff_f
+ufunc_xlog1py_loops[1] = loop_d_dd__As_dd_d
+ufunc_xlog1py_loops[2] = loop_D_DD__As_FF_F
+ufunc_xlog1py_loops[3] = loop_D_DD__As_DD_D
+ufunc_xlog1py_types[0] = NPY_FLOAT
+ufunc_xlog1py_types[1] = NPY_FLOAT
+ufunc_xlog1py_types[2] = NPY_FLOAT
+ufunc_xlog1py_types[3] = NPY_DOUBLE
+ufunc_xlog1py_types[4] = NPY_DOUBLE
+ufunc_xlog1py_types[5] = NPY_DOUBLE
+ufunc_xlog1py_types[6] = NPY_CFLOAT
+ufunc_xlog1py_types[7] = NPY_CFLOAT
+ufunc_xlog1py_types[8] = NPY_CFLOAT
+ufunc_xlog1py_types[9] = NPY_CDOUBLE
+ufunc_xlog1py_types[10] = NPY_CDOUBLE
+ufunc_xlog1py_types[11] = NPY_CDOUBLE
+ufunc_xlog1py_ptr[2*0] = _func_xlog1py[double]
+ufunc_xlog1py_ptr[2*0+1] = ("xlog1py")
+ufunc_xlog1py_ptr[2*1] = _func_xlog1py[double]
+ufunc_xlog1py_ptr[2*1+1] = ("xlog1py")
+ufunc_xlog1py_ptr[2*2] = _func_xlog1py[double_complex]
+ufunc_xlog1py_ptr[2*2+1] = ("xlog1py")
+ufunc_xlog1py_ptr[2*3] = _func_xlog1py[double_complex]
+ufunc_xlog1py_ptr[2*3+1] = ("xlog1py")
+ufunc_xlog1py_data[0] = &ufunc_xlog1py_ptr[2*0]
+ufunc_xlog1py_data[1] = &ufunc_xlog1py_ptr[2*1]
+ufunc_xlog1py_data[2] = &ufunc_xlog1py_ptr[2*2]
+ufunc_xlog1py_data[3] = &ufunc_xlog1py_ptr[2*3]
+xlog1py = np.PyUFunc_FromFuncAndData(ufunc_xlog1py_loops, ufunc_xlog1py_data, ufunc_xlog1py_types, 4, 2, 1, 0, "xlog1py", ufunc_xlog1py_doc, 0)
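+# Illustrative usage (added note, not part of the generated source; the
+# values shown are the mathematically expected results of ``x * log1p(y)``):
+#
+#     >>> from scipy.special import xlog1py
+#     >>> xlog1py(0, -1.)   # 0 by convention, not 0 * log1p(-1) = 0 * -inf = nan
+#     0.0
+#     >>> xlog1py(2, 0.5)   # == 2 * log1p(0.5)
+#     0.8109302162163288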
+
+cdef np.PyUFuncGenericFunction ufunc_xlogy_loops[4]
+cdef void *ufunc_xlogy_ptr[8]
+cdef void *ufunc_xlogy_data[4]
+cdef char ufunc_xlogy_types[12]
+cdef char *ufunc_xlogy_doc = (
+    "xlogy(x, y, out=None)\n"
+    "\n"
+    "Compute ``x*log(y)`` so that the result is 0 if ``x = 0``.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Multiplier\n"
+    "y : array_like\n"
+    "    Argument\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "z : scalar or ndarray\n"
+    "    Computed x*log(y)\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "\n"
+    ".. versionadded:: 0.13.0")
+ufunc_xlogy_loops[0] = loop_d_dd__As_ff_f
+ufunc_xlogy_loops[1] = loop_d_dd__As_dd_d
+ufunc_xlogy_loops[2] = loop_D_DD__As_FF_F
+ufunc_xlogy_loops[3] = loop_D_DD__As_DD_D
+ufunc_xlogy_types[0] = NPY_FLOAT
+ufunc_xlogy_types[1] = NPY_FLOAT
+ufunc_xlogy_types[2] = NPY_FLOAT
+ufunc_xlogy_types[3] = NPY_DOUBLE
+ufunc_xlogy_types[4] = NPY_DOUBLE
+ufunc_xlogy_types[5] = NPY_DOUBLE
+ufunc_xlogy_types[6] = NPY_CFLOAT
+ufunc_xlogy_types[7] = NPY_CFLOAT
+ufunc_xlogy_types[8] = NPY_CFLOAT
+ufunc_xlogy_types[9] = NPY_CDOUBLE
+ufunc_xlogy_types[10] = NPY_CDOUBLE
+ufunc_xlogy_types[11] = NPY_CDOUBLE
+ufunc_xlogy_ptr[2*0] = _func_xlogy[double]
+ufunc_xlogy_ptr[2*0+1] = ("xlogy")
+ufunc_xlogy_ptr[2*1] = _func_xlogy[double]
+ufunc_xlogy_ptr[2*1+1] = ("xlogy")
+ufunc_xlogy_ptr[2*2] = _func_xlogy[double_complex]
+ufunc_xlogy_ptr[2*2+1] = ("xlogy")
+ufunc_xlogy_ptr[2*3] = _func_xlogy[double_complex]
+ufunc_xlogy_ptr[2*3+1] = ("xlogy")
+ufunc_xlogy_data[0] = &ufunc_xlogy_ptr[2*0]
+ufunc_xlogy_data[1] = &ufunc_xlogy_ptr[2*1]
+ufunc_xlogy_data[2] = &ufunc_xlogy_ptr[2*2]
+ufunc_xlogy_data[3] = &ufunc_xlogy_ptr[2*3]
+xlogy = np.PyUFunc_FromFuncAndData(ufunc_xlogy_loops, ufunc_xlogy_data, ufunc_xlogy_types, 4, 2, 1, 0, "xlogy", ufunc_xlogy_doc, 0)
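+# Illustrative usage (added note, not part of the generated source; the
+# values shown are the mathematically expected results of ``x * log(y)``):
+#
+#     >>> from scipy.special import xlogy
+#     >>> xlogy(0, 0)       # 0 by convention, not 0 * log(0) = nan
+#     0.0
+#     >>> xlogy(2, 3)       # == 2 * log(3)
+#     2.1972245773362196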
+
+cdef np.PyUFuncGenericFunction ufunc_y0_loops[2]
+cdef void *ufunc_y0_ptr[4]
+cdef void *ufunc_y0_data[2]
+cdef char ufunc_y0_types[4]
+cdef char *ufunc_y0_doc = (
+    "y0(x, out=None)\n"
+    "\n"
+    "Bessel function of the second kind of order 0.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Argument (float).\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "Y : scalar or ndarray\n"
+    "    Value of the Bessel function of the second kind of order 0 at `x`.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "\n"
+    "The domain is divided into the intervals [0, 5] and (5, infinity). In the\n"
+    "first interval a rational approximation :math:`R(x)` is employed to\n"
+    "compute,\n"
+    "\n"
+    ".. math::\n"
+    "\n"
+    "    Y_0(x) = R(x) + \\frac{2 \\log(x) J_0(x)}{\\pi},\n"
+    "\n"
+    "where :math:`J_0` is the Bessel function of the first kind of order 0.\n"
+    "\n"
+    "In the second interval, the Hankel asymptotic expansion is employed with\n"
+    "two rational functions of degree 6/6 and 7/7.\n"
+    "\n"
+    "This function is a wrapper for the Cephes [1]_ routine `y0`.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "j0: Bessel function of the first kind of order 0\n"
+    "yv: Bessel function of the first kind\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Cephes Mathematical Functions Library,\n"
+    "       http://www.netlib.org/cephes/\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Calculate the function at one point:\n"
+    "\n"
+    ">>> from scipy.special import y0\n"
+    ">>> y0(1.)\n"
+    "0.08825696421567697\n"
+    "\n"
+    "Calculate at several points:\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> y0(np.array([0.5, 2., 3.]))\n"
+    "array([-0.44451873,  0.51037567,  0.37685001])\n"
+    "\n"
+    "Plot the function from 0 to 10.\n"
+    "\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> x = np.linspace(0., 10., 1000)\n"
+    ">>> y = y0(x)\n"
+    ">>> ax.plot(x, y)\n"
+    ">>> plt.show()")
+ufunc_y0_loops[0] = loop_d_d__As_f_f
+ufunc_y0_loops[1] = loop_d_d__As_d_d
+ufunc_y0_types[0] = NPY_FLOAT
+ufunc_y0_types[1] = NPY_FLOAT
+ufunc_y0_types[2] = NPY_DOUBLE
+ufunc_y0_types[3] = NPY_DOUBLE
+ufunc_y0_ptr[2*0] = _func_y0
+ufunc_y0_ptr[2*0+1] = ("y0")
+ufunc_y0_ptr[2*1] = _func_y0
+ufunc_y0_ptr[2*1+1] = ("y0")
+ufunc_y0_data[0] = &ufunc_y0_ptr[2*0]
+ufunc_y0_data[1] = &ufunc_y0_ptr[2*1]
+y0 = np.PyUFunc_FromFuncAndData(ufunc_y0_loops, ufunc_y0_data, ufunc_y0_types, 2, 1, 1, 0, "y0", ufunc_y0_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_y1_loops[2]
+cdef void *ufunc_y1_ptr[4]
+cdef void *ufunc_y1_data[2]
+cdef char ufunc_y1_types[4]
+cdef char *ufunc_y1_doc = (
+    "y1(x, out=None)\n"
+    "\n"
+    "Bessel function of the second kind of order 1.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like\n"
+    "    Argument (float).\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "Y : scalar or ndarray\n"
+    "    Value of the Bessel function of the second kind of order 1 at `x`.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "\n"
+    "The domain is divided into the intervals [0, 8] and (8, infinity). In the\n"
+    "first interval a 25 term Chebyshev expansion is used, and computing\n"
+    ":math:`J_1` (the Bessel function of the first kind) is required. In the\n"
+    "second, the asymptotic trigonometric representation is employed using two\n"
+    "rational functions of degree 5/5.\n"
+    "\n"
+    "This function is a wrapper for the Cephes [1]_ routine `y1`.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "j1: Bessel function of the first kind of order 1\n"
+    "yn: Bessel function of the second kind\n"
+    "yv: Bessel function of the second kind\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Cephes Mathematical Functions Library,\n"
+    "       http://www.netlib.org/cephes/\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Calculate the function at one point:\n"
+    "\n"
+    ">>> from scipy.special import y1\n"
+    ">>> y1(1.)\n"
+    "-0.7812128213002888\n"
+    "\n"
+    "Calculate at several points:\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> y1(np.array([0.5, 2., 3.]))\n"
+    "array([-1.47147239, -0.10703243,  0.32467442])\n"
+    "\n"
+    "Plot the function from 0 to 10.\n"
+    "\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> x = np.linspace(0., 10., 1000)\n"
+    ">>> y = y1(x)\n"
+    ">>> ax.plot(x, y)\n"
+    ">>> plt.show()")
+ufunc_y1_loops[0] = loop_d_d__As_f_f
+ufunc_y1_loops[1] = loop_d_d__As_d_d
+ufunc_y1_types[0] = NPY_FLOAT
+ufunc_y1_types[1] = NPY_FLOAT
+ufunc_y1_types[2] = NPY_DOUBLE
+ufunc_y1_types[3] = NPY_DOUBLE
+ufunc_y1_ptr[2*0] = _func_y1
+ufunc_y1_ptr[2*0+1] = ("y1")
+ufunc_y1_ptr[2*1] = _func_y1
+ufunc_y1_ptr[2*1+1] = ("y1")
+ufunc_y1_data[0] = &ufunc_y1_ptr[2*0]
+ufunc_y1_data[1] = &ufunc_y1_ptr[2*1]
+y1 = np.PyUFunc_FromFuncAndData(ufunc_y1_loops, ufunc_y1_data, ufunc_y1_types, 2, 1, 1, 0, "y1", ufunc_y1_doc, 0)
+
+cdef np.PyUFuncGenericFunction ufunc_yn_loops[3]
+cdef void *ufunc_yn_ptr[6]
+cdef void *ufunc_yn_data[3]
+cdef char ufunc_yn_types[9]
+cdef char *ufunc_yn_doc = (
+    "yn(n, x, out=None)\n"
+    "\n"
+    "Bessel function of the second kind of integer order and real argument.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "n : array_like\n"
+    "    Order (integer).\n"
+    "x : array_like\n"
+    "    Argument (float).\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "Y : scalar or ndarray\n"
+    "    Value of the Bessel function, :math:`Y_n(x)`.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "Wrapper for the Cephes [1]_ routine `yn`.\n"
+    "\n"
+    "The function is evaluated by forward recurrence on `n`, starting with\n"
+    "values computed by the Cephes routines `y0` and `y1`. If `n = 0` or 1,\n"
+    "the routine for `y0` or `y1` is called directly.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "yv : For real order and real or complex argument.\n"
+    "y0: faster implementation of this function for order 0\n"
+    "y1: faster implementation of this function for order 1\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Cephes Mathematical Functions Library,\n"
+    "       http://www.netlib.org/cephes/\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Evaluate the function of order 0 at one point.\n"
+    "\n"
+    ">>> from scipy.special import yn\n"
+    ">>> yn(0, 1.)\n"
+    "0.08825696421567697\n"
+    "\n"
+    "Evaluate the function at one point for different orders.\n"
+    "\n"
+    ">>> yn(0, 1.), yn(1, 1.), yn(2, 1.)\n"
+    "(0.08825696421567697, -0.7812128213002888, -1.6506826068162546)\n"
+    "\n"
+    "The evaluation for different orders can be carried out in one call by\n"
+    "providing a list or NumPy array as argument for the `v` parameter:\n"
+    "\n"
+    ">>> yn([0, 1, 2], 1.)\n"
+    "array([ 0.08825696, -0.78121282, -1.65068261])\n"
+    "\n"
+    "Evaluate the function at several points for order 0 by providing an\n"
+    "array for `z`.\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> points = np.array([0.5, 3., 8.])\n"
+    ">>> yn(0, points)\n"
+    "array([-0.44451873,  0.37685001,  0.22352149])\n"
+    "\n"
+    "If `z` is an array, the order parameter `v` must be broadcastable to\n"
+    "the correct shape if different orders shall be computed in one call.\n"
+    "To calculate the orders 0 and 1 for an 1D array:\n"
+    "\n"
+    ">>> orders = np.array([[0], [1]])\n"
+    ">>> orders.shape\n"
+    "(2, 1)\n"
+    "\n"
+    ">>> yn(orders, points)\n"
+    "array([[-0.44451873,  0.37685001,  0.22352149],\n"
+    "       [-1.47147239,  0.32467442, -0.15806046]])\n"
+    "\n"
+    "Plot the functions of order 0 to 3 from 0 to 10.\n"
+    "\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> x = np.linspace(0., 10., 1000)\n"
+    ">>> for i in range(4):\n"
+    "...     ax.plot(x, yn(i, x), label=f'$Y_{i!r}$')\n"
+    ">>> ax.set_ylim(-3, 1)\n"
+    ">>> ax.legend()\n"
+    ">>> plt.show()")
+ufunc_yn_loops[0] = loop_d_id__As_ld_d
+ufunc_yn_loops[1] = loop_d_dd__As_ff_f
+ufunc_yn_loops[2] = loop_d_dd__As_dd_d
+ufunc_yn_types[0] = NPY_LONG
+ufunc_yn_types[1] = NPY_DOUBLE
+ufunc_yn_types[2] = NPY_DOUBLE
+ufunc_yn_types[3] = NPY_FLOAT
+ufunc_yn_types[4] = NPY_FLOAT
+ufunc_yn_types[5] = NPY_FLOAT
+ufunc_yn_types[6] = NPY_DOUBLE
+ufunc_yn_types[7] = NPY_DOUBLE
+ufunc_yn_types[8] = NPY_DOUBLE
+ufunc_yn_ptr[2*0] = _func_yn
+ufunc_yn_ptr[2*0+1] = ("yn")
+ufunc_yn_ptr[2*1] = _func_yn_unsafe
+ufunc_yn_ptr[2*1+1] = ("yn")
+ufunc_yn_ptr[2*2] = _func_yn_unsafe
+ufunc_yn_ptr[2*2+1] = ("yn")
+ufunc_yn_data[0] = &ufunc_yn_ptr[2*0]
+ufunc_yn_data[1] = &ufunc_yn_ptr[2*1]
+ufunc_yn_data[2] = &ufunc_yn_ptr[2*2]
+yn = np.PyUFunc_FromFuncAndData(ufunc_yn_loops, ufunc_yn_data, ufunc_yn_types, 3, 2, 1, 0, "yn", ufunc_yn_doc, 0)
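+# Added check (not part of the generated source): the forward recurrence from
+# the docstring Notes, Y_{n+1}(x) = (2n/x) * Y_n(x) - Y_{n-1}(x), can be
+# verified against the documented values for n = 1, x = 1:
+#
+#     >>> from scipy.special import yn
+#     >>> 2 * yn(1, 1.) - yn(0, 1.)   # should reproduce yn(2, 1.)
+#     -1.6506826068162546
+#     >>> yn(2, 1.)
+#     -1.6506826068162546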
+
+cdef np.PyUFuncGenericFunction ufunc_yv_loops[4]
+cdef void *ufunc_yv_ptr[8]
+cdef void *ufunc_yv_data[4]
+cdef char ufunc_yv_types[12]
+cdef char *ufunc_yv_doc = (
+    "yv(v, z, out=None)\n"
+    "\n"
+    "Bessel function of the second kind of real order and complex argument.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "v : array_like\n"
+    "    Order (float).\n"
+    "z : array_like\n"
+    "    Argument (float or complex).\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "Y : scalar or ndarray\n"
+    "    Value of the Bessel function of the second kind, :math:`Y_v(x)`.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "For positive `v` values, the computation is carried out using the\n"
+    "AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel\n"
+    "Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`,\n"
+    "\n"
+    ".. math:: Y_v(z) = \\frac{1}{2\\imath} (H_v^{(1)} - H_v^{(2)}).\n"
+    "\n"
+    "For negative `v` values the formula,\n"
+    "\n"
+    ".. math:: Y_{-v}(z) = Y_v(z) \\cos(\\pi v) + J_v(z) \\sin(\\pi v)\n"
+    "\n"
+    "is used, where :math:`J_v(z)` is the Bessel function of the first kind,\n"
+    "computed using the AMOS routine `zbesj`.  Note that the second term is\n"
+    "exactly zero for integer `v`; to improve accuracy the second term is\n"
+    "explicitly omitted for `v` values such that `v = floor(v)`.\n"
+    "\n"
+    "See also\n"
+    "--------\n"
+    "yve : :math:`Y_v` with leading exponential behavior stripped off.\n"
+    "y0: faster implementation of this function for order 0\n"
+    "y1: faster implementation of this function for order 1\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Donald E. Amos, \"AMOS, A Portable Package for Bessel Functions\n"
+    "       of a Complex Argument and Nonnegative Order\",\n"
+    "       http://netlib.org/amos/\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Evaluate the function of order 0 at one point.\n"
+    "\n"
+    ">>> from scipy.special import yv\n"
+    ">>> yv(0, 1.)\n"
+    "0.088256964215677\n"
+    "\n"
+    "Evaluate the function at one point for different orders.\n"
+    "\n"
+    ">>> yv(0, 1.), yv(1, 1.), yv(1.5, 1.)\n"
+    "(0.088256964215677, -0.7812128213002889, -1.102495575160179)\n"
+    "\n"
+    "The evaluation for different orders can be carried out in one call by\n"
+    "providing a list or NumPy array as argument for the `v` parameter:\n"
+    "\n"
+    ">>> yv([0, 1, 1.5], 1.)\n"
+    "array([ 0.08825696, -0.78121282, -1.10249558])\n"
+    "\n"
+    "Evaluate the function at several points for order 0 by providing an\n"
+    "array for `z`.\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> points = np.array([0.5, 3., 8.])\n"
+    ">>> yv(0, points)\n"
+    "array([-0.44451873,  0.37685001,  0.22352149])\n"
+    "\n"
+    "If `z` is an array, the order parameter `v` must be broadcastable to\n"
+    "the correct shape if different orders shall be computed in one call.\n"
+    "To calculate the orders 0 and 1 for an 1D array:\n"
+    "\n"
+    ">>> orders = np.array([[0], [1]])\n"
+    ">>> orders.shape\n"
+    "(2, 1)\n"
+    "\n"
+    ">>> yv(orders, points)\n"
+    "array([[-0.44451873,  0.37685001,  0.22352149],\n"
+    "       [-1.47147239,  0.32467442, -0.15806046]])\n"
+    "\n"
+    "Plot the functions of order 0 to 3 from 0 to 10.\n"
+    "\n"
+    ">>> import matplotlib.pyplot as plt\n"
+    ">>> fig, ax = plt.subplots()\n"
+    ">>> x = np.linspace(0., 10., 1000)\n"
+    ">>> for i in range(4):\n"
+    "...     ax.plot(x, yv(i, x), label=f'$Y_{i!r}$')\n"
+    ">>> ax.set_ylim(-3, 1)\n"
+    ">>> ax.legend()\n"
+    ">>> plt.show()")
+ufunc_yv_loops[0] = loop_d_dd__As_ff_f
+ufunc_yv_loops[1] = loop_D_dD__As_fF_F
+ufunc_yv_loops[2] = loop_d_dd__As_dd_d
+ufunc_yv_loops[3] = loop_D_dD__As_dD_D
+ufunc_yv_types[0] = NPY_FLOAT
+ufunc_yv_types[1] = NPY_FLOAT
+ufunc_yv_types[2] = NPY_FLOAT
+ufunc_yv_types[3] = NPY_FLOAT
+ufunc_yv_types[4] = NPY_CFLOAT
+ufunc_yv_types[5] = NPY_CFLOAT
+ufunc_yv_types[6] = NPY_DOUBLE
+ufunc_yv_types[7] = NPY_DOUBLE
+ufunc_yv_types[8] = NPY_DOUBLE
+ufunc_yv_types[9] = NPY_DOUBLE
+ufunc_yv_types[10] = NPY_CDOUBLE
+ufunc_yv_types[11] = NPY_CDOUBLE
+ufunc_yv_ptr[2*0] = _func_cbesy_wrap_real
+ufunc_yv_ptr[2*0+1] = ("yv")
+ufunc_yv_ptr[2*1] = _func_cbesy_wrap
+ufunc_yv_ptr[2*1+1] = ("yv")
+ufunc_yv_ptr[2*2] = _func_cbesy_wrap_real
+ufunc_yv_ptr[2*2+1] = ("yv")
+ufunc_yv_ptr[2*3] = _func_cbesy_wrap
+ufunc_yv_ptr[2*3+1] = ("yv")
+ufunc_yv_data[0] = &ufunc_yv_ptr[2*0]
+ufunc_yv_data[1] = &ufunc_yv_ptr[2*1]
+ufunc_yv_data[2] = &ufunc_yv_ptr[2*2]
+ufunc_yv_data[3] = &ufunc_yv_ptr[2*3]
+yv = np.PyUFunc_FromFuncAndData(ufunc_yv_loops, ufunc_yv_data, ufunc_yv_types, 4, 2, 1, 0, "yv", ufunc_yv_doc, 0)
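+# Added check (not part of the generated source): the negative-order formula
+# quoted in the docstring Notes can be confirmed numerically:
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import yv, jv
+#     >>> v, z = 0.5, 1.
+#     >>> np.allclose(yv(-v, z),
+#     ...             yv(v, z) * np.cos(np.pi * v) + jv(v, z) * np.sin(np.pi * v))
+#     True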
+
+cdef np.PyUFuncGenericFunction ufunc_yve_loops[4]
+cdef void *ufunc_yve_ptr[8]
+cdef void *ufunc_yve_data[4]
+cdef char ufunc_yve_types[12]
+cdef char *ufunc_yve_doc = (
+    "yve(v, z, out=None)\n"
+    "\n"
+    "Exponentially scaled Bessel function of the second kind of real order.\n"
+    "\n"
+    "Returns the exponentially scaled Bessel function of the second\n"
+    "kind of real order `v` at complex `z`::\n"
+    "\n"
+    "    yve(v, z) = yv(v, z) * exp(-abs(z.imag))\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "v : array_like\n"
+    "    Order (float).\n"
+    "z : array_like\n"
+    "    Argument (float or complex).\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "Y : scalar or ndarray\n"
+    "    Value of the exponentially scaled Bessel function.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "yv: Unscaled Bessel function of the second kind of real order.\n"
+    "\n"
+    "Notes\n"
+    "-----\n"
+    "For positive `v` values, the computation is carried out using the\n"
+    "AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel\n"
+    "Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`,\n"
+    "\n"
+    ".. math:: Y_v(z) = \\frac{1}{2\\imath} (H_v^{(1)} - H_v^{(2)}).\n"
+    "\n"
+    "For negative `v` values the formula,\n"
+    "\n"
+    ".. math:: Y_{-v}(z) = Y_v(z) \\cos(\\pi v) + J_v(z) \\sin(\\pi v)\n"
+    "\n"
+    "is used, where :math:`J_v(z)` is the Bessel function of the first kind,\n"
+    "computed using the AMOS routine `zbesj`.  Note that the second term is\n"
+    "exactly zero for integer `v`; to improve accuracy the second term is\n"
+    "explicitly omitted for `v` values such that `v = floor(v)`.\n"
+    "\n"
+    "Exponentially scaled Bessel functions are useful for large `z`:\n"
+    "for these, the unscaled Bessel functions can easily under-or overflow.\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [1] Donald E. Amos, \"AMOS, A Portable Package for Bessel Functions\n"
+    "       of a Complex Argument and Nonnegative Order\",\n"
+    "       http://netlib.org/amos/\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    "Compare the output of `yv` and `yve` for large complex arguments for `z`\n"
+    "by computing their values for order ``v=1`` at ``z=1000j``. We see that\n"
+    "`yv` returns nan but `yve` returns a finite number:\n"
+    "\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import yv, yve\n"
+    ">>> v = 1\n"
+    ">>> z = 1000j\n"
+    ">>> yv(v, z), yve(v, z)\n"
+    "((nan+nanj), (-0.012610930256928629+7.721967686709076e-19j))\n"
+    "\n"
+    "For real arguments for `z`, `yve` returns the same as `yv` up to\n"
+    "floating point errors.\n"
+    "\n"
+    ">>> v, z = 1, 1000\n"
+    ">>> yv(v, z), yve(v, z)\n"
+    "(-0.02478433129235178, -0.02478433129235179)\n"
+    "\n"
+    "The function can be evaluated for several orders at the same time by\n"
+    "providing a list or NumPy array for `v`:\n"
+    "\n"
+    ">>> yve([1, 2, 3], 1j)\n"
+    "array([-0.20791042+0.14096627j,  0.38053618-0.04993878j,\n"
+    "       0.00815531-1.66311097j])\n"
+    "\n"
+    "In the same way, the function can be evaluated at several points in one\n"
+    "call by providing a list or NumPy array for `z`:\n"
+    "\n"
+    ">>> yve(1, np.array([1j, 2j, 3j]))\n"
+    "array([-0.20791042+0.14096627j, -0.21526929+0.01205044j,\n"
+    "       -0.19682671+0.00127278j])\n"
+    "\n"
+    "It is also possible to evaluate several orders at several points\n"
+    "at the same time by providing arrays for `v` and `z` with\n"
+    "broadcasting compatible shapes. Compute `yve` for two different orders\n"
+    "`v` and three points `z` resulting in a 2x3 array.\n"
+    "\n"
+    ">>> v = np.array([[1], [2]])\n"
+    ">>> z = np.array([3j, 4j, 5j])\n"
+    ">>> v.shape, z.shape\n"
+    "((2, 1), (3,))\n"
+    "\n"
+    ">>> yve(v, z)\n"
+    "array([[-1.96826713e-01+1.27277544e-03j, -1.78750840e-01+1.45558819e-04j,\n"
+    "        -1.63972267e-01+1.73494110e-05j],\n"
+    "       [1.94960056e-03-1.11782545e-01j,  2.02902325e-04-1.17626501e-01j,\n"
+    "        2.27727687e-05-1.17951906e-01j]])")
+ufunc_yve_loops[0] = loop_d_dd__As_ff_f
+ufunc_yve_loops[1] = loop_D_dD__As_fF_F
+ufunc_yve_loops[2] = loop_d_dd__As_dd_d
+ufunc_yve_loops[3] = loop_D_dD__As_dD_D
+ufunc_yve_types[0] = NPY_FLOAT
+ufunc_yve_types[1] = NPY_FLOAT
+ufunc_yve_types[2] = NPY_FLOAT
+ufunc_yve_types[3] = NPY_FLOAT
+ufunc_yve_types[4] = NPY_CFLOAT
+ufunc_yve_types[5] = NPY_CFLOAT
+ufunc_yve_types[6] = NPY_DOUBLE
+ufunc_yve_types[7] = NPY_DOUBLE
+ufunc_yve_types[8] = NPY_DOUBLE
+ufunc_yve_types[9] = NPY_DOUBLE
+ufunc_yve_types[10] = NPY_CDOUBLE
+ufunc_yve_types[11] = NPY_CDOUBLE
+ufunc_yve_ptr[2*0] = _func_cbesy_wrap_e_real
+ufunc_yve_ptr[2*0+1] = ("yve")
+ufunc_yve_ptr[2*1] = _func_cbesy_wrap_e
+ufunc_yve_ptr[2*1+1] = ("yve")
+ufunc_yve_ptr[2*2] = _func_cbesy_wrap_e_real
+ufunc_yve_ptr[2*2+1] = ("yve")
+ufunc_yve_ptr[2*3] = _func_cbesy_wrap_e
+ufunc_yve_ptr[2*3+1] = ("yve")
+ufunc_yve_data[0] = &ufunc_yve_ptr[2*0]
+ufunc_yve_data[1] = &ufunc_yve_ptr[2*1]
+ufunc_yve_data[2] = &ufunc_yve_ptr[2*2]
+ufunc_yve_data[3] = &ufunc_yve_ptr[2*3]
+yve = np.PyUFunc_FromFuncAndData(ufunc_yve_loops, ufunc_yve_data, ufunc_yve_types, 4, 2, 1, 0, "yve", ufunc_yve_doc, 0)
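+# Added check (not part of the generated source): the scaling relation
+# ``yve(v, z) = yv(v, z) * exp(-abs(z.imag))`` from the docstring holds at a
+# moderate complex argument where both functions are finite:
+#
+#     >>> import numpy as np
+#     >>> from scipy.special import yv, yve
+#     >>> z = 1 + 2j
+#     >>> np.allclose(yve(1, z), yv(1, z) * np.exp(-abs(z.imag)))
+#     True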
+
+cdef np.PyUFuncGenericFunction ufunc_zetac_loops[2]
+cdef void *ufunc_zetac_ptr[4]
+cdef void *ufunc_zetac_data[2]
+cdef char ufunc_zetac_types[4]
+cdef char *ufunc_zetac_doc = (
+    "zetac(x, out=None)\n"
+    "\n"
+    "Riemann zeta function minus 1.\n"
+    "\n"
+    "This function is defined as\n"
+    "\n"
+    ".. math:: \\zeta(x) = \\sum_{k=2}^{\\infty} 1 / k^x,\n"
+    "\n"
+    "where ``x > 1``.  For ``x < 1`` the analytic continuation is\n"
+    "computed. For more information on the Riemann zeta function, see\n"
+    "[dlmf]_.\n"
+    "\n"
+    "Parameters\n"
+    "----------\n"
+    "x : array_like of float\n"
+    "    Values at which to compute zeta(x) - 1 (must be real).\n"
+    "out : ndarray, optional\n"
+    "    Optional output array for the function results\n"
+    "\n"
+    "Returns\n"
+    "-------\n"
+    "scalar or ndarray\n"
+    "    Values of zeta(x) - 1.\n"
+    "\n"
+    "See Also\n"
+    "--------\n"
+    "zeta\n"
+    "\n"
+    "Examples\n"
+    "--------\n"
+    ">>> import numpy as np\n"
+    ">>> from scipy.special import zetac, zeta\n"
+    "\n"
+    "Some special values:\n"
+    "\n"
+    ">>> zetac(2), np.pi**2/6 - 1\n"
+    "(0.64493406684822641, 0.6449340668482264)\n"
+    "\n"
+    ">>> zetac(-1), -1.0/12 - 1\n"
+    "(-1.0833333333333333, -1.0833333333333333)\n"
+    "\n"
+    "Compare ``zetac(x)`` to ``zeta(x) - 1`` for large `x`:\n"
+    "\n"
+    ">>> zetac(60), zeta(60) - 1\n"
+    "(8.673617380119933e-19, 0.0)\n"
+    "\n"
+    "References\n"
+    "----------\n"
+    ".. [dlmf] NIST Digital Library of Mathematical Functions\n"
+    "          https://dlmf.nist.gov/25")
+ufunc_zetac_loops[0] = loop_d_d__As_f_f
+ufunc_zetac_loops[1] = loop_d_d__As_d_d
+ufunc_zetac_types[0] = NPY_FLOAT
+ufunc_zetac_types[1] = NPY_FLOAT
+ufunc_zetac_types[2] = NPY_DOUBLE
+ufunc_zetac_types[3] = NPY_DOUBLE
+ufunc_zetac_ptr[2*0] = _func_zetac
+ufunc_zetac_ptr[2*0+1] = ("zetac")
+ufunc_zetac_ptr[2*1] = _func_zetac
+ufunc_zetac_ptr[2*1+1] = ("zetac")
+ufunc_zetac_data[0] = &ufunc_zetac_ptr[2*0]
+ufunc_zetac_data[1] = &ufunc_zetac_ptr[2*1]
+zetac = np.PyUFunc_FromFuncAndData(ufunc_zetac_loops, ufunc_zetac_data, ufunc_zetac_types, 2, 1, 1, 0, "zetac", ufunc_zetac_doc, 0)
+
+#
+# Aliases
+#
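+# Added note: ``jn`` is kept as an alias of ``jv`` (Bessel function of the
+# first kind); despite the name it accepts real, not only integer, orders.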
+jn = jv
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/_ufuncs_cxx.pxd b/__packaged__/coreml/.python_dependencies/scipy/special/_ufuncs_cxx.pxd
new file mode 100644
index 00000000..ff823445
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/_ufuncs_cxx.pxd
@@ -0,0 +1,41 @@
+from . cimport sf_error
+cdef void _set_action(sf_error.sf_error_t, sf_error.sf_action_t) nogil
+cdef void *_export_faddeeva_dawsn
+cdef void *_export_faddeeva_dawsn_complex
+cdef void *_export_fellint_RC
+cdef void *_export_cellint_RC
+cdef void *_export_fellint_RD
+cdef void *_export_cellint_RD
+cdef void *_export_fellint_RF
+cdef void *_export_cellint_RF
+cdef void *_export_fellint_RG
+cdef void *_export_cellint_RG
+cdef void *_export_fellint_RJ
+cdef void *_export_cellint_RJ
+cdef void *_export_faddeeva_erf
+cdef void *_export_faddeeva_erfc_complex
+cdef void *_export_faddeeva_erfcx
+cdef void *_export_faddeeva_erfcx_complex
+cdef void *_export_faddeeva_erfi
+cdef void *_export_faddeeva_erfi_complex
+cdef void *_export_erfinv_float
+cdef void *_export_erfinv_double
+cdef void *_export_expit
+cdef void *_export_expitf
+cdef void *_export_expitl
+cdef void *_export_hyp1f1_double
+cdef void *_export_log_expit
+cdef void *_export_log_expitf
+cdef void *_export_log_expitl
+cdef void *_export_faddeeva_log_ndtr
+cdef void *_export_faddeeva_log_ndtr_complex
+cdef void *_export_logit
+cdef void *_export_logitf
+cdef void *_export_logitl
+cdef void *_export_faddeeva_ndtr
+cdef void *_export_powm1_float
+cdef void *_export_powm1_double
+cdef void *_export_faddeeva_voigt_profile
+cdef void *_export_faddeeva_w
+cdef void *_export_wrightomega
+cdef void *_export_wrightomega_real
\ No newline at end of file
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/_ufuncs_cxx.pyx b/__packaged__/coreml/.python_dependencies/scipy/special/_ufuncs_cxx.pyx
new file mode 100644
index 00000000..09301f95
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/_ufuncs_cxx.pyx
@@ -0,0 +1,125 @@
+# This file is automatically generated by _generate_pyx.py.
+# Do not edit manually!
+
+from libc.math cimport NAN
+
+include "_ufuncs_extra_code_common.pxi"
+
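+# Added note (not part of the generated source): each block below declares one
+# C/C++ kernel from the build-generated header and re-exports its address as a
+# module-level void pointer; the generated _ufuncs.pyx reads these back (e.g.
+# ``_export_wrightomega``) to fill its ufunc loop tables without linking
+# against this C++ translation unit directly.
+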
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef double _func_faddeeva_dawsn "faddeeva_dawsn"(double) nogil
+cdef void *_export_faddeeva_dawsn = _func_faddeeva_dawsn
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef double complex _func_faddeeva_dawsn_complex "faddeeva_dawsn_complex"(double complex) nogil
+cdef void *_export_faddeeva_dawsn_complex = _func_faddeeva_dawsn_complex
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef double _func_fellint_RC "fellint_RC"(double, double) nogil
+cdef void *_export_fellint_RC = _func_fellint_RC
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef double complex _func_cellint_RC "cellint_RC"(double complex, double complex) nogil
+cdef void *_export_cellint_RC = _func_cellint_RC
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef double _func_fellint_RD "fellint_RD"(double, double, double) nogil
+cdef void *_export_fellint_RD = _func_fellint_RD
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef double complex _func_cellint_RD "cellint_RD"(double complex, double complex, double complex) nogil
+cdef void *_export_cellint_RD = _func_cellint_RD
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef double _func_fellint_RF "fellint_RF"(double, double, double) nogil
+cdef void *_export_fellint_RF = _func_fellint_RF
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef double complex _func_cellint_RF "cellint_RF"(double complex, double complex, double complex) nogil
+cdef void *_export_cellint_RF = _func_cellint_RF
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef double _func_fellint_RG "fellint_RG"(double, double, double) nogil
+cdef void *_export_fellint_RG = _func_fellint_RG
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef double complex _func_cellint_RG "cellint_RG"(double complex, double complex, double complex) nogil
+cdef void *_export_cellint_RG = _func_cellint_RG
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef double _func_fellint_RJ "fellint_RJ"(double, double, double, double) nogil
+cdef void *_export_fellint_RJ = _func_fellint_RJ
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef double complex _func_cellint_RJ "cellint_RJ"(double complex, double complex, double complex, double complex) nogil
+cdef void *_export_cellint_RJ = _func_cellint_RJ
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef double complex _func_faddeeva_erf "faddeeva_erf"(double complex) nogil
+cdef void *_export_faddeeva_erf = _func_faddeeva_erf
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef double complex _func_faddeeva_erfc_complex "faddeeva_erfc_complex"(double complex) nogil
+cdef void *_export_faddeeva_erfc_complex = _func_faddeeva_erfc_complex
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef double _func_faddeeva_erfcx "faddeeva_erfcx"(double) nogil
+cdef void *_export_faddeeva_erfcx = _func_faddeeva_erfcx
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef double complex _func_faddeeva_erfcx_complex "faddeeva_erfcx_complex"(double complex) nogil
+cdef void *_export_faddeeva_erfcx_complex = _func_faddeeva_erfcx_complex
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef double _func_faddeeva_erfi "faddeeva_erfi"(double) nogil
+cdef void *_export_faddeeva_erfi = _func_faddeeva_erfi
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef double complex _func_faddeeva_erfi_complex "faddeeva_erfi_complex"(double complex) nogil
+cdef void *_export_faddeeva_erfi_complex = _func_faddeeva_erfi_complex
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef float _func_erfinv_float "erfinv_float"(float) nogil
+cdef void *_export_erfinv_float = _func_erfinv_float
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef double _func_erfinv_double "erfinv_double"(double) nogil
+cdef void *_export_erfinv_double = _func_erfinv_double
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef double _func_expit "expit"(double) nogil
+cdef void *_export_expit = _func_expit
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef float _func_expitf "expitf"(float) nogil
+cdef void *_export_expitf = _func_expitf
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef long double _func_expitl "expitl"(long double) nogil
+cdef void *_export_expitl = _func_expitl
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef double _func_hyp1f1_double "hyp1f1_double"(double, double, double) nogil
+cdef void *_export_hyp1f1_double = _func_hyp1f1_double
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef double _func_log_expit "log_expit"(double) nogil
+cdef void *_export_log_expit = _func_log_expit
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef float _func_log_expitf "log_expitf"(float) nogil
+cdef void *_export_log_expitf = _func_log_expitf
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef long double _func_log_expitl "log_expitl"(long double) nogil
+cdef void *_export_log_expitl = _func_log_expitl
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef double _func_faddeeva_log_ndtr "faddeeva_log_ndtr"(double) nogil
+cdef void *_export_faddeeva_log_ndtr = _func_faddeeva_log_ndtr
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef double complex _func_faddeeva_log_ndtr_complex "faddeeva_log_ndtr_complex"(double complex) nogil
+cdef void *_export_faddeeva_log_ndtr_complex = _func_faddeeva_log_ndtr_complex
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef double _func_logit "logit"(double) nogil
+cdef void *_export_logit = _func_logit
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef float _func_logitf "logitf"(float) nogil
+cdef void *_export_logitf = _func_logitf
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef long double _func_logitl "logitl"(long double) nogil
+cdef void *_export_logitl = _func_logitl
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef double complex _func_faddeeva_ndtr "faddeeva_ndtr"(double complex) nogil
+cdef void *_export_faddeeva_ndtr = _func_faddeeva_ndtr
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef float _func_powm1_float "powm1_float"(float, float) nogil
+cdef void *_export_powm1_float = _func_powm1_float
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef double _func_powm1_double "powm1_double"(double, double) nogil
+cdef void *_export_powm1_double = _func_powm1_double
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef double _func_faddeeva_voigt_profile "faddeeva_voigt_profile"(double, double, double) nogil
+cdef void *_export_faddeeva_voigt_profile = _func_faddeeva_voigt_profile
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef double complex _func_faddeeva_w "faddeeva_w"(double complex) nogil
+cdef void *_export_faddeeva_w = _func_faddeeva_w
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef double complex _func_wrightomega "wrightomega"(double complex) nogil
+cdef void *_export_wrightomega = _func_wrightomega
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_cxx_defs.h":
+    cdef double _func_wrightomega_real "wrightomega_real"(double) nogil
+cdef void *_export_wrightomega_real = _func_wrightomega_real
+# distutils: language = c++
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/_ufuncs_cxx_defs.h b/__packaged__/coreml/.python_dependencies/scipy/special/_ufuncs_cxx_defs.h
new file mode 100644
index 00000000..0e74a4c9
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/_ufuncs_cxx_defs.h
@@ -0,0 +1,47 @@
+#ifndef UFUNCS_PROTO_H
+#define UFUNCS_PROTO_H 1
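+/* Added note: prototypes for the C/C++ kernels behind the C++-backed ufuncs.
+   _ufuncs_cxx.pyx binds these names in extern blocks and re-exports their
+   addresses for the ufunc dispatch tables. */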
+#include "_faddeeva.h"
+npy_double faddeeva_dawsn(npy_double);
+npy_cdouble faddeeva_dawsn_complex(npy_cdouble);
+#include "ellint_carlson_wrap.hh"
+npy_double fellint_RC(npy_double, npy_double);
+npy_cdouble cellint_RC(npy_cdouble, npy_cdouble);
+npy_double fellint_RD(npy_double, npy_double, npy_double);
+npy_cdouble cellint_RD(npy_cdouble, npy_cdouble, npy_cdouble);
+npy_double fellint_RF(npy_double, npy_double, npy_double);
+npy_cdouble cellint_RF(npy_cdouble, npy_cdouble, npy_cdouble);
+npy_double fellint_RG(npy_double, npy_double, npy_double);
+npy_cdouble cellint_RG(npy_cdouble, npy_cdouble, npy_cdouble);
+npy_double fellint_RJ(npy_double, npy_double, npy_double, npy_double);
+npy_cdouble cellint_RJ(npy_cdouble, npy_cdouble, npy_cdouble, npy_cdouble);
+npy_cdouble faddeeva_erf(npy_cdouble);
+npy_cdouble faddeeva_erfc_complex(npy_cdouble);
+npy_double faddeeva_erfcx(npy_double);
+npy_cdouble faddeeva_erfcx_complex(npy_cdouble);
+npy_double faddeeva_erfi(npy_double);
+npy_cdouble faddeeva_erfi_complex(npy_cdouble);
+#include "boost_special_functions.h"
+npy_float erfinv_float(npy_float);
+npy_double erfinv_double(npy_double);
+#include "_logit.h"
+npy_double expit(npy_double);
+npy_float expitf(npy_float);
+npy_longdouble expitl(npy_longdouble);
+npy_double hyp1f1_double(npy_double, npy_double, npy_double);
+npy_double log_expit(npy_double);
+npy_float log_expitf(npy_float);
+npy_longdouble log_expitl(npy_longdouble);
+npy_double faddeeva_log_ndtr(npy_double);
+npy_cdouble faddeeva_log_ndtr_complex(npy_cdouble);
+npy_double logit(npy_double);
+npy_float logitf(npy_float);
+npy_longdouble logitl(npy_longdouble);
+npy_cdouble faddeeva_ndtr(npy_cdouble);
+npy_float powm1_float(npy_float, npy_float);
+npy_double powm1_double(npy_double, npy_double);
+npy_double faddeeva_voigt_profile(npy_double, npy_double, npy_double);
+npy_cdouble faddeeva_w(npy_cdouble);
+#include "_wright.h"
+npy_cdouble wrightomega(npy_cdouble);
+npy_double wrightomega_real(npy_double);
+#endif
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/_ufuncs_defs.h b/__packaged__/coreml/.python_dependencies/scipy/special/_ufuncs_defs.h
new file mode 100644
index 00000000..02fcdaf4
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/_ufuncs_defs.h
@@ -0,0 +1,215 @@
+#ifndef UFUNCS_PROTO_H
+#define UFUNCS_PROTO_H 1
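+/* Added note: prototypes for the C kernels wrapped by the generated
+   _ufuncs.pyx, grouped by backing library (cosine, Cephes, AMOS, cdflib,
+   specfun) via the interleaved #include lines below. */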
+#include "_cosine.h"
+npy_double cosine_cdf(npy_double);
+npy_double cosine_invcdf(npy_double);
+#include "cephes.h"
+npy_double cospi(npy_double);
+npy_double igam_fac(npy_double, npy_double);
+npy_double kolmogc(npy_double);
+npy_double kolmogci(npy_double);
+npy_double kolmogp(npy_double);
+npy_double lanczos_sum_expg_scaled(npy_double);
+npy_double lgam1p(npy_double);
+npy_double log1pmx(npy_double);
+npy_double riemann_zeta(npy_double);
+npy_double sinpi(npy_double);
+npy_double smirnovc(npy_int, npy_double);
+npy_double smirnovci(npy_int, npy_double);
+npy_double smirnovp(npy_int, npy_double);
+npy_double struve_asymp_large_z(npy_double, npy_double, npy_int, npy_double *);
+npy_double struve_bessel_series(npy_double, npy_double, npy_int, npy_double *);
+npy_double struve_power_series(npy_double, npy_double, npy_int, npy_double *);
+npy_double zeta(npy_double, npy_double);
+#include "amos_wrappers.h"
+npy_int airy_wrap(npy_double, npy_double *, npy_double *, npy_double *, npy_double *);
+npy_int cairy_wrap(npy_cdouble, npy_cdouble *, npy_cdouble *, npy_cdouble *, npy_cdouble *);
+npy_int cairy_wrap_e(npy_cdouble, npy_cdouble *, npy_cdouble *, npy_cdouble *, npy_cdouble *);
+npy_int cairy_wrap_e_real(npy_double, npy_double *, npy_double *, npy_double *, npy_double *);
+npy_double bdtr(npy_double, npy_int, npy_double);
+npy_double bdtrc(npy_double, npy_int, npy_double);
+npy_double bdtri(npy_double, npy_int, npy_double);
+#include "cdf_wrappers.h"
+npy_double cdfbin2_wrap(npy_double, npy_double, npy_double);
+npy_double cdfbin3_wrap(npy_double, npy_double, npy_double);
+#include "specfun_wrappers.h"
+npy_double bei_wrap(npy_double);
+npy_double beip_wrap(npy_double);
+npy_double ber_wrap(npy_double);
+npy_double berp_wrap(npy_double);
+npy_double besselpoly(npy_double, npy_double, npy_double);
+npy_double beta(npy_double, npy_double);
+npy_double incbet(npy_double, npy_double, npy_double);
+npy_double incbi(npy_double, npy_double, npy_double);
+npy_double lbeta(npy_double, npy_double);
+npy_double btdtr(npy_double, npy_double, npy_double);
+npy_double cdfbet3_wrap(npy_double, npy_double, npy_double);
+npy_double cdfbet4_wrap(npy_double, npy_double, npy_double);
+npy_double cbrt(npy_double);
+npy_double chdtr(npy_double, npy_double);
+npy_double chdtrc(npy_double, npy_double);
+npy_double chdtri(npy_double, npy_double);
+npy_double cdfchi3_wrap(npy_double, npy_double);
+npy_double cdfchn1_wrap(npy_double, npy_double, npy_double);
+npy_double cdfchn3_wrap(npy_double, npy_double, npy_double);
+npy_double cdfchn4_wrap(npy_double, npy_double, npy_double);
+npy_double cdfchn2_wrap(npy_double, npy_double, npy_double);
+npy_double cosdg(npy_double);
+npy_double cosm1(npy_double);
+npy_double cotdg(npy_double);
+npy_double ellpe(npy_double);
+npy_double ellie(npy_double, npy_double);
+npy_int ellpj(npy_double, npy_double, npy_double *, npy_double *, npy_double *, npy_double *);
+npy_double ellik(npy_double, npy_double);
+npy_double ellpk(npy_double);
+npy_double erf(npy_double);
+npy_double erfc(npy_double);
+npy_double erfcinv(npy_double);
+npy_cdouble cexp1_wrap(npy_cdouble);
+npy_double exp1_wrap(npy_double);
+npy_double exp10(npy_double);
+npy_double exp2(npy_double);
+npy_cdouble cexpi_wrap(npy_cdouble);
+npy_double expi_wrap(npy_double);
+npy_double expm1(npy_double);
+npy_double expn(npy_int, npy_double);
+npy_double fdtr(npy_double, npy_double, npy_double);
+npy_double fdtrc(npy_double, npy_double, npy_double);
+npy_double fdtri(npy_double, npy_double, npy_double);
+npy_double cdff4_wrap(npy_double, npy_double, npy_double);
+npy_int fresnl(npy_double, npy_double *, npy_double *);
+npy_int cfresnl_wrap(npy_cdouble, npy_cdouble *, npy_cdouble *);
+npy_double Gamma(npy_double);
+npy_double igam(npy_double, npy_double);
+npy_double igamc(npy_double, npy_double);
+npy_double igamci(npy_double, npy_double);
+npy_double igami(npy_double, npy_double);
+npy_double lgam(npy_double);
+npy_double gammasgn(npy_double);
+npy_double gdtr(npy_double, npy_double, npy_double);
+npy_double gdtrc(npy_double, npy_double, npy_double);
+npy_double cdfgam4_wrap(npy_double, npy_double, npy_double);
+npy_double cdfgam3_wrap(npy_double, npy_double, npy_double);
+npy_double cdfgam2_wrap(npy_double, npy_double, npy_double);
+npy_cdouble cbesh_wrap1(npy_double, npy_cdouble);
+npy_cdouble cbesh_wrap1_e(npy_double, npy_cdouble);
+npy_cdouble cbesh_wrap2(npy_double, npy_cdouble);
+npy_cdouble cbesh_wrap2_e(npy_double, npy_cdouble);
+npy_cdouble chyp1f1_wrap(npy_double, npy_double, npy_cdouble);
+npy_double hyp2f1(npy_double, npy_double, npy_double, npy_double);
+npy_double i0(npy_double);
+npy_double i0e(npy_double);
+npy_double i1(npy_double);
+npy_double i1e(npy_double);
+npy_int it2i0k0_wrap(npy_double, npy_double *, npy_double *);
+npy_int it2j0y0_wrap(npy_double, npy_double *, npy_double *);
+npy_double it2struve0_wrap(npy_double);
+npy_int itairy_wrap(npy_double, npy_double *, npy_double *, npy_double *, npy_double *);
+npy_int it1i0k0_wrap(npy_double, npy_double *, npy_double *);
+npy_int it1j0y0_wrap(npy_double, npy_double *, npy_double *);
+npy_double itmodstruve0_wrap(npy_double);
+npy_double itstruve0_wrap(npy_double);
+npy_cdouble cbesi_wrap(npy_double, npy_cdouble);
+npy_double iv(npy_double, npy_double);
+npy_cdouble cbesi_wrap_e(npy_double, npy_cdouble);
+npy_double cbesi_wrap_e_real(npy_double, npy_double);
+npy_double j0(npy_double);
+npy_double j1(npy_double);
+npy_cdouble cbesj_wrap(npy_double, npy_cdouble);
+npy_double cbesj_wrap_real(npy_double, npy_double);
+npy_cdouble cbesj_wrap_e(npy_double, npy_cdouble);
+npy_double cbesj_wrap_e_real(npy_double, npy_double);
+npy_double k0(npy_double);
+npy_double k0e(npy_double);
+npy_double k1(npy_double);
+npy_double k1e(npy_double);
+npy_double kei_wrap(npy_double);
+npy_double keip_wrap(npy_double);
+npy_int kelvin_wrap(npy_double, npy_cdouble *, npy_cdouble *, npy_cdouble *, npy_cdouble *);
+npy_double ker_wrap(npy_double);
+npy_double kerp_wrap(npy_double);
+npy_double cbesk_wrap_real_int(npy_int, npy_double);
+npy_double kolmogi(npy_double);
+npy_double kolmogorov(npy_double);
+npy_cdouble cbesk_wrap(npy_double, npy_cdouble);
+npy_double cbesk_wrap_real(npy_double, npy_double);
+npy_cdouble cbesk_wrap_e(npy_double, npy_cdouble);
+npy_double cbesk_wrap_e_real(npy_double, npy_double);
+npy_double log1p(npy_double);
+npy_double pmv_wrap(npy_double, npy_double, npy_double);
+npy_double cem_cva_wrap(npy_double, npy_double);
+npy_double sem_cva_wrap(npy_double, npy_double);
+npy_int cem_wrap(npy_double, npy_double, npy_double, npy_double *, npy_double *);
+npy_int mcm1_wrap(npy_double, npy_double, npy_double, npy_double *, npy_double *);
+npy_int mcm2_wrap(npy_double, npy_double, npy_double, npy_double *, npy_double *);
+npy_int msm1_wrap(npy_double, npy_double, npy_double, npy_double *, npy_double *);
+npy_int msm2_wrap(npy_double, npy_double, npy_double, npy_double *, npy_double *);
+npy_int sem_wrap(npy_double, npy_double, npy_double, npy_double *, npy_double *);
+npy_int modified_fresnel_minus_wrap(npy_double, npy_cdouble *, npy_cdouble *);
+npy_int modified_fresnel_plus_wrap(npy_double, npy_cdouble *, npy_cdouble *);
+npy_double struve_l(npy_double, npy_double);
+npy_double nbdtr(npy_int, npy_int, npy_double);
+npy_double nbdtrc(npy_int, npy_int, npy_double);
+npy_double nbdtri(npy_int, npy_int, npy_double);
+npy_double cdfnbn2_wrap(npy_double, npy_double, npy_double);
+npy_double cdfnbn3_wrap(npy_double, npy_double, npy_double);
+npy_double cdffnc1_wrap(npy_double, npy_double, npy_double, npy_double);
+npy_double cdffnc2_wrap(npy_double, npy_double, npy_double, npy_double);
+npy_double cdffnc4_wrap(npy_double, npy_double, npy_double, npy_double);
+npy_double cdffnc3_wrap(npy_double, npy_double, npy_double, npy_double);
+npy_double cdffnc5_wrap(npy_double, npy_double, npy_double, npy_double);
+npy_double cdftnc1_wrap(npy_double, npy_double, npy_double);
+npy_double cdftnc3_wrap(npy_double, npy_double, npy_double);
+npy_double cdftnc4_wrap(npy_double, npy_double, npy_double);
+npy_double cdftnc2_wrap(npy_double, npy_double, npy_double);
+npy_double ndtr(npy_double);
+npy_double ndtri(npy_double);
+npy_double cdfnor3_wrap(npy_double, npy_double, npy_double);
+npy_double cdfnor4_wrap(npy_double, npy_double, npy_double);
+npy_double oblate_aswfa_nocv_wrap(npy_double, npy_double, npy_double, npy_double, npy_double *);
+npy_int oblate_aswfa_wrap(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *);
+npy_double oblate_segv_wrap(npy_double, npy_double, npy_double);
+npy_double oblate_radial1_nocv_wrap(npy_double, npy_double, npy_double, npy_double, npy_double *);
+npy_int oblate_radial1_wrap(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *);
+npy_double oblate_radial2_nocv_wrap(npy_double, npy_double, npy_double, npy_double, npy_double *);
+npy_int oblate_radial2_wrap(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *);
+npy_double owens_t(npy_double, npy_double);
+npy_int pbdv_wrap(npy_double, npy_double, npy_double *, npy_double *);
+npy_int pbvv_wrap(npy_double, npy_double, npy_double *, npy_double *);
+npy_int pbwa_wrap(npy_double, npy_double, npy_double *, npy_double *);
+npy_double pdtr(npy_double, npy_double);
+npy_double pdtrc(npy_double, npy_double);
+npy_double pdtri(npy_int, npy_double);
+npy_double cdfpoi2_wrap(npy_double, npy_double);
+npy_double poch(npy_double, npy_double);
+npy_double prolate_aswfa_nocv_wrap(npy_double, npy_double, npy_double, npy_double, npy_double *);
+npy_int prolate_aswfa_wrap(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *);
+npy_double prolate_segv_wrap(npy_double, npy_double, npy_double);
+npy_double prolate_radial1_nocv_wrap(npy_double, npy_double, npy_double, npy_double, npy_double *);
+npy_int prolate_radial1_wrap(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *);
+npy_double prolate_radial2_nocv_wrap(npy_double, npy_double, npy_double, npy_double, npy_double *);
+npy_int prolate_radial2_wrap(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *);
+npy_double radian(npy_double, npy_double, npy_double);
+npy_double rgamma(npy_double);
+npy_double round(npy_double);
+npy_int shichi(npy_double, npy_double *, npy_double *);
+npy_int sici(npy_double, npy_double *, npy_double *);
+npy_double sindg(npy_double);
+npy_double smirnov(npy_int, npy_double);
+npy_double smirnovi(npy_int, npy_double);
+npy_double spence(npy_double);
+npy_double cdft1_wrap(npy_double, npy_double);
+npy_double cdft3_wrap(npy_double, npy_double);
+npy_double cdft2_wrap(npy_double, npy_double);
+npy_double struve_h(npy_double, npy_double);
+npy_double tandg(npy_double);
+npy_double tukeylambdacdf(npy_double, npy_double);
+npy_double y0(npy_double);
+npy_double y1(npy_double);
+npy_double yn(npy_int, npy_double);
+npy_cdouble cbesy_wrap(npy_double, npy_cdouble);
+npy_double cbesy_wrap_real(npy_double, npy_double);
+npy_cdouble cbesy_wrap_e(npy_double, npy_cdouble);
+npy_double cbesy_wrap_e_real(npy_double, npy_double);
+npy_double zetac(npy_double);
+#endif
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/add_newdocs.py b/__packaged__/coreml/.python_dependencies/scipy/special/add_newdocs.py
new file mode 100644
index 00000000..b98dcaec
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/add_newdocs.py
@@ -0,0 +1,23 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+
+import warnings
+from . import _add_newdocs
+
+__all__ = ['get', 'add_newdoc', 'Dict', 'docdict']  # noqa: F822
+
+
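+# Module-level ``__getattr__``/``__dir__`` (PEP 562) forward the old public
+# names to the private ``_add_newdocs`` module, emitting a DeprecationWarning
+# on access.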
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.special.add_newdocs is deprecated and has no attribute "
+            f"{name}.")
+
+    warnings.warn("The `scipy.special.add_newdocs` namespace is deprecated."
+                  " and will be removed in SciPy v2.0.0.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_add_newdocs, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/basic.py b/__packaged__/coreml/.python_dependencies/scipy/special/basic.py
new file mode 100644
index 00000000..66097020
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/basic.py
@@ -0,0 +1,97 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.special` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _basic
+from ._ufuncs import (mathieu_a, mathieu_b, iv, jv, gamma,
+                      psi, hankel1, hankel2, yv, kv)
+
+
+__all__ = [  # noqa: F822
+    'ai_zeros',
+    'assoc_laguerre',
+    'bei_zeros',
+    'beip_zeros',
+    'ber_zeros',
+    'bernoulli',
+    'berp_zeros',
+    'bi_zeros',
+    'clpmn',
+    'comb',
+    'digamma',
+    'diric',
+    'erf_zeros',
+    'euler',
+    'factorial',
+    'factorial2',
+    'factorialk',
+    'fresnel_zeros',
+    'fresnelc_zeros',
+    'fresnels_zeros',
+    'gamma',
+    'h1vp',
+    'h2vp',
+    'hankel1',
+    'hankel2',
+    'iv',
+    'ivp',
+    'jn_zeros',
+    'jnjnp_zeros',
+    'jnp_zeros',
+    'jnyn_zeros',
+    'jv',
+    'jvp',
+    'kei_zeros',
+    'keip_zeros',
+    'kelvin_zeros',
+    'ker_zeros',
+    'kerp_zeros',
+    'kv',
+    'kvp',
+    'lmbda',
+    'lpmn',
+    'lpn',
+    'lqmn',
+    'lqn',
+    'mathieu_a',
+    'mathieu_b',
+    'mathieu_even_coef',
+    'mathieu_odd_coef',
+    'obl_cv_seq',
+    'pbdn_seq',
+    'pbdv_seq',
+    'pbvv_seq',
+    'perm',
+    'polygamma',
+    'pro_cv_seq',
+    'psi',
+    'riccati_jn',
+    'riccati_yn',
+    'sinc',
+    'y0_zeros',
+    'y1_zeros',
+    'y1p_zeros',
+    'yn_zeros',
+    'ynp_zeros',
+    'yv',
+    'yvp',
+    'zeta'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.special.basic is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.special instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.special` namespace, "
+                  "the `scipy.special.basic` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_basic, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/cython_special.pxd b/__packaged__/coreml/.python_dependencies/scipy/special/cython_special.pxd
new file mode 100644
index 00000000..260da475
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/cython_special.pxd
@@ -0,0 +1,259 @@
+# This file is automatically generated by _generate_pyx.py.
+# Do not edit manually!
+
+ctypedef fused number_t:
+    double complex
+    double
+
+cpdef number_t spherical_jn(long n, number_t z, bint derivative=*) nogil
+cpdef number_t spherical_yn(long n, number_t z, bint derivative=*) nogil
+cpdef number_t spherical_in(long n, number_t z, bint derivative=*) nogil
+cpdef number_t spherical_kn(long n, number_t z, bint derivative=*) nogil
+
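+# Hypothetical usage sketch (added note, not part of the generated file):
+# these cpdef declarations let other Cython modules call the scalar kernels
+# directly, bypassing the Python-level ufunc dispatch, e.g.:
+#
+#     from scipy.special.cython_special cimport spherical_jn
+#     cdef double v = spherical_jn(2, 0.5)   # spherical Bessel j_2 at z = 0.5
+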
+ctypedef fused Dd_number_t:
+    double complex
+    double
+
+ctypedef fused df_number_t:
+    double
+    float
+
+ctypedef fused dfg_number_t:
+    double
+    float
+    long double
+
+ctypedef fused dl_number_t:
+    double
+    long
+
+cpdef double voigt_profile(double x0, double x1, double x2) nogil
+cpdef double agm(double x0, double x1) nogil
+cdef void airy(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1, Dd_number_t *y2, Dd_number_t *y3) nogil
+cdef void airye(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1, Dd_number_t *y2, Dd_number_t *y3) nogil
+cpdef double bdtr(double x0, dl_number_t x1, double x2) nogil
+cpdef double bdtrc(double x0, dl_number_t x1, double x2) nogil
+cpdef double bdtri(double x0, dl_number_t x1, double x2) nogil
+cpdef double bdtrik(double x0, double x1, double x2) nogil
+cpdef double bdtrin(double x0, double x1, double x2) nogil
+cpdef double bei(double x0) nogil
+cpdef double beip(double x0) nogil
+cpdef double ber(double x0) nogil
+cpdef double berp(double x0) nogil
+cpdef double besselpoly(double x0, double x1, double x2) nogil
+cpdef double beta(double x0, double x1) nogil
+cpdef double betainc(double x0, double x1, double x2) nogil
+cpdef double betaincinv(double x0, double x1, double x2) nogil
+cpdef double betaln(double x0, double x1) nogil
+cpdef double binom(double x0, double x1) nogil
+cpdef double boxcox(double x0, double x1) nogil
+cpdef double boxcox1p(double x0, double x1) nogil
+cpdef double btdtr(double x0, double x1, double x2) nogil
+cpdef double btdtri(double x0, double x1, double x2) nogil
+cpdef double btdtria(double x0, double x1, double x2) nogil
+cpdef double btdtrib(double x0, double x1, double x2) nogil
+cpdef double cbrt(double x0) nogil
+cpdef double chdtr(double x0, double x1) nogil
+cpdef double chdtrc(double x0, double x1) nogil
+cpdef double chdtri(double x0, double x1) nogil
+cpdef double chdtriv(double x0, double x1) nogil
+cpdef double chndtr(double x0, double x1, double x2) nogil
+cpdef double chndtridf(double x0, double x1, double x2) nogil
+cpdef double chndtrinc(double x0, double x1, double x2) nogil
+cpdef double chndtrix(double x0, double x1, double x2) nogil
+cpdef double cosdg(double x0) nogil
+cpdef double cosm1(double x0) nogil
+cpdef double cotdg(double x0) nogil
+cpdef Dd_number_t dawsn(Dd_number_t x0) nogil
+cpdef double ellipe(double x0) nogil
+cpdef double ellipeinc(double x0, double x1) nogil
+cdef void ellipj(double x0, double x1, double *y0, double *y1, double *y2, double *y3) nogil
+cpdef double ellipkinc(double x0, double x1) nogil
+cpdef double ellipkm1(double x0) nogil
+cpdef double ellipk(double x0) nogil
+cpdef Dd_number_t elliprc(Dd_number_t x0, Dd_number_t x1) nogil
+cpdef Dd_number_t elliprd(Dd_number_t x0, Dd_number_t x1, Dd_number_t x2) nogil
+cpdef Dd_number_t elliprf(Dd_number_t x0, Dd_number_t x1, Dd_number_t x2) nogil
+cpdef Dd_number_t elliprg(Dd_number_t x0, Dd_number_t x1, Dd_number_t x2) nogil
+cpdef Dd_number_t elliprj(Dd_number_t x0, Dd_number_t x1, Dd_number_t x2, Dd_number_t x3) nogil
+cpdef double entr(double x0) nogil
+cpdef Dd_number_t erf(Dd_number_t x0) nogil
+cpdef Dd_number_t erfc(Dd_number_t x0) nogil
+cpdef Dd_number_t erfcx(Dd_number_t x0) nogil
+cpdef Dd_number_t erfi(Dd_number_t x0) nogil
+cpdef df_number_t erfinv(df_number_t x0) nogil
+cpdef double erfcinv(double x0) nogil
+cpdef Dd_number_t eval_chebyc(dl_number_t x0, Dd_number_t x1) nogil
+cpdef Dd_number_t eval_chebys(dl_number_t x0, Dd_number_t x1) nogil
+cpdef Dd_number_t eval_chebyt(dl_number_t x0, Dd_number_t x1) nogil
+cpdef Dd_number_t eval_chebyu(dl_number_t x0, Dd_number_t x1) nogil
+cpdef Dd_number_t eval_gegenbauer(dl_number_t x0, double x1, Dd_number_t x2) nogil
+cpdef Dd_number_t eval_genlaguerre(dl_number_t x0, double x1, Dd_number_t x2) nogil
+cpdef double eval_hermite(long x0, double x1) nogil
+cpdef double eval_hermitenorm(long x0, double x1) nogil
+cpdef Dd_number_t eval_jacobi(dl_number_t x0, double x1, double x2, Dd_number_t x3) nogil
+cpdef Dd_number_t eval_laguerre(dl_number_t x0, Dd_number_t x1) nogil
+cpdef Dd_number_t eval_legendre(dl_number_t x0, Dd_number_t x1) nogil
+cpdef Dd_number_t eval_sh_chebyt(dl_number_t x0, Dd_number_t x1) nogil
+cpdef Dd_number_t eval_sh_chebyu(dl_number_t x0, Dd_number_t x1) nogil
+cpdef Dd_number_t eval_sh_jacobi(dl_number_t x0, double x1, double x2, Dd_number_t x3) nogil
+cpdef Dd_number_t eval_sh_legendre(dl_number_t x0, Dd_number_t x1) nogil
+cpdef Dd_number_t exp1(Dd_number_t x0) nogil
+cpdef double exp10(double x0) nogil
+cpdef double exp2(double x0) nogil
+cpdef Dd_number_t expi(Dd_number_t x0) nogil
+cpdef dfg_number_t expit(dfg_number_t x0) nogil
+cpdef Dd_number_t expm1(Dd_number_t x0) nogil
+cpdef double expn(dl_number_t x0, double x1) nogil
+cpdef double exprel(double x0) nogil
+cpdef double fdtr(double x0, double x1, double x2) nogil
+cpdef double fdtrc(double x0, double x1, double x2) nogil
+cpdef double fdtri(double x0, double x1, double x2) nogil
+cpdef double fdtridfd(double x0, double x1, double x2) nogil
+cdef void fresnel(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1) nogil
+cpdef Dd_number_t gamma(Dd_number_t x0) nogil
+cpdef double gammainc(double x0, double x1) nogil
+cpdef double gammaincc(double x0, double x1) nogil
+cpdef double gammainccinv(double x0, double x1) nogil
+cpdef double gammaincinv(double x0, double x1) nogil
+cpdef double gammaln(double x0) nogil
+cpdef double gammasgn(double x0) nogil
+cpdef double gdtr(double x0, double x1, double x2) nogil
+cpdef double gdtrc(double x0, double x1, double x2) nogil
+cpdef double gdtria(double x0, double x1, double x2) nogil
+cpdef double gdtrib(double x0, double x1, double x2) nogil
+cpdef double gdtrix(double x0, double x1, double x2) nogil
+cpdef double complex hankel1(double x0, double complex x1) nogil
+cpdef double complex hankel1e(double x0, double complex x1) nogil
+cpdef double complex hankel2(double x0, double complex x1) nogil
+cpdef double complex hankel2e(double x0, double complex x1) nogil
+cpdef double huber(double x0, double x1) nogil
+cpdef Dd_number_t hyp0f1(double x0, Dd_number_t x1) nogil
+cpdef Dd_number_t hyp1f1(double x0, double x1, Dd_number_t x2) nogil
+cpdef Dd_number_t hyp2f1(double x0, double x1, double x2, Dd_number_t x3) nogil
+cpdef double hyperu(double x0, double x1, double x2) nogil
+cpdef double i0(double x0) nogil
+cpdef double i0e(double x0) nogil
+cpdef double i1(double x0) nogil
+cpdef double i1e(double x0) nogil
+cpdef double inv_boxcox(double x0, double x1) nogil
+cpdef double inv_boxcox1p(double x0, double x1) nogil
+cdef void it2i0k0(double x0, double *y0, double *y1) nogil
+cdef void it2j0y0(double x0, double *y0, double *y1) nogil
+cpdef double it2struve0(double x0) nogil
+cdef void itairy(double x0, double *y0, double *y1, double *y2, double *y3) nogil
+cdef void iti0k0(double x0, double *y0, double *y1) nogil
+cdef void itj0y0(double x0, double *y0, double *y1) nogil
+cpdef double itmodstruve0(double x0) nogil
+cpdef double itstruve0(double x0) nogil
+cpdef Dd_number_t iv(double x0, Dd_number_t x1) nogil
+cpdef Dd_number_t ive(double x0, Dd_number_t x1) nogil
+cpdef double j0(double x0) nogil
+cpdef double j1(double x0) nogil
+cpdef Dd_number_t jv(double x0, Dd_number_t x1) nogil
+cpdef Dd_number_t jve(double x0, Dd_number_t x1) nogil
+cpdef double k0(double x0) nogil
+cpdef double k0e(double x0) nogil
+cpdef double k1(double x0) nogil
+cpdef double k1e(double x0) nogil
+cpdef double kei(double x0) nogil
+cpdef double keip(double x0) nogil
+cdef void kelvin(double x0, double complex *y0, double complex *y1, double complex *y2, double complex *y3) nogil
+cpdef double ker(double x0) nogil
+cpdef double kerp(double x0) nogil
+cpdef double kl_div(double x0, double x1) nogil
+cpdef double kn(dl_number_t x0, double x1) nogil
+cpdef double kolmogi(double x0) nogil
+cpdef double kolmogorov(double x0) nogil
+cpdef Dd_number_t kv(double x0, Dd_number_t x1) nogil
+cpdef Dd_number_t kve(double x0, Dd_number_t x1) nogil
+cpdef Dd_number_t log1p(Dd_number_t x0) nogil
+cpdef dfg_number_t log_expit(dfg_number_t x0) nogil
+cpdef Dd_number_t log_ndtr(Dd_number_t x0) nogil
+cpdef Dd_number_t loggamma(Dd_number_t x0) nogil
+cpdef dfg_number_t logit(dfg_number_t x0) nogil
+cpdef double lpmv(double x0, double x1, double x2) nogil
+cpdef double mathieu_a(double x0, double x1) nogil
+cpdef double mathieu_b(double x0, double x1) nogil
+cdef void mathieu_cem(double x0, double x1, double x2, double *y0, double *y1) nogil
+cdef void mathieu_modcem1(double x0, double x1, double x2, double *y0, double *y1) nogil
+cdef void mathieu_modcem2(double x0, double x1, double x2, double *y0, double *y1) nogil
+cdef void mathieu_modsem1(double x0, double x1, double x2, double *y0, double *y1) nogil
+cdef void mathieu_modsem2(double x0, double x1, double x2, double *y0, double *y1) nogil
+cdef void mathieu_sem(double x0, double x1, double x2, double *y0, double *y1) nogil
+cdef void modfresnelm(double x0, double complex *y0, double complex *y1) nogil
+cdef void modfresnelp(double x0, double complex *y0, double complex *y1) nogil
+cpdef double modstruve(double x0, double x1) nogil
+cpdef double nbdtr(dl_number_t x0, dl_number_t x1, double x2) nogil
+cpdef double nbdtrc(dl_number_t x0, dl_number_t x1, double x2) nogil
+cpdef double nbdtri(dl_number_t x0, dl_number_t x1, double x2) nogil
+cpdef double nbdtrik(double x0, double x1, double x2) nogil
+cpdef double nbdtrin(double x0, double x1, double x2) nogil
+cpdef double ncfdtr(double x0, double x1, double x2, double x3) nogil
+cpdef double ncfdtri(double x0, double x1, double x2, double x3) nogil
+cpdef double ncfdtridfd(double x0, double x1, double x2, double x3) nogil
+cpdef double ncfdtridfn(double x0, double x1, double x2, double x3) nogil
+cpdef double ncfdtrinc(double x0, double x1, double x2, double x3) nogil
+cpdef double nctdtr(double x0, double x1, double x2) nogil
+cpdef double nctdtridf(double x0, double x1, double x2) nogil
+cpdef double nctdtrinc(double x0, double x1, double x2) nogil
+cpdef double nctdtrit(double x0, double x1, double x2) nogil
+cpdef Dd_number_t ndtr(Dd_number_t x0) nogil
+cpdef double ndtri(double x0) nogil
+cpdef double nrdtrimn(double x0, double x1, double x2) nogil
+cpdef double nrdtrisd(double x0, double x1, double x2) nogil
+cdef void obl_ang1(double x0, double x1, double x2, double x3, double *y0, double *y1) nogil
+cdef void obl_ang1_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) nogil
+cpdef double obl_cv(double x0, double x1, double x2) nogil
+cdef void obl_rad1(double x0, double x1, double x2, double x3, double *y0, double *y1) nogil
+cdef void obl_rad1_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) nogil
+cdef void obl_rad2(double x0, double x1, double x2, double x3, double *y0, double *y1) nogil
+cdef void obl_rad2_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) nogil
+cpdef double owens_t(double x0, double x1) nogil
+cdef void pbdv(double x0, double x1, double *y0, double *y1) nogil
+cdef void pbvv(double x0, double x1, double *y0, double *y1) nogil
+cdef void pbwa(double x0, double x1, double *y0, double *y1) nogil
+cpdef double pdtr(double x0, double x1) nogil
+cpdef double pdtrc(double x0, double x1) nogil
+cpdef double pdtri(dl_number_t x0, double x1) nogil
+cpdef double pdtrik(double x0, double x1) nogil
+cpdef double poch(double x0, double x1) nogil
+cpdef df_number_t powm1(df_number_t x0, df_number_t x1) nogil
+cdef void pro_ang1(double x0, double x1, double x2, double x3, double *y0, double *y1) nogil
+cdef void pro_ang1_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) nogil
+cpdef double pro_cv(double x0, double x1, double x2) nogil
+cdef void pro_rad1(double x0, double x1, double x2, double x3, double *y0, double *y1) nogil
+cdef void pro_rad1_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) nogil
+cdef void pro_rad2(double x0, double x1, double x2, double x3, double *y0, double *y1) nogil
+cdef void pro_rad2_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) nogil
+cpdef double pseudo_huber(double x0, double x1) nogil
+cpdef Dd_number_t psi(Dd_number_t x0) nogil
+cpdef double radian(double x0, double x1, double x2) nogil
+cpdef double rel_entr(double x0, double x1) nogil
+cpdef Dd_number_t rgamma(Dd_number_t x0) nogil
+cpdef double round(double x0) nogil
+cdef void shichi(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1) nogil
+cdef void sici(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1) nogil
+cpdef double sindg(double x0) nogil
+cpdef double smirnov(dl_number_t x0, double x1) nogil
+cpdef double smirnovi(dl_number_t x0, double x1) nogil
+cpdef Dd_number_t spence(Dd_number_t x0) nogil
+cpdef double complex sph_harm(dl_number_t x0, dl_number_t x1, double x2, double x3) nogil
+cpdef double stdtr(double x0, double x1) nogil
+cpdef double stdtridf(double x0, double x1) nogil
+cpdef double stdtrit(double x0, double x1) nogil
+cpdef double struve(double x0, double x1) nogil
+cpdef double tandg(double x0) nogil
+cpdef double tklmbda(double x0, double x1) nogil
+cpdef double complex wofz(double complex x0) nogil
+cpdef Dd_number_t wrightomega(Dd_number_t x0) nogil
+cpdef Dd_number_t xlog1py(Dd_number_t x0, Dd_number_t x1) nogil
+cpdef Dd_number_t xlogy(Dd_number_t x0, Dd_number_t x1) nogil
+cpdef double y0(double x0) nogil
+cpdef double y1(double x0) nogil
+cpdef double yn(dl_number_t x0, double x1) nogil
+cpdef Dd_number_t yv(double x0, Dd_number_t x1) nogil
+cpdef Dd_number_t yve(double x0, Dd_number_t x1) nogil
+cpdef double zetac(double x0) nogil
+cpdef double wright_bessel(double x0, double x1, double x2) nogil
+cpdef double ndtri_exp(double x0) nogil
\ No newline at end of file
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/cython_special.pyi b/__packaged__/coreml/.python_dependencies/scipy/special/cython_special.pyi
new file mode 100644
index 00000000..024e962b
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/cython_special.pyi
@@ -0,0 +1,3 @@
+from typing import Any
+
+def __getattr__(name) -> Any: ...
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/cython_special.pyx b/__packaged__/coreml/.python_dependencies/scipy/special/cython_special.pyx
new file mode 100644
index 00000000..37e5d78d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/cython_special.pyx
@@ -0,0 +1,3641 @@
+# This file is automatically generated by _generate_pyx.py.
+# Do not edit manually!
+"""
+.. highlight:: cython
+
+Cython API for special functions
+================================
+
+Scalar, typed versions of many of the functions in ``scipy.special``
+can be accessed directly from Cython; the complete list is given
+below. Functions are overloaded using Cython fused types so their
+names match their Python counterparts. The module follows these
+conventions:
+
+- If a function's Python counterpart returns multiple values, then the
+  function returns its outputs via pointers in the final arguments.
+- If a function's Python counterpart returns a single value, then the
+  function's output is returned directly.
+
+The module is usable from Cython via::
+
+    cimport scipy.special.cython_special
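+
+For example, a minimal sketch (not part of the generated module itself)
+that exercises both conventions, using ``gamma`` for a direct return and
+``airy`` for pointer outputs::
+
+    cimport scipy.special.cython_special as csc
+
+    cdef double g = csc.gamma(0.5)       # single output, returned directly
+    cdef double ai, aip, bi, bip
+    csc.airy(1.0, &ai, &aip, &bi, &bip)  # four outputs written via pointers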
+
+Error handling
+--------------
+
+Functions can indicate an error by returning ``nan``; however, they
+cannot emit warnings as their counterparts in ``scipy.special`` can.
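+
+For example, a sketch that detects a domain error from ``ndtri`` (which
+returns ``nan`` for arguments outside ``[0, 1]``)::
+
+    from libc.math cimport isnan
+    from scipy.special.cython_special cimport ndtri
+
+    cdef double r = ndtri(2.0)   # out of domain, yields nan
+    if isnan(r):
+        pass  # handle the failure here; no warning was emitted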
+
+Available functions
+-------------------
+
+- :py:func:`~scipy.special.voigt_profile`::
+
+        double voigt_profile(double, double, double)
+
+- :py:func:`~scipy.special.agm`::
+
+        double agm(double, double)
+
+- :py:func:`~scipy.special.airy`::
+
+        void airy(double, double *, double *, double *, double *)
+        void airy(double complex, double complex *, double complex *, double complex *, double complex *)
+
+- :py:func:`~scipy.special.airye`::
+
+        void airye(double complex, double complex *, double complex *, double complex *, double complex *)
+        void airye(double, double *, double *, double *, double *)
+
+- :py:func:`~scipy.special.bdtr`::
+
+        double bdtr(double, double, double)
+        double bdtr(double, long, double)
+
+- :py:func:`~scipy.special.bdtrc`::
+
+        double bdtrc(double, double, double)
+        double bdtrc(double, long, double)
+
+- :py:func:`~scipy.special.bdtri`::
+
+        double bdtri(double, double, double)
+        double bdtri(double, long, double)
+
+- :py:func:`~scipy.special.bdtrik`::
+
+        double bdtrik(double, double, double)
+
+- :py:func:`~scipy.special.bdtrin`::
+
+        double bdtrin(double, double, double)
+
+- :py:func:`~scipy.special.bei`::
+
+        double bei(double)
+
+- :py:func:`~scipy.special.beip`::
+
+        double beip(double)
+
+- :py:func:`~scipy.special.ber`::
+
+        double ber(double)
+
+- :py:func:`~scipy.special.berp`::
+
+        double berp(double)
+
+- :py:func:`~scipy.special.besselpoly`::
+
+        double besselpoly(double, double, double)
+
+- :py:func:`~scipy.special.beta`::
+
+        double beta(double, double)
+
+- :py:func:`~scipy.special.betainc`::
+
+        double betainc(double, double, double)
+
+- :py:func:`~scipy.special.betaincinv`::
+
+        double betaincinv(double, double, double)
+
+- :py:func:`~scipy.special.betaln`::
+
+        double betaln(double, double)
+
+- :py:func:`~scipy.special.binom`::
+
+        double binom(double, double)
+
+- :py:func:`~scipy.special.boxcox`::
+
+        double boxcox(double, double)
+
+- :py:func:`~scipy.special.boxcox1p`::
+
+        double boxcox1p(double, double)
+
+- :py:func:`~scipy.special.btdtr`::
+
+        double btdtr(double, double, double)
+
+- :py:func:`~scipy.special.btdtri`::
+
+        double btdtri(double, double, double)
+
+- :py:func:`~scipy.special.btdtria`::
+
+        double btdtria(double, double, double)
+
+- :py:func:`~scipy.special.btdtrib`::
+
+        double btdtrib(double, double, double)
+
+- :py:func:`~scipy.special.cbrt`::
+
+        double cbrt(double)
+
+- :py:func:`~scipy.special.chdtr`::
+
+        double chdtr(double, double)
+
+- :py:func:`~scipy.special.chdtrc`::
+
+        double chdtrc(double, double)
+
+- :py:func:`~scipy.special.chdtri`::
+
+        double chdtri(double, double)
+
+- :py:func:`~scipy.special.chdtriv`::
+
+        double chdtriv(double, double)
+
+- :py:func:`~scipy.special.chndtr`::
+
+        double chndtr(double, double, double)
+
+- :py:func:`~scipy.special.chndtridf`::
+
+        double chndtridf(double, double, double)
+
+- :py:func:`~scipy.special.chndtrinc`::
+
+        double chndtrinc(double, double, double)
+
+- :py:func:`~scipy.special.chndtrix`::
+
+        double chndtrix(double, double, double)
+
+- :py:func:`~scipy.special.cosdg`::
+
+        double cosdg(double)
+
+- :py:func:`~scipy.special.cosm1`::
+
+        double cosm1(double)
+
+- :py:func:`~scipy.special.cotdg`::
+
+        double cotdg(double)
+
+- :py:func:`~scipy.special.dawsn`::
+
+        double dawsn(double)
+        double complex dawsn(double complex)
+
+- :py:func:`~scipy.special.ellipe`::
+
+        double ellipe(double)
+
+- :py:func:`~scipy.special.ellipeinc`::
+
+        double ellipeinc(double, double)
+
+- :py:func:`~scipy.special.ellipj`::
+
+        void ellipj(double, double, double *, double *, double *, double *)
+
+- :py:func:`~scipy.special.ellipkinc`::
+
+        double ellipkinc(double, double)
+
+- :py:func:`~scipy.special.ellipkm1`::
+
+        double ellipkm1(double)
+
+- :py:func:`~scipy.special.ellipk`::
+
+        double ellipk(double)
+
+- :py:func:`~scipy.special.elliprc`::
+
+        double elliprc(double, double)
+        double complex elliprc(double complex, double complex)
+
+- :py:func:`~scipy.special.elliprd`::
+
+        double elliprd(double, double, double)
+        double complex elliprd(double complex, double complex, double complex)
+
+- :py:func:`~scipy.special.elliprf`::
+
+        double elliprf(double, double, double)
+        double complex elliprf(double complex, double complex, double complex)
+
+- :py:func:`~scipy.special.elliprg`::
+
+        double elliprg(double, double, double)
+        double complex elliprg(double complex, double complex, double complex)
+
+- :py:func:`~scipy.special.elliprj`::
+
+        double elliprj(double, double, double, double)
+        double complex elliprj(double complex, double complex, double complex, double complex)
+
+- :py:func:`~scipy.special.entr`::
+
+        double entr(double)
+
+- :py:func:`~scipy.special.erf`::
+
+        double complex erf(double complex)
+        double erf(double)
+
+- :py:func:`~scipy.special.erfc`::
+
+        double complex erfc(double complex)
+        double erfc(double)
+
+- :py:func:`~scipy.special.erfcx`::
+
+        double erfcx(double)
+        double complex erfcx(double complex)
+
+- :py:func:`~scipy.special.erfi`::
+
+        double erfi(double)
+        double complex erfi(double complex)
+
+- :py:func:`~scipy.special.erfinv`::
+
+        float erfinv(float)
+        double erfinv(double)
+
+- :py:func:`~scipy.special.erfcinv`::
+
+        double erfcinv(double)
+
+- :py:func:`~scipy.special.eval_chebyc`::
+
+        double complex eval_chebyc(double, double complex)
+        double eval_chebyc(double, double)
+        double eval_chebyc(long, double)
+
+- :py:func:`~scipy.special.eval_chebys`::
+
+        double complex eval_chebys(double, double complex)
+        double eval_chebys(double, double)
+        double eval_chebys(long, double)
+
+- :py:func:`~scipy.special.eval_chebyt`::
+
+        double complex eval_chebyt(double, double complex)
+        double eval_chebyt(double, double)
+        double eval_chebyt(long, double)
+
+- :py:func:`~scipy.special.eval_chebyu`::
+
+        double complex eval_chebyu(double, double complex)
+        double eval_chebyu(double, double)
+        double eval_chebyu(long, double)
+
+- :py:func:`~scipy.special.eval_gegenbauer`::
+
+        double complex eval_gegenbauer(double, double, double complex)
+        double eval_gegenbauer(double, double, double)
+        double eval_gegenbauer(long, double, double)
+
+- :py:func:`~scipy.special.eval_genlaguerre`::
+
+        double complex eval_genlaguerre(double, double, double complex)
+        double eval_genlaguerre(double, double, double)
+        double eval_genlaguerre(long, double, double)
+
+- :py:func:`~scipy.special.eval_hermite`::
+
+        double eval_hermite(long, double)
+
+- :py:func:`~scipy.special.eval_hermitenorm`::
+
+        double eval_hermitenorm(long, double)
+
+- :py:func:`~scipy.special.eval_jacobi`::
+
+        double complex eval_jacobi(double, double, double, double complex)
+        double eval_jacobi(double, double, double, double)
+        double eval_jacobi(long, double, double, double)
+
+- :py:func:`~scipy.special.eval_laguerre`::
+
+        double complex eval_laguerre(double, double complex)
+        double eval_laguerre(double, double)
+        double eval_laguerre(long, double)
+
+- :py:func:`~scipy.special.eval_legendre`::
+
+        double complex eval_legendre(double, double complex)
+        double eval_legendre(double, double)
+        double eval_legendre(long, double)
+
+- :py:func:`~scipy.special.eval_sh_chebyt`::
+
+        double complex eval_sh_chebyt(double, double complex)
+        double eval_sh_chebyt(double, double)
+        double eval_sh_chebyt(long, double)
+
+- :py:func:`~scipy.special.eval_sh_chebyu`::
+
+        double complex eval_sh_chebyu(double, double complex)
+        double eval_sh_chebyu(double, double)
+        double eval_sh_chebyu(long, double)
+
+- :py:func:`~scipy.special.eval_sh_jacobi`::
+
+        double complex eval_sh_jacobi(double, double, double, double complex)
+        double eval_sh_jacobi(double, double, double, double)
+        double eval_sh_jacobi(long, double, double, double)
+
+- :py:func:`~scipy.special.eval_sh_legendre`::
+
+        double complex eval_sh_legendre(double, double complex)
+        double eval_sh_legendre(double, double)
+        double eval_sh_legendre(long, double)
+
+- :py:func:`~scipy.special.exp1`::
+
+        double complex exp1(double complex)
+        double exp1(double)
+
+- :py:func:`~scipy.special.exp10`::
+
+        double exp10(double)
+
+- :py:func:`~scipy.special.exp2`::
+
+        double exp2(double)
+
+- :py:func:`~scipy.special.expi`::
+
+        double complex expi(double complex)
+        double expi(double)
+
+- :py:func:`~scipy.special.expit`::
+
+        double expit(double)
+        float expit(float)
+        long double expit(long double)
+
+- :py:func:`~scipy.special.expm1`::
+
+        double complex expm1(double complex)
+        double expm1(double)
+
+- :py:func:`~scipy.special.expn`::
+
+        double expn(double, double)
+        double expn(long, double)
+
+- :py:func:`~scipy.special.exprel`::
+
+        double exprel(double)
+
+- :py:func:`~scipy.special.fdtr`::
+
+        double fdtr(double, double, double)
+
+- :py:func:`~scipy.special.fdtrc`::
+
+        double fdtrc(double, double, double)
+
+- :py:func:`~scipy.special.fdtri`::
+
+        double fdtri(double, double, double)
+
+- :py:func:`~scipy.special.fdtridfd`::
+
+        double fdtridfd(double, double, double)
+
+- :py:func:`~scipy.special.fresnel`::
+
+        void fresnel(double, double *, double *)
+        void fresnel(double complex, double complex *, double complex *)
+
+- :py:func:`~scipy.special.gamma`::
+
+        double complex gamma(double complex)
+        double gamma(double)
+
+- :py:func:`~scipy.special.gammainc`::
+
+        double gammainc(double, double)
+
+- :py:func:`~scipy.special.gammaincc`::
+
+        double gammaincc(double, double)
+
+- :py:func:`~scipy.special.gammainccinv`::
+
+        double gammainccinv(double, double)
+
+- :py:func:`~scipy.special.gammaincinv`::
+
+        double gammaincinv(double, double)
+
+- :py:func:`~scipy.special.gammaln`::
+
+        double gammaln(double)
+
+- :py:func:`~scipy.special.gammasgn`::
+
+        double gammasgn(double)
+
+- :py:func:`~scipy.special.gdtr`::
+
+        double gdtr(double, double, double)
+
+- :py:func:`~scipy.special.gdtrc`::
+
+        double gdtrc(double, double, double)
+
+- :py:func:`~scipy.special.gdtria`::
+
+        double gdtria(double, double, double)
+
+- :py:func:`~scipy.special.gdtrib`::
+
+        double gdtrib(double, double, double)
+
+- :py:func:`~scipy.special.gdtrix`::
+
+        double gdtrix(double, double, double)
+
+- :py:func:`~scipy.special.hankel1`::
+
+        double complex hankel1(double, double complex)
+
+- :py:func:`~scipy.special.hankel1e`::
+
+        double complex hankel1e(double, double complex)
+
+- :py:func:`~scipy.special.hankel2`::
+
+        double complex hankel2(double, double complex)
+
+- :py:func:`~scipy.special.hankel2e`::
+
+        double complex hankel2e(double, double complex)
+
+- :py:func:`~scipy.special.huber`::
+
+        double huber(double, double)
+
+- :py:func:`~scipy.special.hyp0f1`::
+
+        double complex hyp0f1(double, double complex)
+        double hyp0f1(double, double)
+
+- :py:func:`~scipy.special.hyp1f1`::
+
+        double hyp1f1(double, double, double)
+        double complex hyp1f1(double, double, double complex)
+
+- :py:func:`~scipy.special.hyp2f1`::
+
+        double hyp2f1(double, double, double, double)
+        double complex hyp2f1(double, double, double, double complex)
+
+- :py:func:`~scipy.special.hyperu`::
+
+        double hyperu(double, double, double)
+
+- :py:func:`~scipy.special.i0`::
+
+        double i0(double)
+
+- :py:func:`~scipy.special.i0e`::
+
+        double i0e(double)
+
+- :py:func:`~scipy.special.i1`::
+
+        double i1(double)
+
+- :py:func:`~scipy.special.i1e`::
+
+        double i1e(double)
+
+- :py:func:`~scipy.special.inv_boxcox`::
+
+        double inv_boxcox(double, double)
+
+- :py:func:`~scipy.special.inv_boxcox1p`::
+
+        double inv_boxcox1p(double, double)
+
+- :py:func:`~scipy.special.it2i0k0`::
+
+        void it2i0k0(double, double *, double *)
+
+- :py:func:`~scipy.special.it2j0y0`::
+
+        void it2j0y0(double, double *, double *)
+
+- :py:func:`~scipy.special.it2struve0`::
+
+        double it2struve0(double)
+
+- :py:func:`~scipy.special.itairy`::
+
+        void itairy(double, double *, double *, double *, double *)
+
+- :py:func:`~scipy.special.iti0k0`::
+
+        void iti0k0(double, double *, double *)
+
+- :py:func:`~scipy.special.itj0y0`::
+
+        void itj0y0(double, double *, double *)
+
+- :py:func:`~scipy.special.itmodstruve0`::
+
+        double itmodstruve0(double)
+
+- :py:func:`~scipy.special.itstruve0`::
+
+        double itstruve0(double)
+
+- :py:func:`~scipy.special.iv`::
+
+        double complex iv(double, double complex)
+        double iv(double, double)
+
+- :py:func:`~scipy.special.ive`::
+
+        double complex ive(double, double complex)
+        double ive(double, double)
+
+- :py:func:`~scipy.special.j0`::
+
+        double j0(double)
+
+- :py:func:`~scipy.special.j1`::
+
+        double j1(double)
+
+- :py:func:`~scipy.special.jv`::
+
+        double complex jv(double, double complex)
+        double jv(double, double)
+
+- :py:func:`~scipy.special.jve`::
+
+        double complex jve(double, double complex)
+        double jve(double, double)
+
+- :py:func:`~scipy.special.k0`::
+
+        double k0(double)
+
+- :py:func:`~scipy.special.k0e`::
+
+        double k0e(double)
+
+- :py:func:`~scipy.special.k1`::
+
+        double k1(double)
+
+- :py:func:`~scipy.special.k1e`::
+
+        double k1e(double)
+
+- :py:func:`~scipy.special.kei`::
+
+        double kei(double)
+
+- :py:func:`~scipy.special.keip`::
+
+        double keip(double)
+
+- :py:func:`~scipy.special.kelvin`::
+
+        void kelvin(double, double complex *, double complex *, double complex *, double complex *)
+
+- :py:func:`~scipy.special.ker`::
+
+        double ker(double)
+
+- :py:func:`~scipy.special.kerp`::
+
+        double kerp(double)
+
+- :py:func:`~scipy.special.kl_div`::
+
+        double kl_div(double, double)
+
+- :py:func:`~scipy.special.kn`::
+
+        double kn(double, double)
+        double kn(long, double)
+
+- :py:func:`~scipy.special.kolmogi`::
+
+        double kolmogi(double)
+
+- :py:func:`~scipy.special.kolmogorov`::
+
+        double kolmogorov(double)
+
+- :py:func:`~scipy.special.kv`::
+
+        double complex kv(double, double complex)
+        double kv(double, double)
+
+- :py:func:`~scipy.special.kve`::
+
+        double complex kve(double, double complex)
+        double kve(double, double)
+
+- :py:func:`~scipy.special.log1p`::
+
+        double complex log1p(double complex)
+        double log1p(double)
+
+- :py:func:`~scipy.special.log_expit`::
+
+        double log_expit(double)
+        float log_expit(float)
+        long double log_expit(long double)
+
+- :py:func:`~scipy.special.log_ndtr`::
+
+        double log_ndtr(double)
+        double complex log_ndtr(double complex)
+
+- :py:func:`~scipy.special.loggamma`::
+
+        double loggamma(double)
+        double complex loggamma(double complex)
+
+- :py:func:`~scipy.special.logit`::
+
+        double logit(double)
+        float logit(float)
+        long double logit(long double)
+
+- :py:func:`~scipy.special.lpmv`::
+
+        double lpmv(double, double, double)
+
+- :py:func:`~scipy.special.mathieu_a`::
+
+        double mathieu_a(double, double)
+
+- :py:func:`~scipy.special.mathieu_b`::
+
+        double mathieu_b(double, double)
+
+- :py:func:`~scipy.special.mathieu_cem`::
+
+        void mathieu_cem(double, double, double, double *, double *)
+
+- :py:func:`~scipy.special.mathieu_modcem1`::
+
+        void mathieu_modcem1(double, double, double, double *, double *)
+
+- :py:func:`~scipy.special.mathieu_modcem2`::
+
+        void mathieu_modcem2(double, double, double, double *, double *)
+
+- :py:func:`~scipy.special.mathieu_modsem1`::
+
+        void mathieu_modsem1(double, double, double, double *, double *)
+
+- :py:func:`~scipy.special.mathieu_modsem2`::
+
+        void mathieu_modsem2(double, double, double, double *, double *)
+
+- :py:func:`~scipy.special.mathieu_sem`::
+
+        void mathieu_sem(double, double, double, double *, double *)
+
+- :py:func:`~scipy.special.modfresnelm`::
+
+        void modfresnelm(double, double complex *, double complex *)
+
+- :py:func:`~scipy.special.modfresnelp`::
+
+        void modfresnelp(double, double complex *, double complex *)
+
+- :py:func:`~scipy.special.modstruve`::
+
+        double modstruve(double, double)
+
+- :py:func:`~scipy.special.nbdtr`::
+
+        double nbdtr(double, double, double)
+        double nbdtr(long, long, double)
+
+- :py:func:`~scipy.special.nbdtrc`::
+
+        double nbdtrc(double, double, double)
+        double nbdtrc(long, long, double)
+
+- :py:func:`~scipy.special.nbdtri`::
+
+        double nbdtri(double, double, double)
+        double nbdtri(long, long, double)
+
+- :py:func:`~scipy.special.nbdtrik`::
+
+        double nbdtrik(double, double, double)
+
+- :py:func:`~scipy.special.nbdtrin`::
+
+        double nbdtrin(double, double, double)
+
+- :py:func:`~scipy.special.ncfdtr`::
+
+        double ncfdtr(double, double, double, double)
+
+- :py:func:`~scipy.special.ncfdtri`::
+
+        double ncfdtri(double, double, double, double)
+
+- :py:func:`~scipy.special.ncfdtridfd`::
+
+        double ncfdtridfd(double, double, double, double)
+
+- :py:func:`~scipy.special.ncfdtridfn`::
+
+        double ncfdtridfn(double, double, double, double)
+
+- :py:func:`~scipy.special.ncfdtrinc`::
+
+        double ncfdtrinc(double, double, double, double)
+
+- :py:func:`~scipy.special.nctdtr`::
+
+        double nctdtr(double, double, double)
+
+- :py:func:`~scipy.special.nctdtridf`::
+
+        double nctdtridf(double, double, double)
+
+- :py:func:`~scipy.special.nctdtrinc`::
+
+        double nctdtrinc(double, double, double)
+
+- :py:func:`~scipy.special.nctdtrit`::
+
+        double nctdtrit(double, double, double)
+
+- :py:func:`~scipy.special.ndtr`::
+
+        double complex ndtr(double complex)
+        double ndtr(double)
+
+- :py:func:`~scipy.special.ndtri`::
+
+        double ndtri(double)
+
+- :py:func:`~scipy.special.nrdtrimn`::
+
+        double nrdtrimn(double, double, double)
+
+- :py:func:`~scipy.special.nrdtrisd`::
+
+        double nrdtrisd(double, double, double)
+
+- :py:func:`~scipy.special.obl_ang1`::
+
+        void obl_ang1(double, double, double, double, double *, double *)
+
+- :py:func:`~scipy.special.obl_ang1_cv`::
+
+        void obl_ang1_cv(double, double, double, double, double, double *, double *)
+
+- :py:func:`~scipy.special.obl_cv`::
+
+        double obl_cv(double, double, double)
+
+- :py:func:`~scipy.special.obl_rad1`::
+
+        void obl_rad1(double, double, double, double, double *, double *)
+
+- :py:func:`~scipy.special.obl_rad1_cv`::
+
+        void obl_rad1_cv(double, double, double, double, double, double *, double *)
+
+- :py:func:`~scipy.special.obl_rad2`::
+
+        void obl_rad2(double, double, double, double, double *, double *)
+
+- :py:func:`~scipy.special.obl_rad2_cv`::
+
+        void obl_rad2_cv(double, double, double, double, double, double *, double *)
+
+- :py:func:`~scipy.special.owens_t`::
+
+        double owens_t(double, double)
+
+- :py:func:`~scipy.special.pbdv`::
+
+        void pbdv(double, double, double *, double *)
+
+- :py:func:`~scipy.special.pbvv`::
+
+        void pbvv(double, double, double *, double *)
+
+- :py:func:`~scipy.special.pbwa`::
+
+        void pbwa(double, double, double *, double *)
+
+- :py:func:`~scipy.special.pdtr`::
+
+        double pdtr(double, double)
+
+- :py:func:`~scipy.special.pdtrc`::
+
+        double pdtrc(double, double)
+
+- :py:func:`~scipy.special.pdtri`::
+
+        double pdtri(double, double)
+        double pdtri(long, double)
+
+- :py:func:`~scipy.special.pdtrik`::
+
+        double pdtrik(double, double)
+
+- :py:func:`~scipy.special.poch`::
+
+        double poch(double, double)
+
+- :py:func:`~scipy.special.powm1`::
+
+        float powm1(float, float)
+        double powm1(double, double)
+
+- :py:func:`~scipy.special.pro_ang1`::
+
+        void pro_ang1(double, double, double, double, double *, double *)
+
+- :py:func:`~scipy.special.pro_ang1_cv`::
+
+        void pro_ang1_cv(double, double, double, double, double, double *, double *)
+
+- :py:func:`~scipy.special.pro_cv`::
+
+        double pro_cv(double, double, double)
+
+- :py:func:`~scipy.special.pro_rad1`::
+
+        void pro_rad1(double, double, double, double, double *, double *)
+
+- :py:func:`~scipy.special.pro_rad1_cv`::
+
+        void pro_rad1_cv(double, double, double, double, double, double *, double *)
+
+- :py:func:`~scipy.special.pro_rad2`::
+
+        void pro_rad2(double, double, double, double, double *, double *)
+
+- :py:func:`~scipy.special.pro_rad2_cv`::
+
+        void pro_rad2_cv(double, double, double, double, double, double *, double *)
+
+- :py:func:`~scipy.special.pseudo_huber`::
+
+        double pseudo_huber(double, double)
+
+- :py:func:`~scipy.special.psi`::
+
+        double complex psi(double complex)
+        double psi(double)
+
+- :py:func:`~scipy.special.radian`::
+
+        double radian(double, double, double)
+
+- :py:func:`~scipy.special.rel_entr`::
+
+        double rel_entr(double, double)
+
+- :py:func:`~scipy.special.rgamma`::
+
+        double complex rgamma(double complex)
+        double rgamma(double)
+
+- :py:func:`~scipy.special.round`::
+
+        double round(double)
+
+- :py:func:`~scipy.special.shichi`::
+
+        void shichi(double complex, double complex *, double complex *)
+        void shichi(double, double *, double *)
+
+- :py:func:`~scipy.special.sici`::
+
+        void sici(double complex, double complex *, double complex *)
+        void sici(double, double *, double *)
+
+- :py:func:`~scipy.special.sindg`::
+
+        double sindg(double)
+
+- :py:func:`~scipy.special.smirnov`::
+
+        double smirnov(double, double)
+        double smirnov(long, double)
+
+- :py:func:`~scipy.special.smirnovi`::
+
+        double smirnovi(double, double)
+        double smirnovi(long, double)
+
+- :py:func:`~scipy.special.spence`::
+
+        double complex spence(double complex)
+        double spence(double)
+
+- :py:func:`~scipy.special.sph_harm`::
+
+        double complex sph_harm(double, double, double, double)
+        double complex sph_harm(long, long, double, double)
+
+- :py:func:`~scipy.special.stdtr`::
+
+        double stdtr(double, double)
+
+- :py:func:`~scipy.special.stdtridf`::
+
+        double stdtridf(double, double)
+
+- :py:func:`~scipy.special.stdtrit`::
+
+        double stdtrit(double, double)
+
+- :py:func:`~scipy.special.struve`::
+
+        double struve(double, double)
+
+- :py:func:`~scipy.special.tandg`::
+
+        double tandg(double)
+
+- :py:func:`~scipy.special.tklmbda`::
+
+        double tklmbda(double, double)
+
+- :py:func:`~scipy.special.wofz`::
+
+        double complex wofz(double complex)
+
+- :py:func:`~scipy.special.wrightomega`::
+
+        double complex wrightomega(double complex)
+        double wrightomega(double)
+
+- :py:func:`~scipy.special.xlog1py`::
+
+        double xlog1py(double, double)
+        double complex xlog1py(double complex, double complex)
+
+- :py:func:`~scipy.special.xlogy`::
+
+        double xlogy(double, double)
+        double complex xlogy(double complex, double complex)
+
+- :py:func:`~scipy.special.y0`::
+
+        double y0(double)
+
+- :py:func:`~scipy.special.y1`::
+
+        double y1(double)
+
+- :py:func:`~scipy.special.yn`::
+
+        double yn(double, double)
+        double yn(long, double)
+
+- :py:func:`~scipy.special.yv`::
+
+        double complex yv(double, double complex)
+        double yv(double, double)
+
+- :py:func:`~scipy.special.yve`::
+
+        double complex yve(double, double complex)
+        double yve(double, double)
+
+- :py:func:`~scipy.special.zetac`::
+
+        double zetac(double)
+
+- :py:func:`~scipy.special.wright_bessel`::
+
+        double wright_bessel(double, double, double)
+
+- :py:func:`~scipy.special.ndtri_exp`::
+
+        double ndtri_exp(double)
+
+
+Custom functions
+----------------
+
+Some functions in ``scipy.special`` which are not ufuncs have custom
+Cython wrappers.
+
+Spherical Bessel functions
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The optional ``derivative`` boolean argument is replaced with an
+optional Cython ``bint``, leading to the following signatures (a short
+usage sketch follows the list).
+
+- :py:func:`~scipy.special.spherical_jn`::
+
+        double complex spherical_jn(long, double complex)
+        double complex spherical_jn(long, double complex, bint)
+        double spherical_jn(long, double)
+        double spherical_jn(long, double, bint)
+
+- :py:func:`~scipy.special.spherical_yn`::
+
+        double complex spherical_yn(long, double complex)
+        double complex spherical_yn(long, double complex, bint)
+        double spherical_yn(long, double)
+        double spherical_yn(long, double, bint)
+
+- :py:func:`~scipy.special.spherical_in`::
+
+        double complex spherical_in(long, double complex)
+        double complex spherical_in(long, double complex, bint)
+        double spherical_in(long, double)
+        double spherical_in(long, double, bint)
+
+- :py:func:`~scipy.special.spherical_kn`::
+
+        double complex spherical_kn(long, double complex)
+        double complex spherical_kn(long, double complex, bint)
+        double spherical_kn(long, double)
+        double spherical_kn(long, double, bint)
+
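+For example, a sketch evaluating one of these wrappers with and without
+the ``bint`` derivative flag::
+
+    from scipy.special.cython_special cimport spherical_jn
+
+    cdef double jn  = spherical_jn(2, 1.5)        # j_2(1.5)
+    cdef double jnp = spherical_jn(2, 1.5, True)  # derivative at z = 1.5
+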
+"""
+
+from libc.math cimport NAN
+
+include "_cython_special.pxi"
+include "_cython_special_custom.pxi"
+
+from ._agm cimport agm as _func_agm
+ctypedef double _proto_agm_t(double, double) nogil
+cdef _proto_agm_t *_proto_agm_t_var = &_func_agm
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_int _func_airy_wrap "airy_wrap"(npy_double, npy_double *, npy_double *, npy_double *, npy_double *)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_int _func_cairy_wrap "cairy_wrap"(npy_cdouble, npy_cdouble *, npy_cdouble *, npy_cdouble *, npy_cdouble *)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_int _func_cairy_wrap_e "cairy_wrap_e"(npy_cdouble, npy_cdouble *, npy_cdouble *, npy_cdouble *, npy_cdouble *)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_int _func_cairy_wrap_e_real "cairy_wrap_e_real"(npy_double, npy_double *, npy_double *, npy_double *, npy_double *)nogil
+from ._legacy cimport bdtr_unsafe as _func_bdtr_unsafe
+ctypedef double _proto_bdtr_unsafe_t(double, double, double) nogil
+cdef _proto_bdtr_unsafe_t *_proto_bdtr_unsafe_t_var = &_func_bdtr_unsafe
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_bdtr "bdtr"(npy_double, npy_int, npy_double)nogil
+from ._legacy cimport bdtrc_unsafe as _func_bdtrc_unsafe
+ctypedef double _proto_bdtrc_unsafe_t(double, double, double) nogil
+cdef _proto_bdtrc_unsafe_t *_proto_bdtrc_unsafe_t_var = &_func_bdtrc_unsafe
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_bdtrc "bdtrc"(npy_double, npy_int, npy_double)nogil
+from ._legacy cimport bdtri_unsafe as _func_bdtri_unsafe
+ctypedef double _proto_bdtri_unsafe_t(double, double, double) nogil
+cdef _proto_bdtri_unsafe_t *_proto_bdtri_unsafe_t_var = &_func_bdtri_unsafe
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_bdtri "bdtri"(npy_double, npy_int, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cdfbin2_wrap "cdfbin2_wrap"(npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cdfbin3_wrap "cdfbin3_wrap"(npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_bei_wrap "bei_wrap"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_beip_wrap "beip_wrap"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_ber_wrap "ber_wrap"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_berp_wrap "berp_wrap"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_besselpoly "besselpoly"(npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_beta "beta"(npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_incbet "incbet"(npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_incbi "incbi"(npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_lbeta "lbeta"(npy_double, npy_double)nogil
+from .orthogonal_eval cimport binom as _func_binom
+ctypedef double _proto_binom_t(double, double) nogil
+cdef _proto_binom_t *_proto_binom_t_var = &_func_binom
+from ._boxcox cimport boxcox as _func_boxcox
+ctypedef double _proto_boxcox_t(double, double) nogil
+cdef _proto_boxcox_t *_proto_boxcox_t_var = &_func_boxcox
+from ._boxcox cimport boxcox1p as _func_boxcox1p
+ctypedef double _proto_boxcox1p_t(double, double) nogil
+cdef _proto_boxcox1p_t *_proto_boxcox1p_t_var = &_func_boxcox1p
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_btdtr "btdtr"(npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_incbi "incbi"(npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cdfbet3_wrap "cdfbet3_wrap"(npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cdfbet4_wrap "cdfbet4_wrap"(npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cbrt "cbrt"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_chdtr "chdtr"(npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_chdtrc "chdtrc"(npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_chdtri "chdtri"(npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cdfchi3_wrap "cdfchi3_wrap"(npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cdfchn1_wrap "cdfchn1_wrap"(npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cdfchn3_wrap "cdfchn3_wrap"(npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cdfchn4_wrap "cdfchn4_wrap"(npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cdfchn2_wrap "cdfchn2_wrap"(npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cosdg "cosdg"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cosm1 "cosm1"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cotdg "cotdg"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_ellpe "ellpe"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_ellie "ellie"(npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_int _func_ellpj "ellpj"(npy_double, npy_double, npy_double *, npy_double *, npy_double *, npy_double *)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_ellik "ellik"(npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_ellpk "ellpk"(npy_double)nogil
+from ._ellipk cimport ellipk as _func_ellipk
+ctypedef double _proto_ellipk_t(double) nogil
+cdef _proto_ellipk_t *_proto_ellipk_t_var = &_func_ellipk
+from ._convex_analysis cimport entr as _func_entr
+ctypedef double _proto_entr_t(double) nogil
+cdef _proto_entr_t *_proto_entr_t_var = &_func_entr
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_erf "erf"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_erfc "erfc"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_erfcinv "erfcinv"(npy_double)nogil
+from .orthogonal_eval cimport eval_chebyc as _func_eval_chebyc
+ctypedef double complex _proto_eval_chebyc_double_complex__t(double, double complex) nogil
+cdef _proto_eval_chebyc_double_complex__t *_proto_eval_chebyc_double_complex__t_var = &_func_eval_chebyc[double_complex]
+from .orthogonal_eval cimport eval_chebyc as _func_eval_chebyc
+ctypedef double _proto_eval_chebyc_double__t(double, double) nogil
+cdef _proto_eval_chebyc_double__t *_proto_eval_chebyc_double__t_var = &_func_eval_chebyc[double]
+from .orthogonal_eval cimport eval_chebyc_l as _func_eval_chebyc_l
+ctypedef double _proto_eval_chebyc_l_t(long, double) nogil
+cdef _proto_eval_chebyc_l_t *_proto_eval_chebyc_l_t_var = &_func_eval_chebyc_l
+from .orthogonal_eval cimport eval_chebys as _func_eval_chebys
+ctypedef double complex _proto_eval_chebys_double_complex__t(double, double complex) nogil
+cdef _proto_eval_chebys_double_complex__t *_proto_eval_chebys_double_complex__t_var = &_func_eval_chebys[double_complex]
+from .orthogonal_eval cimport eval_chebys as _func_eval_chebys
+ctypedef double _proto_eval_chebys_double__t(double, double) nogil
+cdef _proto_eval_chebys_double__t *_proto_eval_chebys_double__t_var = &_func_eval_chebys[double]
+from .orthogonal_eval cimport eval_chebys_l as _func_eval_chebys_l
+ctypedef double _proto_eval_chebys_l_t(long, double) nogil
+cdef _proto_eval_chebys_l_t *_proto_eval_chebys_l_t_var = &_func_eval_chebys_l
+from .orthogonal_eval cimport eval_chebyt as _func_eval_chebyt
+ctypedef double complex _proto_eval_chebyt_double_complex__t(double, double complex) nogil
+cdef _proto_eval_chebyt_double_complex__t *_proto_eval_chebyt_double_complex__t_var = &_func_eval_chebyt[double_complex]
+from .orthogonal_eval cimport eval_chebyt as _func_eval_chebyt
+ctypedef double _proto_eval_chebyt_double__t(double, double) nogil
+cdef _proto_eval_chebyt_double__t *_proto_eval_chebyt_double__t_var = &_func_eval_chebyt[double]
+from .orthogonal_eval cimport eval_chebyt_l as _func_eval_chebyt_l
+ctypedef double _proto_eval_chebyt_l_t(long, double) nogil
+cdef _proto_eval_chebyt_l_t *_proto_eval_chebyt_l_t_var = &_func_eval_chebyt_l
+from .orthogonal_eval cimport eval_chebyu as _func_eval_chebyu
+ctypedef double complex _proto_eval_chebyu_double_complex__t(double, double complex) nogil
+cdef _proto_eval_chebyu_double_complex__t *_proto_eval_chebyu_double_complex__t_var = &_func_eval_chebyu[double_complex]
+from .orthogonal_eval cimport eval_chebyu as _func_eval_chebyu
+ctypedef double _proto_eval_chebyu_double__t(double, double) nogil
+cdef _proto_eval_chebyu_double__t *_proto_eval_chebyu_double__t_var = &_func_eval_chebyu[double]
+from .orthogonal_eval cimport eval_chebyu_l as _func_eval_chebyu_l
+ctypedef double _proto_eval_chebyu_l_t(long, double) nogil
+cdef _proto_eval_chebyu_l_t *_proto_eval_chebyu_l_t_var = &_func_eval_chebyu_l
+from .orthogonal_eval cimport eval_gegenbauer as _func_eval_gegenbauer
+ctypedef double complex _proto_eval_gegenbauer_double_complex__t(double, double, double complex) nogil
+cdef _proto_eval_gegenbauer_double_complex__t *_proto_eval_gegenbauer_double_complex__t_var = &_func_eval_gegenbauer[double_complex]
+from .orthogonal_eval cimport eval_gegenbauer as _func_eval_gegenbauer
+ctypedef double _proto_eval_gegenbauer_double__t(double, double, double) nogil
+cdef _proto_eval_gegenbauer_double__t *_proto_eval_gegenbauer_double__t_var = &_func_eval_gegenbauer[double]
+from .orthogonal_eval cimport eval_gegenbauer_l as _func_eval_gegenbauer_l
+ctypedef double _proto_eval_gegenbauer_l_t(long, double, double) nogil
+cdef _proto_eval_gegenbauer_l_t *_proto_eval_gegenbauer_l_t_var = &_func_eval_gegenbauer_l
+from .orthogonal_eval cimport eval_genlaguerre as _func_eval_genlaguerre
+ctypedef double complex _proto_eval_genlaguerre_double_complex__t(double, double, double complex) nogil
+cdef _proto_eval_genlaguerre_double_complex__t *_proto_eval_genlaguerre_double_complex__t_var = &_func_eval_genlaguerre[double_complex]
+from .orthogonal_eval cimport eval_genlaguerre as _func_eval_genlaguerre
+ctypedef double _proto_eval_genlaguerre_double__t(double, double, double) nogil
+cdef _proto_eval_genlaguerre_double__t *_proto_eval_genlaguerre_double__t_var = &_func_eval_genlaguerre[double]
+from .orthogonal_eval cimport eval_genlaguerre_l as _func_eval_genlaguerre_l
+ctypedef double _proto_eval_genlaguerre_l_t(long, double, double) nogil
+cdef _proto_eval_genlaguerre_l_t *_proto_eval_genlaguerre_l_t_var = &_func_eval_genlaguerre_l
+from .orthogonal_eval cimport eval_hermite as _func_eval_hermite
+ctypedef double _proto_eval_hermite_t(long, double) nogil
+cdef _proto_eval_hermite_t *_proto_eval_hermite_t_var = &_func_eval_hermite
+from .orthogonal_eval cimport eval_hermitenorm as _func_eval_hermitenorm
+ctypedef double _proto_eval_hermitenorm_t(long, double) nogil
+cdef _proto_eval_hermitenorm_t *_proto_eval_hermitenorm_t_var = &_func_eval_hermitenorm
+from .orthogonal_eval cimport eval_jacobi as _func_eval_jacobi
+ctypedef double complex _proto_eval_jacobi_double_complex__t(double, double, double, double complex) nogil
+cdef _proto_eval_jacobi_double_complex__t *_proto_eval_jacobi_double_complex__t_var = &_func_eval_jacobi[double_complex]
+from .orthogonal_eval cimport eval_jacobi as _func_eval_jacobi
+ctypedef double _proto_eval_jacobi_double__t(double, double, double, double) nogil
+cdef _proto_eval_jacobi_double__t *_proto_eval_jacobi_double__t_var = &_func_eval_jacobi[double]
+from .orthogonal_eval cimport eval_jacobi_l as _func_eval_jacobi_l
+ctypedef double _proto_eval_jacobi_l_t(long, double, double, double) nogil
+cdef _proto_eval_jacobi_l_t *_proto_eval_jacobi_l_t_var = &_func_eval_jacobi_l
+from .orthogonal_eval cimport eval_laguerre as _func_eval_laguerre
+ctypedef double complex _proto_eval_laguerre_double_complex__t(double, double complex) nogil
+cdef _proto_eval_laguerre_double_complex__t *_proto_eval_laguerre_double_complex__t_var = &_func_eval_laguerre[double_complex]
+from .orthogonal_eval cimport eval_laguerre as _func_eval_laguerre
+ctypedef double _proto_eval_laguerre_double__t(double, double) nogil
+cdef _proto_eval_laguerre_double__t *_proto_eval_laguerre_double__t_var = &_func_eval_laguerre[double]
+from .orthogonal_eval cimport eval_laguerre_l as _func_eval_laguerre_l
+ctypedef double _proto_eval_laguerre_l_t(long, double) nogil
+cdef _proto_eval_laguerre_l_t *_proto_eval_laguerre_l_t_var = &_func_eval_laguerre_l
+from .orthogonal_eval cimport eval_legendre as _func_eval_legendre
+ctypedef double complex _proto_eval_legendre_double_complex__t(double, double complex) nogil
+cdef _proto_eval_legendre_double_complex__t *_proto_eval_legendre_double_complex__t_var = &_func_eval_legendre[double_complex]
+from .orthogonal_eval cimport eval_legendre as _func_eval_legendre
+ctypedef double _proto_eval_legendre_double__t(double, double) nogil
+cdef _proto_eval_legendre_double__t *_proto_eval_legendre_double__t_var = &_func_eval_legendre[double]
+from .orthogonal_eval cimport eval_legendre_l as _func_eval_legendre_l
+ctypedef double _proto_eval_legendre_l_t(long, double) nogil
+cdef _proto_eval_legendre_l_t *_proto_eval_legendre_l_t_var = &_func_eval_legendre_l
+from .orthogonal_eval cimport eval_sh_chebyt as _func_eval_sh_chebyt
+ctypedef double complex _proto_eval_sh_chebyt_double_complex__t(double, double complex) nogil
+cdef _proto_eval_sh_chebyt_double_complex__t *_proto_eval_sh_chebyt_double_complex__t_var = &_func_eval_sh_chebyt[double_complex]
+from .orthogonal_eval cimport eval_sh_chebyt as _func_eval_sh_chebyt
+ctypedef double _proto_eval_sh_chebyt_double__t(double, double) nogil
+cdef _proto_eval_sh_chebyt_double__t *_proto_eval_sh_chebyt_double__t_var = &_func_eval_sh_chebyt[double]
+from .orthogonal_eval cimport eval_sh_chebyt_l as _func_eval_sh_chebyt_l
+ctypedef double _proto_eval_sh_chebyt_l_t(long, double) nogil
+cdef _proto_eval_sh_chebyt_l_t *_proto_eval_sh_chebyt_l_t_var = &_func_eval_sh_chebyt_l
+from .orthogonal_eval cimport eval_sh_chebyu as _func_eval_sh_chebyu
+ctypedef double complex _proto_eval_sh_chebyu_double_complex__t(double, double complex) nogil
+cdef _proto_eval_sh_chebyu_double_complex__t *_proto_eval_sh_chebyu_double_complex__t_var = &_func_eval_sh_chebyu[double_complex]
+from .orthogonal_eval cimport eval_sh_chebyu as _func_eval_sh_chebyu
+ctypedef double _proto_eval_sh_chebyu_double__t(double, double) nogil
+cdef _proto_eval_sh_chebyu_double__t *_proto_eval_sh_chebyu_double__t_var = &_func_eval_sh_chebyu[double]
+from .orthogonal_eval cimport eval_sh_chebyu_l as _func_eval_sh_chebyu_l
+ctypedef double _proto_eval_sh_chebyu_l_t(long, double) nogil
+cdef _proto_eval_sh_chebyu_l_t *_proto_eval_sh_chebyu_l_t_var = &_func_eval_sh_chebyu_l
+from .orthogonal_eval cimport eval_sh_jacobi as _func_eval_sh_jacobi
+ctypedef double complex _proto_eval_sh_jacobi_double_complex__t(double, double, double, double complex) nogil
+cdef _proto_eval_sh_jacobi_double_complex__t *_proto_eval_sh_jacobi_double_complex__t_var = &_func_eval_sh_jacobi[double_complex]
+from .orthogonal_eval cimport eval_sh_jacobi as _func_eval_sh_jacobi
+ctypedef double _proto_eval_sh_jacobi_double__t(double, double, double, double) nogil
+cdef _proto_eval_sh_jacobi_double__t *_proto_eval_sh_jacobi_double__t_var = &_func_eval_sh_jacobi[double]
+from .orthogonal_eval cimport eval_sh_jacobi_l as _func_eval_sh_jacobi_l
+ctypedef double _proto_eval_sh_jacobi_l_t(long, double, double, double) nogil
+cdef _proto_eval_sh_jacobi_l_t *_proto_eval_sh_jacobi_l_t_var = &_func_eval_sh_jacobi_l
+from .orthogonal_eval cimport eval_sh_legendre as _func_eval_sh_legendre
+ctypedef double complex _proto_eval_sh_legendre_double_complex__t(double, double complex) nogil
+cdef _proto_eval_sh_legendre_double_complex__t *_proto_eval_sh_legendre_double_complex__t_var = &_func_eval_sh_legendre[double_complex]
+from .orthogonal_eval cimport eval_sh_legendre as _func_eval_sh_legendre
+ctypedef double _proto_eval_sh_legendre_double__t(double, double) nogil
+cdef _proto_eval_sh_legendre_double__t *_proto_eval_sh_legendre_double__t_var = &_func_eval_sh_legendre[double]
+from .orthogonal_eval cimport eval_sh_legendre_l as _func_eval_sh_legendre_l
+ctypedef double _proto_eval_sh_legendre_l_t(long, double) nogil
+cdef _proto_eval_sh_legendre_l_t *_proto_eval_sh_legendre_l_t_var = &_func_eval_sh_legendre_l
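+# The block above repeats one generated pattern per orthogonal-polynomial kernel:
+# cimport the fused-type implementation, declare a matching nogil function-pointer
+# ctypedef, and bind a module-level pointer to one concrete specialization so the
+# ufunc machinery can capture a raw C entry point. A hand-written equivalent of a
+# single instance (illustrative sketch only, not part of the generated output):
+#
+#     from .orthogonal_eval cimport eval_chebyt as _f
+#     ctypedef double _sig_t(double, double) nogil
+#     cdef _sig_t *_ptr = &_f[double]   # select the real-valued specialization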
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_cdouble _func_cexp1_wrap "cexp1_wrap"(npy_cdouble)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_exp1_wrap "exp1_wrap"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_exp10 "exp10"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_exp2 "exp2"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_cdouble _func_cexpi_wrap "cexpi_wrap"(npy_cdouble)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_expi_wrap "expi_wrap"(npy_double)nogil
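+# The `cdef extern from r"..."` blocks in this file pull scalar kernels (cephes,
+# specfun and cdflib wrappers) out of the generated header `_ufuncs_defs.h`. The
+# absolute /private/var/... path appears to be an artifact of the Meson build
+# tree on the CI machine that produced this file; it is only consulted when the
+# generated C is compiled.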
+from ._cunity cimport cexpm1 as _func_cexpm1
+ctypedef double complex _proto_cexpm1_t(double complex) nogil
+cdef _proto_cexpm1_t *_proto_cexpm1_t_var = &_func_cexpm1
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_expm1 "expm1"(npy_double)nogil
+from ._legacy cimport expn_unsafe as _func_expn_unsafe
+ctypedef double _proto_expn_unsafe_t(double, double) nogil
+cdef _proto_expn_unsafe_t *_proto_expn_unsafe_t_var = &_func_expn_unsafe
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_expn "expn"(npy_int, npy_double)nogil
+from ._exprel cimport exprel as _func_exprel
+ctypedef double _proto_exprel_t(double) nogil
+cdef _proto_exprel_t *_proto_exprel_t_var = &_func_exprel
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_fdtr "fdtr"(npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_fdtrc "fdtrc"(npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_fdtri "fdtri"(npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cdff4_wrap "cdff4_wrap"(npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_int _func_fresnl "fresnl"(npy_double, npy_double *, npy_double *)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_int _func_cfresnl_wrap "cfresnl_wrap"(npy_cdouble, npy_cdouble *, npy_cdouble *)nogil
+from ._loggamma cimport cgamma as _func_cgamma
+ctypedef double complex _proto_cgamma_t(double complex) nogil
+cdef _proto_cgamma_t *_proto_cgamma_t_var = &_func_cgamma
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_Gamma "Gamma"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_igam "igam"(npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_igamc "igamc"(npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_igamci "igamci"(npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_igami "igami"(npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_lgam "lgam"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_gammasgn "gammasgn"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_gdtr "gdtr"(npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_gdtrc "gdtrc"(npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cdfgam4_wrap "cdfgam4_wrap"(npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cdfgam3_wrap "cdfgam3_wrap"(npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cdfgam2_wrap "cdfgam2_wrap"(npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_cdouble _func_cbesh_wrap1 "cbesh_wrap1"(npy_double, npy_cdouble)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_cdouble _func_cbesh_wrap1_e "cbesh_wrap1_e"(npy_double, npy_cdouble)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_cdouble _func_cbesh_wrap2 "cbesh_wrap2"(npy_double, npy_cdouble)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_cdouble _func_cbesh_wrap2_e "cbesh_wrap2_e"(npy_double, npy_cdouble)nogil
+from ._convex_analysis cimport huber as _func_huber
+ctypedef double _proto_huber_t(double, double) nogil
+cdef _proto_huber_t *_proto_huber_t_var = &_func_huber
+from ._hyp0f1 cimport _hyp0f1_cmplx as _func__hyp0f1_cmplx
+ctypedef double complex _proto__hyp0f1_cmplx_t(double, double complex) nogil
+cdef _proto__hyp0f1_cmplx_t *_proto__hyp0f1_cmplx_t_var = &_func__hyp0f1_cmplx
+from ._hyp0f1 cimport _hyp0f1_real as _func__hyp0f1_real
+ctypedef double _proto__hyp0f1_real_t(double, double) nogil
+cdef _proto__hyp0f1_real_t *_proto__hyp0f1_real_t_var = &_func__hyp0f1_real
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_cdouble _func_chyp1f1_wrap "chyp1f1_wrap"(npy_double, npy_double, npy_cdouble)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_hyp2f1 "hyp2f1"(npy_double, npy_double, npy_double, npy_double)nogil
+from ._hyp2f1 cimport hyp2f1_complex as _func_hyp2f1_complex
+ctypedef double complex _proto_hyp2f1_complex_t(double, double, double, double complex) nogil
+cdef _proto_hyp2f1_complex_t *_proto_hyp2f1_complex_t_var = &_func_hyp2f1_complex
+from ._hypergeometric cimport hyperu as _func_hyperu
+ctypedef double _proto_hyperu_t(double, double, double) nogil
+cdef _proto_hyperu_t *_proto_hyperu_t_var = &_func_hyperu
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_i0 "i0"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_i0e "i0e"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_i1 "i1"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_i1e "i1e"(npy_double)nogil
+from ._boxcox cimport inv_boxcox as _func_inv_boxcox
+ctypedef double _proto_inv_boxcox_t(double, double) nogil
+cdef _proto_inv_boxcox_t *_proto_inv_boxcox_t_var = &_func_inv_boxcox
+from ._boxcox cimport inv_boxcox1p as _func_inv_boxcox1p
+ctypedef double _proto_inv_boxcox1p_t(double, double) nogil
+cdef _proto_inv_boxcox1p_t *_proto_inv_boxcox1p_t_var = &_func_inv_boxcox1p
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_int _func_it2i0k0_wrap "it2i0k0_wrap"(npy_double, npy_double *, npy_double *)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_int _func_it2j0y0_wrap "it2j0y0_wrap"(npy_double, npy_double *, npy_double *)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_it2struve0_wrap "it2struve0_wrap"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_int _func_itairy_wrap "itairy_wrap"(npy_double, npy_double *, npy_double *, npy_double *, npy_double *)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_int _func_it1i0k0_wrap "it1i0k0_wrap"(npy_double, npy_double *, npy_double *)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_int _func_it1j0y0_wrap "it1j0y0_wrap"(npy_double, npy_double *, npy_double *)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_itmodstruve0_wrap "itmodstruve0_wrap"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_itstruve0_wrap "itstruve0_wrap"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_cdouble _func_cbesi_wrap "cbesi_wrap"(npy_double, npy_cdouble)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_iv "iv"(npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_cdouble _func_cbesi_wrap_e "cbesi_wrap_e"(npy_double, npy_cdouble)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cbesi_wrap_e_real "cbesi_wrap_e_real"(npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_j0 "j0"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_j1 "j1"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_cdouble _func_cbesj_wrap "cbesj_wrap"(npy_double, npy_cdouble)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cbesj_wrap_real "cbesj_wrap_real"(npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_cdouble _func_cbesj_wrap_e "cbesj_wrap_e"(npy_double, npy_cdouble)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cbesj_wrap_e_real "cbesj_wrap_e_real"(npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_k0 "k0"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_k0e "k0e"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_k1 "k1"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_k1e "k1e"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_kei_wrap "kei_wrap"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_keip_wrap "keip_wrap"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_int _func_kelvin_wrap "kelvin_wrap"(npy_double, npy_cdouble *, npy_cdouble *, npy_cdouble *, npy_cdouble *)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_ker_wrap "ker_wrap"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_kerp_wrap "kerp_wrap"(npy_double)nogil
+from ._convex_analysis cimport kl_div as _func_kl_div
+ctypedef double _proto_kl_div_t(double, double) nogil
+cdef _proto_kl_div_t *_proto_kl_div_t_var = &_func_kl_div
+from ._legacy cimport kn_unsafe as _func_kn_unsafe
+ctypedef double _proto_kn_unsafe_t(double, double) nogil
+cdef _proto_kn_unsafe_t *_proto_kn_unsafe_t_var = &_func_kn_unsafe
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cbesk_wrap_real_int "cbesk_wrap_real_int"(npy_int, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_kolmogi "kolmogi"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_kolmogorov "kolmogorov"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_cdouble _func_cbesk_wrap "cbesk_wrap"(npy_double, npy_cdouble)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cbesk_wrap_real "cbesk_wrap_real"(npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_cdouble _func_cbesk_wrap_e "cbesk_wrap_e"(npy_double, npy_cdouble)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cbesk_wrap_e_real "cbesk_wrap_e_real"(npy_double, npy_double)nogil
+from ._cunity cimport clog1p as _func_clog1p
+ctypedef double complex _proto_clog1p_t(double complex) nogil
+cdef _proto_clog1p_t *_proto_clog1p_t_var = &_func_clog1p
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_log1p "log1p"(npy_double)nogil
+from ._loggamma cimport loggamma_real as _func_loggamma_real
+ctypedef double _proto_loggamma_real_t(double) nogil
+cdef _proto_loggamma_real_t *_proto_loggamma_real_t_var = &_func_loggamma_real
+from ._loggamma cimport loggamma as _func_loggamma
+ctypedef double complex _proto_loggamma_t(double complex) nogil
+cdef _proto_loggamma_t *_proto_loggamma_t_var = &_func_loggamma
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_pmv_wrap "pmv_wrap"(npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cem_cva_wrap "cem_cva_wrap"(npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_sem_cva_wrap "sem_cva_wrap"(npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_int _func_cem_wrap "cem_wrap"(npy_double, npy_double, npy_double, npy_double *, npy_double *)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_int _func_mcm1_wrap "mcm1_wrap"(npy_double, npy_double, npy_double, npy_double *, npy_double *)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_int _func_mcm2_wrap "mcm2_wrap"(npy_double, npy_double, npy_double, npy_double *, npy_double *)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_int _func_msm1_wrap "msm1_wrap"(npy_double, npy_double, npy_double, npy_double *, npy_double *)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_int _func_msm2_wrap "msm2_wrap"(npy_double, npy_double, npy_double, npy_double *, npy_double *)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_int _func_sem_wrap "sem_wrap"(npy_double, npy_double, npy_double, npy_double *, npy_double *)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_int _func_modified_fresnel_minus_wrap "modified_fresnel_minus_wrap"(npy_double, npy_cdouble *, npy_cdouble *)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_int _func_modified_fresnel_plus_wrap "modified_fresnel_plus_wrap"(npy_double, npy_cdouble *, npy_cdouble *)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_struve_l "struve_l"(npy_double, npy_double)nogil
+from ._legacy cimport nbdtr_unsafe as _func_nbdtr_unsafe
+ctypedef double _proto_nbdtr_unsafe_t(double, double, double) nogil
+cdef _proto_nbdtr_unsafe_t *_proto_nbdtr_unsafe_t_var = &_func_nbdtr_unsafe
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_nbdtr "nbdtr"(npy_int, npy_int, npy_double)nogil
+from ._legacy cimport nbdtrc_unsafe as _func_nbdtrc_unsafe
+ctypedef double _proto_nbdtrc_unsafe_t(double, double, double) nogil
+cdef _proto_nbdtrc_unsafe_t *_proto_nbdtrc_unsafe_t_var = &_func_nbdtrc_unsafe
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_nbdtrc "nbdtrc"(npy_int, npy_int, npy_double)nogil
+from ._legacy cimport nbdtri_unsafe as _func_nbdtri_unsafe
+ctypedef double _proto_nbdtri_unsafe_t(double, double, double) nogil
+cdef _proto_nbdtri_unsafe_t *_proto_nbdtri_unsafe_t_var = &_func_nbdtri_unsafe
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_nbdtri "nbdtri"(npy_int, npy_int, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cdfnbn2_wrap "cdfnbn2_wrap"(npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cdfnbn3_wrap "cdfnbn3_wrap"(npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cdffnc1_wrap "cdffnc1_wrap"(npy_double, npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cdffnc2_wrap "cdffnc2_wrap"(npy_double, npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cdffnc4_wrap "cdffnc4_wrap"(npy_double, npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cdffnc3_wrap "cdffnc3_wrap"(npy_double, npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cdffnc5_wrap "cdffnc5_wrap"(npy_double, npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cdftnc1_wrap "cdftnc1_wrap"(npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cdftnc3_wrap "cdftnc3_wrap"(npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cdftnc4_wrap "cdftnc4_wrap"(npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cdftnc2_wrap "cdftnc2_wrap"(npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_ndtr "ndtr"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_ndtri "ndtri"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cdfnor3_wrap "cdfnor3_wrap"(npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cdfnor4_wrap "cdfnor4_wrap"(npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_oblate_aswfa_nocv_wrap "oblate_aswfa_nocv_wrap"(npy_double, npy_double, npy_double, npy_double, npy_double *)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_int _func_oblate_aswfa_wrap "oblate_aswfa_wrap"(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_oblate_segv_wrap "oblate_segv_wrap"(npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_oblate_radial1_nocv_wrap "oblate_radial1_nocv_wrap"(npy_double, npy_double, npy_double, npy_double, npy_double *)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_int _func_oblate_radial1_wrap "oblate_radial1_wrap"(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_oblate_radial2_nocv_wrap "oblate_radial2_nocv_wrap"(npy_double, npy_double, npy_double, npy_double, npy_double *)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_int _func_oblate_radial2_wrap "oblate_radial2_wrap"(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_owens_t "owens_t"(npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_int _func_pbdv_wrap "pbdv_wrap"(npy_double, npy_double, npy_double *, npy_double *)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_int _func_pbvv_wrap "pbvv_wrap"(npy_double, npy_double, npy_double *, npy_double *)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_int _func_pbwa_wrap "pbwa_wrap"(npy_double, npy_double, npy_double *, npy_double *)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_pdtr "pdtr"(npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_pdtrc "pdtrc"(npy_double, npy_double)nogil
+from ._legacy cimport pdtri_unsafe as _func_pdtri_unsafe
+ctypedef double _proto_pdtri_unsafe_t(double, double) nogil
+cdef _proto_pdtri_unsafe_t *_proto_pdtri_unsafe_t_var = &_func_pdtri_unsafe
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_pdtri "pdtri"(npy_int, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cdfpoi2_wrap "cdfpoi2_wrap"(npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_poch "poch"(npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_prolate_aswfa_nocv_wrap "prolate_aswfa_nocv_wrap"(npy_double, npy_double, npy_double, npy_double, npy_double *)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_int _func_prolate_aswfa_wrap "prolate_aswfa_wrap"(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_prolate_segv_wrap "prolate_segv_wrap"(npy_double, npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_prolate_radial1_nocv_wrap "prolate_radial1_nocv_wrap"(npy_double, npy_double, npy_double, npy_double, npy_double *)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_int _func_prolate_radial1_wrap "prolate_radial1_wrap"(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_prolate_radial2_nocv_wrap "prolate_radial2_nocv_wrap"(npy_double, npy_double, npy_double, npy_double, npy_double *)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_int _func_prolate_radial2_wrap "prolate_radial2_wrap"(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *)nogil
+from ._convex_analysis cimport pseudo_huber as _func_pseudo_huber
+ctypedef double _proto_pseudo_huber_t(double, double) nogil
+cdef _proto_pseudo_huber_t *_proto_pseudo_huber_t_var = &_func_pseudo_huber
+from ._digamma cimport cdigamma as _func_cdigamma
+ctypedef double complex _proto_cdigamma_t(double complex) nogil
+cdef _proto_cdigamma_t *_proto_cdigamma_t_var = &_func_cdigamma
+from ._digamma cimport digamma as _func_digamma
+ctypedef double _proto_digamma_t(double) nogil
+cdef _proto_digamma_t *_proto_digamma_t_var = &_func_digamma
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_radian "radian"(npy_double, npy_double, npy_double)nogil
+from ._convex_analysis cimport rel_entr as _func_rel_entr
+ctypedef double _proto_rel_entr_t(double, double) nogil
+cdef _proto_rel_entr_t *_proto_rel_entr_t_var = &_func_rel_entr
+from ._loggamma cimport crgamma as _func_crgamma
+ctypedef double complex _proto_crgamma_t(double complex) nogil
+cdef _proto_crgamma_t *_proto_crgamma_t_var = &_func_crgamma
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_rgamma "rgamma"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_round "round"(npy_double)nogil
+from ._sici cimport cshichi as _func_cshichi
+ctypedef int _proto_cshichi_t(double complex, double complex *, double complex *) nogil
+cdef _proto_cshichi_t *_proto_cshichi_t_var = &_func_cshichi
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_int _func_shichi "shichi"(npy_double, npy_double *, npy_double *)nogil
+from ._sici cimport csici as _func_csici
+ctypedef int _proto_csici_t(double complex, double complex *, double complex *) nogil
+cdef _proto_csici_t *_proto_csici_t_var = &_func_csici
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_int _func_sici "sici"(npy_double, npy_double *, npy_double *)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_sindg "sindg"(npy_double)nogil
+from ._legacy cimport smirnov_unsafe as _func_smirnov_unsafe
+ctypedef double _proto_smirnov_unsafe_t(double, double) nogil
+cdef _proto_smirnov_unsafe_t *_proto_smirnov_unsafe_t_var = &_func_smirnov_unsafe
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_smirnov "smirnov"(npy_int, npy_double)nogil
+from ._legacy cimport smirnovi_unsafe as _func_smirnovi_unsafe
+ctypedef double _proto_smirnovi_unsafe_t(double, double) nogil
+cdef _proto_smirnovi_unsafe_t *_proto_smirnovi_unsafe_t_var = &_func_smirnovi_unsafe
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_smirnovi "smirnovi"(npy_int, npy_double)nogil
+from ._spence cimport cspence as _func_cspence
+ctypedef double complex _proto_cspence_t(double complex) nogil
+cdef _proto_cspence_t *_proto_cspence_t_var = &_func_cspence
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_spence "spence"(npy_double)nogil
+from ._legacy cimport sph_harmonic_unsafe as _func_sph_harmonic_unsafe
+ctypedef double complex _proto_sph_harmonic_unsafe_t(double, double, double, double) nogil
+cdef _proto_sph_harmonic_unsafe_t *_proto_sph_harmonic_unsafe_t_var = &_func_sph_harmonic_unsafe
+from .sph_harm cimport sph_harmonic as _func_sph_harmonic
+ctypedef double complex _proto_sph_harmonic_t(int, int, double, double) nogil
+cdef _proto_sph_harmonic_t *_proto_sph_harmonic_t_var = &_func_sph_harmonic
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cdft1_wrap "cdft1_wrap"(npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cdft3_wrap "cdft3_wrap"(npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cdft2_wrap "cdft2_wrap"(npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_struve_h "struve_h"(npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_tandg "tandg"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_tukeylambdacdf "tukeylambdacdf"(npy_double, npy_double)nogil
+from ._xlogy cimport xlog1py as _func_xlog1py
+ctypedef double _proto_xlog1py_double__t(double, double) nogil
+cdef _proto_xlog1py_double__t *_proto_xlog1py_double__t_var = &_func_xlog1py[double]
+from ._xlogy cimport xlog1py as _func_xlog1py
+ctypedef double complex _proto_xlog1py_double_complex__t(double complex, double complex) nogil
+cdef _proto_xlog1py_double_complex__t *_proto_xlog1py_double_complex__t_var = &_func_xlog1py[double_complex]
+from ._xlogy cimport xlogy as _func_xlogy
+ctypedef double _proto_xlogy_double__t(double, double) nogil
+cdef _proto_xlogy_double__t *_proto_xlogy_double__t_var = &_func_xlogy[double]
+from ._xlogy cimport xlogy as _func_xlogy
+ctypedef double complex _proto_xlogy_double_complex__t(double complex, double complex) nogil
+cdef _proto_xlogy_double_complex__t *_proto_xlogy_double_complex__t_var = &_func_xlogy[double_complex]
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_y0 "y0"(npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_y1 "y1"(npy_double)nogil
+from ._legacy cimport yn_unsafe as _func_yn_unsafe
+ctypedef double _proto_yn_unsafe_t(double, double) nogil
+cdef _proto_yn_unsafe_t *_proto_yn_unsafe_t_var = &_func_yn_unsafe
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_yn "yn"(npy_int, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_cdouble _func_cbesy_wrap "cbesy_wrap"(npy_double, npy_cdouble)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cbesy_wrap_real "cbesy_wrap_real"(npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_cdouble _func_cbesy_wrap_e "cbesy_wrap_e"(npy_double, npy_cdouble)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_cbesy_wrap_e_real "cbesy_wrap_e_real"(npy_double, npy_double)nogil
+cdef extern from r"/private/var/folders/_f/lyvxf0v13gs7984d7sf7j83c0000gn/T/cirrus-ci-build/.mesonpy-1bkoe_fs/build/scipy/special/_ufuncs_defs.h":
+    cdef npy_double _func_zetac "zetac"(npy_double)nogil
+from ._wright_bessel cimport wright_bessel_scalar as _func_wright_bessel_scalar
+ctypedef double _proto_wright_bessel_scalar_t(double, double, double) nogil
+cdef _proto_wright_bessel_scalar_t *_proto_wright_bessel_scalar_t_var = &_func_wright_bessel_scalar
+from ._ndtri_exp cimport ndtri_exp as _func_ndtri_exp
+ctypedef double _proto_ndtri_exp_t(double) nogil
+cdef _proto_ndtri_exp_t *_proto_ndtri_exp_t_var = &_func_ndtri_exp
+
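+# From here on, every scipy.special ufunc gets a typed scalar wrapper that other
+# Cython code can call without Python-call overhead, most of them `cpdef ... nogil`.
+# A minimal usage sketch (illustrative only; the consuming module is hypothetical):
+#
+#     # consumer.pyx
+#     from scipy.special.cython_special cimport agm
+#     cdef double r
+#     with nogil:
+#         r = agm(1.0, 2.0)   # typed call, no Python objects involved
+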
+cpdef double voigt_profile(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.voigt_profile"""
+    return (scipy.special._ufuncs_cxx._export_faddeeva_voigt_profile)(x0, x1, x2)
+
+cpdef double agm(double x0, double x1) nogil:
+    """See the documentation for scipy.special.agm"""
+    return _func_agm(x0, x1)
+
+cdef void airy(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1, Dd_number_t *y2, Dd_number_t *y3) nogil:
+    """See the documentation for scipy.special.airy"""
+    cdef npy_cdouble tmp0
+    cdef npy_cdouble tmp1
+    cdef npy_cdouble tmp2
+    cdef npy_cdouble tmp3
+    if Dd_number_t is double:
+        _func_airy_wrap(x0, y0, y1, y2, y3)
+    elif Dd_number_t is double_complex:
+        _func_cairy_wrap(_complexstuff.npy_cdouble_from_double_complex(x0), &tmp0, &tmp1, &tmp2, &tmp3)
+        y0[0] = _complexstuff.double_complex_from_npy_cdouble(tmp0)
+        y1[0] = _complexstuff.double_complex_from_npy_cdouble(tmp1)
+        y2[0] = _complexstuff.double_complex_from_npy_cdouble(tmp2)
+        y3[0] = _complexstuff.double_complex_from_npy_cdouble(tmp3)
+    else:
+        # unreachable: Dd_number_t covers only double and double_complex
+        y0[0] = NAN
+        y1[0] = NAN
+        y2[0] = NAN
+        y3[0] = NAN
+
+def _airy_pywrap(Dd_number_t x0):
+    cdef Dd_number_t y0
+    cdef Dd_number_t y1
+    cdef Dd_number_t y2
+    cdef Dd_number_t y3
+    airy(x0, &y0, &y1, &y2, &y3)
+    return y0, y1, y2, y3
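+
+# Multi-output routines such as `airy` are exposed as `cdef void` functions that
+# write through out-pointers; the `_*_pywrap` helpers give a Python-callable,
+# tuple-returning form of the same kernel. A rough Cython-side call (sketch):
+#
+#     cdef double ai, aip, bi, bip
+#     airy(1.0, &ai, &aip, &bi, &bip)   # fills Ai, Ai', Bi, Bi' at x = 1.0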
+
+cdef void airye(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1, Dd_number_t *y2, Dd_number_t *y3) nogil:
+    """See the documentation for scipy.special.airye"""
+    cdef npy_cdouble tmp0
+    cdef npy_cdouble tmp1
+    cdef npy_cdouble tmp2
+    cdef npy_cdouble tmp3
+    if Dd_number_t is double_complex:
+        _func_cairy_wrap_e(_complexstuff.npy_cdouble_from_double_complex(x0), &tmp0, &tmp1, &tmp2, &tmp3)
+        y0[0] = _complexstuff.double_complex_from_npy_cdouble(tmp0)
+        y1[0] = _complexstuff.double_complex_from_npy_cdouble(tmp1)
+        y2[0] = _complexstuff.double_complex_from_npy_cdouble(tmp2)
+        y3[0] = _complexstuff.double_complex_from_npy_cdouble(tmp3)
+    elif Dd_number_t is double:
+        _func_cairy_wrap_e_real(x0, y0, y1, y2, y3)
+    else:
+        # unreachable: Dd_number_t covers only double and double_complex
+        y0[0] = NAN
+        y1[0] = NAN
+        y2[0] = NAN
+        y3[0] = NAN
+
+def _airye_pywrap(Dd_number_t x0):
+    cdef Dd_number_t y0
+    cdef Dd_number_t y1
+    cdef Dd_number_t y2
+    cdef Dd_number_t y3
+    airye(x0, &y0, &y1, &y2, &y3)
+    return y0, y1, y2, y3
+
+cpdef double bdtr(double x0, dl_number_t x1, double x2) nogil:
+    """See the documentation for scipy.special.bdtr"""
+    if dl_number_t is double:
+        return _func_bdtr_unsafe(x0, x1, x2)
+    elif dl_number_t is long:
+        return _func_bdtr(x0, x1, x2)
+    else:
+        return NAN
+
+cpdef double bdtrc(double x0, dl_number_t x1, double x2) nogil:
+    """See the documentation for scipy.special.bdtrc"""
+    if dl_number_t is double:
+        return _func_bdtrc_unsafe(x0, x1, x2)
+    elif dl_number_t is long:
+        return _func_bdtrc(x0, x1, x2)
+    else:
+        return NAN
+
+cpdef double bdtri(double x0, dl_number_t x1, double x2) nogil:
+    """See the documentation for scipy.special.bdtri"""
+    if dl_number_t is double:
+        return _func_bdtri_unsafe(x0, x1, x2)
+    elif dl_number_t is long:
+        return _func_bdtri(x0, x1, x2)
+    else:
+        return NAN
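+
+# In the bdtr family the `dl_number_t` fused type dispatches on the count
+# argument: a `long` goes straight to the typed kernel, while a `double` is
+# routed through the `_legacy` `*_unsafe` wrapper, which handles the cast from
+# the historical real-valued signature. Sketch of the two paths (argument
+# values are illustrative):
+#
+#     bdtr(5.0, <long>10, 0.3)   # long path   -> _func_bdtr
+#     bdtr(5.0, 10.0, 0.3)       # double path -> _func_bdtr_unsafe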
+
+cpdef double bdtrik(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.bdtrik"""
+    return _func_cdfbin2_wrap(x0, x1, x2)
+
+cpdef double bdtrin(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.bdtrin"""
+    return _func_cdfbin3_wrap(x0, x1, x2)
+
+cpdef double bei(double x0) nogil:
+    """See the documentation for scipy.special.bei"""
+    return _func_bei_wrap(x0)
+
+cpdef double beip(double x0) nogil:
+    """See the documentation for scipy.special.beip"""
+    return _func_beip_wrap(x0)
+
+cpdef double ber(double x0) nogil:
+    """See the documentation for scipy.special.ber"""
+    return _func_ber_wrap(x0)
+
+cpdef double berp(double x0) nogil:
+    """See the documentation for scipy.special.berp"""
+    return _func_berp_wrap(x0)
+
+cpdef double besselpoly(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.besselpoly"""
+    return _func_besselpoly(x0, x1, x2)
+
+cpdef double beta(double x0, double x1) nogil:
+    """See the documentation for scipy.special.beta"""
+    return _func_beta(x0, x1)
+
+cpdef double betainc(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.betainc"""
+    return _func_incbet(x0, x1, x2)
+
+cpdef double betaincinv(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.betaincinv"""
+    return _func_incbi(x0, x1, x2)
+
+cpdef double betaln(double x0, double x1) nogil:
+    """See the documentation for scipy.special.betaln"""
+    return _func_lbeta(x0, x1)
+
+cpdef double binom(double x0, double x1) nogil:
+    """See the documentation for scipy.special.binom"""
+    return _func_binom(x0, x1)
+
+cpdef double boxcox(double x0, double x1) nogil:
+    """See the documentation for scipy.special.boxcox"""
+    return _func_boxcox(x0, x1)
+
+cpdef double boxcox1p(double x0, double x1) nogil:
+    """See the documentation for scipy.special.boxcox1p"""
+    return _func_boxcox1p(x0, x1)
+
+cpdef double btdtr(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.btdtr"""
+    return _func_btdtr(x0, x1, x2)
+
+cpdef double btdtri(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.btdtri"""
+    return _func_incbi(x0, x1, x2)
+
+cpdef double btdtria(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.btdtria"""
+    return _func_cdfbet3_wrap(x0, x1, x2)
+
+cpdef double btdtrib(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.btdtrib"""
+    return _func_cdfbet4_wrap(x0, x1, x2)
+
+cpdef double cbrt(double x0) nogil:
+    """See the documentation for scipy.special.cbrt"""
+    return _func_cbrt(x0)
+
+cpdef double chdtr(double x0, double x1) nogil:
+    """See the documentation for scipy.special.chdtr"""
+    return _func_chdtr(x0, x1)
+
+cpdef double chdtrc(double x0, double x1) nogil:
+    """See the documentation for scipy.special.chdtrc"""
+    return _func_chdtrc(x0, x1)
+
+cpdef double chdtri(double x0, double x1) nogil:
+    """See the documentation for scipy.special.chdtri"""
+    return _func_chdtri(x0, x1)
+
+cpdef double chdtriv(double x0, double x1) nogil:
+    """See the documentation for scipy.special.chdtriv"""
+    return _func_cdfchi3_wrap(x0, x1)
+
+cpdef double chndtr(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.chndtr"""
+    return _func_cdfchn1_wrap(x0, x1, x2)
+
+cpdef double chndtridf(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.chndtridf"""
+    return _func_cdfchn3_wrap(x0, x1, x2)
+
+cpdef double chndtrinc(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.chndtrinc"""
+    return _func_cdfchn4_wrap(x0, x1, x2)
+
+cpdef double chndtrix(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.chndtrix"""
+    return _func_cdfchn2_wrap(x0, x1, x2)
+
+cpdef double cosdg(double x0) nogil:
+    """See the documentation for scipy.special.cosdg"""
+    return _func_cosdg(x0)
+
+cpdef double cosm1(double x0) nogil:
+    """See the documentation for scipy.special.cosm1"""
+    return _func_cosm1(x0)
+
+cpdef double cotdg(double x0) nogil:
+    """See the documentation for scipy.special.cotdg"""
+    return _func_cotdg(x0)
+
+cpdef Dd_number_t dawsn(Dd_number_t x0) nogil:
+    """See the documentation for scipy.special.dawsn"""
+    if Dd_number_t is double:
+        return (scipy.special._ufuncs_cxx._export_faddeeva_dawsn)(x0)
+    elif Dd_number_t is double_complex:
+        return (scipy.special._ufuncs_cxx._export_faddeeva_dawsn_complex)(x0)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
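+
+# Illustrative sketch (hypothetical, not part of the generated API): dawsn
+# above shows the fused-type pattern used throughout this module --
+# Dd_number_t resolves to double or double complex at compile time, and each
+# branch calls a C function pointer exported by scipy.special._ufuncs_cxx, so
+# the call can run without the GIL. The helper below (our name) evaluates
+# dawsn over a typed memoryview inside a nogil loop.
+def _example_dawsn_grid(double[::1] xs):
+    cdef Py_ssize_t i
+    cdef double[::1] out = xs.copy()
+    with nogil:
+        for i in range(xs.shape[0]):
+            out[i] = dawsn(xs[i])
+    return out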
+
+cpdef double ellipe(double x0) nogil:
+    """See the documentation for scipy.special.ellipe"""
+    return _func_ellpe(x0)
+
+cpdef double ellipeinc(double x0, double x1) nogil:
+    """See the documentation for scipy.special.ellipeinc"""
+    return _func_ellie(x0, x1)
+
+cdef void ellipj(double x0, double x1, double *y0, double *y1, double *y2, double *y3) nogil:
+    """See the documentation for scipy.special.ellipj"""
+    _func_ellpj(x0, x1, y0, y1, y2, y3)
+
+def _ellipj_pywrap(double x0, double x1):
+    cdef double y0
+    cdef double y1
+    cdef double y2
+    cdef double y3
+    ellipj(x0, x1, &y0, &y1, &y2, &y3)
+    return y0, y1, y2, y3
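+
+# Illustrative sketch (hypothetical, not part of the generated API):
+# multi-output routines such as ellipj are exposed as cdef functions that
+# write through C pointers, with a private _*_pywrap def function layered on
+# top for Python callers. From Cython the pointer form can be used directly,
+# as in this helper (our name):
+def _example_ellipj_sn(double u, double m):
+    # Return only the Jacobi sn value, discarding cn, dn and phi.
+    cdef double sn, cn, dn, ph
+    ellipj(u, m, &sn, &cn, &dn, &ph)
+    return sn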
+
+cpdef double ellipkinc(double x0, double x1) nogil:
+    """See the documentation for scipy.special.ellipkinc"""
+    return _func_ellik(x0, x1)
+
+cpdef double ellipkm1(double x0) nogil:
+    """See the documentation for scipy.special.ellipkm1"""
+    return _func_ellpk(x0)
+
+cpdef double ellipk(double x0) nogil:
+    """See the documentation for scipy.special.ellipk"""
+    return _func_ellipk(x0)
+
+cpdef Dd_number_t elliprc(Dd_number_t x0, Dd_number_t x1) nogil:
+    """See the documentation for scipy.special.elliprc"""
+    if Dd_number_t is double:
+        return (<double(*)(double, double) nogil>scipy.special._ufuncs_cxx._export_fellint_RC)(x0, x1)
+    elif Dd_number_t is double_complex:
+        return (<double complex(*)(double complex, double complex) nogil>scipy.special._ufuncs_cxx._export_cellint_RC)(x0, x1)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef Dd_number_t elliprd(Dd_number_t x0, Dd_number_t x1, Dd_number_t x2) nogil:
+    """See the documentation for scipy.special.elliprd"""
+    if Dd_number_t is double:
+        return (<double(*)(double, double, double) nogil>scipy.special._ufuncs_cxx._export_fellint_RD)(x0, x1, x2)
+    elif Dd_number_t is double_complex:
+        return (<double complex(*)(double complex, double complex, double complex) nogil>scipy.special._ufuncs_cxx._export_cellint_RD)(x0, x1, x2)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef Dd_number_t elliprf(Dd_number_t x0, Dd_number_t x1, Dd_number_t x2) nogil:
+    """See the documentation for scipy.special.elliprf"""
+    if Dd_number_t is double:
+        return (<double(*)(double, double, double) nogil>scipy.special._ufuncs_cxx._export_fellint_RF)(x0, x1, x2)
+    elif Dd_number_t is double_complex:
+        return (<double complex(*)(double complex, double complex, double complex) nogil>scipy.special._ufuncs_cxx._export_cellint_RF)(x0, x1, x2)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef Dd_number_t elliprg(Dd_number_t x0, Dd_number_t x1, Dd_number_t x2) nogil:
+    """See the documentation for scipy.special.elliprg"""
+    if Dd_number_t is double:
+        return (<double(*)(double, double, double) nogil>scipy.special._ufuncs_cxx._export_fellint_RG)(x0, x1, x2)
+    elif Dd_number_t is double_complex:
+        return (<double complex(*)(double complex, double complex, double complex) nogil>scipy.special._ufuncs_cxx._export_cellint_RG)(x0, x1, x2)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef Dd_number_t elliprj(Dd_number_t x0, Dd_number_t x1, Dd_number_t x2, Dd_number_t x3) nogil:
+    """See the documentation for scipy.special.elliprj"""
+    if Dd_number_t is double:
+        return (<double(*)(double, double, double, double) nogil>scipy.special._ufuncs_cxx._export_fellint_RJ)(x0, x1, x2, x3)
+    elif Dd_number_t is double_complex:
+        return (<double complex(*)(double complex, double complex, double complex, double complex) nogil>scipy.special._ufuncs_cxx._export_cellint_RJ)(x0, x1, x2, x3)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef double entr(double x0) nogil:
+    """See the documentation for scipy.special.entr"""
+    return _func_entr(x0)
+
+cpdef Dd_number_t erf(Dd_number_t x0) nogil:
+    """See the documentation for scipy.special.erf"""
+    if Dd_number_t is double_complex:
+        return (<double complex(*)(double complex) nogil>scipy.special._ufuncs_cxx._export_faddeeva_erf)(x0)
+    elif Dd_number_t is double:
+        return _func_erf(x0)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef Dd_number_t erfc(Dd_number_t x0) nogil:
+    """See the documentation for scipy.special.erfc"""
+    if Dd_number_t is double_complex:
+        return (<double complex(*)(double complex) nogil>scipy.special._ufuncs_cxx._export_faddeeva_erfc_complex)(x0)
+    elif Dd_number_t is double:
+        return _func_erfc(x0)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef Dd_number_t erfcx(Dd_number_t x0) nogil:
+    """See the documentation for scipy.special.erfcx"""
+    if Dd_number_t is double:
+        return (<double(*)(double) nogil>scipy.special._ufuncs_cxx._export_faddeeva_erfcx)(x0)
+    elif Dd_number_t is double_complex:
+        return (<double complex(*)(double complex) nogil>scipy.special._ufuncs_cxx._export_faddeeva_erfcx_complex)(x0)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef Dd_number_t erfi(Dd_number_t x0) nogil:
+    """See the documentation for scipy.special.erfi"""
+    if Dd_number_t is double:
+        return (<double(*)(double) nogil>scipy.special._ufuncs_cxx._export_faddeeva_erfi)(x0)
+    elif Dd_number_t is double_complex:
+        return (<double complex(*)(double complex) nogil>scipy.special._ufuncs_cxx._export_faddeeva_erfi_complex)(x0)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef df_number_t erfinv(df_number_t x0) nogil:
+    """See the documentation for scipy.special.erfinv"""
+    if df_number_t is float:
+        return (<float(*)(float) nogil>scipy.special._ufuncs_cxx._export_erfinv_float)(x0)
+    elif df_number_t is double:
+        return (<double(*)(double) nogil>scipy.special._ufuncs_cxx._export_erfinv_double)(x0)
+    else:
+        if df_number_t is double:
+            return NAN
+        else:
+            return NAN
+
+cpdef double erfcinv(double x0) nogil:
+    """See the documentation for scipy.special.erfcinv"""
+    return _func_erfcinv(x0)
+
+cpdef Dd_number_t eval_chebyc(dl_number_t x0, Dd_number_t x1) nogil:
+    """See the documentation for scipy.special.eval_chebyc"""
+    if dl_number_t is double and Dd_number_t is double_complex:
+        return _func_eval_chebyc[double_complex](x0, x1)
+    elif dl_number_t is double and Dd_number_t is double:
+        return _func_eval_chebyc[double](x0, x1)
+    elif dl_number_t is long and Dd_number_t is double:
+        return _func_eval_chebyc_l(x0, x1)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef Dd_number_t eval_chebys(dl_number_t x0, Dd_number_t x1) nogil:
+    """See the documentation for scipy.special.eval_chebys"""
+    if dl_number_t is double and Dd_number_t is double_complex:
+        return _func_eval_chebys[double_complex](x0, x1)
+    elif dl_number_t is double and Dd_number_t is double:
+        return _func_eval_chebys[double](x0, x1)
+    elif dl_number_t is long and Dd_number_t is double:
+        return _func_eval_chebys_l(x0, x1)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef Dd_number_t eval_chebyt(dl_number_t x0, Dd_number_t x1) nogil:
+    """See the documentation for scipy.special.eval_chebyt"""
+    if dl_number_t is double and Dd_number_t is double_complex:
+        return _func_eval_chebyt[double_complex](x0, x1)
+    elif dl_number_t is double and Dd_number_t is double:
+        return _func_eval_chebyt[double](x0, x1)
+    elif dl_number_t is long and Dd_number_t is double:
+        return _func_eval_chebyt_l(x0, x1)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef Dd_number_t eval_chebyu(dl_number_t x0, Dd_number_t x1) nogil:
+    """See the documentation for scipy.special.eval_chebyu"""
+    if dl_number_t is double and Dd_number_t is double_complex:
+        return _func_eval_chebyu[double_complex](x0, x1)
+    elif dl_number_t is double and Dd_number_t is double:
+        return _func_eval_chebyu[double](x0, x1)
+    elif dl_number_t is long and Dd_number_t is double:
+        return _func_eval_chebyu_l(x0, x1)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef Dd_number_t eval_gegenbauer(dl_number_t x0, double x1, Dd_number_t x2) nogil:
+    """See the documentation for scipy.special.eval_gegenbauer"""
+    if dl_number_t is double and Dd_number_t is double_complex:
+        return _func_eval_gegenbauer[double_complex](x0, x1, x2)
+    elif dl_number_t is double and Dd_number_t is double:
+        return _func_eval_gegenbauer[double](x0, x1, x2)
+    elif dl_number_t is long and Dd_number_t is double:
+        return _func_eval_gegenbauer_l(x0, x1, x2)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef Dd_number_t eval_genlaguerre(dl_number_t x0, double x1, Dd_number_t x2) nogil:
+    """See the documentation for scipy.special.eval_genlaguerre"""
+    if dl_number_t is double and Dd_number_t is double_complex:
+        return _func_eval_genlaguerre[double_complex](x0, x1, x2)
+    elif dl_number_t is double and Dd_number_t is double:
+        return _func_eval_genlaguerre[double](x0, x1, x2)
+    elif dl_number_t is long and Dd_number_t is double:
+        return _func_eval_genlaguerre_l(x0, x1, x2)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef double eval_hermite(long x0, double x1) nogil:
+    """See the documentation for scipy.special.eval_hermite"""
+    return _func_eval_hermite(x0, x1)
+
+cpdef double eval_hermitenorm(long x0, double x1) nogil:
+    """See the documentation for scipy.special.eval_hermitenorm"""
+    return _func_eval_hermitenorm(x0, x1)
+
+cpdef Dd_number_t eval_jacobi(dl_number_t x0, double x1, double x2, Dd_number_t x3) nogil:
+    """See the documentation for scipy.special.eval_jacobi"""
+    if dl_number_t is double and Dd_number_t is double_complex:
+        return _func_eval_jacobi[double_complex](x0, x1, x2, x3)
+    elif dl_number_t is double and Dd_number_t is double:
+        return _func_eval_jacobi[double](x0, x1, x2, x3)
+    elif dl_number_t is long and Dd_number_t is double:
+        return _func_eval_jacobi_l(x0, x1, x2, x3)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef Dd_number_t eval_laguerre(dl_number_t x0, Dd_number_t x1) nogil:
+    """See the documentation for scipy.special.eval_laguerre"""
+    if dl_number_t is double and Dd_number_t is double_complex:
+        return _func_eval_laguerre[double_complex](x0, x1)
+    elif dl_number_t is double and Dd_number_t is double:
+        return _func_eval_laguerre[double](x0, x1)
+    elif dl_number_t is long and Dd_number_t is double:
+        return _func_eval_laguerre_l(x0, x1)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef Dd_number_t eval_legendre(dl_number_t x0, Dd_number_t x1) nogil:
+    """See the documentation for scipy.special.eval_legendre"""
+    if dl_number_t is double and Dd_number_t is double_complex:
+        return _func_eval_legendre[double_complex](x0, x1)
+    elif dl_number_t is double and Dd_number_t is double:
+        return _func_eval_legendre[double](x0, x1)
+    elif dl_number_t is long and Dd_number_t is double:
+        return _func_eval_legendre_l(x0, x1)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef Dd_number_t eval_sh_chebyt(dl_number_t x0, Dd_number_t x1) nogil:
+    """See the documentation for scipy.special.eval_sh_chebyt"""
+    if dl_number_t is double and Dd_number_t is double_complex:
+        return _func_eval_sh_chebyt[double_complex](x0, x1)
+    elif dl_number_t is double and Dd_number_t is double:
+        return _func_eval_sh_chebyt[double](x0, x1)
+    elif dl_number_t is long and Dd_number_t is double:
+        return _func_eval_sh_chebyt_l(x0, x1)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef Dd_number_t eval_sh_chebyu(dl_number_t x0, Dd_number_t x1) nogil:
+    """See the documentation for scipy.special.eval_sh_chebyu"""
+    if dl_number_t is double and Dd_number_t is double_complex:
+        return _func_eval_sh_chebyu[double_complex](x0, x1)
+    elif dl_number_t is double and Dd_number_t is double:
+        return _func_eval_sh_chebyu[double](x0, x1)
+    elif dl_number_t is long and Dd_number_t is double:
+        return _func_eval_sh_chebyu_l(x0, x1)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef Dd_number_t eval_sh_jacobi(dl_number_t x0, double x1, double x2, Dd_number_t x3) nogil:
+    """See the documentation for scipy.special.eval_sh_jacobi"""
+    if dl_number_t is double and Dd_number_t is double_complex:
+        return _func_eval_sh_jacobi[double_complex](x0, x1, x2, x3)
+    elif dl_number_t is double and Dd_number_t is double:
+        return _func_eval_sh_jacobi[double](x0, x1, x2, x3)
+    elif dl_number_t is long and Dd_number_t is double:
+        return _func_eval_sh_jacobi_l(x0, x1, x2, x3)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef Dd_number_t eval_sh_legendre(dl_number_t x0, Dd_number_t x1) nogil:
+    """See the documentation for scipy.special.eval_sh_legendre"""
+    if dl_number_t is double and Dd_number_t is double_complex:
+        return _func_eval_sh_legendre[double_complex](x0, x1)
+    elif dl_number_t is double and Dd_number_t is double:
+        return _func_eval_sh_legendre[double](x0, x1)
+    elif dl_number_t is long and Dd_number_t is double:
+        return _func_eval_sh_legendre_l(x0, x1)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef Dd_number_t exp1(Dd_number_t x0) nogil:
+    """See the documentation for scipy.special.exp1"""
+    if Dd_number_t is double_complex:
+        return _complexstuff.double_complex_from_npy_cdouble(_func_cexp1_wrap(_complexstuff.npy_cdouble_from_double_complex(x0)))
+    elif Dd_number_t is double:
+        return _func_exp1_wrap(x0)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef double exp10(double x0) nogil:
+    """See the documentation for scipy.special.exp10"""
+    return _func_exp10(x0)
+
+cpdef double exp2(double x0) nogil:
+    """See the documentation for scipy.special.exp2"""
+    return _func_exp2(x0)
+
+cpdef Dd_number_t expi(Dd_number_t x0) nogil:
+    """See the documentation for scipy.special.expi"""
+    if Dd_number_t is double_complex:
+        return _complexstuff.double_complex_from_npy_cdouble(_func_cexpi_wrap(_complexstuff.npy_cdouble_from_double_complex(x0)))
+    elif Dd_number_t is double:
+        return _func_expi_wrap(x0)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef dfg_number_t expit(dfg_number_t x0) nogil:
+    """See the documentation for scipy.special.expit"""
+    if dfg_number_t is double:
+        return (<double(*)(double) nogil>scipy.special._ufuncs_cxx._export_expit)(x0)
+    elif dfg_number_t is float:
+        return (<float(*)(float) nogil>scipy.special._ufuncs_cxx._export_expitf)(x0)
+    elif dfg_number_t is long_double:
+        return (<long double(*)(long double) nogil>scipy.special._ufuncs_cxx._export_expitl)(x0)
+    else:
+        if dfg_number_t is double:
+            return NAN
+        elif dfg_number_t is float:
+            return NAN
+        else:
+            return NAN
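+
+# Illustrative sketch (hypothetical, not part of the generated API): expit is
+# fused over float, double and long double (dfg_number_t), and the
+# specialization is chosen from the argument type at each call site. The
+# helper below (our name) exercises all three.
+def _example_expit_precisions(double x):
+    cdef float xf = <float>x
+    cdef long double xl = <long double>x
+    # float, double and long double specializations respectively.
+    return expit(xf), expit(x), expit(xl)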
+
+cpdef Dd_number_t expm1(Dd_number_t x0) nogil:
+    """See the documentation for scipy.special.expm1"""
+    if Dd_number_t is double_complex:
+        return _func_cexpm1(x0)
+    elif Dd_number_t is double:
+        return _func_expm1(x0)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef double expn(dl_number_t x0, double x1) nogil:
+    """See the documentation for scipy.special.expn"""
+    if dl_number_t is double:
+        return _func_expn_unsafe(x0, x1)
+    elif dl_number_t is long:
+        return _func_expn(x0, x1)
+    else:
+        return NAN
+
+cpdef double exprel(double x0) nogil:
+    """See the documentation for scipy.special.exprel"""
+    return _func_exprel(x0)
+
+cpdef double fdtr(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.fdtr"""
+    return _func_fdtr(x0, x1, x2)
+
+cpdef double fdtrc(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.fdtrc"""
+    return _func_fdtrc(x0, x1, x2)
+
+cpdef double fdtri(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.fdtri"""
+    return _func_fdtri(x0, x1, x2)
+
+cpdef double fdtridfd(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.fdtridfd"""
+    return _func_cdff4_wrap(x0, x1, x2)
+
+cdef void fresnel(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1) nogil:
+    """See the documentation for scipy.special.fresnel"""
+    cdef npy_cdouble tmp0
+    cdef npy_cdouble tmp1
+    if Dd_number_t is double:
+        _func_fresnl(x0, y0, y1)
+    elif Dd_number_t is double_complex:
+        _func_cfresnl_wrap(_complexstuff.npy_cdouble_from_double_complex(x0), &tmp0, &tmp1)
+        y0[0] = _complexstuff.double_complex_from_npy_cdouble(tmp0)
+        y1[0] = _complexstuff.double_complex_from_npy_cdouble(tmp1)
+    else:
+        if Dd_number_t is double_complex:
+            y0[0] = NAN
+            y1[0] = NAN
+        else:
+            y0[0] = NAN
+            y1[0] = NAN
+
+def _fresnel_pywrap(Dd_number_t x0):
+    cdef Dd_number_t y0
+    cdef Dd_number_t y1
+    fresnel(x0, &y0, &y1)
+    return y0, y1
+
+cpdef Dd_number_t gamma(Dd_number_t x0) nogil:
+    """See the documentation for scipy.special.gamma"""
+    if Dd_number_t is double_complex:
+        return _func_cgamma(x0)
+    elif Dd_number_t is double:
+        return _func_Gamma(x0)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef double gammainc(double x0, double x1) nogil:
+    """See the documentation for scipy.special.gammainc"""
+    return _func_igam(x0, x1)
+
+cpdef double gammaincc(double x0, double x1) nogil:
+    """See the documentation for scipy.special.gammaincc"""
+    return _func_igamc(x0, x1)
+
+cpdef double gammainccinv(double x0, double x1) nogil:
+    """See the documentation for scipy.special.gammainccinv"""
+    return _func_igamci(x0, x1)
+
+cpdef double gammaincinv(double x0, double x1) nogil:
+    """See the documentation for scipy.special.gammaincinv"""
+    return _func_igami(x0, x1)
+
+cpdef double gammaln(double x0) nogil:
+    """See the documentation for scipy.special.gammaln"""
+    return _func_lgam(x0)
+
+cpdef double gammasgn(double x0) nogil:
+    """See the documentation for scipy.special.gammasgn"""
+    return _func_gammasgn(x0)
+
+cpdef double gdtr(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.gdtr"""
+    return _func_gdtr(x0, x1, x2)
+
+cpdef double gdtrc(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.gdtrc"""
+    return _func_gdtrc(x0, x1, x2)
+
+cpdef double gdtria(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.gdtria"""
+    return _func_cdfgam4_wrap(x0, x1, x2)
+
+cpdef double gdtrib(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.gdtrib"""
+    return _func_cdfgam3_wrap(x0, x1, x2)
+
+cpdef double gdtrix(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.gdtrix"""
+    return _func_cdfgam2_wrap(x0, x1, x2)
+
+cpdef double complex hankel1(double x0, double complex x1) nogil:
+    """See the documentation for scipy.special.hankel1"""
+    return _complexstuff.double_complex_from_npy_cdouble(_func_cbesh_wrap1(x0, _complexstuff.npy_cdouble_from_double_complex(x1)))
+
+cpdef double complex hankel1e(double x0, double complex x1) nogil:
+    """See the documentation for scipy.special.hankel1e"""
+    return _complexstuff.double_complex_from_npy_cdouble(_func_cbesh_wrap1_e(x0, _complexstuff.npy_cdouble_from_double_complex(x1)))
+
+cpdef double complex hankel2(double x0, double complex x1) nogil:
+    """See the documentation for scipy.special.hankel2"""
+    return _complexstuff.double_complex_from_npy_cdouble(_func_cbesh_wrap2(x0, _complexstuff.npy_cdouble_from_double_complex(x1)))
+
+cpdef double complex hankel2e(double x0, double complex x1) nogil:
+    """See the documentation for scipy.special.hankel2e"""
+    return _complexstuff.double_complex_from_npy_cdouble(_func_cbesh_wrap2_e(x0, _complexstuff.npy_cdouble_from_double_complex(x1)))
+
+cpdef double huber(double x0, double x1) nogil:
+    """See the documentation for scipy.special.huber"""
+    return _func_huber(x0, x1)
+
+cpdef Dd_number_t hyp0f1(double x0, Dd_number_t x1) nogil:
+    """See the documentation for scipy.special.hyp0f1"""
+    if Dd_number_t is double_complex:
+        return _func__hyp0f1_cmplx(x0, x1)
+    elif Dd_number_t is double:
+        return _func__hyp0f1_real(x0, x1)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef Dd_number_t hyp1f1(double x0, double x1, Dd_number_t x2) nogil:
+    """See the documentation for scipy.special.hyp1f1"""
+    if Dd_number_t is double:
+        return (<double(*)(double, double, double) nogil>scipy.special._ufuncs_cxx._export_hyp1f1_double)(x0, x1, x2)
+    elif Dd_number_t is double_complex:
+        return _complexstuff.double_complex_from_npy_cdouble(_func_chyp1f1_wrap(x0, x1, _complexstuff.npy_cdouble_from_double_complex(x2)))
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef Dd_number_t hyp2f1(double x0, double x1, double x2, Dd_number_t x3) nogil:
+    """See the documentation for scipy.special.hyp2f1"""
+    if Dd_number_t is double:
+        return _func_hyp2f1(x0, x1, x2, x3)
+    elif Dd_number_t is double_complex:
+        return _func_hyp2f1_complex(x0, x1, x2, x3)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef double hyperu(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.hyperu"""
+    return _func_hyperu(x0, x1, x2)
+
+cpdef double i0(double x0) nogil:
+    """See the documentation for scipy.special.i0"""
+    return _func_i0(x0)
+
+cpdef double i0e(double x0) nogil:
+    """See the documentation for scipy.special.i0e"""
+    return _func_i0e(x0)
+
+cpdef double i1(double x0) nogil:
+    """See the documentation for scipy.special.i1"""
+    return _func_i1(x0)
+
+cpdef double i1e(double x0) nogil:
+    """See the documentation for scipy.special.i1e"""
+    return _func_i1e(x0)
+
+cpdef double inv_boxcox(double x0, double x1) nogil:
+    """See the documentation for scipy.special.inv_boxcox"""
+    return _func_inv_boxcox(x0, x1)
+
+cpdef double inv_boxcox1p(double x0, double x1) nogil:
+    """See the documentation for scipy.special.inv_boxcox1p"""
+    return _func_inv_boxcox1p(x0, x1)
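+
+# Illustrative sketch (hypothetical, not part of the generated API): boxcox
+# and inv_boxcox (and the 1p variants above) are mutual inverses in their
+# first argument for a fixed lambda. Helper name is ours, for illustration.
+def _example_boxcox_roundtrip(double x, double lmbda):
+    # Should return approximately x for x > 0.
+    return inv_boxcox(boxcox(x, lmbda), lmbda)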
+
+cdef void it2i0k0(double x0, double *y0, double *y1) nogil:
+    """See the documentation for scipy.special.it2i0k0"""
+    _func_it2i0k0_wrap(x0, y0, y1)
+
+def _it2i0k0_pywrap(double x0):
+    cdef double y0
+    cdef double y1
+    it2i0k0(x0, &y0, &y1)
+    return y0, y1
+
+cdef void it2j0y0(double x0, double *y0, double *y1) nogil:
+    """See the documentation for scipy.special.it2j0y0"""
+    _func_it2j0y0_wrap(x0, y0, y1)
+
+def _it2j0y0_pywrap(double x0):
+    cdef double y0
+    cdef double y1
+    it2j0y0(x0, &y0, &y1)
+    return y0, y1
+
+cpdef double it2struve0(double x0) nogil:
+    """See the documentation for scipy.special.it2struve0"""
+    return _func_it2struve0_wrap(x0)
+
+cdef void itairy(double x0, double *y0, double *y1, double *y2, double *y3) nogil:
+    """See the documentation for scipy.special.itairy"""
+    _func_itairy_wrap(x0, y0, y1, y2, y3)
+
+def _itairy_pywrap(double x0):
+    cdef double y0
+    cdef double y1
+    cdef double y2
+    cdef double y3
+    itairy(x0, &y0, &y1, &y2, &y3)
+    return y0, y1, y2, y3
+
+cdef void iti0k0(double x0, double *y0, double *y1) nogil:
+    """See the documentation for scipy.special.iti0k0"""
+    _func_it1i0k0_wrap(x0, y0, y1)
+
+def _iti0k0_pywrap(double x0):
+    cdef double y0
+    cdef double y1
+    iti0k0(x0, &y0, &y1)
+    return y0, y1
+
+cdef void itj0y0(double x0, double *y0, double *y1) nogil:
+    """See the documentation for scipy.special.itj0y0"""
+    _func_it1j0y0_wrap(x0, y0, y1)
+
+def _itj0y0_pywrap(double x0):
+    cdef double y0
+    cdef double y1
+    itj0y0(x0, &y0, &y1)
+    return y0, y1
+
+cpdef double itmodstruve0(double x0) nogil:
+    """See the documentation for scipy.special.itmodstruve0"""
+    return _func_itmodstruve0_wrap(x0)
+
+cpdef double itstruve0(double x0) nogil:
+    """See the documentation for scipy.special.itstruve0"""
+    return _func_itstruve0_wrap(x0)
+
+cpdef Dd_number_t iv(double x0, Dd_number_t x1) nogil:
+    """See the documentation for scipy.special.iv"""
+    if Dd_number_t is double_complex:
+        return _complexstuff.double_complex_from_npy_cdouble(_func_cbesi_wrap(x0, _complexstuff.npy_cdouble_from_double_complex(x1)))
+    elif Dd_number_t is double:
+        return _func_iv(x0, x1)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef Dd_number_t ive(double x0, Dd_number_t x1) nogil:
+    """See the documentation for scipy.special.ive"""
+    if Dd_number_t is double_complex:
+        return _complexstuff.double_complex_from_npy_cdouble(_func_cbesi_wrap_e(x0, _complexstuff.npy_cdouble_from_double_complex(x1)))
+    elif Dd_number_t is double:
+        return _func_cbesi_wrap_e_real(x0, x1)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef double j0(double x0) nogil:
+    """See the documentation for scipy.special.j0"""
+    return _func_j0(x0)
+
+cpdef double j1(double x0) nogil:
+    """See the documentation for scipy.special.j1"""
+    return _func_j1(x0)
+
+cpdef Dd_number_t jv(double x0, Dd_number_t x1) nogil:
+    """See the documentation for scipy.special.jv"""
+    if Dd_number_t is double_complex:
+        return _complexstuff.double_complex_from_npy_cdouble(_func_cbesj_wrap(x0, _complexstuff.npy_cdouble_from_double_complex(x1)))
+    elif Dd_number_t is double:
+        return _func_cbesj_wrap_real(x0, x1)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef Dd_number_t jve(double x0, Dd_number_t x1) nogil:
+    """See the documentation for scipy.special.jve"""
+    if Dd_number_t is double_complex:
+        return _complexstuff.double_complex_from_npy_cdouble(_func_cbesj_wrap_e(x0, _complexstuff.npy_cdouble_from_double_complex(x1)))
+    elif Dd_number_t is double:
+        return _func_cbesj_wrap_e_real(x0, x1)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef double k0(double x0) nogil:
+    """See the documentation for scipy.special.k0"""
+    return _func_k0(x0)
+
+cpdef double k0e(double x0) nogil:
+    """See the documentation for scipy.special.k0e"""
+    return _func_k0e(x0)
+
+cpdef double k1(double x0) nogil:
+    """See the documentation for scipy.special.k1"""
+    return _func_k1(x0)
+
+cpdef double k1e(double x0) nogil:
+    """See the documentation for scipy.special.k1e"""
+    return _func_k1e(x0)
+
+cpdef double kei(double x0) nogil:
+    """See the documentation for scipy.special.kei"""
+    return _func_kei_wrap(x0)
+
+cpdef double keip(double x0) nogil:
+    """See the documentation for scipy.special.keip"""
+    return _func_keip_wrap(x0)
+
+cdef void kelvin(double x0, double complex *y0, double complex *y1, double complex *y2, double complex *y3) nogil:
+    """See the documentation for scipy.special.kelvin"""
+    cdef npy_cdouble tmp0
+    cdef npy_cdouble tmp1
+    cdef npy_cdouble tmp2
+    cdef npy_cdouble tmp3
+    _func_kelvin_wrap(x0, &tmp0, &tmp1, &tmp2, &tmp3)
+    y0[0] = _complexstuff.double_complex_from_npy_cdouble(tmp0)
+    y1[0] = _complexstuff.double_complex_from_npy_cdouble(tmp1)
+    y2[0] = _complexstuff.double_complex_from_npy_cdouble(tmp2)
+    y3[0] = _complexstuff.double_complex_from_npy_cdouble(tmp3)
+
+def _kelvin_pywrap(double x0):
+    cdef double complex y0
+    cdef double complex y1
+    cdef double complex y2
+    cdef double complex y3
+    kelvin(x0, &y0, &y1, &y2, &y3)
+    return y0, y1, y2, y3
+
+cpdef double ker(double x0) nogil:
+    """See the documentation for scipy.special.ker"""
+    return _func_ker_wrap(x0)
+
+cpdef double kerp(double x0) nogil:
+    """See the documentation for scipy.special.kerp"""
+    return _func_kerp_wrap(x0)
+
+cpdef double kl_div(double x0, double x1) nogil:
+    """See the documentation for scipy.special.kl_div"""
+    return _func_kl_div(x0, x1)
+
+cpdef double kn(dl_number_t x0, double x1) nogil:
+    """See the documentation for scipy.special.kn"""
+    if dl_number_t is double:
+        return _func_kn_unsafe(x0, x1)
+    elif dl_number_t is long:
+        return _func_cbesk_wrap_real_int(x0, x1)
+    else:
+        return NAN
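+
+# Illustrative sketch (hypothetical, not part of the generated API): kn is
+# fused over the order type (dl_number_t). A C long order calls the
+# integer-order kernel directly, while a double order goes through the
+# "unsafe" wrapper that validates and casts at runtime. Helper name is ours.
+def _example_kn_order_dispatch(double x):
+    cdef long n_int = 2
+    cdef double n_dbl = 2.0
+    # Both should agree; they reach the kernel through different wrappers.
+    return kn(n_int, x), kn(n_dbl, x)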
+
+cpdef double kolmogi(double x0) nogil:
+    """See the documentation for scipy.special.kolmogi"""
+    return _func_kolmogi(x0)
+
+cpdef double kolmogorov(double x0) nogil:
+    """See the documentation for scipy.special.kolmogorov"""
+    return _func_kolmogorov(x0)
+
+cpdef Dd_number_t kv(double x0, Dd_number_t x1) nogil:
+    """See the documentation for scipy.special.kv"""
+    if Dd_number_t is double_complex:
+        return _complexstuff.double_complex_from_npy_cdouble(_func_cbesk_wrap(x0, _complexstuff.npy_cdouble_from_double_complex(x1)))
+    elif Dd_number_t is double:
+        return _func_cbesk_wrap_real(x0, x1)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef Dd_number_t kve(double x0, Dd_number_t x1) nogil:
+    """See the documentation for scipy.special.kve"""
+    if Dd_number_t is double_complex:
+        return _complexstuff.double_complex_from_npy_cdouble(_func_cbesk_wrap_e(x0, _complexstuff.npy_cdouble_from_double_complex(x1)))
+    elif Dd_number_t is double:
+        return _func_cbesk_wrap_e_real(x0, x1)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef Dd_number_t log1p(Dd_number_t x0) nogil:
+    """See the documentation for scipy.special.log1p"""
+    if Dd_number_t is double_complex:
+        return _func_clog1p(x0)
+    elif Dd_number_t is double:
+        return _func_log1p(x0)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef dfg_number_t log_expit(dfg_number_t x0) nogil:
+    """See the documentation for scipy.special.log_expit"""
+    if dfg_number_t is double:
+        return (<double(*)(double) nogil>scipy.special._ufuncs_cxx._export_log_expit)(x0)
+    elif dfg_number_t is float:
+        return (<float(*)(float) nogil>scipy.special._ufuncs_cxx._export_log_expitf)(x0)
+    elif dfg_number_t is long_double:
+        return (<long double(*)(long double) nogil>scipy.special._ufuncs_cxx._export_log_expitl)(x0)
+    else:
+        if dfg_number_t is double:
+            return NAN
+        elif dfg_number_t is float:
+            return NAN
+        else:
+            return NAN
+
+cpdef Dd_number_t log_ndtr(Dd_number_t x0) nogil:
+    """See the documentation for scipy.special.log_ndtr"""
+    if Dd_number_t is double:
+        return (<double(*)(double) nogil>scipy.special._ufuncs_cxx._export_faddeeva_log_ndtr)(x0)
+    elif Dd_number_t is double_complex:
+        return (<double complex(*)(double complex) nogil>scipy.special._ufuncs_cxx._export_faddeeva_log_ndtr_complex)(x0)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef Dd_number_t loggamma(Dd_number_t x0) nogil:
+    """See the documentation for scipy.special.loggamma"""
+    if Dd_number_t is double:
+        return _func_loggamma_real(x0)
+    elif Dd_number_t is double_complex:
+        return _func_loggamma(x0)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef dfg_number_t logit(dfg_number_t x0) nogil:
+    """See the documentation for scipy.special.logit"""
+    if dfg_number_t is double:
+        return (<double(*)(double) nogil>scipy.special._ufuncs_cxx._export_logit)(x0)
+    elif dfg_number_t is float:
+        return (<float(*)(float) nogil>scipy.special._ufuncs_cxx._export_logitf)(x0)
+    elif dfg_number_t is long_double:
+        return (<long double(*)(long double) nogil>scipy.special._ufuncs_cxx._export_logitl)(x0)
+    else:
+        if dfg_number_t is double:
+            return NAN
+        elif dfg_number_t is float:
+            return NAN
+        else:
+            return NAN
+
+cpdef double lpmv(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.lpmv"""
+    return _func_pmv_wrap(x0, x1, x2)
+
+cpdef double mathieu_a(double x0, double x1) nogil:
+    """See the documentation for scipy.special.mathieu_a"""
+    return _func_cem_cva_wrap(x0, x1)
+
+cpdef double mathieu_b(double x0, double x1) nogil:
+    """See the documentation for scipy.special.mathieu_b"""
+    return _func_sem_cva_wrap(x0, x1)
+
+cdef void mathieu_cem(double x0, double x1, double x2, double *y0, double *y1) nogil:
+    """See the documentation for scipy.special.mathieu_cem"""
+    _func_cem_wrap(x0, x1, x2, y0, y1)
+
+def _mathieu_cem_pywrap(double x0, double x1, double x2):
+    cdef double y0
+    cdef double y1
+    mathieu_cem(x0, x1, x2, &y0, &y1)
+    return y0, y1
+
+cdef void mathieu_modcem1(double x0, double x1, double x2, double *y0, double *y1) nogil:
+    """See the documentation for scipy.special.mathieu_modcem1"""
+    _func_mcm1_wrap(x0, x1, x2, y0, y1)
+
+def _mathieu_modcem1_pywrap(double x0, double x1, double x2):
+    cdef double y0
+    cdef double y1
+    mathieu_modcem1(x0, x1, x2, &y0, &y1)
+    return y0, y1
+
+cdef void mathieu_modcem2(double x0, double x1, double x2, double *y0, double *y1) nogil:
+    """See the documentation for scipy.special.mathieu_modcem2"""
+    _func_mcm2_wrap(x0, x1, x2, y0, y1)
+
+def _mathieu_modcem2_pywrap(double x0, double x1, double x2):
+    cdef double y0
+    cdef double y1
+    mathieu_modcem2(x0, x1, x2, &y0, &y1)
+    return y0, y1
+
+cdef void mathieu_modsem1(double x0, double x1, double x2, double *y0, double *y1) nogil:
+    """See the documentation for scipy.special.mathieu_modsem1"""
+    _func_msm1_wrap(x0, x1, x2, y0, y1)
+
+def _mathieu_modsem1_pywrap(double x0, double x1, double x2):
+    cdef double y0
+    cdef double y1
+    mathieu_modsem1(x0, x1, x2, &y0, &y1)
+    return y0, y1
+
+cdef void mathieu_modsem2(double x0, double x1, double x2, double *y0, double *y1) nogil:
+    """See the documentation for scipy.special.mathieu_modsem2"""
+    _func_msm2_wrap(x0, x1, x2, y0, y1)
+
+def _mathieu_modsem2_pywrap(double x0, double x1, double x2):
+    cdef double y0
+    cdef double y1
+    mathieu_modsem2(x0, x1, x2, &y0, &y1)
+    return y0, y1
+
+cdef void mathieu_sem(double x0, double x1, double x2, double *y0, double *y1) nogil:
+    """See the documentation for scipy.special.mathieu_sem"""
+    _func_sem_wrap(x0, x1, x2, y0, y1)
+
+def _mathieu_sem_pywrap(double x0, double x1, double x2):
+    cdef double y0
+    cdef double y1
+    mathieu_sem(x0, x1, x2, &y0, &y1)
+    return y0, y1
+
+cdef void modfresnelm(double x0, double complex *y0, double complex *y1) nogil:
+    """See the documentation for scipy.special.modfresnelm"""
+    cdef npy_cdouble tmp0
+    cdef npy_cdouble tmp1
+    _func_modified_fresnel_minus_wrap(x0, &tmp0, &tmp1)
+    y0[0] = _complexstuff.double_complex_from_npy_cdouble(tmp0)
+    y1[0] = _complexstuff.double_complex_from_npy_cdouble(tmp1)
+
+def _modfresnelm_pywrap(double x0):
+    cdef double complex y0
+    cdef double complex y1
+    modfresnelm(x0, &y0, &y1)
+    return y0, y1
+
+cdef void modfresnelp(double x0, double complex *y0, double complex *y1) nogil:
+    """See the documentation for scipy.special.modfresnelp"""
+    cdef npy_cdouble tmp0
+    cdef npy_cdouble tmp1
+    _func_modified_fresnel_plus_wrap(x0, &tmp0, &tmp1)
+    y0[0] = _complexstuff.double_complex_from_npy_cdouble(tmp0)
+    y1[0] = _complexstuff.double_complex_from_npy_cdouble(tmp1)
+
+def _modfresnelp_pywrap(double x0):
+    cdef double complex y0
+    cdef double complex y1
+    modfresnelp(x0, &y0, &y1)
+    return y0, y1
+
+cpdef double modstruve(double x0, double x1) nogil:
+    """See the documentation for scipy.special.modstruve"""
+    return _func_struve_l(x0, x1)
+
+cpdef double nbdtr(dl_number_t x0, dl_number_t x1, double x2) nogil:
+    """See the documentation for scipy.special.nbdtr"""
+    if dl_number_t is double:
+        return _func_nbdtr_unsafe(x0, x1, x2)
+    elif dl_number_t is long:
+        return _func_nbdtr(x0, x1, x2)
+    else:
+        return NAN
+
+cpdef double nbdtrc(dl_number_t x0, dl_number_t x1, double x2) nogil:
+    """See the documentation for scipy.special.nbdtrc"""
+    if dl_number_t is double:
+        return _func_nbdtrc_unsafe(x0, x1, x2)
+    elif dl_number_t is long:
+        return _func_nbdtrc(x0, x1, x2)
+    else:
+        return NAN
+
+cpdef double nbdtri(dl_number_t x0, dl_number_t x1, double x2) nogil:
+    """See the documentation for scipy.special.nbdtri"""
+    if dl_number_t is double:
+        return _func_nbdtri_unsafe(x0, x1, x2)
+    elif dl_number_t is long:
+        return _func_nbdtri(x0, x1, x2)
+    else:
+        return NAN
+
+cpdef double nbdtrik(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.nbdtrik"""
+    return _func_cdfnbn2_wrap(x0, x1, x2)
+
+cpdef double nbdtrin(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.nbdtrin"""
+    return _func_cdfnbn3_wrap(x0, x1, x2)
+
+cpdef double ncfdtr(double x0, double x1, double x2, double x3) nogil:
+    """See the documentation for scipy.special.ncfdtr"""
+    return _func_cdffnc1_wrap(x0, x1, x2, x3)
+
+cpdef double ncfdtri(double x0, double x1, double x2, double x3) nogil:
+    """See the documentation for scipy.special.ncfdtri"""
+    return _func_cdffnc2_wrap(x0, x1, x2, x3)
+
+cpdef double ncfdtridfd(double x0, double x1, double x2, double x3) nogil:
+    """See the documentation for scipy.special.ncfdtridfd"""
+    return _func_cdffnc4_wrap(x0, x1, x2, x3)
+
+cpdef double ncfdtridfn(double x0, double x1, double x2, double x3) nogil:
+    """See the documentation for scipy.special.ncfdtridfn"""
+    return _func_cdffnc3_wrap(x0, x1, x2, x3)
+
+cpdef double ncfdtrinc(double x0, double x1, double x2, double x3) nogil:
+    """See the documentation for scipy.special.ncfdtrinc"""
+    return _func_cdffnc5_wrap(x0, x1, x2, x3)
+
+cpdef double nctdtr(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.nctdtr"""
+    return _func_cdftnc1_wrap(x0, x1, x2)
+
+cpdef double nctdtridf(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.nctdtridf"""
+    return _func_cdftnc3_wrap(x0, x1, x2)
+
+cpdef double nctdtrinc(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.nctdtrinc"""
+    return _func_cdftnc4_wrap(x0, x1, x2)
+
+cpdef double nctdtrit(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.nctdtrit"""
+    return _func_cdftnc2_wrap(x0, x1, x2)
+
+cpdef Dd_number_t ndtr(Dd_number_t x0) nogil:
+    """See the documentation for scipy.special.ndtr"""
+    if Dd_number_t is double_complex:
+        return (<double complex(*)(double complex) nogil>scipy.special._ufuncs_cxx._export_faddeeva_ndtr)(x0)
+    elif Dd_number_t is double:
+        return _func_ndtr(x0)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef double ndtri(double x0) nogil:
+    """See the documentation for scipy.special.ndtri"""
+    return _func_ndtri(x0)
+
+cpdef double nrdtrimn(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.nrdtrimn"""
+    return _func_cdfnor3_wrap(x0, x1, x2)
+
+cpdef double nrdtrisd(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.nrdtrisd"""
+    return _func_cdfnor4_wrap(x0, x1, x2)
+
+cdef void obl_ang1(double x0, double x1, double x2, double x3, double *y0, double *y1) nogil:
+    """See the documentation for scipy.special.obl_ang1"""
+    y0[0] = _func_oblate_aswfa_nocv_wrap(x0, x1, x2, x3, y1)
+
+def _obl_ang1_pywrap(double x0, double x1, double x2, double x3):
+    cdef double y0
+    cdef double y1
+    obl_ang1(x0, x1, x2, x3, &y0, &y1)
+    return y0, y1
+
+cdef void obl_ang1_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) nogil:
+    """See the documentation for scipy.special.obl_ang1_cv"""
+    _func_oblate_aswfa_wrap(x0, x1, x2, x3, x4, y0, y1)
+
+def _obl_ang1_cv_pywrap(double x0, double x1, double x2, double x3, double x4):
+    cdef double y0
+    cdef double y1
+    obl_ang1_cv(x0, x1, x2, x3, x4, &y0, &y1)
+    return y0, y1
+
+cpdef double obl_cv(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.obl_cv"""
+    return _func_oblate_segv_wrap(x0, x1, x2)
+
+cdef void obl_rad1(double x0, double x1, double x2, double x3, double *y0, double *y1) nogil:
+    """See the documentation for scipy.special.obl_rad1"""
+    y0[0] = _func_oblate_radial1_nocv_wrap(x0, x1, x2, x3, y1)
+
+def _obl_rad1_pywrap(double x0, double x1, double x2, double x3):
+    cdef double y0
+    cdef double y1
+    obl_rad1(x0, x1, x2, x3, &y0, &y1)
+    return y0, y1
+
+cdef void obl_rad1_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) nogil:
+    """See the documentation for scipy.special.obl_rad1_cv"""
+    _func_oblate_radial1_wrap(x0, x1, x2, x3, x4, y0, y1)
+
+def _obl_rad1_cv_pywrap(double x0, double x1, double x2, double x3, double x4):
+    cdef double y0
+    cdef double y1
+    obl_rad1_cv(x0, x1, x2, x3, x4, &y0, &y1)
+    return y0, y1
+
+cdef void obl_rad2(double x0, double x1, double x2, double x3, double *y0, double *y1) nogil:
+    """See the documentation for scipy.special.obl_rad2"""
+    y0[0] = _func_oblate_radial2_nocv_wrap(x0, x1, x2, x3, y1)
+
+def _obl_rad2_pywrap(double x0, double x1, double x2, double x3):
+    cdef double y0
+    cdef double y1
+    obl_rad2(x0, x1, x2, x3, &y0, &y1)
+    return y0, y1
+
+cdef void obl_rad2_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) nogil:
+    """See the documentation for scipy.special.obl_rad2_cv"""
+    _func_oblate_radial2_wrap(x0, x1, x2, x3, x4, y0, y1)
+
+def _obl_rad2_cv_pywrap(double x0, double x1, double x2, double x3, double x4):
+    cdef double y0
+    cdef double y1
+    obl_rad2_cv(x0, x1, x2, x3, x4, &y0, &y1)
+    return y0, y1
+
+cpdef double owens_t(double x0, double x1) nogil:
+    """See the documentation for scipy.special.owens_t"""
+    return _func_owens_t(x0, x1)
+
+cdef void pbdv(double x0, double x1, double *y0, double *y1) nogil:
+    """See the documentation for scipy.special.pbdv"""
+    _func_pbdv_wrap(x0, x1, y0, y1)
+
+def _pbdv_pywrap(double x0, double x1):
+    cdef double y0
+    cdef double y1
+    pbdv(x0, x1, &y0, &y1)
+    return y0, y1
+
+cdef void pbvv(double x0, double x1, double *y0, double *y1) nogil:
+    """See the documentation for scipy.special.pbvv"""
+    _func_pbvv_wrap(x0, x1, y0, y1)
+
+def _pbvv_pywrap(double x0, double x1):
+    cdef double y0
+    cdef double y1
+    pbvv(x0, x1, &y0, &y1)
+    return y0, y1
+
+cdef void pbwa(double x0, double x1, double *y0, double *y1) nogil:
+    """See the documentation for scipy.special.pbwa"""
+    _func_pbwa_wrap(x0, x1, y0, y1)
+
+def _pbwa_pywrap(double x0, double x1):
+    cdef double y0
+    cdef double y1
+    pbwa(x0, x1, &y0, &y1)
+    return y0, y1
+
+cpdef double pdtr(double x0, double x1) nogil:
+    """See the documentation for scipy.special.pdtr"""
+    return _func_pdtr(x0, x1)
+
+cpdef double pdtrc(double x0, double x1) nogil:
+    """See the documentation for scipy.special.pdtrc"""
+    return _func_pdtrc(x0, x1)
+
+cpdef double pdtri(dl_number_t x0, double x1) nogil:
+    """See the documentation for scipy.special.pdtri"""
+    if dl_number_t is double:
+        return _func_pdtri_unsafe(x0, x1)
+    elif dl_number_t is long:
+        return _func_pdtri(x0, x1)
+    else:
+        return NAN
+
+cpdef double pdtrik(double x0, double x1) nogil:
+    """See the documentation for scipy.special.pdtrik"""
+    return _func_cdfpoi2_wrap(x0, x1)
+
+cpdef double poch(double x0, double x1) nogil:
+    """See the documentation for scipy.special.poch"""
+    return _func_poch(x0, x1)
+
+cpdef df_number_t powm1(df_number_t x0, df_number_t x1) nogil:
+    """See the documentation for scipy.special.powm1"""
+    if df_number_t is float:
+        return (<float(*)(float, float) nogil>scipy.special._ufuncs_cxx._export_powm1_float)(x0, x1)
+    elif df_number_t is double:
+        return (<double(*)(double, double) nogil>scipy.special._ufuncs_cxx._export_powm1_double)(x0, x1)
+    else:
+        if df_number_t is double:
+            return NAN
+        else:
+            return NAN
+
+cdef void pro_ang1(double x0, double x1, double x2, double x3, double *y0, double *y1) nogil:
+    """See the documentation for scipy.special.pro_ang1"""
+    y0[0] = _func_prolate_aswfa_nocv_wrap(x0, x1, x2, x3, y1)
+
+def _pro_ang1_pywrap(double x0, double x1, double x2, double x3):
+    cdef double y0
+    cdef double y1
+    pro_ang1(x0, x1, x2, x3, &y0, &y1)
+    return y0, y1
+
+cdef void pro_ang1_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) nogil:
+    """See the documentation for scipy.special.pro_ang1_cv"""
+    _func_prolate_aswfa_wrap(x0, x1, x2, x3, x4, y0, y1)
+
+def _pro_ang1_cv_pywrap(double x0, double x1, double x2, double x3, double x4):
+    cdef double y0
+    cdef double y1
+    pro_ang1_cv(x0, x1, x2, x3, x4, &y0, &y1)
+    return y0, y1
+
+cpdef double pro_cv(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.pro_cv"""
+    return _func_prolate_segv_wrap(x0, x1, x2)
+
+cdef void pro_rad1(double x0, double x1, double x2, double x3, double *y0, double *y1) nogil:
+    """See the documentation for scipy.special.pro_rad1"""
+    y0[0] = _func_prolate_radial1_nocv_wrap(x0, x1, x2, x3, y1)
+
+def _pro_rad1_pywrap(double x0, double x1, double x2, double x3):
+    cdef double y0
+    cdef double y1
+    pro_rad1(x0, x1, x2, x3, &y0, &y1)
+    return y0, y1
+
+cdef void pro_rad1_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) nogil:
+    """See the documentation for scipy.special.pro_rad1_cv"""
+    _func_prolate_radial1_wrap(x0, x1, x2, x3, x4, y0, y1)
+
+def _pro_rad1_cv_pywrap(double x0, double x1, double x2, double x3, double x4):
+    cdef double y0
+    cdef double y1
+    pro_rad1_cv(x0, x1, x2, x3, x4, &y0, &y1)
+    return y0, y1
+
+cdef void pro_rad2(double x0, double x1, double x2, double x3, double *y0, double *y1) nogil:
+    """See the documentation for scipy.special.pro_rad2"""
+    y0[0] = _func_prolate_radial2_nocv_wrap(x0, x1, x2, x3, y1)
+
+def _pro_rad2_pywrap(double x0, double x1, double x2, double x3):
+    cdef double y0
+    cdef double y1
+    pro_rad2(x0, x1, x2, x3, &y0, &y1)
+    return y0, y1
+
+cdef void pro_rad2_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) nogil:
+    """See the documentation for scipy.special.pro_rad2_cv"""
+    _func_prolate_radial2_wrap(x0, x1, x2, x3, x4, y0, y1)
+
+def _pro_rad2_cv_pywrap(double x0, double x1, double x2, double x3, double x4):
+    cdef double y0
+    cdef double y1
+    pro_rad2_cv(x0, x1, x2, x3, x4, &y0, &y1)
+    return y0, y1
+
+cpdef double pseudo_huber(double x0, double x1) nogil:
+    """See the documentation for scipy.special.pseudo_huber"""
+    return _func_pseudo_huber(x0, x1)
+
+cpdef Dd_number_t psi(Dd_number_t x0) nogil:
+    """See the documentation for scipy.special.psi"""
+    if Dd_number_t is double_complex:
+        return _func_cdigamma(x0)
+    elif Dd_number_t is double:
+        return _func_digamma(x0)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef double radian(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.radian"""
+    return _func_radian(x0, x1, x2)
+
+cpdef double rel_entr(double x0, double x1) nogil:
+    """See the documentation for scipy.special.rel_entr"""
+    return _func_rel_entr(x0, x1)
+
+cpdef Dd_number_t rgamma(Dd_number_t x0) nogil:
+    """See the documentation for scipy.special.rgamma"""
+    if Dd_number_t is double_complex:
+        return _func_crgamma(x0)
+    elif Dd_number_t is double:
+        return _func_rgamma(x0)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef double round(double x0) nogil:
+    """See the documentation for scipy.special.round"""
+    return _func_round(x0)
+
+cdef void shichi(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1) nogil:
+    """See the documentation for scipy.special.shichi"""
+    if Dd_number_t is double_complex:
+        _func_cshichi(x0, y0, y1)
+    elif Dd_number_t is double:
+        _func_shichi(x0, y0, y1)
+    else:
+        if Dd_number_t is double_complex:
+            y0[0] = NAN
+            y1[0] = NAN
+        else:
+            y0[0] = NAN
+            y1[0] = NAN
+
+def _shichi_pywrap(Dd_number_t x0):
+    cdef Dd_number_t y0
+    cdef Dd_number_t y1
+    shichi(x0, &y0, &y1)
+    return y0, y1
+
+cdef void sici(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1) nogil:
+    """See the documentation for scipy.special.sici"""
+    if Dd_number_t is double_complex:
+        _func_csici(x0, y0, y1)
+    elif Dd_number_t is double:
+        _func_sici(x0, y0, y1)
+    else:
+        if Dd_number_t is double_complex:
+            y0[0] = NAN
+            y1[0] = NAN
+        else:
+            y0[0] = NAN
+            y1[0] = NAN
+
+def _sici_pywrap(Dd_number_t x0):
+    cdef Dd_number_t y0
+    cdef Dd_number_t y1
+    sici(x0, &y0, &y1)
+    return y0, y1
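+
+# Illustrative sketch (hypothetical, not part of the generated API): shichi
+# and sici combine the two patterns above -- fused Dd_number_t inputs and
+# pointer outputs -- so one pywrap serves real and complex arguments,
+# dispatching on the argument type. Helper name is ours.
+def _example_sici_real_vs_complex():
+    # The real call resolves to _func_sici, the complex one to _func_csici.
+    return _sici_pywrap(0.5), _sici_pywrap(0.5 + 0.0j)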
+
+cpdef double sindg(double x0) nogil:
+    """See the documentation for scipy.special.sindg"""
+    return _func_sindg(x0)
+
+cpdef double smirnov(dl_number_t x0, double x1) nogil:
+    """See the documentation for scipy.special.smirnov"""
+    if dl_number_t is double:
+        return _func_smirnov_unsafe(x0, x1)
+    elif dl_number_t is long:
+        return _func_smirnov(x0, x1)
+    else:
+        return NAN
+
+cpdef double smirnovi(dl_number_t x0, double x1) nogil:
+    """See the documentation for scipy.special.smirnovi"""
+    if dl_number_t is double:
+        return _func_smirnovi_unsafe(x0, x1)
+    elif dl_number_t is long:
+        return _func_smirnovi(x0, x1)
+    else:
+        return NAN
+
+cpdef Dd_number_t spence(Dd_number_t x0) nogil:
+    """See the documentation for scipy.special.spence"""
+    if Dd_number_t is double_complex:
+        return _func_cspence(x0)
+    elif Dd_number_t is double:
+        return _func_spence(x0)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef double complex sph_harm(dl_number_t x0, dl_number_t x1, double x2, double x3) nogil:
+    """See the documentation for scipy.special.sph_harm"""
+    if dl_number_t is double:
+        return _func_sph_harmonic_unsafe(x0, x1, x2, x3)
+    elif dl_number_t is long:
+        return _func_sph_harmonic(x0, x1, x2, x3)
+    else:
+        return NAN
+
+cpdef double stdtr(double x0, double x1) nogil:
+    """See the documentation for scipy.special.stdtr"""
+    return _func_cdft1_wrap(x0, x1)
+
+cpdef double stdtridf(double x0, double x1) nogil:
+    """See the documentation for scipy.special.stdtridf"""
+    return _func_cdft3_wrap(x0, x1)
+
+cpdef double stdtrit(double x0, double x1) nogil:
+    """See the documentation for scipy.special.stdtrit"""
+    return _func_cdft2_wrap(x0, x1)
+
+cpdef double struve(double x0, double x1) nogil:
+    """See the documentation for scipy.special.struve"""
+    return _func_struve_h(x0, x1)
+
+cpdef double tandg(double x0) nogil:
+    """See the documentation for scipy.special.tandg"""
+    return _func_tandg(x0)
+
+cpdef double tklmbda(double x0, double x1) nogil:
+    """See the documentation for scipy.special.tklmbda"""
+    return _func_tukeylambdacdf(x0, x1)
+
+cpdef double complex wofz(double complex x0) nogil:
+    """See the documentation for scipy.special.wofz"""
+    return (<double complex(*)(double complex) nogil>scipy.special._ufuncs_cxx._export_faddeeva_w)(x0)
+
+cpdef Dd_number_t wrightomega(Dd_number_t x0) nogil:
+    """See the documentation for scipy.special.wrightomega"""
+    if Dd_number_t is double_complex:
+        return (<double complex(*)(double complex) nogil>scipy.special._ufuncs_cxx._export_wrightomega)(x0)
+    elif Dd_number_t is double:
+        return (<double(*)(double) nogil>scipy.special._ufuncs_cxx._export_wrightomega_real)(x0)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef Dd_number_t xlog1py(Dd_number_t x0, Dd_number_t x1) nogil:
+    """See the documentation for scipy.special.xlog1py"""
+    if Dd_number_t is double:
+        return _func_xlog1py[double](x0, x1)
+    elif Dd_number_t is double_complex:
+        return _func_xlog1py[double_complex](x0, x1)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef Dd_number_t xlogy(Dd_number_t x0, Dd_number_t x1) nogil:
+    """See the documentation for scipy.special.xlogy"""
+    if Dd_number_t is double:
+        return _func_xlogy[double](x0, x1)
+    elif Dd_number_t is double_complex:
+        return _func_xlogy[double_complex](x0, x1)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef double y0(double x0) nogil:
+    """See the documentation for scipy.special.y0"""
+    return _func_y0(x0)
+
+cpdef double y1(double x0) nogil:
+    """See the documentation for scipy.special.y1"""
+    return _func_y1(x0)
+
+cpdef double yn(dl_number_t x0, double x1) nogil:
+    """See the documentation for scipy.special.yn"""
+    if dl_number_t is double:
+        return _func_yn_unsafe(x0, x1)
+    elif dl_number_t is long:
+        return _func_yn(x0, x1)
+    else:
+        return NAN
+
+cpdef Dd_number_t yv(double x0, Dd_number_t x1) nogil:
+    """See the documentation for scipy.special.yv"""
+    if Dd_number_t is double_complex:
+        return _complexstuff.double_complex_from_npy_cdouble(_func_cbesy_wrap(x0, _complexstuff.npy_cdouble_from_double_complex(x1)))
+    elif Dd_number_t is double:
+        return _func_cbesy_wrap_real(x0, x1)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef Dd_number_t yve(double x0, Dd_number_t x1) nogil:
+    """See the documentation for scipy.special.yve"""
+    if Dd_number_t is double_complex:
+        return _complexstuff.double_complex_from_npy_cdouble(_func_cbesy_wrap_e(x0, _complexstuff.npy_cdouble_from_double_complex(x1)))
+    elif Dd_number_t is double:
+        return _func_cbesy_wrap_e_real(x0, x1)
+    else:
+        if Dd_number_t is double_complex:
+            return NAN
+        else:
+            return NAN
+
+cpdef double zetac(double x0) nogil:
+    """See the documentation for scipy.special.zetac"""
+    return _func_zetac(x0)
+
+cpdef double wright_bessel(double x0, double x1, double x2) nogil:
+    """See the documentation for scipy.special.wright_bessel"""
+    return _func_wright_bessel_scalar(x0, x1, x2)
+
+cpdef double ndtri_exp(double x0) nogil:
+    """See the documentation for scipy.special.ndtri_exp"""
+    return _func_ndtri_exp(x0)
+
+def _bench_airy_d_py(int N, double x0):
+    cdef int n
+    for n in range(N):
+        _ufuncs.airy(x0)
+
+def _bench_airy_d_cy(int N, double x0):
+    cdef int n
+    cdef double y0
+    cdef double y1
+    cdef double y2
+    cdef double y3
+    for n in range(N):
+        airy(x0, &y0, &y1, &y2, &y3)
+
+def _bench_airy_D_py(int N, double complex x0):
+    cdef int n
+    for n in range(N):
+        _ufuncs.airy(x0)
+
+def _bench_airy_D_cy(int N, double complex x0):
+    cdef int n
+    cdef double complex y0
+    cdef double complex y1
+    cdef double complex y2
+    cdef double complex y3
+    for n in range(N):
+        airy(x0, &y0, &y1, &y2, &y3)
+
+def _bench_beta_dd_py(int N, double x0, double x1):
+    cdef int n
+    for n in range(N):
+        _ufuncs.beta(x0, x1)
+
+def _bench_beta_dd_cy(int N, double x0, double x1):
+    cdef int n
+    for n in range(N):
+        beta(x0, x1)
+
+def _bench_erf_d_py(int N, double x0):
+    cdef int n
+    for n in range(N):
+        _ufuncs.erf(x0)
+
+def _bench_erf_d_cy(int N, double x0):
+    cdef int n
+    for n in range(N):
+        erf(x0)
+
+def _bench_erf_D_py(int N, double complex x0):
+    cdef int n
+    for n in range(N):
+        _ufuncs.erf(x0)
+
+def _bench_erf_D_cy(int N, double complex x0):
+    cdef int n
+    for n in range(N):
+        erf(x0)
+
+def _bench_exprel_d_py(int N, double x0):
+    cdef int n
+    for n in range(N):
+        _ufuncs.exprel(x0)
+
+def _bench_exprel_d_cy(int N, double x0):
+    cdef int n
+    for n in range(N):
+        exprel(x0)
+
+def _bench_gamma_d_py(int N, double x0):
+    cdef int n
+    for n in range(N):
+        _ufuncs.gamma(x0)
+
+def _bench_gamma_d_cy(int N, double x0):
+    cdef int n
+    for n in range(N):
+        gamma(x0)
+
+def _bench_gamma_D_py(int N, double complex x0):
+    cdef int n
+    for n in range(N):
+        _ufuncs.gamma(x0)
+
+def _bench_gamma_D_cy(int N, double complex x0):
+    cdef int n
+    for n in range(N):
+        gamma(x0)
+
+def _bench_jv_dd_py(int N, double x0, double x1):
+    cdef int n
+    for n in range(N):
+        _ufuncs.jv(x0, x1)
+
+def _bench_jv_dd_cy(int N, double x0, double x1):
+    cdef int n
+    for n in range(N):
+        jv(x0, x1)
+
+def _bench_jv_dD_py(int N, double x0, double complex x1):
+    cdef int n
+    for n in range(N):
+        _ufuncs.jv(x0, x1)
+
+def _bench_jv_dD_cy(int N, double x0, double complex x1):
+    cdef int n
+    for n in range(N):
+        jv(x0, x1)
+
+def _bench_loggamma_D_py(int N, double complex x0):
+    cdef int n
+    for n in range(N):
+        _ufuncs.loggamma(x0)
+
+def _bench_loggamma_D_cy(int N, double complex x0):
+    cdef int n
+    for n in range(N):
+        loggamma(x0)
+
+def _bench_logit_d_py(int N, double x0):
+    cdef int n
+    for n in range(N):
+        _ufuncs.logit(x0)
+
+def _bench_logit_d_cy(int N, double x0):
+    cdef int n
+    for n in range(N):
+        logit(x0)
+
+def _bench_psi_d_py(int N, double x0):
+    cdef int n
+    for n in range(N):
+        _ufuncs.psi(x0)
+
+def _bench_psi_d_cy(int N, double x0):
+    cdef int n
+    for n in range(N):
+        psi(x0)
+
+def _bench_psi_D_py(int N, double complex x0):
+    cdef int n
+    for n in range(N):
+        _ufuncs.psi(x0)
+
+def _bench_psi_D_cy(int N, double complex x0):
+    cdef int n
+    for n in range(N):
+        psi(x0)
\ No newline at end of file
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/orthogonal.py b/__packaged__/coreml/.python_dependencies/scipy/special/orthogonal.py
new file mode 100644
index 00000000..dc8247d2
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/orthogonal.py
@@ -0,0 +1,55 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.special` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _orthogonal
+
+
+_polyfuns = ['legendre', 'chebyt', 'chebyu', 'chebyc', 'chebys',
+             'jacobi', 'laguerre', 'genlaguerre', 'hermite',
+             'hermitenorm', 'gegenbauer', 'sh_legendre', 'sh_chebyt',
+             'sh_chebyu', 'sh_jacobi']
+
+# Correspondence between new and old names of root functions
+_rootfuns_map = {'roots_legendre': 'p_roots',
+               'roots_chebyt': 't_roots',
+               'roots_chebyu': 'u_roots',
+               'roots_chebyc': 'c_roots',
+               'roots_chebys': 's_roots',
+               'roots_jacobi': 'j_roots',
+               'roots_laguerre': 'l_roots',
+               'roots_genlaguerre': 'la_roots',
+               'roots_hermite': 'h_roots',
+               'roots_hermitenorm': 'he_roots',
+               'roots_gegenbauer': 'cg_roots',
+               'roots_sh_legendre': 'ps_roots',
+               'roots_sh_chebyt': 'ts_roots',
+               'roots_sh_chebyu': 'us_roots',
+               'roots_sh_jacobi': 'js_roots'}
+
+
+__all__ = _polyfuns + list(_rootfuns_map.keys()) + [  # noqa: F822
+    'exp', 'inf', 'floor', 'around', 'hstack', 'arange',
+    'linalg', 'airy', 'orthopoly1d', 'newfun',
+    'oldfun', 'p_roots', 't_roots', 'u_roots', 'c_roots', 's_roots',
+    'j_roots', 'l_roots', 'la_roots', 'h_roots', 'he_roots', 'cg_roots',
+    'ps_roots', 'ts_roots', 'us_roots', 'js_roots'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.special.orthogonal is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.special instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.special` namespace, "
+                  "the `scipy.special.orthogonal` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_orthogonal, name)
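+
+# A minimal sketch of what the PEP 562 shim above does for a caller
+# (hypothetical usage, assuming warnings are not filtered):
+#
+#     from scipy.special.orthogonal import p_roots   # resolves via
+#     # __getattr__ above and emits a DeprecationWarning
+#
+# while any name outside __all__ raises AttributeError immediately.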
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/sf_error.py b/__packaged__/coreml/.python_dependencies/scipy/special/sf_error.py
new file mode 100644
index 00000000..9d10888c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/sf_error.py
@@ -0,0 +1,28 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.special` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _sf_error
+
+__all__ = [  # noqa: F822
+    'SpecialFunctionWarning',
+    'SpecialFunctionError'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.special.sf_error is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.special instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.special` namespace, "
+                  "the `scipy.special.sf_error` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_sf_error, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/specfun.py b/__packaged__/coreml/.python_dependencies/scipy/special/specfun.py
new file mode 100644
index 00000000..72d5a5bb
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/specfun.py
@@ -0,0 +1,51 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.special` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _specfun  # type: ignore
+
+__all__ = [  # noqa: F822
+    'airyzo',
+    'bernob',
+    'cerzo',
+    'clpmn',
+    'clpn',
+    'clqmn',
+    'clqn',
+    'cpbdn',
+    'cyzo',
+    'eulerb',
+    'fcoef',
+    'fcszo',
+    'jdzo',
+    'jyzo',
+    'klvnzo',
+    'lamn',
+    'lamv',
+    'lpmn',
+    'lpn',
+    'lqmn',
+    'lqnb',
+    'pbdv',
+    'rctj',
+    'rcty',
+    'segv'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.special.specfun is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.special instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.special` namespace, "
+                  "the `scipy.special.specfun` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_specfun, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/spfun_stats.py b/__packaged__/coreml/.python_dependencies/scipy/special/spfun_stats.py
new file mode 100644
index 00000000..8bb9fb0f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/spfun_stats.py
@@ -0,0 +1,25 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.special` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _spfun_stats
+
+__all__ = ['multigammaln', 'loggam']  # noqa: F822
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.special.spfun_stats is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.special instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.special` namespace, "
+                  "the `scipy.special.spfun_stats` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_spfun_stats, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/data/boost.npz b/__packaged__/coreml/.python_dependencies/scipy/special/tests/data/boost.npz
new file mode 100644
index 00000000..97e5bec8
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/special/tests/data/boost.npz differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/data/gsl.npz b/__packaged__/coreml/.python_dependencies/scipy/special/tests/data/gsl.npz
new file mode 100644
index 00000000..82ef727e
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/special/tests/data/gsl.npz differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/data/local.npz b/__packaged__/coreml/.python_dependencies/scipy/special/tests/data/local.npz
new file mode 100644
index 00000000..f1d67ad3
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/special/tests/data/local.npz differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_basic.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_basic.py
new file mode 100644
index 00000000..c2363fc9
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_basic.py
@@ -0,0 +1,3656 @@
+# this program corresponds to special.py
+
+### Means test is not done yet
+# E   Means test is giving error (E)
+# F   Means test is failing (F)
+# EF  Means test is giving error and failing
+#!   Means test is segfaulting
+# 8   Means test runs forever
+
+###  test_besselpoly
+###  test_mathieu_a
+###  test_mathieu_even_coef
+###  test_mathieu_odd_coef
+###  test_modfresnelp
+###  test_modfresnelm
+#    test_pbdv_seq
+###  test_pbvv_seq
+###  test_sph_harm
+
+import itertools
+import platform
+import sys
+
+import numpy as np
+from numpy import (array, isnan, r_, arange, finfo, pi, sin, cos, tan, exp,
+        log, zeros, sqrt, asarray, inf, nan_to_num, real, arctan, float_)
+
+import pytest
+from pytest import raises as assert_raises
+from numpy.testing import (assert_equal, assert_almost_equal,
+        assert_array_equal, assert_array_almost_equal, assert_approx_equal,
+        assert_, assert_allclose, assert_array_almost_equal_nulp,
+        suppress_warnings)
+
+from scipy import special
+import scipy.special._ufuncs as cephes
+from scipy.special import ellipe, ellipk, ellipkm1
+from scipy.special import elliprc, elliprd, elliprf, elliprg, elliprj
+from scipy.special import mathieu_odd_coef, mathieu_even_coef
+
+from scipy.special._testutils import with_special_errors, \
+     assert_func_equal, FuncData
+
+import math
+
+
+class TestCephes:
+    def test_airy(self):
+        cephes.airy(0)
+
+    def test_airye(self):
+        cephes.airye(0)
+
+    def test_binom(self):
+        n = np.array([0.264, 4, 5.2, 17])
+        k = np.array([2, 0.4, 7, 3.3])
+        nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
+                      ).reshape(2, -1).T
+        rknown = np.array([[-0.097152, 0.9263051596159367, 0.01858423645695389,
+            -0.007581020651518199],[6, 2.0214389119675666, 0, 2.9827344527963846],
+            [10.92, 2.22993515861399, -0.00585728, 10.468891352063146],
+            [136, 3.5252179590758828, 19448, 1024.5526916174495]])
+        assert_func_equal(cephes.binom, rknown.ravel(), nk, rtol=1e-13)
+
+        # Test branches in implementation
+        np.random.seed(1234)
+        n = np.r_[np.arange(-7, 30), 1000*np.random.rand(30) - 500]
+        k = np.arange(0, 102)
+        nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
+                      ).reshape(2, -1).T
+
+        assert_func_equal(cephes.binom,
+                          cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)),
+                          nk,
+                          atol=1e-10, rtol=1e-10)
+
+    def test_binom_2(self):
+        # Test branches in implementation
+        np.random.seed(1234)
+        n = np.r_[np.logspace(1, 300, 20)]
+        k = np.arange(0, 102)
+        nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
+                      ).reshape(2, -1).T
+
+        assert_func_equal(cephes.binom,
+                          cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)),
+                          nk,
+                          atol=1e-10, rtol=1e-10)
+
+    def test_binom_exact(self):
+        @np.vectorize
+        def binom_int(n, k):
+            n = int(n)
+            k = int(k)
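+            # exact integer arithmetic: C(n, k) = prod_{i=1..k} (n - k + i) / i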
+            num = int(1)
+            den = int(1)
+            for i in range(1, k+1):
+                num *= i + n - k
+                den *= i
+            return float(num/den)
+
+        np.random.seed(1234)
+        n = np.arange(1, 15)
+        k = np.arange(0, 15)
+        nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
+                      ).reshape(2, -1).T
+        nk = nk[nk[:,0] >= nk[:,1]]
+        assert_func_equal(cephes.binom,
+                          binom_int(nk[:,0], nk[:,1]),
+                          nk,
+                          atol=0, rtol=0)
+
+    def test_binom_nooverflow_8346(self):
+        # Test that binom(n, k) doesn't overflow prematurely (gh-8346)
+        dataset = [
+            (1000, 500, 2.70288240945436551e+299),
+            (1002, 501, 1.08007396880791225e+300),
+            (1004, 502, 4.31599279169058121e+300),
+            (1006, 503, 1.72468101616263781e+301),
+            (1008, 504, 6.89188009236419153e+301),
+            (1010, 505, 2.75402257948335448e+302),
+            (1012, 506, 1.10052048531923757e+303),
+            (1014, 507, 4.39774063758732849e+303),
+            (1016, 508, 1.75736486108312519e+304),
+            (1018, 509, 7.02255427788423734e+304),
+            (1020, 510, 2.80626776829962255e+305),
+            (1022, 511, 1.12140876377061240e+306),
+            (1024, 512, 4.48125455209897109e+306),
+            (1026, 513, 1.79075474304149900e+307),
+            (1028, 514, 7.15605105487789676e+307)
+        ]
+        dataset = np.asarray(dataset)
+        FuncData(cephes.binom, dataset, (0, 1), 2, rtol=1e-12).check()
+
+    def test_bdtr(self):
+        assert_equal(cephes.bdtr(1,1,0.5),1.0)
+
+    def test_bdtri(self):
+        assert_equal(cephes.bdtri(1,3,0.5),0.5)
+
+    def test_bdtrc(self):
+        assert_equal(cephes.bdtrc(1,3,0.5),0.5)
+
+    def test_bdtrin(self):
+        assert_equal(cephes.bdtrin(1,0,1),5.0)
+
+    def test_bdtrik(self):
+        cephes.bdtrik(1,3,0.5)
+
+    def test_bei(self):
+        assert_equal(cephes.bei(0),0.0)
+
+    def test_beip(self):
+        assert_equal(cephes.beip(0),0.0)
+
+    def test_ber(self):
+        assert_equal(cephes.ber(0),1.0)
+
+    def test_berp(self):
+        assert_equal(cephes.berp(0),0.0)
+
+    def test_besselpoly(self):
+        assert_equal(cephes.besselpoly(0,0,0),1.0)
+
+    def test_beta(self):
+        assert_equal(cephes.beta(1,1),1.0)
+        assert_allclose(cephes.beta(-100.3, 1e-200), cephes.gamma(1e-200))
+        assert_allclose(cephes.beta(0.0342, 171), 24.070498359873497,
+                        rtol=1e-13, atol=0)
+
+    def test_betainc(self):
+        assert_equal(cephes.betainc(1,1,1),1.0)
+        assert_allclose(cephes.betainc(0.0342, 171, 1e-10), 0.55269916901806648)
+
+    def test_betaln(self):
+        assert_equal(cephes.betaln(1,1),0.0)
+        assert_allclose(cephes.betaln(-100.3, 1e-200), cephes.gammaln(1e-200))
+        assert_allclose(cephes.betaln(0.0342, 170), 3.1811881124242447,
+                        rtol=1e-14, atol=0)
+
+    def test_betaincinv(self):
+        assert_equal(cephes.betaincinv(1,1,1),1.0)
+        assert_allclose(cephes.betaincinv(0.0342, 171, 0.25),
+                        8.4231316935498957e-21, rtol=3e-12, atol=0)
+
+    def test_beta_inf(self):
+        assert_(np.isinf(special.beta(-1, 2)))
+
+    def test_btdtr(self):
+        assert_equal(cephes.btdtr(1,1,1),1.0)
+
+    def test_btdtri(self):
+        assert_equal(cephes.btdtri(1,1,1),1.0)
+
+    def test_btdtria(self):
+        assert_equal(cephes.btdtria(1,1,1),5.0)
+
+    def test_btdtrib(self):
+        assert_equal(cephes.btdtrib(1,1,1),5.0)
+
+    def test_cbrt(self):
+        assert_approx_equal(cephes.cbrt(1),1.0)
+
+    def test_chdtr(self):
+        assert_equal(cephes.chdtr(1,0),0.0)
+
+    def test_chdtrc(self):
+        assert_equal(cephes.chdtrc(1,0),1.0)
+
+    def test_chdtri(self):
+        assert_equal(cephes.chdtri(1,1),0.0)
+
+    def test_chdtriv(self):
+        assert_equal(cephes.chdtriv(0,0),5.0)
+
+    def test_chndtr(self):
+        assert_equal(cephes.chndtr(0,1,0),0.0)
+
+        # Each row holds (x, nu, lam, expected_value)
+        # These values were computed using Wolfram Alpha with
+        #     CDF[NoncentralChiSquareDistribution[nu, lam], x]
+        values = np.array([
+            [25.00, 20.0, 400, 4.1210655112396197139e-57],
+            [25.00, 8.00, 250, 2.3988026526832425878e-29],
+            [0.001, 8.00, 40., 5.3761806201366039084e-24],
+            [0.010, 8.00, 40., 5.45396231055999457039e-20],
+            [20.00, 2.00, 107, 1.39390743555819597802e-9],
+            [22.50, 2.00, 107, 7.11803307138105870671e-9],
+            [25.00, 2.00, 107, 3.11041244829864897313e-8],
+            [3.000, 2.00, 1.0, 0.62064365321954362734],
+            [350.0, 300., 10., 0.93880128006276407710],
+            [100.0, 13.5, 10., 0.99999999650104210949],
+            [700.0, 20.0, 400, 0.99999999925680650105],
+            [150.0, 13.5, 10., 0.99999999999999983046],
+            [160.0, 13.5, 10., 0.99999999999999999518],  # 1.0
+        ])
+        cdf = cephes.chndtr(values[:, 0], values[:, 1], values[:, 2])
+        assert_allclose(cdf, values[:, 3], rtol=1e-12)
+
+        assert_almost_equal(cephes.chndtr(np.inf, np.inf, 0), 2.0)
+        assert_almost_equal(cephes.chndtr(2, 1, np.inf), 0.0)
+        assert_(np.isnan(cephes.chndtr(np.nan, 1, 2)))
+        assert_(np.isnan(cephes.chndtr(5, np.nan, 2)))
+        assert_(np.isnan(cephes.chndtr(5, 1, np.nan)))
+
+    def test_chndtridf(self):
+        assert_equal(cephes.chndtridf(0,0,1),5.0)
+
+    def test_chndtrinc(self):
+        assert_equal(cephes.chndtrinc(0,1,0),5.0)
+
+    def test_chndtrix(self):
+        assert_equal(cephes.chndtrix(0,1,0),0.0)
+
+    def test_cosdg(self):
+        assert_equal(cephes.cosdg(0),1.0)
+
+    def test_cosm1(self):
+        assert_equal(cephes.cosm1(0),0.0)
+
+    def test_cotdg(self):
+        assert_almost_equal(cephes.cotdg(45),1.0)
+
+    def test_dawsn(self):
+        assert_equal(cephes.dawsn(0),0.0)
+        assert_allclose(cephes.dawsn(1.23), 0.50053727749081767)
+
+    def test_diric(self):
+        # Test behavior near multiples of 2pi.  Regression test for issue
+        # described in gh-4001.
+        n_odd = [1, 5, 25]
+        x = np.array(2*np.pi + 5e-5).astype(np.float32)
+        assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=7)
+        x = np.array(2*np.pi + 1e-9).astype(np.float64)
+        assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=15)
+        x = np.array(2*np.pi + 1e-15).astype(np.float64)
+        assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=15)
+        if hasattr(np, 'float128'):
+            # No float128 available in 32-bit numpy
+            x = np.array(2*np.pi + 1e-12).astype(np.float128)
+            assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=19)
+
+        n_even = [2, 4, 24]
+        x = np.array(2*np.pi + 1e-9).astype(np.float64)
+        assert_almost_equal(special.diric(x, n_even), -1.0, decimal=15)
+
+        # Test at some values not near a multiple of pi
+        x = np.arange(0.2*np.pi, 1.0*np.pi, 0.2*np.pi)
+        octave_result = [0.872677996249965, 0.539344662916632,
+                         0.127322003750035, -0.206011329583298]
+        assert_almost_equal(special.diric(x, 3), octave_result, decimal=15)
+
+    def test_diric_broadcasting(self):
+        x = np.arange(5)
+        n = np.array([1, 3, 7])
+        assert_(special.diric(x[:, np.newaxis], n).shape == (x.size, n.size))
+
+    def test_ellipe(self):
+        assert_equal(cephes.ellipe(1),1.0)
+
+    def test_ellipeinc(self):
+        assert_equal(cephes.ellipeinc(0,1),0.0)
+
+    def test_ellipj(self):
+        cephes.ellipj(0,1)
+
+    def test_ellipk(self):
+        assert_allclose(ellipk(0), pi/2)
+
+    def test_ellipkinc(self):
+        assert_equal(cephes.ellipkinc(0,0),0.0)
+
+    def test_erf(self):
+        assert_equal(cephes.erf(0), 0.0)
+
+    def test_erf_symmetry(self):
+        x = 5.905732037710919
+        assert_equal(cephes.erf(x) + cephes.erf(-x), 0.0)
+
+    def test_erfc(self):
+        assert_equal(cephes.erfc(0), 1.0)
+
+    def test_exp10(self):
+        assert_approx_equal(cephes.exp10(2),100.0)
+
+    def test_exp2(self):
+        assert_equal(cephes.exp2(2),4.0)
+
+    def test_expm1(self):
+        assert_equal(cephes.expm1(0),0.0)
+        assert_equal(cephes.expm1(np.inf), np.inf)
+        assert_equal(cephes.expm1(-np.inf), -1)
+        assert_equal(cephes.expm1(np.nan), np.nan)
+
+    def test_expm1_complex(self):
+        expm1 = cephes.expm1
+        assert_equal(expm1(0 + 0j), 0 + 0j)
+        assert_equal(expm1(complex(np.inf, 0)), complex(np.inf, 0))
+        assert_equal(expm1(complex(np.inf, 1)), complex(np.inf, np.inf))
+        assert_equal(expm1(complex(np.inf, 2)), complex(-np.inf, np.inf))
+        assert_equal(expm1(complex(np.inf, 4)), complex(-np.inf, -np.inf))
+        assert_equal(expm1(complex(np.inf, 5)), complex(np.inf, -np.inf))
+        assert_equal(expm1(complex(1, np.inf)), complex(np.nan, np.nan))
+        assert_equal(expm1(complex(0, np.inf)), complex(np.nan, np.nan))
+        assert_equal(expm1(complex(np.inf, np.inf)), complex(np.inf, np.nan))
+        assert_equal(expm1(complex(-np.inf, np.inf)), complex(-1, 0))
+        assert_equal(expm1(complex(-np.inf, np.nan)), complex(-1, 0))
+        assert_equal(expm1(complex(np.inf, np.nan)), complex(np.inf, np.nan))
+        assert_equal(expm1(complex(0, np.nan)), complex(np.nan, np.nan))
+        assert_equal(expm1(complex(1, np.nan)), complex(np.nan, np.nan))
+        assert_equal(expm1(complex(np.nan, 1)), complex(np.nan, np.nan))
+        assert_equal(expm1(complex(np.nan, np.nan)), complex(np.nan, np.nan))
+
+    @pytest.mark.xfail(reason='The real part of expm1(z) is bad at these points')
+    def test_expm1_complex_hard(self):
+        # The real part of this function is difficult to evaluate when
+        # z.real = -log(cos(z.imag)).
+        y = np.array([0.1, 0.2, 0.3, 5, 11, 20])
+        x = -np.log(np.cos(y))
+        z = x + 1j*y
+
+        # evaluate using mpmath.expm1 with dps=1000
+        expected = np.array([-5.5507901846769623e-17+0.10033467208545054j,
+                              2.4289354732893695e-18+0.20271003550867248j,
+                              4.5235500262585768e-17+0.30933624960962319j,
+                              7.8234305217489006e-17-3.3805150062465863j,
+                             -1.3685191953697676e-16-225.95084645419513j,
+                              8.7175620481291045e-17+2.2371609442247422j])
+        found = cephes.expm1(z)
+        # this passes.
+        assert_array_almost_equal_nulp(found.imag, expected.imag, 3)
+        # this fails.
+        assert_array_almost_equal_nulp(found.real, expected.real, 20)
+
+    def test_fdtr(self):
+        assert_equal(cephes.fdtr(1, 1, 0), 0.0)
+        # Computed using Wolfram Alpha: CDF[FRatioDistribution[1e-6, 5], 10]
+        assert_allclose(cephes.fdtr(1e-6, 5, 10), 0.9999940790193488,
+                        rtol=1e-12)
+
+    def test_fdtrc(self):
+        assert_equal(cephes.fdtrc(1, 1, 0), 1.0)
+        # Computed using Wolfram Alpha:
+        #   1 - CDF[FRatioDistribution[2, 1/10], 1e10]
+        assert_allclose(cephes.fdtrc(2, 0.1, 1e10), 0.27223784621293512,
+                        rtol=1e-12)
+
+    def test_fdtri(self):
+        assert_allclose(cephes.fdtri(1, 1, [0.499, 0.501]),
+                        array([0.9937365, 1.00630298]), rtol=1e-6)
+        # From Wolfram Alpha:
+        #   CDF[FRatioDistribution[1/10, 1], 3] = 0.8756751669632105666874...
+        p = 0.8756751669632105666874
+        assert_allclose(cephes.fdtri(0.1, 1, p), 3, rtol=1e-12)
+
+    @pytest.mark.xfail(reason='Returns nan on i686.')
+    def test_fdtri_mysterious_failure(self):
+        assert_allclose(cephes.fdtri(1, 1, 0.5), 1)
+
+    def test_fdtridfd(self):
+        assert_equal(cephes.fdtridfd(1,0,0),5.0)
+
+    def test_fresnel(self):
+        assert_equal(cephes.fresnel(0),(0.0,0.0))
+
+    def test_gamma(self):
+        assert_equal(cephes.gamma(5),24.0)
+
+    def test_gammainccinv(self):
+        assert_equal(cephes.gammainccinv(5,1),0.0)
+
+    def test_gammaln(self):
+        cephes.gammaln(10)
+
+    def test_gammasgn(self):
+        vals = np.array([-4, -3.5, -2.3, 1, 4.2], np.float64)
+        assert_array_equal(cephes.gammasgn(vals), np.sign(cephes.rgamma(vals)))
+
+    def test_gdtr(self):
+        assert_equal(cephes.gdtr(1,1,0),0.0)
+
+    def test_gdtr_inf(self):
+        assert_equal(cephes.gdtr(1,1,np.inf),1.0)
+
+    def test_gdtrc(self):
+        assert_equal(cephes.gdtrc(1,1,0),1.0)
+
+    def test_gdtria(self):
+        assert_equal(cephes.gdtria(0,1,1),0.0)
+
+    def test_gdtrib(self):
+        cephes.gdtrib(1,0,1)
+        # assert_equal(cephes.gdtrib(1,0,1),5.0)
+
+    def test_gdtrix(self):
+        cephes.gdtrix(1,1,.1)
+
+    def test_hankel1(self):
+        cephes.hankel1(1,1)
+
+    def test_hankel1e(self):
+        cephes.hankel1e(1,1)
+
+    def test_hankel2(self):
+        cephes.hankel2(1,1)
+
+    def test_hankel2e(self):
+        cephes.hankel2e(1,1)
+
+    def test_hyp1f1(self):
+        assert_approx_equal(cephes.hyp1f1(1,1,1), exp(1.0))
+        assert_approx_equal(cephes.hyp1f1(3,4,-6), 0.026056422099537251095)
+        cephes.hyp1f1(1,1,1)
+
+    def test_hyp2f1(self):
+        assert_equal(cephes.hyp2f1(1,1,1,0),1.0)
+
+    def test_i0(self):
+        assert_equal(cephes.i0(0),1.0)
+
+    def test_i0e(self):
+        assert_equal(cephes.i0e(0),1.0)
+
+    def test_i1(self):
+        assert_equal(cephes.i1(0),0.0)
+
+    def test_i1e(self):
+        assert_equal(cephes.i1e(0),0.0)
+
+    def test_it2i0k0(self):
+        cephes.it2i0k0(1)
+
+    def test_it2j0y0(self):
+        cephes.it2j0y0(1)
+
+    def test_it2struve0(self):
+        cephes.it2struve0(1)
+
+    def test_itairy(self):
+        cephes.itairy(1)
+
+    def test_iti0k0(self):
+        assert_equal(cephes.iti0k0(0),(0.0,0.0))
+
+    def test_itj0y0(self):
+        assert_equal(cephes.itj0y0(0),(0.0,0.0))
+
+    def test_itmodstruve0(self):
+        assert_equal(cephes.itmodstruve0(0),0.0)
+
+    def test_itstruve0(self):
+        assert_equal(cephes.itstruve0(0),0.0)
+
+    def test_iv(self):
+        assert_equal(cephes.iv(1,0),0.0)
+
+    def _check_ive(self):
+        assert_equal(cephes.ive(1,0),0.0)
+
+    def test_j0(self):
+        assert_equal(cephes.j0(0),1.0)
+
+    def test_j1(self):
+        assert_equal(cephes.j1(0),0.0)
+
+    def test_jn(self):
+        assert_equal(cephes.jn(0,0),1.0)
+
+    def test_jv(self):
+        assert_equal(cephes.jv(0,0),1.0)
+
+    def _check_jve(self):
+        assert_equal(cephes.jve(0,0),1.0)
+
+    def test_k0(self):
+        cephes.k0(2)
+
+    def test_k0e(self):
+        cephes.k0e(2)
+
+    def test_k1(self):
+        cephes.k1(2)
+
+    def test_k1e(self):
+        cephes.k1e(2)
+
+    def test_kei(self):
+        cephes.kei(2)
+
+    def test_keip(self):
+        assert_equal(cephes.keip(0),0.0)
+
+    def test_ker(self):
+        cephes.ker(2)
+
+    def test_kerp(self):
+        cephes.kerp(2)
+
+    def _check_kelvin(self):
+        cephes.kelvin(2)
+
+    def test_kn(self):
+        cephes.kn(1,1)
+
+    def test_kolmogi(self):
+        assert_equal(cephes.kolmogi(1),0.0)
+        assert_(np.isnan(cephes.kolmogi(np.nan)))
+
+    def test_kolmogorov(self):
+        assert_equal(cephes.kolmogorov(0), 1.0)
+
+    def test_kolmogp(self):
+        assert_equal(cephes._kolmogp(0), -0.0)
+
+    def test_kolmogc(self):
+        assert_equal(cephes._kolmogc(0), 0.0)
+
+    def test_kolmogci(self):
+        assert_equal(cephes._kolmogci(0), 0.0)
+        assert_(np.isnan(cephes._kolmogci(np.nan)))
+
+    def _check_kv(self):
+        cephes.kv(1,1)
+
+    def _check_kve(self):
+        cephes.kve(1,1)
+
+    def test_log1p(self):
+        log1p = cephes.log1p
+        assert_equal(log1p(0), 0.0)
+        assert_equal(log1p(-1), -np.inf)
+        assert_equal(log1p(-2), np.nan)
+        assert_equal(log1p(np.inf), np.inf)
+
+    def test_log1p_complex(self):
+        log1p = cephes.log1p
+        c = complex
+        assert_equal(log1p(0 + 0j), 0 + 0j)
+        assert_equal(log1p(c(-1, 0)), c(-np.inf, 0))
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "invalid value encountered in multiply")
+            assert_allclose(log1p(c(1, np.inf)), c(np.inf, np.pi/2))
+            assert_equal(log1p(c(1, np.nan)), c(np.nan, np.nan))
+            assert_allclose(log1p(c(-np.inf, 1)), c(np.inf, np.pi))
+            assert_equal(log1p(c(np.inf, 1)), c(np.inf, 0))
+            assert_allclose(log1p(c(-np.inf, np.inf)), c(np.inf, 3*np.pi/4))
+            assert_allclose(log1p(c(np.inf, np.inf)), c(np.inf, np.pi/4))
+            assert_equal(log1p(c(np.inf, np.nan)), c(np.inf, np.nan))
+            assert_equal(log1p(c(-np.inf, np.nan)), c(np.inf, np.nan))
+            assert_equal(log1p(c(np.nan, np.inf)), c(np.inf, np.nan))
+            assert_equal(log1p(c(np.nan, 1)), c(np.nan, np.nan))
+            assert_equal(log1p(c(np.nan, np.nan)), c(np.nan, np.nan))
+
+    def test_lpmv(self):
+        assert_equal(cephes.lpmv(0,0,1),1.0)
+
+    def test_mathieu_a(self):
+        assert_equal(cephes.mathieu_a(1,0),1.0)
+
+    def test_mathieu_b(self):
+        assert_equal(cephes.mathieu_b(1,0),1.0)
+
+    def test_mathieu_cem(self):
+        assert_equal(cephes.mathieu_cem(1,0,0),(1.0,0.0))
+
+        # Test AMS 20.2.27
+        @np.vectorize
+        def ce_smallq(m, q, z):
+            z *= np.pi/180
+            if m == 0:
+                return 2**(-0.5) * (1 - .5*q*cos(2*z))  # + O(q^2)
+            elif m == 1:
+                return cos(z) - q/8 * cos(3*z)  # + O(q^2)
+            elif m == 2:
+                return cos(2*z) - q*(cos(4*z)/12 - 1/4)  # + O(q^2)
+            else:
+                return cos(m*z) - q*(cos((m+2)*z)/(4*(m+1)) - cos((m-2)*z)/(4*(m-1)))  # + O(q^2)
+        m = np.arange(0, 100)
+        q = np.r_[0, np.logspace(-30, -9, 10)]
+        assert_allclose(cephes.mathieu_cem(m[:,None], q[None,:], 0.123)[0],
+                        ce_smallq(m[:,None], q[None,:], 0.123),
+                        rtol=1e-14, atol=0)
+
+    def test_mathieu_sem(self):
+        assert_equal(cephes.mathieu_sem(1,0,0),(0.0,1.0))
+
+        # Test AMS 20.2.27
+        @np.vectorize
+        def se_smallq(m, q, z):
+            z *= np.pi/180
+            if m == 1:
+                return sin(z) - q/8 * sin(3*z)  # + O(q^2)
+            elif m == 2:
+                return sin(2*z) - q*sin(4*z)/12  # + O(q^2)
+            else:
+                return sin(m*z) - q*(sin((m+2)*z)/(4*(m+1)) - sin((m-2)*z)/(4*(m-1)))  # + O(q^2)
+        m = np.arange(1, 100)
+        q = np.r_[0, np.logspace(-30, -9, 10)]
+        assert_allclose(cephes.mathieu_sem(m[:,None], q[None,:], 0.123)[0],
+                        se_smallq(m[:,None], q[None,:], 0.123),
+                        rtol=1e-14, atol=0)
+
+    def test_mathieu_modcem1(self):
+        assert_equal(cephes.mathieu_modcem1(1,0,0),(0.0,0.0))
+
+    def test_mathieu_modcem2(self):
+        cephes.mathieu_modcem2(1,1,1)
+
+        # Test reflection relation AMS 20.6.19
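+        # i.e., with fr = -Mc2(m, q, 0) / Mc1(m, q, 0), the identity checked
+        # below is Mc2(m, q, -z) = -Mc2(m, q, z) - 2*fr*Mc1(m, q, z).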
+        m = np.arange(0, 4)[:,None,None]
+        q = np.r_[np.logspace(-2, 2, 10)][None,:,None]
+        z = np.linspace(0, 1, 7)[None,None,:]
+
+        y1 = cephes.mathieu_modcem2(m, q, -z)[0]
+
+        fr = -cephes.mathieu_modcem2(m, q, 0)[0] / cephes.mathieu_modcem1(m, q, 0)[0]
+        y2 = -cephes.mathieu_modcem2(m, q, z)[0] - 2*fr*cephes.mathieu_modcem1(m, q, z)[0]
+
+        assert_allclose(y1, y2, rtol=1e-10)
+
+    def test_mathieu_modsem1(self):
+        assert_equal(cephes.mathieu_modsem1(1,0,0),(0.0,0.0))
+
+    def test_mathieu_modsem2(self):
+        cephes.mathieu_modsem2(1,1,1)
+
+        # Test reflection relation AMS 20.6.20
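+        # i.e., with fr = Ms2'(m, q, 0) / Ms1'(m, q, 0), the identity checked
+        # below is Ms2(m, q, -z) = Ms2(m, q, z) - 2*fr*Ms1(m, q, z).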
+        m = np.arange(1, 4)[:,None,None]
+        q = np.r_[np.logspace(-2, 2, 10)][None,:,None]
+        z = np.linspace(0, 1, 7)[None,None,:]
+
+        y1 = cephes.mathieu_modsem2(m, q, -z)[0]
+        fr = cephes.mathieu_modsem2(m, q, 0)[1] / cephes.mathieu_modsem1(m, q, 0)[1]
+        y2 = cephes.mathieu_modsem2(m, q, z)[0] - 2*fr*cephes.mathieu_modsem1(m, q, z)[0]
+        assert_allclose(y1, y2, rtol=1e-10)
+
+    def test_mathieu_overflow(self):
+        # Check that these return NaNs instead of causing a SEGV
+        assert_equal(cephes.mathieu_cem(10000, 0, 1.3), (np.nan, np.nan))
+        assert_equal(cephes.mathieu_sem(10000, 0, 1.3), (np.nan, np.nan))
+        assert_equal(cephes.mathieu_cem(10000, 1.5, 1.3), (np.nan, np.nan))
+        assert_equal(cephes.mathieu_sem(10000, 1.5, 1.3), (np.nan, np.nan))
+        assert_equal(cephes.mathieu_modcem1(10000, 1.5, 1.3), (np.nan, np.nan))
+        assert_equal(cephes.mathieu_modsem1(10000, 1.5, 1.3), (np.nan, np.nan))
+        assert_equal(cephes.mathieu_modcem2(10000, 1.5, 1.3), (np.nan, np.nan))
+        assert_equal(cephes.mathieu_modsem2(10000, 1.5, 1.3), (np.nan, np.nan))
+
+    def test_mathieu_ticket_1847(self):
+        # Regression test --- this call had some out-of-bounds access
+        # and could return nan occasionally
+        for k in range(60):
+            v = cephes.mathieu_modsem2(2, 100, -1)
+            # Values from ACM TOMS 804 (derivative obtained by numerical differentiation)
+            assert_allclose(v[0], 0.1431742913063671074347, rtol=1e-10)
+            assert_allclose(v[1], 0.9017807375832909144719, rtol=1e-4)
+
+    def test_modfresnelm(self):
+        cephes.modfresnelm(0)
+
+    def test_modfresnelp(self):
+        cephes.modfresnelp(0)
+
+    def _check_modstruve(self):
+        assert_equal(cephes.modstruve(1,0),0.0)
+
+    def test_nbdtr(self):
+        assert_equal(cephes.nbdtr(1,1,1),1.0)
+
+    def test_nbdtrc(self):
+        assert_equal(cephes.nbdtrc(1,1,1),0.0)
+
+    def test_nbdtri(self):
+        assert_equal(cephes.nbdtri(1,1,1),1.0)
+
+    def __check_nbdtrik(self):
+        cephes.nbdtrik(1,.4,.5)
+
+    def test_nbdtrin(self):
+        assert_equal(cephes.nbdtrin(1,0,0),5.0)
+
+    def test_ncfdtr(self):
+        assert_equal(cephes.ncfdtr(1,1,1,0),0.0)
+
+    def test_ncfdtri(self):
+        assert_equal(cephes.ncfdtri(1, 1, 1, 0), 0.0)
+        f = [0.5, 1, 1.5]
+        p = cephes.ncfdtr(2, 3, 1.5, f)
+        assert_allclose(cephes.ncfdtri(2, 3, 1.5, p), f)
+
+    def test_ncfdtridfd(self):
+        dfd = [1, 2, 3]
+        p = cephes.ncfdtr(2, dfd, 0.25, 15)
+        assert_allclose(cephes.ncfdtridfd(2, p, 0.25, 15), dfd)
+
+    def test_ncfdtridfn(self):
+        dfn = [0.1, 1, 2, 3, 1e4]
+        p = cephes.ncfdtr(dfn, 2, 0.25, 15)
+        assert_allclose(cephes.ncfdtridfn(p, 2, 0.25, 15), dfn, rtol=1e-5)
+
+    def test_ncfdtrinc(self):
+        nc = [0.5, 1.5, 2.0]
+        p = cephes.ncfdtr(2, 3, nc, 15)
+        assert_allclose(cephes.ncfdtrinc(2, 3, p, 15), nc)
+
+    def test_nctdtr(self):
+        assert_equal(cephes.nctdtr(1,0,0),0.5)
+        assert_equal(cephes.nctdtr(9, 65536, 45), 0.0)
+
+        assert_approx_equal(cephes.nctdtr(np.inf, 1., 1.), 0.5, 5)
+        assert_(np.isnan(cephes.nctdtr(2., np.inf, 10.)))
+        assert_approx_equal(cephes.nctdtr(2., 1., np.inf), 1.)
+
+        assert_(np.isnan(cephes.nctdtr(np.nan, 1., 1.)))
+        assert_(np.isnan(cephes.nctdtr(2., np.nan, 1.)))
+        assert_(np.isnan(cephes.nctdtr(2., 1., np.nan)))
+
+    def __check_nctdtridf(self):
+        cephes.nctdtridf(1,0.5,0)
+
+    def test_nctdtrinc(self):
+        cephes.nctdtrinc(1,0,0)
+
+    def test_nctdtrit(self):
+        cephes.nctdtrit(.1,0.2,.5)
+
+    def test_nrdtrimn(self):
+        assert_approx_equal(cephes.nrdtrimn(0.5,1,1),1.0)
+
+    def test_nrdtrisd(self):
+        assert_allclose(cephes.nrdtrisd(0.5,0.5,0.5), 0.0,
+                         atol=0, rtol=0)
+
+    def test_obl_ang1(self):
+        cephes.obl_ang1(1,1,1,0)
+
+    def test_obl_ang1_cv(self):
+        result = cephes.obl_ang1_cv(1,1,1,1,0)
+        assert_almost_equal(result[0],1.0)
+        assert_almost_equal(result[1],0.0)
+
+    def _check_obl_cv(self):
+        assert_equal(cephes.obl_cv(1,1,0),2.0)
+
+    def test_obl_rad1(self):
+        cephes.obl_rad1(1,1,1,0)
+
+    def test_obl_rad1_cv(self):
+        cephes.obl_rad1_cv(1,1,1,1,0)
+
+    def test_obl_rad2(self):
+        cephes.obl_rad2(1,1,1,0)
+
+    def test_obl_rad2_cv(self):
+        cephes.obl_rad2_cv(1,1,1,1,0)
+
+    def test_pbdv(self):
+        assert_equal(cephes.pbdv(1,0),(0.0,1.0))
+
+    def test_pbvv(self):
+        cephes.pbvv(1,0)
+
+    def test_pbwa(self):
+        cephes.pbwa(1,0)
+
+    def test_pdtr(self):
+        val = cephes.pdtr(0, 1)
+        assert_almost_equal(val, np.exp(-1))
+        # Edge case: m = 0.
+        val = cephes.pdtr([0, 1, 2], 0)
+        assert_array_equal(val, [1, 1, 1])
+
+    def test_pdtrc(self):
+        val = cephes.pdtrc(0, 1)
+        assert_almost_equal(val, 1 - np.exp(-1))
+        # Edge case: m = 0.
+        val = cephes.pdtrc([0, 1, 2], 0.0)
+        assert_array_equal(val, [0, 0, 0])
+
+    def test_pdtri(self):
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "floating point number truncated to an integer")
+            cephes.pdtri(0.5,0.5)
+
+    def test_pdtrik(self):
+        k = cephes.pdtrik(0.5, 1)
+        assert_almost_equal(cephes.gammaincc(k + 1, 1), 0.5)
+        # Edge case: m = 0 or very small.
+        k = cephes.pdtrik([[0], [0.25], [0.95]], [0, 1e-20, 1e-6])
+        assert_array_equal(k, np.zeros((3, 3)))
+
+    def test_pro_ang1(self):
+        cephes.pro_ang1(1,1,1,0)
+
+    def test_pro_ang1_cv(self):
+        assert_array_almost_equal(cephes.pro_ang1_cv(1,1,1,1,0),
+                                  array((1.0,0.0)))
+
+    def _check_pro_cv(self):
+        assert_equal(cephes.pro_cv(1,1,0),2.0)
+
+    def test_pro_rad1(self):
+        cephes.pro_rad1(1,1,1,0.1)
+
+    def test_pro_rad1_cv(self):
+        cephes.pro_rad1_cv(1,1,1,1,0)
+
+    def test_pro_rad2(self):
+        cephes.pro_rad2(1,1,1,0)
+
+    def test_pro_rad2_cv(self):
+        cephes.pro_rad2_cv(1,1,1,1,0)
+
+    def test_psi(self):
+        cephes.psi(1)
+
+    def test_radian(self):
+        assert_equal(cephes.radian(0,0,0),0)
+
+    def test_rgamma(self):
+        assert_equal(cephes.rgamma(1),1.0)
+
+    def test_round(self):
+        assert_equal(cephes.round(3.4),3.0)
+        assert_equal(cephes.round(-3.4),-3.0)
+        assert_equal(cephes.round(3.6),4.0)
+        assert_equal(cephes.round(-3.6),-4.0)
+        assert_equal(cephes.round(3.5),4.0)
+        assert_equal(cephes.round(-3.5),-4.0)
+
+    def test_shichi(self):
+        cephes.shichi(1)
+
+    def test_sici(self):
+        cephes.sici(1)
+
+        s, c = cephes.sici(np.inf)
+        assert_almost_equal(s, np.pi * 0.5)
+        assert_almost_equal(c, 0)
+
+        s, c = cephes.sici(-np.inf)
+        assert_almost_equal(s, -np.pi * 0.5)
+        assert_(np.isnan(c), "cosine integral(-inf) is not nan")
+
+    def test_sindg(self):
+        assert_equal(cephes.sindg(90),1.0)
+
+    def test_smirnov(self):
+        assert_equal(cephes.smirnov(1,.1),0.9)
+        assert_(np.isnan(cephes.smirnov(1,np.nan)))
+
+    def test_smirnovp(self):
+        assert_equal(cephes._smirnovp(1, .1), -1)
+        assert_equal(cephes._smirnovp(2, 0.75), -2*(0.25)**(2-1))
+        assert_equal(cephes._smirnovp(3, 0.75), -3*(0.25)**(3-1))
+        assert_(np.isnan(cephes._smirnovp(1, np.nan)))
+
+    def test_smirnovc(self):
+        assert_equal(cephes._smirnovc(1,.1),0.1)
+        assert_(np.isnan(cephes._smirnovc(1,np.nan)))
+        x10 = np.linspace(0, 1, 11, endpoint=True)
+        assert_almost_equal(cephes._smirnovc(3, x10), 1-cephes.smirnov(3, x10))
+        x4 = np.linspace(0, 1, 5, endpoint=True)
+        assert_almost_equal(cephes._smirnovc(4, x4), 1-cephes.smirnov(4, x4))
+
+    def test_smirnovi(self):
+        assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.4)),0.4)
+        assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.6)),0.6)
+        assert_(np.isnan(cephes.smirnovi(1,np.nan)))
+
+    def test_smirnovci(self):
+        assert_almost_equal(cephes._smirnovc(1,cephes._smirnovci(1,0.4)),0.4)
+        assert_almost_equal(cephes._smirnovc(1,cephes._smirnovci(1,0.6)),0.6)
+        assert_(np.isnan(cephes._smirnovci(1,np.nan)))
+
+    def test_spence(self):
+        assert_equal(cephes.spence(1),0.0)
+
+    def test_stdtr(self):
+        assert_equal(cephes.stdtr(1,0),0.5)
+        assert_almost_equal(cephes.stdtr(1,1), 0.75)
+        assert_almost_equal(cephes.stdtr(1,2), 0.852416382349)
+
+    def test_stdtridf(self):
+        cephes.stdtridf(0.7,1)
+
+    def test_stdtrit(self):
+        cephes.stdtrit(1,0.7)
+
+    def test_struve(self):
+        assert_equal(cephes.struve(0,0),0.0)
+
+    def test_tandg(self):
+        assert_equal(cephes.tandg(45),1.0)
+
+    def test_tklmbda(self):
+        assert_almost_equal(cephes.tklmbda(1,1),1.0)
+
+    def test_y0(self):
+        cephes.y0(1)
+
+    def test_y1(self):
+        cephes.y1(1)
+
+    def test_yn(self):
+        cephes.yn(1,1)
+
+    def test_yv(self):
+        cephes.yv(1,1)
+
+    def _check_yve(self):
+        cephes.yve(1,1)
+
+    def test_wofz(self):
+        z = [complex(624.2,-0.26123), complex(-0.4,3.), complex(0.6,2.),
+             complex(-1.,1.), complex(-1.,-9.), complex(-1.,9.),
+             complex(-0.0000000234545,1.1234), complex(-3.,5.1),
+             complex(-53,30.1), complex(0.0,0.12345),
+             complex(11,1), complex(-22,-2), complex(9,-28),
+             complex(21,-33), complex(1e5,1e5), complex(1e14,1e14)
+             ]
+        w = [
+            complex(-3.78270245518980507452677445620103199303131110e-7,
+                    0.000903861276433172057331093754199933411710053155),
+            complex(0.1764906227004816847297495349730234591778719532788,
+                    -0.02146550539468457616788719893991501311573031095617),
+            complex(0.2410250715772692146133539023007113781272362309451,
+                    0.06087579663428089745895459735240964093522265589350),
+            complex(0.30474420525691259245713884106959496013413834051768,
+                    -0.20821893820283162728743734725471561394145872072738),
+            complex(7.317131068972378096865595229600561710140617977e34,
+                    8.321873499714402777186848353320412813066170427e34),
+            complex(0.0615698507236323685519612934241429530190806818395,
+                    -0.00676005783716575013073036218018565206070072304635),
+            complex(0.3960793007699874918961319170187598400134746631,
+                    -5.593152259116644920546186222529802777409274656e-9),
+            complex(0.08217199226739447943295069917990417630675021771804,
+                    -0.04701291087643609891018366143118110965272615832184),
+            complex(0.00457246000350281640952328010227885008541748668738,
+                    -0.00804900791411691821818731763401840373998654987934),
+            complex(0.8746342859608052666092782112565360755791467973338452,
+                    0.),
+            complex(0.00468190164965444174367477874864366058339647648741,
+                    0.0510735563901306197993676329845149741675029197050),
+            complex(-0.0023193175200187620902125853834909543869428763219,
+                    -0.025460054739731556004902057663500272721780776336),
+            complex(9.11463368405637174660562096516414499772662584e304,
+                    3.97101807145263333769664875189354358563218932e305),
+            complex(-4.4927207857715598976165541011143706155432296e281,
+                    -2.8019591213423077494444700357168707775769028e281),
+            complex(2.820947917809305132678577516325951485807107151e-6,
+                    2.820947917668257736791638444590253942253354058e-6),
+            complex(2.82094791773878143474039725787438662716372268e-15,
+                    2.82094791773878143474039725773333923127678361e-15)
+        ]
+        assert_func_equal(cephes.wofz, w, z, rtol=1e-13)
+
+
+class TestAiry:
+    def test_airy(self):
+        # This tests the airy function to ensure 8-place accuracy in computation
+
+        x = special.airy(.99)
+        assert_array_almost_equal(x,array([0.13689066,-0.16050153,1.19815925,0.92046818]),8)
+        x = special.airy(.41)
+        assert_array_almost_equal(x,array([0.25238916,-.23480512,0.80686202,0.51053919]),8)
+        x = special.airy(-.36)
+        assert_array_almost_equal(x,array([0.44508477,-0.23186773,0.44939534,0.48105354]),8)
+
+    def test_airye(self):
+        a = special.airye(0.01)
+        b = special.airy(0.01)
+        b1 = [None]*4
+        for n in range(2):
+            b1[n] = b[n]*exp(2.0/3.0*0.01*sqrt(0.01))
+        for n in range(2,4):
+            b1[n] = b[n]*exp(-abs(real(2.0/3.0*0.01*sqrt(0.01))))
+        assert_array_almost_equal(a,b1,6)
+
+    def test_bi_zeros(self):
+        bi = special.bi_zeros(2)
+        bia = (array([-1.17371322, -3.2710930]),
+               array([-2.29443968, -4.07315509]),
+               array([-0.45494438, 0.39652284]),
+               array([0.60195789, -0.76031014]))
+        assert_array_almost_equal(bi,bia,4)
+
+        bi = special.bi_zeros(5)
+        assert_array_almost_equal(bi[0],array([-1.173713222709127,
+                                               -3.271093302836352,
+                                               -4.830737841662016,
+                                               -6.169852128310251,
+                                               -7.376762079367764]),11)
+
+        assert_array_almost_equal(bi[1],array([-2.294439682614122,
+                                               -4.073155089071828,
+                                               -5.512395729663599,
+                                               -6.781294445990305,
+                                               -7.940178689168587]),10)
+
+        assert_array_almost_equal(bi[2],array([-0.454944383639657,
+                                               0.396522836094465,
+                                               -0.367969161486959,
+                                               0.349499116831805,
+                                               -0.336026240133662]),11)
+
+        assert_array_almost_equal(bi[3],array([0.601957887976239,
+                                               -0.760310141492801,
+                                               0.836991012619261,
+                                               -0.88947990142654,
+                                               0.929983638568022]),10)
+
+    def test_ai_zeros(self):
+        ai = special.ai_zeros(1)
+        assert_array_almost_equal(ai,(array([-2.33810741]),
+                                     array([-1.01879297]),
+                                     array([0.5357]),
+                                     array([0.7012])),4)
+
+    def test_ai_zeros_big(self):
+        z, zp, ai_zpx, aip_zx = special.ai_zeros(50000)
+        ai_z, aip_z, _, _ = special.airy(z)
+        ai_zp, aip_zp, _, _ = special.airy(zp)
+
+        ai_envelope = 1/abs(z)**(1./4)
+        aip_envelope = abs(zp)**(1./4)
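+
+        # For z -> -inf, Ai oscillates with amplitude ~ |z|**(-1/4) and Ai'
+        # with amplitude ~ |z|**(1/4) (DLMF 9.7), so divide by these
+        # envelopes before checking that the values at the zeros vanish.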
+
+        # Check values
+        assert_allclose(ai_zpx, ai_zp, rtol=1e-10)
+        assert_allclose(aip_zx, aip_z, rtol=1e-10)
+
+        # Check they are zeros
+        assert_allclose(ai_z/ai_envelope, 0, atol=1e-10, rtol=0)
+        assert_allclose(aip_zp/aip_envelope, 0, atol=1e-10, rtol=0)
+
+        # Check first zeros, DLMF 9.9.1
+        assert_allclose(z[:6],
+            [-2.3381074105, -4.0879494441, -5.5205598281,
+             -6.7867080901, -7.9441335871, -9.0226508533], rtol=1e-10)
+        assert_allclose(zp[:6],
+            [-1.0187929716, -3.2481975822, -4.8200992112,
+             -6.1633073556, -7.3721772550, -8.4884867340], rtol=1e-10)
+
+    def test_bi_zeros_big(self):
+        z, zp, bi_zpx, bip_zx = special.bi_zeros(50000)
+        _, _, bi_z, bip_z = special.airy(z)
+        _, _, bi_zp, bip_zp = special.airy(zp)
+
+        bi_envelope = 1/abs(z)**(1./4)
+        bip_envelope = abs(zp)**(1./4)
+
+        # Check values
+        assert_allclose(bi_zpx, bi_zp, rtol=1e-10)
+        assert_allclose(bip_zx, bip_z, rtol=1e-10)
+
+        # Check they are zeros
+        assert_allclose(bi_z/bi_envelope, 0, atol=1e-10, rtol=0)
+        assert_allclose(bip_zp/bip_envelope, 0, atol=1e-10, rtol=0)
+
+        # Check first zeros, DLMF 9.9.2
+        assert_allclose(z[:6],
+            [-1.1737132227, -3.2710933028, -4.8307378417,
+             -6.1698521283, -7.3767620794, -8.4919488465], rtol=1e-10)
+        assert_allclose(zp[:6],
+            [-2.2944396826, -4.0731550891, -5.5123957297,
+             -6.7812944460, -7.9401786892, -9.0195833588], rtol=1e-10)
+
+
+class TestAssocLaguerre:
+    def test_assoc_laguerre(self):
+        a1 = special.genlaguerre(11,1)
+        a2 = special.assoc_laguerre(.2,11,1)
+        assert_array_almost_equal(a2,a1(.2),8)
+        a2 = special.assoc_laguerre(1,11,1)
+        assert_array_almost_equal(a2,a1(1),8)
+
+
+class TestBesselpoly:
+    def test_besselpoly(self):
+        pass
+
+
+class TestKelvin:
+    def test_bei(self):
+        mbei = special.bei(2)
+        assert_almost_equal(mbei, 0.9722916273066613,5)  # this may not be exact
+
+    def test_beip(self):
+        mbeip = special.beip(2)
+        assert_almost_equal(mbeip,0.91701361338403631,5)  # this may not be exact
+
+    def test_ber(self):
+        mber = special.ber(2)
+        assert_almost_equal(mber,0.75173418271380821,5)  # this may not be exact
+
+    def test_berp(self):
+        mberp = special.berp(2)
+        assert_almost_equal(mberp,-0.49306712470943909,5)  # this may not be exact
+
+    def test_bei_zeros(self):
+        # Abramowitz & Stegun, Table 9.12
+        bi = special.bei_zeros(5)
+        assert_array_almost_equal(bi,array([5.02622,
+                                            9.45541,
+                                            13.89349,
+                                            18.33398,
+                                            22.77544]),4)
+
+    def test_beip_zeros(self):
+        bip = special.beip_zeros(5)
+        assert_array_almost_equal(bip,array([3.772673304934953,
+                                               8.280987849760042,
+                                               12.742147523633703,
+                                               17.193431752512542,
+                                               21.641143941167325]),8)
+
+    def test_ber_zeros(self):
+        ber = special.ber_zeros(5)
+        assert_array_almost_equal(ber,array([2.84892,
+                                             7.23883,
+                                             11.67396,
+                                             16.11356,
+                                             20.55463]),4)
+
+    def test_berp_zeros(self):
+        brp = special.berp_zeros(5)
+        assert_array_almost_equal(brp,array([6.03871,
+                                             10.51364,
+                                             14.96844,
+                                             19.41758,
+                                             23.86430]),4)
+
+    def test_kelvin(self):
+        mkelv = special.kelvin(2)
+        assert_array_almost_equal(mkelv,(special.ber(2) + special.bei(2)*1j,
+                                         special.ker(2) + special.kei(2)*1j,
+                                         special.berp(2) + special.beip(2)*1j,
+                                         special.kerp(2) + special.keip(2)*1j),8)
+
+    def test_kei(self):
+        mkei = special.kei(2)
+        assert_almost_equal(mkei,-0.20240006776470432,5)
+
+    def test_keip(self):
+        mkeip = special.keip(2)
+        assert_almost_equal(mkeip,0.21980790991960536,5)
+
+    def test_ker(self):
+        mker = special.ker(2)
+        assert_almost_equal(mker,-0.041664513991509472,5)
+
+    def test_kerp(self):
+        mkerp = special.kerp(2)
+        assert_almost_equal(mkerp,-0.10660096588105264,5)
+
+    def test_kei_zeros(self):
+        kei = special.kei_zeros(5)
+        assert_array_almost_equal(kei,array([3.91467,
+                                              8.34422,
+                                              12.78256,
+                                              17.22314,
+                                              21.66464]),4)
+
+    def test_keip_zeros(self):
+        keip = special.keip_zeros(5)
+        assert_array_almost_equal(keip,array([4.93181,
+                                                9.40405,
+                                                13.85827,
+                                                18.30717,
+                                                22.75379]),4)
+
+    # numbers come from 9.9 of A&S pg. 381
+    def test_kelvin_zeros(self):
+        tmp = special.kelvin_zeros(5)
+        berz,beiz,kerz,keiz,berpz,beipz,kerpz,keipz = tmp
+        assert_array_almost_equal(berz,array([2.84892,
+                                               7.23883,
+                                               11.67396,
+                                               16.11356,
+                                               20.55463]),4)
+        assert_array_almost_equal(beiz,array([5.02622,
+                                               9.45541,
+                                               13.89349,
+                                               18.33398,
+                                               22.77544]),4)
+        assert_array_almost_equal(kerz,array([1.71854,
+                                               6.12728,
+                                               10.56294,
+                                               15.00269,
+                                               19.44382]),4)
+        assert_array_almost_equal(keiz,array([3.91467,
+                                               8.34422,
+                                               12.78256,
+                                               17.22314,
+                                               21.66464]),4)
+        assert_array_almost_equal(berpz,array([6.03871,
+                                                10.51364,
+                                                14.96844,
+                                                19.41758,
+                                                23.86430]),4)
+        assert_array_almost_equal(beipz,array([3.77267,
+                                                # the 1927 table gives 3.77320,
+                                                # but this value is more accurate
+                                                8.28099,
+                                                12.74215,
+                                                17.19343,
+                                                21.64114]),4)
+        assert_array_almost_equal(kerpz,array([2.66584,
+                                                7.17212,
+                                                11.63218,
+                                                16.08312,
+                                                20.53068]),4)
+        assert_array_almost_equal(keipz,array([4.93181,
+                                                9.40405,
+                                                13.85827,
+                                                18.30717,
+                                                22.75379]),4)
+
+    def test_ker_zeros(self):
+        ker = special.ker_zeros(5)
+        assert_array_almost_equal(ker,array([1.71854,
+                                               6.12728,
+                                               10.56294,
+                                               15.00269,
+                                               19.44381]),4)
+
+    def test_kerp_zeros(self):
+        kerp = special.kerp_zeros(5)
+        assert_array_almost_equal(kerp,array([2.66584,
+                                                7.17212,
+                                                11.63218,
+                                                16.08312,
+                                                20.53068]),4)
+
+
+class TestBernoulli:
+    def test_bernoulli(self):
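+        # special.bernoulli(5) returns B_0..B_5; the exact values are
+        # 1, -1/2, 1/6, 0, -1/30, 0, matched here to four decimals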
+        brn = special.bernoulli(5)
+        assert_array_almost_equal(brn,array([1.0000,
+                                             -0.5000,
+                                             0.1667,
+                                             0.0000,
+                                             -0.0333,
+                                             0.0000]),4)
+
+
+class TestBeta:
+    def test_beta(self):
+        bet = special.beta(2,4)
+        betg = (special.gamma(2)*special.gamma(4))/special.gamma(6)
+        assert_almost_equal(bet,betg,8)
+
+    def test_betaln(self):
+        betln = special.betaln(2,4)
+        bet = log(abs(special.beta(2,4)))
+        assert_almost_equal(betln,bet,8)
+
+    def test_betainc(self):
+        btinc = special.betainc(1,1,.2)
+        assert_almost_equal(btinc,0.2,8)
+
+    def test_betaincinv(self):
+        y = special.betaincinv(2,4,.5)
+        comp = special.betainc(2,4,y)
+        assert_almost_equal(comp,.5,5)
+
+
+class TestCombinatorics:
+    def test_comb(self):
+        assert_array_almost_equal(special.comb([10, 10], [3, 4]), [120., 210.])
+        assert_almost_equal(special.comb(10, 3), 120.)
+        assert_equal(special.comb(10, 3, exact=True), 120)
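+        # repetition=True computes the multiset coefficient comb(n+k-1, k);
+        # here comb(10 + 3 - 1, 3) = comb(12, 3) = 220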
+        assert_equal(special.comb(10, 3, exact=True, repetition=True), 220)
+
+        assert_allclose([special.comb(20, k, exact=True) for k in range(21)],
+                        special.comb(20, list(range(21))), atol=1e-15)
+
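+        # comb(n, n-1) == n holds even for n beyond the native int range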
+        ii = np.iinfo(int).max + 1
+        assert_equal(special.comb(ii, ii-1, exact=True), ii)
+
+        expected = 100891344545564193334812497256
+        assert special.comb(100, 50, exact=True) == expected
+
+    @pytest.mark.parametrize("repetition", [True, False])
+    @pytest.mark.parametrize("legacy", [True, False])
+    @pytest.mark.parametrize("k", [3.5, 3])
+    @pytest.mark.parametrize("N", [4.5, 4])
+    def test_comb_legacy(self, N, k, legacy, repetition):
+        # test is only relevant for exact=True
+        if legacy and (N != int(N) or k != int(k)):
+            with pytest.warns(
+                DeprecationWarning,
+                match=r"Non-integer arguments are currently being cast to",
+            ):
+                result = special.comb(N, k, exact=True, legacy=legacy,
+                                      repetition=repetition)
+        else:
+            result = special.comb(N, k, exact=True, legacy=legacy,
+                                  repetition=repetition)
+        if legacy:
+            # for exact=True and legacy=True, cast input arguments, else don't
+            if repetition:
+                # the casting in legacy mode happens AFTER transforming N & k,
+                # so rounding can change (e.g. both floats, but sum to int);
+                # hence we need to emulate the repetition-transformation here
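+                # e.g. N=4.5, k=3.5: N + k - 1 = 7.0 exactly, so casting
+                # after the transform gives comb(7, 3), while casting first
+                # would give comb(6, 3)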
+                N, k = int(N + k - 1), int(k)
+                repetition = False
+            else:
+                N, k = int(N), int(k)
+        # expected result is the same as with exact=False
+        expected = special.comb(N, k, legacy=legacy, repetition=repetition)
+        assert_equal(result, expected)
+
+    def test_comb_with_np_int64(self):
+        n = 70
+        k = 30
+        np_n = np.int64(n)
+        np_k = np.int64(k)
+        res_np = special.comb(np_n, np_k, exact=True)
+        res_py = special.comb(n, k, exact=True)
+        assert res_np == res_py
+
+    def test_comb_zeros(self):
+        assert_equal(special.comb(2, 3, exact=True), 0)
+        assert_equal(special.comb(-1, 3, exact=True), 0)
+        assert_equal(special.comb(2, -1, exact=True), 0)
+        assert_equal(special.comb(2, -1, exact=False), 0)
+        assert_array_almost_equal(special.comb([2, -1, 2, 10], [3, 3, -1, 3]),
+                [0., 0., 0., 120.])
+
+    def test_perm(self):
+        assert_array_almost_equal(special.perm([10, 10], [3, 4]), [720., 5040.])
+        assert_almost_equal(special.perm(10, 3), 720.)
+        assert_equal(special.perm(10, 3, exact=True), 720)
+
+    def test_perm_zeros(self):
+        assert_equal(special.perm(2, 3, exact=True), 0)
+        assert_equal(special.perm(-1, 3, exact=True), 0)
+        assert_equal(special.perm(2, -1, exact=True), 0)
+        assert_equal(special.perm(2, -1, exact=False), 0)
+        assert_array_almost_equal(special.perm([2, -1, 2, 10], [3, 3, -1, 3]),
+                [0., 0., 0., 720.])
+
+
+class TestTrigonometric:
+    def test_cbrt(self):
+        cb = special.cbrt(27)
+        cbrl = 27**(1.0/3.0)
+        assert_approx_equal(cb,cbrl)
+
+    def test_cbrtmore(self):
+        cb1 = special.cbrt(27.9)
+        cbrl1 = 27.9**(1.0/3.0)
+        assert_almost_equal(cb1,cbrl1,8)
+
+    def test_cosdg(self):
+        cdg = special.cosdg(90)
+        cdgrl = cos(pi/2.0)
+        assert_almost_equal(cdg,cdgrl,8)
+
+    def test_cosdgmore(self):
+        cdgm = special.cosdg(30)
+        cdgmrl = cos(pi/6.0)
+        assert_almost_equal(cdgm,cdgmrl,8)
+
+    def test_cosm1(self):
+        cs = (special.cosm1(0),special.cosm1(.3),special.cosm1(pi/10))
+        csrl = (cos(0)-1,cos(.3)-1,cos(pi/10)-1)
+        assert_array_almost_equal(cs,csrl,8)
+
+    def test_cotdg(self):
+        ct = special.cotdg(30)
+        ctrl = tan(pi/6.0)**(-1)
+        assert_almost_equal(ct,ctrl,8)
+
+    def test_cotdgmore(self):
+        ct1 = special.cotdg(45)
+        ctrl1 = tan(pi/4.0)**(-1)
+        assert_almost_equal(ct1,ctrl1,8)
+
+    def test_specialpoints(self):
+        assert_almost_equal(special.cotdg(45), 1.0, 14)
+        assert_almost_equal(special.cotdg(-45), -1.0, 14)
+        assert_almost_equal(special.cotdg(90), 0.0, 14)
+        assert_almost_equal(special.cotdg(-90), 0.0, 14)
+        assert_almost_equal(special.cotdg(135), -1.0, 14)
+        assert_almost_equal(special.cotdg(-135), 1.0, 14)
+        assert_almost_equal(special.cotdg(225), 1.0, 14)
+        assert_almost_equal(special.cotdg(-225), -1.0, 14)
+        assert_almost_equal(special.cotdg(270), 0.0, 14)
+        assert_almost_equal(special.cotdg(-270), 0.0, 14)
+        assert_almost_equal(special.cotdg(315), -1.0, 14)
+        assert_almost_equal(special.cotdg(-315), 1.0, 14)
+        assert_almost_equal(special.cotdg(765), 1.0, 14)
+
+    def test_sinc(self):
+        # the sinc implementation and more extensive sinc tests are in numpy
+        assert_array_equal(special.sinc([0]), 1)
+        assert_equal(special.sinc(0.0), 1.0)
+
+    def test_sindg(self):
+        sn = special.sindg(90)
+        assert_equal(sn,1.0)
+
+    def test_sindgmore(self):
+        snm = special.sindg(30)
+        snmrl = sin(pi/6.0)
+        assert_almost_equal(snm,snmrl,8)
+        snm1 = special.sindg(45)
+        snmrl1 = sin(pi/4.0)
+        assert_almost_equal(snm1,snmrl1,8)
+
+
+class TestTandg:
+
+    def test_tandg(self):
+        tn = special.tandg(30)
+        tnrl = tan(pi/6.0)
+        assert_almost_equal(tn,tnrl,8)
+
+    def test_tandgmore(self):
+        tnm = special.tandg(45)
+        tnmrl = tan(pi/4.0)
+        assert_almost_equal(tnm,tnmrl,8)
+        tnm1 = special.tandg(60)
+        tnmrl1 = tan(pi/3.0)
+        assert_almost_equal(tnm1,tnmrl1,8)
+
+    def test_specialpoints(self):
+        assert_almost_equal(special.tandg(0), 0.0, 14)
+        assert_almost_equal(special.tandg(45), 1.0, 14)
+        assert_almost_equal(special.tandg(-45), -1.0, 14)
+        assert_almost_equal(special.tandg(135), -1.0, 14)
+        assert_almost_equal(special.tandg(-135), 1.0, 14)
+        assert_almost_equal(special.tandg(180), 0.0, 14)
+        assert_almost_equal(special.tandg(-180), 0.0, 14)
+        assert_almost_equal(special.tandg(225), 1.0, 14)
+        assert_almost_equal(special.tandg(-225), -1.0, 14)
+        assert_almost_equal(special.tandg(315), -1.0, 14)
+        assert_almost_equal(special.tandg(-315), 1.0, 14)
+
+
+class TestEllip:
+    def test_ellipj_nan(self):
+        """Regression test for #912."""
+        special.ellipj(0.5, np.nan)
+
+    def test_ellipj(self):
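+        # for m = 0 the Jacobi elliptic functions reduce to
+        # sn = sin(u), cn = cos(u), dn = 1, ph = u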
+        el = special.ellipj(0.2,0)
+        rel = [sin(0.2),cos(0.2),1.0,0.20]
+        assert_array_almost_equal(el,rel,13)
+
+    def test_ellipk(self):
+        elk = special.ellipk(.2)
+        assert_almost_equal(elk,1.659623598610528,11)
+
+        assert_equal(special.ellipkm1(0.0), np.inf)
+        assert_equal(special.ellipkm1(1.0), pi/2)
+        assert_equal(special.ellipkm1(np.inf), 0.0)
+        assert_equal(special.ellipkm1(np.nan), np.nan)
+        assert_equal(special.ellipkm1(-1), np.nan)
+        assert_allclose(special.ellipk(-10), 0.7908718902387385)
+
+    def test_ellipkinc(self):
+        elkinc = special.ellipkinc(pi/2,.2)
+        elk = special.ellipk(0.2)
+        assert_almost_equal(elkinc,elk,15)
+        alpha = 20*pi/180
+        phi = 45*pi/180
+        m = sin(alpha)**2
+        elkinc = special.ellipkinc(phi,m)
+        assert_almost_equal(elkinc,0.79398143,8)
+        # From pg. 614 of A & S
+
+        assert_equal(special.ellipkinc(pi/2, 0.0), pi/2)
+        assert_equal(special.ellipkinc(pi/2, 1.0), np.inf)
+        assert_equal(special.ellipkinc(pi/2, -np.inf), 0.0)
+        assert_equal(special.ellipkinc(pi/2, np.nan), np.nan)
+        assert_equal(special.ellipkinc(pi/2, 2), np.nan)
+        assert_equal(special.ellipkinc(0, 0.5), 0.0)
+        assert_equal(special.ellipkinc(np.inf, 0.5), np.inf)
+        assert_equal(special.ellipkinc(-np.inf, 0.5), -np.inf)
+        assert_equal(special.ellipkinc(np.inf, np.inf), np.nan)
+        assert_equal(special.ellipkinc(np.inf, -np.inf), np.nan)
+        assert_equal(special.ellipkinc(-np.inf, -np.inf), np.nan)
+        assert_equal(special.ellipkinc(-np.inf, np.inf), np.nan)
+        assert_equal(special.ellipkinc(np.nan, 0.5), np.nan)
+        assert_equal(special.ellipkinc(np.nan, np.nan), np.nan)
+
+        assert_allclose(special.ellipkinc(0.38974112035318718, 1), 0.4, rtol=1e-14)
+        assert_allclose(special.ellipkinc(1.5707, -10), 0.79084284661724946)
+
+    def test_ellipkinc_2(self):
+        # Regression test for gh-3550
+        # ellipkinc(phi, mbad) was NaN and mvals[2:6] were twice the correct value
+        mbad = 0.68359375000000011
+        phi = 0.9272952180016123
+        m = np.nextafter(mbad, 0)
+        mvals = []
+        for j in range(10):
+            mvals.append(m)
+            m = np.nextafter(m, 1)
+        f = special.ellipkinc(phi, mvals)
+        assert_array_almost_equal_nulp(f, np.full_like(f, 1.0259330100195334), 1)
+        # this bug also appears at phi + n * pi for at least small n
+        f1 = special.ellipkinc(phi + pi, mvals)
+        assert_array_almost_equal_nulp(f1, np.full_like(f1, 5.1296650500976675), 2)
+
+    def test_ellipkinc_singular(self):
+        # ellipkinc(phi, 1) has closed form and is finite only for phi in (-pi/2, pi/2)
+        xlog = np.logspace(-300, -17, 25)
+        xlin = np.linspace(1e-17, 0.1, 25)
+        xlin2 = np.linspace(0.1, pi/2, 25, endpoint=False)
+
+        assert_allclose(special.ellipkinc(xlog, 1), np.arcsinh(np.tan(xlog)), rtol=1e-14)
+        assert_allclose(special.ellipkinc(xlin, 1), np.arcsinh(np.tan(xlin)), rtol=1e-14)
+        assert_allclose(special.ellipkinc(xlin2, 1), np.arcsinh(np.tan(xlin2)), rtol=1e-14)
+        assert_equal(special.ellipkinc(np.pi/2, 1), np.inf)
+        assert_allclose(special.ellipkinc(-xlog, 1), np.arcsinh(np.tan(-xlog)), rtol=1e-14)
+        assert_allclose(special.ellipkinc(-xlin, 1), np.arcsinh(np.tan(-xlin)), rtol=1e-14)
+        assert_allclose(special.ellipkinc(-xlin2, 1), np.arcsinh(np.tan(-xlin2)), rtol=1e-14)
+        assert_equal(special.ellipkinc(-np.pi/2, 1), np.inf)
+
+    def test_ellipe(self):
+        ele = special.ellipe(.2)
+        assert_almost_equal(ele,1.4890350580958529,8)
+
+        assert_equal(special.ellipe(0.0), pi/2)
+        assert_equal(special.ellipe(1.0), 1.0)
+        assert_equal(special.ellipe(-np.inf), np.inf)
+        assert_equal(special.ellipe(np.nan), np.nan)
+        assert_equal(special.ellipe(2), np.nan)
+        assert_allclose(special.ellipe(-10), 3.6391380384177689)
+
+    def test_ellipeinc(self):
+        eleinc = special.ellipeinc(pi/2,.2)
+        ele = special.ellipe(0.2)
+        assert_almost_equal(eleinc,ele,14)
+        # pg 617 of A & S
+        alpha, phi = 52*pi/180,35*pi/180
+        m = sin(alpha)**2
+        eleinc = special.ellipeinc(phi,m)
+        assert_almost_equal(eleinc, 0.58823065, 8)
+
+        assert_equal(special.ellipeinc(pi/2, 0.0), pi/2)
+        assert_equal(special.ellipeinc(pi/2, 1.0), 1.0)
+        assert_equal(special.ellipeinc(pi/2, -np.inf), np.inf)
+        assert_equal(special.ellipeinc(pi/2, np.nan), np.nan)
+        assert_equal(special.ellipeinc(pi/2, 2), np.nan)
+        assert_equal(special.ellipeinc(0, 0.5), 0.0)
+        assert_equal(special.ellipeinc(np.inf, 0.5), np.inf)
+        assert_equal(special.ellipeinc(-np.inf, 0.5), -np.inf)
+        assert_equal(special.ellipeinc(np.inf, -np.inf), np.inf)
+        assert_equal(special.ellipeinc(-np.inf, -np.inf), -np.inf)
+        assert_equal(special.ellipeinc(np.inf, np.inf), np.nan)
+        assert_equal(special.ellipeinc(-np.inf, np.inf), np.nan)
+        assert_equal(special.ellipeinc(np.nan, 0.5), np.nan)
+        assert_equal(special.ellipeinc(np.nan, np.nan), np.nan)
+        assert_allclose(special.ellipeinc(1.5707, -10), 3.6388185585822876)
+
+    def test_ellipeinc_2(self):
+        # Regression test for gh-3550
+        # ellipeinc(phi, mbad) was NaN and mvals[2:6] were twice the correct value
+        mbad = 0.68359375000000011
+        phi = 0.9272952180016123
+        m = np.nextafter(mbad, 0)
+        mvals = []
+        for j in range(10):
+            mvals.append(m)
+            m = np.nextafter(m, 1)
+        f = special.ellipeinc(phi, mvals)
+        assert_array_almost_equal_nulp(f, np.full_like(f, 0.84442884574781019), 2)
+        # this bug also appears at phi + n * pi for at least small n
+        f1 = special.ellipeinc(phi + pi, mvals)
+        assert_array_almost_equal_nulp(f1, np.full_like(f1, 3.3471442287390509), 4)
+
+
+class TestEllipCarlson:
+    """Test for Carlson elliptic integrals ellipr[cdfgj].
+    The special values used in these tests can be found in Sec. 3 of Carlson
+    (1994), https://arxiv.org/abs/math/9409227
+    """
+    def test_elliprc(self):
+        assert_allclose(elliprc(1, 1), 1)
+        assert elliprc(1, inf) == 0.0
+        assert isnan(elliprc(1, 0))
+        assert elliprc(1, complex(1, inf)) == 0.0
+        args = array([[0.0, 0.25],
+                      [2.25, 2.0],
+                      [0.0, 1.0j],
+                      [-1.0j, 1.0j],
+                      [0.25, -2.0],
+                      [1.0j, -1.0]])
+        expected_results = array([np.pi,
+                                  np.log(2.0),
+                                  1.1107207345396 * (1.0-1.0j),
+                                  1.2260849569072-0.34471136988768j,
+                                  np.log(2.0) / 3.0,
+                                  0.77778596920447+0.19832484993429j])
+        for i, arr in enumerate(args):
+            assert_allclose(elliprc(*arr), expected_results[i])
+
+    def test_elliprd(self):
+        assert_allclose(elliprd(1, 1, 1), 1)
+        assert_allclose(elliprd(0, 2, 1) / 3.0, 0.59907011736779610371)
+        assert elliprd(1, 1, inf) == 0.0
+        assert np.isinf(elliprd(1, 1, 0))
+        assert np.isinf(elliprd(1, 1, complex(0, 0)))
+        assert np.isinf(elliprd(0, 1, complex(0, 0)))
+        assert isnan(elliprd(1, 1, -np.finfo(np.double).tiny / 2.0))
+        assert isnan(elliprd(1, 1, complex(-1, 0)))
+        args = array([[0.0, 2.0, 1.0],
+                      [2.0, 3.0, 4.0],
+                      [1.0j, -1.0j, 2.0],
+                      [0.0, 1.0j, -1.0j],
+                      [0.0, -1.0+1.0j, 1.0j],
+                      [-2.0-1.0j, -1.0j, -1.0+1.0j]])
+        expected_results = array([1.7972103521034,
+                                  0.16510527294261,
+                                  0.65933854154220,
+                                  1.2708196271910+2.7811120159521j,
+                                  -1.8577235439239-0.96193450888839j,
+                                  1.8249027393704-1.2218475784827j])
+        for i, arr in enumerate(args):
+            assert_allclose(elliprd(*arr), expected_results[i])
+
+    def test_elliprf(self):
+        assert_allclose(elliprf(1, 1, 1), 1)
+        assert_allclose(elliprf(0, 1, 2), 1.31102877714605990523)
+        assert elliprf(1, inf, 1) == 0.0
+        assert np.isinf(elliprf(0, 1, 0))
+        assert isnan(elliprf(1, 1, -1))
+        assert elliprf(complex(inf), 0, 1) == 0.0
+        assert isnan(elliprf(1, 1, complex(-inf, 1)))
+        args = array([[1.0, 2.0, 0.0],
+                      [1.0j, -1.0j, 0.0],
+                      [0.5, 1.0, 0.0],
+                      [-1.0+1.0j, 1.0j, 0.0],
+                      [2.0, 3.0, 4.0],
+                      [1.0j, -1.0j, 2.0],
+                      [-1.0+1.0j, 1.0j, 1.0-1.0j]])
+        expected_results = array([1.3110287771461,
+                                  1.8540746773014,
+                                  1.8540746773014,
+                                  0.79612586584234-1.2138566698365j,
+                                  0.58408284167715,
+                                  1.0441445654064,
+                                  0.93912050218619-0.53296252018635j])
+        for i, arr in enumerate(args):
+            assert_allclose(elliprf(*arr), expected_results[i])
+
+    def test_elliprg(self):
+        assert_allclose(elliprg(1, 1, 1), 1)
+        assert_allclose(elliprg(0, 0, 1), 0.5)
+        assert_allclose(elliprg(0, 0, 0), 0)
+        assert np.isinf(elliprg(1, inf, 1))
+        assert np.isinf(elliprg(complex(inf), 1, 1))
+        args = array([[0.0, 16.0, 16.0],
+                      [2.0, 3.0, 4.0],
+                      [0.0, 1.0j, -1.0j],
+                      [-1.0+1.0j, 1.0j, 0.0],
+                      [-1.0j, -1.0+1.0j, 1.0j],
+                      [0.0, 0.0796, 4.0]])
+        expected_results = array([np.pi,
+                                  1.7255030280692,
+                                  0.42360654239699,
+                                  0.44660591677018+0.70768352357515j,
+                                  0.36023392184473+0.40348623401722j,
+                                  1.0284758090288])
+        for i, arr in enumerate(args):
+            assert_allclose(elliprg(*arr), expected_results[i])
+
+    def test_elliprj(self):
+        assert_allclose(elliprj(1, 1, 1, 1), 1)
+        assert elliprj(1, 1, inf, 1) == 0.0
+        assert isnan(elliprj(1, 0, 0, 0))
+        assert isnan(elliprj(-1, 1, 1, 1))
+        assert elliprj(1, 1, 1, inf) == 0.0
+        args = array([[0.0, 1.0, 2.0, 3.0],
+                      [2.0, 3.0, 4.0, 5.0],
+                      [2.0, 3.0, 4.0, -1.0+1.0j],
+                      [1.0j, -1.0j, 0.0, 2.0],
+                      [-1.0+1.0j, -1.0-1.0j, 1.0, 2.0],
+                      [1.0j, -1.0j, 0.0, 1.0-1.0j],
+                      [-1.0+1.0j, -1.0-1.0j, 1.0, -3.0+1.0j],
+                      [2.0, 3.0, 4.0, -0.5],    # Cauchy principal value
+                      [2.0, 3.0, 4.0, -5.0]])   # Cauchy principal value
+        expected_results = array([0.77688623778582,
+                                  0.14297579667157,
+                                  0.13613945827771-0.38207561624427j,
+                                  1.6490011662711,
+                                  0.94148358841220,
+                                  1.8260115229009+1.2290661908643j,
+                                  -0.61127970812028-1.0684038390007j,
+                                  0.24723819703052,    # Cauchy principal value
+                                  -0.12711230042964])  # Cauchy principal value
+        for i, arr in enumerate(args):
+            assert_allclose(elliprj(*arr), expected_results[i])
+
+    @pytest.mark.xfail(reason="Insufficient accuracy on 32-bit")
+    def test_elliprj_hard(self):
+        assert_allclose(elliprj(6.483625725195452e-08,
+                                1.1649136528196886e-27,
+                                3.6767340167168e+13,
+                                0.493704617023468),
+                        8.63426920644241857617477551054e-6,
+                        rtol=5e-15, atol=1e-20)
+        assert_allclose(elliprj(14.375105857849121,
+                                9.993988969725365e-11,
+                                1.72844262269944e-26,
+                                5.898871222598245e-06),
+                        829774.1424801627252574054378691828,
+                        rtol=5e-15, atol=1e-20)
+
+
+class TestEllipLegendreCarlsonIdentities:
+    """Test identities expressing the Legendre elliptic integrals in terms
+    of Carlson's symmetric integrals.  These identities can be found
+    in the DLMF https://dlmf.nist.gov/19.25#i .
+    """
+
+    def setup_class(self):
+        self.m_n1_1 = np.arange(-1., 1., 0.01)
+        # For double, this is -(2**1024)
+        self.max_neg = finfo(float_).min
+        # Lots of very negative numbers
+        self.very_neg_m = -1. * 2.**arange(-1 +
+                                           np.log2(-self.max_neg), 0.,
+                                           -1.)
+        self.ms_up_to_1 = np.concatenate(([self.max_neg],
+                                          self.very_neg_m,
+                                          self.m_n1_1))
+
+    def test_k(self):
+        """Test identity:
+        K(m) = R_F(0, 1-m, 1)
+        """
+        m = self.ms_up_to_1
+        assert_allclose(ellipk(m), elliprf(0., 1.-m, 1.))
+
+    def test_km1(self):
+        """Test identity:
+        K(m) = R_F(0, 1-m, 1)
+        But with the ellipkm1 function
+        """
+        # For double, this is 2**-1022
+        tiny = finfo(float_).tiny
+        # All these small powers of 2, up to 2**-1
+        m1 = tiny * 2.**arange(0., -np.log2(tiny))
+        assert_allclose(ellipkm1(m1), elliprf(0., m1, 1.))
+
+    def test_e(self):
+        """Test identity:
+        E(m) = 2*R_G(0, 1-m, 1), with m = k^2
+        """
+        m = self.ms_up_to_1
+        assert_allclose(ellipe(m), 2.*elliprg(0., 1.-m, 1.))
+
+
+class TestErf:
+
+    def test_erf(self):
+        er = special.erf(.25)
+        assert_almost_equal(er,0.2763263902,8)
+
+    def test_erf_zeros(self):
+        erz = special.erf_zeros(5)
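+        # the first five zeros of erf(z) in the first quadrant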
+        erzr = array([1.45061616+1.88094300j,
+                     2.24465928+2.61657514j,
+                     2.83974105+3.17562810j,
+                     3.33546074+3.64617438j,
+                     3.76900557+4.06069723j])
+        assert_array_almost_equal(erz,erzr,4)
+
+    def _check_variant_func(self, func, other_func, rtol, atol=0):
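+        # draw heavy-tailed (Pareto) real and complex samples, evaluate the
+        # reference formulation other_func, drop non-finite reference values,
+        # and then check func against it on both the complex and real inputs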
+        np.random.seed(1234)
+        n = 10000
+        x = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1)
+        y = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1)
+        z = x + 1j*y
+
+        with np.errstate(all='ignore'):
+            w = other_func(z)
+            w_real = other_func(x).real
+
+            mask = np.isfinite(w)
+            w = w[mask]
+            z = z[mask]
+
+            mask = np.isfinite(w_real)
+            w_real = w_real[mask]
+            x = x[mask]
+
+            # test both real and complex variants
+            assert_func_equal(func, w, z, rtol=rtol, atol=atol)
+            assert_func_equal(func, w_real, x, rtol=rtol, atol=atol)
+
+    def test_erfc_consistent(self):
+        self._check_variant_func(
+            cephes.erfc,
+            lambda z: 1 - cephes.erf(z),
+            rtol=1e-12,
+            atol=1e-14  # <- the test function loses precision
+            )
+
+    def test_erfcx_consistent(self):
+        self._check_variant_func(
+            cephes.erfcx,
+            lambda z: np.exp(z*z) * cephes.erfc(z),
+            rtol=1e-12
+            )
+
+    def test_erfi_consistent(self):
+        self._check_variant_func(
+            cephes.erfi,
+            lambda z: -1j * cephes.erf(1j*z),
+            rtol=1e-12
+            )
+
+    def test_dawsn_consistent(self):
+        self._check_variant_func(
+            cephes.dawsn,
+            lambda z: sqrt(pi)/2 * np.exp(-z*z) * cephes.erfi(z),
+            rtol=1e-12
+            )
+
+    def test_erf_nan_inf(self):
+        vals = [np.nan, -np.inf, np.inf]
+        expected = [np.nan, -1, 1]
+        assert_allclose(special.erf(vals), expected, rtol=1e-15)
+
+    def test_erfc_nan_inf(self):
+        vals = [np.nan, -np.inf, np.inf]
+        expected = [np.nan, 2, 0]
+        assert_allclose(special.erfc(vals), expected, rtol=1e-15)
+
+    def test_erfcx_nan_inf(self):
+        vals = [np.nan, -np.inf, np.inf]
+        expected = [np.nan, np.inf, 0]
+        assert_allclose(special.erfcx(vals), expected, rtol=1e-15)
+
+    def test_erfi_nan_inf(self):
+        vals = [np.nan, -np.inf, np.inf]
+        expected = [np.nan, -np.inf, np.inf]
+        assert_allclose(special.erfi(vals), expected, rtol=1e-15)
+
+    def test_dawsn_nan_inf(self):
+        vals = [np.nan, -np.inf, np.inf]
+        expected = [np.nan, -0.0, 0.0]
+        assert_allclose(special.dawsn(vals), expected, rtol=1e-15)
+
+    def test_wofz_nan_inf(self):
+        vals = [np.nan, -np.inf, np.inf]
+        expected = [np.nan + np.nan * 1.j, 0.-0.j, 0.+0.j]
+        assert_allclose(special.wofz(vals), expected, rtol=1e-15)
+
+
+class TestEuler:
+    def test_euler(self):
+        eu0 = special.euler(0)
+        eu1 = special.euler(1)
+        eu2 = special.euler(2)   # just checking that this doesn't segfault
+        assert_allclose(eu0, [1], rtol=1e-15)
+        assert_allclose(eu1, [1, 0], rtol=1e-15)
+        assert_allclose(eu2, [1, 0, -1], rtol=1e-15)
+        eu24 = special.euler(24)
+        mathworld = [1,1,5,61,1385,50521,2702765,199360981,
+                     19391512145,2404879675441,
+                     370371188237525,69348874393137901,
+                     15514534163557086905]
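+        # mathworld holds |E_0|, |E_2|, ..., |E_24|; the Euler numbers vanish
+        # at odd indices and satisfy E_{2k} = (-1)**k * |E_{2k}|, which is
+        # what the loop below reconstructs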
+        correct = zeros((25,),'d')
+        for k in range(0,13):
+            if (k % 2):
+                correct[2*k] = -float(mathworld[k])
+            else:
+                correct[2*k] = float(mathworld[k])
+        with np.errstate(all='ignore'):
+            err = nan_to_num((eu24-correct)/correct)
+            errmax = max(err)
+        assert_almost_equal(errmax, 0.0, 14)
+
+
+class TestExp:
+    def test_exp2(self):
+        ex = special.exp2(2)
+        exrl = 2**2
+        assert_equal(ex,exrl)
+
+    def test_exp2more(self):
+        exm = special.exp2(2.5)
+        exmrl = 2**(2.5)
+        assert_almost_equal(exm,exmrl,8)
+
+    def test_exp10(self):
+        ex = special.exp10(2)
+        exrl = 10**2
+        assert_approx_equal(ex,exrl)
+
+    def test_exp10more(self):
+        exm = special.exp10(2.5)
+        exmrl = 10**(2.5)
+        assert_almost_equal(exm,exmrl,8)
+
+    def test_expm1(self):
+        ex = (special.expm1(2),special.expm1(3),special.expm1(4))
+        exrl = (exp(2)-1,exp(3)-1,exp(4)-1)
+        assert_array_almost_equal(ex,exrl,8)
+
+    def test_expm1more(self):
+        ex1 = (special.expm1(2),special.expm1(2.1),special.expm1(2.2))
+        exrl1 = (exp(2)-1,exp(2.1)-1,exp(2.2)-1)
+        assert_array_almost_equal(ex1,exrl1,8)
+
+
+class TestFactorialFunctions:
+    def test_factorial(self):
+        # Some known values, float math
+        assert_array_almost_equal(special.factorial(0), 1)
+        assert_array_almost_equal(special.factorial(1), 1)
+        assert_array_almost_equal(special.factorial(2), 2)
+        assert_array_almost_equal([6., 24., 120.],
+                                  special.factorial([3, 4, 5], exact=False))
+        assert_array_almost_equal(special.factorial([[5, 3], [4, 3]]),
+                                  [[120, 6], [24, 6]])
+
+        # Some known values, integer math
+        assert_equal(special.factorial(0, exact=True), 1)
+        assert_equal(special.factorial(1, exact=True), 1)
+        assert_equal(special.factorial(2, exact=True), 2)
+        assert_equal(special.factorial(5, exact=True), 120)
+        assert_equal(special.factorial(15, exact=True), 1307674368000)
+
+        # ndarray shape is maintained
+        assert_equal(special.factorial([7, 4, 15, 10], exact=True),
+                     [5040, 24, 1307674368000, 3628800])
+
+        assert_equal(special.factorial([[5, 3], [4, 3]], True),
+                     [[120, 6], [24, 6]])
+
+        # object arrays
+        assert_equal(special.factorial(np.arange(-3, 22), True),
+                     special.factorial(np.arange(-3, 22), False))
+
+        # int64 array
+        assert_equal(special.factorial(np.arange(-3, 15), True),
+                     special.factorial(np.arange(-3, 15), False))
+
+        # int32 array
+        assert_equal(special.factorial(np.arange(-3, 5), True),
+                     special.factorial(np.arange(-3, 5), False))
+
+        # Consistent output for n < 0
+        for exact in (True, False):
+            assert_array_equal(0, special.factorial(-3, exact))
+            assert_array_equal([1, 2, 0, 0],
+                               special.factorial([1, 2, -5, -4], exact))
+
+        for n in range(0, 22):
+            # Compare all with math.factorial
+            correct = math.factorial(n)
+            assert_array_equal(correct, special.factorial(n, True))
+            assert_array_equal(correct, special.factorial([n], True)[0])
+
+            assert_allclose(float(correct), special.factorial(n, False))
+            assert_allclose(float(correct), special.factorial([n], False)[0])
+
+            # Compare exact=True vs False, scalar vs array
+            assert_array_equal(special.factorial(n, True),
+                               special.factorial(n, False))
+
+            assert_array_equal(special.factorial([n], True),
+                               special.factorial([n], False))
+
+    @pytest.mark.parametrize('x, exact', [
+        (1, True),
+        (1, False),
+        (np.array(1), True),
+        (np.array(1), False),
+    ])
+    def test_factorial_0d_return_type(self, x, exact):
+        assert np.isscalar(special.factorial(x, exact=exact))
+
+    def test_factorial2(self):
+        assert_array_almost_equal([105., 384., 945.],
+                                  special.factorial2([7, 8, 9], exact=False))
+        assert_equal(special.factorial2(7, exact=True), 105)
+
+    def test_factorialk(self):
+        assert_equal(special.factorialk(5, 1, exact=True), 120)
+        assert_equal(special.factorialk(5, 3, exact=True), 10)
+
+    @pytest.mark.parametrize('x, exact', [
+        (np.nan, True),
+        (np.nan, False),
+        (np.array([np.nan]), True),
+        (np.array([np.nan]), False),
+    ])
+    def test_nan_inputs(self, x, exact):
+        result = special.factorial(x, exact=exact)
+        assert_(np.isnan(result))
+
+    # GH-13122: special.factorial() argument should be an array of integers.
+    # On Python 3.10, math.factorial() rejects floats.
+    # On Python 3.9, a DeprecationWarning is emitted.
+    # A numpy array casts all integers to float if the array contains a
+    # single NaN.
+    @pytest.mark.skipif(sys.version_info >= (3, 10),
+                        reason="Python 3.10+ math.factorial() requires int")
+    def test_mixed_nan_inputs(self):
+        x = np.array([np.nan, 1, 2, 3, np.nan])
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, "Using factorial\\(\\) with floats is deprecated")
+            result = special.factorial(x, exact=True)
+            assert_equal(np.array([np.nan, 1, 2, 6, np.nan]), result)
+            result = special.factorial(x, exact=False)
+            assert_equal(np.array([np.nan, 1, 2, 6, np.nan]), result)
+
+
+class TestFresnel:
+    @pytest.mark.parametrize("z, s, c", [
+        # some positive value
+        (.5, 0.064732432859999287, 0.49234422587144644),
+        (.5 + .0j, 0.064732432859999287, 0.49234422587144644),
+        # negative half annulus
+        # https://github.com/scipy/scipy/issues/12309
+        # Reference values can be reproduced with
+        # https://www.wolframalpha.com/input/?i=FresnelS%5B-2.0+%2B+0.1i%5D
+        # https://www.wolframalpha.com/input/?i=FresnelC%5B-2.0+%2B+0.1i%5D
+        (
+            -2.0 + 0.1j,
+            -0.3109538687728942-0.0005870728836383176j,
+            -0.4879956866358554+0.10670801832903172j
+        ),
+        (
+            -0.1 - 1.5j,
+            -0.03918309471866977+0.7197508454568574j,
+            0.09605692502968956-0.43625191013617465j
+        ),
+        # a different algorithm kicks in for "large" values, i.e., |z| >= 4.5,
+        # make sure to test both float and complex values; a different
+        # algorithm is used
+        (6.0, 0.44696076, 0.49953147),
+        (6.0 + 0.0j, 0.44696076, 0.49953147),
+        (6.0j, -0.44696076j, 0.49953147j),
+        (-6.0 + 0.0j, -0.44696076, -0.49953147),
+        (-6.0j, 0.44696076j, -0.49953147j),
+        # inf
+        (np.inf, 0.5, 0.5),
+        (-np.inf, -0.5, -0.5),
+    ])
+    def test_fresnel_values(self, z, s, c):
+        frs = array(special.fresnel(z))
+        assert_array_almost_equal(frs, array([s, c]), 8)
+
+    # values from pg. 329, Table 7.11 of A & S,
+    # slightly corrected in the 4th decimal place
+    def test_fresnel_zeros(self):
+        szo, czo = special.fresnel_zeros(5)
+        assert_array_almost_equal(szo,
+                                  array([2.0093+0.2885j,
+                                          2.8335+0.2443j,
+                                          3.4675+0.2185j,
+                                          4.0026+0.2009j,
+                                          4.4742+0.1877j]),3)
+        assert_array_almost_equal(czo,
+                                  array([1.7437+0.3057j,
+                                          2.6515+0.2529j,
+                                          3.3204+0.2240j,
+                                          3.8757+0.2047j,
+                                          4.3611+0.1907j]),3)
+        vals1 = special.fresnel(szo)[0]
+        vals2 = special.fresnel(czo)[1]
+        assert_array_almost_equal(vals1,0,14)
+        assert_array_almost_equal(vals2,0,14)
+
+    def test_fresnelc_zeros(self):
+        szo, czo = special.fresnel_zeros(6)
+        frc = special.fresnelc_zeros(6)
+        assert_array_almost_equal(frc,czo,12)
+
+    def test_fresnels_zeros(self):
+        szo, czo = special.fresnel_zeros(5)
+        frs = special.fresnels_zeros(5)
+        assert_array_almost_equal(frs,szo,12)
+
+
+class TestGamma:
+    def test_gamma(self):
+        gam = special.gamma(5)
+        assert_equal(gam,24.0)
+
+    def test_gammaln(self):
+        gamln = special.gammaln(3)
+        lngam = log(special.gamma(3))
+        assert_almost_equal(gamln,lngam,8)
+
+    def test_gammainccinv(self):
+        gccinv = special.gammainccinv(.5,.5)
+        gcinv = special.gammaincinv(.5,.5)
+        assert_almost_equal(gccinv,gcinv,8)
+
+    @with_special_errors
+    def test_gammaincinv(self):
+        y = special.gammaincinv(.4,.4)
+        x = special.gammainc(.4,y)
+        assert_almost_equal(x,0.4,1)
+        y = special.gammainc(10, 0.05)
+        x = special.gammaincinv(10, 2.5715803516000736e-20)
+        assert_almost_equal(0.05, x, decimal=10)
+        assert_almost_equal(y, 2.5715803516000736e-20, decimal=10)
+        x = special.gammaincinv(50, 8.20754777388471303050299243573393e-18)
+        assert_almost_equal(11.0, x, decimal=10)
+
+    @with_special_errors
+    def test_975(self):
+        # Regression test for ticket #975 -- switch point in algorithm
+        # check that things work OK at the point, immediately next floats
+        # around it, and a bit further away
+        pts = [0.25,
+               np.nextafter(0.25, 0), 0.25 - 1e-12,
+               np.nextafter(0.25, 1), 0.25 + 1e-12]
+        for xp in pts:
+            y = special.gammaincinv(.4, xp)
+            x = special.gammainc(0.4, y)
+            assert_allclose(x, xp, rtol=1e-12)
+
+    def test_rgamma(self):
+        rgam = special.rgamma(8)
+        rlgam = 1/special.gamma(8)
+        assert_almost_equal(rgam,rlgam,8)
+
+    def test_infinity(self):
+        assert_(np.isinf(special.gamma(-1)))
+        assert_equal(special.rgamma(-1), 0)
+
+
+class TestHankel:
+
+    def test_negv1(self):
+        assert_almost_equal(special.hankel1(-3,2), -special.hankel1(3,2), 14)
+
+    def test_hankel1(self):
+        hank1 = special.hankel1(1,.1)
+        hankrl = (special.jv(1,.1) + special.yv(1,.1)*1j)
+        assert_almost_equal(hank1,hankrl,8)
+
+    def test_negv1e(self):
+        assert_almost_equal(special.hankel1e(-3,2), -special.hankel1e(3,2), 14)
+
+    def test_hankel1e(self):
+        hank1e = special.hankel1e(1,.1)
+        hankrle = special.hankel1(1,.1)*exp(-.1j)
+        assert_almost_equal(hank1e,hankrle,8)
+
+    def test_negv2(self):
+        assert_almost_equal(special.hankel2(-3,2), -special.hankel2(3,2), 14)
+
+    def test_hankel2(self):
+        hank2 = special.hankel2(1,.1)
+        hankrl2 = (special.jv(1,.1) - special.yv(1,.1)*1j)
+        assert_almost_equal(hank2,hankrl2,8)
+
+    def test_negv2e(self):
+        assert_almost_equal(special.hankel2e(-3,2), -special.hankel2e(3,2), 14)
+
+    def test_hankel2e(self):
+        hank2e = special.hankel2e(1,.1)
+        hankrl2e = special.hankel2(1,.1)*exp(.1j)
+        assert_almost_equal(hank2e,hankrl2e,8)
+
+
+class TestHyper:
+    def test_h1vp(self):
+        h1 = special.h1vp(1,.1)
+        h1real = (special.jvp(1,.1) + special.yvp(1,.1)*1j)
+        assert_almost_equal(h1,h1real,8)
+
+    def test_h2vp(self):
+        h2 = special.h2vp(1,.1)
+        h2real = (special.jvp(1,.1) - special.yvp(1,.1)*1j)
+        assert_almost_equal(h2,h2real,8)
+
+    def test_hyp0f1(self):
+        # scalar input
+        assert_allclose(special.hyp0f1(2.5, 0.5), 1.21482702689997, rtol=1e-12)
+        assert_allclose(special.hyp0f1(2.5, 0), 1.0, rtol=1e-15)
+
+        # float input, expected values match mpmath
+        x = special.hyp0f1(3.0, [-1.5, -1, 0, 1, 1.5])
+        expected = np.array([0.58493659229143, 0.70566805723127, 1.0,
+                             1.37789689539747, 1.60373685288480])
+        assert_allclose(x, expected, rtol=1e-12)
+
+        # complex input
+        x = special.hyp0f1(3.0, np.array([-1.5, -1, 0, 1, 1.5]) + 0.j)
+        assert_allclose(x, expected.astype(complex), rtol=1e-12)
+
+        # test broadcasting
+        x1 = [0.5, 1.5, 2.5]
+        x2 = [0, 1, 0.5]
+        x = special.hyp0f1(x1, x2)
+        expected = [1.0, 1.8134302039235093, 1.21482702689997]
+        assert_allclose(x, expected, rtol=1e-12)
+        x = special.hyp0f1(np.row_stack([x1] * 2), x2)
+        assert_allclose(x, np.row_stack([expected] * 2), rtol=1e-12)
+        assert_raises(ValueError, special.hyp0f1,
+                      np.row_stack([x1] * 3), [0, 1])
+
+    def test_hyp0f1_gh5764(self):
+        # Just checks the point that failed; there's a more systematic
+        # test in test_mpmath
+        res = special.hyp0f1(0.8, 0.5 + 0.5*1J)
+        # The expected value was generated using mpmath
+        assert_almost_equal(res, 1.6139719776441115 + 1J*0.80893054061790665)
+
+    def test_hyp1f1(self):
+        hyp1 = special.hyp1f1(.1,.1,.3)
+        assert_almost_equal(hyp1, 1.3498588075760032,7)
+
+        # test contributed by Moritz Deger (2008-05-29)
+        # https://github.com/scipy/scipy/issues/1186 (Trac #659)
+
+        # reference data obtained from Mathematica [a, b, x, M(a,b,x)]:
+        # produced with test_hyp1f1.nb
+        ref_data = array([[-8.38132975e+00, -1.28436461e+01, -2.91081397e+01, 1.04178330e+04],
+                          [2.91076882e+00, -6.35234333e+00, -1.27083993e+01, 6.68132725e+00],
+                          [-1.42938258e+01, 1.80869131e-01, 1.90038728e+01, 1.01385897e+05],
+                          [5.84069088e+00, 1.33187908e+01, 2.91290106e+01, 1.59469411e+08],
+                          [-2.70433202e+01, -1.16274873e+01, -2.89582384e+01, 1.39900152e+24],
+                          [4.26344966e+00, -2.32701773e+01, 1.91635759e+01, 6.13816915e+21],
+                          [1.20514340e+01, -3.40260240e+00, 7.26832235e+00, 1.17696112e+13],
+                          [2.77372955e+01, -1.99424687e+00, 3.61332246e+00, 3.07419615e+13],
+                          [1.50310939e+01, -2.91198675e+01, -1.53581080e+01, -3.79166033e+02],
+                          [1.43995827e+01, 9.84311196e+00, 1.93204553e+01, 2.55836264e+10],
+                          [-4.08759686e+00, 1.34437025e+01, -1.42072843e+01, 1.70778449e+01],
+                          [8.05595738e+00, -1.31019838e+01, 1.52180721e+01, 3.06233294e+21],
+                          [1.81815804e+01, -1.42908793e+01, 9.57868793e+00, -2.84771348e+20],
+                          [-2.49671396e+01, 1.25082843e+01, -1.71562286e+01, 2.36290426e+07],
+                          [2.67277673e+01, 1.70315414e+01, 6.12701450e+00, 7.77917232e+03],
+                          [2.49565476e+01, 2.91694684e+01, 6.29622660e+00, 2.35300027e+02],
+                          [6.11924542e+00, -1.59943768e+00, 9.57009289e+00, 1.32906326e+11],
+                          [-1.47863653e+01, 2.41691301e+01, -1.89981821e+01, 2.73064953e+03],
+                          [2.24070483e+01, -2.93647433e+00, 8.19281432e+00, -6.42000372e+17],
+                          [8.04042600e-01, 1.82710085e+01, -1.97814534e+01, 5.48372441e-01],
+                          [1.39590390e+01, 1.97318686e+01, 2.37606635e+00, 5.51923681e+00],
+                          [-4.66640483e+00, -2.00237930e+01, 7.40365095e+00, 4.50310752e+00],
+                          [2.76821999e+01, -6.36563968e+00, 1.11533984e+01, -9.28725179e+23],
+                          [-2.56764457e+01, 1.24544906e+00, 1.06407572e+01, 1.25922076e+01],
+                          [3.20447808e+00, 1.30874383e+01, 2.26098014e+01, 2.03202059e+04],
+                          [-1.24809647e+01, 4.15137113e+00, -2.92265700e+01, 2.39621411e+08],
+                          [2.14778108e+01, -2.35162960e+00, -1.13758664e+01, 4.46882152e-01],
+                          [-9.85469168e+00, -3.28157680e+00, 1.67447548e+01, -1.07342390e+07],
+                          [1.08122310e+01, -2.47353236e+01, -1.15622349e+01, -2.91733796e+03],
+                          [-2.67933347e+01, -3.39100709e+00, 2.56006986e+01, -5.29275382e+09],
+                          [-8.60066776e+00, -8.02200924e+00, 1.07231926e+01, 1.33548320e+06],
+                          [-1.01724238e-01, -1.18479709e+01, -2.55407104e+01, 1.55436570e+00],
+                          [-3.93356771e+00, 2.11106818e+01, -2.57598485e+01, 2.13467840e+01],
+                          [3.74750503e+00, 1.55687633e+01, -2.92841720e+01, 1.43873509e-02],
+                          [6.99726781e+00, 2.69855571e+01, -1.63707771e+01, 3.08098673e-02],
+                          [-2.31996011e+01, 3.47631054e+00, 9.75119815e-01, 1.79971073e-02],
+                          [2.38951044e+01, -2.91460190e+01, -2.50774708e+00, 9.56934814e+00],
+                          [1.52730825e+01, 5.77062507e+00, 1.21922003e+01, 1.32345307e+09],
+                          [1.74673917e+01, 1.89723426e+01, 4.94903250e+00, 9.90859484e+01],
+                          [1.88971241e+01, 2.86255413e+01, 5.52360109e-01, 1.44165360e+00],
+                          [1.02002319e+01, -1.66855152e+01, -2.55426235e+01, 6.56481554e+02],
+                          [-1.79474153e+01, 1.22210200e+01, -1.84058212e+01, 8.24041812e+05],
+                          [-1.36147103e+01, 1.32365492e+00, -7.22375200e+00, 9.92446491e+05],
+                          [7.57407832e+00, 2.59738234e+01, -1.34139168e+01, 3.64037761e-02],
+                          [2.21110169e+00, 1.28012666e+01, 1.62529102e+01, 1.33433085e+02],
+                          [-2.64297569e+01, -1.63176658e+01, -1.11642006e+01, -2.44797251e+13],
+                          [-2.46622944e+01, -3.02147372e+00, 8.29159315e+00, -3.21799070e+05],
+                          [-1.37215095e+01, -1.96680183e+01, 2.91940118e+01, 3.21457520e+12],
+                          [-5.45566105e+00, 2.81292086e+01, 1.72548215e-01, 9.66973000e-01],
+                          [-1.55751298e+00, -8.65703373e+00, 2.68622026e+01, -3.17190834e+16],
+                          [2.45393609e+01, -2.70571903e+01, 1.96815505e+01, 1.80708004e+37],
+                          [5.77482829e+00, 1.53203143e+01, 2.50534322e+01, 1.14304242e+06],
+                          [-1.02626819e+01, 2.36887658e+01, -2.32152102e+01, 7.28965646e+02],
+                          [-1.30833446e+00, -1.28310210e+01, 1.87275544e+01, -9.33487904e+12],
+                          [5.83024676e+00, -1.49279672e+01, 2.44957538e+01, -7.61083070e+27],
+                          [-2.03130747e+01, 2.59641715e+01, -2.06174328e+01, 4.54744859e+04],
+                          [1.97684551e+01, -2.21410519e+01, -2.26728740e+01, 3.53113026e+06],
+                          [2.73673444e+01, 2.64491725e+01, 1.57599882e+01, 1.07385118e+07],
+                          [5.73287971e+00, 1.21111904e+01, 1.33080171e+01, 2.63220467e+03],
+                          [-2.82751072e+01, 2.08605881e+01, 9.09838900e+00, -6.60957033e-07],
+                          [1.87270691e+01, -1.74437016e+01, 1.52413599e+01, 6.59572851e+27],
+                          [6.60681457e+00, -2.69449855e+00, 9.78972047e+00, -2.38587870e+12],
+                          [1.20895561e+01, -2.51355765e+01, 2.30096101e+01, 7.58739886e+32],
+                          [-2.44682278e+01, 2.10673441e+01, -1.36705538e+01, 4.54213550e+04],
+                          [-4.50665152e+00, 3.72292059e+00, -4.83403707e+00, 2.68938214e+01],
+                          [-7.46540049e+00, -1.08422222e+01, -1.72203805e+01, -2.09402162e+02],
+                          [-2.00307551e+01, -7.50604431e+00, -2.78640020e+01, 4.15985444e+19],
+                          [1.99890876e+01, 2.20677419e+01, -2.51301778e+01, 1.23840297e-09],
+                          [2.03183823e+01, -7.66942559e+00, 2.10340070e+01, 1.46285095e+31],
+                          [-2.90315825e+00, -2.55785967e+01, -9.58779316e+00, 2.65714264e-01],
+                          [2.73960829e+01, -1.80097203e+01, -2.03070131e+00, 2.52908999e+02],
+                          [-2.11708058e+01, -2.70304032e+01, 2.48257944e+01, 3.09027527e+08],
+                          [2.21959758e+01, 4.00258675e+00, -1.62853977e+01, -9.16280090e-09],
+                          [1.61661840e+01, -2.26845150e+01, 2.17226940e+01, -8.24774394e+33],
+                          [-3.35030306e+00, 1.32670581e+00, 9.39711214e+00, -1.47303163e+01],
+                          [7.23720726e+00, -2.29763909e+01, 2.34709682e+01, -9.20711735e+29],
+                          [2.71013568e+01, 1.61951087e+01, -7.11388906e-01, 2.98750911e-01],
+                          [8.40057933e+00, -7.49665220e+00, 2.95587388e+01, 6.59465635e+29],
+                          [-1.51603423e+01, 1.94032322e+01, -7.60044357e+00, 1.05186941e+02],
+                          [-8.83788031e+00, -2.72018313e+01, 1.88269907e+00, 1.81687019e+00],
+                          [-1.87283712e+01, 5.87479570e+00, -1.91210203e+01, 2.52235612e+08],
+                          [-5.61338513e-01, 2.69490237e+01, 1.16660111e-01, 9.97567783e-01],
+                          [-5.44354025e+00, -1.26721408e+01, -4.66831036e+00, 1.06660735e-01],
+                          [-2.18846497e+00, 2.33299566e+01, 9.62564397e+00, 3.03842061e-01],
+                          [6.65661299e+00, -2.39048713e+01, 1.04191807e+01, 4.73700451e+13],
+                          [-2.57298921e+01, -2.60811296e+01, 2.74398110e+01, -5.32566307e+11],
+                          [-1.11431826e+01, -1.59420160e+01, -1.84880553e+01, -1.01514747e+02],
+                          [6.50301931e+00, 2.59859051e+01, -2.33270137e+01, 1.22760500e-02],
+                          [-1.94987891e+01, -2.62123262e+01, 3.90323225e+00, 1.71658894e+01],
+                          [7.26164601e+00, -1.41469402e+01, 2.81499763e+01, -2.50068329e+31],
+                          [-1.52424040e+01, 2.99719005e+01, -2.85753678e+01, 1.31906693e+04],
+                          [5.24149291e+00, -1.72807223e+01, 2.22129493e+01, 2.50748475e+25],
+                          [3.63207230e-01, -9.54120862e-02, -2.83874044e+01, 9.43854939e-01],
+                          [-2.11326457e+00, -1.25707023e+01, 1.17172130e+00, 1.20812698e+00],
+                          [2.48513582e+00, 1.03652647e+01, -1.84625148e+01, 6.47910997e-02],
+                          [2.65395942e+01, 2.74794672e+01, 1.29413428e+01, 2.89306132e+05],
+                          [-9.49445460e+00, 1.59930921e+01, -1.49596331e+01, 3.27574841e+02],
+                          [-5.89173945e+00, 9.96742426e+00, 2.60318889e+01, -3.15842908e-01],
+                          [-1.15387239e+01, -2.21433107e+01, -2.17686413e+01, 1.56724718e-01],
+                          [-5.30592244e+00, -2.42752190e+01, 1.29734035e+00, 1.31985534e+00]])
+
+        for a,b,c,expected in ref_data:
+            result = special.hyp1f1(a,b,c)
+            assert_(abs(expected - result)/expected < 1e-4)
+
+    def test_hyp1f1_gh2957(self):
+        hyp1 = special.hyp1f1(0.5, 1.5, -709.7827128933)
+        hyp2 = special.hyp1f1(0.5, 1.5, -709.7827128934)
+        assert_almost_equal(hyp1, hyp2, 12)
+
+    def test_hyp1f1_gh2282(self):
+        hyp = special.hyp1f1(0.5, 1.5, -1000)
+        assert_almost_equal(hyp, 0.028024956081989643, 12)
+
+    def test_hyp2f1(self):
+        # a collection of special cases taken from AMS 55
+        values = [[0.5, 1, 1.5, 0.2**2, 0.5/0.2*log((1+0.2)/(1-0.2))],
+                  [0.5, 1, 1.5, -0.2**2, 1./0.2*arctan(0.2)],
+                  [1, 1, 2, 0.2, -1/0.2*log(1-0.2)],
+                  [3, 3.5, 1.5, 0.2**2,
+                      0.5/0.2/(-5)*((1+0.2)**(-5)-(1-0.2)**(-5))],
+                  [-3, 3, 0.5, sin(0.2)**2, cos(2*3*0.2)],
+                  [3, 4, 8, 1, special.gamma(8)*special.gamma(8-4-3)/special.gamma(8-3)/special.gamma(8-4)],
+                  [3, 2, 3-2+1, -1, 1./2**3*sqrt(pi) *
+                      special.gamma(1+3-2)/special.gamma(1+0.5*3-2)/special.gamma(0.5+0.5*3)],
+                  [5, 2, 5-2+1, -1, 1./2**5*sqrt(pi) *
+                      special.gamma(1+5-2)/special.gamma(1+0.5*5-2)/special.gamma(0.5+0.5*5)],
+                  [4, 0.5+4, 1.5-2*4, -1./3, (8./9)**(-2*4)*special.gamma(4./3) *
+                      special.gamma(1.5-2*4)/special.gamma(3./2)/special.gamma(4./3-2*4)],
+                  # and some others
+                  # ticket #424
+                  [1.5, -0.5, 1.0, -10.0, 4.1300097765277476484],
+                  # negative integer a or b, with c-a-b integer and x > 0.9
+                  [-2,3,1,0.95,0.715],
+                  [2,-3,1,0.95,-0.007],
+                  [-6,3,1,0.95,0.0000810625],
+                  [2,-5,1,0.95,-0.000029375],
+                  # huge negative integers
+                  (10, -900, 10.5, 0.99, 1.91853705796607664803709475658e-24),
+                  (10, -900, -10.5, 0.99, 3.54279200040355710199058559155e-18),
+                  ]
+        for i, (a, b, c, x, v) in enumerate(values):
+            cv = special.hyp2f1(a, b, c, x)
+            assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
+
+    def test_hyperu(self):
+        val1 = special.hyperu(1,0.1,100)
+        assert_almost_equal(val1,0.0098153,7)
+        a,b = [0.3,0.6,1.2,-2.7],[1.5,3.2,-0.4,-3.2]
+        a,b = asarray(a), asarray(b)
+        z = 0.5
+        hypu = special.hyperu(a,b,z)
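+        # compare against the standard connection formula (A & S 13.1.3):
+        # U(a,b,z) = (pi/sin(pi*b)) * (M(a,b,z)/(Gamma(1+a-b)*Gamma(b))
+        #            - z**(1-b)*M(1+a-b,2-b,z)/(Gamma(a)*Gamma(2-b)))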
+        hprl = (pi/sin(pi*b))*(special.hyp1f1(a,b,z) /
+                               (special.gamma(1+a-b)*special.gamma(b)) -
+                               z**(1-b)*special.hyp1f1(1+a-b,2-b,z)
+                               / (special.gamma(a)*special.gamma(2-b)))
+        assert_array_almost_equal(hypu,hprl,12)
+
+    def test_hyperu_gh2287(self):
+        assert_almost_equal(special.hyperu(1, 1.5, 20.2),
+                            0.048360918656699191, 12)
+
+
+class TestBessel:
+    def test_itj0y0(self):
+        it0 = array(special.itj0y0(.2))
+        assert_array_almost_equal(it0,array([0.19933433254006822, -0.34570883800412566]),8)
+
+    def test_it2j0y0(self):
+        it2 = array(special.it2j0y0(.2))
+        assert_array_almost_equal(it2,array([0.0049937546274601858, -0.43423067011231614]),8)
+
+    def test_negv_iv(self):
+        assert_equal(special.iv(3,2), special.iv(-3,2))
+
+    def test_j0(self):
+        oz = special.j0(.1)
+        ozr = special.jn(0,.1)
+        assert_almost_equal(oz,ozr,8)
+
+    def test_j1(self):
+        o1 = special.j1(.1)
+        o1r = special.jn(1,.1)
+        assert_almost_equal(o1,o1r,8)
+
+    def test_jn(self):
+        jnnr = special.jn(1,.2)
+        assert_almost_equal(jnnr,0.099500832639235995,8)
+
+    def test_negv_jv(self):
+        assert_almost_equal(special.jv(-3,2), -special.jv(3,2), 14)
+
+    def test_jv(self):
+        values = [[0, 0.1, 0.99750156206604002],
+                  [2./3, 1e-8, 0.3239028506761532e-5],
+                  [2./3, 1e-10, 0.1503423854873779e-6],
+                  [3.1, 1e-10, 0.1711956265409013e-32],
+                  [2./3, 4.0, -0.2325440850267039],
+                  ]
+        for i, (v, x, y) in enumerate(values):
+            yc = special.jv(v, x)
+            assert_almost_equal(yc, y, 8, err_msg='test #%d' % i)
+
+    def test_negv_jve(self):
+        assert_almost_equal(special.jve(-3,2), -special.jve(3,2), 14)
+
+    def test_jve(self):
+        jvexp = special.jve(1,.2)
+        assert_almost_equal(jvexp,0.099500832639235995,8)
+        jvexp1 = special.jve(1,.2+1j)
+        z = .2+1j
+        jvexpr = special.jv(1,z)*exp(-abs(z.imag))
+        assert_almost_equal(jvexp1,jvexpr,8)
+
+    def test_jn_zeros(self):
+        jn0 = special.jn_zeros(0,5)
+        jn1 = special.jn_zeros(1,5)
+        assert_array_almost_equal(jn0,array([2.4048255577,
+                                              5.5200781103,
+                                              8.6537279129,
+                                              11.7915344391,
+                                              14.9309177086]),4)
+        assert_array_almost_equal(jn1,array([3.83171,
+                                              7.01559,
+                                              10.17347,
+                                              13.32369,
+                                              16.47063]),4)
+
+        jn102 = special.jn_zeros(102,5)
+        assert_allclose(jn102, array([110.89174935992040343,
+                                       117.83464175788308398,
+                                       123.70194191713507279,
+                                       129.02417238949092824,
+                                       134.00114761868422559]), rtol=1e-13)
+
+        jn301 = special.jn_zeros(301,5)
+        assert_allclose(jn301, array([313.59097866698830153,
+                                       323.21549776096288280,
+                                       331.22338738656748796,
+                                       338.39676338872084500,
+                                       345.03284233056064157]), rtol=1e-13)
+
+    def test_jn_zeros_slow(self):
+        jn0 = special.jn_zeros(0, 300)
+        assert_allclose(jn0[260-1], 816.02884495068867280, rtol=1e-13)
+        assert_allclose(jn0[280-1], 878.86068707124422606, rtol=1e-13)
+        assert_allclose(jn0[300-1], 941.69253065317954064, rtol=1e-13)
+
+        jn10 = special.jn_zeros(10, 300)
+        assert_allclose(jn10[260-1], 831.67668514305631151, rtol=1e-13)
+        assert_allclose(jn10[280-1], 894.51275095371316931, rtol=1e-13)
+        assert_allclose(jn10[300-1], 957.34826370866539775, rtol=1e-13)
+
+        jn3010 = special.jn_zeros(3010,5)
+        assert_allclose(jn3010, array([3036.86590780927,
+                                        3057.06598526482,
+                                        3073.66360690272,
+                                        3088.37736494778,
+                                        3101.86438139042]), rtol=1e-8)
+
+    def test_jnjnp_zeros(self):
+        jn = special.jn
+
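+        # Derivative from the standard Bessel recurrence (a known identity,
+        # matching the helper below):
+        #     J_n'(x) = (J_{n-1}(x) - J_{n+1}(x)) / 2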
+        def jnp(n, x):
+            return (jn(n-1,x) - jn(n+1,x))/2
+        for nt in range(1, 30):
+            z, n, m, t = special.jnjnp_zeros(nt)
+            for zz, nn, tt in zip(z, n, t):
+                if tt == 0:
+                    assert_allclose(jn(nn, zz), 0, atol=1e-6)
+                elif tt == 1:
+                    assert_allclose(jnp(nn, zz), 0, atol=1e-6)
+                else:
+                    raise AssertionError("Invalid t return for nt=%d" % nt)
+
+    def test_jnp_zeros(self):
+        jnp = special.jnp_zeros(1,5)
+        assert_array_almost_equal(jnp, array([1.84118,
+                                                5.33144,
+                                                8.53632,
+                                                11.70600,
+                                                14.86359]),4)
+        jnp = special.jnp_zeros(443,5)
+        assert_allclose(special.jvp(443, jnp), 0, atol=1e-15)
+
+    def test_jnyn_zeros(self):
+        jnz = special.jnyn_zeros(1,5)
+        assert_array_almost_equal(jnz,(array([3.83171,
+                                                7.01559,
+                                                10.17347,
+                                                13.32369,
+                                                16.47063]),
+                                       array([1.84118,
+                                                5.33144,
+                                                8.53632,
+                                                11.70600,
+                                                14.86359]),
+                                       array([2.19714,
+                                                5.42968,
+                                                8.59601,
+                                                11.74915,
+                                                14.89744]),
+                                       array([3.68302,
+                                                6.94150,
+                                                10.12340,
+                                                13.28576,
+                                                16.44006])),5)
+
+    def test_jvp(self):
+        jvprim = special.jvp(2,2)
+        jv0 = (special.jv(1,2)-special.jv(3,2))/2
+        assert_almost_equal(jvprim,jv0,10)
+
+    def test_k0(self):
+        ozk = special.k0(.1)
+        ozkr = special.kv(0,.1)
+        assert_almost_equal(ozk,ozkr,8)
+
+    def test_k0e(self):
+        ozke = special.k0e(.1)
+        ozker = special.kve(0,.1)
+        assert_almost_equal(ozke,ozker,8)
+
+    def test_k1(self):
+        o1k = special.k1(.1)
+        o1kr = special.kv(1,.1)
+        assert_almost_equal(o1k,o1kr,8)
+
+    def test_k1e(self):
+        o1ke = special.k1e(.1)
+        o1ker = special.kve(1,.1)
+        assert_almost_equal(o1ke,o1ker,8)
+
+    def test_jacobi(self):
+        a = 5*np.random.random() - 1
+        b = 5*np.random.random() - 1
+        P0 = special.jacobi(0,a,b)
+        P1 = special.jacobi(1,a,b)
+        P2 = special.jacobi(2,a,b)
+        P3 = special.jacobi(3,a,b)
+
+        assert_array_almost_equal(P0.c,[1],13)
+        assert_array_almost_equal(P1.c,array([a+b+2,a-b])/2.0,13)
+        cp = [(a+b+3)*(a+b+4), 4*(a+b+3)*(a+2), 4*(a+1)*(a+2)]
+        p2c = [cp[0],cp[1]-2*cp[0],cp[2]-cp[1]+cp[0]]
+        assert_array_almost_equal(P2.c,array(p2c)/8.0,13)
+        cp = [(a+b+4)*(a+b+5)*(a+b+6),6*(a+b+4)*(a+b+5)*(a+3),
+              12*(a+b+4)*(a+2)*(a+3),8*(a+1)*(a+2)*(a+3)]
+        p3c = [cp[0],cp[1]-3*cp[0],cp[2]-2*cp[1]+3*cp[0],cp[3]-cp[2]+cp[1]-cp[0]]
+        assert_array_almost_equal(P3.c,array(p3c)/48.0,13)
+
+    def test_kn(self):
+        kn1 = special.kn(0,.2)
+        assert_almost_equal(kn1,1.7527038555281462,8)
+
+    def test_negv_kv(self):
+        assert_equal(special.kv(3.0, 2.2), special.kv(-3.0, 2.2))
+
+    def test_kv0(self):
+        kv0 = special.kv(0,.2)
+        assert_almost_equal(kv0, 1.7527038555281462, 10)
+
+    def test_kv1(self):
+        kv1 = special.kv(1,0.2)
+        assert_almost_equal(kv1, 4.775972543220472, 10)
+
+    def test_kv2(self):
+        kv2 = special.kv(2,0.2)
+        assert_almost_equal(kv2, 49.51242928773287, 10)
+
+    def test_kn_largeorder(self):
+        assert_allclose(special.kn(32, 1), 1.7516596664574289e+43)
+
+    def test_kv_largearg(self):
+        assert_equal(special.kv(0, 1e19), 0)
+
+    def test_negv_kve(self):
+        assert_equal(special.kve(3.0, 2.2), special.kve(-3.0, 2.2))
+
+    def test_kve(self):
+        kve1 = special.kve(0,.2)
+        kv1 = special.kv(0,.2)*exp(.2)
+        assert_almost_equal(kve1,kv1,8)
+        z = .2+1j
+        kve2 = special.kve(0,z)
+        kv2 = special.kv(0,z)*exp(z)
+        assert_almost_equal(kve2,kv2,8)
+
+    def test_kvp_v0n1(self):
+        z = 2.2
+        assert_almost_equal(-special.kv(1,z), special.kvp(0,z, n=1), 10)
+
+    def test_kvp_n1(self):
+        v = 3.
+        z = 2.2
+        xc = -special.kv(v+1,z) + v/z*special.kv(v,z)
+        x = special.kvp(v,z, n=1)
+        assert_almost_equal(xc, x, 10)  # checks kvp against the recurrence K_v'(z) = (v/z)*K_v(z) - K_{v+1}(z)
+
+    def test_kvp_n2(self):
+        v = 3.
+        z = 2.2
+        xc = (z**2+v**2-v)/z**2 * special.kv(v,z) + special.kv(v+1,z)/z
+        x = special.kvp(v, z, n=2)
+        assert_almost_equal(xc, x, 10)
+
+    def test_y0(self):
+        oz = special.y0(.1)
+        ozr = special.yn(0,.1)
+        assert_almost_equal(oz,ozr,8)
+
+    def test_y1(self):
+        o1 = special.y1(.1)
+        o1r = special.yn(1,.1)
+        assert_almost_equal(o1,o1r,8)
+
+    def test_y0_zeros(self):
+        yo,ypo = special.y0_zeros(2)
+        zo,zpo = special.y0_zeros(2,complex=1)
+        all = r_[yo,zo]
+        allval = r_[ypo,zpo]
+        assert_array_almost_equal(abs(special.yv(0.0,all)),0.0,11)
+        assert_array_almost_equal(abs(special.yv(1,all)-allval),0.0,11)
+
+    def test_y1_zeros(self):
+        y1 = special.y1_zeros(1)
+        assert_array_almost_equal(y1,(array([2.19714]),array([0.52079])),5)
+
+    def test_y1p_zeros(self):
+        y1p = special.y1p_zeros(1,complex=1)
+        assert_array_almost_equal(y1p,(array([0.5768+0.904j]), array([-0.7635+0.5892j])),3)
+
+    def test_yn_zeros(self):
+        an = special.yn_zeros(4,2)
+        assert_array_almost_equal(an,array([5.64515, 9.36162]),5)
+        an = special.yn_zeros(443,5)
+        assert_allclose(an, [450.13573091578090314, 463.05692376675001542,
+                              472.80651546418663566, 481.27353184725625838,
+                              488.98055964441374646], rtol=1e-15)
+
+    def test_ynp_zeros(self):
+        ao = special.ynp_zeros(0,2)
+        assert_array_almost_equal(ao,array([2.19714133, 5.42968104]),6)
+        ao = special.ynp_zeros(43,5)
+        assert_allclose(special.yvp(43, ao), 0, atol=1e-15)
+        ao = special.ynp_zeros(443,5)
+        assert_allclose(special.yvp(443, ao), 0, atol=1e-9)
+
+    def test_ynp_zeros_large_order(self):
+        ao = special.ynp_zeros(443,5)
+        assert_allclose(special.yvp(443, ao), 0, atol=1e-14)
+
+    def test_yn(self):
+        yn2n = special.yn(1,.2)
+        assert_almost_equal(yn2n,-3.3238249881118471,8)
+
+    def test_negv_yv(self):
+        assert_almost_equal(special.yv(-3,2), -special.yv(3,2), 14)
+
+    def test_yv(self):
+        yv2 = special.yv(1,.2)
+        assert_almost_equal(yv2,-3.3238249881118471,8)
+
+    def test_negv_yve(self):
+        assert_almost_equal(special.yve(-3,2), -special.yve(3,2), 14)
+
+    def test_yve(self):
+        yve2 = special.yve(1,.2)
+        assert_almost_equal(yve2,-3.3238249881118471,8)
+        yve2r = special.yv(1,.2+1j)*exp(-1)
+        yve22 = special.yve(1,.2+1j)
+        assert_almost_equal(yve22,yve2r,8)
+
+    def test_yvp(self):
+        yvpr = (special.yv(1,.2) - special.yv(3,.2))/2.0
+        yvp1 = special.yvp(2,.2)
+        assert_array_almost_equal(yvp1,yvpr,10)
+
+    def _cephes_vs_amos_points(self):
+        """Yield points at which to compare Cephes implementation to AMOS"""
+        # check several points, including large-amplitude ones
+        v = [-120, -100.3, -20., -10., -1., -.5, 0., 1., 12.49, 120., 301]
+        z = [-1300, -11, -10, -1, 1., 10., 200.5, 401., 600.5, 700.6, 1300,
+             10003]
+        yield from itertools.product(v, z)
+
+        # check half-integers; these are problematic points at least
+        # for cephes/iv
+        yield from itertools.product(0.5 + arange(-60, 60), [3.5])
+
+    def check_cephes_vs_amos(self, f1, f2, rtol=1e-11, atol=0, skip=None):
+        for v, z in self._cephes_vs_amos_points():
+            if skip is not None and skip(v, z):
+                continue
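+            # c1: real-argument (Cephes) path; c2: complex-argument (AMOS)
+            # path; c3: integer-order variant of the same function.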
+            c1, c2, c3 = f1(v, z), f1(v,z+0j), f2(int(v), z)
+            if np.isinf(c1):
+                assert_(np.abs(c2) >= 1e300, (v, z))
+            elif np.isnan(c1):
+                assert_(c2.imag != 0, (v, z))
+            else:
+                assert_allclose(c1, c2, err_msg=(v, z), rtol=rtol, atol=atol)
+                if v == int(v):
+                    assert_allclose(c3, c2, err_msg=(v, z),
+                                     rtol=rtol, atol=atol)
+
+    @pytest.mark.xfail(platform.machine() == 'ppc64le',
+                       reason="fails on ppc64le")
+    def test_jv_cephes_vs_amos(self):
+        self.check_cephes_vs_amos(special.jv, special.jn, rtol=1e-10, atol=1e-305)
+
+    @pytest.mark.xfail(platform.machine() == 'ppc64le',
+                       reason="fails on ppc64le")
+    def test_yv_cephes_vs_amos(self):
+        self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305)
+
+    def test_yv_cephes_vs_amos_only_small_orders(self):
+        skipper = lambda v, z: (abs(v) > 50)
+        self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305, skip=skipper)
+
+    def test_iv_cephes_vs_amos(self):
+        with np.errstate(all='ignore'):
+            self.check_cephes_vs_amos(special.iv, special.iv, rtol=5e-9, atol=1e-305)
+
+    @pytest.mark.slow
+    def test_iv_cephes_vs_amos_mass_test(self):
+        N = 1000000
+        np.random.seed(1)
+        v = np.random.pareto(0.5, N) * (-1)**np.random.randint(2, size=N)
+        x = np.random.pareto(0.2, N) * (-1)**np.random.randint(2, size=N)
+
+        imsk = (np.random.randint(8, size=N) == 0)
+        v[imsk] = v[imsk].astype(int)
+
+        with np.errstate(all='ignore'):
+            c1 = special.iv(v, x)
+            c2 = special.iv(v, x+0j)
+
+            # deal with differences in the inf and zero cutoffs
+            c1[abs(c1) > 1e300] = np.inf
+            c2[abs(c2) > 1e300] = np.inf
+            c1[abs(c1) < 1e-300] = 0
+            c2[abs(c2) < 1e-300] = 0
+
+            dc = abs(c1/c2 - 1)
+            dc[np.isnan(dc)] = 0
+
+        k = np.argmax(dc)
+
+        # Most error apparently comes from AMOS and not our implementation;
+        # there are some problems near integer orders there
+        assert_(dc[k] < 2e-7, (v[k], x[k], special.iv(v[k], x[k]), special.iv(v[k], x[k]+0j)))
+
+    def test_kv_cephes_vs_amos(self):
+        self.check_cephes_vs_amos(special.kv, special.kn, rtol=1e-9, atol=1e-305)
+        self.check_cephes_vs_amos(special.kv, special.kv, rtol=1e-9, atol=1e-305)
+
+    def test_ticket_623(self):
+        assert_allclose(special.jv(3, 4), 0.43017147387562193)
+        assert_allclose(special.jv(301, 1300), 0.0183487151115275)
+        assert_allclose(special.jv(301, 1296.0682), -0.0224174325312048)
+
+    def test_ticket_853(self):
+        """Negative-order Bessels"""
+        # cephes
+        assert_allclose(special.jv(-1, 1), -0.4400505857449335)
+        assert_allclose(special.jv(-2, 1), 0.1149034849319005)
+        assert_allclose(special.yv(-1, 1), 0.7812128213002887)
+        assert_allclose(special.yv(-2, 1), -1.650682606816255)
+        assert_allclose(special.iv(-1, 1), 0.5651591039924851)
+        assert_allclose(special.iv(-2, 1), 0.1357476697670383)
+        assert_allclose(special.kv(-1, 1), 0.6019072301972347)
+        assert_allclose(special.kv(-2, 1), 1.624838898635178)
+        assert_allclose(special.jv(-0.5, 1), 0.43109886801837607952)
+        assert_allclose(special.yv(-0.5, 1), 0.6713967071418031)
+        assert_allclose(special.iv(-0.5, 1), 1.231200214592967)
+        assert_allclose(special.kv(-0.5, 1), 0.4610685044478945)
+        # amos
+        assert_allclose(special.jv(-1, 1+0j), -0.4400505857449335)
+        assert_allclose(special.jv(-2, 1+0j), 0.1149034849319005)
+        assert_allclose(special.yv(-1, 1+0j), 0.7812128213002887)
+        assert_allclose(special.yv(-2, 1+0j), -1.650682606816255)
+
+        assert_allclose(special.iv(-1, 1+0j), 0.5651591039924851)
+        assert_allclose(special.iv(-2, 1+0j), 0.1357476697670383)
+        assert_allclose(special.kv(-1, 1+0j), 0.6019072301972347)
+        assert_allclose(special.kv(-2, 1+0j), 1.624838898635178)
+
+        assert_allclose(special.jv(-0.5, 1+0j), 0.43109886801837607952)
+        assert_allclose(special.jv(-0.5, 1+1j), 0.2628946385649065-0.827050182040562j)
+        assert_allclose(special.yv(-0.5, 1+0j), 0.6713967071418031)
+        assert_allclose(special.yv(-0.5, 1+1j), 0.967901282890131+0.0602046062142816j)
+
+        assert_allclose(special.iv(-0.5, 1+0j), 1.231200214592967)
+        assert_allclose(special.iv(-0.5, 1+1j), 0.77070737376928+0.39891821043561j)
+        assert_allclose(special.kv(-0.5, 1+0j), 0.4610685044478945)
+        assert_allclose(special.kv(-0.5, 1+1j), 0.06868578341999-0.38157825981268j)
+
+        assert_allclose(special.jve(-0.5,1+0.3j), special.jv(-0.5, 1+0.3j)*exp(-0.3))
+        assert_allclose(special.yve(-0.5,1+0.3j), special.yv(-0.5, 1+0.3j)*exp(-0.3))
+        assert_allclose(special.ive(-0.5,0.3+1j), special.iv(-0.5, 0.3+1j)*exp(-0.3))
+        assert_allclose(special.kve(-0.5,0.3+1j), special.kv(-0.5, 0.3+1j)*exp(0.3+1j))
+
+        assert_allclose(special.hankel1(-0.5, 1+1j), special.jv(-0.5, 1+1j) + 1j*special.yv(-0.5,1+1j))
+        assert_allclose(special.hankel2(-0.5, 1+1j), special.jv(-0.5, 1+1j) - 1j*special.yv(-0.5,1+1j))
+
+    def test_ticket_854(self):
+        """Real-valued Bessel domains"""
+        assert_(isnan(special.jv(0.5, -1)))
+        assert_(isnan(special.iv(0.5, -1)))
+        assert_(isnan(special.yv(0.5, -1)))
+        assert_(isnan(special.yv(1, -1)))
+        assert_(isnan(special.kv(0.5, -1)))
+        assert_(isnan(special.kv(1, -1)))
+        assert_(isnan(special.jve(0.5, -1)))
+        assert_(isnan(special.ive(0.5, -1)))
+        assert_(isnan(special.yve(0.5, -1)))
+        assert_(isnan(special.yve(1, -1)))
+        assert_(isnan(special.kve(0.5, -1)))
+        assert_(isnan(special.kve(1, -1)))
+        assert_(isnan(special.airye(-1)[0:2]).all(), special.airye(-1))
+        assert_(not isnan(special.airye(-1)[2:4]).any(), special.airye(-1))
+
+    def test_gh_7909(self):
+        assert_(special.kv(1.5, 0) == np.inf)
+        assert_(special.kve(1.5, 0) == np.inf)
+
+    def test_ticket_503(self):
+        """Real-valued Bessel I overflow"""
+        assert_allclose(special.iv(1, 700), 1.528500390233901e302)
+        assert_allclose(special.iv(1000, 1120), 1.301564549405821e301)
+
+    def test_iv_hyperg_poles(self):
+        assert_allclose(special.iv(-0.5, 1), 1.231200214592967)
+
+    def iv_series(self, v, z, n=200):
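+        # Power series I_v(z) = sum_k (z/2)^(v+2k) / (k! * Gamma(v+k+1)),
+        # evaluated in log space first to avoid premature overflow.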
+        k = arange(0, n).astype(float_)
+        r = (v+2*k)*log(.5*z) - special.gammaln(k+1) - special.gammaln(v+k+1)
+        r[isnan(r)] = inf
+        r = exp(r)
+        err = abs(r).max() * finfo(float_).eps * n + abs(r[-1])*10
+        return r.sum(), err
+
+    def test_i0_series(self):
+        for z in [1., 10., 200.5]:
+            value, err = self.iv_series(0, z)
+            assert_allclose(special.i0(z), value, atol=err, err_msg=z)
+
+    def test_i1_series(self):
+        for z in [1., 10., 200.5]:
+            value, err = self.iv_series(1, z)
+            assert_allclose(special.i1(z), value, atol=err, err_msg=z)
+
+    def test_iv_series(self):
+        for v in [-20., -10., -1., 0., 1., 12.49, 120.]:
+            for z in [1., 10., 200.5, -1+2j]:
+                value, err = self.iv_series(v, z)
+                assert_allclose(special.iv(v, z), value, atol=err, err_msg=(v, z))
+
+    def test_i0(self):
+        values = [[0.0, 1.0],
+                  [1e-10, 1.0],
+                  [0.1, 0.9071009258],
+                  [0.5, 0.6450352706],
+                  [1.0, 0.4657596077],
+                  [2.5, 0.2700464416],
+                  [5.0, 0.1835408126],
+                  [20.0, 0.0897803119],
+                  ]
+        for i, (x, v) in enumerate(values):
+            cv = special.i0(x) * exp(-x)
+            assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
+
+    def test_i0e(self):
+        oize = special.i0e(.1)
+        oizer = special.ive(0,.1)
+        assert_almost_equal(oize,oizer,8)
+
+    def test_i1(self):
+        values = [[0.0, 0.0],
+                  [1e-10, 0.4999999999500000e-10],
+                  [0.1, 0.0452984468],
+                  [0.5, 0.1564208032],
+                  [1.0, 0.2079104154],
+                  [5.0, 0.1639722669],
+                  [20.0, 0.0875062222],
+                  ]
+        for i, (x, v) in enumerate(values):
+            cv = special.i1(x) * exp(-x)
+            assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
+
+    def test_i1e(self):
+        oi1e = special.i1e(.1)
+        oi1er = special.ive(1,.1)
+        assert_almost_equal(oi1e,oi1er,8)
+
+    def test_iti0k0(self):
+        iti0 = array(special.iti0k0(5))
+        assert_array_almost_equal(iti0,array([31.848667776169801, 1.5673873907283657]),5)
+
+    def test_it2i0k0(self):
+        it2k = special.it2i0k0(.1)
+        assert_array_almost_equal(it2k,array([0.0012503906973464409, 3.3309450354686687]),6)
+
+    def test_iv(self):
+        iv1 = special.iv(0,.1)*exp(-.1)
+        assert_almost_equal(iv1,0.90710092578230106,10)
+
+    def test_negv_ive(self):
+        assert_equal(special.ive(3,2), special.ive(-3,2))
+
+    def test_ive(self):
+        ive1 = special.ive(0,.1)
+        iv1 = special.iv(0,.1)*exp(-.1)
+        assert_almost_equal(ive1,iv1,10)
+
+    def test_ivp0(self):
+        assert_almost_equal(special.iv(1,2), special.ivp(0,2), 10)
+
+    def test_ivp(self):
+        y = (special.iv(0,2) + special.iv(2,2))/2
+        x = special.ivp(1,2)
+        assert_almost_equal(x,y,10)
+
+
+class TestLaguerre:
+    def test_laguerre(self):
+        lag0 = special.laguerre(0)
+        lag1 = special.laguerre(1)
+        lag2 = special.laguerre(2)
+        lag3 = special.laguerre(3)
+        lag4 = special.laguerre(4)
+        lag5 = special.laguerre(5)
+        assert_array_almost_equal(lag0.c,[1],13)
+        assert_array_almost_equal(lag1.c,[-1,1],13)
+        assert_array_almost_equal(lag2.c,array([1,-4,2])/2.0,13)
+        assert_array_almost_equal(lag3.c,array([-1,9,-18,6])/6.0,13)
+        assert_array_almost_equal(lag4.c,array([1,-16,72,-96,24])/24.0,13)
+        assert_array_almost_equal(lag5.c,array([-1,25,-200,600,-600,120])/120.0,13)
+
+    def test_genlaguerre(self):
+        k = 5*np.random.random() - 0.9
+        lag0 = special.genlaguerre(0,k)
+        lag1 = special.genlaguerre(1,k)
+        lag2 = special.genlaguerre(2,k)
+        lag3 = special.genlaguerre(3,k)
+        assert_equal(lag0.c,[1])
+        assert_equal(lag1.c,[-1,k+1])
+        assert_almost_equal(lag2.c,array([1,-2*(k+2),(k+1.)*(k+2.)])/2.0)
+        assert_almost_equal(lag3.c,array([-1,3*(k+3),-3*(k+2)*(k+3),(k+1)*(k+2)*(k+3)])/6.0)
+
+
+# Base polynomials come from Abramowitz and Stegun
+class TestLegendre:
+    def test_legendre(self):
+        leg0 = special.legendre(0)
+        leg1 = special.legendre(1)
+        leg2 = special.legendre(2)
+        leg3 = special.legendre(3)
+        leg4 = special.legendre(4)
+        leg5 = special.legendre(5)
+        assert_equal(leg0.c, [1])
+        assert_equal(leg1.c, [1,0])
+        assert_almost_equal(leg2.c, array([3,0,-1])/2.0, decimal=13)
+        assert_almost_equal(leg3.c, array([5,0,-3,0])/2.0)
+        assert_almost_equal(leg4.c, array([35,0,-30,0,3])/8.0)
+        assert_almost_equal(leg5.c, array([63,0,-70,0,15,0])/8.0)
+
+
+class TestLambda:
+    def test_lmbda(self):
+        lam = special.lmbda(1,.1)
+        lamr = (array([special.jn(0,.1), 2*special.jn(1,.1)/.1]),
+                array([special.jvp(0,.1), -2*special.jv(1,.1)/.01 + 2*special.jvp(1,.1)/.1]))
+        assert_array_almost_equal(lam,lamr,8)
+
+
+class TestLog1p:
+    def test_log1p(self):
+        l1p = (special.log1p(10), special.log1p(11), special.log1p(12))
+        l1prl = (log(11), log(12), log(13))
+        assert_array_almost_equal(l1p,l1prl,8)
+
+    def test_log1pmore(self):
+        l1pm = (special.log1p(1), special.log1p(1.1), special.log1p(1.2))
+        l1pmrl = (log(2),log(2.1),log(2.2))
+        assert_array_almost_equal(l1pm,l1pmrl,8)
+
+
+class TestLegendreFunctions:
+    def test_clpmn(self):
+        z = 0.5+0.3j
+        clp = special.clpmn(2, 2, z, 3)
+        assert_array_almost_equal(clp,
+                   (array([[1.0000, z, 0.5*(3*z*z-1)],
+                           [0.0000, sqrt(z*z-1), 3*z*sqrt(z*z-1)],
+                           [0.0000, 0.0000, 3*(z*z-1)]]),
+                    array([[0.0000, 1.0000, 3*z],
+                           [0.0000, z/sqrt(z*z-1), 3*(2*z*z-1)/sqrt(z*z-1)],
+                           [0.0000, 0.0000, 6*z]])),
+                    7)
+
+    def test_clpmn_close_to_real_2(self):
+        eps = 1e-10
+        m = 1
+        n = 3
+        x = 0.5
+        clp_plus = special.clpmn(m, n, x+1j*eps, 2)[0][m, n]
+        clp_minus = special.clpmn(m, n, x-1j*eps, 2)[0][m, n]
+        assert_array_almost_equal(array([clp_plus, clp_minus]),
+                                  array([special.lpmv(m, n, x),
+                                         special.lpmv(m, n, x)]),
+                                  7)
+
+    def test_clpmn_close_to_real_3(self):
+        eps = 1e-10
+        m = 1
+        n = 3
+        x = 0.5
+        clp_plus = special.clpmn(m, n, x+1j*eps, 3)[0][m, n]
+        clp_minus = special.clpmn(m, n, x-1j*eps, 3)[0][m, n]
+        assert_array_almost_equal(array([clp_plus, clp_minus]),
+                                  array([special.lpmv(m, n, x)*np.exp(-0.5j*m*np.pi),
+                                         special.lpmv(m, n, x)*np.exp(0.5j*m*np.pi)]),
+                                  7)
+
+    def test_clpmn_across_unit_circle(self):
+        eps = 1e-7
+        m = 1
+        n = 1
+        x = 1j
+        for type in [2, 3]:
+            assert_almost_equal(special.clpmn(m, n, x+1j*eps, type)[0][m, n],
+                            special.clpmn(m, n, x-1j*eps, type)[0][m, n], 6)
+
+    def test_inf(self):
+        for z in (1, -1):
+            for n in range(4):
+                for m in range(1, n):
+                    lp = special.clpmn(m, n, z)
+                    assert_(np.isinf(lp[1][1,1:]).all())
+                    lp = special.lpmn(m, n, z)
+                    assert_(np.isinf(lp[1][1,1:]).all())
+
+    def test_deriv_clpmn(self):
+        # data inside and outside of the unit circle
+        zvals = [0.5+0.5j, -0.5+0.5j, -0.5-0.5j, 0.5-0.5j,
+                 1+1j, -1+1j, -1-1j, 1-1j]
+        m = 2
+        n = 3
+        for type in [2, 3]:
+            for z in zvals:
+                for h in [1e-3, 1e-3j]:
+                    approx_derivative = (special.clpmn(m, n, z+0.5*h, type)[0]
+                                         - special.clpmn(m, n, z-0.5*h, type)[0])/h
+                    assert_allclose(special.clpmn(m, n, z, type)[1],
+                                    approx_derivative,
+                                    rtol=1e-4)
+
+    def test_lpmn(self):
+        lp = special.lpmn(0,2,.5)
+        assert_array_almost_equal(lp,(array([[1.00000,
+                                                      0.50000,
+                                                      -0.12500]]),
+                                      array([[0.00000,
+                                                      1.00000,
+                                                      1.50000]])),4)
+
+    def test_lpn(self):
+        lpnf = special.lpn(2,.5)
+        assert_array_almost_equal(lpnf,(array([1.00000,
+                                                        0.50000,
+                                                        -0.12500]),
+                                      array([0.00000,
+                                                      1.00000,
+                                                      1.50000])),4)
+
+    def test_lpmv(self):
+        lp = special.lpmv(0,2,.5)
+        assert_almost_equal(lp,-0.125,7)
+        lp = special.lpmv(0,40,.001)
+        assert_almost_equal(lp,0.1252678976534484,7)
+
+        # XXX: this is outside the domain of the current implementation,
+        #      so ensure it returns a NaN rather than a wrong answer.
+        with np.errstate(all='ignore'):
+            lp = special.lpmv(-1,-1,.001)
+        assert_(lp != 0 or np.isnan(lp))
+
+    def test_lqmn(self):
+        lqmnf = special.lqmn(0,2,.5)
+        lqf = special.lqn(2,.5)
+        assert_array_almost_equal(lqmnf[0][0],lqf[0],4)
+        assert_array_almost_equal(lqmnf[1][0],lqf[1],4)
+
+    def test_lqmn_gt1(self):
+        """algorithm for real arguments changes at 1.0001
+           test against analytical result for m=2, n=1
+        """
+        x0 = 1.0001
+        delta = 0.00002
+        for x in (x0-delta, x0+delta):
+            lq = special.lqmn(2, 1, x)[0][-1, -1]
+            expected = 2/(x*x-1)
+            assert_almost_equal(lq, expected)
+
+    def test_lqmn_shape(self):
+        a, b = special.lqmn(4, 4, 1.1)
+        assert_equal(a.shape, (5, 5))
+        assert_equal(b.shape, (5, 5))
+
+        a, b = special.lqmn(4, 0, 1.1)
+        assert_equal(a.shape, (5, 1))
+        assert_equal(b.shape, (5, 1))
+
+    def test_lqn(self):
+        lqf = special.lqn(2,.5)
+        assert_array_almost_equal(lqf,(array([0.5493, -0.7253, -0.8187]),
+                                       array([1.3333, 1.216, -0.8427])),4)
+
+
+class TestMathieu:
+
+    def test_mathieu_a(self):
+        pass
+
+    def test_mathieu_even_coef(self):
+        special.mathieu_even_coef(2,5)
+        # Q is not defined; the routine is broken and the proper reporting
+        # order could not be determined, so only exercise the call.
+
+    def test_mathieu_odd_coef(self):
+        # same problem as above
+        pass
+
+
+class TestFresnelIntegral:
+
+    def test_modfresnelp(self):
+        pass
+
+    def test_modfresnelm(self):
+        pass
+
+
+class TestOblCvSeq:
+    def test_obl_cv_seq(self):
+        obl = special.obl_cv_seq(0,3,1)
+        assert_array_almost_equal(obl,array([-0.348602,
+                                              1.393206,
+                                              5.486800,
+                                              11.492120]),5)
+
+
+class TestParabolicCylinder:
+    def test_pbdn_seq(self):
+        pb = special.pbdn_seq(1,.1)
+        assert_array_almost_equal(pb,(array([0.9975,
+                                              0.0998]),
+                                      array([-0.0499,
+                                             0.9925])),4)
+
+    def test_pbdv(self):
+        # Smoke test: only verifies that pbdv evaluates without raising;
+        # the recurrence expression below is computed but intentionally unused.
+        special.pbdv(1,.2)
+        1/2*(.2)*special.pbdv(1,.2)[0] - special.pbdv(0,.2)[0]
+
+    def test_pbdv_seq(self):
+        pbn = special.pbdn_seq(1,.1)
+        pbv = special.pbdv_seq(1,.1)
+        assert_array_almost_equal(pbv,(real(pbn[0]),real(pbn[1])),4)
+
+    def test_pbdv_points(self):
+        # simple case
+        eta = np.linspace(-10, 10, 5)
+        z = 2**(eta/2)*np.sqrt(np.pi)/special.gamma(.5-.5*eta)
+        assert_allclose(special.pbdv(eta, 0.)[0], z, rtol=1e-14, atol=1e-14)
+
+        # some points
+        assert_allclose(special.pbdv(10.34, 20.44)[0], 1.3731383034455e-32, rtol=1e-12)
+        assert_allclose(special.pbdv(-9.53, 3.44)[0], 3.166735001119246e-8, rtol=1e-12)
+
+    def test_pbdv_gradient(self):
+        x = np.linspace(-4, 4, 8)[:,None]
+        eta = np.linspace(-10, 10, 5)[None,:]
+
+        p = special.pbdv(eta, x)
+        eps = 1e-7 + 1e-7*abs(x)
+        dp = (special.pbdv(eta, x + eps)[0] - special.pbdv(eta, x - eps)[0]) / eps / 2.
+        assert_allclose(p[1], dp, rtol=1e-6, atol=1e-6)
+
+    def test_pbvv_gradient(self):
+        x = np.linspace(-4, 4, 8)[:,None]
+        eta = np.linspace(-10, 10, 5)[None,:]
+
+        p = special.pbvv(eta, x)
+        eps = 1e-7 + 1e-7*abs(x)
+        dp = (special.pbvv(eta, x + eps)[0] - special.pbvv(eta, x - eps)[0]) / eps / 2.
+        assert_allclose(p[1], dp, rtol=1e-6, atol=1e-6)
+
+
+class TestPolygamma:
+    # from Table 6.2 (pg. 271) of A&S
+    def test_polygamma(self):
+        poly2 = special.polygamma(2,1)
+        poly3 = special.polygamma(3,1)
+        assert_almost_equal(poly2,-2.4041138063,10)
+        assert_almost_equal(poly3,6.4939394023,10)
+
+        # Test polygamma(0, x) == psi(x)
+        x = [2, 3, 1.1e14]
+        assert_almost_equal(special.polygamma(0, x), special.psi(x))
+
+        # Test broadcasting
+        n = [0, 1, 2]
+        x = [0.5, 1.5, 2.5]
+        expected = [-1.9635100260214238, 0.93480220054467933,
+                    -0.23620405164172739]
+        assert_almost_equal(special.polygamma(n, x), expected)
+        expected = np.row_stack([expected]*2)
+        assert_almost_equal(special.polygamma(n, np.row_stack([x]*2)),
+                            expected)
+        assert_almost_equal(special.polygamma(np.row_stack([n]*2), x),
+                            expected)
+
+
+class TestProCvSeq:
+    def test_pro_cv_seq(self):
+        prol = special.pro_cv_seq(0,3,1)
+        assert_array_almost_equal(prol,array([0.319000,
+                                               2.593084,
+                                               6.533471,
+                                               12.514462]),5)
+
+
+class TestPsi:
+    def test_psi(self):
+        ps = special.psi(1)
+        assert_almost_equal(ps,-0.57721566490153287,8)
+
+
+class TestRadian:
+    def test_radian(self):
+        rad = special.radian(90,0,0)
+        assert_almost_equal(rad,pi/2.0,5)
+
+    def test_radianmore(self):
+        rad1 = special.radian(90,1,60)
+        assert_almost_equal(rad1,pi/2+0.0005816135199345904,5)
+
+
+class TestRiccati:
+    def test_riccati_jn(self):
+        N, x = 2, 0.2
+        S = np.empty((N, N))
+        for n in range(N):
+            j = special.spherical_jn(n, x)
+            jp = special.spherical_jn(n, x, derivative=True)
+            S[0,n] = x*j
+            S[1,n] = x*jp + j
+        assert_array_almost_equal(S, special.riccati_jn(n, x), 8)
+
+    def test_riccati_yn(self):
+        N, x = 2, 0.2
+        C = np.empty((N, N))
+        for n in range(N):
+            y = special.spherical_yn(n, x)
+            yp = special.spherical_yn(n, x, derivative=True)
+            C[0,n] = x*y
+            C[1,n] = x*yp + y
+        assert_array_almost_equal(C, special.riccati_yn(n, x), 8)
+
+
+class TestRound:
+    def test_round(self):
+        rnd = list(map(int, (special.round(10.1), special.round(10.4),
+                             special.round(10.5), special.round(10.6))))
+
+        # Note: According to the documentation, scipy.special.round is
+        # supposed to round to the nearest even number if the fractional
+        # part is exactly 0.5. On some platforms, this does not appear
+        # to work and thus this test may fail. However, this unit test is
+        # correctly written.
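+        # For reference, strict round-half-to-even would give
+        # special.round(10.5) -> 10 and special.round(11.5) -> 12.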
+        rndrl = (10,10,10,11)
+        assert_array_equal(rnd,rndrl)
+
+
+def test_sph_harm():
+    # Tests derived from tables in
+    # https://en.wikipedia.org/wiki/Table_of_spherical_harmonics
+    sh = special.sph_harm
+    pi = np.pi
+    exp = np.exp
+    sqrt = np.sqrt
+    sin = np.sin
+    cos = np.cos
+    assert_array_almost_equal(sh(0,0,0,0),
+           0.5/sqrt(pi))
+    assert_array_almost_equal(sh(-2,2,0.,pi/4),
+           0.25*sqrt(15./(2.*pi)) *
+           (sin(pi/4))**2.)
+    assert_array_almost_equal(sh(-2,2,0.,pi/2),
+           0.25*sqrt(15./(2.*pi)))
+    assert_array_almost_equal(sh(2,2,pi,pi/2),
+           0.25*sqrt(15/(2.*pi)) *
+           exp(0+2.*pi*1j)*sin(pi/2.)**2.)
+    assert_array_almost_equal(sh(2,4,pi/4.,pi/3.),
+           (3./8.)*sqrt(5./(2.*pi)) *
+           exp(0+2.*pi/4.*1j) *
+           sin(pi/3.)**2. *
+           (7.*cos(pi/3.)**2.-1))
+    assert_array_almost_equal(sh(4,4,pi/8.,pi/6.),
+           (3./16.)*sqrt(35./(2.*pi)) *
+           exp(0+4.*pi/8.*1j)*sin(pi/6.)**4.)
+
+
+def test_sph_harm_ufunc_loop_selection():
+    # see https://github.com/scipy/scipy/issues/4895
+    dt = np.dtype(np.complex128)
+    assert_equal(special.sph_harm(0, 0, 0, 0).dtype, dt)
+    assert_equal(special.sph_harm([0], 0, 0, 0).dtype, dt)
+    assert_equal(special.sph_harm(0, [0], 0, 0).dtype, dt)
+    assert_equal(special.sph_harm(0, 0, [0], 0).dtype, dt)
+    assert_equal(special.sph_harm(0, 0, 0, [0]).dtype, dt)
+    assert_equal(special.sph_harm([0], [0], [0], [0]).dtype, dt)
+
+
+class TestStruve:
+    def _series(self, v, z, n=100):
+        """Compute Struve function & error estimate from its power series."""
+        k = arange(0, n)
+        r = (-1)**k * (.5*z)**(2*k+v+1)/special.gamma(k+1.5)/special.gamma(k+v+1.5)
+        err = abs(r).max() * finfo(float_).eps * n
+        return r.sum(), err
+
+    def test_vs_series(self):
+        """Check Struve function versus its power series"""
+        for v in [-20, -10, -7.99, -3.4, -1, 0, 1, 3.4, 12.49, 16]:
+            for z in [1, 10, 19, 21, 30]:
+                value, err = self._series(v, z)
+                assert_allclose(special.struve(v, z), value, rtol=0, atol=err,
+                                err_msg=(v, z))
+
+    def test_some_values(self):
+        assert_allclose(special.struve(-7.99, 21), 0.0467547614113, rtol=1e-7)
+        assert_allclose(special.struve(-8.01, 21), 0.0398716951023, rtol=1e-8)
+        assert_allclose(special.struve(-3.0, 200), 0.0142134427432, rtol=1e-12)
+        assert_allclose(special.struve(-8.0, -41), 0.0192469727846, rtol=1e-11)
+        assert_equal(special.struve(-12, -41), -special.struve(-12, 41))
+        assert_equal(special.struve(+12, -41), -special.struve(+12, 41))
+        assert_equal(special.struve(-11, -41), +special.struve(-11, 41))
+        assert_equal(special.struve(+11, -41), +special.struve(+11, 41))
+
+        assert_(isnan(special.struve(-7.1, -1)))
+        assert_(isnan(special.struve(-10.1, -1)))
+
+    def test_regression_679(self):
+        """Regression test for #679"""
+        assert_allclose(special.struve(-1.0, 20 - 1e-8), special.struve(-1.0, 20 + 1e-8))
+        assert_allclose(special.struve(-2.0, 20 - 1e-8), special.struve(-2.0, 20 + 1e-8))
+        assert_allclose(special.struve(-4.3, 20 - 1e-8), special.struve(-4.3, 20 + 1e-8))
+
+
+def test_chi2_smalldf():
+    assert_almost_equal(special.chdtr(0.6,3), 0.957890536704110)
+
+
+def test_ch2_inf():
+    assert_equal(special.chdtr(0.7,np.inf), 1.0)
+
+
+def test_chi2c_smalldf():
+    assert_almost_equal(special.chdtrc(0.6,3), 1-0.957890536704110)
+
+
+def test_chi2_inv_smalldf():
+    assert_almost_equal(special.chdtri(0.6,1-0.957890536704110), 3)
+
+
+def test_agm_simple():
+    rtol = 1e-13
+
+    # Gauss's constant
+    assert_allclose(1/special.agm(1, np.sqrt(2)), 0.834626841674073186,
+                    rtol=rtol)
+
+    # These values were computed using Wolfram Alpha, with the
+    # function ArithmeticGeometricMean[a, b].
+    agm13 = 1.863616783244897
+    agm15 = 2.604008190530940
+    agm35 = 3.936235503649555
+    assert_allclose(special.agm([[1], [3]], [1, 3, 5]),
+                    [[1, agm13, agm15],
+                     [agm13, 3, agm35]], rtol=rtol)
+
+    # Computed by the iteration formula using mpmath,
+    # with mpmath.mp.prec = 1000:
+    agm12 = 1.4567910310469068
+    assert_allclose(special.agm(1, 2), agm12, rtol=rtol)
+    assert_allclose(special.agm(2, 1), agm12, rtol=rtol)
+    assert_allclose(special.agm(-1, -2), -agm12, rtol=rtol)
+    assert_allclose(special.agm(24, 6), 13.458171481725614, rtol=rtol)
+    assert_allclose(special.agm(13, 123456789.5), 11111458.498599306,
+                    rtol=rtol)
+    assert_allclose(special.agm(1e30, 1), 2.229223055945383e+28, rtol=rtol)
+    assert_allclose(special.agm(1e-22, 1), 0.030182566420169886, rtol=rtol)
+    assert_allclose(special.agm(1e150, 1e180), 2.229223055945383e+178,
+                    rtol=rtol)
+    assert_allclose(special.agm(1e180, 1e-150), 2.0634722510162677e+177,
+                    rtol=rtol)
+    assert_allclose(special.agm(1e-150, 1e-170), 3.3112619670463756e-152,
+                    rtol=rtol)
+    fi = np.finfo(1.0)
+    assert_allclose(special.agm(fi.tiny, fi.max), 1.9892072050015473e+305,
+                    rtol=rtol)
+    assert_allclose(special.agm(0.75*fi.max, fi.max), 1.564904312298045e+308,
+                    rtol=rtol)
+    assert_allclose(special.agm(fi.tiny, 3*fi.tiny), 4.1466849866735005e-308,
+                    rtol=rtol)
+
+    # zero, nan and inf cases.
+    assert_equal(special.agm(0, 0), 0)
+    assert_equal(special.agm(99, 0), 0)
+
+    assert_equal(special.agm(-1, 10), np.nan)
+    assert_equal(special.agm(0, np.inf), np.nan)
+    assert_equal(special.agm(np.inf, 0), np.nan)
+    assert_equal(special.agm(0, -np.inf), np.nan)
+    assert_equal(special.agm(-np.inf, 0), np.nan)
+    assert_equal(special.agm(np.inf, -np.inf), np.nan)
+    assert_equal(special.agm(-np.inf, np.inf), np.nan)
+    assert_equal(special.agm(1, np.nan), np.nan)
+    assert_equal(special.agm(np.nan, -1), np.nan)
+
+    assert_equal(special.agm(1, np.inf), np.inf)
+    assert_equal(special.agm(np.inf, 1), np.inf)
+    assert_equal(special.agm(-1, -np.inf), -np.inf)
+    assert_equal(special.agm(-np.inf, -1), -np.inf)
+
+
+def test_legacy():
+    # Legacy behavior: truncating arguments to integers
+    with suppress_warnings() as sup:
+        sup.filter(RuntimeWarning, "floating point number truncated to an integer")
+        assert_equal(special.expn(1, 0.3), special.expn(1.8, 0.3))
+        assert_equal(special.nbdtrc(1, 2, 0.3), special.nbdtrc(1.8, 2.8, 0.3))
+        assert_equal(special.nbdtr(1, 2, 0.3), special.nbdtr(1.8, 2.8, 0.3))
+        assert_equal(special.nbdtri(1, 2, 0.3), special.nbdtri(1.8, 2.8, 0.3))
+        assert_equal(special.pdtri(1, 0.3), special.pdtri(1.8, 0.3))
+        assert_equal(special.kn(1, 0.3), special.kn(1.8, 0.3))
+        assert_equal(special.yn(1, 0.3), special.yn(1.8, 0.3))
+        assert_equal(special.smirnov(1, 0.3), special.smirnov(1.8, 0.3))
+        assert_equal(special.smirnovi(1, 0.3), special.smirnovi(1.8, 0.3))
+
+
+@with_special_errors
+def test_error_raising():
+    assert_raises(special.SpecialFunctionError, special.iv, 1, 1e99j)
+
+
+def test_xlogy():
+    def xfunc(x, y):
+        with np.errstate(invalid='ignore'):
+            if x == 0 and not np.isnan(y):
+                return x
+            else:
+                return x*np.log(y)
+
+    z1 = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0)], dtype=float)
+    z2 = np.r_[z1, [(0, 1j), (1, 1j)]]
+
+    w1 = np.vectorize(xfunc)(z1[:,0], z1[:,1])
+    assert_func_equal(special.xlogy, w1, z1, rtol=1e-13, atol=1e-13)
+    w2 = np.vectorize(xfunc)(z2[:,0], z2[:,1])
+    assert_func_equal(special.xlogy, w2, z2, rtol=1e-13, atol=1e-13)
+
+
+def test_xlog1py():
+    def xfunc(x, y):
+        with np.errstate(invalid='ignore'):
+            if x == 0 and not np.isnan(y):
+                return x
+            else:
+                return x * np.log1p(y)
+
+    z1 = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0),
+                     (1, 1e-30)], dtype=float)
+    w1 = np.vectorize(xfunc)(z1[:,0], z1[:,1])
+    assert_func_equal(special.xlog1py, w1, z1, rtol=1e-13, atol=1e-13)
+
+
+def test_entr():
+    def xfunc(x):
+        if x < 0:
+            return -np.inf
+        else:
+            return -special.xlogy(x, x)
+    values = (0, 0.5, 1.0, np.inf)
+    signs = [-1, 1]
+    arr = []
+    for sgn, v in itertools.product(signs, values):
+        arr.append(sgn * v)
+    z = np.array(arr, dtype=float)
+    w = np.vectorize(xfunc, otypes=[np.float64])(z)
+    assert_func_equal(special.entr, w, z, rtol=1e-13, atol=1e-13)
+
+
+def test_kl_div():
+    def xfunc(x, y):
+        if x < 0 or y < 0 or (y == 0 and x != 0):
+            # extension of natural domain to preserve convexity
+            return np.inf
+        elif np.isposinf(x) or np.isposinf(y):
+            # limits within the natural domain
+            return np.inf
+        elif x == 0:
+            return y
+        else:
+            return special.xlogy(x, x/y) - x + y
+    values = (0, 0.5, 1.0)
+    signs = [-1, 1]
+    arr = []
+    for sgna, va, sgnb, vb in itertools.product(signs, values, signs, values):
+        arr.append((sgna*va, sgnb*vb))
+    z = np.array(arr, dtype=float)
+    w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
+    assert_func_equal(special.kl_div, w, z, rtol=1e-13, atol=1e-13)
+
+
+def test_rel_entr():
+    def xfunc(x, y):
+        if x > 0 and y > 0:
+            return special.xlogy(x, x/y)
+        elif x == 0 and y >= 0:
+            return 0
+        else:
+            return np.inf
+    values = (0, 0.5, 1.0)
+    signs = [-1, 1]
+    arr = []
+    for sgna, va, sgnb, vb in itertools.product(signs, values, signs, values):
+        arr.append((sgna*va, sgnb*vb))
+    z = np.array(arr, dtype=float)
+    w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
+    assert_func_equal(special.rel_entr, w, z, rtol=1e-13, atol=1e-13)
+
+
+def test_huber():
+    assert_equal(special.huber(-1, 1.5), np.inf)
+    assert_allclose(special.huber(2, 1.5), 0.5 * np.square(1.5))
+    assert_allclose(special.huber(2, 2.5), 2 * (2.5 - 0.5 * 2))
+
+    def xfunc(delta, r):
+        if delta < 0:
+            return np.inf
+        elif np.abs(r) < delta:
+            return 0.5 * np.square(r)
+        else:
+            return delta * (np.abs(r) - 0.5 * delta)
+
+    z = np.random.randn(10, 2)
+    w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
+    assert_func_equal(special.huber, w, z, rtol=1e-13, atol=1e-13)
+
+
+def test_pseudo_huber():
+    def xfunc(delta, r):
+        if delta < 0:
+            return np.inf
+        elif (not delta) or (not r):
+            return 0
+        else:
+            return delta**2 * (np.sqrt(1 + (r/delta)**2) - 1)
+
+    z = np.array(np.random.randn(10, 2).tolist() + [[0, 0.5], [0.5, 0]])
+    w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
+    assert_func_equal(special.pseudo_huber, w, z, rtol=1e-13, atol=1e-13)
+
+
+def test_pseudo_huber_small_r():
+    delta = 1.0
+    r = 1e-18
+    y = special.pseudo_huber(delta, r)
+    # expected computed with mpmath:
+    #     import mpmath
+    #     mpmath.mp.dps = 200
+    #     r = mpmath.mpf(1e-18)
+    #     expected = float(mpmath.sqrt(1 + r**2) - 1)
+    expected = 5.0000000000000005e-37
+    assert_allclose(y, expected, rtol=1e-13)
+
+
+def test_runtime_warning():
+    with pytest.warns(RuntimeWarning,
+                      match=r'Too many predicted coefficients'):
+        mathieu_odd_coef(1000, 1000)
+    with pytest.warns(RuntimeWarning,
+                      match=r'Too many predicted coefficients'):
+        mathieu_even_coef(1000, 1000)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_bdtr.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_bdtr.py
new file mode 100644
index 00000000..57694bec
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_bdtr.py
@@ -0,0 +1,112 @@
+import numpy as np
+import scipy.special as sc
+import pytest
+from numpy.testing import assert_allclose, assert_array_equal, suppress_warnings
+
+
+class TestBdtr:
+    def test(self):
+        val = sc.bdtr(0, 1, 0.5)
+        assert_allclose(val, 0.5)
+
+    def test_sum_is_one(self):
+        val = sc.bdtr([0, 1, 2], 2, 0.5)
+        assert_array_equal(val, [0.25, 0.75, 1.0])
+
+    def test_rounding(self):
+        double_val = sc.bdtr([0.1, 1.1, 2.1], 2, 0.5)
+        int_val = sc.bdtr([0, 1, 2], 2, 0.5)
+        assert_array_equal(double_val, int_val)
+
+    @pytest.mark.parametrize('k, n, p', [
+        (np.inf, 2, 0.5),
+        (1.0, np.inf, 0.5),
+        (1.0, 2, np.inf)
+    ])
+    def test_inf(self, k, n, p):
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning)
+            val = sc.bdtr(k, n, p)
+        assert np.isnan(val)
+
+    def test_domain(self):
+        val = sc.bdtr(-1.1, 1, 0.5)
+        assert np.isnan(val)
+
+
+class TestBdtrc:
+    def test_value(self):
+        val = sc.bdtrc(0, 1, 0.5)
+        assert_allclose(val, 0.5)
+
+    def test_sum_is_one(self):
+        val = sc.bdtrc([0, 1, 2], 2, 0.5)
+        assert_array_equal(val, [0.75, 0.25, 0.0])
+
+    def test_rounding(self):
+        double_val = sc.bdtrc([0.1, 1.1, 2.1], 2, 0.5)
+        int_val = sc.bdtrc([0, 1, 2], 2, 0.5)
+        assert_array_equal(double_val, int_val)
+
+    @pytest.mark.parametrize('k, n, p', [
+        (np.inf, 2, 0.5),
+        (1.0, np.inf, 0.5),
+        (1.0, 2, np.inf)
+    ])
+    def test_inf(self, k, n, p):
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning)
+            val = sc.bdtrc(k, n, p)
+        assert np.isnan(val)
+
+    def test_domain(self):
+        val = sc.bdtrc(-1.1, 1, 0.5)
+        val2 = sc.bdtrc(2.1, 1, 0.5)
+        assert np.isnan(val2)
+        assert_allclose(val, 1.0)
+
+    def test_bdtr_bdtrc_sum_to_one(self):
+        bdtr_vals = sc.bdtr([0, 1, 2], 2, 0.5)
+        bdtrc_vals = sc.bdtrc([0, 1, 2], 2, 0.5)
+        vals = bdtr_vals + bdtrc_vals
+        assert_allclose(vals, [1.0, 1.0, 1.0])
+
+
+class TestBdtri:
+    def test_value(self):
+        val = sc.bdtri(0, 1, 0.5)
+        assert_allclose(val, 0.5)
+
+    def test_sum_is_one(self):
+        val = sc.bdtri([0, 1], 2, 0.5)
+        actual = np.asarray([1 - 1/np.sqrt(2), 1/np.sqrt(2)])
+        assert_allclose(val, actual)
+
+    def test_rounding(self):
+        double_val = sc.bdtri([0.1, 1.1], 2, 0.5)
+        int_val = sc.bdtri([0, 1], 2, 0.5)
+        assert_allclose(double_val, int_val)
+
+    @pytest.mark.parametrize('k, n, p', [
+        (np.inf, 2, 0.5),
+        (1.0, np.inf, 0.5),
+        (1.0, 2, np.inf)
+    ])
+    def test_inf(self, k, n, p):
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning)
+            val = sc.bdtri(k, n, p)
+        assert np.isnan(val)
+
+    @pytest.mark.parametrize('k, n, p', [
+        (-1.1, 1, 0.5),
+        (2.1, 1, 0.5)
+    ])
+    def test_domain(self, k, n, p):
+        val = sc.bdtri(k, n, p)
+        assert np.isnan(val)
+
+    def test_bdtr_bdtri_roundtrip(self):
+        bdtr_vals = sc.bdtr([0, 1, 2], 2, 0.5)
+        roundtrip_vals = sc.bdtri([0, 1, 2], 2, bdtr_vals)
+        assert_allclose(roundtrip_vals, [0.5, 0.5, np.nan])
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_boxcox.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_boxcox.py
new file mode 100644
index 00000000..f6a0d430
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_boxcox.py
@@ -0,0 +1,106 @@
+import numpy as np
+from numpy.testing import assert_equal, assert_almost_equal, assert_allclose
+from scipy.special import boxcox, boxcox1p, inv_boxcox, inv_boxcox1p
+
+
+# There are more tests of boxcox and boxcox1p in test_mpmath.py.
+
+def test_boxcox_basic():
+    x = np.array([0.5, 1, 2, 4])
+
+    # lambda = 0  =>  y = log(x)
+    y = boxcox(x, 0)
+    assert_almost_equal(y, np.log(x))
+
+    # lambda = 1  =>  y = x - 1
+    y = boxcox(x, 1)
+    assert_almost_equal(y, x - 1)
+
+    # lambda = 2  =>  y = 0.5*(x**2 - 1)
+    y = boxcox(x, 2)
+    assert_almost_equal(y, 0.5*(x**2 - 1))
+
+    # x = 0 and lambda > 0  =>  y = -1 / lambda
+    lam = np.array([0.5, 1, 2])
+    y = boxcox(0, lam)
+    assert_almost_equal(y, -1.0 / lam)
+
+
+def test_boxcox_underflow():
+    x = 1 + 1e-15
+    lmbda = 1e-306
+    y = boxcox(x, lmbda)
+    assert_allclose(y, np.log(x), rtol=1e-14)
+
+
+def test_boxcox_nonfinite():
+    # x < 0  =>  y = nan
+    x = np.array([-1, -1, -0.5])
+    y = boxcox(x, [0.5, 2.0, -1.5])
+    assert_equal(y, np.array([np.nan, np.nan, np.nan]))
+
+    # x = 0 and lambda <= 0  =>  y = -inf
+    x = 0
+    y = boxcox(x, [-2.5, 0])
+    assert_equal(y, np.array([-np.inf, -np.inf]))
+
+
+def test_boxcox1p_basic():
+    x = np.array([-0.25, -1e-20, 0, 1e-20, 0.25, 1, 3])
+
+    # lambda = 0  =>  y = log(1+x)
+    y = boxcox1p(x, 0)
+    assert_almost_equal(y, np.log1p(x))
+
+    # lambda = 1  =>  y = x
+    y = boxcox1p(x, 1)
+    assert_almost_equal(y, x)
+
+    # lambda = 2  =>  y = 0.5*((1+x)**2 - 1) = 0.5*x*(2 + x)
+    y = boxcox1p(x, 2)
+    assert_almost_equal(y, 0.5*x*(2 + x))
+
+    # x = -1 and lambda > 0  =>  y = -1 / lambda
+    lam = np.array([0.5, 1, 2])
+    y = boxcox1p(-1, lam)
+    assert_almost_equal(y, -1.0 / lam)
+
+
+def test_boxcox1p_underflow():
+    x = np.array([1e-15, 1e-306])
+    lmbda = np.array([1e-306, 1e-18])
+    y = boxcox1p(x, lmbda)
+    assert_allclose(y, np.log1p(x), rtol=1e-14)
+
+
+def test_boxcox1p_nonfinite():
+    # x < -1  =>  y = nan
+    x = np.array([-2, -2, -1.5])
+    y = boxcox1p(x, [0.5, 2.0, -1.5])
+    assert_equal(y, np.array([np.nan, np.nan, np.nan]))
+
+    # x = -1 and lambda <= 0  =>  y = -inf
+    x = -1
+    y = boxcox1p(x, [-2.5, 0])
+    assert_equal(y, np.array([-np.inf, -np.inf]))
+
+
+def test_inv_boxcox():
+    x = np.array([0., 1., 2.])
+    lam = np.array([0., 1., 2.])
+    y = boxcox(x, lam)
+    x2 = inv_boxcox(y, lam)
+    assert_almost_equal(x, x2)
+
+    x = np.array([0., 1., 2.])
+    lam = np.array([0., 1., 2.])
+    y = boxcox1p(x, lam)
+    x2 = inv_boxcox1p(y, lam)
+    assert_almost_equal(x, x2)
+
+
+def test_inv_boxcox1p_underflow():
+    x = 1e-15
+    lam = 1e-306
+    y = inv_boxcox1p(x, lam)
+    assert_allclose(y, x, rtol=1e-14)
+
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_cdflib.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_cdflib.py
new file mode 100644
index 00000000..bef3a095
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_cdflib.py
@@ -0,0 +1,424 @@
+"""
+Test cdflib functions versus mpmath, if available.
+
+The following functions still need tests:
+
+- ncfdtr
+- ncfdtri
+- ncfdtridfn
+- ncfdtridfd
+- ncfdtrinc
+- nbdtrik
+- nbdtrin
+- nrdtrimn
+- nrdtrisd
+- pdtrik
+- nctdtr
+- nctdtrit
+- nctdtridf
+- nctdtrinc
+
+"""
+import itertools
+
+import numpy as np
+from numpy.testing import assert_equal, assert_allclose
+import pytest
+
+import scipy.special as sp
+from scipy.special._testutils import (
+    MissingModule, check_version, FuncData)
+from scipy.special._mptestutils import (
+    Arg, IntArg, get_args, mpf2float, assert_mpmath_equal)
+
+try:
+    import mpmath
+except ImportError:
+    mpmath = MissingModule('mpmath')
+
+
+class ProbArg:
+    """Generate a set of probabilities on [0, 1]."""
+    def __init__(self):
+        # Include the endpoints for compatibility with Arg et al.
+        self.a = 0
+        self.b = 1
+
+    def values(self, n):
+        """Return an array containing approximatively n numbers."""
+        m = max(1, n//3)
+        v1 = np.logspace(-30, np.log10(0.3), m)
+        v2 = np.linspace(0.3, 0.7, m + 1, endpoint=False)[1:]
+        v3 = 1 - np.logspace(np.log10(0.3), -15, m)
+        v = np.r_[v1, v2, v3]
+        return np.unique(v)
+
+
+class EndpointFilter:
+    def __init__(self, a, b, rtol, atol):
+        self.a = a
+        self.b = b
+        self.rtol = rtol
+        self.atol = atol
+
+    def __call__(self, x):
+        mask1 = np.abs(x - self.a) < self.rtol*np.abs(self.a) + self.atol
+        mask2 = np.abs(x - self.b) < self.rtol*np.abs(self.b) + self.atol
+        return np.where(mask1 | mask2, False, True)
+
+
+class _CDFData:
+    def __init__(self, spfunc, mpfunc, index, argspec, spfunc_first=True,
+                 dps=20, n=5000, rtol=None, atol=None,
+                 endpt_rtol=None, endpt_atol=None):
+        self.spfunc = spfunc
+        self.mpfunc = mpfunc
+        self.index = index
+        self.argspec = argspec
+        self.spfunc_first = spfunc_first
+        self.dps = dps
+        self.n = n
+        self.rtol = rtol
+        self.atol = atol
+
+        if not isinstance(argspec, list):
+            self.endpt_rtol = None
+            self.endpt_atol = None
+        elif endpt_rtol is not None or endpt_atol is not None:
+            if isinstance(endpt_rtol, list):
+                self.endpt_rtol = endpt_rtol
+            else:
+                self.endpt_rtol = [endpt_rtol]*len(self.argspec)
+            if isinstance(endpt_atol, list):
+                self.endpt_atol = endpt_atol
+            else:
+                self.endpt_atol = [endpt_atol]*len(self.argspec)
+        else:
+            self.endpt_rtol = None
+            self.endpt_atol = None
+
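+    # Composes the scipy routine with its mpmath reference counterpart so
+    # that, when the two agree, idmap(*args) round-trips to args[index].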
+    def idmap(self, *args):
+        if self.spfunc_first:
+            res = self.spfunc(*args)
+            if np.isnan(res):
+                return np.nan
+            args = list(args)
+            args[self.index] = res
+            with mpmath.workdps(self.dps):
+                res = self.mpfunc(*tuple(args))
+                # Imaginary parts are spurious
+                res = mpf2float(res.real)
+        else:
+            with mpmath.workdps(self.dps):
+                res = self.mpfunc(*args)
+                res = mpf2float(res.real)
+            args = list(args)
+            args[self.index] = res
+            res = self.spfunc(*tuple(args))
+        return res
+
+    def get_param_filter(self):
+        if self.endpt_rtol is None and self.endpt_atol is None:
+            return None
+
+        filters = []
+        for rtol, atol, spec in zip(self.endpt_rtol, self.endpt_atol, self.argspec):
+            if rtol is None and atol is None:
+                filters.append(None)
+                continue
+            elif rtol is None:
+                rtol = 0.0
+            elif atol is None:
+                atol = 0.0
+
+            filters.append(EndpointFilter(spec.a, spec.b, rtol, atol))
+        return filters
+
+    def check(self):
+        # Generate values for the arguments
+        args = get_args(self.argspec, self.n)
+        param_filter = self.get_param_filter()
+        param_columns = tuple(range(args.shape[1]))
+        result_columns = args.shape[1]
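+        # Append the original argument column as the expected output; a
+        # correct inverse pair should round-trip back to it.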
+        args = np.hstack((args, args[:,self.index].reshape(args.shape[0], 1)))
+        FuncData(self.idmap, args,
+                 param_columns=param_columns, result_columns=result_columns,
+                 rtol=self.rtol, atol=self.atol, vectorized=False,
+                 param_filter=param_filter).check()
+
+
+def _assert_inverts(*a, **kw):
+    d = _CDFData(*a, **kw)
+    d.check()
+
+
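+# Reference binomial CDF via the regularized incomplete beta identity:
+#     sum_{j<=k} C(n,j) p^j (1-p)^(n-j) = I_{1-p}(n - k, k + 1)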
+def _binomial_cdf(k, n, p):
+    k, n, p = mpmath.mpf(k), mpmath.mpf(n), mpmath.mpf(p)
+    if k <= 0:
+        return mpmath.mpf(0)
+    elif k >= n:
+        return mpmath.mpf(1)
+
+    onemp = mpmath.fsub(1, p, exact=True)
+    return mpmath.betainc(n - k, k + 1, x2=onemp, regularized=True)
+
+
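+# F-distribution CDF through the regularized incomplete beta function:
+#     F(x; dfn, dfd) = I_u(dfn/2, dfd/2),  u = dfn*x / (dfn*x + dfd)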
+def _f_cdf(dfn, dfd, x):
+    if x < 0:
+        return mpmath.mpf(0)
+    dfn, dfd, x = mpmath.mpf(dfn), mpmath.mpf(dfd), mpmath.mpf(x)
+    ub = dfn*x/(dfn*x + dfd)
+    res = mpmath.betainc(dfn/2, dfd/2, x2=ub, regularized=True)
+    return res
+
+
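+# Reference Student t CDF via the hypergeometric representation
+#     F(t; df) = 1/2 + t*Gamma((df+1)/2) * 2F1(1/2, (df+1)/2; 3/2; -t**2/df)
+#                    / (sqrt(pi*df)*Gamma(df/2)).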
+def _student_t_cdf(df, t, dps=None):
+    if dps is None:
+        dps = mpmath.mp.dps
+    with mpmath.workdps(dps):
+        df, t = mpmath.mpf(df), mpmath.mpf(t)
+        fac = mpmath.hyp2f1(0.5, 0.5*(df + 1), 1.5, -t**2/df)
+        fac *= t*mpmath.gamma(0.5*(df + 1))
+        fac /= mpmath.sqrt(mpmath.pi*df)*mpmath.gamma(0.5*df)
+        return 0.5 + fac
+
+
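+# Noncentral chi-square density
+#     f(t; df, nc) = exp(-(t + nc)/2) * (t/nc)**(df/4 - 1/2)
+#                    * I_{df/2 - 1}(sqrt(nc*t)) / 2,
+# where I is the modified Bessel function of the first kind. The CDF below is
+# obtained by direct numerical quadrature of this density, which is why the
+# callers use larger tolerances.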
+def _noncentral_chi_pdf(t, df, nc):
+    res = mpmath.besseli(df/2 - 1, mpmath.sqrt(nc*t))
+    res *= mpmath.exp(-(t + nc)/2)*(t/nc)**(df/4 - 1/2)/2
+    return res
+
+
+def _noncentral_chi_cdf(x, df, nc, dps=None):
+    if dps is None:
+        dps = mpmath.mp.dps
+    x, df, nc = mpmath.mpf(x), mpmath.mpf(df), mpmath.mpf(nc)
+    with mpmath.workdps(dps):
+        res = mpmath.quad(lambda t: _noncentral_chi_pdf(t, df, nc), [0, x])
+        return res
+
+
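+# Tukey lambda quantile function Q(p; lmbda) = (p**lmbda - (1-p)**lmbda)/lmbda.
+# The lmbda -> 0 limit is the logit, log(p/(1-p)), which is why the zero-shape
+# case is tested separately via its closed-form CDF.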
+def _tukey_lmbda_quantile(p, lmbda):
+    # For lmbda != 0
+    return (p**lmbda - (1 - p)**lmbda)/lmbda
+
+
+@pytest.mark.slow
+@check_version(mpmath, '0.19')
+class TestCDFlib:
+
+    @pytest.mark.xfail(run=False)
+    def test_bdtrik(self):
+        _assert_inverts(
+            sp.bdtrik,
+            _binomial_cdf,
+            0, [ProbArg(), IntArg(1, 1000), ProbArg()],
+            rtol=1e-4)
+
+    def test_bdtrin(self):
+        _assert_inverts(
+            sp.bdtrin,
+            _binomial_cdf,
+            1, [IntArg(1, 1000), ProbArg(), ProbArg()],
+            rtol=1e-4, endpt_atol=[None, None, 1e-6])
+
+    def test_btdtria(self):
+        _assert_inverts(
+            sp.btdtria,
+            lambda a, b, x: mpmath.betainc(a, b, x2=x, regularized=True),
+            0, [ProbArg(), Arg(0, 1e2, inclusive_a=False),
+                Arg(0, 1, inclusive_a=False, inclusive_b=False)],
+            rtol=1e-6)
+
+    def test_btdtrib(self):
+        # Use small values of a, otherwise mpmath doesn't converge
+        _assert_inverts(
+            sp.btdtrib,
+            lambda a, b, x: mpmath.betainc(a, b, x2=x, regularized=True),
+            1, [Arg(0, 1e2, inclusive_a=False), ProbArg(),
+                Arg(0, 1, inclusive_a=False, inclusive_b=False)],
+            rtol=1e-7, endpt_atol=[None, 1e-18, 1e-15])
+
+    @pytest.mark.xfail(run=False)
+    def test_fdtridfd(self):
+        _assert_inverts(
+            sp.fdtridfd,
+            _f_cdf,
+            1, [IntArg(1, 100), ProbArg(), Arg(0, 100, inclusive_a=False)],
+            rtol=1e-7)
+
+    def test_gdtria(self):
+        _assert_inverts(
+            sp.gdtria,
+            lambda a, b, x: mpmath.gammainc(b, b=a*x, regularized=True),
+            0, [ProbArg(), Arg(0, 1e3, inclusive_a=False),
+                Arg(0, 1e4, inclusive_a=False)], rtol=1e-7,
+            endpt_atol=[None, 1e-7, 1e-10])
+
+    def test_gdtrib(self):
+        # Use small values of a and x, otherwise mpmath doesn't converge
+        _assert_inverts(
+            sp.gdtrib,
+            lambda a, b, x: mpmath.gammainc(b, b=a*x, regularized=True),
+            1, [Arg(0, 1e2, inclusive_a=False), ProbArg(),
+                Arg(0, 1e3, inclusive_a=False)], rtol=1e-5)
+
+    def test_gdtrix(self):
+        _assert_inverts(
+            sp.gdtrix,
+            lambda a, b, x: mpmath.gammainc(b, b=a*x, regularized=True),
+            2, [Arg(0, 1e3, inclusive_a=False), Arg(0, 1e3, inclusive_a=False),
+                ProbArg()], rtol=1e-7,
+            endpt_atol=[None, 1e-7, 1e-10])
+
+    def test_stdtr(self):
+        # Ideally the left endpoint for Arg() should be 0.
+        assert_mpmath_equal(
+            sp.stdtr,
+            _student_t_cdf,
+            [IntArg(1, 100), Arg(1e-10, np.inf)], rtol=1e-7)
+
+    @pytest.mark.xfail(run=False)
+    def test_stdtridf(self):
+        _assert_inverts(
+            sp.stdtridf,
+            _student_t_cdf,
+            0, [ProbArg(), Arg()], rtol=1e-7)
+
+    def test_stdtrit(self):
+        _assert_inverts(
+            sp.stdtrit,
+            _student_t_cdf,
+            1, [IntArg(1, 100), ProbArg()], rtol=1e-7,
+            endpt_atol=[None, 1e-10])
+
+    def test_chdtriv(self):
+        _assert_inverts(
+            sp.chdtriv,
+            lambda v, x: mpmath.gammainc(v/2, b=x/2, regularized=True),
+            0, [ProbArg(), IntArg(1, 100)], rtol=1e-4)
+
+    @pytest.mark.xfail(run=False)
+    def test_chndtridf(self):
+        # Use a larger atol since mpmath is doing numerical integration
+        _assert_inverts(
+            sp.chndtridf,
+            _noncentral_chi_cdf,
+            1, [Arg(0, 100, inclusive_a=False), ProbArg(),
+                Arg(0, 100, inclusive_a=False)],
+            n=1000, rtol=1e-4, atol=1e-15)
+
+    @pytest.mark.xfail(run=False)
+    def test_chndtrinc(self):
+        # Use a larger atol since mpmath is doing numerical integration
+        _assert_inverts(
+            sp.chndtrinc,
+            _noncentral_chi_cdf,
+            2, [Arg(0, 100, inclusive_a=False), IntArg(1, 100), ProbArg()],
+            n=1000, rtol=1e-4, atol=1e-15)
+
+    def test_chndtrix(self):
+        # Use a larger atol since mpmath is doing numerical integration
+        _assert_inverts(
+            sp.chndtrix,
+            _noncentral_chi_cdf,
+            0, [ProbArg(), IntArg(1, 100), Arg(0, 100, inclusive_a=False)],
+            n=1000, rtol=1e-4, atol=1e-15,
+            endpt_atol=[1e-6, None, None])
+
+    def test_tklmbda_zero_shape(self):
+        # When lmbda = 0 the CDF has a simple closed form
+        one = mpmath.mpf(1)
+        assert_mpmath_equal(
+            lambda x: sp.tklmbda(x, 0),
+            lambda x: one/(mpmath.exp(-x) + one),
+            [Arg()], rtol=1e-7)
+
+    def test_tklmbda_neg_shape(self):
+        _assert_inverts(
+            sp.tklmbda,
+            _tukey_lmbda_quantile,
+            0, [ProbArg(), Arg(-25, 0, inclusive_b=False)],
+            spfunc_first=False, rtol=1e-5,
+            endpt_atol=[1e-9, 1e-5])
+
+    @pytest.mark.xfail(run=False)
+    def test_tklmbda_pos_shape(self):
+        _assert_inverts(
+            sp.tklmbda,
+            _tukey_lmbda_quantile,
+            0, [ProbArg(), Arg(0, 100, inclusive_a=False)],
+            spfunc_first=False, rtol=1e-5)
+
+
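+# Smoke-test each listed cdflib routine on all combinations of a finite
+# random value, nan, inf and -inf per argument: nan anywhere must propagate
+# to a nan result, and no combination may raise or hang.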
+def test_nonfinite():
+    funcs = [
+        ("btdtria", 3),
+        ("btdtrib", 3),
+        ("bdtrik", 3),
+        ("bdtrin", 3),
+        ("chdtriv", 2),
+        ("chndtr", 3),
+        ("chndtrix", 3),
+        ("chndtridf", 3),
+        ("chndtrinc", 3),
+        ("fdtridfd", 3),
+        ("ncfdtr", 4),
+        ("ncfdtri", 4),
+        ("ncfdtridfn", 4),
+        ("ncfdtridfd", 4),
+        ("ncfdtrinc", 4),
+        ("gdtrix", 3),
+        ("gdtrib", 3),
+        ("gdtria", 3),
+        ("nbdtrik", 3),
+        ("nbdtrin", 3),
+        ("nrdtrimn", 3),
+        ("nrdtrisd", 3),
+        ("pdtrik", 2),
+        ("stdtr", 2),
+        ("stdtrit", 2),
+        ("stdtridf", 2),
+        ("nctdtr", 3),
+        ("nctdtrit", 3),
+        ("nctdtridf", 3),
+        ("nctdtrinc", 3),
+        ("tklmbda", 2),
+    ]
+
+    np.random.seed(1)
+
+    for func, numargs in funcs:
+        func = getattr(sp, func)
+
+        args_choices = [(float(x), np.nan, np.inf, -np.inf) for x in
+                        np.random.rand(numargs)]
+
+        for args in itertools.product(*args_choices):
+            res = func(*args)
+
+            if any(np.isnan(x) for x in args):
+                # NaN inputs should result in NaN output
+                assert_equal(res, np.nan)
+            else:
+                # All other inputs should return something (but not
+                # raise exceptions or cause hangs)
+                pass
+
+
+def test_chndtrix_gh2158():
+    # test that gh-2158 is resolved; previously this blew up
+    res = sp.chndtrix(0.999999, 2, np.arange(20.)+1e-6)
+
+    # Generated in R
+    # options(digits=16)
+    # ncp <- seq(0, 19) + 1e-6
+    # print(qchisq(0.999999, df = 2, ncp = ncp))
+    res_exp = [27.63103493142305, 35.25728589950540, 39.97396073236288,
+               43.88033702110538, 47.35206403482798, 50.54112500166103,
+               53.52720257322766, 56.35830042867810, 59.06600769498512,
+               61.67243118946381, 64.19376191277179, 66.64228141346548,
+               69.02756927200180, 71.35726934749408, 73.63759723904816,
+               75.87368842650227, 78.06984431185720, 80.22971052389806,
+               82.35640899964173, 84.45263768373256]
+    assert_allclose(res, res_exp)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_cdft_asymptotic.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_cdft_asymptotic.py
new file mode 100644
index 00000000..5b6837a4
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_cdft_asymptotic.py
@@ -0,0 +1,49 @@
+# gh-14777 regression tests
+# Test stdtr and stdtrit with infinite df and large values of df
+
+import numpy as np
+from numpy.testing import assert_allclose, assert_equal
+from scipy.special import stdtr, stdtrit, ndtr, ndtri
+
+
+def test_stdtr_vs_R_large_df():
+    df = [1e10, 1e12, 1e120, np.inf]
+    t = 1.
+    res = stdtr(df, t)
+    # R Code:
+    #   options(digits=20)
+    #   pt(1., c(1e10, 1e12, 1e120, Inf))
+    res_R = [0.84134474605644460343,
+             0.84134474606842180044,
+             0.84134474606854281475,
+             0.84134474606854292578]
+    assert_allclose(res, res_R, rtol=2e-15)
+    # last value should also agree with ndtr
+    assert_equal(res[3], ndtr(1.))
+
+
+def test_stdtrit_vs_R_large_df():
+    df = [1e10, 1e12, 1e120, np.inf]
+    p = 0.1
+    res = stdtrit(df, p)
+    # R Code:
+    #   options(digits=20)
+    #   qt(0.1, c(1e10, 1e12, 1e120, Inf))
+    res_R = [-1.2815515656292593150,
+             -1.2815515655454472466,
+             -1.2815515655446008125,
+             -1.2815515655446008125]
+    assert_allclose(res, res_R, rtol=1e-15)
+    # last value should also agree with ndtri
+    assert_equal(res[3], ndtri(0.1))
+
+
+def test_stdtr_stdtri_invalid():
+    # a mix of large and inf df with t/p equal to nan
+    df = [1e10, 1e12, 1e120, np.inf]
+    x = np.nan
+    res1 = stdtr(df, x)
+    res2 = stdtrit(df, x)
+    res_ex = 4*[np.nan]
+    assert_equal(res1, res_ex)
+    assert_equal(res2, res_ex)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_cosine_distr.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_cosine_distr.py
new file mode 100644
index 00000000..cc87ed3d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_cosine_distr.py
@@ -0,0 +1,84 @@
+
+import numpy as np
+from numpy.testing import assert_allclose
+import pytest
+from scipy.special._ufuncs import _cosine_cdf, _cosine_invcdf
+
+
+# These values are (x, p) where p is the expected exact value of
+# _cosine_cdf(x).  These values will be tested for exact agreement.
+_coscdf_exact = [
+    (-4.0, 0.0),
+    (0, 0.5),
+    (np.pi, 1.0),
+    (4.0, 1.0),
+]
+
+@pytest.mark.parametrize("x, expected", _coscdf_exact)
+def test_cosine_cdf_exact(x, expected):
+    assert _cosine_cdf(x) == expected
+
+
+# These values are (x, p), where p is the expected value of
+# _cosine_cdf(x). The expected values were computed with mpmath using
+# 50 digits of precision.  These values will be tested for agreement
+# with the computed values using a very small relative tolerance.
+# The value at -np.pi is not 0, because -np.pi does not equal -π.
+_coscdf_close = [
+    (3.1409, 0.999999999991185),
+    (2.25, 0.9819328173287907),
+    # -1.6 is the threshold below which the Pade approximant is used.
+    (-1.599, 0.08641959838382553),
+    (-1.601, 0.086110582992713),
+    (-2.0, 0.0369709335961611),
+    (-3.0, 7.522387241801384e-05),
+    (-3.1415, 2.109869685443648e-14),
+    (-3.14159, 4.956444476505336e-19),
+    (-np.pi, 4.871934450264861e-50),
+]
+
+@pytest.mark.parametrize("x, expected", _coscdf_close)
+def test_cosine_cdf(x, expected):
+    assert_allclose(_cosine_cdf(x), expected, rtol=5e-15)
+
+
+# These values are (p, x) where x is the expected exact value of
+# _cosine_invcdf(p).  These values will be tested for exact agreement.
+_cosinvcdf_exact = [
+    (0.0, -np.pi),
+    (0.5, 0.0),
+    (1.0, np.pi),
+]
+
+@pytest.mark.parametrize("p, expected", _cosinvcdf_exact)
+def test_cosine_invcdf_exact(p, expected):
+    assert _cosine_invcdf(p) == expected
+
+
+def test_cosine_invcdf_invalid_p():
+    # Check that p values outside of [0, 1] return nan.
+    assert np.isnan(_cosine_invcdf([-0.1, 1.1])).all()
+
+
+# These values are (p, x), where x is the expected value of _cosine_invcdf(p).
+# The expected values were computed with mpmath using 50 digits of precision.
+_cosinvcdf_close = [
+    (1e-50, -np.pi),
+    (1e-14, -3.1415204137058454),
+    (1e-08, -3.1343686589124524),
+    (0.0018001, -2.732563923138336),
+    (0.010, -2.41276589008678),
+    (0.060, -1.7881244975330157),
+    (0.125, -1.3752523669869274),
+    (0.250, -0.831711193579736),
+    (0.400, -0.3167954512395289),
+    (0.419, -0.25586025626919906),
+    (0.421, -0.24947570750445663),
+    (0.750, 0.831711193579736),
+    (0.940, 1.7881244975330153),
+    (0.9999999996, 3.1391220839917167),
+]
+
+@pytest.mark.parametrize("p, expected", _cosinvcdf_close)
+def test_cosine_invcdf(p, expected):
+    assert_allclose(_cosine_invcdf(p), expected, rtol=1e-14)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_cython_special.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_cython_special.py
new file mode 100644
index 00000000..cd2cd5bd
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_cython_special.py
@@ -0,0 +1,352 @@
+from __future__ import annotations
+from typing import List, Tuple, Callable, Optional
+
+import pytest
+from itertools import product
+from numpy.testing import assert_allclose, suppress_warnings
+from scipy import special
+from scipy.special import cython_special
+
+
+bint_points = [True, False]
+int_points = [-10, -1, 1, 10]
+real_points = [-10.0, -1.0, 1.0, 10.0]
+complex_points = [complex(*tup) for tup in product(real_points, repeat=2)]
+
+
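+# Maps ufunc-style type codes to the Cython type names used below to index
+# the fused-type specializations of the cython_special functions (see
+# cy_spec_func = cyfunc[tuple(signature)] in test_cython_api).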
+CYTHON_SIGNATURE_MAP = {
+    'b': 'bint',
+    'f': 'float',
+    'd': 'double',
+    'g': 'long double',
+    'F': 'float complex',
+    'D': 'double complex',
+    'G': 'long double complex',
+    'i': 'int',
+    'l': 'long'
+}
+
+
+TEST_POINTS = {
+    'b': bint_points,
+    'f': real_points,
+    'd': real_points,
+    'g': real_points,
+    'F': complex_points,
+    'D': complex_points,
+    'G': complex_points,
+    'i': int_points,
+    'l': int_points,
+}
+
+
+PARAMS: List[Tuple[Callable, Callable, Tuple[str, ...], Optional[str]]] = [
+    (special.agm, cython_special.agm, ('dd',), None),
+    (special.airy, cython_special._airy_pywrap, ('d', 'D'), None),
+    (special.airye, cython_special._airye_pywrap, ('d', 'D'), None),
+    (special.bdtr, cython_special.bdtr, ('dld', 'ddd'), None),
+    (special.bdtrc, cython_special.bdtrc, ('dld', 'ddd'), None),
+    (special.bdtri, cython_special.bdtri, ('dld', 'ddd'), None),
+    (special.bdtrik, cython_special.bdtrik, ('ddd',), None),
+    (special.bdtrin, cython_special.bdtrin, ('ddd',), None),
+    (special.bei, cython_special.bei, ('d',), None),
+    (special.beip, cython_special.beip, ('d',), None),
+    (special.ber, cython_special.ber, ('d',), None),
+    (special.berp, cython_special.berp, ('d',), None),
+    (special.besselpoly, cython_special.besselpoly, ('ddd',), None),
+    (special.beta, cython_special.beta, ('dd',), None),
+    (special.betainc, cython_special.betainc, ('ddd',), None),
+    (special.betaincinv, cython_special.betaincinv, ('ddd',), None),
+    (special.betaln, cython_special.betaln, ('dd',), None),
+    (special.binom, cython_special.binom, ('dd',), None),
+    (special.boxcox, cython_special.boxcox, ('dd',), None),
+    (special.boxcox1p, cython_special.boxcox1p, ('dd',), None),
+    (special.btdtr, cython_special.btdtr, ('ddd',), None),
+    (special.btdtri, cython_special.btdtri, ('ddd',), None),
+    (special.btdtria, cython_special.btdtria, ('ddd',), None),
+    (special.btdtrib, cython_special.btdtrib, ('ddd',), None),
+    (special.cbrt, cython_special.cbrt, ('d',), None),
+    (special.chdtr, cython_special.chdtr, ('dd',), None),
+    (special.chdtrc, cython_special.chdtrc, ('dd',), None),
+    (special.chdtri, cython_special.chdtri, ('dd',), None),
+    (special.chdtriv, cython_special.chdtriv, ('dd',), None),
+    (special.chndtr, cython_special.chndtr, ('ddd',), None),
+    (special.chndtridf, cython_special.chndtridf, ('ddd',), None),
+    (special.chndtrinc, cython_special.chndtrinc, ('ddd',), None),
+    (special.chndtrix, cython_special.chndtrix, ('ddd',), None),
+    (special.cosdg, cython_special.cosdg, ('d',), None),
+    (special.cosm1, cython_special.cosm1, ('d',), None),
+    (special.cotdg, cython_special.cotdg, ('d',), None),
+    (special.dawsn, cython_special.dawsn, ('d', 'D'), None),
+    (special.ellipe, cython_special.ellipe, ('d',), None),
+    (special.ellipeinc, cython_special.ellipeinc, ('dd',), None),
+    (special.ellipj, cython_special._ellipj_pywrap, ('dd',), None),
+    (special.ellipkinc, cython_special.ellipkinc, ('dd',), None),
+    (special.ellipkm1, cython_special.ellipkm1, ('d',), None),
+    (special.ellipk, cython_special.ellipk, ('d',), None),
+    (special.elliprc, cython_special.elliprc, ('dd', 'DD'), None),
+    (special.elliprd, cython_special.elliprd, ('ddd', 'DDD'), None),
+    (special.elliprf, cython_special.elliprf, ('ddd', 'DDD'), None),
+    (special.elliprg, cython_special.elliprg, ('ddd', 'DDD'), None),
+    (special.elliprj, cython_special.elliprj, ('dddd', 'DDDD'), None),
+    (special.entr, cython_special.entr, ('d',), None),
+    (special.erf, cython_special.erf, ('d', 'D'), None),
+    (special.erfc, cython_special.erfc, ('d', 'D'), None),
+    (special.erfcx, cython_special.erfcx, ('d', 'D'), None),
+    (special.erfi, cython_special.erfi, ('d', 'D'), None),
+    (special.erfinv, cython_special.erfinv, ('d',), None),
+    (special.erfcinv, cython_special.erfcinv, ('d',), None),
+    (special.eval_chebyc, cython_special.eval_chebyc, ('dd', 'dD', 'ld'), None),
+    (special.eval_chebys, cython_special.eval_chebys, ('dd', 'dD', 'ld'),
+     'd and l differ for negative int'),
+    (special.eval_chebyt, cython_special.eval_chebyt, ('dd', 'dD', 'ld'),
+     'd and l differ for negative int'),
+    (special.eval_chebyu, cython_special.eval_chebyu, ('dd', 'dD', 'ld'),
+     'd and l differ for negative int'),
+    (special.eval_gegenbauer, cython_special.eval_gegenbauer, ('ddd', 'ddD', 'ldd'),
+     'd and l differ for negative int'),
+    (special.eval_genlaguerre, cython_special.eval_genlaguerre, ('ddd', 'ddD', 'ldd'),
+     'd and l differ for negative int'),
+    (special.eval_hermite, cython_special.eval_hermite, ('ld',), None),
+    (special.eval_hermitenorm, cython_special.eval_hermitenorm, ('ld',), None),
+    (special.eval_jacobi, cython_special.eval_jacobi, ('dddd', 'dddD', 'lddd'),
+     'd and l differ for negative int'),
+    (special.eval_laguerre, cython_special.eval_laguerre, ('dd', 'dD', 'ld'),
+     'd and l differ for negative int'),
+    (special.eval_legendre, cython_special.eval_legendre, ('dd', 'dD', 'ld'), None),
+    (special.eval_sh_chebyt, cython_special.eval_sh_chebyt, ('dd', 'dD', 'ld'), None),
+    (special.eval_sh_chebyu, cython_special.eval_sh_chebyu, ('dd', 'dD', 'ld'),
+     'd and l differ for negative int'),
+    (special.eval_sh_jacobi, cython_special.eval_sh_jacobi, ('dddd', 'dddD', 'lddd'),
+     'd and l differ for negative int'),
+    (special.eval_sh_legendre, cython_special.eval_sh_legendre, ('dd', 'dD', 'ld'), None),
+    (special.exp1, cython_special.exp1, ('d', 'D'), None),
+    (special.exp10, cython_special.exp10, ('d',), None),
+    (special.exp2, cython_special.exp2, ('d',), None),
+    (special.expi, cython_special.expi, ('d', 'D'), None),
+    (special.expit, cython_special.expit, ('f', 'd', 'g'), None),
+    (special.expm1, cython_special.expm1, ('d', 'D'), None),
+    (special.expn, cython_special.expn, ('ld', 'dd'), None),
+    (special.exprel, cython_special.exprel, ('d',), None),
+    (special.fdtr, cython_special.fdtr, ('ddd',), None),
+    (special.fdtrc, cython_special.fdtrc, ('ddd',), None),
+    (special.fdtri, cython_special.fdtri, ('ddd',), None),
+    (special.fdtridfd, cython_special.fdtridfd, ('ddd',), None),
+    (special.fresnel, cython_special._fresnel_pywrap, ('d', 'D'), None),
+    (special.gamma, cython_special.gamma, ('d', 'D'), None),
+    (special.gammainc, cython_special.gammainc, ('dd',), None),
+    (special.gammaincc, cython_special.gammaincc, ('dd',), None),
+    (special.gammainccinv, cython_special.gammainccinv, ('dd',), None),
+    (special.gammaincinv, cython_special.gammaincinv, ('dd',), None),
+    (special.gammaln, cython_special.gammaln, ('d',), None),
+    (special.gammasgn, cython_special.gammasgn, ('d',), None),
+    (special.gdtr, cython_special.gdtr, ('ddd',), None),
+    (special.gdtrc, cython_special.gdtrc, ('ddd',), None),
+    (special.gdtria, cython_special.gdtria, ('ddd',), None),
+    (special.gdtrib, cython_special.gdtrib, ('ddd',), None),
+    (special.gdtrix, cython_special.gdtrix, ('ddd',), None),
+    (special.hankel1, cython_special.hankel1, ('dD',), None),
+    (special.hankel1e, cython_special.hankel1e, ('dD',), None),
+    (special.hankel2, cython_special.hankel2, ('dD',), None),
+    (special.hankel2e, cython_special.hankel2e, ('dD',), None),
+    (special.huber, cython_special.huber, ('dd',), None),
+    (special.hyp0f1, cython_special.hyp0f1, ('dd', 'dD'), None),
+    (special.hyp1f1, cython_special.hyp1f1, ('ddd', 'ddD'), None),
+    (special.hyp2f1, cython_special.hyp2f1, ('dddd', 'dddD'), None),
+    (special.hyperu, cython_special.hyperu, ('ddd',), None),
+    (special.i0, cython_special.i0, ('d',), None),
+    (special.i0e, cython_special.i0e, ('d',), None),
+    (special.i1, cython_special.i1, ('d',), None),
+    (special.i1e, cython_special.i1e, ('d',), None),
+    (special.inv_boxcox, cython_special.inv_boxcox, ('dd',), None),
+    (special.inv_boxcox1p, cython_special.inv_boxcox1p, ('dd',), None),
+    (special.it2i0k0, cython_special._it2i0k0_pywrap, ('d',), None),
+    (special.it2j0y0, cython_special._it2j0y0_pywrap, ('d',), None),
+    (special.it2struve0, cython_special.it2struve0, ('d',), None),
+    (special.itairy, cython_special._itairy_pywrap, ('d',), None),
+    (special.iti0k0, cython_special._iti0k0_pywrap, ('d',), None),
+    (special.itj0y0, cython_special._itj0y0_pywrap, ('d',), None),
+    (special.itmodstruve0, cython_special.itmodstruve0, ('d',), None),
+    (special.itstruve0, cython_special.itstruve0, ('d',), None),
+    (special.iv, cython_special.iv, ('dd', 'dD'), None),
+    (special.ive, cython_special.ive, ('dd', 'dD'), None),
+    (special.j0, cython_special.j0, ('d',), None),
+    (special.j1, cython_special.j1, ('d',), None),
+    (special.jv, cython_special.jv, ('dd', 'dD'), None),
+    (special.jve, cython_special.jve, ('dd', 'dD'), None),
+    (special.k0, cython_special.k0, ('d',), None),
+    (special.k0e, cython_special.k0e, ('d',), None),
+    (special.k1, cython_special.k1, ('d',), None),
+    (special.k1e, cython_special.k1e, ('d',), None),
+    (special.kei, cython_special.kei, ('d',), None),
+    (special.keip, cython_special.keip, ('d',), None),
+    (special.kelvin, cython_special._kelvin_pywrap, ('d',), None),
+    (special.ker, cython_special.ker, ('d',), None),
+    (special.kerp, cython_special.kerp, ('d',), None),
+    (special.kl_div, cython_special.kl_div, ('dd',), None),
+    (special.kn, cython_special.kn, ('ld', 'dd'), None),
+    (special.kolmogi, cython_special.kolmogi, ('d',), None),
+    (special.kolmogorov, cython_special.kolmogorov, ('d',), None),
+    (special.kv, cython_special.kv, ('dd', 'dD'), None),
+    (special.kve, cython_special.kve, ('dd', 'dD'), None),
+    (special.log1p, cython_special.log1p, ('d', 'D'), None),
+    (special.log_expit, cython_special.log_expit, ('f', 'd', 'g'), None),
+    (special.log_ndtr, cython_special.log_ndtr, ('d', 'D'), None),
+    (special.ndtri_exp, cython_special.ndtri_exp, ('d',), None),
+    (special.loggamma, cython_special.loggamma, ('D',), None),
+    (special.logit, cython_special.logit, ('f', 'd', 'g'), None),
+    (special.lpmv, cython_special.lpmv, ('ddd',), None),
+    (special.mathieu_a, cython_special.mathieu_a, ('dd',), None),
+    (special.mathieu_b, cython_special.mathieu_b, ('dd',), None),
+    (special.mathieu_cem, cython_special._mathieu_cem_pywrap, ('ddd',), None),
+    (special.mathieu_modcem1, cython_special._mathieu_modcem1_pywrap, ('ddd',), None),
+    (special.mathieu_modcem2, cython_special._mathieu_modcem2_pywrap, ('ddd',), None),
+    (special.mathieu_modsem1, cython_special._mathieu_modsem1_pywrap, ('ddd',), None),
+    (special.mathieu_modsem2, cython_special._mathieu_modsem2_pywrap, ('ddd',), None),
+    (special.mathieu_sem, cython_special._mathieu_sem_pywrap, ('ddd',), None),
+    (special.modfresnelm, cython_special._modfresnelm_pywrap, ('d',), None),
+    (special.modfresnelp, cython_special._modfresnelp_pywrap, ('d',), None),
+    (special.modstruve, cython_special.modstruve, ('dd',), None),
+    (special.nbdtr, cython_special.nbdtr, ('lld', 'ddd'), None),
+    (special.nbdtrc, cython_special.nbdtrc, ('lld', 'ddd'), None),
+    (special.nbdtri, cython_special.nbdtri, ('lld', 'ddd'), None),
+    (special.nbdtrik, cython_special.nbdtrik, ('ddd',), None),
+    (special.nbdtrin, cython_special.nbdtrin, ('ddd',), None),
+    (special.ncfdtr, cython_special.ncfdtr, ('dddd',), None),
+    (special.ncfdtri, cython_special.ncfdtri, ('dddd',), None),
+    (special.ncfdtridfd, cython_special.ncfdtridfd, ('dddd',), None),
+    (special.ncfdtridfn, cython_special.ncfdtridfn, ('dddd',), None),
+    (special.ncfdtrinc, cython_special.ncfdtrinc, ('dddd',), None),
+    (special.nctdtr, cython_special.nctdtr, ('ddd',), None),
+    (special.nctdtridf, cython_special.nctdtridf, ('ddd',), None),
+    (special.nctdtrinc, cython_special.nctdtrinc, ('ddd',), None),
+    (special.nctdtrit, cython_special.nctdtrit, ('ddd',), None),
+    (special.ndtr, cython_special.ndtr, ('d', 'D'), None),
+    (special.ndtri, cython_special.ndtri, ('d',), None),
+    (special.nrdtrimn, cython_special.nrdtrimn, ('ddd',), None),
+    (special.nrdtrisd, cython_special.nrdtrisd, ('ddd',), None),
+    (special.obl_ang1, cython_special._obl_ang1_pywrap, ('dddd',), None),
+    (special.obl_ang1_cv, cython_special._obl_ang1_cv_pywrap, ('ddddd',), None),
+    (special.obl_cv, cython_special.obl_cv, ('ddd',), None),
+    (special.obl_rad1, cython_special._obl_rad1_pywrap, ('dddd',), "see gh-6211"),
+    (special.obl_rad1_cv, cython_special._obl_rad1_cv_pywrap, ('ddddd',), "see gh-6211"),
+    (special.obl_rad2, cython_special._obl_rad2_pywrap, ('dddd',), "see gh-6211"),
+    (special.obl_rad2_cv, cython_special._obl_rad2_cv_pywrap, ('ddddd',), "see gh-6211"),
+    (special.pbdv, cython_special._pbdv_pywrap, ('dd',), None),
+    (special.pbvv, cython_special._pbvv_pywrap, ('dd',), None),
+    (special.pbwa, cython_special._pbwa_pywrap, ('dd',), None),
+    (special.pdtr, cython_special.pdtr, ('dd',), None),
+    (special.pdtrc, cython_special.pdtrc, ('dd',), None),
+    (special.pdtri, cython_special.pdtri, ('ld', 'dd'), None),
+    (special.pdtrik, cython_special.pdtrik, ('dd',), None),
+    (special.poch, cython_special.poch, ('dd',), None),
+    (special.powm1, cython_special.powm1, ('dd',), None),
+    (special.pro_ang1, cython_special._pro_ang1_pywrap, ('dddd',), None),
+    (special.pro_ang1_cv, cython_special._pro_ang1_cv_pywrap, ('ddddd',), None),
+    (special.pro_cv, cython_special.pro_cv, ('ddd',), None),
+    (special.pro_rad1, cython_special._pro_rad1_pywrap, ('dddd',), "see gh-6211"),
+    (special.pro_rad1_cv, cython_special._pro_rad1_cv_pywrap, ('ddddd',), "see gh-6211"),
+    (special.pro_rad2, cython_special._pro_rad2_pywrap, ('dddd',), "see gh-6211"),
+    (special.pro_rad2_cv, cython_special._pro_rad2_cv_pywrap, ('ddddd',), "see gh-6211"),
+    (special.pseudo_huber, cython_special.pseudo_huber, ('dd',), None),
+    (special.psi, cython_special.psi, ('d', 'D'), None),
+    (special.radian, cython_special.radian, ('ddd',), None),
+    (special.rel_entr, cython_special.rel_entr, ('dd',), None),
+    (special.rgamma, cython_special.rgamma, ('d', 'D'), None),
+    (special.round, cython_special.round, ('d',), None),
+    (special.spherical_jn, cython_special.spherical_jn, ('ld', 'ldb', 'lD', 'lDb'), None),
+    (special.spherical_yn, cython_special.spherical_yn, ('ld', 'ldb', 'lD', 'lDb'), None),
+    (special.spherical_in, cython_special.spherical_in, ('ld', 'ldb', 'lD', 'lDb'), None),
+    (special.spherical_kn, cython_special.spherical_kn, ('ld', 'ldb', 'lD', 'lDb'), None),
+    (special.shichi, cython_special._shichi_pywrap, ('d', 'D'), None),
+    (special.sici, cython_special._sici_pywrap, ('d', 'D'), None),
+    (special.sindg, cython_special.sindg, ('d',), None),
+    (special.smirnov, cython_special.smirnov, ('ld', 'dd'), None),
+    (special.smirnovi, cython_special.smirnovi, ('ld', 'dd'), None),
+    (special.spence, cython_special.spence, ('d', 'D'), None),
+    (special.sph_harm, cython_special.sph_harm, ('lldd', 'dddd'), None),
+    (special.stdtr, cython_special.stdtr, ('dd',), None),
+    (special.stdtridf, cython_special.stdtridf, ('dd',), None),
+    (special.stdtrit, cython_special.stdtrit, ('dd',), None),
+    (special.struve, cython_special.struve, ('dd',), None),
+    (special.tandg, cython_special.tandg, ('d',), None),
+    (special.tklmbda, cython_special.tklmbda, ('dd',), None),
+    (special.voigt_profile, cython_special.voigt_profile, ('ddd',), None),
+    (special.wofz, cython_special.wofz, ('D',), None),
+    (special.wright_bessel, cython_special.wright_bessel, ('ddd',), None),
+    (special.wrightomega, cython_special.wrightomega, ('D',), None),
+    (special.xlog1py, cython_special.xlog1py, ('dd', 'DD'), None),
+    (special.xlogy, cython_special.xlogy, ('dd', 'DD'), None),
+    (special.y0, cython_special.y0, ('d',), None),
+    (special.y1, cython_special.y1, ('d',), None),
+    (special.yn, cython_special.yn, ('ld', 'dd'), None),
+    (special.yv, cython_special.yv, ('dd', 'dD'), None),
+    (special.yve, cython_special.yve, ('dd', 'dD'), None),
+    (special.zetac, cython_special.zetac, ('d',), None),
+    (special.owens_t, cython_special.owens_t, ('dd',), None)
+]
+
+
+IDS = [x[0].__name__ for x in PARAMS]
+
+
+def _generate_test_points(typecodes):
+    axes = tuple(TEST_POINTS[x] for x in typecodes)
+    pts = list(product(*axes))
+    return pts
+
+
+def test_cython_api_completeness():
+    # Check that everything is tested
+    for name in dir(cython_special):
+        func = getattr(cython_special, name)
+        if callable(func) and not name.startswith('_'):
+            for _, cyfun, _, _ in PARAMS:
+                if cyfun is func:
+                    break
+            else:
+                raise RuntimeError(f"{name} missing from tests!")
+
+
+@pytest.mark.parametrize("param", PARAMS, ids=IDS)
+def test_cython_api(param):
+    pyfunc, cyfunc, specializations, knownfailure = param
+    if knownfailure:
+        pytest.xfail(reason=knownfailure)
+
+    # Check which parameters are expected to be fused types
+    max_params = max(len(spec) for spec in specializations)
+    values = [set() for _ in range(max_params)]
+    for typecodes in specializations:
+        for j, v in enumerate(typecodes):
+            values[j].add(v)
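+    # Parameters sharing an identical set of type codes are taken to belong
+    # to the same fused type, so only the first such parameter contributes an
+    # index to the specialization signature; later duplicates are skipped via
+    # `seen`.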
+    seen = set()
+    is_fused_code = [False] * len(values)
+    for j, v in enumerate(values):
+        vv = tuple(sorted(v))
+        if vv in seen:
+            continue
+        is_fused_code[j] = (len(v) > 1)
+        seen.add(vv)
+
+    # Check results
+    for typecodes in specializations:
+        # Pick the correct specialized function
+        signature = [CYTHON_SIGNATURE_MAP[code]
+                     for j, code in enumerate(typecodes)
+                     if is_fused_code[j]]
+
+        if signature:
+            cy_spec_func = cyfunc[tuple(signature)]
+        else:
+            signature = None
+            cy_spec_func = cyfunc
+
+        # Test it
+        pts = _generate_test_points(typecodes)
+        for pt in pts:
+            with suppress_warnings() as sup:
+                sup.filter(DeprecationWarning)
+                pyval = pyfunc(*pt)
+                cyval = cy_spec_func(*pt)
+            assert_allclose(cyval, pyval,
+                            err_msg=f"{pt} {typecodes} {signature}")
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_data.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_data.py
new file mode 100644
index 00000000..081bbbd8
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_data.py
@@ -0,0 +1,617 @@
+import os
+
+import numpy as np
+from numpy.testing import suppress_warnings
+import pytest
+
+from scipy.special import (
+    lpn, lpmn, lpmv, lqn, lqmn, sph_harm, eval_legendre, eval_hermite,
+    eval_laguerre, eval_genlaguerre, binom, cbrt, expm1, log1p, zeta,
+    jn, jv, jvp, yn, yv, yvp, iv, ivp, kn, kv, kvp,
+    gamma, gammaln, gammainc, gammaincc, gammaincinv, gammainccinv, digamma,
+    beta, betainc, betaincinv, poch,
+    ellipe, ellipeinc, ellipk, ellipkm1, ellipkinc, ellipj,
+    elliprc, elliprd, elliprf, elliprg, elliprj,
+    erf, erfc, erfinv, erfcinv, exp1, expi, expn,
+    bdtrik, btdtr, btdtri, btdtria, btdtrib, chndtr, gdtr, gdtrc, gdtrix, gdtrib,
+    nbdtrik, pdtrik, owens_t,
+    mathieu_a, mathieu_b, mathieu_cem, mathieu_sem, mathieu_modcem1,
+    mathieu_modsem1, mathieu_modcem2, mathieu_modsem2,
+    ellip_harm, ellip_harm_2, spherical_jn, spherical_yn, wright_bessel
+)
+from scipy.integrate import IntegrationWarning
+
+from scipy.special._testutils import FuncData
+
+DATASETS_BOOST = np.load(os.path.join(os.path.dirname(__file__),
+                                      "data", "boost.npz"))
+
+DATASETS_GSL = np.load(os.path.join(os.path.dirname(__file__),
+                                    "data", "gsl.npz"))
+
+DATASETS_LOCAL = np.load(os.path.join(os.path.dirname(__file__),
+                                      "data", "local.npz"))
+
+
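+# Thin wrappers that look up a named dataset in the corresponding .npz
+# archive and wrap it in a FuncData case; the remaining positional/keyword
+# arguments pass through to FuncData (parameter columns, result columns,
+# tolerances, ...).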
+def data(func, dataname, *a, **kw):
+    kw.setdefault('dataname', dataname)
+    return FuncData(func, DATASETS_BOOST[dataname], *a, **kw)
+
+
+def data_gsl(func, dataname, *a, **kw):
+    kw.setdefault('dataname', dataname)
+    return FuncData(func, DATASETS_GSL[dataname], *a, **kw)
+
+
+def data_local(func, dataname, *a, **kw):
+    kw.setdefault('dataname', dataname)
+    return FuncData(func, DATASETS_LOCAL[dataname], *a, **kw)
+
+
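+# The Boost elliptic-integral data is parametrized by the modulus k, while
+# scipy's ellipk/ellipkinc/ellipe/ellipeinc take the parameter m = k**2,
+# hence the wrappers below.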
+def ellipk_(k):
+    return ellipk(k*k)
+
+
+def ellipkinc_(f, k):
+    return ellipkinc(f, k*k)
+
+
+def ellipe_(k):
+    return ellipe(k*k)
+
+
+def ellipeinc_(f, k):
+    return ellipeinc(f, k*k)
+
+
+def ellipj_(k):
+    return ellipj(k*k)
+
+
+def zeta_(x):
+    return zeta(x, 1.)
+
+
+def assoc_legendre_p_boost_(nu, mu, x):
+    # the boost test data is for integer orders only
+    return lpmv(mu, nu.astype(int), x)
+
+def legendre_p_via_assoc_(nu, x):
+    return lpmv(0, nu, x)
+
+def lpn_(n, x):
+    return lpn(n.astype('l'), x)[0][-1]
+
+def lqn_(n, x):
+    return lqn(n.astype('l'), x)[0][-1]
+
+def legendre_p_via_lpmn(n, x):
+    return lpmn(0, n, x)[0][0,-1]
+
+def legendre_q_via_lqmn(n, x):
+    return lqmn(0, n, x)[0][0,-1]
+
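+# scipy's Mathieu functions take the angle in degrees, while the reference
+# data uses radians, hence the conversion below.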
+def mathieu_ce_rad(m, q, x):
+    return mathieu_cem(m, q, x*180/np.pi)[0]
+
+
+def mathieu_se_rad(m, q, x):
+    return mathieu_sem(m, q, x*180/np.pi)[0]
+
+
+def mathieu_mc1_scaled(m, q, x):
+    # GSL follows a different normalization.
+    # We follow Abramowitz & Stegun; they apparently follow something else.
+    return mathieu_modcem1(m, q, x)[0] * np.sqrt(np.pi/2)
+
+
+def mathieu_ms1_scaled(m, q, x):
+    return mathieu_modsem1(m, q, x)[0] * np.sqrt(np.pi/2)
+
+
+def mathieu_mc2_scaled(m, q, x):
+    return mathieu_modcem2(m, q, x)[0] * np.sqrt(np.pi/2)
+
+
+def mathieu_ms2_scaled(m, q, x):
+    return mathieu_modsem2(m, q, x)[0] * np.sqrt(np.pi/2)
+
+def eval_legendre_ld(n, x):
+    return eval_legendre(n.astype('l'), x)
+
+def eval_legendre_dd(n, x):
+    return eval_legendre(n.astype('d'), x)
+
+def eval_hermite_ld(n, x):
+    return eval_hermite(n.astype('l'), x)
+
+def eval_laguerre_ld(n, x):
+    return eval_laguerre(n.astype('l'), x)
+
+def eval_laguerre_dd(n, x):
+    return eval_laguerre(n.astype('d'), x)
+
+def eval_genlaguerre_ldd(n, a, x):
+    return eval_genlaguerre(n.astype('l'), a, x)
+
+def eval_genlaguerre_ddd(n, a, x):
+    return eval_genlaguerre(n.astype('d'), a, x)
+
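+# The *_comp wrappers evaluate each quantile function at the complemented
+# probability (1 - p); they are checked against the complement column of the
+# same dataset (note the result-column index is one higher in BOOST_TESTS).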
+def bdtrik_comp(y, n, p):
+    return bdtrik(1-y, n, p)
+
+def btdtri_comp(a, b, p):
+    return btdtri(a, b, 1-p)
+
+def btdtria_comp(p, b, x):
+    return btdtria(1-p, b, x)
+
+def btdtrib_comp(a, p, x):
+    return btdtrib(a, 1-p, x)
+
+def gdtr_(p, x):
+    return gdtr(1.0, p, x)
+
+def gdtrc_(p, x):
+    return gdtrc(1.0, p, x)
+
+def gdtrix_(b, p):
+    return gdtrix(1.0, b, p)
+
+def gdtrix_comp(b, p):
+    return gdtrix(1.0, b, 1-p)
+
+def gdtrib_(p, x):
+    return gdtrib(1.0, p, x)
+
+def gdtrib_comp(p, x):
+    return gdtrib(1.0, 1-p, x)
+
+def nbdtrik_comp(y, n, p):
+    return nbdtrik(1-y, n, p)
+
+def pdtrik_comp(p, m):
+    return pdtrik(1-p, m)
+
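+# Boost tabulates tgamma_delta_ratio(z, d) = Gamma(z)/Gamma(z + d), which is
+# the reciprocal of the Pochhammer symbol poch(z, d) = Gamma(z + d)/Gamma(z);
+# poch_minus covers the columns with negated d.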
+def poch_(z, m):
+    return 1.0 / poch(z, m)
+
+def poch_minus(z, m):
+    return 1.0 / poch(z, -m)
+
+def spherical_jn_(n, x):
+    return spherical_jn(n.astype('l'), x)
+
+def spherical_yn_(n, x):
+    return spherical_yn(n.astype('l'), x)
+
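+# Complex-valued results are split into (real, imag) pairs so that they can
+# be compared against two separate result columns in the data files.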
+def sph_harm_(m, n, theta, phi):
+    y = sph_harm(m, n, theta, phi)
+    return (y.real, y.imag)
+
+def cexpm1(x, y):
+    z = expm1(x + 1j*y)
+    return z.real, z.imag
+
+def clog1p(x, y):
+    z = log1p(x + 1j*y)
+    return z.real, z.imag
+
+
+BOOST_TESTS = [
+        data(assoc_legendre_p_boost_, 'assoc_legendre_p_ipp-assoc_legendre_p', (0,1,2), 3, rtol=1e-11),
+
+        data(legendre_p_via_assoc_, 'legendre_p_ipp-legendre_p', (0,1), 2, rtol=1e-11),
+        data(legendre_p_via_assoc_, 'legendre_p_large_ipp-legendre_p_large', (0,1), 2, rtol=9.6e-14),
+        data(legendre_p_via_lpmn, 'legendre_p_ipp-legendre_p', (0,1), 2, rtol=5e-14, vectorized=False),
+        data(legendre_p_via_lpmn, 'legendre_p_large_ipp-legendre_p_large', (0,1), 2, rtol=9.6e-14, vectorized=False),
+        data(lpn_, 'legendre_p_ipp-legendre_p', (0,1), 2, rtol=5e-14, vectorized=False),
+        data(lpn_, 'legendre_p_large_ipp-legendre_p_large', (0,1), 2, rtol=3e-13, vectorized=False),
+        data(eval_legendre_ld, 'legendre_p_ipp-legendre_p', (0,1), 2, rtol=6e-14),
+        data(eval_legendre_ld, 'legendre_p_large_ipp-legendre_p_large', (0,1), 2, rtol=2e-13),
+        data(eval_legendre_dd, 'legendre_p_ipp-legendre_p', (0,1), 2, rtol=2e-14),
+        data(eval_legendre_dd, 'legendre_p_large_ipp-legendre_p_large', (0,1), 2, rtol=2e-13),
+
+        data(lqn_, 'legendre_p_ipp-legendre_p', (0,1), 3, rtol=2e-14, vectorized=False),
+        data(lqn_, 'legendre_p_large_ipp-legendre_p_large', (0,1), 3, rtol=2e-12, vectorized=False),
+        data(legendre_q_via_lqmn, 'legendre_p_ipp-legendre_p', (0,1), 3, rtol=2e-14, vectorized=False),
+        data(legendre_q_via_lqmn, 'legendre_p_large_ipp-legendre_p_large', (0,1), 3, rtol=2e-12, vectorized=False),
+
+        data(beta, 'beta_exp_data_ipp-beta_exp_data', (0,1), 2, rtol=1e-13),
+        data(beta, 'beta_med_data_ipp-beta_med_data', (0,1), 2, rtol=5e-13),
+
+        data(betainc, 'ibeta_small_data_ipp-ibeta_small_data', (0,1,2), 5, rtol=6e-15),
+        data(betainc, 'ibeta_data_ipp-ibeta_data', (0,1,2), 5, rtol=5e-13),
+        data(betainc, 'ibeta_int_data_ipp-ibeta_int_data', (0,1,2), 5, rtol=2e-14),
+        data(betainc, 'ibeta_large_data_ipp-ibeta_large_data', (0,1,2), 5, rtol=4e-10),
+
+        data(betaincinv, 'ibeta_inv_data_ipp-ibeta_inv_data', (0,1,2), 3, rtol=1e-5),
+
+        data(btdtr, 'ibeta_small_data_ipp-ibeta_small_data', (0,1,2), 5, rtol=6e-15),
+        data(btdtr, 'ibeta_data_ipp-ibeta_data', (0,1,2), 5, rtol=4e-13),
+        data(btdtr, 'ibeta_int_data_ipp-ibeta_int_data', (0,1,2), 5, rtol=2e-14),
+        data(btdtr, 'ibeta_large_data_ipp-ibeta_large_data', (0,1,2), 5, rtol=4e-10),
+
+        data(btdtri, 'ibeta_inv_data_ipp-ibeta_inv_data', (0,1,2), 3, rtol=1e-5),
+        data(btdtri_comp, 'ibeta_inv_data_ipp-ibeta_inv_data', (0,1,2), 4, rtol=8e-7),
+
+        data(btdtria, 'ibeta_inva_data_ipp-ibeta_inva_data', (2,0,1), 3, rtol=5e-9),
+        data(btdtria_comp, 'ibeta_inva_data_ipp-ibeta_inva_data', (2,0,1), 4, rtol=5e-9),
+
+        data(btdtrib, 'ibeta_inva_data_ipp-ibeta_inva_data', (0,2,1), 5, rtol=5e-9),
+        data(btdtrib_comp, 'ibeta_inva_data_ipp-ibeta_inva_data', (0,2,1), 6, rtol=5e-9),
+
+        data(binom, 'binomial_data_ipp-binomial_data', (0,1), 2, rtol=1e-13),
+        data(binom, 'binomial_large_data_ipp-binomial_large_data', (0,1), 2, rtol=5e-13),
+
+        data(bdtrik, 'binomial_quantile_ipp-binomial_quantile_data', (2,0,1), 3, rtol=5e-9),
+        data(bdtrik_comp, 'binomial_quantile_ipp-binomial_quantile_data', (2,0,1), 4, rtol=5e-9),
+
+        data(nbdtrik, 'negative_binomial_quantile_ipp-negative_binomial_quantile_data', (2,0,1), 3, rtol=4e-9),
+        data(nbdtrik_comp, 'negative_binomial_quantile_ipp-negative_binomial_quantile_data', (2,0,1), 4, rtol=4e-9),
+
+        data(pdtrik, 'poisson_quantile_ipp-poisson_quantile_data', (1,0), 2, rtol=3e-9),
+        data(pdtrik_comp, 'poisson_quantile_ipp-poisson_quantile_data', (1,0), 3, rtol=4e-9),
+
+        data(cbrt, 'cbrt_data_ipp-cbrt_data', 1, 0),
+
+        data(digamma, 'digamma_data_ipp-digamma_data', 0, 1),
+        data(digamma, 'digamma_data_ipp-digamma_data', 0j, 1),
+        data(digamma, 'digamma_neg_data_ipp-digamma_neg_data', 0, 1, rtol=2e-13),
+        data(digamma, 'digamma_neg_data_ipp-digamma_neg_data', 0j, 1, rtol=1e-13),
+        data(digamma, 'digamma_root_data_ipp-digamma_root_data', 0, 1, rtol=1e-15),
+        data(digamma, 'digamma_root_data_ipp-digamma_root_data', 0j, 1, rtol=1e-15),
+        data(digamma, 'digamma_small_data_ipp-digamma_small_data', 0, 1, rtol=1e-15),
+        data(digamma, 'digamma_small_data_ipp-digamma_small_data', 0j, 1, rtol=1e-14),
+
+        data(ellipk_, 'ellint_k_data_ipp-ellint_k_data', 0, 1),
+        data(ellipkinc_, 'ellint_f_data_ipp-ellint_f_data', (0,1), 2, rtol=1e-14),
+        data(ellipe_, 'ellint_e_data_ipp-ellint_e_data', 0, 1),
+        data(ellipeinc_, 'ellint_e2_data_ipp-ellint_e2_data', (0,1), 2, rtol=1e-14),
+
+        data(erf, 'erf_data_ipp-erf_data', 0, 1),
+        data(erf, 'erf_data_ipp-erf_data', 0j, 1, rtol=1e-13),
+        data(erfc, 'erf_data_ipp-erf_data', 0, 2, rtol=6e-15),
+        data(erf, 'erf_large_data_ipp-erf_large_data', 0, 1),
+        data(erf, 'erf_large_data_ipp-erf_large_data', 0j, 1),
+        data(erfc, 'erf_large_data_ipp-erf_large_data', 0, 2, rtol=4e-14),
+        data(erf, 'erf_small_data_ipp-erf_small_data', 0, 1),
+        data(erf, 'erf_small_data_ipp-erf_small_data', 0j, 1, rtol=1e-13),
+        data(erfc, 'erf_small_data_ipp-erf_small_data', 0, 2),
+
+        data(erfinv, 'erf_inv_data_ipp-erf_inv_data', 0, 1),
+        data(erfcinv, 'erfc_inv_data_ipp-erfc_inv_data', 0, 1),
+        data(erfcinv, 'erfc_inv_big_data_ipp-erfc_inv_big_data', 0, 1, param_filter=(lambda s: s > 0)),
+
+        data(exp1, 'expint_1_data_ipp-expint_1_data', 1, 2, rtol=1e-13),
+        data(exp1, 'expint_1_data_ipp-expint_1_data', 1j, 2, rtol=5e-9),
+        data(expi, 'expinti_data_ipp-expinti_data', 0, 1, rtol=1e-13),
+        data(expi, 'expinti_data_double_ipp-expinti_data_double', 0, 1, rtol=1e-13),
+        data(expi, 'expinti_data_long_ipp-expinti_data_long', 0, 1),
+
+        data(expn, 'expint_small_data_ipp-expint_small_data', (0,1), 2),
+        data(expn, 'expint_data_ipp-expint_data', (0,1), 2, rtol=1e-14),
+
+        data(gamma, 'test_gamma_data_ipp-near_0', 0, 1),
+        data(gamma, 'test_gamma_data_ipp-near_1', 0, 1),
+        data(gamma, 'test_gamma_data_ipp-near_2', 0, 1),
+        data(gamma, 'test_gamma_data_ipp-near_m10', 0, 1),
+        data(gamma, 'test_gamma_data_ipp-near_m55', 0, 1, rtol=7e-12),
+        data(gamma, 'test_gamma_data_ipp-factorials', 0, 1, rtol=4e-14),
+        data(gamma, 'test_gamma_data_ipp-near_0', 0j, 1, rtol=2e-9),
+        data(gamma, 'test_gamma_data_ipp-near_1', 0j, 1, rtol=2e-9),
+        data(gamma, 'test_gamma_data_ipp-near_2', 0j, 1, rtol=2e-9),
+        data(gamma, 'test_gamma_data_ipp-near_m10', 0j, 1, rtol=2e-9),
+        data(gamma, 'test_gamma_data_ipp-near_m55', 0j, 1, rtol=2e-9),
+        data(gamma, 'test_gamma_data_ipp-factorials', 0j, 1, rtol=2e-13),
+        data(gammaln, 'test_gamma_data_ipp-near_0', 0, 2, rtol=5e-11),
+        data(gammaln, 'test_gamma_data_ipp-near_1', 0, 2, rtol=5e-11),
+        data(gammaln, 'test_gamma_data_ipp-near_2', 0, 2, rtol=2e-10),
+        data(gammaln, 'test_gamma_data_ipp-near_m10', 0, 2, rtol=5e-11),
+        data(gammaln, 'test_gamma_data_ipp-near_m55', 0, 2, rtol=5e-11),
+        data(gammaln, 'test_gamma_data_ipp-factorials', 0, 2),
+
+        data(gammainc, 'igamma_small_data_ipp-igamma_small_data', (0,1), 5, rtol=5e-15),
+        data(gammainc, 'igamma_med_data_ipp-igamma_med_data', (0,1), 5, rtol=2e-13),
+        data(gammainc, 'igamma_int_data_ipp-igamma_int_data', (0,1), 5, rtol=2e-13),
+        data(gammainc, 'igamma_big_data_ipp-igamma_big_data', (0,1), 5, rtol=1e-12),
+
+        data(gdtr_, 'igamma_small_data_ipp-igamma_small_data', (0,1), 5, rtol=1e-13),
+        data(gdtr_, 'igamma_med_data_ipp-igamma_med_data', (0,1), 5, rtol=2e-13),
+        data(gdtr_, 'igamma_int_data_ipp-igamma_int_data', (0,1), 5, rtol=2e-13),
+        data(gdtr_, 'igamma_big_data_ipp-igamma_big_data', (0,1), 5, rtol=2e-9),
+
+        data(gammaincc, 'igamma_small_data_ipp-igamma_small_data', (0,1), 3, rtol=1e-13),
+        data(gammaincc, 'igamma_med_data_ipp-igamma_med_data', (0,1), 3, rtol=2e-13),
+        data(gammaincc, 'igamma_int_data_ipp-igamma_int_data', (0,1), 3, rtol=4e-14),
+        data(gammaincc, 'igamma_big_data_ipp-igamma_big_data', (0,1), 3, rtol=1e-11),
+
+        data(gdtrc_, 'igamma_small_data_ipp-igamma_small_data', (0,1), 3, rtol=1e-13),
+        data(gdtrc_, 'igamma_med_data_ipp-igamma_med_data', (0,1), 3, rtol=2e-13),
+        data(gdtrc_, 'igamma_int_data_ipp-igamma_int_data', (0,1), 3, rtol=4e-14),
+        data(gdtrc_, 'igamma_big_data_ipp-igamma_big_data', (0,1), 3, rtol=1e-11),
+
+        data(gdtrib_, 'igamma_inva_data_ipp-igamma_inva_data', (1,0), 2, rtol=5e-9),
+        data(gdtrib_comp, 'igamma_inva_data_ipp-igamma_inva_data', (1,0), 3, rtol=5e-9),
+
+        data(poch_, 'tgamma_delta_ratio_data_ipp-tgamma_delta_ratio_data', (0,1), 2, rtol=2e-13),
+        data(poch_, 'tgamma_delta_ratio_int_ipp-tgamma_delta_ratio_int', (0,1), 2,),
+        data(poch_, 'tgamma_delta_ratio_int2_ipp-tgamma_delta_ratio_int2', (0,1), 2,),
+        data(poch_minus, 'tgamma_delta_ratio_data_ipp-tgamma_delta_ratio_data', (0,1), 3, rtol=2e-13),
+        data(poch_minus, 'tgamma_delta_ratio_int_ipp-tgamma_delta_ratio_int', (0,1), 3),
+        data(poch_minus, 'tgamma_delta_ratio_int2_ipp-tgamma_delta_ratio_int2', (0,1), 3),
+
+        data(eval_hermite_ld, 'hermite_ipp-hermite', (0,1), 2, rtol=2e-14),
+
+        data(eval_laguerre_ld, 'laguerre2_ipp-laguerre2', (0,1), 2, rtol=7e-12),
+        data(eval_laguerre_dd, 'laguerre2_ipp-laguerre2', (0,1), 2, knownfailure='hyp2f1 insufficiently accurate.'),
+        data(eval_genlaguerre_ldd, 'laguerre3_ipp-laguerre3', (0,1,2), 3, rtol=2e-13),
+        data(eval_genlaguerre_ddd, 'laguerre3_ipp-laguerre3', (0,1,2), 3, knownfailure='hyp2f1 insufficiently accurate.'),
+
+        data(log1p, 'log1p_expm1_data_ipp-log1p_expm1_data', 0, 1),
+        data(expm1, 'log1p_expm1_data_ipp-log1p_expm1_data', 0, 2),
+
+        data(iv, 'bessel_i_data_ipp-bessel_i_data', (0,1), 2, rtol=1e-12),
+        data(iv, 'bessel_i_data_ipp-bessel_i_data', (0,1j), 2, rtol=2e-10, atol=1e-306),
+        data(iv, 'bessel_i_int_data_ipp-bessel_i_int_data', (0,1), 2, rtol=1e-9),
+        data(iv, 'bessel_i_int_data_ipp-bessel_i_int_data', (0,1j), 2, rtol=2e-10),
+
+        data(ivp, 'bessel_i_prime_int_data_ipp-bessel_i_prime_int_data', (0,1), 2, rtol=1.2e-13),
+        data(ivp, 'bessel_i_prime_int_data_ipp-bessel_i_prime_int_data', (0,1j), 2, rtol=1.2e-13, atol=1e-300),
+
+        data(jn, 'bessel_j_int_data_ipp-bessel_j_int_data', (0,1), 2, rtol=1e-12),
+        data(jn, 'bessel_j_int_data_ipp-bessel_j_int_data', (0,1j), 2, rtol=1e-12),
+        data(jn, 'bessel_j_large_data_ipp-bessel_j_large_data', (0,1), 2, rtol=6e-11),
+        data(jn, 'bessel_j_large_data_ipp-bessel_j_large_data', (0,1j), 2, rtol=6e-11),
+
+        data(jv, 'bessel_j_int_data_ipp-bessel_j_int_data', (0,1), 2, rtol=1e-12),
+        data(jv, 'bessel_j_int_data_ipp-bessel_j_int_data', (0,1j), 2, rtol=1e-12),
+        data(jv, 'bessel_j_data_ipp-bessel_j_data', (0,1), 2, rtol=1e-12),
+        data(jv, 'bessel_j_data_ipp-bessel_j_data', (0,1j), 2, rtol=1e-12),
+
+        data(jvp, 'bessel_j_prime_int_data_ipp-bessel_j_prime_int_data', (0,1), 2, rtol=1e-13),
+        data(jvp, 'bessel_j_prime_int_data_ipp-bessel_j_prime_int_data', (0,1j), 2, rtol=1e-13),
+        data(jvp, 'bessel_j_prime_large_data_ipp-bessel_j_prime_large_data', (0,1), 2, rtol=1e-11),
+        data(jvp, 'bessel_j_prime_large_data_ipp-bessel_j_prime_large_data', (0,1j), 2, rtol=1e-11),
+
+        data(kn, 'bessel_k_int_data_ipp-bessel_k_int_data', (0,1), 2, rtol=1e-12),
+
+        data(kv, 'bessel_k_int_data_ipp-bessel_k_int_data', (0,1), 2, rtol=1e-12),
+        data(kv, 'bessel_k_int_data_ipp-bessel_k_int_data', (0,1j), 2, rtol=1e-12),
+        data(kv, 'bessel_k_data_ipp-bessel_k_data', (0,1), 2, rtol=1e-12),
+        data(kv, 'bessel_k_data_ipp-bessel_k_data', (0,1j), 2, rtol=1e-12),
+
+        data(kvp, 'bessel_k_prime_int_data_ipp-bessel_k_prime_int_data', (0,1), 2, rtol=3e-14),
+        data(kvp, 'bessel_k_prime_int_data_ipp-bessel_k_prime_int_data', (0,1j), 2, rtol=3e-14),
+        data(kvp, 'bessel_k_prime_data_ipp-bessel_k_prime_data', (0,1), 2, rtol=7e-14),
+        data(kvp, 'bessel_k_prime_data_ipp-bessel_k_prime_data', (0,1j), 2, rtol=7e-14),
+
+        data(yn, 'bessel_y01_data_ipp-bessel_y01_data', (0,1), 2, rtol=1e-12),
+        data(yn, 'bessel_yn_data_ipp-bessel_yn_data', (0,1), 2, rtol=1e-12),
+
+        data(yv, 'bessel_yn_data_ipp-bessel_yn_data', (0,1), 2, rtol=1e-12),
+        data(yv, 'bessel_yn_data_ipp-bessel_yn_data', (0,1j), 2, rtol=1e-12),
+        data(yv, 'bessel_yv_data_ipp-bessel_yv_data', (0,1), 2, rtol=1e-10),
+        data(yv, 'bessel_yv_data_ipp-bessel_yv_data', (0,1j), 2, rtol=1e-10),
+
+        data(yvp, 'bessel_yv_prime_data_ipp-bessel_yv_prime_data', (0, 1), 2, rtol=4e-9),
+        data(yvp, 'bessel_yv_prime_data_ipp-bessel_yv_prime_data', (0, 1j), 2, rtol=4e-9),
+
+        data(zeta_, 'zeta_data_ipp-zeta_data', 0, 1, param_filter=(lambda s: s > 1)),
+        data(zeta_, 'zeta_neg_data_ipp-zeta_neg_data', 0, 1, param_filter=(lambda s: s > 1)),
+        data(zeta_, 'zeta_1_up_data_ipp-zeta_1_up_data', 0, 1, param_filter=(lambda s: s > 1)),
+        data(zeta_, 'zeta_1_below_data_ipp-zeta_1_below_data', 0, 1, param_filter=(lambda s: s > 1)),
+
+        data(gammaincinv, 'gamma_inv_small_data_ipp-gamma_inv_small_data', (0,1), 2, rtol=1e-11),
+        data(gammaincinv, 'gamma_inv_data_ipp-gamma_inv_data', (0,1), 2, rtol=1e-14),
+        data(gammaincinv, 'gamma_inv_big_data_ipp-gamma_inv_big_data', (0,1), 2, rtol=1e-11),
+
+        data(gammainccinv, 'gamma_inv_small_data_ipp-gamma_inv_small_data', (0,1), 3, rtol=1e-12),
+        data(gammainccinv, 'gamma_inv_data_ipp-gamma_inv_data', (0,1), 3, rtol=1e-14),
+        data(gammainccinv, 'gamma_inv_big_data_ipp-gamma_inv_big_data', (0,1), 3, rtol=1e-14),
+
+        data(gdtrix_, 'gamma_inv_small_data_ipp-gamma_inv_small_data', (0,1), 2, rtol=3e-13, knownfailure='gdtrix underflows at some points'),
+        data(gdtrix_, 'gamma_inv_data_ipp-gamma_inv_data', (0,1), 2, rtol=3e-15),
+        data(gdtrix_, 'gamma_inv_big_data_ipp-gamma_inv_big_data', (0,1), 2),
+        data(gdtrix_comp, 'gamma_inv_small_data_ipp-gamma_inv_small_data', (0,1), 2, knownfailure='gdtrix bad at some points'),
+        data(gdtrix_comp, 'gamma_inv_data_ipp-gamma_inv_data', (0,1), 3, rtol=6e-15),
+        data(gdtrix_comp, 'gamma_inv_big_data_ipp-gamma_inv_big_data', (0,1), 3),
+
+        data(chndtr, 'nccs_ipp-nccs', (2,0,1), 3, rtol=3e-5),
+        data(chndtr, 'nccs_big_ipp-nccs_big', (2,0,1), 3, rtol=5e-4, knownfailure='chndtr inaccurate at some points'),
+
+        data(sph_harm_, 'spherical_harmonic_ipp-spherical_harmonic', (1,0,3,2), (4,5), rtol=5e-11,
+             param_filter=(lambda p: np.ones(p.shape, '?'),
+                           lambda p: np.ones(p.shape, '?'),
+                           lambda p: np.logical_and(p < 2*np.pi, p >= 0),
+                           lambda p: np.logical_and(p < np.pi, p >= 0))),
+
+        data(spherical_jn_, 'sph_bessel_data_ipp-sph_bessel_data', (0,1), 2, rtol=1e-13),
+        data(spherical_yn_, 'sph_neumann_data_ipp-sph_neumann_data', (0,1), 2, rtol=8e-15),
+
+        data(owens_t, 'owens_t_ipp-owens_t', (0, 1), 2, rtol=5e-14),
+        data(owens_t, 'owens_t_large_data_ipp-owens_t_large_data', (0, 1), 2, rtol=8e-12),
+
+        # -- test data exists in boost but is not used in scipy --
+
+        # ibeta_derivative_data_ipp/ibeta_derivative_data.txt
+        # ibeta_derivative_int_data_ipp/ibeta_derivative_int_data.txt
+        # ibeta_derivative_large_data_ipp/ibeta_derivative_large_data.txt
+        # ibeta_derivative_small_data_ipp/ibeta_derivative_small_data.txt
+
+        # bessel_y01_prime_data_ipp/bessel_y01_prime_data.txt
+        # bessel_yn_prime_data_ipp/bessel_yn_prime_data.txt
+        # sph_bessel_prime_data_ipp/sph_bessel_prime_data.txt
+        # sph_neumann_prime_data_ipp/sph_neumann_prime_data.txt
+
+        # ellint_d2_data_ipp/ellint_d2_data.txt
+        # ellint_d_data_ipp/ellint_d_data.txt
+        # ellint_pi2_data_ipp/ellint_pi2_data.txt
+        # ellint_pi3_data_ipp/ellint_pi3_data.txt
+        # ellint_pi3_large_data_ipp/ellint_pi3_large_data.txt
+        data(elliprc, 'ellint_rc_data_ipp-ellint_rc_data', (0, 1), 2,
+             rtol=5e-16),
+        data(elliprd, 'ellint_rd_data_ipp-ellint_rd_data', (0, 1, 2), 3,
+             rtol=5e-16),
+        data(elliprd, 'ellint_rd_0xy_ipp-ellint_rd_0xy', (0, 1, 2), 3,
+             rtol=5e-16),
+        data(elliprd, 'ellint_rd_0yy_ipp-ellint_rd_0yy', (0, 1, 2), 3,
+             rtol=5e-16),
+        data(elliprd, 'ellint_rd_xxx_ipp-ellint_rd_xxx', (0, 1, 2), 3,
+             rtol=5e-16),
+        # Some of the following rtol values for elliprd are larger than 5e-16
+        # to work around hard cases in the Boost test data where we get a
+        # slightly larger error than the ideal bound when the x (== y) input
+        # is close to zero.
+        # Also, the accuracy on 32-bit builds with g++ may suffer from excess
+        # loss of precision; see GCC bugzilla 323
+        # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=323
+        data(elliprd, 'ellint_rd_xxz_ipp-ellint_rd_xxz', (0, 1, 2), 3,
+             rtol=6.5e-16),
+        data(elliprd, 'ellint_rd_xyy_ipp-ellint_rd_xyy', (0, 1, 2), 3,
+             rtol=6e-16),
+        data(elliprf, 'ellint_rf_data_ipp-ellint_rf_data', (0, 1, 2), 3,
+             rtol=5e-16),
+        data(elliprf, 'ellint_rf_xxx_ipp-ellint_rf_xxx', (0, 1, 2), 3,
+             rtol=5e-16),
+        data(elliprf, 'ellint_rf_xyy_ipp-ellint_rf_xyy', (0, 1, 2), 3,
+             rtol=5e-16),
+        data(elliprf, 'ellint_rf_xy0_ipp-ellint_rf_xy0', (0, 1, 2), 3,
+             rtol=5e-16),
+        data(elliprf, 'ellint_rf_0yy_ipp-ellint_rf_0yy', (0, 1, 2), 3,
+             rtol=5e-16),
+        # The accuracy of R_G is primarily limited by the R_D used
+        # internally, so it is generally worse than that of R_D; hence the
+        # increased rtol for R_G here. The cases with duplicate arguments are
+        # slightly less likely to be unbalanced (at least two arguments are
+        # already balanced), so the error bound is slightly better. Again,
+        # precision with 32-bit g++ is even worse.
+        data(elliprg, 'ellint_rg_ipp-ellint_rg', (0, 1, 2), 3,
+             rtol=8.0e-16),
+        data(elliprg, 'ellint_rg_xxx_ipp-ellint_rg_xxx', (0, 1, 2), 3,
+             rtol=6e-16),
+        data(elliprg, 'ellint_rg_xyy_ipp-ellint_rg_xyy', (0, 1, 2), 3,
+             rtol=7.5e-16),
+        data(elliprg, 'ellint_rg_xy0_ipp-ellint_rg_xy0', (0, 1, 2), 3,
+             rtol=5e-16),
+        data(elliprg, 'ellint_rg_00x_ipp-ellint_rg_00x', (0, 1, 2), 3,
+             rtol=5e-16),
+        data(elliprj, 'ellint_rj_data_ipp-ellint_rj_data', (0, 1, 2, 3), 4,
+             rtol=5e-16, atol=1e-25,
+             param_filter=(lambda s: s <= 5e-26,)),
+        # ellint_rc_data_ipp/ellint_rc_data.txt
+        # ellint_rd_0xy_ipp/ellint_rd_0xy.txt
+        # ellint_rd_0yy_ipp/ellint_rd_0yy.txt
+        # ellint_rd_data_ipp/ellint_rd_data.txt
+        # ellint_rd_xxx_ipp/ellint_rd_xxx.txt
+        # ellint_rd_xxz_ipp/ellint_rd_xxz.txt
+        # ellint_rd_xyy_ipp/ellint_rd_xyy.txt
+        # ellint_rf_0yy_ipp/ellint_rf_0yy.txt
+        # ellint_rf_data_ipp/ellint_rf_data.txt
+        # ellint_rf_xxx_ipp/ellint_rf_xxx.txt
+        # ellint_rf_xy0_ipp/ellint_rf_xy0.txt
+        # ellint_rf_xyy_ipp/ellint_rf_xyy.txt
+        # ellint_rg_00x_ipp/ellint_rg_00x.txt
+        # ellint_rg_ipp/ellint_rg.txt
+        # ellint_rg_xxx_ipp/ellint_rg_xxx.txt
+        # ellint_rg_xy0_ipp/ellint_rg_xy0.txt
+        # ellint_rg_xyy_ipp/ellint_rg_xyy.txt
+        # ellint_rj_data_ipp/ellint_rj_data.txt
+        # ellint_rj_e2_ipp/ellint_rj_e2.txt
+        # ellint_rj_e3_ipp/ellint_rj_e3.txt
+        # ellint_rj_e4_ipp/ellint_rj_e4.txt
+        # ellint_rj_zp_ipp/ellint_rj_zp.txt
+
+        # jacobi_elliptic_ipp/jacobi_elliptic.txt
+        # jacobi_elliptic_small_ipp/jacobi_elliptic_small.txt
+        # jacobi_large_phi_ipp/jacobi_large_phi.txt
+        # jacobi_near_1_ipp/jacobi_near_1.txt
+        # jacobi_zeta_big_phi_ipp/jacobi_zeta_big_phi.txt
+        # jacobi_zeta_data_ipp/jacobi_zeta_data.txt
+
+        # heuman_lambda_data_ipp/heuman_lambda_data.txt
+
+        # hypergeometric_0F2_ipp/hypergeometric_0F2.txt
+        # hypergeometric_1F1_big_ipp/hypergeometric_1F1_big.txt
+        # hypergeometric_1F1_ipp/hypergeometric_1F1.txt
+        # hypergeometric_1F1_small_random_ipp/hypergeometric_1F1_small_random.txt
+        # hypergeometric_1F2_ipp/hypergeometric_1F2.txt
+        # hypergeometric_1f1_large_regularized_ipp/hypergeometric_1f1_large_regularized.txt
+        # hypergeometric_1f1_log_large_unsolved_ipp/hypergeometric_1f1_log_large_unsolved.txt
+        # hypergeometric_2F0_half_ipp/hypergeometric_2F0_half.txt
+        # hypergeometric_2F0_integer_a2_ipp/hypergeometric_2F0_integer_a2.txt
+        # hypergeometric_2F0_ipp/hypergeometric_2F0.txt
+        # hypergeometric_2F0_large_z_ipp/hypergeometric_2F0_large_z.txt
+        # hypergeometric_2F1_ipp/hypergeometric_2F1.txt
+        # hypergeometric_2F2_ipp/hypergeometric_2F2.txt
+
+        # ncbeta_big_ipp/ncbeta_big.txt
+        # nct_small_delta_ipp/nct_small_delta.txt
+        # nct_asym_ipp/nct_asym.txt
+        # ncbeta_ipp/ncbeta.txt
+
+        # powm1_data_ipp/powm1_big_data.txt
+        # powm1_sqrtp1m1_test_hpp/sqrtp1m1_data.txt
+
+        # sinc_data_ipp/sinc_data.txt
+
+        # test_gamma_data_ipp/gammap1m1_data.txt
+        # tgamma_ratio_data_ipp/tgamma_ratio_data.txt
+
+        # trig_data_ipp/trig_data.txt
+        # trig_data2_ipp/trig_data2.txt
+]
+
+
+@pytest.mark.parametrize('test', BOOST_TESTS, ids=repr)
+def test_boost(test):
+    _test_factory(test)
+
+
+GSL_TESTS = [
+        data_gsl(mathieu_a, 'mathieu_ab', (0, 1), 2, rtol=1e-13, atol=1e-13),
+        data_gsl(mathieu_b, 'mathieu_ab', (0, 1), 3, rtol=1e-13, atol=1e-13),
+
+        # The GSL reference output itself has limited accuracy, hence the
+        # loose tolerances below.
+        data_gsl(mathieu_ce_rad, 'mathieu_ce_se', (0, 1, 2), 3, rtol=1e-7, atol=1e-13),
+        data_gsl(mathieu_se_rad, 'mathieu_ce_se', (0, 1, 2), 4, rtol=1e-7, atol=1e-13),
+
+        data_gsl(mathieu_mc1_scaled, 'mathieu_mc_ms', (0, 1, 2), 3, rtol=1e-7, atol=1e-13),
+        data_gsl(mathieu_ms1_scaled, 'mathieu_mc_ms', (0, 1, 2), 4, rtol=1e-7, atol=1e-13),
+
+        data_gsl(mathieu_mc2_scaled, 'mathieu_mc_ms', (0, 1, 2), 5, rtol=1e-7, atol=1e-13),
+        data_gsl(mathieu_ms2_scaled, 'mathieu_mc_ms', (0, 1, 2), 6, rtol=1e-7, atol=1e-13),
+]
+
+
+@pytest.mark.parametrize('test', GSL_TESTS, ids=repr)
+def test_gsl(test):
+    _test_factory(test)
+
+
+LOCAL_TESTS = [
+    data_local(ellipkinc, 'ellipkinc_neg_m', (0, 1), 2),
+    data_local(ellipkm1, 'ellipkm1', 0, 1),
+    data_local(ellipeinc, 'ellipeinc_neg_m', (0, 1), 2),
+    data_local(clog1p, 'log1p_expm1_complex', (0,1), (2,3), rtol=1e-14),
+    data_local(cexpm1, 'log1p_expm1_complex', (0,1), (4,5), rtol=1e-14),
+    data_local(gammainc, 'gammainc', (0, 1), 2, rtol=1e-12),
+    data_local(gammaincc, 'gammaincc', (0, 1), 2, rtol=1e-11),
+    data_local(ellip_harm_2, 'ellip', (0, 1, 2, 3, 4), 6, rtol=1e-10, atol=1e-13),
+    data_local(ellip_harm, 'ellip', (0, 1, 2, 3, 4), 5, rtol=1e-10, atol=1e-13),
+    data_local(wright_bessel, 'wright_bessel', (0, 1, 2), 3, rtol=1e-11),
+]
+
+
+@pytest.mark.parametrize('test', LOCAL_TESTS, ids=repr)
+def test_local(test):
+    _test_factory(test)
+
+
+def _test_factory(test, dtype=np.double):
+    """Boost test"""
+    with suppress_warnings() as sup:
+        sup.filter(IntegrationWarning, "The occurrence of roundoff error is detected")
+        with np.errstate(all='ignore'):
+            test.check(dtype=dtype)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_dd.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_dd.py
new file mode 100644
index 00000000..45c8c88a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_dd.py
@@ -0,0 +1,46 @@
+# Tests for a few of the "double-double" C functions defined in cephes/dd_*.
+
+import pytest
+from numpy.testing import assert_allclose
+from scipy.special._test_internal import _dd_exp, _dd_log, _dd_expm1
+
+
+# Each tuple in test_data contains:
+#   (dd_func, xhi, xlo, expected_yhi, expected_ylo)
+# The expected values were computed with mpmath, e.g.
+#
+#   import mpmath
+#   mpmath.mp.dps = 100
+#   xhi = 10.0
+#   xlo = 0.0
+#   x = mpmath.mpf(xhi) + mpmath.mpf(xlo)
+#   y = mpmath.log(x)
+#   expected_yhi = float(y)
+#   expected_ylo = float(y - expected_yhi)
+#
+test_data = [
+    (_dd_exp, -0.3333333333333333, -1.850371707708594e-17,
+     0.7165313105737893, -2.0286948382455594e-17),
+    (_dd_exp, 0.0, 0.0, 1.0, 0.0),
+    (_dd_exp, 10.0, 0.0, 22026.465794806718, -1.3780134700517372e-12),
+    (_dd_log, 0.03125, 0.0, -3.4657359027997265, -4.930038229799327e-18),
+    (_dd_log, 10.0, 0.0, 2.302585092994046, -2.1707562233822494e-16),
+    (_dd_expm1, -1.25, 0.0, -0.7134952031398099, -4.7031321153650186e-17),
+    (_dd_expm1, -0.484375, 0.0, -0.3839178722093218, 7.609376052156984e-18),
+    (_dd_expm1, -0.25, 0.0, -0.22119921692859512, -1.0231869534531498e-17),
+    (_dd_expm1, -0.0625, 0.0, -0.06058693718652421, -7.077887227488846e-19),
+    (_dd_expm1, 0.0, 0.0, 0.0, 0.0),
+    (_dd_expm1, 0.0625, 3.5e-18, 0.06449445891785943, 1.4323095758164254e-18),
+    (_dd_expm1, 0.25, 0.0, 0.2840254166877415, -2.133257464457841e-17),
+    (_dd_expm1, 0.498046875, 0.0, 0.645504254608231, -9.198435524984236e-18),
+    (_dd_expm1, 1.25, 0.0, 2.4903429574618414, -4.604261945372796e-17)
+]
+
+
+@pytest.mark.parametrize('dd_func, xhi, xlo, expected_yhi, expected_ylo',
+                         test_data)
+def test_dd(dd_func, xhi, xlo, expected_yhi, expected_ylo):
+    yhi, ylo = dd_func(xhi, xlo)
+    assert yhi == expected_yhi, (f"high double ({yhi}) does not equal the "
+                                 f"expected value {expected_yhi}")
+    assert_allclose(ylo, expected_ylo, rtol=5e-15)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_digamma.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_digamma.py
new file mode 100644
index 00000000..835ed9ea
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_digamma.py
@@ -0,0 +1,42 @@
+import numpy as np
+from numpy import pi, log, sqrt
+from numpy.testing import assert_, assert_equal
+
+from scipy.special._testutils import FuncData
+import scipy.special as sc
+
+# Euler-Mascheroni constant
+euler = 0.57721566490153286
+
+
+def test_consistency():
+    # Make sure the implementation of digamma for real arguments
+    # agrees with the implementation of digamma for complex arguments.
+
+    # Below about -1e16 the spacing between adjacent doubles exceeds 1, so
+    # every representable value is effectively at one of digamma's poles.
+    x = np.r_[-np.logspace(15, -30, 200), np.logspace(-30, 300, 200)]
+    dataset = np.vstack((x + 0j, sc.digamma(x))).T
+    FuncData(sc.digamma, dataset, 0, 1, rtol=5e-14, nan_ok=True).check()
+
+
+def test_special_values():
+    # Test special values from Gauss's digamma theorem. See
+    #
+    # https://en.wikipedia.org/wiki/Digamma_function
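+    #
+    # For reference (a statement of the theorem itself, not specific to this
+    # test), for integers 0 < p < q Gauss's theorem gives
+    #
+    #   psi(p/q) = -euler - log(2*q) - (pi/2)/tan(pi*p/q)
+    #              + 2*sum(cos(2*pi*n*p/q) * log(sin(pi*n/q))
+    #                      for n in range(1, (q - 1)//2 + 1))
+    #
+    # The dataset below instantiates this identity at several rationals.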
+
+    dataset = [(1, -euler),
+               (0.5, -2*log(2) - euler),
+               (1/3, -pi/(2*sqrt(3)) - 3*log(3)/2 - euler),
+               (1/4, -pi/2 - 3*log(2) - euler),
+               (1/6, -pi*sqrt(3)/2 - 2*log(2) - 3*log(3)/2 - euler),
+               (1/8, -pi/2 - 4*log(2) - (pi + log(2 + sqrt(2)) - log(2 - sqrt(2)))/sqrt(2) - euler)]
+
+    dataset = np.asarray(dataset)
+    FuncData(sc.digamma, dataset, 0, 1, rtol=1e-14).check()
+
+
+def test_nonfinite():
+    pts = [0.0, -0.0, np.inf]
+    std = [-np.inf, np.inf, np.inf]
+    assert_equal(sc.digamma(pts), std)
+    assert_(all(np.isnan(sc.digamma([-np.inf, -1]))))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_ellip_harm.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_ellip_harm.py
new file mode 100644
index 00000000..a97c2468
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_ellip_harm.py
@@ -0,0 +1,278 @@
+#
+# Tests for the ellipsoidal harmonic functions.
+# Distributed under the same license as SciPy itself.
+#
+
+import numpy as np
+from numpy.testing import (assert_equal, assert_almost_equal, assert_allclose,
+                           assert_, suppress_warnings)
+from scipy.special._testutils import assert_func_equal
+from scipy.special import ellip_harm, ellip_harm_2, ellip_normal
+from scipy.integrate import IntegrationWarning
+from numpy import sqrt, pi
+
+
+def test_ellip_potential():
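+    # Checks the expansion of the Coulomb potential 1/|r1 - r2| (with the
+    # points given in ellipsoidal coordinates) in products of interior
+    # (ellip_harm) and exterior (ellip_harm_2) ellipsoidal harmonics,
+    # truncating the series once its terms become negligible.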
+    def change_coefficient(lambda1, mu, nu, h2, k2):
+        x = sqrt(lambda1**2*mu**2*nu**2/(h2*k2))
+        y = sqrt((lambda1**2 - h2)*(mu**2 - h2)*(h2 - nu**2)/(h2*(k2 - h2)))
+        z = sqrt((lambda1**2 - k2)*(k2 - mu**2)*(k2 - nu**2)/(k2*(k2 - h2)))
+        return x, y, z
+
+    def solid_int_ellip(lambda1, mu, nu, n, p, h2, k2):
+        return (ellip_harm(h2, k2, n, p, lambda1)*ellip_harm(h2, k2, n, p, mu)
+               * ellip_harm(h2, k2, n, p, nu))
+
+    def solid_int_ellip2(lambda1, mu, nu, n, p, h2, k2):
+        return (ellip_harm_2(h2, k2, n, p, lambda1)
+                * ellip_harm(h2, k2, n, p, mu)*ellip_harm(h2, k2, n, p, nu))
+
+    def summation(lambda1, mu1, nu1, lambda2, mu2, nu2, h2, k2):
+        tol = 1e-8
+        sum1 = 0
+        for n in range(20):
+            xsum = 0
+            for p in range(1, 2*n+2):
+                xsum += (4*pi*(solid_int_ellip(lambda2, mu2, nu2, n, p, h2, k2)
+                    * solid_int_ellip2(lambda1, mu1, nu1, n, p, h2, k2)) /
+                    (ellip_normal(h2, k2, n, p)*(2*n + 1)))
+            if abs(xsum) < 0.1*tol*abs(sum1):
+                break
+            sum1 += xsum
+        return sum1, xsum
+
+    def potential(lambda1, mu1, nu1, lambda2, mu2, nu2, h2, k2):
+        x1, y1, z1 = change_coefficient(lambda1, mu1, nu1, h2, k2)
+        x2, y2, z2 = change_coefficient(lambda2, mu2, nu2, h2, k2)
+        res = sqrt((x2 - x1)**2 + (y2 - y1)**2 + (z2 - z1)**2)
+        return 1/res
+
+    pts = [
+        (120, sqrt(19), 2, 41, sqrt(17), 2, 15, 25),
+        (120, sqrt(16), 3.2, 21, sqrt(11), 2.9, 11, 20),
+       ]
+
+    with suppress_warnings() as sup:
+        sup.filter(IntegrationWarning, "The occurrence of roundoff error")
+        sup.filter(IntegrationWarning, "The maximum number of subdivisions")
+
+        for p in pts:
+            err_msg = repr(p)
+            exact = potential(*p)
+            result, last_term = summation(*p)
+            assert_allclose(exact, result, atol=0, rtol=1e-8, err_msg=err_msg)
+            assert_(abs(result - exact) < 10*abs(last_term), err_msg)
+
+
+def test_ellip_norm():
+
+    def G01(h2, k2):
+        return 4*pi
+
+    def G11(h2, k2):
+        return 4*pi*h2*k2/3
+
+    def G12(h2, k2):
+        return 4*pi*h2*(k2 - h2)/3
+
+    def G13(h2, k2):
+        return 4*pi*k2*(k2 - h2)/3
+
+    def G22(h2, k2):
+        res = (2*(h2**4 + k2**4) - 4*h2*k2*(h2**2 + k2**2) + 6*h2**2*k2**2 +
+        sqrt(h2**2 + k2**2 - h2*k2)*(-2*(h2**3 + k2**3) + 3*h2*k2*(h2 + k2)))
+        return 16*pi/405*res
+
+    def G21(h2, k2):
+        res = (2*(h2**4 + k2**4) - 4*h2*k2*(h2**2 + k2**2) + 6*h2**2*k2**2
+        + sqrt(h2**2 + k2**2 - h2*k2)*(2*(h2**3 + k2**3) - 3*h2*k2*(h2 + k2)))
+        return 16*pi/405*res
+
+    def G23(h2, k2):
+        return 4*pi*h2**2*k2*(k2 - h2)/15
+
+    def G24(h2, k2):
+        return 4*pi*h2*k2**2*(k2 - h2)/15
+
+    def G25(h2, k2):
+        return 4*pi*h2*k2*(k2 - h2)**2/15
+
+    def G32(h2, k2):
+        res = (16*(h2**4 + k2**4) - 36*h2*k2*(h2**2 + k2**2) + 46*h2**2*k2**2
+        + sqrt(4*(h2**2 + k2**2) - 7*h2*k2)*(-8*(h2**3 + k2**3) +
+        11*h2*k2*(h2 + k2)))
+        return 16*pi/13125*k2*h2*res
+
+    def G31(h2, k2):
+        res = (16*(h2**4 + k2**4) - 36*h2*k2*(h2**2 + k2**2) + 46*h2**2*k2**2
+        + sqrt(4*(h2**2 + k2**2) - 7*h2*k2)*(8*(h2**3 + k2**3) -
+        11*h2*k2*(h2 + k2)))
+        return 16*pi/13125*h2*k2*res
+
+    def G34(h2, k2):
+        res = (6*h2**4 + 16*k2**4 - 12*h2**3*k2 - 28*h2*k2**3 + 34*h2**2*k2**2
+        + sqrt(h2**2 + 4*k2**2 - h2*k2)*(-6*h2**3 - 8*k2**3 + 9*h2**2*k2 +
+                                            13*h2*k2**2))
+        return 16*pi/13125*h2*(k2 - h2)*res
+
+    def G33(h2, k2):
+        res = (6*h2**4 + 16*k2**4 - 12*h2**3*k2 - 28*h2*k2**3 + 34*h2**2*k2**2
+        + sqrt(h2**2 + 4*k2**2 - h2*k2)*(6*h2**3 + 8*k2**3 - 9*h2**2*k2 -
+        13*h2*k2**2))
+        return 16*pi/13125*h2*(k2 - h2)*res
+
+    def G36(h2, k2):
+        res = (16*h2**4 + 6*k2**4 - 28*h2**3*k2 - 12*h2*k2**3 + 34*h2**2*k2**2
+        + sqrt(4*h2**2 + k2**2 - h2*k2)*(-8*h2**3 - 6*k2**3 + 13*h2**2*k2 +
+        9*h2*k2**2))
+        return 16*pi/13125*k2*(k2 - h2)*res
+
+    def G35(h2, k2):
+        res = (16*h2**4 + 6*k2**4 - 28*h2**3*k2 - 12*h2*k2**3 + 34*h2**2*k2**2
+        + sqrt(4*h2**2 + k2**2 - h2*k2)*(8*h2**3 + 6*k2**3 - 13*h2**2*k2 -
+        9*h2*k2**2))
+        return 16*pi/13125*k2*(k2 - h2)*res
+
+    def G37(h2, k2):
+        return 4*pi*h2**2*k2**2*(k2 - h2)**2/105
+
+    known_funcs = {(0, 1): G01, (1, 1): G11, (1, 2): G12, (1, 3): G13,
+                   (2, 1): G21, (2, 2): G22, (2, 3): G23, (2, 4): G24,
+                   (2, 5): G25, (3, 1): G31, (3, 2): G32, (3, 3): G33,
+                   (3, 4): G34, (3, 5): G35, (3, 6): G36, (3, 7): G37}
+
+    def _ellip_norm(n, p, h2, k2):
+        func = known_funcs[n, p]
+        return func(h2, k2)
+    _ellip_norm = np.vectorize(_ellip_norm)
+
+    def ellip_normal_known(h2, k2, n, p):
+        return _ellip_norm(n, p, h2, k2)
+
+    # generate both large and small h2 < k2 pairs
+    np.random.seed(1234)
+    h2 = np.random.pareto(0.5, size=1)
+    k2 = h2 * (1 + np.random.pareto(0.5, size=h2.size))
+
+    points = []
+    for n in range(4):
+        for p in range(1, 2*n+2):
+            points.append((h2, k2, np.full(h2.size, n), np.full(h2.size, p)))
+    points = np.array(points)
+    with suppress_warnings() as sup:
+        sup.filter(IntegrationWarning, "The occurrence of roundoff error")
+        assert_func_equal(ellip_normal, ellip_normal_known, points, rtol=1e-12)
+
+
+def test_ellip_harm_2():
+
+    def I1(h2, k2, s):
+        res = (ellip_harm_2(h2, k2, 1, 1, s)/(3 * ellip_harm(h2, k2, 1, 1, s))
+        + ellip_harm_2(h2, k2, 1, 2, s)/(3 * ellip_harm(h2, k2, 1, 2, s)) +
+        ellip_harm_2(h2, k2, 1, 3, s)/(3 * ellip_harm(h2, k2, 1, 3, s)))
+        return res
+
+    with suppress_warnings() as sup:
+        sup.filter(IntegrationWarning, "The occurrence of roundoff error")
+        assert_almost_equal(I1(5, 8, 10), 1/(10*sqrt((100-5)*(100-8))))
+
+        # Values produced by code from arXiv:1204.0267
+        assert_almost_equal(ellip_harm_2(5, 8, 2, 1, 10), 0.00108056853382)
+        assert_almost_equal(ellip_harm_2(5, 8, 2, 2, 10), 0.00105820513809)
+        assert_almost_equal(ellip_harm_2(5, 8, 2, 3, 10), 0.00106058384743)
+        assert_almost_equal(ellip_harm_2(5, 8, 2, 4, 10), 0.00106774492306)
+        assert_almost_equal(ellip_harm_2(5, 8, 2, 5, 10), 0.00107976356454)
+
+
+def test_ellip_harm():
+
+    def E01(h2, k2, s):
+        return 1
+
+    def E11(h2, k2, s):
+        return s
+
+    def E12(h2, k2, s):
+        return sqrt(abs(s*s - h2))
+
+    def E13(h2, k2, s):
+        return sqrt(abs(s*s - k2))
+
+    def E21(h2, k2, s):
+        return s*s - 1/3*((h2 + k2) + sqrt(abs((h2 + k2)*(h2 + k2)-3*h2*k2)))
+
+    def E22(h2, k2, s):
+        return s*s - 1/3*((h2 + k2) - sqrt(abs((h2 + k2)*(h2 + k2)-3*h2*k2)))
+
+    def E23(h2, k2, s):
+        return s * sqrt(abs(s*s - h2))
+
+    def E24(h2, k2, s):
+        return s * sqrt(abs(s*s - k2))
+
+    def E25(h2, k2, s):
+        return sqrt(abs((s*s - h2)*(s*s - k2)))
+
+    def E31(h2, k2, s):
+        return s*s*s - (s/5)*(2*(h2 + k2) + sqrt(4*(h2 + k2)*(h2 + k2) -
+        15*h2*k2))
+
+    def E32(h2, k2, s):
+        return s*s*s - (s/5)*(2*(h2 + k2) - sqrt(4*(h2 + k2)*(h2 + k2) -
+        15*h2*k2))
+
+    def E33(h2, k2, s):
+        return sqrt(abs(s*s - h2))*(s*s - 1/5*((h2 + 2*k2) + sqrt(abs((h2 +
+        2*k2)*(h2 + 2*k2) - 5*h2*k2))))
+
+    def E34(h2, k2, s):
+        return sqrt(abs(s*s - h2))*(s*s - 1/5*((h2 + 2*k2) - sqrt(abs((h2 +
+        2*k2)*(h2 + 2*k2) - 5*h2*k2))))
+
+    def E35(h2, k2, s):
+        return sqrt(abs(s*s - k2))*(s*s - 1/5*((2*h2 + k2) + sqrt(abs((2*h2
+        + k2)*(2*h2 + k2) - 5*h2*k2))))
+
+    def E36(h2, k2, s):
+        return sqrt(abs(s*s - k2))*(s*s - 1/5*((2*h2 + k2) - sqrt(abs((2*h2
+        + k2)*(2*h2 + k2) - 5*h2*k2))))
+
+    def E37(h2, k2, s):
+        return s * sqrt(abs((s*s - h2)*(s*s - k2)))
+
+    assert_equal(ellip_harm(5, 8, 1, 2, 2.5, 1, 1),
+                 ellip_harm(5, 8, 1, 2, 2.5))
+
+    known_funcs = {(0, 1): E01, (1, 1): E11, (1, 2): E12, (1, 3): E13,
+                   (2, 1): E21, (2, 2): E22, (2, 3): E23, (2, 4): E24,
+                   (2, 5): E25, (3, 1): E31, (3, 2): E32, (3, 3): E33,
+                   (3, 4): E34, (3, 5): E35, (3, 6): E36, (3, 7): E37}
+
+    point_ref = []
+
+    def ellip_harm_known(h2, k2, n, p, s):
+        for i in range(h2.size):
+            func = known_funcs[(int(n[i]), int(p[i]))]
+            point_ref.append(func(h2[i], k2[i], s[i]))
+        return point_ref
+
+    np.random.seed(1234)
+    h2 = np.random.pareto(0.5, size=30)
+    k2 = h2*(1 + np.random.pareto(0.5, size=h2.size))
+    s = np.random.pareto(0.5, size=h2.size)
+    points = []
+    for i in range(h2.size):
+        for n in range(4):
+            for p in range(1, 2*n+2):
+                points.append((h2[i], k2[i], n, p, s[i]))
+    points = np.array(points)
+    assert_func_equal(ellip_harm, ellip_harm_known, points, rtol=1e-12)
+
+
+def test_ellip_harm_invalid_p():
+    # Regression test. This should return nan.
+    n = 4
+    # Make p > 2*n + 1.
+    p = 2*n + 2
+    result = ellip_harm(0.5, 2.0, n, p, 0.2)
+    assert np.isnan(result)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_erfinv.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_erfinv.py
new file mode 100644
index 00000000..98739b93
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_erfinv.py
@@ -0,0 +1,89 @@
+import numpy as np
+from numpy.testing import assert_allclose, assert_equal
+import pytest
+
+import scipy.special as sc
+
+
+class TestInverseErrorFunction:
+    def test_complement(self):
+        # Test erfcinv(1 - x) == erfinv(x)
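+        # (If erf(y) = x, then erfc(y) = 1 - x, so applying erfcinv to both
+        # sides gives erfcinv(1 - x) = y = erfinv(x).)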
+        x = np.linspace(-1, 1, 101)
+        assert_allclose(sc.erfcinv(1 - x), sc.erfinv(x), rtol=0, atol=1e-15)
+
+    def test_literal_values(self):
+        # The expected values were calculated with mpmath:
+        #
+        #   import mpmath
+        #   mpmath.mp.dps = 200
+        #   for y in [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]:
+        #       x = mpmath.erfinv(y)
+        #       print(x)
+        #
+        y = np.array([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
+        actual = sc.erfinv(y)
+        expected = [
+            0.0,
+            0.08885599049425769,
+            0.1791434546212917,
+            0.2724627147267543,
+            0.37080715859355795,
+            0.4769362762044699,
+            0.5951160814499948,
+            0.7328690779592167,
+            0.9061938024368233,
+            1.1630871536766743,
+        ]
+        assert_allclose(actual, expected, rtol=0, atol=1e-15)
+
+    @pytest.mark.parametrize(
+        'f, x, y',
+        [
+            (sc.erfinv, -1, -np.inf),
+            (sc.erfinv, 0, 0),
+            (sc.erfinv, 1, np.inf),
+            (sc.erfinv, -100, np.nan),
+            (sc.erfinv, 100, np.nan),
+            (sc.erfcinv, 0, np.inf),
+            (sc.erfcinv, 1, -0.0),
+            (sc.erfcinv, 2, -np.inf),
+            (sc.erfcinv, -100, np.nan),
+            (sc.erfcinv, 100, np.nan),
+        ],
+        ids=[
+            'erfinv at lower bound',
+            'erfinv at midpoint',
+            'erfinv at upper bound',
+            'erfinv below lower bound',
+            'erfinv above upper bound',
+            'erfcinv at lower bound',
+            'erfcinv at midpoint',
+            'erfcinv at upper bound',
+            'erfcinv below lower bound',
+            'erfcinv above upper bound',
+        ]
+    )
+    def test_domain_bounds(self, f, x, y):
+        assert_equal(f(x), y)
+
+    def test_erfinv_asympt(self):
+        # regression test for gh-12758: erfinv(x) loses precision at small x
+        # expected values precomputed with mpmath:
+        # >>> mpmath.mp.dps = 100
+        # >>> expected = [float(mpmath.erfinv(t)) for t in x]
+        x = np.array([1e-20, 1e-15, 1e-14, 1e-10, 1e-8, 0.9e-7, 1.1e-7, 1e-6])
+        expected = np.array([8.86226925452758e-21,
+                             8.862269254527581e-16,
+                             8.86226925452758e-15,
+                             8.862269254527581e-11,
+                             8.86226925452758e-09,
+                             7.97604232907484e-08,
+                             9.74849617998037e-08,
+                             8.8622692545299e-07])
+        assert_allclose(sc.erfinv(x), expected,
+                        rtol=1e-15)
+
+        # also test the roundtrip consistency
+        assert_allclose(sc.erf(sc.erfinv(x)),
+                        x,
+                        rtol=5e-15)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_exponential_integrals.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_exponential_integrals.py
new file mode 100644
index 00000000..9354c22b
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_exponential_integrals.py
@@ -0,0 +1,75 @@
+import pytest
+
+import numpy as np
+from numpy.testing import assert_allclose
+import scipy.special as sc
+
+
+class TestExp1:
+
+    def test_branch_cut(self):
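+        # exp1 is real on the positive real axis, so by Schwarz reflection
+        # its values just above and below the cut on (-inf, 0) are complex
+        # conjugates of one another.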
+        assert np.isnan(sc.exp1(-1))
+        assert sc.exp1(complex(-1, 0)).imag == (
+            -sc.exp1(complex(-1, -0.0)).imag
+        )
+
+        assert_allclose(
+            sc.exp1(complex(-1, 0)),
+            sc.exp1(-1 + 1e-20j),
+            atol=0,
+            rtol=1e-15
+        )
+        assert_allclose(
+            sc.exp1(complex(-1, -0.0)),
+            sc.exp1(-1 - 1e-20j),
+            atol=0,
+            rtol=1e-15
+        )
+
+    def test_834(self):
+        # Regression test for #834
+        a = sc.exp1(-complex(19.9999990))
+        b = sc.exp1(-complex(19.9999991))
+        assert_allclose(a.imag, b.imag, atol=0, rtol=1e-15)
+
+
+class TestExpi:
+
+    @pytest.mark.parametrize('result', [
+        sc.expi(complex(-1, 0)),
+        sc.expi(complex(-1, -0.0)),
+        sc.expi(-1)
+    ])
+    def test_branch_cut(self, result):
+        desired = -0.21938393439552027368  # Computed using Mpmath
+        assert_allclose(result, desired, atol=0, rtol=1e-14)
+
+    def test_near_branch_cut(self):
+        lim_from_above = sc.expi(-1 + 1e-20j)
+        lim_from_below = sc.expi(-1 - 1e-20j)
+        assert_allclose(
+            lim_from_above.real,
+            lim_from_below.real,
+            atol=0,
+            rtol=1e-15
+        )
+        assert_allclose(
+            lim_from_above.imag,
+            -lim_from_below.imag,
+            atol=0,
+            rtol=1e-15
+        )
+
+    def test_continuity_on_positive_real_axis(self):
+        assert_allclose(
+            sc.expi(complex(1, 0)),
+            sc.expi(complex(1, -0.0)),
+            atol=0,
+            rtol=1e-15
+        )
+
+
+class TestExpn:
+
+    def test_out_of_domain(self):
+        assert all(np.isnan([sc.expn(-1, 1.0), sc.expn(1, -1.0)]))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_faddeeva.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_faddeeva.py
new file mode 100644
index 00000000..8868f66c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_faddeeva.py
@@ -0,0 +1,85 @@
+import pytest
+
+import numpy as np
+from numpy.testing import assert_allclose
+import scipy.special as sc
+from scipy.special._testutils import FuncData
+
+
+class TestVoigtProfile:
+
+    @pytest.mark.parametrize('x, sigma, gamma', [
+        (np.nan, 1, 1),
+        (0, np.nan, 1),
+        (0, 1, np.nan),
+        (1, np.nan, 0),
+        (np.nan, 1, 0),
+        (1, 0, np.nan),
+        (np.nan, 0, 1),
+        (np.nan, 0, 0)
+    ])
+    def test_nan(self, x, sigma, gamma):
+        assert np.isnan(sc.voigt_profile(x, sigma, gamma))
+
+    @pytest.mark.parametrize('x, desired', [
+        (-np.inf, 0),
+        (np.inf, 0)
+    ])
+    def test_inf(self, x, desired):
+        assert sc.voigt_profile(x, 1, 1) == desired
+
+    def test_against_mathematica(self):
+        # Results obtained from Mathematica by computing
+        #
+        # PDF[VoigtDistribution[gamma, sigma], x]
+        #
+        points = np.array([
+            [-7.89, 45.06, 6.66, 0.0077921073660388806401],
+            [-0.05, 7.98, 24.13, 0.012068223646769913478],
+            [-13.98, 16.83, 42.37, 0.0062442236362132357833],
+            [-12.66, 0.21, 6.32, 0.010052516161087379402],
+            [11.34, 4.25, 21.96, 0.0113698923627278917805],
+            [-11.56, 20.40, 30.53, 0.0076332760432097464987],
+            [-9.17, 25.61, 8.32, 0.011646345779083005429],
+            [16.59, 18.05, 2.50, 0.013637768837526809181],
+            [9.11, 2.12, 39.33, 0.0076644040807277677585],
+            [-43.33, 0.30, 45.68, 0.0036680463875330150996]
+        ])
+        FuncData(
+            sc.voigt_profile,
+            points,
+            (0, 1, 2),
+            3,
+            atol=0,
+            rtol=1e-15
+        ).check()
+
+    def test_symmetry(self):
+        x = np.linspace(0, 10, 20)
+        assert_allclose(
+            sc.voigt_profile(x, 1, 1),
+            sc.voigt_profile(-x, 1, 1),
+            rtol=1e-15,
+            atol=0
+        )
+
+    @pytest.mark.parametrize('x, sigma, gamma, desired', [
+        (0, 0, 0, np.inf),
+        (1, 0, 0, 0)
+    ])
+    def test_corner_cases(self, x, sigma, gamma, desired):
+        assert sc.voigt_profile(x, sigma, gamma) == desired
+
+    @pytest.mark.parametrize('sigma1, gamma1, sigma2, gamma2', [
+        (0, 1, 1e-16, 1),
+        (1, 0, 1, 1e-16),
+        (0, 0, 1e-16, 1e-16)
+    ])
+    def test_continuity(self, sigma1, gamma1, sigma2, gamma2):
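+        # The profile should vary continuously as sigma or gamma tends to
+        # zero, i.e. approaching the pure Lorentzian (sigma = 0) and pure
+        # Gaussian (gamma = 0) limits.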
+        x = np.linspace(1, 10, 20)
+        assert_allclose(
+            sc.voigt_profile(x, sigma1, gamma1),
+            sc.voigt_profile(x, sigma2, gamma2),
+            rtol=1e-16,
+            atol=1e-16
+        )
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_gamma.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_gamma.py
new file mode 100644
index 00000000..2e3fbd17
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_gamma.py
@@ -0,0 +1,12 @@
+import numpy as np
+import scipy.special as sc
+
+
+class TestRgamma:
+
+    def test_gh_11315(self):
+        assert sc.rgamma(-35) == 0
+
+    def test_rgamma_zeros(self):
+        x = np.array([0, -10, -100, -1000, -10000])
+        assert np.all(sc.rgamma(x) == 0)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_gammainc.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_gammainc.py
new file mode 100644
index 00000000..aae34e5c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_gammainc.py
@@ -0,0 +1,136 @@
+import pytest
+
+import numpy as np
+from numpy.testing import assert_allclose, assert_array_equal
+
+import scipy.special as sc
+from scipy.special._testutils import FuncData
+
+
+INVALID_POINTS = [
+    (1, -1),
+    (0, 0),
+    (-1, 1),
+    (np.nan, 1),
+    (1, np.nan)
+]
+
+
+class TestGammainc:
+
+    @pytest.mark.parametrize('a, x', INVALID_POINTS)
+    def test_domain(self, a, x):
+        assert np.isnan(sc.gammainc(a, x))
+
+    def test_a_eq_0_x_gt_0(self):
+        assert sc.gammainc(0, 1) == 1
+
+    @pytest.mark.parametrize('a, x, desired', [
+        (np.inf, 1, 0),
+        (np.inf, 0, 0),
+        (np.inf, np.inf, np.nan),
+        (1, np.inf, 1)
+    ])
+    def test_infinite_arguments(self, a, x, desired):
+        result = sc.gammainc(a, x)
+        if np.isnan(desired):
+            assert np.isnan(result)
+        else:
+            assert result == desired
+
+    def test_infinite_limits(self):
+        # Test that large arguments converge to the hard-coded limits
+        # at infinity.
+        assert_allclose(
+            sc.gammainc(1000, 100),
+            sc.gammainc(np.inf, 100),
+            atol=1e-200,  # Use `atol` since the function converges to 0.
+            rtol=0
+        )
+        assert sc.gammainc(100, 1000) == sc.gammainc(100, np.inf)
+
+    def test_x_zero(self):
+        a = np.arange(1, 10)
+        assert_array_equal(sc.gammainc(a, 0), 0)
+
+    def test_limit_check(self):
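+        # For fixed x > 0, gammainc(a, x) -> gammainc(0, x) = 1 as a -> 0+.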
+        result = sc.gammainc(1e-10, 1)
+        limit = sc.gammainc(0, 1)
+        assert np.isclose(result, limit)
+
+    def gammainc_line(self, x):
+        # On the line a = x a simpler asymptotic expansion (an analog of
+        # DLMF 8.12.15) is available.
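+        # Leading behavior implied by the coefficients below:
+        #   gammainc(x, x) ~ 1/2 + 1/(3*sqrt(2*pi*x)) + O(x**-1.5)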
+        c = np.array([-1/3, -1/540, 25/6048, 101/155520,
+                      -3184811/3695155200, -2745493/8151736420])
+        res = 0
+        xfac = 1
+        for ck in c:
+            res -= ck*xfac
+            xfac /= x
+        res /= np.sqrt(2*np.pi*x)
+        res += 0.5
+        return res
+
+    def test_line(self):
+        x = np.logspace(np.log10(25), 300, 500)
+        a = x
+        dataset = np.vstack((a, x, self.gammainc_line(x))).T
+        FuncData(sc.gammainc, dataset, (0, 1), 2, rtol=1e-11).check()
+
+    def test_roundtrip(self):
+        a = np.logspace(-5, 10, 100)
+        x = np.logspace(-5, 10, 100)
+
+        y = sc.gammaincinv(a, sc.gammainc(a, x))
+        assert_allclose(x, y, rtol=1e-10)
+
+
+class TestGammaincc:
+
+    @pytest.mark.parametrize('a, x', INVALID_POINTS)
+    def test_domain(self, a, x):
+        assert np.isnan(sc.gammaincc(a, x))
+
+    def test_a_eq_0_x_gt_0(self):
+        assert sc.gammaincc(0, 1) == 0
+
+    @pytest.mark.parametrize('a, x, desired', [
+        (np.inf, 1, 1),
+        (np.inf, 0, 1),
+        (np.inf, np.inf, np.nan),
+        (1, np.inf, 0)
+    ])
+    def test_infinite_arguments(self, a, x, desired):
+        result = sc.gammaincc(a, x)
+        if np.isnan(desired):
+            assert np.isnan(result)
+        else:
+            assert result == desired
+
+    def test_infinite_limits(self):
+        # Test that large arguments converge to the hard-coded limits
+        # at infinity.
+        assert sc.gammaincc(1000, 100) == sc.gammaincc(np.inf, 100)
+        assert_allclose(
+            sc.gammaincc(100, 1000),
+            sc.gammaincc(100, np.inf),
+            atol=1e-200,  # Use `atol` since the function converges to 0.
+            rtol=0
+        )
+
+    def test_limit_check(self):
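+        # For fixed x > 0, gammaincc(a, x) -> gammaincc(0, x) = 0 as a -> 0+.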
+        result = sc.gammaincc(1e-10, 1)
+        limit = sc.gammaincc(0, 1)
+        assert np.isclose(result, limit)
+
+    def test_x_zero(self):
+        a = np.arange(1, 10)
+        assert_array_equal(sc.gammaincc(a, 0), 1)
+
+    def test_roundtrip(self):
+        a = np.logspace(-5, 10, 100)
+        x = np.logspace(-5, 10, 100)
+
+        y = sc.gammainccinv(a, sc.gammaincc(a, x))
+        assert_allclose(x, y, rtol=1e-14)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_hyp2f1.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_hyp2f1.py
new file mode 100644
index 00000000..b3f6e099
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_hyp2f1.py
@@ -0,0 +1,2180 @@
+"""Tests for hyp2f1 for complex values.
+
+Author: Albert Steppi, with credit to Adam Kullberg (FormerPhycisist) for
+the implementation of mp_hyp2f1 below, which modifies mpmath's hyp2f1 to
+return the same branch as scipy's on the standard branch cut.
+"""
+
+import sys
+import pytest
+import numpy as np
+from typing import NamedTuple
+from numpy.testing import assert_allclose
+
+from scipy.special import hyp2f1
+from scipy.special._testutils import check_version, MissingModule
+
+
+try:
+    import mpmath
+except ImportError:
+    mpmath = MissingModule("mpmath")
+
+
+def mp_hyp2f1(a, b, c, z):
+    """Return mpmath hyp2f1 calculated on same branch as scipy hyp2f1.
+
+    For most values of a, b, c, mpmath returns the x - 0j branch of hyp2f1 on
+    the branch cut x = (1, inf), whereas scipy's hyp2f1 calculates the x + 0j
+    branch. Thus, to generate the right comparison values on the branch cut,
+    we evaluate mpmath.hyp2f1 at x + 1e-15j.
+
+    The exception occurs when c - a or c - b is a non-positive integer, in
+    which case both mpmath and scipy calculate the x + 0j branch on the
+    branch cut. When this happens, mpmath.hyp2f1 is evaluated at the
+    original z point.
+    """
+    on_branch_cut = z.real > 1.0 and abs(z.imag) < 1.0e-15
+    cond1 = abs(c - a - round(c - a)) < 1.0e-15 and round(c - a) <= 0
+    cond2 = abs(c - b - round(c - b)) < 1.0e-15 and round(c - b) <= 0
+    # Make sure imaginary part is *exactly* zero
+    if on_branch_cut:
+        z = z.real + 0.0j
+    if on_branch_cut and not (cond1 or cond2):
+        z_mpmath = z.real + 1.0e-15j
+    else:
+        z_mpmath = z
+    return complex(mpmath.hyp2f1(a, b, c, z_mpmath))
+
+
+class Hyp2f1TestCase(NamedTuple):
+    a: float
+    b: float
+    c: float
+    z: complex
+    expected: complex
+    rtol: float
+
+
+class TestHyp2f1:
+    """Tests for hyp2f1 for complex values.
+
+    Expected values for test cases were computed using mpmath. See
+    `scipy.special._precompute.hyp2f1_data`. The verbose style of specifying
+    test cases is used for readability and to make it easier to mark individual
+    cases as expected to fail. Expected failures are used to highlight cases
+    where improvements are needed. See
+    `scipy.special._precompute.hyp2f1_data.make_hyp2f1_test_cases` for a
+    function to generate the boilerplate for the test cases.
+
+    Assertions have been added to each test to ensure that the test cases match
+    the situations that are intended. A final test `test_test_hyp2f1` checks
+    that the expected values in the test cases actually match what is computed
+    by mpmath. That test is marked slow, even though it isn't particularly
+    slow, so that it won't run by default on continuous integration builds.
+    """
+    @pytest.mark.parametrize(
+        "hyp2f1_test_case",
+        [
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=0.5,
+                    b=0.2,
+                    c=-10,
+                    z=0.2 + 0.2j,
+                    expected=np.inf + 0j,
+                    rtol=0
+                )
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=0.5,
+                    b=0.2,
+                    c=-10,
+                    z=0 + 0j,
+                    expected=1 + 0j,
+                    rtol=0
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=0.5,
+                    b=0,
+                    c=-10,
+                    z=0.2 + 0.2j,
+                    expected=1 + 0j,
+                    rtol=0
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=0.5,
+                    b=0,
+                    c=0,
+                    z=0.2 + 0.2j,
+                    expected=1 + 0j,
+                    rtol=0,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=0.5,
+                    b=0.2,
+                    c=0,
+                    z=0.2 + 0.2j,
+                    expected=np.inf + 0j,
+                    rtol=0,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=0.5,
+                    b=0.2,
+                    c=0,
+                    z=0 + 0j,
+                    expected=np.nan + 0j,
+                    rtol=0,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=0.5,
+                    b=-5,
+                    c=-10,
+                    z=0.2 + 0.2j,
+                    expected=(1.0495404166666666+0.05708208333333334j),
+                    rtol=1e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=0.5,
+                    b=-10,
+                    c=-10,
+                    z=0.2 + 0.2j,
+                    expected=(1.092966013125+0.13455014673750001j),
+                    rtol=1e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-10,
+                    b=-20,
+                    c=-10,
+                    z=0.2 + 0.2j,
+                    expected=(-0.07712512000000005+0.12752814080000005j),
+                    rtol=1e-13,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-1,
+                    b=3.2,
+                    c=-1,
+                    z=0.2 + 0.2j,
+                    expected=(1.6400000000000001+0.6400000000000001j),
+                    rtol=1e-13,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-2,
+                    b=1.2,
+                    c=-4,
+                    z=1 + 0j,
+                    expected=1.8200000000000001 + 0j,
+                    rtol=1e-15,
+                ),
+            ),
+        ]
+    )
+    def test_c_non_positive_int(self, hyp2f1_test_case):
+        a, b, c, z, expected, rtol = hyp2f1_test_case
+        assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
+
+    @pytest.mark.parametrize(
+        "hyp2f1_test_case",
+        [
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=0.5,
+                    b=0.2,
+                    c=1.5,
+                    z=1 + 0j,
+                    expected=1.1496439092239847 + 0j,
+                    rtol=1e-15
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=12.3,
+                    b=8.0,
+                    c=20.31,
+                    z=1 + 0j,
+                    expected=69280986.75273195 + 0j,
+                    rtol=1e-15
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=290.2,
+                    b=321.5,
+                    c=700.1,
+                    z=1 + 0j,
+                    expected=1.3396562400934e117 + 0j,
+                    rtol=1e-12,
+                ),
+            ),
+            # Note that here even mpmath produces different results for
+            # results that should be equivalent.
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=9.2,
+                    b=621.5,
+                    c=700.1,
+                    z=(1+0j),
+                    expected=(952726652.4158565+0j),
+                    rtol=5e-13,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=621.5,
+                    b=9.2,
+                    c=700.1,
+                    z=(1+0j),
+                    expected=(952726652.4160284+0j),
+                    rtol=5e-12,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-101.2,
+                    b=-400.4,
+                    c=-172.1,
+                    z=(1+0j),
+                    expected=(2.2253618341394838e+37+0j),
+                    rtol=1e-13,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-400.4,
+                    b=-101.2,
+                    c=-172.1,
+                    z=(1+0j),
+                    expected=(2.2253618341394838e+37+0j),
+                    rtol=5e-13,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=172.5,
+                    b=-201.3,
+                    c=151.2,
+                    z=(1+0j),
+                    expected=(7.072266653650905e-135+0j),
+                    rtol=5e-13,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-201.3,
+                    b=172.5,
+                    c=151.2,
+                    z=(1+0j),
+                    expected=(7.072266653650905e-135+0j),
+                    rtol=5e-13,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-102.1,
+                    b=-20.3,
+                    c=1.3,
+                    z=1 + 0j,
+                    expected=2.7899070752746906e22 + 0j,
+                    rtol=3e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-202.6,
+                    b=60.3,
+                    c=1.5,
+                    z=1 + 0j,
+                    expected=-1.3113641413099326e-56 + 0j,
+                    rtol=1e-12,
+                ),
+            ),
+        ],
+    )
+    def test_unital_argument(self, hyp2f1_test_case):
+        """Tests for case z = 1, c - a - b > 0.
+
+        Expected answers computed using mpmath.
+        """
+        a, b, c, z, expected, rtol = hyp2f1_test_case
+        assert z == 1 and c - a - b > 0  # Tests the test
+        assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
+
+    @pytest.mark.parametrize(
+        "hyp2f1_test_case",
+        [
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=0.5,
+                    b=0.2,
+                    c=1.3,
+                    z=-1 + 0j,
+                    expected=0.9428846409614143 + 0j,
+                    rtol=1e-15),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=12.3,
+                    b=8.0,
+                    c=5.300000000000001,
+                    z=-1 + 0j,
+                    expected=-4.845809986595704e-06 + 0j,
+                    rtol=1e-15
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=221.5,
+                    b=90.2,
+                    c=132.3,
+                    z=-1 + 0j,
+                    expected=2.0490488728377282e-42 + 0j,
+                    rtol=1e-7,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-102.1,
+                    b=-20.3,
+                    c=-80.8,
+                    z=-1 + 0j,
+                    expected=45143784.46783885 + 0j,
+                    rtol=1e-7,
+                ),
+                marks=pytest.mark.xfail(
+                    condition=sys.maxsize < 2**32,
+                    reason="Fails on 32 bit.",
+                )
+            ),
+        ],
+    )
+    def test_special_case_z_near_minus_1(self, hyp2f1_test_case):
+        """Tests for case z ~ -1, c ~ 1 + a - b
+
+        Expected answers computed using mpmath.
+        """
+        a, b, c, z, expected, rtol = hyp2f1_test_case
+        assert abs(1 + a - b - c) < 1e-15 and abs(z + 1) < 1e-15
+        assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
+
+    @pytest.mark.parametrize(
+        "hyp2f1_test_case",
+        [
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-4,
+                    b=2.02764642551431,
+                    c=1.0561196186065624,
+                    z=(0.9473684210526314-0.10526315789473695j),
+                    expected=(0.0031961077109535375-0.0011313924606557173j),
+                    rtol=1e-12,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-8,
+                    b=-7.937789122896016,
+                    c=-15.964218273004214,
+                    z=(2-0.10526315789473695j),
+                    expected=(0.005543763196412503-0.0025948879065698306j),
+                    rtol=5e-13,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-8,
+                    b=8.095813935368371,
+                    c=4.0013768449590685,
+                    z=(0.9473684210526314-0.10526315789473695j),
+                    expected=(-0.0003054674127221263-9.261359291755414e-05j),
+                    rtol=1e-10,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-4,
+                    b=-3.956227226099288,
+                    c=-3.9316537064827854,
+                    z=(1.1578947368421053-0.3157894736842106j),
+                    expected=(-0.0020809502580892937-0.0041877333232365095j),
+                    rtol=5e-12,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=2.02764642551431,
+                    b=-4,
+                    c=2.050308316530781,
+                    z=(0.9473684210526314-0.10526315789473695j),
+                    expected=(0.0011282435590058734+0.0002027062303465851j),
+                    rtol=5e-13,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-7.937789122896016,
+                    b=-8,
+                    c=-15.964218273004214,
+                    z=(1.3684210526315788+0.10526315789473673j),
+                    expected=(-9.134907719238265e-05-0.00040219233987390723j),
+                    rtol=5e-12,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=4.080187217753502,
+                    b=-4,
+                    c=4.0013768449590685,
+                    z=(0.9473684210526314-0.10526315789473695j),
+                    expected=(-0.000519013062087489-0.0005855883076830948j),
+                    rtol=5e-12,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-10000,
+                    b=2.2,
+                    c=93459345.3,
+                    z=(2+2j),
+                    expected=(0.9995292071559088-0.00047047067522659253j),
+                    rtol=1e-12,
+                ),
+            ),
+        ]
+    )
+    def test_a_b_negative_int(self, hyp2f1_test_case):
+        a, b, c, z, expected, rtol = hyp2f1_test_case
+        assert a == int(a) and a < 0 or b == int(b) and b < 0  # Tests the test
+        assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
+
+    @pytest.mark.parametrize(
+        "hyp2f1_test_case",
+        [
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-0.5,
+                    b=-0.9629749245209605,
+                    c=-15.5,
+                    z=(1.1578947368421053-1.1578947368421053j),
+                    expected=(0.9778506962676361+0.044083801141231616j),
+                    rtol=1e-12,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=8.5,
+                    b=-3.9316537064827854,
+                    c=1.5,
+                    z=(0.9473684210526314-0.10526315789473695j),
+                    expected=(4.0793167523167675-10.11694246310966j),
+                    rtol=6e-12,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=8.5,
+                    b=-0.9629749245209605,
+                    c=2.5,
+                    z=(1.1578947368421053-0.10526315789473695j),
+                    expected=(-2.9692999501916915+0.6394599899845594j),
+                    rtol=1e-11,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-0.5,
+                    b=-0.9629749245209605,
+                    c=-15.5,
+                    z=(1.5789473684210522-1.1578947368421053j),
+                    expected=(0.9493076367106102-0.04316852977183447j),
+                    rtol=1e-11,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-0.9220024191881196,
+                    b=-0.5,
+                    c=-15.5,
+                    z=(0.5263157894736841+0.10526315789473673j),
+                    expected=(0.9844377175631795-0.003120587561483841j),
+                    rtol=1e-10,
+                ),
+            ),
+        ],
+    )
+    def test_a_b_neg_int_after_euler_hypergeometric_transformation(
+        self, hyp2f1_test_case
+    ):
+        a, b, c, z, expected, rtol = hyp2f1_test_case
+        assert (  # Tests the test
+            (abs(c - a - int(c - a)) < 1e-15 and c - a < 0) or
+            (abs(c - b - int(c - b)) < 1e-15 and c - b < 0)
+        )
+        assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
+
+    @pytest.mark.parametrize(
+        "hyp2f1_test_case",
+        [
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-0.9220024191881196,
+                    b=-0.9629749245209605,
+                    c=-15.963511401609862,
+                    z=(0.10526315789473673-0.3157894736842106j),
+                    expected=(0.9941449585778349+0.01756335047931358j),
+                    rtol=1e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=1.0272592605282642,
+                    b=-0.9629749245209605,
+                    c=-15.963511401609862,
+                    z=(0.5263157894736841+0.5263157894736841j),
+                    expected=(1.0388722293372104-0.09549450380041416j),
+                    rtol=5e-11,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=2.02764642551431,
+                    b=1.0561196186065624,
+                    c=-7.93846038215665,
+                    z=(0.10526315789473673+0.7368421052631575j),
+                    expected=(2.1948378809826434+24.934157235172222j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=2.02764642551431,
+                    b=16.088264119063613,
+                    c=8.031683612216888,
+                    z=(0.3157894736842106-0.736842105263158j),
+                    expected=(-0.4075277891264672-0.06819344579666956j),
+                    rtol=2e-12,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=4.080187217753502,
+                    b=2.050308316530781,
+                    c=8.031683612216888,
+                    z=(0.7368421052631575-0.10526315789473695j),
+                    expected=(2.833535530740603-0.6925373701408158j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=2.02764642551431,
+                    b=2.050308316530781,
+                    c=4.078873014294075,
+                    z=(0.10526315789473673-0.3157894736842106j),
+                    expected=(1.005347176329683-0.3580736009337313j),
+                    rtol=5e-16,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-0.9220024191881196,
+                    b=-0.9629749245209605,
+                    c=-15.963511401609862,
+                    z=(0.3157894736842106-0.5263157894736843j),
+                    expected=(0.9824353641135369+0.029271018868990268j),
+                    rtol=5e-13,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-0.9220024191881196,
+                    b=-0.9629749245209605,
+                    c=-159.63511401609862,
+                    z=(0.3157894736842106-0.5263157894736843j),
+                    expected=(0.9982436200365834+0.002927268199671111j),
+                    rtol=1e-7,
+                ),
+                marks=pytest.mark.xfail(reason="Poor convergence.")
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=2.02764642551431,
+                    b=16.088264119063613,
+                    c=8.031683612216888,
+                    z=(0.5263157894736841-0.5263157894736843j),
+                    expected=(-0.6906825165778091+0.8176575137504892j),
+                    rtol=5e-13,
+                ),
+            ),
+        ]
+    )
+    def test_region1(self, hyp2f1_test_case):
+        """|z| < 0.9 and real(z) >= 0."""
+        a, b, c, z, expected, rtol = hyp2f1_test_case
+        assert abs(z) < 0.9 and z.real >= 0  # Tests the test
+        assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
+
+    @pytest.mark.parametrize(
+        "hyp2f1_test_case",
+        [
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=2.02764642551431,
+                    b=1.0561196186065624,
+                    c=4.078873014294075,
+                    z=(-0.3157894736842106+0.7368421052631575j),
+                    expected=(0.7751915029081136+0.24068493258607315j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=16.087593263474208,
+                    b=16.088264119063613,
+                    c=2.0397202577726152,
+                    z=(-0.9473684210526316-0.3157894736842106j),
+                    expected=(6.564549348474962e-07+1.6761570598334562e-06j),
+                    rtol=5e-09,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=1.0272592605282642,
+                    b=2.050308316530781,
+                    c=16.056809865262608,
+                    z=(-0.10526315789473695-0.10526315789473695j),
+                    expected=(0.9862043298997204-0.013293151372712681j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=4.080187217753502,
+                    b=8.077282662161238,
+                    c=16.056809865262608,
+                    z=(-0.3157894736842106-0.736842105263158j),
+                    expected=(0.16163826638754716-0.41378530376373734j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=2.02764642551431,
+                    b=2.050308316530781,
+                    c=-0.906685989801748,
+                    z=(-0.5263157894736843+0.3157894736842106j),
+                    expected=(-6.256871535165936+0.13824973858225484j),
+                    rtol=1e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=2.02764642551431,
+                    b=8.077282662161238,
+                    c=-3.9924618758357022,
+                    z=(-0.9473684210526316-0.3157894736842106j),
+                    expected=(75.54672526086316+50.56157041797548j),
+                    rtol=5e-12,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=16.087593263474208,
+                    b=8.077282662161238,
+                    c=-1.9631175993998025,
+                    z=(-0.5263157894736843+0.5263157894736841j),
+                    expected=(282.0602536306534-82.31597306936214j),
+                    rtol=5e-13,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=8.095813935368371,
+                    b=-3.9316537064827854,
+                    c=8.031683612216888,
+                    z=(-0.5263157894736843-0.10526315789473695j),
+                    expected=(5.179603735575851+1.4445374002099813j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=4.080187217753502,
+                    b=-7.949900487447654,
+                    c=1.0651378143226575,
+                    z=(-0.3157894736842106-0.9473684210526316j),
+                    expected=(2317.623517606141-269.51476321010324j),
+                    rtol=5e-13,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=16.087593263474208,
+                    b=-1.92872979730171,
+                    c=2.0397202577726152,
+                    z=(-0.736842105263158-0.3157894736842106j),
+                    expected=(29.179154096175836+22.126690357535043j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=8.095813935368371,
+                    b=-3.9316537064827854,
+                    c=-15.963511401609862,
+                    z=(-0.736842105263158-0.10526315789473695j),
+                    expected=(0.20820247892032057-0.04763956711248794j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=1.0272592605282642,
+                    b=-15.964218273004214,
+                    c=-1.9631175993998025,
+                    z=(-0.3157894736842106-0.5263157894736843j),
+                    expected=(-157471.63920142158+991294.0587828817j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=8.095813935368371,
+                    b=-7.949900487447654,
+                    c=-7.93846038215665,
+                    z=(-0.10526315789473695-0.10526315789473695j),
+                    expected=(0.30765349653210194-0.2979706363594157j),
+                    rtol=1e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-3.956227226099288,
+                    b=1.0561196186065624,
+                    c=8.031683612216888,
+                    z=(-0.9473684210526316-0.10526315789473695j),
+                    expected=(1.6787607400597109+0.10056620134616838j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-7.937789122896016,
+                    b=16.088264119063613,
+                    c=4.078873014294075,
+                    z=(-0.5263157894736843-0.736842105263158j),
+                    expected=(7062.07842506049-12768.77955655703j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-7.937789122896016,
+                    b=16.088264119063613,
+                    c=2.0397202577726152,
+                    z=(-0.3157894736842106+0.7368421052631575j),
+                    expected=(54749.216391029935-23078.144720887536j),
+                    rtol=1e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-3.956227226099288,
+                    b=1.0561196186065624,
+                    c=-0.906685989801748,
+                    z=(-0.10526315789473695-0.10526315789473695j),
+                    expected=(1.21521766411428-4.449385173946672j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-15.980848054962111,
+                    b=4.0013768449590685,
+                    c=-1.9631175993998025,
+                    z=(-0.736842105263158+0.5263157894736841j),
+                    expected=(19234693144.196907+1617913967.7294445j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-1.9214641416286231,
+                    b=1.0561196186065624,
+                    c=-15.963511401609862,
+                    z=(-0.5263157894736843+0.3157894736842106j),
+                    expected=(0.9345201094534371+0.03745712558992195j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-7.937789122896016,
+                    b=-0.9629749245209605,
+                    c=2.0397202577726152,
+                    z=(-0.10526315789473695+0.10526315789473673j),
+                    expected=(0.605732446296829+0.398171533680972j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-1.9214641416286231,
+                    b=-15.964218273004214,
+                    c=2.0397202577726152,
+                    z=(-0.10526315789473695-0.5263157894736843j),
+                    expected=(-9.753761888305416-4.590126012666959j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-3.956227226099288,
+                    b=-1.92872979730171,
+                    c=2.0397202577726152,
+                    z=(-0.10526315789473695+0.3157894736842106j),
+                    expected=(0.45587226291120714+1.0694545265819797j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-0.9220024191881196,
+                    b=-7.949900487447654,
+                    c=-0.906685989801748,
+                    z=(-0.736842105263158+0.3157894736842106j),
+                    expected=(12.334808243233418-76.26089051819054j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-0.9220024191881196,
+                    b=-7.949900487447654,
+                    c=-15.963511401609862,
+                    z=(-0.5263157894736843+0.10526315789473673j),
+                    expected=(1.2396019687632678-0.047507973161146286j),
+                    rtol=1e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-15.980848054962111,
+                    b=-0.9629749245209605,
+                    c=-0.906685989801748,
+                    z=(-0.3157894736842106-0.5263157894736843j),
+                    expected=(97.7889554372208-18.999754543400016j),
+                    rtol=5e-13,
+                ),
+            ),
+        ]
+    )
+    def test_region2(self, hyp2f1_test_case):
+        """|z| < 1 and real(z) < 0."""
+        a, b, c, z, expected, rtol = hyp2f1_test_case
+        assert abs(z) < 1 and z.real < 0  # Tests the test
+        assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
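+
+    # For real(z) < 0 the series in z converges ever more slowly as
+    # |z| -> 1; the classical remedy (presumably what backs these cases)
+    # is Pfaff's transformation
+    #
+    #     2F1(a, b; c; z) = (1 - z)**(-a) * 2F1(a, c - b; c; z / (z - 1)),
+    #
+    # which for |z| < 1, real(z) < 0 gives |z / (z - 1)| < 1 / sqrt(2)
+    # with nonnegative real part, i.e. it lands in the half-disk covered
+    # by test_region1 above.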
+
+    @pytest.mark.parametrize(
+        "hyp2f1_test_case",
+        [
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=16.25,
+                    b=4.25,
+                    c=2.5,
+                    z=(0.4931034482758623-0.7965517241379311j),
+                    expected=(38.41207903409937-30.510151276075792j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=2.0,
+                    b=16.087593263474208,
+                    c=16.088264119063613,
+                    z=(0.5689655172413794-0.7965517241379311j),
+                    expected=(-0.6667857912761286-1.0206224321443573j),
+                    rtol=1e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=8.0,
+                    b=1.0272592605282642,
+                    c=-7.949900487447654,
+                    z=(0.4931034482758623-0.7965517241379311j),
+                    expected=(1679024.1647997478-2748129.775857212j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=4.080187217753502,
+                    b=16.0,
+                    c=-7.949900487447654,
+                    z=(0.4931034482758623-0.7965517241379311j),
+                    expected=(424747226301.16986-1245539049327.2856j),
+                    rtol=1e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=2.02764642551431,
+                    b=-15.964218273004214,
+                    c=4.0,
+                    z=(0.4931034482758623-0.7965517241379311j),
+                    expected=(-0.0057826199201757595+0.026359861999025885j),
+                    rtol=5e-06,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=2.02764642551431,
+                    b=-0.9629749245209605,
+                    c=2.0397202577726152,
+                    z=(0.5689655172413794-0.7965517241379311j),
+                    expected=(0.4671901063492606+0.7769632229834897j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=2.0,
+                    b=-3.956227226099288,
+                    c=-7.949900487447654,
+                    z=(0.4931034482758623+0.7965517241379312j),
+                    expected=(0.9422283708145973+1.3476905754773343j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=1.0,
+                    b=-15.980848054962111,
+                    c=-15.964218273004214,
+                    z=(0.4931034482758623-0.7965517241379311j),
+                    expected=(0.4168719497319604-0.9770953555235625j),
+                    rtol=5e-10,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-0.5,
+                    b=16.088264119063613,
+                    c=2.5,
+                    z=(0.5689655172413794+0.7965517241379312j),
+                    expected=(1.279096377550619-2.173827694297929j),
+                    rtol=5e-12,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-1.9214641416286231,
+                    b=4.0013768449590685,
+                    c=2.0397202577726152,
+                    z=(0.4931034482758623+0.7965517241379312j),
+                    expected=(-2.071520656161738-0.7846098268395909j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-0.9220024191881196,
+                    b=8.0,
+                    c=-0.9629749245209605,
+                    z=(0.5689655172413794-0.7965517241379311j),
+                    expected=(-7.740015495862889+3.386766435696699j),
+                    rtol=5e-12,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-1.9214641416286231,
+                    b=16.088264119063613,
+                    c=-7.93846038215665,
+                    z=(0.4931034482758623+0.7965517241379312j),
+                    expected=(-6318.553685853241-7133.416085202879j),
+                    rtol=1e-10,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-15.980848054962111,
+                    b=-3.9316537064827854,
+                    c=16.056809865262608,
+                    z=(0.5689655172413794+0.7965517241379312j),
+                    expected=(-0.8854577905547399+8.135089099967278j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-1.9214641416286231,
+                    b=-0.9629749245209605,
+                    c=4.078873014294075,
+                    z=(0.4931034482758623+0.7965517241379312j),
+                    expected=(1.224291301521487+0.36014711766402485j),
+                    rtol=1e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-15.75,
+                    b=-0.75,
+                    c=-1.5,
+                    z=(0.4931034482758623+0.7965517241379312j),
+                    expected=(-1.5765685855028473-3.9399766961046323j),
+                    rtol=1e-3,
+                ),
+                marks=pytest.mark.xfail(
+                    reason="Unhandled parameters."
+                )
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-15.980848054962111,
+                    b=-1.92872979730171,
+                    c=-7.93846038215665,
+                    z=(0.5689655172413794-0.7965517241379311j),
+                    expected=(56.794588688231194+4.556286783533971j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=4.5,
+                    b=4.5,
+                    c=2.050308316530781,
+                    z=(0.5689655172413794+0.7965517241379312j),
+                    expected=(-4.251456563455306+6.737837111569671j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=4.5,
+                    b=8.5,
+                    c=-1.92872979730171,
+                    z=(0.4931034482758623-0.7965517241379311j),
+                    expected=(2177143.9156599627-3313617.2748088865j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=2.5,
+                    b=-1.5,
+                    c=4.0013768449590685,
+                    z=(0.4931034482758623-0.7965517241379311j),
+                    expected=(0.45563554481603946+0.6212000158060831j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=8.5,
+                    b=-7.5,
+                    c=-15.964218273004214,
+                    z=(0.4931034482758623+0.7965517241379312j),
+                    expected=(61.03201617828073-37.185626416756214j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-15.5,
+                    b=16.5,
+                    c=4.0013768449590685,
+                    z=(0.4931034482758623+0.7965517241379312j),
+                    expected=(-33143.425963520735+20790.608514722644j),
+                    rtol=1e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-0.5,
+                    b=4.5,
+                    c=-0.9629749245209605,
+                    z=(0.5689655172413794+0.7965517241379312j),
+                    expected=(30.778600270824423-26.65160354466787j),
+                    rtol=5e-13,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-0.5,
+                    b=-3.5,
+                    c=16.088264119063613,
+                    z=(0.5689655172413794-0.7965517241379311j),
+                    expected=(1.0629792615560487-0.08308454486044772j),
+                    rtol=1e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-3.5,
+                    b=-7.5,
+                    c=-0.9629749245209605,
+                    z=(0.4931034482758623-0.7965517241379311j),
+                    expected=(17431.571802591767+3553.7129767034507j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=2.25,
+                    b=8.25,
+                    c=16.5,
+                    z=(0.11379310344827598+0.9482758620689657j),
+                    expected=(0.4468600750211926+0.7313214934036885j),
+                    rtol=1e-3,
+                ),
+                marks=pytest.mark.xfail(
+                    reason="Unhandled parameters."
+                )
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=8.25,
+                    b=16.25,
+                    c=4.5,
+                    z=(0.3413793103448277+0.8724137931034486j),
+                    expected=(-3.905704438293991+3.693347860329299j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=4.25,
+                    b=4.25,
+                    c=-0.5,
+                    z=(0.11379310344827598-0.9482758620689655j),
+                    expected=(-40.31777941834244-89.89852492432011j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=1.0272592605282642,
+                    b=8.0,
+                    c=-15.964218273004214,
+                    z=(0.11379310344827598-0.9482758620689655j),
+                    expected=(52584.347773055284-109197.86244309516j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=8.095813935368371,
+                    b=-15.964218273004214,
+                    c=16.056809865262608,
+                    z=(0.03793103448275881+0.9482758620689657j),
+                    expected=(-1.187733570412592-1.5147865053584582j),
+                    rtol=5e-10,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=4.080187217753502,
+                    b=-3.9316537064827854,
+                    c=1.0651378143226575,
+                    z=(0.26551724137931054+0.9482758620689657j),
+                    expected=(13.077494677898947+35.071599628224966j),
+                    rtol=5e-13,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=4.080187217753502,
+                    b=-3.5,
+                    c=-3.5,
+                    z=(0.26551724137931054+0.8724137931034486j),
+                    expected=(-0.5359656237994614-0.2344483936591811j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=4.25,
+                    b=-3.75,
+                    c=-1.5,
+                    z=(0.26551724137931054+0.9482758620689657j),
+                    expected=(1204.8114871663133+64.41022826840198j),
+                    rtol=5e-13,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-1.9214641416286231,
+                    b=16.0,
+                    c=4.0013768449590685,
+                    z=(0.03793103448275881-0.9482758620689655j),
+                    expected=(-9.85268872413994+7.011107558429154j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-7.937789122896016,
+                    b=16.0,
+                    c=4.0013768449590685,
+                    z=(0.3413793103448277-0.8724137931034484j),
+                    expected=(528.5522951158454-1412.21630264791j),
+                    rtol=1e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-15.5,
+                    b=1.0561196186065624,
+                    c=-7.5,
+                    z=(0.4172413793103451+0.8724137931034486j),
+                    expected=(133306.45260685298+256510.7045225382j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-7.937789122896016,
+                    b=8.077282662161238,
+                    c=-15.963511401609862,
+                    z=(0.3413793103448277-0.8724137931034484j),
+                    expected=(-0.998555715276967+2.774198742229889j),
+                    rtol=5e-11,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-7.75,
+                    b=-0.75,
+                    c=1.5,
+                    z=(0.11379310344827598-0.9482758620689655j),
+                    expected=(2.072445019723025-2.9793504811373515j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-15.5,
+                    b=-1.92872979730171,
+                    c=1.5,
+                    z=(0.11379310344827598-0.9482758620689655j),
+                    expected=(-41.87581944176649-32.52980303527139j),
+                    rtol=5e-13,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-3.75,
+                    b=-15.75,
+                    c=-0.5,
+                    z=(0.11379310344827598-0.9482758620689655j),
+                    expected=(-3729.6214864209774-30627.510509112635j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-3.956227226099288,
+                    b=-15.964218273004214,
+                    c=-0.906685989801748,
+                    z=(0.03793103448275881+0.9482758620689657j),
+                    expected=(-131615.07820609974+145596.13384245415j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=1.5,
+                    b=16.5,
+                    c=16.088264119063613,
+                    z=(0.26551724137931054+0.8724137931034486j),
+                    expected=(0.18981844071070744+0.7855036242583742j),
+                    rtol=1e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=16.5,
+                    b=8.5,
+                    c=-3.9316537064827854,
+                    z=(0.11379310344827598-0.9482758620689655j),
+                    expected=(110224529.2376068+128287212.04290268j),
+                    rtol=5e-13,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=2.5,
+                    b=-7.5,
+                    c=4.0013768449590685,
+                    z=(0.3413793103448277-0.8724137931034484j),
+                    expected=(0.2722302180888523-0.21790187837266162j),
+                    rtol=1e-12,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=8.5,
+                    b=-7.5,
+                    c=-15.964218273004214,
+                    z=(0.11379310344827598-0.9482758620689655j),
+                    expected=(-2.8252338010989035+2.430661949756161j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-3.5,
+                    b=16.5,
+                    c=4.0013768449590685,
+                    z=(0.03793103448275881+0.9482758620689657j),
+                    expected=(-20.604894257647945+74.5109432558078j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-7.5,
+                    b=8.5,
+                    c=-0.9629749245209605,
+                    z=(0.3413793103448277+0.8724137931034486j),
+                    expected=(-2764422.521269463-3965966.9965808876j),
+                    rtol=1e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-1.5,
+                    b=-0.5,
+                    c=1.0561196186065624,
+                    z=(0.26551724137931054+0.9482758620689657j),
+                    expected=(1.2262338560994905+0.6545051266925549j),
+                    rtol=1e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-0.5,
+                    b=-15.5,
+                    c=-7.949900487447654,
+                    z=(0.4172413793103451-0.8724137931034484j),
+                    expected=(-2258.1590330318213+8860.193389158803j),
+                    rtol=1e-10,
+                ),
+            ),
+        ]
+    )
+    def test_region4(self, hyp2f1_test_case):
+        """0.9 <= |z| <= 1 and |1 - z| >= 1.
+
+        This region is unhandled by of the standard transformations and
+        needs special care.
+        """
+        a, b, c, z, expected, rtol = hyp2f1_test_case
+        assert 0.9 <= abs(z) <= 1 and abs(1 - z) >= 0.9  # Tests the test
+        assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
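+
+    # With |z| and |1 - z| both close to 1, each variable produced by the
+    # six standard linear transformations (z, 1 - z, 1/z, 1/(1 - z),
+    # z/(z - 1), (z - 1)/z) also has modulus close to 1, so none of the
+    # transformed series converges quickly; hence the dedicated handling,
+    # and the xfail markers, in this region.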
+
+    @pytest.mark.parametrize(
+        "hyp2f1_test_case",
+        [
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=4.5,
+                    b=16.088264119063613,
+                    c=8.5,
+                    z=(0.6448275862068968+0.8724137931034486j),
+                    expected=(0.018601324701770394-0.07618420586062377j),
+                    rtol=5e-08,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=8.25,
+                    b=4.25,
+                    c=4.5,
+                    z=(0.6448275862068968-0.8724137931034484j),
+                    expected=(-1.391549471425551-0.118036604903893j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=2.02764642551431,
+                    b=2.050308316530781,
+                    c=-1.9631175993998025,
+                    z=(0.6448275862068968+0.8724137931034486j),
+                    expected=(-2309.178768155151-1932.7247727595172j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=16.087593263474208,
+                    b=1.0,
+                    c=-15.964218273004214,
+                    z=(0.6448275862068968+0.8724137931034486j),
+                    expected=(85592537010.05054-8061416766688.324j),
+                    rtol=1e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=8.095813935368371,
+                    b=-0.5,
+                    c=1.5,
+                    z=(0.6448275862068968+0.8724137931034486j),
+                    expected=(1.2334498208515172-2.1639498536219732j),
+                    rtol=5e-11,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=16.087593263474208,
+                    b=-15.964218273004214,
+                    c=4.0,
+                    z=(0.6448275862068968+0.8724137931034486j),
+                    expected=(102266.35398605966-44976.97828737755j),
+                    rtol=1e-3,
+                ),
+                marks=pytest.mark.xfail(
+                    reason="Unhandled parameters."
+                )
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=4.0,
+                    b=-3.956227226099288,
+                    c=-15.964218273004214,
+                    z=(0.6448275862068968-0.8724137931034484j),
+                    expected=(-2.9590030930007236-4.190770764773225j),
+                    rtol=5e-13,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=4.080187217753502,
+                    b=-15.5,
+                    c=-7.5,
+                    z=(0.5689655172413794-0.8724137931034484j),
+                    expected=(-112554838.92074208+174941462.9202412j),
+                    rtol=5e-05,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-15.980848054962111,
+                    b=2.050308316530781,
+                    c=1.0,
+                    z=(0.6448275862068968-0.8724137931034484j),
+                    expected=(3.7519882374080145+7.360753798667486j),
+                    rtol=5e-13,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-7.937789122896016,
+                    b=2.050308316530781,
+                    c=4.0,
+                    z=(0.6448275862068968-0.8724137931034484j),
+                    expected=(0.000181132943964693+0.07742903103815582j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-7.937789122896016,
+                    b=4.0013768449590685,
+                    c=-1.9631175993998025,
+                    z=(0.5689655172413794+0.8724137931034486j),
+                    expected=(386338.760913596-386166.51762171905j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-15.980848054962111,
+                    b=8.0,
+                    c=-1.92872979730171,
+                    z=(0.6448275862068968+0.8724137931034486j),
+                    expected=(1348667126.3444858-2375132427.158893j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-3.5,
+                    b=-0.9629749245209605,
+                    c=4.5,
+                    z=(0.5689655172413794+0.8724137931034486j),
+                    expected=(1.428353429538678+0.6472718120804372j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-7.937789122896016,
+                    b=-0.9629749245209605,
+                    c=2.0397202577726152,
+                    z=(0.5689655172413794-0.8724137931034484j),
+                    expected=(3.1439267526119643-3.145305240375117j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-1.9214641416286231,
+                    b=-15.964218273004214,
+                    c=-7.93846038215665,
+                    z=(0.6448275862068968-0.8724137931034484j),
+                    expected=(75.27467675681773+144.0946946292215j),
+                    rtol=1e-07,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-3.75,
+                    b=-7.75,
+                    c=-7.5,
+                    z=(0.5689655172413794+0.8724137931034486j),
+                    expected=(-0.3699450626264222+0.8732812475910993j),
+                    rtol=1e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=1.5,
+                    b=16.5,
+                    c=1.0561196186065624,
+                    z=(0.5689655172413794-0.8724137931034484j),
+                    expected=(5.5361025821300665-2.4709693474656285j),
+                    rtol=5e-09,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=1.5,
+                    b=8.5,
+                    c=-3.9316537064827854,
+                    z=(0.6448275862068968-0.8724137931034484j),
+                    expected=(-782805.6699207705-537192.581278909j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=2.5,
+                    b=-15.5,
+                    c=1.0561196186065624,
+                    z=(0.6448275862068968+0.8724137931034486j),
+                    expected=(12.345113400639693-14.993248992902007j),
+                    rtol=0.0005,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=1.5,
+                    b=-0.5,
+                    c=-15.964218273004214,
+                    z=(0.6448275862068968+0.8724137931034486j),
+                    expected=(23.698109392667842+97.15002033534108j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-7.5,
+                    b=16.5,
+                    c=4.0013768449590685,
+                    z=(0.6448275862068968-0.8724137931034484j),
+                    expected=(1115.2978631811834+915.9212658718577j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-15.5,
+                    b=16.5,
+                    c=-0.9629749245209605,
+                    z=(0.6448275862068968+0.8724137931034486j),
+                    expected=(642077722221.6489+535274495398.21027j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-7.5,
+                    b=-3.5,
+                    c=4.0013768449590685,
+                    z=(0.5689655172413794+0.8724137931034486j),
+                    expected=(-5.689219222945697+16.877463062787143j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-15.5,
+                    b=-1.5,
+                    c=-0.9629749245209605,
+                    z=(0.5689655172413794-0.8724137931034484j),
+                    expected=(-44.32070290703576+1026.9127058617403j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=16.25,
+                    b=2.25,
+                    c=4.5,
+                    z=(0.11379310344827598-1.024137931034483j),
+                    expected=(-0.021965227124574663+0.009908300237809064j),
+                    rtol=1e-3,
+                ),
+                marks=pytest.mark.xfail(
+                    reason="Unhandled parameters."
+                )
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=2.02764642551431,
+                    b=1.5,
+                    c=16.5,
+                    z=(0.26551724137931054+1.024137931034483j),
+                    expected=(1.0046072901244183+0.19945500134119992j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=16.087593263474208,
+                    b=1.0,
+                    c=-3.9316537064827854,
+                    z=(0.3413793103448277+0.9482758620689657j),
+                    expected=(21022.30133421465+49175.98317370489j),
+                    rtol=5e-13,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=4.080187217753502,
+                    b=16.088264119063613,
+                    c=-1.9631175993998025,
+                    z=(0.4172413793103451-0.9482758620689655j),
+                    expected=(-7024239.358547302+2481375.02681063j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=16.25,
+                    b=-15.75,
+                    c=1.5,
+                    z=(0.18965517241379315+1.024137931034483j),
+                    expected=(92371704.94848-403546832.548352j),
+                    rtol=5e-06,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=8.5,
+                    b=-7.949900487447654,
+                    c=8.5,
+                    z=(0.26551724137931054-1.024137931034483j),
+                    expected=(1.9335109845308265+5.986542524829654j),
+                    rtol=5e-10,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=8.095813935368371,
+                    b=-1.92872979730171,
+                    c=-7.93846038215665,
+                    z=(0.4931034482758623+0.8724137931034486j),
+                    expected=(-122.52639696039328-59.72428067512221j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=16.25,
+                    b=-1.75,
+                    c=-1.5,
+                    z=(0.4931034482758623+0.9482758620689657j),
+                    expected=(-90.40642053579428+50.50649180047921j),
+                    rtol=5e-08,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-3.5,
+                    b=8.077282662161238,
+                    c=16.5,
+                    z=(0.4931034482758623+0.9482758620689657j),
+                    expected=(-0.2155745818150323-0.564628986876639j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-0.9220024191881196,
+                    b=1.0561196186065624,
+                    c=8.031683612216888,
+                    z=(0.4172413793103451-0.9482758620689655j),
+                    expected=(0.9503140488280465+0.11574960074292677j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-0.75,
+                    b=2.25,
+                    c=-15.5,
+                    z=(0.4172413793103451+0.9482758620689657j),
+                    expected=(0.9285862488442175+0.8203699266719692j),
+                    rtol=5e-13,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-7.75,
+                    b=4.25,
+                    c=-15.5,
+                    z=(0.3413793103448277-0.9482758620689655j),
+                    expected=(-1.0509834850116921-1.1145522325486075j),
+                    rtol=1e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-7.937789122896016,
+                    b=-0.9629749245209605,
+                    c=2.0397202577726152,
+                    z=(0.4931034482758623-0.9482758620689655j),
+                    expected=(2.88119116536769-3.4249933450696806j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-15.5,
+                    b=-15.964218273004214,
+                    c=16.5,
+                    z=(0.18965517241379315+1.024137931034483j),
+                    expected=(199.65868451496038+347.79384207302877j),
+                    rtol=1e-13,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-15.75,
+                    b=-15.75,
+                    c=-3.5,
+                    z=(0.4931034482758623-0.8724137931034484j),
+                    expected=(-208138312553.07013+58631611809.026955j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-7.937789122896016,
+                    b=-15.5,
+                    c=-7.5,
+                    z=(0.3413793103448277+0.9482758620689657j),
+                    expected=(-23032.90519856288-18256.94050457296j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=4.5,
+                    b=1.5,
+                    c=1.0561196186065624,
+                    z=(0.4931034482758623-0.8724137931034484j),
+                    expected=(1.507342459587056+1.2332023580148403j),
+                    rtol=1e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=2.5,
+                    b=4.5,
+                    c=-3.9316537064827854,
+                    z=(0.4172413793103451+0.9482758620689657j),
+                    expected=(7044.766127108853-40210.365567285575j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=1.5,
+                    b=-1.5,
+                    c=1.0561196186065624,
+                    z=(0.03793103448275881+1.024137931034483j),
+                    expected=(0.2725347741628333-2.247314875514784j),
+                    rtol=1e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=4.5,
+                    b=-1.5,
+                    c=-7.949900487447654,
+                    z=(0.26551724137931054+1.024137931034483j),
+                    expected=(-11.250200011017546+12.597393659160472j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-7.5,
+                    b=8.5,
+                    c=16.088264119063613,
+                    z=(0.26551724137931054+1.024137931034483j),
+                    expected=(-0.18515160890991517+0.7959014164484782j),
+                    rtol=1e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-7.5,
+                    b=16.5,
+                    c=-3.9316537064827854,
+                    z=(0.3413793103448277-1.024137931034483j),
+                    expected=(998246378.8556538+1112032928.103645j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-1.5,
+                    b=-3.5,
+                    c=2.050308316530781,
+                    z=(0.03793103448275881+1.024137931034483j),
+                    expected=(0.5527670397711952+2.697662715303637j),
+                    rtol=1e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-15.5,
+                    b=-1.5,
+                    c=-0.9629749245209605,
+                    z=(0.4931034482758623-0.8724137931034484j),
+                    expected=(55.396931662136886+968.467463806326j),
+                    rtol=5e-14,
+                ),
+            ),
+        ]
+    )
+    def test_region5(self, hyp2f1_test_case):
+        """1 < |z| < 1.1 and |1 - z| >= 0.9 and real(z) >= 0"""
+        a, b, c, z, expected, rtol = hyp2f1_test_case
+        assert 1 < abs(z) < 1.1 and abs(1 - z) >= 0.9 and z.real >= 0
+        assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
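+
+    # The convergence difficulty noted for test_region4 persists just
+    # outside the unit circle, so these cases presumably exercise the same
+    # special-purpose evaluation path.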
+
+    @pytest.mark.parametrize(
+        "hyp2f1_test_case",
+        [
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=8.095813935368371,
+                    b=4.0013768449590685,
+                    c=4.078873014294075,
+                    z=(-0.9473684210526316+0.5263157894736841j),
+                    expected=(-0.0018093573941378783+0.003481887377423739j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=16.087593263474208,
+                    b=2.050308316530781,
+                    c=1.0651378143226575,
+                    z=(-0.736842105263158-0.736842105263158j),
+                    expected=(-0.00023401243818780545-1.7983496305603562e-05j),
+                    rtol=1e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=1.0272592605282642,
+                    b=8.077282662161238,
+                    c=4.078873014294075,
+                    z=(-0.5263157894736843-0.9473684210526316j),
+                    expected=(0.22359773002226846-0.24092487123993353j),
+                    rtol=1e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=1.0272592605282642,
+                    b=2.050308316530781,
+                    c=-15.963511401609862,
+                    z=(-0.9473684210526316-0.5263157894736843j),
+                    expected=(1.191573745740011+0.14347394589721466j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=4.080187217753502,
+                    b=4.0013768449590685,
+                    c=-15.963511401609862,
+                    z=(-0.9473684210526316-0.5263157894736843j),
+                    expected=(31.822620756901784-66.09094396747611j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=4.080187217753502,
+                    b=8.077282662161238,
+                    c=-7.93846038215665,
+                    z=(-0.9473684210526316+0.5263157894736841j),
+                    expected=(207.16750179245952+34.80478274924269j),
+                    rtol=5e-12,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=8.095813935368371,
+                    b=-7.949900487447654,
+                    c=8.031683612216888,
+                    z=(-0.736842105263158+0.7368421052631575j),
+                    expected=(-159.62429364277145+9.154224290644898j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=1.0272592605282642,
+                    b=-1.92872979730171,
+                    c=16.056809865262608,
+                    z=(-0.9473684210526316+0.5263157894736841j),
+                    expected=(1.121122351247184-0.07170260470126685j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=16.087593263474208,
+                    b=-0.9629749245209605,
+                    c=16.056809865262608,
+                    z=(-0.9473684210526316+0.5263157894736841j),
+                    expected=(1.9040596681316053-0.4951799449960107j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=1.0272592605282642,
+                    b=-1.92872979730171,
+                    c=-0.906685989801748,
+                    z=(-0.9473684210526316-0.5263157894736843j),
+                    expected=(-14.496623497780739-21.897524523299875j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=4.080187217753502,
+                    b=-3.9316537064827854,
+                    c=-3.9924618758357022,
+                    z=(-0.5263157894736843-0.9473684210526316j),
+                    expected=(36.33473466026878+253.88728442029577j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=1.0272592605282642,
+                    b=-15.964218273004214,
+                    c=-0.906685989801748,
+                    z=(-0.9473684210526316+0.5263157894736841j),
+                    expected=(1505052.5653144997-50820766.81043443j),
+                    rtol=1e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-3.956227226099288,
+                    b=4.0013768449590685,
+                    c=1.0651378143226575,
+                    z=(-0.5263157894736843+0.9473684210526314j),
+                    expected=(-127.79407519260877-28.69899444941112j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-1.9214641416286231,
+                    b=8.077282662161238,
+                    c=16.056809865262608,
+                    z=(-0.9473684210526316-0.5263157894736843j),
+                    expected=(2.0623331933754976+0.741234463565458j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-3.956227226099288,
+                    b=8.077282662161238,
+                    c=2.0397202577726152,
+                    z=(-0.9473684210526316+0.5263157894736841j),
+                    expected=(30.729193458862525-292.5700835046965j),
+                    rtol=1e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-1.9214641416286231,
+                    b=1.0561196186065624,
+                    c=-1.9631175993998025,
+                    z=(-0.5263157894736843-0.9473684210526316j),
+                    expected=(1.1285917906203495-0.735264575450189j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-0.9220024191881196,
+                    b=1.0561196186065624,
+                    c=-3.9924618758357022,
+                    z=(-0.736842105263158+0.7368421052631575j),
+                    expected=(0.6356474446678052-0.02429663008952248j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-1.9214641416286231,
+                    b=16.088264119063613,
+                    c=-7.93846038215665,
+                    z=(-0.736842105263158+0.7368421052631575j),
+                    expected=(0.4718880510273174+0.655083067736377j),
+                    rtol=1e-11,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-7.937789122896016,
+                    b=-3.9316537064827854,
+                    c=16.056809865262608,
+                    z=(-0.9473684210526316+0.5263157894736841j),
+                    expected=(-0.14681550942352714+0.16092206364265146j),
+                    rtol=5e-11,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-0.9220024191881196,
+                    b=-15.964218273004214,
+                    c=1.0651378143226575,
+                    z=(-0.5263157894736843+0.9473684210526314j),
+                    expected=(-6.436835190526225+22.883156700606182j),
+                    rtol=5e-14,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-0.9220024191881196,
+                    b=-7.949900487447654,
+                    c=4.078873014294075,
+                    z=(-0.9473684210526316-0.5263157894736843j),
+                    expected=(-0.7505682955068583-1.1026583264249945j),
+                    rtol=1e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-3.956227226099288,
+                    b=-3.9316537064827854,
+                    c=-7.93846038215665,
+                    z=(-0.9473684210526316-0.5263157894736843j),
+                    expected=(3.6247814989198166+2.596041360148318j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-3.956227226099288,
+                    b=-15.964218273004214,
+                    c=-1.9631175993998025,
+                    z=(-0.5263157894736843-0.9473684210526316j),
+                    expected=(-59537.65287927933-669074.4342539902j),
+                    rtol=5e-15,
+                ),
+            ),
+            pytest.param(
+                Hyp2f1TestCase(
+                    a=-3.956227226099288,
+                    b=-15.964218273004214,
+                    c=-1.9631175993998025,
+                    z=(-0.9473684210526316-0.5263157894736843j),
+                    expected=(-433084.9970266166+431088.393918521j),
+                    rtol=5e-14,
+                ),
+            ),
+        ]
+    )
+    def test_region6(self, hyp2f1_test_case):
+        """|z| > 1 but not in region 5."""
+        a, b, c, z, expected, rtol = hyp2f1_test_case
+        assert (
+            abs(z) > 1 and
+            not (1 < abs(z) < 1.1 and abs(1 - z) >= 0.9 and z.real >= 0)
+        )
+        assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
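+
+    # For |z| > 1 away from the unit circle the textbook tool (not
+    # necessarily the exact method used here) is the 1/z connection
+    # formula, DLMF 15.8.2, valid when a - b is not an integer (the
+    # degenerate cases require limits):
+    #
+    #     2F1(a, b; c; z)
+    #         = gamma(c) * gamma(b - a) / (gamma(b) * gamma(c - a))
+    #             * (-z)**(-a) * 2F1(a, a - c + 1; a - b + 1; 1 / z)
+    #         + gamma(c) * gamma(a - b) / (gamma(a) * gamma(c - b))
+    #             * (-z)**(-b) * 2F1(b, b - c + 1; b - a + 1; 1 / z)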
+
+    @pytest.mark.slow
+    @check_version(mpmath, "1.0.0")
+    def test_test_hyp2f1(self):
+        """Test that expected values match what is computed by mpmath.
+
+        This gathers the parameters for the test cases out of the pytest
+        marks. The parameters are a, b, c, z, expected, rtol, where expected
+        should be the value of hyp2f1(a, b, c, z) computed with mpmath. The
+        test recomputes hyp2f1(a, b, c, z) using mpmath and verifies that
+        expected actually is the correct value. This lets the test data live
+        in the test code itself rather than in an external data file, while
+        only incurring the mpmath computation when the slow tests are run.
+        """
+        test_methods = [
+            test_method for test_method in dir(self)
+            if test_method.startswith('test') and
+            # Filter properties and attributes (futureproofing).
+            callable(getattr(self, test_method)) and
+            # Filter out this test itself.
+            test_method != 'test_test_hyp2f1'
+        ]
+        for test_method in test_methods:
+            params = self._get_test_parameters(getattr(self, test_method))
+            for a, b, c, z, expected, _ in params:
+                assert_allclose(mp_hyp2f1(a, b, c, z), expected, rtol=2.25e-16)
+
+    def _get_test_parameters(self, test_method):
+        """Get pytest.mark parameters for a test in this class."""
+        return [
+            case.values[0] for mark in test_method.pytestmark
+            if mark.name == 'parametrize'
+            for case in mark.args[1]
+        ]
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_hypergeometric.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_hypergeometric.py
new file mode 100644
index 00000000..b23b5c02
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_hypergeometric.py
@@ -0,0 +1,140 @@
+import pytest
+import numpy as np
+from numpy.testing import assert_allclose, assert_equal
+import scipy.special as sc
+
+
+class TestHyperu:
+
+    def test_negative_x(self):
+        a, b, x = np.meshgrid(
+            [-1, -0.5, 0, 0.5, 1],
+            [-1, -0.5, 0, 0.5, 1],
+            np.linspace(-100, -1, 10),
+        )
+        assert np.all(np.isnan(sc.hyperu(a, b, x)))
+
+    def test_special_cases(self):
+        assert sc.hyperu(0, 1, 1) == 1.0
+
+    @pytest.mark.parametrize('a', [0.5, 1, np.nan])
+    @pytest.mark.parametrize('b', [1, 2, np.nan])
+    @pytest.mark.parametrize('x', [0.25, 3, np.nan])
+    def test_nan_inputs(self, a, b, x):
+        assert np.isnan(sc.hyperu(a, b, x)) == np.any(np.isnan([a, b, x]))
+
+
+class TestHyp1f1:
+
+    @pytest.mark.parametrize('a, b, x', [
+        (np.nan, 1, 1),
+        (1, np.nan, 1),
+        (1, 1, np.nan)
+    ])
+    def test_nan_inputs(self, a, b, x):
+        assert np.isnan(sc.hyp1f1(a, b, x))
+
+    def test_poles(self):
+        assert_equal(sc.hyp1f1(1, [0, -1, -2, -3, -4], 0.5), np.infty)
+
+    @pytest.mark.parametrize('a, b, x, result', [
+        (-1, 1, 0.5, 0.5),
+        (1, 1, 0.5, 1.6487212707001281468),
+        (2, 1, 0.5, 2.4730819060501922203),
+        (1, 2, 0.5, 1.2974425414002562937),
+        (-10, 1, 0.5, -0.38937441413785204475)
+    ])
+    def test_special_cases(self, a, b, x, result):
+        # Hit all the special case branches at the beginning of the
+        # function.  Desired answers computed using mpmath.
+        assert_allclose(sc.hyp1f1(a, b, x), result, atol=0, rtol=1e-15)
+
+    @pytest.mark.parametrize('a, b, x, result', [
+        (1, 1, 0.44, 1.5527072185113360455),
+        (-1, 1, 0.44, 0.55999999999999999778),
+        (100, 100, 0.89, 2.4351296512898745592),
+        (-100, 100, 0.89, 0.40739062490768104667),
+        (1.5, 100, 59.99, 3.8073513625965598107),
+        (-1.5, 100, 59.99, 0.25099240047125826943)
+    ])
+    def test_geometric_convergence(self, a, b, x, result):
+        # Test the region where we are relying on the ratio of
+        #
+        # (|a| + 1) * |x| / |b|
+        #
+        # being small.  Desired answers computed using mpmath.
+        assert_allclose(sc.hyp1f1(a, b, x), result, atol=0, rtol=1e-15)
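+        # As a rough illustration, the largest such ratio among the cases
+        # above is for (a, b, x) = (1.5, 100, 59.99):
+        # (|a| + 1) * |x| / |b| = 2.5 * 59.99 / 100 ~= 1.5, which is still
+        # small enough for rapid convergence.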
+
+    @pytest.mark.parametrize('a, b, x, result', [
+        (-1, 1, 1.5, -0.5),
+        (-10, 1, 1.5, 0.41801777430943080357),
+        (-25, 1, 1.5, 0.25114491646037839809),
+        (-50, 1, 1.5, -0.25683643975194756115),
+        (-80, 1, 1.5, -0.24554329325751503601),
+        (-150, 1, 1.5, -0.173364795515420454496),
+    ])
+    def test_a_negative_integer(self, a, b, x, result):
+        # Desired answers computed using mpmath.
+        assert_allclose(sc.hyp1f1(a, b, x), result, atol=0, rtol=1e-14)
+
+    @pytest.mark.parametrize('a, b, x, expected', [
+        (0.01, 150, -4, 0.99973683897677527773),        # gh-3492
+        (1, 5, 0.01, 1.0020033381011970966),            # gh-3593
+        (50, 100, 0.01, 1.0050126452421463411),         # gh-3593
+        (1, 0.3, -1e3, -7.011932249442947651455e-04),   # gh-14149
+        (1, 0.3, -1e4, -7.001190321418937164734e-05),   # gh-14149
+        (9, 8.5, -350, -5.224090831922378361082e-20),   # gh-17120
+        (9, 8.5, -355, -4.595407159813368193322e-20),   # gh-17120
+        (75, -123.5, 15, 3.425753920814889017493e+06),
+    ])
+    def test_assorted_cases(self, a, b, x, expected):
+        # Expected values were computed with mpmath.hyp1f1(a, b, x).
+        assert_allclose(sc.hyp1f1(a, b, x), expected, atol=0, rtol=1e-14)
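+
+    # A sketch (assuming mpmath is installed) of how a row of the expected
+    # values above could be regenerated:
+    #
+    #   import mpmath
+    #   mpmath.mp.dps = 50
+    #   float(mpmath.hyp1f1(0.01, 150, -4))   # -> 0.99973683897677527773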
+
+    def test_a_neg_int_and_b_equal_x(self):
+        # This is a case where the Boost wrapper will call hypergeometric_pFq
+        # instead of hypergeometric_1F1.  When we use a version of Boost in
+        # which https://github.com/boostorg/math/issues/833 is fixed, this
+        # test case can probably be moved into test_assorted_cases.
+        # The expected value was computed with mpmath.hyp1f1(a, b, x).
+        a = -10.0
+        b = 2.5
+        x = 2.5
+        expected = 0.0365323664364104338721
+        computed = sc.hyp1f1(a, b, x)
+        assert_allclose(computed, expected, atol=0, rtol=1e-13)
+
+    @pytest.mark.parametrize('a, b, x, desired', [
+        (-1, -2, 2, 2),
+        (-1, -4, 10, 3.5),
+        (-2, -2, 1, 2.5)
+    ])
+    def test_gh_11099(self, a, b, x, desired):
+        # All desired results computed using mpmath.
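+        # Exact equality is reasonable here: with a nonpositive integer `a`
+        # the series terminates, so hyp1f1 reduces to a short polynomial
+        # that is evaluated exactly for these small inputs.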
+        assert sc.hyp1f1(a, b, x) == desired
+
+    @pytest.mark.parametrize('a', [-3, -2])
+    def test_x_zero_a_and_b_neg_ints_and_a_ge_b(self, a):
+        assert sc.hyp1f1(a, -3, 0) == 1
+
+    # The "legacy edge cases" mentioned in the comments in the following
+    # tests refers to the behavior of hyp1f1(a, b, x) when b is a nonpositive
+    # integer.  In some subcases, the behavior of SciPy does not match that
+    # of Boost (1.81+), mpmath and Mathematica (via Wolfram Alpha online).
+    # If the handling of these edges cases is changed to agree with those
+    # libraries, these test will have to be updated.
+
+    @pytest.mark.parametrize('b', [0, -1, -5])
+    def test_legacy_case1(self, b):
+        # Test results of hyp1f1(0, n, x) for n <= 0.
+        # This is a legacy edge case.
+        # Boost (versions greater than 1.80), Mathematica (via Wolfram Alpha
+        # online) and mpmath all return 1 in this case, but SciPy's hyp1f1
+        # returns inf.
+        assert_equal(sc.hyp1f1(0, b, [-1.5, 0, 1.5]), [np.inf, np.inf, np.inf])
+
+    def test_legacy_case2(self):
+        # This is a legacy edge case.
+        # In software such as Boost (1.81+), mpmath and Mathematica,
+        # the value is 1.
+        assert sc.hyp1f1(-4, -3, 0) == np.inf
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_kolmogorov.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_kolmogorov.py
new file mode 100644
index 00000000..58da1b83
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_kolmogorov.py
@@ -0,0 +1,412 @@
+import itertools
+import sys
+import pytest
+
+import numpy as np
+from numpy.testing import assert_
+from scipy.special._testutils import FuncData
+
+from scipy.special import kolmogorov, kolmogi, smirnov, smirnovi
+from scipy.special._ufuncs import (_kolmogc, _kolmogci, _kolmogp,
+                                   _smirnovc, _smirnovci, _smirnovp)
+
+_rtol = 1e-10
+
+class TestSmirnov:
+    def test_nan(self):
+        assert_(np.isnan(smirnov(1, np.nan)))
+
+    def test_basic(self):
+        dataset = [(1, 0.1, 0.9),
+                   (1, 0.875, 0.125),
+                   (2, 0.875, 0.125 * 0.125),
+                   (3, 0.875, 0.125 * 0.125 * 0.125)]
+
+        dataset = np.asarray(dataset)
+        FuncData(smirnov, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+        dataset[:, -1] = 1 - dataset[:, -1]
+        FuncData(_smirnovc, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+
+    def test_x_equals_0(self):
+        dataset = [(n, 0, 1) for n in itertools.chain(range(2, 20), range(1010, 1020))]
+        dataset = np.asarray(dataset)
+        FuncData(smirnov, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+        dataset[:, -1] = 1 - dataset[:, -1]
+        FuncData(_smirnovc, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+
+    def test_x_equals_1(self):
+        dataset = [(n, 1, 0) for n in itertools.chain(range(2, 20), range(1010, 1020))]
+        dataset = np.asarray(dataset)
+        FuncData(smirnov, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+        dataset[:, -1] = 1 - dataset[:, -1]
+        FuncData(_smirnovc, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+
+    def test_x_equals_0point5(self):
+        dataset = [(1, 0.5, 0.5),
+                   (2, 0.5, 0.25),
+                   (3, 0.5, 0.166666666667),
+                   (4, 0.5, 0.09375),
+                   (5, 0.5, 0.056),
+                   (6, 0.5, 0.0327932098765),
+                   (7, 0.5, 0.0191958707681),
+                   (8, 0.5, 0.0112953186035),
+                   (9, 0.5, 0.00661933257355),
+                   (10, 0.5, 0.003888705)]
+
+        dataset = np.asarray(dataset)
+        FuncData(smirnov, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+        dataset[:, -1] = 1 - dataset[:, -1]
+        FuncData(_smirnovc, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+
+    def test_n_equals_1(self):
+        x = np.linspace(0, 1, 101, endpoint=True)
+        dataset = np.column_stack([[1]*len(x), x, 1-x])
+        FuncData(smirnov, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+        dataset[:, -1] = 1 - dataset[:, -1]
+        FuncData(_smirnovc, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+
+    def test_n_equals_2(self):
+        x = np.linspace(0.5, 1, 101, endpoint=True)
+        p = np.power(1-x, 2)
+        n = np.array([2] * len(x))
+        dataset = np.column_stack([n, x, p])
+        FuncData(smirnov, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+        dataset[:, -1] = 1 - dataset[:, -1]
+        FuncData(_smirnovc, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+
+    def test_n_equals_3(self):
+        x = np.linspace(0.7, 1, 31, endpoint=True)
+        p = np.power(1-x, 3)
+        n = np.array([3] * len(x))
+        dataset = np.column_stack([n, x, p])
+        FuncData(smirnov, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+        dataset[:, -1] = 1 - dataset[:, -1]
+        FuncData(_smirnovc, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+
+    def test_n_large(self):
+        # Test for large values of n.
+        # Probabilities should decrease as n increases.
+        x = 0.4
+        pvals = np.array([smirnov(n, x) for n in range(400, 1100, 20)])
+        dfs = np.diff(pvals)
+        assert_(np.all(dfs <= 0), msg='Not all diffs negative %s' % dfs)
+
+
+class TestSmirnovi:
+    def test_nan(self):
+        assert_(np.isnan(smirnovi(1, np.nan)))
+
+    def test_basic(self):
+        dataset = [(1, 0.4, 0.6),
+                   (1, 0.6, 0.4),
+                   (1, 0.99, 0.01),
+                   (1, 0.01, 0.99),
+                   (2, 0.125 * 0.125, 0.875),
+                   (3, 0.125 * 0.125 * 0.125, 0.875),
+                   (10, 1.0 / 16 ** 10, 1 - 1.0 / 16)]
+
+        dataset = np.asarray(dataset)
+        FuncData(smirnovi, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+        dataset[:, 1] = 1 - dataset[:, 1]
+        FuncData(_smirnovci, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+
+    def test_x_equals_0(self):
+        dataset = [(n, 0, 1) for n in itertools.chain(range(2, 20), range(1010, 1020))]
+        dataset = np.asarray(dataset)
+        FuncData(smirnovi, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+        dataset[:, 1] = 1 - dataset[:, 1]
+        FuncData(_smirnovci, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+
+    def test_x_equals_1(self):
+        dataset = [(n, 1, 0) for n in itertools.chain(range(2, 20), range(1010, 1020))]
+        dataset = np.asarray(dataset)
+        FuncData(smirnovi, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+        dataset[:, 1] = 1 - dataset[:, 1]
+        FuncData(_smirnovci, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+
+    def test_n_equals_1(self):
+        pp = np.linspace(0, 1, 101, endpoint=True)
+        # dataset = np.array([(1, p, 1-p) for p in pp])
+        dataset = np.column_stack([[1]*len(pp), pp, 1-pp])
+        FuncData(smirnovi, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+        dataset[:, 1] = 1 - dataset[:, 1]
+        FuncData(_smirnovci, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+
+    def test_n_equals_2(self):
+        x = np.linspace(0.5, 1, 101, endpoint=True)
+        p = np.power(1-x, 2)
+        n = np.array([2] * len(x))
+        dataset = np.column_stack([n, p, x])
+        FuncData(smirnovi, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+        dataset[:, 1] = 1 - dataset[:, 1]
+        FuncData(_smirnovci, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+
+    def test_n_equals_3(self):
+        x = np.linspace(0.7, 1, 31, endpoint=True)
+        p = np.power(1-x, 3)
+        n = np.array([3] * len(x))
+        dataset = np.column_stack([n, p, x])
+        FuncData(smirnovi, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+        dataset[:, 1] = 1 - dataset[:, 1]
+        FuncData(_smirnovci, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+
+    def test_round_trip(self):
+        def _sm_smi(n, p):
+            return smirnov(n, smirnovi(n, p))
+
+        def _smc_smci(n, p):
+            return _smirnovc(n, _smirnovci(n, p))
+
+        dataset = [(1, 0.4, 0.4),
+                   (1, 0.6, 0.6),
+                   (2, 0.875, 0.875),
+                   (3, 0.875, 0.875),
+                   (3, 0.125, 0.125),
+                   (10, 0.999, 0.999),
+                   (10, 0.0001, 0.0001)]
+
+        dataset = np.asarray(dataset)
+        FuncData(_sm_smi, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+        FuncData(_smc_smci, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+
+    def test_x_equals_0point5(self):
+        dataset = [(1, 0.5, 0.5),
+                   (2, 0.5, 0.366025403784),
+                   (2, 0.25, 0.5),
+                   (3, 0.5, 0.297156508177),
+                   (4, 0.5, 0.255520481121),
+                   (5, 0.5, 0.234559536069),
+                   (6, 0.5, 0.21715965898),
+                   (7, 0.5, 0.202722580034),
+                   (8, 0.5, 0.190621765256),
+                   (9, 0.5, 0.180363501362),
+                   (10, 0.5, 0.17157867006)]
+
+        dataset = np.asarray(dataset)
+        FuncData(smirnovi, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+        dataset[:, 1] = 1 - dataset[:, 1]
+        FuncData(_smirnovci, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+
+
+class TestSmirnovp:
+    def test_nan(self):
+        assert_(np.isnan(_smirnovp(1, np.nan)))
+
+    def test_basic(self):
+        # Check derivative at endpoints
+        n1_10 = np.arange(1, 10)
+        dataset0 = np.column_stack([n1_10, np.full_like(n1_10, 0), np.full_like(n1_10, -1)])
+        FuncData(_smirnovp, dataset0, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+
+        n2_10 = np.arange(2, 10)
+        dataset1 = np.column_stack([n2_10, np.full_like(n2_10, 1.0), np.full_like(n2_10, 0)])
+        FuncData(_smirnovp, dataset1, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+
+    def test_oneminusoneovern(self):
+        # Check derivative at x=1-1/n
+        n = np.arange(1, 20)
+        x = 1.0/n
+        xm1 = 1-1.0/n
+        pp1 = -n * x**(n-1)
+        pp1 -= (1-np.sign(n-2)**2) * 0.5  # n=2, x=0.5, 1-1/n = 0.5, need to adjust
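+        # (1 - np.sign(n-2)**2) equals 1 only when n == 2, where x = 1-1/n
+        # coincides with the discontinuity at x = 1/n and the value needs
+        # the extra -0.5 adjustment.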
+        dataset1 = np.column_stack([n, xm1, pp1])
+        FuncData(_smirnovp, dataset1, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+
+    def test_oneovertwon(self):
+        # Check the derivative at x = 1/(2n).  (The derivative is
+        # discontinuous at x = 1/n, so evaluate away from it, at x = 1/(2n).)
+        n = np.arange(1, 20)
+        x = 1.0/2/n
+        pp = -(n*x+1) * (1+x)**(n-2)
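+        # This is consistent with a local closed form
+        # smirnov(n, x) = 1 - x*(1+x)**(n-1) for 0 <= x <= 1/n, whose
+        # derivative is -(n*x+1)*(1+x)**(n-2).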
+        dataset0 = np.column_stack([n, x, pp])
+        FuncData(_smirnovp, dataset0, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+
+    def test_oneovern(self):
+        # Check the derivative at x = 1/n.  (The derivative is discontinuous
+        # at x = 1/n, and in floating point it is hard to tell whether
+        # x == 1/n exactly, so only use n = a power of 2.)
+        n = 2**np.arange(1, 10)
+        x = 1.0/n
+        pp = -(n*x+1) * (1+x)**(n-2) + 0.5
+        dataset0 = np.column_stack([n, x, pp])
+        FuncData(_smirnovp, dataset0, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+
+    @pytest.mark.xfail(sys.maxsize <= 2**32,
+                       reason="requires 64-bit platform")
+    def test_oneovernclose(self):
+        # Check the derivative near x = 1/n.  (Discontinuous at x = 1/n;
+        # test on either side: x = 1/n +/- 2*eps.)
+        n = np.arange(3, 20)
+
+        x = 1.0/n - 2*np.finfo(float).eps
+        pp = -(n*x+1) * (1+x)**(n-2)
+        dataset0 = np.column_stack([n, x, pp])
+        FuncData(_smirnovp, dataset0, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+
+        x = 1.0/n + 2*np.finfo(float).eps
+        pp = -(n*x+1) * (1+x)**(n-2) + 1
+        dataset1 = np.column_stack([n, x, pp])
+        FuncData(_smirnovp, dataset1, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
+
+
+class TestKolmogorov:
+    def test_nan(self):
+        assert_(np.isnan(kolmogorov(np.nan)))
+
+    def test_basic(self):
+        dataset = [(0, 1.0),
+                   (0.5, 0.96394524366487511),
+                   (0.8275735551899077, 0.5000000000000000),
+                   (1, 0.26999967167735456),
+                   (2, 0.00067092525577969533)]
+
+        dataset = np.asarray(dataset)
+        FuncData(kolmogorov, dataset, (0,), 1, rtol=_rtol).check()
+
+    def test_linspace(self):
+        x = np.linspace(0, 2.0, 21)
+        dataset = [1.0000000000000000, 1.0000000000000000, 0.9999999999994950,
+                   0.9999906941986655, 0.9971923267772983, 0.9639452436648751,
+                   0.8642827790506042, 0.7112351950296890, 0.5441424115741981,
+                   0.3927307079406543, 0.2699996716773546, 0.1777181926064012,
+                   0.1122496666707249, 0.0680922218447664, 0.0396818795381144,
+                   0.0222179626165251, 0.0119520432391966, 0.0061774306344441,
+                   0.0030676213475797, 0.0014636048371873, 0.0006709252557797]
+
+        dataset_c = [0.0000000000000000, 6.609305242245699e-53, 5.050407338670114e-13,
+                     9.305801334566668e-06, 0.0028076732227017, 0.0360547563351249,
+                     0.1357172209493958, 0.2887648049703110, 0.4558575884258019,
+                     0.6072692920593457, 0.7300003283226455, 0.8222818073935988,
+                     0.8877503333292751, 0.9319077781552336, 0.9603181204618857,
+                     0.9777820373834749, 0.9880479567608034, 0.9938225693655559,
+                     0.9969323786524203, 0.9985363951628127, 0.9993290747442203]
+
+        dataset = np.column_stack([x, dataset])
+        FuncData(kolmogorov, dataset, (0,), 1, rtol=_rtol).check()
+        dataset_c = np.column_stack([x, dataset_c])
+        FuncData(_kolmogc, dataset_c, (0,), 1, rtol=_rtol).check()
+
+    def test_linspacei(self):
+        p = np.linspace(0, 1.0, 21, endpoint=True)
+        dataset = [np.inf, 1.3580986393225507, 1.2238478702170823,
+                   1.1379465424937751, 1.0727491749396481, 1.0191847202536859,
+                   0.9730633753323726, 0.9320695842357622, 0.8947644549851197,
+                   0.8601710725555463, 0.8275735551899077, 0.7964065373291559,
+                   0.7661855555617682, 0.7364542888171910, 0.7067326523068980,
+                   0.6764476915028201, 0.6448126061663567, 0.6105590999244391,
+                   0.5711732651063401, 0.5196103791686224, 0.0000000000000000]
+
+        dataset_c = [0.0000000000000000, 0.5196103791686225, 0.5711732651063401,
+                     0.6105590999244391, 0.6448126061663567, 0.6764476915028201,
+                     0.7067326523068980, 0.7364542888171910, 0.7661855555617682,
+                     0.7964065373291559, 0.8275735551899077, 0.8601710725555463,
+                     0.8947644549851196, 0.9320695842357622, 0.9730633753323727,
+                     1.0191847202536859, 1.0727491749396481, 1.1379465424937754,
+                     1.2238478702170825, 1.3580986393225509, np.inf]
+
+        dataset = np.column_stack([p[1:], dataset[1:]])
+        FuncData(kolmogi, dataset, (0,), 1, rtol=_rtol).check()
+        dataset_c = np.column_stack([p[:-1], dataset_c[:-1]])
+        FuncData(_kolmogci, dataset_c, (0,), 1, rtol=_rtol).check()
+
+    def test_smallx(self):
+        epsilon = 0.1 ** np.arange(1, 14)
+        x = np.array([0.571173265106, 0.441027698518, 0.374219690278, 0.331392659217,
+                      0.300820537459, 0.277539353999, 0.259023494805, 0.243829561254,
+                      0.231063086389, 0.220135543236, 0.210641372041, 0.202290283658,
+                      0.19487060742])
+
+        dataset = np.column_stack([x, 1-epsilon])
+        FuncData(kolmogorov, dataset, (0,), 1, rtol=_rtol).check()
+
+    def test_round_trip(self):
+        def _ki_k(_x):
+            return kolmogi(kolmogorov(_x))
+
+        def _kci_kc(_x):
+            return _kolmogci(_kolmogc(_x))
+
+        x = np.linspace(0.0, 2.0, 21, endpoint=True)
+        x02 = x[(x == 0) | (x > 0.21)]  # Exclude 0.1 and 0.2: 0.2 almost succeeds, but 0.1 has no chance.
+        dataset02 = np.column_stack([x02, x02])
+        FuncData(_ki_k, dataset02, (0,), 1, rtol=_rtol).check()
+
+        dataset = np.column_stack([x, x])
+        FuncData(_kci_kc, dataset, (0,), 1, rtol=_rtol).check()
+
+
+class TestKolmogi:
+    def test_nan(self):
+        assert_(np.isnan(kolmogi(np.nan)))
+
+    def test_basic(self):
+        dataset = [(1.0, 0),
+                   (0.96394524366487511, 0.5),
+                   (0.9, 0.571173265106),
+                   (0.5000000000000000, 0.8275735551899077),
+                   (0.26999967167735456, 1),
+                   (0.00067092525577969533, 2)]
+
+        dataset = np.asarray(dataset)
+        FuncData(kolmogi, dataset, (0,), 1, rtol=_rtol).check()
+
+    def test_smallpcdf(self):
+        epsilon = 0.5 ** np.arange(1, 55, 3)
+        # kolmogi(1-p) == _kolmogci(p) if 1-(1-p) == p, but not necessarily
+        # otherwise.  Use epsilon s.t. 1-(1-epsilon) == epsilon, so the same
+        # x-array can be used for both results.
+
+        x = np.array([0.8275735551899077, 0.5345255069097583, 0.4320114038786941,
+                      0.3736868442620478, 0.3345161714909591, 0.3057833329315859,
+                      0.2835052890528936, 0.2655578150208676, 0.2506869966107999,
+                      0.2380971058736669, 0.2272549289962079, 0.2177876361600040,
+                      0.2094254686862041, 0.2019676748836232, 0.1952612948137504,
+                      0.1891874239646641, 0.1836520225050326, 0.1785795904846466])
+
+        dataset = np.column_stack([1-epsilon, x])
+        FuncData(kolmogi, dataset, (0,), 1, rtol=_rtol).check()
+
+        dataset = np.column_stack([epsilon, x])
+        FuncData(_kolmogci, dataset, (0,), 1, rtol=_rtol).check()
+
+    def test_smallpsf(self):
+        epsilon = 0.5 ** np.arange(1, 55, 3)
+        # kolmogi(p) == _kolmogci(1-p) if 1-(1-p) == p, but not necessarily
+        # otherwise.  Use epsilon s.t. 1-(1-epsilon) == epsilon, so the same
+        # x-array can be used for both results.
+
+        x = np.array([0.8275735551899077, 1.3163786275161036, 1.6651092133663343,
+                      1.9525136345289607, 2.2027324540033235, 2.4272929437460848,
+                      2.6327688477341593, 2.8233300509220260, 3.0018183401530627,
+                      3.1702735084088891, 3.3302184446307912, 3.4828258153113318,
+                      3.6290214150152051, 3.7695513262825959, 3.9050272690877326,
+                      4.0359582187082550, 4.1627730557884890, 4.2858371743264527])
+
+        dataset = np.column_stack([epsilon, x])
+        FuncData(kolmogi, dataset, (0,), 1, rtol=_rtol).check()
+
+        dataset = np.column_stack([1-epsilon, x])
+        FuncData(_kolmogci, dataset, (0,), 1, rtol=_rtol).check()
+
+    def test_round_trip(self):
+        def _k_ki(_p):
+            return kolmogorov(kolmogi(_p))
+
+        p = np.linspace(0.1, 1.0, 10, endpoint=True)
+        dataset = np.column_stack([p, p])
+        FuncData(_k_ki, dataset, (0,), 1, rtol=_rtol).check()
+
+
+class TestKolmogp:
+    def test_nan(self):
+        assert_(np.isnan(_kolmogp(np.nan)))
+
+    def test_basic(self):
+        dataset = [(0.000000, -0.0),
+                   (0.200000, -1.532420541338916e-10),
+                   (0.400000, -0.1012254419260496),
+                   (0.600000, -1.324123244249925),
+                   (0.800000, -1.627024345636592),
+                   (1.000000, -1.071948558356941),
+                   (1.200000, -0.538512430720529),
+                   (1.400000, -0.2222133182429472),
+                   (1.600000, -0.07649302775520538),
+                   (1.800000, -0.02208687346347873),
+                   (2.000000, -0.005367402045629683)]
+
+        dataset = np.asarray(dataset)
+        FuncData(_kolmogp, dataset, (0,), 1, rtol=_rtol).check()
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_lambertw.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_lambertw.py
new file mode 100644
index 00000000..075051a7
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_lambertw.py
@@ -0,0 +1,109 @@
+#
+# Tests for the lambertw function,
+# Adapted from the MPMath tests [1] by Yosef Meller, mellerf@netvision.net.il
+# Distributed under the same license as SciPy itself.
+#
+# [1] mpmath source code, Subversion revision 992
+#     http://code.google.com/p/mpmath/source/browse/trunk/mpmath/tests/test_functions2.py?spec=svn994&r=992
+
+import pytest
+import numpy as np
+from numpy.testing import assert_, assert_equal, assert_array_almost_equal
+from scipy.special import lambertw
+from numpy import nan, inf, pi, e, isnan, log, r_, array, complex_
+
+from scipy.special._testutils import FuncData
+
+
+def test_values():
+    assert_(isnan(lambertw(nan)))
+    assert_equal(lambertw(inf,1).real, inf)
+    assert_equal(lambertw(inf,1).imag, 2*pi)
+    assert_equal(lambertw(-inf,1).real, inf)
+    assert_equal(lambertw(-inf,1).imag, 3*pi)
+
+    assert_equal(lambertw(1.), lambertw(1., 0))
+
+    data = [
+        (0,0, 0),
+        (0+0j,0, 0),
+        (inf,0, inf),
+        (0,-1, -inf),
+        (0,1, -inf),
+        (0,3, -inf),
+        (e,0, 1),
+        (1,0, 0.567143290409783873),
+        (-pi/2,0, 1j*pi/2),
+        (-log(2)/2,0, -log(2)),
+        (0.25,0, 0.203888354702240164),
+        (-0.25,0, -0.357402956181388903),
+        (-1./10000,0, -0.000100010001500266719),
+        (-0.25,-1, -2.15329236411034965),
+        (0.25,-1, -3.00899800997004620-4.07652978899159763j),
+        (-0.25,-1, -2.15329236411034965),
+        (0.25,1, -3.00899800997004620+4.07652978899159763j),
+        (-0.25,1, -3.48973228422959210+7.41405453009603664j),
+        (-4,0, 0.67881197132094523+1.91195078174339937j),
+        (-4,1, -0.66743107129800988+7.76827456802783084j),
+        (-4,-1, 0.67881197132094523-1.91195078174339937j),
+        (1000,0, 5.24960285240159623),
+        (1000,1, 4.91492239981054535+5.44652615979447070j),
+        (1000,-1, 4.91492239981054535-5.44652615979447070j),
+        (1000,5, 3.5010625305312892+29.9614548941181328j),
+        (3+4j,0, 1.281561806123775878+0.533095222020971071j),
+        (-0.4+0.4j,0, -0.10396515323290657+0.61899273315171632j),
+        (3+4j,1, -0.11691092896595324+5.61888039871282334j),
+        (3+4j,-1, 0.25856740686699742-3.85211668616143559j),
+        (-0.5,-1, -0.794023632344689368-0.770111750510379110j),
+        (-1./10000,1, -11.82350837248724344+6.80546081842002101j),
+        (-1./10000,-1, -11.6671145325663544),
+        (-1./10000,-2, -11.82350837248724344-6.80546081842002101j),
+        (-1./100000,4, -14.9186890769540539+26.1856750178782046j),
+        (-1./100000,5, -15.0931437726379218666+32.5525721210262290086j),
+        ((2+1j)/10,0, 0.173704503762911669+0.071781336752835511j),
+        ((2+1j)/10,1, -3.21746028349820063+4.56175438896292539j),
+        ((2+1j)/10,-1, -3.03781405002993088-3.53946629633505737j),
+        ((2+1j)/10,4, -4.6878509692773249+23.8313630697683291j),
+        (-(2+1j)/10,0, -0.226933772515757933-0.164986470020154580j),
+        (-(2+1j)/10,1, -2.43569517046110001+0.76974067544756289j),
+        (-(2+1j)/10,-1, -3.54858738151989450-6.91627921869943589j),
+        (-(2+1j)/10,4, -4.5500846928118151+20.6672982215434637j),
+        (pi,0, 1.073658194796149172092178407024821347547745350410314531),
+
+        # Former bug in the generated branch:
+        (-0.5+0.002j,0, -0.78917138132659918344 + 0.76743539379990327749j),
+        (-0.5-0.002j,0, -0.78917138132659918344 - 0.76743539379990327749j),
+        (-0.448+0.4j,0, -0.11855133765652382241 + 0.66570534313583423116j),
+        (-0.448-0.4j,0, -0.11855133765652382241 - 0.66570534313583423116j),
+    ]
+    data = array(data, dtype=complex_)
+
+    def w(x, y):
+        return lambertw(x, y.real.astype(int))
+    with np.errstate(all='ignore'):
+        FuncData(w, data, (0,1), 2, rtol=1e-10, atol=1e-13).check()
+
+
+def test_ufunc():
+    assert_array_almost_equal(
+        lambertw(r_[0., e, 1.]), r_[0., 1., 0.567143290409783873])
+
+
+def test_lambertw_ufunc_loop_selection():
+    # see https://github.com/scipy/scipy/issues/4895
+    dt = np.dtype(np.complex128)
+    assert_equal(lambertw(0, 0, 0).dtype, dt)
+    assert_equal(lambertw([0], 0, 0).dtype, dt)
+    assert_equal(lambertw(0, [0], 0).dtype, dt)
+    assert_equal(lambertw(0, 0, [0]).dtype, dt)
+    assert_equal(lambertw([0], [0], [0]).dtype, dt)
+
+
+@pytest.mark.parametrize('z', [1e-316, -2e-320j, -5e-318+1e-320j])
+def test_lambertw_subnormal_k0(z):
+    # Verify that subnormal inputs are handled correctly on
+    # the branch k=0 (regression test for gh-16291).
+    w = lambertw(z)
+    # For values this small, we can be sure that numerically,
+    # lambertw(z) is z.
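+    # (Near the origin, W(z) = z - z**2 + O(z**3); for |z| ~ 1e-316 the
+    # z**2 term underflows, so W(z) rounds to z exactly.)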
+    assert w == z
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_log_softmax.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_log_softmax.py
new file mode 100644
index 00000000..4b3a5071
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_log_softmax.py
@@ -0,0 +1,109 @@
+import numpy as np
+from numpy.testing import assert_allclose
+
+import pytest
+
+import scipy.special as sc
+
+
+@pytest.mark.parametrize('x, expected', [
+    (np.array([1000, 1]), np.array([0, -999])),
+
+    # Expected value computed using mpmath (with mpmath.mp.dps = 200) and then
+    # converted to float.
+    (np.arange(4), np.array([-3.4401896985611953,
+                             -2.4401896985611953,
+                             -1.4401896985611953,
+                             -0.44018969856119533]))
+])
+def test_log_softmax(x, expected):
+    assert_allclose(sc.log_softmax(x), expected, rtol=1e-13)
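+
+# A sketch (assuming mpmath is installed) of how the second expected array
+# above could be regenerated:
+#
+#   import mpmath
+#   mpmath.mp.dps = 200
+#   logZ = mpmath.log(mpmath.fsum(mpmath.exp(t) for t in range(4)))
+#   [float(t - logZ) for t in range(4)]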
+
+
+@pytest.fixture
+def log_softmax_x():
+    x = np.arange(4)
+    return x
+
+
+@pytest.fixture
+def log_softmax_expected():
+    # Expected value computed using mpmath (with mpmath.mp.dps = 200) and then
+    # converted to float.
+    expected = np.array([-3.4401896985611953,
+                         -2.4401896985611953,
+                         -1.4401896985611953,
+                         -0.44018969856119533])
+    return expected
+
+
+def test_log_softmax_translation(log_softmax_x, log_softmax_expected):
+    # Translation property.  If all the values are shifted by the same
+    # amount, the log_softmax result does not change.
+    x = log_softmax_x + 100
+    expected = log_softmax_expected
+    assert_allclose(sc.log_softmax(x), expected, rtol=1e-13)
+
+
+def test_log_softmax_noneaxis(log_softmax_x, log_softmax_expected):
+    # When axis=None, log_softmax operates on the entire array and
+    # preserves the shape.
+    x = log_softmax_x.reshape(2, 2)
+    expected = log_softmax_expected.reshape(2, 2)
+    assert_allclose(sc.log_softmax(x), expected, rtol=1e-13)
+
+
+@pytest.mark.parametrize('axis_2d, expected_2d', [
+    (0, np.log(0.5) * np.ones((2, 2))),
+    (1, np.array([[0, -999], [0, -999]]))
+])
+def test_axes(axis_2d, expected_2d):
+    assert_allclose(
+        sc.log_softmax([[1000, 1], [1000, 1]], axis=axis_2d),
+        expected_2d,
+        rtol=1e-13,
+    )
+
+
+@pytest.fixture
+def log_softmax_2d_x():
+    x = np.arange(8).reshape(2, 4)
+    return x
+
+
+@pytest.fixture
+def log_softmax_2d_expected():
+    # Expected value computed using mpmath (with mpmath.mp.dps = 200) and then
+    # converted to float.
+    expected = np.array([[-3.4401896985611953,
+                         -2.4401896985611953,
+                         -1.4401896985611953,
+                         -0.44018969856119533],
+                        [-3.4401896985611953,
+                         -2.4401896985611953,
+                         -1.4401896985611953,
+                         -0.44018969856119533]])
+    return expected
+
+
+def test_log_softmax_2d_axis1(log_softmax_2d_x, log_softmax_2d_expected):
+    x = log_softmax_2d_x
+    expected = log_softmax_2d_expected
+    assert_allclose(sc.log_softmax(x, axis=1), expected, rtol=1e-13)
+
+
+def test_log_softmax_2d_axis0(log_softmax_2d_x, log_softmax_2d_expected):
+    x = log_softmax_2d_x.T
+    expected = log_softmax_2d_expected.T
+    assert_allclose(sc.log_softmax(x, axis=0), expected, rtol=1e-13)
+
+
+def test_log_softmax_3d(log_softmax_2d_x, log_softmax_2d_expected):
+    # 3-d input, with a tuple for the axis.
+    x_3d = log_softmax_2d_x.reshape(2, 2, 2)
+    expected_3d = log_softmax_2d_expected.reshape(2, 2, 2)
+    assert_allclose(sc.log_softmax(x_3d, axis=(1, 2)), expected_3d, rtol=1e-13)
+
+
+def test_log_softmax_scalar():
+    assert_allclose(sc.log_softmax(1.0), 0.0, rtol=1e-13)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_loggamma.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_loggamma.py
new file mode 100644
index 00000000..2fcb5a20
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_loggamma.py
@@ -0,0 +1,70 @@
+import numpy as np
+from numpy.testing import assert_allclose, assert_
+
+from scipy.special._testutils import FuncData
+from scipy.special import gamma, gammaln, loggamma
+
+
+def test_identities1():
+    # test the identity exp(loggamma(z)) = gamma(z)
+    x = np.array([-99.5, -9.5, -0.5, 0.5, 9.5, 99.5])
+    y = x.copy()
+    x, y = np.meshgrid(x, y)
+    z = (x + 1J*y).flatten()
+    dataset = np.vstack((z, gamma(z))).T
+
+    def f(z):
+        return np.exp(loggamma(z))
+
+    FuncData(f, dataset, 0, 1, rtol=1e-14, atol=1e-14).check()
+
+
+def test_identities2():
+    # test the identity loggamma(z + 1) = log(z) + loggamma(z)
+    x = np.array([-99.5, -9.5, -0.5, 0.5, 9.5, 99.5])
+    y = x.copy()
+    x, y = np.meshgrid(x, y)
+    z = (x + 1J*y).flatten()
+    dataset = np.vstack((z, np.log(z) + loggamma(z))).T
+
+    def f(z):
+        return loggamma(z + 1)
+
+    FuncData(f, dataset, 0, 1, rtol=1e-14, atol=1e-14).check()
+
+
+def test_complex_dispatch_realpart():
+    # Test that the real parts of loggamma and gammaln agree on the
+    # real axis.
+    x = np.r_[-np.logspace(10, -10), np.logspace(-10, 10)] + 0.5
+
+    dataset = np.vstack((x, gammaln(x))).T
+
+    def f(z):
+        z = np.array(z, dtype='complex128')
+        return loggamma(z).real
+
+    FuncData(f, dataset, 0, 1, rtol=1e-14, atol=1e-14).check()
+
+
+def test_real_dispatch():
+    x = np.logspace(-10, 10) + 0.5
+    dataset = np.vstack((x, gammaln(x))).T
+
+    FuncData(loggamma, dataset, 0, 1, rtol=1e-14, atol=1e-14).check()
+    assert_(loggamma(0) == np.inf)
+    assert_(np.isnan(loggamma(-1)))
+
+
+def test_gh_6536():
+    z = loggamma(complex(-3.4, +0.0))
+    zbar = loggamma(complex(-3.4, -0.0))
+    assert_allclose(z, zbar.conjugate(), rtol=1e-15, atol=0)
+
+
+def test_branch_cut():
+    # Make sure negative zero is treated correctly
+    x = -np.logspace(300, -30, 100)
+    z = np.asarray([complex(x0, 0.0) for x0 in x])
+    zbar = np.asarray([complex(x0, -0.0) for x0 in x])
+    assert_allclose(z, zbar.conjugate(), rtol=1e-15, atol=0)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_logit.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_logit.py
new file mode 100644
index 00000000..24247ac6
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_logit.py
@@ -0,0 +1,145 @@
+import numpy as np
+from numpy.testing import (assert_equal, assert_almost_equal,
+                           assert_allclose)
+from scipy.special import logit, expit, log_expit
+
+
+class TestLogit:
+    def check_logit_out(self, dtype, expected):
+        a = np.linspace(0, 1, 10)
+        a = np.array(a, dtype=dtype)
+        with np.errstate(divide='ignore'):
+            actual = logit(a)
+
+        assert_almost_equal(actual, expected)
+
+        assert_equal(actual.dtype, np.dtype(dtype))
+
+    def test_float32(self):
+        expected = np.array([-np.inf, -2.07944155,
+                            -1.25276291, -0.69314718,
+                            -0.22314353, 0.22314365,
+                            0.6931473, 1.25276303,
+                            2.07944155, np.inf], dtype=np.float32)
+        self.check_logit_out('f4', expected)
+
+    def test_float64(self):
+        expected = np.array([-np.inf, -2.07944154,
+                            -1.25276297, -0.69314718,
+                            -0.22314355, 0.22314355,
+                            0.69314718, 1.25276297,
+                            2.07944154, np.inf])
+        self.check_logit_out('f8', expected)
+
+    def test_nan(self):
+        expected = np.array([np.nan]*4)
+        with np.errstate(invalid='ignore'):
+            actual = logit(np.array([-3., -2., 2., 3.]))
+
+        assert_equal(expected, actual)
+
+
+class TestExpit:
+    def check_expit_out(self, dtype, expected):
+        a = np.linspace(-4, 4, 10)
+        a = np.array(a, dtype=dtype)
+        actual = expit(a)
+        assert_almost_equal(actual, expected)
+        assert_equal(actual.dtype, np.dtype(dtype))
+
+    def test_float32(self):
+        expected = np.array([0.01798621, 0.04265125,
+                            0.09777259, 0.20860852,
+                            0.39068246, 0.60931754,
+                            0.79139149, 0.9022274,
+                            0.95734876, 0.98201376], dtype=np.float32)
+        self.check_expit_out('f4', expected)
+
+    def test_float64(self):
+        expected = np.array([0.01798621, 0.04265125,
+                            0.0977726, 0.20860853,
+                            0.39068246, 0.60931754,
+                            0.79139147, 0.9022274,
+                            0.95734875, 0.98201379])
+        self.check_expit_out('f8', expected)
+
+    def test_large(self):
+        for dtype in (np.float32, np.float64, np.longdouble):
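+            # On typical platforms, 88/89, 709/710 and 11356/11357 bracket
+            # the overflow thresholds of exp() for float32, float64 and
+            # extended-precision long double respectively.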
+            for n in (88, 89, 709, 710, 11356, 11357):
+                n = np.array(n, dtype=dtype)
+                assert_allclose(expit(n), 1.0, atol=1e-20)
+                assert_allclose(expit(-n), 0.0, atol=1e-20)
+                assert_equal(expit(n).dtype, dtype)
+                assert_equal(expit(-n).dtype, dtype)
+
+
+class TestLogExpit:
+
+    def test_large_negative(self):
+        x = np.array([-10000.0, -750.0, -500.0, -35.0])
+        y = log_expit(x)
+        assert_equal(y, x)
+
+    def test_large_positive(self):
+        x = np.array([750.0, 1000.0, 10000.0])
+        y = log_expit(x)
+        # y will contain -0.0, and -0.0 is used in the expected value,
+        # but assert_equal does not check the sign of zeros, and I don't
+        # think the sign is an essential part of the test (i.e. it would
+        # probably be OK if log_expit(1000) returned 0.0 instead of -0.0).
+        assert_equal(y, np.array([-0.0, -0.0, -0.0]))
+
+    def test_basic_float64(self):
+        x = np.array([-32, -20, -10, -3, -1, -0.1, -1e-9,
+                      0, 1e-9, 0.1, 1, 10, 100, 500, 710, 725, 735])
+        y = log_expit(x)
+        #
+        # Expected values were computed with mpmath:
+        #
+        #   import mpmath
+        #
+        #   mpmath.mp.dps = 100
+        #
+        #   def mp_log_expit(x):
+        #       return -mpmath.log1p(mpmath.exp(-x))
+        #
+        #   expected = [float(mp_log_expit(t)) for t in x]
+        #
+        expected = [-32.000000000000014, -20.000000002061153,
+                    -10.000045398899218, -3.048587351573742,
+                    -1.3132616875182228, -0.7443966600735709,
+                    -0.6931471810599453, -0.6931471805599453,
+                    -0.6931471800599454, -0.6443966600735709,
+                    -0.3132616875182228, -4.539889921686465e-05,
+                    -3.720075976020836e-44, -7.124576406741286e-218,
+                    -4.47628622567513e-309, -1.36930634e-315,
+                    -6.217e-320]
+
+        # When tested locally, only one value in y was not exactly equal to
+        # expected.  That was for x=1, and the y value differed from the
+        # expected by 1 ULP.  For this test, however, I'll use rtol=1e-15.
+        assert_allclose(y, expected, rtol=1e-15)
+
+    def test_basic_float32(self):
+        x = np.array([-32, -20, -10, -3, -1, -0.1, -1e-9,
+                      0, 1e-9, 0.1, 1, 10, 100], dtype=np.float32)
+        y = log_expit(x)
+        #
+        # Expected values were computed with mpmath:
+        #
+        #   import mpmath
+        #
+        #   mpmath.mp.dps = 100
+        #
+        #   def mp_log_expit(x):
+        #       return -mpmath.log1p(mpmath.exp(-x))
+        #
+        #   expected = [np.float32(mp_log_expit(t)) for t in x]
+        #
+        expected = np.array([-32.0, -20.0, -10.000046, -3.0485873,
+                             -1.3132616, -0.7443967, -0.6931472,
+                             -0.6931472, -0.6931472, -0.64439666,
+                             -0.3132617, -4.5398898e-05, -3.8e-44],
+                            dtype=np.float32)
+
+        assert_allclose(y, expected, rtol=5e-7)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_logsumexp.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_logsumexp.py
new file mode 100644
index 00000000..6f96408d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_logsumexp.py
@@ -0,0 +1,194 @@
+import numpy as np
+from numpy.testing import (assert_almost_equal, assert_equal, assert_allclose,
+                           assert_array_almost_equal, assert_)
+
+from scipy.special import logsumexp, softmax
+
+
+def test_logsumexp():
+    # Test whether logsumexp() function correctly handles large inputs.
+    a = np.arange(200)
+    desired = np.log(np.sum(np.exp(a)))
+    assert_almost_equal(logsumexp(a), desired)
+
+    # Now test with large numbers
+    b = [1000, 1000]
+    desired = 1000.0 + np.log(2.0)
+    assert_almost_equal(logsumexp(b), desired)
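+    # (Internally, logsumexp shifts by the maximum:
+    # logsumexp(a) = max(a) + log(sum(exp(a - max(a)))),
+    # so exp() never overflows for inputs like these.)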
+
+    n = 1000
+    b = np.full(n, 10000, dtype='float64')
+    desired = 10000.0 + np.log(n)
+    assert_almost_equal(logsumexp(b), desired)
+
+    x = np.array([1e-40] * 1000000)
+    logx = np.log(x)
+
+    X = np.vstack([x, x])
+    logX = np.vstack([logx, logx])
+    assert_array_almost_equal(np.exp(logsumexp(logX)), X.sum())
+    assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
+    assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
+
+    # Handling special values properly
+    assert_equal(logsumexp(np.inf), np.inf)
+    assert_equal(logsumexp(-np.inf), -np.inf)
+    assert_equal(logsumexp(np.nan), np.nan)
+    assert_equal(logsumexp([-np.inf, -np.inf]), -np.inf)
+
+    # Handling an array with different magnitudes on the axes
+    assert_array_almost_equal(logsumexp([[1e10, 1e-10],
+                                         [-1e10, -np.inf]], axis=-1),
+                              [1e10, -1e10])
+
+    # Test keeping dimensions
+    assert_array_almost_equal(logsumexp([[1e10, 1e-10],
+                                         [-1e10, -np.inf]],
+                                        axis=-1,
+                                        keepdims=True),
+                              [[1e10], [-1e10]])
+
+    # Test multiple axes
+    assert_array_almost_equal(logsumexp([[1e10, 1e-10],
+                                         [-1e10, -np.inf]],
+                                        axis=(-1,-2)),
+                              1e10)
+
+
+def test_logsumexp_b():
+    a = np.arange(200)
+    b = np.arange(200, 0, -1)
+    desired = np.log(np.sum(b*np.exp(a)))
+    assert_almost_equal(logsumexp(a, b=b), desired)
+
+    a = [1000, 1000]
+    b = [1.2, 1.2]
+    desired = 1000 + np.log(2 * 1.2)
+    assert_almost_equal(logsumexp(a, b=b), desired)
+
+    x = np.array([1e-40] * 100000)
+    b = np.linspace(1, 1000, 100000)
+    logx = np.log(x)
+
+    X = np.vstack((x, x))
+    logX = np.vstack((logx, logx))
+    B = np.vstack((b, b))
+    assert_array_almost_equal(np.exp(logsumexp(logX, b=B)), (B * X).sum())
+    assert_array_almost_equal(np.exp(logsumexp(logX, b=B, axis=0)),
+                                (B * X).sum(axis=0))
+    assert_array_almost_equal(np.exp(logsumexp(logX, b=B, axis=1)),
+                                (B * X).sum(axis=1))
+
+
+def test_logsumexp_sign():
+    a = [1,1,1]
+    b = [1,-1,-1]
+
+    r, s = logsumexp(a, b=b, return_sign=True)
+    assert_almost_equal(r,1)
+    assert_equal(s,-1)
+
+
+def test_logsumexp_sign_zero():
+    a = [1,1]
+    b = [1,-1]
+
+    r, s = logsumexp(a, b=b, return_sign=True)
+    assert_(not np.isfinite(r))
+    assert_(not np.isnan(r))
+    assert_(r < 0)
+    assert_equal(s,0)
+
+
+def test_logsumexp_sign_shape():
+    a = np.ones((1,2,3,4))
+    b = np.ones_like(a)
+
+    r, s = logsumexp(a, axis=2, b=b, return_sign=True)
+
+    assert_equal(r.shape, s.shape)
+    assert_equal(r.shape, (1,2,4))
+
+    r, s = logsumexp(a, axis=(1,3), b=b, return_sign=True)
+
+    assert_equal(r.shape, s.shape)
+    assert_equal(r.shape, (1,3))
+
+
+def test_logsumexp_shape():
+    a = np.ones((1, 2, 3, 4))
+    b = np.ones_like(a)
+
+    r = logsumexp(a, axis=2, b=b)
+    assert_equal(r.shape, (1, 2, 4))
+
+    r = logsumexp(a, axis=(1, 3), b=b)
+    assert_equal(r.shape, (1, 3))
+
+
+def test_logsumexp_b_zero():
+    a = [1,10000]
+    b = [1,0]
+
+    assert_almost_equal(logsumexp(a, b=b), 1)
+
+
+def test_logsumexp_b_shape():
+    a = np.zeros((4,1,2,1))
+    b = np.ones((3,1,5))
+
+    logsumexp(a, b=b)
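+    # a (4,1,2,1) and b (3,1,5) broadcast to shape (4,3,2,5); this just
+    # checks that broadcastable but non-identical shapes are accepted.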
+
+
+def test_softmax_fixtures():
+    assert_allclose(softmax([1000, 0, 0, 0]), np.array([1, 0, 0, 0]),
+                    rtol=1e-13)
+    assert_allclose(softmax([1, 1]), np.array([.5, .5]), rtol=1e-13)
+    assert_allclose(softmax([0, 1]), np.array([1, np.e])/(1 + np.e),
+                    rtol=1e-13)
+
+    # Expected value computed using mpmath (with mpmath.mp.dps = 200) and then
+    # converted to float.
+    x = np.arange(4)
+    expected = np.array([0.03205860328008499,
+                         0.08714431874203256,
+                         0.23688281808991013,
+                         0.6439142598879722])
+
+    assert_allclose(softmax(x), expected, rtol=1e-13)
+
+    # Translation property.  If all the values are changed by the same amount,
+    # the softmax result does not change.
+    assert_allclose(softmax(x + 100), expected, rtol=1e-13)
+
+    # When axis=None, softmax operates on the entire array, and preserves
+    # the shape.
+    assert_allclose(softmax(x.reshape(2, 2)), expected.reshape(2, 2),
+                    rtol=1e-13)
+
+
+def test_softmax_multi_axes():
+    assert_allclose(softmax([[1000, 0], [1000, 0]], axis=0),
+                    np.array([[.5, .5], [.5, .5]]), rtol=1e-13)
+    assert_allclose(softmax([[1000, 0], [1000, 0]], axis=1),
+                    np.array([[1, 0], [1, 0]]), rtol=1e-13)
+
+    # Expected value computed using mpmath (with mpmath.mp.dps = 200) and then
+    # converted to float.
+    x = np.array([[-25, 0, 25, 50],
+                  [1, 325, 749, 750]])
+    expected = np.array([[2.678636961770877e-33,
+                          1.9287498479371314e-22,
+                          1.3887943864771144e-11,
+                          0.999999999986112],
+                         [0.0,
+                          1.9444526359919372e-185,
+                          0.2689414213699951,
+                          0.7310585786300048]])
+    assert_allclose(softmax(x, axis=1), expected, rtol=1e-13)
+    assert_allclose(softmax(x.T, axis=0), expected.T, rtol=1e-13)
+
+    # 3-d input, with a tuple for the axis.
+    x3d = x.reshape(2, 2, 2)
+    assert_allclose(softmax(x3d, axis=(1, 2)), expected.reshape(2, 2, 2),
+                    rtol=1e-13)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_mpmath.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_mpmath.py
new file mode 100644
index 00000000..0d011b1b
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_mpmath.py
@@ -0,0 +1,2027 @@
+"""
+Test SciPy functions versus mpmath, if available.
+
+"""
+import numpy as np
+from numpy.testing import assert_, assert_allclose
+from numpy import pi
+import pytest
+import itertools
+
+from scipy._lib import _pep440
+
+import scipy.special as sc
+from scipy.special._testutils import (
+    MissingModule, check_version, FuncData,
+    assert_func_equal)
+from scipy.special._mptestutils import (
+    Arg, FixedArg, ComplexArg, IntArg, assert_mpmath_equal,
+    nonfunctional_tooslow, trace_args, time_limited, exception_to_nan,
+    inf_to_nan)
+from scipy.special._ufuncs import (
+    _sinpi, _cospi, _lgam1p, _lanczos_sum_expg_scaled, _log1pmx,
+    _igam_fac)
+
+try:
+    import mpmath
+except ImportError:
+    mpmath = MissingModule('mpmath')
+
+
+# ------------------------------------------------------------------------------
+# expi
+# ------------------------------------------------------------------------------
+
+@check_version(mpmath, '0.10')
+def test_expi_complex():
+    dataset = []
+    for r in np.logspace(-99, 2, 10):
+        for p in np.linspace(0, 2*np.pi, 30):
+            z = r*np.exp(1j*p)
+            dataset.append((z, complex(mpmath.ei(z))))
+    dataset = np.array(dataset, dtype=np.complex_)
+
+    FuncData(sc.expi, dataset, 0, 1).check()
+
+
+# ------------------------------------------------------------------------------
+# expn
+# ------------------------------------------------------------------------------
+
+@check_version(mpmath, '0.19')
+def test_expn_large_n():
+    # Test the transition to the asymptotic regime of n.
+    dataset = []
+    for n in [50, 51]:
+        for x in np.logspace(0, 4, 200):
+            with mpmath.workdps(100):
+                dataset.append((n, x, float(mpmath.expint(n, x))))
+    dataset = np.asarray(dataset)
+
+    FuncData(sc.expn, dataset, (0, 1), 2, rtol=1e-13).check()
+
+# ------------------------------------------------------------------------------
+# hyp0f1
+# ------------------------------------------------------------------------------
+
+
+@check_version(mpmath, '0.19')
+def test_hyp0f1_gh5764():
+    # Do a small and somewhat systematic test that runs quickly
+    dataset = []
+    axis = [-99.5, -9.5, -0.5, 0.5, 9.5, 99.5]
+    for v in axis:
+        for x in axis:
+            for y in axis:
+                z = x + 1j*y
+                # mpmath computes the answer correctly at dps ~ 17 but
+                # fails for 20 < dps < 120 (uses a different method);
+                # set the dps high enough that this isn't an issue
+                with mpmath.workdps(120):
+                    res = complex(mpmath.hyp0f1(v, z))
+                dataset.append((v, z, res))
+    dataset = np.array(dataset)
+
+    FuncData(lambda v, z: sc.hyp0f1(v.real, z), dataset, (0, 1), 2,
+             rtol=1e-13).check()
+
+
+@check_version(mpmath, '0.19')
+def test_hyp0f1_gh_1609():
+    # this is a regression test for gh-1609
+    vv = np.linspace(150, 180, 21)
+    af = sc.hyp0f1(vv, 0.5)
+    mf = np.array([mpmath.hyp0f1(v, 0.5) for v in vv])
+    assert_allclose(af, mf.astype(float), rtol=1e-12)
+
+
+# ------------------------------------------------------------------------------
+# hyperu
+# ------------------------------------------------------------------------------
+
+@check_version(mpmath, '1.1.0')
+def test_hyperu_around_0():
+    dataset = []
+    # DLMF 13.2.14-15 test points.
+    for n in np.arange(-5, 5):
+        for b in np.linspace(-5, 5, 20):
+            a = -n
+            dataset.append((a, b, 0, float(mpmath.hyperu(a, b, 0))))
+            a = -n + b - 1
+            dataset.append((a, b, 0, float(mpmath.hyperu(a, b, 0))))
+    # DLMF 13.2.16-22 test points.
+    for a in [-10.5, -1.5, -0.5, 0, 0.5, 1, 10]:
+        for b in [-1.0, -0.5, 0, 0.5, 1, 1.5, 2, 2.5]:
+            dataset.append((a, b, 0, float(mpmath.hyperu(a, b, 0))))
+    dataset = np.array(dataset)
+
+    FuncData(sc.hyperu, dataset, (0, 1, 2), 3, rtol=1e-15, atol=5e-13).check()
+
+
+# ------------------------------------------------------------------------------
+# hyp2f1
+# ------------------------------------------------------------------------------
+
+@check_version(mpmath, '1.0.0')
+def test_hyp2f1_strange_points():
+    pts = [
+        (2, -1, -1, 0.7),  # expected: 2.4
+        (2, -2, -2, 0.7),  # expected: 3.87
+    ]
+    pts += list(itertools.product([2, 1, -0.7, -1000], repeat=4))
+    pts = [
+        (a, b, c, x) for a, b, c, x in pts
+        if b == c and round(b) == b and b < 0 and b != -1000
+    ]
+    kw = dict(eliminate=True)
+    dataset = [p + (float(mpmath.hyp2f1(*p, **kw)),) for p in pts]
+    dataset = np.array(dataset, dtype=np.float_)
+
+    FuncData(sc.hyp2f1, dataset, (0,1,2,3), 4, rtol=1e-10).check()
+
+
+@check_version(mpmath, '0.13')
+def test_hyp2f1_real_some_points():
+    pts = [
+        (1, 2, 3, 0),
+        (1./3, 2./3, 5./6, 27./32),
+        (1./4, 1./2, 3./4, 80./81),
+        (2,-2, -3, 3),
+        (2, -3, -2, 3),
+        (2, -1.5, -1.5, 3),
+        (1, 2, 3, 0),
+        (0.7235, -1, -5, 0.3),
+        (0.25, 1./3, 2, 0.999),
+        (0.25, 1./3, 2, -1),
+        (2, 3, 5, 0.99),
+        (3./2, -0.5, 3, 0.99),
+        (2, 2.5, -3.25, 0.999),
+        (-8, 18.016500331508873, 10.805295997850628, 0.90875647507000001),
+        (-10, 900, -10.5, 0.99),
+        (-10, 900, 10.5, 0.99),
+        (-1, 2, 1, 1.0),
+        (-1, 2, 1, -1.0),
+        (-3, 13, 5, 1.0),
+        (-3, 13, 5, -1.0),
+        (0.5, 1 - 270.5, 1.5, 0.999**2),  # from issue 1561
+    ]
+    dataset = [p + (float(mpmath.hyp2f1(*p)),) for p in pts]
+    dataset = np.array(dataset, dtype=np.float_)
+
+    with np.errstate(invalid='ignore'):
+        FuncData(sc.hyp2f1, dataset, (0,1,2,3), 4, rtol=1e-10).check()
+
+
+@check_version(mpmath, '0.14')
+def test_hyp2f1_some_points_2():
+    # Taken from mpmath unit tests -- this point failed for mpmath 0.13 but
+    # was fixed in their SVN since then
+    pts = [
+        (112, (51,10), (-9,10), -0.99999),
+        (10,-900,10.5,0.99),
+        (10,-900,-10.5,0.99),
+    ]
+
+    def fev(x):
+        if isinstance(x, tuple):
+            return float(x[0]) / x[1]
+        else:
+            return x
+
+    dataset = [tuple(map(fev, p)) + (float(mpmath.hyp2f1(*p)),) for p in pts]
+    dataset = np.array(dataset, dtype=np.float_)
+
+    FuncData(sc.hyp2f1, dataset, (0,1,2,3), 4, rtol=1e-10).check()
+
+
+@check_version(mpmath, '0.13')
+def test_hyp2f1_real_some():
+    dataset = []
+    for a in [-10, -5, -1.8, 1.8, 5, 10]:
+        for b in [-2.5, -1, 1, 7.4]:
+            for c in [-9, -1.8, 5, 20.4]:
+                for z in [-10, -1.01, -0.99, 0, 0.6, 0.95, 1.5, 10]:
+                    try:
+                        v = float(mpmath.hyp2f1(a, b, c, z))
+                    except Exception:
+                        continue
+                    dataset.append((a, b, c, z, v))
+    dataset = np.array(dataset, dtype=np.float_)
+
+    with np.errstate(invalid='ignore'):
+        FuncData(sc.hyp2f1, dataset, (0,1,2,3), 4, rtol=1e-9,
+                 ignore_inf_sign=True).check()
+
+
+@check_version(mpmath, '0.12')
+@pytest.mark.slow
+def test_hyp2f1_real_random():
+    npoints = 500
+    dataset = np.zeros((npoints, 5), np.float_)
+
+    np.random.seed(1234)
+    dataset[:, 0] = np.random.pareto(1.5, npoints)
+    dataset[:, 1] = np.random.pareto(1.5, npoints)
+    dataset[:, 2] = np.random.pareto(1.5, npoints)
+    dataset[:, 3] = 2*np.random.rand(npoints) - 1
+
+    # Flip the signs of a, b, c independently for each point.
+    dataset[:, 0] *= (-1)**np.random.randint(2, size=npoints)
+    dataset[:, 1] *= (-1)**np.random.randint(2, size=npoints)
+    dataset[:, 2] *= (-1)**np.random.randint(2, size=npoints)
+
+    for ds in dataset:
+        if mpmath.__version__ < '0.14':
+            # mpmath < 0.14 fails for c too much smaller than a, b
+            if abs(ds[:2]).max() > abs(ds[2]):
+                ds[2] = abs(ds[:2]).max()
+        ds[4] = float(mpmath.hyp2f1(*tuple(ds[:4])))
+
+    FuncData(sc.hyp2f1, dataset, (0, 1, 2, 3), 4, rtol=1e-9).check()
+
+
+# ------------------------------------------------------------------------------
+# erf (complex)
+# ------------------------------------------------------------------------------
+
+@check_version(mpmath, '0.14')
+def test_erf_complex():
+    # need to increase mpmath precision for this test
+    old_dps, old_prec = mpmath.mp.dps, mpmath.mp.prec
+    try:
+        mpmath.mp.dps = 70
+        x1, y1 = np.meshgrid(np.linspace(-10, 1, 31), np.linspace(-10, 1, 11))
+        x2, y2 = np.meshgrid(np.logspace(-80, .8, 31), np.logspace(-80, .8, 11))
+        points = np.r_[x1.ravel(),x2.ravel()] + 1j*np.r_[y1.ravel(), y2.ravel()]
+
+        assert_func_equal(sc.erf, lambda x: complex(mpmath.erf(x)), points,
+                          vectorized=False, rtol=1e-13)
+        assert_func_equal(sc.erfc, lambda x: complex(mpmath.erfc(x)), points,
+                          vectorized=False, rtol=1e-13)
+    finally:
+        mpmath.mp.dps, mpmath.mp.prec = old_dps, old_prec
+
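+# Note: mpmath also provides a context manager for the save/restore dance
+# above; the later tests in this file use it, e.g.:
+#
+#     with mpmath.workdps(70):
+#         values = [complex(mpmath.erf(p)) for p in points]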
+
+# ------------------------------------------------------------------------------
+# lpmv
+# ------------------------------------------------------------------------------
+
+@check_version(mpmath, '0.15')
+def test_lpmv():
+    pts = []
+    for x in [-0.99, -0.557, 1e-6, 0.132, 1]:
+        pts.extend([
+            (1, 1, x),
+            (1, -1, x),
+            (-1, 1, x),
+            (-1, -2, x),
+            (1, 1.7, x),
+            (1, -1.7, x),
+            (-1, 1.7, x),
+            (-1, -2.7, x),
+            (1, 10, x),
+            (1, 11, x),
+            (3, 8, x),
+            (5, 11, x),
+            (-3, 8, x),
+            (-5, 11, x),
+            (3, -8, x),
+            (5, -11, x),
+            (-3, -8, x),
+            (-5, -11, x),
+            (3, 8.3, x),
+            (5, 11.3, x),
+            (-3, 8.3, x),
+            (-5, 11.3, x),
+            (3, -8.3, x),
+            (5, -11.3, x),
+            (-3, -8.3, x),
+            (-5, -11.3, x),
+        ])
+
+    def mplegenp(nu, mu, x):
+        if mu == int(mu) and x == 1:
+            # mpmath 0.17 gets this wrong
+            if mu == 0:
+                return 1
+            else:
+                return 0
+        return mpmath.legenp(nu, mu, x)
+
+    dataset = [p + (mplegenp(p[1], p[0], p[2]),) for p in pts]
+    dataset = np.array(dataset, dtype=np.float_)
+
+    def evf(mu, nu, x):
+        return sc.lpmv(mu.astype(int), nu, x)
+
+    with np.errstate(invalid='ignore'):
+        FuncData(evf, dataset, (0,1,2), 3, rtol=1e-10, atol=1e-14).check()
+
+
+# ------------------------------------------------------------------------------
+# beta
+# ------------------------------------------------------------------------------
+
+@check_version(mpmath, '0.15')
+def test_beta():
+    np.random.seed(1234)
+
+    b = np.r_[np.logspace(-200, 200, 4),
+              np.logspace(-10, 10, 4),
+              np.logspace(-1, 1, 4),
+              np.arange(-10, 11, 1),
+              np.arange(-10, 11, 1) + 0.5,
+              -1, -2.3, -3, -100.3, -10003.4]
+    a = b
+
+    ab = np.array(np.broadcast_arrays(a[:,None], b[None,:])).reshape(2, -1).T
+
+    old_dps, old_prec = mpmath.mp.dps, mpmath.mp.prec
+    try:
+        mpmath.mp.dps = 400
+
+        assert_func_equal(sc.beta,
+                          lambda a, b: float(mpmath.beta(a, b)),
+                          ab,
+                          vectorized=False,
+                          rtol=1e-10,
+                          ignore_inf_sign=True)
+
+        assert_func_equal(
+            sc.betaln,
+            lambda a, b: float(mpmath.log(abs(mpmath.beta(a, b)))),
+            ab,
+            vectorized=False,
+            rtol=1e-10)
+    finally:
+        mpmath.mp.dps, mpmath.mp.prec = old_dps, old_prec
+
+
+# ------------------------------------------------------------------------------
+# loggamma
+# ------------------------------------------------------------------------------
+
+LOGGAMMA_TAYLOR_RADIUS = 0.2
+
+
+@check_version(mpmath, '0.19')
+def test_loggamma_taylor_transition():
+    # Make sure there isn't a big jump in accuracy when we move from
+    # using the Taylor series to using the recurrence relation.
+
+    r = LOGGAMMA_TAYLOR_RADIUS + np.array([-0.1, -0.01, 0, 0.01, 0.1])
+    theta = np.linspace(0, 2*np.pi, 20)
+    r, theta = np.meshgrid(r, theta)
+    dz = r*np.exp(1j*theta)
+    z = np.r_[1 + dz, 2 + dz].flatten()
+
+    dataset = [(z0, complex(mpmath.loggamma(z0))) for z0 in z]
+    dataset = np.array(dataset)
+
+    FuncData(sc.loggamma, dataset, 0, 1, rtol=5e-14).check()
+
+
+@check_version(mpmath, '0.19')
+def test_loggamma_taylor():
+    # Test around the zeros at z = 1, 2.
+
+    r = np.logspace(-16, np.log10(LOGGAMMA_TAYLOR_RADIUS), 10)
+    theta = np.linspace(0, 2*np.pi, 20)
+    r, theta = np.meshgrid(r, theta)
+    dz = r*np.exp(1j*theta)
+    z = np.r_[1 + dz, 2 + dz].flatten()
+
+    dataset = [(z0, complex(mpmath.loggamma(z0))) for z0 in z]
+    dataset = np.array(dataset)
+
+    FuncData(sc.loggamma, dataset, 0, 1, rtol=5e-14).check()
+
+
+# ------------------------------------------------------------------------------
+# rgamma
+# ------------------------------------------------------------------------------
+
+@check_version(mpmath, '0.19')
+@pytest.mark.slow
+def test_rgamma_zeros():
+    # Test around the zeros at z = 0, -1, -2, ...,  -169. (After -169 we
+    # get values that are out of floating point range even when we're
+    # within 0.1 of the zero.)
+
+    # Can't use too many points here or the test takes forever.
+    dx = np.r_[-np.logspace(-1, -13, 3), 0, np.logspace(-13, -1, 3)]
+    dy = dx.copy()
+    dx, dy = np.meshgrid(dx, dy)
+    dz = dx + 1j*dy
+    zeros = np.arange(0, -170, -1).reshape(1, 1, -1)
+    z = (zeros + np.dstack((dz,)*zeros.size)).flatten()
+    with mpmath.workdps(100):
+        dataset = [(z0, complex(mpmath.rgamma(z0))) for z0 in z]
+
+    dataset = np.array(dataset)
+    FuncData(sc.rgamma, dataset, 0, 1, rtol=1e-12).check()
+
+
+# ------------------------------------------------------------------------------
+# digamma
+# ------------------------------------------------------------------------------
+
+@check_version(mpmath, '0.19')
+@pytest.mark.slow
+def test_digamma_roots():
+    # Test the special-cased roots for digamma.
+    root = mpmath.findroot(mpmath.digamma, 1.5)
+    roots = [float(root)]
+    root = mpmath.findroot(mpmath.digamma, -0.5)
+    roots.append(float(root))
+    roots = np.array(roots)
+
+    # If we test beyond a radius of 0.24 mpmath will take forever.
+    dx = np.r_[-0.24, -np.logspace(-1, -15, 10), 0, np.logspace(-15, -1, 10), 0.24]
+    dy = dx.copy()
+    dx, dy = np.meshgrid(dx, dy)
+    dz = dx + 1j*dy
+    z = (roots + np.dstack((dz,)*roots.size)).flatten()
+    with mpmath.workdps(30):
+        dataset = [(z0, complex(mpmath.digamma(z0))) for z0 in z]
+
+    dataset = np.array(dataset)
+    FuncData(sc.digamma, dataset, 0, 1, rtol=1e-14).check()
+
+
+@check_version(mpmath, '0.19')
+def test_digamma_negreal():
+    # Test digamma around the negative real axis. Don't do this in
+    # TestSystematic because the points need some jiggering so that
+    # mpmath doesn't take forever.
+
+    digamma = exception_to_nan(mpmath.digamma)
+
+    x = -np.logspace(300, -30, 100)
+    y = np.r_[-np.logspace(0, -3, 5), 0, np.logspace(-3, 0, 5)]
+    x, y = np.meshgrid(x, y)
+    z = (x + 1j*y).flatten()
+
+    with mpmath.workdps(40):
+        dataset = [(z0, complex(digamma(z0))) for z0 in z]
+    dataset = np.asarray(dataset)
+
+    FuncData(sc.digamma, dataset, 0, 1, rtol=1e-13).check()
+
+
+@check_version(mpmath, '0.19')
+def test_digamma_boundary():
+    # Check that there isn't a jump in accuracy when we switch from
+    # using the asymptotic series to the reflection formula.
+
+    x = -np.logspace(300, -30, 100)
+    y = np.array([-6.1, -5.9, 5.9, 6.1])
+    x, y = np.meshgrid(x, y)
+    z = (x + 1j*y).flatten()
+
+    with mpmath.workdps(30):
+        dataset = [(z0, complex(mpmath.digamma(z0))) for z0 in z]
+    dataset = np.asarray(dataset)
+
+    FuncData(sc.digamma, dataset, 0, 1, rtol=1e-13).check()
+
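+# For reference, the reflection formula in question is
+#
+#     digamma(1 - z) = digamma(z) + pi/tan(pi*z)
+#
+# which maps points near the negative real axis back to the right half
+# plane, where the asymptotic series is accurate.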
+
+# ------------------------------------------------------------------------------
+# gammainc
+# ------------------------------------------------------------------------------
+
+@check_version(mpmath, '0.19')
+@pytest.mark.slow
+def test_gammainc_boundary():
+    # Test the transition to the asymptotic series.
+    small = 20
+    a = np.linspace(0.5*small, 2*small, 50)
+    x = a.copy()
+    a, x = np.meshgrid(a, x)
+    a, x = a.flatten(), x.flatten()
+    with mpmath.workdps(100):
+        dataset = [(a0, x0, float(mpmath.gammainc(a0, b=x0, regularized=True)))
+                   for a0, x0 in zip(a, x)]
+    dataset = np.array(dataset)
+
+    FuncData(sc.gammainc, dataset, (0, 1), 2, rtol=1e-12).check()
+
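+# Convention note: in mpmath, gammainc(a, b=x, regularized=True) integrates
+# t**(a-1)*exp(-t) from 0 to x and divides by Gamma(a), i.e. it is the
+# regularized lower incomplete gamma function P(a, x) that sc.gammainc
+# computes; sc.gammaincc corresponds to integrating from x to infinity
+# (keyword a=x instead of b=x, as used later in this file).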
+
+# ------------------------------------------------------------------------------
+# spence
+# ------------------------------------------------------------------------------
+
+@check_version(mpmath, '0.19')
+@pytest.mark.slow
+def test_spence_circle():
+    # The trickiest region for spence is around the circle |z - 1| = 1,
+    # so test that region carefully.
+
+    def spence(z):
+        return complex(mpmath.polylog(2, 1 - z))
+
+    r = np.linspace(0.5, 1.5)
+    theta = np.linspace(0, 2*pi)
+    z = (1 + np.outer(r, np.exp(1j*theta))).flatten()
+    dataset = np.asarray([(z0, spence(z0)) for z0 in z])
+
+    FuncData(sc.spence, dataset, 0, 1, rtol=1e-14).check()
+
+
+# ------------------------------------------------------------------------------
+# sinpi and cospi
+# ------------------------------------------------------------------------------
+
+@check_version(mpmath, '0.19')
+def test_sinpi_zeros():
+    eps = np.finfo(float).eps
+    dx = np.r_[-np.logspace(0, -13, 3), 0, np.logspace(-13, 0, 3)]
+    dy = dx.copy()
+    dx, dy = np.meshgrid(dx, dy)
+    dz = dx + 1j*dy
+    zeros = np.arange(-100, 100, 1).reshape(1, 1, -1)
+    z = (zeros + np.dstack((dz,)*zeros.size)).flatten()
+    dataset = np.asarray([(z0, complex(mpmath.sinpi(z0)))
+                          for z0 in z])
+    FuncData(_sinpi, dataset, 0, 1, rtol=2*eps).check()
+
+
+@check_version(mpmath, '0.19')
+def test_cospi_zeros():
+    eps = np.finfo(float).eps
+    dx = np.r_[-np.logspace(0, -13, 3), 0, np.logspace(-13, 0, 3)]
+    dy = dx.copy()
+    dx, dy = np.meshgrid(dx, dy)
+    dz = dx + 1j*dy
+    zeros = (np.arange(-100, 100, 1) + 0.5).reshape(1, 1, -1)
+    z = (zeros + np.dstack((dz,)*zeros.size)).flatten()
+    dataset = np.asarray([(z0, complex(mpmath.cospi(z0)))
+                          for z0 in z])
+
+    FuncData(_cospi, dataset, 0, 1, rtol=2*eps).check()
+
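+# Why dedicated sinpi/cospi kernels are tested: np.sin(np.pi*z) cannot vanish
+# at integer z, because np.pi is pi rounded to double precision and the
+# resulting phase error grows linearly in |z|.  A rough illustration (the
+# residual's magnitude is what matters; its exact value is platform
+# dependent):
+#
+#     np.sin(np.pi * 1e6)   # on the order of 1e-10, not 0
+#     _sinpi(1e6)           # 0.0, since the argument reduction is exact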
+
+# ------------------------------------------------------------------------------
+# ellipj
+# ------------------------------------------------------------------------------
+
+@check_version(mpmath, '0.19')
+def test_dn_quarter_period():
+    def dn(u, m):
+        return sc.ellipj(u, m)[2]
+
+    def mpmath_dn(u, m):
+        return float(mpmath.ellipfun("dn", u=u, m=m))
+
+    m = np.linspace(0, 1, 20)
+    du = np.r_[-np.logspace(-1, -15, 10), 0, np.logspace(-15, -1, 10)]
+    dataset = []
+    for m0 in m:
+        u0 = float(mpmath.ellipk(m0))
+        for du0 in du:
+            p = u0 + du0
+            dataset.append((p, m0, mpmath_dn(p, m0)))
+    dataset = np.asarray(dataset)
+
+    FuncData(dn, dataset, (0, 1), 2, rtol=1e-10).check()
+
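+# At the quarter period u = K(m) the Jacobi elliptic functions take the
+# special values sn = 1, cn = 0, dn = sqrt(1 - m), so perturbing u around
+# K(m) = mpmath.ellipk(m) probes the accuracy of ellipj's internal argument
+# reduction right at these special points.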
+
+# ------------------------------------------------------------------------------
+# Wright Omega
+# ------------------------------------------------------------------------------
+
+def _mpmath_wrightomega(z, dps):
+    with mpmath.workdps(dps):
+        z = mpmath.mpc(z)
+        unwind = mpmath.ceil((z.imag - mpmath.pi)/(2*mpmath.pi))
+        res = mpmath.lambertw(mpmath.exp(z), unwind)
+    return res
+
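+# The helper above is the defining identity of the Wright omega function,
+#
+#     omega(z) = lambertw(exp(z), K(z)),  K(z) = ceil((Im(z) - pi)/(2*pi)),
+#
+# where the second argument of lambertw is the branch index and the
+# unwinding number K(z) selects the branch (cf. Corless & Jeffrey,
+# "The Wright omega function").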
+
+@pytest.mark.slow
+@check_version(mpmath, '0.19')
+def test_wrightomega_branch():
+    x = -np.logspace(10, 0, 25)
+    picut_above = [np.nextafter(np.pi, np.inf)]
+    picut_below = [np.nextafter(np.pi, -np.inf)]
+    npicut_above = [np.nextafter(-np.pi, np.inf)]
+    npicut_below = [np.nextafter(-np.pi, -np.inf)]
+    for i in range(50):
+        picut_above.append(np.nextafter(picut_above[-1], np.inf))
+        picut_below.append(np.nextafter(picut_below[-1], -np.inf))
+        npicut_above.append(np.nextafter(npicut_above[-1], np.inf))
+        npicut_below.append(np.nextafter(npicut_below[-1], -np.inf))
+    y = np.hstack((picut_above, picut_below, npicut_above, npicut_below))
+    x, y = np.meshgrid(x, y)
+    z = (x + 1j*y).flatten()
+
+    dataset = np.asarray([(z0, complex(_mpmath_wrightomega(z0, 25)))
+                          for z0 in z])
+
+    FuncData(sc.wrightomega, dataset, 0, 1, rtol=1e-8).check()
+
+
+@pytest.mark.slow
+@check_version(mpmath, '0.19')
+def test_wrightomega_region1():
+    # This region gets less coverage in the TestSystematic test
+    x = np.linspace(-2, 1)
+    y = np.linspace(1, 2*np.pi)
+    x, y = np.meshgrid(x, y)
+    z = (x + 1j*y).flatten()
+
+    dataset = np.asarray([(z0, complex(_mpmath_wrightomega(z0, 25)))
+                          for z0 in z])
+
+    FuncData(sc.wrightomega, dataset, 0, 1, rtol=1e-15).check()
+
+
+@pytest.mark.slow
+@check_version(mpmath, '0.19')
+def test_wrightomega_region2():
+    # This region gets less coverage in the TestSystematic test
+    x = np.linspace(-2, 1)
+    y = np.linspace(-2*np.pi, -1)
+    x, y = np.meshgrid(x, y)
+    z = (x + 1j*y).flatten()
+
+    dataset = np.asarray([(z0, complex(_mpmath_wrightomega(z0, 25)))
+                          for z0 in z])
+
+    FuncData(sc.wrightomega, dataset, 0, 1, rtol=1e-15).check()
+
+
+# ------------------------------------------------------------------------------
+# lambertw
+# ------------------------------------------------------------------------------
+
+@pytest.mark.slow
+@check_version(mpmath, '0.19')
+def test_lambertw_smallz():
+    x, y = np.linspace(-1, 1, 25), np.linspace(-1, 1, 25)
+    x, y = np.meshgrid(x, y)
+    z = (x + 1j*y).flatten()
+
+    dataset = np.asarray([(z0, complex(mpmath.lambertw(z0)))
+                          for z0 in z])
+
+    FuncData(sc.lambertw, dataset, 0, 1, rtol=1e-13).check()
+
+
+# ------------------------------------------------------------------------------
+# Systematic tests
+# ------------------------------------------------------------------------------
+
+HYPERKW = dict(maxprec=200, maxterms=200)
+
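+# maxprec and maxterms are forwarded to mpmath's hypergeometric evaluation:
+# maxterms caps the number of series terms and maxprec the working precision
+# used when a poorly convergent sum is retried, so a call such as
+# mpmath.hyp1f1(a, b, x, **HYPERKW) fails fast with NoConvergence (mapped to
+# nan by exception_to_nan) instead of running for an unbounded time.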
+
+@pytest.mark.slow
+@check_version(mpmath, '0.17')
+class TestSystematic:
+
+    def test_airyai(self):
+        # oscillating function, limit range
+        assert_mpmath_equal(lambda z: sc.airy(z)[0],
+                            mpmath.airyai,
+                            [Arg(-1e8, 1e8)],
+                            rtol=1e-5)
+        assert_mpmath_equal(lambda z: sc.airy(z)[0],
+                            mpmath.airyai,
+                            [Arg(-1e3, 1e3)])
+
+    def test_airyai_complex(self):
+        assert_mpmath_equal(lambda z: sc.airy(z)[0],
+                            mpmath.airyai,
+                            [ComplexArg()])
+
+    def test_airyai_prime(self):
+        # oscillating function, limit range
+        assert_mpmath_equal(lambda z: sc.airy(z)[1], lambda z:
+                            mpmath.airyai(z, derivative=1),
+                            [Arg(-1e8, 1e8)],
+                            rtol=1e-5)
+        assert_mpmath_equal(lambda z: sc.airy(z)[1], lambda z:
+                            mpmath.airyai(z, derivative=1),
+                            [Arg(-1e3, 1e3)])
+
+    def test_airyai_prime_complex(self):
+        assert_mpmath_equal(lambda z: sc.airy(z)[1], lambda z:
+                            mpmath.airyai(z, derivative=1),
+                            [ComplexArg()])
+
+    def test_airybi(self):
+        # oscillating function, limit range
+        assert_mpmath_equal(lambda z: sc.airy(z)[2], lambda z:
+                            mpmath.airybi(z),
+                            [Arg(-1e8, 1e8)],
+                            rtol=1e-5)
+        assert_mpmath_equal(lambda z: sc.airy(z)[2], lambda z:
+                            mpmath.airybi(z),
+                            [Arg(-1e3, 1e3)])
+
+    def test_airybi_complex(self):
+        assert_mpmath_equal(lambda z: sc.airy(z)[2], lambda z:
+                            mpmath.airybi(z),
+                            [ComplexArg()])
+
+    def test_airybi_prime(self):
+        # oscillating function, limit range
+        assert_mpmath_equal(lambda z: sc.airy(z)[3], lambda z:
+                            mpmath.airybi(z, derivative=1),
+                            [Arg(-1e8, 1e8)],
+                            rtol=1e-5)
+        assert_mpmath_equal(lambda z: sc.airy(z)[3], lambda z:
+                            mpmath.airybi(z, derivative=1),
+                            [Arg(-1e3, 1e3)])
+
+    def test_airybi_prime_complex(self):
+        assert_mpmath_equal(lambda z: sc.airy(z)[3], lambda z:
+                            mpmath.airybi(z, derivative=1),
+                            [ComplexArg()])
+
+    def test_bei(self):
+        assert_mpmath_equal(sc.bei,
+                            exception_to_nan(lambda z: mpmath.bei(0, z, **HYPERKW)),
+                            [Arg(-1e3, 1e3)])
+
+    def test_ber(self):
+        assert_mpmath_equal(sc.ber,
+                            exception_to_nan(lambda z: mpmath.ber(0, z, **HYPERKW)),
+                            [Arg(-1e3, 1e3)])
+
+    def test_bernoulli(self):
+        assert_mpmath_equal(lambda n: sc.bernoulli(int(n))[int(n)],
+                            lambda n: float(mpmath.bernoulli(int(n))),
+                            [IntArg(0, 13000)],
+                            rtol=1e-9, n=13000)
+
+    def test_besseli(self):
+        assert_mpmath_equal(sc.iv,
+                            exception_to_nan(lambda v, z: mpmath.besseli(v, z, **HYPERKW)),
+                            [Arg(-1e100, 1e100), Arg()],
+                            atol=1e-270)
+
+    def test_besseli_complex(self):
+        assert_mpmath_equal(lambda v, z: sc.iv(v.real, z),
+                            exception_to_nan(lambda v, z: mpmath.besseli(v, z, **HYPERKW)),
+                            [Arg(-1e100, 1e100), ComplexArg()])
+
+    def test_besselj(self):
+        assert_mpmath_equal(sc.jv,
+                            exception_to_nan(lambda v, z: mpmath.besselj(v, z, **HYPERKW)),
+                            [Arg(-1e100, 1e100), Arg(-1e3, 1e3)],
+                            ignore_inf_sign=True)
+
+        # loss of precision at large arguments due to oscillation
+        assert_mpmath_equal(sc.jv,
+                            exception_to_nan(lambda v, z: mpmath.besselj(v, z, **HYPERKW)),
+                            [Arg(-1e100, 1e100), Arg(-1e8, 1e8)],
+                            ignore_inf_sign=True,
+                            rtol=1e-5)
+
+    def test_besselj_complex(self):
+        assert_mpmath_equal(lambda v, z: sc.jv(v.real, z),
+                            exception_to_nan(lambda v, z: mpmath.besselj(v, z, **HYPERKW)),
+                            [Arg(), ComplexArg()])
+
+    def test_besselk(self):
+        assert_mpmath_equal(sc.kv,
+                            mpmath.besselk,
+                            [Arg(-200, 200), Arg(0, np.inf)],
+                            nan_ok=False, rtol=1e-12)
+
+    def test_besselk_int(self):
+        assert_mpmath_equal(sc.kn,
+                            mpmath.besselk,
+                            [IntArg(-200, 200), Arg(0, np.inf)],
+                            nan_ok=False, rtol=1e-12)
+
+    def test_besselk_complex(self):
+        assert_mpmath_equal(lambda v, z: sc.kv(v.real, z),
+                            exception_to_nan(lambda v, z: mpmath.besselk(v, z, **HYPERKW)),
+                            [Arg(-1e100, 1e100), ComplexArg()])
+
+    def test_bessely(self):
+        def mpbessely(v, x):
+            r = float(mpmath.bessely(v, x, **HYPERKW))
+            if abs(r) > 1e305:
+                # overflowing to inf a bit earlier is OK
+                r = np.inf * np.sign(r)
+            if abs(r) == 0 and x == 0:
+                # invalid result from mpmath, point x=0 is a divergence
+                return np.nan
+            return r
+        assert_mpmath_equal(sc.yv,
+                            exception_to_nan(mpbessely),
+                            [Arg(-1e100, 1e100), Arg(-1e8, 1e8)],
+                            n=5000)
+
+    def test_bessely_complex(self):
+        def mpbessely(v, x):
+            r = complex(mpmath.bessely(v, x, **HYPERKW))
+            if abs(r) > 1e305:
+                # overflowing to inf a bit earlier is OK
+                with np.errstate(invalid='ignore'):
+                    r = np.inf * np.sign(r)
+            return r
+        assert_mpmath_equal(lambda v, z: sc.yv(v.real, z),
+                            exception_to_nan(mpbessely),
+                            [Arg(), ComplexArg()],
+                            n=15000)
+
+    def test_bessely_int(self):
+        def mpbessely(v, x):
+            r = float(mpmath.bessely(v, x))
+            if abs(r) == 0 and x == 0:
+                # invalid result from mpmath, point x=0 is a divergence
+                return np.nan
+            return r
+        assert_mpmath_equal(lambda v, z: sc.yn(int(v), z),
+                            exception_to_nan(mpbessely),
+                            [IntArg(-1000, 1000), Arg(-1e8, 1e8)])
+
+    def test_beta(self):
+        bad_points = []
+
+        def beta(a, b, nonzero=False):
+            if a < -1e12 or b < -1e12:
+                # Function is defined here only at integers, but due
+                # to loss of precision this is numerically
+                # ill-defined. Don't compare values here.
+                return np.nan
+            if (a < 0 or b < 0) and (abs(float(a + b)) % 1) == 0:
+                # close to a zero of the function: mpmath and scipy
+                # will not round here the same, so the test needs to be
+                # run with an absolute tolerance
+                if nonzero:
+                    bad_points.append((float(a), float(b)))
+                    return np.nan
+            return mpmath.beta(a, b)
+
+        assert_mpmath_equal(sc.beta,
+                            lambda a, b: beta(a, b, nonzero=True),
+                            [Arg(), Arg()],
+                            dps=400,
+                            ignore_inf_sign=True)
+
+        assert_mpmath_equal(sc.beta,
+                            beta,
+                            np.array(bad_points),
+                            dps=400,
+                            ignore_inf_sign=True,
+                            atol=1e-11)
+
+    def test_betainc(self):
+        assert_mpmath_equal(sc.betainc,
+                            time_limited()(exception_to_nan(lambda a, b, x: mpmath.betainc(a, b, 0, x, regularized=True))),
+                            [Arg(), Arg(), Arg()])
+
+    def test_binom(self):
+        bad_points = []
+
+        def binomial(n, k, nonzero=False):
+            if abs(k) > 1e8*(abs(n) + 1):
+                # The binomial is rapidly oscillating in this region,
+                # and the function is numerically ill-defined. Don't
+                # compare values here.
+                return np.nan
+            if n < k and abs(float(n-k) - np.round(float(n-k))) < 1e-15:
+                # close to a zero of the function: mpmath and scipy
+                # will not round here the same, so the test needs to be
+                # run with an absolute tolerance
+                if nonzero:
+                    bad_points.append((float(n), float(k)))
+                    return np.nan
+            return mpmath.binomial(n, k)
+
+        assert_mpmath_equal(sc.binom,
+                            lambda n, k: binomial(n, k, nonzero=True),
+                            [Arg(), Arg()],
+                            dps=400)
+
+        assert_mpmath_equal(sc.binom,
+                            binomial,
+                            np.array(bad_points),
+                            dps=400,
+                            atol=1e-14)
+
+    def test_chebyt_int(self):
+        assert_mpmath_equal(lambda n, x: sc.eval_chebyt(int(n), x),
+                            exception_to_nan(lambda n, x: mpmath.chebyt(n, x, **HYPERKW)),
+                            [IntArg(), Arg()], dps=50)
+
+    @pytest.mark.xfail(run=False, reason="some cases in hyp2f1 not fully accurate")
+    def test_chebyt(self):
+        assert_mpmath_equal(sc.eval_chebyt,
+                            lambda n, x: time_limited()(exception_to_nan(mpmath.chebyt))(n, x, **HYPERKW),
+                            [Arg(-101, 101), Arg()], n=10000)
+
+    def test_chebyu_int(self):
+        assert_mpmath_equal(lambda n, x: sc.eval_chebyu(int(n), x),
+                            exception_to_nan(lambda n, x: mpmath.chebyu(n, x, **HYPERKW)),
+                            [IntArg(), Arg()], dps=50)
+
+    @pytest.mark.xfail(run=False, reason="some cases in hyp2f1 not fully accurate")
+    def test_chebyu(self):
+        assert_mpmath_equal(sc.eval_chebyu,
+                            lambda n, x: time_limited()(exception_to_nan(mpmath.chebyu))(n, x, **HYPERKW),
+                            [Arg(-101, 101), Arg()])
+
+    def test_chi(self):
+        def chi(x):
+            return sc.shichi(x)[1]
+        assert_mpmath_equal(chi, mpmath.chi, [Arg()])
+        # check asymptotic series cross-over
+        assert_mpmath_equal(chi, mpmath.chi, [FixedArg([88 - 1e-9, 88, 88 + 1e-9])])
+
+    def test_chi_complex(self):
+        def chi(z):
+            return sc.shichi(z)[1]
+        # chi oscillates as Im[z] -> +- inf, so limit range
+        assert_mpmath_equal(chi,
+                            mpmath.chi,
+                            [ComplexArg(complex(-np.inf, -1e8), complex(np.inf, 1e8))],
+                            rtol=1e-12)
+
+    def test_ci(self):
+        def ci(x):
+            return sc.sici(x)[1]
+        # oscillating function: limit range
+        assert_mpmath_equal(ci,
+                            mpmath.ci,
+                            [Arg(-1e8, 1e8)])
+
+    def test_ci_complex(self):
+        def ci(z):
+            return sc.sici(z)[1]
+        # ci oscillates as Re[z] -> +- inf, so limit range
+        assert_mpmath_equal(ci,
+                            mpmath.ci,
+                            [ComplexArg(complex(-1e8, -np.inf), complex(1e8, np.inf))],
+                            rtol=1e-8)
+
+    def test_cospi(self):
+        eps = np.finfo(float).eps
+        assert_mpmath_equal(_cospi,
+                            mpmath.cospi,
+                            [Arg()], nan_ok=False, rtol=2*eps)
+
+    def test_cospi_complex(self):
+        assert_mpmath_equal(_cospi,
+                            mpmath.cospi,
+                            [ComplexArg()], nan_ok=False, rtol=1e-13)
+
+    def test_digamma(self):
+        assert_mpmath_equal(sc.digamma,
+                            exception_to_nan(mpmath.digamma),
+                            [Arg()], rtol=1e-12, dps=50)
+
+    def test_digamma_complex(self):
+        # Test on a cut plane because mpmath will hang. See
+        # test_digamma_negreal for tests on the negative real axis.
+        def param_filter(z):
+            return np.where((z.real < 0) & (np.abs(z.imag) < 1.12), False, True)
+
+        assert_mpmath_equal(sc.digamma,
+                            exception_to_nan(mpmath.digamma),
+                            [ComplexArg()], rtol=1e-13, dps=40,
+                            param_filter=param_filter)
+
+    def test_e1(self):
+        assert_mpmath_equal(sc.exp1,
+                            mpmath.e1,
+                            [Arg()], rtol=1e-14)
+
+    def test_e1_complex(self):
+        # E_1 oscillates as Im[z] -> +- inf, so limit range
+        assert_mpmath_equal(sc.exp1,
+                            mpmath.e1,
+                            [ComplexArg(complex(-np.inf, -1e8), complex(np.inf, 1e8))],
+                            rtol=1e-11)
+
+        # Check cross-over region
+        assert_mpmath_equal(sc.exp1,
+                            mpmath.e1,
+                            (np.linspace(-50, 50, 171)[:, None] +
+                             np.r_[0, np.logspace(-3, 2, 61),
+                                   -np.logspace(-3, 2, 11)]*1j).ravel(),
+                            rtol=1e-11)
+        assert_mpmath_equal(sc.exp1,
+                            mpmath.e1,
+                            (np.linspace(-50, -35, 10000) + 0j),
+                            rtol=1e-11)
+
+    def test_exprel(self):
+        assert_mpmath_equal(sc.exprel,
+                            lambda x: mpmath.expm1(x)/x if x != 0 else mpmath.mpf('1.0'),
+                            [Arg(a=-np.log(np.finfo(np.double).max), b=np.log(np.finfo(np.double).max))])
+        assert_mpmath_equal(sc.exprel,
+                            lambda x: mpmath.expm1(x)/x if x != 0 else mpmath.mpf('1.0'),
+                            np.array([1e-12, 1e-24, 0, 1e12, 1e24, np.inf]), rtol=1e-11)
+        assert_(np.isinf(sc.exprel(np.inf)))
+        assert_(sc.exprel(-np.inf) == 0)
+
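+    # exprel guards against the catastrophic cancellation in the naive
+    # formula (exp(x) - 1)/x for small x; a classic illustration:
+    #
+    #     x = 1e-10
+    #     (np.exp(x) - 1) / x   # 1.000000082740371, ~8e-8 relative error
+    #     sc.exprel(x)          # 1.00000000005, correct to double precision
+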
+    def test_expm1_complex(self):
+        # Oscillates as a function of Im[z], so limit range to avoid loss of precision
+        assert_mpmath_equal(sc.expm1,
+                            mpmath.expm1,
+                            [ComplexArg(complex(-np.inf, -1e7), complex(np.inf, 1e7))])
+
+    def test_log1p_complex(self):
+        assert_mpmath_equal(sc.log1p,
+                            lambda x: mpmath.log(x+1),
+                            [ComplexArg()], dps=60)
+
+    def test_log1pmx(self):
+        assert_mpmath_equal(_log1pmx,
+                            lambda x: mpmath.log(x + 1) - x,
+                            [Arg()], dps=60, rtol=1e-14)
+
+    def test_ei(self):
+        assert_mpmath_equal(sc.expi,
+                            mpmath.ei,
+                            [Arg()],
+                            rtol=1e-11)
+
+    def test_ei_complex(self):
+        # Ei oscillates as Im[z] -> +- inf, so limit range
+        assert_mpmath_equal(sc.expi,
+                            mpmath.ei,
+                            [ComplexArg(complex(-np.inf, -1e8), complex(np.inf, 1e8))],
+                            rtol=1e-9)
+
+    def test_ellipe(self):
+        assert_mpmath_equal(sc.ellipe,
+                            mpmath.ellipe,
+                            [Arg(b=1.0)])
+
+    def test_ellipeinc(self):
+        assert_mpmath_equal(sc.ellipeinc,
+                            mpmath.ellipe,
+                            [Arg(-1e3, 1e3), Arg(b=1.0)])
+
+    def test_ellipeinc_largephi(self):
+        assert_mpmath_equal(sc.ellipeinc,
+                            mpmath.ellipe,
+                            [Arg(), Arg()])
+
+    def test_ellipf(self):
+        assert_mpmath_equal(sc.ellipkinc,
+                            mpmath.ellipf,
+                            [Arg(-1e3, 1e3), Arg()])
+
+    def test_ellipf_largephi(self):
+        assert_mpmath_equal(sc.ellipkinc,
+                            mpmath.ellipf,
+                            [Arg(), Arg()])
+
+    def test_ellipk(self):
+        assert_mpmath_equal(sc.ellipk,
+                            mpmath.ellipk,
+                            [Arg(b=1.0)])
+        assert_mpmath_equal(sc.ellipkm1,
+                            lambda m: mpmath.ellipk(1 - m),
+                            [Arg(a=0.0)],
+                            dps=400)
+
+    def test_ellipkinc(self):
+        def ellipkinc(phi, m):
+            return mpmath.ellippi(0, phi, m)
+        assert_mpmath_equal(sc.ellipkinc,
+                            ellipkinc,
+                            [Arg(-1e3, 1e3), Arg(b=1.0)],
+                            ignore_inf_sign=True)
+
+    def test_ellipkinc_largephi(self):
+        def ellipkinc(phi, m):
+            return mpmath.ellippi(0, phi, m)
+        assert_mpmath_equal(sc.ellipkinc,
+                            ellipkinc,
+                            [Arg(), Arg(b=1.0)],
+                            ignore_inf_sign=True)
+
+    def test_ellipfun_sn(self):
+        def sn(u, m):
+            # mpmath doesn't get the zero at u = 0; fix that
+            if u == 0:
+                return 0
+            else:
+                return mpmath.ellipfun("sn", u=u, m=m)
+
+        # Oscillating function --- limit range of first argument; the
+        # loss of precision there is an expected numerical feature
+        # rather than an actual bug
+        assert_mpmath_equal(lambda u, m: sc.ellipj(u, m)[0],
+                            sn,
+                            [Arg(-1e6, 1e6), Arg(a=0, b=1)],
+                            rtol=1e-8)
+
+    def test_ellipfun_cn(self):
+        # see comment in ellipfun_sn
+        assert_mpmath_equal(lambda u, m: sc.ellipj(u, m)[1],
+                            lambda u, m: mpmath.ellipfun("cn", u=u, m=m),
+                            [Arg(-1e6, 1e6), Arg(a=0, b=1)],
+                            rtol=1e-8)
+
+    def test_ellipfun_dn(self):
+        # see comment in ellipfun_sn
+        assert_mpmath_equal(lambda u, m: sc.ellipj(u, m)[2],
+                            lambda u, m: mpmath.ellipfun("dn", u=u, m=m),
+                            [Arg(-1e6, 1e6), Arg(a=0, b=1)],
+                            rtol=1e-8)
+
+    def test_erf(self):
+        assert_mpmath_equal(sc.erf,
+                            lambda z: mpmath.erf(z),
+                            [Arg()])
+
+    def test_erf_complex(self):
+        assert_mpmath_equal(sc.erf,
+                            lambda z: mpmath.erf(z),
+                            [ComplexArg()], n=200)
+
+    def test_erfc(self):
+        assert_mpmath_equal(sc.erfc,
+                            exception_to_nan(lambda z: mpmath.erfc(z)),
+                            [Arg()], rtol=1e-13)
+
+    def test_erfc_complex(self):
+        assert_mpmath_equal(sc.erfc,
+                            exception_to_nan(lambda z: mpmath.erfc(z)),
+                            [ComplexArg()], n=200)
+
+    def test_erfi(self):
+        assert_mpmath_equal(sc.erfi,
+                            mpmath.erfi,
+                            [Arg()], n=200)
+
+    def test_erfi_complex(self):
+        assert_mpmath_equal(sc.erfi,
+                            mpmath.erfi,
+                            [ComplexArg()], n=200)
+
+    def test_ndtr(self):
+        assert_mpmath_equal(sc.ndtr,
+                            exception_to_nan(lambda z: mpmath.ncdf(z)),
+                            [Arg()], n=200)
+
+    def test_ndtr_complex(self):
+        assert_mpmath_equal(sc.ndtr,
+                            lambda z: mpmath.erfc(-z/np.sqrt(2.))/2.,
+                            [ComplexArg(a=complex(-10000, -10000), b=complex(10000, 10000))], n=400)
+
+    def test_log_ndtr(self):
+        assert_mpmath_equal(sc.log_ndtr,
+                            exception_to_nan(lambda z: mpmath.log(mpmath.ncdf(z))),
+                            [Arg()], n=600, dps=300, rtol=1e-13)
+
+    def test_log_ndtr_complex(self):
+        assert_mpmath_equal(sc.log_ndtr,
+                            exception_to_nan(lambda z: mpmath.log(mpmath.erfc(-z/np.sqrt(2.))/2.)),
+                            [ComplexArg(a=complex(-10000, -100),
+                                        b=complex(10000, 100))], n=200, dps=300)
+
+    def test_eulernum(self):
+        assert_mpmath_equal(lambda n: sc.euler(n)[-1],
+                            mpmath.eulernum,
+                            [IntArg(1, 10000)], n=10000)
+
+    def test_expint(self):
+        assert_mpmath_equal(sc.expn,
+                            mpmath.expint,
+                            [IntArg(0, 200), Arg(0, np.inf)],
+                            rtol=1e-13, dps=160)
+
+    def test_fresnels(self):
+        def fresnels(x):
+            return sc.fresnel(x)[0]
+        assert_mpmath_equal(fresnels,
+                            mpmath.fresnels,
+                            [Arg()])
+
+    def test_fresnelc(self):
+        def fresnelc(x):
+            return sc.fresnel(x)[1]
+        assert_mpmath_equal(fresnelc,
+                            mpmath.fresnelc,
+                            [Arg()])
+
+    def test_gamma(self):
+        assert_mpmath_equal(sc.gamma,
+                            exception_to_nan(mpmath.gamma),
+                            [Arg()])
+
+    def test_gamma_complex(self):
+        assert_mpmath_equal(sc.gamma,
+                            exception_to_nan(mpmath.gamma),
+                            [ComplexArg()], rtol=5e-13)
+
+    def test_gammainc(self):
+        # Larger arguments are tested in test_data.py:test_local
+        assert_mpmath_equal(sc.gammainc,
+                            lambda z, b: mpmath.gammainc(z, b=b, regularized=True),
+                            [Arg(0, 1e4, inclusive_a=False), Arg(0, 1e4)],
+                            nan_ok=False, rtol=1e-11)
+
+    def test_gammaincc(self):
+        # Larger arguments are tested in test_data.py:test_local
+        assert_mpmath_equal(sc.gammaincc,
+                            lambda z, a: mpmath.gammainc(z, a=a, regularized=True),
+                            [Arg(0, 1e4, inclusive_a=False), Arg(0, 1e4)],
+                            nan_ok=False, rtol=1e-11)
+
+    def test_gammaln(self):
+        # The real part of loggamma is log(|gamma(z)|).
+        def f(z):
+            return mpmath.loggamma(z).real
+
+        assert_mpmath_equal(sc.gammaln, exception_to_nan(f), [Arg()])
+
+    @pytest.mark.xfail(run=False)
+    def test_gegenbauer(self):
+        assert_mpmath_equal(sc.eval_gegenbauer,
+                            exception_to_nan(mpmath.gegenbauer),
+                            [Arg(-1e3, 1e3), Arg(), Arg()])
+
+    def test_gegenbauer_int(self):
+        # Redefine functions to deal with numerical + mpmath issues
+        def gegenbauer(n, a, x):
+            # Avoid overflow at large `a` (mpmath would need an even larger
+            # dps to handle this correctly, so just skip this region)
+            if abs(a) > 1e100:
+                return np.nan
+
+            # Deal with n=0, n=1 correctly; mpmath 0.17 doesn't do these
+            # always correctly
+            if n == 0:
+                r = 1.0
+            elif n == 1:
+                r = 2*a*x
+            else:
+                r = mpmath.gegenbauer(n, a, x)
+
+            # mpmath 0.17 gives wrong results (spurious zero) in some cases,
+            # so recompute the value with the parameter `a` slightly perturbed
+            if float(r) == 0 and a < -1 and float(a) == int(float(a)):
+                r = mpmath.gegenbauer(n, a + mpmath.mpf('1e-50'), x)
+                if abs(r) < mpmath.mpf('1e-50'):
+                    r = mpmath.mpf('0.0')
+
+            # Differing overflow thresholds in scipy vs. mpmath
+            if abs(r) > 1e270:
+                return np.inf
+            return r
+
+        def sc_gegenbauer(n, a, x):
+            r = sc.eval_gegenbauer(int(n), a, x)
+            # Differing overflow thresholds in scipy vs. mpmath
+            if abs(r) > 1e270:
+                return np.inf
+            return r
+        assert_mpmath_equal(sc_gegenbauer,
+                            exception_to_nan(gegenbauer),
+                            [IntArg(0, 100), Arg(-1e9, 1e9), Arg()],
+                            n=40000, dps=100,
+                            ignore_inf_sign=True, rtol=1e-6)
+
+        # Check the small-x expansion
+        assert_mpmath_equal(sc_gegenbauer,
+                            exception_to_nan(gegenbauer),
+                            [IntArg(0, 100), Arg(), FixedArg(np.logspace(-30, -4, 30))],
+                            dps=100,
+                            ignore_inf_sign=True)
+
+    @pytest.mark.xfail(run=False)
+    def test_gegenbauer_complex(self):
+        assert_mpmath_equal(lambda n, a, x: sc.eval_gegenbauer(int(n), a.real, x),
+                            exception_to_nan(mpmath.gegenbauer),
+                            [IntArg(0, 100), Arg(), ComplexArg()])
+
+    @nonfunctional_tooslow
+    def test_gegenbauer_complex_general(self):
+        assert_mpmath_equal(lambda n, a, x: sc.eval_gegenbauer(n.real, a.real, x),
+                            exception_to_nan(mpmath.gegenbauer),
+                            [Arg(-1e3, 1e3), Arg(), ComplexArg()])
+
+    def test_hankel1(self):
+        assert_mpmath_equal(sc.hankel1,
+                            exception_to_nan(lambda v, x: mpmath.hankel1(v, x,
+                                                                          **HYPERKW)),
+                            [Arg(-1e20, 1e20), Arg()])
+
+    def test_hankel2(self):
+        assert_mpmath_equal(sc.hankel2,
+                            exception_to_nan(lambda v, x: mpmath.hankel2(v, x, **HYPERKW)),
+                            [Arg(-1e20, 1e20), Arg()])
+
+    @pytest.mark.xfail(run=False, reason="issues at intermediately large orders")
+    def test_hermite(self):
+        assert_mpmath_equal(lambda n, x: sc.eval_hermite(int(n), x),
+                            exception_to_nan(mpmath.hermite),
+                            [IntArg(0, 10000), Arg()])
+
+    # hurwitz: same as zeta
+
+    def test_hyp0f1(self):
+        # mpmath reports no convergence unless maxterms is large enough
+        KW = dict(maxprec=400, maxterms=1500)
+        # n=500 (non-xslow default) fails for one bad point
+        assert_mpmath_equal(sc.hyp0f1,
+                            lambda a, x: mpmath.hyp0f1(a, x, **KW),
+                            [Arg(-1e7, 1e7), Arg(0, 1e5)],
+                            n=5000)
+        # NB: The range of the second parameter ("z") is limited from below
+        # because of an overflow in the intermediate calculations. The way
+        # to fix it is to implement an asymptotic expansion for Bessel J
+        # (similar to what is implemented for Bessel I here).
+
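+    # For context, the Bessel connection referred to above is
+    #
+    #     J_v(x) = (x/2)**v / Gamma(v + 1) * 0F1(; v + 1; -x**2/4)
+    #
+    # (and I_v with +x**2/4 instead), so the missing piece is a
+    # large-argument/large-order asymptotic expansion on the Bessel side.
+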
+    def test_hyp0f1_complex(self):
+        assert_mpmath_equal(lambda a, z: sc.hyp0f1(a.real, z),
+                            exception_to_nan(lambda a, x: mpmath.hyp0f1(a, x, **HYPERKW)),
+                            [Arg(-10, 10), ComplexArg(complex(-120, -120), complex(120, 120))])
+        # NB: The range of the first parameter ("v") is limited by an overflow
+        # in the intermediate calculations. Can be fixed by implementing an
+        # asymptotic expansion for Bessel functions for large order.
+
+    def test_hyp1f1(self):
+        def mpmath_hyp1f1(a, b, x):
+            try:
+                return mpmath.hyp1f1(a, b, x)
+            except ZeroDivisionError:
+                return np.inf
+
+        assert_mpmath_equal(
+            sc.hyp1f1,
+            mpmath_hyp1f1,
+            [Arg(-50, 50), Arg(1, 50, inclusive_a=False), Arg(-50, 50)],
+            n=500,
+            nan_ok=False
+        )
+
+    @pytest.mark.xfail(run=False)
+    def test_hyp1f1_complex(self):
+        assert_mpmath_equal(inf_to_nan(lambda a, b, x: sc.hyp1f1(a.real, b.real, x)),
+                            exception_to_nan(lambda a, b, x: mpmath.hyp1f1(a, b, x, **HYPERKW)),
+                            [Arg(-1e3, 1e3), Arg(-1e3, 1e3), ComplexArg()],
+                            n=2000)
+
+    @nonfunctional_tooslow
+    def test_hyp2f1_complex(self):
+        # SciPy's hyp2f1 seems to have performance and accuracy problems
+        assert_mpmath_equal(lambda a, b, c, x: sc.hyp2f1(a.real, b.real, c.real, x),
+                            exception_to_nan(lambda a, b, c, x: mpmath.hyp2f1(a, b, c, x, **HYPERKW)),
+                            [Arg(-1e2, 1e2), Arg(-1e2, 1e2), Arg(-1e2, 1e2), ComplexArg()],
+                            n=10)
+
+    @pytest.mark.xfail(run=False)
+    def test_hyperu(self):
+        assert_mpmath_equal(sc.hyperu,
+                            exception_to_nan(lambda a, b, x: mpmath.hyperu(a, b, x, **HYPERKW)),
+                            [Arg(), Arg(), Arg()])
+
+    @pytest.mark.xfail_on_32bit("mpmath issue gh-342: unsupported operand mpz, long for pow")
+    def test_igam_fac(self):
+        def mp_igam_fac(a, x):
+            return mpmath.power(x, a)*mpmath.exp(-x)/mpmath.gamma(a)
+
+        assert_mpmath_equal(_igam_fac,
+                            mp_igam_fac,
+                            [Arg(0, 1e14, inclusive_a=False), Arg(0, 1e14)],
+                            rtol=1e-10)
+
+    def test_j0(self):
+        # The Bessel function at large arguments is j0(x) ~ cos(x + phi)/sqrt(x),
+        # and the phase x + phi is only known to roughly x*eps absolute
+        # accuracy in double precision, so the cosine loses precision as x
+        # grows.
+        #
+        # This is numerically expected behavior, so we compare only up to
+        # |x| = 1e8, where the ~1e-8 phase error is still well below the 1e-5
+        # tolerance used for the wide-range check.
+        assert_mpmath_equal(sc.j0,
+                            mpmath.j0,
+                            [Arg(-1e3, 1e3)])
+        assert_mpmath_equal(sc.j0,
+                            mpmath.j0,
+                            [Arg(-1e8, 1e8)],
+                            rtol=1e-5)
+
+    def test_j1(self):
+        # See comment in test_j0
+        assert_mpmath_equal(sc.j1,
+                            mpmath.j1,
+                            [Arg(-1e3, 1e3)])
+        assert_mpmath_equal(sc.j1,
+                            mpmath.j1,
+                            [Arg(-1e8, 1e8)],
+                            rtol=1e-5)
+
+    @pytest.mark.xfail(run=False)
+    def test_jacobi(self):
+        assert_mpmath_equal(sc.eval_jacobi,
+                            exception_to_nan(lambda a, b, c, x: mpmath.jacobi(a, b, c, x, **HYPERKW)),
+                            [Arg(), Arg(), Arg(), Arg()])
+        assert_mpmath_equal(lambda n, b, c, x: sc.eval_jacobi(int(n), b, c, x),
+                            exception_to_nan(lambda a, b, c, x: mpmath.jacobi(a, b, c, x, **HYPERKW)),
+                            [IntArg(), Arg(), Arg(), Arg()])
+
+    def test_jacobi_int(self):
+        # Redefine functions to deal with numerical + mpmath issues
+        def jacobi(n, a, b, x, **kw):
+            # Mpmath does not handle the n=0 case always correctly
+            if n == 0:
+                return 1.0
+            # forward HYPERKW (maxprec/maxterms) to mpmath
+            return mpmath.jacobi(n, a, b, x, **kw)
+        assert_mpmath_equal(lambda n, a, b, x: sc.eval_jacobi(int(n), a, b, x),
+                            lambda n, a, b, x: exception_to_nan(jacobi)(n, a, b, x, **HYPERKW),
+                            [IntArg(), Arg(), Arg(), Arg()],
+                            n=20000, dps=50)
+
+    def test_kei(self):
+        def kei(x):
+            if x == 0:
+                # work around mpmath issue at x=0
+                return -pi/4
+            return exception_to_nan(mpmath.kei)(0, x, **HYPERKW)
+        assert_mpmath_equal(sc.kei,
+                            kei,
+                            [Arg(-1e30, 1e30)], n=1000)
+
+    def test_ker(self):
+        assert_mpmath_equal(sc.ker,
+                            exception_to_nan(lambda x: mpmath.ker(0, x, **HYPERKW)),
+                            [Arg(-1e30, 1e30)], n=1000)
+
+    @nonfunctional_tooslow
+    def test_laguerre(self):
+        assert_mpmath_equal(trace_args(sc.eval_laguerre),
+                            lambda n, x: exception_to_nan(mpmath.laguerre)(n, x, **HYPERKW),
+                            [Arg(), Arg()])
+
+    def test_laguerre_int(self):
+        assert_mpmath_equal(lambda n, x: sc.eval_laguerre(int(n), x),
+                            lambda n, x: exception_to_nan(mpmath.laguerre)(n, x, **HYPERKW),
+                            [IntArg(), Arg()], n=20000)
+
+    @pytest.mark.xfail_on_32bit("see gh-3551 for bad points")
+    def test_lambertw_real(self):
+        assert_mpmath_equal(lambda x, k: sc.lambertw(x, int(k.real)),
+                            lambda x, k: mpmath.lambertw(x, int(k.real)),
+                            [ComplexArg(-np.inf, np.inf), IntArg(0, 10)],
+                            rtol=1e-13, nan_ok=False)
+
+    def test_lanczos_sum_expg_scaled(self):
+        maxgamma = 171.624376956302725
+        e = np.exp(1)
+        g = 6.024680040776729583740234375
+
+        def gamma(x):
+            with np.errstate(over='ignore'):
+                fac = ((x + g - 0.5)/e)**(x - 0.5)
+                if fac != np.inf:
+                    res = fac*_lanczos_sum_expg_scaled(x)
+                else:
+                    fac = ((x + g - 0.5)/e)**(0.5*(x - 0.5))
+                    res = fac*_lanczos_sum_expg_scaled(x)
+                    res *= fac
+            return res
+
+        assert_mpmath_equal(gamma,
+                            mpmath.gamma,
+                            [Arg(0, maxgamma, inclusive_a=False)],
+                            rtol=1e-13)
+
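+    # The branching in gamma() above avoids premature overflow: when
+    # ((x + g - 0.5)/e)**(x - 0.5) does not fit in a double, the power is
+    # split into two half powers with the Lanczos sum multiplied in between,
+    # so the intermediates stay finite all the way up to the true gamma
+    # overflow threshold maxgamma ~ 171.62.
+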
+    @nonfunctional_tooslow
+    def test_legendre(self):
+        assert_mpmath_equal(sc.eval_legendre,
+                            mpmath.legendre,
+                            [Arg(), Arg()])
+
+    def test_legendre_int(self):
+        assert_mpmath_equal(lambda n, x: sc.eval_legendre(int(n), x),
+                            lambda n, x: exception_to_nan(mpmath.legendre)(n, x, **HYPERKW),
+                            [IntArg(), Arg()],
+                            n=20000)
+
+        # Check the small-x expansion
+        assert_mpmath_equal(lambda n, x: sc.eval_legendre(int(n), x),
+                            lambda n, x: exception_to_nan(mpmath.legendre)(n, x, **HYPERKW),
+                            [IntArg(), FixedArg(np.logspace(-30, -4, 20))])
+
+    def test_legenp(self):
+        def lpnm(n, m, z):
+            try:
+                v = sc.lpmn(m, n, z)[0][-1,-1]
+            except ValueError:
+                return np.nan
+            if abs(v) > 1e306:
+                # harmonize overflow to inf
+                v = np.inf * np.sign(v.real)
+            return v
+
+        def lpnm_2(n, m, z):
+            v = sc.lpmv(m, n, z)
+            if abs(v) > 1e306:
+                # harmonize overflow to inf
+                v = np.inf * np.sign(v.real)
+            return v
+
+        def legenp(n, m, z):
+            if (z == 1 or z == -1) and int(n) == n:
+                # Special case (mpmath may give inf; we take the limit by
+                # continuity)
+                if m == 0:
+                    if n < 0:
+                        n = -n - 1
+                    return mpmath.power(mpmath.sign(z), n)
+                else:
+                    return 0
+
+            if abs(z) < 1e-15:
+                # mpmath has bad performance here
+                return np.nan
+
+            typ = 2 if abs(z) < 1 else 3
+            v = exception_to_nan(mpmath.legenp)(n, m, z, type=typ)
+
+            if abs(v) > 1e306:
+                # harmonize overflow to inf
+                v = mpmath.inf * mpmath.sign(v.real)
+
+            return v
+
+        assert_mpmath_equal(lpnm,
+                            legenp,
+                            [IntArg(-100, 100), IntArg(-100, 100), Arg()])
+
+        assert_mpmath_equal(lpnm_2,
+                            legenp,
+                            [IntArg(-100, 100), Arg(-100, 100), Arg(-1, 1)],
+                            atol=1e-10)
+
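+    # mpmath's `type` argument selects the branch convention: type=2 matches
+    # the Ferrers functions computed by lpmn/lpmv on the interval (-1, 1),
+    # while type=3 is the convention appropriate for |z| > 1, hence the
+    # switch on abs(z) < 1 in the legenp helper above.
+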
+    def test_legenp_complex_2(self):
+        def clpnm(n, m, z):
+            try:
+                return sc.clpmn(m.real, n.real, z, type=2)[0][-1,-1]
+            except ValueError:
+                return np.nan
+
+        def legenp(n, m, z):
+            if abs(z) < 1e-15:
+                # mpmath has bad performance here
+                return np.nan
+            return exception_to_nan(mpmath.legenp)(int(n.real), int(m.real), z, type=2)
+
+        # mpmath is quite slow here
+        x = np.array([-2, -0.99, -0.5, 0, 1e-5, 0.5, 0.99, 20, 2e3])
+        y = np.array([-1e3, -0.5, 0.5, 1.3])
+        z = (x[:,None] + 1j*y[None,:]).ravel()
+
+        assert_mpmath_equal(clpnm,
+                            legenp,
+                            [FixedArg([-2, -1, 0, 1, 2, 10]), FixedArg([-2, -1, 0, 1, 2, 10]), FixedArg(z)],
+                            rtol=1e-6,
+                            n=500)
+
+    def test_legenp_complex_3(self):
+        def clpnm(n, m, z):
+            try:
+                return sc.clpmn(m.real, n.real, z, type=3)[0][-1,-1]
+            except ValueError:
+                return np.nan
+
+        def legenp(n, m, z):
+            if abs(z) < 1e-15:
+                # mpmath has bad performance here
+                return np.nan
+            return exception_to_nan(mpmath.legenp)(int(n.real), int(m.real), z, type=3)
+
+        # mpmath is quite slow here
+        x = np.array([-2, -0.99, -0.5, 0, 1e-5, 0.5, 0.99, 20, 2e3])
+        y = np.array([-1e3, -0.5, 0.5, 1.3])
+        z = (x[:,None] + 1j*y[None,:]).ravel()
+
+        assert_mpmath_equal(clpnm,
+                            legenp,
+                            [FixedArg([-2, -1, 0, 1, 2, 10]), FixedArg([-2, -1, 0, 1, 2, 10]), FixedArg(z)],
+                            rtol=1e-6,
+                            n=500)
+
+    @pytest.mark.xfail(run=False, reason="apparently picks wrong function at |z| > 1")
+    def test_legenq(self):
+        def lqnm(n, m, z):
+            return sc.lqmn(m, n, z)[0][-1,-1]
+
+        def legenq(n, m, z):
+            if abs(z) < 1e-15:
+                # mpmath has bad performance here
+                return np.nan
+            return exception_to_nan(mpmath.legenq)(n, m, z, type=2)
+
+        assert_mpmath_equal(lqnm,
+                            legenq,
+                            [IntArg(0, 100), IntArg(0, 100), Arg()])
+
+    @nonfunctional_tooslow
+    def test_legenq_complex(self):
+        def lqnm(n, m, z):
+            return sc.lqmn(int(m.real), int(n.real), z)[0][-1,-1]
+
+        def legenq(n, m, z):
+            if abs(z) < 1e-15:
+                # mpmath has bad performance here
+                return np.nan
+            return exception_to_nan(mpmath.legenq)(int(n.real), int(m.real), z, type=2)
+
+        assert_mpmath_equal(lqnm,
+                            legenq,
+                            [IntArg(0, 100), IntArg(0, 100), ComplexArg()],
+                            n=100)
+
+    def test_lgam1p(self):
+        def param_filter(x):
+            # Filter the poles
+            return np.where((np.floor(x) == x) & (x <= 0), False, True)
+
+        def mp_lgam1p(z):
+            # The real part of loggamma is log(|gamma(z)|)
+            return mpmath.loggamma(1 + z).real
+
+        assert_mpmath_equal(_lgam1p,
+                            mp_lgam1p,
+                            [Arg()], rtol=1e-13, dps=100,
+                            param_filter=param_filter)
+
+    def test_loggamma(self):
+        def mpmath_loggamma(z):
+            try:
+                res = mpmath.loggamma(z)
+            except ValueError:
+                res = complex(np.nan, np.nan)
+            return res
+
+        assert_mpmath_equal(sc.loggamma,
+                            mpmath_loggamma,
+                            [ComplexArg()], nan_ok=False,
+                            distinguish_nan_and_inf=False, rtol=5e-14)
+
+    @pytest.mark.xfail(run=False)
+    def test_pcfd(self):
+        def pcfd(v, x):
+            return sc.pbdv(v, x)[0]
+        assert_mpmath_equal(pcfd,
+                            exception_to_nan(lambda v, x: mpmath.pcfd(v, x, **HYPERKW)),
+                            [Arg(), Arg()])
+
+    @pytest.mark.xfail(run=False, reason="it's not the same as the mpmath function --- maybe different definition?")
+    def test_pcfv(self):
+        def pcfv(v, x):
+            return sc.pbvv(v, x)[0]
+        assert_mpmath_equal(pcfv,
+                            lambda v, x: time_limited()(exception_to_nan(mpmath.pcfv))(v, x, **HYPERKW),
+                            [Arg(), Arg()], n=1000)
+
+    def test_pcfw(self):
+        def pcfw(a, x):
+            return sc.pbwa(a, x)[0]
+
+        def dpcfw(a, x):
+            return sc.pbwa(a, x)[1]
+
+        def mpmath_dpcfw(a, x):
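+            # first derivative of W(a, x) with respect to x, obtained by
+            # numerical differentiation of the mpmath reference function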
+            return mpmath.diff(mpmath.pcfw, (a, x), (0, 1))
+
+        # The Zhang and Jin implementation only uses Taylor series and
+        # is thus accurate in only a very small range.
+        assert_mpmath_equal(pcfw,
+                            mpmath.pcfw,
+                            [Arg(-5, 5), Arg(-5, 5)], rtol=2e-8, n=100)
+
+        assert_mpmath_equal(dpcfw,
+                            mpmath_dpcfw,
+                            [Arg(-5, 5), Arg(-5, 5)], rtol=2e-9, n=100)
+
+    def test_rf(self):
+        if _pep440.parse(mpmath.__version__) >= _pep440.Version("1.0.0"):
+            # no workarounds needed
+            mppoch = mpmath.rf
+        else:
+            def mppoch(a, m):
+                # deal with cases where the result in double precision
+                # hits exactly a non-positive integer, but the
+                # corresponding extended-precision mpf floats don't
+                if float(a + m) == int(a + m) and float(a + m) <= 0:
+                    a = mpmath.mpf(a)
+                    m = int(a + m) - a
+                return mpmath.rf(a, m)
+
+        assert_mpmath_equal(sc.poch,
+                            mppoch,
+                            [Arg(), Arg()],
+                            dps=400)
+
+    def test_sinpi(self):
+        eps = np.finfo(float).eps
+        assert_mpmath_equal(_sinpi, mpmath.sinpi,
+                            [Arg()], nan_ok=False, rtol=2*eps)
+
+    def test_sinpi_complex(self):
+        assert_mpmath_equal(_sinpi, mpmath.sinpi,
+                            [ComplexArg()], nan_ok=False, rtol=2e-14)
+
+    def test_shi(self):
+        def shi(x):
+            return sc.shichi(x)[0]
+        assert_mpmath_equal(shi, mpmath.shi, [Arg()])
+        # check asymptotic series cross-over
+        assert_mpmath_equal(shi, mpmath.shi, [FixedArg([88 - 1e-9, 88, 88 + 1e-9])])
+
+    def test_shi_complex(self):
+        def shi(z):
+            return sc.shichi(z)[0]
+        # shi oscillates as Im[z] -> +- inf, so limit range
+        assert_mpmath_equal(shi,
+                            mpmath.shi,
+                            [ComplexArg(complex(-np.inf, -1e8), complex(np.inf, 1e8))],
+                            rtol=1e-12)
+
+    def test_si(self):
+        def si(x):
+            return sc.sici(x)[0]
+        assert_mpmath_equal(si, mpmath.si, [Arg()])
+
+    def test_si_complex(self):
+        def si(z):
+            return sc.sici(z)[0]
+        # si oscillates as Re[z] -> +- inf, so limit range
+        assert_mpmath_equal(si,
+                            mpmath.si,
+                            [ComplexArg(complex(-1e8, -np.inf), complex(1e8, np.inf))],
+                            rtol=1e-12)
+
+    def test_spence(self):
+        # mpmath uses a different convention for the dilogarithm
+        def dilog(x):
+            return mpmath.polylog(2, 1 - x)
+        # Spence has a branch cut on the negative real axis
+        assert_mpmath_equal(sc.spence,
+                            exception_to_nan(dilog),
+                            [Arg(0, np.inf)], rtol=1e-14)
+
+    def test_spence_complex(self):
+        def dilog(z):
+            return mpmath.polylog(2, 1 - z)
+        assert_mpmath_equal(sc.spence,
+                            exception_to_nan(dilog),
+                            [ComplexArg()], rtol=1e-14)
+
+    def test_spherharm(self):
+        def spherharm(l, m, theta, phi):
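+            # scipy's sph_harm(m, n, theta, phi) uses theta for the azimuthal
+            # angle and phi for the polar angle, the reverse of mpmath's
+            # spherharm(l, m, theta, phi) convention, hence the swapped
+            # arguments below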
+            if m > l:
+                return np.nan
+            return sc.sph_harm(m, l, phi, theta)
+        assert_mpmath_equal(spherharm,
+                            mpmath.spherharm,
+                            [IntArg(0, 100), IntArg(0, 100),
+                             Arg(a=0, b=pi), Arg(a=0, b=2*pi)],
+                            atol=1e-8, n=6000,
+                            dps=150)
+
+    def test_struveh(self):
+        assert_mpmath_equal(sc.struve,
+                            exception_to_nan(mpmath.struveh),
+                            [Arg(-1e4, 1e4), Arg(0, 1e4)],
+                            rtol=5e-10)
+
+    def test_struvel(self):
+        def mp_struvel(v, z):
+            if v < 0 and z < -v and abs(v) > 1000:
+                # larger DPS needed for correct results
+                old_dps = mpmath.mp.dps
+                try:
+                    mpmath.mp.dps = 300
+                    return mpmath.struvel(v, z)
+                finally:
+                    mpmath.mp.dps = old_dps
+            return mpmath.struvel(v, z)
+
+        assert_mpmath_equal(sc.modstruve,
+                            exception_to_nan(mp_struvel),
+                            [Arg(-1e4, 1e4), Arg(0, 1e4)],
+                            rtol=5e-10,
+                            ignore_inf_sign=True)
+
+    def test_wrightomega_real(self):
+        def mpmath_wrightomega_real(x):
+            return mpmath.lambertw(mpmath.exp(x), mpmath.mpf('-0.5'))
+
+        # For x < -1000 the Wright Omega function is just 0 to double
+        # precision, and for x > 1e21 it is just x to double
+        # precision.
+        assert_mpmath_equal(
+            sc.wrightomega,
+            mpmath_wrightomega_real,
+            [Arg(-1000, 1e21)],
+            rtol=5e-15,
+            atol=0,
+            nan_ok=False,
+        )
+
+    def test_wrightomega(self):
+        assert_mpmath_equal(sc.wrightomega,
+                            lambda z: _mpmath_wrightomega(z, 25),
+                            [ComplexArg()], rtol=1e-14, nan_ok=False)
+
+    def test_hurwitz_zeta(self):
+        assert_mpmath_equal(sc.zeta,
+                            exception_to_nan(mpmath.zeta),
+                            [Arg(a=1, b=1e10, inclusive_a=False),
+                             Arg(a=0, inclusive_a=False)])
+
+    def test_riemann_zeta(self):
+        assert_mpmath_equal(
+            sc.zeta,
+            lambda x: mpmath.zeta(x) if x != 1 else mpmath.inf,
+            [Arg(-100, 100)],
+            nan_ok=False,
+            rtol=5e-13,
+        )
+
+    def test_zetac(self):
+        assert_mpmath_equal(sc.zetac,
+                            lambda x: (mpmath.zeta(x) - 1
+                                       if x != 1 else mpmath.inf),
+                            [Arg(-100, 100)],
+                            nan_ok=False, dps=45, rtol=5e-13)
+
+    def test_boxcox(self):
+
+        def mp_boxcox(x, lmbda):
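+            # Box-Cox transform: (x**lmbda - 1)/lmbda for lmbda != 0 and
+            # log(x) at lmbda == 0; powm1(x, lmbda) evaluates x**lmbda - 1
+            # without cancellation for small lmbda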
+            x = mpmath.mp.mpf(x)
+            lmbda = mpmath.mp.mpf(lmbda)
+            if lmbda == 0:
+                return mpmath.mp.log(x)
+            else:
+                return mpmath.mp.powm1(x, lmbda) / lmbda
+
+        assert_mpmath_equal(sc.boxcox,
+                            exception_to_nan(mp_boxcox),
+                            [Arg(a=0, inclusive_a=False), Arg()],
+                            n=200,
+                            dps=60,
+                            rtol=1e-13)
+
+    def test_boxcox1p(self):
+
+        def mp_boxcox1p(x, lmbda):
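+            # boxcox1p(x, lmbda) = boxcox(1 + x, lmbda), written so it stays
+            # accurate when x is close to zero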
+            x = mpmath.mp.mpf(x)
+            lmbda = mpmath.mp.mpf(lmbda)
+            one = mpmath.mp.mpf(1)
+            if lmbda == 0:
+                return mpmath.mp.log(one + x)
+            else:
+                return mpmath.mp.powm1(one + x, lmbda) / lmbda
+
+        assert_mpmath_equal(sc.boxcox1p,
+                            exception_to_nan(mp_boxcox1p),
+                            [Arg(a=-1, inclusive_a=False), Arg()],
+                            n=200,
+                            dps=60,
+                            rtol=1e-13)
+
+    def test_spherical_jn(self):
+        def mp_spherical_jn(n, z):
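+            # spherical Bessel function via j_n(z) = sqrt(pi/(2z)) * J_{n+1/2}(z)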
+            arg = mpmath.mpmathify(z)
+            out = (mpmath.besselj(n + mpmath.mpf(1)/2, arg) /
+                   mpmath.sqrt(2*arg/mpmath.pi))
+            if arg.imag == 0:
+                return out.real
+            else:
+                return out
+
+        assert_mpmath_equal(lambda n, z: sc.spherical_jn(int(n), z),
+                            exception_to_nan(mp_spherical_jn),
+                            [IntArg(0, 200), Arg(-1e8, 1e8)],
+                            dps=300)
+
+    def test_spherical_jn_complex(self):
+        def mp_spherical_jn(n, z):
+            arg = mpmath.mpmathify(z)
+            out = (mpmath.besselj(n + mpmath.mpf(1)/2, arg) /
+                   mpmath.sqrt(2*arg/mpmath.pi))
+            if arg.imag == 0:
+                return out.real
+            else:
+                return out
+
+        assert_mpmath_equal(lambda n, z: sc.spherical_jn(int(n.real), z),
+                            exception_to_nan(mp_spherical_jn),
+                            [IntArg(0, 200), ComplexArg()])
+
+    def test_spherical_yn(self):
+        def mp_spherical_yn(n, z):
+            arg = mpmath.mpmathify(z)
+            out = (mpmath.bessely(n + mpmath.mpf(1)/2, arg) /
+                   mpmath.sqrt(2*arg/mpmath.pi))
+            if arg.imag == 0:
+                return out.real
+            else:
+                return out
+
+        assert_mpmath_equal(lambda n, z: sc.spherical_yn(int(n), z),
+                            exception_to_nan(mp_spherical_yn),
+                            [IntArg(0, 200), Arg(-1e10, 1e10)],
+                            dps=100)
+
+    def test_spherical_yn_complex(self):
+        def mp_spherical_yn(n, z):
+            arg = mpmath.mpmathify(z)
+            out = (mpmath.bessely(n + mpmath.mpf(1)/2, arg) /
+                   mpmath.sqrt(2*arg/mpmath.pi))
+            if arg.imag == 0:
+                return out.real
+            else:
+                return out
+
+        assert_mpmath_equal(lambda n, z: sc.spherical_yn(int(n.real), z),
+                            exception_to_nan(mp_spherical_yn),
+                            [IntArg(0, 200), ComplexArg()])
+
+    def test_spherical_in(self):
+        def mp_spherical_in(n, z):
+            arg = mpmath.mpmathify(z)
+            out = (mpmath.besseli(n + mpmath.mpf(1)/2, arg) /
+                   mpmath.sqrt(2*arg/mpmath.pi))
+            if arg.imag == 0:
+                return out.real
+            else:
+                return out
+
+        assert_mpmath_equal(lambda n, z: sc.spherical_in(int(n), z),
+                            exception_to_nan(mp_spherical_in),
+                            [IntArg(0, 200), Arg()],
+                            dps=200, atol=10**(-278))
+
+    def test_spherical_in_complex(self):
+        def mp_spherical_in(n, z):
+            arg = mpmath.mpmathify(z)
+            out = (mpmath.besseli(n + mpmath.mpf(1)/2, arg) /
+                   mpmath.sqrt(2*arg/mpmath.pi))
+            if arg.imag == 0:
+                return out.real
+            else:
+                return out
+
+        assert_mpmath_equal(lambda n, z: sc.spherical_in(int(n.real), z),
+                            exception_to_nan(mp_spherical_in),
+                            [IntArg(0, 200), ComplexArg()])
+
+    def test_spherical_kn(self):
+        def mp_spherical_kn(n, z):
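+            # modified spherical Bessel function of the second kind,
+            # k_n(z) = sqrt(pi/(2z)) * K_{n+1/2}(z)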
+            out = (mpmath.besselk(n + mpmath.mpf(1)/2, z) *
+                   mpmath.sqrt(mpmath.pi/(2*mpmath.mpmathify(z))))
+            if mpmath.mpmathify(z).imag == 0:
+                return out.real
+            else:
+                return out
+
+        assert_mpmath_equal(lambda n, z: sc.spherical_kn(int(n), z),
+                            exception_to_nan(mp_spherical_kn),
+                            [IntArg(0, 150), Arg()],
+                            dps=100)
+
+    @pytest.mark.xfail(run=False, reason="Accuracy issues near z = -1 inherited from kv.")
+    def test_spherical_kn_complex(self):
+        def mp_spherical_kn(n, z):
+            arg = mpmath.mpmathify(z)
+            out = (mpmath.besselk(n + mpmath.mpf(1)/2, arg) /
+                   mpmath.sqrt(2*arg/mpmath.pi))
+            if arg.imag == 0:
+                return out.real
+            else:
+                return out
+
+        assert_mpmath_equal(lambda n, z: sc.spherical_kn(int(n.real), z),
+                            exception_to_nan(mp_spherical_kn),
+                            [IntArg(0, 200), ComplexArg()],
+                            dps=200)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_nan_inputs.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_nan_inputs.py
new file mode 100644
index 00000000..627ba571
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_nan_inputs.py
@@ -0,0 +1,64 @@
+"""Test how the ufuncs in special handle nan inputs.
+
+"""
+from typing import Callable, Dict
+
+import numpy as np
+from numpy.testing import assert_array_equal, assert_, suppress_warnings
+import pytest
+import scipy.special as sc
+
+
+KNOWNFAILURES: Dict[Callable, str] = {}
+
+POSTPROCESSING: Dict[Callable, Callable] = {}
+
+
+def _get_ufuncs():
+    ufuncs = []
+    ufunc_names = []
+    for name in sorted(sc.__dict__):
+        obj = sc.__dict__[name]
+        if not isinstance(obj, np.ufunc):
+            continue
+        msg = KNOWNFAILURES.get(obj)
+        if msg is None:
+            ufuncs.append(obj)
+            ufunc_names.append(name)
+        else:
+            fail = pytest.mark.xfail(run=False, reason=msg)
+            ufuncs.append(pytest.param(obj, marks=fail))
+            ufunc_names.append(name)
+    return ufuncs, ufunc_names
+
+
+UFUNCS, UFUNC_NAMES = _get_ufuncs()
+
+
+@pytest.mark.parametrize("func", UFUNCS, ids=UFUNC_NAMES)
+def test_nan_inputs(func):
+    args = (np.nan,)*func.nin
+    with suppress_warnings() as sup:
+        # Ignore warnings about unsafe casts from legacy wrappers
+        sup.filter(RuntimeWarning,
+                   "floating point number truncated to an integer")
+        try:
+            with suppress_warnings() as sup_inner:
+                sup_inner.filter(DeprecationWarning)
+                res = func(*args)
+        except TypeError:
+            # One of the arguments doesn't take real inputs
+            return
+    if func in POSTPROCESSING:
+        res = POSTPROCESSING[func](*res)
+
+    msg = "got {} instead of nan".format(res)
+    assert_array_equal(np.isnan(res), True, err_msg=msg)
+
+
+def test_legacy_cast():
+    with suppress_warnings() as sup:
+        sup.filter(RuntimeWarning,
+                   "floating point number truncated to an integer")
+        res = sc.bdtrc(np.nan, 1, 0.5)
+        assert_(np.isnan(res))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_ndtr.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_ndtr.py
new file mode 100644
index 00000000..ba9b689b
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_ndtr.py
@@ -0,0 +1,77 @@
+import numpy as np
+from numpy.testing import assert_equal, assert_allclose
+import scipy.special as sc
+
+
+def test_ndtr():
+    assert_equal(sc.ndtr(0), 0.5)
+    assert_allclose(sc.ndtr(1), 0.8413447460685429)
+
+
+class TestNdtri:
+
+    def test_zero(self):
+        assert sc.ndtri(0.5) == 0.0
+
+    def test_asymptotes(self):
+        assert_equal(sc.ndtri([0.0, 1.0]), [-np.inf, np.inf])
+
+    def test_outside_of_domain(self):
+        assert all(np.isnan(sc.ndtri([-1.5, 1.5])))
+
+
+class TestLogNdtr:
+
+    # The expected values in these tests were computed with mpmath:
+    #
+    #   def log_ndtr_mp(x):
+    #       return mpmath.log(mpmath.ncdf(x))
+    #
+
+    def test_log_ndtr_moderate_le8(self):
+        x = np.array([-0.75, -0.25, 0, 0.5, 1.5, 2.5, 3, 4, 5, 7, 8])
+        expected = np.array([-1.4844482299196562,
+                             -0.9130617648111351,
+                             -0.6931471805599453,
+                             -0.3689464152886564,
+                             -0.06914345561223398,
+                             -0.006229025485860002,
+                             -0.0013508099647481938,
+                             -3.167174337748927e-05,
+                             -2.866516129637636e-07,
+                             -1.279812543886654e-12,
+                             -6.220960574271786e-16])
+        y = sc.log_ndtr(x)
+        assert_allclose(y, expected, rtol=1e-14)
+
+    def test_log_ndtr_values_8_16(self):
+        x = np.array([8.001, 8.06, 8.15, 8.5, 10, 12, 14, 16])
+        expected = [-6.170639424817055e-16,
+                    -3.814722443652823e-16,
+                    -1.819621363526629e-16,
+                    -9.479534822203318e-18,
+                    -7.619853024160525e-24,
+                    -1.776482112077679e-33,
+                    -7.7935368191928e-45,
+                    -6.388754400538087e-58]
+        y = sc.log_ndtr(x)
+        assert_allclose(y, expected, rtol=5e-14)
+
+    def test_log_ndtr_values_16_31(self):
+        x = np.array([16.15, 20.3, 21.4, 26.2, 30.9])
+        expected = [-5.678084565148492e-59,
+                    -6.429244467698346e-92,
+                    -6.680402412553295e-102,
+                    -1.328698078458869e-151,
+                    -5.972288641838264e-210]
+        y = sc.log_ndtr(x)
+        assert_allclose(y, expected, rtol=2e-13)
+
+    def test_log_ndtr_values_gt31(self):
+        x = np.array([31.6, 32.8, 34.9, 37.1])
+        expected = [-1.846036234858162e-219,
+                    -2.9440539964066835e-236,
+                    -3.71721649450857e-267,
+                    -1.4047119663106221e-301]
+        y = sc.log_ndtr(x)
+        assert_allclose(y, expected, rtol=3e-13)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_ndtri_exp.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_ndtri_exp.py
new file mode 100644
index 00000000..82a9fbd3
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_ndtri_exp.py
@@ -0,0 +1,94 @@
+import pytest
+import numpy as np
+from numpy.testing import assert_equal, assert_allclose
+from scipy.special import log_ndtr, ndtri_exp
+from scipy.special._testutils import assert_func_equal
+
+
+def log_ndtr_ndtri_exp(y):
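+    # round trip: if ndtri_exp were an exact inverse of log_ndtr, this
+    # would be the identity on (-inf, 0)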
+    return log_ndtr(ndtri_exp(y))
+
+
+@pytest.fixture(scope="class")
+def uniform_random_points():
+    random_state = np.random.RandomState(1234)
+    points = random_state.random_sample(1000)
+    return points
+
+
+class TestNdtriExp:
+    """Tests that ndtri_exp is sufficiently close to an inverse of log_ndtr.
+
+    We have separate tests for the five intervals (-inf, -10),
+    [-10, -2), [-2, -0.14542), [-0.14542, -1e-6), and [-1e-6, 0).
+    ndtri_exp(y) is computed in three different ways depending on whether y
+    is in (-inf, -2), [-2, log(1 - exp(-2))], or (log(1 - exp(-2)), 0).
+    Each of these intervals is given its own test with two additional tests
+    for handling very small values and values very close to zero.
+    """
+
+    @pytest.mark.parametrize(
+        "test_input", [-1e1, -1e2, -1e10, -1e20, -np.finfo(float).max]
+    )
+    def test_very_small_arg(self, test_input, uniform_random_points):
+        scale = test_input
+        points = scale * (0.5 * uniform_random_points + 0.5)
+        assert_func_equal(
+            log_ndtr_ndtri_exp,
+            lambda y: y, points,
+            rtol=1e-14,
+            nan_ok=True
+        )
+
+    @pytest.mark.parametrize(
+        "interval,expected_rtol",
+        [
+            ((-10, -2), 1e-14),
+            ((-2, -0.14542), 1e-12),
+            ((-0.14542, -1e-6), 1e-10),
+            ((-1e-6, 0), 1e-6),
+        ],
+    )
+    def test_in_interval(self, interval, expected_rtol, uniform_random_points):
+        left, right = interval
+        points = (right - left) * uniform_random_points + left
+        assert_func_equal(
+            log_ndtr_ndtri_exp,
+            lambda y: y, points,
+            rtol=expected_rtol,
+            nan_ok=True
+        )
+
+    def test_extreme(self):
+        # bigneg is not quite the largest negative double precision value.
+        # Here's why:
+        # The round-trip calculation
+        #    y = ndtri_exp(bigneg)
+        #    bigneg2 = log_ndtr(y)
+        # where bigneg is a very large negative value, would--with infinite
+        # precision--result in bigneg2 == bigneg.  When bigneg is large enough,
+        # y is effectively equal to -sqrt(2)*sqrt(-bigneg), and log_ndtr(y) is
+        # effectively -(y/sqrt(2))**2.  If we use bigneg = np.finfo(float).min,
+        # then by construction, the theoretical value is the most negative
+        # finite value that can be represented with 64 bit float point.  This
+        # means tiny changes in how the computation proceeds can result in the
+        # return value being -inf.  (E.g. changing the constant representation
+        # of 1/sqrt(2) from 0.7071067811865475--which is the value returned by
+        # 1/np.sqrt(2)--to 0.7071067811865476--which is the most accurate 64
+        # bit floating point representation of 1/sqrt(2)--results in the
+        # round-trip that starts with np.finfo(float).min returning -inf.  So
+        # we'll move the bigneg value a few ULPs towards 0 to avoid this
+        # sensitivity.
+        # Use the reduce method to apply nextafter four times.
+        bigneg = np.nextafter.reduce([np.finfo(float).min, 0, 0, 0, 0])
+        # tinyneg is approx. -2.225e-308.
+        tinyneg = -np.finfo(float).tiny
+        x = np.array([tinyneg, bigneg])
+        result = log_ndtr_ndtri_exp(x)
+        assert_allclose(result, x, rtol=1e-12)
+
+    def test_asymptotes(self):
+        assert_equal(ndtri_exp([-np.inf, 0.0]), [-np.inf, np.inf])
+
+    def test_outside_domain(self):
+        assert np.isnan(ndtri_exp(1.0))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_orthogonal.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_orthogonal.py
new file mode 100644
index 00000000..24839bbf
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_orthogonal.py
@@ -0,0 +1,791 @@
+import numpy as np
+from numpy import array, sqrt
+from numpy.testing import (assert_array_almost_equal, assert_equal,
+                           assert_almost_equal, assert_allclose)
+from pytest import raises as assert_raises
+
+from scipy import integrate
+import scipy.special as sc
+from scipy.special import gamma
+import scipy.special._orthogonal as orth
+
+
+class TestCheby:
+    def test_chebyc(self):
+        C0 = orth.chebyc(0)
+        C1 = orth.chebyc(1)
+        with np.errstate(all='ignore'):
+            C2 = orth.chebyc(2)
+            C3 = orth.chebyc(3)
+            C4 = orth.chebyc(4)
+            C5 = orth.chebyc(5)
+
+        assert_array_almost_equal(C0.c,[2],13)
+        assert_array_almost_equal(C1.c,[1,0],13)
+        assert_array_almost_equal(C2.c,[1,0,-2],13)
+        assert_array_almost_equal(C3.c,[1,0,-3,0],13)
+        assert_array_almost_equal(C4.c,[1,0,-4,0,2],13)
+        assert_array_almost_equal(C5.c,[1,0,-5,0,5,0],13)
+
+    def test_chebys(self):
+        S0 = orth.chebys(0)
+        S1 = orth.chebys(1)
+        S2 = orth.chebys(2)
+        S3 = orth.chebys(3)
+        S4 = orth.chebys(4)
+        S5 = orth.chebys(5)
+        assert_array_almost_equal(S0.c,[1],13)
+        assert_array_almost_equal(S1.c,[1,0],13)
+        assert_array_almost_equal(S2.c,[1,0,-1],13)
+        assert_array_almost_equal(S3.c,[1,0,-2,0],13)
+        assert_array_almost_equal(S4.c,[1,0,-3,0,1],13)
+        assert_array_almost_equal(S5.c,[1,0,-4,0,3,0],13)
+
+    def test_chebyt(self):
+        T0 = orth.chebyt(0)
+        T1 = orth.chebyt(1)
+        T2 = orth.chebyt(2)
+        T3 = orth.chebyt(3)
+        T4 = orth.chebyt(4)
+        T5 = orth.chebyt(5)
+        assert_array_almost_equal(T0.c,[1],13)
+        assert_array_almost_equal(T1.c,[1,0],13)
+        assert_array_almost_equal(T2.c,[2,0,-1],13)
+        assert_array_almost_equal(T3.c,[4,0,-3,0],13)
+        assert_array_almost_equal(T4.c,[8,0,-8,0,1],13)
+        assert_array_almost_equal(T5.c,[16,0,-20,0,5,0],13)
+
+    def test_chebyu(self):
+        U0 = orth.chebyu(0)
+        U1 = orth.chebyu(1)
+        U2 = orth.chebyu(2)
+        U3 = orth.chebyu(3)
+        U4 = orth.chebyu(4)
+        U5 = orth.chebyu(5)
+        assert_array_almost_equal(U0.c,[1],13)
+        assert_array_almost_equal(U1.c,[2,0],13)
+        assert_array_almost_equal(U2.c,[4,0,-1],13)
+        assert_array_almost_equal(U3.c,[8,0,-4,0],13)
+        assert_array_almost_equal(U4.c,[16,0,-12,0,1],13)
+        assert_array_almost_equal(U5.c,[32,0,-32,0,6,0],13)
+
+
+class TestGegenbauer:
+
+    def test_gegenbauer(self):
+        a = 5*np.random.random() - 0.5
+        if a == 0:
+            a = -0.2
+        Ca0 = orth.gegenbauer(0,a)
+        Ca1 = orth.gegenbauer(1,a)
+        Ca2 = orth.gegenbauer(2,a)
+        Ca3 = orth.gegenbauer(3,a)
+        Ca4 = orth.gegenbauer(4,a)
+        Ca5 = orth.gegenbauer(5,a)
+
+        assert_array_almost_equal(Ca0.c,array([1]),13)
+        assert_array_almost_equal(Ca1.c,array([2*a,0]),13)
+        assert_array_almost_equal(Ca2.c,array([2*a*(a+1),0,-a]),13)
+        assert_array_almost_equal(Ca3.c,array([4*sc.poch(a,3),0,-6*a*(a+1),
+                                               0])/3.0,11)
+        assert_array_almost_equal(Ca4.c,array([4*sc.poch(a,4),0,-12*sc.poch(a,3),
+                                               0,3*a*(a+1)])/6.0,11)
+        assert_array_almost_equal(Ca5.c,array([4*sc.poch(a,5),0,-20*sc.poch(a,4),
+                                               0,15*sc.poch(a,3),0])/15.0,11)
+
+
+class TestHermite:
+    def test_hermite(self):
+        H0 = orth.hermite(0)
+        H1 = orth.hermite(1)
+        H2 = orth.hermite(2)
+        H3 = orth.hermite(3)
+        H4 = orth.hermite(4)
+        H5 = orth.hermite(5)
+        assert_array_almost_equal(H0.c,[1],13)
+        assert_array_almost_equal(H1.c,[2,0],13)
+        assert_array_almost_equal(H2.c,[4,0,-2],13)
+        assert_array_almost_equal(H3.c,[8,0,-12,0],13)
+        assert_array_almost_equal(H4.c,[16,0,-48,0,12],12)
+        assert_array_almost_equal(H5.c,[32,0,-160,0,120,0],12)
+
+    def test_hermitenorm(self):
+        # He_n(x) = 2**(-n/2) H_n(x/sqrt(2))
+        psub = np.poly1d([1.0/sqrt(2),0])
+        H0 = orth.hermitenorm(0)
+        H1 = orth.hermitenorm(1)
+        H2 = orth.hermitenorm(2)
+        H3 = orth.hermitenorm(3)
+        H4 = orth.hermitenorm(4)
+        H5 = orth.hermitenorm(5)
+        he0 = orth.hermite(0)(psub)
+        he1 = orth.hermite(1)(psub) / sqrt(2)
+        he2 = orth.hermite(2)(psub) / 2.0
+        he3 = orth.hermite(3)(psub) / (2*sqrt(2))
+        he4 = orth.hermite(4)(psub) / 4.0
+        he5 = orth.hermite(5)(psub) / (4.0*sqrt(2))
+
+        assert_array_almost_equal(H0.c,he0.c,13)
+        assert_array_almost_equal(H1.c,he1.c,13)
+        assert_array_almost_equal(H2.c,he2.c,13)
+        assert_array_almost_equal(H3.c,he3.c,13)
+        assert_array_almost_equal(H4.c,he4.c,13)
+        assert_array_almost_equal(H5.c,he5.c,13)
+
+
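+# The classes below are prefixed with an underscore so pytest does not
+# collect them; they appear to be intentionally disabled checks.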
+class _test_sh_legendre:
+
+    def test_sh_legendre(self):
+        # P*_n(x) = P_n(2x-1)
+        psub = np.poly1d([2,-1])
+        Ps0 = orth.sh_legendre(0)
+        Ps1 = orth.sh_legendre(1)
+        Ps2 = orth.sh_legendre(2)
+        Ps3 = orth.sh_legendre(3)
+        Ps4 = orth.sh_legendre(4)
+        Ps5 = orth.sh_legendre(5)
+        pse0 = orth.legendre(0)(psub)
+        pse1 = orth.legendre(1)(psub)
+        pse2 = orth.legendre(2)(psub)
+        pse3 = orth.legendre(3)(psub)
+        pse4 = orth.legendre(4)(psub)
+        pse5 = orth.legendre(5)(psub)
+        assert_array_almost_equal(Ps0.c,pse0.c,13)
+        assert_array_almost_equal(Ps1.c,pse1.c,13)
+        assert_array_almost_equal(Ps2.c,pse2.c,13)
+        assert_array_almost_equal(Ps3.c,pse3.c,13)
+        assert_array_almost_equal(Ps4.c,pse4.c,12)
+        assert_array_almost_equal(Ps5.c,pse5.c,12)
+
+
+class _test_sh_chebyt:
+
+    def test_sh_chebyt(self):
+        # T*_n(x) = T_n(2x-1)
+        psub = np.poly1d([2,-1])
+        Ts0 = orth.sh_chebyt(0)
+        Ts1 = orth.sh_chebyt(1)
+        Ts2 = orth.sh_chebyt(2)
+        Ts3 = orth.sh_chebyt(3)
+        Ts4 = orth.sh_chebyt(4)
+        Ts5 = orth.sh_chebyt(5)
+        tse0 = orth.chebyt(0)(psub)
+        tse1 = orth.chebyt(1)(psub)
+        tse2 = orth.chebyt(2)(psub)
+        tse3 = orth.chebyt(3)(psub)
+        tse4 = orth.chebyt(4)(psub)
+        tse5 = orth.chebyt(5)(psub)
+        assert_array_almost_equal(Ts0.c,tse0.c,13)
+        assert_array_almost_equal(Ts1.c,tse1.c,13)
+        assert_array_almost_equal(Ts2.c,tse2.c,13)
+        assert_array_almost_equal(Ts3.c,tse3.c,13)
+        assert_array_almost_equal(Ts4.c,tse4.c,12)
+        assert_array_almost_equal(Ts5.c,tse5.c,12)
+
+
+class _test_sh_chebyu:
+
+    def test_sh_chebyu(self):
+        # U*_n(x) = U_n(2x-1)
+        psub = np.poly1d([2,-1])
+        Us0 = orth.sh_chebyu(0)
+        Us1 = orth.sh_chebyu(1)
+        Us2 = orth.sh_chebyu(2)
+        Us3 = orth.sh_chebyu(3)
+        Us4 = orth.sh_chebyu(4)
+        Us5 = orth.sh_chebyu(5)
+        use0 = orth.chebyu(0)(psub)
+        use1 = orth.chebyu(1)(psub)
+        use2 = orth.chebyu(2)(psub)
+        use3 = orth.chebyu(3)(psub)
+        use4 = orth.chebyu(4)(psub)
+        use5 = orth.chebyu(5)(psub)
+        assert_array_almost_equal(Us0.c,use0.c,13)
+        assert_array_almost_equal(Us1.c,use1.c,13)
+        assert_array_almost_equal(Us2.c,use2.c,13)
+        assert_array_almost_equal(Us3.c,use3.c,13)
+        assert_array_almost_equal(Us4.c,use4.c,12)
+        assert_array_almost_equal(Us5.c,use5.c,11)
+
+
+class _test_sh_jacobi:
+    def test_sh_jacobi(self):
+        # G^(p,q)_n(x) = n! gamma(n+p)/gamma(2*n+p) * P^(p-q,q-1)_n(2*x-1)
+        conv = lambda n,p: gamma(n+1)*gamma(n+p)/gamma(2*n+p)
+        psub = np.poly1d([2,-1])
+        q = 4 * np.random.random()
+        p = q-1 + 2*np.random.random()
+        # print("shifted jacobi p,q = ", p, q)
+        G0 = orth.sh_jacobi(0,p,q)
+        G1 = orth.sh_jacobi(1,p,q)
+        G2 = orth.sh_jacobi(2,p,q)
+        G3 = orth.sh_jacobi(3,p,q)
+        G4 = orth.sh_jacobi(4,p,q)
+        G5 = orth.sh_jacobi(5,p,q)
+        ge0 = orth.jacobi(0,p-q,q-1)(psub) * conv(0,p)
+        ge1 = orth.jacobi(1,p-q,q-1)(psub) * conv(1,p)
+        ge2 = orth.jacobi(2,p-q,q-1)(psub) * conv(2,p)
+        ge3 = orth.jacobi(3,p-q,q-1)(psub) * conv(3,p)
+        ge4 = orth.jacobi(4,p-q,q-1)(psub) * conv(4,p)
+        ge5 = orth.jacobi(5,p-q,q-1)(psub) * conv(5,p)
+
+        assert_array_almost_equal(G0.c,ge0.c,13)
+        assert_array_almost_equal(G1.c,ge1.c,13)
+        assert_array_almost_equal(G2.c,ge2.c,13)
+        assert_array_almost_equal(G3.c,ge3.c,13)
+        assert_array_almost_equal(G4.c,ge4.c,13)
+        assert_array_almost_equal(G5.c,ge5.c,13)
+
+
+class TestCall:
+    def test_call(self):
+        poly = []
+        for n in range(5):
+            poly.extend([x.strip() for x in
+                ("""
+                orth.jacobi(%(n)d,0.3,0.9)
+                orth.sh_jacobi(%(n)d,0.3,0.9)
+                orth.genlaguerre(%(n)d,0.3)
+                orth.laguerre(%(n)d)
+                orth.hermite(%(n)d)
+                orth.hermitenorm(%(n)d)
+                orth.gegenbauer(%(n)d,0.3)
+                orth.chebyt(%(n)d)
+                orth.chebyu(%(n)d)
+                orth.chebyc(%(n)d)
+                orth.chebys(%(n)d)
+                orth.sh_chebyt(%(n)d)
+                orth.sh_chebyu(%(n)d)
+                orth.legendre(%(n)d)
+                orth.sh_legendre(%(n)d)
+                """ % dict(n=n)).split()
+            ])
+        with np.errstate(all='ignore'):
+            for pstr in poly:
+                p = eval(pstr)
+                assert_almost_equal(p(0.315), np.poly1d(p.coef)(0.315),
+                                    err_msg=pstr)
+
+
+class TestGenlaguerre:
+    def test_regression(self):
+        assert_equal(orth.genlaguerre(1, 1, monic=False)(0), 2.)
+        assert_equal(orth.genlaguerre(1, 1, monic=True)(0), -2.)
+        assert_equal(orth.genlaguerre(1, 1, monic=False), np.poly1d([-1, 2]))
+        assert_equal(orth.genlaguerre(1, 1, monic=True), np.poly1d([1, -2]))
+
+
+def verify_gauss_quad(root_func, eval_func, weight_func, a, b, N,
+                      rtol=1e-15, atol=5e-14):
+    # this test is copied from numpy's TestGauss in test_hermite.py
+    x, w, mu = root_func(N, True)
+
+    n = np.arange(N)
+    v = eval_func(n[:,np.newaxis], x)
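+    # An N-point Gauss rule integrates polynomials up to degree 2N-1 exactly,
+    # so the weighted Gram matrix of the first N polynomials, normalized by
+    # its diagonal, should come out as the identity.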
+    vv = np.dot(v*w, v.T)
+    vd = 1 / np.sqrt(vv.diagonal())
+    vv = vd[:, np.newaxis] * vv * vd
+    assert_allclose(vv, np.eye(N), rtol, atol)
+
+    # check that the integral of 1 is correct
+    assert_allclose(w.sum(), mu, rtol, atol)
+
+    # compare the results of integrating a function with quad.
+    f = lambda x: x**3 - 3*x**2 + x - 2
+    resI = integrate.quad(lambda x: f(x)*weight_func(x), a, b)
+    resG = np.vdot(f(x), w)
+    rtol = 1e-6 if 1e-6 < resI[1] else resI[1] * 10
+    assert_allclose(resI[0], resG, rtol=rtol)
+
+def test_roots_jacobi():
+    rf = lambda a, b: lambda n, mu: sc.roots_jacobi(n, a, b, mu)
+    ef = lambda a, b: lambda n, x: sc.eval_jacobi(n, a, b, x)
+    wf = lambda a, b: lambda x: (1 - x)**a * (1 + x)**b
+
+    vgq = verify_gauss_quad
+    vgq(rf(-0.5, -0.75), ef(-0.5, -0.75), wf(-0.5, -0.75), -1., 1., 5)
+    vgq(rf(-0.5, -0.75), ef(-0.5, -0.75), wf(-0.5, -0.75), -1., 1.,
+        25, atol=1e-12)
+    vgq(rf(-0.5, -0.75), ef(-0.5, -0.75), wf(-0.5, -0.75), -1., 1.,
+        100, atol=1e-11)
+
+    vgq(rf(0.5, -0.5), ef(0.5, -0.5), wf(0.5, -0.5), -1., 1., 5)
+    vgq(rf(0.5, -0.5), ef(0.5, -0.5), wf(0.5, -0.5), -1., 1., 25, atol=1.5e-13)
+    vgq(rf(0.5, -0.5), ef(0.5, -0.5), wf(0.5, -0.5), -1., 1., 100, atol=2e-12)
+
+    vgq(rf(1, 0.5), ef(1, 0.5), wf(1, 0.5), -1., 1., 5, atol=2e-13)
+    vgq(rf(1, 0.5), ef(1, 0.5), wf(1, 0.5), -1., 1., 25, atol=2e-13)
+    vgq(rf(1, 0.5), ef(1, 0.5), wf(1, 0.5), -1., 1., 100, atol=1e-12)
+
+    vgq(rf(0.9, 2), ef(0.9, 2), wf(0.9, 2), -1., 1., 5)
+    vgq(rf(0.9, 2), ef(0.9, 2), wf(0.9, 2), -1., 1., 25, atol=1e-13)
+    vgq(rf(0.9, 2), ef(0.9, 2), wf(0.9, 2), -1., 1., 100, atol=3e-13)
+
+    vgq(rf(18.24, 27.3), ef(18.24, 27.3), wf(18.24, 27.3), -1., 1., 5)
+    vgq(rf(18.24, 27.3), ef(18.24, 27.3), wf(18.24, 27.3), -1., 1., 25,
+        atol=1.1e-14)
+    vgq(rf(18.24, 27.3), ef(18.24, 27.3), wf(18.24, 27.3), -1., 1.,
+        100, atol=1e-13)
+
+    vgq(rf(47.1, -0.2), ef(47.1, -0.2), wf(47.1, -0.2), -1., 1., 5, atol=1e-13)
+    vgq(rf(47.1, -0.2), ef(47.1, -0.2), wf(47.1, -0.2), -1., 1., 25, atol=2e-13)
+    vgq(rf(47.1, -0.2), ef(47.1, -0.2), wf(47.1, -0.2), -1., 1.,
+        100, atol=1e-11)
+
+    vgq(rf(1., 658.), ef(1., 658.), wf(1., 658.), -1., 1., 5, atol=2e-13)
+    vgq(rf(1., 658.), ef(1., 658.), wf(1., 658.), -1., 1., 25, atol=1e-12)
+    vgq(rf(1., 658.), ef(1., 658.), wf(1., 658.), -1., 1., 100, atol=1e-11)
+    vgq(rf(1., 658.), ef(1., 658.), wf(1., 658.), -1., 1., 250, atol=1e-11)
+
+    vgq(rf(511., 511.), ef(511., 511.), wf(511., 511.), -1., 1., 5,
+        atol=1e-12)
+    vgq(rf(511., 511.), ef(511., 511.), wf(511., 511.), -1., 1., 25,
+        atol=1e-11)
+    vgq(rf(511., 511.), ef(511., 511.), wf(511., 511.), -1., 1., 100,
+        atol=1e-10)
+
+    vgq(rf(511., 512.), ef(511., 512.), wf(511., 512.), -1., 1., 5,
+        atol=1e-12)
+    vgq(rf(511., 512.), ef(511., 512.), wf(511., 512.), -1., 1., 25,
+        atol=1e-11)
+    vgq(rf(511., 512.), ef(511., 512.), wf(511., 512.), -1., 1., 100,
+        atol=1e-10)
+
+    vgq(rf(1000., 500.), ef(1000., 500.), wf(1000., 500.), -1., 1., 5,
+        atol=1e-12)
+    vgq(rf(1000., 500.), ef(1000., 500.), wf(1000., 500.), -1., 1., 25,
+        atol=1e-11)
+    vgq(rf(1000., 500.), ef(1000., 500.), wf(1000., 500.), -1., 1., 100,
+        atol=1e-10)
+
+    vgq(rf(2.25, 68.9), ef(2.25, 68.9), wf(2.25, 68.9), -1., 1., 5)
+    vgq(rf(2.25, 68.9), ef(2.25, 68.9), wf(2.25, 68.9), -1., 1., 25,
+        atol=1e-13)
+    vgq(rf(2.25, 68.9), ef(2.25, 68.9), wf(2.25, 68.9), -1., 1., 100,
+        atol=1e-13)
+
+    # when alpha == beta == 0, P_n^{a,b}(x) == P_n(x)
+    xj, wj = sc.roots_jacobi(6, 0.0, 0.0)
+    xl, wl = sc.roots_legendre(6)
+    assert_allclose(xj, xl, 1e-14, 1e-14)
+    assert_allclose(wj, wl, 1e-14, 1e-14)
+
+    # when alpha == beta != 0, P_n^{a,b}(x) == C_n^{alpha+0.5}(x)
+    xj, wj = sc.roots_jacobi(6, 4.0, 4.0)
+    xc, wc = sc.roots_gegenbauer(6, 4.5)
+    assert_allclose(xj, xc, 1e-14, 1e-14)
+    assert_allclose(wj, wc, 1e-14, 1e-14)
+
+    x, w = sc.roots_jacobi(5, 2, 3, False)
+    y, v, m = sc.roots_jacobi(5, 2, 3, True)
+    assert_allclose(x, y, 1e-14, 1e-14)
+    assert_allclose(w, v, 1e-14, 1e-14)
+
+    muI, muI_err = integrate.quad(wf(2,3), -1, 1)
+    assert_allclose(m, muI, rtol=muI_err)
+
+    assert_raises(ValueError, sc.roots_jacobi, 0, 1, 1)
+    assert_raises(ValueError, sc.roots_jacobi, 3.3, 1, 1)
+    assert_raises(ValueError, sc.roots_jacobi, 3, -2, 1)
+    assert_raises(ValueError, sc.roots_jacobi, 3, 1, -2)
+    assert_raises(ValueError, sc.roots_jacobi, 3, -2, -2)
+
+def test_roots_sh_jacobi():
+    rf = lambda a, b: lambda n, mu: sc.roots_sh_jacobi(n, a, b, mu)
+    ef = lambda a, b: lambda n, x: sc.eval_sh_jacobi(n, a, b, x)
+    wf = lambda a, b: lambda x: (1. - x)**(a - b) * (x)**(b - 1.)
+
+    vgq = verify_gauss_quad
+    vgq(rf(-0.5, 0.25), ef(-0.5, 0.25), wf(-0.5, 0.25), 0., 1., 5)
+    vgq(rf(-0.5, 0.25), ef(-0.5, 0.25), wf(-0.5, 0.25), 0., 1.,
+        25, atol=1e-12)
+    vgq(rf(-0.5, 0.25), ef(-0.5, 0.25), wf(-0.5, 0.25), 0., 1.,
+        100, atol=1e-11)
+
+    vgq(rf(0.5, 0.5), ef(0.5, 0.5), wf(0.5, 0.5), 0., 1., 5)
+    vgq(rf(0.5, 0.5), ef(0.5, 0.5), wf(0.5, 0.5), 0., 1., 25, atol=1e-13)
+    vgq(rf(0.5, 0.5), ef(0.5, 0.5), wf(0.5, 0.5), 0., 1., 100, atol=1e-12)
+
+    vgq(rf(1, 0.5), ef(1, 0.5), wf(1, 0.5), 0., 1., 5)
+    vgq(rf(1, 0.5), ef(1, 0.5), wf(1, 0.5), 0., 1., 25, atol=1.5e-13)
+    vgq(rf(1, 0.5), ef(1, 0.5), wf(1, 0.5), 0., 1., 100, atol=2e-12)
+
+    vgq(rf(2, 0.9), ef(2, 0.9), wf(2, 0.9), 0., 1., 5)
+    vgq(rf(2, 0.9), ef(2, 0.9), wf(2, 0.9), 0., 1., 25, atol=1e-13)
+    vgq(rf(2, 0.9), ef(2, 0.9), wf(2, 0.9), 0., 1., 100, atol=1e-12)
+
+    vgq(rf(27.3, 18.24), ef(27.3, 18.24), wf(27.3, 18.24), 0., 1., 5)
+    vgq(rf(27.3, 18.24), ef(27.3, 18.24), wf(27.3, 18.24), 0., 1., 25)
+    vgq(rf(27.3, 18.24), ef(27.3, 18.24), wf(27.3, 18.24), 0., 1.,
+        100, atol=1e-13)
+
+    vgq(rf(47.1, 0.2), ef(47.1, 0.2), wf(47.1, 0.2), 0., 1., 5, atol=1e-12)
+    vgq(rf(47.1, 0.2), ef(47.1, 0.2), wf(47.1, 0.2), 0., 1., 25, atol=1e-11)
+    vgq(rf(47.1, 0.2), ef(47.1, 0.2), wf(47.1, 0.2), 0., 1., 100, atol=1e-10)
+
+    vgq(rf(68.9, 2.25), ef(68.9, 2.25), wf(68.9, 2.25), 0., 1., 5, atol=3.5e-14)
+    vgq(rf(68.9, 2.25), ef(68.9, 2.25), wf(68.9, 2.25), 0., 1., 25, atol=2e-13)
+    vgq(rf(68.9, 2.25), ef(68.9, 2.25), wf(68.9, 2.25), 0., 1.,
+        100, atol=1e-12)
+
+    x, w = sc.roots_sh_jacobi(5, 3, 2, False)
+    y, v, m = sc.roots_sh_jacobi(5, 3, 2, True)
+    assert_allclose(x, y, 1e-14, 1e-14)
+    assert_allclose(w, v, 1e-14, 1e-14)
+
+    muI, muI_err = integrate.quad(wf(3,2), 0, 1)
+    assert_allclose(m, muI, rtol=muI_err)
+
+    assert_raises(ValueError, sc.roots_sh_jacobi, 0, 1, 1)
+    assert_raises(ValueError, sc.roots_sh_jacobi, 3.3, 1, 1)
+    assert_raises(ValueError, sc.roots_sh_jacobi, 3, 1, 2)    # p - q <= -1
+    assert_raises(ValueError, sc.roots_sh_jacobi, 3, 2, -1)   # q <= 0
+    assert_raises(ValueError, sc.roots_sh_jacobi, 3, -2, -1)  # both
+
+def test_roots_hermite():
+    rootf = sc.roots_hermite
+    evalf = sc.eval_hermite
+    weightf = orth.hermite(5).weight_func
+
+    verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 5)
+    verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 25, atol=1e-13)
+    verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 100, atol=1e-12)
+
+    # Golub-Welsch branch
+    x, w = sc.roots_hermite(5, False)
+    y, v, m = sc.roots_hermite(5, True)
+    assert_allclose(x, y, 1e-14, 1e-14)
+    assert_allclose(w, v, 1e-14, 1e-14)
+
+    muI, muI_err = integrate.quad(weightf, -np.inf, np.inf)
+    assert_allclose(m, muI, rtol=muI_err)
+
+    # Asymptotic branch (switch over at n >= 150)
+    x, w = sc.roots_hermite(200, False)
+    y, v, m = sc.roots_hermite(200, True)
+    assert_allclose(x, y, 1e-14, 1e-14)
+    assert_allclose(w, v, 1e-14, 1e-14)
+    assert_allclose(sum(v), m, 1e-14, 1e-14)
+
+    assert_raises(ValueError, sc.roots_hermite, 0)
+    assert_raises(ValueError, sc.roots_hermite, 3.3)
+
+def test_roots_hermite_asy():
+    # Recursion for Hermite functions
+    def hermite_recursion(n, nodes):
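+        # orthonormal Hermite functions psi_k(x) = H_k(x) exp(-x**2/2) /
+        # (pi**0.25 * sqrt(2**k * k!)), built with the stable three-term
+        # recurrence psi_k = sqrt(2/k)*x*psi_{k-1} - sqrt((k-1)/k)*psi_{k-2}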
+        H = np.zeros((n, nodes.size))
+        H[0,:] = np.pi**(-0.25) * np.exp(-0.5*nodes**2)
+        if n > 1:
+            H[1,:] = sqrt(2.0) * nodes * H[0,:]
+            for k in range(2, n):
+                H[k,:] = sqrt(2.0/k) * nodes * H[k-1,:] - sqrt((k-1.0)/k) * H[k-2,:]
+        return H
+
+    # This tests only the nodes
+    def test(N, rtol=1e-15, atol=1e-14):
+        x, w = orth._roots_hermite_asy(N)
+        H = hermite_recursion(N+1, x)
+        assert_allclose(H[-1,:], np.zeros(N), rtol, atol)
+        assert_allclose(sum(w), sqrt(np.pi), rtol, atol)
+
+    test(150, atol=1e-12)
+    test(151, atol=1e-12)
+    test(300, atol=1e-12)
+    test(301, atol=1e-12)
+    test(500, atol=1e-12)
+    test(501, atol=1e-12)
+    test(999, atol=1e-12)
+    test(1000, atol=1e-12)
+    test(2000, atol=1e-12)
+    test(5000, atol=1e-12)
+
+def test_roots_hermitenorm():
+    rootf = sc.roots_hermitenorm
+    evalf = sc.eval_hermitenorm
+    weightf = orth.hermitenorm(5).weight_func
+
+    verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 5)
+    verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 25, atol=1e-13)
+    verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 100, atol=1e-12)
+
+    x, w = sc.roots_hermitenorm(5, False)
+    y, v, m = sc.roots_hermitenorm(5, True)
+    assert_allclose(x, y, 1e-14, 1e-14)
+    assert_allclose(w, v, 1e-14, 1e-14)
+
+    muI, muI_err = integrate.quad(weightf, -np.inf, np.inf)
+    assert_allclose(m, muI, rtol=muI_err)
+
+    assert_raises(ValueError, sc.roots_hermitenorm, 0)
+    assert_raises(ValueError, sc.roots_hermitenorm, 3.3)
+
+def test_roots_gegenbauer():
+    rootf = lambda a: lambda n, mu: sc.roots_gegenbauer(n, a, mu)
+    evalf = lambda a: lambda n, x: sc.eval_gegenbauer(n, a, x)
+    weightf = lambda a: lambda x: (1 - x**2)**(a - 0.5)
+
+    vgq = verify_gauss_quad
+    vgq(rootf(-0.25), evalf(-0.25), weightf(-0.25), -1., 1., 5)
+    vgq(rootf(-0.25), evalf(-0.25), weightf(-0.25), -1., 1., 25, atol=1e-12)
+    vgq(rootf(-0.25), evalf(-0.25), weightf(-0.25), -1., 1., 100, atol=1e-11)
+
+    vgq(rootf(0.1), evalf(0.1), weightf(0.1), -1., 1., 5)
+    vgq(rootf(0.1), evalf(0.1), weightf(0.1), -1., 1., 25, atol=1e-13)
+    vgq(rootf(0.1), evalf(0.1), weightf(0.1), -1., 1., 100, atol=1e-12)
+
+    vgq(rootf(1), evalf(1), weightf(1), -1., 1., 5)
+    vgq(rootf(1), evalf(1), weightf(1), -1., 1., 25, atol=1e-13)
+    vgq(rootf(1), evalf(1), weightf(1), -1., 1., 100, atol=1e-12)
+
+    vgq(rootf(10), evalf(10), weightf(10), -1., 1., 5)
+    vgq(rootf(10), evalf(10), weightf(10), -1., 1., 25, atol=1e-13)
+    vgq(rootf(10), evalf(10), weightf(10), -1., 1., 100, atol=1e-12)
+
+    vgq(rootf(50), evalf(50), weightf(50), -1., 1., 5, atol=1e-13)
+    vgq(rootf(50), evalf(50), weightf(50), -1., 1., 25, atol=1e-12)
+    vgq(rootf(50), evalf(50), weightf(50), -1., 1., 100, atol=1e-11)
+
+    # Alpha=170 is where the approximation used in roots_gegenbauer changes
+    vgq(rootf(170), evalf(170), weightf(170), -1., 1., 5, atol=1e-13)
+    vgq(rootf(170), evalf(170), weightf(170), -1., 1., 25, atol=1e-12)
+    vgq(rootf(170), evalf(170), weightf(170), -1., 1., 100, atol=1e-11)
+    vgq(rootf(170.5), evalf(170.5), weightf(170.5), -1., 1., 5, atol=1.25e-13)
+    vgq(rootf(170.5), evalf(170.5), weightf(170.5), -1., 1., 25, atol=1e-12)
+    vgq(rootf(170.5), evalf(170.5), weightf(170.5), -1., 1., 100, atol=1e-11)
+
+    # Test for failures, e.g. overflows, resulting from large alphas
+    vgq(rootf(238), evalf(238), weightf(238), -1., 1., 5, atol=1e-13)
+    vgq(rootf(238), evalf(238), weightf(238), -1., 1., 25, atol=1e-12)
+    vgq(rootf(238), evalf(238), weightf(238), -1., 1., 100, atol=1e-11)
+    vgq(rootf(512.5), evalf(512.5), weightf(512.5), -1., 1., 5, atol=1e-12)
+    vgq(rootf(512.5), evalf(512.5), weightf(512.5), -1., 1., 25, atol=1e-11)
+    vgq(rootf(512.5), evalf(512.5), weightf(512.5), -1., 1., 100, atol=1e-10)
+
+    # this is a special case that the old code supported.
+    # when alpha = 0 the Gegenbauer polynomial is identically zero, but as
+    # alpha -> 0 it tends to a scaled-down copy of T_n(x), so its roots
+    # agree with those of T_n.
+    vgq(rootf(0), sc.eval_chebyt, weightf(0), -1., 1., 5)
+    vgq(rootf(0), sc.eval_chebyt, weightf(0), -1., 1., 25)
+    vgq(rootf(0), sc.eval_chebyt, weightf(0), -1., 1., 100, atol=1e-12)
+
+    x, w = sc.roots_gegenbauer(5, 2, False)
+    y, v, m = sc.roots_gegenbauer(5, 2, True)
+    assert_allclose(x, y, 1e-14, 1e-14)
+    assert_allclose(w, v, 1e-14, 1e-14)
+
+    muI, muI_err = integrate.quad(weightf(2), -1, 1)
+    assert_allclose(m, muI, rtol=muI_err)
+
+    assert_raises(ValueError, sc.roots_gegenbauer, 0, 2)
+    assert_raises(ValueError, sc.roots_gegenbauer, 3.3, 2)
+    assert_raises(ValueError, sc.roots_gegenbauer, 3, -.75)
+
+def test_roots_chebyt():
+    weightf = orth.chebyt(5).weight_func
+    verify_gauss_quad(sc.roots_chebyt, sc.eval_chebyt, weightf, -1., 1., 5)
+    verify_gauss_quad(sc.roots_chebyt, sc.eval_chebyt, weightf, -1., 1., 25)
+    verify_gauss_quad(sc.roots_chebyt, sc.eval_chebyt, weightf, -1., 1., 100, atol=1e-12)
+
+    x, w = sc.roots_chebyt(5, False)
+    y, v, m = sc.roots_chebyt(5, True)
+    assert_allclose(x, y, 1e-14, 1e-14)
+    assert_allclose(w, v, 1e-14, 1e-14)
+
+    muI, muI_err = integrate.quad(weightf, -1, 1)
+    assert_allclose(m, muI, rtol=muI_err)
+
+    assert_raises(ValueError, sc.roots_chebyt, 0)
+    assert_raises(ValueError, sc.roots_chebyt, 3.3)
+
+def test_chebyt_symmetry():
+    x, w = sc.roots_chebyt(21)
+    pos, neg = x[:10], x[11:]
+    assert_equal(neg, -pos[::-1])
+    assert_equal(x[10], 0)
+
+def test_roots_chebyu():
+    weightf = orth.chebyu(5).weight_func
+    verify_gauss_quad(sc.roots_chebyu, sc.eval_chebyu, weightf, -1., 1., 5)
+    verify_gauss_quad(sc.roots_chebyu, sc.eval_chebyu, weightf, -1., 1., 25)
+    verify_gauss_quad(sc.roots_chebyu, sc.eval_chebyu, weightf, -1., 1., 100)
+
+    x, w = sc.roots_chebyu(5, False)
+    y, v, m = sc.roots_chebyu(5, True)
+    assert_allclose(x, y, 1e-14, 1e-14)
+    assert_allclose(w, v, 1e-14, 1e-14)
+
+    muI, muI_err = integrate.quad(weightf, -1, 1)
+    assert_allclose(m, muI, rtol=muI_err)
+
+    assert_raises(ValueError, sc.roots_chebyu, 0)
+    assert_raises(ValueError, sc.roots_chebyu, 3.3)
+
+def test_roots_chebyc():
+    weightf = orth.chebyc(5).weight_func
+    verify_gauss_quad(sc.roots_chebyc, sc.eval_chebyc, weightf, -2., 2., 5)
+    verify_gauss_quad(sc.roots_chebyc, sc.eval_chebyc, weightf, -2., 2., 25)
+    verify_gauss_quad(sc.roots_chebyc, sc.eval_chebyc, weightf, -2., 2., 100, atol=1e-12)
+
+    x, w = sc.roots_chebyc(5, False)
+    y, v, m = sc.roots_chebyc(5, True)
+    assert_allclose(x, y, 1e-14, 1e-14)
+    assert_allclose(w, v, 1e-14, 1e-14)
+
+    muI, muI_err = integrate.quad(weightf, -2, 2)
+    assert_allclose(m, muI, rtol=muI_err)
+
+    assert_raises(ValueError, sc.roots_chebyc, 0)
+    assert_raises(ValueError, sc.roots_chebyc, 3.3)
+
+def test_roots_chebys():
+    weightf = orth.chebys(5).weight_func
+    verify_gauss_quad(sc.roots_chebys, sc.eval_chebys, weightf, -2., 2., 5)
+    verify_gauss_quad(sc.roots_chebys, sc.eval_chebys, weightf, -2., 2., 25)
+    verify_gauss_quad(sc.roots_chebys, sc.eval_chebys, weightf, -2., 2., 100)
+
+    x, w = sc.roots_chebys(5, False)
+    y, v, m = sc.roots_chebys(5, True)
+    assert_allclose(x, y, 1e-14, 1e-14)
+    assert_allclose(w, v, 1e-14, 1e-14)
+
+    muI, muI_err = integrate.quad(weightf, -2, 2)
+    assert_allclose(m, muI, rtol=muI_err)
+
+    assert_raises(ValueError, sc.roots_chebys, 0)
+    assert_raises(ValueError, sc.roots_chebys, 3.3)
+
+def test_roots_sh_chebyt():
+    weightf = orth.sh_chebyt(5).weight_func
+    verify_gauss_quad(sc.roots_sh_chebyt, sc.eval_sh_chebyt, weightf, 0., 1., 5)
+    verify_gauss_quad(sc.roots_sh_chebyt, sc.eval_sh_chebyt, weightf, 0., 1., 25)
+    verify_gauss_quad(sc.roots_sh_chebyt, sc.eval_sh_chebyt, weightf, 0., 1.,
+                      100, atol=1e-13)
+
+    x, w = sc.roots_sh_chebyt(5, False)
+    y, v, m = sc.roots_sh_chebyt(5, True)
+    assert_allclose(x, y, 1e-14, 1e-14)
+    assert_allclose(w, v, 1e-14, 1e-14)
+
+    muI, muI_err = integrate.quad(weightf, 0, 1)
+    assert_allclose(m, muI, rtol=muI_err)
+
+    assert_raises(ValueError, sc.roots_sh_chebyt, 0)
+    assert_raises(ValueError, sc.roots_sh_chebyt, 3.3)
+
+def test_roots_sh_chebyu():
+    weightf = orth.sh_chebyu(5).weight_func
+    verify_gauss_quad(sc.roots_sh_chebyu, sc.eval_sh_chebyu, weightf, 0., 1., 5)
+    verify_gauss_quad(sc.roots_sh_chebyu, sc.eval_sh_chebyu, weightf, 0., 1., 25)
+    verify_gauss_quad(sc.roots_sh_chebyu, sc.eval_sh_chebyu, weightf, 0., 1.,
+                      100, atol=1e-13)
+
+    x, w = sc.roots_sh_chebyu(5, False)
+    y, v, m = sc.roots_sh_chebyu(5, True)
+    assert_allclose(x, y, 1e-14, 1e-14)
+    assert_allclose(w, v, 1e-14, 1e-14)
+
+    muI, muI_err = integrate.quad(weightf, 0, 1)
+    assert_allclose(m, muI, rtol=muI_err)
+
+    assert_raises(ValueError, sc.roots_sh_chebyu, 0)
+    assert_raises(ValueError, sc.roots_sh_chebyu, 3.3)
+
+def test_roots_legendre():
+    weightf = orth.legendre(5).weight_func
+    verify_gauss_quad(sc.roots_legendre, sc.eval_legendre, weightf, -1., 1., 5)
+    verify_gauss_quad(sc.roots_legendre, sc.eval_legendre, weightf, -1., 1.,
+                      25, atol=1e-13)
+    verify_gauss_quad(sc.roots_legendre, sc.eval_legendre, weightf, -1., 1.,
+                      100, atol=1e-12)
+
+    x, w = sc.roots_legendre(5, False)
+    y, v, m = sc.roots_legendre(5, True)
+    assert_allclose(x, y, 1e-14, 1e-14)
+    assert_allclose(w, v, 1e-14, 1e-14)
+
+    muI, muI_err = integrate.quad(weightf, -1, 1)
+    assert_allclose(m, muI, rtol=muI_err)
+
+    assert_raises(ValueError, sc.roots_legendre, 0)
+    assert_raises(ValueError, sc.roots_legendre, 3.3)
+
+def test_roots_sh_legendre():
+    weightf = orth.sh_legendre(5).weight_func
+    verify_gauss_quad(sc.roots_sh_legendre, sc.eval_sh_legendre, weightf, 0., 1., 5)
+    verify_gauss_quad(sc.roots_sh_legendre, sc.eval_sh_legendre, weightf, 0., 1.,
+                      25, atol=1e-13)
+    verify_gauss_quad(sc.roots_sh_legendre, sc.eval_sh_legendre, weightf, 0., 1.,
+                      100, atol=1e-12)
+
+    x, w = sc.roots_sh_legendre(5, False)
+    y, v, m = sc.roots_sh_legendre(5, True)
+    assert_allclose(x, y, 1e-14, 1e-14)
+    assert_allclose(w, v, 1e-14, 1e-14)
+
+    muI, muI_err = integrate.quad(weightf, 0, 1)
+    assert_allclose(m, muI, rtol=muI_err)
+
+    assert_raises(ValueError, sc.roots_sh_legendre, 0)
+    assert_raises(ValueError, sc.roots_sh_legendre, 3.3)
+
+def test_roots_laguerre():
+    weightf = orth.laguerre(5).weight_func
+    verify_gauss_quad(sc.roots_laguerre, sc.eval_laguerre, weightf, 0., np.inf, 5)
+    verify_gauss_quad(sc.roots_laguerre, sc.eval_laguerre, weightf, 0., np.inf,
+                      25, atol=1e-13)
+    verify_gauss_quad(sc.roots_laguerre, sc.eval_laguerre, weightf, 0., np.inf,
+                      100, atol=1e-12)
+
+    x, w = sc.roots_laguerre(5, False)
+    y, v, m = sc.roots_laguerre(5, True)
+    assert_allclose(x, y, 1e-14, 1e-14)
+    assert_allclose(w, v, 1e-14, 1e-14)
+
+    muI, muI_err = integrate.quad(weightf, 0, np.inf)
+    assert_allclose(m, muI, rtol=muI_err)
+
+    assert_raises(ValueError, sc.roots_laguerre, 0)
+    assert_raises(ValueError, sc.roots_laguerre, 3.3)
+
+def test_roots_genlaguerre():
+    rootf = lambda a: lambda n, mu: sc.roots_genlaguerre(n, a, mu)
+    evalf = lambda a: lambda n, x: sc.eval_genlaguerre(n, a, x)
+    weightf = lambda a: lambda x: x**a * np.exp(-x)
+
+    vgq = verify_gauss_quad
+    vgq(rootf(-0.5), evalf(-0.5), weightf(-0.5), 0., np.inf, 5)
+    vgq(rootf(-0.5), evalf(-0.5), weightf(-0.5), 0., np.inf, 25, atol=1e-13)
+    vgq(rootf(-0.5), evalf(-0.5), weightf(-0.5), 0., np.inf, 100, atol=1e-12)
+
+    vgq(rootf(0.1), evalf(0.1), weightf(0.1), 0., np.inf, 5)
+    vgq(rootf(0.1), evalf(0.1), weightf(0.1), 0., np.inf, 25, atol=1e-13)
+    vgq(rootf(0.1), evalf(0.1), weightf(0.1), 0., np.inf, 100, atol=1.6e-13)
+
+    vgq(rootf(1), evalf(1), weightf(1), 0., np.inf, 5)
+    vgq(rootf(1), evalf(1), weightf(1), 0., np.inf, 25, atol=1e-13)
+    vgq(rootf(1), evalf(1), weightf(1), 0., np.inf, 100, atol=1.03e-13)
+
+    vgq(rootf(10), evalf(10), weightf(10), 0., np.inf, 5)
+    vgq(rootf(10), evalf(10), weightf(10), 0., np.inf, 25, atol=1e-13)
+    vgq(rootf(10), evalf(10), weightf(10), 0., np.inf, 100, atol=1e-12)
+
+    vgq(rootf(50), evalf(50), weightf(50), 0., np.inf, 5)
+    vgq(rootf(50), evalf(50), weightf(50), 0., np.inf, 25, atol=1e-13)
+    vgq(rootf(50), evalf(50), weightf(50), 0., np.inf, 100, rtol=1e-14, atol=2e-13)
+
+    x, w = sc.roots_genlaguerre(5, 2, False)
+    y, v, m = sc.roots_genlaguerre(5, 2, True)
+    assert_allclose(x, y, 1e-14, 1e-14)
+    assert_allclose(w, v, 1e-14, 1e-14)
+
+    muI, muI_err = integrate.quad(weightf(2.), 0., np.inf)
+    assert_allclose(m, muI, rtol=muI_err)
+
+    assert_raises(ValueError, sc.roots_genlaguerre, 0, 2)
+    assert_raises(ValueError, sc.roots_genlaguerre, 3.3, 2)
+    assert_raises(ValueError, sc.roots_genlaguerre, 3, -1.1)
+
+
+def test_gh_6721():
+    # Regression test for gh-6721. This should not raise.
+    sc.chebyt(65)(0.2)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_orthogonal_eval.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_orthogonal_eval.py
new file mode 100644
index 00000000..5973171a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_orthogonal_eval.py
@@ -0,0 +1,268 @@
+import numpy as np
+from numpy.testing import assert_, assert_allclose
+import pytest
+
+from scipy.special import _ufuncs
+import scipy.special._orthogonal as orth
+from scipy.special._testutils import FuncData
+
+
+def test_eval_chebyt():
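+    # uses the identity T_n(x) = cos(n*arccos(x)) on [-1, 1] as the reference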
+    n = np.arange(0, 10000, 7)
+    x = 2*np.random.rand() - 1
+    v1 = np.cos(n*np.arccos(x))
+    v2 = _ufuncs.eval_chebyt(n, x)
+    assert_(np.allclose(v1, v2, rtol=1e-15))
+
+
+def test_eval_genlaguerre_restriction():
+    # check it returns nan for alpha <= -1
+    assert_(np.isnan(_ufuncs.eval_genlaguerre(0, -1, 0)))
+    assert_(np.isnan(_ufuncs.eval_genlaguerre(0.1, -1, 0)))
+
+
+def test_warnings():
+    # ticket 1334
+    with np.errstate(all='raise'):
+        # these should raise no fp warnings
+        _ufuncs.eval_legendre(1, 0)
+        _ufuncs.eval_laguerre(1, 1)
+        _ufuncs.eval_gegenbauer(1, 1, 0)
+
+
+class TestPolys:
+    """
+    Check that the eval_* functions agree with the constructed polynomials
+
+    """
+
+    def check_poly(self, func, cls, param_ranges=[], x_range=[], nn=10,
+                   nparam=10, nx=10, rtol=1e-8):
+        np.random.seed(1234)
+
+        dataset = []
+        for n in np.arange(nn):
+            params = [a + (b-a)*np.random.rand(nparam) for a,b in param_ranges]
+            params = np.asarray(params).T
+            if not param_ranges:
+                params = [0]
+            for p in params:
+                if param_ranges:
+                    p = (n,) + tuple(p)
+                else:
+                    p = (n,)
+                x = x_range[0] + (x_range[1] - x_range[0])*np.random.rand(nx)
+                x[0] = x_range[0]  # always include domain start point
+                x[1] = x_range[1]  # always include domain end point
+                poly = np.poly1d(cls(*p).coef)
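+                # each dataset row is (parameters..., x, reference value from
+                # the explicitly constructed polynomial)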
+                z = np.c_[np.tile(p, (nx,1)), x, poly(x)]
+                dataset.append(z)
+
+        dataset = np.concatenate(dataset, axis=0)
+
+        def polyfunc(*p):
+            p = (p[0].astype(int),) + p[1:]
+            return func(*p)
+
+        with np.errstate(all='raise'):
+            ds = FuncData(polyfunc, dataset, list(range(len(param_ranges)+2)), -1,
+                          rtol=rtol)
+            ds.check()
+
+    def test_jacobi(self):
+        self.check_poly(_ufuncs.eval_jacobi, orth.jacobi,
+                        param_ranges=[(-0.99, 10), (-0.99, 10)],
+                        x_range=[-1, 1], rtol=1e-5)
+
+    def test_sh_jacobi(self):
+        self.check_poly(_ufuncs.eval_sh_jacobi, orth.sh_jacobi,
+                        param_ranges=[(1, 10), (0, 1)], x_range=[0, 1],
+                        rtol=1e-5)
+
+    def test_gegenbauer(self):
+        self.check_poly(_ufuncs.eval_gegenbauer, orth.gegenbauer,
+                        param_ranges=[(-0.499, 10)], x_range=[-1, 1],
+                        rtol=1e-7)
+
+    def test_chebyt(self):
+        self.check_poly(_ufuncs.eval_chebyt, orth.chebyt,
+                        param_ranges=[], x_range=[-1, 1])
+
+    def test_chebyu(self):
+        self.check_poly(_ufuncs.eval_chebyu, orth.chebyu,
+                        param_ranges=[], x_range=[-1, 1])
+
+    def test_chebys(self):
+        self.check_poly(_ufuncs.eval_chebys, orth.chebys,
+                        param_ranges=[], x_range=[-2, 2])
+
+    def test_chebyc(self):
+        self.check_poly(_ufuncs.eval_chebyc, orth.chebyc,
+                        param_ranges=[], x_range=[-2, 2])
+
+    def test_sh_chebyt(self):
+        with np.errstate(all='ignore'):
+            self.check_poly(_ufuncs.eval_sh_chebyt, orth.sh_chebyt,
+                            param_ranges=[], x_range=[0, 1])
+
+    def test_sh_chebyu(self):
+        self.check_poly(_ufuncs.eval_sh_chebyu, orth.sh_chebyu,
+                        param_ranges=[], x_range=[0, 1])
+
+    def test_legendre(self):
+        self.check_poly(_ufuncs.eval_legendre, orth.legendre,
+                        param_ranges=[], x_range=[-1, 1])
+
+    def test_sh_legendre(self):
+        with np.errstate(all='ignore'):
+            self.check_poly(_ufuncs.eval_sh_legendre, orth.sh_legendre,
+                            param_ranges=[], x_range=[0, 1])
+
+    def test_genlaguerre(self):
+        self.check_poly(_ufuncs.eval_genlaguerre, orth.genlaguerre,
+                        param_ranges=[(-0.99, 10)], x_range=[0, 100])
+
+    def test_laguerre(self):
+        self.check_poly(_ufuncs.eval_laguerre, orth.laguerre,
+                        param_ranges=[], x_range=[0, 100])
+
+    def test_hermite(self):
+        self.check_poly(_ufuncs.eval_hermite, orth.hermite,
+                        param_ranges=[], x_range=[-100, 100])
+
+    def test_hermitenorm(self):
+        self.check_poly(_ufuncs.eval_hermitenorm, orth.hermitenorm,
+                        param_ranges=[], x_range=[-100, 100])
+
+
+class TestRecurrence:
+    """
+    Check that the eval_* functions sig='ld->d' and 'dd->d' agree.
+
+    """
+
+    def check_poly(self, func, param_ranges=[], x_range=[], nn=10,
+                   nparam=10, nx=10, rtol=1e-8):
+        np.random.seed(1234)
+
+        dataset = []
+        for n in np.arange(nn):
+            params = [a + (b-a)*np.random.rand(nparam) for a,b in param_ranges]
+            params = np.asarray(params).T
+            if not param_ranges:
+                params = [0]
+            for p in params:
+                if param_ranges:
+                    p = (n,) + tuple(p)
+                else:
+                    p = (n,)
+                x = x_range[0] + (x_range[1] - x_range[0])*np.random.rand(nx)
+                x[0] = x_range[0]  # always include domain start point
+                x[1] = x_range[1]  # always include domain end point
+                kw = dict(sig=(len(p)+1)*'d'+'->d')
+                z = np.c_[np.tile(p, (nx,1)), x, func(*(p + (x,)), **kw)]
+                dataset.append(z)
+
+        dataset = np.concatenate(dataset, axis=0)
+
+        def polyfunc(*p):
+            p = (p[0].astype(int),) + p[1:]
+            kw = dict(sig='l'+(len(p)-1)*'d'+'->d')
+            return func(*p, **kw)
+
+        with np.errstate(all='raise'):
+            ds = FuncData(polyfunc, dataset, list(range(len(param_ranges)+2)), -1,
+                          rtol=rtol)
+            ds.check()
+
+    def test_jacobi(self):
+        self.check_poly(_ufuncs.eval_jacobi,
+                        param_ranges=[(-0.99, 10), (-0.99, 10)],
+                        x_range=[-1, 1])
+
+    def test_sh_jacobi(self):
+        self.check_poly(_ufuncs.eval_sh_jacobi,
+                        param_ranges=[(1, 10), (0, 1)], x_range=[0, 1])
+
+    def test_gegenbauer(self):
+        self.check_poly(_ufuncs.eval_gegenbauer,
+                        param_ranges=[(-0.499, 10)], x_range=[-1, 1])
+
+    def test_chebyt(self):
+        self.check_poly(_ufuncs.eval_chebyt,
+                        param_ranges=[], x_range=[-1, 1])
+
+    def test_chebyu(self):
+        self.check_poly(_ufuncs.eval_chebyu,
+                        param_ranges=[], x_range=[-1, 1])
+
+    def test_chebys(self):
+        self.check_poly(_ufuncs.eval_chebys,
+                        param_ranges=[], x_range=[-2, 2])
+
+    def test_chebyc(self):
+        self.check_poly(_ufuncs.eval_chebyc,
+                        param_ranges=[], x_range=[-2, 2])
+
+    def test_sh_chebyt(self):
+        self.check_poly(_ufuncs.eval_sh_chebyt,
+                        param_ranges=[], x_range=[0, 1])
+
+    def test_sh_chebyu(self):
+        self.check_poly(_ufuncs.eval_sh_chebyu,
+                        param_ranges=[], x_range=[0, 1])
+
+    def test_legendre(self):
+        self.check_poly(_ufuncs.eval_legendre,
+                        param_ranges=[], x_range=[-1, 1])
+
+    def test_sh_legendre(self):
+        self.check_poly(_ufuncs.eval_sh_legendre,
+                        param_ranges=[], x_range=[0, 1])
+
+    def test_genlaguerre(self):
+        self.check_poly(_ufuncs.eval_genlaguerre,
+                        param_ranges=[(-0.99, 10)], x_range=[0, 100])
+
+    def test_laguerre(self):
+        self.check_poly(_ufuncs.eval_laguerre,
+                        param_ranges=[], x_range=[0, 100])
+
+    def test_hermite(self):
+        v = _ufuncs.eval_hermite(70, 1.0)
+        a = -1.457076485701412e60
+        assert_allclose(v, a)
+
+
+def test_hermite_domain():
+    # Regression test for gh-11091.
+    assert np.isnan(_ufuncs.eval_hermite(-1, 1.0))
+    assert np.isnan(_ufuncs.eval_hermitenorm(-1, 1.0))
+
+
+@pytest.mark.parametrize("n", [0, 1, 2])
+@pytest.mark.parametrize("x", [0, 1, np.nan])
+def test_hermite_nan(n, x):
+    # Regression test for gh-11369.
+    assert np.isnan(_ufuncs.eval_hermite(n, x)) == np.any(np.isnan([n, x]))
+    assert np.isnan(_ufuncs.eval_hermitenorm(n, x)) == np.any(np.isnan([n, x]))
+
+
+@pytest.mark.parametrize('n', [0, 1, 2, 3.2])
+@pytest.mark.parametrize('alpha', [1, np.nan])
+@pytest.mark.parametrize('x', [2, np.nan])
+def test_genlaguerre_nan(n, alpha, x):
+    # Regression test for gh-11361.
+    nan_laguerre = np.isnan(_ufuncs.eval_genlaguerre(n, alpha, x))
+    nan_arg = np.any(np.isnan([n, alpha, x]))
+    assert nan_laguerre == nan_arg
+
+
+@pytest.mark.parametrize('n', [0, 1, 2, 3.2])
+@pytest.mark.parametrize('alpha', [0.0, 1, np.nan])
+@pytest.mark.parametrize('x', [1e-6, 2, np.nan])
+def test_gegenbauer_nan(n, alpha, x):
+    # Regression test for gh-11370.
+    nan_gegenbauer = np.isnan(_ufuncs.eval_gegenbauer(n, alpha, x))
+    nan_arg = np.any(np.isnan([n, alpha, x]))
+    assert nan_gegenbauer == nan_arg
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_owens_t.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_owens_t.py
new file mode 100644
index 00000000..8d15aead
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_owens_t.py
@@ -0,0 +1,53 @@
+import numpy as np
+from numpy.testing import assert_equal, assert_allclose
+
+import scipy.special as sc
+
+
+def test_symmetries():
+    np.random.seed(1234)
+    a, h = np.random.rand(100), np.random.rand(100)
+    assert_equal(sc.owens_t(h, a), sc.owens_t(-h, a))
+    assert_equal(sc.owens_t(h, a), -sc.owens_t(h, -a))
+
+
+def test_special_cases():
+    assert_equal(sc.owens_t(5, 0), 0)
+    assert_allclose(sc.owens_t(0, 5), 0.5*np.arctan(5)/np.pi,
+                    rtol=5e-14)
+    # Target value is 0.5*Phi(5)*(1 - Phi(5)) for Phi the CDF of the
+    # standard normal distribution
+    assert_allclose(sc.owens_t(5, 1), 1.4332574485503512543e-07,
+                    rtol=5e-14)
+
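+# The a = 1 target above follows from the identity
+# T(h, 1) = Phi(h)*(1 - Phi(h))/2, with Phi the standard normal CDF; a rough
+# cross-check sketch (1 - ndtr(h) cancels for large h, hence the loose
+# tolerance):
+#
+#     ref = 0.5*sc.ndtr(5)*(1 - sc.ndtr(5))
+#     np.testing.assert_allclose(sc.owens_t(5, 1), ref, rtol=1e-8)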
+
+def test_nans():
+    assert_equal(sc.owens_t(20, np.nan), np.nan)
+    assert_equal(sc.owens_t(np.nan, 20), np.nan)
+    assert_equal(sc.owens_t(np.nan, np.nan), np.nan)
+
+
+def test_infs():
+    h, a = 0, np.inf
+    # T(0, a) = arctan(a) / (2*pi)
+    res = 1/(2*np.pi) * np.arctan(a)
+    assert_allclose(sc.owens_t(h, a), res, rtol=5e-14)
+    assert_allclose(sc.owens_t(h, -a), -res, rtol=5e-14)
+
+    h = 1
+    # See the definition of Owen's T function at
+    # https://en.wikipedia.org/wiki/Owen%27s_T_function
+    # The value below was approximated by numerical integration with
+    # scipy.integrate.quad:
+    # quad(lambda x: 1/(2*pi)*(exp(-0.5*(1*1)*(1+x*x))/(1+x*x)), 0, inf)
+    res = 0.07932762696572854
+    assert_allclose(sc.owens_t(h, np.inf), res, rtol=5e-14)
+    assert_allclose(sc.owens_t(h, -np.inf), -res, rtol=5e-14)
+
+    assert_equal(sc.owens_t(np.inf, 1), 0)
+    assert_equal(sc.owens_t(-np.inf, 1), 0)
+
+    assert_equal(sc.owens_t(np.inf, np.inf), 0)
+    assert_equal(sc.owens_t(-np.inf, np.inf), 0)
+    assert_equal(sc.owens_t(np.inf, -np.inf), -0.0)
+    assert_equal(sc.owens_t(-np.inf, -np.inf), -0.0)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_pcf.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_pcf.py
new file mode 100644
index 00000000..a8c42aa6
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_pcf.py
@@ -0,0 +1,24 @@
+"""Tests for parabolic cylinder functions.
+
+"""
+import numpy as np
+from numpy.testing import assert_allclose, assert_equal
+import scipy.special as sc
+
+
+def test_pbwa_segfault():
+    # Regression test for https://github.com/scipy/scipy/issues/6208.
+    #
+    # Data generated by mpmath.
+    #
+    w = 1.02276567211316867161
+    wp = -0.48887053372346189882
+    assert_allclose(sc.pbwa(0, 0), (w, wp), rtol=1e-13, atol=0)
+
+
+def test_pbwa_nan():
+    # Check that NaN's are returned outside of the range in which the
+    # implementation is accurate.
+    pts = [(-6, -6), (-6, 6), (6, -6), (6, 6)]
+    for p in pts:
+        assert_equal(sc.pbwa(*p), (np.nan, np.nan))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_pdtr.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_pdtr.py
new file mode 100644
index 00000000..122e6009
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_pdtr.py
@@ -0,0 +1,48 @@
+import numpy as np
+import scipy.special as sc
+from numpy.testing import assert_almost_equal, assert_array_equal
+
+
+class TestPdtr:
+    def test(self):
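+        # pdtr(k, m) is the Poisson CDF sum_{j <= k} exp(-m)*m**j/j!, so the
+        # k = 0 term alone gives exp(-m).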
+        val = sc.pdtr(0, 1)
+        assert_almost_equal(val, np.exp(-1))
+
+    def test_m_zero(self):
+        val = sc.pdtr([0, 1, 2], 0)
+        assert_array_equal(val, [1, 1, 1])
+
+    def test_rounding(self):
+        double_val = sc.pdtr([0.1, 1.1, 2.1], 1.0)
+        int_val = sc.pdtr([0, 1, 2], 1.0)
+        assert_array_equal(double_val, int_val)
+
+    def test_inf(self):
+        val = sc.pdtr(np.inf, 1.0)
+        assert_almost_equal(val, 1.0)
+
+    def test_domain(self):
+        val = sc.pdtr(-1.1, 1.0)
+        assert np.isnan(val)
+
+class TestPdtrc:
+    def test_value(self):
+        val = sc.pdtrc(0, 1)
+        assert_almost_equal(val, 1 - np.exp(-1))
+
+    def test_m_zero(self):
+        val = sc.pdtrc([0, 1, 2], 0.0)
+        assert_array_equal(val, [0, 0, 0])
+
+    def test_rounding(self):
+        double_val = sc.pdtrc([0.1, 1.1, 2.1], 1.0)
+        int_val = sc.pdtrc([0, 1, 2], 1.0)
+        assert_array_equal(double_val, int_val)
+
+    def test_inf(self):
+        val = sc.pdtrc(np.inf, 1.0)
+        assert_almost_equal(val, 0.0)
+
+    def test_domain(self):
+        val = sc.pdtrc(-1.1, 1.0)
+        assert np.isnan(val)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_powm1.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_powm1.py
new file mode 100644
index 00000000..3d809963
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_powm1.py
@@ -0,0 +1,65 @@
+import pytest
+import numpy as np
+from numpy.testing import assert_allclose
+from scipy.special import powm1
+
+
+# Expected values were computed with mpmath, e.g.
+#
+#   >>> import mpmath
+#   >>> mpmath.mp.dps = 200
+#   >>> print(float(mpmath.powm1(2.0, 1e-7)))
+#   6.931472045825965e-08
+#
+powm1_test_cases = [
+    (1.25, 0.75, 0.18217701125396976, 1e-15),
+    (2.0, 1e-7, 6.931472045825965e-08, 1e-15),
+    (25.0, 5e-11, 1.6094379125636148e-10, 1e-15),
+    (0.99996, 0.75, -3.0000150002530058e-05, 1e-15),
+    (0.9999999999990905, 20, -1.81898940353014e-11, 1e-15),
+    (-1.25, 751.0, -6.017550852453444e+72, 2e-15)
+]
+
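+# Why powm1 exists (background note): a naive x**y - 1 cancels badly when
+# x**y is close to 1. For example, 2.0**1e-7 rounds to a double within about
+# 1e-16 of 1, so the subtraction keeps only roughly 9 correct digits of the
+# ~6.93e-8 result, while powm1(2.0, 1e-7) matches the mpmath reference above
+# to full double precision.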
+
+@pytest.mark.parametrize('x, y, expected, rtol', powm1_test_cases)
+def test_powm1(x, y, expected, rtol):
+    p = powm1(x, y)
+    assert_allclose(p, expected, rtol=rtol)
+
+
+@pytest.mark.parametrize('x, y, expected',
+                         [(0.0, 0.0, 0.0),
+                          (0.0, -1.5, np.inf),
+                          (0.0, 1.75, -1.0),
+                          (-1.5, 2.0, 1.25),
+                          (-1.5, 3.0, -4.375),
+                          (np.nan, 0.0, 0.0),
+                          (1.0, np.nan, 0.0),
+                          (1.0, np.inf, 0.0),
+                          (1.0, -np.inf, 0.0),
+                          (np.inf, 7.5, np.inf),
+                          (np.inf, -7.5, -1.0),
+                          (3.25, np.inf, np.inf),
+                          (np.inf, np.inf, np.inf),
+                          (np.inf, -np.inf, -1.0),
+                          (np.inf, 0.0, 0.0),
+                          (-np.inf, 0.0, 0.0),
+                          (-np.inf, 2.0, np.inf),
+                          (-np.inf, 3.0, -np.inf),
+                          (-1.0, float(2**53 - 1), -2.0)])
+def test_powm1_exact_cases(x, y, expected):
+    # Test cases where we have an exact expected value.
+    p = powm1(x, y)
+    assert p == expected
+
+
+@pytest.mark.parametrize('x, y',
+                         [(-1.25, 751.03),
+                          (-1.25, np.inf),
+                          (np.nan, np.nan),
+                          (-np.inf, -np.inf),
+                          (-np.inf, 2.5)])
+def test_powm1_return_nan(x, y):
+    # Test cases where the expected return value is nan.
+    p = powm1(x, y)
+    assert np.isnan(p)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_precompute_expn_asy.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_precompute_expn_asy.py
new file mode 100644
index 00000000..7b6c6cba
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_precompute_expn_asy.py
@@ -0,0 +1,24 @@
+from numpy.testing import assert_equal
+
+from scipy.special._testutils import check_version, MissingModule
+from scipy.special._precompute.expn_asy import generate_A
+
+try:
+    import sympy
+    from sympy import Poly
+except ImportError:
+    sympy = MissingModule("sympy")
+
+
+@check_version(sympy, "1.0")
+def test_generate_A():
+    # Data from DLMF 8.20.5
+    x = sympy.symbols('x')
+    Astd = [Poly(1, x),
+            Poly(1, x),
+            Poly(1 - 2*x),
+            Poly(1 - 8*x + 6*x**2)]
+    Ares = generate_A(len(Astd))
+
+    for p, q in zip(Astd, Ares):
+        assert_equal(p, q)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_precompute_gammainc.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_precompute_gammainc.py
new file mode 100644
index 00000000..17bb6661
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_precompute_gammainc.py
@@ -0,0 +1,109 @@
+import numpy as np  # np is actually used, in the decorators below.
+import pytest
+
+from scipy.special._testutils import MissingModule, check_version
+from scipy.special._mptestutils import (
+    Arg, IntArg, mp_assert_allclose, assert_mpmath_equal)
+from scipy.special._precompute.gammainc_asy import (
+    compute_g, compute_alpha, compute_d)
+from scipy.special._precompute.gammainc_data import gammainc, gammaincc
+
+try:
+    import sympy
+except ImportError:
+    sympy = MissingModule('sympy')
+
+try:
+    import mpmath as mp
+except ImportError:
+    mp = MissingModule('mpmath')
+
+
+@check_version(mp, '0.19')
+def test_g():
+    # Test data for the g_k. See DLMF 5.11.4.
+    with mp.workdps(30):
+        g = [mp.mpf(1), mp.mpf(1)/12, mp.mpf(1)/288,
+             -mp.mpf(139)/51840, -mp.mpf(571)/2488320,
+             mp.mpf(163879)/209018880, mp.mpf(5246819)/75246796800]
+        mp_assert_allclose(compute_g(7), g)
+
+
+@pytest.mark.slow
+@check_version(mp, '0.19')
+@check_version(sympy, '0.7')
+@pytest.mark.xfail_on_32bit("rtol only 2e-11, see gh-6938")
+def test_alpha():
+    # Test data for the alpha_k. See DLMF 8.12.14.
+    with mp.workdps(30):
+        alpha = [mp.mpf(0), mp.mpf(1), mp.mpf(1)/3, mp.mpf(1)/36,
+                 -mp.mpf(1)/270, mp.mpf(1)/4320, mp.mpf(1)/17010,
+                 -mp.mpf(139)/5443200, mp.mpf(1)/204120]
+        mp_assert_allclose(compute_alpha(9), alpha)
+
+
+@pytest.mark.xslow
+@check_version(mp, '0.19')
+@check_version(sympy, '0.7')
+def test_d():
+    # Compare the d_{k, n} to the results in appendix F of [1].
+    #
+    # Sources
+    # -------
+    # [1] DiDonato and Morris, Computation of the Incomplete Gamma
+    #     Function Ratios and their Inverse, ACM Transactions on
+    #     Mathematical Software, 1986.
+
+    with mp.workdps(50):
+        dataset = [(0, 0, -mp.mpf('0.333333333333333333333333333333')),
+                   (0, 12, mp.mpf('0.102618097842403080425739573227e-7')),
+                   (1, 0, -mp.mpf('0.185185185185185185185185185185e-2')),
+                   (1, 12, mp.mpf('0.119516285997781473243076536700e-7')),
+                   (2, 0, mp.mpf('0.413359788359788359788359788360e-2')),
+                   (2, 12, -mp.mpf('0.140925299108675210532930244154e-7')),
+                   (3, 0, mp.mpf('0.649434156378600823045267489712e-3')),
+                   (3, 12, -mp.mpf('0.191111684859736540606728140873e-7')),
+                   (4, 0, -mp.mpf('0.861888290916711698604702719929e-3')),
+                   (4, 12, mp.mpf('0.288658297427087836297341274604e-7')),
+                   (5, 0, -mp.mpf('0.336798553366358150308767592718e-3')),
+                   (5, 12, mp.mpf('0.482409670378941807563762631739e-7')),
+                   (6, 0, mp.mpf('0.531307936463992223165748542978e-3')),
+                   (6, 12, -mp.mpf('0.882860074633048352505085243179e-7')),
+                   (7, 0, mp.mpf('0.344367606892377671254279625109e-3')),
+                   (7, 12, -mp.mpf('0.175629733590604619378669693914e-6')),
+                   (8, 0, -mp.mpf('0.652623918595309418922034919727e-3')),
+                   (8, 12, mp.mpf('0.377358774161109793380344937299e-6')),
+                   (9, 0, -mp.mpf('0.596761290192746250124390067179e-3')),
+                   (9, 12, mp.mpf('0.870823417786464116761231237189e-6'))]
+        d = compute_d(10, 13)
+        res = [d[k][n] for k, n, std in dataset]
+        std = [x[2] for x in dataset]
+        mp_assert_allclose(res, std)
+
+
+@check_version(mp, '0.19')
+def test_gammainc():
+    # Quick check that the gammainc in
+    # special._precompute.gammainc_data agrees with mpmath's
+    # gammainc.
+    assert_mpmath_equal(gammainc,
+                        lambda a, x: mp.gammainc(a, b=x, regularized=True),
+                        [Arg(0, 100, inclusive_a=False), Arg(0, 100)],
+                        nan_ok=False, rtol=1e-17, n=50, dps=50)
+
+
+@pytest.mark.xslow
+@check_version(mp, '0.19')
+def test_gammaincc():
+    # Check that the gammaincc in special._precompute.gammainc_data
+    # agrees with mpmath's gammainc.
+    assert_mpmath_equal(lambda a, x: gammaincc(a, x, dps=1000),
+                        lambda a, x: mp.gammainc(a, a=x, regularized=True),
+                        [Arg(20, 100), Arg(20, 100)],
+                        nan_ok=False, rtol=1e-17, n=50, dps=1000)
+
+    # Test the fast integer path
+    assert_mpmath_equal(gammaincc,
+                        lambda a, x: mp.gammainc(a, a=x, regularized=True),
+                        [IntArg(1, 100), Arg(0, 100)],
+                        nan_ok=False, rtol=1e-17, n=50, dps=50)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_precompute_utils.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_precompute_utils.py
new file mode 100644
index 00000000..89616b92
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_precompute_utils.py
@@ -0,0 +1,36 @@
+import pytest
+
+from scipy.special._testutils import MissingModule, check_version
+from scipy.special._mptestutils import mp_assert_allclose
+from scipy.special._precompute.utils import lagrange_inversion
+
+try:
+    import sympy
+except ImportError:
+    sympy = MissingModule('sympy')
+
+try:
+    import mpmath as mp
+except ImportError:
+    mp = MissingModule('mpmath')
+
+
+@pytest.mark.slow
+@check_version(sympy, '0.7')
+@check_version(mp, '0.19')
+class TestInversion:
+    @pytest.mark.xfail_on_32bit("rtol only 2e-9, see gh-6938")
+    def test_log(self):
+        with mp.workdps(30):
+            logcoeffs = mp.taylor(lambda x: mp.log(1 + x), 0, 10)
+            expcoeffs = mp.taylor(lambda x: mp.exp(x) - 1, 0, 10)
+            invlogcoeffs = lagrange_inversion(logcoeffs)
+            mp_assert_allclose(invlogcoeffs, expcoeffs)
+
+    @pytest.mark.xfail_on_32bit("rtol only 1e-15, see gh-6938")
+    def test_sin(self):
+        with mp.workdps(30):
+            sincoeffs = mp.taylor(mp.sin, 0, 10)
+            asincoeffs = mp.taylor(mp.asin, 0, 10)
+            invsincoeffs = lagrange_inversion(sincoeffs)
+            mp_assert_allclose(invsincoeffs, asincoeffs, atol=1e-30)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_round.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_round.py
new file mode 100644
index 00000000..ec27e7ee
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_round.py
@@ -0,0 +1,16 @@
+import numpy as np
+import pytest
+
+from scipy.special import _test_internal
+
+
+@pytest.mark.skipif(not _test_internal.have_fenv(), reason="no fenv()")
+def test_add_round_up():
+    np.random.seed(1234)
+    _test_internal.test_add_round(10**5, 'up')
+
+
+@pytest.mark.skipif(not _test_internal.have_fenv(), reason="no fenv()")
+def test_add_round_down():
+    np.random.seed(1234)
+    _test_internal.test_add_round(10**5, 'down')
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_sf_error.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_sf_error.py
new file mode 100644
index 00000000..371cdec4
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_sf_error.py
@@ -0,0 +1,122 @@
+import sys
+import warnings
+
+from numpy.testing import assert_, assert_equal, IS_PYPY
+import pytest
+from pytest import raises as assert_raises
+
+import scipy.special as sc
+from scipy.special._ufuncs import _sf_error_test_function
+
+_sf_error_code_map = {
+    # skip 'ok'
+    'singular': 1,
+    'underflow': 2,
+    'overflow': 3,
+    'slow': 4,
+    'loss': 5,
+    'no_result': 6,
+    'domain': 7,
+    'arg': 8,
+    'other': 9
+}
+
+_sf_error_actions = [
+    'ignore',
+    'warn',
+    'raise'
+]
+
+
+def _check_action(fun, args, action):
+    if action == 'warn':
+        with pytest.warns(sc.SpecialFunctionWarning):
+            fun(*args)
+    elif action == 'raise':
+        with assert_raises(sc.SpecialFunctionError):
+            fun(*args)
+    else:
+        # action == 'ignore', make sure there are no warnings/exceptions
+        with warnings.catch_warnings():
+            warnings.simplefilter("error")
+            fun(*args)
+
+
+def test_geterr():
+    err = sc.geterr()
+    for key, value in err.items():
+        assert_(key in _sf_error_code_map)
+        assert_(value in _sf_error_actions)
+
+
+def test_seterr():
+    entry_err = sc.geterr()
+    try:
+        for category, error_code in _sf_error_code_map.items():
+            for action in _sf_error_actions:
+                geterr_olderr = sc.geterr()
+                seterr_olderr = sc.seterr(**{category: action})
+                assert_(geterr_olderr == seterr_olderr)
+                newerr = sc.geterr()
+                assert_(newerr[category] == action)
+                geterr_olderr.pop(category)
+                newerr.pop(category)
+                assert_(geterr_olderr == newerr)
+                _check_action(_sf_error_test_function, (error_code,), action)
+    finally:
+        sc.seterr(**entry_err)
+
+
+@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
+def test_sf_error_special_refcount():
+    # Regression test for gh-16233.
+    # Check that the reference count of scipy.special is not increased
+    # when a SpecialFunctionError is raised.
+    refcount_before = sys.getrefcount(sc)
+    with sc.errstate(all='raise'):
+        with pytest.raises(sc.SpecialFunctionError, match='domain error'):
+            sc.ndtri(2.0)
+    refcount_after = sys.getrefcount(sc)
+    assert refcount_after == refcount_before
+
+
+def test_errstate_pyx_basic():
+    olderr = sc.geterr()
+    with sc.errstate(singular='raise'):
+        with assert_raises(sc.SpecialFunctionError):
+            sc.loggamma(0)
+    assert_equal(olderr, sc.geterr())
+
+
+def test_errstate_c_basic():
+    olderr = sc.geterr()
+    with sc.errstate(domain='raise'):
+        with assert_raises(sc.SpecialFunctionError):
+            sc.spence(-1)
+    assert_equal(olderr, sc.geterr())
+
+
+def test_errstate_cpp_basic():
+    olderr = sc.geterr()
+    with sc.errstate(underflow='raise'):
+        with assert_raises(sc.SpecialFunctionError):
+            sc.wrightomega(-1000)
+    assert_equal(olderr, sc.geterr())
+
+
+def test_errstate():
+    for category, error_code in _sf_error_code_map.items():
+        for action in _sf_error_actions:
+            olderr = sc.geterr()
+            with sc.errstate(**{category: action}):
+                _check_action(_sf_error_test_function, (error_code,), action)
+            assert_equal(olderr, sc.geterr())
+
+
+def test_errstate_all_but_one():
+    olderr = sc.geterr()
+    with sc.errstate(all='raise', singular='ignore'):
+        sc.gammaln(0)
+        with assert_raises(sc.SpecialFunctionError):
+            sc.spence(-1.0)
+    assert_equal(olderr, sc.geterr())
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_sici.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_sici.py
new file mode 100644
index 00000000..d33c1795
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_sici.py
@@ -0,0 +1,36 @@
+import numpy as np
+
+import scipy.special as sc
+from scipy.special._testutils import FuncData
+
+
+def test_sici_consistency():
+    # Make sure the implementation of sici for real arguments agrees
+    # with the implementation of sici for complex arguments.
+
+    # On the negative real axis Cephes drops the imaginary part in ci
+    def sici(x):
+        si, ci = sc.sici(x + 0j)
+        return si.real, ci.real
+
+    x = np.r_[-np.logspace(8, -30, 200), 0, np.logspace(-30, 8, 200)]
+    si, ci = sc.sici(x)
+    dataset = np.column_stack((x, si, ci))
+    FuncData(sici, dataset, 0, (1, 2), rtol=1e-12).check()
+
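+# Background for the branch handling above (sketch): for x < 0 the principal
+# branch satisfies Ci(x + 0j) = Ci(-x) + i*pi, e.g.
+#
+#     si_c, ci_c = sc.sici(-1.0 + 0j)
+#     si_r, ci_r = sc.sici(-1.0)
+#     np.testing.assert_allclose(ci_c.real, ci_r)  # real parts agree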
+
+def test_shichi_consistency():
+    # Make sure the implementation of shichi for real arguments agrees
+    # with the implementation of shichi for complex arguments.
+
+    # On the negative real axis Cephes drops the imaginary part in chi
+    def shichi(x):
+        shi, chi = sc.shichi(x + 0j)
+        return shi.real, chi.real
+
+    # Overflow happens quickly, so limit range
+    x = np.r_[-np.logspace(np.log10(700), -30, 200), 0,
+              np.logspace(-30, np.log10(700), 200)]
+    shi, chi = sc.shichi(x)
+    dataset = np.column_stack((x, shi, chi))
+    FuncData(shichi, dataset, 0, (1, 2), rtol=1e-14).check()
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_spence.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_spence.py
new file mode 100644
index 00000000..fbb26ac2
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_spence.py
@@ -0,0 +1,32 @@
+import numpy as np
+from numpy import sqrt, log, pi
+from scipy.special._testutils import FuncData
+from scipy.special import spence
+
+
+def test_consistency():
+    # Make sure the implementation of spence for real arguments
+    # agrees with the implementation of spence for imaginary arguments.
+
+    x = np.logspace(-30, 300, 200)
+    dataset = np.vstack((x + 0j, spence(x))).T
+    FuncData(spence, dataset, 0, 1, rtol=1e-14).check()
+
+
+def test_special_points():
+    # Check against known values of Spence's function.
+
+    phi = (1 + sqrt(5))/2
+    dataset = [(1, 0),
+               (2, -pi**2/12),
+               (0.5, pi**2/12 - log(2)**2/2),
+               (0, pi**2/6),
+               (-1, pi**2/4 - 1j*pi*log(2)),
+               ((-1 + sqrt(5))/2, pi**2/15 - log(phi)**2),
+               ((3 - sqrt(5))/2, pi**2/10 - log(phi)**2),
+               (phi, -pi**2/15 + log(phi)**2/2),
+               # Corrected from Zagier, "The Dilogarithm Function"
+               ((3 + sqrt(5))/2, -pi**2/10 - log(phi)**2)]
+
+    dataset = np.asarray(dataset)
+    FuncData(spence, dataset, 0, 1, rtol=1e-14).check()
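+
+
+# Convention note (sketch): scipy's spence(z) equals the dilogarithm
+# Li2(1 - z), so spence(0) = pi**2/6 and spence(1) = 0, consistent with the
+# dataset above; mpmath.polylog(2, 1 - z) reproduces these reference values.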
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_spfun_stats.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_spfun_stats.py
new file mode 100644
index 00000000..eda32a55
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_spfun_stats.py
@@ -0,0 +1,61 @@
+import numpy as np
+from numpy.testing import (assert_array_equal,
+        assert_array_almost_equal_nulp, assert_almost_equal)
+from pytest import raises as assert_raises
+
+from scipy.special import gammaln, multigammaln
+
+
+class TestMultiGammaLn:
+
+    def test1(self):
+        # A test of the identity
+        #     Gamma_1(a) = Gamma(a)
+        np.random.seed(1234)
+        a = np.abs(np.random.randn())
+        assert_array_equal(multigammaln(a, 1), gammaln(a))
+
+    def test2(self):
+        # A test of the identity
+        #     Gamma_2(a) = sqrt(pi) * Gamma(a) * Gamma(a - 0.5)
+        a = np.array([2.5, 10.0])
+        result = multigammaln(a, 2)
+        expected = np.log(np.sqrt(np.pi)) + gammaln(a) + gammaln(a - 0.5)
+        assert_almost_equal(result, expected)
+
+    def test_bararg(self):
+        assert_raises(ValueError, multigammaln, 0.5, 1.2)
+
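+# The identities above are instances of the general formula
+#     Gamma_d(a) = pi**(d*(d - 1)/4) * prod_{j=1..d} Gamma(a + (1 - j)/2),
+# i.e. on the log scale (sketch of a reference implementation):
+#
+#     def multigammaln_ref(a, d):
+#         return (d*(d - 1)/4.)*np.log(np.pi) + sum(
+#             gammaln(a + (1 - j)/2.) for j in range(1, d + 1))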
+
+def _check_multigammaln_array_result(a, d):
+    # Test that the shape of the array returned by multigammaln
+    # matches the input shape, and that all the values match
+    # the value computed when multigammaln is called with a scalar.
+    result = multigammaln(a, d)
+    assert_array_equal(a.shape, result.shape)
+    a1 = a.ravel()
+    result1 = result.ravel()
+    for i in range(a.size):
+        assert_array_almost_equal_nulp(result1[i], multigammaln(a1[i], d))
+
+
+def test_multigammaln_array_arg():
+    # Check that the array returned by multigammaln has the correct
+    # shape and contains the correct values.  The cases have arrays
+    # with several different shapes.
+    # The cases include a regression test for ticket #1849
+    # (a = np.array([2.0]), an array with a single element).
+    np.random.seed(1234)
+
+    cases = [
+        # a, d
+        (np.abs(np.random.randn(3, 2)) + 5, 5),
+        (np.abs(np.random.randn(1, 2)) + 5, 5),
+        (np.arange(10.0, 18.0).reshape(2, 2, 2), 3),
+        (np.array([2.0]), 3),
+        (np.float64(2.0), 3),
+    ]
+
+    for a, d in cases:
+        _check_multigammaln_array_result(a, d)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_sph_harm.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_sph_harm.py
new file mode 100644
index 00000000..904ee98d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_sph_harm.py
@@ -0,0 +1,37 @@
+import numpy as np
+from numpy.testing import assert_allclose
+import scipy.special as sc
+
+
+def test_first_harmonics():
+    # Test against explicit representations of the first four
+    # spherical harmonics which use `theta` as the azimuthal angle,
+    # `phi` as the polar angle, and include the Condon-Shortley
+    # phase.
+
+    # Notation is Ymn
+    def Y00(theta, phi):
+        return 0.5*np.sqrt(1/np.pi)
+
+    def Yn11(theta, phi):
+        return 0.5*np.sqrt(3/(2*np.pi))*np.exp(-1j*theta)*np.sin(phi)
+
+    def Y01(theta, phi):
+        return 0.5*np.sqrt(3/np.pi)*np.cos(phi)
+
+    def Y11(theta, phi):
+        return -0.5*np.sqrt(3/(2*np.pi))*np.exp(1j*theta)*np.sin(phi)
+
+    harms = [Y00, Yn11, Y01, Y11]
+    m = [0, -1, 0, 1]
+    n = [0, 1, 1, 1]
+
+    theta = np.linspace(0, 2*np.pi)
+    phi = np.linspace(0, np.pi)
+    theta, phi = np.meshgrid(theta, phi)
+
+    for harm, m, n in zip(harms, m, n):
+        assert_allclose(sc.sph_harm(m, n, theta, phi),
+                        harm(theta, phi),
+                        rtol=1e-15, atol=1e-15,
+                        err_msg="Y^{}_{} incorrect".format(m, n))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_spherical_bessel.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_spherical_bessel.py
new file mode 100644
index 00000000..0cf67bdf
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_spherical_bessel.py
@@ -0,0 +1,379 @@
+#
+# Tests of spherical Bessel functions.
+#
+import numpy as np
+from numpy.testing import (assert_almost_equal, assert_allclose,
+                           assert_array_almost_equal, suppress_warnings)
+import pytest
+from numpy import sin, cos, sinh, cosh, exp, inf, nan, r_, pi
+
+from scipy.special import spherical_jn, spherical_yn, spherical_in, spherical_kn
+from scipy.integrate import quad
+
+
+class TestSphericalJn:
+    def test_spherical_jn_exact(self):
+        # https://dlmf.nist.gov/10.49.E3
+        # Note: exact expression is numerically stable only for small
+        # n or z >> n.
+        x = np.array([0.12, 1.23, 12.34, 123.45, 1234.5])
+        assert_allclose(spherical_jn(2, x),
+                        (-1/x + 3/x**3)*sin(x) - 3/x**2*cos(x))
+
+    def test_spherical_jn_recurrence_complex(self):
+        # https://dlmf.nist.gov/10.51.E1
+        n = np.array([1, 2, 3, 7, 12])
+        x = 1.1 + 1.5j
+        assert_allclose(spherical_jn(n - 1, x) + spherical_jn(n + 1, x),
+                        (2*n + 1)/x*spherical_jn(n, x))
+
+    def test_spherical_jn_recurrence_real(self):
+        # https://dlmf.nist.gov/10.51.E1
+        n = np.array([1, 2, 3, 7, 12])
+        x = 0.12
+        assert_allclose(spherical_jn(n - 1, x) + spherical_jn(n + 1, x),
+                        (2*n + 1)/x*spherical_jn(n, x))
+
+    def test_spherical_jn_inf_real(self):
+        # https://dlmf.nist.gov/10.52.E3
+        n = 6
+        x = np.array([-inf, inf])
+        assert_allclose(spherical_jn(n, x), np.array([0, 0]))
+
+    def test_spherical_jn_inf_complex(self):
+        # https://dlmf.nist.gov/10.52.E3
+        n = 7
+        x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)])
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "invalid value encountered in multiply")
+            assert_allclose(spherical_jn(n, x), np.array([0, 0, inf*(1+1j)]))
+
+    def test_spherical_jn_large_arg_1(self):
+        # https://github.com/scipy/scipy/issues/2165
+        # Reference value computed using mpmath, via
+        # besselj(n + mpf(1)/2, z)*sqrt(pi/(2*z))
+        assert_allclose(spherical_jn(2, 3350.507), -0.00029846226538040747)
+
+    def test_spherical_jn_large_arg_2(self):
+        # https://github.com/scipy/scipy/issues/1641
+        # Reference value computed using mpmath, via
+        # besselj(n + mpf(1)/2, z)*sqrt(pi/(2*z))
+        assert_allclose(spherical_jn(2, 10000), 3.0590002633029811e-05)
+
+    def test_spherical_jn_at_zero(self):
+        # https://dlmf.nist.gov/10.52.E1
+        # But note that n = 0 is a special case: j0 = sin(x)/x -> 1
+        n = np.array([0, 1, 2, 5, 10, 100])
+        x = 0
+        assert_allclose(spherical_jn(n, x), np.array([1, 0, 0, 0, 0, 0]))
+
+
+class TestSphericalYn:
+    def test_spherical_yn_exact(self):
+        # https://dlmf.nist.gov/10.49.E5
+        # Note: exact expression is numerically stable only for small
+        # n or z >> n.
+        x = np.array([0.12, 1.23, 12.34, 123.45, 1234.5])
+        assert_allclose(spherical_yn(2, x),
+                        (1/x - 3/x**3)*cos(x) - 3/x**2*sin(x))
+
+    def test_spherical_yn_recurrence_real(self):
+        # https://dlmf.nist.gov/10.51.E1
+        n = np.array([1, 2, 3, 7, 12])
+        x = 0.12
+        assert_allclose(spherical_yn(n - 1, x) + spherical_yn(n + 1, x),
+                        (2*n + 1)/x*spherical_yn(n, x))
+
+    def test_spherical_yn_recurrence_complex(self):
+        # https://dlmf.nist.gov/10.51.E1
+        n = np.array([1, 2, 3, 7, 12])
+        x = 1.1 + 1.5j
+        assert_allclose(spherical_yn(n - 1, x) + spherical_yn(n + 1, x),
+                        (2*n + 1)/x*spherical_yn(n, x))
+
+    def test_spherical_yn_inf_real(self):
+        # https://dlmf.nist.gov/10.52.E3
+        n = 6
+        x = np.array([-inf, inf])
+        assert_allclose(spherical_yn(n, x), np.array([0, 0]))
+
+    def test_spherical_yn_inf_complex(self):
+        # https://dlmf.nist.gov/10.52.E3
+        n = 7
+        x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)])
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "invalid value encountered in multiply")
+            assert_allclose(spherical_yn(n, x), np.array([0, 0, inf*(1+1j)]))
+
+    def test_spherical_yn_at_zero(self):
+        # https://dlmf.nist.gov/10.52.E2
+        n = np.array([0, 1, 2, 5, 10, 100])
+        x = 0
+        assert_allclose(spherical_yn(n, x), np.full(n.shape, -inf))
+
+    def test_spherical_yn_at_zero_complex(self):
+        # Consistently with numpy:
+        # >>> -np.cos(0)/0
+        # -inf
+        # >>> -np.cos(0+0j)/(0+0j)
+        # (-inf + nan*j)
+        n = np.array([0, 1, 2, 5, 10, 100])
+        x = 0 + 0j
+        assert_allclose(spherical_yn(n, x), np.full(n.shape, nan))
+
+
+class TestSphericalJnYnCrossProduct:
+    def test_spherical_jn_yn_cross_product_1(self):
+        # https://dlmf.nist.gov/10.50.E3
+        n = np.array([1, 5, 8])
+        x = np.array([0.1, 1, 10])
+        left = (spherical_jn(n + 1, x) * spherical_yn(n, x) -
+                spherical_jn(n, x) * spherical_yn(n + 1, x))
+        right = 1/x**2
+        assert_allclose(left, right)
+
+    def test_spherical_jn_yn_cross_product_2(self):
+        # https://dlmf.nist.gov/10.50.E3
+        n = np.array([1, 5, 8])
+        x = np.array([0.1, 1, 10])
+        left = (spherical_jn(n + 2, x) * spherical_yn(n, x) -
+                spherical_jn(n, x) * spherical_yn(n + 2, x))
+        right = (2*n + 3)/x**3
+        assert_allclose(left, right)
+
+
+class TestSphericalIn:
+    def test_spherical_in_exact(self):
+        # https://dlmf.nist.gov/10.49.E9
+        x = np.array([0.12, 1.23, 12.34, 123.45])
+        assert_allclose(spherical_in(2, x),
+                        (1/x + 3/x**3)*sinh(x) - 3/x**2*cosh(x))
+
+    def test_spherical_in_recurrence_real(self):
+        # https://dlmf.nist.gov/10.51.E4
+        n = np.array([1, 2, 3, 7, 12])
+        x = 0.12
+        assert_allclose(spherical_in(n - 1, x) - spherical_in(n + 1, x),
+                        (2*n + 1)/x*spherical_in(n, x))
+
+    def test_spherical_in_recurrence_complex(self):
+        # https://dlmf.nist.gov/10.51.E4
+        n = np.array([1, 2, 3, 7, 12])
+        x = 1.1 + 1.5j
+        assert_allclose(spherical_in(n - 1, x) - spherical_in(n + 1, x),
+                        (2*n + 1)/x*spherical_in(n, x))
+
+    def test_spherical_in_inf_real(self):
+        # https://dlmf.nist.gov/10.52.E3
+        n = 5
+        x = np.array([-inf, inf])
+        assert_allclose(spherical_in(n, x), np.array([-inf, inf]))
+
+    def test_spherical_in_inf_complex(self):
+        # https://dlmf.nist.gov/10.52.E5
+        # Ideally, i1n(n, 1j*inf) = 0 and i1n(n, (1+1j)*inf) = (1+1j)*inf, but
+        # this appears impossible to achieve because C99 regards any complex
+        # value with at least one infinite part as a complex infinity, so
+        # 1j*inf cannot be distinguished from (1+1j)*inf.  Therefore, nan is
+        # the correct return value.
+        n = 7
+        x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)])
+        assert_allclose(spherical_in(n, x), np.array([-inf, inf, nan]))
+
+    def test_spherical_in_at_zero(self):
+        # https://dlmf.nist.gov/10.52.E1
+        # But note that n = 0 is a special case: i0 = sinh(x)/x -> 1
+        n = np.array([0, 1, 2, 5, 10, 100])
+        x = 0
+        assert_allclose(spherical_in(n, x), np.array([1, 0, 0, 0, 0, 0]))
+
+
+class TestSphericalKn:
+    def test_spherical_kn_exact(self):
+        # https://dlmf.nist.gov/10.49.E13
+        x = np.array([0.12, 1.23, 12.34, 123.45])
+        assert_allclose(spherical_kn(2, x),
+                        pi/2*exp(-x)*(1/x + 3/x**2 + 3/x**3))
+
+    def test_spherical_kn_recurrence_real(self):
+        # https://dlmf.nist.gov/10.51.E4
+        n = np.array([1, 2, 3, 7, 12])
+        x = 0.12
+        assert_allclose((-1)**(n - 1)*spherical_kn(n - 1, x)
+                        - (-1)**(n + 1)*spherical_kn(n + 1, x),
+                        (-1)**n*(2*n + 1)/x*spherical_kn(n, x))
+
+    def test_spherical_kn_recurrence_complex(self):
+        # https://dlmf.nist.gov/10.51.E4
+        n = np.array([1, 2, 3, 7, 12])
+        x = 1.1 + 1.5j
+        assert_allclose((-1)**(n - 1)*spherical_kn(n - 1, x)
+                        - (-1)**(n + 1)*spherical_kn(n + 1, x),
+                        (-1)**n*(2*n + 1)/x*spherical_kn(n, x))
+
+    def test_spherical_kn_inf_real(self):
+        # https://dlmf.nist.gov/10.52.E6
+        n = 5
+        x = np.array([-inf, inf])
+        assert_allclose(spherical_kn(n, x), np.array([-inf, 0]))
+
+    def test_spherical_kn_inf_complex(self):
+        # https://dlmf.nist.gov/10.52.E6
+        # The behavior at complex infinity depends on the sign of the real
+        # part: if Re(z) >= 0, then the limit is 0; if Re(z) < 0, then it's
+        # z*inf.  This distinction cannot be captured, so we return nan.
+        n = 7
+        x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)])
+        assert_allclose(spherical_kn(n, x), np.array([-inf, 0, nan]))
+
+    def test_spherical_kn_at_zero(self):
+        # https://dlmf.nist.gov/10.52.E2
+        n = np.array([0, 1, 2, 5, 10, 100])
+        x = 0
+        assert_allclose(spherical_kn(n, x), np.full(n.shape, inf))
+
+    def test_spherical_kn_at_zero_complex(self):
+        # https://dlmf.nist.gov/10.52.E2
+        n = np.array([0, 1, 2, 5, 10, 100])
+        x = 0 + 0j
+        assert_allclose(spherical_kn(n, x), np.full(n.shape, nan))
+
+
+class SphericalDerivativesTestCase:
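+    # Check derivative=True against the fundamental theorem of calculus:
+    # the quadrature of df over [a, b] must reproduce f(b) - f(a).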
+    def fundamental_theorem(self, n, a, b):
+        integral, tolerance = quad(lambda z: self.df(n, z), a, b)
+        assert_allclose(integral,
+                        self.f(n, b) - self.f(n, a),
+                        atol=tolerance)
+
+    @pytest.mark.slow
+    def test_fundamental_theorem_0(self):
+        self.fundamental_theorem(0, 3.0, 15.0)
+
+    @pytest.mark.slow
+    def test_fundamental_theorem_7(self):
+        self.fundamental_theorem(7, 0.5, 1.2)
+
+
+class TestSphericalJnDerivatives(SphericalDerivativesTestCase):
+    def f(self, n, z):
+        return spherical_jn(n, z)
+
+    def df(self, n, z):
+        return spherical_jn(n, z, derivative=True)
+
+    def test_spherical_jn_d_zero(self):
+        n = np.array([0, 1, 2, 3, 7, 15])
+        assert_allclose(spherical_jn(n, 0, derivative=True),
+                        np.array([0, 1/3, 0, 0, 0, 0]))
+
+
+class TestSphericalYnDerivatives(SphericalDerivativesTestCase):
+    def f(self, n, z):
+        return spherical_yn(n, z)
+
+    def df(self, n, z):
+        return spherical_yn(n, z, derivative=True)
+
+
+class TestSphericalInDerivatives(SphericalDerivativesTestCase):
+    def f(self, n, z):
+        return spherical_in(n, z)
+
+    def df(self, n, z):
+        return spherical_in(n, z, derivative=True)
+
+    def test_spherical_in_d_zero(self):
+        n = np.array([1, 2, 3, 7, 15])
+        assert_allclose(spherical_in(n, 0, derivative=True),
+                        np.zeros(5))
+
+
+class TestSphericalKnDerivatives(SphericalDerivativesTestCase):
+    def f(self, n, z):
+        return spherical_kn(n, z)
+
+    def df(self, n, z):
+        return spherical_kn(n, z, derivative=True)
+
+
+class TestSphericalOld:
+    # These are tests from the TestSpherical class of test_basic.py,
+    # rewritten to use spherical_* instead of sph_* but otherwise unchanged.
+
+    def test_sph_in(self):
+        # This test reproduces test_basic.TestSpherical.test_sph_in.
+        i1n = np.empty((2,2))
+        x = 0.2
+
+        i1n[0][0] = spherical_in(0, x)
+        i1n[0][1] = spherical_in(1, x)
+        i1n[1][0] = spherical_in(0, x, derivative=True)
+        i1n[1][1] = spherical_in(1, x, derivative=True)
+
+        inp0 = i1n[0][1]
+        inp1 = i1n[0][0] - 2.0/0.2 * i1n[0][1]
+        assert_array_almost_equal(i1n[0], np.array([1.0066800127054699381,
+                                                    0.066933714568029540839]), 12)
+        assert_array_almost_equal(i1n[1], [inp0, inp1], 12)
+
+    def test_sph_in_kn_order0(self):
+        x = 1.
+        sph_i0 = np.empty((2,))
+        sph_i0[0] = spherical_in(0, x)
+        sph_i0[1] = spherical_in(0, x, derivative=True)
+        sph_i0_expected = np.array([np.sinh(x)/x,
+                                    np.cosh(x)/x-np.sinh(x)/x**2])
+        assert_array_almost_equal(r_[sph_i0], sph_i0_expected)
+
+        sph_k0 = np.empty((2,))
+        sph_k0[0] = spherical_kn(0, x)
+        sph_k0[1] = spherical_kn(0, x, derivative=True)
+        sph_k0_expected = np.array([0.5*pi*exp(-x)/x,
+                                    -0.5*pi*exp(-x)*(1/x+1/x**2)])
+        assert_array_almost_equal(r_[sph_k0], sph_k0_expected)
+
+    def test_sph_jn(self):
+        s1 = np.empty((2,3))
+        x = 0.2
+
+        s1[0][0] = spherical_jn(0, x)
+        s1[0][1] = spherical_jn(1, x)
+        s1[0][2] = spherical_jn(2, x)
+        s1[1][0] = spherical_jn(0, x, derivative=True)
+        s1[1][1] = spherical_jn(1, x, derivative=True)
+        s1[1][2] = spherical_jn(2, x, derivative=True)
+
+        s10 = -s1[0][1]
+        s11 = s1[0][0] - 2.0/0.2*s1[0][1]
+        s12 = s1[0][1] - 3.0/0.2*s1[0][2]
+        assert_array_almost_equal(s1[0], [0.99334665397530607731,
+                                          0.066400380670322230863,
+                                          0.0026590560795273856680], 12)
+        assert_array_almost_equal(s1[1], [s10, s11, s12], 12)
+
+    def test_sph_kn(self):
+        kn = np.empty((2,3))
+        x = 0.2
+
+        kn[0][0] = spherical_kn(0, x)
+        kn[0][1] = spherical_kn(1, x)
+        kn[0][2] = spherical_kn(2, x)
+        kn[1][0] = spherical_kn(0, x, derivative=True)
+        kn[1][1] = spherical_kn(1, x, derivative=True)
+        kn[1][2] = spherical_kn(2, x, derivative=True)
+
+        kn0 = -kn[0][1]
+        kn1 = -kn[0][0] - 2.0/0.2*kn[0][1]
+        kn2 = -kn[0][1] - 3.0/0.2*kn[0][2]
+        assert_array_almost_equal(kn[0], [6.4302962978445670140,
+                                          38.581777787067402086,
+                                          585.15696310385559829], 12)
+        assert_array_almost_equal(kn[1], [kn0, kn1, kn2], 9)
+
+    def test_sph_yn(self):
+        sy1 = spherical_yn(2, 0.2)
+        sy2 = spherical_yn(0, 0.2)
+        assert_almost_equal(sy1, -377.52483, 5)  # previously established values
+        assert_almost_equal(sy2, -4.9003329, 5)
+        sphpy = (spherical_yn(0, 0.2) - 2*spherical_yn(2, 0.2))/3
+        sy3 = spherical_yn(1, 0.2, derivative=True)
+        # compare with the derivative value obtained from the recurrence
+        assert_almost_equal(sy3, sphpy, 4)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_trig.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_trig.py
new file mode 100644
index 00000000..7eaa8586
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_trig.py
@@ -0,0 +1,66 @@
+import numpy as np
+from numpy.testing import assert_equal, assert_allclose, suppress_warnings
+
+from scipy.special._ufuncs import _sinpi as sinpi
+from scipy.special._ufuncs import _cospi as cospi
+
+
+def test_integer_real_part():
+    x = np.arange(-100, 101)
+    y = np.hstack((-np.linspace(310, -30, 10), np.linspace(-30, 310, 10)))
+    x, y = np.meshgrid(x, y)
+    z = x + 1j*y
+    # In the following we should be *exactly* right
+    res = sinpi(z)
+    assert_equal(res.real, 0.0)
+    res = cospi(z)
+    assert_equal(res.imag, 0.0)
+
+
+def test_half_integer_real_part():
+    x = np.arange(-100, 101) + 0.5
+    y = np.hstack((-np.linspace(310, -30, 10), np.linspace(-30, 310, 10)))
+    x, y = np.meshgrid(x, y)
+    z = x + 1j*y
+    # In the following we should be *exactly* right
+    res = sinpi(z)
+    assert_equal(res.imag, 0.0)
+    res = cospi(z)
+    assert_equal(res.real, 0.0)
+
+
+def test_intermediate_overflow():
+    # Make sure we avoid overflow in situations where cosh/sinh would
+    # overflow but the product with sin/cos would not
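+    # (sin(pi*(x + 1j*y)) = sin(pi*x)*cosh(pi*y) + 1j*cos(pi*x)*sinh(pi*y),
+    # so a tiny sin(pi*x) factor can keep one component finite even though
+    # cosh/sinh overflow on their own.)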
+    sinpi_pts = [complex(1 + 1e-14, 227),
+                 complex(1e-35, 250),
+                 complex(1e-301, 445)]
+    # Data generated with mpmath
+    sinpi_std = [complex(-8.113438309924894e+295, -np.inf),
+                 complex(1.9507801934611995e+306, np.inf),
+                 complex(2.205958493464539e+306, np.inf)]
+    with suppress_warnings() as sup:
+        sup.filter(RuntimeWarning, "invalid value encountered in multiply")
+        for p, std in zip(sinpi_pts, sinpi_std):
+            assert_allclose(sinpi(p), std)
+
+    # Test for cosine, less interesting because cos(0) = 1.
+    p = complex(0.5 + 1e-14, 227)
+    std = complex(-8.113438309924894e+295, -np.inf)
+    with suppress_warnings() as sup:
+        sup.filter(RuntimeWarning, "invalid value encountered in multiply")
+        assert_allclose(cospi(p), std)
+
+
+def test_zero_sign():
+    y = sinpi(-0.0)
+    assert y == 0.0
+    assert np.signbit(y)
+
+    y = sinpi(0.0)
+    assert y == 0.0
+    assert not np.signbit(y)
+
+    y = cospi(0.5)
+    assert y == 0.0
+    assert not np.signbit(y)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_wright_bessel.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_wright_bessel.py
new file mode 100644
index 00000000..319db817
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_wright_bessel.py
@@ -0,0 +1,115 @@
+# Reference MPMATH implementation:
+#
+# import mpmath
+# from mpmath import nsum
+#
+# def Wright_Series_MPMATH(a, b, z, dps=50, method='r+s+e', steps=[1000]):
+#    """Compute Wright' generalized Bessel function as Series.
+#
+#    This uses mpmath for arbitrary precision.
+#    """
+#    with mpmath.workdps(dps):
+#        res = nsum(lambda k: z**k/mpmath.fac(k) * mpmath.rgamma(a*k+b),
+#                          [0, mpmath.inf],
+#                          tol=dps, method=method, steps=steps
+#                          )
+#
+#    return res
+
+import pytest
+import numpy as np
+from numpy.testing import assert_equal, assert_allclose
+
+import scipy.special as sc
+from scipy.special import rgamma, wright_bessel
+
+
+@pytest.mark.parametrize('a', [0, 1e-6, 0.1, 0.5, 1, 10])
+@pytest.mark.parametrize('b', [0, 1e-6, 0.1, 0.5, 1, 10])
+def test_wright_bessel_zero(a, b):
+    """Test at x = 0."""
+    assert_equal(wright_bessel(a, b, 0.), rgamma(b))
+
+
+@pytest.mark.parametrize('b', [0, 1e-6, 0.1, 0.5, 1, 10])
+@pytest.mark.parametrize('x', [0, 1e-6, 0.1, 0.5, 1])
+def test_wright_bessel_iv(b, x):
+    """Test relation of wright_bessel and modified bessel function iv.
+
+    iv(v, z) = (z/2)**v * Phi(1, v + 1; z**2/4).
+    See https://dlmf.nist.gov/10.46.E2
+    """
+    if x != 0:
+        v = b - 1
+        wb = wright_bessel(1, v + 1, x**2 / 4.)
+        # Note: iv(v, x) has relative precision of less than 1e-12 in some
+        # cases (e.g. v = 1 - 1e-6 and x = 1e-6).
+        assert_allclose(np.power(x / 2., v) * wb,
+                        sc.iv(v, x),
+                        rtol=1e-11, atol=1e-11)
+
+
+@pytest.mark.parametrize('a', [0, 1e-6, 0.1, 0.5, 1, 10])
+@pytest.mark.parametrize('b', [1, 1 + 1e-3, 2, 5, 10])
+@pytest.mark.parametrize('x', [0, 1e-6, 0.1, 0.5, 1, 5, 10, 100])
+def test_wright_functional(a, b, x):
+    """Test functional relation of wright_bessel.
+
+    Phi(a, b-1, z) = a*z*Phi(a, b+a, z) + (b-1)*Phi(a, b, z)
+
+    Note that d/dx Phi(a, b, x) = Phi(a, a + b, x).
+    See Eq. (22) of
+    B. Stankovic, On the Function of E. M. Wright,
+    Publ. de l' Institut Mathematique, Beograd,
+    Nouvelle S`er. 10 (1970), 113-124.
+    """
+    assert_allclose(wright_bessel(a, b - 1, x),
+                    a * x * wright_bessel(a, b + a, x)
+                    + (b - 1) * wright_bessel(a, b, x),
+                    rtol=1e-8, atol=1e-8)
+
+
+# grid of rows [a, b, x, value, accuracy] that do not reach 1e-11 accuracy
+# see output of:
+# cd scipy/scipy/_precompute
+# python wright_bessel_data.py
+grid_a_b_x_value_acc = np.array([
+    [0.1, 100.0, 709.7827128933841, 8.026353022981087e+34, 2e-8],
+    [0.5, 10.0, 709.7827128933841, 2.680788404494657e+48, 9e-8],
+    [0.5, 10.0, 1000.0, 2.005901980702872e+64, 1e-8],
+    [0.5, 100.0, 1000.0, 3.4112367580445246e-117, 6e-8],
+    [1.0, 20.0, 100000.0, 1.7717158630699857e+225, 3e-11],
+    [1.0, 100.0, 100000.0, 1.0269334596230763e+22, np.nan],
+    [1.0000000000000222, 20.0, 100000.0, 1.7717158630001672e+225, 3e-11],
+    [1.0000000000000222, 100.0, 100000.0, 1.0269334595866202e+22, np.nan],
+    [1.5, 0.0, 500.0, 15648961196.432373, 3e-11],
+    [1.5, 2.220446049250313e-14, 500.0, 15648961196.431465, 3e-11],
+    [1.5, 1e-10, 500.0, 15648961192.344728, 3e-11],
+    [1.5, 1e-05, 500.0, 15648552437.334162, 3e-11],
+    [1.5, 0.1, 500.0, 12049870581.10317, 2e-11],
+    [1.5, 20.0, 100000.0, 7.81930438331405e+43, 3e-9],
+    [1.5, 100.0, 100000.0, 9.653370857459075e-130, np.nan],
+    ])
+
+
+@pytest.mark.xfail
+@pytest.mark.parametrize(
+    'a, b, x, phi',
+    grid_a_b_x_value_acc[:, :4].tolist())
+def test_wright_data_grid_failures(a, b, x, phi):
+    """Test cases of test_data that do not reach relative accuracy of 1e-11"""
+    assert_allclose(wright_bessel(a, b, x), phi, rtol=1e-11)
+
+
+@pytest.mark.parametrize(
+    'a, b, x, phi, accuracy',
+    grid_a_b_x_value_acc.tolist())
+def test_wright_data_grid_less_accurate(a, b, x, phi, accuracy):
+    """Test cases of test_data that do not reach relative accuracy of 1e-11
+
+    Here we test for reduced accuracy or even nan.
+    """
+    if np.isnan(accuracy):
+        assert np.isnan(wright_bessel(a, b, x))
+    else:
+        assert_allclose(wright_bessel(a, b, x), phi, rtol=accuracy)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_wrightomega.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_wrightomega.py
new file mode 100644
index 00000000..e2d48c8d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_wrightomega.py
@@ -0,0 +1,117 @@
+import pytest
+import numpy as np
+from numpy.testing import assert_, assert_equal, assert_allclose
+
+import scipy.special as sc
+from scipy.special._testutils import assert_func_equal
+
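+# wrightomega(z) is Wright's omega function, the solution w of w + log(w) = z
+# on the principal branch; e.g. wrightomega(1.0) == 1, since 1 + log(1) = 1
+# (quick sanity sketch, not part of the original suite).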
+
+def test_wrightomega_nan():
+    pts = [complex(np.nan, 0),
+           complex(0, np.nan),
+           complex(np.nan, np.nan),
+           complex(np.nan, 1),
+           complex(1, np.nan)]
+    for p in pts:
+        res = sc.wrightomega(p)
+        assert_(np.isnan(res.real))
+        assert_(np.isnan(res.imag))
+
+
+def test_wrightomega_inf_branch():
+    pts = [complex(-np.inf, np.pi/4),
+           complex(-np.inf, -np.pi/4),
+           complex(-np.inf, 3*np.pi/4),
+           complex(-np.inf, -3*np.pi/4)]
+    expected_results = [complex(0.0, 0.0),
+                        complex(0.0, -0.0),
+                        complex(-0.0, 0.0),
+                        complex(-0.0, -0.0)]
+    for p, expected in zip(pts, expected_results):
+        res = sc.wrightomega(p)
+        # We can't use assert_equal(res, expected) because in older versions of
+        # numpy, assert_equal doesn't check the sign of the real and imaginary
+        # parts when comparing complex zeros. It does check the sign when the
+        # arguments are *real* scalars.
+        assert_equal(res.real, expected.real)
+        assert_equal(res.imag, expected.imag)
+
+
+def test_wrightomega_inf():
+    pts = [complex(np.inf, 10),
+           complex(-np.inf, 10),
+           complex(10, np.inf),
+           complex(10, -np.inf)]
+    for p in pts:
+        assert_equal(sc.wrightomega(p), p)
+
+
+def test_wrightomega_singular():
+    pts = [complex(-1.0, np.pi),
+           complex(-1.0, -np.pi)]
+    for p in pts:
+        res = sc.wrightomega(p)
+        assert_equal(res, -1.0)
+        assert_(not np.signbit(res.imag))
+
+
+@pytest.mark.parametrize('x, desired', [
+    (-np.inf, 0),
+    (np.inf, np.inf),
+])
+def test_wrightomega_real_infinities(x, desired):
+    assert sc.wrightomega(x) == desired
+
+
+def test_wrightomega_real_nan():
+    assert np.isnan(sc.wrightomega(np.nan))
+
+
+def test_wrightomega_real_series_crossover():
+    desired_error = 2 * np.finfo(float).eps
+    crossover = 1e20
+    x_before_crossover = np.nextafter(crossover, -np.inf)
+    x_after_crossover = np.nextafter(crossover, np.inf)
+    # Computed using mpmath
+    desired_before_crossover = 99999999999999983569.948
+    desired_after_crossover = 100000000000000016337.948
+    assert_allclose(
+        sc.wrightomega(x_before_crossover),
+        desired_before_crossover,
+        atol=0,
+        rtol=desired_error,
+    )
+    assert_allclose(
+        sc.wrightomega(x_after_crossover),
+        desired_after_crossover,
+        atol=0,
+        rtol=desired_error,
+    )
+
+
+def test_wrightomega_exp_approximation_crossover():
+    desired_error = 2 * np.finfo(float).eps
+    crossover = -50
+    x_before_crossover = np.nextafter(crossover, np.inf)
+    x_after_crossover = np.nextafter(crossover, -np.inf)
+    # Computed using mpmath
+    desired_before_crossover = 1.9287498479639314876e-22
+    desired_after_crossover = 1.9287498479639040784e-22
+    assert_allclose(
+        sc.wrightomega(x_before_crossover),
+        desired_before_crossover,
+        atol=0,
+        rtol=desired_error,
+    )
+    assert_allclose(
+        sc.wrightomega(x_after_crossover),
+        desired_after_crossover,
+        atol=0,
+        rtol=desired_error,
+    )
+
+
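+# Editor's sketch (not upstream): the crossovers above mark asymptotic
+# regimes of the real Wright omega function, which solves w + log(w) = x.
+# For large positive x, w(x) ~ x - log(x); for very negative x, w(x) ~ exp(x).
+# Tolerances are loose because these are only leading-order approximations.
+def test_wrightomega_asymptotics_sketch():
+    x = 1e8
+    assert_allclose(sc.wrightomega(x), x - np.log(x), rtol=1e-10)
+    x = -50.0
+    assert_allclose(sc.wrightomega(x), np.exp(x), rtol=1e-12)
+
+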
+def test_wrightomega_real_versus_complex():
+    x = np.linspace(-500, 500, 1001)
+    results = sc.wrightomega(x + 0j).real
+    assert_func_equal(sc.wrightomega, results, x, atol=0, rtol=1e-14)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_zeta.py b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_zeta.py
new file mode 100644
index 00000000..82b3245c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/special/tests/test_zeta.py
@@ -0,0 +1,49 @@
+import scipy.special as sc
+import numpy as np
+from numpy.testing import assert_equal, assert_allclose
+
+
+def test_zeta():
+    assert_allclose(sc.zeta(2,2), np.pi**2/6 - 1, rtol=1e-12)
+
+
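+# Editor's sketch (not upstream): with two arguments, sc.zeta is the Hurwitz
+# zeta function sum_{n>=0} (n + a)**(-s), so zeta(2, 2) equals zeta(2) - 1.
+def test_zeta_hurwitz_relation_sketch():
+    assert_allclose(sc.zeta(2, 2), sc.zeta(2) - 1, rtol=1e-12)
+
+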
+def test_zetac():
+    # Expected values in the following were computed using Wolfram
+    # Alpha's `Zeta[x] - 1`
+    x = [-2.1, 0.8, 0.9999, 9, 50, 75]
+    desired = [
+        -0.9972705002153750,
+        -5.437538415895550,
+        -10000.42279161673,
+        0.002008392826082214,
+        8.881784210930816e-16,
+        2.646977960169853e-23,
+    ]
+    assert_allclose(sc.zetac(x), desired, rtol=1e-12)
+
+
+def test_zetac_special_cases():
+    assert sc.zetac(np.inf) == 0
+    assert np.isnan(sc.zetac(-np.inf))
+    assert sc.zetac(0) == -1.5
+    assert sc.zetac(1.0) == np.inf
+
+    assert_equal(sc.zetac([-2, -50, -100]), -1)
+
+
+def test_riemann_zeta_special_cases():
+    assert np.isnan(sc.zeta(np.nan))
+    assert sc.zeta(np.inf) == 1
+    assert sc.zeta(0) == -0.5
+
+    # Riemann zeta is zero at negative even integers.
+    assert_equal(sc.zeta([-2, -4, -6, -8, -10]), 0)
+
+    assert_allclose(sc.zeta(2), np.pi**2/6, rtol=1e-12)
+    assert_allclose(sc.zeta(4), np.pi**4/90, rtol=1e-12)
+
+
+def test_riemann_zeta_avoid_overflow():
+    s = -260.00000000001
+    desired = -5.6966307844402683127e+297  # Computed with mpmath
+    assert_allclose(sc.zeta(s), desired, atol=0, rtol=5e-14)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/stats/__init__.py
new file mode 100644
index 00000000..7cc270dc
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/__init__.py
@@ -0,0 +1,515 @@
+"""
+.. _statsrefmanual:
+
+==========================================
+Statistical functions (:mod:`scipy.stats`)
+==========================================
+
+.. currentmodule:: scipy.stats
+
+This module contains a large number of probability distributions,
+summary and frequency statistics, correlation functions and statistical
+tests, masked statistics, kernel density estimation, quasi-Monte Carlo
+functionality, and more.
+
+Statistics is a very large area, and there are topics that are out of scope
+for SciPy and are covered by other packages. Some of the most important ones
+are:
+
+- `statsmodels <https://www.statsmodels.org/>`__:
+  regression, linear models, time series analysis, extensions to topics
+  also covered by ``scipy.stats``.
+- `Pandas <https://pandas.pydata.org/>`__: tabular data, time series
+  functionality, interfaces to other statistical languages.
+- `PyMC <https://www.pymc.io/>`__: Bayesian statistical
+  modeling, probabilistic machine learning.
+- `scikit-learn <https://scikit-learn.org/>`__: classification, regression,
+  model selection.
+- `Seaborn <https://seaborn.pydata.org/>`__: statistical data visualization.
+- `rpy2 <https://rpy2.github.io/>`__: Python to R bridge.
+
+
+Probability distributions
+=========================
+
+Each univariate distribution is an instance of a subclass of `rv_continuous`
+(`rv_discrete` for discrete distributions):
+
+.. autosummary::
+   :toctree: generated/
+
+   rv_continuous
+   rv_discrete
+   rv_histogram
+
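+All of these share a common method interface; for example (an illustrative
+sketch, not an exhaustive API reference)::
+
+    >>> from scipy import stats
+    >>> stats.norm.cdf(0.0)          # standard normal CDF at zero
+    0.5
+    >>> frozen = stats.gamma(a=2.0)  # "freeze" the shape parameter
+    >>> frozen.mean()
+    2.0
+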
+Continuous distributions
+------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   alpha             -- Alpha
+   anglit            -- Anglit
+   arcsine           -- Arcsine
+   argus             -- Argus
+   beta              -- Beta
+   betaprime         -- Beta Prime
+   bradford          -- Bradford
+   burr              -- Burr (Type III)
+   burr12            -- Burr (Type XII)
+   cauchy            -- Cauchy
+   chi               -- Chi
+   chi2              -- Chi-squared
+   cosine            -- Cosine
+   crystalball       -- Crystalball
+   dgamma            -- Double Gamma
+   dweibull          -- Double Weibull
+   erlang            -- Erlang
+   expon             -- Exponential
+   exponnorm         -- Exponentially Modified Normal
+   exponweib         -- Exponentiated Weibull
+   exponpow          -- Exponential Power
+   f                 -- F (Snedecor F)
+   fatiguelife       -- Fatigue Life (Birnbaum-Saunders)
+   fisk              -- Fisk
+   foldcauchy        -- Folded Cauchy
+   foldnorm          -- Folded Normal
+   genlogistic       -- Generalized Logistic
+   gennorm           -- Generalized normal
+   genpareto         -- Generalized Pareto
+   genexpon          -- Generalized Exponential
+   genextreme        -- Generalized Extreme Value
+   gausshyper        -- Gauss Hypergeometric
+   gamma             -- Gamma
+   gengamma          -- Generalized gamma
+   genhalflogistic   -- Generalized Half Logistic
+   genhyperbolic     -- Generalized Hyperbolic
+   geninvgauss       -- Generalized Inverse Gaussian
+   gibrat            -- Gibrat
+   gilbrat           -- Gilbrat
+   gompertz          -- Gompertz (Truncated Gumbel)
+   gumbel_r          -- Right Sided Gumbel, Log-Weibull, Fisher-Tippett, Extreme Value Type I
+   gumbel_l          -- Left Sided Gumbel, etc.
+   halfcauchy        -- Half Cauchy
+   halflogistic      -- Half Logistic
+   halfnorm          -- Half Normal
+   halfgennorm       -- Generalized Half Normal
+   hypsecant         -- Hyperbolic Secant
+   invgamma          -- Inverse Gamma
+   invgauss          -- Inverse Gaussian
+   invweibull        -- Inverse Weibull
+   johnsonsb         -- Johnson SB
+   johnsonsu         -- Johnson SU
+   kappa4            -- Kappa 4 parameter
+   kappa3            -- Kappa 3 parameter
+   ksone             -- Distribution of Kolmogorov-Smirnov one-sided test statistic
+   kstwo             -- Distribution of Kolmogorov-Smirnov two-sided test statistic
+   kstwobign         -- Limiting Distribution of scaled Kolmogorov-Smirnov two-sided test statistic.
+   laplace           -- Laplace
+   laplace_asymmetric    -- Asymmetric Laplace
+   levy              -- Levy
+   levy_l            -- Left-skewed Levy
+   levy_stable       -- Levy Stable
+   logistic          -- Logistic
+   loggamma          -- Log-Gamma
+   loglaplace        -- Log-Laplace (Log Double Exponential)
+   lognorm           -- Log-Normal
+   loguniform        -- Log-Uniform
+   lomax             -- Lomax (Pareto of the second kind)
+   maxwell           -- Maxwell
+   mielke            -- Mielke's Beta-Kappa
+   moyal             -- Moyal
+   nakagami          -- Nakagami
+   ncx2              -- Non-central chi-squared
+   ncf               -- Non-central F
+   nct               -- Non-central Student's T
+   norm              -- Normal (Gaussian)
+   norminvgauss      -- Normal Inverse Gaussian
+   pareto            -- Pareto
+   pearson3          -- Pearson type III
+   powerlaw          -- Power-function
+   powerlognorm      -- Power log normal
+   powernorm         -- Power normal
+   rdist             -- R-distribution
+   rayleigh          -- Rayleigh
+   rice              -- Rice
+   recipinvgauss     -- Reciprocal Inverse Gaussian
+   semicircular      -- Semicircular
+   skewcauchy        -- Skew Cauchy
+   skewnorm          -- Skew normal
+   studentized_range    -- Studentized Range
+   t                 -- Student's T
+   trapezoid         -- Trapezoidal
+   triang            -- Triangular
+   truncexpon        -- Truncated Exponential
+   truncnorm         -- Truncated Normal
+   truncpareto       -- Truncated Pareto
+   truncweibull_min  -- Truncated minimum Weibull distribution
+   tukeylambda       -- Tukey-Lambda
+   uniform           -- Uniform
+   vonmises          -- Von-Mises (Circular)
+   vonmises_line     -- Von-Mises (Line)
+   wald              -- Wald
+   weibull_min       -- Minimum Weibull (see Frechet)
+   weibull_max       -- Maximum Weibull (see Frechet)
+   wrapcauchy        -- Wrapped Cauchy
+
+Multivariate distributions
+--------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   multivariate_normal    -- Multivariate normal distribution
+   matrix_normal          -- Matrix normal distribution
+   dirichlet              -- Dirichlet
+   wishart                -- Wishart
+   invwishart             -- Inverse Wishart
+   multinomial            -- Multinomial distribution
+   special_ortho_group    -- SO(N) group
+   ortho_group            -- O(N) group
+   unitary_group          -- U(N) group
+   random_correlation     -- random correlation matrices
+   multivariate_t         -- Multivariate t-distribution
+   multivariate_hypergeom -- Multivariate hypergeometric distribution
+   random_table           -- Distribution of random tables with given marginals
+   uniform_direction      -- Uniform distribution on S(N-1)
+
+`scipy.stats.multivariate_normal` methods accept instances
+of the following class to represent the covariance.
+
+.. autosummary::
+   :toctree: generated/
+
+   Covariance             -- Representation of a covariance matrix
+
+
+Discrete distributions
+----------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   bernoulli                -- Bernoulli
+   betabinom                -- Beta-Binomial
+   binom                    -- Binomial
+   boltzmann                -- Boltzmann (Truncated Discrete Exponential)
+   dlaplace                 -- Discrete Laplacian
+   geom                     -- Geometric
+   hypergeom                -- Hypergeometric
+   logser                   -- Logarithmic (Log-Series, Series)
+   nbinom                   -- Negative Binomial
+   nchypergeom_fisher       -- Fisher's Noncentral Hypergeometric
+   nchypergeom_wallenius    -- Wallenius's Noncentral Hypergeometric
+   nhypergeom               -- Negative Hypergeometric
+   planck                   -- Planck (Discrete Exponential)
+   poisson                  -- Poisson
+   randint                  -- Discrete Uniform
+   skellam                  -- Skellam
+   yulesimon                -- Yule-Simon
+   zipf                     -- Zipf (Zeta)
+   zipfian                  -- Zipfian
+
+An overview of statistical functions is given below.  Many of these functions
+have a similar version in `scipy.stats.mstats` which works for masked arrays.
+
+Summary statistics
+==================
+
+.. autosummary::
+   :toctree: generated/
+
+   describe          -- Descriptive statistics
+   gmean             -- Geometric mean
+   hmean             -- Harmonic mean
+   pmean             -- Power mean
+   kurtosis          -- Fisher or Pearson kurtosis
+   mode              -- Modal value
+   moment            -- Central moment
+   expectile         -- Expectile
+   skew              -- Skewness
+   kstat             --
+   kstatvar          --
+   tmean             -- Truncated arithmetic mean
+   tvar              -- Truncated variance
+   tmin              --
+   tmax              --
+   tstd              --
+   tsem              --
+   variation         -- Coefficient of variation
+   find_repeats
+   trim_mean
+   gstd              -- Geometric Standard Deviation
+   iqr
+   sem
+   bayes_mvs
+   mvsdist
+   entropy
+   differential_entropy
+   median_abs_deviation
+
+Frequency statistics
+====================
+
+.. autosummary::
+   :toctree: generated/
+
+   cumfreq
+   percentileofscore
+   scoreatpercentile
+   relfreq
+
+.. autosummary::
+   :toctree: generated/
+
+   binned_statistic     -- Compute a binned statistic for a set of data.
+   binned_statistic_2d  -- Compute a 2-D binned statistic for a set of data.
+   binned_statistic_dd  -- Compute a d-D binned statistic for a set of data.
+
+Correlation functions
+=====================
+
+.. autosummary::
+   :toctree: generated/
+
+   f_oneway
+   alexandergovern
+   pearsonr
+   spearmanr
+   pointbiserialr
+   kendalltau
+   weightedtau
+   somersd
+   linregress
+   siegelslopes
+   theilslopes
+   multiscale_graphcorr
+
+Statistical tests
+=================
+
+.. autosummary::
+   :toctree: generated/
+
+   ttest_1samp
+   ttest_ind
+   ttest_ind_from_stats
+   ttest_rel
+   chisquare
+   cramervonmises
+   cramervonmises_2samp
+   power_divergence
+   kstest
+   ks_1samp
+   ks_2samp
+   epps_singleton_2samp
+   mannwhitneyu
+   tiecorrect
+   rankdata
+   ranksums
+   wilcoxon
+   kruskal
+   friedmanchisquare
+   brunnermunzel
+   combine_pvalues
+   jarque_bera
+   page_trend_test
+   tukey_hsd
+   poisson_means_test
+
+.. autosummary::
+   :toctree: generated/
+
+   ansari
+   bartlett
+   levene
+   shapiro
+   anderson
+   anderson_ksamp
+   binom_test
+   binomtest
+   fligner
+   median_test
+   mood
+   skewtest
+   kurtosistest
+   normaltest
+   goodness_of_fit
+
+
+Quasi-Monte Carlo
+=================
+
+.. toctree::
+   :maxdepth: 4
+
+   stats.qmc
+
+Resampling Methods
+==================
+
+.. autosummary::
+   :toctree: generated/
+
+   bootstrap
+   permutation_test
+   monte_carlo_test
+
+Masked statistics functions
+===========================
+
+.. toctree::
+
+   stats.mstats
+
+
+Other statistical functionality
+===============================
+
+Transformations
+---------------
+
+.. autosummary::
+   :toctree: generated/
+
+   boxcox
+   boxcox_normmax
+   boxcox_llf
+   yeojohnson
+   yeojohnson_normmax
+   yeojohnson_llf
+   obrientransform
+   sigmaclip
+   trimboth
+   trim1
+   zmap
+   zscore
+   gzscore
+
+Statistical distances
+---------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   wasserstein_distance
+   energy_distance
+
+Sampling
+--------
+
+.. toctree::
+   :maxdepth: 4
+
+   stats.sampling
+
+Random variate generation / CDF Inversion
+-----------------------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   rvs_ratio_uniforms
+
+Distribution Fitting
+--------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   fit
+
+Directional statistical functions
+---------------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   directional_stats
+   circmean
+   circvar
+   circstd
+
+Contingency table functions
+---------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   chi2_contingency
+   contingency.crosstab
+   contingency.expected_freq
+   contingency.margins
+   contingency.relative_risk
+   contingency.association
+   contingency.odds_ratio
+   fisher_exact
+   barnard_exact
+   boschloo_exact
+
+Plot-tests
+----------
+
+.. autosummary::
+   :toctree: generated/
+
+   ppcc_max
+   ppcc_plot
+   probplot
+   boxcox_normplot
+   yeojohnson_normplot
+
+Univariate and multivariate kernel density estimation
+-----------------------------------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   gaussian_kde
+
+Warnings / Errors used in :mod:`scipy.stats`
+--------------------------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   DegenerateDataWarning
+   ConstantInputWarning
+   NearConstantInputWarning
+   FitError
+
+"""
+
+from ._warnings_errors import (ConstantInputWarning, NearConstantInputWarning,
+                               DegenerateDataWarning, FitError)
+from ._stats_py import *
+from ._variation import variation
+from .distributions import *
+from ._morestats import *
+from ._binomtest import binomtest
+from ._binned_statistic import *
+from ._kde import gaussian_kde
+from . import mstats
+from . import qmc
+from ._multivariate import *
+from . import contingency
+from .contingency import chi2_contingency
+from ._resampling import bootstrap, monte_carlo_test, permutation_test
+from ._entropy import *
+from ._hypotests import *
+from ._rvs_sampling import rvs_ratio_uniforms
+from ._page_trend_test import page_trend_test
+from ._mannwhitneyu import mannwhitneyu
+from ._fit import fit, goodness_of_fit
+from ._covariance import Covariance
+
+# Deprecated namespaces, to be removed in v2.0.0
+from . import (
+    biasedurn, kde, morestats, mstats_basic, mstats_extras, mvn, statlib, stats
+)
+
+__all__ = [s for s in dir() if not s.startswith("_")]  # Keep public names only.
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_axis_nan_policy.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_axis_nan_policy.py
new file mode 100644
index 00000000..937ba611
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_axis_nan_policy.py
@@ -0,0 +1,605 @@
+# Many scipy.stats functions support `axis` and `nan_policy` parameters.
+# When the two are combined, it can be tricky to get all the behavior just
+# right. This file contains utility functions useful for scipy.stats functions
+# that support `axis` and `nan_policy`, including a decorator that
+# automatically adds `axis` and `nan_policy` arguments to a function.
+
+import numpy as np
+from functools import wraps
+from scipy._lib._docscrape import FunctionDoc, Parameter
+from scipy._lib._util import _contains_nan
+import inspect
+
+
+def _broadcast_arrays(arrays, axis=None):
+    """
+    Broadcast shapes of arrays, ignoring incompatibility of specified axes
+    """
+    new_shapes = _broadcast_array_shapes(arrays, axis=axis)
+    if axis is None:
+        new_shapes = [new_shapes]*len(arrays)
+    return [np.broadcast_to(array, new_shape)
+            for array, new_shape in zip(arrays, new_shapes)]
+
+
+def _broadcast_array_shapes(arrays, axis=None):
+    """
+    Broadcast shapes of arrays, ignoring incompatibility of specified axes
+    """
+    shapes = [np.asarray(arr).shape for arr in arrays]
+    return _broadcast_shapes(shapes, axis)
+
+
+def _broadcast_shapes(shapes, axis=None):
+    """
+    Broadcast shapes, ignoring incompatibility of specified axes
+    """
+    if not shapes:
+        return shapes
+
+    # input validation
+    if axis is not None:
+        axis = np.atleast_1d(axis)
+        axis_int = axis.astype(int)
+        if not np.array_equal(axis_int, axis):
+            raise np.AxisError('`axis` must be an integer, a '
+                               'tuple of integers, or `None`.')
+        axis = axis_int
+
+    # First, ensure all shapes have same number of dimensions by prepending 1s.
+    n_dims = max([len(shape) for shape in shapes])
+    new_shapes = np.ones((len(shapes), n_dims), dtype=int)
+    for row, shape in zip(new_shapes, shapes):
+        row[len(row)-len(shape):] = shape  # can't use negative indices (-0:)
+
+    # Remove the shape elements of the axes to be ignored, but remember them.
+    if axis is not None:
+        axis[axis < 0] = n_dims + axis[axis < 0]
+        axis = np.sort(axis)
+        if axis[-1] >= n_dims or axis[0] < 0:
+            message = (f"`axis` is out of bounds "
+                       f"for array of dimension {n_dims}")
+            raise np.AxisError(message)
+
+        if len(np.unique(axis)) != len(axis):
+            raise np.AxisError("`axis` must contain only distinct elements")
+
+        removed_shapes = new_shapes[:, axis]
+        new_shapes = np.delete(new_shapes, axis, axis=1)
+
+    # If arrays are broadcastable, shape elements that are 1 may be replaced
+    # with a corresponding non-1 shape element. Assuming arrays are
+    # broadcastable, that final shape element can be found with:
+    new_shape = np.max(new_shapes, axis=0)
+    # except in case of an empty array:
+    new_shape *= new_shapes.all(axis=0)
+
+    # Among all arrays, there can only be one unique non-1 shape element.
+    # Therefore, if any non-1 shape element does not match what we found
+    # above, the arrays must not be broadcastable after all.
+    if np.any(~((new_shapes == 1) | (new_shapes == new_shape))):
+        raise ValueError("Array shapes are incompatible for broadcasting.")
+
+    if axis is not None:
+        # Add back the shape elements that were ignored
+        new_axis = axis - np.arange(len(axis))
+        new_shapes = [tuple(np.insert(new_shape, new_axis, removed_shape))
+                      for removed_shape in removed_shapes]
+        return new_shapes
+    else:
+        return tuple(new_shape)
+
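+# Editor's example of the helper above (illustrative): shapes that conflict
+# only along the ignored axis still broadcast, and each output shape keeps
+# its own length along that axis:
+#
+#     _broadcast_shapes([(5, 2, 1), (9, 3)], axis=1)  # -> [(5, 2, 3), (5, 9, 3)]
+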
+
+def _broadcast_array_shapes_remove_axis(arrays, axis=None):
+    """
+    Broadcast shapes of arrays, dropping specified axes
+
+    Given a sequence of arrays `arrays` and an integer or tuple `axis`, find
+    the shape of the broadcast result after consuming/dropping `axis`.
+    In other words, return output shape of a typical hypothesis test on
+    `arrays` vectorized along `axis`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.zeros((5, 2, 1))
+    >>> b = np.zeros((9, 3))
+    >>> _broadcast_array_shapes_remove_axis((a, b), 1)
+    (5, 3)
+    """
+    # Note that here, `axis=None` means do not consume/drop any axes - _not_
+    # ravel arrays before broadcasting.
+    shapes = [arr.shape for arr in arrays]
+    return _broadcast_shapes_remove_axis(shapes, axis)
+
+
+def _broadcast_shapes_remove_axis(shapes, axis=None):
+    """
+    Broadcast shapes, dropping specified axes
+
+    Same as _broadcast_array_shapes_remove_axis, but given a sequence
+    of array shapes `shapes` instead of the arrays themselves.
+    """
+    shapes = _broadcast_shapes(shapes, axis)
+    shape = shapes[0]
+    if axis is not None:
+        shape = np.delete(shape, axis)
+    return tuple(shape)
+
+
+def _broadcast_concatenate(arrays, axis):
+    """Concatenate arrays along an axis with broadcasting."""
+    arrays = _broadcast_arrays(arrays, axis)
+    res = np.concatenate(arrays, axis=axis)
+    return res
+
+
+# TODO: add support for `axis` tuples
+def _remove_nans(samples, paired):
+    "Remove nans from paired or unpaired 1D samples"
+    # potential optimization: don't copy arrays that don't contain nans
+    if not paired:
+        return [sample[~np.isnan(sample)] for sample in samples]
+
+    # for paired samples, we need to remove the whole pair when any part
+    # has a nan
+    nans = np.isnan(samples[0])
+    for sample in samples[1:]:
+        nans = nans | np.isnan(sample)
+    not_nans = ~nans
+    return [sample[not_nans] for sample in samples]
+
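+# Editor's example (illustrative): with paired samples, a NaN anywhere in a
+# pair removes the whole pair from every sample:
+#
+#     _remove_nans([np.array([1.0, np.nan, 3.0]), np.array([4.0, 5.0, 6.0])],
+#                  paired=True)  # -> [array([1., 3.]), array([4., 6.])]
+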
+
+def _remove_sentinel(samples, paired, sentinel):
+    "Remove sentinel values from paired or unpaired 1D samples"
+    # could consolidate with `_remove_nans`, but it's not quite as simple as
+    # passing `sentinel=np.nan` because `(np.nan == np.nan) is False`
+
+    # potential optimization: don't copy arrays that don't contain sentinel
+    if not paired:
+        return [sample[sample != sentinel] for sample in samples]
+
+    # for paired samples, we need to remove the whole pair when any part
+    # equals the sentinel value
+    sentinels = (samples[0] == sentinel)
+    for sample in samples[1:]:
+        sentinels = sentinels | (sample == sentinel)
+    not_sentinels = ~sentinels
+    return [sample[not_sentinels] for sample in samples]
+
+
+def _masked_arrays_2_sentinel_arrays(samples):
+    # masked arrays in `samples` are converted to regular arrays, and values
+    # corresponding with masked elements are replaced with a sentinel value
+
+    # return without modifying arrays if none have a mask
+    has_mask = False
+    for sample in samples:
+        mask = getattr(sample, 'mask', False)
+        has_mask = has_mask or np.any(mask)
+    if not has_mask:
+        return samples, None  # None means there is no sentinel value
+
+    # Choose a sentinel value. We can't use `np.nan`, because sentinel (masked)
+    # values are always omitted, but there are different nan policies.
+    dtype = np.result_type(*samples)
+    dtype = dtype if np.issubdtype(dtype, np.number) else np.float64
+    for i in range(len(samples)):
+        # Things get more complicated if the arrays are of different types.
+        # We could have different sentinel values for each array, but
+        # the purpose of this code is convenience, not efficiency.
+        samples[i] = samples[i].astype(dtype, copy=False)
+
+    inexact = np.issubdtype(dtype, np.inexact)
+    info = np.finfo if inexact else np.iinfo
+    max_possible, min_possible = info(dtype).max, info(dtype).min
+    nextafter = np.nextafter if inexact else (lambda x, _: x - 1)
+
+    sentinel = max_possible
+    # For simplicity, min_possible/np.infs are not candidate sentinel values
+    while sentinel > min_possible:
+        for sample in samples:
+            if np.any(sample == sentinel):  # choose a new sentinel value
+                sentinel = nextafter(sentinel, -np.inf)
+                break
+        else:  # when sentinel value is OK, break the while loop
+            break
+    else:
+        message = ("This function replaces masked elements with sentinel "
+                   "values, but the data contains all distinct values of this "
+                   "data type. Consider promoting the dtype to `np.float64`.")
+        raise ValueError(message)
+
+    # replace masked elements with sentinel value
+    out_samples = []
+    for sample in samples:
+        mask = getattr(sample, 'mask', None)
+        if mask is not None:  # turn all masked arrays into sentinel arrays
+            mask = np.broadcast_to(mask, sample.shape)
+            sample = sample.data.copy() if np.any(mask) else sample.data
+            sample = np.asarray(sample)  # `sample.data` could be a memoryview?
+            sample[mask] = sentinel
+        out_samples.append(sample)
+
+    return out_samples, sentinel
+
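+# Editor's sketch of the mechanism above (illustrative):
+#
+#     a = np.ma.masked_array([1.0, 2.0, 3.0], mask=[False, True, False])
+#     out, sentinel = _masked_arrays_2_sentinel_arrays([a])
+#     # out[0] is [1.0, sentinel, 3.0] with sentinel == np.finfo(np.float64).max
+#     _remove_sentinel(out, paired=False, sentinel=sentinel)  # -> [array([1., 3.])]
+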
+
+def _check_empty_inputs(samples, axis):
+    """
+    Check for empty sample; return appropriate output for a vectorized hypotest
+    """
+    # if none of the samples are empty, we need to perform the test
+    if not any((sample.size == 0 for sample in samples)):
+        return None
+    # otherwise, the statistic and p-value will be either empty arrays or
+    # arrays with NaNs. Produce the appropriate array and return it.
+    output_shape = _broadcast_array_shapes_remove_axis(samples, axis)
+    output = np.ones(output_shape) * np.nan
+    return output
+
+
+def _add_reduced_axes(res, reduced_axes, keepdims):
+    """
+    Add reduced axes back to all the arrays in the result object
+    if keepdims = True.
+    """
+    return ([np.expand_dims(output, reduced_axes) for output in res]
+            if keepdims else res)
+
+
+# Standard docstring / signature entries for `axis`, `nan_policy`, `keepdims`
+_name = 'axis'
+_desc = (
+    """If an int, the axis of the input along which to compute the statistic.
+The statistic of each axis-slice (e.g. row) of the input will appear in a
+corresponding element of the output.
+If ``None``, the input will be raveled before computing the statistic."""
+    .split('\n'))
+
+
+def _get_axis_params(default_axis=0, _name=_name, _desc=_desc):  # bind NOW
+    _type = f"int or None, default: {default_axis}"
+    _axis_parameter_doc = Parameter(_name, _type, _desc)
+    _axis_parameter = inspect.Parameter(_name,
+                                        inspect.Parameter.KEYWORD_ONLY,
+                                        default=default_axis)
+    return _axis_parameter_doc, _axis_parameter
+
+
+_name = 'nan_policy'
+_type = "{'propagate', 'omit', 'raise'}"
+_desc = (
+    """Defines how to handle input NaNs.
+
+- ``propagate``: if a NaN is present in the axis slice (e.g. row) along
+  which the statistic is computed, the corresponding entry of the output
+  will be NaN.
+- ``omit``: NaNs will be omitted when performing the calculation.
+  If insufficient data remains in the axis slice along which the
+  statistic is computed, the corresponding entry of the output will be
+  NaN.
+- ``raise``: if a NaN is present, a ``ValueError`` will be raised."""
+    .split('\n'))
+_nan_policy_parameter_doc = Parameter(_name, _type, _desc)
+_nan_policy_parameter = inspect.Parameter(_name,
+                                          inspect.Parameter.KEYWORD_ONLY,
+                                          default='propagate')
+
+_name = 'keepdims'
+_type = "bool, default: False"
+_desc = (
+    """If this is set to True, the axes which are reduced are left
+in the result as dimensions with size one. With this option,
+the result will broadcast correctly against the input array."""
+    .split('\n'))
+_keepdims_parameter_doc = Parameter(_name, _type, _desc)
+_keepdims_parameter = inspect.Parameter(_name,
+                                        inspect.Parameter.KEYWORD_ONLY,
+                                        default=False)
+
+_standard_note_addition = (
+    """\nBeginning in SciPy 1.9, ``np.matrix`` inputs (not recommended for new
+code) are converted to ``np.ndarray`` before the calculation is performed. In
+this case, the output will be a scalar or ``np.ndarray`` of appropriate shape
+rather than a 2D ``np.matrix``. Similarly, while masked elements of masked
+arrays are ignored, the output will be a scalar or ``np.ndarray`` rather than a
+masked array with ``mask=False``.""").split('\n')
+
+
+def _axis_nan_policy_factory(tuple_to_result, default_axis=0,
+                             n_samples=1, paired=False,
+                             result_to_tuple=None, too_small=0,
+                             n_outputs=2, kwd_samples=[]):
+    """Factory for a wrapper that adds axis/nan_policy params to a function.
+
+    Parameters
+    ----------
+    tuple_to_result : callable
+        Callable that returns an object of the type returned by the function
+        being wrapped (e.g. the namedtuple or dataclass returned by a
+        statistical test) provided the separate components (e.g. statistic,
+        pvalue).
+    default_axis : int, default: 0
+        The default value of the axis argument. Standard is 0 except when
+        backwards compatibility demands otherwise (e.g. `None`).
+    n_samples : int or callable, default: 1
+        The number of data samples accepted by the function
+        (e.g. `mannwhitneyu`), a callable that accepts a dictionary of
+        parameters passed into the function and returns the number of data
+        samples (e.g. `wilcoxon`), or `None` to indicate an arbitrary number
+        of samples (e.g. `kruskal`).
+    paired : {False, True}
+        Whether the function being wrapped treats the samples as paired (i.e.
+        corresponding elements of each sample should be considered as different
+        components of the same sample.)
+    result_to_tuple : callable, optional
+        Function that unpacks the results of the function being wrapped into
+        a tuple. This is essentially the inverse of `tuple_to_result`. Default
+        is `None`, which is appropriate for statistical tests that return a
+        statistic, pvalue tuple (rather than, e.g., a non-iterable dataclass).
+    too_small : int, default: 0
+        The largest unacceptably small sample for the function being wrapped.
+        For example, some functions require samples of size two or more,
+        otherwise they raise an error. This argument prevents the error from
+        being raised when
+        input is not 1D and instead places a NaN in the corresponding element
+        of the result.
+    n_outputs : int or callable, default: 2
+        The number of outputs produced by the function given 1d sample(s). For
+        example, hypothesis tests that return a namedtuple or result object
+        with attributes ``statistic`` and ``pvalue`` use the default
+        ``n_outputs=2``; summary statistics with scalar output use
+        ``n_outputs=1``. Alternatively, may be a callable that accepts a
+        dictionary of arguments passed into the wrapped function and returns
+        the number of outputs corresponding with those arguments.
+    kwd_samples : sequence, default: []
+        The names of keyword parameters that should be treated as samples. For
+        example, `gmean` accepts as its first argument a sample `a` but
+        also `weights` as a fourth, optional keyword argument. In this case, we
+        use `n_samples=1` and `kwd_samples=['weights']`.
+    """
+
+    if result_to_tuple is None:
+        def result_to_tuple(res):
+            return res
+
+    def is_too_small(samples):
+        for sample in samples:
+            if len(sample) <= too_small:
+                return True
+        return False
+
+    def axis_nan_policy_decorator(hypotest_fun_in):
+        @wraps(hypotest_fun_in)
+        def axis_nan_policy_wrapper(*args, _no_deco=False, **kwds):
+
+            if _no_deco:  # for testing, decorator does nothing
+                return hypotest_fun_in(*args, **kwds)
+
+            # We need to be flexible about whether position or keyword
+            # arguments are used, but we need to make sure users don't pass
+            # both for the same parameter. To complicate matters, some
+            # functions accept samples with *args, and some functions already
+            # accept `axis` and `nan_policy` as positional arguments.
+            # The strategy is to make sure that there is no duplication
+            # between `args` and `kwds`, combine the two into `kwds`, then
+            # extract the samples, `nan_policy`, and `axis` from `kwds`, as
+            # they are dealt with separately.
+
+            # Check for intersection between positional and keyword args
+            params = list(inspect.signature(hypotest_fun_in).parameters)
+            if n_samples is None:
+                # Give unique names to each positional sample argument
+                # Note that *args can't be provided as a keyword argument
+                params = [f"arg{i}" for i in range(len(args))] + params[1:]
+
+            d_args = dict(zip(params, args))
+            intersection = set(d_args) & set(kwds)
+            if intersection:
+                message = (f"{hypotest_fun_in.__name__}() got multiple values "
+                           f"for argument '{list(intersection)[0]}'")
+                raise TypeError(message)
+
+            # Consolidate other positional and keyword args into `kwds`
+            kwds.update(d_args)
+
+            # rename avoids UnboundLocalError
+            if callable(n_samples):
+                # Future refactoring idea: no need for callable n_samples.
+                # Just replace `n_samples` and `kwd_samples` with a single
+                # list of the names of all samples, and treat all of them
+                # as `kwd_samples` are treated below.
+                n_samp = n_samples(kwds)
+            else:
+                n_samp = n_samples or len(args)
+
+            # get the number of outputs
+            n_out = n_outputs  # rename to avoid UnboundLocalError
+            if callable(n_out):
+                n_out = n_out(kwds)
+
+            # If necessary, rearrange function signature: accept other samples
+            # as positional args right after the first n_samp args
+            kwd_samp = [name for name in kwd_samples
+                        if kwds.get(name, None) is not None]
+            n_kwd_samp = len(kwd_samp)
+            if not kwd_samp:
+                hypotest_fun_out = hypotest_fun_in
+            else:
+                def hypotest_fun_out(*samples, **kwds):
+                    new_kwds = dict(zip(kwd_samp, samples[n_samp:]))
+                    kwds.update(new_kwds)
+                    return hypotest_fun_in(*samples[:n_samp], **kwds)
+
+            # Extract the things we need here
+            samples = [np.atleast_1d(kwds.pop(param))
+                       for param in (params[:n_samp] + kwd_samp)]
+            vectorized = 'axis' in params
+            axis = kwds.pop('axis', default_axis)
+            nan_policy = kwds.pop('nan_policy', 'propagate')
+            keepdims = kwds.pop("keepdims", False)
+            del args  # avoid the possibility of passing both `args` and `kwds`
+
+            # convert masked arrays to regular arrays with sentinel values
+            samples, sentinel = _masked_arrays_2_sentinel_arrays(samples)
+
+            # standardize to always work along last axis
+            reduced_axes = axis
+            if axis is None:
+                if samples:
+                    # when axis=None, take the maximum of all dimensions since
+                    # all the dimensions are reduced.
+                    n_dims = np.max([sample.ndim for sample in samples])
+                    reduced_axes = tuple(range(n_dims))
+                samples = [np.asarray(sample.ravel()) for sample in samples]
+            else:
+                samples = _broadcast_arrays(samples, axis=axis)
+                axis = np.atleast_1d(axis)
+                n_axes = len(axis)
+                # move all axes in `axis` to the end to be raveled
+                samples = [np.moveaxis(sample, axis, range(-len(axis), 0))
+                           for sample in samples]
+                shapes = [sample.shape for sample in samples]
+                # New shape is unchanged for all axes _not_ in `axis`
+                # At the end, we append the product of the shapes of the axes
+                # in `axis`. Appending -1 doesn't work for zero-size arrays!
+                new_shapes = [shape[:-n_axes] + (np.prod(shape[-n_axes:]),)
+                              for shape in shapes]
+                samples = [sample.reshape(new_shape)
+                           for sample, new_shape in zip(samples, new_shapes)]
+            axis = -1  # work over the last axis
+
+            # if axis is not needed, just handle nan_policy and return
+            ndims = np.array([sample.ndim for sample in samples])
+            if np.all(ndims <= 1):
+                # Addresses nan_policy == "raise"
+                contains_nans = []
+                for sample in samples:
+                    contains_nan, _ = _contains_nan(sample, nan_policy)
+                    contains_nans.append(contains_nan)
+
+                # Addresses nan_policy == "propagate"
+                # Consider adding option to let function propagate nans, but
+                # currently the hypothesis tests this is applied to do not
+                # propagate nans in a sensible way
+                if any(contains_nans) and nan_policy == 'propagate':
+                    res = np.full(n_out, np.nan)
+                    res = _add_reduced_axes(res, reduced_axes, keepdims)
+                    return tuple_to_result(*res)
+
+                # Addresses nan_policy == "omit"
+                if any(contains_nans) and nan_policy == 'omit':
+                    # consider passing in contains_nans
+                    samples = _remove_nans(samples, paired)
+
+                # ideally, this is what the behavior would be:
+                # if is_too_small(samples):
+                #     return tuple_to_result(np.nan, np.nan)
+                # but some existing functions raise exceptions, and changing
+                # behavior of those would break backward compatibility.
+
+                if sentinel:
+                    samples = _remove_sentinel(samples, paired, sentinel)
+                res = hypotest_fun_out(*samples, **kwds)
+                res = result_to_tuple(res)
+                res = _add_reduced_axes(res, reduced_axes, keepdims)
+                return tuple_to_result(*res)
+
+            # check for empty input
+            # ideally, move this to the top, but some existing functions raise
+            # exceptions for empty input, so overriding it would break
+            # backward compatibility.
+            empty_output = _check_empty_inputs(samples, axis)
+            if empty_output is not None:
+                res = [empty_output.copy() for i in range(n_out)]
+                res = _add_reduced_axes(res, reduced_axes, keepdims)
+                return tuple_to_result(*res)
+
+            # otherwise, concatenate all samples along axis, remembering where
+            # each separate sample begins
+            lengths = np.array([sample.shape[axis] for sample in samples])
+            split_indices = np.cumsum(lengths)
+            x = _broadcast_concatenate(samples, axis)
+
+            # Addresses nan_policy == "raise"
+            contains_nan, _ = _contains_nan(x, nan_policy)
+
+            if vectorized and not contains_nan and not sentinel:
+                res = hypotest_fun_out(*samples, axis=axis, **kwds)
+                res = result_to_tuple(res)
+                res = _add_reduced_axes(res, reduced_axes, keepdims)
+                return tuple_to_result(*res)
+
+            # Addresses nan_policy == "omit"
+            if contains_nan and nan_policy == 'omit':
+                def hypotest_fun(x):
+                    samples = np.split(x, split_indices)[:n_samp+n_kwd_samp]
+                    samples = _remove_nans(samples, paired)
+                    if sentinel:
+                        samples = _remove_sentinel(samples, paired, sentinel)
+                    if is_too_small(samples):
+                        return np.full(n_out, np.nan)
+                    return result_to_tuple(hypotest_fun_out(*samples, **kwds))
+
+            # Addresses nan_policy == "propagate"
+            elif contains_nan and nan_policy == 'propagate':
+                def hypotest_fun(x):
+                    if np.isnan(x).any():
+                        return np.full(n_out, np.nan)
+
+                    samples = np.split(x, split_indices)[:n_samp+n_kwd_samp]
+                    if sentinel:
+                        samples = _remove_sentinel(samples, paired, sentinel)
+                    if is_too_small(samples):
+                        return np.full(n_out, np.nan)
+                    return result_to_tuple(hypotest_fun_out(*samples, **kwds))
+
+            else:
+                def hypotest_fun(x):
+                    samples = np.split(x, split_indices)[:n_samp+n_kwd_samp]
+                    if sentinel:
+                        samples = _remove_sentinel(samples, paired, sentinel)
+                    if is_too_small(samples):
+                        return np.full(n_out, np.nan)
+                    return result_to_tuple(hypotest_fun_out(*samples, **kwds))
+
+            x = np.moveaxis(x, axis, 0)
+            res = np.apply_along_axis(hypotest_fun, axis=0, arr=x)
+            res = _add_reduced_axes(res, reduced_axes, keepdims)
+            return tuple_to_result(*res)
+
+        _axis_parameter_doc, _axis_parameter = _get_axis_params(default_axis)
+        doc = FunctionDoc(axis_nan_policy_wrapper)
+        parameter_names = [param.name for param in doc['Parameters']]
+        if 'axis' in parameter_names:
+            doc['Parameters'][parameter_names.index('axis')] = (
+                _axis_parameter_doc)
+        else:
+            doc['Parameters'].append(_axis_parameter_doc)
+        if 'nan_policy' in parameter_names:
+            doc['Parameters'][parameter_names.index('nan_policy')] = (
+                _nan_policy_parameter_doc)
+        else:
+            doc['Parameters'].append(_nan_policy_parameter_doc)
+        if 'keepdims' in parameter_names:
+            doc['Parameters'][parameter_names.index('keepdims')] = (
+                _keepdims_parameter_doc)
+        else:
+            doc['Parameters'].append(_keepdims_parameter_doc)
+        doc['Notes'] += _standard_note_addition
+        doc = str(doc).split("\n", 1)[1]  # remove signature
+        axis_nan_policy_wrapper.__doc__ = str(doc)
+
+        sig = inspect.signature(axis_nan_policy_wrapper)
+        parameters = sig.parameters
+        parameter_list = list(parameters.values())
+        if 'axis' not in parameters:
+            parameter_list.append(_axis_parameter)
+        if 'nan_policy' not in parameters:
+            parameter_list.append(_nan_policy_parameter)
+        if 'keepdims' not in parameters:
+            parameter_list.append(_keepdims_parameter)
+        sig = sig.replace(parameters=parameter_list)
+        axis_nan_policy_wrapper.__signature__ = sig
+
+        return axis_nan_policy_wrapper
+    return axis_nan_policy_decorator
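+
+
+# Editor's sketch (hypothetical usage, not part of this module): wrapping a
+# toy two-sample statistic so it gains `axis`, `nan_policy`, and `keepdims`:
+#
+#     from collections import namedtuple
+#     ToyResult = namedtuple('ToyResult', ('statistic', 'pvalue'))
+#
+#     @_axis_nan_policy_factory(ToyResult, n_samples=2)
+#     def toy_diff_of_means(x, y):
+#         return ToyResult(np.mean(x) - np.mean(y), np.nan)
+#
+#     toy_diff_of_means(np.ones((2, 3)), np.zeros((2, 3)), axis=1)
+#     toy_diff_of_means([1.0, np.nan], [2.0, 3.0], nan_policy='omit')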
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_biasedurn.pxd b/__packaged__/coreml/.python_dependencies/scipy/stats/_biasedurn.pxd
new file mode 100644
index 00000000..92785f08
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_biasedurn.pxd
@@ -0,0 +1,27 @@
+# Declare the class with cdef
+cdef extern from "biasedurn/stocc.h" nogil:
+    cdef cppclass CFishersNCHypergeometric:
+        CFishersNCHypergeometric(int, int, int, double, double) except +
+        int mode()
+        double mean()
+        double variance()
+        double probability(int x)
+        double moments(double * mean, double * var)
+
+    cdef cppclass CWalleniusNCHypergeometric:
+        CWalleniusNCHypergeometric() except +
+        CWalleniusNCHypergeometric(int, int, int, double, double) except +
+        int mode()
+        double mean()
+        double variance()
+        double probability(int x)
+        double moments(double * mean, double * var)
+
+    cdef cppclass StochasticLib3:
+        StochasticLib3(int seed) except +
+        double Random() except +
+        void SetAccuracy(double accur)
+        int FishersNCHyp (int n, int m, int N, double odds) except +
+        int WalleniusNCHyp (int n, int m, int N, double odds) except +
+        double(*next_double)()
+        double(*next_normal)(const double m, const double s)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_binned_statistic.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_binned_statistic.py
new file mode 100644
index 00000000..9837fbcd
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_binned_statistic.py
@@ -0,0 +1,795 @@
+import builtins
+import numpy as np
+from numpy.testing import suppress_warnings
+from operator import index
+from collections import namedtuple
+
+__all__ = ['binned_statistic',
+           'binned_statistic_2d',
+           'binned_statistic_dd']
+
+
+BinnedStatisticResult = namedtuple('BinnedStatisticResult',
+                                   ('statistic', 'bin_edges', 'binnumber'))
+
+
+def binned_statistic(x, values, statistic='mean',
+                     bins=10, range=None):
+    """
+    Compute a binned statistic for one or more sets of data.
+
+    This is a generalization of a histogram function.  A histogram divides
+    the space into bins, and returns the count of the number of points in
+    each bin.  This function allows the computation of the sum, mean, median,
+    or other statistic of the values (or set of values) within each bin.
+
+    Parameters
+    ----------
+    x : (N,) array_like
+        A sequence of values to be binned.
+    values : (N,) array_like or list of (N,) array_like
+        The data on which the statistic will be computed.  This must be
+        the same shape as `x`, or a set of sequences - each the same shape as
+        `x`.  If `values` is a set of sequences, the statistic will be computed
+        on each independently.
+    statistic : string or callable, optional
+        The statistic to compute (default is 'mean').
+        The following statistics are available:
+
+          * 'mean' : compute the mean of values for points within each bin.
+            Empty bins will be represented by NaN.
+          * 'std' : compute the standard deviation within each bin. This
+            is implicitly calculated with ddof=0.
+          * 'median' : compute the median of values for points within each
+            bin. Empty bins will be represented by NaN.
+          * 'count' : compute the count of points within each bin.  This is
+            identical to an unweighted histogram.  `values` array is not
+            referenced.
+          * 'sum' : compute the sum of values for points within each bin.
+            This is identical to a weighted histogram.
+          * 'min' : compute the minimum of values for points within each bin.
+            Empty bins will be represented by NaN.
+          * 'max' : compute the maximum of values for points within each bin.
+            Empty bins will be represented by NaN.
+          * function : a user-defined function which takes a 1D array of
+            values, and outputs a single numerical statistic. This function
+            will be called on the values in each bin.  Empty bins will be
+            represented by function([]), or NaN if this raises an error.
+
+    bins : int or sequence of scalars, optional
+        If `bins` is an int, it defines the number of equal-width bins in the
+        given range (10 by default).  If `bins` is a sequence, it defines the
+        bin edges, including the rightmost edge, allowing for non-uniform bin
+        widths.  Values in `x` that are smaller than the lowest bin edge are
+        assigned to bin number 0, values beyond the highest bin are assigned to
+        ``bins[-1]``.  If the bin edges are specified, the number of bins
+        will be ``len(bins) - 1``.
+    range : (float, float) or [(float, float)], optional
+        The lower and upper range of the bins.  If not provided, range
+        is simply ``(x.min(), x.max())``.  Values outside the range are
+        ignored.
+
+    Returns
+    -------
+    statistic : array
+        The values of the selected statistic in each bin.
+    bin_edges : array of dtype float
+        Return the bin edges ``(length(statistic)+1)``.
+    binnumber : 1-D ndarray of ints
+        Indices of the bins (corresponding to `bin_edges`) in which each value
+        of `x` belongs.  Same length as `values`.  A binnumber of `i` means the
+        corresponding value is between (bin_edges[i-1], bin_edges[i]).
+
+    See Also
+    --------
+    numpy.digitize, numpy.histogram, binned_statistic_2d, binned_statistic_dd
+
+    Notes
+    -----
+    All but the last (righthand-most) bin is half-open.  In other words, if
+    `bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
+    but excluding 2) and the second ``[2, 3)``.  The last bin, however, is
+    ``[3, 4]``, which *includes* 4.
+
+    .. versionadded:: 0.11.0
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> import matplotlib.pyplot as plt
+
+    First some basic examples:
+
+    Create two evenly spaced bins in the range of the given sample, and sum the
+    corresponding values in each of those bins:
+
+    >>> values = [1.0, 1.0, 2.0, 1.5, 3.0]
+    >>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
+    BinnedStatisticResult(statistic=array([4. , 4.5]),
+            bin_edges=array([1., 4., 7.]), binnumber=array([1, 1, 1, 2, 2]))
+
+    Multiple arrays of values can also be passed.  The statistic is calculated
+    on each set independently:
+
+    >>> values = [[1.0, 1.0, 2.0, 1.5, 3.0], [2.0, 2.0, 4.0, 3.0, 6.0]]
+    >>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
+    BinnedStatisticResult(statistic=array([[4. , 4.5],
+           [8. , 9. ]]), bin_edges=array([1., 4., 7.]),
+           binnumber=array([1, 1, 1, 2, 2]))
+
+    >>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean',
+    ...                        bins=3)
+    BinnedStatisticResult(statistic=array([1., 2., 4.]),
+            bin_edges=array([1., 2., 3., 4.]),
+            binnumber=array([1, 2, 1, 2, 3]))
+
+    As a second example, we now generate some random data of sailing boat speed
+    as a function of wind speed, and then determine how fast our boat is for
+    certain wind speeds:
+
+    >>> rng = np.random.default_rng()
+    >>> windspeed = 8 * rng.random(500)
+    >>> boatspeed = .3 * windspeed**.5 + .2 * rng.random(500)
+    >>> bin_means, bin_edges, binnumber = stats.binned_statistic(windspeed,
+    ...                 boatspeed, statistic='median', bins=[1,2,3,4,5,6,7])
+    >>> plt.figure()
+    >>> plt.plot(windspeed, boatspeed, 'b.', label='raw data')
+    >>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=5,
+    ...            label='binned statistic of data')
+    >>> plt.legend()
+
+    Now we can use ``binnumber`` to select all datapoints with a windspeed
+    below 1:
+
+    >>> low_boatspeed = boatspeed[binnumber == 0]
+
+    As a final example, we will use ``bin_edges`` and ``binnumber`` to make a
+    plot of a distribution that shows the mean and distribution around that
+    mean per bin, on top of a regular histogram and the probability
+    distribution function:
+
+    >>> x = np.linspace(0, 5, num=500)
+    >>> x_pdf = stats.maxwell.pdf(x)
+    >>> samples = stats.maxwell.rvs(size=10000)
+
+    >>> bin_means, bin_edges, binnumber = stats.binned_statistic(x, x_pdf,
+    ...         statistic='mean', bins=25)
+    >>> bin_width = (bin_edges[1] - bin_edges[0])
+    >>> bin_centers = bin_edges[1:] - bin_width/2
+
+    >>> plt.figure()
+    >>> plt.hist(samples, bins=50, density=True, histtype='stepfilled',
+    ...          alpha=0.2, label='histogram of data')
+    >>> plt.plot(x, x_pdf, 'r-', label='analytical pdf')
+    >>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=2,
+    ...            label='binned statistic of data')
+    >>> plt.plot((binnumber - 0.5) * bin_width, x_pdf, 'g.', alpha=0.5)
+    >>> plt.legend(fontsize=10)
+    >>> plt.show()
+
+    """
+    try:
+        N = len(bins)
+    except TypeError:
+        N = 1
+
+    if N != 1:
+        bins = [np.asarray(bins, float)]
+
+    if range is not None:
+        if len(range) == 2:
+            range = [range]
+
+    medians, edges, binnumbers = binned_statistic_dd(
+        [x], values, statistic, bins, range)
+
+    return BinnedStatisticResult(medians, edges[0], binnumbers)
+
+
+BinnedStatistic2dResult = namedtuple('BinnedStatistic2dResult',
+                                     ('statistic', 'x_edge', 'y_edge',
+                                      'binnumber'))
+
+
+def binned_statistic_2d(x, y, values, statistic='mean',
+                        bins=10, range=None, expand_binnumbers=False):
+    """
+    Compute a bidimensional binned statistic for one or more sets of data.
+
+    This is a generalization of a histogram2d function.  A histogram divides
+    the space into bins, and returns the count of the number of points in
+    each bin.  This function allows the computation of the sum, mean, median,
+    or other statistic of the values (or set of values) within each bin.
+
+    Parameters
+    ----------
+    x : (N,) array_like
+        A sequence of values to be binned along the first dimension.
+    y : (N,) array_like
+        A sequence of values to be binned along the second dimension.
+    values : (N,) array_like or list of (N,) array_like
+        The data on which the statistic will be computed.  This must be
+        the same shape as `x`, or a list of sequences - each with the same
+        shape as `x`.  If `values` is such a list, the statistic will be
+        computed on each independently.
+    statistic : string or callable, optional
+        The statistic to compute (default is 'mean').
+        The following statistics are available:
+
+          * 'mean' : compute the mean of values for points within each bin.
+            Empty bins will be represented by NaN.
+          * 'std' : compute the standard deviation within each bin. This
+            is implicitly calculated with ddof=0.
+          * 'median' : compute the median of values for points within each
+            bin. Empty bins will be represented by NaN.
+          * 'count' : compute the count of points within each bin.  This is
+            identical to an unweighted histogram.  `values` array is not
+            referenced.
+          * 'sum' : compute the sum of values for points within each bin.
+            This is identical to a weighted histogram.
+          * 'min' : compute the minimum of values for points within each bin.
+            Empty bins will be represented by NaN.
+          * 'max' : compute the maximum of values for points within each bin.
+            Empty bins will be represented by NaN.
+          * function : a user-defined function which takes a 1D array of
+            values, and outputs a single numerical statistic. This function
+            will be called on the values in each bin.  Empty bins will be
+            represented by function([]), or NaN if the function raises an
+            error.
+
+    bins : int or [int, int] or array_like or [array, array], optional
+        The bin specification:
+
+          * the number of bins for the two dimensions (nx = ny = bins),
+          * the number of bins in each dimension (nx, ny = bins),
+          * the bin edges for the two dimensions (x_edge = y_edge = bins),
+          * the bin edges in each dimension (x_edge, y_edge = bins).
+
+        If the bin edges are specified, the number of bins will be,
+        (nx = len(x_edge)-1, ny = len(y_edge)-1).
+
+    range : (2,2) array_like, optional
+        The leftmost and rightmost edges of the bins along each dimension
+        (if not specified explicitly in the `bins` parameter):
+        [[xmin, xmax], [ymin, ymax]]. All values outside of this range will be
+        considered outliers and not tallied in the histogram.
+    expand_binnumbers : bool, optional
+        'False' (default): the returned `binnumber` is a shape (N,) array of
+        linearized bin indices.
+        'True': the returned `binnumber` is 'unraveled' into a shape (2,N)
+        ndarray, where each row gives the bin numbers in the corresponding
+        dimension.
+        See the `binnumber` returned value, and the `Examples` section.
+
+        .. versionadded:: 0.17.0
+
+    Returns
+    -------
+    statistic : (nx, ny) ndarray
+        The values of the selected statistic in each two-dimensional bin.
+    x_edge : (nx + 1) ndarray
+        The bin edges along the first dimension.
+    y_edge : (ny + 1) ndarray
+        The bin edges along the second dimension.
+    binnumber : (N,) array of ints or (2,N) ndarray of ints
+        This assigns to each element of `sample` an integer that represents the
+        bin in which this observation falls.  The representation depends on the
+        `expand_binnumbers` argument.  See `Notes` for details.
+
+
+    See Also
+    --------
+    numpy.digitize, numpy.histogram2d, binned_statistic, binned_statistic_dd
+
+    Notes
+    -----
+    Binedges:
+    All but the last (righthand-most) bin is half-open.  In other words, if
+    `bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
+    but excluding 2) and the second ``[2, 3)``.  The last bin, however, is
+    ``[3, 4]``, which *includes* 4.
+
+    `binnumber`:
+    This returned argument assigns to each element of `sample` an integer that
+    represents the bin in which it belongs.  The representation depends on the
+    `expand_binnumbers` argument. If 'False' (default): The returned
+    `binnumber` is a shape (N,) array of linearized indices mapping each
+    element of `sample` to its corresponding bin (using row-major ordering).
+    Note that the returned linearized bin indices are used for an array with
+    extra bins on the outer binedges to capture values outside of the defined
+    bin bounds.
+    If 'True': The returned `binnumber` is a shape (2,N) ndarray where
+    each row indicates bin placements for each dimension respectively.  In each
+    dimension, a binnumber of `i` means the corresponding value is between
+    (D_edge[i-1], D_edge[i]), where 'D' is either 'x' or 'y'.
+
+    .. versionadded:: 0.11.0
+
+    Examples
+    --------
+    >>> from scipy import stats
+
+    Calculate the counts with explicit bin-edges:
+
+    >>> x = [0.1, 0.1, 0.1, 0.6]
+    >>> y = [2.1, 2.6, 2.1, 2.1]
+    >>> binx = [0.0, 0.5, 1.0]
+    >>> biny = [2.0, 2.5, 3.0]
+    >>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx, biny])
+    >>> ret.statistic
+    array([[2., 1.],
+           [1., 0.]])
+
+    The bin in which each sample is placed is given by the returned
+    `binnumber`.  By default, these are the linearized bin indices:
+
+    >>> ret.binnumber
+    array([5, 6, 5, 9])
+
+    The bin indices can also be expanded into separate entries for each
+    dimension using the `expand_binnumbers` parameter:
+
+    >>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx, biny],
+    ...                                 expand_binnumbers=True)
+    >>> ret.binnumber
+    array([[1, 1, 1, 2],
+           [1, 2, 1, 1]])
+
+    This shows that the first three elements fall in xbin 1 and the fourth
+    in xbin 2, and similarly for y.
+
+    """
+
+    # This code is based on np.histogram2d
+    try:
+        N = len(bins)
+    except TypeError:
+        N = 1
+
+    if N != 1 and N != 2:
+        xedges = yedges = np.asarray(bins, float)
+        bins = [xedges, yedges]
+
+    medians, edges, binnumbers = binned_statistic_dd(
+        [x, y], values, statistic, bins, range,
+        expand_binnumbers=expand_binnumbers)
+
+    return BinnedStatistic2dResult(medians, edges[0], edges[1], binnumbers)
+
+
+BinnedStatisticddResult = namedtuple('BinnedStatisticddResult',
+                                     ('statistic', 'bin_edges',
+                                      'binnumber'))
+
+
+def _bincount(x, weights):
+    if np.iscomplexobj(weights):
+        a = np.bincount(x, np.real(weights))
+        b = np.bincount(x, np.imag(weights))
+        z = a + b*1j
+
+    else:
+        z = np.bincount(x, weights)
+    return z
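+
+# An illustrative sketch (not part of the SciPy source): np.bincount does not
+# accept complex weights, so _bincount above splits them into real and
+# imaginary parts and recombines the two counts, e.g.
+#     >>> import numpy as np
+#     >>> x = np.array([0, 1, 1])
+#     >>> w = np.array([1+2j, 3+4j, 5+6j])
+#     >>> np.bincount(x, np.real(w)) + 1j*np.bincount(x, np.imag(w))
+#     array([1.+2.j, 8.+10.j])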
+
+
+def binned_statistic_dd(sample, values, statistic='mean',
+                        bins=10, range=None, expand_binnumbers=False,
+                        binned_statistic_result=None):
+    """
+    Compute a multidimensional binned statistic for a set of data.
+
+    This is a generalization of a histogramdd function.  A histogram divides
+    the space into bins, and returns the count of the number of points in
+    each bin.  This function allows the computation of the sum, mean, median,
+    or other statistic of the values within each bin.
+
+    Parameters
+    ----------
+    sample : array_like
+        Data to histogram passed as a sequence of N arrays of length D, or
+        as an (N,D) array.
+    values : (N,) array_like or list of (N,) array_like
+        The data on which the statistic will be computed.  This must be
+        the same shape as `sample`, or a list of sequences - each with the
+        same shape as `sample`.  If `values` is such a list, the statistic
+        will be computed on each independently.
+    statistic : string or callable, optional
+        The statistic to compute (default is 'mean').
+        The following statistics are available:
+
+          * 'mean' : compute the mean of values for points within each bin.
+            Empty bins will be represented by NaN.
+          * 'median' : compute the median of values for points within each
+            bin. Empty bins will be represented by NaN.
+          * 'count' : compute the count of points within each bin.  This is
+            identical to an unweighted histogram.  `values` array is not
+            referenced.
+          * 'sum' : compute the sum of values for points within each bin.
+            This is identical to a weighted histogram.
+          * 'std' : compute the standard deviation within each bin. This
+            is implicitly calculated with ddof=0. If the number of values
+            within a given bin is 0 or 1, the computed standard deviation value
+            will be 0 for the bin.
+          * 'min' : compute the minimum of values for points within each bin.
+            Empty bins will be represented by NaN.
+          * 'max' : compute the maximum of values for points within each bin.
+            Empty bins will be represented by NaN.
+          * function : a user-defined function which takes a 1D array of
+            values, and outputs a single numerical statistic. This function
+            will be called on the values in each bin.  Empty bins will be
+            represented by function([]), or NaN if the function raises an
+            error.
+
+    bins : sequence or positive int, optional
+        The bin specification must be in one of the following forms:
+
+          * A sequence of arrays describing the bin edges along each dimension.
+          * The number of bins for each dimension (nx, ny, ... = bins).
+          * The number of bins for all dimensions (nx = ny = ... = bins).
+    range : sequence, optional
+        A sequence of lower and upper bin edges to be used if the edges are
+        not given explicitly in `bins`. Defaults to the minimum and maximum
+        values along each dimension.
+    expand_binnumbers : bool, optional
+        'False' (default): the returned `binnumber` is a shape (N,) array of
+        linearized bin indices.
+        'True': the returned `binnumber` is 'unraveled' into a shape (D,N)
+        ndarray, where each row gives the bin numbers in the corresponding
+        dimension.
+        See the `binnumber` returned value, and the `Examples` section of
+        `binned_statistic_2d`.
+    binned_statistic_result : BinnedStatisticddResult, optional
+        Result of a previous call to the function in order to reuse bin edges
+        and bin numbers with new values and/or a different statistic.
+        To reuse bin numbers, `expand_binnumbers` must have been set to False
+        (the default).
+
+        .. versionadded:: 0.17.0
+
+    Returns
+    -------
+    statistic : ndarray, shape(nx1, nx2, nx3,...)
+        The values of the selected statistic in each bin.
+    bin_edges : list of ndarrays
+        A list of D arrays describing the (nxi + 1) bin edges for each
+        dimension.
+    binnumber : (N,) array of ints or (D,N) ndarray of ints
+        This assigns to each element of `sample` an integer that represents the
+        bin in which this observation falls.  The representation depends on the
+        `expand_binnumbers` argument.  See `Notes` for details.
+
+
+    See Also
+    --------
+    numpy.digitize, numpy.histogramdd, binned_statistic, binned_statistic_2d
+
+    Notes
+    -----
+    Binedges:
+    All but the last (righthand-most) bin is half-open in each dimension.  In
+    other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is
+    ``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``.  The
+    last bin, however, is ``[3, 4]``, which *includes* 4.
+
+    `binnumber`:
+    This returned argument assigns to each element of `sample` an integer that
+    represents the bin in which it belongs.  The representation depends on the
+    `expand_binnumbers` argument. If 'False' (default): The returned
+    `binnumber` is a shape (N,) array of linearized indices mapping each
+    element of `sample` to its corresponding bin (using row-major ordering).
+    If 'True': The returned `binnumber` is a shape (D,N) ndarray where
+    each row indicates bin placements for each dimension respectively.  In each
+    dimension, a binnumber of `i` means the corresponding value is between
+    (bin_edges[D][i-1], bin_edges[D][i]), for each dimension 'D'.
+
+    .. versionadded:: 0.11.0
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> import matplotlib.pyplot as plt
+    >>> from mpl_toolkits.mplot3d import Axes3D
+
+    Take an array of 600 (x, y) coordinates as an example.
+    `binned_statistic_dd` can handle arrays of higher dimension `D`, but a
+    plot of dimension `D+1` is required.
+
+    >>> mu = np.array([0., 1.])
+    >>> sigma = np.array([[1., -0.5],[-0.5, 1.5]])
+    >>> multinormal = stats.multivariate_normal(mu, sigma)
+    >>> data = multinormal.rvs(size=600, random_state=235412)
+    >>> data.shape
+    (600, 2)
+
+    Create bins and count how many arrays fall in each bin:
+
+    >>> N = 60
+    >>> x = np.linspace(-3, 3, N)
+    >>> y = np.linspace(-3, 4, N)
+    >>> ret = stats.binned_statistic_dd(data, np.arange(600), bins=[x, y],
+    ...                                 statistic='count')
+    >>> bincounts = ret.statistic
+
+    Set the volume and the location of bars:
+
+    >>> dx = x[1] - x[0]
+    >>> dy = y[1] - y[0]
+    >>> x, y = np.meshgrid(x[:-1]+dx/2, y[:-1]+dy/2)
+    >>> z = 0
+
+    >>> bincounts = bincounts.ravel()
+    >>> x = x.ravel()
+    >>> y = y.ravel()
+
+    >>> fig = plt.figure()
+    >>> ax = fig.add_subplot(111, projection='3d')
+    >>> with np.errstate(divide='ignore'):   # silence random axes3d warning
+    ...     ax.bar3d(x, y, z, dx, dy, bincounts)
+
+    Reuse bin numbers and bin edges with new values:
+
+    >>> ret2 = stats.binned_statistic_dd(data, -np.arange(600),
+    ...                                  binned_statistic_result=ret,
+    ...                                  statistic='mean')
+    """
+    known_stats = ['mean', 'median', 'count', 'sum', 'std', 'min', 'max']
+    if not callable(statistic) and statistic not in known_stats:
+        raise ValueError('invalid statistic %r' % (statistic,))
+
+    try:
+        bins = index(bins)
+    except TypeError:
+        # bins is not an integer
+        pass
+    # If bins was an integer-like object, now it is an actual Python int.
+
+    # NOTE: for _bin_edges(), see e.g. gh-11365
+    if isinstance(bins, int) and not np.isfinite(sample).all():
+        raise ValueError('%r contains non-finite values.' % (sample,))
+
+    # `Ndim` is the number of dimensions (e.g. `2` for `binned_statistic_2d`)
+    # `Dlen` is the length of elements along each dimension.
+    # This code is based on np.histogramdd
+    try:
+        # `sample` is an ND-array.
+        Dlen, Ndim = sample.shape
+    except (AttributeError, ValueError):
+        # `sample` is a sequence of 1D arrays.
+        sample = np.atleast_2d(sample).T
+        Dlen, Ndim = sample.shape
+
+    # Store initial shape of `values` to preserve it in the output
+    values = np.asarray(values)
+    input_shape = list(values.shape)
+    # Make sure that `values` is 2D to iterate over rows
+    values = np.atleast_2d(values)
+    Vdim, Vlen = values.shape
+
+    # Make sure `values` match `sample`
+    if statistic != 'count' and Vlen != Dlen:
+        raise AttributeError('The number of `values` elements must match the '
+                             'length of each `sample` dimension.')
+
+    try:
+        M = len(bins)
+        if M != Ndim:
+            raise AttributeError('The dimension of bins must be equal '
+                                 'to the dimension of the sample x.')
+    except TypeError:
+        bins = Ndim * [bins]
+
+    if binned_statistic_result is None:
+        nbin, edges, dedges = _bin_edges(sample, bins, range)
+        binnumbers = _bin_numbers(sample, nbin, edges, dedges)
+    else:
+        edges = binned_statistic_result.bin_edges
+        nbin = np.array([len(edges[i]) + 1 for i in builtins.range(Ndim)])
+        # +1 for outlier bins
+        dedges = [np.diff(edges[i]) for i in builtins.range(Ndim)]
+        binnumbers = binned_statistic_result.binnumber
+
+    # Avoid overflow with double precision. Complex `values` -> `complex128`.
+    result_type = np.result_type(values, np.float64)
+    result = np.empty([Vdim, nbin.prod()], dtype=result_type)
+
+    if statistic in {'mean', np.mean}:
+        result.fill(np.nan)
+        flatcount = _bincount(binnumbers, None)
+        a = flatcount.nonzero()
+        for vv in builtins.range(Vdim):
+            flatsum = _bincount(binnumbers, values[vv])
+            result[vv, a] = flatsum[a] / flatcount[a]
+    elif statistic in {'std', np.std}:
+        result.fill(np.nan)
+        flatcount = _bincount(binnumbers, None)
+        a = flatcount.nonzero()
+        for vv in builtins.range(Vdim):
+            flatsum = _bincount(binnumbers, values[vv])
+            delta = values[vv] - flatsum[binnumbers] / flatcount[binnumbers]
+            std = np.sqrt(
+                _bincount(binnumbers, delta*np.conj(delta))[a] / flatcount[a]
+            )
+            result[vv, a] = std
+        result = np.real(result)
+    elif statistic == 'count':
+        result = np.empty([Vdim, nbin.prod()], dtype=np.float64)
+        result.fill(0)
+        flatcount = _bincount(binnumbers, None)
+        a = np.arange(len(flatcount))
+        result[:, a] = flatcount[np.newaxis, :]
+    elif statistic in {'sum', np.sum}:
+        result.fill(0)
+        for vv in builtins.range(Vdim):
+            flatsum = _bincount(binnumbers, values[vv])
+            a = np.arange(len(flatsum))
+            result[vv, a] = flatsum
+    elif statistic in {'median', np.median}:
+        result.fill(np.nan)
+        for vv in builtins.range(Vdim):
+            i = np.lexsort((values[vv], binnumbers))
+            _, j, counts = np.unique(binnumbers[i],
+                                     return_index=True, return_counts=True)
+            mid = j + (counts - 1) / 2
+            mid_a = values[vv, i][np.floor(mid).astype(int)]
+            mid_b = values[vv, i][np.ceil(mid).astype(int)]
+            medians = (mid_a + mid_b) / 2
+            result[vv, binnumbers[i][j]] = medians
+    elif statistic in {'min', np.min}:
+        result.fill(np.nan)
+        for vv in builtins.range(Vdim):
+            i = np.argsort(values[vv])[::-1]  # Reversed so the min is last
+            result[vv, binnumbers[i]] = values[vv, i]
+    elif statistic in {'max', np.max}:
+        result.fill(np.nan)
+        for vv in builtins.range(Vdim):
+            i = np.argsort(values[vv])
+            result[vv, binnumbers[i]] = values[vv, i]
+    elif callable(statistic):
+        with np.errstate(invalid='ignore'), suppress_warnings() as sup:
+            sup.filter(RuntimeWarning)
+            try:
+                null = statistic([])
+            except Exception:
+                null = np.nan
+        if np.iscomplexobj(null):
+            result = result.astype(np.complex128)
+        result.fill(null)
+        try:
+            _calc_binned_statistic(
+                Vdim, binnumbers, result, values, statistic
+            )
+        except ValueError:
+            result = result.astype(np.complex128)
+            _calc_binned_statistic(
+                Vdim, binnumbers, result, values, statistic
+            )
+
+    # Shape into a proper matrix
+    result = result.reshape(np.append(Vdim, nbin))
+
+    # Remove outliers (indices 0 and -1 for each bin-dimension).
+    core = tuple([slice(None)] + Ndim * [slice(1, -1)])
+    result = result[core]
+
+    # Unravel binnumbers into an ndarray, each row the bins for each dimension
+    if expand_binnumbers and Ndim > 1:
+        binnumbers = np.asarray(np.unravel_index(binnumbers, nbin))
+
+    if np.any(result.shape[1:] != nbin - 2):
+        raise RuntimeError('Internal Shape Error')
+
+    # Reshape to have output (`result`) match input (`values`) shape
+    result = result.reshape(input_shape[:-1] + list(nbin-2))
+
+    return BinnedStatisticddResult(result, edges, binnumbers)
+
+
+def _calc_binned_statistic(Vdim, bin_numbers, result, values, stat_func):
+    unique_bin_numbers = np.unique(bin_numbers)
+    for vv in builtins.range(Vdim):
+        bin_map = _create_binned_data(bin_numbers, unique_bin_numbers,
+                                      values, vv)
+        for i in unique_bin_numbers:
+            stat = stat_func(np.array(bin_map[i]))
+            if np.iscomplexobj(stat) and not np.iscomplexobj(result):
+                raise ValueError("The statistic function returns complex ")
+            result[vv, i] = stat
+
+
+def _create_binned_data(bin_numbers, unique_bin_numbers, values, vv):
+    """ Create hashmap of bin ids to values in bins
+    key: bin number
+    value: list of binned data
+    """
+    bin_map = dict()
+    for i in unique_bin_numbers:
+        bin_map[i] = []
+    for i in builtins.range(len(bin_numbers)):
+        bin_map[bin_numbers[i]].append(values[vv, i])
+    return bin_map
+
+
+def _bin_edges(sample, bins=None, range=None):
+    """ Create edge arrays
+    """
+    Dlen, Ndim = sample.shape
+
+    nbin = np.empty(Ndim, int)    # Number of bins in each dimension
+    edges = Ndim * [None]         # Bin edges for each dim (will be 2D array)
+    dedges = Ndim * [None]        # Spacing between edges (will be 2D array)
+
+    # Select range for each dimension
+    # Used only if number of bins is given.
+    if range is None:
+        smin = np.atleast_1d(np.array(sample.min(axis=0), float))
+        smax = np.atleast_1d(np.array(sample.max(axis=0), float))
+    else:
+        if len(range) != Ndim:
+            raise ValueError(
+                f"range given for {len(range)} dimensions; {Ndim} required")
+        smin = np.empty(Ndim)
+        smax = np.empty(Ndim)
+        for i in builtins.range(Ndim):
+            if range[i][1] < range[i][0]:
+                raise ValueError(
+                    "In {}range, start must be <= stop".format(
+                        f"dimension {i + 1} of " if Ndim > 1 else ""))
+            smin[i], smax[i] = range[i]
+
+    # Make sure the bins have a finite width.
+    for i in builtins.range(len(smin)):
+        if smin[i] == smax[i]:
+            smin[i] = smin[i] - .5
+            smax[i] = smax[i] + .5
+
+    # Preserve sample floating point precision in bin edges
+    edges_dtype = (sample.dtype if np.issubdtype(sample.dtype, np.floating)
+                   else float)
+
+    # Create edge arrays
+    for i in builtins.range(Ndim):
+        if np.isscalar(bins[i]):
+            nbin[i] = bins[i] + 2  # +2 for outlier bins
+            edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1,
+                                   dtype=edges_dtype)
+        else:
+            edges[i] = np.asarray(bins[i], edges_dtype)
+            nbin[i] = len(edges[i]) + 1  # +1 for outlier bins
+        dedges[i] = np.diff(edges[i])
+
+    nbin = np.asarray(nbin)
+
+    return nbin, edges, dedges
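+
+# An illustrative sketch (not part of the SciPy source): with a scalar bin
+# count, `nbin` includes the two outlier bins while `edges` holds only the
+# interior boundaries, e.g.
+#     >>> import numpy as np
+#     >>> nbin, edges, dedges = _bin_edges(np.array([[0.], [1.]]), bins=[2])
+#     >>> nbin, edges[0]
+#     (array([4]), array([0. , 0.5, 1. ]))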
+
+
+def _bin_numbers(sample, nbin, edges, dedges):
+    """Compute the bin number each sample falls into, in each dimension
+    """
+    Dlen, Ndim = sample.shape
+
+    sampBin = [
+        np.digitize(sample[:, i], edges[i])
+        for i in range(Ndim)
+    ]
+
+    # Using `digitize`, values that fall on an edge are put in the right bin.
+    # For the rightmost bin, we want values equal to the right
+    # edge to be counted in the last bin, and not as an outlier.
+    for i in range(Ndim):
+        # Find the rounding precision
+        dedges_min = dedges[i].min()
+        if dedges_min == 0:
+            raise ValueError('The smallest edge difference is numerically 0.')
+        decimal = int(-np.log10(dedges_min)) + 6
+        # Find which points are on the rightmost edge.
+        on_edge = np.where((sample[:, i] >= edges[i][-1]) &
+                           (np.around(sample[:, i], decimal) ==
+                            np.around(edges[i][-1], decimal)))[0]
+        # Shift these points one bin to the left.
+        sampBin[i][on_edge] -= 1
+
+    # Compute the sample indices in the flattened statistic matrix.
+    binnumbers = np.ravel_multi_index(sampBin, nbin)
+
+    return binnumbers
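+
+# An illustrative sketch (not part of the SciPy source): np.digitize places a
+# value equal to the last edge into the right outlier bin; the shift above
+# moves it back into the last real bin, e.g.
+#     >>> import numpy as np
+#     >>> np.digitize([0.5, 2.0], np.array([0., 1., 2.]))
+#     array([1, 3])
+# so 2.0 initially lands in the outlier bin (index 3) and is shifted to 2.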
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_binomtest.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_binomtest.py
new file mode 100644
index 00000000..cc9106a4
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_binomtest.py
@@ -0,0 +1,375 @@
+from math import sqrt
+import numpy as np
+from scipy._lib._util import _validate_int
+from scipy.optimize import brentq
+from scipy.special import ndtri
+from ._discrete_distns import binom
+from ._common import ConfidenceInterval
+
+
+class BinomTestResult:
+    """
+    Result of `scipy.stats.binomtest`.
+
+    Attributes
+    ----------
+    k : int
+        The number of successes (copied from `binomtest` input).
+    n : int
+        The number of trials (copied from `binomtest` input).
+    alternative : str
+        Indicates the alternative hypothesis specified in the input
+        to `binomtest`.  It will be one of ``'two-sided'``, ``'greater'``,
+        or ``'less'``.
+    statistic : float
+        The estimate of the proportion of successes.
+    pvalue : float
+        The p-value of the hypothesis test.
+
+    """
+    def __init__(self, k, n, alternative, statistic, pvalue):
+        self.k = k
+        self.n = n
+        self.alternative = alternative
+        self.statistic = statistic
+        self.pvalue = pvalue
+
+        # add alias for backward compatibility
+        self.proportion_estimate = statistic
+
+    def __repr__(self):
+        s = ("BinomTestResult("
+             f"k={self.k}, "
+             f"n={self.n}, "
+             f"alternative={self.alternative!r}, "
+             f"statistic={self.statistic}, "
+             f"pvalue={self.pvalue})")
+        return s
+
+    def proportion_ci(self, confidence_level=0.95, method='exact'):
+        """
+        Compute the confidence interval for ``statistic``.
+
+        Parameters
+        ----------
+        confidence_level : float, optional
+            Confidence level for the computed confidence interval
+            of the estimated proportion. Default is 0.95.
+        method : {'exact', 'wilson', 'wilsoncc'}, optional
+            Selects the method used to compute the confidence interval
+            for the estimate of the proportion:
+
+            'exact' :
+                Use the Clopper-Pearson exact method [1]_.
+            'wilson' :
+                Wilson's method, without continuity correction ([2]_, [3]_).
+            'wilsoncc' :
+                Wilson's method, with continuity correction ([2]_, [3]_).
+
+            Default is ``'exact'``.
+
+        Returns
+        -------
+        ci : ``ConfidenceInterval`` object
+            The object has attributes ``low`` and ``high`` that hold the
+            lower and upper bounds of the confidence interval.
+
+        References
+        ----------
+        .. [1] C. J. Clopper and E. S. Pearson, The use of confidence or
+               fiducial limits illustrated in the case of the binomial,
+               Biometrika, Vol. 26, No. 4, pp 404-413 (Dec. 1934).
+        .. [2] E. B. Wilson, Probable inference, the law of succession, and
+               statistical inference, J. Amer. Stat. Assoc., 22, pp 209-212
+               (1927).
+        .. [3] Robert G. Newcombe, Two-sided confidence intervals for the
+               single proportion: comparison of seven methods, Statistics
+               in Medicine, 17, pp 857-872 (1998).
+
+        Examples
+        --------
+        >>> from scipy.stats import binomtest
+        >>> result = binomtest(k=7, n=50, p=0.1)
+        >>> result.statistic
+        0.14
+        >>> result.proportion_ci()
+        ConfidenceInterval(low=0.05819170033997342, high=0.26739600249700846)
+        """
+        if method not in ('exact', 'wilson', 'wilsoncc'):
+            raise ValueError("method must be one of 'exact', 'wilson' or "
+                             "'wilsoncc'.")
+        if not (0 <= confidence_level <= 1):
+            raise ValueError('confidence_level must be in the interval '
+                             '[0, 1].')
+        if method == 'exact':
+            low, high = _binom_exact_conf_int(self.k, self.n,
+                                              confidence_level,
+                                              self.alternative)
+        else:
+            # method is 'wilson' or 'wilsoncc'
+            low, high = _binom_wilson_conf_int(self.k, self.n,
+                                               confidence_level,
+                                               self.alternative,
+                                               correction=method == 'wilsoncc')
+        return ConfidenceInterval(low=low, high=high)
+
+
+def _findp(func):
+    try:
+        p = brentq(func, 0, 1)
+    except RuntimeError:
+        raise RuntimeError('numerical solver failed to converge when '
+                           'computing the confidence limits') from None
+    except ValueError as exc:
+        raise ValueError('brentq raised a ValueError; report this to the '
+                         'SciPy developers') from exc
+    return p
+
+
+def _binom_exact_conf_int(k, n, confidence_level, alternative):
+    """
+    Compute the Clopper-Pearson (exact) confidence interval for the
+    proportion of successes in a binomial test.
+
+    Returns the pair ``(plow, phigh)``.
+    """
+    if alternative == 'two-sided':
+        alpha = (1 - confidence_level) / 2
+        if k == 0:
+            plow = 0.0
+        else:
+            plow = _findp(lambda p: binom.sf(k-1, n, p) - alpha)
+        if k == n:
+            phigh = 1.0
+        else:
+            phigh = _findp(lambda p: binom.cdf(k, n, p) - alpha)
+    elif alternative == 'less':
+        alpha = 1 - confidence_level
+        plow = 0.0
+        if k == n:
+            phigh = 1.0
+        else:
+            phigh = _findp(lambda p: binom.cdf(k, n, p) - alpha)
+    elif alternative == 'greater':
+        alpha = 1 - confidence_level
+        if k == 0:
+            plow = 0.0
+        else:
+            plow = _findp(lambda p: binom.sf(k-1, n, p) - alpha)
+        phigh = 1.0
+    return plow, phigh
+
+
+def _binom_wilson_conf_int(k, n, confidence_level, alternative, correction):
+    # This function assumes that the arguments have already been validated.
+    # In particular, `alternative` must be one of 'two-sided', 'less' or
+    # 'greater'.
+    p = k / n
+    if alternative == 'two-sided':
+        z = ndtri(0.5 + 0.5*confidence_level)
+    else:
+        z = ndtri(confidence_level)
+
+    # For reference, the formulas implemented here are from
+    # Newcombe (1998) (ref. [3] in the proportion_ci docstring).
+    denom = 2*(n + z**2)
+    center = (2*n*p + z**2)/denom
+    q = 1 - p
+    if correction:
+        if alternative == 'less' or k == 0:
+            lo = 0.0
+        else:
+            dlo = (1 + z*sqrt(z**2 - 2 - 1/n + 4*p*(n*q + 1))) / denom
+            lo = center - dlo
+        if alternative == 'greater' or k == n:
+            hi = 1.0
+        else:
+            dhi = (1 + z*sqrt(z**2 + 2 - 1/n + 4*p*(n*q - 1))) / denom
+            hi = center + dhi
+    else:
+        delta = z/denom * sqrt(4*n*p*q + z**2)
+        if alternative == 'less' or k == 0:
+            lo = 0.0
+        else:
+            lo = center - delta
+        if alternative == 'greater' or k == n:
+            hi = 1.0
+        else:
+            hi = center + delta
+
+    return lo, hi
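+
+# For reference (Newcombe 1998, ref. [3] in the proportion_ci docstring):
+# without the continuity correction, the interval is center +/- delta with
+#     center = (2*n*p + z**2) / (2*(n + z**2))
+#     delta  = z * sqrt(4*n*p*q + z**2) / (2*(n + z**2))
+# which is exactly what the `else` branch above computes.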
+
+
+def binomtest(k, n, p=0.5, alternative='two-sided'):
+    """
+    Perform a test that the probability of success is p.
+
+    The binomial test [1]_ is a test of the null hypothesis that the
+    probability of success in a Bernoulli experiment is `p`.
+
+    Details of the test can be found in many texts on statistics, such
+    as section 24.5 of [2]_.
+
+    Parameters
+    ----------
+    k : int
+        The number of successes.
+    n : int
+        The number of trials.
+    p : float, optional
+        The hypothesized probability of success, i.e. the expected
+        proportion of successes.  The value must be in the interval
+        ``0 <= p <= 1``. The default value is ``p = 0.5``.
+    alternative : {'two-sided', 'greater', 'less'}, optional
+        Indicates the alternative hypothesis. The default value is
+        'two-sided'.
+
+    Returns
+    -------
+    result : `~scipy.stats._result_classes.BinomTestResult` instance
+        The return value is an object with the following attributes:
+
+        k : int
+            The number of successes (copied from `binomtest` input).
+        n : int
+            The number of trials (copied from `binomtest` input).
+        alternative : str
+            Indicates the alternative hypothesis specified in the input
+            to `binomtest`.  It will be one of ``'two-sided'``, ``'greater'``,
+            or ``'less'``.
+        statistic : float
+            The estimate of the proportion of successes.
+        pvalue : float
+            The p-value of the hypothesis test.
+
+        The object has the following methods:
+
+        proportion_ci(confidence_level=0.95, method='exact') :
+            Compute the confidence interval for ``statistic``.
+
+    Notes
+    -----
+    .. versionadded:: 1.7.0
+
+    References
+    ----------
+    .. [1] Binomial test, https://en.wikipedia.org/wiki/Binomial_test
+    .. [2] Jerrold H. Zar, Biostatistical Analysis (fifth edition),
+           Prentice Hall, Upper Saddle River, New Jersey USA (2010)
+
+    Examples
+    --------
+    >>> from scipy.stats import binomtest
+
+    A car manufacturer claims that no more than 10% of their cars are unsafe.
+    15 cars are inspected for safety, 3 were found to be unsafe. Test the
+    manufacturer's claim:
+
+    >>> result = binomtest(3, n=15, p=0.1, alternative='greater')
+    >>> result.pvalue
+    0.18406106910639114
+
+    The null hypothesis cannot be rejected at the 5% level of significance
+    because the returned p-value is greater than the significance level of 5%.
+
+    The test statistic is equal to the estimated proportion, which is simply
+    ``3/15``:
+
+    >>> result.statistic
+    0.2
+
+    We can use the `proportion_ci()` method of the result to compute the
+    confidence interval of the estimate:
+
+    >>> result.proportion_ci(confidence_level=0.95)
+    ConfidenceInterval(low=0.05684686759024681, high=1.0)
+
+    """
+    k = _validate_int(k, 'k', minimum=0)
+    n = _validate_int(n, 'n', minimum=1)
+    if k > n:
+        raise ValueError('k must not be greater than n.')
+
+    if not (0 <= p <= 1):
+        raise ValueError("p must be in range [0,1]")
+
+    if alternative not in ('two-sided', 'less', 'greater'):
+        raise ValueError("alternative not recognized; \n"
+                         "must be 'two-sided', 'less' or 'greater'")
+    if alternative == 'less':
+        pval = binom.cdf(k, n, p)
+    elif alternative == 'greater':
+        pval = binom.sf(k-1, n, p)
+    else:
+        # alternative is 'two-sided'
+        d = binom.pmf(k, n, p)
+        rerr = 1 + 1e-7
+        if k == p * n:
+            # special case as shortcut, would also be handled by `else` below
+            pval = 1.
+        elif k < p * n:
+            ix = _binary_search_for_binom_tst(lambda x1: -binom.pmf(x1, n, p),
+                                              -d*rerr, np.ceil(p * n), n)
+            # y is the number of terms between the mode and n that are
+            # <= d*rerr.  ix is the first index where a(ix) <= d*rerr
+            # < a(ix-1).  If the equality a(ix) == d*rerr does not hold,
+            # y = n - ix; otherwise ix itself must also be counted, hence
+            # the extra term.  The equality holds only in very rare
+            # situations because of rerr.
+            y = n - ix + int(d*rerr == binom.pmf(ix, n, p))
+            pval = binom.cdf(k, n, p) + binom.sf(n - y, n, p)
+        else:
+            ix = _binary_search_for_binom_tst(lambda x1: binom.pmf(x1, n, p),
+                                              d*rerr, 0, np.floor(p * n))
+            # y is the number of terms between 0 and mode that are <= d*rerr.
+            # we need to add a 1 to account for the 0 index.
+            # For comparing this with old behavior, see
+            # tst_binary_srch_for_binom_tst method in test_morestats.
+            y = ix + 1
+            pval = binom.cdf(y-1, n, p) + binom.sf(k-1, n, p)
+
+        pval = min(1.0, pval)
+
+    result = BinomTestResult(k=k, n=n, alternative=alternative,
+                             statistic=k/n, pvalue=pval)
+    return result
+
+
+def _binary_search_for_binom_tst(a, d, lo, hi):
+    """
+    Conducts an implicit binary search on a function specified by `a`.
+
+    Meant to be used on the binomial PMF for the case of two-sided tests
+    to obtain the value on the other side of the mode where the tail
+    probability should be computed. The values on either side of
+    the mode are always in order, meaning binary search is applicable.
+
+    Parameters
+    ----------
+    a : callable
+      The function over which to perform binary search. Its values
+      for inputs lo and hi should be in ascending order.
+    d : float
+      The value to search.
+    lo : int
+      The lower end of range to search.
+    hi : int
+      The higher end of the range to search.
+
+    Returns
+    -------
+    int
+      The index, i between lo and hi
+      such that a(i)<=d<a(i+1)
+    """
+    while lo < hi:
+        mid = lo + (hi-lo)//2
+        midval = a(mid)
+        if midval < d:
+            lo = mid+1
+        elif midval > d:
+            hi = mid-1
+        else:
+            return mid
+    if a(lo) <= d:
+        return lo
+    else:
+        return lo-1
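+
+# An illustrative sketch (not part of the SciPy source): for a monotonically
+# increasing callable, the search returns the largest index i in [lo, hi]
+# with a(i) <= d, e.g.
+#     >>> _binary_search_for_binom_tst(lambda i: i**2, 10, 0, 5)
+#     3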
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_boost/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_boost/__init__.py
new file mode 100644
index 00000000..68169419
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_boost/__init__.py
@@ -0,0 +1,53 @@
+from scipy.stats._boost.beta_ufunc import (
+    _beta_pdf, _beta_cdf, _beta_sf, _beta_ppf,
+    _beta_isf, _beta_mean, _beta_variance,
+    _beta_skewness, _beta_kurtosis_excess,
+)
+
+from scipy.stats._boost.binom_ufunc import (
+    _binom_pdf, _binom_cdf, _binom_sf, _binom_ppf,
+    _binom_isf, _binom_mean, _binom_variance,
+    _binom_skewness, _binom_kurtosis_excess,
+)
+
+from scipy.stats._boost.nbinom_ufunc import (
+    _nbinom_pdf, _nbinom_cdf, _nbinom_sf, _nbinom_ppf,
+    _nbinom_isf, _nbinom_mean, _nbinom_variance,
+    _nbinom_skewness, _nbinom_kurtosis_excess,
+)
+
+from scipy.stats._boost.hypergeom_ufunc import (
+    _hypergeom_pdf, _hypergeom_cdf, _hypergeom_sf, _hypergeom_ppf,
+    _hypergeom_isf, _hypergeom_mean, _hypergeom_variance,
+    _hypergeom_skewness, _hypergeom_kurtosis_excess,
+)
+
+from scipy.stats._boost.ncf_ufunc import (
+    _ncf_pdf, _ncf_cdf, _ncf_sf, _ncf_ppf,
+    _ncf_isf, _ncf_mean, _ncf_variance,
+    _ncf_skewness, _ncf_kurtosis_excess,
+)
+
+from scipy.stats._boost.ncx2_ufunc import (
+    _ncx2_pdf, _ncx2_cdf, _ncx2_sf, _ncx2_ppf,
+    _ncx2_isf, _ncx2_mean, _ncx2_variance,
+    _ncx2_skewness, _ncx2_kurtosis_excess,
+)
+
+from scipy.stats._boost.nct_ufunc import (
+    _nct_pdf, _nct_cdf, _nct_sf, _nct_ppf,
+    _nct_isf, _nct_mean, _nct_variance,
+    _nct_skewness, _nct_kurtosis_excess,
+)
+
+from scipy.stats._boost.skewnorm_ufunc import (
+    _skewnorm_pdf, _skewnorm_cdf, _skewnorm_sf, _skewnorm_ppf,
+    _skewnorm_isf, _skewnorm_mean, _skewnorm_variance,
+    _skewnorm_skewness, _skewnorm_kurtosis_excess,
+)
+
+from scipy.stats._boost.invgauss_ufunc import (
+    _invgauss_pdf, _invgauss_cdf, _invgauss_sf, _invgauss_ppf,
+    _invgauss_isf, _invgauss_mean, _invgauss_variance,
+    _invgauss_skewness, _invgauss_kurtosis_excess,
+)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_common.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_common.py
new file mode 100644
index 00000000..978f3b46
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_common.py
@@ -0,0 +1,6 @@
+
+from collections import namedtuple
+
+
+ConfidenceInterval = namedtuple("ConfidenceInterval", ["low", "high"])
+ConfidenceInterval.__doc__ = "Class for confidence intervals."
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_constants.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_constants.py
new file mode 100644
index 00000000..b571302a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_constants.py
@@ -0,0 +1,34 @@
+"""
+Statistics-related constants.
+
+"""
+import numpy as np
+
+
+# The smallest representable positive number such that 1.0 + _EPS != 1.0.
+_EPS = np.finfo(float).eps
+
+# The largest [in magnitude] usable floating value.
+_XMAX = np.finfo(float).max
+
+# The log of the largest usable floating value; useful for knowing
+# when exp(something) will overflow
+_LOGXMAX = np.log(_XMAX)
+
+# The smallest [in magnitude] usable floating value.
+_XMIN = np.finfo(float).tiny
+
+# -special.psi(1)
+_EULER = 0.577215664901532860606512090082402431042
+
+# special.zeta(3, 1)  Apery's constant
+_ZETA3 = 1.202056903159594285399738161511449990765
+
+# sqrt(pi)
+_SQRT_PI = 1.772453850905516027298167483341145182798
+
+# sqrt(2/pi)
+_SQRT_2_OVER_PI = 0.7978845608028654
+
+# log(sqrt(2/pi))
+_LOG_SQRT_2_OVER_PI = -0.22579135264472744
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_continuous_distns.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_continuous_distns.py
new file mode 100644
index 00000000..8b2c91e1
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_continuous_distns.py
@@ -0,0 +1,10314 @@
+# -*- coding: utf-8 -*-
+#
+# Author:  Travis Oliphant  2002-2011 with contributions from
+#          SciPy Developers 2004-2011
+#
+import warnings
+from collections.abc import Iterable
+from functools import wraps, cached_property
+import ctypes
+
+import numpy as np
+from numpy.polynomial import Polynomial
+from scipy._lib.doccer import (extend_notes_in_docstring,
+                               replace_notes_in_docstring,
+                               inherit_docstring_from)
+from scipy._lib._ccallback import LowLevelCallable
+from scipy import optimize
+from scipy import integrate
+import scipy.special as sc
+
+import scipy.special._ufuncs as scu
+from scipy._lib._util import _lazyselect, _lazywhere
+from . import _stats
+from ._tukeylambda_stats import (tukeylambda_variance as _tlvar,
+                                 tukeylambda_kurtosis as _tlkurt)
+from ._distn_infrastructure import (
+    get_distribution_names, _kurtosis,
+    rv_continuous, _skew, _get_fixed_fit_value, _check_shape, _ShapeInfo)
+from ._ksstats import kolmogn, kolmognp, kolmogni
+from ._constants import (_XMIN, _EULER, _ZETA3, _SQRT_PI,
+                         _SQRT_2_OVER_PI, _LOG_SQRT_2_OVER_PI)
+import scipy.stats._boost as _boost
+from scipy.optimize import root_scalar
+from scipy.stats._warnings_errors import FitError
+import scipy.stats as stats
+
+
+def _remove_optimizer_parameters(kwds):
+    """
+    Remove the optimizer-related keyword arguments 'loc', 'scale' and
+    'optimizer' from `kwds`.  Then check that `kwds` is empty, and
+    raise `TypeError("Unknown arguments: %s." % kwds)` if it is not.
+
+    This function is used in the fit method of distributions that override
+    the default method and do not use the default optimization code.
+
+    `kwds` is modified in-place.
+    """
+    kwds.pop('loc', None)
+    kwds.pop('scale', None)
+    kwds.pop('optimizer', None)
+    kwds.pop('method', None)
+    if kwds:
+        raise TypeError("Unknown arguments: %s." % kwds)
+
+
+def _call_super_mom(fun):
+    # if fit method is overridden only for MLE and doesn't specify what to do
+    # if method == 'mm', this decorator calls generic implementation
+    @wraps(fun)
+    def wrapper(self, *args, **kwds):
+        method = kwds.get('method', 'mle').lower()
+        if method != 'mle':
+            return super(type(self), self).fit(*args, **kwds)
+        else:
+            return fun(self, *args, **kwds)
+    return wrapper
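+
+# Illustrative (hypothetical call): with this decorator, a distribution whose
+# `fit` override only implements MLE still supports the method-of-moments
+# path, e.g.
+#     norm.fit(data, method='mm')   # delegates to the generic rv_continuous.fit
+# while method='mle' (the default) runs the specialized override.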
+
+
+def _get_left_bracket(fun, rbrack, lbrack=None):
+    # find left bracket for `root_scalar`. A guess for lbrack may be provided.
+    lbrack = lbrack or rbrack - 1
+    diff = rbrack - lbrack
+
+    # if there is no sign change in `fun` between the brackets, expand
+    # rbrack - lbrack until a sign change occurs
+    def interval_contains_root(lbrack, rbrack):
+        # return true if the signs disagree.
+        return np.sign(fun(lbrack)) != np.sign(fun(rbrack))
+
+    while not interval_contains_root(lbrack, rbrack):
+        diff *= 2
+        lbrack = rbrack - diff
+
+        msg = ("The solver could not find a bracket containing a "
+               "root to an MLE first order condition.")
+        if np.isinf(lbrack):
+            raise FitSolverError(msg)
+
+    return lbrack
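+
+# An illustrative sketch (not part of the SciPy source): the bracket width is
+# doubled until `fun` changes sign, e.g. for a root at -5,
+#     >>> _get_left_bracket(lambda x: x + 5, 0)
+#     -8
+# after trying lbrack = -1, -2, -4 (no sign change) and stopping at -8.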
+
+
+class ksone_gen(rv_continuous):
+    r"""Kolmogorov-Smirnov one-sided test statistic distribution.
+
+    This is the distribution of the one-sided Kolmogorov-Smirnov (KS)
+    statistics :math:`D_n^+` and :math:`D_n^-`
+    for a finite sample size ``n >= 1`` (the shape parameter).
+
+    %(before_notes)s
+
+    See Also
+    --------
+    kstwobign, kstwo, kstest
+
+    Notes
+    -----
+    :math:`D_n^+` and :math:`D_n^-` are given by
+
+    .. math::
+
+        D_n^+ &= \text{sup}_x (F_n(x) - F(x)),\\
+        D_n^- &= \text{sup}_x (F(x) - F_n(x)),\\
+
+    where :math:`F` is a continuous CDF and :math:`F_n` is an empirical CDF.
+    `ksone` describes the distribution under the null hypothesis of the KS test
+    that the empirical CDF corresponds to :math:`n` i.i.d. random variates
+    with CDF :math:`F`.
+
+    %(after_notes)s
+
+    References
+    ----------
+    .. [1] Birnbaum, Z. W. and Tingey, F.H. "One-sided confidence contours
+       for probability distribution functions", The Annals of Mathematical
+       Statistics, 22(4), pp 592-596 (1951).
+
+    %(example)s
+
+    """
+    def _argcheck(self, n):
+        return (n >= 1) & (n == np.round(n))
+
+    def _shape_info(self):
+        return [_ShapeInfo("n", True, (1, np.inf), (True, False))]
+
+    def _pdf(self, x, n):
+        return -scu._smirnovp(n, x)
+
+    def _cdf(self, x, n):
+        return scu._smirnovc(n, x)
+
+    def _sf(self, x, n):
+        return sc.smirnov(n, x)
+
+    def _ppf(self, q, n):
+        return scu._smirnovci(n, q)
+
+    def _isf(self, q, n):
+        return sc.smirnovi(n, q)
+
+
+ksone = ksone_gen(a=0.0, b=1.0, name='ksone')
+
+
+class kstwo_gen(rv_continuous):
+    r"""Kolmogorov-Smirnov two-sided test statistic distribution.
+
+    This is the distribution of the two-sided Kolmogorov-Smirnov (KS)
+    statistic :math:`D_n` for a finite sample size ``n >= 1``
+    (the shape parameter).
+
+    %(before_notes)s
+
+    See Also
+    --------
+    kstwobign, ksone, kstest
+
+    Notes
+    -----
+    :math:`D_n` is given by
+
+    .. math::
+
+        D_n = \text{sup}_x |F_n(x) - F(x)|
+
+    where :math:`F` is a (continuous) CDF and :math:`F_n` is an empirical CDF.
+    `kstwo` describes the distribution under the null hypothesis of the KS test
+    that the empirical CDF corresponds to :math:`n` i.i.d. random variates
+    with CDF :math:`F`.
+
+    %(after_notes)s
+
+    References
+    ----------
+    .. [1] Simard, R., L'Ecuyer, P. "Computing the Two-Sided
+       Kolmogorov-Smirnov Distribution",  Journal of Statistical Software,
+       Vol 39, 11, 1-18 (2011).
+
+    %(example)s
+
+    """
+    def _argcheck(self, n):
+        return (n >= 1) & (n == np.round(n))
+
+    def _shape_info(self):
+        return [_ShapeInfo("n", True, (1, np.inf), (True, False))]
+
+    def _get_support(self, n):
+        return (0.5/(n if not isinstance(n, Iterable) else np.asanyarray(n)),
+                1.0)
+
+    def _pdf(self, x, n):
+        return kolmognp(n, x)
+
+    def _cdf(self, x, n):
+        return kolmogn(n, x)
+
+    def _sf(self, x, n):
+        return kolmogn(n, x, cdf=False)
+
+    def _ppf(self, q, n):
+        return kolmogni(n, q, cdf=True)
+
+    def _isf(self, q, n):
+        return kolmogni(n, q, cdf=False)
+
+
+# Use the pdf, (not the ppf) to compute moments
+kstwo = kstwo_gen(momtype=0, a=0.0, b=1.0, name='kstwo')
+
+
+class kstwobign_gen(rv_continuous):
+    r"""Limiting distribution of scaled Kolmogorov-Smirnov two-sided test statistic.
+
+    This is the asymptotic distribution of the two-sided Kolmogorov-Smirnov
+    statistic :math:`\sqrt{n} D_n` that measures the maximum absolute
+    distance of the theoretical (continuous) CDF from the empirical CDF.
+    (see `kstest`).
+
+    %(before_notes)s
+
+    See Also
+    --------
+    ksone, kstwo, kstest
+
+    Notes
+    -----
+    :math:`\sqrt{n} D_n` is given by
+
+    .. math::
+
+        D_n = \text{sup}_x |F_n(x) - F(x)|
+
+    where :math:`F` is a continuous CDF and :math:`F_n` is an empirical CDF.
+    `kstwobign`  describes the asymptotic distribution (i.e. the limit of
+    :math:`\sqrt{n} D_n`) under the null hypothesis of the KS test that the
+    empirical CDF corresponds to i.i.d. random variates with CDF :math:`F`.
+
+    %(after_notes)s
+
+    References
+    ----------
+    .. [1] Feller, W. "On the Kolmogorov-Smirnov Limit Theorems for Empirical
+       Distributions",  Ann. Math. Statist. Vol 19, 177-189 (1948).
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return []
+
+    def _pdf(self, x):
+        return -scu._kolmogp(x)
+
+    def _cdf(self, x):
+        return scu._kolmogc(x)
+
+    def _sf(self, x):
+        return sc.kolmogorov(x)
+
+    def _ppf(self, q):
+        return scu._kolmogci(q)
+
+    def _isf(self, q):
+        return sc.kolmogi(q)
+
+
+kstwobign = kstwobign_gen(a=0.0, name='kstwobign')
+
+
+## Normal distribution
+
+# loc = mu, scale = std
+# Keep these implementations out of the class definition so they can be reused
+# by other distributions.
+_norm_pdf_C = np.sqrt(2*np.pi)
+_norm_pdf_logC = np.log(_norm_pdf_C)
+
+
+def _norm_pdf(x):
+    return np.exp(-x**2/2.0) / _norm_pdf_C
+
+
+def _norm_logpdf(x):
+    return -x**2 / 2.0 - _norm_pdf_logC
+
+
+def _norm_cdf(x):
+    return sc.ndtr(x)
+
+
+def _norm_logcdf(x):
+    return sc.log_ndtr(x)
+
+
+def _norm_ppf(q):
+    return sc.ndtri(q)
+
+
+def _norm_sf(x):
+    return _norm_cdf(-x)
+
+
+def _norm_logsf(x):
+    return _norm_logcdf(-x)
+
+
+def _norm_isf(q):
+    return -_norm_ppf(q)
+
+
+class norm_gen(rv_continuous):
+    r"""A normal continuous random variable.
+
+    The location (``loc``) keyword specifies the mean.
+    The scale (``scale``) keyword specifies the standard deviation.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `norm` is:
+
+    .. math::
+
+        f(x) = \frac{\exp(-x^2/2)}{\sqrt{2\pi}}
+
+    for a real number :math:`x`.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return []
+
+    def _rvs(self, size=None, random_state=None):
+        return random_state.standard_normal(size)
+
+    def _pdf(self, x):
+        # norm.pdf(x) = exp(-x**2/2)/sqrt(2*pi)
+        return _norm_pdf(x)
+
+    def _logpdf(self, x):
+        return _norm_logpdf(x)
+
+    def _cdf(self, x):
+        return _norm_cdf(x)
+
+    def _logcdf(self, x):
+        return _norm_logcdf(x)
+
+    def _sf(self, x):
+        return _norm_sf(x)
+
+    def _logsf(self, x):
+        return _norm_logsf(x)
+
+    def _ppf(self, q):
+        return _norm_ppf(q)
+
+    def _isf(self, q):
+        return _norm_isf(q)
+
+    def _stats(self):
+        return 0.0, 1.0, 0.0, 0.0
+
+    def _entropy(self):
+        return 0.5*(np.log(2*np.pi)+1)
+
+    @_call_super_mom
+    @replace_notes_in_docstring(rv_continuous, notes="""\
+        For the normal distribution, method of moments and maximum likelihood
+        estimation give identical fits, and explicit formulas for the estimates
+        are available.
+        This function uses these explicit formulas for the maximum likelihood
+        estimation of the normal distribution parameters, so the
+        `optimizer` and `method` arguments are ignored.\n\n""")
+    def fit(self, data, **kwds):
+
+        floc = kwds.pop('floc', None)
+        fscale = kwds.pop('fscale', None)
+
+        _remove_optimizer_parameters(kwds)
+
+        if floc is not None and fscale is not None:
+            # This check is for consistency with `rv_continuous.fit`.
+            # Without this check, this function would just return the
+            # parameters that were given.
+            raise ValueError("All parameters fixed. There is nothing to "
+                             "optimize.")
+
+        data = np.asarray(data)
+
+        if not np.isfinite(data).all():
+            raise ValueError("The data contains non-finite values.")
+
+        if floc is None:
+            loc = data.mean()
+        else:
+            loc = floc
+
+        if fscale is None:
+            scale = np.sqrt(((data - loc)**2).mean())
+        else:
+            scale = fscale
+
+        return loc, scale
+
+    def _munp(self, n):
+        """
+        Return the moments of the standard normal distribution
+        for integer n >= 0.
+
+        See eq. 16 of https://arxiv.org/abs/1209.4340v2
+        """
+        if n % 2 == 0:
+            return sc.factorial2(n - 1)
+        else:
+            return 0.
+
+
+norm = norm_gen(name='norm')
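+
+# An illustrative sketch (not part of the SciPy source): the closed-form fit
+# above returns the sample mean and the ddof=0 standard deviation, e.g.
+#     >>> import numpy as np
+#     >>> norm.fit(np.array([1., 2., 3., 4.]))
+#     (2.5, 1.118033988749895)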
+
+
+class alpha_gen(rv_continuous):
+    r"""An alpha continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `alpha` ([1]_, [2]_) is:
+
+    .. math::
+
+        f(x, a) = \frac{1}{x^2 \Phi(a) \sqrt{2\pi}} *
+                  \exp(-\frac{1}{2} (a-1/x)^2)
+
+    where :math:`\Phi` is the normal CDF, :math:`x > 0`, and :math:`a > 0`.
+
+    `alpha` takes ``a`` as a shape parameter.
+
+    %(after_notes)s
+
+    References
+    ----------
+    .. [1] Johnson, Kotz, and Balakrishnan, "Continuous Univariate
+           Distributions, Volume 1", Second Edition, John Wiley and Sons,
+           p. 173 (1994).
+    .. [2] Anthony A. Salvia, "Reliability applications of the Alpha
+           Distribution", IEEE Transactions on Reliability, Vol. R-34,
+           No. 3, pp. 251-252 (1985).
+
+    %(example)s
+
+    """
+    _support_mask = rv_continuous._open_support_mask
+
+    def _shape_info(self):
+        return [_ShapeInfo("a", False, (0, np.inf), (False, False))]
+
+    def _pdf(self, x, a):
+        # alpha.pdf(x, a) = 1/(x**2*Phi(a)*sqrt(2*pi)) * exp(-1/2 * (a-1/x)**2)
+        return 1.0/(x**2)/_norm_cdf(a)*_norm_pdf(a-1.0/x)
+
+    def _logpdf(self, x, a):
+        return -2*np.log(x) + _norm_logpdf(a-1.0/x) - np.log(_norm_cdf(a))
+
+    def _cdf(self, x, a):
+        return _norm_cdf(a-1.0/x) / _norm_cdf(a)
+
+    def _ppf(self, q, a):
+        return 1.0/np.asarray(a-sc.ndtri(q*_norm_cdf(a)))
+
+    def _stats(self, a):
+        return [np.inf]*2 + [np.nan]*2
+
+
+alpha = alpha_gen(a=0.0, name='alpha')
+
+
+class anglit_gen(rv_continuous):
+    r"""An anglit continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `anglit` is:
+
+    .. math::
+
+        f(x) = \sin(2x + \pi/2) = \cos(2x)
+
+    for :math:`-\pi/4 \le x \le \pi/4`.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return []
+
+    def _pdf(self, x):
+        # anglit.pdf(x) = sin(2*x + \pi/2) = cos(2*x)
+        return np.cos(2*x)
+
+    def _cdf(self, x):
+        return np.sin(x+np.pi/4)**2.0
+
+    def _ppf(self, q):
+        return np.arcsin(np.sqrt(q))-np.pi/4
+
+    def _stats(self):
+        return 0.0, np.pi*np.pi/16-0.5, 0.0, -2*(np.pi**4 - 96)/(np.pi*np.pi-8)**2
+
+    def _entropy(self):
+        return 1-np.log(2)
+
+
+anglit = anglit_gen(a=-np.pi/4, b=np.pi/4, name='anglit')
+
+
+class arcsine_gen(rv_continuous):
+    r"""An arcsine continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `arcsine` is:
+
+    .. math::
+
+        f(x) = \frac{1}{\pi \sqrt{x (1-x)}}
+
+    for :math:`0 < x < 1`.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return []
+
+    def _pdf(self, x):
+        # arcsine.pdf(x) = 1/(pi*sqrt(x*(1-x)))
+        with np.errstate(divide='ignore'):
+            return 1.0/np.pi/np.sqrt(x*(1-x))
+
+    def _cdf(self, x):
+        return 2.0/np.pi*np.arcsin(np.sqrt(x))
+
+    def _ppf(self, q):
+        return np.sin(np.pi/2.0*q)**2.0
+
+    def _stats(self):
+        mu = 0.5
+        mu2 = 1.0/8
+        g1 = 0
+        g2 = -3.0/2.0
+        return mu, mu2, g1, g2
+
+    def _entropy(self):
+        return -0.24156447527049044468
+
+
+arcsine = arcsine_gen(a=0.0, b=1.0, name='arcsine')
+
+
+class FitDataError(ValueError):
+    """Raised when input data is inconsistent with fixed parameters."""
+    # This exception is raised by, for example, beta_gen.fit when both floc
+    # and fscale are fixed and there are values in the data not in the open
+    # interval (floc, floc+fscale).
+    def __init__(self, distr, lower, upper):
+        self.args = (
+            "Invalid values in `data`.  Maximum likelihood "
+            "estimation with {distr!r} requires that {lower!r} < "
+            "(x - loc)/scale  < {upper!r} for each x in `data`.".format(
+                distr=distr, lower=lower, upper=upper),
+        )
+
+
+class FitSolverError(FitError):
+    """
+    Raised when a solver fails to converge while fitting a distribution.
+    """
+    # This exception is raised by, for example, beta_gen.fit when
+    # optimize.fsolve returns with ier != 1.
+    def __init__(self, mesg):
+        emsg = "Solver for the MLE equations failed to converge: "
+        emsg += mesg.replace('\n', '')
+        self.args = (emsg,)
+
+
+def _beta_mle_a(a, b, n, s1):
+    # The zeros of this function give the MLE for `a`, with
+    # `b`, `n` and `s1` given.  `s1` is the sum of the logs of
+    # the data. `n` is the number of data points.
+    psiab = sc.psi(a + b)
+    func = s1 - n * (-psiab + sc.psi(a))
+    return func
+
+
+def _beta_mle_ab(theta, n, s1, s2):
+    # Zeros of this function are critical points of
+    # the maximum likelihood function.  Solving this system
+    # for theta (which contains a and b) gives the MLE for a and b
+    # given `n`, `s1` and `s2`.  `s1` is the sum of the logs of the data,
+    # and `s2` is the sum of the logs of 1 - data.  `n` is the number
+    # of data points.
+    a, b = theta
+    psiab = sc.psi(a + b)
+    func = [s1 - n * (-psiab + sc.psi(a)),
+            s2 - n * (-psiab + sc.psi(b))]
+    return func
+
+
+class beta_gen(rv_continuous):
+    r"""A beta continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `beta` is:
+
+    .. math::
+
+        f(x, a, b) = \frac{\Gamma(a+b) x^{a-1} (1-x)^{b-1}}
+                          {\Gamma(a) \Gamma(b)}
+
+    for :math:`0 <= x <= 1`, :math:`a > 0`, :math:`b > 0`, where
+    :math:`\Gamma` is the gamma function (`scipy.special.gamma`).
+
+    `beta` takes :math:`a` and :math:`b` as shape parameters.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        ia = _ShapeInfo("a", False, (0, np.inf), (False, False))
+        ib = _ShapeInfo("b", False, (0, np.inf), (False, False))
+        return [ia, ib]
+
+    def _rvs(self, a, b, size=None, random_state=None):
+        return random_state.beta(a, b, size)
+
+    def _pdf(self, x, a, b):
+        #                     gamma(a+b) * x**(a-1) * (1-x)**(b-1)
+        # beta.pdf(x, a, b) = ------------------------------------
+        #                              gamma(a)*gamma(b)
+        return _boost._beta_pdf(x, a, b)
+
+    def _logpdf(self, x, a, b):
+        lPx = sc.xlog1py(b - 1.0, -x) + sc.xlogy(a - 1.0, x)
+        lPx -= sc.betaln(a, b)
+        return lPx
+
+    def _cdf(self, x, a, b):
+        return _boost._beta_cdf(x, a, b)
+
+    def _sf(self, x, a, b):
+        return _boost._beta_sf(x, a, b)
+
+    def _isf(self, x, a, b):
+        with warnings.catch_warnings():
+            # See gh-14901
+            message = "overflow encountered in _beta_isf"
+            warnings.filterwarnings('ignore', message=message)
+            return _boost._beta_isf(x, a, b)
+
+    def _ppf(self, q, a, b):
+        with warnings.catch_warnings():
+            message = "overflow encountered in _beta_ppf"
+            warnings.filterwarnings('ignore', message=message)
+            return _boost._beta_ppf(q, a, b)
+
+    def _stats(self, a, b):
+        return (
+            _boost._beta_mean(a, b),
+            _boost._beta_variance(a, b),
+            _boost._beta_skewness(a, b),
+            _boost._beta_kurtosis_excess(a, b))
+
+    def _fitstart(self, data):
+        g1 = _skew(data)
+        g2 = _kurtosis(data)
+
+        def func(x):
+            a, b = x
+            sk = 2*(b-a)*np.sqrt(a + b + 1) / (a + b + 2) / np.sqrt(a*b)
+            ku = a**3 - a**2*(2*b-1) + b**2*(b+1) - 2*a*b*(b+2)
+            ku /= a*b*(a+b+2)*(a+b+3)
+            ku *= 6
+            return [sk-g1, ku-g2]
+        a, b = optimize.fsolve(func, (1.0, 1.0))
+        return super()._fitstart(data, args=(a, b))
+
+    @_call_super_mom
+    @extend_notes_in_docstring(rv_continuous, notes="""\
+        In the special case where `method="MLE"` and
+        both `floc` and `fscale` are given, a
+        `ValueError` is raised if any value `x` in `data` does not satisfy
+        `floc < x < floc + fscale`.\n\n""")
+    def fit(self, data, *args, **kwds):
+        # Override rv_continuous.fit, so we can more efficiently handle the
+        # case where floc and fscale are given.
+
+        floc = kwds.get('floc', None)
+        fscale = kwds.get('fscale', None)
+
+        if floc is None or fscale is None:
+            # do general fit
+            return super().fit(data, *args, **kwds)
+
+        # We already got these from kwds, so just pop them.
+        kwds.pop('floc', None)
+        kwds.pop('fscale', None)
+
+        f0 = _get_fixed_fit_value(kwds, ['f0', 'fa', 'fix_a'])
+        f1 = _get_fixed_fit_value(kwds, ['f1', 'fb', 'fix_b'])
+
+        _remove_optimizer_parameters(kwds)
+
+        if f0 is not None and f1 is not None:
+            # This check is for consistency with `rv_continuous.fit`.
+            raise ValueError("All parameters fixed. There is nothing to "
+                             "optimize.")
+
+        # Special case: loc and scale are constrained, so we are fitting
+        # just the shape parameters.  This can be done much more efficiently
+        # than the method used in `rv_continuous.fit`.  (See the subsection
+        # "Two unknown parameters" in the section "Maximum likelihood" of
+        # the Wikipedia article on the Beta distribution for the formulas.)
+
+        if not np.isfinite(data).all():
+            raise ValueError("The data contains non-finite values.")
+
+        # Normalize the data to the interval [0, 1].
+        data = (np.ravel(data) - floc) / fscale
+        if np.any(data <= 0) or np.any(data >= 1):
+            raise FitDataError("beta", lower=floc, upper=floc + fscale)
+
+        xbar = data.mean()
+
+        if f0 is not None or f1 is not None:
+            # One of the shape parameters is fixed.
+
+            if f0 is not None:
+                # The shape parameter a is fixed, so swap the parameters
+                # and flip the data.  We always solve for `a`.  The result
+                # will be swapped back before returning.
+                b = f0
+                data = 1 - data
+                xbar = 1 - xbar
+            else:
+                b = f1
+
+            # Initial guess for a.  Use the formula for the mean of the beta
+            # distribution, E[x] = a / (a + b), to generate a reasonable
+            # starting point based on the mean of the data and the given
+            # value of b.
+            a = b * xbar / (1 - xbar)
+
+            # Compute the MLE for `a` by solving _beta_mle_a.
+            theta, info, ier, mesg = optimize.fsolve(
+                _beta_mle_a, a,
+                args=(b, len(data), np.log(data).sum()),
+                full_output=True
+            )
+            if ier != 1:
+                raise FitSolverError(mesg=mesg)
+            a = theta[0]
+
+            if f0 is not None:
+                # The shape parameter a was fixed, so swap back the
+                # parameters.
+                a, b = b, a
+
+        else:
+            # Neither of the shape parameters is fixed.
+
+            # s1 and s2 are used in the extra arguments passed to _beta_mle_ab
+            # by optimize.fsolve.
+            s1 = np.log(data).sum()
+            s2 = sc.log1p(-data).sum()
+
+            # Use the "method of moments" to estimate the initial
+            # guess for a and b.
+            fac = xbar * (1 - xbar) / data.var(ddof=0) - 1
+            a = xbar * fac
+            b = (1 - xbar) * fac
+
+            # Compute the MLE for a and b by solving _beta_mle_ab.
+            theta, info, ier, mesg = optimize.fsolve(
+                _beta_mle_ab, [a, b],
+                args=(len(data), s1, s2),
+                full_output=True
+            )
+            if ier != 1:
+                raise FitSolverError(mesg=mesg)
+            a, b = theta
+
+        return a, b, floc, fscale
+
+
+beta = beta_gen(a=0.0, b=1.0, name='beta')
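+
+# Editor's sketch (illustration only): fixing both `floc` and `fscale` sends
+# `beta.fit` down the fast closed-form/`fsolve` path above; the fixed values
+# are returned unchanged.
+#
+#   >>> from scipy import stats
+#   >>> data = stats.beta.rvs(2.0, 5.0, size=500, random_state=1)
+#   >>> a, b, loc, scale = stats.beta.fit(data, floc=0, fscale=1)
+#   >>> (loc, scale)
+#   (0, 1)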
+
+
+class betaprime_gen(rv_continuous):
+    r"""A beta prime continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `betaprime` is:
+
+    .. math::
+
+        f(x, a, b) = \frac{x^{a-1} (1+x)^{-a-b}}{\beta(a, b)}
+
+    for :math:`x >= 0`, :math:`a > 0`, :math:`b > 0`, where
+    :math:`\beta(a, b)` is the beta function (see `scipy.special.beta`).
+
+    `betaprime` takes ``a`` and ``b`` as shape parameters.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    _support_mask = rv_continuous._open_support_mask
+
+    def _shape_info(self):
+        ia = _ShapeInfo("a", False, (0, np.inf), (False, False))
+        ib = _ShapeInfo("b", False, (0, np.inf), (False, False))
+        return [ia, ib]
+
+    def _rvs(self, a, b, size=None, random_state=None):
+        u1 = gamma.rvs(a, size=size, random_state=random_state)
+        u2 = gamma.rvs(b, size=size, random_state=random_state)
+        return u1 / u2
+
+    def _pdf(self, x, a, b):
+        # betaprime.pdf(x, a, b) = x**(a-1) * (1+x)**(-a-b) / beta(a, b)
+        return np.exp(self._logpdf(x, a, b))
+
+    def _logpdf(self, x, a, b):
+        return sc.xlogy(a - 1.0, x) - sc.xlog1py(a + b, x) - sc.betaln(a, b)
+
+    def _cdf(self, x, a, b):
+        return sc.betainc(a, b, x/(1.+x))
+
+    def _munp(self, n, a, b):
+        if n == 1.0:
+            return np.where(b > 1,
+                            a/(b-1.0),
+                            np.inf)
+        elif n == 2.0:
+            return np.where(b > 2,
+                            a*(a+1.0)/((b-2.0)*(b-1.0)),
+                            np.inf)
+        elif n == 3.0:
+            return np.where(b > 3,
+                            a*(a+1.0)*(a+2.0)/((b-3.0)*(b-2.0)*(b-1.0)),
+                            np.inf)
+        elif n == 4.0:
+            return np.where(b > 4,
+                            (a*(a + 1.0)*(a + 2.0)*(a + 3.0) /
+                             ((b - 4.0)*(b - 3.0)*(b - 2.0)*(b - 1.0))),
+                            np.inf)
+        else:
+            raise NotImplementedError
+
+
+betaprime = betaprime_gen(a=0.0, name='betaprime')
+
+
+class bradford_gen(rv_continuous):
+    r"""A Bradford continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `bradford` is:
+
+    .. math::
+
+        f(x, c) = \frac{c}{\log(1+c) (1+cx)}
+
+    for :math:`0 <= x <= 1` and :math:`c > 0`.
+
+    `bradford` takes ``c`` as a shape parameter for :math:`c`.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
+
+    def _pdf(self, x, c):
+        # bradford.pdf(x, c) = c / (log(1+c) * (1+c*x))
+        return c / (c*x + 1.0) / sc.log1p(c)
+
+    def _cdf(self, x, c):
+        return sc.log1p(c*x) / sc.log1p(c)
+
+    def _ppf(self, q, c):
+        return sc.expm1(q * sc.log1p(c)) / c
+
+    def _stats(self, c, moments='mv'):
+        k = np.log(1.0+c)
+        mu = (c-k)/(c*k)
+        mu2 = ((c+2.0)*k-2.0*c)/(2*c*k*k)
+        g1 = None
+        g2 = None
+        if 's' in moments:
+            g1 = np.sqrt(2)*(12*c*c-9*c*k*(c+2)+2*k*k*(c*(c+3)+3))
+            g1 /= np.sqrt(c*(c*(k-2)+2*k))*(3*c*(k-2)+6*k)
+        if 'k' in moments:
+            g2 = (c**3*(k-3)*(k*(3*k-16)+24)+12*k*c*c*(k-4)*(k-3) +
+                  6*c*k*k*(3*k-14) + 12*k**3)
+            g2 /= 3*c*(c*(k-2)+2*k)**2
+        return mu, mu2, g1, g2
+
+    def _entropy(self, c):
+        k = np.log(1+c)
+        return k/2.0 - np.log(c/k)
+
+
+bradford = bradford_gen(a=0.0, b=1.0, name='bradford')
+
+
+class burr_gen(rv_continuous):
+    r"""A Burr (Type III) continuous random variable.
+
+    %(before_notes)s
+
+    See Also
+    --------
+    fisk : a special case of either `burr` or `burr12` with ``d=1``
+    burr12 : Burr Type XII distribution
+    mielke : Mielke Beta-Kappa / Dagum distribution
+
+    Notes
+    -----
+    The probability density function for `burr` is:
+
+    .. math::
+
+        f(x; c, d) = c d \frac{x^{-c - 1}}
+                              {{(1 + x^{-c})}^{d + 1}}
+
+    for :math:`x >= 0` and :math:`c, d > 0`.
+
+    `burr` takes ``c`` and ``d`` as shape parameters for :math:`c` and
+    :math:`d`.
+
+    This is the PDF corresponding to the third CDF given in Burr's list;
+    specifically, it is equation (11) in Burr's paper [1]_. The distribution
+    is also commonly referred to as the Dagum distribution [2]_. If the
+    parameter :math:`c < 1`, then the mean of the distribution does not
+    exist, and if :math:`c < 2`, the variance does not exist [2]_.
+    The PDF is finite at the left endpoint :math:`x = 0` if :math:`c * d >= 1`.
+
+    %(after_notes)s
+
+    References
+    ----------
+    .. [1] Burr, I. W. "Cumulative frequency functions", Annals of
+       Mathematical Statistics, 13(2), pp 215-232 (1942).
+    .. [2] https://en.wikipedia.org/wiki/Dagum_distribution
+    .. [3] Kleiber, Christian. "A guide to the Dagum distributions."
+       Modeling Income Distributions and Lorenz Curves  pp 97-117 (2008).
+
+    %(example)s
+
+    """
+    # Do not set _support_mask to rv_continuous._open_support_mask
+    # Whether the left-hand endpoint is suitable for pdf evaluation is dependent
+    # on the values of c and d: if c*d >= 1, the pdf is finite, otherwise infinite.
+
+    def _shape_info(self):
+        ic = _ShapeInfo("c", False, (0, np.inf), (False, False))
+        id = _ShapeInfo("d", False, (0, np.inf), (False, False))
+        return [ic, id]
+
+    def _pdf(self, x, c, d):
+        # burr.pdf(x, c, d) = c * d * x**(-c-1) * (1+x**(-c))**(-d-1)
+        output = _lazywhere(
+            x == 0, [x, c, d],
+            lambda x_, c_, d_: c_ * d_ * (x_**(c_*d_-1)) / (1 + x_**c_),
+            f2=lambda x_, c_, d_: (c_ * d_ * (x_**(-c_ - 1.0)) /
+                                   ((1 + x_**(-c_))**(d_ + 1.0))))
+        if output.ndim == 0:
+            return output[()]
+        return output
+
+    def _logpdf(self, x, c, d):
+        output = _lazywhere(
+            x == 0, [x, c, d],
+            lambda x_, c_, d_: (np.log(c_) + np.log(d_) + sc.xlogy(c_*d_ - 1, x_)
+                                - (d_+1) * sc.log1p(x_**(c_))),
+            f2=lambda x_, c_, d_: (np.log(c_) + np.log(d_)
+                                   + sc.xlogy(-c_ - 1, x_)
+                                   - sc.xlog1py(d_+1, x_**(-c_))))
+        if output.ndim == 0:
+            return output[()]
+        return output
+
+    def _cdf(self, x, c, d):
+        return (1 + x**(-c))**(-d)
+
+    def _logcdf(self, x, c, d):
+        return sc.log1p(x**(-c)) * (-d)
+
+    def _sf(self, x, c, d):
+        return np.exp(self._logsf(x, c, d))
+
+    def _logsf(self, x, c, d):
+        return np.log1p(- (1 + x**(-c))**(-d))
+
+    def _ppf(self, q, c, d):
+        return (q**(-1.0/d) - 1)**(-1.0/c)
+
+    def _stats(self, c, d):
+        nc = np.arange(1, 5).reshape(4, 1) / c
+        # ek is the kth raw moment: e1 is the mean, e2 - e1**2 is the
+        # variance, and so on.
+        e1, e2, e3, e4 = sc.beta(d + nc, 1. - nc) * d
+        mu = np.where(c > 1.0, e1, np.nan)
+        mu2_if_c = e2 - mu**2
+        mu2 = np.where(c > 2.0, mu2_if_c, np.nan)
+        g1 = _lazywhere(
+            c > 3.0,
+            (c, e1, e2, e3, mu2_if_c),
+            lambda c, e1, e2, e3, mu2_if_c: (e3 - 3*e2*e1 + 2*e1**3) / np.sqrt((mu2_if_c)**3),
+            fillvalue=np.nan)
+        g2 = _lazywhere(
+            c > 4.0,
+            (c, e1, e2, e3, e4, mu2_if_c),
+            lambda c, e1, e2, e3, e4, mu2_if_c: (
+                ((e4 - 4*e3*e1 + 6*e2*e1**2 - 3*e1**4) / mu2_if_c**2) - 3),
+            fillvalue=np.nan)
+        if np.ndim(c) == 0:
+            return mu.item(), mu2.item(), g1.item(), g2.item()
+        return mu, mu2, g1, g2
+
+    def _munp(self, n, c, d):
+        def __munp(n, c, d):
+            nc = 1. * n / c
+            return d * sc.beta(1.0 - nc, d + nc)
+        n, c, d = np.asarray(n), np.asarray(c), np.asarray(d)
+        return _lazywhere((c > n) & (n == n) & (d == d), (c, d, n),
+                          lambda c, d, n: __munp(n, c, d),
+                          np.nan)
+
+
+burr = burr_gen(a=0.0, name='burr')
+
+
+class burr12_gen(rv_continuous):
+    r"""A Burr (Type XII) continuous random variable.
+
+    %(before_notes)s
+
+    See Also
+    --------
+    fisk : a special case of either `burr` or `burr12` with ``d=1``
+    burr : Burr Type III distribution
+
+    Notes
+    -----
+    The probability density function for `burr12` is:
+
+    .. math::
+
+        f(x; c, d) = c d \frac{x^{c-1}}
+                              {(1 + x^c)^{d + 1}}
+
+    for :math:`x >= 0` and :math:`c, d > 0`.
+
+    `burr12` takes ``c`` and ``d`` as shape parameters for :math:`c`
+    and :math:`d`.
+
+    This is the PDF corresponding to the twelfth CDF given in Burr's list;
+    specifically, it is equation (20) in Burr's paper [1]_.
+
+    %(after_notes)s
+
+    The Burr type 12 distribution is also sometimes referred to as the
+    Singh-Maddala distribution [2]_.
+
+    References
+    ----------
+    .. [1] Burr, I. W. "Cumulative frequency functions", Annals of
+       Mathematical Statistics, 13(2), pp 215-232 (1942).
+
+    .. [2] https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/b12pdf.htm
+
+    .. [3] "Burr distribution",
+       https://en.wikipedia.org/wiki/Burr_distribution
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        ic = _ShapeInfo("c", False, (0, np.inf), (False, False))
+        id = _ShapeInfo("d", False, (0, np.inf), (False, False))
+        return [ic, id]
+
+    def _pdf(self, x, c, d):
+        # burr12.pdf(x, c, d) = c * d * x**(c-1) * (1+x**(c))**(-d-1)
+        return np.exp(self._logpdf(x, c, d))
+
+    def _logpdf(self, x, c, d):
+        return np.log(c) + np.log(d) + sc.xlogy(c - 1, x) + sc.xlog1py(-d-1, x**c)
+
+    def _cdf(self, x, c, d):
+        return -sc.expm1(self._logsf(x, c, d))
+
+    def _logcdf(self, x, c, d):
+        return sc.log1p(-(1 + x**c)**(-d))
+
+    def _sf(self, x, c, d):
+        return np.exp(self._logsf(x, c, d))
+
+    def _logsf(self, x, c, d):
+        return sc.xlog1py(-d, x**c)
+
+    def _ppf(self, q, c, d):
+        # The following is an implementation of
+        #   ((1 - q)**(-1.0/d) - 1)**(1.0/c)
+        # that does a better job handling small values of q.
+        return sc.expm1(-1/d * sc.log1p(-q))**(1/c)
+
+    def _munp(self, n, c, d):
+        nc = 1. * n / c
+        return d * sc.beta(1.0 + nc, d - nc)
+
+
+burr12 = burr12_gen(a=0.0, name='burr12')
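+
+# Editor's note (illustration only): `_ppf` above evaluates
+# ((1 - q)**(-1/d) - 1)**(1/c) via `expm1`/`log1p`, which keeps precision for
+# q near 0; to leading order the result is (q/d)**(1/c) there.
+#
+#   >>> import numpy as np
+#   >>> from scipy import stats
+#   >>> q, c, d = 1e-15, 2.0, 3.0
+#   >>> np.isclose(stats.burr12.ppf(q, c, d), (q/d)**(1/c), rtol=1e-6)
+#   True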
+
+
+class fisk_gen(burr_gen):
+    r"""A Fisk continuous random variable.
+
+    The Fisk distribution is also known as the log-logistic distribution.
+
+    %(before_notes)s
+
+    See Also
+    --------
+    burr
+
+    Notes
+    -----
+    The probability density function for `fisk` is:
+
+    .. math::
+
+        f(x, c) = \frac{c x^{c-1}}
+                       {(1 + x^c)^2}
+
+    for :math:`x >= 0` and :math:`c > 0`.
+
+    The expression above can be transformed into the following form, which
+    is also commonly used:
+
+    .. math::
+
+        f(x, c) = \frac{c x^{-c-1}}
+                       {(1 + x^{-c})^2}
+
+    `fisk` takes ``c`` as a shape parameter for :math:`c`.
+
+    `fisk` is a special case of `burr` or `burr12` with ``d=1``.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
+
+    def _pdf(self, x, c):
+        # fisk.pdf(x, c) = c * x**(-c-1) * (1 + x**(-c))**(-2)
+        return burr._pdf(x, c, 1.0)
+
+    def _cdf(self, x, c):
+        return burr._cdf(x, c, 1.0)
+
+    def _sf(self, x, c):
+        return burr._sf(x, c, 1.0)
+
+    def _logpdf(self, x, c):
+        # fisk.pdf(x, c) = c * x**(-c-1) * (1 + x**(-c))**(-2)
+        return burr._logpdf(x, c, 1.0)
+
+    def _logcdf(self, x, c):
+        return burr._logcdf(x, c, 1.0)
+
+    def _logsf(self, x, c):
+        return burr._logsf(x, c, 1.0)
+
+    def _ppf(self, x, c):
+        return burr._ppf(x, c, 1.0)
+
+    def _munp(self, n, c):
+        return burr._munp(n, c, 1.0)
+
+    def _stats(self, c):
+        return burr._stats(c, 1.0)
+
+    def _entropy(self, c):
+        return 2 - np.log(c)
+
+
+fisk = fisk_gen(a=0.0, name='fisk')
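+
+# Editor's sketch (illustration only): since every method above delegates to
+# `burr` with d=1, the two laws coincide exactly at that shape.
+#
+#   >>> import numpy as np
+#   >>> from scipy import stats
+#   >>> x, c = np.linspace(0.1, 5, 9), 2.5
+#   >>> np.allclose(stats.fisk.pdf(x, c), stats.burr.pdf(x, c, 1.0))
+#   True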
+
+
+class cauchy_gen(rv_continuous):
+    r"""A Cauchy continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `cauchy` is
+
+    .. math::
+
+        f(x) = \frac{1}{\pi (1 + x^2)}
+
+    for a real number :math:`x`.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return []
+
+    def _pdf(self, x):
+        # cauchy.pdf(x) = 1 / (pi * (1 + x**2))
+        return 1.0/np.pi/(1.0+x*x)
+
+    def _cdf(self, x):
+        return 0.5 + 1.0/np.pi*np.arctan(x)
+
+    def _ppf(self, q):
+        return np.tan(np.pi*q-np.pi/2.0)
+
+    def _sf(self, x):
+        return 0.5 - 1.0/np.pi*np.arctan(x)
+
+    def _isf(self, q):
+        return np.tan(np.pi/2.0-np.pi*q)
+
+    def _stats(self):
+        return np.nan, np.nan, np.nan, np.nan
+
+    def _entropy(self):
+        return np.log(4*np.pi)
+
+    def _fitstart(self, data, args=None):
+        # Initialize ML guesses using quartiles instead of moments.
+        p25, p50, p75 = np.percentile(data, [25, 50, 75])
+        return p50, (p75 - p25)/2
+
+
+cauchy = cauchy_gen(name='cauchy')
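+
+# Editor's note (illustration only): the quartile-based `_fitstart` above
+# works because a standard Cauchy has quartiles at -1 and +1
+# (tan(-pi/4), tan(pi/4)), so the median estimates `loc` and half the
+# interquartile range estimates `scale`.
+#
+#   >>> import numpy as np
+#   >>> from scipy import stats
+#   >>> np.allclose(stats.cauchy.ppf([0.25, 0.75]), [-1.0, 1.0])
+#   True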
+
+
+class chi_gen(rv_continuous):
+    r"""A chi continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `chi` is:
+
+    .. math::
+
+        f(x, k) = \frac{1}{2^{k/2-1} \Gamma \left( k/2 \right)}
+                   x^{k-1} \exp \left( -x^2/2 \right)
+
+    for :math:`x >= 0` and :math:`k > 0` (degrees of freedom, denoted ``df``
+    in the implementation). :math:`\Gamma` is the gamma function
+    (`scipy.special.gamma`).
+
+    Special cases of `chi` are:
+
+        - ``chi(1, loc, scale)`` is equivalent to `halfnorm`
+        - ``chi(2, 0, scale)`` is equivalent to `rayleigh`
+        - ``chi(3, 0, scale)`` is equivalent to `maxwell`
+
+    `chi` takes ``df`` as a shape parameter.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("df", False, (0, np.inf), (False, False))]
+
+    def _rvs(self, df, size=None, random_state=None):
+        return np.sqrt(chi2.rvs(df, size=size, random_state=random_state))
+
+    def _pdf(self, x, df):
+        #                   x**(df-1) * exp(-x**2/2)
+        # chi.pdf(x, df) =  -------------------------
+        #                   2**(df/2-1) * gamma(df/2)
+        return np.exp(self._logpdf(x, df))
+
+    def _logpdf(self, x, df):
+        lp = np.log(2) - .5*np.log(2)*df - sc.gammaln(.5*df)
+        return lp + sc.xlogy(df - 1., x) - .5*x**2
+
+    def _cdf(self, x, df):
+        return sc.gammainc(.5*df, .5*x**2)
+
+    def _sf(self, x, df):
+        return sc.gammaincc(.5*df, .5*x**2)
+
+    def _ppf(self, q, df):
+        return np.sqrt(2*sc.gammaincinv(.5*df, q))
+
+    def _isf(self, q, df):
+        return np.sqrt(2*sc.gammainccinv(.5*df, q))
+
+    def _stats(self, df):
+        mu = np.sqrt(2)*np.exp(sc.gammaln(df/2.0+0.5)-sc.gammaln(df/2.0))
+        mu2 = df - mu*mu
+        g1 = (2*mu**3.0 + mu*(1-2*df))/np.asarray(np.power(mu2, 1.5))
+        g2 = 2*df*(1.0-df)-6*mu**4 + 4*mu**2 * (2*df-1)
+        g2 /= np.asarray(mu2**2.0)
+        return mu, mu2, g1, g2
+
+
+chi = chi_gen(a=0.0, name='chi')
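+
+# Editor's sketch (illustration only): checking one special case from the
+# docstring, chi with df=2 is the Rayleigh distribution.
+#
+#   >>> import numpy as np
+#   >>> from scipy import stats
+#   >>> x = np.linspace(0.1, 4, 8)
+#   >>> np.allclose(stats.chi.pdf(x, 2), stats.rayleigh.pdf(x))
+#   True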
+
+
+class chi2_gen(rv_continuous):
+    r"""A chi-squared continuous random variable.
+
+    For the noncentral chi-square distribution, see `ncx2`.
+
+    %(before_notes)s
+
+    See Also
+    --------
+    ncx2
+
+    Notes
+    -----
+    The probability density function for `chi2` is:
+
+    .. math::
+
+        f(x, k) = \frac{1}{2^{k/2} \Gamma \left( k/2 \right)}
+                   x^{k/2-1} \exp \left( -x/2 \right)
+
+    for :math:`x > 0` and :math:`k > 0` (degrees of freedom, denoted ``df``
+    in the implementation).
+
+    `chi2` takes ``df`` as a shape parameter.
+
+    The chi-squared distribution is a special case of the gamma
+    distribution, with gamma parameters ``a = df/2``, ``loc = 0`` and
+    ``scale = 2``.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("df", False, (0, np.inf), (False, False))]
+
+    def _rvs(self, df, size=None, random_state=None):
+        return random_state.chisquare(df, size)
+
+    def _pdf(self, x, df):
+        # chi2.pdf(x, df) = 1 / (2*gamma(df/2)) * (x/2)**(df/2-1) * exp(-x/2)
+        return np.exp(self._logpdf(x, df))
+
+    def _logpdf(self, x, df):
+        return sc.xlogy(df/2.-1, x) - x/2. - sc.gammaln(df/2.) - (np.log(2)*df)/2.
+
+    def _cdf(self, x, df):
+        return sc.chdtr(df, x)
+
+    def _sf(self, x, df):
+        return sc.chdtrc(df, x)
+
+    def _isf(self, p, df):
+        return sc.chdtri(df, p)
+
+    def _ppf(self, p, df):
+        return 2*sc.gammaincinv(df/2, p)
+
+    def _stats(self, df):
+        mu = df
+        mu2 = 2*df
+        g1 = 2*np.sqrt(2.0/df)
+        g2 = 12.0/df
+        return mu, mu2, g1, g2
+
+
+chi2 = chi2_gen(a=0.0, name='chi2')
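+
+# Editor's sketch (illustration only): the gamma special case noted in the
+# docstring, chi2(df) == gamma(a=df/2, loc=0, scale=2).
+#
+#   >>> import numpy as np
+#   >>> from scipy import stats
+#   >>> x, df = np.linspace(0.5, 10, 7), 4
+#   >>> np.allclose(stats.chi2.pdf(x, df), stats.gamma.pdf(x, df/2, scale=2))
+#   True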
+
+
+class cosine_gen(rv_continuous):
+    r"""A cosine continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The cosine distribution is an approximation to the normal distribution.
+    The probability density function for `cosine` is:
+
+    .. math::
+
+        f(x) = \frac{1}{2\pi} (1+\cos(x))
+
+    for :math:`-\pi \le x \le \pi`.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return []
+
+    def _pdf(self, x):
+        # cosine.pdf(x) = 1/(2*pi) * (1+cos(x))
+        return 1.0/2/np.pi*(1+np.cos(x))
+
+    def _logpdf(self, x):
+        c = np.cos(x)
+        return _lazywhere(c != -1, (c,),
+                          lambda c: np.log1p(c) - np.log(2*np.pi),
+                          fillvalue=-np.inf)
+
+    def _cdf(self, x):
+        return scu._cosine_cdf(x)
+
+    def _sf(self, x):
+        return scu._cosine_cdf(-x)
+
+    def _ppf(self, p):
+        return scu._cosine_invcdf(p)
+
+    def _isf(self, p):
+        return -scu._cosine_invcdf(p)
+
+    def _stats(self):
+        return 0.0, np.pi*np.pi/3.0-2.0, 0.0, -6.0*(np.pi**4-90)/(5.0*(np.pi*np.pi-6)**2)
+
+    def _entropy(self):
+        return np.log(4*np.pi)-1.0
+
+
+cosine = cosine_gen(a=-np.pi, b=np.pi, name='cosine')
+
+
+class dgamma_gen(rv_continuous):
+    r"""A double gamma continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `dgamma` is:
+
+    .. math::
+
+        f(x, a) = \frac{1}{2\Gamma(a)} |x|^{a-1} \exp(-|x|)
+
+    for a real number :math:`x` and :math:`a > 0`. :math:`\Gamma` is the
+    gamma function (`scipy.special.gamma`).
+
+    `dgamma` takes ``a`` as a shape parameter for :math:`a`.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("a", False, (0, np.inf), (False, False))]
+
+    def _rvs(self, a, size=None, random_state=None):
+        u = random_state.uniform(size=size)
+        gm = gamma.rvs(a, size=size, random_state=random_state)
+        return gm * np.where(u >= 0.5, 1, -1)
+
+    def _pdf(self, x, a):
+        # dgamma.pdf(x, a) = 1 / (2*gamma(a)) * abs(x)**(a-1) * exp(-abs(x))
+        ax = abs(x)
+        return 1.0/(2*sc.gamma(a))*ax**(a-1.0) * np.exp(-ax)
+
+    def _logpdf(self, x, a):
+        ax = abs(x)
+        return sc.xlogy(a - 1.0, ax) - ax - np.log(2) - sc.gammaln(a)
+
+    def _cdf(self, x, a):
+        fac = 0.5*sc.gammainc(a, abs(x))
+        return np.where(x > 0, 0.5 + fac, 0.5 - fac)
+
+    def _sf(self, x, a):
+        fac = 0.5*sc.gammainc(a, abs(x))
+        return np.where(x > 0, 0.5-fac, 0.5+fac)
+
+    def _ppf(self, q, a):
+        fac = sc.gammainccinv(a, 1-abs(2*q-1))
+        return np.where(q > 0.5, fac, -fac)
+
+    def _stats(self, a):
+        mu2 = a*(a+1.0)
+        return 0.0, mu2, 0.0, (a+2.0)*(a+3.0)/mu2-3.0
+
+
+dgamma = dgamma_gen(name='dgamma')
+
+
+class dweibull_gen(rv_continuous):
+    r"""A double Weibull continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `dweibull` is given by
+
+    .. math::
+
+        f(x, c) = c / 2 |x|^{c-1} \exp(-|x|^c)
+
+    for a real number :math:`x` and :math:`c > 0`.
+
+    `dweibull` takes ``c`` as a shape parameter for :math:`c`.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
+
+    def _rvs(self, c, size=None, random_state=None):
+        u = random_state.uniform(size=size)
+        w = weibull_min.rvs(c, size=size, random_state=random_state)
+        return w * (np.where(u >= 0.5, 1, -1))
+
+    def _pdf(self, x, c):
+        # dweibull.pdf(x, c) = c / 2 * abs(x)**(c-1) * exp(-abs(x)**c)
+        ax = abs(x)
+        Px = c / 2.0 * ax**(c-1.0) * np.exp(-ax**c)
+        return Px
+
+    def _logpdf(self, x, c):
+        ax = abs(x)
+        return np.log(c) - np.log(2.0) + sc.xlogy(c - 1.0, ax) - ax**c
+
+    def _cdf(self, x, c):
+        Cx1 = 0.5 * np.exp(-abs(x)**c)
+        return np.where(x > 0, 1 - Cx1, Cx1)
+
+    def _ppf(self, q, c):
+        fac = 2. * np.where(q <= 0.5, q, 1. - q)
+        fac = np.power(-np.log(fac), 1.0 / c)
+        return np.where(q > 0.5, fac, -fac)
+
+    def _munp(self, n, c):
+        return (1 - (n % 2)) * sc.gamma(1.0 + 1.0 * n / c)
+
+    # Since we know that all odd moments are zero, return them directly.
+    # Returning None for the even moments makes the public stats() compute
+    # them via _munp, so overall we save one or two gamma function
+    # evaluations here.
+    def _stats(self, c):
+        return 0, None, 0, None
+
+
+dweibull = dweibull_gen(name='dweibull')
+
+
+class expon_gen(rv_continuous):
+    r"""An exponential continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `expon` is:
+
+    .. math::
+
+        f(x) = \exp(-x)
+
+    for :math:`x \ge 0`.
+
+    %(after_notes)s
+
+    A common parameterization for `expon` is in terms of the rate parameter
+    ``lambda``, such that ``pdf = lambda * exp(-lambda * x)``. This
+    parameterization corresponds to using ``scale = 1 / lambda``.
+
+    The exponential distribution is a special case of the gamma
+    distributions, with gamma shape parameter ``a = 1``.
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return []
+
+    def _rvs(self, size=None, random_state=None):
+        return random_state.standard_exponential(size)
+
+    def _pdf(self, x):
+        # expon.pdf(x) = exp(-x)
+        return np.exp(-x)
+
+    def _logpdf(self, x):
+        return -x
+
+    def _cdf(self, x):
+        return -sc.expm1(-x)
+
+    def _ppf(self, q):
+        return -sc.log1p(-q)
+
+    def _sf(self, x):
+        return np.exp(-x)
+
+    def _logsf(self, x):
+        return -x
+
+    def _isf(self, q):
+        return -np.log(q)
+
+    def _stats(self):
+        return 1.0, 1.0, 2.0, 6.0
+
+    def _entropy(self):
+        return 1.0
+
+    @_call_super_mom
+    @replace_notes_in_docstring(rv_continuous, notes="""\
+        When `method='MLE'`,
+        this function uses explicit formulas for the maximum likelihood
+        estimation of the exponential distribution parameters, so the
+        `optimizer`, `loc` and `scale` keyword arguments are
+        ignored.\n\n""")
+    def fit(self, data, *args, **kwds):
+        if len(args) > 0:
+            raise TypeError("Too many arguments.")
+
+        floc = kwds.pop('floc', None)
+        fscale = kwds.pop('fscale', None)
+
+        _remove_optimizer_parameters(kwds)
+
+        if floc is not None and fscale is not None:
+            # This check is for consistency with `rv_continuous.fit`.
+            raise ValueError("All parameters fixed. There is nothing to "
+                             "optimize.")
+
+        data = np.asarray(data)
+
+        if not np.isfinite(data).all():
+            raise ValueError("The data contains non-finite values.")
+
+        data_min = data.min()
+
+        if floc is None:
+            # ML estimate of the location is the minimum of the data.
+            loc = data_min
+        else:
+            loc = floc
+            if data_min < loc:
+                # There are values that are less than the specified loc.
+                raise FitDataError("expon", lower=floc, upper=np.inf)
+
+        if fscale is None:
+            # ML estimate of the scale is the shifted mean.
+            scale = data.mean() - loc
+        else:
+            scale = fscale
+
+        # We expect the return values to be floating point, so ensure it
+        # by explicitly converting to float.
+        return float(loc), float(scale)
+
+
+expon = expon_gen(a=0.0, name='expon')
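+
+# Editor's sketch (illustration only): the rate parameterization from the
+# docstring, pdf = lam * exp(-lam * x), corresponds to scale = 1/lam.
+#
+#   >>> import numpy as np
+#   >>> from scipy import stats
+#   >>> lam, x = 2.5, np.linspace(0, 4, 9)
+#   >>> np.allclose(stats.expon.pdf(x, scale=1/lam), lam * np.exp(-lam * x))
+#   True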
+
+
+class exponnorm_gen(rv_continuous):
+    r"""An exponentially modified Normal continuous random variable.
+
+    Also known as the exponentially modified Gaussian distribution [1]_.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `exponnorm` is:
+
+    .. math::
+
+        f(x, K) = \frac{1}{2K} \exp\left(\frac{1}{2 K^2} - x / K \right)
+                  \text{erfc}\left(-\frac{x - 1/K}{\sqrt{2}}\right)
+
+    where :math:`x` is a real number and :math:`K > 0`.
+
+    It can be thought of as the sum of a standard normal random variable
+    and an independent exponentially distributed random variable with rate
+    ``1/K``.
+
+    %(after_notes)s
+
+    An alternative parameterization of this distribution (for example, in
+    the Wikipedia article [1]_) involves three parameters, :math:`\mu`,
+    :math:`\lambda` and :math:`\sigma`.
+
+    In the present parameterization this corresponds to having ``loc`` and
+    ``scale`` equal to :math:`\mu` and :math:`\sigma`, respectively, and
+    shape parameter :math:`K = 1/(\sigma\lambda)`.
+
+    .. versionadded:: 0.16.0
+
+    References
+    ----------
+    .. [1] Exponentially modified Gaussian distribution, Wikipedia,
+           https://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("K", False, (0, np.inf), (False, False))]
+
+    def _rvs(self, K, size=None, random_state=None):
+        expval = random_state.standard_exponential(size) * K
+        gval = random_state.standard_normal(size)
+        return expval + gval
+
+    def _pdf(self, x, K):
+        return np.exp(self._logpdf(x, K))
+
+    def _logpdf(self, x, K):
+        invK = 1.0 / K
+        exparg = invK * (0.5 * invK - x)
+        return exparg + _norm_logcdf(x - invK) - np.log(K)
+
+    def _cdf(self, x, K):
+        invK = 1.0 / K
+        expval = invK * (0.5 * invK - x)
+        logprod = expval + _norm_logcdf(x - invK)
+        return _norm_cdf(x) - np.exp(logprod)
+
+    def _sf(self, x, K):
+        invK = 1.0 / K
+        expval = invK * (0.5 * invK - x)
+        logprod = expval + _norm_logcdf(x - invK)
+        return _norm_cdf(-x) + np.exp(logprod)
+
+    def _stats(self, K):
+        K2 = K * K
+        opK2 = 1.0 + K2
+        skw = 2 * K**3 * opK2**(-1.5)
+        krt = 6.0 * K2 * K2 * opK2**(-2)
+        return K, opK2, skw, krt
+
+
+exponnorm = exponnorm_gen(name='exponnorm')
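+
+# Editor's sketch (illustration only): per the docstring, exponnorm(K) is a
+# standard normal plus an independent exponential with scale K, which is
+# exactly how `_rvs` above draws samples; the first two moments follow
+# directly.
+#
+#   >>> import numpy as np
+#   >>> from scipy import stats
+#   >>> K = 1.5
+#   >>> np.allclose(stats.exponnorm.stats(K, moments='mv'), (K, 1 + K**2))
+#   True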
+
+
+class exponweib_gen(rv_continuous):
+    r"""An exponentiated Weibull continuous random variable.
+
+    %(before_notes)s
+
+    See Also
+    --------
+    weibull_min, numpy.random.Generator.weibull
+
+    Notes
+    -----
+    The probability density function for `exponweib` is:
+
+    .. math::
+
+        f(x, a, c) = a c [1-\exp(-x^c)]^{a-1} \exp(-x^c) x^{c-1}
+
+    and its cumulative distribution function is:
+
+    .. math::
+
+        F(x, a, c) = [1-\exp(-x^c)]^a
+
+    for :math:`x > 0`, :math:`a > 0`, :math:`c > 0`.
+
+    `exponweib` takes :math:`a` and :math:`c` as shape parameters:
+
+    * :math:`a` is the exponentiation parameter,
+      with the special case :math:`a=1` corresponding to the
+      (non-exponentiated) Weibull distribution `weibull_min`.
+    * :math:`c` is the shape parameter of the non-exponentiated Weibull law.
+
+    %(after_notes)s
+
+    References
+    ----------
+    https://en.wikipedia.org/wiki/Exponentiated_Weibull_distribution
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        ia = _ShapeInfo("a", False, (0, np.inf), (False, False))
+        ic = _ShapeInfo("c", False, (0, np.inf), (False, False))
+        return [ia, ic]
+
+    def _pdf(self, x, a, c):
+        # exponweib.pdf(x, a, c) =
+        #     a * c * (1-exp(-x**c))**(a-1) * exp(-x**c)*x**(c-1)
+        return np.exp(self._logpdf(x, a, c))
+
+    def _logpdf(self, x, a, c):
+        negxc = -x**c
+        exm1c = -sc.expm1(negxc)
+        logp = (np.log(a) + np.log(c) + sc.xlogy(a - 1.0, exm1c) +
+                negxc + sc.xlogy(c - 1.0, x))
+        return logp
+
+    def _cdf(self, x, a, c):
+        exm1c = -sc.expm1(-x**c)
+        return exm1c**a
+
+    def _ppf(self, q, a, c):
+        return (-sc.log1p(-q**(1.0/a)))**np.asarray(1.0/c)
+
+
+exponweib = exponweib_gen(a=0.0, name='exponweib')
+
+
+class exponpow_gen(rv_continuous):
+    r"""An exponential power continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `exponpow` is:
+
+    .. math::
+
+        f(x, b) = b x^{b-1} \exp(1 + x^b - \exp(x^b))
+
+    for :math:`x \ge 0`, :math:`b > 0`.  Note that this is a different
+    distribution from the exponential power distribution that is also known
+    under the names "generalized normal" or "generalized Gaussian".
+
+    `exponpow` takes ``b`` as a shape parameter for :math:`b`.
+
+    %(after_notes)s
+
+    References
+    ----------
+    http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Exponentialpower.pdf
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("b", False, (0, np.inf), (False, False))]
+
+    def _pdf(self, x, b):
+        # exponpow.pdf(x, b) = b * x**(b-1) * exp(1 + x**b - exp(x**b))
+        return np.exp(self._logpdf(x, b))
+
+    def _logpdf(self, x, b):
+        xb = x**b
+        f = 1 + np.log(b) + sc.xlogy(b - 1.0, x) + xb - np.exp(xb)
+        return f
+
+    def _cdf(self, x, b):
+        return -sc.expm1(-sc.expm1(x**b))
+
+    def _sf(self, x, b):
+        return np.exp(-sc.expm1(x**b))
+
+    def _isf(self, x, b):
+        return (sc.log1p(-np.log(x)))**(1./b)
+
+    def _ppf(self, q, b):
+        return pow(sc.log1p(-sc.log1p(-q)), 1.0/b)
+
+
+exponpow = exponpow_gen(a=0.0, name='exponpow')
+
+
+class fatiguelife_gen(rv_continuous):
+    r"""A fatigue-life (Birnbaum-Saunders) continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `fatiguelife` is:
+
+    .. math::
+
+        f(x, c) = \frac{x+1}{2c\sqrt{2\pi x^3}} \exp(-\frac{(x-1)^2}{2x c^2})
+
+    for :math:`x >= 0` and :math:`c > 0`.
+
+    `fatiguelife` takes ``c`` as a shape parameter for :math:`c`.
+
+    %(after_notes)s
+
+    References
+    ----------
+    .. [1] "Birnbaum-Saunders distribution",
+           https://en.wikipedia.org/wiki/Birnbaum-Saunders_distribution
+
+    %(example)s
+
+    """
+    _support_mask = rv_continuous._open_support_mask
+
+    def _shape_info(self):
+        return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
+
+    def _rvs(self, c, size=None, random_state=None):
+        z = random_state.standard_normal(size)
+        x = 0.5*c*z
+        x2 = x*x
+        t = 1.0 + 2*x2 + 2*x*np.sqrt(1 + x2)
+        return t
+
+    def _pdf(self, x, c):
+        # fatiguelife.pdf(x, c) =
+        #     (x+1) / (2*c*sqrt(2*pi*x**3)) * exp(-(x-1)**2/(2*x*c**2))
+        return np.exp(self._logpdf(x, c))
+
+    def _logpdf(self, x, c):
+        return (np.log(x+1) - (x-1)**2 / (2.0*x*c**2) - np.log(2*c) -
+                0.5*(np.log(2*np.pi) + 3*np.log(x)))
+
+    def _cdf(self, x, c):
+        return _norm_cdf(1.0 / c * (np.sqrt(x) - 1.0/np.sqrt(x)))
+
+    def _ppf(self, q, c):
+        tmp = c*sc.ndtri(q)
+        return 0.25 * (tmp + np.sqrt(tmp**2 + 4))**2
+
+    def _sf(self, x, c):
+        return _norm_sf(1.0 / c * (np.sqrt(x) - 1.0/np.sqrt(x)))
+
+    def _isf(self, q, c):
+        tmp = -c*sc.ndtri(q)
+        return 0.25 * (tmp + np.sqrt(tmp**2 + 4))**2
+
+    def _stats(self, c):
+        # NB: the formula for kurtosis in wikipedia seems to have an error:
+        # it's 40, not 41. At least it disagrees with the one from Wolfram
+        # Alpha.  And the latter one, below, passes the tests, while the wiki
+        # one doesn't. So far I didn't have the guts to actually check the
+        # coefficients from the expressions for the raw moments.
+        c2 = c*c
+        mu = c2 / 2.0 + 1.0
+        den = 5.0 * c2 + 4.0
+        mu2 = c2*den / 4.0
+        g1 = 4 * c * (11*c2 + 6.0) / np.power(den, 1.5)
+        g2 = 6 * c2 * (93*c2 + 40.0) / den**2.0
+        return mu, mu2, g1, g2
+
+
+fatiguelife = fatiguelife_gen(a=0.0, name='fatiguelife')
+
+
+class foldcauchy_gen(rv_continuous):
+    r"""A folded Cauchy continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `foldcauchy` is:
+
+    .. math::
+
+        f(x, c) = \frac{1}{\pi (1+(x-c)^2)} + \frac{1}{\pi (1+(x+c)^2)}
+
+    for :math:`x \ge 0` and :math:`c \ge 0`.
+
+    `foldcauchy` takes ``c`` as a shape parameter for :math:`c`.
+
+    %(example)s
+
+    """
+    def _argcheck(self, c):
+        return c >= 0
+
+    def _shape_info(self):
+        return [_ShapeInfo("c", False, (0, np.inf), (True, False))]
+
+    def _rvs(self, c, size=None, random_state=None):
+        return abs(cauchy.rvs(loc=c, size=size,
+                              random_state=random_state))
+
+    def _pdf(self, x, c):
+        # foldcauchy.pdf(x, c) = 1/(pi*(1+(x-c)**2)) + 1/(pi*(1+(x+c)**2))
+        return 1.0/np.pi*(1.0/(1+(x-c)**2) + 1.0/(1+(x+c)**2))
+
+    def _cdf(self, x, c):
+        return 1.0/np.pi*(np.arctan(x-c) + np.arctan(x+c))
+
+    def _stats(self, c):
+        return np.inf, np.inf, np.nan, np.nan
+
+
+foldcauchy = foldcauchy_gen(a=0.0, name='foldcauchy')
+
+
+class f_gen(rv_continuous):
+    r"""An F continuous random variable.
+
+    For the noncentral F distribution, see `ncf`.
+
+    %(before_notes)s
+
+    See Also
+    --------
+    ncf
+
+    Notes
+    -----
+    The probability density function for `f` is:
+
+    .. math::
+
+        f(x, df_1, df_2) = \frac{df_2^{df_2/2} df_1^{df_1/2} x^{df_1 / 2-1}}
+                                {(df_2+df_1 x)^{(df_1+df_2)/2}
+                                 B(df_1/2, df_2/2)}
+
+    for :math:`x > 0` and parameters :math:`df_1, df_2 > 0` .
+
+    `f` takes ``dfn`` and ``dfd`` as shape parameters.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        idfn = _ShapeInfo("dfn", False, (0, np.inf), (False, False))
+        idfd = _ShapeInfo("dfd", False, (0, np.inf), (False, False))
+        return [idfn, idfd]
+
+    def _rvs(self, dfn, dfd, size=None, random_state=None):
+        return random_state.f(dfn, dfd, size)
+
+    def _pdf(self, x, dfn, dfd):
+        #                      df2**(df2/2) * df1**(df1/2) * x**(df1/2-1)
+        # F.pdf(x, df1, df2) = --------------------------------------------
+        #                      (df2+df1*x)**((df1+df2)/2) * B(df1/2, df2/2)
+        return np.exp(self._logpdf(x, dfn, dfd))
+
+    def _logpdf(self, x, dfn, dfd):
+        n = 1.0 * dfn
+        m = 1.0 * dfd
+        lPx = (m/2 * np.log(m) + n/2 * np.log(n) + sc.xlogy(n/2 - 1, x)
+               - (((n+m)/2) * np.log(m + n*x) + sc.betaln(n/2, m/2)))
+        return lPx
+
+    def _cdf(self, x, dfn, dfd):
+        return sc.fdtr(dfn, dfd, x)
+
+    def _sf(self, x, dfn, dfd):
+        return sc.fdtrc(dfn, dfd, x)
+
+    def _ppf(self, q, dfn, dfd):
+        return sc.fdtri(dfn, dfd, q)
+
+    def _stats(self, dfn, dfd):
+        v1, v2 = 1. * dfn, 1. * dfd
+        v2_2, v2_4, v2_6, v2_8 = v2 - 2., v2 - 4., v2 - 6., v2 - 8.
+
+        mu = _lazywhere(
+            v2 > 2, (v2, v2_2),
+            lambda v2, v2_2: v2 / v2_2,
+            np.inf)
+
+        mu2 = _lazywhere(
+            v2 > 4, (v1, v2, v2_2, v2_4),
+            lambda v1, v2, v2_2, v2_4:
+            2 * v2 * v2 * (v1 + v2_2) / (v1 * v2_2**2 * v2_4),
+            np.inf)
+
+        g1 = _lazywhere(
+            v2 > 6, (v1, v2_2, v2_4, v2_6),
+            lambda v1, v2_2, v2_4, v2_6:
+            (2 * v1 + v2_2) / v2_6 * np.sqrt(v2_4 / (v1 * (v1 + v2_2))),
+            np.nan)
+        g1 *= np.sqrt(8.)
+
+        g2 = _lazywhere(
+            v2 > 8, (g1, v2_6, v2_8),
+            lambda g1, v2_6, v2_8: (8 + g1 * g1 * v2_6) / v2_8,
+            np.nan)
+        g2 *= 3. / 2.
+
+        return mu, mu2, g1, g2
+
+
+f = f_gen(a=0.0, name='f')
+
+
+## Folded Normal
+##   abs(Z) where Z is normal with mu=L and std=S, so that c = abs(L)/S
+##
+##  note: the regress docs have the scale parameter correct, but the first
+##    parameter given there is a shape parameter, A = c * scale
+
+##  Half-normal is the folded normal with shape parameter c=0.
+
+class foldnorm_gen(rv_continuous):
+    r"""A folded normal continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `foldnorm` is:
+
+    .. math::
+
+        f(x, c) = \sqrt{2/\pi} \cosh(c x) \exp\left(-\frac{x^2+c^2}{2}\right)
+
+    for :math:`x \ge 0` and :math:`c \ge 0`.
+
+    `foldnorm` takes ``c`` as a shape parameter for :math:`c`.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _argcheck(self, c):
+        return c >= 0
+
+    def _shape_info(self):
+        return [_ShapeInfo("c", False, (0, np.inf), (True, False))]
+
+    def _rvs(self, c, size=None, random_state=None):
+        return abs(random_state.standard_normal(size) + c)
+
+    def _pdf(self, x, c):
+        # foldnorm.pdf(x, c) = sqrt(2/pi) * cosh(c*x) * exp(-(x**2+c**2)/2)
+        return _norm_pdf(x + c) + _norm_pdf(x-c)
+
+    def _cdf(self, x, c):
+        return _norm_cdf(x-c) + _norm_cdf(x+c) - 1.0
+
+    def _stats(self, c):
+        # Regina C. Elandt, Technometrics 3, 551 (1961)
+        # https://www.jstor.org/stable/1266561
+        #
+        c2 = c*c
+        expfac = np.exp(-0.5*c2) / np.sqrt(2.*np.pi)
+
+        mu = 2.*expfac + c * sc.erf(c/np.sqrt(2))
+        mu2 = c2 + 1 - mu*mu
+
+        g1 = 2. * (mu*mu*mu - c2*mu - expfac)
+        g1 /= np.power(mu2, 1.5)
+
+        g2 = c2 * (c2 + 6.) + 3 + 8.*expfac*mu
+        g2 += (2. * (c2 - 3.) - 3. * mu**2) * mu**2
+        g2 = g2 / mu2**2.0 - 3.
+
+        return mu, mu2, g1, g2
+
+
+foldnorm = foldnorm_gen(a=0.0, name='foldnorm')
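+
+# Editor's sketch (illustration only): per the header comment, foldnorm(c) is
+# the law of abs(Z + c) for standard normal Z, so its CDF is
+# Phi(x - c) + Phi(x + c) - 1, as implemented in `_cdf` above.
+#
+#   >>> import numpy as np
+#   >>> from scipy import stats
+#   >>> c, x = 1.2, np.linspace(0, 5, 11)
+#   >>> np.allclose(stats.foldnorm.cdf(x, c),
+#   ...             stats.norm.cdf(x - c) + stats.norm.cdf(x + c) - 1)
+#   True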
+
+
+class weibull_min_gen(rv_continuous):
+    r"""Weibull minimum continuous random variable.
+
+    The Weibull Minimum Extreme Value distribution, from extreme value theory
+    (Fisher-Gnedenko theorem), is also often simply called the Weibull
+    distribution. It arises as the limiting distribution of the rescaled
+    minimum of iid random variables.
+
+    %(before_notes)s
+
+    See Also
+    --------
+    weibull_max, numpy.random.Generator.weibull, exponweib
+
+    Notes
+    -----
+    The probability density function for `weibull_min` is:
+
+    .. math::
+
+        f(x, c) = c x^{c-1} \exp(-x^c)
+
+    for :math:`x > 0`, :math:`c > 0`.
+
+    `weibull_min` takes ``c`` as a shape parameter for :math:`c`
+    (named :math:`k` in the Wikipedia article and :math:`a` in
+    ``numpy.random.weibull``).  Special shape values are :math:`c=1` and
+    :math:`c=2`, where the Weibull distribution reduces to the `expon` and
+    `rayleigh` distributions, respectively.
+
+    %(after_notes)s
+
+    References
+    ----------
+    https://en.wikipedia.org/wiki/Weibull_distribution
+
+    https://en.wikipedia.org/wiki/Fisher-Tippett-Gnedenko_theorem
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
+
+    def _pdf(self, x, c):
+        # weibull_min.pdf(x, c) = c * x**(c-1) * exp(-x**c)
+        return c*pow(x, c-1)*np.exp(-pow(x, c))
+
+    def _logpdf(self, x, c):
+        return np.log(c) + sc.xlogy(c - 1, x) - pow(x, c)
+
+    def _cdf(self, x, c):
+        return -sc.expm1(-pow(x, c))
+
+    def _sf(self, x, c):
+        return np.exp(-pow(x, c))
+
+    def _logsf(self, x, c):
+        return -pow(x, c)
+
+    def _ppf(self, q, c):
+        return pow(-sc.log1p(-q), 1.0/c)
+
+    def _munp(self, n, c):
+        return sc.gamma(1.0+n*1.0/c)
+
+    def _entropy(self, c):
+        return -_EULER / c - np.log(c) + _EULER + 1
+
+    @extend_notes_in_docstring(rv_continuous, notes="""\
+        If ``method='mm'``, parameters fixed by the user are respected, and the
+        remaining parameters are used to match distribution and sample moments
+        where possible. For example, if the user fixes the location with
+        ``floc``, the parameters will only match the distribution skewness and
+        variance to the sample skewness and variance; no attempt will be made
+        to match the means or minimize a norm of the errors.
+        \n\n""")
+    def fit(self, data, *args, **kwds):
+        if kwds.pop('superfit', False):
+            return super().fit(data, *args, **kwds)
+
+        # this extracts fixed shape, location, and scale however they
+        # are specified, and also leaves them in `kwds`
+        data, fc, floc, fscale = _check_fit_input_parameters(self, data,
+                                                             args, kwds)
+        method = kwds.get("method", "mle").lower()
+
+        # See https://en.wikipedia.org/wiki/Weibull_distribution#Moments for
+        # moment formulas.
+        def skew(c):
+            gamma1 = sc.gamma(1+1/c)
+            gamma2 = sc.gamma(1+2/c)
+            gamma3 = sc.gamma(1+3/c)
+            num = 2 * gamma1**3 - 3*gamma1*gamma2 + gamma3
+            den = (gamma2 - gamma1**2)**(3/2)
+            return num/den
+
+        # For c in [1e2, 3e4], population skewness appears to approach
+        # asymptote near -1.139, but past c > 3e4, skewness begins to vary
+        # wildly, and MoM won't provide a good guess. Get out early.
+        s = stats.skew(data)
+        max_c = 1e4
+        s_min = skew(max_c)
+        if s < s_min and method != "mm" and fc is None and not args:
+            return super().fit(data, *args, **kwds)
+
+        # If method is method of moments, we don't need the user's guesses.
+        # Otherwise, extract the guesses from args and kwds.
+        if method == "mm":
+            c, loc, scale = None, None, None
+        else:
+            c = args[0] if len(args) else None
+            loc = kwds.pop('loc', None)
+            scale = kwds.pop('scale', None)
+
+        if fc is None and c is None:  # not fixed and no guess: use MoM
+            # Solve for c that matches sample distribution skewness to sample
+            # skewness.
+            # we start having numerical issues with `weibull_min` with
+            # parameters outside this range - and not just in this method.
+            # We could probably improve the situation by doing everything
+            # in the log space, but that is for another time.
+            c = root_scalar(lambda c: skew(c) - s, bracket=[0.02, max_c],
+                            method='bisect').root
+        elif fc is not None:  # fixed: use it
+            c = fc
+
+        if fscale is None and scale is None:
+            v = np.var(data)
+            scale = np.sqrt(v / (sc.gamma(1+2/c) - sc.gamma(1+1/c)**2))
+        elif fscale is not None:
+            scale = fscale
+
+        if floc is None and loc is None:
+            m = np.mean(data)
+            loc = m - scale*sc.gamma(1 + 1/c)
+        elif floc is not None:
+            loc = floc
+
+        if method == 'mm':
+            return c, loc, scale
+        else:
+            # At this point, parameter "guesses" may equal the fixed parameters
+            # in kwds. No harm in passing them as guesses, too.
+            return super().fit(data, c, loc=loc, scale=scale, **kwds)
+
+
+weibull_min = weibull_min_gen(a=0.0, name='weibull_min')
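+
+# Editor's sketch (illustration only): with method='mm', the `fit` above
+# solves skew(c) == sample skewness by bisection and then matches the
+# variance and mean, rather than optimizing a likelihood.
+#
+#   >>> from scipy import stats
+#   >>> data = stats.weibull_min.rvs(1.8, loc=0.5, scale=2.0, size=2000,
+#   ...                              random_state=3)
+#   >>> c, loc, scale = stats.weibull_min.fit(data, method='mm')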
+
+
+class truncweibull_min_gen(rv_continuous):
+    r"""A doubly truncated Weibull minimum continuous random variable.
+
+    %(before_notes)s
+
+    See Also
+    --------
+    weibull_min, truncexpon
+
+    Notes
+    -----
+    The probability density function for `truncweibull_min` is:
+
+    .. math::
+
+        f(x, c, a, b) = \frac{c x^{c-1} \exp(-x^c)}{\exp(-a^c) - \exp(-b^c)}
+
+    for :math:`a < x <= b`, :math:`0 \le a < b` and :math:`c > 0`.
+
+    `truncweibull_min` takes :math:`c`, :math:`a`, and :math:`b` as shape
+    parameters (in that order).
+
+    Notice that the truncation values, :math:`a` and :math:`b`, are defined in
+    standardized form:
+
+    .. math::
+
+        a = (u_l - loc)/scale
+
+        b = (u_r - loc)/scale
+
+    where :math:`u_l` and :math:`u_r` are the specific left and right
+    truncation values, respectively. In other words, the support of the
+    distribution becomes :math:`(a*scale + loc) < x <= (b*scale + loc)` when
+    :math:`loc` and/or :math:`scale` are provided.
+
+    %(after_notes)s
+
+    References
+    ----------
+
+    .. [1] Rinne, H. "The Weibull Distribution: A Handbook". CRC Press (2009).
+
+    %(example)s
+
+    """
+    def _argcheck(self, c, a, b):
+        return (a >= 0.) & (b > a) & (c > 0.)
+
+    def _shape_info(self):
+        ic = _ShapeInfo("c", False, (0, np.inf), (False, False))
+        ia = _ShapeInfo("a", False, (0, np.inf), (True, False))
+        ib = _ShapeInfo("b", False, (0, np.inf), (False, False))
+        return [ic, ia, ib]
+
+    def _fitstart(self, data):
+        # Arbitrary, but default a=b=c=1 is not valid
+        return super()._fitstart(data, args=(1, 0, 1))
+
+    def _get_support(self, c, a, b):
+        return a, b
+
+    def _pdf(self, x, c, a, b):
+        denum = (np.exp(-pow(a, c)) - np.exp(-pow(b, c)))
+        return (c * pow(x, c-1) * np.exp(-pow(x, c))) / denum
+
+    def _logpdf(self, x, c, a, b):
+        logdenum = np.log(np.exp(-pow(a, c)) - np.exp(-pow(b, c)))
+        return np.log(c) + sc.xlogy(c - 1, x) - pow(x, c) - logdenum
+
+    def _cdf(self, x, c, a, b):
+        num = (np.exp(-pow(a, c)) - np.exp(-pow(x, c)))
+        denum = (np.exp(-pow(a, c)) - np.exp(-pow(b, c)))
+        return num / denum
+
+    def _logcdf(self, x, c, a, b):
+        lognum = np.log(np.exp(-pow(a, c)) - np.exp(-pow(x, c)))
+        logdenum = np.log(np.exp(-pow(a, c)) - np.exp(-pow(b, c)))
+        return lognum - logdenum
+
+    def _sf(self, x, c, a, b):
+        num = (np.exp(-pow(x, c)) - np.exp(-pow(b, c)))
+        denum = (np.exp(-pow(a, c)) - np.exp(-pow(b, c)))
+        return num / denum
+
+    def _logsf(self, x, c, a, b):
+        lognum = np.log(np.exp(-pow(x, c)) - np.exp(-pow(b, c)))
+        logdenum = np.log(np.exp(-pow(a, c)) - np.exp(-pow(b, c)))
+        return lognum - logdenum
+
+    def _isf(self, q, c, a, b):
+        return pow(
+            -np.log((1 - q) * np.exp(-pow(b, c)) + q * np.exp(-pow(a, c))), 1/c
+            )
+
+    def _ppf(self, q, c, a, b):
+        return pow(
+            -np.log((1 - q) * np.exp(-pow(a, c)) + q * np.exp(-pow(b, c))), 1/c
+            )
+
+    def _munp(self, n, c, a, b):
+        gamma_fun = sc.gamma(n/c + 1.) * (
+            sc.gammainc(n/c + 1., pow(b, c)) - sc.gammainc(n/c + 1., pow(a, c))
+            )
+        denum = (np.exp(-pow(a, c)) - np.exp(-pow(b, c)))
+        return gamma_fun / denum
+
+
+truncweibull_min = truncweibull_min_gen(name='truncweibull_min')
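+
+# The closed-form `_ppf` above inverts `_cdf` analytically; a doctest-style
+# round-trip sketch using the public API (shape order is c, a, b):
+#
+#     >>> import numpy as np
+#     >>> from scipy.stats import truncweibull_min
+#     >>> q = np.array([0.1, 0.5, 0.9])
+#     >>> x = truncweibull_min.ppf(q, 2.0, 0.5, 2.0)
+#     >>> np.allclose(truncweibull_min.cdf(x, 2.0, 0.5, 2.0), q)
+#     True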
+
+
+class weibull_max_gen(rv_continuous):
+    r"""Weibull maximum continuous random variable.
+
+    The Weibull Maximum Extreme Value distribution, from extreme value
+    theory (Fisher-Gnedenko theorem), is the limiting distribution of the
+    rescaled maximum of iid random variables. It is the distribution of
+    :math:`-X` if :math:`X` follows the `weibull_min` distribution.
+
+    %(before_notes)s
+
+    See Also
+    --------
+    weibull_min
+
+    Notes
+    -----
+    The probability density function for `weibull_max` is:
+
+    .. math::
+
+        f(x, c) = c (-x)^{c-1} \exp(-(-x)^c)
+
+    for :math:`x < 0`, :math:`c > 0`.
+
+    `weibull_max` takes ``c`` as a shape parameter for :math:`c`.
+
+    %(after_notes)s
+
+    References
+    ----------
+    https://en.wikipedia.org/wiki/Weibull_distribution
+
+    https://en.wikipedia.org/wiki/Fisher-Tippett-Gnedenko_theorem
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
+
+    def _pdf(self, x, c):
+        # weibull_max.pdf(x, c) = c * (-x)**(c-1) * exp(-(-x)**c)
+        return c*pow(-x, c-1)*np.exp(-pow(-x, c))
+
+    def _logpdf(self, x, c):
+        return np.log(c) + sc.xlogy(c-1, -x) - pow(-x, c)
+
+    def _cdf(self, x, c):
+        return np.exp(-pow(-x, c))
+
+    def _logcdf(self, x, c):
+        return -pow(-x, c)
+
+    def _sf(self, x, c):
+        return -sc.expm1(-pow(-x, c))
+
+    def _ppf(self, q, c):
+        return -pow(-np.log(q), 1.0/c)
+
+    def _munp(self, n, c):
+        val = sc.gamma(1.0+n*1.0/c)
+        if int(n) % 2:
+            sgn = -1
+        else:
+            sgn = 1
+        return sgn * val
+
+    def _entropy(self, c):
+        return -_EULER / c - np.log(c) + _EULER + 1
+
+
+weibull_max = weibull_max_gen(b=0.0, name='weibull_max')
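+
+# Since `weibull_max` is the distribution of -X for X ~ `weibull_min`, the
+# two CDFs mirror each other: P(-X <= -x) = P(X >= x); a sketch using the
+# public API:
+#
+#     >>> import numpy as np
+#     >>> from scipy.stats import weibull_min, weibull_max
+#     >>> x = np.array([0.5, 1.0, 2.0])
+#     >>> np.allclose(weibull_max.cdf(-x, 2.0), weibull_min.sf(x, 2.0))
+#     True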
+
+
+class genlogistic_gen(rv_continuous):
+    r"""A generalized logistic continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `genlogistic` is:
+
+    .. math::
+
+        f(x, c) = c \frac{\exp(-x)}
+                         {(1 + \exp(-x))^{c+1}}
+
+    for real :math:`x` and :math:`c > 0`.
+
+    `genlogistic` takes ``c`` as a shape parameter for :math:`c`.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
+
+    def _pdf(self, x, c):
+        # genlogistic.pdf(x, c) = c * exp(-x) / (1 + exp(-x))**(c+1)
+        return np.exp(self._logpdf(x, c))
+
+    def _logpdf(self, x, c):
+        # Two mathematically equivalent expressions for log(pdf(x, c)):
+        #     log(pdf(x, c)) = log(c) - x - (c + 1)*log(1 + exp(-x))
+        #                    = log(c) + c*x - (c + 1)*log(1 + exp(x))
+        mult = -(c - 1) * (x < 0) - 1
+        absx = np.abs(x)
+        return np.log(c) + mult*absx - (c+1) * sc.log1p(np.exp(-absx))
+
+    def _cdf(self, x, c):
+        Cx = (1+np.exp(-x))**(-c)
+        return Cx
+
+    def _ppf(self, q, c):
+        vals = -np.log(pow(q, -1.0/c)-1)
+        return vals
+
+    def _stats(self, c):
+        mu = _EULER + sc.psi(c)
+        mu2 = np.pi*np.pi/6.0 + sc.zeta(2, c)
+        g1 = -2*sc.zeta(3, c) + 2*_ZETA3
+        g1 /= np.power(mu2, 1.5)
+        g2 = np.pi**4/15.0 + 6*sc.zeta(4, c)
+        g2 /= mu2**2.0
+        return mu, mu2, g1, g2
+
+
+genlogistic = genlogistic_gen(name='genlogistic')
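+
+# The `mult`/`absx` rewrite in `_logpdf` above keeps the result finite deep
+# in the left tail, where exponentiating the pdf formula directly would
+# underflow; a sketch using the public API:
+#
+#     >>> import numpy as np
+#     >>> from scipy.stats import genlogistic
+#     >>> np.isfinite(genlogistic.logpdf(-800.0, 2.0))
+#     True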
+
+
+class genpareto_gen(rv_continuous):
+    r"""A generalized Pareto continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `genpareto` is:
+
+    .. math::
+
+        f(x, c) = (1 + c x)^{-1 - 1/c}
+
+    defined for :math:`x \ge 0` if :math:`c \ge 0`, and for
+    :math:`0 \le x \le -1/c` if :math:`c < 0`.
+
+    `genpareto` takes ``c`` as a shape parameter for :math:`c`.
+
+    For :math:`c=0`, `genpareto` reduces to the exponential
+    distribution, `expon`:
+
+    .. math::
+
+        f(x, 0) = \exp(-x)
+
+    For :math:`c=-1`, `genpareto` is uniform on ``[0, 1]``:
+
+    .. math::
+
+        f(x, -1) = 1
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _argcheck(self, c):
+        return np.isfinite(c)
+
+    def _shape_info(self):
+        return [_ShapeInfo("c", False, (-np.inf, np.inf), (False, False))]
+
+    def _get_support(self, c):
+        c = np.asarray(c)
+        b = _lazywhere(c < 0, (c,),
+                       lambda c: -1. / c,
+                       np.inf)
+        # the left endpoint is self.a regardless of c; np.where is used
+        # here only to broadcast self.a to the shape of c.
+        a = np.where(c >= 0, self.a, self.a)
+        return a, b
+
+    def _pdf(self, x, c):
+        # genpareto.pdf(x, c) = (1 + c * x)**(-1 - 1/c)
+        return np.exp(self._logpdf(x, c))
+
+    def _logpdf(self, x, c):
+        return _lazywhere((x == x) & (c != 0), (x, c),
+                          lambda x, c: -sc.xlog1py(c + 1., c*x) / c,
+                          -x)
+
+    def _cdf(self, x, c):
+        return -sc.inv_boxcox1p(-x, -c)
+
+    def _sf(self, x, c):
+        return sc.inv_boxcox(-x, -c)
+
+    def _logsf(self, x, c):
+        return _lazywhere((x == x) & (c != 0), (x, c),
+                          lambda x, c: -sc.log1p(c*x) / c,
+                          -x)
+
+    def _ppf(self, q, c):
+        return -sc.boxcox1p(-q, -c)
+
+    def _isf(self, q, c):
+        return -sc.boxcox(q, -c)
+
+    def _stats(self, c, moments='mv'):
+        if 'm' not in moments:
+            m = None
+        else:
+            m = _lazywhere(c < 1, (c,),
+                           lambda xi: 1/(1 - xi),
+                           np.inf)
+        if 'v' not in moments:
+            v = None
+        else:
+            v = _lazywhere(c < 1/2, (c,),
+                           lambda xi: 1 / (1 - xi)**2 / (1 - 2*xi),
+                           np.nan)
+        if 's' not in moments:
+            s = None
+        else:
+            s = _lazywhere(c < 1/3, (c,),
+                           lambda xi: 2 * (1 + xi) * np.sqrt(1 - 2*xi) /
+                                      (1 - 3*xi),
+                           np.nan)
+        if 'k' not in moments:
+            k = None
+        else:
+            k = _lazywhere(c < 1/4, (c,),
+                           lambda xi: 3 * (1 - 2*xi) * (2*xi**2 + xi + 3) /
+                                      (1 - 3*xi) / (1 - 4*xi) - 3,
+                           np.nan)
+        return m, v, s, k
+
+    def _munp(self, n, c):
+        def __munp(n, c):
+            val = 0.0
+            k = np.arange(0, n + 1)
+            for ki, cnk in zip(k, sc.comb(n, k)):
+                val = val + cnk * (-1) ** ki / (1.0 - c * ki)
+            return np.where(c * n < 1, val * (-1.0 / c) ** n, np.inf)
+        return _lazywhere(c != 0, (c,),
+                          lambda c: __munp(n, c),
+                          sc.gamma(n + 1))
+
+    def _entropy(self, c):
+        return 1. + c
+
+
+genpareto = genpareto_gen(a=0.0, name='genpareto')
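+
+# The c == 0 branches above reduce `genpareto` to the exponential
+# distribution, as stated in the docstring; a sketch using the public API:
+#
+#     >>> import numpy as np
+#     >>> from scipy.stats import genpareto, expon
+#     >>> x = np.linspace(0, 5, 6)
+#     >>> np.allclose(genpareto.pdf(x, 0.0), expon.pdf(x))
+#     True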
+
+
+class genexpon_gen(rv_continuous):
+    r"""A generalized exponential continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `genexpon` is:
+
+    .. math::
+
+        f(x, a, b, c) = (a + b (1 - \exp(-c x)))
+                        \exp(-a x - b x + \frac{b}{c}  (1-\exp(-c x)))
+
+    for :math:`x \ge 0`, :math:`a, b, c > 0`.
+
+    `genexpon` takes :math:`a`, :math:`b` and :math:`c` as shape parameters.
+
+    %(after_notes)s
+
+    References
+    ----------
+    H.K. Ryu, "An Extension of Marshall and Olkin's Bivariate Exponential
+    Distribution", Journal of the American Statistical Association, 1993.
+
+    N. Balakrishnan and Asit P. Basu (eds.), "The Exponential Distribution:
+    Theory, Methods and Applications".
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        ia = _ShapeInfo("a", False, (0, np.inf), (False, False))
+        ib = _ShapeInfo("b", False, (0, np.inf), (False, False))
+        ic = _ShapeInfo("c", False, (0, np.inf), (False, False))
+        return [ia, ib, ic]
+
+    def _pdf(self, x, a, b, c):
+        # genexpon.pdf(x, a, b, c) = (a + b * (1 - exp(-c*x))) * \
+        #                            exp(-a*x - b*x + b/c * (1-exp(-c*x)))
+        return (a + b*(-sc.expm1(-c*x)))*np.exp((-a-b)*x +
+                                                b*(-sc.expm1(-c*x))/c)
+
+    def _logpdf(self, x, a, b, c):
+        return np.log(a+b*(-sc.expm1(-c*x))) + (-a-b)*x+b*(-sc.expm1(-c*x))/c
+
+    def _cdf(self, x, a, b, c):
+        return -sc.expm1((-a-b)*x + b*(-sc.expm1(-c*x))/c)
+
+    def _sf(self, x, a, b, c):
+        return np.exp((-a-b)*x + b*(-sc.expm1(-c*x))/c)
+
+
+genexpon = genexpon_gen(a=0.0, name='genexpon')
+
+
+class genextreme_gen(rv_continuous):
+    r"""A generalized extreme value continuous random variable.
+
+    %(before_notes)s
+
+    See Also
+    --------
+    gumbel_r
+
+    Notes
+    -----
+    For :math:`c=0`, `genextreme` is equal to `gumbel_r` with
+    probability density function
+
+    .. math::
+
+        f(x) = \exp(-\exp(-x)) \exp(-x),
+
+    where :math:`-\infty < x < \infty`.
+
+    For :math:`c \ne 0`, the probability density function for `genextreme` is:
+
+    .. math::
+
+        f(x, c) = \exp(-(1-c x)^{1/c}) (1-c x)^{1/c-1},
+
+    where :math:`-\infty < x \le 1/c` if :math:`c > 0` and
+    :math:`1/c \le x < \infty` if :math:`c < 0`.
+
+    Note that several sources and software packages use the opposite
+    convention for the sign of the shape parameter :math:`c`.
+
+    `genextreme` takes ``c`` as a shape parameter for :math:`c`.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _argcheck(self, c):
+        return np.isfinite(c)
+
+    def _shape_info(self):
+        return [_ShapeInfo("c", False, (-np.inf, np.inf), (False, False))]
+
+    def _get_support(self, c):
+        _b = np.where(c > 0, 1.0 / np.maximum(c, _XMIN), np.inf)
+        _a = np.where(c < 0, 1.0 / np.minimum(c, -_XMIN), -np.inf)
+        return _a, _b
+
+    def _loglogcdf(self, x, c):
+        # Returns log(-log(cdf(x, c)))
+        return _lazywhere((x == x) & (c != 0), (x, c),
+                          lambda x, c: sc.log1p(-c*x)/c, -x)
+
+    def _pdf(self, x, c):
+        # genextreme.pdf(x, c) =
+        #     exp(-exp(-x))*exp(-x),                    for c==0
+        #     exp(-(1-c*x)**(1/c))*(1-c*x)**(1/c-1),    for x \le 1/c, c > 0
+        return np.exp(self._logpdf(x, c))
+
+    def _logpdf(self, x, c):
+        cx = _lazywhere((x == x) & (c != 0), (x, c), lambda x, c: c*x, 0.0)
+        logex2 = sc.log1p(-cx)
+        logpex2 = self._loglogcdf(x, c)
+        pex2 = np.exp(logpex2)
+        # Handle special cases
+        np.putmask(logpex2, (c == 0) & (x == -np.inf), 0.0)
+        logpdf = _lazywhere(~((cx == 1) | (cx == -np.inf)),
+                            (pex2, logpex2, logex2),
+                            lambda pex2, lpex2, lex2: -pex2 + lpex2 - lex2,
+                            fillvalue=-np.inf)
+        np.putmask(logpdf, (c == 1) & (x == 1), 0.0)
+        return logpdf
+
+    def _logcdf(self, x, c):
+        return -np.exp(self._loglogcdf(x, c))
+
+    def _cdf(self, x, c):
+        return np.exp(self._logcdf(x, c))
+
+    def _sf(self, x, c):
+        return -sc.expm1(self._logcdf(x, c))
+
+    def _ppf(self, q, c):
+        x = -np.log(-np.log(q))
+        return _lazywhere((x == x) & (c != 0), (x, c),
+                          lambda x, c: -sc.expm1(-c * x) / c, x)
+
+    def _isf(self, q, c):
+        x = -np.log(-sc.log1p(-q))
+        return _lazywhere((x == x) & (c != 0), (x, c),
+                          lambda x, c: -sc.expm1(-c * x) / c, x)
+
+    def _stats(self, c):
+        g = lambda n: sc.gamma(n*c + 1)
+        g1 = g(1)
+        g2 = g(2)
+        g3 = g(3)
+        g4 = g(4)
+        g2mg12 = np.where(abs(c) < 1e-7, (c*np.pi)**2.0/6.0, g2-g1**2.0)
+        gam2k = np.where(abs(c) < 1e-7, np.pi**2.0/6.0,
+                         sc.expm1(sc.gammaln(2.0*c+1.0)-2*sc.gammaln(c + 1.0))/c**2.0)
+        eps = 1e-14
+        gamk = np.where(abs(c) < eps, -_EULER, sc.expm1(sc.gammaln(c + 1))/c)
+
+        m = np.where(c < -1.0, np.nan, -gamk)
+        v = np.where(c < -0.5, np.nan, g1**2.0*gam2k)
+
+        # skewness
+        sk1 = _lazywhere(c >= -1./3,
+                         (c, g1, g2, g3, g2mg12),
+                         lambda c, g1, g2, g3, g2mg12:
+                             np.sign(c)*(-g3 + (g2 + 2*g2mg12)*g1)/g2mg12**1.5,
+                         fillvalue=np.nan)
+        sk = np.where(abs(c) <= eps**0.29, 12*np.sqrt(6)*_ZETA3/np.pi**3, sk1)
+
+        # kurtosis
+        ku1 = _lazywhere(c >= -1./4,
+                         (g1, g2, g3, g4, g2mg12),
+                         lambda g1, g2, g3, g4, g2mg12:
+                             (g4 + (-4*g3 + 3*(g2 + g2mg12)*g1)*g1)/g2mg12**2,
+                         fillvalue=np.nan)
+        ku = np.where(abs(c) <= (eps)**0.23, 12.0/5.0, ku1-3.0)
+        return m, v, sk, ku
+
+    def _fitstart(self, data):
+        # This is better than the default shape of (1,).
+        g = _skew(data)
+        if g < 0:
+            a = 0.5
+        else:
+            a = -0.5
+        return super()._fitstart(data, args=(a,))
+
+    def _munp(self, n, c):
+        k = np.arange(0, n+1)
+        vals = 1.0/c**n * np.sum(
+            sc.comb(n, k) * (-1)**k * sc.gamma(c*k + 1),
+            axis=0)
+        return np.where(c*n > -1, vals, np.inf)
+
+    def _entropy(self, c):
+        return _EULER*(1 - c) + 1
+
+
+genextreme = genextreme_gen(name='genextreme')
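+
+# For c == 0 the formulas above collapse to the Gumbel case mentioned in
+# the docstring; a sketch comparing against `gumbel_r` via the public API:
+#
+#     >>> import numpy as np
+#     >>> from scipy.stats import genextreme, gumbel_r
+#     >>> x = np.linspace(-2, 2, 5)
+#     >>> np.allclose(genextreme.cdf(x, 0.0), gumbel_r.cdf(x))
+#     True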
+
+
+def _digammainv(y):
+    """Inverse of the digamma function (real positive arguments only).
+
+    This function is used in the `fit` method of `gamma_gen`.
+    The function uses either optimize.fsolve or optimize.newton
+    to solve `sc.digamma(x) - y = 0`.  There is probably room for
+    improvement, but currently it works over a wide range of y:
+
+    >>> import numpy as np
+    >>> rng = np.random.default_rng()
+    >>> y = 64*rng.standard_normal(1000000)
+    >>> y.min(), y.max()
+    (-311.43592651416662, 351.77388222276869)
+    >>> x = [_digammainv(t) for t in y]
+    >>> np.abs(sc.digamma(x) - y).max()
+    1.1368683772161603e-13
+
+    """
+    _em = 0.5772156649015328606065120
+    func = lambda x: sc.digamma(x) - y
+    if y > -0.125:
+        x0 = np.exp(y) + 0.5
+        if y < 10:
+            # Some experimentation shows that newton reliably converges
+            # much faster than fsolve in this y range.  For larger y,
+            # newton sometimes fails to converge.
+            value = optimize.newton(func, x0, tol=1e-10)
+            return value
+    elif y > -3:
+        x0 = np.exp(y/2.332) + 0.08661
+    else:
+        x0 = 1.0 / (-y - _em)
+
+    value, info, ier, mesg = optimize.fsolve(func, x0, xtol=1e-11,
+                                             full_output=True)
+    if ier != 1:
+        raise RuntimeError("_digammainv: fsolve failed, y = %r" % y)
+
+    return value[0]
+
+
+## Gamma (Use MATLAB and MATHEMATICA (b=theta=scale, a=alpha=shape) definition)
+
+## gamma(a, loc, scale)  with a an integer is the Erlang distribution
+## gamma(1, loc, scale)  is the Exponential distribution
+## gamma(df/2, 0, 2) is the chi2 distribution with df degrees of freedom.
+
+class gamma_gen(rv_continuous):
+    r"""A gamma continuous random variable.
+
+    %(before_notes)s
+
+    See Also
+    --------
+    erlang, expon
+
+    Notes
+    -----
+    The probability density function for `gamma` is:
+
+    .. math::
+
+        f(x, a) = \frac{x^{a-1} e^{-x}}{\Gamma(a)}
+
+    for :math:`x \ge 0`, :math:`a > 0`. Here :math:`\Gamma(a)` refers to the
+    gamma function.
+
+    `gamma` takes ``a`` as a shape parameter for :math:`a`.
+
+    When :math:`a` is an integer, `gamma` reduces to the Erlang
+    distribution, and when :math:`a=1` to the exponential distribution.
+
+    Gamma distributions are sometimes parameterized with two variables,
+    with a probability density function of:
+
+    .. math::
+
+        f(x, \alpha, \beta) = \frac{\beta^\alpha x^{\alpha - 1} e^{-\beta x }}{\Gamma(\alpha)}
+
+    Note that this parameterization is equivalent to the above, with
+    ``scale = 1 / beta``.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("a", False, (0, np.inf), (False, False))]
+
+    def _rvs(self, a, size=None, random_state=None):
+        return random_state.standard_gamma(a, size)
+
+    def _pdf(self, x, a):
+        # gamma.pdf(x, a) = x**(a-1) * exp(-x) / gamma(a)
+        return np.exp(self._logpdf(x, a))
+
+    def _logpdf(self, x, a):
+        return sc.xlogy(a-1.0, x) - x - sc.gammaln(a)
+
+    def _cdf(self, x, a):
+        return sc.gammainc(a, x)
+
+    def _sf(self, x, a):
+        return sc.gammaincc(a, x)
+
+    def _ppf(self, q, a):
+        return sc.gammaincinv(a, q)
+
+    def _isf(self, q, a):
+        return sc.gammainccinv(a, q)
+
+    def _stats(self, a):
+        return a, a, 2.0/np.sqrt(a), 6.0/a
+
+    def _entropy(self, a):
+        return sc.psi(a)*(1-a) + a + sc.gammaln(a)
+
+    def _fitstart(self, data):
+        # The skewness of the gamma distribution is `2 / np.sqrt(a)`.
+        # We invert that to estimate the shape `a` using the skewness
+        # of the data.  The formula is regularized with 1e-8 in the
+        # denominator to allow for degenerate data where the skewness
+        # is close to 0.
+        a = 4 / (1e-8 + _skew(data)**2)
+        return super()._fitstart(data, args=(a,))
+
+    @extend_notes_in_docstring(rv_continuous, notes="""\
+        When the location is fixed by using the argument `floc`
+        and `method='MLE'`, this
+        function uses explicit formulas or solves a simpler numerical
+        problem than the full ML optimization problem.  So in that case,
+        the `optimizer`, `loc` and `scale` arguments are ignored.
+        \n\n""")
+    def fit(self, data, *args, **kwds):
+        floc = kwds.get('floc', None)
+        method = kwds.get('method', 'mle')
+
+        if floc is None or method.lower() == 'mm':
+            # loc is not fixed.  Use the default fit method.
+            return super().fit(data, *args, **kwds)
+
+        # We already have this value, so just pop it from kwds.
+        kwds.pop('floc', None)
+
+        f0 = _get_fixed_fit_value(kwds, ['f0', 'fa', 'fix_a'])
+        fscale = kwds.pop('fscale', None)
+
+        _remove_optimizer_parameters(kwds)
+
+        # Special case: loc is fixed.
+
+        if f0 is not None and fscale is not None:
+            # This check is for consistency with `rv_continuous.fit`.
+            # Without this check, this function would just return the
+            # parameters that were given.
+            raise ValueError("All parameters fixed. There is nothing to "
+                             "optimize.")
+
+        # Fixed location is handled by shifting the data.
+        data = np.asarray(data)
+
+        if not np.isfinite(data).all():
+            raise ValueError("The data contains non-finite values.")
+
+        if np.any(data <= floc):
+            raise FitDataError("gamma", lower=floc, upper=np.inf)
+
+        if floc != 0:
+            # Don't do the subtraction in-place, because `data` might be a
+            # view of the input array.
+            data = data - floc
+        xbar = data.mean()
+
+        # Three cases to handle:
+        # * shape and scale both free
+        # * shape fixed, scale free
+        # * shape free, scale fixed
+
+        if fscale is None:
+            # scale is free
+            if f0 is not None:
+                # shape is fixed
+                a = f0
+            else:
+                # shape and scale are both free.
+                # The MLE for the shape parameter `a` is the solution to:
+                # np.log(a) - sc.digamma(a) - np.log(xbar) +
+                #                             np.log(data).mean() = 0
+                s = np.log(xbar) - np.log(data).mean()
+                func = lambda a: np.log(a) - sc.digamma(a) - s
+                aest = (3-s + np.sqrt((s-3)**2 + 24*s)) / (12*s)
+                xa = aest*(1-0.4)
+                xb = aest*(1+0.4)
+                a = optimize.brentq(func, xa, xb, disp=0)
+
+            # The MLE for the scale parameter is just the data mean
+            # divided by the shape parameter.
+            scale = xbar / a
+        else:
+            # scale is fixed, shape is free
+            # The MLE for the shape parameter `a` is the solution to:
+            # sc.digamma(a) - np.log(data).mean() + np.log(fscale) = 0
+            c = np.log(data).mean() - np.log(fscale)
+            a = _digammainv(c)
+            scale = fscale
+
+        return a, floc, scale
+
+
+gamma = gamma_gen(a=0.0, name='gamma')
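+
+# With `floc=0`, the `fit` override above bypasses the generic optimizer
+# and returns `scale = xbar / a` exactly; a sketch using the public API:
+#
+#     >>> import numpy as np
+#     >>> from scipy.stats import gamma
+#     >>> rng = np.random.default_rng(5)
+#     >>> data = gamma.rvs(3.0, scale=2.0, size=10000, random_state=rng)
+#     >>> a, loc, scale = gamma.fit(data, floc=0)
+#     >>> loc == 0 and np.isclose(scale, data.mean() / a)
+#     True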
+
+
+class erlang_gen(gamma_gen):
+    """An Erlang continuous random variable.
+
+    %(before_notes)s
+
+    See Also
+    --------
+    gamma
+
+    Notes
+    -----
+    The Erlang distribution is a special case of the Gamma distribution, with
+    the shape parameter `a` an integer.  Note that this restriction is not
+    enforced by `erlang`. It will, however, generate a warning the first time
+    a non-integer value is used for the shape parameter.
+
+    Refer to `gamma` for examples.
+
+    """
+
+    def _argcheck(self, a):
+        allint = np.all(np.floor(a) == a)
+        if not allint:
+            # An Erlang distribution shouldn't really have a non-integer
+            # shape parameter, so warn the user.
+            warnings.warn(
+                'The shape parameter of the erlang distribution '
+                'has been given a non-integer value %r.' % (a,),
+                RuntimeWarning)
+        return a > 0
+
+    def _shape_info(self):
+        return [_ShapeInfo("a", True, (1, np.inf), (True, False))]
+
+    def _fitstart(self, data):
+        # Override gamma_gen._fitstart so that an integer initial value is
+        # used.  (Also regularize the division, to avoid issues when
+        # _skew(data) is 0 or close to 0.)
+        a = int(4.0 / (1e-8 + _skew(data)**2))
+        return super(gamma_gen, self)._fitstart(data, args=(a,))
+
+    # Trivial override of the fit method, so we can monkey-patch its
+    # docstring.
+    @extend_notes_in_docstring(rv_continuous, notes="""\
+        The Erlang distribution is generally defined to have integer values
+        for the shape parameter.  This is not enforced by the `erlang` class.
+        When fitting the distribution, it will generally return a non-integer
+        value for the shape parameter.  By using the keyword argument
+        `f0=<integer>`, the fit method can be constrained to fit the data to
+        a specific integer shape parameter.""")
+    def fit(self, data, *args, **kwds):
+        return super().fit(data, *args, **kwds)
+
+
+erlang = erlang_gen(a=0.0, name='erlang')
+
+
+class gengamma_gen(rv_continuous):
+    r"""A generalized gamma continuous random variable.
+
+    %(before_notes)s
+
+    See Also
+    --------
+    gamma, invgamma, weibull_min
+
+    Notes
+    -----
+    The probability density function for `gengamma` is ([1]_):
+
+    .. math::
+
+        f(x, a, c) = \frac{|c| x^{c a-1} \exp(-x^c)}{\Gamma(a)}
+
+    for :math:`x \ge 0`, :math:`a > 0`, and :math:`c \ne 0`.
+    :math:`\Gamma` is the gamma function (`scipy.special.gamma`).
+
+    `gengamma` takes :math:`a` and :math:`c` as shape parameters.
+
+    %(after_notes)s
+
+    References
+    ----------
+    .. [1] E.W. Stacy, "A Generalization of the Gamma Distribution",
+       Annals of Mathematical Statistics, Vol 33(3), pp. 1187--1192.
+
+    %(example)s
+
+    """
+    def _argcheck(self, a, c):
+        return (a > 0) & (c != 0)
+
+    def _shape_info(self):
+        ia = _ShapeInfo("a", False, (0, np.inf), (False, False))
+        ic = _ShapeInfo("c", False, (-np.inf, np.inf), (False, False))
+        return [ia, ic]
+
+    def _pdf(self, x, a, c):
+        return np.exp(self._logpdf(x, a, c))
+
+    def _logpdf(self, x, a, c):
+        return _lazywhere((x != 0) | (c > 0), (x, c),
+                          lambda x, c: (np.log(abs(c)) + sc.xlogy(c*a - 1, x)
+                                        - x**c - sc.gammaln(a)),
+                          fillvalue=-np.inf)
+
+    def _cdf(self, x, a, c):
+        xc = x**c
+        val1 = sc.gammainc(a, xc)
+        val2 = sc.gammaincc(a, xc)
+        return np.where(c > 0, val1, val2)
+
+    def _rvs(self, a, c, size=None, random_state=None):
+        r = random_state.standard_gamma(a, size=size)
+        return r**(1./c)
+
+    def _sf(self, x, a, c):
+        xc = x**c
+        val1 = sc.gammainc(a, xc)
+        val2 = sc.gammaincc(a, xc)
+        return np.where(c > 0, val2, val1)
+
+    def _ppf(self, q, a, c):
+        val1 = sc.gammaincinv(a, q)
+        val2 = sc.gammainccinv(a, q)
+        return np.where(c > 0, val1, val2)**(1.0/c)
+
+    def _isf(self, q, a, c):
+        val1 = sc.gammaincinv(a, q)
+        val2 = sc.gammainccinv(a, q)
+        return np.where(c > 0, val2, val1)**(1.0/c)
+
+    def _munp(self, n, a, c):
+        # Pochhammer symbol: sc.poch(a, n) = gamma(a+n)/gamma(a)
+        return sc.poch(a, n*1.0/c)
+
+    def _entropy(self, a, c):
+        val = sc.psi(a)
+        return a*(1-val) + 1.0/c*val + sc.gammaln(a) - np.log(abs(c))
+
+
+gengamma = gengamma_gen(a=0.0, name='gengamma')
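+
+# `gengamma` contains `gamma` (c=1) and `weibull_min` (a=1) as special
+# cases; a sketch checking both via the public API:
+#
+#     >>> import numpy as np
+#     >>> from scipy.stats import gengamma, gamma, weibull_min
+#     >>> x = np.linspace(0.1, 4, 5)
+#     >>> np.allclose(gengamma.pdf(x, 3.0, 1.0), gamma.pdf(x, 3.0))
+#     True
+#     >>> np.allclose(gengamma.pdf(x, 1.0, 2.5), weibull_min.pdf(x, 2.5))
+#     True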
+
+
+class genhalflogistic_gen(rv_continuous):
+    r"""A generalized half-logistic continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `genhalflogistic` is:
+
+    .. math::
+
+        f(x, c) = \frac{2 (1 - c x)^{1/(c-1)}}{[1 + (1 - c x)^{1/c}]^2}
+
+    for :math:`0 \le x \le 1/c`, and :math:`c > 0`.
+
+    `genhalflogistic` takes ``c`` as a shape parameter for :math:`c`.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
+
+    def _get_support(self, c):
+        return self.a, 1.0/c
+
+    def _pdf(self, x, c):
+        # genhalflogistic.pdf(x, c) =
+        #    2 * (1-c*x)**(1/c-1) / (1+(1-c*x)**(1/c))**2
+        limit = 1.0/c
+        tmp = np.asarray(1-c*x)
+        tmp0 = tmp**(limit-1)
+        tmp2 = tmp0*tmp
+        return 2*tmp0 / (1+tmp2)**2
+
+    def _cdf(self, x, c):
+        limit = 1.0/c
+        tmp = np.asarray(1-c*x)
+        tmp2 = tmp**(limit)
+        return (1.0-tmp2) / (1+tmp2)
+
+    def _ppf(self, q, c):
+        return 1.0/c*(1-((1.0-q)/(1.0+q))**c)
+
+    def _entropy(self, c):
+        return 2 - (2*c+1)*np.log(2)
+
+
+genhalflogistic = genhalflogistic_gen(a=0.0, name='genhalflogistic')
+
+
+class genhyperbolic_gen(rv_continuous):
+    r"""A generalized hyperbolic continuous random variable.
+
+    %(before_notes)s
+
+    See Also
+    --------
+    t, norminvgauss, geninvgauss, laplace, cauchy
+
+    Notes
+    -----
+    The probability density function for `genhyperbolic` is:
+
+    .. math::
+
+        f(x, p, a, b) =
+            \frac{(a^2 - b^2)^{p/2}}
+            {\sqrt{2\pi}a^{p-0.5}
+            K_p\Big(\sqrt{a^2 - b^2}\Big)}
+            e^{bx} \times \frac{K_{p - 1/2}
+            (a \sqrt{1 + x^2})}
+            {(\sqrt{1 + x^2})^{1/2 - p}}
+
+    for :math:`x, p \in ( - \infty; \infty)`,
+    :math:`|b| < a` if :math:`p \ge 0`,
+    :math:`|b| \le a` if :math:`p < 0`.
+    :math:`K_{p}(.)` denotes the modified Bessel function of the second
+    kind and order :math:`p` (`scipy.special.kv`).
+
+    `genhyperbolic` takes ``p`` as a tail parameter,
+    ``a`` as a shape parameter,
+    ``b`` as a skewness parameter.
+
+    %(after_notes)s
+
+    The original parameterization of the Generalized Hyperbolic Distribution
+    is found in [1]_ as follows
+
+    .. math::
+
+        f(x, \lambda, \alpha, \beta, \delta, \mu) =
+           \frac{(\gamma/\delta)^\lambda}{\sqrt{2\pi}K_\lambda(\delta \gamma)}
+           e^{\beta (x - \mu)} \times \frac{K_{\lambda - 1/2}
+           (\alpha \sqrt{\delta^2 + (x - \mu)^2})}
+           {(\sqrt{\delta^2 + (x - \mu)^2} / \alpha)^{1/2 - \lambda}}
+
+    for :math:`x \in ( - \infty; \infty)`,
+    :math:`\gamma := \sqrt{\alpha^2 - \beta^2}`,
+    :math:`\lambda, \mu \in ( - \infty; \infty)`,
+    :math:`\delta \ge 0, |\beta| < \alpha` if :math:`\lambda \ge 0`,
+    :math:`\delta > 0, |\beta| \le \alpha` if :math:`\lambda < 0`.
+
+    The location-scale-based parameterization implemented in
+    SciPy is based on [2]_, where :math:`a = \alpha\delta`,
+    :math:`b = \beta\delta`, :math:`p = \lambda`,
+    :math:`scale=\delta` and :math:`loc=\mu`
+
+    Moments are implemented based on [3]_ and [4]_.
+
+    For distributions that arise as special cases, such as Student's t,
+    it is not recommended to rely on the implementation of `genhyperbolic`.
+    To avoid potential numerical problems and for performance reasons,
+    the methods of the specific distributions should be used.
+
+    References
+    ----------
+    .. [1] O. Barndorff-Nielsen, "Hyperbolic Distributions and Distributions
+       on Hyperbolae", Scandinavian Journal of Statistics, Vol. 5(3),
+       pp. 151-157, 1978. https://www.jstor.org/stable/4615705
+
+    .. [2] Eberlein E., Prause K. (2002) The Generalized Hyperbolic Model:
+        Financial Derivatives and Risk Measures. In: Geman H., Madan D.,
+        Pliska S.R., Vorst T. (eds) Mathematical Finance - Bachelier
+        Congress 2000. Springer Finance. Springer, Berlin, Heidelberg.
+        :doi:`10.1007/978-3-662-12429-1_12`
+
+    .. [3] Scott, David J, Würtz, Diethelm, Dong, Christine and Tran,
+       Thanh Tam, (2009), Moments of the generalized hyperbolic
+       distribution, MPRA Paper, University Library of Munich, Germany,
+       https://EconPapers.repec.org/RePEc:pra:mprapa:19081.
+
+    .. [4] E. Eberlein and E. A. von Hammerstein. Generalized hyperbolic
+       and inverse Gaussian distributions: Limiting cases and approximation
+       of processes. FDM Preprint 80, April 2003. University of Freiburg.
+       https://freidok.uni-freiburg.de/fedora/objects/freidok:7974/datastreams/FILE1/content
+
+    %(example)s
+
+    """
+
+    def _argcheck(self, p, a, b):
+        return (np.logical_and(np.abs(b) < a, p >= 0)
+                | np.logical_and(np.abs(b) <= a, p < 0))
+
+    def _shape_info(self):
+        ip = _ShapeInfo("p", False, (-np.inf, np.inf), (False, False))
+        ia = _ShapeInfo("a", False, (0, np.inf), (True, False))
+        ib = _ShapeInfo("b", False, (-np.inf, np.inf), (False, False))
+        return [ip, ia, ib]
+
+    def _fitstart(self, data):
+        # Arbitrary, but the default a=b=1 is not valid
+        return super()._fitstart(data, args=(1, 1, 0.5))
+
+    def _logpdf(self, x, p, a, b):
+        # kve instead of kv works better for large values of p
+        # and smaller values of sqrt(a^2  - b^2)
+        @np.vectorize
+        def _logpdf_single(x, p, a, b):
+            return _stats.genhyperbolic_logpdf(x, p, a, b)
+
+        return _logpdf_single(x, p, a, b)
+
+    def _pdf(self, x, p, a, b):
+        # kve instead of kv works better for large values of p
+        # and smaller values of sqrt(a^2  - b^2)
+        @np.vectorize
+        def _pdf_single(x, p, a, b):
+            return _stats.genhyperbolic_pdf(x, p, a, b)
+
+        return _pdf_single(x, p, a, b)
+
+    def _cdf(self, x, p, a, b):
+
+        @np.vectorize
+        def _cdf_single(x, p, a, b):
+            user_data = np.array(
+                [p, a, b], float
+                ).ctypes.data_as(ctypes.c_void_p)
+            llc = LowLevelCallable.from_cython(
+                _stats, '_genhyperbolic_pdf', user_data
+                )
+
+            t1 = integrate.quad(llc, -np.inf, x)[0]
+
+            if np.isnan(t1):
+                msg = ("Infinite values encountered in scipy.special.kve. "
+                       "Values replaced by NaN to avoid incorrect results.")
+                warnings.warn(msg, RuntimeWarning)
+
+            return t1
+
+        return _cdf_single(x, p, a, b)
+
+    def _rvs(self, p, a, b, size=None, random_state=None):
+        # note: Y = b * V + sqrt(V) * X has a
+        # generalized hyperbolic distribution
+        # if X is standard normal and V is
+        # geninvgauss(p=p, b=t2, scale=t3)
+        t1 = np.float_power(a, 2) - np.float_power(b, 2)
+        # b in the GIG
+        t2 = np.float_power(t1, 0.5)
+        # scale in the GIG
+        t3 = np.float_power(t1, - 0.5)
+        gig = geninvgauss.rvs(
+            p=p,
+            b=t2,
+            scale=t3,
+            size=size,
+            random_state=random_state
+            )
+        normst = norm.rvs(size=size, random_state=random_state)
+
+        return b * gig + np.sqrt(gig) * normst
+
+    def _stats(self, p, a, b):
+        # https://mpra.ub.uni-muenchen.de/19081/1/MPRA_paper_19081.pdf
+        # https://freidok.uni-freiburg.de/fedora/objects/freidok:7974/datastreams/FILE1/content
+        # standardized moments
+        p, a, b = np.broadcast_arrays(p, a, b)
+        t1 = np.float_power(a, 2) - np.float_power(b, 2)
+        t1 = np.float_power(t1, 0.5)
+        # np.float_power(1, 2) is delta**2 with delta = 1 in the
+        # standardized parameterization; t2 is then delta**2 / gamma.
+        t2 = np.float_power(1, 2) * np.float_power(t1, - 1)
+        integers = np.linspace(0, 4, 5)
+        # make integers perpendicular to existing dimensions
+        integers = integers.reshape(integers.shape + (1,) * p.ndim)
+        b0, b1, b2, b3, b4 = sc.kv(p + integers, t1)
+        r1, r2, r3, r4 = [b / b0 for b in (b1, b2, b3, b4)]
+
+        m = b * t2 * r1
+        v = (
+            t2 * r1 + np.float_power(b, 2) * np.float_power(t2, 2) *
+            (r2 - np.float_power(r1, 2))
+        )
+        m3e = (
+            np.float_power(b, 3) * np.float_power(t2, 3) *
+            (r3 - 3 * b2 * b1 * np.float_power(b0, -2) +
+             2 * np.float_power(r1, 3)) +
+            3 * b * np.float_power(t2, 2) *
+            (r2 - np.float_power(r1, 2))
+        )
+        s = m3e * np.float_power(v, - 3 / 2)
+        m4e = (
+            np.float_power(b, 4) * np.float_power(t2, 4) *
+            (r4 - 4 * b3 * b1 * np.float_power(b0, - 2) +
+             6 * b2 * np.float_power(b1, 2) * np.float_power(b0, - 3) -
+             3 * np.float_power(r1, 4)) +
+            np.float_power(b, 2) * np.float_power(t2, 3) *
+            (6 * r3 - 12 * b2 * b1 * np.float_power(b0, - 2) +
+             6 * np.float_power(r1, 3)) +
+            3 * np.float_power(t2, 2) * r2
+        )
+        k = m4e * np.float_power(v, -2) - 3
+
+        return m, v, s, k
+
+
+genhyperbolic = genhyperbolic_gen(name='genhyperbolic')
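+
+# With p = -1/2 the density above reduces to the normal-inverse Gaussian
+# distribution (see the `norminvgauss` docs); a sketch using the public API:
+#
+#     >>> import numpy as np
+#     >>> from scipy.stats import genhyperbolic, norminvgauss
+#     >>> x = np.linspace(-3, 3, 7)
+#     >>> np.allclose(genhyperbolic.pdf(x, -0.5, 2.0, 1.0),
+#     ...             norminvgauss.pdf(x, 2.0, 1.0))
+#     True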
+
+
+class gompertz_gen(rv_continuous):
+    r"""A Gompertz (or truncated Gumbel) continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `gompertz` is:
+
+    .. math::
+
+        f(x, c) = c \exp(x) \exp(-c (e^x-1))
+
+    for :math:`x \ge 0`, :math:`c > 0`.
+
+    `gompertz` takes ``c`` as a shape parameter for :math:`c`.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
+
+    def _pdf(self, x, c):
+        # gompertz.pdf(x, c) = c * exp(x) * exp(-c*(exp(x)-1))
+        return np.exp(self._logpdf(x, c))
+
+    def _logpdf(self, x, c):
+        return np.log(c) + x - c * sc.expm1(x)
+
+    def _cdf(self, x, c):
+        return -sc.expm1(-c * sc.expm1(x))
+
+    def _ppf(self, q, c):
+        return sc.log1p(-1.0 / c * sc.log1p(-q))
+
+    def _entropy(self, c):
+        return 1.0 - np.log(c) - np.exp(c)*sc.expn(1, c)
+
+
+gompertz = gompertz_gen(a=0.0, name='gompertz')
+
+
+def _average_with_log_weights(x, logweights):
+    x = np.asarray(x)
+    logweights = np.asarray(logweights)
+    maxlogw = logweights.max()
+    weights = np.exp(logweights - maxlogw)
+    return np.average(x, weights=weights)
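+
+# The max-shift above makes the weighted average invariant to a constant
+# offset in the log-weights, which is what keeps `np.exp` from underflowing
+# for very negative inputs; a sketch (the values of `lw` are arbitrary):
+#
+#     >>> import numpy as np
+#     >>> x = np.array([1.0, 2.0, 3.0])
+#     >>> lw = np.array([-1000.0, -1001.0, -1002.0])
+#     >>> np.isclose(_average_with_log_weights(x, lw),
+#     ...            _average_with_log_weights(x, lw + 1000.0))
+#     True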
+
+
+class gumbel_r_gen(rv_continuous):
+    r"""A right-skewed Gumbel continuous random variable.
+
+    %(before_notes)s
+
+    See Also
+    --------
+    gumbel_l, gompertz, genextreme
+
+    Notes
+    -----
+    The probability density function for `gumbel_r` is:
+
+    .. math::
+
+        f(x) = \exp(-(x + e^{-x}))
+
+    The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
+    distribution.  It is also related to the extreme value distribution,
+    log-Weibull and Gompertz distributions.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return []
+
+    def _pdf(self, x):
+        # gumbel_r.pdf(x) = exp(-(x + exp(-x)))
+        return np.exp(self._logpdf(x))
+
+    def _logpdf(self, x):
+        return -x - np.exp(-x)
+
+    def _cdf(self, x):
+        return np.exp(-np.exp(-x))
+
+    def _logcdf(self, x):
+        return -np.exp(-x)
+
+    def _ppf(self, q):
+        return -np.log(-np.log(q))
+
+    def _sf(self, x):
+        return -sc.expm1(-np.exp(-x))
+
+    def _isf(self, p):
+        return -np.log(-np.log1p(-p))
+
+    def _stats(self):
+        return _EULER, np.pi*np.pi/6.0, 12*np.sqrt(6)/np.pi**3 * _ZETA3, 12.0/5
+
+    def _entropy(self):
+        # https://en.wikipedia.org/wiki/Gumbel_distribution
+        return _EULER + 1.
+
+    @_call_super_mom
+    @inherit_docstring_from(rv_continuous)
+    def fit(self, data, *args, **kwds):
+        data, floc, fscale = _check_fit_input_parameters(self, data,
+                                                         args, kwds)
+
+        # By the method of maximum likelihood, the estimators of the
+        # location and scale are the roots of the equations defined in
+        # `func` and the value of the expression for `loc` that follows.
+        # The first `func` is a first-order derivative of the log-likelihood
+        # equation and the second is from Statistical Distributions,
+        # 3rd Edition, Evans, Hastings, and Peacock (2000), Page 101.
+
+        def get_loc_from_scale(scale):
+            return -scale * (sc.logsumexp(-data / scale) - np.log(len(data)))
+
+        if fscale is not None:
+            # if the scale is fixed, the location can be analytically
+            # determined.
+            scale = fscale
+            loc = get_loc_from_scale(scale)
+        else:
+            # A different function is solved depending on whether the location
+            # is fixed.
+            if floc is not None:
+                loc = floc
+
+                # equation to use if the location is fixed.
+                # note that one cannot use the equation in Evans, Hastings,
+                # and Peacock (2000) (since it assumes that the derivative
+                # w.r.t. the log-likelihood is zero). however, it is easy to
+                # derive the MLE condition directly if loc is fixed
+                def func(scale):
+                    term1 = (loc - data) * np.exp((loc - data) / scale) + data
+                    term2 = len(data) * (loc + scale)
+                    return term1.sum() - term2
+            else:
+
+                # equation to use if both location and scale are free
+                def func(scale):
+                    sdata = -data / scale
+                    wavg = _average_with_log_weights(data, logweights=sdata)
+                    return data.mean() - wavg - scale
+
+            # set brackets for `root_scalar` to use when optimizing over the
+            # scale such that a root is likely between them. Use user supplied
+            # guess or default 1.
+            brack_start = kwds.get('scale', 1)
+            lbrack, rbrack = brack_start / 2, brack_start * 2
+
+            # if a root is not between the brackets, iteratively expand them
+            # until they include a sign change, checking after each bracket is
+            # modified.
+            def interval_contains_root(lbrack, rbrack):
+                # return true if the signs disagree.
+                return (np.sign(func(lbrack)) !=
+                        np.sign(func(rbrack)))
+            while (not interval_contains_root(lbrack, rbrack)
+                   and (lbrack > 0 or rbrack < np.inf)):
+                lbrack /= 2
+                rbrack *= 2
+
+            res = optimize.root_scalar(func, bracket=(lbrack, rbrack),
+                                       rtol=1e-14, xtol=1e-14)
+            scale = res.root
+            loc = floc if floc is not None else get_loc_from_scale(scale)
+        return loc, scale
+
+
+gumbel_r = gumbel_r_gen(name='gumbel_r')
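+
+# A sketch of the MLE `fit` above recovering known parameters; the atol is
+# loose because the estimates are random:
+#
+#     >>> import numpy as np
+#     >>> from scipy.stats import gumbel_r
+#     >>> rng = np.random.default_rng(7)
+#     >>> data = gumbel_r.rvs(loc=1.0, scale=2.0, size=10000,
+#     ...                     random_state=rng)
+#     >>> loc, scale = gumbel_r.fit(data)
+#     >>> np.allclose((loc, scale), (1.0, 2.0), atol=0.1)
+#     True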
+
+
+class gumbel_l_gen(rv_continuous):
+    r"""A left-skewed Gumbel continuous random variable.
+
+    %(before_notes)s
+
+    See Also
+    --------
+    gumbel_r, gompertz, genextreme
+
+    Notes
+    -----
+    The probability density function for `gumbel_l` is:
+
+    .. math::
+
+        f(x) = \exp(x - e^x)
+
+    The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
+    distribution.  It is also related to the extreme value distribution,
+    log-Weibull and Gompertz distributions.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+
+    def _shape_info(self):
+        return []
+
+    def _pdf(self, x):
+        # gumbel_l.pdf(x) = exp(x - exp(x))
+        return np.exp(self._logpdf(x))
+
+    def _logpdf(self, x):
+        return x - np.exp(x)
+
+    def _cdf(self, x):
+        return -sc.expm1(-np.exp(x))
+
+    def _ppf(self, q):
+        return np.log(-sc.log1p(-q))
+
+    def _logsf(self, x):
+        return -np.exp(x)
+
+    def _sf(self, x):
+        return np.exp(-np.exp(x))
+
+    def _isf(self, x):
+        return np.log(-np.log(x))
+
+    def _stats(self):
+        return -_EULER, np.pi*np.pi/6.0, \
+               -12*np.sqrt(6)/np.pi**3 * _ZETA3, 12.0/5
+
+    def _entropy(self):
+        return _EULER + 1.
+
+    @_call_super_mom
+    @inherit_docstring_from(rv_continuous)
+    def fit(self, data, *args, **kwds):
+        # The fit method of `gumbel_r` can be used for this distribution with
+        # small modifications. The process to do this is
+        # 1. pass the sign negated data into `gumbel_r.fit`
+        #    - if the location is fixed, it should also be negated.
+        # 2. negate the sign of the resulting location, leaving the scale
+        #    unmodified.
+        # `gumbel_r.fit` holds necessary input checks.
+
+        if kwds.get('floc') is not None:
+            kwds['floc'] = -kwds['floc']
+        loc_r, scale_r = gumbel_r.fit(-np.asarray(data), *args, **kwds)
+        return -loc_r, scale_r
+
+
+gumbel_l = gumbel_l_gen(name='gumbel_l')
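+
+# The negation trick in `fit` above works because `gumbel_l` is the
+# distribution of -X for X ~ `gumbel_r`; a sketch using the public API:
+#
+#     >>> import numpy as np
+#     >>> from scipy.stats import gumbel_l, gumbel_r
+#     >>> x = np.linspace(-2, 2, 5)
+#     >>> np.allclose(gumbel_l.cdf(x), gumbel_r.sf(-x))
+#     True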
+
+
+class halfcauchy_gen(rv_continuous):
+    r"""A Half-Cauchy continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `halfcauchy` is:
+
+    .. math::
+
+        f(x) = \frac{2}{\pi (1 + x^2)}
+
+    for :math:`x \ge 0`.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return []
+
+    def _pdf(self, x):
+        # halfcauchy.pdf(x) = 2 / (pi * (1 + x**2))
+        return 2.0/np.pi/(1.0+x*x)
+
+    def _logpdf(self, x):
+        return np.log(2.0/np.pi) - sc.log1p(x*x)
+
+    def _cdf(self, x):
+        return 2.0/np.pi*np.arctan(x)
+
+    def _ppf(self, q):
+        return np.tan(np.pi/2*q)
+
+    def _stats(self):
+        return np.inf, np.inf, np.nan, np.nan
+
+    def _entropy(self):
+        return np.log(2*np.pi)
+
+
+halfcauchy = halfcauchy_gen(a=0.0, name='halfcauchy')
+
+
+class halflogistic_gen(rv_continuous):
+    r"""A half-logistic continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `halflogistic` is:
+
+    .. math::
+
+        f(x) = \frac{ 2 e^{-x} }{ (1+e^{-x})^2 }
+             = \frac{1}{2} \text{sech}(x/2)^2
+
+    for :math:`x \ge 0`.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return []
+
+    def _pdf(self, x):
+        # halflogistic.pdf(x) = 2 * exp(-x) / (1+exp(-x))**2
+        #                     = 1/2 * sech(x/2)**2
+        return np.exp(self._logpdf(x))
+
+    def _logpdf(self, x):
+        return np.log(2) - x - 2. * sc.log1p(np.exp(-x))
+
+    def _cdf(self, x):
+        return np.tanh(x/2.0)
+
+    def _ppf(self, q):
+        return 2*np.arctanh(q)
+
+    def _munp(self, n):
+        if n == 1:
+            return 2*np.log(2)
+        if n == 2:
+            return np.pi*np.pi/3.0
+        if n == 3:
+            return 9*_ZETA3
+        if n == 4:
+            return 7*np.pi**4 / 15.0
+        return 2*(1-pow(2.0, 1-n))*sc.gamma(n+1)*sc.zeta(n, 1)
+
+    def _entropy(self):
+        return 2-np.log(2)
+
+
+halflogistic = halflogistic_gen(a=0.0, name='halflogistic')
+
+
+class halfnorm_gen(rv_continuous):
+    r"""A half-normal continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `halfnorm` is:
+
+    .. math::
+
+        f(x) = \sqrt{2/\pi} \exp(-x^2 / 2)
+
+    for :math:`x >= 0`.
+
+    `halfnorm` is a special case of `chi` with ``df=1``.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return []
+
+    def _rvs(self, size=None, random_state=None):
+        return abs(random_state.standard_normal(size=size))
+
+    def _pdf(self, x):
+        # halfnorm.pdf(x) = sqrt(2/pi) * exp(-x**2/2)
+        return np.sqrt(2.0/np.pi)*np.exp(-x*x/2.0)
+
+    def _logpdf(self, x):
+        return 0.5 * np.log(2.0/np.pi) - x*x/2.0
+
+    def _cdf(self, x):
+        return _norm_cdf(x)*2-1.0
+
+    def _ppf(self, q):
+        return sc.ndtri((1+q)/2.0)
+
+    def _stats(self):
+        return (np.sqrt(2.0/np.pi),
+                1-2.0/np.pi,
+                np.sqrt(2)*(4-np.pi)/(np.pi-2)**1.5,
+                8*(np.pi-3)/(np.pi-2)**2)
+
+    def _entropy(self):
+        return 0.5*np.log(np.pi/2.0)+0.5
+
+
+halfnorm = halfnorm_gen(a=0.0, name='halfnorm')
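+
+# As noted in the docstring, `halfnorm` coincides with `chi` at ``df=1``;
+# a sketch using the public API:
+#
+#     >>> import numpy as np
+#     >>> from scipy.stats import halfnorm, chi
+#     >>> x = np.linspace(0, 3, 7)
+#     >>> np.allclose(halfnorm.pdf(x), chi.pdf(x, 1))
+#     True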
+
+
+class hypsecant_gen(rv_continuous):
+    r"""A hyperbolic secant continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `hypsecant` is:
+
+    .. math::
+
+        f(x) = \frac{1}{\pi} \text{sech}(x)
+
+    for a real number :math:`x`.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return []
+
+    def _pdf(self, x):
+        # hypsecant.pdf(x) = 1/pi * sech(x)
+        return 1.0/(np.pi*np.cosh(x))
+
+    def _cdf(self, x):
+        return 2.0/np.pi*np.arctan(np.exp(x))
+
+    def _ppf(self, q):
+        return np.log(np.tan(np.pi*q/2.0))
+
+    def _stats(self):
+        return 0, np.pi*np.pi/4, 0, 2
+
+    def _entropy(self):
+        return np.log(2*np.pi)
+
+
+hypsecant = hypsecant_gen(name='hypsecant')
+
+
+class gausshyper_gen(rv_continuous):
+    r"""A Gauss hypergeometric continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `gausshyper` is:
+
+    .. math::
+
+        f(x, a, b, c, z) = C x^{a-1} (1-x)^{b-1} (1+zx)^{-c}
+
+    for :math:`0 \le x \le 1`, :math:`a,b > 0`, :math:`c` a real number,
+    :math:`z > -1`, and :math:`C = \frac{1}{B(a, b) F[2, 1](c, a; a+b; -z)}`.
+    :math:`F[2, 1]` is the Gauss hypergeometric function
+    `scipy.special.hyp2f1`.
+
+    `gausshyper` takes :math:`a`, :math:`b`, :math:`c` and :math:`z` as shape
+    parameters.
+
+    %(after_notes)s
+
+    References
+    ----------
+    .. [1] Armero, C., and M. J. Bayarri. "Prior Assessments for Prediction in
+           Queues." *Journal of the Royal Statistical Society*. Series D (The
+           Statistician) 43, no. 1 (1994): 139-53. doi:10.2307/2348939
+
+    %(example)s
+
+    """
+
+    def _argcheck(self, a, b, c, z):
+        # z > -1 per gh-10134
+        return (a > 0) & (b > 0) & (c == c) & (z > -1)
+
+    def _shape_info(self):
+        ia = _ShapeInfo("a", False, (0, np.inf), (False, False))
+        ib = _ShapeInfo("b", False, (0, np.inf), (False, False))
+        ic = _ShapeInfo("c", False, (-np.inf, np.inf), (False, False))
+        iz = _ShapeInfo("z", False, (-1, np.inf), (False, False))
+        return [ia, ib, ic, iz]
+
+    def _pdf(self, x, a, b, c, z):
+        # gausshyper.pdf(x, a, b, c, z) =
+        #   C * x**(a-1) * (1-x)**(b-1) * (1+z*x)**(-c)
+        Cinv = sc.gamma(a)*sc.gamma(b)/sc.gamma(a+b)*sc.hyp2f1(c, a, a+b, -z)
+        return 1.0/Cinv * x**(a-1.0) * (1.0-x)**(b-1.0) / (1.0+z*x)**c
+
+    def _munp(self, n, a, b, c, z):
+        fac = sc.beta(n+a, b) / sc.beta(a, b)
+        num = sc.hyp2f1(c, a+n, a+b+n, -z)
+        den = sc.hyp2f1(c, a, a+b, -z)
+        return fac*num / den
+
+
+gausshyper = gausshyper_gen(a=0.0, b=1.0, name='gausshyper')
+
+
+class invgamma_gen(rv_continuous):
+    r"""An inverted gamma continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `invgamma` is:
+
+    .. math::
+
+        f(x, a) = \frac{x^{-a-1}}{\Gamma(a)} \exp(-\frac{1}{x})
+
+    for :math:`x >= 0`, :math:`a > 0`. :math:`\Gamma` is the gamma function
+    (`scipy.special.gamma`).
+
+    `invgamma` takes ``a`` as a shape parameter for :math:`a`.
+
+    `invgamma` is a special case of `gengamma` with ``c=-1``, and it is a
+    different parameterization of the scaled inverse chi-squared distribution.
+    Specifically, if the scaled inverse chi-squared distribution is
+    parameterized with degrees of freedom :math:`\nu` and scaling parameter
+    :math:`\tau^2`, then it can be modeled using `invgamma` with
+    ``a=`` :math:`\nu/2` and ``scale=`` :math:`\nu \tau^2/2`.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    _support_mask = rv_continuous._open_support_mask
+
+    def _shape_info(self):
+        return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
+
+    def _pdf(self, x, a):
+        # invgamma.pdf(x, a) = x**(-a-1) / gamma(a) * exp(-1/x)
+        return np.exp(self._logpdf(x, a))
+
+    def _logpdf(self, x, a):
+        return -(a+1) * np.log(x) - sc.gammaln(a) - 1.0/x
+
+    def _cdf(self, x, a):
+        return sc.gammaincc(a, 1.0 / x)
+
+    def _ppf(self, q, a):
+        return 1.0 / sc.gammainccinv(a, q)
+
+    def _sf(self, x, a):
+        return sc.gammainc(a, 1.0 / x)
+
+    def _isf(self, q, a):
+        return 1.0 / sc.gammaincinv(a, q)
+
+    def _stats(self, a, moments='mvsk'):
+        m1 = _lazywhere(a > 1, (a,), lambda x: 1. / (x - 1.), np.inf)
+        m2 = _lazywhere(a > 2, (a,), lambda x: 1. / (x - 1.)**2 / (x - 2.),
+                        np.inf)
+
+        g1, g2 = None, None
+        if 's' in moments:
+            g1 = _lazywhere(
+                a > 3, (a,),
+                lambda x: 4. * np.sqrt(x - 2.) / (x - 3.), np.nan)
+        if 'k' in moments:
+            g2 = _lazywhere(
+                a > 4, (a,),
+                lambda x: 6. * (5. * x - 11.) / (x - 3.) / (x - 4.), np.nan)
+        return m1, m2, g1, g2
+
+    def _entropy(self, a):
+        return a - (a+1.0) * sc.psi(a) + sc.gammaln(a)
+
+
+invgamma = invgamma_gen(a=0.0, name='invgamma')
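+
+# As noted in the docstring, `invgamma` is `gengamma` with ``c=-1``; a
+# sketch using the public API:
+#
+#     >>> import numpy as np
+#     >>> from scipy.stats import invgamma, gengamma
+#     >>> x = np.linspace(0.5, 4, 5)
+#     >>> np.allclose(invgamma.pdf(x, 3.0), gengamma.pdf(x, 3.0, -1.0))
+#     True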
+
+
+class invgauss_gen(rv_continuous):
+    r"""An inverse Gaussian continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `invgauss` is:
+
+    .. math::
+
+        f(x, \mu) = \frac{1}{\sqrt{2 \pi x^3}}
+                    \exp(-\frac{(x-\mu)^2}{2 x \mu^2})
+
+    for :math:`x >= 0` and :math:`\mu > 0`.
+
+    `invgauss` takes ``mu`` as a shape parameter for :math:`\mu`.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    _support_mask = rv_continuous._open_support_mask
+
+    def _shape_info(self):
+        return [_ShapeInfo("mu", False, (0, np.inf), (False, False))]
+
+    def _rvs(self, mu, size=None, random_state=None):
+        return random_state.wald(mu, 1.0, size=size)
+
+    def _pdf(self, x, mu):
+        # invgauss.pdf(x, mu) =
+        #                  1 / sqrt(2*pi*x**3) * exp(-(x-mu)**2/(2*x*mu**2))
+        return 1.0/np.sqrt(2*np.pi*x**3.0)*np.exp(-1.0/(2*x)*((x-mu)/mu)**2)
+
+    def _logpdf(self, x, mu):
+        return -0.5*np.log(2*np.pi) - 1.5*np.log(x) - ((x-mu)/mu)**2/(2*x)
+
+    # approach adapted from equations in
+    # https://journal.r-project.org/archive/2016-1/giner-smyth.pdf,
+    # not R code. see gh-13616
+
+    def _logcdf(self, x, mu):
+        fac = 1 / np.sqrt(x)
+        a = _norm_logcdf(fac * ((x / mu) - 1))
+        b = 2 / mu + _norm_logcdf(-fac * ((x / mu) + 1))
+        return a + np.log1p(np.exp(b - a))
+
+    def _logsf(self, x, mu):
+        fac = 1 / np.sqrt(x)
+        a = _norm_logsf(fac * ((x / mu) - 1))
+        b = 2 / mu + _norm_logcdf(-fac * (x + mu) / mu)
+        return a + np.log1p(-np.exp(b - a))
+
+    def _sf(self, x, mu):
+        return np.exp(self._logsf(x, mu))
+
+    def _cdf(self, x, mu):
+        return np.exp(self._logcdf(x, mu))
+
+    def _ppf(self, x, mu):
+        with np.errstate(divide='ignore', over='ignore', invalid='ignore'):
+            x, mu = np.broadcast_arrays(x, mu)
+            ppf = _boost._invgauss_ppf(x, mu, 1)
+            i_wt = x > 0.5  # "wrong tail" - sometimes too inaccurate
+            ppf[i_wt] = _boost._invgauss_isf(1-x[i_wt], mu[i_wt], 1)
+            i_nan = np.isnan(ppf)
+            ppf[i_nan] = super()._ppf(x[i_nan], mu[i_nan])
+        return ppf
+
+    def _isf(self, x, mu):
+        with np.errstate(divide='ignore', over='ignore', invalid='ignore'):
+            x, mu = np.broadcast_arrays(x, mu)
+            isf = _boost._invgauss_isf(x, mu, 1)
+            i_wt = x > 0.5  # "wrong tail" - sometimes too inaccurate
+            isf[i_wt] = _boost._invgauss_ppf(1-x[i_wt], mu[i_wt], 1)
+            i_nan = np.isnan(isf)
+            isf[i_nan] = super()._isf(x[i_nan], mu[i_nan])
+        return isf
+
+    def _stats(self, mu):
+        return mu, mu**3.0, 3*np.sqrt(mu), 15*mu
+
+    @inherit_docstring_from(rv_continuous)
+    def fit(self, data, *args, **kwds):
+        method = kwds.get('method', 'mle')
+
+        if type(self) == wald_gen or method.lower() == 'mm':
+            return super().fit(data, *args, **kwds)
+
+        data, fshape_s, floc, fscale = _check_fit_input_parameters(self, data,
+                                                                   args, kwds)
+        '''
+        Source: Statistical Distributions, 3rd Edition. Evans, Hastings,
+        and Peacock (2000), Page 121. Their shape parameter is equivalent to
+        SciPy's with the conversion `fshape_s = fshape / scale`.
+
+        MLE formulas are not used in 3 conditions:
+        - `loc` is not fixed (falls back on the superclass fit method)
+        - `mu` is fixed (falls back on the superclass fit method)
+        - `loc` is fixed, but shifting the data by it produces negative
+          values; this raises a `FitDataError`.
+        '''
+        if floc is None or fshape_s is not None:
+            return super().fit(data, *args, **kwds)
+        elif np.any(data - floc < 0):
+            raise FitDataError("invgauss", lower=0, upper=np.inf)
+        else:
+            data = data - floc
+            fshape_n = np.mean(data)
+            if fscale is None:
+                fscale = len(data) / (np.sum(data ** -1 - fshape_n ** -1))
+            fshape_s = fshape_n / fscale
+        return fshape_s, floc, fscale
+
+
+invgauss = invgauss_gen(a=0.0, name='invgauss')
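+
+# With `floc=0`, the `fit` override above uses the closed-form MLE, and the
+# fitted `mu * scale` reproduces the sample mean exactly; a sketch using
+# the public API:
+#
+#     >>> import numpy as np
+#     >>> from scipy.stats import invgauss
+#     >>> rng = np.random.default_rng(3)
+#     >>> data = invgauss.rvs(0.5, size=10000, random_state=rng)
+#     >>> mu, loc, scale = invgauss.fit(data, floc=0)
+#     >>> loc == 0 and np.isclose(mu * scale, data.mean())
+#     True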
+
+
+class geninvgauss_gen(rv_continuous):
+    r"""A Generalized Inverse Gaussian continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `geninvgauss` is:
+
+    .. math::
+
+        f(x, p, b) = x^{p-1} \exp(-b (x + 1/x) / 2) / (2 K_p(b))
+
+    where `x > 0`, `p` is a real number, and `b > 0` ([1]_).
+    :math:`K_p` is the modified Bessel function of the second kind of order
+    :math:`p` (`scipy.special.kv`).
+
+    %(after_notes)s
+
+    The inverse Gaussian distribution `stats.invgauss(mu)` is a special case of
+    `geninvgauss` with `p = -1/2`, `b = 1 / mu` and `scale = mu`.
+
+    Generating random variates is challenging for this distribution. The
+    implementation is based on [2]_.
+
+    References
+    ----------
+    .. [1] O. Barndorff-Nielsen, P. Blaesild, C. Halgreen, "First hitting time
+       models for the generalized inverse gaussian distribution",
+       Stochastic Processes and their Applications 7, pp. 49--54, 1978.
+
+    .. [2] W. Hoermann and J. Leydold, "Generating generalized inverse Gaussian
+       random variates", Statistics and Computing, 24(4), p. 547--557, 2014.
+
+    %(example)s
+
+    """
+    def _argcheck(self, p, b):
+        # p == p is False only for NaN, so this rejects NaN while allowing
+        # any real p; b must be strictly positive
+        return (p == p) & (b > 0)
+
+    def _shape_info(self):
+        ip = _ShapeInfo("p", False, (-np.inf, np.inf), (False, False))
+        ib = _ShapeInfo("b", False, (0, np.inf), (False, False))
+        return [ip, ib]
+
+    def _logpdf(self, x, p, b):
+        # kve instead of kv works better for large values of b
+        # warn if kve produces infinite values and replace by nan
+        # otherwise c = -inf and the results are often incorrect
+        @np.vectorize
+        def logpdf_single(x, p, b):
+            return _stats.geninvgauss_logpdf(x, p, b)
+
+        z = logpdf_single(x, p, b)
+        if np.isnan(z).any():
+            msg = ("Infinite values encountered in scipy.special.kve(p, b). "
+                   "Values replaced by NaN to avoid incorrect results.")
+            warnings.warn(msg, RuntimeWarning)
+        return z
+
+    def _pdf(self, x, p, b):
+        # relying on logpdf avoids overflow of x**(p-1) for large x and p
+        return np.exp(self._logpdf(x, p, b))
+
+    def _cdf(self, x, *args):
+        _a, _b = self._get_support(*args)
+
+        @np.vectorize
+        def _cdf_single(x, *args):
+            p, b = args
+            user_data = np.array([p, b], float).ctypes.data_as(ctypes.c_void_p)
+            llc = LowLevelCallable.from_cython(_stats, '_geninvgauss_pdf',
+                                               user_data)
+
+            return integrate.quad(llc, _a, x)[0]
+
+        return _cdf_single(x, *args)
+
+    def _logquasipdf(self, x, p, b):
+        # log of the quasi-density (w/o normalizing constant) used in _rvs
+        return _lazywhere(x > 0, (x, p, b),
+                          lambda x, p, b: (p - 1)*np.log(x) - b*(x + 1/x)/2,
+                          -np.inf)
+
+    def _rvs(self, p, b, size=None, random_state=None):
+        # if p and b are scalar, use _rvs_scalar, otherwise need to create
+        # output by iterating over parameters
+        if np.isscalar(p) and np.isscalar(b):
+            out = self._rvs_scalar(p, b, size, random_state)
+        elif p.size == 1 and b.size == 1:
+            out = self._rvs_scalar(p.item(), b.item(), size, random_state)
+        else:
+            # When this method is called, size will be a (possibly empty)
+            # tuple of integers.  It will not be None; if `size=None` is passed
+            # to `rvs()`, size will be the empty tuple ().
+
+            p, b = np.broadcast_arrays(p, b)
+            # p and b now have the same shape.
+
+            # `shp` is the shape of the blocks of random variates that are
+            # generated for each combination of parameters associated with
+            # broadcasting p and b.
+            # bc is a tuple the same length as size.  The values
+            # in bc are bools.  If bc[j] is True, it means that the
+            # entire axis is filled in for a given combination of the
+            # broadcast arguments.
+            shp, bc = _check_shape(p.shape, size)
+
+            # `numsamples` is the total number of variates to be generated
+            # for each combination of the input arguments.
+            numsamples = int(np.prod(shp))
+
+            # `out` is the array to be returned.  It is filled in the
+            # loop below.
+            out = np.empty(size)
+
+            it = np.nditer([p, b],
+                           flags=['multi_index'],
+                           op_flags=[['readonly'], ['readonly']])
+            while not it.finished:
+                # Convert the iterator's multi_index into an index into the
+                # `out` array where the call to _rvs_scalar() will be stored.
+                # Where bc is True, we use a full slice; otherwise we use the
+                # index value from it.multi_index.  len(it.multi_index) might
+                # be less than len(bc), and in that case we want to align these
+                # two sequences to the right, so the loop variable j runs from
+                # -len(size) to 0.  This doesn't cause an IndexError, as
+                # bc[j] will be True in those cases where it.multi_index[j]
+                # would cause an IndexError.
+                idx = tuple((it.multi_index[j] if not bc[j] else slice(None))
+                            for j in range(-len(size), 0))
+                out[idx] = self._rvs_scalar(it[0], it[1], numsamples,
+                                            random_state).reshape(shp)
+                it.iternext()
+
+        if size == ():
+            out = out.item()
+        return out
+
+    def _rvs_scalar(self, p, b, numsamples, random_state):
+        # following [2], the quasi-pdf is used instead of the pdf for the
+        # generation of rvs
+        invert_res = False
+        if not numsamples:
+            numsamples = 1
+        if p < 0:
+            # note: if X is geninvgauss(p, b), then 1/X is geninvgauss(-p, b)
+            p = -p
+            invert_res = True
+        m = self._mode(p, b)
+
+        # determine method to be used following [2]
+        ratio_unif = True
+        if p >= 1 or b > 1:
+            # ratio of uniforms with mode shift below
+            mode_shift = True
+        elif b >= min(0.5, 2 * np.sqrt(1 - p) / 3):
+            # ratio of uniforms without mode shift below
+            mode_shift = False
+        else:
+            # new algorithm in [2]
+            ratio_unif = False
+
+        # prepare sampling of rvs
+        size1d = tuple(np.atleast_1d(numsamples))
+        N = np.prod(size1d)  # number of rvs needed, reshape upon return
+        x = np.zeros(N)
+        simulated = 0
+
+        if ratio_unif:
+            # use ratio of uniforms method
+            if mode_shift:
+                a2 = -2 * (p + 1) / b - m
+                a1 = 2 * m * (p - 1) / b - 1
+                # find roots of x**3 + a2*x**2 + a1*x + m (Cardano's formula)
+                p1 = a1 - a2**2 / 3
+                q1 = 2 * a2**3 / 27 - a2 * a1 / 3 + m
+                phi = np.arccos(-q1 * np.sqrt(-27 / p1**3) / 2)
+                s1 = -np.sqrt(-4 * p1 / 3)
+                root1 = s1 * np.cos(phi / 3 + np.pi / 3) - a2 / 3
+                root2 = -s1 * np.cos(phi / 3) - a2 / 3
+                # root3 = s1 * np.cos(phi / 3 - np.pi / 3) - a2 / 3
+
+                # if g is the quasipdf, rescale: g(x) / g(m) which we can write
+                # as exp(log(g(x)) - log(g(m))). This is important
+                # since for large values of p and b, g cannot be evaluated.
+                # denote the rescaled quasipdf by h
+                lm = self._logquasipdf(m, p, b)
+                d1 = self._logquasipdf(root1, p, b) - lm
+                d2 = self._logquasipdf(root2, p, b) - lm
+                # compute the bounding rectangle w.r.t. h. Note that
+                # np.exp(0.5*d1) = np.sqrt(g(root1)/g(m)) = np.sqrt(h(root1))
+                vmin = (root1 - m) * np.exp(0.5 * d1)
+                vmax = (root2 - m) * np.exp(0.5 * d2)
+                umax = 1  # umax = sqrt(h(m)) = 1
+
+                logqpdf = lambda x: self._logquasipdf(x, p, b) - lm
+                c = m
+            else:
+                # ratio of uniforms without mode shift
+                # compute np.sqrt(quasipdf(m))
+                umax = np.exp(0.5*self._logquasipdf(m, p, b))
+                xplus = ((1 + p) + np.sqrt((1 + p)**2 + b**2))/b
+                vmin = 0
+                # compute xplus * np.sqrt(quasipdf(xplus))
+                vmax = xplus * np.exp(0.5 * self._logquasipdf(xplus, p, b))
+                c = 0
+                logqpdf = lambda x: self._logquasipdf(x, p, b)
+
+            if vmin >= vmax:
+                raise ValueError("vmin must be smaller than vmax.")
+            if umax <= 0:
+                raise ValueError("umax must be positive.")
+
+            i = 1
+            while simulated < N:
+                k = N - simulated
+                # simulate uniform rvs on [0, umax] and [vmin, vmax]
+                u = umax * random_state.uniform(size=k)
+                v = random_state.uniform(size=k)
+                v = vmin + (vmax - vmin) * v
+                rvs = v / u + c
+                # rewrite acceptance condition u**2 <= pdf(rvs) by taking logs
+                accept = (2*np.log(u) <= logqpdf(rvs))
+                num_accept = np.sum(accept)
+                if num_accept > 0:
+                    x[simulated:(simulated + num_accept)] = rvs[accept]
+                    simulated += num_accept
+
+                if (simulated == 0) and (i*N >= 50000):
+                    msg = ("Not a single random variate could be generated "
+                           "in {} attempts. Sampling does not appear to "
+                           "work for the provided parameters.".format(i*N))
+                    raise RuntimeError(msg)
+                i += 1
+        else:
+            # use new algorithm in [2]
+            x0 = b / (1 - p)
+            xs = np.max((x0, 2 / b))
+            k1 = np.exp(self._logquasipdf(m, p, b))
+            A1 = k1 * x0
+            if x0 < 2 / b:
+                k2 = np.exp(-b)
+                if p > 0:
+                    A2 = k2 * ((2 / b)**p - x0**p) / p
+                else:
+                    A2 = k2 * np.log(2 / b**2)
+            else:
+                k2, A2 = 0, 0
+            k3 = xs**(p - 1)
+            A3 = 2 * k3 * np.exp(-xs * b / 2) / b
+            A = A1 + A2 + A3
+
+            # [2]: rejection constant is < 2.73; so expected runtime is finite
+            while simulated < N:
+                k = N - simulated
+                h, rvs = np.zeros(k), np.zeros(k)
+                # simulate uniform rvs on [0, 1] and [0, A]
+                u = random_state.uniform(size=k)
+                v = A * random_state.uniform(size=k)
+                cond1 = v <= A1
+                cond2 = np.logical_not(cond1) & (v <= A1 + A2)
+                cond3 = np.logical_not(cond1 | cond2)
+                # subdomain (0, x0)
+                rvs[cond1] = x0 * v[cond1] / A1
+                h[cond1] = k1
+                # subdomain (x0, 2 / b)
+                if p > 0:
+                    rvs[cond2] = (x0**p + (v[cond2] - A1) * p / k2)**(1 / p)
+                else:
+                    rvs[cond2] = b * np.exp((v[cond2] - A1) * np.exp(b))
+                h[cond2] = k2 * rvs[cond2]**(p - 1)
+                # subdomain (xs, infinity)
+                z = np.exp(-xs * b / 2) - b * (v[cond3] - A1 - A2) / (2 * k3)
+                rvs[cond3] = -2 / b * np.log(z)
+                h[cond3] = k3 * np.exp(-rvs[cond3] * b / 2)
+                # apply rejection method
+                accept = (np.log(u * h) <= self._logquasipdf(rvs, p, b))
+                num_accept = np.sum(accept)
+                if num_accept > 0:
+                    x[simulated:(simulated + num_accept)] = rvs[accept]
+                    simulated += num_accept
+
+        rvs = np.reshape(x, size1d)
+        if invert_res:
+            rvs = 1 / rvs
+        return rvs
+
+    def _mode(self, p, b):
+        # distinguish cases to avoid catastrophic cancellation (see [2])
+        if p < 1:
+            return b / (np.sqrt((p - 1)**2 + b**2) + 1 - p)
+        else:
+            return (np.sqrt((1 - p)**2 + b**2) - (1 - p)) / b
+
+    def _munp(self, n, p, b):
+        num = sc.kve(p + n, b)
+        denom = sc.kve(p, b)
+        inf_vals = np.isinf(num) | np.isinf(denom)
+        if inf_vals.any():
+            msg = ("Infinite values encountered in the moment calculation "
+                   "involving scipy.special.kve. Values replaced by NaN to "
+                   "avoid incorrect results.")
+            warnings.warn(msg, RuntimeWarning)
+            m = np.full_like(num, np.nan, dtype=np.double)
+            m[~inf_vals] = num[~inf_vals] / denom[~inf_vals]
+        else:
+            m = num / denom
+        return m
+
+
+geninvgauss = geninvgauss_gen(a=0.0, name="geninvgauss")
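+
+# A quick numerical check of the special case stated in the docstring (a
+# sketch using only the public `scipy.stats` API): `invgauss(mu)` coincides
+# with `geninvgauss` at ``p = -1/2``, ``b = 1/mu`` and ``scale = mu``.
+#
+#     >>> import numpy as np
+#     >>> from scipy import stats
+#     >>> x, mu = np.linspace(0.1, 5, 9), 0.75
+#     >>> np.allclose(stats.invgauss.pdf(x, mu),
+#     ...             stats.geninvgauss.pdf(x, -0.5, 1/mu, scale=mu))
+#     True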
+
+
+class norminvgauss_gen(rv_continuous):
+    r"""A Normal Inverse Gaussian continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `norminvgauss` is:
+
+    .. math::
+
+        f(x, a, b) = \frac{a \, K_1(a \sqrt{1 + x^2})}{\pi \sqrt{1 + x^2}} \,
+                     \exp(\sqrt{a^2 - b^2} + b x)
+
+    where :math:`x` is a real number, the parameter :math:`a` is the tail
+    heaviness and :math:`b` is the asymmetry parameter satisfying
+    :math:`a > 0` and :math:`|b| <= a`.
+    :math:`K_1` is the modified Bessel function of the second kind
+    (`scipy.special.k1`).
+
+    %(after_notes)s
+
+    A normal inverse Gaussian random variable `Y` with parameters `a` and `b`
+    can be expressed as a normal mean-variance mixture:
+    `Y = b * V + sqrt(V) * X` where `X` is `norm(0,1)` and `V` is
+    `invgauss(mu=1/sqrt(a**2 - b**2))`. This representation is used
+    to generate random variates.
+
+    Another common parametrization of the distribution (see Equation 2.1 in
+    [2]_) is given by the following expression of the pdf:
+
+    .. math::
+
+        g(x, \alpha, \beta, \delta, \mu) =
+        \frac{\alpha\delta K_1\left(\alpha\sqrt{\delta^2 + (x - \mu)^2}\right)}
+        {\pi \sqrt{\delta^2 + (x - \mu)^2}} \,
+        e^{\delta \sqrt{\alpha^2 - \beta^2} + \beta (x - \mu)}
+
+    In SciPy, this corresponds to
+    `a = alpha * delta, b = beta * delta, loc = mu, scale = delta`.
+
+    References
+    ----------
+    .. [1] O. Barndorff-Nielsen, "Hyperbolic Distributions and Distributions on
+           Hyperbolae", Scandinavian Journal of Statistics, Vol. 5(3),
+           pp. 151-157, 1978.
+
+    .. [2] O. Barndorff-Nielsen, "Normal Inverse Gaussian Distributions and
+           Stochastic Volatility Modelling", Scandinavian Journal of
+           Statistics, Vol. 24, pp. 1-13, 1997.
+
+    %(example)s
+
+    """
+    _support_mask = rv_continuous._open_support_mask
+
+    def _argcheck(self, a, b):
+        return (a > 0) & (np.absolute(b) < a)
+
+    def _shape_info(self):
+        ia = _ShapeInfo("a", False, (0, np.inf), (False, False))
+        ib = _ShapeInfo("b", False, (-np.inf, np.inf), (False, False))
+        return [ia, ib]
+
+    def _fitstart(self, data):
+        # Arbitrary, but the default a=b=1 is not valid
+        return super()._fitstart(data, args=(1, 0.5))
+
+    def _pdf(self, x, a, b):
+        gamma = np.sqrt(a**2 - b**2)
+        fac1 = a / np.pi * np.exp(gamma)
+        sq = np.hypot(1, x)  # reduce overflows
+        return fac1 * sc.k1e(a * sq) * np.exp(b*x - a*sq) / sq
+
+    def _sf(self, x, a, b):
+        if np.isscalar(x):
+            # If x is a scalar, then so are a and b.
+            return integrate.quad(self._pdf, x, np.inf, args=(a, b))[0]
+        else:
+            result = []
+            for (x0, a0, b0) in zip(x, a, b):
+                result.append(integrate.quad(self._pdf, x0, np.inf,
+                                             args=(a0, b0))[0])
+            return np.array(result)
+
+    def _isf(self, q, a, b):
+        def _isf_scalar(q, a, b):
+
+            def eq(x, a, b, q):
+                # Solve eq(x, a, b, q) = 0 to obtain isf(x, a, b) = q.
+                return self._sf(x, a, b) - q
+
+            # Find a bracketing interval for the root.
+            # Start at the mean, and grow the length of the interval
+            # by 2 each iteration until there is a sign change in eq.
+            xm = self.mean(a, b)
+            em = eq(xm, a, b, q)
+            if em == 0:
+                # Unlikely, but might as well check.
+                return xm
+            if em > 0:
+                delta = 1
+                left = xm
+                right = xm + delta
+                while eq(right, a, b, q) > 0:
+                    delta = 2*delta
+                    right = xm + delta
+            else:
+                # em < 0
+                delta = 1
+                right = xm
+                left = xm - delta
+                while eq(left, a, b, q) < 0:
+                    delta = 2*delta
+                    left = xm - delta
+            result = optimize.brentq(eq, left, right, args=(a, b, q),
+                                     xtol=self.xtol)
+            return result
+
+        if np.isscalar(q):
+            return _isf_scalar(q, a, b)
+        else:
+            result = []
+            for (q0, a0, b0) in zip(q, a, b):
+                result.append(_isf_scalar(q0, a0, b0))
+            return np.array(result)
+
+    def _rvs(self, a, b, size=None, random_state=None):
+        # note: Y = b * V + sqrt(V) * X is norminvgauss(a, b) if X is a
+        # standard normal and V is invgauss(mu=1/sqrt(a**2 - b**2))
+        gamma = np.sqrt(a**2 - b**2)
+        ig = invgauss.rvs(mu=1/gamma, size=size, random_state=random_state)
+        return b * ig + np.sqrt(ig) * norm.rvs(size=size,
+                                               random_state=random_state)
+
+    def _stats(self, a, b):
+        gamma = np.sqrt(a**2 - b**2)
+        mean = b / gamma
+        variance = a**2 / gamma**3
+        skewness = 3.0 * b / (a * np.sqrt(gamma))
+        kurtosis = 3.0 * (1 + 4 * b**2 / a**2) / gamma
+        return mean, variance, skewness, kurtosis
+
+
+norminvgauss = norminvgauss_gen(name="norminvgauss")
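+
+# A Monte Carlo sketch of the mean-variance mixture representation described
+# in the docstring (illustrative only; the tolerance is deliberately loose):
+# Y = b*V + sqrt(V)*X, with X standard normal and V inverse Gaussian, should
+# reproduce the norminvgauss mean b / sqrt(a**2 - b**2).
+#
+#     >>> import numpy as np
+#     >>> from scipy import stats
+#     >>> a, b = 2.0, 1.0
+#     >>> gamma = np.sqrt(a**2 - b**2)
+#     >>> rng = np.random.default_rng(5)
+#     >>> V = stats.invgauss.rvs(1/gamma, size=50000, random_state=rng)
+#     >>> X = stats.norm.rvs(size=50000, random_state=rng)
+#     >>> Y = b * V + np.sqrt(V) * X
+#     >>> np.isclose(Y.mean(), b / gamma, atol=0.05)
+#     True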
+
+
+class invweibull_gen(rv_continuous):
+    """An inverted Weibull continuous random variable.
+
+    This distribution is also known as the Fréchet distribution or the
+    type II extreme value distribution.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `invweibull` is:
+
+    .. math::
+
+        f(x, c) = c x^{-c-1} \\exp(-x^{-c})
+
+    for :math:`x > 0`, :math:`c > 0`.
+
+    `invweibull` takes ``c`` as a shape parameter for :math:`c`.
+
+    %(after_notes)s
+
+    References
+    ----------
+    F.R.S. de Gusmao, E.M.M. Ortega and G.M. Cordeiro, "The generalized
+    inverse
+    Weibull distribution", Stat. Papers, vol. 52, pp. 591-619, 2011.
+
+    %(example)s
+
+    """
+    _support_mask = rv_continuous._open_support_mask
+
+    def _shape_info(self):
+        return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
+
+    def _pdf(self, x, c):
+        # invweibull.pdf(x, c) = c * x**(-c-1) * exp(-x**(-c))
+        xc1 = np.power(x, -c - 1.0)
+        xc2 = np.power(x, -c)
+        xc2 = np.exp(-xc2)
+        return c * xc1 * xc2
+
+    def _cdf(self, x, c):
+        xc1 = np.power(x, -c)
+        return np.exp(-xc1)
+
+    def _sf(self, x, c):
+        return -np.expm1(-x**-c)
+
+    def _ppf(self, q, c):
+        return np.power(-np.log(q), -1.0/c)
+
+    def _isf(self, p, c):
+        return (-np.log1p(-p))**(-1/c)
+
+    def _munp(self, n, c):
+        return sc.gamma(1 - n / c)
+
+    def _entropy(self, c):
+        return 1+_EULER + _EULER / c - np.log(c)
+
+    def _fitstart(self, data, args=None):
+        # invweibull requires c > 1 for the first moment to exist, so use 2.0
+        args = (2.0,) if args is None else args
+        return super()._fitstart(data, args=args)
+
+
+invweibull = invweibull_gen(a=0, name='invweibull')
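+
+# A doctest-style round trip through the closed forms above (a sketch using
+# the public API): ``cdf(x, c) = exp(-x**(-c))`` and `ppf` is its inverse.
+#
+#     >>> import numpy as np
+#     >>> from scipy import stats
+#     >>> c, q = 2.5, np.array([0.1, 0.5, 0.9])
+#     >>> x = stats.invweibull.ppf(q, c)
+#     >>> np.allclose(stats.invweibull.cdf(x, c), q)
+#     True
+#     >>> np.allclose(np.exp(-x**-c), q)
+#     True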
+
+
+class johnsonsb_gen(rv_continuous):
+    r"""A Johnson SB continuous random variable.
+
+    %(before_notes)s
+
+    See Also
+    --------
+    johnsonsu
+
+    Notes
+    -----
+    The probability density function for `johnsonsb` is:
+
+    .. math::
+
+        f(x, a, b) = \frac{b}{x(1-x)}  \phi(a + b \log \frac{x}{1-x} )
+
+    where :math:`x`, :math:`a`, and :math:`b` are real scalars; :math:`b > 0`
+    and :math:`x \in [0,1]`.  :math:`\phi` is the pdf of the normal
+    distribution.
+
+    `johnsonsb` takes :math:`a` and :math:`b` as shape parameters.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    _support_mask = rv_continuous._open_support_mask
+
+    def _argcheck(self, a, b):
+        return (b > 0) & (a == a)
+
+    def _shape_info(self):
+        ia = _ShapeInfo("a", False, (-np.inf, np.inf), (False, False))
+        ib = _ShapeInfo("b", False, (0, np.inf), (False, False))
+        return [ia, ib]
+
+    def _pdf(self, x, a, b):
+        # johnsonsb.pdf(x, a, b) = b / (x*(1-x)) * phi(a + b * log(x/(1-x)))
+        trm = _norm_pdf(a + b*np.log(x/(1.0-x)))
+        return b*1.0/(x*(1-x))*trm
+
+    def _cdf(self, x, a, b):
+        return _norm_cdf(a + b*np.log(x/(1.0-x)))
+
+    def _ppf(self, q, a, b):
+        return 1.0 / (1 + np.exp(-1.0 / b * (_norm_ppf(q) - a)))
+
+
+johnsonsb = johnsonsb_gen(a=0.0, b=1.0, name='johnsonsb')
+
+
+class johnsonsu_gen(rv_continuous):
+    r"""A Johnson SU continuous random variable.
+
+    %(before_notes)s
+
+    See Also
+    --------
+    johnsonsb
+
+    Notes
+    -----
+    The probability density function for `johnsonsu` is:
+
+    .. math::
+
+        f(x, a, b) = \frac{b}{\sqrt{x^2 + 1}}
+                     \phi(a + b \log(x + \sqrt{x^2 + 1}))
+
+    where :math:`x`, :math:`a`, and :math:`b` are real scalars; :math:`b > 0`.
+    :math:`\phi` is the pdf of the normal distribution.
+
+    `johnsonsu` takes :math:`a` and :math:`b` as shape parameters.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _argcheck(self, a, b):
+        return (b > 0) & (a == a)
+
+    def _shape_info(self):
+        ia = _ShapeInfo("a", False, (-np.inf, np.inf), (False, False))
+        ib = _ShapeInfo("b", False, (0, np.inf), (False, False))
+        return [ia, ib]
+
+    def _pdf(self, x, a, b):
+        # johnsonsu.pdf(x, a, b) = b / sqrt(x**2 + 1) *
+        #                          phi(a + b * log(x + sqrt(x**2 + 1)))
+        x2 = x*x
+        trm = _norm_pdf(a + b * np.log(x + np.sqrt(x2+1)))
+        return b*1.0/np.sqrt(x2+1.0)*trm
+
+    def _cdf(self, x, a, b):
+        return _norm_cdf(a + b * np.log(x + np.sqrt(x*x + 1)))
+
+    def _ppf(self, q, a, b):
+        return np.sinh((_norm_ppf(q) - a) / b)
+
+
+johnsonsu = johnsonsu_gen(name='johnsonsu')
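+
+# A sketch of the transformation behind `_ppf` above (public API only): if Z
+# is standard normal, then X = sinh((Z - a) / b) is Johnson SU; equivalently,
+# a + b * arcsinh(X) is standard normal.
+#
+#     >>> import numpy as np
+#     >>> from scipy import stats
+#     >>> a, b = 1.0, 2.0
+#     >>> z = np.array([-1.5, 0.0, 0.7])
+#     >>> x = np.sinh((z - a) / b)
+#     >>> np.allclose(stats.johnsonsu.cdf(x, a, b), stats.norm.cdf(z))
+#     True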
+
+
+class laplace_gen(rv_continuous):
+    r"""A Laplace continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `laplace` is
+
+    .. math::
+
+        f(x) = \frac{1}{2} \exp(-|x|)
+
+    for a real number :math:`x`.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return []
+
+    def _rvs(self, size=None, random_state=None):
+        return random_state.laplace(0, 1, size=size)
+
+    def _pdf(self, x):
+        # laplace.pdf(x) = 1/2 * exp(-abs(x))
+        return 0.5*np.exp(-abs(x))
+
+    def _cdf(self, x):
+        with np.errstate(over='ignore'):
+            return np.where(x > 0, 1.0 - 0.5*np.exp(-x), 0.5*np.exp(x))
+
+    def _sf(self, x):
+        # By symmetry...
+        return self._cdf(-x)
+
+    def _ppf(self, q):
+        return np.where(q > 0.5, -np.log(2*(1-q)), np.log(2*q))
+
+    def _isf(self, q):
+        # By symmetry...
+        return -self._ppf(q)
+
+    def _stats(self):
+        return 0, 2, 0, 3
+
+    def _entropy(self):
+        return np.log(2)+1
+
+    @_call_super_mom
+    @replace_notes_in_docstring(rv_continuous, notes="""\
+        This function uses explicit formulas for the maximum likelihood
+        estimation of the Laplace distribution parameters, so the keyword
+        arguments `loc`, `scale`, and `optimizer` are ignored.\n\n""")
+    def fit(self, data, *args, **kwds):
+        data, floc, fscale = _check_fit_input_parameters(self, data,
+                                                         args, kwds)
+
+        # Source: Statistical Distributions, 3rd Edition. Evans, Hastings,
+        # and Peacock (2000), Page 124
+
+        if floc is None:
+            floc = np.median(data)
+
+        if fscale is None:
+            fscale = (np.sum(np.abs(data - floc))) / len(data)
+
+        return floc, fscale
+
+
+laplace = laplace_gen(name='laplace')
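+
+# A doctest-style check of the explicit MLE formulas used in `fit` above (a
+# sketch via the public API): the location estimate is the sample median and
+# the scale estimate is the mean absolute deviation about it.
+#
+#     >>> import numpy as np
+#     >>> from scipy import stats
+#     >>> rng = np.random.default_rng(3)
+#     >>> data = stats.laplace.rvs(size=1000, random_state=rng)
+#     >>> loc, scale = stats.laplace.fit(data)
+#     >>> np.isclose(loc, np.median(data))
+#     True
+#     >>> np.isclose(scale, np.abs(data - loc).mean())
+#     True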
+
+
+class laplace_asymmetric_gen(rv_continuous):
+    r"""An asymmetric Laplace continuous random variable.
+
+    %(before_notes)s
+
+    See Also
+    --------
+    laplace : Laplace distribution
+
+    Notes
+    -----
+    The probability density function for `laplace_asymmetric` is
+
+    .. math::
+
+       f(x, \kappa) &= \frac{1}{\kappa+\kappa^{-1}}\exp(-x\kappa),\quad x\ge0\\
+                    &= \frac{1}{\kappa+\kappa^{-1}}\exp(x/\kappa),\quad x<0\\
+
+    for :math:`-\infty < x < \infty`, :math:`\kappa > 0`.
+
+    `laplace_asymmetric` takes ``kappa`` as a shape parameter for
+    :math:`\kappa`. For :math:`\kappa = 1`, it is identical to a
+    Laplace distribution.
+
+    %(after_notes)s
+
+    References
+    ----------
+    .. [1] "Asymmetric Laplace distribution", Wikipedia
+            https://en.wikipedia.org/wiki/Asymmetric_Laplace_distribution
+
+    .. [2] Kozubowski TJ and Podgórski K. A Multivariate and
+           Asymmetric Generalization of Laplace Distribution,
+           Computational Statistics 15, 531--540 (2000).
+           :doi:`10.1007/PL00022717`
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("kappa", False, (0, np.inf), (False, False))]
+
+    def _pdf(self, x, kappa):
+        return np.exp(self._logpdf(x, kappa))
+
+    def _logpdf(self, x, kappa):
+        kapinv = 1/kappa
+        lPx = x * np.where(x >= 0, -kappa, kapinv)
+        lPx -= np.log(kappa+kapinv)
+        return lPx
+
+    def _cdf(self, x, kappa):
+        kapinv = 1/kappa
+        kappkapinv = kappa+kapinv
+        return np.where(x >= 0,
+                        1 - np.exp(-x*kappa)*(kapinv/kappkapinv),
+                        np.exp(x*kapinv)*(kappa/kappkapinv))
+
+    def _sf(self, x, kappa):
+        kapinv = 1/kappa
+        kappkapinv = kappa+kapinv
+        return np.where(x >= 0,
+                        np.exp(-x*kappa)*(kapinv/kappkapinv),
+                        1 - np.exp(x*kapinv)*(kappa/kappkapinv))
+
+    def _ppf(self, q, kappa):
+        kapinv = 1/kappa
+        kappkapinv = kappa+kapinv
+        return np.where(q >= kappa/kappkapinv,
+                        -np.log((1 - q)*kappkapinv*kappa)*kapinv,
+                        np.log(q*kappkapinv/kappa)*kappa)
+
+    def _isf(self, q, kappa):
+        kapinv = 1/kappa
+        kappkapinv = kappa+kapinv
+        return np.where(q <= kapinv/kappkapinv,
+                        -np.log(q*kappkapinv*kappa)*kapinv,
+                        np.log((1 - q)*kappkapinv/kappa)*kappa)
+
+    def _stats(self, kappa):
+        kapinv = 1/kappa
+        mn = kapinv - kappa
+        var = kapinv*kapinv + kappa*kappa
+        g1 = 2.0*(1-np.power(kappa, 6))/np.power(1+np.power(kappa, 4), 1.5)
+        g2 = 6.0*(1+np.power(kappa, 8))/np.power(1+np.power(kappa, 4), 2)
+        return mn, var, g1, g2
+
+    def _entropy(self, kappa):
+        return 1 + np.log(kappa+1/kappa)
+
+
+laplace_asymmetric = laplace_asymmetric_gen(name='laplace_asymmetric')
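+
+# A quick check of the ``kappa = 1`` special case stated in the docstring (a
+# sketch via the public API): the asymmetric Laplace then reduces to the
+# ordinary Laplace distribution.
+#
+#     >>> import numpy as np
+#     >>> from scipy import stats
+#     >>> x = np.linspace(-3, 3, 7)
+#     >>> np.allclose(stats.laplace_asymmetric.pdf(x, 1), stats.laplace.pdf(x))
+#     True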
+
+
+def _check_fit_input_parameters(dist, data, args, kwds):
+    data = np.asarray(data)
+    floc = kwds.get('floc', None)
+    fscale = kwds.get('fscale', None)
+
+    num_shapes = len(dist.shapes.split(",")) if dist.shapes else 0
+    fshape_keys = []
+    fshapes = []
+
+    # user has many options for fixing the shape, so here we standardize it
+    # into 'f' + the number of the shape.
+    # Adapted from `_reduce_func` in `_distn_infrastructure.py`:
+    if dist.shapes:
+        shapes = dist.shapes.replace(',', ' ').split()
+        for j, s in enumerate(shapes):
+            key = 'f' + str(j)
+            names = [key, 'f' + s, 'fix_' + s]
+            val = _get_fixed_fit_value(kwds, names)
+            fshape_keys.append(key)
+            fshapes.append(val)
+            if val is not None:
+                kwds[key] = val
+
+    # determine if there are any unknown arguments in kwds
+    known_keys = {'loc', 'scale', 'optimizer', 'method',
+                  'floc', 'fscale', *fshape_keys}
+    unknown_keys = set(kwds).difference(known_keys)
+    if unknown_keys:
+        raise TypeError(f"Unknown keyword arguments: {unknown_keys}.")
+
+    if len(args) > num_shapes:
+        raise TypeError("Too many positional arguments.")
+
+    if None not in {floc, fscale, *fshapes}:
+        # This check is for consistency with `rv_continuous.fit`.
+        # Without this check, this function would just return the
+        # parameters that were given.
+        raise RuntimeError("All parameters fixed. There is nothing to "
+                           "optimize.")
+
+    if not np.isfinite(data).all():
+        raise ValueError("The data contains non-finite values.")
+
+    return (data, *fshapes, floc, fscale)
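+
+# The standardization above makes the three spellings of a fixed shape
+# parameter interchangeable. A sketch with `invgauss` (shape named ``mu``),
+# using only the public `fit` API; both calls follow the same code path, so
+# the results coincide exactly:
+#
+#     >>> import numpy as np
+#     >>> from scipy import stats
+#     >>> rng = np.random.default_rng(8)
+#     >>> data = stats.invgauss.rvs(0.6, size=500, random_state=rng)
+#     >>> stats.invgauss.fit(data, fmu=0.6) == stats.invgauss.fit(data, f0=0.6)
+#     True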
+
+
+class levy_gen(rv_continuous):
+    r"""A Levy continuous random variable.
+
+    %(before_notes)s
+
+    See Also
+    --------
+    levy_stable, levy_l
+
+    Notes
+    -----
+    The probability density function for `levy` is:
+
+    .. math::
+
+        f(x) = \frac{1}{\sqrt{2\pi x^3}} \exp\left(-\frac{1}{2x}\right)
+
+    for :math:`x >= 0`.
+
+    This is the same as the Levy-stable distribution with :math:`a=1/2` and
+    :math:`b=1`.
+
+    %(after_notes)s
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.stats import levy
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots(1, 1)
+
+    Calculate the first four moments:
+
+    >>> mean, var, skew, kurt = levy.stats(moments='mvsk')
+
+    Display the probability density function (``pdf``):
+
+    >>> # `levy` is very heavy-tailed.
+    >>> # To show a nice plot, let's cut off the upper 40 percent.
+    >>> a, b = levy.ppf(0), levy.ppf(0.6)
+    >>> x = np.linspace(a, b, 100)
+    >>> ax.plot(x, levy.pdf(x),
+    ...        'r-', lw=5, alpha=0.6, label='levy pdf')
+
+    Alternatively, the distribution object can be called (as a function)
+    to fix the shape, location and scale parameters. This returns a "frozen"
+    RV object holding the given parameters fixed.
+
+    Freeze the distribution and display the frozen ``pdf``:
+
+    >>> rv = levy()
+    >>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
+
+    Check accuracy of ``cdf`` and ``ppf``:
+
+    >>> vals = levy.ppf([0.001, 0.5, 0.999])
+    >>> np.allclose([0.001, 0.5, 0.999], levy.cdf(vals))
+    True
+
+    Generate random numbers:
+
+    >>> r = levy.rvs(size=1000)
+
+    And compare the histogram:
+
+    >>> # manual binning to ignore the tail
+    >>> bins = np.concatenate((np.linspace(a, b, 20), [np.max(r)]))
+    >>> ax.hist(r, bins=bins, density=True, histtype='stepfilled', alpha=0.2)
+    >>> ax.set_xlim([x[0], x[-1]])
+    >>> ax.legend(loc='best', frameon=False)
+    >>> plt.show()
+
+    """
+    _support_mask = rv_continuous._open_support_mask
+
+    def _shape_info(self):
+        return []
+
+    def _pdf(self, x):
+        # levy.pdf(x) = 1 / (x * sqrt(2*pi*x)) * exp(-1/(2*x))
+        return 1 / np.sqrt(2*np.pi*x) / x * np.exp(-1/(2*x))
+
+    def _cdf(self, x):
+        # Equivalent to 2*norm.sf(np.sqrt(1/x))
+        return sc.erfc(np.sqrt(0.5 / x))
+
+    def _sf(self, x):
+        return sc.erf(np.sqrt(0.5 / x))
+
+    def _ppf(self, q):
+        # Equivalent to 1.0/(norm.isf(q/2)**2) or 0.5/(erfcinv(q)**2)
+        val = -sc.ndtri(q/2)
+        return 1.0 / (val * val)
+
+    def _isf(self, p):
+        return 1/(2*sc.erfinv(p)**2)
+
+    def _stats(self):
+        return np.inf, np.inf, np.nan, np.nan
+
+
+levy = levy_gen(a=0.0, name="levy")
+
+
+class levy_l_gen(rv_continuous):
+    r"""A left-skewed Levy continuous random variable.
+
+    %(before_notes)s
+
+    See Also
+    --------
+    levy, levy_stable
+
+    Notes
+    -----
+    The probability density function for `levy_l` is:
+
+    .. math::
+        f(x) = \frac{1}{|x| \sqrt{2\pi |x|}}
+               \exp\left(-\frac{1}{2|x|}\right)
+
+    for :math:`x <= 0`.
+
+    This is the same as the Levy-stable distribution with :math:`a=1/2` and
+    :math:`b=-1`.
+
+    %(after_notes)s
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.stats import levy_l
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots(1, 1)
+
+    Calculate the first four moments:
+
+    >>> mean, var, skew, kurt = levy_l.stats(moments='mvsk')
+
+    Display the probability density function (``pdf``):
+
+    >>> # `levy_l` is very heavy-tailed.
+    >>> # To show a nice plot, let's cut off the lower 40 percent.
+    >>> a, b = levy_l.ppf(0.4), levy_l.ppf(1)
+    >>> x = np.linspace(a, b, 100)
+    >>> ax.plot(x, levy_l.pdf(x),
+    ...        'r-', lw=5, alpha=0.6, label='levy_l pdf')
+
+    Alternatively, the distribution object can be called (as a function)
+    to fix the shape, location and scale parameters. This returns a "frozen"
+    RV object holding the given parameters fixed.
+
+    Freeze the distribution and display the frozen ``pdf``:
+
+    >>> rv = levy_l()
+    >>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
+
+    Check accuracy of ``cdf`` and ``ppf``:
+
+    >>> vals = levy_l.ppf([0.001, 0.5, 0.999])
+    >>> np.allclose([0.001, 0.5, 0.999], levy_l.cdf(vals))
+    True
+
+    Generate random numbers:
+
+    >>> r = levy_l.rvs(size=1000)
+
+    And compare the histogram:
+
+    >>> # manual binning to ignore the tail
+    >>> bins = np.concatenate(([np.min(r)], np.linspace(a, b, 20)))
+    >>> ax.hist(r, bins=bins, density=True, histtype='stepfilled', alpha=0.2)
+    >>> ax.set_xlim([x[0], x[-1]])
+    >>> ax.legend(loc='best', frameon=False)
+    >>> plt.show()
+
+    """
+    _support_mask = rv_continuous._open_support_mask
+
+    def _shape_info(self):
+        return []
+
+    def _pdf(self, x):
+        # levy_l.pdf(x) = 1 / (abs(x) * sqrt(2*pi*abs(x))) * exp(-1/(2*abs(x)))
+        ax = abs(x)
+        return 1/np.sqrt(2*np.pi*ax)/ax*np.exp(-1/(2*ax))
+
+    def _cdf(self, x):
+        ax = abs(x)
+        return 2 * _norm_cdf(1 / np.sqrt(ax)) - 1
+
+    def _sf(self, x):
+        ax = abs(x)
+        return 2 * _norm_sf(1 / np.sqrt(ax))
+
+    def _ppf(self, q):
+        val = _norm_ppf((q + 1.0) / 2)
+        return -1.0 / (val * val)
+
+    def _isf(self, p):
+        return -1/_norm_isf(p/2)**2
+
+    def _stats(self):
+        return np.inf, np.inf, np.nan, np.nan
+
+
+levy_l = levy_l_gen(b=0.0, name="levy_l")
+
+
+class logistic_gen(rv_continuous):
+    r"""A logistic (or Sech-squared) continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `logistic` is:
+
+    .. math::
+
+        f(x) = \frac{\exp(-x)}
+                    {(1+\exp(-x))^2}
+
+    `logistic` is a special case of `genlogistic` with ``c=1``.
+
+    Note that the survival function (``logistic.sf``) is equal to the
+    Fermi-Dirac distribution describing fermionic statistics.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return []
+
+    def _rvs(self, size=None, random_state=None):
+        return random_state.logistic(size=size)
+
+    def _pdf(self, x):
+        # logistic.pdf(x) = exp(-x) / (1+exp(-x))**2
+        return np.exp(self._logpdf(x))
+
+    def _logpdf(self, x):
+        y = -np.abs(x)
+        return y - 2. * sc.log1p(np.exp(y))
+
+    def _cdf(self, x):
+        return sc.expit(x)
+
+    def _logcdf(self, x):
+        return sc.log_expit(x)
+
+    def _ppf(self, q):
+        return sc.logit(q)
+
+    def _sf(self, x):
+        return sc.expit(-x)
+
+    def _logsf(self, x):
+        return sc.log_expit(-x)
+
+    def _isf(self, q):
+        return -sc.logit(q)
+
+    def _stats(self):
+        return 0, np.pi*np.pi/3.0, 0, 6.0/5.0
+
+    def _entropy(self):
+        # https://en.wikipedia.org/wiki/Logistic_distribution
+        return 2.0
+
+    @_call_super_mom
+    @inherit_docstring_from(rv_continuous)
+    def fit(self, data, *args, **kwds):
+        if kwds.pop('superfit', False):
+            return super().fit(data, *args, **kwds)
+
+        data, floc, fscale = _check_fit_input_parameters(self, data,
+                                                         args, kwds)
+        n = len(data)
+
+        # rv_continuous provided guesses
+        loc, scale = self._fitstart(data)
+        # these are trumped by user-provided guesses
+        loc, scale = kwds.get('loc', loc), kwds.get('scale', scale)
+
+        # the maximum likelihood estimators `a` and `b` of the location and
+        # scale parameters are roots of the two equations described in `func`.
+        # Source: Statistical Distributions, 3rd Edition. Evans, Hastings, and
+        # Peacock (2000), Page 130
+        def dl_dloc(loc, scale=fscale):
+            c = (data - loc) / scale
+            return np.sum(sc.expit(c)) - n/2
+
+        def dl_dscale(scale, loc=floc):
+            c = (data - loc) / scale
+            return np.sum(c*np.tanh(c/2)) - n
+
+        def func(params):
+            loc, scale = params
+            return dl_dloc(loc, scale), dl_dscale(scale, loc)
+
+        if fscale is not None and floc is None:
+            res = optimize.root(dl_dloc, (loc,))
+            loc = res.x[0]
+            scale = fscale
+        elif floc is not None and fscale is None:
+            res = optimize.root(dl_dscale, (scale,))
+            scale = res.x[0]
+            loc = floc
+        else:
+            res = optimize.root(func, (loc, scale))
+            loc, scale = res.x
+
+        return ((loc, scale) if res.success
+                else super().fit(data, *args, **kwds))
+
+
+logistic = logistic_gen(name='logistic')
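+
+# A sketch checking that `fit` above really returns a root of the two score
+# equations from Evans, Hastings and Peacock (public API only; equality holds
+# up to the root-finder's tolerance):
+#
+#     >>> import numpy as np
+#     >>> from scipy import stats, special
+#     >>> rng = np.random.default_rng(9)
+#     >>> data = stats.logistic.rvs(0.5, 2.0, size=2000, random_state=rng)
+#     >>> loc, scale = stats.logistic.fit(data)
+#     >>> c = (data - loc) / scale
+#     >>> np.isclose(special.expit(c).sum(), len(data) / 2)
+#     True
+#     >>> np.isclose((c * np.tanh(c / 2)).sum(), len(data))
+#     True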
+
+
+class loggamma_gen(rv_continuous):
+    r"""A log gamma continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `loggamma` is:
+
+    .. math::
+
+        f(x, c) = \frac{\exp(c x - \exp(x))}
+                       {\Gamma(c)}
+
+    for all :math:`x, c > 0`. Here, :math:`\Gamma` is the
+    gamma function (`scipy.special.gamma`).
+
+    `loggamma` takes ``c`` as a shape parameter for :math:`c`.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+
+    def _shape_info(self):
+        return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
+
+    def _rvs(self, c, size=None, random_state=None):
+        # Use the property of the gamma distribution Gamma(c)
+        #    Gamma(c) ~ Gamma(c + 1)*U**(1/c),
+        # where U is uniform on [0, 1]. (See, e.g.,
+        # G. Marsaglia and W.W. Tsang, "A simple method for generating gamma
+        # variables", https://doi.org/10.1145/358407.358414)
+        # So
+        #    log(Gamma(c)) ~ log(Gamma(c + 1)) + log(U)/c
+        # Generating a sample with this formulation is a bit slower
+        # than the more obvious log(Gamma(c)), but it avoids loss
+        # of precision when c << 1.
+        return (np.log(random_state.gamma(c + 1, size=size))
+                + np.log(random_state.uniform(size=size))/c)
+
+    def _pdf(self, x, c):
+        # loggamma.pdf(x, c) = exp(c*x-exp(x)) / gamma(c)
+        return np.exp(c*x-np.exp(x)-sc.gammaln(c))
+
+    def _logpdf(self, x, c):
+        return c*x - np.exp(x) - sc.gammaln(c)
+
+    def _cdf(self, x, c):
+        return sc.gammainc(c, np.exp(x))
+
+    def _ppf(self, q, c):
+        return np.log(sc.gammaincinv(c, q))
+
+    def _sf(self, x, c):
+        return sc.gammaincc(c, np.exp(x))
+
+    def _isf(self, q, c):
+        return np.log(sc.gammainccinv(c, q))
+
+    def _stats(self, c):
+        # See, for example, "A Statistical Study of Log-Gamma Distribution", by
+        # Ping Shing Chan (thesis, McMaster University, 1993).
+        mean = sc.digamma(c)
+        var = sc.polygamma(1, c)
+        skewness = sc.polygamma(2, c) / np.power(var, 1.5)
+        excess_kurtosis = sc.polygamma(3, c) / (var*var)
+        return mean, var, skewness, excess_kurtosis
+
+
+loggamma = loggamma_gen(name='loggamma')
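+
+# A sketch of why `_rvs` above uses log(Gamma(c + 1)) + log(U)/c: for very
+# small c, naive Gamma(c) draws can underflow to 0, making their log -inf,
+# while this formulation stays finite and has the right mean, psi(c). Public
+# API only; the tolerance is loose because the check is Monte Carlo.
+#
+#     >>> import numpy as np
+#     >>> from scipy import stats
+#     >>> rng = np.random.default_rng(11)
+#     >>> rvs = stats.loggamma.rvs(1e-3, size=100000, random_state=rng)
+#     >>> np.isfinite(rvs).all()
+#     True
+#     >>> np.isclose(rvs.mean(), stats.loggamma.mean(1e-3), rtol=0.05)
+#     True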
+
+
+class loglaplace_gen(rv_continuous):
+    r"""A log-Laplace continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `loglaplace` is:
+
+    .. math::
+
+        f(x, c) = \begin{cases}\frac{c}{2} x^{ c-1}  &\text{for } 0 < x < 1\\
+                               \frac{c}{2} x^{-c-1}  &\text{for } x \ge 1
+                  \end{cases}
+
+    for :math:`c > 0`.
+
+    `loglaplace` takes ``c`` as a shape parameter for :math:`c`.
+
+    %(after_notes)s
+
+    References
+    ----------
+    T.J. Kozubowski and K. Podgorski, "A log-Laplace growth rate model",
+    The Mathematical Scientist, vol. 28, pp. 49-60, 2003.
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
+
+    def _pdf(self, x, c):
+        # loglaplace.pdf(x, c) = c / 2 * x**(c-1),   for 0 < x < 1
+        #                      = c / 2 * x**(-c-1),  for x >= 1
+        cd2 = c/2.0
+        c = np.where(x < 1, c, -c)
+        return cd2*x**(c-1)
+
+    def _cdf(self, x, c):
+        return np.where(x < 1, 0.5*x**c, 1-0.5*x**(-c))
+
+    def _ppf(self, q, c):
+        return np.where(q < 0.5, (2.0*q)**(1.0/c), (2*(1.0-q))**(-1.0/c))
+
+    def _munp(self, n, c):
+        return c**2 / (c**2 - n**2)
+
+    def _entropy(self, c):
+        return np.log(2.0/c) + 1.0
+
+
+loglaplace = loglaplace_gen(a=0.0, name='loglaplace')
+
+
+def _lognorm_logpdf(x, s):
+    return _lazywhere(x != 0, (x, s),
+                      lambda x, s: -np.log(x)**2 / (2*s**2) - np.log(s*x*np.sqrt(2*np.pi)),
+                      -np.inf)
+
+
+class lognorm_gen(rv_continuous):
+    r"""A lognormal continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `lognorm` is:
+
+    .. math::
+
+        f(x, s) = \frac{1}{s x \sqrt{2\pi}}
+                  \exp\left(-\frac{\log^2(x)}{2s^2}\right)
+
+    for :math:`x > 0`, :math:`s > 0`.
+
+    `lognorm` takes ``s`` as a shape parameter for :math:`s`.
+
+    %(after_notes)s
+
+    Suppose a normally distributed random variable ``X`` has mean ``mu`` and
+    standard deviation ``sigma``. Then ``Y = exp(X)`` is lognormally
+    distributed with ``s = sigma`` and ``scale = exp(mu)``.
+
+    %(example)s
+
+    """
+    _support_mask = rv_continuous._open_support_mask
+
+    def _shape_info(self):
+        return [_ShapeInfo("s", False, (0, np.inf), (False, False))]
+
+    def _rvs(self, s, size=None, random_state=None):
+        return np.exp(s * random_state.standard_normal(size))
+
+    def _pdf(self, x, s):
+        # lognorm.pdf(x, s) = 1 / (s*x*sqrt(2*pi)) * exp(-1/2*(log(x)/s)**2)
+        return np.exp(self._logpdf(x, s))
+
+    def _logpdf(self, x, s):
+        return _lognorm_logpdf(x, s)
+
+    def _cdf(self, x, s):
+        return _norm_cdf(np.log(x) / s)
+
+    def _logcdf(self, x, s):
+        return _norm_logcdf(np.log(x) / s)
+
+    def _ppf(self, q, s):
+        return np.exp(s * _norm_ppf(q))
+
+    def _sf(self, x, s):
+        return _norm_sf(np.log(x) / s)
+
+    def _logsf(self, x, s):
+        return _norm_logsf(np.log(x) / s)
+
+    def _stats(self, s):
+        p = np.exp(s*s)
+        mu = np.sqrt(p)
+        mu2 = p*(p-1)
+        g1 = np.sqrt((p-1))*(2+p)
+        g2 = np.polyval([1, 2, 3, 0, -6.0], p)
+        return mu, mu2, g1, g2
+
+    def _entropy(self, s):
+        return 0.5 * (1 + np.log(2*np.pi) + 2 * np.log(s))
+
+    @_call_super_mom
+    @extend_notes_in_docstring(rv_continuous, notes="""\
+        When `method='MLE'` and
+        the location parameter is fixed by using the `floc` argument,
+        this function uses explicit formulas for the maximum likelihood
+        estimation of the log-normal shape and scale parameters, so the
+        `optimizer`, `loc` and `scale` keyword arguments are ignored.
+        \n\n""")
+    def fit(self, data, *args, **kwds):
+        floc = kwds.get('floc', None)
+        if floc is None:
+            # fall back on the default fit method.
+            return super().fit(data, *args, **kwds)
+
+        f0 = (kwds.get('f0', None) or kwds.get('fs', None) or
+              kwds.get('fix_s', None))
+        fscale = kwds.get('fscale', None)
+
+        if len(args) > 1:
+            raise TypeError("Too many input arguments.")
+        for name in ['f0', 'fs', 'fix_s', 'floc', 'fscale', 'loc', 'scale',
+                     'optimizer', 'method']:
+            kwds.pop(name, None)
+        if kwds:
+            raise TypeError("Unknown arguments: %s." % kwds)
+
+        # Special case: loc is fixed.  Use the maximum likelihood formulas
+        # instead of the numerical solver.
+
+        if f0 is not None and fscale is not None:
+            # This check is for consistency with `rv_continuous.fit`.
+            raise ValueError("All parameters fixed. There is nothing to "
+                             "optimize.")
+
+        data = np.asarray(data)
+
+        if not np.isfinite(data).all():
+            raise ValueError("The data contains non-finite values.")
+
+        floc = float(floc)
+        if floc != 0:
+            # Shifting the data by floc. Don't do the subtraction in-place,
+            # because `data` might be a view of the input array.
+            data = data - floc
+        if np.any(data <= 0):
+            raise FitDataError("lognorm", lower=floc, upper=np.inf)
+        lndata = np.log(data)
+
+        # Three cases to handle:
+        # * shape and scale both free
+        # * shape fixed, scale free
+        # * shape free, scale fixed
+
+        if fscale is None:
+            # scale is free.
+            scale = np.exp(lndata.mean())
+            if f0 is None:
+                # shape is free.
+                shape = lndata.std()
+            else:
+                # shape is fixed.
+                shape = float(f0)
+        else:
+            # scale is fixed, shape is free
+            scale = float(fscale)
+            shape = np.sqrt(((lndata - np.log(scale))**2).mean())
+
+        return shape, floc, scale
+
+
+lognorm = lognorm_gen(a=0.0, name='lognorm')
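+
+# A doctest-style check of the fixed-location MLE above and of the
+# ``Y = exp(X)`` relationship from the docstring (a sketch via the public
+# API): with ``floc=0``, the fitted shape is the standard deviation of the
+# log-data and the scale is exp of its mean.
+#
+#     >>> import numpy as np
+#     >>> from scipy import stats
+#     >>> rng = np.random.default_rng(13)
+#     >>> data = np.exp(stats.norm.rvs(1.5, 0.75, size=5000, random_state=rng))
+#     >>> s, loc, scale = stats.lognorm.fit(data, floc=0)
+#     >>> np.isclose(s, np.log(data).std())
+#     True
+#     >>> np.isclose(scale, np.exp(np.log(data).mean()))
+#     True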
+
+
+class gibrat_gen(rv_continuous):
+    r"""A Gibrat continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `gibrat` is:
+
+    .. math::
+
+        f(x) = \frac{1}{x \sqrt{2\pi}} \exp(-\frac{1}{2} (\log(x))^2)
+
+    `gibrat` is a special case of `lognorm` with ``s=1``.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    _support_mask = rv_continuous._open_support_mask
+
+    def _shape_info(self):
+        return []
+
+    def _rvs(self, size=None, random_state=None):
+        return np.exp(random_state.standard_normal(size))
+
+    def _pdf(self, x):
+        # gibrat.pdf(x) = 1/(x*sqrt(2*pi)) * exp(-1/2*(log(x))**2)
+        return np.exp(self._logpdf(x))
+
+    def _logpdf(self, x):
+        return _lognorm_logpdf(x, 1.0)
+
+    def _cdf(self, x):
+        return _norm_cdf(np.log(x))
+
+    def _ppf(self, q):
+        return np.exp(_norm_ppf(q))
+
+    def _stats(self):
+        p = np.e
+        mu = np.sqrt(p)
+        mu2 = p * (p - 1)
+        g1 = np.sqrt((p - 1)) * (2 + p)
+        g2 = np.polyval([1, 2, 3, 0, -6.0], p)
+        return mu, mu2, g1, g2
+
+    def _entropy(self):
+        return 0.5 * np.log(2 * np.pi) + 0.5
+
+
+# deprecation of gilbrat, see #15911
+deprmsg = ("`gilbrat` is a misspelling of the correct name for the `gibrat` "
+           "distribution, and will be removed in SciPy 1.11.")
+
+
+class gilbrat_gen(gibrat_gen):
+    # override __call__ protocol from rv_generic to also
+    # deprecate instantiation of frozen distributions
+    r"""
+
+    .. deprecated:: 1.9.0
+        `gilbrat` is deprecated, use `gibrat` instead!
+        `gilbrat` is a misspelling of the correct name for the `gibrat`
+        distribution, and will be removed in SciPy 1.11.
+
+    """
+    def __call__(self, *args, **kwds):
+        # align with warning text from np.deprecated that's used for methods
+        msg = "`gilbrat` is deprecated, use `gibrat` instead!\n" + deprmsg
+        warnings.warn(msg, DeprecationWarning, stacklevel=2)
+        return self.freeze(*args, **kwds)
+
+
+gibrat = gibrat_gen(a=0.0, name='gibrat')
+gilbrat = gilbrat_gen(a=0.0, name='gilbrat')
+
+
+# since the deprecated class gets instantiated upon import (and we only want
+# to warn upon use), add the deprecation to each (documented) class method,
+# cf.
+# https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.gilbrat.html
+_gibrat_method_names = [
+    "cdf", "entropy", "expect", "fit", "interval", "isf", "logcdf", "logpdf",
+    "logsf", "mean", "median", "moment", "pdf", "ppf", "rvs", "sf", "stats",
+    "std", "var"
+]
+for m in _gibrat_method_names:
+    wrapper = np.deprecate(getattr(gilbrat, m), f"gilbrat.{m}", f"gibrat.{m}",
+                           deprmsg)
+    setattr(gilbrat, m, wrapper)
+
+
+class maxwell_gen(rv_continuous):
+    r"""A Maxwell continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    A special case of a `chi` distribution, with ``df=3``, ``loc=0.0``,
+    and given ``scale = a``, where ``a`` is the parameter used in the
+    Mathworld description [1]_.
+
+    The probability density function for `maxwell` is:
+
+    .. math::
+
+        f(x) = \sqrt{2/\pi}x^2 \exp(-x^2/2)
+
+    for :math:`x >= 0`.
+
+    %(after_notes)s
+
+    References
+    ----------
+    .. [1] http://mathworld.wolfram.com/MaxwellDistribution.html
+
+    %(example)s
+    """
+    def _shape_info(self):
+        return []
+
+    def _rvs(self, size=None, random_state=None):
+        return chi.rvs(3.0, size=size, random_state=random_state)
+
+    def _pdf(self, x):
+        # maxwell.pdf(x) = sqrt(2/pi)x**2 * exp(-x**2/2)
+        return _SQRT_2_OVER_PI*x*x*np.exp(-x*x/2.0)
+
+    def _logpdf(self, x):
+        # Allow x=0 without 'divide by zero' warnings
+        with np.errstate(divide='ignore'):
+            return _LOG_SQRT_2_OVER_PI + 2*np.log(x) - 0.5*x*x
+
+    def _cdf(self, x):
+        return sc.gammainc(1.5, x*x/2.0)
+
+    def _ppf(self, q):
+        return np.sqrt(2*sc.gammaincinv(1.5, q))
+
+    def _stats(self):
+        val = 3*np.pi-8
+        return (2*np.sqrt(2.0/np.pi),
+                3-8/np.pi,
+                np.sqrt(2)*(32-10*np.pi)/val**1.5,
+                (-12*np.pi*np.pi + 160*np.pi - 384) / val**2.0)
+
+    def _entropy(self):
+        return _EULER + 0.5*np.log(2*np.pi)-0.5
+
+
+maxwell = maxwell_gen(a=0.0, name='maxwell')
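+
+# A quick check of the `chi` special case from the docstring (a sketch using
+# the public API): `maxwell` coincides with `chi` at ``df=3``.
+#
+#     >>> import numpy as np
+#     >>> from scipy import stats
+#     >>> x = np.linspace(0.1, 4, 9)
+#     >>> np.allclose(stats.maxwell.pdf(x), stats.chi.pdf(x, 3))
+#     True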
+
+
+class mielke_gen(rv_continuous):
+    r"""A Mielke Beta-Kappa / Dagum continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `mielke` is:
+
+    .. math::
+
+        f(x, k, s) = \frac{k x^{k-1}}{(1+x^s)^{1+k/s}}
+
+    for :math:`x > 0` and :math:`k, s > 0`. The distribution is sometimes
+    called the Dagum distribution ([2]_). It was already defined in [3]_ as
+    the Burr Type III distribution (`burr` with parameters ``c=s`` and
+    ``d=k/s``).
+
+    `mielke` takes ``k`` and ``s`` as shape parameters.
+
+    %(after_notes)s
+
+    References
+    ----------
+    .. [1] Mielke, P.W., 1973 "Another Family of Distributions for Describing
+           and Analyzing Precipitation Data." J. Appl. Meteor., 12, 275-280
+    .. [2] Dagum, C., 1977 "A new model for personal income distribution."
+           Economie Appliquee, 33, 327-367.
+    .. [3] Burr, I. W. "Cumulative frequency functions", Annals of
+           Mathematical Statistics, 13(2), pp 215-232 (1942).
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        ik = _ShapeInfo("k", False, (0, np.inf), (False, False))
+        i_s = _ShapeInfo("s", False, (0, np.inf), (False, False))
+        return [ik, i_s]
+
+    def _pdf(self, x, k, s):
+        return k*x**(k-1.0) / (1.0+x**s)**(1.0+k*1.0/s)
+
+    def _logpdf(self, x, k, s):
+        # Allow x=0 without 'divide by zero' warnings.
+        with np.errstate(divide='ignore'):
+            return np.log(k) + np.log(x)*(k - 1) - np.log1p(x**s)*(1 + k/s)
+
+    def _cdf(self, x, k, s):
+        return x**k / (1.0+x**s)**(k*1.0/s)
+
+    def _ppf(self, q, k, s):
+        qsk = pow(q, s*1.0/k)
+        return pow(qsk/(1.0-qsk), 1.0/s)
+
+    def _munp(self, n, k, s):
+        def nth_moment(n, k, s):
+            # n-th moment is defined for -k < n < s
+            return sc.gamma((k+n)/s)*sc.gamma(1-n/s)/sc.gamma(k/s)
+
+        return _lazywhere(n < s, (n, k, s), nth_moment, np.inf)
+
+
+mielke = mielke_gen(a=0.0, name='mielke')
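+
+# A numerical check of the Burr Type III identity from the docstring (a
+# sketch via the public API): `mielke(k, s)` matches `burr` with ``c=s`` and
+# ``d=k/s``.
+#
+#     >>> import numpy as np
+#     >>> from scipy import stats
+#     >>> k, s = 3.0, 2.0
+#     >>> x = np.linspace(0.1, 5, 9)
+#     >>> np.allclose(stats.mielke.pdf(x, k, s), stats.burr.pdf(x, s, k / s))
+#     True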
+
+
+class kappa4_gen(rv_continuous):
+    r"""Kappa 4 parameter distribution.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for kappa4 is:
+
+    .. math::
+
+        f(x, h, k) = (1 - k x)^{1/k - 1} (1 - h (1 - k x)^{1/k})^{1/h-1}
+
+    if :math:`h` and :math:`k` are not equal to 0.
+
+    If :math:`h` or :math:`k` are zero then the pdf can be simplified:
+
+    h = 0 and k != 0::
+
+        kappa4.pdf(x, h, k) = (1.0 - k*x)**(1.0/k - 1.0)*
+                              exp(-(1.0 - k*x)**(1.0/k))
+
+    h != 0 and k = 0::
+
+        kappa4.pdf(x, h, k) = exp(-x)*(1.0 - h*exp(-x))**(1.0/h - 1.0)
+
+    h = 0 and k = 0::
+
+        kappa4.pdf(x, h, k) = exp(-x)*exp(-exp(-x))
+
+    kappa4 takes :math:`h` and :math:`k` as shape parameters.
+
+    The kappa4 distribution returns other distributions when certain
+    :math:`h` and :math:`k` values are used.
+
+    +------+-------------+----------------+------------------+
+    | h    | k=0.0       | k=1.0          | -inf<=k<=inf     |
+    +======+=============+================+==================+
+    | -1.0 | Logistic    |                | Generalized      |
+    |      |             |                | Logistic(1)      |
+    |      |             |                |                  |
+    |      | logistic(x) |                |                  |
+    +------+-------------+----------------+------------------+
+    |  0.0 | Gumbel      | Reverse        | Generalized      |
+    |      |             | Exponential(2) | Extreme Value    |
+    |      |             |                |                  |
+    |      | gumbel_r(x) |                | genextreme(x, k) |
+    +------+-------------+----------------+------------------+
+    |  1.0 | Exponential | Uniform        | Generalized      |
+    |      |             |                | Pareto           |
+    |      |             |                |                  |
+    |      | expon(x)    | uniform(x)     | genpareto(x, -k) |
+    +------+-------------+----------------+------------------+
+
+    (1) There are at least five generalized logistic distributions.
+        Four are described here:
+        https://en.wikipedia.org/wiki/Generalized_logistic_distribution
+        The "fifth" one is the one kappa4 should match which currently
+        isn't implemented in scipy:
+        https://en.wikipedia.org/wiki/Talk:Generalized_logistic_distribution
+        https://www.mathwave.com/help/easyfit/html/analyses/distributions/gen_logistic.html
+    (2) This distribution is currently not in scipy.
+
+    References
+    ----------
+    J.C. Finney, "Optimization of a Skewed Logistic Distribution With Respect
+    to the Kolmogorov-Smirnov Test", A Dissertation Submitted to the Graduate
+    Faculty of the Louisiana State University and Agricultural and Mechanical
+    College, (August, 2004),
+    https://digitalcommons.lsu.edu/gradschool_dissertations/3672
+
+    J.R.M. Hosking, "The four-parameter kappa distribution". IBM J. Res.
+    Develop. 38 (3), 251-258 (1994).
+
+    B. Kumphon, A. Kaew-Man, P. Seenoi, "A Rainfall Distribution for the Lampao
+    Site in the Chi River Basin, Thailand", Journal of Water Resource and
+    Protection, vol. 4, 866-869, (2012).
+    :doi:`10.4236/jwarp.2012.410101`
+
+    C. Winchester, "On Estimation of the Four-Parameter Kappa Distribution", A
+    Thesis Submitted to Dalhousie University, Halifax, Nova Scotia, (March
+    2000).
+    http://www.nlc-bnc.ca/obj/s4/f2/dsk2/ftp01/MQ57336.pdf
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _argcheck(self, h, k):
+        shape = np.broadcast_arrays(h, k)[0].shape
+        return np.full(shape, fill_value=True)
+
+    def _shape_info(self):
+        ih = _ShapeInfo("h", False, (-np.inf, np.inf), (False, False))
+        ik = _ShapeInfo("k", False, (-np.inf, np.inf), (False, False))
+        return [ih, ik]
+
+    def _get_support(self, h, k):
+        condlist = [np.logical_and(h > 0, k > 0),
+                    np.logical_and(h > 0, k == 0),
+                    np.logical_and(h > 0, k < 0),
+                    np.logical_and(h <= 0, k > 0),
+                    np.logical_and(h <= 0, k == 0),
+                    np.logical_and(h <= 0, k < 0)]
+
+        def f0(h, k):
+            return (1.0 - np.float_power(h, -k))/k
+
+        def f1(h, k):
+            return np.log(h)
+
+        def f3(h, k):
+            a = np.empty(np.shape(h))
+            a[:] = -np.inf
+            return a
+
+        def f5(h, k):
+            return 1.0/k
+
+        _a = _lazyselect(condlist,
+                             [f0, f1, f0, f3, f3, f5],
+                             [h, k],
+                             default=np.nan)
+
+        def f0(h, k):
+            return 1.0/k
+
+        def f1(h, k):
+            a = np.empty(np.shape(h))
+            a[:] = np.inf
+            return a
+
+        _b = _lazyselect(condlist,
+                             [f0, f1, f1, f0, f1, f1],
+                             [h, k],
+                             default=np.nan)
+        return _a, _b
+
+    def _pdf(self, x, h, k):
+        # kappa4.pdf(x, h, k) = (1.0 - k*x)**(1.0/k - 1.0)*
+        #                       (1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h-1)
+        return np.exp(self._logpdf(x, h, k))
+
+    def _logpdf(self, x, h, k):
+        condlist = [np.logical_and(h != 0, k != 0),
+                    np.logical_and(h == 0, k != 0),
+                    np.logical_and(h != 0, k == 0),
+                    np.logical_and(h == 0, k == 0)]
+
+        def f0(x, h, k):
+            '''pdf = (1.0 - k*x)**(1.0/k - 1.0)*(
+                      1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h-1.0)
+               logpdf = ...
+            '''
+            return (sc.xlog1py(1.0/k - 1.0, -k*x) +
+                    sc.xlog1py(1.0/h - 1.0, -h*(1.0 - k*x)**(1.0/k)))
+
+        def f1(x, h, k):
+            '''pdf = (1.0 - k*x)**(1.0/k - 1.0)*np.exp(-(
+                      1.0 - k*x)**(1.0/k))
+               logpdf = ...
+            '''
+            return sc.xlog1py(1.0/k - 1.0, -k*x) - (1.0 - k*x)**(1.0/k)
+
+        def f2(x, h, k):
+            '''pdf = np.exp(-x)*(1.0 - h*np.exp(-x))**(1.0/h - 1.0)
+               logpdf = ...
+            '''
+            return -x + sc.xlog1py(1.0/h - 1.0, -h*np.exp(-x))
+
+        def f3(x, h, k):
+            '''pdf = np.exp(-x-np.exp(-x))
+               logpdf = ...
+            '''
+            return -x - np.exp(-x)
+
+        return _lazyselect(condlist,
+                           [f0, f1, f2, f3],
+                           [x, h, k],
+                           default=np.nan)
+
+    def _cdf(self, x, h, k):
+        return np.exp(self._logcdf(x, h, k))
+
+    def _logcdf(self, x, h, k):
+        condlist = [np.logical_and(h != 0, k != 0),
+                    np.logical_and(h == 0, k != 0),
+                    np.logical_and(h != 0, k == 0),
+                    np.logical_and(h == 0, k == 0)]
+
+        def f0(x, h, k):
+            '''cdf = (1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h)
+               logcdf = ...
+            '''
+            return (1.0/h)*sc.log1p(-h*(1.0 - k*x)**(1.0/k))
+
+        def f1(x, h, k):
+            '''cdf = np.exp(-(1.0 - k*x)**(1.0/k))
+               logcdf = ...
+            '''
+            return -(1.0 - k*x)**(1.0/k)
+
+        def f2(x, h, k):
+            '''cdf = (1.0 - h*np.exp(-x))**(1.0/h)
+               logcdf = ...
+            '''
+            return (1.0/h)*sc.log1p(-h*np.exp(-x))
+
+        def f3(x, h, k):
+            '''cdf = np.exp(-np.exp(-x))
+               logcdf = ...
+            '''
+            return -np.exp(-x)
+
+        return _lazyselect(condlist,
+                           [f0, f1, f2, f3],
+                           [x, h, k],
+                           default=np.nan)
+
+    def _ppf(self, q, h, k):
+        condlist = [np.logical_and(h != 0, k != 0),
+                    np.logical_and(h == 0, k != 0),
+                    np.logical_and(h != 0, k == 0),
+                    np.logical_and(h == 0, k == 0)]
+
+        def f0(q, h, k):
+            return 1.0/k*(1.0 - ((1.0 - (q**h))/h)**k)
+
+        def f1(q, h, k):
+            return 1.0/k*(1.0 - (-np.log(q))**k)
+
+        def f2(q, h, k):
+            '''ppf = -np.log((1.0 - (q**h))/h)
+            '''
+            return -sc.log1p(-(q**h)) + np.log(h)
+
+        def f3(q, h, k):
+            return -np.log(-np.log(q))
+
+        return _lazyselect(condlist,
+                           [f0, f1, f2, f3],
+                           [q, h, k],
+                           default=np.nan)
+
+    def _get_stats_info(self, h, k):
+        condlist = [
+            np.logical_and(h < 0, k >= 0),
+            k < 0,
+        ]
+
+        def f0(h, k):
+            return (-1.0/h*k).astype(int)
+
+        def f1(h, k):
+            return (-1.0/k).astype(int)
+
+        return _lazyselect(condlist, [f0, f1], [h, k], default=5)
+
+    def _stats(self, h, k):
+        maxr = self._get_stats_info(h, k)
+        outputs = [None if np.any(r < maxr) else np.nan for r in range(1, 5)]
+        return outputs[:]
+
+    def _mom1_sc(self, m, *args):
+        maxr = self._get_stats_info(args[0], args[1])
+        if m >= maxr:
+            return np.nan
+        return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]
+
+
+kappa4 = kappa4_gen(name='kappa4')
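+
+
+# Editorial sketch, not part of upstream scipy (the helper name is
+# illustrative only): spot-checks two rows of the special-case table in the
+# kappa4 docstring, namely (h=1, k=0) -> expon and (h=0, k=0) -> gumbel_r.
+def _sketch_kappa4_special_cases():
+    import numpy as np
+    from scipy import stats
+    x = np.linspace(0.1, 3, 7)
+    assert np.allclose(stats.kappa4.pdf(x, 1.0, 0.0), stats.expon.pdf(x))
+    assert np.allclose(stats.kappa4.pdf(x, 0.0, 0.0), stats.gumbel_r.pdf(x))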
+
+
+class kappa3_gen(rv_continuous):
+    r"""Kappa 3 parameter distribution.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `kappa3` is:
+
+    .. math::
+
+        f(x, a) = a (a + x^a)^{-(a + 1)/a}
+
+    for :math:`x > 0` and :math:`a > 0`.
+
+    `kappa3` takes ``a`` as a shape parameter for :math:`a`.
+
+    References
+    ----------
+    P.W. Mielke and E.S. Johnson, "Three-Parameter Kappa Distribution Maximum
+    Likelihood and Likelihood Ratio Tests", Methods in Weather Research,
+    701-707, (September, 1973),
+    :doi:`10.1175/1520-0493(1973)101<0701:TKDMLE>2.3.CO;2`
+
+    B. Kumphon, "Maximum Entropy and Maximum Likelihood Estimation for the
+    Three-Parameter Kappa Distribution", Open Journal of Statistics, vol 2,
+    415-419 (2012), :doi:`10.4236/ojs.2012.24050`
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("a", False, (0, np.inf), (False, False))]
+
+    def _pdf(self, x, a):
+        # kappa3.pdf(x, a) = a*(a + x**a)**(-(a + 1)/a),     for x > 0
+        return a*(a + x**a)**(-1.0/a-1)
+
+    def _cdf(self, x, a):
+        return x*(a + x**a)**(-1.0/a)
+
+    def _ppf(self, q, a):
+        return (a/(q**-a - 1.0))**(1.0/a)
+
+    def _stats(self, a):
+        outputs = [None if np.any(i < a) else np.nan for i in range(1, 5)]
+        return outputs[:]
+
+    def _mom1_sc(self, m, *args):
+        if np.any(m >= args[0]):
+            return np.nan
+        return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]
+
+
+kappa3 = kappa3_gen(a=0.0, name='kappa3')
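+
+
+# Editorial sketch, not part of upstream scipy (helper name illustrative):
+# integrates the kappa3 pdf numerically and compares the result with the
+# closed-form _cdf above.
+def _sketch_kappa3_cdf_consistency():
+    import numpy as np
+    from scipy import integrate, stats
+    a, x = 2.0, 1.5
+    num, _ = integrate.quad(lambda t: stats.kappa3.pdf(t, a), 0, x)
+    assert np.isclose(num, stats.kappa3.cdf(x, a))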
+
+
+class moyal_gen(rv_continuous):
+    r"""A Moyal continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `moyal` is:
+
+    .. math::
+
+        f(x) = \exp(-(x + \exp(-x))/2) / \sqrt{2\pi}
+
+    for a real number :math:`x`.
+
+    %(after_notes)s
+
+    This distribution has utility in high-energy physics and radiation
+    detection. It describes the energy loss of a charged relativistic
+    particle due to ionization of the medium [1]_. It also provides an
+    approximation for the Landau distribution. For an in-depth description,
+    see [2]_; for additional background, see [3]_.
+
+    References
+    ----------
+    .. [1] J.E. Moyal, "XXX. Theory of ionization fluctuations",
+           The London, Edinburgh, and Dublin Philosophical Magazine
+           and Journal of Science, vol 46, 263-280, (1955).
+           :doi:`10.1080/14786440308521076` (gated)
+    .. [2] G. Cordeiro et al., "The beta Moyal: a useful skew distribution",
+           International Journal of Research and Reviews in Applied Sciences,
+           vol 10, 171-192, (2012).
+           http://www.arpapress.com/Volumes/Vol10Issue2/IJRRAS_10_2_02.pdf
+    .. [3] C. Walck, "Handbook on Statistical Distributions for
+           Experimentalists; International Report SUF-PFY/96-01", Chapter 26,
+           University of Stockholm: Stockholm, Sweden, (2007).
+           http://www.stat.rice.edu/~dobelman/textfiles/DistributionsHandbook.pdf
+
+    .. versionadded:: 1.1.0
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return []
+
+    def _rvs(self, size=None, random_state=None):
+        u1 = gamma.rvs(a=0.5, scale=2, size=size,
+                       random_state=random_state)
+        return -np.log(u1)
+
+    def _pdf(self, x):
+        return np.exp(-0.5 * (x + np.exp(-x))) / np.sqrt(2*np.pi)
+
+    def _cdf(self, x):
+        return sc.erfc(np.exp(-0.5 * x) / np.sqrt(2))
+
+    def _sf(self, x):
+        return sc.erf(np.exp(-0.5 * x) / np.sqrt(2))
+
+    def _ppf(self, x):
+        return -np.log(2 * sc.erfcinv(x)**2)
+
+    def _stats(self):
+        mu = np.log(2) + np.euler_gamma
+        mu2 = np.pi**2 / 2
+        g1 = 28 * np.sqrt(2) * sc.zeta(3) / np.pi**3
+        g2 = 4.
+        return mu, mu2, g1, g2
+
+    def _munp(self, n):
+        if n == 1.0:
+            return np.log(2) + np.euler_gamma
+        elif n == 2.0:
+            return np.pi**2 / 2 + (np.log(2) + np.euler_gamma)**2
+        elif n == 3.0:
+            tmp1 = 1.5 * np.pi**2 * (np.log(2)+np.euler_gamma)
+            tmp2 = (np.log(2)+np.euler_gamma)**3
+            tmp3 = 14 * sc.zeta(3)
+            return tmp1 + tmp2 + tmp3
+        elif n == 4.0:
+            tmp1 = 4 * 14 * sc.zeta(3) * (np.log(2) + np.euler_gamma)
+            tmp2 = 3 * np.pi**2 * (np.log(2) + np.euler_gamma)**2
+            tmp3 = (np.log(2) + np.euler_gamma)**4
+            tmp4 = 7 * np.pi**4 / 4
+            return tmp1 + tmp2 + tmp3 + tmp4
+        else:
+            # fall back to generic numerical integration for higher moments
+            return self._mom1_sc(n)
+
+
+moyal = moyal_gen(name="moyal")
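+
+
+# Editorial sketch, not part of upstream scipy (helper name illustrative):
+# checks the closed-form mean used in moyal._stats, log(2) + euler_gamma,
+# against the sample mean of draws produced by the gamma-based _rvs above.
+def _sketch_moyal_mean():
+    import numpy as np
+    from scipy import stats
+    rng = np.random.default_rng(0)
+    sample = stats.moyal.rvs(size=200_000, random_state=rng)
+    assert np.isclose(sample.mean(), np.log(2) + np.euler_gamma, atol=0.02)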
+
+
+class nakagami_gen(rv_continuous):
+    r"""A Nakagami continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `nakagami` is:
+
+    .. math::
+
+        f(x, \nu) = \frac{2 \nu^\nu}{\Gamma(\nu)} x^{2\nu-1} \exp(-\nu x^2)
+
+    for :math:`x \ge 0`, :math:`\nu > 0`. The distribution was introduced in
+    [2]_; see also [1]_ for further information.
+
+    `nakagami` takes ``nu`` as a shape parameter for :math:`\nu`.
+
+    %(after_notes)s
+
+    References
+    ----------
+    .. [1] "Nakagami distribution", Wikipedia
+           https://en.wikipedia.org/wiki/Nakagami_distribution
+    .. [2] M. Nakagami, "The m-distribution - A general formula of intensity
+           distribution of rapid fading", Statistical methods in radio wave
+           propagation, Pergamon Press, 1960, 3-36.
+           :doi:`10.1016/B978-0-08-009306-2.50005-4`
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("nu", False, (0, np.inf), (False, False))]
+
+    def _pdf(self, x, nu):
+        return np.exp(self._logpdf(x, nu))
+
+    def _logpdf(self, x, nu):
+        # nakagami.pdf(x, nu) = 2 * nu**nu / gamma(nu) *
+        #                       x**(2*nu-1) * exp(-nu*x**2)
+        return (np.log(2) + sc.xlogy(nu, nu) - sc.gammaln(nu) +
+                sc.xlogy(2*nu - 1, x) - nu*x**2)
+
+    def _cdf(self, x, nu):
+        return sc.gammainc(nu, nu*x*x)
+
+    def _ppf(self, q, nu):
+        return np.sqrt(1.0/nu*sc.gammaincinv(nu, q))
+
+    def _sf(self, x, nu):
+        return sc.gammaincc(nu, nu*x*x)
+
+    def _isf(self, p, nu):
+        return np.sqrt(1/nu * sc.gammainccinv(nu, p))
+
+    def _stats(self, nu):
+        mu = sc.gamma(nu+0.5)/sc.gamma(nu)/np.sqrt(nu)
+        mu2 = 1.0-mu*mu
+        g1 = mu * (1 - 4*nu*mu2) / 2.0 / nu / np.power(mu2, 1.5)
+        g2 = -6*mu**4*nu + (8*nu-2)*mu**2-2*nu + 1
+        g2 /= nu*mu2**2.0
+        return mu, mu2, g1, g2
+
+    def _rvs(self, nu, size=None, random_state=None):
+        # this relationship can be found in [1] or by a direct calculation
+        return np.sqrt(random_state.standard_gamma(nu, size=size) / nu)
+
+    def _fitstart(self, data, args=None):
+        if args is None:
+            args = (1.0,) * self.numargs
+        # Analytically justified estimates
+        # see: https://docs.scipy.org/doc/scipy/reference/tutorial/stats/continuous_nakagami.html
+        loc = np.min(data)
+        scale = np.sqrt(np.sum((data - loc)**2) / len(data))
+        return args + (loc, scale)
+
+
+nakagami = nakagami_gen(a=0.0, name="nakagami")
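+
+
+# Editorial sketch, not part of upstream scipy (helper name illustrative):
+# illustrates the sampling relation used in nakagami._rvs -- if G ~ Gamma(nu)
+# then sqrt(G/nu) is nakagami(nu) -- by comparing an empirical mean with the
+# closed-form mean from _stats.
+def _sketch_nakagami_gamma_relation():
+    import numpy as np
+    from scipy import special, stats
+    rng = np.random.default_rng(1)
+    nu = 2.5
+    x = np.sqrt(rng.standard_gamma(nu, size=100_000) / nu)
+    exact = special.gamma(nu + 0.5) / special.gamma(nu) / np.sqrt(nu)
+    assert np.isclose(x.mean(), exact, atol=0.01)
+    assert np.isclose(stats.nakagami.mean(nu), exact)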
+
+
+# The function name ncx2 is an abbreviation for noncentral chi squared.
+def _ncx2_log_pdf(x, df, nc):
+    # We use (xs**2 + ns**2)/2 = (xs - ns)**2/2  + xs*ns, and include the
+    # factor of exp(-xs*ns) into the ive function to improve numerical
+    # stability at large values of xs. See also `rice.pdf`.
+    df2 = df/2.0 - 1.0
+    xs, ns = np.sqrt(x), np.sqrt(nc)
+    res = sc.xlogy(df2/2.0, x/nc) - 0.5*(xs - ns)**2
+    corr = sc.ive(df2, xs*ns) / 2.0
+    # Return res + np.log(corr) avoiding np.log(0)
+    return _lazywhere(
+        corr > 0,
+        (res, corr),
+        f=lambda r, c: r + np.log(c),
+        fillvalue=-np.inf)
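+
+
+# Editorial sketch, not part of upstream scipy (helper name illustrative):
+# the stability trick above relies on `sc.ive` being the exponentially
+# scaled Bessel function, i.e. ive(v, z) = iv(v, z) * exp(-abs(z)) for
+# real z, which stays finite where iv itself would overflow.
+def _sketch_ive_scaling():
+    import numpy as np
+    import scipy.special as sc
+    v, z = 1.5, 10.0
+    assert np.isclose(sc.ive(v, z), sc.iv(v, z) * np.exp(-z))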
+
+
+class ncx2_gen(rv_continuous):
+    r"""A non-central chi-squared continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `ncx2` is:
+
+    .. math::
+
+        f(x, k, \lambda) = \frac{1}{2} \exp(-(\lambda+x)/2)
+            (x/\lambda)^{(k-2)/4}  I_{(k-2)/2}(\sqrt{\lambda x})
+
+    for :math:`x \ge 0`, :math:`k > 0` and :math:`\lambda \ge 0`.
+    :math:`k` specifies the degrees of freedom (denoted ``df`` in the
+    implementation) and :math:`\lambda` is the non-centrality parameter
+    (denoted ``nc`` in the implementation). :math:`I_\nu` denotes the
+    modified Bessel function of the first kind of order :math:`\nu`
+    (`scipy.special.iv`).
+
+    `ncx2` takes ``df`` and ``nc`` as shape parameters.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _argcheck(self, df, nc):
+        return (df > 0) & np.isfinite(df) & (nc >= 0)
+
+    def _shape_info(self):
+        idf = _ShapeInfo("df", False, (0, np.inf), (False, False))
+        inc = _ShapeInfo("nc", False, (0, np.inf), (True, False))
+        return [idf, inc]
+
+    def _rvs(self, df, nc, size=None, random_state=None):
+        return random_state.noncentral_chisquare(df, nc, size)
+
+    def _logpdf(self, x, df, nc):
+        cond = np.ones_like(x, dtype=bool) & (nc != 0)
+        return _lazywhere(cond, (x, df, nc), f=_ncx2_log_pdf,
+                          f2=lambda x, df, _: chi2._logpdf(x, df))
+
+    def _pdf(self, x, df, nc):
+        cond = np.ones_like(x, dtype=bool) & (nc != 0)
+        with warnings.catch_warnings():
+            message = "overflow encountered in _ncx2_pdf"
+            warnings.filterwarnings("ignore", message=message)
+            return _lazywhere(cond, (x, df, nc), f=_boost._ncx2_pdf,
+                              f2=lambda x, df, _: chi2._pdf(x, df))
+
+    def _cdf(self, x, df, nc):
+        cond = np.ones_like(x, dtype=bool) & (nc != 0)
+        return _lazywhere(cond, (x, df, nc), f=_boost._ncx2_cdf,
+                          f2=lambda x, df, _: chi2._cdf(x, df))
+
+    def _ppf(self, q, df, nc):
+        cond = np.ones_like(q, dtype=bool) & (nc != 0)
+        with warnings.catch_warnings():
+            message = "overflow encountered in _ncx2_ppf"
+            warnings.filterwarnings("ignore", message=message)
+            return _lazywhere(cond, (q, df, nc), f=_boost._ncx2_ppf,
+                              f2=lambda x, df, _: chi2._ppf(x, df))
+
+    def _sf(self, x, df, nc):
+        cond = np.ones_like(x, dtype=bool) & (nc != 0)
+        return _lazywhere(cond, (x, df, nc), f=_boost._ncx2_sf,
+                          f2=lambda x, df, _: chi2._sf(x, df))
+
+    def _isf(self, x, df, nc):
+        cond = np.ones_like(x, dtype=bool) & (nc != 0)
+        with warnings.catch_warnings():
+            message = "overflow encountered in _ncx2_isf"
+            warnings.filterwarnings("ignore", message=message)
+            return _lazywhere(cond, (x, df, nc), f=_boost._ncx2_isf,
+                              f2=lambda x, df, _: chi2._isf(x, df))
+
+    def _stats(self, df, nc):
+        return (
+            _boost._ncx2_mean(df, nc),
+            _boost._ncx2_variance(df, nc),
+            _boost._ncx2_skewness(df, nc),
+            _boost._ncx2_kurtosis_excess(df, nc),
+        )
+
+
+ncx2 = ncx2_gen(a=0.0, name='ncx2')
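+
+
+# Editorial sketch, not part of upstream scipy (helper name illustrative):
+# exercises the nc == 0 fallback branches above, under which ncx2 must agree
+# with the central chi2 distribution.
+def _sketch_ncx2_chi2_fallback():
+    import numpy as np
+    from scipy import stats
+    x, df = np.linspace(0.5, 10, 9), 4
+    assert np.allclose(stats.ncx2.pdf(x, df, 0), stats.chi2.pdf(x, df))
+    assert np.allclose(stats.ncx2.cdf(x, df, 0), stats.chi2.cdf(x, df))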
+
+
+class ncf_gen(rv_continuous):
+    r"""A non-central F distribution continuous random variable.
+
+    %(before_notes)s
+
+    See Also
+    --------
+    scipy.stats.f : Fisher distribution
+
+    Notes
+    -----
+    The probability density function for `ncf` is:
+
+    .. math::
+
+        f(x, n_1, n_2, \lambda) =
+            \exp\left(\frac{\lambda}{2} +
+                      \lambda n_1 \frac{x}{2(n_1 x + n_2)}
+                \right)
+            n_1^{n_1/2} n_2^{n_2/2} x^{n_1/2 - 1} \\
+            (n_2 + n_1 x)^{-(n_1 + n_2)/2}
+            \gamma(n_1/2) \gamma(1 + n_2/2) \\
+            \frac{L^{\frac{n_1}{2}-1}_{n_2/2}
+                \left(-\lambda n_1 \frac{x}{2(n_1 x + n_2)}\right)}
+            {B(n_1/2, n_2/2)
+                \gamma\left(\frac{n_1 + n_2}{2}\right)}
+
+    for :math:`n_1, n_2 > 0`, :math:`\lambda \ge 0`.  Here :math:`n_1` is the
+    degrees of freedom in the numerator, :math:`n_2` the degrees of freedom in
+    the denominator, :math:`\lambda` the non-centrality parameter,
+    :math:`\gamma` is the logarithm of the Gamma function, :math:`L_n^k` is a
+    generalized Laguerre polynomial and :math:`B` is the beta function.
+
+    `ncf` takes ``df1``, ``df2`` and ``nc`` as shape parameters. If ``nc=0``,
+    the distribution becomes equivalent to the Fisher distribution.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _argcheck(self, df1, df2, nc):
+        return (df1 > 0) & (df2 > 0) & (nc >= 0)
+
+    def _shape_info(self):
+        idf1 = _ShapeInfo("df1", False, (0, np.inf), (False, False))
+        idf2 = _ShapeInfo("df2", False, (0, np.inf), (False, False))
+        inc = _ShapeInfo("nc", False, (0, np.inf), (True, False))
+        return [idf1, idf2, inc]
+
+    def _rvs(self, dfn, dfd, nc, size=None, random_state=None):
+        return random_state.noncentral_f(dfn, dfd, nc, size)
+
+    def _pdf(self, x, dfn, dfd, nc):
+        # ncf.pdf(x, df1, df2, nc) = exp(nc/2 + nc*df1*x/(2*(df1*x+df2))) *
+        #             df1**(df1/2) * df2**(df2/2) * x**(df1/2-1) *
+        #             (df2+df1*x)**(-(df1+df2)/2) *
+        #             gamma(df1/2)*gamma(1+df2/2) *
+        #             L^{v1/2-1}^{v2/2}(-nc*v1*x/(2*(v1*x+v2))) /
+        #             (B(v1/2, v2/2) * gamma((v1+v2)/2))
+        return _boost._ncf_pdf(x, dfn, dfd, nc)
+
+    def _cdf(self, x, dfn, dfd, nc):
+        return _boost._ncf_cdf(x, dfn, dfd, nc)
+
+    def _ppf(self, q, dfn, dfd, nc):
+        return _boost._ncf_ppf(q, dfn, dfd, nc)
+
+    def _sf(self, x, dfn, dfd, nc):
+        return _boost._ncf_sf(x, dfn, dfd, nc)
+
+    def _isf(self, x, dfn, dfd, nc):
+        return _boost._ncf_isf(x, dfn, dfd, nc)
+
+    def _munp(self, n, dfn, dfd, nc):
+        val = (dfn * 1.0/dfd)**n
+        term = sc.gammaln(n+0.5*dfn) + sc.gammaln(0.5*dfd-n) - sc.gammaln(dfd*0.5)
+        val *= np.exp(-nc / 2.0+term)
+        val *= sc.hyp1f1(n+0.5*dfn, 0.5*dfn, 0.5*nc)
+        return val
+
+    def _stats(self, dfn, dfd, nc, moments='mv'):
+        mu = _boost._ncf_mean(dfn, dfd, nc)
+        mu2 = _boost._ncf_variance(dfn, dfd, nc)
+        g1 = _boost._ncf_skewness(dfn, dfd, nc) if 's' in moments else None
+        g2 = _boost._ncf_kurtosis_excess(
+            dfn, dfd, nc) if 'k' in moments else None
+        return mu, mu2, g1, g2
+
+
+ncf = ncf_gen(a=0.0, name='ncf')
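+
+
+# Editorial sketch, not part of upstream scipy (helper name illustrative):
+# checks the docstring note that ncf with nc = 0 is equivalent to the
+# central F (Fisher) distribution; agreement is numerical rather than
+# exact, since the Boost implementation is used.
+def _sketch_ncf_central_f():
+    import numpy as np
+    from scipy import stats
+    x = np.linspace(0.2, 4, 9)
+    assert np.allclose(stats.ncf.cdf(x, 5, 8, 0), stats.f.cdf(x, 5, 8))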
+
+
+class t_gen(rv_continuous):
+    r"""A Student's t continuous random variable.
+
+    For the noncentral t distribution, see `nct`.
+
+    %(before_notes)s
+
+    See Also
+    --------
+    nct
+
+    Notes
+    -----
+    The probability density function for `t` is:
+
+    .. math::
+
+        f(x, \nu) = \frac{\Gamma((\nu+1)/2)}
+                        {\sqrt{\pi \nu} \Gamma(\nu/2)}
+                    (1+x^2/\nu)^{-(\nu+1)/2}
+
+    where :math:`x` is a real number and the degrees of freedom parameter
+    :math:`\nu` (denoted ``df`` in the implementation) satisfies
+    :math:`\nu > 0`. :math:`\Gamma` is the gamma function
+    (`scipy.special.gamma`).
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("df", False, (0, np.inf), (False, False))]
+
+    def _rvs(self, df, size=None, random_state=None):
+        return random_state.standard_t(df, size=size)
+
+    def _pdf(self, x, df):
+        return _lazywhere(
+            df == np.inf, (x, df),
+            f=lambda x, df: norm._pdf(x),
+            f2=lambda x, df: (
+                np.exp(sc.gammaln((df+1)/2)-sc.gammaln(df/2))
+                / (np.sqrt(df*np.pi)*(1+(x**2)/df)**((df+1)/2))
+            )
+        )
+
+    def _logpdf(self, x, df):
+        return _lazywhere(
+            df == np.inf, (x, df),
+            f=lambda x, df: norm._logpdf(x),
+            f2=lambda x, df: (
+                sc.gammaln((df+1)/2) - sc.gammaln(df/2)
+                - (0.5*np.log(df*np.pi)
+                   + (df+1)/2*np.log(1+(x**2)/df))
+            )
+        )
+
+    def _cdf(self, x, df):
+        return sc.stdtr(df, x)
+
+    def _sf(self, x, df):
+        return sc.stdtr(df, -x)
+
+    def _ppf(self, q, df):
+        return sc.stdtrit(df, q)
+
+    def _isf(self, q, df):
+        return -sc.stdtrit(df, q)
+
+    def _stats(self, df):
+        # infinite df -> normal distribution (0.0, 1.0, 0.0, 0.0)
+        infinite_df = np.isposinf(df)
+
+        mu = np.where(df > 1, 0.0, np.inf)
+
+        condlist = ((df > 1) & (df <= 2),
+                    (df > 2) & np.isfinite(df),
+                    infinite_df)
+        choicelist = (lambda df: np.broadcast_to(np.inf, df.shape),
+                      lambda df: df / (df-2.0),
+                      lambda df: np.broadcast_to(1, df.shape))
+        mu2 = _lazyselect(condlist, choicelist, (df,), np.nan)
+
+        g1 = np.where(df > 3, 0.0, np.nan)
+
+        condlist = ((df > 2) & (df <= 4),
+                    (df > 4) & np.isfinite(df),
+                    infinite_df)
+        choicelist = (lambda df: np.broadcast_to(np.inf, df.shape),
+                      lambda df: 6.0 / (df-4.0),
+                      lambda df: np.broadcast_to(0, df.shape))
+        g2 = _lazyselect(condlist, choicelist, (df,), np.nan)
+
+        return mu, mu2, g1, g2
+
+    def _entropy(self, df):
+        if df == np.inf:
+            return norm._entropy()
+        half = df/2
+        half1 = (df + 1)/2
+        return (half1*(sc.digamma(half1) - sc.digamma(half))
+                + np.log(np.sqrt(df)*sc.beta(half, 0.5)))
+
+
+t = t_gen(name='t')
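+
+
+# Editorial sketch, not part of upstream scipy (helper name illustrative):
+# exercises the df == inf branches above, under which Student's t reduces
+# to the standard normal distribution with mean 0 and variance 1.
+def _sketch_t_normal_limit():
+    import numpy as np
+    from scipy import stats
+    x = np.linspace(-3, 3, 7)
+    assert np.allclose(stats.t.pdf(x, np.inf), stats.norm.pdf(x))
+    assert np.allclose(stats.t.stats(np.inf, moments='mv'), (0.0, 1.0))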
+
+
+class nct_gen(rv_continuous):
+    r"""A non-central Student's t continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    If :math:`Y` is a standard normal random variable and :math:`V` is
+    an independent chi-square random variable (`chi2`) with :math:`k` degrees
+    of freedom, then
+
+    .. math::
+
+        X = \frac{Y + c}{\sqrt{V/k}}
+
+    has a non-central Student's t distribution on the real line.
+    The degrees of freedom parameter :math:`k` (denoted ``df`` in the
+    implementation) satisfies :math:`k > 0` and the noncentrality parameter
+    :math:`c` (denoted ``nc`` in the implementation) is a real number.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _argcheck(self, df, nc):
+        return (df > 0) & (nc == nc)
+
+    def _shape_info(self):
+        idf = _ShapeInfo("df", False, (0, np.inf), (False, False))
+        inc = _ShapeInfo("nc", False, (-np.inf, np.inf), (False, False))
+        return [idf, inc]
+
+    def _rvs(self, df, nc, size=None, random_state=None):
+        n = norm.rvs(loc=nc, size=size, random_state=random_state)
+        c2 = chi2.rvs(df, size=size, random_state=random_state)
+        return n * np.sqrt(df) / np.sqrt(c2)
+
+    def _pdf(self, x, df, nc):
+        # Boost version has accuracy issues in left tail; see gh-16591
+        n = df*1.0
+        nc = nc*1.0
+        x2 = x*x
+        ncx2 = nc*nc*x2
+        fac1 = n + x2
+        trm1 = (n/2.*np.log(n) + sc.gammaln(n+1)
+                - (n*np.log(2) + nc*nc/2 + (n/2)*np.log(fac1)
+                   + sc.gammaln(n/2)))
+        Px = np.exp(trm1)
+        valF = ncx2 / (2*fac1)
+        trm1 = (np.sqrt(2)*nc*x*sc.hyp1f1(n/2+1, 1.5, valF)
+                / np.asarray(fac1*sc.gamma((n+1)/2)))
+        trm2 = (sc.hyp1f1((n+1)/2, 0.5, valF)
+                / np.asarray(np.sqrt(fac1)*sc.gamma(n/2+1)))
+        Px *= trm1+trm2
+        return np.clip(Px, 0, None)
+
+    def _cdf(self, x, df, nc):
+        return np.clip(_boost._nct_cdf(x, df, nc), 0, 1)
+
+    def _ppf(self, q, df, nc):
+        return _boost._nct_ppf(q, df, nc)
+
+    def _sf(self, x, df, nc):
+        return np.clip(_boost._nct_sf(x, df, nc), 0, 1)
+
+    def _isf(self, x, df, nc):
+        return _boost._nct_isf(x, df, nc)
+
+    def _stats(self, df, nc, moments='mv'):
+        mu = _boost._nct_mean(df, nc)
+        mu2 = _boost._nct_variance(df, nc)
+        g1 = _boost._nct_skewness(df, nc) if 's' in moments else None
+        g2 = _boost._nct_kurtosis_excess(df, nc)-3 if 'k' in moments else None
+        return mu, mu2, g1, g2
+
+
+nct = nct_gen(name="nct")
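+
+
+# Editorial sketch, not part of upstream scipy (helper name illustrative):
+# builds the defining transform from the docstring, (Y + nc) / sqrt(V / df),
+# and compares its sample mean against the Boost-backed nct.mean.
+def _sketch_nct_construction():
+    import numpy as np
+    from scipy import stats
+    rng = np.random.default_rng(2)
+    df, nc = 7.0, 1.2
+    y = rng.standard_normal(100_000)
+    v = rng.chisquare(df, size=100_000)
+    x = (y + nc) / np.sqrt(v / df)
+    assert np.isclose(x.mean(), stats.nct.mean(df, nc), atol=0.02)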
+
+
+class pareto_gen(rv_continuous):
+    r"""A Pareto continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `pareto` is:
+
+    .. math::
+
+        f(x, b) = \frac{b}{x^{b+1}}
+
+    for :math:`x \ge 1`, :math:`b > 0`.
+
+    `pareto` takes ``b`` as a shape parameter for :math:`b`.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("b", False, (0, np.inf), (False, False))]
+
+    def _pdf(self, x, b):
+        # pareto.pdf(x, b) = b / x**(b+1)
+        return b * x**(-b-1)
+
+    def _cdf(self, x, b):
+        return 1 - x**(-b)
+
+    def _ppf(self, q, b):
+        return pow(1-q, -1.0/b)
+
+    def _sf(self, x, b):
+        return x**(-b)
+
+    def _stats(self, b, moments='mv'):
+        mu, mu2, g1, g2 = None, None, None, None
+        if 'm' in moments:
+            mask = b > 1
+            bt = np.extract(mask, b)
+            mu = np.full(np.shape(b), fill_value=np.inf)
+            np.place(mu, mask, bt / (bt-1.0))
+        if 'v' in moments:
+            mask = b > 2
+            bt = np.extract(mask, b)
+            mu2 = np.full(np.shape(b), fill_value=np.inf)
+            np.place(mu2, mask, bt / (bt-2.0) / (bt-1.0)**2)
+        if 's' in moments:
+            mask = b > 3
+            bt = np.extract(mask, b)
+            g1 = np.full(np.shape(b), fill_value=np.nan)
+            vals = 2 * (bt + 1.0) * np.sqrt(bt - 2.0) / ((bt - 3.0) * np.sqrt(bt))
+            np.place(g1, mask, vals)
+        if 'k' in moments:
+            mask = b > 4
+            bt = np.extract(mask, b)
+            g2 = np.full(np.shape(b), fill_value=np.nan)
+            vals = (6.0*np.polyval([1.0, 1.0, -6, -2], bt) /
+                    np.polyval([1.0, -7.0, 12.0, 0.0], bt))
+            np.place(g2, mask, vals)
+        return mu, mu2, g1, g2
+
+    def _entropy(self, c):
+        return 1 + 1.0/c - np.log(c)
+
+    @_call_super_mom
+    @inherit_docstring_from(rv_continuous)
+    def fit(self, data, *args, **kwds):
+        parameters = _check_fit_input_parameters(self, data, args, kwds)
+        data, fshape, floc, fscale = parameters
+
+        # ensure that any fixed parameters don't violate constraints of the
+        # distribution before continuing.
+        if floc is not None and np.min(data) - floc < (fscale or 0):
+            raise FitDataError("pareto", lower=1, upper=np.inf)
+
+        ndata = data.shape[0]
+
+        def get_shape(scale, location):
+            # The first-order necessary condition on `shape` can be solved in
+            # closed form
+            return ndata / np.sum(np.log((data - location) / scale))
+
+        if floc is fscale is None:
+            # The support of the distribution is `(x - loc)/scale > 0`.
+            # The method of Lagrange multipliers turns this constraint
+            # into an equation that can be solved numerically.
+            # See gh-12545 for details.
+
+            def dL_dScale(shape, scale):
+                # The partial derivative of the log-likelihood function w.r.t.
+                # the scale.
+                return ndata * shape / scale
+
+            def dL_dLocation(shape, location):
+                # The partial derivative of the log-likelihood function w.r.t.
+                # the location.
+                return (shape + 1) * np.sum(1 / (data - location))
+
+            def fun_to_solve(scale):
+                # optimize the scale by setting the partial derivatives
+                # w.r.t. to location and scale equal and solving.
+                location = np.min(data) - scale
+                shape = fshape or get_shape(scale, location)
+                return dL_dLocation(shape, location) - dL_dScale(shape, scale)
+
+            def interval_contains_root(lbrack, rbrack):
+                # return true if the signs disagree.
+                return (np.sign(fun_to_solve(lbrack)) !=
+                        np.sign(fun_to_solve(rbrack)))
+
+            # set brackets for `root_scalar` to use when optimizing over the
+            # scale such that a root is likely between them. Use user supplied
+            # guess or default 1.
+            brack_start = kwds.get('scale', 1)
+            lbrack, rbrack = brack_start / 2, brack_start * 2
+            # if a root is not between the brackets, iteratively expand them
+            # until they include a sign change, checking after each bracket is
+            # modified.
+            while (not interval_contains_root(lbrack, rbrack)
+                   and (lbrack > 0 or rbrack < np.inf)):
+                lbrack /= 2
+                rbrack *= 2
+            res = root_scalar(fun_to_solve, bracket=[lbrack, rbrack])
+            if res.converged:
+                scale = res.root
+                loc = np.min(data) - scale
+                shape = fshape or get_shape(scale, loc)
+
+                # The Pareto distribution requires that its parameters satisfy
+                # the condition `fscale + floc <= min(data)`. However, to
+                # avoid numerical issues, we require that `fscale + floc`
+                # is strictly less than `min(data)`. If this condition
+                # is not satisfied, reduce the scale with `np.nextafter` to
+                # ensure that data does not fall outside of the support.
+                if not (scale + loc) < np.min(data):
+                    scale = np.min(data) - loc
+                    scale = np.nextafter(scale, 0)
+                return shape, loc, scale
+            else:
+                return super().fit(data, **kwds)
+        elif floc is None:
+            loc = np.min(data) - fscale
+        else:
+            loc = floc
+        # Source: Evans, Hastings, and Peacock (2000), Statistical
+        # Distributions, 3rd. Ed., John Wiley and Sons. Page 149.
+        scale = fscale or np.min(data) - loc
+        shape = fshape or get_shape(scale, loc)
+        return shape, loc, scale
+
+
+pareto = pareto_gen(a=1.0, name="pareto")
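+
+
+# Editorial sketch, not part of upstream scipy (helper name illustrative):
+# checks the masking logic in pareto._stats -- the mean is b/(b - 1) only
+# for b > 1 and infinite otherwise.
+def _sketch_pareto_mean():
+    import numpy as np
+    from scipy import stats
+    assert np.isclose(stats.pareto.mean(3.0), 1.5)
+    assert np.isinf(stats.pareto.mean(0.5))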
+
+
+class lomax_gen(rv_continuous):
+    r"""A Lomax (Pareto of the second kind) continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `lomax` is:
+
+    .. math::
+
+        f(x, c) = \frac{c}{(1+x)^{c+1}}
+
+    for :math:`x \ge 0`, :math:`c > 0`.
+
+    `lomax` takes ``c`` as a shape parameter for :math:`c`.
+
+    `lomax` is a special case of `pareto` with ``loc=-1.0``.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
+
+    def _pdf(self, x, c):
+        # lomax.pdf(x, c) = c / (1+x)**(c+1)
+        return c*1.0/(1.0+x)**(c+1.0)
+
+    def _logpdf(self, x, c):
+        return np.log(c) - (c+1)*sc.log1p(x)
+
+    def _cdf(self, x, c):
+        return -sc.expm1(-c*sc.log1p(x))
+
+    def _sf(self, x, c):
+        return np.exp(-c*sc.log1p(x))
+
+    def _logsf(self, x, c):
+        return -c*sc.log1p(x)
+
+    def _ppf(self, q, c):
+        return sc.expm1(-sc.log1p(-q)/c)
+
+    def _stats(self, c):
+        mu, mu2, g1, g2 = pareto.stats(c, loc=-1.0, moments='mvsk')
+        return mu, mu2, g1, g2
+
+    def _entropy(self, c):
+        return 1+1.0/c-np.log(c)
+
+
+lomax = lomax_gen(a=0.0, name="lomax")
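+
+
+# Editorial sketch, not part of upstream scipy (helper name illustrative):
+# checks the docstring note that lomax is a special case of pareto with
+# loc = -1, which is also how lomax._stats is computed above.
+def _sketch_lomax_pareto_shift():
+    import numpy as np
+    from scipy import stats
+    x, c = np.linspace(0.1, 5, 9), 2.5
+    assert np.allclose(stats.lomax.pdf(x, c), stats.pareto.pdf(x, c, loc=-1))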
+
+
+class pearson3_gen(rv_continuous):
+    r"""A Pearson type III continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `pearson3` is:
+
+    .. math::
+
+        f(x, \kappa) = \frac{|\beta|}{\Gamma(\alpha)}
+                       (\beta (x - \zeta))^{\alpha - 1}
+                       \exp(-\beta (x - \zeta))
+
+    where:
+
+    .. math::
+
+            \beta = \frac{2}{\kappa}
+
+            \alpha = \beta^2 = \frac{4}{\kappa^2}
+
+            \zeta = -\frac{\alpha}{\beta} = -\beta
+
+    :math:`\Gamma` is the gamma function (`scipy.special.gamma`).
+    Pass the skew :math:`\kappa` into `pearson3` as the shape parameter
+    ``skew``.
+
+    %(after_notes)s
+
+    %(example)s
+
+    References
+    ----------
+    R.W. Vogel and D.E. McMartin, "Probability Plot Goodness-of-Fit and
+    Skewness Estimation Procedures for the Pearson Type 3 Distribution", Water
+    Resources Research, Vol.27, 3149-3158 (1991).
+
+    L.R. Salvosa, "Tables of Pearson's Type III Function", Ann. Math. Statist.,
+    Vol.1, 191-198 (1930).
+
+    "Using Modern Computing Tools to Fit the Pearson Type III Distribution to
+    Aviation Loads Data", Office of Aviation Research (2003).
+
+    """
+    def _preprocess(self, x, skew):
+        # The real 'loc' and 'scale' are handled in the calling pdf(...). The
+        # local variables 'loc' and 'scale' within pearson3._pdf are set to
+        # the defaults just to keep them as part of the equations for
+        # documentation.
+        loc = 0.0
+        scale = 1.0
+
+        # If skew is small, return _norm_pdf. The divide between pearson3
+        # and norm was found by brute force and is approximately a skew of
+        # 0.000016.  No one, I hope, would actually use a skew value even
+        # close to this small.
+        norm2pearson_transition = 0.000016
+
+        ans, x, skew = np.broadcast_arrays(1.0, x, skew)
+        ans = ans.copy()
+
+        # mask is True where skew is small enough to use the normal approx.
+        mask = np.absolute(skew) < norm2pearson_transition
+        invmask = ~mask
+
+        beta = 2.0 / (skew[invmask] * scale)
+        alpha = (scale * beta)**2
+        zeta = loc - alpha / beta
+
+        transx = beta * (x[invmask] - zeta)
+        return ans, x, transx, mask, invmask, beta, alpha, zeta
+
+    def _argcheck(self, skew):
+        # The _argcheck function in rv_continuous only allows positive
+        # arguments.  The skew argument for pearson3 can be zero (which I want
+        # to handle inside pearson3._pdf) or negative.  So just return True
+        # for all skew args.
+        return np.isfinite(skew)
+
+    def _shape_info(self):
+        return [_ShapeInfo("skew", False, (-np.inf, np.inf), (False, False))]
+
+    def _stats(self, skew):
+        m = 0.0
+        v = 1.0
+        s = skew
+        k = 1.5*skew**2
+        return m, v, s, k
+
+    def _pdf(self, x, skew):
+        # pearson3.pdf(x, skew) = abs(beta) / gamma(alpha) *
+        #     (beta * (x - zeta))**(alpha - 1) * exp(-beta*(x - zeta))
+        # Do the calculation in _logpdf since helps to limit
+        # overflow/underflow problems
+        ans = np.exp(self._logpdf(x, skew))
+        if ans.ndim == 0:
+            if np.isnan(ans):
+                return 0.0
+            return ans
+        ans[np.isnan(ans)] = 0.0
+        return ans
+
+    def _logpdf(self, x, skew):
+        #   PEARSON3 logpdf                              GAMMA logpdf
+        #   np.log(abs(beta))
+        # + (alpha - 1)*np.log(beta*(x - zeta))        + (a - 1)*np.log(x)
+        # - beta*(x - zeta)                            - x
+        # - sc.gammaln(alpha)                          - sc.gammaln(a)
+        ans, x, transx, mask, invmask, beta, alpha, _ = (
+            self._preprocess(x, skew))
+
+        ans[mask] = np.log(_norm_pdf(x[mask]))
+        # use logpdf instead of _logpdf to fix issue mentioned in gh-12640
+        # (_logpdf does not return correct result for alpha = 1)
+        ans[invmask] = np.log(abs(beta)) + gamma.logpdf(transx, alpha)
+        return ans
+
+    def _cdf(self, x, skew):
+        ans, x, transx, mask, invmask, _, alpha, _ = (
+            self._preprocess(x, skew))
+
+        ans[mask] = _norm_cdf(x[mask])
+
+        skew = np.broadcast_to(skew, invmask.shape)
+        invmask1a = np.logical_and(invmask, skew > 0)
+        invmask1b = skew[invmask] > 0
+        # use cdf instead of _cdf to fix issue mentioned in gh-12640
+        # (_cdf produces NaNs for inputs outside support)
+        ans[invmask1a] = gamma.cdf(transx[invmask1b], alpha[invmask1b])
+
+        # The gamma._cdf approach wasn't working with negative skew.
+        # Note that multiplying the skew by -1 reflects about x=0.
+        # So instead of evaluating the CDF with negative skew at x,
+        # evaluate the SF with positive skew at -x.
+        invmask2a = np.logical_and(invmask, skew < 0)
+        invmask2b = skew[invmask] < 0
+        # gamma._sf produces NaNs when transx < 0, so use gamma.sf
+        ans[invmask2a] = gamma.sf(transx[invmask2b], alpha[invmask2b])
+
+        return ans
+
+    def _rvs(self, skew, size=None, random_state=None):
+        skew = np.broadcast_to(skew, size)
+        ans, _, _, mask, invmask, beta, alpha, zeta = (
+            self._preprocess([0], skew))
+
+        nsmall = mask.sum()
+        nbig = mask.size - nsmall
+        ans[mask] = random_state.standard_normal(nsmall)
+        ans[invmask] = random_state.standard_gamma(alpha, nbig)/beta + zeta
+
+        if size == ():
+            ans = ans[0]
+        return ans
+
+    def _ppf(self, q, skew):
+        ans, q, _, mask, invmask, beta, alpha, zeta = (
+            self._preprocess(q, skew))
+        ans[mask] = _norm_ppf(q[mask])
+        q = q[invmask]
+        q[beta < 0] = 1 - q[beta < 0]  # for negative skew; see gh-17050
+        ans[invmask] = sc.gammaincinv(alpha, q)/beta + zeta
+        return ans
+
+    @_call_super_mom
+    @extend_notes_in_docstring(rv_continuous, notes="""\
+        Note that method of moments (`method='MM'`) is not
+        available for this distribution.\n\n""")
+    def fit(self, data, *args, **kwds):
+        if kwds.get("method", None) == 'MM':
+            raise NotImplementedError("Fit `method='MM'` is not available for "
+                                      "the Pearson3 distribution. Please try "
+                                      "the default `method='MLE'`.")
+        else:
+            return super(type(self), self).fit(data, *args, **kwds)
+
+
+pearson3 = pearson3_gen(name="pearson3")
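+
+
+# Editorial sketch, not part of upstream scipy (helper name illustrative):
+# exercises the small-skew branch of _preprocess, under which pearson3
+# evaluates as a standard normal (the transition is near |skew| = 1.6e-5).
+def _sketch_pearson3_normal_branch():
+    import numpy as np
+    from scipy import stats
+    x = np.linspace(-2, 2, 9)
+    assert np.allclose(stats.pearson3.pdf(x, 0.0), stats.norm.pdf(x))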
+
+
+class powerlaw_gen(rv_continuous):
+    r"""A power-function continuous random variable.
+
+    %(before_notes)s
+
+    See Also
+    --------
+    pareto
+
+    Notes
+    -----
+    The probability density function for `powerlaw` is:
+
+    .. math::
+
+        f(x, a) = a x^{a-1}
+
+    for :math:`0 \le x \le 1`, :math:`a > 0`.
+
+    `powerlaw` takes ``a`` as a shape parameter for :math:`a`.
+
+    %(after_notes)s
+
+    For example, the support of `powerlaw` can be adjusted from the default
+    interval ``[0, 1]`` to the interval ``[c, c+d]`` by setting ``loc=c`` and
+    ``scale=d``. For a power-law distribution with infinite support, see
+    `pareto`.
+
+    `powerlaw` is a special case of `beta` with ``b=1``.
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("a", False, (0, np.inf), (False, False))]
+
+    def _pdf(self, x, a):
+        # powerlaw.pdf(x, a) = a * x**(a-1)
+        return a*x**(a-1.0)
+
+    def _logpdf(self, x, a):
+        return np.log(a) + sc.xlogy(a - 1, x)
+
+    def _cdf(self, x, a):
+        return x**(a*1.0)
+
+    def _logcdf(self, x, a):
+        return a*np.log(x)
+
+    def _ppf(self, q, a):
+        return pow(q, 1.0/a)
+
+    def _stats(self, a):
+        return (a / (a + 1.0),
+                a / (a + 2.0) / (a + 1.0) ** 2,
+                -2.0 * ((a - 1.0) / (a + 3.0)) * np.sqrt((a + 2.0) / a),
+                6 * np.polyval([1, -1, -6, 2], a) / (a * (a + 3.0) * (a + 4)))
+
+    def _entropy(self, a):
+        return 1 - 1.0/a - np.log(a)
+
+    def _support_mask(self, x, a):
+        return (super(powerlaw_gen, self)._support_mask(x, a)
+                & ((x != 0) | (a >= 1)))
+
+    @_call_super_mom
+    @extend_notes_in_docstring(rv_continuous, notes="""\
+        Notes specifically for ``powerlaw.fit``: If the location is a free
+        parameter and the value returned for the shape parameter is less than
+        one, the true maximum likelihood approaches infinity. This causes
+        numerical difficulties, and the resulting estimates are approximate.
+        \n\n""")
+    def fit(self, data, *args, **kwds):
+        # Summary of the strategy:
+        #
+        # 1) If the scale and location are fixed, return the shape according
+        #    to a formula.
+        #
+        # 2) If the scale is fixed, there are two possibilities for the other
+        #    parameters - one corresponding with shape less than one, and
+        #    another with shape greater than one. Calculate both, and return
+        #    whichever has the better log-likelihood.
+        #
+        # At this point, the scale is known to be free.
+        #
+        # 3) If the location is fixed, return the scale and shape according to
+        #    formulas (or, if the shape is fixed, the fixed shape).
+        #
+        # At this point, the location and scale are both free. There are
+        # separate equations depending on whether the shape is less than one or
+        # greater than one.
+        #
+        # 4a) If the shape is less than one, there are formulas for shape,
+        #     location, and scale.
+        # 4b) If the shape is greater than one, there are formulas for shape
+        #     and scale, but there is a condition for location to be solved
+        #     numerically.
+        #
+        # If the shape is fixed and less than one, we use 4a.
+        # If the shape is fixed and greater than one, we use 4b.
+        # If the shape is also free, we calculate fits using both 4a and 4b
+        # and choose the one that results in a better log-likelihood.
+        #
+        # In many cases, `np.nextafter` is used to avoid numerical issues.
+        if kwds.pop('superfit', False):
+            return super().fit(data, *args, **kwds)
+
+        if len(np.unique(data)) == 1:
+            return super().fit(data, *args, **kwds)
+
+        data, fshape, floc, fscale = _check_fit_input_parameters(self, data,
+                                                                 args, kwds)
+        penalized_nllf_args = [data, (self._fitstart(data),)]
+        penalized_nllf = self._reduce_func(penalized_nllf_args, {})[1]
+
+        # ensure that any fixed parameters don't violate constraints of the
+        # distribution before continuing. The support of the distribution
+        # is `0 < (x - loc)/scale < 1`.
+        if floc is not None:
+            if not data.min() > floc:
+                raise FitDataError('powerlaw', 0, 1)
+            if fscale is not None and not data.max() <= floc + fscale:
+                raise FitDataError('powerlaw', 0, 1)
+
+        if fscale is not None:
+            if fscale <= 0:
+                raise ValueError("Negative or zero `fscale` is outside the "
+                                 "range allowed by the distribution.")
+            if fscale <= data.ptp():
+                msg = "`fscale` must be greater than the range of data."
+                raise ValueError(msg)
+
+        def get_shape(data, loc, scale):
+            # The first-order necessary condition on `shape` can be solved in
+            # closed form. It can be used no matter the assumption of the
+            # value of the shape.
+            N = len(data)
+            return - N / (np.sum(np.log(data - loc)) - N*np.log(scale))
+
+        def get_scale(data, loc):
+            # analytical solution for `scale` based on the location.
+            # It can be used no matter the assumption of the value of the
+            # shape.
+            return data.max() - loc
+
+        # 1) The location and scale are both fixed. Analytically determine the
+        # shape.
+        if fscale is not None and floc is not None:
+            return get_shape(data, floc, fscale), floc, fscale
+
+        # 2) The scale is fixed. There are two possibilities for the other
+        # parameters. Choose the option with better log-likelihood.
+        if fscale is not None:
+            # using `data.min()` as the optimal location
+            loc_lt1 = np.nextafter(data.min(), -np.inf)
+            shape_lt1 = fshape or get_shape(data, loc_lt1, fscale)
+            ll_lt1 = penalized_nllf((shape_lt1, loc_lt1, fscale), data)
+
+            # using `data.max() - scale` as the optimal location
+            loc_gt1 = np.nextafter(data.max() - fscale, np.inf)
+            shape_gt1 = fshape or get_shape(data, loc_gt1, fscale)
+            ll_gt1 = penalized_nllf((shape_gt1, loc_gt1, fscale), data)
+
+            if ll_lt1 < ll_gt1:
+                return shape_lt1, loc_lt1, fscale
+            else:
+                return shape_gt1, loc_gt1, fscale
+
+        # 3) The location is fixed. Return the analytical scale and the
+        # analytical (or fixed) shape.
+        if floc is not None:
+            scale = get_scale(data, floc)
+            shape = fshape or get_shape(data, floc, scale)
+            return shape, floc, scale
+
+        # 4) Location and scale are both free
+        # 4a) Use formulas that assume `shape <= 1`.
+
+        def fit_loc_scale_w_shape_lt_1():
+            loc = np.nextafter(data.min(), -np.inf)
+            if np.abs(loc) < np.finfo(loc.dtype).tiny:
+                loc = np.sign(loc) * np.finfo(loc.dtype).tiny
+            scale = np.nextafter(get_scale(data, loc), np.inf)
+            shape = fshape or get_shape(data, loc, scale)
+            return shape, loc, scale
+
+        # 4b) Fit under the assumption that `shape > 1`. The support
+        # of the distribution is `(x - loc)/scale <= 1`. The method of Lagrange
+        # multipliers turns this constraint into the condition that
+        # dL_dScale - dL_dLocation must be zero, which is solved numerically.
+        # (Alternatively, substitute the constraint into the objective
+        # function before deriving the likelihood equation for location.)
+
+        def dL_dScale(data, shape, scale):
+            # The partial derivative of the log-likelihood function w.r.t.
+            # the scale.
+            return -data.shape[0] * shape / scale
+
+        def dL_dLocation(data, shape, loc):
+            # The partial derivative of the log-likelihood function w.r.t.
+            # the location.
+            return (shape - 1) * np.sum(1 / (loc - data))  # -1/(data-loc)
+
+        def dL_dLocation_star(loc):
+            # The derivative of the log-likelihood function w.r.t.
+            # the location, given optimal shape and scale
+            scale = np.nextafter(get_scale(data, loc), -np.inf)
+            shape = fshape or get_shape(data, loc, scale)
+            return dL_dLocation(data, shape, loc)
+
+        def fun_to_solve(loc):
+            # optimize the location by setting the partial derivatives
+            # w.r.t. to location and scale equal and solving.
+            scale = np.nextafter(get_scale(data, loc), -np.inf)
+            shape = fshape or get_shape(data, loc, scale)
+            return (dL_dScale(data, shape, scale)
+                    - dL_dLocation(data, shape, loc))
+
+        def fit_loc_scale_w_shape_gt_1():
+            # set brackets for `root_scalar` to use when optimizing over the
+            # location such that a root is likely between them.
+            rbrack = np.nextafter(data.min(), -np.inf)
+
+            # if the sign of `dL_dLocation_star` is positive at rbrack,
+            # we're not going to find the root we're looking for
+            delta = (data.min() - rbrack)
+            while dL_dLocation_star(rbrack) > 0:
+                rbrack = data.min() - delta
+                delta *= 2
+
+            def interval_contains_root(lbrack, rbrack):
+                # Check if the interval (lbrack, rbrack) contains the root.
+                return (np.sign(fun_to_solve(lbrack))
+                        != np.sign(fun_to_solve(rbrack)))
+
+            lbrack = rbrack - 1
+
+            # if the sign doesn't change between the brackets, move the left
+            # bracket until it does. (The right bracket remains fixed at the
+            # maximum permissible value.)
+            i = 1.0
+            while (not interval_contains_root(lbrack, rbrack)
+                   and lbrack != -np.inf):
+                lbrack = (data.min() - i)
+                i *= 2
+
+            root = optimize.root_scalar(fun_to_solve, bracket=(lbrack, rbrack))
+
+            loc = np.nextafter(root.root, -np.inf)
+            scale = np.nextafter(get_scale(data, loc), np.inf)
+            shape = fshape or get_shape(data, loc, scale)
+            return shape, loc, scale
+
+        # Shape is fixed - choose 4a or 4b accordingly.
+        if fshape is not None and fshape <= 1:
+            return fit_loc_scale_w_shape_lt_1()
+        elif fshape is not None and fshape > 1:
+            return fit_loc_scale_w_shape_gt_1()
+
+        # Shape is free
+        fit_shape_lt1 = fit_loc_scale_w_shape_lt_1()
+        ll_lt1 = self.nnlf(fit_shape_lt1, data)
+
+        fit_shape_gt1 = fit_loc_scale_w_shape_gt_1()
+        ll_gt1 = self.nnlf(fit_shape_gt1, data)
+
+        if ll_lt1 <= ll_gt1 and fit_shape_lt1[0] <= 1:
+            return fit_shape_lt1
+        elif ll_lt1 > ll_gt1 and fit_shape_gt1[0] > 1:
+            return fit_shape_gt1
+        else:
+            return super().fit(data, *args, **kwds)
+
+
+powerlaw = powerlaw_gen(a=0.0, b=1.0, name="powerlaw")
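+
+
+# Editorial sketch, not part of upstream scipy (helper name illustrative):
+# checks the docstring note that powerlaw is a special case of beta with
+# b = 1, i.e. a*x**(a-1) == x**(a-1) / B(a, 1).
+def _sketch_powerlaw_beta_case():
+    import numpy as np
+    from scipy import stats
+    x, a = np.linspace(0.05, 0.95, 9), 2.7
+    assert np.allclose(stats.powerlaw.pdf(x, a), stats.beta.pdf(x, a, 1))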
+
+
+class powerlognorm_gen(rv_continuous):
+    r"""A power log-normal continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `powerlognorm` is:
+
+    .. math::
+
+        f(x, c, s) = \frac{c}{x s} \phi(\log(x)/s)
+                     (\Phi(-\log(x)/s))^{c-1}
+
+    where :math:`\phi` is the normal pdf, and :math:`\Phi` is the normal cdf,
+    and :math:`x > 0`, :math:`s, c > 0`.
+
+    `powerlognorm` takes :math:`c` and :math:`s` as shape parameters.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    _support_mask = rv_continuous._open_support_mask
+
+    def _shape_info(self):
+        ic = _ShapeInfo("c", False, (0, np.inf), (False, False))
+        i_s = _ShapeInfo("s", False, (0, np.inf), (False, False))
+        return [ic, i_s]
+
+    def _pdf(self, x, c, s):
+        # powerlognorm.pdf(x, c, s) = c / (x*s) * phi(log(x)/s) *
+        #                                         (Phi(-log(x)/s))**(c-1),
+        return (c/(x*s) * _norm_pdf(np.log(x)/s) *
+                pow(_norm_cdf(-np.log(x)/s), c*1.0-1.0))
+
+    def _cdf(self, x, c, s):
+        return 1.0 - pow(_norm_cdf(-np.log(x)/s), c*1.0)
+
+    def _ppf(self, q, c, s):
+        return np.exp(-s * _norm_ppf(pow(1.0 - q, 1.0 / c)))
+
+
+powerlognorm = powerlognorm_gen(a=0.0, name="powerlognorm")
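+
+
+# Editorial sketch, not part of upstream scipy (helper name illustrative):
+# round-trips the closed-form _cdf/_ppf pair above for a few quantiles.
+def _sketch_powerlognorm_roundtrip():
+    import numpy as np
+    from scipy import stats
+    q, c, s = np.linspace(0.05, 0.95, 7), 2.0, 0.8
+    x = stats.powerlognorm.ppf(q, c, s)
+    assert np.allclose(stats.powerlognorm.cdf(x, c, s), q)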
+
+
+class powernorm_gen(rv_continuous):
+    r"""A power normal continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `powernorm` is:
+
+    .. math::
+
+        f(x, c) = c \phi(x) (\Phi(-x))^{c-1}
+
+    where :math:`\phi` is the normal pdf, :math:`\Phi` is the normal cdf,
+    :math:`x` is a real number, and :math:`c > 0`.
+
+    `powernorm` takes ``c`` as a shape parameter for :math:`c`.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
+
+    def _pdf(self, x, c):
+        # powernorm.pdf(x, c) = c * phi(x) * (Phi(-x))**(c-1)
+        return c*_norm_pdf(x) * (_norm_cdf(-x)**(c-1.0))
+
+    def _logpdf(self, x, c):
+        return np.log(c) + _norm_logpdf(x) + (c-1)*_norm_logcdf(-x)
+
+    def _cdf(self, x, c):
+        return 1.0-_norm_cdf(-x)**(c*1.0)
+
+    def _ppf(self, q, c):
+        return -_norm_ppf(pow(1.0 - q, 1.0 / c))
+
+
+powernorm = powernorm_gen(name='powernorm')
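+
+
+# Editorial sketch, not part of upstream scipy (helper name illustrative):
+# with c = 1 the density c*phi(x)*Phi(-x)**(c-1) collapses to phi(x), so
+# powernorm should match the standard normal.
+def _sketch_powernorm_c1():
+    import numpy as np
+    from scipy import stats
+    x = np.linspace(-3, 3, 7)
+    assert np.allclose(stats.powernorm.pdf(x, 1.0), stats.norm.pdf(x))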
+
+
+class rdist_gen(rv_continuous):
+    r"""An R-distributed (symmetric beta) continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `rdist` is:
+
+    .. math::
+
+        f(x, c) = \frac{(1-x^2)^{c/2-1}}{B(1/2, c/2)}
+
+    for :math:`-1 \le x \le 1`, :math:`c > 0`. `rdist` is also called the
+    symmetric beta distribution: if B has a `beta` distribution with
+    parameters (c/2, c/2), then X = 2*B - 1 follows an R-distribution with
+    parameter c.
+
+    `rdist` takes ``c`` as a shape parameter for :math:`c`.
+
+    This distribution includes the following distribution kernels as
+    special cases::
+
+        c = 2:  uniform
+        c = 3:  semicircular
+        c = 4:  Epanechnikov (parabolic)
+        c = 6:  quartic (biweight)
+        c = 8:  triweight
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
+
+    # use relation to the beta distribution for pdf, cdf, etc
+    def _pdf(self, x, c):
+        return np.exp(self._logpdf(x, c))
+
+    def _logpdf(self, x, c):
+        return -np.log(2) + beta._logpdf((x + 1)/2, c/2, c/2)
+
+    def _cdf(self, x, c):
+        return beta._cdf((x + 1)/2, c/2, c/2)
+
+    def _ppf(self, q, c):
+        return 2*beta._ppf(q, c/2, c/2) - 1
+
+    def _rvs(self, c, size=None, random_state=None):
+        return 2 * random_state.beta(c/2, c/2, size) - 1
+
+    def _munp(self, n, c):
+        numerator = (1 - (n % 2)) * sc.beta((n + 1.0) / 2, c / 2.0)
+        return numerator / sc.beta(1. / 2, c / 2.)
+
+
+rdist = rdist_gen(a=-1.0, b=1.0, name="rdist")
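+
+
+# Editorial sketch, not part of upstream scipy (helper name illustrative):
+# checks the first special case in the docstring list: c = 2 gives the
+# uniform kernel, a constant density of 1/2 on [-1, 1].
+def _sketch_rdist_uniform_case():
+    import numpy as np
+    from scipy import stats
+    x = np.linspace(-0.9, 0.9, 7)
+    assert np.allclose(stats.rdist.pdf(x, 2), 0.5)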
+
+
+class rayleigh_gen(rv_continuous):
+    r"""A Rayleigh continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `rayleigh` is:
+
+    .. math::
+
+        f(x) = x \exp(-x^2/2)
+
+    for :math:`x \ge 0`.
+
+    `rayleigh` is a special case of `chi` with ``df=2``.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    _support_mask = rv_continuous._open_support_mask
+
+    def _shape_info(self):
+        return []
+
+    def _rvs(self, size=None, random_state=None):
+        return chi.rvs(2, size=size, random_state=random_state)
+
+    def _pdf(self, r):
+        # rayleigh.pdf(r) = r * exp(-r**2/2)
+        return np.exp(self._logpdf(r))
+
+    def _logpdf(self, r):
+        return np.log(r) - 0.5 * r * r
+
+    def _cdf(self, r):
+        return -sc.expm1(-0.5 * r**2)
+
+    def _ppf(self, q):
+        return np.sqrt(-2 * sc.log1p(-q))
+
+    def _sf(self, r):
+        return np.exp(self._logsf(r))
+
+    def _logsf(self, r):
+        return -0.5 * r * r
+
+    def _isf(self, q):
+        return np.sqrt(-2 * np.log(q))
+
+    def _stats(self):
+        val = 4 - np.pi
+        return (np.sqrt(np.pi/2),
+                val/2,
+                2*(np.pi-3)*np.sqrt(np.pi)/val**1.5,
+                6*np.pi/val-16/val**2)
+
+    def _entropy(self):
+        return _EULER/2.0 + 1 - 0.5*np.log(2)
+
+    @_call_super_mom
+    @extend_notes_in_docstring(rv_continuous, notes="""\
+        Notes specifically for ``rayleigh.fit``: If the location is fixed with
+        the `floc` parameter, this method uses an analytical formula to find
+        the scale.  Otherwise, this function uses a numerical root finder on
+        the first order conditions of the log-likelihood function to find the
+        MLE.  Only the (optional) `loc` parameter is used as the initial guess
+        for the root finder; the `scale` parameter and any other parameters
+        for the optimizer are ignored.\n\n""")
+    def fit(self, data, *args, **kwds):
+        if kwds.pop('superfit', False):
+            return super().fit(data, *args, **kwds)
+        data, floc, fscale = _check_fit_input_parameters(self, data,
+                                                         args, kwds)
+
+        def scale_mle(loc):
+            # Source: Statistical Distributions, 3rd Edition. Evans, Hastings,
+            # and Peacock (2000), Page 175
+            return (np.sum((data - loc) ** 2) / (2 * len(data))) ** .5
+
+        def loc_mle(loc):
+            # This implicit equation for `loc` is used when
+            # both `loc` and `scale` are free.
+            xm = data - loc
+            s1 = xm.sum()
+            s2 = (xm**2).sum()
+            s3 = (1/xm).sum()
+            return s1 - s2/(2*len(data))*s3
+
+        def loc_mle_scale_fixed(loc, scale=fscale):
+            # This implicit equation for `loc` is used when
+            # `scale` is fixed but `loc` is not.
+            xm = data - loc
+            return xm.sum() - scale**2 * (1/xm).sum()
+
+        if floc is not None:
+            # `loc` is fixed, analytically determine `scale`.
+            if np.any(data - floc <= 0):
+                raise FitDataError("rayleigh", lower=1, upper=np.inf)
+            else:
+                return floc, scale_mle(floc)
+
+        # Account for user provided guess of `loc`.
+        loc0 = kwds.get('loc')
+        if loc0 is None:
+            # Use _fitstart to estimate loc; ignore the returned scale.
+            loc0 = self._fitstart(data)[0]
+
+        fun = loc_mle if fscale is None else loc_mle_scale_fixed
+        rbrack = np.nextafter(np.min(data), -np.inf)
+        lbrack = _get_left_bracket(fun, rbrack)
+        res = optimize.root_scalar(fun, bracket=(lbrack, rbrack))
+        if not res.converged:
+            raise FitSolverError(res.flag)
+        loc = res.root
+        scale = fscale or scale_mle(loc)
+        return loc, scale
+
+
+rayleigh = rayleigh_gen(a=0.0, name="rayleigh")
+
+
+class reciprocal_gen(rv_continuous):
+    r"""A loguniform or reciprocal continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for this class is:
+
+    .. math::
+
+        f(x, a, b) = \frac{1}{x \log(b/a)}
+
+    for :math:`a \le x \le b`, :math:`b > a > 0`. This class takes
+    :math:`a` and :math:`b` as shape parameters.
+
+    %(after_notes)s
+
+    %(example)s
+
+    The plot above doesn't show the equal probability of ``0.01``, ``0.1``
+    and ``1``; that is easiest to see when the x-axis is log-scaled:
+
+    >>> import numpy as np
+    >>> fig, ax = plt.subplots(1, 1)
+    >>> ax.hist(np.log10(r))
+    >>> ax.set_ylabel("Frequency")
+    >>> ax.set_xlabel("Value of random variable")
+    >>> ax.xaxis.set_major_locator(plt.FixedLocator([-2, -1, 0]))
+    >>> ticks = ["$10^{{ {} }}$".format(i) for i in [-2, -1, 0]]
+    >>> ax.set_xticklabels(ticks)  # doctest: +SKIP
+    >>> plt.show()
+
+    This random variable will be log-uniform regardless of the base chosen for
+    ``a`` and ``b``. Let's specify with base ``2`` instead:
+
+    >>> rvs = %(name)s(2**-2, 2**0).rvs(size=1000)
+
+    Values of ``1/4``, ``1/2`` and ``1`` are equally likely with this random
+    variable.  Here's the histogram:
+
+    >>> fig, ax = plt.subplots(1, 1)
+    >>> ax.hist(np.log2(rvs))
+    >>> ax.set_ylabel("Frequency")
+    >>> ax.set_xlabel("Value of random variable")
+    >>> ax.xaxis.set_major_locator(plt.FixedLocator([-2, -1, 0]))
+    >>> ticks = ["$2^{{ {} }}$".format(i) for i in [-2, -1, 0]]
+    >>> ax.set_xticklabels(ticks)  # doctest: +SKIP
+    >>> plt.show()
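+
+    Because the distribution is over-parameterized, ``scale`` is redundant
+    with the shape parameters: ``%(name)s(a, b, scale=s)`` describes the
+    same distribution as ``%(name)s(a*s, b*s)``. A quick numerical check
+    (values arbitrary):
+
+    >>> np.isclose(%(name)s(2, 8, scale=3).cdf(10), %(name)s(6, 24).cdf(10))
+    True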
+
+    """
+    def _argcheck(self, a, b):
+        return (a > 0) & (b > a)
+
+    def _shape_info(self):
+        ia = _ShapeInfo("a", False, (0, np.inf), (False, False))
+        ib = _ShapeInfo("b", False, (0, np.inf), (False, False))
+        return [ia, ib]
+
+    def _fitstart(self, data):
+        # Reasonable, since support is [a, b]
+        return super()._fitstart(data, args=(np.min(data), np.max(data)))
+
+    def _get_support(self, a, b):
+        return a, b
+
+    def _pdf(self, x, a, b):
+        # reciprocal.pdf(x, a, b) = 1 / (x*log(b/a))
+        return 1.0 / (x * np.log(b * 1.0 / a))
+
+    def _logpdf(self, x, a, b):
+        return -np.log(x) - np.log(np.log(b * 1.0 / a))
+
+    def _cdf(self, x, a, b):
+        return (np.log(x)-np.log(a)) / np.log(b * 1.0 / a)
+
+    def _ppf(self, q, a, b):
+        return a*pow(b*1.0/a, q)
+
+    def _munp(self, n, a, b):
+        return 1.0/np.log(b*1.0/a) / n * (pow(b*1.0, n) - pow(a*1.0, n))
+
+    def _entropy(self, a, b):
+        return 0.5*np.log(a*b)+np.log(np.log(b*1.0/a))
+
+    fit_note = """\
+        `loguniform`/`reciprocal` is over-parameterized. `fit` automatically
+        fixes `scale` to 1 unless `fscale` is provided by the user.\n\n"""
+
+    @extend_notes_in_docstring(rv_continuous, notes=fit_note)
+    def fit(self, data, *args, **kwds):
+        fscale = kwds.pop('fscale', 1)
+        return super().fit(data, *args, fscale=fscale, **kwds)
+
+
+loguniform = reciprocal_gen(name="loguniform")
+reciprocal = reciprocal_gen(name="reciprocal")
+
+
+class rice_gen(rv_continuous):
+    r"""A Rice continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `rice` is:
+
+    .. math::
+
+        f(x, b) = x \exp(- \frac{x^2 + b^2}{2}) I_0(x b)
+
+    for :math:`x >= 0`, :math:`b > 0`. :math:`I_0` is the modified Bessel
+    function of order zero (`scipy.special.i0`).
+
+    `rice` takes ``b`` as a shape parameter for :math:`b`.
+
+    %(after_notes)s
+
+    The Rice distribution describes the length, :math:`r`, of a 2-D vector
+    with components :math:`(U+u, V+v)`, where :math:`U, V` are constant and
+    :math:`u, v` are independent Gaussian random variables with standard
+    deviation :math:`s`.  Let :math:`R = \sqrt{U^2 + V^2}`. Then the pdf of
+    :math:`r` is ``rice.pdf(x, R/s, scale=s)``.
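+
+    The density formula above can be checked against the Bessel function
+    directly (a small numerical sketch; ``x`` and ``b`` are arbitrary):
+
+    >>> import numpy as np
+    >>> from scipy import stats, special
+    >>> x, b = 1.2, 0.8
+    >>> np.isclose(stats.rice.pdf(x, b),
+    ...            x * np.exp(-(x**2 + b**2)/2) * special.i0(x*b))
+    True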
+
+    %(example)s
+
+    """
+    def _argcheck(self, b):
+        return b >= 0
+
+    def _shape_info(self):
+        return [_ShapeInfo("b", False, (0, np.inf), (True, False))]
+
+    def _rvs(self, b, size=None, random_state=None):
+        # https://en.wikipedia.org/wiki/Rice_distribution
+        t = b/np.sqrt(2) + random_state.standard_normal(size=(2,) + size)
+        return np.sqrt((t*t).sum(axis=0))
+
+    def _cdf(self, x, b):
+        return sc.chndtr(np.square(x), 2, np.square(b))
+
+    def _ppf(self, q, b):
+        return np.sqrt(sc.chndtrix(q, 2, np.square(b)))
+
+    def _pdf(self, x, b):
+        # rice.pdf(x, b) = x * exp(-(x**2+b**2)/2) * I[0](x*b)
+        #
+        # We use (x**2 + b**2)/2 = ((x-b)**2)/2 + xb.
+        # The factor of np.exp(-xb) is then included in the i0e function
+        # in place of the modified Bessel function, i0, improving
+        # numerical stability for large values of xb.
+        return x * np.exp(-(x-b)*(x-b)/2.0) * sc.i0e(x*b)
+
+    def _munp(self, n, b):
+        nd2 = n/2.0
+        n1 = 1 + nd2
+        b2 = b*b/2.0
+        return (2.0**(nd2) * np.exp(-b2) * sc.gamma(n1) *
+                sc.hyp1f1(n1, 1, b2))
+
+
+rice = rice_gen(a=0.0, name="rice")
+
+
+class recipinvgauss_gen(rv_continuous):
+    r"""A reciprocal inverse Gaussian continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `recipinvgauss` is:
+
+    .. math::
+
+        f(x, \mu) = \frac{1}{\sqrt{2\pi x}}
+                    \exp\left(\frac{-(1-\mu x)^2}{2\mu^2x}\right)
+
+    for :math:`x \ge 0`.
+
+    `recipinvgauss` takes ``mu`` as a shape parameter for :math:`\mu`.
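+
+    As the name suggests, if :math:`X` follows an `invgauss` distribution
+    with shape ``mu``, then :math:`1/X` follows `recipinvgauss` with the same
+    ``mu`` (random variates are drawn this way). A quick numerical check of
+    this relationship (values arbitrary):
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> x, mu = 1.5, 2.0
+    >>> np.isclose(stats.recipinvgauss.cdf(x, mu), stats.invgauss.sf(1/x, mu))
+    True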
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("mu", False, (0, np.inf), (False, False))]
+
+    def _pdf(self, x, mu):
+        # recipinvgauss.pdf(x, mu) =
+        #                     1/sqrt(2*pi*x) * exp(-(1-mu*x)**2/(2*x*mu**2))
+        return np.exp(self._logpdf(x, mu))
+
+    def _logpdf(self, x, mu):
+        return _lazywhere(x > 0, (x, mu),
+                          lambda x, mu: (-(1 - mu*x)**2.0 / (2*x*mu**2.0)
+                                         - 0.5*np.log(2*np.pi*x)),
+                          fillvalue=-np.inf)
+
+    def _cdf(self, x, mu):
+        trm1 = 1.0/mu - x
+        trm2 = 1.0/mu + x
+        isqx = 1.0/np.sqrt(x)
+        return _norm_cdf(-isqx*trm1) - np.exp(2.0/mu)*_norm_cdf(-isqx*trm2)
+
+    def _sf(self, x, mu):
+        trm1 = 1.0/mu - x
+        trm2 = 1.0/mu + x
+        isqx = 1.0/np.sqrt(x)
+        return _norm_cdf(isqx*trm1) + np.exp(2.0/mu)*_norm_cdf(-isqx*trm2)
+
+    def _rvs(self, mu, size=None, random_state=None):
+        return 1.0/random_state.wald(mu, 1.0, size=size)
+
+
+recipinvgauss = recipinvgauss_gen(a=0.0, name='recipinvgauss')
+
+
+class semicircular_gen(rv_continuous):
+    r"""A semicircular continuous random variable.
+
+    %(before_notes)s
+
+    See Also
+    --------
+    rdist
+
+    Notes
+    -----
+    The probability density function for `semicircular` is:
+
+    .. math::
+
+        f(x) = \frac{2}{\pi} \sqrt{1-x^2}
+
+    for :math:`-1 \le x \le 1`.
+
+    The distribution is a special case of `rdist` with `c = 3`.
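+
+    The special case can be verified numerically (the evaluation point is
+    arbitrary):
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> np.isclose(stats.semicircular.pdf(0.3), stats.rdist.pdf(0.3, 3))
+    True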
+
+    %(after_notes)s
+
+    References
+    ----------
+    .. [1] "Wigner semicircle distribution",
+           https://en.wikipedia.org/wiki/Wigner_semicircle_distribution
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return []
+
+    def _pdf(self, x):
+        return 2.0/np.pi*np.sqrt(1-x*x)
+
+    def _logpdf(self, x):
+        return np.log(2/np.pi) + 0.5*sc.log1p(-x*x)
+
+    def _cdf(self, x):
+        return 0.5+1.0/np.pi*(x*np.sqrt(1-x*x) + np.arcsin(x))
+
+    def _ppf(self, q):
+        return rdist._ppf(q, 3)
+
+    def _rvs(self, size=None, random_state=None):
+        # generate values uniformly distributed on the area under the pdf
+        # (semi-circle) by randomly generating the radius and angle
+        r = np.sqrt(random_state.uniform(size=size))
+        a = np.cos(np.pi * random_state.uniform(size=size))
+        return r * a
+
+    def _stats(self):
+        return 0, 0.25, 0, -1.0
+
+    def _entropy(self):
+        return 0.64472988584940017414
+
+
+semicircular = semicircular_gen(a=-1.0, b=1.0, name="semicircular")
+
+
+class skewcauchy_gen(rv_continuous):
+    r"""A skewed Cauchy random variable.
+
+    %(before_notes)s
+
+    See Also
+    --------
+    cauchy : Cauchy distribution
+
+    Notes
+    -----
+
+    The probability density function for `skewcauchy` is:
+
+    .. math::
+
+        f(x) = \frac{1}{\pi \left(\frac{x^2}{\left(a\, \text{sign}(x) + 1
+                                                   \right)^2} + 1 \right)}
+
+    for a real number :math:`x` and skewness parameter :math:`-1 < a < 1`.
+
+    When :math:`a=0`, the distribution reduces to the usual Cauchy
+    distribution.
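+
+    A quick check of the Cauchy special case (evaluation point arbitrary):
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> np.isclose(stats.skewcauchy.pdf(1.0, 0.0), stats.cauchy.pdf(1.0))
+    True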
+
+    %(after_notes)s
+
+    References
+    ----------
+    .. [1] "Skewed generalized *t* distribution", Wikipedia
+       https://en.wikipedia.org/wiki/Skewed_generalized_t_distribution#Skewed_Cauchy_distribution
+
+    %(example)s
+
+    """
+    def _argcheck(self, a):
+        return np.abs(a) < 1
+
+    def _shape_info(self):
+        return [_ShapeInfo("a", False, (-1.0, 1.0), (False, False))]
+
+    def _pdf(self, x, a):
+        return 1 / (np.pi * (x**2 / (a * np.sign(x) + 1)**2 + 1))
+
+    def _cdf(self, x, a):
+        return np.where(x <= 0,
+                        (1 - a) / 2 + (1 - a) / np.pi * np.arctan(x / (1 - a)),
+                        (1 - a) / 2 + (1 + a) / np.pi * np.arctan(x / (1 + a)))
+
+    def _ppf(self, x, a):
+        i = x < self._cdf(0, a)
+        return np.where(i,
+                        np.tan(np.pi / (1 - a) * (x - (1 - a) / 2)) * (1 - a),
+                        np.tan(np.pi / (1 + a) * (x - (1 - a) / 2)) * (1 + a))
+
+    def _stats(self, a, moments='mvsk'):
+        return np.nan, np.nan, np.nan, np.nan
+
+    def _fitstart(self, data):
+        # Use 0 as the initial guess of the skewness shape parameter.
+        # For the location and scale, estimate using the median and
+        # quartiles.
+        p25, p50, p75 = np.percentile(data, [25, 50, 75])
+        return 0.0, p50, (p75 - p25)/2
+
+
+skewcauchy = skewcauchy_gen(name='skewcauchy')
+
+
+class skew_norm_gen(rv_continuous):
+    r"""A skew-normal random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The pdf is::
+
+        skewnorm.pdf(x, a) = 2 * norm.pdf(x) * norm.cdf(a*x)
+
+    `skewnorm` takes a real number :math:`a` as a skewness parameter.
+    When ``a = 0``, the distribution is identical to a normal distribution
+    (`norm`). `rvs` implements the method of [1]_.
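+
+    A quick numerical check of the pdf identity above (``x`` and ``a``
+    arbitrary):
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> np.isclose(stats.skewnorm.pdf(0.5, 3.0),
+    ...            2 * stats.norm.pdf(0.5) * stats.norm.cdf(3.0 * 0.5))
+    True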
+
+    %(after_notes)s
+
+    %(example)s
+
+    References
+    ----------
+    .. [1] A. Azzalini and A. Capitanio (1999). Statistical applications of
+        the multivariate skew-normal distribution. J. Roy. Statist. Soc.,
+        B 61, 579-602. :arxiv:`0911.2093`
+
+    """
+    def _argcheck(self, a):
+        return np.isfinite(a)
+
+    def _shape_info(self):
+        return [_ShapeInfo("a", False, (-np.inf, np.inf), (False, False))]
+
+    def _pdf(self, x, a):
+        return _lazywhere(
+            a == 0, (x, a), lambda x, a: _norm_pdf(x),
+            f2=lambda x, a: 2.*_norm_pdf(x)*_norm_cdf(a*x)
+        )
+
+    def _cdf(self, x, a):
+        cdf = _boost._skewnorm_cdf(x, 0, 1, a)
+        # for some reason, a isn't broadcasted if some of x are invalid
+        a = np.broadcast_to(a, cdf.shape)
+        # Boost is not accurate in left tail when a > 0
+        i_small_cdf = (cdf < 1e-6) & (a > 0)
+        cdf[i_small_cdf] = super()._cdf(x[i_small_cdf], a[i_small_cdf])
+        return np.clip(cdf, 0, 1)
+
+    def _ppf(self, x, a):
+        return _boost._skewnorm_ppf(x, 0, 1, a)
+
+    def _sf(self, x, a):
+        # Boost's SF is implemented this way. Use whatever customizations
+        # we made in the _cdf.
+        return self._cdf(-x, -a)
+
+    def _isf(self, x, a):
+        return _boost._skewnorm_isf(x, 0, 1, a)
+
+    def _rvs(self, a, size=None, random_state=None):
+        u0 = random_state.normal(size=size)
+        v = random_state.normal(size=size)
+        d = a/np.sqrt(1 + a**2)
+        u1 = d*u0 + v*np.sqrt(1 - d**2)
+        return np.where(u0 >= 0, u1, -u1)
+
+    def _stats(self, a, moments='mvsk'):
+        output = [None, None, None, None]
+        const = np.sqrt(2/np.pi) * a/np.sqrt(1 + a**2)
+
+        if 'm' in moments:
+            output[0] = const
+        if 'v' in moments:
+            output[1] = 1 - const**2
+        if 's' in moments:
+            output[2] = ((4 - np.pi)/2) * (const/np.sqrt(1 - const**2))**3
+        if 'k' in moments:
+            output[3] = (2*(np.pi - 3)) * (const**4/(1 - const**2)**2)
+
+        return output
+
+    # For odd order, each noncentral moment of the skew-normal distribution
+    # with location 0 and scale 1 can be expressed as a polynomial in delta,
+    # where delta = a/sqrt(1 + a**2) and `a` is the skew-normal shape
+    # parameter.  The dictionary _skewnorm_odd_moments defines those
+    # polynomials for orders up to 19.  The dict is implemented as a cached
+    # property to reduce the impact of the creation of the dict on import time.
+    @cached_property
+    def _skewnorm_odd_moments(self):
+        skewnorm_odd_moments = {
+            1: Polynomial([1]),
+            3: Polynomial([3, -1]),
+            5: Polynomial([15, -10, 3]),
+            7: Polynomial([105, -105, 63, -15]),
+            9: Polynomial([945, -1260, 1134, -540, 105]),
+            11: Polynomial([10395, -17325, 20790, -14850, 5775, -945]),
+            13: Polynomial([135135, -270270, 405405, -386100, 225225, -73710,
+                            10395]),
+            15: Polynomial([2027025, -4729725, 8513505, -10135125, 7882875,
+                            -3869775, 1091475, -135135]),
+            17: Polynomial([34459425, -91891800, 192972780, -275675400,
+                            268017750, -175429800, 74220300, -18378360,
+                            2027025]),
+            19: Polynomial([654729075, -1964187225, 4714049340, -7856748900,
+                            9166207050, -7499623950, 4230557100, -1571349780,
+                            346621275, -34459425]),
+        }
+        return skewnorm_odd_moments
+
+    def _munp(self, order, a):
+        if order & 1:
+            if order > 19:
+                raise NotImplementedError("skewnorm noncentral moments not "
+                                          "implemented for odd orders greater "
+                                          "than 19.")
+            # Use the precomputed polynomials that were derived from the
+            # moment generating function.
+            delta = a/np.sqrt(1 + a**2)
+            return (delta * self._skewnorm_odd_moments[order](delta**2)
+                    * _SQRT_2_OVER_PI)
+        else:
+            # For even order, the moment is just (order-1)!!, where !! is the
+            # notation for the double factorial; for an odd integer m, m!! is
+            # m*(m-2)*...*3*1.
+            # We could use special.factorial2, but we know the argument is odd,
+            # so avoid the overhead of that function and compute the result
+            # directly here.
+            return sc.gamma((order + 1)/2) * 2**(order/2) / _SQRT_PI
+
+    @extend_notes_in_docstring(rv_continuous, notes="""\
+        If ``method='mm'``, parameters fixed by the user are respected, and the
+        remaining parameters are used to match distribution and sample moments
+        where possible. For example, if the user fixes the location with
+        ``floc``, the parameters will only match the distribution skewness and
+        variance to the sample skewness and variance; no attempt will be made
+        to match the means or minimize a norm of the errors.
+        Note that the maximum possible skewness magnitude of a
+        `scipy.stats.skewnorm` distribution is approximately 0.9952717; if the
+        magnitude of the data's sample skewness exceeds this, the returned
+        shape parameter ``a`` will be infinite.
+        \n\n""")
+    def fit(self, data, *args, **kwds):
+        # this extracts fixed shape, location, and scale however they
+        # are specified, and also leaves them in `kwds`
+        data, fa, floc, fscale = _check_fit_input_parameters(self, data,
+                                                             args, kwds)
+        method = kwds.get("method", "mle").lower()
+
+        # See https://en.wikipedia.org/wiki/Skew_normal_distribution for
+        # moment formulas.
+        def skew_d(d):  # skewness in terms of delta
+            return (4-np.pi)/2 * ((d * np.sqrt(2 / np.pi))**3
+                                  / (1 - 2*d**2 / np.pi)**(3/2))
+
+        # If the sample skewness exceeds the maximum possible population
+        # skewness, MoM won't provide a good guess. Get out early.
+        s = stats.skew(data)
+        s_max = skew_d(1)
+        if abs(s) >= s_max and method != "mm" and fa is None and not args:
+            return super().fit(data, *args, **kwds)
+
+        # If method is method of moments, we don't need the user's guesses.
+        # Otherwise, extract the guesses from args and kwds.
+        if method == "mm":
+            a, loc, scale = None, None, None
+        else:
+            a = args[0] if len(args) else None
+            loc = kwds.pop('loc', None)
+            scale = kwds.pop('scale', None)
+
+        if fa is None and a is None:  # not fixed and no guess: use MoM
+            # Solve for the shape parameter `a` that matches the distribution
+            # skewness to the sample skewness.
+            s = np.clip(s, -s_max, s_max)
+            d = root_scalar(lambda d: skew_d(d) - s, bracket=[-1, 1]).root
+            with np.errstate(divide='ignore'):
+                a = np.sqrt(np.divide(d**2, (1-d**2)))*np.sign(s)
+        else:
+            a = fa if fa is not None else a
+            d = a / np.sqrt(1 + a**2)
+
+        if fscale is None and scale is None:
+            v = np.var(data)
+            scale = np.sqrt(v / (1 - 2*d**2/np.pi))
+        elif fscale is not None:
+            scale = fscale
+
+        if floc is None and loc is None:
+            m = np.mean(data)
+            loc = m - scale*d*np.sqrt(2/np.pi)
+        elif floc is not None:
+            loc = floc
+
+        if method == 'mm':
+            return a, loc, scale
+        else:
+            # At this point, parameter "guesses" may equal the fixed parameters
+            # in kwds. No harm in passing them as guesses, too.
+            return super().fit(data, a, loc=loc, scale=scale, **kwds)
+
+
+skewnorm = skew_norm_gen(name='skewnorm')
+
+
+class trapezoid_gen(rv_continuous):
+    r"""A trapezoidal continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The trapezoidal distribution can be represented with an up-sloping line
+    from ``loc`` to ``(loc + c*scale)``, then constant to ``(loc + d*scale)``
+    and then downsloping from ``(loc + d*scale)`` to ``(loc+scale)``.  This
+    defines the trapezoid base from ``loc`` to ``(loc+scale)`` and the flat
+    top from ``(loc + c*scale)`` to ``(loc + d*scale)``; ``c`` and ``d`` are
+    thus positions along the base expressed as fractions, with
+    ``0 <= c <= d <= 1``.  When ``c=d``, this is equivalent to `triang`
+    with the same values for `loc`, `scale` and `c`.
+    The method of [1]_ is used for computing moments.
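+
+    The ``c=d`` special case can be checked numerically (values arbitrary):
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> np.isclose(stats.trapezoid.pdf(0.4, 0.3, 0.3),
+    ...            stats.triang.pdf(0.4, 0.3))
+    True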
+
+    `trapezoid` takes :math:`c` and :math:`d` as shape parameters.
+
+    %(after_notes)s
+
+    The standard form is in the range [0, 1], with the flat top running
+    from ``c`` to ``d``.
+    The location parameter shifts the start to `loc`.
+    The scale parameter changes the width from 1 to `scale`.
+
+    %(example)s
+
+    References
+    ----------
+    .. [1] Kacker, R.N. and Lawrence, J.F. (2007). Trapezoidal and triangular
+       distributions for Type B evaluation of standard uncertainty.
+       Metrologia 44, 117-127. :doi:`10.1088/0026-1394/44/2/003`
+
+
+    """
+    def _argcheck(self, c, d):
+        return (c >= 0) & (c <= 1) & (d >= 0) & (d <= 1) & (d >= c)
+
+    def _shape_info(self):
+        ic = _ShapeInfo("c", False, (0, 1.0), (True, True))
+        id = _ShapeInfo("d", False, (0, 1.0), (True, True))
+        return [ic, id]
+
+    def _pdf(self, x, c, d):
+        u = 2 / (d-c+1)
+
+        return _lazyselect([x < c,
+                            (c <= x) & (x <= d),
+                            x > d],
+                           [lambda x, c, d, u: u * x / c,
+                            lambda x, c, d, u: u,
+                            lambda x, c, d, u: u * (1-x) / (1-d)],
+                            (x, c, d, u))
+
+    def _cdf(self, x, c, d):
+        return _lazyselect([x < c,
+                            (c <= x) & (x <= d),
+                            x > d],
+                           [lambda x, c, d: x**2 / c / (d-c+1),
+                            lambda x, c, d: (c + 2 * (x-c)) / (d-c+1),
+                            lambda x, c, d: 1-((1-x) ** 2
+                                               / (d-c+1) / (1-d))],
+                            (x, c, d))
+
+    def _ppf(self, q, c, d):
+        qc, qd = self._cdf(c, c, d), self._cdf(d, c, d)
+        condlist = [q < qc, q <= qd, q > qd]
+        choicelist = [np.sqrt(q * c * (1 + d - c)),
+                      0.5 * q * (1 + d - c) + 0.5 * c,
+                      1 - np.sqrt((1 - q) * (d - c + 1) * (1 - d))]
+        return np.select(condlist, choicelist)
+
+    def _munp(self, n, c, d):
+        # Using the parameterization from Kacker, 2007, with
+        # a=bottom left, c=top left, d=top right, b=bottom right, then
+        #     E[X^n] = h/(n+1)/(n+2) [(b^{n+2}-d^{n+2})/(b-d)
+        #                             - (c^{n+2}-a^{n+2})/(c-a)]
+        # with h = 2/((b-a) + (d-c)). The corresponding parameterization
+        # in scipy has a'=loc, c'=loc+c*scale, d'=loc+d*scale, b'=loc+scale,
+        # which for standard form reduces to a'=0, b'=1, c'=c, d'=d.
+        # Substituting into E[X^n] gives the bd' term as (1 - d^{n+2})/(1 - d)
+        # and the ac' term as c^{n+1} for the standard form. The bd' term has
+        # numerical difficulties near d=1, so replace (1 - d^{n+2})/(1-d)
+        # with expm1((n+2)*log(d))/(d-1).
+        # Testing with n=18 for c=(1e-30,1-eps) shows that this is stable.
+        # We still require an explicit test for d=1 to prevent divide by zero,
+        # and now a test for d=0 to prevent log(0).
+        ab_term = c**(n+1)
+        dc_term = _lazyselect(
+            [d == 0.0, (0.0 < d) & (d < 1.0), d == 1.0],
+            [lambda d: 1.0,
+             lambda d: np.expm1((n+2) * np.log(d)) / (d-1.0),
+             lambda d: n+2],
+            [d])
+        val = 2.0 / (1.0+d-c) * (dc_term - ab_term) / ((n+1) * (n+2))
+        return val
+
+    def _entropy(self, c, d):
+        # Using the parameterization from Wikipedia (van Dorp, 2003)
+        # with a=bottom left, c=top left, d=top right, b=bottom right
+        # gives a'=loc, b'=loc+c*scale, c'=loc+d*scale, d'=loc+scale,
+        # which for loc=0, scale=1 is a'=0, b'=c, c'=d, d'=1.
+        # Substituting into the entropy formula from Wikipedia gives
+        # the following result.
+        return 0.5 * (1.0-d+c) / (1.0+d-c) + np.log(0.5 * (1.0+d-c))
+
+
+trapezoid = trapezoid_gen(a=0.0, b=1.0, name="trapezoid")
+# Note: alias kept for backwards compatibility. Rename was done
+# because trapz is a slur in colloquial English (see gh-12924).
+trapz = trapezoid_gen(a=0.0, b=1.0, name="trapz")
+if trapz.__doc__:
+    trapz.__doc__ = "trapz is an alias for `trapezoid`"
+
+
+class triang_gen(rv_continuous):
+    r"""A triangular continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The triangular distribution can be represented with an up-sloping line
+    from ``loc`` to ``(loc + c*scale)`` and then downsloping from
+    ``(loc + c*scale)`` to ``(loc + scale)``.
+
+    `triang` takes ``c`` as a shape parameter for :math:`0 \le c \le 1`.
+
+    %(after_notes)s
+
+    The standard form is in the range [0, 1] with c the mode.
+    The location parameter shifts the start to `loc`.
+    The scale parameter changes the width from 1 to `scale`.
+
+    %(example)s
+
+    """
+    def _rvs(self, c, size=None, random_state=None):
+        return random_state.triangular(0, c, 1, size)
+
+    def _argcheck(self, c):
+        return (c >= 0) & (c <= 1)
+
+    def _shape_info(self):
+        return [_ShapeInfo("c", False, (0, 1.0), (True, True))]
+
+    def _pdf(self, x, c):
+        # 0: edge case where c=0
+        # 1: generalised case for x < c, don't use x <= c, as it doesn't cope
+        #    with c = 0.
+        # 2: generalised case for x >= c, but doesn't cope with c = 1
+        # 3: edge case where c=1
+        r = _lazyselect([c == 0,
+                         x < c,
+                         (x >= c) & (c != 1),
+                         c == 1],
+                        [lambda x, c: 2 - 2 * x,
+                         lambda x, c: 2 * x / c,
+                         lambda x, c: 2 * (1 - x) / (1 - c),
+                         lambda x, c: 2 * x],
+                        (x, c))
+        return r
+
+    def _cdf(self, x, c):
+        r = _lazyselect([c == 0,
+                         x < c,
+                         (x >= c) & (c != 1),
+                         c == 1],
+                        [lambda x, c: 2*x - x*x,
+                         lambda x, c: x * x / c,
+                         lambda x, c: (x*x - 2*x + c) / (c-1),
+                         lambda x, c: x * x],
+                        (x, c))
+        return r
+
+    def _ppf(self, q, c):
+        return np.where(q < c, np.sqrt(c * q), 1-np.sqrt((1-c) * (1-q)))
+
+    def _stats(self, c):
+        return ((c+1.0)/3.0,
+                (1.0-c+c*c)/18,
+                np.sqrt(2)*(2*c-1)*(c+1)*(c-2) / (5*np.power((1.0-c+c*c), 1.5)),
+                -3.0/5.0)
+
+    def _entropy(self, c):
+        return 0.5-np.log(2)
+
+
+triang = triang_gen(a=0.0, b=1.0, name="triang")
+
+
+class truncexpon_gen(rv_continuous):
+    r"""A truncated exponential continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `truncexpon` is:
+
+    .. math::
+
+        f(x, b) = \frac{\exp(-x)}{1 - \exp(-b)}
+
+    for :math:`0 <= x <= b`.
+
+    `truncexpon` takes ``b`` as a shape parameter for :math:`b`.
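+
+    Truncation simply renormalizes the exponential distribution on
+    ``[0, b]``, so, for instance (values arbitrary):
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> x, b = 1.0, 4.0
+    >>> np.isclose(stats.truncexpon.cdf(x, b),
+    ...            stats.expon.cdf(x) / stats.expon.cdf(b))
+    True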
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("b", False, (0, np.inf), (False, False))]
+
+    def _get_support(self, b):
+        return self.a, b
+
+    def _pdf(self, x, b):
+        # truncexpon.pdf(x, b) = exp(-x) / (1-exp(-b))
+        return np.exp(-x)/(-sc.expm1(-b))
+
+    def _logpdf(self, x, b):
+        return -x - np.log(-sc.expm1(-b))
+
+    def _cdf(self, x, b):
+        return sc.expm1(-x)/sc.expm1(-b)
+
+    def _ppf(self, q, b):
+        return -sc.log1p(q*sc.expm1(-b))
+
+    def _munp(self, n, b):
+        # wrong answer with formula, same as in continuous.pdf:
+        # return sc.gamma(n+1) - sc.gammainc(1+n, b)
+        if n == 1:
+            return (1-(b+1)*np.exp(-b))/(-sc.expm1(-b))
+        elif n == 2:
+            return 2*(1-0.5*(b*b+2*b+2)*np.exp(-b))/(-sc.expm1(-b))
+        else:
+            # return generic for higher moments
+            # return rv_continuous._mom1_sc(self, n, b)
+            return self._mom1_sc(n, b)
+
+    def _entropy(self, b):
+        eB = np.exp(b)
+        return np.log(eB-1)+(1+eB*(b-1.0))/(1.0-eB)
+
+
+truncexpon = truncexpon_gen(a=0.0, name='truncexpon')
+
+
+# logsumexp trick for log(p + q) with only log(p) and log(q)
+def _log_sum(log_p, log_q):
+    return sc.logsumexp([log_p, log_q], axis=0)
+
+
+# same as above, but using -exp(x) = exp(x + πi)
+def _log_diff(log_p, log_q):
+    return sc.logsumexp([log_p, log_q+np.pi*1j], axis=0)
+
+
+def _log_gauss_mass(a, b):
+    """Log of Gaussian probability mass within an interval"""
+    a, b = np.atleast_1d(a), np.atleast_1d(b)
+    a, b = np.broadcast_arrays(a, b)
+
+    # Calculations in right tail are inaccurate, so we'll exploit the
+    # symmetry and work only in the left tail
+    case_left = b <= 0
+    case_right = a > 0
+    case_central = ~(case_left | case_right)
+
+    def mass_case_left(a, b):
+        return _log_diff(sc.log_ndtr(b), sc.log_ndtr(a))
+
+    def mass_case_right(a, b):
+        return mass_case_left(-b, -a)
+
+    def mass_case_central(a, b):
+        # Previously, this was implemented as:
+        # left_mass = mass_case_left(a, 0)
+        # right_mass = mass_case_right(0, b)
+        # return _log_sum(left_mass, right_mass)
+        # Catastrophic cancellation occurs as np.exp(log_mass) approaches 1.
+        # Correct for this with an alternative formulation.
+        # We're not concerned with underflow here: if only one term
+        # underflows, it was insignificant; if both terms underflow,
+        # the result can't accurately be represented in logspace anyway
+        # because sc.log1p(x) ~ x for small x.
+        return sc.log1p(-sc.ndtr(a) - sc.ndtr(-b))
+
+    # _lazyselect not working; don't care to debug it
+    out = np.full_like(a, fill_value=np.nan, dtype=np.complex128)
+    if a[case_left].size:
+        out[case_left] = mass_case_left(a[case_left], b[case_left])
+    if a[case_right].size:
+        out[case_right] = mass_case_right(a[case_right], b[case_right])
+    if a[case_central].size:
+        out[case_central] = mass_case_central(a[case_central], b[case_central])
+    return np.real(out)  # discard ~0j
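+
+
+# A quick sanity check (illustrative): for a central interval the result
+# agrees with the naive computation,
+#     _log_gauss_mass(-1, 1)[0] ~= np.log(sc.ndtr(1) - sc.ndtr(-1)),
+# while the log-space formulation remains accurate far out in the tails,
+# where sc.ndtr itself underflows.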
+
+
+class truncnorm_gen(rv_continuous):
+    r"""A truncated normal continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    This distribution is the normal distribution centered on ``loc`` (default
+    0), with standard deviation ``scale`` (default 1), and clipped at ``a``,
+    ``b`` standard deviations to the left, right (respectively) from ``loc``.
+    If ``myclip_a`` and ``myclip_b`` are clip values in the sample space (as
+    opposed to the number of standard deviations) then they can be converted
+    to the required form according to::
+
+        a, b = (myclip_a - loc) / scale, (myclip_b - loc) / scale
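+
+    For example, a normal distribution with mean 0.5 and standard deviation
+    2, truncated to the sample-space interval ``[0, 3]`` (values arbitrary):
+
+    >>> from scipy import stats
+    >>> loc, scale = 0.5, 2
+    >>> myclip_a, myclip_b = 0, 3
+    >>> a, b = (myclip_a - loc) / scale, (myclip_b - loc) / scale
+    >>> stats.truncnorm(a, b, loc=loc, scale=scale).support()
+    (0.0, 3.0)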
+
+    %(example)s
+
+    """
+
+    def _argcheck(self, a, b):
+        return a < b
+
+    def _shape_info(self):
+        ia = _ShapeInfo("a", False, (-np.inf, np.inf), (True, False))
+        ib = _ShapeInfo("b", False, (-np.inf, np.inf), (False, True))
+        return [ia, ib]
+
+    def _fitstart(self, data):
+        # Reasonable, since support is [a, b]
+        return super()._fitstart(data, args=(np.min(data), np.max(data)))
+
+    def _get_support(self, a, b):
+        return a, b
+
+    def _pdf(self, x, a, b):
+        return np.exp(self._logpdf(x, a, b))
+
+    def _logpdf(self, x, a, b):
+        return _norm_logpdf(x) - _log_gauss_mass(a, b)
+
+    def _cdf(self, x, a, b):
+        return np.exp(self._logcdf(x, a, b))
+
+    def _logcdf(self, x, a, b):
+        x, a, b = np.broadcast_arrays(x, a, b)
+        logcdf = _log_gauss_mass(a, x) - _log_gauss_mass(a, b)
+        i = logcdf > -0.1  # avoid catastrophic cancellation
+        if np.any(i):
+            logcdf[i] = np.log1p(-np.exp(self._logsf(x[i], a[i], b[i])))
+        return logcdf
+
+    def _sf(self, x, a, b):
+        return np.exp(self._logsf(x, a, b))
+
+    def _logsf(self, x, a, b):
+        x, a, b = np.broadcast_arrays(x, a, b)
+        logsf = _log_gauss_mass(x, b) - _log_gauss_mass(a, b)
+        i = logsf > -0.1  # avoid catastrophic cancellation
+        if np.any(i):
+            logsf[i] = np.log1p(-np.exp(self._logcdf(x[i], a[i], b[i])))
+        return logsf
+
+    def _ppf(self, q, a, b):
+        q, a, b = np.broadcast_arrays(q, a, b)
+
+        case_left = a < 0
+        case_right = ~case_left
+
+        def ppf_left(q, a, b):
+            log_Phi_x = _log_sum(sc.log_ndtr(a),
+                                 np.log(q) + _log_gauss_mass(a, b))
+            return sc.ndtri_exp(log_Phi_x)
+
+        def ppf_right(q, a, b):
+            log_Phi_x = _log_sum(sc.log_ndtr(-b),
+                                 np.log1p(-q) + _log_gauss_mass(a, b))
+            return -sc.ndtri_exp(log_Phi_x)
+
+        out = np.empty_like(q)
+
+        q_left = q[case_left]
+        q_right = q[case_right]
+
+        if q_left.size:
+            out[case_left] = ppf_left(q_left, a[case_left], b[case_left])
+        if q_right.size:
+            out[case_right] = ppf_right(q_right, a[case_right], b[case_right])
+
+        return out
+
+    def _isf(self, q, a, b):
+        # Mostly copy-paste of _ppf, but I think this is simpler than combining
+        q, a, b = np.broadcast_arrays(q, a, b)
+
+        case_left = b < 0
+        case_right = ~case_left
+
+        def isf_left(q, a, b):
+            log_Phi_x = _log_diff(sc.log_ndtr(b),
+                                  np.log(q) + _log_gauss_mass(a, b))
+            return sc.ndtri_exp(np.real(log_Phi_x))
+
+        def isf_right(q, a, b):
+            log_Phi_x = _log_diff(sc.log_ndtr(-a),
+                                  np.log1p(-q) + _log_gauss_mass(a, b))
+            return -sc.ndtri_exp(np.real(log_Phi_x))
+
+        out = np.empty_like(q)
+
+        q_left = q[case_left]
+        q_right = q[case_right]
+
+        if q_left.size:
+            out[case_left] = isf_left(q_left, a[case_left], b[case_left])
+        if q_right.size:
+            out[case_right] = isf_right(q_right, a[case_right], b[case_right])
+
+        return out
+
+    def _munp(self, n, a, b):
+        def n_th_moment(n, a, b):
+            """
+            Returns n-th moment. Defined only if n >= 0.
+            Function cannot broadcast due to the loop over n
+            """
+            pA, pB = self._pdf([a, b], a, b)
+            probs = [pA, -pB]
+            moments = [0, 1]
+            for k in range(1, n+1):
+                # a or b might be infinite, and the corresponding pdf value
+                # is 0 in that case, but nan is returned for the
+                # multiplication.  However, as b->infinity,  pdf(b)*b**k -> 0.
+                # So it is safe to use _lazywhere to avoid the nan.
+                vals = _lazywhere(probs, [probs, [a, b]],
+                                  lambda x, y: x * y**(k-1), fillvalue=0)
+                mk = np.sum(vals) + (k-1) * moments[-2]
+                moments.append(mk)
+            return moments[-1]
+
+        return _lazywhere((n >= 0) & (a == a) & (b == b), (n, a, b),
+                          np.vectorize(n_th_moment, otypes=[np.float64]),
+                          np.nan)
+
+    def _stats(self, a, b, moments='mv'):
+        pA, pB = self.pdf(np.array([a, b]), a, b)
+
+        def _truncnorm_stats_scalar(a, b, pA, pB, moments):
+            m1 = pA - pB
+            mu = m1
+            # use _lazywhere to avoid nan (See detailed comment in _munp)
+            probs = [pA, -pB]
+            vals = _lazywhere(probs, [probs, [a, b]], lambda x, y: x*y,
+                              fillvalue=0)
+            m2 = 1 + np.sum(vals)
+            vals = _lazywhere(probs, [probs, [a-mu, b-mu]], lambda x, y: x*y,
+                              fillvalue=0)
+            # mu2 = m2 - mu**2, but not as numerically stable as:
+            # mu2 = (a-mu)*pA - (b-mu)*pB + 1
+            mu2 = 1 + np.sum(vals)
+            vals = _lazywhere(probs, [probs, [a, b]], lambda x, y: x*y**2,
+                              fillvalue=0)
+            m3 = 2*m1 + np.sum(vals)
+            vals = _lazywhere(probs, [probs, [a, b]], lambda x, y: x*y**3,
+                              fillvalue=0)
+            m4 = 3*m2 + np.sum(vals)
+
+            mu3 = m3 + m1 * (-3*m2 + 2*m1**2)
+            g1 = mu3 / np.power(mu2, 1.5)
+            mu4 = m4 + m1*(-4*m3 + 3*m1*(2*m2 - m1**2))
+            g2 = mu4 / mu2**2 - 3
+            return mu, mu2, g1, g2
+
+        _truncnorm_stats = np.vectorize(_truncnorm_stats_scalar,
+                                        excluded=('moments',))
+        return _truncnorm_stats(a, b, pA, pB, moments)
+
+
+truncnorm = truncnorm_gen(name='truncnorm', momtype=1)
+
+
+class truncpareto_gen(rv_continuous):
+    r"""An upper truncated Pareto continuous random variable.
+
+    %(before_notes)s
+
+    See Also
+    --------
+    pareto : Pareto distribution
+
+    Notes
+    -----
+    The probability density function for `truncpareto` is:
+
+    .. math::
+
+        f(x, b, c) = \frac{b}{1 - c^{-b}} \frac{1}{x^{b+1}}
+
+    for :math:`b > 0`, :math:`c > 1` and :math:`1 \le x \le c`.
+
+    `truncpareto` takes `b` and `c` as shape parameters for :math:`b` and
+    :math:`c`.
+
+    Notice that the upper truncation value :math:`c` is defined in
+    standardized form so that random values of an unscaled, unshifted variable
+    are within the range ``[1, c]``.
+    If ``u_r`` is the upper bound to a scaled and/or shifted variable,
+    then ``c = (u_r - loc) / scale``. In other words, the support of the
+    distribution becomes ``(scale + loc) <= x <= (c*scale + loc)`` when
+    `scale` and/or `loc` are provided.
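+
+    For example, with ``loc=0``, ``scale=2`` and an upper bound ``u_r = 10``,
+    the shape parameter is ``c = (10 - 0) / 2 = 5`` (the value of ``b`` below
+    is arbitrary):
+
+    >>> from scipy import stats
+    >>> stats.truncpareto(2, 5, loc=0, scale=2).support()
+    (2.0, 10.0)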
+
+    %(after_notes)s
+
+    References
+    ----------
+    .. [1] Burroughs, S. M., and Tebbens S. F.
+        "Upper-truncated power laws in natural systems."
+        Pure and Applied Geophysics 158.4 (2001): 741-757.
+
+    %(example)s
+
+    """
+
+    def _shape_info(self):
+        ib = _ShapeInfo("b", False, (0.0, np.inf), (False, False))
+        ic = _ShapeInfo("c", False, (1.0, np.inf), (False, False))
+        return [ib, ic]
+
+    def _argcheck(self, b, c):
+        return (b > 0) & (c > 1)
+
+    def _get_support(self, b, c):
+        return self.a, c
+
+    def _pdf(self, x, b, c):
+        return b * x**-(b+1) / (1 - c**-b)
+
+    def _logpdf(self, x, b, c):
+        return np.log(b) - np.log1p(-c**-b) - (b+1)*np.log(x)
+
+    def _cdf(self, x, b, c):
+        return (1 - x**-b) / (1 - c**-b)
+
+    def _logcdf(self, x, b, c):
+        return np.log1p(-x**-b) - np.log1p(-c**-b)
+
+    def _ppf(self, q, b, c):
+        return pow(1 - (1 - c**-b)*q, -1/b)
+
+    def _sf(self, x, b, c):
+        return (x**-b - c**-b) / (1 - c**-b)
+
+    def _logsf(self, x, b, c):
+        return np.log(x**-b - c**-b) - np.log1p(-c**-b)
+
+    def _isf(self, q, b, c):
+        return pow(c**-b + (1 - c**-b)*q, -1/b)
+
+    def _entropy(self, b, c):
+        return -(np.log(b/(1 - c**-b))
+                 + (b+1)*(np.log(c)/(c**b - 1) - 1/b))
+
+    def _munp(self, n, b, c):
+        if n == b:
+            return b*np.log(c) / (1 - c**-b)
+        else:
+            return b / (b-n) * (c**b - c**n) / (c**b - 1)
+
+    def _fitstart(self, data):
+        b, loc, scale = pareto.fit(data)
+        c = (max(data) - loc)/scale
+        return b, c, loc, scale
+
+
+truncpareto = truncpareto_gen(a=1.0, name='truncpareto')
+
+
+class tukeylambda_gen(rv_continuous):
+    r"""A Tukey-Lamdba continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    A flexible distribution, able to represent and interpolate between the
+    following distributions:
+
+    - Cauchy                (:math:`\lambda = -1`)
+    - logistic              (:math:`\lambda = 0`)
+    - approx. normal        (:math:`\lambda = 0.14`)
+    - uniform from -1 to 1  (:math:`\lambda = 1`)
+
+    `tukeylambda` takes a real number :math:`\lambda` (denoted ``lam``
+    in the implementation) as a shape parameter.
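+
+    The logistic special case, for instance, can be checked numerically
+    (evaluation point arbitrary):
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> np.isclose(stats.tukeylambda.cdf(0.8, 0), stats.logistic.cdf(0.8))
+    True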
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _argcheck(self, lam):
+        return np.isfinite(lam)
+
+    def _shape_info(self):
+        return [_ShapeInfo("lam", False, (-np.inf, np.inf), (False, False))]
+
+    def _pdf(self, x, lam):
+        Fx = np.asarray(sc.tklmbda(x, lam))
+        Px = Fx**(lam-1.0) + (np.asarray(1-Fx))**(lam-1.0)
+        Px = 1.0/np.asarray(Px)
+        return np.where((lam <= 0) | (abs(x) < 1.0/np.asarray(lam)), Px, 0.0)
+
+    def _cdf(self, x, lam):
+        return sc.tklmbda(x, lam)
+
+    def _ppf(self, q, lam):
+        return sc.boxcox(q, lam) - sc.boxcox1p(-q, lam)
+
+    def _stats(self, lam):
+        return 0, _tlvar(lam), 0, _tlkurt(lam)
+
+    def _entropy(self, lam):
+        def integ(p):
+            return np.log(pow(p, lam-1)+pow(1-p, lam-1))
+        return integrate.quad(integ, 0, 1)[0]
+
+
+tukeylambda = tukeylambda_gen(name='tukeylambda')
+
+
+class FitUniformFixedScaleDataError(FitDataError):
+    def __init__(self, ptp, fscale):
+        self.args = (
+            "Invalid values in `data`.  Maximum likelihood estimation with "
+            "the uniform distribution and fixed scale requires that "
+            "data.ptp() <= fscale, but data.ptp() = %r and fscale = %r." %
+            (ptp, fscale),
+        )
+
+
+class uniform_gen(rv_continuous):
+    r"""A uniform continuous random variable.
+
+    In the standard form, the distribution is uniform on ``[0, 1]``. Using
+    the parameters ``loc`` and ``scale``, one obtains the uniform distribution
+    on ``[loc, loc + scale]``.
+
+    %(before_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return []
+
+    def _rvs(self, size=None, random_state=None):
+        return random_state.uniform(0.0, 1.0, size)
+
+    def _pdf(self, x):
+        return 1.0*(x == x)
+
+    def _cdf(self, x):
+        return x
+
+    def _ppf(self, q):
+        return q
+
+    def _stats(self):
+        return 0.5, 1.0/12, 0, -1.2
+
+    def _entropy(self):
+        return 0.0
+
+    @_call_super_mom
+    def fit(self, data, *args, **kwds):
+        """
+        Maximum likelihood estimate for the location and scale parameters.
+
+        `uniform.fit` uses only the following parameters.  Because exact
+        formulas are used, the parameters related to optimization that are
+        available in the `fit` method of other distributions are ignored
+        here.  The only positional argument accepted is `data`.
+
+        Parameters
+        ----------
+        data : array_like
+            Data to use in calculating the maximum likelihood estimate.
+        floc : float, optional
+            Hold the location parameter fixed to the specified value.
+        fscale : float, optional
+            Hold the scale parameter fixed to the specified value.
+
+        Returns
+        -------
+        loc, scale : float
+            Maximum likelihood estimates for the location and scale.
+
+        Notes
+        -----
+        An error is raised if `floc` is given and any values in `data` are
+        less than `floc`, or if `fscale` is given and `fscale` is less
+        than ``data.max() - data.min()``.  An error is also raised if both
+        `floc` and `fscale` are given.
+
+        Examples
+        --------
+        >>> import numpy as np
+        >>> from scipy.stats import uniform
+
+        We'll fit the uniform distribution to `x`:
+
+        >>> x = np.array([2, 2.5, 3.1, 9.5, 13.0])
+
+        For a uniform distribution MLE, the location is the minimum of the
+        data, and the scale is the maximum minus the minimum.
+
+        >>> loc, scale = uniform.fit(x)
+        >>> loc
+        2.0
+        >>> scale
+        11.0
+
+        If we know the data comes from a uniform distribution where the support
+        starts at 0, we can use `floc=0`:
+
+        >>> loc, scale = uniform.fit(x, floc=0)
+        >>> loc
+        0.0
+        >>> scale
+        13.0
+
+        Alternatively, if we know the length of the support is 12, we can use
+        `fscale=12`:
+
+        >>> loc, scale = uniform.fit(x, fscale=12)
+        >>> loc
+        1.5
+        >>> scale
+        12.0
+
+        In that last example, the support interval is [1.5, 13.5].  This
+        solution is not unique.  For example, the distribution with ``loc=2``
+        and ``scale=12`` has the same likelihood as the one above.  When
+        `fscale` is given and it is larger than ``data.max() - data.min()``,
+        the parameters returned by the `fit` method center the support over
+        the interval ``[data.min(), data.max()]``.
+
+        """
+        if len(args) > 0:
+            raise TypeError("Too many arguments.")
+
+        floc = kwds.pop('floc', None)
+        fscale = kwds.pop('fscale', None)
+
+        _remove_optimizer_parameters(kwds)
+
+        if floc is not None and fscale is not None:
+            # This check is for consistency with `rv_continuous.fit`.
+            raise ValueError("All parameters fixed. There is nothing to "
+                             "optimize.")
+
+        data = np.asarray(data)
+
+        if not np.isfinite(data).all():
+            raise ValueError("The data contains non-finite values.")
+
+        # MLE for the uniform distribution
+        # --------------------------------
+        # The PDF is
+        #
+        #     f(x, loc, scale) = {1/scale  for loc <= x <= loc + scale
+        #                        {0        otherwise}
+        #
+        # The likelihood function is
+        #     L(x, loc, scale) = (1/scale)**n
+        # where n is len(x), assuming loc <= x <= loc + scale for all x.
+        # The log-likelihood is
+        #     l(x, loc, scale) = -n*log(scale)
+        # The log-likelihood is maximized by making scale as small as possible,
+        # while keeping loc <= x <= loc + scale.   So if neither loc nor scale
+        # are fixed, the log-likelihood is maximized by choosing
+        #     loc = x.min()
+        #     scale = x.ptp()
+        # If loc is fixed, it must be less than or equal to x.min(), and then
+        # the scale is
+        #     scale = x.max() - loc
+        # If scale is fixed, it must not be less than x.ptp().  If scale is
+        # greater than x.ptp(), the solution is not unique.  Note that the
+        # likelihood does not depend on loc, except for the requirement that
+        # loc <= x <= loc + scale.  All choices of loc for which
+        #     x.max() - scale <= loc <= x.min()
+        # have the same log-likelihood.  In this case, we choose loc such that
+        # the support is centered over the interval [data.min(), data.max()]:
+        #     loc = x.min() - 0.5*(scale - x.ptp())
+
+        if fscale is None:
+            # scale is not fixed.
+            if floc is None:
+                # loc is not fixed, scale is not fixed.
+                loc = data.min()
+                scale = data.ptp()
+            else:
+                # loc is fixed, scale is not fixed.
+                loc = floc
+                scale = data.max() - loc
+                if data.min() < loc:
+                    raise FitDataError("uniform", lower=loc, upper=loc + scale)
+        else:
+            # loc is not fixed, scale is fixed.
+            ptp = data.ptp()
+            if ptp > fscale:
+                raise FitUniformFixedScaleDataError(ptp=ptp, fscale=fscale)
+            # If ptp < fscale, the ML estimate is not unique; see the comments
+            # above.  We choose the distribution for which the support is
+            # centered over the interval [data.min(), data.max()].
+            loc = data.min() - 0.5*(fscale - ptp)
+            scale = fscale
+
+        # We expect the return values to be floating point, so ensure it
+        # by explicitly converting to float.
+        return float(loc), float(scale)
+
+
+uniform = uniform_gen(a=0.0, b=1.0, name='uniform')
+
+
+class vonmises_gen(rv_continuous):
+    r"""A Von Mises continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `vonmises` and `vonmises_line` is:
+
+    .. math::
+
+        f(x, \kappa) = \frac{ \exp(\kappa \cos(x)) }{ 2 \pi I_0(\kappa) }
+
+    for :math:`-\pi \le x \le \pi`, :math:`\kappa > 0`. :math:`I_0` is the
+    modified Bessel function of order zero (`scipy.special.i0`).
+
+    `vonmises` is a circular distribution which does not restrict its
+    values to a fixed interval. Currently, there is no circular
+    distribution framework in scipy. The ``cdf`` is implemented such that
+    ``cdf(x + 2*np.pi) == cdf(x) + 1``.
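+
+    For example (``x`` and ``kappa`` arbitrary):
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> np.isclose(stats.vonmises.cdf(1.0 + 2*np.pi, 2.5),
+    ...            stats.vonmises.cdf(1.0, 2.5) + 1)
+    True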
+
+    `vonmises_line` is the same distribution, defined on :math:`[-\pi, \pi]`
+    on the real line. This is a regular (i.e. non-circular) distribution.
+
+    `vonmises` and `vonmises_line` take ``kappa`` as a shape parameter.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("kappa", False, (0, np.inf), (False, False))]
+
+    def _rvs(self, kappa, size=None, random_state=None):
+        return random_state.vonmises(0.0, kappa, size=size)
+
+    @inherit_docstring_from(rv_continuous)
+    def rvs(self, *args, **kwds):
+        rvs = super().rvs(*args, **kwds)
+        return np.mod(rvs + np.pi, 2*np.pi) - np.pi
+
+    def _pdf(self, x, kappa):
+        # vonmises.pdf(x, kappa) = exp(kappa * cos(x)) / (2*pi*I[0](kappa))
+        #                        = exp(kappa * (cos(x) - 1)) /
+        #                          (2*pi*exp(-kappa)*I[0](kappa))
+        #                        = exp(kappa * cosm1(x)) / (2*pi*i0e(kappa))
+        return np.exp(kappa*sc.cosm1(x)) / (2*np.pi*sc.i0e(kappa))
+
+    def _logpdf(self, x, kappa):
+        # vonmises.pdf(x, kappa) = exp(kappa * cosm1(x)) / (2*pi*i0e(kappa))
+        return kappa * sc.cosm1(x) - np.log(2*np.pi) - np.log(sc.i0e(kappa))
+
+    def _cdf(self, x, kappa):
+        return _stats.von_mises_cdf(kappa, x)
+
+    def _stats_skip(self, kappa):
+        return 0, None, 0, None
+
+    def _entropy(self, kappa):
+        # vonmises.entropy(kappa) = -kappa * I[1](kappa) / I[0](kappa) +
+        #                           log(2 * np.pi * I[0](kappa))
+        #                         = -kappa * I[1](kappa) * exp(-kappa) /
+        #                           (I[0](kappa) * exp(-kappa)) +
+        #                           log(2 * np.pi *
+        #                           I[0](kappa) * exp(-kappa) / exp(-kappa))
+        #                         = -kappa * sc.i1e(kappa) / sc.i0e(kappa) +
+        #                           log(2 * np.pi * i0e(kappa)) + kappa
+        return (-kappa * sc.i1e(kappa) / sc.i0e(kappa) +
+                np.log(2 * np.pi * sc.i0e(kappa)) + kappa)
+
+    @extend_notes_in_docstring(rv_continuous, notes="""\
+        The default limits of integration are endpoints of the interval
+        of width ``2*pi`` centered at `loc` (e.g. ``[-pi, pi]`` when
+        ``loc=0``).\n\n""")
+    def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
+               conditional=False, **kwds):
+        _a, _b = -np.pi, np.pi
+
+        if lb is None:
+            lb = loc + _a
+        if ub is None:
+            ub = loc + _b
+
+        return super().expect(func, args, loc,
+                              scale, lb, ub, conditional, **kwds)
+
+
+vonmises = vonmises_gen(name='vonmises')
+vonmises_line = vonmises_gen(a=-np.pi, b=np.pi, name='vonmises_line')
+
+
+class wald_gen(invgauss_gen):
+    r"""A Wald continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `wald` is:
+
+    .. math::
+
+        f(x) = \frac{1}{\sqrt{2\pi x^3}} \exp(- \frac{ (x-1)^2 }{ 2x })
+
+    for :math:`x >= 0`.
+
+    `wald` is a special case of `invgauss` with ``mu=1``.
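+
+    The special case is easy to verify numerically (evaluation point
+    arbitrary):
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> np.isclose(stats.wald.pdf(1.2), stats.invgauss.pdf(1.2, 1.0))  # mu=1
+    True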
+
+    %(after_notes)s
+
+    %(example)s
+    """
+    _support_mask = rv_continuous._open_support_mask
+
+    def _shape_info(self):
+        return []
+
+    def _rvs(self, size=None, random_state=None):
+        return random_state.wald(1.0, 1.0, size=size)
+
+    def _pdf(self, x):
+        # wald.pdf(x) = 1/sqrt(2*pi*x**3) * exp(-(x-1)**2/(2*x))
+        return invgauss._pdf(x, 1.0)
+
+    def _cdf(self, x):
+        return invgauss._cdf(x, 1.0)
+
+    def _sf(self, x):
+        return invgauss._sf(x, 1.0)
+
+    def _ppf(self, x):
+        return invgauss._ppf(x, 1.0)
+
+    def _isf(self, x):
+        return invgauss._isf(x, 1.0)
+
+    def _logpdf(self, x):
+        return invgauss._logpdf(x, 1.0)
+
+    def _logcdf(self, x):
+        return invgauss._logcdf(x, 1.0)
+
+    def _logsf(self, x):
+        return invgauss._logsf(x, 1.0)
+
+    def _stats(self):
+        return 1.0, 1.0, 3.0, 15.0
+
+
+wald = wald_gen(a=0.0, name="wald")
+
+
+class wrapcauchy_gen(rv_continuous):
+    r"""A wrapped Cauchy continuous random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `wrapcauchy` is:
+
+    .. math::
+
+        f(x, c) = \frac{1-c^2}{2\pi (1+c^2 - 2c \cos(x))}
+
+    for :math:`0 \le x \le 2\pi`, :math:`0 < c < 1`.
+
+    `wrapcauchy` takes ``c`` as a shape parameter for :math:`c`.
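+
+    The density formula above is straightforward to check numerically
+    (``x`` and ``c`` arbitrary):
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> x, c = 1.0, 0.5
+    >>> np.isclose(stats.wrapcauchy.pdf(x, c),
+    ...            (1 - c**2) / (2*np.pi*(1 + c**2 - 2*c*np.cos(x))))
+    True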
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _argcheck(self, c):
+        return (c > 0) & (c < 1)
+
+    def _shape_info(self):
+        return [_ShapeInfo("c", False, (0, 1), (False, False))]
+
+    def _pdf(self, x, c):
+        # wrapcauchy.pdf(x, c) = (1-c**2) / (2*pi*(1+c**2-2*c*cos(x)))
+        return (1.0-c*c)/(2*np.pi*(1+c*c-2*c*np.cos(x)))
+
+    def _cdf(self, x, c):
+
+        def f1(x, cr):
+            # CDF for 0 <= x < pi
+            return 1/np.pi * np.arctan(cr*np.tan(x/2))
+
+        def f2(x, cr):
+            # CDF for pi <= x <= 2*pi
+            return 1 - 1/np.pi * np.arctan(cr*np.tan((2*np.pi - x)/2))
+
+        cr = (1 + c)/(1 - c)
+        return _lazywhere(x < np.pi, (x, cr), f=f1, f2=f2)
+
+    def _ppf(self, q, c):
+        val = (1.0-c)/(1.0+c)
+        rcq = 2*np.arctan(val*np.tan(np.pi*q))
+        rcmq = 2*np.pi-2*np.arctan(val*np.tan(np.pi*(1-q)))
+        return np.where(q < 1.0/2, rcq, rcmq)
+
+    def _entropy(self, c):
+        return np.log(2*np.pi*(1-c*c))
+
+    def _fitstart(self, data):
+        # Use 0.5 as the initial guess of the shape parameter.
+        # For the location and scale, use the minimum and
+        # peak-to-peak/(2*pi), respectively.
+        return 0.5, np.min(data), np.ptp(data)/(2*np.pi)
+
+
+wrapcauchy = wrapcauchy_gen(a=0.0, b=2*np.pi, name='wrapcauchy')
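+
+# Illustrative sketch: the piecewise CDF above should agree with direct
+# numerical integration of the PDF over part of the support ``[0, 2*pi]``::
+#
+#     import numpy as np
+#     from scipy import stats, integrate
+#     c, x = 0.3, 2.0
+#     num, _ = integrate.quad(lambda t: stats.wrapcauchy.pdf(t, c), 0, x)
+#     assert np.isclose(num, stats.wrapcauchy.cdf(x, c))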
+
+
+class gennorm_gen(rv_continuous):
+    r"""A generalized normal continuous random variable.
+
+    %(before_notes)s
+
+    See Also
+    --------
+    laplace : Laplace distribution
+    norm : normal distribution
+
+    Notes
+    -----
+    The probability density function for `gennorm` is [1]_:
+
+    .. math::
+
+        f(x, \beta) = \frac{\beta}{2 \Gamma(1/\beta)} \exp(-|x|^\beta),
+
+    where :math:`x` is a real number, :math:`\beta > 0` and
+    :math:`\Gamma` is the gamma function (`scipy.special.gamma`).
+
+    `gennorm` takes ``beta`` as a shape parameter for :math:`\beta`.
+    For :math:`\beta = 1`, it is identical to a Laplace distribution.
+    For :math:`\beta = 2`, it is identical to a normal distribution
+    (with ``scale=1/sqrt(2)``).
+
+    References
+    ----------
+
+    .. [1] "Generalized normal distribution, Version 1",
+           https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1
+
+    .. [2] Nardon, Martina, and Paolo Pianca. "Simulation techniques for
+           generalized Gaussian densities." Journal of Statistical
+           Computation and Simulation 79.11 (2009): 1317-1329
+
+    .. [3] Wicklin, Rick. "Simulate data from a generalized Gaussian
+           distribution" in The DO Loop blog, September 21, 2016,
+           https://blogs.sas.com/content/iml/2016/09/21/simulate-generalized-gaussian-sas.html
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("beta", False, (0, np.inf), (False, False))]
+
+    def _pdf(self, x, beta):
+        return np.exp(self._logpdf(x, beta))
+
+    def _logpdf(self, x, beta):
+        return np.log(0.5*beta) - sc.gammaln(1.0/beta) - abs(x)**beta
+
+    def _cdf(self, x, beta):
+        c = 0.5 * np.sign(x)
+        # evaluating (.5 + c) first prevents numerical cancellation
+        return (0.5 + c) - c * sc.gammaincc(1.0/beta, abs(x)**beta)
+
+    def _ppf(self, x, beta):
+        c = np.sign(x - 0.5)
+        # evaluating (1. + c) first prevents numerical cancellation
+        return c * sc.gammainccinv(1.0/beta, (1.0 + c) - 2.0*c*x)**(1.0/beta)
+
+    def _sf(self, x, beta):
+        return self._cdf(-x, beta)
+
+    def _isf(self, x, beta):
+        return -self._ppf(x, beta)
+
+    def _stats(self, beta):
+        c1, c3, c5 = sc.gammaln([1.0/beta, 3.0/beta, 5.0/beta])
+        return 0., np.exp(c3 - c1), 0., np.exp(c5 + c1 - 2.0*c3) - 3.
+
+    def _entropy(self, beta):
+        return 1. / beta - np.log(.5 * beta) + sc.gammaln(1. / beta)
+
+    def _rvs(self, beta, size=None, random_state=None):
+        # see [2]_ for the algorithm
+        # see [3]_ for reference implementation in SAS
+        z = random_state.gamma(1/beta, size=size)
+        y = z ** (1/beta)
+        # convert y to array to ensure masking support
+        y = np.asarray(y)
+        mask = random_state.random(size=y.shape) < 0.5
+        y[mask] = -y[mask]
+        return y
+
+
+gennorm = gennorm_gen(name='gennorm')
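+
+# Illustrative sketch of the special cases noted in the docstring::
+#
+#     import numpy as np
+#     from scipy import stats
+#     x = np.linspace(-3, 3, 7)
+#     assert np.allclose(stats.gennorm.pdf(x, 1), stats.laplace.pdf(x))
+#     assert np.allclose(stats.gennorm.pdf(x, 2),
+#                        stats.norm.pdf(x, scale=1/np.sqrt(2)))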
+
+
+class halfgennorm_gen(rv_continuous):
+    r"""The upper half of a generalized normal continuous random variable.
+
+    %(before_notes)s
+
+    See Also
+    --------
+    gennorm : generalized normal distribution
+    expon : exponential distribution
+    halfnorm : half normal distribution
+
+    Notes
+    -----
+    The probability density function for `halfgennorm` is:
+
+    .. math::
+
+        f(x, \beta) = \frac{\beta}{\Gamma(1/\beta)} \exp(-|x|^\beta)
+
+    for :math:`x, \beta > 0`. :math:`\Gamma` is the gamma function
+    (`scipy.special.gamma`).
+
+    `halfgennorm` takes ``beta`` as a shape parameter for :math:`\beta`.
+    For :math:`\beta = 1`, it is identical to an exponential distribution.
+    For :math:`\beta = 2`, it is identical to a half normal distribution
+    (with ``scale=1/sqrt(2)``).
+
+    References
+    ----------
+
+    .. [1] "Generalized normal distribution, Version 1",
+           https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("beta", False, (0, np.inf), (False, False))]
+
+    def _pdf(self, x, beta):
+        #                                 beta
+        # halfgennorm.pdf(x, beta) =  -------------  exp(-|x|**beta)
+        #                             gamma(1/beta)
+        return np.exp(self._logpdf(x, beta))
+
+    def _logpdf(self, x, beta):
+        return np.log(beta) - sc.gammaln(1.0/beta) - x**beta
+
+    def _cdf(self, x, beta):
+        return sc.gammainc(1.0/beta, x**beta)
+
+    def _ppf(self, x, beta):
+        return sc.gammaincinv(1.0/beta, x)**(1.0/beta)
+
+    def _sf(self, x, beta):
+        return sc.gammaincc(1.0/beta, x**beta)
+
+    def _isf(self, x, beta):
+        return sc.gammainccinv(1.0/beta, x)**(1.0/beta)
+
+    def _entropy(self, beta):
+        return 1.0/beta - np.log(beta) + sc.gammaln(1.0/beta)
+
+
+halfgennorm = halfgennorm_gen(a=0, name='halfgennorm')
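+
+# Illustrative sketch of the special cases noted in the docstring::
+#
+#     import numpy as np
+#     from scipy import stats
+#     x = np.linspace(0.1, 3, 7)
+#     assert np.allclose(stats.halfgennorm.pdf(x, 1), stats.expon.pdf(x))
+#     assert np.allclose(stats.halfgennorm.pdf(x, 2),
+#                        stats.halfnorm.pdf(x, scale=1/np.sqrt(2)))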
+
+
+class crystalball_gen(rv_continuous):
+    r"""
+    Crystalball distribution
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `crystalball` is:
+
+    .. math::
+
+        f(x, \beta, m) =  \begin{cases}
+                            N \exp(-x^2 / 2),  &\text{for } x > -\beta\\
+                            N A (B - x)^{-m}  &\text{for } x \le -\beta
+                          \end{cases}
+
+    where :math:`A = (m / |\beta|)^m  \exp(-\beta^2 / 2)`,
+    :math:`B = m/|\beta| - |\beta|` and :math:`N` is a normalisation constant.
+
+    `crystalball` takes :math:`\beta > 0` and :math:`m > 1` as shape
+    parameters.  :math:`\beta` defines the point where the pdf changes
+    from a power-law to a Gaussian distribution.  :math:`m` is the power
+    of the power-law tail.
+
+    References
+    ----------
+    .. [1] "Crystal Ball Function",
+           https://en.wikipedia.org/wiki/Crystal_Ball_function
+
+    %(after_notes)s
+
+    .. versionadded:: 0.19.0
+
+    %(example)s
+    """
+    def _argcheck(self, beta, m):
+        """
+        Shape parameter bounds are m > 1 and beta > 0.
+        """
+        return (m > 1) & (beta > 0)
+
+    def _shape_info(self):
+        ibeta = _ShapeInfo("beta", False, (0, np.inf), (False, False))
+        im = _ShapeInfo("m", False, (1, np.inf), (False, False))
+        return [ibeta, im]
+
+    def _fitstart(self, data):
+        # Arbitrary, but the default m=1 is not valid
+        return super()._fitstart(data, args=(1, 1.5))
+
+    def _pdf(self, x, beta, m):
+        """
+        Return PDF of the crystalball function.
+
+                                            --
+                                           | exp(-x**2 / 2),  for x > -beta
+        crystalball.pdf(x, beta, m) =  N * |
+                                           | A * (B - x)**(-m), for x <= -beta
+                                            --
+        """
+        N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
+                   _norm_pdf_C * _norm_cdf(beta))
+
+        def rhs(x, beta, m):
+            return np.exp(-x**2 / 2)
+
+        def lhs(x, beta, m):
+            return ((m/beta)**m * np.exp(-beta**2 / 2.0) *
+                    (m/beta - beta - x)**(-m))
+
+        return N * _lazywhere(x > -beta, (x, beta, m), f=rhs, f2=lhs)
+
+    def _logpdf(self, x, beta, m):
+        """
+        Return the log of the PDF of the crystalball function.
+        """
+        N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
+                   _norm_pdf_C * _norm_cdf(beta))
+
+        def rhs(x, beta, m):
+            return -x**2/2
+
+        def lhs(x, beta, m):
+            return m*np.log(m/beta) - beta**2/2 - m*np.log(m/beta - beta - x)
+
+        return np.log(N) + _lazywhere(x > -beta, (x, beta, m), f=rhs, f2=lhs)
+
+    def _cdf(self, x, beta, m):
+        """
+        Return CDF of the crystalball function
+        """
+        N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
+                   _norm_pdf_C * _norm_cdf(beta))
+
+        def rhs(x, beta, m):
+            return ((m/beta) * np.exp(-beta**2 / 2.0) / (m-1) +
+                    _norm_pdf_C * (_norm_cdf(x) - _norm_cdf(-beta)))
+
+        def lhs(x, beta, m):
+            return ((m/beta)**m * np.exp(-beta**2 / 2.0) *
+                    (m/beta - beta - x)**(-m+1) / (m-1))
+
+        return N * _lazywhere(x > -beta, (x, beta, m), f=rhs, f2=lhs)
+
+    def _ppf(self, p, beta, m):
+        N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
+                   _norm_pdf_C * _norm_cdf(beta))
+        pbeta = N * (m/beta) * np.exp(-beta**2/2) / (m - 1)
+
+        def ppf_less(p, beta, m):
+            eb2 = np.exp(-beta**2/2)
+            C = (m/beta) * eb2 / (m-1)
+            N = 1/(C + _norm_pdf_C * _norm_cdf(beta))
+            return (m/beta - beta -
+                    ((m - 1)*(m/beta)**(-m)/eb2*p/N)**(1/(1-m)))
+
+        def ppf_greater(p, beta, m):
+            eb2 = np.exp(-beta**2/2)
+            C = (m/beta) * eb2 / (m-1)
+            N = 1/(C + _norm_pdf_C * _norm_cdf(beta))
+            return _norm_ppf(_norm_cdf(-beta) + (1/_norm_pdf_C)*(p/N - C))
+
+        return _lazywhere(p < pbeta, (p, beta, m), f=ppf_less, f2=ppf_greater)
+
+    def _munp(self, n, beta, m):
+        """
+        Returns the n-th non-central moment of the crystalball function.
+        """
+        N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
+                   _norm_pdf_C * _norm_cdf(beta))
+
+        def n_th_moment(n, beta, m):
+            """
+            Returns n-th moment. Defined only if n+1 < m
+            Function cannot broadcast due to the loop over n
+            """
+            A = (m/beta)**m * np.exp(-beta**2 / 2.0)
+            B = m/beta - beta
+            rhs = (2**((n-1)/2.0) * sc.gamma((n+1)/2) *
+                   (1.0 + (-1)**n * sc.gammainc((n+1)/2, beta**2 / 2)))
+            lhs = np.zeros(rhs.shape)
+            for k in range(n + 1):
+                lhs += (sc.binom(n, k) * B**(n-k) * (-1)**k / (m - k - 1) *
+                        (m/beta)**(-m + k + 1))
+            return A * lhs + rhs
+
+        return N * _lazywhere(n + 1 < m, (n, beta, m),
+                              np.vectorize(n_th_moment, otypes=[np.float64]),
+                              np.inf)
+
+
+crystalball = crystalball_gen(name='crystalball',
+                              longname="A Crystalball Function")
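+
+# Illustrative sketch: the normalisation constant N used in the methods above
+# should make the piecewise PDF integrate to one (the tail is integrable
+# because m > 1)::
+#
+#     import numpy as np
+#     from scipy import stats, integrate
+#     beta, m = 1.5, 3.0
+#     total, _ = integrate.quad(lambda t: stats.crystalball.pdf(t, beta, m),
+#                               -np.inf, np.inf)
+#     assert np.isclose(total, 1.0)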
+
+
+def _argus_phi(chi):
+    """
+    Utility function for the argus distribution used in the pdf, sf and
+    moment calculation.
+    Note that for all x > 0:
+    gammainc(1.5, x**2/2) = 2 * (_norm_cdf(x) - x * _norm_pdf(x) - 0.5).
+    This can be verified directly by noting that the cdf of Gamma(1.5) can
+    be written as erf(sqrt(x)) - 2*sqrt(x)*exp(-x)/sqrt(Pi).
+    We use gammainc instead of the usual definition because it is more precise
+    for small chi.
+    """
+    return sc.gammainc(1.5, chi**2/2) / 2
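+
+# Illustrative check of the identity quoted above (a sketch using the public
+# API rather than the module-internal helpers)::
+#
+#     import numpy as np
+#     from scipy import special, stats
+#     x = np.linspace(0.1, 3, 7)
+#     lhs = special.gammainc(1.5, x**2/2)
+#     rhs = 2 * (stats.norm.cdf(x) - x * stats.norm.pdf(x) - 0.5)
+#     assert np.allclose(lhs, rhs)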
+
+
+class argus_gen(rv_continuous):
+    r"""
+    Argus distribution
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability density function for `argus` is:
+
+    .. math::
+
+        f(x, \chi) = \frac{\chi^3}{\sqrt{2\pi} \Psi(\chi)} x \sqrt{1-x^2}
+                     \exp(-\chi^2 (1 - x^2)/2)
+
+    for :math:`0 < x < 1` and :math:`\chi > 0`, where
+
+    .. math::
+
+        \Psi(\chi) = \Phi(\chi) - \chi \phi(\chi) - 1/2
+
+    with :math:`\Phi` and :math:`\phi` being the CDF and PDF of a standard
+    normal distribution, respectively.
+
+    `argus` takes :math:`\chi` as a shape parameter.
+
+    %(after_notes)s
+
+    References
+    ----------
+    .. [1] "ARGUS distribution",
+           https://en.wikipedia.org/wiki/ARGUS_distribution
+
+    .. versionadded:: 0.19.0
+
+    %(example)s
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("chi", False, (0, np.inf), (False, False))]
+
+    def _logpdf(self, x, chi):
+        # for x = 0 or 1, logpdf returns -np.inf
+        with np.errstate(divide='ignore'):
+            y = 1.0 - x*x
+            A = 3*np.log(chi) - _norm_pdf_logC - np.log(_argus_phi(chi))
+            return A + np.log(x) + 0.5*np.log1p(-x*x) - chi**2 * y / 2
+
+    def _pdf(self, x, chi):
+        return np.exp(self._logpdf(x, chi))
+
+    def _cdf(self, x, chi):
+        return 1.0 - self._sf(x, chi)
+
+    def _sf(self, x, chi):
+        return _argus_phi(chi * np.sqrt(1 - x**2)) / _argus_phi(chi)
+
+    def _rvs(self, chi, size=None, random_state=None):
+        chi = np.asarray(chi)
+        if chi.size == 1:
+            out = self._rvs_scalar(chi, numsamples=size,
+                                   random_state=random_state)
+        else:
+            shp, bc = _check_shape(chi.shape, size)
+            numsamples = int(np.prod(shp))
+            out = np.empty(size)
+            it = np.nditer([chi],
+                           flags=['multi_index'],
+                           op_flags=[['readonly']])
+            while not it.finished:
+                idx = tuple((it.multi_index[j] if not bc[j] else slice(None))
+                            for j in range(-len(size), 0))
+                r = self._rvs_scalar(it[0], numsamples=numsamples,
+                                     random_state=random_state)
+                out[idx] = r.reshape(shp)
+                it.iternext()
+
+        if size == ():
+            out = out[()]
+        return out
+
+    def _rvs_scalar(self, chi, numsamples=None, random_state=None):
+        # if chi <= 1.8:
+        # use rejection method, see Devroye:
+        # Non-Uniform Random Variate Generation, 1986, section II.3.2.
+        # write: PDF f(x) = c * g(x) * h(x), where
+        # h is [0,1]-valued and g is a density
+        # we use two ways to write f
+        #
+        # Case 1:
+        # write g(x) = 3*x*sqrt(1-x**2), h(x) = exp(-chi**2 * (1-x**2) / 2)
+        # If X has density g, inverting its survival function
+        # (1 - x**2)**(3/2) gives G_inv(u) = np.sqrt(1 - u**(2/3));
+        # u and 1 - u are interchangeable here since u is uniform on [0, 1]
+        #
+        # Case 2:
+        # g(x) = chi**2 * x * exp(-chi**2 * (1-x**2)/2) / (1 - exp(-chi**2 /2))
+        # h(x) = sqrt(1 - x**2), 0 <= x <= 1
+        # one can show that
+        # G_inv(u) = np.sqrt(2*np.log(u*(np.exp(chi**2/2)-1)+1))/chi
+        #          = np.sqrt(1 + 2*np.log(np.exp(-chi**2/2)*(1-u)+u)/chi**2)
+        # the latter expression is used for precision with small chi
+        #
+        # In both cases, the inverse cdf of g can be written analytically, and
+        # we can apply the rejection method:
+        #
+        # REPEAT
+        #    Generate U uniformly distributed on [0, 1]
+        #    Generate X with density g (e.g. via inverse transform sampling:
+        #    X = G_inv(V) with V uniformly distributed on [0, 1])
+        # UNTIL X <= h(X)
+        # RETURN X
+        #
+        # We use case 1 for chi <= 0.5 as it maintains precision for small chi
+        # and case 2 for 0.5 < chi <= 1.8 due to its speed for moderate chi.
+        #
+        # if chi > 1.8:
+        # use relation to the Gamma distribution: if X is ARGUS with parameter
+        # chi, then Y = chi**2 * (1 - X**2) / 2 has density proportional to
+        # sqrt(u) * exp(-u) on [0, chi**2 / 2], i.e. a Gamma(3/2) distribution
+        # conditioned on [0, chi**2 / 2]. Therefore, to sample X from the
+        # ARGUS distribution, we sample Y from the gamma distribution, keeping
+        # only samples on [0, chi**2 / 2], and apply the inverse
+        # transformation X = (1 - 2*Y/chi**2)**(1/2). Since we only
+        # look at chi > 1.8, gamma(1.5).cdf(chi**2/2) is large enough that
+        # Y falls in the interval [0, chi**2 / 2] with high probability:
+        # stats.gamma(1.5).cdf(1.8**2/2) = 0.644...
+        #
+        # The points to switch between the different methods are determined
+        # by a comparison of the runtime of the different methods. However,
+        # the runtime is platform-dependent. The implemented values should
+        # ensure a good overall performance and are supported by an analysis
+        # of the rejection constants of different methods.
+
+        size1d = tuple(np.atleast_1d(numsamples))
+        N = int(np.prod(size1d))
+        x = np.zeros(N)
+        simulated = 0
+        chi2 = chi * chi
+        if chi <= 0.5:
+            d = -chi2 / 2
+            while simulated < N:
+                k = N - simulated
+                u = random_state.uniform(size=k)
+                v = random_state.uniform(size=k)
+                z = v**(2/3)
+                # acceptance condition: u <= h(G_inv(v)). This simplifies to
+                accept = (np.log(u) <= d * z)
+                num_accept = np.sum(accept)
+                if num_accept > 0:
+                    # we still need to transform z=v**(2/3) to X = G_inv(v)
+                    rvs = np.sqrt(1 - z[accept])
+                    x[simulated:(simulated + num_accept)] = rvs
+                    simulated += num_accept
+        elif chi <= 1.8:
+            echi = np.exp(-chi2 / 2)
+            while simulated < N:
+                k = N - simulated
+                u = random_state.uniform(size=k)
+                v = random_state.uniform(size=k)
+                z = 2 * np.log(echi * (1 - v) + v) / chi2
+                # as in case one, simplify u <= h(G_inv(v)) and then transform
+                # z to the target distribution X = G_inv(v)
+                accept = (u**2 + z <= 0)
+                num_accept = np.sum(accept)
+                if num_accept > 0:
+                    rvs = np.sqrt(1 + z[accept])
+                    x[simulated:(simulated + num_accept)] = rvs
+                    simulated += num_accept
+        else:
+            # conditional Gamma for chi > 1.8
+            while simulated < N:
+                k = N - simulated
+                g = random_state.standard_gamma(1.5, size=k)
+                accept = (g <= chi2 / 2)
+                num_accept = np.sum(accept)
+                if num_accept > 0:
+                    x[simulated:(simulated + num_accept)] = g[accept]
+                    simulated += num_accept
+            x = np.sqrt(1 - 2 * x / chi2)
+
+        return np.reshape(x, size1d)
+
+    def _stats(self, chi):
+        # need to ensure that dtype is float
+        # otherwise the mask below does not work for integers
+        chi = np.asarray(chi, dtype=float)
+        phi = _argus_phi(chi)
+        m = np.sqrt(np.pi/8) * chi * sc.ive(1, chi**2/4) / phi
+        # compute second moment, use Taylor expansion for small chi (<= 0.1)
+        mu2 = np.empty_like(chi)
+        mask = chi > 0.1
+        c = chi[mask]
+        mu2[mask] = 1 - 3 / c**2 + c * _norm_pdf(c) / phi[mask]
+        c = chi[~mask]
+        coef = [-358/65690625, 0, -94/1010625, 0, 2/2625, 0, 6/175, 0, 0.4]
+        mu2[~mask] = np.polyval(coef, c)
+        return m, mu2 - m**2, None, None
+
+
+argus = argus_gen(name='argus', longname="An Argus Function", a=0.0, b=1.0)
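+
+# Illustrative sketch: the closed-form mean in ``_stats`` should agree with
+# a direct numerical first moment::
+#
+#     import numpy as np
+#     from scipy import stats, integrate
+#     chi = 2.0
+#     num, _ = integrate.quad(lambda t: t * stats.argus.pdf(t, chi), 0, 1)
+#     assert np.isclose(num, stats.argus.mean(chi))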
+
+
+class rv_histogram(rv_continuous):
+    """
+    Generates a distribution given by a histogram.
+    This is useful to generate a template distribution from a binned
+    datasample.
+
+    As a subclass of the `rv_continuous` class, `rv_histogram` inherits from it
+    a collection of generic methods (see `rv_continuous` for the full list),
+    and implements them based on the properties of the provided binned
+    datasample.
+
+    Parameters
+    ----------
+    histogram : tuple of array_like
+        Tuple containing two array_like objects.
+        The first containing the content of n bins,
+        the second containing the (n+1) bin boundaries.
+        In particular, the return value of `numpy.histogram` is accepted.
+
+    density : bool, optional
+        If False, assumes the histogram is proportional to counts per bin;
+        otherwise, assumes it is proportional to a density.
+        For constant bin widths, these are equivalent, but the distinction
+        is important when bin widths vary (see Notes).
+        If None (default), sets ``density=True`` for backwards compatibility,
+        but warns if the bin widths are variable. Set `density` explicitly
+        to silence the warning.
+
+        .. versionadded:: 1.10.0
+
+    Notes
+    -----
+    When a histogram has unequal bin widths, there is a distinction between
+    histograms that are proportional to counts per bin and histograms that are
+    proportional to probability density over a bin. If `numpy.histogram` is
+    called with its default ``density=False``, the resulting histogram is the
+    number of counts per bin, so ``density=False`` should be passed to
+    `rv_histogram`. If `numpy.histogram` is called with ``density=True``, the
+    resulting histogram is in terms of probability density, so ``density=True``
+    should be passed to `rv_histogram`. To avoid warnings, always pass
+    ``density`` explicitly when the input histogram has unequal bin widths.
+
+    There are no additional shape parameters except for the loc and scale.
+    The pdf is defined as a stepwise function from the provided histogram.
+    The cdf is a linear interpolation of the pdf.
+
+    .. versionadded:: 0.19.0
+
+    Examples
+    --------
+
+    Create a scipy.stats distribution from a numpy histogram
+
+    >>> import scipy.stats
+    >>> import numpy as np
+    >>> data = scipy.stats.norm.rvs(size=100000, loc=0, scale=1.5, random_state=123)
+    >>> hist = np.histogram(data, bins=100)
+    >>> hist_dist = scipy.stats.rv_histogram(hist, density=False)
+
+    Behaves like an ordinary scipy rv_continuous distribution
+
+    >>> hist_dist.pdf(1.0)
+    0.20538577847618705
+    >>> hist_dist.cdf(2.0)
+    0.90818568543056499
+
+    PDF is zero above (below) the highest (lowest) bin of the histogram,
+    defined by the max (min) of the original dataset
+
+    >>> hist_dist.pdf(np.max(data))
+    0.0
+    >>> hist_dist.cdf(np.max(data))
+    1.0
+    >>> hist_dist.pdf(np.min(data))
+    7.7591907244498314e-05
+    >>> hist_dist.cdf(np.min(data))
+    0.0
+
+    PDF and CDF follow the histogram
+
+    >>> import matplotlib.pyplot as plt
+    >>> X = np.linspace(-5.0, 5.0, 100)
+    >>> fig, ax = plt.subplots()
+    >>> ax.set_title("PDF from Template")
+    >>> ax.hist(data, density=True, bins=100)
+    >>> ax.plot(X, hist_dist.pdf(X), label='PDF')
+    >>> ax.plot(X, hist_dist.cdf(X), label='CDF')
+    >>> ax.legend()
+    >>> fig.show()
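+
+    For a histogram with unequal bin widths, pass ``density`` explicitly.
+    An illustrative sketch with two bins of widths 1 and 2:
+
+    >>> bins = np.array([0.0, 1.0, 3.0])
+    >>> counts = np.array([10, 10])
+    >>> dist = scipy.stats.rv_histogram((counts, bins), density=False)
+    >>> np.isclose(dist.pdf(0.5), 2 * dist.pdf(2.0))  # wider bin, lower density
+    True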
+
+    """
+    _support_mask = rv_continuous._support_mask
+
+    def __init__(self, histogram, *args, density=None, **kwargs):
+        """
+        Create a new distribution using the given histogram
+
+        Parameters
+        ----------
+        histogram : tuple of array_like
+            Tuple containing two array_like objects.
+            The first containing the content of n bins,
+            the second containing the (n+1) bin boundaries.
+            In particular, the return value of np.histogram is accepted.
+        density : bool, optional
+            If False, assumes the histogram is proportional to counts per bin;
+            otherwise, assumes it is proportional to a density.
+            For constant bin widths, these are equivalent.
+            If None (default), sets ``density=True`` for backward
+            compatibility, but warns if the bin widths are variable. Set
+            `density` explicitly to silence the warning.
+        """
+        self._histogram = histogram
+        self._density = density
+        if len(histogram) != 2:
+            raise ValueError("Expected length 2 for parameter histogram")
+        self._hpdf = np.asarray(histogram[0])
+        self._hbins = np.asarray(histogram[1])
+        if len(self._hpdf) + 1 != len(self._hbins):
+            raise ValueError("Number of elements in histogram content "
+                             "and histogram boundaries do not match, "
+                             "expected n and n+1.")
+        self._hbin_widths = self._hbins[1:] - self._hbins[:-1]
+        bins_vary = not np.allclose(self._hbin_widths, self._hbin_widths[0])
+        if density is None and bins_vary:
+            message = ("Bin widths are not constant. Assuming `density=True`."
+                       "Specify `density` explicitly to silence this warning.")
+            warnings.warn(message, RuntimeWarning, stacklevel=2)
+            density = True
+        elif not density:
+            self._hpdf = self._hpdf / self._hbin_widths
+
+        self._hpdf = self._hpdf / float(np.sum(self._hpdf * self._hbin_widths))
+        self._hcdf = np.cumsum(self._hpdf * self._hbin_widths)
+        self._hpdf = np.hstack([0.0, self._hpdf, 0.0])
+        self._hcdf = np.hstack([0.0, self._hcdf])
+        # Set support
+        kwargs['a'] = self.a = self._hbins[0]
+        kwargs['b'] = self.b = self._hbins[-1]
+        super().__init__(*args, **kwargs)
+
+    def _pdf(self, x):
+        """
+        PDF of the histogram
+        """
+        return self._hpdf[np.searchsorted(self._hbins, x, side='right')]
+
+    def _cdf(self, x):
+        """
+        CDF calculated from the histogram
+        """
+        return np.interp(x, self._hbins, self._hcdf)
+
+    def _ppf(self, x):
+        """
+        Percentile function calculated from the histogram
+        """
+        return np.interp(x, self._hcdf, self._hbins)
+
+    def _munp(self, n):
+        """Compute the n-th non-central moment."""
+        integrals = (self._hbins[1:]**(n+1) - self._hbins[:-1]**(n+1)) / (n+1)
+        return np.sum(self._hpdf[1:-1] * integrals)
+
+    def _entropy(self):
+        """Compute entropy of distribution"""
+        res = _lazywhere(self._hpdf[1:-1] > 0.0,
+                         (self._hpdf[1:-1],),
+                         np.log,
+                         0.0)
+        return -np.sum(self._hpdf[1:-1] * res * self._hbin_widths)
+
+    def _updated_ctor_param(self):
+        """
+        Set the histogram as additional constructor argument
+        """
+        dct = super()._updated_ctor_param()
+        dct['histogram'] = self._histogram
+        dct['density'] = self._density
+        return dct
+
+
+class studentized_range_gen(rv_continuous):
+    r"""A studentized range continuous random variable.
+
+    %(before_notes)s
+
+    See Also
+    --------
+    t: Student's t distribution
+
+    Notes
+    -----
+    The probability density function for `studentized_range` is:
+
+    .. math::
+
+         f(x; k, \nu) = \frac{k(k-1)\nu^{\nu/2}}{\Gamma(\nu/2)
+                        2^{\nu/2-1}} \int_{0}^{\infty} \int_{-\infty}^{\infty}
+                        s^{\nu} e^{-\nu s^2/2} \phi(z) \phi(sx + z)
+                        [\Phi(sx + z) - \Phi(z)]^{k-2} \,dz \,ds
+
+    for :math:`x \ge 0`, :math:`k > 1`, and :math:`\nu > 0`.
+
+    `studentized_range` takes ``k`` for :math:`k` and ``df`` for :math:`\nu`
+    as shape parameters.
+
+    When :math:`\nu` exceeds 100,000, an asymptotic approximation (infinite
+    degrees of freedom) is used to compute the cumulative distribution
+    function [4]_ and probability distribution function.
+
+    %(after_notes)s
+
+    References
+    ----------
+
+    .. [1] "Studentized range distribution",
+           https://en.wikipedia.org/wiki/Studentized_range_distribution
+    .. [2] Batista, Ben Dêivide, et al. "Externally Studentized Normal Midrange
+           Distribution." Ciência e Agrotecnologia, vol. 41, no. 4, 2017, pp.
+           378-389., doi:10.1590/1413-70542017414047716.
+    .. [3] Harter, H. Leon. "Tables of Range and Studentized Range." The Annals
+           of Mathematical Statistics, vol. 31, no. 4, 1960, pp. 1122-1147.
+           JSTOR, www.jstor.org/stable/2237810. Accessed 18 Feb. 2021.
+    .. [4] Lund, R. E., and J. R. Lund. "Algorithm AS 190: Probabilities and
+           Upper Quantiles for the Studentized Range." Journal of the Royal
+           Statistical Society. Series C (Applied Statistics), vol. 32, no. 2,
+           1983, pp. 204-210. JSTOR, www.jstor.org/stable/2347300. Accessed 18
+           Feb. 2021.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.stats import studentized_range
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots(1, 1)
+
+    Calculate the first four moments:
+
+    >>> k, df = 3, 10
+    >>> mean, var, skew, kurt = studentized_range.stats(k, df, moments='mvsk')
+
+    Display the probability density function (``pdf``):
+
+    >>> x = np.linspace(studentized_range.ppf(0.01, k, df),
+    ...                 studentized_range.ppf(0.99, k, df), 100)
+    >>> ax.plot(x, studentized_range.pdf(x, k, df),
+    ...         'r-', lw=5, alpha=0.6, label='studentized_range pdf')
+
+    Alternatively, the distribution object can be called (as a function)
+    to fix the shape, location and scale parameters. This returns a "frozen"
+    RV object holding the given parameters fixed.
+
+    Freeze the distribution and display the frozen ``pdf``:
+
+    >>> rv = studentized_range(k, df)
+    >>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
+
+    Check accuracy of ``cdf`` and ``ppf``:
+
+    >>> vals = studentized_range.ppf([0.001, 0.5, 0.999], k, df)
+    >>> np.allclose([0.001, 0.5, 0.999], studentized_range.cdf(vals, k, df))
+    True
+
+    Rather than using (``studentized_range.rvs``) to generate random variates,
+    which is very slow for this distribution, we can approximate the inverse
+    CDF using an interpolator, and then perform inverse transform sampling
+    with this approximate inverse CDF.
+
+    This distribution has an infinite but thin right tail, so we focus our
+    attention on the leftmost 99.9 percent.
+
+    >>> a, b = studentized_range.ppf([0, .999], k, df)
+    >>> a, b
+    (0.0, 7.41058083802274)
+
+    >>> from scipy.interpolate import interp1d
+    >>> rng = np.random.default_rng()
+    >>> xs = np.linspace(a, b, 50)
+    >>> cdf = studentized_range.cdf(xs, k, df)
+    >>> # Create an interpolant of the inverse CDF
+    >>> ppf = interp1d(cdf, xs, fill_value='extrapolate')
+    >>> # Perform inverse transform sampling using the interpolant
+    >>> r = ppf(rng.uniform(size=1000))
+
+    And compare the histogram:
+
+    >>> ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
+    >>> ax.legend(loc='best', frameon=False)
+    >>> plt.show()
+
+    """
+
+    def _argcheck(self, k, df):
+        return (k > 1) & (df > 0)
+
+    def _shape_info(self):
+        ik = _ShapeInfo("k", False, (1, np.inf), (False, False))
+        idf = _ShapeInfo("df", False, (0, np.inf), (False, False))
+        return [ik, idf]
+
+    def _fitstart(self, data):
+        # Default is k=1, but that is not a valid value of the parameter.
+        return super()._fitstart(data, args=(2, 1))
+
+    def _munp(self, K, k, df):
+        cython_symbol = '_studentized_range_moment'
+        _a, _b = self._get_support()
+        # all three of these are used to create a numpy array so they must
+        # be the same shape.
+
+        def _single_moment(K, k, df):
+            log_const = _stats._studentized_range_pdf_logconst(k, df)
+            arg = [K, k, df, log_const]
+            usr_data = np.array(arg, float).ctypes.data_as(ctypes.c_void_p)
+
+            llc = LowLevelCallable.from_cython(_stats, cython_symbol, usr_data)
+
+            ranges = [(-np.inf, np.inf), (0, np.inf), (_a, _b)]
+            opts = dict(epsabs=1e-11, epsrel=1e-12)
+
+            return integrate.nquad(llc, ranges=ranges, opts=opts)[0]
+
+        ufunc = np.frompyfunc(_single_moment, 3, 1)
+        return np.float64(ufunc(K, k, df))
+
+    def _pdf(self, x, k, df):
+
+        def _single_pdf(q, k, df):
+            # The infinite form of the PDF is derived from the infinite
+            # CDF.
+            if df < 100000:
+                cython_symbol = '_studentized_range_pdf'
+                log_const = _stats._studentized_range_pdf_logconst(k, df)
+                arg = [q, k, df, log_const]
+                usr_data = np.array(arg, float).ctypes.data_as(ctypes.c_void_p)
+                ranges = [(-np.inf, np.inf), (0, np.inf)]
+
+            else:
+                cython_symbol = '_studentized_range_pdf_asymptotic'
+                arg = [q, k]
+                usr_data = np.array(arg, float).ctypes.data_as(ctypes.c_void_p)
+                ranges = [(-np.inf, np.inf)]
+
+            llc = LowLevelCallable.from_cython(_stats, cython_symbol, usr_data)
+            opts = dict(epsabs=1e-11, epsrel=1e-12)
+            return integrate.nquad(llc, ranges=ranges, opts=opts)[0]
+
+        ufunc = np.frompyfunc(_single_pdf, 3, 1)
+        return np.float64(ufunc(x, k, df))
+
+    def _cdf(self, x, k, df):
+
+        def _single_cdf(q, k, df):
+            # "When the degrees of freedom V are infinite the probability
+            # integral takes [on a] simpler form," and a single asymptotic
+            # integral is evaluated rather than the standard double integral.
+            # (Lund, Lund, page 205)
+            if df < 100000:
+                cython_symbol = '_studentized_range_cdf'
+                log_const = _stats._studentized_range_cdf_logconst(k, df)
+                arg = [q, k, df, log_const]
+                usr_data = np.array(arg, float).ctypes.data_as(ctypes.c_void_p)
+                ranges = [(-np.inf, np.inf), (0, np.inf)]
+
+            else:
+                cython_symbol = '_studentized_range_cdf_asymptotic'
+                arg = [q, k]
+                usr_data = np.array(arg, float).ctypes.data_as(ctypes.c_void_p)
+                ranges = [(-np.inf, np.inf)]
+
+            llc = LowLevelCallable.from_cython(_stats, cython_symbol, usr_data)
+            opts = dict(epsabs=1e-11, epsrel=1e-12)
+            return integrate.nquad(llc, ranges=ranges, opts=opts)[0]
+
+        ufunc = np.frompyfunc(_single_cdf, 3, 1)
+
+        # clip p-values to ensure they are in [0, 1].
+        return np.clip(np.float64(ufunc(x, k, df)), 0, 1)
+
+
+studentized_range = studentized_range_gen(name='studentized_range', a=0,
+                                          b=np.inf)
+
+
+# Collect names of classes and objects in this module.
+pairs = list(globals().copy().items())
+_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_continuous)
+
+__all__ = _distn_names + _distn_gen_names + ['rv_histogram']
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_covariance.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_covariance.py
new file mode 100644
index 00000000..6a605072
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_covariance.py
@@ -0,0 +1,629 @@
+from functools import cached_property
+
+import numpy as np
+from scipy import linalg
+from scipy.stats import _multivariate
+
+
+__all__ = ["Covariance"]
+
+
+class Covariance:
+    """
+    Representation of a covariance matrix
+
+    Calculations involving covariance matrices (e.g. data whitening,
+    multivariate normal function evaluation) are often performed more
+    efficiently using a decomposition of the covariance matrix instead of the
+    covariance matrix itself. This class allows the user to construct an
+    object representing a covariance matrix using any of several
+    decompositions and perform calculations using a common interface.
+
+    .. note::
+
+        The `Covariance` class cannot be instantiated directly. Instead, use
+        one of the factory methods (e.g. `Covariance.from_diagonal`).
+
+    Examples
+    --------
+    The `Covariance` class is used by calling one of its
+    factory methods to create a `Covariance` object, then passing that
+    representation of the covariance matrix as a shape parameter of a
+    multivariate distribution.
+
+    For instance, the multivariate normal distribution can accept an array
+    representing a covariance matrix:
+
+    >>> from scipy import stats
+    >>> import numpy as np
+    >>> d = [1, 2, 3]
+    >>> A = np.diag(d)  # a diagonal covariance matrix
+    >>> x = [4, -2, 5]  # a point of interest
+    >>> dist = stats.multivariate_normal(mean=[0, 0, 0], cov=A)
+    >>> dist.pdf(x)
+    4.9595685102808205e-08
+
+    but the calculations are performed in a very generic way that does not
+    take advantage of any special properties of the covariance matrix. Because
+    our covariance matrix is diagonal, we can use ``Covariance.from_diagonal``
+    to create an object representing the covariance matrix, and
+    `multivariate_normal` can use this to compute the probability density
+    function more efficiently.
+
+    >>> cov = stats.Covariance.from_diagonal(d)
+    >>> dist = stats.multivariate_normal(mean=[0, 0, 0], cov=cov)
+    >>> dist.pdf(x)
+    4.9595685102808205e-08
+
+    """
+    def __init__(self):
+        message = ("The `Covariance` class cannot be instantiated directly. "
+                   "Please use one of the factory methods "
+                   "(e.g. `Covariance.from_diagonal`).")
+        raise NotImplementedError(message)
+
+    @staticmethod
+    def from_diagonal(diagonal):
+        r"""
+        Return a representation of a covariance matrix from its diagonal.
+
+        Parameters
+        ----------
+        diagonal : array_like
+            The diagonal elements of a diagonal matrix.
+
+        Notes
+        -----
+        Let the diagonal elements of a diagonal covariance matrix :math:`D` be
+        stored in the vector :math:`d`.
+
+        When all elements of :math:`d` are strictly positive, whitening of a
+        data point :math:`x` is performed by computing
+        :math:`x \cdot d^{-1/2}`, where the inverse square root can be taken
+        element-wise.
+        :math:`\log\det{D}` is calculated as :math:`-2 \sum(\log{d})`,
+        where the :math:`\log` operation is performed element-wise.
+
+        This `Covariance` class supports singular covariance matrices. When
+        computing ``_log_pdet``, non-positive elements of :math:`d` are
+        ignored. Whitening is not well defined when the point to be whitened
+        does not lie in the span of the columns of the covariance matrix. The
+        convention taken here is to treat the inverse square root of
+        non-positive elements of :math:`d` as zeros.
+
+        Examples
+        --------
+        Prepare a symmetric positive definite covariance matrix ``A`` and a
+        data point ``x``.
+
+        >>> import numpy as np
+        >>> from scipy import stats
+        >>> rng = np.random.default_rng()
+        >>> n = 5
+        >>> A = np.diag(rng.random(n))
+        >>> x = rng.random(size=n)
+
+        Extract the diagonal from ``A`` and create the `Covariance` object.
+
+        >>> d = np.diag(A)
+        >>> cov = stats.Covariance.from_diagonal(d)
+
+        Compare the functionality of the `Covariance` object against a
+        reference implementations.
+
+        >>> res = cov.whiten(x)
+        >>> ref = np.diag(d**-0.5) @ x
+        >>> np.allclose(res, ref)
+        True
+        >>> res = cov.log_pdet
+        >>> ref = np.linalg.slogdet(A)[-1]
+        >>> np.allclose(res, ref)
+        True
+
+        """
+        return CovViaDiagonal(diagonal)
+
+    @staticmethod
+    def from_precision(precision, covariance=None):
+        r"""
+        Return a representation of a covariance from its precision matrix.
+
+        Parameters
+        ----------
+        precision : array_like
+            The precision matrix; that is, the inverse of a square, symmetric,
+            positive definite covariance matrix.
+        covariance : array_like, optional
+            The square, symmetric, positive definite covariance matrix. If not
+            provided, this may need to be calculated (e.g. to evaluate the
+            cumulative distribution function of
+            `scipy.stats.multivariate_normal`) by inverting `precision`.
+
+        Notes
+        -----
+        Let the covariance matrix be :math:`A`, its precision matrix be
+        :math:`P = A^{-1}`, and :math:`L` be the lower Cholesky factor such
+        that :math:`L L^T = P`.
+        Whitening of a data point :math:`x` is performed by computing
+        :math:`x^T L`. :math:`\log\det{A}` is calculated as
+        :math:`-2tr(\log{L})`, where the :math:`\log` operation is performed
+        element-wise.
+
+        This `Covariance` class does not support singular covariance matrices
+        because the precision matrix does not exist for a singular covariance
+        matrix.
+
+        Examples
+        --------
+        Prepare a symmetric positive definite precision matrix ``P`` and a
+        data point ``x``. (If the precision matrix is not already available,
+        consider the other factory methods of the `Covariance` class.)
+
+        >>> import numpy as np
+        >>> from scipy import stats
+        >>> rng = np.random.default_rng()
+        >>> n = 5
+        >>> P = rng.random(size=(n, n))
+        >>> P = P @ P.T  # a precision matrix must be positive definite
+        >>> x = rng.random(size=n)
+
+        Create the `Covariance` object.
+
+        >>> cov = stats.Covariance.from_precision(P)
+
+        Compare the functionality of the `Covariance` object against
+        reference implementations.
+
+        >>> res = cov.whiten(x)
+        >>> ref = x @ np.linalg.cholesky(P)
+        >>> np.allclose(res, ref)
+        True
+        >>> res = cov.log_pdet
+        >>> ref = -np.linalg.slogdet(P)[-1]
+        >>> np.allclose(res, ref)
+        True
+
+        """
+        return CovViaPrecision(precision, covariance)
+
+    @staticmethod
+    def from_cholesky(cholesky):
+        r"""
+        Representation of a covariance provided via the (lower) Cholesky factor
+
+        Parameters
+        ----------
+        cholesky : array_like
+            The lower triangular Cholesky factor of the covariance matrix.
+
+        Notes
+        -----
+        Let the covariance matrix be :math:`A` and :math:`L` be the lower
+        Cholesky factor such that :math:`L L^T = A`.
+        Whitening of a data point :math:`x` is performed by computing
+        :math:`L^{-1} x`. :math:`\log\det{A}` is calculated as
+        :math:`2tr(\log{L})`, where the :math:`\log` operation is performed
+        element-wise.
+
+        This `Covariance` class does not support singular covariance matrices
+        because the Cholesky decomposition does not exist for a singular
+        covariance matrix.
+
+        Examples
+        --------
+        Prepare a symmetric positive definite covariance matrix ``A`` and a
+        data point ``x``.
+
+        >>> import numpy as np
+        >>> from scipy import stats
+        >>> rng = np.random.default_rng()
+        >>> n = 5
+        >>> A = rng.random(size=(n, n))
+        >>> A = A @ A.T  # make the covariance symmetric positive definite
+        >>> x = rng.random(size=n)
+
+        Perform the Cholesky decomposition of ``A`` and create the
+        `Covariance` object.
+
+        >>> L = np.linalg.cholesky(A)
+        >>> cov = stats.Covariance.from_cholesky(L)
+
+        Compare the functionality of the `Covariance` object against
+        reference implementation.
+
+        >>> from scipy.linalg import solve_triangular
+        >>> res = cov.whiten(x)
+        >>> ref = solve_triangular(L, x, lower=True)
+        >>> np.allclose(res, ref)
+        True
+        >>> res = cov.log_pdet
+        >>> ref = np.linalg.slogdet(A)[-1]
+        >>> np.allclose(res, ref)
+        True
+
+        """
+        return CovViaCholesky(cholesky)
+
+    @staticmethod
+    def from_eigendecomposition(eigendecomposition):
+        r"""
+        Representation of a covariance provided via eigendecomposition
+
+        Parameters
+        ----------
+        eigendecomposition : sequence
+            A sequence (nominally a tuple) containing the eigenvalue and
+            eigenvector arrays as computed by `scipy.linalg.eigh` or
+            `numpy.linalg.eigh`.
+
+        Notes
+        -----
+        Let the covariance matrix be :math:`A`, let :math:`V` be matrix of
+        eigenvectors, and let :math:`W` be the diagonal matrix of eigenvalues
+        such that :math:`V W V^T = A`.
+
+        When all of the eigenvalues are strictly positive, whitening of a
+        data point :math:`x` is performed by computing
+        :math:`x^T (V W^{-1/2})`, where the inverse square root can be taken
+        element-wise.
+        :math:`\log\det{A}` is calculated as  :math:`tr(\log{W})`,
+        where the :math:`\log` operation is performed element-wise.
+
+        This `Covariance` class supports singular covariance matrices. When
+        computing ``_log_pdet``, non-positive eigenvalues are ignored.
+        Whitening is not well defined when the point to be whitened
+        does not lie in the span of the columns of the covariance matrix. The
+        convention taken here is to treat the inverse square root of
+        non-positive eigenvalues as zeros.
+
+        Examples
+        --------
+        Prepare a symmetric positive definite covariance matrix ``A`` and a
+        data point ``x``.
+
+        >>> import numpy as np
+        >>> from scipy import stats
+        >>> rng = np.random.default_rng()
+        >>> n = 5
+        >>> A = rng.random(size=(n, n))
+        >>> A = A @ A.T  # make the covariance symmetric positive definite
+        >>> x = rng.random(size=n)
+
+        Perform the eigendecomposition of ``A`` and create the `Covariance`
+        object.
+
+        >>> w, v = np.linalg.eigh(A)
+        >>> cov = stats.Covariance.from_eigendecomposition((w, v))
+
+        Compare the functionality of the `Covariance` object against
+        reference implementations.
+
+        >>> res = cov.whiten(x)
+        >>> ref = x @ (v @ np.diag(w**-0.5))
+        >>> np.allclose(res, ref)
+        True
+        >>> res = cov.log_pdet
+        >>> ref = np.linalg.slogdet(A)[-1]
+        >>> np.allclose(res, ref)
+        True
+
+        """
+        return CovViaEigendecomposition(eigendecomposition)
+
+    def whiten(self, x):
+        """
+        Perform a whitening transformation on data.
+
+        "Whitening" ("white" as in "white noise", in which each frequency has
+        equal magnitude) transforms a set of random variables into a new set of
+        random variables with unit-diagonal covariance. When a whitening
+        transform is applied to a sample of points distributed according to
+        a multivariate normal distribution with zero mean, the covariance of
+        the transformed sample is approximately the identity matrix.
+
+        Parameters
+        ----------
+        x : array_like
+            An array of points. The last dimension must correspond with the
+            dimensionality of the space, i.e., the number of columns in the
+            covariance matrix.
+
+        Returns
+        -------
+        x_ : array_like
+            The transformed array of points.
+
+        References
+        ----------
+        .. [1] "Whitening Transformation". Wikipedia.
+               https://en.wikipedia.org/wiki/Whitening_transformation
+        .. [2] Novak, Lukas, and Miroslav Vorechovsky. "Generalization of
+               coloring linear transformation". Transactions of VSB 18.2
+               (2018): 31-35. :doi:`10.31490/tces-2018-0013`
+
+        Examples
+        --------
+        >>> import numpy as np
+        >>> from scipy import stats
+        >>> rng = np.random.default_rng()
+        >>> n = 3
+        >>> A = rng.random(size=(n, n))
+        >>> cov_array = A @ A.T  # make matrix symmetric positive definite
+        >>> precision = np.linalg.inv(cov_array)
+        >>> cov_object = stats.Covariance.from_precision(precision)
+        >>> x = rng.multivariate_normal(np.zeros(n), cov_array, size=(10000))
+        >>> x_ = cov_object.whiten(x)
+        >>> np.cov(x_, rowvar=False)  # near-identity covariance
+        array([[0.97862122, 0.00893147, 0.02430451],
+               [0.00893147, 0.96719062, 0.02201312],
+               [0.02430451, 0.02201312, 0.99206881]])
+
+        """
+        return self._whiten(np.asarray(x))
+
+    def colorize(self, x):
+        """
+        Perform a colorizing transformation on data.
+
+        "Colorizing" ("color" as in "colored noise", in which different
+        frequencies may have different magnitudes) transforms a set of
+        uncorrelated random variables into a new set of random variables with
+        the desired covariance. When a coloring transform is applied to a
+        sample of points distributed according to a multivariate normal
+        distribution with identity covariance and zero mean, the covariance of
+        the transformed sample is approximately the covariance matrix used
+        in the coloring transform.
+
+        Parameters
+        ----------
+        x : array_like
+            An array of points. The last dimension must correspond with the
+            dimensionality of the space, i.e., the number of columns in the
+            covariance matrix.
+
+        Returns
+        -------
+        x_ : array_like
+            The transformed array of points.
+
+        References
+        ----------
+        .. [1] "Whitening Transformation". Wikipedia.
+               https://en.wikipedia.org/wiki/Whitening_transformation
+        .. [2] Novak, Lukas, and Miroslav Vorechovsky. "Generalization of
+               coloring linear transformation". Transactions of VSB 18.2
+               (2018): 31-35. :doi:`10.31490/tces-2018-0013`
+
+        Examples
+        --------
+        >>> import numpy as np
+        >>> from scipy import stats
+        >>> rng = np.random.default_rng(1638083107694713882823079058616272161)
+        >>> n = 3
+        >>> A = rng.random(size=(n, n))
+        >>> cov_array = A @ A.T  # make matrix symmetric positive definite
+        >>> cholesky = np.linalg.cholesky(cov_array)
+        >>> cov_object = stats.Covariance.from_cholesky(cholesky)
+        >>> x = rng.multivariate_normal(np.zeros(n), np.eye(n), size=(10000))
+        >>> x_ = cov_object.colorize(x)
+        >>> cov_data = np.cov(x_, rowvar=False)
+        >>> np.allclose(cov_data, cov_array, rtol=3e-2)
+        True
+        """
+        return self._colorize(np.asarray(x))
+
+    @property
+    def log_pdet(self):
+        """
+        Log of the pseudo-determinant of the covariance matrix
+        """
+        return np.array(self._log_pdet, dtype=float)[()]
+
+    @property
+    def rank(self):
+        """
+        Rank of the covariance matrix
+        """
+        return np.array(self._rank, dtype=int)[()]
+
+    @property
+    def covariance(self):
+        """
+        Explicit representation of the covariance matrix
+        """
+        return self._covariance
+
+    @property
+    def shape(self):
+        """
+        Shape of the covariance array
+        """
+        return self._shape
+
+    def _validate_matrix(self, A, name):
+        A = np.atleast_2d(A)
+        m, n = A.shape[-2:]
+        if m != n or A.ndim != 2 or not (np.issubdtype(A.dtype, np.integer) or
+                                         np.issubdtype(A.dtype, np.floating)):
+            message = (f"The input `{name}` must be a square, "
+                       "two-dimensional array of real numbers.")
+            raise ValueError(message)
+        return A
+
+    def _validate_vector(self, A, name):
+        A = np.atleast_1d(A)
+        if A.ndim != 1 or not (np.issubdtype(A.dtype, np.integer) or
+                               np.issubdtype(A.dtype, np.floating)):
+            message = (f"The input `{name}` must be a one-dimensional array "
+                       "of real numbers.")
+            raise ValueError(message)
+        return A
+
+
+class CovViaPrecision(Covariance):
+
+    def __init__(self, precision, covariance=None):
+        precision = self._validate_matrix(precision, 'precision')
+        if covariance is not None:
+            covariance = self._validate_matrix(covariance, 'covariance')
+            message = "`precision.shape` must equal `covariance.shape`."
+            if precision.shape != covariance.shape:
+                raise ValueError(message)
+
+        self._chol_P = np.linalg.cholesky(precision)
+        self._log_pdet = -2*np.log(np.diag(self._chol_P)).sum(axis=-1)
+        self._rank = precision.shape[-1]  # must be full rank if invertible
+        self._precision = precision
+        self._cov_matrix = covariance
+        self._shape = precision.shape
+        self._allow_singular = False
+
+    def _whiten(self, x):
+        return x @ self._chol_P
+
+    @cached_property
+    def _covariance(self):
+        n = self._shape[-1]
+        return (linalg.cho_solve((self._chol_P, True), np.eye(n))
+                if self._cov_matrix is None else self._cov_matrix)
+
+    def _colorize(self, x):
+        return linalg.solve_triangular(self._chol_P.T, x.T, lower=False).T
+
+
+def _dot_diag(x, d):
+    # If d were a full diagonal matrix, x @ d would always do what we want.
+    # Special treatment is needed for n-dimensional `d` in which each row
+    # includes only the diagonal elements of a covariance matrix.
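+    # For example, points x with shape (m, n) and diagonals d with shape
+    # (k, n): plain ``x * d`` fails for m != k, while expanding d to
+    # (k, 1, n) broadcasts to a result of shape (k, m, n).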
+    return x * d if x.ndim < 2 else x * np.expand_dims(d, -2)
+
+
+class CovViaDiagonal(Covariance):
+
+    def __init__(self, diagonal):
+        diagonal = self._validate_vector(diagonal, 'diagonal')
+
+        i_zero = diagonal <= 0
+        positive_diagonal = np.array(diagonal, dtype=np.float64)
+
+        positive_diagonal[i_zero] = 1  # ones don't affect determinant
+        self._log_pdet = np.sum(np.log(positive_diagonal), axis=-1)
+
+        pseudo_reciprocals = 1 / np.sqrt(positive_diagonal)
+        pseudo_reciprocals[i_zero] = 0
+
+        self._sqrt_diagonal = np.sqrt(diagonal)
+        self._LP = pseudo_reciprocals
+        self._rank = positive_diagonal.shape[-1] - i_zero.sum(axis=-1)
+        self._covariance = np.apply_along_axis(np.diag, -1, diagonal)
+        self._i_zero = i_zero
+        self._shape = self._covariance.shape
+        self._allow_singular = True
+
+    def _whiten(self, x):
+        return _dot_diag(x, self._LP)
+
+    def _colorize(self, x):
+        return _dot_diag(x, self._sqrt_diagonal)
+
+    def _support_mask(self, x):
+        """
+        Check whether x lies in the support of the distribution.
+        """
+        return ~np.any(_dot_diag(x, self._i_zero), axis=-1)
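+
+# Singular-diagonal sketch (illustrative): non-positive entries reduce the
+# rank and, by the convention documented in ``from_diagonal``, whiten to
+# zero::
+#
+#     import numpy as np
+#     from scipy import stats
+#     cov = stats.Covariance.from_diagonal([4.0, 0.0])
+#     assert cov.rank == 1
+#     assert np.allclose(cov.whiten([2.0, 5.0]), [1.0, 0.0])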
+
+
+class CovViaCholesky(Covariance):
+
+    def __init__(self, cholesky):
+        L = self._validate_matrix(cholesky, 'cholesky')
+
+        self._factor = L
+        self._log_pdet = 2*np.log(np.diag(self._factor)).sum(axis=-1)
+        self._rank = L.shape[-1]  # must be full rank for cholesky
+        self._covariance = L @ L.T
+        self._shape = L.shape
+        self._allow_singular = False
+
+    def _whiten(self, x):
+        res = linalg.solve_triangular(self._factor, x.T, lower=True).T
+        return res
+
+    def _colorize(self, x):
+        return x @ self._factor.T
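+
+# Round-trip sketch (illustrative): whitening should undo colorizing for any
+# valid lower Cholesky factor::
+#
+#     import numpy as np
+#     from scipy import stats
+#     rng = np.random.default_rng(1234)
+#     A = rng.random((3, 3))
+#     L = np.linalg.cholesky(A @ A.T + np.eye(3))
+#     cov = stats.Covariance.from_cholesky(L)
+#     x = rng.random(3)
+#     assert np.allclose(cov.whiten(cov.colorize(x)), x)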
+
+
+class CovViaEigendecomposition(Covariance):
+
+    def __init__(self, eigendecomposition):
+        eigenvalues, eigenvectors = eigendecomposition
+        eigenvalues = self._validate_vector(eigenvalues, 'eigenvalues')
+        eigenvectors = self._validate_matrix(eigenvectors, 'eigenvectors')
+        message = ("The shapes of `eigenvalues` and `eigenvectors` "
+                   "must be compatible.")
+        try:
+            eigenvalues = np.expand_dims(eigenvalues, -2)
+            eigenvectors, eigenvalues = np.broadcast_arrays(eigenvectors,
+                                                            eigenvalues)
+            eigenvalues = eigenvalues[..., 0, :]
+        except ValueError:
+            raise ValueError(message)
+
+        i_zero = eigenvalues <= 0
+        positive_eigenvalues = np.array(eigenvalues, dtype=np.float64)
+
+        positive_eigenvalues[i_zero] = 1  # ones don't affect determinant
+        self._log_pdet = np.sum(np.log(positive_eigenvalues), axis=-1)
+
+        pseudo_reciprocals = 1 / np.sqrt(positive_eigenvalues)
+        pseudo_reciprocals[i_zero] = 0
+
+        self._LP = eigenvectors * pseudo_reciprocals
+        self._LA = eigenvectors * np.sqrt(positive_eigenvalues)
+        self._rank = positive_eigenvalues.shape[-1] - i_zero.sum(axis=-1)
+        self._w = eigenvalues
+        self._v = eigenvectors
+        self._shape = eigenvectors.shape
+        self._null_basis = eigenvectors * i_zero
+        # This is only used for `_support_mask`, not to decide whether
+        # the covariance is singular or not.
+        self._eps = _multivariate._eigvalsh_to_eps(eigenvalues) * 10**3
+        self._allow_singular = True
+
+    def _whiten(self, x):
+        return x @ self._LP
+
+    def _colorize(self, x):
+        return x @ self._LA.T
+
+    @cached_property
+    def _covariance(self):
+        return (self._v * self._w) @ self._v.T
+
+    def _support_mask(self, x):
+        """
+        Check whether x lies in the support of the distribution.
+        """
+        residual = np.linalg.norm(x @ self._null_basis, axis=-1)
+        in_support = residual < self._eps
+        return in_support
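+
+# Illustrative sketch (hand-worked numbers): with eigenvalues w and
+# eigenvectors v, ``_whiten`` multiplies by v / sqrt(w), so whitened
+# samples have identity covariance.
+#
+#     w, v = np.array([1., 4.]), np.eye(2)
+#     cov = CovViaEigendecomposition((w, v))
+#     cov._whiten(np.array([[1., 2.]]))    # array([[1., 1.]])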
+
+
+class CovViaPSD(Covariance):
+    """
+    Representation of a covariance provided via an instance of _PSD.
+    """
+
+    def __init__(self, psd):
+        self._LP = psd.U
+        self._log_pdet = psd.log_pdet
+        self._rank = psd.rank
+        self._covariance = psd._M
+        self._shape = psd._M.shape
+        self._psd = psd
+        self._allow_singular = False  # by default
+
+    def _whiten(self, x):
+        return x @ self._LP
+
+    def _support_mask(self, x):
+        return self._psd._support_mask(x)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_crosstab.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_crosstab.py
new file mode 100644
index 00000000..ae7eaa45
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_crosstab.py
@@ -0,0 +1,203 @@
+import numpy as np
+from scipy.sparse import coo_matrix
+from scipy._lib._bunch import _make_tuple_bunch
+
+
+CrosstabResult = _make_tuple_bunch(
+    "CrosstabResult", ["elements", "count"]
+)
+
+
+def crosstab(*args, levels=None, sparse=False):
+    """
+    Return a table of counts for each possible unique combination in ``*args``.
+
+    When ``len(args) > 1``, the array computed by this function is
+    often referred to as a *contingency table* [1]_.
+
+    The arguments must be sequences with the same length.  The second return
+    value, `count`, is an integer array with ``len(args)`` dimensions.  If
+    `levels` is None, the shape of `count` is ``(n0, n1, ...)``, where ``nk``
+    is the number of unique elements in ``args[k]``.
+
+    Parameters
+    ----------
+    *args : sequences
+        A sequence of sequences whose unique aligned elements are to be
+        counted.  The sequences in args must all be the same length.
+    levels : sequence, optional
+        If `levels` is given, it must be a sequence that is the same length as
+        `args`.  Each element in `levels` is either a sequence or None.  If it
+        is a sequence, it gives the values in the corresponding sequence in
+        `args` that are to be counted.  If any value in the sequences in `args`
+        does not occur in the corresponding sequence in `levels`, that value
+        is ignored and not counted in the returned array `count`.  The default
+        value of `levels` for ``args[i]`` is ``np.unique(args[i])``.
+    sparse : bool, optional
+        If True, return a sparse matrix.  The matrix will be an instance of
+        the `scipy.sparse.coo_matrix` class.  Because SciPy's sparse matrices
+        must be 2-d, only two input sequences are allowed when `sparse` is
+        True.  Default is False.
+
+    Returns
+    -------
+    res : CrosstabResult
+        An object containing the following attributes:
+
+        elements : tuple of numpy.ndarrays.
+            Tuple of length ``len(args)`` containing the arrays of elements
+            that are counted in `count`.  These can be interpreted as the
+            labels of the corresponding dimensions of `count`. If `levels` was
+            given, then if ``levels[i]`` is not None, ``elements[i]`` will
+            hold the values given in ``levels[i]``.
+        count : numpy.ndarray or scipy.sparse.coo_matrix
+            Counts of the unique elements in ``zip(*args)``, stored in an
+            array. Also known as a *contingency table* when ``len(args) > 1``.
+
+    See Also
+    --------
+    numpy.unique
+
+    Notes
+    -----
+    .. versionadded:: 1.7.0
+
+    References
+    ----------
+    .. [1] "Contingency table", http://en.wikipedia.org/wiki/Contingency_table
+
+    Examples
+    --------
+    >>> from scipy.stats.contingency import crosstab
+
+    Given the lists `a` and `x`, create a contingency table that counts the
+    frequencies of the corresponding pairs.
+
+    >>> a = ['A', 'B', 'A', 'A', 'B', 'B', 'A', 'A', 'B', 'B']
+    >>> x = ['X', 'X', 'X', 'Y', 'Z', 'Z', 'Y', 'Y', 'Z', 'Z']
+    >>> res = crosstab(a, x)
+    >>> avals, xvals = res.elements
+    >>> avals
+    array(['A', 'B'], dtype='<U1')
+    >>> xvals
+    array(['X', 'Y', 'Z'], dtype='<U1')
+    >>> res.count
+    array([[2, 3, 0],
+           [1, 0, 4]])
+
+    So `('A', 'X')` occurs twice, `('A', 'Y')` occurs three times, etc.
+
+    Higher dimensional contingency tables can be created.
+
+    >>> p = [0, 0, 0, 0, 1, 1, 1, 0, 0, 1]
+    >>> res = crosstab(a, x, p)
+    >>> res.count
+    array([[[2, 0],
+            [2, 1],
+            [0, 0]],
+           [[1, 0],
+            [0, 0],
+            [1, 3]]])
+    >>> res.count.shape
+    (2, 3, 2)
+
+    The values to be counted can be set by using the `levels` argument.
+    It allows the elements of interest in each input sequence to be
+    given explicitly instead of finding the unique elements of the sequence.
+
+    For example, suppose one of the arguments is an array containing the
+    answers to a survey question, with integer values 1 to 4.  Even if the
+    value 1 does not occur in the data, we want an entry for it in the table.
+
+    >>> q1 = [2, 3, 3, 2, 4, 4, 2, 3, 4, 4, 4, 3, 3, 3, 4]  # 1 does not occur.
+    >>> q2 = [4, 4, 2, 2, 2, 4, 1, 1, 2, 2, 4, 2, 2, 2, 4]  # 3 does not occur.
+    >>> options = [1, 2, 3, 4]
+    >>> res = crosstab(q1, q2, levels=(options, options))
+    >>> res.count
+    array([[0, 0, 0, 0],
+           [1, 1, 0, 1],
+           [1, 4, 0, 1],
+           [0, 3, 0, 3]])
+
+    If `levels` is given, but an element of `levels` is None, the unique values
+    of the corresponding argument are used. For example,
+
+    >>> res = crosstab(q1, q2, levels=(None, options))
+    >>> res.elements
+    [array([2, 3, 4]), [1, 2, 3, 4]]
+    >>> res.count
+    array([[1, 1, 0, 1],
+           [1, 4, 0, 1],
+           [0, 3, 0, 3]])
+
+    If we want to ignore the pairs where 4 occurs in ``q2``, we can
+    give just the values [1, 2] to `levels`, and the 4 will be ignored:
+
+    >>> res = crosstab(q1, q2, levels=(None, [1, 2]))
+    >>> res.elements
+    [array([2, 3, 4]), [1, 2]]
+    >>> res.count
+    array([[1, 1],
+           [1, 4],
+           [0, 3]])
+
+    Finally, let's repeat the first example, but return a sparse matrix:
+
+    >>> res = crosstab(a, x, sparse=True)
+    >>> res.count
+    <2x3 sparse matrix of type '<class 'numpy.int64'>'
+            with 4 stored elements in COOrdinate format>
+    >>> res.count.A
+    array([[2, 3, 0],
+           [1, 0, 4]])
+
+    """
+    nargs = len(args)
+    if nargs == 0:
+        raise TypeError("At least one input sequence is required.")
+
+    len0 = len(args[0])
+    if not all(len(a) == len0 for a in args[1:]):
+        raise ValueError("All input sequences must have the same length.")
+
+    if sparse and nargs != 2:
+        raise ValueError("When `sparse` is True, only two input sequences "
+                         "are allowed.")
+
+    if levels is None:
+        # Call np.unique with return_inverse=True on each argument.
+        actual_levels, indices = zip(*[np.unique(a, return_inverse=True)
+                                       for a in args])
+    else:
+        # `levels` is not None...
+        if len(levels) != nargs:
+            raise ValueError('len(levels) must equal the number of input '
+                             'sequences')
+
+        args = [np.asarray(arg) for arg in args]
+        mask = np.zeros((nargs, len0), dtype=np.bool_)
+        inv = np.zeros((nargs, len0), dtype=np.intp)
+        actual_levels = []
+        for k, (levels_list, arg) in enumerate(zip(levels, args)):
+            if levels_list is None:
+                levels_list, inv[k, :] = np.unique(arg, return_inverse=True)
+                mask[k, :] = True
+            else:
+                q = arg == np.asarray(levels_list).reshape(-1, 1)
+                mask[k, :] = np.any(q, axis=0)
+                qnz = q.T.nonzero()
+                inv[k, qnz[0]] = qnz[1]
+            actual_levels.append(levels_list)
+
+        mask_all = mask.all(axis=0)
+        indices = tuple(inv[:, mask_all])
+
+    if sparse:
+        count = coo_matrix((np.ones(len(indices[0]), dtype=int),
+                            (indices[0], indices[1])))
+        count.sum_duplicates()
+    else:
+        shape = [len(u) for u in actual_levels]
+        count = np.zeros(shape, dtype=int)
+        np.add.at(count, indices, 1)
+
+    return CrosstabResult(actual_levels, count)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_discrete_distns.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_discrete_distns.py
new file mode 100644
index 00000000..892751ba
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_discrete_distns.py
@@ -0,0 +1,1814 @@
+#
+# Author:  Travis Oliphant  2002-2011 with contributions from
+#          SciPy Developers 2004-2011
+#
+from functools import partial
+import warnings
+
+from scipy import special
+from scipy.special import entr, logsumexp, betaln, gammaln as gamln, zeta
+from scipy._lib._util import _lazywhere, rng_integers
+from scipy.interpolate import interp1d
+
+from numpy import floor, ceil, log, exp, sqrt, log1p, expm1, tanh, cosh, sinh
+
+import numpy as np
+
+from ._distn_infrastructure import (rv_discrete, get_distribution_names,
+                                    _check_shape, _ShapeInfo)
+import scipy.stats._boost as _boost
+from ._biasedurn import (_PyFishersNCHypergeometric,
+                        _PyWalleniusNCHypergeometric,
+                        _PyStochasticLib3)
+
+
+def _isintegral(x):
+    return x == np.round(x)
+
+
+class binom_gen(rv_discrete):
+    r"""A binomial discrete random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability mass function for `binom` is:
+
+    .. math::
+
+       f(k) = \binom{n}{k} p^k (1-p)^{n-k}
+
+    for :math:`k \in \{0, 1, \dots, n\}`, :math:`0 \leq p \leq 1`
+
+    `binom` takes :math:`n` and :math:`p` as shape parameters,
+    where :math:`p` is the probability of a single success
+    and :math:`1-p` is the probability of a single failure.
+
+    %(after_notes)s
+
+    %(example)s
+
+    See Also
+    --------
+    hypergeom, nbinom, nhypergeom
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("n", True, (0, np.inf), (True, False)),
+                _ShapeInfo("p", False, (0, 1), (True, True))]
+
+    def _rvs(self, n, p, size=None, random_state=None):
+        return random_state.binomial(n, p, size)
+
+    def _argcheck(self, n, p):
+        return (n >= 0) & _isintegral(n) & (p >= 0) & (p <= 1)
+
+    def _get_support(self, n, p):
+        return self.a, n
+
+    def _logpmf(self, x, n, p):
+        k = floor(x)
+        combiln = (gamln(n+1) - (gamln(k+1) + gamln(n-k+1)))
+        return combiln + special.xlogy(k, p) + special.xlog1py(n-k, -p)
+
+    def _pmf(self, x, n, p):
+        # binom.pmf(k) = choose(n, k) * p**k * (1-p)**(n-k)
+        return _boost._binom_pdf(x, n, p)
+
+    def _cdf(self, x, n, p):
+        k = floor(x)
+        return _boost._binom_cdf(k, n, p)
+
+    def _sf(self, x, n, p):
+        k = floor(x)
+        return _boost._binom_sf(k, n, p)
+
+    def _isf(self, x, n, p):
+        return _boost._binom_isf(x, n, p)
+
+    def _ppf(self, q, n, p):
+        return _boost._binom_ppf(q, n, p)
+
+    def _stats(self, n, p, moments='mv'):
+        mu = _boost._binom_mean(n, p)
+        var = _boost._binom_variance(n, p)
+        g1, g2 = None, None
+        if 's' in moments:
+            g1 = _boost._binom_skewness(n, p)
+        if 'k' in moments:
+            g2 = _boost._binom_kurtosis_excess(n, p)
+        return mu, var, g1, g2
+
+    def _entropy(self, n, p):
+        k = np.r_[0:n + 1]
+        vals = self._pmf(k, n, p)
+        return np.sum(entr(vals), axis=0)
+
+
+binom = binom_gen(name='binom')
+
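+# Worked example (illustrative): binom.pmf(2, n=4, p=0.5)
+# = comb(4, 2) * 0.5**2 * 0.5**2 = 6 / 16 = 0.375.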
+
+class bernoulli_gen(binom_gen):
+    r"""A Bernoulli discrete random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability mass function for `bernoulli` is:
+
+    .. math::
+
+       f(k) = \begin{cases}1-p  &\text{if } k = 0\\
+                           p    &\text{if } k = 1\end{cases}
+
+    for :math:`k` in :math:`\{0, 1\}`, :math:`0 \leq p \leq 1`
+
+    `bernoulli` takes :math:`p` as shape parameter,
+    where :math:`p` is the probability of a single success
+    and :math:`1-p` is the probability of a single failure.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("p", False, (0, 1), (True, True))]
+
+    def _rvs(self, p, size=None, random_state=None):
+        return binom_gen._rvs(self, 1, p, size=size, random_state=random_state)
+
+    def _argcheck(self, p):
+        return (p >= 0) & (p <= 1)
+
+    def _get_support(self, p):
+        # Overrides binom_gen._get_support.
+        return self.a, self.b
+
+    def _logpmf(self, x, p):
+        return binom._logpmf(x, 1, p)
+
+    def _pmf(self, x, p):
+        # bernoulli.pmf(k) = 1-p  if k = 0
+        #                  = p    if k = 1
+        return binom._pmf(x, 1, p)
+
+    def _cdf(self, x, p):
+        return binom._cdf(x, 1, p)
+
+    def _sf(self, x, p):
+        return binom._sf(x, 1, p)
+
+    def _isf(self, x, p):
+        return binom._isf(x, 1, p)
+
+    def _ppf(self, q, p):
+        return binom._ppf(q, 1, p)
+
+    def _stats(self, p):
+        return binom._stats(1, p)
+
+    def _entropy(self, p):
+        return entr(p) + entr(1-p)
+
+
+bernoulli = bernoulli_gen(b=1, name='bernoulli')
+
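+# Worked example (illustrative): bernoulli.pmf(1, p=0.3) = 0.3 and
+# bernoulli.pmf(0, p=0.3) = 0.7; the entropy entr(p) + entr(1-p)
+# = -0.3*log(0.3) - 0.7*log(0.7) ~= 0.6109 nats.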
+
+class betabinom_gen(rv_discrete):
+    r"""A beta-binomial discrete random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The beta-binomial distribution is a binomial distribution with a
+    probability of success `p` that follows a beta distribution.
+
+    The probability mass function for `betabinom` is:
+
+    .. math::
+
+       f(k) = \binom{n}{k} \frac{B(k + a, n - k + b)}{B(a, b)}
+
+    for :math:`k \in \{0, 1, \dots, n\}`, :math:`n \geq 0`, :math:`a > 0`,
+    :math:`b > 0`, where :math:`B(a, b)` is the beta function.
+
+    `betabinom` takes :math:`n`, :math:`a`, and :math:`b` as shape parameters.
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Beta-binomial_distribution
+
+    %(after_notes)s
+
+    .. versionadded:: 1.4.0
+
+    See Also
+    --------
+    beta, binom
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("n", True, (0, np.inf), (True, False)),
+                _ShapeInfo("a", False, (0, np.inf), (False, False)),
+                _ShapeInfo("b", False, (0, np.inf), (False, False))]
+
+    def _rvs(self, n, a, b, size=None, random_state=None):
+        p = random_state.beta(a, b, size)
+        return random_state.binomial(n, p, size)
+
+    def _get_support(self, n, a, b):
+        return 0, n
+
+    def _argcheck(self, n, a, b):
+        return (n >= 0) & _isintegral(n) & (a > 0) & (b > 0)
+
+    def _logpmf(self, x, n, a, b):
+        k = floor(x)
+        combiln = -log(n + 1) - betaln(n - k + 1, k + 1)
+        return combiln + betaln(k + a, n - k + b) - betaln(a, b)
+
+    def _pmf(self, x, n, a, b):
+        return exp(self._logpmf(x, n, a, b))
+
+    def _stats(self, n, a, b, moments='mv'):
+        e_p = a / (a + b)
+        e_q = 1 - e_p
+        mu = n * e_p
+        var = n * (a + b + n) * e_p * e_q / (a + b + 1)
+        g1, g2 = None, None
+        if 's' in moments:
+            g1 = 1.0 / sqrt(var)
+            g1 *= (a + b + 2 * n) * (b - a)
+            g1 /= (a + b + 2) * (a + b)
+        if 'k' in moments:
+            g2 = a + b
+            g2 *= (a + b - 1 + 6 * n)
+            g2 += 3 * a * b * (n - 2)
+            g2 += 6 * n ** 2
+            g2 -= 3 * e_p * b * n * (6 - n)
+            g2 -= 18 * e_p * e_q * n ** 2
+            g2 *= (a + b) ** 2 * (1 + a + b)
+            g2 /= (n * a * b * (a + b + 2) * (a + b + 3) * (a + b + n))
+            g2 -= 3
+        return mu, var, g1, g2
+
+
+betabinom = betabinom_gen(name='betabinom')
+
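+# Worked example (illustrative): for n=10, a=2, b=3 the moments above give
+# mean = n*a/(a+b) = 4 and var = n*(a+b+n)*e_p*e_q/(a+b+1)
+# = 10*15*0.4*0.6/6 = 6.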
+
+class nbinom_gen(rv_discrete):
+    r"""A negative binomial discrete random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The negative binomial distribution describes a sequence of i.i.d. Bernoulli
+    trials, repeated until a predefined, non-random number of successes occurs.
+
+    The probability mass function of the number of failures for `nbinom` is:
+
+    .. math::
+
+       f(k) = \binom{k+n-1}{n-1} p^n (1-p)^k
+
+    for :math:`k \ge 0`, :math:`0 < p \leq 1`
+
+    `nbinom` takes :math:`n` and :math:`p` as shape parameters where :math:`n`
+    is the number of successes, :math:`p` is the probability of a single
+    success, and :math:`1-p` is the probability of a single failure.
+
+    Another common parameterization of the negative binomial distribution is
+    in terms of the mean number of failures :math:`\mu` to achieve :math:`n`
+    successes. The mean :math:`\mu` is related to the probability of success
+    as
+
+    .. math::
+
+       p = \frac{n}{n + \mu}
+
+    The number of successes :math:`n` may also be specified in terms of a
+    "dispersion", "heterogeneity", or "aggregation" parameter :math:`\alpha`,
+    which relates the mean :math:`\mu` to the variance :math:`\sigma^2`,
+    e.g. :math:`\sigma^2 = \mu + \alpha \mu^2`. Regardless of the convention
+    used for :math:`\alpha`,
+
+    .. math::
+
+       p &= \frac{\mu}{\sigma^2} \\
+       n &= \frac{\mu^2}{\sigma^2 - \mu}
+
+    %(after_notes)s
+
+    %(example)s
+
+    See Also
+    --------
+    hypergeom, binom, nhypergeom
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("n", True, (0, np.inf), (True, False)),
+                _ShapeInfo("p", False, (0, 1), (True, True))]
+
+    def _rvs(self, n, p, size=None, random_state=None):
+        return random_state.negative_binomial(n, p, size)
+
+    def _argcheck(self, n, p):
+        return (n > 0) & (p > 0) & (p <= 1)
+
+    def _pmf(self, x, n, p):
+        # nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k
+        return _boost._nbinom_pdf(x, n, p)
+
+    def _logpmf(self, x, n, p):
+        coeff = gamln(n+x) - gamln(x+1) - gamln(n)
+        return coeff + n*log(p) + special.xlog1py(x, -p)
+
+    def _cdf(self, x, n, p):
+        k = floor(x)
+        return _boost._nbinom_cdf(k, n, p)
+
+    def _logcdf(self, x, n, p):
+        k = floor(x)
+        cdf = self._cdf(k, n, p)
+        cond = cdf > 0.5
+
+        def f1(k, n, p):
+            return np.log1p(-special.betainc(k + 1, n, 1 - p))
+
+        # do calc in place
+        logcdf = cdf
+        with np.errstate(divide='ignore'):
+            logcdf[cond] = f1(k[cond], n[cond], p[cond])
+            logcdf[~cond] = np.log(cdf[~cond])
+        return logcdf
+
+    def _sf(self, x, n, p):
+        k = floor(x)
+        return _boost._nbinom_sf(k, n, p)
+
+    def _isf(self, x, n, p):
+        with warnings.catch_warnings():
+            # See gh-14901
+            message = "overflow encountered in _nbinom_isf"
+            warnings.filterwarnings('ignore', message=message)
+            return _boost._nbinom_isf(x, n, p)
+
+    def _ppf(self, q, n, p):
+        with warnings.catch_warnings():
+            message = "overflow encountered in _nbinom_ppf"
+            warnings.filterwarnings('ignore', message=message)
+            return _boost._nbinom_ppf(q, n, p)
+
+    def _stats(self, n, p):
+        return (
+            _boost._nbinom_mean(n, p),
+            _boost._nbinom_variance(n, p),
+            _boost._nbinom_skewness(n, p),
+            _boost._nbinom_kurtosis_excess(n, p),
+        )
+
+
+nbinom = nbinom_gen(name='nbinom')
+
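+# Worked example (illustrative): with n=5 successes and p=0.5, the mean
+# number of failures is n*(1-p)/p = 5, consistent with the relation
+# p = n/(n + mu) = 5/(5 + 5) = 0.5 given in the docstring.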
+
+class geom_gen(rv_discrete):
+    r"""A geometric discrete random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability mass function for `geom` is:
+
+    .. math::
+
+        f(k) = (1-p)^{k-1} p
+
+    for :math:`k \ge 1`, :math:`0 < p \leq 1`
+
+    `geom` takes :math:`p` as shape parameter,
+    where :math:`p` is the probability of a single success
+    and :math:`1-p` is the probability of a single failure.
+
+    %(after_notes)s
+
+    See Also
+    --------
+    planck
+
+    %(example)s
+
+    """
+
+    def _shape_info(self):
+        return [_ShapeInfo("p", False, (0, 1), (True, True))]
+
+    def _rvs(self, p, size=None, random_state=None):
+        return random_state.geometric(p, size=size)
+
+    def _argcheck(self, p):
+        return (p <= 1) & (p > 0)
+
+    def _pmf(self, k, p):
+        return np.power(1-p, k-1) * p
+
+    def _logpmf(self, k, p):
+        return special.xlog1py(k - 1, -p) + log(p)
+
+    def _cdf(self, x, p):
+        k = floor(x)
+        return -expm1(log1p(-p)*k)
+
+    def _sf(self, x, p):
+        return np.exp(self._logsf(x, p))
+
+    def _logsf(self, x, p):
+        k = floor(x)
+        return k*log1p(-p)
+
+    def _ppf(self, q, p):
+        vals = ceil(log1p(-q) / log1p(-p))
+        temp = self._cdf(vals-1, p)
+        return np.where((temp >= q) & (vals > 0), vals-1, vals)
+
+    def _stats(self, p):
+        mu = 1.0/p
+        qr = 1.0-p
+        var = qr / p / p
+        g1 = (2.0-p) / sqrt(qr)
+        g2 = np.polyval([1, -6, 6], p)/(1.0-p)
+        return mu, var, g1, g2
+
+
+geom = geom_gen(a=1, name='geom', longname="A geometric")
+
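+# Worked example (illustrative): geom.pmf(1, p=0.5) = (1-0.5)**0 * 0.5 = 0.5
+# and geom.cdf(3, p=0.5) = 1 - (1-0.5)**3 = 0.875.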
+
+class hypergeom_gen(rv_discrete):
+    r"""A hypergeometric discrete random variable.
+
+    The hypergeometric distribution models drawing objects from a bin.
+    `M` is the total number of objects and `n` is the total number of
+    Type I objects.  The random variate represents the number of Type I
+    objects in `N` drawn without replacement from the total population.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The symbols used to denote the shape parameters (`M`, `n`, and `N`) are not
+    universally accepted.  See the Examples for a clarification of the
+    definitions used here.
+
+    The probability mass function is defined as,
+
+    .. math:: p(k, M, n, N) = \frac{\binom{n}{k} \binom{M - n}{N - k}}
+                                   {\binom{M}{N}}
+
+    for :math:`k \in [\max(0, N - M + n), \min(n, N)]`, where the binomial
+    coefficients are defined as,
+
+    .. math:: \binom{n}{k} \equiv \frac{n!}{k! (n - k)!}.
+
+    %(after_notes)s
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.stats import hypergeom
+    >>> import matplotlib.pyplot as plt
+
+    Suppose we have a collection of 20 animals, of which 7 are dogs.  Then if
+    we want to know the probability of finding a given number of dogs if we
+    choose at random 12 of the 20 animals, we can initialize a frozen
+    distribution and plot the probability mass function:
+
+    >>> [M, n, N] = [20, 7, 12]
+    >>> rv = hypergeom(M, n, N)
+    >>> x = np.arange(0, n+1)
+    >>> pmf_dogs = rv.pmf(x)
+
+    >>> fig = plt.figure()
+    >>> ax = fig.add_subplot(111)
+    >>> ax.plot(x, pmf_dogs, 'bo')
+    >>> ax.vlines(x, 0, pmf_dogs, lw=2)
+    >>> ax.set_xlabel('# of dogs in our group of chosen animals')
+    >>> ax.set_ylabel('hypergeom PMF')
+    >>> plt.show()
+
+    Instead of using a frozen distribution we can also use `hypergeom`
+    methods directly.  To for example obtain the cumulative distribution
+    function, use:
+
+    >>> prb = hypergeom.cdf(x, M, n, N)
+
+    And to generate random numbers:
+
+    >>> R = hypergeom.rvs(M, n, N, size=10)
+
+    See Also
+    --------
+    nhypergeom, binom, nbinom
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("M", True, (0, np.inf), (True, False)),
+                _ShapeInfo("n", True, (0, np.inf), (True, False)),
+                _ShapeInfo("N", True, (0, np.inf), (True, False))]
+
+    def _rvs(self, M, n, N, size=None, random_state=None):
+        return random_state.hypergeometric(n, M-n, N, size=size)
+
+    def _get_support(self, M, n, N):
+        return np.maximum(N-(M-n), 0), np.minimum(n, N)
+
+    def _argcheck(self, M, n, N):
+        cond = (M > 0) & (n >= 0) & (N >= 0)
+        cond &= (n <= M) & (N <= M)
+        cond &= _isintegral(M) & _isintegral(n) & _isintegral(N)
+        return cond
+
+    def _logpmf(self, k, M, n, N):
+        tot, good = M, n
+        bad = tot - good
+        result = (betaln(good+1, 1) + betaln(bad+1, 1) + betaln(tot-N+1, N+1) -
+                  betaln(k+1, good-k+1) - betaln(N-k+1, bad-N+k+1) -
+                  betaln(tot+1, 1))
+        return result
+
+    def _pmf(self, k, M, n, N):
+        return _boost._hypergeom_pdf(k, n, N, M)
+
+    def _cdf(self, k, M, n, N):
+        return _boost._hypergeom_cdf(k, n, N, M)
+
+    def _stats(self, M, n, N):
+        M, n, N = 1. * M, 1. * n, 1. * N
+        m = M - n
+
+        # Boost kurtosis_excess doesn't return the same as the value
+        # computed here.
+        g2 = M * (M + 1) - 6. * N * (M - N) - 6. * n * m
+        g2 *= (M - 1) * M * M
+        g2 += 6. * n * N * (M - N) * m * (5. * M - 6)
+        g2 /= n * N * (M - N) * m * (M - 2.) * (M - 3.)
+        return (
+            _boost._hypergeom_mean(n, N, M),
+            _boost._hypergeom_variance(n, N, M),
+            _boost._hypergeom_skewness(n, N, M),
+            g2,
+        )
+
+    def _entropy(self, M, n, N):
+        k = np.r_[N - (M - n):min(n, N) + 1]
+        vals = self.pmf(k, M, n, N)
+        return np.sum(entr(vals), axis=0)
+
+    def _sf(self, k, M, n, N):
+        return _boost._hypergeom_sf(k, n, N, M)
+
+    def _logsf(self, k, M, n, N):
+        res = []
+        for quant, tot, good, draw in zip(*np.broadcast_arrays(k, M, n, N)):
+            if (quant + 0.5) * (tot + 0.5) < (good - 0.5) * (draw - 0.5):
+                # Fewer terms to sum if we calculate log(1-cdf)
+                res.append(log1p(-exp(self.logcdf(quant, tot, good, draw))))
+            else:
+                # Integration over probability mass function using logsumexp
+                k2 = np.arange(quant + 1, draw + 1)
+                res.append(logsumexp(self._logpmf(k2, tot, good, draw)))
+        return np.asarray(res)
+
+    def _logcdf(self, k, M, n, N):
+        res = []
+        for quant, tot, good, draw in zip(*np.broadcast_arrays(k, M, n, N)):
+            if (quant + 0.5) * (tot + 0.5) > (good - 0.5) * (draw - 0.5):
+                # Fewer terms to sum if we calculate log(1-sf)
+                res.append(log1p(-exp(self.logsf(quant, tot, good, draw))))
+            else:
+                # Integration over probability mass function using logsumexp
+                k2 = np.arange(0, quant + 1)
+                res.append(logsumexp(self._logpmf(k2, tot, good, draw)))
+        return np.asarray(res)
+
+
+hypergeom = hypergeom_gen(name='hypergeom')
+
+
+class nhypergeom_gen(rv_discrete):
+    r"""A negative hypergeometric discrete random variable.
+
+    Consider a box containing :math:`M` balls: :math:`n` red and
+    :math:`M-n` blue. We randomly sample balls from the box, one
+    at a time and *without* replacement, until we have picked :math:`r`
+    blue balls. `nhypergeom` is the distribution of the number of
+    red balls :math:`k` we have picked.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The symbols used to denote the shape parameters (`M`, `n`, and `r`) are not
+    universally accepted. See the Examples for a clarification of the
+    definitions used here.
+
+    The probability mass function is defined as,
+
+    .. math:: f(k; M, n, r) = \frac{{{k+r-1}\choose{k}}{{M-r-k}\choose{n-k}}}
+                                   {{M \choose n}}
+
+    for :math:`k \in [0, n]`, :math:`n \in [0, M]`, :math:`r \in [0, M-n]`,
+    and the binomial coefficient is:
+
+    .. math:: \binom{n}{k} \equiv \frac{n!}{k! (n - k)!}.
+
+    It is equivalent to observing :math:`k` successes in :math:`k+r-1`
+    samples, with the :math:`k+r`-th sample being a failure. The former
+    can be modelled as a hypergeometric distribution. The probability
+    of the latter is simply the number of failures remaining
+    :math:`M-n-(r-1)` divided by the size of the remaining population
+    :math:`M-(k+r-1)`. This relationship can be shown as:
+
+    .. math:: NHG(k;M,n,r) = HG(k;M,n,k+r-1)\frac{(M-n-(r-1))}{(M-(k+r-1))}
+
+    where :math:`NHG` is the probability mass function (PMF) of the
+    negative hypergeometric distribution and :math:`HG` is the
+    PMF of the hypergeometric distribution.
+
+    %(after_notes)s
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.stats import nhypergeom
+    >>> import matplotlib.pyplot as plt
+
+    Suppose we have a collection of 20 animals, of which 7 are dogs.
+    Then if we want to know the probability of finding a given number
+    of dogs (successes) in a sample with exactly 12 animals that
+    aren't dogs (failures), we can initialize a frozen distribution
+    and plot the probability mass function:
+
+    >>> M, n, r = [20, 7, 12]
+    >>> rv = nhypergeom(M, n, r)
+    >>> x = np.arange(0, n+2)
+    >>> pmf_dogs = rv.pmf(x)
+
+    >>> fig = plt.figure()
+    >>> ax = fig.add_subplot(111)
+    >>> ax.plot(x, pmf_dogs, 'bo')
+    >>> ax.vlines(x, 0, pmf_dogs, lw=2)
+    >>> ax.set_xlabel('# of dogs in our group with given 12 failures')
+    >>> ax.set_ylabel('nhypergeom PMF')
+    >>> plt.show()
+
+    Instead of using a frozen distribution we can also use `nhypergeom`
+    methods directly.  To for example obtain the probability mass
+    function, use:
+
+    >>> prb = nhypergeom.pmf(x, M, n, r)
+
+    And to generate random numbers:
+
+    >>> R = nhypergeom.rvs(M, n, r, size=10)
+
+    To verify the relationship between `hypergeom` and `nhypergeom`, use:
+
+    >>> from scipy.stats import hypergeom, nhypergeom
+    >>> M, n, r = 45, 13, 8
+    >>> k = 6
+    >>> nhypergeom.pmf(k, M, n, r)
+    0.06180776620271643
+    >>> hypergeom.pmf(k, M, n, k+r-1) * (M - n - (r-1)) / (M - (k+r-1))
+    0.06180776620271644
+
+    See Also
+    --------
+    hypergeom, binom, nbinom
+
+    References
+    ----------
+    .. [1] Negative Hypergeometric Distribution on Wikipedia
+           https://en.wikipedia.org/wiki/Negative_hypergeometric_distribution
+
+    .. [2] Negative Hypergeometric Distribution from
+           http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Negativehypergeometric.pdf
+
+    """
+
+    def _shape_info(self):
+        return [_ShapeInfo("M", True, (0, np.inf), (True, False)),
+                _ShapeInfo("n", True, (0, np.inf), (True, False)),
+                _ShapeInfo("r", True, (0, np.inf), (True, False))]
+
+    def _get_support(self, M, n, r):
+        return 0, n
+
+    def _argcheck(self, M, n, r):
+        cond = (n >= 0) & (n <= M) & (r >= 0) & (r <= M-n)
+        cond &= _isintegral(M) & _isintegral(n) & _isintegral(r)
+        return cond
+
+    def _rvs(self, M, n, r, size=None, random_state=None):
+
+        @_vectorize_rvs_over_shapes
+        def _rvs1(M, n, r, size, random_state):
+            # invert cdf by calculating all values in support, scalar M, n, r
+            a, b = self.support(M, n, r)
+            ks = np.arange(a, b+1)
+            cdf = self.cdf(ks, M, n, r)
+            ppf = interp1d(cdf, ks, kind='next', fill_value='extrapolate')
+            rvs = ppf(random_state.uniform(size=size)).astype(int)
+            if size is None:
+                return rvs.item()
+            return rvs
+
+        return _rvs1(M, n, r, size=size, random_state=random_state)
+
+    def _logpmf(self, k, M, n, r):
+        cond = ((r == 0) & (k == 0))
+        result = _lazywhere(~cond, (k, M, n, r),
+                            lambda k, M, n, r:
+                                (-betaln(k+1, r) + betaln(k+r, 1) -
+                                 betaln(n-k+1, M-r-n+1) + betaln(M-r-k+1, 1) +
+                                 betaln(n+1, M-n+1) - betaln(M+1, 1)),
+                            fillvalue=0.0)
+        return result
+
+    def _pmf(self, k, M, n, r):
+        # same as the following but numerically more precise
+        # return comb(k+r-1, k) * comb(M-r-k, n-k) / comb(M, n)
+        return exp(self._logpmf(k, M, n, r))
+
+    def _stats(self, M, n, r):
+        # Promote the datatype to at least float
+        # mu = rn / (M-n+1)
+        M, n, r = 1.*M, 1.*n, 1.*r
+        mu = r*n / (M-n+1)
+
+        var = r*(M+1)*n / ((M-n+1)*(M-n+2)) * (1 - r / (M-n+1))
+
+        # The skew and kurtosis are mathematically
+        # intractable so return `None`. See [2]_.
+        g1, g2 = None, None
+        return mu, var, g1, g2
+
+
+nhypergeom = nhypergeom_gen(name='nhypergeom')
+
+
+# FIXME: Fails _cdfvec
+class logser_gen(rv_discrete):
+    r"""A Logarithmic (Log-Series, Series) discrete random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability mass function for `logser` is:
+
+    .. math::
+
+        f(k) = - \frac{p^k}{k \log(1-p)}
+
+    for :math:`k \ge 1`, :math:`0 < p < 1`
+
+    `logser` takes :math:`p` as shape parameter,
+    where :math:`p` is the probability of a single success
+    and :math:`1-p` is the probability of a single failure.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+
+    def _shape_info(self):
+        return [_ShapeInfo("p", False, (0, 1), (True, True))]
+
+    def _rvs(self, p, size=None, random_state=None):
+        # Sampling looks wrong for p > 0.5 (too few k=1 values);
+        # the generic rv_discrete sampler is worse (no k=1 at all).
+        return random_state.logseries(p, size=size)
+
+    def _argcheck(self, p):
+        return (p > 0) & (p < 1)
+
+    def _pmf(self, k, p):
+        # logser.pmf(k) = - p**k / (k*log(1-p))
+        return -np.power(p, k) * 1.0 / k / special.log1p(-p)
+
+    def _stats(self, p):
+        r = special.log1p(-p)
+        mu = p / (p - 1.0) / r
+        mu2p = -p / r / (p - 1.0)**2
+        var = mu2p - mu*mu
+        mu3p = -p / r * (1.0+p) / (1.0 - p)**3
+        mu3 = mu3p - 3*mu*mu2p + 2*mu**3
+        g1 = mu3 / np.power(var, 1.5)
+
+        mu4p = -p / r * (
+            1.0 / (p-1)**2 - 6*p / (p - 1)**3 + 6*p*p / (p-1)**4)
+        mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
+        g2 = mu4 / var**2 - 3.0
+        return mu, var, g1, g2
+
+
+logser = logser_gen(a=1, name='logser', longname='A logarithmic')
+
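+# Worked example (illustrative): logser.pmf(1, p=0.5)
+# = -0.5 / (1 * log(0.5)) = 0.5 / log(2) ~= 0.7213.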
+
+class poisson_gen(rv_discrete):
+    r"""A Poisson discrete random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability mass function for `poisson` is:
+
+    .. math::
+
+        f(k) = \exp(-\mu) \frac{\mu^k}{k!}
+
+    for :math:`k \ge 0`.
+
+    `poisson` takes :math:`\mu \geq 0` as shape parameter.
+    When :math:`\mu = 0`, the ``pmf`` method
+    returns ``1.0`` at quantile :math:`k = 0`.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+
+    def _shape_info(self):
+        return [_ShapeInfo("mu", False, (0, np.inf), (True, False))]
+
+    # Override rv_discrete._argcheck to allow mu=0.
+    def _argcheck(self, mu):
+        return mu >= 0
+
+    def _rvs(self, mu, size=None, random_state=None):
+        return random_state.poisson(mu, size)
+
+    def _logpmf(self, k, mu):
+        Pk = special.xlogy(k, mu) - gamln(k + 1) - mu
+        return Pk
+
+    def _pmf(self, k, mu):
+        # poisson.pmf(k) = exp(-mu) * mu**k / k!
+        return exp(self._logpmf(k, mu))
+
+    def _cdf(self, x, mu):
+        k = floor(x)
+        return special.pdtr(k, mu)
+
+    def _sf(self, x, mu):
+        k = floor(x)
+        return special.pdtrc(k, mu)
+
+    def _ppf(self, q, mu):
+        vals = ceil(special.pdtrik(q, mu))
+        vals1 = np.maximum(vals - 1, 0)
+        temp = special.pdtr(vals1, mu)
+        return np.where(temp >= q, vals1, vals)
+
+    def _stats(self, mu):
+        var = mu
+        tmp = np.asarray(mu)
+        mu_nonzero = tmp > 0
+        g1 = _lazywhere(mu_nonzero, (tmp,), lambda x: sqrt(1.0/x), np.inf)
+        g2 = _lazywhere(mu_nonzero, (tmp,), lambda x: 1.0/x, np.inf)
+        return mu, var, g1, g2
+
+
+poisson = poisson_gen(name="poisson", longname='A Poisson')
+
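+# Worked example (illustrative): poisson.pmf(0, mu=1) = exp(-1) ~= 0.3679,
+# and poisson.pmf(1, mu=1) = exp(-1) * 1**1 / 1! gives the same value.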
+
+class planck_gen(rv_discrete):
+    r"""A Planck discrete exponential random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability mass function for `planck` is:
+
+    .. math::
+
+        f(k) = (1-\exp(-\lambda)) \exp(-\lambda k)
+
+    for :math:`k \ge 0` and :math:`\lambda > 0`.
+
+    `planck` takes :math:`\lambda` as shape parameter. The Planck distribution
+    can be written as a geometric distribution (`geom`) with
+    :math:`p = 1 - \exp(-\lambda)` shifted by ``loc = -1``.
+
+    %(after_notes)s
+
+    See Also
+    --------
+    geom
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("lambda", False, (0, np.inf), (False, False))]
+
+    def _argcheck(self, lambda_):
+        return lambda_ > 0
+
+    def _pmf(self, k, lambda_):
+        return -expm1(-lambda_)*exp(-lambda_*k)
+
+    def _cdf(self, x, lambda_):
+        k = floor(x)
+        return -expm1(-lambda_*(k+1))
+
+    def _sf(self, x, lambda_):
+        return exp(self._logsf(x, lambda_))
+
+    def _logsf(self, x, lambda_):
+        k = floor(x)
+        return -lambda_*(k+1)
+
+    def _ppf(self, q, lambda_):
+        vals = ceil(-1.0/lambda_ * log1p(-q)-1)
+        vals1 = (vals-1).clip(*(self._get_support(lambda_)))
+        temp = self._cdf(vals1, lambda_)
+        return np.where(temp >= q, vals1, vals)
+
+    def _rvs(self, lambda_, size=None, random_state=None):
+        # use relation to geometric distribution for sampling
+        p = -expm1(-lambda_)
+        return random_state.geometric(p, size=size) - 1.0
+
+    def _stats(self, lambda_):
+        mu = 1/expm1(lambda_)
+        var = exp(-lambda_)/(expm1(-lambda_))**2
+        g1 = 2*cosh(lambda_/2.0)
+        g2 = 4+2*cosh(lambda_)
+        return mu, var, g1, g2
+
+    def _entropy(self, lambda_):
+        C = -expm1(-lambda_)
+        return lambda_*exp(-lambda_)/C - log(C)
+
+
+planck = planck_gen(a=0, name='planck', longname='A discrete exponential ')
+
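+# Worked example (illustrative) of the relation to `geom` noted in the
+# docstring: for lambda_ = log(2), p = 1 - exp(-lambda_) = 0.5, and
+# planck.pmf(0, log(2)) = 0.5 = geom.pmf(1, 0.5), i.e. geom shifted by -1.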
+
+class boltzmann_gen(rv_discrete):
+    r"""A Boltzmann (Truncated Discrete Exponential) random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability mass function for `boltzmann` is:
+
+    .. math::
+
+        f(k) = (1-\exp(-\lambda)) \exp(-\lambda k) / (1-\exp(-\lambda N))
+
+    for :math:`k = 0,..., N-1`.
+
+    `boltzmann` takes :math:`\lambda > 0` and :math:`N > 0` as shape parameters.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("lambda_", False, (0, np.inf), (False, False)),
+                _ShapeInfo("N", True, (0, np.inf), (False, False))]
+
+    def _argcheck(self, lambda_, N):
+        return (lambda_ > 0) & (N > 0) & _isintegral(N)
+
+    def _get_support(self, lambda_, N):
+        return self.a, N - 1
+
+    def _pmf(self, k, lambda_, N):
+        # boltzmann.pmf(k) =
+        #     (1-exp(-lambda_)) * exp(-lambda_*k) / (1-exp(-lambda_*N))
+        fact = (1-exp(-lambda_))/(1-exp(-lambda_*N))
+        return fact*exp(-lambda_*k)
+
+    def _cdf(self, x, lambda_, N):
+        k = floor(x)
+        return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N))
+
+    def _ppf(self, q, lambda_, N):
+        qnew = q*(1-exp(-lambda_*N))
+        vals = ceil(-1.0/lambda_ * log(1-qnew)-1)
+        vals1 = (vals-1).clip(0.0, np.inf)
+        temp = self._cdf(vals1, lambda_, N)
+        return np.where(temp >= q, vals1, vals)
+
+    def _stats(self, lambda_, N):
+        z = exp(-lambda_)
+        zN = exp(-lambda_*N)
+        mu = z/(1.0-z)-N*zN/(1-zN)
+        var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2
+        trm = (1-zN)/(1-z)
+        trm2 = (z*trm**2 - N*N*zN)
+        g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN)
+        g1 = g1 / trm2**(1.5)
+        g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN)
+        g2 = g2 / trm2 / trm2
+        return mu, var, g1, g2
+
+
+boltzmann = boltzmann_gen(name='boltzmann', a=0,
+                          longname='A truncated discrete exponential ')
+
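+# Worked example (illustrative): for lambda_ = log(2) and N = 2,
+# boltzmann.pmf(0) = (1 - 0.5)/(1 - 0.25) = 2/3 and pmf(1) = 1/3,
+# which sum to 1 over the support {0, 1}.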
+
+class randint_gen(rv_discrete):
+    r"""A uniform discrete random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability mass function for `randint` is:
+
+    .. math::
+
+        f(k) = \frac{1}{\texttt{high} - \texttt{low}}
+
+    for :math:`k \in \{\texttt{low}, \dots, \texttt{high} - 1\}`.
+
+    `randint` takes :math:`\texttt{low}` and :math:`\texttt{high}` as shape
+    parameters.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+
+    def _shape_info(self):
+        return [_ShapeInfo("low", True, (-np.inf, np.inf), (False, False)),
+                _ShapeInfo("high", True, (-np.inf, np.inf), (False, False))]
+
+    def _argcheck(self, low, high):
+        return (high > low) & _isintegral(low) & _isintegral(high)
+
+    def _get_support(self, low, high):
+        return low, high-1
+
+    def _pmf(self, k, low, high):
+        # randint.pmf(k) = 1./(high - low)
+        p = np.ones_like(k) / (high - low)
+        return np.where((k >= low) & (k < high), p, 0.)
+
+    def _cdf(self, x, low, high):
+        k = floor(x)
+        return (k - low + 1.) / (high - low)
+
+    def _ppf(self, q, low, high):
+        vals = ceil(q * (high - low) + low) - 1
+        vals1 = (vals - 1).clip(low, high)
+        temp = self._cdf(vals1, low, high)
+        return np.where(temp >= q, vals1, vals)
+
+    def _stats(self, low, high):
+        m2, m1 = np.asarray(high), np.asarray(low)
+        mu = (m2 + m1 - 1.0) / 2
+        d = m2 - m1
+        var = (d*d - 1) / 12.0
+        g1 = 0.0
+        g2 = -6.0/5.0 * (d*d + 1.0) / (d*d - 1.0)
+        return mu, var, g1, g2
+
+    def _rvs(self, low, high, size=None, random_state=None):
+        """An array of *size* random integers >= ``low`` and < ``high``."""
+        if np.asarray(low).size == 1 and np.asarray(high).size == 1:
+            # no need to vectorize in that case
+            return rng_integers(random_state, low, high, size=size)
+
+        if size is not None:
+            # NumPy's RandomState.randint() doesn't broadcast its arguments.
+            # Use `broadcast_to()` to extend the shapes of low and high
+            # up to size.  Then we can use the numpy.vectorize'd
+            # randint without needing to pass it a `size` argument.
+            low = np.broadcast_to(low, size)
+            high = np.broadcast_to(high, size)
+        randint = np.vectorize(partial(rng_integers, random_state),
+                               otypes=[np.int_])
+        return randint(low, high)
+
+    def _entropy(self, low, high):
+        return log(high - low)
+
+
+randint = randint_gen(name='randint', longname='A discrete uniform '
+                      '(random integer)')
+
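+# Worked example (illustrative): randint.pmf(k, low=1, high=6)
+# = 1/(6 - 1) = 0.2 for k in {1, ..., 5} and 0 elsewhere; the entropy is
+# log(high - low) = log(5) ~= 1.609 nats.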
+
+# FIXME: problems sampling.
+class zipf_gen(rv_discrete):
+    r"""A Zipf (Zeta) discrete random variable.
+
+    %(before_notes)s
+
+    See Also
+    --------
+    zipfian
+
+    Notes
+    -----
+    The probability mass function for `zipf` is:
+
+    .. math::
+
+        f(k, a) = \frac{1}{\zeta(a) k^a}
+
+    for :math:`k \ge 1`, :math:`a > 1`.
+
+    `zipf` takes :math:`a > 1` as shape parameter. :math:`\zeta` is the
+    Riemann zeta function (`scipy.special.zeta`).
+
+    The Zipf distribution is also known as the zeta distribution, which is
+    a special case of the Zipfian distribution (`zipfian`).
+
+    %(after_notes)s
+
+    References
+    ----------
+    .. [1] "Zeta Distribution", Wikipedia,
+           https://en.wikipedia.org/wiki/Zeta_distribution
+
+    %(example)s
+
+    Confirm that `zipf` is the large `n` limit of `zipfian`.
+
+    >>> import numpy as np
+    >>> from scipy.stats import zipfian
+    >>> k = np.arange(11)
+    >>> np.allclose(zipf.pmf(k, a), zipfian.pmf(k, a, n=10000000))
+    True
+
+    """
+
+    def _shape_info(self):
+        return [_ShapeInfo("a", False, (1, np.inf), (False, False))]
+
+    def _rvs(self, a, size=None, random_state=None):
+        return random_state.zipf(a, size=size)
+
+    def _argcheck(self, a):
+        return a > 1
+
+    def _pmf(self, k, a):
+        # zipf.pmf(k, a) = 1/(zeta(a) * k**a)
+        Pk = 1.0 / special.zeta(a, 1) / k**a
+        return Pk
+
+    def _munp(self, n, a):
+        return _lazywhere(
+            a > n + 1, (a, n),
+            lambda a, n: special.zeta(a - n, 1) / special.zeta(a, 1),
+            np.inf)
+
+
+zipf = zipf_gen(a=1, name='zipf', longname='A Zipf')
+
+
+def _gen_harmonic_gt1(n, a):
+    """Generalized harmonic number, a > 1"""
+    # See https://en.wikipedia.org/wiki/Harmonic_number; search for "hurwitz"
+    return zeta(a, 1) - zeta(a, n+1)
+
+
+def _gen_harmonic_leq1(n, a):
+    """Generalized harmonic number, a <= 1"""
+    if not np.size(n):
+        return n
+    n_max = np.max(n)  # loop starts at maximum of all n
+    out = np.zeros_like(a, dtype=float)
+    # add terms of harmonic series; starting from smallest to avoid roundoff
+    for i in np.arange(n_max, 0, -1, dtype=float):
+        mask = i <= n  # don't add terms after nth
+        out[mask] += 1/i**a[mask]
+    return out
+
+
+def _gen_harmonic(n, a):
+    """Generalized harmonic number"""
+    n, a = np.broadcast_arrays(n, a)
+    return _lazywhere(a > 1, (n, a),
+                      f=_gen_harmonic_gt1, f2=_gen_harmonic_leq1)
+
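+# Worked examples (illustrative): for a <= 1 the terms are summed directly,
+# e.g. _gen_harmonic(3, 1) = 1 + 1/2 + 1/3 ~= 1.8333; for a > 1 the Hurwitz
+# zeta identity gives _gen_harmonic(2, 2) = zeta(2, 1) - zeta(2, 3)
+# = 1 + 1/4 = 1.25.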
+
+class zipfian_gen(rv_discrete):
+    r"""A Zipfian discrete random variable.
+
+    %(before_notes)s
+
+    See Also
+    --------
+    zipf
+
+    Notes
+    -----
+    The probability mass function for `zipfian` is:
+
+    .. math::
+
+        f(k, a, n) = \frac{1}{H_{n,a} k^a}
+
+    for :math:`k \in \{1, 2, \dots, n-1, n\}`, :math:`a \ge 0`,
+    :math:`n \in \{1, 2, 3, \dots\}`.
+
+    `zipfian` takes :math:`a` and :math:`n` as shape parameters.
+    :math:`H_{n,a}` is the :math:`n`:sup:`th` generalized harmonic
+    number of order :math:`a`.
+
+    The Zipfian distribution reduces to the Zipf (zeta) distribution as
+    :math:`n \rightarrow \infty`.
+
+    %(after_notes)s
+
+    References
+    ----------
+    .. [1] "Zipf's Law", Wikipedia, https://en.wikipedia.org/wiki/Zipf's_law
+    .. [2] Larry Leemis, "Zipf Distribution", Univariate Distribution
+           Relationships. http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Zipf.pdf
+
+    %(example)s
+
+    Confirm that `zipfian` reduces to `zipf` for large `n`, `a > 1`.
+
+    >>> import numpy as np
+    >>> from scipy.stats import zipf
+    >>> k = np.arange(11)
+    >>> np.allclose(zipfian.pmf(k, a=3.5, n=10000000), zipf.pmf(k, a=3.5))
+    True
+
+    """
+
+    def _shape_info(self):
+        return [_ShapeInfo("a", False, (0, np.inf), (True, False)),
+                _ShapeInfo("n", True, (0, np.inf), (False, False))]
+
+    def _argcheck(self, a, n):
+        # we need np.asarray here because `moment` (and maybe others) doesn't convert
+        return (a >= 0) & (n > 0) & (n == np.asarray(n, dtype=int))
+
+    def _get_support(self, a, n):
+        return 1, n
+
+    def _pmf(self, k, a, n):
+        return 1.0 / _gen_harmonic(n, a) / k**a
+
+    def _cdf(self, k, a, n):
+        return _gen_harmonic(k, a) / _gen_harmonic(n, a)
+
+    def _sf(self, k, a, n):
+        k = k + 1  # to match SciPy convention
+        # see http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Zipf.pdf
+        return ((k**a*(_gen_harmonic(n, a) - _gen_harmonic(k, a)) + 1)
+                / (k**a*_gen_harmonic(n, a)))
+
+    def _stats(self, a, n):
+        # see http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Zipf.pdf
+        Hna = _gen_harmonic(n, a)
+        Hna1 = _gen_harmonic(n, a-1)
+        Hna2 = _gen_harmonic(n, a-2)
+        Hna3 = _gen_harmonic(n, a-3)
+        Hna4 = _gen_harmonic(n, a-4)
+        mu1 = Hna1/Hna
+        mu2n = (Hna2*Hna - Hna1**2)
+        mu2d = Hna**2
+        mu2 = mu2n / mu2d
+        g1 = (Hna3/Hna - 3*Hna1*Hna2/Hna**2 + 2*Hna1**3/Hna**3)/mu2**(3/2)
+        g2 = (Hna**3*Hna4 - 4*Hna**2*Hna1*Hna3 + 6*Hna*Hna1**2*Hna2
+              - 3*Hna1**4) / mu2n**2
+        g2 -= 3
+        return mu1, mu2, g1, g2
+
+
+zipfian = zipfian_gen(a=1, name='zipfian', longname='A Zipfian')
+
+
+class dlaplace_gen(rv_discrete):
+    r"""A  Laplacian discrete random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    The probability mass function for `dlaplace` is:
+
+    .. math::
+
+        f(k) = \tanh(a/2) \exp(-a |k|)
+
+    for integers :math:`k` and :math:`a > 0`.
+
+    `dlaplace` takes :math:`a` as shape parameter.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+
+    def _shape_info(self):
+        return [_ShapeInfo("a", False, (0, np.inf), (False, False))]
+
+    def _pmf(self, k, a):
+        # dlaplace.pmf(k) = tanh(a/2) * exp(-a*abs(k))
+        return tanh(a/2.0) * exp(-a * abs(k))
+
+    def _cdf(self, x, a):
+        k = floor(x)
+        f = lambda k, a: 1.0 - exp(-a * k) / (exp(a) + 1)
+        f2 = lambda k, a: exp(a * (k+1)) / (exp(a) + 1)
+        return _lazywhere(k >= 0, (k, a), f=f, f2=f2)
+
+    def _ppf(self, q, a):
+        const = 1 + exp(a)
+        vals = ceil(np.where(q < 1.0 / (1 + exp(-a)),
+                             log(q*const) / a - 1,
+                             -log((1-q) * const) / a))
+        vals1 = vals - 1
+        return np.where(self._cdf(vals1, a) >= q, vals1, vals)
+
+    def _stats(self, a):
+        ea = exp(a)
+        mu2 = 2.*ea/(ea-1.)**2
+        mu4 = 2.*ea*(ea**2+10.*ea+1.) / (ea-1.)**4
+        return 0., mu2, 0., mu4/mu2**2 - 3.
+
+    def _entropy(self, a):
+        return a / sinh(a) - log(tanh(a/2.0))
+
+    def _rvs(self, a, size=None, random_state=None):
+        # The discrete Laplace is equivalent to the two-sided geometric
+        # distribution with PMF:
+        #   f(k) = (1 - alpha)/(1 + alpha) * alpha^abs(k)
+        #   Reference:
+        #     https://www.sciencedirect.com/science/
+        #     article/abs/pii/S0378375804003519
+        # Furthermore, the two-sided geometric distribution is
+        # equivalent to the difference between two iid geometric
+        # distributions.
+        #   Reference (page 179):
+        #     https://pdfs.semanticscholar.org/61b3/
+        #     b99f466815808fd0d03f5d2791eea8b541a1.pdf
+        # Thus, we can leverage the following:
+        #   1) alpha = e^-a
+        #   2) probability_of_success = 1 - alpha (Bernoulli trial)
+        probOfSuccess = -np.expm1(-np.asarray(a))
+        x = random_state.geometric(probOfSuccess, size=size)
+        y = random_state.geometric(probOfSuccess, size=size)
+        return x - y
+
+
+dlaplace = dlaplace_gen(a=-np.inf,
+                        name='dlaplace', longname='A discrete Laplacian')
+
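+# Worked example (illustrative): the pmf is symmetric about 0, with
+# dlaplace.pmf(0, a=1.0) = tanh(0.5) ~= 0.4621 and
+# dlaplace.pmf(k, a) = dlaplace.pmf(-k, a) for all integer k.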
+
+class skellam_gen(rv_discrete):
+    r"""A  Skellam discrete random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+    Probability distribution of the difference of two correlated or
+    uncorrelated Poisson random variables.
+
+    Let :math:`k_1` and :math:`k_2` be two Poisson-distributed r.v. with
+    expected values :math:`\lambda_1` and :math:`\lambda_2`. Then,
+    :math:`k_1 - k_2` follows a Skellam distribution with parameters
+    :math:`\mu_1 = \lambda_1 - \rho \sqrt{\lambda_1 \lambda_2}` and
+    :math:`\mu_2 = \lambda_2 - \rho \sqrt{\lambda_1 \lambda_2}`, where
+    :math:`\rho` is the correlation coefficient between :math:`k_1` and
+    :math:`k_2`. If the two Poisson-distributed r.v. are independent then
+    :math:`\rho = 0`.
+
+    Parameters :math:`\mu_1` and :math:`\mu_2` must be strictly positive.
+
+    For details see: https://en.wikipedia.org/wiki/Skellam_distribution
+
+    `skellam` takes :math:`\mu_1` and :math:`\mu_2` as shape parameters.
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("mu1", False, (0, np.inf), (False, False)),
+                _ShapeInfo("mu2", False, (0, np.inf), (False, False))]
+
+    def _rvs(self, mu1, mu2, size=None, random_state=None):
+        n = size
+        return (random_state.poisson(mu1, n) -
+                random_state.poisson(mu2, n))
+
+    def _pmf(self, x, mu1, mu2):
+        with warnings.catch_warnings():
+            message = "overflow encountered in _ncx2_pdf"
+            warnings.filterwarnings("ignore", message=message)
+            px = np.where(x < 0,
+                          _boost._ncx2_pdf(2*mu2, 2*(1-x), 2*mu1)*2,
+                          _boost._ncx2_pdf(2*mu1, 2*(1+x), 2*mu2)*2)
+            # ncx2.pdf() returns nan's for extremely low probabilities
+        return px
+
+    def _cdf(self, x, mu1, mu2):
+        x = floor(x)
+        px = np.where(x < 0,
+                      _boost._ncx2_cdf(2*mu2, -2*x, 2*mu1),
+                      1 - _boost._ncx2_cdf(2*mu1, 2*(x+1), 2*mu2))
+        return px
+
+    def _stats(self, mu1, mu2):
+        mean = mu1 - mu2
+        var = mu1 + mu2
+        g1 = mean / sqrt((var)**3)
+        g2 = 1 / var
+        return mean, var, g1, g2
+
+
+skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam')
+
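+# Worked example (illustrative): for mu1=3, mu2=1 the difference of the
+# two Poisson counts has mean mu1 - mu2 = 2 and variance mu1 + mu2 = 4.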
+
+class yulesimon_gen(rv_discrete):
+    r"""A Yule-Simon discrete random variable.
+
+    %(before_notes)s
+
+    Notes
+    -----
+
+    The probability mass function for the `yulesimon` is:
+
+    .. math::
+
+        f(k) =  \alpha B(k, \alpha+1)
+
+    for :math:`k=1,2,3,...`, where :math:`\alpha>0`.
+    Here :math:`B` refers to the `scipy.special.beta` function.
+
+    The sampling of random variates is based on pg 553, Section 6.3 of [1]_.
+    Our notation maps to the referenced logic via :math:`\alpha=a-1`.
+
+    For details see the wikipedia entry [2]_.
+
+    References
+    ----------
+    .. [1] Devroye, Luc. "Non-uniform Random Variate Generation",
+         (1986) Springer, New York.
+
+    .. [2] https://en.wikipedia.org/wiki/Yule-Simon_distribution
+
+    %(after_notes)s
+
+    %(example)s
+
+    """
+    def _shape_info(self):
+        return [_ShapeInfo("alpha", False, (0, np.inf), (False, False))]
+
+    def _rvs(self, alpha, size=None, random_state=None):
+        E1 = random_state.standard_exponential(size)
+        E2 = random_state.standard_exponential(size)
+        ans = ceil(-E1 / log1p(-exp(-E2 / alpha)))
+        return ans
+
+    def _pmf(self, x, alpha):
+        return alpha * special.beta(x, alpha + 1)
+
+    def _argcheck(self, alpha):
+        return (alpha > 0)
+
+    def _logpmf(self, x, alpha):
+        return log(alpha) + special.betaln(x, alpha + 1)
+
+    def _cdf(self, x, alpha):
+        return 1 - x * special.beta(x, alpha + 1)
+
+    def _sf(self, x, alpha):
+        return x * special.beta(x, alpha + 1)
+
+    def _logsf(self, x, alpha):
+        return log(x) + special.betaln(x, alpha + 1)
+
+    def _stats(self, alpha):
+        mu = np.where(alpha <= 1, np.inf, alpha / (alpha - 1))
+        mu2 = np.where(alpha > 2,
+                alpha**2 / ((alpha - 2.0) * (alpha - 1)**2),
+                np.inf)
+        mu2 = np.where(alpha <= 1, np.nan, mu2)
+        g1 = np.where(alpha > 3,
+                sqrt(alpha - 2) * (alpha + 1)**2 / (alpha * (alpha - 3)),
+                np.inf)
+        g1 = np.where(alpha <= 2, np.nan, g1)
+        g2 = np.where(alpha > 4,
+                (alpha + 3) + (alpha**3 - 49 * alpha - 22) / (alpha *
+                        (alpha - 4) * (alpha - 3)), np.inf)
+        g2 = np.where(alpha <= 2, np.nan, g2)
+        return mu, mu2, g1, g2
+
+
+yulesimon = yulesimon_gen(name='yulesimon', a=1)
+
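+# Worked example (illustrative): yulesimon.pmf(1, alpha=1)
+# = 1 * B(1, 2) = Gamma(1)*Gamma(2)/Gamma(3) = 1/2 = 0.5.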
+
+def _vectorize_rvs_over_shapes(_rvs1):
+    """Decorator that vectorizes _rvs method to work on ndarray shapes"""
+    # _rvs1 must be a _function_ that accepts _scalar_ args as positional
+    # arguments, `size` and `random_state` as keyword arguments.
+    # _rvs1 must return a random variate array with shape `size`. If `size` is
+    # None, _rvs1 must return a scalar.
+    # When applied to _rvs1, this decorator broadcasts ndarray args
+    # and loops over them, calling _rvs1 for each set of scalar args.
+    # For usage example, see _nchypergeom_gen
+    def _rvs(*args, size, random_state):
+        _rvs1_size, _rvs1_indices = _check_shape(args[0].shape, size)
+
+        size = np.array(size)
+        _rvs1_size = np.array(_rvs1_size)
+        _rvs1_indices = np.array(_rvs1_indices)
+
+        if np.all(_rvs1_indices):  # all args are scalars
+            return _rvs1(*args, size, random_state)
+
+        out = np.empty(size)
+
+        # out.shape can mix dimensions associated with arg_shape and _rvs1_size
+        # Sort them to arg_shape + _rvs1_size for easy indexing of dimensions
+        # corresponding with the different sets of scalar args
+        j0 = np.arange(out.ndim)
+        j1 = np.hstack((j0[~_rvs1_indices], j0[_rvs1_indices]))
+        out = np.moveaxis(out, j1, j0)
+
+        for i in np.ndindex(*size[~_rvs1_indices]):
+            # arg can be squeezed because singleton dimensions will be
+            # associated with _rvs1_size, not arg_shape per _check_shape
+            out[i] = _rvs1(*[np.squeeze(arg)[i] for arg in args],
+                           _rvs1_size, random_state)
+
+        return np.moveaxis(out, j0, j1)  # move axes back before returning
+    return _rvs
+
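+# Hypothetical usage sketch for the decorator above: `_rvs1` takes scalar
+# shape args plus `size`/`random_state` and returns variates of shape
+# `size`; the decorated function then accepts ndarray shape args (already
+# broadcast by the caller) and loops over each scalar combination:
+#
+#     @_vectorize_rvs_over_shapes
+#     def _rvs1(a, size, random_state):
+#         return random_state.uniform(0, a, size=size)
+#
+# For the real use, see _nchypergeom_gen._rvs below.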
+
+class _nchypergeom_gen(rv_discrete):
+    r"""A noncentral hypergeometric discrete random variable.
+
+    For subclassing by nchypergeom_fisher_gen and nchypergeom_wallenius_gen.
+
+    """
+
+    rvs_name = None
+    dist = None
+
+    def _shape_info(self):
+        return [_ShapeInfo("M", True, (0, np.inf), (True, False)),
+                _ShapeInfo("n", True, (0, np.inf), (True, False)),
+                _ShapeInfo("N", True, (0, np.inf), (True, False)),
+                _ShapeInfo("odds", False, (0, np.inf), (False, False))]
+
+    def _get_support(self, M, n, N, odds):
+        N, m1, n = M, n, N  # follow Wikipedia notation
+        m2 = N - m1
+        x_min = np.maximum(0, n - m2)
+        x_max = np.minimum(n, m1)
+        return x_min, x_max
+
+    def _argcheck(self, M, n, N, odds):
+        M, n = np.asarray(M), np.asarray(n),
+        N, odds = np.asarray(N), np.asarray(odds)
+        cond1 = (M.astype(int) == M) & (M >= 0)
+        cond2 = (n.astype(int) == n) & (n >= 0)
+        cond3 = (N.astype(int) == N) & (N >= 0)
+        cond4 = odds > 0
+        cond5 = N <= M
+        cond6 = n <= M
+        return cond1 & cond2 & cond3 & cond4 & cond5 & cond6
+
+    def _rvs(self, M, n, N, odds, size=None, random_state=None):
+
+        @_vectorize_rvs_over_shapes
+        def _rvs1(M, n, N, odds, size, random_state):
+            length = np.prod(size)
+            urn = _PyStochasticLib3()
+            rv_gen = getattr(urn, self.rvs_name)
+            rvs = rv_gen(N, n, M, odds, length, random_state)
+            rvs = rvs.reshape(size)
+            return rvs
+
+        return _rvs1(M, n, N, odds, size=size, random_state=random_state)
+
+    def _pmf(self, x, M, n, N, odds):
+
+        x, M, n, N, odds = np.broadcast_arrays(x, M, n, N, odds)
+        if x.size == 0:  # np.vectorize doesn't work with zero size input
+            return np.empty_like(x)
+
+        @np.vectorize
+        def _pmf1(x, M, n, N, odds):
+            urn = self.dist(N, n, M, odds, 1e-12)
+            return urn.probability(x)
+
+        return _pmf1(x, M, n, N, odds)
+
+    def _stats(self, M, n, N, odds, moments):
+
+        @np.vectorize
+        def _moments1(M, n, N, odds):
+            urn = self.dist(N, n, M, odds, 1e-12)
+            return urn.moments()
+
+        m, v = (_moments1(M, n, N, odds) if ("m" in moments or "v" in moments)
+                else (None, None))
+        s, k = None, None
+        return m, v, s, k
+
+
+class nchypergeom_fisher_gen(_nchypergeom_gen):
+    r"""A Fisher's noncentral hypergeometric discrete random variable.
+
+    Fisher's noncentral hypergeometric distribution models drawing objects of
+    two types from a bin. `M` is the total number of objects, `n` is the
+    number of Type I objects, and `odds` is the odds ratio: the odds of
+    selecting a Type I object rather than a Type II object when there is only
+    one object of each type.
+    The random variate represents the number of Type I objects drawn if we
+    take a handful of objects from the bin at once and find out afterwards
+    that we took `N` objects.
+
+    %(before_notes)s
+
+    See Also
+    --------
+    nchypergeom_wallenius, hypergeom, nhypergeom
+
+    Notes
+    -----
+    Let mathematical symbols :math:`N`, :math:`n`, and :math:`M` correspond
+    with parameters `N`, `n`, and `M` (respectively) as defined above.
+
+    The probability mass function is defined as
+
+    .. math::
+
+        p(x; M, n, N, \omega) =
+        \frac{\binom{n}{x}\binom{M - n}{N-x}\omega^x}{P_0},
+
+    for
+    :math:`x \in [x_l, x_u]`,
+    :math:`M \in {\mathbb N}`,
+    :math:`n \in [0, M]`,
+    :math:`N \in [0, M]`,
+    :math:`\omega > 0`,
+    where
+    :math:`x_l = \max(0, N - (M - n))`,
+    :math:`x_u = \min(N, n)`,
+
+    .. math::
+
+        P_0 = \sum_{y=x_l}^{x_u} \binom{n}{y}\binom{M - n}{N-y}\omega^y,
+
+    and the binomial coefficients are defined as
+
+    .. math:: \binom{n}{k} \equiv \frac{n!}{k! (n - k)!}.
+
+    `nchypergeom_fisher` uses the BiasedUrn package by Agner Fog with
+    permission for it to be distributed under SciPy's license.
+
+    The symbols used to denote the shape parameters (`N`, `n`, and `M`) are not
+    universally accepted; they are chosen for consistency with `hypergeom`.
+
+    Note that Fisher's noncentral hypergeometric distribution is distinct
+    from Wallenius' noncentral hypergeometric distribution, which models
+    drawing a pre-determined `N` objects from a bin one by one.
+    When the odds ratio is unity, however, both distributions reduce to the
+    ordinary hypergeometric distribution.
+
+    %(after_notes)s
+
+    References
+    ----------
+    .. [1] Agner Fog, "Biased Urn Theory".
+           https://cran.r-project.org/web/packages/BiasedUrn/vignettes/UrnTheory.pdf
+
+    .. [2] "Fisher's noncentral hypergeometric distribution", Wikipedia,
+           https://en.wikipedia.org/wiki/Fisher's_noncentral_hypergeometric_distribution
+
+    %(example)s
+
+    """
+
+    rvs_name = "rvs_fisher"
+    dist = _PyFishersNCHypergeometric
+
+
+nchypergeom_fisher = nchypergeom_fisher_gen(
+    name='nchypergeom_fisher',
+    longname="A Fisher's noncentral hypergeometric")
+
+
+class nchypergeom_wallenius_gen(_nchypergeom_gen):
+    r"""A Wallenius' noncentral hypergeometric discrete random variable.
+
+    Wallenius' noncentral hypergeometric distribution models drawing objects of
+    two types from a bin. `M` is the total number of objects, `n` is the
+    number of Type I objects, and `odds` is the odds ratio: the odds of
+    selecting a Type I object rather than a Type II object when there is only
+    one object of each type.
+    The random variate represents the number of Type I objects drawn if we
+    draw a pre-determined `N` objects from a bin one by one.
+
+    %(before_notes)s
+
+    See Also
+    --------
+    nchypergeom_fisher, hypergeom, nhypergeom
+
+    Notes
+    -----
+    Let mathematical symbols :math:`N`, :math:`n`, and :math:`M` correspond
+    with parameters `N`, `n`, and `M` (respectively) as defined above.
+
+    The probability mass function is defined as
+
+    .. math::
+
+        p(x; M, n, N, \omega) = \binom{n}{x} \binom{M - n}{N-x}
+        \int_0^1 \left(1-t^{\omega/D}\right)^x\left(1-t^{1/D}\right)^{N-x} dt
+
+    for
+    :math:`x \in [x_l, x_u]`,
+    :math:`M \in {\mathbb N}`,
+    :math:`n \in [0, M]`,
+    :math:`N \in [0, M]`,
+    :math:`\omega > 0`,
+    where
+    :math:`x_l = \max(0, N - (M - n))`,
+    :math:`x_u = \min(N, n)`,
+
+    .. math::
+
+        D = \omega(n - x) + ((M - n)-(N-x)),
+
+    and the binomial coefficients are defined as
+
+    .. math:: \binom{n}{k} \equiv \frac{n!}{k! (n - k)!}.
+
+    `nchypergeom_wallenius` uses the BiasedUrn package by Agner Fog with
+    permission for it to be distributed under SciPy's license.
+
+    The symbols used to denote the shape parameters (`N`, `n`, and `M`) are not
+    universally accepted; they are chosen for consistency with `hypergeom`.
+
+    Note that Wallenius' noncentral hypergeometric distribution is distinct
+    from Fisher's noncentral hypergeometric distribution, which models
+    taking a handful of objects from the bin at once and finding out
+    afterwards that `N` objects were taken.
+    When the odds ratio is unity, however, both distributions reduce to the
+    ordinary hypergeometric distribution.
+
+    %(after_notes)s
+
+    References
+    ----------
+    .. [1] Agner Fog, "Biased Urn Theory".
+           https://cran.r-project.org/web/packages/BiasedUrn/vignettes/UrnTheory.pdf
+
+    .. [2] "Wallenius' noncentral hypergeometric distribution", Wikipedia,
+           https://en.wikipedia.org/wiki/Wallenius'_noncentral_hypergeometric_distribution
+
+    %(example)s
+
+    """
+
+    rvs_name = "rvs_wallenius"
+    dist = _PyWalleniusNCHypergeometric
+
+
+nchypergeom_wallenius = nchypergeom_wallenius_gen(
+    name='nchypergeom_wallenius',
+    longname="A Wallenius' noncentral hypergeometric")
+
+
+# Collect names of classes and objects in this module.
+pairs = list(globals().copy().items())
+_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete)
+
+__all__ = _distn_names + _distn_gen_names
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_distn_infrastructure.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_distn_infrastructure.py
new file mode 100644
index 00000000..1b538f2f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_distn_infrastructure.py
@@ -0,0 +1,4062 @@
+#
+# Author:  Travis Oliphant  2002-2011 with contributions from
+#          SciPy Developers 2004-2011
+#
+from scipy._lib._util import getfullargspec_no_self as _getfullargspec
+
+import sys
+import keyword
+import re
+import types
+import warnings
+from itertools import zip_longest
+
+from scipy._lib import doccer
+from ._distr_params import distcont, distdiscrete
+from scipy._lib._util import check_random_state
+
+from scipy.special import comb, entr
+
+# for root finding for continuous distribution ppf, and max likelihood
+# estimation
+from scipy import optimize
+
+# for functions of continuous distributions (e.g. moments, entropy, cdf)
+from scipy import integrate
+
+# to approximate the pdf of a continuous distribution given its cdf
+from scipy._lib._finite_differences import _derivative
+
+# for scipy.stats.entropy. Attempts to import just that function or file
+# have caused import problems
+from scipy import stats
+
+from numpy import (arange, putmask, ravel, ones, shape, ndarray, zeros, floor,
+                   logical_and, log, sqrt, place, argmax, vectorize, asarray,
+                   nan, inf, isinf, NINF, empty)
+
+import numpy as np
+from ._constants import _XMAX
+from scipy.stats._warnings_errors import FitError
+
+# These are the docstring parts used for substitution in specific
+# distribution docstrings
+
+docheaders = {'methods': """\nMethods\n-------\n""",
+              'notes': """\nNotes\n-----\n""",
+              'examples': """\nExamples\n--------\n"""}
+
+_doc_rvs = """\
+rvs(%(shapes)s, loc=0, scale=1, size=1, random_state=None)
+    Random variates.
+"""
+_doc_pdf = """\
+pdf(x, %(shapes)s, loc=0, scale=1)
+    Probability density function.
+"""
+_doc_logpdf = """\
+logpdf(x, %(shapes)s, loc=0, scale=1)
+    Log of the probability density function.
+"""
+_doc_pmf = """\
+pmf(k, %(shapes)s, loc=0, scale=1)
+    Probability mass function.
+"""
+_doc_logpmf = """\
+logpmf(k, %(shapes)s, loc=0, scale=1)
+    Log of the probability mass function.
+"""
+_doc_cdf = """\
+cdf(x, %(shapes)s, loc=0, scale=1)
+    Cumulative distribution function.
+"""
+_doc_logcdf = """\
+logcdf(x, %(shapes)s, loc=0, scale=1)
+    Log of the cumulative distribution function.
+"""
+_doc_sf = """\
+sf(x, %(shapes)s, loc=0, scale=1)
+    Survival function  (also defined as ``1 - cdf``, but `sf` is sometimes more accurate).
+"""
+_doc_logsf = """\
+logsf(x, %(shapes)s, loc=0, scale=1)
+    Log of the survival function.
+"""
+_doc_ppf = """\
+ppf(q, %(shapes)s, loc=0, scale=1)
+    Percent point function (inverse of ``cdf`` --- percentiles).
+"""
+_doc_isf = """\
+isf(q, %(shapes)s, loc=0, scale=1)
+    Inverse survival function (inverse of ``sf``).
+"""
+_doc_moment = """\
+moment(order, %(shapes)s, loc=0, scale=1)
+    Non-central moment of the specified order.
+"""
+_doc_stats = """\
+stats(%(shapes)s, loc=0, scale=1, moments='mv')
+    Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
+"""
+_doc_entropy = """\
+entropy(%(shapes)s, loc=0, scale=1)
+    (Differential) entropy of the RV.
+"""
+_doc_fit = """\
+fit(data)
+    Parameter estimates for generic data.
+    See `scipy.stats.rv_continuous.fit <https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.fit.html#scipy.stats.rv_continuous.fit>`__ for detailed documentation of the
+    keyword arguments.
+"""
+_doc_expect = """\
+expect(func, args=(%(shapes_)s), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)
+    Expected value of a function (of one argument) with respect to the distribution.
+"""
+_doc_expect_discrete = """\
+expect(func, args=(%(shapes_)s), loc=0, lb=None, ub=None, conditional=False)
+    Expected value of a function (of one argument) with respect to the distribution.
+"""
+_doc_median = """\
+median(%(shapes)s, loc=0, scale=1)
+    Median of the distribution.
+"""
+_doc_mean = """\
+mean(%(shapes)s, loc=0, scale=1)
+    Mean of the distribution.
+"""
+_doc_var = """\
+var(%(shapes)s, loc=0, scale=1)
+    Variance of the distribution.
+"""
+_doc_std = """\
+std(%(shapes)s, loc=0, scale=1)
+    Standard deviation of the distribution.
+"""
+_doc_interval = """\
+interval(confidence, %(shapes)s, loc=0, scale=1)
+    Confidence interval with equal areas around the median.
+"""
+_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,
+                           _doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,
+                           _doc_logsf, _doc_ppf, _doc_isf, _doc_moment,
+                           _doc_stats, _doc_entropy, _doc_fit,
+                           _doc_expect, _doc_median,
+                           _doc_mean, _doc_var, _doc_std, _doc_interval])
+
+_doc_default_longsummary = """\
+As an instance of the `rv_continuous` class, `%(name)s` object inherits from it
+a collection of generic methods (see below for the full list),
+and completes them with details specific for this particular distribution.
+"""
+
+_doc_default_frozen_note = """
+Alternatively, the object may be called (as a function) to fix the shape,
+location, and scale parameters returning a "frozen" continuous RV object:
+
+rv = %(name)s(%(shapes)s, loc=0, scale=1)
+    - Frozen RV object with the same methods but holding the given shape,
+      location, and scale fixed.
+"""
+_doc_default_example = """\
+Examples
+--------
+>>> import numpy as np
+>>> from scipy.stats import %(name)s
+>>> import matplotlib.pyplot as plt
+>>> fig, ax = plt.subplots(1, 1)
+
+Calculate the first four moments:
+
+%(set_vals_stmt)s
+>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
+
+Display the probability density function (``pdf``):
+
+>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s),
+...                 %(name)s.ppf(0.99, %(shapes)s), 100)
+>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s),
+...        'r-', lw=5, alpha=0.6, label='%(name)s pdf')
+
+Alternatively, the distribution object can be called (as a function)
+to fix the shape, location and scale parameters. This returns a "frozen"
+RV object holding the given parameters fixed.
+
+Freeze the distribution and display the frozen ``pdf``:
+
+>>> rv = %(name)s(%(shapes)s)
+>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
+
+Check accuracy of ``cdf`` and ``ppf``:
+
+>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s)
+>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s))
+True
+
+Generate random numbers:
+
+>>> r = %(name)s.rvs(%(shapes)s, size=1000)
+
+And compare the histogram:
+
+>>> ax.hist(r, density=True, bins='auto', histtype='stepfilled', alpha=0.2)
+>>> ax.set_xlim([x[0], x[-1]])
+>>> ax.legend(loc='best', frameon=False)
+>>> plt.show()
+
+"""
+
+_doc_default_locscale = """\
+The probability density above is defined in the "standardized" form. To shift
+and/or scale the distribution use the ``loc`` and ``scale`` parameters.
+Specifically, ``%(name)s.pdf(x, %(shapes)s, loc, scale)`` is identically
+equivalent to ``%(name)s.pdf(y, %(shapes)s) / scale`` with
+``y = (x - loc) / scale``. Note that shifting the location of a distribution
+does not make it a "noncentral" distribution; noncentral generalizations of
+some distributions are available in separate classes.
+"""
+
+_doc_default = ''.join([_doc_default_longsummary,
+                        _doc_allmethods,
+                        '\n',
+                        _doc_default_example])
+
+_doc_default_before_notes = ''.join([_doc_default_longsummary,
+                                     _doc_allmethods])
+
+docdict = {
+    'rvs': _doc_rvs,
+    'pdf': _doc_pdf,
+    'logpdf': _doc_logpdf,
+    'cdf': _doc_cdf,
+    'logcdf': _doc_logcdf,
+    'sf': _doc_sf,
+    'logsf': _doc_logsf,
+    'ppf': _doc_ppf,
+    'isf': _doc_isf,
+    'stats': _doc_stats,
+    'entropy': _doc_entropy,
+    'fit': _doc_fit,
+    'moment': _doc_moment,
+    'expect': _doc_expect,
+    'interval': _doc_interval,
+    'mean': _doc_mean,
+    'std': _doc_std,
+    'var': _doc_var,
+    'median': _doc_median,
+    'allmethods': _doc_allmethods,
+    'longsummary': _doc_default_longsummary,
+    'frozennote': _doc_default_frozen_note,
+    'example': _doc_default_example,
+    'default': _doc_default,
+    'before_notes': _doc_default_before_notes,
+    'after_notes': _doc_default_locscale
+}
+
+# Reuse common content between continuous and discrete docs, change some
+# minor bits.
+docdict_discrete = docdict.copy()
+
+docdict_discrete['pmf'] = _doc_pmf
+docdict_discrete['logpmf'] = _doc_logpmf
+docdict_discrete['expect'] = _doc_expect_discrete
+_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',
+                     'ppf', 'isf', 'stats', 'entropy', 'expect', 'median',
+                     'mean', 'var', 'std', 'interval']
+for obj in _doc_disc_methods:
+    docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')
+
+_doc_disc_methods_err_varname = ['cdf', 'logcdf', 'sf', 'logsf']
+for obj in _doc_disc_methods_err_varname:
+    docdict_discrete[obj] = docdict_discrete[obj].replace('(x, ', '(k, ')
+
+docdict_discrete.pop('pdf')
+docdict_discrete.pop('logpdf')
+
+_doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods])
+docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods
+
+docdict_discrete['longsummary'] = _doc_default_longsummary.replace(
+    'rv_continuous', 'rv_discrete')
+
+_doc_default_frozen_note = """
+Alternatively, the object may be called (as a function) to fix the shape and
+location parameters returning a "frozen" discrete RV object:
+
+rv = %(name)s(%(shapes)s, loc=0)
+    - Frozen RV object with the same methods but holding the given shape and
+      location fixed.
+"""
+docdict_discrete['frozennote'] = _doc_default_frozen_note
+
+_doc_default_discrete_example = """\
+Examples
+--------
+>>> import numpy as np
+>>> from scipy.stats import %(name)s
+>>> import matplotlib.pyplot as plt
+>>> fig, ax = plt.subplots(1, 1)
+
+Calculate the first four moments:
+
+%(set_vals_stmt)s
+>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
+
+Display the probability mass function (``pmf``):
+
+>>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s),
+...               %(name)s.ppf(0.99, %(shapes)s))
+>>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf')
+>>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5)
+
+Alternatively, the distribution object can be called (as a function)
+to fix the shape and location. This returns a "frozen" RV object holding
+the given parameters fixed.
+
+Freeze the distribution and display the frozen ``pmf``:
+
+>>> rv = %(name)s(%(shapes)s)
+>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1,
+...         label='frozen pmf')
+>>> ax.legend(loc='best', frameon=False)
+>>> plt.show()
+
+Check accuracy of ``cdf`` and ``ppf``:
+
+>>> prob = %(name)s.cdf(x, %(shapes)s)
+>>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s))
+True
+
+Generate random numbers:
+
+>>> r = %(name)s.rvs(%(shapes)s, size=1000)
+"""
+
+
+_doc_default_discrete_locscale = """\
+The probability mass function above is defined in the "standardized" form.
+To shift the distribution use the ``loc`` parameter.
+Specifically, ``%(name)s.pmf(k, %(shapes)s, loc)`` is identically
+equivalent to ``%(name)s.pmf(k - loc, %(shapes)s)``.
+"""
+
+docdict_discrete['example'] = _doc_default_discrete_example
+docdict_discrete['after_notes'] = _doc_default_discrete_locscale
+
+_doc_default_before_notes = ''.join([docdict_discrete['longsummary'],
+                                     docdict_discrete['allmethods']])
+docdict_discrete['before_notes'] = _doc_default_before_notes
+
+_doc_default_disc = ''.join([docdict_discrete['longsummary'],
+                             docdict_discrete['allmethods'],
+                             docdict_discrete['frozennote'],
+                             docdict_discrete['example']])
+docdict_discrete['default'] = _doc_default_disc
+
+# clean up all the separate docstring elements, we do not need them anymore
+for obj in [s for s in dir() if s.startswith('_doc_')]:
+    exec('del ' + obj)
+del obj
+
+
+def _moment(data, n, mu=None):
+    if mu is None:
+        mu = data.mean()
+    return ((data - mu)**n).mean()
+
+
+def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):
+    if (n == 0):
+        return 1.0
+    elif (n == 1):
+        if mu is None:
+            val = moment_func(1, *args)
+        else:
+            val = mu
+    elif (n == 2):
+        if mu2 is None or mu is None:
+            val = moment_func(2, *args)
+        else:
+            val = mu2 + mu*mu
+    elif (n == 3):
+        if g1 is None or mu2 is None or mu is None:
+            val = moment_func(3, *args)
+        else:
+            mu3 = g1 * np.power(mu2, 1.5)  # 3rd central moment
+            val = mu3+3*mu*mu2+mu*mu*mu  # 3rd non-central moment
+    elif (n == 4):
+        if g1 is None or g2 is None or mu2 is None or mu is None:
+            val = moment_func(4, *args)
+        else:
+            mu4 = (g2+3.0)*(mu2**2.0)  # 4th central moment
+            mu3 = g1*np.power(mu2, 1.5)  # 3rd central moment
+            val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu
+    else:
+        val = moment_func(n, *args)
+
+    return val
+
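+# Worked example for the n == 2 branch above: with mean mu = 3 and central
+# second moment (variance) mu2 = 4, the noncentral second moment is
+# E[X**2] = mu2 + mu*mu = 4 + 9 = 13.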
+
+def _skew(data):
+    """
+    skew is third central moment / variance**(1.5)
+    """
+    data = np.ravel(data)
+    mu = data.mean()
+    m2 = ((data - mu)**2).mean()
+    m3 = ((data - mu)**3).mean()
+    return m3 / np.power(m2, 1.5)
+
+
+def _kurtosis(data):
+    """kurtosis is fourth central moment / variance**2 - 3."""
+    data = np.ravel(data)
+    mu = data.mean()
+    m2 = ((data - mu)**2).mean()
+    m4 = ((data - mu)**4).mean()
+    return m4 / m2**2 - 3
+
+
+def _fit_determine_optimizer(optimizer):
+    if not callable(optimizer) and isinstance(optimizer, str):
+        if not optimizer.startswith('fmin_'):
+            optimizer = "fmin_"+optimizer
+        if optimizer == 'fmin_':
+            optimizer = 'fmin'
+        try:
+            optimizer = getattr(optimize, optimizer)
+        except AttributeError as e:
+            raise ValueError("%s is not a valid optimizer" % optimizer) from e
+    return optimizer
+
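+# Illustrative behaviour of _fit_determine_optimizer (assuming only that
+# scipy.optimize.fmin_bfgs exists):
+#
+# >>> _fit_determine_optimizer('bfgs') is optimize.fmin_bfgs
+# True
+# >>> _fit_determine_optimizer('')   # empty string maps to plain fmin
+# <function fmin ...>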
+
+# Frozen RV class
+class rv_frozen:
+
+    def __init__(self, dist, *args, **kwds):
+        self.args = args
+        self.kwds = kwds
+
+        # create a new instance
+        self.dist = dist.__class__(**dist._updated_ctor_param())
+
+        shapes, _, _ = self.dist._parse_args(*args, **kwds)
+        self.a, self.b = self.dist._get_support(*shapes)
+
+    @property
+    def random_state(self):
+        return self.dist._random_state
+
+    @random_state.setter
+    def random_state(self, seed):
+        self.dist._random_state = check_random_state(seed)
+
+    def cdf(self, x):
+        return self.dist.cdf(x, *self.args, **self.kwds)
+
+    def logcdf(self, x):
+        return self.dist.logcdf(x, *self.args, **self.kwds)
+
+    def ppf(self, q):
+        return self.dist.ppf(q, *self.args, **self.kwds)
+
+    def isf(self, q):
+        return self.dist.isf(q, *self.args, **self.kwds)
+
+    def rvs(self, size=None, random_state=None):
+        kwds = self.kwds.copy()
+        kwds.update({'size': size, 'random_state': random_state})
+        return self.dist.rvs(*self.args, **kwds)
+
+    def sf(self, x):
+        return self.dist.sf(x, *self.args, **self.kwds)
+
+    def logsf(self, x):
+        return self.dist.logsf(x, *self.args, **self.kwds)
+
+    def stats(self, moments='mv'):
+        kwds = self.kwds.copy()
+        kwds.update({'moments': moments})
+        return self.dist.stats(*self.args, **kwds)
+
+    def median(self):
+        return self.dist.median(*self.args, **self.kwds)
+
+    def mean(self):
+        return self.dist.mean(*self.args, **self.kwds)
+
+    def var(self):
+        return self.dist.var(*self.args, **self.kwds)
+
+    def std(self):
+        return self.dist.std(*self.args, **self.kwds)
+
+    def moment(self, order=None, **kwds):
+        return self.dist.moment(order, *self.args, **self.kwds, **kwds)
+
+    def entropy(self):
+        return self.dist.entropy(*self.args, **self.kwds)
+
+    def interval(self, confidence=None, **kwds):
+        return self.dist.interval(confidence, *self.args, **self.kwds, **kwds)
+
+    def expect(self, func=None, lb=None, ub=None, conditional=False, **kwds):
+        # expect method only accepts shape parameters as positional args
+        # hence convert self.args, self.kwds, also loc/scale
+        # See the .expect method docstrings for the meaning of
+        # other parameters.
+        a, loc, scale = self.dist._parse_args(*self.args, **self.kwds)
+        if isinstance(self.dist, rv_discrete):
+            return self.dist.expect(func, a, loc, lb, ub, conditional, **kwds)
+        else:
+            return self.dist.expect(func, a, loc, scale, lb, ub,
+                                    conditional, **kwds)
+
+    def support(self):
+        return self.dist.support(*self.args, **self.kwds)
+
+
+class rv_discrete_frozen(rv_frozen):
+
+    def pmf(self, k):
+        return self.dist.pmf(k, *self.args, **self.kwds)
+
+    def logpmf(self, k):  # No error
+        return self.dist.logpmf(k, *self.args, **self.kwds)
+
+
+class rv_continuous_frozen(rv_frozen):
+
+    def pdf(self, x):
+        return self.dist.pdf(x, *self.args, **self.kwds)
+
+    def logpdf(self, x):
+        return self.dist.logpdf(x, *self.args, **self.kwds)
+
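+# Illustrative freezing example (hypothetical values): calling a
+# distribution object fixes its parameters and returns one of the frozen
+# classes above:
+#
+# >>> from scipy import stats
+# >>> rv = stats.norm(loc=1.0, scale=2.0)   # an rv_continuous_frozen
+# >>> rv.mean(), rv.std()
+# (1.0, 2.0)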
+
+def argsreduce(cond, *args):
+    """Clean arguments to:
+
+    1. Ensure all arguments are iterable (arrays of dimension at least one).
+    2. If cond is not all True, extract the elements of each broadcast
+       args[i] where cond is True, returned as 1-D arrays (size-1 arguments
+       are passed through unchanged).
+
+    Return list of processed arguments.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> rng = np.random.default_rng()
+    >>> A = rng.random((4, 5))
+    >>> B = 2
+    >>> C = rng.random((1, 5))
+    >>> cond = np.ones(A.shape)
+    >>> [A1, B1, C1] = argsreduce(cond, A, B, C)
+    >>> A1.shape
+    (4, 5)
+    >>> B1.shape
+    (1,)
+    >>> C1.shape
+    (1, 5)
+    >>> cond[2,:] = 0
+    >>> [A1, B1, C1] = argsreduce(cond, A, B, C)
+    >>> A1.shape
+    (15,)
+    >>> B1.shape
+    (1,)
+    >>> C1.shape
+    (15,)
+
+    """
+    # some distributions assume arguments are iterable.
+    newargs = np.atleast_1d(*args)
+
+    # np.atleast_1d returns an array if only one argument, or a list of arrays
+    # if more than one argument.
+    if not isinstance(newargs, list):
+        newargs = [newargs, ]
+
+    if np.all(cond):
+        # broadcast arrays with cond
+        *newargs, cond = np.broadcast_arrays(*newargs, cond)
+        return [arg.ravel() for arg in newargs]
+
+    s = cond.shape
+    # np.extract returns flattened arrays, which are not broadcastable together
+    # unless they are either the same size or size == 1.
+    return [(arg if np.size(arg) == 1
+            else np.extract(cond, np.broadcast_to(arg, s)))
+            for arg in newargs]
+
+
+parse_arg_template = """
+def _parse_args(self, %(shape_arg_str)s %(locscale_in)s):
+    return (%(shape_arg_str)s), %(locscale_out)s
+
+def _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None):
+    return self._argcheck_rvs(%(shape_arg_str)s %(locscale_out)s, size=size)
+
+def _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'):
+    return (%(shape_arg_str)s), %(locscale_out)s, moments
+"""
+
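+# For example, a continuous distribution with a single shape 'a' expands
+# the template above to (modulo whitespace):
+#
+#     def _parse_args(self, a, loc=0, scale=1):
+#         return (a,), loc, scale
+#
+# The expanded source is compiled and attached per-instance by
+# _attach_argparser_methods below.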
+
+class rv_generic:
+    """Class which encapsulates common functionality between rv_discrete
+    and rv_continuous.
+
+    """
+    def __init__(self, seed=None):
+        super().__init__()
+
+        # figure out if _stats signature has 'moments' keyword
+        sig = _getfullargspec(self._stats)
+        self._stats_has_moments = ((sig.varkw is not None) or
+                                   ('moments' in sig.args) or
+                                   ('moments' in sig.kwonlyargs))
+        self._random_state = check_random_state(seed)
+
+    @property
+    def random_state(self):
+        """Get or set the generator object for generating random variates.
+
+        If `random_state` is None (or `np.random`), the
+        `numpy.random.RandomState` singleton is used.
+        If `random_state` is an int, a new ``RandomState`` instance is used,
+        seeded with `random_state`.
+        If `random_state` is already a ``Generator`` or ``RandomState``
+        instance, that instance is used.
+
+        """
+        return self._random_state
+
+    @random_state.setter
+    def random_state(self, seed):
+        self._random_state = check_random_state(seed)
+
+    def __setstate__(self, state):
+        try:
+            self.__dict__.update(state)
+            # attaches the dynamically created methods on each instance.
+            # if a subclass overrides rv_generic.__setstate__, or implements
+            # its own _attach_methods, then it must make sure that
+            # _attach_argparser_methods is called.
+            self._attach_methods()
+        except ValueError:
+            # reconstitute an old pickle scipy<1.6, that contains
+            # (_ctor_param, random_state) as state
+            self._ctor_param = state[0]
+            self._random_state = state[1]
+            self.__init__()
+
+    def _attach_methods(self):
+        """Attaches dynamically created methods to the rv_* instance.
+
+        This method must be overridden by subclasses, and must itself call
+         _attach_argparser_methods. This method is called in __init__ in
+         subclasses, and in __setstate__
+        """
+        raise NotImplementedError
+
+    def _attach_argparser_methods(self):
+        """
+        Generates the argument-parsing functions dynamically and attaches
+        them to the instance.
+
+        Should be called from `_attach_methods`, typically in __init__ and
+        during unpickling (__setstate__)
+        """
+        ns = {}
+        exec(self._parse_arg_template, ns)
+        # NB: attach to the instance, not class
+        for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']:
+            setattr(self, name, types.MethodType(ns[name], self))
+
+    def _construct_argparser(
+            self, meths_to_inspect, locscale_in, locscale_out):
+        """Construct the parser string for the shape arguments.
+
+        This method should be called in __init__ of a class for each
+        distribution. It creates the `_parse_arg_template` attribute that is
+        then used by `_attach_argparser_methods` to dynamically create and
+        attach the `_parse_args`, `_parse_args_stats`, `_parse_args_rvs`
+        methods to the instance.
+
+        If self.shapes is a non-empty string, interprets it as a
+        comma-separated list of shape parameters.
+
+        Otherwise inspects the call signatures of `meths_to_inspect`
+        and constructs the argument-parsing functions from these.
+        In this case also sets `shapes` and `numargs`.
+        """
+
+        if self.shapes:
+            # sanitize the user-supplied shapes
+            if not isinstance(self.shapes, str):
+                raise TypeError('shapes must be a string.')
+
+            shapes = self.shapes.replace(',', ' ').split()
+
+            for field in shapes:
+                if keyword.iskeyword(field):
+                    raise SyntaxError('keywords cannot be used as shapes.')
+                if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field):
+                    raise SyntaxError(
+                        'shapes must be valid python identifiers')
+        else:
+            # find out the call signatures (_pdf, _cdf etc), deduce shape
+            # arguments. Generic methods only have 'self, x', any further args
+            # are shapes.
+            shapes_list = []
+            for meth in meths_to_inspect:
+                shapes_args = _getfullargspec(meth)  # NB does not contain self
+                args = shapes_args.args[1:]       # peel off 'x', too
+
+                if args:
+                    shapes_list.append(args)
+
+                    # *args or **kwargs are not allowed w/automatic shapes
+                    if shapes_args.varargs is not None:
+                        raise TypeError(
+                            '*args are not allowed w/out explicit shapes')
+                    if shapes_args.varkw is not None:
+                        raise TypeError(
+                            '**kwds are not allowed w/out explicit shapes')
+                    if shapes_args.kwonlyargs:
+                        raise TypeError(
+                            'kwonly args are not allowed w/out explicit shapes')
+                    if shapes_args.defaults is not None:
+                        raise TypeError('defaults are not allowed for shapes')
+
+            if shapes_list:
+                shapes = shapes_list[0]
+
+                # make sure the signatures are consistent
+                for item in shapes_list:
+                    if item != shapes:
+                        raise TypeError('Shape arguments are inconsistent.')
+            else:
+                shapes = []
+
+        # have the arguments, construct the method from template
+        shapes_str = ', '.join(shapes) + ', ' if shapes else ''  # NB: not None
+        dct = dict(shape_arg_str=shapes_str,
+                   locscale_in=locscale_in,
+                   locscale_out=locscale_out,
+                   )
+
+        # this string is used by _attach_argparser_methods
+        self._parse_arg_template = parse_arg_template % dct
+
+        self.shapes = ', '.join(shapes) if shapes else None
+        if not hasattr(self, 'numargs'):
+            # allows more general subclassing with *args
+            self.numargs = len(shapes)
+
+    def _construct_doc(self, docdict, shapes_vals=None):
+        """Construct the instance docstring with string substitutions."""
+        tempdict = docdict.copy()
+        tempdict['name'] = self.name or 'distname'
+        tempdict['shapes'] = self.shapes or ''
+
+        if shapes_vals is None:
+            shapes_vals = ()
+        vals = ', '.join('%.3g' % val for val in shapes_vals)
+        tempdict['vals'] = vals
+
+        tempdict['shapes_'] = self.shapes or ''
+        if self.shapes and self.numargs == 1:
+            tempdict['shapes_'] += ','
+
+        if self.shapes:
+            tempdict['set_vals_stmt'] = '>>> %s = %s' % (self.shapes, vals)
+        else:
+            tempdict['set_vals_stmt'] = ''
+
+        if self.shapes is None:
+            # remove shapes from call parameters if there are none
+            for item in ['default', 'before_notes']:
+                tempdict[item] = tempdict[item].replace(
+                    "\n%(shapes)s : array_like\n    shape parameters", "")
+        for i in range(2):
+            if self.shapes is None:
+                # necessary because we use %(shapes)s in two forms (with and without ", ")
+                self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
+            try:
+                self.__doc__ = doccer.docformat(self.__doc__, tempdict)
+            except TypeError as e:
+                raise Exception("Unable to construct docstring for "
+                                "distribution \"%s\": %s" %
+                                (self.name, repr(e))) from e
+
+        # correct for empty shapes
+        self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')')
+
+    def _construct_default_doc(self, longname=None, extradoc=None,
+                               docdict=None, discrete='continuous'):
+        """Construct instance docstring from the default template."""
+        if longname is None:
+            longname = 'A'
+        if extradoc is None:
+            extradoc = ''
+        if extradoc.startswith('\n\n'):
+            extradoc = extradoc[2:]
+        self.__doc__ = ''.join(['%s %s random variable.' % (longname, discrete),
+                                '\n\n%(before_notes)s\n', docheaders['notes'],
+                                extradoc, '\n%(example)s'])
+        self._construct_doc(docdict)
+
+    def freeze(self, *args, **kwds):
+        """Freeze the distribution for the given arguments.
+
+        Parameters
+        ----------
+        arg1, arg2, arg3,... : array_like
+            The shape parameter(s) for the distribution.  Should include all
+            the non-optional arguments, may include ``loc`` and ``scale``.
+
+        Returns
+        -------
+        rv_frozen : rv_frozen instance
+            The frozen distribution.
+
+        """
+        if isinstance(self, rv_continuous):
+            return rv_continuous_frozen(self, *args, **kwds)
+        else:
+            return rv_discrete_frozen(self, *args, **kwds)
+
+    def __call__(self, *args, **kwds):
+        return self.freeze(*args, **kwds)
+    __call__.__doc__ = freeze.__doc__
+
+    # The actual calculation functions (no basic checking need be done)
+    # If these are defined, the others won't be looked at.
+    # Otherwise, the other set can be defined.
+    def _stats(self, *args, **kwds):
+        return None, None, None, None
+
+    # Noncentral moments (also known as the moment about the origin).
+    # Expressed in LaTeX, munp would be $\mu'_{n}$, i.e. "mu-sub-n-prime".
+    # The primed mu is a widely used notation for the noncentral moment.
+    def _munp(self, n, *args):
+        # Silence floating point warnings from integration.
+        with np.errstate(all='ignore'):
+            vals = self.generic_moment(n, *args)
+        return vals
+
+    def _argcheck_rvs(self, *args, **kwargs):
+        # Handle broadcasting and size validation of the rvs method.
+        # Subclasses should not have to override this method.
+        # The rule is that if `size` is not None, then `size` gives the
+        # shape of the result (integer values of `size` are treated as
+        # tuples with length 1; i.e. `size=3` is the same as `size=(3,)`.)
+        #
+        # `args` is expected to contain the shape parameters (if any), the
+        # location and the scale in a flat tuple (e.g. if there are two
+        # shape parameters `a` and `b`, `args` will be `(a, b, loc, scale)`).
+        # The only keyword argument expected is 'size'.
+        size = kwargs.get('size', None)
+        all_bcast = np.broadcast_arrays(*args)
+
+        def squeeze_left(a):
+            while a.ndim > 0 and a.shape[0] == 1:
+                a = a[0]
+            return a
+
+        # Eliminate trivial leading dimensions.  In the convention
+        # used by numpy's random variate generators, trivial leading
+        # dimensions are effectively ignored.  In other words, when `size`
+        # is given, trivial leading dimensions of the broadcast parameters
+        # in excess of the number of dimensions  in size are ignored, e.g.
+        #   >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]], size=3)
+        #   array([ 1.00104267,  3.00422496,  4.99799278])
+        # If `size` is not given, the exact broadcast shape is preserved:
+        #   >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]])
+        #   array([[[[ 1.00862899,  3.00061431,  4.99867122]]]])
+        #
+        all_bcast = [squeeze_left(a) for a in all_bcast]
+        bcast_shape = all_bcast[0].shape
+        bcast_ndim = all_bcast[0].ndim
+
+        if size is None:
+            size_ = bcast_shape
+        else:
+            size_ = tuple(np.atleast_1d(size))
+
+        # Check compatibility of size_ with the broadcast shape of all
+        # the parameters.  This check is intended to be consistent with
+        # how the numpy random variate generators (e.g. np.random.normal,
+        # np.random.beta) handle their arguments.   The rule is that, if size
+        # is given, it determines the shape of the output.  Broadcasting
+        # can't change the output size.
+
+        # This is the standard broadcasting convention of extending the
+        # shape with fewer dimensions with enough dimensions of length 1
+        # so that the two shapes have the same number of dimensions.
+        ndiff = bcast_ndim - len(size_)
+        if ndiff < 0:
+            bcast_shape = (1,)*(-ndiff) + bcast_shape
+        elif ndiff > 0:
+            size_ = (1,)*ndiff + size_
+
+        # This compatibility test is not standard.  In "regular" broadcasting,
+        # two shapes are compatible if for each dimension, the lengths are the
+        # same or one of the lengths is 1.  Here, the length of a dimension in
+        # size_ must not be less than the corresponding length in bcast_shape.
+        ok = all([bcdim == 1 or bcdim == szdim
+                  for (bcdim, szdim) in zip(bcast_shape, size_)])
+        if not ok:
+            raise ValueError("size does not match the broadcast shape of "
+                             "the parameters. %s, %s, %s" % (size, size_,
+                                                             bcast_shape))
+
+        param_bcast = all_bcast[:-2]
+        loc_bcast = all_bcast[-2]
+        scale_bcast = all_bcast[-1]
+
+        return param_bcast, loc_bcast, scale_bcast, size_
+
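+    # Illustrative size/broadcast compatibility per the check above
+    # (hypothetical values): with broadcast parameter shape (3,),
+    # ``size=(5, 3)`` is accepted (the parameter shape left-aligns to
+    # (1, 3)), while ``size=(3, 5)`` raises ValueError:
+    #
+    # >>> from scipy import stats
+    # >>> stats.norm.rvs(loc=[0., 1., 2.], size=(5, 3)).shape
+    # (5, 3)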
+    # These are the methods you must define (standard form functions)
+    # NB: generic _pdf, _logpdf, _cdf are different for
+    # rv_continuous and rv_discrete hence are defined in there
+    def _argcheck(self, *args):
+        """Default check for correct values on args and keywords.
+
+        Returns condition array of 1's where arguments are correct and
+         0's where they are not.
+
+        """
+        cond = 1
+        for arg in args:
+            cond = logical_and(cond, (asarray(arg) > 0))
+        return cond
+
+    def _get_support(self, *args, **kwargs):
+        """Return the support of the (unscaled, unshifted) distribution.
+
+        *Must* be overridden by distributions which have support dependent
+        upon the shape parameters of the distribution.  Any such override
+        *must not* set or change any of the class members, as these members
+        are shared amongst all instances of the distribution.
+
+        Parameters
+        ----------
+        arg1, arg2, ... : array_like
+            The shape parameter(s) for the distribution (see docstring of the
+            instance object for more information).
+
+        Returns
+        -------
+        a, b : numeric (float, or int or +/-np.inf)
+            end-points of the distribution's support for the specified
+            shape parameters.
+        """
+        return self.a, self.b
+
+    def _support_mask(self, x, *args):
+        a, b = self._get_support(*args)
+        with np.errstate(invalid='ignore'):
+            return (a <= x) & (x <= b)
+
+    def _open_support_mask(self, x, *args):
+        a, b = self._get_support(*args)
+        with np.errstate(invalid='ignore'):
+            return (a < x) & (x < b)
+
+    def _rvs(self, *args, size=None, random_state=None):
+        # This method must handle size being a tuple, and it must
+        # properly broadcast *args and size.  size might be
+        # an empty tuple, which means a scalar random variate is to be
+        # generated.
+
+        # Use basic inverse cdf algorithm for RV generation as default.
+        U = random_state.uniform(size=size)
+        Y = self._ppf(U, *args)
+        return Y
+
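+    # The default _rvs above is inverse-transform sampling: if U is
+    # uniform on (0, 1), then ppf(U) follows the target distribution.
+    # For the standard exponential, for instance, ppf(q) = -log(1 - q),
+    # so -log(1 - U) is exponentially distributed.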
+    def _logcdf(self, x, *args):
+        with np.errstate(divide='ignore'):
+            return log(self._cdf(x, *args))
+
+    def _sf(self, x, *args):
+        return 1.0-self._cdf(x, *args)
+
+    def _logsf(self, x, *args):
+        with np.errstate(divide='ignore'):
+            return log(self._sf(x, *args))
+
+    def _ppf(self, q, *args):
+        return self._ppfvec(q, *args)
+
+    def _isf(self, q, *args):
+        return self._ppf(1.0-q, *args)  # use correct _ppf for subclasses
+
+    # These are actually called, and should not be overwritten if you
+    # want to keep error checking.
+    def rvs(self, *args, **kwds):
+        """Random variates of given type.
+
+        Parameters
+        ----------
+        arg1, arg2, arg3,... : array_like
+            The shape parameter(s) for the distribution (see docstring of the
+            instance object for more information).
+        loc : array_like, optional
+            Location parameter (default=0).
+        scale : array_like, optional
+            Scale parameter (default=1).
+        size : int or tuple of ints, optional
+            Defining number of random variates (default is 1).
+        random_state : {None, int, `numpy.random.Generator`,
+                        `numpy.random.RandomState`}, optional
+
+            If `random_state` is None (or `np.random`), the
+            `numpy.random.RandomState` singleton is used.
+            If `random_state` is an int, a new ``RandomState`` instance is
+            used, seeded with `random_state`.
+            If `random_state` is already a ``Generator`` or ``RandomState``
+            instance, that instance is used.
+
+        Returns
+        -------
+        rvs : ndarray or scalar
+            Random variates of given `size`.
+
+        """
+        discrete = kwds.pop('discrete', None)
+        rndm = kwds.pop('random_state', None)
+        args, loc, scale, size = self._parse_args_rvs(*args, **kwds)
+        cond = logical_and(self._argcheck(*args), (scale >= 0))
+        if not np.all(cond):
+            message = ("Domain error in arguments. The `scale` parameter must "
+                       "be positive for all distributions, and many "
+                       "distributions have restrictions on shape parameters. "
+                       f"Please see the `scipy.stats.{self.name}` "
+                       "documentation for details.")
+            raise ValueError(message)
+
+        if np.all(scale == 0):
+            return loc*ones(size, 'd')
+
+        # extra gymnastics needed for a custom random_state
+        if rndm is not None:
+            random_state_saved = self._random_state
+            random_state = check_random_state(rndm)
+        else:
+            random_state = self._random_state
+
+        vals = self._rvs(*args, size=size, random_state=random_state)
+
+        vals = vals * scale + loc
+
+        # do not forget to restore the _random_state
+        if rndm is not None:
+            self._random_state = random_state_saved
+
+        # Cast to int if discrete
+        if discrete and not isinstance(self, rv_sample):
+            if size == ():
+                vals = int(vals)
+            else:
+                vals = vals.astype(np.int64)
+
+        return vals
+
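+    # Illustrative call through the public rvs entry point above
+    # (hypothetical parameter values; the draws depend on the seed):
+    #
+    # >>> from scipy import stats
+    # >>> stats.norm.rvs(loc=1.0, scale=2.0, size=3, random_state=42).shape
+    # (3,)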
+    def stats(self, *args, **kwds):
+        """Some statistics of the given RV.
+
+        Parameters
+        ----------
+        arg1, arg2, arg3,... : array_like
+            The shape parameter(s) for the distribution (see docstring of the
+            instance object for more information)
+        loc : array_like, optional
+            location parameter (default=0)
+        scale : array_like, optional (continuous RVs only)
+            scale parameter (default=1)
+        moments : str, optional
+            composed of letters ['mvsk'] defining which moments to compute:
+            'm' = mean,
+            'v' = variance,
+            's' = (Fisher's) skew,
+            'k' = (Fisher's) kurtosis.
+            (default is 'mv')
+
+        Returns
+        -------
+        stats : sequence
+            of requested moments.
+
+        """
+        args, loc, scale, moments = self._parse_args_stats(*args, **kwds)
+        # scale = 1 by construction for discrete RVs
+        loc, scale = map(asarray, (loc, scale))
+        args = tuple(map(asarray, args))
+        cond = self._argcheck(*args) & (scale > 0) & (loc == loc)
+        output = []
+        default = np.full(shape(cond), fill_value=self.badvalue)
+
+        # Use only entries that are valid in calculation
+        if np.any(cond):
+            goodargs = argsreduce(cond, *(args+(scale, loc)))
+            scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
+
+            if self._stats_has_moments:
+                mu, mu2, g1, g2 = self._stats(*goodargs,
+                                              **{'moments': moments})
+            else:
+                mu, mu2, g1, g2 = self._stats(*goodargs)
+
+            if 'm' in moments:
+                if mu is None:
+                    mu = self._munp(1, *goodargs)
+                out0 = default.copy()
+                place(out0, cond, mu * scale + loc)
+                output.append(out0)
+
+            if 'v' in moments:
+                if mu2 is None:
+                    mu2p = self._munp(2, *goodargs)
+                    if mu is None:
+                        mu = self._munp(1, *goodargs)
+                    # if mean is inf then var is also inf
+                    with np.errstate(invalid='ignore'):
+                        mu2 = np.where(~np.isinf(mu), mu2p - mu**2, np.inf)
+                out0 = default.copy()
+                place(out0, cond, mu2 * scale * scale)
+                output.append(out0)
+
+            if 's' in moments:
+                if g1 is None:
+                    mu3p = self._munp(3, *goodargs)
+                    if mu is None:
+                        mu = self._munp(1, *goodargs)
+                    if mu2 is None:
+                        mu2p = self._munp(2, *goodargs)
+                        mu2 = mu2p - mu * mu
+                    with np.errstate(invalid='ignore'):
+                        mu3 = (-mu*mu - 3*mu2)*mu + mu3p
+                        g1 = mu3 / np.power(mu2, 1.5)
+                out0 = default.copy()
+                place(out0, cond, g1)
+                output.append(out0)
+
+            if 'k' in moments:
+                if g2 is None:
+                    mu4p = self._munp(4, *goodargs)
+                    if mu is None:
+                        mu = self._munp(1, *goodargs)
+                    if mu2 is None:
+                        mu2p = self._munp(2, *goodargs)
+                        mu2 = mu2p - mu * mu
+                    if g1 is None:
+                        mu3 = None
+                    else:
+                        # (mu2**1.5) breaks down for nan and inf
+                        mu3 = g1 * np.power(mu2, 1.5)
+                    if mu3 is None:
+                        mu3p = self._munp(3, *goodargs)
+                        with np.errstate(invalid='ignore'):
+                            mu3 = (-mu * mu - 3 * mu2) * mu + mu3p
+                    with np.errstate(invalid='ignore'):
+                        mu4 = ((-mu**2 - 6*mu2) * mu - 4*mu3)*mu + mu4p
+                        g2 = mu4 / mu2**2.0 - 3.0
+                out0 = default.copy()
+                place(out0, cond, g2)
+                output.append(out0)
+        else:  # no valid args
+            output = [default.copy() for _ in moments]
+
+        output = [out[()] for out in output]
+        if len(output) == 1:
+            return output[0]
+        else:
+            return tuple(output)
+
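+    # Illustrative use of stats (values exact for the standard normal):
+    #
+    # >>> from scipy import stats
+    # >>> stats.norm.stats(moments='mvsk')
+    # (0.0, 1.0, 0.0, 0.0)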
+    def entropy(self, *args, **kwds):
+        """Differential entropy of the RV.
+
+        Parameters
+        ----------
+        arg1, arg2, arg3,... : array_like
+            The shape parameter(s) for the distribution (see docstring of the
+            instance object for more information).
+        loc : array_like, optional
+            Location parameter (default=0).
+        scale : array_like, optional  (continuous distributions only).
+            Scale parameter (default=1).
+
+        Notes
+        -----
+        Entropy is defined base `e`:
+
+        >>> import numpy as np
+        >>> drv = rv_discrete(values=((0, 1), (0.5, 0.5)))
+        >>> np.allclose(drv.entropy(), np.log(2.0))
+        True
+
+        """
+        args, loc, scale = self._parse_args(*args, **kwds)
+        # NB: for discrete distributions scale=1 by construction in _parse_args
+        loc, scale = map(asarray, (loc, scale))
+        args = tuple(map(asarray, args))
+        cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
+        output = zeros(shape(cond0), 'd')
+        place(output, (1-cond0), self.badvalue)
+        goodargs = argsreduce(cond0, scale, *args)
+        goodscale = goodargs[0]
+        goodargs = goodargs[1:]
+        place(output, cond0, self.vecentropy(*goodargs) + log(goodscale))
+        return output[()]
+
+    def moment(self, order=None, *args, **kwds):
+        """non-central moment of distribution of specified order.
+
+        .. deprecated:: 1.9.0
+           Parameter `n` is replaced by parameter `order` to avoid name
+           collisions with the shape parameter `n` of several distributions.
+           Parameter `n` will be removed in SciPy 1.11.0.
+
+        Parameters
+        ----------
+        order : int, order >= 1
+            Order of moment.
+        arg1, arg2, arg3,... : float
+            The shape parameter(s) for the distribution (see docstring of the
+            instance object for more information).
+        loc : array_like, optional
+            location parameter (default=0)
+        scale : array_like, optional
+            scale parameter (default=1)
+
+        """
+        # This function was originally written with parameter `n`, but `n`
+        # is also the name of many distribution shape parameters.
+        # This block allows the function to accept both `n` and its
+        # replacement `order` during a deprecation period; it can be removed
+        # in the second release after 1.9.0.
+        # The logic to provide a DeprecationWarning only when `n` is passed
+        # as a keyword, accept the new keyword `order`, and otherwise be
+        # backward-compatible deserves explanation. We need to look out for
+        # the following:
+        # * Does the distribution have a shape named `n`?
+        # * Is `order` provided? It doesn't matter whether it is provided as a
+        #   positional or keyword argument; it will be used as the order of the
+        #   moment rather than a distribution shape parameter because:
+        #   - The first positional argument of `moment` has always been the
+        #     order of the moment.
+        #   - The keyword `order` is new, so it's unambiguous that it refers to
+        #     the order of the moment.
+        # * Is `n` provided as a keyword argument? It _does_ matter whether it
+        #   is provided as a positional or keyword argument.
+        #   - The first positional argument of `moment` has always been the
+        #     order of moment, but
+        #   - if `n` is provided as a keyword argument, its meaning depends
+        #     on whether the distribution accepts `n` as a shape parameter.
+        has_shape_n = (self.shapes is not None
+                       and "n" in (self.shapes.split(", ")))
+        got_order = order is not None
+        got_keyword_n = kwds.get("n", None) is not None
+
+        # These lead to the following cases.
+        # Case A: If the distribution _does_ accept `n` as a shape
+        # 1. If both `order` and `n` are provided, this is now OK:
+        #    it is unambiguous that `order` is the order of the moment and `n`
+        #    is the shape parameter. Previously, this would have caused an
+        #    error because `n` was provided both as a keyword argument and
+        #    as the first positional argument. I don't think it is credible for
+        #    users to rely on this error in their code, though, so I don't see
+        #    this as a backward compatibility break.
+        # 2. If only `n` is provided (as a keyword argument), this would have
+        #    been an error in the past because `n` would have been treated as
+        #    the order of the moment while the shape parameter would be
+        #    missing. It is still the same type of error, but for a different
+        #    reason: now, `n` is treated as the shape parameter while the
+        #    order of the moment is missing.
+        # 3. If only `order` is provided, no special treatment is needed.
+        #    Clearly this value is intended to be the order of the moment,
+        #    and the rest of the function will determine whether `n` is
+        #    available as a shape parameter in `args`.
+        # 4. If neither `n` nor `order` is provided, this would have been an
+        #    error (order of the moment is not provided) and it is still an
+        #    error for the same reason.
+
+        # Case B: the distribution does _not_ accept `n` as a shape
+        # 1. If both `order` and `n` are provided, this was an error, and it
+        #    still is an error: two values for same parameter.
+        # 2. If only `n` is provided (as a keyword argument), this was OK and
+        #    is still OK, but there should now be a `DeprecationWarning`. The
+        #    value of `n` should be removed from `kwds` and stored in `order`.
+        # 3. If only `order` is provided, there was no problem before providing
+        #    only the first argument of `moment`, and there is no problem with
+        #    that now.
+        # 4. If neither `n` nor `order` is provided, this would have been an
+        #    error (order of the moment is not provided), and it is still an
+        #    error for the same reason.
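+        # For example (illustrative): for a distribution with a shape
+        # parameter named `n` (e.g. `binom`), `binom.moment(2, n=10, p=0.5)`
+        # is case A1: `order` is 2 and `n` is the shape. For a distribution
+        # without such a shape (e.g. `norm`), `norm.moment(n=2)` is case B2:
+        # it warns and treats `n` as the order of the moment.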
+        if not got_order and ((not got_keyword_n)  # A4 and B4
+                              or (got_keyword_n and has_shape_n)):  # A2
+            message = ("moment() missing 1 required "
+                       "positional argument: `order`")
+            raise TypeError(message)
+
+        if got_keyword_n and not has_shape_n:
+            if got_order:  # B1
+                # this will change to "moment got unexpected argument n"
+                message = "moment() got multiple values for first argument"
+                raise TypeError(message)
+            else:  # B2
+                message = ("Use of keyword argument 'n' for method 'moment' is"
+                           " deprecated and will be removed in SciPy 1.11.0. "
+                           "Use first positional argument or keyword argument"
+                           " 'order' instead.")
+                order = kwds.pop("n")
+                warnings.warn(message, DeprecationWarning, stacklevel=2)
+        n = order
+        # No special treatment of A1, A3, or B3 is needed because the order
+        # of the moment is now in variable `n` and the shape parameter, if
+        # needed, will be fished out of `args` or `kwds` by _parse_args
+        # A3 might still cause an error if the shape parameter called `n`
+        # is not found in `args`.
+
+        shapes, loc, scale = self._parse_args(*args, **kwds)
+        args = np.broadcast_arrays(*(*shapes, loc, scale))
+        *shapes, loc, scale = args
+
+        i0 = np.logical_and(self._argcheck(*shapes), scale > 0)
+        i1 = np.logical_and(i0, loc == 0)
+        i2 = np.logical_and(i0, loc != 0)
+
+        args = argsreduce(i0, *shapes, loc, scale)
+        *shapes, loc, scale = args
+
+        if (floor(n) != n):
+            raise ValueError("Moment must be an integer.")
+        if (n < 0):
+            raise ValueError("Moment must be positive.")
+        mu, mu2, g1, g2 = None, None, None, None
+        if (n > 0) and (n < 5):
+            if self._stats_has_moments:
+                mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'mvsk'}[n]}
+            else:
+                mdict = {}
+            mu, mu2, g1, g2 = self._stats(*shapes, **mdict)
+        val = np.empty(loc.shape)  # val needs to be indexed by loc
+        val[...] = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, shapes)
+
+        # Convert to transformed X = L + S*Y
+        # E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n)
+        result = zeros(i0.shape)
+        place(result, ~i0, self.badvalue)
+
+        if i1.any():
+            res1 = scale[loc == 0]**n * val[loc == 0]
+            place(result, i1, res1)
+
+        if i2.any():
+            mom = [mu, mu2, g1, g2]
+            arrs = [i for i in mom if i is not None]
+            idx = [i for i in range(4) if mom[i] is not None]
+            if idx:  # note: `any(idx)` would wrongly be False when idx == [0]
+                arrs = argsreduce(loc != 0, *arrs)
+                j = 0
+                for i in idx:
+                    mom[i] = arrs[j]
+                    j += 1
+            mu, mu2, g1, g2 = mom
+            args = argsreduce(loc != 0, *shapes, loc, scale, val)
+            *shapes, loc, scale, val = args
+
+            res2 = zeros(loc.shape, dtype='d')
+            fac = scale / loc
+            for k in range(n):
+                valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp,
+                                          shapes)
+                res2 += comb(n, k, exact=True)*fac**k * valk
+            res2 += fac**n * val
+            res2 *= loc**n
+            place(result, i2, res2)
+
+        return result[()]
+
+    def median(self, *args, **kwds):
+        """Median of the distribution.
+
+        Parameters
+        ----------
+        arg1, arg2, arg3,... : array_like
+            The shape parameter(s) for the distribution (see docstring of the
+            instance object for more information)
+        loc : array_like, optional
+            Location parameter, Default is 0.
+        scale : array_like, optional
+            Scale parameter, Default is 1.
+
+        Returns
+        -------
+        median : float
+            The median of the distribution.
+
+        See Also
+        --------
+        rv_discrete.ppf
+            Inverse of the CDF
+
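+        Examples
+        --------
+        For instance (illustrative), shifting a standard normal shifts its
+        median accordingly:
+
+        >>> from scipy.stats import norm
+        >>> float(norm.median(loc=1))
+        1.0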
+        """
+        return self.ppf(0.5, *args, **kwds)
+
+    def mean(self, *args, **kwds):
+        """Mean of the distribution.
+
+        Parameters
+        ----------
+        arg1, arg2, arg3,... : array_like
+            The shape parameter(s) for the distribution (see docstring of the
+            instance object for more information)
+        loc : array_like, optional
+            location parameter (default=0)
+        scale : array_like, optional
+            scale parameter (default=1)
+
+        Returns
+        -------
+        mean : float
+            the mean of the distribution
+
+        """
+        kwds['moments'] = 'm'
+        res = self.stats(*args, **kwds)
+        if isinstance(res, ndarray) and res.ndim == 0:
+            return res[()]
+        return res
+
+    def var(self, *args, **kwds):
+        """Variance of the distribution.
+
+        Parameters
+        ----------
+        arg1, arg2, arg3,... : array_like
+            The shape parameter(s) for the distribution (see docstring of the
+            instance object for more information)
+        loc : array_like, optional
+            location parameter (default=0)
+        scale : array_like, optional
+            scale parameter (default=1)
+
+        Returns
+        -------
+        var : float
+            the variance of the distribution
+
+        """
+        kwds['moments'] = 'v'
+        res = self.stats(*args, **kwds)
+        if isinstance(res, ndarray) and res.ndim == 0:
+            return res[()]
+        return res
+
+    def std(self, *args, **kwds):
+        """Standard deviation of the distribution.
+
+        Parameters
+        ----------
+        arg1, arg2, arg3,... : array_like
+            The shape parameter(s) for the distribution (see docstring of the
+            instance object for more information)
+        loc : array_like, optional
+            location parameter (default=0)
+        scale : array_like, optional
+            scale parameter (default=1)
+
+        Returns
+        -------
+        std : float
+            standard deviation of the distribution
+
+        """
+        kwds['moments'] = 'v'
+        res = sqrt(self.stats(*args, **kwds))
+        return res
+
+    def interval(self, confidence=None, *args, **kwds):
+        """Confidence interval with equal areas around the median.
+
+        .. deprecated:: 1.9.0
+           Parameter `alpha` is replaced by parameter `confidence` to avoid
+           name collisions with the shape parameter `alpha` of some
+           distributions. Parameter `alpha` will be removed in SciPy 1.11.0.
+
+        Parameters
+        ----------
+        confidence : array_like of float
+            Probability that an rv will be drawn from the returned range.
+            Each value should be in the range [0, 1].
+        arg1, arg2, ... : array_like
+            The shape parameter(s) for the distribution (see docstring of the
+            instance object for more information).
+        loc : array_like, optional
+            location parameter, Default is 0.
+        scale : array_like, optional
+            scale parameter, Default is 1.
+
+        Returns
+        -------
+        a, b : ndarray of float
+            end-points of range that contain ``100 * confidence %`` of the rv's
+            possible values.
+
+        Notes
+        -----
+        This is implemented as ``ppf([p_tail, 1-p_tail])``, where
+        ``ppf`` is the inverse cumulative distribution function and
+        ``p_tail = (1-confidence)/2``. Suppose ``[c, d]`` is the support of a
+        discrete distribution; then ``ppf([0, 1]) == (c-1, d)``. Therefore,
+        when ``confidence=1`` and the distribution is discrete, the left end
+        of the interval will be beyond the support of the distribution.
+        For discrete distributions, the interval will limit the probability
+        in each tail to be less than or equal to ``p_tail`` (usually
+        strictly less).
+
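+        Examples
+        --------
+        For instance (illustrative), the central 95% interval of the
+        standard normal (values shown rounded):
+
+        >>> from scipy.stats import norm
+        >>> lo, hi = norm.interval(0.95)
+        >>> round(float(lo), 2), round(float(hi), 2)
+        (-1.96, 1.96)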
+        """
+        # This function was originally written with parameter `alpha`, but
+        # `alpha` is also the name of a shape parameter of two distributions.
+        # This block allows the function to accept both `alpha` and its
+        # replacement `confidence` during a deprecation period; it can be
+        # removed in the second release after 1.9.0.
+        # See description of logic in `moment` method.
+        has_shape_alpha = (self.shapes is not None
+                           and "alpha" in (self.shapes.split(", ")))
+        got_confidence = confidence is not None
+        got_keyword_alpha = kwds.get("alpha", None) is not None
+
+        if not got_confidence and ((not got_keyword_alpha)
+                                   or (got_keyword_alpha and has_shape_alpha)):
+            message = ("interval() missing 1 required positional argument: "
+                       "`confidence`")
+            raise TypeError(message)
+
+        if got_keyword_alpha and not has_shape_alpha:
+            if got_confidence:
+                # this will change to "interval got unexpected argument alpha"
+                message = "interval() got multiple values for first argument"
+                raise TypeError(message)
+            else:
+                message = ("Use of keyword argument 'alpha' for method "
+                           "'interval' is deprecated and will be removed in "
+                           "SciPy 1.11.0. Use first positional argument or "
+                           "keyword argument 'confidence' instead.")
+                confidence = kwds.pop("alpha")
+                warnings.warn(message, DeprecationWarning, stacklevel=2)
+        alpha = confidence
+
+        alpha = asarray(alpha)
+        if np.any((alpha > 1) | (alpha < 0)):
+            raise ValueError("alpha must be between 0 and 1 inclusive")
+        q1 = (1.0-alpha)/2
+        q2 = (1.0+alpha)/2
+        a = self.ppf(q1, *args, **kwds)
+        b = self.ppf(q2, *args, **kwds)
+        return a, b
+
+    def support(self, *args, **kwargs):
+        """Support of the distribution.
+
+        Parameters
+        ----------
+        arg1, arg2, ... : array_like
+            The shape parameter(s) for the distribution (see docstring of the
+            instance object for more information).
+        loc : array_like, optional
+            location parameter, Default is 0.
+        scale : array_like, optional
+            scale parameter, Default is 1.
+
+        Returns
+        -------
+        a, b : array_like
+            end-points of the distribution's support.
+
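+        Examples
+        --------
+        For instance (illustrative), the support of a uniform distribution
+        on ``[2, 5]``:
+
+        >>> from scipy.stats import uniform
+        >>> a, b = uniform.support(loc=2, scale=3)
+        >>> float(a), float(b)
+        (2.0, 5.0)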
+        """
+        args, loc, scale = self._parse_args(*args, **kwargs)
+        arrs = np.broadcast_arrays(*args, loc, scale)
+        args, loc, scale = arrs[:-2], arrs[-2], arrs[-1]
+        cond = self._argcheck(*args) & (scale > 0)
+        _a, _b = self._get_support(*args)
+        if cond.all():
+            return _a * scale + loc, _b * scale + loc
+        elif cond.ndim == 0:
+            return self.badvalue, self.badvalue
+        # promote bounds to at least float to fill in the badvalue
+        _a, _b = np.asarray(_a).astype('d'), np.asarray(_b).astype('d')
+        out_a, out_b = _a * scale + loc, _b * scale + loc
+        place(out_a, 1-cond, self.badvalue)
+        place(out_b, 1-cond, self.badvalue)
+        return out_a, out_b
+
+    def nnlf(self, theta, x):
+        """Negative loglikelihood function.
+
+        Notes
+        -----
+        This is ``-sum(log pdf(x, theta), axis=0)`` where `theta` are the
+        parameters (including loc and scale).
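+
+        For example (illustrative), for the standard normal at the single
+        point ``x = 0`` with ``theta = (0, 1)``, this equals
+        ``0.5*log(2*pi)``, approximately ``0.919``.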
+        """
+        loc, scale, args = self._unpack_loc_scale(theta)
+        if not self._argcheck(*args) or scale <= 0:
+            return inf
+        x = (asarray(x)-loc) / scale
+        n_log_scale = len(x) * log(scale)
+        if np.any(~self._support_mask(x, *args)):
+            return inf
+        return self._nnlf(x, *args) + n_log_scale
+
+    def _nnlf(self, x, *args):
+        return -np.sum(self._logpxf(x, *args), axis=0)
+
+    def _nlff_and_penalty(self, x, args, log_fitfun):
+        # negative log fit function
+        cond0 = ~self._support_mask(x, *args)
+        n_bad = np.count_nonzero(cond0, axis=0)
+        if n_bad > 0:
+            x = argsreduce(~cond0, x)[0]
+        logff = log_fitfun(x, *args)
+        finite_logff = np.isfinite(logff)
+        n_bad += np.sum(~finite_logff, axis=0)
+        if n_bad > 0:
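+            # Each bad point adds a large but finite penalty, steering the
+            # optimizer back toward feasible parameters instead of producing
+            # an infinite objective.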
+            penalty = n_bad * log(_XMAX) * 100
+            return -np.sum(logff[finite_logff], axis=0) + penalty
+        return -np.sum(logff, axis=0)
+
+    def _penalized_nnlf(self, theta, x):
+        """Penalized negative loglikelihood function.
+        i.e., - sum (log pdf(x, theta), axis=0) + penalty
+        where theta are the parameters (including loc and scale)
+        """
+        loc, scale, args = self._unpack_loc_scale(theta)
+        if not self._argcheck(*args) or scale <= 0:
+            return inf
+        x = asarray((x-loc) / scale)
+        n_log_scale = len(x) * log(scale)
+        return self._nlff_and_penalty(x, args, self._logpxf) + n_log_scale
+
+    def _penalized_nlpsf(self, theta, x):
+        """Penalized negative log product spacing function.
+        i.e., - sum (log (diff (cdf (x, theta))), axis=0) + penalty
+        where theta are the parameters (including loc and scale)
+        Follows reference [1] of scipy.stats.fit
+        """
+        loc, scale, args = self._unpack_loc_scale(theta)
+        if not self._argcheck(*args) or scale <= 0:
+            return inf
+        x = (np.sort(x) - loc)/scale
+
+        def log_psf(x, *args):
+            x, lj = np.unique(x, return_counts=True)  # fast for sorted x
+            cdf_data = self._cdf(x, *args) if x.size else []
+            if not (x.size and 1 - cdf_data[-1] <= 0):
+                cdf = np.concatenate(([0], cdf_data, [1]))
+                lj = np.concatenate((lj, [1]))
+            else:
+                cdf = np.concatenate(([0], cdf_data))
+            # here we could use logcdf w/ logsumexp trick to take differences,
+            # but in the context of the method, it seems unlikely to matter
+            return lj * np.log(np.diff(cdf) / lj)
+
+        return self._nlff_and_penalty(x, args, log_psf)
+
+
+class _ShapeInfo:
+    def __init__(self, name, integrality=False, domain=(-np.inf, np.inf),
+                 inclusive=(True, True)):
+        self.name = name
+        self.integrality = integrality
+
+        domain = list(domain)
+        if np.isfinite(domain[0]) and not inclusive[0]:
+            domain[0] = np.nextafter(domain[0], np.inf)
+        if np.isfinite(domain[1]) and not inclusive[1]:
+            domain[1] = np.nextafter(domain[1], -np.inf)
+        self.domain = domain
+
+
+def _get_fixed_fit_value(kwds, names):
+    """
+    Given names such as `['f0', 'fa', 'fix_a']`, check that there is
+    at most one non-None value in `kwds` associated with those names.
+    Return that value, or None if none of the names occur in `kwds`.
+    As a side effect, all occurrences of those names in `kwds` are
+    removed.
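+
+    For example (illustrative), with ``kwds = {'fa': 2.0}`` and
+    ``names = ['f0', 'fa', 'fix_a']``, this returns ``2.0`` and removes
+    ``'fa'`` from ``kwds``.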
+    """
+    vals = [(name, kwds.pop(name)) for name in names if name in kwds]
+    if len(vals) > 1:
+        repeated = [name for name, val in vals]
+        raise ValueError("fit method got multiple keyword arguments to "
+                         "specify the same fixed parameter: " +
+                         ', '.join(repeated))
+    return vals[0][1] if vals else None
+
+
+#  continuous random variables: implement maybe later
+#
+#  hf  --- Hazard Function (PDF / SF)
+#  chf  --- Cumulative hazard function (-log(SF))
+#  psf --- Probability sparsity function (reciprocal of the pdf) in
+#                units of percent-point-function (as a function of q).
+#                Also, the derivative of the percent-point function.
+
+
+class rv_continuous(rv_generic):
+    """A generic continuous random variable class meant for subclassing.
+
+    `rv_continuous` is a base class to construct specific distribution classes
+    and instances for continuous random variables. It cannot be used
+    directly as a distribution.
+
+    Parameters
+    ----------
+    momtype : int, optional
+        The type of generic moment calculation to use: 0 for pdf, 1 (default)
+        for ppf.
+    a : float, optional
+        Lower bound of the support of the distribution, default is minus
+        infinity.
+    b : float, optional
+        Upper bound of the support of the distribution, default is plus
+        infinity.
+    xtol : float, optional
+        The tolerance for fixed point calculation for generic ppf.
+    badvalue : float, optional
+        The value in result arrays that indicates that some argument
+        restriction is violated; default is np.nan.
+    name : str, optional
+        The name of the instance. This string is used to construct the default
+        example for distributions.
+    longname : str, optional
+        This string is used as part of the first line of the docstring returned
+        when a subclass has no docstring of its own. Note: `longname` exists
+        for backwards compatibility, do not use for new subclasses.
+    shapes : str, optional
+        The shape of the distribution. For example ``"m, n"`` for a
+        distribution that takes two integers as the two shape arguments for all
+        its methods. If not provided, shape parameters will be inferred from
+        the signature of the private methods, ``_pdf`` and ``_cdf`` of the
+        instance.
+    extradoc :  str, optional, deprecated
+        This string is used as the last part of the docstring returned when a
+        subclass has no docstring of its own. Note: `extradoc` exists for
+        backwards compatibility and will be removed in SciPy 1.11.0, do not
+        use for new subclasses.
+    seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
+        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
+        singleton is used.
+        If `seed` is an int, a new ``RandomState`` instance is used,
+        seeded with `seed`.
+        If `seed` is already a ``Generator`` or ``RandomState`` instance then
+        that instance is used.
+
+    Methods
+    -------
+    rvs
+    pdf
+    logpdf
+    cdf
+    logcdf
+    sf
+    logsf
+    ppf
+    isf
+    moment
+    stats
+    entropy
+    expect
+    median
+    mean
+    std
+    var
+    interval
+    __call__
+    fit
+    fit_loc_scale
+    nnlf
+    support
+
+    Notes
+    -----
+    Public methods of an instance of a distribution class (e.g., ``pdf``,
+    ``cdf``) check their arguments and pass valid arguments to private,
+    computational methods (``_pdf``, ``_cdf``). For ``pdf(x)``, ``x`` is valid
+    if it is within the support of the distribution.
+    Whether a shape parameter is valid is decided by an ``_argcheck`` method
+    (which defaults to checking that its arguments are strictly positive.)
+
+    **Subclassing**
+
+    New random variables can be defined by subclassing the `rv_continuous` class
+    and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized
+    to location 0 and scale 1).
+
+    If positive argument checking is not correct for your RV
+    then you will also need to re-define the ``_argcheck`` method.
+
+    For most of the scipy.stats distributions, the support interval doesn't
+    depend on the shape parameters. ``x`` being in the support interval is
+    equivalent to ``self.a <= x <= self.b``.  If either of the endpoints of
+    the support do depend on the shape parameters, then
+    i) the distribution must implement the ``_get_support`` method; and
+    ii) those dependent endpoints must be omitted from the distribution's
+    call to the ``rv_continuous`` initializer.
+
+    Correct, but potentially slow, defaults exist for the remaining
+    methods, but for speed and/or accuracy you can override::
+
+      _logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf
+
+    The default method ``_rvs`` relies on the inverse of the cdf, ``_ppf``,
+    applied to a uniform random variate. In order to generate random variates
+    efficiently, either the default ``_ppf`` needs to be overwritten (e.g.
+    if the inverse cdf can be expressed in an explicit form) or a sampling
+    method needs to be implemented in a custom ``_rvs`` method.
+
+    If possible, you should override ``_isf``, ``_sf`` or ``_logsf``.
+    The main reason would be to improve numerical accuracy: for example,
+    the survival function ``_sf`` is computed as ``1 - _cdf`` which can
+    result in loss of precision if ``_cdf(x)`` is close to one.
+
+    **Methods that can be overwritten by subclasses**
+    ::
+
+      _rvs
+      _pdf
+      _cdf
+      _sf
+      _ppf
+      _isf
+      _stats
+      _munp
+      _entropy
+      _argcheck
+      _get_support
+
+    There are additional (internal and private) generic methods that can
+    be useful for cross-checking and for debugging, but might not work in
+    all cases when directly called.
+
+    A note on ``shapes``: subclasses need not specify them explicitly. In this
+    case, `shapes` will be automatically deduced from the signatures of the
+    overridden methods (`pdf`, `cdf` etc).
+    If, for some reason, you prefer to avoid relying on introspection, you can
+    specify ``shapes`` explicitly as an argument to the instance constructor.
+
+
+    **Frozen Distributions**
+
+    Normally, you must provide shape parameters (and, optionally, location
+    and scale parameters) to each call of a method of a distribution.
+
+    Alternatively, the object may be called (as a function) to fix the shape,
+    location, and scale parameters, returning a "frozen" continuous RV object:
+
+    rv = generic(<shape(s)>, loc=0, scale=1)
+        `rv_frozen` object with the same methods but holding the given shape,
+        location, and scale fixed
+
+    **Statistics**
+
+    Statistics are computed using numerical integration by default.
+    For speed you can redefine this using ``_stats``:
+
+     - take shape parameters and return mu, mu2, g1, g2
+     - If you can't compute one of these, return it as None
+     - Can also be defined with a keyword argument ``moments``, which is a
+       string composed of "m", "v", "s", and/or "k".
+       Only the components appearing in the string should be computed and
+       returned in the order "m", "v", "s", or "k" with missing values
+       returned as None.
+
+    Alternatively, you can override ``_munp``, which takes ``n`` and shape
+    parameters and returns the n-th non-central moment of the distribution.
+
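+    For example, a subclass with standard normal statistics could implement
+    ``_stats`` as (sketch)::
+
+        def _stats(self):
+            # mean, variance, skewness, excess kurtosis
+            return 0.0, 1.0, 0.0, 0.0
+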
+    Examples
+    --------
+    To create a new Gaussian distribution, we would do the following:
+
+    >>> from scipy.stats import rv_continuous
+    >>> class gaussian_gen(rv_continuous):
+    ...     "Gaussian distribution"
+    ...     def _pdf(self, x):
+    ...         return np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi)
+    >>> gaussian = gaussian_gen(name='gaussian')
+
+    ``scipy.stats`` distributions are *instances*, so here we subclass
+    `rv_continuous` and create an instance. With this, we now have
+    a fully functional distribution with all relevant methods automagically
+    generated by the framework.
+
+    Note that above we defined a standard normal distribution, with zero mean
+    and unit variance. Shifting and scaling of the distribution can be done
+    by using ``loc`` and ``scale`` parameters: ``gaussian.pdf(x, loc, scale)``
+    essentially computes ``y = (x - loc) / scale`` and
+    ``gaussian._pdf(y) / scale``.
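+
+    The instance can then be used like any built-in distribution; for
+    example (values shown rounded):
+
+    >>> round(float(gaussian.pdf(0.0)), 4)  # 1/sqrt(2*pi)
+    0.3989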
+
+    """
+    def __init__(self, momtype=1, a=None, b=None, xtol=1e-14,
+                 badvalue=None, name=None, longname=None,
+                 shapes=None, extradoc=None, seed=None):
+
+        super().__init__(seed)
+
+        if extradoc is not None:
+            warnings.warn("extradoc is deprecated and will be removed in "
+                          "SciPy 1.11.0", DeprecationWarning)
+
+        # save the ctor parameters, cf generic freeze
+        self._ctor_param = dict(
+            momtype=momtype, a=a, b=b, xtol=xtol,
+            badvalue=badvalue, name=name, longname=longname,
+            shapes=shapes, extradoc=extradoc, seed=seed)
+
+        if badvalue is None:
+            badvalue = nan
+        if name is None:
+            name = 'Distribution'
+        self.badvalue = badvalue
+        self.name = name
+        self.a = a
+        self.b = b
+        if a is None:
+            self.a = -inf
+        if b is None:
+            self.b = inf
+        self.xtol = xtol
+        self.moment_type = momtype
+        self.shapes = shapes
+        self.extradoc = extradoc
+
+        self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf],
+                                  locscale_in='loc=0, scale=1',
+                                  locscale_out='loc, scale')
+        self._attach_methods()
+
+        if longname is None:
+            if name[0] in 'aeiouAEIOU':  # vowel test against the string, not a list
+                hstr = "An "
+            else:
+                hstr = "A "
+            longname = hstr + name
+
+        if sys.flags.optimize < 2:
+            # Skip adding docstrings if interpreter is run with -OO
+            if self.__doc__ is None:
+                self._construct_default_doc(longname=longname,
+                                            extradoc=extradoc,
+                                            docdict=docdict,
+                                            discrete='continuous')
+            else:
+                dct = dict(distcont)
+                self._construct_doc(docdict, dct.get(self.name))
+
+    def __getstate__(self):
+        dct = self.__dict__.copy()
+
+        # these methods will be remade in __setstate__
+        # _random_state attribute is taken care of by rv_generic
+        attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs",
+                 "_cdfvec", "_ppfvec", "vecentropy", "generic_moment"]
+        for attr in attrs:
+            dct.pop(attr, None)
+        return dct
+
+    def _attach_methods(self):
+        """
+        Attaches dynamically created methods to the rv_continuous instance.
+        """
+        # _attach_methods is responsible for calling _attach_argparser_methods
+        self._attach_argparser_methods()
+
+        # nin correction
+        self._ppfvec = vectorize(self._ppf_single, otypes='d')
+        self._ppfvec.nin = self.numargs + 1
+        self.vecentropy = vectorize(self._entropy, otypes='d')
+        self._cdfvec = vectorize(self._cdf_single, otypes='d')
+        self._cdfvec.nin = self.numargs + 1
+
+        if self.moment_type == 0:
+            self.generic_moment = vectorize(self._mom0_sc, otypes='d')
+        else:
+            self.generic_moment = vectorize(self._mom1_sc, otypes='d')
+        # Because of the *args argument of _mom0_sc, vectorize cannot count the
+        # number of arguments correctly.
+        self.generic_moment.nin = self.numargs + 1
+
+    def _updated_ctor_param(self):
+        """Return the current version of _ctor_param, possibly updated by user.
+
+        Used by freezing.
+        Keep this in sync with the signature of __init__.
+        """
+        dct = self._ctor_param.copy()
+        dct['a'] = self.a
+        dct['b'] = self.b
+        dct['xtol'] = self.xtol
+        dct['badvalue'] = self.badvalue
+        dct['name'] = self.name
+        dct['shapes'] = self.shapes
+        dct['extradoc'] = self.extradoc
+        return dct
+
+    def _ppf_to_solve(self, x, q, *args):
+        return self.cdf(*(x, )+args)-q
+
+    def _ppf_single(self, q, *args):
+        factor = 10.
+        left, right = self._get_support(*args)
+
+        if np.isinf(left):
+            left = min(-factor, right)
+            while self._ppf_to_solve(left, q, *args) > 0.:
+                left, right = left * factor, left
+            # left is now such that cdf(left) <= q
+            # if right has changed, then cdf(right) > q
+
+        if np.isinf(right):
+            right = max(factor, left)
+            while self._ppf_to_solve(right, q, *args) < 0.:
+                left, right = right, right * factor
+            # right is now such that cdf(right) >= q
+
+        return optimize.brentq(self._ppf_to_solve,
+                               left, right, args=(q,)+args, xtol=self.xtol)
+
+    # moment from definition
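+    # E[X**m] = integral of x**m * pdf(x) over the support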
+    def _mom_integ0(self, x, m, *args):
+        return x**m * self.pdf(x, *args)
+
+    def _mom0_sc(self, m, *args):
+        _a, _b = self._get_support(*args)
+        return integrate.quad(self._mom_integ0, _a, _b,
+                              args=(m,)+args)[0]
+
+    # moment calculated using ppf
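+    # E[X**m] = integral of ppf(q)**m dq over q in [0, 1]
+    # (substituting q = cdf(x))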
+    def _mom_integ1(self, q, m, *args):
+        return (self.ppf(q, *args))**m
+
+    def _mom1_sc(self, m, *args):
+        return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]
+
+    def _pdf(self, x, *args):
+        return _derivative(self._cdf, x, dx=1e-5, args=args, order=5)
+
+    # Could also define any of these
+    def _logpdf(self, x, *args):
+        p = self._pdf(x, *args)
+        with np.errstate(divide='ignore'):
+            return log(p)
+
+    def _logpxf(self, x, *args):
+        # continuous distributions have PDF, discrete have PMF, but sometimes
+        # the distinction doesn't matter. This lets us use `_logpxf` for both
+        # discrete and continuous distributions.
+        return self._logpdf(x, *args)
+
+    def _cdf_single(self, x, *args):
+        _a, _b = self._get_support(*args)
+        return integrate.quad(self._pdf, _a, x, args=args)[0]
+
+    def _cdf(self, x, *args):
+        return self._cdfvec(x, *args)
+
+    # generic _argcheck, _logcdf, _sf, _logsf, _ppf, _isf, _rvs are defined
+    # in rv_generic
+
+    def pdf(self, x, *args, **kwds):
+        """Probability density function at x of the given RV.
+
+        Parameters
+        ----------
+        x : array_like
+            quantiles
+        arg1, arg2, arg3,... : array_like
+            The shape parameter(s) for the distribution (see docstring of the
+            instance object for more information)
+        loc : array_like, optional
+            location parameter (default=0)
+        scale : array_like, optional
+            scale parameter (default=1)
+
+        Returns
+        -------
+        pdf : ndarray
+            Probability density function evaluated at x
+
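+        Examples
+        --------
+        For instance (illustrative), the peak of the standard normal
+        density is ``1/sqrt(2*pi)`` (value shown rounded):
+
+        >>> from scipy.stats import norm
+        >>> round(float(norm.pdf(0.0)), 4)
+        0.3989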
+        """
+        args, loc, scale = self._parse_args(*args, **kwds)
+        x, loc, scale = map(asarray, (x, loc, scale))
+        args = tuple(map(asarray, args))
+        dtyp = np.promote_types(x.dtype, np.float64)
+        x = np.asarray((x - loc)/scale, dtype=dtyp)
+        cond0 = self._argcheck(*args) & (scale > 0)
+        cond1 = self._support_mask(x, *args) & (scale > 0)
+        cond = cond0 & cond1
+        output = zeros(shape(cond), dtyp)
+        putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
+        if np.any(cond):
+            goodargs = argsreduce(cond, *((x,)+args+(scale,)))
+            scale, goodargs = goodargs[-1], goodargs[:-1]
+            place(output, cond, self._pdf(*goodargs) / scale)
+        if output.ndim == 0:
+            return output[()]
+        return output
+
+    def logpdf(self, x, *args, **kwds):
+        """Log of the probability density function at x of the given RV.
+
+        This uses a more numerically accurate calculation if available.
+
+        Parameters
+        ----------
+        x : array_like
+            quantiles
+        arg1, arg2, arg3,... : array_like
+            The shape parameter(s) for the distribution (see docstring of the
+            instance object for more information)
+        loc : array_like, optional
+            location parameter (default=0)
+        scale : array_like, optional
+            scale parameter (default=1)
+
+        Returns
+        -------
+        logpdf : array_like
+            Log of the probability density function evaluated at x
+
+        """
+        args, loc, scale = self._parse_args(*args, **kwds)
+        x, loc, scale = map(asarray, (x, loc, scale))
+        args = tuple(map(asarray, args))
+        dtyp = np.promote_types(x.dtype, np.float64)
+        x = np.asarray((x - loc)/scale, dtype=dtyp)
+        cond0 = self._argcheck(*args) & (scale > 0)
+        cond1 = self._support_mask(x, *args) & (scale > 0)
+        cond = cond0 & cond1
+        output = empty(shape(cond), dtyp)
+        output.fill(NINF)
+        putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
+        if np.any(cond):
+            goodargs = argsreduce(cond, *((x,)+args+(scale,)))
+            scale, goodargs = goodargs[-1], goodargs[:-1]
+            place(output, cond, self._logpdf(*goodargs) - log(scale))
+        if output.ndim == 0:
+            return output[()]
+        return output
+
+    def cdf(self, x, *args, **kwds):
+        """
+        Cumulative distribution function of the given RV.
+
+        Parameters
+        ----------
+        x : array_like
+            quantiles
+        arg1, arg2, arg3,... : array_like
+            The shape parameter(s) for the distribution (see docstring of the
+            instance object for more information)
+        loc : array_like, optional
+            location parameter (default=0)
+        scale : array_like, optional
+            scale parameter (default=1)
+
+        Returns
+        -------
+        cdf : ndarray
+            Cumulative distribution function evaluated at `x`
+
+        """
+        args, loc, scale = self._parse_args(*args, **kwds)
+        x, loc, scale = map(asarray, (x, loc, scale))
+        args = tuple(map(asarray, args))
+        _a, _b = self._get_support(*args)
+        dtyp = np.promote_types(x.dtype, np.float64)
+        x = np.asarray((x - loc)/scale, dtype=dtyp)
+        cond0 = self._argcheck(*args) & (scale > 0)
+        cond1 = self._open_support_mask(x, *args) & (scale > 0)
+        cond2 = (x >= np.asarray(_b)) & cond0
+        cond = cond0 & cond1
+        output = zeros(shape(cond), dtyp)
+        place(output, (1-cond0)+np.isnan(x), self.badvalue)
+        place(output, cond2, 1.0)
+        if np.any(cond):  # call only if at least 1 entry
+            goodargs = argsreduce(cond, *((x,)+args))
+            place(output, cond, self._cdf(*goodargs))
+        if output.ndim == 0:
+            return output[()]
+        return output
+
+    def logcdf(self, x, *args, **kwds):
+        """Log of the cumulative distribution function at x of the given RV.
+
+        Parameters
+        ----------
+        x : array_like
+            quantiles
+        arg1, arg2, arg3,... : array_like
+            The shape parameter(s) for the distribution (see docstring of the
+            instance object for more information)
+        loc : array_like, optional
+            location parameter (default=0)
+        scale : array_like, optional
+            scale parameter (default=1)
+
+        Returns
+        -------
+        logcdf : array_like
+            Log of the cumulative distribution function evaluated at x
+
+        """
+        args, loc, scale = self._parse_args(*args, **kwds)
+        x, loc, scale = map(asarray, (x, loc, scale))
+        args = tuple(map(asarray, args))
+        _a, _b = self._get_support(*args)
+        dtyp = np.promote_types(x.dtype, np.float64)
+        x = np.asarray((x - loc)/scale, dtype=dtyp)
+        cond0 = self._argcheck(*args) & (scale > 0)
+        cond1 = self._open_support_mask(x, *args) & (scale > 0)
+        cond2 = (x >= _b) & cond0
+        cond = cond0 & cond1
+        output = empty(shape(cond), dtyp)
+        output.fill(NINF)
+        place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue)
+        place(output, cond2, 0.0)
+        if np.any(cond):  # call only if at least 1 entry
+            goodargs = argsreduce(cond, *((x,)+args))
+            place(output, cond, self._logcdf(*goodargs))
+        if output.ndim == 0:
+            return output[()]
+        return output
+
+    def sf(self, x, *args, **kwds):
+        """Survival function (1 - `cdf`) at x of the given RV.
+
+        Parameters
+        ----------
+        x : array_like
+            quantiles
+        arg1, arg2, arg3,... : array_like
+            The shape parameter(s) for the distribution (see docstring of the
+            instance object for more information)
+        loc : array_like, optional
+            location parameter (default=0)
+        scale : array_like, optional
+            scale parameter (default=1)
+
+        Returns
+        -------
+        sf : array_like
+            Survival function evaluated at x
+
+        """
+        args, loc, scale = self._parse_args(*args, **kwds)
+        x, loc, scale = map(asarray, (x, loc, scale))
+        args = tuple(map(asarray, args))
+        _a, _b = self._get_support(*args)
+        dtyp = np.promote_types(x.dtype, np.float64)
+        x = np.asarray((x - loc)/scale, dtype=dtyp)
+        cond0 = self._argcheck(*args) & (scale > 0)
+        cond1 = self._open_support_mask(x, *args) & (scale > 0)
+        cond2 = cond0 & (x <= _a)
+        cond = cond0 & cond1
+        output = zeros(shape(cond), dtyp)
+        place(output, (1-cond0)+np.isnan(x), self.badvalue)
+        place(output, cond2, 1.0)
+        if np.any(cond):
+            goodargs = argsreduce(cond, *((x,)+args))
+            place(output, cond, self._sf(*goodargs))
+        if output.ndim == 0:
+            return output[()]
+        return output
+
+    def logsf(self, x, *args, **kwds):
+        """Log of the survival function of the given RV.
+
+        Returns the log of the "survival function," defined as (1 - `cdf`),
+        evaluated at `x`.
+
+        Parameters
+        ----------
+        x : array_like
+            quantiles
+        arg1, arg2, arg3,... : array_like
+            The shape parameter(s) for the distribution (see docstring of the
+            instance object for more information)
+        loc : array_like, optional
+            location parameter (default=0)
+        scale : array_like, optional
+            scale parameter (default=1)
+
+        Returns
+        -------
+        logsf : ndarray
+            Log of the survival function evaluated at `x`.
+
+        """
+        args, loc, scale = self._parse_args(*args, **kwds)
+        x, loc, scale = map(asarray, (x, loc, scale))
+        args = tuple(map(asarray, args))
+        _a, _b = self._get_support(*args)
+        dtyp = np.promote_types(x.dtype, np.float64)
+        x = np.asarray((x - loc)/scale, dtype=dtyp)
+        cond0 = self._argcheck(*args) & (scale > 0)
+        cond1 = self._open_support_mask(x, *args) & (scale > 0)
+        cond2 = cond0 & (x <= _a)
+        cond = cond0 & cond1
+        output = empty(shape(cond), dtyp)
+        output.fill(NINF)
+        place(output, (1-cond0)+np.isnan(x), self.badvalue)
+        place(output, cond2, 0.0)
+        if np.any(cond):
+            goodargs = argsreduce(cond, *((x,)+args))
+            place(output, cond, self._logsf(*goodargs))
+        if output.ndim == 0:
+            return output[()]
+        return output
+
+    def ppf(self, q, *args, **kwds):
+        """Percent point function (inverse of `cdf`) at q of the given RV.
+
+        Parameters
+        ----------
+        q : array_like
+            lower tail probability
+        arg1, arg2, arg3,... : array_like
+            The shape parameter(s) for the distribution (see docstring of the
+            instance object for more information)
+        loc : array_like, optional
+            location parameter (default=0)
+        scale : array_like, optional
+            scale parameter (default=1)
+
+        Returns
+        -------
+        x : array_like
+            quantile corresponding to the lower tail probability q.
+
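+        Examples
+        --------
+        For instance (illustrative), the median of the standard normal is
+        at ``q = 0.5``:
+
+        >>> from scipy.stats import norm
+        >>> float(norm.ppf(0.5))
+        0.0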
+        """
+        args, loc, scale = self._parse_args(*args, **kwds)
+        q, loc, scale = map(asarray, (q, loc, scale))
+        args = tuple(map(asarray, args))
+        _a, _b = self._get_support(*args)
+        cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
+        cond1 = (0 < q) & (q < 1)
+        cond2 = cond0 & (q == 0)
+        cond3 = cond0 & (q == 1)
+        cond = cond0 & cond1
+        output = np.full(shape(cond), fill_value=self.badvalue)
+
+        lower_bound = _a * scale + loc
+        upper_bound = _b * scale + loc
+        place(output, cond2, argsreduce(cond2, lower_bound)[0])
+        place(output, cond3, argsreduce(cond3, upper_bound)[0])
+
+        if np.any(cond):  # call only if at least 1 entry
+            goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
+            scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
+            place(output, cond, self._ppf(*goodargs) * scale + loc)
+        if output.ndim == 0:
+            return output[()]
+        return output
+
+    def isf(self, q, *args, **kwds):
+        """Inverse survival function (inverse of `sf`) at q of the given RV.
+
+        Parameters
+        ----------
+        q : array_like
+            upper tail probability
+        arg1, arg2, arg3,... : array_like
+            The shape parameter(s) for the distribution (see docstring of the
+            instance object for more information)
+        loc : array_like, optional
+            location parameter (default=0)
+        scale : array_like, optional
+            scale parameter (default=1)
+
+        Returns
+        -------
+        x : ndarray or scalar
+            Quantile corresponding to the upper tail probability q.
+
+        """
+        args, loc, scale = self._parse_args(*args, **kwds)
+        q, loc, scale = map(asarray, (q, loc, scale))
+        args = tuple(map(asarray, args))
+        _a, _b = self._get_support(*args)
+        cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
+        cond1 = (0 < q) & (q < 1)
+        cond2 = cond0 & (q == 1)
+        cond3 = cond0 & (q == 0)
+        cond = cond0 & cond1
+        output = np.full(shape(cond), fill_value=self.badvalue)
+
+        lower_bound = _a * scale + loc
+        upper_bound = _b * scale + loc
+        place(output, cond2, argsreduce(cond2, lower_bound)[0])
+        place(output, cond3, argsreduce(cond3, upper_bound)[0])
+
+        if np.any(cond):
+            goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
+            scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
+            place(output, cond, self._isf(*goodargs) * scale + loc)
+        if output.ndim == 0:
+            return output[()]
+        return output
+
+    def _unpack_loc_scale(self, theta):
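+        # theta is ordered (shape parameters..., loc, scale); peel off the
+        # last two entries as loc and scale.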
+        try:
+            loc = theta[-2]
+            scale = theta[-1]
+            args = tuple(theta[:-2])
+        except IndexError as e:
+            raise ValueError("Not enough input arguments.") from e
+        return loc, scale, args
+
+    def _fitstart(self, data, args=None):
+        """Starting point for fit (shape arguments + loc + scale)."""
+        if args is None:
+            args = (1.0,)*self.numargs
+        loc, scale = self._fit_loc_scale_support(data, *args)
+        return args + (loc, scale)
+
+    def _reduce_func(self, args, kwds, data=None):
+        """
+        Return the (possibly reduced) function to optimize in order to find MLE
+        estimates for the .fit method.
+        """
+        # Convert fixed shape parameters to the standard numeric form: e.g. for
+        # stats.beta, shapes='a, b'. To fix `a`, the caller can give a value
+        # for `f0`, `fa` or 'fix_a'.  The following converts the latter two
+        # into the first (numeric) form.
+        shapes = []
+        if self.shapes:
+            shapes = self.shapes.replace(',', ' ').split()
+            for j, s in enumerate(shapes):
+                key = 'f' + str(j)
+                names = [key, 'f' + s, 'fix_' + s]
+                val = _get_fixed_fit_value(kwds, names)
+                if val is not None:
+                    kwds[key] = val
+
+        args = list(args)
+        Nargs = len(args)
+        fixedn = []
+        names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale']
+        x0 = []
+        for n, key in enumerate(names):
+            if key in kwds:
+                fixedn.append(n)
+                args[n] = kwds.pop(key)
+            else:
+                x0.append(args[n])
+
+        methods = {"mle", "mm"}
+        method = kwds.pop('method', "mle").lower()
+        if method == "mm":
+            n_params = len(shapes) + 2 - len(fixedn)
+            exponents = (np.arange(1, n_params+1))[:, np.newaxis]
+            data_moments = np.sum(data[None, :]**exponents/len(data), axis=1)
+
+            def objective(theta, x):
+                return self._moment_error(theta, x, data_moments)
+        elif method == "mle":
+            objective = self._penalized_nnlf
+        else:
+            raise ValueError("Method '{0}' not available; must be one of {1}"
+                             .format(method, methods))
+
+        if len(fixedn) == 0:
+            func = objective
+            restore = None
+        else:
+            if len(fixedn) == Nargs:
+                raise ValueError(
+                    "All parameters fixed. There is nothing to optimize.")
+
+            def restore(args, theta):
+                # Replace with theta for all numbers not in fixedn
+                # This allows the non-fixed values to vary, but
+                #  we still call self.nnlf with all parameters.
+                i = 0
+                for n in range(Nargs):
+                    if n not in fixedn:
+                        args[n] = theta[i]
+                        i += 1
+                return args
+
+            def func(theta, x):
+                newtheta = restore(args[:], theta)
+                return objective(newtheta, x)
+
+        return x0, func, restore, args
+
+    def _moment_error(self, theta, x, data_moments):
+        loc, scale, args = self._unpack_loc_scale(theta)
+        if not self._argcheck(*args) or scale <= 0:
+            return inf
+
+        dist_moments = np.array([self.moment(i+1, *args, loc=loc, scale=scale)
+                                 for i in range(len(data_moments))])
+        if np.any(np.isnan(dist_moments)):
+            raise ValueError("Method of moments encountered a non-finite "
+                             "distribution moment and cannot continue. "
+                             "Consider trying method='MLE'.")
+
+        return (((data_moments - dist_moments) /
+                 np.maximum(np.abs(data_moments), 1e-8))**2).sum()
+
+    def fit(self, data, *args, **kwds):
+        """
+        Return estimates of shape (if applicable), location, and scale
+        parameters from data. The default estimation method is Maximum
+        Likelihood Estimation (MLE), but Method of Moments (MM)
+        is also available.
+
+        Starting estimates for the fit are given by input arguments; for any
+        arguments not provided with starting estimates,
+        ``self._fitstart(data)`` is called to generate them.
+
+        One can hold some parameters fixed to specific values by passing in
+        keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)
+        and ``floc`` and ``fscale`` (for location and scale parameters,
+        respectively).
+
+        Parameters
+        ----------
+        data : array_like
+            Data to use in estimating the distribution parameters.
+        arg1, arg2, arg3,... : floats, optional
+            Starting value(s) for any shape-characterizing arguments (those not
+            provided will be determined by a call to ``_fitstart(data)``).
+            No default value.
+        **kwds : floats, optional
+            - `loc`: initial guess of the distribution's location parameter.
+            - `scale`: initial guess of the distribution's scale parameter.
+
+            Special keyword arguments are recognized as holding certain
+            parameters fixed:
+
+            - f0...fn : hold respective shape parameters fixed.
+              Alternatively, shape parameters to fix can be specified by name.
+              For example, if ``self.shapes == "a, b"``, ``fa`` and ``fix_a``
+              are equivalent to ``f0``, and ``fb`` and ``fix_b`` are
+              equivalent to ``f1``.
+
+            - floc : hold location parameter fixed to specified value.
+
+            - fscale : hold scale parameter fixed to specified value.
+
+            - optimizer : The optimizer to use.
+              The optimizer must take ``func``,
+              and starting position as the first two arguments,
+              plus ``args`` (for extra arguments to pass to the
+              function to be optimized) and ``disp=0`` to suppress
+              output as keyword arguments.
+
+            - method : The method to use. The default is "MLE" (Maximum
+              Likelihood Estimate); "MM" (Method of Moments)
+              is also available.
+
+        Raises
+        ------
+        TypeError, ValueError
+            If an input is invalid
+        `~scipy.stats.FitError`
+            If fitting fails or the fit produced would be invalid
+
+        Returns
+        -------
+        parameter_tuple : tuple of floats
+            Estimates for any shape parameters (if applicable),
+            followed by those for location and scale.
+            For most random variables, shape statistics
+            will be returned, but there are exceptions (e.g. ``norm``).
+
+        Notes
+        -----
+        With ``method="MLE"`` (default), the fit is computed by minimizing
+        the negative log-likelihood function. A large, finite penalty
+        (rather than infinite negative log-likelihood) is applied for
+        observations beyond the support of the distribution.
+
+        With ``method="MM"``, the fit is computed by minimizing the L2 norm
+        of the relative errors between the first *k* raw (about zero) data
+        moments and the corresponding distribution moments, where *k* is the
+        number of non-fixed parameters.
+        More precisely, the objective function is::
+
+            (((data_moments - dist_moments)
+              / np.maximum(np.abs(data_moments), 1e-8))**2).sum()
+
+        where the constant ``1e-8`` avoids division by zero in case of
+        vanishing data moments. Typically, this error norm can be reduced to
+        zero.
+        Note that the standard method of moments can produce parameters for
+        which some data are outside the support of the fitted distribution;
+        this implementation does nothing to prevent this.
+
+        For either method,
+        the returned answer is not guaranteed to be globally optimal; it
+        may only be locally optimal, or the optimization may fail altogether.
+        If the data contain any of ``np.nan``, ``np.inf``, or ``-np.inf``,
+        the `fit` method will raise a ``ValueError``.
+
+        Examples
+        --------
+
+        Generate some data to fit: draw random variates from the `beta`
+        distribution
+
+        >>> from scipy.stats import beta
+        >>> a, b = 1., 2.
+        >>> x = beta.rvs(a, b, size=1000)
+
+        Now we can fit all four parameters (``a``, ``b``, ``loc``
+        and ``scale``):
+
+        >>> a1, b1, loc1, scale1 = beta.fit(x)
+
+        We can also use some prior knowledge about the dataset: let's keep
+        ``loc`` and ``scale`` fixed:
+
+        >>> a1, b1, loc1, scale1 = beta.fit(x, floc=0, fscale=1)
+        >>> loc1, scale1
+        (0, 1)
+
+        We can also keep shape parameters fixed by using ``f``-keywords. To
+        keep the zero-th shape parameter ``a`` equal to 1, use ``f0=1`` or,
+        equivalently, ``fa=1``:
+
+        >>> a1, b1, loc1, scale1 = beta.fit(x, fa=1, floc=0, fscale=1)
+        >>> a1
+        1
+
+        Not all distributions return estimates for the shape parameters.
+        ``norm`` for example just returns estimates for location and scale:
+
+        >>> from scipy.stats import norm
+        >>> x = norm.rvs(a, b, size=1000, random_state=123)
+        >>> loc1, scale1 = norm.fit(x)
+        >>> loc1, scale1
+        (0.92087172783841631, 2.0015750750324668)
+        """
+        data = np.asarray(data)
+        method = kwds.get('method', "mle").lower()
+
+        # memory for method of moments
+        Narg = len(args)
+        if Narg > self.numargs:
+            raise TypeError("Too many input arguments.")
+
+        if not np.isfinite(data).all():
+            raise ValueError("The data contains non-finite values.")
+
+        start = [None]*2
+        if (Narg < self.numargs) or not ('loc' in kwds and
+                                         'scale' in kwds):
+            # get distribution specific starting locations
+            start = self._fitstart(data)
+            args += start[Narg:-2]
+        loc = kwds.pop('loc', start[-2])
+        scale = kwds.pop('scale', start[-1])
+        args += (loc, scale)
+        x0, func, restore, args = self._reduce_func(args, kwds, data=data)
+        optimizer = kwds.pop('optimizer', optimize.fmin)
+        # convert string to function in scipy.optimize
+        optimizer = _fit_determine_optimizer(optimizer)
+        # by now kwds must be empty, since everybody took what they needed
+        if kwds:
+            raise TypeError("Unknown arguments: %s." % kwds)
+
+        # In some cases, method of moments can be done with fsolve/root
+        # instead of an optimizer, but sometimes no solution exists,
+        # especially when the user fixes parameters. Minimizing the sum
+        # of squares of the error generalizes to these cases.
+        vals = optimizer(func, x0, args=(ravel(data),), disp=0)
+        obj = func(vals, data)
+
+        if restore is not None:
+            vals = restore(args, vals)
+        vals = tuple(vals)
+
+        loc, scale, shapes = self._unpack_loc_scale(vals)
+        if not (np.all(self._argcheck(*shapes)) and scale > 0):
+            raise FitError("Optimization converged to parameters that are "
+                           "outside the range allowed by the distribution.")
+
+        if method == 'mm':
+            if not np.isfinite(obj):
+                raise FitError("Optimization failed: either a data moment "
+                               "or fitted distribution moment is "
+                               "non-finite.")
+
+        return vals
+
+    def _fit_loc_scale_support(self, data, *args):
+        """Estimate loc and scale parameters from data accounting for support.
+
+        Parameters
+        ----------
+        data : array_like
+            Data to fit.
+        arg1, arg2, arg3,... : array_like
+            The shape parameter(s) for the distribution (see docstring of the
+            instance object for more information).
+
+        Returns
+        -------
+        Lhat : float
+            Estimated location parameter for the data.
+        Shat : float
+            Estimated scale parameter for the data.
+
+        """
+        data = np.asarray(data)
+
+        # Estimate location and scale according to the method of moments.
+        loc_hat, scale_hat = self.fit_loc_scale(data, *args)
+
+        # Compute the support according to the shape parameters.
+        self._argcheck(*args)
+        _a, _b = self._get_support(*args)
+        a, b = _a, _b
+        support_width = b - a
+
+        # If the support is empty then return the moment-based estimates.
+        if support_width <= 0:
+            return loc_hat, scale_hat
+
+        # Compute the proposed support according to the loc and scale
+        # estimates.
+        a_hat = loc_hat + a * scale_hat
+        b_hat = loc_hat + b * scale_hat
+
+        # Use the moment-based estimates if they are compatible with the data.
+        data_a = np.min(data)
+        data_b = np.max(data)
+        if a_hat < data_a and data_b < b_hat:
+            return loc_hat, scale_hat
+
+        # Otherwise find other estimates that are compatible with the data.
+        data_width = data_b - data_a
+        rel_margin = 0.1
+        margin = data_width * rel_margin
+
+        # For a finite interval, both the location and scale
+        # should have interesting values.
+        if support_width < np.inf:
+            loc_hat = (data_a - a) - margin
+            scale_hat = (data_width + 2 * margin) / support_width
+            return loc_hat, scale_hat
+
+        # For a one-sided interval, use only an interesting location parameter.
+        if a > -np.inf:
+            return (data_a - a) - margin, 1
+        elif b < np.inf:
+            return (data_b - b) + margin, 1
+        else:
+            raise RuntimeError
+
+    def fit_loc_scale(self, data, *args):
+        """
+        Estimate loc and scale parameters from data using 1st and 2nd moments.
+
+        Parameters
+        ----------
+        data : array_like
+            Data to fit.
+        arg1, arg2, arg3,... : array_like
+            The shape parameter(s) for the distribution (see docstring of the
+            instance object for more information).
+
+        Returns
+        -------
+        Lhat : float
+            Estimated location parameter for the data.
+        Shat : float
+            Estimated scale parameter for the data.
+
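+        Examples
+        --------
+        A minimal, hedged sketch: for a location-scale family, the first
+        two sample moments determine ``loc`` and ``scale``. The fitted
+        values depend on the random draw, so no exact output is shown.
+
+        >>> from scipy.stats import norm
+        >>> data = norm.rvs(loc=3., scale=2., size=1000, random_state=0)
+        >>> loc_hat, scale_hat = norm.fit_loc_scale(data)
+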
+        """
+        mu, mu2 = self.stats(*args, **{'moments': 'mv'})
+        tmp = asarray(data)
+        muhat = tmp.mean()
+        mu2hat = tmp.var()
+        Shat = sqrt(mu2hat / mu2)
+        Lhat = muhat - Shat*mu
+        if not np.isfinite(Lhat):
+            Lhat = 0
+        if not (np.isfinite(Shat) and (0 < Shat)):
+            Shat = 1
+        return Lhat, Shat
+
+    def _entropy(self, *args):
+        def integ(x):
+            val = self._pdf(x, *args)
+            return entr(val)
+
+        # upper limit is often inf, so suppress warnings when integrating
+        _a, _b = self._get_support(*args)
+        with np.errstate(over='ignore'):
+            h = integrate.quad(integ, _a, _b)[0]
+
+        if not np.isnan(h):
+            return h
+        else:
+            # try with different limits if integration problems
+            low, upp = self.ppf([1e-10, 1. - 1e-10], *args)
+            if np.isinf(_b):
+                upper = upp
+            else:
+                upper = _b
+            if np.isinf(_a):
+                lower = low
+            else:
+                lower = _a
+            return integrate.quad(integ, lower, upper)[0]
+
+    def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
+               conditional=False, **kwds):
+        """Calculate expected value of a function with respect to the
+        distribution by numerical integration.
+
+        The expected value of a function ``f(x)`` with respect to a
+        distribution ``dist`` is defined as::
+
+                    ub
+            E[f(x)] = Integral(f(x) * dist.pdf(x)),
+                    lb
+
+        where ``ub`` and ``lb`` are arguments and ``x`` is distributed
+        according to ``dist.pdf(x)``. If the bounds ``lb`` and ``ub``
+        correspond to the
+        support of the distribution, e.g. ``[-inf, inf]`` in the default
+        case, then the integral is the unrestricted expectation of ``f(x)``.
+        Also, the function ``f(x)`` may be defined such that ``f(x)`` is ``0``
+        outside a finite interval in which case the expectation is
+        calculated within the finite range ``[lb, ub]``.
+
+        Parameters
+        ----------
+        func : callable, optional
+            Function for which integral is calculated. Takes only one argument.
+            The default is the identity mapping f(x) = x.
+        args : tuple, optional
+            Shape parameters of the distribution.
+        loc : float, optional
+            Location parameter (default=0).
+        scale : float, optional
+            Scale parameter (default=1).
+        lb, ub : scalar, optional
+            Lower and upper bound for integration. Default is set to the
+            support of the distribution.
+        conditional : bool, optional
+            If True, the integral is corrected by the conditional probability
+            of the integration interval.  The return value is the expectation
+            of the function, conditional on being in the given interval.
+            Default is False.
+
+        Additional keyword arguments are passed to the integration routine.
+
+        Returns
+        -------
+        expect : float
+            The calculated expected value.
+
+        Notes
+        -----
+        The integration behavior of this function is inherited from
+        `scipy.integrate.quad`. Neither this function nor
+        `scipy.integrate.quad` can verify whether the integral exists or is
+        finite. For example ``cauchy(0).mean()`` returns ``np.nan`` and
+        ``cauchy(0).expect()`` returns ``0.0``.
+
+        Likewise, the accuracy of results is not verified by the function.
+        `scipy.integrate.quad` is typically reliable for integrals that are
+        numerically favorable, but it is not guaranteed to converge
+        to a correct value for all possible intervals and integrands. This
+        function is provided for convenience; for critical applications,
+        check results against other integration methods.
+
+        The function is not vectorized.
+
+        Examples
+        --------
+
+        To understand the effect of the bounds of integration consider
+
+        >>> from scipy.stats import expon
+        >>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0)
+        0.6321205588285578
+
+        This is close to
+
+        >>> expon(1).cdf(2.0) - expon(1).cdf(0.0)
+        0.6321205588285577
+
+        If ``conditional=True``
+
+        >>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0, conditional=True)
+        1.0000000000000002
+
+        The slight deviation from 1 is due to numerical integration.
+
+        The integrand can be treated as a complex-valued function
+        by passing ``complex_func=True`` to `scipy.integrate.quad` .
+
+        >>> import numpy as np
+        >>> from scipy.stats import vonmises
+        >>> res = vonmises(loc=2, kappa=1).expect(lambda x: np.exp(1j*x),
+        ...                                       complex_func=True)
+        >>> res
+        (-0.18576377217422957+0.40590124735052263j)
+
+        >>> np.angle(res)  # location of the (circular) distribution
+        2.0
+
+        """
+        lockwds = {'loc': loc,
+                   'scale': scale}
+        self._argcheck(*args)
+        _a, _b = self._get_support(*args)
+        if func is None:
+            def fun(x, *args):
+                return x * self.pdf(x, *args, **lockwds)
+        else:
+            def fun(x, *args):
+                return func(x) * self.pdf(x, *args, **lockwds)
+        if lb is None:
+            lb = loc + _a * scale
+        if ub is None:
+            ub = loc + _b * scale
+
+        cdf_bounds = self.cdf([lb, ub], *args, **lockwds)
+        invfac = cdf_bounds[1] - cdf_bounds[0]
+
+        kwds['args'] = args
+
+        # split interval to help integrator w/ infinite support; see gh-8928
+        alpha = 0.05  # split body from tails at probability mass `alpha`
+        inner_bounds = np.array([alpha, 1-alpha])
+        cdf_inner_bounds = cdf_bounds[0] + invfac * inner_bounds
+        c, d = loc + self._ppf(cdf_inner_bounds, *args) * scale
+
+        # Do not silence warnings from integration.
+        lbc = integrate.quad(fun, lb, c, **kwds)[0]
+        cd = integrate.quad(fun, c, d, **kwds)[0]
+        dub = integrate.quad(fun, d, ub, **kwds)[0]
+        vals = (lbc + cd + dub)
+
+        if conditional:
+            vals /= invfac
+        return np.array(vals)[()]  # make it a numpy scalar like other methods
+
+    def _param_info(self):
+        shape_info = self._shape_info()
+        loc_info = _ShapeInfo("loc", False, (-np.inf, np.inf), (False, False))
+        scale_info = _ShapeInfo("scale", False, (0, np.inf), (False, False))
+        param_info = shape_info + [loc_info, scale_info]
+        return param_info
+
+# Helpers for the discrete distributions
+def _drv2_moment(self, n, *args):
+    """Non-central moment of discrete distribution."""
+    def fun(x):
+        return np.power(x, n) * self._pmf(x, *args)
+
+    _a, _b = self._get_support(*args)
+    return _expect(fun, _a, _b, self.ppf(0.5, *args), self.inc)
+
+
+def _drv2_ppfsingle(self, q, *args):  # Use basic bisection algorithm
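+    # Expand the bracket [a, b] until it contains the target quantile,
+    # then bisect on integers; qa and qb track the CDF at the endpoints.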
+    _a, _b = self._get_support(*args)
+    b = _b
+    a = _a
+    if isinf(b):            # Be sure ending point is > q
+        b = int(max(100*q, 10))
+        while True:
+            if b >= _b:
+                qb = 1.0
+                break
+            qb = self._cdf(b, *args)
+            if (qb < q):
+                b += 10
+            else:
+                break
+    else:
+        qb = 1.0
+    if isinf(a):    # be sure starting point < q
+        a = int(min(-100*q, -10))
+        while True:
+            if a <= _a:
+                qb = 0.0
+                break
+            qa = self._cdf(a, *args)
+            if (qa > q):
+                a -= 10
+            else:
+                break
+    else:
+        qa = self._cdf(a, *args)
+
+    while True:
+        if (qa == q):
+            return a
+        if (qb == q):
+            return b
+        if b <= a+1:
+            if qa > q:
+                return a
+            else:
+                return b
+        c = int((a+b)/2.0)
+        qc = self._cdf(c, *args)
+        if (qc < q):
+            if a != c:
+                a = c
+            else:
+                raise RuntimeError('updating stopped, endless loop')
+            qa = qc
+        elif (qc > q):
+            if b != c:
+                b = c
+            else:
+                raise RuntimeError('updating stopped, endless loop')
+            qb = qc
+        else:
+            return c
+
+
+# Must over-ride one of _pmf or _cdf or pass in
+#  x_k, p(x_k) lists in initialization
+
+
+class rv_discrete(rv_generic):
+    """A generic discrete random variable class meant for subclassing.
+
+    `rv_discrete` is a base class to construct specific distribution classes
+    and instances for discrete random variables. It can also be used
+    to construct an arbitrary distribution defined by a list of support
+    points and corresponding probabilities.
+
+    Parameters
+    ----------
+    a : float, optional
+        Lower bound of the support of the distribution, default: 0
+    b : float, optional
+        Upper bound of the support of the distribution, default: plus infinity
+    moment_tol : float, optional
+        The tolerance for the generic calculation of moments.
+    values : tuple of two array_like, optional
+        ``(xk, pk)`` where ``xk`` are integers and ``pk`` are the non-zero
+        probabilities between 0 and 1 with ``sum(pk) = 1``. ``xk``
+        and ``pk`` must have the same shape.
+    inc : integer, optional
+        Increment for the support of the distribution.
+        Default is 1. (other values have not been tested)
+    badvalue : float, optional
+        The value in the result arrays that indicates a violated
+        argument restriction. Default is np.nan.
+    name : str, optional
+        The name of the instance. This string is used to construct the default
+        example for distributions.
+    longname : str, optional
+        This string is used as part of the first line of the docstring returned
+        when a subclass has no docstring of its own. Note: `longname` exists
+        for backwards compatibility, do not use for new subclasses.
+    shapes : str, optional
+        The shape of the distribution. For example, "m, n" for a
+        distribution that takes two integers as the two shape arguments for
+        all its methods. If not provided, shape parameters will be inferred
+        from the signatures of the private methods, ``_pmf`` and ``_cdf``,
+        of the instance.
+    extradoc :  str, optional, deprecated
+        This string is used as the last part of the docstring returned when a
+        subclass has no docstring of its own. Note: `extradoc` exists for
+        backwards compatibility and will be removed in SciPy 1.11.0, do not
+        use for new subclasses.
+    seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
+        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
+        singleton is used.
+        If `seed` is an int, a new ``RandomState`` instance is used,
+        seeded with `seed`.
+        If `seed` is already a ``Generator`` or ``RandomState`` instance then
+        that instance is used.
+
+    Methods
+    -------
+    rvs
+    pmf
+    logpmf
+    cdf
+    logcdf
+    sf
+    logsf
+    ppf
+    isf
+    moment
+    stats
+    entropy
+    expect
+    median
+    mean
+    std
+    var
+    interval
+    __call__
+    support
+
+    Notes
+    -----
+    This class is similar to `rv_continuous`. Whether a shape parameter is
+    valid is decided by an ``_argcheck`` method (which defaults to checking
+    that its arguments are strictly positive).
+    The main differences are:
+
+    - the support of the distribution is a set of integers
+    - instead of the probability density function, ``pdf`` (and the
+      corresponding private ``_pdf``), this class defines the
+      *probability mass function*, `pmf` (and the corresponding
+      private ``_pmf``.)
+    - scale parameter is not defined.
+
+    To create a new discrete distribution, we would do the following:
+
+    >>> import numpy as np
+    >>> from scipy import special
+    >>> from scipy.stats import rv_discrete
+    >>> class poisson_gen(rv_discrete):
+    ...     "Poisson distribution"
+    ...     def _pmf(self, k, mu):
+    ...         return np.exp(-mu) * mu**k / special.factorial(k)
+
+    and create an instance:
+
+    >>> poisson = poisson_gen(name="poisson")
+
+    Note that above we defined the Poisson distribution in the standard form.
+    Shifting the distribution can be done by providing the ``loc`` parameter
+    to the methods of the instance. For example, ``poisson.pmf(x, mu, loc)``
+    delegates the work to ``poisson._pmf(x-loc, mu)``.
+
+    **Discrete distributions from a list of probabilities**
+
+    Alternatively, you can construct an arbitrary discrete rv defined
+    on a finite set of values ``xk`` with ``Prob{X=xk} = pk`` by using the
+    ``values`` keyword argument to the `rv_discrete` constructor.
+
+    Examples
+    --------
+    Custom made discrete distribution:
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> xk = np.arange(7)
+    >>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2)
+    >>> custm = stats.rv_discrete(name='custm', values=(xk, pk))
+    >>>
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots(1, 1)
+    >>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r')
+    >>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4)
+    >>> plt.show()
+
+    Random number generation:
+
+    >>> R = custm.rvs(size=100)
+
+    """
+    def __new__(cls, a=0, b=inf, name=None, badvalue=None,
+                moment_tol=1e-8, values=None, inc=1, longname=None,
+                shapes=None, extradoc=None, seed=None):
+
+        if values is not None:
+            # dispatch to a subclass
+            return super(rv_discrete, cls).__new__(rv_sample)
+        else:
+            # business as usual
+            return super(rv_discrete, cls).__new__(cls)
+
+    def __init__(self, a=0, b=inf, name=None, badvalue=None,
+                 moment_tol=1e-8, values=None, inc=1, longname=None,
+                 shapes=None, extradoc=None, seed=None):
+
+        super().__init__(seed)
+
+        if extradoc is not None:
+            warnings.warn("extradoc is deprecated and will be removed in "
+                          "SciPy 1.11.0", DeprecationWarning)
+
+        # cf generic freeze
+        self._ctor_param = dict(
+            a=a, b=b, name=name, badvalue=badvalue,
+            moment_tol=moment_tol, values=values, inc=inc,
+            longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)
+
+        if badvalue is None:
+            badvalue = nan
+        self.badvalue = badvalue
+        self.a = a
+        self.b = b
+        self.moment_tol = moment_tol
+        self.inc = inc
+        self.shapes = shapes
+
+        if values is not None:
+            raise ValueError("rv_discrete.__init__(..., values != None, ...)")
+
+        self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf],
+                                  locscale_in='loc=0',
+                                  # scale=1 for discrete RVs
+                                  locscale_out='loc, 1')
+        self._attach_methods()
+        self._construct_docstrings(name, longname, extradoc)
+
+    def __getstate__(self):
+        dct = self.__dict__.copy()
+        # these methods will be remade in __setstate__
+        attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs",
+                 "_cdfvec", "_ppfvec", "generic_moment"]
+        for attr in attrs:
+            dct.pop(attr, None)
+        return dct
+
+    def _attach_methods(self):
+        """Attaches dynamically created methods to the rv_discrete instance."""
+        self._cdfvec = vectorize(self._cdf_single, otypes='d')
+        self.vecentropy = vectorize(self._entropy)
+
+        # _attach_methods is responsible for calling _attach_argparser_methods
+        self._attach_argparser_methods()
+
+        # nin correction needs to be after we know numargs
+        # correct nin for generic moment vectorization
+        _vec_generic_moment = vectorize(_drv2_moment, otypes='d')
+        _vec_generic_moment.nin = self.numargs + 2
+        self.generic_moment = types.MethodType(_vec_generic_moment, self)
+
+        # correct nin for ppf vectorization
+        _vppf = vectorize(_drv2_ppfsingle, otypes='d')
+        _vppf.nin = self.numargs + 2
+        self._ppfvec = types.MethodType(_vppf, self)
+
+        # now that self.numargs is defined, we can adjust nin
+        self._cdfvec.nin = self.numargs + 1
+
+    def _construct_docstrings(self, name, longname, extradoc):
+        if name is None:
+            name = 'Distribution'
+        self.name = name
+        self.extradoc = extradoc
+
+        # generate docstring for subclass instances
+        if longname is None:
+            if name[0] in 'aeiouAEIOU':
+                hstr = "An "
+            else:
+                hstr = "A "
+            longname = hstr + name
+
+        if sys.flags.optimize < 2:
+            # Skip adding docstrings if interpreter is run with -OO
+            if self.__doc__ is None:
+                self._construct_default_doc(longname=longname,
+                                            extradoc=extradoc,
+                                            docdict=docdict_discrete,
+                                            discrete='discrete')
+            else:
+                dct = dict(distdiscrete)
+                self._construct_doc(docdict_discrete, dct.get(self.name))
+
+            # discrete RVs do not have the scale parameter, remove it
+            self.__doc__ = self.__doc__.replace(
+                '\n    scale : array_like, '
+                'optional\n        scale parameter (default=1)', '')
+
+    def _updated_ctor_param(self):
+        """Return the current version of _ctor_param, possibly updated by user.
+
+        Used by freezing.
+        Keep this in sync with the signature of __init__.
+        """
+        dct = self._ctor_param.copy()
+        dct['a'] = self.a
+        dct['b'] = self.b
+        dct['badvalue'] = self.badvalue
+        dct['moment_tol'] = self.moment_tol
+        dct['inc'] = self.inc
+        dct['name'] = self.name
+        dct['shapes'] = self.shapes
+        dct['extradoc'] = self.extradoc
+        return dct
+
+    def _nonzero(self, k, *args):
+        return floor(k) == k
+
+    def _pmf(self, k, *args):
+        return self._cdf(k, *args) - self._cdf(k-1, *args)
+
+    def _logpmf(self, k, *args):
+        return log(self._pmf(k, *args))
+
+    def _logpxf(self, k, *args):
+        # continuous distributions have PDF, discrete have PMF, but sometimes
+        # the distinction doesn't matter. This lets us use `_logpxf` for both
+        # discrete and continuous distributions.
+        return self._logpmf(k, *args)
+
+    def _unpack_loc_scale(self, theta):
+        try:
+            loc = theta[-1]
+            scale = 1
+            args = tuple(theta[:-1])
+        except IndexError as e:
+            raise ValueError("Not enough input arguments.") from e
+        return loc, scale, args
+
+    def _cdf_single(self, k, *args):
+        _a, _b = self._get_support(*args)
+        m = arange(int(_a), k+1)
+        return np.sum(self._pmf(m, *args), axis=0)
+
+    def _cdf(self, x, *args):
+        k = floor(x)
+        return self._cdfvec(k, *args)
+
+    # generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic
+
+    def rvs(self, *args, **kwargs):
+        """Random variates of given type.
+
+        Parameters
+        ----------
+        arg1, arg2, arg3,... : array_like
+            The shape parameter(s) for the distribution (see docstring of the
+            instance object for more information).
+        loc : array_like, optional
+            Location parameter (default=0).
+        size : int or tuple of ints, optional
+            Defining number of random variates (Default is 1). Note that `size`
+            has to be given as keyword, not as positional argument.
+        random_state : {None, int, `numpy.random.Generator`,
+                        `numpy.random.RandomState`}, optional
+
+            If `random_state` is None (or `np.random`), the
+            `numpy.random.RandomState` singleton is used.
+            If `random_state` is an int, a new ``RandomState`` instance is
+            used, seeded with `random_state`.
+            If `random_state` is already a ``Generator`` or ``RandomState``
+            instance, that instance is used.
+
+        Returns
+        -------
+        rvs : ndarray or scalar
+            Random variates of given `size`.
+
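+        Examples
+        --------
+        A hedged sketch using the built-in Poisson distribution; the exact
+        values depend on the random state, so only the shape is checked.
+
+        >>> from scipy.stats import poisson
+        >>> sample = poisson.rvs(2.5, size=4, random_state=0)
+        >>> sample.shape
+        (4,)
+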
+        """
+        kwargs['discrete'] = True
+        return super().rvs(*args, **kwargs)
+
+    def pmf(self, k, *args, **kwds):
+        """Probability mass function at k of the given RV.
+
+        Parameters
+        ----------
+        k : array_like
+            Quantiles.
+        arg1, arg2, arg3,... : array_like
+            The shape parameter(s) for the distribution (see docstring of the
+            instance object for more information)
+        loc : array_like, optional
+            Location parameter (default=0).
+
+        Returns
+        -------
+        pmf : array_like
+            Probability mass function evaluated at k.
+
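+        Examples
+        --------
+        A hedged check against the closed form: for a Binomial(5, 0.4),
+        P(X = 2) = C(5, 2) * 0.4**2 * 0.6**3 = 0.3456.
+
+        >>> from scipy.stats import binom
+        >>> round(float(binom.pmf(2, 5, 0.4)), 4)
+        0.3456
+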
+        """
+        args, loc, _ = self._parse_args(*args, **kwds)
+        k, loc = map(asarray, (k, loc))
+        args = tuple(map(asarray, args))
+        _a, _b = self._get_support(*args)
+        k = asarray((k-loc))
+        cond0 = self._argcheck(*args)
+        cond1 = (k >= _a) & (k <= _b)
+        if not isinstance(self, rv_sample):
+            cond1 = cond1 & self._nonzero(k, *args)
+        cond = cond0 & cond1
+        output = zeros(shape(cond), 'd')
+        place(output, (1-cond0) + np.isnan(k), self.badvalue)
+        if np.any(cond):
+            goodargs = argsreduce(cond, *((k,)+args))
+            place(output, cond, np.clip(self._pmf(*goodargs), 0, 1))
+        if output.ndim == 0:
+            return output[()]
+        return output
+
+    def logpmf(self, k, *args, **kwds):
+        """Log of the probability mass function at k of the given RV.
+
+        Parameters
+        ----------
+        k : array_like
+            Quantiles.
+        arg1, arg2, arg3,... : array_like
+            The shape parameter(s) for the distribution (see docstring of the
+            instance object for more information).
+        loc : array_like, optional
+            Location parameter. Default is 0.
+
+        Returns
+        -------
+        logpmf : array_like
+            Log of the probability mass function evaluated at k.
+
+        """
+        args, loc, _ = self._parse_args(*args, **kwds)
+        k, loc = map(asarray, (k, loc))
+        args = tuple(map(asarray, args))
+        _a, _b = self._get_support(*args)
+        k = asarray((k-loc))
+        cond0 = self._argcheck(*args)
+        cond1 = (k >= _a) & (k <= _b)
+        if not isinstance(self, rv_sample):
+            cond1 = cond1 & self._nonzero(k, *args)
+        cond = cond0 & cond1
+        output = empty(shape(cond), 'd')
+        output.fill(NINF)
+        place(output, (1-cond0) + np.isnan(k), self.badvalue)
+        if np.any(cond):
+            goodargs = argsreduce(cond, *((k,)+args))
+            place(output, cond, self._logpmf(*goodargs))
+        if output.ndim == 0:
+            return output[()]
+        return output
+
+    def cdf(self, k, *args, **kwds):
+        """Cumulative distribution function of the given RV.
+
+        Parameters
+        ----------
+        k : array_like, int
+            Quantiles.
+        arg1, arg2, arg3,... : array_like
+            The shape parameter(s) for the distribution (see docstring of the
+            instance object for more information).
+        loc : array_like, optional
+            Location parameter (default=0).
+
+        Returns
+        -------
+        cdf : ndarray
+            Cumulative distribution function evaluated at `k`.
+
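+        Examples
+        --------
+        A hedged sketch: for a fair die, ``randint(1, 7)``,
+        P(X <= 3) = 3/6.
+
+        >>> from scipy.stats import randint
+        >>> float(randint.cdf(3, 1, 7))
+        0.5
+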
+        """
+        args, loc, _ = self._parse_args(*args, **kwds)
+        k, loc = map(asarray, (k, loc))
+        args = tuple(map(asarray, args))
+        _a, _b = self._get_support(*args)
+        k = asarray((k-loc))
+        cond0 = self._argcheck(*args)
+        cond1 = (k >= _a) & (k < _b)
+        cond2 = (k >= _b)
+        cond3 = np.isneginf(k)
+        cond = cond0 & cond1 & np.isfinite(k)
+
+        output = zeros(shape(cond), 'd')
+        place(output, cond2*(cond0 == cond0), 1.0)
+        place(output, cond3*(cond0 == cond0), 0.0)
+        place(output, (1-cond0) + np.isnan(k), self.badvalue)
+
+        if np.any(cond):
+            goodargs = argsreduce(cond, *((k,)+args))
+            place(output, cond, np.clip(self._cdf(*goodargs), 0, 1))
+        if output.ndim == 0:
+            return output[()]
+        return output
+
+    def logcdf(self, k, *args, **kwds):
+        """Log of the cumulative distribution function at k of the given RV.
+
+        Parameters
+        ----------
+        k : array_like, int
+            Quantiles.
+        arg1, arg2, arg3,... : array_like
+            The shape parameter(s) for the distribution (see docstring of the
+            instance object for more information).
+        loc : array_like, optional
+            Location parameter (default=0).
+
+        Returns
+        -------
+        logcdf : array_like
+            Log of the cumulative distribution function evaluated at k.
+
+        """
+        args, loc, _ = self._parse_args(*args, **kwds)
+        k, loc = map(asarray, (k, loc))
+        args = tuple(map(asarray, args))
+        _a, _b = self._get_support(*args)
+        k = asarray((k-loc))
+        cond0 = self._argcheck(*args)
+        cond1 = (k >= _a) & (k < _b)
+        cond2 = (k >= _b)
+        cond = cond0 & cond1
+        output = empty(shape(cond), 'd')
+        output.fill(NINF)
+        place(output, (1-cond0) + np.isnan(k), self.badvalue)
+        place(output, cond2*(cond0 == cond0), 0.0)
+
+        if np.any(cond):
+            goodargs = argsreduce(cond, *((k,)+args))
+            place(output, cond, self._logcdf(*goodargs))
+        if output.ndim == 0:
+            return output[()]
+        return output
+
+    def sf(self, k, *args, **kwds):
+        """Survival function (1 - `cdf`) at k of the given RV.
+
+        Parameters
+        ----------
+        k : array_like
+            Quantiles.
+        arg1, arg2, arg3,... : array_like
+            The shape parameter(s) for the distribution (see docstring of the
+            instance object for more information).
+        loc : array_like, optional
+            Location parameter (default=0).
+
+        Returns
+        -------
+        sf : array_like
+            Survival function evaluated at k.
+
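+        Examples
+        --------
+        A hedged sketch, complementary to `cdf`: for a fair die,
+        ``randint(1, 7)``, P(X > 3) = 0.5.
+
+        >>> from scipy.stats import randint
+        >>> float(randint.sf(3, 1, 7))
+        0.5
+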
+        """
+        args, loc, _ = self._parse_args(*args, **kwds)
+        k, loc = map(asarray, (k, loc))
+        args = tuple(map(asarray, args))
+        _a, _b = self._get_support(*args)
+        k = asarray(k-loc)
+        cond0 = self._argcheck(*args)
+        cond1 = (k >= _a) & (k < _b)
+        cond2 = ((k < _a) | np.isneginf(k)) & cond0
+        cond = cond0 & cond1 & np.isfinite(k)
+        output = zeros(shape(cond), 'd')
+        place(output, (1-cond0) + np.isnan(k), self.badvalue)
+        place(output, cond2, 1.0)
+        if np.any(cond):
+            goodargs = argsreduce(cond, *((k,)+args))
+            place(output, cond, np.clip(self._sf(*goodargs), 0, 1))
+        if output.ndim == 0:
+            return output[()]
+        return output
+
+    def logsf(self, k, *args, **kwds):
+        """Log of the survival function of the given RV.
+
+        Returns the log of the "survival function," defined as 1 - `cdf`,
+        evaluated at `k`.
+
+        Parameters
+        ----------
+        k : array_like
+            Quantiles.
+        arg1, arg2, arg3,... : array_like
+            The shape parameter(s) for the distribution (see docstring of the
+            instance object for more information).
+        loc : array_like, optional
+            Location parameter (default=0).
+
+        Returns
+        -------
+        logsf : ndarray
+            Log of the survival function evaluated at `k`.
+
+        """
+        args, loc, _ = self._parse_args(*args, **kwds)
+        k, loc = map(asarray, (k, loc))
+        args = tuple(map(asarray, args))
+        _a, _b = self._get_support(*args)
+        k = asarray(k-loc)
+        cond0 = self._argcheck(*args)
+        cond1 = (k >= _a) & (k < _b)
+        cond2 = (k < _a) & cond0
+        cond = cond0 & cond1
+        output = empty(shape(cond), 'd')
+        output.fill(NINF)
+        place(output, (1-cond0) + np.isnan(k), self.badvalue)
+        place(output, cond2, 0.0)
+        if np.any(cond):
+            goodargs = argsreduce(cond, *((k,)+args))
+            place(output, cond, self._logsf(*goodargs))
+        if output.ndim == 0:
+            return output[()]
+        return output
+
+    def ppf(self, q, *args, **kwds):
+        """Percent point function (inverse of `cdf`) at q of the given RV.
+
+        Parameters
+        ----------
+        q : array_like
+            Lower tail probability.
+        arg1, arg2, arg3,... : array_like
+            The shape parameter(s) for the distribution (see docstring of the
+            instance object for more information).
+        loc : array_like, optional
+            Location parameter (default=0).
+
+        Returns
+        -------
+        k : array_like
+            Quantile corresponding to the lower tail probability, q.
+
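+        Examples
+        --------
+        A hedged round-trip: `ppf` is the left-continuous inverse of
+        `cdf`, so the median of a fair die, ``randint(1, 7)``, is 3.
+
+        >>> from scipy.stats import randint
+        >>> float(randint.ppf(0.5, 1, 7))
+        3.0
+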
+        """
+        args, loc, _ = self._parse_args(*args, **kwds)
+        q, loc = map(asarray, (q, loc))
+        args = tuple(map(asarray, args))
+        _a, _b = self._get_support(*args)
+        cond0 = self._argcheck(*args) & (loc == loc)
+        cond1 = (q > 0) & (q < 1)
+        cond2 = (q == 1) & cond0
+        cond = cond0 & cond1
+        output = np.full(shape(cond), fill_value=self.badvalue, dtype='d')
+        # output type 'd' to handle nan and inf
+        place(output, (q == 0)*(cond == cond), _a-1 + loc)
+        place(output, cond2, _b + loc)
+        if np.any(cond):
+            goodargs = argsreduce(cond, *((q,)+args+(loc,)))
+            loc, goodargs = goodargs[-1], goodargs[:-1]
+            place(output, cond, self._ppf(*goodargs) + loc)
+
+        if output.ndim == 0:
+            return output[()]
+        return output
+
+    def isf(self, q, *args, **kwds):
+        """Inverse survival function (inverse of `sf`) at q of the given RV.
+
+        Parameters
+        ----------
+        q : array_like
+            Upper tail probability.
+        arg1, arg2, arg3,... : array_like
+            The shape parameter(s) for the distribution (see docstring of the
+            instance object for more information).
+        loc : array_like, optional
+            Location parameter (default=0).
+
+        Returns
+        -------
+        k : ndarray or scalar
+            Quantile corresponding to the upper tail probability, q.
+
+        """
+        args, loc, _ = self._parse_args(*args, **kwds)
+        q, loc = map(asarray, (q, loc))
+        args = tuple(map(asarray, args))
+        _a, _b = self._get_support(*args)
+        cond0 = self._argcheck(*args) & (loc == loc)
+        cond1 = (q > 0) & (q < 1)
+        cond2 = (q == 1) & cond0
+        cond3 = (q == 0) & cond0
+        cond = cond0 & cond1
+
+        # same problem as with ppf; copied from ppf and changed
+        output = np.full(shape(cond), fill_value=self.badvalue, dtype='d')
+        # output type 'd' to handle nan and inf
+        lower_bound = _a - 1 + loc
+        upper_bound = _b + loc
+        place(output, cond2*(cond == cond), lower_bound)
+        place(output, cond3*(cond == cond), upper_bound)
+
+        # call place only if at least 1 valid argument
+        if np.any(cond):
+            goodargs = argsreduce(cond, *((q,)+args+(loc,)))
+            loc, goodargs = goodargs[-1], goodargs[:-1]
+            # PB same as ticket 766
+            place(output, cond, self._isf(*goodargs) + loc)
+
+        if output.ndim == 0:
+            return output[()]
+        return output
+
+    def _entropy(self, *args):
+        if hasattr(self, 'pk'):
+            return stats.entropy(self.pk)
+        else:
+            _a, _b = self._get_support(*args)
+            return _expect(lambda x: entr(self.pmf(x, *args)),
+                           _a, _b, self.ppf(0.5, *args), self.inc)
+
+    def expect(self, func=None, args=(), loc=0, lb=None, ub=None,
+               conditional=False, maxcount=1000, tolerance=1e-10, chunksize=32):
+        """
+        Calculate expected value of a function with respect to the distribution
+        for discrete distribution by numerical summation.
+
+        Parameters
+        ----------
+        func : callable, optional
+            Function for which the expectation value is calculated.
+            Takes only one argument.
+            The default is the identity mapping f(k) = k.
+        args : tuple, optional
+            Shape parameters of the distribution.
+        loc : float, optional
+            Location parameter.
+            Default is 0.
+        lb, ub : int, optional
+            Lower and upper bound for the summation, default is set to the
+            support of the distribution, inclusive (``lb <= k <= ub``).
+        conditional : bool, optional
+            If True, then the expectation is corrected by the conditional
+            probability of the summation interval. The return value is the
+            expectation of the function, `func`, conditional on being in
+            the given interval (k such that ``lb <= k <= ub``).
+            Default is False.
+        maxcount : int, optional
+            Maximal number of terms to evaluate (to avoid an endless loop for
+            an infinite sum). Default is 1000.
+        tolerance : float, optional
+            Absolute tolerance for the summation. Default is 1e-10.
+        chunksize : int, optional
+            Iterate over the support of the distribution in chunks of this size.
+            Default is 32.
+
+        Returns
+        -------
+        expect : float
+            Expected value.
+
+        Notes
+        -----
+        For heavy-tailed distributions, the expected value may or may not
+        exist, depending on the function, `func`. If it does exist, but the
+        sum converges slowly, the accuracy of the result may be rather low.
+        For instance, for ``zipf(4)``, the accuracy of the mean and variance
+        estimates is only about 1e-5. Increasing `maxcount` and/or
+        `chunksize` may improve the result, but may also make zipf very
+        slow.
+
+        The function is not vectorized.
+
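+        Examples
+        --------
+        A hedged sanity check: the mean of a Poisson distribution with
+        rate ``mu`` is ``mu``, so the default identity expectation should
+        recover it up to the summation tolerance.
+
+        >>> from scipy.stats import poisson
+        >>> round(poisson.expect(args=(2.5,)), 6)
+        2.5
+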
+        """
+        if func is None:
+            def fun(x):
+                # loc and args from outer scope
+                return (x+loc)*self._pmf(x, *args)
+        else:
+            def fun(x):
+                # loc and args from outer scope
+                return func(x+loc)*self._pmf(x, *args)
+        # Historically `pmf` was used here because `_pmf` does not check
+        # the support (e.g. for `randint`) and `self.a`/`self.b` might not
+        # have been correct at this stage; that no longer appears to be an
+        # issue, so `_pmf` is used now.
+
+        _a, _b = self._get_support(*args)
+        if lb is None:
+            lb = _a
+        else:
+            lb = lb - loc   # convert bound for standardized distribution
+        if ub is None:
+            ub = _b
+        else:
+            ub = ub - loc   # convert bound for standardized distribution
+        if conditional:
+            invfac = self.sf(lb-1, *args) - self.sf(ub, *args)
+        else:
+            invfac = 1.0
+
+        if isinstance(self, rv_sample):
+            res = self._expect(fun, lb, ub)
+            return res / invfac
+
+        # iterate over the support, starting from the median
+        x0 = self.ppf(0.5, *args)
+        res = _expect(fun, lb, ub, x0, self.inc, maxcount, tolerance, chunksize)
+        return res / invfac
+
+    def _param_info(self):
+        shape_info = self._shape_info()
+        loc_info = _ShapeInfo("loc", True, (-np.inf, np.inf), (False, False))
+        param_info = shape_info + [loc_info]
+        return param_info
+
+
+def _expect(fun, lb, ub, x0, inc, maxcount=1000, tolerance=1e-10,
+            chunksize=32):
+    """Helper for computing the expectation value of `fun`."""
+    # short-circuit if the support size is small enough
+    if (ub - lb) <= chunksize:
+        supp = np.arange(lb, ub+1, inc)
+        vals = fun(supp)
+        return np.sum(vals)
+
+    # otherwise, iterate starting from x0
+    if x0 < lb:
+        x0 = lb
+    if x0 > ub:
+        x0 = ub
+
+    count, tot = 0, 0.
+    # iterate over [x0, ub] inclusive
+    for x in _iter_chunked(x0, ub+1, chunksize=chunksize, inc=inc):
+        count += x.size
+        delta = np.sum(fun(x))
+        tot += delta
+        if abs(delta) < tolerance * x.size:
+            break
+        if count > maxcount:
+            warnings.warn('expect(): sum did not converge', RuntimeWarning)
+            return tot
+
+    # iterate over [lb, x0)
+    for x in _iter_chunked(x0-1, lb-1, chunksize=chunksize, inc=-inc):
+        count += x.size
+        delta = np.sum(fun(x))
+        tot += delta
+        if abs(delta) < tolerance * x.size:
+            break
+        if count > maxcount:
+            warnings.warn('expect(): sum did not converge', RuntimeWarning)
+            break
+
+    return tot
+
+
+def _iter_chunked(x0, x1, chunksize=4, inc=1):
+    """Iterate from x0 to x1 in chunks of chunksize and steps inc.
+
+    x0 must be finite, x1 need not be. In the latter case, the iterator is
+    infinite.
+    Handles both x0 < x1 and x0 > x1. In the latter case, iterates downwards
+    (make sure to set inc < 0).
+
+    >>> [x for x in _iter_chunked(2, 5, inc=2)]
+    [array([2, 4])]
+    >>> [x for x in _iter_chunked(2, 11, inc=2)]
+    [array([2, 4, 6, 8]), array([10])]
+    >>> [x for x in _iter_chunked(2, -5, inc=-2)]
+    [array([ 2,  0, -2, -4])]
+    >>> [x for x in _iter_chunked(2, -9, inc=-2)]
+    [array([ 2,  0, -2, -4]), array([-6, -8])]
+
+    """
+    if inc == 0:
+        raise ValueError('Cannot increment by zero.')
+    if chunksize <= 0:
+        raise ValueError('Chunk size must be positive; got %s.' % chunksize)
+
+    s = 1 if inc > 0 else -1
+    stepsize = abs(chunksize * inc)
+
+    x = x0
+    while (x - x1) * inc < 0:
+        delta = min(stepsize, abs(x - x1))
+        step = delta * s
+        supp = np.arange(x, x + step, inc)
+        x += step
+        yield supp
+
+
+class rv_sample(rv_discrete):
+    """A 'sample' discrete distribution defined by the support and values.
+
+    The constructor ignores most of its arguments; only the `values`
+    argument is required.
+    """
+    def __init__(self, a=0, b=inf, name=None, badvalue=None,
+                 moment_tol=1e-8, values=None, inc=1, longname=None,
+                 shapes=None, extradoc=None, seed=None):
+
+        super(rv_discrete, self).__init__(seed)
+
+        if extradoc is not None:
+            warnings.warn("extradoc is deprecated and will be removed in "
+                          "SciPy 1.11.0", DeprecationWarning)
+
+        if values is None:
+            raise ValueError("rv_sample.__init__(..., values=None,...)")
+
+        # cf generic freeze
+        self._ctor_param = dict(
+            a=a, b=b, name=name, badvalue=badvalue,
+            moment_tol=moment_tol, values=values, inc=inc,
+            longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)
+
+        if badvalue is None:
+            badvalue = nan
+        self.badvalue = badvalue
+        self.moment_tol = moment_tol
+        self.inc = inc
+        self.shapes = shapes
+        self.vecentropy = self._entropy
+
+        xk, pk = values
+
+        if np.shape(xk) != np.shape(pk):
+            raise ValueError("xk and pk must have the same shape.")
+        if np.less(pk, 0.0).any():
+            raise ValueError("All elements of pk must be non-negative.")
+        if not np.allclose(np.sum(pk), 1):
+            raise ValueError("The sum of provided pk is not 1.")
+
+        indx = np.argsort(np.ravel(xk))
+        self.xk = np.take(np.ravel(xk), indx, 0)
+        self.pk = np.take(np.ravel(pk), indx, 0)
+        self.a = self.xk[0]
+        self.b = self.xk[-1]
+
+        self.qvals = np.cumsum(self.pk, axis=0)
+
+        self.shapes = ' '   # bypass inspection
+
+        self._construct_argparser(meths_to_inspect=[self._pmf],
+                                  locscale_in='loc=0',
+                                  # scale=1 for discrete RVs
+                                  locscale_out='loc, 1')
+
+        self._attach_methods()
+
+        self._construct_docstrings(name, longname, extradoc)
+
+    def __getstate__(self):
+        dct = self.__dict__.copy()
+
+        # these methods will be remade in rv_generic.__setstate__,
+        # which calls rv_generic._attach_methods
+        attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs"]
+        for attr in attrs:
+            dct.pop(attr, None)
+
+        return dct
+
+    def _attach_methods(self):
+        """Attaches dynamically created argparser methods."""
+        self._attach_argparser_methods()
+
+    def _get_support(self, *args):
+        """Return the support of the (unscaled, unshifted) distribution.
+
+        Parameters
+        ----------
+        arg1, arg2, ... : array_like
+            The shape parameter(s) for the distribution (see docstring of the
+            instance object for more information).
+
+        Returns
+        -------
+        a, b : numeric (float, or int or +/-np.inf)
+            end-points of the distribution's support.
+        """
+        return self.a, self.b
+
+    def _pmf(self, x):
+        return np.select([x == k for k in self.xk],
+                         [np.broadcast_arrays(p, x)[0] for p in self.pk], 0)
+
+    def _cdf(self, x):
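+        # Index of the largest support point <= x: the first position at
+        # which xk exceeds x, minus one. The public `cdf` wrapper only
+        # calls this for x inside the support.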
+        xx, xxk = np.broadcast_arrays(x[:, None], self.xk)
+        indx = np.argmax(xxk > xx, axis=-1) - 1
+        return self.qvals[indx]
+
+    def _ppf(self, q):
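+        # Smallest support point whose cumulative probability reaches q.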
+        qq, sqq = np.broadcast_arrays(q[..., None], self.qvals)
+        indx = argmax(sqq >= qq, axis=-1)
+        return self.xk[indx]
+
+    def _rvs(self, size=None, random_state=None):
+        # Need to define it explicitly, otherwise .rvs() with size=None
+        # fails due to explicit broadcasting in _ppf
+        U = random_state.uniform(size=size)
+        if size is None:
+            U = np.array(U, ndmin=1)
+            Y = self._ppf(U)[0]
+        else:
+            Y = self._ppf(U)
+        return Y
+
+    def _entropy(self):
+        return stats.entropy(self.pk)
+
+    def generic_moment(self, n):
+        n = asarray(n)
+        return np.sum(self.xk**n[np.newaxis, ...] * self.pk, axis=0)
+
+    def _expect(self, fun, lb, ub, *args, **kwds):
+        # ignore all args, just do a brute force summation
+        supp = self.xk[(lb <= self.xk) & (self.xk <= ub)]
+        vals = fun(supp)
+        return np.sum(vals)
+
+
+def _check_shape(argshape, size):
+    """
+    This is a utility function used by `_rvs()` in the class geninvgauss_gen.
+    It compares the tuple argshape to the tuple size.
+
+    Parameters
+    ----------
+    argshape : tuple of integers
+        Shape of the arguments.
+    size : tuple of integers or integer
+        Size argument of rvs().
+
+    Returns
+    -------
+    The function returns two tuples, scalar_shape and bc.
+
+    scalar_shape : tuple
+        Shape to which the 1-d array of random variates returned by
+        _rvs_scalar() is converted when it is copied into the
+        output array of _rvs().
+
+    bc : tuple of booleans
+        bc is a tuple of the same length as size. bc[j] is True if the
+        data associated with that index is generated in one call of
+        _rvs_scalar().
+
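+    A worked example, hand-checked against the loop below (axes where
+    ``size`` exceeds the broadcast argument shape are generated in a
+    single ``_rvs_scalar()`` call):
+
+    >>> _check_shape((3, 1), (2, 3, 5))
+    ((2, 5), (True, False, True))
+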
+    """
+    scalar_shape = []
+    bc = []
+    for argdim, sizedim in zip_longest(argshape[::-1], size[::-1],
+                                       fillvalue=1):
+        if sizedim > argdim or (argdim == sizedim == 1):
+            scalar_shape.append(sizedim)
+            bc.append(True)
+        else:
+            bc.append(False)
+    return tuple(scalar_shape[::-1]), tuple(bc[::-1])
+
+
+def get_distribution_names(namespace_pairs, rv_base_class):
+    """Collect names of statistical distributions and their generators.
+
+    Parameters
+    ----------
+    namespace_pairs : sequence
+        A snapshot of (name, value) pairs in the namespace of a module.
+    rv_base_class : class
+        The base class of random variable generator classes in a module.
+
+    Returns
+    -------
+    distn_names : list of strings
+        Names of the statistical distributions.
+    distn_gen_names : list of strings
+        Names of the generators of the statistical distributions.
+        Note that these are not simply the names of the statistical
+        distributions, with a _gen suffix added.
+
+    """
+    distn_names = []
+    distn_gen_names = []
+    for name, value in namespace_pairs:
+        if name.startswith('_'):
+            continue
+        if name.endswith('_gen') and issubclass(value, rv_base_class):
+            distn_gen_names.append(name)
+        if isinstance(value, rv_base_class):
+            distn_names.append(name)
+    return distn_names, distn_gen_names
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_distr_params.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_distr_params.py
new file mode 100644
index 00000000..5baececa
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_distr_params.py
@@ -0,0 +1,281 @@
+"""
+Sane parameters for stats.distributions.
+"""
+import numpy as np
+
+distcont = [
+    ['alpha', (3.5704770516650459,)],
+    ['anglit', ()],
+    ['arcsine', ()],
+    ['argus', (1.0,)],
+    ['beta', (2.3098496451481823, 0.62687954300963677)],
+    ['betaprime', (5, 6)],
+    ['bradford', (0.29891359763170633,)],
+    ['burr', (10.5, 4.3)],
+    ['burr12', (10, 4)],
+    ['cauchy', ()],
+    ['chi', (78,)],
+    ['chi2', (55,)],
+    ['cosine', ()],
+    ['crystalball', (2.0, 3.0)],
+    ['dgamma', (1.1023326088288166,)],
+    ['dweibull', (2.0685080649914673,)],
+    ['erlang', (10,)],
+    ['expon', ()],
+    ['exponnorm', (1.5,)],
+    ['exponpow', (2.697119160358469,)],
+    ['exponweib', (2.8923945291034436, 1.9505288745913174)],
+    ['f', (29, 18)],
+    ['fatiguelife', (29,)],   # correction numargs = 1
+    ['fisk', (3.0857548622253179,)],
+    ['foldcauchy', (4.7164673455831894,)],
+    ['foldnorm', (1.9521253373555869,)],
+    ['gamma', (1.9932305483800778,)],
+    ['gausshyper', (13.763771604130699, 3.1189636648681431,
+                    2.5145980350183019, 5.1811649903971615)],  # veryslow
+    ['genexpon', (9.1325976465418908, 16.231956600590632, 3.2819552690843983)],
+    ['genextreme', (-0.1,)],
+    ['gengamma', (4.4162385429431925, 3.1193091679242761)],
+    ['gengamma', (4.4162385429431925, -3.1193091679242761)],
+    ['genhalflogistic', (0.77274727809929322,)],
+    ['genhyperbolic', (0.5, 1.5, -0.5,)],
+    ['geninvgauss', (2.3, 1.5)],
+    ['genlogistic', (0.41192440799679475,)],
+    ['gennorm', (1.2988442399460265,)],
+    ['halfgennorm', (0.6748054997000371,)],
+    ['genpareto', (0.1,)],   # use case with finite moments
+    ['gibrat', ()],
+    ['gompertz', (0.94743713075105251,)],
+    ['gumbel_l', ()],
+    ['gumbel_r', ()],
+    ['halfcauchy', ()],
+    ['halflogistic', ()],
+    ['halfnorm', ()],
+    ['hypsecant', ()],
+    ['invgamma', (4.0668996136993067,)],
+    ['invgauss', (0.14546264555347513,)],
+    ['invweibull', (10.58,)],
+    ['johnsonsb', (4.3172675099141058, 3.1837781130785063)],
+    ['johnsonsu', (2.554395574161155, 2.2482281679651965)],
+    ['kappa4', (0.0, 0.0)],
+    ['kappa4', (-0.1, 0.1)],
+    ['kappa4', (0.0, 0.1)],
+    ['kappa4', (0.1, 0.0)],
+    ['kappa3', (1.0,)],
+    ['ksone', (1000,)],  # replace 22 by 100 to avoid failing range, ticket 956
+    ['kstwo', (10,)],
+    ['kstwobign', ()],
+    ['laplace', ()],
+    ['laplace_asymmetric', (2,)],
+    ['levy', ()],
+    ['levy_l', ()],
+    ['levy_stable', (1.8, -0.5)],
+    ['loggamma', (0.41411931826052117,)],
+    ['logistic', ()],
+    ['loglaplace', (3.2505926592051435,)],
+    ['lognorm', (0.95368226960575331,)],
+    ['loguniform', (0.01, 1.25)],
+    ['lomax', (1.8771398388773268,)],
+    ['maxwell', ()],
+    ['mielke', (10.4, 4.6)],
+    ['moyal', ()],
+    ['nakagami', (4.9673794866666237,)],
+    ['ncf', (27, 27, 0.41578441799226107)],
+    ['nct', (14, 0.24045031331198066)],
+    ['ncx2', (21, 1.0560465975116415)],
+    ['norm', ()],
+    ['norminvgauss', (1.25, 0.5)],
+    ['pareto', (2.621716532144454,)],
+    ['pearson3', (0.1,)],
+    ['pearson3', (-2,)],
+    ['powerlaw', (1.6591133289905851,)],
+    ['powerlaw', (0.6591133289905851,)],
+    ['powerlognorm', (2.1413923530064087, 0.44639540782048337)],
+    ['powernorm', (4.4453652254590779,)],
+    ['rayleigh', ()],
+    ['rdist', (1.6,)],
+    ['recipinvgauss', (0.63004267809369119,)],
+    ['reciprocal', (0.01, 1.25)],
+    ['rice', (0.7749725210111873,)],
+    ['semicircular', ()],
+    ['skewcauchy', (0.5,)],
+    ['skewnorm', (4.0,)],
+    ['studentized_range', (3.0, 10.0)],
+    ['t', (2.7433514990818093,)],
+    ['trapezoid', (0.2, 0.8)],
+    ['triang', (0.15785029824528218,)],
+    ['truncexpon', (4.6907725456810478,)],
+    ['truncnorm', (-1.0978730080013919, 2.7306754109031979)],
+    ['truncnorm', (0.1, 2.)],
+    ['truncpareto', (1.8, 5.3)],
+    ['truncweibull_min', (2.5, 0.25, 1.75)],
+    ['tukeylambda', (3.1321477856738267,)],
+    ['uniform', ()],
+    ['vonmises', (3.9939042581071398,)],
+    ['vonmises_line', (3.9939042581071398,)],
+    ['wald', ()],
+    ['weibull_max', (2.8687961709100187,)],
+    ['weibull_min', (1.7866166930421596,)],
+    ['wrapcauchy', (0.031071279018614728,)]]
+
+
+distdiscrete = [
+    ['bernoulli', (0.3,)],
+    ['betabinom', (5, 2.3, 0.63)],
+    ['binom', (5, 0.4)],
+    ['boltzmann', (1.4, 19)],
+    ['dlaplace', (0.8,)],  # 0.5
+    ['geom', (0.5,)],
+    ['hypergeom', (30, 12, 6)],
+    ['hypergeom', (21, 3, 12)],  # numpy.random (3,18,12) numpy ticket:921
+    ['hypergeom', (21, 18, 11)],  # numpy.random (18,3,11) numpy ticket:921
+    ['nchypergeom_fisher', (140, 80, 60, 0.5)],
+    ['nchypergeom_wallenius', (140, 80, 60, 0.5)],
+    ['logser', (0.6,)],  # re-enabled, numpy ticket:921
+    ['nbinom', (0.4, 0.4)],  # from tickets: 583
+    ['nbinom', (5, 0.5)],
+    ['planck', (0.51,)],   # 4.1
+    ['poisson', (0.6,)],
+    ['randint', (7, 31)],
+    ['skellam', (15, 8)],
+    ['zipf', (6.5,)],
+    ['zipfian', (0.75, 15)],
+    ['zipfian', (1.25, 10)],
+    ['yulesimon', (11.0,)],
+    ['nhypergeom', (20, 7, 1)]
+]
+
+
+invdistdiscrete = [
+    # In each of the following, at least one shape parameter is invalid
+    ['hypergeom', (3, 3, 4)],
+    ['nhypergeom', (5, 2, 8)],
+    ['nchypergeom_fisher', (3, 3, 4, 1)],
+    ['nchypergeom_wallenius', (3, 3, 4, 1)],
+    ['bernoulli', (1.5, )],
+    ['binom', (10, 1.5)],
+    ['betabinom', (10, -0.4, -0.5)],
+    ['boltzmann', (-1, 4)],
+    ['dlaplace', (-0.5, )],
+    ['geom', (1.5, )],
+    ['logser', (1.5, )],
+    ['nbinom', (10, 1.5)],
+    ['planck', (-0.5, )],
+    ['poisson', (-0.5, )],
+    ['randint', (5, 2)],
+    ['skellam', (-5, -2)],
+    ['zipf', (-2, )],
+    ['yulesimon', (-2, )],
+    ['zipfian', (-0.75, 15)]
+]
+
+
+invdistcont = [
+    # In each of the following, at least one shape parameter is invalid
+    ['alpha', (-1, )],
+    ['anglit', ()],
+    ['arcsine', ()],
+    ['argus', (-1, )],
+    ['beta', (-2, 2)],
+    ['betaprime', (-2, 2)],
+    ['bradford', (-1, )],
+    ['burr', (-1, 1)],
+    ['burr12', (-1, 1)],
+    ['cauchy', ()],
+    ['chi', (-1, )],
+    ['chi2', (-1, )],
+    ['cosine', ()],
+    ['crystalball', (-1, 2)],
+    ['dgamma', (-1, )],
+    ['dweibull', (-1, )],
+    ['erlang', (-1, )],
+    ['expon', ()],
+    ['exponnorm', (-1, )],
+    ['exponweib', (1, -1)],
+    ['exponpow', (-1, )],
+    ['f', (10, -10)],
+    ['fatiguelife', (-1, )],
+    ['fisk', (-1, )],
+    ['foldcauchy', (-1, )],
+    ['foldnorm', (-1, )],
+    ['genlogistic', (-1, )],
+    ['gennorm', (-1, )],
+    ['genpareto', (np.inf, )],
+    ['genexpon', (1, 2, -3)],
+    ['genextreme', (np.inf, )],
+    ['genhyperbolic', (0.5, -0.5, -1.5,)],
+    ['gausshyper', (1, 2, 3, -4)],
+    ['gamma', (-1, )],
+    ['gengamma', (-1, 0)],
+    ['genhalflogistic', (-1, )],
+    ['geninvgauss', (1, 0)],
+    ['gibrat', ()],
+    ['gompertz', (-1, )],
+    ['gumbel_r', ()],
+    ['gumbel_l', ()],
+    ['halfcauchy', ()],
+    ['halflogistic', ()],
+    ['halfnorm', ()],
+    ['halfgennorm', (-1, )],
+    ['hypsecant', ()],
+    ['invgamma', (-1, )],
+    ['invgauss', (-1, )],
+    ['invweibull', (-1, )],
+    ['johnsonsb', (1, -2)],
+    ['johnsonsu', (1, -2)],
+    ['kappa4', (np.nan, 0)],
+    ['kappa3', (-1, )],
+    ['ksone', (-1, )],
+    ['kstwo', (-1, )],
+    ['kstwobign', ()],
+    ['laplace', ()],
+    ['laplace_asymmetric', (-1, )],
+    ['levy', ()],
+    ['levy_l', ()],
+    ['levy_stable', (-1, 1)],
+    ['logistic', ()],
+    ['loggamma', (-1, )],
+    ['loglaplace', (-1, )],
+    ['lognorm', (-1, )],
+    ['loguniform', (10, 5)],
+    ['lomax', (-1, )],
+    ['maxwell', ()],
+    ['mielke', (1, -2)],
+    ['moyal', ()],
+    ['nakagami', (-1, )],
+    ['ncx2', (-1, 2)],
+    ['ncf', (10, 20, -1)],
+    ['nct', (-1, 2)],
+    ['norm', ()],
+    ['norminvgauss', (5, -10)],
+    ['pareto', (-1, )],
+    ['pearson3', (np.nan, )],
+    ['powerlaw', (-1, )],
+    ['powerlognorm', (1, -2)],
+    ['powernorm', (-1, )],
+    ['rdist', (-1, )],
+    ['rayleigh', ()],
+    ['rice', (-1, )],
+    ['recipinvgauss', (-1, )],
+    ['semicircular', ()],
+    ['skewnorm', (np.inf, )],
+    ['studentized_range', (-1, 1)],
+    ['t', (-1, )],
+    ['trapezoid', (0, 2)],
+    ['triang', (2, )],
+    ['truncexpon', (-1, )],
+    ['truncnorm', (10, 5)],
+    ['truncpareto', (-1, 5)],
+    ['truncpareto', (1.8, .5)],
+    ['truncweibull_min', (-2.5, 0.25, 1.75)],
+    ['tukeylambda', (np.nan, )],
+    ['uniform', ()],
+    ['vonmises', (-1, )],
+    ['vonmises_line', (-1, )],
+    ['wald', ()],
+    ['weibull_min', (-1, )],
+    ['weibull_max', (-1, )],
+    ['wrapcauchy', (2, )],
+    ['reciprocal', (15, 10)],
+    ['skewcauchy', (2, )]
+]
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_entropy.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_entropy.py
new file mode 100644
index 00000000..83f5a606
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_entropy.py
@@ -0,0 +1,399 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Apr  2 09:06:05 2021
+
+@author: matth
+"""
+
+from __future__ import annotations
+import math
+import numpy as np
+from scipy import special
+from typing import Optional, Union
+
+__all__ = ['entropy', 'differential_entropy']
+
+
+def entropy(pk: np.typing.ArrayLike,
+            qk: Optional[np.typing.ArrayLike] = None,
+            base: Optional[float] = None,
+            axis: int = 0
+            ) -> Union[np.number, np.ndarray]:
+    """
+    Calculate the Shannon entropy/relative entropy of given distribution(s).
+
+    If only probabilities `pk` are given, the Shannon entropy is calculated as
+    ``H = -sum(pk * log(pk))``.
+
+    If `qk` is not None, then compute the relative entropy
+    ``D = sum(pk * log(pk / qk))``. This quantity is also known
+    as the Kullback-Leibler divergence.
+
+    This routine will normalize `pk` and `qk` if they don't sum to 1.
+
+    Parameters
+    ----------
+    pk : array_like
+        Defines the (discrete) distribution. Along each axis-slice of ``pk``,
+        element ``i`` is the  (possibly unnormalized) probability of event
+        ``i``.
+    qk : array_like, optional
+        Sequence against which the relative entropy is computed. Should be in
+        the same format as `pk`.
+    base : float, optional
+        The logarithmic base to use, defaults to ``e`` (natural logarithm).
+    axis : int, optional
+        The axis along which the entropy is calculated. Default is 0.
+
+    Returns
+    -------
+    S : {float, array_like}
+        The calculated entropy.
+
+    Notes
+    -----
+    Informally, the Shannon entropy quantifies the expected uncertainty
+    inherent in the possible outcomes of a discrete random variable.
+    For example,
+    if messages consisting of sequences of symbols from a set are to be
+    encoded and transmitted over a noiseless channel, then the Shannon entropy
+    ``H(pk)`` gives a tight lower bound for the average number of units of
+    information needed per symbol if the symbols occur with frequencies
+    governed by the discrete distribution `pk` [1]_. The choice of base
+    determines the choice of units; e.g., ``e`` for nats, ``2`` for bits, etc.
+
+    The relative entropy, ``D(pk|qk)``, quantifies the increase in the average
+    number of units of information needed per symbol if the encoding is
+    optimized for the probability distribution `qk` instead of the true
+    distribution `pk`. Informally, the relative entropy quantifies the expected
+    excess in surprise experienced if one believes the true distribution is
+    `qk` when it is actually `pk`.
+
+    A related quantity, the cross entropy ``CE(pk, qk)``, satisfies the
+    equation ``CE(pk, qk) = H(pk) + D(pk|qk)`` and can also be calculated with
+    the formula ``CE = -sum(pk * log(qk))``. It gives the average
+    number of units of information needed per symbol if an encoding is
+    optimized for the probability distribution `qk` when the true distribution
+    is `pk`. It is not computed directly by `entropy`, but it can be computed
+    using two calls to the function (see Examples).
+
+    See [2]_ for more information.
+
+    References
+    ----------
+    .. [1] Shannon, C.E. (1948), A Mathematical Theory of Communication.
+           Bell System Technical Journal, 27: 379-423.
+           https://doi.org/10.1002/j.1538-7305.1948.tb01338.x
+    .. [2] Thomas M. Cover and Joy A. Thomas. 2006. Elements of Information
+           Theory (Wiley Series in Telecommunications and Signal Processing).
+           Wiley-Interscience, USA.
+
+
+    Examples
+    --------
+    The outcome of a fair coin is the most uncertain:
+
+    >>> import numpy as np
+    >>> from scipy.stats import entropy
+    >>> base = 2  # work in units of bits
+    >>> pk = np.array([1/2, 1/2])  # fair coin
+    >>> H = entropy(pk, base=base)
+    >>> H
+    1.0
+    >>> H == -np.sum(pk * np.log(pk)) / np.log(base)
+    True
+
+    The outcome of a biased coin is less uncertain:
+
+    >>> qk = np.array([9/10, 1/10])  # biased coin
+    >>> entropy(qk, base=base)
+    0.46899559358928117
+
+    The relative entropy between the fair coin and biased coin is calculated
+    as:
+
+    >>> D = entropy(pk, qk, base=base)
+    >>> D
+    0.7369655941662062
+    >>> D == np.sum(pk * np.log(pk/qk)) / np.log(base)
+    True
+
+    The cross entropy can be calculated as the sum of the entropy and
+    relative entropy:
+
+    >>> CE = entropy(pk, base=base) + entropy(pk, qk, base=base)
+    >>> CE
+    1.736965594166206
+    >>> CE == -np.sum(pk * np.log(qk)) / np.log(base)
+    True
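+
+    Because `pk` is normalized internally, unnormalized counts give the same
+    result; a quick check of the normalization behavior described above:
+
+    >>> entropy(np.array([5, 5]), base=base)  # counts for a fair coin
+    1.0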
+
+    """
+    if base is not None and base <= 0:
+        raise ValueError("`base` must be a positive number or `None`.")
+
+    pk = np.asarray(pk)
+    pk = 1.0*pk / np.sum(pk, axis=axis, keepdims=True)
+    if qk is None:
+        vec = special.entr(pk)
+    else:
+        qk = np.asarray(qk)
+        pk, qk = np.broadcast_arrays(pk, qk)
+        qk = 1.0*qk / np.sum(qk, axis=axis, keepdims=True)
+        vec = special.rel_entr(pk, qk)
+    S = np.sum(vec, axis=axis)
+    if base is not None:
+        S /= np.log(base)
+    return S
+
+
+def differential_entropy(
+    values: np.typing.ArrayLike,
+    *,
+    window_length: Optional[int] = None,
+    base: Optional[float] = None,
+    axis: int = 0,
+    method: str = "auto",
+) -> Union[np.number, np.ndarray]:
+    r"""Given a sample of a distribution, estimate the differential entropy.
+
+    Several estimation methods are available using the `method` parameter. By
+    default, a method is selected based on the size of the sample.
+
+    Parameters
+    ----------
+    values : sequence
+        Sample from a continuous distribution.
+    window_length : int, optional
+        Window length for computing Vasicek estimate. Must be an integer
+        between 1 and half of the sample size. If ``None`` (the default), it
+        uses the heuristic value
+
+        .. math::
+            \left \lfloor \sqrt{n} + 0.5 \right \rfloor
+
+        where :math:`n` is the sample size. This heuristic was originally
+        proposed in [2]_ and has become common in the literature.
+    base : float, optional
+        The logarithmic base to use, defaults to ``e`` (natural logarithm).
+    axis : int, optional
+        The axis along which the differential entropy is calculated.
+        Default is 0.
+    method : {'vasicek', 'van es', 'ebrahimi', 'correa', 'auto'}, optional
+        The method used to estimate the differential entropy from the sample.
+        Default is ``'auto'``.  See Notes for more information.
+
+    Returns
+    -------
+    entropy : float
+        The calculated differential entropy.
+
+    Notes
+    -----
+    This function will converge to the true differential entropy in the limit
+
+    .. math::
+        n \to \infty, \quad m \to \infty, \quad \frac{m}{n} \to 0
+
+    The optimal choice of ``window_length`` for a given sample size depends on
+    the (unknown) distribution. Typically, the smoother the density of the
+    distribution, the larger the optimal value of ``window_length`` [1]_.
+
+    The following options are available for the `method` parameter.
+
+    * ``'vasicek'`` uses the estimator presented in [1]_. This is
+      one of the first and most influential estimators of differential entropy.
+    * ``'van es'`` uses the bias-corrected estimator presented in [3]_, which
+      is not only consistent but, under some conditions, asymptotically normal.
+    * ``'ebrahimi'`` uses an estimator presented in [4]_, which was shown
+      in simulation to have smaller bias and mean squared error than
+      the Vasicek estimator.
+    * ``'correa'`` uses the estimator presented in [5]_ based on local linear
+      regression. In a simulation study, it had consistently smaller mean
+      squared error than the Vasicek estimator, but it is more expensive to
+      compute.
+    * ``'auto'`` selects the method automatically (default). Currently,
+      this selects ``'van es'`` for very small samples (10 or fewer),
+      ``'ebrahimi'`` for moderate sample sizes (11-1000), and ``'vasicek'``
+      for larger samples, but this behavior is subject to change in future
+      versions.
+
+    All estimators are implemented as described in [6]_.
+
+    References
+    ----------
+    .. [1] Vasicek, O. (1976). A test for normality based on sample entropy.
+           Journal of the Royal Statistical Society:
+           Series B (Methodological), 38(1), 54-59.
+    .. [2] Grzegorzewski, P., & Wieczorkowski, R. (1999). Entropy-based
+           goodness-of-fit test for exponentiality. Communications in
+           Statistics-Theory and Methods, 28(5), 1183-1202.
+    .. [3] Van Es, B. (1992). Estimating functionals related to a density by a
+           class of statistics based on spacings. Scandinavian Journal of
+           Statistics, 61-72.
+    .. [4] Ebrahimi, N., Pflughoeft, K., & Soofi, E. S. (1994). Two measures
+           of sample entropy. Statistics & Probability Letters, 20(3), 225-234.
+    .. [5] Correa, J. C. (1995). A new estimator of entropy. Communications
+           in Statistics-Theory and Methods, 24(10), 2439-2449.
+    .. [6] Noughabi, H. A. (2015). Entropy Estimation Using Numerical Methods.
+           Annals of Data Science, 2(2), 231-241.
+           https://link.springer.com/article/10.1007/s40745-015-0045-9
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.stats import differential_entropy, norm
+
+    Entropy of a standard normal distribution:
+
+    >>> rng = np.random.default_rng()
+    >>> values = rng.standard_normal(100)
+    >>> differential_entropy(values)
+    1.3407817436640392
+
+    Compare with the true entropy:
+
+    >>> float(norm.entropy())
+    1.4189385332046727
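+
+    The `base` argument only rescales the result, so estimates in bits and
+    nats differ by the constant factor ``np.log(2)``; a quick consistency
+    check:
+
+    >>> H_nats = differential_entropy(values)
+    >>> H_bits = differential_entropy(values, base=2)
+    >>> H_bits == H_nats / np.log(2)
+    True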
+
+    For several sample sizes between 5 and 1000, compare the accuracy of
+    the ``'vasicek'``, ``'van es'``, and ``'ebrahimi'`` methods. Specifically,
+    compare the root mean squared error (over 1000 trials) between the estimate
+    and the true differential entropy of the distribution.
+
+    >>> from scipy import stats
+    >>> import matplotlib.pyplot as plt
+    >>>
+    >>>
+    >>> def rmse(res, expected):
+    ...     '''Root mean squared error'''
+    ...     return np.sqrt(np.mean((res - expected)**2))
+    >>>
+    >>>
+    >>> a, b = np.log10(5), np.log10(1000)
+    >>> ns = np.round(np.logspace(a, b, 10)).astype(int)
+    >>> reps = 1000  # number of repetitions for each sample size
+    >>> expected = stats.expon.entropy()
+    >>>
+    >>> method_errors = {'vasicek': [], 'van es': [], 'ebrahimi': []}
+    >>> for method in method_errors:
+    ...     for n in ns:
+    ...        rvs = stats.expon.rvs(size=(reps, n), random_state=rng)
+    ...        res = stats.differential_entropy(rvs, method=method, axis=-1)
+    ...        error = rmse(res, expected)
+    ...        method_errors[method].append(error)
+    >>>
+    >>> for method, errors in method_errors.items():
+    ...     plt.loglog(ns, errors, label=method)
+    >>>
+    >>> plt.legend()
+    >>> plt.xlabel('sample size')
+    >>> plt.ylabel('RMSE (1000 trials)')
+    >>> plt.title('Entropy Estimator Error (Exponential Distribution)')
+
+    """
+    values = np.asarray(values)
+    values = np.moveaxis(values, axis, -1)
+    n = values.shape[-1]  # number of observations
+
+    if window_length is None:
+        window_length = math.floor(math.sqrt(n) + 0.5)
+
+    if not 2 <= 2 * window_length < n:
+        raise ValueError(
+            f"Window length ({window_length}) must be positive and less "
+            f"than half the sample size ({n}).",
+        )
+
+    if base is not None and base <= 0:
+        raise ValueError("`base` must be a positive number or `None`.")
+
+    sorted_data = np.sort(values, axis=-1)
+
+    methods = {"vasicek": _vasicek_entropy,
+               "van es": _van_es_entropy,
+               "correa": _correa_entropy,
+               "ebrahimi": _ebrahimi_entropy,
+               "auto": _vasicek_entropy}
+    method = method.lower()
+    if method not in methods:
+        message = f"`method` must be one of {set(methods)}"
+        raise ValueError(message)
+
+    if method == "auto":
+        if n <= 10:
+            method = 'van es'
+        elif n <= 1000:
+            method = 'ebrahimi'
+        else:
+            method = 'vasicek'
+
+    res = methods[method](sorted_data, window_length)
+
+    if base is not None:
+        res /= np.log(base)
+
+    return res
+
+
+def _pad_along_last_axis(X, m):
+    """Pad the data for computing the rolling window difference."""
+    # scales a bit better than method in _vasicek_like_entropy
+    shape = np.array(X.shape)
+    shape[-1] = m
+    Xl = np.broadcast_to(X[..., [0]], shape)  # [0] vs 0 to maintain shape
+    Xr = np.broadcast_to(X[..., [-1]], shape)
+    return np.concatenate((Xl, X, Xr), axis=-1)
+
+
+def _vasicek_entropy(X, m):
+    """Compute the Vasicek estimator as described in [6] Eq. 1.3."""
+    n = X.shape[-1]
+    X = _pad_along_last_axis(X, m)
+    differences = X[..., 2 * m:] - X[..., : -2 * m:]
+    logs = np.log(n/(2*m) * differences)
+    return np.mean(logs, axis=-1)
+
+
+def _van_es_entropy(X, m):
+    """Compute the van Es estimator as described in [6]."""
+    # No equation number, but referred to as HVE_mn.
+    # Typo: there should be a log within the summation.
+    n = X.shape[-1]
+    difference = X[..., m:] - X[..., :-m]
+    term1 = 1/(n-m) * np.sum(np.log((n+1)/m * difference), axis=-1)
+    k = np.arange(m, n+1)
+    return term1 + np.sum(1/k) + np.log(m) - np.log(n+1)
+
+
+def _ebrahimi_entropy(X, m):
+    """Compute the Ebrahimi estimator as described in [6]."""
+    # No equation number, but referred to as HE_mn
+    n = X.shape[-1]
+    X = _pad_along_last_axis(X, m)
+
+    differences = X[..., 2 * m:] - X[..., : -2 * m:]
+
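+    # Boundary correction weights: c_i tapers from 2 near the ends of the
+    # sample, where the padded window covers fewer true order statistics.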
+    i = np.arange(1, n+1).astype(float)
+    ci = np.ones_like(i)*2
+    ci[i <= m] = 1 + (i[i <= m] - 1)/m
+    ci[i >= n - m + 1] = 1 + (n - i[i >= n-m+1])/m
+
+    logs = np.log(n * differences / (ci * m))
+    return np.mean(logs, axis=-1)
+
+
+def _correa_entropy(X, m):
+    """Compute the Correa estimator as described in [6]."""
+    # No equation number, but referred to as HC_mn
+    n = X.shape[-1]
+    X = _pad_along_last_axis(X, m)
+
+    i = np.arange(1, n+1)
+    dj = np.arange(-m, m+1)[:, None]
+    j = i + dj
+    j0 = j + m - 1  # 0-indexed version of j
+
+    Xibar = np.mean(X[..., j0], axis=-2, keepdims=True)
+    difference = X[..., j0] - Xibar
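+    # The ratio num/den below is a local linear regression-type slope over
+    # each window of 2m+1 order statistics; see [5] for the derivation.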
+    num = np.sum(difference*dj, axis=-2)  # dj is j - i
+    den = n*np.sum(difference**2, axis=-2)
+    return -np.mean(np.log(num/den), axis=-1)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_fit.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_fit.py
new file mode 100644
index 00000000..89db861c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_fit.py
@@ -0,0 +1,1284 @@
+import warnings
+from collections import namedtuple
+import numpy as np
+from scipy import optimize, stats
+from scipy._lib._util import check_random_state
+
+
+def _combine_bounds(name, user_bounds, shape_domain, integral):
+    """Intersection of user-defined bounds and distribution PDF/PMF domain"""
+
+    user_bounds = np.atleast_1d(user_bounds)
+
+    if user_bounds[0] > user_bounds[1]:
+        message = (f"There are no values for `{name}` on the interval "
+                   f"{list(user_bounds)}.")
+        raise ValueError(message)
+
+    bounds = (max(user_bounds[0], shape_domain[0]),
+              min(user_bounds[1], shape_domain[1]))
+
+    if integral and (np.ceil(bounds[0]) > np.floor(bounds[1])):
+        message = (f"There are no integer values for `{name}` on the interval "
+                   f"defined by the user-provided bounds and the domain "
+                   "of the distribution.")
+        raise ValueError(message)
+    elif not integral and (bounds[0] > bounds[1]):
+        message = (f"There are no values for `{name}` on the interval "
+                   f"defined by the user-provided bounds and the domain "
+                   "of the distribution.")
+        raise ValueError(message)
+
+    if not np.all(np.isfinite(bounds)):
+        message = (f"The intersection of user-provided bounds for `{name}` "
+                   f"and the domain of the distribution is not finite. Please "
+                   f"provide finite bounds for shape `{name}` in `bounds`.")
+        raise ValueError(message)
+
+    return bounds
+
+
+class FitResult:
+    r"""Result of fitting a discrete or continuous distribution to data
+
+    Attributes
+    ----------
+    params : namedtuple
+        A namedtuple containing the maximum likelihood estimates of the
+        shape parameters, location, and (if applicable) scale of the
+        distribution.
+    success : bool or None
+        Whether the optimizer considered the optimization to terminate
+        successfully or not.
+    message : str or None
+        Any status message provided by the optimizer.
+
+    """
+
+    def __init__(self, dist, data, discrete, res):
+        self._dist = dist
+        self._data = data
+        self.discrete = discrete
+        self.pxf = getattr(dist, "pmf", None) or getattr(dist, "pdf", None)
+
+        shape_names = [] if dist.shapes is None else dist.shapes.split(", ")
+        if not discrete:
+            FitParams = namedtuple('FitParams', shape_names + ['loc', 'scale'])
+        else:
+            FitParams = namedtuple('FitParams', shape_names + ['loc'])
+
+        self.params = FitParams(*res.x)
+
+        # Optimizer can report success even when nllf is infinite
+        if res.success and not np.isfinite(self.nllf()):
+            res.success = False
+            res.message = ("Optimization converged to parameter values that "
+                           "are inconsistent with the data.")
+        self.success = getattr(res, "success", None)
+        self.message = getattr(res, "message", None)
+
+    def __repr__(self):
+        keys = ["params", "success", "message"]
+        m = max(map(len, keys)) + 1
+        return '\n'.join([key.rjust(m) + ': ' + repr(getattr(self, key))
+                          for key in keys if getattr(self, key) is not None])
+
+    def nllf(self, params=None, data=None):
+        """Negative log-likelihood function
+
+        Evaluates the negative of the log-likelihood function of the provided
+        data at the provided parameters.
+
+        Parameters
+        ----------
+        params : tuple, optional
+            The shape parameters, location, and (if applicable) scale of the
+            distribution as a single tuple. Default is the maximum likelihood
+            estimates (``self.params``).
+        data : array_like, optional
+            The data for which the log-likelihood function is to be evaluated.
+            Default is the data to which the distribution was fit.
+
+        Returns
+        -------
+        nllf : float
+            The negative of the log-likelihood function.
+
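+        Examples
+        --------
+        A minimal sketch, assuming ``res`` is a ``FitResult`` returned by
+        `scipy.stats.fit` for a distribution with parameters ``(n, p, loc)``:
+
+        >>> res.nllf() <= res.nllf(params=(6, 0.5, 0))  # doctest: +SKIP
+        True
+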
+        """
+        params = params if params is not None else self.params
+        data = data if data is not None else self._data
+        return self._dist.nnlf(theta=params, x=data)
+
+    def plot(self, ax=None, *, plot_type="hist"):
+        """Visually compare the data against the fitted distribution.
+
+        Available only if ``matplotlib`` is installed.
+
+        Parameters
+        ----------
+        ax : matplotlib.axes.Axes
+            Axes object to draw the plot onto, otherwise uses the current Axes.
+        plot_type : {"hist", "qq", "pp", "cdf"}
+            Type of plot to draw. Options include:
+
+            - "hist": Superposes the PDF/PMF of the fitted distribution
+              over a normalized histogram of the data.
+            - "qq": Scatter plot of theoretical quantiles against the
+              empirical quantiles. Specifically, the x-coordinates are the
+              values of the fitted distribution PPF evaluated at the
+              percentiles ``(np.arange(1, n+1) - 0.5)/n``, where ``n`` is the
+              number of data points, and the y-coordinates are the sorted
+              data points.
+            - "pp": Scatter plot of theoretical percentiles against the
+              observed percentiles. Specifically, the x-coordinates are the
+              percentiles ``(np.arange(1, n+1) - 0.5)/n``, where ``n`` is
+              the number of data points, and the y-coordinates are the values
+              of the fitted distribution CDF evaluated at the sorted
+              data points.
+            - "cdf": Superposes the CDF of the fitted distribution over the
+              empirical CDF. Specifically, the x-coordinates of the empirical
+              CDF are the sorted data points, and the y-coordinates are the
+              percentiles ``(np.arange(1, n+1) - 0.5)/n``, where ``n`` is
+              the number of data points.
+
+        Returns
+        -------
+        ax : matplotlib.axes.Axes
+            The matplotlib Axes object on which the plot was drawn.
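+
+        Examples
+        --------
+        A minimal sketch, assuming ``res`` is a ``FitResult`` returned by
+        `scipy.stats.fit` and that matplotlib is installed:
+
+        >>> import matplotlib.pyplot as plt  # doctest: +SKIP
+        >>> res.plot(plot_type="qq")  # doctest: +SKIP
+        >>> plt.show()  # doctest: +SKIP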
+        """
+        try:
+            import matplotlib  # noqa
+        except ModuleNotFoundError as exc:
+            message = "matplotlib must be installed to use method `plot`."
+            raise ModuleNotFoundError(message) from exc
+
+        plots = {'histogram': self._hist_plot, 'qq': self._qq_plot,
+                 'pp': self._pp_plot, 'cdf': self._cdf_plot,
+                 'hist': self._hist_plot}
+        if plot_type.lower() not in plots:
+            message = f"`plot_type` must be one of {set(plots.keys())}"
+            raise ValueError(message)
+        plot = plots[plot_type.lower()]
+
+        if ax is None:
+            import matplotlib.pyplot as plt
+            ax = plt.gca()
+
+        fit_params = np.atleast_1d(self.params)
+
+        return plot(ax=ax, fit_params=fit_params)
+
+    def _hist_plot(self, ax, fit_params):
+        from matplotlib.ticker import MaxNLocator
+
+        support = self._dist.support(*fit_params)
+        lb = support[0] if np.isfinite(support[0]) else min(self._data)
+        ub = support[1] if np.isfinite(support[1]) else max(self._data)
+        pxf = "PMF" if self.discrete else "PDF"
+
+        if self.discrete:
+            x = np.arange(lb, ub + 2)
+            y = self.pxf(x, *fit_params)
+            ax.vlines(x[:-1], 0, y[:-1], label='Fitted Distribution PMF',
+                      color='C0')
+            options = dict(density=True, bins=x, align='left', color='C1')
+            ax.xaxis.set_major_locator(MaxNLocator(integer=True))
+            ax.set_xlabel('k')
+            ax.set_ylabel('PMF')
+        else:
+            x = np.linspace(lb, ub, 200)
+            y = self.pxf(x, *fit_params)
+            ax.plot(x, y, '--', label='Fitted Distribution PDF', color='C0')
+            options = dict(density=True, bins=50, align='mid', color='C1')
+            ax.set_xlabel('x')
+            ax.set_ylabel('PDF')
+
+        if len(self._data) > 50 or self.discrete:
+            ax.hist(self._data, label="Histogram of Data", **options)
+        else:
+            ax.plot(self._data, np.zeros_like(self._data), "*",
+                    label='Data', color='C1')
+
+        ax.set_title(rf"Fitted $\tt {self._dist.name}$ {pxf} and Histogram")
+        ax.legend(*ax.get_legend_handles_labels())
+        return ax
+
+    def _qp_plot(self, ax, fit_params, qq):
+        data = np.sort(self._data)
+        ps = self._plotting_positions(len(self._data))
+
+        if qq:
+            qp = "Quantiles"
+            plot_type = 'Q-Q'
+            x = self._dist.ppf(ps, *fit_params)
+            y = data
+        else:
+            qp = "Percentiles"
+            plot_type = 'P-P'
+            x = ps
+            y = self._dist.cdf(data, *fit_params)
+
+        ax.plot(x, y, '.', label=f'Fitted Distribution {plot_type}',
+                color='C0', zorder=1)
+        xlim = ax.get_xlim()
+        ylim = ax.get_ylim()
+        lim = [min(xlim[0], ylim[0]), max(xlim[1], ylim[1])]
+        if not qq:
+            lim = max(lim[0], 0), min(lim[1], 1)
+
+        if self.discrete and qq:
+            q_min, q_max = int(lim[0]), int(lim[1]+1)
+            q_ideal = np.arange(q_min, q_max)
+            # q_ideal = np.unique(self._dist.ppf(ps, *fit_params))
+            ax.plot(q_ideal, q_ideal, 'o', label='Reference', color='k',
+                    alpha=0.25, markerfacecolor='none', clip_on=True)
+        elif self.discrete and not qq:
+            # The intent of this is to match the plot that would be produced
+            # if x were continuous on [0, 1] and y were cdf(ppf(x)).
+            # It can be approximated by letting x = np.linspace(0, 1, 1000),
+            # but this might not look great when zooming in. The vertical
+            # portions are included to indicate where the transition occurs
+            # where the data completely obscures the horizontal portions.
+            p_min, p_max = lim
+            a, b = self._dist.support(*fit_params)
+            p_min = max(p_min, 0 if np.isfinite(a) else 1e-3)
+            p_max = min(p_max, 1 if np.isfinite(b) else 1-1e-3)
+            q_min, q_max = self._dist.ppf([p_min, p_max], *fit_params)
+            qs = np.arange(q_min-1, q_max+1)
+            ps = self._dist.cdf(qs, *fit_params)
+            ax.step(ps, ps, '-', label='Reference', color='k', alpha=0.25,
+                    clip_on=True)
+        else:
+            ax.plot(lim, lim, '-', label='Reference', color='k', alpha=0.25,
+                    clip_on=True)
+
+        ax.set_xlim(lim)
+        ax.set_ylim(lim)
+        ax.set_xlabel(rf"Fitted $\tt {self._dist.name}$ Theoretical {qp}")
+        ax.set_ylabel(f"Data {qp}")
+        ax.set_title(rf"Fitted $\tt {self._dist.name}$ {plot_type} Plot")
+        ax.legend(*ax.get_legend_handles_labels())
+        ax.set_aspect('equal')
+        return ax
+
+    def _qq_plot(self, **kwargs):
+        return self._qp_plot(qq=True, **kwargs)
+
+    def _pp_plot(self, **kwargs):
+        return self._qp_plot(qq=False, **kwargs)
+
+    def _plotting_positions(self, n, a=.5):
+        # See https://en.wikipedia.org/wiki/Q%E2%80%93Q_plot#Plotting_positions
+        k = np.arange(1, n+1)
+        return (k-a) / (n + 1 - 2*a)
+
+    def _cdf_plot(self, ax, fit_params):
+        data = np.sort(self._data)
+        ecdf = self._plotting_positions(len(self._data))
+        ls = '--' if len(np.unique(data)) < 30 else '.'
+        xlabel = 'k' if self.discrete else 'x'
+        ax.step(data, ecdf, ls, label='Empirical CDF', color='C1', zorder=0)
+
+        xlim = ax.get_xlim()
+        q = np.linspace(*xlim, 300)
+        tcdf = self._dist.cdf(q, *fit_params)
+
+        ax.plot(q, tcdf, label='Fitted Distribution CDF', color='C0', zorder=1)
+        ax.set_xlim(xlim)
+        ax.set_ylim(0, 1)
+        ax.set_xlabel(xlabel)
+        ax.set_ylabel("CDF")
+        ax.set_title(rf"Fitted $\tt {self._dist.name}$ and Empirical CDF")
+        handles, labels = ax.get_legend_handles_labels()
+        ax.legend(handles[::-1], labels[::-1])
+        return ax
+
+
+def fit(dist, data, bounds=None, *, guess=None, method='mle',
+        optimizer=optimize.differential_evolution):
+    r"""Fit a discrete or continuous distribution to data
+
+    Given a distribution, data, and bounds on the parameters of the
+    distribution, return maximum likelihood estimates of the parameters.
+
+    Parameters
+    ----------
+    dist : `scipy.stats.rv_continuous` or `scipy.stats.rv_discrete`
+        The object representing the distribution to be fit to the data.
+    data : 1D array_like
+        The data to which the distribution is to be fit. If the data contain
+        any of ``np.nan``, ``np.inf``, or ``-np.inf``, the fit method will
+        raise a ``ValueError``.
+    bounds : dict or sequence of tuples, optional
+        If a dictionary, each key is the name of a parameter of the
+        distribution, and the corresponding value is a tuple containing the
+        lower and upper bound on that parameter.  If the distribution is
+        defined only for a finite range of values of that parameter, no entry
+        for that parameter is required; e.g., some distributions have
+        parameters which must be on the interval [0, 1]. Bounds for parameters
+        location (``loc``) and scale (``scale``) are optional; by default,
+        they are fixed to 0 and 1, respectively.
+
+        If a sequence, element *i* is a tuple containing the lower and upper
+        bound on the *i*\ th parameter of the distribution. In this case,
+        bounds for *all* distribution shape parameters must be provided.
+        Optionally, bounds for location and scale may follow the
+        distribution shape parameters.
+
+        If a shape is to be held fixed (e.g. if it is known), the
+        lower and upper bounds may be equal. If a user-provided lower or upper
+        bound is beyond a bound of the domain for which the distribution is
+        defined, the bound of the distribution's domain will replace the
+        user-provided value. Similarly, parameters which must be integral
+        will be constrained to integral values within the user-provided bounds.
+    guess : dict or array_like, optional
+        If a dictionary, each key is the name of a parameter of the
+        distribution, and the corresponding value is a guess for the value
+        of the parameter.
+
+        If a sequence, element *i* is a guess for the *i*\ th parameter of the
+        distribution. In this case, guesses for *all* distribution shape
+        parameters must be provided.
+
+        If `guess` is not provided, guesses for the decision variables will
+        not be passed to the optimizer. If `guess` is provided, guesses for
+        any missing parameters will be set at the mean of the lower and
+        upper bounds. Guesses for parameters which must be integral will be
+        rounded to integral values, and guesses that lie outside the
+        intersection of the user-provided bounds and the domain of the
+        distribution will be clipped.
+    method : {'mle', 'mse'}
+        With ``method="mle"`` (default), the fit is computed by minimizing
+        the negative log-likelihood function. A large, finite penalty
+        (rather than infinite negative log-likelihood) is applied for
+        observations beyond the support of the distribution.
+        With ``method="mse"``, the fit is computed by minimizing
+        the negative log-product spacing function. The same penalty is applied
+        for observations beyond the support. We follow the approach of [1]_,
+        which is generalized for samples with repeated observations.
+    optimizer : callable, optional
+        `optimizer` is a callable that accepts the following positional
+        argument.
+
+        fun : callable
+            The objective function to be optimized. `fun` accepts one argument
+            ``x``, candidate shape parameters of the distribution, and returns
+            the objective function value given ``x``, `dist`, and the provided
+            `data`.
+            The job of `optimizer` is to find values of the decision variables
+            that minimize `fun`.
+
+        `optimizer` must also accept the following keyword argument.
+
+        bounds : sequence of tuples
+            The bounds on values of the decision variables; each element will
+            be a tuple containing the lower and upper bound on a decision
+            variable.
+
+        If `guess` is provided, `optimizer` must also accept the following
+        keyword argument.
+
+        x0 : array_like
+            The guesses for each decision variable.
+
+        If the distribution has any shape parameters that must be integral or
+        if the distribution is discrete and the location parameter is not
+        fixed, `optimizer` must also accept the following keyword argument.
+
+        integrality : array_like of bools
+            For each decision variable, True if the decision variable
+            must be constrained to integer values and False if the decision
+            variable is continuous.
+
+        `optimizer` must return an object, such as an instance of
+        `scipy.optimize.OptimizeResult`, which holds the optimal values of
+        the decision variables in an attribute ``x``. If attributes
+        ``fun``, ``status``, or ``message`` are provided, they will be
+        included in the result object returned by `fit`.
+
+    Returns
+    -------
+    result : `~scipy.stats._result_classes.FitResult`
+        An object with the following fields.
+
+        params : namedtuple
+            A namedtuple containing the maximum likelihood estimates of the
+            shape parameters, location, and (if applicable) scale of the
+            distribution.
+        success : bool or None
+            Whether the optimizer considered the optimization to terminate
+            successfully or not.
+        message : str or None
+            Any status message provided by the optimizer.
+
+        The object has the following methods:
+
+        nllf(params=None, data=None)
+            By default, the negative log-likelihood function at the fitted
+            `params` for the given `data`. Accepts a tuple containing
+            alternative shapes, location, and scale of the distribution and
+            an array of alternative data.
+
+        plot(ax=None)
+            Superposes the PDF/PMF of the fitted distribution over a normalized
+            histogram of the data.
+
+    See Also
+    --------
+    rv_continuous, rv_discrete
+
+    Notes
+    -----
+    Optimization is more likely to converge to the maximum likelihood estimate
+    when the user provides tight bounds containing the maximum likelihood
+    estimate. For example, when fitting a binomial distribution to data, the
+    number of experiments underlying each sample may be known, in which case
+    the corresponding shape parameter ``n`` can be fixed.
+
+    References
+    ----------
+    .. [1] Shao, Yongzhao, and Marjorie G. Hahn. "Maximum product of spacings
+           method: a unified formulation with illustration of strong
+           consistency." Illinois Journal of Mathematics 43.3 (1999): 489-499.
+
+    Examples
+    --------
+    Suppose we wish to fit a distribution to the following data.
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> rng = np.random.default_rng()
+    >>> dist = stats.nbinom
+    >>> shapes = (5, 0.5)
+    >>> data = dist.rvs(*shapes, size=1000, random_state=rng)
+
+    Suppose we do not know how the data were generated, but we suspect that
+    they follow a negative binomial distribution with parameters *n* and
+    *p*\. (See `scipy.stats.nbinom`.) We believe that the parameter *n* is
+    less than 30, and we know that the parameter *p* must lie on the interval
+    [0, 1]. We record this information in a variable `bounds` and pass
+    this information to `fit`.
+
+    >>> bounds = [(0, 30), (0, 1)]
+    >>> res = stats.fit(dist, data, bounds)
+
+    `fit` searches within the user-specified `bounds` for the
+    values that best match the data (in the sense of maximum likelihood
+    estimation). In this case, it found shape values similar to those
+    from which the data were actually generated.
+
+    >>> res.params
+    FitParams(n=5.0, p=0.5028157644634368, loc=0.0)  # may vary
+
+    We can visualize the results by superposing the probability mass function
+    of the distribution (with the shapes fit to the data) over a normalized
+    histogram of the data.
+
+    >>> import matplotlib.pyplot as plt  # matplotlib must be installed to plot
+    >>> res.plot()
+    >>> plt.show()
+
+    Note that the estimate for *n* was exactly integral; this is because
+    the domain of the `nbinom` PMF includes only integral *n*, and the `nbinom`
+    object "knows" that. `nbinom` also knows that the shape *p* must be a
+    value between 0 and 1. In such a case - when the domain of the distribution
+    with respect to a parameter is finite - we are not required to specify
+    bounds for the parameter.
+
+    >>> bounds = {'n': (0, 30)}  # omit parameter p using a `dict`
+    >>> res2 = stats.fit(dist, data, bounds)
+    >>> res2.params
+    FitParams(n=5.0, p=0.5016492009232932, loc=0.0)  # may vary
+
+    If we wish to force the distribution to be fit with *n* fixed at 6, we can
+    set both the lower and upper bounds on *n* to 6. Note, however, that the
+    value of the objective function being optimized is typically worse (higher)
+    in this case.
+
+    >>> bounds = {'n': (6, 6)}  # fix parameter `n`
+    >>> res3 = stats.fit(dist, data, bounds)
+    >>> res3.params
+    FitParams(n=6.0, p=0.5486556076755706, loc=0.0)  # may vary
+    >>> res3.nllf() > res.nllf()
+    True  # may vary
+
+    Note that the numerical results of the previous examples are typical, but
+    they may vary because the default optimizer used by `fit`,
+    `scipy.optimize.differential_evolution`, is stochastic. However, we can
+    customize the settings used by the optimizer to ensure reproducibility -
+    or even use a different optimizer entirely - using the `optimizer`
+    parameter.
+
+    >>> from scipy.optimize import differential_evolution
+    >>> rng = np.random.default_rng(767585560716548)
+    >>> def optimizer(fun, bounds, *, integrality):
+    ...     return differential_evolution(fun, bounds, strategy='best2bin',
+    ...                                   seed=rng, integrality=integrality)
+    >>> bounds = [(0, 30), (0, 1)]
+    >>> res4 = stats.fit(dist, data, bounds, optimizer=optimizer)
+    >>> res4.params
+    FitParams(n=5.0, p=0.5015183149259951, loc=0.0)
+
+    """
+    # --- Input Validation / Standardization --- #
+    user_bounds = bounds
+    user_guess = guess
+
+    # distribution input validation and information collection
+    if hasattr(dist, "pdf"):  # can't use isinstance for types
+        default_bounds = {'loc': (0, 0), 'scale': (1, 1)}
+        discrete = False
+    elif hasattr(dist, "pmf"):
+        default_bounds = {'loc': (0, 0)}
+        discrete = True
+    else:
+        message = ("`dist` must be an instance of `rv_continuous` "
+                   "or `rv_discrete.`")
+        raise ValueError(message)
+
+    try:
+        param_info = dist._param_info()
+    except AttributeError as e:
+        message = (f"Distribution `{dist.name}` is not yet supported by "
+                   "`scipy.stats.fit` because shape information has "
+                   "not been defined.")
+        raise ValueError(message) from e
+
+    # data input validation
+    data = np.asarray(data)
+    if data.ndim != 1:
+        message = "`data` must be exactly one-dimensional."
+        raise ValueError(message)
+    if not (np.issubdtype(data.dtype, np.number)
+            and np.all(np.isfinite(data))):
+        message = "All elements of `data` must be finite numbers."
+        raise ValueError(message)
+
+    # bounds input validation and information collection
+    n_params = len(param_info)
+    n_shapes = n_params - (1 if discrete else 2)
+    param_list = [param.name for param in param_info]
+    param_names = ", ".join(param_list)
+    shape_names = ", ".join(param_list[:n_shapes])
+
+    if user_bounds is None:
+        user_bounds = {}
+
+    if isinstance(user_bounds, dict):
+        default_bounds.update(user_bounds)
+        user_bounds = default_bounds
+        user_bounds_array = np.empty((n_params, 2))
+        for i in range(n_params):
+            param_name = param_info[i].name
+            user_bound = user_bounds.pop(param_name, None)
+            if user_bound is None:
+                user_bound = param_info[i].domain
+            user_bounds_array[i] = user_bound
+        if user_bounds:
+            message = ("Bounds provided for the following unrecognized "
+                       f"parameters will be ignored: {set(user_bounds)}")
+            warnings.warn(message, RuntimeWarning, stacklevel=2)
+
+    else:
+        try:
+            user_bounds = np.asarray(user_bounds, dtype=float)
+            if user_bounds.size == 0:
+                user_bounds = np.empty((0, 2))
+        except ValueError as e:
+            message = ("Each element of a `bounds` sequence must be a tuple "
+                       "containing two elements: the lower and upper bound of "
+                       "a distribution parameter.")
+            raise ValueError(message) from e
+        if (user_bounds.ndim != 2 or user_bounds.shape[1] != 2):
+            message = ("Each element of `bounds` must be a tuple specifying "
+                       "the lower and upper bounds of a shape parameter")
+            raise ValueError(message)
+        if user_bounds.shape[0] < n_shapes:
+            message = (f"A `bounds` sequence must contain at least {n_shapes} "
+                       "elements: tuples specifying the lower and upper "
+                       f"bounds of all shape parameters {shape_names}.")
+            raise ValueError(message)
+        if user_bounds.shape[0] > n_params:
+            message = ("A `bounds` sequence may not contain more than "
+                       f"{n_params} elements: tuples specifying the lower and "
+                       "upper bounds of distribution parameters "
+                       f"{param_names}.")
+            raise ValueError(message)
+
+        user_bounds_array = np.empty((n_params, 2))
+        user_bounds_array[n_shapes:] = list(default_bounds.values())
+        user_bounds_array[:len(user_bounds)] = user_bounds
+
+    user_bounds = user_bounds_array
+    validated_bounds = []
+    for i in range(n_params):
+        name = param_info[i].name
+        user_bound = user_bounds_array[i]
+        param_domain = param_info[i].domain
+        integral = param_info[i].integrality
+        combined = _combine_bounds(name, user_bound, param_domain, integral)
+        validated_bounds.append(combined)
+
+    bounds = np.asarray(validated_bounds)
+    integrality = [param.integrality for param in param_info]
+
+    # guess input validation
+
+    if user_guess is None:
+        guess_array = None
+    elif isinstance(user_guess, dict):
+        default_guess = {param.name: np.mean(bound)
+                         for param, bound in zip(param_info, bounds)}
+        unrecognized = set(user_guess) - set(default_guess)
+        if unrecognized:
+            message = ("Guesses provided for the following unrecognized "
+                       f"parameters will be ignored: {unrecognized}")
+            warnings.warn(message, RuntimeWarning, stacklevel=2)
+        default_guess.update(user_guess)
+
+        message = ("Each element of `guess` must be a scalar "
+                   "guess for a distribution parameter.")
+        try:
+            guess_array = np.asarray([default_guess[param.name]
+                                      for param in param_info], dtype=float)
+        except ValueError as e:
+            raise ValueError(message) from e
+
+    else:
+        message = ("Each element of `guess` must be a scalar "
+                   "guess for a distribution parameter.")
+        try:
+            user_guess = np.asarray(user_guess, dtype=float)
+        except ValueError as e:
+            raise ValueError(message) from e
+        if user_guess.ndim != 1:
+            raise ValueError(message)
+        if user_guess.shape[0] < n_shapes:
+            message = (f"A `guess` sequence must contain at least {n_shapes} "
+                       "elements: scalar guesses for the distribution shape "
+                       f"parameters {shape_names}.")
+            raise ValueError(message)
+        if user_guess.shape[0] > n_params:
+            message = ("A `guess` sequence may not contain more than "
+                       f"{n_params} elements: scalar guesses for the "
+                       f"distribution parameters {param_names}.")
+            raise ValueError(message)
+
+        guess_array = np.mean(bounds, axis=1)
+        guess_array[:len(user_guess)] = user_guess
+
+    if guess_array is not None:
+        guess_rounded = guess_array.copy()
+
+        guess_rounded[integrality] = np.round(guess_rounded[integrality])
+        rounded = np.where(guess_rounded != guess_array)[0]
+        for i in rounded:
+            message = (f"Guess for parameter `{param_info[i].name}` "
+                       f"rounded from {guess_array[i]} to {guess_rounded[i]}.")
+            warnings.warn(message, RuntimeWarning, stacklevel=2)
+
+        guess_clipped = np.clip(guess_rounded, bounds[:, 0], bounds[:, 1])
+        clipped = np.where(guess_clipped != guess_rounded)[0]
+        for i in clipped:
+            message = (f"Guess for parameter `{param_info[i].name}` "
+                       f"clipped from {guess_rounded[i]} to "
+                       f"{guess_clipped[i]}.")
+            warnings.warn(message, RuntimeWarning, stacklevel=2)
+
+        guess = guess_clipped
+    else:
+        guess = None
+
+    # --- Fitting --- #
+    def nllf(free_params, data=data):  # bind data NOW
+        with np.errstate(invalid='ignore', divide='ignore'):
+            return dist._penalized_nnlf(free_params, data)
+
+    def nlpsf(free_params, data=data):  # bind data NOW
+        with np.errstate(invalid='ignore', divide='ignore'):
+            return dist._penalized_nlpsf(free_params, data)
+
+    methods = {'mle': nllf, 'mse': nlpsf}
+    objective = methods[method.lower()]
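+    # Both objectives apply a large, finite penalty (rather than infinite
+    # values) for observations outside the support; see `method` above.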
+
+    with np.errstate(invalid='ignore', divide='ignore'):
+        kwds = {}
+        if bounds is not None:
+            kwds['bounds'] = bounds
+        if np.any(integrality):
+            kwds['integrality'] = integrality
+        if guess is not None:
+            kwds['x0'] = guess
+        res = optimizer(objective, **kwds)
+
+    return FitResult(dist, data, discrete, res)
+
+
+GoodnessOfFitResult = namedtuple('GoodnessOfFitResult',
+                                 ('fit_result', 'statistic', 'pvalue',
+                                  'null_distribution'))
+
+
+def goodness_of_fit(dist, data, *, known_params=None, fit_params=None,
+                    guessed_params=None, statistic='ad', n_mc_samples=9999,
+                    random_state=None):
+    r"""
+    Perform a goodness of fit test comparing data to a distribution family.
+
+    Given a distribution family and data, perform a test of the null hypothesis
+    that the data were drawn from a distribution in that family. Any known
+    parameters of the distribution may be specified. Remaining parameters of
+    the distribution will be fit to the data, and the p-value of the test
+    is computed accordingly. Several statistics for comparing the distribution
+    to data are available.
+
+    Parameters
+    ----------
+    dist : `scipy.stats.rv_continuous`
+        The object representing the distribution family under the null
+        hypothesis.
+    data : 1D array_like
+        Finite, uncensored data to be tested.
+    known_params : dict, optional
+        A dictionary containing name-value pairs of known distribution
+        parameters. Monte Carlo samples are randomly drawn from the
+        null-hypothesized distribution with these values of the parameters.
+        Before the statistic is evaluated for each Monte Carlo sample, only
+        remaining unknown parameters of the null-hypothesized distribution
+        family are fit to the samples; the known parameters are held fixed.
+        If all parameters of the distribution family are known, then the step
+        of fitting the distribution family to each sample is omitted.
+    fit_params : dict, optional
+        A dictionary containing name-value pairs of distribution parameters
+        that have already been fit to the data, e.g. using `scipy.stats.fit`
+        or the ``fit`` method of `dist`. Monte Carlo samples are drawn from the
+        null-hypothesized distribution with these specified values of the
+        parameter. On those Monte Carlo samples, however, these and all other
+        unknown parameters of the null-hypothesized distribution family are
+        fit before the statistic is evaluated.
+    guessed_params : dict, optional
+        A dictionary containing name-value pairs of distribution parameters
+        which have been guessed. These parameters are always considered as
+        free parameters and are fit both to the provided `data` as well as
+        to the Monte Carlo samples drawn from the null-hypothesized
+        distribution. The purpose of these `guessed_params` is to be used as
+        initial values for the numerical fitting procedure.
+    statistic : {"ad", "ks", "cvm"}, optional
+        The statistic used to compare data to a distribution after fitting
+        unknown parameters of the distribution family to the data. The
+        Anderson-Darling ("ad"), Kolmogorov-Smirnov ("ks"), and
+        Cramer-von Mises ("cvm") statistics are available [1]_.
+    n_mc_samples : int, default: 9999
+        The number of Monte Carlo samples drawn from the null hypothesized
+        distribution to form the null distribution of the statistic. The
+        sample size of each is the same as the given `data`.
+    random_state : {None, int, `numpy.random.Generator`,
+                    `numpy.random.RandomState`}, optional
+
+        Pseudorandom number generator state used to generate the Monte Carlo
+        samples.
+
+        If `random_state` is ``None`` (default), the
+        `numpy.random.RandomState` singleton is used.
+        If `random_state` is an int, a new ``RandomState`` instance is used,
+        seeded with `random_state`.
+        If `random_state` is already a ``Generator`` or ``RandomState``
+        instance, then the provided instance is used.
+
+    Returns
+    -------
+    res : GoodnessOfFitResult
+        An object with the following attributes.
+
+        fit_result : `~scipy.stats._result_classes.FitResult`
+            An object representing the fit of the provided `dist` to `data`.
+            This object includes the values of distribution family parameters
+            that fully define the null-hypothesized distribution, that is,
+            the distribution from which Monte Carlo samples are drawn.
+        statistic : float
+            The value of the statistic comparing provided `data` to the
+            null-hypothesized distribution.
+        pvalue : float
+            The proportion of elements in the null distribution with
+            statistic values at least as extreme as the statistic value of the
+            provided `data`.
+        null_distribution : ndarray
+            The value of the statistic for each Monte Carlo sample
+            drawn from the null-hypothesized distribution.
+
+    Notes
+    -----
+    This is a generalized Monte Carlo goodness-of-fit procedure, special cases
+    of which correspond with various Anderson-Darling tests, Lilliefors' test,
+    etc. The test is described in [2]_, [3]_, and [4]_ as a parametric
+    bootstrap test. This is a Monte Carlo test in which parameters that
+    specify the distribution from which samples are drawn have been estimated
+    from the data. We describe the test using "Monte Carlo" rather than
+    "parametric bootstrap" throughout to avoid confusion with the more familiar
+    nonparametric bootstrap, and describe how the test is performed below.
+
+    *Traditional goodness of fit tests*
+
+    Traditionally, critical values corresponding with a fixed set of
+    significance levels are pre-calculated using Monte Carlo methods. Users
+    perform the test by calculating the value of the test statistic only for
+    their observed `data` and comparing this value to tabulated critical
+    values. This practice is not very flexible, as tables are not available for
+    all distributions and combinations of known and unknown parameter values.
+    Also, results can be inaccurate when critical values are interpolated from
+    limited tabulated data to correspond with the user's sample size and
+    fitted parameter values. To overcome these shortcomings, this function
+    allows the user to perform the Monte Carlo trials adapted to their
+    particular data.
+
+    *Algorithmic overview*
+
+    In brief, this routine executes the following steps:
+
+      1. Fit unknown parameters to the given `data`, thereby forming the
+         "null-hypothesized" distribution, and compute the statistic of
+         this pair of data and distribution.
+      2. Draw random samples from this null-hypothesized distribution.
+      3. Fit the unknown parameters to each random sample.
+      4. Calculate the statistic between each sample and the distribution that
+         has been fit to the sample.
+      5. Compare the value of the statistic corresponding with `data` from (1)
+         against the values of the statistic corresponding with the random
+         samples from (4). The p-value is the proportion of samples with a
+         statistic value greater than or equal to the statistic of the observed
+         data.
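+
+    A schematic sketch of this loop in Python (hypothetical helper names
+    ``fit_unknown`` and ``compute_statistic``; the actual implementation
+    also handles known and guessed parameters):
+
+    .. code-block:: python
+
+        params = fit_unknown(dist, data)                    # step 1
+        t_obs = compute_statistic(data, dist, params)
+        null_dist = []
+        for _ in range(n_mc_samples):                       # steps 2-4
+            sample = dist.rvs(*params, size=len(data))
+            refit = fit_unknown(dist, sample)
+            null_dist.append(compute_statistic(sample, dist, refit))
+        b = sum(t >= t_obs for t in null_dist)              # step 5
+        pvalue = (b + 1) / (n_mc_samples + 1)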
+
+    In more detail, the steps are as follows.
+
+    First, any unknown parameters of the distribution family specified by
+    `dist` are fit to the provided `data` using maximum likelihood estimation.
+    (One exception is the normal distribution with unknown location and scale:
+    we use the bias-corrected standard deviation ``np.std(data, ddof=1)`` for
+    the scale as recommended in [1]_.)
+    These values of the parameters specify a particular member of the
+    distribution family referred to as the "null-hypothesized distribution",
+    that is, the distribution from which the data were sampled under the null
+    hypothesis. The `statistic`, which compares data to a distribution, is
+    computed between `data` and the null-hypothesized distribution.
+
+    Next, many (specifically `n_mc_samples`) new samples, each containing the
+    same number of observations as `data`, are drawn from the
+    null-hypothesized distribution. All unknown parameters of the distribution
+    family `dist` are fit to *each resample*, and the `statistic` is computed
+    between each sample and its corresponding fitted distribution. These
+    values of the statistic form the Monte Carlo null distribution (not to be
+    confused with the "null-hypothesized distribution" above).
+
+    The p-value of the test is the proportion of statistic values in the Monte
+    Carlo null distribution that are at least as extreme as the statistic value
+    of the provided `data`. More precisely, the p-value is given by
+
+    .. math::
+
+        p = \frac{b + 1}
+                 {m + 1}
+
+    where :math:`b` is the number of statistic values in the Monte Carlo null
+    distribution that are greater than or equal to the statistic value
+    calculated for `data`, and :math:`m` is the number of elements in the
+    Monte Carlo null distribution (`n_mc_samples`). The addition of :math:`1`
+    to the numerator and denominator can be thought of as including the
+    value of the statistic corresponding with `data` in the null distribution,
+    but a more formal explanation is given in [5]_.
+
+    *Limitations*
+
+    The test can be very slow for some distribution families because unknown
+    parameters of the distribution family must be fit to each of the Monte
+    Carlo samples, and for most distributions in SciPy, distribution fitting
+    is performed via numerical optimization.
+
+    *Anti-Pattern*
+
+    For this reason, it may be tempting
+    to treat parameters of the distribution pre-fit to `data` (by the user)
+    as though they were `known_params`, as specification of all parameters of
+    the distribution precludes the need to fit the distribution to each Monte
+    Carlo sample. (This is essentially how the original Kolmogorov-Smirnov
+    test is performed.) Although such a test can provide evidence against the
+    null hypothesis, the test is conservative in the sense that small p-values
+    will tend to (greatly) *overestimate* the probability of making a type I
+    error (that is, rejecting the null hypothesis although it is true), and the
+    power of the test is low (that is, it is less likely to reject the null
+    hypothesis even when the null hypothesis is false).
+    This is because the Monte Carlo samples are less likely to agree with the
+    null-hypothesized distribution as well as `data` does. This tends to
+    increase the values of the statistic recorded in the null distribution,
+    so that a larger number of them exceed the value of the statistic for
+    `data`, thereby inflating the p-value.
+
+    References
+    ----------
+    .. [1] M. A. Stephens (1974). "EDF Statistics for Goodness of Fit and
+           Some Comparisons." Journal of the American Statistical Association,
+           Vol. 69, pp. 730-737.
+    .. [2] W. Stute, W. G. Manteiga, and M. P. Quindimil (1993).
+           "Bootstrap based goodness-of-fit-tests." Metrika 40.1: 243-256.
+    .. [3] C. Genest, & B Rémillard. (2008). "Validity of the parametric
+           bootstrap for goodness-of-fit testing in semiparametric models."
+           Annales de l'IHP Probabilités et statistiques. Vol. 44. No. 6.
+    .. [4] I. Kojadinovic and J. Yan (2012). "Goodness-of-fit testing based on
+           a weighted bootstrap: A fast large-sample alternative to the
+           parametric bootstrap." Canadian Journal of Statistics 40.3: 480-500.
+    .. [5] B. Phipson and G. K. Smyth (2010). "Permutation P-values Should
+           Never Be Zero: Calculating Exact P-values When Permutations Are
+           Randomly Drawn." Statistical Applications in Genetics and Molecular
+           Biology 9.1.
+    .. [6] H. W. Lilliefors (1967). "On the Kolmogorov-Smirnov test for
+           normality with mean and variance unknown." Journal of the American
+           statistical Association 62.318: 399-402.
+
+    Examples
+    --------
+    A well-known test of the null hypothesis that data were drawn from a
+    given distribution is the Kolmogorov-Smirnov (KS) test, available in SciPy
+    as `scipy.stats.ks_1samp`. Suppose we wish to test whether the following
+    data:
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> rng = np.random.default_rng()
+    >>> x = stats.uniform.rvs(size=75, random_state=rng)
+
+    were sampled from a normal distribution. To perform a KS test, the
+    empirical distribution function of the observed data will be compared
+    against the (theoretical) cumulative distribution function of a normal
+    distribution. Of course, to do this, the normal distribution under the null
+    hypothesis must be fully specified. This is commonly done by first fitting
+    the ``loc`` and ``scale`` parameters of the distribution to the observed
+    data, then performing the test.
+
+    >>> loc, scale = np.mean(x), np.std(x, ddof=1)
+    >>> cdf = stats.norm(loc, scale).cdf
+    >>> stats.ks_1samp(x, cdf)
+    KstestResult(statistic=0.1119257570456813, pvalue=0.2827756409939257)
+
+    An advantage of the KS test is that the p-value - the probability of
+    obtaining a value of the test statistic under the null hypothesis as
+    extreme as the value obtained from the observed data - can be calculated
+    exactly and efficiently. `goodness_of_fit` can only approximate these
+    results.
+
+    >>> known_params = {'loc': loc, 'scale': scale}
+    >>> res = stats.goodness_of_fit(stats.norm, x, known_params=known_params,
+    ...                             statistic='ks', random_state=rng)
+    >>> res.statistic, res.pvalue
+    (0.1119257570456813, 0.2788)
+
+    The statistic matches exactly, but the p-value is estimated by forming
+    a "Monte Carlo null distribution", that is, by explicitly drawing random
+    samples from `scipy.stats.norm` with the provided parameters and
+    calculating the statistic for each. The fraction of these statistic values
+    at least as extreme as ``res.statistic`` approximates the exact p-value
+    calculated by `scipy.stats.ks_1samp`.
+
+    However, in many cases, we would prefer to test only that the data were
+    sampled from one of *any* member of the normal distribution family, not
+    specifically from the normal distribution with the location and scale
+    fitted to the observed sample. In this case, Lilliefors [6]_ argued that
+    the KS test is far too conservative (that is, the p-value overstates
+    the actual probability of rejecting a true null hypothesis) and thus lacks
+    power - the ability to reject the null hypothesis when the null hypothesis
+    is actually false.
+    Indeed, our p-value above is approximately 0.28, which is far too large
+    to reject the null hypothesis at any common significance level.
+
+    Consider why this might be. Note that in the KS test above, the statistic
+    always compares data against the CDF of a normal distribution fitted to the
+    *observed data*. This tends to reduce the value of the statistic for the
+    observed data, but it is "unfair" when computing the statistic for other
+    samples, such as those we randomly draw to form the Monte Carlo null
+    distribution. It is easy to correct for this: whenever we compute the KS
+    statistic of a sample, we use the CDF of a normal distribution fitted
+    to *that sample*. The null distribution in this case has not been
+    calculated exactly and is typically approximated using Monte Carlo methods
+    as described above. This is where `goodness_of_fit` excels.
+
+    >>> res = stats.goodness_of_fit(stats.norm, x, statistic='ks',
+    ...                             random_state=rng)
+    >>> res.statistic, res.pvalue
+    (0.1119257570456813, 0.0196)
+
+    Indeed, this p-value is much smaller, and small enough to (correctly)
+    reject the null hypothesis at common significance levels, including 5% and
+    2.5%.
+
+    However, the KS statistic is not very sensitive to all deviations from
+    normality. The original advantage of the KS statistic was the ability
+    to compute the null distribution theoretically, but a more sensitive
+    statistic - resulting in a higher test power - can be used now that we can
+    approximate the null distribution computationally. The Anderson-Darling
+    statistic [1]_ tends to be more sensitive, and critical values of this
+    statistic have been tabulated for various significance levels and sample
+    sizes using Monte Carlo methods.
+
+    >>> res = stats.anderson(x, 'norm')
+    >>> print(res.statistic)
+    1.2139573337497467
+    >>> print(res.critical_values)
+    [0.549 0.625 0.75  0.875 1.041]
+    >>> print(res.significance_level)
+    [15.  10.   5.   2.5  1. ]
+
+    Here, the observed value of the statistic exceeds the critical value
+    corresponding with a 1% significance level. This tells us that the p-value
+    of the observed data is less than 1%, but what is it? We could interpolate
+    from these (already-interpolated) values, but `goodness_of_fit` can
+    estimate it directly.
+
+    >>> res = stats.goodness_of_fit(stats.norm, x, statistic='ad',
+    ...                             random_state=rng)
+    >>> res.statistic, res.pvalue
+    (1.2139573337497467, 0.0034)
+
+    A further advantage is that use of `goodness_of_fit` is not limited to
+    a particular set of distributions or conditions on which parameters
+    are known versus which must be estimated from data. Instead,
+    `goodness_of_fit` can estimate p-values relatively quickly for any
+    distribution with a sufficiently fast and reliable ``fit`` method. For
+    instance, here we perform a goodness of fit test using the Cramer-von Mises
+    statistic against the Rayleigh distribution with known location and unknown
+    scale.
+
+    >>> rng = np.random.default_rng()
+    >>> x = stats.chi(df=2.2, loc=0, scale=2).rvs(size=1000, random_state=rng)
+    >>> res = stats.goodness_of_fit(stats.rayleigh, x, statistic='cvm',
+    ...                             known_params={'loc': 0}, random_state=rng)
+
+    This executes fairly quickly, but to check the reliability of the ``fit``
+    method, we should inspect the fit result.
+
+    >>> res.fit_result  # location is as specified, and scale is reasonable
+      params: FitParams(loc=0.0, scale=2.1026719844231243)
+     success: True
+     message: 'The fit was performed successfully.'
+    >>> import matplotlib.pyplot as plt  # matplotlib must be installed to plot
+    >>> res.fit_result.plot()
+    >>> plt.show()
+
+    If the distribution is not fit to the observed data as well as possible,
+    the test may not control the type I error rate, that is, the chance of
+    rejecting the null hypothesis even when it is true.
+
+    We should also look for extreme outliers in the null distribution that
+    may be caused by unreliable fitting. These do not necessarily invalidate
+    the result, but they tend to reduce the test's power.
+
+    >>> _, ax = plt.subplots()
+    >>> ax.hist(np.log10(res.null_distribution))
+    >>> ax.set_xlabel("log10 of CVM statistic under the null hypothesis")
+    >>> ax.set_ylabel("Frequency")
+    >>> ax.set_title("Histogram of the Monte Carlo null distribution")
+    >>> plt.show()
+
+    This plot seems reassuring.
+
+    If the ``fit`` method is working reliably, and if the distribution of
+    the test statistic is not particularly sensitive to the values of the
+    fitted parameters, then the p-value provided by `goodness_of_fit` is
+    expected to be a good approximation.
+
+    >>> res.statistic, res.pvalue
+    (0.2231991510248692, 0.0525)
+
+    """
+    args = _gof_iv(dist, data, known_params, fit_params, guessed_params,
+                   statistic, n_mc_samples, random_state)
+    (dist, data, fixed_nhd_params, fixed_rfd_params, guessed_nhd_params,
+     guessed_rfd_params, statistic, n_mc_samples_int, random_state) = args
+
+    # Fit null hypothesis distribution to data
+    nhd_fit_fun = _get_fit_fun(dist, data, guessed_nhd_params,
+                               fixed_nhd_params)
+    nhd_vals = nhd_fit_fun(data)
+    nhd_dist = dist(*nhd_vals)
+
+    def rvs(size):
+        return nhd_dist.rvs(size=size, random_state=random_state)
+
+    # Define statistic
+    fit_fun = _get_fit_fun(dist, data, guessed_rfd_params, fixed_rfd_params)
+    compare_fun = _compare_dict[statistic]
+
+    def statistic_fun(data, axis=-1):
+        # Make things simple by always working along the last axis.
+        data = np.moveaxis(data, axis, -1)
+        rfd_vals = fit_fun(data)
+        rfd_dist = dist(*rfd_vals)
+        return compare_fun(rfd_dist, data)
+
+    res = stats.monte_carlo_test(data, rvs, statistic_fun, vectorized=True,
+                                 n_resamples=n_mc_samples_int, axis=-1,
+                                 alternative='greater')
+    opt_res = optimize.OptimizeResult()
+    opt_res.success = True
+    opt_res.message = "The fit was performed successfully."
+    opt_res.x = nhd_vals
+    # Only continuous distributions for now, hence discrete=False
+    # There's no fundamental limitation; it's just that we're not using
+    # stats.fit, discrete distributions don't have a `fit` method, and
+    # we haven't written any vectorized fit functions for a discrete
+    # distribution yet.
+    return GoodnessOfFitResult(FitResult(dist, data, False, opt_res),
+                               res.statistic, res.pvalue,
+                               res.null_distribution)
+
+
+def _get_fit_fun(dist, data, guessed_params, fixed_params):
+
+    shape_names = [] if dist.shapes is None else dist.shapes.split(", ")
+    param_names = shape_names + ['loc', 'scale']
+    fparam_names = ['f'+name for name in param_names]
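+    # All parameters are fixed when every 'f'-prefixed name is a key of
+    # `fixed_params`.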
+    all_fixed = not set(fparam_names).difference(fixed_params)
+    guessed_shapes = [guessed_params.pop(x, None)
+                      for x in shape_names if x in guessed_params]
+
+    # Define statistic, including fitting distribution to data
+    if dist in _fit_funs:
+        def fit_fun(data):
+            params = _fit_funs[dist](data, **fixed_params)
+            params = np.asarray(np.broadcast_arrays(*params))
+            if params.ndim > 1:
+                params = params[..., np.newaxis]
+            return params
+
+    elif all_fixed:
+        def fit_fun(data):
+            return [fixed_params[name] for name in fparam_names]
+
+    else:
+        def fit_fun_1d(data):
+            return dist.fit(data, *guessed_shapes, **guessed_params,
+                            **fixed_params)
+
+        def fit_fun(data):
+            params = np.apply_along_axis(fit_fun_1d, axis=-1, arr=data)
+            if params.ndim > 1:
+                params = params.T[..., np.newaxis]
+            return params
+
+    return fit_fun
+
+
+# Vectorized fitting functions. These accept N-D `data`, in which each row
+# (slice along the last axis) is a sample to fit, plus scalar fixed
+# parameters. They return a tuple of parameter arrays, each of shape
+# data.shape[:-1].
+def _fit_norm(data, floc=None, fscale=None):
+    loc = floc
+    scale = fscale
+    if loc is None and scale is None:
+        loc = np.mean(data, axis=-1)
+        scale = np.std(data, ddof=1, axis=-1)
+    elif loc is None:
+        loc = np.mean(data, axis=-1)
+    elif scale is None:
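+        # MLE of the scale about the fixed location (no ddof correction)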
+        scale = np.sqrt(((data - loc)**2).mean(axis=-1))
+    return loc, scale
+
+
+_fit_funs = {stats.norm: _fit_norm}  # type: ignore[attr-defined]
+
+
+# Vectorized goodness of fit statistic functions. These accept a frozen
+# distribution object and `data` in which each row (slice along last axis) is
+# a sample.
+
+
+def _anderson_darling(dist, data):
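+    # A^2 = -n - sum_i (2i - 1)/n * [log F(x_(i)) + log(1 - F(x_(n+1-i)))],
+    # where x_(1) <= ... <= x_(n) are the order statistics of the sample.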
+    x = np.sort(data, axis=-1)
+    n = data.shape[-1]
+    i = np.arange(1, n+1)
+    Si = (2*i - 1)/n * (dist.logcdf(x) + dist.logsf(x[..., ::-1]))
+    S = np.sum(Si, axis=-1)
+    return -n - S
+
+
+def _compute_dplus(cdfvals):  # adapted from _stats_py before gh-17062
+    n = cdfvals.shape[-1]
+    return (np.arange(1.0, n + 1) / n - cdfvals).max(axis=-1)
+
+
+def _compute_dminus(cdfvals):
+    n = cdfvals.shape[-1]
+    return (cdfvals - np.arange(0.0, n)/n).max(axis=-1)
+
+
+def _kolmogorov_smirnov(dist, data):
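+    # D_n = max(D+, D-), where D+ = max_i(i/n - F(x_(i))) and
+    # D- = max_i(F(x_(i)) - (i-1)/n)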
+    x = np.sort(data, axis=-1)
+    cdfvals = dist.cdf(x)
+    Dplus = _compute_dplus(cdfvals)  # always works along last axis
+    Dminus = _compute_dminus(cdfvals)
+    return np.maximum(Dplus, Dminus)
+
+
+def _cramer_von_mises(dist, data):
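+    # W^2 = 1/(12*n) + sum_i ((2*i - 1)/(2*n) - F(x_(i)))^2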
+    x = np.sort(data, axis=-1)
+    n = data.shape[-1]
+    cdfvals = dist.cdf(x)
+    u = (2*np.arange(1, n+1) - 1)/(2*n)
+    w = 1 / (12*n) + np.sum((u - cdfvals)**2, axis=-1)
+    return w
+
+
+_compare_dict = {"ad": _anderson_darling, "ks": _kolmogorov_smirnov,
+                 "cvm": _cramer_von_mises}
+
+
+def _gof_iv(dist, data, known_params, fit_params, guessed_params, statistic,
+            n_mc_samples, random_state):
+
+    if not isinstance(dist, stats.rv_continuous):
+        message = ("`dist` must be a (non-frozen) instance of "
+                   "`stats.rv_continuous`.")
+        raise TypeError(message)
+
+    data = np.asarray(data, dtype=float)
+    if not data.ndim == 1:
+        message = "`data` must be a one-dimensional array of numbers."
+        raise ValueError(message)
+
+    # Leave validation of these key/value pairs to the `fit` method,
+    # but collect these into dictionaries that will be used below
+    known_params = known_params or dict()
+    fit_params = fit_params or dict()
+    guessed_params = guessed_params or dict()
+
+    known_params_f = {("f"+key): val for key, val in known_params.items()}
+    fit_params_f = {("f"+key): val for key, val in fit_params.items()}
+
+    # These are the values of the parameters of the null distribution family
+    # with which resamples are drawn
+    fixed_nhd_params = known_params_f.copy()
+    fixed_nhd_params.update(fit_params_f)
+
+    # These are fixed when fitting the distribution family to resamples
+    fixed_rfd_params = known_params_f.copy()
+
+    # These are used as guesses when fitting the distribution family to
+    # the original data
+    guessed_nhd_params = guessed_params.copy()
+
+    # These are used as guesses when fitting the distribution family to
+    # resamples
+    guessed_rfd_params = fit_params.copy()
+    guessed_rfd_params.update(guessed_params)
+
+    statistics = {'ad', 'ks', 'cvm'}
+    if statistic.lower() not in statistics:
+        message = f"`statistic` must be one of {statistics}."
+        raise ValueError(message)
+
+    n_mc_samples_int = int(n_mc_samples)
+    if n_mc_samples_int != n_mc_samples:
+        message = "`n_mc_samples` must be an integer."
+        raise TypeError(message)
+
+    random_state = check_random_state(random_state)
+
+    return (dist, data, fixed_nhd_params, fixed_rfd_params, guessed_nhd_params,
+            guessed_rfd_params, statistic, n_mc_samples_int, random_state)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_generate_pyx.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_generate_pyx.py
new file mode 100644
index 00000000..07861467
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_generate_pyx.py
@@ -0,0 +1,75 @@
+import pathlib
+import subprocess
+import sys
+import os
+import argparse
+
+
+def isNPY_OLD():
+    '''
+    Return True when building against a NumPy older than 1.19.
+
+    A new random C API was added in 1.18 and became stable in 1.19, so the
+    new random C API is preferred when building with recent numpy.
+    '''
+    import numpy as np
+    ver = tuple(int(num) for num in np.__version__.split('.')[:2])
+    return ver < (1, 19)
+
+
+def make_biasedurn(outdir):
+    '''Substitute True/False values for NPY_OLD Cython build variable.'''
+    biasedurn_base = (pathlib.Path(__file__).parent / '_biasedurn').absolute()
+    with open(biasedurn_base.with_suffix('.pyx.templ'), 'r') as src:
+        contents = src.read()
+
+    outfile = outdir / '_biasedurn.pyx'
+    with open(outfile, 'w') as dest:
+        dest.write(contents.format(NPY_OLD=str(bool(isNPY_OLD()))))
+
+
+def make_unuran(srcdir, outdir):
+    """Substitute True/False values for NPY_OLD Cython build variable."""
+    import re
+    with open(srcdir / "unuran_wrapper.pyx.templ", "r") as src:
+        contents = src.read()
+    with open(outdir / "unuran_wrapper.pyx", "w") as dest:
+        dest.write(re.sub("DEF NPY_OLD = isNPY_OLD",
+                          f"DEF NPY_OLD = {isNPY_OLD()}",
+                          contents))
+
+
+def make_boost(outdir, distutils_build=False):
+    # Call code generator inside _boost directory
+    code_gen = pathlib.Path(__file__).parent / '_boost/include/code_gen.py'
+    if distutils_build:
+        subprocess.run([sys.executable, str(code_gen), '-o', outdir,
+                        '--distutils-build', 'True'], check=True)
+    else:
+        subprocess.run([sys.executable, str(code_gen), '-o', outdir],
+                       check=True)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-o", "--outdir", type=str,
+                        help="Path to the output directory")
+    args = parser.parse_args()
+
+    if not args.outdir:
+        # We're dealing with a distutils build here, write in-place:
+        outdir_abs = pathlib.Path(os.path.abspath(os.path.dirname(__file__)))
+        make_biasedurn(outdir_abs)
+
+        outdir_abs_boost = outdir_abs / '_boost' / 'src'
+        if not os.path.exists(outdir_abs_boost):
+            os.makedirs(outdir_abs_boost)
+        make_boost(outdir_abs_boost, distutils_build=True)
+
+        outdir_abs_unuran = outdir_abs / '_unuran'
+        make_unuran(outdir_abs_unuran, outdir_abs_unuran)
+    else:
+        # Meson build
+        srcdir_abs = pathlib.Path(os.path.abspath(os.path.dirname(__file__)))
+        outdir_abs = pathlib.Path(os.getcwd()) / args.outdir
+        make_biasedurn(outdir_abs)
+        make_boost(outdir_abs)
+        make_unuran(srcdir_abs / '_unuran', outdir_abs)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_hypotests.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_hypotests.py
new file mode 100644
index 00000000..3943a96a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_hypotests.py
@@ -0,0 +1,2006 @@
+from collections import namedtuple
+from dataclasses import make_dataclass
+from math import comb
+import numpy as np
+import warnings
+from itertools import combinations
+import scipy.stats
+from scipy.optimize import shgo
+from . import distributions
+from ._common import ConfidenceInterval
+from ._continuous_distns import chi2, norm
+from scipy.special import gamma, kv, gammaln
+from scipy.fft import ifft
+from ._stats_pythran import _a_ij_Aij_Dij2
+from ._stats_pythran import (
+    _concordant_pairs as _P, _discordant_pairs as _Q
+)
+from scipy.stats import _stats_py
+
+__all__ = ['epps_singleton_2samp', 'cramervonmises', 'somersd',
+           'barnard_exact', 'boschloo_exact', 'cramervonmises_2samp',
+           'tukey_hsd', 'poisson_means_test']
+
+Epps_Singleton_2sampResult = namedtuple('Epps_Singleton_2sampResult',
+                                        ('statistic', 'pvalue'))
+
+
+def epps_singleton_2samp(x, y, t=(0.4, 0.8)):
+    """Compute the Epps-Singleton (ES) test statistic.
+
+    Test the null hypothesis that two samples have the same underlying
+    probability distribution.
+
+    Parameters
+    ----------
+    x, y : array-like
+        The two samples of observations to be tested. Input must not have more
+        than one dimension. Samples can have different lengths.
+    t : array-like, optional
+        The points (t1, ..., tn) where the empirical characteristic function is
+        to be evaluated. The values should be positive and distinct. The default
+        value (0.4, 0.8) is proposed in [1]_. Input must not have more than
+        one dimension.
+
+    Returns
+    -------
+    statistic : float
+        The test statistic.
+    pvalue : float
+        The associated p-value based on the asymptotic chi2-distribution.
+
+    See Also
+    --------
+    ks_2samp, anderson_ksamp
+
+    Notes
+    -----
+    Testing whether two samples are generated by the same underlying
+    distribution is a classical question in statistics. A widely used test is
+    the Kolmogorov-Smirnov (KS) test which relies on the empirical
+    distribution function. Epps and Singleton introduce a test based on the
+    empirical characteristic function in [1]_.
+
+    One advantage of the ES test compared to the KS test is that it does
+    not assume a continuous distribution. In [1]_, the authors conclude
+    that the test also has a higher power than the KS test in many
+    examples. They recommend the use of the ES test for discrete samples as
+    well as continuous samples with at least 25 observations each, whereas
+    `anderson_ksamp` is recommended for smaller sample sizes in the
+    continuous case.
+
+    The p-value is computed from the asymptotic distribution of the test
+    statistic which follows a `chi2` distribution. If the sample size of both
+    `x` and `y` is below 25, the small sample correction proposed in [1]_ is
+    applied to the test statistic.
+
+    The default values of `t` are determined in [1]_ by considering
+    various distributions and finding good values that lead to a high power
+    of the test in general. Table III in [1]_ gives the optimal values for
+    the distributions tested in that study. The values of `t` are scaled by
+    the semi-interquartile range in the implementation, see [1]_.
+
+    References
+    ----------
+    .. [1] T. W. Epps and K. J. Singleton, "An omnibus test for the two-sample
+       problem using the empirical characteristic function", Journal of
+       Statistical Computation and Simulation 26, p. 177--203, 1986.
+
+    .. [2] S. J. Goerg and J. Kaiser, "Nonparametric testing of distributions
+       - the Epps-Singleton two-sample test using the empirical characteristic
+       function", The Stata Journal 9(3), p. 454--465, 2009.
+
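+    Examples
+    --------
+    A small usage example (the samples below are random, so the exact
+    output is omitted): draw two samples from the same distribution and
+    apply the test; a large p-value is then expected most of the time.
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> rng = np.random.default_rng()
+    >>> x = rng.normal(size=100)
+    >>> y = rng.normal(size=120)
+    >>> res = stats.epps_singleton_2samp(x, y)
+    >>> res.statistic, res.pvalue  # doctest: +SKIP
+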
+    """
+    x, y, t = np.asarray(x), np.asarray(y), np.asarray(t)
+    # check if x and y are valid inputs
+    if x.ndim > 1:
+        raise ValueError('x must be 1d, but x.ndim equals {}.'.format(x.ndim))
+    if y.ndim > 1:
+        raise ValueError('y must be 1d, but y.ndim equals {}.'.format(y.ndim))
+    nx, ny = len(x), len(y)
+    if (nx < 5) or (ny < 5):
+        raise ValueError('x and y should have at least 5 elements, but len(x) '
+                         '= {} and len(y) = {}.'.format(nx, ny))
+    if not np.isfinite(x).all():
+        raise ValueError('x must not contain nonfinite values.')
+    if not np.isfinite(y).all():
+        raise ValueError('y must not contain nonfinite values.')
+    n = nx + ny
+
+    # check if t is valid
+    if t.ndim > 1:
+        raise ValueError('t must be 1d, but t.ndim equals {}.'.format(t.ndim))
+    if np.less_equal(t, 0).any():
+        raise ValueError('t must contain positive elements only.')
+
+    # rescale t with semi-iqr as proposed in [1]; import iqr here to avoid
+    # circular import
+    from scipy.stats import iqr
+    sigma = iqr(np.hstack((x, y))) / 2
+    ts = np.reshape(t, (-1, 1)) / sigma
+
+    # covariance estimation of ES test
+    gx = np.vstack((np.cos(ts*x), np.sin(ts*x))).T  # shape = (nx, 2*len(t))
+    gy = np.vstack((np.cos(ts*y), np.sin(ts*y))).T
+    cov_x = np.cov(gx.T, bias=True)  # the test uses biased cov-estimate
+    cov_y = np.cov(gy.T, bias=True)
+    est_cov = (n/nx)*cov_x + (n/ny)*cov_y
+    est_cov_inv = np.linalg.pinv(est_cov)
+    r = np.linalg.matrix_rank(est_cov_inv)
+    if r < 2*len(t):
+        warnings.warn('Estimated covariance matrix does not have full rank. '
+                      'This indicates a bad choice of the input t and the '
+                      'test might not be consistent.')  # see p. 183 in [1]_
+
+    # compute test statistic w distributed asympt. as chisquare with df=r
+    g_diff = np.mean(gx, axis=0) - np.mean(gy, axis=0)
+    w = n*np.dot(g_diff.T, np.dot(est_cov_inv, g_diff))
+
+    # apply small-sample correction
+    if (max(nx, ny) < 25):
+        corr = 1.0/(1.0 + n**(-0.45) + 10.1*(nx**(-1.7) + ny**(-1.7)))
+        w = corr * w
+
+    p = chi2.sf(w, r)
+
+    return Epps_Singleton_2sampResult(w, p)
+
+
+def poisson_means_test(k1, n1, k2, n2, *, diff=0, alternative='two-sided'):
+    r"""
+    Performs the Poisson means test, AKA the "E-test".
+
+    This is a test of the null hypothesis that the difference between means of
+    two Poisson distributions is `diff`. The samples are provided as the
+    number of events `k1` and `k2` observed within measurement intervals
+    (e.g. of time, space, number of observations) of sizes `n1` and `n2`.
+
+    Parameters
+    ----------
+    k1 : int
+        Number of events observed from distribution 1.
+    n1 : float
+        Size of sample from distribution 1.
+    k2 : int
+        Number of events observed from distribution 2.
+    n2 : float
+        Size of sample from distribution 2.
+    diff : float, default=0
+        The hypothesized difference in means between the distributions
+        underlying the samples.
+    alternative : {'two-sided', 'less', 'greater'}, optional
+        Defines the alternative hypothesis.
+        The following options are available (default is 'two-sided'):
+
+          * 'two-sided': the difference between distribution means is not
+            equal to `diff`
+          * 'less': the difference between distribution means is less than
+            `diff`
+          * 'greater': the difference between distribution means is greater
+            than `diff`
+
+    Returns
+    -------
+    statistic : float
+        The test statistic (see [1]_ equation 3.3).
+    pvalue : float
+        The probability of achieving such an extreme value of the test
+        statistic under the null hypothesis.
+
+    Notes
+    -----
+
+    Let:
+
+    .. math:: X_1 \sim \mbox{Poisson}(\mathtt{n1}\lambda_1)
+
+    be a random variable independent of
+
+    .. math:: X_2  \sim \mbox{Poisson}(\mathtt{n2}\lambda_2)
+
+    and let ``k1`` and ``k2`` be the observed values of :math:`X_1`
+    and :math:`X_2`, respectively. Then `poisson_means_test` uses the number
+    of observed events ``k1`` and ``k2`` from samples of size ``n1`` and
+    ``n2``, respectively, to test the null hypothesis that
+
+    .. math::
+       H_0: \lambda_1 - \lambda_2 = \mathtt{diff}
+
+    A benefit of the E-test is that it has good power for small sample sizes,
+    which can reduce sampling costs [1]_. It has been evaluated and determined
+    to be more powerful than the comparable C-test, sometimes referred to as
+    the Poisson exact test.
+
+    References
+    ----------
+    .. [1]  Krishnamoorthy, K., & Thomson, J. (2004). A more powerful test for
+       comparing two Poisson means. Journal of Statistical Planning and
+       Inference, 119(1), 23-35.
+
+    .. [2]  Przyborowski, J., & Wilenski, H. (1940). Homogeneity of results in
+       testing samples from Poisson series: With an application to testing
+       clover seed for dodder. Biometrika, 31(3/4), 313-323.
+
+    Examples
+    --------
+
+    Suppose that a gardener wishes to test the number of dodder (weed) seeds
+    in a sack of clover seeds that they buy from a seed company. It has
+    previously been established that the number of dodder seeds in clover
+    follows the Poisson distribution.
+
+    A 100 gram sample is drawn from the sack before being shipped to the
+    gardener. The sample is analyzed, and it is found to contain no dodder
+    seeds; that is, `k1` is 0. However, upon arrival, the gardener draws
+    another 100 gram sample from the sack. This time, three dodder seeds are
+    found in the sample; that is, `k2` is 3. The gardener would like to
+    know if the difference is significant and not due to chance. The
+    null hypothesis is that the difference between the two samples is merely
+    due to chance, or that :math:`\lambda_1 - \lambda_2 = \mathtt{diff}`
+    where :math:`\mathtt{diff} = 0`. The alternative hypothesis is that the
+    difference is not due to chance, or :math:`\lambda_1 - \lambda_2 \ne 0`.
+    The gardener selects a significance level of 5% to reject the null
+    hypothesis in favor of the alternative [2]_.
+
+    >>> import scipy.stats as stats
+    >>> res = stats.poisson_means_test(0, 100, 3, 100)
+    >>> res.statistic, res.pvalue
+    (-1.7320508075688772, 0.08837900929018157)
+
+    The p-value is .088, indicating a near 9% chance of observing a value of
+    the test statistic under the null hypothesis. This exceeds 5%, so the
+    gardener does not reject the null hypothesis as the difference cannot be
+    regarded as significant at this level.
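+
+    A one-sided test is performed by passing `alternative`; for example, to
+    test the alternative that :math:`\lambda_1 < \lambda_2` (exact output
+    omitted here):
+
+    >>> res = stats.poisson_means_test(0, 100, 3, 100, alternative='less')
+    >>> res.pvalue  # doctest: +SKIP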
+    """
+
+    _poisson_means_test_iv(k1, n1, k2, n2, diff, alternative)
+
+    # "for a given k_1 and k_2, an estimate of \lambda_2 is given by" [1] (3.4)
+    lmbd_hat2 = ((k1 + k2) / (n1 + n2) - diff * n1 / (n1 + n2))
+
+    # "\hat{\lambda_{2k}} may be less than or equal to zero ... and in this
+    # case the null hypothesis cannot be rejected ... [and] it is not necessary
+    # to compute the p-value". [1] page 26 below eq. (3.6).
+    if lmbd_hat2 <= 0:
+        return _stats_py.SignificanceResult(0, 1)
+
+    # The unbiased variance estimate [1] (3.2)
+    var = k1 / (n1 ** 2) + k2 / (n2 ** 2)
+
+    # The _observed_ pivot statistic from the input. It follows the
+    # unnumbered equation following equation (3.3). This is used later in
+    # comparison with the computed pivot statistics in an indicator function.
+    t_k1k2 = (k1 / n1 - k2 / n2 - diff) / np.sqrt(var)
+
+    # Equation (3.5) of [1] is lengthy, so it is broken into several parts,
+    # beginning here. Note that the probability mass function of the Poisson
+    # distribution is exp(-mu)*mu^k/k!, so it is called with shape mu, noted
+    # here as nlmbd_hat*. The strategy for evaluating the double summation in
+    # (3.5) is to create two arrays of the values of the two products inside
+    # the summation and then broadcast them together into a matrix, and then
+    # sum across the entire matrix.
+
+    # Compute constants (as seen in the first and second separated products in
+    # (3.5)). (This is the shape (mu) parameter of the Poisson distribution.)
+    nlmbd_hat1 = n1 * (lmbd_hat2 + diff)
+    nlmbd_hat2 = n2 * lmbd_hat2
+
+    # Determine summation bounds for tail ends of distribution rather than
+    # summing to infinity. `x1*` is for the outer sum and `x2*` for the inner
+    # sum.
+    x1_lb, x1_ub = distributions.poisson.ppf([1e-10, 1 - 1e-16], nlmbd_hat1)
+    x2_lb, x2_ub = distributions.poisson.ppf([1e-10, 1 - 1e-16], nlmbd_hat2)
+
+    # Construct arrays to function as the x_1 and x_2 counters on the summation
+    # in (3.5). `x1` is in columns and `x2` is in rows to allow for
+    # broadcasting.
+    x1 = np.arange(x1_lb, x1_ub + 1)
+    x2 = np.arange(x2_lb, x2_ub + 1)[:, None]
+
+    # These are the two products in equation (3.5) with `prob_x1` being the
+    # first (left side) and `prob_x2` being the second (right side). (To be
+    # as clear as possible: the 1st contains a "+ diff" term, the 2nd does
+    # not.)
+    prob_x1 = distributions.poisson.pmf(x1, nlmbd_hat1)
+    prob_x2 = distributions.poisson.pmf(x2, nlmbd_hat2)
+
+    # compute constants for use in the "pivot statistic" per the
+    # unnumbered equation following (3.3).
+    lmbd_x1 = x1 / n1
+    lmbd_x2 = x2 / n2
+    lmbds_diff = lmbd_x1 - lmbd_x2 - diff
+    var_x1x2 = lmbd_x1 / n1 + lmbd_x2 / n2
+
+    # This is the 'pivot statistic' for use in the indicator of the summation
+    # (left side of "I[.]").
+    with np.errstate(invalid='ignore', divide='ignore'):
+        t_x1x2 = lmbds_diff / np.sqrt(var_x1x2)
+
+    # `[indicator]` implements the "I[.] ... the indicator function" per
+    # the paragraph following equation (3.5).
+    if alternative == 'two-sided':
+        indicator = np.abs(t_x1x2) >= np.abs(t_k1k2)
+    elif alternative == 'less':
+        indicator = t_x1x2 <= t_k1k2
+    else:
+        indicator = t_x1x2 >= t_k1k2
+
+    # Multiply all combinations of the products together, exclude terms
+    # based on the `indicator` and then sum. (3.5)
+    pvalue = np.sum((prob_x1 * prob_x2)[indicator])
+    return _stats_py.SignificanceResult(t_k1k2, pvalue)
+
+
+def _poisson_means_test_iv(k1, n1, k2, n2, diff, alternative):
+    # """check for valid types and values of input to `poisson_mean_test`."""
+    if k1 != int(k1) or k2 != int(k2):
+        raise TypeError('`k1` and `k2` must be integers.')
+
+    count_err = '`k1` and `k2` must be greater than or equal to 0.'
+    if k1 < 0 or k2 < 0:
+        raise ValueError(count_err)
+
+    if n1 <= 0 or n2 <= 0:
+        raise ValueError('`n1` and `n2` must be greater than 0.')
+
+    if diff < 0:
+        raise ValueError('`diff` must be greater than or equal to 0.')
+
+    alternatives = {'two-sided', 'less', 'greater'}
+    if alternative.lower() not in alternatives:
+        raise ValueError(f"Alternative must be one of '{alternatives}'.")
+
+
+class CramerVonMisesResult:
+    def __init__(self, statistic, pvalue):
+        self.statistic = statistic
+        self.pvalue = pvalue
+
+    def __repr__(self):
+        return (f"{self.__class__.__name__}(statistic={self.statistic}, "
+                f"pvalue={self.pvalue})")
+
+
+def _psi1_mod(x):
+    """
+    psi1 is defined in equation 1.10 in Csörgő, S. and Faraway, J. (1996).
+    This implements a modified version by excluding the term V(x) / 12
+    (here: _cdf_cvm_inf(x) / 12) to avoid evaluating _cdf_cvm_inf(x)
+    twice in _cdf_cvm.
+
+    Implementation based on MAPLE code of Julian Faraway and R code of the
+    function pCvM in the package goftest (v1.1.1), permission granted
+    by Adrian Baddeley. Main difference in the implementation: the code
+    here keeps adding terms of the series until the terms are small enough.
+    """
+
+    def _ed2(y):
+        z = y**2 / 4
+        b = kv(1/4, z) + kv(3/4, z)
+        return np.exp(-z) * (y/2)**(3/2) * b / np.sqrt(np.pi)
+
+    def _ed3(y):
+        z = y**2 / 4
+        c = np.exp(-z) / np.sqrt(np.pi)
+        return c * (y/2)**(5/2) * (2*kv(1/4, z) + 3*kv(3/4, z) - kv(5/4, z))
+
+    def _Ak(k, x):
+        m = 2*k + 1
+        sx = 2 * np.sqrt(x)
+        y1 = x**(3/4)
+        y2 = x**(5/4)
+
+        e1 = m * gamma(k + 1/2) * _ed2((4 * k + 3)/sx) / (9 * y1)
+        e2 = gamma(k + 1/2) * _ed3((4 * k + 1) / sx) / (72 * y2)
+        e3 = 2 * (m + 2) * gamma(k + 3/2) * _ed3((4 * k + 5) / sx) / (12 * y2)
+        e4 = 7 * m * gamma(k + 1/2) * _ed2((4 * k + 1) / sx) / (144 * y1)
+        e5 = 7 * m * gamma(k + 1/2) * _ed2((4 * k + 5) / sx) / (144 * y1)
+
+        return e1 + e2 + e3 + e4 + e5
+
+    x = np.asarray(x)
+    tot = np.zeros_like(x, dtype='float')
+    cond = np.ones_like(x, dtype='bool')
+    k = 0
+    while np.any(cond):
+        z = -_Ak(k, x[cond]) / (np.pi * gamma(k + 1))
+        tot[cond] = tot[cond] + z
+        cond[cond] = np.abs(z) >= 1e-7
+        k += 1
+
+    return tot
+
+
+def _cdf_cvm_inf(x):
+    """
+    Calculate the cdf of the Cramér-von Mises statistic (infinite sample size).
+
+    See equation 1.2 in Csörgő, S. and Faraway, J. (1996).
+
+    Implementation based on MAPLE code of Julian Faraway and R code of the
+    function pCvM in the package goftest (v1.1.1), permission granted
+    by Adrian Baddeley. Main difference in the implementation: the code
+    here keeps adding terms of the series until the terms are small enough.
+
+    The function is not expected to be accurate for large values of x, say
+    x > 4, when the cdf is very close to 1.
+    """
+    x = np.asarray(x)
+
+    def term(x, k):
+        # this expression can be found in [2], second line of (1.3)
+        u = np.exp(gammaln(k + 0.5) - gammaln(k+1)) / (np.pi**1.5 * np.sqrt(x))
+        y = 4*k + 1
+        q = y**2 / (16*x)
+        b = kv(0.25, q)
+        return u * np.sqrt(y) * np.exp(-q) * b
+
+    tot = np.zeros_like(x, dtype='float')
+    cond = np.ones_like(x, dtype='bool')
+    k = 0
+    while np.any(cond):
+        z = term(x[cond], k)
+        tot[cond] = tot[cond] + z
+        cond[cond] = np.abs(z) >= 1e-7
+        k += 1
+
+    return tot
+
+
+def _cdf_cvm(x, n=None):
+    """
+    Calculate the cdf of the Cramér-von Mises statistic for a finite sample
+    size n. If n is None, use the asymptotic cdf (n=inf).
+
+    See equation 1.8 in Csörgő, S. and Faraway, J. (1996) for finite samples,
+    1.2 for the asymptotic cdf.
+
+    The function is not expected to be accurate for large values of x, say
+    x > 2, when the cdf is very close to 1 and it might return values > 1
+    in that case, e.g. _cdf_cvm(2.0, 12) = 1.0000027556716846. Moreover, it
+    is not accurate for small values of n, especially close to the bounds of
+    the distribution's domain, [1/(12*n), n/3], where the value jumps to 0
+    and 1, respectively. These are limitations of the approximation by Csörgő
+    and Faraway (1996) implemented in this function.
+    """
+    x = np.asarray(x)
+    if n is None:
+        y = _cdf_cvm_inf(x)
+    else:
+        # support of the test statistic is [1/(12*n), n/3], see 1.1 in [2]
+        y = np.zeros_like(x, dtype='float')
+        sup = (1./(12*n) < x) & (x < n/3.)
+        # note: _psi1_mod does not include the term _cdf_cvm_inf(x) / 12
+        # therefore, we need to add it here
+        y[sup] = _cdf_cvm_inf(x[sup]) * (1 + 1./(12*n)) + _psi1_mod(x[sup]) / n
+        y[x >= n/3] = 1
+
+    if y.ndim == 0:
+        return y[()]
+    return y
+
+
+def cramervonmises(rvs, cdf, args=()):
+    """Perform the one-sample Cramér-von Mises test for goodness of fit.
+
+    This performs a test of the goodness of fit of a cumulative distribution
+    function (cdf) :math:`F` compared to the empirical distribution function
+    :math:`F_n` of observed random variates :math:`X_1, ..., X_n` that are
+    assumed to be independent and identically distributed ([1]_).
+    The null hypothesis is that the :math:`X_i` have cumulative distribution
+    :math:`F`.
+
+    Parameters
+    ----------
+    rvs : array_like
+        A 1-D array of observed values of the random variables :math:`X_i`.
+    cdf : str or callable
+        The cumulative distribution function :math:`F` to test the
+        observations against. If a string, it should be the name of a
+        distribution in `scipy.stats`. If a callable, that callable is used
+        to calculate the cdf: ``cdf(x, *args) -> float``.
+    args : tuple, optional
+        Distribution parameters. These are assumed to be known; see Notes.
+
+    Returns
+    -------
+    res : object with attributes
+        statistic : float
+            Cramér-von Mises statistic.
+        pvalue : float
+            The p-value.
+
+    See Also
+    --------
+    kstest, cramervonmises_2samp
+
+    Notes
+    -----
+    .. versionadded:: 1.6.0
+
+    The p-value relies on the approximation given by equation 1.8 in [2]_.
+    It is important to keep in mind that the p-value is only accurate if
+    one tests a simple hypothesis, i.e. the parameters of the reference
+    distribution are known. If the parameters are estimated from the data
+    (composite hypothesis), the computed p-value is not reliable.
+
+    References
+    ----------
+    .. [1] Cramér-von Mises criterion, Wikipedia,
+           https://en.wikipedia.org/wiki/Cram%C3%A9r%E2%80%93von_Mises_criterion
+    .. [2] Csörgő, S. and Faraway, J. (1996). The Exact and Asymptotic
+           Distribution of Cramér-von Mises Statistics. Journal of the
+           Royal Statistical Society, pp. 221-234.
+
+    Examples
+    --------
+
+    Suppose we wish to test whether data generated by ``scipy.stats.norm.rvs``
+    were, in fact, drawn from the standard normal distribution. We choose a
+    significance level of alpha=0.05.
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> rng = np.random.default_rng()
+    >>> x = stats.norm.rvs(size=500, random_state=rng)
+    >>> res = stats.cramervonmises(x, 'norm')
+    >>> res.statistic, res.pvalue
+    (0.07400330012187435, 0.7274595666160468)
+
+    The p-value 0.73 exceeds our chosen significance level, so we do not
+    reject the null hypothesis that the observed sample is drawn from the
+    standard normal distribution.
+
+    Now suppose we wish to check whether the same sample shifted by 2.1 is
+    consistent with being drawn from a normal distribution with a mean of 2.
+
+    >>> y = x + 2.1
+    >>> res = stats.cramervonmises(y, 'norm', args=(2,))
+    >>> res.statistic, res.pvalue
+    (0.49121480855028343, 0.04189256516661377)
+
+    Here we have used the `args` keyword to specify the mean (``loc``)
+    of the normal distribution to test the data against. This is equivalent
+    to the following, in which we create a frozen normal distribution with
+    mean 2, then pass its ``cdf`` method as an argument.
+
+    >>> frozen_dist = stats.norm(loc=2)
+    >>> res = stats.cramervonmises(y, frozen_dist.cdf)
+    >>> res.statistic, res.pvalue
+    (0.49121480855028343, 0.04189256516661377)
+
+    In either case, we would reject the null hypothesis that the observed
+    sample is drawn from a normal distribution with a mean of 2 (and default
+    variance of 1) because the p-value 0.04 is less than our chosen
+    significance level.
+
+    """
+    if isinstance(cdf, str):
+        cdf = getattr(distributions, cdf).cdf
+
+    vals = np.sort(np.asarray(rvs))
+
+    if vals.size <= 1:
+        raise ValueError('The sample must contain at least two observations.')
+    if vals.ndim > 1:
+        raise ValueError('The sample must be one-dimensional.')
+
+    n = len(vals)
+    cdfvals = cdf(vals, *args)
+
+    u = (2*np.arange(1, n+1) - 1)/(2*n)
+    w = 1/(12*n) + np.sum((u - cdfvals)**2)
+
+    # avoid small negative values that can occur due to the approximation
+    p = max(0, 1. - _cdf_cvm(w, n))
+
+    return CramerVonMisesResult(statistic=w, pvalue=p)
+
+
+def _get_wilcoxon_distr(n):
+    """
+    Probability distribution of the Wilcoxon signed-rank statistic r_plus
+    (sum of ranks of positive differences).
+    Returns an array with the probabilities of all the possible ranks
+    r = 0, ..., n*(n+1)/2
+    """
+    c = np.ones(1, dtype=np.double)
+    for k in range(1, n + 1):
+        prev_c = c
+        c = np.zeros(k * (k + 1) // 2 + 1, dtype=np.double)
+        m = len(prev_c)
+        c[:m] = prev_c * 0.5
+        c[-m:] += prev_c * 0.5
+    return c
+
+
+def _get_wilcoxon_distr2(n):
+    """
+    Probability distribution of the Wilcoxon signed-rank statistic r_plus
+    (sum of ranks of positive differences).
+    Returns an array with the probabilities of all the possible ranks
+    r = 0, ..., n*(n+1)/2.
+
+    This is a slower reference function.
+
+    References
+    ----------
+    .. [1] Harris T, Hardin JW. Exact Wilcoxon Signed-Rank and Wilcoxon
+        Mann-Whitney Ranksum Tests. The Stata Journal. 2013;13(2):337-343.
+    """
+    ai = np.arange(1, n+1)[:, None]
+    t = n*(n+1)/2
+    q = 2*t
+    j = np.arange(q)
+    theta = 2*np.pi/q*j
+    phi_sp = np.prod(np.cos(theta*ai), axis=0)
+    phi_s = np.exp(1j*theta*t) * phi_sp
+    p = np.real(ifft(phi_s))
+    res = np.zeros(int(t)+1)
+    res[:-1:] = p[::2]
+    res[0] /= 2
+    res[-1] = res[0]
+    return res
+
+
+def _tau_b(A):
+    """Calculate Kendall's tau-b and p-value from contingency table."""
+    # See [2] 2.2 and 4.2
+
+    # contingency table must be truly 2D
+    if A.shape[0] == 1 or A.shape[1] == 1:
+        return np.nan, np.nan
+
+    NA = A.sum()
+    PA = _P(A)
+    QA = _Q(A)
+    Sri2 = (A.sum(axis=1)**2).sum()
+    Scj2 = (A.sum(axis=0)**2).sum()
+    denominator = (NA**2 - Sri2)*(NA**2 - Scj2)
+
+    tau = (PA-QA)/(denominator)**0.5
+
+    numerator = 4*(_a_ij_Aij_Dij2(A) - (PA - QA)**2 / NA)
+    s02_tau_b = numerator/denominator
+    if s02_tau_b == 0:  # Avoid divide by zero
+        return tau, 0
+    Z = tau/s02_tau_b**0.5
+    p = 2*norm.sf(abs(Z))  # 2-sided p-value
+
+    return tau, p
+
+
+def _somers_d(A, alternative='two-sided'):
+    """Calculate Somers' D and p-value from contingency table."""
+    # See [3] page 1740
+
+    # contingency table must be truly 2D
+    if A.shape[0] <= 1 or A.shape[1] <= 1:
+        return np.nan, np.nan
+
+    NA = A.sum()
+    NA2 = NA**2
+    PA = _P(A)
+    QA = _Q(A)
+    Sri2 = (A.sum(axis=1)**2).sum()
+
+    d = (PA - QA)/(NA2 - Sri2)
+
+    S = _a_ij_Aij_Dij2(A) - (PA-QA)**2/NA
+
+    with np.errstate(divide='ignore'):
+        Z = (PA - QA)/(4*(S))**0.5
+
+    _, p = scipy.stats._stats_py._normtest_finish(Z, alternative)
+
+    return d, p
+
+
+SomersDResult = make_dataclass("SomersDResult",
+                               ("statistic", "pvalue", "table"))
+
+
+def somersd(x, y=None, alternative='two-sided'):
+    r"""Calculates Somers' D, an asymmetric measure of ordinal association.
+
+    Like Kendall's :math:`\tau`, Somers' :math:`D` is a measure of the
+    correspondence between two rankings. Both statistics consider the
+    difference between the number of concordant and discordant pairs in two
+    rankings :math:`X` and :math:`Y`, and both are normalized such that values
+    close to 1 indicate strong agreement and values close to -1 indicate
+    strong disagreement. They differ in how they are normalized. To show the
+    relationship, Somers' :math:`D` can be defined in terms of Kendall's
+    :math:`\tau_a`:
+
+    .. math::
+        D(Y|X) = \frac{\tau_a(X, Y)}{\tau_a(X, X)}
+
+    Suppose the first ranking :math:`X` has :math:`r` distinct ranks and the
+    second ranking :math:`Y` has :math:`s` distinct ranks. These two lists of
+    :math:`n` rankings can also be viewed as an :math:`r \times s` contingency
+    table in which element :math:`i, j` is the number of rank pairs with rank
+    :math:`i` in ranking :math:`X` and rank :math:`j` in ranking :math:`Y`.
+    Accordingly, `somersd` also allows the input data to be supplied as a
+    single, 2D contingency table instead of as two separate, 1D rankings.
+
+    Note that the definition of Somers' :math:`D` is asymmetric: in general,
+    :math:`D(Y|X) \neq D(X|Y)`. ``somersd(x, y)`` calculates Somers'
+    :math:`D(Y|X)`: the "row" variable :math:`X` is treated as an independent
+    variable, and the "column" variable :math:`Y` is dependent. For Somers'
+    :math:`D(X|Y)`, swap the input lists or transpose the input table.
+
+    Parameters
+    ----------
+    x : array_like
+        1D array of rankings, treated as the (row) independent variable.
+        Alternatively, a 2D contingency table.
+    y : array_like, optional
+        If `x` is a 1D array of rankings, `y` is a 1D array of rankings of the
+        same length, treated as the (column) dependent variable.
+        If `x` is 2D, `y` is ignored.
+    alternative : {'two-sided', 'less', 'greater'}, optional
+        Defines the alternative hypothesis. Default is 'two-sided'.
+        The following options are available:
+
+        * 'two-sided': the rank correlation is nonzero
+        * 'less': the rank correlation is negative (less than zero)
+        * 'greater': the rank correlation is positive (greater than zero)
+
+    Returns
+    -------
+    res : SomersDResult
+        A `SomersDResult` object with the following fields:
+
+            statistic : float
+               The Somers' :math:`D` statistic.
+            pvalue : float
+               The p-value for a hypothesis test whose null
+               hypothesis is an absence of association, :math:`D=0`.
+               See notes for more information.
+            table : 2D array
+               The contingency table formed from rankings `x` and `y` (or the
+               provided contingency table, if `x` is a 2D array)
+
+    See Also
+    --------
+    kendalltau : Calculates Kendall's tau, another correlation measure.
+    weightedtau : Computes a weighted version of Kendall's tau.
+    spearmanr : Calculates a Spearman rank-order correlation coefficient.
+    pearsonr : Calculates a Pearson correlation coefficient.
+
+    Notes
+    -----
+    This function follows the contingency table approach of [2]_ and
+    [3]_. *p*-values are computed based on an asymptotic approximation of
+    the test statistic distribution under the null hypothesis :math:`D=0`.
+
+    Theoretically, hypothesis tests based on Kendall's :math:`\tau` and Somers'
+    :math:`D` should be identical.
+    However, the *p*-values returned by `kendalltau` are based
+    on the null hypothesis of *independence* between :math:`X` and :math:`Y`
+    (i.e. the population from which pairs in :math:`X` and :math:`Y` are
+    sampled contains equal numbers of all possible pairs), which is more
+    specific than the null hypothesis :math:`D=0` used here. If the null
+    hypothesis of independence is desired, it is acceptable to use the
+    *p*-value returned by `kendalltau` with the statistic returned by
+    `somersd` and vice versa. For more information, see [2]_.
+
+    Contingency tables are formatted according to the convention used by
+    SAS and R: the first ranking supplied (``x``) is the "row" variable, and
+    the second ranking supplied (``y``) is the "column" variable. This is
+    opposite the convention of Somers' original paper [1]_.
+
+    References
+    ----------
+    .. [1] Robert H. Somers, "A New Asymmetric Measure of Association for
+           Ordinal Variables", *American Sociological Review*, Vol. 27, No. 6,
+           pp. 799--811, 1962.
+
+    .. [2] Morton B. Brown and Jacqueline K. Benedetti, "Sampling Behavior of
+           Tests for Correlation in Two-Way Contingency Tables", *Journal of
+           the American Statistical Association* Vol. 72, No. 358, pp.
+           309--315, 1977.
+
+    .. [3] SAS Institute, Inc., "The FREQ Procedure (Book Excerpt)",
+           *SAS/STAT 9.2 User's Guide, Second Edition*, SAS Publishing, 2009.
+
+    .. [4] Laerd Statistics, "Somers' d using SPSS Statistics", *SPSS
+           Statistics Tutorials and Statistical Guides*,
+           https://statistics.laerd.com/spss-tutorials/somers-d-using-spss-statistics.php,
+           Accessed July 31, 2020.
+
+    Examples
+    --------
+    We calculate Somers' D for the example given in [4]_, in which a hotel
+    chain owner seeks to determine the association between hotel room
+    cleanliness and customer satisfaction. The independent variable, hotel
+    room cleanliness, is ranked on an ordinal scale: "below average (1)",
+    "average (2)", or "above average (3)". The dependent variable, customer
+    satisfaction, is ranked on a second scale: "very dissatisfied (1)",
+    "moderately dissatisfied (2)", "neither dissatisfied nor satisfied (3)",
+    "moderately satisfied (4)", or "very satisfied (5)". 189 customers
+    respond to the survey, and the results are cast into a contingency table
+    with the hotel room cleanliness as the "row" variable and customer
+    satisfaction as the "column" variable.
+
+    +-----+-----+-----+-----+-----+-----+
+    |     | (1) | (2) | (3) | (4) | (5) |
+    +=====+=====+=====+=====+=====+=====+
+    | (1) | 27  | 25  | 14  | 7   | 0   |
+    +-----+-----+-----+-----+-----+-----+
+    | (2) | 7   | 14  | 18  | 35  | 12  |
+    +-----+-----+-----+-----+-----+-----+
+    | (3) | 1   | 3   | 2   | 7   | 17  |
+    +-----+-----+-----+-----+-----+-----+
+
+    For example, 27 customers assigned their room a cleanliness ranking of
+    "below average (1)" and a corresponding satisfaction of "very
+    dissatisfied (1)". We perform the analysis as follows.
+
+    >>> from scipy.stats import somersd
+    >>> table = [[27, 25, 14, 7, 0], [7, 14, 18, 35, 12], [1, 3, 2, 7, 17]]
+    >>> res = somersd(table)
+    >>> res.statistic
+    0.6032766111513396
+    >>> res.pvalue
+    1.0007091191074533e-27
+
+    The value of the Somers' D statistic is approximately 0.6, indicating
+    a positive correlation between room cleanliness and customer satisfaction
+    in the sample.
+    The *p*-value is very small, indicating a very small probability of
+    observing such an extreme value of the statistic under the null
+    hypothesis that the statistic of the entire population (from which
+    our sample of 189 customers is drawn) is zero. This supports the
+    alternative hypothesis that the true value of Somers' D for the population
+    is nonzero.
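+
+    As noted above, Somers' :math:`D` is asymmetric. To obtain
+    :math:`D(X|Y)` for the same data, transpose the contingency table
+    (exact output omitted here):
+
+    >>> import numpy as np
+    >>> res2 = somersd(np.asarray(table).T)
+    >>> res2.statistic  # doctest: +SKIP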
+
+    """
+    x, y = np.array(x), np.array(y)
+    if x.ndim == 1:
+        if x.size != y.size:
+            raise ValueError("Rankings must be of equal length.")
+        table = scipy.stats.contingency.crosstab(x, y)[1]
+    elif x.ndim == 2:
+        if np.any(x < 0):
+            raise ValueError("All elements of the contingency table must be "
+                             "non-negative.")
+        if np.any(x != x.astype(int)):
+            raise ValueError("All elements of the contingency table must be "
+                             "integer.")
+        if x.nonzero()[0].size < 2:
+            raise ValueError("At least two elements of the contingency table "
+                             "must be nonzero.")
+        table = x
+    else:
+        raise ValueError("x must be either a 1D or 2D array")
+    d, p = _somers_d(table, alternative)
+
+    # add alias for consistency with other correlation functions
+    res = SomersDResult(d, p, table)
+    res.correlation = d
+    return res
+
+
+# This could be combined with `_all_partitions` in `_resampling.py`
+def _all_partitions(nx, ny):
+    """
+    Partition a set of indices into two fixed-length sets in all possible ways
+
+    Partition a set of indices 0 ... nx + ny - 1 into two sets of length nx and
+    ny in all possible ways (ignoring order of elements).
+    """
+    z = np.arange(nx+ny)
+    for c in combinations(z, nx):
+        x = np.array(c)
+        mask = np.ones(nx+ny, bool)
+        mask[x] = False
+        y = z[mask]
+        yield x, y
+
+
+def _compute_log_combinations(n):
+    """Compute all log combination of C(n, k)."""
+    gammaln_arr = gammaln(np.arange(n + 1) + 1)
+    return gammaln(n + 1) - gammaln_arr - gammaln_arr[::-1]
+
+
+BarnardExactResult = make_dataclass(
+    "BarnardExactResult", [("statistic", float), ("pvalue", float)]
+)
+
+
+def barnard_exact(table, alternative="two-sided", pooled=True, n=32):
+    r"""Perform a Barnard exact test on a 2x2 contingency table.
+
+    Parameters
+    ----------
+    table : array_like of ints
+        A 2x2 contingency table.  Elements should be non-negative integers.
+
+    alternative : {'two-sided', 'less', 'greater'}, optional
+        Defines the null and alternative hypotheses. Default is 'two-sided'.
+        Please see explanations in the Notes section below.
+
+    pooled : bool, optional
+        Whether to compute score statistic with pooled variance (as in
+        Student's t-test, for example) or unpooled variance (as in Welch's
+        t-test). Default is ``True``.
+
+    n : int, optional
+        Number of sampling points used in the construction of the sampling
+        method. Note that this argument will automatically be converted to
+        the next higher power of 2 since `scipy.stats.qmc.Sobol` is used to
+        select sample points. Default is 32. Must be positive. In most cases,
+        32 points are enough to reach good precision; more points come at a
+        performance cost.
+
+    Returns
+    -------
+    ber : BarnardExactResult
+        A result object with the following attributes.
+
+        statistic : float
+            The Wald statistic with pooled or unpooled variance, depending
+            on the user's choice of `pooled`.
+
+        pvalue : float
+            P-value, the probability of obtaining a distribution at least as
+            extreme as the one that was actually observed, assuming that the
+            null hypothesis is true.
+
+    See Also
+    --------
+    chi2_contingency : Chi-square test of independence of variables in a
+        contingency table.
+    fisher_exact : Fisher exact test on a 2x2 contingency table.
+    boschloo_exact : Boschloo's exact test on a 2x2 contingency table,
+        which is a uniformly more powerful alternative to Fisher's exact test.
+
+    Notes
+    -----
+    Barnard's test is an exact test used in the analysis of contingency
+    tables. It examines the association of two categorical variables, and
+    is a more powerful alternative to Fisher's exact test
+    for 2x2 contingency tables.
+
+    Let us define :math:`X_0` as a 2x2 matrix representing the observed
+    sample, where each column stores a binomial experiment, as in the example
+    below. Let us also define :math:`p_1, p_2` as the theoretical binomial
+    probabilities for :math:`x_{11}` and :math:`x_{12}`. When using
+    Barnard's exact test, we can assert three different null hypotheses:
+
+    - :math:`H_0 : p_1 \geq p_2` versus :math:`H_1 : p_1 < p_2`,
+      with `alternative` = "less"
+
+    - :math:`H_0 : p_1 \leq p_2` versus :math:`H_1 : p_1 > p_2`,
+      with `alternative` = "greater"
+
+    - :math:`H_0 : p_1 = p_2` versus :math:`H_1 : p_1 \neq p_2`,
+      with `alternative` = "two-sided" (default one)
+
+    To compute Barnard's exact test, we use the Wald
+    statistic [3]_ with pooled or unpooled variance.
+    Under the default assumption that both variances are equal
+    (``pooled = True``), the statistic is computed as:
+
+    .. math::
+
+        T(X) = \frac{
+            \hat{p}_1 - \hat{p}_2
+        }{
+            \sqrt{
+                \hat{p}(1 - \hat{p})
+                (\frac{1}{c_1} +
+                \frac{1}{c_2})
+            }
+        }
+
+    with :math:`\hat{p}_1, \hat{p}_2` and :math:`\hat{p}` the estimator of
+    :math:`p_1, p_2` and :math:`p`, the latter being the combined probability,
+    given the assumption that :math:`p_1 = p_2`.
+
+    If this assumption is invalid (``pooled = False``), the statistic is:
+
+    .. math::
+
+        T(X) = \frac{
+            \hat{p}_1 - \hat{p}_2
+        }{
+            \sqrt{
+                \frac{\hat{p}_1 (1 - \hat{p}_1)}{c_1} +
+                \frac{\hat{p}_2 (1 - \hat{p}_2)}{c_2}
+            }
+        }
+
+    The p-value is then computed as:
+
+    .. math::
+
+        \sum
+            \binom{c_1}{x_{11}}
+            \binom{c_2}{x_{12}}
+            \pi^{x_{11} + x_{12}}
+            (1 - \pi)^{t - x_{11} - x_{12}}
+
+    where the sum is over all 2x2 contingency tables :math:`X` such that:
+
+    * :math:`T(X) \leq T(X_0)` when `alternative` = "less",
+    * :math:`T(X) \geq T(X_0)` when `alternative` = "greater", or
+    * :math:`|T(X)| \geq |T(X_0)|` when `alternative` = "two-sided".
+
+    Above, :math:`c_1` and :math:`c_2` are the totals of columns 1 and 2,
+    and :math:`t` the grand total (sum of the four elements of the table).
+
+    The returned p-value is the maximum p-value taken over the nuisance
+    parameter :math:`\pi`, where :math:`0 \leq \pi \leq 1`.
+
+    This function's complexity is :math:`O(n c_1 c_2)`, where `n` is the
+    number of sample points.
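+
+    As a rough cross-check of the nuisance-parameter maximization described
+    above, the same p-value can be approximated by a brute-force grid search
+    over :math:`\pi` (the implementation instead maximizes with
+    `scipy.optimize.shgo` over Sobol sample points; the grid size below is an
+    arbitrary illustrative choice):
+
+    >>> import numpy as np
+    >>> from scipy.stats import binom
+    >>> c1 = c2 = 15                         # column totals
+    >>> x1 = np.arange(c1 + 1)[:, None]      # candidate counts, column 1
+    >>> x2 = np.arange(c2 + 1)[None, :]      # candidate counts, column 2
+    >>> p1, p2 = x1 / c1, x2 / c2
+    >>> p = (x1 + x2) / (c1 + c2)            # pooled estimate
+    >>> with np.errstate(divide="ignore", invalid="ignore"):
+    ...     t = (p1 - p2) / np.sqrt(p * (1 - p) * (1 / c1 + 1 / c2))
+    >>> t[p1 == p2] = 0                      # 0/0 cases
+    >>> extreme = t <= t[7, 12]              # "less", observed [[7, 12], [8, 3]]
+    >>> p_brute = max((binom.pmf(x1, c1, pi) * binom.pmf(x2, c2, pi))[extreme].sum()
+    ...               for pi in np.linspace(1e-6, 1 - 1e-6, 1001))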
+
+    References
+    ----------
+    .. [1] Barnard, G. A. "Significance Tests for 2x2 Tables". *Biometrika*.
+           34.1/2 (1947): 123-138. :doi:`dpgkg3`
+
+    .. [2] Mehta, Cyrus R., and Pralay Senchaudhuri. "Conditional versus
+           unconditional exact tests for comparing two binomials."
+           *Cytel Software Corporation* 675 (2003): 1-5.
+
+    .. [3] "Wald Test". *Wikipedia*. https://en.wikipedia.org/wiki/Wald_test
+
+    Examples
+    --------
+    An example use of Barnard's test is presented in [2]_.
+
+        Consider the following example of a vaccine efficacy study
+        (Chan, 1998). In a randomized clinical trial of 30 subjects, 15 were
+        inoculated with a recombinant DNA influenza vaccine and the other 15
+        were inoculated with a placebo. Twelve of the 15 subjects in the placebo
+        group (80%) eventually became infected with influenza whereas for the
+        vaccine group, only 7 of the 15 subjects (47%) became infected. The
+        data are tabulated as a 2 x 2 table::
+
+                Vaccine  Placebo
+            Yes     7        12
+            No      8        3
+
+    When working with statistical hypothesis testing, we usually use a
+    threshold probability or significance level upon which we decide
+    to reject the null hypothesis :math:`H_0`. Suppose we choose the common
+    significance level of 5%.
+
+    Our alternative hypothesis is that the vaccine will lower the chance of
+    becoming infected with the virus; that is, the probability :math:`p_1` of
+    catching the virus with the vaccine will be *less than* the probability
+    :math:`p_2` of catching the virus without the vaccine.  Therefore, we call
+    `barnard_exact` with the ``alternative="less"`` option:
+
+    >>> import scipy.stats as stats
+    >>> res = stats.barnard_exact([[7, 12], [8, 3]], alternative="less")
+    >>> res.statistic
+    -1.894...
+    >>> res.pvalue
+    0.03407...
+
+    Under the null hypothesis that the vaccine will not lower the chance of
+    becoming infected, the probability of obtaining test results at least as
+    extreme as the observed data is approximately 3.4%. Since this p-value is
+    less than our chosen significance level, we have evidence to reject
+    :math:`H_0` in favor of the alternative.
+
+    Suppose we had used Fisher's exact test instead:
+
+    >>> _, pvalue = stats.fisher_exact([[7, 12], [8, 3]], alternative="less")
+    >>> pvalue
+    0.0640...
+
+    With the same threshold significance of 5%, we would not have been able
+    to reject the null hypothesis in favor of the alternative. As stated in
+    [2]_, Barnard's test is uniformly more powerful than Fisher's exact test
+    because Barnard's test does not condition on any margin. Fisher's test
+    should only be used when both sets of marginals are fixed.
+
+    """
+    if n <= 0:
+        raise ValueError(
+            "Number of points `n` must be strictly positive, "
+            f"found {n!r}"
+        )
+
+    table = np.asarray(table, dtype=np.int64)
+
+    if not table.shape == (2, 2):
+        raise ValueError("The input `table` must be of shape (2, 2).")
+
+    if np.any(table < 0):
+        raise ValueError("All values in `table` must be nonnegative.")
+
+    if 0 in table.sum(axis=0):
+        # If both values in a column are zero, the p-value is 1 and
+        # the statistic is NaN.
+        return BarnardExactResult(np.nan, 1.0)
+
+    total_col_1, total_col_2 = table.sum(axis=0)
+
+    x1 = np.arange(total_col_1 + 1, dtype=np.int64).reshape(-1, 1)
+    x2 = np.arange(total_col_2 + 1, dtype=np.int64).reshape(1, -1)
+
+    # We need to calculate the wald statistics for each combination of x1 and
+    # x2.
+    p1, p2 = x1 / total_col_1, x2 / total_col_2
+
+    if pooled:
+        p = (x1 + x2) / (total_col_1 + total_col_2)
+        variances = p * (1 - p) * (1 / total_col_1 + 1 / total_col_2)
+    else:
+        variances = p1 * (1 - p1) / total_col_1 + p2 * (1 - p2) / total_col_2
+
+    # To avoid warning when dividing by 0
+    with np.errstate(divide="ignore", invalid="ignore"):
+        wald_statistic = np.divide((p1 - p2), np.sqrt(variances))
+
+    wald_statistic[p1 == p2] = 0  # 0/0 gives NaN where p1 == p2; define as 0
+
+    wald_stat_obs = wald_statistic[table[0, 0], table[0, 1]]
+
+    if alternative == "two-sided":
+        index_arr = np.abs(wald_statistic) >= abs(wald_stat_obs)
+    elif alternative == "less":
+        index_arr = wald_statistic <= wald_stat_obs
+    elif alternative == "greater":
+        index_arr = wald_statistic >= wald_stat_obs
+    else:
+        msg = (
+            "`alternative` should be one of {'two-sided', 'less', 'greater'},"
+            f" found {alternative!r}"
+        )
+        raise ValueError(msg)
+
+    x1_sum_x2 = x1 + x2
+
+    x1_log_comb = _compute_log_combinations(total_col_1)
+    x2_log_comb = _compute_log_combinations(total_col_2)
+    x1_sum_x2_log_comb = x1_log_comb[x1] + x2_log_comb[x2]
+
+    result = shgo(
+        _get_binomial_log_p_value_with_nuisance_param,
+        args=(x1_sum_x2, x1_sum_x2_log_comb, index_arr),
+        bounds=((0, 1),),
+        n=n,
+        sampling_method="sobol",
+    )
+
+    # result.fun is the negative log pvalue and therefore needs to be
+    # changed before return
+    p_value = np.clip(np.exp(-result.fun), a_min=0, a_max=1)
+    return BarnardExactResult(wald_stat_obs, p_value)
+
+
+BoschlooExactResult = make_dataclass(
+    "BoschlooExactResult", [("statistic", float), ("pvalue", float)]
+)
+
+
+def boschloo_exact(table, alternative="two-sided", n=32):
+    r"""Perform Boschloo's exact test on a 2x2 contingency table.
+
+    Parameters
+    ----------
+    table : array_like of ints
+        A 2x2 contingency table.  Elements should be non-negative integers.
+
+    alternative : {'two-sided', 'less', 'greater'}, optional
+        Defines the null and alternative hypotheses. Default is 'two-sided'.
+        Please see explanations in the Notes section below.
+
+    n : int, optional
+        Number of sampling points used in the construction of the sampling
+        method. Note that this argument will automatically be converted to
+        the next higher power of 2 since `scipy.stats.qmc.Sobol` is used to
+        select sample points. Default is 32. Must be positive. In most cases,
+        32 points are enough to reach good precision; more points come at a
+        performance cost.
+
+    Returns
+    -------
+    ber : BoschlooExactResult
+        A result object with the following attributes.
+
+        statistic : float
+            The statistic used in Boschloo's test; that is, the p-value
+            from Fisher's exact test.
+
+        pvalue : float
+            P-value, the probability of obtaining a distribution at least as
+            extreme as the one that was actually observed, assuming that the
+            null hypothesis is true.
+
+    See Also
+    --------
+    chi2_contingency : Chi-square test of independence of variables in a
+        contingency table.
+    fisher_exact : Fisher exact test on a 2x2 contingency table.
+    barnard_exact : Barnard's exact test, which is a more powerful alternative
+        than Fisher's exact test for 2x2 contingency tables.
+
+    Notes
+    -----
+    Boschloo's test is an exact test used in the analysis of contingency
+    tables. It examines the association of two categorical variables, and
+    is a uniformly more powerful alternative to Fisher's exact test
+    for 2x2 contingency tables.
+
+    Boschloo's exact test uses the p-value of Fisher's exact test as a
+    statistic, and Boschloo's p-value is the probability under the null
+    hypothesis of observing such an extreme value of this statistic.
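+
+    For a one-sided alternative, this relationship can be checked directly
+    (a minimal sketch; for "two-sided" the conventions differ, as noted
+    below):
+
+    >>> import numpy as np
+    >>> import scipy.stats as stats
+    >>> t = [[74, 31], [43, 32]]
+    >>> b = stats.boschloo_exact(t, alternative="greater")
+    >>> f_pvalue = stats.fisher_exact(t, alternative="greater")[1]
+    >>> consistent = np.isclose(b.statistic, f_pvalue)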
+
+    Let us define :math:`X_0` as a 2x2 matrix representing the observed
+    sample, where each column stores a binomial experiment, as in the example
+    below. Let us also define :math:`p_1, p_2` as the theoretical binomial
+    probabilities for :math:`x_{11}` and :math:`x_{12}`. When using
+    Boschloo's exact test, we can assert three different alternative hypotheses:
+
+    - :math:`H_0 : p_1=p_2` versus :math:`H_1 : p_1 < p_2`,
+      with `alternative` = "less"
+
+    - :math:`H_0 : p_1=p_2` versus :math:`H_1 : p_1 > p_2`,
+      with `alternative` = "greater"
+
+    - :math:`H_0 : p_1=p_2` versus :math:`H_1 : p_1 \neq p_2`,
+      with `alternative` = "two-sided" (default)
+
+    There are multiple conventions for computing a two-sided p-value when the
+    null distribution is asymmetric. Here, we apply the convention that the
+    p-value of a two-sided test is twice the minimum of the p-values of the
+    one-sided tests (clipped to 1.0). Note that `fisher_exact` follows a
+    different convention, so for a given `table`, the statistic reported by
+    `boschloo_exact` may differ from the p-value reported by `fisher_exact`
+    when ``alternative='two-sided'``.
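+
+    A minimal sketch of this convention (the relationship holds by
+    construction of the two-sided p-value):
+
+    >>> import numpy as np
+    >>> import scipy.stats as stats
+    >>> t = [[74, 31], [43, 32]]
+    >>> less = stats.boschloo_exact(t, alternative="less").pvalue
+    >>> greater = stats.boschloo_exact(t, alternative="greater").pvalue
+    >>> two_sided = stats.boschloo_exact(t, alternative="two-sided").pvalue
+    >>> consistent = np.isclose(two_sided, min(2 * min(less, greater), 1.0))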
+
+    .. versionadded:: 1.7.0
+
+    References
+    ----------
+    .. [1] R.D. Boschloo. "Raised conditional level of significance for the
+       2 x 2-table when testing the equality of two probabilities",
+       Statistica Neerlandica, 24(1), 1970
+
+    .. [2] "Boschloo's test", Wikipedia,
+       https://en.wikipedia.org/wiki/Boschloo%27s_test
+
+    .. [3] Lise M. Saari et al. "Employee attitudes and job satisfaction",
+       Human Resource Management, 43(4), 395-407, 2004,
+       :doi:`10.1002/hrm.20032`.
+
+    Examples
+    --------
+    In the following example, we consider the article "Employee
+    attitudes and job satisfaction" [3]_
+    which reports the results of a survey of 63 scientists and 117 college
+    professors. Of the 63 scientists, 31 said they were very satisfied with
+    their jobs, whereas 74 of the college professors were very satisfied
+    with their work. Is this significant evidence that college
+    professors are happier with their work than scientists?
+    The following table summarizes the data mentioned above::
+
+                        college professors   scientists
+        Very Satisfied          74               31
+        Dissatisfied            43               32
+
+    When working with statistical hypothesis testing, we usually use a
+    threshold probability or significance level upon which we decide
+    to reject the null hypothesis :math:`H_0`. Suppose we choose the common
+    significance level of 5%.
+
+    Our alternative hypothesis is that college professors are truly more
+    satisfied with their work than scientists. Therefore, we expect
+    :math:`p_1` the proportion of very satisfied college professors to be
+    greater than :math:`p_2`, the proportion of very satisfied scientists.
+    We thus call `boschloo_exact` with the ``alternative="greater"`` option:
+
+    >>> import scipy.stats as stats
+    >>> res = stats.boschloo_exact([[74, 31], [43, 32]], alternative="greater")
+    >>> res.statistic
+    0.0483...
+    >>> res.pvalue
+    0.0355...
+
+    Under the null hypothesis that college professors and scientists are
+    equally satisfied with their work, the probability of obtaining test
+    results at least as extreme as the observed data is approximately 3.55%.
+    Since this p-value is less than our chosen significance level, we have
+    evidence to reject :math:`H_0` in favor of the alternative hypothesis.
+
+    """
+    hypergeom = distributions.hypergeom
+
+    if n <= 0:
+        raise ValueError(
+            "Number of points `n` must be strictly positive,"
+            f" found {n!r}"
+        )
+
+    table = np.asarray(table, dtype=np.int64)
+
+    if not table.shape == (2, 2):
+        raise ValueError("The input `table` must be of shape (2, 2).")
+
+    if np.any(table < 0):
+        raise ValueError("All values in `table` must be nonnegative.")
+
+    if 0 in table.sum(axis=0):
+        # If both values in a column are zero, the statistic and the
+        # p-value are undefined, so NaN is returned for both.
+        return BoschlooExactResult(np.nan, np.nan)
+
+    total_col_1, total_col_2 = table.sum(axis=0)
+    total = total_col_1 + total_col_2
+    x1 = np.arange(total_col_1 + 1, dtype=np.int64).reshape(1, -1)
+    x2 = np.arange(total_col_2 + 1, dtype=np.int64).reshape(-1, 1)
+    x1_sum_x2 = x1 + x2
+
+    if alternative == 'less':
+        pvalues = hypergeom.cdf(x1, total, x1_sum_x2, total_col_1).T
+    elif alternative == 'greater':
+        # Same formula as the 'less' case, but with the second column.
+        pvalues = hypergeom.cdf(x2, total, x1_sum_x2, total_col_2).T
+    elif alternative == 'two-sided':
+        boschloo_less = boschloo_exact(table, alternative="less", n=n)
+        boschloo_greater = boschloo_exact(table, alternative="greater", n=n)
+
+        res = (
+            boschloo_less if boschloo_less.pvalue < boschloo_greater.pvalue
+            else boschloo_greater
+        )
+
+        # Two-sided p-value is defined as twice the minimum of the one-sided
+        # p-values, clipped to at most 1
+        pvalue = np.clip(2 * res.pvalue, a_min=0, a_max=1)
+        return BoschlooExactResult(res.statistic, pvalue)
+    else:
+        msg = (
+            f"`alternative` should be one of {'two-sided', 'less', 'greater'},"
+            f" found {alternative!r}"
+        )
+        raise ValueError(msg)
+
+    fisher_stat = pvalues[table[0, 0], table[0, 1]]
+
+    # fisher_stat * (1+1e-13) guards against small numerical error. It is
+    # equivalent to np.isclose with relative tol of 1e-13 and absolute tol of 0.
+    # For more thorough explanations, see gh-14178.
+    index_arr = pvalues <= fisher_stat * (1+1e-13)
+
+    x1, x2, x1_sum_x2 = x1.T, x2.T, x1_sum_x2.T
+    x1_log_comb = _compute_log_combinations(total_col_1)
+    x2_log_comb = _compute_log_combinations(total_col_2)
+    x1_sum_x2_log_comb = x1_log_comb[x1] + x2_log_comb[x2]
+
+    result = shgo(
+        _get_binomial_log_p_value_with_nuisance_param,
+        args=(x1_sum_x2, x1_sum_x2_log_comb, index_arr),
+        bounds=((0, 1),),
+        n=n,
+        sampling_method="sobol",
+    )
+
+    # result.fun is the negative log pvalue and therefore needs to be
+    # changed before return
+    p_value = np.clip(np.exp(-result.fun), a_min=0, a_max=1)
+    return BoschlooExactResult(fisher_stat, p_value)
+
+
+def _get_binomial_log_p_value_with_nuisance_param(
+    nuisance_param, x1_sum_x2, x1_sum_x2_log_comb, index_arr
+):
+    r"""
+    Compute the negative log p-value with respect to a nuisance parameter,
+    considering a 2x2 sample space.
+
+    Parameters
+    ----------
+    nuisance_param : float
+        Nuisance parameter over which the p-value is maximized.
+        Must be between 0 and 1.
+
+    x1_sum_x2 : ndarray
+        Sum of x1 and x2 inside barnard_exact
+
+    x1_sum_x2_log_comb : ndarray
+        Sum of the log binomial coefficients of x1 and x2.
+
+    index_arr : ndarray of boolean
+        Mask selecting the tables of the sample space that are at least as
+        extreme as the observed table.
+
+    Returns
+    -------
+    neg_log_pvalue : float
+        The negative log of the p-value evaluated at the given
+        `nuisance_param`; minimizing this over the nuisance parameter
+        yields the maximum p-value.
+
+    Notes
+    -----
+
+    Both Barnard's test and Boschloo's test maximize the p-value over a
+    nuisance parameter :math:`\pi \in [0, 1]`. To find this maximum, this
+    function returns the negative log p-value at the given nuisance
+    parameter; `shgo` then minimizes it, and the minimum of the negative
+    log p-value corresponds to the maximum p-value.
+
+    Also, to compute the binomial coefficients appearing in the p-value
+    formula, this function uses `gammaln`, which handles large values
+    better than `scipy.special.comb` and returns the log of the
+    coefficient. The small loss of precision buys a large performance
+    improvement.
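+
+    A minimal sketch of the centering (log-sum-exp) trick used below, for a
+    1-D array ``v`` of log values::
+
+        m = v.max()
+        log_sum = m + np.log(np.exp(v - m).sum())
+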
+    """
+    t1, t2 = x1_sum_x2.shape
+    n = t1 + t2 - 2
+    with np.errstate(divide="ignore", invalid="ignore"):
+        log_nuisance = np.log(
+            nuisance_param,
+            out=np.zeros_like(nuisance_param),
+            where=nuisance_param >= 0,
+        )
+        log_1_minus_nuisance = np.log(
+            1 - nuisance_param,
+            out=np.zeros_like(nuisance_param),
+            where=1 - nuisance_param >= 0,
+        )
+
+        nuisance_power_x1_x2 = log_nuisance * x1_sum_x2
+        nuisance_power_x1_x2[(x1_sum_x2 == 0)[:, :]] = 0
+
+        nuisance_power_n_minus_x1_x2 = log_1_minus_nuisance * (n - x1_sum_x2)
+        nuisance_power_n_minus_x1_x2[(x1_sum_x2 == n)[:, :]] = 0
+
+        tmp_log_values_arr = (
+            x1_sum_x2_log_comb
+            + nuisance_power_x1_x2
+            + nuisance_power_n_minus_x1_x2
+        )
+
+    tmp_values_from_index = tmp_log_values_arr[index_arr]
+
+    # Log-sum-exp trick: center the values by their maximum so that the sum
+    # of exponentials does not underflow to zero (log(0) would give -inf)
+    max_value = tmp_values_from_index.max()
+
+    # For better precision, the log of the p-value is computed here: the
+    # p-value lies in [0, 1], whereas its log spans the much wider
+    # interval [-inf, 0], which helps preserve precision
+    with np.errstate(divide="ignore", invalid="ignore"):
+        log_probs = np.exp(tmp_values_from_index - max_value).sum()
+        log_pvalue = max_value + np.log(
+            log_probs,
+            out=np.full_like(log_probs, -np.inf),
+            where=log_probs > 0,
+        )
+
+    # Since shgo finds the minimum, the negative log p-value is returned
+    return -log_pvalue
+
+
+def _pval_cvm_2samp_exact(s, m, n):
+    """
+    Compute the exact p-value of the Cramer-von Mises two-sample test
+    for a given value s of the test statistic.
+    m and n are the sizes of the samples.
+
+    [1] Y. Xiao, A. Gordon, and A. Yakovlev, "A C++ Program for
+        the Cramér-Von Mises Two-Sample Test", J. Stat. Soft.,
+        vol. 17, no. 8, pp. 1-15, Dec. 2006.
+    [2] T. W. Anderson "On the Distribution of the Two-Sample Cramer-von Mises
+        Criterion," The Annals of Mathematical Statistics, Ann. Math. Statist.
+        33(3), 1148-1159, (September, 1962)
+    """
+
+    # [1, p. 3]
+    lcm = np.lcm(m, n)
+    # [1, p. 4], below eq. 3
+    a = lcm // m
+    b = lcm // n
+    # Combine Eq. 9 in [2] with Eq. 2 in [1] and solve for $\zeta$
+    # Hint: `s` is $U$ in [2], and $T_2$ in [1] is $T$ in [2]
+    mn = m * n
+    zeta = lcm ** 2 * (m + n) * (6 * s - mn * (4 * mn - 1)) // (6 * mn ** 2)
+
+    # bound maximum value that may appear in `gs` (remember both rows!)
+    zeta_bound = lcm**2 * (m + n)  # bound elements in row 1
+    combinations = comb(m + n, m)  # sum of row 2
+    max_gs = max(zeta_bound, combinations)
+    dtype = np.min_scalar_type(max_gs)
+
+    # the frequency table of $g_{u, v}^+$ defined in [1, p. 6]
+    gs = ([np.array([[0], [1]], dtype=dtype)]
+          + [np.empty((2, 0), dtype=dtype) for _ in range(m)])
+    for u in range(n + 1):
+        next_gs = []
+        tmp = np.empty((2, 0), dtype=dtype)
+        for v, g in enumerate(gs):
+            # Calculate g recursively with eq. 11 in [1]. Even though it
+            # doesn't look like it, this also does 12/13 (all of Algorithm 1).
+            vi, i0, i1 = np.intersect1d(tmp[0], g[0], return_indices=True)
+            tmp = np.concatenate([
+                np.stack([vi, tmp[1, i0] + g[1, i1]]),
+                np.delete(tmp, i0, 1),
+                np.delete(g, i1, 1)
+            ], 1)
+            tmp[0] += (a * v - b * u) ** 2
+            next_gs.append(tmp)
+        gs = next_gs
+    value, freq = gs[m]
+    return np.float64(np.sum(freq[value >= zeta]) / combinations)
+
+
+def cramervonmises_2samp(x, y, method='auto'):
+    """Perform the two-sample Cramér-von Mises test for goodness of fit.
+
+    This is the two-sample version of the Cramér-von Mises test ([1]_):
+    for two independent samples :math:`X_1, ..., X_n` and
+    :math:`Y_1, ..., Y_m`, the null hypothesis is that the samples
+    come from the same (unspecified) continuous distribution.
+
+    Parameters
+    ----------
+    x : array_like
+        A 1-D array of observed values of the random variables :math:`X_i`.
+    y : array_like
+        A 1-D array of observed values of the random variables :math:`Y_i`.
+    method : {'auto', 'asymptotic', 'exact'}, optional
+        The method used to compute the p-value, see Notes for details.
+        The default is 'auto'.
+
+    Returns
+    -------
+    res : object with attributes
+        statistic : float
+            Cramér-von Mises statistic.
+        pvalue : float
+            The p-value.
+
+    See Also
+    --------
+    cramervonmises, anderson_ksamp, epps_singleton_2samp, ks_2samp
+
+    Notes
+    -----
+    .. versionadded:: 1.7.0
+
+    The statistic is computed according to equation 9 in [2]_. The
+    calculation of the p-value depends on the keyword `method`:
+
+    - ``asymptotic``: The p-value is approximated by using the limiting
+      distribution of the test statistic.
+    - ``exact``: The exact p-value is computed by enumerating all
+      possible combinations of the test statistic, see [2]_.
+
+    If ``method='auto'``, the exact approach is used
+    if both samples contain at most 20 observations;
+    otherwise, the asymptotic distribution is used.
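+
+    For instance, when both samples have at most 20 observations,
+    ``method='auto'`` gives the same p-value as ``method='exact'`` (a quick
+    sketch):
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> rng = np.random.default_rng(12345)
+    >>> x, y = rng.standard_normal(7), rng.standard_normal(6)
+    >>> res_auto = stats.cramervonmises_2samp(x, y)
+    >>> res_exact = stats.cramervonmises_2samp(x, y, method='exact')
+    >>> same = res_auto.pvalue == res_exact.pvalue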
+
+    If the underlying distribution is not continuous, the p-value is likely to
+    be conservative (Section 6.2 in [3]_). When ranking the data to compute
+    the test statistic, midranks are used if there are ties.
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Cramer-von_Mises_criterion
+    .. [2] Anderson, T.W. (1962). On the distribution of the two-sample
+           Cramer-von-Mises criterion. The Annals of Mathematical
+           Statistics, pp. 1148-1159.
+    .. [3] Conover, W.J., Practical Nonparametric Statistics, 1971.
+
+    Examples
+    --------
+
+    Suppose we wish to test whether two samples generated by
+    ``scipy.stats.norm.rvs`` have the same distribution. We choose a
+    significance level of alpha=0.05.
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> rng = np.random.default_rng()
+    >>> x = stats.norm.rvs(size=100, random_state=rng)
+    >>> y = stats.norm.rvs(size=70, random_state=rng)
+    >>> res = stats.cramervonmises_2samp(x, y)
+    >>> res.statistic, res.pvalue
+    (0.29376470588235293, 0.1412873014573014)
+
+    The p-value exceeds our chosen significance level, so we do not
+    reject the null hypothesis that the observed samples are drawn from the
+    same distribution.
+
+    For small sample sizes, one can compute the exact p-values:
+
+    >>> x = stats.norm.rvs(size=7, random_state=rng)
+    >>> y = stats.t.rvs(df=2, size=6, random_state=rng)
+    >>> res = stats.cramervonmises_2samp(x, y, method='exact')
+    >>> res.statistic, res.pvalue
+    (0.197802197802198, 0.31643356643356646)
+
+    The p-value based on the asymptotic distribution is a good approximation
+    even though the sample size is small.
+
+    >>> res = stats.cramervonmises_2samp(x, y, method='asymptotic')
+    >>> res.statistic, res.pvalue
+    (0.197802197802198, 0.2966041181527128)
+
+    Independent of the method, one would not reject the null hypothesis at the
+    chosen significance level in this example.
+
+    """
+    xa = np.sort(np.asarray(x))
+    ya = np.sort(np.asarray(y))
+
+    if xa.size <= 1 or ya.size <= 1:
+        raise ValueError('x and y must contain at least two observations.')
+    if xa.ndim > 1 or ya.ndim > 1:
+        raise ValueError('The samples must be one-dimensional.')
+    if method not in ['auto', 'exact', 'asymptotic']:
+        raise ValueError('method must be either auto, exact or asymptotic.')
+
+    nx = len(xa)
+    ny = len(ya)
+
+    if method == 'auto':
+        if max(nx, ny) > 20:
+            method = 'asymptotic'
+        else:
+            method = 'exact'
+
+    # get ranks of x and y in the pooled sample
+    z = np.concatenate([xa, ya])
+    # in case of ties, use midrank (see [1])
+    r = scipy.stats.rankdata(z, method='average')
+    rx = r[:nx]
+    ry = r[nx:]
+
+    # compute U (eq. 10 in [2])
+    u = nx * np.sum((rx - np.arange(1, nx+1))**2)
+    u += ny * np.sum((ry - np.arange(1, ny+1))**2)
+
+    # compute T (eq. 9 in [2])
+    k, N = nx*ny, nx + ny
+    t = u / (k*N) - (4*k - 1)/(6*N)
+
+    if method == 'exact':
+        p = _pval_cvm_2samp_exact(u, nx, ny)
+    else:
+        # compute expected value and variance of T (eq. 11 and 14 in [2])
+        et = (1 + 1/N)/6
+        vt = (N+1) * (4*k*N - 3*(nx**2 + ny**2) - 2*k)
+        vt = vt / (45 * N**2 * 4 * k)
+
+    # compute the normalized statistic (eq. 15 in [2])
+        tn = 1/6 + (t - et) / np.sqrt(45 * vt)
+
+        # approximate the distribution of tn with the limiting distribution
+        # of the one-sample test statistic;
+        # if tn < 0.003, _cdf_cvm_inf(tn) < 1.28e-18, so return 1.0 directly
+        if tn < 0.003:
+            p = 1.0
+        else:
+            p = max(0, 1. - _cdf_cvm_inf(tn))
+
+    return CramerVonMisesResult(statistic=t, pvalue=p)
+
+
+class TukeyHSDResult:
+    """Result of `scipy.stats.tukey_hsd`.
+
+    Attributes
+    ----------
+    statistic : float ndarray
+        The computed statistic of the test for each comparison. The element
+        at index ``(i, j)`` is the statistic for the comparison between groups
+        ``i`` and ``j``.
+    pvalue : float ndarray
+        The associated p-value from the studentized range distribution. The
+        element at index ``(i, j)`` is the p-value for the comparison
+        between groups ``i`` and ``j``.
+
+    Notes
+    -----
+    The string representation of this object displays the most recently
+    calculated confidence interval, and if none have been previously
+    calculated, it will evaluate ``confidence_interval()``.
+
+    References
+    ----------
+    .. [1] NIST/SEMATECH e-Handbook of Statistical Methods, "7.4.7.1. Tukey's
+           Method."
+           https://www.itl.nist.gov/div898/handbook/prc/section4/prc471.htm,
+           28 November 2020.
+    """
+
+    def __init__(self, statistic, pvalue, _nobs, _ntreatments, _stand_err):
+        self.statistic = statistic
+        self.pvalue = pvalue
+        self._ntreatments = _ntreatments
+        self._nobs = _nobs
+        self._stand_err = _stand_err
+        self._ci = None
+        self._ci_cl = None
+
+    def __str__(self):
+        # Note: `__str__` prints the confidence intervals from the most
+        # recent call to `confidence_interval`. If it has not been called,
+        # it will be called with the default CL of .95.
+        if self._ci is None:
+            self.confidence_interval(confidence_level=.95)
+        s = ("Tukey's HSD Pairwise Group Comparisons"
+             f" ({self._ci_cl*100:.1f}% Confidence Interval)\n")
+        s += "Comparison  Statistic  p-value  Lower CI  Upper CI\n"
+        for i in range(self.pvalue.shape[0]):
+            for j in range(self.pvalue.shape[0]):
+                if i != j:
+                    s += (f" ({i} - {j}) {self.statistic[i, j]:>10.3f}"
+                          f"{self.pvalue[i, j]:>10.3f}"
+                          f"{self._ci.low[i, j]:>10.3f}"
+                          f"{self._ci.high[i, j]:>10.3f}\n")
+        return s
+
+    def confidence_interval(self, confidence_level=.95):
+        """Compute the confidence interval for the specified confidence level.
+
+        Parameters
+        ----------
+        confidence_level : float, optional
+            Confidence level for the computed confidence interval
+            of the estimated proportion. Default is .95.
+
+        Returns
+        -------
+        ci : ``ConfidenceInterval`` object
+            The object has attributes ``low`` and ``high`` that hold the
+            lower and upper bounds of the confidence intervals for each
+            comparison. The high and low values are accessible for each
+            comparison at index ``(i, j)`` between groups ``i`` and ``j``.
+
+        References
+        ----------
+        .. [1] NIST/SEMATECH e-Handbook of Statistical Methods, "7.4.7.1.
+               Tukey's Method."
+               https://www.itl.nist.gov/div898/handbook/prc/section4/prc471.htm,
+               28 November 2020.
+
+        Examples
+        --------
+        >>> from scipy.stats import tukey_hsd
+        >>> group0 = [24.5, 23.5, 26.4, 27.1, 29.9]
+        >>> group1 = [28.4, 34.2, 29.5, 32.2, 30.1]
+        >>> group2 = [26.1, 28.3, 24.3, 26.2, 27.8]
+        >>> result = tukey_hsd(group0, group1, group2)
+        >>> ci = result.confidence_interval()
+        >>> ci.low
+        array([[-3.649159, -8.249159, -3.909159],
+               [ 0.950841, -3.649159,  0.690841],
+               [-3.389159, -7.989159, -3.649159]])
+        >>> ci.high
+        array([[ 3.649159, -0.950841,  3.389159],
+               [ 8.249159,  3.649159,  7.989159],
+               [ 3.909159, -0.690841,  3.649159]])
+        """
+        # check to see if the supplied confidence level matches that of the
+        # previously computed CI.
+        if (self._ci is not None and self._ci_cl is not None and
+                confidence_level == self._ci_cl):
+            return self._ci
+
+        if not 0 < confidence_level < 1:
+            raise ValueError("Confidence level must be between 0 and 1.")
+        # determine the critical value of the studentized range using the
+        # appropriate confidence level, number of treatments, and degrees
+        # of freedom as determined by the number of data less the number of
+        # treatments. ("Confidence limits for Tukey's method")[1]. Note that
+        # in the cases of unequal sample sizes there will be a criterion for
+        # each group comparison.
+        params = (confidence_level, self._nobs, self._ntreatments - self._nobs)
+        srd = distributions.studentized_range.ppf(*params)
+        # also called maximum critical value, the Tukey criterion is the
+        # studentized range critical value * the square root of mean square
+        # error over the sample size.
+        tukey_criterion = srd * self._stand_err
+        # the confidence levels are determined by the
+        # `mean_differences` +- `tukey_criterion`
+        upper_conf = self.statistic + tukey_criterion
+        lower_conf = self.statistic - tukey_criterion
+        self._ci = ConfidenceInterval(low=lower_conf, high=upper_conf)
+        self._ci_cl = confidence_level
+        return self._ci
+
+
+def _tukey_hsd_iv(args):
+    if len(args) < 2:
+        raise ValueError("There must be more than 1 treatment.")
+    args = [np.asarray(arg) for arg in args]
+    for arg in args:
+        if arg.ndim != 1:
+            raise ValueError("Input samples must be one-dimensional.")
+        if arg.size <= 1:
+            raise ValueError("Input sample size must be greater than one.")
+        if np.isinf(arg).any():
+            raise ValueError("Input samples must be finite.")
+    return args
+
+
+def tukey_hsd(*args):
+    """Perform Tukey's HSD test for equality of means over multiple treatments.
+
+    Tukey's honestly significant difference (HSD) test performs pairwise
+    comparison of means for a set of samples. Whereas ANOVA (e.g. `f_oneway`)
+    assesses whether the true means underlying each sample are identical,
+    Tukey's HSD is a post hoc test used to compare the mean of each sample
+    to the mean of each other sample.
+
+    The null hypothesis is that the distributions underlying the samples all
+    have the same mean. The test statistic, which is computed for every
+    possible pairing of samples, is simply the difference between the sample
+    means. For each pair, the p-value is the probability under the null
+    hypothesis (and other assumptions; see notes) of observing such an extreme
+    value of the statistic, considering that many pairwise comparisons are
+    being performed. Confidence intervals for the difference between each pair
+    of means are also available.
+
+    Parameters
+    ----------
+    sample1, sample2, ... : array_like
+        The sample measurements for each group. There must be at least
+        two arguments.
+
+    Returns
+    -------
+    result : `~scipy.stats._result_classes.TukeyHSDResult` instance
+        The return value is an object with the following attributes:
+
+        statistic : float ndarray
+            The computed statistic of the test for each comparison. The element
+            at index ``(i, j)`` is the statistic for the comparison between
+            groups ``i`` and ``j``.
+        pvalue : float ndarray
+            The computed p-value of the test for each comparison. The element
+            at index ``(i, j)`` is the p-value for the comparison between
+            groups ``i`` and ``j``.
+
+        The object has the following methods:
+
+        confidence_interval(confidence_level=0.95):
+            Compute the confidence interval for the specified confidence level.
+
+    Notes
+    -----
+    The use of this test relies on several assumptions.
+
+    1. The observations are independent within and among groups.
+    2. The observations within each group are normally distributed.
+    3. The distributions from which the samples are drawn have the same finite
+       variance.
+
+    The original formulation of the test was for samples of equal size [6]_.
+    In case of unequal sample sizes, the test uses the Tukey-Kramer method
+    [4]_.
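+
+    For example, groups of different sizes are accepted (the Tukey-Kramer
+    adjustment is then applied); a minimal sketch:
+
+    >>> from scipy.stats import tukey_hsd
+    >>> r = tukey_hsd([24.5, 23.5, 26.4], [28.4, 34.2, 29.5, 32.2], [26.1, 28.3])
+    >>> r.pvalue.shape
+    (3, 3)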
+
+    References
+    ----------
+    .. [1] NIST/SEMATECH e-Handbook of Statistical Methods, "7.4.7.1. Tukey's
+           Method."
+           https://www.itl.nist.gov/div898/handbook/prc/section4/prc471.htm,
+           28 November 2020.
+    .. [2] Abdi, Herve & Williams, Lynne. (2021). "Tukey's Honestly Significant
+           Difference (HSD) Test."
+           https://personal.utdallas.edu/~herve/abdi-HSD2010-pretty.pdf
+    .. [3] "One-Way ANOVA Using SAS PROC ANOVA & PROC GLM." SAS
+           Tutorials, 2007, www.stattutorials.com/SAS/TUTORIAL-PROC-GLM.htm.
+    .. [4] Kramer, Clyde Young. "Extension of Multiple Range Tests to Group
+           Means with Unequal Numbers of Replications." Biometrics, vol. 12,
+           no. 3, 1956, pp. 307-310. JSTOR, www.jstor.org/stable/3001469.
+           Accessed 25 May 2021.
+    .. [5] NIST/SEMATECH e-Handbook of Statistical Methods, "7.4.3.3.
+           The ANOVA table and tests of hypotheses about means"
+           https://www.itl.nist.gov/div898/handbook/prc/section4/prc433.htm,
+           2 June 2021.
+    .. [6] Tukey, John W. "Comparing Individual Means in the Analysis of
+           Variance." Biometrics, vol. 5, no. 2, 1949, pp. 99-114. JSTOR,
+           www.jstor.org/stable/3001913. Accessed 14 June 2021.
+
+
+    Examples
+    --------
+    Here are some data comparing the time to relief of three brands of
+    headache medicine, reported in minutes. Data adapted from [3]_.
+
+    >>> import numpy as np
+    >>> from scipy.stats import tukey_hsd
+    >>> group0 = [24.5, 23.5, 26.4, 27.1, 29.9]
+    >>> group1 = [28.4, 34.2, 29.5, 32.2, 30.1]
+    >>> group2 = [26.1, 28.3, 24.3, 26.2, 27.8]
+
+    We would like to see if the means between any of the groups are
+    significantly different. First, visually examine a box and whisker plot.
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots(1, 1)
+    >>> ax.boxplot([group0, group1, group2])
+    >>> ax.set_xticklabels(["group0", "group1", "group2"]) # doctest: +SKIP
+    >>> ax.set_ylabel("mean") # doctest: +SKIP
+    >>> plt.show()
+
+    From the box and whisker plot, we can see overlap among the interquartile
+    ranges of the groups, but we can apply the ``tukey_hsd`` test to
+    determine whether the differences between means are significant. We
+    set a significance level of .05 to reject the null hypothesis.
+
+    >>> res = tukey_hsd(group0, group1, group2)
+    >>> print(res)
+    Tukey's HSD Pairwise Group Comparisons (95.0% Confidence Interval)
+    Comparison  Statistic  p-value   Lower CI   Upper CI
+    (0 - 1)     -4.600      0.014     -8.249     -0.951
+    (0 - 2)     -0.260      0.980     -3.909      3.389
+    (1 - 0)      4.600      0.014      0.951      8.249
+    (1 - 2)      4.340      0.020      0.691      7.989
+    (2 - 0)      0.260      0.980     -3.389      3.909
+    (2 - 1)     -4.340      0.020     -7.989     -0.691
+
+    The null hypothesis is that each group has the same mean. The p-values for
+    the comparisons between ``group0`` and ``group1`` as well as ``group1`` and
+    ``group2`` do not exceed .05, so we reject the null hypothesis that they
+    have the same means. The p-value of the comparison between ``group0``
+    and ``group2`` exceeds .05, so we cannot reject the null hypothesis that
+    there is no significant difference between their means.
+
+    We can also compute the confidence interval associated with our chosen
+    confidence level.
+
+    >>> group0 = [24.5, 23.5, 26.4, 27.1, 29.9]
+    >>> group1 = [28.4, 34.2, 29.5, 32.2, 30.1]
+    >>> group2 = [26.1, 28.3, 24.3, 26.2, 27.8]
+    >>> result = tukey_hsd(group0, group1, group2)
+    >>> conf = result.confidence_interval(confidence_level=.99)
+    >>> for ((i, j), l) in np.ndenumerate(conf.low):
+    ...     # filter out self comparisons
+    ...     if i != j:
+    ...         h = conf.high[i,j]
+    ...         print(f"({i} - {j}) {l:>6.3f} {h:>6.3f}")
+    (0 - 1) -9.480  0.280
+    (0 - 2) -5.140  4.620
+    (1 - 0) -0.280  9.480
+    (1 - 2) -0.540  9.220
+    (2 - 0) -4.620  5.140
+    (2 - 1) -9.220  0.540
+    """
+    args = _tukey_hsd_iv(args)
+    ntreatments = len(args)
+    means = np.asarray([np.mean(arg) for arg in args])
+    nsamples_treatments = np.asarray([a.size for a in args])
+    nobs = np.sum(nsamples_treatments)
+
+    # determine mean square error [5]. Note that this is sometimes called
+    # mean square error within.
+    mse = (np.sum([np.var(arg, ddof=1) for arg in args] *
+                  (nsamples_treatments - 1)) / (nobs - ntreatments))
+
+    # The calculation of the standard error differs when treatments differ in
+    # size. See ("Unequal sample sizes")[1].
+    if np.unique(nsamples_treatments).size == 1:
+        # all input groups are the same length, so only one value needs to be
+        # calculated [1].
+        normalize = 2 / nsamples_treatments[0]
+    else:
+        # to compare groups of differing sizes, we must compute a variance
+        # value for each individual comparison. Use broadcasting to get the
+        # resulting matrix. [3], verified against [4] (page 308).
+        normalize = 1 / nsamples_treatments + 1 / nsamples_treatments[None].T
+
+    # the standard error is used in the computation of the tukey criterion and
+    # finding the p-values.
+    stand_err = np.sqrt(normalize * mse / 2)
+
+    # the mean difference is the test statistic.
+    mean_differences = means[None].T - means
+
+    # Calculate the t-statistic to use within the survival function of the
+    # studentized range to get the p-value.
+    t_stat = np.abs(mean_differences) / stand_err
+
+    params = t_stat, ntreatments, nobs - ntreatments
+    pvalues = distributions.studentized_range.sf(*params)
+
+    return TukeyHSDResult(mean_differences, pvalues, ntreatments,
+                          nobs, stand_err)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_kde.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_kde.py
new file mode 100644
index 00000000..4da9180c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_kde.py
@@ -0,0 +1,725 @@
+#-------------------------------------------------------------------------------
+#
+#  Define classes for (uni/multi)-variate kernel density estimation.
+#
+#  Currently, only Gaussian kernels are implemented.
+#
+#  Written by: Robert Kern
+#
+#  Date: 2004-08-09
+#
+#  Modified: 2005-02-10 by Robert Kern.
+#              Contributed to SciPy
+#            2005-10-07 by Robert Kern.
+#              Some fixes to match the new scipy_core
+#
+#  Copyright 2004-2005 by Enthought, Inc.
+#
+#-------------------------------------------------------------------------------
+
+# Standard library imports.
+import warnings
+
+# SciPy imports.
+from scipy import linalg, special
+from scipy._lib._util import check_random_state
+
+from numpy import (asarray, atleast_2d, reshape, zeros, newaxis, exp, pi,
+                   sqrt, ravel, power, atleast_1d, squeeze, sum, transpose,
+                   ones, cov)
+import numpy as np
+
+# Local imports.
+from . import _mvn
+from ._stats import gaussian_kernel_estimate, gaussian_kernel_estimate_log
+
+
+__all__ = ['gaussian_kde']
+
+
+class gaussian_kde:
+    """Representation of a kernel-density estimate using Gaussian kernels.
+
+    Kernel density estimation is a way to estimate the probability density
+    function (PDF) of a random variable in a non-parametric way.
+    `gaussian_kde` works for both uni-variate and multi-variate data.   It
+    includes automatic bandwidth determination.  The estimation works best for
+    a unimodal distribution; bimodal or multi-modal distributions tend to be
+    oversmoothed.
+
+    Parameters
+    ----------
+    dataset : array_like
+        Datapoints to estimate from. In case of univariate data this is a 1-D
+        array, otherwise a 2-D array with shape (# of dims, # of data).
+    bw_method : str, scalar or callable, optional
+        The method used to calculate the estimator bandwidth.  This can be
+        'scott', 'silverman', a scalar constant or a callable.  If a scalar,
+        this will be used directly as `kde.factor`.  If a callable, it should
+        take a `gaussian_kde` instance as only parameter and return a scalar.
+        If None (default), 'scott' is used.  See Notes for more details.
+    weights : array_like, optional
+        Weights of datapoints, a 1-D array with one entry per datapoint.
+        If None (default), the samples are assumed to be equally weighted.
+
+    Attributes
+    ----------
+    dataset : ndarray
+        The dataset with which `gaussian_kde` was initialized.
+    d : int
+        Number of dimensions.
+    n : int
+        Number of datapoints.
+    neff : int
+        Effective number of datapoints.
+
+        .. versionadded:: 1.2.0
+    factor : float
+        The bandwidth factor, obtained from `kde.covariance_factor`. The square
+        of `kde.factor` multiplies the covariance matrix of the data in the kde
+        estimation.
+    covariance : ndarray
+        The covariance matrix of `dataset`, scaled by the calculated bandwidth
+        (`kde.factor`).
+    inv_cov : ndarray
+        The inverse of `covariance`.
+
+    Methods
+    -------
+    evaluate
+    __call__
+    integrate_gaussian
+    integrate_box_1d
+    integrate_box
+    integrate_kde
+    pdf
+    logpdf
+    resample
+    set_bandwidth
+    covariance_factor
+
+    Notes
+    -----
+    Bandwidth selection strongly influences the estimate obtained from the KDE
+    (much more so than the actual shape of the kernel).  Bandwidth selection
+    can be done by a "rule of thumb", by cross-validation, by "plug-in
+    methods" or by other means; see [3]_, [4]_ for reviews.  `gaussian_kde`
+    uses a rule of thumb, the default is Scott's Rule.
+
+    Scott's Rule [1]_, implemented as `scotts_factor`, is::
+
+        n**(-1./(d+4)),
+
+    with ``n`` the number of data points and ``d`` the number of dimensions.
+    In the case of unequally weighted points, `scotts_factor` becomes::
+
+        neff**(-1./(d+4)),
+
+    with ``neff`` the effective number of datapoints.
+
+    Silverman's Rule [2]_, implemented as `silverman_factor`, is::
+
+        (n * (d + 2) / 4.)**(-1. / (d + 4)).
+
+    or in the case of unequally weighted points::
+
+        (neff * (d + 2) / 4.)**(-1. / (d + 4)).
+
+    Good general descriptions of kernel density estimation can be found in [1]_
+    and [2]_, the mathematics for this multi-dimensional implementation can be
+    found in [1]_.
+
+    With a set of weighted samples, the effective number of datapoints ``neff``
+    is defined by::
+
+        neff = sum(weights)^2 / sum(weights^2)
+
+    as detailed in [5]_.
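+
+    As a quick illustrative check (with equal weights, so ``neff == n``),
+    Scott's factor can be verified directly:
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> rng = np.random.default_rng(1234)
+    >>> data = rng.standard_normal((2, 50))    # d = 2 dimensions, n = 50 points
+    >>> kde = stats.gaussian_kde(data)         # default bandwidth: Scott's rule
+    >>> matches = np.isclose(kde.factor, kde.n ** (-1. / (kde.d + 4)))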
+
+    `gaussian_kde` does not currently support data that lies in a
+    lower-dimensional subspace of the space in which it is expressed. For such
+    data, consider performing principal component analysis / dimensionality
+    reduction and using `gaussian_kde` with the transformed data.
+
+    References
+    ----------
+    .. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and
+           Visualization", John Wiley & Sons, New York, Chicester, 1992.
+    .. [2] B.W. Silverman, "Density Estimation for Statistics and Data
+           Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
+           Chapman and Hall, London, 1986.
+    .. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A
+           Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
+    .. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel
+           conditional density estimation", Computational Statistics & Data
+           Analysis, Vol. 36, pp. 279-298, 2001.
+    .. [5] Gray P. G., 1969, Journal of the Royal Statistical Society.
+           Series A (General), 132, 272
+
+    Examples
+    --------
+    Generate some random two-dimensional data:
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> def measure(n):
+    ...     "Measurement model, return two coupled measurements."
+    ...     m1 = np.random.normal(size=n)
+    ...     m2 = np.random.normal(scale=0.5, size=n)
+    ...     return m1+m2, m1-m2
+
+    >>> m1, m2 = measure(2000)
+    >>> xmin = m1.min()
+    >>> xmax = m1.max()
+    >>> ymin = m2.min()
+    >>> ymax = m2.max()
+
+    Perform a kernel density estimate on the data:
+
+    >>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
+    >>> positions = np.vstack([X.ravel(), Y.ravel()])
+    >>> values = np.vstack([m1, m2])
+    >>> kernel = stats.gaussian_kde(values)
+    >>> Z = np.reshape(kernel(positions).T, X.shape)
+
+    Plot the results:
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots()
+    >>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
+    ...           extent=[xmin, xmax, ymin, ymax])
+    >>> ax.plot(m1, m2, 'k.', markersize=2)
+    >>> ax.set_xlim([xmin, xmax])
+    >>> ax.set_ylim([ymin, ymax])
+    >>> plt.show()
+
+    """
+    def __init__(self, dataset, bw_method=None, weights=None):
+        self.dataset = atleast_2d(asarray(dataset))
+        if not self.dataset.size > 1:
+            raise ValueError("`dataset` input should have multiple elements.")
+
+        self.d, self.n = self.dataset.shape
+
+        if weights is not None:
+            self._weights = atleast_1d(weights).astype(float)
+            self._weights /= sum(self._weights)
+            if self.weights.ndim != 1:
+                raise ValueError("`weights` input should be one-dimensional.")
+            if len(self._weights) != self.n:
+                raise ValueError("`weights` input should be of length n")
+            self._neff = 1/sum(self._weights**2)
+
+        # This can be converted to a warning once gh-10205 is resolved
+        if self.d > self.n:
+            msg = ("Number of dimensions is greater than number of samples. "
+                   "This results in a singular data covariance matrix, which "
+                   "cannot be treated using the algorithms implemented in "
+                   "`gaussian_kde`. Note that `gaussian_kde` interprets each "
+                   "*column* of `dataset` to be a point; consider transposing "
+                   "the input to `dataset`.")
+            raise ValueError(msg)
+
+        try:
+            self.set_bandwidth(bw_method=bw_method)
+        except linalg.LinAlgError as e:
+            msg = ("The data appears to lie in a lower-dimensional subspace "
+                   "of the space in which it is expressed. This has resulted "
+                   "in a singular data covariance matrix, which cannot be "
+                   "treated using the algorithms implemented in "
+                   "`gaussian_kde`. Consider performing principle component "
+                   "analysis / dimensionality reduction and using "
+                   "`gaussian_kde` with the transformed data.")
+            raise linalg.LinAlgError(msg) from e
+
+    def evaluate(self, points):
+        """Evaluate the estimated pdf on a set of points.
+
+        Parameters
+        ----------
+        points : (# of dimensions, # of points)-array
+            Alternatively, a (# of dimensions,) vector can be passed in and
+            treated as a single point.
+
+        Returns
+        -------
+        values : (# of points,)-array
+            The values at each point.
+
+        Raises
+        ------
+        ValueError : if the dimensionality of the input points is different
+                     from the dimensionality of the KDE.
+
+        """
+        points = atleast_2d(asarray(points))
+
+        d, m = points.shape
+        if d != self.d:
+            if d == 1 and m == self.d:
+                # points was passed in as a row vector
+                points = reshape(points, (self.d, 1))
+                m = 1
+            else:
+                msg = "points have dimension %s, dataset has dimension %s" % (d,
+                    self.d)
+                raise ValueError(msg)
+
+        output_dtype, spec = _get_output_dtype(self.covariance, points)
+        result = gaussian_kernel_estimate[spec](
+            self.dataset.T, self.weights[:, None],
+            points.T, self.cho_cov, output_dtype)
+
+        return result[:, 0]
+
+    __call__ = evaluate
+
+    def integrate_gaussian(self, mean, cov):
+        """
+        Multiply estimated density by a multivariate Gaussian and integrate
+        over the whole space.
+
+        Parameters
+        ----------
+        mean : array_like
+            A 1-D array, specifying the mean of the Gaussian.
+        cov : array_like
+            A 2-D array, specifying the covariance matrix of the Gaussian.
+
+        Returns
+        -------
+        result : scalar
+            The value of the integral.
+
+        Raises
+        ------
+        ValueError
+            If the mean or covariance of the input Gaussian differs from
+            the KDE's dimensionality.
+
+        """
+        mean = atleast_1d(squeeze(mean))
+        cov = atleast_2d(cov)
+
+        if mean.shape != (self.d,):
+            raise ValueError("mean does not have dimension %s" % self.d)
+        if cov.shape != (self.d, self.d):
+            raise ValueError("covariance does not have dimension %s" % self.d)
+
+        # make mean a column vector
+        mean = mean[:, newaxis]
+
+        sum_cov = self.covariance + cov
+
+        # This will raise LinAlgError if the new cov matrix is not s.p.d.
+        # cho_factor returns (ndarray, bool), where the bool indicates
+        # whether the ndarray is lower (True) or upper (False) triangular
+        sum_cov_chol = linalg.cho_factor(sum_cov)
+
+        diff = self.dataset - mean
+        tdiff = linalg.cho_solve(sum_cov_chol, diff)
+
+        sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
+        norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det
+
+        energies = sum(diff * tdiff, axis=0) / 2.0
+        result = sum(exp(-energies)*self.weights, axis=0) / norm_const
+
+        return result
+
+    def integrate_box_1d(self, low, high):
+        """
+        Computes the integral of a 1D pdf between two bounds.
+
+        Parameters
+        ----------
+        low : scalar
+            Lower bound of integration.
+        high : scalar
+            Upper bound of integration.
+
+        Returns
+        -------
+        value : scalar
+            The result of the integral.
+
+        Raises
+        ------
+        ValueError
+            If the KDE is over more than one dimension.
+
+        """
+        if self.d != 1:
+            raise ValueError("integrate_box_1d() only handles 1D pdfs")
+
+        stdev = ravel(sqrt(self.covariance))[0]
+
+        normalized_low = ravel((low - self.dataset) / stdev)
+        normalized_high = ravel((high - self.dataset) / stdev)
+
+        value = np.sum(self.weights*(
+                        special.ndtr(normalized_high) -
+                        special.ndtr(normalized_low)))
+        return value
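+
+    # Illustrative check (editorial, not original source; assumes numpy
+    # imported as np): the estimated pdf integrates to one over the real line.
+    #
+    #     >>> kde = gaussian_kde([1., 2., 3.])
+    #     >>> kde.integrate_box_1d(-np.inf, np.inf)   # ~1.0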
+
+    def integrate_box(self, low_bounds, high_bounds, maxpts=None):
+        """Computes the integral of a pdf over a rectangular interval.
+
+        Parameters
+        ----------
+        low_bounds : array_like
+            A 1-D array containing the lower bounds of integration.
+        high_bounds : array_like
+            A 1-D array containing the upper bounds of integration.
+        maxpts : int, optional
+            The maximum number of points to use for integration.
+
+        Returns
+        -------
+        value : scalar
+            The result of the integral.
+
+        """
+        if maxpts is not None:
+            extra_kwds = {'maxpts': maxpts}
+        else:
+            extra_kwds = {}
+
+        value, inform = _mvn.mvnun_weighted(low_bounds, high_bounds,
+                                           self.dataset, self.weights,
+                                           self.covariance, **extra_kwds)
+        if inform:
+            msg = ('An integral in _mvn.mvnun requires more points than %s' %
+                   (self.d * 1000))
+            warnings.warn(msg)
+
+        return value
+
+    def integrate_kde(self, other):
+        """
+        Computes the integral of the product of this kernel density estimate
+        with another.
+
+        Parameters
+        ----------
+        other : gaussian_kde instance
+            The other kde.
+
+        Returns
+        -------
+        value : scalar
+            The result of the integral.
+
+        Raises
+        ------
+        ValueError
+            If the KDEs have different dimensionality.
+
+        """
+        if other.d != self.d:
+            raise ValueError("KDEs are not the same dimensionality")
+
+        # we want to iterate over the smallest number of points
+        if other.n < self.n:
+            small = other
+            large = self
+        else:
+            small = self
+            large = other
+
+        sum_cov = small.covariance + large.covariance
+        sum_cov_chol = linalg.cho_factor(sum_cov)
+        result = 0.0
+        for i in range(small.n):
+            mean = small.dataset[:, i, newaxis]
+            diff = large.dataset - mean
+            tdiff = linalg.cho_solve(sum_cov_chol, diff)
+
+            energies = sum(diff * tdiff, axis=0) / 2.0
+            result += sum(exp(-energies)*large.weights, axis=0)*small.weights[i]
+
+        sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
+        norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det
+
+        result /= norm_const
+
+        return result
+
+    def resample(self, size=None, seed=None):
+        """Randomly sample a dataset from the estimated pdf.
+
+        Parameters
+        ----------
+        size : int, optional
+            The number of samples to draw.  If not provided, then the size is
+            the same as the effective number of samples in the underlying
+            dataset.
+        seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
+            If `seed` is None (or `np.random`), the `numpy.random.RandomState`
+            singleton is used.
+            If `seed` is an int, a new ``RandomState`` instance is used,
+            seeded with `seed`.
+            If `seed` is already a ``Generator`` or ``RandomState`` instance then
+            that instance is used.
+
+        Returns
+        -------
+        resample : (self.d, `size`) ndarray
+            The sampled dataset.
+
+        """
+        if size is None:
+            size = int(self.neff)
+
+        random_state = check_random_state(seed)
+        norm = transpose(random_state.multivariate_normal(
+            zeros((self.d,), float), self.covariance, size=size
+        ))
+        indices = random_state.choice(self.n, size=size, p=self.weights)
+        means = self.dataset[:, indices]
+
+        return means + norm
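+
+    # Minimal resampling sketch (editorial, not original source):
+    #
+    #     >>> data = np.random.default_rng(0).normal(size=(1, 100))
+    #     >>> kde = gaussian_kde(data)
+    #     >>> new = kde.resample(size=10, seed=42)   # ndarray of shape (1, 10)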
+
+    def scotts_factor(self):
+        """Compute Scott's factor.
+
+        Returns
+        -------
+        s : float
+            Scott's factor.
+        """
+        return power(self.neff, -1./(self.d+4))
+
+    def silverman_factor(self):
+        """Compute the Silverman factor.
+
+        Returns
+        -------
+        s : float
+            The Silverman factor.
+        """
+        return power(self.neff*(self.d+2.0)/4.0, -1./(self.d+4))
+
+    #  Default method to calculate bandwidth, can be overwritten by subclass
+    covariance_factor = scotts_factor
+    covariance_factor.__doc__ = """Computes the coefficient (`kde.factor`) that
+        multiplies the data covariance matrix to obtain the kernel covariance
+        matrix. The default is `scotts_factor`.  A subclass can overwrite this
+        method to provide a different method, or set it through a call to
+        `kde.set_bandwidth`."""
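+
+    # Sketch of the subclass override mentioned above (editorial, illustrative
+    # only):
+    #
+    #     class silverman_kde(gaussian_kde):
+    #         def covariance_factor(self):
+    #             return self.silverman_factor()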
+
+    def set_bandwidth(self, bw_method=None):
+        """Compute the estimator bandwidth with given method.
+
+        The new bandwidth calculated after a call to `set_bandwidth` is used
+        for subsequent evaluations of the estimated density.
+
+        Parameters
+        ----------
+        bw_method : str, scalar or callable, optional
+            The method used to calculate the estimator bandwidth.  This can be
+            'scott', 'silverman', a scalar constant or a callable.  If a
+            scalar, this will be used directly as `kde.factor`.  If a callable,
+            it should take a `gaussian_kde` instance as only parameter and
+            return a scalar.  If None (default), nothing happens; the current
+            `kde.covariance_factor` method is kept.
+
+        Notes
+        -----
+        .. versionadded:: 0.11
+
+        Examples
+        --------
+        >>> import numpy as np
+        >>> import scipy.stats as stats
+        >>> x1 = np.array([-7, -5, 1, 4, 5.])
+        >>> kde = stats.gaussian_kde(x1)
+        >>> xs = np.linspace(-10, 10, num=50)
+        >>> y1 = kde(xs)
+        >>> kde.set_bandwidth(bw_method='silverman')
+        >>> y2 = kde(xs)
+        >>> kde.set_bandwidth(bw_method=kde.factor / 3.)
+        >>> y3 = kde(xs)
+
+        >>> import matplotlib.pyplot as plt
+        >>> fig, ax = plt.subplots()
+        >>> ax.plot(x1, np.full(x1.shape, 1 / (4. * x1.size)), 'bo',
+        ...         label='Data points (rescaled)')
+        >>> ax.plot(xs, y1, label='Scott (default)')
+        >>> ax.plot(xs, y2, label='Silverman')
+        >>> ax.plot(xs, y3, label='Const (1/3 * Silverman)')
+        >>> ax.legend()
+        >>> plt.show()
+
+        """
+        if bw_method is None:
+            pass
+        elif bw_method == 'scott':
+            self.covariance_factor = self.scotts_factor
+        elif bw_method == 'silverman':
+            self.covariance_factor = self.silverman_factor
+        elif np.isscalar(bw_method) and not isinstance(bw_method, str):
+            self._bw_method = 'use constant'
+            self.covariance_factor = lambda: bw_method
+        elif callable(bw_method):
+            self._bw_method = bw_method
+            self.covariance_factor = lambda: self._bw_method(self)
+        else:
+            msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
+                  "or a callable."
+            raise ValueError(msg)
+
+        self._compute_covariance()
+
+    def _compute_covariance(self):
+        """Computes the covariance matrix for each Gaussian kernel using
+        covariance_factor().
+        """
+        self.factor = self.covariance_factor()
+        # Cache covariance and Cholesky decomp of covariance
+        if not hasattr(self, '_data_cho_cov'):
+            self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,
+                                               bias=False,
+                                               aweights=self.weights))
+            self._data_cho_cov = linalg.cholesky(self._data_covariance,
+                                                 lower=True)
+
+        self.covariance = self._data_covariance * self.factor**2
+        self.cho_cov = (self._data_cho_cov * self.factor).astype(np.float64)
+        self.log_det = 2*np.log(np.diag(self.cho_cov
+                                        * np.sqrt(2*pi))).sum()
+
+    @property
+    def inv_cov(self):
+        # Re-compute from scratch each time because I'm not sure how this is
+        # used in the wild. (Perhaps users change the `dataset`, since it's
+        # not a private attribute?) `_compute_covariance` used to recalculate
+        # all these, so we'll recalculate everything now that this is a
+        # property.
+        self.factor = self.covariance_factor()
+        self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,
+                                           bias=False, aweights=self.weights))
+        return linalg.inv(self._data_covariance) / self.factor**2
+
+    def pdf(self, x):
+        """
+        Evaluate the estimated pdf on a provided set of points.
+
+        Notes
+        -----
+        This is an alias for `gaussian_kde.evaluate`.  See the ``evaluate``
+        docstring for more details.
+
+        """
+        return self.evaluate(x)
+
+    def logpdf(self, x):
+        """
+        Evaluate the log of the estimated pdf on a provided set of points.
+        """
+        points = atleast_2d(x)
+
+        d, m = points.shape
+        if d != self.d:
+            if d == 1 and m == self.d:
+                # points was passed in as a row vector
+                points = reshape(points, (self.d, 1))
+                m = 1
+            else:
+                msg = (f"points have dimension {d}, "
+                       f"dataset has dimension {self.d}")
+                raise ValueError(msg)
+
+        output_dtype, spec = _get_output_dtype(self.covariance, points)
+        result = gaussian_kernel_estimate_log[spec](
+            self.dataset.T, self.weights[:, None],
+            points.T, self.cho_cov, output_dtype)
+
+        return result[:, 0]
+
+    def marginal(self, dimensions):
+        """Return a marginal KDE distribution
+
+        Parameters
+        ----------
+        dimensions : int or 1-d array_like
+            The dimensions of the multivariate distribution corresponding
+            with the marginal variables, that is, the indices of the dimensions
+            that are being retained. The other dimensions are marginalized out.
+
+        Returns
+        -------
+        marginal_kde : gaussian_kde
+            An object representing the marginal distribution.
+
+        Notes
+        -----
+        .. versionadded:: 1.10.0
+
+        """
+
+        dims = np.atleast_1d(dimensions)
+
+        if not np.issubdtype(dims.dtype, np.integer):
+            msg = ("Elements of `dimensions` must be integers - the indices "
+                   "of the marginal variables being retained.")
+            raise ValueError(msg)
+
+        n = len(self.dataset)  # number of dimensions
+        original_dims = dims.copy()
+
+        dims[dims < 0] = n + dims[dims < 0]
+
+        if len(np.unique(dims)) != len(dims):
+            msg = ("All elements of `dimensions` must be unique.")
+            raise ValueError(msg)
+
+        i_invalid = (dims < 0) | (dims >= n)
+        if np.any(i_invalid):
+            msg = (f"Dimensions {original_dims[i_invalid]} are invalid "
+                   f"for a distribution in {n} dimensions.")
+            raise ValueError(msg)
+
+        dataset = self.dataset[dims]
+        weights = self.weights
+
+        return gaussian_kde(dataset, bw_method=self.covariance_factor(),
+                            weights=weights)
+
+    @property
+    def weights(self):
+        try:
+            return self._weights
+        except AttributeError:
+            self._weights = ones(self.n)/self.n
+            return self._weights
+
+    @property
+    def neff(self):
+        try:
+            return self._neff
+        except AttributeError:
+            self._neff = 1/sum(self.weights**2)
+            return self._neff
+
+
+def _get_output_dtype(covariance, points):
+    """
+    Calculates the output dtype and the "spec" (=C type name).
+
+    This was necessary in order to deal with the fused types in the Cython
+    routine `gaussian_kernel_estimate`. See gh-10824 for details.
+    """
+    output_dtype = np.common_type(covariance, points)
+    itemsize = np.dtype(output_dtype).itemsize
+    if itemsize == 4:
+        spec = 'float'
+    elif itemsize == 8:
+        spec = 'double'
+    elif itemsize in (12, 16):
+        spec = 'long double'
+    else:
+        raise ValueError(
+                f"{output_dtype} has unexpected item size: {itemsize}"
+            )
+
+    return output_dtype, spec
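+
+# Editorial note (illustrative): float32 inputs give itemsize 4 -> 'float',
+# float64 gives 8 -> 'double', and np.longdouble gives 12 or 16 (platform
+# dependent) -> 'long double', matching the Cython fused-type specializations.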
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_ksstats.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_ksstats.py
new file mode 100644
index 00000000..37cb9568
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_ksstats.py
@@ -0,0 +1,596 @@
+# Compute the two-sided one-sample Kolmogorov-Smirnov Prob(Dn <= d) where:
+#    D_n = sup_x{|F_n(x) - F(x)|},
+#    F_n(x) is the empirical CDF for a sample of size n {x_i: i=1,...,n},
+#    F(x) is the CDF of a probability distribution.
+#
+# Exact methods:
+# Prob(D_n >= d) can be computed via a matrix algorithm of Durbin[1]
+#   or a recursion algorithm due to Pomeranz[2].
+# Marsaglia, Tsang & Wang[3] gave a computation-efficient way to perform
+#   the Durbin algorithm.
+#   D_n >= d <==>  D_n+ >= d or D_n- >= d (the one-sided K-S statistics), hence
+#   Prob(D_n >= d) = 2*Prob(D_n+ >= d) - Prob(D_n+ >= d and D_n- >= d).
+#   For d > 0.5, the latter intersection probability is 0.
+#
+# Approximate methods:
+# For d close to 0.5, ignoring that intersection term may still give a
+#   reasonable approximation.
+# Li-Chien[4] and Korolyuk[5] gave an asymptotic formula extending
+# Kolmogorov's initial asymptotic, suitable for large d. (See
+#   scipy.special.kolmogorov for that asymptotic)
+# Pelz-Good[6] used the functional equation for Jacobi theta functions to
+#   transform the Li-Chien/Korolyuk formula to produce a computational formula
+#   suitable for small d.
+#
+# Simard and L'Ecuyer[7] provided an algorithm to decide when to use each of
+#   the above approaches and it is that which is used here.
+#
+# Other approaches:
+# Carvalho[8] optimizes Durbin's matrix algorithm for large values of d.
+# Moscovich and Nadler[9] use FFTs to compute the convolutions.
+
+# References:
+# [1] Durbin J (1968).
+#     "The Probability that the Sample Distribution Function Lies Between Two
+#     Parallel Straight Lines."
+#     Annals of Mathematical Statistics, 39, 398-411.
+# [2] Pomeranz J (1974).
+#     "Exact Cumulative Distribution of the Kolmogorov-Smirnov Statistic for
+#     Small Samples (Algorithm 487)."
+#     Communications of the ACM, 17(12), 703-704.
+# [3] Marsaglia G, Tsang WW, Wang J (2003).
+#     "Evaluating Kolmogorov's Distribution."
+#     Journal of Statistical Software, 8(18), 1-4.
+# [4] Li-Chien, C. (1956).
+#     "On the exact distribution of the statistics of A. N. Kolmogorov and
+#     their asymptotic expansion."
+#     Acta Mathematica Sinica, 6, 55-81.
+# [5] Korolyuk, V. S. (1960).
+#     "Asymptotic analysis of the distribution of the maximum deviation in
+#     the Bernoulli scheme."
+#     Theor. Probability Appl., 4, 339-366.
+# [6] Pelz W, Good IJ (1976).
+#     "Approximating the Lower Tail-areas of the Kolmogorov-Smirnov One-sample
+#     Statistic."
+#     Journal of the Royal Statistical Society, Series B, 38(2), 152-156.
+# [7] Simard, R., L'Ecuyer, P. (2011)
+#     "Computing the Two-Sided Kolmogorov-Smirnov Distribution",
+#     Journal of Statistical Software, Vol 39, 11, 1-18.
+# [8] Carvalho, Luis (2015)
+#     "An Improved Evaluation of Kolmogorov's Distribution"
+#     Journal of Statistical Software, Code Snippets; Vol 65(3), 1-8.
+# [9] Amit Moscovich, Boaz Nadler (2017)
+#     "Fast calculation of boundary crossing probabilities for Poisson
+#     processes",
+#     Statistics & Probability Letters, Vol 123, 177-182.
+
+
+import numpy as np
+import scipy.special
+import scipy.special._ufuncs as scu
+from scipy._lib._finite_differences import _derivative
+
+_E128 = 128
+_EP128 = np.ldexp(np.longdouble(1), _E128)
+_EM128 = np.ldexp(np.longdouble(1), -_E128)
+
+_SQRT2PI = np.sqrt(2 * np.pi)
+_LOG_2PI = np.log(2 * np.pi)
+_MIN_LOG = -708
+_SQRT3 = np.sqrt(3)
+_PI_SQUARED = np.pi ** 2
+_PI_FOUR = np.pi ** 4
+_PI_SIX = np.pi ** 6
+
+# [Lifted from _loggamma.pxd.] If B_m are the Bernoulli numbers,
+# then Stirling coeffs are B_{2j}/(2j)/(2j-1) for j=8,...1.
+_STIRLING_COEFFS = [-2.955065359477124183e-2, 6.4102564102564102564e-3,
+                    -1.9175269175269175269e-3, 8.4175084175084175084e-4,
+                    -5.952380952380952381e-4, 7.9365079365079365079e-4,
+                    -2.7777777777777777778e-3, 8.3333333333333333333e-2]
+
+def _log_nfactorial_div_n_pow_n(n):
+    # Computes log(n! / n**n)
+    #    = log((n-1)! / n**(n-1))
+    # Uses Stirling's approximation, but removes n*log(n) up-front to
+    # avoid subtractive cancellation.
+    #    = log(n)/2 - n + log(sqrt(2pi)) + sum B_{2j}/(2j)/(2j-1)/n**(2j-1)
+    rn = 1.0/n
+    return np.log(n)/2 - n + _LOG_2PI/2 + rn * np.polyval(_STIRLING_COEFFS, rn/n)
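+
+# Quick sanity check (editorial, illustrative): for n = 5,
+# n!/n**n = 120/3125 = 0.0384, and exp(_log_nfactorial_div_n_pow_n(5))
+# reproduces this value to near machine precision.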
+
+
+def _clip_prob(p):
+    """Clips a probability to the range 0 <= p <= 1."""
+    return np.clip(p, 0.0, 1.0)
+
+
+def _select_and_clip_prob(cdfprob, sfprob, cdf=True):
+    """Selects either the CDF or SF, and then clips to range 0<=p<=1."""
+    p = np.where(cdf, cdfprob, sfprob)
+    return _clip_prob(p)
+
+
+def _kolmogn_DMTW(n, d, cdf=True):
+    r"""Computes the Kolmogorov CDF:  Pr(D_n <= d) using the MTW approach to
+    the Durbin matrix algorithm.
+
+    Durbin (1968); Marsaglia, Tsang, Wang (2003). [1], [3].
+    """
+    # Write d = (k-h)/n, where k is positive integer and 0 <= h < 1
+    # Generate initial matrix H of size m*m where m=(2k-1)
+    # Compute k-th row of (n!/n^n) * H^n, scaling intermediate results.
+    # Requires memory O(m^2) and computation O(m^2 log(n)).
+    # Most suitable for small m.
+
+    if d >= 1.0:
+        return _select_and_clip_prob(1.0, 0.0, cdf)
+    nd = n * d
+    if nd <= 0.5:
+        return _select_and_clip_prob(0.0, 1.0, cdf)
+    k = int(np.ceil(nd))
+    h = k - nd
+    m = 2 * k - 1
+
+    H = np.zeros([m, m])
+
+    # Initialize: v is first column (and last row) of H
+    #  v[j] = (1-h^(j+1))/(j+1)!  (except for v[-1])
+    #  w[j] = 1/(j)!
+    # q = k-th row of H (actually i!/n^i*H^i)
+    intm = np.arange(1, m + 1)
+    v = 1.0 - h ** intm
+    w = np.empty(m)
+    fac = 1.0
+    for j in intm:
+        w[j - 1] = fac
+        fac /= j  # This might underflow.  Isn't a problem.
+        v[j - 1] *= fac
+    tt = max(2 * h - 1.0, 0)**m - 2*h**m
+    v[-1] = (1.0 + tt) * fac
+
+    for i in range(1, m):
+        H[i - 1:, i] = w[:m - i + 1]
+    H[:, 0] = v
+    H[-1, :] = np.flip(v, axis=0)
+
+    Hpwr = np.eye(np.shape(H)[0])  # Holds intermediate powers of H
+    nn = n
+    expnt = 0  # Scaling of Hpwr
+    Hexpnt = 0  # Scaling of H
+    while nn > 0:
+        if nn % 2:
+            Hpwr = np.matmul(Hpwr, H)
+            expnt += Hexpnt
+        H = np.matmul(H, H)
+        Hexpnt *= 2
+        # Scale as needed.
+        if np.abs(H[k - 1, k - 1]) > _EP128:
+            H /= _EP128
+            Hexpnt += _E128
+        nn = nn // 2
+
+    p = Hpwr[k - 1, k - 1]
+
+    # Multiply by n!/n^n
+    for i in range(1, n + 1):
+        p = i * p / n
+        if np.abs(p) < _EM128:
+            p *= _EP128
+            expnt -= _E128
+
+    # unscale
+    if expnt != 0:
+        p = np.ldexp(p, expnt)
+
+    return _select_and_clip_prob(p, 1.0-p, cdf)
+
+
+def _pomeranz_compute_j1j2(i, n, ll, ceilf, roundf):
+    """Compute the endpoints of the interval for row i."""
+    if i == 0:
+        j1, j2 = -ll - ceilf - 1, ll + ceilf - 1
+    else:
+        # i + 1 = 2*ip1div2 + ip1mod2
+        ip1div2, ip1mod2 = divmod(i + 1, 2)
+        if ip1mod2 == 0:  # i is odd
+            if ip1div2 == n + 1:
+                j1, j2 = n - ll - ceilf - 1, n + ll + ceilf - 1
+            else:
+                j1, j2 = ip1div2 - 1 - ll - roundf - 1, ip1div2 + ll - 1 + ceilf - 1
+        else:
+            j1, j2 = ip1div2 - 1 - ll - 1, ip1div2 + ll + roundf - 1
+
+    return max(j1 + 2, 0), min(j2, n)
+
+
+def _kolmogn_Pomeranz(n, x, cdf=True):
+    r"""Computes Pr(D_n <= d) using the Pomeranz recursion algorithm.
+
+    Pomeranz (1974) [2]
+    """
+
+    # V is n*(2n+2) matrix.
+    # Each row is convolution of the previous row and probabilities from a
+    #  Poisson distribution.
+    # Desired CDF probability is n! V[n-1, 2n+1]  (final entry in final row).
+    # Only two rows are needed at any given stage:
+    #  - Call them V0 and V1.
+    #  - Swap each iteration
+    # Only a few (contiguous) entries in each row can be non-zero.
+    #  - Keep track of start and end (j1 and j2 below)
+    #  - V0s and V1s track the start in the two rows
+    # Scale intermediate results as needed.
+    # Only a few different Poisson distributions can occur
+    t = n * x
+    ll = int(np.floor(t))
+    f = 1.0 * (t - ll)  # fractional part of t
+    g = min(f, 1.0 - f)
+    ceilf = (1 if f > 0 else 0)
+    roundf = (1 if f > 0.5 else 0)
+    npwrs = 2 * (ll + 1)    # Maximum number of powers needed in convolutions
+    gpower = np.empty(npwrs)  # gpower = (g/n)^m/m!
+    twogpower = np.empty(npwrs)  # twogpower = (2g/n)^m/m!
+    onem2gpower = np.empty(npwrs)  # onem2gpower = ((1-2g)/n)^m/m!
+    # gpower etc are *almost* Poisson probs, just missing normalizing factor.
+
+    gpower[0] = 1.0
+    twogpower[0] = 1.0
+    onem2gpower[0] = 1.0
+    expnt = 0
+    g_over_n, two_g_over_n, one_minus_two_g_over_n = g/n, 2*g/n, (1 - 2*g)/n
+    for m in range(1, npwrs):
+        gpower[m] = gpower[m - 1] * g_over_n / m
+        twogpower[m] = twogpower[m - 1] * two_g_over_n / m
+        onem2gpower[m] = onem2gpower[m - 1] * one_minus_two_g_over_n / m
+
+    V0 = np.zeros([npwrs])
+    V1 = np.zeros([npwrs])
+    V1[0] = 1  # first row
+    V0s, V1s = 0, 0  # start indices of the two rows
+
+    j1, j2 = _pomeranz_compute_j1j2(0, n, ll, ceilf, roundf)
+    for i in range(1, 2 * n + 2):
+        # Preserve j1, V1, V1s, V0s from last iteration
+        k1 = j1
+        V0, V1 = V1, V0
+        V0s, V1s = V1s, V0s
+        V1.fill(0.0)
+        j1, j2 = _pomeranz_compute_j1j2(i, n, ll, ceilf, roundf)
+        if i == 1 or i == 2 * n + 1:
+            pwrs = gpower
+        else:
+            pwrs = (twogpower if i % 2 else onem2gpower)
+        ln2 = j2 - k1 + 1
+        if ln2 > 0:
+            conv = np.convolve(V0[k1 - V0s:k1 - V0s + ln2], pwrs[:ln2])
+            conv_start = j1 - k1  # First index to use from conv
+            conv_len = j2 - j1 + 1  # Number of entries to use from conv
+            V1[:conv_len] = conv[conv_start:conv_start + conv_len]
+            # Scale to avoid underflow.
+            if 0 < np.max(V1) < _EM128:
+                V1 *= _EP128
+                expnt -= _E128
+            V1s = V0s + j1 - k1
+
+    # multiply by n!
+    ans = V1[n - V1s]
+    for m in range(1, n + 1):
+        if np.abs(ans) > _EP128:
+            ans *= _EM128
+            expnt += _E128
+        ans *= m
+
+    # Undo any intermediate scaling
+    if expnt != 0:
+        ans = np.ldexp(ans, expnt)
+    ans = _select_and_clip_prob(ans, 1.0 - ans, cdf)
+    return ans
+
+
+def _kolmogn_PelzGood(n, x, cdf=True):
+    """Computes the Pelz-Good approximation to Prob(Dn <= x) with 0<=x<=1.
+
+    Start with Li-Chien, Korolyuk approximation:
+        Prob(Dn <= x) ~ K0(z) + K1(z)/sqrt(n) + K2(z)/n + K3(z)/n**1.5
+    where z = x*sqrt(n).
+    Transform each K_i(z) using Jacobi theta functions into a form suitable
+    for small z.
+    Pelz-Good (1976). [6]
+    """
+    if x <= 0.0:
+        return _select_and_clip_prob(0.0, 1.0, cdf=cdf)
+    if x >= 1.0:
+        return _select_and_clip_prob(1.0, 0.0, cdf=cdf)
+
+    z = np.sqrt(n) * x
+    zsquared, zthree, zfour, zsix = z**2, z**3, z**4, z**6
+
+    qlog = -_PI_SQUARED / 8 / zsquared
+    if qlog < _MIN_LOG:  # z ~ 0.041743441416853426
+        return _select_and_clip_prob(0.0, 1.0, cdf=cdf)
+
+    q = np.exp(qlog)
+
+    # Coefficients of terms in the sums for K1, K2 and K3
+    k1a = -zsquared
+    k1b = _PI_SQUARED / 4
+
+    k2a = 6 * zsix + 2 * zfour
+    k2b = (2 * zfour - 5 * zsquared) * _PI_SQUARED / 4
+    k2c = _PI_FOUR * (1 - 2 * zsquared) / 16
+
+    k3d = _PI_SIX * (5 - 30 * zsquared) / 64
+    k3c = _PI_FOUR * (-60 * zsquared + 212 * zfour) / 16
+    k3b = _PI_SQUARED * (135 * zfour - 96 * zsix) / 4
+    k3a = -30 * zsix - 90 * z**8
+
+    K0to3 = np.zeros(4)
+    # Use a Horner scheme to evaluate sum c_i q^(i^2)
+    # Reduces to a sum over odd integers.
+    maxk = int(np.ceil(16 * z / np.pi))
+    for k in range(maxk, 0, -1):
+        m = 2 * k - 1
+        msquared, mfour, msix = m**2, m**4, m**6
+        qpower = np.power(q, 8 * k)
+        coeffs = np.array([1.0,
+                           k1a + k1b*msquared,
+                           k2a + k2b*msquared + k2c*mfour,
+                           k3a + k3b*msquared + k3c*mfour + k3d*msix])
+        K0to3 *= qpower
+        K0to3 += coeffs
+    K0to3 *= q
+    K0to3 *= _SQRT2PI
+    # z**10 > 0 as z > 0.04
+    K0to3 /= np.array([z, 6 * zfour, 72 * z**7, 6480 * z**10])
+
+    # Now do the other sum over the other terms, all integers k
+    # K_2:  (pi^2 k^2) q^(k^2),
+    # K_3:  (3pi^2 k^2 z^2 - pi^4 k^4)*q^(k^2)
+    # Don't expect much subtractive cancellation so use direct calculation
+    q = np.exp(-_PI_SQUARED / 2 / zsquared)
+    ks = np.arange(maxk, 0, -1)
+    ksquared = ks ** 2
+    sqrt3z = _SQRT3 * z
+    kspi = np.pi * ks
+    qpwers = q ** ksquared
+    k2extra = np.sum(ksquared * qpwers)
+    k2extra *= _PI_SQUARED * _SQRT2PI/(-36 * zthree)
+    K0to3[2] += k2extra
+    k3extra = np.sum((sqrt3z + kspi) * (sqrt3z - kspi) * ksquared * qpwers)
+    k3extra *= _PI_SQUARED * _SQRT2PI/(216 * zsix)
+    K0to3[3] += k3extra
+    powers_of_n = np.power(n * 1.0, np.arange(len(K0to3)) / 2.0)
+    K0to3 /= powers_of_n
+
+    if not cdf:
+        K0to3 *= -1
+        K0to3[0] += 1
+
+    Ksum = sum(K0to3)
+    return Ksum
+
+
+def _kolmogn(n, x, cdf=True):
+    """Computes the CDF (or SF) for the two-sided Kolmogorov-Smirnov statistic.
+
+    x must be of type float, n of type integer.
+
+    Simard & L'Ecuyer (2011) [7].
+    """
+    if np.isnan(n):
+        return n  # Keep the same type of nan
+    if int(n) != n or n <= 0:
+        return np.nan
+    if x >= 1.0:
+        return _select_and_clip_prob(1.0, 0.0, cdf=cdf)
+    if x <= 0.0:
+        return _select_and_clip_prob(0.0, 1.0, cdf=cdf)
+    t = n * x
+    if t <= 1.0:  # Ruben-Gambino: 1/2n <= x <= 1/n
+        if t <= 0.5:
+            return _select_and_clip_prob(0.0, 1.0, cdf=cdf)
+        if n <= 140:
+            prob = np.prod(np.arange(1, n+1) * (1.0/n) * (2*t - 1))
+        else:
+            prob = np.exp(_log_nfactorial_div_n_pow_n(n) + n * np.log(2*t-1))
+        return _select_and_clip_prob(prob, 1.0 - prob, cdf=cdf)
+    if t >= n - 1:  # Ruben-Gambino
+        prob = 2 * (1.0 - x)**n
+        return _select_and_clip_prob(1 - prob, prob, cdf=cdf)
+    if x >= 0.5:  # Exact: 2 * smirnov
+        prob = 2 * scipy.special.smirnov(n, x)
+        return _select_and_clip_prob(1.0 - prob, prob, cdf=cdf)
+
+    nxsquared = t * x
+    if n <= 140:
+        if nxsquared <= 0.754693:
+            prob = _kolmogn_DMTW(n, x, cdf=True)
+            return _select_and_clip_prob(prob, 1.0 - prob, cdf=cdf)
+        if nxsquared <= 4:
+            prob = _kolmogn_Pomeranz(n, x, cdf=True)
+            return _select_and_clip_prob(prob, 1.0 - prob, cdf=cdf)
+        # Now use Miller approximation of 2*smirnov
+        prob = 2 * scipy.special.smirnov(n, x)
+        return _select_and_clip_prob(1.0 - prob, prob, cdf=cdf)
+
+    # Split CDF and SF as they have different cutoffs on nxsquared.
+    if not cdf:
+        if nxsquared >= 370.0:
+            return 0.0
+        if nxsquared >= 2.2:
+            prob = 2 * scipy.special.smirnov(n, x)
+            return _clip_prob(prob)
+        # Fall through and compute the SF as 1.0-CDF
+    if nxsquared >= 18.0:
+        cdfprob = 1.0
+    elif n <= 100000 and n * x**1.5 <= 1.4:
+        cdfprob = _kolmogn_DMTW(n, x, cdf=True)
+    else:
+        cdfprob = _kolmogn_PelzGood(n, x, cdf=True)
+    return _select_and_clip_prob(cdfprob, 1.0 - cdfprob, cdf=cdf)
+
+
+def _kolmogn_p(n, x):
+    """Computes the PDF for the two-sided Kolmogorov-Smirnov statistic.
+
+    x must be of type float, n of type integer.
+    """
+    if np.isnan(n):
+        return n  # Keep the same type of nan
+    if int(n) != n or n <= 0:
+        return np.nan
+    if x >= 1.0 or x <= 0:
+        return 0
+    t = n * x
+    if t <= 1.0:
+        # Ruben-Gambino: n!/n^n * (2t-1)^n -> 2 n!/n^n * n^2 * (2t-1)^(n-1)
+        if t <= 0.5:
+            return 0.0
+        if n <= 140:
+            prd = np.prod(np.arange(1, n) * (1.0 / n) * (2 * t - 1))
+        else:
+            prd = np.exp(_log_nfactorial_div_n_pow_n(n) + (n-1) * np.log(2 * t - 1))
+        return prd * 2 * n**2
+    if t >= n - 1:
+        # Ruben-Gambino : 1-2(1-x)**n -> 2n*(1-x)**(n-1)
+        return 2 * (1.0 - x) ** (n-1) * n
+    if x >= 0.5:
+        return 2 * scipy.stats.ksone.pdf(x, n)
+
+    # Just take a small delta.
+    # Ideally x +/- delta would stay within [i/n, (i+1)/n] for some integer i,
+    # as the CDF is a piecewise polynomial of degree n.
+    # It has knots at 1/n, 2/n, ..., (n-1)/n
+    # and is not a C-infinity function at the knots
+    delta = x / 2.0**16
+    delta = min(delta, x - 1.0/n)
+    delta = min(delta, 0.5 - x)
+
+    def _kk(_x):
+        return kolmogn(n, _x)
+
+    return _derivative(_kk, x, dx=delta, order=5)
+
+
+def _kolmogni(n, p, q):
+    """Computes the PPF/ISF of kolmogn.
+
+    n of type integer, n>= 1
+    p is the CDF, q the SF, p+q=1
+    """
+    if np.isnan(n):
+        return n  # Keep the same type of nan
+    if int(n) != n or n <= 0:
+        return np.nan
+    if p <= 0:
+        return 1.0/n
+    if q <= 0:
+        return 1.0
+    delta = np.exp((np.log(p) - scipy.special.loggamma(n+1))/n)
+    if delta <= 1.0/n:
+        return (delta + 1.0 / n) / 2
+    x = -np.expm1(np.log(q/2.0)/n)
+    if x >= 1 - 1.0/n:
+        return x
+    x1 = scu._kolmogci(p)/np.sqrt(n)
+    x1 = min(x1, 1.0 - 1.0/n)
+    _f = lambda x: _kolmogn(n, x) - p
+    return scipy.optimize.brentq(_f, 1.0/n, x1, xtol=1e-14)
+
+
+def kolmogn(n, x, cdf=True):
+    """Computes the CDF for the two-sided Kolmogorov-Smirnov distribution.
+
+    The two-sided Kolmogorov-Smirnov distribution has as its CDF Pr(D_n <= x),
+    for a sample of size n drawn from a distribution with CDF F(t), where
+    D_n = sup_t |F_n(t) - F(t)|, and
+    F_n(t) is the Empirical Cumulative Distribution Function of the sample.
+
+    Parameters
+    ----------
+    n : integer, array_like
+        The number of samples.
+    x : float, array_like
+        The K-S statistic, a float between 0 and 1.
+    cdf : bool, optional
+        Whether to compute the CDF (default=True) or the SF.
+
+    Returns
+    -------
+    cdf : ndarray
+        CDF (or SF if cdf is False) at the specified locations.
+
+    The return value has the shape of the result of numpy broadcasting n and x.
+    """
+    it = np.nditer([n, x, cdf, None],
+                   op_dtypes=[None, np.float64, np.bool_, np.float64])
+    for _n, _x, _cdf, z in it:
+        if np.isnan(_n):
+            z[...] = _n
+            continue
+        if int(_n) != _n:
+            raise ValueError(f'n is not integral: {_n}')
+        z[...] = _kolmogn(int(_n), _x, cdf=_cdf)
+    result = it.operands[-1]
+    return result
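+
+# A minimal usage sketch (editorial; this is a private scipy module, so
+# importing it directly is an assumption, not a supported API):
+#
+#     >>> from scipy.stats._ksstats import kolmogn
+#     >>> p = kolmogn(100, 0.1)               # Pr(D_100 <= 0.1)
+#     >>> sf = kolmogn(100, 0.1, cdf=False)   # Pr(D_100 > 0.1)
+#     >>> float(p + sf)                       # CDF and SF are complementary
+#     1.0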
+
+
+def kolmognp(n, x):
+    """Computes the PDF for the two-sided Kolmogorov-Smirnov distribution.
+
+    Parameters
+    ----------
+    n : integer, array_like
+        The number of samples.
+    x : float, array_like
+        The K-S statistic, a float between 0 and 1.
+
+    Returns
+    -------
+    pdf : ndarray
+        The PDF at the specified locations.
+
+    The return value has the shape of the result of numpy broadcasting n and x.
+    """
+    it = np.nditer([n, x, None])
+    for _n, _x, z in it:
+        if np.isnan(_n):
+            z[...] = _n
+            continue
+        if int(_n) != _n:
+            raise ValueError(f'n is not integral: {_n}')
+        z[...] = _kolmogn_p(int(_n), _x)
+    result = it.operands[-1]
+    return result
+
+
+def kolmogni(n, q, cdf=True):
+    """Computes the PPF (or ISF) for the two-sided Kolmogorov-Smirnov distribution.
+
+    Parameters
+    ----------
+    n : integer, array_like
+        The number of samples.
+    q : float, array_like
+        Probabilities, floats between 0 and 1.
+    cdf : bool, optional
+        Whether to compute the PPF (default=True) or the ISF.
+
+    Returns
+    -------
+    ppf : ndarray
+        PPF (or ISF if cdf is False) at the specified locations.
+
+    The return value has the shape of the result of numpy broadcasting n and q.
+    """
+    it = np.nditer([n, q, cdf, None])
+    for _n, _q, _cdf, z in it:
+        if np.isnan(_n):
+            z[...] = _n
+            continue
+        if int(_n) != _n:
+            raise ValueError(f'n is not integral: {_n}')
+        _pcdf, _psf = (_q, 1-_q) if _cdf else (1-_q, _q)
+        z[...] = _kolmogni(int(_n), _pcdf, _psf)
+    result = it.operands[-1]
+    return result
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_levy_stable/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_levy_stable/__init__.py
new file mode 100644
index 00000000..93dd652e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_levy_stable/__init__.py
@@ -0,0 +1,1200 @@
+# -*- coding: utf-8 -*-
+#
+
+import warnings
+from functools import partial
+
+import numpy as np
+
+from scipy import optimize
+from scipy import integrate
+from scipy.integrate._quadrature import _builtincoeffs
+from scipy import interpolate
+from scipy.interpolate import RectBivariateSpline
+import scipy.special as sc
+from scipy._lib._util import _lazywhere
+from .._distn_infrastructure import rv_continuous, _ShapeInfo
+from .._continuous_distns import uniform, expon, _norm_pdf, _norm_cdf
+from .levyst import Nolan
+from scipy._lib.doccer import inherit_docstring_from
+
+
+__all__ = ["levy_stable", "levy_stable_gen", "pdf_from_cf_with_fft"]
+
+# Stable distributions are known for various parameterisations
+# some being advantageous for numerical considerations and others
+# useful due to their location/scale awareness.
+#
+# Here we follow [NO] convention (see the references in the docstring
+# for levy_stable_gen below).
+#
+# S0 / Z0 / x0 (aka Zolotarev's M)
+# S1 / Z1 / x1
+#
+# Where S* denotes parameterisation, Z* denotes standardized
+# version where gamma = 1, delta = 0 and x* denotes variable.
+#
+# Scipy's original Stable was a random variate generator. It
+# uses S1 and unfortunately is not location/scale aware.
+
+
+# default numerical integration tolerance
+# used for epsrel in piecewise and both epsrel and epsabs in dni
+# (epsabs needed in dni since weighted quad requires epsabs > 0)
+_QUAD_EPS = 1.2e-14
+
+
+def _Phi_Z0(alpha, t):
+    return (
+        -np.tan(np.pi * alpha / 2) * (np.abs(t) ** (1 - alpha) - 1)
+        if alpha != 1
+        else -2.0 * np.log(np.abs(t)) / np.pi
+    )
+
+
+def _Phi_Z1(alpha, t):
+    return (
+        np.tan(np.pi * alpha / 2)
+        if alpha != 1
+        else -2.0 * np.log(np.abs(t)) / np.pi
+    )
+
+
+def _cf(Phi, t, alpha, beta):
+    """Characteristic function."""
+    return np.exp(
+        -(np.abs(t) ** alpha) * (1 - 1j * beta * np.sign(t) * Phi(alpha, t))
+    )
+
+
+_cf_Z0 = partial(_cf, _Phi_Z0)
+_cf_Z1 = partial(_cf, _Phi_Z1)
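+
+# Sanity-check sketch (editorial, illustrative): with beta = 0 the
+# characteristic function is real, and at alpha = 2 it reduces to exp(-t**2),
+# the CF of a normal distribution with scale sqrt(2):
+#
+#     >>> bool(np.isclose(_cf_Z1(1.0, 2.0, 0.0), np.exp(-1.0)))
+#     True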
+
+
+def _pdf_single_value_cf_integrate(Phi, x, alpha, beta, **kwds):
+    """To improve DNI accuracy, convert the characteristic function into a
+    real-valued integral using Euler's formula, then exploit cosine symmetry
+    to change the limits to [0, inf). Finally, use the cosine addition formula
+    to split it into two parts that can be handled by weighted QUADPACK
+    routines.
+    """
+    quad_eps = kwds.get("quad_eps", _QUAD_EPS)
+
+    def integrand1(t):
+        if t == 0:
+            return 0
+        return np.exp(-(t ** alpha)) * (
+            np.cos(beta * (t ** alpha) * Phi(alpha, t))
+        )
+
+    def integrand2(t):
+        if t == 0:
+            return 0
+        return np.exp(-(t ** alpha)) * (
+            np.sin(beta * (t ** alpha) * Phi(alpha, t))
+        )
+
+    with np.errstate(invalid="ignore"):
+        int1, *ret1 = integrate.quad(
+            integrand1,
+            0,
+            np.inf,
+            weight="cos",
+            wvar=x,
+            limit=1000,
+            epsabs=quad_eps,
+            epsrel=quad_eps,
+            full_output=1,
+        )
+
+        int2, *ret2 = integrate.quad(
+            integrand2,
+            0,
+            np.inf,
+            weight="sin",
+            wvar=x,
+            limit=1000,
+            epsabs=quad_eps,
+            epsrel=quad_eps,
+            full_output=1,
+        )
+
+    return (int1 + int2) / np.pi
+
+
+_pdf_single_value_cf_integrate_Z0 = partial(
+    _pdf_single_value_cf_integrate, _Phi_Z0
+)
+_pdf_single_value_cf_integrate_Z1 = partial(
+    _pdf_single_value_cf_integrate, _Phi_Z1
+)
+
+
+def _nolan_round_difficult_input(
+    x0, alpha, beta, zeta, x_tol_near_zeta, alpha_tol_near_one
+):
+    """Round difficult input values for Nolan's method in [NO]."""
+
+    # following Nolan's STABLE,
+    #   "1. When 0 < |alpha-1| < 0.005, the program has numerical problems
+    #   evaluating the pdf and cdf.  The current version of the program sets
+    #   alpha=1 in these cases. This approximation is not bad in the S0
+    #   parameterization."
+    if np.abs(alpha - 1) < alpha_tol_near_one:
+        alpha = 1.0
+
+    #   "2. When alpha=1 and |beta| < 0.005, the program has numerical
+    #   problems.  The current version sets beta=0."
+    # We seem to have addressed this through re-expression of g(theta) here
+
+    #   "8. When |x0-beta*tan(pi*alpha/2)| is small, the
+    #   computations of the density and cumulative have numerical problems.
+    #   The program works around this by setting
+    #   z = beta*tan(pi*alpha/2) when
+    #   |z-beta*tan(pi*alpha/2)| < tol(5)*alpha**(1/alpha).
+    #   (The bound on the right is ad hoc, to get reasonable behavior
+    #   when alpha is small)."
+    # where tol(5) = 0.5e-2 by default.
+    #
+    # We seem to have partially addressed this through re-expression of
+    # g(theta) here, but it still needs to be used in some extreme cases.
+    # Perhaps tol(5) = 0.5e-2 could be reduced for our implementation.
+    if np.abs(x0 - zeta) < x_tol_near_zeta * alpha ** (1 / alpha):
+        x0 = zeta
+
+    return x0, alpha, beta
+
+
+def _pdf_single_value_piecewise_Z1(x, alpha, beta, **kwds):
+    # convert from Nolan's S_1 (aka S) to S_0 (aka Zolotarev M)
+    # parameterization
+
+    zeta = -beta * np.tan(np.pi * alpha / 2.0)
+    x0 = x + zeta if alpha != 1 else x
+
+    return _pdf_single_value_piecewise_Z0(x0, alpha, beta, **kwds)
+
+
+def _pdf_single_value_piecewise_Z0(x0, alpha, beta, **kwds):
+
+    quad_eps = kwds.get("quad_eps", _QUAD_EPS)
+    x_tol_near_zeta = kwds.get("piecewise_x_tol_near_zeta", 0.005)
+    alpha_tol_near_one = kwds.get("piecewise_alpha_tol_near_one", 0.005)
+
+    zeta = -beta * np.tan(np.pi * alpha / 2.0)
+    x0, alpha, beta = _nolan_round_difficult_input(
+        x0, alpha, beta, zeta, x_tol_near_zeta, alpha_tol_near_one
+    )
+
+    # some other known distribution pdfs / analytical cases
+    # TODO: add more where possible with test coverage,
+    # eg https://en.wikipedia.org/wiki/Stable_distribution#Other_analytic_cases
+    if alpha == 2.0:
+        # normal
+        return _norm_pdf(x0 / np.sqrt(2)) / np.sqrt(2)
+    elif alpha == 0.5 and beta == 1.0:
+        # levy
+        # since S(1/2, 1, gamma, delta; ) ==
+        # S(1/2, 1, gamma, gamma + delta; ).
+        _x = x0 + 1
+        if _x <= 0:
+            return 0
+
+        return 1 / np.sqrt(2 * np.pi * _x) / _x * np.exp(-1 / (2 * _x))
+    elif alpha == 0.5 and beta == 0.0 and x0 != 0:
+        # analytical solution [HO]
+        S, C = sc.fresnel([1 / np.sqrt(2 * np.pi * np.abs(x0))])
+        arg = 1 / (4 * np.abs(x0))
+        return (
+            np.sin(arg) * (0.5 - S[0]) + np.cos(arg) * (0.5 - C[0])
+        ) / np.sqrt(2 * np.pi * np.abs(x0) ** 3)
+    elif alpha == 1.0 and beta == 0.0:
+        # cauchy
+        return 1 / (1 + x0 ** 2) / np.pi
+
+    return _pdf_single_value_piecewise_post_rounding_Z0(
+        x0, alpha, beta, quad_eps
+    )
+
+
+def _pdf_single_value_piecewise_post_rounding_Z0(x0, alpha, beta, quad_eps):
+    """Calculate pdf using Nolan's methods as detailed in [NO].
+    """
+
+    _nolan = Nolan(alpha, beta, x0)
+    zeta = _nolan.zeta
+    xi = _nolan.xi
+    c2 = _nolan.c2
+    g = _nolan.g
+
+    # handle Nolan's initial case logic
+    if x0 == zeta:
+        return (
+            sc.gamma(1 + 1 / alpha)
+            * np.cos(xi)
+            / np.pi
+            / ((1 + zeta ** 2) ** (1 / alpha / 2))
+        )
+    elif x0 < zeta:
+        return _pdf_single_value_piecewise_post_rounding_Z0(
+            -x0, alpha, -beta, quad_eps
+        )
+
+    # following Nolan, we may now assume
+    #   x0 > zeta when alpha != 1
+    #   beta != 0 when alpha == 1
+
+    # spare calculating integral on null set
+    # use isclose as macos has fp differences
+    if np.isclose(-xi, np.pi / 2, rtol=1e-014, atol=1e-014):
+        return 0.0
+
+    def integrand(theta):
+        # limit any numerical issues leading to g_1 < 0 near theta limits
+        g_1 = g(theta)
+        if not np.isfinite(g_1) or g_1 < 0:
+            g_1 = 0
+        return g_1 * np.exp(-g_1)
+
+    with np.errstate(all="ignore"):
+        peak = optimize.bisect(
+            lambda t: g(t) - 1, -xi, np.pi / 2, xtol=quad_eps
+        )
+
+        # this integrand can be very peaked, so we need to force
+        # QUADPACK to evaluate the function inside its support
+
+        # lastly, we add additional samples at
+        #   ~exp(-100), ~exp(-10), ~exp(-5), ~exp(-1)
+        # to improve QUADPACK's detection of rapidly descending tail behavior
+        # (this choice is fairly ad hoc)
+        tail_points = [
+            optimize.bisect(lambda t: g(t) - exp_height, -xi, np.pi / 2)
+            for exp_height in [100, 10, 5]
+            # exp_height = 1 is handled by peak
+        ]
+        intg_points = [0, peak] + tail_points
+        intg, *ret = integrate.quad(
+            integrand,
+            -xi,
+            np.pi / 2,
+            points=intg_points,
+            limit=100,
+            epsrel=quad_eps,
+            epsabs=0,
+            full_output=1,
+        )
+
+    return c2 * intg
+
+
+def _cdf_single_value_piecewise_Z1(x, alpha, beta, **kwds):
+    # convert from Nolan's S_1 (aka S) to S_0 (aka Zolotarev M)
+    # parameterization
+
+    zeta = -beta * np.tan(np.pi * alpha / 2.0)
+    x0 = x + zeta if alpha != 1 else x
+
+    return _cdf_single_value_piecewise_Z0(x0, alpha, beta, **kwds)
+
+
+def _cdf_single_value_piecewise_Z0(x0, alpha, beta, **kwds):
+
+    quad_eps = kwds.get("quad_eps", _QUAD_EPS)
+    x_tol_near_zeta = kwds.get("piecewise_x_tol_near_zeta", 0.005)
+    alpha_tol_near_one = kwds.get("piecewise_alpha_tol_near_one", 0.005)
+
+    zeta = -beta * np.tan(np.pi * alpha / 2.0)
+    x0, alpha, beta = _nolan_round_difficult_input(
+        x0, alpha, beta, zeta, x_tol_near_zeta, alpha_tol_near_one
+    )
+
+    # some other known distribution cdfs / analytical cases
+    # TODO: add more where possible with test coverage,
+    # eg https://en.wikipedia.org/wiki/Stable_distribution#Other_analytic_cases
+    if alpha == 2.0:
+        # normal
+        return _norm_cdf(x0 / np.sqrt(2))
+    elif alpha == 0.5 and beta == 1.0:
+        # levy
+        # since S(1/2, 1, gamma, delta; ) ==
+        # S(1/2, 1, gamma, gamma + delta; ).
+        _x = x0 + 1
+        if _x <= 0:
+            return 0
+
+        return sc.erfc(np.sqrt(0.5 / _x))
+    elif alpha == 1.0 and beta == 0.0:
+        # cauchy
+        return 0.5 + np.arctan(x0) / np.pi
+
+    return _cdf_single_value_piecewise_post_rounding_Z0(
+        x0, alpha, beta, quad_eps
+    )
+
+
+def _cdf_single_value_piecewise_post_rounding_Z0(x0, alpha, beta, quad_eps):
+    """Calculate cdf using Nolan's methods as detailed in [NO].
+    """
+    _nolan = Nolan(alpha, beta, x0)
+    zeta = _nolan.zeta
+    xi = _nolan.xi
+    c1 = _nolan.c1
+    # c2 = _nolan.c2
+    c3 = _nolan.c3
+    g = _nolan.g
+
+    # handle Nolan's initial case logic
+    if (alpha == 1 and beta < 0) or x0 < zeta:
+        # NOTE: Nolan's paper has a typo here!
+        # He states F(x) = 1 - F(x, alpha, -beta), but this is clearly
+        # incorrect since F(-infty) would be 1.0 in this case
+        # Indeed, the alpha != 1, x0 < zeta case is correct here.
+        return 1 - _cdf_single_value_piecewise_post_rounding_Z0(
+            -x0, alpha, -beta, quad_eps
+        )
+    elif x0 == zeta:
+        return 0.5 - xi / np.pi
+
+    # following Nolan, we may now assume
+    #   x0 > zeta when alpha != 1
+    #   beta > 0 when alpha == 1
+
+    # spare calculating integral on null set
+    # use isclose as macos has fp differences
+    if np.isclose(-xi, np.pi / 2, rtol=1e-014, atol=1e-014):
+        return c1
+
+    def integrand(theta):
+        g_1 = g(theta)
+        return np.exp(-g_1)
+
+    with np.errstate(all="ignore"):
+        # shrink supports where required
+        left_support = -xi
+        right_support = np.pi / 2
+        if alpha > 1:
+            # integrand(t) monotonic 0 to 1
+            if integrand(-xi) != 0.0:
+                res = optimize.minimize(
+                    integrand,
+                    (-xi,),
+                    method="L-BFGS-B",
+                    bounds=[(-xi, np.pi / 2)],
+                )
+                left_support = res.x[0]
+        else:
+            # integrand(t) monotonic 1 to 0
+            if integrand(np.pi / 2) != 0.0:
+                res = optimize.minimize(
+                    integrand,
+                    (np.pi / 2,),
+                    method="L-BFGS-B",
+                    bounds=[(-xi, np.pi / 2)],
+                )
+                right_support = res.x[0]
+
+        intg, *ret = integrate.quad(
+            integrand,
+            left_support,
+            right_support,
+            points=[left_support, right_support],
+            limit=100,
+            epsrel=quad_eps,
+            epsabs=0,
+            full_output=1,
+        )
+
+    return c1 + c3 * intg
+
+
+def _rvs_Z1(alpha, beta, size=None, random_state=None):
+    """Simulate random variables using Nolan's methods as detailed in [NO].
+    """
+
+    def alpha1func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
+        return (
+            2
+            / np.pi
+            * (
+                (np.pi / 2 + bTH) * tanTH
+                - beta * np.log((np.pi / 2 * W * cosTH) / (np.pi / 2 + bTH))
+            )
+        )
+
+    def beta0func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
+        return (
+            W
+            / (cosTH / np.tan(aTH) + np.sin(TH))
+            * ((np.cos(aTH) + np.sin(aTH) * tanTH) / W) ** (1.0 / alpha)
+        )
+
+    def otherwise(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
+        # alpha is not 1 and beta is not 0
+        val0 = beta * np.tan(np.pi * alpha / 2)
+        th0 = np.arctan(val0) / alpha
+        val3 = W / (cosTH / np.tan(alpha * (th0 + TH)) + np.sin(TH))
+        res3 = val3 * (
+            (
+                np.cos(aTH)
+                + np.sin(aTH) * tanTH
+                - val0 * (np.sin(aTH) - np.cos(aTH) * tanTH)
+            )
+            / W
+        ) ** (1.0 / alpha)
+        return res3
+
+    def alphanot1func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
+        res = _lazywhere(
+            beta == 0,
+            (alpha, beta, TH, aTH, bTH, cosTH, tanTH, W),
+            beta0func,
+            f2=otherwise,
+        )
+        return res
+
+    alpha = np.broadcast_to(alpha, size)
+    beta = np.broadcast_to(beta, size)
+    TH = uniform.rvs(
+        loc=-np.pi / 2.0, scale=np.pi, size=size, random_state=random_state
+    )
+    W = expon.rvs(size=size, random_state=random_state)
+    aTH = alpha * TH
+    bTH = beta * TH
+    cosTH = np.cos(TH)
+    tanTH = np.tan(TH)
+    res = _lazywhere(
+        alpha == 1,
+        (alpha, beta, TH, aTH, bTH, cosTH, tanTH, W),
+        alpha1func,
+        f2=alphanot1func,
+    )
+    return res
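+
+# Minimal driving sketch (editorial; the seeded Generator below is an
+# assumption, any seed accepted by scipy's rvs machinery would do):
+#
+#     >>> rng = np.random.default_rng(1234)
+#     >>> draws = _rvs_Z1(1.5, 0.0, size=1000, random_state=rng)  # S1 variates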
+
+
+def _fitstart_S0(data):
+    alpha, beta, delta1, gamma = _fitstart_S1(data)
+
+    # Formulas for mapping parameters in S1 parameterization to
+    # those in S0 parameterization can be found in [NO]. Note that
+    # only delta changes.
+    if alpha != 1:
+        delta0 = delta1 + beta * gamma * np.tan(np.pi * alpha / 2.0)
+    else:
+        delta0 = delta1 + 2 * beta * gamma * np.log(gamma) / np.pi
+
+    return alpha, beta, delta0, gamma
+
+
+def _fitstart_S1(data):
+    # We follow McCulloch's 1986 method - Simple Consistent Estimators
+    # of Stable Distribution Parameters
+
+    # fmt: off
+    # Table III and IV
+    nu_alpha_range = [2.439, 2.5, 2.6, 2.7, 2.8, 3, 3.2, 3.5, 4,
+                      5, 6, 8, 10, 15, 25]
+    nu_beta_range = [0, 0.1, 0.2, 0.3, 0.5, 0.7, 1]
+
+    # table III - alpha = psi_1(nu_alpha, nu_beta)
+    alpha_table = np.array([
+        [2.000, 2.000, 2.000, 2.000, 2.000, 2.000, 2.000],
+        [1.916, 1.924, 1.924, 1.924, 1.924, 1.924, 1.924],
+        [1.808, 1.813, 1.829, 1.829, 1.829, 1.829, 1.829],
+        [1.729, 1.730, 1.737, 1.745, 1.745, 1.745, 1.745],
+        [1.664, 1.663, 1.663, 1.668, 1.676, 1.676, 1.676],
+        [1.563, 1.560, 1.553, 1.548, 1.547, 1.547, 1.547],
+        [1.484, 1.480, 1.471, 1.460, 1.448, 1.438, 1.438],
+        [1.391, 1.386, 1.378, 1.364, 1.337, 1.318, 1.318],
+        [1.279, 1.273, 1.266, 1.250, 1.210, 1.184, 1.150],
+        [1.128, 1.121, 1.114, 1.101, 1.067, 1.027, 0.973],
+        [1.029, 1.021, 1.014, 1.004, 0.974, 0.935, 0.874],
+        [0.896, 0.892, 0.884, 0.883, 0.855, 0.823, 0.769],
+        [0.818, 0.812, 0.806, 0.801, 0.780, 0.756, 0.691],
+        [0.698, 0.695, 0.692, 0.689, 0.676, 0.656, 0.597],
+        [0.593, 0.590, 0.588, 0.586, 0.579, 0.563, 0.513]]).T
+    # transpose because interpolation with `RectBivariateSpline` is with
+    # `nu_beta` as `x` and `nu_alpha` as `y`
+
+    # table IV - beta = psi_2(nu_alpha, nu_beta)
+    beta_table = np.array([
+        [0, 2.160, 1.000, 1.000, 1.000, 1.000, 1.000],
+        [0, 1.592, 3.390, 1.000, 1.000, 1.000, 1.000],
+        [0, 0.759, 1.800, 1.000, 1.000, 1.000, 1.000],
+        [0, 0.482, 1.048, 1.694, 1.000, 1.000, 1.000],
+        [0, 0.360, 0.760, 1.232, 2.229, 1.000, 1.000],
+        [0, 0.253, 0.518, 0.823, 1.575, 1.000, 1.000],
+        [0, 0.203, 0.410, 0.632, 1.244, 1.906, 1.000],
+        [0, 0.165, 0.332, 0.499, 0.943, 1.560, 1.000],
+        [0, 0.136, 0.271, 0.404, 0.689, 1.230, 2.195],
+        [0, 0.109, 0.216, 0.323, 0.539, 0.827, 1.917],
+        [0, 0.096, 0.190, 0.284, 0.472, 0.693, 1.759],
+        [0, 0.082, 0.163, 0.243, 0.412, 0.601, 1.596],
+        [0, 0.074, 0.147, 0.220, 0.377, 0.546, 1.482],
+        [0, 0.064, 0.128, 0.191, 0.330, 0.478, 1.362],
+        [0, 0.056, 0.112, 0.167, 0.285, 0.428, 1.274]]).T
+
+    # Table V and VII
+    # These are ordered with decreasing `alpha_range`; so we will need to
+    # reverse them as required by RectBivariateSpline.
+    alpha_range = [2, 1.9, 1.8, 1.7, 1.6, 1.5, 1.4, 1.3, 1.2, 1.1,
+                   1, 0.9, 0.8, 0.7, 0.6, 0.5][::-1]
+    beta_range = [0, 0.25, 0.5, 0.75, 1]
+
+    # Table V - nu_c = psi_3(alpha, beta)
+    nu_c_table = np.array([
+        [1.908, 1.908, 1.908, 1.908, 1.908],
+        [1.914, 1.915, 1.916, 1.918, 1.921],
+        [1.921, 1.922, 1.927, 1.936, 1.947],
+        [1.927, 1.930, 1.943, 1.961, 1.987],
+        [1.933, 1.940, 1.962, 1.997, 2.043],
+        [1.939, 1.952, 1.988, 2.045, 2.116],
+        [1.946, 1.967, 2.022, 2.106, 2.211],
+        [1.955, 1.984, 2.067, 2.188, 2.333],
+        [1.965, 2.007, 2.125, 2.294, 2.491],
+        [1.980, 2.040, 2.205, 2.435, 2.696],
+        [2.000, 2.085, 2.311, 2.624, 2.973],
+        [2.040, 2.149, 2.461, 2.886, 3.356],
+        [2.098, 2.244, 2.676, 3.265, 3.912],
+        [2.189, 2.392, 3.004, 3.844, 4.775],
+        [2.337, 2.634, 3.542, 4.808, 6.247],
+        [2.588, 3.073, 4.534, 6.636, 9.144]])[::-1].T
+    # transpose because interpolation with `RectBivariateSpline` is with
+    # `beta` as `x` and `alpha` as `y`
+
+    # Table VII - nu_zeta = psi_5(alpha, beta)
+    nu_zeta_table = np.array([
+        [0, 0.000, 0.000, 0.000, 0.000],
+        [0, -0.017, -0.032, -0.049, -0.064],
+        [0, -0.030, -0.061, -0.092, -0.123],
+        [0, -0.043, -0.088, -0.132, -0.179],
+        [0, -0.056, -0.111, -0.170, -0.232],
+        [0, -0.066, -0.134, -0.206, -0.283],
+        [0, -0.075, -0.154, -0.241, -0.335],
+        [0, -0.084, -0.173, -0.276, -0.390],
+        [0, -0.090, -0.192, -0.310, -0.447],
+        [0, -0.095, -0.208, -0.346, -0.508],
+        [0, -0.098, -0.223, -0.380, -0.576],
+        [0, -0.099, -0.237, -0.424, -0.652],
+        [0, -0.096, -0.250, -0.469, -0.742],
+        [0, -0.089, -0.262, -0.520, -0.853],
+        [0, -0.078, -0.272, -0.581, -0.997],
+        [0, -0.061, -0.279, -0.659, -1.198]])[::-1].T
+    # fmt: on
+
+    psi_1 = RectBivariateSpline(nu_beta_range, nu_alpha_range,
+                                alpha_table, kx=1, ky=1, s=0)
+
+    def psi_1_1(nu_beta, nu_alpha):
+        return psi_1(nu_beta, nu_alpha) \
+            if nu_beta > 0 else psi_1(-nu_beta, nu_alpha)
+
+    psi_2 = RectBivariateSpline(nu_beta_range, nu_alpha_range,
+                                beta_table, kx=1, ky=1, s=0)
+
+    def psi_2_1(nu_beta, nu_alpha):
+        return psi_2(nu_beta, nu_alpha) \
+            if nu_beta > 0 else -psi_2(-nu_beta, nu_alpha)
+
+    phi_3 = RectBivariateSpline(beta_range, alpha_range, nu_c_table,
+                                kx=1, ky=1, s=0)
+
+    def phi_3_1(beta, alpha):
+        return phi_3(beta, alpha) if beta > 0 else phi_3(-beta, alpha)
+
+    phi_5 = RectBivariateSpline(beta_range, alpha_range, nu_zeta_table,
+                                kx=1, ky=1, s=0)
+
+    def phi_5_1(beta, alpha):
+        return phi_5(beta, alpha) if beta > 0 else -phi_5(-beta, alpha)
+
+    # quantiles
+    p05 = np.percentile(data, 5)
+    p50 = np.percentile(data, 50)
+    p95 = np.percentile(data, 95)
+    p25 = np.percentile(data, 25)
+    p75 = np.percentile(data, 75)
+
+    nu_alpha = (p95 - p05) / (p75 - p25)
+    nu_beta = (p95 + p05 - 2 * p50) / (p95 - p05)
+
+    if nu_alpha >= 2.439:
+        eps = np.finfo(float).eps
+        alpha = np.clip(psi_1_1(nu_beta, nu_alpha)[0, 0], eps, 2.)
+        beta = np.clip(psi_2_1(nu_beta, nu_alpha)[0, 0], -1.0, 1.0)
+    else:
+        alpha = 2.0
+        beta = np.sign(nu_beta)
+    c = (p75 - p25) / phi_3_1(beta, alpha)[0, 0]
+    zeta = p50 + c * phi_5_1(beta, alpha)[0, 0]
+    delta = zeta-beta*c*np.tan(np.pi*alpha/2.) if alpha != 1. else zeta
+
+    return (alpha, beta, delta, c)
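+
+# Editorial note: the five percentiles above drive McCulloch's quantile-based
+# estimators; e.g. nu_alpha = (p95 - p05) / (p75 - p25) is a tail-weight
+# statistic that the psi_1 table (Table III) maps to an estimate of alpha.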
+
+
+class levy_stable_gen(rv_continuous):
+    r"""A Levy-stable continuous random variable.
+
+    %(before_notes)s
+
+    See Also
+    --------
+    levy, levy_l, cauchy, norm
+
+    Notes
+    -----
+    The distribution for `levy_stable` has characteristic function:
+
+    .. math::
+
+        \varphi(t, \alpha, \beta, c, \mu) =
+        e^{it\mu -|ct|^{\alpha}(1-i\beta\operatorname{sign}(t)\Phi(\alpha, t))}
+
+    where two different parameterizations are supported. The first :math:`S_1`:
+
+    .. math::
+
+        \Phi = \begin{cases}
+                \tan \left({\frac {\pi \alpha }{2}}\right)&\alpha \neq 1\\
+                -{\frac {2}{\pi }}\log |t|&\alpha =1
+                \end{cases}
+
+    The second :math:`S_0`:
+
+    .. math::
+
+        \Phi = \begin{cases}
+                -\tan \left({\frac {\pi \alpha }{2}}\right)(|ct|^{1-\alpha}-1)
+                &\alpha \neq 1\\
+                -{\frac {2}{\pi }}\log |ct|&\alpha =1
+                \end{cases}
+
+
+    The probability density function for `levy_stable` is:
+
+    .. math::
+
+        f(x) = \frac{1}{2\pi}\int_{-\infty}^\infty \varphi(t)e^{-ixt}\,dt
+
+    where :math:`-\infty < t < \infty`. This integral does not have a known
+    closed form.
+
+    `levy_stable` generalizes several distributions.  Where possible, they
+    should be used instead.  Specifically, when the shape parameters
+    assume the values in the table below, the corresponding equivalent
+    distribution should be used.
+
+    =========  ========  ===========
+    ``alpha``  ``beta``   Equivalent
+    =========  ========  ===========
+     1/2       -1        `levy_l`
+     1/2       1         `levy`
+     1         0         `cauchy`
+     2         any       `norm` (with ``scale=sqrt(2)``)
+    =========  ========  ===========
+
+    Evaluation of the pdf uses Nolan's piecewise integration approach with the
+    Zolotarev :math:`M` parameterization by default. There is also the option
+    to use direct numerical integration of the standard parameterization of the
+    characteristic function or to evaluate by taking the FFT of the
+    characteristic function.
+
+    The default method can be changed by setting the class variable
+    ``levy_stable.pdf_default_method`` to one of 'piecewise' for Nolan's
+    approach, 'dni' for direct numerical integration, or 'fft-simpson' for the
+    FFT based approach. For the sake of backwards compatibility, the methods
+    'best' and 'zolotarev' are equivalent to 'piecewise' and the method
+    'quadrature' is equivalent to 'dni'.
+
+    The parameterization can be changed by setting the class variable
+    ``levy_stable.parameterization`` to either 'S0' or 'S1'.
+    The default is 'S1'.
+
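+    For example, the S0 parameterization and direct numerical integration can
+    be selected up front (a brief usage sketch)::
+
+        levy_stable.parameterization = 'S0'
+        levy_stable.pdf_default_method = 'dni'
+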
+    To improve performance of piecewise and direct numerical integration one
+    can specify ``levy_stable.quad_eps`` (defaults to 1.2e-14). This is used
+    as both the absolute and relative quadrature tolerance for direct numerical
+    integration and as the relative quadrature tolerance for the piecewise
+    method. One can also specify ``levy_stable.piecewise_x_tol_near_zeta``
+    (defaults to 0.005) for how close x is to zeta before it is considered the
+    same as zeta [NO]. The exact check is
+    ``abs(x0 - zeta) < piecewise_x_tol_near_zeta*alpha**(1/alpha)``. One can
+    also specify ``levy_stable.piecewise_alpha_tol_near_one`` (defaults to
+    0.005) for how close alpha is to 1 before being considered equal to 1.
+
+    To increase accuracy of FFT calculation one can specify
+    ``levy_stable.pdf_fft_grid_spacing`` (defaults to 0.001) and
+    ``pdf_fft_n_points_two_power`` (defaults to None which means a value is
+    calculated that sufficiently covers the input range).
+
+    Further control over FFT calculation is available by setting
+    ``pdf_fft_interpolation_degree`` (defaults to 3) for spline order and
+    ``pdf_fft_interpolation_level`` for determining the number of points to use
+    in the Newton-Cotes formula when approximating the characteristic function
+    (considered experimental).
+
+    Evaluation of the cdf uses Nolan's piecewise integration approach with the
+    Zolotarev :math:`S_0` parameterization by default. There is also the option
+    to evaluate through integration of an interpolated spline of the pdf
+    calculated by means of the FFT method. The settings affecting FFT
+    calculation are the same as for pdf calculation. The default cdf method can
+    be changed by setting ``levy_stable.cdf_default_method`` to either
+    'piecewise' or 'fft-simpson'. For cdf calculations the Zolotarev method is
+    superior in accuracy, so FFT is disabled by default.
+
+    The fit method uses the quantile estimation method in [MC] to obtain an
+    initial parameter estimate, which MLE then refines. Note that MLE doesn't
+    always converge if using FFT for pdf calculations; this will be the case
+    for alpha <= 1, where the FFT approach doesn't give good approximations.
+
+    Setting the attribute ``levy_stable.pdf_fft_min_points_threshold`` to any
+    value other than None will set ``levy_stable.pdf_default_method`` to
+    'fft-simpson' if a valid default method is not otherwise set.
+
+    .. warning::
+
+        For both pdf and cdf calculations the FFT method is considered
+        experimental. Use the default piecewise method instead.
+
+    %(after_notes)s
+
+    References
+    ----------
+    .. [MC] McCulloch, J., 1986. Simple consistent estimators of stable
+        distribution parameters. Communications in Statistics - Simulation and
+        Computation 15, 1109-1136.
+    .. [WZ] Wang, Li and Zhang, Ji-Hong, 2008. Simpson's rule based FFT method
+        to compute densities of stable distribution.
+    .. [NO] Nolan, J., 1997. Numerical Calculation of Stable Densities and
+        Distribution Functions.
+    .. [HO] Hopcraft, K. I., Jakeman, E., Tanner, R. M. J., 1999. Lévy random
+        walks with fluctuating step number and multiscale behavior.
+
+    %(example)s
+
+    """
+    # Configurable options as class variables
+    # (accessible from self by attribute lookup).
+    parameterization = "S1"
+    pdf_default_method = "piecewise"
+    cdf_default_method = "piecewise"
+    quad_eps = _QUAD_EPS
+    piecewise_x_tol_near_zeta = 0.005
+    piecewise_alpha_tol_near_one = 0.005
+    pdf_fft_min_points_threshold = None
+    pdf_fft_grid_spacing = 0.001
+    pdf_fft_n_points_two_power = None
+    pdf_fft_interpolation_level = 3
+    pdf_fft_interpolation_degree = 3
+
+    def _argcheck(self, alpha, beta):
+        return (alpha > 0) & (alpha <= 2) & (beta <= 1) & (beta >= -1)
+
+    def _shape_info(self):
+        ialpha = _ShapeInfo("alpha", False, (0, 2), (False, True))
+        ibeta = _ShapeInfo("beta", False, (-1, 1), (True, True))
+        return [ialpha, ibeta]
+
+    def _parameterization(self):
+        allowed = ("S0", "S1")
+        pz = self.parameterization
+        if pz not in allowed:
+            raise RuntimeError(
+                f"Parameterization '{pz}' in supported list: {allowed}"
+            )
+        return pz
+
+    @inherit_docstring_from(rv_continuous)
+    def rvs(self, *args, **kwds):
+        X1 = super().rvs(*args, **kwds)
+
+        discrete = kwds.pop("discrete", None)  # noqa
+        rndm = kwds.pop("random_state", None)  # noqa
+        (alpha, beta), delta, gamma, size = self._parse_args_rvs(*args, **kwds)
+
+        # shift location for this parameterisation (S1)
+        X1 = np.where(
+            alpha == 1.0, X1 + 2 * beta * gamma * np.log(gamma) / np.pi, X1
+        )
+
+        if self._parameterization() == "S0":
+            return np.where(
+                alpha == 1.0,
+                X1 - (beta * 2 * gamma * np.log(gamma) / np.pi),
+                X1 - gamma * beta * np.tan(np.pi * alpha / 2.0),
+            )
+        elif self._parameterization() == "S1":
+            return X1
+
+    def _rvs(self, alpha, beta, size=None, random_state=None):
+        return _rvs_Z1(alpha, beta, size, random_state)
+
+    @inherit_docstring_from(rv_continuous)
+    def pdf(self, x, *args, **kwds):
+        # override base class version to correct
+        # location for S1 parameterization
+        if self._parameterization() == "S0":
+            return super().pdf(x, *args, **kwds)
+        elif self._parameterization() == "S1":
+            (alpha, beta), delta, gamma = self._parse_args(*args, **kwds)
+            if np.all(np.reshape(alpha, (1, -1))[0, :] != 1):
+                return super().pdf(x, *args, **kwds)
+            else:
+                # correct location for this parameterisation
+                x = np.reshape(x, (1, -1))[0, :]
+                x, alpha, beta = np.broadcast_arrays(x, alpha, beta)
+
+                data_in = np.dstack((x, alpha, beta))[0]
+                data_out = np.empty(shape=(len(data_in), 1))
+                # group data in unique arrays of alpha, beta pairs
+                uniq_param_pairs = np.unique(data_in[:, 1:], axis=0)
+                for pair in uniq_param_pairs:
+                    _alpha, _beta = pair
+                    _delta = (
+                        delta + 2 * _beta * gamma * np.log(gamma) / np.pi
+                        if _alpha == 1.0
+                        else delta
+                    )
+                    data_mask = np.all(data_in[:, 1:] == pair, axis=-1)
+                    _x = data_in[data_mask, 0]
+                    data_out[data_mask] = (
+                        super()
+                        .pdf(_x, _alpha, _beta, loc=_delta, scale=gamma)
+                        .reshape(len(_x), 1)
+                    )
+                output = data_out.T[0]
+                if output.shape == (1,):
+                    return output[0]
+                return output
+
+    def _pdf(self, x, alpha, beta):
+        if self._parameterization() == "S0":
+            _pdf_single_value_piecewise = _pdf_single_value_piecewise_Z0
+            _pdf_single_value_cf_integrate = _pdf_single_value_cf_integrate_Z0
+            _cf = _cf_Z0
+        elif self._parameterization() == "S1":
+            _pdf_single_value_piecewise = _pdf_single_value_piecewise_Z1
+            _pdf_single_value_cf_integrate = _pdf_single_value_cf_integrate_Z1
+            _cf = _cf_Z1
+
+        x = np.asarray(x).reshape(1, -1)[0, :]
+
+        x, alpha, beta = np.broadcast_arrays(x, alpha, beta)
+
+        data_in = np.dstack((x, alpha, beta))[0]
+        data_out = np.empty(shape=(len(data_in), 1))
+
+        pdf_default_method_name = levy_stable_gen.pdf_default_method
+        if pdf_default_method_name in ("piecewise", "best", "zolotarev"):
+            pdf_single_value_method = _pdf_single_value_piecewise
+        elif pdf_default_method_name in ("dni", "quadrature"):
+            pdf_single_value_method = _pdf_single_value_cf_integrate
+        elif (
+            pdf_default_method_name == "fft-simpson"
+            or self.pdf_fft_min_points_threshold is not None
+        ):
+            pdf_single_value_method = None
+
+        pdf_single_value_kwds = {
+            "quad_eps": self.quad_eps,
+            "piecewise_x_tol_near_zeta": self.piecewise_x_tol_near_zeta,
+            "piecewise_alpha_tol_near_one": self.piecewise_alpha_tol_near_one,
+        }
+
+        fft_grid_spacing = self.pdf_fft_grid_spacing
+        fft_n_points_two_power = self.pdf_fft_n_points_two_power
+        fft_interpolation_level = self.pdf_fft_interpolation_level
+        fft_interpolation_degree = self.pdf_fft_interpolation_degree
+
+        # group data in unique arrays of alpha, beta pairs
+        uniq_param_pairs = np.unique(data_in[:, 1:], axis=0)
+        for pair in uniq_param_pairs:
+            data_mask = np.all(data_in[:, 1:] == pair, axis=-1)
+            data_subset = data_in[data_mask]
+            if pdf_single_value_method is not None:
+                data_out[data_mask] = np.array(
+                    [
+                        pdf_single_value_method(
+                            _x, _alpha, _beta, **pdf_single_value_kwds
+                        )
+                        for _x, _alpha, _beta in data_subset
+                    ]
+                ).reshape(len(data_subset), 1)
+            else:
+                warnings.warn(
+                    "Density calculations experimental for FFT method."
+                    + " Use combination of piecewise and dni methods instead.",
+                    RuntimeWarning,
+                )
+                _alpha, _beta = pair
+                _x = data_subset[:, (0,)]
+
+                if _alpha < 1.0:
+                    raise RuntimeError(
+                        "FFT method does not work well for alpha less than 1."
+                    )
+
+                # need enough points to "cover" _x for interpolation
+                if fft_grid_spacing is None and fft_n_points_two_power is None:
+                    raise ValueError(
+                        "One of fft_grid_spacing or fft_n_points_two_power "
+                        + "needs to be set."
+                    )
+                max_abs_x = np.max(np.abs(_x))
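+                # when only the number of points 2**q is given, choose h so
+                # that the integration range [-N*h/2, N*h/2] spans 4*max|x|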
+                h = (
+                    2 ** (3 - fft_n_points_two_power) * max_abs_x
+                    if fft_grid_spacing is None
+                    else fft_grid_spacing
+                )
+                q = (
+                    np.ceil(np.log(2 * max_abs_x / h) / np.log(2)) + 2
+                    if fft_n_points_two_power is None
+                    else int(fft_n_points_two_power)
+                )
+
+                # for some parameters the range of x can be quite large;
+                # choose an arbitrary cutoff (8GB) to save on computer
+                # memory.
+                MAX_Q = 30
+                if q > MAX_Q:
+                    raise RuntimeError(
+                        "fft_n_points_two_power has a maximum "
+                        + f"value of {MAX_Q}"
+                    )
+
+                density_x, density = pdf_from_cf_with_fft(
+                    lambda t: _cf(t, _alpha, _beta),
+                    h=h,
+                    q=q,
+                    level=fft_interpolation_level,
+                )
+                f = interpolate.InterpolatedUnivariateSpline(
+                    density_x, np.real(density), k=fft_interpolation_degree
+                )  # spline-interpolate the FFT output (cubic by default)
+                data_out[data_mask] = f(_x)
+
+        return data_out.T[0]
+
+    @inherit_docstring_from(rv_continuous)
+    def cdf(self, x, *args, **kwds):
+        # override base class version to correct
+        # location for S1 parameterization
+        # NOTE: this is nearly identical to pdf() above
+        if self._parameterization() == "S0":
+            return super().cdf(x, *args, **kwds)
+        elif self._parameterization() == "S1":
+            (alpha, beta), delta, gamma = self._parse_args(*args, **kwds)
+            if np.all(np.reshape(alpha, (1, -1))[0, :] != 1):
+                return super().cdf(x, *args, **kwds)
+            else:
+                # correct location for this parameterisation
+                x = np.reshape(x, (1, -1))[0, :]
+                x, alpha, beta = np.broadcast_arrays(x, alpha, beta)
+
+                data_in = np.dstack((x, alpha, beta))[0]
+                data_out = np.empty(shape=(len(data_in), 1))
+                # group data in unique arrays of alpha, beta pairs
+                uniq_param_pairs = np.unique(data_in[:, 1:], axis=0)
+                for pair in uniq_param_pairs:
+                    _alpha, _beta = pair
+                    _delta = (
+                        delta + 2 * _beta * gamma * np.log(gamma) / np.pi
+                        if _alpha == 1.0
+                        else delta
+                    )
+                    data_mask = np.all(data_in[:, 1:] == pair, axis=-1)
+                    _x = data_in[data_mask, 0]
+                    data_out[data_mask] = (
+                        super()
+                        .cdf(_x, _alpha, _beta, loc=_delta, scale=gamma)
+                        .reshape(len(_x), 1)
+                    )
+                output = data_out.T[0]
+                if output.shape == (1,):
+                    return output[0]
+                return output
+
+    def _cdf(self, x, alpha, beta):
+        if self._parameterization() == "S0":
+            _cdf_single_value_piecewise = _cdf_single_value_piecewise_Z0
+            _cf = _cf_Z0
+        elif self._parameterization() == "S1":
+            _cdf_single_value_piecewise = _cdf_single_value_piecewise_Z1
+            _cf = _cf_Z1
+
+        x = np.asarray(x).reshape(1, -1)[0, :]
+
+        x, alpha, beta = np.broadcast_arrays(x, alpha, beta)
+
+        data_in = np.dstack((x, alpha, beta))[0]
+        data_out = np.empty(shape=(len(data_in), 1))
+
+        cdf_default_method_name = self.cdf_default_method
+        if cdf_default_method_name == "piecewise":
+            cdf_single_value_method = _cdf_single_value_piecewise
+        elif cdf_default_method_name == "fft-simpson":
+            cdf_single_value_method = None
+
+        cdf_single_value_kwds = {
+            "quad_eps": self.quad_eps,
+            "piecewise_x_tol_near_zeta": self.piecewise_x_tol_near_zeta,
+            "piecewise_alpha_tol_near_one": self.piecewise_alpha_tol_near_one,
+        }
+
+        fft_grid_spacing = self.pdf_fft_grid_spacing
+        fft_n_points_two_power = self.pdf_fft_n_points_two_power
+        fft_interpolation_level = self.pdf_fft_interpolation_level
+        fft_interpolation_degree = self.pdf_fft_interpolation_degree
+
+        # group data in unique arrays of alpha, beta pairs
+        uniq_param_pairs = np.unique(data_in[:, 1:], axis=0)
+        for pair in uniq_param_pairs:
+            data_mask = np.all(data_in[:, 1:] == pair, axis=-1)
+            data_subset = data_in[data_mask]
+            if cdf_single_value_method is not None:
+                data_out[data_mask] = np.array(
+                    [
+                        cdf_single_value_method(
+                            _x, _alpha, _beta, **cdf_single_value_kwds
+                        )
+                        for _x, _alpha, _beta in data_subset
+                    ]
+                ).reshape(len(data_subset), 1)
+            else:
+                warnings.warn(
+                    "Cumulative density calculations experimental for FFT"
+                    + " method. Use piecewise method instead.",
+                    RuntimeWarning,
+                )
+                _alpha, _beta = pair
+                _x = data_subset[:, (0,)]
+
+                # need enough points to "cover" _x for interpolation
+                if fft_grid_spacing is None and fft_n_points_two_power is None:
+                    raise ValueError(
+                        "One of fft_grid_spacing or fft_n_points_two_power "
+                        + "needs to be set."
+                    )
+                max_abs_x = np.max(np.abs(_x))
+                h = (
+                    2 ** (3 - fft_n_points_two_power) * max_abs_x
+                    if fft_grid_spacing is None
+                    else fft_grid_spacing
+                )
+                q = (
+                    np.ceil(np.log(2 * max_abs_x / h) / np.log(2)) + 2
+                    if fft_n_points_two_power is None
+                    else int(fft_n_points_two_power)
+                )
+
+                density_x, density = pdf_from_cf_with_fft(
+                    lambda t: _cf(t, _alpha, _beta),
+                    h=h,
+                    q=q,
+                    level=fft_interpolation_level,
+                )
+                f = interpolate.InterpolatedUnivariateSpline(
+                    density_x, np.real(density), k=fft_interpolation_degree
+                )
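+                # the cdf at each x is the integral of the interpolated pdf
+                # from the lower support end self.a up to x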
+                data_out[data_mask] = np.array(
+                    [f.integral(self.a, x_1) for x_1 in _x]
+                ).reshape(data_out[data_mask].shape)
+
+        return data_out.T[0]
+
+    def _fitstart(self, data):
+        if self._parameterization() == "S0":
+            _fitstart = _fitstart_S0
+        elif self._parameterization() == "S1":
+            _fitstart = _fitstart_S1
+        return _fitstart(data)
+
+    def _stats(self, alpha, beta):
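+        # the mean is defined only for alpha > 1; the variance is finite
+        # only at alpha = 2, where the distribution is normal with scale
+        # sqrt(2) and hence has variance 2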
+        mu = 0 if alpha > 1 else np.nan
+        mu2 = 2 if alpha == 2 else np.inf
+        g1 = 0.0 if alpha == 2.0 else np.NaN
+        g2 = 0.0 if alpha == 2.0 else np.NaN
+        return mu, mu2, g1, g2
+
+
+# cotes numbers - see sequence from http://oeis.org/A100642
+Cotes_table = np.array(
+    [[], [1]] + [v[2] for v in _builtincoeffs.values()], dtype=object
+)
+Cotes = np.array(
+    [
+        np.pad(r, (0, len(Cotes_table) - 1 - len(r)), mode='constant')
+        for r in Cotes_table
+    ]
+)
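+# e.g. Cotes[3, :3] is [1, 4, 1]: the 3-point Newton-Cotes (Simpson)
+# weights used by the default level=3 below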
+
+
+def pdf_from_cf_with_fft(cf, h=0.01, q=9, level=3):
+    """Calculates pdf from characteristic function.
+
+    Uses fast Fourier transform with Newton-Cotes integration following [WZ].
+    Defaults to using Simpson's method (3-point Newton-Cotes integration).
+
+    Parameters
+    ----------
+    cf : callable
+        Single argument function from float -> complex expressing a
+        characteristic function for some distribution.
+    h : Optional[float]
+        Step size for Newton-Cotes integration. Default: 0.01
+    q : Optional[int]
+        Use 2**q steps when performing Newton-Cotes integration.
+        The infinite integral in the inverse Fourier transform will then
+        be restricted to the interval [-2**q * h / 2, 2**q * h / 2]. Setting
+        the number of steps equal to a power of 2 allows the fft to be
+        calculated in O(n*log(n)) time rather than O(n**2).
+        Default: 9
+    level : Optional[int]
+        Calculate integral using n-point Newton-Cotes integration for
+        n = level. The 3-point Newton-Cotes formula corresponds to Simpson's
+        rule. Default: 3
+
+    Returns
+    -------
+    x_l : ndarray
+        Array of points x at which pdf is estimated. 2**q equally spaced
+        points from -pi/h up to but not including pi/h.
+    density : ndarray
+        Estimated values of pdf corresponding to cf at points in x_l.
+
+    References
+    ----------
+    .. [WZ] Wang, Li and Zhang, Ji-Hong, 2008. Simpson's rule based FFT method
+        to compute densities of stable distribution.
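+
+    Examples
+    --------
+    As a rough sanity check (a usage sketch; ``h=0.05`` here is illustrative,
+    and ``pdf_from_cf_with_fft`` is assumed to be in scope), the standard
+    normal density can be recovered from its characteristic function
+    :math:`e^{-t^2/2}`:
+
+    >>> import numpy as np
+    >>> from scipy.stats import norm
+    >>> x_l, density = pdf_from_cf_with_fft(lambda t: np.exp(-t**2 / 2),
+    ...                                     h=0.05)
+    >>> mask = np.abs(x_l) <= 5
+    >>> np.allclose(np.real(density[mask]), norm.pdf(x_l[mask]), atol=1e-4)
+    True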
+    """
+    n = level
+    N = 2**q
+    steps = np.arange(0, N)
+    L = N * h / 2
+    x_l = np.pi * (steps - N / 2) / L
+    if level > 1:
+        indices = np.arange(n).reshape(n, 1)
+        s1 = np.sum(
+            (-1) ** steps * Cotes[n, indices] * np.fft.fft(
+                (-1)**steps * cf(-L + h * steps + h * indices / (n - 1))
+            ) * np.exp(
+                1j * np.pi * indices / (n - 1)
+                - 2 * 1j * np.pi * indices * steps /
+                (N * (n - 1))
+            ),
+            axis=0
+        )
+    else:
+        s1 = (-1) ** steps * Cotes[n, 0] * np.fft.fft(
+            (-1) ** steps * cf(-L + h * steps)
+        )
+    density = h * s1 / (2 * np.pi * np.sum(Cotes[n]))
+    return (x_l, density)
+
+
+levy_stable = levy_stable_gen(name="levy_stable")
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_mannwhitneyu.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_mannwhitneyu.py
new file mode 100644
index 00000000..3bd56fac
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_mannwhitneyu.py
@@ -0,0 +1,493 @@
+import numpy as np
+from collections import namedtuple
+from scipy import special
+from scipy import stats
+from ._axis_nan_policy import _axis_nan_policy_factory
+
+
+def _broadcast_concatenate(x, y, axis):
+    '''Broadcast then concatenate arrays, leaving concatenation axis last'''
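+    # e.g. x of shape (2, 1, 3) and y of shape (1, 5, 4) with axis=-1 yield
+    # z of shape (2, 5, 7): the (2, 5) batch shape is broadcast and the
+    # last axis is concatenated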
+    x = np.moveaxis(x, axis, -1)
+    y = np.moveaxis(y, axis, -1)
+    z = np.broadcast(x[..., 0], y[..., 0])
+    x = np.broadcast_to(x, z.shape + (x.shape[-1],))
+    y = np.broadcast_to(y, z.shape + (y.shape[-1],))
+    z = np.concatenate((x, y), axis=-1)
+    return x, y, z
+
+
+class _MWU:
+    '''Distribution of MWU statistic under the null hypothesis'''
+    # Possible improvement: if m and n are small enough, use integer arithmetic
+
+    def __init__(self):
+        '''Minimal initializer'''
+        self._fmnks = -np.ones((1, 1, 1))
+        self._recursive = None
+
+    def pmf(self, k, m, n):
+        if (self._recursive is None and m <= 500 and n <= 500
+                or self._recursive):
+            return self.pmf_recursive(k, m, n)
+        else:
+            return self.pmf_iterative(k, m, n)
+
+    def pmf_recursive(self, k, m, n):
+        '''Probability mass function, recursive version'''
+        self._resize_fmnks(m, n, np.max(k))
+        # could loop over just the unique elements, but probably not worth
+        # the time to find them
+        for i in np.ravel(k):
+            self._f(m, n, i)
+        return self._fmnks[m, n, k] / special.binom(m + n, m)
+
+    def pmf_iterative(self, k, m, n):
+        '''Probability mass function, iterative version'''
+        fmnks = {}
+        for i in np.ravel(k):
+            fmnks = _mwu_f_iterative(m, n, i, fmnks)
+        return (np.array([fmnks[(m, n, ki)] for ki in k])
+                / special.binom(m + n, m))
+
+    def cdf(self, k, m, n):
+        '''Cumulative distribution function'''
+        # We could use the fact that the distribution is symmetric to avoid
+        # summing more than m*n/2 terms, but it might not be worth the
+        # overhead. Let's leave that to an improvement.
+        pmfs = self.pmf(np.arange(0, np.max(k) + 1), m, n)
+        cdfs = np.cumsum(pmfs)
+        return cdfs[k]
+
+    def sf(self, k, m, n):
+        '''Survival function'''
+        # Use the fact that the distribution is symmetric; i.e.
+        # _f(m, n, m*n-k) = _f(m, n, k), and sum from the left
+        k = m*n - k
+        # Note that both CDF and SF include the PMF at k. The p-value is
+        # calculated from the SF and should include the mass at k, so this
+        # is desirable
+        return self.cdf(k, m, n)
+
+    def _resize_fmnks(self, m, n, k):
+        '''If necessary, expand the array that remembers PMF values'''
+        # could probably use `np.pad` but I'm not sure it would save code
+        shape_old = np.array(self._fmnks.shape)
+        shape_new = np.array((m+1, n+1, k+1))
+        if np.any(shape_new > shape_old):
+            shape = np.maximum(shape_old, shape_new)
+            fmnks = -np.ones(shape)             # create the new array
+            m0, n0, k0 = shape_old
+            fmnks[:m0, :n0, :k0] = self._fmnks  # copy remembered values
+            self._fmnks = fmnks
+
+    def _f(self, m, n, k):
+        '''Recursive implementation of the function in [3] Theorem 2.5'''
+
+        # [3] Theorem 2.5 Line 1
+        if k < 0 or m < 0 or n < 0 or k > m*n:
+            return 0
+
+        # if already calculated, return the value
+        if self._fmnks[m, n, k] >= 0:
+            return self._fmnks[m, n, k]
+
+        if k == 0 and m >= 0 and n >= 0:  # [3] Theorem 2.5 Line 2
+            fmnk = 1
+        else:   # [3] Theorem 2.5 Line 3 / Equation 3
+            fmnk = self._f(m-1, n, k-n) + self._f(m, n-1, k)
+
+        self._fmnks[m, n, k] = fmnk  # remember result
+
+        return fmnk
+
+
+# Maintain state for faster repeat calls to mannwhitneyu w/ method='exact'
+_mwu_state = _MWU()
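+# For example, _mwu_state.pmf(np.arange(5), 2, 2) evaluates the exact null
+# distribution of U for sample sizes m = n = 2: [1, 1, 2, 1, 1] / 6.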
+
+
+def _mwu_f_iterative(m, n, k, fmnks):
+    '''Iterative implementation of the function in [3] Theorem 2.5'''
+
+    def _base_case(m, n, k):
+        '''Base cases from recursive version'''
+
+        # if already calculated, return the value
+        if fmnks.get((m, n, k), -1) >= 0:
+            return fmnks[(m, n, k)]
+
+        # [3] Theorem 2.5 Line 1
+        elif k < 0 or m < 0 or n < 0 or k > m*n:
+            return 0
+
+        # [3] Theorem 2.5 Line 2
+        elif k == 0 and m >= 0 and n >= 0:
+            return 1
+
+        return None
+
+    stack = [(m, n, k)]
+    fmnk = None
+
+    while stack:
+        # Popping only if necessary would save a tiny bit of time, but it's
+        # not worth the complexity.
+        m, n, k = stack.pop()
+
+        # If we're at a base case, continue (stack unwinds)
+        fmnk = _base_case(m, n, k)
+        if fmnk is not None:
+            fmnks[(m, n, k)] = fmnk
+            continue
+
+        # If both terms are base cases, continue (stack unwinds)
+        f1 = _base_case(m-1, n, k-n)
+        f2 = _base_case(m, n-1, k)
+        if f1 is not None and f2 is not None:
+            # [3] Theorem 2.5 Line 3 / Equation 3
+            fmnk = f1 + f2
+            fmnks[(m, n, k)] = fmnk
+            continue
+
+        # recurse deeper
+        stack.append((m, n, k))
+        if f1 is None:
+            stack.append((m-1, n, k-n))
+        if f2 is None:
+            stack.append((m, n-1, k))
+
+    return fmnks
+
+
+def _tie_term(ranks):
+    """Tie correction term"""
+    # element i of t is the number of elements sharing rank i
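+    # e.g. ranks [1, 2.5, 2.5, 4] give counts t = [1, 2, 1], so the
+    # correction term is (2**3 - 2) = 6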
+    _, t = np.unique(ranks, return_counts=True, axis=-1)
+    return (t**3 - t).sum(axis=-1)
+
+
+def _get_mwu_z(U, n1, n2, ranks, axis=0, continuity=True):
+    '''Standardized MWU statistic'''
+    # Follows mannwhitneyu [2]
+    mu = n1 * n2 / 2
+    n = n1 + n2
+
+    # Tie correction according to [2]
+    tie_term = np.apply_along_axis(_tie_term, -1, ranks)
+    s = np.sqrt(n1*n2/12 * ((n + 1) - tie_term/(n*(n-1))))
+
+    # equivalent to using scipy.stats.tiecorrect
+    # T = np.apply_along_axis(stats.tiecorrect, -1, ranks)
+    # s = np.sqrt(T * n1 * n2 * (n1+n2+1) / 12.0)
+
+    numerator = U - mu
+
+    # Continuity correction.
+    # Because SF is always used to calculate the p-value, we can always
+    # _subtract_ 0.5 for the continuity correction. This always increases the
+    # p-value to account for the rest of the probability mass _at_ q = U.
+    if continuity:
+        numerator -= 0.5
+
+    # no problem evaluating the norm SF at infinity
+    with np.errstate(divide='ignore', invalid='ignore'):
+        z = numerator / s
+    return z
+
+
+def _mwu_input_validation(x, y, use_continuity, alternative, axis, method):
+    ''' Input validation and standardization for mannwhitneyu '''
+    # Would use np.asarray_chkfinite, but infs are OK
+    x, y = np.atleast_1d(x), np.atleast_1d(y)
+    if np.isnan(x).any() or np.isnan(y).any():
+        raise ValueError('`x` and `y` must not contain NaNs.')
+    if np.size(x) == 0 or np.size(y) == 0:
+        raise ValueError('`x` and `y` must be of nonzero size.')
+
+    bools = {True, False}
+    if use_continuity not in bools:
+        raise ValueError(f'`use_continuity` must be one of {bools}.')
+
+    alternatives = {"two-sided", "less", "greater"}
+    alternative = alternative.lower()
+    if alternative not in alternatives:
+        raise ValueError(f'`alternative` must be one of {alternatives}.')
+
+    axis_int = int(axis)
+    if axis != axis_int:
+        raise ValueError('`axis` must be an integer.')
+
+    methods = {"asymptotic", "exact", "auto"}
+    method = method.lower()
+    if method not in methods:
+        raise ValueError(f'`method` must be one of {methods}.')
+
+    return x, y, use_continuity, alternative, axis_int, method
+
+
+def _tie_check(xy):
+    """Find any ties in data"""
+    _, t = np.unique(xy, return_counts=True, axis=-1)
+    return np.any(t != 1)
+
+
+def _mwu_choose_method(n1, n2, xy, method):
+    """Choose method 'asymptotic' or 'exact' depending on input size, ties"""
+
+    # if both inputs are large, asymptotic is OK
+    if n1 > 8 and n2 > 8:
+        return "asymptotic"
+
+    # if there are any ties, asymptotic is preferred
+    if np.apply_along_axis(_tie_check, -1, xy).any():
+        return "asymptotic"
+
+    return "exact"
+
+
+MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic', 'pvalue'))
+
+
+@_axis_nan_policy_factory(MannwhitneyuResult, n_samples=2)
+def mannwhitneyu(x, y, use_continuity=True, alternative="two-sided",
+                 axis=0, method="auto"):
+    r'''Perform the Mann-Whitney U rank test on two independent samples.
+
+    The Mann-Whitney U test is a nonparametric test of the null hypothesis
+    that the distribution underlying sample `x` is the same as the
+    distribution underlying sample `y`. It is often used as a test of
+    difference in location between distributions.
+
+    Parameters
+    ----------
+    x, y : array-like
+        N-d arrays of samples. The arrays must be broadcastable except along
+        the dimension given by `axis`.
+    use_continuity : bool, optional
+        Whether a continuity correction (1/2) should be applied.
+        Default is True when `method` is ``'asymptotic'``; has no effect
+        otherwise.
+    alternative : {'two-sided', 'less', 'greater'}, optional
+        Defines the alternative hypothesis. Default is 'two-sided'.
+        Let *F(u)* and *G(u)* be the cumulative distribution functions of the
+        distributions underlying `x` and `y`, respectively. Then the following
+        alternative hypotheses are available:
+
+        * 'two-sided': the distributions are not equal, i.e. *F(u) ≠ G(u)* for
+          at least one *u*.
+        * 'less': the distribution underlying `x` is stochastically less
+          than the distribution underlying `y`, i.e. *F(u) > G(u)* for all *u*.
+        * 'greater': the distribution underlying `x` is stochastically greater
+          than the distribution underlying `y`, i.e. *F(u) < G(u)* for all *u*.
+
+        Under a more restrictive set of assumptions, the alternative hypotheses
+        can be expressed in terms of the locations of the distributions;
+        see [5] section 5.1.
+    axis : int, optional
+        Axis along which to perform the test. Default is 0.
+    method : {'auto', 'asymptotic', 'exact'}, optional
+        Selects the method used to calculate the *p*-value.
+        Default is 'auto'. The following options are available.
+
+        * ``'asymptotic'``: compares the standardized test statistic
+          against the normal distribution, correcting for ties.
+        * ``'exact'``: computes the exact *p*-value by comparing the observed
+          :math:`U` statistic against the exact distribution of the :math:`U`
+          statistic under the null hypothesis. No correction is made for ties.
+        * ``'auto'``: chooses ``'exact'`` when the size of one of the samples
+          is 8 or less and there are no ties; chooses ``'asymptotic'``
+          otherwise.
+
+    Returns
+    -------
+    res : MannwhitneyuResult
+        An object containing attributes:
+
+        statistic : float
+            The Mann-Whitney U statistic corresponding with sample `x`. See
+            Notes for the test statistic corresponding with sample `y`.
+        pvalue : float
+            The associated *p*-value for the chosen `alternative`.
+
+    Notes
+    -----
+    If ``U1`` is the statistic corresponding with sample `x`, then the
+    statistic corresponding with sample `y` is
+    ``U2 = x.shape[axis] * y.shape[axis] - U1``.
+
+    `mannwhitneyu` is for independent samples. For related / paired samples,
+    consider `scipy.stats.wilcoxon`.
+
+    `method` ``'exact'`` is recommended when there are no ties and when either
+    sample size is less than 8 [1]_. The implementation follows the recurrence
+    relation originally proposed in [1]_ as it is described in [3]_.
+    Note that the exact method is *not* corrected for ties, but
+    `mannwhitneyu` will not raise errors or warnings if there are ties in the
+    data.
+
+    The Mann-Whitney U test is a non-parametric version of the t-test for
+    independent samples. When the means of samples from the populations
+    are normally distributed, consider `scipy.stats.ttest_ind`.
+
+    See Also
+    --------
+    scipy.stats.wilcoxon, scipy.stats.ranksums, scipy.stats.ttest_ind
+
+    References
+    ----------
+    .. [1] H.B. Mann and D.R. Whitney, "On a test of whether one of two random
+           variables is stochastically larger than the other", The Annals of
+           Mathematical Statistics, Vol. 18, pp. 50-60, 1947.
+    .. [2] Mann-Whitney U Test, Wikipedia,
+           http://en.wikipedia.org/wiki/Mann-Whitney_U_test
+    .. [3] A. Di Bucchianico, "Combinatorics, computer algebra, and the
+           Wilcoxon-Mann-Whitney test", Journal of Statistical Planning and
+           Inference, Vol. 79, pp. 349-364, 1999.
+    .. [4] Rosie Shier, "Statistics: 2.3 The Mann-Whitney U Test", Mathematics
+           Learning Support Centre, 2004.
+    .. [5] Michael P. Fay and Michael A. Proschan. "Wilcoxon-Mann-Whitney
+           or t-test? On assumptions for hypothesis tests and multiple
+           interpretations of decision rules." Statistics surveys, Vol. 4, pp.
+           1-39, 2010. https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2857732/
+
+    Examples
+    --------
+    We follow the example from [4]_: nine randomly sampled young adults were
+    diagnosed with type II diabetes at the ages below.
+
+    >>> males = [19, 22, 16, 29, 24]
+    >>> females = [20, 11, 17, 12]
+
+    We use the Mann-Whitney U test to assess whether there is a statistically
+    significant difference in the diagnosis age of males and females.
+    The null hypothesis is that the distribution of male diagnosis ages is
+    the same as the distribution of female diagnosis ages. We decide
+    that a confidence level of 95% is required to reject the null hypothesis
+    in favor of the alternative that the distributions are different.
+    Since the number of samples is very small and there are no ties in the
+    data, we can compare the observed test statistic against the *exact*
+    distribution of the test statistic under the null hypothesis.
+
+    >>> from scipy.stats import mannwhitneyu
+    >>> U1, p = mannwhitneyu(males, females, method="exact")
+    >>> print(U1)
+    17.0
+
+    `mannwhitneyu` always reports the statistic associated with the first
+    sample, which, in this case, is males. This agrees with :math:`U_M = 17`
+    reported in [4]_. The statistic associated with the second sample
+    can be calculated:
+
+    >>> nx, ny = len(males), len(females)
+    >>> U2 = nx*ny - U1
+    >>> print(U2)
+    3.0
+
+    This agrees with :math:`U_F = 3` reported in [4]_. The two-sided
+    *p*-value can be calculated from either statistic, and the value produced
+    by `mannwhitneyu` agrees with :math:`p = 0.11` reported in [4]_.
+
+    >>> print(p)
+    0.1111111111111111
+
+    The exact distribution of the test statistic is asymptotically normal, so
+    the example continues by comparing the exact *p*-value against the
+    *p*-value produced using the normal approximation.
+
+    >>> _, pnorm = mannwhitneyu(males, females, method="asymptotic")
+    >>> print(pnorm)
+    0.11134688653314041
+
+    Here `mannwhitneyu`'s reported *p*-value appears to conflict with the
+    value :math:`p = 0.09` given in [4]_. The reason is that [4]_
+    does not apply the continuity correction performed by `mannwhitneyu`;
+    `mannwhitneyu` reduces the distance between the test statistic and the
+    mean :math:`\mu = n_x n_y / 2` by 0.5 to correct for the fact that the
+    discrete statistic is being compared against a continuous distribution.
+    Here, the :math:`U` statistic used is less than the mean, so we reduce
+    the distance by adding 0.5 in the numerator.
+
+    >>> import numpy as np
+    >>> from scipy.stats import norm
+    >>> U = min(U1, U2)
+    >>> N = nx + ny
+    >>> z = (U - nx*ny/2 + 0.5) / np.sqrt(nx*ny * (N + 1)/ 12)
+    >>> p = 2 * norm.cdf(z)  # use CDF to get p-value from smaller statistic
+    >>> print(p)
+    0.11134688653314041
+
+    If desired, we can disable the continuity correction to get a result
+    that agrees with that reported in [4]_.
+
+    >>> _, pnorm = mannwhitneyu(males, females, use_continuity=False,
+    ...                         method="asymptotic")
+    >>> print(pnorm)
+    0.0864107329737
+
+    Regardless of whether we perform an exact or asymptotic test, the
+    probability of the test statistic being as extreme or more extreme by
+    chance exceeds 5%, so we do not consider the results statistically
+    significant.
+
+    Suppose that, before seeing the data, we had hypothesized that females
+    would tend to be diagnosed at a younger age than males.
+    In that case, it would be natural to provide the female ages as the
+    first input, and we would have performed a one-sided test using
+    ``alternative = 'less'``: females are diagnosed at an age that is
+    stochastically less than that of males.
+
+    >>> res = mannwhitneyu(females, males, alternative="less", method="exact")
+    >>> print(res)
+    MannwhitneyuResult(statistic=3.0, pvalue=0.05555555555555555)
+
+    Again, the probability of getting a sufficiently low value of the
+    test statistic by chance under the null hypothesis is greater than 5%,
+    so we do not reject the null hypothesis in favor of our alternative.
+
+    If it is reasonable to assume that the means of samples from the
+    populations are normally distributed, we could have used a t-test to
+    perform the analysis.
+
+    >>> from scipy.stats import ttest_ind
+    >>> res = ttest_ind(females, males, alternative="less")
+    >>> print(res)
+    Ttest_indResult(statistic=-2.239334696520584, pvalue=0.030068441095757924)
+
+    Under this assumption, the *p*-value would be low enough to reject the
+    null hypothesis in favor of the alternative.
+
+    '''
+
+    x, y, use_continuity, alternative, axis_int, method = (
+        _mwu_input_validation(x, y, use_continuity, alternative, axis, method))
+
+    x, y, xy = _broadcast_concatenate(x, y, axis)
+
+    n1, n2 = x.shape[-1], y.shape[-1]
+
+    if method == "auto":
+        method = _mwu_choose_method(n1, n2, xy, method)
+
+    # Follows [2]
+    ranks = stats.rankdata(xy, axis=-1)  # method 2, step 1
+    R1 = ranks[..., :n1].sum(axis=-1)    # method 2, step 2
+    U1 = R1 - n1*(n1+1)/2                # method 2, step 3
+    U2 = n1 * n2 - U1                    # as U1 + U2 = n1 * n2
+
+    if alternative == "greater":
+        U, f = U1, 1  # U is the statistic to use for p-value, f is a factor
+    elif alternative == "less":
+        U, f = U2, 1  # Due to symmetry, use SF of U2 rather than CDF of U1
+    else:
+        U, f = np.maximum(U1, U2), 2  # multiply SF by two for two-sided test
+
+    if method == "exact":
+        p = _mwu_state.sf(U.astype(int), n1, n2)
+    elif method == "asymptotic":
+        z = _get_mwu_z(U, n1, n2, ranks, continuity=use_continuity)
+        p = stats.norm.sf(z)
+    p *= f
+
+    # Ensure that the p-value is not greater than 1
+    # This could happen for the exact test when U = m*n/2
+    p = np.clip(p, 0, 1)
+
+    return MannwhitneyuResult(U1, p)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_morestats.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_morestats.py
new file mode 100644
index 00000000..0a72b347
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_morestats.py
@@ -0,0 +1,4186 @@
+from __future__ import annotations
+import math
+import warnings
+from collections import namedtuple
+
+import numpy as np
+from numpy import (isscalar, r_, log, around, unique, asarray, zeros,
+                   arange, sort, amin, amax, atleast_1d, sqrt, array,
+                   compress, pi, exp, ravel, count_nonzero, sin, cos,
+                   arctan2, hypot)
+
+from scipy import optimize
+from scipy import special
+from scipy._lib._bunch import _make_tuple_bunch
+from scipy._lib._util import _rename_parameter, _contains_nan
+
+from . import _statlib
+from . import _stats_py
+from ._fit import FitResult
+from ._stats_py import find_repeats, _normtest_finish, SignificanceResult
+from .contingency import chi2_contingency
+from . import distributions
+from ._distn_infrastructure import rv_generic
+from ._hypotests import _get_wilcoxon_distr
+from ._axis_nan_policy import _axis_nan_policy_factory
+from .._lib.deprecation import _deprecated
+
+
+__all__ = ['mvsdist',
+           'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot',
+           'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot',
+           'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', 'binom_test',
+           'fligner', 'mood', 'wilcoxon', 'median_test',
+           'circmean', 'circvar', 'circstd', 'anderson_ksamp',
+           'yeojohnson_llf', 'yeojohnson', 'yeojohnson_normmax',
+           'yeojohnson_normplot', 'directional_stats'
+           ]
+
+
+Mean = namedtuple('Mean', ('statistic', 'minmax'))
+Variance = namedtuple('Variance', ('statistic', 'minmax'))
+Std_dev = namedtuple('Std_dev', ('statistic', 'minmax'))
+
+
+def bayes_mvs(data, alpha=0.90):
+    r"""
+    Bayesian confidence intervals for the mean, var, and std.
+
+    Parameters
+    ----------
+    data : array_like
+        Input data. If multi-dimensional, it is flattened to 1-D by
+        `bayes_mvs`. Requires 2 or more data points.
+    alpha : float, optional
+        Probability that the returned confidence interval contains
+        the true parameter.
+
+    Returns
+    -------
+    mean_cntr, var_cntr, std_cntr : tuple
+        The three results are for the mean, variance and standard deviation,
+        respectively.  Each result is a tuple of the form::
+
+            (center, (lower, upper))
+
+        with `center` the mean of the conditional pdf of the value given the
+        data, and `(lower, upper)` a confidence interval, centered on the
+        median, containing the estimate to a probability ``alpha``.
+
+    See Also
+    --------
+    mvsdist
+
+    Notes
+    -----
+    Each tuple of mean, variance, and standard deviation estimates represents
+    (center, (lower, upper)), with center the mean of the conditional pdf
+    of the value given the data and (lower, upper) a confidence interval
+    centered on the median, containing the estimate to a probability
+    ``alpha``.
+
+    Converts data to 1-D and assumes all data has the same mean and variance.
+    Uses Jeffrey's prior for variance and std.
+
+    Equivalent to ``tuple((x.mean(), x.interval(alpha)) for x in mvsdist(data))``
+
+    References
+    ----------
+    T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and
+    standard-deviation from data", https://scholarsarchive.byu.edu/facpub/278,
+    2006.
+
+    Examples
+    --------
+    First a basic example to demonstrate the outputs:
+
+    >>> from scipy import stats
+    >>> data = [6, 9, 12, 7, 8, 8, 13]
+    >>> mean, var, std = stats.bayes_mvs(data)
+    >>> mean
+    Mean(statistic=9.0, minmax=(7.103650222612533, 10.896349777387467))
+    >>> var
+    Variance(statistic=10.0, minmax=(3.176724206..., 24.45910382...))
+    >>> std
+    Std_dev(statistic=2.9724954732045084, minmax=(1.7823367265645143, 4.945614605014631))
+
+    Now we generate some normally distributed random data, and get estimates of
+    mean and standard deviation with 95% confidence intervals for those
+    estimates:
+
+    >>> n_samples = 100000
+    >>> data = stats.norm.rvs(size=n_samples)
+    >>> res_mean, res_var, res_std = stats.bayes_mvs(data, alpha=0.95)
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig = plt.figure()
+    >>> ax = fig.add_subplot(111)
+    >>> ax.hist(data, bins=100, density=True, label='Histogram of data')
+    >>> ax.vlines(res_mean.statistic, 0, 0.5, colors='r', label='Estimated mean')
+    >>> ax.axvspan(res_mean.minmax[0],res_mean.minmax[1], facecolor='r',
+    ...            alpha=0.2, label=r'Estimated mean (95% limits)')
+    >>> ax.vlines(res_std.statistic, 0, 0.5, colors='g', label='Estimated scale')
+    >>> ax.axvspan(res_std.minmax[0],res_std.minmax[1], facecolor='g', alpha=0.2,
+    ...            label=r'Estimated scale (95% limits)')
+
+    >>> ax.legend(fontsize=10)
+    >>> ax.set_xlim([-4, 4])
+    >>> ax.set_ylim([0, 0.5])
+    >>> plt.show()
+
+    """
+    m, v, s = mvsdist(data)
+    if alpha >= 1 or alpha <= 0:
+        raise ValueError("0 < alpha < 1 is required, but alpha=%s was given."
+                         % alpha)
+
+    m_res = Mean(m.mean(), m.interval(alpha))
+    v_res = Variance(v.mean(), v.interval(alpha))
+    s_res = Std_dev(s.mean(), s.interval(alpha))
+
+    return m_res, v_res, s_res
+
+
+def mvsdist(data):
+    """
+    'Frozen' distributions for mean, variance, and standard deviation of data.
+
+    Parameters
+    ----------
+    data : array_like
+        Input array. Converted to 1-D using ravel.
+        Requires 2 or more data-points.
+
+    Returns
+    -------
+    mdist : "frozen" distribution object
+        Distribution object representing the mean of the data.
+    vdist : "frozen" distribution object
+        Distribution object representing the variance of the data.
+    sdist : "frozen" distribution object
+        Distribution object representing the standard deviation of the data.
+
+    See Also
+    --------
+    bayes_mvs
+
+    Notes
+    -----
+    The return values from ``bayes_mvs(data)`` are equivalent to
+    ``tuple((x.mean(), x.interval(0.90)) for x in mvsdist(data))``.
+
+    In other words, calling ``.mean()`` and ``.interval(0.90)``
+    on the three distribution objects returned from this function will give
+    the same results that are returned from `bayes_mvs`.
+
+    References
+    ----------
+    T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and
+    standard-deviation from data", https://scholarsarchive.byu.edu/facpub/278,
+    2006.
+
+    Examples
+    --------
+    >>> from scipy import stats
+    >>> data = [6, 9, 12, 7, 8, 8, 13]
+    >>> mean, var, std = stats.mvsdist(data)
+
+    We now have frozen distribution objects "mean", "var" and "std" that we can
+    examine:
+
+    >>> mean.mean()
+    9.0
+    >>> mean.interval(0.95)
+    (6.6120585482655692, 11.387941451734431)
+    >>> mean.std()
+    1.1952286093343936
+
+    """
+    x = ravel(data)
+    n = len(x)
+    if n < 2:
+        raise ValueError("Need at least 2 data-points.")
+    xbar = x.mean()
+    C = x.var()
+    if n > 1000:  # gaussian approximations for large n
+        mdist = distributions.norm(loc=xbar, scale=math.sqrt(C / n))
+        sdist = distributions.norm(loc=math.sqrt(C), scale=math.sqrt(C / (2. * n)))
+        vdist = distributions.norm(loc=C, scale=math.sqrt(2.0 / n) * C)
+    else:
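+        # exact small-sample posteriors under the (Jeffreys) priors used
+        # here: Student's t for the mean, inverted gamma for the variance,
+        # and a generalized gamma for the standard deviation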
+        nm1 = n - 1
+        fac = n * C / 2.
+        val = nm1 / 2.
+        mdist = distributions.t(nm1, loc=xbar, scale=math.sqrt(C / nm1))
+        sdist = distributions.gengamma(val, -2, scale=math.sqrt(fac))
+        vdist = distributions.invgamma(val, scale=fac)
+    return mdist, vdist, sdist
+
+
+@_axis_nan_policy_factory(
+    lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1, default_axis=None
+)
+def kstat(data, n=2):
+    r"""
+    Return the nth k-statistic (1<=n<=4 so far).
+
+    The nth k-statistic k_n is the unique symmetric unbiased estimator of the
+    nth cumulant kappa_n.
+
+    Parameters
+    ----------
+    data : array_like
+        Input array. Note that n-D input gets flattened.
+    n : int, {1, 2, 3, 4}, optional
+        Default is equal to 2.
+
+    Returns
+    -------
+    kstat : float
+        The nth k-statistic.
+
+    See Also
+    --------
+    kstatvar: Returns an unbiased estimator of the variance of the k-statistic.
+    moment: Returns the n-th central moment about the mean for a sample.
+
+    Notes
+    -----
+    For a sample size n, the first few k-statistics are given by:
+
+    .. math::
+
+        k_{1} = \mu
+        k_{2} = \frac{n}{n-1} m_{2}
+        k_{3} = \frac{ n^{2} } {(n-1) (n-2)} m_{3}
+        k_{4} = \frac{ n^{2} [(n + 1)m_{4} - 3(n - 1) m^2_{2}]} {(n-1) (n-2) (n-3)}
+
+    where :math:`\mu` is the sample mean, :math:`m_2` is the sample
+    variance, and :math:`m_i` is the i-th sample central moment.
+
+    References
+    ----------
+    http://mathworld.wolfram.com/k-Statistic.html
+
+    http://mathworld.wolfram.com/Cumulant.html
+
+    Examples
+    --------
+    >>> from scipy import stats
+    >>> from numpy.random import default_rng
+    >>> rng = default_rng()
+
+    As sample size increases, n-th moment and n-th k-statistic converge to the
+    same number (although they aren't identical). In the case of the normal
+    distribution, they converge to zero.
+
+    >>> for n in [2, 3, 4, 5, 6, 7]:
+    ...     x = rng.normal(size=10**n)
+    ...     m, k = stats.moment(x, 3), stats.kstat(x, 3)
+    ...     print("%.3g %.3g %.3g" % (m, k, m-k))
+    -0.631 -0.651 0.0194  # random
+    0.0282 0.0283 -8.49e-05
+    -0.0454 -0.0454 1.36e-05
+    7.53e-05 7.53e-05 -2.26e-09
+    0.00166 0.00166 -4.99e-09
+    -2.88e-06 -2.88e-06 8.63e-13
+    """
+    if n > 4 or n < 1:
+        raise ValueError("k-statistics only supported for 1<=n<=4")
+    n = int(n)
+    S = np.zeros(n + 1, np.float64)
+    data = ravel(data)
+    N = data.size
+
+    # raise ValueError on empty input
+    if N == 0:
+        raise ValueError("Data input must not be empty")
+
+    # on nan input, return nan without warning
+    if np.isnan(np.sum(data)):
+        return np.nan
+
+    for k in range(1, n + 1):
+        S[k] = np.sum(data**k, axis=0)
+    if n == 1:
+        return S[1] * 1.0/N
+    elif n == 2:
+        return (N*S[2] - S[1]**2.0) / (N*(N - 1.0))
+    elif n == 3:
+        return (2*S[1]**3 - 3*N*S[1]*S[2] + N*N*S[3]) / (N*(N - 1.0)*(N - 2.0))
+    elif n == 4:
+        return ((-6*S[1]**4 + 12*N*S[1]**2 * S[2] - 3*N*(N-1.0)*S[2]**2 -
+                 4*N*(N+1)*S[1]*S[3] + N*N*(N+1)*S[4]) /
+                (N*(N-1.0)*(N-2.0)*(N-3.0)))
+    else:
+        raise ValueError("Should not be here.")
+
+
+@_axis_nan_policy_factory(
+    lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1, default_axis=None
+)
+def kstatvar(data, n=2):
+    r"""Return an unbiased estimator of the variance of the k-statistic.
+
+    See `kstat` for more details of the k-statistic.
+
+    Parameters
+    ----------
+    data : array_like
+        Input array. Note that n-D input gets flattened.
+    n : int, {1, 2}, optional
+        Default is equal to 2.
+
+    Returns
+    -------
+    kstatvar : float
+        The nth k-statistic variance.
+
+    See Also
+    --------
+    kstat: Returns the n-th k-statistic.
+    moment: Returns the n-th central moment about the mean for a sample.
+
+    Notes
+    -----
+    The variances of the first few k-statistics are given by:
+
+    .. math::
+
+        var(k_{1}) = \frac{\kappa_2}{n}
+        var(k_{2}) = \frac{\kappa_4}{n} + \frac{2\kappa^2_{2}}{n - 1}
+        var(k_{3}) = \frac{\kappa_6}{n} + \frac{9 \kappa_2 \kappa_4}{n - 1} +
+                     \frac{9 \kappa^2_{3}}{n - 1} +
+                     \frac{6 n \kappa^3_{2}}{(n-1) (n-2)}
+        var(k_{4}) = \frac{\kappa_8}{n} + \frac{16 \kappa_2 \kappa_6}{n - 1} +
+                     \frac{48 \kappa_{3} \kappa_5}{n - 1} +
+                     \frac{34 \kappa^2_{4}}{n-1} +
+                     \frac{72 n \kappa^2_{2} \kappa_4}{(n - 1) (n - 2)} +
+                     \frac{144 n \kappa_{2} \kappa^2_{3}}{(n - 1) (n - 2)} +
+                     \frac{24 (n + 1) n \kappa^4_{2}}{(n - 1) (n - 2) (n - 3)}
+    """
+    data = ravel(data)
+    N = len(data)
+    if n == 1:
+        return kstat(data, n=2) * 1.0/N
+    elif n == 2:
+        k2 = kstat(data, n=2)
+        k4 = kstat(data, n=4)
+        return (2*N*k2**2 + (N-1)*k4) / (N*(N+1))
+    else:
+        raise ValueError("Only n=1 or n=2 supported.")
+
+
+def _calc_uniform_order_statistic_medians(n):
+    """Approximations of uniform order statistic medians.
+
+    Parameters
+    ----------
+    n : int
+        Sample size.
+
+    Returns
+    -------
+    v : 1d float array
+        Approximations of the order statistic medians.
+
+    References
+    ----------
+    .. [1] James J. Filliben, "The Probability Plot Correlation Coefficient
+           Test for Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
+
+    Examples
+    --------
+    Order statistics of the uniform distribution on the unit interval
+    are marginally distributed according to beta distributions.
+    The expectations of these order statistics are evenly spaced across
+    the interval, but the distributions are skewed in a way that
+    pushes the medians slightly towards the endpoints of the unit interval:
+
+    >>> import numpy as np
+    >>> n = 4
+    >>> k = np.arange(1, n+1)
+    >>> from scipy.stats import beta
+    >>> a = k
+    >>> b = n-k+1
+    >>> beta.mean(a, b)
+    array([0.2, 0.4, 0.6, 0.8])
+    >>> beta.median(a, b)
+    array([0.15910358, 0.38572757, 0.61427243, 0.84089642])
+
+    The Filliben approximation uses the exact medians of the smallest
+    and greatest order statistics, and the remaining medians are approximated
+    by points spread evenly across a sub-interval of the unit interval:
+
+    >>> from scipy.stats._morestats import _calc_uniform_order_statistic_medians
+    >>> _calc_uniform_order_statistic_medians(n)
+    array([0.15910358, 0.38545246, 0.61454754, 0.84089642])
+
+    This plot shows the skewed distributions of the order statistics
+    of a sample of size four from a uniform distribution on the unit interval:
+
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.linspace(0.0, 1.0, num=50, endpoint=True)
+    >>> pdfs = [beta.pdf(x, a[i], b[i]) for i in range(n)]
+    >>> plt.figure()
+    >>> plt.plot(x, pdfs[0], x, pdfs[1], x, pdfs[2], x, pdfs[3])
+
+    """
+    v = np.empty(n, dtype=np.float64)
+    v[-1] = 0.5**(1.0 / n)
+    v[0] = 1 - v[-1]
+    i = np.arange(2, n)
+    v[1:-1] = (i - 0.3175) / (n + 0.365)
+    return v
+
+
+def _parse_dist_kw(dist, enforce_subclass=True):
+    """Parse `dist` keyword.
+
+    Parameters
+    ----------
+    dist : str or stats.distributions instance.
+        Several functions take `dist` as a keyword, hence this utility
+        function.
+    enforce_subclass : bool, optional
+        If True (default), `dist` needs to be a
+        `_distn_infrastructure.rv_generic` instance.
+        It can sometimes be useful to set this keyword to False, if a function
+        wants to accept objects that just look somewhat like such an instance
+        (for example, they have a ``ppf`` method).
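+
+    Examples
+    --------
+    A minimal sketch of the two accepted input kinds (relying on the
+    ``distributions`` module already imported in this file):
+
+    >>> from scipy.stats import distributions
+    >>> _parse_dist_kw('norm') is distributions.norm
+    True
+    >>> _parse_dist_kw(distributions.norm) is distributions.norm
+    True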
+
+    """
+    if isinstance(dist, rv_generic):
+        pass
+    elif isinstance(dist, str):
+        try:
+            dist = getattr(distributions, dist)
+        except AttributeError as e:
+            raise ValueError("%s is not a valid distribution name" % dist) from e
+    elif enforce_subclass:
+        msg = ("`dist` should be a stats.distributions instance or a string "
+               "with the name of such a distribution.")
+        raise ValueError(msg)
+
+    return dist
+
+
+def _add_axis_labels_title(plot, xlabel, ylabel, title):
+    """Helper function to add axes labels and a title to stats plots."""
+    try:
+        if hasattr(plot, 'set_title'):
+            # Matplotlib Axes instance or something that looks like it
+            plot.set_title(title)
+            plot.set_xlabel(xlabel)
+            plot.set_ylabel(ylabel)
+        else:
+            # matplotlib.pyplot module
+            plot.title(title)
+            plot.xlabel(xlabel)
+            plot.ylabel(ylabel)
+    except Exception:
+        # Not an MPL object or something that looks (enough) like it.
+        # Don't crash on adding labels or title
+        pass
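+
+# A minimal sketch of the helper above (illustrative): it dispatches on the
+# presence of ``set_title`` and swallows failures, so all three calls below
+# are safe:
+#
+#     >>> import matplotlib.pyplot as plt
+#     >>> fig, ax = plt.subplots()
+#     >>> _add_axis_labels_title(ax, 'x', 'y', 'demo')        # Axes instance
+#     >>> _add_axis_labels_title(plt, 'x', 'y', 'demo')       # pyplot module
+#     >>> _add_axis_labels_title(object(), 'x', 'y', 'demo')  # no-op, no error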
+
+
+def probplot(x, sparams=(), dist='norm', fit=True, plot=None, rvalue=False):
+    """
+    Calculate quantiles for a probability plot, and optionally show the plot.
+
+    Generates a probability plot of sample data against the quantiles of a
+    specified theoretical distribution (the normal distribution by default).
+    `probplot` optionally calculates a best-fit line for the data and plots the
+    results using Matplotlib or a given plot function.
+
+    Parameters
+    ----------
+    x : array_like
+        Sample/response data from which `probplot` creates the plot.
+    sparams : tuple, optional
+        Distribution-specific shape parameters (shape parameters plus location
+        and scale).
+    dist : str or stats.distributions instance, optional
+        Distribution or distribution function name. The default is 'norm' for a
+        normal probability plot.  Objects that look enough like a
+        stats.distributions instance (i.e. they have a ``ppf`` method) are also
+        accepted.
+    fit : bool, optional
+        Fit a least-squares regression (best-fit) line to the sample data if
+        True (default).
+    plot : object, optional
+        If given, plots the quantiles.
+        If given and `fit` is True, also plots the least squares fit.
+        `plot` is an object that has to have methods "plot" and "text".
+        The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
+        or a custom object with the same methods.
+        Default is None, which means that no plot is created.
+    rvalue : bool, optional
+        If `plot` is provided and `fit` is True, setting `rvalue` to True
+        includes the coefficient of determination on the plot.
+        Default is False.
+
+    Returns
+    -------
+    (osm, osr) : tuple of ndarrays
+        Tuple of theoretical quantiles (osm, or order statistic medians) and
+        ordered responses (osr).  `osr` is simply sorted input `x`.
+        For details on how `osm` is calculated see the Notes section.
+    (slope, intercept, r) : tuple of floats, optional
+        Tuple containing the result of the least-squares fit, if that is
+        performed by `probplot`. `r` is the square root of the coefficient of
+        determination.  If ``fit=False`` and ``plot=None``, this tuple is not
+        returned.
+
+    Notes
+    -----
+    Even if `plot` is given, the figure is not shown or saved by `probplot`;
+    ``plt.show()`` or ``plt.savefig('figname.png')`` should be used after
+    calling `probplot`.
+
+    `probplot` generates a probability plot, which should not be confused with
+    a Q-Q or a P-P plot.  Statsmodels has more extensive functionality of this
+    type; see ``statsmodels.api.ProbPlot``.
+
+    The formula used for the theoretical quantiles (horizontal axis of the
+    probability plot) is Filliben's estimate::
+
+        quantiles = dist.ppf(val), for
+
+                0.5**(1/n),                  for i = n
+          val = (i - 0.3175) / (n + 0.365),  for i = 2, ..., n-1
+                1 - 0.5**(1/n),              for i = 1
+
+    where ``i`` indicates the i-th ordered value and ``n`` is the total number
+    of values.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> import matplotlib.pyplot as plt
+    >>> nsample = 100
+    >>> rng = np.random.default_rng()
+
+    A t distribution with small degrees of freedom:
+
+    >>> ax1 = plt.subplot(221)
+    >>> x = stats.t.rvs(3, size=nsample, random_state=rng)
+    >>> res = stats.probplot(x, plot=plt)
+
+    A t distribution with larger degrees of freedom:
+
+    >>> ax2 = plt.subplot(222)
+    >>> x = stats.t.rvs(25, size=nsample, random_state=rng)
+    >>> res = stats.probplot(x, plot=plt)
+
+    A mixture of two normal distributions with broadcasting:
+
+    >>> ax3 = plt.subplot(223)
+    >>> x = stats.norm.rvs(loc=[0,5], scale=[1,1.5],
+    ...                    size=(nsample//2,2), random_state=rng).ravel()
+    >>> res = stats.probplot(x, plot=plt)
+
+    A standard normal distribution:
+
+    >>> ax4 = plt.subplot(224)
+    >>> x = stats.norm.rvs(loc=0, scale=1, size=nsample, random_state=rng)
+    >>> res = stats.probplot(x, plot=plt)
+
+    Produce a new figure with a loggamma distribution, using the ``dist`` and
+    ``sparams`` keywords:
+
+    >>> fig = plt.figure()
+    >>> ax = fig.add_subplot(111)
+    >>> x = stats.loggamma.rvs(c=2.5, size=500, random_state=rng)
+    >>> res = stats.probplot(x, dist=stats.loggamma, sparams=(2.5,), plot=ax)
+    >>> ax.set_title("Probplot for loggamma dist with shape parameter 2.5")
+
+    Show the results with Matplotlib:
+
+    >>> plt.show()
+
+    """
+    x = np.asarray(x)
+    if x.size == 0:
+        if fit:
+            return (x, x), (np.nan, np.nan, 0.0)
+        else:
+            return x, x
+
+    osm_uniform = _calc_uniform_order_statistic_medians(len(x))
+    dist = _parse_dist_kw(dist, enforce_subclass=False)
+    if sparams is None:
+        sparams = ()
+    if isscalar(sparams):
+        sparams = (sparams,)
+    if not isinstance(sparams, tuple):
+        sparams = tuple(sparams)
+
+    osm = dist.ppf(osm_uniform, *sparams)
+    osr = sort(x)
+    if fit:
+        # perform a linear least squares fit.
+        slope, intercept, r, prob, _ = _stats_py.linregress(osm, osr)
+
+    if plot is not None:
+        plot.plot(osm, osr, 'bo')
+        if fit:
+            plot.plot(osm, slope*osm + intercept, 'r-')
+        _add_axis_labels_title(plot, xlabel='Theoretical quantiles',
+                               ylabel='Ordered Values',
+                               title='Probability Plot')
+
+        # Add R^2 value to the plot as text
+        if fit and rvalue:
+            xmin = amin(osm)
+            xmax = amax(osm)
+            ymin = amin(x)
+            ymax = amax(x)
+            posx = xmin + 0.70 * (xmax - xmin)
+            posy = ymin + 0.01 * (ymax - ymin)
+            plot.text(posx, posy, "$R^2=%1.4f$" % r**2)
+
+    if fit:
+        return (osm, osr), (slope, intercept, r)
+    else:
+        return osm, osr
+
+
+def ppcc_max(x, brack=(0.0, 1.0), dist='tukeylambda'):
+    """Calculate the shape parameter that maximizes the PPCC.
+
+    The probability plot correlation coefficient (PPCC) plot can be used
+    to determine the optimal shape parameter for a one-parameter family
+    of distributions. ``ppcc_max`` returns the shape parameter that would
+    maximize the probability plot correlation coefficient for the given
+    data to a one-parameter family of distributions.
+
+    Parameters
+    ----------
+    x : array_like
+        Input array.
+    brack : tuple, optional
+        Triple (a,b,c) where (a<b<c). If bracket consists of two numbers
+        (a, c), then they are assumed to be a starting interval for a
+        downhill bracket search (see `scipy.optimize.brent`).
+    dist : str or stats.distributions instance, optional
+        Distribution or distribution function name.  Objects that look enough
+        like a stats.distributions instance (i.e. they have a ``ppf`` method)
+        are also accepted.  The default is ``'tukeylambda'``.
+
+    Returns
+    -------
+    shape_value : float
+        The shape parameter at which the probability plot correlation
+        coefficient reaches its max value.
+
+    See Also
+    --------
+    ppcc_plot, probplot, boxcox
+
+    References
+    ----------
+    .. [1] J.J. Filliben, "The Probability Plot Correlation Coefficient Test
+           for Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
+
+    Examples
+    --------
+    First we generate some random data from a Weibull distribution
+    with shape parameter 2.5:
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> import matplotlib.pyplot as plt
+    >>> rng = np.random.default_rng()
+    >>> c = 2.5
+    >>> x = stats.weibull_min.rvs(c, scale=4, size=2000, random_state=rng)
+
+    Generate the PPCC plot for this data with the Weibull distribution.
+
+    >>> fig, ax = plt.subplots(figsize=(8, 6))
+    >>> res = stats.ppcc_plot(x, c/2, 2*c, dist='weibull_min', plot=ax)
+
+    We calculate the shape parameter at which the PPCC reaches its maximum
+    and draw a red vertical line there. The line should coincide with the
+    highest point in the PPCC graph.
+
+    >>> cmax = stats.ppcc_max(x, brack=(c/2, 2*c), dist='weibull_min')
+    >>> ax.axvline(cmax, color='r')
+    >>> plt.show()
+
+    """
+    dist = _parse_dist_kw(dist)
+    osm_uniform = _calc_uniform_order_statistic_medians(len(x))
+    osr = sort(x)
+
+    # this function computes the x-axis values of the probability plot
+    #  and computes a linear regression (including the correlation)
+    #  and returns 1-r so that a minimization function maximizes the
+    #  correlation
+    def tempfunc(shape, mi, yvals, func):
+        xvals = func(mi, shape)
+        r, prob = _stats_py.pearsonr(xvals, yvals)
+        return 1 - r
+
+    return optimize.brent(tempfunc, brack=brack,
+                          args=(osm_uniform, osr, dist.ppf))
+
+
+def ppcc_plot(x, a, b, dist='tukeylambda', plot=None, N=80):
+    """Calculate and optionally plot probability plot correlation coefficient.
+
+    The probability plot correlation coefficient (PPCC) plot can be used to
+    determine the optimal shape parameter for a one-parameter family of
+    distributions.  It cannot be used for distributions without shape
+    parameters (like the normal distribution) or with multiple shape
+    parameters.
+
+    By default a Tukey-Lambda distribution (`stats.tukeylambda`) is used. A
+    Tukey-Lambda PPCC plot interpolates from long-tailed to short-tailed
+    distributions via an approximately normal one, and is therefore
+    particularly useful in practice.
+
+    Parameters
+    ----------
+    x : array_like
+        Input array.
+    a, b : scalar
+        Lower and upper bounds of the shape parameter to use.
+    dist : str or stats.distributions instance, optional
+        Distribution or distribution function name.  Objects that look enough
+        like a stats.distributions instance (i.e. they have a ``ppf`` method)
+        are also accepted.  The default is ``'tukeylambda'``.
+    plot : object, optional
+        If given, plots PPCC against the shape parameter.
+        `plot` is an object that has to have methods "plot" and "text".
+        The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
+        or a custom object with the same methods.
+        Default is None, which means that no plot is created.
+    N : int, optional
+        Number of points on the horizontal axis (equally distributed from
+        `a` to `b`).
+
+    Returns
+    -------
+    svals : ndarray
+        The shape values for which `ppcc` was calculated.
+    ppcc : ndarray
+        The calculated probability plot correlation coefficient values.
+
+    See Also
+    --------
+    ppcc_max, probplot, boxcox_normplot, tukeylambda
+
+    References
+    ----------
+    J.J. Filliben, "The Probability Plot Correlation Coefficient Test for
+    Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
+
+    Examples
+    --------
+    First we generate some random data from a Weibull distribution
+    with shape parameter 2.5:
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> import matplotlib.pyplot as plt
+    >>> rng = np.random.default_rng()
+    >>> c = 2.5
+    >>> x = stats.weibull_min.rvs(c, scale=4, size=2000, random_state=rng)
+
+    Take a look at the histogram of the data.
+
+    >>> fig1, ax = plt.subplots(figsize=(9, 4))
+    >>> ax.hist(x, bins=50)
+    >>> ax.set_title('Histogram of x')
+    >>> plt.show()
+
+    Now we explore this data with a PPCC plot as well as the related
+    probability plot and Box-Cox normplot.  A red line is drawn where we
+    expect the PPCC value to be maximal (at the shape parameter ``c``
+    used above):
+
+    >>> fig2 = plt.figure(figsize=(12, 4))
+    >>> ax1 = fig2.add_subplot(1, 3, 1)
+    >>> ax2 = fig2.add_subplot(1, 3, 2)
+    >>> ax3 = fig2.add_subplot(1, 3, 3)
+    >>> res = stats.probplot(x, plot=ax1)
+    >>> res = stats.boxcox_normplot(x, -4, 4, plot=ax2)
+    >>> res = stats.ppcc_plot(x, c/2, 2*c, dist='weibull_min', plot=ax3)
+    >>> ax3.axvline(c, color='r')
+    >>> plt.show()
+
+    """
+    if b <= a:
+        raise ValueError("`b` has to be larger than `a`.")
+
+    svals = np.linspace(a, b, num=N)
+    ppcc = np.empty_like(svals)
+    for k, sval in enumerate(svals):
+        _, r2 = probplot(x, sval, dist=dist, fit=True)
+        ppcc[k] = r2[-1]
+
+    if plot is not None:
+        plot.plot(svals, ppcc, 'x')
+        _add_axis_labels_title(plot, xlabel='Shape Values',
+                               ylabel='Prob Plot Corr. Coef.',
+                               title='(%s) PPCC Plot' % dist)
+
+    return svals, ppcc
+
+
+def boxcox_llf(lmb, data):
+    r"""The boxcox log-likelihood function.
+
+    Parameters
+    ----------
+    lmb : scalar
+        Parameter for Box-Cox transformation.  See `boxcox` for details.
+    data : array_like
+        Data to calculate Box-Cox log-likelihood for.  If `data` is
+        multi-dimensional, the log-likelihood is calculated along the first
+        axis.
+
+    Returns
+    -------
+    llf : float or ndarray
+        Box-Cox log-likelihood of `data` given `lmb`.  A float for 1-D `data`,
+        an array otherwise.
+
+    See Also
+    --------
+    boxcox, probplot, boxcox_normplot, boxcox_normmax
+
+    Notes
+    -----
+    The Box-Cox log-likelihood function is defined here as
+
+    .. math::
+
+        llf = (\lambda - 1) \sum_i(\log(x_i)) -
+              N/2 \log(\sum_i (y_i - \bar{y})^2 / N),
+
+    where ``y`` is the Box-Cox transformed input data ``x``.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> import matplotlib.pyplot as plt
+    >>> from mpl_toolkits.axes_grid1.inset_locator import inset_axes
+
+    Generate some random variates and calculate Box-Cox log-likelihood values
+    for them for a range of ``lmbda`` values:
+
+    >>> rng = np.random.default_rng()
+    >>> x = stats.loggamma.rvs(5, loc=10, size=1000, random_state=rng)
+    >>> lmbdas = np.linspace(-2, 10)
+    >>> llf = np.zeros(lmbdas.shape, dtype=float)
+    >>> for ii, lmbda in enumerate(lmbdas):
+    ...     llf[ii] = stats.boxcox_llf(lmbda, x)
+
+    Also find the optimal lmbda value with `boxcox`:
+
+    >>> x_most_normal, lmbda_optimal = stats.boxcox(x)
+
+    Plot the log-likelihood as function of lmbda.  Add the optimal lmbda as a
+    horizontal line to check that that's really the optimum:
+
+    >>> fig = plt.figure()
+    >>> ax = fig.add_subplot(111)
+    >>> ax.plot(lmbdas, llf, 'b.-')
+    >>> ax.axhline(stats.boxcox_llf(lmbda_optimal, x), color='r')
+    >>> ax.set_xlabel('lmbda parameter')
+    >>> ax.set_ylabel('Box-Cox log-likelihood')
+
+    Now add some probability plots to show that where the log-likelihood is
+    maximized the data transformed with `boxcox` looks closest to normal:
+
+    >>> locs = [3, 10, 4]  # 'lower left', 'center', 'lower right'
+    >>> for lmbda, loc in zip([-1, lmbda_optimal, 9], locs):
+    ...     xt = stats.boxcox(x, lmbda=lmbda)
+    ...     (osm, osr), (slope, intercept, r_sq) = stats.probplot(xt)
+    ...     ax_inset = inset_axes(ax, width="20%", height="20%", loc=loc)
+    ...     ax_inset.plot(osm, osr, 'c.', osm, slope*osm + intercept, 'k-')
+    ...     ax_inset.set_xticklabels([])
+    ...     ax_inset.set_yticklabels([])
+    ...     ax_inset.set_title(r'$\lambda=%1.2f$' % lmbda)
+
+    >>> plt.show()
+
+    """
+    data = np.asarray(data)
+    N = data.shape[0]
+    if N == 0:
+        return np.nan
+
+    logdata = np.log(data)
+
+    # Compute the variance of the transformed data.
+    if lmb == 0:
+        variance = np.var(logdata, axis=0)
+    else:
+        # Transform without the constant offset 1/lmb.  The offset does
+        # not affect the variance, and the subtraction of the offset can
+        # lead to loss of precision.
+        variance = np.var(data**lmb / lmb, axis=0)
+
+    return (lmb - 1) * np.sum(logdata, axis=0) - N/2 * np.log(variance)
+
+
+def _boxcox_conf_interval(x, lmax, alpha):
+    # Need to find the lambda for which
+    #  f(x,lmbda) >= f(x,lmax) - 0.5*chi^2_alpha;1
+    fac = 0.5 * distributions.chi2.ppf(1 - alpha, 1)
+    target = boxcox_llf(lmax, x) - fac
+
+    def rootfunc(lmbda, data, target):
+        return boxcox_llf(lmbda, data) - target
+
+    # Find positive endpoint of interval in which answer is to be found
+    newlm = lmax + 0.5
+    N = 0
+    while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
+        newlm += 0.1
+        N += 1
+
+    if N == 500:
+        raise RuntimeError("Could not find endpoint.")
+
+    lmplus = optimize.brentq(rootfunc, lmax, newlm, args=(x, target))
+
+    # Now find negative interval in the same way
+    newlm = lmax - 0.5
+    N = 0
+    while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
+        newlm -= 0.1
+        N += 1
+
+    if N == 500:
+        raise RuntimeError("Could not find endpoint.")
+
+    lmminus = optimize.brentq(rootfunc, newlm, lmax, args=(x, target))
+    return lmminus, lmplus
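+
+# A minimal sketch of how the interval above is consumed (illustrative): the
+# public entry point is `boxcox` with `alpha` set, and the optimal lmbda lies
+# inside the returned interval by construction:
+#
+#     >>> import numpy as np
+#     >>> from scipy import stats
+#     >>> rng = np.random.default_rng(1234)
+#     >>> x = stats.loggamma.rvs(5, size=500, random_state=rng) + 5
+#     >>> y, lmax, (lo, hi) = stats.boxcox(x, alpha=0.05)
+#     >>> lo < lmax < hi
+#     True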
+
+
+def boxcox(x, lmbda=None, alpha=None, optimizer=None):
+    r"""Return a dataset transformed by a Box-Cox power transformation.
+
+    Parameters
+    ----------
+    x : ndarray
+        Input array to be transformed.
+
+        If `lmbda` is not None, this is an alias of
+        `scipy.special.boxcox`.
+        Returns nan if ``x < 0``; returns -inf if ``x == 0 and lmbda < 0``.
+
+        If `lmbda` is None, array must be positive, 1-dimensional, and
+        non-constant.
+
+    lmbda : scalar, optional
+        If `lmbda` is None (default), find the value of `lmbda` that maximizes
+        the log-likelihood function and return it as the second output
+        argument.
+
+        If `lmbda` is not None, do the transformation for that value.
+
+    alpha : float, optional
+        If `lmbda` is None and `alpha` is not None, return the
+        ``100 * (1-alpha)%`` confidence interval for `lmbda` as the third
+        output argument. Must be between 0.0 and 1.0. Default is None,
+        meaning no confidence interval is computed.
+
+        If `lmbda` is not None, `alpha` is ignored.
+    optimizer : callable, optional
+        If `lmbda` is None, `optimizer` is the scalar optimizer used to find
+        the value of `lmbda` that minimizes the negative log-likelihood
+        function. `optimizer` is a callable that accepts one argument:
+
+        fun : callable
+            The objective function, which evaluates the negative
+            log-likelihood function at a provided value of `lmbda`
+
+        and returns an object, such as an instance of
+        `scipy.optimize.OptimizeResult`, which holds the optimal value of
+        `lmbda` in an attribute `x`.
+
+        See the example in `boxcox_normmax` or the documentation of
+        `scipy.optimize.minimize_scalar` for more information.
+
+        If `lmbda` is not None, `optimizer` is ignored.
+
+    Returns
+    -------
+    boxcox : ndarray
+        Box-Cox power transformed array.
+    maxlog : float, optional
+        If the `lmbda` parameter is None, the second returned argument is
+        the `lmbda` that maximizes the log-likelihood function.
+    (min_ci, max_ci) : tuple of float, optional
+        If `lmbda` parameter is None and `alpha` is not None, this returned
+        tuple of floats represents the minimum and maximum confidence limits
+        given `alpha`.
+
+    See Also
+    --------
+    probplot, boxcox_normplot, boxcox_normmax, boxcox_llf
+
+    Notes
+    -----
+    The Box-Cox transform is given by::
+
+        y = (x**lmbda - 1) / lmbda,  for lmbda != 0
+            log(x),                  for lmbda = 0
+
+    `boxcox` requires the input data to be positive.  Sometimes a Box-Cox
+    transformation provides a shift parameter to achieve this; `boxcox` does
+    not.  Such a shift parameter is equivalent to adding a positive constant to
+    `x` before calling `boxcox`.
+
+    The confidence limits returned when `alpha` is provided give the interval
+    where:
+
+    .. math::
+
+        llf(\hat{\lambda}) - llf(\lambda) < \frac{1}{2}\chi^2(1 - \alpha, 1),
+
+    with ``llf`` the log-likelihood function and :math:`\chi^2` the chi-squared
+    function.
+
+    References
+    ----------
+    G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal of the
+    Royal Statistical Society B, 26, 211-252 (1964).
+
+    Examples
+    --------
+    >>> from scipy import stats
+    >>> import matplotlib.pyplot as plt
+
+    We generate some random variates from a non-normal distribution and make a
+    probability plot for it, to show it is non-normal in the tails:
+
+    >>> fig = plt.figure()
+    >>> ax1 = fig.add_subplot(211)
+    >>> x = stats.loggamma.rvs(5, size=500) + 5
+    >>> prob = stats.probplot(x, dist=stats.norm, plot=ax1)
+    >>> ax1.set_xlabel('')
+    >>> ax1.set_title('Probplot against normal distribution')
+
+    We now use `boxcox` to transform the data so it's closest to normal:
+
+    >>> ax2 = fig.add_subplot(212)
+    >>> xt, _ = stats.boxcox(x)
+    >>> prob = stats.probplot(xt, dist=stats.norm, plot=ax2)
+    >>> ax2.set_title('Probplot after Box-Cox transformation')
+
+    >>> plt.show()
+
+    """
+    x = np.asarray(x)
+
+    if lmbda is not None:  # single transformation
+        return special.boxcox(x, lmbda)
+
+    if x.ndim != 1:
+        raise ValueError("Data must be 1-dimensional.")
+
+    if x.size == 0:
+        return x
+
+    if np.all(x == x[0]):
+        raise ValueError("Data must not be constant.")
+
+    if np.any(x <= 0):
+        raise ValueError("Data must be positive.")
+
+    # If lmbda=None, find the lmbda that maximizes the log-likelihood function.
+    lmax = boxcox_normmax(x, method='mle', optimizer=optimizer)
+    y = boxcox(x, lmax)
+
+    if alpha is None:
+        return y, lmax
+    else:
+        # Find confidence interval
+        interval = _boxcox_conf_interval(x, lmax, alpha)
+        return y, lmax, interval
+
+
+def boxcox_normmax(x, brack=None, method='pearsonr', optimizer=None):
+    """Compute optimal Box-Cox transform parameter for input data.
+
+    Parameters
+    ----------
+    x : array_like
+        Input array.
+    brack : 2-tuple, optional, default (-2.0, 2.0)
+         The starting interval for a downhill bracket search for the default
+         `optimize.brent` solver. Note that this is in most cases not
+         critical; the final result is allowed to be outside this bracket.
+         If `optimizer` is passed, `brack` must be None.
+    method : str, optional
+        The method to determine the optimal transform parameter (`boxcox`
+        ``lmbda`` parameter). Options are:
+
+        'pearsonr'  (default)
+            Maximizes the Pearson correlation coefficient between
+            ``y = boxcox(x)`` and the expected values for ``y`` if `x` would be
+            normally-distributed.
+
+        'mle'
+            Minimizes the log-likelihood `boxcox_llf`.  This is the method used
+            in `boxcox`.
+
+        'all'
+            Use all optimization methods available, and return all results.
+            Useful to compare different methods.
+    optimizer : callable, optional
+        `optimizer` is a callable that accepts one argument:
+
+        fun : callable
+            The objective function to be optimized. `fun` accepts one argument,
+            the Box-Cox transform parameter `lmbda`, and returns the negative
+            log-likelihood function at the provided value. The job of `optimizer`
+            is to find the value of `lmbda` that minimizes `fun`.
+
+        and returns an object, such as an instance of
+        `scipy.optimize.OptimizeResult`, which holds the optimal value of
+        `lmbda` in an attribute `x`.
+
+        See the example below or the documentation of
+        `scipy.optimize.minimize_scalar` for more information.
+
+    Returns
+    -------
+    maxlog : float or ndarray
+        The optimal transform parameter found.  An array instead of a scalar
+        for ``method='all'``.
+
+    See Also
+    --------
+    boxcox, boxcox_llf, boxcox_normplot, scipy.optimize.minimize_scalar
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> import matplotlib.pyplot as plt
+
+    We can generate some data and determine the optimal ``lmbda`` in various
+    ways:
+
+    >>> rng = np.random.default_rng()
+    >>> x = stats.loggamma.rvs(5, size=30, random_state=rng) + 5
+    >>> y, lmax_mle = stats.boxcox(x)
+    >>> lmax_pearsonr = stats.boxcox_normmax(x)
+
+    >>> lmax_mle
+    2.217563431465757
+    >>> lmax_pearsonr
+    2.238318660200961
+    >>> stats.boxcox_normmax(x, method='all')
+    array([2.23831866, 2.21756343])
+
+    >>> fig = plt.figure()
+    >>> ax = fig.add_subplot(111)
+    >>> prob = stats.boxcox_normplot(x, -10, 10, plot=ax)
+    >>> ax.axvline(lmax_mle, color='r')
+    >>> ax.axvline(lmax_pearsonr, color='g', ls='--')
+
+    >>> plt.show()
+
+    Alternatively, we can define our own `optimizer` function. Suppose we
+    are only interested in values of `lmbda` on the interval [6, 7], we
+    want to use `scipy.optimize.minimize_scalar` with ``method='bounded'``,
+    and we want to use tighter tolerances when optimizing the log-likelihood
+    function. To do this, we define a function that accepts positional argument
+    `fun` and uses `scipy.optimize.minimize_scalar` to minimize `fun` subject
+    to the provided bounds and tolerances:
+
+    >>> from scipy import optimize
+    >>> options = {'xatol': 1e-12}  # absolute tolerance on `x`
+    >>> def optimizer(fun):
+    ...     return optimize.minimize_scalar(fun, bounds=(6, 7),
+    ...                                     method="bounded", options=options)
+    >>> stats.boxcox_normmax(x, optimizer=optimizer)
+    6.000...
+    """
+    # If optimizer is not given, define default 'brent' optimizer.
+    if optimizer is None:
+
+        # Set default value for `brack`.
+        if brack is None:
+            brack = (-2.0, 2.0)
+
+        def _optimizer(func, args):
+            return optimize.brent(func, args=args, brack=brack)
+
+    # Otherwise check optimizer.
+    else:
+        if not callable(optimizer):
+            raise ValueError("`optimizer` must be a callable")
+
+        if brack is not None:
+            raise ValueError("`brack` must be None if `optimizer` is given")
+
+        # `optimizer` is expected to return an `OptimizeResult` object; here
+        # we extract the solution to the optimization problem.
+        def _optimizer(func, args):
+            def func_wrapped(x):
+                return func(x, *args)
+            return getattr(optimizer(func_wrapped), 'x', None)
+
+    def _pearsonr(x):
+        osm_uniform = _calc_uniform_order_statistic_medians(len(x))
+        xvals = distributions.norm.ppf(osm_uniform)
+
+        def _eval_pearsonr(lmbda, xvals, samps):
+            # This function computes the x-axis values of the probability plot
+            # and computes a linear regression (including the correlation) and
+            # returns ``1 - r`` so that a minimization function maximizes the
+            # correlation.
+            y = boxcox(samps, lmbda)
+            yvals = np.sort(y)
+            r, prob = _stats_py.pearsonr(xvals, yvals)
+            return 1 - r
+
+        return _optimizer(_eval_pearsonr, args=(xvals, x))
+
+    def _mle(x):
+        def _eval_mle(lmb, data):
+            # function to minimize
+            return -boxcox_llf(lmb, data)
+
+        return _optimizer(_eval_mle, args=(x,))
+
+    def _all(x):
+        maxlog = np.empty(2, dtype=float)
+        maxlog[0] = _pearsonr(x)
+        maxlog[1] = _mle(x)
+        return maxlog
+
+    methods = {'pearsonr': _pearsonr,
+               'mle': _mle,
+               'all': _all}
+    if method not in methods.keys():
+        raise ValueError("Method %s not recognized." % method)
+
+    optimfunc = methods[method]
+    res = optimfunc(x)
+    if res is None:
+        message = ("`optimizer` must return an object containing the optimal "
+                   "`lmbda` in attribute `x`")
+        raise ValueError(message)
+    return res
+
+
+def _normplot(method, x, la, lb, plot=None, N=80):
+    """Compute parameters for a Box-Cox or Yeo-Johnson normality plot,
+    optionally show it.
+
+    See `boxcox_normplot` or `yeojohnson_normplot` for details.
+    """
+
+    if method == 'boxcox':
+        title = 'Box-Cox Normality Plot'
+        transform_func = boxcox
+    else:
+        title = 'Yeo-Johnson Normality Plot'
+        transform_func = yeojohnson
+
+    x = np.asarray(x)
+    if x.size == 0:
+        return x
+
+    if lb <= la:
+        raise ValueError("`lb` has to be larger than `la`.")
+
+    if method == 'boxcox' and np.any(x <= 0):
+        raise ValueError("Data must be positive.")
+
+    lmbdas = np.linspace(la, lb, num=N)
+    ppcc = lmbdas * 0.0
+    for i, val in enumerate(lmbdas):
+        # Determine for each lmbda the square root of correlation coefficient
+        # of transformed x
+        z = transform_func(x, lmbda=val)
+        _, (_, _, r) = probplot(z, dist='norm', fit=True)
+        ppcc[i] = r
+
+    if plot is not None:
+        plot.plot(lmbdas, ppcc, 'x')
+        _add_axis_labels_title(plot, xlabel='$\\lambda$',
+                               ylabel='Prob Plot Corr. Coef.',
+                               title=title)
+
+    return lmbdas, ppcc
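+
+# A minimal sketch of calling the helper directly (illustrative; the public
+# wrappers `boxcox_normplot` and `yeojohnson_normplot` below simply fix
+# `method`):
+#
+#     >>> import numpy as np
+#     >>> from scipy import stats
+#     >>> rng = np.random.default_rng(1234)
+#     >>> x = stats.loggamma.rvs(5, size=200, random_state=rng) + 5
+#     >>> lmbdas, ppcc = _normplot('boxcox', x, -10, 10, N=40)
+#     >>> lmbdas.shape == ppcc.shape == (40,)
+#     True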
+
+
+def boxcox_normplot(x, la, lb, plot=None, N=80):
+    """Compute parameters for a Box-Cox normality plot, optionally show it.
+
+    A Box-Cox normality plot shows graphically what the best transformation
+    parameter is to use in `boxcox` to obtain a distribution that is close
+    to normal.
+
+    Parameters
+    ----------
+    x : array_like
+        Input array.
+    la, lb : scalar
+        The lower and upper bounds for the ``lmbda`` values to pass to `boxcox`
+        for Box-Cox transformations.  These are also the limits of the
+        horizontal axis of the plot if that is generated.
+    plot : object, optional
+        If given, plots the quantiles and least squares fit.
+        `plot` is an object that has to have methods "plot" and "text".
+        The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
+        or a custom object with the same methods.
+        Default is None, which means that no plot is created.
+    N : int, optional
+        Number of points on the horizontal axis (equally distributed from
+        `la` to `lb`).
+
+    Returns
+    -------
+    lmbdas : ndarray
+        The ``lmbda`` values for which a Box-Cox transform was done.
+    ppcc : ndarray
+        Probability Plot Correlation Coefficient, as obtained from `probplot`
+        when fitting the Box-Cox transformed input `x` against a normal
+        distribution.
+
+    See Also
+    --------
+    probplot, boxcox, boxcox_normmax, boxcox_llf, ppcc_max
+
+    Notes
+    -----
+    Even if `plot` is given, the figure is not shown or saved by
+    `boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')``
+    should be used after calling `boxcox_normplot`.
+
+    Examples
+    --------
+    >>> from scipy import stats
+    >>> import matplotlib.pyplot as plt
+
+    Generate some non-normally distributed data, and create a Box-Cox plot:
+
+    >>> x = stats.loggamma.rvs(5, size=500) + 5
+    >>> fig = plt.figure()
+    >>> ax = fig.add_subplot(111)
+    >>> prob = stats.boxcox_normplot(x, -20, 20, plot=ax)
+
+    Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in
+    the same plot:
+
+    >>> _, maxlog = stats.boxcox(x)
+    >>> ax.axvline(maxlog, color='r')
+
+    >>> plt.show()
+
+    """
+    return _normplot('boxcox', x, la, lb, plot, N)
+
+
+def yeojohnson(x, lmbda=None):
+    r"""Return a dataset transformed by a Yeo-Johnson power transformation.
+
+    Parameters
+    ----------
+    x : ndarray
+        Input array.  Should be 1-dimensional.
+    lmbda : float, optional
+        If ``lmbda`` is ``None``, find the lambda that maximizes the
+        log-likelihood function and return it as the second output argument.
+        Otherwise the transformation is done for the given value.
+
+    Returns
+    -------
+    yeojohnson: ndarray
+        Yeo-Johnson power transformed array.
+    maxlog : float, optional
+        If the `lmbda` parameter is None, the second returned argument is
+        the lambda that maximizes the log-likelihood function.
+
+    See Also
+    --------
+    probplot, yeojohnson_normplot, yeojohnson_normmax, yeojohnson_llf, boxcox
+
+    Notes
+    -----
+    The Yeo-Johnson transform is given by::
+
+        y = ((x + 1)**lmbda - 1) / lmbda,                for x >= 0, lmbda != 0
+            log(x + 1),                                  for x >= 0, lmbda = 0
+            -((-x + 1)**(2 - lmbda) - 1) / (2 - lmbda),  for x < 0, lmbda != 2
+            -log(-x + 1),                                for x < 0, lmbda = 2
+
+    Unlike `boxcox`, `yeojohnson` does not require the input data to be
+    positive.
+
+    .. versionadded:: 1.2.0
+
+    References
+    ----------
+    I. Yeo and R.A. Johnson, "A New Family of Power Transformations to
+    Improve Normality or Symmetry", Biometrika 87.4 (2000):
+
+
+    Examples
+    --------
+    >>> from scipy import stats
+    >>> import matplotlib.pyplot as plt
+
+    We generate some random variates from a non-normal distribution and make a
+    probability plot for it, to show it is non-normal in the tails:
+
+    >>> fig = plt.figure()
+    >>> ax1 = fig.add_subplot(211)
+    >>> x = stats.loggamma.rvs(5, size=500) + 5
+    >>> prob = stats.probplot(x, dist=stats.norm, plot=ax1)
+    >>> ax1.set_xlabel('')
+    >>> ax1.set_title('Probplot against normal distribution')
+
+    We now use `yeojohnson` to transform the data so it's closest to normal:
+
+    >>> ax2 = fig.add_subplot(212)
+    >>> xt, lmbda = stats.yeojohnson(x)
+    >>> prob = stats.probplot(xt, dist=stats.norm, plot=ax2)
+    >>> ax2.set_title('Probplot after Yeo-Johnson transformation')
+
+    >>> plt.show()
+
+    """
+    x = np.asarray(x)
+    if x.size == 0:
+        return x
+
+    if np.issubdtype(x.dtype, np.complexfloating):
+        raise ValueError('Yeo-Johnson transformation is not defined for '
+                         'complex numbers.')
+
+    if np.issubdtype(x.dtype, np.integer):
+        x = x.astype(np.float64, copy=False)
+
+    if lmbda is not None:
+        return _yeojohnson_transform(x, lmbda)
+
+    # if lmbda=None, find the lmbda that maximizes the log-likelihood function.
+    lmax = yeojohnson_normmax(x)
+    y = _yeojohnson_transform(x, lmax)
+
+    return y, lmax
+
+
+def _yeojohnson_transform(x, lmbda):
+    """Returns `x` transformed by the Yeo-Johnson power transform with given
+    parameter `lmbda`.
+    """
+    out = np.zeros_like(x)
+    pos = x >= 0  # binary mask
+
+    # when x >= 0
+    if abs(lmbda) < np.spacing(1.):
+        out[pos] = np.log1p(x[pos])
+    else:  # lmbda != 0
+        out[pos] = (np.power(x[pos] + 1, lmbda) - 1) / lmbda
+
+    # when x < 0
+    if abs(lmbda - 2) > np.spacing(1.):
+        out[~pos] = -(np.power(-x[~pos] + 1, 2 - lmbda) - 1) / (2 - lmbda)
+    else:  # lmbda == 2
+        out[~pos] = -np.log1p(-x[~pos])
+
+    return out
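+
+# A quick sanity sketch (illustrative): ``lmbda=1`` reduces the transform to
+# the identity on both branches, since ((x + 1)**1 - 1)/1 == x for x >= 0 and
+# -((-x + 1)**(2 - 1) - 1)/(2 - 1) == x for x < 0:
+#
+#     >>> import numpy as np
+#     >>> x = np.array([-2.5, -1.0, 0.0, 1.0, 2.5])
+#     >>> np.allclose(_yeojohnson_transform(x, 1.0), x)
+#     True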
+
+
+def yeojohnson_llf(lmb, data):
+    r"""The yeojohnson log-likelihood function.
+
+    Parameters
+    ----------
+    lmb : scalar
+        Parameter for Yeo-Johnson transformation. See `yeojohnson` for
+        details.
+    data : array_like
+        Data to calculate Yeo-Johnson log-likelihood for. If `data` is
+        multi-dimensional, the log-likelihood is calculated along the first
+        axis.
+
+    Returns
+    -------
+    llf : float
+        Yeo-Johnson log-likelihood of `data` given `lmb`.
+
+    See Also
+    --------
+    yeojohnson, probplot, yeojohnson_normplot, yeojohnson_normmax
+
+    Notes
+    -----
+    The Yeo-Johnson log-likelihood function is defined here as
+
+    .. math::
+
+        llf = -N/2 \log(\hat{\sigma}^2) + (\lambda - 1)
+              \sum_i \text{ sign }(x_i)\log(|x_i| + 1)
+
+    where :math:`\hat{\sigma}^2` is estimated variance of the Yeo-Johnson
+    transformed input data ``x``.
+
+    .. versionadded:: 1.2.0
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> import matplotlib.pyplot as plt
+    >>> from mpl_toolkits.axes_grid1.inset_locator import inset_axes
+
+    Generate some random variates and calculate Yeo-Johnson log-likelihood
+    values for them for a range of ``lmbda`` values:
+
+    >>> x = stats.loggamma.rvs(5, loc=10, size=1000)
+    >>> lmbdas = np.linspace(-2, 10)
+    >>> llf = np.zeros(lmbdas.shape, dtype=float)
+    >>> for ii, lmbda in enumerate(lmbdas):
+    ...     llf[ii] = stats.yeojohnson_llf(lmbda, x)
+
+    Also find the optimal lmbda value with `yeojohnson`:
+
+    >>> x_most_normal, lmbda_optimal = stats.yeojohnson(x)
+
+    Plot the log-likelihood as function of lmbda.  Add the optimal lmbda as a
+    horizontal line to check that that's really the optimum:
+
+    >>> fig = plt.figure()
+    >>> ax = fig.add_subplot(111)
+    >>> ax.plot(lmbdas, llf, 'b.-')
+    >>> ax.axhline(stats.yeojohnson_llf(lmbda_optimal, x), color='r')
+    >>> ax.set_xlabel('lmbda parameter')
+    >>> ax.set_ylabel('Yeo-Johnson log-likelihood')
+
+    Now add some probability plots to show that where the log-likelihood is
+    maximized the data transformed with `yeojohnson` looks closest to normal:
+
+    >>> locs = [3, 10, 4]  # 'lower left', 'center', 'lower right'
+    >>> for lmbda, loc in zip([-1, lmbda_optimal, 9], locs):
+    ...     xt = stats.yeojohnson(x, lmbda=lmbda)
+    ...     (osm, osr), (slope, intercept, r_sq) = stats.probplot(xt)
+    ...     ax_inset = inset_axes(ax, width="20%", height="20%", loc=loc)
+    ...     ax_inset.plot(osm, osr, 'c.', osm, slope*osm + intercept, 'k-')
+    ...     ax_inset.set_xticklabels([])
+    ...     ax_inset.set_yticklabels([])
+    ...     ax_inset.set_title(r'$\lambda=%1.2f$' % lmbda)
+
+    >>> plt.show()
+
+    """
+    data = np.asarray(data)
+    n_samples = data.shape[0]
+
+    if n_samples == 0:
+        return np.nan
+
+    trans = _yeojohnson_transform(data, lmb)
+    trans_var = trans.var(axis=0)
+    loglike = np.empty_like(trans_var)
+
+    # Avoid RuntimeWarning raised by np.log when the variance is too low
+    tiny_variance = trans_var < np.finfo(trans_var.dtype).tiny
+    loglike[tiny_variance] = np.inf
+
+    loglike[~tiny_variance] = (
+        -n_samples / 2 * np.log(trans_var[~tiny_variance]))
+    loglike[~tiny_variance] += (
+        (lmb - 1) * (np.sign(data) * np.log(np.abs(data) + 1)).sum(axis=0))
+    return loglike
+
+
+def yeojohnson_normmax(x, brack=(-2, 2)):
+    """Compute optimal Yeo-Johnson transform parameter.
+
+    Compute optimal Yeo-Johnson transform parameter for input data, using
+    maximum likelihood estimation.
+
+    Parameters
+    ----------
+    x : array_like
+        Input array.
+    brack : 2-tuple, optional
+        The starting interval for a downhill bracket search with
+        `optimize.brent`. Note that this is in most cases not critical; the
+        final result is allowed to be outside this bracket.
+
+    Returns
+    -------
+    maxlog : float
+        The optimal transform parameter found.
+
+    See Also
+    --------
+    yeojohnson, yeojohnson_llf, yeojohnson_normplot
+
+    Notes
+    -----
+    .. versionadded:: 1.2.0
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> import matplotlib.pyplot as plt
+
+    Generate some data and determine optimal ``lmbda``
+
+    >>> rng = np.random.default_rng()
+    >>> x = stats.loggamma.rvs(5, size=30, random_state=rng) + 5
+    >>> lmax = stats.yeojohnson_normmax(x)
+
+    >>> fig = plt.figure()
+    >>> ax = fig.add_subplot(111)
+    >>> prob = stats.yeojohnson_normplot(x, -10, 10, plot=ax)
+    >>> ax.axvline(lmax, color='r')
+
+    >>> plt.show()
+
+    """
+    def _neg_llf(lmbda, data):
+        llf = yeojohnson_llf(lmbda, data)
+        # reject likelihoods that are inf, which are likely due to small
+        # variance in the transformed space
+        llf[np.isinf(llf)] = -np.inf
+        return -llf
+
+    with np.errstate(invalid='ignore'):
+        return optimize.brent(_neg_llf, brack=brack, args=(x,))
+
+
+def yeojohnson_normplot(x, la, lb, plot=None, N=80):
+    """Compute parameters for a Yeo-Johnson normality plot, optionally show it.
+
+    A Yeo-Johnson normality plot shows graphically what the best
+    transformation parameter is to use in `yeojohnson` to obtain a
+    distribution that is close to normal.
+
+    Parameters
+    ----------
+    x : array_like
+        Input array.
+    la, lb : scalar
+        The lower and upper bounds for the ``lmbda`` values to pass to
+        `yeojohnson` for Yeo-Johnson transformations. These are also the
+        limits of the horizontal axis of the plot if that is generated.
+    plot : object, optional
+        If given, plots the quantiles and least squares fit.
+        `plot` is an object that has to have methods "plot" and "text".
+        The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
+        or a custom object with the same methods.
+        Default is None, which means that no plot is created.
+    N : int, optional
+        Number of points on the horizontal axis (equally distributed from
+        `la` to `lb`).
+
+    Returns
+    -------
+    lmbdas : ndarray
+        The ``lmbda`` values for which a Yeo-Johnson transform was done.
+    ppcc : ndarray
+        Probability Plot Correlation Coefficient, as obtained from `probplot`
+        when fitting the Yeo-Johnson transformed input `x` against a normal
+        distribution.
+
+    See Also
+    --------
+    probplot, yeojohnson, yeojohnson_normmax, yeojohnson_llf, ppcc_max
+
+    Notes
+    -----
+    Even if `plot` is given, the figure is not shown or saved by
+    `yeojohnson_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')``
+    should be used after calling `yeojohnson_normplot`.
+
+    .. versionadded:: 1.2.0
+
+    Examples
+    --------
+    >>> from scipy import stats
+    >>> import matplotlib.pyplot as plt
+
+    Generate some non-normally distributed data, and create a Yeo-Johnson plot:
+
+    >>> x = stats.loggamma.rvs(5, size=500) + 5
+    >>> fig = plt.figure()
+    >>> ax = fig.add_subplot(111)
+    >>> prob = stats.yeojohnson_normplot(x, -20, 20, plot=ax)
+
+    Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in
+    the same plot:
+
+    >>> _, maxlog = stats.yeojohnson(x)
+    >>> ax.axvline(maxlog, color='r')
+
+    >>> plt.show()
+
+    """
+    return _normplot('yeojohnson', x, la, lb, plot, N)
+
+
+ShapiroResult = namedtuple('ShapiroResult', ('statistic', 'pvalue'))
+
+
+def shapiro(x):
+    """Perform the Shapiro-Wilk test for normality.
+
+    The Shapiro-Wilk test tests the null hypothesis that the
+    data was drawn from a normal distribution.
+
+    Parameters
+    ----------
+    x : array_like
+        Array of sample data.
+
+    Returns
+    -------
+    statistic : float
+        The test statistic.
+    p-value : float
+        The p-value for the hypothesis test.
+
+    See Also
+    --------
+    anderson : The Anderson-Darling test for normality
+    kstest : The Kolmogorov-Smirnov test for goodness of fit.
+
+    Notes
+    -----
+    The algorithm used is described in [4]_ but censoring parameters as
+    described are not implemented. For N > 5000 the W test statistic is accurate
+    but the p-value may not be.
+
+    The chance of rejecting the null hypothesis when it is true is close to 5%
+    regardless of sample size.
+
+    References
+    ----------
+    .. [1] https://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
+    .. [2] Shapiro, S. S. & Wilk, M.B (1965). An analysis of variance test for
+           normality (complete samples), Biometrika, Vol. 52, pp. 591-611.
+    .. [3] Razali, N. M. & Wah, Y. B. (2011) Power comparisons of Shapiro-Wilk,
+           Kolmogorov-Smirnov, Lilliefors and Anderson-Darling tests, Journal of
+           Statistical Modeling and Analytics, Vol. 2, pp. 21-33.
+    .. [4] ALGORITHM AS R94 APPL. STATIST. (1995) VOL. 44, NO. 4.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> rng = np.random.default_rng()
+    >>> x = stats.norm.rvs(loc=5, scale=3, size=100, random_state=rng)
+    >>> shapiro_test = stats.shapiro(x)
+    >>> shapiro_test
+    ShapiroResult(statistic=0.9813305735588074, pvalue=0.16855233907699585)
+    >>> shapiro_test.statistic
+    0.9813305735588074
+    >>> shapiro_test.pvalue
+    0.16855233907699585
+
+    """
+    x = np.ravel(x)
+
+    N = len(x)
+    if N < 3:
+        raise ValueError("Data must be at least length 3.")
+
+    x = x - np.median(x)
+
+    a = zeros(N, 'f')
+    init = 0
+
+    y = sort(x)
+    a, w, pw, ifault = _statlib.swilk(y, a[:N//2], init)
+    if ifault not in [0, 2]:
+        warnings.warn("Input data for shapiro has range zero. The results "
+                      "may not be accurate.")
+    if N > 5000:
+        warnings.warn("p-value may not be accurate for N > 5000.")
+
+    return ShapiroResult(w, pw)
+
+
+# Values from Stephens, M A, "EDF Statistics for Goodness of Fit and
+#             Some Comparisons", Journal of the American Statistical
+#             Association, Vol. 69, Issue 347, Sept. 1974, pp 730-737
+_Avals_norm = array([0.576, 0.656, 0.787, 0.918, 1.092])
+_Avals_expon = array([0.922, 1.078, 1.341, 1.606, 1.957])
+# From Stephens, M A, "Goodness of Fit for the Extreme Value Distribution",
+#             Biometrika, Vol. 64, Issue 3, Dec. 1977, pp 583-588.
+_Avals_gumbel = array([0.474, 0.637, 0.757, 0.877, 1.038])
+# From Stephens, M A, "Tests of Fit for the Logistic Distribution Based
+#             on the Empirical Distribution Function.", Biometrika,
+#             Vol. 66, Issue 3, Dec. 1979, pp 591-595.
+_Avals_logistic = array([0.426, 0.563, 0.660, 0.769, 0.906, 1.010])
+
+
+AndersonResult = _make_tuple_bunch('AndersonResult',
+                                   ['statistic', 'critical_values',
+                                    'significance_level'], ['fit_result'])
+
+
+def anderson(x, dist='norm'):
+    """Anderson-Darling test for data coming from a particular distribution.
+
+    The Anderson-Darling test tests the null hypothesis that a sample is
+    drawn from a population that follows a particular distribution.
+    For the Anderson-Darling test, the critical values depend on
+    which distribution is being tested against.  This function works
+    for normal, exponential, logistic, or Gumbel (Extreme Value
+    Type I) distributions.
+
+    Parameters
+    ----------
+    x : array_like
+        Array of sample data.
+    dist : {'norm', 'expon', 'logistic', 'gumbel', 'gumbel_l', 'gumbel_r', 'extreme1'}, optional
+        The type of distribution to test against.  The default is 'norm'.
+        The names 'extreme1', 'gumbel_l' and 'gumbel' are synonyms for the
+        same distribution.
+
+    Returns
+    -------
+    result : AndersonResult
+        An object with the following attributes:
+
+        statistic : float
+            The Anderson-Darling test statistic.
+        critical_values : list
+            The critical values for this distribution.
+        significance_level : list
+            The significance levels for the corresponding critical values
+            in percent.  The function returns critical values for a
+            differing set of significance levels depending on the
+            distribution that is being tested against.
+        fit_result : `~scipy.stats._result_classes.FitResult`
+            An object containing the results of fitting the distribution to
+            the data.
+
+    See Also
+    --------
+    kstest : The Kolmogorov-Smirnov test for goodness-of-fit.
+
+    Notes
+    -----
+    Critical values provided are for the following significance levels:
+
+    normal/exponential
+        15%, 10%, 5%, 2.5%, 1%
+    logistic
+        25%, 10%, 5%, 2.5%, 1%, 0.5%
+    Gumbel
+        25%, 10%, 5%, 2.5%, 1%
+
+    If the returned statistic is larger than these critical values, then
+    for the corresponding significance level the null hypothesis that
+    the data come from the chosen distribution can be rejected.
+    The returned statistic is referred to as 'A2' in the references.
+
+    References
+    ----------
+    .. [1] https://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
+    .. [2] Stephens, M. A. (1974). EDF Statistics for Goodness of Fit and
+           Some Comparisons, Journal of the American Statistical Association,
+           Vol. 69, pp. 730-737.
+    .. [3] Stephens, M. A. (1976). Asymptotic Results for Goodness-of-Fit
+           Statistics with Unknown Parameters, Annals of Statistics, Vol. 4,
+           pp. 357-369.
+    .. [4] Stephens, M. A. (1977). Goodness of Fit for the Extreme Value
+           Distribution, Biometrika, Vol. 64, pp. 583-588.
+    .. [5] Stephens, M. A. (1977). Goodness of Fit with Special Reference
+           to Tests for Exponentiality, Technical Report No. 262,
+           Department of Statistics, Stanford University, Stanford, CA.
+    .. [6] Stephens, M. A. (1979). Tests of Fit for the Logistic Distribution
+           Based on the Empirical Distribution Function, Biometrika, Vol. 66,
+           pp. 591-595.
+
+    Examples
+    --------
+    Test the null hypothesis that a random sample was drawn from a normal
+    distribution (with unspecified mean and standard deviation).
+
+    >>> import numpy as np
+    >>> from scipy.stats import anderson
+    >>> rng = np.random.default_rng()
+    >>> data = rng.random(size=35)
+    >>> res = anderson(data)
+    >>> res.statistic
+    0.8398018749744764
+    >>> res.critical_values
+    array([0.527, 0.6  , 0.719, 0.839, 0.998])
+    >>> res.significance_level
+    array([15. , 10. ,  5. ,  2.5,  1. ])
+
+    The value of the statistic (barely) exceeds the critical value associated
+    with a significance level of 2.5%, so the null hypothesis may be rejected
+    at a significance level of 2.5%, but not at a significance level of 1%.
+
+    """  # noqa
+    dist = dist.lower()
+    if dist in {'extreme1', 'gumbel'}:
+        dist = 'gumbel_l'
+    dists = {'norm', 'expon', 'gumbel_l', 'gumbel_r', 'logistic'}
+    if dist not in dists:
+        raise ValueError(f"Invalid distribution; dist must be in {dists}.")
+    y = sort(x)
+    xbar = np.mean(x, axis=0)
+    N = len(y)
+    if dist == 'norm':
+        s = np.std(x, ddof=1, axis=0)
+        w = (y - xbar) / s
+        fit_params = xbar, s
+        logcdf = distributions.norm.logcdf(w)
+        logsf = distributions.norm.logsf(w)
+        sig = array([15, 10, 5, 2.5, 1])
+        critical = around(_Avals_norm / (1.0 + 4.0/N - 25.0/N/N), 3)
+    elif dist == 'expon':
+        w = y / xbar
+        fit_params = 0, xbar
+        logcdf = distributions.expon.logcdf(w)
+        logsf = distributions.expon.logsf(w)
+        sig = array([15, 10, 5, 2.5, 1])
+        critical = around(_Avals_expon / (1.0 + 0.6/N), 3)
+    elif dist == 'logistic':
+        def rootfunc(ab, xj, N):
+            a, b = ab
+            tmp = (xj - a) / b
+            tmp2 = exp(tmp)
+            val = [np.sum(1.0/(1+tmp2), axis=0) - 0.5*N,
+                   np.sum(tmp*(1.0-tmp2)/(1+tmp2), axis=0) + N]
+            return array(val)
+
+        sol0 = array([xbar, np.std(x, ddof=1, axis=0)])
+        sol = optimize.fsolve(rootfunc, sol0, args=(x, N), xtol=1e-5)
+        w = (y - sol[0]) / sol[1]
+        fit_params = sol
+        logcdf = distributions.logistic.logcdf(w)
+        logsf = distributions.logistic.logsf(w)
+        sig = array([25, 10, 5, 2.5, 1, 0.5])
+        critical = around(_Avals_logistic / (1.0 + 0.25/N), 3)
+    elif dist == 'gumbel_r':
+        xbar, s = distributions.gumbel_r.fit(x)
+        w = (y - xbar) / s
+        fit_params = xbar, s
+        logcdf = distributions.gumbel_r.logcdf(w)
+        logsf = distributions.gumbel_r.logsf(w)
+        sig = array([25, 10, 5, 2.5, 1])
+        critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3)
+    elif dist == 'gumbel_l':
+        xbar, s = distributions.gumbel_l.fit(x)
+        w = (y - xbar) / s
+        fit_params = xbar, s
+        logcdf = distributions.gumbel_l.logcdf(w)
+        logsf = distributions.gumbel_l.logsf(w)
+        sig = array([25, 10, 5, 2.5, 1])
+        critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3)
+
+    i = arange(1, N + 1)
+    A2 = -N - np.sum((2*i - 1.0) / N * (logcdf + logsf[::-1]), axis=0)
+
+    # FitResult initializer expects an optimize result, so let's work with it
+    message = '`anderson` successfully fit the distribution to the data.'
+    res = optimize.OptimizeResult(success=True, message=message)
+    res.x = np.array(fit_params)
+    fit_result = FitResult(getattr(distributions, dist), y,
+                           discrete=False, res=res)
+
+    return AndersonResult(A2, critical, sig, fit_result=fit_result)
+
+
+def _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N):
+    """Compute A2akN equation 7 of Scholz and Stephens.
+
+    Parameters
+    ----------
+    samples : sequence of 1-D array_like
+        Array of sample arrays.
+    Z : array_like
+        Sorted array of all observations.
+    Zstar : array_like
+        Sorted array of unique observations.
+    k : int
+        Number of samples.
+    n : array_like
+        Number of observations in each sample.
+    N : int
+        Total number of observations.
+
+    Returns
+    -------
+    A2aKN : float
+        The A2aKN statistics of Scholz and Stephens 1987.
+
+    """
+    A2akN = 0.
+    Z_ssorted_left = Z.searchsorted(Zstar, 'left')
+    if N == Zstar.size:
+        lj = 1.
+    else:
+        lj = Z.searchsorted(Zstar, 'right') - Z_ssorted_left
+    Bj = Z_ssorted_left + lj / 2.
+    for i in arange(0, k):
+        s = np.sort(samples[i])
+        s_ssorted_right = s.searchsorted(Zstar, side='right')
+        Mij = s_ssorted_right.astype(float)
+        fij = s_ssorted_right - s.searchsorted(Zstar, 'left')
+        Mij -= fij / 2.
+        inner = lj / float(N) * (N*Mij - Bj*n[i])**2 / (Bj*(N - Bj) - N*lj/4.)
+        A2akN += inner.sum() / n[i]
+    A2akN *= (N - 1.) / N
+    return A2akN
+
+
+def _anderson_ksamp_right(samples, Z, Zstar, k, n, N):
+    """Compute A2akN equation 6 of Scholz & Stephens.
+
+    Parameters
+    ----------
+    samples : sequence of 1-D array_like
+        Array of sample arrays.
+    Z : array_like
+        Sorted array of all observations.
+    Zstar : array_like
+        Sorted array of unique observations.
+    k : int
+        Number of samples.
+    n : array_like
+        Number of observations in each sample.
+    N : int
+        Total number of observations.
+
+    Returns
+    -------
+    A2kN : float
+        The A2kN statistic of Scholz and Stephens 1987.
+
+    """
+    A2kN = 0.
+    lj = Z.searchsorted(Zstar[:-1], 'right') - Z.searchsorted(Zstar[:-1],
+                                                              'left')
+    Bj = lj.cumsum()
+    for i in arange(0, k):
+        s = np.sort(samples[i])
+        Mij = s.searchsorted(Zstar[:-1], side='right')
+        inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / (Bj * (N - Bj))
+        A2kN += inner.sum() / n[i]
+    return A2kN
+
+
+Anderson_ksampResult = _make_tuple_bunch(
+    'Anderson_ksampResult',
+    ['statistic', 'critical_values', 'pvalue'], []
+)
+
+
+def anderson_ksamp(samples, midrank=True):
+    """The Anderson-Darling test for k-samples.
+
+    The k-sample Anderson-Darling test is a modification of the
+    one-sample Anderson-Darling test. It tests the null hypothesis
+    that k-samples are drawn from the same population without having
+    to specify the distribution function of that population. The
+    critical values depend on the number of samples.
+
+    Parameters
+    ----------
+    samples : sequence of 1-D array_like
+        Collection of one-dimensional sample arrays.
+    midrank : bool, optional
+        Type of Anderson-Darling test which is computed. Default
+        (True) is the midrank test applicable to continuous and
+        discrete populations. If False, the right side empirical
+        distribution is used.
+
+    Returns
+    -------
+    res : Anderson_ksampResult
+        An object containing attributes:
+
+        statistic : float
+            Normalized k-sample Anderson-Darling test statistic.
+        critical_values : array
+            The critical values for significance levels 25%, 10%, 5%, 2.5%, 1%,
+            0.5%, 0.1%.
+        pvalue : float
+            The approximate p-value of the test. The value is floored at
+            0.1% and capped at 25%.
+
+    Raises
+    ------
+    ValueError
+        If fewer than 2 samples are provided, a sample is empty, or there
+        are no distinct observations in the samples.
+
+    See Also
+    --------
+    ks_2samp : 2 sample Kolmogorov-Smirnov test
+    anderson : 1 sample Anderson-Darling test
+
+    Notes
+    -----
+    [1]_ defines three versions of the k-sample Anderson-Darling test:
+    one for continuous distributions and two for discrete
+    distributions, in which ties between samples may occur. The
+    default of this routine is to compute the version based on the
+    midrank empirical distribution function. This test is applicable
+    to continuous and discrete data. If midrank is set to False, the
+    right side empirical distribution is used for a test for discrete
+    data. According to [1]_, the two discrete test statistics differ
+    only slightly if a few collisions due to round-off errors occur in
+    the test not adjusted for ties between samples.
+
+    The critical values corresponding to the significance levels from 0.01
+    to 0.25 are taken from [1]_. p-values are floored at 0.1% and capped
+    at 25%. Since the range of critical values might be extended in
+    future releases, it is recommended not to test ``p == 0.25``, but rather
+    ``p >= 0.25`` (analogously for the lower bound).
+
+    .. versionadded:: 0.14.0
+
+    References
+    ----------
+    .. [1] Scholz, F. W and Stephens, M. A. (1987), K-Sample
+           Anderson-Darling Tests, Journal of the American Statistical
+           Association, Vol. 82, pp. 918-924.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> rng = np.random.default_rng()
+    >>> res = stats.anderson_ksamp([rng.normal(size=50),
+    ... rng.normal(loc=0.5, size=30)])
+    >>> res.statistic, res.pvalue
+    (1.974403288713695, 0.04991293614572478)
+    >>> res.critical_values
+    array([0.325, 1.226, 1.961, 2.718, 3.752, 4.592, 6.546])
+
+    The null hypothesis that the two random samples come from the same
+    distribution can be rejected at the 5% level because the returned
+    test value is greater than the critical value for 5% (1.961) but
+    not at the 2.5% level. The interpolation gives an approximate
+    p-value of 4.99%.
+
+    >>> res = stats.anderson_ksamp([rng.normal(size=50),
+    ... rng.normal(size=30), rng.normal(size=20)])
+    >>> res.statistic, res.pvalue
+    (-0.29103725200789504, 0.25)
+    >>> res.critical_values
+    array([ 0.44925884,  1.3052767 ,  1.9434184 ,  2.57696569,  3.41634856,
+      4.07210043, 5.56419101])
+
+    The null hypothesis cannot be rejected for three samples from an
+    identical distribution. The reported p-value (25%) has been capped and
+    may not be very accurate (since it corresponds to the value 0.449
+    whereas the statistic is -0.291).
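+
+    As recommended in the Notes, guard against the capped p-value by
+    comparing against the bound rather than testing for equality:
+
+    >>> bool(res.pvalue >= 0.25)  # p-value was capped; not significant
+    True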
+
+    """
+    k = len(samples)
+    if (k < 2):
+        raise ValueError("anderson_ksamp needs at least two samples")
+
+    samples = list(map(np.asarray, samples))
+    Z = np.sort(np.hstack(samples))
+    N = Z.size
+    Zstar = np.unique(Z)
+    if Zstar.size < 2:
+        raise ValueError("anderson_ksamp needs more than one distinct "
+                         "observation")
+
+    n = np.array([sample.size for sample in samples])
+    if np.any(n == 0):
+        raise ValueError("anderson_ksamp encountered sample without "
+                         "observations")
+
+    if midrank:
+        A2kN = _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N)
+    else:
+        A2kN = _anderson_ksamp_right(samples, Z, Zstar, k, n, N)
+
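+    # Quantities H, h, g from Scholz & Stephens 1987, used in the variance
+    # `sigmasq` below:
+    #     H = sum_i 1/n_i,  h = sum_{i=1}^{N-1} 1/i,
+    #     g = sum_{i=1}^{N-2} sum_{j=i+1}^{N-1} 1/((N - i) * j)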
+    H = (1. / n).sum()
+    hs_cs = (1. / arange(N - 1, 1, -1)).cumsum()
+    h = hs_cs[-1] + 1
+    g = (hs_cs / arange(2, N)).sum()
+
+    a = (4*g - 6) * (k - 1) + (10 - 6*g)*H
+    b = (2*g - 4)*k**2 + 8*h*k + (2*g - 14*h - 4)*H - 8*h + 4*g - 6
+    c = (6*h + 2*g - 2)*k**2 + (4*h - 4*g + 6)*k + (2*h - 6)*H + 4*h
+    d = (2*h + 6)*k**2 - 4*h*k
+    sigmasq = (a*N**3 + b*N**2 + c*N + d) / ((N - 1.) * (N - 2.) * (N - 3.))
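+    # Under the null hypothesis A2kN has mean k - 1 and variance sigmasq
+    # (Scholz & Stephens 1987), so standardize: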
+    m = k - 1
+    A2 = (A2kN - m) / math.sqrt(sigmasq)
+
+    # The b_i values are the interpolation coefficients from Table 2
+    # of Scholz and Stephens 1987
+    b0 = np.array([0.675, 1.281, 1.645, 1.96, 2.326, 2.573, 3.085])
+    b1 = np.array([-0.245, 0.25, 0.678, 1.149, 1.822, 2.364, 3.615])
+    b2 = np.array([-0.105, -0.305, -0.362, -0.391, -0.396, -0.345, -0.154])
+    critical = b0 + b1 / math.sqrt(m) + b2 / m
+
+    sig = np.array([0.25, 0.1, 0.05, 0.025, 0.01, 0.005, 0.001])
+    if A2 < critical.min():
+        p = sig.max()
+        warnings.warn("p-value capped: true value larger than {}".format(p),
+                      stacklevel=2)
+    elif A2 > critical.max():
+        p = sig.min()
+        warnings.warn("p-value floored: true value smaller than {}".format(p),
+                      stacklevel=2)
+    else:
+        # interpolation of probit of significance level
+        pf = np.polyfit(critical, log(sig), 2)
+        p = math.exp(np.polyval(pf, A2))
+
+    # create result object with alias for backward compatibility
+    res = Anderson_ksampResult(A2, critical, p)
+    res.significance_level = p
+    return res
+
+
+AnsariResult = namedtuple('AnsariResult', ('statistic', 'pvalue'))
+
+
+class _ABW:
+    """Distribution of Ansari-Bradley W-statistic under the null hypothesis."""
+    # TODO: calculate exact distribution considering ties
+    # We could avoid summing over more than half the frequencies,
+    # but initially it doesn't seem worth the extra complexity
+
+    def __init__(self):
+        """Minimal initializer."""
+        self.m = None
+        self.n = None
+        self.astart = None
+        self.total = None
+        self.freqs = None
+
+    def _recalc(self, n, m):
+        """When necessary, recalculate exact distribution."""
+        if n != self.n or m != self.m:
+            self.n, self.m = n, m
+            # distribution is NOT symmetric when m + n is odd
+            # n is len(x), m is len(y), and the ratio of scales is defined as x/y
+            astart, a1, _ = _statlib.gscale(n, m)
+            self.astart = astart  # minimum value of statistic
+            # Exact distribution of test statistic under null hypothesis
+            # expressed as frequencies/counts/integers to maintain precision.
+            # Stored as floats to avoid overflow of sums.
+            self.freqs = a1.astype(np.float64)
+            self.total = self.freqs.sum()  # could calculate from m and n
+            # probability mass is self.freqs / self.total;
+
+    def pmf(self, k, n, m):
+        """Probability mass function."""
+        self._recalc(n, m)
+        # The convention here is that PMF at k = 12.5 is the same as at k = 12,
+        # -> use `floor` in case of ties.
+        ind = np.floor(k - self.astart).astype(int)
+        return self.freqs[ind] / self.total
+
+    def cdf(self, k, n, m):
+        """Cumulative distribution function."""
+        self._recalc(n, m)
+        # Null distribution derived without considering ties is
+        # approximate. Round the index up (`ceil`) so the CDF, and hence
+        # the p-value, is not underestimated (avoids Type I error).
+        ind = np.ceil(k - self.astart).astype(int)
+        return self.freqs[:ind+1].sum() / self.total
+
+    def sf(self, k, n, m):
+        """Survival function."""
+        self._recalc(n, m)
+        # Null distribution derived without considering ties is
+        # approximate. Round the index down (`floor`) so the SF, and hence
+        # the p-value, is not underestimated (avoids Type I error).
+        ind = np.floor(k - self.astart).astype(int)
+        return self.freqs[ind:].sum() / self.total
+
+
+# Maintain state for faster repeat calls to `ansari` when the exact
+# null distribution is used
+_abw_state = _ABW()
+
+
+def ansari(x, y, alternative='two-sided'):
+    """Perform the Ansari-Bradley test for equal scale parameters.
+
+    The Ansari-Bradley test ([1]_, [2]_) is a non-parametric test
+    for the equality of the scale parameter of the distributions
+    from which two samples were drawn. The null hypothesis states that
+    the ratio of the scale of the distribution underlying `x` to the scale
+    of the distribution underlying `y` is 1.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Arrays of sample data.
+    alternative : {'two-sided', 'less', 'greater'}, optional
+        Defines the alternative hypothesis. Default is 'two-sided'.
+        The following options are available:
+
+        * 'two-sided': the ratio of scales is not equal to 1.
+        * 'less': the ratio of scales is less than 1.
+        * 'greater': the ratio of scales is greater than 1.
+
+        .. versionadded:: 1.7.0
+
+    Returns
+    -------
+    statistic : float
+        The Ansari-Bradley test statistic.
+    pvalue : float
+        The p-value of the hypothesis test.
+
+    See Also
+    --------
+    fligner : A non-parametric test for the equality of k variances
+    mood : A non-parametric test for the equality of two scale parameters
+
+    Notes
+    -----
+    The p-value given is exact when the sample sizes are both less than
+    55 and there are no ties; otherwise, a normal approximation for the
+    p-value is used.
+
+    References
+    ----------
+    .. [1] Ansari, A. R. and Bradley, R. A. (1960) Rank-sum tests for
+           dispersions, Annals of Mathematical Statistics, 31, 1174-1189.
+    .. [2] Sprent, Peter and N.C. Smeeton.  Applied nonparametric
+           statistical methods.  3rd ed. Chapman and Hall/CRC. 2001.
+           Section 5.8.2.
+    .. [3] Nathaniel E. Helwig "Nonparametric Dispersion and Equality
+           Tests" at http://users.stat.umn.edu/~helwig/notes/npde-Notes.pdf
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.stats import ansari
+    >>> rng = np.random.default_rng()
+
+    For these examples, we'll create three random data sets.  The first
+    two, with sizes 35 and 25, are drawn from a normal distribution with
+    mean 0 and standard deviation 2.  The third data set has size 25 and
+    is drawn from a normal distribution with standard deviation 1.25.
+
+    >>> x1 = rng.normal(loc=0, scale=2, size=35)
+    >>> x2 = rng.normal(loc=0, scale=2, size=25)
+    >>> x3 = rng.normal(loc=0, scale=1.25, size=25)
+
+    First we apply `ansari` to `x1` and `x2`.  These samples are drawn
+    from the same distribution, so we expect the Ansari-Bradley test
+    should not lead us to conclude that the scales of the distributions
+    are different.
+
+    >>> ansari(x1, x2)
+    AnsariResult(statistic=541.0, pvalue=0.9762532927399098)
+
+    With a p-value close to 1, we cannot conclude that there is a
+    significant difference in the scales (as expected).
+
+    Now apply the test to `x1` and `x3`:
+
+    >>> ansari(x1, x3)
+    AnsariResult(statistic=425.0, pvalue=0.0003087020407974518)
+
+    The probability of observing such an extreme value of the statistic
+    under the null hypothesis of equal scales is only 0.03087%. We take this
+    as evidence against the null hypothesis in favor of the alternative:
+    the scales of the distributions from which the samples were drawn
+    are not equal.
+
+    We can use the `alternative` parameter to perform a one-tailed test.
+    In the above example, the scale of `x1` is greater than `x3` and so
+    the ratio of scales of `x1` and `x3` is greater than 1. This means
+    that the p-value when ``alternative='greater'`` should be near 0 and
+    hence we should be able to reject the null hypothesis:
+
+    >>> ansari(x1, x3, alternative='greater')
+    AnsariResult(statistic=425.0, pvalue=0.0001543510203987259)
+
+    As we can see, the p-value is indeed quite low. Use of
+    ``alternative='less'`` should thus yield a large p-value:
+
+    >>> ansari(x1, x3, alternative='less')
+    AnsariResult(statistic=425.0, pvalue=0.9998643258449039)
+
+    """
+    if alternative not in {'two-sided', 'greater', 'less'}:
+        raise ValueError("'alternative' must be 'two-sided',"
+                         " 'greater', or 'less'.")
+    x, y = asarray(x), asarray(y)
+    n = len(x)
+    m = len(y)
+    if m < 1:
+        raise ValueError("Not enough other observations.")
+    if n < 1:
+        raise ValueError("Not enough test observations.")
+
+    N = m + n
+    xy = r_[x, y]  # combine
+    rank = _stats_py.rankdata(xy)
+    symrank = amin(array((rank, N - rank + 1)), 0)
+    AB = np.sum(symrank[:n], axis=0)
+    uxy = unique(xy)
+    repeats = (len(uxy) != len(xy))
+    exact = ((m < 55) and (n < 55) and not repeats)
+    if repeats and (m < 55 or n < 55):
+        warnings.warn("Ties preclude use of exact statistic.")
+    if exact:
+        if alternative == 'two-sided':
+            pval = 2.0 * np.minimum(_abw_state.cdf(AB, n, m),
+                                    _abw_state.sf(AB, n, m))
+        elif alternative == 'greater':
+            # AB statistic is _smaller_ when ratio of scales is larger,
+            # so this is the opposite of the usual calculation
+            pval = _abw_state.cdf(AB, n, m)
+        else:
+            pval = _abw_state.sf(AB, n, m)
+        return AnsariResult(AB, min(1.0, pval))
+
+    # otherwise compute normal approximation
+    if N % 2:  # N odd
+        mnAB = n * (N+1.0)**2 / 4.0 / N
+        varAB = n * m * (N+1.0) * (3+N**2) / (48.0 * N**2)
+    else:
+        mnAB = n * (N+2.0) / 4.0
+        varAB = m * n * (N+2) * (N-2.0) / 48 / (N-1.0)
+    if repeats:   # adjust variance estimates
+        # compute np.sum(tj * rj**2,axis=0)
+        fac = np.sum(symrank**2, axis=0)
+        if N % 2:  # N odd
+            varAB = m * n * (16*N*fac - (N+1)**4) / (16.0 * N**2 * (N-1))
+        else:  # N even
+            varAB = m * n * (16*fac - N*(N+2)**2) / (16.0 * N * (N-1))
+
+    # Small values of AB indicate larger dispersion for the x sample.
+    # Large values of AB indicate larger dispersion for the y sample.
+    # This is opposite to the way we define the ratio of scales. See [1]_.
+    z = (mnAB - AB) / sqrt(varAB)
+    z, pval = _normtest_finish(z, alternative)
+    return AnsariResult(AB, pval)
+
+
+BartlettResult = namedtuple('BartlettResult', ('statistic', 'pvalue'))
+
+
+def bartlett(*samples):
+    """Perform Bartlett's test for equal variances.
+
+    Bartlett's test tests the null hypothesis that all input samples
+    are from populations with equal variances.  For samples
+    from significantly non-normal populations, Levene's test
+    `levene` is more robust.
+
+    Parameters
+    ----------
+    sample1, sample2, ... : array_like
+        Arrays of sample data.  Only 1-D arrays are accepted; they may have
+        different lengths.
+
+    Returns
+    -------
+    statistic : float
+        The test statistic.
+    pvalue : float
+        The p-value of the test.
+
+    See Also
+    --------
+    fligner : A non-parametric test for the equality of k variances
+    levene : A robust parametric test for equality of k variances
+
+    Notes
+    -----
+    Conover et al. (1981) examine many of the existing parametric and
+    nonparametric tests by extensive simulations and they conclude that the
+    tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be
+    superior in terms of robustness to departures from normality and power
+    ([3]_).
+
+    References
+    ----------
+    .. [1]  https://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm
+
+    .. [2]  Snedecor, George W. and Cochran, William G. (1989), Statistical
+              Methods, Eighth Edition, Iowa State University Press.
+
+    .. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and
+           Hypothesis Testing based on Quadratic Inference Function. Technical
+           Report #99-03, Center for Likelihood Studies, Pennsylvania State
+           University.
+
+    .. [4] Bartlett, M. S. (1937). Properties of Sufficiency and Statistical
+           Tests. Proceedings of the Royal Society of London. Series A,
+           Mathematical and Physical Sciences, Vol. 160, No.901, pp. 268-282.
+
+    Examples
+    --------
+    Test whether or not the lists `a`, `b` and `c` come from populations
+    with equal variances.
+
+    >>> import numpy as np
+    >>> from scipy.stats import bartlett
+    >>> a = [8.88, 9.12, 9.04, 8.98, 9.00, 9.08, 9.01, 8.85, 9.06, 8.99]
+    >>> b = [8.88, 8.95, 9.29, 9.44, 9.15, 9.58, 8.36, 9.18, 8.67, 9.05]
+    >>> c = [8.95, 9.12, 8.95, 8.85, 9.03, 8.84, 9.07, 8.98, 8.86, 8.98]
+    >>> stat, p = bartlett(a, b, c)
+    >>> p
+    1.1254782518834628e-05
+
+    The very small p-value suggests that the populations do not have equal
+    variances.
+
+    This is not surprising, given that the sample variance of `b` is much
+    larger than that of `a` and `c`:
+
+    >>> [np.var(x, ddof=1) for x in [a, b, c]]
+    [0.007054444444444413, 0.13073888888888888, 0.008890000000000002]
+
+    """
+    # Handle empty input and input that is not 1d
+    for sample in samples:
+        if np.asanyarray(sample).size == 0:
+            return BartlettResult(np.nan, np.nan)
+        if np.asanyarray(sample).ndim > 1:
+            raise ValueError('Samples must be one-dimensional.')
+
+    k = len(samples)
+    if k < 2:
+        raise ValueError("Must enter at least two input sample vectors.")
+    Ni = np.empty(k)
+    ssq = np.empty(k, 'd')
+    for j in range(k):
+        Ni[j] = len(samples[j])
+        ssq[j] = np.var(samples[j], ddof=1)
+    Ntot = np.sum(Ni, axis=0)
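+    # Bartlett's statistic:
+    #     T = [(N - k) * ln(s_p^2) - sum_j (N_j - 1) * ln(s_j^2)]
+    #         / [1 + (sum_j 1/(N_j - 1) - 1/(N - k)) / (3 * (k - 1))]
+    # where s_p^2 is the pooled variance `spsq`.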
+    spsq = np.sum((Ni - 1)*ssq, axis=0) / (1.0*(Ntot - k))
+    numer = (Ntot*1.0 - k) * log(spsq) - np.sum((Ni - 1.0)*log(ssq), axis=0)
+    denom = 1.0 + 1.0/(3*(k - 1)) * ((np.sum(1.0/(Ni - 1.0), axis=0)) -
+                                     1.0/(Ntot - k))
+    T = numer / denom
+    pval = distributions.chi2.sf(T, k - 1)  # 1 - cdf
+
+    return BartlettResult(T, pval)
+
+
+LeveneResult = namedtuple('LeveneResult', ('statistic', 'pvalue'))
+
+
+def levene(*samples, center='median', proportiontocut=0.05):
+    """Perform Levene test for equal variances.
+
+    The Levene test tests the null hypothesis that all input samples
+    are from populations with equal variances.  Levene's test is an
+    alternative to Bartlett's test `bartlett` in the case where
+    there are significant deviations from normality.
+
+    Parameters
+    ----------
+    sample1, sample2, ... : array_like
+        The sample data, possibly with different lengths. Only one-dimensional
+        samples are accepted.
+    center : {'mean', 'median', 'trimmed'}, optional
+        Which function of the data to use in the test.  The default
+        is 'median'.
+    proportiontocut : float, optional
+        When `center` is 'trimmed', this gives the proportion of data points
+        to cut from each end. (See `scipy.stats.trim_mean`.)
+        Default is 0.05.
+
+    Returns
+    -------
+    statistic : float
+        The test statistic.
+    pvalue : float
+        The p-value for the test.
+
+    Notes
+    -----
+    Three variations of Levene's test are possible.  The possibilities
+    and their recommended usages are:
+
+      * 'median' : Recommended for skewed (non-normal) distributions.
+      * 'mean' : Recommended for symmetric, moderate-tailed distributions.
+      * 'trimmed' : Recommended for heavy-tailed distributions.
+
+    The test version using the mean was proposed in the original article
+    of Levene ([2]_), while the median and trimmed mean have been studied by
+    Brown and Forsythe ([3]_); these variants are sometimes referred to as
+    the Brown-Forsythe test.
+
+    References
+    ----------
+    .. [1] https://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm
+    .. [2] Levene, H. (1960). In Contributions to Probability and Statistics:
+           Essays in Honor of Harold Hotelling, I. Olkin et al. eds.,
+           Stanford University Press, pp. 278-292.
+    .. [3] Brown, M. B. and Forsythe, A. B. (1974), Journal of the American
+           Statistical Association, 69, 364-367
+
+    Examples
+    --------
+    Test whether or not the lists `a`, `b` and `c` come from populations
+    with equal variances.
+
+    >>> import numpy as np
+    >>> from scipy.stats import levene
+    >>> a = [8.88, 9.12, 9.04, 8.98, 9.00, 9.08, 9.01, 8.85, 9.06, 8.99]
+    >>> b = [8.88, 8.95, 9.29, 9.44, 9.15, 9.58, 8.36, 9.18, 8.67, 9.05]
+    >>> c = [8.95, 9.12, 8.95, 8.85, 9.03, 8.84, 9.07, 8.98, 8.86, 8.98]
+    >>> stat, p = levene(a, b, c)
+    >>> p
+    0.002431505967249681
+
+    The small p-value suggests that the populations do not have equal
+    variances.
+
+    This is not surprising, given that the sample variance of `b` is much
+    larger than that of `a` and `c`:
+
+    >>> [np.var(x, ddof=1) for x in [a, b, c]]
+    [0.007054444444444413, 0.13073888888888888, 0.008890000000000002]
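+
+    The `center` and `proportiontocut` keywords select the variant of the
+    test described in the Notes; for example, the trimmed-mean variant for
+    heavy-tailed data can be requested explicitly:
+
+    >>> stat, p = levene(a, b, c, center='trimmed', proportiontocut=0.1)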
+
+    """
+    if center not in ['mean', 'median', 'trimmed']:
+        raise ValueError("center must be 'mean', 'median' or 'trimmed'.")
+
+    k = len(samples)
+    if k < 2:
+        raise ValueError("Must enter at least two input sample vectors.")
+    # check for 1d input
+    for j in range(k):
+        if np.asanyarray(samples[j]).ndim > 1:
+            raise ValueError('Samples must be one-dimensional.')
+
+    Ni = np.empty(k)
+    Yci = np.empty(k, 'd')
+
+    if center == 'median':
+        func = lambda x: np.median(x, axis=0)
+    elif center == 'mean':
+        func = lambda x: np.mean(x, axis=0)
+    else:  # center == 'trimmed'
+        samples = tuple(_stats_py.trimboth(np.sort(sample), proportiontocut)
+                        for sample in samples)
+        func = lambda x: np.mean(x, axis=0)
+
+    for j in range(k):
+        Ni[j] = len(samples[j])
+        Yci[j] = func(samples[j])
+    Ntot = np.sum(Ni, axis=0)
+
+    # compute Zij's
+    Zij = [None] * k
+    for i in range(k):
+        Zij[i] = abs(asarray(samples[i]) - Yci[i])
+
+    # compute Zbari
+    Zbari = np.empty(k, 'd')
+    Zbar = 0.0
+    for i in range(k):
+        Zbari[i] = np.mean(Zij[i], axis=0)
+        Zbar += Zbari[i] * Ni[i]
+
+    Zbar /= Ntot
+    numer = (Ntot - k) * np.sum(Ni * (Zbari - Zbar)**2, axis=0)
+
+    # compute denom_variance
+    dvar = 0.0
+    for i in range(k):
+        dvar += np.sum((Zij[i] - Zbari[i])**2, axis=0)
+
+    denom = (k - 1.0) * dvar
+
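+    # Levene/Brown-Forsythe statistic:
+    #     W = (N - k)/(k - 1) * sum_i N_i * (Zbar_i - Zbar)^2
+    #                         / sum_i sum_j (Z_ij - Zbar_i)^2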
+    W = numer / denom
+    pval = distributions.f.sf(W, k-1, Ntot-k)  # 1 - cdf
+    return LeveneResult(W, pval)
+
+
+@_deprecated("'binom_test' is deprecated in favour of"
+             " 'binomtest' from version 1.7.0 and will"
+             " be removed in Scipy 1.12.0.")
+def binom_test(x, n=None, p=0.5, alternative='two-sided'):
+    """Perform a test that the probability of success is p.
+
+    This is an exact, two-sided test of the null hypothesis
+    that the probability of success in a Bernoulli experiment
+    is `p`.
+
+    .. deprecated:: 1.10.0
+        `binom_test` is deprecated in favour of `binomtest` and will
+        be removed in Scipy 1.12.0.
+
+    Parameters
+    ----------
+    x : int or array_like
+        The number of successes, or, if `x` has length 2, the number of
+        successes and the number of failures.
+    n : int
+        The number of trials.  This is ignored if x gives both the
+        number of successes and failures.
+    p : float, optional
+        The hypothesized probability of success.  ``0 <= p <= 1``. The
+        default value is ``p = 0.5``.
+    alternative : {'two-sided', 'greater', 'less'}, optional
+        Indicates the alternative hypothesis. The default value is
+        'two-sided'.
+
+    Returns
+    -------
+    p-value : float
+        The p-value of the hypothesis test.
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Binomial_test
+
+    Examples
+    --------
+    >>> from scipy import stats
+
+    A car manufacturer claims that no more than 10% of their cars are unsafe.
+    15 cars are inspected for safety; 3 are found to be unsafe. Test the
+    manufacturer's claim:
+
+    >>> stats.binom_test(3, n=15, p=0.1, alternative='greater')
+    0.18406106910639114
+
+    The null hypothesis cannot be rejected at the 5% level of significance
+    because the returned p-value is greater than the significance level of 5%.
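+
+    Since `binom_test` is deprecated, the same one-sided test can be run
+    with its replacement, `binomtest`, which returns a result object:
+
+    >>> res = stats.binomtest(3, n=15, p=0.1, alternative='greater')
+    >>> res.pvalue
+    0.18406106910639114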
+
+    """
+    x = atleast_1d(x).astype(np.int_)
+    if len(x) == 2:
+        n = x[1] + x[0]
+        x = x[0]
+    elif len(x) == 1:
+        x = x[0]
+        if n is None or n < x:
+            raise ValueError("n must be >= x")
+        n = np.int_(n)
+    else:
+        raise ValueError("Incorrect length for x.")
+
+    if (p > 1.0) or (p < 0.0):
+        raise ValueError("p must be in range [0,1]")
+
+    if alternative not in ('two-sided', 'less', 'greater'):
+        raise ValueError("alternative not recognized\n"
+                         "should be 'two-sided', 'less' or 'greater'")
+
+    if alternative == 'less':
+        pval = distributions.binom.cdf(x, n, p)
+        return pval
+
+    if alternative == 'greater':
+        pval = distributions.binom.sf(x-1, n, p)
+        return pval
+
+    # if alternative was neither 'less' nor 'greater', then it's 'two-sided'
+    d = distributions.binom.pmf(x, n, p)
+    rerr = 1 + 1e-7
+    if x == p * n:
+        # special case as shortcut, would also be handled by `else` below
+        pval = 1.
+    elif x < p * n:
+        i = np.arange(np.ceil(p * n), n+1)
+        y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0)
+        pval = (distributions.binom.cdf(x, n, p) +
+                distributions.binom.sf(n - y, n, p))
+    else:
+        i = np.arange(np.floor(p*n) + 1)
+        y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0)
+        pval = (distributions.binom.cdf(y-1, n, p) +
+                distributions.binom.sf(x-1, n, p))
+
+    return min(1.0, pval)
+
+
+def _apply_func(x, g, func):
+    # g is list of indices into x
+    #  separating x into different groups
+    #  func should be applied over the groups
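+    #  e.g. x=[1, 2, 3, 4], g=[2], func=np.sum -> [sum(x[0:2]), sum(x[2:4])]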
+    g = unique(r_[0, g, len(x)])
+    output = [func(x[g[k]:g[k+1]]) for k in range(len(g) - 1)]
+
+    return asarray(output)
+
+
+FlignerResult = namedtuple('FlignerResult', ('statistic', 'pvalue'))
+
+
+def fligner(*samples, center='median', proportiontocut=0.05):
+    """Perform Fligner-Killeen test for equality of variance.
+
+    Fligner's test tests the null hypothesis that all input samples
+    are from populations with equal variances.  Fligner-Killeen's test is
+    distribution free when populations are identical [2]_.
+
+    Parameters
+    ----------
+    sample1, sample2, ... : array_like
+        Arrays of sample data.  Need not be the same length.
+    center : {'mean', 'median', 'trimmed'}, optional
+        Keyword argument controlling which function of the data is used in
+        computing the test statistic.  The default is 'median'.
+    proportiontocut : float, optional
+        When `center` is 'trimmed', this gives the proportion of data points
+        to cut from each end. (See `scipy.stats.trim_mean`.)
+        Default is 0.05.
+
+    Returns
+    -------
+    statistic : float
+        The test statistic.
+    pvalue : float
+        The p-value for the hypothesis test.
+
+    See Also
+    --------
+    bartlett : A parametric test for equality of k variances in normal samples
+    levene : A robust parametric test for equality of k variances
+
+    Notes
+    -----
+    As with Levene's test there are three variants of Fligner's test that
+    differ by the measure of central tendency used in the test.  See `levene`
+    for more information.
+
+    Conover et al. (1981) examine many of the existing parametric and
+    nonparametric tests by extensive simulations and they conclude that the
+    tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be
+    superior in terms of robustness to departures from normality and power [3]_.
+
+    References
+    ----------
+    .. [1] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and
+           Hypothesis Testing based on Quadratic Inference Function. Technical
+           Report #99-03, Center for Likelihood Studies, Pennsylvania State
+           University.
+           https://cecas.clemson.edu/~cspark/cv/paper/qif/draftqif2.pdf
+
+    .. [2] Fligner, M.A. and Killeen, T.J. (1976). Distribution-free two-sample
+           tests for scale. 'Journal of the American Statistical Association.'
+           71(353), 210-213.
+
+    .. [3] Conover, W. J., Johnson, M. E. and Johnson M. M. (1981). A
+           comparative study of tests for homogeneity of variances, with
+           applications to the outer continental shelf bidding data.
+           Technometrics, 23(4), 351-361.
+
+    Examples
+    --------
+    Test whether or not the lists `a`, `b` and `c` come from populations
+    with equal variances.
+
+    >>> import numpy as np
+    >>> from scipy.stats import fligner
+    >>> a = [8.88, 9.12, 9.04, 8.98, 9.00, 9.08, 9.01, 8.85, 9.06, 8.99]
+    >>> b = [8.88, 8.95, 9.29, 9.44, 9.15, 9.58, 8.36, 9.18, 8.67, 9.05]
+    >>> c = [8.95, 9.12, 8.95, 8.85, 9.03, 8.84, 9.07, 8.98, 8.86, 8.98]
+    >>> stat, p = fligner(a, b, c)
+    >>> p
+    0.00450826080004775
+
+    The small p-value suggests that the populations do not have equal
+    variances.
+
+    This is not surprising, given that the sample variance of `b` is much
+    larger than that of `a` and `c`:
+
+    >>> [np.var(x, ddof=1) for x in [a, b, c]]
+    [0.007054444444444413, 0.13073888888888888, 0.008890000000000002]
+
+    """
+    if center not in ['mean', 'median', 'trimmed']:
+        raise ValueError("center must be 'mean', 'median' or 'trimmed'.")
+
+    # Handle empty input
+    for sample in samples:
+        if np.asanyarray(sample).size == 0:
+            return FlignerResult(np.nan, np.nan)
+
+    k = len(samples)
+    if k < 2:
+        raise ValueError("Must enter at least two input sample vectors.")
+
+    if center == 'median':
+        func = lambda x: np.median(x, axis=0)
+    elif center == 'mean':
+        func = lambda x: np.mean(x, axis=0)
+    else:  # center == 'trimmed'
+        samples = tuple(_stats_py.trimboth(sample, proportiontocut)
+                        for sample in samples)
+        func = lambda x: np.mean(x, axis=0)
+
+    Ni = asarray([len(samples[j]) for j in range(k)])
+    Yci = asarray([func(samples[j]) for j in range(k)])
+    Ntot = np.sum(Ni, axis=0)
+    # compute Zij's
+    Zij = [abs(asarray(samples[i]) - Yci[i]) for i in range(k)]
+    allZij = []
+    g = [0]
+    for i in range(k):
+        allZij.extend(list(Zij[i]))
+        g.append(len(allZij))
+
+    ranks = _stats_py.rankdata(allZij)
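+    # Fligner-Killeen normal scores: a_{N,i} = Phi^{-1}(1/2 + R_i/(2(N+1))),
+    # where R_i is the rank of |x_ij - center_i| in the pooled sample.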
+    sample = distributions.norm.ppf(ranks / (2*(Ntot + 1.0)) + 0.5)
+
+    # compute Aibar
+    Aibar = _apply_func(sample, g, np.sum) / Ni
+    anbar = np.mean(sample, axis=0)
+    varsq = np.var(sample, axis=0, ddof=1)
+    Xsq = np.sum(Ni * (asarray(Aibar) - anbar)**2.0, axis=0) / varsq
+    pval = distributions.chi2.sf(Xsq, k - 1)  # 1 - cdf
+    return FlignerResult(Xsq, pval)
+
+
+@_axis_nan_policy_factory(lambda x1: (x1,), n_samples=4, n_outputs=1)
+def _mood_inner_lc(xy, x, diffs, sorted_xy, n, m, N) -> float:
+    # Obtain the unique values and their frequencies from the pooled samples.
+    # "a_j, + b_j, = t_j, for j = 1, ... k" where `k` is the number of unique
+    # classes, and "[t]he number of values associated with the x's and y's in
+    # the jth class will be denoted by a_j, and b_j respectively."
+    # (Mielke, 312)
+    # Reuse previously computed sorted array and `diff` arrays to obtain the
+    # unique values and counts. Prepend `diffs` with a non-zero to indicate
+    # that the first element should be marked as not matching what preceded it.
+    diffs_prep = np.concatenate(([1], diffs))
+    # Unique elements are where there was a difference between elements in
+    # the sorted array.
+    uniques = sorted_xy[diffs_prep != 0]
+    # The count of each element is the bin size for each set of consecutive
+    # differences where the difference is zero. Replace nonzero differences
+    # with 1 and then use the cumulative sum to count the indices.
+    t = np.bincount(np.cumsum(np.asarray(diffs_prep != 0, dtype=int)))[1:]
+    k = len(uniques)
+    js = np.arange(1, k + 1, dtype=int)
+    # the `b` array mentioned in the paper is not used, outside of the
+    # calculation of `t`, so we do not need to calculate it separately. Here
+    # we calculate `a`. In plain language, `a[j]` is the number of values in
+    # `x` that equal `uniques[j]`.
+    sorted_xyx = np.sort(np.concatenate((xy, x)))
+    diffs = np.diff(sorted_xyx)
+    diffs_prep = np.concatenate(([1], diffs))
+    diff_is_zero = np.asarray(diffs_prep != 0, dtype=int)
+    xyx_counts = np.bincount(np.cumsum(diff_is_zero))[1:]
+    a = xyx_counts - t
+    # "Define .. a_0 = b_0 = t_0 = S_0 = 0" (Mielke 312) so we shift  `a`
+    # and `t` arrays over 1 to allow a first element of 0 to accommodate this
+    # indexing.
+    t = np.concatenate(([0], t))
+    a = np.concatenate(([0], a))
+    # S is built from `t`, so it does not need a preceding zero added on.
+    S = np.cumsum(t)
+    # define a copy of `S` with a prepending zero for later use to avoid
+    # the need for indexing.
+    S_i_m1 = np.concatenate(([0], S[:-1]))
+
+    # Psi, as defined by the 6th unnumbered equation on page 313 (Mielke).
+    # Note that in the paper there is an error where the denominator `2` is
+    # squared when it should be the entire equation.
+    def psi(indicator):
+        return (indicator - (N + 1)/2)**2
+
+    # define summation range for use in the calculation of phi, as seen in
+    # the sum in the unnumbered equation at the bottom of page 312 (Mielke).
+    s_lower = S[js - 1] + 1
+    s_upper = S[js] + 1
+    phi_J = [np.arange(s_lower[idx], s_upper[idx]) for idx in range(k)]
+
+    # for every range in the above array, determine the sum of psi(I) for
+    # every element in the range. Divide all the sums by `t`. Following the
+    # last unnumbered equation on page 312.
+    phis = [np.sum(psi(I_j)) for I_j in phi_J] / t[js]
+
+    # `T` is equal to a[j] * phi[j], per the first unnumbered equation on
+    # page 312. `phis` is already in the order based on `js`, so we index
+    # into `a` with `js` as well.
+    T = sum(phis * a[js])
+
+    # Expected value of T under the null hypothesis
+    E_0_T = n * (N * N - 1) / 12
+
+    varM = (m * n * (N + 1.0) * (N ** 2 - 4) / 180 -
+            m * n / (180 * N * (N - 1)) * np.sum(
+                t * (t**2 - 1) * (t**2 - 4 + (15 * (N - S - S_i_m1) ** 2))
+            ))
+
+    return ((T - E_0_T) / np.sqrt(varM),)
+
+
+def mood(x, y, axis=0, alternative="two-sided"):
+    """Perform Mood's test for equal scale parameters.
+
+    Mood's two-sample test for scale parameters is a non-parametric
+    test for the null hypothesis that two samples are drawn from the
+    same distribution with the same scale parameter.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Arrays of sample data.
+    axis : int, optional
+        The axis along which the samples are tested.  `x` and `y` can be of
+        different length along `axis`.
+        If `axis` is None, `x` and `y` are flattened and the test is done on
+        all values in the flattened arrays.
+    alternative : {'two-sided', 'less', 'greater'}, optional
+        Defines the alternative hypothesis. Default is 'two-sided'.
+        The following options are available:
+
+        * 'two-sided': the scales of the distributions underlying `x` and `y`
+          are different.
+        * 'less': the scale of the distribution underlying `x` is less than
+          the scale of the distribution underlying `y`.
+        * 'greater': the scale of the distribution underlying `x` is greater
+          than the scale of the distribution underlying `y`.
+
+        .. versionadded:: 1.7.0
+
+    Returns
+    -------
+    res : SignificanceResult
+        An object containing attributes:
+
+        statistic : scalar or ndarray
+            The z-score for the hypothesis test.  For 1-D inputs a scalar is
+            returned.
+        pvalue : scalar or ndarray
+            The p-value for the hypothesis test.
+
+    See Also
+    --------
+    fligner : A non-parametric test for the equality of k variances
+    ansari : A non-parametric test for the equality of 2 variances
+    bartlett : A parametric test for equality of k variances in normal samples
+    levene : A robust parametric test for equality of k variances
+
+    Notes
+    -----
+    The data are assumed to be drawn from probability distributions ``f(x)``
+    and ``f(x/s) / s`` respectively, for some probability density function f.
+    The null hypothesis is that ``s == 1``.
+
+    For multi-dimensional arrays, if the inputs are of shapes
+    ``(n0, n1, n2, n3)``  and ``(n0, m1, n2, n3)``, then if ``axis=1``, the
+    resulting z and p values will have shape ``(n0, n2, n3)``.  Note that
+    ``n1`` and ``m1`` don't have to be equal, but the other dimensions do.
+
+    References
+    ----------
+    .. [1] Mielke, Paul W. "Note on Some Squared Rank Tests with Existing
+           Ties." Technometrics, vol. 9, no. 2, 1967, pp. 312-14. JSTOR,
+           https://doi.org/10.2307/1266427. Accessed 18 May 2022.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> rng = np.random.default_rng()
+    >>> x2 = rng.standard_normal((2, 45, 6, 7))
+    >>> x1 = rng.standard_normal((2, 30, 6, 7))
+    >>> res = stats.mood(x1, x2, axis=1)
+    >>> res.pvalue.shape
+    (2, 6, 7)
+
+    Find the number of points where the difference in scale is not significant:
+
+    >>> (res.pvalue > 0.1).sum()
+    78
+
+    Perform the test with different scales:
+
+    >>> x1 = rng.standard_normal((2, 30))
+    >>> x2 = rng.standard_normal((2, 35)) * 10.0
+    >>> stats.mood(x1, x2, axis=1)
+    SignificanceResult(statistic=array([-5.76174136, -6.12650783]),
+                       pvalue=array([8.32505043e-09, 8.98287869e-10]))
+
+    """
+    x = np.asarray(x, dtype=float)
+    y = np.asarray(y, dtype=float)
+
+    if axis is None:
+        x = x.flatten()
+        y = y.flatten()
+        axis = 0
+
+    if axis < 0:
+        axis = x.ndim + axis
+
+    # Determine shape of the result arrays
+    res_shape = tuple([x.shape[ax] for ax in range(len(x.shape)) if ax != axis])
+    if not (res_shape == tuple([y.shape[ax] for ax in range(len(y.shape)) if
+                                ax != axis])):
+        raise ValueError("Dimensions of x and y on all axes except `axis` "
+                         "should match")
+
+    n = x.shape[axis]
+    m = y.shape[axis]
+    N = m + n
+    if N < 3:
+        raise ValueError("Not enough observations.")
+
+    xy = np.concatenate((x, y), axis=axis)
+    # determine if any of the samples contain ties
+    sorted_xy = np.sort(xy, axis=axis)
+    diffs = np.diff(sorted_xy, axis=axis)
+    if 0 in diffs:
+        z = np.asarray(_mood_inner_lc(xy, x, diffs, sorted_xy, n, m, N,
+                                      axis=axis))
+    else:
+        if axis != 0:
+            xy = np.moveaxis(xy, axis, 0)
+
+        xy = xy.reshape(xy.shape[0], -1)
+        # Generalized to the n-dimensional case by adding the axis argument,
+        # and using for loops, since rankdata is not vectorized.  For improving
+        # performance consider vectorizing rankdata function.
+        all_ranks = np.empty_like(xy)
+        for j in range(xy.shape[1]):
+            all_ranks[:, j] = _stats_py.rankdata(xy[:, j])
+
+        Ri = all_ranks[:n]
+        M = np.sum((Ri - (N + 1.0) / 2) ** 2, axis=0)
+        # Mean and variance of M under the null hypothesis (no ties).
+        mnM = n * (N * N - 1.0) / 12
+        varM = m * n * (N + 1.0) * (N + 2) * (N - 2) / 180
+        z = (M - mnM) / sqrt(varM)
+    z, pval = _normtest_finish(z, alternative)
+
+    if res_shape == ():
+        # Return scalars, not 0-D arrays
+        z = z[0]
+        pval = pval[0]
+    else:
+        z.shape = res_shape
+        pval.shape = res_shape
+    return SignificanceResult(z, pval)
+
+
+WilcoxonResult = _make_tuple_bunch('WilcoxonResult', ['statistic', 'pvalue'])
+
+
+def wilcoxon_result_unpacker(res):
+    if hasattr(res, 'zstatistic'):
+        return res.statistic, res.pvalue, res.zstatistic
+    else:
+        return res.statistic, res.pvalue
+
+
+def wilcoxon_result_object(statistic, pvalue, zstatistic=None):
+    res = WilcoxonResult(statistic, pvalue)
+    if zstatistic is not None:
+        res.zstatistic = zstatistic
+    return res
+
+
+def wilcoxon_outputs(kwds):
+    method = kwds.get('method', 'auto')
+    if method == 'approx':
+        return 3
+    return 2
+
+
+@_rename_parameter("mode", "method")
+@_axis_nan_policy_factory(
+    wilcoxon_result_object, paired=True,
+    n_samples=lambda kwds: 2 if kwds.get('y', None) is not None else 1,
+    result_to_tuple=wilcoxon_result_unpacker, n_outputs=wilcoxon_outputs,
+)
+def wilcoxon(x, y=None, zero_method="wilcox", correction=False,
+             alternative="two-sided", method='auto'):
+    """Calculate the Wilcoxon signed-rank test.
+
+    The Wilcoxon signed-rank test tests the null hypothesis that two
+    related paired samples come from the same distribution. In particular,
+    it tests whether the distribution of the differences ``x - y`` is symmetric
+    about zero. It is a non-parametric version of the paired T-test.
+
+    Parameters
+    ----------
+    x : array_like
+        Either the first set of measurements (in which case ``y`` is the second
+        set of measurements), or the differences between two sets of
+        measurements (in which case ``y`` is not to be specified.)  Must be
+        one-dimensional.
+    y : array_like, optional
+        Either the second set of measurements (if ``x`` is the first set of
+        measurements), or not specified (if ``x`` is the differences between
+        two sets of measurements.)  Must be one-dimensional.
+    zero_method : {"wilcox", "pratt", "zsplit"}, optional
+        There are different conventions for handling pairs of observations
+        with equal values ("zero-differences", or "zeros").
+
+        * "wilcox": Discards all zero-differences (default); see [4]_.
+        * "pratt": Includes zero-differences in the ranking process,
+          but drops the ranks of the zeros (more conservative); see [3]_.
+          In this case, the normal approximation is adjusted as in [5]_.
+        * "zsplit": Includes zero-differences in the ranking process and
+          splits the zero rank between positive and negative ones.
+
+    correction : bool, optional
+        If True, apply continuity correction by adjusting the Wilcoxon rank
+        statistic by 0.5 towards the mean value when computing the
+        z-statistic if a normal approximation is used.  Default is False.
+    alternative : {"two-sided", "greater", "less"}, optional
+        Defines the alternative hypothesis. Default is 'two-sided'.
+        In the following, let ``d`` represent the difference between the paired
+        samples: ``d = x - y`` if both ``x`` and ``y`` are provided, or
+        ``d = x`` otherwise.
+
+        * 'two-sided': the distribution underlying ``d`` is not symmetric
+          about zero.
+        * 'less': the distribution underlying ``d`` is stochastically less
+          than a distribution symmetric about zero.
+        * 'greater': the distribution underlying ``d`` is stochastically
+          greater than a distribution symmetric about zero.
+
+    method : {"auto", "exact", "approx"}, optional
+        Method to calculate the p-value, see Notes. Default is "auto".
+
+    Returns
+    -------
+    An object with the following attributes.
+
+    statistic : array_like
+        If `alternative` is "two-sided", the sum of the ranks of the
+        differences above or below zero, whichever is smaller.
+        Otherwise the sum of the ranks of the differences above zero.
+    pvalue : array_like
+        The p-value for the test depending on `alternative` and `method`.
+    zstatistic : array_like
+        When ``method = 'approx'``, this is the normalized z-statistic::
+
+            z = (T - mn - d) / se
+
+        where ``T`` is `statistic` as defined above, ``mn`` is the mean of the
+        distribution under the null hypothesis, ``d`` is a continuity
+        correction, and ``se`` is the standard error.
+        When ``method != 'approx'``, this attribute is not available.
+
+    See Also
+    --------
+    kruskal, mannwhitneyu
+
+    Notes
+    -----
+    In the following, let ``d`` represent the difference between the paired
+    samples: ``d = x - y`` if both ``x`` and ``y`` are provided, or ``d = x``
+    otherwise. Assume that all elements of ``d`` are independent and
+    identically distributed observations, and all are distinct and nonzero.
+
+    - When ``len(d)`` is sufficiently large, the null distribution of the
+      normalized test statistic (`zstatistic` above) is approximately normal,
+      and ``method = 'approx'`` can be used to compute the p-value.
+
+    - When ``len(d)`` is small, the normal approximation may not be accurate,
+      and ``method='exact'`` is preferred (at the cost of additional
+      execution time).
+
+    - The default, ``method='auto'``, selects between the two: when
+      ``len(d) <= 50``, the exact method is used; otherwise, the approximate
+      method is used.
+
+    The presence of "ties" (i.e. not all elements of ``d`` are unique) and
+    "zeros" (i.e. elements of ``d`` are zero) changes the null distribution
+    of the test statistic, and ``method='exact'`` no longer calculates
+    the exact p-value. If ``method='approx'``, the z-statistic is adjusted
+    for more accurate comparison against the standard normal, but still,
+    for finite sample sizes, the standard normal is only an approximation of
+    the true null distribution of the z-statistic. There is no clear
+    consensus among references on which method most accurately approximates
+    the p-value for small samples in the presence of zeros and/or ties. In any
+    case, this is the behavior of `wilcoxon` when ``method='auto'``:
+    ``method='exact'`` is used when ``len(d) <= 50`` *and there are no zeros*;
+    otherwise, ``method='approx'`` is used.
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test
+    .. [2] Conover, W.J., Practical Nonparametric Statistics, 1971.
+    .. [3] Pratt, J.W., Remarks on Zeros and Ties in the Wilcoxon Signed
+       Rank Procedures, Journal of the American Statistical Association,
+       Vol. 54, 1959, pp. 655-667. :doi:`10.1080/01621459.1959.10501526`
+    .. [4] Wilcoxon, F., Individual Comparisons by Ranking Methods,
+       Biometrics Bulletin, Vol. 1, 1945, pp. 80-83. :doi:`10.2307/3001968`
+    .. [5] Cureton, E.E., The Normal Approximation to the Signed-Rank
+       Sampling Distribution When Zero Differences are Present,
+       Journal of the American Statistical Association, Vol. 62, 1967,
+       pp. 1068-1069. :doi:`10.1080/01621459.1967.10500917`
+
+    Examples
+    --------
+    In [4]_, the differences in height between cross- and self-fertilized
+    corn plants are given as follows:
+
+    >>> d = [6, 8, 14, 16, 23, 24, 28, 29, 41, -48, 49, 56, 60, -67, 75]
+
+    Cross-fertilized plants appear to be higher. To test the null
+    hypothesis that there is no height difference, we can apply the
+    two-sided test:
+
+    >>> from scipy.stats import wilcoxon
+    >>> res = wilcoxon(d)
+    >>> res.statistic, res.pvalue
+    (24.0, 0.041259765625)
+
+    Hence, we would reject the null hypothesis at a significance level of 5%,
+    concluding that there is a difference in height between the groups.
+    To confirm that the median of the differences can be assumed to be
+    positive, we use:
+
+    >>> res = wilcoxon(d, alternative='greater')
+    >>> res.statistic, res.pvalue
+    (96.0, 0.0206298828125)
+
+    This shows that the null hypothesis that the median is negative can be
+    rejected at a significance level of 5% in favor of the alternative that
+    the median is greater than zero. The p-values above are exact. Using the
+    normal approximation gives very similar values:
+
+    >>> res = wilcoxon(d, method='approx')
+    >>> res.statistic, res.pvalue
+    (24.0, 0.04088813291185591)
+
+    Note that the statistic changed to 96 in the one-sided case (the sum
+    of ranks of positive differences) whereas it is 24 in the two-sided
+    case (the minimum of the sums of ranks above and below zero).
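+
+    With ``method='approx'``, the normalized z-statistic described above is
+    also attached to the result:
+
+    >>> print(round(res.zstatistic, 3))
+    -2.045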
+
+    """
+    mode = method
+
+    if mode not in ["auto", "approx", "exact"]:
+        raise ValueError("mode must be either 'auto', 'approx' or 'exact'")
+
+    if zero_method not in ["wilcox", "pratt", "zsplit"]:
+        raise ValueError("Zero method must be either 'wilcox' "
+                         "or 'pratt' or 'zsplit'")
+
+    if alternative not in ["two-sided", "less", "greater"]:
+        raise ValueError("Alternative must be either 'two-sided', "
+                         "'greater' or 'less'")
+
+    if y is None:
+        d = asarray(x)
+        if d.ndim > 1:
+            raise ValueError('Sample x must be one-dimensional.')
+    else:
+        x, y = map(asarray, (x, y))
+        if x.ndim > 1 or y.ndim > 1:
+            raise ValueError('Samples x and y must be one-dimensional.')
+        if len(x) != len(y):
+            raise ValueError('The samples x and y must have the same length.')
+        d = x - y
+
+    if len(d) == 0:
+        res = WilcoxonResult(np.nan, np.nan)
+        if method == 'approx':
+            res.zstatistic = np.nan
+        return res
+
+    if mode == "auto":
+        if len(d) <= 50:
+            mode = "exact"
+        else:
+            mode = "approx"
+
+    n_zero = np.sum(d == 0)
+    if n_zero > 0 and mode == "exact":
+        mode = "approx"
+        warnings.warn("Exact p-value calculation does not work if there are "
+                      "zeros. Switching to normal approximation.")
+
+    if mode == "approx":
+        if zero_method in ["wilcox", "pratt"]:
+            if n_zero == len(d):
+                raise ValueError("zero_method 'wilcox' and 'pratt' do not "
+                                 "work if x - y is zero for all elements.")
+        if zero_method == "wilcox":
+            # Keep all non-zero differences
+            d = compress(np.not_equal(d, 0), d)
+
+    count = len(d)
+    if count < 10 and mode == "approx":
+        warnings.warn("Sample size too small for normal approximation.")
+
+    r = _stats_py.rankdata(abs(d))
+    r_plus = np.sum((d > 0) * r)
+    r_minus = np.sum((d < 0) * r)
+
+    if zero_method == "zsplit":
+        r_zero = np.sum((d == 0) * r)
+        r_plus += r_zero / 2.
+        r_minus += r_zero / 2.
+
+    # return min for two-sided test, but r_plus for one-sided test
+    # the literature is not consistent here
+    # r_plus is more informative since r_plus + r_minus = count*(count+1)/2,
+    # i.e. the sum of the ranks, so r_minus and the min can be inferred
+    # (If zero_method='pratt', r_plus + r_minus = count*(count+1)/2 - r_zero.)
+    # [3] uses the r_plus for the one-sided test, keep min for two-sided test
+    # to keep backwards compatibility
+    if alternative == "two-sided":
+        T = min(r_plus, r_minus)
+    else:
+        T = r_plus
+
+    if mode == "approx":
+        mn = count * (count + 1.) * 0.25
+        se = count * (count + 1.) * (2. * count + 1.)
+
+        if zero_method == "pratt":
+            r = r[d != 0]
+            # normal approximation needs to be adjusted, see Cureton (1967)
+            mn -= n_zero * (n_zero + 1.) * 0.25
+            se -= n_zero * (n_zero + 1.) * (2. * n_zero + 1.)
+
+        replist, repnum = find_repeats(r)
+        if repnum.size != 0:
+            # Correction for repeated elements.
+            se -= 0.5 * (repnum * (repnum * repnum - 1)).sum()
+
+        se = sqrt(se / 24)
+
+        # apply continuity correction if applicable
+        d = 0
+        if correction:
+            if alternative == "two-sided":
+                d = 0.5 * np.sign(T - mn)
+            elif alternative == "less":
+                d = -0.5
+            else:
+                d = 0.5
+
+        # compute statistic and p-value using normal approximation
+        z = (T - mn - d) / se
+        if alternative == "two-sided":
+            prob = 2. * distributions.norm.sf(abs(z))
+        elif alternative == "greater":
+            # large T = r_plus indicates x is greater than y; i.e.
+            # accept alternative in that case and return small p-value (sf)
+            prob = distributions.norm.sf(z)
+        else:
+            prob = distributions.norm.cdf(z)
+    elif mode == "exact":
+        # get pmf of the possible positive ranksums r_plus
+        pmf = _get_wilcoxon_distr(count)
+        # note: r_plus is int (ties not allowed), need int for slices below
+        r_plus = int(r_plus)
+        if alternative == "two-sided":
+            if r_plus == (len(pmf) - 1) // 2:
+                # r_plus is the center of the distribution.
+                prob = 1.0
+            else:
+                p_less = np.sum(pmf[:r_plus + 1])
+                p_greater = np.sum(pmf[r_plus:])
+                prob = 2*min(p_greater, p_less)
+        elif alternative == "greater":
+            prob = np.sum(pmf[r_plus:])
+        else:
+            prob = np.sum(pmf[:r_plus + 1])
+        prob = np.clip(prob, 0, 1)
+
+    res = WilcoxonResult(T, prob)
+    if method == 'approx':
+        res.zstatistic = z
+    return res
+
+
+MedianTestResult = _make_tuple_bunch(
+    'MedianTestResult',
+    ['statistic', 'pvalue', 'median', 'table'], []
+)
+
+
+def median_test(*samples, ties='below', correction=True, lambda_=1,
+                nan_policy='propagate'):
+    """Perform a Mood's median test.
+
+    Test that two or more samples come from populations with the same median.
+
+    Let ``n = len(samples)`` be the number of samples.  The "grand median" of
+    all the data is computed, and a contingency table is formed by
+    classifying the values in each sample as being above or below the grand
+    median.  The contingency table, along with `correction` and `lambda_`,
+    are passed to `scipy.stats.chi2_contingency` to compute the test statistic
+    and p-value.
+
+    Parameters
+    ----------
+    sample1, sample2, ... : array_like
+        The set of samples.  There must be at least two samples.
+        Each sample must be a one-dimensional sequence containing at least
+        one value.  The samples are not required to have the same length.
+    ties : str, optional
+        Determines how values equal to the grand median are classified in
+        the contingency table.  The string must be one of::
+
+            "below":
+                Values equal to the grand median are counted as "below".
+            "above":
+                Values equal to the grand median are counted as "above".
+            "ignore":
+                Values equal to the grand median are not counted.
+
+        The default is "below".
+    correction : bool, optional
+        If True, *and* there are just two samples, apply Yates' correction
+        for continuity when computing the test statistic associated with
+        the contingency table.  Default is True.
+    lambda_ : float or str, optional
+        By default, the statistic computed in this test is Pearson's
+        chi-squared statistic.  `lambda_` allows a statistic from the
+        Cressie-Read power divergence family to be used instead.  See
+        `power_divergence` for details.
+        Default is 1 (Pearson's chi-squared statistic).
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains nan. 'propagate' returns nan,
+        'raise' throws an error, 'omit' performs the calculations ignoring nan
+        values. Default is 'propagate'.
+
+    Returns
+    -------
+    res : MedianTestResult
+        An object containing attributes:
+
+        statistic : float
+            The test statistic.  The statistic that is returned is determined
+            by `lambda_`.  The default is Pearson's chi-squared statistic.
+        pvalue : float
+            The p-value of the test.
+        median : float
+            The grand median.
+        table : ndarray
+            The contingency table.  The shape of the table is (2, n), where
+            n is the number of samples.  The first row holds the counts of the
+            values above the grand median, and the second row holds the counts
+            of the values below the grand median.  The table allows further
+            analysis with, for example, `scipy.stats.chi2_contingency`, or with
+            `scipy.stats.fisher_exact` if there are two samples, without having
+            to recompute the table.  If ``nan_policy`` is "propagate" and there
+            are nans in the input, the return value for ``table`` is ``None``.
+
+    See Also
+    --------
+    kruskal : Compute the Kruskal-Wallis H-test for independent samples.
+    mannwhitneyu : Computes the Mann-Whitney rank test on samples x and y.
+
+    Notes
+    -----
+    .. versionadded:: 0.15.0
+
+    References
+    ----------
+    .. [1] Mood, A. M., Introduction to the Theory of Statistics. McGraw-Hill
+        (1950), pp. 394-399.
+    .. [2] Zar, J. H., Biostatistical Analysis, 5th ed. Prentice Hall (2010).
+        See Sections 8.12 and 10.15.
+
+    Examples
+    --------
+    A biologist runs an experiment in which there are three groups of plants.
+    Group 1 has 16 plants, group 2 has 15 plants, and group 3 has 17 plants.
+    Each plant produces a number of seeds.  The seed counts for each group
+    are::
+
+        Group 1: 10 14 14 18 20 22 24 25 31 31 32 39 43 43 48 49
+        Group 2: 28 30 31 33 34 35 36 40 44 55 57 61 91 92 99
+        Group 3:  0  3  9 22 23 25 25 33 34 34 40 45 46 48 62 67 84
+
+    The following code applies Mood's median test to these samples.
+
+    >>> g1 = [10, 14, 14, 18, 20, 22, 24, 25, 31, 31, 32, 39, 43, 43, 48, 49]
+    >>> g2 = [28, 30, 31, 33, 34, 35, 36, 40, 44, 55, 57, 61, 91, 92, 99]
+    >>> g3 = [0, 3, 9, 22, 23, 25, 25, 33, 34, 34, 40, 45, 46, 48, 62, 67, 84]
+    >>> from scipy.stats import median_test
+    >>> res = median_test(g1, g2, g3)
+
+    The median is
+
+    >>> res.median
+    34.0
+
+    and the contingency table is
+
+    >>> res.table
+    array([[ 5, 10,  7],
+           [11,  5, 10]])
+
+    `p` is too large to conclude that the medians are not the same:
+
+    >>> res.pvalue
+    0.12609082774093244
+
+    The "G-test" can be performed by passing ``lambda_="log-likelihood"`` to
+    `median_test`.
+
+    >>> res = median_test(g1, g2, g3, lambda_="log-likelihood")
+    >>> res.pvalue
+    0.12224779737117837
+
+    The median occurs several times in the data, so we'll get a different
+    result if, for example, ``ties="above"`` is used:
+
+    >>> res = median_test(g1, g2, g3, ties="above")
+    >>> res.pvalue
+    0.063873276069553273
+
+    >>> res.table
+    array([[ 5, 11,  9],
+           [11,  4,  8]])
+
+    This example demonstrates that if the data set is not large and there
+    are values equal to the median, the p-value can be sensitive to the
+    choice of `ties`.
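+
+    Because the contingency table is returned, it can be reused without
+    recomputation; for example, with the default arguments, passing
+    ``res.table`` to `scipy.stats.chi2_contingency` reproduces the statistic
+    and the p-value:
+
+    >>> from scipy.stats import chi2_contingency
+    >>> res = median_test(g1, g2, g3)
+    >>> stat, p, dof, expected = chi2_contingency(res.table)
+    >>> bool(stat == res.statistic), bool(p == res.pvalue)
+    (True, True)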
+
+    """
+    if len(samples) < 2:
+        raise ValueError('median_test requires two or more samples.')
+
+    ties_options = ['below', 'above', 'ignore']
+    if ties not in ties_options:
+        raise ValueError("invalid 'ties' option '%s'; 'ties' must be one "
+                         "of: %s" % (ties, str(ties_options)[1:-1]))
+
+    data = [np.asarray(sample) for sample in samples]
+
+    # Validate the sizes and shapes of the arguments.
+    for k, d in enumerate(data):
+        if d.size == 0:
+            raise ValueError("Sample %d is empty. All samples must "
+                             "contain at least one value." % (k + 1))
+        if d.ndim != 1:
+            raise ValueError("Sample %d has %d dimensions.  All "
+                             "samples must be one-dimensional sequences." %
+                             (k + 1, d.ndim))
+
+    cdata = np.concatenate(data)
+    contains_nan, nan_policy = _contains_nan(cdata, nan_policy)
+    if contains_nan and nan_policy == 'propagate':
+        return MedianTestResult(np.nan, np.nan, np.nan, None)
+
+    if contains_nan:
+        grand_median = np.median(cdata[~np.isnan(cdata)])
+    else:
+        grand_median = np.median(cdata)
+    # Since the minimum numpy version supported by scipy now exceeds 1.9.0,
+    # the above if/else statement could be replaced by the single line:
+    #     grand_median = np.nanmedian(cdata)
+
+    # Create the contingency table.
+    table = np.zeros((2, len(data)), dtype=np.int64)
+    for k, sample in enumerate(data):
+        sample = sample[~np.isnan(sample)]
+
+        nabove = count_nonzero(sample > grand_median)
+        nbelow = count_nonzero(sample < grand_median)
+        nequal = sample.size - (nabove + nbelow)
+        table[0, k] += nabove
+        table[1, k] += nbelow
+        if ties == "below":
+            table[1, k] += nequal
+        elif ties == "above":
+            table[0, k] += nequal
+
+    # Check that no row or column of the table is all zero.
+    # Such a table can not be given to chi2_contingency, because it would have
+    # a zero in the table of expected frequencies.
+    rowsums = table.sum(axis=1)
+    if rowsums[0] == 0:
+        raise ValueError("All values are below the grand median (%r)." %
+                         grand_median)
+    if rowsums[1] == 0:
+        raise ValueError("All values are above the grand median (%r)." %
+                         grand_median)
+    if ties == "ignore":
+        # We already checked that each sample has at least one value, but it
+        # is possible that all those values equal the grand median.  If `ties`
+        # is "ignore", that would result in a column of zeros in `table`.  We
+        # check for that case here.
+        zero_cols = np.nonzero((table == 0).all(axis=0))[0]
+        if len(zero_cols) > 0:
+            msg = ("All values in sample %d are equal to the grand "
+                   "median (%r), so they are ignored, resulting in an "
+                   "empty sample." % (zero_cols[0] + 1, grand_median))
+            raise ValueError(msg)
+
+    stat, p, dof, expected = chi2_contingency(table, lambda_=lambda_,
+                                              correction=correction)
+    return MedianTestResult(stat, p, grand_median, table)
+
+
+def _circfuncs_common(samples, high, low, nan_policy='propagate'):
+    # Ensure samples are array-like and size is not zero
+    samples = np.asarray(samples)
+    if samples.size == 0:
+        return np.nan, np.asarray(np.nan), np.asarray(np.nan), None
+
+    # Recast samples as radians that range between 0 and 2 pi and calculate
+    # the sine and cosine
+    sin_samp = sin((samples - low)*2.*pi / (high - low))
+    cos_samp = cos((samples - low)*2.*pi / (high - low))
+
+    # Apply the NaN policy
+    contains_nan, nan_policy = _contains_nan(samples, nan_policy)
+    if contains_nan and nan_policy == 'omit':
+        mask = np.isnan(samples)
+        # Set the sines and cosines that are NaN to zero
+        sin_samp[mask] = 0.0
+        cos_samp[mask] = 0.0
+    else:
+        mask = None
+
+    return samples, sin_samp, cos_samp, mask
+
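+# Example of the rescaling in _circfuncs_common: with hourly data on
+# [0, 24), hour 6 maps to the angle (6 - 0)*2*pi/24 = pi/2 (sin 1.0,
+# cos 0.0), so hours 23 and 1 end up adjacent on the unit circle.
+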
+
+def circmean(samples, high=2*pi, low=0, axis=None, nan_policy='propagate'):
+    """Compute the circular mean for samples in a range.
+
+    Parameters
+    ----------
+    samples : array_like
+        Input array.
+    high : float or int, optional
+        High boundary for the sample range. Default is ``2*pi``.
+    low : float or int, optional
+        Low boundary for the sample range. Default is 0.
+    axis : int, optional
+        Axis along which means are computed. The default is to compute
+        the mean of the flattened array.
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains nan. 'propagate' returns nan,
+        'raise' throws an error, 'omit' performs the calculations ignoring nan
+        values. Default is 'propagate'.
+
+    Returns
+    -------
+    circmean : float
+        Circular mean.
+
+    See Also
+    --------
+    circstd : Circular standard deviation.
+    circvar : Circular variance.
+
+    Examples
+    --------
+    For simplicity, all angles are printed out in degrees.
+
+    >>> import numpy as np
+    >>> from scipy.stats import circmean
+    >>> import matplotlib.pyplot as plt
+    >>> angles = np.deg2rad(np.array([20, 30, 330]))
+    >>> mean_circ = circmean(angles)
+    >>> np.rad2deg(mean_circ)
+    7.294976657784009
+
+    >>> mean = angles.mean()
+    >>> np.rad2deg(mean)
+    126.66666666666666
+
+    Plot and compare the circular mean against the arithmetic mean.
+
+    >>> plt.plot(np.cos(np.linspace(0, 2*np.pi, 500)),
+    ...          np.sin(np.linspace(0, 2*np.pi, 500)),
+    ...          c='k')
+    >>> plt.scatter(np.cos(angles), np.sin(angles), c='k')
+    >>> plt.scatter(np.cos(mean_circ), np.sin(mean_circ), c='b',
+    ...             label='circmean')
+    >>> plt.scatter(np.cos(mean), np.sin(mean), c='r', label='mean')
+    >>> plt.legend()
+    >>> plt.axis('equal')
+    >>> plt.show()
+
+    """
+    samples, sin_samp, cos_samp, nmask = _circfuncs_common(samples, high, low,
+                                                           nan_policy=nan_policy)
+    sin_sum = sin_samp.sum(axis=axis)
+    cos_sum = cos_samp.sum(axis=axis)
+    res = arctan2(sin_sum, cos_sum)
+
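+    # Wrap the negative angles returned by arctan2 into [0, 2*pi);
+    # `mask_nan` flags the entries that are not NaN so that NaN results
+    # are left untouched.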
+    mask_nan = ~np.isnan(res)
+    if mask_nan.ndim > 0:
+        mask = res[mask_nan] < 0
+    else:
+        mask = res < 0
+
+    if mask.ndim > 0:
+        mask_nan[mask_nan] = mask
+        res[mask_nan] += 2*pi
+    elif mask:
+        res += 2*pi
+
+    # Set output to NaN if no samples went into the mean
+    if nmask is not None:
+        if nmask.all():
+            res = np.full(shape=res.shape, fill_value=np.nan)
+        else:
+            # Find out if any of the axis that are being averaged consist
+            # entirely of NaN.  If one exists, set the result (res) to NaN
+            nshape = 0 if axis is None else axis
+            smask = nmask.shape[nshape] == nmask.sum(axis=axis)
+            if smask.any():
+                res[smask] = np.nan
+
+    return res*(high - low)/2.0/pi + low
+
+
+def circvar(samples, high=2*pi, low=0, axis=None, nan_policy='propagate'):
+    """Compute the circular variance for samples assumed to be in a range.
+
+    Parameters
+    ----------
+    samples : array_like
+        Input array.
+    high : float or int, optional
+        High boundary for the sample range. Default is ``2*pi``.
+    low : float or int, optional
+        Low boundary for the sample range. Default is 0.
+    axis : int, optional
+        Axis along which variances are computed. The default is to compute
+        the variance of the flattened array.
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains nan. 'propagate' returns nan,
+        'raise' throws an error, 'omit' performs the calculations ignoring nan
+        values. Default is 'propagate'.
+
+    Returns
+    -------
+    circvar : float
+        Circular variance.
+
+    See Also
+    --------
+    circmean : Circular mean.
+    circstd : Circular standard deviation.
+
+    Notes
+    -----
+    This uses the following definition of circular variance: ``1-R``, where
+    ``R`` is the mean resultant length, i.e. the length of the average of
+    the unit vectors corresponding to the samples. The returned value is in
+    the range [0, 1], 0 standing for no variance, and 1 for a large
+    variance. In the limit of small angles, this value is similar to half
+    the 'linear' variance.
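+
+    Essentially, for samples already in radians (``low=0``, ``high=2*pi``)
+    and with no masked values, the calculation reduces to the following
+    sketch:
+
+    .. code-block:: python
+
+        import numpy as np
+        C = np.cos(samples).mean()
+        S = np.sin(samples).mean()
+        R = np.sqrt(C**2 + S**2)
+        circvar = 1 - R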
+
+    References
+    ----------
+    .. [1] Fisher, N.I. *Statistical analysis of circular data*. Cambridge
+          University Press, 1993.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.stats import circvar
+    >>> import matplotlib.pyplot as plt
+    >>> samples_1 = np.array([0.072, -0.158, 0.077, 0.108, 0.286,
+    ...                       0.133, -0.473, -0.001, -0.348, 0.131])
+    >>> samples_2 = np.array([0.111, -0.879, 0.078, 0.733, 0.421,
+    ...                       0.104, -0.136, -0.867,  0.012,  0.105])
+    >>> circvar_1 = circvar(samples_1)
+    >>> circvar_2 = circvar(samples_2)
+
+    Plot the samples.
+
+    >>> fig, (left, right) = plt.subplots(ncols=2)
+    >>> for image in (left, right):
+    ...     image.plot(np.cos(np.linspace(0, 2*np.pi, 500)),
+    ...                np.sin(np.linspace(0, 2*np.pi, 500)),
+    ...                c='k')
+    ...     image.axis('equal')
+    ...     image.axis('off')
+    >>> left.scatter(np.cos(samples_1), np.sin(samples_1), c='k', s=15)
+    >>> left.set_title(f"circular variance: {np.round(circvar_1, 2)!r}")
+    >>> right.scatter(np.cos(samples_2), np.sin(samples_2), c='k', s=15)
+    >>> right.set_title(f"circular variance: {np.round(circvar_2, 2)!r}")
+    >>> plt.show()
+
+    """
+    samples, sin_samp, cos_samp, mask = _circfuncs_common(samples, high, low,
+                                                          nan_policy=nan_policy)
+    if mask is None:
+        sin_mean = sin_samp.mean(axis=axis)
+        cos_mean = cos_samp.mean(axis=axis)
+    else:
+        nsum = np.asarray(np.sum(~mask, axis=axis).astype(float))
+        nsum[nsum == 0] = np.nan
+        sin_mean = sin_samp.sum(axis=axis) / nsum
+        cos_mean = cos_samp.sum(axis=axis) / nsum
+    # hypot can go slightly above 1 due to rounding errors
+    with np.errstate(invalid='ignore'):
+        R = np.minimum(1, hypot(sin_mean, cos_mean))
+
+    res = 1. - R
+    return res
+
+
+def circstd(samples, high=2*pi, low=0, axis=None, nan_policy='propagate', *,
+            normalize=False):
+    """
+    Compute the circular standard deviation for samples assumed to be in the
+    range [low, high].
+
+    Parameters
+    ----------
+    samples : array_like
+        Input array.
+    high : float or int, optional
+        High boundary for the sample range. Default is ``2*pi``.
+    low : float or int, optional
+        Low boundary for the sample range. Default is 0.
+    axis : int, optional
+        Axis along which standard deviations are computed. The default is
+        to compute the standard deviation of the flattened array.
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains nan. 'propagate' returns nan,
+        'raise' throws an error, 'omit' performs the calculations ignoring nan
+        values. Default is 'propagate'.
+    normalize : bool, optional
+        If True, the returned value is equal to ``sqrt(-2*log(R))`` and does
+        not depend on the variable units. If False (default), the returned
+        value is scaled by ``((high-low)/(2*pi))``.
+
+    Returns
+    -------
+    circstd : float
+        Circular standard deviation.
+
+    See Also
+    --------
+    circmean : Circular mean.
+    circvar : Circular variance.
+
+    Notes
+    -----
+    This uses a definition of circular standard deviation from [1]_.
+    Essentially, the calculation is as follows.
+
+    .. code-block:: python
+
+        import numpy as np
+        C = np.cos(samples).mean()
+        S = np.sin(samples).mean()
+        R = np.sqrt(C**2 + S**2)
+        scale = 2*np.pi / (high - low)
+        circstd = np.sqrt(-2*np.log(R)) / scale
+
+    In the limit of small angles, it returns a number close to the 'linear'
+    standard deviation.
+
+    References
+    ----------
+    .. [1] Mardia, K. V. (1972). 2. In *Statistics of Directional Data*
+       (pp. 18-24). Academic Press. :doi:`10.1016/C2013-0-07425-7`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.stats import circstd
+    >>> import matplotlib.pyplot as plt
+    >>> samples_1 = np.array([0.072, -0.158, 0.077, 0.108, 0.286,
+    ...                       0.133, -0.473, -0.001, -0.348, 0.131])
+    >>> samples_2 = np.array([0.111, -0.879, 0.078, 0.733, 0.421,
+    ...                       0.104, -0.136, -0.867,  0.012,  0.105])
+    >>> circstd_1 = circstd(samples_1)
+    >>> circstd_2 = circstd(samples_2)
+
+    Plot the samples.
+
+    >>> fig, (left, right) = plt.subplots(ncols=2)
+    >>> for image in (left, right):
+    ...     image.plot(np.cos(np.linspace(0, 2*np.pi, 500)),
+    ...                np.sin(np.linspace(0, 2*np.pi, 500)),
+    ...                c='k')
+    ...     image.axis('equal')
+    ...     image.axis('off')
+    >>> left.scatter(np.cos(samples_1), np.sin(samples_1), c='k', s=15)
+    >>> left.set_title(f"circular std: {np.round(circstd_1, 2)!r}")
+    >>> right.scatter(np.cos(samples_2), np.sin(samples_2), c='k', s=15)
+    >>> right.set_title(f"circular std: {np.round(circstd_2, 2)!r}")
+    >>> plt.show()
+
+    """
+    samples, sin_samp, cos_samp, mask = _circfuncs_common(samples, high, low,
+                                                          nan_policy=nan_policy)
+    if mask is None:
+        sin_mean = sin_samp.mean(axis=axis)  # [1] (2.2.3)
+        cos_mean = cos_samp.mean(axis=axis)  # [1] (2.2.3)
+    else:
+        nsum = np.asarray(np.sum(~mask, axis=axis).astype(float))
+        nsum[nsum == 0] = np.nan
+        sin_mean = sin_samp.sum(axis=axis) / nsum
+        cos_mean = cos_samp.sum(axis=axis) / nsum
+    # hypot can go slightly above 1 due to rounding errors
+    with np.errstate(invalid='ignore'):
+        R = np.minimum(1, hypot(sin_mean, cos_mean))  # [1] (2.2.4)
+
+    res = sqrt(-2*log(R))
+    if not normalize:
+        res *= (high-low)/(2.*pi)  # [1] (2.3.14) w/ (2.3.7)
+    return res
+
+
+class DirectionalStats:
+    def __init__(self, mean_direction, mean_resultant_length):
+        self.mean_direction = mean_direction
+        self.mean_resultant_length = mean_resultant_length
+
+    def __repr__(self):
+        return (f"DirectionalStats(mean_direction={self.mean_direction},"
+                f" mean_resultant_length={self.mean_resultant_length})")
+
+
+def directional_stats(samples, *, axis=0, normalize=True):
+    """
+    Computes sample statistics for directional data.
+
+    Computes the directional mean (also called the mean direction vector) and
+    mean resultant length of a sample of vectors.
+
+    The directional mean is a measure of "preferred direction" of vector data.
+    It is analogous to the sample mean, but it is for use when the magnitude
+    of each observation is irrelevant (e.g. unit vectors).
+
+    The mean resultant length is a value between 0 and 1 used to quantify the
+    dispersion of directional data: the smaller the mean resultant length, the
+    greater the dispersion. Several definitions of directional variance
+    involving the mean resultant length are given in [1]_ and [2]_.
+
+    Parameters
+    ----------
+    samples : array_like
+        Input array. Must be at least two-dimensional, and the last axis of the
+        input must correspond with the dimensionality of the vector space.
+        When the input is exactly two dimensional, this means that each row
+        of the data is a vector observation.
+    axis : int, default: 0
+        Axis along which the directional mean is computed.
+    normalize : bool, default: True
+        If True, normalize the input to ensure that each observation is a
+        unit vector. If the observations are already unit vectors, consider
+        setting this to False to avoid unnecessary computation.
+
+    Returns
+    -------
+    res : DirectionalStats
+        An object containing attributes:
+
+        mean_direction : ndarray
+            Directional mean.
+        mean_resultant_length : ndarray
+            The mean resultant length [1]_.
+
+    See Also
+    --------
+    circmean: circular mean; i.e. directional mean for 2D *angles*
+    circvar: circular variance; i.e. directional variance for 2D *angles*
+
+    Notes
+    -----
+    This uses a definition of directional mean from [1]_.
+    Assuming the observations are unit vectors, the calculation is as follows.
+
+    .. code-block:: python
+
+        mean = samples.mean(axis=0)
+        mean_resultant_length = np.linalg.norm(mean)
+        mean_direction = mean / mean_resultant_length
+
+    This definition is appropriate for *directional* data (i.e. vector data
+    for which the magnitude of each observation is irrelevant) but not
+    for *axial* data (i.e. vector data for which the magnitude and *sign* of
+    each observation is irrelevant).
+
+    Several definitions of directional variance involving the mean resultant
+    length ``R`` have been proposed, including ``1 - R`` [1]_, ``1 - R**2``
+    [2]_, and ``2 * (1 - R)`` [2]_. Rather than choosing one, this function
+    returns ``R`` as attribute `mean_resultant_length` so the user can compute
+    their preferred measure of dispersion.
+
+    References
+    ----------
+    .. [1] Mardia, Jupp. (2000). *Directional Statistics*
+       (p. 163). Wiley.
+
+    .. [2] https://en.wikipedia.org/wiki/Directional_statistics
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.stats import directional_stats
+    >>> data = np.array([[3, 4],    # first observation, 2D vector space
+    ...                  [6, -8]])  # second observation
+    >>> dirstats = directional_stats(data)
+    >>> dirstats.mean_direction
+    array([1., 0.])
+
+    In contrast, the regular sample mean of the vectors would be influenced
+    by the magnitude of each observation. Furthermore, the result would not be
+    a unit vector.
+
+    >>> data.mean(axis=0)
+    array([4.5, -2.])
+
+    An exemplary use case for `directional_stats` is to find a *meaningful*
+    center for a set of observations on a sphere, e.g. geographical locations.
+
+    >>> data = np.array([[0.8660254, 0.5, 0.],
+    ...                  [0.8660254, -0.5, 0.]])
+    >>> dirstats = directional_stats(data)
+    >>> dirstats.mean_direction
+    array([1., 0., 0.])
+
+    The regular sample mean on the other hand yields a result which does not
+    lie on the surface of the sphere.
+
+    >>> data.mean(axis=0)
+    array([0.8660254, 0., 0.])
+
+    The function also returns the mean resultant length, which
+    can be used to calculate a directional variance. For example, using the
+    definition ``Var(z) = 1 - R`` from [2]_ where ``R`` is the
+    mean resultant length, we can calculate the directional variance of the
+    vectors in the above example as:
+
+    >>> 1 - dirstats.mean_resultant_length
+    0.13397459716167093
+    """
+    samples = np.asarray(samples)
+    if samples.ndim < 2:
+        raise ValueError("samples must at least be two-dimensional. "
+                         f"Instead samples has shape: {samples.shape!r}")
+    samples = np.moveaxis(samples, axis, 0)
+    if normalize:
+        vectornorms = np.linalg.norm(samples, axis=-1, keepdims=True)
+        samples = samples/vectornorms
+    mean = np.mean(samples, axis=0)
+    mean_resultant_length = np.linalg.norm(mean, axis=-1, keepdims=True)
+    mean_direction = mean / mean_resultant_length
+    return DirectionalStats(mean_direction,
+                            mean_resultant_length.squeeze(-1)[()])
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_mstats_basic.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_mstats_basic.py
new file mode 100644
index 00000000..145323c0
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_mstats_basic.py
@@ -0,0 +1,3521 @@
+"""
+An extension of scipy.stats._stats_py to support masked arrays
+
+"""
+# Original author (2007): Pierre GF Gerard-Marchant
+
+
+__all__ = ['argstoarray',
+           'count_tied_groups',
+           'describe',
+           'f_oneway', 'find_repeats', 'friedmanchisquare',
+           'kendalltau', 'kendalltau_seasonal', 'kruskal', 'kruskalwallis',
+           'ks_twosamp', 'ks_2samp', 'kurtosis', 'kurtosistest',
+           'ks_1samp', 'kstest',
+           'linregress',
+           'mannwhitneyu', 'meppf', 'mode', 'moment', 'mquantiles', 'msign',
+           'normaltest',
+           'obrientransform',
+           'pearsonr', 'plotting_positions', 'pointbiserialr',
+           'rankdata',
+           'scoreatpercentile', 'sem',
+           'sen_seasonal_slopes', 'skew', 'skewtest', 'spearmanr',
+           'siegelslopes', 'theilslopes',
+           'tmax', 'tmean', 'tmin', 'trim', 'trimboth',
+           'trimtail', 'trima', 'trimr', 'trimmed_mean', 'trimmed_std',
+           'trimmed_stde', 'trimmed_var', 'tsem', 'ttest_1samp',
+           'ttest_onesamp', 'ttest_ind', 'ttest_rel', 'tvar',
+           'variation',
+           'winsorize',
+           'brunnermunzel',
+           ]
+
+import itertools
+import math
+import warnings
+from collections import namedtuple
+
+import numpy as np
+from numpy import ndarray
+import numpy.ma as ma
+from numpy.ma import masked, nomask
+
+from . import distributions
+from scipy._lib._util import _rename_parameter, _contains_nan
+from scipy._lib._bunch import _make_tuple_bunch
+import scipy.special as special
+import scipy.stats._stats_py
+
+from ._stats_mstats_common import (
+        _find_repeats,
+        linregress as stats_linregress,
+        LinregressResult as stats_LinregressResult,
+        theilslopes as stats_theilslopes,
+        siegelslopes as stats_siegelslopes
+        )
+
+
+def _chk_asarray(a, axis):
+    # Always returns a masked array, raveled for axis=None
+    a = ma.asanyarray(a)
+    if axis is None:
+        a = ma.ravel(a)
+        outaxis = 0
+    else:
+        outaxis = axis
+    return a, outaxis
+
+
+def _chk2_asarray(a, b, axis):
+    a = ma.asanyarray(a)
+    b = ma.asanyarray(b)
+    if axis is None:
+        a = ma.ravel(a)
+        b = ma.ravel(b)
+        outaxis = 0
+    else:
+        outaxis = axis
+    return a, b, outaxis
+
+
+def _chk_size(a, b):
+    a = ma.asanyarray(a)
+    b = ma.asanyarray(b)
+    (na, nb) = (a.size, b.size)
+    if na != nb:
+        raise ValueError("The size of the input array should match!"
+                         " (%s <> %s)" % (na, nb))
+    return (a, b, na)
+
+
+def argstoarray(*args):
+    """
+    Constructs a 2D array from a group of sequences.
+
+    Sequences are filled with missing values to match the length of the longest
+    sequence.
+
+    Parameters
+    ----------
+    *args : sequences
+        Group of sequences.
+
+    Returns
+    -------
+    argstoarray : MaskedArray
+        A ( `m` x `n` ) masked array, where `m` is the number of arguments and
+        `n` the length of the longest argument.
+
+    Notes
+    -----
+    `numpy.ma.row_stack` has identical behavior, but is called with a sequence
+    of sequences.
+
+    Examples
+    --------
+    A 2D masked array constructed from a group of sequences is returned.
+
+    >>> from scipy.stats.mstats import argstoarray
+    >>> argstoarray([1, 2, 3], [4, 5, 6])
+    masked_array(
+     data=[[1.0, 2.0, 3.0],
+           [4.0, 5.0, 6.0]],
+     mask=[[False, False, False],
+           [False, False, False]],
+     fill_value=1e+20)
+
+    The returned masked array is filled with missing values when the lengths
+    of the sequences differ.
+
+    >>> argstoarray([1, 3], [4, 5, 6])
+    masked_array(
+     data=[[1.0, 3.0, --],
+           [4.0, 5.0, 6.0]],
+     mask=[[False, False,  True],
+           [False, False, False]],
+     fill_value=1e+20)
+
+    """
+    if len(args) == 1 and not isinstance(args[0], ndarray):
+        output = ma.asarray(args[0])
+        if output.ndim != 2:
+            raise ValueError("The input should be 2D")
+    else:
+        n = len(args)
+        m = max([len(k) for k in args])
+        output = ma.array(np.empty((n,m), dtype=float), mask=True)
+        for (k,v) in enumerate(args):
+            output[k,:len(v)] = v
+
+    output[np.logical_not(np.isfinite(output._data))] = masked
+    return output
+
+
+def find_repeats(arr):
+    """Find repeats in arr and return a tuple (repeats, repeat_count).
+
+    The input is cast to float64. Masked values are discarded.
+
+    Parameters
+    ----------
+    arr : sequence
+        Input array. The array is flattened if it is not 1D.
+
+    Returns
+    -------
+    repeats : ndarray
+        Array of repeated values.
+    counts : ndarray
+        Array of counts.
+
+    Examples
+    --------
+    >>> from scipy.stats import mstats
+    >>> mstats.find_repeats([2, 1, 2, 3, 2, 2, 5])
+    (array([2.]), array([4]))
+
+    In the above example, 2 repeats 4 times.
+
+    >>> mstats.find_repeats([[10, 20, 1, 2], [5, 5, 4, 4]])
+    (array([4., 5.]), array([2, 2]))
+
+    In the above example, both 4 and 5 repeat 2 times.
+
+    """
+    # Make sure we get a copy. ma.compressed promises a "new array", but can
+    # actually return a reference.
+    compr = np.asarray(ma.compressed(arr), dtype=np.float64)
+    try:
+        need_copy = np.may_share_memory(compr, arr)
+    except AttributeError:
+        # numpy < 1.8.2 bug: np.may_share_memory([], []) raises,
+        # while in numpy 1.8.2 and above it just (correctly) returns False.
+        need_copy = False
+    if need_copy:
+        compr = compr.copy()
+    return _find_repeats(compr)
+
+
+def count_tied_groups(x, use_missing=False):
+    """
+    Counts the number of tied values.
+
+    Parameters
+    ----------
+    x : sequence
+        Sequence of data on which to count the ties.
+    use_missing : bool, optional
+        Whether to consider missing values as tied.
+
+    Returns
+    -------
+    count_tied_groups : dict
+        Returns a dictionary (nb of ties: nb of groups).
+
+    Examples
+    --------
+    >>> from scipy.stats import mstats
+    >>> import numpy as np
+    >>> z = [0, 0, 0, 2, 2, 2, 3, 3, 4, 5, 6]
+    >>> mstats.count_tied_groups(z)
+    {2: 1, 3: 2}
+
+    In the above example, the ties were 0 (3x), 2 (3x) and 3 (2x).
+
+    >>> z = np.ma.array([0, 0, 1, 2, 2, 2, 3, 3, 4, 5, 6])
+    >>> mstats.count_tied_groups(z)
+    {2: 2, 3: 1}
+    >>> z[[1,-1]] = np.ma.masked
+    >>> mstats.count_tied_groups(z, use_missing=True)
+    {2: 2, 3: 1}
+
+    """
+    nmasked = ma.getmask(x).sum()
+    # We need the copy as find_repeats will overwrite the initial data
+    data = ma.compressed(x).copy()
+    (ties, counts) = find_repeats(data)
+    nties = {}
+    if len(ties):
+        nties = dict(zip(np.unique(counts), itertools.repeat(1)))
+        nties.update(dict(zip(*find_repeats(counts))))
+
+    if nmasked and use_missing:
+        try:
+            nties[nmasked] += 1
+        except KeyError:
+            nties[nmasked] = 1
+
+    return nties
+
+
+def rankdata(data, axis=None, use_missing=False):
+    """Returns the rank (also known as order statistics) of each data point
+    along the given axis.
+
+    If some values are tied, their rank is averaged.
+    If some values are masked, their rank is set to 0 if use_missing is False,
+    or set to the average rank of the unmasked values if use_missing is True.
+
+    Parameters
+    ----------
+    data : sequence
+        Input data. The data is transformed to a masked array
+    axis : {None,int}, optional
+        Axis along which to perform the ranking.
+        If None, the array is first flattened. An exception is raised if
+        the axis is specified for arrays with a dimension larger than 2
+    use_missing : bool, optional
+        Whether the masked values have a rank of 0 (False) or equal to the
+        average rank of the unmasked values (True).
+
+    """
+    def _rank1d(data, use_missing=False):
+        n = data.count()
+        rk = np.empty(data.size, dtype=float)
+        idx = data.argsort()
+        rk[idx[:n]] = np.arange(1,n+1)
+
+        if use_missing:
+            rk[idx[n:]] = (n+1)/2.
+        else:
+            rk[idx[n:]] = 0
+
+        repeats = find_repeats(data.copy())
+        for r in repeats[0]:
+            condition = (data == r).filled(False)
+            rk[condition] = rk[condition].mean()
+        return rk
+
+    data = ma.array(data, copy=False)
+    if axis is None:
+        if data.ndim > 1:
+            return _rank1d(data.ravel(), use_missing).reshape(data.shape)
+        else:
+            return _rank1d(data, use_missing)
+    else:
+        return ma.apply_along_axis(_rank1d,axis,data,use_missing).view(ndarray)
+
+
+ModeResult = namedtuple('ModeResult', ('mode', 'count'))
+
+
+def mode(a, axis=0):
+    """
+    Returns an array of the modal (most common) value in the passed array.
+
+    Parameters
+    ----------
+    a : array_like
+        n-dimensional array of which to find mode(s).
+    axis : int or None, optional
+        Axis along which to operate. Default is 0. If None, compute over
+        the whole array `a`.
+
+    Returns
+    -------
+    mode : ndarray
+        Array of modal values.
+    count : ndarray
+        Array of counts for each mode.
+
+    Notes
+    -----
+    For more details, see `scipy.stats.mode`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> from scipy.stats import mstats
+    >>> m_arr = np.ma.array([1, 1, 0, 0, 0, 0], mask=[0, 0, 1, 1, 1, 0])
+    >>> mstats.mode(m_arr)  # note that most zeros are masked
+    ModeResult(mode=array([1.]), count=array([2.]))
+
+    """
+    return _mode(a, axis=axis, keepdims=True)
+
+
+def _mode(a, axis=0, keepdims=True):
+    # Don't want to expose `keepdims` from the public `mstats.mode`
+    a, axis = _chk_asarray(a, axis)
+
+    def _mode1D(a):
+        (rep,cnt) = find_repeats(a)
+        if not cnt.ndim:
+            return (0, 0)
+        elif cnt.size:
+            return (rep[cnt.argmax()], cnt.max())
+        else:
+            return (a.min(), 1)
+
+    if axis is None:
+        output = _mode1D(ma.ravel(a))
+        output = (ma.array(output[0]), ma.array(output[1]))
+    else:
+        output = ma.apply_along_axis(_mode1D, axis, a)
+        if keepdims is None or keepdims:
+            newshape = list(a.shape)
+            newshape[axis] = 1
+            slices = [slice(None)] * output.ndim
+            slices[axis] = 0
+            modes = output[tuple(slices)].reshape(newshape)
+            slices[axis] = 1
+            counts = output[tuple(slices)].reshape(newshape)
+            output = (modes, counts)
+        else:
+            output = np.moveaxis(output, axis, 0)
+
+    return ModeResult(*output)
+
+
+def _betai(a, b, x):
+    x = np.asanyarray(x)
+    x = ma.where(x < 1.0, x, 1.0)  # if x > 1 then return 1.0
+    return special.betainc(a, b, x)
+
+
+def msign(x):
+    """Returns the sign of x, or 0 if x is masked."""
+    return ma.filled(np.sign(x), 0)
+
+
+def pearsonr(x, y):
+    r"""
+    Pearson correlation coefficient and p-value for testing non-correlation.
+
+    The Pearson correlation coefficient [1]_ measures the linear relationship
+    between two datasets.  The calculation of the p-value relies on the
+    assumption that each dataset is normally distributed.  (See Kowalski [3]_
+    for a discussion of the effects of non-normality of the input on the
+    distribution of the correlation coefficient.)  Like other correlation
+    coefficients, this one varies between -1 and +1 with 0 implying no
+    correlation. Correlations of -1 or +1 imply an exact linear relationship.
+
+    Parameters
+    ----------
+    x : (N,) array_like
+        Input array.
+    y : (N,) array_like
+        Input array.
+
+    Returns
+    -------
+    r : float
+        Pearson's correlation coefficient.
+    p-value : float
+        Two-tailed p-value.
+
+    Warns
+    -----
+    PearsonRConstantInputWarning
+        Raised if an input is a constant array.  The correlation coefficient
+        is not defined in this case, so ``np.nan`` is returned.
+
+    PearsonRNearConstantInputWarning
+        Raised if an input is "nearly" constant.  The array ``x`` is considered
+        nearly constant if ``norm(x - mean(x)) < 1e-13 * abs(mean(x))``.
+        Numerical errors in the calculation ``x - mean(x)`` in this case might
+        result in an inaccurate calculation of r.
+
+    See Also
+    --------
+    spearmanr : Spearman rank-order correlation coefficient.
+    kendalltau : Kendall's tau, a correlation measure for ordinal data.
+
+    Notes
+    -----
+    The correlation coefficient is calculated as follows:
+
+    .. math::
+
+        r = \frac{\sum (x - m_x) (y - m_y)}
+                 {\sqrt{\sum (x - m_x)^2 \sum (y - m_y)^2}}
+
+    where :math:`m_x` is the mean of the vector x and :math:`m_y` is
+    the mean of the vector y.
+
+    Under the assumption that x and y are drawn from
+    independent normal distributions (so the population correlation coefficient
+    is 0), the probability density function of the sample correlation
+    coefficient r is ([1]_, [2]_):
+
+    .. math::
+
+        f(r) = \frac{{(1-r^2)}^{n/2-2}}{\mathrm{B}(\frac{1}{2},\frac{n}{2}-1)}
+
+    where n is the number of samples, and B is the beta function.  This
+    is sometimes referred to as the exact distribution of r.  This is
+    the distribution that is used in `pearsonr` to compute the p-value.
+    The distribution is a beta distribution on the interval [-1, 1],
+    with equal shape parameters a = b = n/2 - 1.  In terms of SciPy's
+    implementation of the beta distribution, the distribution of r is::
+
+        dist = scipy.stats.beta(n/2 - 1, n/2 - 1, loc=-1, scale=2)
+
+    The p-value returned by `pearsonr` is a two-sided p-value. The p-value
+    roughly indicates the probability of an uncorrelated system
+    producing datasets that have a Pearson correlation at least as extreme
+    as the one computed from these datasets. More precisely, for a
+    given sample with correlation coefficient r, the p-value is
+    the probability that abs(r') of a random sample x' and y' drawn from
+    the population with zero correlation would be greater than or equal
+    to abs(r). In terms of the object ``dist`` shown above, the p-value
+    for a given r and length n can be computed as::
+
+        p = 2*dist.cdf(-abs(r))
+
+    When n is 2, the above continuous distribution is not well-defined.
+    One can interpret the limit of the beta distribution as the shape
+    parameters a and b approach a = b = 0 as a discrete distribution with
+    equal probability masses at r = 1 and r = -1.  More directly, one
+    can observe that, given the data x = [x1, x2] and y = [y1, y2], and
+    assuming x1 != x2 and y1 != y2, the only possible values for r are 1
+    and -1.  Because abs(r') for any sample x' and y' with length 2 will
+    be 1, the two-sided p-value for a sample of length 2 is always 1.
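+
+    This can be checked directly (a small sketch of the length-2 case):
+
+    >>> from scipy.stats import mstats
+    >>> r, p = mstats.pearsonr([1, 2], [3, 5])
+    >>> float(r), float(p)
+    (1.0, 1.0)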
+
+    References
+    ----------
+    .. [1] "Pearson correlation coefficient", Wikipedia,
+           https://en.wikipedia.org/wiki/Pearson_correlation_coefficient
+    .. [2] Student, "Probable error of a correlation coefficient",
+           Biometrika, Volume 6, Issue 2-3, 1 September 1908, pp. 302-310.
+    .. [3] C. J. Kowalski, "On the Effects of Non-Normality on the Distribution
+           of the Sample Product-Moment Correlation Coefficient"
+           Journal of the Royal Statistical Society. Series C (Applied
+           Statistics), Vol. 21, No. 1 (1972), pp. 1-12.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> from scipy.stats import mstats
+    >>> mstats.pearsonr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
+    (-0.7426106572325057, 0.1505558088534455)
+
+    There is a linear dependence between x and y if y = a + b*x + e, where
+    a,b are constants and e is a random error term, assumed to be independent
+    of x. For simplicity, assume that x is standard normal, a=0, b=1 and let
+    e follow a normal distribution with mean zero and standard deviation s>0.
+
+    >>> s = 0.5
+    >>> x = stats.norm.rvs(size=500)
+    >>> e = stats.norm.rvs(scale=s, size=500)
+    >>> y = x + e
+    >>> mstats.pearsonr(x, y)
+    (0.9029601878969703, 8.428978827629898e-185) # may vary
+
+    This should be close to the exact value given by
+
+    >>> 1/np.sqrt(1 + s**2)
+    0.8944271909999159
+
+    For s=0.5, we observe a high level of correlation. In general, a large
+    variance of the noise reduces the correlation, while the correlation
+    approaches one as the variance of the error goes to zero.
+
+    It is important to keep in mind that no correlation does not imply
+    independence unless (x, y) is jointly normal. Correlation can even be zero
+    when there is a very simple dependence structure: if X follows a
+    standard normal distribution, let y = abs(x). Note that the correlation
+    between x and y is zero. Indeed, since the expectation of x is zero,
+    cov(x, y) = E[x*y]. By definition, this equals E[x*abs(x)] which is zero
+    by symmetry. The following lines of code illustrate this observation:
+
+    >>> y = np.abs(x)
+    >>> mstats.pearsonr(x, y)
+    (-0.016172891856853524, 0.7182823678751942) # may vary
+
+    A non-zero correlation coefficient can be misleading. For example, if X has
+    a standard normal distribution, define y = x if x < 0 and y = 0 otherwise.
+    A simple calculation shows that corr(x, y) = sqrt(2/Pi) = 0.797...,
+    implying a high level of correlation:
+
+    >>> y = np.where(x < 0, x, 0)
+    >>> mstats.pearsonr(x, y)
+    (0.8537091583771509, 3.183461621422181e-143) # may vary
+
+    This is counterintuitive, since there is no dependence of x and y if x is
+    larger than zero, which happens in about half of the cases if we sample
+    x and y.
+    """
+    (x, y, n) = _chk_size(x, y)
+    (x, y) = (x.ravel(), y.ravel())
+    # Get the common mask and the total nb of unmasked elements
+    m = ma.mask_or(ma.getmask(x), ma.getmask(y))
+    n -= m.sum()
+    df = n-2
+    if df < 0:
+        return (masked, masked)
+
+    return scipy.stats._stats_py.pearsonr(ma.masked_array(x, mask=m).compressed(),
+                                      ma.masked_array(y, mask=m).compressed())
+
+
+def spearmanr(x, y=None, use_ties=True, axis=None, nan_policy='propagate',
+              alternative='two-sided'):
+    """
+    Calculates a Spearman rank-order correlation coefficient and the p-value
+    to test for non-correlation.
+
+    The Spearman correlation is a nonparametric measure of the monotonicity
+    of the relationship between two datasets. Unlike the Pearson correlation,
+    the Spearman correlation does not assume that both datasets are normally
+    distributed. Like other correlation coefficients, this one varies
+    between -1 and +1 with 0 implying no correlation. Correlations of -1 or
+    +1 imply an exact monotonic relationship. Positive correlations imply
+    that as `x` increases, so does `y`. Negative correlations imply that as
+    `x` increases, `y` decreases.
+
+    Missing values are discarded pair-wise: if a value is missing in `x`, the
+    corresponding value in `y` is masked.
+
+    The p-value roughly indicates the probability of an uncorrelated system
+    producing datasets that have a Spearman correlation at least as extreme
+    as the one computed from these datasets. The p-values are not entirely
+    reliable but are probably reasonable for datasets larger than 500 or so.
+
+    Parameters
+    ----------
+    x, y : 1D or 2D array_like, y is optional
+        One or two 1-D or 2-D arrays containing multiple variables and
+        observations. When these are 1-D, each represents a vector of
+        observations of a single variable. For the behavior in the 2-D case,
+        see under ``axis``, below.
+    use_ties : bool, optional
+        DO NOT USE.  Does not do anything, keyword is only left in place for
+        backwards compatibility reasons.
+    axis : int or None, optional
+        If axis=0 (default), then each column represents a variable, with
+        observations in the rows. If axis=1, the relationship is transposed:
+        each row represents a variable, while the columns contain observations.
+        If axis=None, then both arrays will be raveled.
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains nan. 'propagate' returns nan,
+        'raise' throws an error, 'omit' performs the calculations ignoring nan
+        values. Default is 'propagate'.
+    alternative : {'two-sided', 'less', 'greater'}, optional
+        Defines the alternative hypothesis. Default is 'two-sided'.
+        The following options are available:
+
+        * 'two-sided': the correlation is nonzero
+        * 'less': the correlation is negative (less than zero)
+        * 'greater':  the correlation is positive (greater than zero)
+
+        .. versionadded:: 1.7.0
+
+    Returns
+    -------
+    res : SignificanceResult
+        An object containing attributes:
+
+        statistic : float or ndarray (2-D square)
+            Spearman correlation matrix or correlation coefficient (if only 2
+            variables are given as parameters). Correlation matrix is square
+            with length equal to total number of variables (columns or rows) in
+            ``a`` and ``b`` combined.
+        pvalue : float
+            The p-value for a hypothesis test whose null hypothesis
+            is that two sets of data are linearly uncorrelated. See
+            `alternative` above for alternative hypotheses. `pvalue` has the
+            same shape as `statistic`.
+
+    References
+    ----------
+    [CRCProbStat2000] section 14.7
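+
+    Examples
+    --------
+    A short sketch on masked input (the fifth pair is discarded because
+    ``x`` is masked there):
+
+    >>> import numpy as np
+    >>> from scipy.stats import mstats
+    >>> x = np.ma.array([1, 2, 3, 4, 5], mask=[0, 0, 0, 0, 1])
+    >>> y = [5, 24, 6, 15, 100]
+    >>> res = mstats.spearmanr(x, y)
+    >>> float(np.round(res.statistic, 2))
+    0.4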
+
+    """
+    if not use_ties:
+        raise ValueError("`use_ties=False` is not supported in SciPy >= 1.2.0")
+
+    # Always returns a masked array, raveled if axis=None
+    x, axisout = _chk_asarray(x, axis)
+    if y is not None:
+        # Deal only with 2-D `x` case.
+        y, _ = _chk_asarray(y, axis)
+        if axisout == 0:
+            x = ma.column_stack((x, y))
+        else:
+            x = ma.row_stack((x, y))
+
+    if axisout == 1:
+        # To simplify the code that follows (always use `n_obs, n_vars` shape)
+        x = x.T
+
+    if nan_policy == 'omit':
+        x = ma.masked_invalid(x)
+
+    def _spearmanr_2cols(x):
+        # Mask the same observations for all variables, and then drop those
+        # observations (can't leave them masked, rankdata is weird).
+        x = ma.mask_rowcols(x, axis=0)
+        x = x[~x.mask.any(axis=1), :]
+
+        # If either column is entirely NaN or Inf
+        if not np.any(x.data):
+            res = scipy.stats._stats_py.SignificanceResult(np.nan, np.nan)
+            res.correlation = np.nan
+            return res
+
+        m = ma.getmask(x)
+        n_obs = x.shape[0]
+        dof = n_obs - 2 - int(m.sum(axis=0)[0])
+        if dof < 0:
+            raise ValueError("The input must have at least 3 entries!")
+
+        # Gets the ranks and rank differences
+        x_ranked = rankdata(x, axis=0)
+        rs = ma.corrcoef(x_ranked, rowvar=False).data
+
+        # rs can have elements equal to 1, so avoid zero division warnings
+        with np.errstate(divide='ignore'):
+            # clip the small negative values possibly caused by rounding
+            # errors before taking the square root
+            t = rs * np.sqrt((dof / ((rs+1.0) * (1.0-rs))).clip(0))
+
+        t, prob = scipy.stats._stats_py._ttest_finish(dof, t, alternative)
+
+        # For backwards compatibility, return scalars when comparing 2 columns
+        if rs.shape == (2, 2):
+            res = scipy.stats._stats_py.SignificanceResult(rs[1, 0],
+                                                           prob[1, 0])
+            res.correlation = rs[1, 0]
+            return res
+        else:
+            res = scipy.stats._stats_py.SignificanceResult(rs, prob)
+            res.correlation = rs
+            return res
+
+    # Need to do this per pair of variables, otherwise the dropped observations
+    # in a third column mess up the result for a pair.
+    n_vars = x.shape[1]
+    if n_vars == 2:
+        return _spearmanr_2cols(x)
+    else:
+        rs = np.ones((n_vars, n_vars), dtype=float)
+        prob = np.zeros((n_vars, n_vars), dtype=float)
+        for var1 in range(n_vars - 1):
+            for var2 in range(var1+1, n_vars):
+                result = _spearmanr_2cols(x[:, [var1, var2]])
+                rs[var1, var2] = result.correlation
+                rs[var2, var1] = result.correlation
+                prob[var1, var2] = result.pvalue
+                prob[var2, var1] = result.pvalue
+
+        res = scipy.stats._stats_py.SignificanceResult(rs, prob)
+        res.correlation = rs
+        return res
+
+
+def _kendall_p_exact(n, c, alternative='two-sided'):
+
+    # Use the fact that distribution is symmetric: always calculate a CDF in
+    # the left tail.
+    # This will be the one-sided p-value if `c` is on the side of
+    # the null distribution predicted by the alternative hypothesis.
+    # The two-sided p-value will be twice this value.
+    # If `c` is on the other side of the null distribution, we'll need to
+    # take the complement and add back the probability mass at `c`.
+    in_right_tail = (c >= (n*(n-1))//2 - c)
+    alternative_greater = (alternative == 'greater')
+    c = int(min(c, (n*(n-1))//2 - c))
+
+    # Exact p-value, see Maurice G. Kendall, "Rank Correlation Methods"
+    # (4th Edition), Charles Griffin & Co., 1970.
+    if n <= 0:
+        raise ValueError(f'n ({n}) must be positive')
+    elif c < 0 or 4*c > n*(n-1):
+        raise ValueError(f'c ({c}) must satisfy 0 <= 4c <= n(n-1) = {n*(n-1)}.')
+    elif n == 1:
+        prob = 1.0
+        p_mass_at_c = 1
+    elif n == 2:
+        prob = 1.0
+        p_mass_at_c = 0.5
+    elif c == 0:
+        prob = 2.0/math.factorial(n) if n < 171 else 0.0
+        p_mass_at_c = prob/2
+    elif c == 1:
+        prob = 2.0/math.factorial(n-1) if n < 172 else 0.0
+        p_mass_at_c = (n-1)/math.factorial(n)
+    elif 4*c == n*(n-1) and alternative == 'two-sided':
+        # I'm sure there's a simple formula for p_mass_at_c in this
+        # case, but I don't know it. Use generic formula for one-sided p-value.
+        prob = 1.0
+    elif n < 171:
+        new = np.zeros(c+1)
+        new[0:2] = 1.0
+        for j in range(3,n+1):
+            new = np.cumsum(new)
+            if j <= c:
+                new[j:] -= new[:c+1-j]
+        prob = 2.0*np.sum(new)/math.factorial(n)
+        p_mass_at_c = new[-1]/math.factorial(n)
+    else:
+        new = np.zeros(c+1)
+        new[0:2] = 1.0
+        for j in range(3, n+1):
+            new = np.cumsum(new)/j
+            if j <= c:
+                new[j:] -= new[:c+1-j]
+        prob = np.sum(new)
+        p_mass_at_c = new[-1]/2
+
+    if alternative != 'two-sided':
+        # if the alternative hypothesis and alternative agree,
+        # one-sided p-value is half the two-sided p-value
+        if in_right_tail == alternative_greater:
+            prob /= 2
+        else:
+            prob = 1 - prob/2 + p_mass_at_c
+
+    prob = np.clip(prob, 0, 1)
+
+    return prob
+
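+# Illustrative check of `_kendall_p_exact` (a hedged sketch): with n = 3
+# there are 3! = 6 orderings and the number of discordant pairs c takes the
+# values 0, 1, 1, 2, 2, 3, so P(c <= 0) = 1/6 and the two-sided p-value for
+# c = 0 is 2/6:
+#
+#     >>> _kendall_p_exact(3, 0)
+#     0.3333333333333333
+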
+
+def kendalltau(x, y, use_ties=True, use_missing=False, method='auto',
+               alternative='two-sided'):
+    """
+    Computes Kendall's rank correlation tau on two variables *x* and *y*.
+
+    Parameters
+    ----------
+    x : sequence
+        First data list (for example, time).
+    y : sequence
+        Second data list.
+    use_ties : {True, False}, optional
+        Whether ties correction should be performed.
+    use_missing : {False, True}, optional
+        Whether missing data should be allocated a rank of 0 (False) or the
+        average rank (True)
+    method : {'auto', 'asymptotic', 'exact'}, optional
+        Defines which method is used to calculate the p-value [1]_.
+        'asymptotic' uses a normal approximation valid for large samples.
+        'exact' computes the exact p-value, but can only be used if no ties
+        are present. As the sample size increases, the 'exact' computation
+        time may grow and the result may lose some precision.
+        'auto' is the default and selects the appropriate
+        method based on a trade-off between speed and accuracy.
+    alternative : {'two-sided', 'less', 'greater'}, optional
+        Defines the alternative hypothesis. Default is 'two-sided'.
+        The following options are available:
+
+        * 'two-sided': the rank correlation is nonzero
+        * 'less': the rank correlation is negative (less than zero)
+        * 'greater':  the rank correlation is positive (greater than zero)
+
+    Returns
+    -------
+    res : SignificanceResult
+        An object containing attributes:
+
+        statistic : float
+           The tau statistic.
+        pvalue : float
+           The p-value for a hypothesis test whose null hypothesis is
+           an absence of association, tau = 0.
+
+    References
+    ----------
+    .. [1] Maurice G. Kendall, "Rank Correlation Methods" (4th Edition),
+           Charles Griffin & Co., 1970.
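+
+    Examples
+    --------
+    A minimal sketch (no ties, so the exact p-value is used):
+
+    >>> from scipy.stats import mstats
+    >>> res = mstats.kendalltau([1, 2, 3, 4, 5], [1, 2, 3, 4, 5])
+    >>> float(res.statistic), float(res.pvalue)
+    (1.0, 0.016666666666666666)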
+
+    """
+    (x, y, n) = _chk_size(x, y)
+    (x, y) = (x.flatten(), y.flatten())
+    m = ma.mask_or(ma.getmask(x), ma.getmask(y))
+    if m is not nomask:
+        x = ma.array(x, mask=m, copy=True)
+        y = ma.array(y, mask=m, copy=True)
+        # need int() here, otherwise numpy defaults to 32 bit
+        # integer on all Windows architectures, causing overflow.
+        # int() has arbitrary precision.
+        n -= int(m.sum())
+
+    if n < 2:
+        res = scipy.stats._stats_py.SignificanceResult(np.nan, np.nan)
+        res.correlation = np.nan
+        return res
+
+    rx = ma.masked_equal(rankdata(x, use_missing=use_missing), 0)
+    ry = ma.masked_equal(rankdata(y, use_missing=use_missing), 0)
+    idx = rx.argsort()
+    (rx, ry) = (rx[idx], ry[idx])
+    C = np.sum([((ry[i+1:] > ry[i]) * (rx[i+1:] > rx[i])).filled(0).sum()
+                for i in range(len(ry)-1)], dtype=float)
+    D = np.sum([((ry[i+1:] < ry[i])*(rx[i+1:] > rx[i])).filled(0).sum()
+                for i in range(len(ry)-1)], dtype=float)
+    xties = count_tied_groups(x)
+    yties = count_tied_groups(y)
+    if use_ties:
+        corr_x = np.sum([v*k*(k-1) for (k,v) in xties.items()], dtype=float)
+        corr_y = np.sum([v*k*(k-1) for (k,v) in yties.items()], dtype=float)
+        denom = ma.sqrt((n*(n-1)-corr_x)/2. * (n*(n-1)-corr_y)/2.)
+    else:
+        denom = n*(n-1)/2.
+    tau = (C-D) / denom
+
+    if method == 'exact' and (xties or yties):
+        raise ValueError("Ties found, exact method cannot be used.")
+
+    if method == 'auto':
+        if (not xties and not yties) and (n <= 33 or min(C, n*(n-1)/2.0-C) <= 1):
+            method = 'exact'
+        else:
+            method = 'asymptotic'
+
+    if not xties and not yties and method == 'exact':
+        prob = _kendall_p_exact(n, C, alternative)
+
+    elif method == 'asymptotic':
+        var_s = n*(n-1)*(2*n+5)
+        if use_ties:
+            var_s -= np.sum([v*k*(k-1)*(2*k+5)*1. for (k,v) in xties.items()])
+            var_s -= np.sum([v*k*(k-1)*(2*k+5)*1. for (k,v) in yties.items()])
+            v1 = np.sum([v*k*(k-1) for (k, v) in xties.items()], dtype=float) *\
+                 np.sum([v*k*(k-1) for (k, v) in yties.items()], dtype=float)
+            v1 /= 2.*n*(n-1)
+            if n > 2:
+                v2 = np.sum([v*k*(k-1)*(k-2) for (k,v) in xties.items()],
+                            dtype=float) * \
+                     np.sum([v*k*(k-1)*(k-2) for (k,v) in yties.items()],
+                            dtype=float)
+                v2 /= 9.*n*(n-1)*(n-2)
+            else:
+                v2 = 0
+        else:
+            v1 = v2 = 0
+
+        var_s /= 18.
+        var_s += (v1 + v2)
+        z = (C-D)/np.sqrt(var_s)
+        _, prob = scipy.stats._stats_py._normtest_finish(z, alternative)
+    else:
+        raise ValueError("Unknown method "+str(method)+" specified, please "
+                         "use auto, exact or asymptotic.")
+
+    res = scipy.stats._stats_py.SignificanceResult(tau, prob)
+    res.correlation = tau
+    return res
+
+
+def kendalltau_seasonal(x):
+    """
+    Computes a multivariate Kendall's rank correlation tau, for seasonal data.
+
+    Parameters
+    ----------
+    x : 2-D ndarray
+        Array of seasonal data, with seasons in columns.
+
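+    Returns
+    -------
+    output : dict
+        A dictionary containing the seasonal and global Kendall's tau
+        statistics, the associated p-values (computed under both
+        independence and dependence of the seasons), and chi-squared
+        statistics for the total and for the trend.
+
+    Examples
+    --------
+    A minimal sketch on arbitrary random data, showing the keys of the
+    returned dictionary:
+
+    >>> import numpy as np
+    >>> from scipy.stats import mstats
+    >>> rng = np.random.default_rng()
+    >>> x = rng.random((24, 4))   # 24 observations for each of 4 seasons
+    >>> out = mstats.kendalltau_seasonal(x)
+    >>> sorted(out.keys())   # doctest: +NORMALIZE_WHITESPACE
+    ['chi2 total', 'chi2 trend', 'global p-value (dep)',
+     'global p-value (indep)', 'global tau', 'global tau (alt)',
+     'seasonal p-value', 'seasonal tau']
+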
+    """
+    x = ma.array(x, subok=True, copy=False, ndmin=2)
+    (n,m) = x.shape
+    n_p = x.count(0)
+
+    S_szn = sum(msign(x[i:]-x[i]).sum(0) for i in range(n))
+    S_tot = S_szn.sum()
+
+    n_tot = x.count()
+    ties = count_tied_groups(x.compressed())
+    corr_ties = sum(v*k*(k-1) for (k,v) in ties.items())
+    denom_tot = ma.sqrt(1.*n_tot*(n_tot-1)*(n_tot*(n_tot-1)-corr_ties))/2.
+
+    R = rankdata(x, axis=0, use_missing=True)
+    K = ma.empty((m,m), dtype=int)
+    covmat = ma.empty((m,m), dtype=float)
+    denom_szn = ma.empty(m, dtype=float)
+    for j in range(m):
+        ties_j = count_tied_groups(x[:,j].compressed())
+        corr_j = sum(v*k*(k-1) for (k,v) in ties_j.items())
+        cmb = n_p[j]*(n_p[j]-1)
+        for k in range(j,m,1):
+            K[j,k] = sum(msign((x[i:,j]-x[i,j])*(x[i:,k]-x[i,k])).sum()
+                               for i in range(n))
+            covmat[j,k] = (K[j,k] + 4*(R[:,j]*R[:,k]).sum() -
+                           n*(n_p[j]+1)*(n_p[k]+1))/3.
+            K[k,j] = K[j,k]
+            covmat[k,j] = covmat[j,k]
+
+        denom_szn[j] = ma.sqrt(cmb*(cmb-corr_j)) / 2.
+
+    var_szn = covmat.diagonal()
+
+    z_szn = msign(S_szn) * (abs(S_szn)-1) / ma.sqrt(var_szn)
+    z_tot_ind = msign(S_tot) * (abs(S_tot)-1) / ma.sqrt(var_szn.sum())
+    z_tot_dep = msign(S_tot) * (abs(S_tot)-1) / ma.sqrt(covmat.sum())
+
+    prob_szn = special.erfc(abs(z_szn)/np.sqrt(2))
+    prob_tot_ind = special.erfc(abs(z_tot_ind)/np.sqrt(2))
+    prob_tot_dep = special.erfc(abs(z_tot_dep)/np.sqrt(2))
+
+    chi2_tot = (z_szn*z_szn).sum()
+    chi2_trd = m * z_szn.mean()**2
+    output = {'seasonal tau': S_szn/denom_szn,
+              'global tau': S_tot/denom_tot,
+              'global tau (alt)': S_tot/denom_szn.sum(),
+              'seasonal p-value': prob_szn,
+              'global p-value (indep)': prob_tot_ind,
+              'global p-value (dep)': prob_tot_dep,
+              'chi2 total': chi2_tot,
+              'chi2 trend': chi2_trd,
+              }
+    return output
+
+
+PointbiserialrResult = namedtuple('PointbiserialrResult', ('correlation',
+                                                           'pvalue'))
+
+
+def pointbiserialr(x, y):
+    """Calculates a point biserial correlation coefficient and its p-value.
+
+    Parameters
+    ----------
+    x : array_like of bools
+        Input array.
+    y : array_like
+        Input array.
+
+    Returns
+    -------
+    correlation : float
+        R value
+    pvalue : float
+        2-tailed p-value
+
+    Notes
+    -----
+    Missing values are considered pair-wise: if a value is missing in x,
+    the corresponding value in y is masked.
+
+    For more details on `pointbiserialr`, see `scipy.stats.pointbiserialr`.
+
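+    Examples
+    --------
+    A minimal sketch with arbitrary values; the binary variable splits `y`
+    into the groups (1, 2, 3) and (4, 5, 6):
+
+    >>> import numpy as np
+    >>> from scipy.stats import mstats
+    >>> x = np.array([False, False, False, True, True, True])
+    >>> y = np.array([1., 2., 3., 4., 5., 6.])
+    >>> res = mstats.pointbiserialr(x, y)
+    >>> print(np.round(res.correlation, 4))
+    0.8783
+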
+    """
+    x = ma.fix_invalid(x, copy=True).astype(bool)
+    y = ma.fix_invalid(y, copy=True).astype(float)
+    # Get rid of the missing data
+    m = ma.mask_or(ma.getmask(x), ma.getmask(y))
+    if m is not nomask:
+        unmask = np.logical_not(m)
+        x = x[unmask]
+        y = y[unmask]
+
+    n = len(x)
+    # phat is the fraction of x values that are True
+    phat = x.sum() / float(n)
+    y0 = y[~x]  # y-values where x is False
+    y1 = y[x]  # y-values where x is True
+    y0m = y0.mean()
+    y1m = y1.mean()
+
+    rpb = (y1m - y0m)*np.sqrt(phat * (1-phat)) / y.std()
+
+    df = n-2
+    t = rpb*ma.sqrt(df/(1.0-rpb**2))
+    prob = _betai(0.5*df, 0.5, df/(df+t*t))
+
+    return PointbiserialrResult(rpb, prob)
+
+
+def linregress(x, y=None):
+    r"""
+    Linear regression calculation
+
+    Note that the non-masked version is used, and that this docstring is
+    replaced by the non-masked docstring + some info on missing data.
+
+    """
+    if y is None:
+        x = ma.array(x)
+        if x.shape[0] == 2:
+            x, y = x
+        elif x.shape[1] == 2:
+            x, y = x.T
+        else:
+            raise ValueError("If only `x` is given as input, "
+                             "it has to be of shape (2, N) or (N, 2), "
+                             f"provided shape was {x.shape}")
+    else:
+        x = ma.array(x)
+        y = ma.array(y)
+
+    x = x.flatten()
+    y = y.flatten()
+
+    if np.amax(x) == np.amin(x) and len(x) > 1:
+        raise ValueError("Cannot calculate a linear regression "
+                         "if all x values are identical")
+
+    m = ma.mask_or(ma.getmask(x), ma.getmask(y), shrink=False)
+    if m is not nomask:
+        x = ma.array(x, mask=m)
+        y = ma.array(y, mask=m)
+        if np.any(~m):
+            result = stats_linregress(x.data[~m], y.data[~m])
+        else:
+            # All data is masked
+            result = stats_LinregressResult(slope=None, intercept=None,
+                                            rvalue=None, pvalue=None,
+                                            stderr=None,
+                                            intercept_stderr=None)
+    else:
+        result = stats_linregress(x.data, y.data)
+
+    return result
+
+
+def theilslopes(y, x=None, alpha=0.95, method='separate'):
+    r"""
+    Computes the Theil-Sen estimator for a set of points (x, y).
+
+    `theilslopes` implements a method for robust linear regression.  It
+    computes the slope as the median of all slopes between paired values.
+
+    Parameters
+    ----------
+    y : array_like
+        Dependent variable.
+    x : array_like or None, optional
+        Independent variable. If None, use ``arange(len(y))`` instead.
+    alpha : float, optional
+        Confidence degree between 0 and 1. Default is 95% confidence.
+        Note that `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are
+        interpreted as "find the 90% confidence interval".
+    method : {'joint', 'separate'}, optional
+        Method to be used for computing estimate for intercept.
+        Following methods are supported,
+
+            * 'joint': Uses np.median(y - slope * x) as intercept.
+            * 'separate': Uses np.median(y) - slope * np.median(x)
+                          as intercept.
+
+        The default is 'separate'.
+
+        .. versionadded:: 1.8.0
+
+    Returns
+    -------
+    result : ``TheilslopesResult`` instance
+        The return value is an object with the following attributes:
+
+        slope : float
+            Theil slope.
+        intercept : float
+            Intercept of the Theil line.
+        low_slope : float
+            Lower bound of the confidence interval on `slope`.
+        high_slope : float
+            Upper bound of the confidence interval on `slope`.
+
+    See Also
+    --------
+    siegelslopes : a similar technique using repeated medians
+
+
+    Notes
+    -----
+    For more details on `theilslopes`, see `scipy.stats.theilslopes`.
+
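+    Examples
+    --------
+    A minimal sketch with arbitrary values: once the masked outlier is
+    discarded, the remaining points lie exactly on the line ``y = x``,
+    so the estimated slope is 1:
+
+    >>> import numpy as np
+    >>> from scipy.stats import mstats
+    >>> y = np.ma.array([0., 1., 2., 10., 4.], mask=[0, 0, 0, 1, 0])
+    >>> result = mstats.theilslopes(y)
+    >>> print(result.slope)
+    1.0
+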
+    """
+    y = ma.asarray(y).flatten()
+    if x is None:
+        x = ma.arange(len(y), dtype=float)
+    else:
+        x = ma.asarray(x).flatten()
+        if len(x) != len(y):
+            raise ValueError("Incompatible lengths ! (%s<>%s)" % (len(y),len(x)))
+
+    m = ma.mask_or(ma.getmask(x), ma.getmask(y))
+    y._mask = x._mask = m
+    # Disregard any masked elements of x or y
+    y = y.compressed()
+    x = x.compressed().astype(float)
+    # We now have unmasked arrays so can use `scipy.stats.theilslopes`
+    return stats_theilslopes(y, x, alpha=alpha, method=method)
+
+
+def siegelslopes(y, x=None, method="hierarchical"):
+    r"""
+    Computes the Siegel estimator for a set of points (x, y).
+
+    `siegelslopes` implements a method for robust linear regression
+    using repeated medians to fit a line to the points (x, y).
+    The method is robust to outliers with an asymptotic breakdown point
+    of 50%.
+
+    Parameters
+    ----------
+    y : array_like
+        Dependent variable.
+    x : array_like or None, optional
+        Independent variable. If None, use ``arange(len(y))`` instead.
+    method : {'hierarchical', 'separate'}
+        If 'hierarchical', estimate the intercept using the estimated
+        slope ``slope`` (default option).
+        If 'separate', estimate the intercept independent of the estimated
+        slope. See Notes for details.
+
+    Returns
+    -------
+    result : ``SiegelslopesResult`` instance
+        The return value is an object with the following attributes:
+
+        slope : float
+            Estimate of the slope of the regression line.
+        intercept : float
+            Estimate of the intercept of the regression line.
+
+    See Also
+    --------
+    theilslopes : a similar technique without repeated medians
+
+    Notes
+    -----
+    For more details on `siegelslopes`, see `scipy.stats.siegelslopes`.
+
+    """
+    y = ma.asarray(y).ravel()
+    if x is None:
+        x = ma.arange(len(y), dtype=float)
+    else:
+        x = ma.asarray(x).ravel()
+        if len(x) != len(y):
+            raise ValueError("Incompatible lengths ! (%s<>%s)" % (len(y), len(x)))
+
+    m = ma.mask_or(ma.getmask(x), ma.getmask(y))
+    y._mask = x._mask = m
+    # Disregard any masked elements of x or y
+    y = y.compressed()
+    x = x.compressed().astype(float)
+    # We now have unmasked arrays so can use `scipy.stats.siegelslopes`
+    return stats_siegelslopes(y, x, method=method)
+
+
+SenSeasonalSlopesResult = _make_tuple_bunch('SenSeasonalSlopesResult',
+                                            ['intra_slope', 'inter_slope'])
+
+
+def sen_seasonal_slopes(x):
+    r"""
+    Computes seasonal Theil-Sen and Kendall slope estimators.
+
+    The seasonal generalization of Sen's slope computes the slopes between all
+    pairs of values within a "season" (column) of a 2D array. It returns an
+    array containing the median of these "within-season" slopes for each
+    season (the Theil-Sen slope estimator of each season), and it returns the
+    median of the within-season slopes across all seasons (the seasonal Kendall
+    slope estimator).
+
+    Parameters
+    ----------
+    x : 2D array_like
+        Each column of `x` contains measurements of the dependent variable
+        within a season. The independent variable (usually time) of each season
+        is assumed to be ``np.arange(x.shape[0])``.
+
+    Returns
+    -------
+    result : ``SenSeasonalSlopesResult`` instance
+        The return value is an object with the following attributes:
+
+        intra_slope : ndarray
+            For each season, the Theil-Sen slope estimator: the median of
+            within-season slopes.
+        inter_slope : float
+            The seasonal Kendall slope estimator: the median of within-season
+            slopes *across all* seasons.
+
+    See Also
+    --------
+    theilslopes : the analogous function for non-seasonal data
+    scipy.stats.theilslopes : non-seasonal slopes for non-masked arrays
+
+    Notes
+    -----
+    The slopes :math:`d_{ijk}` within season :math:`i` are:
+
+    .. math::
+
+        d_{ijk} = \frac{x_{ij} - x_{ik}}
+                            {j - k}
+
+    for pairs of distinct integer indices :math:`j, k` of :math:`x`.
+
+    Element :math:`i` of the returned `intra_slope` array is the median of the
+    :math:`d_{ijk}` over all :math:`j < k`; this is the Theil-Sen slope
+    estimator of season :math:`i`. The returned `inter_slope` value, better
+    known as the seasonal Kendall slope estimator, is the median of the
+    :math:`d_{ijk}` over all :math:`i, j, k`.
+
+    References
+    ----------
+    .. [1] Hirsch, Robert M., James R. Slack, and Richard A. Smith.
+           "Techniques of trend analysis for monthly water quality data."
+           *Water Resources Research* 18.1 (1982): 107-121.
+
+    Examples
+    --------
+    Suppose we have 100 observations of a dependent variable for each of four
+    seasons:
+
+    >>> import numpy as np
+    >>> rng = np.random.default_rng()
+    >>> x = rng.random(size=(100, 4))
+
+    We compute the seasonal slopes as:
+
+    >>> from scipy import stats
+    >>> intra_slope, inter_slope = stats.mstats.sen_seasonal_slopes(x)
+
+    If we define a function to compute all slopes between observations within
+    a season:
+
+    >>> def dijk(yi):
+    ...     n = len(yi)
+    ...     x = np.arange(n)
+    ...     dy = yi - yi[:, np.newaxis]
+    ...     dx = x - x[:, np.newaxis]
+    ...     # we only want unique pairs of distinct indices
+    ...     mask = np.triu(np.ones((n, n), dtype=bool), k=1)
+    ...     return dy[mask]/dx[mask]
+
+    then element ``i`` of ``intra_slope`` is the median of ``dijk(x[:, i])``:
+
+    >>> i = 2
+    >>> np.allclose(np.median(dijk(x[:, i])), intra_slope[i])
+    True
+
+    and ``inter_slope`` is the median of the values returned by ``dijk`` for
+    all seasons:
+
+    >>> all_slopes = np.concatenate([dijk(x[:, i]) for i in range(x.shape[1])])
+    >>> np.allclose(np.median(all_slopes), inter_slope)
+    True
+
+    Because the data are randomly generated, we would expect the median slopes
+    to be nearly zero both within and across all seasons, and indeed they are:
+
+    >>> intra_slope.data
+    array([ 0.00124504, -0.00277761, -0.00221245, -0.00036338])
+    >>> inter_slope
+    -0.0010511779872922058
+
+    """
+    x = ma.array(x, subok=True, copy=False, ndmin=2)
+    (n,_) = x.shape
+    # Get list of slopes per season
+    szn_slopes = ma.vstack([(x[i+1:]-x[i])/np.arange(1,n-i)[:,None]
+                            for i in range(n)])
+    szn_medslopes = ma.median(szn_slopes, axis=0)
+    medslope = ma.median(szn_slopes, axis=None)
+    return SenSeasonalSlopesResult(szn_medslopes, medslope)
+
+
+Ttest_1sampResult = namedtuple('Ttest_1sampResult', ('statistic', 'pvalue'))
+
+
+def ttest_1samp(a, popmean, axis=0, alternative='two-sided'):
+    """
+    Calculates the T-test for the mean of ONE group of scores.
+
+    Parameters
+    ----------
+    a : array_like
+        sample observation
+    popmean : float or array_like
+        Expected value in the null hypothesis. If array_like, it must have
+        the same shape as `a`, excluding the axis dimension.
+    axis : int or None, optional
+        Axis along which to compute test. If None, compute over the whole
+        array `a`.
+    alternative : {'two-sided', 'less', 'greater'}, optional
+        Defines the alternative hypothesis.
+        The following options are available (default is 'two-sided'):
+
+        * 'two-sided': the mean of the underlying distribution of the sample
+          is different than the given population mean (`popmean`)
+        * 'less': the mean of the underlying distribution of the sample is
+          less than the given population mean (`popmean`)
+        * 'greater': the mean of the underlying distribution of the sample is
+          greater than the given population mean (`popmean`)
+
+        .. versionadded:: 1.7.0
+
+    Returns
+    -------
+    statistic : float or array
+        t-statistic
+    pvalue : float or array
+        The p-value
+
+    Notes
+    -----
+    For more details on `ttest_1samp`, see `scipy.stats.ttest_1samp`.
+
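+    Examples
+    --------
+    A minimal sketch with arbitrary values: the masked observation is
+    ignored, and the mean of the remaining data equals `popmean`, so the
+    statistic is 0:
+
+    >>> import numpy as np
+    >>> from scipy.stats import mstats
+    >>> a = np.ma.array([1., 2., 3., 4., 5., 99.], mask=[0, 0, 0, 0, 0, 1])
+    >>> res = mstats.ttest_1samp(a, popmean=3.)
+    >>> print(res.statistic, res.pvalue)
+    0.0 1.0
+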
+    """
+    a, axis = _chk_asarray(a, axis)
+    if a.size == 0:
+        return (np.nan, np.nan)
+
+    x = a.mean(axis=axis)
+    v = a.var(axis=axis, ddof=1)
+    n = a.count(axis=axis)
+    # force df to be an array for masked division not to throw a warning
+    df = ma.asanyarray(n - 1.0)
+    svar = ((n - 1.0) * v) / df
+    with np.errstate(divide='ignore', invalid='ignore'):
+        t = (x - popmean) / ma.sqrt(svar / n)
+
+    t, prob = scipy.stats._stats_py._ttest_finish(df, t, alternative)
+    return Ttest_1sampResult(t, prob)
+
+
+ttest_onesamp = ttest_1samp
+
+
+Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
+
+
+def ttest_ind(a, b, axis=0, equal_var=True, alternative='two-sided'):
+    """
+    Calculates the T-test for the means of TWO INDEPENDENT samples of scores.
+
+    Parameters
+    ----------
+    a, b : array_like
+        The arrays must have the same shape, except in the dimension
+        corresponding to `axis` (the first, by default).
+    axis : int or None, optional
+        Axis along which to compute test. If None, compute over the whole
+        arrays, `a`, and `b`.
+    equal_var : bool, optional
+        If True, perform a standard independent 2 sample test that assumes equal
+        population variances.
+        If False, perform Welch's t-test, which does not assume equal population
+        variance.
+
+        .. versionadded:: 0.17.0
+    alternative : {'two-sided', 'less', 'greater'}, optional
+        Defines the alternative hypothesis.
+        The following options are available (default is 'two-sided'):
+
+        * 'two-sided': the means of the distributions underlying the samples
+          are unequal.
+        * 'less': the mean of the distribution underlying the first sample
+          is less than the mean of the distribution underlying the second
+          sample.
+        * 'greater': the mean of the distribution underlying the first
+          sample is greater than the mean of the distribution underlying
+          the second sample.
+
+        .. versionadded:: 1.7.0
+
+    Returns
+    -------
+    statistic : float or array
+        The calculated t-statistic.
+    pvalue : float or array
+        The p-value.
+
+    Notes
+    -----
+    For more details on `ttest_ind`, see `scipy.stats.ttest_ind`.
+
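+    Examples
+    --------
+    A minimal sketch with two small arbitrary samples (only the rounded
+    statistic is shown; the p-value is returned alongside it):
+
+    >>> import numpy as np
+    >>> from scipy.stats import mstats
+    >>> res = mstats.ttest_ind([1., 2., 3., 4.], [2., 3., 4., 5.])
+    >>> print(np.round(res.statistic, 4))
+    -1.0954
+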
+    """
+    a, b, axis = _chk2_asarray(a, b, axis)
+
+    if a.size == 0 or b.size == 0:
+        return Ttest_indResult(np.nan, np.nan)
+
+    (x1, x2) = (a.mean(axis), b.mean(axis))
+    (v1, v2) = (a.var(axis=axis, ddof=1), b.var(axis=axis, ddof=1))
+    (n1, n2) = (a.count(axis), b.count(axis))
+
+    if equal_var:
+        # force df to be an array for masked division not to throw a warning
+        df = ma.asanyarray(n1 + n2 - 2.0)
+        svar = ((n1-1)*v1+(n2-1)*v2) / df
+        denom = ma.sqrt(svar*(1.0/n1 + 1.0/n2))  # n-D computation here!
+    else:
+        vn1 = v1/n1
+        vn2 = v2/n2
+        with np.errstate(divide='ignore', invalid='ignore'):
+            df = (vn1 + vn2)**2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1))
+
+        # If df is undefined, variances are zero.
+        # It doesn't matter what df is as long as it is not NaN.
+        df = np.where(np.isnan(df), 1, df)
+        denom = ma.sqrt(vn1 + vn2)
+
+    with np.errstate(divide='ignore', invalid='ignore'):
+        t = (x1-x2) / denom
+
+    t, prob = scipy.stats._stats_py._ttest_finish(df, t, alternative)
+    return Ttest_indResult(t, prob)
+
+
+Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue'))
+
+
+def ttest_rel(a, b, axis=0, alternative='two-sided'):
+    """
+    Calculates the T-test on TWO RELATED samples of scores, a and b.
+
+    Parameters
+    ----------
+    a, b : array_like
+        The arrays must have the same shape.
+    axis : int or None, optional
+        Axis along which to compute test. If None, compute over the whole
+        arrays, `a`, and `b`.
+    alternative : {'two-sided', 'less', 'greater'}, optional
+        Defines the alternative hypothesis.
+        The following options are available (default is 'two-sided'):
+
+        * 'two-sided': the means of the distributions underlying the samples
+          are unequal.
+        * 'less': the mean of the distribution underlying the first sample
+          is less than the mean of the distribution underlying the second
+          sample.
+        * 'greater': the mean of the distribution underlying the first
+          sample is greater than the mean of the distribution underlying
+          the second sample.
+
+        .. versionadded:: 1.7.0
+
+    Returns
+    -------
+    statistic : float or array
+        t-statistic
+    pvalue : float or array
+        two-tailed p-value
+
+    Notes
+    -----
+    For more details on `ttest_rel`, see `scipy.stats.ttest_rel`.
+
+    """
+    a, b, axis = _chk2_asarray(a, b, axis)
+    if len(a) != len(b):
+        raise ValueError('unequal length arrays')
+
+    if a.size == 0 or b.size == 0:
+        return Ttest_relResult(np.nan, np.nan)
+
+    n = a.count(axis)
+    df = ma.asanyarray(n-1.0)
+    d = (a-b).astype('d')
+    dm = d.mean(axis)
+    v = d.var(axis=axis, ddof=1)
+    denom = ma.sqrt(v / n)
+    with np.errstate(divide='ignore', invalid='ignore'):
+        t = dm / denom
+
+    t, prob = scipy.stats._stats_py._ttest_finish(df, t, alternative)
+    return Ttest_relResult(t, prob)
+
+
+MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic',
+                                                       'pvalue'))
+
+
+def mannwhitneyu(x,y, use_continuity=True):
+    """
+    Computes the Mann-Whitney statistic
+
+    Missing values in `x` and/or `y` are discarded.
+
+    Parameters
+    ----------
+    x : sequence
+        Input
+    y : sequence
+        Input
+    use_continuity : {True, False}, optional
+        Whether a continuity correction (1/2.) should be taken into account.
+
+    Returns
+    -------
+    statistic : float
+        The minimum of the Mann-Whitney statistics
+    pvalue : float
+        Approximate two-sided p-value assuming a normal distribution.
+
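+    Examples
+    --------
+    A minimal sketch: the two arbitrary samples do not overlap, so the
+    reported statistic (the smaller of the two U values) is 0:
+
+    >>> from scipy.stats import mstats
+    >>> res = mstats.mannwhitneyu([1., 2., 3.], [4., 5., 6.])
+    >>> print(res.statistic)
+    0.0
+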
+    """
+    x = ma.asarray(x).compressed().view(ndarray)
+    y = ma.asarray(y).compressed().view(ndarray)
+    ranks = rankdata(np.concatenate([x,y]))
+    (nx, ny) = (len(x), len(y))
+    nt = nx + ny
+    U = ranks[:nx].sum() - nx*(nx+1)/2.
+    U = max(U, nx*ny - U)
+    u = nx*ny - U
+
+    mu = (nx*ny)/2.
+    sigsq = (nt**3 - nt)/12.
+    ties = count_tied_groups(ranks)
+    sigsq -= sum(v*(k**3-k) for (k,v) in ties.items())/12.
+    sigsq *= nx*ny/float(nt*(nt-1))
+
+    if use_continuity:
+        z = (U - 1/2. - mu) / ma.sqrt(sigsq)
+    else:
+        z = (U - mu) / ma.sqrt(sigsq)
+
+    prob = special.erfc(abs(z)/np.sqrt(2))
+    return MannwhitneyuResult(u, prob)
+
+
+KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue'))
+
+
+def kruskal(*args):
+    """
+    Compute the Kruskal-Wallis H-test for independent samples
+
+    Parameters
+    ----------
+    sample1, sample2, ... : array_like
+       Two or more arrays with the sample measurements can be given as
+       arguments.
+
+    Returns
+    -------
+    statistic : float
+       The Kruskal-Wallis H statistic, corrected for ties
+    pvalue : float
+       The p-value for the test using the assumption that H has a chi
+       square distribution
+
+    Notes
+    -----
+    For more details on `kruskal`, see `scipy.stats.kruskal`.
+
+    Examples
+    --------
+    >>> from scipy.stats.mstats import kruskal
+
+    Random samples from three different brands of batteries were tested
+    to see how long the charge lasted. Results were as follows:
+
+    >>> a = [6.3, 5.4, 5.7, 5.2, 5.0]
+    >>> b = [6.9, 7.0, 6.1, 7.9]
+    >>> c = [7.2, 6.9, 6.1, 6.5]
+
+    Test the hypothesis that the distribution functions for all of the
+    brands' durations are identical, using a 5% level of significance.
+
+    >>> kruskal(a, b, c)
+    KruskalResult(statistic=7.113812154696133, pvalue=0.028526948491942164)
+
+    The null hypothesis is rejected at the 5% level of significance
+    because the returned p-value is less than the significance level of 5%.
+
+    """
+    output = argstoarray(*args)
+    ranks = ma.masked_equal(rankdata(output, use_missing=False), 0)
+    sumrk = ranks.sum(-1)
+    ngrp = ranks.count(-1)
+    ntot = ranks.count()
+    H = 12./(ntot*(ntot+1)) * (sumrk**2/ngrp).sum() - 3*(ntot+1)
+    # Tie correction
+    ties = count_tied_groups(ranks)
+    T = 1. - sum(v*(k**3-k) for (k,v) in ties.items())/float(ntot**3-ntot)
+    if T == 0:
+        raise ValueError('All numbers are identical in kruskal')
+
+    H /= T
+    df = len(output) - 1
+    prob = distributions.chi2.sf(H, df)
+    return KruskalResult(H, prob)
+
+
+kruskalwallis = kruskal
+
+
+@_rename_parameter("mode", "method")
+def ks_1samp(x, cdf, args=(), alternative="two-sided", method='auto'):
+    """
+    Computes the Kolmogorov-Smirnov test on one sample of masked values.
+
+    Missing values in `x` are discarded.
+
+    Parameters
+    ----------
+    x : array_like
+        a 1-D array of observations of random variables.
+    cdf : str or callable
+        If a string, it should be the name of a distribution in `scipy.stats`.
+        If a callable, that callable is used to calculate the cdf.
+    args : tuple, sequence, optional
+        Distribution parameters, used if `cdf` is a string.
+    alternative : {'two-sided', 'less', 'greater'}, optional
+        Indicates the alternative hypothesis.  Default is 'two-sided'.
+    method : {'auto', 'exact', 'asymp'}, optional
+        Defines the method used for calculating the p-value.
+        The following options are available (default is 'auto'):
+
+          * 'auto' : use 'exact' for small size arrays, 'asymp' for large
+          * 'exact' : use approximation to exact distribution of test statistic
+          * 'asymp' : use asymptotic distribution of test statistic
+
+    Returns
+    -------
+    d : float
+        Value of the Kolmogorov-Smirnov test statistic.
+    p : float
+        Corresponding p-value.
+
+    """
+    alternative = {'t': 'two-sided', 'g': 'greater', 'l': 'less'}.get(
+       alternative.lower()[0], alternative)
+    return scipy.stats._stats_py.ks_1samp(
+        x, cdf, args=args, alternative=alternative, method=method)
+
+
+@_rename_parameter("mode", "method")
+def ks_2samp(data1, data2, alternative="two-sided", method='auto'):
+    """
+    Computes the Kolmogorov-Smirnov test on two samples.
+
+    Missing values in `x` and/or `y` are discarded.
+
+    Parameters
+    ----------
+    data1 : array_like
+        First data set
+    data2 : array_like
+        Second data set
+    alternative : {'two-sided', 'less', 'greater'}, optional
+        Indicates the alternative hypothesis.  Default is 'two-sided'.
+    method : {'auto', 'exact', 'asymp'}, optional
+        Defines the method used for calculating the p-value.
+        The following options are available (default is 'auto'):
+
+          * 'auto' : use 'exact' for small size arrays, 'asymp' for large
+          * 'exact' : use approximation to exact distribution of test statistic
+          * 'asymp' : use asymptotic distribution of test statistic
+
+    Returns
+    -------
+    d : float
+        Value of the Kolmogorov-Smirnov test statistic.
+    p : float
+        Corresponding p-value.
+
+    """
+    # Ideally this would be accomplished by
+    # ks_2samp = scipy.stats._stats_py.ks_2samp
+    # but the circular dependencies between _mstats_basic and stats prevent that.
+    alternative = {'t': 'two-sided', 'g': 'greater', 'l': 'less'}.get(
+       alternative.lower()[0], alternative)
+    return scipy.stats._stats_py.ks_2samp(data1, data2,
+                                          alternative=alternative,
+                                          method=method)
+
+
+ks_twosamp = ks_2samp
+
+
+@_rename_parameter("mode", "method")
+def kstest(data1, data2, args=(), alternative='two-sided', method='auto'):
+    """
+    Computes the Kolmogorov-Smirnov test, delegating to `scipy.stats.kstest`.
+
+    Parameters
+    ----------
+    data1 : array_like
+    data2 : str, callable or array_like
+    args : tuple, sequence, optional
+        Distribution parameters, used if `data1` or `data2` are strings.
+    alternative : str, as documented in stats.kstest
+    method : str, as documented in stats.kstest
+
+    Returns
+    -------
+    tuple of (K-S statistic, probability)
+
+    """
+    return scipy.stats._stats_py.kstest(data1, data2, args,
+                                        alternative=alternative, method=method)
+
+
+def trima(a, limits=None, inclusive=(True,True)):
+    """
+    Trims an array by masking the data outside some given limits.
+
+    Returns a masked version of the input array.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    limits : {None, tuple}, optional
+        Tuple of (lower limit, upper limit) in absolute values.
+        Values of the input array lower (greater) than the lower (upper) limit
+        will be masked.  A limit of None indicates an open interval.
+    inclusive : (bool, bool) tuple, optional
+        Tuple of (lower flag, upper flag), indicating whether values exactly
+        equal to the lower (upper) limit are allowed.
+
+    Examples
+    --------
+    >>> from scipy.stats.mstats import trima
+    >>> import numpy as np
+
+    >>> a = np.arange(10)
+
+    The interval is left-closed and right-open, i.e., `[2, 8)`.
+    Trim the array by keeping only values in the interval.
+
+    >>> trima(a, limits=(2, 8), inclusive=(True, False))
+    masked_array(data=[--, --, 2, 3, 4, 5, 6, 7, --, --],
+                 mask=[ True,  True, False, False, False, False, False, False,
+                        True,  True],
+           fill_value=999999)
+
+    """
+    a = ma.asarray(a)
+    a.unshare_mask()
+    if (limits is None) or (limits == (None, None)):
+        return a
+
+    (lower_lim, upper_lim) = limits
+    (lower_in, upper_in) = inclusive
+    condition = False
+    if lower_lim is not None:
+        if lower_in:
+            condition |= (a < lower_lim)
+        else:
+            condition |= (a <= lower_lim)
+
+    if upper_lim is not None:
+        if upper_in:
+            condition |= (a > upper_lim)
+        else:
+            condition |= (a >= upper_lim)
+
+    a[condition.filled(True)] = masked
+    return a
+
+
+def trimr(a, limits=None, inclusive=(True, True), axis=None):
+    """
+    Trims an array by masking some proportion of the data on each end.
+    Returns a masked version of the input array.
+
+    Parameters
+    ----------
+    a : sequence
+        Input array.
+    limits : {None, tuple}, optional
+        Tuple of the percentages to cut on each side of the array, with respect
+        to the number of unmasked data, as floats between 0. and 1.
+        Noting n the number of unmasked data before trimming, the
+        (n*limits[0])th smallest data and the (n*limits[1])th largest data are
+        masked, and the total number of unmasked data after trimming is
+        n*(1.-sum(limits)).  The value of one limit can be set to None to
+        indicate an open interval.
+    inclusive : {(True,True) tuple}, optional
+        Tuple of flags indicating whether the number of data being masked on
+        the left (right) end should be truncated (True) or rounded (False) to
+        integers.
+    axis : {None,int}, optional
+        Axis along which to trim. If None, the whole array is trimmed, but its
+        shape is maintained.
+
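+    Examples
+    --------
+    A minimal sketch: with ``limits=(0.1, 0.2)`` and ten unmasked values,
+    the one smallest and the two largest values are masked:
+
+    >>> from scipy.stats.mstats import trimr
+    >>> print(trimr([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], limits=(0.1, 0.2)))
+    [-- 1 2 3 4 5 6 7 -- --]
+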
+    """
+    def _trimr1D(a, low_limit, up_limit, low_inclusive, up_inclusive):
+        n = a.count()
+        idx = a.argsort()
+        if low_limit:
+            if low_inclusive:
+                lowidx = int(low_limit*n)
+            else:
+                lowidx = int(np.round(low_limit*n))
+            a[idx[:lowidx]] = masked
+        if up_limit is not None:
+            if up_inclusive:
+                upidx = n - int(n*up_limit)
+            else:
+                upidx = n - int(np.round(n*up_limit))
+            a[idx[upidx:]] = masked
+        return a
+
+    a = ma.asarray(a)
+    a.unshare_mask()
+    if limits is None:
+        return a
+
+    # Check the limits
+    (lolim, uplim) = limits
+    errmsg = "The proportion to cut from the %s should be between 0. and 1."
+    if lolim is not None:
+        if lolim > 1. or lolim < 0:
+            raise ValueError(errmsg % 'beginning' + "(got %s)" % lolim)
+    if uplim is not None:
+        if uplim > 1. or uplim < 0:
+            raise ValueError(errmsg % 'end' + "(got %s)" % uplim)
+
+    (loinc, upinc) = inclusive
+
+    if axis is None:
+        shp = a.shape
+        return _trimr1D(a.ravel(),lolim,uplim,loinc,upinc).reshape(shp)
+    else:
+        return ma.apply_along_axis(_trimr1D, axis, a, lolim,uplim,loinc,upinc)
+
+
+trimdoc = """
+    Parameters
+    ----------
+    a : sequence
+        Input array
+    limits : {None, tuple}, optional
+        If `relative` is False, tuple (lower limit, upper limit) in absolute values.
+        Values of the input array lower (greater) than the lower (upper) limit are
+        masked.
+
+        If `relative` is True, tuple (lower percentage, upper percentage) to cut
+        on each side of the  array, with respect to the number of unmasked data.
+
+        Noting n the number of unmasked data before trimming, the (n*limits[0])th
+        smallest data and the (n*limits[1])th largest data are masked, and the
+        total number of unmasked data after trimming is n*(1.-sum(limits)).
+        In each case, the value of one limit can be set to None to indicate an
+        open interval.
+
+        If limits is None, no trimming is performed.
+    inclusive : {(bool, bool) tuple}, optional
+        If `relative` is False, tuple indicating whether values exactly equal
+        to the absolute limits are allowed.
+        If `relative` is True, tuple indicating whether the number of data
+        being masked on each side should be rounded (True) or truncated
+        (False).
+    relative : bool, optional
+        Whether to consider the limits as absolute values (False) or proportions
+        to cut (True).
+    axis : int, optional
+        Axis along which to trim.
+"""
+
+
+def trim(a, limits=None, inclusive=(True,True), relative=False, axis=None):
+    """
+    Trims an array by masking the data outside some given limits.
+
+    Returns a masked version of the input array.
+
+    %s
+
+    Examples
+    --------
+    >>> from scipy.stats.mstats import trim
+    >>> z = [ 1, 2, 3, 4, 5, 6, 7, 8, 9,10]
+    >>> print(trim(z,(3,8)))
+    [-- -- 3 4 5 6 7 8 -- --]
+    >>> print(trim(z,(0.1,0.2),relative=True))
+    [-- 2 3 4 5 6 7 8 -- --]
+
+    """
+    if relative:
+        return trimr(a, limits=limits, inclusive=inclusive, axis=axis)
+    else:
+        return trima(a, limits=limits, inclusive=inclusive)
+
+
+if trim.__doc__:
+    trim.__doc__ = trim.__doc__ % trimdoc
+
+
+def trimboth(data, proportiontocut=0.2, inclusive=(True,True), axis=None):
+    """
+    Trims the smallest and largest data values.
+
+    Trims the `data` by masking the ``int(proportiontocut * n)`` smallest and
+    ``int(proportiontocut * n)`` largest values of data along the given axis,
+    where n is the number of unmasked values before trimming.
+
+    Parameters
+    ----------
+    data : ndarray
+        Data to trim.
+    proportiontocut : float, optional
+        Percentage of trimming (as a float between 0 and 1).
+        If n is the number of unmasked values before trimming, the number of
+        values after trimming is ``(1 - 2*proportiontocut) * n``.
+        Default is 0.2.
+    inclusive : {(bool, bool) tuple}, optional
+        Tuple indicating whether the number of data being masked on each side
+        should be rounded (True) or truncated (False).
+    axis : int, optional
+        Axis along which to perform the trimming.
+        If None, the input array is first flattened.
+
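+    Examples
+    --------
+    A minimal sketch: with the default ``proportiontocut=0.2``, the two
+    smallest and the two largest of ten values are masked:
+
+    >>> from scipy.stats.mstats import trimboth
+    >>> print(trimboth([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))
+    [-- -- 2 3 4 5 6 7 -- --]
+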
+    """
+    return trimr(data, limits=(proportiontocut,proportiontocut),
+                 inclusive=inclusive, axis=axis)
+
+
+def trimtail(data, proportiontocut=0.2, tail='left', inclusive=(True,True),
+             axis=None):
+    """
+    Trims the data by masking values from one tail.
+
+    Parameters
+    ----------
+    data : array_like
+        Data to trim.
+    proportiontocut : float, optional
+        Percentage of trimming. If n is the number of unmasked values
+        before trimming, the number of values after trimming is
+        ``(1 - proportiontocut) * n``.  Default is 0.2.
+    tail : {'left','right'}, optional
+        If 'left' the `proportiontocut` lowest values will be masked.
+        If 'right' the `proportiontocut` highest values will be masked.
+        Default is 'left'.
+    inclusive : {(bool, bool) tuple}, optional
+        Tuple indicating whether the number of data being masked on each side
+        should be rounded (True) or truncated (False).  Default is
+        (True, True).
+    axis : int, optional
+        Axis along which to perform the trimming.
+        If None, the input array is first flattened.  Default is None.
+
+    Returns
+    -------
+    trimtail : ndarray
+        Returned array of same shape as `data` with masked tail values.
+
+    """
+    tail = str(tail).lower()[0]
+    if tail == 'l':
+        limits = (proportiontocut,None)
+    elif tail == 'r':
+        limits = (None, proportiontocut)
+    else:
+        raise TypeError("The tail argument should be in ('left','right')")
+
+    return trimr(data, limits=limits, axis=axis, inclusive=inclusive)
+
+
+trim1 = trimtail
+
+
+def trimmed_mean(a, limits=(0.1,0.1), inclusive=(1,1), relative=True,
+                 axis=None):
+    """Returns the trimmed mean of the data along the given axis.
+
+    %s
+
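+    Examples
+    --------
+    A minimal sketch: with ``limits=(0.1, 0.1)``, the smallest and the
+    largest of the ten values are trimmed before averaging:
+
+    >>> from scipy.stats.mstats import trimmed_mean
+    >>> print(trimmed_mean([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], limits=(0.1, 0.1)))
+    5.5
+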
+    """
+    if (not isinstance(limits,tuple)) and isinstance(limits,float):
+        limits = (limits, limits)
+    if relative:
+        return trimr(a,limits=limits,inclusive=inclusive,axis=axis).mean(axis=axis)
+    else:
+        return trima(a,limits=limits,inclusive=inclusive).mean(axis=axis)
+
+
+if trimmed_mean.__doc__:
+    trimmed_mean.__doc__ = trimmed_mean.__doc__ % trimdoc
+
+
+def trimmed_var(a, limits=(0.1,0.1), inclusive=(1,1), relative=True,
+                axis=None, ddof=0):
+    """Returns the trimmed variance of the data along the given axis.
+
+    %s
+    ddof : {0,integer}, optional
+        Delta degrees of freedom. The denominator used during computations
+        is (n-ddof). ddof=0 corresponds to a biased estimate of the variance,
+        ddof=1 to an unbiased estimate.
+
+    """
+    if (not isinstance(limits,tuple)) and isinstance(limits,float):
+        limits = (limits, limits)
+    if relative:
+        out = trimr(a,limits=limits, inclusive=inclusive,axis=axis)
+    else:
+        out = trima(a,limits=limits,inclusive=inclusive)
+
+    return out.var(axis=axis, ddof=ddof)
+
+
+if trimmed_var.__doc__:
+    trimmed_var.__doc__ = trimmed_var.__doc__ % trimdoc
+
+
+def trimmed_std(a, limits=(0.1,0.1), inclusive=(1,1), relative=True,
+                axis=None, ddof=0):
+    """Returns the trimmed standard deviation of the data along the given axis.
+
+    %s
+    ddof : {0,integer}, optional
+        Delta degrees of freedom. The denominator used during computations
+        is (n-ddof). ddof=0 corresponds to a biased estimate of the variance,
+        ddof=1 to an unbiased estimate.
+
+    """
+    if (not isinstance(limits,tuple)) and isinstance(limits,float):
+        limits = (limits, limits)
+    if relative:
+        out = trimr(a,limits=limits,inclusive=inclusive,axis=axis)
+    else:
+        out = trima(a,limits=limits,inclusive=inclusive)
+    return out.std(axis=axis,ddof=ddof)
+
+
+if trimmed_std.__doc__:
+    trimmed_std.__doc__ = trimmed_std.__doc__ % trimdoc
+
+
+def trimmed_stde(a, limits=(0.1,0.1), inclusive=(1,1), axis=None):
+    """
+    Returns the standard error of the trimmed mean along the given axis.
+
+    Parameters
+    ----------
+    a : sequence
+        Input array
+    limits : {(0.1,0.1), tuple of float}, optional
+        tuple (lower percentage, upper percentage) to cut on each side of the
+        array, with respect to the number of unmasked data.
+
+        If n is the number of unmasked data before trimming, the
+        ``n * limits[0]`` smallest values and the ``n * limits[1]``
+        largest values are masked, and the total number of unmasked
+        data after trimming is ``n * (1.-sum(limits))``.  In each case,
+        the value of one limit can be set to None to indicate an open interval.
+        If `limits` is None, no trimming is performed.
+    inclusive : {(bool, bool) tuple}, optional
+        Tuple indicating whether the number of data being masked on each side
+        should be rounded (True) or truncated (False).
+    axis : int, optional
+        Axis along which to trim.
+
+    Returns
+    -------
+    trimmed_stde : scalar or ndarray
+
+    """
+    def _trimmed_stde_1D(a, low_limit, up_limit, low_inclusive, up_inclusive):
+        "Returns the standard error of the trimmed mean for a 1D input data."
+        n = a.count()
+        idx = a.argsort()
+        if low_limit:
+            if low_inclusive:
+                lowidx = int(low_limit*n)
+            else:
+                lowidx = int(np.round(low_limit*n))
+            a[idx[:lowidx]] = masked
+        if up_limit is not None:
+            if up_inclusive:
+                upidx = n - int(n*up_limit)
+            else:
+                upidx = n - int(np.round(n*up_limit))
+            a[idx[upidx:]] = masked
+        a[idx[:lowidx]] = a[idx[lowidx]]
+        a[idx[upidx:]] = a[idx[upidx-1]]
+        winstd = a.std(ddof=1)
+        return winstd / ((1-low_limit-up_limit)*np.sqrt(len(a)))
+
+    a = ma.array(a, copy=True, subok=True)
+    a.unshare_mask()
+    if limits is None:
+        return a.std(axis=axis,ddof=1)/ma.sqrt(a.count(axis))
+    if (not isinstance(limits,tuple)) and isinstance(limits,float):
+        limits = (limits, limits)
+
+    # Check the limits
+    (lolim, uplim) = limits
+    errmsg = "The proportion to cut from the %s should be between 0. and 1."
+    if lolim is not None:
+        if lolim > 1. or lolim < 0:
+            raise ValueError(errmsg % 'beginning' + "(got %s)" % lolim)
+    if uplim is not None:
+        if uplim > 1. or uplim < 0:
+            raise ValueError(errmsg % 'end' + "(got %s)" % uplim)
+
+    (loinc, upinc) = inclusive
+    if (axis is None):
+        return _trimmed_stde_1D(a.ravel(),lolim,uplim,loinc,upinc)
+    else:
+        if a.ndim > 2:
+            raise ValueError("Array 'a' must be at most two dimensional, "
+                             "but got a.ndim = %d" % a.ndim)
+        return ma.apply_along_axis(_trimmed_stde_1D, axis, a,
+                                   lolim,uplim,loinc,upinc)
+
+
+def _mask_to_limits(a, limits, inclusive):
+    """Mask an array for values outside of given limits.
+
+    This is primarily a utility function.
+
+    Parameters
+    ----------
+    a : array
+    limits : (float or None, float or None)
+    A tuple consisting of the (lower limit, upper limit).  Values in the
+    input array less than the lower limit or greater than the upper limit
+    will be masked out. None implies no limit.
+    inclusive : (bool, bool)
+    A tuple consisting of the (lower flag, upper flag).  These flags
+    determine whether values exactly equal to lower or upper are allowed.
+
+    Returns
+    -------
+    A MaskedArray.
+
+    Raises
+    ------
+    A ValueError if there are no values within the given limits.
+    """
+    lower_limit, upper_limit = limits
+    lower_include, upper_include = inclusive
+    am = ma.MaskedArray(a)
+    if lower_limit is not None:
+        if lower_include:
+            am = ma.masked_less(am, lower_limit)
+        else:
+            am = ma.masked_less_equal(am, lower_limit)
+
+    if upper_limit is not None:
+        if upper_include:
+            am = ma.masked_greater(am, upper_limit)
+        else:
+            am = ma.masked_greater_equal(am, upper_limit)
+
+    if am.count() == 0:
+        raise ValueError("No array values within given limits")
+
+    return am
+
+
+def tmean(a, limits=None, inclusive=(True, True), axis=None):
+    """
+    Compute the trimmed mean.
+
+    Parameters
+    ----------
+    a : array_like
+        Array of values.
+    limits : None or (lower limit, upper limit), optional
+        Values in the input array less than the lower limit or greater than the
+        upper limit will be ignored.  When limits is None (default), then all
+        values are used.  Either of the limit values in the tuple can also be
+        None representing a half-open interval.
+    inclusive : (bool, bool), optional
+        A tuple consisting of the (lower flag, upper flag).  These flags
+        determine whether values exactly equal to the lower or upper limits
+        are included.  The default value is (True, True).
+    axis : int or None, optional
+        Axis along which to operate. If None, compute over the
+        whole array. Default is None.
+
+    Returns
+    -------
+    tmean : float
+
+    Notes
+    -----
+    For more details on `tmean`, see `scipy.stats.tmean`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.stats import mstats
+    >>> a = np.array([[6, 8, 3, 0],
+    ...               [3, 9, 1, 2],
+    ...               [8, 7, 8, 2],
+    ...               [5, 6, 0, 2],
+    ...               [4, 5, 5, 2]])
+    >>> mstats.tmean(a, (2,5))
+    3.3
+    >>> mstats.tmean(a, (2,5), axis=0)
+    masked_array(data=[4.0, 5.0, 4.0, 2.0],
+                 mask=[False, False, False, False],
+           fill_value=1e+20)
+
+    """
+    return trima(a, limits=limits, inclusive=inclusive).mean(axis=axis)
+
+
+def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
+    """
+    Compute the trimmed variance
+
+    This function computes the sample variance of an array of values,
+    while ignoring values which are outside of given `limits`.
+
+    Parameters
+    ----------
+    a : array_like
+        Array of values.
+    limits : None or (lower limit, upper limit), optional
+        Values in the input array less than the lower limit or greater than the
+        upper limit will be ignored. When limits is None, then all values are
+        used. Either of the limit values in the tuple can also be None
+        representing a half-open interval.  The default value is None.
+    inclusive : (bool, bool), optional
+        A tuple consisting of the (lower flag, upper flag).  These flags
+        determine whether values exactly equal to the lower or upper limits
+        are included.  The default value is (True, True).
+    axis : int or None, optional
+        Axis along which to operate. If None, compute over the
+        whole array. Default is zero.
+    ddof : int, optional
+        Delta degrees of freedom. Default is 1.
+
+    Returns
+    -------
+    tvar : float
+        Trimmed variance.
+
+    Notes
+    -----
+    For more details on `tvar`, see `scipy.stats.tvar`.
+
+    """
+    a = a.astype(float).ravel()
+    if limits is None:
+        n = (~a.mask).sum()  # todo: better way to do that?
+        return np.ma.var(a) * n/(n-1.)
+    am = _mask_to_limits(a, limits=limits, inclusive=inclusive)
+
+    return np.ma.var(am, axis=axis, ddof=ddof)
+
+
+def tmin(a, lowerlimit=None, axis=0, inclusive=True):
+    """
+    Compute the trimmed minimum
+
+    Parameters
+    ----------
+    a : array_like
+        array of values
+    lowerlimit : None or float, optional
+        Values in the input array less than the given limit will be ignored.
+        When lowerlimit is None, then all values are used. The default value
+        is None.
+    axis : int or None, optional
+        Axis along which to operate. Default is 0. If None, compute over the
+        whole array `a`.
+    inclusive : {True, False}, optional
+        This flag determines whether values exactly equal to the lower limit
+        are included.  The default value is True.
+
+    Returns
+    -------
+    tmin : float, int or ndarray
+
+    Notes
+    -----
+    For more details on `tmin`, see `scipy.stats.tmin`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.stats import mstats
+    >>> a = np.array([[6, 8, 3, 0],
+    ...               [3, 2, 1, 2],
+    ...               [8, 1, 8, 2],
+    ...               [5, 3, 0, 2],
+    ...               [4, 7, 5, 2]])
+    >>> mstats.tmin(a, 5)
+    masked_array(data=[5, 7, 5, --],
+                 mask=[False, False, False,  True],
+           fill_value=999999)
+
+    """
+    a, axis = _chk_asarray(a, axis)
+    am = trima(a, (lowerlimit, None), (inclusive, False))
+    return ma.minimum.reduce(am, axis)
+
+
+def tmax(a, upperlimit=None, axis=0, inclusive=True):
+    """
+    Compute the trimmed maximum
+
+    This function computes the maximum value of an array along a given axis,
+    while ignoring values larger than a specified upper limit.
+
+    Parameters
+    ----------
+    a : array_like
+        array of values
+    upperlimit : None or float, optional
+        Values in the input array greater than the given limit will be ignored.
+        When upperlimit is None, then all values are used. The default value
+        is None.
+    axis : int or None, optional
+        Axis along which to operate. Default is 0. If None, compute over the
+        whole array `a`.
+    inclusive : {True, False}, optional
+        This flag determines whether values exactly equal to the upper limit
+        are included.  The default value is True.
+
+    Returns
+    -------
+    tmax : float, int or ndarray
+
+    Notes
+    -----
+    For more details on `tmax`, see `scipy.stats.tmax`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.stats import mstats
+    >>> a = np.array([[6, 8, 3, 0],
+    ...               [3, 9, 1, 2],
+    ...               [8, 7, 8, 2],
+    ...               [5, 6, 0, 2],
+    ...               [4, 5, 5, 2]])
+    >>> mstats.tmax(a, 4)
+    masked_array(data=[4, --, 3, 2],
+                 mask=[False,  True, False, False],
+           fill_value=999999)
+
+    """
+    a, axis = _chk_asarray(a, axis)
+    am = trima(a, (None, upperlimit), (False, inclusive))
+    return ma.maximum.reduce(am, axis)
+
+
+def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
+    """
+    Compute the trimmed standard error of the mean.
+
+    This function finds the standard error of the mean for given
+    values, ignoring values outside the given `limits`.
+
+    Parameters
+    ----------
+    a : array_like
+        array of values
+    limits : None or (lower limit, upper limit), optional
+        Values in the input array less than the lower limit or greater than the
+        upper limit will be ignored. When limits is None, then all values are
+        used. Either of the limit values in the tuple can also be None
+        representing a half-open interval.  The default value is None.
+    inclusive : (bool, bool), optional
+        A tuple consisting of the (lower flag, upper flag).  These flags
+        determine whether values exactly equal to the lower or upper limits
+        are included.  The default value is (True, True).
+    axis : int or None, optional
+        Axis along which to operate. If None, compute over the
+        whole array. Default is zero.
+    ddof : int, optional
+        Delta degrees of freedom. Default is 1.
+
+    Returns
+    -------
+    tsem : float
+
+    Notes
+    -----
+    For more details on `tsem`, see `scipy.stats.tsem`.
+
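+    Examples
+    --------
+    A minimal sketch on five arbitrary values, with no trimming limits:
+
+    >>> import numpy as np
+    >>> from scipy.stats import mstats
+    >>> print(np.round(mstats.tsem([1., 2., 3., 4., 5.]), 4))
+    0.7071
+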
+    """
+    a = ma.asarray(a).ravel()
+    if limits is None:
+        n = float(a.count())
+        return a.std(axis=axis, ddof=ddof)/ma.sqrt(n)
+
+    am = trima(a.ravel(), limits, inclusive)
+    sd = np.sqrt(am.var(axis=axis, ddof=ddof))
+    return sd / np.sqrt(am.count())
+
+
+def winsorize(a, limits=None, inclusive=(True, True), inplace=False,
+              axis=None, nan_policy='propagate'):
+    """Returns a Winsorized version of the input array.
+
+    The (limits[0])th lowest values are set to the (limits[0])th percentile,
+    and the (limits[1])th highest values are set to the (1 - limits[1])th
+    percentile.
+    Masked values are skipped.
+
+
+    Parameters
+    ----------
+    a : sequence
+        Input array.
+    limits : {None, tuple of float}, optional
+        Tuple of the percentages to cut on each side of the array, with respect
+        to the number of unmasked data, as floats between 0. and 1.
+        Noting n the number of unmasked data before winsorizing, the
+        (n*limits[0])th smallest data and the (n*limits[1])th largest data
+        are replaced by the nearest remaining values, so that roughly
+        n*(1.-sum(limits)) of the data are left unchanged. The value of one
+        limit can be set to None to indicate an open interval.
+    inclusive : {(True, True) tuple}, optional
+        Tuple indicating whether the number of data being masked on each side
+        should be truncated (True) or rounded (False).
+    inplace : {False, True}, optional
+        Whether to winsorize in place (True) or to use a copy (False)
+    axis : {None, int}, optional
+        Axis along which to trim. If None, the whole array is trimmed, but its
+        shape is maintained.
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains nan.
+        The following options are available (default is 'propagate'):
+
+          * 'propagate': allows nan values and may overwrite or propagate them
+          * 'raise': throws an error
+          * 'omit': performs the calculations ignoring nan values
+
+    Notes
+    -----
+    This function is applied to reduce the effect of possibly spurious outliers
+    by limiting the extreme values.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.stats.mstats import winsorize
+
+    A shuffled array contains integers from 1 to 10.
+
+    >>> a = np.array([10, 4, 9, 8, 5, 3, 7, 2, 1, 6])
+
+    The lowest 10% of the values (i.e., `1`) and the highest 20% of the
+    values (i.e., `9` and `10`) are replaced.
+
+    >>> winsorize(a, limits=[0.1, 0.2])
+    masked_array(data=[8, 4, 8, 8, 5, 3, 7, 2, 2, 6],
+                 mask=False,
+           fill_value=999999)
+
+    """
+    def _winsorize1D(a, low_limit, up_limit, low_include, up_include,
+                     contains_nan, nan_policy):
+        n = a.count()
+        idx = a.argsort()
+        if contains_nan:
+            nan_count = np.count_nonzero(np.isnan(a))
+        if low_limit:
+            if low_include:
+                lowidx = int(low_limit * n)
+            else:
+                lowidx = np.round(low_limit * n).astype(int)
+            if contains_nan and nan_policy == 'omit':
+                lowidx = min(lowidx, n-nan_count-1)
+            a[idx[:lowidx]] = a[idx[lowidx]]
+        if up_limit is not None:
+            if up_include:
+                upidx = n - int(n * up_limit)
+            else:
+                upidx = n - np.round(n * up_limit).astype(int)
+            if contains_nan and nan_policy == 'omit':
+                a[idx[upidx:-nan_count]] = a[idx[upidx - 1]]
+            else:
+                a[idx[upidx:]] = a[idx[upidx - 1]]
+        return a
+
+    contains_nan, nan_policy = _contains_nan(a, nan_policy)
+    # We are going to modify a: better make a copy
+    a = ma.array(a, copy=np.logical_not(inplace))
+
+    if limits is None:
+        return a
+    if (not isinstance(limits, tuple)) and isinstance(limits, float):
+        limits = (limits, limits)
+
+    # Check the limits
+    (lolim, uplim) = limits
+    errmsg = "The proportion to cut from the %s should be between 0. and 1."
+    if lolim is not None:
+        if lolim > 1. or lolim < 0:
+            raise ValueError(errmsg % 'beginning' + "(got %s)" % lolim)
+    if uplim is not None:
+        if uplim > 1. or uplim < 0:
+            raise ValueError(errmsg % 'end' + "(got %s)" % uplim)
+
+    (loinc, upinc) = inclusive
+
+    if axis is None:
+        shp = a.shape
+        return _winsorize1D(a.ravel(), lolim, uplim, loinc, upinc,
+                            contains_nan, nan_policy).reshape(shp)
+    else:
+        return ma.apply_along_axis(_winsorize1D, axis, a, lolim, uplim, loinc,
+                                   upinc, contains_nan, nan_policy)
+
+
+def moment(a, moment=1, axis=0):
+    """
+    Calculates the nth moment about the mean for a sample.
+
+    Parameters
+    ----------
+    a : array_like
+       data
+    moment : int, optional
+       order of central moment that is returned
+    axis : int or None, optional
+       Axis along which the central moment is computed. Default is 0.
+       If None, compute over the whole array `a`.
+
+    Returns
+    -------
+    n-th central moment : ndarray or float
+       The appropriate moment along the given axis or over all values if axis
+       is None. The denominator for the moment calculation is the number of
+       observations; no degrees of freedom correction is done.
+
+    Notes
+    -----
+    For more details about `moment`, see `scipy.stats.moment`.
+
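+    Examples
+    --------
+    A small illustration (a quick sketch; masked entries are ignored in
+    the calculation):
+
+    >>> import numpy as np
+    >>> from scipy.stats.mstats import moment
+    >>> a = np.ma.array([0, 1, 2, 3, 4], mask=[0, 0, 0, 0, 1])
+    >>> float(moment(a, moment=2))  # second central moment of [0, 1, 2, 3]
+    1.25
+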
+    """
+    a, axis = _chk_asarray(a, axis)
+    if a.size == 0:
+        moment_shape = list(a.shape)
+        del moment_shape[axis]
+        dtype = a.dtype.type if a.dtype.kind in 'fc' else np.float64
+        # empty array, return nan(s) with shape matching `moment`
+        out_shape = (moment_shape if np.isscalar(moment)
+                    else [len(moment)] + moment_shape)
+        if len(out_shape) == 0:
+            return dtype(np.nan)
+        else:
+            return ma.array(np.full(out_shape, np.nan, dtype=dtype))
+
+    # for array_like moment input, return a value for each.
+    if not np.isscalar(moment):
+        mean = a.mean(axis, keepdims=True)
+        mmnt = [_moment(a, i, axis, mean=mean) for i in moment]
+        return ma.array(mmnt)
+    else:
+        return _moment(a, moment, axis)
+
+# Moment with optional pre-computed mean, equal to a.mean(axis, keepdims=True)
+def _moment(a, moment, axis, *, mean=None):
+    if np.abs(moment - np.round(moment)) > 0:
+        raise ValueError("All moment parameters must be integers")
+
+    if moment == 0 or moment == 1:
+        # By definition the zeroth moment about the mean is 1, and the first
+        # moment is 0.
+        shape = list(a.shape)
+        del shape[axis]
+        dtype = a.dtype.type if a.dtype.kind in 'fc' else np.float64
+
+        if len(shape) == 0:
+            return dtype(1.0 if moment == 0 else 0.0)
+        else:
+            return (ma.ones(shape, dtype=dtype) if moment == 0
+                    else ma.zeros(shape, dtype=dtype))
+    else:
+        # Exponentiation by squares: form exponent sequence
+        n_list = [moment]
+        current_n = moment
+        while current_n > 2:
+            if current_n % 2:
+                current_n = (current_n-1)/2
+            else:
+                current_n /= 2
+            n_list.append(current_n)
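+        # For example, moment=5 gives n_list=[5, 2.0]: start from
+        # (a - mean)**2, square it (4th power), then multiply once more
+        # by (a - mean) to reach the 5th power.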
+
+        # Starting point for exponentiation by squares
+        mean = a.mean(axis, keepdims=True) if mean is None else mean
+        a_zero_mean = a - mean
+        if n_list[-1] == 1:
+            s = a_zero_mean.copy()
+        else:
+            s = a_zero_mean**2
+
+        # Perform multiplications
+        for n in n_list[-2::-1]:
+            s = s**2
+            if n % 2:
+                s *= a_zero_mean
+        return s.mean(axis)
+
+
+def variation(a, axis=0, ddof=0):
+    """
+    Compute the coefficient of variation.
+
+    The coefficient of variation is the standard deviation divided by the
+    mean.  This function is equivalent to::
+
+        np.std(x, axis=axis, ddof=ddof) / np.mean(x, axis=axis)
+
+    The default for ``ddof`` is 0, but many definitions of the coefficient
+    of variation use the square root of the unbiased sample variance
+    for the sample standard deviation, which corresponds to ``ddof=1``.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    axis : int or None, optional
+        Axis along which to calculate the coefficient of variation. Default
+        is 0. If None, compute over the whole array `a`.
+    ddof : int, optional
+        Delta degrees of freedom.  Default is 0.
+
+    Returns
+    -------
+    variation : ndarray
+        The calculated variation along the requested axis.
+
+    Notes
+    -----
+    For more details about `variation`, see `scipy.stats.variation`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.stats.mstats import variation
+    >>> a = np.array([2,8,4])
+    >>> variation(a)
+    0.5345224838248487
+    >>> b = np.array([2,8,3,4])
+    >>> c = np.ma.masked_array(b, mask=[0,0,1,0])
+    >>> variation(c)
+    0.5345224838248487
+
+    In the example above, it can be seen that this works the same as
+    `scipy.stats.variation` except that `stats.mstats.variation` ignores
+    masked array elements.
+
+    """
+    a, axis = _chk_asarray(a, axis)
+    return a.std(axis, ddof=ddof)/a.mean(axis)
+
+
+def skew(a, axis=0, bias=True):
+    """
+    Computes the skewness of a data set.
+
+    Parameters
+    ----------
+    a : ndarray
+        data
+    axis : int or None, optional
+        Axis along which skewness is calculated. Default is 0.
+        If None, compute over the whole array `a`.
+    bias : bool, optional
+        If False, then the calculations are corrected for statistical bias.
+
+    Returns
+    -------
+    skewness : ndarray
+        The skewness of values along an axis, returning 0 where all values are
+        equal.
+
+    Notes
+    -----
+    For more details about `skew`, see `scipy.stats.skew`.
+
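+    Examples
+    --------
+    A minimal illustration (the masked value is ignored; the remaining
+    data are symmetric, so the skewness is zero):
+
+    >>> import numpy as np
+    >>> from scipy.stats.mstats import skew
+    >>> a = np.ma.array([1, 2, 3, 4, 100], mask=[0, 0, 0, 0, 1])
+    >>> float(skew(a))
+    0.0
+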
+    """
+    a, axis = _chk_asarray(a,axis)
+    mean = a.mean(axis, keepdims=True)
+    m2 = _moment(a, 2, axis, mean=mean)
+    m3 = _moment(a, 3, axis, mean=mean)
+    zero = (m2 <= (np.finfo(m2.dtype).resolution * mean.squeeze(axis))**2)
+    with np.errstate(all='ignore'):
+        vals = ma.where(zero, 0, m3 / m2**1.5)
+
+    if not bias and zero is not ma.masked and m2 is not ma.masked:
+        n = a.count(axis)
+        can_correct = ~zero & (n > 2)
+        if can_correct.any():
+            n = np.extract(can_correct, n)
+            m2 = np.extract(can_correct, m2)
+            m3 = np.extract(can_correct, m3)
+            nval = ma.sqrt((n-1.0)*n)/(n-2.0)*m3/m2**1.5
+            np.place(vals, can_correct, nval)
+    return vals
+
+
+def kurtosis(a, axis=0, fisher=True, bias=True):
+    """
+    Computes the kurtosis (Fisher or Pearson) of a dataset.
+
+    Kurtosis is the fourth central moment divided by the square of the
+    variance. If Fisher's definition is used, then 3.0 is subtracted from
+    the result to give 0.0 for a normal distribution.
+
+    If bias is False then the kurtosis is calculated using k statistics to
+    eliminate bias coming from biased moment estimators.
+
+    Use `kurtosistest` to see if the result is close enough to normal.
+
+    Parameters
+    ----------
+    a : array
+        data for which the kurtosis is calculated
+    axis : int or None, optional
+        Axis along which the kurtosis is calculated. Default is 0.
+        If None, compute over the whole array `a`.
+    fisher : bool, optional
+        If True, Fisher's definition is used (normal ==> 0.0). If False,
+        Pearson's definition is used (normal ==> 3.0).
+    bias : bool, optional
+        If False, then the calculations are corrected for statistical bias.
+
+    Returns
+    -------
+    kurtosis : array
+        The kurtosis of values along an axis. If all values are equal,
+        return -3 for Fisher's definition and 0 for Pearson's definition.
+
+    Notes
+    -----
+    For more details about `kurtosis`, see `scipy.stats.kurtosis`.
+
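+    Examples
+    --------
+    A small sketch (the masked value is discarded; result rounded for
+    readability):
+
+    >>> import numpy as np
+    >>> from scipy.stats.mstats import kurtosis
+    >>> a = np.ma.array([1, 2, 3, 4, 100], mask=[0, 0, 0, 0, 1])
+    >>> round(float(kurtosis(a)), 2)  # Fisher definition: normal -> 0.0
+    -1.36
+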
+    """
+    a, axis = _chk_asarray(a, axis)
+    mean = a.mean(axis, keepdims=True)
+    m2 = _moment(a, 2, axis, mean=mean)
+    m4 = _moment(a, 4, axis, mean=mean)
+    zero = (m2 <= (np.finfo(m2.dtype).resolution * mean.squeeze(axis))**2)
+    with np.errstate(all='ignore'):
+        vals = ma.where(zero, 0, m4 / m2**2.0)
+
+    if not bias and zero is not ma.masked and m2 is not ma.masked:
+        n = a.count(axis)
+        can_correct = ~zero & (n > 3)
+        if can_correct.any():
+            n = np.extract(can_correct, n)
+            m2 = np.extract(can_correct, m2)
+            m4 = np.extract(can_correct, m4)
+            nval = 1.0/(n-2)/(n-3)*((n*n-1.0)*m4/m2**2.0-3*(n-1)**2.0)
+            np.place(vals, can_correct, nval+3.0)
+    if fisher:
+        return vals - 3
+    else:
+        return vals
+
+
+DescribeResult = namedtuple('DescribeResult', ('nobs', 'minmax', 'mean',
+                                               'variance', 'skewness',
+                                               'kurtosis'))
+
+
+def describe(a, axis=0, ddof=0, bias=True):
+    """
+    Computes several descriptive statistics of the passed array.
+
+    Parameters
+    ----------
+    a : array_like
+        Data array
+    axis : int or None, optional
+        Axis along which to calculate statistics. Default 0. If None,
+        compute over the whole array `a`.
+    ddof : int, optional
+        degrees of freedom (default 0); note that the default ddof differs
+        from that of the same routine in `stats.describe`
+    bias : bool, optional
+        If False, then the skewness and kurtosis calculations are corrected for
+        statistical bias.
+
+    Returns
+    -------
+    nobs : int
+        size of the data (discarding missing values)
+
+    minmax : (int, int)
+        min, max
+
+    mean : float
+        arithmetic mean
+
+    variance : float
+        unbiased variance
+
+    skewness : float
+        biased skewness
+
+    kurtosis : float
+        biased kurtosis
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.stats.mstats import describe
+    >>> ma = np.ma.array(range(6), mask=[0, 0, 0, 1, 1, 1])
+    >>> describe(ma)
+    DescribeResult(nobs=3, minmax=(masked_array(data=0,
+                 mask=False,
+           fill_value=999999), masked_array(data=2,
+                 mask=False,
+           fill_value=999999)), mean=1.0, variance=0.6666666666666666,
+           skewness=masked_array(data=0., mask=False, fill_value=1e+20),
+            kurtosis=-1.5)
+
+    """
+    a, axis = _chk_asarray(a, axis)
+    n = a.count(axis)
+    mm = (ma.minimum.reduce(a, axis=axis), ma.maximum.reduce(a, axis=axis))
+    m = a.mean(axis)
+    v = a.var(axis, ddof=ddof)
+    sk = skew(a, axis, bias=bias)
+    kurt = kurtosis(a, axis, bias=bias)
+
+    return DescribeResult(n, mm, m, v, sk, kurt)
+
+
+def stde_median(data, axis=None):
+    """Returns the McKean-Schrader estimate of the standard error of the sample
+    median along the given axis. Masked values are discarded.
+
+    Parameters
+    ----------
+    data : ndarray
+        Input data.
+    axis : {None,int}, optional
+        Axis along which to compute the standard error.
+        If None, the input array is first flattened.
+
+    """
+    def _stdemed_1D(data):
+        data = np.sort(data.compressed())
+        n = len(data)
+        z = 2.5758293035489004
+        k = int(np.round((n+1)/2. - z * np.sqrt(n/4.),0))
+        return ((data[n-k] - data[k-1])/(2.*z))
+
+    data = ma.array(data, copy=False, subok=True)
+    if (axis is None):
+        return _stdemed_1D(data)
+    else:
+        if data.ndim > 2:
+            raise ValueError("Array 'data' must be at most two dimensional, "
+                             "but got data.ndim = %d" % data.ndim)
+        return ma.apply_along_axis(_stdemed_1D, axis, data)
+
+
+SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue'))
+
+
+def skewtest(a, axis=0, alternative='two-sided'):
+    """
+    Tests whether the skew is different from the normal distribution.
+
+    Parameters
+    ----------
+    a : array_like
+        The data to be tested.
+    axis : int or None, optional
+       Axis along which statistics are calculated. Default is 0.
+       If None, compute over the whole array `a`.
+    alternative : {'two-sided', 'less', 'greater'}, optional
+        Defines the alternative hypothesis. Default is 'two-sided'.
+        The following options are available:
+
+        * 'two-sided': the skewness of the distribution underlying the sample
+          is different from that of the normal distribution (i.e. 0)
+        * 'less': the skewness of the distribution underlying the sample
+          is less than that of the normal distribution
+        * 'greater': the skewness of the distribution underlying the sample
+          is greater than that of the normal distribution
+
+        .. versionadded:: 1.7.0
+
+    Returns
+    -------
+    statistic : array_like
+        The computed z-score for this test.
+    pvalue : array_like
+        A p-value for the hypothesis test.
+
+    Notes
+    -----
+    For more details about `skewtest`, see `scipy.stats.skewtest`.
+
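+    Examples
+    --------
+    An indicative example (statistic and p-value rounded; it agrees with
+    `scipy.stats.skewtest` on the same, unmasked data):
+
+    >>> import numpy as np
+    >>> from scipy.stats.mstats import skewtest
+    >>> res = skewtest(np.ma.array([1, 2, 3, 4, 5, 6, 7, 8]))
+    >>> round(float(res.statistic), 4), round(float(res.pvalue), 4)
+    (1.0108, 0.3121)
+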
+    """
+    a, axis = _chk_asarray(a, axis)
+    if axis is None:
+        a = a.ravel()
+        axis = 0
+    b2 = skew(a,axis)
+    n = a.count(axis)
+    if np.min(n) < 8:
+        raise ValueError(
+            "skewtest is not valid with less than 8 samples; %i samples"
+            " were given." % np.min(n))
+
+    y = b2 * ma.sqrt(((n+1)*(n+3)) / (6.0*(n-2)))
+    beta2 = (3.0*(n*n+27*n-70)*(n+1)*(n+3)) / ((n-2.0)*(n+5)*(n+7)*(n+9))
+    W2 = -1 + ma.sqrt(2*(beta2-1))
+    delta = 1/ma.sqrt(0.5*ma.log(W2))
+    alpha = ma.sqrt(2.0/(W2-1))
+    y = ma.where(y == 0, 1, y)
+    Z = delta*ma.log(y/alpha + ma.sqrt((y/alpha)**2+1))
+
+    return SkewtestResult(*scipy.stats._stats_py._normtest_finish(Z, alternative))
+
+
+KurtosistestResult = namedtuple('KurtosistestResult', ('statistic', 'pvalue'))
+
+
+def kurtosistest(a, axis=0, alternative='two-sided'):
+    """
+    Tests whether a dataset has normal kurtosis.
+
+    Parameters
+    ----------
+    a : array_like
+        array of the sample data
+    axis : int or None, optional
+       Axis along which to compute test. Default is 0. If None,
+       compute over the whole array `a`.
+    alternative : {'two-sided', 'less', 'greater'}, optional
+        Defines the alternative hypothesis.
+        The following options are available (default is 'two-sided'):
+
+        * 'two-sided': the kurtosis of the distribution underlying the sample
+          is different from that of the normal distribution
+        * 'less': the kurtosis of the distribution underlying the sample
+          is less than that of the normal distribution
+        * 'greater': the kurtosis of the distribution underlying the sample
+          is greater than that of the normal distribution
+
+        .. versionadded:: 1.7.0
+
+    Returns
+    -------
+    statistic : array_like
+        The computed z-score for this test.
+    pvalue : array_like
+        The p-value for the hypothesis test.
+
+    Notes
+    -----
+    For more details about `kurtosistest`, see `scipy.stats.kurtosistest`.
+
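+    Examples
+    --------
+    An indicative example (values rounded; with n = 20 observations no
+    small-sample warning is emitted):
+
+    >>> import numpy as np
+    >>> from scipy.stats.mstats import kurtosistest
+    >>> res = kurtosistest(np.ma.array(np.arange(20.)))
+    >>> round(float(res.statistic), 4), round(float(res.pvalue), 4)
+    (-1.7058, 0.088)
+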
+    """
+    a, axis = _chk_asarray(a, axis)
+    n = a.count(axis=axis)
+    if np.min(n) < 5:
+        raise ValueError(
+            "kurtosistest requires at least 5 observations; %i observations"
+            " were given." % np.min(n))
+    if np.min(n) < 20:
+        warnings.warn(
+            "kurtosistest only valid for n>=20 ... continuing anyway, n=%i" %
+            np.min(n))
+
+    b2 = kurtosis(a, axis, fisher=False)
+    E = 3.0*(n-1) / (n+1)
+    varb2 = 24.0*n*(n-2.)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5))
+    x = (b2-E)/ma.sqrt(varb2)
+    sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /
+                                                        (n*(n-2)*(n-3)))
+    A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
+    term1 = 1 - 2./(9.0*A)
+    denom = 1 + x*ma.sqrt(2/(A-4.0))
+    if np.ma.isMaskedArray(denom):
+        # For multi-dimensional array input
+        denom[denom == 0.0] = masked
+    elif denom == 0.0:
+        denom = masked
+
+    term2 = np.ma.where(denom > 0, ma.power((1-2.0/A)/denom, 1/3.0),
+                        -ma.power(-(1-2.0/A)/denom, 1/3.0))
+    Z = (term1 - term2) / np.sqrt(2/(9.0*A))
+
+    return KurtosistestResult(
+        *scipy.stats._stats_py._normtest_finish(Z, alternative)
+    )
+
+
+NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue'))
+
+
+def normaltest(a, axis=0):
+    """
+    Tests whether a sample differs from a normal distribution.
+
+    Parameters
+    ----------
+    a : array_like
+        The array containing the data to be tested.
+    axis : int or None, optional
+        Axis along which to compute test. Default is 0. If None,
+        compute over the whole array `a`.
+
+    Returns
+    -------
+    statistic : float or array
+        ``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and
+        ``k`` is the z-score returned by `kurtosistest`.
+    pvalue : float or array
+       A 2-sided chi squared probability for the hypothesis test.
+
+    Notes
+    -----
+    For more details about `normaltest`, see `scipy.stats.normaltest`.
+
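+    Examples
+    --------
+    A quick usage sketch (no output is asserted, since the statistic
+    depends on the random draw):
+
+    >>> import numpy as np
+    >>> from scipy.stats.mstats import normaltest
+    >>> rng = np.random.default_rng()
+    >>> x = np.ma.array(rng.standard_normal(100))
+    >>> stat, p = normaltest(x)  # a large p is consistent with normality
+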
+    """
+    a, axis = _chk_asarray(a, axis)
+    s, _ = skewtest(a, axis)
+    k, _ = kurtosistest(a, axis)
+    k2 = s*s + k*k
+
+    return NormaltestResult(k2, distributions.chi2.sf(k2, 2))
+
+
+def mquantiles(a, prob=list([.25,.5,.75]), alphap=.4, betap=.4, axis=None,
+               limit=()):
+    """
+    Computes empirical quantiles for a data array.
+
+    Sample quantiles are defined by ``Q(p) = (1-gamma)*x[j] + gamma*x[j+1]``,
+    where ``x[j]`` is the j-th order statistic, and gamma is a function of
+    ``j = floor(n*p + m)``, ``m = alphap + p*(1 - alphap - betap)`` and
+    ``gamma = n*p + m - j``.
+
+    Reinterpreting the above equations to compare to **R** leads to the
+    equation: ``p(k) = (k - alphap)/(n + 1 - alphap - betap)``
+
+    Typical values of (alphap,betap) are:
+        - (0,1)    : ``p(k) = k/n`` : linear interpolation of cdf
+          (**R** type 4)
+        - (.5,.5)  : ``p(k) = (k - 1/2.)/n`` : piecewise linear function
+          (**R** type 5)
+        - (0,0)    : ``p(k) = k/(n+1)`` :
+          (**R** type 6)
+        - (1,1)    : ``p(k) = (k-1)/(n-1)``: p(k) = mode[F(x[k])].
+          (**R** type 7, **R** default)
+        - (1/3,1/3): ``p(k) = (k-1/3)/(n+1/3)``: Then p(k) ~ median[F(x[k])].
+          The resulting quantile estimates are approximately median-unbiased
+          regardless of the distribution of x.
+          (**R** type 8)
+        - (3/8,3/8): ``p(k) = (k-3/8)/(n+1/4)``: Blom.
+          The resulting quantile estimates are approximately unbiased
+          if x is normally distributed
+          (**R** type 9)
+        - (.4,.4)  : approximately quantile unbiased (Cunnane)
+        - (.35,.35): APL, used with PWM
+
+    Parameters
+    ----------
+    a : array_like
+        Input data, as a sequence or array of dimension at most 2.
+    prob : array_like, optional
+        List of quantiles to compute.
+    alphap : float, optional
+        Plotting positions parameter, default is 0.4.
+    betap : float, optional
+        Plotting positions parameter, default is 0.4.
+    axis : int, optional
+        Axis along which to compute the quantiles.
+        If None (default), the input array is first flattened.
+    limit : tuple, optional
+        Tuple of (lower, upper) values.
+        Values of `a` outside this open interval are ignored.
+
+    Returns
+    -------
+    mquantiles : MaskedArray
+        An array containing the calculated quantiles.
+
+    Notes
+    -----
+    This formulation is very similar to **R**'s, except for the calculation
+    of ``m`` from ``alphap`` and ``betap``, where in **R** ``m`` is defined
+    separately for each type.
+
+    References
+    ----------
+    .. [1] *R* statistical software: https://www.r-project.org/
+    .. [2] *R* ``quantile`` function:
+            http://stat.ethz.ch/R-manual/R-devel/library/stats/html/quantile.html
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.stats.mstats import mquantiles
+    >>> a = np.array([6., 47., 49., 15., 42., 41., 7., 39., 43., 40., 36.])
+    >>> mquantiles(a)
+    array([ 19.2,  40. ,  42.8])
+
+    Using a 2D array, specifying axis and limit.
+
+    >>> data = np.array([[   6.,    7.,    1.],
+    ...                  [  47.,   15.,    2.],
+    ...                  [  49.,   36.,    3.],
+    ...                  [  15.,   39.,    4.],
+    ...                  [  42.,   40., -999.],
+    ...                  [  41.,   41., -999.],
+    ...                  [   7., -999., -999.],
+    ...                  [  39., -999., -999.],
+    ...                  [  43., -999., -999.],
+    ...                  [  40., -999., -999.],
+    ...                  [  36., -999., -999.]])
+    >>> print(mquantiles(data, axis=0, limit=(0, 50)))
+    [[19.2  14.6   1.45]
+     [40.   37.5   2.5 ]
+     [42.8  40.05  3.55]]
+
+    >>> data[:, 2] = -999.
+    >>> print(mquantiles(data, axis=0, limit=(0, 50)))
+    [[19.200000000000003 14.6 --]
+     [40.0 37.5 --]
+     [42.800000000000004 40.05 --]]
+
+    """
+    def _quantiles1D(data,m,p):
+        x = np.sort(data.compressed())
+        n = len(x)
+        if n == 0:
+            return ma.array(np.empty(len(p), dtype=float), mask=True)
+        elif n == 1:
+            return ma.array(np.resize(x, p.shape), mask=nomask)
+        aleph = (n*p + m)
+        k = np.floor(aleph.clip(1, n-1)).astype(int)
+        gamma = (aleph-k).clip(0,1)
+        return (1.-gamma)*x[(k-1).tolist()] + gamma*x[k.tolist()]
+
+    data = ma.array(a, copy=False)
+    if data.ndim > 2:
+        raise TypeError("Array should be 2D at most!")
+
+    if limit:
+        condition = (limit[0] < data) & (data < limit[1])
+        data[~condition.filled(True)] = masked
+
+    p = np.array(prob, copy=False, ndmin=1)
+    m = alphap + p*(1.-alphap-betap)
+    # Computes quantiles along axis (or globally)
+    if (axis is None):
+        return _quantiles1D(data, m, p)
+
+    return ma.apply_along_axis(_quantiles1D, axis, data, m, p)
+
+
+def scoreatpercentile(data, per, limit=(), alphap=.4, betap=.4):
+    """Calculate the score at the given 'per' percentile of the
+    sequence `data`.  For example, the score at per=50 is the median.
+
+    This function is a shortcut to `mquantiles`.
+
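+    Examples
+    --------
+    A minimal illustration (matches the 0.5 quantile of `mquantiles` with
+    the default plotting positions):
+
+    >>> from scipy.stats.mstats import scoreatpercentile
+    >>> a = [6., 47., 49., 15., 42., 41., 7., 39., 43., 40., 36.]
+    >>> float(scoreatpercentile(a, 50))
+    40.0
+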
+    """
+    if (per < 0) or (per > 100.):
+        raise ValueError("The percentile should be between 0. and 100. !"
+                         " (got %s)" % per)
+
+    return mquantiles(data, prob=[per/100.], alphap=alphap, betap=betap,
+                      limit=limit, axis=0).squeeze()
+
+
+def plotting_positions(data, alpha=0.4, beta=0.4):
+    """
+    Returns plotting positions (or empirical percentile points) for the data.
+
+    Plotting positions are defined as ``(i-alpha)/(n+1-alpha-beta)``, where:
+        - i is the rank order statistic
+        - n is the number of unmasked values along the given axis
+        - `alpha` and `beta` are two parameters.
+
+    Typical values for `alpha` and `beta` are:
+        - (0,1)    : ``p(k) = k/n``, linear interpolation of cdf (R, type 4)
+        - (.5,.5)  : ``p(k) = (k-1/2.)/n``, piecewise linear function
+          (R, type 5)
+        - (0,0)    : ``p(k) = k/(n+1)``, Weibull (R type 6)
+        - (1,1)    : ``p(k) = (k-1)/(n-1)``, in this case,
+          ``p(k) = mode[F(x[k])]``. That's R default (R type 7)
+        - (1/3,1/3): ``p(k) = (k-1/3)/(n+1/3)``, then
+          ``p(k) ~ median[F(x[k])]``.
+          The resulting quantile estimates are approximately median-unbiased
+          regardless of the distribution of x. (R type 8)
+        - (3/8,3/8): ``p(k) = (k-3/8)/(n+1/4)``, Blom.
+          The resulting quantile estimates are approximately unbiased
+          if x is normally distributed (R type 9)
+        - (.4,.4)  : approximately quantile unbiased (Cunnane)
+        - (.35,.35): APL, used with PWM
+        - (.3175, .3175): used in scipy.stats.probplot
+
+    Parameters
+    ----------
+    data : array_like
+        Input data, as a sequence or array of dimension at most 2.
+    alpha : float, optional
+        Plotting positions parameter. Default is 0.4.
+    beta : float, optional
+        Plotting positions parameter. Default is 0.4.
+
+    Returns
+    -------
+    positions : MaskedArray
+        The calculated plotting positions.
+
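+    Examples
+    --------
+    A small sketch of the default (Cunnane) positions,
+    ``(i - 0.4)/(n + 0.2)``:
+
+    >>> import numpy as np
+    >>> from scipy.stats.mstats import plotting_positions
+    >>> pos = plotting_positions([1, 2, 3])
+    >>> np.asarray(pos)
+    array([0.1875, 0.5   , 0.8125])
+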
+    """
+    data = ma.array(data, copy=False).reshape(1,-1)
+    n = data.count()
+    plpos = np.empty(data.size, dtype=float)
+    plpos[n:] = 0
+    plpos[data.argsort(axis=None)[:n]] = ((np.arange(1, n+1) - alpha) /
+                                          (n + 1.0 - alpha - beta))
+    return ma.array(plpos, mask=data._mask)
+
+
+meppf = plotting_positions
+
+
+def obrientransform(*args):
+    """
+    Computes a transform on input data (any number of columns).  Used to
+    test for homogeneity of variance prior to running one-way stats.  Each
+    array in ``*args`` is one level of a factor.  If an `f_oneway()` run on
+    the transformed data is found significant, the variances are unequal.
+    From Maxwell and Delaney, p.112.
+
+    Returns: transformed data for use in an ANOVA
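+
+    A brief usage sketch (illustrative only; each column of the result
+    corresponds to one input group):
+
+    >>> from scipy.stats import mstats
+    >>> x = [10, 11, 12, 13, 14]
+    >>> y = [10, 12, 14, 16, 18]
+    >>> t = mstats.obrientransform(x, y)
+    >>> res = mstats.f_oneway(*t.T)  # significant p => unequal variances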
+    """
+    data = argstoarray(*args).T
+    v = data.var(axis=0,ddof=1)
+    m = data.mean(0)
+    n = data.count(0).astype(float)
+    # result = ((n-1.5)*n*(data-m)**2 - 0.5*v*(n-1))/((n-1)*(n-2))
+    data -= m
+    data **= 2
+    data *= (n-1.5)*n
+    data -= 0.5*v*(n-1)
+    data /= (n-1.)*(n-2.)
+    if not ma.allclose(v,data.mean(0)):
+        raise ValueError("Lack of convergence in obrientransform.")
+
+    return data
+
+
+def sem(a, axis=0, ddof=1):
+    """
+    Calculates the standard error of the mean of the input array.
+
+    Also sometimes called standard error of measurement.
+
+    Parameters
+    ----------
+    a : array_like
+        An array containing the values for which the standard error is
+        returned.
+    axis : int or None, optional
+        If axis is None, ravel `a` first. If axis is an integer, this will be
+        the axis over which to operate. Defaults to 0.
+    ddof : int, optional
+        Delta degrees-of-freedom. How many degrees of freedom to adjust
+        for bias in limited samples relative to the population estimate
+        of variance. Defaults to 1.
+
+    Returns
+    -------
+    s : ndarray or float
+        The standard error of the mean in the sample(s), along the input axis.
+
+    Notes
+    -----
+    The default value for `ddof` changed in scipy 0.15.0 to be consistent with
+    `scipy.stats.sem` as well as with the most common definition used (like in
+    the R documentation).
+
+    Examples
+    --------
+    Find standard error along the first axis:
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> a = np.arange(20).reshape(5,4)
+    >>> print(stats.mstats.sem(a))
+    [2.8284271247461903 2.8284271247461903 2.8284271247461903
+     2.8284271247461903]
+
+    Find standard error across the whole array, using n degrees of freedom:
+
+    >>> print(stats.mstats.sem(a, axis=None, ddof=0))
+    1.2893796958227628
+
+    """
+    a, axis = _chk_asarray(a, axis)
+    n = a.count(axis=axis)
+    s = a.std(axis=axis, ddof=ddof) / ma.sqrt(n)
+    return s
+
+
+F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue'))
+
+
+def f_oneway(*args):
+    """
+    Performs a 1-way ANOVA, returning an F-value and probability given
+    any number of groups.  From Heiman, pp.394-7.
+
+    Usage: ``f_oneway(*args)``, where ``*args`` is 2 or more arrays,
+    one per treatment group.
+
+    Returns
+    -------
+    statistic : float
+        The computed F-value of the test.
+    pvalue : float
+        The associated p-value from the F-distribution.
+
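+    Examples
+    --------
+    A minimal two-group illustration (value rounded):
+
+    >>> from scipy.stats.mstats import f_oneway
+    >>> res = f_oneway([1, 3, 5, 7], [2, 4, 6, 8])
+    >>> round(float(res.statistic), 2)
+    0.3
+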
+    """
+    # Construct a single array of arguments: each row is a group
+    data = argstoarray(*args)
+    ngroups = len(data)
+    ntot = data.count()
+    sstot = (data**2).sum() - (data.sum())**2/float(ntot)
+    ssbg = (data.count(-1) * (data.mean(-1)-data.mean())**2).sum()
+    sswg = sstot-ssbg
+    dfbg = ngroups-1
+    dfwg = ntot - ngroups
+    msb = ssbg/float(dfbg)
+    msw = sswg/float(dfwg)
+    f = msb/msw
+    prob = special.fdtrc(dfbg, dfwg, f)  # equivalent to stats.f.sf
+
+    return F_onewayResult(f, prob)
+
+
+FriedmanchisquareResult = namedtuple('FriedmanchisquareResult',
+                                     ('statistic', 'pvalue'))
+
+
+def friedmanchisquare(*args):
+    """Friedman Chi-Square is a non-parametric, one-way within-subjects ANOVA.
+    This function calculates the Friedman Chi-square test for repeated measures
+    and returns the result, along with the associated probability value.
+
+    Each input is considered a given group. Ideally, the number of treatments
+    in each group should be equal. If this is not the case, only the first
+    n treatments are taken into account, where n is the number of treatments
+    of the smallest group.
+    If a group has some missing values, the corresponding treatments are
+    masked in the other groups; that is, masked values in one group are
+    propagated to the others.
+    The test statistic is corrected for ties.
+
+    Returns
+    -------
+    statistic : float
+        the test statistic.
+    pvalue : float
+        the associated p-value.
+
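+    Examples
+    --------
+    A small worked example (three groups that differ by a constant shift;
+    statistic and p-value rounded):
+
+    >>> from scipy.stats.mstats import friedmanchisquare
+    >>> res = friedmanchisquare([1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 5, 6])
+    >>> round(float(res.statistic), 1), round(float(res.pvalue), 4)
+    (8.0, 0.0183)
+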
+    """
+    data = argstoarray(*args).astype(float)
+    k = len(data)
+    if k < 3:
+        raise ValueError("Less than 3 groups (%i): " % k +
+                         "the Friedman test is NOT appropriate.")
+
+    ranked = ma.masked_values(rankdata(data, axis=0), 0)
+    if ranked._mask is not nomask:
+        ranked = ma.mask_cols(ranked)
+        ranked = ranked.compressed().reshape(k,-1).view(ndarray)
+    else:
+        ranked = ranked._data
+    (k,n) = ranked.shape
+    # Ties correction
+    repeats = [find_repeats(row) for row in ranked.T]
+    ties = np.array([y for x, y in repeats if x.size > 0])
+    tie_correction = 1 - (ties**3-ties).sum()/float(n*(k**3-k))
+
+    ssbg = np.sum((ranked.sum(-1) - n*(k+1)/2.)**2)
+    chisq = ssbg * 12./(n*k*(k+1)) * 1./tie_correction
+
+    return FriedmanchisquareResult(chisq,
+                                   distributions.chi2.sf(chisq, k-1))
+
+
+BrunnerMunzelResult = namedtuple('BrunnerMunzelResult', ('statistic', 'pvalue'))
+
+
+def brunnermunzel(x, y, alternative="two-sided", distribution="t"):
+    """
+    Computes the Brunner-Munzel test on samples x and y.
+
+    Missing values in `x` and/or `y` are discarded.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Array of samples, should be one-dimensional.
+    alternative : 'less', 'two-sided', or 'greater', optional
+        Whether to get the p-value for the one-sided hypothesis ('less'
+        or 'greater') or for the two-sided hypothesis ('two-sided').
+        Default value is 'two-sided'.
+    distribution : 't' or 'normal', optional
+        Whether to get the p-value by t-distribution or by standard normal
+        distribution.
+        Default value is 't'.
+
+    Returns
+    -------
+    statistic : float
+        The Brunner-Munzel W statistic.
+    pvalue : float
+        p-value assuming a t distribution. One-sided or
+        two-sided, depending on the choice of `alternative` and `distribution`.
+
+    See Also
+    --------
+    mannwhitneyu : Mann-Whitney rank test on two samples.
+
+    Notes
+    -----
+    For more details on `brunnermunzel`, see `scipy.stats.brunnermunzel`.
+
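+    Examples
+    --------
+    An indicative example (the same data as in the
+    `scipy.stats.brunnermunzel` documentation; values rounded):
+
+    >>> from scipy.stats.mstats import brunnermunzel
+    >>> x1 = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1]
+    >>> x2 = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4]
+    >>> res = brunnermunzel(x1, x2)
+    >>> round(float(res.statistic), 4), round(float(res.pvalue), 4)
+    (3.1375, 0.0058)
+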
+    """
+    x = ma.asarray(x).compressed().view(ndarray)
+    y = ma.asarray(y).compressed().view(ndarray)
+    nx = len(x)
+    ny = len(y)
+    if nx == 0 or ny == 0:
+        return BrunnerMunzelResult(np.nan, np.nan)
+    rankc = rankdata(np.concatenate((x,y)))
+    rankcx = rankc[0:nx]
+    rankcy = rankc[nx:nx+ny]
+    rankcx_mean = np.mean(rankcx)
+    rankcy_mean = np.mean(rankcy)
+    rankx = rankdata(x)
+    ranky = rankdata(y)
+    rankx_mean = np.mean(rankx)
+    ranky_mean = np.mean(ranky)
+
+    Sx = np.sum(np.power(rankcx - rankx - rankcx_mean + rankx_mean, 2.0))
+    Sx /= nx - 1
+    Sy = np.sum(np.power(rankcy - ranky - rankcy_mean + ranky_mean, 2.0))
+    Sy /= ny - 1
+
+    wbfn = nx * ny * (rankcy_mean - rankcx_mean)
+    wbfn /= (nx + ny) * np.sqrt(nx * Sx + ny * Sy)
+
+    if distribution == "t":
+        df_numer = np.power(nx * Sx + ny * Sy, 2.0)
+        df_denom = np.power(nx * Sx, 2.0) / (nx - 1)
+        df_denom += np.power(ny * Sy, 2.0) / (ny - 1)
+        df = df_numer / df_denom
+        p = distributions.t.cdf(wbfn, df)
+    elif distribution == "normal":
+        p = distributions.norm.cdf(wbfn)
+    else:
+        raise ValueError(
+            "distribution should be 't' or 'normal'")
+
+    if alternative == "greater":
+        pass
+    elif alternative == "less":
+        p = 1 - p
+    elif alternative == "two-sided":
+        p = 2 * np.min([p, 1-p])
+    else:
+        raise ValueError(
+            "alternative should be 'less', 'greater' or 'two-sided'")
+
+    return BrunnerMunzelResult(wbfn, p)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_mstats_extras.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_mstats_extras.py
new file mode 100644
index 00000000..b38bba2f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_mstats_extras.py
@@ -0,0 +1,500 @@
+"""
+Additional statistics functions with support for masked arrays.
+
+"""
+
+# Original author (2007): Pierre GF Gerard-Marchant
+
+
+__all__ = ['compare_medians_ms',
+           'hdquantiles', 'hdmedian', 'hdquantiles_sd',
+           'idealfourths',
+           'median_cihs','mjci','mquantiles_cimj',
+           'rsh',
+           'trimmed_mean_ci',]
+
+
+import numpy as np
+from numpy import float_, int_, ndarray
+
+import numpy.ma as ma
+from numpy.ma import MaskedArray
+
+from . import _mstats_basic as mstats
+
+from scipy.stats.distributions import norm, beta, t, binom
+
+
+def hdquantiles(data, prob=list([.25,.5,.75]), axis=None, var=False,):
+    """
+    Computes quantile estimates with the Harrell-Davis method.
+
+    The quantile estimates are calculated as a weighted linear combination
+    of order statistics.
+
+    Parameters
+    ----------
+    data : array_like
+        Data array.
+    prob : sequence, optional
+        Sequence of quantiles to compute.
+    axis : int or None, optional
+        Axis along which to compute the quantiles. If None, use a flattened
+        array.
+    var : bool, optional
+        Whether to return the variance of the estimate.
+
+    Returns
+    -------
+    hdquantiles : MaskedArray
+        A (p,) array of quantiles (if `var` is False), or a (2,p) array of
+        quantiles and variances (if `var` is True), where ``p`` is the
+        number of quantiles.
+
+    See Also
+    --------
+    hdquantiles_sd
+
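+    Examples
+    --------
+    A quick shape-level sketch (one estimate per requested quantile;
+    masked entries are ignored):
+
+    >>> import numpy as np
+    >>> from scipy.stats.mstats import hdquantiles
+    >>> data = np.ma.array(np.arange(10.), mask=[0]*9 + [1])
+    >>> hdquantiles(data, prob=[0.25, 0.5, 0.75]).shape
+    (3,)
+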
+    """
+    def _hd_1D(data,prob,var):
+        "Computes the HD quantiles for a 1D array. Returns nan for invalid data."
+        xsorted = np.squeeze(np.sort(data.compressed().view(ndarray)))
+        # Don't use length here, in case we have a numpy scalar
+        n = xsorted.size
+
+        hd = np.empty((2,len(prob)), float_)
+        if n < 2:
+            hd.flat = np.nan
+            if var:
+                return hd
+            return hd[0]
+
+        v = np.arange(n+1) / float(n)
+        betacdf = beta.cdf
+        for (i,p) in enumerate(prob):
+            _w = betacdf(v, (n+1)*p, (n+1)*(1-p))
+            w = _w[1:] - _w[:-1]
+            hd_mean = np.dot(w, xsorted)
+            hd[0,i] = hd_mean
+            #
+            hd[1,i] = np.dot(w, (xsorted-hd_mean)**2)
+            #
+        hd[0, prob == 0] = xsorted[0]
+        hd[0, prob == 1] = xsorted[-1]
+        if var:
+            hd[1, prob == 0] = hd[1, prob == 1] = np.nan
+            return hd
+        return hd[0]
+    # Initialization & checks
+    data = ma.array(data, copy=False, dtype=float_)
+    p = np.array(prob, copy=False, ndmin=1)
+    # Computes quantiles along axis (or globally)
+    if (axis is None) or (data.ndim == 1):
+        result = _hd_1D(data, p, var)
+    else:
+        if data.ndim > 2:
+            raise ValueError("Array 'data' must be at most two dimensional, "
+                             "but got data.ndim = %d" % data.ndim)
+        result = ma.apply_along_axis(_hd_1D, axis, data, p, var)
+
+    return ma.fix_invalid(result, copy=False)
+
+
+def hdmedian(data, axis=-1, var=False):
+    """
+    Returns the Harrell-Davis estimate of the median along the given axis.
+
+    Parameters
+    ----------
+    data : ndarray
+        Data array.
+    axis : int, optional
+        Axis along which to compute the quantiles. If None, use a flattened
+        array.
+    var : bool, optional
+        Whether to return the variance of the estimate.
+
+    Returns
+    -------
+    hdmedian : MaskedArray
+        The median values.  If ``var=True``, the variance is returned inside
+        the masked array.  E.g. for a 1-D array the shape changes from (1,)
+        to (2,).
+
+    """
+    result = hdquantiles(data,[0.5], axis=axis, var=var)
+    return result.squeeze()
+
+
+def hdquantiles_sd(data, prob=list([.25,.5,.75]), axis=None):
+    """
+    The standard error of the Harrell-Davis quantile estimates by jackknife.
+
+    Parameters
+    ----------
+    data : array_like
+        Data array.
+    prob : sequence, optional
+        Sequence of quantiles to compute.
+    axis : int, optional
+        Axis along which to compute the quantiles. If None, use a flattened
+        array.
+
+    Returns
+    -------
+    hdquantiles_sd : MaskedArray
+        Standard error of the Harrell-Davis quantile estimates.
+
+    See Also
+    --------
+    hdquantiles
+
+    """
+    def _hdsd_1D(data, prob):
+        "Computes the std error for 1D arrays."
+        xsorted = np.sort(data.compressed())
+        n = len(xsorted)
+
+        hdsd = np.empty(len(prob), float_)
+        if n < 2:
+            hdsd.flat = np.nan
+
+        vv = np.arange(n) / float(n-1)
+        betacdf = beta.cdf
+
+        for (i,p) in enumerate(prob):
+            _w = betacdf(vv, n*p, n*(1-p))
+            w = _w[1:] - _w[:-1]
+            # cumulative sum of weights and data points if
+            # ith point is left out for jackknife
+            mx_ = np.zeros_like(xsorted)
+            mx_[1:] = np.cumsum(w * xsorted[:-1])
+            # similar but from the right
+            mx_[:-1] += np.cumsum(w[::-1] * xsorted[:0:-1])[::-1]
+            hdsd[i] = np.sqrt(mx_.var() * (n - 1))
+        return hdsd
+
+    # Initialization & checks
+    data = ma.array(data, copy=False, dtype=float_)
+    p = np.array(prob, copy=False, ndmin=1)
+    # Computes quantiles along axis (or globally)
+    if (axis is None):
+        result = _hdsd_1D(data, p)
+    else:
+        if data.ndim > 2:
+            raise ValueError("Array 'data' must be at most two dimensional, "
+                             "but got data.ndim = %d" % data.ndim)
+        result = ma.apply_along_axis(_hdsd_1D, axis, data, p)
+
+    return ma.fix_invalid(result, copy=False).ravel()
+
+
+def trimmed_mean_ci(data, limits=(0.2,0.2), inclusive=(True,True),
+                    alpha=0.05, axis=None):
+    """
+    Selected confidence interval of the trimmed mean along the given axis.
+
+    Parameters
+    ----------
+    data : array_like
+        Input data.
+    limits : {None, tuple}, optional
+        None or a two item tuple.
+        Tuple of the percentages to cut on each side of the array, with respect
+        to the number of unmasked data, as floats between 0. and 1. If ``n``
+        is the number of unmasked data before trimming, then
+        (``n * limits[0]``)th smallest data and (``n * limits[1]``)th
+        largest data are masked.  The total number of unmasked data after
+        trimming is ``n * (1. - sum(limits))``.
+        The value of one limit can be set to None to indicate an open interval.
+
+        Defaults to (0.2, 0.2).
+    inclusive : (2,) tuple of boolean, optional
+        Tuple indicating whether the number of data points being masked on
+        each side should be rounded (True) or truncated (False).
+
+        Defaults to (True, True).
+    alpha : float, optional
+        Confidence level of the intervals.
+
+        Defaults to 0.05.
+    axis : int, optional
+        Axis along which to cut. If None, uses a flattened version of `data`.
+
+        Defaults to None.
+
+    Returns
+    -------
+    trimmed_mean_ci : (2,) ndarray
+        The lower and upper confidence intervals of the trimmed data.
+
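+    Examples
+    --------
+    A hedged sanity check (for this symmetric sample the interval
+    straddles the 10%-trimmed mean, which is 9.5):
+
+    >>> import numpy as np
+    >>> from scipy.stats.mstats import trimmed_mean_ci
+    >>> lo, hi = trimmed_mean_ci(np.arange(20.), limits=(0.1, 0.1))
+    >>> bool(lo < 9.5 < hi)
+    True
+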
+    """
+    data = ma.array(data, copy=False)
+    trimmed = mstats.trimr(data, limits=limits, inclusive=inclusive, axis=axis)
+    tmean = trimmed.mean(axis)
+    tstde = mstats.trimmed_stde(data,limits=limits,inclusive=inclusive,axis=axis)
+    df = trimmed.count(axis) - 1
+    tppf = t.ppf(1-alpha/2.,df)
+    return np.array((tmean - tppf*tstde, tmean+tppf*tstde))
+
+
+def mjci(data, prob=[0.25,0.5,0.75], axis=None):
+    """
+    Returns the Maritz-Jarrett estimators of the standard error of selected
+    experimental quantiles of the data.
+
+    Parameters
+    ----------
+    data : ndarray
+        Data array.
+    prob : sequence, optional
+        Sequence of quantiles to compute.
+    axis : int or None, optional
+        Axis along which to compute the quantiles. If None, use a flattened
+        array.
+
+    """
+    def _mjci_1D(data, p):
+        data = np.sort(data.compressed())
+        n = data.size
+        prob = (np.array(p) * n + 0.5).astype(int_)
+        betacdf = beta.cdf
+
+        mj = np.empty(len(prob), float_)
+        x = np.arange(1,n+1, dtype=float_) / n
+        y = x - 1./n
+        for (i,m) in enumerate(prob):
+            W = betacdf(x,m-1,n-m) - betacdf(y,m-1,n-m)
+            C1 = np.dot(W,data)
+            C2 = np.dot(W,data**2)
+            mj[i] = np.sqrt(C2 - C1**2)
+        return mj
+
+    data = ma.array(data, copy=False)
+    if data.ndim > 2:
+        raise ValueError("Array 'data' must be at most two dimensional, "
+                         "but got data.ndim = %d" % data.ndim)
+
+    p = np.array(prob, copy=False, ndmin=1)
+    # Computes quantiles along axis (or globally)
+    if (axis is None):
+        return _mjci_1D(data, p)
+    else:
+        return ma.apply_along_axis(_mjci_1D, axis, data, p)
+
+
+def mquantiles_cimj(data, prob=[0.25,0.50,0.75], alpha=0.05, axis=None):
+    """
+    Computes the alpha confidence interval for the selected quantiles of the
+    data, with Maritz-Jarrett estimators.
+
+    Parameters
+    ----------
+    data : ndarray
+        Data array.
+    prob : sequence, optional
+        Sequence of quantiles to compute.
+    alpha : float, optional
+        Confidence level of the intervals.
+    axis : int or None, optional
+        Axis along which to compute the quantiles.
+        If None, use a flattened array.
+
+    Returns
+    -------
+    ci_lower : ndarray
+        The lower boundaries of the confidence interval.  Of the same length as
+        `prob`.
+    ci_upper : ndarray
+        The upper boundaries of the confidence interval.  Of the same length as
+        `prob`.
+
+    """
+    alpha = min(alpha, 1 - alpha)
+    z = norm.ppf(1 - alpha/2.)
+    xq = mstats.mquantiles(data, prob, alphap=0, betap=0, axis=axis)
+    smj = mjci(data, prob, axis=axis)
+    return (xq - z * smj, xq + z * smj)
+
+
+def median_cihs(data, alpha=0.05, axis=None):
+    """
+    Computes the alpha-level confidence interval for the median of the data.
+
+    Uses the Hettmansperger-Sheather method.
+
+    Parameters
+    ----------
+    data : array_like
+        Input data. Masked values are discarded. The input should be 1D only,
+        or `axis` should be set to None.
+    alpha : float, optional
+        Confidence level of the intervals.
+    axis : int or None, optional
+        Axis along which to compute the quantiles. If None, use a flattened
+        array.
+
+    Returns
+    -------
+    median_cihs
+        Alpha level confidence interval.
+
+    """
+    def _cihs_1D(data, alpha):
+        data = np.sort(data.compressed())
+        n = len(data)
+        alpha = min(alpha, 1-alpha)
+        k = int(binom._ppf(alpha/2., n, 0.5))
+        gk = binom.cdf(n-k,n,0.5) - binom.cdf(k-1,n,0.5)
+        if gk < 1-alpha:
+            k -= 1
+            gk = binom.cdf(n-k,n,0.5) - binom.cdf(k-1,n,0.5)
+        gkk = binom.cdf(n-k-1,n,0.5) - binom.cdf(k,n,0.5)
+        I = (gk - 1 + alpha)/(gk - gkk)
+        lambd = (n-k) * I / float(k + (n-2*k)*I)
+        lims = (lambd*data[k] + (1-lambd)*data[k-1],
+                lambd*data[n-k-1] + (1-lambd)*data[n-k])
+        return lims
+    data = ma.array(data, copy=False)
+    # Computes quantiles along axis (or globally)
+    if (axis is None):
+        result = _cihs_1D(data, alpha)
+    else:
+        if data.ndim > 2:
+            raise ValueError("Array 'data' must be at most two dimensional, "
+                             "but got data.ndim = %d" % data.ndim)
+        result = ma.apply_along_axis(_cihs_1D, axis, data, alpha)
+
+    return result
+
+
+def compare_medians_ms(group_1, group_2, axis=None):
+    """
+    Compares the medians from two independent groups along the given axis.
+
+    The comparison is performed using the McKean-Schrader estimate of the
+    standard error of the medians.
+
+    Parameters
+    ----------
+    group_1 : array_like
+        First dataset.  Has to be of size >=7.
+    group_2 : array_like
+        Second dataset.  Has to be of size >=7.
+    axis : int, optional
+        Axis along which the medians are estimated. If None, the arrays are
+        flattened.  If `axis` is not None, then `group_1` and `group_2`
+        should have the same shape.
+
+    Returns
+    -------
+    compare_medians_ms : {float, ndarray}
+        If `axis` is None, then returns a float, otherwise returns a 1-D
+        ndarray of floats with a length equal to the length of `group_1`
+        along `axis`.
+
+    Examples
+    --------
+
+    >>> from scipy import stats
+    >>> a = [1, 2, 3, 4, 5, 6, 7]
+    >>> b = [8, 9, 10, 11, 12, 13, 14]
+    >>> stats.mstats.compare_medians_ms(a, b, axis=None)
+    1.0693225866553746e-05
+
+    The function is vectorized to compute along a given axis.
+
+    >>> import numpy as np
+    >>> rng = np.random.default_rng()
+    >>> x = rng.random(size=(3, 7))
+    >>> y = rng.random(size=(3, 8))
+    >>> stats.mstats.compare_medians_ms(x, y, axis=1)
+    array([0.36908985, 0.36092538, 0.2765313 ])
+
+    References
+    ----------
+    .. [1] McKean, Joseph W., and Ronald M. Schrader. "A comparison of methods
+       for studentizing the sample median." Communications in
+       Statistics-Simulation and Computation 13.6 (1984): 751-773.
+
+    """
+    (med_1, med_2) = (ma.median(group_1,axis=axis), ma.median(group_2,axis=axis))
+    (std_1, std_2) = (mstats.stde_median(group_1, axis=axis),
+                      mstats.stde_median(group_2, axis=axis))
+    W = np.abs(med_1 - med_2) / ma.sqrt(std_1**2 + std_2**2)
+    return 1 - norm.cdf(W)
+
+
+def idealfourths(data, axis=None):
+    """
+    Returns an estimate of the lower and upper quartiles.
+
+    Uses the ideal fourths algorithm.
+
+    Parameters
+    ----------
+    data : array_like
+        Input array.
+    axis : int, optional
+        Axis along which the quartiles are estimated. If None, the arrays are
+        flattened.
+
+    Returns
+    -------
+    idealfourths : {list of floats, masked array}
+        Returns the two internal values that divide `data` into four parts
+        using the ideal fourths algorithm either along the flattened array
+        (if `axis` is None) or along `axis` of `data`.
+
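+    Examples
+    --------
+    A small worked example (values rounded):
+
+    >>> import numpy as np
+    >>> from scipy.stats.mstats import idealfourths
+    >>> [float(round(q, 3)) for q in idealfourths(np.arange(20.))]
+    [4.417, 14.583]
+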
+    """
+    def _idf(data):
+        x = data.compressed()
+        n = len(x)
+        if n < 3:
+            return [np.nan,np.nan]
+        (j,h) = divmod(n/4. + 5/12.,1)
+        j = int(j)
+        qlo = (1-h)*x[j-1] + h*x[j]
+        k = n - j
+        qup = (1-h)*x[k] + h*x[k-1]
+        return [qlo, qup]
+    data = ma.sort(data, axis=axis).view(MaskedArray)
+    if (axis is None):
+        return _idf(data)
+    else:
+        return ma.apply_along_axis(_idf, axis, data)
+
+
+def rsh(data, points=None):
+    """
+    Evaluates Rosenblatt's shifted histogram estimators for each data point.
+
+    Rosenblatt's estimator is a centered finite-difference approximation to the
+    derivative of the empirical cumulative distribution function.
+
+    Parameters
+    ----------
+    data : sequence
+        Input data, should be 1-D. Masked values are ignored.
+    points : sequence or None, optional
+        Sequence of points where to evaluate Rosenblatt shifted histogram.
+        If None, use the data.
+
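+    Examples
+    --------
+    A usage sketch (one density estimate per requested point; exact
+    values depend on the random sample, so none are asserted):
+
+    >>> import numpy as np
+    >>> from scipy.stats.mstats import rsh
+    >>> rng = np.random.default_rng()
+    >>> data = rng.standard_normal(200)
+    >>> dens = rsh(data, points=[-1., 0., 1.])
+    >>> dens.shape
+    (3,)
+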
+    """
+    data = ma.array(data, copy=False)
+    if points is None:
+        points = data
+    else:
+        points = np.array(points, copy=False, ndmin=1)
+
+    if data.ndim != 1:
+        raise AttributeError("The input array should be 1D only!")
+
+    n = data.count()
+    r = idealfourths(data, axis=None)
+    h = 1.2 * (r[-1]-r[0]) / n**(1./5)
+    nhi = (data[:,None] <= points[None,:] + h).sum(0)
+    nlo = (data[:,None] < points[None,:] - h).sum(0)
+    return (nhi-nlo) / (2.*n*h)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_multivariate.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_multivariate.py
new file mode 100644
index 00000000..707834ff
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_multivariate.py
@@ -0,0 +1,5691 @@
+#
+# Author: Joris Vankerschaver 2013
+#
+import math
+import numpy as np
+from numpy import asarray_chkfinite, asarray
+from numpy.lib import NumpyVersion
+import scipy.linalg
+from scipy._lib import doccer
+from scipy.special import gammaln, psi, multigammaln, xlogy, entr, betaln
+from scipy._lib._util import check_random_state
+from scipy.linalg.blas import drot
+from scipy.linalg._misc import LinAlgError
+from scipy.linalg.lapack import get_lapack_funcs
+
+from ._discrete_distns import binom
+from . import _mvn, _covariance, _rcont
+
+__all__ = ['multivariate_normal',
+           'matrix_normal',
+           'dirichlet',
+           'wishart',
+           'invwishart',
+           'multinomial',
+           'special_ortho_group',
+           'ortho_group',
+           'random_correlation',
+           'unitary_group',
+           'multivariate_t',
+           'multivariate_hypergeom',
+           'random_table',
+           'uniform_direction']
+
+_LOG_2PI = np.log(2 * np.pi)
+_LOG_2 = np.log(2)
+_LOG_PI = np.log(np.pi)
+
+
+_doc_random_state = """\
+seed : {None, int, np.random.RandomState, np.random.Generator}, optional
+    Used for drawing random variates.
+    If `seed` is `None`, the `~np.random.RandomState` singleton is used.
+    If `seed` is an int, a new ``RandomState`` instance is used, seeded
+    with seed.
+    If `seed` is already a ``RandomState`` or ``Generator`` instance,
+    then that object is used.
+    Default is `None`.
+"""
+
+
+def _squeeze_output(out):
+    """
+    Remove single-dimensional entries from array and convert to scalar,
+    if necessary.
+    """
+    out = out.squeeze()
+    if out.ndim == 0:
+        out = out[()]
+    return out
+
+
+def _eigvalsh_to_eps(spectrum, cond=None, rcond=None):
+    """Determine which eigenvalues are "small" given the spectrum.
+
+    This is for compatibility across various linear algebra functions
+    that should agree about whether or not a Hermitian matrix is numerically
+    singular and what its numerical matrix rank is.
+    This is designed to be compatible with scipy.linalg.pinvh.
+
+    Parameters
+    ----------
+    spectrum : 1d ndarray
+        Array of eigenvalues of a Hermitian matrix.
+    cond, rcond : float, optional
+        Cutoff for small eigenvalues.
+        Singular values smaller than rcond * largest_eigenvalue are
+        considered zero.
+        If None or -1, suitable machine precision is used.
+
+    Returns
+    -------
+    eps : float
+        Magnitude cutoff for numerical negligibility.
+
+    """
+    if rcond is not None:
+        cond = rcond
+    if cond in [None, -1]:
+        t = spectrum.dtype.char.lower()
+        factor = {'f': 1E3, 'd': 1E6}
+        cond = factor[t] * np.finfo(t).eps
+    eps = cond * np.max(abs(spectrum))
+    return eps
+
+
+def _pinv_1d(v, eps=1e-5):
+    """A helper function for computing the pseudoinverse.
+
+    Parameters
+    ----------
+    v : iterable of numbers
+        This may be thought of as a vector of eigenvalues or singular values.
+    eps : float
+        Values with magnitude no greater than eps are considered negligible.
+
+    Returns
+    -------
+    v_pinv : 1d float ndarray
+        A vector of pseudo-inverted numbers.
+
+    """
+    return np.array([0 if abs(x) <= eps else 1/x for x in v], dtype=float)
+
+
+class _PSD:
+    """
+    Compute coordinated functions of a symmetric positive semidefinite matrix.
+
+    This class addresses two issues.  Firstly, it allows the pseudoinverse,
+    the logarithm of the pseudo-determinant, and the rank of the matrix
+    to be computed using one call to eigh instead of three.
+    Secondly, it allows these functions to be computed in a way
+    that gives mutually compatible results.
+    All of the functions are computed with a common understanding as to
+    which of the eigenvalues are to be considered negligibly small.
+    The functions are designed to coordinate with scipy.linalg.pinvh()
+    but not necessarily with np.linalg.det() or with np.linalg.matrix_rank().
+
+    Parameters
+    ----------
+    M : array_like
+        Symmetric positive semidefinite matrix (2-D).
+    cond, rcond : float, optional
+        Cutoff for small eigenvalues.
+        Singular values smaller than rcond * largest_eigenvalue are
+        considered zero.
+        If None or -1, suitable machine precision is used.
+    lower : bool, optional
+        Whether the pertinent array data is taken from the lower
+        or upper triangle of M. (Default: lower)
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite
+        numbers. Disabling may give a performance gain, but may result
+        in problems (crashes, non-termination) if the inputs do contain
+        infinities or NaNs.
+    allow_singular : bool, optional
+        Whether to allow a singular matrix.  (Default: True)
+
+    Notes
+    -----
+    The arguments are similar to those of scipy.linalg.pinvh().
+
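+    Examples
+    --------
+    A small internal-usage sketch (a rank-1 matrix; the rank and the log
+    pseudo-determinant come from a single eigendecomposition):
+
+    >>> import numpy as np
+    >>> M = np.array([[1., 1.], [1., 1.]])  # eigenvalues 0 and 2
+    >>> psd = _PSD(M)
+    >>> psd.rank
+    1
+    >>> round(float(psd.log_pdet), 4)  # log(2)
+    0.6931
+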
+    """
+
+    def __init__(self, M, cond=None, rcond=None, lower=True,
+                 check_finite=True, allow_singular=True):
+        self._M = np.asarray(M)
+
+        # Compute the symmetric eigendecomposition.
+        # Note that eigh takes care of array conversion, chkfinite,
+        # and assertion that the matrix is square.
+        s, u = scipy.linalg.eigh(M, lower=lower, check_finite=check_finite)
+
+        eps = _eigvalsh_to_eps(s, cond, rcond)
+        if np.min(s) < -eps:
+            msg = "The input matrix must be symmetric positive semidefinite."
+            raise ValueError(msg)
+        d = s[s > eps]
+        if len(d) < len(s) and not allow_singular:
+            msg = ("When `allow_singular is False`, the input matrix must be "
+                   "symmetric positive definite.")
+            raise np.linalg.LinAlgError(msg)
+        s_pinv = _pinv_1d(s, eps)
+        U = np.multiply(u, np.sqrt(s_pinv))
+
+        # Save the eigenvector basis, and tolerance for testing support
+        self.eps = 1e3*eps
+        self.V = u[:, s <= eps]
+
+        # Initialize the eagerly precomputed attributes.
+        self.rank = len(d)
+        self.U = U
+        self.log_pdet = np.sum(np.log(d))
+
+        # Initialize attributes to be lazily computed.
+        self._pinv = None
+
+    def _support_mask(self, x):
+        """
+        Check whether x lies in the support of the distribution.
+        """
+        residual = np.linalg.norm(x @ self.V, axis=-1)
+        in_support = residual < self.eps
+        return in_support
+
+    @property
+    def pinv(self):
+        if self._pinv is None:
+            self._pinv = np.dot(self.U, self.U.T)
+        return self._pinv
+
+
+class multi_rv_generic:
+    """
+    Class which encapsulates common functionality between all multivariate
+    distributions.
+    """
+    def __init__(self, seed=None):
+        super().__init__()
+        self._random_state = check_random_state(seed)
+
+    @property
+    def random_state(self):
+        """ Get or set the Generator object for generating random variates.
+
+        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
+        singleton is used.
+        If `seed` is an int, a new ``RandomState`` instance is used,
+        seeded with `seed`.
+        If `seed` is already a ``Generator`` or ``RandomState`` instance then
+        that instance is used.
+
+        """
+        return self._random_state
+
+    @random_state.setter
+    def random_state(self, seed):
+        self._random_state = check_random_state(seed)
+
+    def _get_random_state(self, random_state):
+        if random_state is not None:
+            return check_random_state(random_state)
+        else:
+            return self._random_state
+
+
+class multi_rv_frozen:
+    """
+    Class which encapsulates common functionality between all frozen
+    multivariate distributions.
+    """
+    @property
+    def random_state(self):
+        return self._dist._random_state
+
+    @random_state.setter
+    def random_state(self, seed):
+        self._dist._random_state = check_random_state(seed)
+
+
+_mvn_doc_default_callparams = """\
+mean : array_like, default: ``[0]``
+    Mean of the distribution.
+cov : array_like or `Covariance`, default: ``[1]``
+    Symmetric positive (semi)definite covariance matrix of the distribution.
+allow_singular : bool, default: ``False``
+    Whether to allow a singular covariance matrix. This is ignored if `cov` is
+    a `Covariance` object.
+"""
+
+_mvn_doc_callparams_note = """\
+Setting the parameter `mean` to `None` is equivalent to having `mean`
+be the zero-vector. The parameter `cov` can be a scalar, in which case
+the covariance matrix is the identity times that value, a vector of
+diagonal entries for the covariance matrix, a two-dimensional array_like,
+or a `Covariance` object.
+"""
+
+_mvn_doc_frozen_callparams = ""
+
+_mvn_doc_frozen_callparams_note = """\
+See class definition for a detailed description of parameters."""
+
+mvn_docdict_params = {
+    '_mvn_doc_default_callparams': _mvn_doc_default_callparams,
+    '_mvn_doc_callparams_note': _mvn_doc_callparams_note,
+    '_doc_random_state': _doc_random_state
+}
+
+mvn_docdict_noparams = {
+    '_mvn_doc_default_callparams': _mvn_doc_frozen_callparams,
+    '_mvn_doc_callparams_note': _mvn_doc_frozen_callparams_note,
+    '_doc_random_state': _doc_random_state
+}
+
+
+class multivariate_normal_gen(multi_rv_generic):
+    r"""A multivariate normal random variable.
+
+    The `mean` keyword specifies the mean. The `cov` keyword specifies the
+    covariance matrix.
+
+    Methods
+    -------
+    pdf(x, mean=None, cov=1, allow_singular=False)
+        Probability density function.
+    logpdf(x, mean=None, cov=1, allow_singular=False)
+        Log of the probability density function.
+    cdf(x, mean=None, cov=1, allow_singular=False, maxpts=1000000*dim, abseps=1e-5, releps=1e-5, lower_limit=None)  # noqa
+        Cumulative distribution function.
+    logcdf(x, mean=None, cov=1, allow_singular=False, maxpts=1000000*dim, abseps=1e-5, releps=1e-5)
+        Log of the cumulative distribution function.
+    rvs(mean=None, cov=1, size=1, random_state=None)
+        Draw random samples from a multivariate normal distribution.
+    entropy()
+        Compute the differential entropy of the multivariate normal.
+
+    Parameters
+    ----------
+    %(_mvn_doc_default_callparams)s
+    %(_doc_random_state)s
+
+    Notes
+    -----
+    %(_mvn_doc_callparams_note)s
+
+    The covariance matrix `cov` may be an instance of a subclass of
+    `Covariance`, e.g. `scipy.stats.CovViaPrecision`. If so, `allow_singular`
+    is ignored.
+
+    Otherwise, `cov` must be a symmetric positive semidefinite
+    matrix when `allow_singular` is True; it must be (strictly) positive
+    definite when `allow_singular` is False.
+    Symmetry is not checked; only the lower triangular portion is used.
+    The determinant and inverse of `cov` are computed
+    as the pseudo-determinant and pseudo-inverse, respectively, so
+    that `cov` does not need to have full rank.
+
+    The probability density function for `multivariate_normal` is
+
+    .. math::
+
+        f(x) = \frac{1}{\sqrt{(2 \pi)^k \det \Sigma}}
+               \exp\left( -\frac{1}{2} (x - \mu)^T \Sigma^{-1} (x - \mu) \right),
+
+    where :math:`\mu` is the mean, :math:`\Sigma` the covariance matrix,
+    :math:`k` the rank of :math:`\Sigma`. In case of singular :math:`\Sigma`,
+    SciPy extends this definition according to [1]_.
+
+    .. versionadded:: 0.14.0
+
+    References
+    ----------
+    .. [1] Multivariate Normal Distribution - Degenerate Case, Wikipedia,
+           https://en.wikipedia.org/wiki/Multivariate_normal_distribution#Degenerate_case
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.stats import multivariate_normal
+
+    >>> x = np.linspace(0, 5, 10, endpoint=False)
+    >>> y = multivariate_normal.pdf(x, mean=2.5, cov=0.5); y
+    array([ 0.00108914,  0.01033349,  0.05946514,  0.20755375,  0.43939129,
+            0.56418958,  0.43939129,  0.20755375,  0.05946514,  0.01033349])
+    >>> fig1 = plt.figure()
+    >>> ax = fig1.add_subplot(111)
+    >>> ax.plot(x, y)
+    >>> plt.show()
+
+    Alternatively, the object may be called (as a function) to fix the mean
+    and covariance parameters, returning a "frozen" multivariate normal
+    random variable:
+
+    >>> rv = multivariate_normal(mean=None, cov=1, allow_singular=False)
+    >>> # Frozen object with the same methods but holding the given
+    >>> # mean and covariance fixed.
+
+    The input quantiles can be any shape of array, as long as the last
+    axis labels the components.  This allows us for instance to
+    display the frozen pdf for a non-isotropic random variable in 2D as
+    follows:
+
+    >>> x, y = np.mgrid[-1:1:.01, -1:1:.01]
+    >>> pos = np.dstack((x, y))
+    >>> rv = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]])
+    >>> fig2 = plt.figure()
+    >>> ax2 = fig2.add_subplot(111)
+    >>> ax2.contourf(x, y, rv.pdf(pos))
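+
+    For example, a rank-deficient covariance is accepted when
+    ``allow_singular=True``; the density is then supported only on a
+    lower-dimensional affine subspace and evaluates to zero off of it
+    (a small illustrative check):
+
+    >>> cov = [[1.0, 1.0], [1.0, 1.0]]  # rank-1 covariance
+    >>> multivariate_normal.pdf([1.0, 0.0], mean=[0.0, 0.0], cov=cov,
+    ...                         allow_singular=True)
+    0.0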
+
+    """
+
+    def __init__(self, seed=None):
+        super().__init__(seed)
+        self.__doc__ = doccer.docformat(self.__doc__, mvn_docdict_params)
+
+    def __call__(self, mean=None, cov=1, allow_singular=False, seed=None):
+        """Create a frozen multivariate normal distribution.
+
+        See `multivariate_normal_frozen` for more information.
+        """
+        return multivariate_normal_frozen(mean, cov,
+                                          allow_singular=allow_singular,
+                                          seed=seed)
+
+    def _process_parameters(self, mean, cov, allow_singular=True):
+        """
+        Infer dimensionality from the mean or covariance matrix, and ensure
+        that the mean is a full vector and the covariance a full matrix.
+        """
+        if isinstance(cov, _covariance.Covariance):
+            return self._process_parameters_Covariance(mean, cov)
+        else:
+            # Before `Covariance` classes were introduced,
+            # `multivariate_normal` accepted plain arrays as `cov` and used the
+            # following input validation. To avoid disturbing the behavior of
+            # `multivariate_normal` when plain arrays are used, we use the
+            # original input validation here.
+            dim, mean, cov = self._process_parameters_psd(None, mean, cov)
+            # After input validation, some methods then processed the arrays
+            # with a `_PSD` object and used that to perform computation.
+            # To avoid branching statements in each method depending on whether
+            # `cov` is an array or `Covariance` object, we always process the
+            # array with `_PSD`, and then use wrapper that satisfies the
+            # `Covariance` interface, `CovViaPSD`.
+            psd = _PSD(cov, allow_singular=allow_singular)
+            cov_object = _covariance.CovViaPSD(psd)
+            return dim, mean, cov_object
+
+    def _process_parameters_Covariance(self, mean, cov):
+        dim = cov.shape[-1]
+        mean = np.array([0.]) if mean is None else mean
+        message = (f"`cov` represents a covariance matrix in {dim} dimensions,"
+                   f"and so `mean` must be broadcastable to shape {(dim,)}")
+        try:
+            mean = np.broadcast_to(mean, dim)
+        except ValueError as e:
+            raise ValueError(message) from e
+        return dim, mean, cov
+
+    def _process_parameters_psd(self, dim, mean, cov):
+        # Try to infer dimensionality
+        if dim is None:
+            if mean is None:
+                if cov is None:
+                    dim = 1
+                else:
+                    cov = np.asarray(cov, dtype=float)
+                    if cov.ndim < 2:
+                        dim = 1
+                    else:
+                        dim = cov.shape[0]
+            else:
+                mean = np.asarray(mean, dtype=float)
+                dim = mean.size
+        else:
+            if not np.isscalar(dim):
+                raise ValueError("Dimension of random variable must be "
+                                 "a scalar.")
+
+        # Check input sizes and return full arrays for mean and cov if
+        # necessary
+        if mean is None:
+            mean = np.zeros(dim)
+        mean = np.asarray(mean, dtype=float)
+
+        if cov is None:
+            cov = 1.0
+        cov = np.asarray(cov, dtype=float)
+
+        if dim == 1:
+            mean = mean.reshape(1)
+            cov = cov.reshape(1, 1)
+
+        if mean.ndim != 1 or mean.shape[0] != dim:
+            raise ValueError("Array 'mean' must be a vector of length %d." %
+                             dim)
+        if cov.ndim == 0:
+            cov = cov * np.eye(dim)
+        elif cov.ndim == 1:
+            cov = np.diag(cov)
+        elif cov.ndim == 2 and cov.shape != (dim, dim):
+            rows, cols = cov.shape
+            if rows != cols:
+                msg = ("Array 'cov' must be square if it is two dimensional,"
+                       " but cov.shape = %s." % str(cov.shape))
+            else:
+                msg = ("Dimension mismatch: array 'cov' is of shape %s,"
+                       " but 'mean' is a vector of length %d.")
+                msg = msg % (str(cov.shape), len(mean))
+            raise ValueError(msg)
+        elif cov.ndim > 2:
+            raise ValueError("Array 'cov' must be at most two-dimensional,"
+                             " but cov.ndim = %d" % cov.ndim)
+
+        return dim, mean, cov
+
+    def _process_quantiles(self, x, dim):
+        """
+        Adjust quantiles array so that the last axis labels the components of
+        each data point.
+        """
+        x = np.asarray(x, dtype=float)
+
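+        # With dim == 1, a 1-D input is a batch of scalar observations and
+        # becomes shape (n, 1); otherwise a 1-D input is a single point in
+        # dim dimensions and becomes shape (1, dim).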
+        if x.ndim == 0:
+            x = x[np.newaxis]
+        elif x.ndim == 1:
+            if dim == 1:
+                x = x[:, np.newaxis]
+            else:
+                x = x[np.newaxis, :]
+
+        return x
+
+    def _logpdf(self, x, mean, cov_object):
+        """Log of the multivariate normal probability density function.
+
+        Parameters
+        ----------
+        x : ndarray
+            Points at which to evaluate the log of the probability
+            density function
+        mean : ndarray
+            Mean of the distribution
+        cov_object : Covariance
+            An object representing the Covariance matrix
+
+        Notes
+        -----
+        As this function does no argument checking, it should not be
+        called directly; use 'logpdf' instead.
+
+        """
+        log_det_cov, rank = cov_object.log_pdet, cov_object.rank
+        dev = x - mean
+        if dev.ndim > 1:
+            log_det_cov = log_det_cov[..., np.newaxis]
+            rank = rank[..., np.newaxis]
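+        # ``whiten`` multiplies by a square root of the (pseudo-)precision,
+        # so the squared norm below is the Mahalanobis term
+        # (x - mu)^T pinv(Sigma) (x - mu).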
+        maha = np.sum(np.square(cov_object.whiten(dev)), axis=-1)
+        return -0.5 * (rank * _LOG_2PI + log_det_cov + maha)
+
+    def logpdf(self, x, mean=None, cov=1, allow_singular=False):
+        """Log of the multivariate normal probability density function.
+
+        Parameters
+        ----------
+        x : array_like
+            Quantiles, with the last axis of `x` denoting the components.
+        %(_mvn_doc_default_callparams)s
+
+        Returns
+        -------
+        pdf : ndarray or scalar
+            Log of the probability density function evaluated at `x`
+
+        Notes
+        -----
+        %(_mvn_doc_callparams_note)s
+
+        """
+        params = self._process_parameters(mean, cov, allow_singular)
+        dim, mean, cov_object = params
+        x = self._process_quantiles(x, dim)
+        out = self._logpdf(x, mean, cov_object)
+        if np.any(cov_object.rank < dim):
+            out_of_bounds = ~cov_object._support_mask(x-mean)
+            out[out_of_bounds] = -np.inf
+        return _squeeze_output(out)
+
+    def pdf(self, x, mean=None, cov=1, allow_singular=False):
+        """Multivariate normal probability density function.
+
+        Parameters
+        ----------
+        x : array_like
+            Quantiles, with the last axis of `x` denoting the components.
+        %(_mvn_doc_default_callparams)s
+
+        Returns
+        -------
+        pdf : ndarray or scalar
+            Probability density function evaluated at `x`
+
+        Notes
+        -----
+        %(_mvn_doc_callparams_note)s
+
+        """
+        params = self._process_parameters(mean, cov, allow_singular)
+        dim, mean, cov_object = params
+        x = self._process_quantiles(x, dim)
+        out = np.exp(self._logpdf(x, mean, cov_object))
+        if np.any((cov_object.rank < dim)):
+            out_of_bounds = ~cov_object._support_mask(x-mean)
+            out[out_of_bounds] = 0.0
+        return _squeeze_output(out)
+
+    def _cdf(self, x, mean, cov, maxpts, abseps, releps, lower_limit):
+        """Multivariate normal cumulative distribution function.
+
+        Parameters
+        ----------
+        x : ndarray
+            Points at which to evaluate the cumulative distribution function.
+        mean : ndarray
+            Mean of the distribution
+        cov : array_like
+            Covariance matrix of the distribution
+        maxpts : integer
+            The maximum number of points to use for integration
+        abseps : float
+            Absolute error tolerance
+        releps : float
+            Relative error tolerance
+        lower_limit : array_like, optional
+            Lower limit of integration of the cumulative distribution function.
+            Default is negative infinity. Must be broadcastable with `x`.
+
+        Notes
+        -----
+        As this function does no argument checking, it should not be
+        called directly; use 'cdf' instead.
+
+
+        .. versionadded:: 1.0.0
+
+        """
+        lower = (np.full(mean.shape, -np.inf)
+                 if lower_limit is None else lower_limit)
+        # In 2d, _mvn.mvnun accepts input in which `lower` bound elements
+        # are greater than `x`. Not so in other dimensions. Fix this by
+        # ensuring that lower bounds are indeed lower when passed, then
+        # set signs of resulting CDF manually.
+        b, a = np.broadcast_arrays(x, lower)
+        i_swap = b < a
+        signs = (-1)**(i_swap.sum(axis=-1))  # odd # of swaps -> negative
+        a, b = a.copy(), b.copy()
+        a[i_swap], b[i_swap] = b[i_swap], a[i_swap]
+        n = x.shape[-1]
+        limits = np.concatenate((a, b), axis=-1)
+
+        # mvnun expects 1-d arguments, so process points sequentially
+        def func1d(limits):
+            return _mvn.mvnun(limits[:n], limits[n:], mean, cov,
+                              maxpts, abseps, releps)[0]
+
+        out = np.apply_along_axis(func1d, -1, limits) * signs
+        return _squeeze_output(out)
+
+    def logcdf(self, x, mean=None, cov=1, allow_singular=False, maxpts=None,
+               abseps=1e-5, releps=1e-5, *, lower_limit=None):
+        """Log of the multivariate normal cumulative distribution function.
+
+        Parameters
+        ----------
+        x : array_like
+            Quantiles, with the last axis of `x` denoting the components.
+        %(_mvn_doc_default_callparams)s
+        maxpts : integer, optional
+            The maximum number of points to use for integration
+            (default `1000000*dim`)
+        abseps : float, optional
+            Absolute error tolerance (default 1e-5)
+        releps : float, optional
+            Relative error tolerance (default 1e-5)
+        lower_limit : array_like, optional
+            Lower limit of integration of the cumulative distribution function.
+            Default is negative infinity. Must be broadcastable with `x`.
+
+        Returns
+        -------
+        cdf : ndarray or scalar
+            Log of the cumulative distribution function evaluated at `x`
+
+        Notes
+        -----
+        %(_mvn_doc_callparams_note)s
+
+        .. versionadded:: 1.0.0
+
+        """
+        params = self._process_parameters(mean, cov, allow_singular)
+        dim, mean, cov_object = params
+        cov = cov_object.covariance
+        x = self._process_quantiles(x, dim)
+        if not maxpts:
+            maxpts = 1000000 * dim
+        cdf = self._cdf(x, mean, cov, maxpts, abseps, releps, lower_limit)
+        # the log of a negative real is complex, and cdf can be negative
+        # if lower limit is greater than upper limit
+        cdf = cdf + 0j if np.any(cdf < 0) else cdf
+        out = np.log(cdf)
+        return out
+
+    def cdf(self, x, mean=None, cov=1, allow_singular=False, maxpts=None,
+            abseps=1e-5, releps=1e-5, *, lower_limit=None):
+        """Multivariate normal cumulative distribution function.
+
+        Parameters
+        ----------
+        x : array_like
+            Quantiles, with the last axis of `x` denoting the components.
+        %(_mvn_doc_default_callparams)s
+        maxpts : integer, optional
+            The maximum number of points to use for integration
+            (default `1000000*dim`)
+        abseps : float, optional
+            Absolute error tolerance (default 1e-5)
+        releps : float, optional
+            Relative error tolerance (default 1e-5)
+        lower_limit : array_like, optional
+            Lower limit of integration of the cumulative distribution function.
+            Default is negative infinity. Must be broadcastable with `x`.
+
+        Returns
+        -------
+        cdf : ndarray or scalar
+            Cumulative distribution function evaluated at `x`
+
+        Notes
+        -----
+        %(_mvn_doc_callparams_note)s
+
+        .. versionadded:: 1.0.0
+
+        """
+        params = self._process_parameters(mean, cov, allow_singular)
+        dim, mean, cov_object = params
+        cov = cov_object.covariance
+        x = self._process_quantiles(x, dim)
+        if not maxpts:
+            maxpts = 1000000 * dim
+        out = self._cdf(x, mean, cov, maxpts, abseps, releps, lower_limit)
+        return out
+
+    def rvs(self, mean=None, cov=1, size=1, random_state=None):
+        """Draw random samples from a multivariate normal distribution.
+
+        Parameters
+        ----------
+        %(_mvn_doc_default_callparams)s
+        size : integer, optional
+            Number of samples to draw (default 1).
+        %(_doc_random_state)s
+
+        Returns
+        -------
+        rvs : ndarray or scalar
+            Random variates of size (`size`, `N`), where `N` is the
+            dimension of the random variable.
+
+        Notes
+        -----
+        %(_mvn_doc_callparams_note)s
+
+        """
+        dim, mean, cov_object = self._process_parameters(mean, cov)
+        random_state = self._get_random_state(random_state)
+
+        if isinstance(cov_object, _covariance.CovViaPSD):
+            cov = cov_object.covariance
+            out = random_state.multivariate_normal(mean, cov, size)
+            out = _squeeze_output(out)
+        else:
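+            # Generic Covariance object: draw i.i.d. standard normals and
+            # map them through the covariance's colorize transform (e.g.
+            # multiplication by a matrix square root of the covariance) to
+            # induce the desired covariance structure.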
+            size = size or tuple()
+            if not np.iterable(size):
+                size = (size,)
+            shape = tuple(size) + (cov_object.shape[-1],)
+            x = random_state.normal(size=shape)
+            out = mean + cov_object.colorize(x)
+        return out
+
+    def entropy(self, mean=None, cov=1):
+        """Compute the differential entropy of the multivariate normal.
+
+        Parameters
+        ----------
+        %(_mvn_doc_default_callparams)s
+
+        Returns
+        -------
+        h : scalar
+            Entropy of the multivariate normal distribution
+
+        Notes
+        -----
+        %(_mvn_doc_callparams_note)s
+
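+        The entropy is computed as
+        ``0.5 * (rank * (log(2*pi) + 1) + log_pdet)``: one half of
+        ``log((2*pi*e)**k * pdet(Sigma))`` with ``k`` the rank and ``pdet``
+        the pseudo-determinant of the covariance; for a nonsingular
+        covariance this is the familiar ``0.5*log(det(2*pi*e*Sigma))``.
+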
+        """
+        dim, mean, cov_object = self._process_parameters(mean, cov)
+        return 0.5 * (cov_object.rank * (_LOG_2PI + 1) + cov_object.log_pdet)
+
+
+multivariate_normal = multivariate_normal_gen()
+
+
+class multivariate_normal_frozen(multi_rv_frozen):
+    def __init__(self, mean=None, cov=1, allow_singular=False, seed=None,
+                 maxpts=None, abseps=1e-5, releps=1e-5):
+        """Create a frozen multivariate normal distribution.
+
+        Parameters
+        ----------
+        mean : array_like, default: ``[0]``
+            Mean of the distribution.
+        cov : array_like, default: ``[1]``
+            Symmetric positive (semi)definite covariance matrix of the
+            distribution.
+        allow_singular : bool, default: ``False``
+            Whether to allow a singular covariance matrix.
+        seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
+            If `seed` is None (or `np.random`), the `numpy.random.RandomState`
+            singleton is used.
+            If `seed` is an int, a new ``RandomState`` instance is used,
+            seeded with `seed`.
+            If `seed` is already a ``Generator`` or ``RandomState`` instance
+            then that instance is used.
+        maxpts : integer, optional
+            The maximum number of points to use for integration of the
+            cumulative distribution function (default `1000000*dim`)
+        abseps : float, optional
+            Absolute error tolerance for the cumulative distribution function
+            (default 1e-5)
+        releps : float, optional
+            Relative error tolerance for the cumulative distribution function
+            (default 1e-5)
+
+        Examples
+        --------
+        When called with the default parameters, this will create a 1D random
+        variable with mean 0 and covariance 1:
+
+        >>> from scipy.stats import multivariate_normal
+        >>> r = multivariate_normal()
+        >>> r.mean
+        array([ 0.])
+        >>> r.cov
+        array([[1.]])
+
+        """
+        self._dist = multivariate_normal_gen(seed)
+        self.dim, self.mean, self.cov_object = (
+            self._dist._process_parameters(mean, cov, allow_singular))
+        self.allow_singular = allow_singular or self.cov_object._allow_singular
+        if not maxpts:
+            maxpts = 1000000 * self.dim
+        self.maxpts = maxpts
+        self.abseps = abseps
+        self.releps = releps
+
+    @property
+    def cov(self):
+        return self.cov_object.covariance
+
+    def logpdf(self, x):
+        x = self._dist._process_quantiles(x, self.dim)
+        out = self._dist._logpdf(x, self.mean, self.cov_object)
+        if np.any(self.cov_object.rank < self.dim):
+            out_of_bounds = ~self.cov_object._support_mask(x-self.mean)
+            out[out_of_bounds] = -np.inf
+        return _squeeze_output(out)
+
+    def pdf(self, x):
+        return np.exp(self.logpdf(x))
+
+    def logcdf(self, x, *, lower_limit=None):
+        cdf = self.cdf(x, lower_limit=lower_limit)
+        # the log of a negative real is complex, and cdf can be negative
+        # if lower limit is greater than upper limit
+        cdf = cdf + 0j if np.any(cdf < 0) else cdf
+        out = np.log(cdf)
+        return out
+
+    def cdf(self, x, *, lower_limit=None):
+        x = self._dist._process_quantiles(x, self.dim)
+        out = self._dist._cdf(x, self.mean, self.cov_object.covariance,
+                              self.maxpts, self.abseps, self.releps,
+                              lower_limit)
+        return _squeeze_output(out)
+
+    def rvs(self, size=1, random_state=None):
+        return self._dist.rvs(self.mean, self.cov_object, size, random_state)
+
+    def entropy(self):
+        """Computes the differential entropy of the multivariate normal.
+
+        Returns
+        -------
+        h : scalar
+            Entropy of the multivariate normal distribution
+
+        """
+        log_pdet = self.cov_object.log_pdet
+        rank = self.cov_object.rank
+        return 0.5 * (rank * (_LOG_2PI + 1) + log_pdet)
+
+
+# Set frozen generator docstrings from corresponding docstrings in
+# multivariate_normal_gen and fill in default strings in class docstrings
+for name in ['logpdf', 'pdf', 'logcdf', 'cdf', 'rvs']:
+    method = multivariate_normal_gen.__dict__[name]
+    method_frozen = multivariate_normal_frozen.__dict__[name]
+    method_frozen.__doc__ = doccer.docformat(method.__doc__,
+                                             mvn_docdict_noparams)
+    method.__doc__ = doccer.docformat(method.__doc__, mvn_docdict_params)
+
+_matnorm_doc_default_callparams = """\
+mean : array_like, optional
+    Mean of the distribution (default: `None`)
+rowcov : array_like, optional
+    Among-row covariance matrix of the distribution (default: `1`)
+colcov : array_like, optional
+    Among-column covariance matrix of the distribution (default: `1`)
+"""
+
+_matnorm_doc_callparams_note = """\
+If `mean` is set to `None` then a matrix of zeros is used for the mean.
+The dimensions of this matrix are inferred from the shape of `rowcov` and
+`colcov`, if these are provided, or set to `1` if ambiguous.
+
+`rowcov` and `colcov` can be two-dimensional array_likes specifying the
+covariance matrices directly. Alternatively, a one-dimensional array will
+be interpreted as the entries of a diagonal matrix, and a scalar or
+zero-dimensional array will be interpreted as this value times the
+identity matrix.
+"""
+
+_matnorm_doc_frozen_callparams = ""
+
+_matnorm_doc_frozen_callparams_note = """\
+See class definition for a detailed description of parameters."""
+
+matnorm_docdict_params = {
+    '_matnorm_doc_default_callparams': _matnorm_doc_default_callparams,
+    '_matnorm_doc_callparams_note': _matnorm_doc_callparams_note,
+    '_doc_random_state': _doc_random_state
+}
+
+matnorm_docdict_noparams = {
+    '_matnorm_doc_default_callparams': _matnorm_doc_frozen_callparams,
+    '_matnorm_doc_callparams_note': _matnorm_doc_frozen_callparams_note,
+    '_doc_random_state': _doc_random_state
+}
+
+
+class matrix_normal_gen(multi_rv_generic):
+    r"""A matrix normal random variable.
+
+    The `mean` keyword specifies the mean. The `rowcov` keyword specifies the
+    among-row covariance matrix. The `colcov` keyword specifies the
+    among-column covariance matrix.
+
+    Methods
+    -------
+    pdf(X, mean=None, rowcov=1, colcov=1)
+        Probability density function.
+    logpdf(X, mean=None, rowcov=1, colcov=1)
+        Log of the probability density function.
+    rvs(mean=None, rowcov=1, colcov=1, size=1, random_state=None)
+        Draw random samples.
+
+    Parameters
+    ----------
+    %(_matnorm_doc_default_callparams)s
+    %(_doc_random_state)s
+
+    Notes
+    -----
+    %(_matnorm_doc_callparams_note)s
+
+    The covariance matrices specified by `rowcov` and `colcov` must be
+    (symmetric) positive definite. If the samples in `X` are
+    :math:`m \times n`, then `rowcov` must be :math:`m \times m` and
+    `colcov` must be :math:`n \times n`. `mean` must be the same shape as `X`.
+
+    The probability density function for `matrix_normal` is
+
+    .. math::
+
+        f(X) = (2 \pi)^{-\frac{mn}{2}}|U|^{-\frac{n}{2}} |V|^{-\frac{m}{2}}
+               \exp\left( -\frac{1}{2} \mathrm{Tr}\left[ U^{-1} (X-M) V^{-1}
+               (X-M)^T \right] \right),
+
+    where :math:`M` is the mean, :math:`U` the among-row covariance matrix,
+    :math:`V` the among-column covariance matrix.
+
+    The `allow_singular` behaviour of the `multivariate_normal`
+    distribution is not currently supported. Covariance matrices must be
+    full rank.
+
+    The `matrix_normal` distribution is closely related to the
+    `multivariate_normal` distribution. Specifically, :math:`\mathrm{Vec}(X)`
+    (the vector formed by concatenating the columns  of :math:`X`) has a
+    multivariate normal distribution with mean :math:`\mathrm{Vec}(M)`
+    and covariance :math:`V \otimes U` (where :math:`\otimes` is the Kronecker
+    product). Sampling and pdf evaluation are
+    :math:`\mathcal{O}(m^3 + n^3 + m^2 n + m n^2)` for the matrix normal, but
+    :math:`\mathcal{O}(m^3 n^3)` for the equivalent multivariate normal,
+    making this equivalent form algorithmically inefficient.
+
+    .. versionadded:: 0.17.0
+
+    Examples
+    --------
+
+    >>> import numpy as np
+    >>> from scipy.stats import matrix_normal
+
+    >>> M = np.arange(6).reshape(3,2); M
+    array([[0, 1],
+           [2, 3],
+           [4, 5]])
+    >>> U = np.diag([1,2,3]); U
+    array([[1, 0, 0],
+           [0, 2, 0],
+           [0, 0, 3]])
+    >>> V = 0.3*np.identity(2); V
+    array([[ 0.3,  0. ],
+           [ 0. ,  0.3]])
+    >>> X = M + 0.1; X
+    array([[ 0.1,  1.1],
+           [ 2.1,  3.1],
+           [ 4.1,  5.1]])
+    >>> matrix_normal.pdf(X, mean=M, rowcov=U, colcov=V)
+    0.023410202050005054
+
+    >>> # Equivalent multivariate normal
+    >>> from scipy.stats import multivariate_normal
+    >>> vectorised_X = X.T.flatten()
+    >>> equiv_mean = M.T.flatten()
+    >>> equiv_cov = np.kron(V,U)
+    >>> multivariate_normal.pdf(vectorised_X, mean=equiv_mean, cov=equiv_cov)
+    0.023410202050005054
+
+    Alternatively, the object may be called (as a function) to fix the mean
+    and covariance parameters, returning a "frozen" matrix normal
+    random variable:
+
+    >>> rv = matrix_normal(mean=None, rowcov=1, colcov=1)
+    >>> # Frozen object with the same methods but holding the given
+    >>> # mean and covariance fixed.
+
+    """
+
+    def __init__(self, seed=None):
+        super().__init__(seed)
+        self.__doc__ = doccer.docformat(self.__doc__, matnorm_docdict_params)
+
+    def __call__(self, mean=None, rowcov=1, colcov=1, seed=None):
+        """Create a frozen matrix normal distribution.
+
+        See `matrix_normal_frozen` for more information.
+
+        """
+        return matrix_normal_frozen(mean, rowcov, colcov, seed=seed)
+
+    def _process_parameters(self, mean, rowcov, colcov):
+        """
+        Infer dimensionality from mean or covariance matrices. Handle
+        defaults. Ensure compatible dimensions.
+        """
+
+        # Process mean
+        if mean is not None:
+            mean = np.asarray(mean, dtype=float)
+            meanshape = mean.shape
+            if len(meanshape) != 2:
+                raise ValueError("Array `mean` must be two dimensional.")
+            if np.any(meanshape == 0):
+                raise ValueError("Array `mean` has invalid shape.")
+
+        # Process among-row covariance
+        rowcov = np.asarray(rowcov, dtype=float)
+        if rowcov.ndim == 0:
+            if mean is not None:
+                rowcov = rowcov * np.identity(meanshape[0])
+            else:
+                rowcov = rowcov * np.identity(1)
+        elif rowcov.ndim == 1:
+            rowcov = np.diag(rowcov)
+        rowshape = rowcov.shape
+        if len(rowshape) != 2:
+            raise ValueError("`rowcov` must be a scalar or a 2D array.")
+        if rowshape[0] != rowshape[1]:
+            raise ValueError("Array `rowcov` must be square.")
+        if rowshape[0] == 0:
+            raise ValueError("Array `rowcov` has invalid shape.")
+        numrows = rowshape[0]
+
+        # Process among-column covariance
+        colcov = np.asarray(colcov, dtype=float)
+        if colcov.ndim == 0:
+            if mean is not None:
+                colcov = colcov * np.identity(meanshape[1])
+            else:
+                colcov = colcov * np.identity(1)
+        elif colcov.ndim == 1:
+            colcov = np.diag(colcov)
+        colshape = colcov.shape
+        if len(colshape) != 2:
+            raise ValueError("`colcov` must be a scalar or a 2D array.")
+        if colshape[0] != colshape[1]:
+            raise ValueError("Array `colcov` must be square.")
+        if colshape[0] == 0:
+            raise ValueError("Array `colcov` has invalid shape.")
+        numcols = colshape[0]
+
+        # Ensure mean and covariances compatible
+        if mean is not None:
+            if meanshape[0] != numrows:
+                raise ValueError("Arrays `mean` and `rowcov` must have the "
+                                 "same number of rows.")
+            if meanshape[1] != numcols:
+                raise ValueError("Arrays `mean` and `colcov` must have the "
+                                 "same number of columns.")
+        else:
+            mean = np.zeros((numrows, numcols))
+
+        dims = (numrows, numcols)
+
+        return dims, mean, rowcov, colcov
+
+    def _process_quantiles(self, X, dims):
+        """
+        Adjust quantiles array so that the last two axes label the components of
+        each data point.
+        """
+        X = np.asarray(X, dtype=float)
+        if X.ndim == 2:
+            X = X[np.newaxis, :]
+        if X.shape[-2:] != dims:
+            raise ValueError("The shape of array `X` is not compatible "
+                             "with the distribution parameters.")
+        return X
+
+    def _logpdf(self, dims, X, mean, row_prec_rt, log_det_rowcov,
+                col_prec_rt, log_det_colcov):
+        """Log of the matrix normal probability density function.
+
+        Parameters
+        ----------
+        dims : tuple
+            Dimensions of the matrix variates
+        X : ndarray
+            Points at which to evaluate the log of the probability
+            density function
+        mean : ndarray
+            Mean of the distribution
+        row_prec_rt : ndarray
+            A decomposition such that np.dot(row_prec_rt, row_prec_rt.T)
+            is the inverse of the among-row covariance matrix
+        log_det_rowcov : float
+            Logarithm of the determinant of the among-row covariance matrix
+        col_prec_rt : ndarray
+            A decomposition such that np.dot(col_prec_rt, col_prec_rt.T)
+            is the inverse of the among-column covariance matrix
+        log_det_colcov : float
+            Logarithm of the determinant of the among-column covariance matrix
+
+        Notes
+        -----
+        As this function does no argument checking, it should not be
+        called directly; use 'logpdf' instead.
+
+        """
+        numrows, numcols = dims
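+        # Mahalanobis-like term Tr[U^{-1} (X-M) V^{-1} (X-M)^T], computed
+        # as the squared Frobenius norm of the deviations whitened on both
+        # sides by the precision square roots.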
+        roll_dev = np.moveaxis(X-mean, -1, 0)
+        scale_dev = np.tensordot(col_prec_rt.T,
+                                 np.dot(roll_dev, row_prec_rt), 1)
+        maha = np.sum(np.sum(np.square(scale_dev), axis=-1), axis=0)
+        return -0.5 * (numrows*numcols*_LOG_2PI + numcols*log_det_rowcov
+                       + numrows*log_det_colcov + maha)
+
+    def logpdf(self, X, mean=None, rowcov=1, colcov=1):
+        """Log of the matrix normal probability density function.
+
+        Parameters
+        ----------
+        X : array_like
+            Quantiles, with the last two axes of `X` denoting the components.
+        %(_matnorm_doc_default_callparams)s
+
+        Returns
+        -------
+        logpdf : ndarray
+            Log of the probability density function evaluated at `X`
+
+        Notes
+        -----
+        %(_matnorm_doc_callparams_note)s
+
+        """
+        dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov,
+                                                              colcov)
+        X = self._process_quantiles(X, dims)
+        rowpsd = _PSD(rowcov, allow_singular=False)
+        colpsd = _PSD(colcov, allow_singular=False)
+        out = self._logpdf(dims, X, mean, rowpsd.U, rowpsd.log_pdet, colpsd.U,
+                           colpsd.log_pdet)
+        return _squeeze_output(out)
+
+    def pdf(self, X, mean=None, rowcov=1, colcov=1):
+        """Matrix normal probability density function.
+
+        Parameters
+        ----------
+        X : array_like
+            Quantiles, with the last two axes of `X` denoting the components.
+        %(_matnorm_doc_default_callparams)s
+
+        Returns
+        -------
+        pdf : ndarray
+            Probability density function evaluated at `X`
+
+        Notes
+        -----
+        %(_matnorm_doc_callparams_note)s
+
+        """
+        return np.exp(self.logpdf(X, mean, rowcov, colcov))
+
+    def rvs(self, mean=None, rowcov=1, colcov=1, size=1, random_state=None):
+        """Draw random samples from a matrix normal distribution.
+
+        Parameters
+        ----------
+        %(_matnorm_doc_default_callparams)s
+        size : integer, optional
+            Number of samples to draw (default 1).
+        %(_doc_random_state)s
+
+        Returns
+        -------
+        rvs : ndarray
+            Random variates of shape (`size`, `m`, `n`), where `m` x `n` is
+            the shape of each random matrix; for ``size=1`` the leading
+            dimension is dropped.
+
+        Notes
+        -----
+        %(_matnorm_doc_callparams_note)s
+
+        """
+        size = int(size)
+        dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov,
+                                                              colcov)
+        rowchol = scipy.linalg.cholesky(rowcov, lower=True)
+        colchol = scipy.linalg.cholesky(colcov, lower=True)
+        random_state = self._get_random_state(random_state)
+        # We aren't generating standard normal variates with size=(size,
+        # dims[0], dims[1]) directly to ensure random variates remain backwards
+        # compatible. See https://github.com/scipy/scipy/pull/12312 for more
+        # details.
+        std_norm = random_state.standard_normal(
+            size=(dims[1], size, dims[0])
+        ).transpose(1, 2, 0)
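+        # This draws X = M + A Z B^T with A = chol(rowcov), B = chol(colcov)
+        # and Z i.i.d. standard normal, so Vec(X) has covariance
+        # np.kron(colcov, rowcov), matching the Kronecker identity above.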
+        out = mean + np.einsum('jp,ipq,kq->ijk',
+                               rowchol, std_norm, colchol,
+                               optimize=True)
+        if size == 1:
+            out = out.reshape(mean.shape)
+        return out
+
+
+matrix_normal = matrix_normal_gen()
+
+
+class matrix_normal_frozen(multi_rv_frozen):
+    """
+    Create a frozen matrix normal distribution.
+
+    Parameters
+    ----------
+    %(_matnorm_doc_default_callparams)s
+    seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
+        If `seed` is `None` the `~np.random.RandomState` singleton is used.
+        If `seed` is an int, a new ``RandomState`` instance is used, seeded
+        with seed.
+        If `seed` is already a ``RandomState`` or ``Generator`` instance,
+        then that object is used.
+        Default is `None`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.stats import matrix_normal
+
+    >>> distn = matrix_normal(mean=np.zeros((3,3)))
+    >>> X = distn.rvs(); X
+    array([[-0.02976962,  0.93339138, -0.09663178],
+           [ 0.67405524,  0.28250467, -0.93308929],
+           [-0.31144782,  0.74535536,  1.30412916]])
+    >>> distn.pdf(X)
+    2.5160642368346784e-05
+    >>> distn.logpdf(X)
+    -10.590229595124615
+    """
+
+    def __init__(self, mean=None, rowcov=1, colcov=1, seed=None):
+        self._dist = matrix_normal_gen(seed)
+        self.dims, self.mean, self.rowcov, self.colcov = \
+            self._dist._process_parameters(mean, rowcov, colcov)
+        self.rowpsd = _PSD(self.rowcov, allow_singular=False)
+        self.colpsd = _PSD(self.colcov, allow_singular=False)
+
+    def logpdf(self, X):
+        X = self._dist._process_quantiles(X, self.dims)
+        out = self._dist._logpdf(self.dims, X, self.mean, self.rowpsd.U,
+                                 self.rowpsd.log_pdet, self.colpsd.U,
+                                 self.colpsd.log_pdet)
+        return _squeeze_output(out)
+
+    def pdf(self, X):
+        return np.exp(self.logpdf(X))
+
+    def rvs(self, size=1, random_state=None):
+        return self._dist.rvs(self.mean, self.rowcov, self.colcov, size,
+                              random_state)
+
+
+# Set frozen generator docstrings from corresponding docstrings in
+# matrix_normal_gen and fill in default strings in class docstrings
+for name in ['logpdf', 'pdf', 'rvs']:
+    method = matrix_normal_gen.__dict__[name]
+    method_frozen = matrix_normal_frozen.__dict__[name]
+    method_frozen.__doc__ = doccer.docformat(method.__doc__,
+                                             matnorm_docdict_noparams)
+    method.__doc__ = doccer.docformat(method.__doc__, matnorm_docdict_params)
+
+_dirichlet_doc_default_callparams = """\
+alpha : array_like
+    The concentration parameters. The number of entries determines the
+    dimensionality of the distribution.
+"""
+_dirichlet_doc_frozen_callparams = ""
+
+_dirichlet_doc_frozen_callparams_note = """\
+See class definition for a detailed description of parameters."""
+
+dirichlet_docdict_params = {
+    '_dirichlet_doc_default_callparams': _dirichlet_doc_default_callparams,
+    '_doc_random_state': _doc_random_state
+}
+
+dirichlet_docdict_noparams = {
+    '_dirichlet_doc_default_callparams': _dirichlet_doc_frozen_callparams,
+    '_doc_random_state': _doc_random_state
+}
+
+
+def _dirichlet_check_parameters(alpha):
+    alpha = np.asarray(alpha)
+    if np.min(alpha) <= 0:
+        raise ValueError("All parameters must be greater than 0")
+    elif alpha.ndim != 1:
+        raise ValueError("Parameter vector 'a' must be one dimensional, "
+                         "but a.shape = %s." % (alpha.shape, ))
+    return alpha
+
+
+def _dirichlet_check_input(alpha, x):
+    x = np.asarray(x)
+
+    if x.shape[0] + 1 != alpha.shape[0] and x.shape[0] != alpha.shape[0]:
+        raise ValueError("Vector 'x' must have either the same number "
+                         "of entries as, or one entry fewer than, "
+                         "parameter vector 'a', but alpha.shape = %s "
+                         "and x.shape = %s." % (alpha.shape, x.shape))
+
+    if x.shape[0] != alpha.shape[0]:
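+        # if one entry is omitted, complete the point so that it lies on
+        # the simplex: x_K = 1 - sum(x_1, ..., x_{K-1})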
+        xk = np.array([1 - np.sum(x, 0)])
+        if xk.ndim == 1:
+            x = np.append(x, xk)
+        elif xk.ndim == 2:
+            x = np.vstack((x, xk))
+        else:
+            raise ValueError("The input must be one dimensional or a two "
+                             "dimensional matrix containing the entries.")
+
+    if np.min(x) < 0:
+        raise ValueError("Each entry in 'x' must be greater than or equal "
+                         "to zero.")
+
+    if np.max(x) > 1:
+        raise ValueError("Each entry in 'x' must be less than or equal "
+                         "to one.")
+
+    # Check x_i > 0 or alpha_i > 1
+    xeq0 = (x == 0)
+    alphalt1 = (alpha < 1)
+    if x.shape != alpha.shape:
+        alphalt1 = np.repeat(alphalt1, x.shape[-1], axis=-1).reshape(x.shape)
+    chk = np.logical_and(xeq0, alphalt1)
+
+    if np.sum(chk):
+        raise ValueError("Each entry in 'x' must be greater than zero if its "
+                         "alpha is less than one.")
+
+    if (np.abs(np.sum(x, 0) - 1.0) > 1e-9).any():
+        raise ValueError("The input vector 'x' must lie within the standard "
+                         "simplex, but np.sum(x, 0) = %s." % np.sum(x, 0))
+
+    return x
+
+
+def _lnB(alpha):
+    r"""Internal helper function to compute the log of the useful quotient.
+
+    .. math::
+
+        B(\alpha) = \frac{\prod_{i=1}^{K}\Gamma(\alpha_i)}
+                         {\Gamma\left(\sum_{i=1}^{K} \alpha_i \right)}
+
+    Parameters
+    ----------
+    %(_dirichlet_doc_default_callparams)s
+
+    Returns
+    -------
+    lnB : scalar
+        Log of the multivariate beta function; internal use only
+
+    """
+    return np.sum(gammaln(alpha)) - gammaln(np.sum(alpha))
+
+
+class dirichlet_gen(multi_rv_generic):
+    r"""A Dirichlet random variable.
+
+    The ``alpha`` keyword specifies the concentration parameters of the
+    distribution.
+
+    .. versionadded:: 0.15.0
+
+    Methods
+    -------
+    pdf(x, alpha)
+        Probability density function.
+    logpdf(x, alpha)
+        Log of the probability density function.
+    rvs(alpha, size=1, random_state=None)
+        Draw random samples from a Dirichlet distribution.
+    mean(alpha)
+        The mean of the Dirichlet distribution
+    var(alpha)
+        The variance of the Dirichlet distribution
+    entropy(alpha)
+        Compute the differential entropy of the Dirichlet distribution.
+
+    Parameters
+    ----------
+    %(_dirichlet_doc_default_callparams)s
+    %(_doc_random_state)s
+
+    Notes
+    -----
+    Each :math:`\alpha` entry must be positive. The distribution has support
+    only on the simplex defined by
+
+    .. math::
+        \sum_{i=1}^{K} x_i = 1
+
+    where :math:`0 < x_i < 1`.
+
+    If the quantiles don't lie within the simplex, a ValueError is raised.
+
+    The probability density function for `dirichlet` is
+
+    .. math::
+
+        f(x) = \frac{1}{\mathrm{B}(\boldsymbol\alpha)} \prod_{i=1}^K x_i^{\alpha_i - 1}
+
+    where
+
+    .. math::
+
+        \mathrm{B}(\boldsymbol\alpha) = \frac{\prod_{i=1}^K \Gamma(\alpha_i)}
+                                     {\Gamma\bigl(\sum_{i=1}^K \alpha_i\bigr)}
+
+    and :math:`\boldsymbol\alpha=(\alpha_1,\ldots,\alpha_K)` is the vector
+    of concentration parameters, with :math:`K` the dimension of the space
+    where :math:`x` takes values.
+
+    Note that the `dirichlet` interface is somewhat inconsistent.
+    The array returned by the rvs function is transposed
+    with respect to the format expected by the pdf and logpdf.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.stats import dirichlet
+
+    Evaluate the density of a Dirichlet random variable
+
+    >>> quantiles = np.array([0.2, 0.2, 0.6])  # specify quantiles
+    >>> alpha = np.array([0.4, 5, 15])  # specify concentration parameters
+    >>> dirichlet.pdf(quantiles, alpha)
+    0.2843831684937255
+
+    The same density on a log scale
+
+    >>> dirichlet.logpdf(quantiles, alpha)
+    -1.2574327653159187
+
+    Once we specify the Dirichlet distribution,
+    we can calculate quantities of interest
+
+    >>> dirichlet.mean(alpha)  # get the mean of the distribution
+    array([0.01960784, 0.24509804, 0.73529412])
+    >>> dirichlet.var(alpha) # get variance
+    array([0.00089829, 0.00864603, 0.00909517])
+    >>> dirichlet.entropy(alpha)  # calculate the differential entropy
+    -4.3280162474082715
+
+    We can also return random samples from the distribution
+
+    >>> dirichlet.rvs(alpha, size=1, random_state=1)
+    array([[0.00766178, 0.24670518, 0.74563305]])
+    >>> dirichlet.rvs(alpha, size=2, random_state=2)
+    array([[0.01639427, 0.1292273 , 0.85437844],
+           [0.00156917, 0.19033695, 0.80809388]])
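+
+    Note (per the interface comment above) that samples from ``rvs`` must
+    be transposed before being passed back to ``pdf``; a quick shape check:
+
+    >>> sample = dirichlet.rvs(alpha, size=2, random_state=2)
+    >>> dirichlet.pdf(sample.T, alpha).shape
+    (2,)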
+
+    Alternatively, the object may be called (as a function) to fix
+    concentration parameters, returning a "frozen" Dirichlet
+    random variable:
+
+    >>> rv = dirichlet(alpha)
+    >>> # Frozen object with the same methods but holding the given
+    >>> # concentration parameters fixed.
+
+    """
+
+    def __init__(self, seed=None):
+        super().__init__(seed)
+        self.__doc__ = doccer.docformat(self.__doc__, dirichlet_docdict_params)
+
+    def __call__(self, alpha, seed=None):
+        return dirichlet_frozen(alpha, seed=seed)
+
+    def _logpdf(self, x, alpha):
+        """Log of the Dirichlet probability density function.
+
+        Parameters
+        ----------
+        x : ndarray
+            Points at which to evaluate the log of the probability
+            density function
+        %(_dirichlet_doc_default_callparams)s
+
+        Notes
+        -----
+        As this function does no argument checking, it should not be
+        called directly; use 'logpdf' instead.
+
+        """
+        lnB = _lnB(alpha)
+        return - lnB + np.sum((xlogy(alpha - 1, x.T)).T, 0)
+
+    def logpdf(self, x, alpha):
+        """Log of the Dirichlet probability density function.
+
+        Parameters
+        ----------
+        x : array_like
+            Quantiles, with the last axis of `x` denoting the components.
+        %(_dirichlet_doc_default_callparams)s
+
+        Returns
+        -------
+        pdf : ndarray or scalar
+            Log of the probability density function evaluated at `x`.
+
+        """
+        alpha = _dirichlet_check_parameters(alpha)
+        x = _dirichlet_check_input(alpha, x)
+
+        out = self._logpdf(x, alpha)
+        return _squeeze_output(out)
+
+    def pdf(self, x, alpha):
+        """The Dirichlet probability density function.
+
+        Parameters
+        ----------
+        x : array_like
+            Quantiles, with the last axis of `x` denoting the components.
+        %(_dirichlet_doc_default_callparams)s
+
+        Returns
+        -------
+        pdf : ndarray or scalar
+            The probability density function evaluated at `x`.
+
+        """
+        alpha = _dirichlet_check_parameters(alpha)
+        x = _dirichlet_check_input(alpha, x)
+
+        out = np.exp(self._logpdf(x, alpha))
+        return _squeeze_output(out)
+
+    def mean(self, alpha):
+        """Mean of the Dirichlet distribution.
+
+        Parameters
+        ----------
+        %(_dirichlet_doc_default_callparams)s
+
+        Returns
+        -------
+        mu : ndarray or scalar
+            Mean of the Dirichlet distribution.
+
+        """
+        alpha = _dirichlet_check_parameters(alpha)
+
+        out = alpha / (np.sum(alpha))
+        return _squeeze_output(out)
+
+    def var(self, alpha):
+        """Variance of the Dirichlet distribution.
+
+        Parameters
+        ----------
+        %(_dirichlet_doc_default_callparams)s
+
+        Returns
+        -------
+        v : ndarray or scalar
+            Variance of the Dirichlet distribution.
+
+        """
+
+        alpha = _dirichlet_check_parameters(alpha)
+
+        alpha0 = np.sum(alpha)
+        out = (alpha * (alpha0 - alpha)) / ((alpha0 * alpha0) * (alpha0 + 1))
+        return _squeeze_output(out)
+
+    def entropy(self, alpha):
+        """
+        Differential entropy of the Dirichlet distribution.
+
+        Parameters
+        ----------
+        %(_dirichlet_doc_default_callparams)s
+
+        Returns
+        -------
+        h : scalar
+            Entropy of the Dirichlet distribution
+
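+        Notes
+        -----
+        The entropy is computed in closed form as
+        ``lnB(a) + (a0 - K)*psi(a0) - sum((a - 1)*psi(a))``, where
+        ``a`` is the concentration vector, ``a0 = sum(a)``,
+        ``K = len(a)`` and ``psi`` is the digamma function.
+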
+        """
+
+        alpha = _dirichlet_check_parameters(alpha)
+
+        alpha0 = np.sum(alpha)
+        lnB = _lnB(alpha)
+        K = alpha.shape[0]
+
+        out = lnB + (alpha0 - K) * scipy.special.psi(alpha0) - np.sum(
+            (alpha - 1) * scipy.special.psi(alpha))
+        return _squeeze_output(out)
+
+    def rvs(self, alpha, size=1, random_state=None):
+        """
+        Draw random samples from a Dirichlet distribution.
+
+        Parameters
+        ----------
+        %(_dirichlet_doc_default_callparams)s
+        size : int, optional
+            Number of samples to draw (default 1).
+        %(_doc_random_state)s
+
+        Returns
+        -------
+        rvs : ndarray or scalar
+            Random variates of size (`size`, `N`), where `N` is the
+            dimension of the random variable.
+
+        """
+        alpha = _dirichlet_check_parameters(alpha)
+        random_state = self._get_random_state(random_state)
+        return random_state.dirichlet(alpha, size=size)
+
+
+dirichlet = dirichlet_gen()
+
+
+class dirichlet_frozen(multi_rv_frozen):
+    def __init__(self, alpha, seed=None):
+        self.alpha = _dirichlet_check_parameters(alpha)
+        self._dist = dirichlet_gen(seed)
+
+    def logpdf(self, x):
+        return self._dist.logpdf(x, self.alpha)
+
+    def pdf(self, x):
+        return self._dist.pdf(x, self.alpha)
+
+    def mean(self):
+        return self._dist.mean(self.alpha)
+
+    def var(self):
+        return self._dist.var(self.alpha)
+
+    def entropy(self):
+        return self._dist.entropy(self.alpha)
+
+    def rvs(self, size=1, random_state=None):
+        return self._dist.rvs(self.alpha, size, random_state)
+
+
+# Set frozen generator docstrings from corresponding docstrings in
+# multivariate_normal_gen and fill in default strings in class docstrings
+for name in ['logpdf', 'pdf', 'rvs', 'mean', 'var', 'entropy']:
+    method = dirichlet_gen.__dict__[name]
+    method_frozen = dirichlet_frozen.__dict__[name]
+    method_frozen.__doc__ = doccer.docformat(
+        method.__doc__, dirichlet_docdict_noparams)
+    method.__doc__ = doccer.docformat(method.__doc__, dirichlet_docdict_params)
+
+
+_wishart_doc_default_callparams = """\
+df : int
+    Degrees of freedom; must be greater than or equal to the dimension of
+    the scale matrix
+scale : array_like
+    Symmetric positive definite scale matrix of the distribution
+"""
+
+_wishart_doc_callparams_note = ""
+
+_wishart_doc_frozen_callparams = ""
+
+_wishart_doc_frozen_callparams_note = """\
+See class definition for a detailed description of parameters."""
+
+wishart_docdict_params = {
+    '_doc_default_callparams': _wishart_doc_default_callparams,
+    '_doc_callparams_note': _wishart_doc_callparams_note,
+    '_doc_random_state': _doc_random_state
+}
+
+wishart_docdict_noparams = {
+    '_doc_default_callparams': _wishart_doc_frozen_callparams,
+    '_doc_callparams_note': _wishart_doc_frozen_callparams_note,
+    '_doc_random_state': _doc_random_state
+}
+
+
+class wishart_gen(multi_rv_generic):
+    r"""A Wishart random variable.
+
+    The `df` keyword specifies the degrees of freedom. The `scale` keyword
+    specifies the scale matrix, which must be symmetric and positive definite.
+    In this context, the scale matrix is often interpreted in terms of a
+    multivariate normal precision matrix (the inverse of the covariance
+    matrix). These arguments must satisfy the relationship
+    ``df > p - 1``, where ``p`` is the dimension of the scale matrix, but
+    see notes on using the `rvs` method with ``df < p``.
+
+    Methods
+    -------
+    pdf(x, df, scale)
+        Probability density function.
+    logpdf(x, df, scale)
+        Log of the probability density function.
+    rvs(df, scale, size=1, random_state=None)
+        Draw random samples from a Wishart distribution.
+    entropy()
+        Compute the differential entropy of the Wishart distribution.
+
+    Parameters
+    ----------
+    %(_doc_default_callparams)s
+    %(_doc_random_state)s
+
+    Raises
+    ------
+    scipy.linalg.LinAlgError
+        If the scale matrix `scale` is not positive definite.
+
+    See Also
+    --------
+    invwishart, chi2
+
+    Notes
+    -----
+    %(_doc_callparams_note)s
+
+    The scale matrix `scale` must be a symmetric positive definite
+    matrix. Singular matrices, including the symmetric positive semi-definite
+    case, are not supported. Symmetry is not checked; only the lower triangular
+    portion is used.
+
+    The Wishart distribution is often denoted
+
+    .. math::
+
+        W_p(\nu, \Sigma)
+
+    where :math:`\nu` is the degrees of freedom and :math:`\Sigma` is the
+    :math:`p \times p` scale matrix.
+
+    The probability density function for `wishart` has support over positive
+    definite matrices :math:`S`; if :math:`S \sim W_p(\nu, \Sigma)`, then
+    its PDF is given by:
+
+    .. math::
+
+        f(S) = \frac{|S|^{\frac{\nu - p - 1}{2}}}{2^{ \frac{\nu p}{2} }
+               |\Sigma|^\frac{\nu}{2} \Gamma_p \left ( \frac{\nu}{2} \right )}
+               \exp\left( -tr(\Sigma^{-1} S) / 2 \right)
+
+    If :math:`S \sim W_p(\nu, \Sigma)` (Wishart) then
+    :math:`S^{-1} \sim W_p^{-1}(\nu, \Sigma^{-1})` (inverse Wishart).
+
+    If the scale matrix is 1-dimensional and equal to one, then the Wishart
+    distribution :math:`W_1(\nu, 1)` collapses to the :math:`\chi^2(\nu)`
+    distribution.
+
+    The algorithm [2]_ implemented by the `rvs` method may
+    produce numerically singular matrices with :math:`p - 1 < \nu < p`; the
+    user may wish to check for this condition and generate replacement samples
+    as necessary.
+
+
+    .. versionadded:: 0.16.0
+
+    References
+    ----------
+    .. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
+           Wiley, 1983.
+    .. [2] W.B. Smith and R.R. Hocking, "Algorithm AS 53: Wishart Variate
+           Generator", Applied Statistics, vol. 21, pp. 341-345, 1972.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.stats import wishart, chi2
+    >>> x = np.linspace(1e-5, 8, 100)
+    >>> w = wishart.pdf(x, df=3, scale=1); w[:5]
+    array([ 0.00126156,  0.10892176,  0.14793434,  0.17400548,  0.1929669 ])
+    >>> c = chi2.pdf(x, 3); c[:5]
+    array([ 0.00126156,  0.10892176,  0.14793434,  0.17400548,  0.1929669 ])
+    >>> plt.plot(x, w)
+    >>> plt.show()
+
+    The input quantiles can be any shape of array, as long as the last
+    axis labels the components.
+
+    Alternatively, the object may be called (as a function) to fix the degrees
+    of freedom and scale parameters, returning a "frozen" Wishart random
+    variable:
+
+    >>> rv = wishart(df=1, scale=1)
+    >>> # Frozen object with the same methods but holding the given
+    >>> # degrees of freedom and scale fixed.
+
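+    As an illustrative check, the sample mean of many draws should approach
+    the distribution mean ``df * scale``:
+
+    >>> rng = np.random.default_rng(123)
+    >>> samples = wishart.rvs(df=3, scale=1, size=1000, random_state=rng)
+    >>> abs(samples.mean() - wishart.mean(df=3, scale=1)) < 0.3  # may vary
+    True
+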
+    """
+
+    def __init__(self, seed=None):
+        super().__init__(seed)
+        self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
+
+    def __call__(self, df=None, scale=None, seed=None):
+        """Create a frozen Wishart distribution.
+
+        See `wishart_frozen` for more information.
+        """
+        return wishart_frozen(df, scale, seed)
+
+    def _process_parameters(self, df, scale):
+        if scale is None:
+            scale = 1.0
+        scale = np.asarray(scale, dtype=float)
+
+        if scale.ndim == 0:
+            scale = scale[np.newaxis, np.newaxis]
+        elif scale.ndim == 1:
+            scale = np.diag(scale)
+        elif scale.ndim == 2 and not scale.shape[0] == scale.shape[1]:
+            raise ValueError("Array 'scale' must be square if it is two"
+                             " dimensional, but scale.scale = %s."
+                             % str(scale.shape))
+        elif scale.ndim > 2:
+            raise ValueError("Array 'scale' must be at most two-dimensional,"
+                             " but scale.ndim = %d" % scale.ndim)
+
+        dim = scale.shape[0]
+
+        if df is None:
+            df = dim
+        elif not np.isscalar(df):
+            raise ValueError("Degrees of freedom must be a scalar.")
+        elif df <= dim - 1:
+            raise ValueError("Degrees of freedom must be greater than the "
+                             "dimension of scale matrix minus 1.")
+
+        return dim, df, scale
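+    # Shapes accepted for `scale` above (an illustrative note): a scalar
+    # becomes the 1x1 matrix [[scale]], a length-k vector v becomes the
+    # k x k diagonal matrix diag(v), and a square matrix is used as-is;
+    # e.g. scale=[1., 2.] is treated as [[1., 0.], [0., 2.]] with dim == 2.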
+
+    def _process_quantiles(self, x, dim):
+        """
+        Adjust quantiles array so that last axis labels the components of
+        each data point.
+        """
+        x = np.asarray(x, dtype=float)
+
+        if x.ndim == 0:
+            x = x * np.eye(dim)[:, :, np.newaxis]
+        if x.ndim == 1:
+            if dim == 1:
+                x = x[np.newaxis, np.newaxis, :]
+            else:
+                x = np.diag(x)[:, :, np.newaxis]
+        elif x.ndim == 2:
+            if not x.shape[0] == x.shape[1]:
+                raise ValueError("Quantiles must be square if they are two"
+                                 " dimensional, but x.shape = %s."
+                                 % str(x.shape))
+            x = x[:, :, np.newaxis]
+        elif x.ndim == 3:
+            if not x.shape[0] == x.shape[1]:
+                raise ValueError("Quantiles must be square in the first two"
+                                 " dimensions if they are three dimensional"
+                                 ", but x.shape = %s." % str(x.shape))
+        elif x.ndim > 3:
+            raise ValueError("Quantiles must be at most two-dimensional with"
+                             " an additional dimension for multiple"
+                             "components, but x.ndim = %d" % x.ndim)
+
+        # Now we have 3-dim array; should have shape [dim, dim, *]
+        if not x.shape[0:2] == (dim, dim):
+            raise ValueError('Quantiles have incompatible dimensions: should'
+                             ' be %s, got %s.' % ((dim, dim), x.shape[0:2]))
+
+        return x
+
+    def _process_size(self, size):
+        size = np.asarray(size)
+
+        if size.ndim == 0:
+            size = size[np.newaxis]
+        elif size.ndim > 1:
+            raise ValueError('Size must be an integer or tuple of integers;'
+                             ' thus must have dimension <= 1.'
+                             ' Got size.ndim = %d' % size.ndim)
+        n = size.prod()
+        shape = tuple(size)
+
+        return n, shape
+
+    def _logpdf(self, x, dim, df, scale, log_det_scale, C):
+        """Log of the Wishart probability density function.
+
+        Parameters
+        ----------
+        x : ndarray
+            Points at which to evaluate the log of the probability
+            density function
+        dim : int
+            Dimension of the scale matrix
+        df : int
+            Degrees of freedom
+        scale : ndarray
+            Scale matrix
+        log_det_scale : float
+            Logarithm of the determinant of the scale matrix
+        C : ndarray
+            Cholesky factorization of the scale matrix, lower triangular.
+
+        Notes
+        -----
+        As this function does no argument checking, it should not be
+        called directly; use 'logpdf' instead.
+
+        """
+        # log determinant of x
+        # Note: x has components along the last axis, so that x.T has
+        # components along the 0-th axis. Then since det(A) = det(A'), this
+        # gives us a 1-dim vector of determinants.
+
+        # Retrieve tr(scale^{-1} x)
+        log_det_x = np.empty(x.shape[-1])
+        scale_inv_x = np.empty(x.shape)
+        tr_scale_inv_x = np.empty(x.shape[-1])
+        for i in range(x.shape[-1]):
+            _, log_det_x[i] = self._cholesky_logdet(x[:, :, i])
+            scale_inv_x[:, :, i] = scipy.linalg.cho_solve((C, True), x[:, :, i])
+            tr_scale_inv_x[i] = scale_inv_x[:, :, i].trace()
+
+        # Log PDF
+        out = ((0.5 * (df - dim - 1) * log_det_x - 0.5 * tr_scale_inv_x) -
+               (0.5 * df * dim * _LOG_2 + 0.5 * df * log_det_scale +
+                multigammaln(0.5*df, dim)))
+
+        return out
+
+    def logpdf(self, x, df, scale):
+        """Log of the Wishart probability density function.
+
+        Parameters
+        ----------
+        x : array_like
+            Quantiles, with the last axis of `x` denoting the components.
+            Each quantile must be a symmetric positive definite matrix.
+        %(_doc_default_callparams)s
+
+        Returns
+        -------
+        logpdf : ndarray
+            Log of the probability density function evaluated at `x`
+
+        Notes
+        -----
+        %(_doc_callparams_note)s
+
+        """
+        dim, df, scale = self._process_parameters(df, scale)
+        x = self._process_quantiles(x, dim)
+
+        # Cholesky decomposition of scale, get log(det(scale))
+        C, log_det_scale = self._cholesky_logdet(scale)
+
+        out = self._logpdf(x, dim, df, scale, log_det_scale, C)
+        return _squeeze_output(out)
+
+    def pdf(self, x, df, scale):
+        """Wishart probability density function.
+
+        Parameters
+        ----------
+        x : array_like
+            Quantiles, with the last axis of `x` denoting the components.
+            Each quantile must be a symmetric positive definite matrix.
+        %(_doc_default_callparams)s
+
+        Returns
+        -------
+        pdf : ndarray
+            Probability density function evaluated at `x`
+
+        Notes
+        -----
+        %(_doc_callparams_note)s
+
+        """
+        return np.exp(self.logpdf(x, df, scale))
+
+    def _mean(self, dim, df, scale):
+        """Mean of the Wishart distribution.
+
+        Parameters
+        ----------
+        dim : int
+            Dimension of the scale matrix
+        %(_doc_default_callparams)s
+
+        Notes
+        -----
+        As this function does no argument checking, it should not be
+        called directly; use 'mean' instead.
+
+        """
+        return df * scale
+
+    def mean(self, df, scale):
+        """Mean of the Wishart distribution.
+
+        Parameters
+        ----------
+        %(_doc_default_callparams)s
+
+        Returns
+        -------
+        mean : float
+            The mean of the distribution
+        """
+        dim, df, scale = self._process_parameters(df, scale)
+        out = self._mean(dim, df, scale)
+        return _squeeze_output(out)
+
+    def _mode(self, dim, df, scale):
+        """Mode of the Wishart distribution.
+
+        Parameters
+        ----------
+        dim : int
+            Dimension of the scale matrix
+        %(_doc_default_callparams)s
+
+        Notes
+        -----
+        As this function does no argument checking, it should not be
+        called directly; use 'mode' instead.
+
+        """
+        if df >= dim + 1:
+            out = (df-dim-1) * scale
+        else:
+            out = None
+        return out
+
+    def mode(self, df, scale):
+        """Mode of the Wishart distribution
+
+        Only valid if the degrees of freedom are greater than the dimension of
+        the scale matrix.
+
+        Parameters
+        ----------
+        %(_doc_default_callparams)s
+
+        Returns
+        -------
+        mode : float or None
+            The mode of the distribution
+        """
+        dim, df, scale = self._process_parameters(df, scale)
+        out = self._mode(dim, df, scale)
+        return _squeeze_output(out) if out is not None else out
+
+    def _var(self, dim, df, scale):
+        """Variance of the Wishart distribution.
+
+        Parameters
+        ----------
+        dim : int
+            Dimension of the scale matrix
+        %(_doc_default_callparams)s
+
+        Notes
+        -----
+        As this function does no argument checking, it should not be
+        called directly; use 'var' instead.
+
+        """
+        var = scale**2
+        diag = scale.diagonal()  # 1 x dim array
+        var += np.outer(diag, diag)
+        var *= df
+        return var
+
+    def var(self, df, scale):
+        """Variance of the Wishart distribution.
+
+        Parameters
+        ----------
+        %(_doc_default_callparams)s
+
+        Returns
+        -------
+        var : float
+            The variance of the distribution
+        """
+        dim, df, scale = self._process_parameters(df, scale)
+        out = self._var(dim, df, scale)
+        return _squeeze_output(out)
+
+    def _standard_rvs(self, n, shape, dim, df, random_state):
+        """
+        Parameters
+        ----------
+        n : integer
+            Number of variates to generate
+        shape : iterable
+            Shape of the variates to generate
+        dim : int
+            Dimension of the scale matrix
+        df : int
+            Degrees of freedom
+        random_state : {None, int, `numpy.random.Generator`,
+                        `numpy.random.RandomState`}, optional
+
+            If `seed` is None (or `np.random`), the `numpy.random.RandomState`
+            singleton is used.
+            If `seed` is an int, a new ``RandomState`` instance is used,
+            seeded with `seed`.
+            If `seed` is already a ``Generator`` or ``RandomState`` instance
+            then that instance is used.
+
+        Notes
+        -----
+        As this function does no argument checking, it should not be
+        called directly; use 'rvs' instead.
+
+        """
+        # Random normal variates for off-diagonal elements
+        n_tril = dim * (dim-1) // 2
+        covariances = random_state.normal(
+            size=n*n_tril).reshape(shape+(n_tril,))
+
+        # Random chi-square variates for diagonal elements
+        variances = (np.r_[[random_state.chisquare(df-(i+1)+1, size=n)**0.5
+                            for i in range(dim)]].reshape((dim,) +
+                                                          shape[::-1]).T)
+
+        # Create the lower-triangular matrices A
+        A = np.zeros(shape + (dim, dim))
+
+        # Input the covariances
+        size_idx = tuple([slice(None, None, None)]*len(shape))
+        tril_idx = np.tril_indices(dim, k=-1)
+        A[size_idx + tril_idx] = covariances
+
+        # Input the variances
+        diag_idx = np.diag_indices(dim)
+        A[size_idx + diag_idx] = variances
+
+        return A
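+    # The construction above is the Bartlett decomposition: A is lower
+    # triangular with A[i, i] ~ sqrt(chi2(df - i)) and A[i, j] ~ N(0, 1) for
+    # i > j, so that A @ A.T ~ W(df, I).  A quick illustrative check (a
+    # sketch that pokes at this private helper; the tolerance is arbitrary):
+    #
+    # >>> import numpy as np
+    # >>> A = wishart_gen()._standard_rvs(2000, (2000,), 2, 5.,
+    # ...                                 np.random.RandomState(0))
+    # >>> B = A @ np.swapaxes(A, -1, -2)
+    # >>> np.allclose(B.mean(axis=0), 5. * np.eye(2), atol=0.3)  # may vary
+    # True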
+
+    def _rvs(self, n, shape, dim, df, C, random_state):
+        """Draw random samples from a Wishart distribution.
+
+        Parameters
+        ----------
+        n : integer
+            Number of variates to generate
+        shape : iterable
+            Shape of the variates to generate
+        dim : int
+            Dimension of the scale matrix
+        df : int
+            Degrees of freedom
+        C : ndarray
+            Cholesky factorization of the scale matrix, lower triangular.
+        %(_doc_random_state)s
+
+        Notes
+        -----
+        As this function does no argument checking, it should not be
+        called directly; use 'rvs' instead.
+
+        """
+        random_state = self._get_random_state(random_state)
+        # Calculate the matrices A, which are actually lower triangular
+        # Cholesky factorizations of a matrix B such that B ~ W(df, I)
+        A = self._standard_rvs(n, shape, dim, df, random_state)
+
+        # Calculate SA = C A A' C', where SA ~ W(df, scale)
+        # Note: this is the product of a (lower) (lower) (lower)' (lower)'
+        #       or, denoting B = AA', it is C B C' where C is the lower
+        #       triangular Cholesky factorization of the scale matrix.
+        #       this appears to conflict with the instructions in [1]_, which
+        #       suggest that it should be D' B D where D is the lower
+        #       triangular factorization of the scale matrix. However, it is
+        #       meant to refer to the Bartlett (1933) representation of a
+        #       Wishart random variate as L A A' L' where L is lower triangular
+        #       so it appears that understanding D' to be upper triangular
+        #       is either a typo in or misreading of [1]_.
+        for index in np.ndindex(shape):
+            CA = np.dot(C, A[index])
+            A[index] = np.dot(CA, CA.T)
+
+        return A
+
+    def rvs(self, df, scale, size=1, random_state=None):
+        """Draw random samples from a Wishart distribution.
+
+        Parameters
+        ----------
+        %(_doc_default_callparams)s
+        size : integer or iterable of integers, optional
+            Number of samples to draw (default 1).
+        %(_doc_random_state)s
+
+        Returns
+        -------
+        rvs : ndarray
+            Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
+            the dimension of the scale matrix.
+
+        Notes
+        -----
+        %(_doc_callparams_note)s
+
+        """
+        n, shape = self._process_size(size)
+        dim, df, scale = self._process_parameters(df, scale)
+
+        # Cholesky decomposition of scale
+        C = scipy.linalg.cholesky(scale, lower=True)
+
+        out = self._rvs(n, shape, dim, df, C, random_state)
+
+        return _squeeze_output(out)
+
+    def _entropy(self, dim, df, log_det_scale):
+        """Compute the differential entropy of the Wishart.
+
+        Parameters
+        ----------
+        dim : int
+            Dimension of the scale matrix
+        df : int
+            Degrees of freedom
+        log_det_scale : float
+            Logarithm of the determinant of the scale matrix
+
+        Notes
+        -----
+        As this function does no argument checking, it should not be
+        called directly; use 'entropy' instead.
+
+        """
+        return (
+            0.5 * (dim+1) * log_det_scale +
+            0.5 * dim * (dim+1) * _LOG_2 +
+            multigammaln(0.5*df, dim) -
+            0.5 * (df - dim - 1) * np.sum(
+                [psi(0.5*(df + 1 - (i+1))) for i in range(dim)]
+            ) +
+            0.5 * df * dim
+        )
+
+    def entropy(self, df, scale):
+        """Compute the differential entropy of the Wishart.
+
+        Parameters
+        ----------
+        %(_doc_default_callparams)s
+
+        Returns
+        -------
+        h : scalar
+            Entropy of the Wishart distribution
+
+        Notes
+        -----
+        %(_doc_callparams_note)s
+
+        """
+        dim, df, scale = self._process_parameters(df, scale)
+        _, log_det_scale = self._cholesky_logdet(scale)
+        return self._entropy(dim, df, log_det_scale)
+
+    def _cholesky_logdet(self, scale):
+        """Compute Cholesky decomposition and determine (log(det(scale)).
+
+        Parameters
+        ----------
+        scale : ndarray
+            Scale matrix.
+
+        Returns
+        -------
+        c_decomp : ndarray
+            The Cholesky decomposition of `scale`.
+        logdet : scalar
+            The log of the determinant of `scale`.
+
+        Notes
+        -----
+        This computation of ``logdet`` is equivalent to
+        ``np.linalg.slogdet(scale)``, but roughly twice as fast.
+
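+        A quick illustration of the equivalence (an illustrative sketch):
+
+        >>> import numpy as np
+        >>> rng = np.random.default_rng(0)
+        >>> A = rng.normal(size=(3, 3))
+        >>> scale = A @ A.T + 3 * np.eye(3)
+        >>> C = np.linalg.cholesky(scale)
+        >>> np.allclose(2 * np.sum(np.log(C.diagonal())),
+        ...             np.linalg.slogdet(scale)[1])
+        True
+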
+        """
+        c_decomp = scipy.linalg.cholesky(scale, lower=True)
+        logdet = 2 * np.sum(np.log(c_decomp.diagonal()))
+        return c_decomp, logdet
+
+
+wishart = wishart_gen()
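+
+# Quick consistency sketch (illustrative only): in one dimension,
+# W_1(df, 1) coincides with chi2(df), as noted in the class docstring:
+#
+# >>> import numpy as np
+# >>> from scipy.stats import chi2, wishart
+# >>> x = np.linspace(0.1, 10, 50)
+# >>> np.allclose(wishart.pdf(x, df=4, scale=1), chi2.pdf(x, 4))
+# True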
+
+
+class wishart_frozen(multi_rv_frozen):
+    """Create a frozen Wishart distribution.
+
+    Parameters
+    ----------
+    df : array_like
+        Degrees of freedom of the distribution
+    scale : array_like
+        Scale matrix of the distribution
+    seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
+        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
+        singleton is used.
+        If `seed` is an int, a new ``RandomState`` instance is used,
+        seeded with `seed`.
+        If `seed` is already a ``Generator`` or ``RandomState`` instance then
+        that instance is used.
+
+    """
+    def __init__(self, df, scale, seed=None):
+        self._dist = wishart_gen(seed)
+        self.dim, self.df, self.scale = self._dist._process_parameters(
+            df, scale)
+        self.C, self.log_det_scale = self._dist._cholesky_logdet(self.scale)
+
+    def logpdf(self, x):
+        x = self._dist._process_quantiles(x, self.dim)
+
+        out = self._dist._logpdf(x, self.dim, self.df, self.scale,
+                                 self.log_det_scale, self.C)
+        return _squeeze_output(out)
+
+    def pdf(self, x):
+        return np.exp(self.logpdf(x))
+
+    def mean(self):
+        out = self._dist._mean(self.dim, self.df, self.scale)
+        return _squeeze_output(out)
+
+    def mode(self):
+        out = self._dist._mode(self.dim, self.df, self.scale)
+        return _squeeze_output(out) if out is not None else out
+
+    def var(self):
+        out = self._dist._var(self.dim, self.df, self.scale)
+        return _squeeze_output(out)
+
+    def rvs(self, size=1, random_state=None):
+        n, shape = self._dist._process_size(size)
+        out = self._dist._rvs(n, shape, self.dim, self.df,
+                              self.C, random_state)
+        return _squeeze_output(out)
+
+    def entropy(self):
+        return self._dist._entropy(self.dim, self.df, self.log_det_scale)
+
+
+# Set frozen generator docstrings from corresponding docstrings in
+# Wishart and fill in default strings in class docstrings
+for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs', 'entropy']:
+    method = wishart_gen.__dict__[name]
+    method_frozen = wishart_frozen.__dict__[name]
+    method_frozen.__doc__ = doccer.docformat(
+        method.__doc__, wishart_docdict_noparams)
+    method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
+
+
+def _cho_inv_batch(a, check_finite=True):
+    """
+    Invert the matrices a_i, using a Cholesky factorization of A, where
+    a_i resides in the last two dimensions of a and the other indices describe
+    the index i.
+
+    Overwrites the data in a.
+
+    Parameters
+    ----------
+    a : array
+        Array of matrices to invert, where the matrices themselves are stored
+        in the last two dimensions.
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+
+    Returns
+    -------
+    x : array
+        Array of inverses of the matrices ``a_i``.
+
+    See Also
+    --------
+    scipy.linalg.cholesky : Cholesky factorization of a matrix
+
+    """
+    if check_finite:
+        a1 = asarray_chkfinite(a)
+    else:
+        a1 = asarray(a)
+    if len(a1.shape) < 2 or a1.shape[-2] != a1.shape[-1]:
+        raise ValueError('expected square matrix in last two dimensions')
+
+    potrf, potri = get_lapack_funcs(('potrf', 'potri'), (a1,))
+
+    triu_rows, triu_cols = np.triu_indices(a.shape[-2], k=1)
+    for index in np.ndindex(a1.shape[:-2]):
+
+        # Cholesky decomposition
+        a1[index], info = potrf(a1[index], lower=True, overwrite_a=False,
+                                clean=False)
+        if info > 0:
+            raise LinAlgError("%d-th leading minor not positive definite"
+                              % info)
+        if info < 0:
+            raise ValueError('illegal value in %d-th argument of internal'
+                             ' potrf' % -info)
+        # Inversion
+        a1[index], info = potri(a1[index], lower=True, overwrite_c=False)
+        if info > 0:
+            raise LinAlgError("the inverse could not be computed")
+        if info < 0:
+            raise ValueError('illegal value in %d-th argument of internal'
+                             ' potrf' % -info)
+
+        # Make symmetric (dpotri only fills in the lower triangle)
+        a1[index][triu_rows, triu_cols] = a1[index][triu_cols, triu_rows]
+
+    return a1
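+
+# Illustrative use of this private batched inverse (a sketch):
+#
+# >>> import numpy as np
+# >>> a = np.array([2. * np.eye(2), 4. * np.eye(2)])
+# >>> inv = _cho_inv_batch(a)          # overwrites `a` in place
+# >>> np.allclose(inv, [np.eye(2) / 2., np.eye(2) / 4.])
+# True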
+
+
+class invwishart_gen(wishart_gen):
+    r"""An inverse Wishart random variable.
+
+    The `df` keyword specifies the degrees of freedom. The `scale` keyword
+    specifies the scale matrix, which must be symmetric and positive definite.
+    In this context, the scale matrix is often interpreted in terms of a
+    multivariate normal covariance matrix.
+
+    Methods
+    -------
+    pdf(x, df, scale)
+        Probability density function.
+    logpdf(x, df, scale)
+        Log of the probability density function.
+    mean(df, scale)
+        Mean of the inverse Wishart distribution.
+    mode(df, scale)
+        Mode of the inverse Wishart distribution.
+    var(df, scale)
+        Variance of each element of the inverse Wishart distribution.
+    rvs(df, scale, size=1, random_state=None)
+        Draw random samples from an inverse Wishart distribution.
+
+    Parameters
+    ----------
+    %(_doc_default_callparams)s
+    %(_doc_random_state)s
+
+    Raises
+    ------
+    scipy.linalg.LinAlgError
+        If the scale matrix `scale` is not positive definite.
+
+    See Also
+    --------
+    wishart
+
+    Notes
+    -----
+    %(_doc_callparams_note)s
+
+    The scale matrix `scale` must be a symmetric positive definite
+    matrix. Singular matrices, including the symmetric positive semi-definite
+    case, are not supported. Symmetry is not checked; only the lower triangular
+    portion is used.
+
+    The inverse Wishart distribution is often denoted
+
+    .. math::
+
+        W_p^{-1}(\nu, \Psi)
+
+    where :math:`\nu` is the degrees of freedom and :math:`\Psi` is the
+    :math:`p \times p` scale matrix.
+
+    The probability density function for `invwishart` has support over positive
+    definite matrices :math:`S`; if :math:`S \sim W^{-1}_p(\nu, \Sigma)`,
+    then its PDF is given by:
+
+    .. math::
+
+        f(S) = \frac{|\Sigma|^\frac{\nu}{2}}{2^{ \frac{\nu p}{2} }
+               |S|^{\frac{\nu + p + 1}{2}} \Gamma_p \left(\frac{\nu}{2} \right)}
+               \exp\left( -tr(\Sigma S^{-1}) / 2 \right)
+
+    If :math:`S \sim W_p^{-1}(\nu, \Psi)` (inverse Wishart) then
+    :math:`S^{-1} \sim W_p(\nu, \Psi^{-1})` (Wishart).
+
+    If the scale matrix is 1-dimensional and equal to one, then the inverse
+    Wishart distribution :math:`W_1^{-1}(\nu, 1)` collapses to the inverse
+    Gamma distribution with parameters shape = :math:`\frac{\nu}{2}` and
+    scale = :math:`\frac{1}{2}`.
+
+    .. versionadded:: 0.16.0
+
+    References
+    ----------
+    .. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
+           Wiley, 1983.
+    .. [2] M.C. Jones, "Generating Inverse Wishart Matrices", Communications
+           in Statistics - Simulation and Computation, vol. 14.2, pp.511-514,
+           1985.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.stats import invwishart, invgamma
+    >>> x = np.linspace(0.01, 1, 100)
+    >>> iw = invwishart.pdf(x, df=6, scale=1)
+    >>> iw[:3]
+    array([  1.20546865e-15,   5.42497807e-06,   4.45813929e-03])
+    >>> ig = invgamma.pdf(x, 6/2., scale=1./2)
+    >>> ig[:3]
+    array([  1.20546865e-15,   5.42497807e-06,   4.45813929e-03])
+    >>> plt.plot(x, iw)
+    >>> plt.show()
+
+    The input quantiles can be any shape of array, as long as the last
+    axis labels the components.
+
+    Alternatively, the object may be called (as a function) to fix the degrees
+    of freedom and scale parameters, returning a "frozen" inverse Wishart
+    random variable:
+
+    >>> rv = invwishart(df=1, scale=1)
+    >>> # Frozen object with the same methods but holding the given
+    >>> # degrees of freedom and scale fixed.
+
+    """
+
+    def __init__(self, seed=None):
+        super().__init__(seed)
+        self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
+
+    def __call__(self, df=None, scale=None, seed=None):
+        """Create a frozen inverse Wishart distribution.
+
+        See `invwishart_frozen` for more information.
+
+        """
+        return invwishart_frozen(df, scale, seed)
+
+    def _logpdf(self, x, dim, df, scale, log_det_scale):
+        """Log of the inverse Wishart probability density function.
+
+        Parameters
+        ----------
+        x : ndarray
+            Points at which to evaluate the log of the probability
+            density function.
+        dim : int
+            Dimension of the scale matrix
+        df : int
+            Degrees of freedom
+        scale : ndarray
+            Scale matrix
+        log_det_scale : float
+            Logarithm of the determinant of the scale matrix
+
+        Notes
+        -----
+        As this function does no argument checking, it should not be
+        called directly; use 'logpdf' instead.
+
+        """
+        log_det_x = np.empty(x.shape[-1])
+        x_inv = np.copy(x).T
+        if dim > 1:
+            _cho_inv_batch(x_inv)  # works in-place
+        else:
+            x_inv = 1./x_inv
+        tr_scale_x_inv = np.empty(x.shape[-1])
+
+        for i in range(x.shape[-1]):
+            C, lower = scipy.linalg.cho_factor(x[:, :, i], lower=True)
+
+            log_det_x[i] = 2 * np.sum(np.log(C.diagonal()))
+
+            tr_scale_x_inv[i] = np.dot(scale, x_inv[i]).trace()
+
+        # Log PDF
+        out = ((0.5 * df * log_det_scale - 0.5 * tr_scale_x_inv) -
+               (0.5 * df * dim * _LOG_2 + 0.5 * (df + dim + 1) * log_det_x) -
+               multigammaln(0.5*df, dim))
+
+        return out
+
+    def logpdf(self, x, df, scale):
+        """Log of the inverse Wishart probability density function.
+
+        Parameters
+        ----------
+        x : array_like
+            Quantiles, with the last axis of `x` denoting the components.
+            Each quantile must be a symmetric positive definite matrix.
+        %(_doc_default_callparams)s
+
+        Returns
+        -------
+        logpdf : ndarray
+            Log of the probability density function evaluated at `x`
+
+        Notes
+        -----
+        %(_doc_callparams_note)s
+
+        """
+        dim, df, scale = self._process_parameters(df, scale)
+        x = self._process_quantiles(x, dim)
+        _, log_det_scale = self._cholesky_logdet(scale)
+        out = self._logpdf(x, dim, df, scale, log_det_scale)
+        return _squeeze_output(out)
+
+    def pdf(self, x, df, scale):
+        """Inverse Wishart probability density function.
+
+        Parameters
+        ----------
+        x : array_like
+            Quantiles, with the last axis of `x` denoting the components.
+            Each quantile must be a symmetric positive definite matrix.
+        %(_doc_default_callparams)s
+
+        Returns
+        -------
+        pdf : ndarray
+            Probability density function evaluated at `x`
+
+        Notes
+        -----
+        %(_doc_callparams_note)s
+
+        """
+        return np.exp(self.logpdf(x, df, scale))
+
+    def _mean(self, dim, df, scale):
+        """Mean of the inverse Wishart distribution.
+
+        Parameters
+        ----------
+        dim : int
+            Dimension of the scale matrix
+        %(_doc_default_callparams)s
+
+        Notes
+        -----
+        As this function does no argument checking, it should not be
+        called directly; use 'mean' instead.
+
+        """
+        if df > dim + 1:
+            out = scale / (df - dim - 1)
+        else:
+            out = None
+        return out
+
+    def mean(self, df, scale):
+        """Mean of the inverse Wishart distribution.
+
+        Only valid if the degrees of freedom are greater than the dimension of
+        the scale matrix plus one.
+
+        Parameters
+        ----------
+        %(_doc_default_callparams)s
+
+        Returns
+        -------
+        mean : float or None
+            The mean of the distribution
+
+        """
+        dim, df, scale = self._process_parameters(df, scale)
+        out = self._mean(dim, df, scale)
+        return _squeeze_output(out) if out is not None else out
+
+    def _mode(self, dim, df, scale):
+        """Mode of the inverse Wishart distribution.
+
+        Parameters
+        ----------
+        dim : int
+            Dimension of the scale matrix
+        %(_doc_default_callparams)s
+
+        Notes
+        -----
+        As this function does no argument checking, it should not be
+        called directly; use 'mode' instead.
+
+        """
+        return scale / (df + dim + 1)
+
+    def mode(self, df, scale):
+        """Mode of the inverse Wishart distribution.
+
+        Parameters
+        ----------
+        %(_doc_default_callparams)s
+
+        Returns
+        -------
+        mode : float
+            The mode of the distribution
+
+        """
+        dim, df, scale = self._process_parameters(df, scale)
+        out = self._mode(dim, df, scale)
+        return _squeeze_output(out)
+
+    def _var(self, dim, df, scale):
+        """Variance of the inverse Wishart distribution.
+
+        Parameters
+        ----------
+        dim : int
+            Dimension of the scale matrix
+        %(_doc_default_callparams)s
+
+        Notes
+        -----
+        As this function does no argument checking, it should not be
+        called directly; use 'var' instead.
+
+        """
+        if df > dim + 3:
+            var = (df - dim + 1) * scale**2
+            diag = scale.diagonal()  # 1 x dim array
+            var += (df - dim - 1) * np.outer(diag, diag)
+            var /= (df - dim) * (df - dim - 1)**2 * (df - dim - 3)
+        else:
+            var = None
+        return var
+
+    def var(self, df, scale):
+        """Variance of the inverse Wishart distribution.
+
+        Only valid if the degrees of freedom are greater than the dimension of
+        the scale matrix plus three.
+
+        Parameters
+        ----------
+        %(_doc_default_callparams)s
+
+        Returns
+        -------
+        var : float
+            The variance of the distribution
+        """
+        dim, df, scale = self._process_parameters(df, scale)
+        out = self._var(dim, df, scale)
+        return _squeeze_output(out) if out is not None else out
+
+    def _rvs(self, n, shape, dim, df, C, random_state):
+        """Draw random samples from an inverse Wishart distribution.
+
+        Parameters
+        ----------
+        n : integer
+            Number of variates to generate
+        shape : iterable
+            Shape of the variates to generate
+        dim : int
+            Dimension of the scale matrix
+        df : int
+            Degrees of freedom
+        C : ndarray
+            Cholesky factorization of the inverse of the scale matrix,
+            lower triangular.
+        %(_doc_random_state)s
+
+        Notes
+        -----
+        As this function does no argument checking, it should not be
+        called directly; use 'rvs' instead.
+
+        """
+        random_state = self._get_random_state(random_state)
+        # Get random draws A such that A ~ W(df, I)
+        A = super()._standard_rvs(n, shape, dim, df, random_state)
+
+        # Calculate SA = (CA)'^{-1} (CA)^{-1} ~ iW(df, scale)
+        eye = np.eye(dim)
+        trtrs = get_lapack_funcs(('trtrs'), (A,))
+
+        for index in np.ndindex(A.shape[:-2]):
+            # Calculate CA
+            CA = np.dot(C, A[index])
+            # Get (C A)^{-1} via triangular solver
+            if dim > 1:
+                CA, info = trtrs(CA, eye, lower=True)
+                if info > 0:
+                    raise LinAlgError("Singular matrix.")
+                if info < 0:
+                    raise ValueError('Illegal value in %d-th argument of'
+                                     ' internal trtrs' % -info)
+            else:
+                CA = 1. / CA
+            # Get SA
+            A[index] = np.dot(CA.T, CA)
+
+        return A
+
+    def rvs(self, df, scale, size=1, random_state=None):
+        """Draw random samples from an inverse Wishart distribution.
+
+        Parameters
+        ----------
+        %(_doc_default_callparams)s
+        size : integer or iterable of integers, optional
+            Number of samples to draw (default 1).
+        %(_doc_random_state)s
+
+        Returns
+        -------
+        rvs : ndarray
+            Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
+            the dimension of the scale matrix.
+
+        Notes
+        -----
+        %(_doc_callparams_note)s
+
+        """
+        n, shape = self._process_size(size)
+        dim, df, scale = self._process_parameters(df, scale)
+
+        # Invert the scale
+        eye = np.eye(dim)
+        L, lower = scipy.linalg.cho_factor(scale, lower=True)
+        inv_scale = scipy.linalg.cho_solve((L, lower), eye)
+        # Cholesky decomposition of inverted scale
+        C = scipy.linalg.cholesky(inv_scale, lower=True)
+
+        out = self._rvs(n, shape, dim, df, C, random_state)
+
+        return _squeeze_output(out)
+
+    def entropy(self):
+        # Need to find reference for inverse Wishart entropy
+        raise AttributeError
+
+
+invwishart = invwishart_gen()
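+
+# Illustrative check of the documented mean (a sketch): for df > dim + 1 the
+# mean is scale / (df - dim - 1):
+#
+# >>> import numpy as np
+# >>> np.allclose(invwishart.mean(df=5, scale=np.eye(2)), np.eye(2) / 2.)
+# True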
+
+
+class invwishart_frozen(multi_rv_frozen):
+    def __init__(self, df, scale, seed=None):
+        """Create a frozen inverse Wishart distribution.
+
+        Parameters
+        ----------
+        df : array_like
+            Degrees of freedom of the distribution
+        scale : array_like
+            Scale matrix of the distribution
+        seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
+            If `seed` is None (or `np.random`), the `numpy.random.RandomState`
+            singleton is used.
+            If `seed` is an int, a new ``RandomState`` instance is used,
+            seeded with `seed`.
+            If `seed` is already a ``Generator`` or ``RandomState`` instance
+            then that instance is used.
+
+        """
+        self._dist = invwishart_gen(seed)
+        self.dim, self.df, self.scale = self._dist._process_parameters(
+            df, scale
+        )
+
+        # Get the determinant via Cholesky factorization
+        C, lower = scipy.linalg.cho_factor(self.scale, lower=True)
+        self.log_det_scale = 2 * np.sum(np.log(C.diagonal()))
+
+        # Get the inverse using the Cholesky factorization
+        eye = np.eye(self.dim)
+        self.inv_scale = scipy.linalg.cho_solve((C, lower), eye)
+
+        # Get the Cholesky factorization of the inverse scale
+        self.C = scipy.linalg.cholesky(self.inv_scale, lower=True)
+
+    def logpdf(self, x):
+        x = self._dist._process_quantiles(x, self.dim)
+        out = self._dist._logpdf(x, self.dim, self.df, self.scale,
+                                 self.log_det_scale)
+        return _squeeze_output(out)
+
+    def pdf(self, x):
+        return np.exp(self.logpdf(x))
+
+    def mean(self):
+        out = self._dist._mean(self.dim, self.df, self.scale)
+        return _squeeze_output(out) if out is not None else out
+
+    def mode(self):
+        out = self._dist._mode(self.dim, self.df, self.scale)
+        return _squeeze_output(out)
+
+    def var(self):
+        out = self._dist._var(self.dim, self.df, self.scale)
+        return _squeeze_output(out) if out is not None else out
+
+    def rvs(self, size=1, random_state=None):
+        n, shape = self._dist._process_size(size)
+
+        out = self._dist._rvs(n, shape, self.dim, self.df,
+                              self.C, random_state)
+
+        return _squeeze_output(out)
+
+    def entropy(self):
+        # Need to find reference for inverse Wishart entropy
+        raise AttributeError
+
+
+# Set frozen generator docstrings from corresponding docstrings in
+# inverse Wishart and fill in default strings in class docstrings
+for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs']:
+    method = invwishart_gen.__dict__[name]
+    method_frozen = invwishart_frozen.__dict__[name]
+    method_frozen.__doc__ = doccer.docformat(
+        method.__doc__, wishart_docdict_noparams)
+    method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
+
+_multinomial_doc_default_callparams = """\
+n : int
+    Number of trials
+p : array_like
+    Probability of a trial falling into each category; should sum to 1
+"""
+
+_multinomial_doc_callparams_note = """\
+`n` should be a positive integer. Each element of `p` should be in the
+interval :math:`[0,1]` and the elements should sum to 1. If they do not sum to
+1, the last element of the `p` array is not used and is replaced with the
+remaining probability left over from the earlier elements.
+"""
+
+_multinomial_doc_frozen_callparams = ""
+
+_multinomial_doc_frozen_callparams_note = """\
+See class definition for a detailed description of parameters."""
+
+multinomial_docdict_params = {
+    '_doc_default_callparams': _multinomial_doc_default_callparams,
+    '_doc_callparams_note': _multinomial_doc_callparams_note,
+    '_doc_random_state': _doc_random_state
+}
+
+multinomial_docdict_noparams = {
+    '_doc_default_callparams': _multinomial_doc_frozen_callparams,
+    '_doc_callparams_note': _multinomial_doc_frozen_callparams_note,
+    '_doc_random_state': _doc_random_state
+}
+
+
+class multinomial_gen(multi_rv_generic):
+    r"""A multinomial random variable.
+
+    Methods
+    -------
+    pmf(x, n, p)
+        Probability mass function.
+    logpmf(x, n, p)
+        Log of the probability mass function.
+    rvs(n, p, size=None, random_state=None)
+        Draw random samples from a multinomial distribution.
+    mean(n, p)
+        Mean of the multinomial distribution.
+    entropy(n, p)
+        Compute the entropy of the multinomial distribution.
+    cov(n, p)
+        Compute the covariance matrix of the multinomial distribution.
+
+    Parameters
+    ----------
+    %(_doc_default_callparams)s
+    %(_doc_random_state)s
+
+    Notes
+    -----
+    %(_doc_callparams_note)s
+
+    The probability mass function for `multinomial` is
+
+    .. math::
+
+        f(x) = \frac{n!}{x_1! \cdots x_k!} p_1^{x_1} \cdots p_k^{x_k},
+
+    supported on :math:`x=(x_1, \ldots, x_k)` where each :math:`x_i` is a
+    nonnegative integer and their sum is :math:`n`.
+
+    .. versionadded:: 0.19.0
+
+    Examples
+    --------
+
+    >>> from scipy.stats import multinomial
+    >>> rv = multinomial(8, [0.3, 0.2, 0.5])
+    >>> rv.pmf([1, 3, 4])
+    0.042000000000000072
+
+    The multinomial distribution for :math:`k=2` is identical to the
+    corresponding binomial distribution (tiny numerical differences
+    notwithstanding):
+
+    >>> from scipy.stats import binom
+    >>> multinomial.pmf([3, 4], n=7, p=[0.4, 0.6])
+    0.29030399999999973
+    >>> binom.pmf(3, 7, 0.4)
+    0.29030400000000012
+
+    The functions ``pmf``, ``logpmf``, ``entropy``, and ``cov`` support
+    broadcasting, under the convention that the vector parameters (``x`` and
+    ``p``) are interpreted as if each row along the last axis is a single
+    object. For instance:
+
+    >>> multinomial.pmf([[3, 4], [3, 5]], n=[7, 8], p=[.3, .7])
+    array([0.2268945,  0.25412184])
+
+    Here, ``x.shape == (2, 2)``, ``n.shape == (2,)``, and ``p.shape == (2,)``,
+    but following the rules mentioned above they behave as if the rows
+    ``[3, 4]`` and ``[3, 5]`` in ``x`` and ``[.3, .7]`` in ``p`` were a single
+    object, and as if we had ``x.shape = (2,)``, ``n.shape = (2,)``, and
+    ``p.shape = ()``. To obtain the individual elements without broadcasting,
+    we would do this:
+
+    >>> multinomial.pmf([3, 4], n=7, p=[.3, .7])
+    0.2268945
+    >>> multinomial.pmf([3, 5], 8, p=[.3, .7])
+    0.25412184
+
+    This broadcasting also works for ``cov``, where the output objects are
+    square matrices of size ``p.shape[-1]``. For example:
+
+    >>> multinomial.cov([4, 5], [[.3, .7], [.4, .6]])
+    array([[[ 0.84, -0.84],
+            [-0.84,  0.84]],
+           [[ 1.2 , -1.2 ],
+            [-1.2 ,  1.2 ]]])
+
+    In this example, ``n.shape == (2,)`` and ``p.shape == (2, 2)``, and
+    following the rules above, these broadcast as if ``p.shape == (2,)``.
+    Thus the result should also be of shape ``(2,)``, but since each output is
+    a :math:`2 \times 2` matrix, the result in fact has shape ``(2, 2, 2)``,
+    where ``result[0]`` is equal to ``multinomial.cov(n=4, p=[.3, .7])`` and
+    ``result[1]`` is equal to ``multinomial.cov(n=5, p=[.4, .6])``.
+
+    Alternatively, the object may be called (as a function) to fix the `n` and
+    `p` parameters, returning a "frozen" multinomial random variable:
+
+    >>> rv = multinomial(n=7, p=[.3, .7])
+    >>> # Frozen object with the same methods but holding the given
+    >>> # degrees of freedom and scale fixed.
+
+    See Also
+    --------
+    scipy.stats.binom : The binomial distribution.
+    numpy.random.Generator.multinomial : Sampling from the multinomial distribution.
+    scipy.stats.multivariate_hypergeom :
+        The multivariate hypergeometric distribution.
+    """  # noqa: E501
+
+    def __init__(self, seed=None):
+        super().__init__(seed)
+        self.__doc__ = \
+            doccer.docformat(self.__doc__, multinomial_docdict_params)
+
+    def __call__(self, n, p, seed=None):
+        """Create a frozen multinomial distribution.
+
+        See `multinomial_frozen` for more information.
+        """
+        return multinomial_frozen(n, p, seed)
+
+    def _process_parameters(self, n, p, eps=1e-15):
+        """Returns: n_, p_, npcond.
+
+        n_ and p_ are arrays of the correct shape; npcond is a boolean array
+        flagging values out of the domain.
+        """
+        p = np.array(p, dtype=np.float64, copy=True)
+        p_adjusted = 1. - p[..., :-1].sum(axis=-1)
+        i_adjusted = np.abs(p_adjusted) > eps
+        p[i_adjusted, -1] = p_adjusted[i_adjusted]
+
+        # true for bad p
+        pcond = np.any(p < 0, axis=-1)
+        pcond |= np.any(p > 1, axis=-1)
+
+        n = np.array(n, dtype=np.int_, copy=True)
+
+        # true for bad n
+        ncond = n <= 0
+
+        return n, p, ncond | pcond
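+    # Illustration of the renormalization above (a sketch): when the given
+    # probabilities do not sum to 1, the last entry is replaced by the
+    # leftover mass, so p=[0.25, 0.25, 0.4] behaves as [0.25, 0.25, 0.5]:
+    #
+    # >>> p1 = multinomial.pmf([2, 3, 5], n=10, p=[0.25, 0.25, 0.4])
+    # >>> p2 = multinomial.pmf([2, 3, 5], n=10, p=[0.25, 0.25, 0.5])
+    # >>> bool(p1 == p2)
+    # True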
+
+    def _process_quantiles(self, x, n, p):
+        """Returns: x_, xcond.
+
+        x_ is an int array; xcond is a boolean array flagging values out of the
+        domain.
+        """
+        xx = np.asarray(x, dtype=np.int_)
+
+        if xx.ndim == 0:
+            raise ValueError("x must be an array.")
+
+        if xx.size != 0 and not xx.shape[-1] == p.shape[-1]:
+            raise ValueError("Size of each quantile should be size of p: "
+                             "received %d, but expected %d." %
+                             (xx.shape[-1], p.shape[-1]))
+
+        # true for x out of the domain
+        cond = np.any(xx != x, axis=-1)
+        cond |= np.any(xx < 0, axis=-1)
+        cond = cond | (np.sum(xx, axis=-1) != n)
+
+        return xx, cond
+
+    def _checkresult(self, result, cond, bad_value):
+        result = np.asarray(result)
+
+        if cond.ndim != 0:
+            result[cond] = bad_value
+        elif cond:
+            if result.ndim == 0:
+                return bad_value
+            result[...] = bad_value
+        return result
+
+    def _logpmf(self, x, n, p):
+        return gammaln(n+1) + np.sum(xlogy(x, p) - gammaln(x+1), axis=-1)
+
+    def logpmf(self, x, n, p):
+        """Log of the Multinomial probability mass function.
+
+        Parameters
+        ----------
+        x : array_like
+            Quantiles, with the last axis of `x` denoting the components.
+        %(_doc_default_callparams)s
+
+        Returns
+        -------
+        logpmf : ndarray or scalar
+            Log of the probability mass function evaluated at `x`
+
+        Notes
+        -----
+        %(_doc_callparams_note)s
+        """
+        n, p, npcond = self._process_parameters(n, p)
+        x, xcond = self._process_quantiles(x, n, p)
+
+        result = self._logpmf(x, n, p)
+
+        # replace values for which x was out of the domain; broadcast
+        # xcond to the right shape
+        xcond_ = xcond | np.zeros(npcond.shape, dtype=np.bool_)
+        result = self._checkresult(result, xcond_, np.NINF)
+
+        # replace values bad for n or p; broadcast npcond to the right shape
+        npcond_ = npcond | np.zeros(xcond.shape, dtype=np.bool_)
+        return self._checkresult(result, npcond_, np.nan)
+
+    def pmf(self, x, n, p):
+        """Multinomial probability mass function.
+
+        Parameters
+        ----------
+        x : array_like
+            Quantiles, with the last axis of `x` denoting the components.
+        %(_doc_default_callparams)s
+
+        Returns
+        -------
+        pmf : ndarray or scalar
+            Probability mass function evaluated at `x`
+
+        Notes
+        -----
+        %(_doc_callparams_note)s
+        """
+        return np.exp(self.logpmf(x, n, p))
+
+    def mean(self, n, p):
+        """Mean of the Multinomial distribution.
+
+        Parameters
+        ----------
+        %(_doc_default_callparams)s
+
+        Returns
+        -------
+        mean : float
+            The mean of the distribution
+        """
+        n, p, npcond = self._process_parameters(n, p)
+        result = n[..., np.newaxis]*p
+        return self._checkresult(result, npcond, np.nan)
+
+    def cov(self, n, p):
+        """Covariance matrix of the multinomial distribution.
+
+        Parameters
+        ----------
+        %(_doc_default_callparams)s
+
+        Returns
+        -------
+        cov : ndarray
+            The covariance matrix of the distribution
+        """
+        n, p, npcond = self._process_parameters(n, p)
+
+        nn = n[..., np.newaxis, np.newaxis]
+        result = nn * np.einsum('...j,...k->...jk', -p, p)
+
+        # change the diagonal
+        for i in range(p.shape[-1]):
+            result[..., i, i] += n*p[..., i]
+
+        return self._checkresult(result, npcond, np.nan)
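+    # The result above implements Cov[X] = n * (diag(p) - p p^T); e.g. for
+    # n=10, p=[0.2, 0.8] this gives [[1.6, -1.6], [-1.6, 1.6]] (a sketch):
+    #
+    # >>> import numpy as np
+    # >>> np.allclose(multinomial.cov(10, [0.2, 0.8]),
+    # ...             [[1.6, -1.6], [-1.6, 1.6]])
+    # True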
+
+    def entropy(self, n, p):
+        r"""Compute the entropy of the multinomial distribution.
+
+        The entropy is computed using this expression:
+
+        .. math::
+
+            f(x) = - \log n! - n\sum_{i=1}^k p_i \log p_i +
+            \sum_{i=1}^k \sum_{x=0}^n \binom n x p_i^x(1-p_i)^{n-x} \log x!
+
+        Parameters
+        ----------
+        %(_doc_default_callparams)s
+
+        Returns
+        -------
+        h : scalar
+            Entropy of the Multinomial distribution
+
+        Notes
+        -----
+        %(_doc_callparams_note)s
+        """
+        n, p, npcond = self._process_parameters(n, p)
+
+        x = np.r_[1:np.max(n)+1]
+
+        term1 = n*np.sum(entr(p), axis=-1)
+        term1 -= gammaln(n+1)
+
+        n = n[..., np.newaxis]
+        new_axes_needed = max(p.ndim, n.ndim) - x.ndim + 1
+        x.shape += (1,)*new_axes_needed
+
+        term2 = np.sum(binom.pmf(x, n, p)*gammaln(x+1),
+                       axis=(-1, -1-new_axes_needed))
+
+        return self._checkresult(term1 + term2, npcond, np.nan)
+
+    def rvs(self, n, p, size=None, random_state=None):
+        """Draw random samples from a Multinomial distribution.
+
+        Parameters
+        ----------
+        %(_doc_default_callparams)s
+        size : integer or iterable of integers, optional
+            Number of samples to draw. Default is ``None``, in which case a
+            single variate is returned.
+        %(_doc_random_state)s
+
+        Returns
+        -------
+        rvs : ndarray or scalar
+            Random variates of shape (`size`, `len(p)`)
+
+        Notes
+        -----
+        %(_doc_callparams_note)s
+        """
+        n, p, npcond = self._process_parameters(n, p)
+        random_state = self._get_random_state(random_state)
+        return random_state.multinomial(n, p, size)
+
+
+multinomial = multinomial_gen()
+
+
+class multinomial_frozen(multi_rv_frozen):
+    r"""Create a frozen Multinomial distribution.
+
+    Parameters
+    ----------
+    n : int
+        Number of trials
+    p : array_like
+        Probability of a trial falling into each category; should sum to 1
+    seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
+        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
+        singleton is used.
+        If `seed` is an int, a new ``RandomState`` instance is used,
+        seeded with `seed`.
+        If `seed` is already a ``Generator`` or ``RandomState`` instance then
+        that instance is used.
+    """
+    def __init__(self, n, p, seed=None):
+        self._dist = multinomial_gen(seed)
+        self.n, self.p, self.npcond = self._dist._process_parameters(n, p)
+
+        # monkey patch self._dist
+        def _process_parameters(n, p):
+            return self.n, self.p, self.npcond
+
+        self._dist._process_parameters = _process_parameters
+
+    def logpmf(self, x):
+        return self._dist.logpmf(x, self.n, self.p)
+
+    def pmf(self, x):
+        return self._dist.pmf(x, self.n, self.p)
+
+    def mean(self):
+        return self._dist.mean(self.n, self.p)
+
+    def cov(self):
+        return self._dist.cov(self.n, self.p)
+
+    def entropy(self):
+        return self._dist.entropy(self.n, self.p)
+
+    def rvs(self, size=1, random_state=None):
+        return self._dist.rvs(self.n, self.p, size, random_state)
+
+
+# Set frozen generator docstrings from corresponding docstrings in
+# multinomial and fill in default strings in class docstrings
+for name in ['logpmf', 'pmf', 'mean', 'cov', 'rvs']:
+    method = multinomial_gen.__dict__[name]
+    method_frozen = multinomial_frozen.__dict__[name]
+    method_frozen.__doc__ = doccer.docformat(
+        method.__doc__, multinomial_docdict_noparams)
+    method.__doc__ = doccer.docformat(method.__doc__,
+                                      multinomial_docdict_params)
+
+
+class special_ortho_group_gen(multi_rv_generic):
+    r"""A Special Orthogonal matrix (SO(N)) random variable.
+
+    Return a random rotation matrix, drawn from the Haar distribution
+    (the only uniform distribution on SO(N)) with a determinant of +1.
+
+    The `dim` keyword specifies the dimension N.
+
+    Methods
+    -------
+    rvs(dim=None, size=1, random_state=None)
+        Draw random samples from SO(N).
+
+    Parameters
+    ----------
+    dim : scalar
+        Dimension of matrices
+    seed : {None, int, np.random.RandomState, np.random.Generator}, optional
+        Used for drawing random variates.
+        If `seed` is `None`, the `~np.random.RandomState` singleton is used.
+        If `seed` is an int, a new ``RandomState`` instance is used, seeded
+        with seed.
+        If `seed` is already a ``RandomState`` or ``Generator`` instance,
+        then that object is used.
+        Default is `None`.
+
+    Notes
+    -----
+    This class wraps the random_rot code from the MDP Toolkit,
+    https://github.com/mdp-toolkit/mdp-toolkit.
+
+    The algorithm is described in the paper
+    Stewart, G.W., "The efficient generation of random orthogonal
+    matrices with an application to condition estimators", SIAM Journal
+    on Numerical Analysis, 17(3), pp. 403-409, 1980.
+    For more information see
+    https://en.wikipedia.org/wiki/Orthogonal_matrix#Randomization
+
+    See also the similar `ortho_group`. For a random rotation in three
+    dimensions, see `scipy.spatial.transform.Rotation.random`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.stats import special_ortho_group
+    >>> x = special_ortho_group.rvs(3)
+
+    >>> np.dot(x, x.T)
+    array([[  1.00000000e+00,   1.13231364e-17,  -2.86852790e-16],
+           [  1.13231364e-17,   1.00000000e+00,  -1.46845020e-16],
+           [ -2.86852790e-16,  -1.46845020e-16,   1.00000000e+00]])
+
+    >>> import scipy.linalg
+    >>> scipy.linalg.det(x)
+    1.0
+
+    This generates one random matrix from SO(3). It is orthogonal and
+    has a determinant of 1.
+
+    Alternatively, the object may be called (as a function) to fix the `dim`
+    parameter, returning a "frozen" special_ortho_group random variable:
+
+    >>> rv = special_ortho_group(5)
+    >>> # Frozen object with the same methods but holding the
+    >>> # dimension parameter fixed.
+
+    See Also
+    --------
+    ortho_group, scipy.spatial.transform.Rotation.random
+
+    """
+
+    def __init__(self, seed=None):
+        super().__init__(seed)
+        self.__doc__ = doccer.docformat(self.__doc__)
+
+    def __call__(self, dim=None, seed=None):
+        """Create a frozen SO(N) distribution.
+
+        See `special_ortho_group_frozen` for more information.
+        """
+        return special_ortho_group_frozen(dim, seed=seed)
+
+    def _process_parameters(self, dim):
+        """Dimension N must be specified; it cannot be inferred."""
+        if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):
+            raise ValueError("""Dimension of rotation must be specified,
+                                and must be a scalar greater than 1.""")
+
+        return dim
+
+    def rvs(self, dim, size=1, random_state=None):
+        """Draw random samples from SO(N).
+
+        Parameters
+        ----------
+        dim : integer
+            Dimension of rotation space (N).
+        size : integer, optional
+            Number of samples to draw (default 1).
+
+        Returns
+        -------
+        rvs : ndarray
+            Random rotation matrices; array of shape (`size`, `dim`, `dim`),
+            or (`dim`, `dim`) if ``size == 1``.
+
+        """
+        random_state = self._get_random_state(random_state)
+
+        size = int(size)
+        size = (size,) if size > 1 else ()
+
+        dim = self._process_parameters(dim)
+
+        # H represents a (dim, dim) matrix, while D represents the diagonal of
+        # a (dim, dim) diagonal matrix. The algorithm that follows is
+        # broadcasted on the leading shape in `size` to vectorize along
+        # samples.
+        H = np.empty(size + (dim, dim))
+        H[..., :, :] = np.eye(dim)
+        D = np.empty(size + (dim,))
+
+        for n in range(dim-1):
+
+            # x is a vector with length dim-n, xrow and xcol are views of it as
+            # a row vector and column vector respectively. It's important they
+            # are views and not copies because we are going to modify x
+            # in-place.
+            x = random_state.normal(size=size + (dim-n,))
+            xrow = x[..., None, :]
+            xcol = x[..., :, None]
+
+            # This is the squared norm of x, without vectorization it would be
+            # dot(x, x), to have proper broadcasting we use matmul and squeeze
+            # out (convert to scalar) the resulting 1x1 matrix
+            norm2 = np.matmul(xrow, xcol).squeeze((-2, -1))
+
+            x0 = x[..., 0].copy()
+            D[..., n] = np.where(x0 != 0, np.sign(x0), 1)
+            x[..., 0] += D[..., n]*np.sqrt(norm2)
+
+            # In renormalizing x we have to append an additional axis with
+            # [..., None] to broadcast the scalar against the vector x
+            x /= np.sqrt((norm2 - x0**2 + x[..., 0]**2) / 2.)[..., None]
+
+            # Householder transformation, without vectorization the RHS can be
+            # written as outer(H @ x, x) (apart from the slicing)
+            H[..., :, n:] -= np.matmul(H[..., :, n:], xcol) * xrow
+
+        D[..., -1] = (-1)**(dim-1)*D[..., :-1].prod(axis=-1)
+
+        # Without vectorization this could be written as H = diag(D) @ H,
+        # left-multiplication by a diagonal matrix amounts to multiplying each
+        # row of H by an element of the diagonal, so we add a dummy axis for
+        # the column index
+        H *= D[..., :, None]
+        return H
+
+
+special_ortho_group = special_ortho_group_gen()
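+
+
+# Illustrative sketch (hypothetical helper, not part of the scipy API):
+# verify that the Householder-chain sampler above produces orthogonal
+# matrices with determinant +1, i.e. genuine elements of SO(N).
+def _check_special_ortho_group_sample():
+    x = special_ortho_group.rvs(4, size=3, random_state=123)
+    # x has shape (3, 4, 4); each sample should satisfy x @ x.T == I ...
+    assert np.allclose(x @ np.swapaxes(x, -2, -1), np.eye(4), atol=1e-10)
+    # ... and det(x) == +1, which distinguishes SO(N) from the full O(N).
+    assert np.allclose(np.linalg.det(x), 1.0)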
+
+
+class special_ortho_group_frozen(multi_rv_frozen):
+    def __init__(self, dim=None, seed=None):
+        """Create a frozen SO(N) distribution.
+
+        Parameters
+        ----------
+        dim : scalar
+            Dimension of matrices
+        seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
+            If `seed` is None (or `np.random`), the `numpy.random.RandomState`
+            singleton is used.
+            If `seed` is an int, a new ``RandomState`` instance is used,
+            seeded with `seed`.
+            If `seed` is already a ``Generator`` or ``RandomState`` instance
+            then that instance is used.
+
+        Examples
+        --------
+        >>> from scipy.stats import special_ortho_group
+        >>> g = special_ortho_group(5)
+        >>> x = g.rvs()
+
+        """
+        self._dist = special_ortho_group_gen(seed)
+        self.dim = self._dist._process_parameters(dim)
+
+    def rvs(self, size=1, random_state=None):
+        return self._dist.rvs(self.dim, size, random_state)
+
+
+class ortho_group_gen(multi_rv_generic):
+    r"""An Orthogonal matrix (O(N)) random variable.
+
+    Return a random orthogonal matrix, drawn from the O(N) Haar
+    distribution (the only uniform distribution on O(N)).
+
+    The `dim` keyword specifies the dimension N.
+
+    Methods
+    -------
+    rvs(dim=None, size=1, random_state=None)
+        Draw random samples from O(N).
+
+    Parameters
+    ----------
+    dim : scalar
+        Dimension of matrices
+    seed : {None, int, np.random.RandomState, np.random.Generator}, optional
+        Used for drawing random variates.
+        If `seed` is `None`, the `~np.random.RandomState` singleton is used.
+        If `seed` is an int, a new ``RandomState`` instance is used, seeded
+        with seed.
+        If `seed` is already a ``RandomState`` or ``Generator`` instance,
+        then that object is used.
+        Default is `None`.
+
+    Notes
+    -----
+    This class is closely related to `special_ortho_group`.
+
+    Some care is taken to avoid numerical error, as per the paper by Mezzadri.
+
+    References
+    ----------
+    .. [1] F. Mezzadri, "How to generate random matrices from the classical
+           compact groups", :arXiv:`math-ph/0609050v2`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.stats import ortho_group
+    >>> x = ortho_group.rvs(3)
+
+    >>> np.dot(x, x.T)
+    array([[  1.00000000e+00,   1.13231364e-17,  -2.86852790e-16],
+           [  1.13231364e-17,   1.00000000e+00,  -1.46845020e-16],
+           [ -2.86852790e-16,  -1.46845020e-16,   1.00000000e+00]])
+
+    >>> import scipy.linalg
+    >>> np.fabs(scipy.linalg.det(x))
+    1.0
+
+    This generates one random matrix from O(3). It is orthogonal and
+    has a determinant of +1 or -1.
+
+    Alternatively, the object may be called (as a function) to fix the `dim`
+    parameter, returning a "frozen" ortho_group random variable:
+
+    >>> rv = ortho_group(5)
+    >>> # Frozen object with the same methods but holding the
+    >>> # dimension parameter fixed.
+
+    See Also
+    --------
+    special_ortho_group
+    """
+
+    def __init__(self, seed=None):
+        super().__init__(seed)
+        self.__doc__ = doccer.docformat(self.__doc__)
+
+    def __call__(self, dim=None, seed=None):
+        """Create a frozen O(N) distribution.
+
+        See `ortho_group_frozen` for more information.
+        """
+        return ortho_group_frozen(dim, seed=seed)
+
+    def _process_parameters(self, dim):
+        """Dimension N must be specified; it cannot be inferred."""
+        if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):
+            raise ValueError("Dimension of rotation must be specified,"
+                             "and must be a scalar greater than 1.")
+
+        return dim
+
+    def rvs(self, dim, size=1, random_state=None):
+        """Draw random samples from O(N).
+
+        Parameters
+        ----------
+        dim : integer
+            Dimension of rotation space (N).
+        size : integer, optional
+            Number of samples to draw (default 1).
+
+        Returns
+        -------
+        rvs : ndarray
+            Random orthogonal matrices; shape (size, dim, dim), or
+            (dim, dim) if size == 1.
+
+        """
+        random_state = self._get_random_state(random_state)
+
+        size = int(size)
+        if size > 1 and NumpyVersion(np.__version__) < '1.22.0':
+            return np.array([self.rvs(dim, size=1, random_state=random_state)
+                             for i in range(size)])
+
+        dim = self._process_parameters(dim)
+
+        size = (size,) if size > 1 else ()
+        z = random_state.normal(size=size + (dim, dim))
+        q, r = np.linalg.qr(z)
+        # The last two dimensions are the rows and columns of R matrices.
+        # Extract the diagonals. Note that this eliminates a dimension.
+        d = r.diagonal(offset=0, axis1=-2, axis2=-1)
+        # Add back a dimension for proper broadcasting: we're dividing
+        # each row of each R matrix by the diagonal of the R matrix.
+        q *= (d/abs(d))[..., np.newaxis, :]  # to broadcast properly
+        return q
+
+
+ortho_group = ortho_group_gen()
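+
+
+# Illustrative sketch (hypothetical helper, not part of the scipy API): the
+# QR + sign-correction trick used in `ortho_group_gen.rvs`, written out for
+# a single matrix. A plain `np.linalg.qr` of a Gaussian matrix is not Haar
+# distributed; rescaling the columns of Q by d/|d|, where d is the diagonal
+# of R, makes the factorization unique and removes the sign bias (Mezzadri).
+def _qr_haar_sketch(dim, rng):
+    z = rng.normal(size=(dim, dim))
+    q, r = np.linalg.qr(z)
+    d = np.diagonal(r)
+    # Equivalent to choosing the unique QR factorization in which R has a
+    # positive diagonal, which makes Q Haar-distributed on O(dim).
+    return q * (d / np.abs(d))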
+
+
+class ortho_group_frozen(multi_rv_frozen):
+    def __init__(self, dim=None, seed=None):
+        """Create a frozen O(N) distribution.
+
+        Parameters
+        ----------
+        dim : scalar
+            Dimension of matrices
+        seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
+            If `seed` is None (or `np.random`), the `numpy.random.RandomState`
+            singleton is used.
+            If `seed` is an int, a new ``RandomState`` instance is used,
+            seeded with `seed`.
+            If `seed` is already a ``Generator`` or ``RandomState`` instance
+            then that instance is used.
+
+        Examples
+        --------
+        >>> from scipy.stats import ortho_group
+        >>> g = ortho_group(5)
+        >>> x = g.rvs()
+
+        """
+        self._dist = ortho_group_gen(seed)
+        self.dim = self._dist._process_parameters(dim)
+
+    def rvs(self, size=1, random_state=None):
+        return self._dist.rvs(self.dim, size, random_state)
+
+
+class random_correlation_gen(multi_rv_generic):
+    r"""A random correlation matrix.
+
+    Return a random correlation matrix, given a vector of eigenvalues.
+
+    The `eigs` keyword specifies the eigenvalues of the correlation matrix,
+    and implies the dimension.
+
+    Methods
+    -------
+    rvs(eigs=None, random_state=None)
+        Draw random correlation matrices, all with eigenvalues eigs.
+
+    Parameters
+    ----------
+    eigs : 1d ndarray
+        Eigenvalues of correlation matrix
+    seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
+        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
+        singleton is used.
+        If `seed` is an int, a new ``RandomState`` instance is used,
+        seeded with `seed`.
+        If `seed` is already a ``Generator`` or ``RandomState`` instance
+        then that instance is used.
+    tol : float, optional
+        Tolerance for input parameter checks
+    diag_tol : float, optional
+        Tolerance for deviation of the diagonal of the resulting
+        matrix. Default: 1e-7
+
+    Raises
+    ------
+    RuntimeError
+        Floating point error prevented generating a valid correlation
+        matrix.
+
+    Returns
+    -------
+    rvs : ndarray
+        Random correlation matrix of shape (dim, dim), with eigenvalues
+        eigs.
+
+    Notes
+    -----
+
+    Generates a random correlation matrix following a numerically stable
+    algorithm spelled out by Davies & Higham. This algorithm uses a single O(N)
+    similarity transformation to construct a symmetric positive semi-definite
+    matrix, and applies a series of Givens rotations to scale it to have ones
+    on the diagonal.
+
+    References
+    ----------
+
+    .. [1] Davies, Philip I; Higham, Nicholas J; "Numerically stable generation
+           of correlation matrices and their factors", BIT 2000, Vol. 40,
+           No. 4, pp. 640-651
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.stats import random_correlation
+    >>> rng = np.random.default_rng()
+    >>> x = random_correlation.rvs((.5, .8, 1.2, 1.5), random_state=rng)
+    >>> x
+    array([[ 1.        , -0.02423399,  0.03130519,  0.4946965 ],
+           [-0.02423399,  1.        ,  0.20334736,  0.04039817],
+           [ 0.03130519,  0.20334736,  1.        ,  0.02694275],
+           [ 0.4946965 ,  0.04039817,  0.02694275,  1.        ]])
+    >>> import scipy.linalg
+    >>> e, v = scipy.linalg.eigh(x)
+    >>> e
+    array([ 0.5,  0.8,  1.2,  1.5])
+
+    """
+
+    def __init__(self, seed=None):
+        super().__init__(seed)
+        self.__doc__ = doccer.docformat(self.__doc__)
+
+    def __call__(self, eigs, seed=None, tol=1e-13, diag_tol=1e-7):
+        """Create a frozen random correlation matrix.
+
+        See `random_correlation_frozen` for more information.
+        """
+        return random_correlation_frozen(eigs, seed=seed, tol=tol,
+                                         diag_tol=diag_tol)
+
+    def _process_parameters(self, eigs, tol):
+        eigs = np.asarray(eigs, dtype=float)
+        dim = eigs.size
+
+        if eigs.ndim != 1 or eigs.shape[0] != dim or dim <= 1:
+            raise ValueError("Array 'eigs' must be a vector of length "
+                             "greater than 1.")
+
+        if np.fabs(np.sum(eigs) - dim) > tol:
+            raise ValueError("Sum of eigenvalues must equal dimensionality.")
+
+        for x in eigs:
+            if x < -tol:
+                raise ValueError("All eigenvalues must be non-negative.")
+
+        return dim, eigs
+
+    def _givens_to_1(self, aii, ajj, aij):
+        """Computes a 2x2 Givens matrix to put 1's on the diagonal.
+
+        The input matrix is a 2x2 symmetric matrix M = [ aii aij ; aij ajj ].
+
+        The output matrix g is a 2x2 rotation matrix of the form
+        [ c s ; -s c ];  the elements c and s are returned.
+
+        Applying the output matrix to the input matrix (as b=g.T M g)
+        results in a matrix with bii=1, provided tr(M) - det(M) >= 1
+        and floating point issues do not occur. Otherwise, some other
+        valid rotation is returned. When tr(M)==2, also bjj=1.
+
+        """
+        aiid = aii - 1.
+        ajjd = ajj - 1.
+
+        if ajjd == 0:
+            # ajj==1, so swap aii and ajj to avoid division by zero
+            return 0., 1.
+
+        dd = math.sqrt(max(aij**2 - aiid*ajjd, 0))
+
+        # t is chosen to avoid cancellation [1]
+        t = (aij + math.copysign(dd, aij)) / ajjd
+        c = 1. / math.sqrt(1. + t*t)
+        if c == 0:
+            # Underflow
+            s = 1.0
+        else:
+            s = c*t
+        return c, s
+
+    def _to_corr(self, m):
+        """
+        Given a psd matrix m, rotate to put ones on the diagonal, turning it
+        into a correlation matrix. This also requires the trace to equal the
+        dimensionality. Note: this modifies the input matrix in place.
+        """
+        # Check requirements for in-place Givens
+        if not (m.flags.c_contiguous and m.dtype == np.float64 and
+                m.shape[0] == m.shape[1]):
+            raise ValueError()
+
+        d = m.shape[0]
+        for i in range(d-1):
+            if m[i, i] == 1:
+                continue
+            elif m[i, i] > 1:
+                for j in range(i+1, d):
+                    if m[j, j] < 1:
+                        break
+            else:
+                for j in range(i+1, d):
+                    if m[j, j] > 1:
+                        break
+
+            c, s = self._givens_to_1(m[i, i], m[j, j], m[i, j])
+
+            # Use BLAS to apply Givens rotations in-place. Equivalent to:
+            # g = np.eye(d)
+            # g[i, i] = g[j,j] = c
+            # g[j, i] = -s; g[i, j] = s
+            # m = np.dot(g.T, np.dot(m, g))
+            mv = m.ravel()
+            drot(mv, mv, c, -s, n=d,
+                 offx=i*d, incx=1, offy=j*d, incy=1,
+                 overwrite_x=True, overwrite_y=True)
+            drot(mv, mv, c, -s, n=d,
+                 offx=i, incx=d, offy=j, incy=d,
+                 overwrite_x=True, overwrite_y=True)
+
+        return m
+
+    def rvs(self, eigs, random_state=None, tol=1e-13, diag_tol=1e-7):
+        """Draw random correlation matrices.
+
+        Parameters
+        ----------
+        eigs : 1d ndarray
+            Eigenvalues of correlation matrix
+        tol : float, optional
+            Tolerance for input parameter checks
+        diag_tol : float, optional
+            Tolerance for deviation of the diagonal of the resulting
+            matrix. Default: 1e-7
+
+        Raises
+        ------
+        RuntimeError
+            Floating point error prevented generating a valid correlation
+            matrix.
+
+        Returns
+        -------
+        rvs : ndarray
+            Random correlation matrix of shape (dim, dim), with eigenvalues
+            eigs.
+
+        """
+        dim, eigs = self._process_parameters(eigs, tol=tol)
+
+        random_state = self._get_random_state(random_state)
+
+        m = ortho_group.rvs(dim, random_state=random_state)
+        m = np.dot(np.dot(m, np.diag(eigs)), m.T)  # Set the trace of m
+        m = self._to_corr(m)  # Carefully rotate to unit diagonal
+
+        # Check diagonal
+        if abs(m.diagonal() - 1).max() > diag_tol:
+            raise RuntimeError("Failed to generate a valid correlation matrix")
+
+        return m
+
+
+random_correlation = random_correlation_gen()
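+
+
+# Illustrative sketch (hypothetical helper, not part of the scipy API): the
+# Davies & Higham construction above first fixes the spectrum through the
+# similarity transform m = Q diag(eigs) Q.T and then sweeps Givens rotations
+# to reach a unit diagonal, so both properties can be checked on a sample.
+def _check_random_correlation_sample():
+    eigs = (0.5, 0.8, 1.2, 1.5)  # must sum to the dimension, here 4
+    x = random_correlation.rvs(eigs, random_state=123)
+    assert np.allclose(np.diag(x), 1.0)                       # unit diagonal
+    assert np.allclose(np.sort(np.linalg.eigvalsh(x)), eigs)  # spectrum kept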
+
+
+class random_correlation_frozen(multi_rv_frozen):
+    def __init__(self, eigs, seed=None, tol=1e-13, diag_tol=1e-7):
+        """Create a frozen random correlation matrix distribution.
+
+        Parameters
+        ----------
+        eigs : 1d ndarray
+            Eigenvalues of correlation matrix
+        seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
+            If `seed` is None (or `np.random`), the `numpy.random.RandomState`
+            singleton is used.
+            If `seed` is an int, a new ``RandomState`` instance is used,
+            seeded with `seed`.
+            If `seed` is already a ``Generator`` or ``RandomState`` instance
+            then that instance is used.
+        tol : float, optional
+            Tolerance for input parameter checks
+        diag_tol : float, optional
+            Tolerance for deviation of the diagonal of the resulting
+            matrix. Default: 1e-7
+
+        Raises
+        ------
+        RuntimeError
+            Floating point error prevented generating a valid correlation
+            matrix.
+
+        Returns
+        -------
+        rvs : ndarray
+            Random correlation matrix of shape (dim, dim), with eigenvalues
+            eigs.
+        """
+
+        self._dist = random_correlation_gen(seed)
+        self.tol = tol
+        self.diag_tol = diag_tol
+        _, self.eigs = self._dist._process_parameters(eigs, tol=self.tol)
+
+    def rvs(self, random_state=None):
+        return self._dist.rvs(self.eigs, random_state=random_state,
+                              tol=self.tol, diag_tol=self.diag_tol)
+
+
+class unitary_group_gen(multi_rv_generic):
+    r"""A matrix-valued U(N) random variable.
+
+    Return a random unitary matrix.
+
+    The `dim` keyword specifies the dimension N.
+
+    Methods
+    -------
+    rvs(dim=None, size=1, random_state=None)
+        Draw random samples from U(N).
+
+    Parameters
+    ----------
+    dim : scalar
+        Dimension of matrices
+    seed : {None, int, np.random.RandomState, np.random.Generator}, optional
+        Used for drawing random variates.
+        If `seed` is `None`, the `~np.random.RandomState` singleton is used.
+        If `seed` is an int, a new ``RandomState`` instance is used, seeded
+        with seed.
+        If `seed` is already a ``RandomState`` or ``Generator`` instance,
+        then that object is used.
+        Default is `None`.
+
+    Notes
+    -----
+    This class is similar to `ortho_group`.
+
+    References
+    ----------
+    .. [1] F. Mezzadri, "How to generate random matrices from the classical
+           compact groups", :arXiv:`math-ph/0609050v2`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.stats import unitary_group
+    >>> x = unitary_group.rvs(3)
+
+    >>> np.dot(x, x.conj().T)
+    array([[  1.00000000e+00,   1.13231364e-17,  -2.86852790e-16],
+           [  1.13231364e-17,   1.00000000e+00,  -1.46845020e-16],
+           [ -2.86852790e-16,  -1.46845020e-16,   1.00000000e+00]])
+
+    This generates one random matrix from U(3). The dot product confirms that
+    it is unitary up to machine precision.
+
+    Alternatively, the object may be called (as a function) to fix the `dim`
+    parameter, returning a "frozen" unitary_group random variable:
+
+    >>> rv = unitary_group(5)
+
+    See Also
+    --------
+    ortho_group
+
+    """
+
+    def __init__(self, seed=None):
+        super().__init__(seed)
+        self.__doc__ = doccer.docformat(self.__doc__)
+
+    def __call__(self, dim=None, seed=None):
+        """Create a frozen (U(N)) n-dimensional unitary matrix distribution.
+
+        See `unitary_group_frozen` for more information.
+        """
+        return unitary_group_frozen(dim, seed=seed)
+
+    def _process_parameters(self, dim):
+        """Dimension N must be specified; it cannot be inferred."""
+        if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):
+            raise ValueError("Dimension of rotation must be specified,"
+                             "and must be a scalar greater than 1.")
+
+        return dim
+
+    def rvs(self, dim, size=1, random_state=None):
+        """Draw random samples from U(N).
+
+        Parameters
+        ----------
+        dim : integer
+            Dimension of space (N).
+        size : integer, optional
+            Number of samples to draw (default 1).
+
+        Returns
+        -------
+        rvs : ndarray
+            Random unitary matrices; shape (size, dim, dim), or (dim, dim)
+            if size == 1.
+
+        """
+        random_state = self._get_random_state(random_state)
+
+        size = int(size)
+        if size > 1 and NumpyVersion(np.__version__) < '1.22.0':
+            return np.array([self.rvs(dim, size=1, random_state=random_state)
+                             for i in range(size)])
+
+        dim = self._process_parameters(dim)
+
+        size = (size,) if size > 1 else ()
+        z = 1/math.sqrt(2)*(random_state.normal(size=size + (dim, dim)) +
+                            1j*random_state.normal(size=size + (dim, dim)))
+        q, r = np.linalg.qr(z)
+        # The last two dimensions are the rows and columns of R matrices.
+        # Extract the diagonals. Note that this eliminates a dimension.
+        d = r.diagonal(offset=0, axis1=-2, axis2=-1)
+        # Add back a dimension for proper broadcasting: we're dividing
+        # each row of each R matrix by the diagonal of the R matrix.
+        q *= (d/abs(d))[..., np.newaxis, :]  # to broadcast properly
+        return q
+
+
+unitary_group = unitary_group_gen()
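+
+
+# Illustrative sketch (hypothetical helper, not part of the scipy API): a
+# draw from U(N) should satisfy x @ x.conj().T == I, and its determinant
+# lies on the complex unit circle (|det| == 1), unlike O(N) where the
+# determinant is exactly +1 or -1.
+def _check_unitary_group_sample():
+    x = unitary_group.rvs(3, random_state=123)
+    assert np.allclose(x @ x.conj().T, np.eye(3), atol=1e-10)
+    assert np.isclose(abs(np.linalg.det(x)), 1.0)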
+
+
+class unitary_group_frozen(multi_rv_frozen):
+    def __init__(self, dim=None, seed=None):
+        """Create a frozen (U(N)) n-dimensional unitary matrix distribution.
+
+        Parameters
+        ----------
+        dim : scalar
+            Dimension of matrices
+        seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
+            If `seed` is None (or `np.random`), the `numpy.random.RandomState`
+            singleton is used.
+            If `seed` is an int, a new ``RandomState`` instance is used,
+            seeded with `seed`.
+            If `seed` is already a ``Generator`` or ``RandomState`` instance
+            then that instance is used.
+
+        Examples
+        --------
+        >>> from scipy.stats import unitary_group
+        >>> x = unitary_group(3)
+        >>> x.rvs()
+
+        """
+        self._dist = unitary_group_gen(seed)
+        self.dim = self._dist._process_parameters(dim)
+
+    def rvs(self, size=1, random_state=None):
+        return self._dist.rvs(self.dim, size, random_state)
+
+
+_mvt_doc_default_callparams = """\
+loc : array_like, optional
+    Location of the distribution. (default ``0``)
+shape : array_like, optional
+    Positive semidefinite matrix of the distribution. (default ``1``)
+df : float, optional
+    Degrees of freedom of the distribution; must be greater than zero.
+    If ``np.inf`` then results are multivariate normal. The default is ``1``.
+allow_singular : bool, optional
+    Whether to allow a singular matrix. (default ``False``)
+"""
+
+_mvt_doc_callparams_note = """\
+Setting the parameter `loc` to ``None`` is equivalent to having `loc`
+be the zero-vector. The parameter `shape` can be a scalar, in which case
+the shape matrix is the identity times that value, a vector of
+diagonal entries for the shape matrix, or a two-dimensional array_like.
+"""
+
+_mvt_doc_frozen_callparams_note = """\
+See class definition for a detailed description of parameters."""
+
+mvt_docdict_params = {
+    '_mvt_doc_default_callparams': _mvt_doc_default_callparams,
+    '_mvt_doc_callparams_note': _mvt_doc_callparams_note,
+    '_doc_random_state': _doc_random_state
+}
+
+mvt_docdict_noparams = {
+    '_mvt_doc_default_callparams': "",
+    '_mvt_doc_callparams_note': _mvt_doc_frozen_callparams_note,
+    '_doc_random_state': _doc_random_state
+}
+
+
+class multivariate_t_gen(multi_rv_generic):
+    r"""A multivariate t-distributed random variable.
+
+    The `loc` parameter specifies the location. The `shape` parameter specifies
+    the positive semidefinite shape matrix. The `df` parameter specifies the
+    degrees of freedom.
+
+    In addition to calling the methods below, the object itself may be called
+    as a function to fix the location, shape matrix, and degrees of freedom
+    parameters, returning a "frozen" multivariate t random variable.
+
+    Methods
+    -------
+    pdf(x, loc=None, shape=1, df=1, allow_singular=False)
+        Probability density function.
+    logpdf(x, loc=None, shape=1, df=1, allow_singular=False)
+        Log of the probability density function.
+    rvs(loc=None, shape=1, df=1, size=1, random_state=None)
+        Draw random samples from a multivariate t-distribution.
+
+    Parameters
+    ----------
+    %(_mvt_doc_default_callparams)s
+    %(_doc_random_state)s
+
+    Notes
+    -----
+    %(_mvt_doc_callparams_note)s
+    The matrix `shape` must be a (symmetric) positive semidefinite matrix. The
+    determinant and inverse of `shape` are computed as the pseudo-determinant
+    and pseudo-inverse, respectively, so that `shape` does not need to have
+    full rank.
+
+    The probability density function for `multivariate_t` is
+
+    .. math::
+
+        f(x) = \frac{\Gamma[(\nu + p)/2]}{\Gamma(\nu/2)\nu^{p/2}\pi^{p/2}|\Sigma|^{1/2}}
+               \left[1 + \frac{1}{\nu} (\mathbf{x} - \boldsymbol{\mu})^{\top}
+               \boldsymbol{\Sigma}^{-1}
+               (\mathbf{x} - \boldsymbol{\mu}) \right]^{-(\nu + p)/2},
+
+    where :math:`p` is the dimension of :math:`\mathbf{x}`,
+    :math:`\boldsymbol{\mu}` is the :math:`p`-dimensional location,
+    :math:`\boldsymbol{\Sigma}` the :math:`p \times p`-dimensional shape
+    matrix, and :math:`\nu` is the degrees of freedom.
+
+    .. versionadded:: 1.6.0
+
+    Examples
+    --------
+    The object may be called (as a function) to fix the `loc`, `shape`,
+    `df`, and `allow_singular` parameters, returning a "frozen"
+    multivariate_t random variable:
+
+    >>> import numpy as np
+    >>> from scipy.stats import multivariate_t
+    >>> rv = multivariate_t([1.0, -0.5], [[2.1, 0.3], [0.3, 1.5]], df=2)
+    >>> # Frozen object with the same methods but holding the given location,
+    >>> # scale, and degrees of freedom fixed.
+
+    Create a contour plot of the PDF.
+
+    >>> import matplotlib.pyplot as plt
+    >>> x, y = np.mgrid[-1:3:.01, -2:1.5:.01]
+    >>> pos = np.dstack((x, y))
+    >>> fig, ax = plt.subplots(1, 1)
+    >>> ax.set_aspect('equal')
+    >>> plt.contourf(x, y, rv.pdf(pos))
+
+    """
+
+    def __init__(self, seed=None):
+        """Initialize a multivariate t-distributed random variable.
+
+        Parameters
+        ----------
+        seed : Random state.
+
+        """
+        super().__init__(seed)
+        self.__doc__ = doccer.docformat(self.__doc__, mvt_docdict_params)
+        self._random_state = check_random_state(seed)
+
+    def __call__(self, loc=None, shape=1, df=1, allow_singular=False,
+                 seed=None):
+        """Create a frozen multivariate t-distribution.
+
+        See `multivariate_t_frozen` for parameters.
+        """
+        if df == np.inf:
+            return multivariate_normal_frozen(mean=loc, cov=shape,
+                                              allow_singular=allow_singular,
+                                              seed=seed)
+        return multivariate_t_frozen(loc=loc, shape=shape, df=df,
+                                     allow_singular=allow_singular, seed=seed)
+
+    def pdf(self, x, loc=None, shape=1, df=1, allow_singular=False):
+        """Multivariate t-distribution probability density function.
+
+        Parameters
+        ----------
+        x : array_like
+            Points at which to evaluate the probability density function.
+        %(_mvt_doc_default_callparams)s
+
+        Returns
+        -------
+        pdf : Probability density function evaluated at `x`.
+
+        Examples
+        --------
+        >>> from scipy.stats import multivariate_t
+        >>> x = [0.4, 5]
+        >>> loc = [0, 1]
+        >>> shape = [[1, 0.1], [0.1, 1]]
+        >>> df = 7
+        >>> multivariate_t.pdf(x, loc, shape, df)
+        array([0.00075713])
+
+        """
+        dim, loc, shape, df = self._process_parameters(loc, shape, df)
+        x = self._process_quantiles(x, dim)
+        shape_info = _PSD(shape, allow_singular=allow_singular)
+        logpdf = self._logpdf(x, loc, shape_info.U, shape_info.log_pdet, df,
+                              dim, shape_info.rank)
+        return np.exp(logpdf)
+
+    def logpdf(self, x, loc=None, shape=1, df=1):
+        """Log of the multivariate t-distribution probability density function.
+
+        Parameters
+        ----------
+        x : array_like
+            Points at which to evaluate the log of the probability density
+            function.
+        %(_mvt_doc_default_callparams)s
+
+        Returns
+        -------
+        logpdf : Log of the probability density function evaluated at `x`.
+
+        Examples
+        --------
+        >>> from scipy.stats import multivariate_t
+        >>> x = [0.4, 5]
+        >>> loc = [0, 1]
+        >>> shape = [[1, 0.1], [0.1, 1]]
+        >>> df = 7
+        >>> multivariate_t.logpdf(x, loc, shape, df)
+        array([-7.1859802])
+
+        See Also
+        --------
+        pdf : Probability density function.
+
+        """
+        dim, loc, shape, df = self._process_parameters(loc, shape, df)
+        x = self._process_quantiles(x, dim)
+        shape_info = _PSD(shape)
+        return self._logpdf(x, loc, shape_info.U, shape_info.log_pdet, df, dim,
+                            shape_info.rank)
+
+    def _logpdf(self, x, loc, prec_U, log_pdet, df, dim, rank):
+        """Utility method `pdf`, `logpdf` for parameters.
+
+        Parameters
+        ----------
+        x : ndarray
+            Points at which to evaluate the log of the probability density
+            function.
+        loc : ndarray
+            Location of the distribution.
+        prec_U : ndarray
+            A decomposition such that `np.dot(prec_U, prec_U.T)` is the inverse
+            of the shape matrix.
+        log_pdet : float
+            Logarithm of the determinant of the shape matrix.
+        df : float
+            Degrees of freedom of the distribution.
+        dim : int
+            Dimension of the quantiles x.
+        rank : int
+            Rank of the shape matrix.
+
+        Notes
+        -----
+        As this function does no argument checking, it should not be called
+        directly; use 'logpdf' instead.
+
+        """
+        if df == np.inf:
+            return multivariate_normal._logpdf(x, loc, prec_U, log_pdet, rank)
+
+        dev = x - loc
+        maha = np.square(np.dot(dev, prec_U)).sum(axis=-1)
+
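+        # Termwise assembly of log f(x) from the class docstring: A - B - C
+        # - D is the log normalizing constant, log Gamma((df+dim)/2)
+        # - log Gamma(df/2) - (dim/2) log(df*pi) - (1/2) log pdet(shape),
+        # and E is the kernel -((df+dim)/2) * log(1 + maha/df).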
+        t = 0.5 * (df + dim)
+        A = gammaln(t)
+        B = gammaln(0.5 * df)
+        C = dim/2. * np.log(df * np.pi)
+        D = 0.5 * log_pdet
+        E = -t * np.log(1 + (1./df) * maha)
+
+        return _squeeze_output(A - B - C - D + E)
+
+    def rvs(self, loc=None, shape=1, df=1, size=1, random_state=None):
+        """Draw random samples from a multivariate t-distribution.
+
+        Parameters
+        ----------
+        %(_mvt_doc_default_callparams)s
+        size : integer, optional
+            Number of samples to draw (default 1).
+        %(_doc_random_state)s
+
+        Returns
+        -------
+        rvs : ndarray or scalar
+            Random variates of size (`size`, `P`), where `P` is the
+            dimension of the random variable.
+
+        Examples
+        --------
+        >>> from scipy.stats import multivariate_t
+        >>> x = [0.4, 5]
+        >>> loc = [0, 1]
+        >>> shape = [[1, 0.1], [0.1, 1]]
+        >>> df = 7
+        >>> multivariate_t.rvs(loc, shape, df)
+        array([[0.93477495, 3.00408716]])
+
+        """
+        # For implementation details, see equation (3):
+        #
+        #    Hofert, "On Sampling from the Multivariate t Distribution", 2013
+        #     http://rjournal.github.io/archive/2013-2/hofert.pdf
+        #
+        dim, loc, shape, df = self._process_parameters(loc, shape, df)
+        if random_state is not None:
+            rng = check_random_state(random_state)
+        else:
+            rng = self._random_state
+
+        if np.isinf(df):
+            x = np.ones(size)
+        else:
+            x = rng.chisquare(df, size=size) / df
+
+        z = rng.multivariate_normal(np.zeros(dim), shape, size=size)
+        samples = loc + z / np.sqrt(x)[..., None]
+        return _squeeze_output(samples)
+
+    def _process_quantiles(self, x, dim):
+        """
+        Adjust quantiles array so that last axis labels the components of
+        each data point.
+        """
+        x = np.asarray(x, dtype=float)
+        if x.ndim == 0:
+            x = x[np.newaxis]
+        elif x.ndim == 1:
+            if dim == 1:
+                x = x[:, np.newaxis]
+            else:
+                x = x[np.newaxis, :]
+        return x
+
+    def _process_parameters(self, loc, shape, df):
+        """
+        Infer dimensionality from location array and shape matrix, handle
+        defaults, and ensure compatible dimensions.
+        """
+        if loc is None and shape is None:
+            loc = np.asarray(0, dtype=float)
+            shape = np.asarray(1, dtype=float)
+            dim = 1
+        elif loc is None:
+            shape = np.asarray(shape, dtype=float)
+            if shape.ndim < 2:
+                dim = 1
+            else:
+                dim = shape.shape[0]
+            loc = np.zeros(dim)
+        elif shape is None:
+            loc = np.asarray(loc, dtype=float)
+            dim = loc.size
+            shape = np.eye(dim)
+        else:
+            shape = np.asarray(shape, dtype=float)
+            loc = np.asarray(loc, dtype=float)
+            dim = loc.size
+
+        if dim == 1:
+            loc = loc.reshape(1)
+            shape = shape.reshape(1, 1)
+
+        if loc.ndim != 1 or loc.shape[0] != dim:
+            raise ValueError("Array 'loc' must be a vector of length %d." %
+                             dim)
+        if shape.ndim == 0:
+            shape = shape * np.eye(dim)
+        elif shape.ndim == 1:
+            shape = np.diag(shape)
+        elif shape.ndim == 2 and shape.shape != (dim, dim):
+            rows, cols = shape.shape
+            if rows != cols:
+                msg = ("Array 'cov' must be square if it is two dimensional,"
+                       " but cov.shape = %s." % str(shape.shape))
+            else:
+                msg = ("Dimension mismatch: array 'cov' is of shape %s,"
+                       " but 'loc' is a vector of length %d.")
+                msg = msg % (str(shape.shape), len(loc))
+            raise ValueError(msg)
+        elif shape.ndim > 2:
+            raise ValueError("Array 'cov' must be at most two-dimensional,"
+                             " but cov.ndim = %d" % shape.ndim)
+
+        # Process degrees of freedom.
+        if df is None:
+            df = 1
+        elif df <= 0:
+            raise ValueError("'df' must be greater than zero.")
+        elif np.isnan(df):
+            raise ValueError("'df' is 'nan' but must be greater than zero or 'np.inf'.")
+
+        return dim, loc, shape, df
+
+
+class multivariate_t_frozen(multi_rv_frozen):
+
+    def __init__(self, loc=None, shape=1, df=1, allow_singular=False,
+                 seed=None):
+        """Create a frozen multivariate t distribution.
+
+        Parameters
+        ----------
+        %(_mvt_doc_default_callparams)s
+
+        Examples
+        --------
+        >>> import numpy as np
+        >>> loc = np.zeros(3)
+        >>> shape = np.eye(3)
+        >>> df = 10
+        >>> dist = multivariate_t(loc, shape, df)
+        >>> dist.rvs()
+        array([[ 0.81412036, -1.53612361,  0.42199647]])
+        >>> dist.pdf([1, 1, 1])
+        array([0.01237803])
+
+        """
+        self._dist = multivariate_t_gen(seed)
+        dim, loc, shape, df = self._dist._process_parameters(loc, shape, df)
+        self.dim, self.loc, self.shape, self.df = dim, loc, shape, df
+        self.shape_info = _PSD(shape, allow_singular=allow_singular)
+
+    def logpdf(self, x):
+        x = self._dist._process_quantiles(x, self.dim)
+        U = self.shape_info.U
+        log_pdet = self.shape_info.log_pdet
+        return self._dist._logpdf(x, self.loc, U, log_pdet, self.df, self.dim,
+                                  self.shape_info.rank)
+
+    def pdf(self, x):
+        return np.exp(self.logpdf(x))
+
+    def rvs(self, size=1, random_state=None):
+        return self._dist.rvs(loc=self.loc,
+                              shape=self.shape,
+                              df=self.df,
+                              size=size,
+                              random_state=random_state)
+
+
+multivariate_t = multivariate_t_gen()
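+
+
+# Illustrative sketch (hypothetical helper, not part of the scipy API) of
+# the normal/chi-square mixture behind `multivariate_t_gen.rvs` (Hofert,
+# eq. 3): with W ~ chi2(df)/df and Z ~ N(0, shape), the variate
+# loc + Z / sqrt(W) is multivariate t with the given parameters.
+def _check_multivariate_t_mixture():
+    rng = np.random.default_rng(0)
+    df, loc = 5.0, np.array([1.0, -0.5])
+    w = rng.chisquare(df, size=10_000) / df
+    z = rng.multivariate_normal(np.zeros(2), np.eye(2), size=10_000)
+    samples = loc + z / np.sqrt(w)[:, None]
+    # For df > 1 the sample mean should be close to `loc`.
+    assert np.allclose(samples.mean(axis=0), loc, atol=0.1)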
+
+
+# Set frozen generator docstrings from corresponding docstrings in
+# matrix_normal_gen and fill in default strings in class docstrings
+for name in ['logpdf', 'pdf', 'rvs']:
+    method = multivariate_t_gen.__dict__[name]
+    method_frozen = multivariate_t_frozen.__dict__[name]
+    method_frozen.__doc__ = doccer.docformat(method.__doc__,
+                                             mvt_docdict_noparams)
+    method.__doc__ = doccer.docformat(method.__doc__, mvt_docdict_params)
+
+
+_mhg_doc_default_callparams = """\
+m : array_like
+    The number of each type of object in the population.
+    That is, :math:`m[i]` is the number of objects of
+    type :math:`i`.
+n : array_like
+    The number of samples taken from the population.
+"""
+
+_mhg_doc_callparams_note = """\
+`m` must be an array of positive integers. If the quantile
+:math:`i` contains values out of the range :math:`[0, m_i]`
+where :math:`m_i` is the number of objects of type :math:`i`
+in the population or if the parameters are inconsistent with one
+another (e.g. ``x.sum() != n``), methods return the appropriate
+value (e.g. ``0`` for ``pmf``). If `m` or `n` contain negative
+values, the result will contain ``nan`` there.
+"""
+
+_mhg_doc_frozen_callparams = ""
+
+_mhg_doc_frozen_callparams_note = """\
+See class definition for a detailed description of parameters."""
+
+mhg_docdict_params = {
+    '_doc_default_callparams': _mhg_doc_default_callparams,
+    '_doc_callparams_note': _mhg_doc_callparams_note,
+    '_doc_random_state': _doc_random_state
+}
+
+mhg_docdict_noparams = {
+    '_doc_default_callparams': _mhg_doc_frozen_callparams,
+    '_doc_callparams_note': _mhg_doc_frozen_callparams_note,
+    '_doc_random_state': _doc_random_state
+}
+
+
+class multivariate_hypergeom_gen(multi_rv_generic):
+    r"""A multivariate hypergeometric random variable.
+
+    Methods
+    -------
+    pmf(x, m, n)
+        Probability mass function.
+    logpmf(x, m, n)
+        Log of the probability mass function.
+    rvs(m, n, size=1, random_state=None)
+        Draw random samples from a multivariate hypergeometric
+        distribution.
+    mean(m, n)
+        Mean of the multivariate hypergeometric distribution.
+    var(m, n)
+        Variance of the multivariate hypergeometric distribution.
+    cov(m, n)
+        Compute the covariance matrix of the multivariate
+        hypergeometric distribution.
+
+    Parameters
+    ----------
+    %(_doc_default_callparams)s
+    %(_doc_random_state)s
+
+    Notes
+    -----
+    %(_doc_callparams_note)s
+
+    The probability mass function for `multivariate_hypergeom` is
+
+    .. math::
+
+        P(X_1 = x_1, X_2 = x_2, \ldots, X_k = x_k) = \frac{\binom{m_1}{x_1}
+        \binom{m_2}{x_2} \cdots \binom{m_k}{x_k}}{\binom{M}{n}}, \\ \quad
+        (x_1, x_2, \ldots, x_k) \in \mathbb{N}^k \text{ with }
+        \sum_{i=1}^k x_i = n
+
+    where :math:`m_i` are the number of objects of type :math:`i`, :math:`M`
+    is the total number of objects in the population (sum of all the
+    :math:`m_i`), and :math:`n` is the size of the sample to be taken
+    from the population.
+
+    .. versionadded:: 1.6.0
+
+    Examples
+    --------
+    To evaluate the probability mass function of the multivariate
+    hypergeometric distribution, with a dichotomous population of size
+    :math:`10` and :math:`20`, at a sample of size :math:`12` with
+    :math:`8` objects of the first type and :math:`4` objects of the
+    second type, use:
+
+    >>> from scipy.stats import multivariate_hypergeom
+    >>> multivariate_hypergeom.pmf(x=[8, 4], m=[10, 20], n=12)
+    0.0025207176631464523
+
+    The `multivariate_hypergeom` distribution is identical to the
+    corresponding `hypergeom` distribution (tiny numerical differences
+    notwithstanding) when only two types (good and bad) of objects
+    are present in the population as in the example above. Consider
+    another example for a comparison with the hypergeometric distribution:
+
+    >>> from scipy.stats import hypergeom
+    >>> multivariate_hypergeom.pmf(x=[3, 1], m=[10, 5], n=4)
+    0.4395604395604395
+    >>> hypergeom.pmf(k=3, M=15, n=4, N=10)
+    0.43956043956044005
+
+    The functions ``pmf``, ``logpmf``, ``mean``, ``var``, ``cov``, and ``rvs``
+    support broadcasting, under the convention that the vector parameters
+    (``x``, ``m``, and ``n``) are interpreted as if each row along the last
+    axis is a single object. For instance, we can combine the previous two
+    calls to `multivariate_hypergeom` as
+
+    >>> multivariate_hypergeom.pmf(x=[[8, 4], [3, 1]], m=[[10, 20], [10, 5]],
+    ...                            n=[12, 4])
+    array([0.00252072, 0.43956044])
+
+    This broadcasting also works for ``cov``, where the output objects are
+    square matrices of size ``m.shape[-1]``. For example:
+
+    >>> multivariate_hypergeom.cov(m=[[7, 9], [10, 15]], n=[8, 12])
+    array([[[ 1.05, -1.05],
+            [-1.05,  1.05]],
+           [[ 1.56, -1.56],
+            [-1.56,  1.56]]])
+
+    That is, ``result[0]`` is equal to
+    ``multivariate_hypergeom.cov(m=[7, 9], n=8)`` and ``result[1]`` is equal
+    to ``multivariate_hypergeom.cov(m=[10, 15], n=12)``.
+
+    Alternatively, the object may be called (as a function) to fix the `m`
+    and `n` parameters, returning a "frozen" multivariate hypergeometric
+    random variable.
+
+    >>> rv = multivariate_hypergeom(m=[10, 20], n=12)
+    >>> rv.pmf(x=[8, 4])
+    0.0025207176631464523
+
+    See Also
+    --------
+    scipy.stats.hypergeom : The hypergeometric distribution.
+    scipy.stats.multinomial : The multinomial distribution.
+
+    References
+    ----------
+    .. [1] The Multivariate Hypergeometric Distribution,
+           http://www.randomservices.org/random/urn/MultiHypergeometric.html
+    .. [2] Thomas J. Sargent and John Stachurski, 2020,
+           Multivariate Hypergeometric Distribution
+           https://python.quantecon.org/_downloads/pdf/multi_hyper.pdf
+    """
+    def __init__(self, seed=None):
+        super().__init__(seed)
+        self.__doc__ = doccer.docformat(self.__doc__, mhg_docdict_params)
+
+    def __call__(self, m, n, seed=None):
+        """Create a frozen multivariate_hypergeom distribution.
+
+        See `multivariate_hypergeom_frozen` for more information.
+        """
+        return multivariate_hypergeom_frozen(m, n, seed=seed)
+
+    def _process_parameters(self, m, n):
+        m = np.asarray(m)
+        n = np.asarray(n)
+        if m.size == 0:
+            m = m.astype(int)
+        if n.size == 0:
+            n = n.astype(int)
+        if not np.issubdtype(m.dtype, np.integer):
+            raise TypeError("'m' must be an array of integers.")
+        if not np.issubdtype(n.dtype, np.integer):
+            raise TypeError("'n' must be an array of integers.")
+        if m.ndim == 0:
+            raise ValueError("'m' must be an array with"
+                             " at least one dimension.")
+
+        # check for empty arrays
+        if m.size != 0:
+            n = n[..., np.newaxis]
+
+        m, n = np.broadcast_arrays(m, n)
+
+        # check for empty arrays
+        if m.size != 0:
+            n = n[..., 0]
+
+        mcond = m < 0
+
+        M = m.sum(axis=-1)
+
+        ncond = (n < 0) | (n > M)
+        return M, m, n, mcond, ncond, np.any(mcond, axis=-1) | ncond
+
+    def _process_quantiles(self, x, M, m, n):
+        x = np.asarray(x)
+        if not np.issubdtype(x.dtype, np.integer):
+            raise TypeError("'x' must an array of integers.")
+        if x.ndim == 0:
+            raise ValueError("'x' must be an array with"
+                             " at least one dimension.")
+        if not x.shape[-1] == m.shape[-1]:
+            raise ValueError(f"Size of each quantile must be size of 'm': "
+                             f"received {x.shape[-1]}, "
+                             f"but expected {m.shape[-1]}.")
+
+        # check for empty arrays
+        if m.size != 0:
+            n = n[..., np.newaxis]
+            M = M[..., np.newaxis]
+
+        x, m, n, M = np.broadcast_arrays(x, m, n, M)
+
+        # check for empty arrays
+        if m.size != 0:
+            n, M = n[..., 0], M[..., 0]
+
+        xcond = (x < 0) | (x > m)
+        return (x, M, m, n, xcond,
+                np.any(xcond, axis=-1) | (x.sum(axis=-1) != n))
+
+    def _checkresult(self, result, cond, bad_value):
+        result = np.asarray(result)
+        if cond.ndim != 0:
+            result[cond] = bad_value
+        elif cond:
+            return bad_value
+        if result.ndim == 0:
+            return result[()]
+        return result
+
+    def _logpmf(self, x, M, m, n, mxcond, ncond):
+        # This form of the pmf comes from the identity
+        # C(n, r) = beta(n+1, 1) / beta(r+1, n-r+1)
+        num = np.zeros_like(m, dtype=np.float_)
+        den = np.zeros_like(n, dtype=np.float_)
+        m, x = m[~mxcond], x[~mxcond]
+        M, n = M[~ncond], n[~ncond]
+        num[~mxcond] = (betaln(m+1, 1) - betaln(x+1, m-x+1))
+        den[~ncond] = (betaln(M+1, 1) - betaln(n+1, M-n+1))
+        num[mxcond] = np.nan
+        den[ncond] = np.nan
+        num = num.sum(axis=-1)
+        return num - den
+
+    def logpmf(self, x, m, n):
+        """Log of the multivariate hypergeometric probability mass function.
+
+        Parameters
+        ----------
+        x : array_like
+            Quantiles, with the last axis of `x` denoting the components.
+        %(_doc_default_callparams)s
+
+        Returns
+        -------
+        logpmf : ndarray or scalar
+            Log of the probability mass function evaluated at `x`
+
+        Notes
+        -----
+        %(_doc_callparams_note)s
+        """
+        M, m, n, mcond, ncond, mncond = self._process_parameters(m, n)
+        (x, M, m, n, xcond,
+         xcond_reduced) = self._process_quantiles(x, M, m, n)
+        mxcond = mcond | xcond
+        ncond = ncond | np.zeros(n.shape, dtype=np.bool_)
+
+        result = self._logpmf(x, M, m, n, mxcond, ncond)
+
+        # replace values for which x was out of the domain; broadcast
+        # xcond to the right shape
+        xcond_ = xcond_reduced | np.zeros(mncond.shape, dtype=np.bool_)
+        result = self._checkresult(result, xcond_, np.NINF)
+
+        # replace values bad for n or m; broadcast
+        # mncond to the right shape
+        mncond_ = mncond | np.zeros(xcond_reduced.shape, dtype=np.bool_)
+        return self._checkresult(result, mncond_, np.nan)
+
+    def pmf(self, x, m, n):
+        """Multivariate hypergeometric probability mass function.
+
+        Parameters
+        ----------
+        x : array_like
+            Quantiles, with the last axis of `x` denoting the components.
+        %(_doc_default_callparams)s
+
+        Returns
+        -------
+        pmf : ndarray or scalar
+            Probability density function evaluated at `x`
+
+        Notes
+        -----
+        %(_doc_callparams_note)s
+        """
+        out = np.exp(self.logpmf(x, m, n))
+        return out
+
+    def mean(self, m, n):
+        """Mean of the multivariate hypergeometric distribution.
+
+        Parameters
+        ----------
+        %(_doc_default_callparams)s
+
+        Returns
+        -------
+        mean : array_like or scalar
+            The mean of the distribution
+        """
+        M, m, n, _, _, mncond = self._process_parameters(m, n)
+        # check for empty arrays
+        if m.size != 0:
+            M, n = M[..., np.newaxis], n[..., np.newaxis]
+        cond = (M == 0)
+        M = np.ma.masked_array(M, mask=cond)
+        mu = n*(m/M)
+        if m.size != 0:
+            mncond = (mncond[..., np.newaxis] |
+                      np.zeros(mu.shape, dtype=np.bool_))
+        return self._checkresult(mu, mncond, np.nan)
+
+    def var(self, m, n):
+        """Variance of the multivariate hypergeometric distribution.
+
+        Parameters
+        ----------
+        %(_doc_default_callparams)s
+
+        Returns
+        -------
+        array_like
+            The variances of the components of the distribution.  This is
+            the diagonal of the covariance matrix of the distribution
+        """
+        M, m, n, _, _, mncond = self._process_parameters(m, n)
+        # check for empty arrays
+        if m.size != 0:
+            M, n = M[..., np.newaxis], n[..., np.newaxis]
+        cond = (M == 0) & (M-1 == 0)
+        M = np.ma.masked_array(M, mask=cond)
+        output = n * m/M * (M-m)/M * (M-n)/(M-1)
+        if m.size != 0:
+            mncond = (mncond[..., np.newaxis] |
+                      np.zeros(output.shape, dtype=np.bool_))
+        return self._checkresult(output, mncond, np.nan)
+
+    def cov(self, m, n):
+        """Covariance matrix of the multivariate hypergeometric distribution.
+
+        Parameters
+        ----------
+        %(_doc_default_callparams)s
+
+        Returns
+        -------
+        cov : array_like
+            The covariance matrix of the distribution
+        """
+        # see [1]_ for the formula and [2]_ for implementation
+        # cov( x_i,x_j ) = -n * (M-n)/(M-1) * (K_i*K_j) / (M**2)
+        M, m, n, _, _, mncond = self._process_parameters(m, n)
+        # check for empty arrays
+        if m.size != 0:
+            M = M[..., np.newaxis, np.newaxis]
+            n = n[..., np.newaxis, np.newaxis]
+        cond = (M == 0) & (M-1 == 0)
+        M = np.ma.masked_array(M, mask=cond)
+        output = (-n * (M-n)/(M-1) *
+                  np.einsum("...i,...j->...ij", m, m) / (M**2))
+        # check for empty arrays
+        if m.size != 0:
+            M, n = M[..., 0, 0], n[..., 0, 0]
+            cond = cond[..., 0, 0]
+        dim = m.shape[-1]
+        # diagonal entries need to be computed differently
+        for i in range(dim):
+            output[..., i, i] = (n * (M-n) * m[..., i]*(M-m[..., i]))
+            output[..., i, i] = output[..., i, i] / (M-1)
+            output[..., i, i] = output[..., i, i] / (M**2)
+        if m.size != 0:
+            mncond = (mncond[..., np.newaxis, np.newaxis] |
+                      np.zeros(output.shape, dtype=np.bool_))
+        return self._checkresult(output, mncond, np.nan)
+
+    def rvs(self, m, n, size=None, random_state=None):
+        """Draw random samples from a multivariate hypergeometric distribution.
+
+        Parameters
+        ----------
+        %(_doc_default_callparams)s
+        size : integer or iterable of integers, optional
+            Number of samples to draw. Default is ``None``, in which case a
+            single variate is returned as an array with shape ``m.shape``.
+        %(_doc_random_state)s
+
+        Returns
+        -------
+        rvs : array_like
+            Random variates of shape ``size`` or ``m.shape``
+            (if ``size=None``).
+
+        Notes
+        -----
+        %(_doc_callparams_note)s
+
+        Also note that NumPy's `multivariate_hypergeometric` sampler is not
+        used as it doesn't support broadcasting.
+        """
+        M, m, n, _, _, _ = self._process_parameters(m, n)
+
+        random_state = self._get_random_state(random_state)
+
+        if size is not None and isinstance(size, int):
+            size = (size, )
+
+        if size is None:
+            rvs = np.empty(m.shape, dtype=m.dtype)
+        else:
+            rvs = np.empty(size + (m.shape[-1], ), dtype=m.dtype)
+        rem = M
+
+        # This sampler has been taken from numpy gh-13794
+        # https://github.com/numpy/numpy/pull/13794
+        for c in range(m.shape[-1] - 1):
+            rem = rem - m[..., c]
+            n0mask = n == 0
+            rvs[..., c] = (~n0mask *
+                           random_state.hypergeometric(m[..., c],
+                                                       rem + n0mask,
+                                                       n + n0mask,
+                                                       size=size))
+            n = n - rvs[..., c]
+        rvs[..., m.shape[-1] - 1] = n
+
+        return rvs
+
+
+multivariate_hypergeom = multivariate_hypergeom_gen()
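+
+
+# Illustrative sketch (hypothetical helper, not part of the scipy API): each
+# variate produced by the conditional-hypergeometric loop in `rvs` above must
+# have non-negative components that never exceed the counts in `m` and that
+# sum exactly to `n`.
+def _check_multivariate_hypergeom_sample():
+    m, n = np.array([10, 20, 5]), 12
+    x = multivariate_hypergeom.rvs(m, n, size=1000, random_state=123)
+    assert np.all(x.sum(axis=-1) == n)
+    assert np.all((x >= 0) & (x <= m))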
+
+
+class multivariate_hypergeom_frozen(multi_rv_frozen):
+    def __init__(self, m, n, seed=None):
+        self._dist = multivariate_hypergeom_gen(seed)
+        (self.M, self.m, self.n,
+         self.mcond, self.ncond,
+         self.mncond) = self._dist._process_parameters(m, n)
+
+        # monkey patch self._dist
+        def _process_parameters(m, n):
+            return (self.M, self.m, self.n,
+                    self.mcond, self.ncond,
+                    self.mncond)
+        self._dist._process_parameters = _process_parameters
+
+    def logpmf(self, x):
+        return self._dist.logpmf(x, self.m, self.n)
+
+    def pmf(self, x):
+        return self._dist.pmf(x, self.m, self.n)
+
+    def mean(self):
+        return self._dist.mean(self.m, self.n)
+
+    def var(self):
+        return self._dist.var(self.m, self.n)
+
+    def cov(self):
+        return self._dist.cov(self.m, self.n)
+
+    def rvs(self, size=1, random_state=None):
+        return self._dist.rvs(self.m, self.n,
+                              size=size,
+                              random_state=random_state)
+
+
+# Set frozen generator docstrings from corresponding docstrings in
+# multivariate_hypergeom and fill in default strings in class docstrings
+for name in ['logpmf', 'pmf', 'mean', 'var', 'cov', 'rvs']:
+    method = multivariate_hypergeom_gen.__dict__[name]
+    method_frozen = multivariate_hypergeom_frozen.__dict__[name]
+    method_frozen.__doc__ = doccer.docformat(
+        method.__doc__, mhg_docdict_noparams)
+    method.__doc__ = doccer.docformat(method.__doc__,
+                                      mhg_docdict_params)
+
+
+class random_table_gen(multi_rv_generic):
+    r"""Contingency tables from independent samples with fixed marginal sums.
+
+    This is the distribution of random tables with given row and column vector
+    sums. This distribution represents the set of random tables under the null
+    hypothesis that rows and columns are independent. It is used in hypothesis
+    tests of independence.
+
+    Because of assumed independence, the expected frequency of each table
+    element can be computed from the row and column sums, so that the
+    distribution is completely determined by these two vectors.
+
+    Methods
+    -------
+    logpmf(x)
+        Log-probability of drawing table `x` from the distribution.
+    pmf(x)
+        Probability of drawing table `x` from the distribution.
+    mean(row, col)
+        Mean table.
+    rvs(row, col, size=None, method=None, random_state=None)
+        Draw random tables with given row and column vector sums.
+
+    Parameters
+    ----------
+    %(_doc_row_col)s
+    %(_doc_random_state)s
+
+    Notes
+    -----
+    %(_doc_row_col_note)s
+
+    Random elements from the distribution are generated either with Boyett's
+    [1]_ or Patefield's algorithm [2]_. Boyett's algorithm has
+    O(N) time and space complexity, where N is the total sum of entries in the
+    table. Patefield's algorithm has O(K x log(N)) time complexity, where K
+    is the number of cells in the table, and requires only a small constant
+    work space. By default, the `rvs` method selects the fastest algorithm
+    based on the input, but you can specify the algorithm with the keyword
+    `method`. Allowed values are "boyett" and "patefield".
+
+    .. versionadded:: 1.10.0
+
+    Examples
+    --------
+    >>> from scipy.stats import random_table
+
+    >>> row = [1, 5]
+    >>> col = [2, 3, 1]
+    >>> random_table.mean(row, col)
+    array([[0.33333333, 0.5       , 0.16666667],
+           [1.66666667, 2.5       , 0.83333333]])
+
+    Alternatively, the object may be called (as a function) to fix the row
+    and column vector sums, returning a "frozen" distribution.
+
+    >>> dist = random_table(row, col)
+    >>> dist.rvs(random_state=123)
+    array([[1., 0., 0.],
+           [1., 3., 1.]])
+
+    References
+    ----------
+    .. [1] J. Boyett, AS 144 Appl. Statist. 28 (1979) 329-332
+    .. [2] W.M. Patefield, AS 159 Appl. Statist. 30 (1981) 91-97
+    """
+
+    def __init__(self, seed=None):
+        super().__init__(seed)
+
+    def __call__(self, row, col, *, seed=None):
+        """Create a frozen distribution of tables with given marginals.
+
+        See `random_table_frozen` for more information.
+        """
+        return random_table_frozen(row, col, seed=seed)
+
+    def logpmf(self, x, row, col):
+        """Log-probability of table to occur in the distribution.
+
+        Parameters
+        ----------
+        %(_doc_x)s
+        %(_doc_row_col)s
+
+        Returns
+        -------
+        logpmf : ndarray or scalar
+            Log of the probability mass function evaluated at `x`.
+
+        Notes
+        -----
+        %(_doc_row_col_note)s
+
+        If row and column marginals of `x` do not match `row` and `col`,
+        negative infinity is returned.
+
+        Examples
+        --------
+        >>> from scipy.stats import random_table
+        >>> import numpy as np
+
+        >>> x = [[1, 5, 1], [2, 3, 1]]
+        >>> row = np.sum(x, axis=1)
+        >>> col = np.sum(x, axis=0)
+        >>> random_table.logpmf(x, row, col)
+        -1.6306401200847027
+
+        Alternatively, the object may be called (as a function) to fix the row
+        and column vector sums, returning a "frozen" distribution.
+
+        >>> d = random_table(row, col)
+        >>> d.logpmf(x)
+        -1.6306401200847027
+        """
+        r, c, n = self._process_parameters(row, col)
+        x = np.asarray(x)
+
+        if x.ndim < 2:
+            raise ValueError("`x` must be at least two-dimensional")
+
+        dtype_is_int = np.issubdtype(x.dtype, np.integer)
+        with np.errstate(invalid='ignore'):
+            if not dtype_is_int and not np.all(x.astype(int) == x):
+                raise ValueError("`x` must contain only integral values")
+
+        # x does not contain NaN if we arrive here
+        if np.any(x < 0):
+            raise ValueError("`x` must contain only non-negative values")
+
+        r2 = np.sum(x, axis=-1)
+        c2 = np.sum(x, axis=-2)
+
+        if r2.shape[-1] != len(r):
+            raise ValueError("shape of `x` must agree with `row`")
+
+        if c2.shape[-1] != len(c):
+            raise ValueError("shape of `x` must agree with `col`")
+
+        res = np.empty(x.shape[:-2])
+
+        mask = np.all(r2 == r, axis=-1) & np.all(c2 == c, axis=-1)
+
+        def lnfac(x):
+            return gammaln(x + 1)
+
+        res[mask] = (np.sum(lnfac(r), axis=-1) + np.sum(lnfac(c), axis=-1)
+                     - lnfac(n) - np.sum(lnfac(x[mask]), axis=(-1, -2)))
+        res[~mask] = -np.inf
+
+        return res[()]
+
+    def pmf(self, x, row, col):
+        """Probability of table to occur in the distribution.
+
+        Parameters
+        ----------
+        %(_doc_x)s
+        %(_doc_row_col)s
+
+        Returns
+        -------
+        pmf : ndarray or scalar
+            Probability mass function evaluated at `x`.
+
+        Notes
+        -----
+        %(_doc_row_col_note)s
+
+        If row and column marginals of `x` do not match `row` and `col`,
+        zero is returned.
+
+        Examples
+        --------
+        >>> from scipy.stats import random_table
+        >>> import numpy as np
+
+        >>> x = [[1, 5, 1], [2, 3, 1]]
+        >>> row = np.sum(x, axis=1)
+        >>> col = np.sum(x, axis=0)
+        >>> random_table.pmf(x, row, col)
+        0.19580419580419592
+
+        Alternatively, the object may be called (as a function) to fix the row
+        and column vector sums, returning a "frozen" distribution.
+
+        >>> d = random_table(row, col)
+        >>> d.pmf(x)
+        0.19580419580419592
+        """
+        return np.exp(self.logpmf(x, row, col))
+
+    def mean(self, row, col):
+        """Mean of distribution of conditional tables.
+        %(_doc_mean_params)s
+
+        Returns
+        -------
+        mean : ndarray
+            Mean of the distribution.
+
+        Notes
+        -----
+        %(_doc_row_col_note)s
+
+        Examples
+        --------
+        >>> from scipy.stats import random_table
+
+        >>> row = [1, 5]
+        >>> col = [2, 3, 1]
+        >>> random_table.mean(row, col)
+        array([[0.33333333, 0.5       , 0.16666667],
+               [1.66666667, 2.5       , 0.83333333]])
+
+        Alternatively, the object may be called (as a function) to fix the row
+        and column vector sums, returning a "frozen" distribution.
+
+        >>> d = random_table(row, col)
+        >>> d.mean()
+        array([[0.33333333, 0.5       , 0.16666667],
+               [1.66666667, 2.5       , 0.83333333]])
+        """
+        r, c, n = self._process_parameters(row, col)
+        return np.outer(r, c) / n
+
+    def rvs(self, row, col, *, size=None, method=None, random_state=None):
+        """Draw random tables with fixed column and row marginals.
+
+        Parameters
+        ----------
+        %(_doc_row_col)s
+        size : integer, optional
+            Number of samples to draw (default 1).
+        method : str, optional
+            Which method to use, "boyett" or "patefield". If None (default),
+            selects the fastest method for this input.
+        %(_doc_random_state)s
+
+        Returns
+        -------
+        rvs : ndarray
+            Random 2D tables of shape (`size`, `len(row)`, `len(col)`).
+
+        Notes
+        -----
+        %(_doc_row_col_note)s
+
+        Examples
+        --------
+        >>> from scipy.stats import random_table
+
+        >>> row = [1, 5]
+        >>> col = [2, 3, 1]
+        >>> random_table.rvs(row, col, random_state=123)
+        array([[1., 0., 0.],
+               [1., 3., 1.]])
+
+        Alternatively, the object may be called (as a function) to fix the row
+        and column vector sums, returning a "frozen" distribution.
+
+        >>> d = random_table(row, col)
+        >>> d.rvs(random_state=123)
+        array([[1., 0., 0.],
+               [1., 3., 1.]])
+        """
+        r, c, n = self._process_parameters(row, col)
+        size, shape = self._process_size_shape(size, r, c)
+
+        random_state = self._get_random_state(random_state)
+        meth = self._process_rvs_method(method, r, c, n)
+
+        return meth(r, c, n, size, random_state).reshape(shape)
+
+    @staticmethod
+    def _process_parameters(row, col):
+        """
+        Check that row and column vectors are one-dimensional, that they do
+        not contain negative or non-integer entries, and that the sums over
+        both vectors are equal.
+        """
+        r = np.array(row, dtype=np.int64, copy=True)
+        c = np.array(col, dtype=np.int64, copy=True)
+
+        if np.ndim(r) != 1:
+            raise ValueError("`row` must be one-dimensional")
+        if np.ndim(c) != 1:
+            raise ValueError("`col` must be one-dimensional")
+
+        if np.any(r < 0):
+            raise ValueError("each element of `row` must be non-negative")
+        if np.any(c < 0):
+            raise ValueError("each element of `col` must be non-negative")
+
+        n = np.sum(r)
+        if n != np.sum(c):
+            raise ValueError("sums over `row` and `col` must be equal")
+
+        if not np.all(r == np.asarray(row)):
+            raise ValueError("each element of `row` must be an integer")
+        if not np.all(c == np.asarray(col)):
+            raise ValueError("each element of `col` must be an integer")
+
+        return r, c, n
+
+    @staticmethod
+    def _process_size_shape(size, r, c):
+        """
+        Compute the number of samples to be drawn and the shape of the output
+        """
+        shape = (len(r), len(c))
+
+        if size is None:
+            return 1, shape
+
+        size = np.atleast_1d(size)
+        if not np.issubdtype(size.dtype, np.integer) or np.any(size < 0):
+            raise ValueError("`size` must be a non-negative integer or `None`")
+
+        return np.prod(size), tuple(size) + shape
+
+    @classmethod
+    def _process_rvs_method(cls, method, r, c, n):
+        known_methods = {
+            None: cls._rvs_select(r, c, n),
+            "boyett": cls._rvs_boyett,
+            "patefield": cls._rvs_patefield,
+        }
+        try:
+            return known_methods[method]
+        except KeyError:
+            raise ValueError(f"'{method}' not recognized, "
+                             f"must be one of {set(known_methods)}")
+
+    @classmethod
+    def _rvs_select(cls, r, c, n):
+        fac = 1.0  # benchmarks show that this value is about 1
+        k = len(r) * len(c)  # number of cells
+        # n + 1 guards against failure if n == 0
+        if n > fac * np.log(n + 1) * k:
+            return cls._rvs_patefield
+        return cls._rvs_boyett
+
+    @staticmethod
+    def _rvs_boyett(row, col, ntot, size, random_state):
+        return _rcont.rvs_rcont1(row, col, ntot, size, random_state)
+
+    @staticmethod
+    def _rvs_patefield(row, col, ntot, size, random_state):
+        return _rcont.rvs_rcont2(row, col, ntot, size, random_state)
+
+
+random_table = random_table_gen()
+
+
+class random_table_frozen(multi_rv_frozen):
+    def __init__(self, row, col, *, seed=None):
+        self._dist = random_table_gen(seed)
+        self._params = self._dist._process_parameters(row, col)
+
+        # monkey patch self._dist
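+        # The patched version ignores its arguments and returns the
+        # parameters validated in ``__init__``, so the frozen methods
+        # below can simply pass ``None`` for `row` and `col`.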
+        def _process_parameters(r, c):
+            return self._params
+        self._dist._process_parameters = _process_parameters
+
+    def logpmf(self, x):
+        return self._dist.logpmf(x, None, None)
+
+    def pmf(self, x):
+        return self._dist.pmf(x, None, None)
+
+    def mean(self):
+        return self._dist.mean(None, None)
+
+    def rvs(self, size=None, method=None, random_state=None):
+        # optimisations are possible here
+        return self._dist.rvs(None, None, size=size, method=method,
+                              random_state=random_state)
+
+
+_ctab_doc_row_col = """\
+row : array_like
+    Sum of table entries in each row.
+col : array_like
+    Sum of table entries in each column."""
+
+_ctab_doc_x = """\
+x : array_like
+   Two-dimensional table of non-negative integers, or a
+   multi-dimensional array with the last two dimensions
+   corresponding to the tables."""
+
+_ctab_doc_row_col_note = """\
+The row and column vectors must be one-dimensional and non-empty,
+and both must sum to the same value. They cannot contain negative
+or non-integer entries."""
+
+_ctab_doc_mean_params = f"""
+Parameters
+----------
+{_ctab_doc_row_col}"""
+
+_ctab_doc_row_col_note_frozen = """\
+See class definition for a detailed description of parameters."""
+
+_ctab_docdict = {
+    "_doc_random_state": _doc_random_state,
+    "_doc_row_col": _ctab_doc_row_col,
+    "_doc_x": _ctab_doc_x,
+    "_doc_mean_params": _ctab_doc_mean_params,
+    "_doc_row_col_note": _ctab_doc_row_col_note,
+}
+
+_ctab_docdict_frozen = _ctab_docdict.copy()
+_ctab_docdict_frozen.update({
+    "_doc_row_col": "",
+    "_doc_mean_params": "",
+    "_doc_row_col_note": _ctab_doc_row_col_note_frozen,
+})
+
+
+def _docfill(obj, docdict, template=None):
+    obj.__doc__ = doccer.docformat(template or obj.__doc__, docdict)
+
+
+# Set frozen generator docstrings from corresponding docstrings in
+# random_table and fill in default strings in class docstrings
+_docfill(random_table_gen, _ctab_docdict)
+for name in ['logpmf', 'pmf', 'mean', 'rvs']:
+    method = random_table_gen.__dict__[name]
+    method_frozen = random_table_frozen.__dict__[name]
+    _docfill(method_frozen, _ctab_docdict_frozen, method.__doc__)
+    _docfill(method, _ctab_docdict)
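+
+# As an illustration of the templating above (a sketch, not part of the
+# module logic): ``doccer.docformat`` performs ``%``-style substitution of
+# the shared fragments, so
+#
+#     doccer.docformat("Parameters\n----------\n%(_doc_row_col)s", _ctab_docdict)
+#
+# expands the placeholder into the ``row``/``col`` parameter block defined
+# in ``_ctab_doc_row_col`` above.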
+
+
+class uniform_direction_gen(multi_rv_generic):
+    r"""A vector-valued uniform direction.
+
+    Return a random direction (unit vector). The `dim` keyword specifies
+    the dimensionality of the space.
+
+    Methods
+    -------
+    rvs(dim, size=None, random_state=None)
+        Draw random directions.
+
+    Parameters
+    ----------
+    dim : scalar
+        Dimension of directions.
+    seed : {None, int, `numpy.random.Generator`,
+            `numpy.random.RandomState`}, optional
+
+        Used for drawing random variates.
+        If `seed` is `None`, the `~np.random.RandomState` singleton is used.
+        If `seed` is an int, a new ``RandomState`` instance is used, seeded
+        with seed.
+        If `seed` is already a ``RandomState`` or ``Generator`` instance,
+        then that object is used.
+        Default is `None`.
+
+    Notes
+    -----
+    This distribution generates unit vectors uniformly distributed on
+    the surface of a hypersphere. These can be interpreted as random
+    directions.
+    For example, if `dim` is 3, 3D vectors from the surface of :math:`S^2`
+    will be sampled.
+
+    References
+    ----------
+    .. [1] Marsaglia, G. (1972). "Choosing a Point from the Surface of a
+           Sphere". Annals of Mathematical Statistics. 43 (2): 645-646.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.stats import uniform_direction
+    >>> x = uniform_direction.rvs(3)
+    >>> np.linalg.norm(x)
+    1.
+
+    This generates one random direction, a vector on the surface of
+    :math:`S^2`.
+
+    Alternatively, the object may be called (as a function) to return a frozen
+    distribution with fixed `dim` parameter. Here,
+    we create a `uniform_direction` with ``dim=3`` and draw 5 observations.
+    The samples are then arranged in an array of shape 5x3.
+
+    >>> rng = np.random.default_rng()
+    >>> uniform_sphere_dist = uniform_direction(3)
+    >>> unit_vectors = uniform_sphere_dist.rvs(5, random_state=rng)
+    >>> unit_vectors
+    array([[ 0.56688642, -0.1332634 , -0.81294566],
+           [-0.427126  , -0.74779278,  0.50830044],
+           [ 0.3793989 ,  0.92346629,  0.05715323],
+           [ 0.36428383, -0.92449076, -0.11231259],
+           [-0.27733285,  0.94410968, -0.17816678]])
+    """
+
+    def __init__(self, seed=None):
+        super().__init__(seed)
+        self.__doc__ = doccer.docformat(self.__doc__)
+
+    def __call__(self, dim=None, seed=None):
+        """Create a frozen n-dimensional uniform direction distribution.
+
+        See `uniform_direction` for more information.
+        """
+        return uniform_direction_frozen(dim, seed=seed)
+
+    def _process_parameters(self, dim):
+        """Dimension N must be specified; it cannot be inferred."""
+        if dim is None or not np.isscalar(dim) or dim < 1 or dim != int(dim):
+            raise ValueError("Dimension of vector must be specified, "
+                             "and must be an integer greater than 0.")
+
+        return int(dim)
+
+    def rvs(self, dim, size=None, random_state=None):
+        """Draw random samples from S(N-1).
+
+        Parameters
+        ----------
+        dim : integer
+            Dimension of space (N).
+        size : int or tuple of ints, optional
+            Given a shape of, for example, (m,n,k), m*n*k samples are
+            generated, and packed in an m-by-n-by-k arrangement.
+            Because each sample is N-dimensional, the output shape
+            is (m,n,k,N). If no shape is specified, a single (N-D)
+            sample is returned.
+        random_state : {None, int, `numpy.random.Generator`,
+                        `numpy.random.RandomState`}, optional
+
+            Pseudorandom number generator state used to generate resamples.
+
+            If `random_state` is ``None`` (or `np.random`), the
+            `numpy.random.RandomState` singleton is used.
+            If `random_state` is an int, a new ``RandomState`` instance is
+            used, seeded with `random_state`.
+            If `random_state` is already a ``Generator`` or ``RandomState``
+            instance then that instance is used.
+
+        Returns
+        -------
+        rvs : ndarray
+            Random direction vectors
+
+        """
+        random_state = self._get_random_state(random_state)
+        if size is None:
+            size = np.array([], dtype=int)
+        size = np.atleast_1d(size)
+
+        dim = self._process_parameters(dim)
+
+        samples = _sample_uniform_direction(dim, size, random_state)
+        return samples
+
+
+uniform_direction = uniform_direction_gen()
+
+
+class uniform_direction_frozen(multi_rv_frozen):
+    def __init__(self, dim=None, seed=None):
+        """Create a frozen n-dimensional uniform direction distribution.
+
+        Parameters
+        ----------
+        dim : int
+            Dimension of the direction vectors
+        seed : {None, int, `numpy.random.Generator`,
+                `numpy.random.RandomState`}, optional
+
+            If `seed` is None (or `np.random`), the `numpy.random.RandomState`
+            singleton is used.
+            If `seed` is an int, a new ``RandomState`` instance is used,
+            seeded with `seed`.
+            If `seed` is already a ``Generator`` or ``RandomState`` instance
+            then that instance is used.
+
+        Examples
+        --------
+        >>> from scipy.stats import uniform_direction
+        >>> x = uniform_direction(3)
+        >>> x.rvs()
+
+        """
+        self._dist = uniform_direction_gen(seed)
+        self.dim = self._dist._process_parameters(dim)
+
+    def rvs(self, size=None, random_state=None):
+        return self._dist.rvs(self.dim, size, random_state)
+
+
+def _sample_uniform_direction(dim, size, random_state):
+    """
+    Private method to generate uniform directions
+    Reference: Marsaglia, G. (1972). "Choosing a Point from the Surface of a
+               Sphere". Annals of Mathematical Statistics. 43 (2): 645-646.
+    """
+    samples_shape = np.append(size, dim)
+    samples = random_state.standard_normal(samples_shape)
+    samples /= np.linalg.norm(samples, axis=-1, keepdims=True)
+    return samples
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_odds_ratio.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_odds_ratio.py
new file mode 100644
index 00000000..17dacbc7
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_odds_ratio.py
@@ -0,0 +1,465 @@
+
+import numpy as np
+
+from scipy.special import ndtri
+from scipy.optimize import brentq
+from ._discrete_distns import nchypergeom_fisher
+from ._common import ConfidenceInterval
+
+
+def _sample_odds_ratio(table):
+    """
+    Given a table [[a, b], [c, d]], compute a*d/(b*c).
+
+    Return nan if the numerator and denominator are 0.
+    Return inf if just the denominator is 0.
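+
+    For example, for the table [[7, 15], [58, 472]] used in the
+    `odds_ratio` docstring below, this is 7*472/(15*58) = 3304/870,
+    approximately 3.798.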
+    """
+    # table must be a 2x2 numpy array.
+    if table[1, 0] > 0 and table[0, 1] > 0:
+        oddsratio = table[0, 0] * table[1, 1] / (table[1, 0] * table[0, 1])
+    elif table[0, 0] == 0 or table[1, 1] == 0:
+        oddsratio = np.nan
+    else:
+        oddsratio = np.inf
+    return oddsratio
+
+
+def _solve(func):
+    """
+    Solve func(nc) = 0.  func must be an increasing function.
+    """
+    # We could just as well call the variable `x` instead of `nc`, but we
+    # always call this function with functions for which nc (the noncentrality
+    # parameter) is the variable for which we are solving.
+    nc = 1.0
+    value = func(nc)
+    if value == 0:
+        return nc
+
+    # Multiplicative factor by which to increase or decrease nc when
+    # searching for a bracketing interval.
+    factor = 2.0
+    # Find a bracketing interval.
+    if value > 0:
+        nc /= factor
+        while func(nc) > 0:
+            nc /= factor
+        lo = nc
+        hi = factor*nc
+    else:
+        nc *= factor
+        while func(nc) < 0:
+            nc *= factor
+        lo = nc/factor
+        hi = nc
+
+    # lo and hi bracket the solution for nc.
+    nc = brentq(func, lo, hi, xtol=1e-13)
+    return nc
+
+
+def _nc_hypergeom_mean_inverse(x, M, n, N):
+    """
+    For the given noncentral hypergeometric parameters x, M, n, and N
+    (table[0,0], total, row 0 sum and column 0 sum, resp., of a 2x2
+    contingency table), find the noncentrality parameter of Fisher's
+    noncentral hypergeometric distribution whose mean is x.
+    """
+    nc = _solve(lambda nc: nchypergeom_fisher.mean(M, n, N, nc) - x)
+    return nc
+
+
+def _hypergeom_params_from_table(table):
+    # The notation M, n and N is consistent with stats.hypergeom and
+    # stats.nchypergeom_fisher.
+    x = table[0, 0]
+    M = table.sum()
+    n = table[0].sum()
+    N = table[:, 0].sum()
+    return x, M, n, N
+
+
+def _ci_upper(table, alpha):
+    """
+    Compute the upper end of the confidence interval.
+    """
+    if _sample_odds_ratio(table) == np.inf:
+        return np.inf
+
+    x, M, n, N = _hypergeom_params_from_table(table)
+
+    # nchypergeom_fisher.cdf is a decreasing function of nc, so we negate
+    # it in the lambda expression.
+    nc = _solve(lambda nc: -nchypergeom_fisher.cdf(x, M, n, N, nc) + alpha)
+    return nc
+
+
+def _ci_lower(table, alpha):
+    """
+    Compute the lower end of the confidence interval.
+    """
+    if _sample_odds_ratio(table) == 0:
+        return 0
+
+    x, M, n, N = _hypergeom_params_from_table(table)
+
+    nc = _solve(lambda nc: nchypergeom_fisher.sf(x - 1, M, n, N, nc) - alpha)
+    return nc
+
+
+def _conditional_oddsratio(table):
+    """
+    Conditional MLE of the odds ratio for the 2x2 contingency table.
+    """
+    x, M, n, N = _hypergeom_params_from_table(table)
+    # Get the bounds of the support.  The support of the noncentral
+    # hypergeometric distribution with parameters M, n, and N is the same
+    # for all values of the noncentrality parameter, so we can use 1 here.
+    lo, hi = nchypergeom_fisher.support(M, n, N, 1)
+
+    # Check if x is at one of the extremes of the support.  If so, we know
+    # the odds ratio is either 0 or inf.
+    if x == lo:
+        # x is at the low end of the support.
+        return 0
+    if x == hi:
+        # x is at the high end of the support.
+        return np.inf
+
+    nc = _nc_hypergeom_mean_inverse(x, M, n, N)
+    return nc
+
+
+def _conditional_oddsratio_ci(table, confidence_level=0.95,
+                              alternative='two-sided'):
+    """
+    Conditional exact confidence interval for the odds ratio.
+    """
+    if alternative == 'two-sided':
+        alpha = 0.5*(1 - confidence_level)
+        lower = _ci_lower(table, alpha)
+        upper = _ci_upper(table, alpha)
+    elif alternative == 'less':
+        lower = 0.0
+        upper = _ci_upper(table, 1 - confidence_level)
+    else:
+        # alternative == 'greater'
+        lower = _ci_lower(table, 1 - confidence_level)
+        upper = np.inf
+
+    return lower, upper
+
+
+def _sample_odds_ratio_ci(table, confidence_level=0.95,
+                          alternative='two-sided'):
+    oddsratio = _sample_odds_ratio(table)
+    log_or = np.log(oddsratio)
+    se = np.sqrt((1/table).sum())
+    if alternative == 'less':
+        z = ndtri(confidence_level)
+        loglow = -np.inf
+        loghigh = log_or + z*se
+    elif alternative == 'greater':
+        z = ndtri(confidence_level)
+        loglow = log_or - z*se
+        loghigh = np.inf
+    else:
+        # alternative is 'two-sided'
+        z = ndtri(0.5*confidence_level + 0.5)
+        loglow = log_or - z*se
+        loghigh = log_or + z*se
+
+    return np.exp(loglow), np.exp(loghigh)
+
+
+class OddsRatioResult:
+    """
+    Result of `scipy.stats.contingency.odds_ratio`.  See the
+    docstring for `odds_ratio` for more details.
+
+    Attributes
+    ----------
+    statistic : float
+        The computed odds ratio.
+
+        * If `kind` is ``'sample'``, this is the sample (or unconditional)
+          estimate, given by
+          ``table[0, 0]*table[1, 1]/(table[0, 1]*table[1, 0])``.
+        * If `kind` is ``'conditional'``, this is the conditional
+          maximum likelihood estimate for the odds ratio. It is
+          the noncentrality parameter of Fisher's noncentral
+          hypergeometric distribution with the same hypergeometric
+          parameters as `table` and whose mean is ``table[0, 0]``.
+
+    Methods
+    -------
+    confidence_interval :
+        Confidence interval for the odds ratio.
+    """
+
+    def __init__(self, _table, _kind, statistic):
+        # for now, no need to make _table and _kind public, since this sort of
+        # information is returned in very few `scipy.stats` results
+        self._table = _table
+        self._kind = _kind
+        self.statistic = statistic
+
+    def __repr__(self):
+        return f"OddsRatioResult(statistic={self.statistic})"
+
+    def confidence_interval(self, confidence_level=0.95,
+                            alternative='two-sided'):
+        """
+        Confidence interval for the odds ratio.
+
+        Parameters
+        ----------
+        confidence_level : float
+            Desired confidence level for the confidence interval.
+            The value must be given as a fraction between 0 and 1.
+            Default is 0.95 (meaning 95%).
+
+        alternative : {'two-sided', 'less', 'greater'}, optional
+            The alternative hypothesis of the hypothesis test to which the
+            confidence interval corresponds. That is, suppose the null
+            hypothesis is that the true odds ratio equals ``OR`` and the
+            confidence interval is ``(low, high)``. Then the following options
+            for `alternative` are available (default is 'two-sided'):
+
+            * 'two-sided': the true odds ratio is not equal to ``OR``. There
+              is evidence against the null hypothesis at the chosen
+              `confidence_level` if ``high < OR`` or ``low > OR``.
+            * 'less': the true odds ratio is less than ``OR``. The ``low`` end
+              of the confidence interval is 0, and there is evidence against
+              the null hypothesis at the chosen `confidence_level` if
+              ``high < OR``.
+            * 'greater': the true odds ratio is greater than ``OR``.  The
+              ``high`` end of the confidence interval is ``np.inf``, and there
+              is evidence against the null hypothesis at the chosen
+              `confidence_level` if ``low > OR``.
+
+        Returns
+        -------
+        ci : ``ConfidenceInterval`` instance
+            The confidence interval, represented as an object with
+            attributes ``low`` and ``high``.
+
+        Notes
+        -----
+        When `kind` is ``'conditional'``, the limits of the confidence
+        interval are the conditional "exact confidence limits" as described
+        by Fisher [1]_. The conditional odds ratio and confidence interval are
+        also discussed in Section 4.1.2 of the text by Sahai and Khurshid [2]_.
+
+        When `kind` is ``'sample'``, the confidence interval is computed
+        under the assumption that the logarithm of the odds ratio is normally
+        distributed with standard error given by::
+
+            se = sqrt(1/a + 1/b + 1/c + 1/d)
+
+        where ``a``, ``b``, ``c`` and ``d`` are the elements of the
+        contingency table.  (See, for example, [2]_, section 3.1.3.2,
+        or [3]_, section 2.3.3).
+
+        References
+        ----------
+        .. [1] R. A. Fisher (1935), The logic of inductive inference,
+               Journal of the Royal Statistical Society, Vol. 98, No. 1,
+               pp. 39-82.
+        .. [2] H. Sahai and A. Khurshid (1996), Statistics in Epidemiology:
+               Methods, Techniques, and Applications, CRC Press LLC, Boca
+               Raton, Florida.
+        .. [3] Alan Agresti, An Introduction to Categorical Data Analysis
+               (second edition), Wiley, Hoboken, NJ, USA (2007).
+        """
+        if alternative not in ['two-sided', 'less', 'greater']:
+            raise ValueError("`alternative` must be 'two-sided', 'less' or "
+                             "'greater'.")
+
+        if confidence_level < 0 or confidence_level > 1:
+            raise ValueError('confidence_level must be between 0 and 1')
+
+        if self._kind == 'conditional':
+            ci = self._conditional_odds_ratio_ci(confidence_level, alternative)
+        else:
+            ci = self._sample_odds_ratio_ci(confidence_level, alternative)
+        return ci
+
+    def _conditional_odds_ratio_ci(self, confidence_level=0.95,
+                                   alternative='two-sided'):
+        """
+        Confidence interval for the conditional odds ratio.
+        """
+
+        table = self._table
+        if 0 in table.sum(axis=0) or 0 in table.sum(axis=1):
+            # If both values in a row or column are zero, the p-value is 1,
+            # the odds ratio is NaN and the confidence interval is (0, inf).
+            ci = (0, np.inf)
+        else:
+            ci = _conditional_oddsratio_ci(table,
+                                           confidence_level=confidence_level,
+                                           alternative=alternative)
+        return ConfidenceInterval(low=ci[0], high=ci[1])
+
+    def _sample_odds_ratio_ci(self, confidence_level=0.95,
+                              alternative='two-sided'):
+        """
+        Confidence interval for the sample odds ratio.
+        """
+        if confidence_level < 0 or confidence_level > 1:
+            raise ValueError('confidence_level must be between 0 and 1')
+
+        table = self._table
+        if 0 in table.sum(axis=0) or 0 in table.sum(axis=1):
+            # If both values in a row or column are zero, the p-value is 1,
+            # the odds ratio is NaN and the confidence interval is (0, inf).
+            ci = (0, np.inf)
+        else:
+            ci = _sample_odds_ratio_ci(table,
+                                       confidence_level=confidence_level,
+                                       alternative=alternative)
+        return ConfidenceInterval(low=ci[0], high=ci[1])
+
+
+def odds_ratio(table, *, kind='conditional'):
+    r"""
+    Compute the odds ratio for a 2x2 contingency table.
+
+    Parameters
+    ----------
+    table : array_like of ints
+        A 2x2 contingency table.  Elements must be non-negative integers.
+    kind : str, optional
+        Which kind of odds ratio to compute, either the sample
+        odds ratio (``kind='sample'``) or the conditional odds ratio
+        (``kind='conditional'``).  Default is ``'conditional'``.
+
+    Returns
+    -------
+    result : `~scipy.stats._result_classes.OddsRatioResult` instance
+        The returned object has two computed attributes:
+
+        statistic : float
+            * If `kind` is ``'sample'``, this is the sample (or unconditional)
+              estimate, given by
+              ``table[0, 0]*table[1, 1]/(table[0, 1]*table[1, 0])``.
+            * If `kind` is ``'conditional'``, this is the conditional
+              maximum likelihood estimate for the odds ratio. It is
+              the noncentrality parameter of Fisher's noncentral
+              hypergeometric distribution with the same hypergeometric
+              parameters as `table` and whose mean is ``table[0, 0]``.
+
+        The object has the method `confidence_interval` that computes
+        the confidence interval of the odds ratio.
+
+    See Also
+    --------
+    scipy.stats.fisher_exact
+    relative_risk
+
+    Notes
+    -----
+    The conditional odds ratio was discussed by Fisher (see "Example 1"
+    of [1]_).  Texts that cover the odds ratio include [2]_ and [3]_.
+
+    .. versionadded:: 1.10.0
+
+    References
+    ----------
+    .. [1] R. A. Fisher (1935), The logic of inductive inference,
+           Journal of the Royal Statistical Society, Vol. 98, No. 1,
+           pp. 39-82.
+    .. [2] Breslow NE, Day NE (1980). Statistical methods in cancer research.
+           Volume I - The analysis of case-control studies. IARC Sci Publ.
+           (32):5-338. PMID: 7216345. (See section 4.2.)
+    .. [3] H. Sahai and A. Khurshid (1996), Statistics in Epidemiology:
+           Methods, Techniques, and Applications, CRC Press LLC, Boca
+           Raton, Florida.
+
+    Examples
+    --------
+    In epidemiology, individuals are classified as "exposed" or
+    "unexposed" to some factor or treatment. If the occurrence of some
+    illness is under study, those who have the illness are often
+    classified as "cases", and those without it are "noncases".  The
+    counts of the occurrences of these classes give a contingency
+    table::
+
+                    exposed    unexposed
+        cases          a           b
+        noncases       c           d
+
+    The sample odds ratio may be written ``(a/c) / (b/d)``.  ``a/c`` can
+    be interpreted as the odds of a case occurring in the exposed group,
+    and ``b/d`` as the odds of a case occurring in the unexposed group.
+    The sample odds ratio is the ratio of these odds.  If the odds ratio
+    is greater than 1, it suggests that there is a positive association
+    between being exposed and being a case.
+
+    Interchanging the rows or columns of the contingency table inverts
+    the odds ratio, so it is important to understand the meaning of labels
+    given to the rows and columns of the table when interpreting the
+    odds ratio.
+
+    Consider a hypothetical example where it is hypothesized that
+    exposure to a certain chemical is associated with increased occurrence
+    of a certain disease.  Suppose we have the following table for a
+    collection of 552 people::
+
+                  exposed   unexposed
+        cases         7         15
+        noncases     58        472
+
+    The question we ask is "Is exposure to the chemical associated with
+    increased risk of the disease?"
+
+    Compute the odds ratio:
+
+    >>> from scipy.stats.contingency import odds_ratio
+    >>> res = odds_ratio([[7, 15], [58, 472]])
+    >>> res.statistic
+    3.7836687705553493
+
+    For this sample, the odds of getting the disease for those who have
+    been exposed to the chemical are almost 3.8 times that of those who
+    have not been exposed.
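+
+    For comparison, the sample (unconditional) odds ratio can be computed
+    with ``kind='sample'``; it follows directly from the table entries as
+    ``7*472/(15*58)``:
+
+    >>> res_sample = odds_ratio([[7, 15], [58, 472]], kind='sample')
+    >>> round(res_sample.statistic, 3)
+    3.798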
+
+    We can compute the 95% confidence interval for the odds ratio:
+
+    >>> res.confidence_interval(confidence_level=0.95)
+    ConfidenceInterval(low=1.2514829132266785, high=10.363493716701269)
+
+    The 95% confidence interval for the conditional odds ratio is
+    approximately (1.25, 10.4).
+    """
+    if kind not in ['conditional', 'sample']:
+        raise ValueError("`kind` must be 'conditional' or 'sample'.")
+
+    c = np.asarray(table)
+
+    if c.shape != (2, 2):
+        raise ValueError(f"Invalid shape {c.shape}. The input `table` must be "
+                         "of shape (2, 2).")
+
+    if not np.issubdtype(c.dtype, np.integer):
+        raise ValueError("`table` must be an array of integers, but got "
+                         f"type {c.dtype}")
+    c = c.astype(np.int64)
+
+    if np.any(c < 0):
+        raise ValueError("All values in `table` must be nonnegative.")
+
+    if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
+        # If both values in a row or column are zero, the p-value is NaN and
+        # the odds ratio is NaN.
+        result = OddsRatioResult(_table=c, _kind=kind, statistic=np.nan)
+        return result
+
+    if kind == 'sample':
+        oddsratio = _sample_odds_ratio(c)
+    else:  # kind is 'conditional'
+        oddsratio = _conditional_oddsratio(c)
+
+    result = OddsRatioResult(_table=c, _kind=kind, statistic=oddsratio)
+    return result
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_page_trend_test.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_page_trend_test.py
new file mode 100644
index 00000000..0aee148a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_page_trend_test.py
@@ -0,0 +1,476 @@
+from itertools import permutations
+import numpy as np
+import math
+from ._continuous_distns import norm
+import scipy.stats
+from dataclasses import make_dataclass
+
+
+PageTrendTestResult = make_dataclass("PageTrendTestResult",
+                                     ("statistic", "pvalue", "method"))
+
+
+def page_trend_test(data, ranked=False, predicted_ranks=None, method='auto'):
+    r"""
+    Perform Page's Test, a measure of trend in observations between treatments.
+
+    Page's Test (also known as Page's :math:`L` test) is useful when:
+
+    * there are :math:`n \geq 3` treatments,
+    * :math:`m \geq 2` subjects are observed for each treatment, and
+    * the observations are hypothesized to have a particular order.
+
+    Specifically, the test considers the null hypothesis that
+
+    .. math::
+
+        m_1 = m_2 = m_3 \cdots = m_n,
+
+    where :math:`m_j` is the mean of the observed quantity under treatment
+    :math:`j`, against the alternative hypothesis that
+
+    .. math::
+
+        m_1 \leq m_2 \leq m_3 \leq \cdots \leq m_n,
+
+    where at least one inequality is strict.
+
+    As noted by [4]_, Page's :math:`L` test has greater statistical power than
+    the Friedman test against the alternative that there is a difference in
+    trend, as Friedman's test only considers a difference in the means of the
+    observations without considering their order. Whereas Spearman :math:`\rho`
+    considers the correlation between the ranked observations of two variables
+    (e.g. the airspeed velocity of a swallow vs. the weight of the coconut it
+    carries), Page's :math:`L` is concerned with a trend in an observation
+    (e.g. the airspeed velocity of a swallow) across several distinct
+    treatments (e.g. carrying each of five coconuts of different weight) even
+    as the observation is repeated with multiple subjects (e.g. one European
+    swallow and one African swallow).
+
+    Parameters
+    ----------
+    data : array-like
+        A :math:`m \times n` array; the element in row :math:`i` and
+        column :math:`j` is the observation corresponding with subject
+        :math:`i` and treatment :math:`j`. By default, the columns are
+        assumed to be arranged in order of increasing predicted mean.
+
+    ranked : boolean, optional
+        By default, `data` is assumed to be observations rather than ranks;
+        it will be ranked with `scipy.stats.rankdata` along ``axis=1``. If
+        `data` is provided in the form of ranks, pass argument ``True``.
+
+    predicted_ranks : array-like, optional
+        The predicted ranks of the column means. If not specified,
+        the columns are assumed to be arranged in order of increasing
+        predicted mean, so the default `predicted_ranks` are
+        :math:`[1, 2, \dots, n-1, n]`.
+
+    method : {'auto', 'asymptotic', 'exact'}, optional
+        Selects the method used to calculate the *p*-value. The following
+        options are available.
+
+        * 'auto': selects between 'exact' and 'asymptotic' to
+          achieve reasonably accurate results in reasonable time (default)
+        * 'asymptotic': compares the standardized test statistic against
+          the normal distribution
+        * 'exact': computes the exact *p*-value by comparing the observed
+          :math:`L` statistic against those realized by all possible
+          permutations of ranks (under the null hypothesis that each
+          permutation is equally likely)
+
+    Returns
+    -------
+    res : PageTrendTestResult
+        An object containing attributes:
+
+        statistic : float
+            Page's :math:`L` test statistic.
+        pvalue : float
+            The associated *p*-value
+        method : {'asymptotic', 'exact'}
+            The method used to compute the *p*-value
+
+    See Also
+    --------
+    rankdata, friedmanchisquare, spearmanr
+
+    Notes
+    -----
+    As noted in [1]_, "the :math:`n` 'treatments' could just as well represent
+    :math:`n` objects or events or performances or persons or trials ranked."
+    Similarly, the :math:`m` 'subjects' could equally stand for :math:`m`
+    "groupings by ability or some other control variable, or judges doing
+    the ranking, or random replications of some other sort."
+
+    The procedure for calculating the :math:`L` statistic, adapted from
+    [1]_, is:
+
+    1. "Predetermine with careful logic the appropriate hypotheses
+       concerning the predicted ordering of the experimental results.
+       If no reasonable basis for ordering any treatments is known, the
+       :math:`L` test is not appropriate."
+    2. "As in other experiments, determine at what level of confidence
+       you will reject the null hypothesis that there is no agreement of
+       experimental results with the monotonic hypothesis."
+    3. "Cast the experimental material into a two-way table of :math:`n`
+       columns (treatments, objects ranked, conditions) and :math:`m`
+       rows (subjects, replication groups, levels of control variables)."
+    4. "When experimental observations are recorded, rank them across each
+       row", e.g. ``ranks = scipy.stats.rankdata(data, axis=1)``.
+    5. "Add the ranks in each column", e.g.
+       ``colsums = np.sum(ranks, axis=0)``.
+    6. "Multiply each sum of ranks by the predicted rank for that same
+       column", e.g. ``products = predicted_ranks * colsums``.
+    7. "Sum all such products", e.g. ``L = products.sum()``.
+
+    [1]_ continues by suggesting use of the standardized statistic
+
+    .. math::
+
+        \chi_L^2 = \frac{\left[12L-3mn(n+1)^2\right]^2}{mn^2(n^2-1)(n+1)}
+
+    "which is distributed approximately as chi-square with 1 degree of
+    freedom. The ordinary use of :math:`\chi^2` tables would be
+    equivalent to a two-sided test of agreement. If a one-sided test
+    is desired, *as will almost always be the case*, the probability
+    discovered in the chi-square table should be *halved*."
+
+    However, this standardized statistic does not distinguish between the
+    observed values being well correlated with the predicted ranks and being
+    _anti_-correlated with the predicted ranks. Instead, we follow [2]_
+    and calculate the standardized statistic
+
+    .. math::
+
+        \Lambda = \frac{L - E_0}{\sqrt{V_0}},
+
+    where :math:`E_0 = \frac{1}{4} mn(n+1)^2` and
+    :math:`V_0 = \frac{1}{144} mn^2(n+1)(n^2-1)`, "which is asymptotically
+    normal under the null hypothesis".
+
+    The *p*-value for ``method='exact'`` is generated by comparing the observed
+    value of :math:`L` against the :math:`L` values generated for all
+    :math:`(n!)^m` possible permutations of ranks. The calculation is performed
+    using the recursive method of [5]_.
+
+    The *p*-values are not adjusted for the possibility of ties. When
+    ties are present, the reported ``'exact'`` *p*-values may be somewhat
+    larger (i.e. more conservative) than the true *p*-value [2]_. The
+    ``'asymptotic'`` *p*-values, however, tend to be smaller (i.e. less
+    conservative) than the ``'exact'`` *p*-values.
+
+    References
+    ----------
+    .. [1] Ellis Batten Page, "Ordered hypotheses for multiple treatments:
+       a significant test for linear ranks", *Journal of the American
+       Statistical Association* 58(301), p. 216--230, 1963.
+
+    .. [2] Markus Neuhauser, *Nonparametric Statistical Test: A computational
+       approach*, CRC Press, p. 150--152, 2012.
+
+    .. [3] Statext LLC, "Page's L Trend Test - Easy Statistics", *Statext -
+       Statistics Study*, https://www.statext.com/practice/PageTrendTest03.php,
+       Accessed July 12, 2020.
+
+    .. [4] "Page's Trend Test", *Wikipedia*, WikimediaFoundation,
+       https://en.wikipedia.org/wiki/Page%27s_trend_test,
+       Accessed July 12, 2020.
+
+    .. [5] Robert E. Odeh, "The exact distribution of Page's L-statistic in
+       the two-way layout", *Communications in Statistics - Simulation and
+       Computation*,  6(1), p. 49--61, 1977.
+
+    Examples
+    --------
+    We use the example from [3]_: 10 students are asked to rate three
+    teaching methods - tutorial, lecture, and seminar - on a scale of 1-5,
+    with 1 being the lowest and 5 being the highest. We have decided that
+    a confidence level of 99% is required to reject the null hypothesis in
+    favor of our alternative: that the seminar will have the highest ratings
+    and the tutorial will have the lowest. Initially, the data have been
+    tabulated with each row representing an individual student's ratings of
+    the three methods in the following order: tutorial, lecture, seminar.
+
+    >>> table = [[3, 4, 3],
+    ...          [2, 2, 4],
+    ...          [3, 3, 5],
+    ...          [1, 3, 2],
+    ...          [2, 3, 2],
+    ...          [2, 4, 5],
+    ...          [1, 2, 4],
+    ...          [3, 4, 4],
+    ...          [2, 4, 5],
+    ...          [1, 3, 4]]
+
+    Because the tutorial is hypothesized to have the lowest ratings, the
+    column corresponding with tutorial rankings should be first; the seminar
+    is hypothesized to have the highest ratings, so its column should be last.
+    Since the columns are already arranged in this order of increasing
+    predicted mean, we can pass the table directly into `page_trend_test`.
+
+    >>> from scipy.stats import page_trend_test
+    >>> res = page_trend_test(table)
+    >>> res
+    PageTrendTestResult(statistic=133.5, pvalue=0.0018191161948127822,
+                        method='exact')
+
+    This *p*-value indicates that there is a 0.1819% chance that
+    the :math:`L` statistic would reach such an extreme value under the null
+    hypothesis. Because 0.1819% is less than 1%, we have evidence to reject
+    the null hypothesis in favor of our alternative at a 99% confidence level.
+
+    The value of the :math:`L` statistic is 133.5. To check this manually,
+    we rank the data such that high scores correspond with high ranks, settling
+    ties with an average rank:
+
+    >>> from scipy.stats import rankdata
+    >>> ranks = rankdata(table, axis=1)
+    >>> ranks
+    array([[1.5, 3. , 1.5],
+           [1.5, 1.5, 3. ],
+           [1.5, 1.5, 3. ],
+           [1. , 3. , 2. ],
+           [1.5, 3. , 1.5],
+           [1. , 2. , 3. ],
+           [1. , 2. , 3. ],
+           [1. , 2.5, 2.5],
+           [1. , 2. , 3. ],
+           [1. , 2. , 3. ]])
+
+    We add the ranks within each column, multiply the sums by the
+    predicted ranks, and sum the products.
+
+    >>> import numpy as np
+    >>> m, n = ranks.shape
+    >>> predicted_ranks = np.arange(1, n+1)
+    >>> L = (predicted_ranks * np.sum(ranks, axis=0)).sum()
+    >>> res.statistic == L
+    True
+
+    As presented in [3]_, the asymptotic approximation of the *p*-value is the
+    survival function of the normal distribution evaluated at the standardized
+    test statistic:
+
+    >>> from scipy.stats import norm
+    >>> E0 = (m*n*(n+1)**2)/4
+    >>> V0 = (m*n**2*(n+1)*(n**2-1))/144
+    >>> Lambda = (L-E0)/np.sqrt(V0)
+    >>> p = norm.sf(Lambda)
+    >>> p
+    0.0012693433690751756
+
+    This does not precisely match the *p*-value reported by `page_trend_test`
+    above. The asymptotic distribution is not very accurate, nor conservative,
+    for :math:`m \leq 12` and :math:`n \leq 8`, so `page_trend_test` chose to
+    use ``method='exact'`` based on the dimensions of the table and the
+    recommendations in Page's original paper [1]_. To override
+    `page_trend_test`'s choice, provide the `method` argument.
+
+    >>> res = page_trend_test(table, method="asymptotic")
+    >>> res
+    PageTrendTestResult(statistic=133.5, pvalue=0.0012693433690751756,
+                        method='asymptotic')
+
+    If the data are already ranked, we can pass in the ``ranks`` instead of
+    the ``table`` to save computation time.
+
+    >>> res = page_trend_test(ranks,             # ranks of data
+    ...                       ranked=True,       # data is already ranked
+    ...                       )
+    >>> res
+    PageTrendTestResult(statistic=133.5, pvalue=0.0018191161948127822,
+                        method='exact')
+
+    Suppose the raw data had been tabulated in an order different from the
+    order of predicted means, say lecture, seminar, tutorial.
+
+    >>> table = np.asarray(table)[:, [1, 2, 0]]
+
+    Since the arrangement of this table is not consistent with the assumed
+    ordering, we can either rearrange the table or provide the
+    `predicted_ranks`. Remembering that the lecture is predicted
+    to have the middle rank, the seminar the highest, and tutorial the lowest,
+    we pass:
+
+    >>> res = page_trend_test(table,             # data as originally tabulated
+    ...                       predicted_ranks=[2, 3, 1],  # our predicted order
+    ...                       )
+    >>> res
+    PageTrendTestResult(statistic=133.5, pvalue=0.0018191161948127822,
+                        method='exact')
+
+    """
+
+    # Possible values of the method parameter and the corresponding function
+    # used to evaluate the p value
+    methods = {"asymptotic": _l_p_asymptotic,
+               "exact": _l_p_exact,
+               "auto": None}
+    if method not in methods:
+        raise ValueError(f"`method` must be in {set(methods)}")
+
+    ranks = np.array(data, copy=False)
+    if ranks.ndim != 2:  # TODO: relax this to accept 3d arrays?
+        raise ValueError("`data` must be a 2d array.")
+
+    m, n = ranks.shape
+    if m < 2 or n < 3:
+        raise ValueError("Page's L is only appropriate for data with two "
+                         "or more rows and three or more columns.")
+
+    if np.any(np.isnan(data)):
+        raise ValueError("`data` contains NaNs, which cannot be ranked "
+                         "meaningfully")
+
+    # ensure NumPy array and rank the data if it's not already ranked
+    if ranked:
+        # Only a basic check on whether data is ranked. Checking that the data
+        # is properly ranked could take as much time as ranking it.
+        if not (ranks.min() >= 1 and ranks.max() <= ranks.shape[1]):
+            raise ValueError("`data` is not properly ranked. Rank the data or "
+                             "pass `ranked=False`.")
+    else:
+        ranks = scipy.stats.rankdata(data, axis=-1)
+
+    # generate predicted ranks if not provided, ensure valid NumPy array
+    if predicted_ranks is None:
+        predicted_ranks = np.arange(1, n+1)
+    else:
+        predicted_ranks = np.array(predicted_ranks, copy=False)
+        if (predicted_ranks.ndim < 1 or
+                (set(predicted_ranks) != set(range(1, n+1)) or
+                 len(predicted_ranks) != n)):
+            raise ValueError(f"`predicted_ranks` must include each integer "
+                             f"from 1 to {n} (the number of columns in "
+                             f"`data`) exactly once.")
+
+    if type(ranked) is not bool:
+        raise TypeError("`ranked` must be boolean.")
+
+    # Calculate the L statistic
+    L = _l_vectorized(ranks, predicted_ranks)
+
+    # Calculate the p-value
+    if method == "auto":
+        method = _choose_method(ranks)
+    p_fun = methods[method]  # get the function corresponding with the method
+    p = p_fun(L, m, n)
+
+    page_result = PageTrendTestResult(statistic=L, pvalue=p, method=method)
+    return page_result
+
+
+def _choose_method(ranks):
+    '''Choose method for computing p-value automatically'''
+    m, n = ranks.shape
+    if n > 8 or (m > 12 and n > 3) or m > 20:  # as in [1], [4]
+        method = "asymptotic"
+    else:
+        method = "exact"
+    return method
+
+
+def _l_vectorized(ranks, predicted_ranks):
+    '''Calculate Page's L statistic for each page of a 3d array'''
+    colsums = ranks.sum(axis=-2, keepdims=True)
+    products = predicted_ranks * colsums
+    Ls = products.sum(axis=-1)
+    Ls = Ls[0] if Ls.size == 1 else Ls.ravel()
+    return Ls
+
+
+def _l_p_asymptotic(L, m, n):
+    '''Calculate the p-value of Page's L from the asymptotic distribution'''
+    # Using [1] as a reference, the asymptotic p-value would be calculated as:
+    # chi_L = (12*L - 3*m*n*(n+1)**2)**2/(m*n**2*(n**2-1)*(n+1))
+    # p = chi2.sf(chi_L, df=1, loc=0, scale=1)/2
+    # but this is insensitive to the direction of the hypothesized ranking
+
+    # See [2] page 151
+    E0 = (m*n*(n+1)**2)/4
+    V0 = (m*n**2*(n+1)*(n**2-1))/144
+    Lambda = (L-E0)/np.sqrt(V0)
+    # This is a one-sided "greater" test - calculate the probability that the
+    # L statistic under H0 would be greater than the observed L statistic
+    p = norm.sf(Lambda)
+    return p
+
+
+def _l_p_exact(L, m, n):
+    '''Calculate the p-value of Page's L exactly'''
+    # [1] uses m, n; [5] uses n, k.
+    # Switch convention here because exact calculation code references [5].
+    L, n, k = int(L), int(m), int(n)
+    _pagel_state.set_k(k)
+    return _pagel_state.sf(L, n)
+
+
+class _PageL:
+    '''Maintains state between `page_trend_test` executions'''
+
+    def __init__(self):
+        '''Lightweight initialization'''
+        self.all_pmfs = {}
+
+    def set_k(self, k):
+        '''Calculate lower and upper limits of L for single row'''
+        self.k = k
+        # See [5] top of page 52
+        self.a, self.b = (k*(k+1)*(k+2))//6, (k*(k+1)*(2*k+1))//6
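+        # `a` is the smallest single-row value of L (ranks opposite to the
+        # predicted order, the sum of i*(k+1-i)); `b` is the largest
+        # (ranks matching the predicted order, the sum of i**2).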
+
+    def sf(self, l, n):
+        '''Survival function of Page's L statistic'''
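+        # P(L >= l) under H0: sum the exact pmf from the observed value up
+        # to the maximum attainable statistic, n*b.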
+        ps = [self.pmf(l, n) for l in range(l, n*self.b + 1)]
+        return np.sum(ps)
+
+    def p_l_k_1(self):
+        '''Relative frequency of each L value over all possible single rows'''
+
+        # See [5] Equation (6)
+        ranks = range(1, self.k+1)
+        # generate all possible rows of length k
+        rank_perms = np.array(list(permutations(ranks)))
+        # compute Page's L for all possible rows
+        Ls = (ranks*rank_perms).sum(axis=1)
+        # count occurrences of each L value
+        counts = np.histogram(Ls, np.arange(self.a-0.5, self.b+1.5))[0]
+        # factorial(k) is number of possible permutations
+        return counts/math.factorial(self.k)
+
+    def pmf(self, l, n):
+        '''Recursive function to evaluate p(l, k, n); see [5] Equation 1'''
+
+        if n not in self.all_pmfs:
+            self.all_pmfs[n] = {}
+        if self.k not in self.all_pmfs[n]:
+            self.all_pmfs[n][self.k] = {}
+
+        # Cache results to avoid repeating the calculation. Initially this
+        # was written with lru_cache, but a plain dict appears to be faster.
+        # We could also add an option to save this cache for future lookup.
+        if l in self.all_pmfs[n][self.k]:
+            return self.all_pmfs[n][self.k][l]
+
+        if n == 1:
+            ps = self.p_l_k_1()  # [5] Equation 6
+            ls = range(self.a, self.b+1)
+            # not fast, but we'll only be here once
+            self.all_pmfs[n][self.k] = {l: p for l, p in zip(ls, ps)}
+            return self.all_pmfs[n][self.k][l]
+
+        p = 0
+        low = max(l-(n-1)*self.b, self.a)  # [5] Equation 2
+        high = min(l-(n-1)*self.a, self.b)
+
+        # [5] Equation 1
+        for t in range(low, high+1):
+            p1 = self.pmf(l-t, n-1)
+            p2 = self.pmf(t, 1)
+            p += p1*p2
+        self.all_pmfs[n][self.k][l] = p
+        return p
+
+
+# Maintain state for faster repeat calls to page_trend_test w/ method='exact'
+_pagel_state = _PageL()
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_qmc.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_qmc.py
new file mode 100644
index 00000000..8f629e2e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_qmc.py
@@ -0,0 +1,2570 @@
+"""Quasi-Monte Carlo engines and helpers."""
+from __future__ import annotations
+
+import copy
+import math
+import numbers
+import os
+import warnings
+from abc import ABC, abstractmethod
+from functools import partial
+from typing import (
+    Callable,
+    ClassVar,
+    Dict,
+    List,
+    Literal,
+    Optional,
+    overload,
+    Tuple,
+    TYPE_CHECKING,
+)
+
+import numpy as np
+
+if TYPE_CHECKING:
+    import numpy.typing as npt
+    from scipy._lib._util import (
+        DecimalNumber, GeneratorType, IntNumber, SeedType
+    )
+
+import scipy.stats as stats
+from scipy._lib._util import rng_integers
+from scipy.spatial import distance, Voronoi
+from scipy.special import gammainc
+from ._sobol import (
+    _initialize_v, _cscramble, _fill_p_cumulative, _draw, _fast_forward,
+    _categorize, _MAXDIM
+)
+from ._qmc_cy import (
+    _cy_wrapper_centered_discrepancy,
+    _cy_wrapper_wrap_around_discrepancy,
+    _cy_wrapper_mixture_discrepancy,
+    _cy_wrapper_l2_star_discrepancy,
+    _cy_wrapper_update_discrepancy,
+    _cy_van_der_corput_scrambled,
+    _cy_van_der_corput,
+)
+
+
+__all__ = ['scale', 'discrepancy', 'update_discrepancy',
+           'QMCEngine', 'Sobol', 'Halton', 'LatinHypercube', 'PoissonDisk',
+           'MultinomialQMC', 'MultivariateNormalQMC']
+
+
+@overload
+def check_random_state(seed: Optional[IntNumber] = ...) -> np.random.Generator:
+    ...
+
+@overload
+def check_random_state(seed: GeneratorType) -> GeneratorType:
+    ...
+
+
+# Based on scipy._lib._util.check_random_state
+def check_random_state(seed=None):
+    """Turn `seed` into a `numpy.random.Generator` instance.
+
+    Parameters
+    ----------
+    seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional  # noqa
+        If `seed` is an int or None, a new `numpy.random.Generator` is
+        created using ``np.random.default_rng(seed)``.
+        If `seed` is already a ``Generator`` or ``RandomState`` instance, then
+        the provided instance is used.
+
+    Returns
+    -------
+    seed : {`numpy.random.Generator`, `numpy.random.RandomState`}
+        Random number generator.
+
+    """
+    if seed is None or isinstance(seed, (numbers.Integral, np.integer)):
+        return np.random.default_rng(seed)
+    elif isinstance(seed, (np.random.RandomState, np.random.Generator)):
+        return seed
+    else:
+        raise ValueError(f'{seed!r} cannot be used to seed a'
+                         ' numpy.random.Generator instance')
+
+
+def scale(
+    sample: npt.ArrayLike,
+    l_bounds: npt.ArrayLike,
+    u_bounds: npt.ArrayLike,
+    *,
+    reverse: bool = False
+) -> np.ndarray:
+    r"""Sample scaling from unit hypercube to different bounds.
+
+    To convert a sample from :math:`[0, 1)` to :math:`[a, b), b>a`,
+    with :math:`a` the lower bounds and :math:`b` the upper bounds,
+    the following transformation is used:
+
+    .. math::
+
+        (b - a) \cdot \text{sample} + a
+
+    Parameters
+    ----------
+    sample : array_like (n, d)
+        Sample to scale.
+    l_bounds, u_bounds : array_like (d,)
+        Lower and upper bounds (resp. :math:`a`, :math:`b`) of transformed
+        data. If `reverse` is True, range of the original data to transform
+        to the unit hypercube.
+    reverse : bool, optional
+        Reverse the transformation from different bounds to the unit hypercube.
+        Default is False.
+
+    Returns
+    -------
+    sample : array_like (n, d)
+        Scaled sample.
+
+    Examples
+    --------
+    Transform 3 samples in the unit hypercube to bounds:
+
+    >>> from scipy.stats import qmc
+    >>> l_bounds = [-2, 0]
+    >>> u_bounds = [6, 5]
+    >>> sample = [[0.5 , 0.75],
+    ...           [0.5 , 0.5],
+    ...           [0.75, 0.25]]
+    >>> sample_scaled = qmc.scale(sample, l_bounds, u_bounds)
+    >>> sample_scaled
+    array([[2.  , 3.75],
+           [2.  , 2.5 ],
+           [4.  , 1.25]])
+
+    And convert back to the unit hypercube:
+
+    >>> sample_ = qmc.scale(sample_scaled, l_bounds, u_bounds, reverse=True)
+    >>> sample_
+    array([[0.5 , 0.75],
+           [0.5 , 0.5 ],
+           [0.75, 0.25]])
+
+    """
+    sample = np.asarray(sample)
+
+    # Checking bounds and sample
+    if not sample.ndim == 2:
+        raise ValueError('Sample is not a 2D array')
+
+    lower, upper = _validate_bounds(
+        l_bounds=l_bounds, u_bounds=u_bounds, d=sample.shape[1]
+    )
+
+    if not reverse:
+        # Checking that sample is within the hypercube
+        if (sample.max() > 1.) or (sample.min() < 0.):
+            raise ValueError('Sample is not in unit hypercube')
+
+        return sample * (upper - lower) + lower
+    else:
+        # Checking that sample is within the bounds
+        if not (np.all(sample >= lower) and np.all(sample <= upper)):
+            raise ValueError('Sample is out of bounds')
+
+        return (sample - lower) / (upper - lower)
+
+
+def discrepancy(
+        sample: npt.ArrayLike,
+        *,
+        iterative: bool = False,
+        method: Literal["CD", "WD", "MD", "L2-star"] = "CD",
+        workers: IntNumber = 1) -> float:
+    """Discrepancy of a given sample.
+
+    Parameters
+    ----------
+    sample : array_like (n, d)
+        The sample to compute the discrepancy from.
+    iterative : bool, optional
+        Set to True only when the result will later be updated with
+        `update_discrepancy`.
+        Default is False. Refer to the notes for more details.
+    method : str, optional
+        Type of discrepancy, can be ``CD``, ``WD``, ``MD`` or ``L2-star``.
+        Refer to the notes for more details. Default is ``CD``.
+    workers : int, optional
+        Number of workers to use for parallel processing. If -1 is given all
+        CPU threads are used. Default is 1.
+
+    Returns
+    -------
+    discrepancy : float
+        Discrepancy.
+
+    Notes
+    -----
+    The discrepancy is a uniformity criterion used to assess the space filling
+    of a number of samples in a hypercube. A discrepancy quantifies the
+    distance between the continuous uniform distribution on a hypercube and the
+    discrete uniform distribution on :math:`n` distinct sample points.
+
+    The lower the value is, the better the coverage of the parameter space is.
+
+    For a collection of subsets of the hypercube, the discrepancy is the
+    difference between the fraction of sample points in one of those
+    subsets and the volume of that subset. There are different definitions of
+    discrepancy corresponding to different collections of subsets. Some
+    versions take a root mean square difference over subsets instead of
+    a maximum.
+
+    A measure of uniformity is reasonable if it satisfies the following
+    criteria [1]_:
+
+    1. It is invariant under permuting factors and/or runs.
+    2. It is invariant under rotation of the coordinates.
+    3. It can measure not only uniformity of the sample over the hypercube,
+       but also the projection uniformity of the sample over non-empty
+       subsets of lower-dimensional hypercubes.
+    4. There is some reasonable geometric meaning.
+    5. It is easy to compute.
+    6. It satisfies the Koksma-Hlawka-like inequality.
+    7. It is consistent with other criteria in experimental design.
+
+    Four methods are available:
+
+    * ``CD``: Centered Discrepancy - subspace involves a corner of the
+      hypercube
+    * ``WD``: Wrap-around Discrepancy - subspace can wrap around bounds
+    * ``MD``: Mixture Discrepancy - mix between CD/WD covering more criteria
+    * ``L2-star``: L2-star discrepancy - like CD, but not invariant under
+      rotation
+
+    See [2]_ for precise definitions of each method.
+
+    Lastly, using ``iterative=True``, it is possible to compute the
+    discrepancy as if we had :math:`n+1` samples. This is useful when adding
+    a point to a sampling and checking which candidate would give the lowest
+    discrepancy. The discrepancy can then be updated with each candidate
+    using `update_discrepancy`, which is faster than recomputing the
+    discrepancy from scratch for a large number of candidates.
+
+    References
+    ----------
+    .. [1] Fang et al. "Design and modeling for computer experiments".
+       Computer Science and Data Analysis Series, 2006.
+    .. [2] Zhou Y.-D. et al. "Mixture discrepancy for quasi-random point sets."
+       Journal of Complexity, 29 (3-4) , pp. 283-301, 2013.
+    .. [3] T. T. Warnock. "Computational investigations of low discrepancy
+       point sets." Applications of Number Theory to Numerical
+       Analysis, Academic Press, pp. 319-343, 1972.
+
+    Examples
+    --------
+    Calculate the quality of the sample using the discrepancy:
+
+    >>> import numpy as np
+    >>> from scipy.stats import qmc
+    >>> space = np.array([[1, 3], [2, 6], [3, 2], [4, 5], [5, 1], [6, 4]])
+    >>> l_bounds = [0.5, 0.5]
+    >>> u_bounds = [6.5, 6.5]
+    >>> space = qmc.scale(space, l_bounds, u_bounds, reverse=True)
+    >>> space
+    array([[0.08333333, 0.41666667],
+           [0.25      , 0.91666667],
+           [0.41666667, 0.25      ],
+           [0.58333333, 0.75      ],
+           [0.75      , 0.08333333],
+           [0.91666667, 0.58333333]])
+    >>> qmc.discrepancy(space)
+    0.008142039609053464
+
+    We can also compute iteratively the ``CD`` discrepancy by using
+    ``iterative=True``.
+
+    >>> disc_init = qmc.discrepancy(space[:-1], iterative=True)
+    >>> disc_init
+    0.04769081147119336
+    >>> qmc.update_discrepancy(space[-1], space[:-1], disc_init)
+    0.008142039609053513
+
+    """
+    sample = np.asarray(sample, dtype=np.float64, order="C")
+
+    # Checking that sample is within the hypercube and 2D
+    if not sample.ndim == 2:
+        raise ValueError("Sample is not a 2D array")
+
+    if (sample.max() > 1.) or (sample.min() < 0.):
+        raise ValueError("Sample is not in unit hypercube")
+
+    workers = _validate_workers(workers)
+
+    methods = {
+        "CD": _cy_wrapper_centered_discrepancy,
+        "WD": _cy_wrapper_wrap_around_discrepancy,
+        "MD": _cy_wrapper_mixture_discrepancy,
+        "L2-star": _cy_wrapper_l2_star_discrepancy,
+    }
+
+    if method in methods:
+        return methods[method](sample, iterative, workers=workers)
+    else:
+        raise ValueError(f"{method!r} is not a valid method. It must be one of"
+                         f" {set(methods)!r}")
+
+
+def update_discrepancy(
+        x_new: npt.ArrayLike,
+        sample: npt.ArrayLike,
+        initial_disc: DecimalNumber) -> float:
+    """Update the centered discrepancy with a new sample.
+
+    Parameters
+    ----------
+    x_new : array_like (1, d)
+        The new sample to add in `sample`.
+    sample : array_like (n, d)
+        The initial sample.
+    initial_disc : float
+        Centered discrepancy of the `sample`.
+
+    Returns
+    -------
+    discrepancy : float
+        Centered discrepancy of the sample composed of `x_new` and `sample`.
+
+    Examples
+    --------
+    We can also compute iteratively the discrepancy by using
+    ``iterative=True``.
+
+    >>> import numpy as np
+    >>> from scipy.stats import qmc
+    >>> space = np.array([[1, 3], [2, 6], [3, 2], [4, 5], [5, 1], [6, 4]])
+    >>> l_bounds = [0.5, 0.5]
+    >>> u_bounds = [6.5, 6.5]
+    >>> space = qmc.scale(space, l_bounds, u_bounds, reverse=True)
+    >>> disc_init = qmc.discrepancy(space[:-1], iterative=True)
+    >>> disc_init
+    0.04769081147119336
+    >>> qmc.update_discrepancy(space[-1], space[:-1], disc_init)
+    0.008142039609053513
+
+    """
+    sample = np.asarray(sample, dtype=np.float64, order="C")
+    x_new = np.asarray(x_new, dtype=np.float64, order="C")
+
+    # Checking that sample is within the hypercube and 2D
+    if not sample.ndim == 2:
+        raise ValueError('Sample is not a 2D array')
+
+    if (sample.max() > 1.) or (sample.min() < 0.):
+        raise ValueError('Sample is not in unit hypercube')
+
+    # Checking that x_new is within the hypercube and 1D
+    if not x_new.ndim == 1:
+        raise ValueError('x_new is not a 1D array')
+
+    if not (np.all(x_new >= 0) and np.all(x_new <= 1)):
+        raise ValueError('x_new is not in unit hypercube')
+
+    if x_new.shape[0] != sample.shape[1]:
+        raise ValueError("x_new and sample must be broadcastable")
+
+    return _cy_wrapper_update_discrepancy(x_new, sample, initial_disc)
+
+
+def _perturb_discrepancy(sample: np.ndarray, i1: int, i2: int, k: int,
+                         disc: float):
+    """Centered discrepancy after an elementary perturbation of a LHS.
+
+    An elementary perturbation consists of an exchange of coordinates between
+    two points: ``sample[i1, k] <-> sample[i2, k]``. By construction,
+    this operation conserves the LHS properties.
+
+    Parameters
+    ----------
+    sample : array_like (n, d)
+        The sample (before permutation) to compute the discrepancy from.
+    i1 : int
+        The first line of the elementary permutation.
+    i2 : int
+        The second line of the elementary permutation.
+    k : int
+        The column of the elementary permutation.
+    disc : float
+        Centered discrepancy of the design before permutation.
+
+    Returns
+    -------
+    discrepancy : float
+        Centered discrepancy of the design after permutation.
+
+    References
+    ----------
+    .. [1] Jin et al. "An efficient algorithm for constructing optimal design
+       of computer experiments", Journal of Statistical Planning and
+       Inference, 2005.
+
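+    Examples
+    --------
+    A small sanity check (illustrative only; this helper is private to the
+    module): the fast update should agree with recomputing the centered
+    discrepancy from scratch after swapping ``sample[i1, k]`` and
+    ``sample[i2, k]``:
+
+    >>> import numpy as np
+    >>> from scipy.stats import qmc
+    >>> sample = qmc.LatinHypercube(d=2, seed=12345).random(8)
+    >>> disc = qmc.discrepancy(sample)
+    >>> disc_fast = _perturb_discrepancy(sample, 0, 1, 0, disc)
+    >>> sample[[0, 1], 0] = sample[[1, 0], 0]
+    >>> bool(np.isclose(disc_fast, qmc.discrepancy(sample)))
+    True
+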
+    """
+    n = sample.shape[0]
+
+    z_ij = sample - 0.5
+
+    # Eq (19)
+    c_i1j = (1. / n ** 2.
+             * np.prod(0.5 * (2. + abs(z_ij[i1, :])
+                              + abs(z_ij) - abs(z_ij[i1, :] - z_ij)), axis=1))
+    c_i2j = (1. / n ** 2.
+             * np.prod(0.5 * (2. + abs(z_ij[i2, :])
+                              + abs(z_ij) - abs(z_ij[i2, :] - z_ij)), axis=1))
+
+    # Eq (20)
+    c_i1i1 = (1. / n ** 2 * np.prod(1 + abs(z_ij[i1, :]))
+              - 2. / n * np.prod(1. + 0.5 * abs(z_ij[i1, :])
+                                 - 0.5 * z_ij[i1, :] ** 2))
+    c_i2i2 = (1. / n ** 2 * np.prod(1 + abs(z_ij[i2, :]))
+              - 2. / n * np.prod(1. + 0.5 * abs(z_ij[i2, :])
+                                 - 0.5 * z_ij[i2, :] ** 2))
+
+    # Eq (22), typo in the article in the denominator i2 -> i1
+    num = (2 + abs(z_ij[i2, k]) + abs(z_ij[:, k])
+           - abs(z_ij[i2, k] - z_ij[:, k]))
+    denum = (2 + abs(z_ij[i1, k]) + abs(z_ij[:, k])
+             - abs(z_ij[i1, k] - z_ij[:, k]))
+    gamma = num / denum
+
+    # Eq (23)
+    c_p_i1j = gamma * c_i1j
+    # Eq (24)
+    c_p_i2j = c_i2j / gamma
+
+    alpha = (1 + abs(z_ij[i2, k])) / (1 + abs(z_ij[i1, k]))
+    beta = (2 - abs(z_ij[i2, k])) / (2 - abs(z_ij[i1, k]))
+
+    g_i1 = np.prod(1. + abs(z_ij[i1, :]))
+    g_i2 = np.prod(1. + abs(z_ij[i2, :]))
+    h_i1 = np.prod(1. + 0.5 * abs(z_ij[i1, :]) - 0.5 * (z_ij[i1, :] ** 2))
+    h_i2 = np.prod(1. + 0.5 * abs(z_ij[i2, :]) - 0.5 * (z_ij[i2, :] ** 2))
+
+    # Eq (25), typo in the article g is missing
+    c_p_i1i1 = ((g_i1 * alpha) / (n ** 2) - 2. * alpha * beta * h_i1 / n)
+    # Eq (26), typo in the article n ** 2
+    c_p_i2i2 = ((g_i2 / ((n ** 2) * alpha)) - (2. * h_i2 / (n * alpha * beta)))
+
+    # Eq (26)
+    sum_ = c_p_i1j - c_i1j + c_p_i2j - c_i2j
+
+    mask = np.ones(n, dtype=bool)
+    mask[[i1, i2]] = False
+    sum_ = sum(sum_[mask])
+
+    disc_ep = (disc + c_p_i1i1 - c_i1i1 + c_p_i2i2 - c_i2i2 + 2 * sum_)
+
+    return disc_ep
+
+
+def primes_from_2_to(n: int) -> np.ndarray:
+    """Prime numbers from 2 to *n*.
+
+    Parameters
+    ----------
+    n : int
+        Upper bound, with ``n >= 6``.
+
+    Returns
+    -------
+    primes : np.ndarray
+        Primes in ``2 <= p < n``.
+
+    Notes
+    -----
+    Taken from [1]_ by P.T. Roy, written consent given on 23.04.2021
+    by the original author, Bruno Astrolino, for free use in SciPy under
+    the 3-clause BSD.
+
+    References
+    ----------
+    .. [1] `StackOverflow `_.
+
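+    Examples
+    --------
+    All primes strictly below the bound are returned:
+
+    >>> primes_from_2_to(20)
+    array([ 2,  3,  5,  7, 11, 13, 17, 19])
+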
+    """
+    sieve = np.ones(n // 3 + (n % 6 == 2), dtype=bool)
+    for i in range(1, int(n ** 0.5) // 3 + 1):
+        k = 3 * i + 1 | 1
+        sieve[k * k // 3::2 * k] = False
+        sieve[k * (k - 2 * (i & 1) + 4) // 3::2 * k] = False
+    return np.r_[2, 3, ((3 * np.nonzero(sieve)[0][1:] + 1) | 1)]
+
+
+def n_primes(n: IntNumber) -> List[int]:
+    """List of the n-first prime numbers.
+
+    Parameters
+    ----------
+    n : int
+        Number of prime numbers wanted.
+
+    Returns
+    -------
+    primes : list(int)
+        List of primes.
+
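+    Examples
+    --------
+    The first ``n`` primes are returned as a list:
+
+    >>> n_primes(5)
+    [2, 3, 5, 7, 11]
+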
+    """
+    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59,
+              61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127,
+              131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193,
+              197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269,
+              271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349,
+              353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431,
+              433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,
+              509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599,
+              601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673,
+              677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761,
+              769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
+              859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947,
+              953, 967, 971, 977, 983, 991, 997][:n]  # type: ignore[misc]
+
+    if len(primes) < n:
+        big_number = 2000
+        while True:  # keep extending the sieve until we have n primes
+            primes = primes_from_2_to(big_number)[:n]  # type: ignore
+            if len(primes) == n:
+                break
+            big_number += 1000
+
+    return primes
+
+
+def van_der_corput(
+        n: IntNumber,
+        base: IntNumber = 2,
+        *,
+        start_index: IntNumber = 0,
+        scramble: bool = False,
+        seed: SeedType = None,
+        workers: IntNumber = 1) -> np.ndarray:
+    """Van der Corput sequence.
+
+    Pseudo-random number generator based on a b-adic expansion.
+
+    Scrambling uses permutations of the remainders (see [1]_). Multiple
+    permutations are applied to construct a point. The sequence of
+    permutations has to be the same for all points of the sequence.
+
+    Parameters
+    ----------
+    n : int
+        Number of elements in the sequence.
+    base : int, optional
+        Base of the sequence. Default is 2.
+    start_index : int, optional
+        Index to start the sequence from. Default is 0.
+    scramble : bool, optional
+        If True, use Owen scrambling. Otherwise no scrambling is done.
+        Default is False.
+    seed : {None, int, `numpy.random.Generator`}, optional
+        If `seed` is an int or None, a new `numpy.random.Generator` is
+        created using ``np.random.default_rng(seed)``.
+        If `seed` is already a ``Generator`` instance, then the provided
+        instance is used.
+    workers : int, optional
+        Number of workers to use for parallel processing. If -1 is
+        given all CPU threads are used. Default is 1.
+
+    Returns
+    -------
+    sequence : array_like (n,)
+        Sequence of Van der Corput.
+
+    References
+    ----------
+    .. [1] A. B. Owen. "A randomized Halton algorithm in R",
+       :arxiv:`1706.02808`, 2017.
+
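+    Examples
+    --------
+    Without scrambling, the base-2 sequence enumerates the dyadic rationals
+    by reflecting the digits of the index about the radix point:
+
+    >>> van_der_corput(5)
+    array([0.   , 0.5  , 0.25 , 0.75 , 0.125])
+
+    `start_index` continues the same sequence from a later position:
+
+    >>> van_der_corput(3, start_index=5)
+    array([0.625, 0.375, 0.875])
+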
+    """
+    if base < 2:
+        raise ValueError("'base' must be at least 2")
+
+    if scramble:
+        rng = check_random_state(seed)
+        # In Algorithm 1 of Owen 2017, a permutation of `np.arange(base)` is
+        # created for each positive integer `k` such that `1 - base**-k < 1`
+        # using floating-point arithmetic. For double precision floats, the
+        # condition `1 - base**-k < 1` can also be written as `base**-k >
+        # 2**-54`, which makes it more apparent how many permutations we need
+        # to create.
+        count = math.ceil(54 / math.log2(base)) - 1
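+        # (for example, base=2 needs 53 permutations and base=3 needs 34)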
+        permutations = np.repeat(np.arange(base)[None], count, axis=0)
+        for perm in permutations:
+            rng.shuffle(perm)
+
+        return _cy_van_der_corput_scrambled(n, base, start_index,
+                                            permutations, workers)
+
+    else:
+        return _cy_van_der_corput(n, base, start_index, workers)
+
+
+class QMCEngine(ABC):
+    """A generic Quasi-Monte Carlo sampler class meant for subclassing.
+
+    QMCEngine is a base class to construct a specific Quasi-Monte Carlo
+    sampler. It cannot be used directly as a sampler.
+
+    Parameters
+    ----------
+    d : int
+        Dimension of the parameter space.
+    optimization : {None, "random-cd", "lloyd"}, optional
+        Whether to use an optimization scheme to improve the quality after
+        sampling. Note that this is a post-processing step that does not
+        guarantee that all properties of the sample will be conserved.
+        Default is None.
+
+        * ``random-cd``: random permutations of coordinates to lower the
+          centered discrepancy. The best sample based on the centered
+          discrepancy is constantly updated. Centered discrepancy-based
+          sampling shows better space-filling robustness toward 2D and 3D
+          subprojections compared to using other discrepancy measures.
+        * ``lloyd``: Perturb samples using a modified Lloyd-Max algorithm.
+          The process converges to equally spaced samples.
+
+        .. versionadded:: 1.10.0
+    seed : {None, int, `numpy.random.Generator`}, optional
+        If `seed` is an int or None, a new `numpy.random.Generator` is
+        created using ``np.random.default_rng(seed)``.
+        If `seed` is already a ``Generator`` instance, then the provided
+        instance is used.
+
+    Notes
+    -----
+    By convention samples are distributed over the half-open interval
+    ``[0, 1)``. Instances of the class can access the attributes: ``d`` for
+    the dimension; and ``rng`` for the random number generator (used for the
+    ``seed``).
+
+    **Subclassing**
+
+    When subclassing `QMCEngine` to create a new sampler, ``__init__`` and
+    ``_random`` must be redefined.
+
+    * ``__init__(d, seed=None)``: at least fix the dimension. If the sampler
+      does not take advantage of a ``seed`` (deterministic methods like
+      Halton), this parameter can be omitted.
+    * ``_random(n, *, workers=1)``: draw ``n`` from the engine. ``workers``
+      is used for parallelism. See `Halton` for example.
+
+    Optionally, two other methods can be overridden by subclasses:
+
+    * ``reset``: Reset the engine to its original state.
+    * ``fast_forward``: If the sequence is deterministic (like Halton
+      sequence), then ``fast_forward(n)`` skips the first ``n`` draws.
+
+    Examples
+    --------
+    To create a random sampler based on ``np.random.random``, we would do the
+    following:
+
+    >>> from scipy.stats import qmc
+    >>> class RandomEngine(qmc.QMCEngine):
+    ...     def __init__(self, d, seed=None):
+    ...         super().__init__(d=d, seed=seed)
+    ...
+    ...
+    ...     def _random(self, n=1, *, workers=1):
+    ...         return self.rng.random((n, self.d))
+    ...
+    ...
+    ...     def reset(self):
+    ...         super().__init__(d=self.d, seed=self.rng_seed)
+    ...         return self
+    ...
+    ...
+    ...     def fast_forward(self, n):
+    ...         self.random(n)
+    ...         return self
+
+    After subclassing `QMCEngine` to define the sampling strategy we want to
+    use, we can create an instance to sample from.
+
+    >>> engine = RandomEngine(2)
+    >>> engine.random(5)
+    array([[0.22733602, 0.31675834],  # random
+           [0.79736546, 0.67625467],
+           [0.39110955, 0.33281393],
+           [0.59830875, 0.18673419],
+           [0.67275604, 0.94180287]])
+
+    We can also reset the state of the generator and resample again.
+
+    >>> _ = engine.reset()
+    >>> engine.random(5)
+    array([[0.22733602, 0.31675834],  # random
+           [0.79736546, 0.67625467],
+           [0.39110955, 0.33281393],
+           [0.59830875, 0.18673419],
+           [0.67275604, 0.94180287]])
+
+    """
+
+    @abstractmethod
+    def __init__(
+        self,
+        d: IntNumber,
+        *,
+        optimization: Optional[Literal["random-cd", "lloyd"]] = None,
+        seed: SeedType = None
+    ) -> None:
+        if not np.issubdtype(type(d), np.integer) or d < 0:
+            raise ValueError('d must be a non-negative integer value')
+
+        self.d = d
+        self.rng = check_random_state(seed)
+        self.rng_seed = copy.deepcopy(seed)
+        self.num_generated = 0
+
+        config = {
+            # random-cd
+            "n_nochange": 100,
+            "n_iters": 10_000,
+            "rng": self.rng,
+
+            # lloyd
+            "tol": 1e-5,
+            "maxiter": 10,
+            "qhull_options": None,
+        }
+        self.optimization_method = _select_optimizer(optimization, config)
+
+    @abstractmethod
+    def _random(
+        self, n: IntNumber = 1, *, workers: IntNumber = 1
+    ) -> np.ndarray:
+        ...
+
+    def random(
+        self, n: IntNumber = 1, *, workers: IntNumber = 1
+    ) -> np.ndarray:
+        """Draw `n` in the half-open interval ``[0, 1)``.
+
+        Parameters
+        ----------
+        n : int, optional
+            Number of samples to generate in the parameter space.
+            Default is 1.
+        workers : int, optional
+            Only supported with `Halton`.
+            Number of workers to use for parallel processing. If -1 is
+            given all CPU threads are used. Default is 1. It becomes faster
+            than one worker for `n` greater than :math:`10^3`.
+
+        Returns
+        -------
+        sample : array_like (n, d)
+            QMC sample.
+
+        """
+        sample = self._random(n, workers=workers)
+        if self.optimization_method is not None:
+            sample = self.optimization_method(sample)
+
+        self.num_generated += n
+        return sample
+
+    def integers(
+        self,
+        l_bounds: npt.ArrayLike,
+        *,
+        u_bounds: Optional[npt.ArrayLike] = None,
+        n: IntNumber = 1,
+        endpoint: bool = False,
+        workers: IntNumber = 1
+    ) -> np.ndarray:
+        r"""
+        Draw `n` integers from `l_bounds` (inclusive) to `u_bounds`
+        (exclusive), or if endpoint=True, `l_bounds` (inclusive) to
+        `u_bounds` (inclusive).
+
+        Parameters
+        ----------
+        l_bounds : int or array-like of ints
+            Lowest (signed) integers to be drawn (unless ``u_bounds=None``,
+            in which case this parameter is 0 and this value is used for
+            `u_bounds`).
+        u_bounds : int or array-like of ints, optional
+            If provided, one above the largest (signed) integer to be drawn
+            (see above for behavior if ``u_bounds=None``).
+            If array-like, must contain integer values.
+        n : int, optional
+            Number of samples to generate in the parameter space.
+            Default is 1.
+        endpoint : bool, optional
+            If true, sample from the interval ``[l_bounds, u_bounds]`` instead
+            of the default ``[l_bounds, u_bounds)``. Default is False.
+        workers : int, optional
+            Number of workers to use for parallel processing. If -1 is
+            given, all CPU threads are used. Only supported when using
+            `Halton`. Default is 1.
+
+        Returns
+        -------
+        sample : array_like (n, d)
+            QMC sample.
+
+        Notes
+        -----
+        It is safe to just use the same ``[0, 1)`` to integer mapping
+        with QMC that you would use with MC. You still get unbiasedness,
+        a strong law of large numbers, an asymptotically infinite variance
+        reduction and a finite sample variance bound.
+
+        To convert a sample from :math:`[0, 1)` to :math:`[a, b), b>a`,
+        with :math:`a` the lower bounds and :math:`b` the upper bounds,
+        the following transformation is used:
+
+        .. math::
+
+            \text{floor}((b - a) \cdot \text{sample} + a)
+
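+        Examples
+        --------
+        For a deterministic illustration, an unscrambled `Halton` engine is
+        used (chosen here only so the output is reproducible):
+
+        >>> from scipy.stats import qmc
+        >>> engine = qmc.Halton(d=1, scramble=False)
+        >>> engine.integers(0, u_bounds=10, n=5)
+        array([[0],
+               [5],
+               [2],
+               [7],
+               [1]])
+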
+        """
+        if u_bounds is None:
+            u_bounds = l_bounds
+            l_bounds = 0
+
+        u_bounds = np.atleast_1d(u_bounds)
+        l_bounds = np.atleast_1d(l_bounds)
+
+        if endpoint:
+            u_bounds = u_bounds + 1
+
+        if (not np.issubdtype(l_bounds.dtype, np.integer) or
+                not np.issubdtype(u_bounds.dtype, np.integer)):
+            message = ("'u_bounds' and 'l_bounds' must be integers or"
+                       " array-like of integers")
+            raise ValueError(message)
+
+        if isinstance(self, Halton):
+            sample = self.random(n=n, workers=workers)
+        else:
+            sample = self.random(n=n)
+
+        sample = scale(sample, l_bounds=l_bounds, u_bounds=u_bounds)
+        sample = np.floor(sample).astype(np.int64)
+
+        return sample
+
+    def reset(self) -> QMCEngine:
+        """Reset the engine to base state.
+
+        Returns
+        -------
+        engine : QMCEngine
+            Engine reset to its base state.
+
+        """
+        seed = copy.deepcopy(self.rng_seed)
+        self.rng = check_random_state(seed)
+        self.num_generated = 0
+        return self
+
+    def fast_forward(self, n: IntNumber) -> QMCEngine:
+        """Fast-forward the sequence by `n` positions.
+
+        Parameters
+        ----------
+        n : int
+            Number of points to skip in the sequence.
+
+        Returns
+        -------
+        engine : QMCEngine
+            The fast-forwarded engine.
+
+        """
+        self.random(n=n)
+        return self
+
+
+class Halton(QMCEngine):
+    """Halton sequence.
+
+    Pseudo-random number generator that generalizes the Van der Corput
+    sequence to multiple dimensions. The Halton sequence uses the base-two
+    Van der Corput sequence for its first dimension, base-three for its
+    second, and the :math:`n`-th prime as base for its :math:`n`-th
+    dimension.
+
+    Parameters
+    ----------
+    d : int
+        Dimension of the parameter space.
+    scramble : bool, optional
+        If True, use Owen scrambling. Otherwise no scrambling is done.
+        Default is True.
+    optimization : {None, "random-cd", "lloyd"}, optional
+        Whether to use an optimization scheme to improve the quality after
+        sampling. Note that this is a post-processing step that does not
+        guarantee that all properties of the sample will be conserved.
+        Default is None.
+
+        * ``random-cd``: random permutations of coordinates to lower the
+          centered discrepancy. The best sample based on the centered
+          discrepancy is constantly updated. Centered discrepancy-based
+          sampling shows better space-filling robustness toward 2D and 3D
+          subprojections compared to using other discrepancy measures.
+        * ``lloyd``: Perturb samples using a modified Lloyd-Max algorithm.
+          The process converges to equally spaced samples.
+
+        .. versionadded:: 1.10.0
+    seed : {None, int, `numpy.random.Generator`}, optional
+        If `seed` is an int or None, a new `numpy.random.Generator` is
+        created using ``np.random.default_rng(seed)``.
+        If `seed` is already a ``Generator`` instance, then the provided
+        instance is used.
+
+    Notes
+    -----
+    The Halton sequence has severe striping artifacts for even modestly
+    large dimensions. These can be ameliorated by scrambling. Scrambling
+    also supports replication-based error estimates and extends
+    applicability to unbounded integrands.
+
+    References
+    ----------
+    .. [1] Halton, "On the efficiency of certain quasi-random sequences of
+       points in evaluating multi-dimensional integrals", Numerische
+       Mathematik, 1960.
+    .. [2] A. B. Owen. "A randomized Halton algorithm in R",
+       :arxiv:`1706.02808`, 2017.
+
+    Examples
+    --------
+    Generate samples from a low discrepancy sequence of Halton.
+
+    >>> from scipy.stats import qmc
+    >>> sampler = qmc.Halton(d=2, scramble=False)
+    >>> sample = sampler.random(n=5)
+    >>> sample
+    array([[0.        , 0.        ],
+           [0.5       , 0.33333333],
+           [0.25      , 0.66666667],
+           [0.75      , 0.11111111],
+           [0.125     , 0.44444444]])
+
+    Compute the quality of the sample using the discrepancy criterion.
+
+    >>> qmc.discrepancy(sample)
+    0.088893711419753
+
+    To continue an existing design, extra points can be obtained by calling
+    `random` again. Alternatively, you can skip some points like:
+
+    >>> _ = sampler.fast_forward(5)
+    >>> sample_continued = sampler.random(n=5)
+    >>> sample_continued
+    array([[0.3125    , 0.37037037],
+           [0.8125    , 0.7037037 ],
+           [0.1875    , 0.14814815],
+           [0.6875    , 0.48148148],
+           [0.4375    , 0.81481481]])
+
+    Finally, samples can be scaled to bounds.
+
+    >>> l_bounds = [0, 2]
+    >>> u_bounds = [10, 5]
+    >>> qmc.scale(sample_continued, l_bounds, u_bounds)
+    array([[3.125     , 3.11111111],
+           [8.125     , 4.11111111],
+           [1.875     , 2.44444444],
+           [6.875     , 3.44444444],
+           [4.375     , 4.44444444]])
+
+    """
+
+    def __init__(
+        self, d: IntNumber, *, scramble: bool = True,
+        optimization: Optional[Literal["random-cd", "lloyd"]] = None,
+        seed: SeedType = None
+    ) -> None:
+        # Used in `scipy.integrate.qmc_quad`
+        self._init_quad = {'d': d, 'scramble': True,
+                           'optimization': optimization}
+        super().__init__(d=d, optimization=optimization, seed=seed)
+        self.seed = seed
+        self.base = n_primes(d)
+        self.scramble = scramble
+
+    def _random(
+        self, n: IntNumber = 1, *, workers: IntNumber = 1
+    ) -> np.ndarray:
+        """Draw `n` in the half-open interval ``[0, 1)``.
+
+        Parameters
+        ----------
+        n : int, optional
+            Number of samples to generate in the parameter space. Default is 1.
+        workers : int, optional
+            Number of workers to use for parallel processing. If -1 is
+            given all CPU threads are used. Default is 1. It becomes faster
+            than one worker for `n` greater than :math:`10^3`.
+
+        Returns
+        -------
+        sample : array_like (n, d)
+            QMC sample.
+
+        """
+        workers = _validate_workers(workers)
+        # Generate a sample using a Van der Corput sequence per dimension.
+        # important to have ``type(bdim) == int`` for performance reasons
+        sample = [van_der_corput(n, int(bdim), start_index=self.num_generated,
+                                 scramble=self.scramble,
+                                 seed=copy.deepcopy(self.seed),
+                                 workers=workers)
+                  for bdim in self.base]
+
+        return np.array(sample).T.reshape(n, self.d)
+
+
+class LatinHypercube(QMCEngine):
+    r"""Latin hypercube sampling (LHS).
+
+    A Latin hypercube sample [1]_ generates :math:`n` points in
+    :math:`[0,1)^{d}`. Each univariate marginal distribution is stratified,
+    placing exactly one point in :math:`[j/n, (j+1)/n)` for
+    :math:`j=0,1,...,n-1`. LHS remains applicable even when :math:`n << d`.
+
+    Parameters
+    ----------
+    d : int
+        Dimension of the parameter space.
+    centered : bool, optional
+        Center samples within cells of a multi-dimensional grid.
+        Default is False.
+
+        .. deprecated:: 1.10.0
+            `centered` is deprecated as of SciPy 1.10.0 and will be removed in
+            1.12.0. Use `scramble` instead. ``centered=True`` corresponds to
+            ``scramble=False``.
+
+    scramble : bool, optional
+        When False, center samples within cells of a multi-dimensional grid.
+        Otherwise, samples are randomly placed within cells of the grid.
+
+        .. note::
+            Setting ``scramble=False`` does not ensure deterministic output.
+            For that, use the `seed` parameter.
+
+        Default is True.
+
+        .. versionadded:: 1.10.0
+
+    optimization : {None, "random-cd", "lloyd"}, optional
+        Whether to use an optimization scheme to improve the quality after
+        sampling. Note that this is a post-processing step that does not
+        guarantee that all properties of the sample will be conserved.
+        Default is None.
+
+        * ``random-cd``: random permutations of coordinates to lower the
+          centered discrepancy. The best sample based on the centered
+          discrepancy is constantly updated. Centered discrepancy-based
+          sampling shows better space-filling robustness toward 2D and 3D
+          subprojections compared to using other discrepancy measures.
+        * ``lloyd``: Perturb samples using a modified Lloyd-Max algorithm.
+          The process converges to equally spaced samples.
+
+        .. versionadded:: 1.8.0
+        .. versionchanged:: 1.10.0
+            Add ``lloyd``.
+
+    strength : {1, 2}, optional
+        Strength of the LHS. ``strength=1`` produces a plain LHS while
+        ``strength=2`` produces an orthogonal array based LHS of strength 2
+        [7]_, [8]_. In that case, only ``n=p**2`` points can be sampled,
+        with ``p`` a prime number. It also constrains ``d <= p + 1``.
+        Default is 1.
+
+        .. versionadded:: 1.8.0
+
+    seed : {None, int, `numpy.random.Generator`}, optional
+        If `seed` is an int or None, a new `numpy.random.Generator` is
+        created using ``np.random.default_rng(seed)``.
+        If `seed` is already a ``Generator`` instance, then the provided
+        instance is used.
+
+    Notes
+    -----
+
+    When LHS is used to integrate a function :math:`f` with :math:`n`
+    points, it is extremely effective on integrands that are nearly
+    additive [2]_.
+    With a LHS of :math:`n` points, the variance of the integral is always
+    lower than plain MC on :math:`n-1` points [3]_. There is a central limit
+    theorem for LHS on the mean and variance of the integral [4]_, but not
+    necessarily for optimized LHS due to the randomization.
+
+    :math:`A` is called an orthogonal array of strength :math:`t` if, in each
+    n-row-by-t-column submatrix of :math:`A`, all :math:`p^t` possible
+    distinct rows occur the same number of times. The elements of :math:`A`
+    are in the set :math:`\{0, 1, ..., p-1\}`, also called symbols.
+    The constraint that :math:`p` must be a prime number is to allow modular
+    arithmetic. Increasing strength adds some symmetry to the sub-projections
+    of a sample. With strength 2, samples are symmetric along the diagonals of
+    2D sub-projections. This may be undesirable, but on the other hand, the
+    sample dispersion is improved.
+
+    Strength 1 (plain LHS) brings an advantage over strength 0 (MC) and
+    strength 2 is a useful increment over strength 1. Going to strength 3 is
+    a smaller increment and scrambled QMC like Sobol', Halton are more
+    performant [7]_.
+
+    To create a LHS of strength 2, the orthogonal array :math:`A` is
+    randomized by applying a random, bijective map of the set of symbols onto
+    itself. For example, in column 0, all 0s might become 2; in column 1,
+    all 0s might become 1, etc.
+    Then, for each column :math:`i` and symbol :math:`j`, we add a plain,
+    one-dimensional LHS of size :math:`p` to the subarray where
+    :math:`A^i = j`. The resulting matrix is finally divided by :math:`p`.
+
+    References
+    ----------
+    .. [1] Mckay et al., "A Comparison of Three Methods for Selecting Values
+       of Input Variables in the Analysis of Output from a Computer Code."
+       Technometrics, 1979.
+    .. [2] M. Stein, "Large sample properties of simulations using Latin
+       hypercube sampling." Technometrics 29, no. 2: 143-151, 1987.
+    .. [3] A. B. Owen, "Monte Carlo variance of scrambled net quadrature."
+       SIAM Journal on Numerical Analysis 34, no. 5: 1884-1910, 1997
+    .. [4]  Loh, W.-L. "On Latin hypercube sampling." The annals of statistics
+       24, no. 5: 2058-2080, 1996.
+    .. [5] Fang et al. "Design and modeling for computer experiments".
+       Computer Science and Data Analysis Series, 2006.
+    .. [6] Damblin et al., "Numerical studies of space filling designs:
+       optimization of Latin Hypercube Samples and subprojection properties."
+       Journal of Simulation, 2013.
+    .. [7] A. B. Owen , "Orthogonal arrays for computer experiments,
+       integration and visualization." Statistica Sinica, 1992.
+    .. [8] B. Tang, "Orthogonal Array-Based Latin Hypercubes."
+       Journal of the American Statistical Association, 1993.
+
+    Examples
+    --------
+    Generate samples from a Latin hypercube generator.
+
+    >>> from scipy.stats import qmc
+    >>> sampler = qmc.LatinHypercube(d=2)
+    >>> sample = sampler.random(n=5)
+    >>> sample
+    array([[0.1545328 , 0.53664833],  # random
+           [0.84052691, 0.06474907],
+           [0.52177809, 0.93343721],
+           [0.68033825, 0.36265316],
+           [0.26544879, 0.61163943]])
+
+    Compute the quality of the sample using the discrepancy criterion.
+
+    >>> qmc.discrepancy(sample)
+    0.0196...  # random
+
+    Samples can be scaled to bounds.
+
+    >>> l_bounds = [0, 2]
+    >>> u_bounds = [10, 5]
+    >>> qmc.scale(sample, l_bounds, u_bounds)
+    array([[1.54532796, 3.609945  ],  # random
+           [8.40526909, 2.1942472 ],
+           [5.2177809 , 4.80031164],
+           [6.80338249, 3.08795949],
+           [2.65448791, 3.83491828]])
+
+    Use the `optimization` keyword argument to produce a LHS with
+    lower discrepancy at higher computational cost.
+
+    >>> sampler = qmc.LatinHypercube(d=2, optimization="random-cd")
+    >>> sample = sampler.random(n=5)
+    >>> qmc.discrepancy(sample)
+    0.0176...  # random
+
+    Use the `strength` keyword argument to produce an orthogonal array based
+    LHS of strength 2. In this case, the number of sample points must be the
+    square of a prime number.
+
+    >>> sampler = qmc.LatinHypercube(d=2, strength=2)
+    >>> sample = sampler.random(n=9)
+    >>> qmc.discrepancy(sample)
+    0.00526...  # random
+
+    Options could be combined to produce an optimized centered
+    orthogonal array based LHS. After optimization, the result would not
+    be guaranteed to be of strength 2.
+
+    """
+
+    def __init__(
+        self, d: IntNumber, *, centered: bool = False,
+        scramble: bool = True,
+        strength: int = 1,
+        optimization: Optional[Literal["random-cd", "lloyd"]] = None,
+        seed: SeedType = None
+    ) -> None:
+        if centered:
+            scramble = False
+            warnings.warn(
+                "'centered' is deprecated and will be removed in SciPy 1.12."
+                " Please use 'scramble' instead. 'centered=True' corresponds"
+                " to 'scramble=False'.",
+                stacklevel=2
+            )
+
+        # Used in `scipy.integrate.qmc_quad`
+        self._init_quad = {'d': d, 'scramble': True, 'strength': strength,
+                           'optimization': optimization}
+        super().__init__(d=d, seed=seed, optimization=optimization)
+        self.scramble = scramble
+
+        lhs_method_strength = {
+            1: self._random_lhs,
+            2: self._random_oa_lhs
+        }
+
+        try:
+            self.lhs_method: Callable = lhs_method_strength[strength]
+        except KeyError as exc:
+            message = (f"{strength!r} is not a valid strength. It must be one"
+                       f" of {set(lhs_method_strength)!r}")
+            raise ValueError(message) from exc
+
+    def _random(
+        self, n: IntNumber = 1, *, workers: IntNumber = 1
+    ) -> np.ndarray:
+        lhs = self.lhs_method(n)
+        return lhs
+
+    def _random_lhs(self, n: IntNumber = 1) -> np.ndarray:
+        """Base LHS algorithm."""
+        if not self.scramble:
+            samples: np.ndarray | float = 0.5
+        else:
+            samples = self.rng.uniform(size=(n, self.d))
+
+        perms = np.tile(np.arange(1, n + 1),
+                        (self.d, 1))  # type: ignore[arg-type]
+        for i in range(self.d):
+            self.rng.shuffle(perms[i, :])
+        perms = perms.T
+
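+        # Each column's permutation assigns one stratum [j/n, (j+1)/n) per
+        # row; the uniform draw (or the constant 0.5 when not scrambling)
+        # places the point inside its stratum.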
+        samples = (perms - samples) / n
+        return samples
+
+    def _random_oa_lhs(self, n: IntNumber = 4) -> np.ndarray:
+        """Orthogonal array based LHS of strength 2."""
+        p = np.sqrt(n).astype(int)
+        n_row = p**2
+        n_col = p + 1
+
+        primes = primes_from_2_to(p + 1)
+        if p not in primes or n != n_row:
+            raise ValueError(
+                "n is not the square of a prime number. Close"
+                f" values are {primes[-2:]**2}"
+            )
+        if self.d > p + 1:
+            raise ValueError("n is too small for d. Must be n > (d-1)**2")
+
+        oa_sample = np.zeros(shape=(n_row, n_col), dtype=int)
+
+        # OA of strength 2
+        arrays = np.tile(np.arange(p), (2, 1))
+        oa_sample[:, :2] = np.stack(np.meshgrid(*arrays),
+                                    axis=-1).reshape(-1, 2)
+        for p_ in range(1, p):
+            oa_sample[:, 2+p_-1] = np.mod(oa_sample[:, 0]
+                                          + p_*oa_sample[:, 1], p)
+
+        # scramble the OA
+        oa_sample_ = np.empty(shape=(n_row, n_col), dtype=int)
+        for j in range(n_col):
+            perms = self.rng.permutation(p)
+            oa_sample_[:, j] = perms[oa_sample[:, j]]
+
+        # following is making a scrambled OA into an OA-LHS
+        oa_lhs_sample = np.zeros(shape=(n_row, n_col))
+        lhs_engine = LatinHypercube(d=1, scramble=self.scramble, strength=1,
+                                    seed=self.rng)  # type: QMCEngine
+        for j in range(n_col):
+            for k in range(p):
+                idx = oa_sample[:, j] == k
+                lhs = lhs_engine.random(p).flatten()
+                oa_lhs_sample[:, j][idx] = lhs + oa_sample[:, j][idx]
+
+                lhs_engine = lhs_engine.reset()
+
+        oa_lhs_sample /= p
+
+        return oa_lhs_sample[:, :self.d]  # type: ignore
+
+
+class Sobol(QMCEngine):
+    """Engine for generating (scrambled) Sobol' sequences.
+
+    Sobol' sequences are low-discrepancy, quasi-random numbers. Points
+    can be drawn using two methods:
+
+    * `random_base2`: safely draw :math:`n=2^m` points. This method
+      guarantees the balance properties of the sequence.
+    * `random`: draw an arbitrary number of points from the
+      sequence. See warning below.
+
+    Parameters
+    ----------
+    d : int
+        Dimensionality of the sequence. Max dimensionality is 21201.
+    scramble : bool, optional
+        If True, use LMS+shift scrambling. Otherwise, no scrambling is done.
+        Default is True.
+    bits : int, optional
+        Number of bits of the generator. Control the maximum number of points
+        that can be generated, which is ``2**bits``. Maximal value is 64.
+        It does not correspond to the return type, which is always
+        ``np.float64`` to prevent points from repeating themselves.
+        Default is None, which for backward compatibility, corresponds to 30.
+
+        .. versionadded:: 1.9.0
+    optimization : {None, "random-cd", "lloyd"}, optional
+        Whether to use an optimization scheme to improve the quality after
+        sampling. Note that this is a post-processing step that does not
+        guarantee that all properties of the sample will be conserved.
+        Default is None.
+
+        * ``random-cd``: random permutations of coordinates to lower the
+          centered discrepancy. The best sample based on the centered
+          discrepancy is constantly updated. Centered discrepancy-based
+          sampling shows better space-filling robustness toward 2D and 3D
+          subprojections compared to using other discrepancy measures.
+        * ``lloyd``: Perturb samples using a modified Lloyd-Max algorithm.
+          The process converges to equally spaced samples.
+
+        .. versionadded:: 1.10.0
+    seed : {None, int, `numpy.random.Generator`}, optional
+        If `seed` is an int or None, a new `numpy.random.Generator` is
+        created using ``np.random.default_rng(seed)``.
+        If `seed` is already a ``Generator`` instance, then the provided
+        instance is used.
+
+    Notes
+    -----
+    Sobol' sequences [1]_ provide :math:`n=2^m` low discrepancy points in
+    :math:`[0,1)^{d}`. Scrambling them [3]_ makes them suitable for singular
+    integrands, provides a means of error estimation, and can improve their
+    rate of convergence. The scrambling strategy which is implemented is a
+    (left) linear matrix scramble (LMS) followed by a digital random shift
+    (LMS+shift) [2]_.
+
+    There are many versions of Sobol' sequences depending on their
+    'direction numbers'. This code uses direction numbers from [4]_. Hence,
+    the maximum number of dimensions is 21201. The direction numbers have been
+    precomputed with search criterion 6 and can be retrieved at
+    https://web.maths.unsw.edu.au/~fkuo/sobol/.
+
+    .. warning::
+
+       Sobol' sequences are a quadrature rule and they lose their balance
+       properties if one uses a sample size that is not a power of 2, or skips
+       the first point, or thins the sequence [5]_.
+
+       If :math:`n=2^m` points are not enough then one should take :math:`2^M`
+       points for :math:`M>m`. When scrambling, the number R of independent
+       replicates does not have to be a power of 2.
+
+       Sobol' sequences are generated to some number :math:`B` of bits.
+       After :math:`2^B` points have been generated, the sequence would
+       repeat. Hence, an error is raised.
+       The number of bits can be controlled with the parameter `bits`.
+
+    References
+    ----------
+    .. [1] I. M. Sobol', "The distribution of points in a cube and the accurate
+       evaluation of integrals." Zh. Vychisl. Mat. i Mat. Phys., 7:784-802,
+       1967.
+    .. [2] J. Matousek, "On the L2-discrepancy for anchored boxes."
+       J. of Complexity 14, 527-556, 1998.
+    .. [3] Art B. Owen, "Scrambling Sobol and Niederreiter-Xing points."
+       Journal of Complexity, 14(4):466-489, December 1998.
+    .. [4] S. Joe and F. Y. Kuo, "Constructing sobol sequences with better
+       two-dimensional projections." SIAM Journal on Scientific Computing,
+       30(5):2635-2654, 2008.
+    .. [5] Art B. Owen, "On dropping the first Sobol' point."
+       :arxiv:`2008.08051`, 2020.
+
+    Examples
+    --------
+    Generate samples from a low discrepancy sequence of Sobol'.
+
+    >>> from scipy.stats import qmc
+    >>> sampler = qmc.Sobol(d=2, scramble=False)
+    >>> sample = sampler.random_base2(m=3)
+    >>> sample
+    array([[0.   , 0.   ],
+           [0.5  , 0.5  ],
+           [0.75 , 0.25 ],
+           [0.25 , 0.75 ],
+           [0.375, 0.375],
+           [0.875, 0.875],
+           [0.625, 0.125],
+           [0.125, 0.625]])
+
+    Compute the quality of the sample using the discrepancy criterion.
+
+    >>> qmc.discrepancy(sample)
+    0.013882107204860938
+
+    To continue an existing design, extra points can be obtained
+    by calling again `random_base2`. Alternatively, you can skip some
+    points like:
+
+    >>> _ = sampler.reset()
+    >>> _ = sampler.fast_forward(4)
+    >>> sample_continued = sampler.random_base2(m=2)
+    >>> sample_continued
+    array([[0.375, 0.375],
+           [0.875, 0.875],
+           [0.625, 0.125],
+           [0.125, 0.625]])
+
+    Finally, samples can be scaled to bounds.
+
+    >>> l_bounds = [0, 2]
+    >>> u_bounds = [10, 5]
+    >>> qmc.scale(sample_continued, l_bounds, u_bounds)
+    array([[3.75 , 3.125],
+           [8.75 , 4.625],
+           [6.25 , 2.375],
+           [1.25 , 3.875]])
+
+    """
+
+    MAXDIM: ClassVar[int] = _MAXDIM
+
+    def __init__(
+        self, d: IntNumber, *, scramble: bool = True,
+        bits: Optional[IntNumber] = None, seed: SeedType = None,
+        optimization: Optional[Literal["random-cd", "lloyd"]] = None
+    ) -> None:
+        # Used in `scipy.integrate.qmc_quad`
+        self._init_quad = {'d': d, 'scramble': True, 'bits': bits,
+                           'optimization': optimization}
+
+        super().__init__(d=d, optimization=optimization, seed=seed)
+        if d > self.MAXDIM:
+            raise ValueError(
+                f"Maximum supported dimensionality is {self.MAXDIM}."
+            )
+
+        self.bits = bits
+        self.dtype_i: type
+
+        if self.bits is None:
+            self.bits = 30
+
+        if self.bits <= 32:
+            self.dtype_i = np.uint32
+        elif 32 < self.bits <= 64:
+            self.dtype_i = np.uint64
+        else:
+            raise ValueError("Maximum supported 'bits' is 64")
+
+        self.maxn = 2**self.bits
+
+        # v is d x maxbit matrix
+        self._sv: np.ndarray = np.zeros((d, self.bits), dtype=self.dtype_i)
+        _initialize_v(self._sv, dim=d, bits=self.bits)
+
+        if not scramble:
+            self._shift: np.ndarray = np.zeros(d, dtype=self.dtype_i)
+        else:
+            # scramble self._shift and self._sv
+            self._scramble()
+
+        self._quasi = self._shift.copy()
+
+        # normalization constant with the largest possible number
+        # calculate in Python to not overflow int with 2**64
+        self._scale = 1.0 / 2 ** self.bits
+
+        self._first_point = (self._quasi * self._scale).reshape(1, -1)
+        # explicit casting to float64
+        self._first_point = self._first_point.astype(np.float64)
+
+    def _scramble(self) -> None:
+        """Scramble the sequence using LMS+shift."""
+        # Generate shift vector
+        self._shift = np.dot(
+            rng_integers(self.rng, 2, size=(self.d, self.bits),
+                         dtype=self.dtype_i),
+            2 ** np.arange(self.bits, dtype=self.dtype_i),
+        )
+        # Generate lower triangular matrices (stacked across dimensions)
+        ltm = np.tril(rng_integers(self.rng, 2,
+                                   size=(self.d, self.bits, self.bits),
+                                   dtype=self.dtype_i))
+        _cscramble(
+            dim=self.d, bits=self.bits,  # type: ignore[arg-type]
+            ltm=ltm, sv=self._sv
+        )
+
+    def _random(
+        self, n: IntNumber = 1, *, workers: IntNumber = 1
+    ) -> np.ndarray:
+        """Draw next point(s) in the Sobol' sequence.
+
+        Parameters
+        ----------
+        n : int, optional
+            Number of samples to generate in the parameter space. Default is 1.
+
+        Returns
+        -------
+        sample : array_like (n, d)
+            Sobol' sample.
+
+        """
+        sample: np.ndarray = np.empty((n, self.d), dtype=np.float64)
+
+        if n == 0:
+            return sample
+
+        total_n = self.num_generated + n
+        if total_n > self.maxn:
+            msg = (
+                f"At most 2**{self.bits}={self.maxn} distinct points can be "
+                f"generated. {self.num_generated} points have been previously "
+                f"generated, then: n={self.num_generated}+{n}={total_n}. "
+            )
+            if self.bits != 64:
+                msg += "Consider increasing `bits`."
+            raise ValueError(msg)
+
+        if self.num_generated == 0:
+            # verify n is 2**n
+            if not (n & (n - 1) == 0):
+                warnings.warn("The balance properties of Sobol' points require"
+                              " n to be a power of 2.", stacklevel=2)
+
+            if n == 1:
+                sample = self._first_point
+            else:
+                _draw(
+                    n=n - 1, num_gen=self.num_generated, dim=self.d,
+                    scale=self._scale, sv=self._sv, quasi=self._quasi,
+                    sample=sample
+                )
+                sample = np.concatenate(
+                    [self._first_point, sample]
+                )[:n]  # type: ignore[misc]
+        else:
+            _draw(
+                n=n, num_gen=self.num_generated - 1, dim=self.d,
+                scale=self._scale, sv=self._sv, quasi=self._quasi,
+                sample=sample
+            )
+
+        return sample
+
+    def random_base2(self, m: IntNumber) -> np.ndarray:
+        """Draw point(s) from the Sobol' sequence.
+
+        This function draws :math:`n=2^m` points in the parameter space
+        ensuring the balance properties of the sequence.
+
+        Parameters
+        ----------
+        m : int
+            Logarithm in base 2 of the number of samples; i.e., n = 2^m.
+
+        Returns
+        -------
+        sample : array_like (n, d)
+            Sobol' sample.
+
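+        Examples
+        --------
+        Drawing :math:`n=2^m` points keeps the sequence balanced; shown here
+        unscrambled so the output is deterministic:
+
+        >>> from scipy.stats import qmc
+        >>> sampler = qmc.Sobol(d=2, scramble=False)
+        >>> sampler.random_base2(m=2)
+        array([[0.  , 0.  ],
+               [0.5 , 0.5 ],
+               [0.75, 0.25],
+               [0.25, 0.75]])
+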
+        """
+        n = 2 ** m
+
+        total_n = self.num_generated + n
+        if not (total_n & (total_n - 1) == 0):
+            raise ValueError("The balance properties of Sobol' points require "
+                             "n to be a power of 2. {0} points have been "
+                             "previously generated, then: n={0}+2**{1}={2}. "
+                             "If you still want to do this, the function "
+                             "'Sobol.random()' can be used."
+                             .format(self.num_generated, m, total_n))
+
+        return self.random(n)
+
+    def reset(self) -> Sobol:
+        """Reset the engine to base state.
+
+        Returns
+        -------
+        engine : Sobol
+            Engine reset to its base state.
+
+        """
+        super().reset()
+        self._quasi = self._shift.copy()
+        return self
+
+    def fast_forward(self, n: IntNumber) -> Sobol:
+        """Fast-forward the sequence by `n` positions.
+
+        Parameters
+        ----------
+        n : int
+            Number of points to skip in the sequence.
+
+        Returns
+        -------
+        engine : Sobol
+            The fast-forwarded engine.
+
+        """
+        if self.num_generated == 0:
+            _fast_forward(
+                n=n - 1, num_gen=self.num_generated, dim=self.d,
+                sv=self._sv, quasi=self._quasi
+            )
+        else:
+            _fast_forward(
+                n=n, num_gen=self.num_generated - 1, dim=self.d,
+                sv=self._sv, quasi=self._quasi
+            )
+        self.num_generated += n
+        return self
+
+
+class PoissonDisk(QMCEngine):
+    """Poisson disk sampling.
+
+    Parameters
+    ----------
+    d : int
+        Dimension of the parameter space.
+    radius : float
+        Minimal distance to keep between points when sampling new candidates.
+    hypersphere : {"volume", "surface"}, optional
+        Sampling strategy to generate potential candidates to be added in the
+        final sample. Default is "volume".
+
+        * ``volume``: original Bridson algorithm as described in [1]_.
+          New candidates are sampled *within* the hypersphere.
+        * ``surface``: only sample the surface of the hypersphere.
+    ncandidates : int
+        Number of candidates to sample per iteration. More candidates result
+        in a denser sampling as more candidates can be accepted per iteration.
+    optimization : {None, "random-cd", "lloyd"}, optional
+        Whether to use an optimization scheme to improve the quality after
+        sampling. Note that this is a post-processing step that does not
+        guarantee that all properties of the sample will be conserved.
+        Default is None.
+
+        * ``random-cd``: random permutations of coordinates to lower the
+          centered discrepancy. The best sample based on the centered
+          discrepancy is constantly updated. Centered discrepancy-based
+          sampling shows better space-filling robustness toward 2D and 3D
+          subprojections compared to using other discrepancy measures.
+        * ``lloyd``: Perturb samples using a modified Lloyd-Max algorithm.
+          The process converges to equally spaced samples.
+
+        .. versionadded:: 1.10.0
+    seed : {None, int, `numpy.random.Generator`}, optional
+        If `seed` is an int or None, a new `numpy.random.Generator` is
+        created using ``np.random.default_rng(seed)``.
+        If `seed` is already a ``Generator`` instance, then the provided
+        instance is used.
+
+    Notes
+    -----
+    Poisson disk sampling is an iterative sampling strategy. Starting from
+    a seed sample, `ncandidates` are sampled in the hypersphere
+    surrounding the seed. Candidates closer than `radius` to an existing
+    sample, or outside the domain, are rejected. Accepted candidates are
+    added to the pool of seed samples. The process stops when the pool is
+    empty or when the number of required samples is reached.
+
+    The maximum number of points that a sample can contain is directly linked
+    to the `radius`. As the dimension of the space increases, a higher radius
+    spreads the points further apart and helps overcome the curse of
+    dimensionality. See the
+    :ref:`quasi monte carlo tutorial <quasi-monte-carlo>` for more details.
+
+    .. warning::
+
+       Due to its iterative nature and memory requirements, the algorithm
+       is best suited to low dimensions and small sample sizes.
+       Selecting a small radius in a high-dimensional space means that the
+       space can hold many more samples than it would with a lower
+       dimension or a bigger radius.
+
+    Some code taken from [2]_, written consent given on 31.03.2021
+    by the original author, Shamis, for free use in SciPy under
+    the 3-clause BSD.
+
+    References
+    ----------
+    .. [1] Robert Bridson, "Fast Poisson Disk Sampling in Arbitrary
+       Dimensions." SIGGRAPH, 2007.
+    .. [2] `StackOverflow `__.
+
+    Examples
+    --------
+    Generate a 2D sample using a `radius` of 0.2.
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from matplotlib.collections import PatchCollection
+    >>> from scipy.stats import qmc
+    >>>
+    >>> rng = np.random.default_rng()
+    >>> radius = 0.2
+    >>> engine = qmc.PoissonDisk(d=2, radius=radius, seed=rng)
+    >>> sample = engine.random(20)
+
+    Visualizing the 2D sample and showing that no points are closer than
+    `radius`. ``radius/2`` is used to visualize non-intersecting circles.
+    If two samples are exactly at `radius` from each other, then their circle
+    of radius ``radius/2`` will touch.
+
+    >>> fig, ax = plt.subplots()
+    >>> _ = ax.scatter(sample[:, 0], sample[:, 1])
+    >>> circles = [plt.Circle((xi, yi), radius=radius/2, fill=False)
+    ...            for xi, yi in sample]
+    >>> collection = PatchCollection(circles, match_original=True)
+    >>> ax.add_collection(collection)
+    >>> _ = ax.set(aspect='equal', xlabel=r'$x_1$', ylabel=r'$x_2$',
+    ...            xlim=[0, 1], ylim=[0, 1])
+    >>> plt.show()
+
+    Such visualization can be seen as circle packing: how many circles can
+    we put in the space? It is an NP-hard problem. The method `fill_space`
+    can be used to add samples until no more samples can be added. This is
+    a hard problem and parameters may need to be adjusted manually. Beware of
+    the dimension: as the dimensionality increases, the number of samples
+    required to fill the space grows exponentially
+    (curse of dimensionality).
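+
+    A rough sketch of `fill_space` (the radius below is illustrative and the
+    number of accepted points varies with the draw):
+
+    >>> engine = qmc.PoissonDisk(d=2, radius=0.2, seed=rng)
+    >>> sample = engine.fill_space()
+    >>> sample.shape[1]
+    2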
+
+    """
+
+    def __init__(
+        self,
+        d: IntNumber,
+        *,
+        radius: DecimalNumber = 0.05,
+        hypersphere: Literal["volume", "surface"] = "volume",
+        ncandidates: IntNumber = 30,
+        optimization: Optional[Literal["random-cd", "lloyd"]] = None,
+        seed: SeedType = None
+    ) -> None:
+        # Used in `scipy.integrate.qmc_quad`
+        self._init_quad = {'d': d, 'radius': radius,
+                           'hypersphere': hypersphere,
+                           'ncandidates': ncandidates,
+                           'optimization': optimization}
+        super().__init__(d=d, optimization=optimization, seed=seed)
+
+        hypersphere_sample = {
+            "volume": self._hypersphere_volume_sample,
+            "surface": self._hypersphere_surface_sample
+        }
+
+        try:
+            self.hypersphere_method = hypersphere_sample[hypersphere]
+        except KeyError as exc:
+            message = (
+                f"{hypersphere!r} is not a valid hypersphere sampling"
+                f" method. It must be one of {set(hypersphere_sample)!r}")
+            raise ValueError(message) from exc
+
+        # radius of the sphere from which candidates are drawn, relative to
+        # the disk radius. For the surface sampler, all new points lie almost
+        # exactly 1 radius away from at least one existing sample; the small
+        # +eps (factor 1.001) avoids rejections due to rounding
+        self.radius_factor = 2 if hypersphere == "volume" else 1.001
+        self.radius = radius
+        self.radius_squared = self.radius**2
+
+        # sample to generate per iteration in the hypersphere around center
+        self.ncandidates = ncandidates
+
+        with np.errstate(divide='ignore'):
+            self.cell_size = self.radius / np.sqrt(self.d)
+            self.grid_size = (
+                np.ceil(np.ones(self.d) / self.cell_size)
+            ).astype(int)
+
+        self._initialize_grid_pool()
+
+    def _initialize_grid_pool(self):
+        """Sampling pool and sample grid."""
+        self.sample_pool = []
+        # Positions of cells
+        # n-dim value for each grid cell
+        self.sample_grid = np.empty(
+            np.append(self.grid_size, self.d),
+            dtype=np.float32
+        )
+        # Initialise empty cells with NaNs
+        self.sample_grid.fill(np.nan)
+
+    def _random(
+        self, n: IntNumber = 1, *, workers: IntNumber = 1
+    ) -> np.ndarray:
+        """Draw `n` in the interval ``[0, 1]``.
+
+        Note that it can return fewer samples if the space is full.
+        See the note section of the class.
+
+        Parameters
+        ----------
+        n : int, optional
+            Number of samples to generate in the parameter space. Default is 1.
+
+        Returns
+        -------
+        sample : array_like (n, d)
+            QMC sample.
+
+        """
+        if n == 0 or self.d == 0:
+            return np.empty((n, self.d))
+
+        def in_limits(sample: np.ndarray) -> bool:
+            return (sample.max() <= 1.) and (sample.min() >= 0.)
+
+        def in_neighborhood(candidate: np.ndarray, n: int = 2) -> bool:
+            """
+            Check if there are samples closer than ``radius_squared`` to the
+            `candidate` sample.
+            """
+            indices = (candidate / self.cell_size).astype(int)
+            ind_min = np.maximum(indices - n, np.zeros(self.d, dtype=int))
+            ind_max = np.minimum(indices + n + 1, self.grid_size)
+
+            # Check if the center cell is empty
+            if not np.isnan(self.sample_grid[tuple(indices)][0]):
+                return True
+
+            a = [slice(ind_min[i], ind_max[i]) for i in range(self.d)]
+
+            # guard against "invalid value encountered in less": comparisons
+            # with NaN return False, which is the wanted behavior here
+            with np.errstate(invalid='ignore'):
+                if np.any(
+                    np.sum(
+                        np.square(candidate - self.sample_grid[tuple(a)]),
+                        axis=self.d
+                    ) < self.radius_squared
+                ):
+                    return True
+
+            return False
+
+        def add_sample(candidate: np.ndarray) -> None:
+            self.sample_pool.append(candidate)
+            indices = (candidate / self.cell_size).astype(int)
+            self.sample_grid[tuple(indices)] = candidate
+            curr_sample.append(candidate)
+
+        curr_sample: List[np.ndarray] = []
+
+        if len(self.sample_pool) == 0:
+            # the pool is being initialized with a single random sample
+            add_sample(self.rng.random(self.d))
+            num_drawn = 1
+        else:
+            num_drawn = 0
+
+        # exhaust the sample pool to draw up to n samples
+        while len(self.sample_pool) and num_drawn < n:
+            # select a sample from the available pool
+            idx_center = rng_integers(self.rng, len(self.sample_pool))
+            center = self.sample_pool[idx_center]
+            del self.sample_pool[idx_center]
+
+            # generate candidates around the center sample
+            candidates = self.hypersphere_method(
+                center, self.radius * self.radius_factor, self.ncandidates
+            )
+
+            # keep candidates that satisfy some conditions
+            for candidate in candidates:
+                if in_limits(candidate) and not in_neighborhood(candidate):
+                    add_sample(candidate)
+
+                    num_drawn += 1
+                    if num_drawn >= n:
+                        break
+
+        self.num_generated += num_drawn
+        return np.array(curr_sample)
+
+    def fill_space(self) -> np.ndarray:
+        """Draw ``n`` samples in the interval ``[0, 1]``.
+
+        Unlike `random`, this method will try to add points until
+        the space is full. Depending on ``ncandidates`` (and to a lesser
+        extent other parameters), some empty areas can still be present in
+        the sample.
+
+        .. warning::
+
+           This can be extremely slow in high dimensions or if the
+           ``radius`` is very small with respect to the dimensionality.
+
+        Returns
+        -------
+        sample : array_like (n, d)
+            QMC sample.
+
+        """
+        return self.random(np.inf)  # type: ignore[arg-type]
+
+    def reset(self) -> PoissonDisk:
+        """Reset the engine to base state.
+
+        Returns
+        -------
+        engine : PoissonDisk
+            Engine reset to its base state.
+
+        """
+        super().reset()
+        self._initialize_grid_pool()
+        return self
+
+    def _hypersphere_volume_sample(
+        self, center: np.ndarray, radius: DecimalNumber,
+        candidates: IntNumber = 1
+    ) -> np.ndarray:
+        """Uniform sampling within hypersphere."""
+        # should remove samples within r/2
+        x = self.rng.standard_normal(size=(candidates, self.d))
+        ssq = np.sum(x**2, axis=1)
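+        # gammainc(d/2, ssq/2) is the chi-squared CDF of the squared norm and
+        # hence a Uniform(0, 1) variate; its d-th root times `radius` yields
+        # a radial distance uniform in volume, so candidates are uniform
+        # within the ball around `center`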
+        fr = radius * gammainc(self.d/2, ssq/2)**(1/self.d) / np.sqrt(ssq)
+        fr_tiled = np.tile(
+            fr.reshape(-1, 1), (1, self.d)  # type: ignore[arg-type]
+        )
+        p = center + np.multiply(x, fr_tiled)
+        return p
+
+    def _hypersphere_surface_sample(
+        self, center: np.ndarray, radius: DecimalNumber,
+        candidates: IntNumber = 1
+    ) -> np.ndarray:
+        """Uniform sampling on the hypersphere's surface."""
+        vec = self.rng.standard_normal(size=(candidates, self.d))
+        vec /= np.linalg.norm(vec, axis=1)[:, None]
+        p = center + np.multiply(vec, radius)
+        return p
+
+
+class MultivariateNormalQMC:
+    r"""QMC sampling from a multivariate Normal :math:`N(\mu, \Sigma)`.
+
+    Parameters
+    ----------
+    mean : array_like (d,)
+        The mean vector, where ``d`` is the dimension.
+    cov : array_like (d, d), optional
+        The covariance matrix. If omitted, use `cov_root` instead.
+        If both `cov` and `cov_root` are omitted, use the identity matrix.
+    cov_root : array_like (d, d'), optional
+        A root decomposition of the covariance matrix, where ``d'`` may be less
+        than ``d`` if the covariance is not full rank. If omitted, use `cov`.
+    inv_transform : bool, optional
+        If True, use inverse transform instead of Box-Muller. Default is True.
+    engine : QMCEngine, optional
+        Quasi-Monte Carlo engine sampler. If None, `Sobol` is used.
+    seed : {None, int, `numpy.random.Generator`}, optional
+        Used only if `engine` is None.
+        If `seed` is an int or None, a new `numpy.random.Generator` is
+        created using ``np.random.default_rng(seed)``.
+        If `seed` is already a ``Generator`` instance, then the provided
+        instance is used.
+
+    Examples
+    --------
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.stats import qmc
+    >>> dist = qmc.MultivariateNormalQMC(mean=[0, 5], cov=[[1, 0], [0, 1]])
+    >>> sample = dist.random(512)
+    >>> _ = plt.scatter(sample[:, 0], sample[:, 1])
+    >>> plt.show()
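+
+    A covariance can also be supplied through a root factor. The matrix
+    below is an illustrative assumption, not a recommended default:
+
+    >>> import numpy as np
+    >>> root = np.array([[1.0, 0.0], [0.5, 0.5]])
+    >>> dist = qmc.MultivariateNormalQMC(mean=[0, 0], cov_root=root)
+    >>> sample = dist.random(4)
+    >>> sample.shape
+    (4, 2)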
+
+    """
+
+    def __init__(
+            self, mean: npt.ArrayLike, cov: Optional[npt.ArrayLike] = None, *,
+            cov_root: Optional[npt.ArrayLike] = None,
+            inv_transform: bool = True,
+            engine: Optional[QMCEngine] = None,
+            seed: SeedType = None
+    ) -> None:
+        mean = np.array(mean, copy=False, ndmin=1)
+        d = mean.shape[0]
+        if cov is not None:
+            # covariance matrix provided
+            cov = np.array(cov, copy=False, ndmin=2)
+            # check that cov is square/symmetric and matches the dimension
+            # of the mean vector
+            if not mean.shape[0] == cov.shape[0]:
+                raise ValueError("Dimension mismatch between mean and "
+                                 "covariance.")
+            if not np.allclose(cov, cov.transpose()):
+                raise ValueError("Covariance matrix is not symmetric.")
+            # compute Cholesky decomp; if it fails, do the eigen decomposition
+            try:
+                cov_root = np.linalg.cholesky(cov).transpose()
+            except np.linalg.LinAlgError:
+                eigval, eigvec = np.linalg.eigh(cov)
+                if not np.all(eigval >= -1.0e-8):
+                    raise ValueError("Covariance matrix not PSD.")
+                eigval = np.clip(eigval, 0.0, None)
+                cov_root = (eigvec * np.sqrt(eigval)).transpose()
+        elif cov_root is not None:
+            # root decomposition provided
+            cov_root = np.atleast_2d(cov_root)
+            if not mean.shape[0] == cov_root.shape[0]:
+                raise ValueError("Dimension mismatch between mean and "
+                                 "covariance.")
+        else:
+            # corresponds to identity covariance matrix
+            cov_root = None
+
+        self._inv_transform = inv_transform
+
+        if not inv_transform:
+            # to apply Box-Muller, we need an even number of dimensions
+            engine_dim = 2 * math.ceil(d / 2)
+        else:
+            engine_dim = d
+        if engine is None:
+            self.engine = Sobol(
+                d=engine_dim, scramble=True, bits=30, seed=seed
+            )  # type: QMCEngine
+        elif isinstance(engine, QMCEngine):
+            if engine.d != engine_dim:
+                raise ValueError("Dimension of `engine` must be consistent"
+                                 " with dimensions of mean and covariance."
+                                 " If `inv_transform` is False, it must be"
+                                 " an even number.")
+            self.engine = engine
+        else:
+            raise ValueError("`engine` must be an instance of "
+                             "`scipy.stats.qmc.QMCEngine` or `None`.")
+
+        self._mean = mean
+        self._corr_matrix = cov_root
+
+        self._d = d
+
+    def random(self, n: IntNumber = 1) -> np.ndarray:
+        """Draw `n` QMC samples from the multivariate Normal.
+
+        Parameters
+        ----------
+        n : int, optional
+            Number of samples to generate in the parameter space. Default is 1.
+
+        Returns
+        -------
+        sample : array_like (n, d)
+            Sample.
+
+        """
+        base_samples = self._standard_normal_samples(n)
+        return self._correlate(base_samples)
+
+    def _correlate(self, base_samples: np.ndarray) -> np.ndarray:
+        if self._corr_matrix is not None:
+            return base_samples @ self._corr_matrix + self._mean
+        else:
+            # avoid multiplying with identity here
+            return base_samples + self._mean
+
+    def _standard_normal_samples(self, n: IntNumber = 1) -> np.ndarray:
+        """Draw `n` QMC samples from the standard Normal :math:`N(0, I_d)`.
+
+        Parameters
+        ----------
+        n : int, optional
+            Number of samples to generate in the parameter space. Default is 1.
+
+        Returns
+        -------
+        sample : array_like (n, d)
+            Sample.
+
+        """
+        # get base samples
+        samples = self.engine.random(n)
+        if self._inv_transform:
+            # apply inverse transform
+            # (values too close to 0/1 result in inf values)
+            return stats.norm.ppf(0.5 + (1 - 1e-10) * (samples - 0.5))  # type: ignore[attr-defined]
+        else:
+            # apply Box-Muller transform (note: indexes starting from 1)
+            even = np.arange(0, samples.shape[-1], 2)
+            Rs = np.sqrt(-2 * np.log(samples[:, even]))
+            thetas = 2 * math.pi * samples[:, 1 + even]
+            cos = np.cos(thetas)
+            sin = np.sin(thetas)
+            transf_samples = np.stack([Rs * cos, Rs * sin],
+                                      -1).reshape(n, -1)
+            # make sure we only return the number of dimension requested
+            return transf_samples[:, : self._d]
+
+
+class MultinomialQMC:
+    r"""QMC sampling from a multinomial distribution.
+
+    Parameters
+    ----------
+    pvals : array_like (k,)
+        Vector of probabilities of size ``k``, where ``k`` is the number
+        of categories. Elements must be non-negative and sum to 1.
+    n_trials : int
+        Number of trials.
+    engine : QMCEngine, optional
+        Quasi-Monte Carlo engine sampler. If None, `Sobol` is used.
+    seed : {None, int, `numpy.random.Generator`}, optional
+        Used only if `engine` is None.
+        If `seed` is an int or None, a new `numpy.random.Generator` is
+        created using ``np.random.default_rng(seed)``.
+        If `seed` is already a ``Generator`` instance, then the provided
+        instance is used.
+
+    Examples
+    --------
+    Let's define 3 categories; for a given sample, the trials across the
+    categories sum to ``n_trials=10``. The number of trials per category is
+    determined by the `pvals` associated with each category.
+    Then, we sample this distribution 64 times.
+
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.stats import qmc
+    >>> dist = qmc.MultinomialQMC(
+    ...     pvals=[0.2, 0.4, 0.4], n_trials=10, engine=qmc.Halton(d=1)
+    ... )
+    >>> sample = dist.random(64)
+
+    We can plot the sample and verify that the median number of trials
+    for each category follows the `pvals`. That would be
+    ``pvals * n_trials = [2, 4, 4]``.
+
+    >>> fig, ax = plt.subplots()
+    >>> ax.yaxis.get_major_locator().set_params(integer=True)
+    >>> _ = ax.boxplot(sample)
+    >>> ax.set(xlabel="Categories", ylabel="Trials")
+    >>> plt.show()
+
+    """
+
+    def __init__(
+        self, pvals: npt.ArrayLike, n_trials: IntNumber,
+        *, engine: Optional[QMCEngine] = None,
+        seed: SeedType = None
+    ) -> None:
+        self.pvals = np.array(pvals, copy=False, ndmin=1)
+        if np.min(pvals) < 0:
+            raise ValueError('Elements of pvals must be non-negative.')
+        if not np.isclose(np.sum(pvals), 1):
+            raise ValueError('Elements of pvals must sum to 1.')
+        self.n_trials = n_trials
+        if engine is None:
+            self.engine = Sobol(
+                d=1, scramble=True, bits=30, seed=seed
+            )  # type: QMCEngine
+        elif isinstance(engine, QMCEngine):
+            if engine.d != 1:
+                raise ValueError("Dimension of `engine` must be 1.")
+            self.engine = engine
+        else:
+            raise ValueError("`engine` must be an instance of "
+                             "`scipy.stats.qmc.QMCEngine` or `None`.")
+
+    def random(self, n: IntNumber = 1) -> np.ndarray:
+        """Draw `n` QMC samples from the multinomial distribution.
+
+        Parameters
+        ----------
+        n : int, optional
+            Number of samples to generate in the parameter space. Default is 1.
+
+        Returns
+        -------
+        samples : array_like (n, k)
+            Sample, where ``k = len(pvals)``.
+
+        """
+        sample = np.empty((n, len(self.pvals)))
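+        # each of the `n` samples consumes `n_trials` points from the 1-D
+        # engine; the points are binned into the k categories using the
+        # cumulative probabilities built from `pvals`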
+        for i in range(n):
+            base_draws = self.engine.random(self.n_trials).ravel()
+            p_cumulative = np.empty_like(self.pvals, dtype=float)
+            _fill_p_cumulative(np.array(self.pvals, dtype=float), p_cumulative)
+            sample_ = np.zeros_like(self.pvals, dtype=int)
+            _categorize(base_draws, p_cumulative, sample_)
+            sample[i] = sample_
+        return sample
+
+
+def _select_optimizer(
+    optimization: Optional[Literal["random-cd", "lloyd"]], config: Dict
+) -> Optional[Callable]:
+    """A factory for optimization methods."""
+    optimization_method: Dict[str, Callable] = {
+        "random-cd": _random_cd,
+        "lloyd": _lloyd_centroidal_voronoi_tessellation
+    }
+
+    optimizer: Optional[partial]
+    if optimization is not None:
+        try:
+            optimization = optimization.lower()  # type: ignore[assignment]
+            optimizer_ = optimization_method[optimization]
+        except KeyError as exc:
+            message = (f"{optimization!r} is not a valid optimization"
+                       f" method. It must be one of"
+                       f" {set(optimization_method)!r}")
+            raise ValueError(message) from exc
+
+        # config
+        optimizer = partial(optimizer_, **config)
+    else:
+        optimizer = None
+
+    return optimizer
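+
+
+# Illustrative sketch (hypothetical values, not part of the module) of how
+# the factory above can be used; the config keys must match the chosen
+# optimizer's keyword parameters, e.g. those of `_random_cd` below:
+#
+#     rng = np.random.default_rng()
+#     optimizer = _select_optimizer(
+#         "random-cd", {"n_iters": 100, "n_nochange": 10, "rng": rng}
+#     )
+#     sample = optimizer(sample)  # sample: (n, d) array in [0, 1]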
+
+
+def _random_cd(
+    best_sample: np.ndarray, n_iters: int, n_nochange: int, rng: GeneratorType,
+    **kwargs: Dict
+) -> np.ndarray:
+    """Optimal LHS on CD.
+
+    Create a base LHS and do random permutations of coordinates to
+    lower the centered discrepancy.
+    Because it starts with a normal LHS, it also works with the
+    `centered` keyword argument.
+
+    Two stopping criteria are used: at most `n_iters` iterations are
+    performed, and the search stops early if there is no improvement
+    for `n_nochange` consecutive iterations.
+    """
+    del kwargs  # only use keywords which are defined, needed by factory
+
+    n, d = best_sample.shape
+
+    if d == 0 or n == 0:
+        return np.empty((n, d))
+
+    best_disc = discrepancy(best_sample)
+
+    if n == 1:
+        return best_sample
+
+    bounds = ([0, d - 1],
+              [0, n - 1],
+              [0, n - 1])
+
+    n_nochange_ = 0
+    n_iters_ = 0
+    while n_nochange_ < n_nochange and n_iters_ < n_iters:
+        n_iters_ += 1
+
+        col = rng_integers(rng, *bounds[0], endpoint=True)  # type: ignore[misc]
+        row_1 = rng_integers(rng, *bounds[1], endpoint=True)  # type: ignore[misc]
+        row_2 = rng_integers(rng, *bounds[2], endpoint=True)  # type: ignore[misc]
+        disc = _perturb_discrepancy(best_sample,
+                                    row_1, row_2, col,
+                                    best_disc)
+        if disc < best_disc:
+            best_sample[row_1, col], best_sample[row_2, col] = (
+                best_sample[row_2, col], best_sample[row_1, col])
+
+            best_disc = disc
+            n_nochange_ = 0
+        else:
+            n_nochange_ += 1
+
+    return best_sample
+
+
+def _l1_norm(sample: np.ndarray) -> float:
+    return distance.pdist(sample, 'cityblock').min()
+
+
+def _lloyd_iteration(
+    sample: np.ndarray,
+    decay: float,
+    qhull_options: str
+) -> np.ndarray:
+    """Lloyd-Max algorithm iteration.
+
+    Based on the implementation of Stéfan van der Walt:
+
+    https://github.com/stefanv/lloyd
+
+    which is:
+
+        Copyright (c) 2021-04-21 Stéfan van der Walt
+        https://github.com/stefanv/lloyd
+        MIT License
+
+    Parameters
+    ----------
+    sample : array_like (n, d)
+        The sample to iterate on.
+    decay : float
+        Relaxation decay. A positive value moves the samples toward
+        their centroids, and a negative value moves them away.
+        A value of 1 moves the samples exactly to their centroids.
+    qhull_options : str
+        Additional options to pass to Qhull. See Qhull manual
+        for details. (Default: "Qbb Qc Qz Qj Qx" for ndim > 4 and
+        "Qbb Qc Qz Qj" otherwise.)
+
+    Returns
+    -------
+    sample : array_like (n, d)
+        The sample after an iteration of Lloyd's algorithm.
+
+    """
+    new_sample = np.empty_like(sample)
+
+    voronoi = Voronoi(sample, qhull_options=qhull_options)
+
+    for ii, idx in enumerate(voronoi.point_region):
+        # the region is a series of indices into self.voronoi.vertices
+        # remove samples at infinity, designated by index -1
+        region = [i for i in voronoi.regions[idx] if i != -1]
+
+        # get the vertices for this region
+        verts = voronoi.vertices[region]
+
+        # clipping would be wrong, we need to intersect
+        # verts = np.clip(verts, 0, 1)
+
+        # move samples towards centroids:
+        # Centroid in n-D is the mean for uniformly distributed nodes
+        # of a geometry.
+        centroid = np.mean(verts, axis=0)
+        new_sample[ii] = sample[ii] + (centroid - sample[ii]) * decay
+
+    # only keep updated samples that remain inside the unit hypercube
+    is_valid = np.all(np.logical_and(new_sample >= 0, new_sample <= 1), axis=1)
+    sample[is_valid] = new_sample[is_valid]
+
+    return sample
+
+
+def _lloyd_centroidal_voronoi_tessellation(
+    sample: npt.ArrayLike,
+    *,
+    tol: DecimalNumber = 1e-5,
+    maxiter: IntNumber = 10,
+    qhull_options: Optional[str] = None,
+    **kwargs: Dict
+) -> np.ndarray:
+    """Approximate Centroidal Voronoi Tessellation.
+
+    Perturb samples in N-dimensions using Lloyd-Max algorithm.
+
+    Parameters
+    ----------
+    sample : array_like (n, d)
+        The sample to iterate on. With ``n`` the number of samples and ``d``
+        the dimension. Samples must be in :math:`[0, 1]^d`, with ``d>=2``.
+    tol : float, optional
+        Tolerance for termination. If the min of the L1-norm over the samples
+        changes less than `tol`, it stops the algorithm. Default is 1e-5.
+    maxiter : int, optional
+        Maximum number of iterations. It will stop the algorithm even if
+        `tol` is above the threshold.
+        Too many iterations tend to cluster the samples as a hypersphere.
+        Default is 10.
+    qhull_options : str, optional
+        Additional options to pass to Qhull. See Qhull manual
+        for details. (Default: "Qbb Qc Qz Qj Qx" for ndim > 4 and
+        "Qbb Qc Qz Qj" otherwise.)
+
+    Returns
+    -------
+    sample : array_like (n, d)
+        The sample after being processed by Lloyd-Max algorithm.
+
+    Notes
+    -----
+    Lloyd-Max algorithm is an iterative process with the purpose of improving
+    the dispersion of samples. For given sample: (i) compute a Voronoi
+    Tessellation; (ii) find the centroid of each Voronoi cell; (iii) move the
+    samples toward the centroid of their respective cell. See [1]_, [2]_.
+
+    A relaxation factor is used to control how fast samples can move at each
+    iteration. The factor starts at 2 and decays exponentially to 1 over
+    `maxiter` iterations.
+
+    The process converges to equally spaced samples. It implies that measures
+    like the discrepancy could suffer from too many iterations. On the other
+    hand, L1 and L2 distances should improve. This is especially true with
+    QMC methods which tend to favor the discrepancy over other criteria.
+
+    .. note::
+
+        The current implementation does not intersect the Voronoi
+        Tessellation with the boundaries. This implies that for a low number
+        of samples, empirically below 20, no Voronoi cell touches the
+        boundaries. Hence, samples cannot be moved close to the boundaries.
+
+        Further improvements could consider the samples at infinity so that
+        all boundaries are segments of some Voronoi cells. This would fix
+        the computation of the centroid position.
+
+    .. warning::
+
+       The Voronoi Tessellation step is expensive and quickly becomes
+       intractable with dimensions as low as 10 even for a sample
+       of size as low as 1000.
+
+    .. versionadded:: 1.9.0
+
+    References
+    ----------
+    .. [1] Lloyd. "Least Squares Quantization in PCM".
+       IEEE Transactions on Information Theory, 1982.
+    .. [2] Max J. "Quantizing for minimum distortion".
+       IEEE Transactions on Information Theory, 1960.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.spatial import distance
+    >>> rng = np.random.default_rng()
+    >>> sample = rng.random((128, 2))
+
+    .. note::
+
+        The samples need to be in :math:`[0, 1]^d`. `scipy.stats.qmc.scale`
+        can be used to scale the samples from their
+        original bounds to :math:`[0, 1]^d`. And back to their original bounds.
+
+    Compute the quality of the sample using the L1 criterion.
+
+    >>> def l1_norm(sample):
+    ...    return distance.pdist(sample, 'cityblock').min()
+
+    >>> l1_norm(sample)
+    0.00161...  # random
+
+    Now process the sample using Lloyd's algorithm and check the improvement
+    on the L1. The value should increase.
+
+    >>> sample = _lloyd_centroidal_voronoi_tessellation(sample)
+    >>> l1_norm(sample)
+    0.0278...  # random
+
+    """
+    del kwargs  # only use keywords which are defined, needed by factory
+
+    sample = np.asarray(sample).copy()
+
+    if not sample.ndim == 2:
+        raise ValueError('`sample` is not a 2D array')
+
+    if not sample.shape[1] >= 2:
+        raise ValueError('`sample` dimension is not >= 2')
+
+    # Checking that sample is within the hypercube
+    if (sample.max() > 1.) or (sample.min() < 0.):
+        raise ValueError('`sample` is not in unit hypercube')
+
+    if qhull_options is None:
+        qhull_options = 'Qbb Qc Qz QJ'
+
+        if sample.shape[1] >= 5:
+            qhull_options += ' Qx'
+
+    # Fit an exponential decaying from ~2 at iteration 0 to 1 at `maxiter`;
+    # the decay controls the relaxation.
+    # `root` is the analytical solution of exp(-maxiter/root) = 0.1.
+    root = -maxiter / np.log(0.1)
+    decay = [np.exp(-x / root) + 0.9 for x in range(maxiter)]
+
+    l1_old = _l1_norm(sample=sample)
+    for i in range(maxiter):
+        sample = _lloyd_iteration(
+                sample=sample, decay=decay[i],
+                qhull_options=qhull_options,
+        )
+
+        l1_new = _l1_norm(sample=sample)
+
+        if abs(l1_new - l1_old) < tol:
+            break
+        else:
+            l1_old = l1_new
+
+    return sample
+
+
+def _validate_workers(workers: IntNumber = 1) -> IntNumber:
+    """Validate `workers` based on platform and value.
+
+    Parameters
+    ----------
+    workers : int, optional
+        Number of workers to use for parallel processing. If -1 is
+        given all CPU threads are used. Default is 1.
+
+    Returns
+    -------
+    workers : int
+        Number of CPU threads used by the algorithm.
+
+    """
+    workers = int(workers)
+    if workers == -1:
+        workers = os.cpu_count()  # type: ignore[assignment]
+        if workers is None:
+            raise NotImplementedError(
+                "Cannot determine the number of cpus using os.cpu_count(), "
+                "cannot use -1 for the number of workers"
+            )
+    elif workers <= 0:
+        raise ValueError(f"Invalid number of workers: {workers}, must be -1 "
+                         "or > 0")
+
+    return workers
+
+
+def _validate_bounds(
+    l_bounds: npt.ArrayLike, u_bounds: npt.ArrayLike, d: int
+) -> Tuple[np.ndarray, ...]:
+    """Bounds input validation.
+
+    Parameters
+    ----------
+    l_bounds, u_bounds : array_like (d,)
+        Lower and upper bounds.
+    d : int
+        Dimension to use for broadcasting.
+
+    Returns
+    -------
+    l_bounds, u_bounds : array_like (d,)
+        Lower and upper bounds.
+
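+    Examples
+    --------
+    A small sketch; the bound values below are illustrative:
+
+    >>> _validate_bounds(l_bounds=0, u_bounds=[1, 2], d=2)
+    (array([0, 0]), array([1, 2]))
+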
+    """
+    try:
+        lower = np.broadcast_to(l_bounds, d)
+        upper = np.broadcast_to(u_bounds, d)
+    except ValueError as exc:
+        msg = ("'l_bounds' and 'u_bounds' must be broadcastable and respect"
+               " the sample dimension")
+        raise ValueError(msg) from exc
+
+    if not np.all(lower < upper):
+        raise ValueError("Bounds are not consistent 'l_bounds' < 'u_bounds'")
+
+    return lower, upper
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_qmc_cy.pyi b/__packaged__/coreml/.python_dependencies/scipy/stats/_qmc_cy.pyi
new file mode 100644
index 00000000..1006385a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_qmc_cy.pyi
@@ -0,0 +1,54 @@
+import numpy as np
+from scipy._lib._util import DecimalNumber, IntNumber
+
+
+def _cy_wrapper_centered_discrepancy(
+        sample: np.ndarray, 
+        iterative: bool, 
+        workers: IntNumber,
+) -> float: ...
+
+
+def _cy_wrapper_wrap_around_discrepancy(
+        sample: np.ndarray,
+        iterative: bool, 
+        workers: IntNumber,
+) -> float: ...
+
+
+def _cy_wrapper_mixture_discrepancy(
+        sample: np.ndarray,
+        iterative: bool, 
+        workers: IntNumber,
+) -> float: ...
+
+
+def _cy_wrapper_l2_star_discrepancy(
+        sample: np.ndarray,
+        iterative: bool,
+        workers: IntNumber,
+) -> float: ...
+
+
+def _cy_wrapper_update_discrepancy(
+        x_new_view: np.ndarray,
+        sample_view: np.ndarray,
+        initial_disc: DecimalNumber,
+) -> float: ...
+
+
+def _cy_van_der_corput(
+        n: IntNumber,
+        base: IntNumber,
+        start_index: IntNumber,
+        workers: IntNumber,
+) -> np.ndarray: ...
+
+
+def _cy_van_der_corput_scrambled(
+        n: IntNumber,
+        base: IntNumber,
+        start_index: IntNumber,
+        permutations: np.ndarray,
+        workers: IntNumber,
+) -> np.ndarray: ...
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_rcont/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_rcont/__init__.py
new file mode 100644
index 00000000..ed7a3261
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_rcont/__init__.py
@@ -0,0 +1,5 @@
+# -*- coding: utf-8 -*-
+#
+from .rcont import rvs_rcont1, rvs_rcont2
+
+__all__ = ["rvs_rcont1", "rvs_rcont2"]
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_relative_risk.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_relative_risk.py
new file mode 100644
index 00000000..b514e638
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_relative_risk.py
@@ -0,0 +1,264 @@
+
+import operator
+from dataclasses import dataclass
+import numpy as np
+from scipy.special import ndtri
+from ._common import ConfidenceInterval
+
+
+def _validate_int(n, bound, name):
+    msg = f'{name} must be an integer not less than {bound}, but got {n!r}'
+    try:
+        n = operator.index(n)
+    except TypeError:
+        raise TypeError(msg) from None
+    if n < bound:
+        raise ValueError(msg)
+    return n
+
+
+@dataclass
+class RelativeRiskResult:
+    """
+    Result of `scipy.stats.contingency.relative_risk`.
+
+    Attributes
+    ----------
+    relative_risk : float
+        This is::
+
+            (exposed_cases/exposed_total) / (control_cases/control_total)
+
+    exposed_cases : int
+        The number of "cases" (i.e. occurrence of disease or other event
+        of interest) among the sample of "exposed" individuals.
+    exposed_total : int
+        The total number of "exposed" individuals in the sample.
+    control_cases : int
+        The number of "cases" among the sample of "control" or non-exposed
+        individuals.
+    control_total : int
+        The total number of "control" individuals in the sample.
+
+    Methods
+    -------
+    confidence_interval :
+        Compute the confidence interval for the relative risk estimate.
+    """
+
+    relative_risk: float
+    exposed_cases: int
+    exposed_total: int
+    control_cases: int
+    control_total: int
+
+    def confidence_interval(self, confidence_level=0.95):
+        """
+        Compute the confidence interval for the relative risk.
+
+        The confidence interval is computed using the Katz method
+        (i.e. "Method C" of [1]_; see also [2]_, section 3.1.2).
+
+        Parameters
+        ----------
+        confidence_level : float, optional
+            The confidence level to use for the confidence interval.
+            Default is 0.95.
+
+        Returns
+        -------
+        ci : ConfidenceInterval instance
+            The return value is an object with attributes ``low`` and
+            ``high`` that hold the confidence interval.
+
+        References
+        ----------
+        .. [1] D. Katz, J. Baptista, S. P. Azen and M. C. Pike, "Obtaining
+               confidence intervals for the risk ratio in cohort studies",
+               Biometrics, 34, 469-474 (1978).
+        .. [2] Hardeo Sahai and Anwer Khurshid, Statistics in Epidemiology,
+               CRC Press LLC, Boca Raton, FL, USA (1996).
+
+
+        Examples
+        --------
+        >>> from scipy.stats.contingency import relative_risk
+        >>> result = relative_risk(exposed_cases=10, exposed_total=75,
+        ...                        control_cases=12, control_total=225)
+        >>> result.relative_risk
+        2.5
+        >>> result.confidence_interval()
+        ConfidenceInterval(low=1.1261564003469628, high=5.549850800541033)
+        """
+        if not 0 <= confidence_level <= 1:
+            raise ValueError('confidence_level must be in the interval '
+                             '[0, 1].')
+
+        # Handle edge cases where either exposed_cases or control_cases
+        # is zero.  We follow the convention of the R function riskratio
+        # from the epitools library.
+        if self.exposed_cases == 0 and self.control_cases == 0:
+            # relative risk is nan.
+            return ConfidenceInterval(low=np.nan, high=np.nan)
+        elif self.exposed_cases == 0:
+            # relative risk is 0.
+            return ConfidenceInterval(low=0.0, high=np.nan)
+        elif self.control_cases == 0:
+            # relative risk is inf
+            return ConfidenceInterval(low=np.nan, high=np.inf)
+
+        alpha = 1 - confidence_level
+        z = ndtri(1 - alpha/2)
+        rr = self.relative_risk
+
+        # Estimate of the variance of log(rr) is
+        # var(log(rr)) = 1/exposed_cases - 1/exposed_total +
+        #                1/control_cases - 1/control_total
+        # and the standard error is the square root of that.
+        se = np.sqrt(1/self.exposed_cases - 1/self.exposed_total +
+                     1/self.control_cases - 1/self.control_total)
+        delta = z*se
+        katz_lo = rr*np.exp(-delta)
+        katz_hi = rr*np.exp(delta)
+        return ConfidenceInterval(low=katz_lo, high=katz_hi)
+
+
+def relative_risk(exposed_cases, exposed_total, control_cases, control_total):
+    """
+    Compute the relative risk (also known as the risk ratio).
+
+    This function computes the relative risk associated with a 2x2
+    contingency table ([1]_, section 2.2.3; [2]_, section 3.1.2). Instead
+    of accepting a table as an argument, the individual numbers that are
+    used to compute the relative risk are given as separate parameters.
+    This is to avoid the ambiguity of which row or column of the contingency
+    table corresponds to the "exposed" cases and which corresponds to the
+    "control" cases.  Unlike, say, the odds ratio, the relative risk is not
+    invariant under an interchange of the rows or columns.
+
+    Parameters
+    ----------
+    exposed_cases : nonnegative int
+        The number of "cases" (i.e. occurrence of disease or other event
+        of interest) among the sample of "exposed" individuals.
+    exposed_total : positive int
+        The total number of "exposed" individuals in the sample.
+    control_cases : nonnegative int
+        The number of "cases" among the sample of "control" or non-exposed
+        individuals.
+    control_total : positive int
+        The total number of "control" individuals in the sample.
+
+    Returns
+    -------
+    result : instance of `~scipy.stats._result_classes.RelativeRiskResult`
+        The object has the float attribute ``relative_risk``, which is::
+
+            rr = (exposed_cases/exposed_total) / (control_cases/control_total)
+
+        The object also has the method ``confidence_interval`` to compute
+        the confidence interval of the relative risk for a given confidence
+        level.
+
+    See Also
+    --------
+    odds_ratio
+
+    Notes
+    -----
+    The R package epitools has the function `riskratio`, which accepts
+    a table with the following layout::
+
+                        disease=0   disease=1
+        exposed=0 (ref)    n00         n01
+        exposed=1          n10         n11
+
+    With a 2x2 table in the above format, the estimate of the CI is
+    computed by `riskratio` when the argument method="wald" is given,
+    or with the function `riskratio.wald`.
+
+    For example, in a test of the incidence of lung cancer among a
+    sample of smokers and nonsmokers, the "exposed" category would
+    correspond to "is a smoker" and the "disease" category would
+    correspond to "has or had lung cancer".
+
+    To pass the same data to ``relative_risk``, use::
+
+        relative_risk(n11, n10 + n11, n01, n00 + n01)
+
+    .. versionadded:: 1.7.0
+
+    References
+    ----------
+    .. [1] Alan Agresti, An Introduction to Categorical Data Analysis
+           (second edition), Wiley, Hoboken, NJ, USA (2007).
+    .. [2] Hardeo Sahai and Anwer Khurshid, Statistics in Epidemiology,
+           CRC Press LLC, Boca Raton, FL, USA (1996).
+
+    Examples
+    --------
+    >>> from scipy.stats.contingency import relative_risk
+
+    This example is from Example 3.1 of [2]_.  The results of a heart
+    disease study are summarized in the following table::
+
+                 High CAT   Low CAT    Total
+                 --------   -------    -----
+        CHD         27         44        71
+        No CHD      95        443       538
+
+        Total      122        487       609
+
+    CHD is coronary heart disease, and CAT refers to the level of
+    circulating catecholamine.  CAT is the "exposure" variable, and
+    high CAT is the "exposed" category. So the data from the table
+    to be passed to ``relative_risk`` is::
+
+        exposed_cases = 27
+        exposed_total = 122
+        control_cases = 44
+        control_total = 487
+
+    >>> result = relative_risk(27, 122, 44, 487)
+    >>> result.relative_risk
+    2.4495156482861398
+
+    Find the confidence interval for the relative risk.
+
+    >>> result.confidence_interval(confidence_level=0.95)
+    ConfidenceInterval(low=1.5836990926700116, high=3.7886786315466354)
+
+    The interval does not contain 1, so the data supports the statement
+    that high CAT is associated with greater risk of CHD.
+    """
+    # Relative risk is a trivial calculation.  The nontrivial part is in the
+    # `confidence_interval` method of the RelativeRiskResult class.
+
+    exposed_cases = _validate_int(exposed_cases, 0, "exposed_cases")
+    exposed_total = _validate_int(exposed_total, 1, "exposed_total")
+    control_cases = _validate_int(control_cases, 0, "control_cases")
+    control_total = _validate_int(control_total, 1, "control_total")
+
+    if exposed_cases > exposed_total:
+        raise ValueError('exposed_cases must not exceed exposed_total.')
+    if control_cases > control_total:
+        raise ValueError('control_cases must not exceed control_total.')
+
+    if exposed_cases == 0 and control_cases == 0:
+        # relative risk is 0/0.
+        rr = np.nan
+    elif exposed_cases == 0:
+        # relative risk is 0/nonzero
+        rr = 0.0
+    elif control_cases == 0:
+        # relative risk is nonzero/0.
+        rr = np.inf
+    else:
+        p1 = exposed_cases / exposed_total
+        p2 = control_cases / control_total
+        rr = p1 / p2
+    return RelativeRiskResult(relative_risk=rr,
+                              exposed_cases=exposed_cases,
+                              exposed_total=exposed_total,
+                              control_cases=control_cases,
+                              control_total=control_total)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_resampling.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_resampling.py
new file mode 100644
index 00000000..d28e97bb
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_resampling.py
@@ -0,0 +1,1602 @@
+import warnings
+import numpy as np
+from itertools import combinations, permutations, product
+import inspect
+
+from scipy._lib._util import check_random_state
+from scipy.special import ndtr, ndtri, comb, factorial
+from scipy._lib._util import rng_integers
+from dataclasses import make_dataclass
+from ._common import ConfidenceInterval
+from ._axis_nan_policy import _broadcast_concatenate, _broadcast_arrays
+from ._warnings_errors import DegenerateDataWarning
+
+__all__ = ['bootstrap', 'monte_carlo_test', 'permutation_test']
+
+
+def _vectorize_statistic(statistic):
+    """Vectorize an n-sample statistic"""
+    # This is a little cleaner than np.nditer at the expense of some data
+    # copying: concatenate samples together, then use np.apply_along_axis
+    def stat_nd(*data, axis=0):
+        lengths = [sample.shape[axis] for sample in data]
+        split_indices = np.cumsum(lengths)[:-1]
+        z = _broadcast_concatenate(data, axis)
+
+        # move working axis to position 0 so that new dimensions in the output
+        # of `statistic` are _prepended_. ("This axis is removed, and replaced
+        # with new dimensions...")
+        z = np.moveaxis(z, axis, 0)
+
+        def stat_1d(z):
+            data = np.split(z, split_indices)
+            return statistic(*data)
+
+        return np.apply_along_axis(stat_1d, 0, z)[()]
+    return stat_nd
+
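+# Illustrative sketch (not part of the module): `_vectorize_statistic` gives
+# a 1-D two-sample statistic an `axis` keyword and broadcasts it over
+# leading dimensions.
+#
+#     def statistic(x, y):            # understands only 1-D samples
+#         return np.mean(x) - np.mean(y)
+#
+#     vec_stat = _vectorize_statistic(statistic)
+#     x, y = np.ones((3, 10)), np.zeros((3, 10))
+#     vec_stat(x, y, axis=-1)         # -> array([1., 1., 1.])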
+
+def _jackknife_resample(sample, batch=None):
+    """Jackknife resample the sample. Only one-sample stats for now."""
+    n = sample.shape[-1]
+    batch_nominal = batch or n
+
+    for k in range(0, n, batch_nominal):
+        # col_start:col_end are the observations to remove
+        batch_actual = min(batch_nominal, n-k)
+
+        # jackknife - each row leaves out one observation
+        j = np.ones((batch_actual, n), dtype=bool)
+        np.fill_diagonal(j[:, k:k+batch_actual], False)
+        i = np.arange(n)
+        i = np.broadcast_to(i, (batch_actual, n))
+        i = i[j].reshape((batch_actual, n-1))
+
+        resamples = sample[..., i]
+        yield resamples
+
+
+def _bootstrap_resample(sample, n_resamples=None, random_state=None):
+    """Bootstrap resample the sample."""
+    n = sample.shape[-1]
+
+    # bootstrap - each row is a random resample of original observations
+    i = rng_integers(random_state, 0, n, (n_resamples, n))
+
+    resamples = sample[..., i]
+    return resamples
+
+
+def _percentile_of_score(a, score, axis):
+    """Vectorized, simplified `scipy.stats.percentileofscore`.
+    Uses logic of the 'mean' value of percentileofscore's kind parameter.
+
+    Unlike `stats.percentileofscore`, the percentile returned is a fraction
+    in [0, 1].
+    """
+    B = a.shape[axis]
+    return ((a < score).sum(axis=axis) + (a <= score).sum(axis=axis)) / (2 * B)
+
+
+def _percentile_along_axis(theta_hat_b, alpha):
+    """`np.percentile` with different percentile for each slice."""
+    # the difference between _percentile_along_axis and np.percentile is that
+    # np.percentile gets _all_ the qs for each axis slice, whereas
+    # _percentile_along_axis gets the q corresponding with each axis slice
+    shape = theta_hat_b.shape[:-1]
+    alpha = np.broadcast_to(alpha, shape)
+    percentiles = np.zeros_like(alpha, dtype=np.float64)
+    for indices, alpha_i in np.ndenumerate(alpha):
+        if np.isnan(alpha_i):
+            # e.g. when bootstrap distribution has only one unique element
+            msg = (
+                "The BCa confidence interval cannot be calculated."
+                " This problem is known to occur when the distribution"
+                " is degenerate or the statistic is np.min."
+            )
+            warnings.warn(DegenerateDataWarning(msg))
+            percentiles[indices] = np.nan
+        else:
+            theta_hat_b_i = theta_hat_b[indices]
+            percentiles[indices] = np.percentile(theta_hat_b_i, alpha_i)
+    return percentiles[()]  # return scalar instead of 0d array
+
+
+def _bca_interval(data, statistic, axis, alpha, theta_hat_b, batch):
+    """Bias-corrected and accelerated interval."""
+    # closely follows [1] 14.3 and 15.4 (Eq. 15.36)
+
+    # calculate z0_hat
+    theta_hat = np.asarray(statistic(*data, axis=axis))[..., None]
+    percentile = _percentile_of_score(theta_hat_b, theta_hat, axis=-1)
+    z0_hat = ndtri(percentile)
+
+    # calculate a_hat
+    theta_hat_ji = []  # j is for sample of data, i is for jackknife resample
+    for j, sample in enumerate(data):
+        # _jackknife_resample will add an axis prior to the last axis that
+        # corresponds with the different jackknife resamples. Do the same for
+        # each sample of the data to ensure broadcastability. We need to
+        # create a copy of the list containing the samples anyway, so do this
+        # in the loop to simplify the code. This is not the bottleneck...
+        samples = [np.expand_dims(sample, -2) for sample in data]
+        theta_hat_i = []
+        for jackknife_sample in _jackknife_resample(sample, batch):
+            samples[j] = jackknife_sample
+            broadcasted = _broadcast_arrays(samples, axis=-1)
+            theta_hat_i.append(statistic(*broadcasted, axis=-1))
+        theta_hat_ji.append(theta_hat_i)
+
+    theta_hat_ji = [np.concatenate(theta_hat_i, axis=-1)
+                    for theta_hat_i in theta_hat_ji]
+
+    n_j = [theta_hat_i.shape[-1] for theta_hat_i in theta_hat_ji]
+
+    theta_hat_j_dot = [theta_hat_i.mean(axis=-1, keepdims=True)
+                       for theta_hat_i in theta_hat_ji]
+
+    U_ji = [(n - 1) * (theta_hat_dot - theta_hat_i)
+            for theta_hat_dot, theta_hat_i, n
+            in zip(theta_hat_j_dot, theta_hat_ji, n_j)]
+
+    nums = [(U_i**3).sum(axis=-1)/n**3 for U_i, n in zip(U_ji, n_j)]
+    dens = [(U_i**2).sum(axis=-1)/n**2 for U_i, n in zip(U_ji, n_j)]
+    a_hat = 1/6 * sum(nums) / sum(dens)**(3/2)
+
+    # calculate alpha_1, alpha_2
+    z_alpha = ndtri(alpha)
+    z_1alpha = -z_alpha
+    num1 = z0_hat + z_alpha
+    alpha_1 = ndtr(z0_hat + num1/(1 - a_hat*num1))
+    num2 = z0_hat + z_1alpha
+    alpha_2 = ndtr(z0_hat + num2/(1 - a_hat*num2))
+    return alpha_1, alpha_2, a_hat  # return a_hat for testing
+
+
+def _bootstrap_iv(data, statistic, vectorized, paired, axis, confidence_level,
+                  n_resamples, batch, method, bootstrap_result, random_state):
+    """Input validation and standardization for `bootstrap`."""
+
+    if vectorized not in {True, False, None}:
+        raise ValueError("`vectorized` must be `True`, `False`, or `None`.")
+
+    if vectorized is None:
+        vectorized = 'axis' in inspect.signature(statistic).parameters
+
+    if not vectorized:
+        statistic = _vectorize_statistic(statistic)
+
+    axis_int = int(axis)
+    if axis != axis_int:
+        raise ValueError("`axis` must be an integer.")
+
+    n_samples = 0
+    try:
+        n_samples = len(data)
+    except TypeError:
+        raise ValueError("`data` must be a sequence of samples.")
+
+    if n_samples == 0:
+        raise ValueError("`data` must contain at least one sample.")
+
+    data_iv = []
+    for sample in data:
+        sample = np.atleast_1d(sample)
+        if sample.shape[axis_int] <= 1:
+            raise ValueError("each sample in `data` must contain two or more "
+                             "observations along `axis`.")
+        sample = np.moveaxis(sample, axis_int, -1)
+        data_iv.append(sample)
+
+    if paired not in {True, False}:
+        raise ValueError("`paired` must be `True` or `False`.")
+
+    if paired:
+        n = data_iv[0].shape[-1]
+        for sample in data_iv[1:]:
+            if sample.shape[-1] != n:
+                message = ("When `paired is True`, all samples must have the "
+                           "same length along `axis`")
+                raise ValueError(message)
+
+        # to generate the bootstrap distribution for paired-sample statistics,
+        # resample the indices of the observations
+        def statistic(i, axis=-1, data=data_iv, unpaired_statistic=statistic):
+            data = [sample[..., i] for sample in data]
+            return unpaired_statistic(*data, axis=axis)
+
+        data_iv = [np.arange(n)]
+
+    confidence_level_float = float(confidence_level)
+
+    n_resamples_int = int(n_resamples)
+    if n_resamples != n_resamples_int or n_resamples_int < 0:
+        raise ValueError("`n_resamples` must be a non-negative integer.")
+
+    if batch is None:
+        batch_iv = batch
+    else:
+        batch_iv = int(batch)
+        if batch != batch_iv or batch_iv <= 0:
+            raise ValueError("`batch` must be a positive integer or None.")
+
+    methods = {'percentile', 'basic', 'bca'}
+    method = method.lower()
+    if method not in methods:
+        raise ValueError(f"`method` must be in {methods}")
+
+    message = "`bootstrap_result` must have attribute `bootstrap_distribution'"
+    if (bootstrap_result is not None
+            and not hasattr(bootstrap_result, "bootstrap_distribution")):
+        raise ValueError(message)
+
+    message = ("Either `bootstrap_result.bootstrap_distribution.size` or "
+               "`n_resamples` must be positive.")
+    if ((not bootstrap_result or
+         not bootstrap_result.bootstrap_distribution.size)
+            and n_resamples_int == 0):
+        raise ValueError(message)
+
+    random_state = check_random_state(random_state)
+
+    return (data_iv, statistic, vectorized, paired, axis_int,
+            confidence_level_float, n_resamples_int, batch_iv,
+            method, bootstrap_result, random_state)
+
+
+fields = ['confidence_interval', 'bootstrap_distribution', 'standard_error']
+BootstrapResult = make_dataclass("BootstrapResult", fields)
+
+
+def bootstrap(data, statistic, *, n_resamples=9999, batch=None,
+              vectorized=None, paired=False, axis=0, confidence_level=0.95,
+              method='BCa', bootstrap_result=None, random_state=None):
+    r"""
+    Compute a two-sided bootstrap confidence interval of a statistic.
+
+    When `method` is ``'percentile'``, a bootstrap confidence interval is
+    computed according to the following procedure.
+
+    1. Resample the data: for each sample in `data` and for each of
+       `n_resamples`, take a random sample of the original sample
+       (with replacement) of the same size as the original sample.
+
+    2. Compute the bootstrap distribution of the statistic: for each set of
+       resamples, compute the test statistic.
+
+    3. Determine the confidence interval: find the interval of the bootstrap
+       distribution that is
+
+       - symmetric about the median and
+       - contains `confidence_level` of the resampled statistic values.
+
+    While the ``'percentile'`` method is the most intuitive, it is rarely
+    used in practice. Two more common methods are available, ``'basic'``
+    ('reverse percentile') and ``'BCa'`` ('bias-corrected and accelerated');
+    they differ in how step 3 is performed.
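+
+    As a rough sketch (a hypothetical helper, not the implementation below,
+    which adds vectorization, batching, `paired` support, and the
+    ``'basic'``/``'BCa'`` methods), the ``'percentile'`` procedure amounts to:
+
+    >>> import numpy as np
+    >>> def percentile_ci_sketch(sample, statistic, n_resamples=999,
+    ...                          confidence_level=0.95, seed=None):
+    ...     rng = np.random.default_rng(seed)
+    ...     n = len(sample)
+    ...     # steps 1 & 2: resample with replacement; statistic per resample
+    ...     dist = [statistic(rng.choice(sample, size=n))
+    ...             for _ in range(n_resamples)]
+    ...     # step 3: central interval containing `confidence_level` of values
+    ...     alpha = (1 - confidence_level) / 2
+    ...     return np.quantile(dist, [alpha, 1 - alpha])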
+
+    If the samples in `data` are taken at random from their respective
+    distributions :math:`n` times, the confidence interval returned by
+    `bootstrap` will contain the true value of the statistic for those
+    distributions approximately `confidence_level`:math:`\, \times \, n` times.
+
+    Parameters
+    ----------
+    data : sequence of array-like
+        Each element of `data` is a sample from an underlying distribution.
+    statistic : callable
+        Statistic for which the confidence interval is to be calculated.
+        `statistic` must be a callable that accepts ``len(data)`` samples
+        as separate arguments and returns the resulting statistic.
+        If `vectorized` is set to ``True``,
+        `statistic` must also accept a keyword argument `axis` and be
+        vectorized to compute the statistic along the provided `axis`.
+    n_resamples : int, default: ``9999``
+        The number of resamples performed to form the bootstrap distribution
+        of the statistic.
+    batch : int, optional
+        The number of resamples to process in each vectorized call to
+        `statistic`. Memory usage is O(`batch`*``n``), where ``n`` is the
+        sample size. Default is ``None``, in which case ``batch = n_resamples``
+        (or ``batch = max(n_resamples, n)`` for ``method='BCa'``).
+    vectorized : bool, optional
+        If `vectorized` is set to ``False``, `statistic` will not be passed
+        keyword argument `axis` and is expected to calculate the statistic
+        only for 1D samples. If ``True``, `statistic` will be passed keyword
+        argument `axis` and is expected to calculate the statistic along `axis`
+        when passed an ND sample array. If ``None`` (default), `vectorized`
+        will be set to ``True`` if ``axis`` is a parameter of `statistic`. Use of
+        a vectorized statistic typically reduces computation time.
+    paired : bool, default: ``False``
+        Whether the statistic treats corresponding elements of the samples
+        in `data` as paired.
+    axis : int, default: ``0``
+        The axis of the samples in `data` along which the `statistic` is
+        calculated.
+    confidence_level : float, default: ``0.95``
+        The confidence level of the confidence interval.
+    method : {'percentile', 'basic', 'bca'}, default: ``'BCa'``
+        Whether to return the 'percentile' bootstrap confidence interval
+        (``'percentile'``), the 'basic' (AKA 'reverse percentile') bootstrap
+        confidence interval (``'basic'``), or the bias-corrected and
+        accelerated bootstrap confidence interval (``'BCa'``).
+    bootstrap_result : BootstrapResult, optional
+        Provide the result object returned by a previous call to `bootstrap`
+        to include the previous bootstrap distribution in the new bootstrap
+        distribution. This can be used, for example, to change
+        `confidence_level`, change `method`, or see the effect of performing
+        additional resampling without repeating computations.
+    random_state : {None, int, `numpy.random.Generator`,
+                    `numpy.random.RandomState`}, optional
+
+        Pseudorandom number generator state used to generate resamples.
+
+        If `random_state` is ``None`` (or `np.random`), the
+        `numpy.random.RandomState` singleton is used.
+        If `random_state` is an int, a new ``RandomState`` instance is used,
+        seeded with `random_state`.
+        If `random_state` is already a ``Generator`` or ``RandomState``
+        instance then that instance is used.
+
+    Returns
+    -------
+    res : BootstrapResult
+        An object with attributes:
+
+        confidence_interval : ConfidenceInterval
+            The bootstrap confidence interval as an instance of
+            `collections.namedtuple` with attributes `low` and `high`.
+        bootstrap_distribution : ndarray
+            The bootstrap distribution, that is, the value of `statistic` for
+            each resample. The last dimension corresponds with the resamples
+            (e.g. ``res.bootstrap_distribution.shape[-1] == n_resamples``).
+        standard_error : float or ndarray
+            The bootstrap standard error, that is, the sample standard
+            deviation of the bootstrap distribution.
+
+    Warns
+    -----
+    `~scipy.stats.DegenerateDataWarning`
+        Generated when ``method='BCa'`` and the bootstrap distribution is
+        degenerate (e.g. all elements are identical).
+
+    Notes
+    -----
+    Elements of the confidence interval may be NaN for ``method='BCa'`` if
+    the bootstrap distribution is degenerate (e.g. all elements are identical).
+    In this case, consider using another `method` or inspecting `data` for
+    indications that other analysis may be more appropriate (e.g. all
+    observations are identical).
+
+    References
+    ----------
+    .. [1] B. Efron and R. J. Tibshirani, An Introduction to the Bootstrap,
+       Chapman & Hall/CRC, Boca Raton, FL, USA (1993)
+    .. [2] Nathaniel E. Helwig, "Bootstrap Confidence Intervals",
+       http://users.stat.umn.edu/~helwig/notes/bootci-Notes.pdf
+    .. [3] Bootstrapping (statistics), Wikipedia,
+       https://en.wikipedia.org/wiki/Bootstrapping_%28statistics%29
+
+    Examples
+    --------
+    Suppose we have sampled data from an unknown distribution.
+
+    >>> import numpy as np
+    >>> rng = np.random.default_rng()
+    >>> from scipy.stats import norm
+    >>> dist = norm(loc=2, scale=4)  # our "unknown" distribution
+    >>> data = dist.rvs(size=100, random_state=rng)
+
+    We are interested in the standard deviation of the distribution.
+
+    >>> std_true = dist.std()      # the true value of the statistic
+    >>> print(std_true)
+    4.0
+    >>> std_sample = np.std(data)  # the sample statistic
+    >>> print(std_sample)
+    3.9460644295563863
+
+    The bootstrap is used to approximate the variability we would expect if we
+    were to repeatedly sample from the unknown distribution and calculate the
+    statistic of the sample each time. It does this by repeatedly resampling
+    values *from the original sample* with replacement and calculating the
+    statistic of each resample. This results in a "bootstrap distribution" of
+    the statistic.
+
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy.stats import bootstrap
+    >>> data = (data,)  # samples must be in a sequence
+    >>> res = bootstrap(data, np.std, confidence_level=0.9,
+    ...                 random_state=rng)
+    >>> fig, ax = plt.subplots()
+    >>> ax.hist(res.bootstrap_distribution, bins=25)
+    >>> ax.set_title('Bootstrap Distribution')
+    >>> ax.set_xlabel('statistic value')
+    >>> ax.set_ylabel('frequency')
+    >>> plt.show()
+
+    The standard error quantifies this variability. It is calculated as the
+    standard deviation of the bootstrap distribution.
+
+    >>> res.standard_error
+    0.24427002125829136
+    >>> res.standard_error == np.std(res.bootstrap_distribution, ddof=1)
+    True
+
+    The bootstrap distribution of the statistic is often approximately normal
+    with scale equal to the standard error.
+
+    >>> x = np.linspace(3, 5)
+    >>> pdf = norm.pdf(x, loc=std_sample, scale=res.standard_error)
+    >>> fig, ax = plt.subplots()
+    >>> ax.hist(res.bootstrap_distribution, bins=25, density=True)
+    >>> ax.plot(x, pdf)
+    >>> ax.set_title('Normal Approximation of the Bootstrap Distribution')
+    >>> ax.set_xlabel('statistic value')
+    >>> ax.set_ylabel('pdf')
+    >>> plt.show()
+
+    This suggests that we could construct a 90% confidence interval on the
+    statistic based on quantiles of this normal distribution.
+
+    >>> norm.interval(0.9, loc=std_sample, scale=res.standard_error)
+    (3.5442759991341726, 4.3478528599786)
+
+    Due to the central limit theorem, this normal approximation is accurate
+    for a variety of statistics and distributions underlying the samples;
+    however, the approximation is not reliable in all cases. Because
+    `bootstrap` is designed to work with arbitrary underlying distributions
+    and statistics, it uses more advanced techniques to generate an accurate
+    confidence interval.
+
+    >>> print(res.confidence_interval)
+    ConfidenceInterval(low=3.57655333533867, high=4.382043696342881)
+
+    If we sample from the original distribution 1000 times and form a bootstrap
+    confidence interval for each sample, the confidence interval
+    contains the true value of the statistic approximately 90% of the time.
+
+    >>> n_trials = 1000
+    >>> ci_contains_true_std = 0
+    >>> for i in range(n_trials):
+    ...    data = (dist.rvs(size=100, random_state=rng),)
+    ...    ci = bootstrap(data, np.std, confidence_level=0.9, n_resamples=1000,
+    ...                   random_state=rng).confidence_interval
+    ...    if ci[0] < std_true < ci[1]:
+    ...        ci_contains_true_std += 1
+    >>> print(ci_contains_true_std)
+    875
+
+    Rather than writing a loop, we can also determine the confidence intervals
+    for all 1000 samples at once.
+
+    >>> data = (dist.rvs(size=(n_trials, 100), random_state=rng),)
+    >>> res = bootstrap(data, np.std, axis=-1, confidence_level=0.9,
+    ...                 n_resamples=1000, random_state=rng)
+    >>> ci_l, ci_u = res.confidence_interval
+
+    Here, `ci_l` and `ci_u` contain the confidence interval for each of the
+    ``n_trials = 1000`` samples.
+
+    >>> print(ci_l[995:])
+    [3.77729695 3.75090233 3.45829131 3.34078217 3.48072829]
+    >>> print(ci_u[995:])
+    [4.88316666 4.86924034 4.32032996 4.2822427  4.59360598]
+
+    And again, approximately 90% contain the true value, ``std_true = 4``.
+
+    >>> print(np.sum((ci_l < std_true) & (std_true < ci_u)))
+    900
+
+    `bootstrap` can also be used to estimate confidence intervals of
+    multi-sample statistics, including those calculated by hypothesis
+    tests. `scipy.stats.mood` performs Mood's test for equal scale parameters,
+    and it returns two outputs: a statistic and a p-value. To get a
+    confidence interval for the test statistic, we first wrap
+    `scipy.stats.mood` in a function that accepts two sample arguments and
+    an `axis` keyword argument, and returns only the statistic.
+
+    >>> from scipy.stats import mood
+    >>> def my_statistic(sample1, sample2, axis):
+    ...     statistic, _ = mood(sample1, sample2, axis=axis)
+    ...     return statistic
+
+    Here, we use the 'basic' method with the default 95% confidence level.
+
+    >>> sample1 = norm.rvs(scale=1, size=100, random_state=rng)
+    >>> sample2 = norm.rvs(scale=2, size=100, random_state=rng)
+    >>> data = (sample1, sample2)
+    >>> res = bootstrap(data, my_statistic, method='basic', random_state=rng)
+    >>> print(mood(sample1, sample2)[0])  # element 0 is the statistic
+    -5.521109549096542
+    >>> print(res.confidence_interval)
+    ConfidenceInterval(low=-7.255994487314675, high=-4.016202624747605)
+
+    The bootstrap estimate of the standard error is also available.
+
+    >>> print(res.standard_error)
+    0.8344963846318795
+
+    Paired-sample statistics work, too. For example, consider the Pearson
+    correlation coefficient.
+
+    >>> from scipy.stats import pearsonr
+    >>> n = 100
+    >>> x = np.linspace(0, 10, n)
+    >>> y = x + rng.uniform(size=n)
+    >>> print(pearsonr(x, y)[0])  # element 0 is the statistic
+    0.9962357936065914
+
+    We wrap `pearsonr` so that it returns only the statistic.
+
+    >>> def my_statistic(x, y):
+    ...     return pearsonr(x, y)[0]
+
+    We call `bootstrap` using ``paired=True``.
+    Also, since ``my_statistic`` isn't vectorized to calculate the statistic
+    along a given axis, we pass in ``vectorized=False``.
+
+    >>> res = bootstrap((x, y), my_statistic, vectorized=False, paired=True,
+    ...                 random_state=rng)
+    >>> print(res.confidence_interval)
+    ConfidenceInterval(low=0.9950085825848624, high=0.9971212407917498)
+
+    The result object can be passed back into `bootstrap` to perform additional
+    resampling:
+
+    >>> len(res.bootstrap_distribution)
+    9999
+    >>> res = bootstrap((x, y), my_statistic, vectorized=False, paired=True,
+    ...                 n_resamples=1001, random_state=rng,
+    ...                 bootstrap_result=res)
+    >>> len(res.bootstrap_distribution)
+    11000
+
+    or to change the confidence interval options:
+
+    >>> res2 = bootstrap((x, y), my_statistic, vectorized=False, paired=True,
+    ...                  n_resamples=0, random_state=rng, bootstrap_result=res,
+    ...                  method='percentile', confidence_level=0.9)
+    >>> np.testing.assert_equal(res2.bootstrap_distribution,
+    ...                         res.bootstrap_distribution)
+    >>> res2.confidence_interval
+    ConfidenceInterval(low=0.9950035351407804, high=0.9971170323404578)
+
+    without repeating computation of the original bootstrap distribution.
+
+    """
+    # Input validation
+    args = _bootstrap_iv(data, statistic, vectorized, paired, axis,
+                         confidence_level, n_resamples, batch, method,
+                         bootstrap_result, random_state)
+    data, statistic, vectorized, paired, axis, confidence_level = args[:6]
+    n_resamples, batch, method, bootstrap_result, random_state = args[6:]
+
+    theta_hat_b = ([] if bootstrap_result is None
+                   else [bootstrap_result.bootstrap_distribution])
+
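+    # `n_resamples` can be 0 (when only re-evaluating a previous
+    # `bootstrap_result`); fall back to 1 so the `range` step below is valid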
+    batch_nominal = batch or n_resamples or 1
+
+    for k in range(0, n_resamples, batch_nominal):
+        batch_actual = min(batch_nominal, n_resamples-k)
+        # Generate resamples
+        resampled_data = []
+        for sample in data:
+            resample = _bootstrap_resample(sample, n_resamples=batch_actual,
+                                           random_state=random_state)
+            resampled_data.append(resample)
+
+        # Compute bootstrap distribution of statistic
+        theta_hat_b.append(statistic(*resampled_data, axis=-1))
+    theta_hat_b = np.concatenate(theta_hat_b, axis=-1)
+
+    # Calculate percentile interval
+    alpha = (1 - confidence_level)/2
+    if method == 'bca':
+        interval = _bca_interval(data, statistic, axis=-1, alpha=alpha,
+                                 theta_hat_b=theta_hat_b, batch=batch)[:2]
+        percentile_fun = _percentile_along_axis
+    else:
+        interval = alpha, 1-alpha
+
+        def percentile_fun(a, q):
+            return np.percentile(a=a, q=q, axis=-1)
+
+    # Calculate confidence interval of statistic
+    ci_l = percentile_fun(theta_hat_b, interval[0]*100)
+    ci_u = percentile_fun(theta_hat_b, interval[1]*100)
+    if method == 'basic':  # see [3]
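+        # reflect the percentile interval about the observed statistic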
+        theta_hat = statistic(*data, axis=-1)
+        ci_l, ci_u = 2*theta_hat - ci_u, 2*theta_hat - ci_l
+
+    return BootstrapResult(confidence_interval=ConfidenceInterval(ci_l, ci_u),
+                           bootstrap_distribution=theta_hat_b,
+                           standard_error=np.std(theta_hat_b, ddof=1, axis=-1))
+
+
+def _monte_carlo_test_iv(sample, rvs, statistic, vectorized, n_resamples,
+                         batch, alternative, axis):
+    """Input validation for `monte_carlo_test`."""
+
+    axis_int = int(axis)
+    if axis != axis_int:
+        raise ValueError("`axis` must be an integer.")
+
+    if vectorized not in {True, False, None}:
+        raise ValueError("`vectorized` must be `True`, `False`, or `None`.")
+
+    if not callable(rvs):
+        raise TypeError("`rvs` must be callable.")
+
+    if not callable(statistic):
+        raise TypeError("`statistic` must be callable.")
+
+    if vectorized is None:
+        vectorized = 'axis' in inspect.signature(statistic).parameters
+
+    if not vectorized:
+        statistic_vectorized = _vectorize_statistic(statistic)
+    else:
+        statistic_vectorized = statistic
+
+    sample = np.atleast_1d(sample)
+    sample = np.moveaxis(sample, axis, -1)
+
+    n_resamples_int = int(n_resamples)
+    if n_resamples != n_resamples_int or n_resamples_int <= 0:
+        raise ValueError("`n_resamples` must be a positive integer.")
+
+    if batch is None:
+        batch_iv = batch
+    else:
+        batch_iv = int(batch)
+        if batch != batch_iv or batch_iv <= 0:
+            raise ValueError("`batch` must be a positive integer or None.")
+
+    alternatives = {'two-sided', 'greater', 'less'}
+    alternative = alternative.lower()
+    if alternative not in alternatives:
+        raise ValueError(f"`alternative` must be in {alternatives}")
+
+    return (sample, rvs, statistic_vectorized, vectorized, n_resamples_int,
+            batch_iv, alternative, axis_int)
+
+
+fields = ['statistic', 'pvalue', 'null_distribution']
+MonteCarloTestResult = make_dataclass("MonteCarloTestResult", fields)
+
+
+def monte_carlo_test(sample, rvs, statistic, *, vectorized=None,
+                     n_resamples=9999, batch=None, alternative="two-sided",
+                     axis=0):
+    r"""
+    Monte Carlo test that a sample is drawn from a given distribution.
+
+    The null hypothesis is that the provided `sample` was drawn at random from
+    the distribution for which `rvs` generates random variates. The value of
+    the `statistic` for the given sample is compared against a Monte Carlo null
+    distribution: the value of the statistic for each of `n_resamples`
+    samples generated by `rvs`. This gives the p-value, the probability of
+    observing such an extreme value of the test statistic under the null
+    hypothesis.
+
+    Parameters
+    ----------
+    sample : array-like
+        An array of observations.
+    rvs : callable
+        Generates random variates from the distribution against which `sample`
+        will be tested. `rvs` must be a callable that accepts keyword argument
+        ``size`` (e.g. ``rvs(size=(m, n))``) and returns an N-d array sample
+        of that shape.
+    statistic : callable
+        Statistic for which the p-value of the hypothesis test is to be
+        calculated. `statistic` must be a callable that accepts a sample
+        (e.g. ``statistic(sample)``) and returns the resulting statistic.
+        If `vectorized` is set to ``True``, `statistic` must also accept a keyword
+        argument `axis` and be vectorized to compute the statistic along the
+        provided `axis` of the sample array.
+    vectorized : bool, optional
+        If `vectorized` is set to ``False``, `statistic` will not be passed
+        keyword argument `axis` and is expected to calculate the statistic
+        only for 1D samples. If ``True``, `statistic` will be passed keyword
+        argument `axis` and is expected to calculate the statistic along `axis`
+        when passed an ND sample array. If ``None`` (default), `vectorized`
+        will be set to ``True`` if ``axis`` is a parameter of `statistic`. Use of
+        a vectorized statistic typically reduces computation time.
+    n_resamples : int, default: 9999
+        Number of random samples drawn from `rvs` used to approximate the
+        Monte Carlo null distribution.
+    batch : int, optional
+        The number of resamples to process in each call to `statistic`.
+        Memory usage is O(`batch`*``sample.shape[axis]``). Default is
+        ``None``, in which case `batch` equals `n_resamples`.
+    alternative : {'two-sided', 'less', 'greater'}
+        The alternative hypothesis for which the p-value is calculated.
+        For each alternative, the p-value is defined as follows.
+
+        - ``'greater'`` : the percentage of the null distribution that is
+          greater than or equal to the observed value of the test statistic.
+        - ``'less'`` : the percentage of the null distribution that is
+          less than or equal to the observed value of the test statistic.
+        - ``'two-sided'`` : twice the smaller of the p-values above.
+
+    axis : int, default: 0
+        The axis of `sample` over which to calculate the statistic.
+
+    Returns
+    -------
+    statistic : float or ndarray
+        The observed test statistic of the sample.
+    pvalue : float or ndarray
+        The p-value for the given alternative.
+    null_distribution : ndarray
+        The values of the test statistic generated under the null hypothesis.
+
+    References
+    ----------
+
+    .. [1] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be
+       Zero: Calculating Exact P-values When Permutations Are Randomly Drawn."
+       Statistical Applications in Genetics and Molecular Biology 9.1 (2010).
+
+    Examples
+    --------
+
+    Suppose we wish to test whether a small sample has been drawn from a normal
+    distribution. We decide that we will use the skew of the sample as a
+    test statistic, and we will consider a p-value of 0.05 to be statistically
+    significant.
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> def statistic(x, axis):
+    ...     return stats.skew(x, axis)
+
+    After collecting our data, we calculate the observed value of the test
+    statistic.
+
+    >>> rng = np.random.default_rng()
+    >>> x = stats.skewnorm.rvs(a=1, size=50, random_state=rng)
+    >>> statistic(x, axis=0)
+    0.12457412450240658
+
+    To determine the probability of observing such an extreme value of the
+    skewness by chance if the sample were drawn from the normal distribution,
+    we can perform a Monte Carlo hypothesis test. The test will draw many
+    samples at random from the normal distribution, calculate the skewness
+    of each sample, and compare our original skewness against this
+    distribution to determine an approximate p-value.
+
+    >>> from scipy.stats import monte_carlo_test
+    >>> # because our statistic is vectorized, we pass `vectorized=True`
+    >>> rvs = lambda size: stats.norm.rvs(size=size, random_state=rng)
+    >>> res = monte_carlo_test(x, rvs, statistic, vectorized=True)
+    >>> print(res.statistic)
+    0.12457412450240658
+    >>> print(res.pvalue)
+    0.7012
+
+    The probability of obtaining a test statistic as extreme as the
+    observed value under the null hypothesis is ~70%. This is greater than
+    our chosen threshold of 5%, so we cannot consider this to be significant
+    evidence against the null hypothesis.
+
+    Note that this p-value essentially matches that of
+    `scipy.stats.skewtest`, which relies on an asymptotic distribution of a
+    test statistic based on the sample skewness.
+
+    >>> stats.skewtest(x).pvalue
+    0.6892046027110614
+
+    This asymptotic approximation is not valid for small sample sizes, but
+    `monte_carlo_test` can be used with samples of any size.
+
+    >>> x = stats.skewnorm.rvs(a=1, size=7, random_state=rng)
+    >>> # stats.skewtest(x) would produce an error due to small sample
+    >>> res = monte_carlo_test(x, rvs, statistic, vectorized=True)
+
+    The Monte Carlo distribution of the test statistic is provided for
+    further investigation.
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots()
+    >>> ax.hist(res.null_distribution, bins=50)
+    >>> ax.set_title("Monte Carlo distribution of test statistic")
+    >>> ax.set_xlabel("Value of Statistic")
+    >>> ax.set_ylabel("Frequency")
+    >>> plt.show()
+
+    """
+    args = _monte_carlo_test_iv(sample, rvs, statistic, vectorized,
+                                n_resamples, batch, alternative, axis)
+    (sample, rvs, statistic, vectorized,
+     n_resamples, batch, alternative, axis) = args
+
+    # Some statistics return plain floats; ensure they're at least np.float64
+    observed = np.asarray(statistic(sample, axis=-1))[()]
+
+    n_observations = sample.shape[-1]
+    batch_nominal = batch or n_resamples
+    null_distribution = []
+    for k in range(0, n_resamples, batch_nominal):
+        batch_actual = min(batch_nominal, n_resamples-k)
+        resamples = rvs(size=(batch_actual, n_observations))
+        null_distribution.append(statistic(resamples, axis=-1))
+    null_distribution = np.concatenate(null_distribution)
+    null_distribution = null_distribution.reshape([-1] + [1]*observed.ndim)
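+    # axis 0 of `null_distribution` indexes the resamples; the trailing
+    # singleton axes broadcast against the dimensions of `observed` below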
+
+    def less(null_distribution, observed):
+        cmps = null_distribution <= observed
+        pvalues = (cmps.sum(axis=0) + 1) / (n_resamples + 1)  # see [1]
+        return pvalues
+
+    def greater(null_distribution, observed):
+        cmps = null_distribution >= observed
+        pvalues = (cmps.sum(axis=0) + 1) / (n_resamples + 1)  # see [1]
+        return pvalues
+
+    def two_sided(null_distribution, observed):
+        pvalues_less = less(null_distribution, observed)
+        pvalues_greater = greater(null_distribution, observed)
+        pvalues = np.minimum(pvalues_less, pvalues_greater) * 2
+        return pvalues
+
+    compare = {"less": less,
+               "greater": greater,
+               "two-sided": two_sided}
+
+    pvalues = compare[alternative](null_distribution, observed)
+    pvalues = np.clip(pvalues, 0, 1)
+
+    return MonteCarloTestResult(observed, pvalues, null_distribution)
+
+
+attributes = ('statistic', 'pvalue', 'null_distribution')
+PermutationTestResult = make_dataclass('PermutationTestResult', attributes)
+
+
+def _all_partitions_concatenated(ns):
+    """
+    Generate all partitions of indices of groups of given sizes, concatenated
+
+    `ns` is an iterable of ints.
+    """
+    def all_partitions(z, n):
+        for c in combinations(z, n):
+            x0 = set(c)
+            x1 = z - x0
+            yield [x0, x1]
+
+    def all_partitions_n(z, ns):
+        if len(ns) == 0:
+            yield [z]
+            return
+        for c in all_partitions(z, ns[0]):
+            for d in all_partitions_n(c[1], ns[1:]):
+                yield c[0:1] + d
+
+    z = set(range(np.sum(ns)))
+    for partitioning in all_partitions_n(z, ns[:]):
+        x = np.concatenate([list(partition)
+                            for partition in partitioning]).astype(int)
+        yield x
+
+
+def _batch_generator(iterable, batch):
+    """A generator that yields batches of elements from an iterable"""
+    iterator = iter(iterable)
+    if batch <= 0:
+        raise ValueError("`batch` must be positive.")
+    z = [item for i, item in zip(range(batch), iterator)]
+    while z:  # we don't want StopIteration without yielding an empty list
+        yield z
+        z = [item for i, item in zip(range(batch), iterator)]
+
+
+def _pairings_permutations_gen(n_permutations, n_samples, n_obs_sample, batch,
+                               random_state):
+    # Returns a generator that yields arrays of size
+    # `(batch, n_samples, n_obs_sample)`.
+    # Each row is an independent permutation of indices 0 to
+    # `n_obs_sample - 1`.
+    batch = min(batch, n_permutations)
+
+    if hasattr(random_state, 'permuted'):
+        def batched_perm_generator():
+            indices = np.arange(n_obs_sample)
+            indices = np.tile(indices, (batch, n_samples, 1))
+            for k in range(0, n_permutations, batch):
+                batch_actual = min(batch, n_permutations-k)
+                # Don't permute in place, otherwise results depend on `batch`
+                permuted_indices = random_state.permuted(indices, axis=-1)
+                yield permuted_indices[:batch_actual]
+    else:  # RandomState and early Generators don't have `permuted`
+        def batched_perm_generator():
+            for k in range(0, n_permutations, batch):
+                batch_actual = min(batch, n_permutations-k)
+                size = (batch_actual, n_samples, n_obs_sample)
+                x = random_state.random(size=size)
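+                # argsort of i.i.d. uniform variates yields an independent,
+                # uniformly random permutation of each axis-slice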
+                yield np.argsort(x, axis=-1)[:batch_actual]
+
+    return batched_perm_generator()
+
+
+def _calculate_null_both(data, statistic, n_permutations, batch,
+                         random_state=None):
+    """
+    Calculate null distribution for independent sample tests.
+    """
+    n_samples = len(data)
+
+    # compute number of permutations
+    # (distinct partitions of data into samples of these sizes)
+    n_obs_i = [sample.shape[-1] for sample in data]  # observations per sample
+    n_obs_ic = np.cumsum(n_obs_i)
+    n_obs = n_obs_ic[-1]  # total number of observations
+    n_max = np.prod([comb(n_obs_ic[i], n_obs_ic[i-1])
+                     for i in range(n_samples-1, 0, -1)])
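+    # e.g. for two samples of sizes 3 and 2, n_max = comb(5, 3) = 10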
+
+    # perm_generator is an iterator that produces permutations of indices
+    # from 0 to n_obs. We'll concatenate the samples, use these indices to
+    # permute the data, then split the samples apart again.
+    if n_permutations >= n_max:
+        exact_test = True
+        n_permutations = n_max
+        perm_generator = _all_partitions_concatenated(n_obs_i)
+    else:
+        exact_test = False
+        # Neither RandomState.permutation nor Generator.permutation
+        # can permute axis-slices independently. If this feature is
+        # added in the future, batches of the desired size should be
+        # generated in a single call.
+        perm_generator = (random_state.permutation(n_obs)
+                          for i in range(n_permutations))
+
+    batch = batch or int(n_permutations)
+    null_distribution = []
+
+    # First, concatenate all the samples. In batches, permute samples with
+    # indices produced by the `perm_generator`, split them into new samples of
+    # the original sizes, compute the statistic for each batch, and add these
+    # statistic values to the null distribution.
+    data = np.concatenate(data, axis=-1)
+    for indices in _batch_generator(perm_generator, batch=batch):
+        indices = np.array(indices)
+
+        # `indices` is 2D: each row is a permutation of the indices.
+        # We use it to index `data` along its last axis, which corresponds
+        # with observations.
+        # After indexing, the second to last axis of `data_batch` corresponds
+        # with permutations, and the last axis corresponds with observations.
+        data_batch = data[..., indices]
+
+        # Move the permutation axis to the front: we'll concatenate a list
+        # of batched statistic values along this zeroth axis to form the
+        # null distribution.
+        data_batch = np.moveaxis(data_batch, -2, 0)
+        data_batch = np.split(data_batch, n_obs_ic[:-1], axis=-1)
+        null_distribution.append(statistic(*data_batch, axis=-1))
+    null_distribution = np.concatenate(null_distribution, axis=0)
+
+    return null_distribution, n_permutations, exact_test
+
+
+def _calculate_null_pairings(data, statistic, n_permutations, batch,
+                             random_state=None):
+    """
+    Calculate null distribution for association tests.
+    """
+    n_samples = len(data)
+
+    # compute number of permutations (factorial(n) permutations of each sample)
+    n_obs_sample = data[0].shape[-1]  # observations per sample; same for each
+    n_max = factorial(n_obs_sample)**n_samples
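+    # e.g. for two samples of 3 observations each, n_max = 6**2 = 36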
+
+    # `perm_generator` is an iterator that produces a list of permutations of
+    # indices from 0 to n_obs_sample, one for each sample.
+    if n_permutations >= n_max:
+        exact_test = True
+        n_permutations = n_max
+        batch = batch or int(n_permutations)
+        # cartesian product of the sets of all permutations of indices
+        perm_generator = product(*(permutations(range(n_obs_sample))
+                                   for i in range(n_samples)))
+        batched_perm_generator = _batch_generator(perm_generator, batch=batch)
+    else:
+        exact_test = False
+        batch = batch or int(n_permutations)
+        # Separate random permutations of indices for each sample.
+        # Again, it would be nice if RandomState/Generator.permutation
+        # could permute each axis-slice separately.
+        args = n_permutations, n_samples, n_obs_sample, batch, random_state
+        batched_perm_generator = _pairings_permutations_gen(*args)
+
+    null_distribution = []
+
+    for indices in batched_perm_generator:
+        indices = np.array(indices)
+
+        # `indices` is 3D: the zeroth axis is for permutations, the next is
+        # for samples, and the last is for observations. Swap the first two
+        # to make the zeroth axis correspond with samples, as it does for
+        # `data`.
+        indices = np.swapaxes(indices, 0, 1)
+
+        # When we're done, `data_batch` will be a list of length `n_samples`.
+        # Each element will be a batch of random permutations of one sample.
+        # The zeroth axis of each batch will correspond with permutations,
+        # and the last will correspond with observations. (This makes it
+        # easy to pass into `statistic`.)
+        data_batch = [None]*n_samples
+        for i in range(n_samples):
+            data_batch[i] = data[i][..., indices[i]]
+            data_batch[i] = np.moveaxis(data_batch[i], -2, 0)
+
+        null_distribution.append(statistic(*data_batch, axis=-1))
+    null_distribution = np.concatenate(null_distribution, axis=0)
+
+    return null_distribution, n_permutations, exact_test
+
+
+def _calculate_null_samples(data, statistic, n_permutations, batch,
+                            random_state=None):
+    """
+    Calculate null distribution for paired-sample tests.
+    """
+    n_samples = len(data)
+
+    # By convention, the meaning of the "samples" permutations type for
+    # data with only one sample is to flip the sign of the observations.
+    # Achieve this by adding a second sample - the negative of the original.
+    if n_samples == 1:
+        data = [data[0], -data[0]]
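+        # swapping paired elements between `x` and `-x` is equivalent to
+        # flipping the sign of individual observations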
+
+    # The "samples" permutation strategy is the same as the "pairings"
+    # strategy except the roles of samples and observations are flipped.
+    # So swap these axes, then we'll use the function for the "pairings"
+    # strategy to do all the work!
+    data = np.swapaxes(data, 0, -1)
+
+    # (Of course, the user's statistic doesn't know what we've done here,
+    # so we need to pass it what it's expecting.)
+    def statistic_wrapped(*data, axis):
+        data = np.swapaxes(data, 0, -1)
+        if n_samples == 1:
+            data = data[0:1]
+        return statistic(*data, axis=axis)
+
+    return _calculate_null_pairings(data, statistic_wrapped, n_permutations,
+                                    batch, random_state)
+
+
+def _permutation_test_iv(data, statistic, permutation_type, vectorized,
+                         n_resamples, batch, alternative, axis, random_state):
+    """Input validation for `permutation_test`."""
+
+    axis_int = int(axis)
+    if axis != axis_int:
+        raise ValueError("`axis` must be an integer.")
+
+    permutation_types = {'samples', 'pairings', 'independent'}
+    permutation_type = permutation_type.lower()
+    if permutation_type not in permutation_types:
+        raise ValueError(f"`permutation_type` must be in {permutation_types}.")
+
+    if vectorized not in {True, False, None}:
+        raise ValueError("`vectorized` must be `True`, `False`, or `None`.")
+
+    if vectorized is None:
+        vectorized = 'axis' in inspect.signature(statistic).parameters
+
+    if not vectorized:
+        statistic = _vectorize_statistic(statistic)
+
+    message = "`data` must be a tuple containing at least two samples"
+    try:
+        if len(data) < 2 and permutation_type == 'independent':
+            raise ValueError(message)
+    except TypeError:
+        raise TypeError(message)
+
+    data = _broadcast_arrays(data, axis)
+    data_iv = []
+    for sample in data:
+        sample = np.atleast_1d(sample)
+        if sample.shape[axis] <= 1:
+            raise ValueError("each sample in `data` must contain two or more "
+                             "observations along `axis`.")
+        sample = np.moveaxis(sample, axis_int, -1)
+        data_iv.append(sample)
+
+    n_resamples_int = (int(n_resamples) if not np.isinf(n_resamples)
+                       else np.inf)
+    if n_resamples != n_resamples_int or n_resamples_int <= 0:
+        raise ValueError("`n_resamples` must be a positive integer.")
+
+    if batch is None:
+        batch_iv = batch
+    else:
+        batch_iv = int(batch)
+        if batch != batch_iv or batch_iv <= 0:
+            raise ValueError("`batch` must be a positive integer or None.")
+
+    alternatives = {'two-sided', 'greater', 'less'}
+    alternative = alternative.lower()
+    if alternative not in alternatives:
+        raise ValueError(f"`alternative` must be in {alternatives}")
+
+    random_state = check_random_state(random_state)
+
+    return (data_iv, statistic, permutation_type, vectorized, n_resamples_int,
+            batch_iv, alternative, axis_int, random_state)
+
+
+def permutation_test(data, statistic, *, permutation_type='independent',
+                     vectorized=None, n_resamples=9999, batch=None,
+                     alternative="two-sided", axis=0, random_state=None):
+    r"""
+    Performs a permutation test of a given statistic on provided data.
+
+    For independent sample statistics, the null hypothesis is that the data are
+    randomly sampled from the same distribution.
+    For paired sample statistics, two null hypotheses can be tested:
+    that the data are paired at random or that the data are assigned to samples
+    at random.
+
+    Parameters
+    ----------
+    data : iterable of array-like
+        Contains the samples, each of which is an array of observations.
+        Dimensions of sample arrays must be compatible for broadcasting except
+        along `axis`.
+    statistic : callable
+        Statistic for which the p-value of the hypothesis test is to be
+        calculated. `statistic` must be a callable that accepts samples
+        as separate arguments (e.g. ``statistic(*data)``) and returns the
+        resulting statistic.
+        If `vectorized` is set to ``True``, `statistic` must also accept a keyword
+        argument `axis` and be vectorized to compute the statistic along the
+        provided `axis` of the sample arrays.
+    permutation_type : {'independent', 'samples', 'pairings'}, optional
+        The type of permutations to be performed, in accordance with the
+        null hypothesis. The ``'samples'`` and ``'pairings'`` types are for
+        paired sample statistics, in which all samples contain the same
+        number of observations and observations with corresponding indices
+        along `axis` are considered to be paired; ``'independent'`` is for
+        independent sample statistics.
+
+        - ``'samples'`` : observations are assigned to different samples
+          but remain paired with the same observations from other samples.
+          This permutation type is appropriate for paired sample hypothesis
+          tests such as the Wilcoxon signed-rank test and the paired t-test.
+        - ``'pairings'`` : observations are paired with different observations,
+          but they remain within the same sample. This permutation type is
+          appropriate for association/correlation tests with statistics such
+          as Spearman's :math:`\rho`, Kendall's :math:`\tau`, and Pearson's
+          :math:`r`.
+        - ``'independent'`` (default) : observations are assigned to different
+          samples. Samples may contain different numbers of observations. This
+          permutation type is appropriate for independent sample hypothesis
+          tests such as the Mann-Whitney :math:`U` test and the independent
+          sample t-test.
+
+          Please see the Notes section below for more detailed descriptions
+          of the permutation types.
+
+    vectorized : bool, optional
+        If `vectorized` is set to ``False``, `statistic` will not be passed
+        keyword argument `axis` and is expected to calculate the statistic
+        only for 1D samples. If ``True``, `statistic` will be passed keyword
+        argument `axis` and is expected to calculate the statistic along `axis`
+        when passed an ND sample array. If ``None`` (default), `vectorized`
+        will be set to ``True`` if ``axis`` is a parameter of `statistic`. Use
+        of a vectorized statistic typically reduces computation time.
+    n_resamples : int or np.inf, default: 9999
+        Number of random permutations (resamples) used to approximate the null
+        distribution. If greater than or equal to the number of distinct
+        permutations, the exact null distribution will be computed.
+        Note that the number of distinct permutations grows very rapidly with
+        the sizes of samples, so exact tests are feasible only for very small
+        data sets.
+    batch : int, optional
+        The number of permutations to process in each call to `statistic`.
+        Memory usage is O(`batch`*``n``), where ``n`` is the total size
+        of all samples, regardless of the value of `vectorized`. Default is
+        ``None``, in which case ``batch`` is the number of permutations.
+    alternative : {'two-sided', 'less', 'greater'}, optional
+        The alternative hypothesis for which the p-value is calculated.
+        For each alternative, the p-value is defined for exact tests as
+        follows.
+
+        - ``'greater'`` : the percentage of the null distribution that is
+          greater than or equal to the observed value of the test statistic.
+        - ``'less'`` : the percentage of the null distribution that is
+          less than or equal to the observed value of the test statistic.
+        - ``'two-sided'`` (default) : twice the smaller of the p-values above.
+
+        Note that p-values for randomized tests are calculated according to the
+        conservative (over-estimated) approximation suggested in [2]_ and [3]_
+        rather than the unbiased estimator suggested in [4]_. That is, when
+        calculating the proportion of the randomized null distribution that is
+        as extreme as the observed value of the test statistic, the values in
+        the numerator and denominator are both increased by one. An
+        interpretation of this adjustment is that the observed value of the
+        test statistic is always included as an element of the randomized
+        null distribution.
+        The convention used for two-sided p-values is not universal;
+        the observed test statistic and null distribution are returned in
+        case a different definition is preferred.
+
+    axis : int, default: 0
+        The axis of the (broadcasted) samples over which to calculate the
+        statistic. If samples have a different number of dimensions,
+        singleton dimensions are prepended to samples with fewer dimensions
+        before `axis` is considered.
+    random_state : {None, int, `numpy.random.Generator`,
+                    `numpy.random.RandomState`}, optional
+
+        Pseudorandom number generator state used to generate permutations.
+
+        If `random_state` is ``None`` (default), the
+        `numpy.random.RandomState` singleton is used.
+        If `random_state` is an int, a new ``RandomState`` instance is used,
+        seeded with `random_state`.
+        If `random_state` is already a ``Generator`` or ``RandomState``
+        instance then that instance is used.
+
+    Returns
+    -------
+    statistic : float or ndarray
+        The observed test statistic of the data.
+    pvalue : float or ndarray
+        The p-value for the given alternative.
+    null_distribution : ndarray
+        The values of the test statistic generated under the null hypothesis.
+
+    Notes
+    -----
+
+    The three types of permutation tests supported by this function are
+    described below.
+
+    **Unpaired statistics** (``permutation_type='independent'``):
+
+    The null hypothesis associated with this permutation type is that all
+    observations are sampled from the same underlying distribution and that
+    they have been assigned to one of the samples at random.
+
+    Suppose ``data`` contains two samples; e.g. ``a, b = data``.
+    When ``1 < n_resamples < binom(n, k)``, where
+
+    * ``k`` is the number of observations in ``a``,
+    * ``n`` is the total number of observations in ``a`` and ``b``, and
+    * ``binom(n, k)`` is the binomial coefficient (``n`` choose ``k``),
+
+    the data are pooled (concatenated), randomly assigned to either the first
+    or second sample, and the statistic is calculated. This process is
+    performed repeatedly, `n_resamples` times, generating a distribution of the
+    statistic under the null hypothesis. The statistic of the original
+    data is compared to this distribution to determine the p-value.
+
+    When ``n_resamples >= binom(n, k)``, an exact test is performed: the data
+    are *partitioned* between the samples in each distinct way exactly once,
+    and the exact null distribution is formed.
+    Note that for a given partitioning of the data between the samples,
+    only one ordering/permutation of the data *within* each sample is
+    considered. For statistics that do not depend on the order of the data
+    within samples, this dramatically reduces computational cost without
+    affecting the shape of the null distribution (because the frequency/count
+    of each value is affected by the same factor).
+
+    For ``a = [a1, a2, a3, a4]`` and ``b = [b1, b2, b3]``, an example of this
+    permutation type is ``x = [b3, a1, a2, b2]`` and ``y = [a4, b1, a3]``.
+    Because only one ordering/permutation of the data *within* each sample
+    is considered in an exact test, a resampling like ``x = [b3, a1, b2, a2]``
+    and ``y = [a4, a3, b1]`` would *not* be considered distinct from the
+    example above.
+
+    ``permutation_type='independent'`` does not support one-sample statistics,
+    but it can be applied to statistics with more than two samples. In this
+    case, if ``n`` is an array of the number of observations within each
+    sample, the number of distinct partitions is::
+
+        np.prod([binom(sum(n[i:]), sum(n[i+1:])) for i in range(len(n)-1)])
+
+    **Paired statistics, permute pairings** (``permutation_type='pairings'``):
+
+    The null hypothesis associated with this permutation type is that
+    observations within each sample are drawn from the same underlying
+    distribution and that pairings with elements of other samples are
+    assigned at random.
+
+    Suppose ``data`` contains only one sample; e.g. ``a, = data``, and we
+    wish to consider all possible pairings of elements of ``a`` with elements
+    of a second sample, ``b``. Let ``n`` be the number of observations in
+    ``a``, which must also equal the number of observations in ``b``.
+
+    When ``1 < n_resamples < factorial(n)``, the elements of ``a`` are
+    randomly permuted. The user-supplied statistic accepts one data argument,
+    say ``a_perm``, and calculates the statistic considering ``a_perm`` and
+    ``b``. This process is performed repeatedly, `n_resamples` times,
+    generating a distribution of the statistic under the null hypothesis.
+    The statistic of the original data is compared to this distribution to
+    determine the p-value.
+
+    When ``n_resamples >= factorial(n)``, an exact test is performed:
+    ``a`` is permuted in each distinct way exactly once. Therefore, the
+    `statistic` is computed for each unique pairing of samples between ``a``
+    and ``b`` exactly once.
+
+    For ``a = [a1, a2, a3]`` and ``b = [b1, b2, b3]``, an example of this
+    permutation type is ``a_perm = [a3, a1, a2]`` while ``b`` is left
+    in its original order.
+
+    ``permutation_type='pairings'`` supports ``data`` containing any number
+    of samples, each of which must contain the same number of observations.
+    All samples provided in ``data`` are permuted *independently*. Therefore,
+    if ``m`` is the number of samples and ``n`` is the number of observations
+    within each sample, then the number of permutations in an exact test is::
+
+        factorial(n)**m
+
+    Note that if a two-sample statistic, for example, does not inherently
+    depend on the order in which observations are provided - only on the
+    *pairings* of observations - then only one of the two samples should be
+    provided in ``data``. This dramatically reduces computational cost without
+    affecting the shape of the null distribution (because the frequency/count
+    of each value is affected by the same factor).
+
+    **Paired statistics, permute samples** (``permutation_type='samples'``):
+
+    The null hypothesis associated with this permutation type is that
+    observations within each pair are drawn from the same underlying
+    distribution and that the sample to which they are assigned is random.
+
+    Suppose ``data`` contains two samples; e.g. ``a, b = data``.
+    Let ``n`` be the number of observations in ``a``, which must also equal
+    the number of observations in ``b``.
+
+    When ``1 < n_resamples < 2**n``, the elements of ``a`` are ``b`` are
+    randomly swapped between samples (maintaining their pairings) and the
+    statistic is calculated. This process is performed repeatedly,
+    `n_resamples` times, generating a distribution of the statistic under the
+    null hypothesis. The statistic of the original data is compared to this
+    distribution to determine the p-value.
+
+    When ``n_resamples >= 2**n``, an exact test is performed: the observations
+    are assigned to the two samples in each distinct way (while maintaining
+    pairings) exactly once.
+
+    For ``a = [a1, a2, a3]`` and ``b = [b1, b2, b3]``, an example of this
+    permutation type is ``x = [b1, a2, b3]`` and ``y = [a1, b2, a3]``.
+
+    ``permutation_type='samples'`` supports ``data`` containing any number
+    of samples, each of which must contain the same number of observations.
+    If ``data`` contains more than one sample, paired observations within
+    ``data`` are exchanged between samples *independently*. Therefore, if ``m``
+    is the number of samples and ``n`` is the number of observations within
+    each sample, then the number of permutations in an exact test is::
+
+        factorial(m)**n
+
+    Several paired-sample statistical tests, such as the Wilcoxon signed rank
+    test and paired-sample t-test, can be performed considering only the
+    *difference* between two paired elements. Accordingly, if ``data`` contains
+    only one sample, then the null distribution is formed by independently
+    changing the *sign* of each observation.
+
+    .. warning::
+        The p-value is calculated by counting the elements of the null
+        distribution that are as extreme or more extreme than the observed
+        value of the statistic. Due to the use of finite precision arithmetic,
+        some statistic functions return numerically distinct values when the
+        theoretical values would be exactly equal. In some cases, this could
+        lead to a large error in the calculated p-value. `permutation_test`
+        guards against this by considering elements in the null distribution
+        that are "close" (within a factor of ``1+1e-14``) to the observed
+        value of the test statistic as equal to the observed value of the
+        test statistic. However, the user is advised to inspect the null
+        distribution to assess whether this method of comparison is
+        appropriate, and if not, calculate the p-value manually. See example
+        below.
+
+    References
+    ----------
+
+    .. [1] R. A. Fisher. The Design of Experiments, 6th Ed (1951).
+    .. [2] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be
+       Zero: Calculating Exact P-values When Permutations Are Randomly Drawn."
+       Statistical Applications in Genetics and Molecular Biology 9.1 (2010).
+    .. [3] M. D. Ernst. "Permutation Methods: A Basis for Exact Inference".
+       Statistical Science (2004).
+    .. [4] B. Efron and R. J. Tibshirani. An Introduction to the Bootstrap
+       (1993).
+
+    Examples
+    --------
+
+    Suppose we wish to test whether two samples are drawn from the same
+    distribution. Assume that the underlying distributions are unknown to us,
+    and that before observing the data, we hypothesized that the mean of the
+    first sample would be less than that of the second sample. We decide that
+    we will use the difference between the sample means as a test statistic,
+    and we will consider a p-value of 0.05 to be statistically significant.
+
+    For efficiency, we write the function defining the test statistic in a
+    vectorized fashion: the samples ``x`` and ``y`` can be ND arrays, and the
+    statistic will be calculated for each axis-slice along `axis`.
+
+    >>> import numpy as np
+    >>> def statistic(x, y, axis):
+    ...     return np.mean(x, axis=axis) - np.mean(y, axis=axis)
+
+    After collecting our data, we calculate the observed value of the test
+    statistic.
+
+    >>> from scipy.stats import norm
+    >>> rng = np.random.default_rng()
+    >>> x = norm.rvs(size=5, random_state=rng)
+    >>> y = norm.rvs(size=6, loc = 3, random_state=rng)
+    >>> statistic(x, y, 0)
+    -3.5411688580987266
+
+    Indeed, the test statistic is negative, suggesting that the true mean of
+    the distribution underlying ``x`` is less than that of the distribution
+    underlying ``y``. To determine the probability of this occurring by chance
+    if the two samples were drawn from the same distribution, we perform
+    a permutation test.
+
+    >>> from scipy.stats import permutation_test
+    >>> # because our statistic is vectorized, we pass `vectorized=True`
+    >>> # `n_resamples=np.inf` indicates that an exact test is to be performed
+    >>> res = permutation_test((x, y), statistic, vectorized=True,
+    ...                        n_resamples=np.inf, alternative='less')
+    >>> print(res.statistic)
+    -3.5411688580987266
+    >>> print(res.pvalue)
+    0.004329004329004329
+
+    The probability of obtaining a test statistic less than or equal to the
+    observed value under the null hypothesis is 0.4329%. This is less than our
+    chosen threshold of 5%, so we consider this to be significant evidence
+    against the null hypothesis in favor of the alternative.
+
+    Because the size of the samples above was small, `permutation_test` could
+    perform an exact test. For larger samples, we resort to a randomized
+    permutation test.
+
+    >>> x = norm.rvs(size=100, random_state=rng)
+    >>> y = norm.rvs(size=120, loc=0.3, random_state=rng)
+    >>> res = permutation_test((x, y), statistic, n_resamples=100000,
+    ...                        vectorized=True, alternative='less',
+    ...                        random_state=rng)
+    >>> print(res.statistic)
+    -0.5230459671240913
+    >>> print(res.pvalue)
+    0.00016999830001699983
+
+    The approximate probability of obtaining a test statistic less than or
+    equal to the observed value under the null hypothesis is 0.017%. This is
+    again less than our chosen threshold of 5%, so again we have significant
+    evidence to reject the null hypothesis in favor of the alternative.
+
+    For large samples and number of permutations, the result is comparable to
+    that of the corresponding asymptotic test, the independent sample t-test.
+
+    >>> from scipy.stats import ttest_ind
+    >>> res_asymptotic = ttest_ind(x, y, alternative='less')
+    >>> print(res_asymptotic.pvalue)
+    0.00012688101537979522
+
+    The permutation distribution of the test statistic is provided for
+    further investigation.
+
+    >>> import matplotlib.pyplot as plt
+    >>> plt.hist(res.null_distribution, bins=50)
+    >>> plt.title("Permutation distribution of test statistic")
+    >>> plt.xlabel("Value of Statistic")
+    >>> plt.ylabel("Frequency")
+    >>> plt.show()
+
+    Inspection of the null distribution is essential if the statistic suffers
+    from inaccuracy due to limited machine precision. Consider the following
+    case:
+
+    >>> from scipy.stats import pearsonr
+    >>> x = [1, 2, 4, 3]
+    >>> y = [2, 4, 6, 8]
+    >>> def statistic(x, y):
+    ...     return pearsonr(x, y).statistic
+    >>> res = permutation_test((x, y), statistic, vectorized=False,
+    ...                        permutation_type='pairings',
+    ...                        alternative='greater')
+    >>> r, pvalue, null = res.statistic, res.pvalue, res.null_distribution
+
+    In this case, some elements of the null distribution differ from the
+    observed value of the correlation coefficient ``r`` due to numerical noise.
+    We manually inspect the elements of the null distribution that are nearly
+    the same as the observed value of the test statistic.
+
+    >>> r
+    0.8
+    >>> unique = np.unique(null)
+    >>> unique
+    array([-1. , -0.8, -0.8, -0.6, -0.4, -0.2, -0.2,  0. ,  0.2,  0.2,  0.4,
+            0.6,  0.8,  0.8,  1. ]) # may vary
+    >>> unique[np.isclose(r, unique)].tolist()
+    [0.7999999999999999, 0.8]
+
+    If `permutation_test` were to perform the comparison naively, the
+    elements of the null distribution with value ``0.7999999999999999`` would
+    not be considered as extreme as or more extreme than the observed value of the
+    statistic, so the calculated p-value would be too small.
+
+    >>> incorrect_pvalue = np.count_nonzero(null >= r) / len(null)
+    >>> incorrect_pvalue
+    0.1111111111111111  # may vary
+
+    Instead, `permutation_test` treats elements of the null distribution that
+    are within ``max(1e-14, abs(r)*1e-14)`` of the observed value of the
+    statistic ``r`` as equal to ``r``.
+
+    >>> correct_pvalue = np.count_nonzero(null >= r - 1e-14) / len(null)
+    >>> correct_pvalue
+    0.16666666666666666
+    >>> res.pvalue == correct_pvalue
+    True
+
+    This method of comparison is expected to be accurate in most practical
+    situations, but the user is advised to assess this by inspecting the
+    elements of the null distribution that are close to the observed value
+    of the statistic. Also, consider the use of statistics that can be
+    calculated using exact arithmetic (e.g. integer statistics).
+
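+    For example, in the 'pairings' test above the Pearson statistic is a
+    strictly increasing function of ``np.sum(x * y)`` (the sums and sums of
+    squares of ``x`` and ``y`` are invariant under re-pairings), so the
+    integer-valued statistic below (``statistic_int`` is an illustrative
+    name, not part of the API) should give the same p-value using exact
+    arithmetic:
+
+    >>> def statistic_int(x, y):
+    ...     return np.sum(np.asarray(x) * np.asarray(y))
+    >>> res_int = permutation_test((x, y), statistic_int, vectorized=False,
+    ...                            permutation_type='pairings',
+    ...                            alternative='greater')
+    >>> res_int.pvalue == res.pvalue
+    True
+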
+    """
+    args = _permutation_test_iv(data, statistic, permutation_type, vectorized,
+                                n_resamples, batch, alternative, axis,
+                                random_state)
+    (data, statistic, permutation_type, vectorized, n_resamples, batch,
+     alternative, axis, random_state) = args
+
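+    # `_permutation_test_iv` broadcasts the samples and moves the axis of
+    # interest to the last position, so the statistic is evaluated with
+    # ``axis=-1``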
+    observed = statistic(*data, axis=-1)
+
+    null_calculators = {"pairings": _calculate_null_pairings,
+                        "samples": _calculate_null_samples,
+                        "independent": _calculate_null_both}
+    null_calculator_args = (data, statistic, n_resamples,
+                            batch, random_state)
+    calculate_null = null_calculators[permutation_type]
+    null_distribution, n_resamples, exact_test = (
+        calculate_null(*null_calculator_args))
+
+    # See References [2] and [3]
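+    # for a randomized (Monte Carlo) test, the observed statistic is counted
+    # among the resamples: one is added to the numerator and denominator of
+    # the p-value below so that it can never be exactly zero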
+    adjustment = 0 if exact_test else 1
+
+    # relative tolerance for detecting numerically distinct but
+    # theoretically equal values in the null distribution
+    eps = 1e-14
+    gamma = np.maximum(eps, np.abs(eps * observed))
+
+    def less(null_distribution, observed):
+        cmps = null_distribution <= observed + gamma
+        pvalues = (cmps.sum(axis=0) + adjustment) / (n_resamples + adjustment)
+        return pvalues
+
+    def greater(null_distribution, observed):
+        cmps = null_distribution >= observed - gamma
+        pvalues = (cmps.sum(axis=0) + adjustment) / (n_resamples + adjustment)
+        return pvalues
+
+    def two_sided(null_distribution, observed):
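+        # twice the smaller of the two one-sided p-values; results above 1
+        # are clipped to 1 by the ``np.clip`` call below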
+        pvalues_less = less(null_distribution, observed)
+        pvalues_greater = greater(null_distribution, observed)
+        pvalues = np.minimum(pvalues_less, pvalues_greater) * 2
+        return pvalues
+
+    compare = {"less": less,
+               "greater": greater,
+               "two-sided": two_sided}
+
+    pvalues = compare[alternative](null_distribution, observed)
+    pvalues = np.clip(pvalues, 0, 1)
+
+    return PermutationTestResult(observed, pvalues, null_distribution)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_result_classes.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_result_classes.py
new file mode 100644
index 00000000..5f4d7405
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_result_classes.py
@@ -0,0 +1,34 @@
+# This module exists only to allow Sphinx to generate docs
+# for the result objects returned by some functions in stats
+# _without_ adding them to the main stats documentation page.
+
+"""
+Result classes
+--------------
+
+.. currentmodule:: scipy.stats._result_classes
+
+.. autosummary::
+   :toctree: generated/
+
+   RelativeRiskResult
+   BinomTestResult
+   TukeyHSDResult
+   PearsonRResult
+   FitResult
+   OddsRatioResult
+   TtestResult
+
+"""
+
+__all__ = ['BinomTestResult', 'RelativeRiskResult', 'TukeyHSDResult',
+           'PearsonRResult', 'FitResult', 'OddsRatioResult',
+           'TtestResult']
+
+
+from ._binomtest import BinomTestResult
+from ._odds_ratio import OddsRatioResult
+from ._relative_risk import RelativeRiskResult
+from ._hypotests import TukeyHSDResult
+from ._stats_py import PearsonRResult, TtestResult
+from ._fit import FitResult
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_rvs_sampling.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_rvs_sampling.py
new file mode 100644
index 00000000..e54efb1d
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_rvs_sampling.py
@@ -0,0 +1,172 @@
+# -*- coding: utf-8 -*-
+import numpy as np
+from scipy._lib._util import check_random_state
+
+
+def rvs_ratio_uniforms(pdf, umax, vmin, vmax, size=1, c=0, random_state=None):
+    """
+    Generate random samples from a probability density function using the
+    ratio-of-uniforms method.
+
+    Parameters
+    ----------
+    pdf : callable
+        A function with signature `pdf(x)` that is proportional to the
+        probability density function of the distribution.
+    umax : float
+        The upper bound of the bounding rectangle in the u-direction.
+    vmin : float
+        The lower bound of the bounding rectangle in the v-direction.
+    vmax : float
+        The upper bound of the bounding rectangle in the v-direction.
+    size : int or tuple of ints, optional
+        Number of random variates to generate (default is 1).
+    c : float, optional
+        Shift parameter of ratio-of-uniforms method, see Notes. Default is 0.
+    random_state : {None, int, `numpy.random.Generator`,
+                    `numpy.random.RandomState`}, optional
+
+        If `random_state` is None (or `np.random`), the
+        `numpy.random.RandomState` singleton is used.
+        If `random_state` is an int, a new ``RandomState`` instance is
+        used, seeded with `random_state`.
+        If `random_state` is already a ``Generator`` or ``RandomState``
+        instance, that instance is used.
+
+    Returns
+    -------
+    rvs : ndarray
+        The random variates distributed according to the probability
+        distribution defined by the pdf.
+
+    Notes
+    -----
+    Given a univariate probability density function `pdf` and a constant `c`,
+    define the set ``A = {(u, v) : 0 < u <= sqrt(pdf(v/u + c))}``.
+    If `(U, V)` is a random vector uniformly distributed over `A`,
+    then `V/U + c` follows a distribution according to `pdf`.
+
+    The above result (see [1]_, [2]_) can be used to sample random variables
+    using only the pdf, i.e. no inversion of the cdf is required. Typical
+    choices of `c` are zero or the mode of `pdf`. The set `A` is a subset of
+    the rectangle ``R = [0, umax] x [vmin, vmax]`` where
+
+    - ``umax = sup sqrt(pdf(x))``
+    - ``vmin = inf (x - c) sqrt(pdf(x))``
+    - ``vmax = sup (x - c) sqrt(pdf(x))``
+
+    In particular, these values are finite if `pdf` is bounded and
+    ``x**2 * pdf(x)`` is bounded (i.e. subquadratic tails).
+    One can generate `(U, V)` uniformly on `R` and return
+    `V/U + c` if `(U, V)` is also in `A`, which can be verified
+    directly.
+
+    The algorithm is not changed if one replaces `pdf` by k * `pdf` for any
+    constant k > 0. Thus, it is often convenient to work with a function
+    that is proportional to the probability density function by dropping
+    unnecessary normalization factors.
+
+    Intuitively, the method works well if `A` fills up most of the
+    enclosing rectangle, so that the probability is high that `(U, V)`
+    lies in `A` whenever it lies in `R`; otherwise, the number of required
+    iterations becomes too large. To be more precise, note that
+    the expected number of iterations to draw `(U, V)` uniformly
+    distributed on `R` such that `(U, V)` is also in `A` is given by
+    the ratio ``area(R) / area(A) = 2 * umax * (vmax - vmin) / area(pdf)``,
+    where `area(pdf)` is the integral of `pdf` (which is equal to one if the
+    probability density function is used but can take on other values if a
+    function proportional to the density is used). The equality holds since
+    the area of `A` is equal to 0.5 * area(pdf) (Theorem 7.1 in [1]_).
+    If the sampling fails to generate a single random variate after 50000
+    iterations (i.e. not a single draw is in `A`), an exception is raised.
+
+    If the bounding rectangle is not correctly specified (i.e. if it does not
+    contain `A`), the algorithm samples from a distribution different from
+    the one given by `pdf`. It is therefore recommended to perform a
+    test such as `~scipy.stats.kstest` as a check.
+
+    References
+    ----------
+    .. [1] L. Devroye, "Non-Uniform Random Variate Generation",
+       Springer-Verlag, 1986.
+
+    .. [2] W. Hoermann and J. Leydold, "Generating generalized inverse Gaussian
+       random variates", Statistics and Computing, 24(4), p. 547--557, 2014.
+
+    .. [3] A.J. Kinderman and J.F. Monahan, "Computer Generation of Random
+       Variables Using the Ratio of Uniform Deviates",
+       ACM Transactions on Mathematical Software, 3(3), p. 257--260, 1977.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> rng = np.random.default_rng()
+
+    Simulate normally distributed random variables. It is easy to compute the
+    bounding rectangle explicitly in that case. For simplicity, we drop the
+    normalization factor of the density.
+
+    >>> f = lambda x: np.exp(-x**2 / 2)
+    >>> v_bound = np.sqrt(f(np.sqrt(2))) * np.sqrt(2)
+    >>> umax, vmin, vmax = np.sqrt(f(0)), -v_bound, v_bound
+    >>> rvs = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=2500,
+    ...                                random_state=rng)
+
+    The K-S test confirms that the random variates are indeed normally
+    distributed (normality is not rejected at the 5% significance level):
+
+    >>> stats.kstest(rvs, 'norm')[1]
+    0.250634764150542
+
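+    Using the formula from the Notes, the expected number of ``(U, V)``
+    draws per accepted variate in this example is
+    ``2 * umax * (vmax - vmin) / area(pdf)``, where ``area(pdf)`` equals
+    ``sqrt(2*pi)`` for the unnormalized Gaussian density used here:
+
+    >>> np.round(2 * umax * (vmax - vmin) / np.sqrt(2 * np.pi), 3)
+    1.369
+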
+    The exponential distribution provides another example where the bounding
+    rectangle can be determined explicitly.
+
+    >>> rvs = stats.rvs_ratio_uniforms(lambda x: np.exp(-x), umax=1,
+    ...                                vmin=0, vmax=2*np.exp(-1), size=1000,
+    ...                                random_state=rng)
+    >>> stats.kstest(rvs, 'expon')[1]
+    0.21121052054580314
+
+    """
+    if vmin >= vmax:
+        raise ValueError("vmin must be smaller than vmax.")
+
+    if umax <= 0:
+        raise ValueError("umax must be positive.")
+
+    size1d = tuple(np.atleast_1d(size))
+    N = np.prod(size1d)  # number of rvs needed, reshape upon return
+
+    # start sampling using ratio of uniforms method
+    rng = check_random_state(random_state)
+    x = np.zeros(N)
+    simulated, i = 0, 1
+
+    # loop until N rvs have been generated: expected runtime is finite.
+    # to avoid an infinite loop, raise an exception if not a single rv has
+    # been generated after 50000 tries. even if the expected number of
+    # iterations is 1000, the probability of this event is (1-1/1000)**50000,
+    # which is of order 1e-22
+    while simulated < N:
+        k = N - simulated
+        # simulate uniform rvs on [0, umax] and [vmin, vmax]
+        u1 = umax * rng.uniform(size=k)
+        v1 = rng.uniform(vmin, vmax, size=k)
+        # apply rejection method
+        rvs = v1 / u1 + c
+        accept = (u1**2 <= pdf(rvs))
+        num_accept = np.sum(accept)
+        if num_accept > 0:
+            x[simulated:(simulated + num_accept)] = rvs[accept]
+            simulated += num_accept
+
+        if (simulated == 0) and (i*N >= 50000):
+            msg = ("Not a single random variate could be generated in {} "
+                   "attempts. The ratio of uniforms method does not appear "
+                   "to work for the provided parameters. Please check the "
+                   "pdf and the bounds.".format(i*N))
+            raise RuntimeError(msg)
+        i += 1
+
+    return np.reshape(x, size1d)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_sobol.pyi b/__packaged__/coreml/.python_dependencies/scipy/stats/_sobol.pyi
new file mode 100644
index 00000000..7ca5e3a9
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_sobol.pyi
@@ -0,0 +1,54 @@
+import numpy as np
+from scipy._lib._util import IntNumber
+from typing import Literal
+
+def _initialize_v(
+    v: np.ndarray,
+    dim: IntNumber,
+    bits: IntNumber
+) -> None: ...
+
+def _cscramble(
+    dim: IntNumber,
+    bits: IntNumber,
+    ltm: np.ndarray,
+    sv: np.ndarray
+) -> None: ...
+
+def _fill_p_cumulative(
+    p: np.ndarray,
+    p_cumulative: np.ndarray
+) -> None: ...
+
+def _draw(
+    n: IntNumber,
+    num_gen: IntNumber,
+    dim: IntNumber,
+    scale: float,
+    sv: np.ndarray,
+    quasi: np.ndarray,
+    sample: np.ndarray
+) -> None: ...
+
+def _fast_forward(
+    n: IntNumber,
+    num_gen: IntNumber,
+    dim: IntNumber,
+    sv: np.ndarray,
+    quasi: np.ndarray
+) -> None: ...
+
+def _categorize(
+    draws: np.ndarray,
+    p_cumulative: np.ndarray,
+    result: np.ndarray
+) -> None: ...
+
+_MAXDIM: Literal[21201]
+_MAXDEG: Literal[18]
+
+def _test_find_index(
+    p_cumulative: np.ndarray,
+    size: int,
+    value: float
+) -> int: ...
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_sobol_direction_numbers.npz b/__packaged__/coreml/.python_dependencies/scipy/stats/_sobol_direction_numbers.npz
new file mode 100644
index 00000000..da96d451
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/stats/_sobol_direction_numbers.npz differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_stats.pxd b/__packaged__/coreml/.python_dependencies/scipy/stats/_stats.pxd
new file mode 100644
index 00000000..b50e16c5
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_stats.pxd
@@ -0,0 +1,9 @@
+# destined to be used in a LowLevelCallable
+cdef double _geninvgauss_pdf(double x, void *user_data) nogil except *
+cdef double _studentized_range_cdf(int n, double[2] x, void *user_data) nogil
+cdef double _studentized_range_cdf_asymptotic(double z, void *user_data) nogil
+cdef double _studentized_range_pdf(int n, double[2] x, void *user_data) nogil
+cdef double _studentized_range_pdf_asymptotic(double z, void *user_data) nogil
+cdef double _studentized_range_moment(int n, double[3] x_arg, void *user_data) nogil
+cdef double _genhyperbolic_pdf(double x, void *user_data) nogil except *
+cdef double _genhyperbolic_logpdf(double x, void *user_data) nogil except *
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_stats_mstats_common.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_stats_mstats_common.py
new file mode 100644
index 00000000..491f05a2
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_stats_mstats_common.py
@@ -0,0 +1,501 @@
+import warnings
+import numpy as np
+import scipy.stats._stats_py
+from . import distributions
+from .._lib._bunch import _make_tuple_bunch
+from ._stats_pythran import siegelslopes as siegelslopes_pythran
+
+__all__ = ['_find_repeats', 'linregress', 'theilslopes', 'siegelslopes']
+
+# This is not a namedtuple for backwards compatibility. See PR #12983
+LinregressResult = _make_tuple_bunch('LinregressResult',
+                                     ['slope', 'intercept', 'rvalue',
+                                      'pvalue', 'stderr'],
+                                     extra_field_names=['intercept_stderr'])
+TheilslopesResult = _make_tuple_bunch('TheilslopesResult',
+                                      ['slope', 'intercept',
+                                       'low_slope', 'high_slope'])
+SiegelslopesResult = _make_tuple_bunch('SiegelslopesResult',
+                                       ['slope', 'intercept'])
+
+
+def linregress(x, y=None, alternative='two-sided'):
+    """
+    Calculate a linear least-squares regression for two sets of measurements.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Two sets of measurements.  Both arrays should have the same length.  If
+        only `x` is given (and ``y=None``), then it must be a two-dimensional
+        array where one dimension has length 2.  The two sets of measurements
+        are then found by splitting the array along the length-2 dimension. In
+        the case where ``y=None`` and `x` is a 2x2 array, ``linregress(x)`` is
+        equivalent to ``linregress(x[0], x[1])``.
+    alternative : {'two-sided', 'less', 'greater'}, optional
+        Defines the alternative hypothesis. Default is 'two-sided'.
+        The following options are available:
+
+        * 'two-sided': the slope of the regression line is nonzero
+        * 'less': the slope of the regression line is less than zero
+        * 'greater':  the slope of the regression line is greater than zero
+
+        .. versionadded:: 1.7.0
+
+    Returns
+    -------
+    result : ``LinregressResult`` instance
+        The return value is an object with the following attributes:
+
+        slope : float
+            Slope of the regression line.
+        intercept : float
+            Intercept of the regression line.
+        rvalue : float
+            The Pearson correlation coefficient. The square of ``rvalue``
+            is equal to the coefficient of determination.
+        pvalue : float
+            The p-value for a hypothesis test whose null hypothesis is
+            that the slope is zero, using Wald Test with t-distribution of
+            the test statistic. See `alternative` above for alternative
+            hypotheses.
+        stderr : float
+            Standard error of the estimated slope (gradient), under the
+            assumption of residual normality.
+        intercept_stderr : float
+            Standard error of the estimated intercept, under the assumption
+            of residual normality.
+
+    See Also
+    --------
+    scipy.optimize.curve_fit :
+        Use non-linear least squares to fit a function to data.
+    scipy.optimize.leastsq :
+        Minimize the sum of squares of a set of equations.
+
+    Notes
+    -----
+    Missing values are considered pair-wise: if a value is missing in `x`,
+    the corresponding value in `y` is masked.
+
+    For compatibility with older versions of SciPy, the return value acts
+    like a ``namedtuple`` of length 5, with fields ``slope``, ``intercept``,
+    ``rvalue``, ``pvalue`` and ``stderr``, so one can continue to write::
+
+        slope, intercept, r, p, se = linregress(x, y)
+
+    With that style, however, the standard error of the intercept is not
+    available.  To have access to all the computed values, including the
+    standard error of the intercept, use the return value as an object
+    with attributes, e.g.::
+
+        result = linregress(x, y)
+        print(result.intercept, result.intercept_stderr)
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy import stats
+    >>> rng = np.random.default_rng()
+
+    Generate some data:
+
+    >>> x = rng.random(10)
+    >>> y = 1.6*x + rng.random(10)
+
+    Perform the linear regression:
+
+    >>> res = stats.linregress(x, y)
+
+    Coefficient of determination (R-squared):
+
+    >>> print(f"R-squared: {res.rvalue**2:.6f}")
+    R-squared: 0.717533
+
+    Plot the data along with the fitted line:
+
+    >>> plt.plot(x, y, 'o', label='original data')
+    >>> plt.plot(x, res.intercept + res.slope*x, 'r', label='fitted line')
+    >>> plt.legend()
+    >>> plt.show()
+
+    Calculate 95% confidence interval on slope and intercept:
+
+    >>> # Two-sided inverse Students t-distribution
+    >>> # p - probability, df - degrees of freedom
+    >>> from scipy.stats import t
+    >>> tinv = lambda p, df: abs(t.ppf(p/2, df))
+
+    >>> ts = tinv(0.05, len(x)-2)
+    >>> print(f"slope (95%): {res.slope:.6f} +/- {ts*res.stderr:.6f}")
+    slope (95%): 1.453392 +/- 0.743465
+    >>> print(f"intercept (95%): {res.intercept:.6f}"
+    ...       f" +/- {ts*res.intercept_stderr:.6f}")
+    intercept (95%): 0.616950 +/- 0.544475
+
+    """
+    TINY = 1.0e-20
+    if y is None:  # x is a (2, N) or (N, 2) shaped array_like
+        x = np.asarray(x)
+        if x.shape[0] == 2:
+            x, y = x
+        elif x.shape[1] == 2:
+            x, y = x.T
+        else:
+            raise ValueError("If only `x` is given as input, it has to "
+                             "be of shape (2, N) or (N, 2); provided shape "
+                             f"was {x.shape}.")
+    else:
+        x = np.asarray(x)
+        y = np.asarray(y)
+
+    if x.size == 0 or y.size == 0:
+        raise ValueError("Inputs must not be empty.")
+
+    if np.amax(x) == np.amin(x) and len(x) > 1:
+        raise ValueError("Cannot calculate a linear regression "
+                         "if all x values are identical")
+
+    n = len(x)
+    xmean = np.mean(x, None)
+    ymean = np.mean(y, None)
+
+    # Average sums of square differences from the mean
+    #   ssxm = mean( (x-mean(x))^2 )
+    #   ssxym = mean( (x-mean(x)) * (y-mean(y)) )
+    ssxm, ssxym, _, ssym = np.cov(x, y, bias=1).flat
+
+    # R-value
+    #   r = ssxym / sqrt( ssxm * ssym )
+    if ssxm == 0.0 or ssym == 0.0:
+        # If the denominator was going to be 0
+        r = 0.0
+    else:
+        r = ssxym / np.sqrt(ssxm * ssym)
+        # Test for numerical error propagation (make sure -1 < r < 1)
+        if r > 1.0:
+            r = 1.0
+        elif r < -1.0:
+            r = -1.0
+
+    slope = ssxym / ssxm
+    intercept = ymean - slope*xmean
+    if n == 2:
+        # handle case when only two points are passed in
+        if y[0] == y[1]:
+            prob = 1.0
+        else:
+            prob = 0.0
+        slope_stderr = 0.0
+        intercept_stderr = 0.0
+    else:
+        df = n - 2  # Number of degrees of freedom
+        # n-2 degrees of freedom because two parameters (the slope and
+        # the intercept) have been estimated
+        t = r * np.sqrt(df / ((1.0 - r + TINY)*(1.0 + r + TINY)))
+        t, prob = scipy.stats._stats_py._ttest_finish(df, t, alternative)
+
+        slope_stderr = np.sqrt((1 - r**2) * ssym / ssxm / df)
+
+        # Also calculate the standard error of the intercept
+        # The following relationship is used:
+        #   ssxm = mean( (x-mean(x))^2 )
+        #        = ssx - sx*sx
+        #        = mean( x^2 ) - mean(x)^2
+        intercept_stderr = slope_stderr * np.sqrt(ssxm + xmean**2)
+
+    return LinregressResult(slope=slope, intercept=intercept, rvalue=r,
+                            pvalue=prob, stderr=slope_stderr,
+                            intercept_stderr=intercept_stderr)
+
+
+def theilslopes(y, x=None, alpha=0.95, method='separate'):
+    r"""
+    Computes the Theil-Sen estimator for a set of points (x, y).
+
+    `theilslopes` implements a method for robust linear regression.  It
+    computes the slope as the median of all slopes between paired values.
+
+    Parameters
+    ----------
+    y : array_like
+        Dependent variable.
+    x : array_like or None, optional
+        Independent variable. If None, use ``arange(len(y))`` instead.
+    alpha : float, optional
+        Confidence degree between 0 and 1. Default is 95% confidence.
+        Note that `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are
+        interpreted as "find the 90% confidence interval".
+    method : {'joint', 'separate'}, optional
+        Method to be used for computing estimate for intercept.
+        Following methods are supported,
+
+            * 'joint': Uses np.median(y - slope * x) as intercept.
+            * 'separate': Uses np.median(y) - slope * np.median(x)
+                          as intercept.
+
+        The default is 'separate'.
+
+        .. versionadded:: 1.8.0
+
+    Returns
+    -------
+    result : ``TheilslopesResult`` instance
+        The return value is an object with the following attributes:
+
+        slope : float
+            Theil slope.
+        intercept : float
+            Intercept of the Theil line.
+        low_slope : float
+            Lower bound of the confidence interval on `slope`.
+        high_slope : float
+            Upper bound of the confidence interval on `slope`.
+
+    See Also
+    --------
+    siegelslopes : a similar technique using repeated medians
+
+    Notes
+    -----
+    The implementation of `theilslopes` follows [1]_. The intercept is
+    not defined in [1]_, and here it is defined as ``median(y) -
+    slope*median(x)``, which is given in [3]_. Other definitions of
+    the intercept exist in the literature such as  ``median(y - slope*x)``
+    in [4]_. The approach to compute the intercept can be determined by the
+    parameter ``method``. A confidence interval for the intercept is not
+    given as this question is not addressed in [1]_.
+
+    For compatibility with older versions of SciPy, the return value acts
+    like a ``namedtuple`` of length 4, with fields ``slope``, ``intercept``,
+    ``low_slope``, and ``high_slope``, so one can continue to write::
+
+        slope, intercept, low_slope, high_slope = theilslopes(y, x)
+
+    References
+    ----------
+    .. [1] P.K. Sen, "Estimates of the regression coefficient based on
+           Kendall's tau", J. Am. Stat. Assoc., Vol. 63, pp. 1379-1389, 1968.
+    .. [2] H. Theil, "A rank-invariant method of linear and polynomial
+           regression analysis I, II and III",  Nederl. Akad. Wetensch., Proc.
+           53:, pp. 386-392, pp. 521-525, pp. 1397-1412, 1950.
+    .. [3] W.L. Conover, "Practical nonparametric statistics", 2nd ed.,
+           John Wiley and Sons, New York, p. 493.
+    .. [4] https://en.wikipedia.org/wiki/Theil%E2%80%93Sen_estimator
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> import matplotlib.pyplot as plt
+
+    >>> x = np.linspace(-5, 5, num=150)
+    >>> y = x + np.random.normal(size=x.size)
+    >>> y[11:15] += 10  # add outliers
+    >>> y[-5:] -= 7
+
+    Compute the slope, intercept and 90% confidence interval.  For comparison,
+    also compute the least-squares fit with `linregress`:
+
+    >>> res = stats.theilslopes(y, x, 0.90, method='separate')
+    >>> lsq_res = stats.linregress(x, y)
+
+    Plot the results. The Theil-Sen regression line is shown in red, with the
+    dashed red lines illustrating the confidence interval of the slope (note
+    that the dashed red lines are not the confidence interval of the regression
+    as the confidence interval of the intercept is not included). The green
+    line shows the least-squares fit for comparison.
+
+    >>> fig = plt.figure()
+    >>> ax = fig.add_subplot(111)
+    >>> ax.plot(x, y, 'b.')
+    >>> ax.plot(x, res[1] + res[0] * x, 'r-')
+    >>> ax.plot(x, res[1] + res[2] * x, 'r--')
+    >>> ax.plot(x, res[1] + res[3] * x, 'r--')
+    >>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-')
+    >>> plt.show()
+
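+    The two intercept conventions described in the Notes can be compared
+    directly; with ``method='joint'``, the intercept is exactly
+    ``np.median(y - slope * x)``:
+
+    >>> res_joint = stats.theilslopes(y, x, 0.90, method='joint')
+    >>> res_joint.intercept == np.median(y - res_joint.slope * x)
+    True
+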
+    """
+    if method not in ['joint', 'separate']:
+        raise ValueError("method must be either 'joint' or 'separate'. "
+                         "'{}' is invalid.".format(method))
+    # We copy both x and y so we can use _find_repeats.
+    y = np.array(y).flatten()
+    if x is None:
+        x = np.arange(len(y), dtype=float)
+    else:
+        x = np.array(x, dtype=float).flatten()
+        if len(x) != len(y):
+            raise ValueError("Incompatible lengths ! (%s<>%s)" %
+                             (len(y), len(x)))
+
+    # Compute sorted slopes only when deltax > 0
+    deltax = x[:, np.newaxis] - x
+    deltay = y[:, np.newaxis] - y
+    slopes = deltay[deltax > 0] / deltax[deltax > 0]
+    if not slopes.size:
+        msg = "All `x` coordinates are identical."
+        warnings.warn(msg, RuntimeWarning, stacklevel=2)
+    slopes.sort()
+    medslope = np.median(slopes)
+    if method == 'joint':
+        medinter = np.median(y - medslope * x)
+    else:
+        medinter = np.median(y) - medslope * np.median(x)
+    # Now compute confidence intervals
+    if alpha > 0.5:
+        alpha = 1. - alpha
+
+    z = distributions.norm.ppf(alpha / 2.)
+    # This implements (2.6) from Sen (1968)
+    _, nxreps = _find_repeats(x)
+    _, nyreps = _find_repeats(y)
+    nt = len(slopes)       # N in Sen (1968)
+    ny = len(y)            # n in Sen (1968)
+    # Equation 2.6 in Sen (1968):
+    sigsq = 1/18. * (ny * (ny-1) * (2*ny+5) -
+                     sum(k * (k-1) * (2*k + 5) for k in nxreps) -
+                     sum(k * (k-1) * (2*k + 5) for k in nyreps))
+    # Find the confidence interval indices in `slopes`
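+    # `z` is negative (``alpha/2 < 0.5``), so `Ru` below is the upper index
+    # and `Rl` the lower index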
+    try:
+        sigma = np.sqrt(sigsq)
+        Ru = min(int(np.round((nt - z*sigma)/2.)), len(slopes)-1)
+        Rl = max(int(np.round((nt + z*sigma)/2.)) - 1, 0)
+        delta = slopes[[Rl, Ru]]
+    except (ValueError, IndexError):
+        delta = (np.nan, np.nan)
+
+    return TheilslopesResult(slope=medslope, intercept=medinter,
+                             low_slope=delta[0], high_slope=delta[1])
+
+
+def _find_repeats(arr):
+    # This function assumes it may clobber its input.
+    if len(arr) == 0:
+        return np.array(0, np.float64), np.array(0, np.intp)
+
+    # XXX This cast was previously needed for the Fortran implementation,
+    # should we ditch it?
+    arr = np.asarray(arr, np.float64).ravel()
+    arr.sort()
+
+    # Taken from NumPy 1.9's np.unique.
+    change = np.concatenate(([True], arr[1:] != arr[:-1]))
+    unique = arr[change]
+    change_idx = np.concatenate(np.nonzero(change) + ([arr.size],))
+    freq = np.diff(change_idx)
+    atleast2 = freq > 1
+    return unique[atleast2], freq[atleast2]
+
+
+def siegelslopes(y, x=None, method="hierarchical"):
+    r"""
+    Computes the Siegel estimator for a set of points (x, y).
+
+    `siegelslopes` implements a method for robust linear regression
+    using repeated medians (see [1]_) to fit a line to the points (x, y).
+    The method is robust to outliers with an asymptotic breakdown point
+    of 50%.
+
+    Parameters
+    ----------
+    y : array_like
+        Dependent variable.
+    x : array_like or None, optional
+        Independent variable. If None, use ``arange(len(y))`` instead.
+    method : {'hierarchical', 'separate'}
+        If 'hierarchical', estimate the intercept using the estimated
+        slope ``slope`` (default option).
+        If 'separate', estimate the intercept independent of the estimated
+        slope. See Notes for details.
+
+    Returns
+    -------
+    result : ``SiegelslopesResult`` instance
+        The return value is an object with the following attributes:
+
+        slope : float
+            Estimate of the slope of the regression line.
+        intercept : float
+            Estimate of the intercept of the regression line.
+
+    See Also
+    --------
+    theilslopes : a similar technique without repeated medians
+
+    Notes
+    -----
+    With ``n = len(y)``, compute ``m_j`` as the median of
+    the slopes from the point ``(x[j], y[j])`` to all other `n-1` points.
+    ``slope`` is then the median of all slopes ``m_j``.
+    Two ways are given to estimate the intercept in [1]_ which can be chosen
+    via the parameter ``method``.
+    The hierarchical approach uses the estimated slope ``slope``
+    and computes ``intercept`` as the median of ``y - slope*x``.
+    The other approach estimates the intercept separately as follows: for
+    each point ``(x[j], y[j])``, compute the intercepts of all the `n-1`
+    lines through the remaining points and take the median ``i_j``.
+    ``intercept`` is the median of the ``i_j``.
+
+    The implementation computes `n` times the median of a vector of size
+    `n`, which can be slow for large vectors. There are more efficient
+    algorithms (see [2]_) that are not implemented here.
+
+    For compatibility with older versions of SciPy, the return value acts
+    like a ``namedtuple`` of length 2, with fields ``slope`` and
+    ``intercept``, so one can continue to write::
+
+        slope, intercept = siegelslopes(y, x)
+
+    References
+    ----------
+    .. [1] A. Siegel, "Robust Regression Using Repeated Medians",
+           Biometrika, Vol. 69, pp. 242-244, 1982.
+
+    .. [2] A. Stein and M. Werman, "Finding the repeated median regression
+           line", Proceedings of the Third Annual ACM-SIAM Symposium on
+           Discrete Algorithms, pp. 409-413, 1992.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> import matplotlib.pyplot as plt
+
+    >>> x = np.linspace(-5, 5, num=150)
+    >>> y = x + np.random.normal(size=x.size)
+    >>> y[11:15] += 10  # add outliers
+    >>> y[-5:] -= 7
+
+    Compute the slope and intercept.  For comparison, also compute the
+    least-squares fit with `linregress`:
+
+    >>> res = stats.siegelslopes(y, x)
+    >>> lsq_res = stats.linregress(x, y)
+
+    Plot the results. The Siegel regression line is shown in red. The green
+    line shows the least-squares fit for comparison.
+
+    >>> fig = plt.figure()
+    >>> ax = fig.add_subplot(111)
+    >>> ax.plot(x, y, 'b.')
+    >>> ax.plot(x, res[1] + res[0] * x, 'r-')
+    >>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-')
+    >>> plt.show()
+
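+    As a check, the repeated-medians definition from the Notes can be
+    restated directly (a slow, illustrative sketch; ``slopes_j`` is a local
+    name, not part of the API):
+
+    >>> slopes_j = [np.median([(y[i] - y[j]) / (x[i] - x[j])
+    ...                        for i in range(len(x)) if i != j])
+    ...             for j in range(len(x))]
+    >>> np.isclose(np.median(slopes_j), res[0])
+    True
+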
+    """
+    if method not in ['hierarchical', 'separate']:
+        raise ValueError("method can only be 'hierarchical' or 'separate'")
+    y = np.asarray(y).ravel()
+    if x is None:
+        x = np.arange(len(y), dtype=float)
+    else:
+        x = np.asarray(x, dtype=float).ravel()
+        if len(x) != len(y):
+            raise ValueError("Incompatible lengths ! (%s<>%s)" %
+                             (len(y), len(x)))
+    dtype = np.result_type(x, y, np.float32)  # use at least float32
+    y, x = y.astype(dtype), x.astype(dtype)
+    medslope, medinter = siegelslopes_pythran(y, x, method)
+    return SiegelslopesResult(slope=medslope, intercept=medinter)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_stats_py.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_stats_py.py
new file mode 100644
index 00000000..d72f6af3
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_stats_py.py
@@ -0,0 +1,9662 @@
+# Copyright 2002 Gary Strangman.  All rights reserved
+# Copyright 2002-2016 The SciPy Developers
+#
+# The original code from Gary Strangman was heavily adapted for
+# use in SciPy by Travis Oliphant.  The original code came with the
+# following disclaimer:
+#
+# This software is provided "as-is".  There are no expressed or implied
+# warranties of any kind, including, but not limited to, the warranties
+# of merchantability and fitness for a given application.  In no event
+# shall Gary Strangman be liable for any direct, indirect, incidental,
+# special, exemplary or consequential damages (including, but not limited
+# to, loss of use, data or profits, or business interruption) however
+# caused and on any theory of liability, whether in contract, strict
+# liability or tort (including negligence or otherwise) arising in any way
+# out of the use of this software, even if advised of the possibility of
+# such damage.
+
+"""
+A collection of basic statistical functions for Python.
+
+References
+----------
+.. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
+   Probability and Statistics Tables and Formulae. Chapman & Hall: New
+   York. 2000.
+
+"""
+import warnings
+import math
+from math import gcd
+from collections import namedtuple, Counter
+
+import numpy as np
+from numpy import array, asarray, ma
+from numpy.lib import NumpyVersion
+from numpy.testing import suppress_warnings
+
+from scipy.spatial.distance import cdist
+from scipy.ndimage import _measurements
+from scipy._lib._util import (check_random_state, MapWrapper,
+                              rng_integers, _rename_parameter, _contains_nan)
+
+import scipy.special as special
+from scipy import linalg
+from . import distributions
+from . import _mstats_basic as mstats_basic
+from ._stats_mstats_common import (_find_repeats, linregress, theilslopes,
+                                   siegelslopes)
+from ._stats import (_kendall_dis, _toint64, _weightedrankedtau,
+                     _local_correlations)
+from dataclasses import make_dataclass
+from ._hypotests import _all_partitions
+from ._stats_pythran import _compute_outer_prob_inside_method
+from ._resampling import _batch_generator
+from ._axis_nan_policy import (_axis_nan_policy_factory,
+                               _broadcast_concatenate)
+from ._binomtest import _binary_search_for_binom_tst as _binary_search
+from scipy._lib._bunch import _make_tuple_bunch
+from scipy import stats
+from scipy.optimize import root_scalar
+
+
+# Functions/classes in other files should be added in `__init__.py`, not here
+__all__ = ['find_repeats', 'gmean', 'hmean', 'pmean', 'mode', 'tmean', 'tvar',
+           'tmin', 'tmax', 'tstd', 'tsem', 'moment',
+           'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest',
+           'normaltest', 'jarque_bera',
+           'scoreatpercentile', 'percentileofscore',
+           'cumfreq', 'relfreq', 'obrientransform',
+           'sem', 'zmap', 'zscore', 'gzscore', 'iqr', 'gstd',
+           'median_abs_deviation',
+           'sigmaclip', 'trimboth', 'trim1', 'trim_mean',
+           'f_oneway', 'pearsonr', 'fisher_exact',
+           'spearmanr', 'pointbiserialr',
+           'kendalltau', 'weightedtau', 'multiscale_graphcorr',
+           'linregress', 'siegelslopes', 'theilslopes', 'ttest_1samp',
+           'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel',
+           'kstest', 'ks_1samp', 'ks_2samp',
+           'chisquare', 'power_divergence',
+           'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare',
+           'rankdata',
+           'combine_pvalues', 'wasserstein_distance', 'energy_distance',
+           'brunnermunzel', 'alexandergovern',
+           'expectile', ]
+
+
+def _chk_asarray(a, axis):
+    if axis is None:
+        a = np.ravel(a)
+        outaxis = 0
+    else:
+        a = np.asarray(a)
+        outaxis = axis
+
+    if a.ndim == 0:
+        a = np.atleast_1d(a)
+
+    return a, outaxis
+
+
+def _chk2_asarray(a, b, axis):
+    if axis is None:
+        a = np.ravel(a)
+        b = np.ravel(b)
+        outaxis = 0
+    else:
+        a = np.asarray(a)
+        b = np.asarray(b)
+        outaxis = axis
+
+    if a.ndim == 0:
+        a = np.atleast_1d(a)
+    if b.ndim == 0:
+        b = np.atleast_1d(b)
+
+    return a, b, outaxis
+
+
+def _shape_with_dropped_axis(a, axis):
+    """
+    Given an array `a` and an integer `axis`, return the shape
+    of `a` with the `axis` dimension removed.
+
+    Examples
+    --------
+    >>> a = np.zeros((3, 5, 2))
+    >>> _shape_with_dropped_axis(a, 1)
+    (3, 2)
+
+    """
+    shp = list(a.shape)
+    try:
+        del shp[axis]
+    except IndexError:
+        raise np.AxisError(axis, a.ndim) from None
+    return tuple(shp)
+
+
+def _broadcast_shapes(shape1, shape2):
+    """
+    Given two shapes (i.e. tuples of integers), return the shape
+    that would result from broadcasting two arrays with the given
+    shapes.
+
+    Examples
+    --------
+    >>> _broadcast_shapes((2, 1), (4, 1, 3))
+    (4, 2, 3)
+    """
+    d = len(shape1) - len(shape2)
+    if d <= 0:
+        shp1 = (1,)*(-d) + shape1
+        shp2 = shape2
+    else:
+        shp1 = shape1
+        shp2 = (1,)*d + shape2
+    shape = []
+    for n1, n2 in zip(shp1, shp2):
+        if n1 == 1:
+            n = n2
+        elif n2 == 1 or n1 == n2:
+            n = n1
+        else:
+            raise ValueError(f'shapes {shape1} and {shape2} could not be '
+                             'broadcast together')
+        shape.append(n)
+    return tuple(shape)
+
+
+def _broadcast_shapes_with_dropped_axis(a, b, axis):
+    """
+    Given two arrays `a` and `b` and an integer `axis`, find the
+    shape of the broadcast result after dropping `axis` from the
+    shapes of `a` and `b`.
+
+    Examples
+    --------
+    >>> a = np.zeros((5, 2, 1))
+    >>> b = np.zeros((1, 9, 3))
+    >>> _broadcast_shapes_with_dropped_axis(a, b, 1)
+    (5, 3)
+    """
+    shp1 = _shape_with_dropped_axis(a, axis)
+    shp2 = _shape_with_dropped_axis(b, axis)
+    try:
+        shp = _broadcast_shapes(shp1, shp2)
+    except ValueError:
+        raise ValueError(f'non-axis shapes {shp1} and {shp2} could not be '
+                         'broadcast together') from None
+    return shp
+
+
+SignificanceResult = _make_tuple_bunch('SignificanceResult',
+                                       ['statistic', 'pvalue'], [])
+
+
+# note that `weights` are paired with `x`
+@_axis_nan_policy_factory(
+        lambda x: x, n_samples=1, n_outputs=1, too_small=0, paired=True,
+        result_to_tuple=lambda x: (x,), kwd_samples=['weights'])
+def gmean(a, axis=0, dtype=None, weights=None):
+    r"""Compute the weighted geometric mean along the specified axis.
+
+    The weighted geometric mean of the array :math:`a_i` associated to weights
+    :math:`w_i` is:
+
+    .. math::
+
+        \exp \left( \frac{ \sum_{i=1}^n w_i \ln a_i }{ \sum_{i=1}^n w_i }
+                   \right) \, ,
+
+    and, with equal weights, it gives:
+
+    .. math::
+
+        \sqrt[n]{ \prod_{i=1}^n a_i } \, .
+
+    Parameters
+    ----------
+    a : array_like
+        Input array or object that can be converted to an array.
+    axis : int or None, optional
+        Axis along which the geometric mean is computed. Default is 0.
+        If None, compute over the whole array `a`.
+    dtype : dtype, optional
+        Type to which the input arrays are cast before the calculation is
+        performed.
+    weights : array_like, optional
+        The `weights` array must be broadcastable to the same shape as `a`.
+        Default is None, which gives each value a weight of 1.0.
+
+    Returns
+    -------
+    gmean : ndarray
+        See `dtype` parameter above.
+
+    See Also
+    --------
+    numpy.mean : Arithmetic average
+    numpy.average : Weighted average
+    hmean : Harmonic mean
+
+    References
+    ----------
+    .. [1] "Weighted Geometric Mean", *Wikipedia*,
+           https://en.wikipedia.org/wiki/Weighted_geometric_mean.
+
+    Examples
+    --------
+    >>> from scipy.stats import gmean
+    >>> gmean([1, 4])
+    2.0
+    >>> gmean([1, 2, 3, 4, 5, 6, 7])
+    3.3800151591412964
+    >>> gmean([1, 4, 7], weights=[3, 1, 3])
+    2.80668351922014
+
+    """
+
+    a = np.asarray(a, dtype=dtype)
+
+    if weights is not None:
+        weights = np.asarray(weights, dtype=dtype)
+
+    with np.errstate(divide='ignore'):
+        log_a = np.log(a)
+
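+    # exp of the (weighted) arithmetic mean of the logs; a zero in `a`
+    # contributes ``log(0) = -inf`` and gives a geometric mean of 0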
+    return np.exp(np.average(log_a, axis=axis, weights=weights))
+
+
+@_axis_nan_policy_factory(
+        lambda x: x, n_samples=1, n_outputs=1, too_small=0, paired=True,
+        result_to_tuple=lambda x: (x,), kwd_samples=['weights'])
+def hmean(a, axis=0, dtype=None, *, weights=None):
+    r"""Calculate the weighted harmonic mean along the specified axis.
+
+    The weighted harmonic mean of the array :math:`a_i` associated to weights
+    :math:`w_i` is:
+
+    .. math::
+
+        \frac{ \sum_{i=1}^n w_i }{ \sum_{i=1}^n \frac{w_i}{a_i} } \, ,
+
+    and, with equal weights, it gives:
+
+    .. math::
+
+        \frac{ n }{ \sum_{i=1}^n \frac{1}{a_i} } \, .
+
+    Parameters
+    ----------
+    a : array_like
+        Input array, masked array or object that can be converted to an array.
+    axis : int or None, optional
+        Axis along which the harmonic mean is computed. Default is 0.
+        If None, compute over the whole array `a`.
+    dtype : dtype, optional
+        Type of the returned array and of the accumulator in which the
+        elements are summed. If `dtype` is not specified, it defaults to the
+        dtype of `a`, unless `a` has an integer `dtype` with a precision less
+        than that of the default platform integer. In that case, the default
+        platform integer is used.
+    weights : array_like, optional
+        The weights array can either be 1-D (in which case its length must be
+        the size of `a` along the given `axis`) or of the same shape as `a`.
+        Default is None, which gives each value a weight of 1.0.
+
+        .. versionadded:: 1.9
+
+    Returns
+    -------
+    hmean : ndarray
+        See `dtype` parameter above.
+
+    See Also
+    --------
+    numpy.mean : Arithmetic average
+    numpy.average : Weighted average
+    gmean : Geometric mean
+
+    Notes
+    -----
+    The harmonic mean is computed over a single dimension of the input
+    array, axis=0 by default, or all values in the array if axis=None.
+    float64 intermediate and return values are used for integer inputs.
+
+    References
+    ----------
+    .. [1] "Weighted Harmonic Mean", *Wikipedia*,
+           https://en.wikipedia.org/wiki/Harmonic_mean#Weighted_harmonic_mean
+    .. [2] Ferger, F., "The nature and use of the harmonic mean", Journal of
+           the American Statistical Association, vol. 26, pp. 36-40, 1931
+
+    Examples
+    --------
+    >>> from scipy.stats import hmean
+    >>> hmean([1, 4])
+    1.6000000000000001
+    >>> hmean([1, 2, 3, 4, 5, 6, 7])
+    2.6997245179063363
+    >>> hmean([1, 4, 7], weights=[3, 1, 3])
+    1.9029126213592233
+
+    """
+    if not isinstance(a, np.ndarray):
+        a = np.array(a, dtype=dtype)
+    elif dtype:
+        # Must change the default dtype allowing array type
+        if isinstance(a, np.ma.MaskedArray):
+            a = np.ma.asarray(a, dtype=dtype)
+        else:
+            a = np.asarray(a, dtype=dtype)
+
+    if np.all(a >= 0):
+        # Harmonic mean only defined if greater than or equal to zero.
+        if weights is not None:
+            weights = np.asanyarray(weights, dtype=dtype)
+
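+        # reciprocal of the (weighted) mean of reciprocals; a zero in `a`
+        # contributes an infinite reciprocal and gives a harmonic mean of 0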
+        with np.errstate(divide='ignore'):
+            return 1.0 / np.average(1.0 / a, axis=axis, weights=weights)
+    else:
+        raise ValueError("Harmonic mean only defined if all elements greater "
+                         "than or equal to zero")
+
+
+@_axis_nan_policy_factory(
+        lambda x: x, n_samples=1, n_outputs=1, too_small=0, paired=True,
+        result_to_tuple=lambda x: (x,), kwd_samples=['weights'])
+def pmean(a, p, *, axis=0, dtype=None, weights=None):
+    r"""Calculate the weighted power mean along the specified axis.
+
+    The weighted power mean of the array :math:`a_i` associated to weights
+    :math:`w_i` is:
+
+    .. math::
+
+        \left( \frac{ \sum_{i=1}^n w_i a_i^p }{ \sum_{i=1}^n w_i }
+              \right)^{ 1 / p } \, ,
+
+    and, with equal weights, it gives:
+
+    .. math::
+
+        \left( \frac{ 1 }{ n } \sum_{i=1}^n a_i^p \right)^{ 1 / p }  \, .
+
+    This mean is also called generalized mean or Hölder mean, and must not be
+    confused with the Kolmogorov generalized mean, also called
+    quasi-arithmetic mean or generalized f-mean [3]_.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array, masked array or object that can be converted to an array.
+    p : int or float
+        Exponent.
+    axis : int or None, optional
+        Axis along which the power mean is computed. Default is 0.
+        If None, compute over the whole array `a`.
+    dtype : dtype, optional
+        Type of the returned array and of the accumulator in which the
+        elements are summed. If `dtype` is not specified, it defaults to the
+        dtype of `a`, unless `a` has an integer `dtype` with a precision less
+        than that of the default platform integer. In that case, the default
+        platform integer is used.
+    weights : array_like, optional
+        The weights array can either be 1-D (in which case its length must be
+        the size of `a` along the given `axis`) or of the same shape as `a`.
+        Default is None, which gives each value a weight of 1.0.
+
+    Returns
+    -------
+    pmean : ndarray, see `dtype` parameter above.
+        Output array containing the power mean values.
+
+    See Also
+    --------
+    numpy.average : Weighted average
+    gmean : Geometric mean
+    hmean : Harmonic mean
+
+    Notes
+    -----
+    The power mean is computed over a single dimension of the input
+    array, ``axis=0`` by default, or all values in the array if ``axis=None``.
+    float64 intermediate and return values are used for integer inputs.
+
+    .. versionadded:: 1.9
+
+    References
+    ----------
+    .. [1] "Generalized Mean", *Wikipedia*,
+           https://en.wikipedia.org/wiki/Generalized_mean
+    .. [2] Norris, N., "Convexity properties of generalized mean value
+           functions", The Annals of Mathematical Statistics, vol. 8,
+           pp. 118-120, 1937
+    .. [3] Bullen, P.S., Handbook of Means and Their Inequalities, 2003
+
+    Examples
+    --------
+    >>> from scipy.stats import pmean, hmean, gmean
+    >>> pmean([1, 4], 1.3)
+    2.639372938300652
+    >>> pmean([1, 2, 3, 4, 5, 6, 7], 1.3)
+    4.157111214492084
+    >>> pmean([1, 4, 7], -2, weights=[3, 1, 3])
+    1.4969684896631954
+
+    For p=-1, the power mean is equal to the harmonic mean:
+
+    >>> pmean([1, 4, 7], -1, weights=[3, 1, 3])
+    1.9029126213592233
+    >>> hmean([1, 4, 7], weights=[3, 1, 3])
+    1.9029126213592233
+
+    For p=0, the power mean is defined as the geometric mean:
+
+    >>> pmean([1, 4, 7], 0, weights=[3, 1, 3])
+    2.80668351922014
+    >>> gmean([1, 4, 7], weights=[3, 1, 3])
+    2.80668351922014
+
+    """
+    if not isinstance(p, (int, float)):
+        raise ValueError("Power mean only defined for exponent of type int or "
+                         "float.")
+    if p == 0:
+        return gmean(a, axis=axis, dtype=dtype, weights=weights)
+
+    if not isinstance(a, np.ndarray):
+        a = np.array(a, dtype=dtype)
+    elif dtype:
+        # Must change the default dtype allowing array type
+        if isinstance(a, np.ma.MaskedArray):
+            a = np.ma.asarray(a, dtype=dtype)
+        else:
+            a = np.asarray(a, dtype=dtype)
+
+    if np.all(a >= 0):
+        # Power mean only defined if greater than or equal to zero
+        if weights is not None:
+            weights = np.asanyarray(weights, dtype=dtype)
+
+        with np.errstate(divide='ignore'):
+            return np.float_power(
+                np.average(np.float_power(a, p), axis=axis, weights=weights),
+                1/p)
+    else:
+        raise ValueError("Power mean only defined if all elements greater "
+                         "than or equal to zero")
+
+
+ModeResult = namedtuple('ModeResult', ('mode', 'count'))
+
+
+def mode(a, axis=0, nan_policy='propagate', keepdims=None):
+    r"""Return an array of the modal (most common) value in the passed array.
+
+    If there is more than one such value, only one is returned.
+    The count of the modal value is also returned.
+
+    Parameters
+    ----------
+    a : array_like
+        n-dimensional array of which to find mode(s).
+    axis : int or None, optional
+        Axis along which to operate. Default is 0. If None, compute over
+        the whole array `a`.
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains nan.
+        The following options are available (default is 'propagate'):
+
+          * 'propagate': treats nan as it would treat any other value
+          * 'raise': throws an error
+          * 'omit': performs the calculations ignoring nan values
+    keepdims : bool, optional
+        If set to ``False``, the `axis` over which the statistic is taken
+        is consumed (eliminated from the output array) like other reduction
+        functions (e.g. `skew`, `kurtosis`). If set to ``True``, the `axis` is
+        retained with size one, and the result will broadcast correctly
+        against the input array. The default, ``None``, is undefined legacy
+        behavior retained for backward compatibility.
+
+        .. warning::
+            Unlike other reduction functions (e.g. `skew`, `kurtosis`), the
+            default behavior of `mode` usually retains the axis it acts
+            along. In SciPy 1.11.0, this behavior will change: the default
+            value of `keepdims` will become ``False``, the `axis` over which
+            the statistic is taken will be eliminated, and the value ``None``
+            will no longer be accepted.
+        .. versionadded:: 1.9.0
+
+    Returns
+    -------
+    mode : ndarray
+        Array of modal values.
+    count : ndarray
+        Array of counts for each mode.
+
+    Notes
+    -----
+    The mode of object arrays is calculated using `collections.Counter`, which
+    treats NaNs with different binary representations as distinct.
+
+    .. deprecated:: 1.9.0
+        Support for non-numeric arrays has been deprecated as of SciPy 1.9.0
+        and will be removed in 1.11.0. `pandas.DataFrame.mode`_ can
+        be used instead.
+
+        .. _pandas.DataFrame.mode: https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.mode.html
+
+    The mode of arrays with other dtypes is calculated using `numpy.unique`.
+    In NumPy versions 1.21 and after, all NaNs - even those with different
+    binary representations - are treated as equivalent and counted together
+    as instances of the same value.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array([[3, 0, 3, 7],
+    ...               [3, 2, 6, 2],
+    ...               [1, 7, 2, 8],
+    ...               [3, 0, 6, 1],
+    ...               [3, 2, 5, 5]])
+    >>> from scipy import stats
+    >>> stats.mode(a, keepdims=True)
+    ModeResult(mode=array([[3, 0, 6, 1]]), count=array([[4, 2, 2, 1]]))
+
+    To get mode of whole array, specify ``axis=None``:
+
+    >>> stats.mode(a, axis=None, keepdims=True)
+    ModeResult(mode=[3], count=[5])
+    >>> stats.mode(a, axis=None, keepdims=False)
+    ModeResult(mode=3, count=5)
+
+    """  # noqa: E501
+
+    if keepdims is None:
+        message = ("Unlike other reduction functions (e.g. `skew`, "
+                   "`kurtosis`), the default behavior of `mode` typically "
+                   "preserves the axis it acts along. In SciPy 1.11.0, "
+                   "this behavior will change: the default value of "
+                   "`keepdims` will become False, the `axis` over which "
+                   "the statistic is taken will be eliminated, and the value "
+                   "None will no longer be accepted. "
+                   "Set `keepdims` to True or False to avoid this warning.")
+        warnings.warn(message, FutureWarning, stacklevel=2)
+
+    a = np.asarray(a)
+    if a.size == 0:
+        if keepdims is None:
+            return ModeResult(np.array([]), np.array([]))
+        else:
+            # this is tricky to get right; let np.mean do it
+            out = np.mean(a, axis=axis, keepdims=keepdims)
+            return ModeResult(out, out.copy())
+
+    a, axis = _chk_asarray(a, axis)
+
+    contains_nan, nan_policy = _contains_nan(a, nan_policy)
+
+    if contains_nan and nan_policy == 'omit':
+        a = ma.masked_invalid(a)
+        return mstats_basic._mode(a, axis, keepdims=keepdims)
+
+    if not np.issubdtype(a.dtype, np.number):
+        warnings.warn("Support for non-numeric arrays has been deprecated "
+                      "as of SciPy 1.9.0 and will be removed in "
+                      "1.11.0. `pandas.DataFrame.mode` can be used instead, "
+                      "see https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.mode.html.",  # noqa: E501
+                      DeprecationWarning, stacklevel=2)
+
+    if a.dtype == object:
+        def _mode1D(a):
+            cntr = Counter(a)
+            mode = max(cntr, key=lambda x: cntr[x])
+            return mode, cntr[mode]
+    else:
+        def _mode1D(a):
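+            # np.unique returns values in sorted order, so when several
+            # values tie for the highest count, argmax picks the smallest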
+            vals, cnts = np.unique(a, return_counts=True)
+            return vals[cnts.argmax()], cnts.max()
+
+    # np.apply_along_axis will convert the _mode1D tuples to a numpy array,
+    # casting types in the process.
+    # This recreates the results without that issue
+    # View of a, rotated so the requested axis is last
+    a_view = np.moveaxis(a, axis, -1)
+
+    inds = np.ndindex(a_view.shape[:-1])
+    modes = np.empty(a_view.shape[:-1], dtype=a.dtype)
+    counts = np.empty(a_view.shape[:-1], dtype=np.int_)
+    for ind in inds:
+        modes[ind], counts[ind] = _mode1D(a_view[ind])
+
+    if keepdims is None or keepdims:
+        newshape = list(a.shape)
+        newshape[axis] = 1
+        return ModeResult(modes.reshape(newshape), counts.reshape(newshape))
+    else:
+        return ModeResult(modes[()], counts[()])
+
+
+def _mask_to_limits(a, limits, inclusive):
+    """Mask an array for values outside of given limits.
+
+    This is primarily a utility function.
+
+    Parameters
+    ----------
+    a : array
+    limits : (float or None, float or None)
+        A tuple consisting of the (lower limit, upper limit).  Values in the
+        input array less than the lower limit or greater than the upper limit
+        will be masked out. None implies no limit.
+    inclusive : (bool, bool)
+        A tuple consisting of the (lower flag, upper flag).  These flags
+        determine whether values exactly equal to lower or upper are allowed.
+
+    Returns
+    -------
+    A MaskedArray.
+
+    Raises
+    ------
+    A ValueError if there are no values within the given limits.
+
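+    Examples
+    --------
+    An illustrative sketch (assuming this private helper is called
+    directly); values outside the closed interval [1, 3] are masked:
+
+    >>> import numpy as np
+    >>> _mask_to_limits(np.arange(5), (1, 3), (True, True)).compressed()
+    array([1, 2, 3])
+
+    With exclusive flags, the limit values themselves are masked as well:
+
+    >>> _mask_to_limits(np.arange(5), (1, 3), (False, False)).compressed()
+    array([2])
+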
+    """
+    lower_limit, upper_limit = limits
+    lower_include, upper_include = inclusive
+    am = ma.MaskedArray(a)
+    if lower_limit is not None:
+        if lower_include:
+            am = ma.masked_less(am, lower_limit)
+        else:
+            am = ma.masked_less_equal(am, lower_limit)
+
+    if upper_limit is not None:
+        if upper_include:
+            am = ma.masked_greater(am, upper_limit)
+        else:
+            am = ma.masked_greater_equal(am, upper_limit)
+
+    if am.count() == 0:
+        raise ValueError("No array values within given limits")
+
+    return am
+
+
+def tmean(a, limits=None, inclusive=(True, True), axis=None):
+    """Compute the trimmed mean.
+
+    This function finds the arithmetic mean of given values, ignoring values
+    outside the given `limits`.
+
+    Parameters
+    ----------
+    a : array_like
+        Array of values.
+    limits : None or (lower limit, upper limit), optional
+        Values in the input array less than the lower limit or greater than the
+        upper limit will be ignored.  When limits is None (default), then all
+        values are used.  Either of the limit values in the tuple can also be
+        None representing a half-open interval.
+    inclusive : (bool, bool), optional
+        A tuple consisting of the (lower flag, upper flag).  These flags
+        determine whether values exactly equal to the lower or upper limits
+        are included.  The default value is (True, True).
+    axis : int or None, optional
+        Axis along which to operate. Default is None.
+
+    Returns
+    -------
+    tmean : ndarray
+        Trimmed mean.
+
+    See Also
+    --------
+    trim_mean : Returns mean after trimming a proportion from both tails.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> x = np.arange(20)
+    >>> stats.tmean(x)
+    9.5
+    >>> stats.tmean(x, (3,17))
+    10.0
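+
+    Either limit may be ``None``, giving a half-open interval; for example,
+    ignoring values greater than 17 (a small illustrative sketch):
+
+    >>> stats.tmean(x, (None, 17))
+    8.5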
+
+    """
+    a = asarray(a)
+    if limits is None:
+        return np.mean(a, axis)
+    am = _mask_to_limits(a, limits, inclusive)
+    mean = np.ma.filled(am.mean(axis=axis), fill_value=np.nan)
+    return mean if mean.ndim > 0 else mean.item()
+
+
+def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
+    """Compute the trimmed variance.
+
+    This function computes the sample variance of an array of values,
+    while ignoring values which are outside of given `limits`.
+
+    Parameters
+    ----------
+    a : array_like
+        Array of values.
+    limits : None or (lower limit, upper limit), optional
+        Values in the input array less than the lower limit or greater than the
+        upper limit will be ignored. When limits is None, then all values are
+        used. Either of the limit values in the tuple can also be None
+        representing a half-open interval.  The default value is None.
+    inclusive : (bool, bool), optional
+        A tuple consisting of the (lower flag, upper flag).  These flags
+        determine whether values exactly equal to the lower or upper limits
+        are included.  The default value is (True, True).
+    axis : int or None, optional
+        Axis along which to operate. Default is 0. If None, compute over the
+        whole array `a`.
+    ddof : int, optional
+        Delta degrees of freedom.  Default is 1.
+
+    Returns
+    -------
+    tvar : float
+        Trimmed variance.
+
+    Notes
+    -----
+    `tvar` computes the unbiased sample variance, i.e. it uses a correction
+    factor ``n / (n - 1)``.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> x = np.arange(20)
+    >>> stats.tvar(x)
+    35.0
+    >>> stats.tvar(x, (3,17))
+    20.0
+
+    """
+    a = asarray(a)
+    a = a.astype(float)
+    if limits is None:
+        return a.var(ddof=ddof, axis=axis)
+    am = _mask_to_limits(a, limits, inclusive)
+    amnan = am.filled(fill_value=np.nan)
+    return np.nanvar(amnan, ddof=ddof, axis=axis)
+
+
+def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
+    """Compute the trimmed minimum.
+
+    This function finds the minimum value of an array `a` along the
+    specified axis, but only considering values greater than a specified
+    lower limit.
+
+    Parameters
+    ----------
+    a : array_like
+        Array of values.
+    lowerlimit : None or float, optional
+        Values in the input array less than the given limit will be ignored.
+        When lowerlimit is None, then all values are used. The default value
+        is None.
+    axis : int or None, optional
+        Axis along which to operate. Default is 0. If None, compute over the
+        whole array `a`.
+    inclusive : {True, False}, optional
+        This flag determines whether values exactly equal to the lower limit
+        are included.  The default value is True.
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains nan.
+        The following options are available (default is 'propagate'):
+
+          * 'propagate': returns nan
+          * 'raise': throws an error
+          * 'omit': performs the calculations ignoring nan values
+
+    Returns
+    -------
+    tmin : float, int or ndarray
+        Trimmed minimum.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> x = np.arange(20)
+    >>> stats.tmin(x)
+    0
+
+    >>> stats.tmin(x, 13)
+    13
+
+    >>> stats.tmin(x, 13, inclusive=False)
+    14
+
+    """
+    a, axis = _chk_asarray(a, axis)
+    am = _mask_to_limits(a, (lowerlimit, None), (inclusive, False))
+
+    contains_nan, nan_policy = _contains_nan(am, nan_policy)
+
+    if contains_nan and nan_policy == 'omit':
+        am = ma.masked_invalid(am)
+
+    res = ma.minimum.reduce(am, axis).data
+    if res.ndim == 0:
+        return res[()]
+    return res
+
+
+def tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
+    """Compute the trimmed maximum.
+
+    This function computes the maximum value of an array along a given axis,
+    while ignoring values larger than a specified upper limit.
+
+    Parameters
+    ----------
+    a : array_like
+        Array of values.
+    upperlimit : None or float, optional
+        Values in the input array greater than the given limit will be ignored.
+        When upperlimit is None, then all values are used. The default value
+        is None.
+    axis : int or None, optional
+        Axis along which to operate. Default is 0. If None, compute over the
+        whole array `a`.
+    inclusive : {True, False}, optional
+        This flag determines whether values exactly equal to the upper limit
+        are included.  The default value is True.
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains nan.
+        The following options are available (default is 'propagate'):
+
+          * 'propagate': returns nan
+          * 'raise': throws an error
+          * 'omit': performs the calculations ignoring nan values
+
+    Returns
+    -------
+    tmax : float, int or ndarray
+        Trimmed maximum.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> x = np.arange(20)
+    >>> stats.tmax(x)
+    19
+
+    >>> stats.tmax(x, 13)
+    13
+
+    >>> stats.tmax(x, 13, inclusive=False)
+    12
+
+    """
+    a, axis = _chk_asarray(a, axis)
+    am = _mask_to_limits(a, (None, upperlimit), (False, inclusive))
+
+    contains_nan, nan_policy = _contains_nan(am, nan_policy)
+
+    if contains_nan and nan_policy == 'omit':
+        am = ma.masked_invalid(am)
+
+    res = ma.maximum.reduce(am, axis).data
+    if res.ndim == 0:
+        return res[()]
+    return res
+
+
+def tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
+    """Compute the trimmed sample standard deviation.
+
+    This function finds the sample standard deviation of given values,
+    ignoring values outside the given `limits`.
+
+    Parameters
+    ----------
+    a : array_like
+        Array of values.
+    limits : None or (lower limit, upper limit), optional
+        Values in the input array less than the lower limit or greater than the
+        upper limit will be ignored. When limits is None, then all values are
+        used. Either of the limit values in the tuple can also be None
+        representing a half-open interval.  The default value is None.
+    inclusive : (bool, bool), optional
+        A tuple consisting of the (lower flag, upper flag).  These flags
+        determine whether values exactly equal to the lower or upper limits
+        are included.  The default value is (True, True).
+    axis : int or None, optional
+        Axis along which to operate. Default is 0. If None, compute over the
+        whole array `a`.
+    ddof : int, optional
+        Delta degrees of freedom.  Default is 1.
+
+    Returns
+    -------
+    tstd : float
+        Trimmed sample standard deviation.
+
+    Notes
+    -----
+    `tstd` computes the unbiased sample standard deviation, i.e. it uses a
+    correction factor ``n / (n - 1)``.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> x = np.arange(20)
+    >>> stats.tstd(x)
+    5.9160797830996161
+    >>> stats.tstd(x, (3,17))
+    4.4721359549995796
+
+    """
+    return np.sqrt(tvar(a, limits, inclusive, axis, ddof))
+
+
+def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
+    """Compute the trimmed standard error of the mean.
+
+    This function finds the standard error of the mean for given
+    values, ignoring values outside the given `limits`.
+
+    Parameters
+    ----------
+    a : array_like
+        Array of values.
+    limits : None or (lower limit, upper limit), optional
+        Values in the input array less than the lower limit or greater than the
+        upper limit will be ignored. When limits is None, then all values are
+        used. Either of the limit values in the tuple can also be None
+        representing a half-open interval.  The default value is None.
+    inclusive : (bool, bool), optional
+        A tuple consisting of the (lower flag, upper flag).  These flags
+        determine whether values exactly equal to the lower or upper limits
+        are included.  The default value is (True, True).
+    axis : int or None, optional
+        Axis along which to operate. Default is 0. If None, compute over the
+        whole array `a`.
+    ddof : int, optional
+        Delta degrees of freedom.  Default is 1.
+
+    Returns
+    -------
+    tsem : float
+        Trimmed standard error of the mean.
+
+    Notes
+    -----
+    `tsem` uses unbiased sample standard deviation, i.e. it uses a
+    correction factor ``n / (n - 1)``.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> x = np.arange(20)
+    >>> stats.tsem(x)
+    1.3228756555322954
+    >>> stats.tsem(x, (3,17))
+    1.1547005383792515
+
+    """
+    a = np.asarray(a).ravel()
+    if limits is None:
+        return a.std(ddof=ddof) / np.sqrt(a.size)
+
+    am = _mask_to_limits(a, limits, inclusive)
+    sd = np.sqrt(np.ma.var(am, ddof=ddof, axis=axis))
+    return sd / np.sqrt(am.count())
+
+
+#####################################
+#              MOMENTS              #
+#####################################
+
+
+def _moment_outputs(kwds):
+    moment = np.atleast_1d(kwds.get('moment', 1))
+    if moment.size == 0:
+        raise ValueError("'moment' must be a scalar or a non-empty 1D "
+                         "list/array.")
+    return len(moment)
+
+
+def _moment_result_object(*args):
+    if len(args) == 1:
+        return args[0]
+    return np.asarray(args)
+
+# `moment` fits into the `_axis_nan_policy` pattern, but it is a bit unusual
+# because the number of outputs is variable. Specifically,
+# `result_to_tuple=lambda x: (x,)` may be surprising for a function that
+# can produce more than one output, but it is intended here.
+# When `moment` is called to produce the output:
+# - `result_to_tuple` packs the returned array into a single-element tuple,
+# - `_moment_result_object` extracts and returns that single element.
+# However, when the input array is empty, `moment` is never called. Instead,
+# - `_check_empty_inputs` is used to produce an empty array with the
+#   appropriate dimensions.
+# - A list comprehension creates the appropriate number of copies of this
+#   array, depending on `n_outputs`.
+# - This list - which may have multiple elements - is passed into
+#   `_moment_result_object`.
+# - If there is a single output, `_moment_result_object` extracts and returns
+#   the single output from the list.
+# - If there are multiple outputs, and therefore multiple elements in the list,
+#   `_moment_result_object` converts the list of arrays to a single array and
+#   returns it.
+# Currently this leads to a slight inconsistency: when the input array is
+# empty, there is no distinction between the `moment` function being called
+# with parameter `moment=1` and `moment=[1]`; the latter *should* produce
+# the same as the former but with a singleton zeroth dimension.
+@_axis_nan_policy_factory(  # noqa: E302
+    _moment_result_object, n_samples=1, result_to_tuple=lambda x: (x,),
+    n_outputs=_moment_outputs
+)
+def moment(a, moment=1, axis=0, nan_policy='propagate'):
+    r"""Calculate the nth moment about the mean for a sample.
+
+    A moment is a specific quantitative measure of the shape of a set of
+    points. It is often used to calculate coefficients of skewness and kurtosis
+    due to its close relationship with them.
+
+    Parameters
+    ----------
+    a : array_like
+       Input array.
+    moment : int or array_like of ints, optional
+       Order of central moment that is returned. Default is 1.
+    axis : int or None, optional
+       Axis along which the central moment is computed. Default is 0.
+       If None, compute over the whole array `a`.
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains nan.
+        The following options are available (default is 'propagate'):
+
+          * 'propagate': returns nan
+          * 'raise': throws an error
+          * 'omit': performs the calculations ignoring nan values
+
+    Returns
+    -------
+    n-th central moment : ndarray or float
+       The appropriate moment along the given axis or over all values if axis
+       is None. The denominator for the moment calculation is the number of
+       observations, no degrees of freedom correction is done.
+
+    See Also
+    --------
+    kurtosis, skew, describe
+
+    Notes
+    -----
+    The k-th central moment of a data sample is:
+
+    .. math::
+
+        m_k = \frac{1}{n} \sum_{i = 1}^n (x_i - \bar{x})^k
+
+    Where n is the number of samples and x-bar is the mean. This function uses
+    exponentiation by squares [1]_ for efficiency.
+
+    Note that, if `a` is an empty array (``a.size == 0``), array `moment` with
+    one element (``moment.size == 1``) is treated the same as scalar `moment`
+    (``np.isscalar(moment)``). This might produce arrays of unexpected shape.
+
+    References
+    ----------
+    .. [1] https://eli.thegreenplace.net/2009/03/21/efficient-integer-exponentiation-algorithms
+
+    Examples
+    --------
+    >>> from scipy.stats import moment
+    >>> moment([1, 2, 3, 4, 5], moment=1)
+    0.0
+    >>> moment([1, 2, 3, 4, 5], moment=2)
+    2.0
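+
+    The order may also be passed as a list, returning one value per order
+    (a brief sketch of that usage):
+
+    >>> moment([1, 2, 3, 4, 5], moment=[1, 2])
+    array([0., 2.])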
+
+    """
+    a, axis = _chk_asarray(a, axis)
+
+    contains_nan, nan_policy = _contains_nan(a, nan_policy)
+
+    if contains_nan and nan_policy == 'omit':
+        a = ma.masked_invalid(a)
+        return mstats_basic.moment(a, moment, axis)
+
+    # for array_like moment input, return a value for each.
+    if not np.isscalar(moment):
+        mean = a.mean(axis, keepdims=True)
+        mmnt = [_moment(a, i, axis, mean=mean) for i in moment]
+        return np.array(mmnt)
+    else:
+        return _moment(a, moment, axis)
+
+
+# Moment with optional pre-computed mean, equal to a.mean(axis, keepdims=True)
+def _moment(a, moment, axis, *, mean=None):
+    if np.abs(moment - np.round(moment)) > 0:
+        raise ValueError("All moment parameters must be integers")
+
+    # moment of empty array is the same regardless of order
+    if a.size == 0:
+        return np.mean(a, axis=axis)
+
+    if moment == 0 or moment == 1:
+        # By definition the zeroth moment about the mean is 1, and the first
+        # moment is 0.
+        shape = list(a.shape)
+        del shape[axis]
+        dtype = a.dtype.type if a.dtype.kind in 'fc' else np.float64
+
+        if len(shape) == 0:
+            return dtype(1.0 if moment == 0 else 0.0)
+        else:
+            return (np.ones(shape, dtype=dtype) if moment == 0
+                    else np.zeros(shape, dtype=dtype))
+    else:
+        # Exponentiation by squares: form exponent sequence
+        n_list = [moment]
+        current_n = moment
+        while current_n > 2:
+            if current_n % 2:
+                current_n = (current_n - 1) / 2
+            else:
+                current_n /= 2
+            n_list.append(current_n)
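+        # e.g. moment=5 gives n_list=[5, 2]: with d = a - mean, start from
+        # d**2, square it to d**4, then multiply by d once more -> d**5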
+
+        # Starting point for exponentiation by squares
+        mean = a.mean(axis, keepdims=True) if mean is None else mean
+        a_zero_mean = a - mean
+
+        eps = np.finfo(a_zero_mean.dtype).resolution * 10
+        with np.errstate(divide='ignore', invalid='ignore'):
+            rel_diff = np.max(np.abs(a_zero_mean), axis=axis,
+                              keepdims=True) / np.abs(mean)
+        with np.errstate(invalid='ignore'):
+            precision_loss = np.any(rel_diff < eps)
+        if precision_loss:
+            message = ("Precision loss occurred in moment calculation due to "
+                       "catastrophic cancellation. This occurs when the data "
+                       "are nearly identical. Results may be unreliable.")
+            warnings.warn(message, RuntimeWarning, stacklevel=4)
+
+        if n_list[-1] == 1:
+            s = a_zero_mean.copy()
+        else:
+            s = a_zero_mean**2
+
+        # Perform multiplications
+        for n in n_list[-2::-1]:
+            s = s**2
+            if n % 2:
+                s *= a_zero_mean
+        return np.mean(s, axis)
+
+
+def _var(x, axis=0, ddof=0, mean=None):
+    # Calculate variance of sample, warning if precision is lost
+    var = _moment(x, 2, axis, mean=mean)
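+    # _moment computes the biased (denominator n) variance; rescale it to
+    # the requested delta degrees of freedom below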
+    if ddof != 0:
+        n = x.shape[axis] if axis is not None else x.size
+        var *= np.divide(n, n-ddof)  # to avoid error on division by zero
+    return var
+
+
+@_axis_nan_policy_factory(
+    lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1
+)
+def skew(a, axis=0, bias=True, nan_policy='propagate'):
+    r"""Compute the sample skewness of a data set.
+
+    For normally distributed data, the skewness should be about zero. For
+    unimodal continuous distributions, a skewness value greater than zero means
+    that there is more weight in the right tail of the distribution. The
+    function `skewtest` can be used to determine if the skewness value
+    is close enough to zero, statistically speaking.
+
+    Parameters
+    ----------
+    a : ndarray
+        Input array.
+    axis : int or None, optional
+        Axis along which skewness is calculated. Default is 0.
+        If None, compute over the whole array `a`.
+    bias : bool, optional
+        If False, then the calculations are corrected for statistical bias.
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains nan.
+        The following options are available (default is 'propagate'):
+
+          * 'propagate': returns nan
+          * 'raise': throws an error
+          * 'omit': performs the calculations ignoring nan values
+
+    Returns
+    -------
+    skewness : ndarray
+        The skewness of values along an axis, returning NaN where all values
+        are equal.
+
+    Notes
+    -----
+    The sample skewness is computed as the Fisher-Pearson coefficient
+    of skewness, i.e.
+
+    .. math::
+
+        g_1=\frac{m_3}{m_2^{3/2}}
+
+    where
+
+    .. math::
+
+        m_i=\frac{1}{N}\sum_{n=1}^N(x[n]-\bar{x})^i
+
+    is the biased sample :math:`i\texttt{th}` central moment, and
+    :math:`\bar{x}` is
+    the sample mean.  If ``bias`` is False, the calculations are
+    corrected for bias and the value computed is the adjusted
+    Fisher-Pearson standardized moment coefficient, i.e.
+
+    .. math::
+
+        G_1=\frac{k_3}{k_2^{3/2}}=
+            \frac{\sqrt{N(N-1)}}{N-2}\frac{m_3}{m_2^{3/2}}.
+
+    References
+    ----------
+    .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
+       Probability and Statistics Tables and Formulae. Chapman & Hall: New
+       York. 2000.
+       Section 2.2.24.1
+
+    Examples
+    --------
+    >>> from scipy.stats import skew
+    >>> skew([1, 2, 3, 4, 5])
+    0.0
+    >>> skew([2, 8, 0, 4, 1, 9, 9, 0])
+    0.2650554122698573
+
+    """
+    a, axis = _chk_asarray(a, axis)
+    n = a.shape[axis]
+
+    contains_nan, nan_policy = _contains_nan(a, nan_policy)
+
+    if contains_nan and nan_policy == 'omit':
+        a = ma.masked_invalid(a)
+        return mstats_basic.skew(a, axis, bias)
+
+    mean = a.mean(axis, keepdims=True)
+    m2 = _moment(a, 2, axis, mean=mean)
+    m3 = _moment(a, 3, axis, mean=mean)
+    with np.errstate(all='ignore'):
+        zero = (m2 <= (np.finfo(m2.dtype).resolution * mean.squeeze(axis))**2)
+        vals = np.where(zero, np.nan, m3 / m2**1.5)
+    if not bias:
+        can_correct = ~zero & (n > 2)
+        if can_correct.any():
+            m2 = np.extract(can_correct, m2)
+            m3 = np.extract(can_correct, m3)
+            nval = np.sqrt((n - 1.0) * n) / (n - 2.0) * m3 / m2**1.5
+            np.place(vals, can_correct, nval)
+
+    if vals.ndim == 0:
+        return vals.item()
+
+    return vals
+
+
+@_axis_nan_policy_factory(
+    lambda x: x, result_to_tuple=lambda x: (x,), n_outputs=1
+)
+def kurtosis(a, axis=0, fisher=True, bias=True, nan_policy='propagate'):
+    """Compute the kurtosis (Fisher or Pearson) of a dataset.
+
+    Kurtosis is the fourth central moment divided by the square of the
+    variance. If Fisher's definition is used, then 3.0 is subtracted from
+    the result to give 0.0 for a normal distribution.
+
+    If bias is False then the kurtosis is calculated using k statistics to
+    eliminate bias coming from biased moment estimators.
+
+    Use `kurtosistest` to see if result is close enough to normal.
+
+    Parameters
+    ----------
+    a : array
+        Data for which the kurtosis is calculated.
+    axis : int or None, optional
+        Axis along which the kurtosis is calculated. Default is 0.
+        If None, compute over the whole array `a`.
+    fisher : bool, optional
+        If True, Fisher's definition is used (normal ==> 0.0). If False,
+        Pearson's definition is used (normal ==> 3.0).
+    bias : bool, optional
+        If False, then the calculations are corrected for statistical bias.
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains nan. 'propagate' returns nan,
+        'raise' throws an error, 'omit' performs the calculations ignoring nan
+        values. Default is 'propagate'.
+
+    Returns
+    -------
+    kurtosis : array
+        The kurtosis of values along an axis, returning NaN where all values
+        are equal.
+
+    References
+    ----------
+    .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
+       Probability and Statistics Tables and Formulae. Chapman & Hall: New
+       York. 2000.
+
+    Examples
+    --------
+    In Fisher's definition, the kurtosis of the normal distribution is zero.
+    In the following example, the kurtosis is close to zero, because it was
+    calculated from the dataset, not from the continuous distribution.
+
+    >>> import numpy as np
+    >>> from scipy.stats import norm, kurtosis
+    >>> data = norm.rvs(size=1000, random_state=3)
+    >>> kurtosis(data)
+    -0.06928694200380558
+
+    The distribution with a higher kurtosis has a heavier tail.
+    The zero valued kurtosis of the normal distribution in Fisher's definition
+    can serve as a reference point.
+
+    >>> import matplotlib.pyplot as plt
+    >>> import scipy.stats as stats
+    >>> from scipy.stats import kurtosis
+
+    >>> x = np.linspace(-5, 5, 100)
+    >>> ax = plt.subplot()
+    >>> distnames = ['laplace', 'norm', 'uniform']
+
+    >>> for distname in distnames:
+    ...     if distname == 'uniform':
+    ...         dist = getattr(stats, distname)(loc=-2, scale=4)
+    ...     else:
+    ...         dist = getattr(stats, distname)
+    ...     data = dist.rvs(size=1000)
+    ...     kur = kurtosis(data, fisher=True)
+    ...     y = dist.pdf(x)
+    ...     ax.plot(x, y, label="{}, {}".format(distname, round(kur, 3)))
+    ...     ax.legend()
+
+    The Laplace distribution has a heavier tail than the normal distribution.
+    The uniform distribution (which has negative kurtosis) has the thinnest
+    tail.
+
+    """
+    a, axis = _chk_asarray(a, axis)
+
+    contains_nan, nan_policy = _contains_nan(a, nan_policy)
+
+    if contains_nan and nan_policy == 'omit':
+        a = ma.masked_invalid(a)
+        return mstats_basic.kurtosis(a, axis, fisher, bias)
+
+    n = a.shape[axis]
+    mean = a.mean(axis, keepdims=True)
+    m2 = _moment(a, 2, axis, mean=mean)
+    m4 = _moment(a, 4, axis, mean=mean)
+    with np.errstate(all='ignore'):
+        zero = (m2 <= (np.finfo(m2.dtype).resolution * mean.squeeze(axis))**2)
+        vals = np.where(zero, np.nan, m4 / m2**2.0)
+
+    if not bias:
+        can_correct = ~zero & (n > 3)
+        if can_correct.any():
+            m2 = np.extract(can_correct, m2)
+            m4 = np.extract(can_correct, m4)
+            nval = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4/m2**2.0 - 3*(n-1)**2.0)
+            np.place(vals, can_correct, nval + 3.0)
+
+    if vals.ndim == 0:
+        vals = vals.item()  # array scalar
+
+    return vals - 3 if fisher else vals
+
+
+DescribeResult = namedtuple('DescribeResult',
+                            ('nobs', 'minmax', 'mean', 'variance', 'skewness',
+                             'kurtosis'))
+
+
+def describe(a, axis=0, ddof=1, bias=True, nan_policy='propagate'):
+    """Compute several descriptive statistics of the passed array.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data.
+    axis : int or None, optional
+        Axis along which statistics are calculated. Default is 0.
+        If None, compute over the whole array `a`.
+    ddof : int, optional
+        Delta degrees of freedom (only for variance).  Default is 1.
+    bias : bool, optional
+        If False, then the skewness and kurtosis calculations are corrected
+        for statistical bias.
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains nan.
+        The following options are available (default is 'propagate'):
+
+        * 'propagate': returns nan
+        * 'raise': throws an error
+        * 'omit': performs the calculations ignoring nan values
+
+    Returns
+    -------
+    nobs : int or ndarray of ints
+        Number of observations (length of data along `axis`).
+        When 'omit' is chosen as nan_policy, the length along each axis
+        slice is counted separately.
+    minmax : tuple of ndarrays or floats
+        Minimum and maximum value of `a` along the given axis.
+    mean : ndarray or float
+        Arithmetic mean of `a` along the given axis.
+    variance : ndarray or float
+        Unbiased variance of `a` along the given axis; denominator is number
+        of observations minus one.
+    skewness : ndarray or float
+        Skewness of `a` along the given axis, based on moment calculations
+        with denominator equal to the number of observations, i.e. no degrees
+        of freedom correction.
+    kurtosis : ndarray or float
+        Kurtosis (Fisher) of `a` along the given axis.  The kurtosis is
+        normalized so that it is zero for the normal distribution.  No
+        degrees of freedom are used.
+
+    See Also
+    --------
+    skew, kurtosis
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> a = np.arange(10)
+    >>> stats.describe(a)
+    DescribeResult(nobs=10, minmax=(0, 9), mean=4.5,
+                   variance=9.166666666666666, skewness=0.0,
+                   kurtosis=-1.2242424242424244)
+    >>> b = [[1, 2], [3, 4]]
+    >>> stats.describe(b)
+    DescribeResult(nobs=2, minmax=(array([1, 2]), array([3, 4])),
+                   mean=array([2., 3.]), variance=array([2., 2.]),
+                   skewness=array([0., 0.]), kurtosis=array([-2., -2.]))
+
+    """
+    a, axis = _chk_asarray(a, axis)
+
+    contains_nan, nan_policy = _contains_nan(a, nan_policy)
+
+    if contains_nan and nan_policy == 'omit':
+        a = ma.masked_invalid(a)
+        return mstats_basic.describe(a, axis, ddof, bias)
+
+    if a.size == 0:
+        raise ValueError("The input must not be empty.")
+    n = a.shape[axis]
+    mm = (np.min(a, axis=axis), np.max(a, axis=axis))
+    m = np.mean(a, axis=axis)
+    v = _var(a, axis=axis, ddof=ddof)
+    sk = skew(a, axis, bias=bias)
+    kurt = kurtosis(a, axis, bias=bias)
+
+    return DescribeResult(n, mm, m, v, sk, kurt)
+
+#####################################
+#         NORMALITY TESTS           #
+#####################################
+
+
+def _normtest_finish(z, alternative):
+    """Common code between all the normality-test functions."""
+    if alternative == 'less':
+        prob = distributions.norm.cdf(z)
+    elif alternative == 'greater':
+        prob = distributions.norm.sf(z)
+    elif alternative == 'two-sided':
+        prob = 2 * distributions.norm.sf(np.abs(z))
+    else:
+        raise ValueError("alternative must be "
+                         "'less', 'greater' or 'two-sided'")
+
+    if z.ndim == 0:
+        z = z[()]
+
+    return z, prob
+
+
+SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue'))
+
+
+def skewtest(a, axis=0, nan_policy='propagate', alternative='two-sided'):
+    """Test whether the skew is different from the normal distribution.
+
+    This function tests the null hypothesis that the skewness of
+    the population that the sample was drawn from is the same
+    as that of a corresponding normal distribution.
+
+    Parameters
+    ----------
+    a : array
+        The data to be tested.
+    axis : int or None, optional
+       Axis along which statistics are calculated. Default is 0.
+       If None, compute over the whole array `a`.
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains nan.
+        The following options are available (default is 'propagate'):
+
+        * 'propagate': returns nan
+        * 'raise': throws an error
+        * 'omit': performs the calculations ignoring nan values
+
+    alternative : {'two-sided', 'less', 'greater'}, optional
+        Defines the alternative hypothesis. Default is 'two-sided'.
+        The following options are available:
+
+        * 'two-sided': the skewness of the distribution underlying the sample
+          is different from that of the normal distribution (i.e. 0)
+        * 'less': the skewness of the distribution underlying the sample
+          is less than that of the normal distribution
+        * 'greater': the skewness of the distribution underlying the sample
+          is greater than that of the normal distribution
+
+        .. versionadded:: 1.7.0
+
+    Returns
+    -------
+    statistic : float
+        The computed z-score for this test.
+    pvalue : float
+        The p-value for the hypothesis test.
+
+    Notes
+    -----
+    The sample size must be at least 8.
+
+    References
+    ----------
+    .. [1] R. B. D'Agostino, A. J. Belanger and R. B. D'Agostino Jr.,
+            "A suggestion for using powerful and informative tests of
+            normality", American Statistician 44, pp. 316-321, 1990.
+
+    Examples
+    --------
+    >>> from scipy.stats import skewtest
+    >>> skewtest([1, 2, 3, 4, 5, 6, 7, 8])
+    SkewtestResult(statistic=1.0108048609177787, pvalue=0.3121098361421897)
+    >>> skewtest([2, 8, 0, 4, 1, 9, 9, 0])
+    SkewtestResult(statistic=0.44626385374196975, pvalue=0.6554066631275459)
+    >>> skewtest([1, 2, 3, 4, 5, 6, 7, 8000])
+    SkewtestResult(statistic=3.571773510360407, pvalue=0.0003545719905823133)
+    >>> skewtest([100, 100, 100, 100, 100, 100, 100, 101])
+    SkewtestResult(statistic=3.5717766638478072, pvalue=0.000354567720281634)
+    >>> skewtest([1, 2, 3, 4, 5, 6, 7, 8], alternative='less')
+    SkewtestResult(statistic=1.0108048609177787, pvalue=0.8439450819289052)
+    >>> skewtest([1, 2, 3, 4, 5, 6, 7, 8], alternative='greater')
+    SkewtestResult(statistic=1.0108048609177787, pvalue=0.15605491807109484)
+
+    """
+    a, axis = _chk_asarray(a, axis)
+
+    contains_nan, nan_policy = _contains_nan(a, nan_policy)
+
+    if contains_nan and nan_policy == 'omit':
+        a = ma.masked_invalid(a)
+        return mstats_basic.skewtest(a, axis, alternative)
+
+    if axis is None:
+        a = np.ravel(a)
+        axis = 0
+    b2 = skew(a, axis)
+    n = a.shape[axis]
+    if n < 8:
+        raise ValueError(
+            "skewtest is not valid with less than 8 samples; %i samples"
+            " were given." % int(n))
+    y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))
+    beta2 = (3.0 * (n**2 + 27*n - 70) * (n+1) * (n+3) /
+             ((n-2.0) * (n+5) * (n+7) * (n+9)))
+    W2 = -1 + math.sqrt(2 * (beta2 - 1))
+    delta = 1 / math.sqrt(0.5 * math.log(W2))
+    alpha = math.sqrt(2.0 / (W2 - 1))
+    y = np.where(y == 0, 1, y)
+    Z = delta * np.log(y / alpha + np.sqrt((y / alpha)**2 + 1))
+
+    return SkewtestResult(*_normtest_finish(Z, alternative))
+
+
+KurtosistestResult = namedtuple('KurtosistestResult', ('statistic', 'pvalue'))
+
+
+def kurtosistest(a, axis=0, nan_policy='propagate', alternative='two-sided'):
+    """Test whether a dataset has normal kurtosis.
+
+    This function tests the null hypothesis that the kurtosis
+    of the population from which the sample was drawn is that
+    of the normal distribution.
+
+    Parameters
+    ----------
+    a : array
+        Array of the sample data.
+    axis : int or None, optional
+       Axis along which to compute test. Default is 0. If None,
+       compute over the whole array `a`.
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains nan.
+        The following options are available (default is 'propagate'):
+
+        * 'propagate': returns nan
+        * 'raise': throws an error
+        * 'omit': performs the calculations ignoring nan values
+
+    alternative : {'two-sided', 'less', 'greater'}, optional
+        Defines the alternative hypothesis.
+        The following options are available (default is 'two-sided'):
+
+        * 'two-sided': the kurtosis of the distribution underlying the sample
+          is different from that of the normal distribution
+        * 'less': the kurtosis of the distribution underlying the sample
+          is less than that of the normal distribution
+        * 'greater': the kurtosis of the distribution underlying the sample
+          is greater than that of the normal distribution
+
+        .. versionadded:: 1.7.0
+
+    Returns
+    -------
+    statistic : float
+        The computed z-score for this test.
+    pvalue : float
+        The p-value for the hypothesis test.
+
+    Notes
+    -----
+    Valid only for n >= 20. This function uses the method described in [1]_.
+
+    References
+    ----------
+    .. [1] see e.g. F. J. Anscombe, W. J. Glynn, "Distribution of the kurtosis
+       statistic b2 for normal samples", Biometrika, vol. 70, pp. 227-234, 1983.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.stats import kurtosistest
+    >>> kurtosistest(list(range(20)))
+    KurtosistestResult(statistic=-1.7058104152122062, pvalue=0.08804338332528348)
+    >>> kurtosistest(list(range(20)), alternative='less')
+    KurtosistestResult(statistic=-1.7058104152122062, pvalue=0.04402169166264174)
+    >>> kurtosistest(list(range(20)), alternative='greater')
+    KurtosistestResult(statistic=-1.7058104152122062, pvalue=0.9559783083373583)
+
+    >>> rng = np.random.default_rng()
+    >>> s = rng.normal(0, 1, 1000)
+    >>> kurtosistest(s)
+    KurtosistestResult(statistic=-1.475047944490622, pvalue=0.14019965402996987)
+
+    """
+    a, axis = _chk_asarray(a, axis)
+
+    contains_nan, nan_policy = _contains_nan(a, nan_policy)
+
+    if contains_nan and nan_policy == 'omit':
+        a = ma.masked_invalid(a)
+        return mstats_basic.kurtosistest(a, axis, alternative)
+
+    n = a.shape[axis]
+    if n < 5:
+        raise ValueError(
+            "kurtosistest requires at least 5 observations; %i observations"
+            " were given." % int(n))
+    if n < 20:
+        warnings.warn("kurtosistest only valid for n>=20 ... continuing "
+                      "anyway, n=%i" % int(n))
+    b2 = kurtosis(a, axis, fisher=False)
+
+    E = 3.0*(n-1) / (n+1)
+    varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5))  # [1]_ Eq. 1
+    x = (b2-E) / np.sqrt(varb2)  # [1]_ Eq. 4
+    # [1]_ Eq. 2:
+    sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /
+                                                        (n*(n-2)*(n-3)))
+    # [1]_ Eq. 3:
+    A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
+    term1 = 1 - 2/(9.0*A)
+    denom = 1 + x*np.sqrt(2/(A-4.0))
+    term2 = np.sign(denom) * np.where(denom == 0.0, np.nan,
+                                      np.power((1-2.0/A)/np.abs(denom), 1/3.0))
+    if np.any(denom == 0):
+        msg = "Test statistic not defined in some cases due to division by " \
+              "zero. Return nan in that case..."
+        warnings.warn(msg, RuntimeWarning)
+
+    Z = (term1 - term2) / np.sqrt(2/(9.0*A))  # [1]_ Eq. 5
+
+    # zprob uses upper tail, so Z needs to be positive
+    return KurtosistestResult(*_normtest_finish(Z, alternative))
+
+
+NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue'))
+
+
+def normaltest(a, axis=0, nan_policy='propagate'):
+    """Test whether a sample differs from a normal distribution.
+
+    This function tests the null hypothesis that a sample comes
+    from a normal distribution.  It is based on D'Agostino and
+    Pearson's [1]_, [2]_ test that combines skew and kurtosis to
+    produce an omnibus test of normality.
+
+    Parameters
+    ----------
+    a : array_like
+        The array containing the sample to be tested.
+    axis : int or None, optional
+        Axis along which to compute test. Default is 0. If None,
+        compute over the whole array `a`.
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains nan.
+        The following options are available (default is 'propagate'):
+
+          * 'propagate': returns nan
+          * 'raise': throws an error
+          * 'omit': performs the calculations ignoring nan values
+
+    Returns
+    -------
+    statistic : float or array
+        ``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and
+        ``k`` is the z-score returned by `kurtosistest`.
+    pvalue : float or array
+       A 2-sided chi squared probability for the hypothesis test.
+
+    References
+    ----------
+    .. [1] D'Agostino, R. B. (1971), "An omnibus test of normality for
+           moderate and large sample size", Biometrika, 58, 341-348
+
+    .. [2] D'Agostino, R. and Pearson, E. S. (1973), "Tests for departure from
+           normality", Biometrika, 60, 613-622
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> rng = np.random.default_rng()
+    >>> pts = 1000
+    >>> a = rng.normal(0, 1, size=pts)
+    >>> b = rng.normal(2, 1, size=pts)
+    >>> x = np.concatenate((a, b))
+    >>> k2, p = stats.normaltest(x)
+    >>> alpha = 1e-3
+    >>> print("p = {:g}".format(p))
+    p = 8.4713e-19
+    >>> if p < alpha:  # null hypothesis: x comes from a normal distribution
+    ...     print("The null hypothesis can be rejected")
+    ... else:
+    ...     print("The null hypothesis cannot be rejected")
+    The null hypothesis can be rejected
+
+    """
+    a, axis = _chk_asarray(a, axis)
+
+    contains_nan, nan_policy = _contains_nan(a, nan_policy)
+
+    if contains_nan and nan_policy == 'omit':
+        a = ma.masked_invalid(a)
+        return mstats_basic.normaltest(a, axis)
+
+    s, _ = skewtest(a, axis)
+    k, _ = kurtosistest(a, axis)
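+    # Under the null hypothesis, s and k are approximately independent
+    # standard normal, so s**2 + k**2 follows a chi-squared distribution
+    # with 2 degrees of freedom.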
+    k2 = s*s + k*k
+
+    return NormaltestResult(k2, distributions.chi2.sf(k2, 2))
+
+
+@_axis_nan_policy_factory(SignificanceResult, default_axis=None)
+def jarque_bera(x, *, axis=None):
+    """Perform the Jarque-Bera goodness of fit test on sample data.
+
+    The Jarque-Bera test tests whether the sample data has the skewness and
+    kurtosis matching a normal distribution.
+
+    Note that this test only works for a large enough number of data samples
+    (>2000) as the test statistic asymptotically has a Chi-squared distribution
+    with 2 degrees of freedom.
+
+    Parameters
+    ----------
+    x : array_like
+        Observations of a random variable.
+    axis : int or None, default: None
+        If an int, the axis of the input along which to compute the statistic.
+        The statistic of each axis-slice (e.g. row) of the input will appear in
+        a corresponding element of the output.
+        If ``None``, the input will be raveled before computing the statistic.
+
+    Returns
+    -------
+    result : SignificanceResult
+        An object with the following attributes:
+
+        statistic : float
+            The test statistic.
+        pvalue : float
+            The p-value for the hypothesis test.
+
+    References
+    ----------
+    .. [1] Jarque, C. and Bera, A. (1980) "Efficient tests for normality,
+           homoscedasticity and serial independence of regression residuals",
+           6 Econometric Letters 255-259.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> rng = np.random.default_rng()
+    >>> x = rng.normal(0, 1, 100000)
+    >>> jarque_bera_test = stats.jarque_bera(x)
+    >>> jarque_bera_test
+    SignificanceResult(statistic=3.3415184718131554, pvalue=0.18810419594996775)
+    >>> jarque_bera_test.statistic
+    3.3415184718131554
+    >>> jarque_bera_test.pvalue
+    0.18810419594996775
+
+    """
+    x = np.asarray(x)
+    if axis is None:
+        x = x.ravel()
+        axis = 0
+
+    n = x.shape[axis]
+    if n == 0:
+        raise ValueError('At least one observation is required.')
+
+    mu = x.mean(axis=axis, keepdims=True)
+    diffx = x - mu
+    s = skew(diffx, axis=axis, _no_deco=True)
+    k = kurtosis(diffx, axis=axis, _no_deco=True)
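+    # Jarque-Bera statistic: JB = n/6 * (S**2 + K**2 / 4), where S is the
+    # sample skewness and K the sample excess kurtosis.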
+    statistic = n / 6 * (s**2 + k**2 / 4)
+    pvalue = distributions.chi2.sf(statistic, df=2)
+
+    return SignificanceResult(statistic, pvalue)
+
+
+#####################################
+#        FREQUENCY FUNCTIONS        #
+#####################################
+
+
+def scoreatpercentile(a, per, limit=(), interpolation_method='fraction',
+                      axis=None):
+    """Calculate the score at a given percentile of the input sequence.
+
+    For example, the score at `per=50` is the median. If the desired quantile
+    lies between two data points, we interpolate between them, according to
+    the value of `interpolation_method`. If the parameter `limit` is
+    provided, it should be a tuple (lower, upper) of two values.
+
+    Parameters
+    ----------
+    a : array_like
+        A 1-D array of values from which to extract score.
+    per : array_like
+        Percentile(s) at which to extract score.  Values should be in range
+        [0,100].
+    limit : tuple, optional
+        Tuple of two scalars, the lower and upper limits within which to
+        compute the percentile. Values of `a` outside
+        this (closed) interval will be ignored.
+    interpolation_method : {'fraction', 'lower', 'higher'}, optional
+        Specifies the interpolation method to use,
+        when the desired quantile lies between two data points `i` and `j`.
+        The following options are available (default is 'fraction'):
+
+          * 'fraction': ``i + (j - i) * fraction`` where ``fraction`` is the
+            fractional part of the index surrounded by ``i`` and ``j``
+          * 'lower': ``i``
+          * 'higher': ``j``
+
+    axis : int, optional
+        Axis along which the percentiles are computed. Default is None. If
+        None, compute over the whole array `a`.
+
+    Returns
+    -------
+    score : float or ndarray
+        Score at percentile(s).
+
+    See Also
+    --------
+    percentileofscore, numpy.percentile
+
+    Notes
+    -----
+    This function will become obsolete in the future.
+    For NumPy 1.9 and higher, `numpy.percentile` provides all the
+    functionality that `scoreatpercentile` provides, and it is significantly
+    faster. Users with NumPy >= 1.9 are therefore encouraged to use
+    `numpy.percentile` instead.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> a = np.arange(100)
+    >>> stats.scoreatpercentile(a, 50)
+    49.5
+
+    """
+    # adapted from NumPy's percentile function.  When we require numpy >= 1.8,
+    # the implementation of this function can be replaced by np.percentile.
+    a = np.asarray(a)
+    if a.size == 0:
+        # empty array, return nan(s) with shape matching `per`
+        if np.isscalar(per):
+            return np.nan
+        else:
+            return np.full(np.asarray(per).shape, np.nan, dtype=np.float64)
+
+    if limit:
+        a = a[(limit[0] <= a) & (a <= limit[1])]
+
+    sorted_ = np.sort(a, axis=axis)
+    if axis is None:
+        axis = 0
+
+    return _compute_qth_percentile(sorted_, per, interpolation_method, axis)
+
+
+# handle sequence of per's without calling sort multiple times
+def _compute_qth_percentile(sorted_, per, interpolation_method, axis):
+    if not np.isscalar(per):
+        score = [_compute_qth_percentile(sorted_, i,
+                                         interpolation_method, axis)
+                 for i in per]
+        return np.array(score)
+
+    if not (0 <= per <= 100):
+        raise ValueError("percentile must be in the range [0, 100]")
+
+    indexer = [slice(None)] * sorted_.ndim
+    idx = per / 100. * (sorted_.shape[axis] - 1)
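+    # e.g. per=50 with 100 sorted values gives idx = 49.5, which is then
+    # interpolated between elements 49 and 50 below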
+
+    if int(idx) != idx:
+        # round fractional indices according to interpolation method
+        if interpolation_method == 'lower':
+            idx = int(np.floor(idx))
+        elif interpolation_method == 'higher':
+            idx = int(np.ceil(idx))
+        elif interpolation_method == 'fraction':
+            pass  # keep idx as fraction and interpolate
+        else:
+            raise ValueError("interpolation_method can only be 'fraction', "
+                             "'lower' or 'higher'")
+
+    i = int(idx)
+    if i == idx:
+        indexer[axis] = slice(i, i + 1)
+        weights = array(1)
+        sumval = 1.0
+    else:
+        indexer[axis] = slice(i, i + 2)
+        j = i + 1
+        weights = array([(j - idx), (idx - i)], float)
+        wshape = [1] * sorted_.ndim
+        wshape[axis] = 2
+        weights.shape = wshape
+        sumval = weights.sum()
+
+    # Use np.add.reduce (== np.sum but a little faster) to coerce data type
+    return np.add.reduce(sorted_[tuple(indexer)] * weights, axis=axis) / sumval
+
+
+def percentileofscore(a, score, kind='rank', nan_policy='propagate'):
+    """Compute the percentile rank of a score relative to a list of scores.
+
+    A `percentileofscore` of, for example, 80% means that 80% of the
+    scores in `a` are below the given score. In the case of gaps or
+    ties, the exact definition depends on the optional keyword, `kind`.
+
+    Parameters
+    ----------
+    a : array_like
+        Array to which `score` is compared.
+    score : array_like
+        Scores to compute percentiles for.
+    kind : {'rank', 'weak', 'strict', 'mean'}, optional
+        Specifies the interpretation of the resulting score.
+        The following options are available (default is 'rank'):
+
+          * 'rank': Average percentage ranking of score.  In case of multiple
+            matches, average the percentage rankings of all matching scores.
+          * 'weak': This kind corresponds to the definition of a cumulative
+            distribution function.  A percentileofscore of 80% means that 80%
+            of values are less than or equal to the provided score.
+          * 'strict': Similar to "weak", except that only values that are
+            strictly less than the given score are counted.
+          * 'mean': The average of the "weak" and "strict" scores, often used
+            in testing.  See https://en.wikipedia.org/wiki/Percentile_rank
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Specifies how to treat `nan` values in `a`.
+        The following options are available (default is 'propagate'):
+
+          * 'propagate': returns nan (for each value in `score`).
+          * 'raise': throws an error
+          * 'omit': performs the calculations ignoring nan values
+
+    Returns
+    -------
+    pcos : float
+        Percentile-position of score (0-100) relative to `a`.
+
+    See Also
+    --------
+    numpy.percentile
+    scipy.stats.scoreatpercentile, scipy.stats.rankdata
+
+    Examples
+    --------
+    Three-quarters of the given values lie below a given score:
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> stats.percentileofscore([1, 2, 3, 4], 3)
+    75.0
+
+    With multiple matches, note how the scores of the two matches, 0.6
+    and 0.8 respectively, are averaged:
+
+    >>> stats.percentileofscore([1, 2, 3, 3, 4], 3)
+    70.0
+
+    Only 2/5 values are strictly less than 3:
+
+    >>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='strict')
+    40.0
+
+    But 4/5 values are less than or equal to 3:
+
+    >>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='weak')
+    80.0
+
+    The average between the weak and the strict scores is:
+
+    >>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='mean')
+    60.0
+
+    Score arrays (of any dimensionality) are supported:
+
+    >>> stats.percentileofscore([1, 2, 3, 3, 4], [2, 3])
+    array([40., 70.])
+
+    The inputs can be infinite:
+
+    >>> stats.percentileofscore([-np.inf, 0, 1, np.inf], [1, 2, np.inf])
+    array([75., 75., 100.])
+
+    If `a` is empty, then the resulting percentiles are all `nan`:
+
+    >>> stats.percentileofscore([], [1, 2])
+    array([nan, nan])
+    """
+
+    a = np.asarray(a)
+    n = len(a)
+    score = np.asarray(score)
+
+    # Nan treatment
+    cna, npa = _contains_nan(a, nan_policy, use_summation=False)
+    cns, nps = _contains_nan(score, nan_policy, use_summation=False)
+
+    if (cna or cns) and nan_policy == 'raise':
+        raise ValueError("The input contains nan values")
+
+    if cns:
+        # If a score is nan, then the output should be nan
+        # (also if nan_policy is "omit", because it only applies to `a`)
+        score = ma.masked_where(np.isnan(score), score)
+
+    if cna:
+        if nan_policy == "omit":
+            # Don't count nans
+            a = ma.masked_where(np.isnan(a), a)
+            n = a.count()
+
+        if nan_policy == "propagate":
+            # All outputs should be nans
+            n = 0
+
+    # Cannot compare to empty list ==> nan
+    if n == 0:
+        perct = np.full_like(score, np.nan, dtype=np.float64)
+
+    else:
+        # Prepare broadcasting
+        score = score[..., None]
+
+        def count(x):
+            return np.count_nonzero(x, -1)
+
+        # Despite using masked_array to omit nan values from processing,
+        # the CI tests on "Azure pipelines" (but not on the other CI servers)
+        # emit warnings when there are nan values, contrary to the purpose
+        # of masked_arrays. As a fix, we simply suppress the warnings.
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning,
+                       "invalid value encountered in less")
+            sup.filter(RuntimeWarning,
+                       "invalid value encountered in greater")
+
+            # Main computations/logic
+            if kind == 'rank':
+                left = count(a < score)
+                right = count(a <= score)
+                plus1 = left < right
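+                # mean percentage rank of all matching scores; when the
+                # score is absent (left == right), this reduces to the
+                # common value of 'strict' and 'weak'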
+                perct = (left + right + plus1) * (50.0 / n)
+            elif kind == 'strict':
+                perct = count(a < score) * (100.0 / n)
+            elif kind == 'weak':
+                perct = count(a <= score) * (100.0 / n)
+            elif kind == 'mean':
+                left = count(a < score)
+                right = count(a <= score)
+                perct = (left + right) * (50.0 / n)
+            else:
+                raise ValueError(
+                    "kind can only be 'rank', 'strict', 'weak' or 'mean'")
+
+    # Re-insert nan values
+    perct = ma.filled(perct, np.nan)
+
+    if perct.ndim == 0:
+        return perct[()]
+    return perct
+
+
+HistogramResult = namedtuple('HistogramResult',
+                             ('count', 'lowerlimit', 'binsize', 'extrapoints'))
+
+
+def _histogram(a, numbins=10, defaultlimits=None, weights=None,
+               printextras=False):
+    """Create a histogram.
+
+    Separate the range into several bins and return the number of instances
+    in each bin.
+
+    Parameters
+    ----------
+    a : array_like
+        Array of scores which will be put into bins.
+    numbins : int, optional
+        The number of bins to use for the histogram. Default is 10.
+    defaultlimits : tuple (lower, upper), optional
+        The lower and upper values for the range of the histogram.
+        If no value is given, a range slightly larger than the range of the
+        values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
+        where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
+    weights : array_like, optional
+        The weights for each value in `a`. Default is None, which gives each
+        value a weight of 1.0
+    printextras : bool, optional
+        If True, if there are extra points (i.e. the points that fall outside
+        the bin limits) a warning is raised saying how many of those points
+        there are.  Default is False.
+
+    Returns
+    -------
+    count : ndarray
+        Number of points (or sum of weights) in each bin.
+    lowerlimit : float
+        Lowest value of histogram, the lower limit of the first bin.
+    binsize : float
+        The size of the bins (all bins have the same size).
+    extrapoints : int
+        The number of points outside the range of the histogram.
+
+    See Also
+    --------
+    numpy.histogram
+
+    Notes
+    -----
+    This histogram is based on numpy's histogram but has a larger range by
+    default if `defaultlimits` is not set.
+
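+    Examples
+    --------
+    A minimal sketch of the default limits (here ``s = 1``, so the bins
+    span ``(0, 4)``):
+
+    >>> _histogram([1., 2., 2., 3.], numbins=2)
+    HistogramResult(count=array([1., 3.]), lowerlimit=0.0, binsize=2.0, extrapoints=0)
+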
+    """
+    a = np.ravel(a)
+    if defaultlimits is None:
+        if a.size == 0:
+            # handle empty arrays. Undetermined range, so use 0-1.
+            defaultlimits = (0, 1)
+        else:
+            # no range given, so use values in `a`
+            data_min = a.min()
+            data_max = a.max()
+            # Have bins extend past min and max values slightly
+            s = (data_max - data_min) / (2. * (numbins - 1.))
+            defaultlimits = (data_min - s, data_max + s)
+
+    # use numpy's histogram method to compute bins
+    hist, bin_edges = np.histogram(a, bins=numbins, range=defaultlimits,
+                                   weights=weights)
+    # hist are not always floats, convert to keep with old output
+    hist = np.array(hist, dtype=float)
+    # fixed width for bins is assumed, as numpy's histogram gives
+    # fixed width bins for int values for 'bins'
+    binsize = bin_edges[1] - bin_edges[0]
+    # calculate number of extra points
+    extrapoints = len([v for v in a
+                       if defaultlimits[0] > v or v > defaultlimits[1]])
+    if extrapoints > 0 and printextras:
+        warnings.warn("Points outside given histogram range = %s"
+                      % extrapoints)
+
+    return HistogramResult(hist, defaultlimits[0], binsize, extrapoints)
+
+
+CumfreqResult = namedtuple('CumfreqResult',
+                           ('cumcount', 'lowerlimit', 'binsize',
+                            'extrapoints'))
+
+
+def cumfreq(a, numbins=10, defaultreallimits=None, weights=None):
+    """Return a cumulative frequency histogram, using the histogram function.
+
+    A cumulative histogram is a mapping that counts the cumulative number of
+    observations in all of the bins up to the specified bin.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    numbins : int, optional
+        The number of bins to use for the histogram. Default is 10.
+    defaultreallimits : tuple (lower, upper), optional
+        The lower and upper values for the range of the histogram.
+        If no value is given, a range slightly larger than the range of the
+        values in `a` is used. Specifically ``(a.min() - s, a.max() + s)``,
+        where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
+    weights : array_like, optional
+        The weights for each value in `a`. Default is None, which gives each
+        value a weight of 1.0
+
+    Returns
+    -------
+    cumcount : ndarray
+        Binned values of cumulative frequency.
+    lowerlimit : float
+        Lower real limit
+    binsize : float
+        Width of each bin.
+    extrapoints : int
+        Extra points.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy import stats
+    >>> rng = np.random.default_rng()
+    >>> x = [1, 4, 2, 1, 3, 1]
+    >>> res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5))
+    >>> res.cumcount
+    array([ 1.,  2.,  3.,  3.])
+    >>> res.extrapoints
+    3
+
+    Create a normal distribution with 1000 random values
+
+    >>> samples = stats.norm.rvs(size=1000, random_state=rng)
+
+    Calculate cumulative frequencies
+
+    >>> res = stats.cumfreq(samples, numbins=25)
+
+    Calculate space of values for x
+
+    >>> x = res.lowerlimit + np.linspace(0, res.binsize*res.cumcount.size,
+    ...                                  res.cumcount.size)
+
+    Plot histogram and cumulative histogram
+
+    >>> fig = plt.figure(figsize=(10, 4))
+    >>> ax1 = fig.add_subplot(1, 2, 1)
+    >>> ax2 = fig.add_subplot(1, 2, 2)
+    >>> ax1.hist(samples, bins=25)
+    >>> ax1.set_title('Histogram')
+    >>> ax2.bar(x, res.cumcount, width=res.binsize)
+    >>> ax2.set_title('Cumulative histogram')
+    >>> ax2.set_xlim([x.min(), x.max()])
+
+    >>> plt.show()
+
+    """
+    h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights)
+    cumhist = np.cumsum(h * 1, axis=0)
+    return CumfreqResult(cumhist, l, b, e)
+
+
+RelfreqResult = namedtuple('RelfreqResult',
+                           ('frequency', 'lowerlimit', 'binsize',
+                            'extrapoints'))
+
+
+def relfreq(a, numbins=10, defaultreallimits=None, weights=None):
+    """Return a relative frequency histogram, using the histogram function.
+
+    A relative frequency histogram is a mapping of the number of
+    observations in each of the bins relative to the total number of
+    observations.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    numbins : int, optional
+        The number of bins to use for the histogram. Default is 10.
+    defaultreallimits : tuple (lower, upper), optional
+        The lower and upper values for the range of the histogram.
+        If no value is given, a range slightly larger than the range of the
+        values in `a` is used. Specifically ``(a.min() - s, a.max() + s)``,
+        where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
+    weights : array_like, optional
+        The weights for each value in `a`. Default is None, which gives each
+        value a weight of 1.0
+
+    Returns
+    -------
+    frequency : ndarray
+        Binned values of relative frequency.
+    lowerlimit : float
+        Lower real limit.
+    binsize : float
+        Width of each bin.
+    extrapoints : int
+        Extra points.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> from scipy import stats
+    >>> rng = np.random.default_rng()
+    >>> a = np.array([2, 4, 1, 2, 3, 2])
+    >>> res = stats.relfreq(a, numbins=4)
+    >>> res.frequency
+    array([ 0.16666667,  0.5       ,  0.16666667,  0.16666667])
+    >>> np.sum(res.frequency)  # relative frequencies should add up to 1
+    1.0
+
+    Create a normal distribution with 1000 random values
+
+    >>> samples = stats.norm.rvs(size=1000, random_state=rng)
+
+    Calculate relative frequencies
+
+    >>> res = stats.relfreq(samples, numbins=25)
+
+    Calculate space of values for x
+
+    >>> x = res.lowerlimit + np.linspace(0, res.binsize*res.frequency.size,
+    ...                                  res.frequency.size)
+
+    Plot relative frequency histogram
+
+    >>> fig = plt.figure(figsize=(5, 4))
+    >>> ax = fig.add_subplot(1, 1, 1)
+    >>> ax.bar(x, res.frequency, width=res.binsize)
+    >>> ax.set_title('Relative frequency histogram')
+    >>> ax.set_xlim([x.min(), x.max()])
+
+    >>> plt.show()
+
+    """
+    a = np.asanyarray(a)
+    h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights)
+    h = h / a.shape[0]
+
+    return RelfreqResult(h, l, b, e)
+
+
+#####################################
+#        VARIABILITY FUNCTIONS      #
+#####################################
+
+def obrientransform(*samples):
+    """Compute the O'Brien transform on input data (any number of arrays).
+
+    Used to test for homogeneity of variance prior to running one-way stats.
+    Each array in ``*samples`` is one level of a factor.
+    If `f_oneway` is run on the transformed data and found significant,
+    the variances are unequal.  From Maxwell and Delaney [1]_, p.112.
+
+    Parameters
+    ----------
+    sample1, sample2, ... : array_like
+        Any number of arrays.
+
+    Returns
+    -------
+    obrientransform : ndarray
+        Transformed data for use in an ANOVA.  The first dimension
+        of the result corresponds to the sequence of transformed
+        arrays.  If the arrays given are all 1-D of the same length,
+        the return value is a 2-D array; otherwise it is a 1-D array
+        of type object, with each element being an ndarray.
+
+    References
+    ----------
+    .. [1] S. E. Maxwell and H. D. Delaney, "Designing Experiments and
+           Analyzing Data: A Model Comparison Perspective", Wadsworth, 1990.
+
+    Examples
+    --------
+    We'll test the following data sets for differences in their variance.
+
+    >>> x = [10, 11, 13, 9, 7, 12, 12, 9, 10]
+    >>> y = [13, 21, 5, 10, 8, 14, 10, 12, 7, 15]
+
+    Apply the O'Brien transform to the data.
+
+    >>> from scipy.stats import obrientransform
+    >>> tx, ty = obrientransform(x, y)
+
+    Use `scipy.stats.f_oneway` to apply a one-way ANOVA test to the
+    transformed data.
+
+    >>> from scipy.stats import f_oneway
+    >>> F, p = f_oneway(tx, ty)
+    >>> p
+    0.1314139477040335
+
+    If we require that ``p < 0.05`` for significance, we cannot conclude
+    that the variances are different.
+
+    """
+    TINY = np.sqrt(np.finfo(float).eps)
+
+    # `arrays` will hold the transformed arguments.
+    arrays = []
+    sLast = None
+
+    for sample in samples:
+        a = np.asarray(sample)
+        n = len(a)
+        mu = np.mean(a)
+        sq = (a - mu)**2
+        sumsq = sq.sum()
+
+        # The O'Brien transform.
+        t = ((n - 1.5) * n * sq - 0.5 * sumsq) / ((n - 1) * (n - 2))
+
+        # Check that the mean of the transformed data is equal to the
+        # original variance.
+        var = sumsq / (n - 1)
+        if abs(var - np.mean(t)) > TINY:
+            raise ValueError('Lack of convergence in obrientransform.')
+
+        arrays.append(t)
+        sLast = a.shape
+
+    if sLast:
+        for arr in arrays[:-1]:
+            if sLast != arr.shape:
+                return np.array(arrays, dtype=object)
+    return np.array(arrays)
+
+
+def sem(a, axis=0, ddof=1, nan_policy='propagate'):
+    """Compute standard error of the mean.
+
+    Calculate the standard error of the mean (or standard error of
+    measurement) of the values in the input array.
+
+    Parameters
+    ----------
+    a : array_like
+        An array containing the values for which the standard error is
+        returned.
+    axis : int or None, optional
+        Axis along which to operate. Default is 0. If None, compute over
+        the whole array `a`.
+    ddof : int, optional
+        Delta degrees-of-freedom. How many degrees of freedom to adjust
+        for bias in limited samples relative to the population estimate
+        of variance. Defaults to 1.
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains nan.
+        The following options are available (default is 'propagate'):
+
+          * 'propagate': returns nan
+          * 'raise': throws an error
+          * 'omit': performs the calculations ignoring nan values
+
+    Returns
+    -------
+    s : ndarray or float
+        The standard error of the mean in the sample(s), along the input axis.
+
+    Notes
+    -----
+    The default value for `ddof` is different from the default (0) used by
+    other `ddof`-containing routines, such as np.std and np.nanstd.
+
+    Examples
+    --------
+    Find standard error along the first axis:
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> a = np.arange(20).reshape(5,4)
+    >>> stats.sem(a)
+    array([ 2.8284,  2.8284,  2.8284,  2.8284])
+
+    Find standard error across the whole array, using n degrees of freedom:
+
+    >>> stats.sem(a, axis=None, ddof=0)
+    1.2893796958227628
+
+    """
+    a, axis = _chk_asarray(a, axis)
+
+    contains_nan, nan_policy = _contains_nan(a, nan_policy)
+
+    if contains_nan and nan_policy == 'omit':
+        a = ma.masked_invalid(a)
+        return mstats_basic.sem(a, axis, ddof)
+
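+    # The standard error of the mean is the sample standard deviation
+    # divided by the square root of the sample size.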
+    n = a.shape[axis]
+    s = np.std(a, axis=axis, ddof=ddof) / np.sqrt(n)
+    return s
+
+
+def _isconst(x):
+    """
+    Check if all values in x are the same.  NaNs are ignored.
+
+    x must be a 1d array.
+
+    The return value is a 1d array with length 1, so it can be used
+    in np.apply_along_axis.
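+
+    For example (illustrative; the nan is ignored):
+
+    >>> _isconst(np.array([2.0, 2.0, np.nan]))
+    array([ True])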
+    """
+    y = x[~np.isnan(x)]
+    if y.size == 0:
+        return np.array([True])
+    else:
+        return (y[0] == y).all(keepdims=True)
+
+
+def _quiet_nanmean(x):
+    """
+    Compute nanmean for the 1d array x, but quietly return nan if x is all nan.
+
+    The return value is a 1d array with length 1, so it can be used
+    in np.apply_along_axis.
+    """
+    y = x[~np.isnan(x)]
+    if y.size == 0:
+        return np.array([np.nan])
+    else:
+        return np.mean(y, keepdims=True)
+
+
+def _quiet_nanstd(x, ddof=0):
+    """
+    Compute nanstd for the 1d array x, but quietly return nan if x is all nan.
+
+    The return value is a 1d array with length 1, so it can be used
+    in np.apply_along_axis.
+    """
+    y = x[~np.isnan(x)]
+    if y.size == 0:
+        return np.array([np.nan])
+    else:
+        return np.std(y, keepdims=True, ddof=ddof)
+
+
+def zscore(a, axis=0, ddof=0, nan_policy='propagate'):
+    """
+    Compute the z score.
+
+    Compute the z score of each value in the sample, relative to the
+    sample mean and standard deviation.
+
+    Parameters
+    ----------
+    a : array_like
+        An array like object containing the sample data.
+    axis : int or None, optional
+        Axis along which to operate. Default is 0. If None, compute over
+        the whole array `a`.
+    ddof : int, optional
+        Degrees of freedom correction in the calculation of the
+        standard deviation. Default is 0.
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains nan. 'propagate' returns nan,
+        'raise' throws an error, 'omit' performs the calculations ignoring nan
+        values. Default is 'propagate'.  Note that when the value is 'omit',
+        nans in the input also propagate to the output, but they do not affect
+        the z-scores computed for the non-nan values.
+
+    Returns
+    -------
+    zscore : array_like
+        The z-scores, standardized by mean and standard deviation of
+        input array `a`.
+
+    Notes
+    -----
+    This function preserves ndarray subclasses, and works also with
+    matrices and masked arrays (it uses `asanyarray` instead of
+    `asarray` for parameters).
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array([ 0.7972,  0.0767,  0.4383,  0.7866,  0.8091,
+    ...                0.1954,  0.6307,  0.6599,  0.1065,  0.0508])
+    >>> from scipy import stats
+    >>> stats.zscore(a)
+    array([ 1.1273, -1.247 , -0.0552,  1.0923,  1.1664, -0.8559,  0.5786,
+            0.6748, -1.1488, -1.3324])
+
+    Computing along a specified axis, using n-1 degrees of freedom
+    (``ddof=1``) to calculate the standard deviation:
+
+    >>> b = np.array([[ 0.3148,  0.0478,  0.6243,  0.4608],
+    ...               [ 0.7149,  0.0775,  0.6072,  0.9656],
+    ...               [ 0.6341,  0.1403,  0.9759,  0.4064],
+    ...               [ 0.5918,  0.6948,  0.904 ,  0.3721],
+    ...               [ 0.0921,  0.2481,  0.1188,  0.1366]])
+    >>> stats.zscore(b, axis=1, ddof=1)
+    array([[-0.19264823, -1.28415119,  1.07259584,  0.40420358],
+           [ 0.33048416, -1.37380874,  0.04251374,  1.00081084],
+           [ 0.26796377, -1.12598418,  1.23283094, -0.37481053],
+           [-0.22095197,  0.24468594,  1.19042819, -1.21416216],
+           [-0.82780366,  1.4457416 , -0.43867764, -0.1792603 ]])
+
+    An example with `nan_policy='omit'`:
+
+    >>> x = np.array([[25.11, 30.10, np.nan, 32.02, 43.15],
+    ...               [14.95, 16.06, 121.25, 94.35, 29.81]])
+    >>> stats.zscore(x, axis=1, nan_policy='omit')
+    array([[-1.13490897, -0.37830299,         nan, -0.08718406,  1.60039602],
+           [-0.91611681, -0.89090508,  1.4983032 ,  0.88731639, -0.5785977 ]])
+    """
+    return zmap(a, a, axis=axis, ddof=ddof, nan_policy=nan_policy)
+
+
+def gzscore(a, *, axis=0, ddof=0, nan_policy='propagate'):
+    """
+    Compute the geometric standard score.
+
+    Compute the geometric z score of each strictly positive value in the
+    sample, relative to the geometric mean and standard deviation.
+    Mathematically the geometric z score can be evaluated as::
+
+        gzscore = log(a/gmu) / log(gsigma)
+
+    where ``gmu`` (resp. ``gsigma``) is the geometric mean (resp. standard
+    deviation).
+
+    Parameters
+    ----------
+    a : array_like
+        Sample data.
+    axis : int or None, optional
+        Axis along which to operate. Default is 0. If None, compute over
+        the whole array `a`.
+    ddof : int, optional
+        Degrees of freedom correction in the calculation of the
+        standard deviation. Default is 0.
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains nan. 'propagate' returns nan,
+        'raise' throws an error, 'omit' performs the calculations ignoring nan
+        values. Default is 'propagate'.  Note that when the value is 'omit',
+        nans in the input also propagate to the output, but they do not affect
+        the geometric z scores computed for the non-nan values.
+
+    Returns
+    -------
+    gzscore : array_like
+        The geometric z scores, standardized by geometric mean and geometric
+        standard deviation of input array `a`.
+
+    See Also
+    --------
+    gmean : Geometric mean
+    gstd : Geometric standard deviation
+    zscore : Standard score
+
+    Notes
+    -----
+    This function preserves ndarray subclasses, and works also with
+    matrices and masked arrays (it uses ``asanyarray`` instead of
+    ``asarray`` for parameters).
+
+    .. versionadded:: 1.8
+
+    Examples
+    --------
+    Draw samples from a log-normal distribution:
+
+    >>> import numpy as np
+    >>> from scipy.stats import zscore, gzscore
+    >>> import matplotlib.pyplot as plt
+
+    >>> rng = np.random.default_rng()
+    >>> mu, sigma = 3., 1.  # mean and standard deviation
+    >>> x = rng.lognormal(mu, sigma, size=500)
+
+    Display the histogram of the samples:
+
+    >>> fig, ax = plt.subplots()
+    >>> ax.hist(x, 50)
+    >>> plt.show()
+
+    Display the histogram of the samples standardized by the classical zscore.
+    Distribution is rescaled but its shape is unchanged.
+
+    >>> fig, ax = plt.subplots()
+    >>> ax.hist(zscore(x), 50)
+    >>> plt.show()
+
+    Demonstrate that the distribution of geometric zscores is rescaled and
+    quasinormal:
+
+    >>> fig, ax = plt.subplots()
+    >>> ax.hist(gzscore(x), 50)
+    >>> plt.show()
+
+    """
+    a = np.asanyarray(a)
+    log = ma.log if isinstance(a, ma.MaskedArray) else np.log
+
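+    # Since gmu = exp(mean(log(a))) and gsigma = exp(std(log(a))),
+    # log(a / gmu) / log(gsigma) reduces to the ordinary z score of log(a).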
+    return zscore(log(a), axis=axis, ddof=ddof, nan_policy=nan_policy)
+
+
+def zmap(scores, compare, axis=0, ddof=0, nan_policy='propagate'):
+    """
+    Calculate the relative z-scores.
+
+    Return an array of z-scores, i.e., scores that are standardized to
+    zero mean and unit variance, where mean and variance are calculated
+    from the comparison array.
+
+    Parameters
+    ----------
+    scores : array_like
+        The input for which z-scores are calculated.
+    compare : array_like
+        The input from which the mean and standard deviation of the
+        normalization are taken; assumed to have the same dimension as
+        `scores`.
+    axis : int or None, optional
+        Axis over which mean and variance of `compare` are calculated.
+        Default is 0. If None, compute over the whole array `scores`.
+    ddof : int, optional
+        Degrees of freedom correction in the calculation of the
+        standard deviation. Default is 0.
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle the occurrence of nans in `compare`.
+        'propagate' returns nan, 'raise' raises an exception, 'omit'
+        performs the calculations ignoring nan values. Default is
+        'propagate'. Note that when the value is 'omit', nans in `scores`
+        also propagate to the output, but they do not affect the z-scores
+        computed for the non-nan values.
+
+    Returns
+    -------
+    zscore : array_like
+        Z-scores, in the same shape as `scores`.
+
+    Notes
+    -----
+    This function preserves ndarray subclasses, and works also with
+    matrices and masked arrays (it uses `asanyarray` instead of
+    `asarray` for parameters).
+
+    Examples
+    --------
+    >>> from scipy.stats import zmap
+    >>> a = [0.5, 2.0, 2.5, 3]
+    >>> b = [0, 1, 2, 3, 4]
+    >>> zmap(a, b)
+    array([-1.06066017,  0.        ,  0.35355339,  0.70710678])
+
+    """
+    a = np.asanyarray(compare)
+
+    if a.size == 0:
+        return np.empty(a.shape)
+
+    contains_nan, nan_policy = _contains_nan(a, nan_policy)
+
+    if contains_nan and nan_policy == 'omit':
+        if axis is None:
+            mn = _quiet_nanmean(a.ravel())
+            std = _quiet_nanstd(a.ravel(), ddof=ddof)
+            isconst = _isconst(a.ravel())
+        else:
+            mn = np.apply_along_axis(_quiet_nanmean, axis, a)
+            std = np.apply_along_axis(_quiet_nanstd, axis, a, ddof=ddof)
+            isconst = np.apply_along_axis(_isconst, axis, a)
+    else:
+        mn = a.mean(axis=axis, keepdims=True)
+        std = a.std(axis=axis, ddof=ddof, keepdims=True)
+        if axis is None:
+            isconst = (a.item(0) == a).all()
+        else:
+            isconst = (_first(a, axis) == a).all(axis=axis, keepdims=True)
+
+    # Set std deviations that are 0 to 1 to avoid division by 0.
+    std[isconst] = 1.0
+    z = (scores - mn) / std
+    # Set the outputs associated with a constant input to nan.
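+    # (Illustrative sketch: zmap([1, 2], [3, 3]) returns array([nan, nan]).)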
+    z[np.broadcast_to(isconst, z.shape)] = np.nan
+    return z
+
+
+def gstd(a, axis=0, ddof=1):
+    """
+    Calculate the geometric standard deviation of an array.
+
+    The geometric standard deviation describes the spread of a set of numbers
+    where the geometric mean is preferred. It is a multiplicative factor, and
+    so a dimensionless quantity.
+
+    It is defined as the exponent of the standard deviation of ``log(a)``.
+    Mathematically the population geometric standard deviation can be
+    evaluated as::
+
+        gstd = exp(std(log(a)))
+
+    .. versionadded:: 1.3.0
+
+    Parameters
+    ----------
+    a : array_like
+        An array like object containing the sample data.
+    axis : int, tuple or None, optional
+        Axis along which to operate. Default is 0. If None, compute over
+        the whole array `a`.
+    ddof : int, optional
+        Degree of freedom correction in the calculation of the
+        geometric standard deviation. Default is 1.
+
+    Returns
+    -------
+    ndarray or float
+        An array of the geometric standard deviation. If `axis` is None or
+        `a` is a 1d array, a float is returned.
+
+    See Also
+    --------
+    gmean : Geometric mean
+    numpy.std : Standard deviation
+
+    Notes
+    -----
+    As the calculation requires the use of logarithms the geometric standard
+    deviation only supports strictly positive values. Any non-positive or
+    infinite values will raise a `ValueError`.
+    The geometric standard deviation is sometimes confused with the exponent of
+    the standard deviation, ``exp(std(a))``. Instead the geometric standard
+    deviation is ``exp(std(log(a)))``.
+    The default value for `ddof` is different from the default value (0) used
+    by other `ddof`-containing functions, such as ``np.std`` and ``np.nanstd``.
+
+    References
+    ----------
+    .. [1] Kirkwood, T. B., "Geometric means and measures of dispersion",
+           Biometrics, vol. 35, pp. 908-909, 1979
+
+    Examples
+    --------
+    Find the geometric standard deviation of a log-normally distributed sample.
+    Note that the standard deviation of the distribution is one, on a
+    log scale this evaluates to approximately ``exp(1)``.
+
+    >>> import numpy as np
+    >>> from scipy.stats import gstd
+    >>> rng = np.random.default_rng()
+    >>> sample = rng.lognormal(mean=0, sigma=1, size=1000)
+    >>> gstd(sample)
+    2.810010162475324
+
+    Compute the geometric standard deviation of a multidimensional array and
+    of a given axis.
+
+    >>> a = np.arange(1, 25).reshape(2, 3, 4)
+    >>> gstd(a, axis=None)
+    2.2944076136018947
+    >>> gstd(a, axis=2)
+    array([[1.82424757, 1.22436866, 1.13183117],
+           [1.09348306, 1.07244798, 1.05914985]])
+    >>> gstd(a, axis=(1,2))
+    array([2.12939215, 1.22120169])
+
+    The geometric standard deviation further handles masked arrays.
+
+    >>> a = np.arange(1, 25).reshape(2, 3, 4)
+    >>> ma = np.ma.masked_where(a > 16, a)
+    >>> ma
+    masked_array(
+      data=[[[1, 2, 3, 4],
+             [5, 6, 7, 8],
+             [9, 10, 11, 12]],
+            [[13, 14, 15, 16],
+             [--, --, --, --],
+             [--, --, --, --]]],
+      mask=[[[False, False, False, False],
+             [False, False, False, False],
+             [False, False, False, False]],
+            [[False, False, False, False],
+             [ True,  True,  True,  True],
+             [ True,  True,  True,  True]]],
+      fill_value=999999)
+    >>> gstd(ma, axis=2)
+    masked_array(
+      data=[[1.8242475707663655, 1.2243686572447428, 1.1318311657788478],
+            [1.0934830582350938, --, --]],
+      mask=[[False, False, False],
+            [False,  True,  True]],
+      fill_value=999999)
+
+    """
+    a = np.asanyarray(a)
+    log = ma.log if isinstance(a, ma.MaskedArray) else np.log
+
+    try:
+        with warnings.catch_warnings():
+            warnings.simplefilter("error", RuntimeWarning)
+            return np.exp(np.std(log(a), axis=axis, ddof=ddof))
+    except RuntimeWarning as w:
+        if np.isinf(a).any():
+            raise ValueError(
+                'Infinite value encountered. The geometric standard deviation '
+                'is defined for strictly positive values only.'
+            ) from w
+        a_nan = np.isnan(a)
+        a_nan_any = a_nan.any()
+        # exclude NaN's from negativity check, but
+        # avoid expensive masking for arrays with no NaN
+        if ((a_nan_any and np.less_equal(np.nanmin(a), 0)) or
+                (not a_nan_any and np.less_equal(a, 0).any())):
+            raise ValueError(
+                'Non positive value encountered. The geometric standard '
+                'deviation is defined for strictly positive values only.'
+            ) from w
+        elif 'Degrees of freedom <= 0 for slice' == str(w):
+            raise ValueError(w) from w
+        else:
+            #  Remaining warnings don't need to be exceptions.
+            return np.exp(np.std(log(a, where=~a_nan), axis=axis, ddof=ddof))
+    except TypeError as e:
+        raise ValueError(
+            'Invalid array input. The inputs could not be '
+            'safely coerced to any supported types') from e
+
+
+# Private dictionary initialized only once at module level
+# See https://en.wikipedia.org/wiki/Robust_measures_of_scale
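+# 'normal' equals 2*sqrt(2)*erfinv(1/2), approximately 1.349; dividing an
+# IQR by it estimates the standard deviation of normally distributed data.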
+_scale_conversions = {'raw': 1.0,
+                      'normal': special.erfinv(0.5) * 2.0 * math.sqrt(2.0)}
+
+
+def iqr(x, axis=None, rng=(25, 75), scale=1.0, nan_policy='propagate',
+        interpolation='linear', keepdims=False):
+    r"""
+    Compute the interquartile range of the data along the specified axis.
+
+    The interquartile range (IQR) is the difference between the 75th and
+    25th percentile of the data. It is a measure of the dispersion
+    similar to standard deviation or variance, but is much more robust
+    against outliers [2]_.
+
+    The ``rng`` parameter allows this function to compute percentile ranges
+    other than the actual IQR. For example, setting ``rng=(0, 100)`` is
+    equivalent to `numpy.ptp`.
+
+    The IQR of an empty array is `np.nan`.
+
+    .. versionadded:: 0.18.0
+
+    Parameters
+    ----------
+    x : array_like
+        Input array or object that can be converted to an array.
+    axis : int or sequence of int, optional
+        Axis along which the range is computed. The default is to
+        compute the IQR for the entire array.
+    rng : Two-element sequence containing floats in range of [0, 100], optional
+        Percentiles over which to compute the range. Each must be
+        between 0 and 100, inclusive. The default is the true IQR:
+        ``(25, 75)``. The order of the elements is not important.
+    scale : scalar or str, optional
+        The numerical value of scale will be divided out of the final
+        result. The following string values are recognized:
+
+          * 'raw' : No scaling, just return the raw IQR.
+            **Deprecated!**  Use ``scale=1`` instead.
+          * 'normal' : Scale by
+            :math:`2 \sqrt{2} erf^{-1}(\frac{1}{2}) \approx 1.349`.
+
+        The default is 1.0. The use of ``scale='raw'`` is deprecated in favor
+        of ``scale=1`` and will raise an error in SciPy 1.12.0.
+        Array-like `scale` is also allowed, as long
+        as it broadcasts correctly to the output such that
+        ``out / scale`` is a valid operation. The output dimensions
+        depend on the input array, `x`, the `axis` argument, and the
+        `keepdims` flag.
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains nan.
+        The following options are available (default is 'propagate'):
+
+          * 'propagate': returns nan
+          * 'raise': throws an error
+          * 'omit': performs the calculations ignoring nan values
+    interpolation : str, optional
+        Specifies the interpolation method to use when the percentile
+        boundaries lie between two data points ``i`` and ``j``.
+        The following options are available (default is 'linear'):
+
+          * 'linear': ``i + (j - i)*fraction``, where ``fraction`` is the
+            fractional part of the index surrounded by ``i`` and ``j``.
+          * 'lower': ``i``.
+          * 'higher': ``j``.
+          * 'nearest': ``i`` or ``j`` whichever is nearest.
+          * 'midpoint': ``(i + j)/2``.
+
+        For NumPy >= 1.22.0, the additional options provided by the ``method``
+        keyword of `numpy.percentile` are also valid.
+
+    keepdims : bool, optional
+        If this is set to True, the reduced axes are left in the
+        result as dimensions with size one. With this option, the result
+        will broadcast correctly against the original array `x`.
+
+    Returns
+    -------
+    iqr : scalar or ndarray
+        If ``axis=None``, a scalar is returned. If the input contains
+        integers or floats of smaller precision than ``np.float64``, then the
+        output data-type is ``np.float64``. Otherwise, the output data-type is
+        the same as that of the input.
+
+    See Also
+    --------
+    numpy.std, numpy.var
+
+    References
+    ----------
+    .. [1] "Interquartile range" https://en.wikipedia.org/wiki/Interquartile_range
+    .. [2] "Robust measures of scale" https://en.wikipedia.org/wiki/Robust_measures_of_scale
+    .. [3] "Quantile" https://en.wikipedia.org/wiki/Quantile
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.stats import iqr
+    >>> x = np.array([[10, 7, 4], [3, 2, 1]])
+    >>> x
+    array([[10,  7,  4],
+           [ 3,  2,  1]])
+    >>> iqr(x)
+    4.0
+    >>> iqr(x, axis=0)
+    array([ 3.5,  2.5,  1.5])
+    >>> iqr(x, axis=1)
+    array([ 3.,  1.])
+    >>> iqr(x, axis=1, keepdims=True)
+    array([[ 3.],
+           [ 1.]])
+
+    """
+    x = asarray(x)
+
+    # This check prevents percentile from raising an error later. Also, it is
+    # consistent with `np.var` and `np.std`.
+    if not x.size:
+        return np.nan
+
+    # An error may be raised here, so fail-fast, before doing lengthy
+    # computations, even though `scale` is not used until later
+    if isinstance(scale, str):
+        scale_key = scale.lower()
+        if scale_key not in _scale_conversions:
+            raise ValueError("{0} not a valid scale for `iqr`".format(scale))
+        if scale_key == 'raw':
+            msg = ("The use of 'scale=\"raw\"' is deprecated infavor of "
+                   "'scale=1' and will raise an error in SciPy 1.12.0.")
+            warnings.warn(msg, DeprecationWarning, stacklevel=2)
+        scale = _scale_conversions[scale_key]
+
+    # Select the percentile function to use based on nans and policy
+    contains_nan, nan_policy = _contains_nan(x, nan_policy)
+
+    if contains_nan and nan_policy == 'omit':
+        percentile_func = np.nanpercentile
+    else:
+        percentile_func = np.percentile
+
+    if len(rng) != 2:
+        raise TypeError("quantile range must be two element sequence")
+
+    if np.isnan(rng).any():
+        raise ValueError("range must not contain NaNs")
+
+    rng = sorted(rng)
+    if NumpyVersion(np.__version__) >= '1.22.0':
+        pct = percentile_func(x, rng, axis=axis, method=interpolation,
+                              keepdims=keepdims)
+    else:
+        pct = percentile_func(x, rng, axis=axis, interpolation=interpolation,
+                              keepdims=keepdims)
+    out = np.subtract(pct[1], pct[0])
+
+    if scale != 1.0:
+        out /= scale
+
+    return out
+
+
+def _mad_1d(x, center, nan_policy):
+    # Median absolute deviation for 1-d array x.
+    # This is a helper function for `median_abs_deviation`; it assumes its
+    # arguments have been validated already.  In particular,  x must be a
+    # 1-d numpy array, center must be callable, and if nan_policy is not
+    # 'propagate', it is assumed to be 'omit', because 'raise' is handled
+    # in `median_abs_deviation`.
+    # No warning is generated if x is empty or all nan.
+    isnan = np.isnan(x)
+    if isnan.any():
+        if nan_policy == 'propagate':
+            return np.nan
+        x = x[~isnan]
+    if x.size == 0:
+        # MAD of an empty array is nan.
+        return np.nan
+    # Edge cases have been handled, so do the basic MAD calculation.
+    med = center(x)
+    mad = np.median(np.abs(x - med))
+    return mad
+
+
+def median_abs_deviation(x, axis=0, center=np.median, scale=1.0,
+                         nan_policy='propagate'):
+    r"""
+    Compute the median absolute deviation of the data along the given axis.
+
+    The median absolute deviation (MAD, [1]_) computes the median over the
+    absolute deviations from the median. It is a measure of dispersion
+    similar to the standard deviation but more robust to outliers [2]_.
+
+    The MAD of an empty array is ``np.nan``.
+
+    .. versionadded:: 1.5.0
+
+    Parameters
+    ----------
+    x : array_like
+        Input array or object that can be converted to an array.
+    axis : int or None, optional
+        Axis along which the MAD is computed. Default is 0. If None, compute
+        the MAD over the entire array.
+    center : callable, optional
+        A function that will return the central value. The default is to use
+        np.median. Any user defined function used will need to have the
+        function signature ``func(arr, axis)``.
+    scale : scalar or str, optional
+        The numerical value of scale will be divided out of the final
+        result. The default is 1.0. The string "normal" is also accepted,
+        and results in `scale` being the inverse of the standard normal
+        quantile function at 0.75, which is approximately 0.67449.
+        Array-like scale is also allowed, as long as it broadcasts correctly
+        to the output such that ``out / scale`` is a valid operation. The
+        output dimensions depend on the input array, `x`, and the `axis`
+        argument.
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains nan.
+        The following options are available (default is 'propagate'):
+
+        * 'propagate': returns nan
+        * 'raise': throws an error
+        * 'omit': performs the calculations ignoring nan values
+
+    Returns
+    -------
+    mad : scalar or ndarray
+        If ``axis=None``, a scalar is returned. If the input contains
+        integers or floats of smaller precision than ``np.float64``, then the
+        output data-type is ``np.float64``. Otherwise, the output data-type is
+        the same as that of the input.
+
+    See Also
+    --------
+    numpy.std, numpy.var, numpy.median, scipy.stats.iqr, scipy.stats.tmean,
+    scipy.stats.tstd, scipy.stats.tvar
+
+    Notes
+    -----
+    The `center` argument only affects the calculation of the central value
+    around which the MAD is calculated. That is, passing in ``center=np.mean``
+    will calculate the MAD around the mean - it will not calculate the *mean*
+    absolute deviation.
+
+    The input array may contain `inf`, but if `center` returns `inf`, the
+    corresponding MAD for that data will be `nan`.
+
+    References
+    ----------
+    .. [1] "Median absolute deviation",
+           https://en.wikipedia.org/wiki/Median_absolute_deviation
+    .. [2] "Robust measures of scale",
+           https://en.wikipedia.org/wiki/Robust_measures_of_scale
+
+    Examples
+    --------
+    When comparing the behavior of `median_abs_deviation` with ``np.std``,
+    the latter is affected when we change a single value of an array to have an
+    outlier value while the MAD hardly changes:
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> x = stats.norm.rvs(size=100, scale=1, random_state=123456)
+    >>> x.std()
+    0.9973906394005013
+    >>> stats.median_abs_deviation(x)
+    0.82832610097857
+    >>> x[0] = 345.6
+    >>> x.std()
+    34.42304872314415
+    >>> stats.median_abs_deviation(x)
+    0.8323442311590675
+
+    Axis handling example:
+
+    >>> x = np.array([[10, 7, 4], [3, 2, 1]])
+    >>> x
+    array([[10,  7,  4],
+           [ 3,  2,  1]])
+    >>> stats.median_abs_deviation(x)
+    array([3.5, 2.5, 1.5])
+    >>> stats.median_abs_deviation(x, axis=None)
+    2.0
+
+    Scale normal example:
+
+    >>> x = stats.norm.rvs(size=1000000, scale=2, random_state=123456)
+    >>> stats.median_abs_deviation(x)
+    1.3487398527041636
+    >>> stats.median_abs_deviation(x, scale='normal')
+    1.9996446978061115
+
+    """
+    if not callable(center):
+        raise TypeError("The argument 'center' must be callable. The given "
+                        f"value {repr(center)} is not callable.")
+
+    # An error may be raised here, so fail-fast, before doing lengthy
+    # computations, even though `scale` is not used until later
+    if isinstance(scale, str):
+        if scale.lower() == 'normal':
+            scale = 0.6744897501960817  # special.ndtri(0.75)
+        else:
+            raise ValueError(f"{scale} is not a valid scale value.")
+
+    x = asarray(x)
+
+    # Consistent with `np.var` and `np.std`.
+    if not x.size:
+        if axis is None:
+            return np.nan
+        nan_shape = tuple(item for i, item in enumerate(x.shape) if i != axis)
+        if nan_shape == ():
+            # Return nan, not array(nan)
+            return np.nan
+        return np.full(nan_shape, np.nan)
+
+    contains_nan, nan_policy = _contains_nan(x, nan_policy)
+
+    if contains_nan:
+        if axis is None:
+            mad = _mad_1d(x.ravel(), center, nan_policy)
+        else:
+            mad = np.apply_along_axis(_mad_1d, axis, x, center, nan_policy)
+    else:
+        if axis is None:
+            med = center(x, axis=None)
+            mad = np.median(np.abs(x - med))
+        else:
+            # Wrap the call to center() in expand_dims() so it acts like
+            # keepdims=True was used.
+            med = np.expand_dims(center(x, axis=axis), axis)
+            mad = np.median(np.abs(x - med), axis=axis)
+
+    return mad / scale
+
+
+#####################################
+#         TRIMMING FUNCTIONS        #
+#####################################
+
+
+SigmaclipResult = namedtuple('SigmaclipResult', ('clipped', 'lower', 'upper'))
+
+
+def sigmaclip(a, low=4., high=4.):
+    """Perform iterative sigma-clipping of array elements.
+
+    Starting from the full sample, all elements outside the critical range are
+    removed, i.e. all elements of the input array `c` that satisfy either of
+    the following conditions::
+
+        c < mean(c) - std(c)*low
+        c > mean(c) + std(c)*high
+
+    The iteration continues with the updated sample until no
+    elements are outside the (updated) range.
+
+    Parameters
+    ----------
+    a : array_like
+        Data array, will be raveled if not 1-D.
+    low : float, optional
+        Lower bound factor of sigma clipping. Default is 4.
+    high : float, optional
+        Upper bound factor of sigma clipping. Default is 4.
+
+    Returns
+    -------
+    clipped : ndarray
+        Input array with clipped elements removed.
+    lower : float
+        Lower threshold value used for clipping.
+    upper : float
+        Upper threshold value used for clipping.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.stats import sigmaclip
+    >>> a = np.concatenate((np.linspace(9.5, 10.5, 31),
+    ...                     np.linspace(0, 20, 5)))
+    >>> fact = 1.5
+    >>> c, low, upp = sigmaclip(a, fact, fact)
+    >>> c
+    array([  9.96666667,  10.        ,  10.03333333,  10.        ])
+    >>> c.var(), c.std()
+    (0.00055555555555555165, 0.023570226039551501)
+    >>> low, c.mean() - fact*c.std(), c.min()
+    (9.9646446609406727, 9.9646446609406727, 9.9666666666666668)
+    >>> upp, c.mean() + fact*c.std(), c.max()
+    (10.035355339059327, 10.035355339059327, 10.033333333333333)
+
+    >>> a = np.concatenate((np.linspace(9.5, 10.5, 11),
+    ...                     np.linspace(-100, -50, 3)))
+    >>> c, low, upp = sigmaclip(a, 1.8, 1.8)
+    >>> (c == np.linspace(9.5, 10.5, 11)).all()
+    True
+
+    """
+    c = np.asarray(a).ravel()
+    delta = 1
+    while delta:
+        c_std = c.std()
+        c_mean = c.mean()
+        size = c.size
+        critlower = c_mean - c_std * low
+        critupper = c_mean + c_std * high
+        c = c[(c >= critlower) & (c <= critupper)]
+        delta = size - c.size
+
+    return SigmaclipResult(c, critlower, critupper)
+
+
+def trimboth(a, proportiontocut, axis=0):
+    """Slice off a proportion of items from both ends of an array.
+
+    Slice off the passed proportion of items from both ends of the passed
+    array (i.e., with `proportiontocut` = 0.1, slices leftmost 10% **and**
+    rightmost 10% of scores). The trimmed values are the lowest and
+    highest ones.
+    Slice off less if proportion results in a non-integer slice index (i.e.
+    conservatively slices off `proportiontocut`).
+
+    Parameters
+    ----------
+    a : array_like
+        Data to trim.
+    proportiontocut : float
+        Proportion (in range 0-1) of the total data set to trim from each end.
+    axis : int or None, optional
+        Axis along which to trim data. Default is 0. If None, compute over
+        the whole array `a`.
+
+    Returns
+    -------
+    out : ndarray
+        Trimmed version of array `a`. The order of the trimmed content
+        is undefined.
+
+    See Also
+    --------
+    trim_mean
+
+    Examples
+    --------
+    Create an array of 10 values and trim 10% of those values from each end:
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> a = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+    >>> stats.trimboth(a, 0.1)
+    array([1, 3, 2, 4, 5, 6, 7, 8])
+
+    Note that the elements of the input array are trimmed by value, but the
+    output array is not necessarily sorted.
+
+    The number of values trimmed from each end is rounded down to the
+    nearest integer. For instance, trimming 25% of the values from each end
+    of an array of 10 values will return an array of 6 values:
+
+    >>> b = np.arange(10)
+    >>> stats.trimboth(b, 1/4).shape
+    (6,)
+
+    Multidimensional arrays can be trimmed along any axis or across the entire
+    array:
+
+    >>> c = [2, 4, 6, 8, 0, 1, 3, 5, 7, 9]
+    >>> d = np.array([a, b, c])
+    >>> stats.trimboth(d, 0.4, axis=0).shape
+    (1, 10)
+    >>> stats.trimboth(d, 0.4, axis=1).shape
+    (3, 2)
+    >>> stats.trimboth(d, 0.4, axis=None).shape
+    (6,)
+
+    """
+    a = np.asarray(a)
+
+    if a.size == 0:
+        return a
+
+    if axis is None:
+        a = a.ravel()
+        axis = 0
+
+    nobs = a.shape[axis]
+    lowercut = int(proportiontocut * nobs)
+    uppercut = nobs - lowercut
+    if (lowercut >= uppercut):
+        raise ValueError("Proportion too big.")
+
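+    # np.partition moves the `lowercut` smallest values before position
+    # `lowercut` and the largest values behind position `uppercut - 1`, so
+    # the slice below keeps exactly the middle of the distribution, in
+    # unspecified order.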
+    atmp = np.partition(a, (lowercut, uppercut - 1), axis)
+
+    sl = [slice(None)] * atmp.ndim
+    sl[axis] = slice(lowercut, uppercut)
+    return atmp[tuple(sl)]
+
+
+def trim1(a, proportiontocut, tail='right', axis=0):
+    """Slice off a proportion from ONE end of the passed array distribution.
+
+    If `proportiontocut` = 0.1, slices off 'leftmost' or 'rightmost'
+    10% of scores. The lowest or highest values are trimmed (depending on
+    the tail).
+    Slice off less if proportion results in a non-integer slice index
+    (i.e. conservatively slices off `proportiontocut`).
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    proportiontocut : float
+        Fraction to cut off of 'left' or 'right' of distribution.
+    tail : {'left', 'right'}, optional
+        Defaults to 'right'.
+    axis : int or None, optional
+        Axis along which to trim data. Default is 0. If None, compute over
+        the whole array `a`.
+
+    Returns
+    -------
+    trim1 : ndarray
+        Trimmed version of array `a`. The order of the trimmed content is
+        undefined.
+
+    Examples
+    --------
+    Create an array of 10 values and trim 20% of its lowest values:
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> a = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+    >>> stats.trim1(a, 0.2, 'left')
+    array([2, 4, 3, 5, 6, 7, 8, 9])
+
+    Note that the elements of the input array are trimmed by value, but the
+    output array is not necessarily sorted.
+
+    The number of values trimmed is rounded down to the nearest integer. For
+    instance, trimming 25% of the values from an array of 10 values will
+    return an array of 8 values:
+
+    >>> b = np.arange(10)
+    >>> stats.trim1(b, 1/4).shape
+    (8,)
+
+    Multidimensional arrays can be trimmed along any axis or across the entire
+    array:
+
+    >>> c = [2, 4, 6, 8, 0, 1, 3, 5, 7, 9]
+    >>> d = np.array([a, b, c])
+    >>> stats.trim1(d, 0.8, axis=0).shape
+    (1, 10)
+    >>> stats.trim1(d, 0.8, axis=1).shape
+    (3, 2)
+    >>> stats.trim1(d, 0.8, axis=None).shape
+    (6,)
+
+    """
+    a = np.asarray(a)
+    if axis is None:
+        a = a.ravel()
+        axis = 0
+
+    nobs = a.shape[axis]
+
+    # avoid possible corner case
+    if proportiontocut >= 1:
+        return []
+
+    if tail.lower() == 'right':
+        lowercut = 0
+        uppercut = nobs - int(proportiontocut * nobs)
+
+    elif tail.lower() == 'left':
+        lowercut = int(proportiontocut * nobs)
+        uppercut = nobs
+
+    else:
+        # Guard against an UnboundLocalError below for an invalid tail value.
+        raise ValueError("tail must be 'left' or 'right'")
+
+    atmp = np.partition(a, (lowercut, uppercut - 1), axis)
+
+    sl = [slice(None)] * atmp.ndim
+    sl[axis] = slice(lowercut, uppercut)
+    return atmp[tuple(sl)]
+
+
+def trim_mean(a, proportiontocut, axis=0):
+    """Return mean of array after trimming distribution from both tails.
+
+    If `proportiontocut` = 0.1, slices off 'leftmost' and 'rightmost' 10% of
+    scores. The input is sorted before slicing. Slices off less if proportion
+    results in a non-integer slice index (i.e., conservatively slices off
+    `proportiontocut`).
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    proportiontocut : float
+        Fraction to cut off of both tails of the distribution.
+    axis : int or None, optional
+        Axis along which the trimmed means are computed. Default is 0.
+        If None, compute over the whole array `a`.
+
+    Returns
+    -------
+    trim_mean : ndarray
+        Mean of trimmed array.
+
+    See Also
+    --------
+    trimboth
+    tmean : Compute the trimmed mean ignoring values outside given `limits`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> x = np.arange(20)
+    >>> stats.trim_mean(x, 0.1)
+    9.5
+    >>> x2 = x.reshape(5, 4)
+    >>> x2
+    array([[ 0,  1,  2,  3],
+           [ 4,  5,  6,  7],
+           [ 8,  9, 10, 11],
+           [12, 13, 14, 15],
+           [16, 17, 18, 19]])
+    >>> stats.trim_mean(x2, 0.25)
+    array([  8.,   9.,  10.,  11.])
+    >>> stats.trim_mean(x2, 0.25, axis=1)
+    array([  1.5,   5.5,   9.5,  13.5,  17.5])
+
+    """
+    a = np.asarray(a)
+
+    if a.size == 0:
+        return np.nan
+
+    if axis is None:
+        a = a.ravel()
+        axis = 0
+
+    nobs = a.shape[axis]
+    lowercut = int(proportiontocut * nobs)
+    uppercut = nobs - lowercut
+    if (lowercut > uppercut):
+        raise ValueError("Proportion too big.")
+
+    atmp = np.partition(a, (lowercut, uppercut - 1), axis)
+
+    sl = [slice(None)] * atmp.ndim
+    sl[axis] = slice(lowercut, uppercut)
+    return np.mean(atmp[tuple(sl)], axis=axis)
+
+
+F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue'))
+
+
+def _create_f_oneway_nan_result(shape, axis):
+    """
+    This is a helper function for f_oneway for creating the return values
+    in certain degenerate conditions.  It creates return values that are
+    all nan with the appropriate shape for the given `shape` and `axis`.
+    """
+    axis = np.core.multiarray.normalize_axis_index(axis, len(shape))
+    shp = shape[:axis] + shape[axis+1:]
+    if shp == ():
+        f = np.nan
+        prob = np.nan
+    else:
+        f = np.full(shp, fill_value=np.nan)
+        prob = f.copy()
+    return F_onewayResult(f, prob)
+
+
+def _first(arr, axis):
+    """Return arr[..., 0:1, ...] where 0:1 is in the `axis` position."""
+    return np.take_along_axis(arr, np.array(0, ndmin=arr.ndim), axis)
+
+
+def f_oneway(*samples, axis=0):
+    """Perform one-way ANOVA.
+
+    The one-way ANOVA tests the null hypothesis that two or more groups have
+    the same population mean.  The test is applied to samples from two or
+    more groups, possibly with differing sizes.
+
+    Parameters
+    ----------
+    sample1, sample2, ... : array_like
+        The sample measurements for each group.  There must be at least
+        two arguments.  If the arrays are multidimensional, then all the
+        dimensions of the array must be the same except for `axis`.
+    axis : int, optional
+        Axis of the input arrays along which the test is applied.
+        Default is 0.
+
+    Returns
+    -------
+    statistic : float
+        The computed F statistic of the test.
+    pvalue : float
+        The associated p-value from the F distribution.
+
+    Warns
+    -----
+    `~scipy.stats.ConstantInputWarning`
+        Raised if all values within each of the input arrays are identical.
+        In this case the F statistic is either infinite or isn't defined,
+        so ``np.inf`` or ``np.nan`` is returned.
+
+    `~scipy.stats.DegenerateDataWarning`
+        Raised if the length of any input array is 0, or if all the input
+        arrays have length 1.  ``np.nan`` is returned for the F statistic
+        and the p-value in these cases.
+
+    Notes
+    -----
+    The ANOVA test has important assumptions that must be satisfied in order
+    for the associated p-value to be valid.
+
+    1. The samples are independent.
+    2. Each sample is from a normally distributed population.
+    3. The population standard deviations of the groups are all equal.  This
+       property is known as homoscedasticity.
+
+    If these assumptions are not true for a given set of data, it may still
+    be possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`) or
+    the Alexander-Govern test (`scipy.stats.alexandergovern`) although with
+    some loss of power.
+
+    The length of each group must be at least one, and there must be at
+    least one group with length greater than one.  If these conditions
+    are not satisfied, a warning is generated and (``np.nan``, ``np.nan``)
+    is returned.
+
+    If all values in each group are identical, and there exist at least two
+    groups with different values, the function generates a warning and
+    returns (``np.inf``, 0).
+
+    If all values in all groups are the same, the function generates a
+    warning and returns (``np.nan``, ``np.nan``).
+
+    The algorithm is from Heiman [2]_, pp.394-7.
+
+    References
+    ----------
+    .. [1] R. Lowry, "Concepts and Applications of Inferential Statistics",
+           Chapter 14, 2014, http://vassarstats.net/textbook/
+
+    .. [2] G.W. Heiman, "Understanding research methods and statistics: An
+           integrated introduction for psychology", Houghton, Mifflin and
+           Company, 2001.
+
+    .. [3] J.H. McDonald, "Handbook of Biological Statistics", One-way ANOVA.
+           http://www.biostathandbook.com/onewayanova.html
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.stats import f_oneway
+
+    Here are some data [3]_ on a shell measurement (the length of the anterior
+    adductor muscle scar, standardized by dividing by length) in the mussel
+    Mytilus trossulus from five locations: Tillamook, Oregon; Newport, Oregon;
+    Petersburg, Alaska; Magadan, Russia; and Tvarminne, Finland, taken from a
+    much larger data set used in McDonald et al. (1991).
+
+    >>> tillamook = [0.0571, 0.0813, 0.0831, 0.0976, 0.0817, 0.0859, 0.0735,
+    ...              0.0659, 0.0923, 0.0836]
+    >>> newport = [0.0873, 0.0662, 0.0672, 0.0819, 0.0749, 0.0649, 0.0835,
+    ...            0.0725]
+    >>> petersburg = [0.0974, 0.1352, 0.0817, 0.1016, 0.0968, 0.1064, 0.105]
+    >>> magadan = [0.1033, 0.0915, 0.0781, 0.0685, 0.0677, 0.0697, 0.0764,
+    ...            0.0689]
+    >>> tvarminne = [0.0703, 0.1026, 0.0956, 0.0973, 0.1039, 0.1045]
+    >>> f_oneway(tillamook, newport, petersburg, magadan, tvarminne)
+    F_onewayResult(statistic=7.121019471642447, pvalue=0.0002812242314534544)
+
+    `f_oneway` accepts multidimensional input arrays.  When the inputs
+    are multidimensional and `axis` is not given, the test is performed
+    along the first axis of the input arrays.  For the following data, the
+    test is performed three times, once for each column.
+
+    >>> a = np.array([[9.87, 9.03, 6.81],
+    ...               [7.18, 8.35, 7.00],
+    ...               [8.39, 7.58, 7.68],
+    ...               [7.45, 6.33, 9.35],
+    ...               [6.41, 7.10, 9.33],
+    ...               [8.00, 8.24, 8.44]])
+    >>> b = np.array([[6.35, 7.30, 7.16],
+    ...               [6.65, 6.68, 7.63],
+    ...               [5.72, 7.73, 6.72],
+    ...               [7.01, 9.19, 7.41],
+    ...               [7.75, 7.87, 8.30],
+    ...               [6.90, 7.97, 6.97]])
+    >>> c = np.array([[3.31, 8.77, 1.01],
+    ...               [8.25, 3.24, 3.62],
+    ...               [6.32, 8.81, 5.19],
+    ...               [7.48, 8.83, 8.91],
+    ...               [8.59, 6.01, 6.07],
+    ...               [3.07, 9.72, 7.48]])
+    >>> F, p = f_oneway(a, b, c)
+    >>> F
+    array([1.75676344, 0.03701228, 3.76439349])
+    >>> p
+    array([0.20630784, 0.96375203, 0.04733157])
+
+    """
+    if len(samples) < 2:
+        raise TypeError('at least two inputs are required;'
+                        f' got {len(samples)}.')
+
+    samples = [np.asarray(sample, dtype=float) for sample in samples]
+
+    # ANOVA on N groups, each in its own array
+    num_groups = len(samples)
+
+    # We haven't explicitly validated axis, but if it is bad, this call of
+    # np.concatenate will raise np.AxisError.  The call will raise ValueError
+    # if the dimensions of all the arrays, except the axis dimension, are not
+    # the same.
+    alldata = np.concatenate(samples, axis=axis)
+    bign = alldata.shape[axis]
+
+    # Check this after forming alldata, so shape errors are detected
+    # and reported before checking for 0 length inputs.
+    if any(sample.shape[axis] == 0 for sample in samples):
+        warnings.warn(stats.DegenerateDataWarning('at least one input '
+                                                  'has length 0'))
+        return _create_f_oneway_nan_result(alldata.shape, axis)
+
+    # Must have at least one group with length greater than 1.
+    if all(sample.shape[axis] == 1 for sample in samples):
+        msg = ('all input arrays have length 1.  f_oneway requires that at '
+               'least one input has length greater than 1.')
+        warnings.warn(stats.DegenerateDataWarning(msg))
+        return _create_f_oneway_nan_result(alldata.shape, axis)
+
+    # Check if all values within each group are identical, and if the common
+    # value in at least one group is different from that in another group.
+    # Based on https://github.com/scipy/scipy/issues/11669
+
+    # If axis=0, say, and the groups have shape (n0, ...), (n1, ...), ...,
+    # then is_const is a boolean array with shape (num_groups, ...).
+    # It is True if the values within the groups along the axis slice are
+    # identical. In the typical case where each input array is 1-d, is_const is
+    # a 1-d array with length num_groups.
+    is_const = np.concatenate(
+        [(_first(sample, axis) == sample).all(axis=axis,
+                                              keepdims=True)
+         for sample in samples],
+        axis=axis
+    )
+
+    # all_const is a boolean array with shape (...) (see previous comment).
+    # It is True if the values within each group along the axis slice are
+    # the same (e.g. [[3, 3, 3], [5, 5, 5, 5], [4, 4, 4]]).
+    all_const = is_const.all(axis=axis)
+    if all_const.any():
+        msg = ("Each of the input arrays is constant; "
+               "the F statistic is not defined or infinite")
+        warnings.warn(stats.ConstantInputWarning(msg))
+
+    # all_same_const is True if all the values in the groups along the axis=0
+    # slice are the same (e.g. [[3, 3, 3], [3, 3, 3, 3], [3, 3, 3]]).
+    all_same_const = (_first(alldata, axis) == alldata).all(axis=axis)
+
+    # Determine the mean of the data, and subtract that from all inputs to a
+    # variance (via sum_of_sq / sq_of_sum) calculation.  Variance is invariant
+    # to a shift in location, and centering all data around zero vastly
+    # improves numerical stability.
+    offset = alldata.mean(axis=axis, keepdims=True)
+    alldata -= offset
+
+    normalized_ss = _square_of_sums(alldata, axis=axis) / bign
+
+    sstot = _sum_of_squares(alldata, axis=axis) - normalized_ss
+
+    ssbn = 0
+    for sample in samples:
+        ssbn += _square_of_sums(sample - offset,
+                                axis=axis) / sample.shape[axis]
+
+    # Naming: variables ending in bn/b are for "between treatments", wn/w are
+    # for "within treatments"
+    ssbn -= normalized_ss
+    sswn = sstot - ssbn
+    dfbn = num_groups - 1
+    dfwn = bign - num_groups
+    msb = ssbn / dfbn
+    msw = sswn / dfwn
+    with np.errstate(divide='ignore', invalid='ignore'):
+        f = msb / msw
+
+    prob = special.fdtrc(dfbn, dfwn, f)   # equivalent to stats.f.sf
+
+    # Fix any f values that should be inf or nan because the corresponding
+    # inputs were constant.
+    if np.isscalar(f):
+        if all_same_const:
+            f = np.nan
+            prob = np.nan
+        elif all_const:
+            f = np.inf
+            prob = 0.0
+    else:
+        f[all_const] = np.inf
+        prob[all_const] = 0.0
+        f[all_same_const] = np.nan
+        prob[all_same_const] = np.nan
+
+    return F_onewayResult(f, prob)
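+
+# Hand-check sketch of the decomposition above (hypothetical toy data, not
+# part of the test suite): for groups [1, 2, 3] and [2, 4, 6], the group
+# means are 2 and 4 and the grand mean is 3, so ssbn = 3*(2-3)**2 +
+# 3*(4-3)**2 = 6 and sswn = 2 + 8 = 10; with dfbn = 1 and dfwn = 4 this
+# gives msb = 6.0, msw = 2.5 and f = 6.0/2.5 = 2.4.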
+
+
+def alexandergovern(*samples, nan_policy='propagate'):
+    """Performs the Alexander Govern test.
+
+    The Alexander-Govern approximation tests the equality of k independent
+    means in the face of heterogeneity of variance. The test is applied to
+    samples from two or more groups, possibly with differing sizes.
+
+    Parameters
+    ----------
+    sample1, sample2, ... : array_like
+        The sample measurements for each group.  There must be at least
+        two samples.
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains nan.
+        The following options are available (default is 'propagate'):
+
+        * 'propagate': returns nan
+        * 'raise': throws an error
+        * 'omit': performs the calculations ignoring nan values
+
+    Returns
+    -------
+    statistic : float
+        The computed A statistic of the test.
+    pvalue : float
+        The associated p-value from the chi-squared distribution.
+
+    Warns
+    -----
+    `~scipy.stats.ConstantInputWarning`
+        Raised if an input is a constant array.  The statistic is not defined
+        in this case, so ``np.nan`` is returned.
+
+    See Also
+    --------
+    f_oneway : one-way ANOVA
+
+    Notes
+    -----
+    The use of this test relies on several assumptions.
+
+    1. The samples are independent.
+    2. Each sample is from a normally distributed population.
+    3. Unlike `f_oneway`, this test does not assume homoscedasticity,
+       instead relaxing the assumption of equal variances.
+
+    Input samples must be finite, one dimensional, and of size greater than
+    one.
+
+    References
+    ----------
+    .. [1] Alexander, Ralph A., and Diane M. Govern. "A New and Simpler
+           Approximation for ANOVA under Variance Heterogeneity." Journal
+           of Educational Statistics, vol. 19, no. 2, 1994, pp. 91-101.
+           JSTOR, www.jstor.org/stable/1165140. Accessed 12 Sept. 2020.
+
+    Examples
+    --------
+    >>> from scipy.stats import alexandergovern
+
+    Here are some data on annual percentage rate of interest charged on
+    new car loans at nine of the largest banks in four American cities
+    taken from the National Institute of Standards and Technology's
+    ANOVA dataset.
+
+    We use `alexandergovern` to test the null hypothesis that all cities
+    have the same mean APR against the alternative that the cities do not
+    all have the same mean APR. We decide that a significance level of 5%
+    is required to reject the null hypothesis in favor of the alternative.
+
+    >>> atlanta = [13.75, 13.75, 13.5, 13.5, 13.0, 13.0, 13.0, 12.75, 12.5]
+    >>> chicago = [14.25, 13.0, 12.75, 12.5, 12.5, 12.4, 12.3, 11.9, 11.9]
+    >>> houston = [14.0, 14.0, 13.51, 13.5, 13.5, 13.25, 13.0, 12.5, 12.5]
+    >>> memphis = [15.0, 14.0, 13.75, 13.59, 13.25, 12.97, 12.5, 12.25,
+    ...           11.89]
+    >>> alexandergovern(atlanta, chicago, houston, memphis)
+    AlexanderGovernResult(statistic=4.65087071883494,
+                          pvalue=0.19922132490385214)
+
+    The p-value is 0.1992, indicating a nearly 20% chance of observing
+    such an extreme value of the test statistic under the null hypothesis.
+    This exceeds 5%, so we do not reject the null hypothesis in favor of
+    the alternative.
+
+    """
+    samples = _alexandergovern_input_validation(samples, nan_policy)
+
+    if np.any([(sample == sample[0]).all() for sample in samples]):
+        msg = "An input array is constant; the statistic is not defined."
+        warnings.warn(stats.ConstantInputWarning(msg))
+        return AlexanderGovernResult(np.nan, np.nan)
+
+    # The following formula numbers reference the equations described on
+    # page 92 of Alexander and Govern. Formulas 5, 6, and 7 describe other
+    # tests that serve as the basis for equation (8) but are not needed
+    # to perform the test.
+
+    # precalculate mean and length of each sample
+    lengths = np.array([ma.count(sample) if nan_policy == 'omit'
+                        else len(sample) for sample in samples])
+    means = np.array([np.mean(sample) for sample in samples])
+
+    # (1) determine standard error of the mean for each sample
+    standard_errors = [np.std(sample, ddof=1) / np.sqrt(length)
+                       for sample, length in zip(samples, lengths)]
+
+    # (2) define a weight for each sample
+    inv_sq_se = 1 / np.square(standard_errors)
+    weights = inv_sq_se / np.sum(inv_sq_se)
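+    # (the weights are normalized so that they sum to 1 across samples)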
+
+    # (3) determine variance-weighted estimate of the common mean
+    var_w = np.sum(weights * means)
+
+    # (4) determine one-sample t statistic for each group
+    t_stats = (means - var_w)/standard_errors
+
+    # calculate parameters to be used in transformation
+    v = lengths - 1
+    a = v - .5
+    b = 48 * a**2
+    c = (a * np.log(1 + (t_stats ** 2)/v))**.5
+
+    # (8) perform a normalizing transformation on t statistic
+    z = (c + ((c**3 + 3*c)/b) -
+         ((4*c**7 + 33*c**5 + 240*c**3 + 855*c) /
+          (b**2*10 + 8*b*c**4 + 1000*b)))
+
+    # (9) calculate statistic
+    A = np.sum(np.square(z))
+
+    # "[the p value is determined from] central chi-square random deviates
+    # with k - 1 degrees of freedom". Alexander, Govern (94)
+    p = distributions.chi2.sf(A, len(samples) - 1)
+    return AlexanderGovernResult(A, p)
+
+
+def _alexandergovern_input_validation(samples, nan_policy):
+    if len(samples) < 2:
+        raise TypeError(f"2 or more inputs required, got {len(samples)}")
+
+    # input arrays are flattened
+    samples = [np.asarray(sample, dtype=float) for sample in samples]
+
+    for i, sample in enumerate(samples):
+        if np.size(sample) <= 1:
+            raise ValueError("Input sample size must be greater than one.")
+        if sample.ndim != 1:
+            raise ValueError("Input samples must be one-dimensional")
+        if np.isinf(sample).any():
+            raise ValueError("Input samples must be finite.")
+
+        contains_nan, nan_policy = _contains_nan(sample,
+                                                 nan_policy=nan_policy)
+        if contains_nan and nan_policy == 'omit':
+            samples[i] = ma.masked_invalid(sample)
+    return samples
+
+
+AlexanderGovernResult = make_dataclass("AlexanderGovernResult", ("statistic",
+                                                                 "pvalue"))
+
+
+def _pearsonr_fisher_ci(r, n, confidence_level, alternative):
+    """
+    Compute the confidence interval for Pearson's R.
+
+    Fisher's transformation is used to compute the confidence interval
+    (https://en.wikipedia.org/wiki/Fisher_transformation).
+    """
+    if r == 1:
+        zr = np.inf
+    elif r == -1:
+        zr = -np.inf
+    else:
+        zr = np.arctanh(r)
+
+    if n > 3:
+        se = np.sqrt(1 / (n - 3))
+        if alternative == "two-sided":
+            h = special.ndtri(0.5 + confidence_level/2)
+            zlo = zr - h*se
+            zhi = zr + h*se
+            rlo = np.tanh(zlo)
+            rhi = np.tanh(zhi)
+        elif alternative == "less":
+            h = special.ndtri(confidence_level)
+            zhi = zr + h*se
+            rhi = np.tanh(zhi)
+            rlo = -1.0
+        else:
+            # alternative == "greater":
+            h = special.ndtri(confidence_level)
+            zlo = zr - h*se
+            rlo = np.tanh(zlo)
+            rhi = 1.0
+    else:
+        rlo, rhi = -1.0, 1.0
+
+    return ConfidenceInterval(low=rlo, high=rhi)
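+
+# Hand-check sketch (illustrative numbers): for r = 0.5, n = 28 and a 95%
+# two-sided interval, se = 1/sqrt(25) = 0.2 and arctanh(0.5) ~= 0.5493, so
+# the interval is roughly tanh(0.5493 -/+ 1.96*0.2) ~= (0.156, 0.736).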
+
+
+ConfidenceInterval = namedtuple('ConfidenceInterval', ['low', 'high'])
+
+PearsonRResultBase = _make_tuple_bunch('PearsonRResultBase',
+                                       ['statistic', 'pvalue'], [])
+
+
+class PearsonRResult(PearsonRResultBase):
+    """
+    Result of `scipy.stats.pearsonr`
+
+    Attributes
+    ----------
+    statistic : float
+        Pearson product-moment correlation coefficient.
+    pvalue : float
+        The p-value associated with the chosen alternative.
+
+    Methods
+    -------
+    confidence_interval
+        Computes the confidence interval of the correlation
+        coefficient `statistic` for the given confidence level.
+
+    """
+    def __init__(self, statistic, pvalue, alternative, n):
+        super().__init__(statistic, pvalue)
+        self._alternative = alternative
+        self._n = n
+
+        # add alias for consistency with other correlation functions
+        self.correlation = statistic
+
+    def confidence_interval(self, confidence_level=0.95):
+        """
+        The confidence interval for the correlation coefficient.
+
+        Compute the confidence interval for the correlation coefficient
+        ``statistic`` with the given confidence level.
+
+        The confidence interval is computed using the Fisher transformation
+        F(r) = arctanh(r) [1]_.  When the sample pairs are drawn from a
+        bivariate normal distribution, F(r) approximately follows a normal
+        distribution with standard error ``1/sqrt(n - 3)``, where ``n`` is the
+        length of the original samples along the calculation axis. When
+        ``n <= 3``, this approximation does not yield a finite, real standard
+        error, so we define the confidence interval to be -1 to 1.
+
+        Parameters
+        ----------
+        confidence_level : float
+            The confidence level for the calculation of the correlation
+            coefficient confidence interval. Default is 0.95.
+
+        Returns
+        -------
+        ci : namedtuple
+            The confidence interval is returned in a ``namedtuple`` with
+            fields `low` and `high`.
+
+        References
+        ----------
+        .. [1] "Pearson correlation coefficient", Wikipedia,
+               https://en.wikipedia.org/wiki/Pearson_correlation_coefficient
+        """
+        return _pearsonr_fisher_ci(self.statistic, self._n, confidence_level,
+                                   self._alternative)
+
+
+def pearsonr(x, y, *, alternative='two-sided'):
+    r"""
+    Pearson correlation coefficient and p-value for testing non-correlation.
+
+    The Pearson correlation coefficient [1]_ measures the linear relationship
+    between two datasets. Like other correlation
+    coefficients, this one varies between -1 and +1 with 0 implying no
+    correlation. Correlations of -1 or +1 imply an exact linear relationship.
+    Positive correlations imply that as x increases, so does y. Negative
+    correlations imply that as x increases, y decreases.
+
+    This function also performs a test of the null hypothesis that the
+    distributions underlying the samples are uncorrelated and normally
+    distributed. (See Kowalski [3]_
+    for a discussion of the effects of non-normality of the input on the
+    distribution of the correlation coefficient.)
+    The p-value roughly indicates the probability of an uncorrelated system
+    producing datasets that have a Pearson correlation at least as extreme
+    as the one computed from these datasets.
+
+    Parameters
+    ----------
+    x : (N,) array_like
+        Input array.
+    y : (N,) array_like
+        Input array.
+    alternative : {'two-sided', 'greater', 'less'}, optional
+        Defines the alternative hypothesis. Default is 'two-sided'.
+        The following options are available:
+
+        * 'two-sided': the correlation is nonzero
+        * 'less': the correlation is negative (less than zero)
+        * 'greater':  the correlation is positive (greater than zero)
+
+        .. versionadded:: 1.9.0
+
+    Returns
+    -------
+    result : `~scipy.stats._result_classes.PearsonRResult`
+        An object with the following attributes:
+
+        statistic : float
+            Pearson product-moment correlation coefficient.
+        pvalue : float
+            The p-value associated with the chosen alternative.
+
+        The object has the following method:
+
+        confidence_interval(confidence_level=0.95)
+            This method computes the confidence interval of the correlation
+            coefficient `statistic` for the given confidence level.
+            The confidence interval is returned in a ``namedtuple`` with
+            fields `low` and `high`.  See the Notes for more details.
+
+    Warns
+    -----
+    `~scipy.stats.ConstantInputWarning`
+        Raised if an input is a constant array.  The correlation coefficient
+        is not defined in this case, so ``np.nan`` is returned.
+
+    `~scipy.stats.NearConstantInputWarning`
+        Raised if an input is "nearly" constant.  The array ``x`` is considered
+        nearly constant if ``norm(x - mean(x)) < 1e-13 * abs(mean(x))``.
+        Numerical errors in the calculation ``x - mean(x)`` in this case might
+        result in an inaccurate calculation of r.
+
+    See Also
+    --------
+    spearmanr : Spearman rank-order correlation coefficient.
+    kendalltau : Kendall's tau, a correlation measure for ordinal data.
+
+    Notes
+    -----
+    The correlation coefficient is calculated as follows:
+
+    .. math::
+
+        r = \frac{\sum (x - m_x) (y - m_y)}
+                 {\sqrt{\sum (x - m_x)^2 \sum (y - m_y)^2}}
+
+    where :math:`m_x` is the mean of the vector x and :math:`m_y` is
+    the mean of the vector y.
+
+    Under the assumption that x and y are drawn from
+    independent normal distributions (so the population correlation coefficient
+    is 0), the probability density function of the sample correlation
+    coefficient r is ([1]_, [2]_):
+
+    .. math::
+        f(r) = \frac{{(1-r^2)}^{n/2-2}}{\mathrm{B}(\frac{1}{2},\frac{n}{2}-1)}
+
+    where n is the number of samples, and B is the beta function.  This
+    is sometimes referred to as the exact distribution of r.  This is
+    the distribution that is used in `pearsonr` to compute the p-value.
+    The distribution is a beta distribution on the interval [-1, 1],
+    with equal shape parameters a = b = n/2 - 1.  In terms of SciPy's
+    implementation of the beta distribution, the distribution of r is::
+
+        dist = scipy.stats.beta(n/2 - 1, n/2 - 1, loc=-1, scale=2)
+
+    The default p-value returned by `pearsonr` is a two-sided p-value. For a
+    given sample with correlation coefficient r, the p-value is
+    the probability that abs(r') of a random sample x' and y' drawn from
+    the population with zero correlation would be greater than or equal
+    to abs(r). In terms of the object ``dist`` shown above, the p-value
+    for a given r and length n can be computed as::
+
+        p = 2*dist.cdf(-abs(r))
+
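+    As a quick consistency sketch (reusing the correlation reported in the
+    first example below, where ``n = 5``), the p-value there can be
+    reproduced as::
+
+        dist = scipy.stats.beta(5/2 - 1, 5/2 - 1, loc=-1, scale=2)
+        p = 2*dist.cdf(-abs(-0.7426106572325056))  # ~0.1506
+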
+    When n is 2, the above continuous distribution is not well-defined.
+    One can interpret the limit of the beta distribution as the shape
+    parameters a and b approach a = b = 0 as a discrete distribution with
+    equal probability masses at r = 1 and r = -1.  More directly, one
+    can observe that, given the data x = [x1, x2] and y = [y1, y2], and
+    assuming x1 != x2 and y1 != y2, the only possible values for r are 1
+    and -1.  Because abs(r') for any sample x' and y' with length 2 will
+    be 1, the two-sided p-value for a sample of length 2 is always 1.
+
+    For backwards compatibility, the object that is returned also behaves
+    like a tuple of length two that holds the statistic and the p-value.
+
+    References
+    ----------
+    .. [1] "Pearson correlation coefficient", Wikipedia,
+           https://en.wikipedia.org/wiki/Pearson_correlation_coefficient
+    .. [2] Student, "Probable error of a correlation coefficient",
+           Biometrika, Volume 6, Issue 2-3, 1 September 1908, pp. 302-310.
+    .. [3] C. J. Kowalski, "On the Effects of Non-Normality on the Distribution
+           of the Sample Product-Moment Correlation Coefficient"
+           Journal of the Royal Statistical Society. Series C (Applied
+           Statistics), Vol. 21, No. 1 (1972), pp. 1-12.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> res = stats.pearsonr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
+    >>> res
+    PearsonRResult(statistic=-0.7426106572325056, pvalue=0.15055580885344558)
+    >>> res.confidence_interval()
+    ConfidenceInterval(low=-0.9816918044786463, high=0.40501116769030976)
+
+    There is a linear dependence between x and y if y = a + b*x + e, where
+    a, b are constants and e is a random error term, assumed to be independent
+    of x. For simplicity, assume that x is standard normal, a=0, b=1 and let
+    e follow a normal distribution with mean zero and standard deviation s>0.
+
+    >>> rng = np.random.default_rng()
+    >>> s = 0.5
+    >>> x = stats.norm.rvs(size=500, random_state=rng)
+    >>> e = stats.norm.rvs(scale=s, size=500, random_state=rng)
+    >>> y = x + e
+    >>> stats.pearsonr(x, y).statistic
+    0.9001942438244763
+
+    This should be close to the exact value given by
+
+    >>> 1/np.sqrt(1 + s**2)
+    0.8944271909999159
+
+    For s=0.5, we observe a high level of correlation. In general, a large
+    variance of the noise reduces the correlation, while the correlation
+    approaches one as the variance of the error goes to zero.
+
+    It is important to keep in mind that no correlation does not imply
+    independence unless (x, y) is jointly normal. Correlation can even be zero
+    when there is a very simple dependence structure: if x follows a
+    standard normal distribution, let y = abs(x). Note that the correlation
+    between x and y is zero. Indeed, since the expectation of x is zero,
+    cov(x, y) = E[x*y]. By definition, this equals E[x*abs(x)] which is zero
+    by symmetry. The following lines of code illustrate this observation:
+
+    >>> y = np.abs(x)
+    >>> stats.pearsonr(x, y)
+    PearsonRResult(statistic=-0.05444919272687482, pvalue=0.22422294836207743)
+
+    A non-zero correlation coefficient can be misleading. For example, if x has
+    a standard normal distribution, define y = x if x < 0 and y = 0 otherwise.
+    A simple calculation shows that corr(x, y) = sqrt(2/Pi) = 0.797...,
+    implying a high level of correlation:
+
+    >>> y = np.where(x < 0, x, 0)
+    >>> stats.pearsonr(x, y)
+    PearsonRResult(statistic=0.861985781588, pvalue=4.813432002751103e-149)
+
+    This is counterintuitive, since there is no dependence between x and y
+    when x is larger than zero, which happens in about half of the cases if
+    we sample x and y.
+
+    """
+    n = len(x)
+    if n != len(y):
+        raise ValueError('x and y must have the same length.')
+
+    if n < 2:
+        raise ValueError('x and y must have length at least 2.')
+
+    x = np.asarray(x)
+    y = np.asarray(y)
+
+    if (np.issubdtype(x.dtype, np.complexfloating)
+            or np.issubdtype(y.dtype, np.complexfloating)):
+        raise ValueError('This function does not support complex data')
+
+    # If an input is constant, the correlation coefficient is not defined.
+    if (x == x[0]).all() or (y == y[0]).all():
+        msg = ("An input array is constant; the correlation coefficient "
+               "is not defined.")
+        warnings.warn(stats.ConstantInputWarning(msg))
+        result = PearsonRResult(statistic=np.nan, pvalue=np.nan, n=n,
+                                alternative=alternative)
+        return result
+
+    # dtype is the data type for the calculations.  This expression ensures
+    # that the data type is at least 64 bit floating point.  It might have
+    # more precision if the input is, for example, np.longdouble.
+    dtype = type(1.0 + x[0] + y[0])
+
+    if n == 2:
+        r = dtype(np.sign(x[1] - x[0])*np.sign(y[1] - y[0]))
+        result = PearsonRResult(statistic=r, pvalue=1.0, n=n,
+                                alternative=alternative)
+        return result
+
+    xmean = x.mean(dtype=dtype)
+    ymean = y.mean(dtype=dtype)
+
+    # By using `astype(dtype)`, we ensure that the intermediate calculations
+    # use at least 64 bit floating point.
+    xm = x.astype(dtype) - xmean
+    ym = y.astype(dtype) - ymean
+
+    # Unlike np.linalg.norm or the expression sqrt((xm*xm).sum()),
+    # scipy.linalg.norm(xm) does not overflow if xm is, for example,
+    # [-5e210, 5e210, 3e200, -3e200]
+    normxm = linalg.norm(xm)
+    normym = linalg.norm(ym)
+
+    threshold = 1e-13
+    if normxm < threshold*abs(xmean) or normym < threshold*abs(ymean):
+        # If all the values in x (likewise y) are very close to the mean,
+        # the loss of precision that occurs in the subtraction xm = x - xmean
+        # might result in large errors in r.
+        msg = ("An input array is nearly constant; the computed "
+               "correlation coefficient may be inaccurate.")
+        warnings.warn(stats.NearConstantInputWarning(msg))
+
+    r = np.dot(xm/normxm, ym/normym)
+
+    # Presumably, if abs(r) > 1, then it is only some small artifact of
+    # floating point arithmetic.
+    r = max(min(r, 1.0), -1.0)
+
+    # As explained in the docstring, the distribution of `r` under the null
+    # hypothesis is the beta distribution on (-1, 1) with a = b = n/2 - 1.
+    ab = n/2 - 1
+    dist = stats.beta(ab, ab, loc=-1, scale=2)
+    if alternative == 'two-sided':
+        prob = 2*dist.sf(abs(r))
+    elif alternative == 'less':
+        prob = dist.cdf(r)
+    elif alternative == 'greater':
+        prob = dist.sf(r)
+    else:
+        raise ValueError('alternative must be one of '
+                         '["two-sided", "less", "greater"]')
+
+    return PearsonRResult(statistic=r, pvalue=prob, n=n,
+                          alternative=alternative)
+
+
+def fisher_exact(table, alternative='two-sided'):
+    """Perform a Fisher exact test on a 2x2 contingency table.
+
+    The null hypothesis is that the true odds ratio of the populations
+    underlying the observations is one, and the observations were sampled
+    from these populations under a condition: the marginals of the
+    resulting table must equal those of the observed table. The statistic
+    returned is the unconditional maximum likelihood estimate of the odds
+    ratio, and the p-value is the probability under the null hypothesis of
+    obtaining a table at least as extreme as the one that was actually
+    observed. There are other possible choices of statistic and two-sided
+    p-value definition associated with Fisher's exact test; please see the
+    Notes for more information.
+
+    Parameters
+    ----------
+    table : array_like of ints
+        A 2x2 contingency table.  Elements must be non-negative integers.
+    alternative : {'two-sided', 'less', 'greater'}, optional
+        Defines the alternative hypothesis.
+        The following options are available (default is 'two-sided'):
+
+        * 'two-sided': the odds ratio of the underlying population is not one
+        * 'less': the odds ratio of the underlying population is less than one
+        * 'greater': the odds ratio of the underlying population is greater
+          than one
+
+        See the Notes for more details.
+
+    Returns
+    -------
+    res : SignificanceResult
+        An object containing attributes:
+
+        statistic : float
+            This is the prior odds ratio, not a posterior estimate.
+        pvalue : float
+            The probability under the null hypothesis of obtaining a
+            table at least as extreme as the one that was actually observed.
+
+    See Also
+    --------
+    chi2_contingency : Chi-square test of independence of variables in a
+        contingency table.  This can be used as an alternative to
+        `fisher_exact` when the numbers in the table are large.
+    contingency.odds_ratio : Compute the odds ratio (sample or conditional
+        MLE) for a 2x2 contingency table.
+    barnard_exact : Barnard's exact test, which is a more powerful alternative
+        to Fisher's exact test for 2x2 contingency tables.
+    boschloo_exact : Boschloo's exact test, which is a more powerful alternative
+        to Fisher's exact test for 2x2 contingency tables.
+
+    Notes
+    -----
+    *Null hypothesis and p-values*
+
+    The null hypothesis is that the true odds ratio of the populations
+    underlying the observations is one, and the observations were sampled at
+    random from these populations under a condition: the marginals of the
+    resulting table must equal those of the observed table. Equivalently,
+    the null hypothesis is that the input table is from the hypergeometric
+    distribution with parameters (as used in `hypergeom`)
+    ``M = a + b + c + d``, ``n = a + b`` and ``N = a + c``, where the
+    input table is ``[[a, b], [c, d]]``.  This distribution has support
+    ``max(0, N + n - M) <= x <= min(N, n)``, or, in terms of the values
+    in the input table, ``max(0, a - d) <= x <= a + min(b, c)``.  ``x``
+    can be interpreted as the upper-left element of a 2x2 table, so the
+    tables in the distribution have form::
+
+        [  x           n - x     ]
+        [N - x    M - (n + N) + x]
+
+    For example, if::
+
+        table = [6  2]
+                [1  4]
+
+    then the support is ``2 <= x <= 7``, and the tables in the distribution
+    are::
+
+        [2 6]   [3 5]   [4 4]   [5 3]   [6 2]  [7 1]
+        [5 0]   [4 1]   [3 2]   [2 3]   [1 4]  [0 5]
+
+    The probability of each table is given by the hypergeometric distribution
+    ``hypergeom.pmf(x, M, n, N)``.  For this example, these are (rounded to
+    three significant digits)::
+
+        x       2      3      4      5       6        7
+        p  0.0163  0.163  0.408  0.326  0.0816  0.00466
+
+    These can be computed with::
+
+        >>> import numpy as np
+        >>> from scipy.stats import hypergeom
+        >>> table = np.array([[6, 2], [1, 4]])
+        >>> M = table.sum()
+        >>> n = table[0].sum()
+        >>> N = table[:, 0].sum()
+        >>> start, end = hypergeom.support(M, n, N)
+        >>> hypergeom.pmf(np.arange(start, end+1), M, n, N)
+        array([0.01631702, 0.16317016, 0.40792541, 0.32634033, 0.08158508,
+               0.004662  ])
+
+    The two-sided p-value is the probability that, under the null hypothesis,
+    a random table would have a probability equal to or less than the
+    probability of the input table.  For our example, the probability of
+    the input table (where ``x = 6``) is 0.0816.  The x values where the
+    probability does not exceed this are 2, 6 and 7, so the two-sided p-value
+    is ``0.0163 + 0.0816 + 0.00466 ~= 0.10256``::
+
+        >>> from scipy.stats import fisher_exact
+        >>> res = fisher_exact(table, alternative='two-sided')
+        >>> res.pvalue
+        0.10256410256410257
+
+    The one-sided p-value for ``alternative='greater'`` is the probability
+    that a random table has ``x >= a``, which in our example is ``x >= 6``,
+    or ``0.0816 + 0.00466 ~= 0.08626``::
+
+        >>> res = fisher_exact(table, alternative='greater')
+        >>> res.pvalue
+        0.08624708624708627
+
+    This is equivalent to computing the survival function of the
+    distribution at ``x = 5`` (one less than ``x`` from the input table,
+    because we want to include the probability of ``x = 6`` in the sum)::
+
+        >>> hypergeom.sf(5, M, n, N)
+        0.08624708624708627
+
+    For ``alternative='less'``, the one-sided p-value is the probability
+    that a random table has ``x <= a``, (i.e. ``x <= 6`` in our example),
+    or ``0.0163 + 0.163 + 0.408 + 0.326 + 0.0816 ~= 0.9949``::
+
+        >>> res = fisher_exact(table, alternative='less')
+        >>> res.pvalue
+        0.9953379953379957
+
+    This is equivalent to computing the cumulative distribution function
+    of the distribution at ``x = 6``::
+
+        >>> hypergeom.cdf(6, M, n, N)
+        0.9953379953379957
+
+    *Odds ratio*
+
+    The calculated odds ratio is different from the value computed by the
+    R function ``fisher.test``.  This implementation returns the "sample"
+    or "unconditional" maximum likelihood estimate, while ``fisher.test``
+    in R uses the conditional maximum likelihood estimate.  To compute the
+    conditional maximum likelihood estimate of the odds ratio, use
+    `scipy.stats.contingency.odds_ratio`.
+
+    Examples
+    --------
+    Say we spend a few days counting whales and sharks in the Atlantic and
+    Indian oceans. In the Atlantic ocean we find 8 whales and 1 shark, in the
+    Indian ocean 2 whales and 5 sharks. Then our contingency table is::
+
+                Atlantic  Indian
+        whales     8        2
+        sharks     1        5
+
+    We use this table to find the p-value:
+
+    >>> from scipy.stats import fisher_exact
+    >>> res = fisher_exact([[8, 2], [1, 5]])
+    >>> res.pvalue
+    0.0349...
+
+    The probability that we would observe this or an even more imbalanced ratio
+    by chance is about 3.5%.  A commonly used significance level is 5%--if we
+    adopt that, we can therefore conclude that our observed imbalance is
+    statistically significant; whales prefer the Atlantic while sharks prefer
+    the Indian ocean.
+
+    """
+    hypergeom = distributions.hypergeom
+    # int32 is not enough for the algorithm
+    c = np.asarray(table, dtype=np.int64)
+    if not c.shape == (2, 2):
+        raise ValueError("The input `table` must be of shape (2, 2).")
+
+    if np.any(c < 0):
+        raise ValueError("All values in `table` must be nonnegative.")
+
+    if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
+        # If both values in a row or column are zero, the p-value is 1 and
+        # the odds ratio is NaN.
+        return SignificanceResult(np.nan, 1.0)
+
+    if c[1, 0] > 0 and c[0, 1] > 0:
+        oddsratio = c[0, 0] * c[1, 1] / (c[1, 0] * c[0, 1])
+    else:
+        oddsratio = np.inf
+
+    n1 = c[0, 0] + c[0, 1]
+    n2 = c[1, 0] + c[1, 1]
+    n = c[0, 0] + c[1, 0]
+
+    def pmf(x):
+        return hypergeom.pmf(x, n1 + n2, n1, n)
+
+    if alternative == 'less':
+        pvalue = hypergeom.cdf(c[0, 0], n1 + n2, n1, n)
+    elif alternative == 'greater':
+        # Same formula as the 'less' case, but with the second column.
+        pvalue = hypergeom.cdf(c[0, 1], n1 + n2, n1, c[0, 1] + c[1, 1])
+    elif alternative == 'two-sided':
+        mode = int((n + 1) * (n1 + 1) / (n1 + n2 + 2))
+        pexact = hypergeom.pmf(c[0, 0], n1 + n2, n1, n)
+        pmode = hypergeom.pmf(mode, n1 + n2, n1, n)
+
+        epsilon = 1e-14
+        gamma = 1 + epsilon
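+        # gamma inflates pexact by a tiny relative tolerance so that tables
+        # whose probability matches pexact up to round-off are still counted
+        # as "at least as extreme".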
+
+        if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= epsilon:
+            return SignificanceResult(oddsratio, 1.)
+
+        elif c[0, 0] < mode:
+            plower = hypergeom.cdf(c[0, 0], n1 + n2, n1, n)
+            if hypergeom.pmf(n, n1 + n2, n1, n) > pexact * gamma:
+                return SignificanceResult(oddsratio, plower)
+
+            guess = _binary_search(lambda x: -pmf(x), -pexact * gamma, mode, n)
+            pvalue = plower + hypergeom.sf(guess, n1 + n2, n1, n)
+        else:
+            pupper = hypergeom.sf(c[0, 0] - 1, n1 + n2, n1, n)
+            if hypergeom.pmf(0, n1 + n2, n1, n) > pexact * gamma:
+                return SignificanceResult(oddsratio, pupper)
+
+            guess = _binary_search(pmf, pexact * gamma, 0, mode)
+            pvalue = pupper + hypergeom.cdf(guess, n1 + n2, n1, n)
+    else:
+        msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}"
+        raise ValueError(msg)
+
+    pvalue = min(pvalue, 1.0)
+
+    return SignificanceResult(oddsratio, pvalue)
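+
+# Hand-check sketch for the sample odds ratio (the whale/shark table from
+# the docstring): for [[8, 2], [1, 5]], oddsratio = (8 * 5) / (1 * 2) = 20.0.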
+
+
+def spearmanr(a, b=None, axis=0, nan_policy='propagate',
+              alternative='two-sided'):
+    """Calculate a Spearman correlation coefficient with associated p-value.
+
+    The Spearman rank-order correlation coefficient is a nonparametric measure
+    of the monotonicity of the relationship between two datasets.
+    Like other correlation coefficients,
+    this one varies between -1 and +1 with 0 implying no correlation.
+    Correlations of -1 or +1 imply an exact monotonic relationship. Positive
+    correlations imply that as x increases, so does y. Negative correlations
+    imply that as x increases, y decreases.
+
+    The p-value roughly indicates the probability of an uncorrelated system
+    producing datasets that have a Spearman correlation at least as extreme
+    as the one computed from these datasets. Although calculation of the
+    p-value does not make strong assumptions about the distributions underlying
+    the samples, it is only accurate for very large samples (>500
+    observations). For smaller sample sizes, consider a permutation test (see
+    Examples section below).
+
+    Parameters
+    ----------
+    a, b : 1D or 2D array_like, b is optional
+        One or two 1-D or 2-D arrays containing multiple variables and
+        observations. When these are 1-D, each represents a vector of
+        observations of a single variable. For the behavior in the 2-D case,
+        see under ``axis``, below.
+        Both arrays need to have the same length in the ``axis`` dimension.
+    axis : int or None, optional
+        If axis=0 (default), then each column represents a variable, with
+        observations in the rows. If axis=1, the relationship is transposed:
+        each row represents a variable, while the columns contain observations.
+        If axis=None, then both arrays will be raveled.
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains nan.
+        The following options are available (default is 'propagate'):
+
+        * 'propagate': returns nan
+        * 'raise': throws an error
+        * 'omit': performs the calculations ignoring nan values
+
+    alternative : {'two-sided', 'less', 'greater'}, optional
+        Defines the alternative hypothesis. Default is 'two-sided'.
+        The following options are available:
+
+        * 'two-sided': the correlation is nonzero
+        * 'less': the correlation is negative (less than zero)
+        * 'greater':  the correlation is positive (greater than zero)
+
+        .. versionadded:: 1.7.0
+
+    Returns
+    -------
+    res : SignificanceResult
+        An object containing attributes:
+
+        statistic : float or ndarray (2-D square)
+            Spearman correlation matrix or correlation coefficient (if only 2
+            variables are given as parameters). Correlation matrix is square
+            with length equal to total number of variables (columns or rows) in
+            ``a`` and ``b`` combined.
+        pvalue : float
+            The p-value for a hypothesis test whose null hypothesis
+            is that two sets of data are linearly uncorrelated. See
+            `alternative` above for alternative hypotheses. `pvalue` has the
+            same shape as `statistic`.
+
+    Warns
+    -----
+    `~scipy.stats.ConstantInputWarning`
+        Raised if an input is a constant array.  The correlation coefficient
+        is not defined in this case, so ``np.nan`` is returned.
+
+    References
+    ----------
+    .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard Probability
+       and Statistics Tables and Formulae. Chapman & Hall: New York.
+       Section 14.7.
+    .. [2] Kendall, M. G. and Stuart, A. (1973). The Advanced Theory of
+       Statistics, Volume 2: Inference and Relationship. Griffin.
+       Section 31.18.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> res = stats.spearmanr([1, 2, 3, 4, 5], [5, 6, 7, 8, 7])
+    >>> res.statistic
+    0.8207826816681233
+    >>> res.pvalue
+    0.08858700531354381
+    >>> rng = np.random.default_rng()
+    >>> x2n = rng.standard_normal((100, 2))
+    >>> y2n = rng.standard_normal((100, 2))
+    >>> res = stats.spearmanr(x2n)
+    >>> res.statistic, res.pvalue
+    (-0.07960396039603959, 0.4311168705769747)
+    >>> res = stats.spearmanr(x2n[:, 0], x2n[:, 1])
+    >>> res.statistic, res.pvalue
+    (-0.07960396039603959, 0.4311168705769747)
+    >>> res = stats.spearmanr(x2n, y2n)
+    >>> res.statistic
+    array([[ 1.        , -0.07960396, -0.08314431,  0.09662166],
+           [-0.07960396,  1.        , -0.14448245,  0.16738074],
+           [-0.08314431, -0.14448245,  1.        ,  0.03234323],
+           [ 0.09662166,  0.16738074,  0.03234323,  1.        ]])
+    >>> res.pvalue
+    array([[0.        , 0.43111687, 0.41084066, 0.33891628],
+           [0.43111687, 0.        , 0.15151618, 0.09600687],
+           [0.41084066, 0.15151618, 0.        , 0.74938561],
+           [0.33891628, 0.09600687, 0.74938561, 0.        ]])
+    >>> res = stats.spearmanr(x2n.T, y2n.T, axis=1)
+    >>> res.statistic
+    array([[ 1.        , -0.07960396, -0.08314431,  0.09662166],
+           [-0.07960396,  1.        , -0.14448245,  0.16738074],
+           [-0.08314431, -0.14448245,  1.        ,  0.03234323],
+           [ 0.09662166,  0.16738074,  0.03234323,  1.        ]])
+    >>> res = stats.spearmanr(x2n, y2n, axis=None)
+    >>> res.statistic, res.pvalue
+    (0.044981624540613524, 0.5270803651336189)
+    >>> res = stats.spearmanr(x2n.ravel(), y2n.ravel())
+    >>> res.statistic, res.pvalue
+    (0.044981624540613524, 0.5270803651336189)
+
+    >>> rng = np.random.default_rng()
+    >>> xint = rng.integers(10, size=(100, 2))
+    >>> res = stats.spearmanr(xint)
+    >>> res.statistic, res.pvalue
+    (0.09800224850707953, 0.3320271757932076)
+
+    For small samples, consider performing a permutation test instead of
+    relying on the asymptotic p-value. Note that to calculate the null
+    distribution of the statistic (for all possible pairings between
+    observations in sample ``x`` and ``y``), only one of the two inputs needs
+    to be permuted.
+
+    >>> x = [1.76405235, 0.40015721, 0.97873798,
+    ...      2.2408932, 1.86755799, -0.97727788]
+    >>> y = [2.71414076, 0.2488, 0.87551913,
+    ...      2.6514917, 2.01160156, 0.47699563]
+    >>> def statistic(x):  # permute only `x`
+    ...     return stats.spearmanr(x, y).statistic
+    >>> res_exact = stats.permutation_test((x,), statistic,
+    ...                                    permutation_type='pairings')
+    >>> res_asymptotic = stats.spearmanr(x, y)
+    >>> res_exact.pvalue, res_asymptotic.pvalue  # asymptotic pvalue is too low
+    (0.10277777777777777, 0.07239650145772594)
+
+    """
+    if axis is not None and axis > 1:
+        raise ValueError("spearmanr only handles 1-D or 2-D arrays, "
+                         "supplied axis argument {}, please use only "
+                         "values 0, 1 or None for axis".format(axis))
+
+    a, axisout = _chk_asarray(a, axis)
+    if a.ndim > 2:
+        raise ValueError("spearmanr only handles 1-D or 2-D arrays")
+
+    if b is None:
+        if a.ndim < 2:
+            raise ValueError("`spearmanr` needs at least 2 "
+                             "variables to compare")
+    else:
+        # Concatenate a and b, so that we now only have to handle the case
+        # of a 2-D `a`.
+        b, _ = _chk_asarray(b, axis)
+        if axisout == 0:
+            a = np.column_stack((a, b))
+        else:
+            a = np.row_stack((a, b))
+
+    n_vars = a.shape[1 - axisout]
+    n_obs = a.shape[axisout]
+    if n_obs <= 1:
+        # Handle empty arrays or single observations.
+        res = SignificanceResult(np.nan, np.nan)
+        res.correlation = np.nan
+        return res
+
+    warn_msg = ("An input array is constant; the correlation coefficient "
+                "is not defined.")
+    if axisout == 0:
+        if (a[:, 0][0] == a[:, 0]).all() or (a[:, 1][0] == a[:, 1]).all():
+            # If an input is constant, the correlation coefficient
+            # is not defined.
+            warnings.warn(stats.ConstantInputWarning(warn_msg))
+            res = SignificanceResult(np.nan, np.nan)
+            res.correlation = np.nan
+            return res
+    else:  # case when axisout == 1 b/c a is 2 dim only
+        if (a[0, :][0] == a[0, :]).all() or (a[1, :][0] == a[1, :]).all():
+            # If an input is constant, the correlation coefficient
+            # is not defined.
+            warnings.warn(stats.ConstantInputWarning(warn_msg))
+            res = SignificanceResult(np.nan, np.nan)
+            res.correlation = np.nan
+            return res
+
+    a_contains_nan, nan_policy = _contains_nan(a, nan_policy)
+    variable_has_nan = np.zeros(n_vars, dtype=bool)
+    if a_contains_nan:
+        if nan_policy == 'omit':
+            return mstats_basic.spearmanr(a, axis=axis, nan_policy=nan_policy,
+                                          alternative=alternative)
+        elif nan_policy == 'propagate':
+            if a.ndim == 1 or n_vars <= 2:
+                res = SignificanceResult(np.nan, np.nan)
+                res.correlation = np.nan
+                return res
+            else:
+                # Keep track of variables with NaNs, set the outputs to NaN
+                # only for those variables
+                variable_has_nan = np.isnan(a).any(axis=axisout)
+
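+    # Spearman's rho is the Pearson correlation of the rank-transformed data,
+    # so rank each variable along the observation axis and take np.corrcoef.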
+    a_ranked = np.apply_along_axis(rankdata, axisout, a)
+    rs = np.corrcoef(a_ranked, rowvar=axisout)
+    dof = n_obs - 2  # degrees of freedom
+
+    # rs can have elements equal to 1, so avoid zero division warnings
+    with np.errstate(divide='ignore'):
+        # clip the small negative values possibly caused by rounding
+        # errors before taking the square root
+        t = rs * np.sqrt((dof/((rs+1.0)*(1.0-rs))).clip(0))
+
+    t, prob = _ttest_finish(dof, t, alternative)
+
+    # For backwards compatibility, return scalars when comparing 2 columns
+    if rs.shape == (2, 2):
+        res = SignificanceResult(rs[1, 0], prob[1, 0])
+        res.correlation = rs[1, 0]
+        return res
+    else:
+        rs[variable_has_nan, :] = np.nan
+        rs[:, variable_has_nan] = np.nan
+        res = SignificanceResult(rs, prob)
+        res.correlation = rs
+        return res
+
+
+def pointbiserialr(x, y):
+    r"""Calculate a point biserial correlation coefficient and its p-value.
+
+    The point biserial correlation is used to measure the relationship
+    between a binary variable, x, and a continuous variable, y. Like other
+    correlation coefficients, this one varies between -1 and +1 with 0
+    implying no correlation. Correlations of -1 or +1 imply a determinative
+    relationship.
+
+    The point-biserial correlation can be computed with a shortcut formula,
+    but this function produces the same result by calling `pearsonr`.
+
+    Parameters
+    ----------
+    x : array_like of bools
+        Input array.
+    y : array_like
+        Input array.
+
+    Returns
+    -------
+    res: SignificanceResult
+        An object containing attributes:
+
+        statistic : float
+            The R value.
+        pvalue : float
+            The two-sided p-value.
+
+    Notes
+    -----
+    `pointbiserialr` uses a t-test with ``n-2`` degrees of freedom.
+    It is equivalent to `pearsonr`.
+
+    The value of the point-biserial correlation can be calculated from:
+
+    .. math::
+
+        r_{pb} = \frac{\overline{Y_{1}} -
+                 \overline{Y_{0}}}{s_{y}}\sqrt{\frac{N_{0} N_{1}}{N (N - 1)}}
+
+    Where :math:`Y_{0}` and :math:`Y_{1}` are means of the metric
+    observations coded 0 and 1 respectively; :math:`N_{0}` and :math:`N_{1}`
+    are number of observations coded 0 and 1 respectively; :math:`N` is the
+    total number of observations and :math:`s_{y}` is the standard
+    deviation of all the metric observations.
+
+    A value of :math:`r_{pb}` that is significantly different from zero is
+    completely equivalent to a significant difference in means between the two
+    groups. Thus, an independent groups t-test with :math:`N-2` degrees of
+    freedom may be used to test whether :math:`r_{pb}` is nonzero. The
+    relation between the t-statistic for comparing two independent groups and
+    :math:`r_{pb}` is given by:
+
+    .. math::
+
+        t = \sqrt{N - 2}\frac{r_{pb}}{\sqrt{1 - r^{2}_{pb}}}
+
+    References
+    ----------
+    .. [1] J. Lev, "The Point Biserial Coefficient of Correlation", Ann. Math.
+           Statist., Vol. 20, no.1, pp. 125-126, 1949.
+
+    .. [2] R.F. Tate, "Correlation Between a Discrete and a Continuous
+           Variable. Point-Biserial Correlation.", Ann. Math. Statist., Vol. 25,
+           no. 3, pp. 603-607, 1954.
+
+    .. [3] D. Kornbrot "Point Biserial Correlation", In Wiley StatsRef:
+           Statistics Reference Online (eds N. Balakrishnan, et al.), 2014.
+           :doi:`10.1002/9781118445112.stat06227`
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> a = np.array([0, 0, 0, 1, 1, 1, 1])
+    >>> b = np.arange(7)
+    >>> stats.pointbiserialr(a, b)
+    (0.8660254037844386, 0.011724811003954652)
+    >>> stats.pearsonr(a, b)
+    (0.86602540378443871, 0.011724811003954626)
+    >>> np.corrcoef(a, b)
+    array([[ 1.       ,  0.8660254],
+           [ 0.8660254,  1.       ]])
+
+    """
+    rpb, prob = pearsonr(x, y)
+    # create result object with alias for backward compatibility
+    res = SignificanceResult(rpb, prob)
+    res.correlation = rpb
+    return res
+
+
+def kendalltau(x, y, initial_lexsort=None, nan_policy='propagate',
+               method='auto', variant='b', alternative='two-sided'):
+    """Calculate Kendall's tau, a correlation measure for ordinal data.
+
+    Kendall's tau is a measure of the correspondence between two rankings.
+    Values close to 1 indicate strong agreement, and values close to -1
+    indicate strong disagreement. This implements two variants of Kendall's
+    tau: tau-b (the default) and tau-c (also known as Stuart's tau-c). These
+    differ only in how they are normalized to lie within the range -1 to 1;
+    the hypothesis tests (their p-values) are identical. Kendall's original
+    tau-a is not implemented separately because both tau-b and tau-c reduce
+    to tau-a in the absence of ties.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Arrays of rankings, of the same shape. If arrays are not 1-D, they
+        will be flattened to 1-D.
+    initial_lexsort : bool, optional, deprecated
+        This argument is unused.
+
+        .. deprecated:: 1.10.0
+           `kendalltau` keyword argument `initial_lexsort` is deprecated as it
+           is unused and will be removed in SciPy 1.12.0.
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains nan.
+        The following options are available (default is 'propagate'):
+
+          * 'propagate': returns nan
+          * 'raise': throws an error
+          * 'omit': performs the calculations ignoring nan values
+
+    method : {'auto', 'asymptotic', 'exact'}, optional
+        Defines which method is used to calculate the p-value [5]_.
+        The following options are available (default is 'auto'):
+
+          * 'auto': selects the appropriate method based on a trade-off
+            between speed and accuracy
+          * 'asymptotic': uses a normal approximation valid for large samples
+          * 'exact': computes the exact p-value, but can only be used if no ties
+            are present. As the sample size increases, the 'exact' computation
+            time may grow and the result may lose some precision.
+    variant : {'b', 'c'}, optional
+        Defines which variant of Kendall's tau is returned. Default is 'b'.
+    alternative : {'two-sided', 'less', 'greater'}, optional
+        Defines the alternative hypothesis. Default is 'two-sided'.
+        The following options are available:
+
+        * 'two-sided': the rank correlation is nonzero
+        * 'less': the rank correlation is negative (less than zero)
+        * 'greater':  the rank correlation is positive (greater than zero)
+
+    Returns
+    -------
+    res : SignificanceResult
+        An object containing attributes:
+
+        statistic : float
+           The tau statistic.
+        pvalue : float
+           The p-value for a hypothesis test whose null hypothesis is
+           an absence of association, tau = 0.
+
+    See Also
+    --------
+    spearmanr : Calculates a Spearman rank-order correlation coefficient.
+    theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).
+    weightedtau : Computes a weighted version of Kendall's tau.
+
+    Notes
+    -----
+    The definition of Kendall's tau that is used is [2]_::
+
+      tau_b = (P - Q) / sqrt((P + Q + T) * (P + Q + U))
+
+      tau_c = 2 (P - Q) / (n**2 * (m - 1) / m)
+
+    where P is the number of concordant pairs, Q the number of discordant
+    pairs, T the number of ties only in `x`, and U the number of ties only in
+    `y`.  If a tie occurs for the same pair in both `x` and `y`, it is not
+    added to either T or U. n is the total number of samples, and m is the
+    number of unique values in either `x` or `y`, whichever is smaller.
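+
+    In the absence of ties, ``T = U = 0`` and ``m = n``, so both expressions
+    reduce to tau-a, ``(P - Q) / (n * (n - 1) / 2)``.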
+
+    References
+    ----------
+    .. [1] Maurice G. Kendall, "A New Measure of Rank Correlation", Biometrika
+           Vol. 30, No. 1/2, pp. 81-93, 1938.
+    .. [2] Maurice G. Kendall, "The treatment of ties in ranking problems",
+           Biometrika Vol. 33, No. 3, pp. 239-251. 1945.
+    .. [3] Gottfried E. Noether, "Elements of Nonparametric Statistics", John
+           Wiley & Sons, 1967.
+    .. [4] Peter M. Fenwick, "A new data structure for cumulative frequency
+           tables", Software: Practice and Experience, Vol. 24, No. 3,
+           pp. 327-336, 1994.
+    .. [5] Maurice G. Kendall, "Rank Correlation Methods" (4th Edition),
+           Charles Griffin & Co., 1970.
+
+    Examples
+    --------
+    >>> from scipy import stats
+    >>> x1 = [12, 2, 1, 12, 2]
+    >>> x2 = [1, 4, 7, 1, 0]
+    >>> res = stats.kendalltau(x1, x2)
+    >>> res.statistic
+    -0.47140452079103173
+    >>> res.pvalue
+    0.2827454599327748
+
+    """
+    if initial_lexsort is not None:
+        msg = ("'kendalltau' keyword argument 'initial_lexsort' is deprecated"
+               " as it is unused and will be removed in SciPy 1.12.0.")
+        warnings.warn(msg, DeprecationWarning, stacklevel=2)
+
+    x = np.asarray(x).ravel()
+    y = np.asarray(y).ravel()
+
+    if x.size != y.size:
+        raise ValueError("All inputs to `kendalltau` must be of the same "
+                         f"size, found x-size {x.size} and y-size {y.size}")
+    elif not x.size or not y.size:
+        # Return NaN if arrays are empty
+        res = SignificanceResult(np.nan, np.nan)
+        res.correlation = np.nan
+        return res
+
+    # check both x and y
+    cnx, npx = _contains_nan(x, nan_policy)
+    cny, npy = _contains_nan(y, nan_policy)
+    contains_nan = cnx or cny
+    if npx == 'omit' or npy == 'omit':
+        nan_policy = 'omit'
+
+    if contains_nan and nan_policy == 'propagate':
+        res = SignificanceResult(np.nan, np.nan)
+        res.correlation = np.nan
+        return res
+
+    elif contains_nan and nan_policy == 'omit':
+        x = ma.masked_invalid(x)
+        y = ma.masked_invalid(y)
+        if variant == 'b':
+            return mstats_basic.kendalltau(x, y, method=method, use_ties=True,
+                                           alternative=alternative)
+        else:
+            message = ("nan_policy='omit' is currently compatible only with "
+                       "variant='b'.")
+            raise ValueError(message)
+
+    def count_rank_tie(ranks):
+        cnt = np.bincount(ranks).astype('int64', copy=False)
+        cnt = cnt[cnt > 1]
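+        # For each group of t tied values this accumulates t*(t-1)/2,
+        # t*(t-1)*(t-2) and t*(t-1)*(2*t+5), the tie corrections used below
+        # in the tau-b denominator and the asymptotic variance.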
+        return ((cnt * (cnt - 1) // 2).sum(),
+                (cnt * (cnt - 1.) * (cnt - 2)).sum(),
+                (cnt * (cnt - 1.) * (2*cnt + 5)).sum())
+
+    size = x.size
+    perm = np.argsort(y)  # sort on y and convert y to dense ranks
+    x, y = x[perm], y[perm]
+    y = np.r_[True, y[1:] != y[:-1]].cumsum(dtype=np.intp)
+
+    # stable sort on x and convert x to dense ranks
+    perm = np.argsort(x, kind='mergesort')
+    x, y = x[perm], y[perm]
+    x = np.r_[True, x[1:] != x[:-1]].cumsum(dtype=np.intp)
+
+    dis = _kendall_dis(x, y)  # discordant pairs
+
+    obs = np.r_[True, (x[1:] != x[:-1]) | (y[1:] != y[:-1]), True]
+    cnt = np.diff(np.nonzero(obs)[0]).astype('int64', copy=False)
+
+    ntie = (cnt * (cnt - 1) // 2).sum()  # joint ties
+    xtie, x0, x1 = count_rank_tie(x)     # ties in x, stats
+    ytie, y0, y1 = count_rank_tie(y)     # ties in y, stats
+
+    tot = (size * (size - 1)) // 2
+
+    if xtie == tot or ytie == tot:
+        res = SignificanceResult(np.nan, np.nan)
+        res.correlation = np.nan
+        return res
+
+    # Note that tot = con + dis + (xtie - ntie) + (ytie - ntie) + ntie
+    #               = con + dis + xtie + ytie - ntie
+    con_minus_dis = tot - xtie - ytie + ntie - 2 * dis
+    if variant == 'b':
+        tau = con_minus_dis / np.sqrt(tot - xtie) / np.sqrt(tot - ytie)
+    elif variant == 'c':
+        minclasses = min(len(set(x)), len(set(y)))
+        tau = 2*con_minus_dis / (size**2 * (minclasses-1)/minclasses)
+    else:
+        raise ValueError(f"Unknown variant of the method chosen: {variant}. "
+                         "variant must be 'b' or 'c'.")
+
+    # Limit range to fix computational errors
+    tau = min(1., max(-1., tau))
+
+    # The p-value calculation is the same for all variants since the p-value
+    # depends only on con_minus_dis.
+    if method == 'exact' and (xtie != 0 or ytie != 0):
+        raise ValueError("Ties found, exact method cannot be used.")
+
+    if method == 'auto':
+        if (xtie == 0 and ytie == 0) and (size <= 33 or
+                                          min(dis, tot-dis) <= 1):
+            method = 'exact'
+        else:
+            method = 'asymptotic'
+
+    if xtie == 0 and ytie == 0 and method == 'exact':
+        pvalue = mstats_basic._kendall_p_exact(size, tot-dis, alternative)
+    elif method == 'asymptotic':
+        # con_minus_dis is approx normally distributed with this variance [3]_
+        m = size * (size - 1.)
+        var = ((m * (2*size + 5) - x1 - y1) / 18 +
+               (2 * xtie * ytie) / m + x0 * y0 / (9 * m * (size - 2)))
+        z = con_minus_dis / np.sqrt(var)
+        _, pvalue = _normtest_finish(z, alternative)
+    else:
+        raise ValueError(f"Unknown method {method} specified.  Use 'auto', "
+                         "'exact' or 'asymptotic'.")
+
+    # create result object with alias for backward compatibility
+    res = SignificanceResult(tau, pvalue)
+    res.correlation = tau
+    return res
+
+
+def weightedtau(x, y, rank=True, weigher=None, additive=True):
+    r"""Compute a weighted version of Kendall's :math:`\tau`.
+
+    The weighted :math:`\tau` is a weighted version of Kendall's
+    :math:`\tau` in which exchanges of high weight are more influential than
+    exchanges of low weight. The default parameters compute the additive
+    hyperbolic version of the index, :math:`\tau_\mathrm h`, which has
+    been shown to provide the best balance between important and
+    unimportant elements [1]_.
+
+    The weighting is defined by means of a rank array, which assigns a
+    nonnegative rank to each element (higher importance ranks being
+    associated with smaller values, e.g., 0 is the highest possible rank),
+    and a weigher function, which assigns a weight based on the rank to
+    each element. The weight of an exchange is then the sum or the product
+    of the weights of the ranks of the exchanged elements. The default
+    parameters compute :math:`\tau_\mathrm h`: an exchange between
+    elements with rank :math:`r` and :math:`s` (starting from zero) has
+    weight :math:`1/(r+1) + 1/(s+1)`.
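+    For instance, with the default parameters, exchanging the two most
+    important elements (ranks 0 and 1) has weight :math:`1/1 + 1/2 = 3/2`,
+    while exchanging the elements of ranks 9 and 10 has weight
+    :math:`1/10 + 1/11 \approx 0.19`.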
+
+    Specifying a rank array is meaningful only if you have in mind an
+    external criterion of importance. If, as it usually happens, you do
+    not have in mind a specific rank, the weighted :math:`\tau` is
+    defined by averaging the values obtained using the decreasing
+    lexicographical rank by (`x`, `y`) and by (`y`, `x`). This is the
+    behavior with default parameters. Note that the convention used
+    here for ranking (lower values imply higher importance) is opposite
+    to that used by other SciPy statistical functions.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Arrays of scores, of the same shape. If arrays are not 1-D, they will
+        be flattened to 1-D.
+    rank : array_like of ints or bool, optional
+        A nonnegative rank assigned to each element. If it is None, the
+        decreasing lexicographical rank by (`x`, `y`) will be used: elements of
+        higher rank will be those with larger `x`-values, using `y`-values to
+        break ties (in particular, swapping `x` and `y` will give a different
+        result). If it is False, the element indices will be used
+        directly as ranks. The default is True, in which case this
+        function returns the average of the values obtained using the
+        decreasing lexicographical rank by (`x`, `y`) and by (`y`, `x`).
+    weigher : callable, optional
+        The weigher function. Must map nonnegative integers (zero
+        representing the most important element) to a nonnegative weight.
+        The default, None, provides hyperbolic weighing, that is,
+        rank :math:`r` is mapped to weight :math:`1/(r+1)`.
+    additive : bool, optional
+        If True, the weight of an exchange is computed by adding the
+        weights of the ranks of the exchanged elements; otherwise, the weights
+        are multiplied. The default is True.
+
+    Returns
+    -------
+    res: SignificanceResult
+        An object containing attributes:
+
+        statistic : float
+           The weighted :math:`\tau` correlation index.
+        pvalue : float
+           Presently ``np.nan``, as the null distribution of the statistic is
+           unknown (even in the additive hyperbolic case).
+
+    See Also
+    --------
+    kendalltau : Calculates Kendall's tau.
+    spearmanr : Calculates a Spearman rank-order correlation coefficient.
+    theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).
+
+    Notes
+    -----
+    This function uses an :math:`O(n \log n)`, mergesort-based algorithm
+    [1]_ that is a weighted extension of Knight's algorithm for Kendall's
+    :math:`\tau` [2]_. It can compute Shieh's weighted :math:`\tau` [3]_
+    between rankings without ties (i.e., permutations) by setting
+    `additive` and `rank` to False, as the definition given in [1]_ is a
+    generalization of Shieh's.
+
+    NaNs are considered the smallest possible score.
+
+    .. versionadded:: 0.19.0
+
+    References
+    ----------
+    .. [1] Sebastiano Vigna, "A weighted correlation index for rankings with
+           ties", Proceedings of the 24th international conference on World
+           Wide Web, pp. 1166-1176, ACM, 2015.
+    .. [2] W.R. Knight, "A Computer Method for Calculating Kendall's Tau with
+           Ungrouped Data", Journal of the American Statistical Association,
+           Vol. 61, No. 314, Part 1, pp. 436-439, 1966.
+    .. [3] Grace S. Shieh. "A weighted Kendall's tau statistic", Statistics &
+           Probability Letters, Vol. 39, No. 1, pp. 17-24, 1998.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> x = [12, 2, 1, 12, 2]
+    >>> y = [1, 4, 7, 1, 0]
+    >>> res = stats.weightedtau(x, y)
+    >>> res.statistic
+    -0.56694968153682723
+    >>> res.pvalue
+    nan
+    >>> res = stats.weightedtau(x, y, additive=False)
+    >>> res.statistic
+    -0.62205716951801038
+
+    NaNs are considered the smallest possible score:
+
+    >>> x = [12, 2, 1, 12, 2]
+    >>> y = [1, 4, 7, 1, np.nan]
+    >>> res = stats.weightedtau(x, y)
+    >>> res.statistic
+    -0.56694968153682723
+
+    This is exactly Kendall's tau:
+
+    >>> x = [12, 2, 1, 12, 2]
+    >>> y = [1, 4, 7, 1, 0]
+    >>> res = stats.weightedtau(x, y, weigher=lambda x: 1)
+    >>> res.statistic
+    -0.47140452079103173
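+
+    Since the default weigher is hyperbolic, passing it explicitly
+    reproduces the default result:
+
+    >>> res = stats.weightedtau(x, y, weigher=lambda r: 1/(r+1))
+    >>> res.statistic
+    -0.56694968153682723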
+
+    >>> x = [12, 2, 1, 12, 2]
+    >>> y = [1, 4, 7, 1, 0]
+    >>> stats.weightedtau(x, y, rank=None)
+    SignificanceResult(statistic=-0.4157652301037516, pvalue=nan)
+    >>> stats.weightedtau(y, x, rank=None)
+    SignificanceResult(statistic=-0.7181341329699028, pvalue=nan)
+
+    """
+    x = np.asarray(x).ravel()
+    y = np.asarray(y).ravel()
+
+    if x.size != y.size:
+        raise ValueError("All inputs to `weightedtau` must be "
+                         "of the same size, "
+                         "found x-size %s and y-size %s" % (x.size, y.size))
+    if not x.size:
+        # Return NaN if arrays are empty
+        res = SignificanceResult(np.nan, np.nan)
+        res.correlation = np.nan
+        return res
+
+    # If there are NaNs we apply _toint64()
+    if np.isnan(np.sum(x)):
+        x = _toint64(x)
+    if np.isnan(np.sum(y)):
+        y = _toint64(y)
+
+    # Reduce unsupported dtypes to ranks
+    if x.dtype != y.dtype:
+        if x.dtype != np.int64:
+            x = _toint64(x)
+        if y.dtype != np.int64:
+            y = _toint64(y)
+    else:
+        if x.dtype not in (np.int32, np.int64, np.float32, np.float64):
+            x = _toint64(x)
+            y = _toint64(y)
+
+    if rank is True:
+        tau = (
+            _weightedrankedtau(x, y, None, weigher, additive) +
+            _weightedrankedtau(y, x, None, weigher, additive)
+        ) / 2
+        res = SignificanceResult(tau, np.nan)
+        res.correlation = tau
+        return res
+
+    if rank is False:
+        rank = np.arange(x.size, dtype=np.intp)
+    elif rank is not None:
+        rank = np.asarray(rank).ravel()
+        if rank.size != x.size:
+            raise ValueError(
+                "All inputs to `weightedtau` must be of the same size, "
+                "found x-size %s and rank-size %s" % (x.size, rank.size)
+            )
+
+    tau = _weightedrankedtau(x, y, rank, weigher, additive)
+    res = SignificanceResult(tau, np.nan)
+    res.correlation = tau
+    return res
+
+
+# FROM MGCPY: https://github.com/neurodata/mgcpy
+
+
+class _ParallelP:
+    """Helper function to calculate parallel p-value."""
+
+    def __init__(self, x, y, random_states):
+        self.x = x
+        self.y = y
+        self.random_states = random_states
+
+    def __call__(self, index):
+        order = self.random_states[index].permutation(self.y.shape[0])
+        permy = self.y[order][:, order]
+
+        # calculate permuted stats, store in null distribution
+        perm_stat = _mgc_stat(self.x, permy)[0]
+
+        return perm_stat
+
+
+def _perm_test(x, y, stat, reps=1000, workers=-1, random_state=None):
+    r"""Helper function that calculates the p-value. See below for uses.
+
+    Parameters
+    ----------
+    x, y : ndarray
+        `x` and `y` have shapes `(n, p)` and `(n, q)`.
+    stat : float
+        The sample test statistic.
+    reps : int, optional
+        The number of replications used to estimate the null when using the
+        permutation test. The default is 1000 replications.
+    workers : int or map-like callable, optional
+        If `workers` is an int the population is subdivided into `workers`
+        sections and evaluated in parallel (uses
+        `multiprocessing.Pool`). Supply `-1` to use all cores
+        available to the Process. Alternatively supply a map-like callable,
+        such as `multiprocessing.Pool.map` for evaluating the population in
+        parallel. This evaluation is carried out as `workers(func, iterable)`.
+        Requires that `func` be pickleable.
+    random_state : {None, int, `numpy.random.Generator`,
+                    `numpy.random.RandomState`}, optional
+
+        If `random_state` is None (or `np.random`), the
+        `numpy.random.RandomState` singleton is used.
+        If `random_state` is an int, a new ``RandomState`` instance is used,
+        seeded with `random_state`.
+        If `random_state` is already a ``Generator`` or ``RandomState``
+        instance then that instance is used.
+
+    Returns
+    -------
+    pvalue : float
+        The sample test p-value.
+    null_dist : list
+        The approximated null distribution.
+
+    """
+    # generate seeds for each rep (change to the new parallel random number
+    # capabilities in numpy >= 1.17)
+    random_state = check_random_state(random_state)
+    random_states = [np.random.RandomState(rng_integers(random_state, 1 << 32,
+                     size=4, dtype=np.uint32)) for _ in range(reps)]
+
+    # parallelizes with specified workers over number of reps and set seeds
+    parallelp = _ParallelP(x=x, y=y, random_states=random_states)
+    with MapWrapper(workers) as mapwrapper:
+        null_dist = np.array(list(mapwrapper(parallelp, range(reps))))
+
+    # calculate the permutation p-value; the +1 in numerator and denominator
+    # guarantees a nonzero p-value (the observed statistic counts as one
+    # realization of the null)
+    pvalue = (1 + (null_dist >= stat).sum()) / (1 + reps)
+
+    return pvalue, null_dist
+
+
+def _euclidean_dist(x):
+    return cdist(x, x)
+
+
+MGCResult = _make_tuple_bunch('MGCResult',
+                              ['statistic', 'pvalue', 'mgc_dict'], [])
+
+
+def multiscale_graphcorr(x, y, compute_distance=_euclidean_dist, reps=1000,
+                         workers=1, is_twosamp=False, random_state=None):
+    r"""Computes the Multiscale Graph Correlation (MGC) test statistic.
+
+    Specifically, for each point, MGC finds the :math:`k`-nearest neighbors for
+    one property (e.g. cloud density), and the :math:`l`-nearest neighbors for
+    the other property (e.g. grass wetness) [1]_. This pair :math:`(k, l)` is
+    called the "scale". A priori, however, it is not know which scales will be
+    most informative. So, MGC computes all distance pairs, and then efficiently
+    computes the distance correlations for all scales. The local correlations
+    illustrate which scales are relatively informative about the relationship.
+    The key, therefore, to successfully discover and decipher relationships
+    between disparate data modalities is to adaptively determine which scales
+    are the most informative, and the geometric implication for the most
+    informative scales. Doing so not only provides an estimate of whether the
+    modalities are related, but also provides insight into how the
+    determination was made. This is especially important in high-dimensional
+    data, where simple visualizations do not reveal relationships to the
+    unaided human eye. Characterizations of this implementation in particular
+    have been derived from and benchmarked in [2]_.
+
+    Parameters
+    ----------
+    x, y : ndarray
+        If ``x`` and ``y`` have shapes ``(n, p)`` and ``(n, q)`` where `n` is
+        the number of samples and `p` and `q` are the number of dimensions,
+        then the MGC independence test will be run.  Alternatively, ``x`` and
+        ``y`` can have shapes ``(n, n)`` if they are distance or similarity
+    matrices, and ``compute_distance`` must be set to ``None``. If ``x``
+        and ``y`` have shapes ``(n, p)`` and ``(m, p)``, an unpaired
+        two-sample MGC test will be run.
+    compute_distance : callable, optional
+        A function that computes the distance or similarity among the samples
+        within each data matrix. Set to ``None`` if ``x`` and ``y`` are
+        already distance matrices. The default uses the euclidean norm metric.
+        If you are calling a custom function, either create the distance
+        matrix before-hand or create a function of the form
+        ``compute_distance(x)`` where `x` is the data matrix for which
+        pairwise distances are calculated.
+    reps : int, optional
+        The number of replications used to estimate the null when using the
+        permutation test. The default is ``1000``.
+    workers : int or map-like callable, optional
+        If ``workers`` is an int the population is subdivided into ``workers``
+        sections and evaluated in parallel (uses ``multiprocessing.Pool``).
+        Supply ``-1`` to use all cores available to the
+        Process. Alternatively supply a map-like callable, such as
+        ``multiprocessing.Pool.map`` for evaluating the p-value in parallel.
+        This evaluation is carried out as ``workers(func, iterable)``.
+        Requires that `func` be pickleable. The default is ``1``.
+    is_twosamp : bool, optional
+        If `True`, a two sample test will be run. If ``x`` and ``y`` have
+        shapes ``(n, p)`` and ``(m, p)``, this option will be overridden and
+        set to ``True``. Set to ``True`` if ``x`` and ``y`` both have shapes
+        ``(n, p)`` and a two sample test is desired. The default is ``False``.
+        Note that this will not run if inputs are distance matrices.
+    random_state : {None, int, `numpy.random.Generator`,
+                    `numpy.random.RandomState`}, optional
+
+        If `random_state` is None (or `np.random`), the
+        `numpy.random.RandomState` singleton is used.
+        If `random_state` is an int, a new ``RandomState`` instance is used,
+        seeded with `random_state`.
+        If `random_state` is already a ``Generator`` or ``RandomState``
+        instance then that instance is used.
+
+    Returns
+    -------
+    res : MGCResult
+        An object containing attributes:
+
+        statistic : float
+            The sample MGC test statistic within `[-1, 1]`.
+        pvalue : float
+            The p-value obtained via permutation.
+        mgc_dict : dict
+            Contains additional useful results:
+
+                - mgc_map : ndarray
+                    A 2D representation of the latent geometry of the
+                    relationship.
+                - opt_scale : (int, int)
+                    The estimated optimal scale as an `(x, y)` pair.
+                - null_dist : list
+                    The null distribution derived from the permuted matrices.
+
+    See Also
+    --------
+    pearsonr : Pearson correlation coefficient and p-value for testing
+               non-correlation.
+    kendalltau : Calculates Kendall's tau.
+    spearmanr : Calculates a Spearman rank-order correlation coefficient.
+
+    Notes
+    -----
+    A description of the process of MGC and applications on neuroscience data
+    can be found in [1]_. It is performed using the following steps:
+
+    #. Two distance matrices :math:`D^X` and :math:`D^Y` are computed and
+       modified to be mean zero columnwise. This results in two
+       :math:`n \times n` distance matrices :math:`A` and :math:`B` (the
+       centering and unbiased modification) [3]_.
+
+    #. For all values :math:`k` and :math:`l` from :math:`1, ..., n`,
+
+       * The :math:`k`-nearest neighbor and :math:`l`-nearest neighbor graphs
+         are calculated for each property. Here, :math:`G_k (i, j)` indicates
+         the :math:`k`-smallest values of the :math:`i`-th row of :math:`A`
+         and :math:`H_l (i, j)` indicates the :math:`l`-smallest values of
+         the :math:`i`-th row of :math:`B`.
+
+       * Let :math:`\circ` denote the entry-wise matrix product; then local
+         correlations are summed and normalized using the following statistic:
+
+    .. math::
+
+        c^{kl} = \frac{\sum_{ij} A G_k B H_l}
+                      {\sqrt{\sum_{ij} A^2 G_k \times \sum_{ij} B^2 H_l}}
+
+    #. The MGC test statistic is the smoothed optimal local correlation of
+       :math:`\{ c^{kl} \}`. Denote the smoothing operation as
+       :math:`R(\cdot)`, which essentially sets all isolated large
+       correlations to 0 and keeps connected large correlations unchanged
+       (see [3]_). MGC is,
+
+    .. math::
+
+        MGC_n (x, y) = \max_{(k, l)} R \left(c^{kl} \left( x_n, y_n \right)
+                                                    \right)
+
+    The test statistic takes a value in :math:`(-1, 1)` since it is
+    normalized.
+
+    The p-value returned is calculated using a permutation test. This process
+    is completed by first randomly permuting :math:`y` to estimate the null
+    distribution and then calculating the probability of observing a test
+    statistic, under the null, at least as extreme as the observed test
+    statistic.
+
+    MGC requires at least 5 samples to run with reliable results. It can also
+    handle high-dimensional data sets.
+    In addition, by manipulating the input data matrices, the two-sample
+    testing problem can be reduced to the independence testing problem [4]_.
+    Given sample data :math:`U` and :math:`V` of sizes :math:`p \times n`
+    and :math:`p \times m`, data matrices :math:`X` and :math:`Y` can be
+    created as
+    follows:
+
+    .. math::
+
+        X = [U | V] \in \mathcal{R}^{p \times (n + m)}
+        Y = [0_{1 \times n} | 1_{1 \times m}] \in \mathcal{R}^{(n + m)}
+
+    Then, the MGC statistic can be calculated as normal. This methodology can
+    be extended to similar tests such as distance correlation [4]_.
+
+    .. versionadded:: 1.4.0
+
+    References
+    ----------
+    .. [1] Vogelstein, J. T., Bridgeford, E. W., Wang, Q., Priebe, C. E.,
+           Maggioni, M., & Shen, C. (2019). Discovering and deciphering
+           relationships across disparate data modalities. ELife.
+    .. [2] Panda, S., Palaniappan, S., Xiong, J., Swaminathan, A.,
+           Ramachandran, S., Bridgeford, E. W., ... Vogelstein, J. T. (2019).
+           mgcpy: A Comprehensive High Dimensional Independence Testing Python
+           Package. :arXiv:`1907.02088`
+    .. [3] Shen, C., Priebe, C.E., & Vogelstein, J. T. (2019). From distance
+           correlation to multiscale graph correlation. Journal of the American
+           Statistical Association.
+    .. [4] Shen, C. & Vogelstein, J. T. (2018). The Exact Equivalence of
+           Distance and Kernel Methods for Hypothesis Testing.
+           :arXiv:`1806.05514`
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.stats import multiscale_graphcorr
+    >>> x = np.arange(100)
+    >>> y = x
+    >>> res = multiscale_graphcorr(x, y)
+    >>> res.statistic, res.pvalue
+    (1.0, 0.001)
+
+    To run an unpaired two-sample test,
+
+    >>> x = np.arange(100)
+    >>> y = np.arange(79)
+    >>> res = multiscale_graphcorr(x, y)
+    >>> res.statistic, res.pvalue  # doctest: +SKIP
+    (0.033258146255703246, 0.023)
+
+    or, if shape of the inputs are the same,
+
+    >>> x = np.arange(100)
+    >>> y = x
+    >>> res = multiscale_graphcorr(x, y, is_twosamp=True)
+    >>> res.statistic, res.pvalue  # doctest: +SKIP
+    (-0.008021809890200488, 1.0)
+
+    """
+    if not isinstance(x, np.ndarray) or not isinstance(y, np.ndarray):
+        raise ValueError("x and y must be ndarrays")
+
+    # convert arrays of type (n,) to (n, 1)
+    if x.ndim == 1:
+        x = x[:, np.newaxis]
+    elif x.ndim != 2:
+        raise ValueError("Expected a 2-D array `x`, found shape "
+                         "{}".format(x.shape))
+    if y.ndim == 1:
+        y = y[:, np.newaxis]
+    elif y.ndim != 2:
+        raise ValueError("Expected a 2-D array `y`, found shape "
+                         "{}".format(y.shape))
+
+    nx, px = x.shape
+    ny, py = y.shape
+
+    # check for NaNs
+    _contains_nan(x, nan_policy='raise')
+    _contains_nan(y, nan_policy='raise')
+
+    # check for positive or negative infinity and raise error
+    if np.sum(np.isinf(x)) > 0 or np.sum(np.isinf(y)) > 0:
+        raise ValueError("Inputs contain infinities")
+
+    if nx != ny:
+        if px == py:
+            # reshape x and y for two sample testing
+            is_twosamp = True
+        else:
+            raise ValueError("Shape mismatch, x and y must have shape [n, p] "
+                             "and [n, q] or have shape [n, p] and [m, p].")
+
+    if nx < 5 or ny < 5:
+        raise ValueError("MGC requires at least 5 samples to give reasonable "
+                         "results.")
+
+    # convert x and y to float
+    x = x.astype(np.float64)
+    y = y.astype(np.float64)
+
+    # check that compute_distance is a callable (or None)
+    if not callable(compute_distance) and compute_distance is not None:
+        raise ValueError("Compute_distance must be a function.")
+
+    # check that reps is a non-negative integer (a warning is raised if it
+    # is under 1000)
+    if not isinstance(reps, int) or reps < 0:
+        raise ValueError("Number of reps must be an integer greater than 0.")
+    elif reps < 1000:
+        msg = ("The number of replications is low (under 1000), and p-value "
+               "calculations may be unreliable. Use the p-value result, with "
+               "caution!")
+        warnings.warn(msg, RuntimeWarning)
+
+    if is_twosamp:
+        if compute_distance is None:
+            raise ValueError("Cannot run if inputs are distance matrices")
+        x, y = _two_sample_transform(x, y)
+
+    if compute_distance is not None:
+        # compute distance matrices for x and y
+        x = compute_distance(x)
+        y = compute_distance(y)
+
+    # calculate MGC stat
+    stat, stat_dict = _mgc_stat(x, y)
+    stat_mgc_map = stat_dict["stat_mgc_map"]
+    opt_scale = stat_dict["opt_scale"]
+
+    # calculate permutation MGC p-value
+    pvalue, null_dist = _perm_test(x, y, stat, reps=reps, workers=workers,
+                                   random_state=random_state)
+
+    # save all stats (other than stat/p-value) in dictionary
+    mgc_dict = {"mgc_map": stat_mgc_map,
+                "opt_scale": opt_scale,
+                "null_dist": null_dist}
+
+    # create result object with alias for backward compatibility
+    res = MGCResult(stat, pvalue, mgc_dict)
+    res.stat = stat
+    return res
+
+
+def _mgc_stat(distx, disty):
+    r"""Helper function that calculates the MGC stat. See above for use.
+
+    Parameters
+    ----------
+    distx, disty : ndarray
+        `distx` and `disty` have shapes `(n, p)` and `(n, q)` or
+        `(n, n)` and `(n, n)`
+        if distance matrices.
+
+    Returns
+    -------
+    stat : float
+        The sample MGC test statistic within `[-1, 1]`.
+    stat_dict : dict
+        Contains additional useful returns with the following keys:
+
+            - stat_mgc_map : ndarray
+                MGC-map of the statistics.
+            - opt_scale : (float, float)
+                The estimated optimal scale as an `(x, y)` pair.
+
+    """
+    # calculate MGC map and optimal scale
+    stat_mgc_map = _local_correlations(distx, disty, global_corr='mgc')
+
+    n, m = stat_mgc_map.shape
+    if m == 1 or n == 1:
+        # the global scale is the statistic calculated at the maximal
+        # nearest-neighbor scale. There are not enough local scales to
+        # search over, so default to the global scale
+        stat = stat_mgc_map[m - 1][n - 1]
+        opt_scale = m * n
+    else:
+        samp_size = len(distx) - 1
+
+        # threshold to find connected region of significant local correlations
+        sig_connect = _threshold_mgc_map(stat_mgc_map, samp_size)
+
+        # maximum within the significant region
+        stat, opt_scale = _smooth_mgc_map(sig_connect, stat_mgc_map)
+
+    stat_dict = {"stat_mgc_map": stat_mgc_map,
+                 "opt_scale": opt_scale}
+
+    return stat, stat_dict
+
+
+def _threshold_mgc_map(stat_mgc_map, samp_size):
+    r"""
+    Finds a connected region of significance in the MGC-map by thresholding.
+
+    Parameters
+    ----------
+    stat_mgc_map : ndarray
+        All local correlations within `[-1,1]`.
+    samp_size : int
+        The sample size of original data.
+
+    Returns
+    -------
+    sig_connect : ndarray
+        A binary matrix with 1's indicating the significant region.
+
+    """
+    m, n = stat_mgc_map.shape
+
+    # 0.02 is simply an empirical threshold, this can be set to 0.01 or 0.05
+    # with varying levels of performance. Threshold is based on a beta
+    # approximation.
+    per_sig = 1 - (0.02 / samp_size)  # Percentile to consider as significant
+    threshold = samp_size * (samp_size - 3)/4 - 1/2  # Beta approximation
+    threshold = distributions.beta.ppf(per_sig, threshold, threshold) * 2 - 1
+
+    # the global scale is the statistic calculated at the maximal
+    # nearest-neighbor scale. The threshold is the maximum over the global
+    # and local scales
+    threshold = max(threshold, stat_mgc_map[m - 1][n - 1])
+
+    # find the largest connected component of significant correlations
+    sig_connect = stat_mgc_map > threshold
+    if np.sum(sig_connect) > 0:
+        sig_connect, _ = _measurements.label(sig_connect)
+        _, label_counts = np.unique(sig_connect, return_counts=True)
+
+        # skip the first element in label_counts, as it is count(zeros)
+        max_label = np.argmax(label_counts[1:]) + 1
+        sig_connect = sig_connect == max_label
+    else:
+        sig_connect = np.array([[False]])
+
+    return sig_connect
+
+
+def _smooth_mgc_map(sig_connect, stat_mgc_map):
+    """Finds the smoothed maximal within the significant region R.
+
+    If the area of R is too small, it returns the last local correlation.
+    Otherwise, it returns the maximum within the significant connected
+    region.
+
+    Parameters
+    ----------
+    sig_connect : ndarray
+        A binary matrix with 1's indicating the significant region.
+    stat_mgc_map : ndarray
+        All local correlations within `[-1, 1]`.
+
+    Returns
+    -------
+    stat : float
+        The sample MGC statistic within `[-1, 1]`.
+    opt_scale: (float, float)
+        The estimated optimal scale as an `(x, y)` pair.
+
+    """
+    m, n = stat_mgc_map.shape
+
+    # the global scale is the statistic calculated at the maximal
+    # nearest-neighbor scale. By default, the statistic and optimal scale
+    # are global.
+    stat = stat_mgc_map[m - 1][n - 1]
+    opt_scale = [m, n]
+
+    if np.linalg.norm(sig_connect) != 0:
+        # proceed only when the connected region's area is sufficiently large
+        # 0.02 is simply an empirical threshold, this can be set to 0.01 or 0.05
+        # with varying levels of performance
+        if np.sum(sig_connect) >= np.ceil(0.02 * max(m, n)) * min(m, n):
+            max_corr = max(stat_mgc_map[sig_connect])
+
+            # find all scales within significant_connected_region that maximize
+            # the local correlation
+            max_corr_index = np.where((stat_mgc_map >= max_corr) & sig_connect)
+
+            if max_corr >= stat:
+                stat = max_corr
+
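+                # if several scales attain max_corr, pick the one with the
+                # largest row-major (flattened) index, i.e. the largest scale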
+                k, l = max_corr_index
+                one_d_indices = k * n + l  # 2D to 1D indexing
+                k = np.max(one_d_indices) // n
+                l = np.max(one_d_indices) % n
+                opt_scale = [k+1, l+1]  # adding 1s to match R indexing
+
+    return stat, opt_scale
+
+
+def _two_sample_transform(u, v):
+    """Helper function that concatenates x and y for two sample MGC stat.
+
+    See above for use.
+
+    Parameters
+    ----------
+    u, v : ndarray
+        `u` and `v` have shapes `(n, p)` and `(m, p)`.
+
+    Returns
+    -------
+    x : ndarray
+        `u` and `v` concatenated along ``axis=0``; `x` thus has shape
+        `(n + m, p)`.
+    y : ndarray
+        Label matrix for `x` where 0 refers to samples that come from `u` and
+        1 refers to samples that come from `v`; `y` thus has shape
+        `(n + m, 1)`.
+
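+    Examples
+    --------
+    A quick shape check, for illustration:
+
+    >>> u = np.ones((4, 3))
+    >>> v = np.zeros((2, 3))
+    >>> x, y = _two_sample_transform(u, v)
+    >>> x.shape, y.shape
+    ((6, 3), (6, 1))
+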
+    """
+    nx = u.shape[0]
+    ny = v.shape[0]
+    x = np.concatenate([u, v], axis=0)
+    y = np.concatenate([np.zeros(nx), np.ones(ny)], axis=0).reshape(-1, 1)
+    return x, y
+
+
+#####################################
+#       INFERENTIAL STATISTICS      #
+#####################################
+
+TtestResultBase = _make_tuple_bunch('TtestResultBase',
+                                    ['statistic', 'pvalue'], ['df'])
+
+
+class TtestResult(TtestResultBase):
+    """
+    Result of a t-test.
+
+    See the documentation of the particular t-test function for more
+    information about the definition of the statistic and meaning of
+    the confidence interval.
+
+    Attributes
+    ----------
+    statistic : float or array
+        The t-statistic of the sample.
+    pvalue : float or array
+        The p-value associated with the given alternative.
+    df : float or array
+        The number of degrees of freedom used in calculation of the
+        t-statistic; this is one less than the size of the sample
+        (``a.shape[axis]-1`` if there are no masked elements or omitted NaNs).
+
+    Methods
+    -------
+    confidence_interval
+        Computes a confidence interval around the population statistic
+        for the given confidence level.
+        The confidence interval is returned in a ``namedtuple`` with
+        fields `low` and `high`.
+
+    """
+
+    def __init__(self, statistic, pvalue, df,  # public
+                 alternative, standard_error, estimate):  # private
+        super().__init__(statistic, pvalue, df=df)
+        self._alternative = alternative
+        self._standard_error = standard_error  # denominator of t-statistic
+        self._estimate = estimate  # point estimate of sample mean
+
+    def confidence_interval(self, confidence_level=0.95):
+        """
+        Parameters
+        ----------
+        confidence_level : float
+            The confidence level for the calculation of the population mean
+            confidence interval. Default is 0.95.
+
+        Returns
+        -------
+        ci : namedtuple
+            The confidence interval is returned in a ``namedtuple`` with
+            fields `low` and `high`.
+
+        """
+        low, high = _t_confidence_interval(self.df, self.statistic,
+                                           confidence_level, self._alternative)
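+        # scale the standard-t quantiles by the standard error and shift by
+        # the point estimate to get the CI for the population mean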
+        low = low * self._standard_error + self._estimate
+        high = high * self._standard_error + self._estimate
+        return ConfidenceInterval(low=low, high=high)
+
+
+def pack_TtestResult(statistic, pvalue, df, alternative, standard_error,
+                     estimate):
+    # this could be any number of dimensions (including 0d), but there is
+    # at most one unique value
+    alternative = np.atleast_1d(alternative).ravel()
+    alternative = alternative[0] if alternative.size else np.nan
+    return TtestResult(statistic, pvalue, df=df, alternative=alternative,
+                       standard_error=standard_error, estimate=estimate)
+
+
+def unpack_TtestResult(res):
+    return (res.statistic, res.pvalue, res.df, res._alternative,
+            res._standard_error, res._estimate)
+
+
+@_axis_nan_policy_factory(pack_TtestResult, default_axis=0, n_samples=2,
+                          result_to_tuple=unpack_TtestResult, n_outputs=6)
+def ttest_1samp(a, popmean, axis=0, nan_policy='propagate',
+                alternative="two-sided"):
+    """Calculate the T-test for the mean of ONE group of scores.
+
+    This is a test for the null hypothesis that the expected value
+    (mean) of a sample of independent observations `a` is equal to the given
+    population mean, `popmean`.
+
+    Parameters
+    ----------
+    a : array_like
+        Sample observation.
+    popmean : float or array_like
+        Expected value in null hypothesis. If array_like, then its length along
+        `axis` must equal 1, and it must otherwise be broadcastable with `a`.
+    axis : int or None, optional
+        Axis along which to compute test; default is 0. If None, compute over
+        the whole array `a`.
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains nan.
+        The following options are available (default is 'propagate'):
+
+          * 'propagate': returns nan
+          * 'raise': throws an error
+          * 'omit': performs the calculations ignoring nan values
+
+    alternative : {'two-sided', 'less', 'greater'}, optional
+        Defines the alternative hypothesis.
+        The following options are available (default is 'two-sided'):
+
+        * 'two-sided': the mean of the underlying distribution of the sample
+          is different than the given population mean (`popmean`)
+        * 'less': the mean of the underlying distribution of the sample is
+          less than the given population mean (`popmean`)
+        * 'greater': the mean of the underlying distribution of the sample is
+          greater than the given population mean (`popmean`)
+
+    Returns
+    -------
+    result : `~scipy.stats._result_classes.TtestResult`
+        An object with the following attributes:
+
+        statistic : float or array
+            The t-statistic.
+        pvalue : float or array
+            The p-value associated with the given alternative.
+        df : float or array
+            The number of degrees of freedom used in calculation of the
+            t-statistic; this is one less than the size of the sample,
+            i.e. ``a.shape[axis] - 1``.
+
+            .. versionadded:: 1.10.0
+
+        The object also has the following method:
+
+        confidence_interval(confidence_level=0.95)
+            Computes a confidence interval around the population
+            mean for the given confidence level.
+            The confidence interval is returned in a ``namedtuple`` with
+            fields `low` and `high`.
+
+            .. versionadded:: 1.10.0
+
+    Notes
+    -----
+    The statistic is calculated as ``(np.mean(a) - popmean)/se``, where
+    ``se`` is the standard error. Therefore, the statistic will be positive
+    when the sample mean is greater than the population mean and negative when
+    the sample mean is less than the population mean.
+
+    Examples
+    --------
+    Suppose we wish to test the null hypothesis that the mean of a population
+    is equal to 0.5. We choose a confidence level of 99%; that is, we will
+    reject the null hypothesis in favor of the alternative if the p-value is
+    less than 0.01.
+
+    When testing random variates from the standard uniform distribution, which
+    has a mean of 0.5, we expect the data to be consistent with the null
+    hypothesis most of the time.
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> rng = np.random.default_rng()
+    >>> rvs = stats.uniform.rvs(size=50, random_state=rng)
+    >>> stats.ttest_1samp(rvs, popmean=0.5)
+    TtestResult(statistic=2.456308468440, pvalue=0.017628209047638, df=49)
+
+    As expected, the p-value of 0.017 is not below our threshold of 0.01, so
+    we cannot reject the null hypothesis.
+
+    When testing data from the standard *normal* distribution, which has a mean
+    of 0, we would expect the null hypothesis to be rejected.
+
+    >>> rvs = stats.norm.rvs(size=50, random_state=rng)
+    >>> stats.ttest_1samp(rvs, popmean=0.5)
+    TtestResult(statistic=-7.433605518875, pvalue=1.416760157221e-09, df=49)
+
+    Indeed, the p-value is lower than our threshold of 0.01, so we reject the
+    null hypothesis in favor of the default "two-sided" alternative: the mean
+    of the population is *not* equal to 0.5.
+
+    However, suppose we were to test the null hypothesis against the
+    one-sided alternative that the mean of the population is *greater* than
+    0.5. Since the mean of the standard normal is less than 0.5, we would not
+    expect the null hypothesis to be rejected.
+
+    >>> stats.ttest_1samp(rvs, popmean=0.5, alternative='greater')
+    TtestResult(statistic=-7.433605518875, pvalue=0.99999999929, df=49)
+
+    Unsurprisingly, with a p-value greater than our threshold, we would not
+    reject the null hypothesis.
+
+    Note that when working with a confidence level of 99%, a true null
+    hypothesis will be rejected approximately 1% of the time.
+
+    >>> rvs = stats.uniform.rvs(size=(100, 50), random_state=rng)
+    >>> res = stats.ttest_1samp(rvs, popmean=0.5, axis=1)
+    >>> np.sum(res.pvalue < 0.01)
+    1
+
+    Indeed, even though all 100 samples above were drawn from the standard
+    uniform distribution, which *does* have a population mean of 0.5, we would
+    mistakenly reject the null hypothesis for one of them.
+
+    `ttest_1samp` can also compute a confidence interval around the population
+    mean.
+
+    >>> rvs = stats.norm.rvs(size=50, random_state=rng)
+    >>> res = stats.ttest_1samp(rvs, popmean=0)
+    >>> ci = res.confidence_interval(confidence_level=0.95)
+    >>> ci
+    ConfidenceInterval(low=-0.3193887540880017, high=0.2898583388980972)
+
+    The bounds of the 95% confidence interval are the
+    minimum and maximum values of the parameter `popmean` for which the
+    p-value of the test would be 0.05.
+
+    >>> res = stats.ttest_1samp(rvs, popmean=ci.low)
+    >>> np.testing.assert_allclose(res.pvalue, 0.05)
+    >>> res = stats.ttest_1samp(rvs, popmean=ci.high)
+    >>> np.testing.assert_allclose(res.pvalue, 0.05)
+
+    Under certain assumptions about the population from which a sample
+    is drawn, the confidence interval with confidence level 95% is expected
+    to contain the true population mean in 95% of sample replications.
+
+    >>> rvs = stats.norm.rvs(size=(50, 1000), loc=1, random_state=rng)
+    >>> res = stats.ttest_1samp(rvs, popmean=0)
+    >>> ci = res.confidence_interval()
+    >>> contains_pop_mean = (ci.low < 1) & (ci.high > 1)
+    >>> contains_pop_mean.sum()
+    953
+
+    """
+    a, axis = _chk_asarray(a, axis)
+
+    n = a.shape[axis]
+    df = n - 1
+
+    mean = np.mean(a, axis)
+    try:
+        popmean = np.squeeze(popmean, axis=axis)
+    except ValueError as e:
+        raise ValueError("`popmean.shape[axis]` must equal 1.") from e
+    d = mean - popmean
+    v = _var(a, axis, ddof=1)
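+    # denom is the standard error of the sample mean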
+    denom = np.sqrt(v / n)
+
+    with np.errstate(divide='ignore', invalid='ignore'):
+        t = np.divide(d, denom)
+    t, prob = _ttest_finish(df, t, alternative)
+
+    # when nan_policy='omit', `df` can be different for different axis-slices
+    df = np.broadcast_to(df, t.shape)[()]
+    # _axis_nan_policy decorator doesn't play well with strings
+    alternative_num = {"less": -1, "two-sided": 0, "greater": 1}[alternative]
+    return TtestResult(t, prob, df=df, alternative=alternative_num,
+                       standard_error=denom, estimate=mean)
+
+
+def _t_confidence_interval(df, t, confidence_level, alternative):
+    # Input validation on `alternative` is already done
+    # We just need IV on confidence_level
+    if confidence_level < 0 or confidence_level > 1:
+        message = "`confidence_level` must be a number between 0 and 1."
+        raise ValueError(message)
+
+    if alternative < 0:  # 'less'
+        p = confidence_level
+        low, high = np.broadcast_arrays(-np.inf, special.stdtrit(df, p))
+    elif alternative > 0:  # 'greater'
+        p = 1 - confidence_level
+        low, high = np.broadcast_arrays(special.stdtrit(df, p), np.inf)
+    elif alternative == 0:  # 'two-sided'
+        tail_probability = (1 - confidence_level)/2
+        p = tail_probability, 1-tail_probability
+        # axis of p must be the zeroth and orthogonal to all the rest
+        p = np.reshape(p, [2] + [1]*np.asarray(df).ndim)
+        low, high = special.stdtrit(df, p)
+    else:  # alternative is NaN when input is empty (see _axis_nan_policy)
+        p, nans = np.broadcast_arrays(t, np.nan)
+        low, high = nans, nans
+
+    return low[()], high[()]
+
+
+def _ttest_finish(df, t, alternative):
+    """Common code between all 3 t-test functions."""
+    # We use ``stdtr`` directly here as it handles the case when ``nan``
+    # values are present in the data and masked arrays are passed
+    # while ``t.cdf`` emits runtime warnings. This way ``_ttest_finish``
+    # can be shared between the ``stats`` and ``mstats`` versions.
+
+    if alternative == 'less':
+        pval = special.stdtr(df, t)
+    elif alternative == 'greater':
+        pval = special.stdtr(df, -t)
+    elif alternative == 'two-sided':
+        pval = special.stdtr(df, -np.abs(t))*2
+    else:
+        raise ValueError("alternative must be "
+                         "'less', 'greater' or 'two-sided'")
+
+    if t.ndim == 0:
+        t = t[()]
+    if pval.ndim == 0:
+        pval = pval[()]
+
+    return t, pval
+
+
+def _ttest_ind_from_stats(mean1, mean2, denom, df, alternative):
+
+    d = mean1 - mean2
+    with np.errstate(divide='ignore', invalid='ignore'):
+        t = np.divide(d, denom)
+    t, prob = _ttest_finish(df, t, alternative)
+
+    return (t, prob)
+
+
+def _unequal_var_ttest_denom(v1, n1, v2, n2):
+    vn1 = v1 / n1
+    vn2 = v2 / n2
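+    # Welch-Satterthwaite approximation to the effective degrees of freedom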
+    with np.errstate(divide='ignore', invalid='ignore'):
+        df = (vn1 + vn2)**2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1))
+
+    # If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
+    # Hence it doesn't matter what df is as long as it's not NaN.
+    df = np.where(np.isnan(df), 1, df)
+    denom = np.sqrt(vn1 + vn2)
+    return df, denom
+
+
+def _equal_var_ttest_denom(v1, n1, v2, n2):
+    df = n1 + n2 - 2.0
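+    # pooled sample variance with n1 + n2 - 2 degrees of freedom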
+    svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / df
+    denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2))
+    return df, denom
+
+
+Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
+
+
+def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2,
+                         equal_var=True, alternative="two-sided"):
+    r"""
+    T-test for means of two independent samples from descriptive statistics.
+
+    This is a test for the null hypothesis that two independent
+    samples have identical average (expected) values.
+
+    Parameters
+    ----------
+    mean1 : array_like
+        The mean(s) of sample 1.
+    std1 : array_like
+        The corrected sample standard deviation of sample 1 (i.e. ``ddof=1``).
+    nobs1 : array_like
+        The number(s) of observations of sample 1.
+    mean2 : array_like
+        The mean(s) of sample 2.
+    std2 : array_like
+        The corrected sample standard deviation of sample 2 (i.e. ``ddof=1``).
+    nobs2 : array_like
+        The number(s) of observations of sample 2.
+    equal_var : bool, optional
+        If True (default), perform a standard independent 2 sample test
+        that assumes equal population variances [1]_.
+        If False, perform Welch's t-test, which does not assume equal
+        population variance [2]_.
+    alternative : {'two-sided', 'less', 'greater'}, optional
+        Defines the alternative hypothesis.
+        The following options are available (default is 'two-sided'):
+
+        * 'two-sided': the means of the distributions are unequal.
+        * 'less': the mean of the first distribution is less than the
+          mean of the second distribution.
+        * 'greater': the mean of the first distribution is greater than the
+          mean of the second distribution.
+
+        .. versionadded:: 1.6.0
+
+    Returns
+    -------
+    statistic : float or array
+        The calculated t-statistics.
+    pvalue : float or array
+        The two-tailed p-value.
+
+    See Also
+    --------
+    scipy.stats.ttest_ind
+
+    Notes
+    -----
+    The statistic is calculated as ``(mean1 - mean2)/se``, where ``se`` is the
+    standard error. Therefore, the statistic will be positive when `mean1` is
+    greater than `mean2` and negative when `mean1` is less than `mean2`.
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
+
+    .. [2] https://en.wikipedia.org/wiki/Welch%27s_t-test
+
+    Examples
+    --------
+    Suppose we have the summary data for two samples, as follows (with the
+    Sample Variance being the corrected sample variance)::
+
+                         Sample   Sample
+                   Size   Mean   Variance
+        Sample 1    13    15.0     87.5
+        Sample 2    11    12.0     39.0
+
+    Apply the t-test to this data (with the assumption that the population
+    variances are equal):
+
+    >>> import numpy as np
+    >>> from scipy.stats import ttest_ind_from_stats
+    >>> ttest_ind_from_stats(mean1=15.0, std1=np.sqrt(87.5), nobs1=13,
+    ...                      mean2=12.0, std2=np.sqrt(39.0), nobs2=11)
+    Ttest_indResult(statistic=0.9051358093310269, pvalue=0.3751996797581487)
+
+    For comparison, here is the data from which those summary statistics
+    were taken.  With this data, we can compute the same result using
+    `scipy.stats.ttest_ind`:
+
+    >>> a = np.array([1, 3, 4, 6, 11, 13, 15, 19, 22, 24, 25, 26, 26])
+    >>> b = np.array([2, 4, 6, 9, 11, 13, 14, 15, 18, 19, 21])
+    >>> from scipy.stats import ttest_ind
+    >>> ttest_ind(a, b)
+    Ttest_indResult(statistic=0.905135809331027, pvalue=0.3751996797581486)
+
+    Suppose we instead have binary data and would like to apply a t-test to
+    compare the proportion of 1s in two independent groups::
+
+                          Number of    Sample     Sample
+                    Size    ones        Mean     Variance
+        Sample 1    150      30         0.2        0.161073
+        Sample 2    200      45         0.225      0.175251
+
+    The sample mean :math:`\hat{p}` is the proportion of ones in the sample
+    and the variance for a binary observation is estimated by
+    :math:`\hat{p}(1-\hat{p})`.
+
+    >>> ttest_ind_from_stats(mean1=0.2, std1=np.sqrt(0.161073), nobs1=150,
+    ...                      mean2=0.225, std2=np.sqrt(0.175251), nobs2=200)
+    Ttest_indResult(statistic=-0.5627187905196761, pvalue=0.5739887114209541)
+
+    For comparison, we could compute the t statistic and p-value using
+    arrays of 0s and 1s and `scipy.stats.ttest_ind`, as above.
+
+    >>> group1 = np.array([1]*30 + [0]*(150-30))
+    >>> group2 = np.array([1]*45 + [0]*(200-45))
+    >>> ttest_ind(group1, group2)
+    Ttest_indResult(statistic=-0.5627179589855622, pvalue=0.573989277115258)
+
+    """
+    mean1 = np.asarray(mean1)
+    std1 = np.asarray(std1)
+    mean2 = np.asarray(mean2)
+    std2 = np.asarray(std2)
+    if equal_var:
+        df, denom = _equal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2)
+    else:
+        df, denom = _unequal_var_ttest_denom(std1**2, nobs1,
+                                             std2**2, nobs2)
+
+    res = _ttest_ind_from_stats(mean1, mean2, denom, df, alternative)
+    return Ttest_indResult(*res)
+
+
+def _ttest_nans(a, b, axis, namedtuple_type):
+    """
+    Generate an array of `nan`, with shape determined by `a`, `b` and `axis`.
+
+    This function is used by ttest_ind and ttest_rel to create the return
+    value when one of the inputs has size 0.
+
+    The shapes of the arrays are determined by dropping `axis` from the
+    shapes of `a` and `b` and broadcasting what is left.
+
+    The return value is a named tuple of the type given in `namedtuple_type`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.zeros((9, 2))
+    >>> b = np.zeros((5, 1))
+    >>> _ttest_nans(a, b, 0, Ttest_indResult)
+    Ttest_indResult(statistic=array([nan, nan]), pvalue=array([nan, nan]))
+
+    >>> a = np.zeros((3, 0, 9))
+    >>> b = np.zeros((1, 10))
+    >>> stat, p = _ttest_nans(a, b, -1, Ttest_indResult)
+    >>> stat
+    array([], shape=(3, 0), dtype=float64)
+    >>> p
+    array([], shape=(3, 0), dtype=float64)
+
+    >>> a = np.zeros(10)
+    >>> b = np.zeros(7)
+    >>> _ttest_nans(a, b, 0, Ttest_indResult)
+    Ttest_indResult(statistic=nan, pvalue=nan)
+
+    """
+    shp = _broadcast_shapes_with_dropped_axis(a, b, axis)
+    if len(shp) == 0:
+        t = np.nan
+        p = np.nan
+    else:
+        t = np.full(shp, fill_value=np.nan)
+        p = t.copy()
+    return namedtuple_type(t, p)
+
+
+def ttest_ind(a, b, axis=0, equal_var=True, nan_policy='propagate',
+              permutations=None, random_state=None, alternative="two-sided",
+              trim=0):
+    """
+    Calculate the T-test for the means of *two independent* samples of scores.
+
+    This is a test for the null hypothesis that 2 independent samples
+    have identical average (expected) values. This test assumes that the
+    populations have identical variances by default.
+
+    Parameters
+    ----------
+    a, b : array_like
+        The arrays must have the same shape, except in the dimension
+        corresponding to `axis` (the first, by default).
+    axis : int or None, optional
+        Axis along which to compute test. If None, compute over the whole
+        arrays, `a`, and `b`.
+    equal_var : bool, optional
+        If True (default), perform a standard independent 2 sample test
+        that assumes equal population variances [1]_.
+        If False, perform Welch's t-test, which does not assume equal
+        population variance [2]_.
+
+        .. versionadded:: 0.11.0
+
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains nan.
+        The following options are available (default is 'propagate'):
+
+          * 'propagate': returns nan
+          * 'raise': throws an error
+          * 'omit': performs the calculations ignoring nan values
+
+        The 'omit' option is not currently available for permutation tests or
+        one-sided asymptotic tests.
+
+    permutations : non-negative int, np.inf, or None (default), optional
+        If 0 or None (default), use the t-distribution to calculate p-values.
+        Otherwise, `permutations` is the number of random permutations that
+        will be used to estimate p-values using a permutation test. If
+        `permutations` equals or exceeds the number of distinct partitions of
+        the pooled data, an exact test is performed instead (i.e. each
+        distinct partition is used exactly once). See Notes for details.
+
+        .. versionadded:: 1.7.0
+
+    random_state : {None, int, `numpy.random.Generator`,
+            `numpy.random.RandomState`}, optional
+
+        If `random_state` is None (or `np.random`), the
+        `numpy.random.RandomState` singleton is used.
+        If `random_state` is an int, a new ``RandomState`` instance is used,
+        seeded with `random_state`.
+        If `random_state` is already a ``Generator`` or ``RandomState``
+        instance then that instance is used.
+
+        Pseudorandom number generator state used to generate permutations
+        (used only when `permutations` is not None).
+
+        .. versionadded:: 1.7.0
+
+    alternative : {'two-sided', 'less', 'greater'}, optional
+        Defines the alternative hypothesis.
+        The following options are available (default is 'two-sided'):
+
+        * 'two-sided': the means of the distributions underlying the samples
+          are unequal.
+        * 'less': the mean of the distribution underlying the first sample
+          is less than the mean of the distribution underlying the second
+          sample.
+        * 'greater': the mean of the distribution underlying the first
+          sample is greater than the mean of the distribution underlying
+          the second sample.
+
+        .. versionadded:: 1.6.0
+
+    trim : float, optional
+        If nonzero, performs a trimmed (Yuen's) t-test.
+        Defines the fraction of elements to be trimmed from each end of the
+        input samples. If 0 (default), no elements will be trimmed from either
+        side. The number of trimmed elements from each tail is the floor of
+        `trim` times the number of elements. Valid range is [0, .5).
+
+        .. versionadded:: 1.7
+
+    Returns
+    -------
+    statistic : float or array
+        The calculated t-statistic.
+    pvalue : float or array
+        The p-value.
+
+    Notes
+    -----
+    Suppose we observe two independent samples, e.g. flower petal lengths, and
+    we are considering whether the two samples were drawn from the same
+    population (e.g. the same species of flower or two species with similar
+    petal characteristics) or two different populations.
+
+    The t-test quantifies the difference between the arithmetic means
+    of the two samples. The p-value quantifies the probability of observing
+    values as extreme as or more extreme than the observed ones, assuming the
+    null hypothesis (that the samples are drawn from populations with the
+    same population means) is true.
+    A p-value larger than a chosen threshold (e.g. 5% or 1%) indicates that
+    our observation is not so unlikely to have occurred by chance. Therefore,
+    we do not reject the null hypothesis of equal population means.
+    If the p-value is smaller than our threshold, then we have evidence
+    against the null hypothesis of equal population means.
+
+    By default, the p-value is determined by comparing the t-statistic of the
+    observed data against a theoretical t-distribution.
+    When ``1 < permutations < binom(n, k)``, where
+
+    * ``k`` is the number of observations in `a`,
+    * ``n`` is the total number of observations in `a` and `b`, and
+    * ``binom(n, k)`` is the binomial coefficient (``n`` choose ``k``),
+
+    the data are pooled (concatenated), randomly assigned to either group `a`
+    or `b`, and the t-statistic is calculated. This process is performed
+    repeatedly (`permutations` times), generating a distribution of the
+    t-statistic under the null hypothesis, and the t-statistic of the observed
+    data is compared to this distribution to determine the p-value.
+    Specifically, the p-value reported is the "achieved significance level"
+    (ASL) as defined in 4.4 of [3]_. Note that there are other ways of
+    estimating p-values using randomized permutation tests; for other
+    options, see the more general `permutation_test`.
+
+    When ``permutations >= binom(n, k)``, an exact test is performed: the data
+    are partitioned between the groups in each distinct way exactly once.
+
+    The permutation test can be computationally expensive and not necessarily
+    more accurate than the analytical test, but it does not make strong
+    assumptions about the shape of the underlying distribution.
+
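+    As a rough sketch of this idea (an illustration only, not the exact
+    implementation used by `ttest_ind`; the helper below is hypothetical),
+    a two-sided ASL can be estimated by pooling the data, permuting the
+    group labels, and counting how often the permuted statistic is at
+    least as extreme as the observed one:
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> def _asl_sketch(a, b, n_resamples=999, seed=None):
+    ...     # Hypothetical helper for illustration; not ttest_ind's code path.
+    ...     rng = np.random.default_rng(seed)
+    ...     observed = stats.ttest_ind(a, b).statistic
+    ...     pooled = np.concatenate([a, b])
+    ...     hits = 0
+    ...     for _ in range(n_resamples):
+    ...         perm = rng.permutation(pooled)
+    ...         t = stats.ttest_ind(perm[:len(a)], perm[len(a):]).statistic
+    ...         hits += abs(t) >= abs(observed)
+    ...     return (hits + 1) / (n_resamples + 1)
+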
+    Use of trimming is commonly referred to as the trimmed t-test. At times
+    called Yuen's t-test, this is an extension of Welch's t-test, with the
+    difference being the use of winsorized means in calculation of the variance
+    and the trimmed sample size in calculation of the statistic. Trimming is
+    recommended if the underlying distribution is long-tailed or contaminated
+    with outliers [4]_.
+
+    The statistic is calculated as ``(np.mean(a) - np.mean(b))/se``, where
+    ``se`` is the standard error. Therefore, the statistic will be positive
+    when the sample mean of `a` is greater than the sample mean of `b` and
+    negative when the sample mean of `a` is less than the sample mean of
+    `b`.
+
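+    For the default pooled-variance case (``equal_var=True``), ``se`` is the
+    textbook pooled standard error ``sqrt(sp2 * (1/n1 + 1/n2))``, where
+    ``sp2 = ((n1 - 1)*v1 + (n2 - 1)*v2) / (n1 + n2 - 2)`` is the pooled
+    sample variance. A brief sketch (illustrative only; it should agree
+    with ``ttest_ind(a, b).statistic``):
+
+    >>> import numpy as np
+    >>> a, b = np.array([1., 4., 3., 2.]), np.array([5., 3., 6.])
+    >>> n1, n2 = len(a), len(b)
+    >>> v1, v2 = a.var(ddof=1), b.var(ddof=1)
+    >>> sp2 = ((n1 - 1)*v1 + (n2 - 1)*v2) / (n1 + n2 - 2)
+    >>> t = (a.mean() - b.mean()) / np.sqrt(sp2 * (1/n1 + 1/n2))
+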
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
+
+    .. [2] https://en.wikipedia.org/wiki/Welch%27s_t-test
+
+    .. [3] B. Efron and T. Hastie. Computer Age Statistical Inference. (2016).
+
+    .. [4] Yuen, Karen K. "The Two-Sample Trimmed t for Unequal Population
+           Variances." Biometrika, vol. 61, no. 1, 1974, pp. 165-170. JSTOR,
+           www.jstor.org/stable/2334299. Accessed 30 Mar. 2021.
+
+    .. [5] Yuen, Karen K., and W. J. Dixon. "The Approximate Behaviour and
+           Performance of the Two-Sample Trimmed t." Biometrika, vol. 60,
+           no. 2, 1973, pp. 369-374. JSTOR, www.jstor.org/stable/2334550.
+           Accessed 30 Mar. 2021.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> rng = np.random.default_rng()
+
+    Test with samples with identical means:
+
+    >>> rvs1 = stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng)
+    >>> rvs2 = stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng)
+    >>> stats.ttest_ind(rvs1, rvs2)
+    Ttest_indResult(statistic=-0.4390847099199348, pvalue=0.6606952038870015)
+    >>> stats.ttest_ind(rvs1, rvs2, equal_var=False)
+    Ttest_indResult(statistic=-0.4390847099199348, pvalue=0.6606952553131064)
+
+    `ttest_ind` underestimates p for unequal variances:
+
+    >>> rvs3 = stats.norm.rvs(loc=5, scale=20, size=500, random_state=rng)
+    >>> stats.ttest_ind(rvs1, rvs3)
+    Ttest_indResult(statistic=-1.6370984482905417, pvalue=0.1019251574705033)
+    >>> stats.ttest_ind(rvs1, rvs3, equal_var=False)
+    Ttest_indResult(statistic=-1.637098448290542, pvalue=0.10202110497954867)
+
+    When ``n1 != n2``, the equal variance t-statistic is no longer equal to the
+    unequal variance t-statistic:
+
+    >>> rvs4 = stats.norm.rvs(loc=5, scale=20, size=100, random_state=rng)
+    >>> stats.ttest_ind(rvs1, rvs4)
+    Ttest_indResult(statistic=-1.9481646859513422, pvalue=0.05186270935842703)
+    >>> stats.ttest_ind(rvs1, rvs4, equal_var=False)
+    Ttest_indResult(statistic=-1.3146566100751664, pvalue=0.1913495266513811)
+
+    T-test with different means, variance, and n:
+
+    >>> rvs5 = stats.norm.rvs(loc=8, scale=20, size=100, random_state=rng)
+    >>> stats.ttest_ind(rvs1, rvs5)
+    Ttest_indResult(statistic=-2.8415950600298774, pvalue=0.0046418707568707885)
+    >>> stats.ttest_ind(rvs1, rvs5, equal_var=False)
+    Ttest_indResult(statistic=-1.8686598649188084, pvalue=0.06434714193919686)
+
+    When performing a permutation test, using more permutations typically
+    yields more accurate results. Use a ``np.random.Generator`` to ensure
+    reproducibility:
+
+    >>> stats.ttest_ind(rvs1, rvs5, permutations=10000,
+    ...                 random_state=rng)
+    Ttest_indResult(statistic=-2.8415950600298774, pvalue=0.0052994700529947)
+
+    Take these two samples, one of which has an extreme tail.
+
+    >>> a = (56, 128.6, 12, 123.8, 64.34, 78, 763.3)
+    >>> b = (1.1, 2.9, 4.2)
+
+    Use the `trim` keyword to perform a trimmed (Yuen) t-test. For example,
+    using 20% trimming, ``trim=.2``, the test will reduce the impact of one
+    (``np.floor(trim*len(a))``) element from each tail of sample `a`. It will
+    have no effect on sample `b` because ``np.floor(trim*len(b))`` is 0.
+
+    >>> stats.ttest_ind(a, b, trim=.2)
+    Ttest_indResult(statistic=3.4463884028073513,
+                    pvalue=0.01369338726499547)
+    """
+    if not (0 <= trim < .5):
+        raise ValueError("Trimming percentage should be 0 <= `trim` < .5.")
+
+    a, b, axis = _chk2_asarray(a, b, axis)
+
+    # check both a and b
+    cna, npa = _contains_nan(a, nan_policy)
+    cnb, npb = _contains_nan(b, nan_policy)
+    contains_nan = cna or cnb
+    if npa == 'omit' or npb == 'omit':
+        nan_policy = 'omit'
+
+    if contains_nan and nan_policy == 'omit':
+        if permutations or trim != 0:
+            raise ValueError("nan-containing/masked inputs with "
+                             "nan_policy='omit' are currently not "
+                             "supported by permutation tests or "
+                             "trimmed tests.")
+        a = ma.masked_invalid(a)
+        b = ma.masked_invalid(b)
+        return mstats_basic.ttest_ind(a, b, axis, equal_var, alternative)
+
+    if a.size == 0 or b.size == 0:
+        return _ttest_nans(a, b, axis, Ttest_indResult)
+
+    if permutations is not None and permutations != 0:
+        if trim != 0:
+            raise ValueError("Permutations are currently not supported "
+                             "with trimming.")
+        if permutations < 0 or (np.isfinite(permutations) and
+                                int(permutations) != permutations):
+            raise ValueError("Permutations must be a non-negative integer.")
+
+        res = _permutation_ttest(a, b, permutations=permutations,
+                                 axis=axis, equal_var=equal_var,
+                                 nan_policy=nan_policy,
+                                 random_state=random_state,
+                                 alternative=alternative)
+
+    else:
+        n1 = a.shape[axis]
+        n2 = b.shape[axis]
+
+        if trim == 0:
+            v1 = _var(a, axis, ddof=1)
+            v2 = _var(b, axis, ddof=1)
+            m1 = np.mean(a, axis)
+            m2 = np.mean(b, axis)
+        else:
+            v1, m1, n1 = _ttest_trim_var_mean_len(a, trim, axis)
+            v2, m2, n2 = _ttest_trim_var_mean_len(b, trim, axis)
+
+        if equal_var:
+            df, denom = _equal_var_ttest_denom(v1, n1, v2, n2)
+        else:
+            df, denom = _unequal_var_ttest_denom(v1, n1, v2, n2)
+        res = _ttest_ind_from_stats(m1, m2, denom, df, alternative)
+    return Ttest_indResult(*res)
+
+
+def _ttest_trim_var_mean_len(a, trim, axis):
+    """Variance, mean, and length of winsorized input along specified axis"""
+    # for use with `ttest_ind` when trimming.
+    # further calculations in this test assume that the inputs are sorted.
+    # From [4] Section 1 "Let x_1, ..., x_n be n ordered observations..."
+    a = np.sort(a, axis=axis)
+
+    # `g` is the number of elements to be replaced on each tail, converted
+    # from a percentage amount of trimming
+    n = a.shape[axis]
+    g = int(n * trim)
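+    # e.g. ``n = 7`` observations with ``trim = 0.2`` gives ``g = 1``: one
+    # element is replaced on each tail.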
+
+    # Calculate the Winsorized variance of the input samples according to
+    # specified `g`
+    v = _calculate_winsorized_variance(a, g, axis)
+
+    # the total number of elements in the trimmed samples
+    n -= 2 * g
+
+    # calculate the g-times trimmed mean, as defined in [4] (1-1)
+    m = trim_mean(a, trim, axis=axis)
+    return v, m, n
+
+
+def _calculate_winsorized_variance(a, g, axis):
+    """Calculates g-times winsorized variance along specified axis"""
+    # it is expected that the input `a` is sorted along the correct axis
+    if g == 0:
+        return _var(a, ddof=1, axis=axis)
+    # move the intended axis to the end that way it is easier to manipulate
+    a_win = np.moveaxis(a, axis, -1)
+
+    # save where NaNs are for later use.
+    nans_indices = np.any(np.isnan(a_win), axis=-1)
+
+    # Winsorization and variance calculation are done in one step in [4]
+    # (1-3), but here winsorization is done first; replace the left and
+    # right sides with the repeating value. This can be seen in effect in
+    # (1-3) in [4], where the leftmost and rightmost tails are replaced with
+    # `(g + 1) * x_{g + 1}` on the left and `(g + 1) * x_{n - g}` on the
+    # right. Zero-indexing turns `g + 1` into `g`, and `n - g` into
+    # `-g - 1` in array indexing.
+    a_win[..., :g] = a_win[..., [g]]
+    a_win[..., -g:] = a_win[..., [-g - 1]]
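+    # For example, with ``g = 1`` the sorted slice [1, 2, 3, 4, 9] becomes
+    # [2, 2, 3, 4, 4] before the variance is computed.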
+
+    # Determine the variance. In [4], the degrees of freedom is expressed as
+    # `h - 1`, where `h = n - 2g` (unnumbered equations in Section 1, end of
+    # page 369, beginning of page 370). This is converted to NumPy's format,
+    # `n - ddof` for use with `np.var`. The result is converted to an
+    # array to accommodate indexing later.
+    var_win = np.asarray(_var(a_win, ddof=(2 * g + 1), axis=-1))
+
+    # with `nan_policy='propagate'`, NaNs may be completely trimmed out
+    # because they were sorted into the tail of the array. In these cases,
+    # replace computed variances with `np.nan`.
+    var_win[nans_indices] = np.nan
+    return var_win
+
+
+def _permutation_distribution_t(data, permutations, size_a, equal_var,
+                                random_state=None):
+    """Generation permutation distribution of t statistic"""
+
+    random_state = check_random_state(random_state)
+
+    # prepare permutation indices
+    size = data.shape[-1]
+    # number of distinct combinations
+    n_max = special.comb(size, size_a)
+
+    if permutations < n_max:
+        perm_generator = (random_state.permutation(size)
+                          for i in range(permutations))
+    else:
+        permutations = n_max
+        perm_generator = (np.concatenate(z)
+                          for z in _all_partitions(size_a, size-size_a))
+
+    t_stat = []
+    for indices in _batch_generator(perm_generator, batch=50):
+        # get one batch from perm_generator at a time as a list
+        indices = np.array(indices)
+        # generate permutations
+        data_perm = data[..., indices]
+        # move axis indexing permutations to position 0 to broadcast
+        # nicely with t_stat_observed, which doesn't have this dimension
+        data_perm = np.moveaxis(data_perm, -2, 0)
+
+        a = data_perm[..., :size_a]
+        b = data_perm[..., size_a:]
+        t_stat.append(_calc_t_stat(a, b, equal_var))
+
+    t_stat = np.concatenate(t_stat, axis=0)
+
+    return t_stat, permutations, n_max
+
+
+def _calc_t_stat(a, b, equal_var, axis=-1):
+    """Calculate the t statistic along the given dimension."""
+    na = a.shape[axis]
+    nb = b.shape[axis]
+    avg_a = np.mean(a, axis=axis)
+    avg_b = np.mean(b, axis=axis)
+    var_a = _var(a, axis=axis, ddof=1)
+    var_b = _var(b, axis=axis, ddof=1)
+
+    if not equal_var:
+        denom = _unequal_var_ttest_denom(var_a, na, var_b, nb)[1]
+    else:
+        denom = _equal_var_ttest_denom(var_a, na, var_b, nb)[1]
+
+    return (avg_a-avg_b)/denom
+
+
+def _permutation_ttest(a, b, permutations, axis=0, equal_var=True,
+                       nan_policy='propagate', random_state=None,
+                       alternative="two-sided"):
+    """
+    Calculates the T-test for the means of TWO INDEPENDENT samples of scores
+    using permutation methods.
+
+    This test is similar to `stats.ttest_ind`, except it does not rely on
+    an approximate normality assumption, since it uses a permutation test.
+    This function is only called from `ttest_ind` when `permutations` is
+    not None.
+
+    Parameters
+    ----------
+    a, b : array_like
+        The arrays must be broadcastable, except along the dimension
+        corresponding to `axis` (the zeroth, by default).
+    axis : int, optional
+        The axis over which to operate on a and b.
+    permutations : int, optional
+        Number of permutations used to calculate p-value. If greater than or
+        equal to the number of distinct permutations, perform an exact test.
+    equal_var : bool, optional
+        If True (default), perform a standard independent two-sample t-test
+        that assumes equal population variances. If False, perform Welch's
+        t-test, which does not assume equal population variance.
+    random_state : {None, int, `numpy.random.Generator`}, optional
+        If `random_state` is None, the `numpy.random.Generator` singleton
+        is used.
+        If `random_state` is an int, a new ``Generator`` instance is used,
+        seeded with `random_state`.
+        If `random_state` is already a ``Generator`` instance, then that
+        instance is used.
+        Pseudorandom number generator state used for generating random
+        permutations.
+
+    Returns
+    -------
+    statistic : float or array
+        The calculated t-statistic.
+    pvalue : float or array
+        The p-value.
+
+    """
+    random_state = check_random_state(random_state)
+
+    t_stat_observed = _calc_t_stat(a, b, equal_var, axis=axis)
+
+    na = a.shape[axis]
+    mat = _broadcast_concatenate((a, b), axis=axis)
+    mat = np.moveaxis(mat, axis, -1)
+
+    t_stat, permutations, n_max = _permutation_distribution_t(
+        mat, permutations, size_a=na, equal_var=equal_var,
+        random_state=random_state)
+
+    compare = {"less": np.less_equal,
+               "greater": np.greater_equal,
+               "two-sided": lambda x, y: (x <= -np.abs(y)) | (x >= np.abs(y))}
+
+    # Calculate the p-values
+    cmps = compare[alternative](t_stat, t_stat_observed)
+    # Randomized test p-value calculation should use biased estimate; see e.g.
+    # https://www.degruyter.com/document/doi/10.2202/1544-6115.1585/
+    adjustment = 1 if n_max > permutations else 0
+    pvalues = (cmps.sum(axis=0) + adjustment) / (permutations + adjustment)
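+    # For a randomized (non-exact) test this amounts to
+    # (hits + 1) / (permutations + 1), counting the observed statistic as
+    # one of the permutations; for an exact test no adjustment is needed.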
+
+    # nans propagate naturally in statistic calculation, but need to be
+    # propagated manually into pvalues
+    if nan_policy == 'propagate' and np.isnan(t_stat_observed).any():
+        if np.ndim(pvalues) == 0:
+            pvalues = np.float64(np.nan)
+        else:
+            pvalues[np.isnan(t_stat_observed)] = np.nan
+
+    return (t_stat_observed, pvalues)
+
+
+def _get_len(a, axis, msg):
+    try:
+        n = a.shape[axis]
+    except IndexError:
+        raise np.AxisError(axis, a.ndim, msg) from None
+    return n
+
+
+@_axis_nan_policy_factory(pack_TtestResult, default_axis=0, n_samples=2,
+                          result_to_tuple=unpack_TtestResult, n_outputs=6,
+                          paired=True)
+def ttest_rel(a, b, axis=0, nan_policy='propagate', alternative="two-sided"):
+    """Calculate the t-test on TWO RELATED samples of scores, a and b.
+
+    This is a test for the null hypothesis that two related or
+    repeated samples have identical average (expected) values.
+
+    Parameters
+    ----------
+    a, b : array_like
+        The arrays must have the same shape.
+    axis : int or None, optional
+        Axis along which to compute test. If None, compute over the whole
+        arrays, `a`, and `b`.
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains nan.
+        The following options are available (default is 'propagate'):
+
+          * 'propagate': returns nan
+          * 'raise': throws an error
+          * 'omit': performs the calculations ignoring nan values
+    alternative : {'two-sided', 'less', 'greater'}, optional
+        Defines the alternative hypothesis.
+        The following options are available (default is 'two-sided'):
+
+        * 'two-sided': the means of the distributions underlying the samples
+          are unequal.
+        * 'less': the mean of the distribution underlying the first sample
+          is less than the mean of the distribution underlying the second
+          sample.
+        * 'greater': the mean of the distribution underlying the first
+          sample is greater than the mean of the distribution underlying
+          the second sample.
+
+        .. versionadded:: 1.6.0
+
+    Returns
+    -------
+    result : `~scipy.stats._result_classes.TtestResult`
+        An object with the following attributes:
+
+        statistic : float or array
+            The t-statistic.
+        pvalue : float or array
+            The p-value associated with the given alternative.
+        df : float or array
+            The number of degrees of freedom used in calculation of the
+            t-statistic; this is one less than the size of the sample
+            (``a.shape[axis]``).
+
+            .. versionadded:: 1.10.0
+
+        The object also has the following method:
+
+        confidence_interval(confidence_level=0.95)
+            Computes a confidence interval around the difference in
+            population means for the given confidence level.
+            The confidence interval is returned in a ``namedtuple`` with
+            fields `low` and `high`.
+
+            .. versionadded:: 1.10.0
+
+    Notes
+    -----
+    Examples for use are scores of the same set of students in
+    different exams, or repeated sampling from the same units. The
+    test measures whether the average score differs significantly
+    across samples (e.g. exams). If we observe a large p-value, for
+    example greater than 0.05 or 0.1, then we cannot reject the null
+    hypothesis of identical average scores. If the p-value is smaller
+    than the threshold, e.g. 1%, 5% or 10%, then we reject the null
+    hypothesis of equal averages. Small p-values are associated with
+    large t-statistics.
+
+    The t-statistic is calculated as ``np.mean(a - b)/se``, where ``se`` is the
+    standard error. Therefore, the t-statistic will be positive when the sample
+    mean of ``a - b`` is greater than zero and negative when the sample mean of
+    ``a - b`` is less than zero.
+
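+    A brief sketch reproducing that quantity by hand (illustrative only;
+    it should agree with ``ttest_rel(a, b).statistic``):
+
+    >>> import numpy as np
+    >>> a = np.array([1., 2., 3., 4.])
+    >>> b = np.array([1.1, 1.9, 3.2, 3.7])
+    >>> d = a - b
+    >>> se = np.sqrt(d.var(ddof=1) / len(d))
+    >>> t = d.mean() / se
+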
+    References
+    ----------
+    https://en.wikipedia.org/wiki/T-test#Dependent_t-test_for_paired_samples
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> rng = np.random.default_rng()
+
+    >>> rvs1 = stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng)
+    >>> rvs2 = (stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng)
+    ...         + stats.norm.rvs(scale=0.2, size=500, random_state=rng))
+    >>> stats.ttest_rel(rvs1, rvs2)
+    TtestResult(statistic=-0.4549717054410304, pvalue=0.6493274702088672, df=499)
+    >>> rvs3 = (stats.norm.rvs(loc=8, scale=10, size=500, random_state=rng)
+    ...         + stats.norm.rvs(scale=0.2, size=500, random_state=rng))
+    >>> stats.ttest_rel(rvs1, rvs3)
+    TtestResult(statistic=-5.879467544540889, pvalue=7.540777129099917e-09, df=499)
+
+    """
+    a, b, axis = _chk2_asarray(a, b, axis)
+
+    na = _get_len(a, axis, "first argument")
+    nb = _get_len(b, axis, "second argument")
+    if na != nb:
+        raise ValueError('unequal length arrays')
+
+    if na == 0 or nb == 0:
+        # _axis_nan_policy decorator ensures this only happens with 1d input
+        return TtestResult(np.nan, np.nan, df=np.nan, alternative=np.nan,
+                           standard_error=np.nan, estimate=np.nan)
+
+    n = a.shape[axis]
+    df = n - 1
+
+    d = (a - b).astype(np.float64)
+    v = _var(d, axis, ddof=1)
+    dm = np.mean(d, axis)
+    denom = np.sqrt(v / n)
+
+    with np.errstate(divide='ignore', invalid='ignore'):
+        t = np.divide(dm, denom)
+    t, prob = _ttest_finish(df, t, alternative)
+
+    # when nan_policy='omit', `df` can be different for different axis-slices
+    df = np.broadcast_to(df, t.shape)[()]
+
+    # _axis_nan_policy decorator doesn't play well with strings
+    alternative_num = {"less": -1, "two-sided": 0, "greater": 1}[alternative]
+    return TtestResult(t, prob, df=df, alternative=alternative_num,
+                       standard_error=denom, estimate=dm)
+
+
+# Map from names to lambda_ values used in power_divergence().
+_power_div_lambda_names = {
+    "pearson": 1,
+    "log-likelihood": 0,
+    "freeman-tukey": -0.5,
+    "mod-log-likelihood": -1,
+    "neyman": -2,
+    "cressie-read": 2/3,
+}
+
+
+def _count(a, axis=None):
+    """Count the number of non-masked elements of an array.
+
+    This function behaves like `np.ma.count`, but is much faster
+    for ndarrays.
+    """
+    if hasattr(a, 'count'):
+        num = a.count(axis=axis)
+        if isinstance(num, np.ndarray) and num.ndim == 0:
+            # In some cases, the `count` method returns a scalar array (e.g.
+            # np.array(3)), but we want a plain integer.
+            num = int(num)
+    else:
+        if axis is None:
+            num = a.size
+        else:
+            num = a.shape[axis]
+    return num
+
+
+def _m_broadcast_to(a, shape):
+    if np.ma.isMaskedArray(a):
+        return np.ma.masked_array(np.broadcast_to(a, shape),
+                                  mask=np.broadcast_to(a.mask, shape))
+    return np.broadcast_to(a, shape, subok=True)
+
+
+Power_divergenceResult = namedtuple('Power_divergenceResult',
+                                    ('statistic', 'pvalue'))
+
+
+def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
+    """Cressie-Read power divergence statistic and goodness of fit test.
+
+    This function tests the null hypothesis that the categorical data
+    has the given frequencies, using the Cressie-Read power divergence
+    statistic.
+
+    Parameters
+    ----------
+    f_obs : array_like
+        Observed frequencies in each category.
+    f_exp : array_like, optional
+        Expected frequencies in each category.  By default the categories are
+        assumed to be equally likely.
+    ddof : int, optional
+        "Delta degrees of freedom": adjustment to the degrees of freedom
+        for the p-value.  The p-value is computed using a chi-squared
+        distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
+        is the number of observed frequencies.  The default value of `ddof`
+        is 0.
+    axis : int or None, optional
+        The axis of the broadcast result of `f_obs` and `f_exp` along which to
+        apply the test.  If axis is None, all values in `f_obs` are treated
+        as a single data set.  Default is 0.
+    lambda_ : float or str, optional
+        The power in the Cressie-Read power divergence statistic.  The default
+        is 1.  For convenience, `lambda_` may be assigned one of the following
+        strings, in which case the corresponding numerical value is used:
+
+        * ``"pearson"`` (value 1)
+            Pearson's chi-squared statistic. In this case, the function is
+            equivalent to `chisquare`.
+        * ``"log-likelihood"`` (value 0)
+            Log-likelihood ratio. Also known as the G-test [3]_.
+        * ``"freeman-tukey"`` (value -1/2)
+            Freeman-Tukey statistic.
+        * ``"mod-log-likelihood"`` (value -1)
+            Modified log-likelihood ratio.
+        * ``"neyman"`` (value -2)
+            Neyman's statistic.
+        * ``"cressie-read"`` (value 2/3)
+            The power recommended in [5]_.
+
+    Returns
+    -------
+    statistic : float or ndarray
+        The Cressie-Read power divergence test statistic.  The value is
+        a float if `axis` is None or if `f_obs` and `f_exp` are 1-D.
+    pvalue : float or ndarray
+        The p-value of the test.  The value is a float if `ddof` and the
+        return value `stat` are scalars.
+
+    See Also
+    --------
+    chisquare
+
+    Notes
+    -----
+    This test is invalid when the observed or expected frequencies in each
+    category are too small.  A typical rule is that all of the observed
+    and expected frequencies should be at least 5.
+
+    Also, the sum of the observed and expected frequencies must be the same
+    for the test to be valid; `power_divergence` raises an error if the sums
+    do not agree within a relative tolerance of ``1e-8``.
+
+    When `lambda_` is less than zero, the formula for the statistic involves
+    dividing by `f_obs`, so a warning or error may be generated if any value
+    in `f_obs` is 0.
+
+    Similarly, a warning or error may be generated if any value in `f_exp` is
+    zero when `lambda_` >= 0.
+
+    The default degrees of freedom, k-1, are for the case when no parameters
+    of the distribution are estimated. If p parameters are estimated by
+    efficient maximum likelihood then the correct degrees of freedom are
+    k-1-p. If the parameters are estimated in a different way, then the
+    dof can be between k-1-p and k-1. However, it is also possible that
+    the asymptotic distribution is not a chisquare, in which case this
+    test is not appropriate.
+
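+    For the default ``lambda_=1`` (Pearson), the statistic is simply
+    ``sum((f_obs - f_exp)**2 / f_exp)``; a quick sketch with uniform
+    expected frequencies (illustrative only):
+
+    >>> import numpy as np
+    >>> f_obs = np.array([16., 18., 16., 14., 12., 12.])
+    >>> f_exp = np.full_like(f_obs, f_obs.mean())
+    >>> float(((f_obs - f_exp)**2 / f_exp).sum())
+    2.0
+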
+    This function handles masked arrays.  If an element of `f_obs` or `f_exp`
+    is masked, then data at that position is ignored, and does not count
+    towards the size of the data set.
+
+    .. versionadded:: 0.13.0
+
+    References
+    ----------
+    .. [1] Lowry, Richard.  "Concepts and Applications of Inferential
+           Statistics". Chapter 8.
+           https://web.archive.org/web/20171015035606/http://faculty.vassar.edu/lowry/ch8pt1.html
+    .. [2] "Chi-squared test", https://en.wikipedia.org/wiki/Chi-squared_test
+    .. [3] "G-test", https://en.wikipedia.org/wiki/G-test
+    .. [4] Sokal, R. R. and Rohlf, F. J. "Biometry: the principles and
+           practice of statistics in biological research", New York: Freeman
+           (1981)
+    .. [5] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
+           Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
+           pp. 440-464.
+
+    Examples
+    --------
+    (See `chisquare` for more examples.)
+
+    When just `f_obs` is given, it is assumed that the expected frequencies
+    are uniform and given by the mean of the observed frequencies.  Here we
+    perform a G-test (i.e. use the log-likelihood ratio statistic):
+
+    >>> import numpy as np
+    >>> from scipy.stats import power_divergence
+    >>> power_divergence([16, 18, 16, 14, 12, 12], lambda_='log-likelihood')
+    (2.006573162632538, 0.84823476779463769)
+
+    The expected frequencies can be given with the `f_exp` argument:
+
+    >>> power_divergence([16, 18, 16, 14, 12, 12],
+    ...                  f_exp=[16, 16, 16, 16, 16, 8],
+    ...                  lambda_='log-likelihood')
+    (3.3281031458963746, 0.6495419288047497)
+
+    When `f_obs` is 2-D, by default the test is applied to each column.
+
+    >>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
+    >>> obs.shape
+    (6, 2)
+    >>> power_divergence(obs, lambda_="log-likelihood")
+    (array([ 2.00657316,  6.77634498]), array([ 0.84823477,  0.23781225]))
+
+    By setting ``axis=None``, the test is applied to all data in the array,
+    which is equivalent to applying the test to the flattened array.
+
+    >>> power_divergence(obs, axis=None)
+    (23.31034482758621, 0.015975692534127565)
+    >>> power_divergence(obs.ravel())
+    (23.31034482758621, 0.015975692534127565)
+
+    `ddof` is the change to make to the default degrees of freedom.
+
+    >>> power_divergence([16, 18, 16, 14, 12, 12], ddof=1)
+    (2.0, 0.73575888234288467)
+
+    The calculation of the p-values is done by broadcasting the
+    test statistic with `ddof`.
+
+    >>> power_divergence([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
+    (2.0, array([ 0.84914504,  0.73575888,  0.5724067 ]))
+
+    `f_obs` and `f_exp` are also broadcast.  In the following, `f_obs` has
+    shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
+    `f_obs` and `f_exp` has shape (2, 6).  To compute the desired chi-squared
+    statistics, we must use ``axis=1``:
+
+    >>> power_divergence([16, 18, 16, 14, 12, 12],
+    ...                  f_exp=[[16, 16, 16, 16, 16, 8],
+    ...                         [8, 20, 20, 16, 12, 12]],
+    ...                  axis=1)
+    (array([ 3.5 ,  9.25]), array([ 0.62338763,  0.09949846]))
+
+    """
+    # Convert the input argument `lambda_` to a numerical value.
+    if isinstance(lambda_, str):
+        if lambda_ not in _power_div_lambda_names:
+            names = repr(list(_power_div_lambda_names.keys()))[1:-1]
+            raise ValueError("invalid string for lambda_: {0!r}. "
+                             "Valid strings are {1}".format(lambda_, names))
+        lambda_ = _power_div_lambda_names[lambda_]
+    elif lambda_ is None:
+        lambda_ = 1
+
+    f_obs = np.asanyarray(f_obs)
+    f_obs_float = f_obs.astype(np.float64)
+
+    if f_exp is not None:
+        f_exp = np.asanyarray(f_exp)
+        bshape = _broadcast_shapes(f_obs_float.shape, f_exp.shape)
+        f_obs_float = _m_broadcast_to(f_obs_float, bshape)
+        f_exp = _m_broadcast_to(f_exp, bshape)
+        rtol = 1e-8  # to pass existing tests
+        with np.errstate(invalid='ignore'):
+            f_obs_sum = f_obs_float.sum(axis=axis)
+            f_exp_sum = f_exp.sum(axis=axis)
+            relative_diff = (np.abs(f_obs_sum - f_exp_sum) /
+                             np.minimum(f_obs_sum, f_exp_sum))
+            diff_gt_tol = (relative_diff > rtol).any()
+        if diff_gt_tol:
+            msg = (f"For each axis slice, the sum of the observed "
+                   f"frequencies must agree with the sum of the "
+                   f"expected frequencies to a relative tolerance "
+                   f"of {rtol}, but the percent differences are:\n"
+                   f"{relative_diff}")
+            raise ValueError(msg)
+
+    else:
+        # Ignore 'invalid' errors so the edge case of a data set with length 0
+        # is handled without spurious warnings.
+        with np.errstate(invalid='ignore'):
+            f_exp = f_obs.mean(axis=axis, keepdims=True)
+
+    # `terms` is the array of terms that are summed along `axis` to create
+    # the test statistic.  We use some specialized code for a few special
+    # cases of lambda_.
+    if lambda_ == 1:
+        # Pearson's chi-squared statistic
+        terms = (f_obs_float - f_exp)**2 / f_exp
+    elif lambda_ == 0:
+        # Log-likelihood ratio (i.e. G-test)
+        terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp)
+    elif lambda_ == -1:
+        # Modified log-likelihood ratio
+        terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs)
+    else:
+        # General Cressie-Read power divergence.
+        terms = f_obs * ((f_obs / f_exp)**lambda_ - 1)
+        terms /= 0.5 * lambda_ * (lambda_ + 1)
+
+    stat = terms.sum(axis=axis)
+
+    num_obs = _count(terms, axis=axis)
+    ddof = asarray(ddof)
+    p = distributions.chi2.sf(stat, num_obs - 1 - ddof)
+
+    return Power_divergenceResult(stat, p)
+
+
+def chisquare(f_obs, f_exp=None, ddof=0, axis=0):
+    """Calculate a one-way chi-square test.
+
+    The chi-square test tests the null hypothesis that the categorical data
+    has the given frequencies.
+
+    Parameters
+    ----------
+    f_obs : array_like
+        Observed frequencies in each category.
+    f_exp : array_like, optional
+        Expected frequencies in each category.  By default the categories are
+        assumed to be equally likely.
+    ddof : int, optional
+        "Delta degrees of freedom": adjustment to the degrees of freedom
+        for the p-value.  The p-value is computed using a chi-squared
+        distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
+        is the number of observed frequencies.  The default value of `ddof`
+        is 0.
+    axis : int or None, optional
+        The axis of the broadcast result of `f_obs` and `f_exp` along which to
+        apply the test.  If axis is None, all values in `f_obs` are treated
+        as a single data set.  Default is 0.
+
+    Returns
+    -------
+    chisq : float or ndarray
+        The chi-squared test statistic.  The value is a float if `axis` is
+        None or `f_obs` and `f_exp` are 1-D.
+    p : float or ndarray
+        The p-value of the test.  The value is a float if `ddof` and the
+        return value `chisq` are scalars.
+
+    See Also
+    --------
+    scipy.stats.power_divergence
+    scipy.stats.fisher_exact : Fisher exact test on a 2x2 contingency table.
+    scipy.stats.barnard_exact : An unconditional exact test. An alternative
+        to chi-squared test for small sample sizes.
+
+    Notes
+    -----
+    This test is invalid when the observed or expected frequencies in each
+    category are too small.  A typical rule is that all of the observed
+    and expected frequencies should be at least 5. According to [3]_, the
+    total number of samples is recommended to be greater than 13,
+    otherwise exact tests (such as Barnard's Exact test) should be used
+    because they do not overreject.
+
+    Also, the sum of the observed and expected frequencies must be the same
+    for the test to be valid; `chisquare` raises an error if the sums do not
+    agree within a relative tolerance of ``1e-8``.
+
+    The default degrees of freedom, k-1, are for the case when no parameters
+    of the distribution are estimated. If p parameters are estimated by
+    efficient maximum likelihood then the correct degrees of freedom are
+    k-1-p. If the parameters are estimated in a different way, then the
+    dof can be between k-1-p and k-1. However, it is also possible that
+    the asymptotic distribution is not chi-square, in which case this test
+    is not appropriate.
+
+    References
+    ----------
+    .. [1] Lowry, Richard.  "Concepts and Applications of Inferential
+           Statistics". Chapter 8.
+           https://web.archive.org/web/20171022032306/http://vassarstats.net:80/textbook/ch8pt1.html
+    .. [2] "Chi-squared test", https://en.wikipedia.org/wiki/Chi-squared_test
+    .. [3] Pearson, Karl. "On the criterion that a given system of deviations from the probable
+           in the case of a correlated system of variables is such that it can be reasonably
+           supposed to have arisen from random sampling", Philosophical Magazine. Series 5. 50
+           (1900), pp. 157-175.
+
+    Examples
+    --------
+    When just `f_obs` is given, it is assumed that the expected frequencies
+    are uniform and given by the mean of the observed frequencies.
+
+    >>> import numpy as np
+    >>> from scipy.stats import chisquare
+    >>> chisquare([16, 18, 16, 14, 12, 12])
+    (2.0, 0.84914503608460956)
+
+    With `f_exp` the expected frequencies can be given.
+
+    >>> chisquare([16, 18, 16, 14, 12, 12], f_exp=[16, 16, 16, 16, 16, 8])
+    (3.5, 0.62338762774958223)
+
+    When `f_obs` is 2-D, by default the test is applied to each column.
+
+    >>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
+    >>> obs.shape
+    (6, 2)
+    >>> chisquare(obs)
+    (array([ 2.        ,  6.66666667]), array([ 0.84914504,  0.24663415]))
+
+    By setting ``axis=None``, the test is applied to all data in the array,
+    which is equivalent to applying the test to the flattened array.
+
+    >>> chisquare(obs, axis=None)
+    (23.31034482758621, 0.015975692534127565)
+    >>> chisquare(obs.ravel())
+    (23.31034482758621, 0.015975692534127565)
+
+    `ddof` is the change to make to the default degrees of freedom.
+
+    >>> chisquare([16, 18, 16, 14, 12, 12], ddof=1)
+    (2.0, 0.73575888234288467)
+
+    The calculation of the p-values is done by broadcasting the
+    chi-squared statistic with `ddof`.
+
+    >>> chisquare([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
+    (2.0, array([ 0.84914504,  0.73575888,  0.5724067 ]))
+
+    `f_obs` and `f_exp` are also broadcast.  In the following, `f_obs` has
+    shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
+    `f_obs` and `f_exp` has shape (2, 6).  To compute the desired chi-squared
+    statistics, we use ``axis=1``:
+
+    >>> chisquare([16, 18, 16, 14, 12, 12],
+    ...           f_exp=[[16, 16, 16, 16, 16, 8], [8, 20, 20, 16, 12, 12]],
+    ...           axis=1)
+    (array([ 3.5 ,  9.25]), array([ 0.62338763,  0.09949846]))
+
+    """
+    return power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis,
+                            lambda_="pearson")
+
+
+KstestResult = _make_tuple_bunch('KstestResult', ['statistic', 'pvalue'],
+                                 ['statistic_location', 'statistic_sign'])
+
+
+def _compute_dplus(cdfvals, x):
+    """Computes D+ as used in the Kolmogorov-Smirnov test.
+
+    Parameters
+    ----------
+    cdfvals : array_like
+        Sorted array of CDF values between 0 and 1
+    x : array_like
+        Sorted array of the stochastic variable itself
+
+    Returns
+    -------
+    res : tuple of (float, float)
+        - The maximum distance of the CDF values below Uniform(0, 1).
+        - The location at which the maximum is reached.
+
+    """
+    n = len(cdfvals)
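+    # The empirical CDF just after the i-th sorted observation is i/n
+    # (i = 1..n); D+ is the largest amount by which it exceeds the
+    # hypothesized CDF.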
+    dplus = (np.arange(1.0, n + 1) / n - cdfvals)
+    amax = dplus.argmax()
+    loc_max = x[amax]
+    return (dplus[amax], loc_max)
+
+
+def _compute_dminus(cdfvals, x):
+    """Computes D- as used in the Kolmogorov-Smirnov test.
+
+    Parameters
+    ----------
+    cdfvals : array_like
+        Sorted array of CDF values between 0 and 1
+    x : array_like
+        Sorted array of the stochastic variable itself
+
+    Returns
+    -------
+    res : tuple of (float, float)
+        - Maximum distance of the CDF values above Uniform(0, 1)
+        - The location at which the maximum is reached.
+    """
+    n = len(cdfvals)
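+    # The empirical CDF just before the i-th sorted observation is (i-1)/n
+    # (i = 1..n); D- is the largest amount by which the hypothesized CDF
+    # exceeds it.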
+    dminus = (cdfvals - np.arange(0.0, n)/n)
+    amax = dminus.argmax()
+    loc_max = x[amax]
+    return (dminus[amax], loc_max)
+
+
+@_rename_parameter("mode", "method")
+def ks_1samp(x, cdf, args=(), alternative='two-sided', method='auto'):
+    """
+    Performs the one-sample Kolmogorov-Smirnov test for goodness of fit.
+
+    This test compares the underlying distribution F(x) of a sample
+    against a given continuous distribution G(x). See Notes for a description
+    of the available null and alternative hypotheses.
+
+    Parameters
+    ----------
+    x : array_like
+        a 1-D array of observations of iid random variables.
+    cdf : callable
+        callable used to calculate the cdf.
+    args : tuple, sequence, optional
+        Distribution parameters, used with `cdf`.
+    alternative : {'two-sided', 'less', 'greater'}, optional
+        Defines the null and alternative hypotheses. Default is 'two-sided'.
+        Please see explanations in the Notes below.
+    method : {'auto', 'exact', 'approx', 'asymp'}, optional
+        Defines the distribution used for calculating the p-value.
+        The following options are available (default is 'auto'):
+
+          * 'auto' : selects one of the other options.
+          * 'exact' : uses the exact distribution of the test statistic.
+          * 'approx' : approximates the two-sided probability with twice
+            the one-sided probability.
+          * 'asymp' : uses the asymptotic distribution of the test
+            statistic.
+
+    Returns
+    -------
+    res : KstestResult
+        An object containing attributes:
+
+        statistic : float
+            KS test statistic, either D+, D-, or D (the maximum of the two)
+        pvalue : float
+            One-tailed or two-tailed p-value.
+        statistic_location : float
+            Value of `x` corresponding with the KS statistic; i.e., the
+            distance between the empirical distribution function and the
+            hypothesized cumulative distribution function is measured at this
+            observation.
+        statistic_sign : int
+            +1 if the KS statistic is the maximum positive difference between
+            the empirical distribution function and the hypothesized cumulative
+            distribution function (D+); -1 if the KS statistic is the maximum
+            negative difference (D-).
+
+
+    See Also
+    --------
+    ks_2samp, kstest
+
+    Notes
+    -----
+    There are three options for the null and corresponding alternative
+    hypothesis that can be selected using the `alternative` parameter.
+
+    - `two-sided`: The null hypothesis is that the two distributions are
+      identical, F(x)=G(x) for all x; the alternative is that they are not
+      identical.
+
+    - `less`: The null hypothesis is that F(x) >= G(x) for all x; the
+      alternative is that F(x) < G(x) for at least one x.
+
+    - `greater`: The null hypothesis is that F(x) <= G(x) for all x; the
+      alternative is that F(x) > G(x) for at least one x.
+
+    Note that the alternative hypotheses describe the *CDFs* of the
+    underlying distributions, not the observed values. For example,
+    suppose x1 ~ F and x2 ~ G. If F(x) > G(x) for all x, the values in
+    x1 tend to be less than those in x2.
+
+    Examples
+    --------
+    Suppose we wish to test the null hypothesis that a sample is distributed
+    according to the standard normal.
+    We choose a significance level of 5%; that is, we will reject the null
+    hypothesis in favor of the alternative if the p-value is less than 0.05.
+
+    When testing uniformly distributed data, we would expect the
+    null hypothesis to be rejected.
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> rng = np.random.default_rng()
+    >>> stats.ks_1samp(stats.uniform.rvs(size=100, random_state=rng),
+    ...                stats.norm.cdf)
+    KstestResult(statistic=0.5001899973268688, pvalue=1.1616392184763533e-23)
+
+    Indeed, the p-value is lower than our threshold of 0.05, so we reject the
+    null hypothesis in favor of the default "two-sided" alternative: the data
+    are *not* distributed according to the standard normal.
+
+    When testing random variates from the standard normal distribution, we
+    expect the data to be consistent with the null hypothesis most of the time.
+
+    >>> x = stats.norm.rvs(size=100, random_state=rng)
+    >>> stats.ks_1samp(x, stats.norm.cdf)
+    KstestResult(statistic=0.05345882212970396, pvalue=0.9227159037744717)
+
+    As expected, the p-value of 0.92 is not below our threshold of 0.05, so
+    we cannot reject the null hypothesis.
+
+    Suppose, however, that the random variates are distributed according to
+    a normal distribution that is shifted toward greater values. In this case,
+    the cumulative distribution function (CDF) of the underlying
+    distribution tends to be *less* than the CDF of the standard normal.
+    Therefore, we would expect the null hypothesis to be rejected with
+    ``alternative='less'``:
+
+    >>> x = stats.norm.rvs(size=100, loc=0.5, random_state=rng)
+    >>> stats.ks_1samp(x, stats.norm.cdf, alternative='less')
+    KstestResult(statistic=0.17482387821055168, pvalue=0.001913921057766743)
+
+    and indeed, with p-value smaller than our threshold, we reject the null
+    hypothesis in favor of the alternative.
+
+    """
+    mode = method
+
+    alternative = {'t': 'two-sided', 'g': 'greater', 'l': 'less'}.get(
+        alternative.lower()[0], alternative)
+    if alternative not in ['two-sided', 'greater', 'less']:
+        raise ValueError("Unexpected alternative %s" % alternative)
+    if np.ma.is_masked(x):
+        x = x.compressed()
+
+    N = len(x)
+    x = np.sort(x)
+    cdfvals = cdf(x, *args)
+
+    if alternative == 'greater':
+        Dplus, d_location = _compute_dplus(cdfvals, x)
+        return KstestResult(Dplus, distributions.ksone.sf(Dplus, N),
+                            statistic_location=d_location,
+                            statistic_sign=1)
+
+    if alternative == 'less':
+        Dminus, d_location = _compute_dminus(cdfvals, x)
+        return KstestResult(Dminus, distributions.ksone.sf(Dminus, N),
+                            statistic_location=d_location,
+                            statistic_sign=-1)
+
+    # alternative == 'two-sided':
+    Dplus, dplus_location = _compute_dplus(cdfvals, x)
+    Dminus, dminus_location = _compute_dminus(cdfvals, x)
+    if Dplus > Dminus:
+        D = Dplus
+        d_location = dplus_location
+        d_sign = 1
+    else:
+        D = Dminus
+        d_location = dminus_location
+        d_sign = -1
+
+    if mode == 'auto':  # Always select exact
+        mode = 'exact'
+    if mode == 'exact':
+        prob = distributions.kstwo.sf(D, N)
+    elif mode == 'asymp':
+        prob = distributions.kstwobign.sf(D * np.sqrt(N))
+    else:
+        # mode == 'approx'
+        prob = 2 * distributions.ksone.sf(D, N)
+    prob = np.clip(prob, 0, 1)
+    return KstestResult(D, prob,
+                        statistic_location=d_location,
+                        statistic_sign=d_sign)
+
+
+Ks_2sampResult = KstestResult
+
+
+def _compute_prob_outside_square(n, h):
+    """
+    Compute the proportion of paths that pass outside the two diagonal lines.
+
+    Parameters
+    ----------
+    n : integer
+        n > 0
+    h : integer
+        0 <= h <= n
+
+    Returns
+    -------
+    p : float
+        The proportion of paths that pass outside the lines x-y = +/-h.
+
+    """
+    # Compute Pr(D_{n,n} >= h/n)
+    # Prob = 2 * ( binom(2n, n-h) - binom(2n, n-2h) + binom(2n, n-3h) - ... )
+    # / binom(2n, n)
+    # This formulation exhibits subtractive cancellation.
+    # Instead divide each term by binom(2n, n), then factor common terms
+    # and use a Horner-like algorithm
+    # P = 2 * A0 * (1 - A1*(1 - A2*(1 - A3*(1 - A4*(...)))))
+
+    P = 0.0
+    k = int(np.floor(n / h))
+    while k >= 0:
+        p1 = 1.0
+        # Each of the Ai terms has numerator and denominator with
+        # h simple terms.
+        for j in range(h):
+            p1 = (n - k * h - j) * p1 / (n + k * h + j + 1)
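+        # Horner-like step: fold this term into the nested product,
+        # P <- A_k * (1 - P).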
+        P = p1 * (1.0 - P)
+        k -= 1
+    return 2 * P
+
+
+def _count_paths_outside_method(m, n, g, h):
+    """Count the number of paths that pass outside the specified diagonal.
+
+    Parameters
+    ----------
+    m : integer
+        m > 0
+    n : integer
+        n > 0
+    g : integer
+        g is the greatest common divisor of m and n
+    h : integer
+        0 <= h <= lcm(m,n)
+
+    Returns
+    -------
+    p : float
+        The number of paths that go low.
+        The calculation may overflow - check for a finite answer.
+
+    Notes
+    -----
+    Count the integer lattice paths from (0, 0) to (m, n), which at some
+    point (x, y) along the path, satisfy:
+      m*y <= n*x - h*g
+    The paths make steps of size +1 in either positive x or positive y
+    directions.
+
+    We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk.
+    Hodges, J.L. Jr.,
+    "The Significance Probability of the Smirnov Two-Sample Test,"
+    Arkiv för Matematik, 3, No. 43 (1958), 469-86.
+
+    """
+    # Compute #paths which stay lower than x/m-y/n = h/lcm(m,n)
+    # B(x, y) = #{paths from (0,0) to (x,y) without
+    #             previously crossing the boundary}
+    #         = binom(x, y) - #{paths which already reached the boundary}
+    # Multiply by the number of path extensions going from (x, y) to (m, n)
+    # Sum.
+
+    # Probability is symmetrical in m, n.  Computation below assumes m >= n.
+    if m < n:
+        m, n = n, m
+    mg = m // g
+    ng = n // g
+
+    # Not every x needs to be considered.
+    # xj holds the list of x values to be checked.
+    # Wherever n*x/m + ng*h crosses an integer
+    lxj = n + (mg-h)//mg
+    xj = [(h + mg * j + ng-1)//ng for j in range(lxj)]
+    # B is an array just holding a few values of B(x,y), the ones needed.
+    # B[j] == B(x_j, j)
+    if lxj == 0:
+        return special.binom(m + n, n)
+    B = np.zeros(lxj)
+    B[0] = 1
+    # Compute the B(x, y) terms
+    for j in range(1, lxj):
+        Bj = special.binom(xj[j] + j, j)
+        for i in range(j):
+            bin = special.binom(xj[j] - xj[i] + j - i, j-i)
+            Bj -= bin * B[i]
+        B[j] = Bj
+    # Compute the number of path extensions...
+    num_paths = 0
+    for j in range(lxj):
+        bin = special.binom((m-xj[j]) + (n - j), n-j)
+        term = B[j] * bin
+        num_paths += term
+    return num_paths
+
+
+def _attempt_exact_2kssamp(n1, n2, g, d, alternative):
+    """Attempts to compute the exact 2sample probability.
+
+    n1, n2 are the sample sizes
+    g is the gcd(n1, n2)
+    d is the computed max difference in ECDFs
+
+    Returns (success, d, probability)
+    """
+    lcm = (n1 // g) * n2
+    h = int(np.round(d * lcm))
+    d = h * 1.0 / lcm
+    if h == 0:
+        return True, d, 1.0
+    saw_fp_error, prob = False, np.nan
+    try:
+        with np.errstate(invalid="raise", over="raise"):
+            if alternative == 'two-sided':
+                if n1 == n2:
+                    prob = _compute_prob_outside_square(n1, h)
+                else:
+                    prob = _compute_outer_prob_inside_method(n1, n2, g, h)
+            else:
+                if n1 == n2:
+                    # prob = binom(2n, n-h) / binom(2n, n)
+                    # Evaluating in that form incurs roundoff errors
+                    # from special.binom. Instead calculate directly
+                    jrange = np.arange(h)
+                    prob = np.prod((n1 - jrange) / (n1 + jrange + 1.0))
+                else:
+                    with np.errstate(over='raise'):
+                        num_paths = _count_paths_outside_method(n1, n2, g, h)
+                    bin = special.binom(n1 + n2, n1)
+                    if num_paths > bin or np.isinf(bin):
+                        saw_fp_error = True
+                    else:
+                        prob = num_paths / bin
+
+    except (FloatingPointError, OverflowError):
+        saw_fp_error = True
+
+    if saw_fp_error:
+        return False, d, np.nan
+    if not (0 <= prob <= 1):
+        return False, d, prob
+    return True, d, prob
+
+
+@_rename_parameter("mode", "method")
+def ks_2samp(data1, data2, alternative='two-sided', method='auto'):
+    """
+    Performs the two-sample Kolmogorov-Smirnov test for goodness of fit.
+
+    This test compares the underlying continuous distributions F(x) and G(x)
+    of two independent samples.  See Notes for a description of the available
+    null and alternative hypotheses.
+
+    Parameters
+    ----------
+    data1, data2 : array_like, 1-Dimensional
+        Two arrays of sample observations assumed to be drawn from a
+        continuous distribution; sample sizes can be different.
+    alternative : {'two-sided', 'less', 'greater'}, optional
+        Defines the null and alternative hypotheses. Default is 'two-sided'.
+        Please see explanations in the Notes below.
+    method : {'auto', 'exact', 'asymp'}, optional
+        Defines the method used for calculating the p-value.
+        The following options are available (default is 'auto'):
+
+          * 'auto' : use 'exact' for small samples, 'asymp' for large.
+          * 'exact' : use the exact distribution of the test statistic.
+          * 'asymp' : use the asymptotic distribution of the test statistic.
+
+    Returns
+    -------
+    res : KstestResult
+        An object containing attributes:
+
+        statistic : float
+            KS test statistic.
+        pvalue : float
+            One-tailed or two-tailed p-value.
+        statistic_location : float
+            Value from `data1` or `data2` corresponding with the KS statistic;
+            i.e., the distance between the empirical distribution functions is
+            measured at this observation.
+        statistic_sign : int
+            +1 if the empirical distribution function of `data1` exceeds
+            the empirical distribution function of `data2` at
+            `statistic_location`, otherwise -1.
+
+    See Also
+    --------
+    kstest, ks_1samp, epps_singleton_2samp, anderson_ksamp
+
+    Notes
+    -----
+    There are three options for the null and corresponding alternative
+    hypothesis that can be selected using the `alternative` parameter.
+
+    - `less`: The null hypothesis is that F(x) >= G(x) for all x; the
+      alternative is that F(x) < G(x) for at least one x. The statistic
+      is the magnitude of the minimum (most negative) difference between the
+      empirical distribution functions of the samples.
+
+    - `greater`: The null hypothesis is that F(x) <= G(x) for all x; the
+      alternative is that F(x) > G(x) for at least one x. The statistic
+      is the maximum (most positive) difference between the empirical
+      distribution functions of the samples.
+
+    - `two-sided`: The null hypothesis is that the two distributions are
+      identical, F(x)=G(x) for all x; the alternative is that they are not
+      identical. The statistic is the maximum absolute difference between the
+      empirical distribution functions of the samples.
+
+    Note that the alternative hypotheses describe the *CDFs* of the
+    underlying distributions, not the observed values of the data. For example,
+    suppose x1 ~ F and x2 ~ G. If F(x) > G(x) for all x, the values in
+    x1 tend to be less than those in x2.
+
+    If the KS statistic is large, then the p-value will be small, and this may
+    be taken as evidence against the null hypothesis in favor of the
+    alternative.
+
+    If ``method='exact'``, `ks_2samp` attempts to compute an exact p-value,
+    that is, the probability under the null hypothesis of obtaining a test
+    statistic value as extreme as the value computed from the data.
+    If ``method='asymp'``, the asymptotic Kolmogorov-Smirnov distribution is
+    used to compute an approximate p-value.
+    If ``method='auto'``, an exact p-value computation is attempted if both
+    sample sizes are less than 10000; otherwise, the asymptotic method is used.
+    In any case, if an exact p-value calculation is attempted and fails, a
+    warning will be emitted, and the asymptotic p-value will be returned.
+
+    The 'two-sided' 'exact' computation computes the complementary probability
+    and then subtracts from 1.  As such, the minimum probability it can return
+    is about 1e-16.  While the algorithm itself is exact, numerical
+    errors may accumulate for large sample sizes.   It is most suited to
+    situations in which one of the sample sizes is only a few thousand.
+
+    We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk [1]_.
+
+    References
+    ----------
+    .. [1] Hodges, J.L. Jr.,  "The Significance Probability of the Smirnov
+           Two-Sample Test," Arkiv fiur Matematik, 3, No. 43 (1958), 469-86.
+
+    Examples
+    --------
+    Suppose we wish to test the null hypothesis that two samples were drawn
+    from the same distribution.
+    We choose a significance level of 5%; that is, we will reject the null
+    hypothesis in favor of the alternative if the p-value is less than 0.05.
+
+    If the first sample were drawn from a uniform distribution and the second
+    were drawn from the standard normal, we would expect the null hypothesis
+    to be rejected.
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> rng = np.random.default_rng()
+    >>> sample1 = stats.uniform.rvs(size=100, random_state=rng)
+    >>> sample2 = stats.norm.rvs(size=110, random_state=rng)
+    >>> stats.ks_2samp(sample1, sample2)
+    KstestResult(statistic=0.5454545454545454, pvalue=7.37417839555191e-15)
+
+    Indeed, the p-value is lower than our threshold of 0.05, so we reject the
+    null hypothesis in favor of the default "two-sided" alternative: the data
+    were *not* drawn from the same distribution.
+
+    When both samples are drawn from the same distribution, we expect the data
+    to be consistent with the null hypothesis most of the time.
+
+    >>> sample1 = stats.norm.rvs(size=105, random_state=rng)
+    >>> sample2 = stats.norm.rvs(size=95, random_state=rng)
+    >>> stats.ks_2samp(sample1, sample2)
+    KstestResult(statistic=0.10927318295739348, pvalue=0.5438289009927495)
+
+    As expected, the p-value of 0.54 is not below our threshold of 0.05, so
+    we cannot reject the null hypothesis.
+
+    Suppose, however, that the first sample were drawn from
+    a normal distribution shifted toward greater values. In this case,
+    the cumulative distribution function (CDF) of the underlying
+    distribution tends to be *less* than the CDF underlying the second
+    sample. Therefore, we would expect the null hypothesis to be rejected
+    with ``alternative='less'``:
+
+    >>> sample1 = stats.norm.rvs(size=105, loc=0.5, random_state=rng)
+    >>> stats.ks_2samp(sample1, sample2, alternative='less')
+    KstestResult(statistic=0.4055137844611529, pvalue=3.5474563068855554e-08)
+
+    and indeed, with p-value smaller than our threshold, we reject the null
+    hypothesis in favor of the alternative.
+
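+    The `method` argument controls how the p-value is computed. As a small,
+    hand-checkable sketch (the data here are illustrative): with four
+    observations per sample, every ECDF step is a multiple of 1/4, and the
+    exact two-sided p-value is a ratio of lattice-path counts (54/70 for a
+    statistic of 1/2):
+
+    >>> res = stats.ks_2samp([1, 2, 3, 4], [2.5, 3.5, 4.5, 5.5],
+    ...                      method='exact')
+    >>> res.statistic, round(res.pvalue, 3)
+    (0.5, 0.771)
+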
+    """
+    mode = method
+
+    if mode not in ['auto', 'exact', 'asymp']:
+        raise ValueError(f'Invalid value for mode: {mode}')
+    alternative = {'t': 'two-sided', 'g': 'greater', 'l': 'less'}.get(
+        alternative.lower()[0], alternative)
+    if alternative not in ['two-sided', 'less', 'greater']:
+        raise ValueError(f'Invalid value for alternative: {alternative}')
+    MAX_AUTO_N = 10000  # 'auto' will attempt to be exact if n1,n2 <= MAX_AUTO_N
+    if np.ma.is_masked(data1):
+        data1 = data1.compressed()
+    if np.ma.is_masked(data2):
+        data2 = data2.compressed()
+    data1 = np.sort(data1)
+    data2 = np.sort(data2)
+    n1 = data1.shape[0]
+    n2 = data2.shape[0]
+    if min(n1, n2) == 0:
+        raise ValueError('Data passed to ks_2samp must not be empty')
+
+    data_all = np.concatenate([data1, data2])
+    # using searchsorted solves equal data problem
+    cdf1 = np.searchsorted(data1, data_all, side='right') / n1
+    cdf2 = np.searchsorted(data2, data_all, side='right') / n2
+    cddiffs = cdf1 - cdf2
+
+    # Identify the location of the statistic
+    argminS = np.argmin(cddiffs)
+    argmaxS = np.argmax(cddiffs)
+    loc_minS = data_all[argminS]
+    loc_maxS = data_all[argmaxS]
+
+    # Ensure sign of minS is not negative.
+    minS = np.clip(-cddiffs[argminS], 0, 1)
+    maxS = cddiffs[argmaxS]
+
+    if alternative == 'less' or (alternative == 'two-sided' and minS > maxS):
+        d = minS
+        d_location = loc_minS
+        d_sign = -1
+    else:
+        d = maxS
+        d_location = loc_maxS
+        d_sign = 1
+    g = gcd(n1, n2)
+    n1g = n1 // g
+    n2g = n2 // g
+    prob = -np.inf
+    if mode == 'auto':
+        mode = 'exact' if max(n1, n2) <= MAX_AUTO_N else 'asymp'
+    elif mode == 'exact':
+        # If lcm(n1, n2) is too big, switch from exact to asymp
+        if n1g >= np.iinfo(np.int32).max / n2g:
+            mode = 'asymp'
+            warnings.warn(
+                f"Exact ks_2samp calculation not possible with samples sizes "
+                f"{n1} and {n2}. Switching to 'asymp'.", RuntimeWarning,
+                stacklevel=3)
+
+    if mode == 'exact':
+        success, d, prob = _attempt_exact_2kssamp(n1, n2, g, d, alternative)
+        if not success:
+            mode = 'asymp'
+            warnings.warn(f"ks_2samp: Exact calculation unsuccessful. "
+                          f"Switching to method={mode}.", RuntimeWarning,
+                          stacklevel=3)
+
+    if mode == 'asymp':
+        # The product n1*n2 is large.  Use Smirnov's asymptotic formula.
+        # Ensure float to avoid overflow in multiplication
+        # sorted because the one-sided formula is not symmetric in n1, n2
+        m, n = sorted([float(n1), float(n2)], reverse=True)
+        en = m * n / (m + n)
+        if alternative == 'two-sided':
+            prob = distributions.kstwo.sf(d, np.round(en))
+        else:
+            z = np.sqrt(en) * d
+            # Use Hodges' suggested approximation Eqn 5.3
+            # Requires m to be the larger of (n1, n2)
+            expt = -2 * z**2 - 2 * z * (m + 2*n)/np.sqrt(m*n*(m+n))/3.0
+            prob = np.exp(expt)
+
+    prob = np.clip(prob, 0, 1)
+    return KstestResult(d, prob, statistic_location=d_location,
+                        statistic_sign=d_sign)
+
+
+def _parse_kstest_args(data1, data2, args, N):
+    # kstest allows many different variations of arguments.
+    # Pull out the parsing into a separate function
+    # (xvals, yvals, )  # 2sample
+    # (xvals, cdf function,..)
+    # (xvals, name of distribution, ...)
+    # (name of distribution, name of distribution, ...)
+
+    # Returns xvals, yvals, cdf
+    # where cdf is a cdf function, or None
+    # and yvals is either an array_like of values, or None
+    # and xvals is array_like.
+    rvsfunc, cdf = None, None
+    if isinstance(data1, str):
+        rvsfunc = getattr(distributions, data1).rvs
+    elif callable(data1):
+        rvsfunc = data1
+
+    if isinstance(data2, str):
+        cdf = getattr(distributions, data2).cdf
+        data2 = None
+    elif callable(data2):
+        cdf = data2
+        data2 = None
+
+    data1 = np.sort(rvsfunc(*args, size=N) if rvsfunc else data1)
+    return data1, data2, cdf
+
+
+@_rename_parameter("mode", "method")
+def kstest(rvs, cdf, args=(), N=20, alternative='two-sided', method='auto'):
+    """
+    Performs the (one-sample or two-sample) Kolmogorov-Smirnov test for
+    goodness of fit.
+
+    The one-sample test compares the underlying distribution F(x) of a sample
+    against a given distribution G(x). The two-sample test compares the
+    underlying distributions of two independent samples. Both tests are valid
+    only for continuous distributions.
+
+    Parameters
+    ----------
+    rvs : str, array_like, or callable
+        If an array, it should be a 1-D array of observations of random
+        variables.
+        If a callable, it should be a function to generate random variables;
+        it is required to have a keyword argument `size`.
+        If a string, it should be the name of a distribution in `scipy.stats`,
+        which will be used to generate random variables.
+    cdf : str, array_like or callable
+        If array_like, it should be a 1-D array of observations of random
+        variables, and the two-sample test is performed
+        (and rvs must be array_like).
+        If a callable, that callable is used to calculate the cdf.
+        If a string, it should be the name of a distribution in `scipy.stats`,
+        which will be used as the cdf function.
+    args : tuple, sequence, optional
+        Distribution parameters, used if `rvs` or `cdf` are strings or
+        callables.
+    N : int, optional
+        Sample size if `rvs` is string or callable.  Default is 20.
+    alternative : {'two-sided', 'less', 'greater'}, optional
+        Defines the null and alternative hypotheses. Default is 'two-sided'.
+        Please see explanations in the Notes below.
+    method : {'auto', 'exact', 'approx', 'asymp'}, optional
+        Defines the distribution used for calculating the p-value.
+        The following options are available (default is 'auto'):
+
+          * 'auto' : selects one of the other options.
+          * 'exact' : uses the exact distribution of the test statistic.
+          * 'approx' : approximates the two-sided probability with twice the
+            one-sided probability.
+          * 'asymp' : uses the asymptotic distribution of the test statistic.
+
+    Returns
+    -------
+    res : KstestResult
+        An object containing attributes:
+
+        statistic : float
+            KS test statistic, either D+, D-, or D (the maximum of the two)
+        pvalue : float
+            One-tailed or two-tailed p-value.
+        statistic_location : float
+            In a one-sample test, this is the value of `rvs`
+            corresponding with the KS statistic; i.e., the distance between
+            the empirical distribution function and the hypothesized cumulative
+            distribution function is measured at this observation.
+
+            In a two-sample test, this is the value from `rvs` or `cdf`
+            corresponding with the KS statistic; i.e., the distance between
+            the empirical distribution functions is measured at this
+            observation.
+        statistic_sign : int
+            In a one-sample test, this is +1 if the KS statistic is the
+            maximum positive difference between the empirical distribution
+            function and the hypothesized cumulative distribution function
+            (D+); it is -1 if the KS statistic is the maximum negative
+            difference (D-).
+
+            In a two-sample test, this is +1 if the empirical distribution
+            function of `rvs` exceeds the empirical distribution
+            function of `cdf` at `statistic_location`, otherwise -1.
+
+    See Also
+    --------
+    ks_1samp, ks_2samp
+
+    Notes
+    -----
+    There are three options for the null and corresponding alternative
+    hypothesis that can be selected using the `alternative` parameter.
+
+    - `two-sided`: The null hypothesis is that the two distributions are
+      identical, F(x)=G(x) for all x; the alternative is that they are not
+      identical.
+
+    - `less`: The null hypothesis is that F(x) >= G(x) for all x; the
+      alternative is that F(x) < G(x) for at least one x.
+
+    - `greater`: The null hypothesis is that F(x) <= G(x) for all x; the
+      alternative is that F(x) > G(x) for at least one x.
+
+    Note that the alternative hypotheses describe the *CDFs* of the
+    underlying distributions, not the observed values. For example,
+    suppose x1 ~ F and x2 ~ G. If F(x) > G(x) for all x, the values in
+    x1 tend to be less than those in x2.
+
+    Examples
+    --------
+    Suppose we wish to test the null hypothesis that a sample is distributed
+    according to the standard normal.
+    We choose a confidence level of 95%; that is, we will reject the null
+    hypothesis in favor of the alternative if the p-value is less than 0.05.
+
+    When testing uniformly distributed data, we would expect the
+    null hypothesis to be rejected.
+
+    >>> import numpy as np
+    >>> from scipy import stats
+    >>> rng = np.random.default_rng()
+    >>> stats.kstest(stats.uniform.rvs(size=100, random_state=rng),
+    ...              stats.norm.cdf)
+    KstestResult(statistic=0.5001899973268688, pvalue=1.1616392184763533e-23)
+
+    Indeed, the p-value is lower than our threshold of 0.05, so we reject the
+    null hypothesis in favor of the default "two-sided" alternative: the data
+    are *not* distributed according to the standard normal.
+
+    When testing random variates from the standard normal distribution, we
+    expect the data to be consistent with the null hypothesis most of the time.
+
+    >>> x = stats.norm.rvs(size=100, random_state=rng)
+    >>> stats.kstest(x, stats.norm.cdf)
+    KstestResult(statistic=0.05345882212970396, pvalue=0.9227159037744717)
+
+    As expected, the p-value of 0.92 is not below our threshold of 0.05, so
+    we cannot reject the null hypothesis.
+
+    Suppose, however, that the random variates are distributed according to
+    a normal distribution that is shifted toward greater values. In this case,
+    the cumulative distribution function (CDF) of the underlying distribution
+    tends to be *less* than the CDF of the standard normal. Therefore, we would
+    expect the null hypothesis to be rejected with ``alternative='less'``:
+
+    >>> x = stats.norm.rvs(size=100, loc=0.5, random_state=rng)
+    >>> stats.kstest(x, stats.norm.cdf, alternative='less')
+    KstestResult(statistic=0.17482387821055168, pvalue=0.001913921057766743)
+
+    and indeed, with p-value smaller than our threshold, we reject the null
+    hypothesis in favor of the alternative.
+
+    For convenience, the previous test can be performed using the name of the
+    distribution as the second argument.
+
+    >>> stats.kstest(x, "norm", alternative='less')
+    KstestResult(statistic=0.17482387821055168, pvalue=0.001913921057766743)
+
+    The examples above have all been one-sample tests identical to those
+    performed by `ks_1samp`. Note that `kstest` can also perform two-sample
+    tests identical to those performed by `ks_2samp`. For example, when two
+    samples are drawn from the same distribution, we expect the data to be
+    consistent with the null hypothesis most of the time.
+
+    >>> sample1 = stats.laplace.rvs(size=105, random_state=rng)
+    >>> sample2 = stats.laplace.rvs(size=95, random_state=rng)
+    >>> stats.kstest(sample1, sample2)
+    KstestResult(statistic=0.11779448621553884, pvalue=0.4494256912629795)
+
+    As expected, the p-value of 0.45 is not below our threshold of 0.05, so
+    we cannot reject the null hypothesis.
+
+    """
+    # Accept 'two_sided' as an alias for backwards compatibility.
+    if alternative == 'two_sided':
+        alternative = 'two-sided'
+    if alternative not in ['two-sided', 'greater', 'less']:
+        raise ValueError("Unexpected alternative %s" % alternative)
+    xvals, yvals, cdf = _parse_kstest_args(rvs, cdf, args, N)
+    if cdf:
+        return ks_1samp(xvals, cdf, args=args, alternative=alternative,
+                        method=method)
+    return ks_2samp(xvals, yvals, alternative=alternative, method=method)
+
+
+def tiecorrect(rankvals):
+    """Tie correction factor for Mann-Whitney U and Kruskal-Wallis H tests.
+
+    Parameters
+    ----------
+    rankvals : array_like
+        A 1-D sequence of ranks.  Typically this will be the array
+        returned by `~scipy.stats.rankdata`.
+
+    Returns
+    -------
+    factor : float
+        Correction factor for U or H.
+
+    See Also
+    --------
+    rankdata : Assign ranks to the data
+    mannwhitneyu : Mann-Whitney rank test
+    kruskal : Kruskal-Wallis H test
+
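+    Notes
+    -----
+    With ``t_i`` the number of tied values in the i-th group of ties and
+    ``n`` the total number of ranks, the factor returned below is
+    ``1 - sum(t_i**3 - t_i) / (n**3 - n)``, which equals 1 when there are
+    no ties.
+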
+    References
+    ----------
+    .. [1] Siegel, S. (1956) Nonparametric Statistics for the Behavioral
+           Sciences.  New York: McGraw-Hill.
+
+    Examples
+    --------
+    >>> from scipy.stats import tiecorrect, rankdata
+    >>> tiecorrect([1, 2.5, 2.5, 4])
+    0.9
+    >>> ranks = rankdata([1, 3, 2, 4, 5, 7, 2, 8, 4])
+    >>> ranks
+    array([ 1. ,  4. ,  2.5,  5.5,  7. ,  8. ,  2.5,  9. ,  5.5])
+    >>> tiecorrect(ranks)
+    0.9833333333333333
+
+    """
+    arr = np.sort(rankvals)
+    idx = np.nonzero(np.r_[True, arr[1:] != arr[:-1], True])[0]
+    cnt = np.diff(idx).astype(np.float64)
+
+    size = np.float64(arr.size)
+    return 1.0 if size < 2 else 1.0 - (cnt**3 - cnt).sum() / (size**3 - size)
+
+
+RanksumsResult = namedtuple('RanksumsResult', ('statistic', 'pvalue'))
+
+
+@_axis_nan_policy_factory(RanksumsResult, n_samples=2)
+def ranksums(x, y, alternative='two-sided'):
+    """Compute the Wilcoxon rank-sum statistic for two samples.
+
+    The Wilcoxon rank-sum test tests the null hypothesis that two sets
+    of measurements are drawn from the same distribution.  The alternative
+    hypothesis is that values in one sample are more likely to be
+    larger than the values in the other sample.
+
+    This test should be used to compare two samples from continuous
+    distributions.  It does not handle ties between measurements
+    in x and y.  For tie-handling and an optional continuity correction
+    see `scipy.stats.mannwhitneyu`.
+
+    Parameters
+    ----------
+    x,y : array_like
+        The data from the two samples.
+    alternative : {'two-sided', 'less', 'greater'}, optional
+        Defines the alternative hypothesis. Default is 'two-sided'.
+        The following options are available:
+
+        * 'two-sided': one of the distributions (underlying `x` or `y`) is
+          stochastically greater than the other.
+        * 'less': the distribution underlying `x` is stochastically less
+          than the distribution underlying `y`.
+        * 'greater': the distribution underlying `x` is stochastically greater
+          than the distribution underlying `y`.
+
+        .. versionadded:: 1.7.0
+
+    Returns
+    -------
+    statistic : float
+        The test statistic under the large-sample approximation that the
+        rank sum statistic is normally distributed.
+    pvalue : float
+        The p-value of the test.
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test
+
+    Examples
+    --------
+    We can test the hypothesis that two independent unequal-sized samples are
+    drawn from the same distribution by computing the Wilcoxon rank-sum
+    statistic.
+
+    >>> import numpy as np
+    >>> from scipy.stats import ranksums
+    >>> rng = np.random.default_rng()
+    >>> sample1 = rng.uniform(-1, 1, 200)
+    >>> sample2 = rng.uniform(-0.5, 1.5, 300) # a shifted distribution
+    >>> ranksums(sample1, sample2)
+    RanksumsResult(statistic=-7.887059, pvalue=3.09390448e-15)  # may vary
+    >>> ranksums(sample1, sample2, alternative='less')
+    RanksumsResult(statistic=-7.750585297581713, pvalue=4.573497606342543e-15) # may vary
+    >>> ranksums(sample1, sample2, alternative='greater')
+    RanksumsResult(statistic=-7.750585297581713, pvalue=0.9999999999999954) # may vary
+
+    The p-value of less than ``0.05`` indicates that this test rejects the
+    hypothesis at the 5% significance level.
+
+    """
+    x, y = map(np.asarray, (x, y))
+    n1 = len(x)
+    n2 = len(y)
+    alldata = np.concatenate((x, y))
+    ranked = rankdata(alldata)
+    x = ranked[:n1]
+    s = np.sum(x, axis=0)
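+    # Under the null hypothesis, the rank sum of the first sample has mean
+    # n1*(n1+n2+1)/2 and variance n1*n2*(n1+n2+1)/12; standardize to get z.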
+    expected = n1 * (n1+n2+1) / 2.0
+    z = (s - expected) / np.sqrt(n1*n2*(n1+n2+1)/12.0)
+    z, prob = _normtest_finish(z, alternative)
+
+    return RanksumsResult(z, prob)
+
+
+KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue'))
+
+
+@_axis_nan_policy_factory(KruskalResult, n_samples=None)
+def kruskal(*samples, nan_policy='propagate'):
+    """Compute the Kruskal-Wallis H-test for independent samples.
+
+    The Kruskal-Wallis H-test tests the null hypothesis that the population
+    medians of all of the groups are equal.  It is a non-parametric version of
+    ANOVA.  The test works on 2 or more independent samples, which may have
+    different sizes.  Note that rejecting the null hypothesis does not
+    indicate which of the groups differs.  Post hoc comparisons between
+    groups are required to determine which groups are different.
+
+    Parameters
+    ----------
+    sample1, sample2, ... : array_like
+       Two or more arrays with the sample measurements can be given as
+       arguments. Samples must be one-dimensional.
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains nan.
+        The following options are available (default is 'propagate'):
+
+          * 'propagate': returns nan
+          * 'raise': throws an error
+          * 'omit': performs the calculations ignoring nan values
+
+    Returns
+    -------
+    statistic : float
+       The Kruskal-Wallis H statistic, corrected for ties.
+    pvalue : float
+       The p-value for the test using the assumption that H has a chi
+       square distribution. The p-value returned is the survival function of
+       the chi square distribution evaluated at H.
+
+    See Also
+    --------
+    f_oneway : 1-way ANOVA.
+    mannwhitneyu : Mann-Whitney rank test on two samples.
+    friedmanchisquare : Friedman test for repeated measurements.
+
+    Notes
+    -----
+    Due to the assumption that H has a chi square distribution, the number
+    of samples in each group must not be too small.  A typical rule is
+    that each sample must have at least 5 measurements.
+
+    References
+    ----------
+    .. [1] W. H. Kruskal & W. W. Wallis, "Use of Ranks in
+       One-Criterion Variance Analysis", Journal of the American Statistical
+       Association, Vol. 47, Issue 260, pp. 583-621, 1952.
+    .. [2] https://en.wikipedia.org/wiki/Kruskal-Wallis_one-way_analysis_of_variance
+
+    Examples
+    --------
+    >>> from scipy import stats
+    >>> x = [1, 3, 5, 7, 9]
+    >>> y = [2, 4, 6, 8, 10]
+    >>> stats.kruskal(x, y)
+    KruskalResult(statistic=0.2727272727272734, pvalue=0.6015081344405895)
+
+    >>> x = [1, 1, 1]
+    >>> y = [2, 2, 2]
+    >>> z = [2, 2]
+    >>> stats.kruskal(x, y, z)
+    KruskalResult(statistic=7.0, pvalue=0.0301973834223185)
+
+    """
+    samples = list(map(np.asarray, samples))
+
+    num_groups = len(samples)
+    if num_groups < 2:
+        raise ValueError("Need at least two groups in stats.kruskal()")
+
+    for sample in samples:
+        if sample.size == 0:
+            return KruskalResult(np.nan, np.nan)
+        elif sample.ndim != 1:
+            raise ValueError("Samples must be one-dimensional.")
+
+    n = np.asarray(list(map(len, samples)))
+
+    if nan_policy not in ('propagate', 'raise', 'omit'):
+        raise ValueError("nan_policy must be 'propagate', 'raise' or 'omit'")
+
+    contains_nan = False
+    for sample in samples:
+        cn = _contains_nan(sample, nan_policy)
+        if cn[0]:
+            contains_nan = True
+            break
+
+    if contains_nan and nan_policy == 'omit':
+        # Rebuild the list itself; reassigning the loop variable would leave
+        # `samples` unchanged.
+        samples = [ma.masked_invalid(sample) for sample in samples]
+        return mstats_basic.kruskal(*samples)
+
+    if contains_nan and nan_policy == 'propagate':
+        return KruskalResult(np.nan, np.nan)
+
+    alldata = np.concatenate(samples)
+    ranked = rankdata(alldata)
+    ties = tiecorrect(ranked)
+    if ties == 0:
+        raise ValueError('All numbers are identical in kruskal')
+
+    # Compute sum^2/n for each group and sum
+    j = np.insert(np.cumsum(n), 0, 0)
+    ssbn = 0
+    for i in range(num_groups):
+        ssbn += _square_of_sums(ranked[j[i]:j[i+1]]) / n[i]
+
+    totaln = np.sum(n, dtype=float)
+    h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1)
+    df = num_groups - 1
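+    # Correct H for ties by dividing by the tie-correction factor.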
+    h /= ties
+
+    return KruskalResult(h, distributions.chi2.sf(h, df))
+
+
+FriedmanchisquareResult = namedtuple('FriedmanchisquareResult',
+                                     ('statistic', 'pvalue'))
+
+
+def friedmanchisquare(*samples):
+    """Compute the Friedman test for repeated samples.
+
+    The Friedman test tests the null hypothesis that repeated samples of
+    the same individuals have the same distribution.  It is often used
+    to test for consistency among samples obtained in different ways.
+    For example, if two sampling techniques are used on the same set of
+    individuals, the Friedman test can be used to determine if the two
+    sampling techniques are consistent.
+
+    Parameters
+    ----------
+    sample1, sample2, sample3... : array_like
+        Arrays of observations.  All of the arrays must have the same number
+        of elements.  At least three samples must be given.
+
+    Returns
+    -------
+    statistic : float
+        The test statistic, correcting for ties.
+    pvalue : float
+        The associated p-value assuming that the test statistic has a chi
+        squared distribution.
+
+    Notes
+    -----
+    Due to the assumption that the test statistic has a chi squared
+    distribution, the p-value is only reliable for n > 10 and more than
+    6 repeated samples.
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Friedman_test
+
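+    Examples
+    --------
+    A small, hand-checkable sketch (data chosen here for illustration): the
+    rankings within each individual are perfectly consistent and tie-free,
+    so the statistic is ``12/(k*n*(k+1)) * S - 3*n*(k+1)`` with ``S`` the
+    sum of squared column rank sums, giving 8.0 with ``k=3`` treatments and
+    ``n=4`` individuals.
+
+    >>> from scipy import stats
+    >>> x = [1, 2, 3, 4]
+    >>> y = [2, 3, 4, 5]
+    >>> z = [3, 4, 5, 6]
+    >>> stat, p = stats.friedmanchisquare(x, y, z)
+    >>> round(stat, 1), round(p, 4)
+    (8.0, 0.0183)
+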
+    """
+    k = len(samples)
+    if k < 3:
+        raise ValueError('At least 3 sets of samples must be given '
+                         'for Friedman test, got {}.'.format(k))
+
+    n = len(samples[0])
+    for i in range(1, k):
+        if len(samples[i]) != n:
+            raise ValueError('Unequal N in friedmanchisquare.  Aborting.')
+
+    # Rank data
+    data = np.vstack(samples).T
+    data = data.astype(float)
+    for i in range(len(data)):
+        data[i] = rankdata(data[i])
+
+    # Handle ties
+    ties = 0
+    for d in data:
+        replist, repnum = find_repeats(array(d))
+        for t in repnum:
+            ties += t * (t*t - 1)
+    c = 1 - ties / (k*(k*k - 1)*n)
+
+    ssbn = np.sum(data.sum(axis=0)**2)
+    chisq = (12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)) / c
+
+    return FriedmanchisquareResult(chisq, distributions.chi2.sf(chisq, k - 1))
+
+
+BrunnerMunzelResult = namedtuple('BrunnerMunzelResult',
+                                 ('statistic', 'pvalue'))
+
+
+def brunnermunzel(x, y, alternative="two-sided", distribution="t",
+                  nan_policy='propagate'):
+    """Compute the Brunner-Munzel test on samples x and y.
+
+    The Brunner-Munzel test is a nonparametric test of the null hypothesis that
+    when values are taken one by one from each group, the probabilities of
+    getting large values in both groups are equal.
+    Unlike the Wilcoxon-Mann-Whitney U test, it does not require the
+    assumption of equal variances in the two groups. Note that it also does
+    not assume that the distributions are the same. This test works on two
+    independent samples, which may have different sizes.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Array of samples, should be one-dimensional.
+    alternative : {'two-sided', 'less', 'greater'}, optional
+        Defines the alternative hypothesis.
+        The following options are available (default is 'two-sided'):
+
+          * 'two-sided'
+          * 'less': one-sided
+          * 'greater': one-sided
+    distribution : {'t', 'normal'}, optional
+        Defines how to get the p-value.
+        The following options are available (default is 't'):
+
+          * 't': get the p-value by t-distribution
+          * 'normal': get the p-value by standard normal distribution.
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains nan.
+        The following options are available (default is 'propagate'):
+
+          * 'propagate': returns nan
+          * 'raise': throws an error
+          * 'omit': performs the calculations ignoring nan values
+
+    Returns
+    -------
+    statistic : float
+        The Brunner-Munzel W statistic.
+    pvalue : float
+        p-value assuming a t distribution. One-sided or
+        two-sided, depending on the choice of `alternative` and `distribution`.
+
+    See Also
+    --------
+    mannwhitneyu : Mann-Whitney rank test on two samples.
+
+    Notes
+    -----
+    Brunner and Munzel recommended estimating the p-value using the
+    t-distribution when the size of the data is 50 or less. If the size is
+    less than 10, it is better to use the permuted Brunner-Munzel test (see
+    [2]_).
+
+    References
+    ----------
+    .. [1] Brunner, E. and Munzel, U. "The nonparametric Behrens-Fisher
+           problem: Asymptotic theory and a small-sample approximation".
+           Biometrical Journal. Vol. 42(2000): 17-25.
+    .. [2] Neubert, K. and Brunner, E. "A studentized permutation test for the
+           non-parametric Behrens-Fisher problem". Computational Statistics and
+           Data Analysis. Vol. 51(2007): 5192-5204.
+
+    Examples
+    --------
+    >>> from scipy import stats
+    >>> x1 = [1,2,1,1,1,1,1,1,1,1,2,4,1,1]
+    >>> x2 = [3,3,4,3,1,2,3,1,1,5,4]
+    >>> w, p_value = stats.brunnermunzel(x1, x2)
+    >>> w
+    3.1374674823029505
+    >>> p_value
+    0.0057862086661515377
+
+    """
+    x = np.asarray(x)
+    y = np.asarray(y)
+
+    # check both x and y
+    cnx, npx = _contains_nan(x, nan_policy)
+    cny, npy = _contains_nan(y, nan_policy)
+    contains_nan = cnx or cny
+    if npx == "omit" or npy == "omit":
+        nan_policy = "omit"
+
+    if contains_nan and nan_policy == "propagate":
+        return BrunnerMunzelResult(np.nan, np.nan)
+    elif contains_nan and nan_policy == "omit":
+        x = ma.masked_invalid(x)
+        y = ma.masked_invalid(y)
+        return mstats_basic.brunnermunzel(x, y, alternative, distribution)
+
+    nx = len(x)
+    ny = len(y)
+    if nx == 0 or ny == 0:
+        return BrunnerMunzelResult(np.nan, np.nan)
+    rankc = rankdata(np.concatenate((x, y)))
+    rankcx = rankc[0:nx]
+    rankcy = rankc[nx:nx+ny]
+    rankcx_mean = np.mean(rankcx)
+    rankcy_mean = np.mean(rankcy)
+    rankx = rankdata(x)
+    ranky = rankdata(y)
+    rankx_mean = np.mean(rankx)
+    ranky_mean = np.mean(ranky)
+
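+    # Variance estimates based on the "placements": the differences between
+    # each observation's rank in the pooled sample and its rank within its
+    # own sample.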
+    Sx = np.sum(np.power(rankcx - rankx - rankcx_mean + rankx_mean, 2.0))
+    Sx /= nx - 1
+    Sy = np.sum(np.power(rankcy - ranky - rankcy_mean + ranky_mean, 2.0))
+    Sy /= ny - 1
+
+    wbfn = nx * ny * (rankcy_mean - rankcx_mean)
+    wbfn /= (nx + ny) * np.sqrt(nx * Sx + ny * Sy)
+
+    if distribution == "t":
+        df_numer = np.power(nx * Sx + ny * Sy, 2.0)
+        df_denom = np.power(nx * Sx, 2.0) / (nx - 1)
+        df_denom += np.power(ny * Sy, 2.0) / (ny - 1)
+        df = df_numer / df_denom
+
+        if (df_numer == 0) and (df_denom == 0):
+            message = ("p-value cannot be estimated with `distribution='t' "
+                       "because degrees of freedom parameter is undefined "
+                       "(0/0). Try using `distribution='normal'")
+            warnings.warn(message, RuntimeWarning)
+
+        p = distributions.t.cdf(wbfn, df)
+    elif distribution == "normal":
+        p = distributions.norm.cdf(wbfn)
+    else:
+        raise ValueError(
+            "distribution should be 't' or 'normal'")
+
+    if alternative == "greater":
+        pass
+    elif alternative == "less":
+        p = 1 - p
+    elif alternative == "two-sided":
+        p = 2 * np.min([p, 1-p])
+    else:
+        raise ValueError(
+            "alternative should be 'less', 'greater' or 'two-sided'")
+
+    return BrunnerMunzelResult(wbfn, p)
+
+
+def combine_pvalues(pvalues, method='fisher', weights=None):
+    """
+    Combine p-values from independent tests that bear upon the same hypothesis.
+
+    These methods are intended only for combining p-values from hypothesis
+    tests based upon continuous distributions.
+
+    Each method assumes that under the null hypothesis, the p-values are
+    sampled independently and uniformly from the interval [0, 1]. A test
+    statistic (different for each method) is computed and a combined
+    p-value is calculated based upon the distribution of this test statistic
+    under the null hypothesis.
+
+    Parameters
+    ----------
+    pvalues : array_like, 1-D
+        Array of p-values assumed to come from independent tests based on
+        continuous distributions.
+    method : {'fisher', 'pearson', 'tippett', 'stouffer', 'mudholkar_george'}
+        Name of method to use to combine p-values.
+
+        The available methods are (see Notes for details):
+
+        * 'fisher': Fisher's method (Fisher's combined probability test)
+        * 'pearson': Pearson's method
+        * 'mudholkar_george': Mudholkar's and George's method
+        * 'tippett': Tippett's method
+        * 'stouffer': Stouffer's Z-score method
+    weights : array_like, 1-D, optional
+        Optional array of weights used only for Stouffer's Z-score method.
+
+    Returns
+    -------
+    res : SignificanceResult
+        An object containing attributes:
+
+        statistic : float
+            The statistic calculated by the specified method.
+        pvalue : float
+            The combined p-value.
+
+    Notes
+    -----
+    If this function is applied to tests with a discrete statistic, such as
+    any rank test or contingency-table test, it will yield systematically
+    wrong results, e.g. Fisher's method will systematically overestimate the
+    p-value [1]_. This problem becomes less severe for large sample sizes
+    when the discrete distributions become approximately continuous.
+
+    The differences between the methods can be best illustrated by their
+    statistics and what aspects of a combination of p-values they emphasise
+    when considering significance [2]_. For example, methods emphasising large
+    p-values are more sensitive to strong false and true negatives;
+    conversely, methods focussing on small p-values are sensitive to
+    positives.
+
+    * The statistic of Fisher's method (also known as Fisher's combined
+      probability test) [3]_ is :math:`-2\\sum_i \\log(p_i)`, which is
+      equivalent (as a test statistic) to the product of individual p-values:
+      :math:`\\prod_i p_i`. Under the null hypothesis, this statistic follows
+      a :math:`\\chi^2` distribution. This method emphasises small p-values.
+    * Pearson's method uses :math:`-2\\sum_i\\log(1-p_i)`, which is equivalent
+      to :math:`\\prod_i \\frac{1}{1-p_i}` [2]_.
+      It thus emphasises large p-values.
+    * Mudholkar and George compromise between Fisher's and Pearson's method by
+      averaging their statistics [4]_. Their method emphasises extreme
+      p-values, both close to 1 and 0.
+    * Stouffer's method [5]_ uses Z-scores and the statistic:
+      :math:`\\sum_i \\Phi^{-1} (p_i)`, where :math:`\\Phi` is the CDF of the
+      standard normal distribution. The advantage of this method is that it is
+      straightforward to introduce weights, which can make Stouffer's method
+      more powerful than Fisher's method when the p-values are from studies
+      of different size [6]_ [7]_.
+    * Tippett's method uses the smallest p-value as a statistic.
+      (Mind that this minimum is not the combined p-value.)
+
+    Fisher's method may be extended to combine p-values from dependent tests
+    [8]_. Extensions such as Brown's method and Kost's method are not currently
+    implemented.
+
+    .. versionadded:: 0.15.0
+
+    References
+    ----------
+    .. [1] Kincaid, W. M., "The Combination of Tests Based on Discrete
+           Distributions." Journal of the American Statistical Association 57,
+           no. 297 (1962), 10-19.
+    .. [2] Heard, N. and Rubin-Delanchey, P. "Choosing between methods of
+           combining p-values."  Biometrika 105.1 (2018): 239-246.
+    .. [3] https://en.wikipedia.org/wiki/Fisher%27s_method
+    .. [4] George, E. O., and G. S. Mudholkar. "On the convolution of logistic
+           random variables." Metrika 30.1 (1983): 1-13.
+    .. [5] https://en.wikipedia.org/wiki/Fisher%27s_method#Relation_to_Stouffer.27s_Z-score_method
+    .. [6] Whitlock, M. C. "Combining probability from independent tests: the
+           weighted Z-method is superior to Fisher's approach." Journal of
+           Evolutionary Biology 18, no. 5 (2005): 1368-1373.
+    .. [7] Zaykin, Dmitri V. "Optimally weighted Z-test is a powerful method
+           for combining probabilities in meta-analysis." Journal of
+           Evolutionary Biology 24, no. 8 (2011): 1836-1841.
+    .. [8] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method
+
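+    Examples
+    --------
+    A brief, hand-checkable sketch (the p-values here are illustrative, not
+    from real studies). Under Fisher's method the statistic is
+    ``-2 * sum(log(p_i))``, compared against a chi-squared distribution with
+    ``2 * len(pvalues)`` degrees of freedom:
+
+    >>> from scipy.stats import combine_pvalues
+    >>> statistic, pvalue = combine_pvalues([0.01, 0.2, 0.3], method='fisher')
+    >>> round(statistic, 3), round(pvalue, 4)
+    (14.837, 0.0216)
+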
+    """
+    pvalues = np.asarray(pvalues)
+    if pvalues.ndim != 1:
+        raise ValueError("pvalues is not 1-D")
+
+    if method == 'fisher':
+        statistic = -2 * np.sum(np.log(pvalues))
+        pval = distributions.chi2.sf(statistic, 2 * len(pvalues))
+    elif method == 'pearson':
+        statistic = 2 * np.sum(np.log1p(-pvalues))
+        pval = distributions.chi2.cdf(-statistic, 2 * len(pvalues))
+    elif method == 'mudholkar_george':
+        normalizing_factor = np.sqrt(3/len(pvalues))/np.pi
+        statistic = -np.sum(np.log(pvalues)) + np.sum(np.log1p(-pvalues))
+        nu = 5 * len(pvalues) + 4
+        approx_factor = np.sqrt(nu / (nu - 2))
+        pval = distributions.t.sf(statistic * normalizing_factor
+                                  * approx_factor, nu)
+    elif method == 'tippett':
+        statistic = np.min(pvalues)
+        pval = distributions.beta.cdf(statistic, 1, len(pvalues))
+    elif method == 'stouffer':
+        if weights is None:
+            weights = np.ones_like(pvalues)
+        elif len(weights) != len(pvalues):
+            raise ValueError("pvalues and weights must be of the same size.")
+
+        weights = np.asarray(weights)
+        if weights.ndim != 1:
+            raise ValueError("weights is not 1-D")
+
+        Zi = distributions.norm.isf(pvalues)
+        statistic = np.dot(weights, Zi) / np.linalg.norm(weights)
+        pval = distributions.norm.sf(statistic)
+
+    else:
+        raise ValueError(
+            f"Invalid method {method!r}. Valid methods are 'fisher', "
+            "'pearson', 'mudholkar_george', 'tippett', and 'stouffer'"
+        )
+
+    return SignificanceResult(statistic, pval)
+
+
+#####################################
+#       STATISTICAL DISTANCES       #
+#####################################
+
+
+def wasserstein_distance(u_values, v_values, u_weights=None, v_weights=None):
+    r"""
+    Compute the first Wasserstein distance between two 1D distributions.
+
+    This distance is also known as the earth mover's distance, since it can be
+    seen as the minimum amount of "work" required to transform :math:`u` into
+    :math:`v`, where "work" is measured as the amount of distribution weight
+    that must be moved, multiplied by the distance it has to be moved.
+
+    .. versionadded:: 1.0.0
+
+    Parameters
+    ----------
+    u_values, v_values : array_like
+        Values observed in the (empirical) distribution.
+    u_weights, v_weights : array_like, optional
+        Weight for each value. If unspecified, each value is assigned the same
+        weight.
+        `u_weights` (resp. `v_weights`) must have the same length as
+        `u_values` (resp. `v_values`). If the weight sum differs from 1, it
+        must still be positive and finite so that the weights can be normalized
+        to sum to 1.
+
+    Returns
+    -------
+    distance : float
+        The computed distance between the distributions.
+
+    Notes
+    -----
+    The first Wasserstein distance between the distributions :math:`u` and
+    :math:`v` is:
+
+    .. math::
+
+        l_1 (u, v) = \inf_{\pi \in \Gamma (u, v)} \int_{\mathbb{R} \times
+        \mathbb{R}} |x-y| \mathrm{d} \pi (x, y)
+
+    where :math:`\Gamma (u, v)` is the set of (probability) distributions on
+    :math:`\mathbb{R} \times \mathbb{R}` whose marginals are :math:`u` and
+    :math:`v` on the first and second factors respectively.
+
+    If :math:`U` and :math:`V` are the respective CDFs of :math:`u` and
+    :math:`v`, this distance also equals:
+
+    .. math::
+
+        l_1(u, v) = \int_{-\infty}^{+\infty} |U-V|
+
+    See [2]_ for a proof of the equivalence of both definitions.
+
+    The input distributions can be empirical, therefore coming from samples
+    whose values are effectively inputs of the function, or they can be seen as
+    generalized functions, in which case they are weighted sums of Dirac delta
+    functions located at the specified values.
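+
+    As a quick sanity check for the first example below: the optimal plan
+    pairs the sorted values of ``[0, 1, 3]`` with those of ``[5, 6, 8]``
+    (0 -> 5, 1 -> 6, 3 -> 8), moving each of the three equal masses a
+    distance of 5, so the distance is 5.0.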
+
+    References
+    ----------
+    .. [1] "Wasserstein metric", https://en.wikipedia.org/wiki/Wasserstein_metric
+    .. [2] Ramdas, Garcia, Cuturi "On Wasserstein Two Sample Testing and Related
+           Families of Nonparametric Tests" (2015). :arXiv:`1509.02237`.
+
+    Examples
+    --------
+    >>> from scipy.stats import wasserstein_distance
+    >>> wasserstein_distance([0, 1, 3], [5, 6, 8])
+    5.0
+    >>> wasserstein_distance([0, 1], [0, 1], [3, 1], [2, 2])
+    0.25
+    >>> wasserstein_distance([3.4, 3.9, 7.5, 7.8], [4.5, 1.4],
+    ...                      [1.4, 0.9, 3.1, 7.2], [3.2, 3.5])
+    4.0781331438047861
+
+    """
+    return _cdf_distance(1, u_values, v_values, u_weights, v_weights)
+
+
+def energy_distance(u_values, v_values, u_weights=None, v_weights=None):
+    r"""Compute the energy distance between two 1D distributions.
+
+    .. versionadded:: 1.0.0
+
+    Parameters
+    ----------
+    u_values, v_values : array_like
+        Values observed in the (empirical) distribution.
+    u_weights, v_weights : array_like, optional
+        Weight for each value. If unspecified, each value is assigned the same
+        weight.
+        `u_weights` (resp. `v_weights`) must have the same length as
+        `u_values` (resp. `v_values`). If the weight sum differs from 1, it
+        must still be positive and finite so that the weights can be normalized
+        to sum to 1.
+
+    Returns
+    -------
+    distance : float
+        The computed distance between the distributions.
+
+    Notes
+    -----
+    The energy distance between two distributions :math:`u` and :math:`v`,
+    whose respective CDFs are :math:`U` and :math:`V`, equals:
+
+    .. math::
+
+        D(u, v) = \left( 2\mathbb E|X - Y| - \mathbb E|X - X'| -
+        \mathbb E|Y - Y'| \right)^{1/2}
+
+    where :math:`X` and :math:`X'` (resp. :math:`Y` and :math:`Y'`) are
+    independent random variables whose probability distribution is :math:`u`
+    (resp. :math:`v`).
+
+    Sometimes the square of this quantity is referred to as the "energy
+    distance" (e.g. in [2]_, [4]_), but as noted in [1]_ and [3]_, only the
+    definition above satisfies the axioms of a distance function (metric).
+
+    As shown in [2]_, for one-dimensional real-valued variables, the energy
+    distance is linked to the non-distribution-free version of the Cramér-von
+    Mises distance:
+
+    .. math::
+
+        D(u, v) = \sqrt{2} l_2(u, v) = \left( 2 \int_{-\infty}^{+\infty} (U-V)^2
+        \right)^{1/2}
+
+    Note that the common Cramér-von Mises criterion uses the distribution-free
+    version of the distance. See [2]_ (section 2), for more details about both
+    versions of the distance.
+
+    The input distributions can be empirical, therefore coming from samples
+    whose values are effectively inputs of the function, or they can be seen as
+    generalized functions, in which case they are weighted sums of Dirac delta
+    functions located at the specified values.
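+
+    As a quick sanity check for the first example below: for point masses at
+    0 and 2, :math:`2\mathbb E|X - Y| = 4` while :math:`\mathbb E|X - X'| =
+    \mathbb E|Y - Y'| = 0`, so the distance is :math:`\sqrt{4} = 2` (up to
+    floating-point rounding in the displayed output).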
+
+    References
+    ----------
+    .. [1] Rizzo, Szekely "Energy distance." Wiley Interdisciplinary Reviews:
+           Computational Statistics, 8(1):27-38 (2015).
+    .. [2] Szekely "E-statistics: The energy of statistical samples." Bowling
+           Green State University, Department of Mathematics and Statistics,
+           Technical Report 02-16 (2002).
+    .. [3] "Energy distance", https://en.wikipedia.org/wiki/Energy_distance
+    .. [4] Bellemare, Danihelka, Dabney, Mohamed, Lakshminarayanan, Hoyer,
+           Munos "The Cramer Distance as a Solution to Biased Wasserstein
+           Gradients" (2017). :arXiv:`1705.10743`.
+
+    Examples
+    --------
+    >>> from scipy.stats import energy_distance
+    >>> energy_distance([0], [2])
+    2.0000000000000004
+    >>> energy_distance([0, 8], [0, 8], [3, 1], [2, 2])
+    1.0000000000000002
+    >>> energy_distance([0.7, 7.4, 2.4, 6.8], [1.4, 8. ],
+    ...                 [2.1, 4.2, 7.4, 8. ], [7.6, 8.8])
+    0.88003340976158217
+
+    """
+    return np.sqrt(2) * _cdf_distance(2, u_values, v_values,
+                                      u_weights, v_weights)
+
+
+def _cdf_distance(p, u_values, v_values, u_weights=None, v_weights=None):
+    r"""
+    Compute, between two one-dimensional distributions :math:`u` and
+    :math:`v`, whose respective CDFs are :math:`U` and :math:`V`, the
+    statistical distance that is defined as:
+
+    .. math::
+
+        l_p(u, v) = \left( \int_{-\infty}^{+\infty} |U-V|^p \right)^{1/p}
+
+    p is a positive parameter; p = 1 gives the Wasserstein distance, p = 2
+    gives the energy distance.
+
+    Parameters
+    ----------
+    u_values, v_values : array_like
+        Values observed in the (empirical) distribution.
+    u_weights, v_weights : array_like, optional
+        Weight for each value. If unspecified, each value is assigned the same
+        weight.
+        `u_weights` (resp. `v_weights`) must have the same length as
+        `u_values` (resp. `v_values`). If the weight sum differs from 1, it
+        must still be positive and finite so that the weights can be normalized
+        to sum to 1.
+
+    Returns
+    -------
+    distance : float
+        The computed distance between the distributions.
+
+    Notes
+    -----
+    The input distributions can be empirical, therefore coming from samples
+    whose values are effectively inputs of the function, or they can be seen as
+    generalized functions, in which case they are weighted sums of Dirac delta
+    functions located at the specified values.
+
+    References
+    ----------
+    .. [1] Bellemare, Danihelka, Dabney, Mohamed, Lakshminarayanan, Hoyer,
+           Munos "The Cramer Distance as a Solution to Biased Wasserstein
+           Gradients" (2017). :arXiv:`1705.10743`.
+
+    """
+    u_values, u_weights = _validate_distribution(u_values, u_weights)
+    v_values, v_weights = _validate_distribution(v_values, v_weights)
+
+    u_sorter = np.argsort(u_values)
+    v_sorter = np.argsort(v_values)
+
+    all_values = np.concatenate((u_values, v_values))
+    all_values.sort(kind='mergesort')
+
+    # Compute the differences between pairs of successive values of u and v.
+    deltas = np.diff(all_values)
+
+    # Get the respective positions of the values of u and v among the values of
+    # both distributions.
+    u_cdf_indices = u_values[u_sorter].searchsorted(all_values[:-1], 'right')
+    v_cdf_indices = v_values[v_sorter].searchsorted(all_values[:-1], 'right')
+
+    # Calculate the CDFs of u and v using their weights, if specified.
+    if u_weights is None:
+        u_cdf = u_cdf_indices / u_values.size
+    else:
+        u_sorted_cumweights = np.concatenate(([0],
+                                              np.cumsum(u_weights[u_sorter])))
+        u_cdf = u_sorted_cumweights[u_cdf_indices] / u_sorted_cumweights[-1]
+
+    if v_weights is None:
+        v_cdf = v_cdf_indices / v_values.size
+    else:
+        v_sorted_cumweights = np.concatenate(([0],
+                                              np.cumsum(v_weights[v_sorter])))
+        v_cdf = v_sorted_cumweights[v_cdf_indices] / v_sorted_cumweights[-1]
+
+    # Compute the value of the integral based on the CDFs.
+    # If p = 1 or p = 2, we avoid using np.power, which introduces an overhead
+    # of about 15%.
+    if p == 1:
+        return np.sum(np.multiply(np.abs(u_cdf - v_cdf), deltas))
+    if p == 2:
+        return np.sqrt(np.sum(np.multiply(np.square(u_cdf - v_cdf), deltas)))
+    return np.power(np.sum(np.multiply(np.power(np.abs(u_cdf - v_cdf), p),
+                                       deltas)), 1/p)
+
+
+def _validate_distribution(values, weights):
+    """
+    Validate the values and weights from a distribution input of `cdf_distance`
+    and return them as ndarray objects.
+
+    Parameters
+    ----------
+    values : array_like
+        Values observed in the (empirical) distribution.
+    weights : array_like
+        Weight for each value.
+
+    Returns
+    -------
+    values : ndarray
+        Values as ndarray.
+    weights : ndarray
+        Weights as ndarray.
+
+    """
+    # Validate the value array.
+    values = np.asarray(values, dtype=float)
+    if len(values) == 0:
+        raise ValueError("Distribution can't be empty.")
+
+    # Validate the weight array, if specified.
+    if weights is not None:
+        weights = np.asarray(weights, dtype=float)
+        if len(weights) != len(values):
+            raise ValueError('Value and weight array-likes for the same '
+                             'empirical distribution must be of the same size.')
+        if np.any(weights < 0):
+            raise ValueError('All weights must be non-negative.')
+        if not 0 < np.sum(weights) < np.inf:
+            raise ValueError('Weight array-like sum must be positive and '
+                             'finite. Set as None for an equal distribution of '
+                             'weight.')
+
+        return values, weights
+
+    return values, None
+
+
+#####################################
+#         SUPPORT FUNCTIONS         #
+#####################################
+
+RepeatedResults = namedtuple('RepeatedResults', ('values', 'counts'))
+
+
+def find_repeats(arr):
+    """Find repeats and repeat counts.
+
+    Parameters
+    ----------
+    arr : array_like
+        Input array. This is cast to float64.
+
+    Returns
+    -------
+    values : ndarray
+        The unique values from the (flattened) input that are repeated.
+
+    counts : ndarray
+        Number of times the corresponding 'value' is repeated.
+
+    Notes
+    -----
+    In numpy >= 1.9 `numpy.unique` provides similar functionality. The main
+    difference is that `find_repeats` only returns repeated values.
+
+    Examples
+    --------
+    >>> from scipy import stats
+    >>> stats.find_repeats([2, 1, 2, 3, 2, 2, 5])
+    RepeatedResults(values=array([2.]), counts=array([4]))
+
+    >>> stats.find_repeats([[10, 20, 1, 2], [5, 5, 4, 4]])
+    RepeatedResults(values=array([4.,  5.]), counts=array([2, 2]))
+
+    """
+    # Note: always copies.
+    return RepeatedResults(*_find_repeats(np.array(arr, dtype=np.float64)))
+
+
+def _sum_of_squares(a, axis=0):
+    """Square each element of the input array, and return the sum(s) of that.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    axis : int or None, optional
+        Axis along which to calculate. Default is 0. If None, compute over
+        the whole array `a`.
+
+    Returns
+    -------
+    sum_of_squares : ndarray
+        The sum along the given axis for (a**2).
+
+    See Also
+    --------
+    _square_of_sums : The square(s) of the sum(s) (the opposite of
+        `_sum_of_squares`).
+
+    """
+    a, axis = _chk_asarray(a, axis)
+    return np.sum(a*a, axis)
+
+
+def _square_of_sums(a, axis=0):
+    """Sum elements of the input array, and return the square(s) of that sum.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    axis : int or None, optional
+        Axis along which to calculate. Default is 0. If None, compute over
+        the whole array `a`.
+
+    Returns
+    -------
+    square_of_sums : float or ndarray
+        The square of the sum over `axis`.
+
+    See Also
+    --------
+    _sum_of_squares : The sum of squares (the opposite of `square_of_sums`).
+
+    """
+    a, axis = _chk_asarray(a, axis)
+    s = np.sum(a, axis)
+    if not np.isscalar(s):
+        return s.astype(float) * s
+    else:
+        return float(s) * s
+
+
+def rankdata(a, method='average', *, axis=None, nan_policy='propagate'):
+    """Assign ranks to data, dealing with ties appropriately.
+
+    By default (``axis=None``), the data array is first flattened, and a flat
+    array of ranks is returned. Separately reshape the rank array to the
+    shape of the data array if desired (see Examples).
+
+    Ranks begin at 1.  The `method` argument controls how ranks are assigned
+    to equal values.  See [1]_ for further discussion of ranking methods.
+
+    Parameters
+    ----------
+    a : array_like
+        The array of values to be ranked.
+    method : {'average', 'min', 'max', 'dense', 'ordinal'}, optional
+        The method used to assign ranks to tied elements.
+        The following methods are available (default is 'average'):
+
+          * 'average': The average of the ranks that would have been assigned to
+            all the tied values is assigned to each value.
+          * 'min': The minimum of the ranks that would have been assigned to all
+            the tied values is assigned to each value.  (This is also
+            referred to as "competition" ranking.)
+          * 'max': The maximum of the ranks that would have been assigned to all
+            the tied values is assigned to each value.
+          * 'dense': Like 'min', but the rank of the next highest element is
+            assigned the rank immediately after those assigned to the tied
+            elements.
+          * 'ordinal': All values are given a distinct rank, corresponding to
+            the order that the values occur in `a`.
+    axis : {None, int}, optional
+        Axis along which to perform the ranking. If ``None``, the data array
+        is first flattened.
+    nan_policy : {'propagate', 'omit', 'raise'}, optional
+        Defines how to handle when input contains nan.
+        The following options are available (default is 'propagate'):
+
+          * 'propagate': propagates nans through the rank calculation
+          * 'omit': performs the calculations ignoring nan values
+          * 'raise': raises an error
+
+        .. note::
+
+            When `nan_policy` is 'propagate', the output is an array of *all*
+            nans because ranks relative to nans in the input are undefined.
+            When `nan_policy` is 'omit', nans in `a` are ignored when ranking
+            the other values, and the corresponding locations of the output
+            are nan.
+
+        .. versionadded:: 1.10
+
+    Returns
+    -------
+    ranks : ndarray
+         An array of size equal to the size of `a`, containing rank
+         scores.
+
+    References
+    ----------
+    .. [1] "Ranking", https://en.wikipedia.org/wiki/Ranking
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.stats import rankdata
+    >>> rankdata([0, 2, 3, 2])
+    array([ 1. ,  2.5,  4. ,  2.5])
+    >>> rankdata([0, 2, 3, 2], method='min')
+    array([ 1,  2,  4,  2])
+    >>> rankdata([0, 2, 3, 2], method='max')
+    array([ 1,  3,  4,  3])
+    >>> rankdata([0, 2, 3, 2], method='dense')
+    array([ 1,  2,  3,  2])
+    >>> rankdata([0, 2, 3, 2], method='ordinal')
+    array([ 1,  2,  4,  3])
+    >>> rankdata([[0, 2], [3, 2]]).reshape(2,2)
+    array([[1. , 2.5],
+           [4. , 2.5]])
+    >>> rankdata([[0, 2, 2], [3, 2, 5]], axis=1)
+    array([[1. , 2.5, 2.5],
+           [2. , 1. , 3. ]])
+    >>> rankdata([0, 2, 3, np.nan, -2, np.nan], nan_policy="propagate")
+    array([nan, nan, nan, nan, nan, nan])
+    >>> rankdata([0, 2, 3, np.nan, -2, np.nan], nan_policy="omit")
+    array([ 2.,  3.,  4., nan,  1., nan])
+
+    """
+    if method not in ('average', 'min', 'max', 'dense', 'ordinal'):
+        raise ValueError('unknown method "{0}"'.format(method))
+
+    a = np.asarray(a)
+
+    if axis is not None:
+        if a.size == 0:
+            # The return values of `normalize_axis_index` are ignored.  The
+            # call validates `axis`, even though we won't use it.
+            # use scipy._lib._util._normalize_axis_index when available
+            np.core.multiarray.normalize_axis_index(axis, a.ndim)
+            dt = np.float64 if method == 'average' else np.int_
+            return np.empty(a.shape, dtype=dt)
+        return np.apply_along_axis(rankdata, axis, a, method,
+                                   nan_policy=nan_policy)
+
+    arr = np.ravel(a)
+    contains_nan, nan_policy = _contains_nan(arr, nan_policy)
+    nan_indexes = None
+    if contains_nan:
+        if nan_policy == 'omit':
+            nan_indexes = np.isnan(arr)
+        if nan_policy == 'propagate':
+            return np.full_like(arr, np.nan)
+
+    algo = 'mergesort' if method == 'ordinal' else 'quicksort'
+    sorter = np.argsort(arr, kind=algo)
+
+    inv = np.empty(sorter.size, dtype=np.intp)
+    inv[sorter] = np.arange(sorter.size, dtype=np.intp)
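+    # `inv` is the inverse permutation of `sorter`: inv[i] is the position
+    # of arr[i] in the sorted array.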
+
+    if method == 'ordinal':
+        result = inv + 1
+
+    arr = arr[sorter]
+    obs = np.r_[True, arr[1:] != arr[:-1]]
+    dense = obs.cumsum()[inv]
+
+    if method == 'dense':
+        result = dense
+
+    # cumulative counts of each unique value
+    count = np.r_[np.nonzero(obs)[0], len(obs)]
+
+    if method == 'max':
+        result = count[dense]
+
+    if method == 'min':
+        result = count[dense - 1] + 1
+
+    if method == 'average':
+        result = .5 * (count[dense] + count[dense - 1] + 1)
+
+    if nan_indexes is not None:
+        result = result.astype('float64')
+        result[nan_indexes] = np.nan
+
+    return result
+
+
+def expectile(a, alpha=0.5, *, weights=None):
+    r"""Compute the expectile at the specified level.
+
+    Expectiles are a generalization of the expectation in the same way as
+    quantiles are a generalization of the median. The expectile at level
+    `alpha = 0.5` is the mean (average). See Notes for more details.
+
+    Parameters
+    ----------
+    a : array_like
+        Array containing numbers whose expectile is desired.
+    alpha : float, default: 0.5
+        The level of the expectile; e.g., `alpha=0.5` gives the mean.
+    weights : array_like, optional
+        An array of weights associated with the values in `a`.
+        The `weights` must be broadcastable to the same shape as `a`.
+        Default is None, which gives each value a weight of 1.0.
+        An integer valued weight element acts like repeating the corresponding
+        observation in `a` that many times. See Notes for more details.
+
+    Returns
+    -------
+    expectile : ndarray
+        The empirical expectile at level `alpha`.
+
+    See Also
+    --------
+    numpy.mean : Arithmetic average
+    numpy.quantile : Quantile
+
+    Notes
+    -----
+    In general, the expectile at level :math:`\alpha` of a random variable
+    :math:`X` with cumulative distribution function (CDF) :math:`F` is given
+    by the unique solution :math:`t` of:
+
+    .. math::
+
+        \alpha E((X - t)_+) = (1 - \alpha) E((t - X)_+) \,.
+
+    Here, :math:`(x)_+ = \max(0, x)` is the positive part of :math:`x`.
+    This equation can be equivalently written as:
+
+    .. math::
+
+        \alpha \int_t^\infty (x - t)\mathrm{d}F(x)
+        = (1 - \alpha) \int_{-\infty}^t (t - x)\mathrm{d}F(x) \,.
+
+    The empirical expectile at level :math:`\alpha` (`alpha`) of a sample
+    :math:`a_i` (the array `a`) is defined by plugging in the empirical CDF of
+    `a`. Given sample or case weights :math:`w` (the array `weights`), it
+    reads :math:`F_a(x) = \frac{1}{\sum_i w_i} \sum_i w_i 1_{a_i \leq x}`
+    with indicator function :math:`1_{A}`. This leads to the definition of the
+    empirical expectile at level `alpha` as the unique solution :math:`t` of:
+
+    .. math::
+
+        \alpha \sum_{i=1}^n w_i (a_i - t)_+ =
+            (1 - \alpha) \sum_{i=1}^n w_i (t - a_i)_+ \,.
+
+    For :math:`\alpha=0.5`, this simplifies to the weighted average.
+    Furthermore, the larger :math:`\alpha`, the larger the value of the
+    expectile.
+
+    As a final remark, the expectile at level :math:`\alpha` can also be
+    written as a minimization problem. One often used choice is
+
+    .. math::
+
+        \operatorname{argmin}_t
+        E(\lvert 1_{t\geq X} - \alpha\rvert(t - X)^2) \,.
+
+    References
+    ----------
+    .. [1] W. K. Newey and J. L. Powell (1987), "Asymmetric Least Squares
+           Estimation and Testing," Econometrica, 55, 819-847.
+    .. [2] T. Gneiting (2011). "Making and Evaluating Point Forecasts,"
+           Journal of the American Statistical Association, 106, 746-762.
+           :doi:`10.48550/arXiv.0912.0902`
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.stats import expectile
+    >>> a = [1, 4, 2, -1]
+    >>> expectile(a, alpha=0.5) == np.mean(a)
+    True
+    >>> expectile(a, alpha=0.2)
+    0.42857142857142855
+    >>> expectile(a, alpha=0.8)
+    2.5714285714285716
+    >>> weights = [1, 3, 1, 1]
+    >>> np.isclose(expectile(a, alpha=0.5, weights=weights),
+    ...            np.average(a, weights=weights))
+    True
+
+    """
+    if alpha < 0 or alpha > 1:
+        raise ValueError(
+            "The expectile level alpha must be in the range [0, 1]."
+        )
+    a = np.asarray(a)
+
+    if weights is not None:
+        weights = np.broadcast_to(weights, a.shape)
+
+    # This is the empirical equivalent of Eq. (13) with identification
+    # function from Table 9 (omitting a factor of 2) in [2] (their y is our
+    # data a, their x is our t)
+    def first_order(t):
+        return np.average(np.abs((a <= t) - alpha) * (t - a), weights=weights)
+
+    if alpha >= 0.5:
+        x0 = np.average(a, weights=weights)
+        x1 = np.amax(a)
+    else:
+        x1 = np.average(a, weights=weights)
+        x0 = np.amin(a)
+
+    if x0 == x1:
+        # a has a single unique element
+        return x0
+
+    # Note that the expectile is the unique solution, so no worries about
+    # finding a wrong root.
+    res = root_scalar(first_order, x0=x0, x1=x1)
+    return res.root
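+
+# Hedged sanity check (exposition only, not part of the module): the root
+# found above satisfies the empirical first-order condition
+#     alpha * sum(w * (a - t)_+) == (1 - alpha) * sum(w * (t - a)_+)
+# For example:
+#     a = np.array([1, 4, 2, -1])
+#     t = expectile(a, alpha=0.2)
+#     lhs = 0.2 * np.sum(np.maximum(a - t, 0.0))
+#     rhs = 0.8 * np.sum(np.maximum(t - a, 0.0))
+#     np.isclose(lhs, rhs)   # -> True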
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_tukeylambda_stats.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_tukeylambda_stats.py
new file mode 100644
index 00000000..9ccf9b6b
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_tukeylambda_stats.py
@@ -0,0 +1,199 @@
+import numpy as np
+from numpy import poly1d
+from scipy.special import beta
+
+
+# The following code was used to generate the Pade coefficients for the
+# Tukey Lambda variance function.  Version 0.17 of mpmath was used.
+#---------------------------------------------------------------------------
+# import mpmath as mp
+#
+# mp.mp.dps = 60
+#
+# one   = mp.mpf(1)
+# two   = mp.mpf(2)
+# three = mp.mpf(3)
+#
+# def mpvar(lam):
+#     if lam == 0:
+#         v = mp.pi**2 / three
+#     else:
+#         v = (two / lam**2) * (one / (one + two*lam) -
+#                               mp.beta(lam + one, lam + one))
+#     return v
+#
+# t = mp.taylor(mpvar, 0, 8)
+# p, q = mp.pade(t, 4, 4)
+# print("p =", [mp.fp.mpf(c) for c in p])
+# print("q =", [mp.fp.mpf(c) for c in q])
+#---------------------------------------------------------------------------
+
+# Pade coefficients for the Tukey Lambda variance function.
+_tukeylambda_var_pc = [3.289868133696453, 0.7306125098871127,
+                       -0.5370742306855439, 0.17292046290190008,
+                       -0.02371146284628187]
+_tukeylambda_var_qc = [1.0, 3.683605511659861, 4.184152498888124,
+                       1.7660926747377275, 0.2643989311168465]
+
+# numpy.poly1d instances for the numerator and denominator of the
+# Pade approximation to the Tukey Lambda variance.
+_tukeylambda_var_p = poly1d(_tukeylambda_var_pc[::-1])
+_tukeylambda_var_q = poly1d(_tukeylambda_var_qc[::-1])
+
+
+def tukeylambda_variance(lam):
+    """Variance of the Tukey Lambda distribution.
+
+    Parameters
+    ----------
+    lam : array_like
+        The lambda values at which to compute the variance.
+
+    Returns
+    -------
+    v : ndarray
+        The variance.  For lam < -0.5, the variance is not defined, so
+        np.nan is returned.  For lam = -0.5, np.inf is returned.
+
+    Notes
+    -----
+    In an interval around lambda=0, this function uses the [4,4] Pade
+    approximation to compute the variance.  Otherwise it uses the standard
+    formula (https://en.wikipedia.org/wiki/Tukey_lambda_distribution).  The
+    Pade approximation is used because the standard formula has a removable
+    discontinuity at lambda = 0, and does not produce accurate numerical
+    results near lambda = 0.
+    """
+    lam = np.asarray(lam)
+    shp = lam.shape
+    lam = np.atleast_1d(lam).astype(np.float64)
+
+    # For absolute values of lam less than threshold, use the Pade
+    # approximation.
+    threshold = 0.075
+
+    # Play games with masks to implement the conditional evaluation of
+    # the distribution.
+    # lambda < -0.5:  var = nan
+    low_mask = lam < -0.5
+    # lambda == -0.5: var = inf
+    neghalf_mask = lam == -0.5
+    # abs(lambda) < threshold:  use Pade approximation
+    small_mask = np.abs(lam) < threshold
+    # else the "regular" case:  use the explicit formula.
+    reg_mask = ~(low_mask | neghalf_mask | small_mask)
+
+    # Get the 'lam' values for the cases where they are needed.
+    small = lam[small_mask]
+    reg = lam[reg_mask]
+
+    # Compute the function for each case.
+    v = np.empty_like(lam)
+    v[low_mask] = np.nan
+    v[neghalf_mask] = np.inf
+    if small.size > 0:
+        # Use the Pade approximation near lambda = 0.
+        v[small_mask] = _tukeylambda_var_p(small) / _tukeylambda_var_q(small)
+    if reg.size > 0:
+        v[reg_mask] = (2.0 / reg**2) * (1.0 / (1.0 + 2 * reg) -
+                                        beta(reg + 1, reg + 1))
+    v.shape = shp
+    return v
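+
+# Hedged check (exposition only): at lam = 0 the Pade approximant reduces to
+# its constant term p[0]/q[0], which matches the exact limiting variance of
+# the logistic case, pi**2 / 3:
+#     np.isclose(tukeylambda_variance(0.0), np.pi**2 / 3)   # -> True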
+
+
+# The following code was used to generate the Pade coefficients for the
+# Tukey Lambda kurtosis function.  Version 0.17 of mpmath was used.
+#---------------------------------------------------------------------------
+# import mpmath as mp
+#
+# mp.mp.dps = 60
+#
+# one   = mp.mpf(1)
+# two   = mp.mpf(2)
+# three = mp.mpf(3)
+# four  = mp.mpf(4)
+#
+# def mpkurt(lam):
+#     if lam == 0:
+#         k = mp.mpf(6)/5
+#     else:
+#         numer = (one/(four*lam+one) - four*mp.beta(three*lam+one, lam+one) +
+#                  three*mp.beta(two*lam+one, two*lam+one))
+#         denom = two*(one/(two*lam+one) - mp.beta(lam+one,lam+one))**2
+#         k = numer / denom - three
+#     return k
+#
+# # There is a bug in mpmath 0.17: when we use the 'method' keyword of the
+# # taylor function and we request a degree 9 Taylor polynomial, we actually
+# # get degree 8.
+# t = mp.taylor(mpkurt, 0, 9, method='quad', radius=0.01)
+# t = [mp.chop(c, tol=1e-15) for c in t]
+# p, q = mp.pade(t, 4, 4)
+# print("p =", [mp.fp.mpf(c) for c in p])
+# print("q =", [mp.fp.mpf(c) for c in q])
+#---------------------------------------------------------------------------
+
+# Pade coefficients for the Tukey Lambda kurtosis function.
+_tukeylambda_kurt_pc = [1.2, -5.853465139719495, -22.653447381131077,
+                        0.20601184383406815, 4.59796302262789]
+_tukeylambda_kurt_qc = [1.0, 7.171149192233599, 12.96663094361842,
+                        0.43075235247853005, -2.789746758009912]
+
+# numpy.poly1d instances for the numerator and denominator of the
+# Pade approximation to the Tukey Lambda kurtosis.
+_tukeylambda_kurt_p = poly1d(_tukeylambda_kurt_pc[::-1])
+_tukeylambda_kurt_q = poly1d(_tukeylambda_kurt_qc[::-1])
+
+
+def tukeylambda_kurtosis(lam):
+    """Kurtosis of the Tukey Lambda distribution.
+
+    Parameters
+    ----------
+    lam : array_like
+        The lambda values at which to compute the kurtosis.
+
+    Returns
+    -------
+    v : ndarray
+        The kurtosis.  For lam < -0.25, the kurtosis is not defined, so
+        np.nan is returned.  For lam = -0.25, np.inf is returned.
+
+    """
+    lam = np.asarray(lam)
+    shp = lam.shape
+    lam = np.atleast_1d(lam).astype(np.float64)
+
+    # For absolute values of lam less than threshold, use the Pade
+    # approximation.
+    threshold = 0.055
+
+    # Use masks to implement the conditional evaluation of the kurtosis.
+    # lambda < -0.25:  kurtosis = nan
+    low_mask = lam < -0.25
+    # lambda == -0.25: kurtosis = inf
+    negqrtr_mask = lam == -0.25
+    # lambda near 0:  use Pade approximation
+    small_mask = np.abs(lam) < threshold
+    # else the "regular" case:  use the explicit formula.
+    reg_mask = ~(low_mask | negqrtr_mask | small_mask)
+
+    # Get the 'lam' values for the cases where they are needed.
+    small = lam[small_mask]
+    reg = lam[reg_mask]
+
+    # Compute the function for each case.
+    k = np.empty_like(lam)
+    k[low_mask] = np.nan
+    k[negqrtr_mask] = np.inf
+    if small.size > 0:
+        k[small_mask] = _tukeylambda_kurt_p(small) / _tukeylambda_kurt_q(small)
+    if reg.size > 0:
+        numer = (1.0 / (4 * reg + 1) - 4 * beta(3 * reg + 1, reg + 1) +
+                 3 * beta(2 * reg + 1, 2 * reg + 1))
+        denom = 2 * (1.0/(2 * reg + 1) - beta(reg + 1, reg + 1))**2
+        k[reg_mask] = numer / denom - 3
+
+    # The return value will be a numpy array; resetting the shape ensures that
+    # if `lam` was a scalar, the return value is a 0-d array.
+    k.shape = shp
+    return k
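+
+# Hedged check (exposition only): at lam = 0 the Pade approximant reduces to
+# its constant term 1.2, the exact excess kurtosis 6/5 of the logistic
+# distribution (the lam -> 0 limit of the Tukey Lambda family):
+#     float(tukeylambda_kurtosis(0.0))   # -> 1.2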
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_unuran/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_unuran/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_unuran/unuran.pxd b/__packaged__/coreml/.python_dependencies/scipy/stats/_unuran/unuran.pxd
new file mode 100644
index 00000000..c709feef
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_unuran/unuran.pxd
@@ -0,0 +1,1309 @@
+# File automatically generated using autopxd2
+
+from libc.stdio cimport FILE
+
+cdef extern from "unuran.h" nogil:
+
+    cdef struct unur_distr
+
+    ctypedef unur_distr UNUR_DISTR
+
+    cdef struct unur_par
+
+    ctypedef unur_par UNUR_PAR
+
+    cdef struct unur_gen
+
+    ctypedef unur_gen UNUR_GEN
+
+    cdef struct unur_urng
+
+    ctypedef unur_urng UNUR_URNG
+
+    ctypedef double UNUR_FUNCT_CONT(double x, unur_distr* distr)
+
+    ctypedef double UNUR_FUNCT_DISCR(int x, unur_distr* distr)
+
+    ctypedef int UNUR_IFUNCT_DISCR(double x, unur_distr* distr)
+
+    ctypedef double UNUR_FUNCT_CVEC(double* x, unur_distr* distr)
+
+    ctypedef int UNUR_VFUNCT_CVEC(double* result, double* x, unur_distr* distr)
+
+    ctypedef double UNUR_FUNCTD_CVEC(double* x, int coord, unur_distr* distr)
+
+    cdef struct unur_slist
+
+    ctypedef void UNUR_ERROR_HANDLER(char* objid, char* file, int line, char* errortype, int unur_errno, char* reason)
+
+    UNUR_URNG* unur_get_default_urng()
+
+    UNUR_URNG* unur_set_default_urng(UNUR_URNG* urng_new)
+
+    UNUR_URNG* unur_set_default_urng_aux(UNUR_URNG* urng_new)
+
+    UNUR_URNG* unur_get_default_urng_aux()
+
+    int unur_set_urng(UNUR_PAR* parameters, UNUR_URNG* urng)
+
+    UNUR_URNG* unur_chg_urng(UNUR_GEN* generator, UNUR_URNG* urng)
+
+    UNUR_URNG* unur_get_urng(UNUR_GEN* generator)
+
+    int unur_set_urng_aux(UNUR_PAR* parameters, UNUR_URNG* urng_aux)
+
+    int unur_use_urng_aux_default(UNUR_PAR* parameters)
+
+    int unur_chgto_urng_aux_default(UNUR_GEN* generator)
+
+    UNUR_URNG* unur_chg_urng_aux(UNUR_GEN* generator, UNUR_URNG* urng_aux)
+
+    UNUR_URNG* unur_get_urng_aux(UNUR_GEN* generator)
+
+    double unur_urng_sample(UNUR_URNG* urng)
+
+    double unur_sample_urng(UNUR_GEN* gen)
+
+    int unur_urng_sample_array(UNUR_URNG* urng, double* X, int dim)
+
+    int unur_urng_reset(UNUR_URNG* urng)
+
+    int unur_urng_sync(UNUR_URNG* urng)
+
+    int unur_urng_seed(UNUR_URNG* urng, unsigned long seed)
+
+    int unur_urng_anti(UNUR_URNG* urng, int anti)
+
+    int unur_urng_nextsub(UNUR_URNG* urng)
+
+    int unur_urng_resetsub(UNUR_URNG* urng)
+
+    int unur_gen_sync(UNUR_GEN* generator)
+
+    int unur_gen_seed(UNUR_GEN* generator, unsigned long seed)
+
+    int unur_gen_anti(UNUR_GEN* generator, int anti)
+
+    int unur_gen_reset(UNUR_GEN* generator)
+
+    int unur_gen_nextsub(UNUR_GEN* generator)
+
+    int unur_gen_resetsub(UNUR_GEN* generator)
+
+    ctypedef double (*_unur_urng_new_sampleunif_ft)(void* state)
+
+    UNUR_URNG* unur_urng_new(_unur_urng_new_sampleunif_ft sampleunif, void* state)
+
+    void unur_urng_free(UNUR_URNG* urng)
+
+    ctypedef unsigned int (*_unur_urng_set_sample_array_samplearray_ft)(void* state, double* X, int dim)
+
+    int unur_urng_set_sample_array(UNUR_URNG* urng, _unur_urng_set_sample_array_samplearray_ft samplearray)
+
+    ctypedef void (*_unur_urng_set_sync_sync_ft)(void* state)
+
+    int unur_urng_set_sync(UNUR_URNG* urng, _unur_urng_set_sync_sync_ft sync)
+
+    ctypedef void (*_unur_urng_set_seed_setseed_ft)(void* state, unsigned long seed)
+
+    int unur_urng_set_seed(UNUR_URNG* urng, _unur_urng_set_seed_setseed_ft setseed)
+
+    ctypedef void (*_unur_urng_set_anti_setanti_ft)(void* state, int anti)
+
+    int unur_urng_set_anti(UNUR_URNG* urng, _unur_urng_set_anti_setanti_ft setanti)
+
+    ctypedef void (*_unur_urng_set_reset_reset_ft)(void* state)
+
+    int unur_urng_set_reset(UNUR_URNG* urng, _unur_urng_set_reset_reset_ft reset)
+
+    ctypedef void (*_unur_urng_set_nextsub_nextsub_ft)(void* state)
+
+    int unur_urng_set_nextsub(UNUR_URNG* urng, _unur_urng_set_nextsub_nextsub_ft nextsub)
+
+    ctypedef void (*_unur_urng_set_resetsub_resetsub_ft)(void* state)
+
+    int unur_urng_set_resetsub(UNUR_URNG* urng, _unur_urng_set_resetsub_resetsub_ft resetsub)
+
+    ctypedef void (*_unur_urng_set_delete_fpdelete_ft)(void* state)
+
+    int unur_urng_set_delete(UNUR_URNG* urng, _unur_urng_set_delete_fpdelete_ft fpdelete)
+
+    cdef enum:
+        UNUR_DISTR_CONT
+        UNUR_DISTR_CEMP
+        UNUR_DISTR_CVEC
+        UNUR_DISTR_CVEMP
+        UNUR_DISTR_MATR
+        UNUR_DISTR_DISCR
+
+    void unur_distr_free(UNUR_DISTR* distribution)
+
+    int unur_distr_set_name(UNUR_DISTR* distribution, char* name)
+
+    char* unur_distr_get_name(UNUR_DISTR* distribution)
+
+    int unur_distr_get_dim(UNUR_DISTR* distribution)
+
+    unsigned int unur_distr_get_type(UNUR_DISTR* distribution)
+
+    int unur_distr_is_cont(UNUR_DISTR* distribution)
+
+    int unur_distr_is_cvec(UNUR_DISTR* distribution)
+
+    int unur_distr_is_cemp(UNUR_DISTR* distribution)
+
+    int unur_distr_is_cvemp(UNUR_DISTR* distribution)
+
+    int unur_distr_is_discr(UNUR_DISTR* distribution)
+
+    int unur_distr_is_matr(UNUR_DISTR* distribution)
+
+    int unur_distr_set_extobj(UNUR_DISTR* distribution, void* extobj)
+
+    void* unur_distr_get_extobj(UNUR_DISTR* distribution)
+
+    UNUR_DISTR* unur_distr_clone(UNUR_DISTR* distr)
+
+    UNUR_DISTR* unur_distr_cemp_new()
+
+    int unur_distr_cemp_set_data(UNUR_DISTR* distribution, double* sample, int n_sample)
+
+    int unur_distr_cemp_read_data(UNUR_DISTR* distribution, char* filename)
+
+    int unur_distr_cemp_get_data(UNUR_DISTR* distribution, double** sample)
+
+    int unur_distr_cemp_set_hist(UNUR_DISTR* distribution, double* prob, int n_prob, double xmin, double xmax)
+
+    int unur_distr_cemp_set_hist_prob(UNUR_DISTR* distribution, double* prob, int n_prob)
+
+    int unur_distr_cemp_set_hist_domain(UNUR_DISTR* distribution, double xmin, double xmax)
+
+    int unur_distr_cemp_set_hist_bins(UNUR_DISTR* distribution, double* bins, int n_bins)
+
+    UNUR_DISTR* unur_distr_cont_new()
+
+    int unur_distr_cont_set_pdf(UNUR_DISTR* distribution, UNUR_FUNCT_CONT* pdf)
+
+    int unur_distr_cont_set_dpdf(UNUR_DISTR* distribution, UNUR_FUNCT_CONT* dpdf)
+
+    int unur_distr_cont_set_cdf(UNUR_DISTR* distribution, UNUR_FUNCT_CONT* cdf)
+
+    int unur_distr_cont_set_invcdf(UNUR_DISTR* distribution, UNUR_FUNCT_CONT* invcdf)
+
+    UNUR_FUNCT_CONT* unur_distr_cont_get_pdf(UNUR_DISTR* distribution)
+
+    UNUR_FUNCT_CONT* unur_distr_cont_get_dpdf(UNUR_DISTR* distribution)
+
+    UNUR_FUNCT_CONT* unur_distr_cont_get_cdf(UNUR_DISTR* distribution)
+
+    UNUR_FUNCT_CONT* unur_distr_cont_get_invcdf(UNUR_DISTR* distribution)
+
+    double unur_distr_cont_eval_pdf(double x, UNUR_DISTR* distribution)
+
+    double unur_distr_cont_eval_dpdf(double x, UNUR_DISTR* distribution)
+
+    double unur_distr_cont_eval_cdf(double x, UNUR_DISTR* distribution)
+
+    double unur_distr_cont_eval_invcdf(double u, UNUR_DISTR* distribution)
+
+    int unur_distr_cont_set_logpdf(UNUR_DISTR* distribution, UNUR_FUNCT_CONT* logpdf)
+
+    int unur_distr_cont_set_dlogpdf(UNUR_DISTR* distribution, UNUR_FUNCT_CONT* dlogpdf)
+
+    int unur_distr_cont_set_logcdf(UNUR_DISTR* distribution, UNUR_FUNCT_CONT* logcdf)
+
+    UNUR_FUNCT_CONT* unur_distr_cont_get_logpdf(UNUR_DISTR* distribution)
+
+    UNUR_FUNCT_CONT* unur_distr_cont_get_dlogpdf(UNUR_DISTR* distribution)
+
+    UNUR_FUNCT_CONT* unur_distr_cont_get_logcdf(UNUR_DISTR* distribution)
+
+    double unur_distr_cont_eval_logpdf(double x, UNUR_DISTR* distribution)
+
+    double unur_distr_cont_eval_dlogpdf(double x, UNUR_DISTR* distribution)
+
+    double unur_distr_cont_eval_logcdf(double x, UNUR_DISTR* distribution)
+
+    int unur_distr_cont_set_pdfstr(UNUR_DISTR* distribution, char* pdfstr)
+
+    int unur_distr_cont_set_cdfstr(UNUR_DISTR* distribution, char* cdfstr)
+
+    char* unur_distr_cont_get_pdfstr(UNUR_DISTR* distribution)
+
+    char* unur_distr_cont_get_dpdfstr(UNUR_DISTR* distribution)
+
+    char* unur_distr_cont_get_cdfstr(UNUR_DISTR* distribution)
+
+    int unur_distr_cont_set_pdfparams(UNUR_DISTR* distribution, double* params, int n_params)
+
+    int unur_distr_cont_get_pdfparams(UNUR_DISTR* distribution, double** params)
+
+    int unur_distr_cont_set_pdfparams_vec(UNUR_DISTR* distribution, int par, double* param_vec, int n_param_vec)
+
+    int unur_distr_cont_get_pdfparams_vec(UNUR_DISTR* distribution, int par, double** param_vecs)
+
+    int unur_distr_cont_set_logpdfstr(UNUR_DISTR* distribution, char* logpdfstr)
+
+    char* unur_distr_cont_get_logpdfstr(UNUR_DISTR* distribution)
+
+    char* unur_distr_cont_get_dlogpdfstr(UNUR_DISTR* distribution)
+
+    int unur_distr_cont_set_logcdfstr(UNUR_DISTR* distribution, char* logcdfstr)
+
+    char* unur_distr_cont_get_logcdfstr(UNUR_DISTR* distribution)
+
+    int unur_distr_cont_set_domain(UNUR_DISTR* distribution, double left, double right)
+
+    int unur_distr_cont_get_domain(UNUR_DISTR* distribution, double* left, double* right)
+
+    int unur_distr_cont_get_truncated(UNUR_DISTR* distribution, double* left, double* right)
+
+    int unur_distr_cont_set_hr(UNUR_DISTR* distribution, UNUR_FUNCT_CONT* hazard)
+
+    UNUR_FUNCT_CONT* unur_distr_cont_get_hr(UNUR_DISTR* distribution)
+
+    double unur_distr_cont_eval_hr(double x, UNUR_DISTR* distribution)
+
+    int unur_distr_cont_set_hrstr(UNUR_DISTR* distribution, char* hrstr)
+
+    char* unur_distr_cont_get_hrstr(UNUR_DISTR* distribution)
+
+    int unur_distr_cont_set_mode(UNUR_DISTR* distribution, double mode)
+
+    int unur_distr_cont_upd_mode(UNUR_DISTR* distribution)
+
+    double unur_distr_cont_get_mode(UNUR_DISTR* distribution)
+
+    int unur_distr_cont_set_center(UNUR_DISTR* distribution, double center)
+
+    double unur_distr_cont_get_center(UNUR_DISTR* distribution)
+
+    int unur_distr_cont_set_pdfarea(UNUR_DISTR* distribution, double area)
+
+    int unur_distr_cont_upd_pdfarea(UNUR_DISTR* distribution)
+
+    double unur_distr_cont_get_pdfarea(UNUR_DISTR* distribution)
+
+    UNUR_DISTR* unur_distr_cxtrans_new(UNUR_DISTR* distribution)
+
+    UNUR_DISTR* unur_distr_cxtrans_get_distribution(UNUR_DISTR* distribution)
+
+    int unur_distr_cxtrans_set_alpha(UNUR_DISTR* distribution, double alpha)
+
+    int unur_distr_cxtrans_set_rescale(UNUR_DISTR* distribution, double mu, double sigma)
+
+    double unur_distr_cxtrans_get_alpha(UNUR_DISTR* distribution)
+
+    double unur_distr_cxtrans_get_mu(UNUR_DISTR* distribution)
+
+    double unur_distr_cxtrans_get_sigma(UNUR_DISTR* distribution)
+
+    int unur_distr_cxtrans_set_logpdfpole(UNUR_DISTR* distribution, double logpdfpole, double dlogpdfpole)
+
+    int unur_distr_cxtrans_set_domain(UNUR_DISTR* distribution, double left, double right)
+
+    UNUR_DISTR* unur_distr_corder_new(UNUR_DISTR* distribution, int n, int k)
+
+    UNUR_DISTR* unur_distr_corder_get_distribution(UNUR_DISTR* distribution)
+
+    int unur_distr_corder_set_rank(UNUR_DISTR* distribution, int n, int k)
+
+    int unur_distr_corder_get_rank(UNUR_DISTR* distribution, int* n, int* k)
+
+    UNUR_DISTR* unur_distr_cvec_new(int dim)
+
+    int unur_distr_cvec_set_pdf(UNUR_DISTR* distribution, UNUR_FUNCT_CVEC* pdf)
+
+    int unur_distr_cvec_set_dpdf(UNUR_DISTR* distribution, UNUR_VFUNCT_CVEC* dpdf)
+
+    int unur_distr_cvec_set_pdpdf(UNUR_DISTR* distribution, UNUR_FUNCTD_CVEC* pdpdf)
+
+    UNUR_FUNCT_CVEC* unur_distr_cvec_get_pdf(UNUR_DISTR* distribution)
+
+    UNUR_VFUNCT_CVEC* unur_distr_cvec_get_dpdf(UNUR_DISTR* distribution)
+
+    UNUR_FUNCTD_CVEC* unur_distr_cvec_get_pdpdf(UNUR_DISTR* distribution)
+
+    double unur_distr_cvec_eval_pdf(double* x, UNUR_DISTR* distribution)
+
+    int unur_distr_cvec_eval_dpdf(double* result, double* x, UNUR_DISTR* distribution)
+
+    double unur_distr_cvec_eval_pdpdf(double* x, int coord, UNUR_DISTR* distribution)
+
+    int unur_distr_cvec_set_logpdf(UNUR_DISTR* distribution, UNUR_FUNCT_CVEC* logpdf)
+
+    int unur_distr_cvec_set_dlogpdf(UNUR_DISTR* distribution, UNUR_VFUNCT_CVEC* dlogpdf)
+
+    int unur_distr_cvec_set_pdlogpdf(UNUR_DISTR* distribution, UNUR_FUNCTD_CVEC* pdlogpdf)
+
+    UNUR_FUNCT_CVEC* unur_distr_cvec_get_logpdf(UNUR_DISTR* distribution)
+
+    UNUR_VFUNCT_CVEC* unur_distr_cvec_get_dlogpdf(UNUR_DISTR* distribution)
+
+    UNUR_FUNCTD_CVEC* unur_distr_cvec_get_pdlogpdf(UNUR_DISTR* distribution)
+
+    double unur_distr_cvec_eval_logpdf(double* x, UNUR_DISTR* distribution)
+
+    int unur_distr_cvec_eval_dlogpdf(double* result, double* x, UNUR_DISTR* distribution)
+
+    double unur_distr_cvec_eval_pdlogpdf(double* x, int coord, UNUR_DISTR* distribution)
+
+    int unur_distr_cvec_set_mean(UNUR_DISTR* distribution, double* mean)
+
+    double* unur_distr_cvec_get_mean(UNUR_DISTR* distribution)
+
+    int unur_distr_cvec_set_covar(UNUR_DISTR* distribution, double* covar)
+
+    int unur_distr_cvec_set_covar_inv(UNUR_DISTR* distribution, double* covar_inv)
+
+    double* unur_distr_cvec_get_covar(UNUR_DISTR* distribution)
+
+    double* unur_distr_cvec_get_cholesky(UNUR_DISTR* distribution)
+
+    double* unur_distr_cvec_get_covar_inv(UNUR_DISTR* distribution)
+
+    int unur_distr_cvec_set_rankcorr(UNUR_DISTR* distribution, double* rankcorr)
+
+    double* unur_distr_cvec_get_rankcorr(UNUR_DISTR* distribution)
+
+    double* unur_distr_cvec_get_rk_cholesky(UNUR_DISTR* distribution)
+
+    int unur_distr_cvec_set_marginals(UNUR_DISTR* distribution, UNUR_DISTR* marginal)
+
+    int unur_distr_cvec_set_marginal_array(UNUR_DISTR* distribution, UNUR_DISTR** marginals)
+
+    int unur_distr_cvec_set_marginal_list(UNUR_DISTR* distribution)
+
+    UNUR_DISTR* unur_distr_cvec_get_marginal(UNUR_DISTR* distribution, int n)
+
+    int unur_distr_cvec_set_pdfparams(UNUR_DISTR* distribution, double* params, int n_params)
+
+    int unur_distr_cvec_get_pdfparams(UNUR_DISTR* distribution, double** params)
+
+    int unur_distr_cvec_set_pdfparams_vec(UNUR_DISTR* distribution, int par, double* param_vec, int n_params)
+
+    int unur_distr_cvec_get_pdfparams_vec(UNUR_DISTR* distribution, int par, double** param_vecs)
+
+    int unur_distr_cvec_set_domain_rect(UNUR_DISTR* distribution, double* lowerleft, double* upperright)
+
+    int unur_distr_cvec_is_indomain(double* x, UNUR_DISTR* distribution)
+
+    int unur_distr_cvec_set_mode(UNUR_DISTR* distribution, double* mode)
+
+    int unur_distr_cvec_upd_mode(UNUR_DISTR* distribution)
+
+    double* unur_distr_cvec_get_mode(UNUR_DISTR* distribution)
+
+    int unur_distr_cvec_set_center(UNUR_DISTR* distribution, double* center)
+
+    double* unur_distr_cvec_get_center(UNUR_DISTR* distribution)
+
+    int unur_distr_cvec_set_pdfvol(UNUR_DISTR* distribution, double volume)
+
+    int unur_distr_cvec_upd_pdfvol(UNUR_DISTR* distribution)
+
+    double unur_distr_cvec_get_pdfvol(UNUR_DISTR* distribution)
+
+    UNUR_DISTR* unur_distr_condi_new(UNUR_DISTR* distribution, double* pos, double* dir, int k)
+
+    int unur_distr_condi_set_condition(unur_distr* distribution, double* pos, double* dir, int k)
+
+    int unur_distr_condi_get_condition(unur_distr* distribution, double** pos, double** dir, int* k)
+
+    UNUR_DISTR* unur_distr_condi_get_distribution(UNUR_DISTR* distribution)
+
+    UNUR_DISTR* unur_distr_cvemp_new(int dim)
+
+    int unur_distr_cvemp_set_data(UNUR_DISTR* distribution, double* sample, int n_sample)
+
+    int unur_distr_cvemp_read_data(UNUR_DISTR* distribution, char* filename)
+
+    int unur_distr_cvemp_get_data(UNUR_DISTR* distribution, double** sample)
+
+    UNUR_DISTR* unur_distr_discr_new()
+
+    int unur_distr_discr_set_pv(UNUR_DISTR* distribution, double* pv, int n_pv)
+
+    int unur_distr_discr_make_pv(UNUR_DISTR* distribution)
+
+    int unur_distr_discr_get_pv(UNUR_DISTR* distribution, double** pv)
+
+    int unur_distr_discr_set_pmf(UNUR_DISTR* distribution, UNUR_FUNCT_DISCR* pmf)
+
+    int unur_distr_discr_set_cdf(UNUR_DISTR* distribution, UNUR_FUNCT_DISCR* cdf)
+
+    int unur_distr_discr_set_invcdf(UNUR_DISTR* distribution, UNUR_IFUNCT_DISCR* invcdf)
+
+    UNUR_FUNCT_DISCR* unur_distr_discr_get_pmf(UNUR_DISTR* distribution)
+
+    UNUR_FUNCT_DISCR* unur_distr_discr_get_cdf(UNUR_DISTR* distribution)
+
+    UNUR_IFUNCT_DISCR* unur_distr_discr_get_invcdf(UNUR_DISTR* distribution)
+
+    double unur_distr_discr_eval_pv(int k, UNUR_DISTR* distribution)
+
+    double unur_distr_discr_eval_pmf(int k, UNUR_DISTR* distribution)
+
+    double unur_distr_discr_eval_cdf(int k, UNUR_DISTR* distribution)
+
+    int unur_distr_discr_eval_invcdf(double u, UNUR_DISTR* distribution)
+
+    int unur_distr_discr_set_pmfstr(UNUR_DISTR* distribution, char* pmfstr)
+
+    int unur_distr_discr_set_cdfstr(UNUR_DISTR* distribution, char* cdfstr)
+
+    char* unur_distr_discr_get_pmfstr(UNUR_DISTR* distribution)
+
+    char* unur_distr_discr_get_cdfstr(UNUR_DISTR* distribution)
+
+    int unur_distr_discr_set_pmfparams(UNUR_DISTR* distribution, double* params, int n_params)
+
+    int unur_distr_discr_get_pmfparams(UNUR_DISTR* distribution, double** params)
+
+    int unur_distr_discr_set_domain(UNUR_DISTR* distribution, int left, int right)
+
+    int unur_distr_discr_get_domain(UNUR_DISTR* distribution, int* left, int* right)
+
+    int unur_distr_discr_set_mode(UNUR_DISTR* distribution, int mode)
+
+    int unur_distr_discr_upd_mode(UNUR_DISTR* distribution)
+
+    int unur_distr_discr_get_mode(UNUR_DISTR* distribution)
+
+    int unur_distr_discr_set_pmfsum(UNUR_DISTR* distribution, double sum)
+
+    int unur_distr_discr_upd_pmfsum(UNUR_DISTR* distribution)
+
+    double unur_distr_discr_get_pmfsum(UNUR_DISTR* distribution)
+
+    UNUR_DISTR* unur_distr_matr_new(int n_rows, int n_cols)
+
+    int unur_distr_matr_get_dim(UNUR_DISTR* distribution, int* n_rows, int* n_cols)
+
+    UNUR_PAR* unur_auto_new(UNUR_DISTR* distribution)
+
+    int unur_auto_set_logss(UNUR_PAR* parameters, int logss)
+
+    UNUR_PAR* unur_dari_new(UNUR_DISTR* distribution)
+
+    int unur_dari_set_squeeze(UNUR_PAR* parameters, int squeeze)
+
+    int unur_dari_set_tablesize(UNUR_PAR* parameters, int size)
+
+    int unur_dari_set_cpfactor(UNUR_PAR* parameters, double cp_factor)
+
+    int unur_dari_set_verify(UNUR_PAR* parameters, int verify)
+
+    int unur_dari_chg_verify(UNUR_GEN* generator, int verify)
+
+    UNUR_PAR* unur_dau_new(UNUR_DISTR* distribution)
+
+    int unur_dau_set_urnfactor(UNUR_PAR* parameters, double factor)
+
+    UNUR_PAR* unur_dgt_new(UNUR_DISTR* distribution)
+
+    int unur_dgt_set_guidefactor(UNUR_PAR* parameters, double factor)
+
+    int unur_dgt_set_variant(UNUR_PAR* parameters, unsigned variant)
+
+    int unur_dgt_eval_invcdf_recycle(UNUR_GEN* generator, double u, double* recycle)
+
+    int unur_dgt_eval_invcdf(UNUR_GEN* generator, double u)
+
+    UNUR_PAR* unur_dsrou_new(UNUR_DISTR* distribution)
+
+    int unur_dsrou_set_cdfatmode(UNUR_PAR* parameters, double Fmode)
+
+    int unur_dsrou_set_verify(UNUR_PAR* parameters, int verify)
+
+    int unur_dsrou_chg_verify(UNUR_GEN* generator, int verify)
+
+    int unur_dsrou_chg_cdfatmode(UNUR_GEN* generator, double Fmode)
+
+    UNUR_PAR* unur_dss_new(UNUR_DISTR* distribution)
+
+    UNUR_PAR* unur_arou_new(UNUR_DISTR* distribution)
+
+    int unur_arou_set_usedars(UNUR_PAR* parameters, int usedars)
+
+    int unur_arou_set_darsfactor(UNUR_PAR* parameters, double factor)
+
+    int unur_arou_set_max_sqhratio(UNUR_PAR* parameters, double max_ratio)
+
+    double unur_arou_get_sqhratio(UNUR_GEN* generator)
+
+    double unur_arou_get_hatarea(UNUR_GEN* generator)
+
+    double unur_arou_get_squeezearea(UNUR_GEN* generator)
+
+    int unur_arou_set_max_segments(UNUR_PAR* parameters, int max_segs)
+
+    int unur_arou_set_cpoints(UNUR_PAR* parameters, int n_stp, double* stp)
+
+    int unur_arou_set_usecenter(UNUR_PAR* parameters, int usecenter)
+
+    int unur_arou_set_guidefactor(UNUR_PAR* parameters, double factor)
+
+    int unur_arou_set_verify(UNUR_PAR* parameters, int verify)
+
+    int unur_arou_chg_verify(UNUR_GEN* generator, int verify)
+
+    int unur_arou_set_pedantic(UNUR_PAR* parameters, int pedantic)
+
+    UNUR_PAR* unur_ars_new(UNUR_DISTR* distribution)
+
+    int unur_ars_set_max_intervals(UNUR_PAR* parameters, int max_ivs)
+
+    int unur_ars_set_cpoints(UNUR_PAR* parameters, int n_cpoints, double* cpoints)
+
+    int unur_ars_set_reinit_percentiles(UNUR_PAR* parameters, int n_percentiles, double* percentiles)
+
+    int unur_ars_chg_reinit_percentiles(UNUR_GEN* generator, int n_percentiles, double* percentiles)
+
+    int unur_ars_set_reinit_ncpoints(UNUR_PAR* parameters, int ncpoints)
+
+    int unur_ars_chg_reinit_ncpoints(UNUR_GEN* generator, int ncpoints)
+
+    int unur_ars_set_max_iter(UNUR_PAR* parameters, int max_iter)
+
+    int unur_ars_set_verify(UNUR_PAR* parameters, int verify)
+
+    int unur_ars_chg_verify(UNUR_GEN* generator, int verify)
+
+    int unur_ars_set_pedantic(UNUR_PAR* parameters, int pedantic)
+
+    double unur_ars_get_loghatarea(UNUR_GEN* generator)
+
+    double unur_ars_eval_invcdfhat(UNUR_GEN* generator, double u)
+
+    UNUR_PAR* unur_hinv_new(UNUR_DISTR* distribution)
+
+    int unur_hinv_set_order(UNUR_PAR* parameters, int order)
+
+    int unur_hinv_set_u_resolution(UNUR_PAR* parameters, double u_resolution)
+
+    int unur_hinv_set_cpoints(UNUR_PAR* parameters, double* stp, int n_stp)
+
+    int unur_hinv_set_boundary(UNUR_PAR* parameters, double left, double right)
+
+    int unur_hinv_set_guidefactor(UNUR_PAR* parameters, double factor)
+
+    int unur_hinv_set_max_intervals(UNUR_PAR* parameters, int max_ivs)
+
+    int unur_hinv_get_n_intervals(UNUR_GEN* generator)
+
+    double unur_hinv_eval_approxinvcdf(UNUR_GEN* generator, double u)
+
+    int unur_hinv_chg_truncated(UNUR_GEN* generator, double left, double right)
+
+    int unur_hinv_estimate_error(UNUR_GEN* generator, int samplesize, double* max_error, double* MAE)
+
+    UNUR_PAR* unur_hrb_new(UNUR_DISTR* distribution)
+
+    int unur_hrb_set_upperbound(UNUR_PAR* parameters, double upperbound)
+
+    int unur_hrb_set_verify(UNUR_PAR* parameters, int verify)
+
+    int unur_hrb_chg_verify(UNUR_GEN* generator, int verify)
+
+    UNUR_PAR* unur_hrd_new(UNUR_DISTR* distribution)
+
+    int unur_hrd_set_verify(UNUR_PAR* parameters, int verify)
+
+    int unur_hrd_chg_verify(UNUR_GEN* generator, int verify)
+
+    UNUR_PAR* unur_hri_new(UNUR_DISTR* distribution)
+
+    int unur_hri_set_p0(UNUR_PAR* parameters, double p0)
+
+    int unur_hri_set_verify(UNUR_PAR* parameters, int verify)
+
+    int unur_hri_chg_verify(UNUR_GEN* generator, int verify)
+
+    UNUR_PAR* unur_itdr_new(UNUR_DISTR* distribution)
+
+    int unur_itdr_set_xi(UNUR_PAR* parameters, double xi)
+
+    int unur_itdr_set_cp(UNUR_PAR* parameters, double cp)
+
+    int unur_itdr_set_ct(UNUR_PAR* parameters, double ct)
+
+    double unur_itdr_get_xi(UNUR_GEN* generator)
+
+    double unur_itdr_get_cp(UNUR_GEN* generator)
+
+    double unur_itdr_get_ct(UNUR_GEN* generator)
+
+    double unur_itdr_get_area(UNUR_GEN* generator)
+
+    int unur_itdr_set_verify(UNUR_PAR* parameters, int verify)
+
+    int unur_itdr_chg_verify(UNUR_GEN* generator, int verify)
+
+    UNUR_PAR* unur_mcorr_new(UNUR_DISTR* distribution)
+
+    int unur_mcorr_set_eigenvalues(UNUR_PAR* par, double* eigenvalues)
+
+    int unur_mcorr_chg_eigenvalues(UNUR_GEN* gen, double* eigenvalues)
+
+    UNUR_PAR* unur_ninv_new(UNUR_DISTR* distribution)
+
+    int unur_ninv_set_useregula(UNUR_PAR* parameters)
+
+    int unur_ninv_set_usenewton(UNUR_PAR* parameters)
+
+    int unur_ninv_set_usebisect(UNUR_PAR* parameters)
+
+    int unur_ninv_set_max_iter(UNUR_PAR* parameters, int max_iter)
+
+    int unur_ninv_chg_max_iter(UNUR_GEN* generator, int max_iter)
+
+    int unur_ninv_set_x_resolution(UNUR_PAR* parameters, double x_resolution)
+
+    int unur_ninv_chg_x_resolution(UNUR_GEN* generator, double x_resolution)
+
+    int unur_ninv_set_u_resolution(UNUR_PAR* parameters, double u_resolution)
+
+    int unur_ninv_chg_u_resolution(UNUR_GEN* generator, double u_resolution)
+
+    int unur_ninv_set_start(UNUR_PAR* parameters, double left, double right)
+
+    int unur_ninv_chg_start(UNUR_GEN* gen, double left, double right)
+
+    int unur_ninv_set_table(UNUR_PAR* parameters, int no_of_points)
+
+    int unur_ninv_chg_table(UNUR_GEN* gen, int no_of_points)
+
+    int unur_ninv_chg_truncated(UNUR_GEN* gen, double left, double right)
+
+    double unur_ninv_eval_approxinvcdf(UNUR_GEN* generator, double u)
+
+    UNUR_PAR* unur_nrou_new(UNUR_DISTR* distribution)
+
+    int unur_nrou_set_u(UNUR_PAR* parameters, double umin, double umax)
+
+    int unur_nrou_set_v(UNUR_PAR* parameters, double vmax)
+
+    int unur_nrou_set_r(UNUR_PAR* parameters, double r)
+
+    int unur_nrou_set_center(UNUR_PAR* parameters, double center)
+
+    int unur_nrou_set_verify(UNUR_PAR* parameters, int verify)
+
+    int unur_nrou_chg_verify(UNUR_GEN* generator, int verify)
+
+    UNUR_PAR* unur_pinv_new(UNUR_DISTR* distribution)
+
+    int unur_pinv_set_order(UNUR_PAR* parameters, int order)
+
+    int unur_pinv_set_smoothness(UNUR_PAR* parameters, int smoothness)
+
+    int unur_pinv_set_u_resolution(UNUR_PAR* parameters, double u_resolution)
+
+    int unur_pinv_set_use_upoints(UNUR_PAR* parameters, int use_upoints)
+
+    int unur_pinv_set_usepdf(UNUR_PAR* parameters)
+
+    int unur_pinv_set_usecdf(UNUR_PAR* parameters)
+
+    int unur_pinv_set_boundary(UNUR_PAR* parameters, double left, double right)
+
+    int unur_pinv_set_searchboundary(UNUR_PAR* parameters, int left, int right)
+
+    int unur_pinv_set_max_intervals(UNUR_PAR* parameters, int max_ivs)
+
+    int unur_pinv_get_n_intervals(UNUR_GEN* generator)
+
+    int unur_pinv_set_keepcdf(UNUR_PAR* parameters, int keepcdf)
+
+    double unur_pinv_eval_approxinvcdf(UNUR_GEN* generator, double u)
+
+    double unur_pinv_eval_approxcdf(UNUR_GEN* generator, double x)
+
+    int unur_pinv_estimate_error(UNUR_GEN* generator, int samplesize, double* max_error, double* MAE)
+
+    UNUR_PAR* unur_srou_new(UNUR_DISTR* distribution)
+
+    int unur_srou_set_r(UNUR_PAR* parameters, double r)
+
+    int unur_srou_set_cdfatmode(UNUR_PAR* parameters, double Fmode)
+
+    int unur_srou_set_pdfatmode(UNUR_PAR* parameters, double fmode)
+
+    int unur_srou_set_usesqueeze(UNUR_PAR* parameters, int usesqueeze)
+
+    int unur_srou_set_usemirror(UNUR_PAR* parameters, int usemirror)
+
+    int unur_srou_set_verify(UNUR_PAR* parameters, int verify)
+
+    int unur_srou_chg_verify(UNUR_GEN* generator, int verify)
+
+    int unur_srou_chg_cdfatmode(UNUR_GEN* generator, double Fmode)
+
+    int unur_srou_chg_pdfatmode(UNUR_GEN* generator, double fmode)
+
+    UNUR_PAR* unur_ssr_new(UNUR_DISTR* distribution)
+
+    int unur_ssr_set_cdfatmode(UNUR_PAR* parameters, double Fmode)
+
+    int unur_ssr_set_pdfatmode(UNUR_PAR* parameters, double fmode)
+
+    int unur_ssr_set_usesqueeze(UNUR_PAR* parameters, int usesqueeze)
+
+    int unur_ssr_set_verify(UNUR_PAR* parameters, int verify)
+
+    int unur_ssr_chg_verify(UNUR_GEN* generator, int verify)
+
+    int unur_ssr_chg_cdfatmode(UNUR_GEN* generator, double Fmode)
+
+    int unur_ssr_chg_pdfatmode(UNUR_GEN* generator, double fmode)
+
+    UNUR_PAR* unur_tabl_new(UNUR_DISTR* distribution)
+
+    int unur_tabl_set_variant_ia(UNUR_PAR* parameters, int use_ia)
+
+    int unur_tabl_set_cpoints(UNUR_PAR* parameters, int n_cpoints, double* cpoints)
+
+    int unur_tabl_set_nstp(UNUR_PAR* parameters, int n_stp)
+
+    int unur_tabl_set_useear(UNUR_PAR* parameters, int useear)
+
+    int unur_tabl_set_areafraction(UNUR_PAR* parameters, double fraction)
+
+    int unur_tabl_set_usedars(UNUR_PAR* parameters, int usedars)
+
+    int unur_tabl_set_darsfactor(UNUR_PAR* parameters, double factor)
+
+    int unur_tabl_set_variant_splitmode(UNUR_PAR* parameters, unsigned splitmode)
+
+    int unur_tabl_set_max_sqhratio(UNUR_PAR* parameters, double max_ratio)
+
+    double unur_tabl_get_sqhratio(UNUR_GEN* generator)
+
+    double unur_tabl_get_hatarea(UNUR_GEN* generator)
+
+    double unur_tabl_get_squeezearea(UNUR_GEN* generator)
+
+    int unur_tabl_set_max_intervals(UNUR_PAR* parameters, int max_ivs)
+
+    int unur_tabl_get_n_intervals(UNUR_GEN* generator)
+
+    int unur_tabl_set_slopes(UNUR_PAR* parameters, double* slopes, int n_slopes)
+
+    int unur_tabl_set_guidefactor(UNUR_PAR* parameters, double factor)
+
+    int unur_tabl_set_boundary(UNUR_PAR* parameters, double left, double right)
+
+    int unur_tabl_chg_truncated(UNUR_GEN* gen, double left, double right)
+
+    int unur_tabl_set_verify(UNUR_PAR* parameters, int verify)
+
+    int unur_tabl_chg_verify(UNUR_GEN* generator, int verify)
+
+    int unur_tabl_set_pedantic(UNUR_PAR* parameters, int pedantic)
+
+    UNUR_PAR* unur_tdr_new(UNUR_DISTR* distribution)
+
+    int unur_tdr_set_c(UNUR_PAR* parameters, double c)
+
+    int unur_tdr_set_variant_gw(UNUR_PAR* parameters)
+
+    int unur_tdr_set_variant_ps(UNUR_PAR* parameters)
+
+    int unur_tdr_set_variant_ia(UNUR_PAR* parameters)
+
+    int unur_tdr_set_usedars(UNUR_PAR* parameters, int usedars)
+
+    int unur_tdr_set_darsfactor(UNUR_PAR* parameters, double factor)
+
+    int unur_tdr_set_cpoints(UNUR_PAR* parameters, int n_stp, double* stp)
+
+    int unur_tdr_set_reinit_percentiles(UNUR_PAR* parameters, int n_percentiles, double* percentiles)
+
+    int unur_tdr_chg_reinit_percentiles(UNUR_GEN* generator, int n_percentiles, double* percentiles)
+
+    int unur_tdr_set_reinit_ncpoints(UNUR_PAR* parameters, int ncpoints)
+
+    int unur_tdr_chg_reinit_ncpoints(UNUR_GEN* generator, int ncpoints)
+
+    int unur_tdr_chg_truncated(UNUR_GEN* gen, double left, double right)
+
+    int unur_tdr_set_max_sqhratio(UNUR_PAR* parameters, double max_ratio)
+
+    double unur_tdr_get_sqhratio(UNUR_GEN* generator)
+
+    double unur_tdr_get_hatarea(UNUR_GEN* generator)
+
+    double unur_tdr_get_squeezearea(UNUR_GEN* generator)
+
+    int unur_tdr_set_max_intervals(UNUR_PAR* parameters, int max_ivs)
+
+    int unur_tdr_set_usecenter(UNUR_PAR* parameters, int usecenter)
+
+    int unur_tdr_set_usemode(UNUR_PAR* parameters, int usemode)
+
+    int unur_tdr_set_guidefactor(UNUR_PAR* parameters, double factor)
+
+    int unur_tdr_set_verify(UNUR_PAR* parameters, int verify)
+
+    int unur_tdr_chg_verify(UNUR_GEN* generator, int verify)
+
+    int unur_tdr_set_pedantic(UNUR_PAR* parameters, int pedantic)
+
+    double unur_tdr_eval_invcdfhat(UNUR_GEN* generator, double u, double* hx, double* fx, double* sqx)
+
+    int _unur_tdr_is_ARS_running(UNUR_GEN* generator)
+
+    UNUR_PAR* unur_utdr_new(UNUR_DISTR* distribution)
+
+    int unur_utdr_set_pdfatmode(UNUR_PAR* parameters, double fmode)
+
+    int unur_utdr_set_cpfactor(UNUR_PAR* parameters, double cp_factor)
+
+    int unur_utdr_set_deltafactor(UNUR_PAR* parameters, double delta)
+
+    int unur_utdr_set_verify(UNUR_PAR* parameters, int verify)
+
+    int unur_utdr_chg_verify(UNUR_GEN* generator, int verify)
+
+    int unur_utdr_chg_pdfatmode(UNUR_GEN* generator, double fmode)
+
+    UNUR_PAR* unur_empk_new(UNUR_DISTR* distribution)
+
+    int unur_empk_set_kernel(UNUR_PAR* parameters, unsigned kernel)
+
+    int unur_empk_set_kernelgen(UNUR_PAR* parameters, UNUR_GEN* kernelgen, double alpha, double kernelvar)
+
+    int unur_empk_set_beta(UNUR_PAR* parameters, double beta)
+
+    int unur_empk_set_smoothing(UNUR_PAR* parameters, double smoothing)
+
+    int unur_empk_chg_smoothing(UNUR_GEN* generator, double smoothing)
+
+    int unur_empk_set_varcor(UNUR_PAR* parameters, int varcor)
+
+    int unur_empk_chg_varcor(UNUR_GEN* generator, int varcor)
+
+    int unur_empk_set_positive(UNUR_PAR* parameters, int positive)
+
+    UNUR_PAR* unur_empl_new(UNUR_DISTR* distribution)
+
+    UNUR_PAR* unur_hist_new(UNUR_DISTR* distribution)
+
+    UNUR_PAR* unur_mvtdr_new(UNUR_DISTR* distribution)
+
+    int unur_mvtdr_set_stepsmin(UNUR_PAR* parameters, int stepsmin)
+
+    int unur_mvtdr_set_boundsplitting(UNUR_PAR* parameters, double boundsplitting)
+
+    int unur_mvtdr_set_maxcones(UNUR_PAR* parameters, int maxcones)
+
+    int unur_mvtdr_get_ncones(UNUR_GEN* generator)
+
+    double unur_mvtdr_get_hatvol(UNUR_GEN* generator)
+
+    int unur_mvtdr_set_verify(UNUR_PAR* parameters, int verify)
+
+    int unur_mvtdr_chg_verify(UNUR_GEN* generator, int verify)
+
+    UNUR_PAR* unur_norta_new(UNUR_DISTR* distribution)
+
+    UNUR_PAR* unur_vempk_new(UNUR_DISTR* distribution)
+
+    int unur_vempk_set_smoothing(UNUR_PAR* parameters, double smoothing)
+
+    int unur_vempk_chg_smoothing(UNUR_GEN* generator, double smoothing)
+
+    int unur_vempk_set_varcor(UNUR_PAR* parameters, int varcor)
+
+    int unur_vempk_chg_varcor(UNUR_GEN* generator, int varcor)
+
+    UNUR_PAR* unur_vnrou_new(UNUR_DISTR* distribution)
+
+    int unur_vnrou_set_u(UNUR_PAR* parameters, double* umin, double* umax)
+
+    int unur_vnrou_chg_u(UNUR_GEN* generator, double* umin, double* umax)
+
+    int unur_vnrou_set_v(UNUR_PAR* parameters, double vmax)
+
+    int unur_vnrou_chg_v(UNUR_GEN* generator, double vmax)
+
+    int unur_vnrou_set_r(UNUR_PAR* parameters, double r)
+
+    int unur_vnrou_set_verify(UNUR_PAR* parameters, int verify)
+
+    int unur_vnrou_chg_verify(UNUR_GEN* generator, int verify)
+
+    double unur_vnrou_get_volumehat(UNUR_GEN* generator)
+
+    UNUR_PAR* unur_gibbs_new(UNUR_DISTR* distribution)
+
+    int unur_gibbs_set_variant_coordinate(UNUR_PAR* parameters)
+
+    int unur_gibbs_set_variant_random_direction(UNUR_PAR* parameters)
+
+    int unur_gibbs_set_c(UNUR_PAR* parameters, double c)
+
+    int unur_gibbs_set_startingpoint(UNUR_PAR* parameters, double* x0)
+
+    int unur_gibbs_set_thinning(UNUR_PAR* parameters, int thinning)
+
+    int unur_gibbs_set_burnin(UNUR_PAR* parameters, int burnin)
+
+    double* unur_gibbs_get_state(UNUR_GEN* generator)
+
+    int unur_gibbs_chg_state(UNUR_GEN* generator, double* state)
+
+    int unur_gibbs_reset_state(UNUR_GEN* generator)
+
+    UNUR_PAR* unur_hitro_new(UNUR_DISTR* distribution)
+
+    int unur_hitro_set_variant_coordinate(UNUR_PAR* parameters)
+
+    int unur_hitro_set_variant_random_direction(UNUR_PAR* parameters)
+
+    int unur_hitro_set_use_adaptiveline(UNUR_PAR* parameters, int adaptive)
+
+    int unur_hitro_set_use_boundingrectangle(UNUR_PAR* parameters, int rectangle)
+
+    int unur_hitro_set_use_adaptiverectangle(UNUR_PAR* parameters, int adaptive)
+
+    int unur_hitro_set_r(UNUR_PAR* parameters, double r)
+
+    int unur_hitro_set_v(UNUR_PAR* parameters, double vmax)
+
+    int unur_hitro_set_u(UNUR_PAR* parameters, double* umin, double* umax)
+
+    int unur_hitro_set_adaptive_multiplier(UNUR_PAR* parameters, double factor)
+
+    int unur_hitro_set_startingpoint(UNUR_PAR* parameters, double* x0)
+
+    int unur_hitro_set_thinning(UNUR_PAR* parameters, int thinning)
+
+    int unur_hitro_set_burnin(UNUR_PAR* parameters, int burnin)
+
+    double* unur_hitro_get_state(UNUR_GEN* generator)
+
+    int unur_hitro_chg_state(UNUR_GEN* generator, double* state)
+
+    int unur_hitro_reset_state(UNUR_GEN* generator)
+
+    UNUR_PAR* unur_cstd_new(UNUR_DISTR* distribution)
+
+    int unur_cstd_set_variant(UNUR_PAR* parameters, unsigned variant)
+
+    int unur_cstd_chg_truncated(UNUR_GEN* generator, double left, double right)
+
+    double unur_cstd_eval_invcdf(UNUR_GEN* generator, double u)
+
+    UNUR_PAR* unur_dstd_new(UNUR_DISTR* distribution)
+
+    int unur_dstd_set_variant(UNUR_PAR* parameters, unsigned variant)
+
+    int unur_dstd_chg_truncated(UNUR_GEN* generator, int left, int right)
+
+    int unur_dstd_eval_invcdf(UNUR_GEN* generator, double u)
+
+    UNUR_PAR* unur_mvstd_new(UNUR_DISTR* distribution)
+
+    UNUR_PAR* unur_mixt_new(int n, double* prob, UNUR_GEN** comp)
+
+    int unur_mixt_set_useinversion(UNUR_PAR* parameters, int useinv)
+
+    double unur_mixt_eval_invcdf(UNUR_GEN* generator, double u)
+
+    UNUR_PAR* unur_cext_new(UNUR_DISTR* distribution)
+
+    ctypedef int (*_unur_cext_set_init_init_ft)(UNUR_GEN* gen)
+
+    int unur_cext_set_init(UNUR_PAR* parameters, _unur_cext_set_init_init_ft init)
+
+    ctypedef double (*_unur_cext_set_sample_sample_ft)(UNUR_GEN* gen)
+
+    int unur_cext_set_sample(UNUR_PAR* parameters, _unur_cext_set_sample_sample_ft sample)
+
+    void* unur_cext_get_params(UNUR_GEN* generator, size_t size)
+
+    double* unur_cext_get_distrparams(UNUR_GEN* generator)
+
+    int unur_cext_get_ndistrparams(UNUR_GEN* generator)
+
+    UNUR_PAR* unur_dext_new(UNUR_DISTR* distribution)
+
+    ctypedef int (*_unur_dext_set_init_init_ft)(UNUR_GEN* gen)
+
+    int unur_dext_set_init(UNUR_PAR* parameters, _unur_dext_set_init_init_ft init)
+
+    ctypedef int (*_unur_dext_set_sample_sample_ft)(UNUR_GEN* gen)
+
+    int unur_dext_set_sample(UNUR_PAR* parameters, _unur_dext_set_sample_sample_ft sample)
+
+    void* unur_dext_get_params(UNUR_GEN* generator, size_t size)
+
+    double* unur_dext_get_distrparams(UNUR_GEN* generator)
+
+    int unur_dext_get_ndistrparams(UNUR_GEN* generator)
+
+    UNUR_PAR* unur_unif_new(UNUR_DISTR* dummy)
+
+    UNUR_GEN* unur_str2gen(char* string)
+
+    UNUR_DISTR* unur_str2distr(char* string)
+
+    UNUR_GEN* unur_makegen_ssu(char* distrstr, char* methodstr, UNUR_URNG* urng)
+
+    UNUR_GEN* unur_makegen_dsu(UNUR_DISTR* distribution, char* methodstr, UNUR_URNG* urng)
+
+    UNUR_PAR* _unur_str2par(UNUR_DISTR* distribution, char* method, unur_slist** mlist)
+
+    UNUR_GEN* unur_init(UNUR_PAR* parameters)
+
+    int unur_reinit(UNUR_GEN* generator)
+
+    int unur_sample_discr(UNUR_GEN* generator)
+
+    double unur_sample_cont(UNUR_GEN* generator)
+
+    int unur_sample_vec(UNUR_GEN* generator, double* vector)
+
+    int unur_sample_matr(UNUR_GEN* generator, double* matrix)
+
+    double unur_quantile(UNUR_GEN* generator, double U)
+
+    void unur_free(UNUR_GEN* generator)
+
+    char* unur_gen_info(UNUR_GEN* generator, int help)
+
+    int unur_get_dimension(UNUR_GEN* generator)
+
+    char* unur_get_genid(UNUR_GEN* generator)
+
+    UNUR_DISTR* unur_get_distr(UNUR_GEN* generator)
+
+    int unur_set_use_distr_privatecopy(UNUR_PAR* parameters, int use_privatecopy)
+
+    UNUR_GEN* unur_gen_clone(UNUR_GEN* gen)
+
+    void unur_par_free(UNUR_PAR* par)
+
+    cdef enum:
+        UNUR_DISTR_GENERIC
+        UNUR_DISTR_CORDER
+        UNUR_DISTR_CXTRANS
+        UNUR_DISTR_CONDI
+        UNUR_DISTR_BETA
+        UNUR_DISTR_CAUCHY
+        UNUR_DISTR_CHI
+        UNUR_DISTR_CHISQUARE
+        UNUR_DISTR_EPANECHNIKOV
+        UNUR_DISTR_EXPONENTIAL
+        UNUR_DISTR_EXTREME_I
+        UNUR_DISTR_EXTREME_II
+        UNUR_DISTR_F
+        UNUR_DISTR_GAMMA
+        UNUR_DISTR_GHYP
+        UNUR_DISTR_GIG
+        UNUR_DISTR_GIG2
+        UNUR_DISTR_HYPERBOLIC
+        UNUR_DISTR_IG
+        UNUR_DISTR_LAPLACE
+        UNUR_DISTR_LOGISTIC
+        UNUR_DISTR_LOGNORMAL
+        UNUR_DISTR_LOMAX
+        UNUR_DISTR_NORMAL
+        UNUR_DISTR_GAUSSIAN
+        UNUR_DISTR_PARETO
+        UNUR_DISTR_POWEREXPONENTIAL
+        UNUR_DISTR_RAYLEIGH
+        UNUR_DISTR_SLASH
+        UNUR_DISTR_STUDENT
+        UNUR_DISTR_TRIANGULAR
+        UNUR_DISTR_UNIFORM
+        UNUR_DISTR_BOXCAR
+        UNUR_DISTR_WEIBULL
+        UNUR_DISTR_BURR_I
+        UNUR_DISTR_BURR_II
+        UNUR_DISTR_BURR_III
+        UNUR_DISTR_BURR_IV
+        UNUR_DISTR_BURR_V
+        UNUR_DISTR_BURR_VI
+        UNUR_DISTR_BURR_VII
+        UNUR_DISTR_BURR_VIII
+        UNUR_DISTR_BURR_IX
+        UNUR_DISTR_BURR_X
+        UNUR_DISTR_BURR_XI
+        UNUR_DISTR_BURR_XII
+        UNUR_DISTR_BINOMIAL
+        UNUR_DISTR_GEOMETRIC
+        UNUR_DISTR_HYPERGEOMETRIC
+        UNUR_DISTR_LOGARITHMIC
+        UNUR_DISTR_NEGATIVEBINOMIAL
+        UNUR_DISTR_POISSON
+        UNUR_DISTR_ZIPF
+        UNUR_DISTR_MCAUCHY
+        UNUR_DISTR_MNORMAL
+        UNUR_DISTR_MSTUDENT
+        UNUR_DISTR_MEXPONENTIAL
+        UNUR_DISTR_COPULA
+        UNUR_DISTR_MCORRELATION
+
+    UNUR_DISTR* unur_distr_beta(double* params, int n_params)
+
+    UNUR_DISTR* unur_distr_burr(double* params, int n_params)
+
+    UNUR_DISTR* unur_distr_cauchy(double* params, int n_params)
+
+    UNUR_DISTR* unur_distr_chi(double* params, int n_params)
+
+    UNUR_DISTR* unur_distr_chisquare(double* params, int n_params)
+
+    UNUR_DISTR* unur_distr_exponential(double* params, int n_params)
+
+    UNUR_DISTR* unur_distr_extremeI(double* params, int n_params)
+
+    UNUR_DISTR* unur_distr_extremeII(double* params, int n_params)
+
+    UNUR_DISTR* unur_distr_F(double* params, int n_params)
+
+    UNUR_DISTR* unur_distr_gamma(double* params, int n_params)
+
+    UNUR_DISTR* unur_distr_ghyp(double* params, int n_params)
+
+    UNUR_DISTR* unur_distr_gig(double* params, int n_params)
+
+    UNUR_DISTR* unur_distr_gig2(double* params, int n_params)
+
+    UNUR_DISTR* unur_distr_hyperbolic(double* params, int n_params)
+
+    UNUR_DISTR* unur_distr_ig(double* params, int n_params)
+
+    UNUR_DISTR* unur_distr_laplace(double* params, int n_params)
+
+    UNUR_DISTR* unur_distr_logistic(double* params, int n_params)
+
+    UNUR_DISTR* unur_distr_lognormal(double* params, int n_params)
+
+    UNUR_DISTR* unur_distr_lomax(double* params, int n_params)
+
+    UNUR_DISTR* unur_distr_normal(double* params, int n_params)
+
+    UNUR_DISTR* unur_distr_pareto(double* params, int n_params)
+
+    UNUR_DISTR* unur_distr_powerexponential(double* params, int n_params)
+
+    UNUR_DISTR* unur_distr_rayleigh(double* params, int n_params)
+
+    UNUR_DISTR* unur_distr_slash(double* params, int n_params)
+
+    UNUR_DISTR* unur_distr_student(double* params, int n_params)
+
+    UNUR_DISTR* unur_distr_triangular(double* params, int n_params)
+
+    UNUR_DISTR* unur_distr_uniform(double* params, int n_params)
+
+    UNUR_DISTR* unur_distr_weibull(double* params, int n_params)
+
+    UNUR_DISTR* unur_distr_multinormal(int dim, double* mean, double* covar)
+
+    UNUR_DISTR* unur_distr_multicauchy(int dim, double* mean, double* covar)
+
+    UNUR_DISTR* unur_distr_multistudent(int dim, double nu, double* mean, double* covar)
+
+    UNUR_DISTR* unur_distr_multiexponential(int dim, double* sigma, double* theta)
+
+    UNUR_DISTR* unur_distr_copula(int dim, double* rankcorr)
+
+    UNUR_DISTR* unur_distr_correlation(int n)
+
+    UNUR_DISTR* unur_distr_binomial(double* params, int n_params)
+
+    UNUR_DISTR* unur_distr_geometric(double* params, int n_params)
+
+    UNUR_DISTR* unur_distr_hypergeometric(double* params, int n_params)
+
+    UNUR_DISTR* unur_distr_logarithmic(double* params, int n_params)
+
+    UNUR_DISTR* unur_distr_negativebinomial(double* params, int n_params)
+
+    UNUR_DISTR* unur_distr_poisson(double* params, int n_params)
+
+    UNUR_DISTR* unur_distr_zipf(double* params, int n_params)
+
+    FILE* unur_set_stream(FILE* new_stream)
+
+    FILE* unur_get_stream()
+
+    int unur_set_debug(UNUR_PAR* parameters, unsigned debug)
+
+    int unur_chg_debug(UNUR_GEN* generator, unsigned debug)
+
+    int unur_set_default_debug(unsigned debug)
+
+    int unur_errno
+
+    int unur_get_errno()
+
+    void unur_reset_errno()
+
+    char* unur_get_strerror(int errnocode)
+
+    UNUR_ERROR_HANDLER* unur_set_error_handler(UNUR_ERROR_HANDLER* new_handler)
+
+    UNUR_ERROR_HANDLER* unur_set_error_handler_off()
+
+    cdef enum:
+        UNUR_SUCCESS
+        UNUR_FAILURE
+        UNUR_ERR_DISTR_SET
+        UNUR_ERR_DISTR_GET
+        UNUR_ERR_DISTR_NPARAMS
+        UNUR_ERR_DISTR_DOMAIN
+        UNUR_ERR_DISTR_GEN
+        UNUR_ERR_DISTR_REQUIRED
+        UNUR_ERR_DISTR_UNKNOWN
+        UNUR_ERR_DISTR_INVALID
+        UNUR_ERR_DISTR_DATA
+        UNUR_ERR_DISTR_PROP
+        UNUR_ERR_PAR_SET
+        UNUR_ERR_PAR_VARIANT
+        UNUR_ERR_PAR_INVALID
+        UNUR_ERR_GEN
+        UNUR_ERR_GEN_DATA
+        UNUR_ERR_GEN_CONDITION
+        UNUR_ERR_GEN_INVALID
+        UNUR_ERR_GEN_SAMPLING
+        UNUR_ERR_NO_REINIT
+        UNUR_ERR_NO_QUANTILE
+        UNUR_ERR_URNG
+        UNUR_ERR_URNG_MISS
+        UNUR_ERR_STR
+        UNUR_ERR_STR_UNKNOWN
+        UNUR_ERR_STR_SYNTAX
+        UNUR_ERR_STR_INVALID
+        UNUR_ERR_FSTR_SYNTAX
+        UNUR_ERR_FSTR_DERIV
+        UNUR_ERR_DOMAIN
+        UNUR_ERR_ROUNDOFF
+        UNUR_ERR_MALLOC
+        UNUR_ERR_NULL
+        UNUR_ERR_COOKIE
+        UNUR_ERR_GENERIC
+        UNUR_ERR_SILENT
+        UNUR_ERR_INF
+        UNUR_ERR_NAN
+        UNUR_ERR_COMPILE
+        UNUR_ERR_SHOULD_NOT_HAPPEN
+
+    double INFINITY
+
+    unur_slist* _unur_slist_new()
+
+    int _unur_slist_append(unur_slist* slist, void* element)
+
+    int _unur_slist_length(unur_slist* slist)
+
+    void* _unur_slist_get(unur_slist* slist, int n)
+
+    void* _unur_slist_replace(unur_slist* slist, int n, void* element)
+
+    void _unur_slist_free(unur_slist* slist)
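+
+# Hedged usage sketch (exposition only; this block is an assumption, not part
+# of the autopxd2-generated declarations): from a .pyx module that cimports
+# this file, the UNU.RAN string API declared above could be exercised roughly
+# as follows:
+#
+#     from scipy.stats._unuran.unuran cimport (
+#         UNUR_GEN, unur_str2gen, unur_sample_cont, unur_free)
+#
+#     cdef UNUR_GEN* gen = unur_str2gen("normal(0, 1) & method=pinv")
+#     if gen is not NULL:
+#         x = unur_sample_cont(gen)   # draw one variate from the generator
+#         unur_free(gen)              # release the generator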
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_unuran/unuran_wrapper.pyi b/__packaged__/coreml/.python_dependencies/scipy/stats/_unuran/unuran_wrapper.pyi
new file mode 100644
index 00000000..723e06b8
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_unuran/unuran_wrapper.pyi
@@ -0,0 +1,178 @@
+import numpy as np
+from typing import (Union, Any, Tuple, List, overload, Callable, NamedTuple,
+                    Protocol)
+import numpy.typing as npt
+from scipy._lib._util import SeedType
+import scipy.stats as stats
+
+
+ArrayLike0D = Union[bool, int, float, complex, str, bytes, np.generic]
+
+
+__all__: List[str]
+
+
+class UNURANError(RuntimeError):
+    ...
+
+
+class Method:
+    @overload
+    def rvs(self, size: None = ...) -> float | int: ...  # type: ignore[misc]
+    @overload
+    def rvs(self, size: int | Tuple[int, ...] = ...) -> np.ndarray: ...
+    def set_random_state(self, random_state: SeedType) -> None: ...
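+
+# Hedged note (exposition only): the overload pair above encodes
+# "size=None -> Python scalar, size given -> ndarray", e.g. for any concrete
+# Method subclass instance `rng` (name is illustrative):
+#     rng.rvs()              # float | int
+#     rng.rvs(size=(2, 3))   # np.ndarray of shape (2, 3)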
+
+
+class TDRDist(Protocol):
+    @property
+    def pdf(self) -> Callable[..., float]: ...
+    @property
+    def dpdf(self) -> Callable[..., float]: ...
+    @property
+    def support(self) -> Tuple[float, float]: ...
+
+
+class TransformedDensityRejection(Method):
+    def __init__(self,
+                 dist: TDRDist,
+                 *,
+                 mode: None | float = ...,
+                 center: None | float = ...,
+                 domain: None | Tuple[float, float] = ...,
+                 c: float = ...,
+                 construction_points: int | npt.ArrayLike = ...,
+                 use_dars: bool = ...,
+                 max_squeeze_hat_ratio: float = ...,
+                 random_state: SeedType = ...) -> None: ...
+    @property
+    def squeeze_hat_ratio(self) -> float: ...
+    @property
+    def squeeze_area(self) -> float: ...
+    @overload
+    def ppf_hat(self, u: ArrayLike0D) -> float: ...  # type: ignore[misc]
+    @overload
+    def ppf_hat(self, u: npt.ArrayLike) -> np.ndarray: ...
+
+
+class SROUDist(Protocol):
+    @property
+    def pdf(self) -> Callable[..., float]: ...
+    @property
+    def support(self) -> Tuple[float, float]: ...
+
+
+class SimpleRatioUniforms(Method):
+    def __init__(self,
+                 dist: SROUDist,
+                 *,
+                 mode: None | float = ...,
+                 pdf_area: float = ...,
+                 domain: None | Tuple[float, float] = ...,
+                 cdf_at_mode: float = ...,
+                 random_state: SeedType = ...) -> None: ...
+
+
+UError = NamedTuple('UError', [('max_error', float),
+                               ('mean_absolute_error', float)])
+
+class PINVDist(Protocol):
+    @property
+    def pdf(self) -> Callable[..., float]: ...
+    @property
+    def cdf(self) -> Callable[..., float]: ...
+    @property
+    def logpdf(self) -> Callable[..., float]: ...
+
+
+class NumericalInversePolynomial(Method):
+    def __init__(self,
+                 dist: PINVDist,
+                 *,
+                 mode: None | float = ...,
+                 center: None | float = ...,
+                 domain: None | Tuple[float, float] = ...,
+                 order: int = ...,
+                 u_resolution: float = ...,
+                 random_state: SeedType = ...) -> None: ...
+    @property
+    def intervals(self) -> int: ...
+    @overload
+    def ppf(self, u: ArrayLike0D) -> float: ...  # type: ignore[misc]
+    @overload
+    def ppf(self, u: npt.ArrayLike) -> np.ndarray: ...
+    @overload
+    def cdf(self, x: ArrayLike0D) -> float: ...  # type: ignore[misc]
+    @overload
+    def cdf(self, x: npt.ArrayLike) -> np.ndarray: ...
+    def u_error(self, sample_size: int = ...) -> UError: ...
+    def qrvs(self,
+             size: None | int | Tuple[int, ...] = ...,
+             d: None | int = ...,
+             qmc_engine: None | stats.qmc.QMCEngine = ...) -> npt.ArrayLike: ...
+
+
+class HINVDist(Protocol):
+    @property
+    def pdf(self) -> Callable[..., float]: ...
+    @property
+    def cdf(self) -> Callable[..., float]: ...
+    @property
+    def support(self) -> Tuple[float, float]: ...
+
+
+class NumericalInverseHermite(Method):
+    def __init__(self,
+                 dist: HINVDist,
+                 *,
+                 domain: None | Tuple[float, float] = ...,
+                 order: int = ...,
+                 u_resolution: float = ...,
+                 construction_points: None | npt.ArrayLike = ...,
+                 max_intervals: int = ...,
+                 random_state: SeedType = ...) -> None: ...
+    @property
+    def intervals(self) -> int: ...
+    @overload
+    def ppf(self, u: ArrayLike0D) -> float: ...  # type: ignore[misc]
+    @overload
+    def ppf(self, u: npt.ArrayLike) -> np.ndarray: ...
+    def qrvs(self,
+             size: None | int | Tuple[int, ...] = ...,
+             d: None | int = ...,
+             qmc_engine: None | stats.qmc.QMCEngine = ...) -> npt.ArrayLike: ...
+    def u_error(self, sample_size: int = ...) -> UError: ...
+
+
+class DAUDist(Protocol):
+    @property
+    def pmf(self) -> Callable[..., float]: ...
+    @property
+    def support(self) -> Tuple[float, float]: ...
+
+class DiscreteAliasUrn(Method):
+    def __init__(self,
+                 dist: npt.ArrayLike | DAUDist,
+                 *,
+                 domain: None | Tuple[float, float] = ...,
+                 urn_factor: float = ...,
+                 random_state: SeedType = ...) -> None: ...
+
+
+class DGTDist(Protocol):
+    @property
+    def pmf(self) -> Callable[..., float]: ...
+    @property
+    def support(self) -> Tuple[float, float]: ...
+
+class DiscreteGuideTable(Method):
+    def __init__(self,
+                 dist: npt.ArrayLike | DGTDist,
+                 *,
+                 domain: None | Tuple[float, float] = ...,
+                 guide_factor: float = ...,
+                 random_state: SeedType = ...) -> None: ...
+    @overload
+    def ppf(self, u: ArrayLike0D) -> float: ...  # type: ignore[misc]
+    @overload
+    def ppf(self, u: npt.ArrayLike) -> np.ndarray: ...
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_variation.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_variation.py
new file mode 100644
index 00000000..7d1e94d0
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_variation.py
@@ -0,0 +1,224 @@
+
+import numpy as np
+from numpy.core.multiarray import normalize_axis_index
+from scipy._lib._util import _nan_allsame, _contains_nan
+from ._stats_py import _chk_asarray
+
+
+def _nanvariation(a, *, axis=0, ddof=0, keepdims=False):
+    """
+    Private version of `variation` that ignores nan.
+
+    `a` must be a numpy array.
+    `axis` is assumed to be normalized, i.e. 0 <= axis < a.ndim.
+    """
+    #
+    # In theory, this should be as simple as something like
+    #     nanstd(a, ddof=ddof, axis=axis, keepdims=keepdims) /
+    #     nanmean(a, axis=axis, keepdims=keepdims)
+    # In practice, annoying issues arise.  Specifically, numpy
+    # generates warnings in certain edge cases that we don't want
+    # to propagate to the user.  Unfortunately, there does not
+    # appear to be a thread-safe way to filter out the warnings,
+    # so we have to do the calculation in a way that doesn't
+    # generate numpy warnings.
+    #
+    # Let N be the number of non-nan inputs in a slice.
+    # Conditions that generate nan:
+    #   * empty input (i.e. N = 0)
+    #   * all non-nan values are 0
+    #   * N < ddof
+    #   * N == ddof and the input is constant
+    # Conditions that generate inf:
+    #   * non-constant input and either
+    #       * the mean is 0, or
+    #       * N == ddof
+    #
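+    # A worked example of the conditions above (illustrative): the slice
+    # [nan, 3.0, 3.0] has N = 2 and constant input, so ddof=1 gives 0.0
+    # (zero std, nonzero mean) while ddof=2 (N == ddof, constant input)
+    # gives nan; the slice [nan, 3.0, 4.0] with ddof=2 gives +inf
+    # (non-constant input, N == ddof, positive mean).
+    #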
+    a_isnan = np.isnan(a)
+    all_nan = a_isnan.all(axis=axis, keepdims=True)
+    all_nan_full = np.broadcast_to(all_nan, a.shape)
+    all_zero = (a_isnan | (a == 0)).all(axis=axis, keepdims=True) & ~all_nan
+
+    # ngood is the number of non-nan values in each slice.
+    ngood = (a.shape[axis] -
+             np.expand_dims(np.count_nonzero(a_isnan, axis=axis), axis))
+    # The return value is nan where ddof > ngood.
+    ddof_too_big = ddof > ngood
+    # If ddof == ngood, the return value is nan where the input is constant and
+    # inf otherwise.
+    ddof_equal_n = ddof == ngood
+
+    is_const = _nan_allsame(a, axis=axis, keepdims=True)
+
+    a2 = a.copy()
+    # If an entire slice is nan, `np.nanmean` will generate a warning,
+    # so we replace those nan's with 1.0 before computing the mean.
+    # We'll fix the corresponding output later.
+    a2[all_nan_full] = 1.0
+    mean_a = np.nanmean(a2, axis=axis, keepdims=True)
+
+    # If ddof >= ngood (the number of non-nan values in the slice), `np.nanstd`
+    # will generate a warning, so set all the values in such a slice to 1.0.
+    # We'll fix the corresponding output later.
+    a2[np.broadcast_to(ddof_too_big, a2.shape) | ddof_equal_n] = 1.0
+    with np.errstate(invalid='ignore'):
+        std_a = np.nanstd(a2, axis=axis, ddof=ddof, keepdims=True)
+    del a2
+
+    sum_zero = np.nansum(a, axis=axis, keepdims=True) == 0
+
+    # Where the sum along the axis is 0, replace mean_a with 1.  This avoids
+    # division by zero.  We'll fix the corresponding output later.
+    mean_a[sum_zero] = 1.0
+
+    # Here--finally!--is the calculation of the variation.
+    result = std_a / mean_a
+
+    # Now fix the values that were given fake data to avoid warnings.
+    result[~is_const & sum_zero] = np.inf
+    signed_inf_mask = ~is_const & ddof_equal_n
+    result[signed_inf_mask] = np.sign(mean_a[signed_inf_mask]) * np.inf
+    nan_mask = all_zero | all_nan | ddof_too_big | (ddof_equal_n & is_const)
+    result[nan_mask] = np.nan
+
+    if not keepdims:
+        result = np.squeeze(result, axis=axis)
+        if result.shape == ():
+            result = result[()]
+
+    return result
+
+
+def variation(a, axis=0, nan_policy='propagate', ddof=0, *, keepdims=False):
+    """
+    Compute the coefficient of variation.
+
+    The coefficient of variation is the standard deviation divided by the
+    mean.  This function is equivalent to::
+
+        np.std(x, axis=axis, ddof=ddof) / np.mean(x, axis=axis)
+
+    The default for ``ddof`` is 0, but many definitions of the coefficient
+    of variation use the square root of the unbiased sample variance
+    for the sample standard deviation, which corresponds to ``ddof=1``.
+
+    The function does not take the absolute value of the mean of the data,
+    so the return value is negative if the mean is negative.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    axis : int or None, optional
+        Axis along which to calculate the coefficient of variation.
+        Default is 0. If None, compute over the whole array `a`.
+    nan_policy : {'propagate', 'raise', 'omit'}, optional
+        Defines how to handle when input contains ``nan``.
+        The following options are available:
+
+          * 'propagate': return ``nan``
+          * 'raise': raise an exception
+          * 'omit': perform the calculation with ``nan`` values omitted
+
+        The default is 'propagate'.
+    ddof : int, optional
+        Gives the "Delta Degrees Of Freedom" used when computing the
+        standard deviation.  The divisor used in the calculation of the
+        standard deviation is ``N - ddof``, where ``N`` is the number of
+        elements.  `ddof` must be less than ``N``; if it isn't, the result
+        will be ``nan`` or ``inf``, depending on ``N`` and the values in
+        the array.  By default `ddof` is zero for backwards compatibility,
+        but it is recommended to use ``ddof=1`` to ensure that the sample
+        standard deviation is computed as the square root of the unbiased
+        sample variance.
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left in the
+        result as dimensions with size one. With this option, the result
+        will broadcast correctly against the input array.
+
+    Returns
+    -------
+    variation : ndarray
+        The calculated variation along the requested axis.
+
+    Notes
+    -----
+    There are several edge cases that are handled without generating a
+    warning:
+
+    * If both the mean and the standard deviation are zero, ``nan``
+      is returned.
+    * If the mean is zero and the standard deviation is nonzero, ``inf``
+      is returned.
+    * If the input has length zero (either because the array has zero
+      length, or all the input values are ``nan`` and ``nan_policy`` is
+      ``'omit'``), ``nan`` is returned.
+    * If the input contains ``inf``, ``nan`` is returned.
+
+    References
+    ----------
+    .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
+       Probability and Statistics Tables and Formulae. Chapman & Hall: New
+       York. 2000.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.stats import variation
+    >>> variation([1, 2, 3, 4, 5], ddof=1)
+    0.5270462766947299
+
+    Compute the variation along a given dimension of an array that contains
+    a few ``nan`` values:
+
+    >>> x = np.array([[  10.0, np.nan, 11.0, 19.0, 23.0, 29.0, 98.0],
+    ...               [  29.0,   30.0, 32.0, 33.0, 35.0, 56.0, 57.0],
+    ...               [np.nan, np.nan, 12.0, 13.0, 16.0, 16.0, 17.0]])
+    >>> variation(x, axis=1, ddof=1, nan_policy='omit')
+    array([1.05109361, 0.31428986, 0.146483  ])
+
+    """
+    a, axis = _chk_asarray(a, axis)
+    axis = normalize_axis_index(axis, ndim=a.ndim)
+    n = a.shape[axis]
+
+    contains_nan, nan_policy = _contains_nan(a, nan_policy)
+    if contains_nan and nan_policy == 'omit':
+        return _nanvariation(a, axis=axis, ddof=ddof, keepdims=keepdims)
+
+    if a.size == 0 or ddof > n:
+        # Handle as a special case to avoid spurious warnings.
+        # The return values, if any, are all nan.
+        shp = list(a.shape)
+        if keepdims:
+            shp[axis] = 1
+        else:
+            del shp[axis]
+        if len(shp) == 0:
+            result = np.nan
+        else:
+            result = np.full(shp, fill_value=np.nan)
+
+        return result
+
+    mean_a = a.mean(axis, keepdims=True)
+
+    if ddof == n:
+        # Another special case.  Result is either inf or nan.
+        std_a = a.std(axis=axis, ddof=0, keepdims=True)
+        result = np.full_like(std_a, fill_value=np.nan)
+        result.flat[std_a.flat > 0] = (np.sign(mean_a) * np.inf).flat
+        if result.shape == ():
+            result = result[()]
+        return result
+
+    with np.errstate(divide='ignore', invalid='ignore'):
+        std_a = a.std(axis, ddof=ddof, keepdims=True)
+        result = std_a / mean_a
+
+    if not keepdims:
+        result = np.squeeze(result, axis=axis)
+        if result.shape == ():
+            result = result[()]
+
+    return result
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/_warnings_errors.py b/__packaged__/coreml/.python_dependencies/scipy/stats/_warnings_errors.py
new file mode 100644
index 00000000..77a2e650
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/_warnings_errors.py
@@ -0,0 +1,38 @@
+# Warnings
+
+
+class DegenerateDataWarning(RuntimeWarning):
+    """Warns when data is degenerate and results may not be reliable."""
+    def __init__(self, msg=None):
+        if msg is None:
+            msg = ("Degenerate data encountered; results may not be reliable.")
+        self.args = (msg,)
+
+
+class ConstantInputWarning(DegenerateDataWarning):
+    """Warns when all values in data are exactly equal."""
+    def __init__(self, msg=None):
+        if msg is None:
+            msg = ("All values in data are exactly equal; "
+                   "results may not be reliable.")
+        self.args = (msg,)
+
+
+class NearConstantInputWarning(DegenerateDataWarning):
+    """Warns when all values in data are nearly equal."""
+    def __init__(self, msg=None):
+        if msg is None:
+            msg = ("All values in data are nearly equal; "
+                   "results may not be reliable.")
+        self.args = (msg,)
+
+
+# Errors
+
+
+class FitError(RuntimeError):
+    """Represents an error condition when fitting a distribution to data."""
+    def __init__(self, msg=None):
+        if msg is None:
+            msg = ("An error occurred when fitting a distribution to data.")
+        self.args = (msg,)
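+
+
+# Illustrative usage of the classes above (a sketch, not part of this
+# module's API): they are ordinary warning/exception types, so callers
+# can write, e.g.,
+#
+#     import warnings
+#     warnings.warn(ConstantInputWarning())  # emits the default message
+#
+# or ``raise FitError()`` to signal a failed distribution fit.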
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/biasedurn.py b/__packaged__/coreml/.python_dependencies/scipy/stats/biasedurn.py
new file mode 100644
index 00000000..63c771b5
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/biasedurn.py
@@ -0,0 +1,29 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+
+
+import warnings
+from . import _biasedurn
+
+
+__all__ = [  # noqa: F822
+    '_PyFishersNCHypergeometric',
+    '_PyWalleniusNCHypergeometric',
+    '_PyStochasticLib3'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.stats.biasedurn is deprecated and has no attribute "
+            f"{name}.")
+
+    warnings.warn("the `scipy.stats.biasedurn` namespace is deprecated and "
+                  "will be removed in SciPy v2.0.0.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_biasedurn, name)
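+
+
+# Illustrative behavior of this shim (a sketch; the same module-level
+# ``__getattr__`` pattern is used by the other deprecated compatibility
+# modules in this package):
+#
+#     import warnings
+#     from scipy.stats import biasedurn
+#     with warnings.catch_warnings(record=True) as w:
+#         warnings.simplefilter("always")
+#         cls = biasedurn._PyStochasticLib3  # warns, then resolves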
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/contingency.py b/__packaged__/coreml/.python_dependencies/scipy/stats/contingency.py
new file mode 100644
index 00000000..e0346fb9
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/contingency.py
@@ -0,0 +1,419 @@
+"""
+Contingency table functions (:mod:`scipy.stats.contingency`)
+============================================================
+
+Functions for creating and analyzing contingency tables.
+
+.. currentmodule:: scipy.stats.contingency
+
+.. autosummary::
+   :toctree: generated/
+
+   chi2_contingency
+   relative_risk
+   odds_ratio
+   crosstab
+   association
+
+   expected_freq
+   margins
+
+"""
+
+
+from functools import reduce
+import math
+import numpy as np
+from ._stats_py import power_divergence
+from ._relative_risk import relative_risk
+from ._crosstab import crosstab
+from ._odds_ratio import odds_ratio
+from scipy._lib._bunch import _make_tuple_bunch
+
+
+__all__ = ['margins', 'expected_freq', 'chi2_contingency', 'crosstab',
+           'association', 'relative_risk', 'odds_ratio']
+
+
+def margins(a):
+    """Return a list of the marginal sums of the array `a`.
+
+    Parameters
+    ----------
+    a : ndarray
+        The array for which to compute the marginal sums.
+
+    Returns
+    -------
+    margsums : list of ndarrays
+        A list of length `a.ndim`.  `margsums[k]` is the result
+        of summing `a` over all axes except `k`; it has the same
+        number of dimensions as `a`, but the length of each axis
+        except axis `k` will be 1.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.stats.contingency import margins
+
+    >>> a = np.arange(12).reshape(2, 6)
+    >>> a
+    array([[ 0,  1,  2,  3,  4,  5],
+           [ 6,  7,  8,  9, 10, 11]])
+    >>> m0, m1 = margins(a)
+    >>> m0
+    array([[15],
+           [51]])
+    >>> m1
+    array([[ 6,  8, 10, 12, 14, 16]])
+
+    >>> b = np.arange(24).reshape(2,3,4)
+    >>> m0, m1, m2 = margins(b)
+    >>> m0
+    array([[[ 66]],
+           [[210]]])
+    >>> m1
+    array([[[ 60],
+            [ 92],
+            [124]]])
+    >>> m2
+    array([[[60, 66, 72, 78]]])
+    """
+    margsums = []
+    ranged = list(range(a.ndim))
+    for k in ranged:
+        marg = np.apply_over_axes(np.sum, a, [j for j in ranged if j != k])
+        margsums.append(marg)
+    return margsums
+
+
+def expected_freq(observed):
+    """
+    Compute the expected frequencies from a contingency table.
+
+    Given an n-dimensional contingency table of observed frequencies,
+    compute the expected frequencies for the table based on the marginal
+    sums under the assumption that the groups associated with each
+    dimension are independent.
+
+    Parameters
+    ----------
+    observed : array_like
+        The table of observed frequencies.  (While this function can handle
+        a 1-D array, that case is trivial.  Generally `observed` is at
+        least 2-D.)
+
+    Returns
+    -------
+    expected : ndarray of float64
+        The expected frequencies, based on the marginal sums of the table.
+        Same shape as `observed`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.stats.contingency import expected_freq
+    >>> observed = np.array([[10, 10, 20],[20, 20, 20]])
+    >>> expected_freq(observed)
+    array([[ 12.,  12.,  16.],
+           [ 18.,  18.,  24.]])
+
+    """
+    # Typically `observed` is an integer array. If `observed` has a large
+    # number of dimensions or holds large values, some of the following
+    # computations may overflow, so we first switch to floating point.
+    observed = np.asarray(observed, dtype=np.float64)
+
+    # Create a list of the marginal sums.
+    margsums = margins(observed)
+
+    # Create the array of expected frequencies.  The shapes of the
+    # marginal sums returned by apply_over_axes() are just what we
+    # need for broadcasting in the following product.
+    d = observed.ndim
+    expected = reduce(np.multiply, margsums) / observed.sum() ** (d - 1)
+    return expected
+
+
+Chi2ContingencyResult = _make_tuple_bunch(
+    'Chi2ContingencyResult',
+    ['statistic', 'pvalue', 'dof', 'expected_freq'], []
+)
+
+
+def chi2_contingency(observed, correction=True, lambda_=None):
+    """Chi-square test of independence of variables in a contingency table.
+
+    This function computes the chi-square statistic and p-value for the
+    hypothesis test of independence of the observed frequencies in the
+    contingency table [1]_ `observed`.  The expected frequencies are computed
+    based on the marginal sums under the assumption of independence; see
+    `scipy.stats.contingency.expected_freq`.  The number of degrees of
+    freedom is (expressed using numpy functions and attributes)::
+
+        dof = observed.size - sum(observed.shape) + observed.ndim - 1
+
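+    For example, a 2 x 3 table has ``dof = 6 - (2 + 3) + 2 - 1 = 2``.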
+
+    Parameters
+    ----------
+    observed : array_like
+        The contingency table. The table contains the observed frequencies
+        (i.e. number of occurrences) in each category.  In the two-dimensional
+        case, the table is often described as an "R x C table".
+    correction : bool, optional
+        If True, *and* the degrees of freedom is 1, apply Yates' correction
+        for continuity.  The effect of the correction is to adjust each
+        observed value by 0.5 towards the corresponding expected value.
+    lambda_ : float or str, optional
+        By default, the statistic computed in this test is Pearson's
+        chi-squared statistic [2]_.  `lambda_` allows a statistic from the
+        Cressie-Read power divergence family [3]_ to be used instead.  See
+        `scipy.stats.power_divergence` for details.
+
+    Returns
+    -------
+    res : Chi2ContingencyResult
+        An object containing attributes:
+
+        statistic : float
+            The test statistic.
+        pvalue : float
+            The p-value of the test.
+        dof : int
+            The degrees of freedom.
+        expected_freq : ndarray, same shape as `observed`
+            The expected frequencies, based on the marginal sums of the table.
+
+    See Also
+    --------
+    scipy.stats.contingency.expected_freq
+    scipy.stats.fisher_exact
+    scipy.stats.chisquare
+    scipy.stats.power_divergence
+    scipy.stats.barnard_exact
+    scipy.stats.boschloo_exact
+
+    Notes
+    -----
+    An often quoted guideline for the validity of this calculation is that
+    the test should be used only if the observed and expected frequencies
+    in each cell are at least 5.
+
+    This is a test for the independence of different categories of a
+    population. The test is only meaningful when the dimension of
+    `observed` is two or more.  Applying the test to a one-dimensional
+    table will always result in `expected` equal to `observed` and a
+    chi-square statistic equal to 0.
+
+    This function does not handle masked arrays, because the calculation
+    does not make sense with missing values.
+
+    Like `scipy.stats.chisquare`, this function computes a chi-square
+    statistic; the convenience this function provides is to figure out the
+    expected frequencies and degrees of freedom from the given contingency
+    table. If these were already known, and if Yates' correction was not
+    required, one could use `scipy.stats.chisquare`.  That is, if one calls::
+
+        res = chi2_contingency(obs, correction=False)
+
+    then the following is true::
+
+        (res.statistic, res.pvalue) == stats.chisquare(
+            obs.ravel(), f_exp=res.expected_freq.ravel(),
+            ddof=obs.size - 1 - res.dof)
+
+    The `lambda_` argument was added in version 0.13.0 of scipy.
+
+    References
+    ----------
+    .. [1] "Contingency table",
+           https://en.wikipedia.org/wiki/Contingency_table
+    .. [2] "Pearson's chi-squared test",
+           https://en.wikipedia.org/wiki/Pearson%27s_chi-squared_test
+    .. [3] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
+           Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
+           pp. 440-464.
+
+    Examples
+    --------
+    A two-way example (2 x 3):
+
+    >>> import numpy as np
+    >>> from scipy.stats import chi2_contingency
+    >>> obs = np.array([[10, 10, 20], [20, 20, 20]])
+    >>> res = chi2_contingency(obs)
+    >>> res.statistic
+    2.7777777777777777
+    >>> res.pvalue
+    0.24935220877729619
+    >>> res.dof
+    2
+    >>> res.expected_freq
+    array([[ 12.,  12.,  16.],
+           [ 18.,  18.,  24.]])
+
+    Perform the test using the log-likelihood ratio (i.e. the "G-test")
+    instead of Pearson's chi-squared statistic.
+
+    >>> res = chi2_contingency(obs, lambda_="log-likelihood")
+    >>> res.statistic
+    2.7688587616781319
+    >>> res.pvalue
+    0.25046668010954165
+
+    A four-way example (2 x 2 x 2 x 2):
+
+    >>> obs = np.array(
+    ...     [[[[12, 17],
+    ...        [11, 16]],
+    ...       [[11, 12],
+    ...        [15, 16]]],
+    ...      [[[23, 15],
+    ...        [30, 22]],
+    ...       [[14, 17],
+    ...        [15, 16]]]])
+    >>> res = chi2_contingency(obs)
+    >>> res.statistic
+    8.7584514426741897
+    >>> res.pvalue
+    0.64417725029295503
+    """
+    observed = np.asarray(observed)
+    if np.any(observed < 0):
+        raise ValueError("All values in `observed` must be nonnegative.")
+    if observed.size == 0:
+        raise ValueError("No data; `observed` has size 0.")
+
+    expected = expected_freq(observed)
+    if np.any(expected == 0):
+        # Include one of the positions where expected is zero in
+        # the exception message.
+        zeropos = list(zip(*np.nonzero(expected == 0)))[0]
+        raise ValueError("The internally computed table of expected "
+                         "frequencies has a zero element at %s." % (zeropos,))
+
+    # The degrees of freedom
+    dof = expected.size - sum(expected.shape) + expected.ndim - 1
+
+    if dof == 0:
+        # Degenerate case; this occurs when `observed` is 1D (or, more
+        # generally, when it has only one nontrivial dimension).  In this
+        # case, we also have observed == expected, so chi2 is 0.
+        chi2 = 0.0
+        p = 1.0
+    else:
+        if dof == 1 and correction:
+            # Adjust `observed` according to Yates' correction for continuity.
+            # Magnitude of correction no bigger than difference; see gh-13875
+            diff = expected - observed
+            direction = np.sign(diff)
+            magnitude = np.minimum(0.5, np.abs(diff))
+            observed = observed + magnitude * direction
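+            # For example (illustrative): observed 10 vs expected 12 has
+            # diff = 2, so the value moves by the capped 0.5 to 10.5;
+            # observed 10 vs expected 10.2 moves by only 0.2, so the
+            # adjustment never overshoots the expected value.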
+
+        chi2, p = power_divergence(observed, expected,
+                                   ddof=observed.size - 1 - dof, axis=None,
+                                   lambda_=lambda_)
+
+    return Chi2ContingencyResult(chi2, p, dof, expected)
+
+
+def association(observed, method="cramer", correction=False, lambda_=None):
+    """Calculates degree of association between two nominal variables.
+
+    The function provides the option for computing one of three measures of
+    association between two nominal variables from the data given in a 2d
+    contingency table: Tschuprow's T, Pearson's Contingency Coefficient
+    and Cramer's V.
+
+    Parameters
+    ----------
+    observed : array-like
+        The array of observed values
+    method : {"cramer", "tschuprow", "pearson"} (default = "cramer")
+        The association test statistic.
+    correction : bool, optional
+        Inherited from `scipy.stats.contingency.chi2_contingency()`
+    lambda_ : float or str, optional
+        Inherited from `scipy.stats.contingency.chi2_contingency()`
+
+    Returns
+    -------
+    statistic : float
+        Value of the test statistic
+
+    Notes
+    -----
+    Cramer's V, Tschuprow's T and Pearson's Contingency Coefficient all
+    measure the degree to which two nominal or ordinal variables are related,
+    or the level of their association. This differs from correlation, although
+    the two are often mistakenly treated as equivalent. Correlation measures
+    in what way two variables are related, whereas association measures how
+    strongly the variables are related. As such, association does not
+    distinguish dependent from independent variables; the measures here are
+    symmetric and derive from the chi-square test of independence. A value of
+    1.0 indicates perfect association, and 0.0 means the variables have no
+    association.
+
+    Both the Cramer's V and Tschuprow's T are extensions of the phi
+    coefficient.  Moreover, due to the close relationship between the
+    Cramer's V and Tschuprow's T the returned values can often be similar
+    or even equivalent.  They are likely to diverge more as the array shape
+    diverges from a 2x2.
+
+    References
+    ----------
+    .. [1] "Tschuprow's T",
+           https://en.wikipedia.org/wiki/Tschuprow's_T
+    .. [2] Tschuprow, A. A. (1939)
+           Principles of the Mathematical Theory of Correlation;
+           translated by M. Kantorowitsch. W. Hodge & Co.
+    .. [3] "Cramer's V", https://en.wikipedia.org/wiki/Cramer's_V
+    .. [4] "Nominal Association: Phi and Cramer's V",
+           http://www.people.vcu.edu/~pdattalo/702SuppRead/MeasAssoc/NominalAssoc.html
+    .. [5] Gingrich, Paul, "Association Between Variables",
+           http://uregina.ca/~gingrich/ch11a.pdf
+
+    Examples
+    --------
+    An example with a 4x2 contingency table:
+
+    >>> import numpy as np
+    >>> from scipy.stats.contingency import association
+    >>> obs4x2 = np.array([[100, 150], [203, 322], [420, 700], [320, 210]])
+
+    Pearson's contingency coefficient
+
+    >>> association(obs4x2, method="pearson")
+    0.18303298140595667
+
+    Cramer's V
+
+    >>> association(obs4x2, method="cramer")
+    0.18617813077483678
+
+    Tschuprow's T
+
+    >>> association(obs4x2, method="tschuprow")
+    0.14146478765062995
+    """
+    arr = np.asarray(observed)
+    if not np.issubdtype(arr.dtype, np.integer):
+        raise ValueError("`observed` must be an integer array.")
+
+    if len(arr.shape) != 2:
+        raise ValueError("method only accepts 2d arrays")
+
+    chi2_stat = chi2_contingency(arr, correction=correction,
+                                 lambda_=lambda_)
+
+    phi2 = chi2_stat.statistic / arr.sum()
+    n_rows, n_cols = arr.shape
+    if method == "cramer":
+        value = phi2 / min(n_cols - 1, n_rows - 1)
+    elif method == "tschuprow":
+        value = phi2 / math.sqrt((n_rows - 1) * (n_cols - 1))
+    elif method == 'pearson':
+        value = phi2 / (1 + phi2)
+    else:
+        raise ValueError("Invalid argument value: 'method' argument must "
+                         "be 'cramer', 'tschuprow', or 'pearson'")
+
+    return math.sqrt(value)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/distributions.py b/__packaged__/coreml/.python_dependencies/scipy/stats/distributions.py
new file mode 100644
index 00000000..43c43e2b
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/distributions.py
@@ -0,0 +1,24 @@
+#
+# Author:  Travis Oliphant  2002-2011 with contributions from
+#          SciPy Developers 2004-2011
+#
+# NOTE: To look at history using `git blame`, use `git blame -M -C -C`
+#       instead of `git blame -Lxxx,+x`.
+#
+from ._distn_infrastructure import (rv_discrete, rv_continuous, rv_frozen)
+
+from . import _continuous_distns
+from . import _discrete_distns
+
+from ._continuous_distns import *
+from ._levy_stable import levy_stable
+from ._discrete_distns import *
+from ._entropy import entropy
+
+# For backwards compatibility e.g. pymc expects distributions.__all__.
+__all__ = ['rv_discrete', 'rv_continuous', 'rv_histogram', 'entropy']
+
+# Add only the distribution names, not the *_gen names.
+__all__ += _continuous_distns._distn_names
+__all__ += ['levy_stable']
+__all__ += _discrete_distns._distn_names
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/kde.py b/__packaged__/coreml/.python_dependencies/scipy/stats/kde.py
new file mode 100644
index 00000000..da7f1059
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/kde.py
@@ -0,0 +1,31 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.stats` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _kde
+
+
+__all__ = [  # noqa: F822
+    'gaussian_kde', 'linalg', 'logsumexp', 'check_random_state',
+    'atleast_2d', 'reshape', 'newaxis', 'exp', 'ravel', 'power',
+    'atleast_1d', 'squeeze', 'sum', 'transpose', 'cov',
+    'gaussian_kernel_estimate'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.stats.kde is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.stats instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.stats` namespace, "
+                  "the `scipy.stats.kde` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_kde, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/morestats.py b/__packaged__/coreml/.python_dependencies/scipy/stats/morestats.py
new file mode 100644
index 00000000..597e3696
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/morestats.py
@@ -0,0 +1,42 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.stats` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _morestats
+
+
+__all__ = [  # noqa: F822
+    'mvsdist',
+    'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot',
+    'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot',
+    'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', 'binom_test',
+    'fligner', 'mood', 'wilcoxon', 'median_test',
+    'circmean', 'circvar', 'circstd', 'anderson_ksamp',
+    'yeojohnson_llf', 'yeojohnson', 'yeojohnson_normmax',
+    'yeojohnson_normplot', 'annotations', 'namedtuple', 'isscalar', 'log',
+    'around', 'unique', 'arange', 'sort', 'amin', 'amax', 'atleast_1d',
+    'array', 'compress', 'exp', 'ravel', 'count_nonzero', 'arctan2',
+    'hypot', 'optimize', 'find_repeats',
+    'chi2_contingency', 'distributions', 'rv_generic', 'Mean',
+    'Variance', 'Std_dev', 'ShapiroResult', 'AndersonResult',
+    'Anderson_ksampResult', 'AnsariResult', 'BartlettResult',
+    'LeveneResult', 'FlignerResult', 'WilcoxonResult'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.stats.morestats is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.stats instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.stats` namespace, "
+                  "the `scipy.stats.morestats` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_morestats, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/mstats.py b/__packaged__/coreml/.python_dependencies/scipy/stats/mstats.py
new file mode 100644
index 00000000..18b743ab
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/mstats.py
@@ -0,0 +1,135 @@
+"""
+===================================================================
+Statistical functions for masked arrays (:mod:`scipy.stats.mstats`)
+===================================================================
+
+.. currentmodule:: scipy.stats.mstats
+
+This module contains a large number of statistical functions that can
+be used with masked arrays.
+
+Most of these functions are similar to those in `scipy.stats` but might
+have small differences in the API or in the algorithm used. Since this
+is a relatively new package, some API changes are still possible.
+
+Summary statistics
+==================
+
+.. autosummary::
+   :toctree: generated/
+
+   describe
+   gmean
+   hmean
+   kurtosis
+   mode
+   mquantiles
+   hdmedian
+   hdquantiles
+   hdquantiles_sd
+   idealfourths
+   plotting_positions
+   meppf
+   moment
+   skew
+   tmean
+   tvar
+   tmin
+   tmax
+   tsem
+   variation
+   find_repeats
+   sem
+   trimmed_mean
+   trimmed_mean_ci
+   trimmed_std
+   trimmed_var
+
+Frequency statistics
+====================
+
+.. autosummary::
+   :toctree: generated/
+
+   scoreatpercentile
+
+Correlation functions
+=====================
+
+.. autosummary::
+   :toctree: generated/
+
+   f_oneway
+   pearsonr
+   spearmanr
+   pointbiserialr
+   kendalltau
+   kendalltau_seasonal
+   linregress
+   siegelslopes
+   theilslopes
+   sen_seasonal_slopes
+
+Statistical tests
+=================
+
+.. autosummary::
+   :toctree: generated/
+
+   ttest_1samp
+   ttest_onesamp
+   ttest_ind
+   ttest_rel
+   chisquare
+   kstest
+   ks_2samp
+   ks_1samp
+   ks_twosamp
+   mannwhitneyu
+   rankdata
+   kruskal
+   kruskalwallis
+   friedmanchisquare
+   brunnermunzel
+   skewtest
+   kurtosistest
+   normaltest
+
+Transformations
+===============
+
+.. autosummary::
+   :toctree: generated/
+
+   obrientransform
+   trim
+   trima
+   trimmed_stde
+   trimr
+   trimtail
+   trimboth
+   winsorize
+   zmap
+   zscore
+
+Other
+=====
+
+.. autosummary::
+   :toctree: generated/
+
+   argstoarray
+   count_tied_groups
+   msign
+   compare_medians_ms
+   median_cihs
+   mjci
+   mquantiles_cimj
+   rsh
+
+"""
+from ._mstats_basic import *
+from ._mstats_extras import *
+# Functions that support masked array input in stats but need to be kept in the
+# mstats namespace for backwards compatibility:
+from scipy.stats import gmean, hmean, zmap, zscore, chisquare
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/mstats_basic.py b/__packaged__/coreml/.python_dependencies/scipy/stats/mstats_basic.py
new file mode 100644
index 00000000..4af7f4a3
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/mstats_basic.py
@@ -0,0 +1,58 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.stats` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _mstats_basic
+
+
+__all__ = [  # noqa: F822
+    'argstoarray',
+    'count_tied_groups',
+    'describe',
+    'f_oneway', 'find_repeats', 'friedmanchisquare',
+    'kendalltau', 'kendalltau_seasonal', 'kruskal', 'kruskalwallis',
+    'ks_twosamp', 'ks_2samp', 'kurtosis', 'kurtosistest',
+    'ks_1samp', 'kstest',
+    'linregress',
+    'mannwhitneyu', 'meppf', 'mode', 'moment', 'mquantiles', 'msign',
+    'normaltest',
+    'obrientransform',
+    'pearsonr', 'plotting_positions', 'pointbiserialr',
+    'rankdata',
+    'scoreatpercentile', 'sem',
+    'sen_seasonal_slopes', 'skew', 'skewtest', 'spearmanr',
+    'siegelslopes', 'theilslopes',
+    'tmax', 'tmean', 'tmin', 'trim', 'trimboth',
+    'trimtail', 'trima', 'trimr', 'trimmed_mean', 'trimmed_std',
+    'trimmed_stde', 'trimmed_var', 'tsem', 'ttest_1samp', 'ttest_onesamp',
+    'ttest_ind', 'ttest_rel', 'tvar',
+    'variation',
+    'winsorize',
+    'brunnermunzel', 'ma', 'masked', 'nomask', 'namedtuple',
+    'distributions', 'stats_linregress', 'stats_LinregressResult',
+    'stats_theilslopes', 'stats_siegelslopes', 'ModeResult',
+    'SpearmanrResult', 'KendalltauResult', 'PointbiserialrResult',
+    'Ttest_1sampResult', 'Ttest_indResult', 'Ttest_relResult',
+    'MannwhitneyuResult', 'KruskalResult', 'trimdoc', 'trim1',
+    'DescribeResult', 'stde_median', 'SkewtestResult', 'KurtosistestResult',
+    'NormaltestResult', 'F_onewayResult', 'FriedmanchisquareResult',
+    'BrunnerMunzelResult'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.stats.mstats_basic is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.stats instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.stats` namespace, "
+                  "the `scipy.stats.mstats_basic` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_mstats_basic, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/mstats_extras.py b/__packaged__/coreml/.python_dependencies/scipy/stats/mstats_extras.py
new file mode 100644
index 00000000..c06caf42
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/mstats_extras.py
@@ -0,0 +1,34 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.stats` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _mstats_extras
+
+
+__all__ = [  # noqa: F822
+    'compare_medians_ms',
+    'hdquantiles', 'hdmedian', 'hdquantiles_sd',
+    'idealfourths',
+    'median_cihs', 'mjci', 'mquantiles_cimj',
+    'rsh',
+    'trimmed_mean_ci', 'float_', 'int_', 'ma', 'MaskedArray', 'mstats',
+    'norm', 'beta', 't', 'binom'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.stats.mstats_extras is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.stats instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.stats` namespace, "
+                  "the `scipy.stats.mstats_extras` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_mstats_extras, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/mvn.py b/__packaged__/coreml/.python_dependencies/scipy/stats/mvn.py
new file mode 100644
index 00000000..a29acf75
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/mvn.py
@@ -0,0 +1,31 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.stats` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _mvn  # type: ignore
+
+
+__all__ = [  # noqa: F822
+    'mvnun',
+    'mvnun_weighted',
+    'mvndst',
+    'dkblck'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.stats.mvn is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.stats instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.stats` namespace, "
+                  "the `scipy.stats.mvn` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_mvn, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/qmc.py b/__packaged__/coreml/.python_dependencies/scipy/stats/qmc.py
new file mode 100644
index 00000000..110ce7ff
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/qmc.py
@@ -0,0 +1,235 @@
+# -*- coding: utf-8 -*-
+r"""
+====================================================
+Quasi-Monte Carlo submodule (:mod:`scipy.stats.qmc`)
+====================================================
+
+.. currentmodule:: scipy.stats.qmc
+
+This module provides Quasi-Monte Carlo generators and associated helper
+functions.
+
+
+Quasi-Monte Carlo
+=================
+
+Engines
+-------
+
+.. autosummary::
+   :toctree: generated/
+
+   QMCEngine
+   Sobol
+   Halton
+   LatinHypercube
+   PoissonDisk
+   MultinomialQMC
+   MultivariateNormalQMC
+
+Helpers
+-------
+
+.. autosummary::
+   :toctree: generated/
+
+   discrepancy
+   update_discrepancy
+   scale
+
+
+Introduction to Quasi-Monte Carlo
+=================================
+
+Quasi-Monte Carlo (QMC) methods [1]_, [2]_, [3]_ provide an
+:math:`n \times d` array of numbers in :math:`[0,1]`. They can be used in
+place of :math:`n` points from the :math:`U[0,1]^{d}` distribution. Compared to
+random points, QMC points are designed to have fewer gaps and clumps. This is
+quantified by discrepancy measures [4]_. From the Koksma-Hlawka
+inequality [5]_ we know that low discrepancy reduces a bound on
+integration error. Averaging a function :math:`f` over :math:`n` QMC points
+can achieve an integration error close to :math:`O(n^{-1})` for well
+behaved functions [2]_.
+
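+As a minimal sketch of drawing such points with one of the engines listed
+above (a scrambled Sobol' sequence; the seed is an arbitrary choice):
+
+>>> from scipy.stats import qmc
+>>> sampler = qmc.Sobol(d=2, scramble=True, seed=7)
+>>> sample = sampler.random(n=8)  # 8 points in [0, 1)^2
+>>> sample.shape
+(8, 2)
+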
+Most QMC constructions are designed for special values of :math:`n`
+such as powers of 2 or large primes. Changing the sample
+size by even one can degrade their performance, even their
+rate of convergence [6]_. For instance :math:`n=100` points may give less
+accuracy than :math:`n=64` if the method was designed for :math:`n=2^m`.
+
+Some QMC constructions are extensible in :math:`n`: we can find
+another special sample size :math:`n' > n` and often an infinite
+sequence of increasing special sample sizes. Some QMC
+constructions are extensible in :math:`d`: we can increase the dimension,
+possibly to some upper bound, and typically without requiring
+special values of :math:`d`. Some QMC methods are extensible in
+both :math:`n` and :math:`d`.
+
+QMC points are deterministic. That makes it hard to estimate the accuracy of
+integrals estimated by averages over QMC points. Randomized QMC (RQMC) [7]_
+points are constructed so that each point is individually :math:`U[0,1]^{d}`
+while collectively the :math:`n` points retain their low discrepancy.
+One can make :math:`R` independent replications of RQMC points to
+see how stable a computation is. From :math:`R` independent values,
+a t-test (or bootstrap t-test [8]_) then gives approximate confidence
+intervals on the mean value. Some RQMC methods produce a
+root mean squared error that is actually :math:`o(1/n)` and smaller than
+the rate seen in unrandomized QMC. An intuitive explanation is
+that the error is a sum of many small ones and random errors
+cancel in a way that deterministic ones do not. RQMC also
+has advantages on integrands that are singular or, for other
+reasons, fail to be Riemann integrable.
+
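+A minimal sketch of such a replication study (the integrand, sample size,
+and number of replications are arbitrary illustrative choices):
+
+>>> import numpy as np
+>>> from scipy.stats import qmc
+>>> def f(x):
+...     return np.prod(x, axis=1)  # integral over [0, 1]^2 is 1/4
+>>> estimates = [f(qmc.Sobol(d=2, scramble=True, seed=s).random(256)).mean()
+...              for s in range(10)]
+>>> se = np.std(estimates, ddof=1) / np.sqrt(len(estimates))
+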
+(R)QMC cannot beat Bakhvalov's curse of dimension (see [9]_). For
+any random or deterministic method, there are worst case functions
+that will give it poor performance in high dimensions. A worst
+case function for QMC might be 0 at all n points but very
+large elsewhere. Worst case analyses get very pessimistic
+in high dimensions. (R)QMC can bring a great improvement over
+MC when the functions on which it is used are not worst case.
+For instance (R)QMC can be especially effective on integrands
+that are well approximated by sums of functions of
+some small number of their input variables at a time [10]_, [11]_.
+That property is often a surprising finding about those functions.
+
+Also, to see an improvement over IID MC, (R)QMC requires a bit of smoothness of
+the integrand: roughly, the mixed first order derivative in each direction,
+:math:`\partial^d f/\partial x_1 \cdots \partial x_d`, must be integrable.
+For instance, a function that is 1 inside the hypersphere and 0 outside of it
+has infinite variation in the sense of Hardy and Krause for any dimension
+:math:`d \geq 2`.
+
+Scrambled nets are a kind of RQMC that have some valuable robustness
+properties [12]_. If the integrand is square integrable, they give variance
+:math:`var_{SNET} = o(1/n)`. There is a finite upper bound on
+:math:`var_{SNET} / var_{MC}` that holds simultaneously for every square
+integrable integrand. Scrambled nets satisfy a strong law of large numbers
+for :math:`f` in :math:`L^p` when :math:`p>1`. In some
+special cases there is a central limit theorem [13]_. For smooth enough
+integrands they can achieve RMSE nearly :math:`O(n^{-3})`. See [12]_
+for references about these properties.
+
+The main kinds of QMC methods are lattice rules [14]_ and digital
+nets and sequences [2]_, [15]_. The theories meet up in polynomial
+lattice rules [16]_ which can produce digital nets. Lattice rules
+require some form of search for good constructions. For digital
+nets there are widely used default constructions.
+
+The most widely used QMC methods are Sobol' sequences [17]_.
+These are digital nets. They are extensible in both :math:`n` and :math:`d`.
+They can be scrambled. The special sample sizes are powers
+of 2. Halton sequences [18]_ are another popular method.
+The constructions resemble those of digital nets. The earlier
+dimensions have much better equidistribution properties than
+later ones. There are essentially no special sample sizes.
+They are not thought to be as accurate as Sobol' sequences.
+They can be scrambled. The nets of Faure [19]_ are also widely
+used. All dimensions are equally good, but the special sample
+sizes grow rapidly with dimension :math:`d`. They can be scrambled.
+The nets of Niederreiter and Xing [20]_ have the best asymptotic
+properties but have not shown good empirical performance [21]_.
+
+Higher order digital nets are formed by a digit interleaving process
+in the digits of the constructed points. They can achieve higher
+levels of asymptotic accuracy given higher smoothness conditions on :math:`f`
+and they can be scrambled [22]_. There is little or no empirical work
+showing the improved rate to be attained.
+
+Using QMC is like using the entire period of a small random
+number generator. The constructions are similar, and so are
+the computational costs [23]_.
+
+(R)QMC is sometimes improved by passing the points through
+a baker's transformation (tent function) prior to using them.
+That function has the form :math:`1-2|x-1/2|`. As :math:`x` goes from 0 to
+1, this function goes from 0 to 1 and then back. It is very
+useful to produce a periodic function for lattice rules [14]_,
+and sometimes it improves the convergence rate [24]_.
+
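+For illustration, the transformation is applied elementwise to points in
+:math:`[0, 1]`:
+
+>>> import numpy as np
+>>> x = np.linspace(0, 1, 5)
+>>> 1 - 2 * np.abs(x - 0.5)  # tent: 0 -> 0, 1/2 -> 1, 1 -> 0
+array([0. , 0.5, 1. , 0.5, 0. ])
+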
+It is not straightforward to apply QMC methods to Markov
+chain Monte Carlo (MCMC).  We can think of MCMC as using
+:math:`n=1` point in :math:`[0,1]^{d}` for very large :math:`d`, with
+ergodic results corresponding to :math:`d \to \infty`. One proposal is
+in [25]_ and under strong conditions an improved rate of convergence
+has been shown [26]_.
+
+Returning to Sobol' points: there are many versions depending
+on what are called direction numbers. Those are the result of
+searches and are tabulated. A very widely used set of direction
+numbers come from [27]_. It is extensible in dimension up to
+:math:`d=21201`.
+
+References
+----------
+.. [1] Owen, Art B. "Monte Carlo Book: the Quasi-Monte Carlo parts." 2019.
+.. [2] Niederreiter, Harald. "Random number generation and quasi-Monte Carlo
+   methods." Society for Industrial and Applied Mathematics, 1992.
+.. [3] Dick, Josef, Frances Y. Kuo, and Ian H. Sloan. "High-dimensional
+   integration: the quasi-Monte Carlo way." Acta Numerica no. 22: 133, 2013.
+.. [4] Chen, W., A. Srivastav, and G. Travaglini (eds.), "A Panorama of
+   Discrepancy Theory", Springer International Publishing,
+   Switzerland: 679, 2014.
+.. [5] Hickernell, Fred J. "Koksma-Hlawka Inequality." Wiley StatsRef:
+   Statistics Reference Online, 2014.
+.. [6] Owen, Art B. "On dropping the first Sobol' point." :arxiv:`2008.08051`,
+   2020.
+.. [7] L'Ecuyer, Pierre, and Christiane Lemieux. "Recent advances in randomized
+   quasi-Monte Carlo methods." In Modeling uncertainty, pp. 419-474. Springer,
+   New York, NY, 2002.
+.. [8] DiCiccio, Thomas J., and Bradley Efron. "Bootstrap confidence
+   intervals." Statistical science: 189-212, 1996.
+.. [9] Dimov, Ivan T. "Monte Carlo methods for applied scientists." World
+   Scientific, 2008.
+.. [10] Caflisch, Russel E., William J. Morokoff, and Art B. Owen. "Valuation
+   of mortgage backed securities using Brownian bridges to reduce effective
+   dimension." Journal of Computational Finance: no. 1 27-46, 1997.
+.. [11] Sloan, Ian H., and Henryk Wozniakowski. "When are quasi-Monte Carlo
+   algorithms efficient for high dimensional integrals?." Journal of Complexity
+   14, no. 1 (1998): 1-33.
+.. [12] Owen, Art B., and Daniel Rudolf, "A strong law of large numbers for
+   scrambled net integration." SIAM Review, to appear.
+.. [13] Loh, Wei-Liem. "On the asymptotic distribution of scrambled net
+   quadrature." The Annals of Statistics 31, no. 4: 1282-1324, 2003.
+.. [14] Sloan, Ian H. and S. Joe. "Lattice methods for multiple integration."
+   Oxford University Press, 1994.
+.. [15] Dick, Josef, and Friedrich Pillichshammer. "Digital nets and sequences:
+   discrepancy theory and quasi-Monte Carlo integration." Cambridge University
+   Press, 2010.
+.. [16] Dick, Josef, F. Kuo, Friedrich Pillichshammer, and I. Sloan.
+   "Construction algorithms for polynomial lattice rules for multivariate
+   integration." Mathematics of computation 74, no. 252: 1895-1921, 2005.
+.. [17] Sobol', Il'ya Meerovich. "On the distribution of points in a cube and
+   the approximate evaluation of integrals." Zhurnal Vychislitel'noi Matematiki
+   i Matematicheskoi Fiziki 7, no. 4: 784-802, 1967.
+.. [18] Halton, John H. "On the efficiency of certain quasi-random sequences of
+   points in evaluating multi-dimensional integrals." Numerische Mathematik 2,
+   no. 1: 84-90, 1960.
+.. [19] Faure, Henri. "Discrepance de suites associees a un systeme de
+   numeration (en dimension s)." Acta arithmetica 41, no. 4: 337-351, 1982.
+.. [20] Niederreiter, Harold, and Chaoping Xing. "Low-discrepancy sequences and
+   global function fields with many rational places." Finite Fields and their
+   applications 2, no. 3: 241-273, 1996.
+.. [21] Hong, Hee Sun, and Fred J. Hickernell. "Algorithm 823: Implementing
+   scrambled digital sequences." ACM Transactions on Mathematical Software
+   (TOMS) 29, no. 2: 95-109, 2003.
+.. [22] Dick, Josef. "Higher order scrambled digital nets achieve the optimal
+   rate of the root mean square error for smooth integrands." The Annals of
+   Statistics 39, no. 3: 1372-1398, 2011.
+.. [23] Niederreiter, Harald. "Multidimensional numerical integration using
+   pseudorandom numbers." In Stochastic Programming 84 Part I, pp. 17-38.
+   Springer, Berlin, Heidelberg, 1986.
+.. [24] Hickernell, Fred J. "Obtaining O(N^(-2+epsilon)) Convergence for Lattice
+   Quadrature Rules." In Monte Carlo and Quasi-Monte Carlo Methods 2000,
+   pp. 274-289. Springer, Berlin, Heidelberg, 2002.
+.. [25] Owen, Art B., and Seth D. Tribble. "A quasi-Monte Carlo Metropolis
+   algorithm." Proceedings of the National Academy of Sciences 102,
+   no. 25: 8844-8849, 2005.
+.. [26] Chen, Su. "Consistency and convergence rate of Markov chain quasi Monte
+   Carlo with examples." PhD diss., Stanford University, 2011.
+.. [27] Joe, Stephen, and Frances Y. Kuo. "Constructing Sobol sequences with
+   better two-dimensional projections." SIAM Journal on Scientific Computing
+   30, no. 5: 2635-2654, 2008.
+
+"""
+from ._qmc import *
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/sampling.py b/__packaged__/coreml/.python_dependencies/scipy/stats/sampling.py
new file mode 100644
index 00000000..2111f1f4
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/sampling.py
@@ -0,0 +1,51 @@
+"""
+======================================================
+Random Number Generators (:mod:`scipy.stats.sampling`)
+======================================================
+
+.. currentmodule:: scipy.stats.sampling
+
+This module contains a collection of random number generators to sample
+from univariate continuous and discrete distributions. It uses the
+implementation of the C library "UNU.RAN".
+
+Generators Wrapped
+==================
+
+For continuous distributions
+----------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   NumericalInverseHermite
+   NumericalInversePolynomial
+   TransformedDensityRejection
+   SimpleRatioUniforms
+
+For discrete distributions
+--------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   DiscreteAliasUrn
+   DiscreteGuideTable
+
+Warnings / Errors used in :mod:`scipy.stats.sampling`
+-----------------------------------------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   UNURANError
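+
+Examples
+========
+
+A minimal usage sketch (the distribution class, seed, and sample size are
+illustrative; an unnormalized density suffices):
+
+>>> import numpy as np
+>>> from scipy.stats.sampling import NumericalInversePolynomial
+>>> class StandardNormal:
+...     def pdf(self, x):
+...         # normalization constant is not required
+...         return np.exp(-0.5 * x**2)
+>>> rng = np.random.default_rng(123)
+>>> gen = NumericalInversePolynomial(StandardNormal(), random_state=rng)
+>>> samples = gen.rvs(5)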
+"""
+from ._unuran.unuran_wrapper import (  # noqa: F401
+    TransformedDensityRejection,
+    DiscreteAliasUrn,
+    DiscreteGuideTable,
+    NumericalInversePolynomial,
+    NumericalInverseHermite,
+    SimpleRatioUniforms,
+    UNURANError
+)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/statlib.py b/__packaged__/coreml/.python_dependencies/scipy/stats/statlib.py
new file mode 100644
index 00000000..a468c856
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/statlib.py
@@ -0,0 +1,30 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.stats` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _statlib  # type: ignore
+
+
+__all__ = [  # noqa: F822
+    'swilk',
+    'gscale',
+    'prho'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.stats.statlib is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.stats instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.stats` namespace, "
+                  "the `scipy.stats.statlib` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_statlib, name)
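The module-level ``__getattr__`` above is the PEP 562 lazy-deprecation
pattern; a short sketch of the behavior it produces::

    import warnings
    from scipy.stats import statlib

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        statlib.swilk  # forwarded to _statlib, with a DeprecationWarning
    assert issubclass(caught[-1].category, DeprecationWarning)

    # names outside __all__ raise immediately instead of being forwarded
    try:
        statlib.no_such_name
    except AttributeError:
        pass

The ``scipy.stats.stats`` shim below follows the identical pattern for a much
larger set of names.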
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/stats.py b/__packaged__/coreml/.python_dependencies/scipy/stats/stats.py
new file mode 100644
index 00000000..5a519308
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/stats.py
@@ -0,0 +1,62 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.stats` namespace for importing the functions
+# included below.
+
+import warnings
+from . import _stats_py
+
+
+__all__ = [  # noqa: F822
+    'find_repeats', 'gmean', 'hmean', 'pmean', 'mode', 'tmean', 'tvar',
+    'tmin', 'tmax', 'tstd', 'tsem', 'moment',
+    'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest',
+    'normaltest', 'jarque_bera', 'itemfreq',
+    'scoreatpercentile', 'percentileofscore',
+    'cumfreq', 'relfreq', 'obrientransform',
+    'sem', 'zmap', 'zscore', 'gzscore', 'iqr', 'gstd',
+    'median_absolute_deviation', 'median_abs_deviation',
+    'sigmaclip', 'trimboth', 'trim1', 'trim_mean',
+    'f_oneway', 'F_onewayConstantInputWarning',
+    'F_onewayBadInputSizesWarning',
+    'PearsonRConstantInputWarning', 'PearsonRNearConstantInputWarning',
+    'pearsonr', 'fisher_exact',
+    'SpearmanRConstantInputWarning', 'spearmanr', 'pointbiserialr',
+    'kendalltau', 'weightedtau', 'multiscale_graphcorr',
+    'linregress', 'siegelslopes', 'theilslopes', 'ttest_1samp',
+    'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel',
+    'kstest', 'ks_1samp', 'ks_2samp',
+    'chisquare', 'power_divergence',
+    'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare',
+    'rankdata',
+    'combine_pvalues', 'wasserstein_distance', 'energy_distance',
+    'brunnermunzel', 'alexandergovern', 'gcd', 'namedtuple', 'array',
+    'ma', 'cdist', 'check_random_state', 'MapWrapper',
+    'rng_integers', 'float_factorial', 'linalg', 'distributions',
+    'mstats_basic', 'make_dataclass', 'ModeResult', 'DescribeResult',
+    'SkewtestResult', 'KurtosistestResult', 'NormaltestResult',
+    'Jarque_beraResult', 'HistogramResult', 'CumfreqResult',
+    'RelfreqResult', 'SigmaclipResult', 'F_onewayResult',
+    'AlexanderGovernResult', 'AlexanderGovernConstantInputWarning',
+    'SpearmanrResult', 'PointbiserialrResult', 'KendalltauResult',
+    'WeightedTauResult', 'MGCResult', 'Ttest_1sampResult', 'Ttest_indResult',
+    'Ttest_relResult', 'Power_divergenceResult', 'KstestResult',
+    'Ks_2sampResult', 'RanksumsResult', 'KruskalResult',
+    'FriedmanchisquareResult', 'BrunnerMunzelResult', 'RepeatedResults'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    if name not in __all__:
+        raise AttributeError(
+            "scipy.stats.stats is deprecated and has no attribute "
+            f"{name}. Try looking in scipy.stats instead.")
+
+    warnings.warn(f"Please use `{name}` from the `scipy.stats` namespace, "
+                  "the `scipy.stats.stats` namespace is deprecated.",
+                  category=DeprecationWarning, stacklevel=2)
+
+    return getattr(_stats_py, name)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/__init__.py b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/common_tests.py b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/common_tests.py
new file mode 100644
index 00000000..b86b58a1
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/common_tests.py
@@ -0,0 +1,450 @@
+import pickle
+import re
+
+import numpy as np
+import numpy.testing as npt
+from numpy.testing import assert_allclose, assert_equal
+from pytest import raises as assert_raises
+
+import numpy.ma.testutils as ma_npt
+
+from scipy._lib._util import getfullargspec_no_self as _getfullargspec
+from scipy import stats
+
+
+def check_named_results(res, attributes, ma=False):
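+    # each named attribute of the result must equal its positional element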
+    for i, attr in enumerate(attributes):
+        if ma:
+            ma_npt.assert_equal(res[i], getattr(res, attr))
+        else:
+            npt.assert_equal(res[i], getattr(res, attr))
+
+
+def check_normalization(distfn, args, distname):
+    norm_moment = distfn.moment(0, *args)
+    npt.assert_allclose(norm_moment, 1.0)
+
+    if distname == "rv_histogram_instance":
+        atol, rtol = 1e-5, 0
+    else:
+        atol, rtol = 1e-7, 1e-7
+
+    normalization_expect = distfn.expect(lambda x: 1, args=args)
+    npt.assert_allclose(normalization_expect, 1.0, atol=atol, rtol=rtol,
+            err_msg=distname, verbose=True)
+
+    _a, _b = distfn.support(*args)
+    normalization_cdf = distfn.cdf(_b, *args)
+    npt.assert_allclose(normalization_cdf, 1.0)
+
+
+def check_moment(distfn, arg, m, v, msg):
+    m1 = distfn.moment(1, *arg)
+    m2 = distfn.moment(2, *arg)
+    if not np.isinf(m):
+        npt.assert_almost_equal(m1, m, decimal=10, err_msg=msg +
+                            ' - 1st moment')
+    else:                     # or np.isnan(m1),
+        npt.assert_(np.isinf(m1),
+               msg + ' - 1st moment -infinite, m1=%s' % str(m1))
+
+    if not np.isinf(v):
+        npt.assert_almost_equal(m2 - m1 * m1, v, decimal=10, err_msg=msg +
+                            ' - 2nd moment')
+    else:                     # or np.isnan(m2),
+        npt.assert_(np.isinf(m2),
+               msg + ' - 2nd moment -infinite, m2=%s' % str(m2))
+
+
+def check_mean_expect(distfn, arg, m, msg):
+    if np.isfinite(m):
+        m1 = distfn.expect(lambda x: x, arg)
+        npt.assert_almost_equal(m1, m, decimal=5, err_msg=msg +
+                            ' - 1st moment (expect)')
+
+
+def check_var_expect(distfn, arg, m, v, msg):
+    kwargs = {'rtol': 5e-6} if msg == "rv_histogram_instance" else {}
+    if np.isfinite(v):
+        m2 = distfn.expect(lambda x: x*x, arg)
+        npt.assert_allclose(m2, v + m*m, **kwargs)
+
+
+def check_skew_expect(distfn, arg, m, v, s, msg):
+    if np.isfinite(s):
+        m3e = distfn.expect(lambda x: np.power(x-m, 3), arg)
+        npt.assert_almost_equal(m3e, s * np.power(v, 1.5),
+                decimal=5, err_msg=msg + ' - skew')
+    else:
+        npt.assert_(np.isnan(s))
+
+
+def check_kurt_expect(distfn, arg, m, v, k, msg):
+    if np.isfinite(k):
+        m4e = distfn.expect(lambda x: np.power(x-m, 4), arg)
+        npt.assert_allclose(m4e, (k + 3.) * np.power(v, 2), atol=1e-5, rtol=1e-5,
+                err_msg=msg + ' - kurtosis')
+    elif not np.isposinf(k):
+        npt.assert_(np.isnan(k))
+
+
+def check_entropy(distfn, arg, msg):
+    ent = distfn.entropy(*arg)
+    npt.assert_(not np.isnan(ent), msg + ' - entropy is nan')
+
+
+def check_private_entropy(distfn, args, superclass):
+    # compare a generic _entropy with the distribution-specific implementation
+    npt.assert_allclose(distfn._entropy(*args),
+                        superclass._entropy(distfn, *args))
+
+
+def check_entropy_vect_scale(distfn, arg):
+    # check 2-d
+    sc = np.asarray([[1, 2], [3, 4]])
+    v_ent = distfn.entropy(*arg, scale=sc)
+    s_ent = [distfn.entropy(*arg, scale=s) for s in sc.ravel()]
+    s_ent = np.asarray(s_ent).reshape(v_ent.shape)
+    assert_allclose(v_ent, s_ent, atol=1e-14)
+
+    # check invalid value, check cast
+    sc = [1, 2, -3]
+    v_ent = distfn.entropy(*arg, scale=sc)
+    s_ent = [distfn.entropy(*arg, scale=s) for s in sc]
+    s_ent = np.asarray(s_ent).reshape(v_ent.shape)
+    assert_allclose(v_ent, s_ent, atol=1e-14)
+
+
+def check_edge_support(distfn, args):
+    # Make sure that x=self.a and self.b are handled correctly.
+    x = distfn.support(*args)
+    if isinstance(distfn, stats.rv_discrete):
+        x = x[0]-1, x[1]
+
+    npt.assert_equal(distfn.cdf(x, *args), [0.0, 1.0])
+    npt.assert_equal(distfn.sf(x, *args), [1.0, 0.0])
+
+    if distfn.name not in ('skellam', 'dlaplace'):
+        # with a = -inf, log(0) generates warnings
+        npt.assert_equal(distfn.logcdf(x, *args), [-np.inf, 0.0])
+        npt.assert_equal(distfn.logsf(x, *args), [0.0, -np.inf])
+
+    npt.assert_equal(distfn.ppf([0.0, 1.0], *args), x)
+    npt.assert_equal(distfn.isf([0.0, 1.0], *args), x[::-1])
+
+    # out-of-bounds for isf & ppf
+    npt.assert_(np.isnan(distfn.isf([-1, 2], *args)).all())
+    npt.assert_(np.isnan(distfn.ppf([-1, 2], *args)).all())
+
+
+def check_named_args(distfn, x, shape_args, defaults, meths):
+    ## Check calling w/ named arguments.
+
+    # check consistency of shapes, numargs and _parse signature
+    signature = _getfullargspec(distfn._parse_args)
+    npt.assert_(signature.varargs is None)
+    npt.assert_(signature.varkw is None)
+    npt.assert_(not signature.kwonlyargs)
+    npt.assert_(list(signature.defaults) == list(defaults))
+
+    shape_argnames = signature.args[:-len(defaults)]  # a, b, loc=0, scale=1
+    if distfn.shapes:
+        shapes_ = distfn.shapes.replace(',', ' ').split()
+    else:
+        shapes_ = ''
+    npt.assert_(len(shapes_) == distfn.numargs)
+    npt.assert_(len(shapes_) == len(shape_argnames))
+
+    # check calling w/ named arguments
+    shape_args = list(shape_args)
+
+    vals = [meth(x, *shape_args) for meth in meths]
+    npt.assert_(np.all(np.isfinite(vals)))
+
+    names, a, k = shape_argnames[:], shape_args[:], {}
+    while names:
+        k.update({names.pop(): a.pop()})
+        v = [meth(x, *a, **k) for meth in meths]
+        npt.assert_array_equal(vals, v)
+        if 'n' not in k.keys():
+            # `n` is first parameter of moment(), so can't be used as named arg
+            npt.assert_equal(distfn.moment(1, *a, **k),
+                             distfn.moment(1, *shape_args))
+
+    # unknown arguments should not go through:
+    k.update({'kaboom': 42})
+    assert_raises(TypeError, distfn.cdf, x, **k)
+
+
+def check_random_state_property(distfn, args):
+    # check the random_state attribute of a distribution *instance*
+
+    # This test fiddles with distfn.random_state. This breaks other tests,
+    # hence need to save it and then restore.
+    rndm = distfn.random_state
+
+    # baseline: this relies on the global state
+    np.random.seed(1234)
+    distfn.random_state = None
+    r0 = distfn.rvs(*args, size=8)
+
+    # use an explicit instance-level random_state
+    distfn.random_state = 1234
+    r1 = distfn.rvs(*args, size=8)
+    npt.assert_equal(r0, r1)
+
+    distfn.random_state = np.random.RandomState(1234)
+    r2 = distfn.rvs(*args, size=8)
+    npt.assert_equal(r0, r2)
+
+    # check that np.random.Generator can be used (numpy >= 1.17)
+    if hasattr(np.random, 'default_rng'):
+        # obtain a np.random.Generator object
+        rng = np.random.default_rng(1234)
+        distfn.rvs(*args, size=1, random_state=rng)
+
+    # can override the instance-level random_state for an individual .rvs call
+    distfn.random_state = 2
+    orig_state = distfn.random_state.get_state()
+
+    r3 = distfn.rvs(*args, size=8, random_state=np.random.RandomState(1234))
+    npt.assert_equal(r0, r3)
+
+    # ... and that does not alter the instance-level random_state!
+    npt.assert_equal(distfn.random_state.get_state(), orig_state)
+
+    # finally, restore the random_state
+    distfn.random_state = rndm
+
+
+def check_meth_dtype(distfn, arg, meths):
+    q0 = [0.25, 0.5, 0.75]
+    x0 = distfn.ppf(q0, *arg)
+    x_cast = [x0.astype(tp) for tp in
+                        (np.int_, np.float16, np.float32, np.float64)]
+
+    for x in x_cast:
+        # casting may have clipped the values, exclude those
+        distfn._argcheck(*arg)
+        x = x[(distfn.a < x) & (x < distfn.b)]
+        for meth in meths:
+            val = meth(x, *arg)
+            npt.assert_(val.dtype == np.float_)
+
+
+def check_ppf_dtype(distfn, arg):
+    q0 = np.asarray([0.25, 0.5, 0.75])
+    q_cast = [q0.astype(tp) for tp in (np.float16, np.float32, np.float64)]
+    for q in q_cast:
+        for meth in [distfn.ppf, distfn.isf]:
+            val = meth(q, *arg)
+            npt.assert_(val.dtype == np.float_)
+
+
+def check_cmplx_deriv(distfn, arg):
+    # Distributions allow complex arguments.
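+    # Complex-step differentiation: f'(x) ~ Im(f(x + i*h)) / h, which avoids
+    # the subtractive cancellation error of real finite differences.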
+    def deriv(f, x, *arg):
+        x = np.asarray(x)
+        h = 1e-10
+        return (f(x + h*1j, *arg)/h).imag
+
+    x0 = distfn.ppf([0.25, 0.51, 0.75], *arg)
+    x_cast = [x0.astype(tp) for tp in
+                        (np.int_, np.float16, np.float32, np.float64)]
+
+    for x in x_cast:
+        # casting may have clipped the values, exclude those
+        distfn._argcheck(*arg)
+        x = x[(distfn.a < x) & (x < distfn.b)]
+
+        pdf, cdf, sf = distfn.pdf(x, *arg), distfn.cdf(x, *arg), distfn.sf(x, *arg)
+        assert_allclose(deriv(distfn.cdf, x, *arg), pdf, rtol=1e-5)
+        assert_allclose(deriv(distfn.logcdf, x, *arg), pdf/cdf, rtol=1e-5)
+
+        assert_allclose(deriv(distfn.sf, x, *arg), -pdf, rtol=1e-5)
+        assert_allclose(deriv(distfn.logsf, x, *arg), -pdf/sf, rtol=1e-5)
+
+        assert_allclose(deriv(distfn.logpdf, x, *arg),
+                        deriv(distfn.pdf, x, *arg) / distfn.pdf(x, *arg),
+                        rtol=1e-5)
+
+
+def check_pickling(distfn, args):
+    # check that a distribution instance pickles and unpickles
+    # pay special attention to the random_state property
+
+    # save the random_state (restore later)
+    rndm = distfn.random_state
+
+    # check unfrozen
+    distfn.random_state = 1234
+    distfn.rvs(*args, size=8)
+    s = pickle.dumps(distfn)
+    r0 = distfn.rvs(*args, size=8)
+
+    unpickled = pickle.loads(s)
+    r1 = unpickled.rvs(*args, size=8)
+    npt.assert_equal(r0, r1)
+
+    # also smoke test some methods
+    medians = [distfn.ppf(0.5, *args), unpickled.ppf(0.5, *args)]
+    npt.assert_equal(medians[0], medians[1])
+    npt.assert_equal(distfn.cdf(medians[0], *args),
+                     unpickled.cdf(medians[1], *args))
+
+    # check frozen pickling/unpickling with rvs
+    frozen_dist = distfn(*args)
+    pkl = pickle.dumps(frozen_dist)
+    unpickled = pickle.loads(pkl)
+
+    r0 = frozen_dist.rvs(size=8)
+    r1 = unpickled.rvs(size=8)
+    npt.assert_equal(r0, r1)
+
+    # check pickling/unpickling of .fit method
+    if hasattr(distfn, "fit"):
+        fit_function = distfn.fit
+        pickled_fit_function = pickle.dumps(fit_function)
+        unpickled_fit_function = pickle.loads(pickled_fit_function)
+        assert fit_function.__name__ == unpickled_fit_function.__name__ == "fit"
+
+    # restore the random_state
+    distfn.random_state = rndm
+
+
+def check_freezing(distfn, args):
+    # regression test for gh-11089: freezing a distribution fails
+    # if loc and/or scale are specified
+    if isinstance(distfn, stats.rv_continuous):
+        locscale = {'loc': 1, 'scale': 2}
+    else:
+        locscale = {'loc': 1}
+
+    rv = distfn(*args, **locscale)
+    assert rv.a == distfn(*args).a
+    assert rv.b == distfn(*args).b
+
+
+def check_rvs_broadcast(distfunc, distname, allargs, shape, shape_only, otype):
+    np.random.seed(123)
+    sample = distfunc.rvs(*allargs)
+    assert_equal(sample.shape, shape, "%s: rvs failed to broadcast" % distname)
+    if not shape_only:
+        rvs = np.vectorize(lambda *allargs: distfunc.rvs(*allargs), otypes=otype)
+        np.random.seed(123)
+        expected = rvs(*allargs)
+        assert_allclose(sample, expected, rtol=1e-13)
+
+
+def check_deprecation_warning_gh5982_moment(distfn, arg, distname):
+    # See description of cases that need to be tested in the definition of
+    # scipy.stats.rv_generic.moment
+    shapes = [] if distfn.shapes is None else distfn.shapes.split(", ")
+    kwd_shapes = dict(zip(shapes, arg or []))  # dictionary of shape kwds
+    n = kwd_shapes.pop('n', None)
+
+    message1 = "moment() missing 1 required positional argument"
+    message2 = "_parse_args() missing 1 required positional argument: 'n'"
+    message3 = "moment() got multiple values for first argument"
+
+    if 'n' in shapes:
+        expected = distfn.mean(n=n, **kwd_shapes)
+
+        # A1
+        res = distfn.moment(1, n=n, **kwd_shapes)
+        assert_allclose(res, expected)
+
+        # A2
+        with assert_raises(TypeError, match=re.escape(message1)):
+            distfn.moment(n=n, **kwd_shapes)
+
+        # A3
+        # if `n` is not provided at all
+        with assert_raises(TypeError, match=re.escape(message2)):
+            distfn.moment(1, **kwd_shapes)
+        # if `n` is provided as a positional argument
+        res = distfn.moment(1, *arg)
+        assert_allclose(res, expected)
+
+        # A4
+        with assert_raises(TypeError, match=re.escape(message1)):
+            distfn.moment(**kwd_shapes)
+
+    else:
+        expected = distfn.mean(**kwd_shapes)
+
+        # B1
+        with assert_raises(TypeError, match=re.escape(message3)):
+            res = distfn.moment(1, n=1, **kwd_shapes)
+
+        # B2
+        with np.testing.assert_warns(DeprecationWarning):
+            res = distfn.moment(n=1, **kwd_shapes)
+            assert_allclose(res, expected)
+
+        # B3
+        res = distfn.moment(1, *arg)
+        assert_allclose(res, expected)
+
+        # B4
+        with assert_raises(TypeError, match=re.escape(message1)):
+            distfn.moment(**kwd_shapes)
+
+
+def check_deprecation_warning_gh5982_interval(distfn, arg, distname):
+    # See description of cases that need to be tested in the definition of
+    # scipy.stats.rv_generic.moment
+    shapes = [] if distfn.shapes is None else distfn.shapes.split(", ")
+    kwd_shapes = dict(zip(shapes, arg or []))  # dictionary of shape kwds
+    alpha = kwd_shapes.pop('alpha', None)
+
+    def my_interval(*args, **kwds):
+        return (distfn.ppf(0.25, *args, **kwds),
+                distfn.ppf(0.75, *args, **kwds))
+
+    message1 = "interval() missing 1 required positional argument"
+    message2 = "_parse_args() missing 1 required positional argument: 'alpha'"
+    message3 = "interval() got multiple values for first argument"
+
+    if 'alpha' in shapes:
+        expected = my_interval(alpha=alpha, **kwd_shapes)
+
+        # A1
+        res = distfn.interval(0.5, alpha=alpha, **kwd_shapes)
+        assert_allclose(res, expected)
+
+        # A2
+        with assert_raises(TypeError, match=re.escape(message1)):
+            distfn.interval(alpha=alpha, **kwd_shapes)
+
+        # A3
+        # if `alpha` is not provided at all
+        with assert_raises(TypeError, match=re.escape(message2)):
+            distfn.interval(0.5, **kwd_shapes)
+        # if `alpha` is provided as a positional argument
+        res = distfn.interval(0.5, *arg)
+        assert_allclose(res, expected)
+
+        # A4
+        with assert_raises(TypeError, match=re.escape(message1)):
+            distfn.interval(**kwd_shapes)
+
+    else:
+        expected = my_interval(**kwd_shapes)
+
+        # B1
+        with assert_raises(TypeError, match=re.escape(message3)):
+            res = distfn.interval(0.5, alpha=1, **kwd_shapes)
+
+        # B2
+        with np.testing.assert_warns(DeprecationWarning):
+            res = distfn.interval(alpha=0.5, **kwd_shapes)
+            assert_allclose(res, expected)
+
+        # B3
+        res = distfn.interval(0.5, *arg)
+        assert_allclose(res, expected)
+
+        # B4
+        with assert_raises(TypeError, match=re.escape(message1)):
+            distfn.interval(**kwd_shapes)
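These helpers are driven by scipy's distribution test matrix rather than
called directly; still, a sketch of direct invocation against the standard
normal shows the calling convention (``args`` is the tuple of shape
parameters, empty for ``norm``)::

    from scipy import stats

    check_normalization(stats.norm, (), "norm")   # moment(0), expect, cdf at b
    check_edge_support(stats.norm, ())            # cdf/sf/ppf/isf at the bounds
    check_random_state_property(stats.norm, ())   # seeding semantics of .rvs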
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/fisher_exact_results_from_r.py b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/fisher_exact_results_from_r.py
new file mode 100644
index 00000000..b7dd8936
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/fisher_exact_results_from_r.py
@@ -0,0 +1,607 @@
+# DO NOT EDIT THIS FILE!
+# This file was generated by the R script
+#     generate_fisher_exact_results_from_r.R
+# The script was run with R version 3.6.2 (2019-12-12) at 2020-11-09 06:16:09
+
+
+from collections import namedtuple
+import numpy as np
+
+
+Inf = np.inf
+
+Parameters = namedtuple('Parameters',
+                        ['table', 'confidence_level', 'alternative'])
+RResults = namedtuple('RResults',
+                      ['pvalue', 'conditional_odds_ratio',
+                       'conditional_odds_ratio_ci'])
+data = [
+    (Parameters(table=[[100, 2], [1000, 5]],
+                confidence_level=0.95,
+                alternative='two.sided'),
+     RResults(pvalue=0.1300759363430016,
+              conditional_odds_ratio=0.25055839934223,
+              conditional_odds_ratio_ci=(0.04035202926536294,
+                                         2.662846672960251))),
+    (Parameters(table=[[2, 7], [8, 2]],
+                confidence_level=0.95,
+                alternative='two.sided'),
+     RResults(pvalue=0.02301413756522116,
+              conditional_odds_ratio=0.0858623513573622,
+              conditional_odds_ratio_ci=(0.004668988338943325,
+                                         0.895792956493601))),
+    (Parameters(table=[[5, 1], [10, 10]],
+                confidence_level=0.95,
+                alternative='two.sided'),
+     RResults(pvalue=0.1973244147157191,
+              conditional_odds_ratio=4.725646047336587,
+              conditional_odds_ratio_ci=(0.4153910882532168,
+                                         259.2593661129417))),
+    (Parameters(table=[[5, 15], [20, 20]],
+                confidence_level=0.95,
+                alternative='two.sided'),
+     RResults(pvalue=0.09580440012477633,
+              conditional_odds_ratio=0.3394396617440851,
+              conditional_odds_ratio_ci=(0.08056337526385809,
+                                         1.22704788545557))),
+    (Parameters(table=[[5, 16], [16, 25]],
+                confidence_level=0.95,
+                alternative='two.sided'),
+     RResults(pvalue=0.2697004098849359,
+              conditional_odds_ratio=0.4937791394540491,
+              conditional_odds_ratio_ci=(0.1176691231650079,
+                                         1.787463657995973))),
+    (Parameters(table=[[10, 5], [10, 1]],
+                confidence_level=0.95,
+                alternative='two.sided'),
+     RResults(pvalue=0.1973244147157192,
+              conditional_odds_ratio=0.2116112781158479,
+              conditional_odds_ratio_ci=(0.003857141267422399,
+                                         2.407369893767229))),
+    (Parameters(table=[[10, 5], [10, 0]],
+                confidence_level=0.95,
+                alternative='two.sided'),
+     RResults(pvalue=0.06126482213438735,
+              conditional_odds_ratio=0,
+              conditional_odds_ratio_ci=(0,
+                                         1.451643573543705))),
+    (Parameters(table=[[5, 0], [1, 4]],
+                confidence_level=0.95,
+                alternative='two.sided'),
+     RResults(pvalue=0.04761904761904762,
+              conditional_odds_ratio=Inf,
+              conditional_odds_ratio_ci=(1.024822256141754,
+                                         Inf))),
+    (Parameters(table=[[0, 5], [1, 4]],
+                confidence_level=0.95,
+                alternative='two.sided'),
+     RResults(pvalue=1,
+              conditional_odds_ratio=0,
+              conditional_odds_ratio_ci=(0,
+                                         39.00054996869288))),
+    (Parameters(table=[[5, 1], [0, 4]],
+                confidence_level=0.95,
+                alternative='two.sided'),
+     RResults(pvalue=0.04761904761904761,
+              conditional_odds_ratio=Inf,
+              conditional_odds_ratio_ci=(1.024822256141754,
+                                         Inf))),
+    (Parameters(table=[[0, 1], [3, 2]],
+                confidence_level=0.95,
+                alternative='two.sided'),
+     RResults(pvalue=1,
+              conditional_odds_ratio=0,
+              conditional_odds_ratio_ci=(0,
+                                         39.00054996869287))),
+    (Parameters(table=[[200, 7], [8, 300]],
+                confidence_level=0.95,
+                alternative='two.sided'),
+     RResults(pvalue=2.005657880389071e-122,
+              conditional_odds_ratio=977.7866978606228,
+              conditional_odds_ratio_ci=(349.2595113327733,
+                                         3630.382605689872))),
+    (Parameters(table=[[28, 21], [6, 1957]],
+                confidence_level=0.95,
+                alternative='two.sided'),
+     RResults(pvalue=5.728437460831947e-44,
+              conditional_odds_ratio=425.2403028434684,
+              conditional_odds_ratio_ci=(152.4166024390096,
+                                         1425.700792178893))),
+    (Parameters(table=[[190, 800], [200, 900]],
+                confidence_level=0.95,
+                alternative='two.sided'),
+     RResults(pvalue=0.574111858126088,
+              conditional_odds_ratio=1.068697577856801,
+              conditional_odds_ratio_ci=(0.8520462587912048,
+                                         1.340148950273938))),
+    (Parameters(table=[[100, 2], [1000, 5]],
+                confidence_level=0.99,
+                alternative='two.sided'),
+     RResults(pvalue=0.1300759363430016,
+              conditional_odds_ratio=0.25055839934223,
+              conditional_odds_ratio_ci=(0.02502345007115455,
+                                         6.304424772117853))),
+    (Parameters(table=[[2, 7], [8, 2]],
+                confidence_level=0.99,
+                alternative='two.sided'),
+     RResults(pvalue=0.02301413756522116,
+              conditional_odds_ratio=0.0858623513573622,
+              conditional_odds_ratio_ci=(0.001923034001462487,
+                                         1.53670836950172))),
+    (Parameters(table=[[5, 1], [10, 10]],
+                confidence_level=0.99,
+                alternative='two.sided'),
+     RResults(pvalue=0.1973244147157191,
+              conditional_odds_ratio=4.725646047336587,
+              conditional_odds_ratio_ci=(0.2397970951413721,
+                                         1291.342011095509))),
+    (Parameters(table=[[5, 15], [20, 20]],
+                confidence_level=0.99,
+                alternative='two.sided'),
+     RResults(pvalue=0.09580440012477633,
+              conditional_odds_ratio=0.3394396617440851,
+              conditional_odds_ratio_ci=(0.05127576113762925,
+                                         1.717176678806983))),
+    (Parameters(table=[[5, 16], [16, 25]],
+                confidence_level=0.99,
+                alternative='two.sided'),
+     RResults(pvalue=0.2697004098849359,
+              conditional_odds_ratio=0.4937791394540491,
+              conditional_odds_ratio_ci=(0.07498546954483619,
+                                         2.506969905199901))),
+    (Parameters(table=[[10, 5], [10, 1]],
+                confidence_level=0.99,
+                alternative='two.sided'),
+     RResults(pvalue=0.1973244147157192,
+              conditional_odds_ratio=0.2116112781158479,
+              conditional_odds_ratio_ci=(0.0007743881879531337,
+                                         4.170192301163831))),
+    (Parameters(table=[[10, 5], [10, 0]],
+                confidence_level=0.99,
+                alternative='two.sided'),
+     RResults(pvalue=0.06126482213438735,
+              conditional_odds_ratio=0,
+              conditional_odds_ratio_ci=(0,
+                                         2.642491011905582))),
+    (Parameters(table=[[5, 0], [1, 4]],
+                confidence_level=0.99,
+                alternative='two.sided'),
+     RResults(pvalue=0.04761904761904762,
+              conditional_odds_ratio=Inf,
+              conditional_odds_ratio_ci=(0.496935393325443,
+                                         Inf))),
+    (Parameters(table=[[0, 5], [1, 4]],
+                confidence_level=0.99,
+                alternative='two.sided'),
+     RResults(pvalue=1,
+              conditional_odds_ratio=0,
+              conditional_odds_ratio_ci=(0,
+                                         198.019801980198))),
+    (Parameters(table=[[5, 1], [0, 4]],
+                confidence_level=0.99,
+                alternative='two.sided'),
+     RResults(pvalue=0.04761904761904761,
+              conditional_odds_ratio=Inf,
+              conditional_odds_ratio_ci=(0.496935393325443,
+                                         Inf))),
+    (Parameters(table=[[0, 1], [3, 2]],
+                confidence_level=0.99,
+                alternative='two.sided'),
+     RResults(pvalue=1,
+              conditional_odds_ratio=0,
+              conditional_odds_ratio_ci=(0,
+                                         198.019801980198))),
+    (Parameters(table=[[200, 7], [8, 300]],
+                confidence_level=0.99,
+                alternative='two.sided'),
+     RResults(pvalue=2.005657880389071e-122,
+              conditional_odds_ratio=977.7866978606228,
+              conditional_odds_ratio_ci=(270.0334165523604,
+                                         5461.333333326708))),
+    (Parameters(table=[[28, 21], [6, 1957]],
+                confidence_level=0.99,
+                alternative='two.sided'),
+     RResults(pvalue=5.728437460831947e-44,
+              conditional_odds_ratio=425.2403028434684,
+              conditional_odds_ratio_ci=(116.7944750275836,
+                                         1931.995993191814))),
+    (Parameters(table=[[190, 800], [200, 900]],
+                confidence_level=0.99,
+                alternative='two.sided'),
+     RResults(pvalue=0.574111858126088,
+              conditional_odds_ratio=1.068697577856801,
+              conditional_odds_ratio_ci=(0.7949398282935892,
+                                         1.436229679394333))),
+    (Parameters(table=[[100, 2], [1000, 5]],
+                confidence_level=0.95,
+                alternative='less'),
+     RResults(pvalue=0.1300759363430016,
+              conditional_odds_ratio=0.25055839934223,
+              conditional_odds_ratio_ci=(0,
+                                         1.797867027270803))),
+    (Parameters(table=[[2, 7], [8, 2]],
+                confidence_level=0.95,
+                alternative='less'),
+     RResults(pvalue=0.0185217259520665,
+              conditional_odds_ratio=0.0858623513573622,
+              conditional_odds_ratio_ci=(0,
+                                         0.6785254803404526))),
+    (Parameters(table=[[5, 1], [10, 10]],
+                confidence_level=0.95,
+                alternative='less'),
+     RResults(pvalue=0.9782608695652173,
+              conditional_odds_ratio=4.725646047336587,
+              conditional_odds_ratio_ci=(0,
+                                         127.8497388102893))),
+    (Parameters(table=[[5, 15], [20, 20]],
+                confidence_level=0.95,
+                alternative='less'),
+     RResults(pvalue=0.05625775074399956,
+              conditional_odds_ratio=0.3394396617440851,
+              conditional_odds_ratio_ci=(0,
+                                         1.032332939718425))),
+    (Parameters(table=[[5, 16], [16, 25]],
+                confidence_level=0.95,
+                alternative='less'),
+     RResults(pvalue=0.1808979350599346,
+              conditional_odds_ratio=0.4937791394540491,
+              conditional_odds_ratio_ci=(0,
+                                         1.502407513296985))),
+    (Parameters(table=[[10, 5], [10, 1]],
+                confidence_level=0.95,
+                alternative='less'),
+     RResults(pvalue=0.1652173913043479,
+              conditional_odds_ratio=0.2116112781158479,
+              conditional_odds_ratio_ci=(0,
+                                         1.820421051562392))),
+    (Parameters(table=[[10, 5], [10, 0]],
+                confidence_level=0.95,
+                alternative='less'),
+     RResults(pvalue=0.0565217391304348,
+              conditional_odds_ratio=0,
+              conditional_odds_ratio_ci=(0,
+                                         1.06224603077045))),
+    (Parameters(table=[[5, 0], [1, 4]],
+                confidence_level=0.95,
+                alternative='less'),
+     RResults(pvalue=1,
+              conditional_odds_ratio=Inf,
+              conditional_odds_ratio_ci=(0,
+                                         Inf))),
+    (Parameters(table=[[0, 5], [1, 4]],
+                confidence_level=0.95,
+                alternative='less'),
+     RResults(pvalue=0.5,
+              conditional_odds_ratio=0,
+              conditional_odds_ratio_ci=(0,
+                                         19.00192394479939))),
+    (Parameters(table=[[5, 1], [0, 4]],
+                confidence_level=0.95,
+                alternative='less'),
+     RResults(pvalue=1,
+              conditional_odds_ratio=Inf,
+              conditional_odds_ratio_ci=(0,
+                                         Inf))),
+    (Parameters(table=[[0, 1], [3, 2]],
+                confidence_level=0.95,
+                alternative='less'),
+     RResults(pvalue=0.4999999999999999,
+              conditional_odds_ratio=0,
+              conditional_odds_ratio_ci=(0,
+                                         19.00192394479939))),
+    (Parameters(table=[[200, 7], [8, 300]],
+                confidence_level=0.95,
+                alternative='less'),
+     RResults(pvalue=1,
+              conditional_odds_ratio=977.7866978606228,
+              conditional_odds_ratio_ci=(0,
+                                         3045.460216525746))),
+    (Parameters(table=[[28, 21], [6, 1957]],
+                confidence_level=0.95,
+                alternative='less'),
+     RResults(pvalue=1,
+              conditional_odds_ratio=425.2403028434684,
+              conditional_odds_ratio_ci=(0,
+                                         1186.440170942579))),
+    (Parameters(table=[[190, 800], [200, 900]],
+                confidence_level=0.95,
+                alternative='less'),
+     RResults(pvalue=0.7416227010368963,
+              conditional_odds_ratio=1.068697577856801,
+              conditional_odds_ratio_ci=(0,
+                                         1.293551891610822))),
+    (Parameters(table=[[100, 2], [1000, 5]],
+                confidence_level=0.99,
+                alternative='less'),
+     RResults(pvalue=0.1300759363430016,
+              conditional_odds_ratio=0.25055839934223,
+              conditional_odds_ratio_ci=(0,
+                                         4.375946050832565))),
+    (Parameters(table=[[2, 7], [8, 2]],
+                confidence_level=0.99,
+                alternative='less'),
+     RResults(pvalue=0.0185217259520665,
+              conditional_odds_ratio=0.0858623513573622,
+              conditional_odds_ratio_ci=(0,
+                                         1.235282118191202))),
+    (Parameters(table=[[5, 1], [10, 10]],
+                confidence_level=0.99,
+                alternative='less'),
+     RResults(pvalue=0.9782608695652173,
+              conditional_odds_ratio=4.725646047336587,
+              conditional_odds_ratio_ci=(0,
+                                         657.2063583945989))),
+    (Parameters(table=[[5, 15], [20, 20]],
+                confidence_level=0.99,
+                alternative='less'),
+     RResults(pvalue=0.05625775074399956,
+              conditional_odds_ratio=0.3394396617440851,
+              conditional_odds_ratio_ci=(0,
+                                         1.498867660683128))),
+    (Parameters(table=[[5, 16], [16, 25]],
+                confidence_level=0.99,
+                alternative='less'),
+     RResults(pvalue=0.1808979350599346,
+              conditional_odds_ratio=0.4937791394540491,
+              conditional_odds_ratio_ci=(0,
+                                         2.186159386716762))),
+    (Parameters(table=[[10, 5], [10, 1]],
+                confidence_level=0.99,
+                alternative='less'),
+     RResults(pvalue=0.1652173913043479,
+              conditional_odds_ratio=0.2116112781158479,
+              conditional_odds_ratio_ci=(0,
+                                         3.335351451901569))),
+    (Parameters(table=[[10, 5], [10, 0]],
+                confidence_level=0.99,
+                alternative='less'),
+     RResults(pvalue=0.0565217391304348,
+              conditional_odds_ratio=0,
+              conditional_odds_ratio_ci=(0,
+                                         2.075407697450433))),
+    (Parameters(table=[[5, 0], [1, 4]],
+                confidence_level=0.99,
+                alternative='less'),
+     RResults(pvalue=1,
+              conditional_odds_ratio=Inf,
+              conditional_odds_ratio_ci=(0,
+                                         Inf))),
+    (Parameters(table=[[0, 5], [1, 4]],
+                confidence_level=0.99,
+                alternative='less'),
+     RResults(pvalue=0.5,
+              conditional_odds_ratio=0,
+              conditional_odds_ratio_ci=(0,
+                                         99.00009507969122))),
+    (Parameters(table=[[5, 1], [0, 4]],
+                confidence_level=0.99,
+                alternative='less'),
+     RResults(pvalue=1,
+              conditional_odds_ratio=Inf,
+              conditional_odds_ratio_ci=(0,
+                                         Inf))),
+    (Parameters(table=[[0, 1], [3, 2]],
+                confidence_level=0.99,
+                alternative='less'),
+     RResults(pvalue=0.4999999999999999,
+              conditional_odds_ratio=0,
+              conditional_odds_ratio_ci=(0,
+                                         99.00009507969123))),
+    (Parameters(table=[[200, 7], [8, 300]],
+                confidence_level=0.99,
+                alternative='less'),
+     RResults(pvalue=1,
+              conditional_odds_ratio=977.7866978606228,
+              conditional_odds_ratio_ci=(0,
+                                         4503.078257659934))),
+    (Parameters(table=[[28, 21], [6, 1957]],
+                confidence_level=0.99,
+                alternative='less'),
+     RResults(pvalue=1,
+              conditional_odds_ratio=425.2403028434684,
+              conditional_odds_ratio_ci=(0,
+                                         1811.766127544222))),
+    (Parameters(table=[[190, 800], [200, 900]],
+                confidence_level=0.99,
+                alternative='less'),
+     RResults(pvalue=0.7416227010368963,
+              conditional_odds_ratio=1.068697577856801,
+              conditional_odds_ratio_ci=(0,
+                                         1.396522811516685))),
+    (Parameters(table=[[100, 2], [1000, 5]],
+                confidence_level=0.95,
+                alternative='greater'),
+     RResults(pvalue=0.979790445314723,
+              conditional_odds_ratio=0.25055839934223,
+              conditional_odds_ratio_ci=(0.05119649909830196,
+                                         Inf))),
+    (Parameters(table=[[2, 7], [8, 2]],
+                confidence_level=0.95,
+                alternative='greater'),
+     RResults(pvalue=0.9990149169715733,
+              conditional_odds_ratio=0.0858623513573622,
+              conditional_odds_ratio_ci=(0.007163749169069961,
+                                         Inf))),
+    (Parameters(table=[[5, 1], [10, 10]],
+                confidence_level=0.95,
+                alternative='greater'),
+     RResults(pvalue=0.1652173913043478,
+              conditional_odds_ratio=4.725646047336587,
+              conditional_odds_ratio_ci=(0.5493234651081089,
+                                         Inf))),
+    (Parameters(table=[[5, 15], [20, 20]],
+                confidence_level=0.95,
+                alternative='greater'),
+     RResults(pvalue=0.9849086665340765,
+              conditional_odds_ratio=0.3394396617440851,
+              conditional_odds_ratio_ci=(0.1003538933958604,
+                                         Inf))),
+    (Parameters(table=[[5, 16], [16, 25]],
+                confidence_level=0.95,
+                alternative='greater'),
+     RResults(pvalue=0.9330176609214881,
+              conditional_odds_ratio=0.4937791394540491,
+              conditional_odds_ratio_ci=(0.146507416280863,
+                                         Inf))),
+    (Parameters(table=[[10, 5], [10, 1]],
+                confidence_level=0.95,
+                alternative='greater'),
+     RResults(pvalue=0.9782608695652174,
+              conditional_odds_ratio=0.2116112781158479,
+              conditional_odds_ratio_ci=(0.007821681994077808,
+                                         Inf))),
+    (Parameters(table=[[10, 5], [10, 0]],
+                confidence_level=0.95,
+                alternative='greater'),
+     RResults(pvalue=1,
+              conditional_odds_ratio=0,
+              conditional_odds_ratio_ci=(0,
+                                         Inf))),
+    (Parameters(table=[[5, 0], [1, 4]],
+                confidence_level=0.95,
+                alternative='greater'),
+     RResults(pvalue=0.02380952380952382,
+              conditional_odds_ratio=Inf,
+              conditional_odds_ratio_ci=(1.487678929918272,
+                                         Inf))),
+    (Parameters(table=[[0, 5], [1, 4]],
+                confidence_level=0.95,
+                alternative='greater'),
+     RResults(pvalue=1,
+              conditional_odds_ratio=0,
+              conditional_odds_ratio_ci=(0,
+                                         Inf))),
+    (Parameters(table=[[5, 1], [0, 4]],
+                confidence_level=0.95,
+                alternative='greater'),
+     RResults(pvalue=0.0238095238095238,
+              conditional_odds_ratio=Inf,
+              conditional_odds_ratio_ci=(1.487678929918272,
+                                         Inf))),
+    (Parameters(table=[[0, 1], [3, 2]],
+                confidence_level=0.95,
+                alternative='greater'),
+     RResults(pvalue=1,
+              conditional_odds_ratio=0,
+              conditional_odds_ratio_ci=(0,
+                                         Inf))),
+    (Parameters(table=[[200, 7], [8, 300]],
+                confidence_level=0.95,
+                alternative='greater'),
+     RResults(pvalue=2.005657880388915e-122,
+              conditional_odds_ratio=977.7866978606228,
+              conditional_odds_ratio_ci=(397.784359748113,
+                                         Inf))),
+    (Parameters(table=[[28, 21], [6, 1957]],
+                confidence_level=0.95,
+                alternative='greater'),
+     RResults(pvalue=5.728437460831983e-44,
+              conditional_odds_ratio=425.2403028434684,
+              conditional_odds_ratio_ci=(174.7148056880929,
+                                         Inf))),
+    (Parameters(table=[[190, 800], [200, 900]],
+                confidence_level=0.95,
+                alternative='greater'),
+     RResults(pvalue=0.2959825901308897,
+              conditional_odds_ratio=1.068697577856801,
+              conditional_odds_ratio_ci=(0.8828406663967776,
+                                         Inf))),
+    (Parameters(table=[[100, 2], [1000, 5]],
+                confidence_level=0.99,
+                alternative='greater'),
+     RResults(pvalue=0.979790445314723,
+              conditional_odds_ratio=0.25055839934223,
+              conditional_odds_ratio_ci=(0.03045407081240429,
+                                         Inf))),
+    (Parameters(table=[[2, 7], [8, 2]],
+                confidence_level=0.99,
+                alternative='greater'),
+     RResults(pvalue=0.9990149169715733,
+              conditional_odds_ratio=0.0858623513573622,
+              conditional_odds_ratio_ci=(0.002768053063547901,
+                                         Inf))),
+    (Parameters(table=[[5, 1], [10, 10]],
+                confidence_level=0.99,
+                alternative='greater'),
+     RResults(pvalue=0.1652173913043478,
+              conditional_odds_ratio=4.725646047336587,
+              conditional_odds_ratio_ci=(0.2998184792279909,
+                                         Inf))),
+    (Parameters(table=[[5, 15], [20, 20]],
+                confidence_level=0.99,
+                alternative='greater'),
+     RResults(pvalue=0.9849086665340765,
+              conditional_odds_ratio=0.3394396617440851,
+              conditional_odds_ratio_ci=(0.06180414342643172,
+                                         Inf))),
+    (Parameters(table=[[5, 16], [16, 25]],
+                confidence_level=0.99,
+                alternative='greater'),
+     RResults(pvalue=0.9330176609214881,
+              conditional_odds_ratio=0.4937791394540491,
+              conditional_odds_ratio_ci=(0.09037094010066403,
+                                         Inf))),
+    (Parameters(table=[[10, 5], [10, 1]],
+                confidence_level=0.99,
+                alternative='greater'),
+     RResults(pvalue=0.9782608695652174,
+              conditional_odds_ratio=0.2116112781158479,
+              conditional_odds_ratio_ci=(0.001521592095430679,
+                                         Inf))),
+    (Parameters(table=[[10, 5], [10, 0]],
+                confidence_level=0.99,
+                alternative='greater'),
+     RResults(pvalue=1,
+              conditional_odds_ratio=0,
+              conditional_odds_ratio_ci=(0,
+                                         Inf))),
+    (Parameters(table=[[5, 0], [1, 4]],
+                confidence_level=0.99,
+                alternative='greater'),
+     RResults(pvalue=0.02380952380952382,
+              conditional_odds_ratio=Inf,
+              conditional_odds_ratio_ci=(0.6661157890359722,
+                                         Inf))),
+    (Parameters(table=[[0, 5], [1, 4]],
+                confidence_level=0.99,
+                alternative='greater'),
+     RResults(pvalue=1,
+              conditional_odds_ratio=0,
+              conditional_odds_ratio_ci=(0,
+                                         Inf))),
+    (Parameters(table=[[5, 1], [0, 4]],
+                confidence_level=0.99,
+                alternative='greater'),
+     RResults(pvalue=0.0238095238095238,
+              conditional_odds_ratio=Inf,
+              conditional_odds_ratio_ci=(0.6661157890359725,
+                                         Inf))),
+    (Parameters(table=[[0, 1], [3, 2]],
+                confidence_level=0.99,
+                alternative='greater'),
+     RResults(pvalue=1,
+              conditional_odds_ratio=0,
+              conditional_odds_ratio_ci=(0,
+                                         Inf))),
+    (Parameters(table=[[200, 7], [8, 300]],
+                confidence_level=0.99,
+                alternative='greater'),
+     RResults(pvalue=2.005657880388915e-122,
+              conditional_odds_ratio=977.7866978606228,
+              conditional_odds_ratio_ci=(297.9619252357688,
+                                         Inf))),
+    (Parameters(table=[[28, 21], [6, 1957]],
+                confidence_level=0.99,
+                alternative='greater'),
+     RResults(pvalue=5.728437460831983e-44,
+              conditional_odds_ratio=425.2403028434684,
+              conditional_odds_ratio_ci=(130.3213490295859,
+                                         Inf))),
+    (Parameters(table=[[190, 800], [200, 900]],
+                confidence_level=0.99,
+                alternative='greater'),
+     RResults(pvalue=0.2959825901308897,
+              conditional_odds_ratio=1.068697577856801,
+              conditional_odds_ratio_ci=(0.8176272148267533,
+                                         Inf))),
+]
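A sketch of how these fixtures are typically consumed. Two conventions
differ: R spells the alternative ``two.sided`` where scipy spells it
``two-sided``, and R's ``conditional_odds_ratio`` is the conditional MLE
(compare ``scipy.stats.contingency.odds_ratio`` in recent SciPy), not the
sample odds ratio that ``fisher_exact`` returns, so only the p-values are
compared directly::

    import numpy as np
    from scipy.stats import fisher_exact

    for params, expected in data:
        alternative = params.alternative.replace('.', '-')
        _, pvalue = fisher_exact(params.table, alternative=alternative)
        assert np.isclose(pvalue, expected.pvalue, rtol=1e-6, atol=0)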
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/levy_stable/stable-Z1-cdf-sample-data.npy b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/levy_stable/stable-Z1-cdf-sample-data.npy
new file mode 100644
index 00000000..ade03afd
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/levy_stable/stable-Z1-cdf-sample-data.npy differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/levy_stable/stable-Z1-pdf-sample-data.npy b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/levy_stable/stable-Z1-pdf-sample-data.npy
new file mode 100644
index 00000000..6768c7d2
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/levy_stable/stable-Z1-pdf-sample-data.npy differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/levy_stable/stable-loc-scale-sample-data.npy b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/levy_stable/stable-loc-scale-sample-data.npy
new file mode 100644
index 00000000..8680bb00
Binary files /dev/null and b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/levy_stable/stable-loc-scale-sample-data.npy differ
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/AtmWtAg.dat b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/AtmWtAg.dat
new file mode 100644
index 00000000..30537565
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/AtmWtAg.dat
@@ -0,0 +1,108 @@
+NIST/ITL StRD 
+Dataset Name:   AtmWtAg   (AtmWtAg.dat)
+
+
+File Format:    ASCII
+                Certified Values   (lines 41 to 47)
+                Data               (lines 61 to 108) 
+
+
+Procedure:      Analysis of Variance
+
+
+Reference:      Powell, L.J., Murphy, T.J. and Gramlich, J.W. (1982).
+                "The Absolute Isotopic Abundance & Atomic Weight
+                of a Reference Sample of Silver".
+                NBS Journal of Research, 87, pp. 9-19.
+
+
+Data:           1 Factor
+                2 Treatments
+                24 Replicates/Cell
+                48 Observations
+                7 Constant Leading Digits
+                Average Level of Difficulty
+                Observed Data
+
+
+Model:          3 Parameters (mu, tau_1, tau_2)
+                y_{ij} = mu + tau_i + epsilon_{ij}
+
+
+
+
+
+
+Certified Values:
+
+Source of                  Sums of               Mean               
+Variation          df      Squares              Squares             F Statistic
+
+
+Between Instrument  1 3.63834187500000E-09 3.63834187500000E-09 1.59467335677930E+01
+Within Instrument  46 1.04951729166667E-08 2.28155932971014E-10
+
+                   Certified R-Squared 2.57426544538321E-01
+
+                   Certified Residual
+                   Standard Deviation  1.51048314446410E-05
+
+
+
+
+
+
+
+
+
+
+
+Data:  Instrument           AgWt
+           1            107.8681568
+           1            107.8681465
+           1            107.8681572
+           1            107.8681785
+           1            107.8681446
+           1            107.8681903
+           1            107.8681526
+           1            107.8681494
+           1            107.8681616
+           1            107.8681587
+           1            107.8681519
+           1            107.8681486
+           1            107.8681419
+           1            107.8681569
+           1            107.8681508
+           1            107.8681672
+           1            107.8681385
+           1            107.8681518
+           1            107.8681662
+           1            107.8681424
+           1            107.8681360
+           1            107.8681333
+           1            107.8681610
+           1            107.8681477
+           2            107.8681079
+           2            107.8681344
+           2            107.8681513
+           2            107.8681197
+           2            107.8681604
+           2            107.8681385
+           2            107.8681642
+           2            107.8681365
+           2            107.8681151
+           2            107.8681082
+           2            107.8681517
+           2            107.8681448
+           2            107.8681198
+           2            107.8681482
+           2            107.8681334
+           2            107.8681609
+           2            107.8681101
+           2            107.8681512
+           2            107.8681469
+           2            107.8681360
+           2            107.8681254
+           2            107.8681261
+           2            107.8681450
+           2            107.8681368
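As a sketch of how a StRD fixture like this is verified (assuming the file
is available on disk as ``AtmWtAg.dat``; per the header, the data block
starts at line 61), a one-way ANOVA over the two instrument groups
reproduces the certified F statistic::

    import numpy as np
    from scipy import stats

    inst, agwt = np.loadtxt("AtmWtAg.dat", skiprows=60, unpack=True)
    groups = [agwt[inst == k] for k in (1, 2)]
    res = stats.f_oneway(*groups)
    assert np.isclose(res.statistic, 1.59467335677930e+01)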
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SiRstv.dat b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SiRstv.dat
new file mode 100644
index 00000000..18ea8971
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SiRstv.dat
@@ -0,0 +1,85 @@
+NIST/ITL StRD 
+Dataset Name:   SiRstv     (SiRstv.dat)
+
+
+File Format:    ASCII
+                Certified Values   (lines 41 to 47)
+                Data               (lines 61 to 85) 
+
+
+Procedure:      Analysis of Variance
+
+
+Reference:      Ehrstein, James and Croarkin, M. Carroll.
+                Unpublished NIST dataset.
+
+
+Data:           1 Factor
+                5 Treatments
+                5  Replicates/Cell
+                25 Observations
+                3 Constant Leading Digits
+                Lower Level of Difficulty
+                Observed Data
+
+
+Model:          6 Parameters (mu,tau_1, ... , tau_5)
+                y_{ij} = mu + tau_i + epsilon_{ij}
+
+
+
+
+
+
+
+
+Certified Values:
+
+Source of                  Sums of               Mean               
+Variation          df      Squares              Squares             F Statistic
+
+Between Instrument  4 5.11462616000000E-02 1.27865654000000E-02 1.18046237440255E+00
+Within Instrument  20 2.16636560000000E-01 1.08318280000000E-02
+
+                   Certified R-Squared 1.90999039051129E-01
+
+                   Certified Residual
+                   Standard Deviation  1.04076068334656E-01
+
+
+
+
+
+
+
+
+
+
+
+
+Data:  Instrument   Resistance
+           1         196.3052
+           1         196.1240
+           1         196.1890
+           1         196.2569
+           1         196.3403
+           2         196.3042
+           2         196.3825
+           2         196.1669
+           2         196.3257
+           2         196.0422
+           3         196.1303
+           3         196.2005
+           3         196.2889
+           3         196.0343
+           3         196.1811
+           4         196.2795
+           4         196.1748
+           4         196.1494
+           4         196.1485
+           4         195.9885
+           5         196.2119
+           5         196.1051
+           5         196.1850
+           5         196.0052
+           5         196.2090
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SmLs01.dat b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SmLs01.dat
new file mode 100644
index 00000000..945b24bf
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SmLs01.dat
@@ -0,0 +1,249 @@
+NIST/ITL StRD 
+Dataset Name:   SmLs01   (SmLs01.dat)
+
+
+File Format:    ASCII
+                Certified Values   (lines 41 to 47)
+                Data               (lines 61 to 249) 
+
+
+Procedure:      Analysis of Variance
+
+
+Reference:      Simon, Stephen D. and Lesage, James P. (1989).
+                "Assessing the Accuracy of ANOVA Calculations in
+                Statistical Software".
+                Computational Statistics & Data Analysis, 8, pp. 325-332.
+
+
+Data:           1 Factor
+                9 Treatments
+                21 Replicates/Cell
+                189 Observations
+                1 Constant Leading Digit
+                Lower Level of Difficulty
+                Generated Data
+
+
+Model:          10 Parameters (mu,tau_1, ... , tau_9)
+                y_{ij} = mu + tau_i + epsilon_{ij}
+
+
+
+
+
+
+Certified Values:
+
+Source of                  Sums of               Mean               
+Variation          df      Squares              Squares            F Statistic
+
+Between Treatment   8 1.68000000000000E+00 2.10000000000000E-01 2.10000000000000E+01
+Within Treatment  180 1.80000000000000E+00 1.00000000000000E-02
+
+                  Certified R-Squared 4.82758620689655E-01
+
+                  Certified Residual
+                  Standard Deviation  1.00000000000000E-01
+
+
+
+
+
+
+
+
+
+
+
+
+Data:  Treatment   Response
+           1         1.4
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           2         1.3
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           3         1.5
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           4         1.3
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           5         1.5
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           6         1.3
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           7         1.5
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           8         1.3
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           9         1.5
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
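All of these nist_anova files share the same fixed layout: the header states where the observations begin ("Data (lines 61 to ...)"), and every data row is two whitespace-separated columns, a treatment index and a response. A minimal parsing sketch under that assumption; the function name and path are illustrative, not part of scipy's API.

from collections import defaultdict

def read_nist_anova(path):
    """Return per-treatment response lists from a NIST StRD ANOVA file."""
    groups = defaultdict(list)
    with open(path) as fh:
        # Per the header, observations start at line 61; skip the prose above.
        for line in fh.readlines()[60:]:
            fields = line.split()
            if len(fields) == 2:
                treatment, response = int(fields[0]), float(fields[1])
                groups[treatment].append(response)
    return [groups[k] for k in sorted(groups)]

# e.g. read_nist_anova("SmLs01.dat") yields 9 lists of 21 responses each.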
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SmLs02.dat b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SmLs02.dat
new file mode 100644
index 00000000..ee76633a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SmLs02.dat
@@ -0,0 +1,1869 @@
+NIST/ITL StRD 
+Dataset Name:   SmLs02   (SmLs02.dat)
+
+
+File Format:    ASCII
+                Certified Values   (lines 41 to 47)
+                Data               (lines 61 to 1869) 
+
+
+Procedure:      Analysis of Variance
+
+
+Reference:      Simon, Stephen D. and Lesage, James P. (1989).
+                "Assessing the Accuracy of ANOVA Calculations in
+                Statistical Software".
+                Computational Statistics & Data Analysis, 8, pp. 325-332.
+
+
+Data:           1 Factor
+                9 Treatments
+                201 Replicates/Cell
+                1809 Observations
+                1 Constant Leading Digit
+                Lower Level of Difficulty
+                Generated Data
+
+
+Model:          10 Parameters (mu,tau_1, ... , tau_9)
+                y_{ij} = mu + tau_i + epsilon_{ij}
+
+
+
+
+
+
+Certified Values:
+
+Source of                  Sums of               Mean               
+Variation          df      Squares              Squares             F Statistic
+
+Between Treatment    8 1.60800000000000E+01 2.01000000000000E+00 2.01000000000000E+02
+Within Treatment  1800 1.80000000000000E+01 1.00000000000000E-02
+
+                  Certified R-Squared 4.71830985915493E-01
+
+                  Certified Residual
+                  Standard Deviation  1.00000000000000E-01
+
+
+
+
+
+
+
+
+
+
+
+
+Data:  Treatment   Response
+           1         1.4
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           2         1.3
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           3         1.5
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           4         1.3
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           5         1.5
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           6         1.3
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           7         1.5
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           8         1.3
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           9         1.5
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
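The certified values in each of these headers are internally consistent: the mean squares, F statistic, R-squared, and residual standard deviation all follow from the two sums of squares and their degrees of freedom. A minimal arithmetic sketch using the SmLs02 figures quoted above:

import math

# Certified sums of squares and degrees of freedom from the SmLs02 header.
ss_between, df_between = 1.608e1, 8
ss_within, df_within = 1.8e1, 1800

ms_between = ss_between / df_between   # 2.01000000000000E+00
ms_within = ss_within / df_within      # 1.00000000000000E-02
f_stat = ms_between / ms_within        # 2.01000000000000E+02
r_squared = ss_between / (ss_between + ss_within)
resid_sd = math.sqrt(ms_within)

assert math.isclose(f_stat, 2.01e2)
assert math.isclose(r_squared, 4.71830985915493e-1)
assert math.isclose(resid_sd, 1.0e-1)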
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SmLs03.dat b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SmLs03.dat
new file mode 100644
index 00000000..55dfa231
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SmLs03.dat
@@ -0,0 +1,18069 @@
+NIST/ITL StRD 
+Dataset Name:   SmLs03   (SmLs03.dat)
+
+
+File Format:    ASCII
+                Certified Values   (lines 41 to 47)
+                Data               (lines 61 to 18069) 
+
+
+Procedure:      Analysis of Variance
+
+
+Reference:      Simon, Stephen D. and Lesage, James P. (1989).
+                "Assessing the Accuracy of ANOVA Calculations in
+                Statistical Software".
+                Computational Statistics & Data Analysis, 8, pp. 325-332.
+
+
+Data:           1 Factor
+                9 Treatments
+                2001 Replicates/Cell
+                18009 Observations
+                1 Constant Leading Digit
+                Lower Level of Difficulty
+                Generated Data
+
+
+Model:          10 Parameters (mu,tau_1, ... , tau_9)
+                y_{ij} = mu + tau_i + epsilon_{ij}
+
+
+
+
+
+
+Certified Values:
+
+Source of                  Sums of               Mean               
+Variation          df      Squares              Squares             F Statistic
+
+Between Treatment     8 1.60080000000000E+02 2.00100000000000E+01 2.00100000000000E+03
+Within Treatment  18000 1.80000000000000E+02 1.00000000000000E-02
+
+                  Certified R-Squared 4.70712773465067E-01
+
+                  Certified Residual
+                  Standard Deviation  1.00000000000000E-01
+
+
+
+
+
+
+
+
+
+
+
+
+Data:  Treatment   Response
+           1         1.4
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           1         1.3
+           1         1.5
+           2         1.3
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           2         1.2
+           2         1.4
+           [rows "2  1.2" and "2  1.4" continue to alternate verbatim for the remainder of this run (~950 repetitions of the pair in this span)]
+           3         1.5
+           3         1.4
+           3         1.6
+           [rows "3  1.4" and "3  1.6" continue to alternate verbatim for the remainder of this run (~275 repetitions of the pair in this span)]
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           3         1.4
+           3         1.6
+           4         1.3
+           4         1.2
+           4         1.4
+           [... 740 further rows elided: the pair "4  1.2" / "4  1.4" alternates unchanged for the rest of this span ...]
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           4         1.2
+           4         1.4
+           5         1.5
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           5         1.6
+           5         1.4
+           (rows "5  1.6" / "5  1.4" repeat in strict alternation through the remainder of this block)
+           6         1.3
+           6         1.2
+           6         1.4
+           (rows "6  1.2" / "6  1.4" repeat in strict alternation through the remainder of this block)
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           6         1.2
+           6         1.4
+           7         1.5
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           7         1.6
+           7         1.4
+           [… 969 further rows: column 1 stays 7, column 2 keeps alternating 1.6 / 1.4 …]
+           8         1.3
+           8         1.2
+           8         1.4
+           [… 1488 further rows: column 1 stays 8, column 2 keeps alternating 1.2 / 1.4 …]
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           8         1.2
+           8         1.4
+           9         1.5
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
+           9         1.4
+           9         1.6
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SmLs04.dat b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SmLs04.dat
new file mode 100644
index 00000000..6a2a9fc9
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SmLs04.dat
@@ -0,0 +1,249 @@
+NIST/ITL StRD 
+Dataset Name:   SmLs04   (SmLs04.dat)
+
+
+File Format:    ASCII
+                Certified Values   (lines 41 to 47)
+                Data               (lines 61 to 249) 
+
+
+Procedure:      Analysis of Variance
+
+
+Reference:      Simon, Stephen D. and Lesage, James P. (1989).
+                "Assessing the Accuracy of ANOVA Calculations in
+                Statistical Software".
+                Computational Statistics & Data Analysis, 8, pp. 325-332.
+
+
+Data:           1 Factor
+                9 Treatments
+                21 Replicates/Cell
+                189 Observations
+                7 Constant Leading Digits
+                Average Level of Difficulty
+                Generated Data
+
+
+Model:          10 Parameters (mu,tau_1, ... , tau_9)
+                y_{ij} = mu + tau_i + epsilon_{ij}
+
+
+
+
+
+
+Certified Values:
+
+Source of                  Sums of               Mean               
+Variation          df      Squares              Squares             F Statistic
+
+Between Treatment   8 1.68000000000000E+00 2.10000000000000E-01 2.10000000000000E+01
+Within Treatment  180 1.80000000000000E+00 1.00000000000000E-02
+
+                  Certified R-Squared 4.82758620689655E-01
+
+                  Certified Residual
+                  Standard Deviation  1.00000000000000E-01
+
+
+
+
+
+
+
+
+
+
+
+
+Data:  Treatment   Response
+           1       1000000.4
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           2       1000000.3
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           3       1000000.5
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           4       1000000.3
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           5       1000000.5
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           6       1000000.3
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           7       1000000.5
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           8       1000000.3
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           9       1000000.5
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
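
(Editor's note, not part of the vendored files: the SmLs04.dat header above documents certified ANOVA values — F statistic 2.1E+01 with 189 observations on lines 61 to 249. A minimal sketch of how those values could be spot-checked once this dataset is on disk follows; it assumes scipy and numpy are installed, and the path is simply the one added by this diff.)

    from collections import defaultdict

    from scipy.stats import f_oneway

    path = ("__packaged__/coreml/.python_dependencies/scipy/stats/"
            "tests/data/nist_anova/SmLs04.dat")

    # Group responses by treatment. Per the header, the observations
    # occupy lines 61 to 249 of the file (189 rows of "treatment response").
    groups = defaultdict(list)
    with open(path) as fh:
        for line in fh.readlines()[60:249]:
            treatment, response = line.split()
            groups[treatment].append(float(response))

    # One-way ANOVA across the 9 treatment groups.
    f_stat, p_value = f_oneway(*groups.values())

    # Certified F statistic for SmLs04 is 2.10000000000000E+01.
    print(f"F = {f_stat:.10f}  (certified: 21.0)")

This is exactly the kind of check scipy's own test suite performs against these NIST files; the "7 Constant Leading Digits" in the header make the dataset a deliberate stress test of numerical accuracy, since the between-treatment variation is tiny relative to the 1000000.x level of the responses.
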
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SmLs05.dat b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SmLs05.dat
new file mode 100644
index 00000000..fe11c40b
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SmLs05.dat
@@ -0,0 +1,1869 @@
+NIST/ITL StRD 
+Dataset Name:   SmLs05   (SmLs05.dat)
+
+
+File Format:    ASCII
+                Certified Values   (lines 41 to 47)
+                Data               (lines 61 to 1869) 
+
+
+Procedure:      Analysis of Variance
+
+
+Reference:      Simon, Stephen D. and Lesage, James P. (1989).
+                "Assessing the Accuracy of ANOVA Calculations in
+                Statistical Software".
+                Computational Statistics & Data Analysis, 8, pp. 325-332.
+
+
+Data:           1 Factor
+                9 Treatments
+                201 Replicates/Cell
+                1809 Observations
+                7 Constant Leading Digits
+                Average Level of Difficulty
+                Generated Data
+
+
+Model:          10 Parameters (mu,tau_1, ... , tau_9)
+                y_{ij} = mu + tau_i + epsilon_{ij}
+
+
+
+
+
+
+Certified Values:
+
+Source of                  Sums of               Mean               
+Variation          df      Squares              Squares            F Statistic
+
+Between Treatment    8 1.60800000000000E+01 2.01000000000000E+00 2.01000000000000E+02
+Within Treatment  1800 1.80000000000000E+01 1.00000000000000E-02
+
+                  Certified R-Squared 4.71830985915493E-01
+
+                  Certified Residual
+                  Standard Deviation  1.00000000000000E-01
+
+
+
+
+
+
+
+
+
+
+
+
+Data:  Treatment   Response
+           1       1000000.4
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           2       1000000.3
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           3       1000000.5
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           4       1000000.3
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           5       1000000.5
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           6       1000000.3
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           7       1000000.5
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           8       1000000.3
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           9       1000000.5
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SmLs06.dat b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SmLs06.dat
new file mode 100644
index 00000000..602e4fbd
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SmLs06.dat
@@ -0,0 +1,18069 @@
+NIST/ITL StRD 
+Dataset Name:   SmLs06   (SmLs06.dat)
+
+
+File Format:    ASCII
+                Certified Values   (lines 41 to 47)
+                Data               (lines 61 to 18069) 
+
+
+Procedure:      Analysis of Variance
+
+
+Reference:      Simon, Stephen D. and Lesage, James P. (1989).
+                "Assessing the Accuracy of ANOVA Calculations in
+                Statistical Software".
+                Computational Statistics & Data Analysis, 8, pp. 325-332.
+
+
+Data:           1 Factor
+                9 Treatments
+                2001 Replicates/Cell
+                18009 Observations
+                7 Constant Leading Digits
+                Average Level of Difficulty
+                Generated Data
+
+
+Model:          10 Parameters (mu,tau_1, ... , tau_9)
+                y_{ij} = mu + tau_i + epsilon_{ij}
+
+
+
+
+
+
+Certified Values:
+
+Source of                  Sums of               Mean               
+Variation          df      Squares              Squares             F Statistic
+
+Between Treatment     8 1.60080000000000E+02 2.00100000000000E+01 2.00100000000000E+03
+Within Treatment  18000 1.80000000000000E+02 1.00000000000000E-02
+
+                  Certified R-Squared 4.70712773465067E-01
+
+                  Certified Residual
+                  Standard Deviation  1.00000000000000E-01
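
These .dat files ship under scipy/stats/tests/data/nist_anova, where the certified values above serve as reference results for one-way ANOVA computations. As a minimal sketch (not part of the NIST file; it assumes SmLs06.dat is saved locally with exactly the layout described, i.e. header on file lines 1-60 and data on lines 61-18069), the certified F statistic can be recomputed with scipy.stats.f_oneway:

# Recompute the one-way ANOVA F statistic for a NIST StRD dataset and
# compare it with the certified value quoted in the file header.
# Assumption: "SmLs06.dat" is a local copy of the file added in this diff.
import numpy as np
from scipy import stats

data = np.loadtxt("SmLs06.dat", skiprows=60)  # header occupies file lines 1-60
treatments, responses = data[:, 0], data[:, 1]
groups = [responses[treatments == t] for t in np.unique(treatments)]
f_stat, _ = stats.f_oneway(*groups)
print(f_stat)  # certified F statistic: 2.00100000000000E+03

With 7 constant leading digits in every response, a naive sum-of-squares implementation can lose precision on this dataset (the header rates it "Average Level of Difficulty"), so agreement with the certified value to several significant digits, rather than bit-exact equality, is the expected outcome.
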
+
+
+
+
+
+
+
+
+
+
+
+
+Data:  Treatment   Response
+           1       1000000.4
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           1       1000000.3
+           1       1000000.5
+           2       1000000.3
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           2       1000000.2
+           2       1000000.4
+           3       1000000.5
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
+           3       1000000.6
+           3       1000000.4
[… 1,107 data rows elided from this span: column 1 = 3, column 2 alternating between 1000000.6 and 1000000.4; the run continues the same two-line pattern from the preceding lines of the file …]
+           4       1000000.3
[… 1,027 data rows elided from this span: column 1 = 4, column 2 alternating between 1000000.2 and 1000000.4; the run continues past this span in the same pattern …]
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           4       1000000.2
+           4       1000000.4
+           5       1000000.5
+           5       1000000.4
+           5       1000000.6
+           [the pair "5       1000000.4" / "5       1000000.6" repeats verbatim for the remainder of this run]
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           5       1000000.4
+           5       1000000.6
+           6       1000000.3
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           6       1000000.2
+           6       1000000.4
+           7       1000000.5
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
+           7       1000000.6
+           7       1000000.4
[... 1,519 rows elided: the two rows above repeat verbatim through the rest of the label-7 block in this excerpt (1,523 alternating rows total here; the block begins before this excerpt) ...]
+           7       1000000.6
+           7       1000000.4
+           8       1000000.3
+           8       1000000.2
+           8       1000000.4
[... 608 rows elided: the `8  1000000.2` / `8  1000000.4` pair repeats verbatim (611 alternating rows in this excerpt), and the block continues beyond this excerpt ...]
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           8       1000000.2
+           8       1000000.4
+           9       1000000.5
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
+           9       1000000.4
+           9       1000000.6
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SmLs07.dat b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SmLs07.dat
new file mode 100644
index 00000000..deeac955
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SmLs07.dat
@@ -0,0 +1,249 @@
+NIST/ITL StRD 
+Dataset Name:   SmLs07   (SmLs07.dat)
+
+
+File Format:    ASCII
+                Certified Values   (lines 41 to 47)
+                Data               (lines 61 to 249) 
+
+
+Procedure:      Analysis of Variance
+
+
+Reference:      Simon, Stephen D. and Lesage, James P. (1989).
+                "Assessing the Accuracy of ANOVA Calculations in
+                Statistical Software".
+                Computational Statistics & Data Analysis, 8, pp. 325-332.
+
+
+Data:           1 Factor
+                9 Treatments
+                21 Replicates/Cell
+                189 Observations
+                13 Constant Leading Digits
+                Higher Level of Difficulty
+                Generated Data
+
+
+Model:          10 Parameters (mu,tau_1, ... , tau_9)
+                y_{ij} = mu + tau_i + epsilon_{ij}
+
+
+
+
+
+
+Certified Values:
+
+Source of                  Sums of               Mean               
+Variation          df      Squares              Squares            F Statistic
+
+Between Treatment   8 1.68000000000000E+00 2.10000000000000E-01 2.10000000000000E+01
+Within Treatment  180 1.80000000000000E+00 1.00000000000000E-02
+
+                  Certified R-Squared 4.82758620689655E-01
+
+                  Certified Residual
+                  Standard Deviation  1.00000000000000E-01
+
+
+
+
+
+
+
+
+
+
+
+
+Data:  Treatment   Response
+           1    1000000000000.4
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           2    1000000000000.3
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           3    1000000000000.5
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           4    1000000000000.3
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           5    1000000000000.5
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           6    1000000000000.3
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           7    1000000000000.5
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           8    1000000000000.3
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           9    1000000000000.5
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
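The SmLs07 header above is internally consistent: the certified R-squared equals SSB/(SSB+SSW) = 1.68/(1.68+1.80) = 0.482758620689655, and the certified F statistic equals MSB/MSW = 0.21/0.01 = 21, matching the table. Since these NIST StRD files are being vendored as scipy.stats test fixtures, the certified F statistic can be reproduced along the following lines. This is a minimal sketch, not part of the vendored data; the file path is an assumption, and the skiprows count comes from the header's statement that the data occupy lines 61 to 249.

    import numpy as np
    from scipy import stats

    # Path is illustrative; point it at the vendored copy of SmLs07.dat.
    # Per the file header, the Treatment/Response rows occupy lines 61-249,
    # so the 60 header lines are skipped.
    data = np.loadtxt("SmLs07.dat", skiprows=60)
    treatment = data[:, 0].astype(int)
    response = data[:, 1]

    # One-way ANOVA across the 9 treatment groups.
    groups = [response[treatment == t] for t in np.unique(treatment)]
    f_stat, p_value = stats.f_oneway(*groups)

    print(f_stat)  # certified F statistic: 2.10000000000000E+01

The 13 constant leading digits are the point of the dataset: they make the sums-of-squares computation numerically hostile in float64 (the header's "Higher Level of Difficulty"), so agreement with the certified value to several significant digits, rather than bit-exactness, is the realistic acceptance criterion.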
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SmLs08.dat b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SmLs08.dat
new file mode 100644
index 00000000..c5ee643f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SmLs08.dat
@@ -0,0 +1,1869 @@
+NIST/ITL StRD 
+Dataset Name:   SmLs08   (SmLs08.dat)
+
+
+File Format:    ASCII
+                Certified Values   (lines 41 to 47)
+                Data               (lines 61 to 1869) 
+
+
+Procedure:      Analysis of Variance
+
+
+Reference:      Simon, Stephen D. and Lesage, James P. (1989).
+                "Assessing the Accuracy of ANOVA Calculations in
+                Statistical Software".
+                Computational Statistics & Data Analysis, 8, pp. 325-332.
+
+
+Data:           1 Factor
+                9 Treatments
+                201 Replicates/Cell
+                1809 Observations
+                13 Constant Leading Digits
+                Higher Level of Difficulty
+                Generated Data
+
+
+Model:          10 Parameters (mu,tau_1, ... , tau_9)
+                y_{ij} = mu + tau_i + epsilon_{ij}
+
+
+
+
+
+
+Certified Values:
+
+Source of                  Sums of               Mean               
+Variation          df      Squares              Squares              F Statistic
+
+Between Treatment    8 1.60800000000000E+01 2.01000000000000E+00 2.01000000000000E+02
+Within Treatment  1800 1.80000000000000E+01 1.00000000000000E-02
+
+                  Certified R-Squared 4.71830985915493E-01
+
+                  Certified Residual
+                  Standard Deviation  1.00000000000000E-01
+
+
+
+
+
+
+
+
+
+
+
+
+Data:  Treatment   Response
+           1    1000000000000.4
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           2    1000000000000.3
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           3    1000000000000.5
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           4    1000000000000.3
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           5    1000000000000.5
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           6    1000000000000.3
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           7    1000000000000.5
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           8    1000000000000.3
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           9    1000000000000.5
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SmLs09.dat b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SmLs09.dat
new file mode 100644
index 00000000..887905e3
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_anova/SmLs09.dat
@@ -0,0 +1,18069 @@
+NIST/ITL StRD 
+Dataset Name:   SmLs09   (SmLs09.dat)
+
+
+File Format:    ASCII
+                Certified Values   (lines 41 to 47)
+                Data               (lines 61 to 18069) 
+
+
+Procedure:      Analysis of Variance
+
+
+Reference:      Simon, Stephen D. and Lesage, James P. (1989).
+                "Assessing the Accuracy of ANOVA Calculations in
+                Statistical Software".
+                Computational Statistics & Data Analysis, 8, pp. 325-332.
+
+
+Data:           1 Factor
+                9 Treatments
+                2001 Replicates/Cell
+                18009 Observations
+                13 Constant Leading Digits
+                Higher Level of Difficulty
+                Generated Data
+
+
+Model:          10 Parameters (mu,tau_1, ... , tau_9)
+                y_{ij} = mu + tau_i + epsilon_{ij}
+
+
+
+
+
+
+Certified Values:
+
+Source of                  Sums of               Mean               
+Variation          df      Squares              Squares              F Statistic
+
+Between Treatment     8 1.60080000000000E+02 2.00100000000000E+01 2.00100000000000E+03
+Within Treatment  18000 1.80000000000000E+02 1.00000000000000E-02
+
+                  Certified R-Squared 4.70712773465067E-01
+
+                  Certified Residual
+                  Standard Deviation  1.00000000000000E-01
+
+
+
+
+
+
+
+
+
+
+
+
+Data:  Treatment   Response
+           1    1000000000000.4
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           1    1000000000000.3
+           1    1000000000000.5
+           2    1000000000000.3
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           2    1000000000000.2
+           2    1000000000000.4
+           3    1000000000000.5
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           3    1000000000000.4
+           3    1000000000000.6
+           4    1000000000000.3
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           4    1000000000000.2
+           4    1000000000000.4
+           5    1000000000000.5
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           5    1000000000000.4
+           5    1000000000000.6
+           6    1000000000000.3
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           [... the two rows above alternate verbatim for the remainder of this span ...]
+           6    1000000000000.2
+           6    1000000000000.4
+           6    1000000000000.2
+           6    1000000000000.4
+           7    1000000000000.5
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           7    1000000000000.4
+           7    1000000000000.6
+           8    1000000000000.3
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           8    1000000000000.2
+           8    1000000000000.4
+           [... these two rows for index 8 repeat in strict alternation for the remainder of this block ...]
+           9    1000000000000.5
+           9    1000000000000.4
+           9    1000000000000.6
+           [... rows for index 9 continue alternating between 1000000000000.4 and 1000000000000.6 ...]
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
+           9    1000000000000.4
+           9    1000000000000.6
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_linregress/Norris.dat b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_linregress/Norris.dat
new file mode 100644
index 00000000..4bf8ed91
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/nist_linregress/Norris.dat
@@ -0,0 +1,97 @@
+NIST/ITL StRD
+Dataset Name:  Norris (Norris.dat)
+
+File Format:   ASCII
+               Certified Values  (lines 31 to 46)
+               Data              (lines 61 to 96)
+
+Procedure:     Linear Least Squares Regression
+
+Reference:     Norris, J., NIST.  
+               Calibration of Ozone Monitors.
+
+Data:          1 Response Variable (y)
+               1 Predictor Variable (x)
+               36 Observations
+               Lower Level of Difficulty
+               Observed Data
+
+Model:         Linear Class
+               2 Parameters (B0,B1)
+
+               y = B0 + B1*x + e
+
+
+
+               Certified Regression Statistics
+
+                                          Standard Deviation
+     Parameter          Estimate             of Estimate
+
+        B0        -0.262323073774029     0.232818234301152
+        B1         1.00211681802045      0.429796848199937E-03
+
+     Residual
+     Standard Deviation   0.884796396144373
+
+     R-Squared            0.999993745883712
+
+
+               Certified Analysis of Variance Table
+
+Source of Degrees of    Sums of             Mean  
+Variation  Freedom      Squares            Squares           F Statistic
+              
+Regression    1     4255954.13232369   4255954.13232369   5436385.54079785
+Residual     34     26.6173985294224   0.782864662630069
+
+                 
+                                          
+                                          
+                                                           
+
+                            
+                                   
+                                                       
+
+
+
+
+Data:       y          x
+           0.1        0.2
+         338.8      337.4
+         118.1      118.2
+         888.0      884.6
+           9.2       10.1
+         228.1      226.5
+         668.5      666.3
+         998.5      996.3
+         449.1      448.6
+         778.9      777.0
+         559.2      558.2
+           0.3        0.4
+           0.1        0.6
+         778.1      775.5
+         668.8      666.9
+         339.3      338.0
+         448.9      447.5
+          10.8       11.6
+         557.7      556.0
+         228.3      228.1
+         998.0      995.8
+         888.8      887.6
+         119.6      120.2
+           0.3        0.3
+           0.6        0.3
+         557.6      556.8
+         339.3      339.1
+         888.0      887.2
+         998.5      999.0
+         778.9      779.0
+          10.2       11.1
+         117.6      118.3
+         228.9      229.2
+         668.4      669.1
+         449.2      448.9
+           0.2        0.5
+                                   
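
Editor's note: the Norris.dat header above certifies the least-squares fit of y = B0 + B1*x + e. As a minimal sketch (not part of the diff, and assuming the vendored scipy is importable), the certified values can be reproduced with `scipy.stats.linregress`; the hard-coded arrays simply repeat the 36 (y, x) observations listed above.

```python
import numpy as np
from scipy.stats import linregress

# The 36 Norris observations, transcribed from the Data section above.
y = np.array([0.1, 338.8, 118.1, 888.0, 9.2, 228.1, 668.5, 998.5, 449.1,
              778.9, 559.2, 0.3, 0.1, 778.1, 668.8, 339.3, 448.9, 10.8,
              557.7, 228.3, 998.0, 888.8, 119.6, 0.3, 0.6, 557.6, 339.3,
              888.0, 998.5, 778.9, 10.2, 117.6, 228.9, 668.4, 449.2, 0.2])
x = np.array([0.2, 337.4, 118.2, 884.6, 10.1, 226.5, 666.3, 996.3, 448.6,
              777.0, 558.2, 0.4, 0.6, 775.5, 666.9, 338.0, 447.5, 11.6,
              556.0, 228.1, 995.8, 887.6, 120.2, 0.3, 0.3, 556.8, 339.1,
              887.2, 999.0, 779.0, 11.1, 118.3, 229.2, 669.1, 448.9, 0.5])

res = linregress(x, y)

# Compare against the certified values from the dataset header.
assert np.isclose(res.intercept, -0.262323073774029)   # B0
assert np.isclose(res.slope, 1.00211681802045)          # B1
assert np.isclose(res.rvalue**2, 0.999993745883712)     # R-squared
```

This is exactly the kind of check scipy's `nist_linregress` tests perform: fit the observed data and assert agreement with the NIST-certified parameter estimates.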
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/studentized_range_mpmath_ref.json b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/studentized_range_mpmath_ref.json
new file mode 100644
index 00000000..bb971286
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/data/studentized_range_mpmath_ref.json
@@ -0,0 +1,1499 @@
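Editor's note: the JSON below stores mpmath-derived reference values (moments, CDF, and PDF) for the studentized range distribution, keyed by `src_case` parameters with per-case tolerances. As a minimal sketch (an editorial illustration, not part of the diff, and assuming `scipy.stats.studentized_range` is available in the vendored scipy), one entry can be checked like this:

```python
import numpy as np
from scipy.stats import studentized_range

# One cdf_data entry from the JSON below: q=1, k=3, v=10.
mp_result = 0.23510918942128056

# In the JSON, "q" is the evaluation point, "k" the number of groups,
# and "v" the degrees of freedom (scipy's `df` shape parameter).
scipy_result = studentized_range.cdf(1, k=3, df=10)

# The entry's expected_atol/expected_rtol give the tolerance to assert at.
assert np.isclose(scipy_result, mp_result, atol=1e-11, rtol=1e-11)
```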
+{
+  "COMMENT": "!!!!!! THIS FILE WAS AUTOGENERATED BY RUNNING `python studentized_range_mpmath_ref.py` !!!!!!",
+  "moment_data": [
+    {
+      "src_case": {
+        "m": 0,
+        "k": 3,
+        "v": 10,
+        "expected_atol": 1e-09,
+        "expected_rtol": 1e-09
+      },
+      "mp_result": 1.0
+    },
+    {
+      "src_case": {
+        "m": 1,
+        "k": 3,
+        "v": 10,
+        "expected_atol": 1e-09,
+        "expected_rtol": 1e-09
+      },
+      "mp_result": 1.8342745127927962
+    },
+    {
+      "src_case": {
+        "m": 2,
+        "k": 3,
+        "v": 10,
+        "expected_atol": 1e-09,
+        "expected_rtol": 1e-09
+      },
+      "mp_result": 4.567483357831711
+    },
+    {
+      "src_case": {
+        "m": 3,
+        "k": 3,
+        "v": 10,
+        "expected_atol": 1e-09,
+        "expected_rtol": 1e-09
+      },
+      "mp_result": 14.412156886227011
+    },
+    {
+      "src_case": {
+        "m": 4,
+        "k": 3,
+        "v": 10,
+        "expected_atol": 1e-09,
+        "expected_rtol": 1e-09
+      },
+      "mp_result": 56.012250366720444
+    }
+  ],
+  "cdf_data": [
+    {
+      "src_case": {
+        "q": 0.1,
+        "k": 3,
+        "v": 3,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.0027502772229359594
+    },
+    {
+      "src_case": {
+        "q": 0.1,
+        "k": 10,
+        "v": 10,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 2.8544145010066327e-12
+    },
+    {
+      "src_case": {
+        "q": 0.1,
+        "k": 3,
+        "v": 10,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.0027520560662338336
+    },
+    {
+      "src_case": {
+        "q": 0.1,
+        "k": 10,
+        "v": 100,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 9.39089126131273e-13
+    },
+    {
+      "src_case": {
+        "q": 0.1,
+        "k": 3,
+        "v": 20,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.002752437649536182
+    },
+    {
+      "src_case": {
+        "q": 0.1,
+        "k": 10,
+        "v": 50,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 1.0862189999210748e-12
+    },
+    {
+      "src_case": {
+        "q": 0.1,
+        "k": 3,
+        "v": 120,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.002752755744313648
+    },
+    {
+      "src_case": {
+        "q": 0.1,
+        "k": 3,
+        "v": 100,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.0027527430186246545
+    },
+    {
+      "src_case": {
+        "q": 0.1,
+        "k": 3,
+        "v": 50,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.002752666667812431
+    },
+    {
+      "src_case": {
+        "q": 0.1,
+        "k": 20,
+        "v": 10,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 2.505275157135514e-24
+    },
+    {
+      "src_case": {
+        "q": 0.1,
+        "k": 20,
+        "v": 20,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 3.8546698113384126e-25
+    },
+    {
+      "src_case": {
+        "q": 0.1,
+        "k": 10,
+        "v": 3,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 1.7362668562706085e-11
+    },
+    {
+      "src_case": {
+        "q": 0.1,
+        "k": 20,
+        "v": 50,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 5.571947730052616e-26
+    },
+    {
+      "src_case": {
+        "q": 0.1,
+        "k": 20,
+        "v": 100,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 2.032619249089036e-27
+    },
+    {
+      "src_case": {
+        "q": 0.1,
+        "k": 20,
+        "v": 3,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 9.539763646681808e-22
+    },
+    {
+      "src_case": {
+        "q": 0.1,
+        "k": 10,
+        "v": 20,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 1.618313512511099e-12
+    },
+    {
+      "src_case": {
+        "q": 0.1,
+        "k": 20,
+        "v": 120,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 4.919231733354114e-28
+    },
+    {
+      "src_case": {
+        "q": 0.1,
+        "k": 10,
+        "v": 120,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 9.159348906295542e-13
+    },
+    {
+      "src_case": {
+        "q": 1,
+        "k": 3,
+        "v": 3,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.22331624289542043
+    },
+    {
+      "src_case": {
+        "q": 1,
+        "k": 3,
+        "v": 50,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.2395624637676257
+    },
+    {
+      "src_case": {
+        "q": 1,
+        "k": 3,
+        "v": 10,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.23510918942128056
+    },
+    {
+      "src_case": {
+        "q": 1,
+        "k": 3,
+        "v": 20,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.23786536230099864
+    },
+    {
+      "src_case": {
+        "q": 1,
+        "k": 10,
+        "v": 50,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.000651656693149116
+    },
+    {
+      "src_case": {
+        "q": 1,
+        "k": 3,
+        "v": 100,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.2401356460422021
+    },
+    {
+      "src_case": {
+        "q": 1,
+        "k": 10,
+        "v": 3,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.003971273224673166
+    },
+    {
+      "src_case": {
+        "q": 1,
+        "k": 10,
+        "v": 20,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.0008732969319364606
+    },
+    {
+      "src_case": {
+        "q": 1,
+        "k": 3,
+        "v": 120,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.24023154593376422
+    },
+    {
+      "src_case": {
+        "q": 1,
+        "k": 10,
+        "v": 10,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.001300816146573152
+    },
+    {
+      "src_case": {
+        "q": 1,
+        "k": 20,
+        "v": 50,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 1.5682573722040226e-07
+    },
+    {
+      "src_case": {
+        "q": 1,
+        "k": 10,
+        "v": 100,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.0005841098057517027
+    },
+    {
+      "src_case": {
+        "q": 1,
+        "k": 20,
+        "v": 3,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 9.2267674885784e-05
+    },
+    {
+      "src_case": {
+        "q": 1,
+        "k": 10,
+        "v": 120,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.0005731712496327297
+    },
+    {
+      "src_case": {
+        "q": 1,
+        "k": 20,
+        "v": 10,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 2.746798012658064e-06
+    },
+    {
+      "src_case": {
+        "q": 1,
+        "k": 20,
+        "v": 20,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 5.807700350854172e-07
+    },
+    {
+      "src_case": {
+        "q": 1,
+        "k": 20,
+        "v": 100,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 9.147637957472628e-08
+    },
+    {
+      "src_case": {
+        "q": 1,
+        "k": 20,
+        "v": 120,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 8.306675539750552e-08
+    },
+    {
+      "src_case": {
+        "q": 4,
+        "k": 3,
+        "v": 3,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.8711786295203324
+    },
+    {
+      "src_case": {
+        "q": 4,
+        "k": 3,
+        "v": 50,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.9818862781476212
+    },
+    {
+      "src_case": {
+        "q": 4,
+        "k": 3,
+        "v": 10,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.9566506502400175
+    },
+    {
+      "src_case": {
+        "q": 4,
+        "k": 3,
+        "v": 120,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.9849546621386962
+    },
+    {
+      "src_case": {
+        "q": 4,
+        "k": 3,
+        "v": 20,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.9731488893573804
+    },
+    {
+      "src_case": {
+        "q": 4,
+        "k": 10,
+        "v": 50,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.8450530667988544
+    },
+    {
+      "src_case": {
+        "q": 4,
+        "k": 10,
+        "v": 3,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.6164875232404174
+    },
+    {
+      "src_case": {
+        "q": 4,
+        "k": 3,
+        "v": 100,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.9845292772767739
+    },
+    {
+      "src_case": {
+        "q": 4,
+        "k": 10,
+        "v": 20,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.8079691517949077
+    },
+    {
+      "src_case": {
+        "q": 4,
+        "k": 10,
+        "v": 10,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.7573606942645745
+    },
+    {
+      "src_case": {
+        "q": 4,
+        "k": 10,
+        "v": 100,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.8587525248147736
+    },
+    {
+      "src_case": {
+        "q": 4,
+        "k": 10,
+        "v": 120,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.8611036193280976
+    },
+    {
+      "src_case": {
+        "q": 4,
+        "k": 20,
+        "v": 3,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.46523135355387657
+    },
+    {
+      "src_case": {
+        "q": 4,
+        "k": 20,
+        "v": 50,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.6318042819232383
+    },
+    {
+      "src_case": {
+        "q": 4,
+        "k": 20,
+        "v": 10,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.5574947140294286
+    },
+    {
+      "src_case": {
+        "q": 4,
+        "k": 20,
+        "v": 20,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.5970517763141937
+    },
+    {
+      "src_case": {
+        "q": 4,
+        "k": 20,
+        "v": 120,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.6493671527818267
+    },
+    {
+      "src_case": {
+        "q": 4,
+        "k": 20,
+        "v": 100,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.6466699776044968
+    },
+    {
+      "src_case": {
+        "q": 10,
+        "k": 3,
+        "v": 3,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.9881335633712994
+    },
+    {
+      "src_case": {
+        "q": 10,
+        "k": 3,
+        "v": 50,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.9999999861266821
+    },
+    {
+      "src_case": {
+        "q": 10,
+        "k": 3,
+        "v": 10,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.999908236635449
+    },
+    {
+      "src_case": {
+        "q": 10,
+        "k": 3,
+        "v": 20,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.9999978467928313
+    },
+    {
+      "src_case": {
+        "q": 10,
+        "k": 3,
+        "v": 120,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.9999999996690216
+    },
+    {
+      "src_case": {
+        "q": 10,
+        "k": 3,
+        "v": 100,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.9999999993640496
+    },
+    {
+      "src_case": {
+        "q": 10,
+        "k": 10,
+        "v": 3,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.9570401457077894
+    },
+    {
+      "src_case": {
+        "q": 10,
+        "k": 10,
+        "v": 50,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.9999997977351971
+    },
+    {
+      "src_case": {
+        "q": 10,
+        "k": 10,
+        "v": 10,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.9991738325963548
+    },
+    {
+      "src_case": {
+        "q": 10,
+        "k": 10,
+        "v": 20,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.9999730883609333
+    },
+    {
+      "src_case": {
+        "q": 10,
+        "k": 10,
+        "v": 100,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.9999999905199205
+    },
+    {
+      "src_case": {
+        "q": 10,
+        "k": 10,
+        "v": 120,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.9999999950566264
+    },
+    {
+      "src_case": {
+        "q": 10,
+        "k": 20,
+        "v": 3,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.9312318042339768
+    },
+    {
+      "src_case": {
+        "q": 10,
+        "k": 20,
+        "v": 50,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.9999991743904675
+    },
+    {
+      "src_case": {
+        "q": 10,
+        "k": 20,
+        "v": 10,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.9977643922032399
+    },
+    {
+      "src_case": {
+        "q": 10,
+        "k": 20,
+        "v": 20,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.9999054426012515
+    },
+    {
+      "src_case": {
+        "q": 10,
+        "k": 20,
+        "v": 100,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.9999999602948055
+    },
+    {
+      "src_case": {
+        "q": 10,
+        "k": 20,
+        "v": 120,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.9999999792458618
+    }
+  ],
+  "pdf_data": [
+    {
+      "src_case": {
+        "q": 0.1,
+        "k": 3,
+        "v": 3,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.05487847613526332
+    },
+    {
+      "src_case": {
+        "q": 0.1,
+        "k": 10,
+        "v": 10,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 2.564099684606509e-10
+    },
+    {
+      "src_case": {
+        "q": 0.1,
+        "k": 3,
+        "v": 10,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.05494947290360002
+    },
+    {
+      "src_case": {
+        "q": 0.1,
+        "k": 10,
+        "v": 100,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 8.442593793786411e-11
+    },
+    {
+      "src_case": {
+        "q": 0.1,
+        "k": 3,
+        "v": 20,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.054964710604860405
+    },
+    {
+      "src_case": {
+        "q": 0.1,
+        "k": 10,
+        "v": 50,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 9.764441961563576e-11
+    },
+    {
+      "src_case": {
+        "q": 0.1,
+        "k": 3,
+        "v": 100,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.05497690690332341
+    },
+    {
+      "src_case": {
+        "q": 0.1,
+        "k": 3,
+        "v": 50,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.05497385731702228
+    },
+    {
+      "src_case": {
+        "q": 0.1,
+        "k": 20,
+        "v": 10,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 4.758021225803992e-22
+    },
+    {
+      "src_case": {
+        "q": 0.1,
+        "k": 3,
+        "v": 120,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.054977415200879516
+    },
+    {
+      "src_case": {
+        "q": 0.1,
+        "k": 20,
+        "v": 3,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 1.8004731453548083e-19
+    },
+    {
+      "src_case": {
+        "q": 0.1,
+        "k": 10,
+        "v": 3,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 1.5564176176604816e-09
+    },
+    {
+      "src_case": {
+        "q": 0.1,
+        "k": 20,
+        "v": 50,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 9.342768070688728e-24
+    },
+    {
+      "src_case": {
+        "q": 0.1,
+        "k": 10,
+        "v": 20,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 1.454372265306114e-10
+    },
+    {
+      "src_case": {
+        "q": 0.1,
+        "k": 20,
+        "v": 100,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 3.9138464398429654e-25
+    },
+    {
+      "src_case": {
+        "q": 0.1,
+        "k": 20,
+        "v": 20,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 5.266341131767418e-23
+    },
+    {
+      "src_case": {
+        "q": 0.1,
+        "k": 10,
+        "v": 120,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 8.234556126446594e-11
+    },
+    {
+      "src_case": {
+        "q": 0.1,
+        "k": 20,
+        "v": 120,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 9.32929780487562e-26
+    },
+    {
+      "src_case": {
+        "q": 1,
+        "k": 3,
+        "v": 3,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.36083736990527154
+    },
+    {
+      "src_case": {
+        "q": 1,
+        "k": 3,
+        "v": 50,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.4137959132282269
+    },
+    {
+      "src_case": {
+        "q": 1,
+        "k": 3,
+        "v": 20,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.4080239698771056
+    },
+    {
+      "src_case": {
+        "q": 1,
+        "k": 3,
+        "v": 10,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.398772020275752
+    },
+    {
+      "src_case": {
+        "q": 1,
+        "k": 3,
+        "v": 120,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.4160873922094346
+    },
+    {
+      "src_case": {
+        "q": 1,
+        "k": 3,
+        "v": 100,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.4157583991350054
+    },
+    {
+      "src_case": {
+        "q": 1,
+        "k": 10,
+        "v": 50,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.005210720148451848
+    },
+    {
+      "src_case": {
+        "q": 1,
+        "k": 10,
+        "v": 3,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.02575314059867804
+    },
+    {
+      "src_case": {
+        "q": 1,
+        "k": 10,
+        "v": 10,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.009782573637596617
+    },
+    {
+      "src_case": {
+        "q": 1,
+        "k": 10,
+        "v": 20,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.006818708302379005
+    },
+    {
+      "src_case": {
+        "q": 1,
+        "k": 10,
+        "v": 100,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.0047089182958790715
+    },
+    {
+      "src_case": {
+        "q": 1,
+        "k": 10,
+        "v": 120,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.004627085294166373
+    },
+    {
+      "src_case": {
+        "q": 1,
+        "k": 20,
+        "v": 3,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.0010886280311369462
+    },
+    {
+      "src_case": {
+        "q": 1,
+        "k": 20,
+        "v": 50,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 2.630674470916427e-06
+    },
+    {
+      "src_case": {
+        "q": 1,
+        "k": 20,
+        "v": 10,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 4.121713278199428e-05
+    },
+    {
+      "src_case": {
+        "q": 1,
+        "k": 20,
+        "v": 20,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 9.319506007252685e-06
+    },
+    {
+      "src_case": {
+        "q": 1,
+        "k": 20,
+        "v": 100,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 1.5585754418789747e-06
+    },
+    {
+      "src_case": {
+        "q": 1,
+        "k": 20,
+        "v": 120,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 1.4190335899441991e-06
+    },
+    {
+      "src_case": {
+        "q": 4,
+        "k": 3,
+        "v": 3,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.07185383302009114
+    },
+    {
+      "src_case": {
+        "q": 4,
+        "k": 3,
+        "v": 10,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.050268901219386576
+    },
+    {
+      "src_case": {
+        "q": 4,
+        "k": 3,
+        "v": 50,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.03321056847176124
+    },
+    {
+      "src_case": {
+        "q": 4,
+        "k": 3,
+        "v": 20,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.04044172384981084
+    },
+    {
+      "src_case": {
+        "q": 4,
+        "k": 3,
+        "v": 100,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.030571365659999617
+    },
+    {
+      "src_case": {
+        "q": 4,
+        "k": 3,
+        "v": 120,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.030120779149073032
+    },
+    {
+      "src_case": {
+        "q": 4,
+        "k": 10,
+        "v": 3,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.17501664247670937
+    },
+    {
+      "src_case": {
+        "q": 4,
+        "k": 10,
+        "v": 10,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.22374394725370736
+    },
+    {
+      "src_case": {
+        "q": 4,
+        "k": 10,
+        "v": 50,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.23246597521020534
+    },
+    {
+      "src_case": {
+        "q": 4,
+        "k": 10,
+        "v": 20,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.23239043677504484
+    },
+    {
+      "src_case": {
+        "q": 4,
+        "k": 10,
+        "v": 100,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.23057775622748988
+    },
+    {
+      "src_case": {
+        "q": 4,
+        "k": 10,
+        "v": 120,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.23012666145240815
+    },
+    {
+      "src_case": {
+        "q": 4,
+        "k": 20,
+        "v": 3,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.2073676639537027
+    },
+    {
+      "src_case": {
+        "q": 4,
+        "k": 20,
+        "v": 10,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.3245990542431859
+    },
+    {
+      "src_case": {
+        "q": 10,
+        "k": 3,
+        "v": 3,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.0033733228559870584
+    },
+    {
+      "src_case": {
+        "q": 10,
+        "k": 3,
+        "v": 10,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 7.728665739003835e-05
+    },
+    {
+      "src_case": {
+        "q": 4,
+        "k": 20,
+        "v": 20,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.38244500549096866
+    },
+    {
+      "src_case": {
+        "q": 4,
+        "k": 20,
+        "v": 100,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.45434978340834464
+    },
+    {
+      "src_case": {
+        "q": 4,
+        "k": 20,
+        "v": 50,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.43334135870667473
+    },
+    {
+      "src_case": {
+        "q": 10,
+        "k": 3,
+        "v": 100,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 2.159522630228393e-09
+    },
+    {
+      "src_case": {
+        "q": 4,
+        "k": 20,
+        "v": 120,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.45807877248528855
+    },
+    {
+      "src_case": {
+        "q": 10,
+        "k": 3,
+        "v": 50,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 3.5303467191175695e-08
+    },
+    {
+      "src_case": {
+        "q": 10,
+        "k": 3,
+        "v": 20,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 3.121281850105421e-06
+    },
+    {
+      "src_case": {
+        "q": 10,
+        "k": 3,
+        "v": 120,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 1.1901591191700855e-09
+    },
+    {
+      "src_case": {
+        "q": 10,
+        "k": 10,
+        "v": 10,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.0006784051704217357
+    },
+    {
+      "src_case": {
+        "q": 10,
+        "k": 10,
+        "v": 3,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.011845582636101885
+    },
+    {
+      "src_case": {
+        "q": 10,
+        "k": 10,
+        "v": 20,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 3.844183552674918e-05
+    },
+    {
+      "src_case": {
+        "q": 10,
+        "k": 10,
+        "v": 100,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 3.215093171597309e-08
+    },
+    {
+      "src_case": {
+        "q": 10,
+        "k": 10,
+        "v": 50,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 5.125792577534542e-07
+    },
+    {
+      "src_case": {
+        "q": 10,
+        "k": 10,
+        "v": 120,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 1.7759015355532446e-08
+    },
+    {
+      "src_case": {
+        "q": 10,
+        "k": 20,
+        "v": 10,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.0017957646258393628
+    },
+    {
+      "src_case": {
+        "q": 10,
+        "k": 20,
+        "v": 3,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.018534407764819284
+    },
+    {
+      "src_case": {
+        "q": 10,
+        "k": 20,
+        "v": 20,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 0.00013316083413164858
+    },
+    {
+      "src_case": {
+        "q": 10,
+        "k": 20,
+        "v": 50,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 2.082489228991225e-06
+    },
+    {
+      "src_case": {
+        "q": 10,
+        "k": 20,
+        "v": 100,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 1.3444226792257012e-07
+    },
+    {
+      "src_case": {
+        "q": 10,
+        "k": 20,
+        "v": 120,
+        "expected_atol": 1e-11,
+        "expected_rtol": 1e-11
+      },
+      "mp_result": 7.446912854228521e-08
+    }
+  ]
+}
\ No newline at end of file
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_axis_nan_policy.py b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_axis_nan_policy.py
new file mode 100644
index 00000000..017bd1a1
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_axis_nan_policy.py
@@ -0,0 +1,1044 @@
+# Many scipy.stats functions support `axis` and `nan_policy` parameters.
+# When the two are combined, it can be tricky to get all the behavior just
+# right. This file contains a suite of common tests for scipy.stats functions
+# that support `axis` and `nan_policy` and additional tests for some associated
+# functions in stats._util.
+
+from itertools import product, combinations_with_replacement, permutations
+import re
+import pickle
+import pytest
+
+import numpy as np
+from numpy.testing import assert_allclose, assert_equal, suppress_warnings
+from scipy import stats
+from scipy.stats._axis_nan_policy import _masked_arrays_2_sentinel_arrays
+
+
+def unpack_ttest_result(res):
+    low, high = res.confidence_interval()
+    return (res.statistic, res.pvalue, res.df, res._standard_error,
+            res._estimate, low, high)
+
+
+axis_nan_policy_cases = [
+    # function, args, kwds, number of samples, number of outputs,
+    # ... paired, unpacker function
+    # args, kwds typically aren't needed; just showing that they work
+    (stats.kruskal, tuple(), dict(), 3, 2, False, None),  # 4 samples is slow
+    (stats.ranksums, ('less',), dict(), 2, 2, False, None),
+    (stats.mannwhitneyu, tuple(), {'method': 'asymptotic'}, 2, 2, False, None),
+    (stats.wilcoxon, ('pratt',), {'mode': 'auto'}, 2, 2, True,
+     lambda res: (res.statistic, res.pvalue)),
+    (stats.wilcoxon, tuple(), dict(), 1, 2, True,
+     lambda res: (res.statistic, res.pvalue)),
+    (stats.wilcoxon, tuple(), {'mode': 'approx'}, 1, 3, True,
+     lambda res: (res.statistic, res.pvalue, res.zstatistic)),
+    (stats.gmean, tuple(), dict(), 1, 1, False, lambda x: (x,)),
+    (stats.hmean, tuple(), dict(), 1, 1, False, lambda x: (x,)),
+    (stats.pmean, (1.42,), dict(), 1, 1, False, lambda x: (x,)),
+    (stats.kurtosis, tuple(), dict(), 1, 1, False, lambda x: (x,)),
+    (stats.skew, tuple(), dict(), 1, 1, False, lambda x: (x,)),
+    (stats.kstat, tuple(), dict(), 1, 1, False, lambda x: (x,)),
+    (stats.kstatvar, tuple(), dict(), 1, 1, False, lambda x: (x,)),
+    (stats.moment, tuple(), dict(), 1, 1, False, lambda x: (x,)),
+    (stats.moment, tuple(), dict(moment=[1, 2]), 1, 2, False, None),
+    (stats.jarque_bera, tuple(), dict(), 1, 2, False, None),
+    (stats.ttest_1samp, (np.array([0]),), dict(), 1, 7, False,
+     unpack_ttest_result),
+    (stats.ttest_rel, tuple(), dict(), 2, 7, True, unpack_ttest_result)
+]
+
+# If the message is one of those expected, put nans in
+# appropriate places of `statistics` and `pvalues`
+too_small_messages = {"The input contains nan",  # for nan_policy="raise"
+                      "Degrees of freedom <= 0 for slice",
+                      "x and y should have at least 5 elements",
+                      "Data must be at least length 3",
+                      "The sample must contain at least two",
+                      "x and y must contain at least two",
+                      "division by zero",
+                      "Mean of empty slice",
+                      "Data passed to ks_2samp must not be empty",
+                      "Not enough test observations",
+                      "Not enough other observations",
+                      "At least one observation is required",
+                      "zero-size array to reduction operation maximum",
+                      "`x` and `y` must be of nonzero size.",
+                      "The exact distribution of the Wilcoxon test",
+                      "Data input must not be empty"}
+
+# If the message is one of these, results of the function may be inaccurate,
+# but NaNs should not be placed in the output
+inaccuracy_messages = {"Precision loss occurred in moment calculation",
+                       "Sample size too small for normal approximation."}
+
+
+def _mixed_data_generator(n_samples, n_repetitions, axis, rng,
+                          paired=False):
+    # generate random samples to check the response of hypothesis tests to
+    # samples with different (but broadcastable) shapes and various
+    # nan patterns (e.g. all nans, some nans, no nans) along axis-slices
+
+    data = []
+    for i in range(n_samples):
+        n_patterns = 6  # number of distinct nan patterns
+        n_obs = 20 if paired else 20 + i  # observations per axis-slice
+        x = np.ones((n_repetitions, n_patterns, n_obs)) * np.nan
+
+        for j in range(n_repetitions):
+            samples = x[j, :, :]
+
+            # case 0: axis-slice with all nans (0 reals)
+            # cases 1-3: axis-slice with 1-3 reals (the rest nans)
+            # case 4: axis-slice with mostly (all but two) reals
+            # case 5: axis slice with all reals
+            for k, n_reals in enumerate([0, 1, 2, 3, n_obs-2, n_obs]):
+                # for cases 1-3, need paired nans to be in the same place
+                indices = rng.permutation(n_obs)[:n_reals]
+                samples[k, indices] = rng.random(size=n_reals)
+
+            # permute the axis-slices just to show that order doesn't matter
+            samples[:] = rng.permutation(samples, axis=0)
+
+        # For multi-sample tests, we want to test broadcasting and check
+        # that nan policy works correctly for each nan pattern for each input.
+        # This takes care of both simultaneously.
+        new_shape = [n_repetitions] + [1]*n_samples + [n_obs]
+        new_shape[1 + i] = 6
+        x = x.reshape(new_shape)
+
+        x = np.moveaxis(x, -1, axis)
+        data.append(x)
+    return data
+
+
+def _homogeneous_data_generator(n_samples, n_repetitions, axis, rng,
+                                paired=False, all_nans=True):
+    # generate random samples to check the response of hypothesis tests to
+    # samples with different (but broadcastable) shapes and homogeneous
+    # data (all nans or all finite)
+    data = []
+    for i in range(n_samples):
+        n_obs = 20 if paired else 20 + i  # observations per axis-slice
+        shape = [n_repetitions] + [1]*n_samples + [n_obs]
+        shape[1 + i] = 2
+        x = np.ones(shape) * np.nan if all_nans else rng.random(shape)
+        x = np.moveaxis(x, -1, axis)
+        data.append(x)
+    return data
+
+
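+def _homogeneous_shapes_example():
+    # Hedged sketch of the shapes produced above; the parameter values here
+    # are illustrative assumptions, not fixtures used by this suite. Each
+    # sample varies along its own singleton axis, so the samples broadcast.
+    rng = np.random.default_rng(0)
+    data = _homogeneous_data_generator(n_samples=2, n_repetitions=3, axis=-1,
+                                       rng=rng, all_nans=False)
+    assert data[0].shape == (3, 2, 1, 20)
+    assert data[1].shape == (3, 1, 2, 21)
+
+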
+def nan_policy_1d(hypotest, data1d, unpacker, *args, n_outputs=2,
+                  nan_policy='raise', paired=False, _no_deco=True, **kwds):
+    # Reference implementation for how `nan_policy` should work for 1d samples
+
+    if nan_policy == 'raise':
+        for sample in data1d:
+            if np.any(np.isnan(sample)):
+                raise ValueError("The input contains nan values")
+
+    elif nan_policy == 'propagate':
+        # For all hypothesis tests tested, returning nans is the right thing.
+        # But many hypothesis tests don't propagate correctly (e.g. they treat
+        # np.nan the same as np.inf, which doesn't make sense when ranks are
+        # involved) so override that behavior here.
+        for sample in data1d:
+            if np.any(np.isnan(sample)):
+                return np.full(n_outputs, np.nan)
+
+    elif nan_policy == 'omit':
+        # manually omit nans (or pairs in which at least one element is nan)
+        if not paired:
+            data1d = [sample[~np.isnan(sample)] for sample in data1d]
+        else:
+            nan_mask = np.isnan(data1d[0])
+            for sample in data1d[1:]:
+                nan_mask = np.logical_or(nan_mask, np.isnan(sample))
+            data1d = [sample[~nan_mask] for sample in data1d]
+
+    return unpacker(hypotest(*data1d, *args, _no_deco=_no_deco, **kwds))
+
+
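+def _nan_policy_1d_example():
+    # Hedged usage sketch of the reference implementation above; the sample
+    # values are illustrative assumptions, not fixtures from this suite.
+    # With nan_policy='omit', the result should agree with calling the
+    # function directly on the sample with NaNs removed.
+    sample = np.array([1.0, 2.0, np.nan, 4.0])
+    res = nan_policy_1d(stats.gmean, [sample], lambda x: (x,),
+                        n_outputs=1, nan_policy='omit')
+    assert_allclose(res[0], stats.gmean(sample[~np.isnan(sample)]))
+
+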
+@pytest.mark.parametrize(("hypotest", "args", "kwds", "n_samples", "n_outputs",
+                          "paired", "unpacker"), axis_nan_policy_cases)
+@pytest.mark.parametrize(("nan_policy"), ("propagate", "omit", "raise"))
+@pytest.mark.parametrize(("axis"), (1,))
+@pytest.mark.parametrize(("data_generator"), ("mixed",))
+def test_axis_nan_policy_fast(hypotest, args, kwds, n_samples, n_outputs,
+                              paired, unpacker, nan_policy, axis,
+                              data_generator):
+    _axis_nan_policy_test(hypotest, args, kwds, n_samples, n_outputs, paired,
+                          unpacker, nan_policy, axis, data_generator)
+
+
+@pytest.mark.slow
+@pytest.mark.parametrize(("hypotest", "args", "kwds", "n_samples", "n_outputs",
+                          "paired", "unpacker"), axis_nan_policy_cases)
+@pytest.mark.parametrize(("nan_policy"), ("propagate", "omit", "raise"))
+@pytest.mark.parametrize(("axis"), range(-3, 3))
+@pytest.mark.parametrize(("data_generator"),
+                         ("all_nans", "all_finite", "mixed"))
+def test_axis_nan_policy_full(hypotest, args, kwds, n_samples, n_outputs,
+                              paired, unpacker, nan_policy, axis,
+                              data_generator):
+    _axis_nan_policy_test(hypotest, args, kwds, n_samples, n_outputs, paired,
+                          unpacker, nan_policy, axis, data_generator)
+
+
+def _axis_nan_policy_test(hypotest, args, kwds, n_samples, n_outputs, paired,
+                          unpacker, nan_policy, axis, data_generator):
+    # Tests the 1D and vectorized behavior of hypothesis tests against a
+    # reference implementation (nan_policy_1d with np.ndenumerate)
+
+    # Some hypothesis tests return a non-iterable that needs an `unpacker` to
+    # extract the statistic and p-value. For those that don't:
+    if not unpacker:
+        def unpacker(res):
+            return res
+
+    rng = np.random.default_rng(0)
+
+    # Generate multi-dimensional test data with all important combinations
+    # of patterns of nans along `axis`
+    n_repetitions = 3  # number of repetitions of each pattern
+    data_gen_kwds = {'n_samples': n_samples, 'n_repetitions': n_repetitions,
+                     'axis': axis, 'rng': rng, 'paired': paired}
+    if data_generator == 'mixed':
+        inherent_size = 6  # number of distinct types of patterns
+        data = _mixed_data_generator(**data_gen_kwds)
+    elif data_generator == 'all_nans':
+        inherent_size = 2  # hard-coded in _homogeneous_data_generator
+        data_gen_kwds['all_nans'] = True
+        data = _homogeneous_data_generator(**data_gen_kwds)
+    elif data_generator == 'all_finite':
+        inherent_size = 2  # hard-coded in _homogeneous_data_generator
+        data_gen_kwds['all_nans'] = False
+        data = _homogeneous_data_generator(**data_gen_kwds)
+
+    output_shape = [n_repetitions] + [inherent_size]*n_samples
+
+    # To generate reference behavior to compare against, loop over the axis-
+    # slices in data. Make indexing easier by moving `axis` to the end and
+    # broadcasting all samples to the same shape.
+    data_b = [np.moveaxis(sample, axis, -1) for sample in data]
+    data_b = [np.broadcast_to(sample, output_shape + [sample.shape[-1]])
+              for sample in data_b]
+    statistics = np.zeros(output_shape)
+    pvalues = np.zeros(output_shape)
+
+    for i, _ in np.ndenumerate(statistics):
+        data1d = [sample[i] for sample in data_b]
+        with np.errstate(divide='ignore', invalid='ignore'):
+            try:
+                res1d = nan_policy_1d(hypotest, data1d, unpacker, *args,
+                                      n_outputs=n_outputs,
+                                      nan_policy=nan_policy,
+                                      paired=paired, _no_deco=True, **kwds)
+
+                # Eventually we'll check the results of a single, vectorized
+                # call of `hypotest` against the arrays `statistics` and
+                # `pvalues` populated using the reference `nan_policy_1d`.
+                # But while we're at it, check the results of a 1D call to
+                # `hypotest` against the reference `nan_policy_1d`.
+                res1db = unpacker(hypotest(*data1d, *args,
+                                           nan_policy=nan_policy, **kwds))
+                assert_equal(res1db[0], res1d[0])
+                if len(res1db) == 2:
+                    assert_equal(res1db[1], res1d[1])
+
+            # When there is not enough data in 1D samples, many existing
+            # hypothesis tests raise errors instead of returning nans.
+            # For vectorized calls, we put nans in the corresponding elements
+            # of the output.
+            except (RuntimeWarning, UserWarning, ValueError,
+                    ZeroDivisionError) as e:
+
+                # whatever it is, make sure the same error is raised by both
+                # `nan_policy_1d` and `hypotest`
+                with pytest.raises(type(e), match=re.escape(str(e))):
+                    nan_policy_1d(hypotest, data1d, unpacker, *args,
+                                  n_outputs=n_outputs, nan_policy=nan_policy,
+                                  paired=paired, _no_deco=True, **kwds)
+                with pytest.raises(type(e), match=re.escape(str(e))):
+                    hypotest(*data1d, *args, nan_policy=nan_policy, **kwds)
+
+                if any([str(e).startswith(message)
+                        for message in too_small_messages]):
+                    res1d = np.full(n_outputs, np.nan)
+                elif any([str(e).startswith(message)
+                          for message in inaccuracy_messages]):
+                    with suppress_warnings() as sup:
+                        sup.filter(RuntimeWarning)
+                        sup.filter(UserWarning)
+                        res1d = nan_policy_1d(hypotest, data1d, unpacker,
+                                              *args, n_outputs=n_outputs,
+                                              nan_policy=nan_policy,
+                                              paired=paired, _no_deco=True,
+                                              **kwds)
+                else:
+                    raise e
+        statistics[i] = res1d[0]
+        if len(res1d) == 2:
+            pvalues[i] = res1d[1]
+
+    # Perform a vectorized call to the hypothesis test.
+    # If `nan_policy == 'raise'`, check that it raises the appropriate error.
+    # If not, compare the output against `statistics` and `pvalues`.
+    if nan_policy == 'raise' and not data_generator == "all_finite":
+        message = 'The input contains nan values'
+        with pytest.raises(ValueError, match=message):
+            hypotest(*data, axis=axis, nan_policy=nan_policy, *args, **kwds)
+
+    else:
+        with suppress_warnings() as sup, \
+             np.errstate(divide='ignore', invalid='ignore'):
+            sup.filter(RuntimeWarning, "Precision loss occurred in moment")
+            sup.filter(UserWarning, "Sample size too small for normal "
+                                    "approximation.")
+            res = unpacker(hypotest(*data, axis=axis, nan_policy=nan_policy,
+                                    *args, **kwds))
+        assert_allclose(res[0], statistics, rtol=1e-15)
+        assert_equal(res[0].dtype, statistics.dtype)
+
+        if len(res) == 2:
+            assert_allclose(res[1], pvalues, rtol=1e-15)
+            assert_equal(res[1].dtype, pvalues.dtype)
+
+
+@pytest.mark.parametrize(("hypotest", "args", "kwds", "n_samples", "n_outputs",
+                          "paired", "unpacker"), axis_nan_policy_cases)
+@pytest.mark.parametrize(("nan_policy"), ("propagate", "omit", "raise"))
+@pytest.mark.parametrize(("data_generator"),
+                         ("all_nans", "all_finite", "mixed", "empty"))
+def test_axis_nan_policy_axis_is_None(hypotest, args, kwds, n_samples,
+                                      n_outputs, paired, unpacker, nan_policy,
+                                      data_generator):
+    # check for correct behavior when `axis=None`
+
+    if not unpacker:
+        def unpacker(res):
+            return res
+
+    rng = np.random.default_rng(0)
+
+    if data_generator == "empty":
+        data = [rng.random((2, 0)) for i in range(n_samples)]
+    else:
+        data = [rng.random((2, 20)) for i in range(n_samples)]
+
+    if data_generator == "mixed":
+        masks = [rng.random((2, 20)) > 0.9 for i in range(n_samples)]
+        for sample, mask in zip(data, masks):
+            sample[mask] = np.nan
+    elif data_generator == "all_nans":
+        data = [sample * np.nan for sample in data]
+
+    data_raveled = [sample.ravel() for sample in data]
+
+    if nan_policy == 'raise' and data_generator not in {"all_finite", "empty"}:
+        message = 'The input contains nan values'
+
+        # check for correct behavior whether or not data is 1d to begin with
+        with pytest.raises(ValueError, match=message):
+            hypotest(*data, axis=None, nan_policy=nan_policy,
+                     *args, **kwds)
+        with pytest.raises(ValueError, match=message):
+            hypotest(*data_raveled, axis=None, nan_policy=nan_policy,
+                     *args, **kwds)
+
+    else:
+        # behavior of reference implementation with 1d input, hypotest with 1d
+        # input, and hypotest with Nd input should match, whether that means
+        # that outputs are equal or they raise the same exception
+
+        ea_str, eb_str, ec_str = None, None, None
+        with np.errstate(divide='ignore', invalid='ignore'):
+            try:
+                res1da = nan_policy_1d(hypotest, data_raveled, unpacker, *args,
+                                       n_outputs=n_outputs,
+                                       nan_policy=nan_policy, paired=paired,
+                                       _no_deco=True, **kwds)
+            except (RuntimeWarning, ValueError, ZeroDivisionError) as ea:
+                ea_str = str(ea)
+
+            try:
+                res1db = unpacker(hypotest(*data_raveled, *args,
+                                           nan_policy=nan_policy, **kwds))
+            except (RuntimeWarning, ValueError, ZeroDivisionError) as eb:
+                eb_str = str(eb)
+
+            try:
+                res1dc = unpacker(hypotest(*data, *args, axis=None,
+                                           nan_policy=nan_policy, **kwds))
+            except (RuntimeWarning, ValueError, ZeroDivisionError) as ec:
+                ec_str = str(ec)
+
+            if ea_str or eb_str or ec_str:
+                assert any([str(ea_str).startswith(message)
+                            for message in too_small_messages])
+                assert ea_str == eb_str == ec_str
+            else:
+                assert_equal(res1db, res1da)
+                assert_equal(res1dc, res1da)
+
+
+# Test keepdims for:
+#     - single-output and multi-output functions (gmean and mannwhitneyu)
+#     - Axis negative, positive, None, and tuple
+#     - 1D with no NaNs
+#     - 1D with NaN propagation
+#     - Zero-sized output
+@pytest.mark.parametrize("nan_policy", ("omit", "propagate"))
+@pytest.mark.parametrize(
+    ("hypotest", "args", "kwds", "n_samples", "unpacker"),
+    ((stats.gmean, tuple(), dict(), 1, lambda x: (x,)),
+     (stats.mannwhitneyu, tuple(), {'method': 'asymptotic'}, 2, None))
+)
+@pytest.mark.parametrize(
+    ("sample_shape", "axis_cases"),
+    (((2, 3, 3, 4), (None, 0, -1, (0, 2), (1, -1), (3, 1, 2, 0))),
+     ((10, ), (0, -1)),
+     ((20, 0), (0, 1)))
+)
+def test_keepdims(hypotest, args, kwds, n_samples, unpacker,
+                  sample_shape, axis_cases, nan_policy):
+    # test that the keepdims parameter works correctly
+    if not unpacker:
+        def unpacker(res):
+            return res
+    rng = np.random.default_rng(0)
+    data = [rng.random(sample_shape) for _ in range(n_samples)]
+    nan_data = [sample.copy() for sample in data]
+    nan_mask = [rng.random(sample_shape) < 0.2 for _ in range(n_samples)]
+    for sample, mask in zip(nan_data, nan_mask):
+        sample[mask] = np.nan
+    for axis in axis_cases:
+        expected_shape = list(sample_shape)
+        if axis is None:
+            expected_shape = np.ones(len(sample_shape))
+        else:
+            if isinstance(axis, int):
+                expected_shape[axis] = 1
+            else:
+                for ax in axis:
+                    expected_shape[ax] = 1
+        expected_shape = tuple(expected_shape)
+        res = unpacker(hypotest(*data, *args, axis=axis, keepdims=True,
+                                **kwds))
+        res_base = unpacker(hypotest(*data, *args, axis=axis, keepdims=False,
+                                     **kwds))
+        nan_res = unpacker(hypotest(*nan_data, *args, axis=axis,
+                                    keepdims=True, nan_policy=nan_policy,
+                                    **kwds))
+        nan_res_base = unpacker(hypotest(*nan_data, *args, axis=axis,
+                                         keepdims=False,
+                                         nan_policy=nan_policy, **kwds))
+        for r, r_base, rn, rn_base in zip(res, res_base, nan_res,
+                                          nan_res_base):
+            assert r.shape == expected_shape
+            r = np.squeeze(r, axis=axis)
+            assert_equal(r, r_base)
+            assert rn.shape == expected_shape
+            rn = np.squeeze(rn, axis=axis)
+            assert_equal(rn, rn_base)
+
+
+@pytest.mark.parametrize(("fun", "nsamp"),
+                         [(stats.kstat, 1),
+                          (stats.kstatvar, 1)])
+def test_hypotest_back_compat_no_axis(fun, nsamp):
+    m, n = 8, 9
+
+    rng = np.random.default_rng(0)
+    x = rng.random((nsamp, m, n))
+    res = fun(*x)
+    res2 = fun(*x, _no_deco=True)
+    res3 = fun([xi.ravel() for xi in x])
+    assert_equal(res, res2)
+    assert_equal(res, res3)
+
+
+@pytest.mark.parametrize(("axis"), (0, 1, 2))
+def test_axis_nan_policy_decorated_positional_axis(axis):
+    # Test for correct behavior of function decorated with
+    # _axis_nan_policy_decorator whether `axis` is provided as positional or
+    # keyword argument
+
+    shape = (8, 9, 10)
+    rng = np.random.default_rng(0)
+    x = rng.random(shape)
+    y = rng.random(shape)
+    res1 = stats.mannwhitneyu(x, y, True, 'two-sided', axis)
+    res2 = stats.mannwhitneyu(x, y, True, 'two-sided', axis=axis)
+    assert_equal(res1, res2)
+
+    message = "mannwhitneyu() got multiple values for argument 'axis'"
+    with pytest.raises(TypeError, match=re.escape(message)):
+        stats.mannwhitneyu(x, y, True, 'two-sided', axis, axis=axis)
+
+
+def test_axis_nan_policy_decorated_positional_args():
+    # Test for correct behavior of function decorated with
+    # _axis_nan_policy_decorator when function accepts *args
+
+    shape = (3, 8, 9, 10)
+    rng = np.random.default_rng(0)
+    x = rng.random(shape)
+    x[0, 0, 0, 0] = np.nan
+    stats.kruskal(*x)
+
+    message = "kruskal() got an unexpected keyword argument 'samples'"
+    with pytest.raises(TypeError, match=re.escape(message)):
+        stats.kruskal(samples=x)
+
+    with pytest.raises(TypeError, match=re.escape(message)):
+        stats.kruskal(*x, samples=x)
+
+
+def test_axis_nan_policy_decorated_keyword_samples():
+    # Test for correct behavior of function decorated with
+    # _axis_nan_policy_decorator whether samples are provided as positional or
+    # keyword arguments
+
+    shape = (2, 8, 9, 10)
+    rng = np.random.default_rng(0)
+    x = rng.random(shape)
+    x[0, 0, 0, 0] = np.nan
+    res1 = stats.mannwhitneyu(*x)
+    res2 = stats.mannwhitneyu(x=x[0], y=x[1])
+    assert_equal(res1, res2)
+
+    message = "mannwhitneyu() got multiple values for argument"
+    with pytest.raises(TypeError, match=re.escape(message)):
+        stats.mannwhitneyu(*x, x=x[0], y=x[1])
+
+
+@pytest.mark.parametrize(("hypotest", "args", "kwds", "n_samples", "n_outputs",
+                          "paired", "unpacker"), axis_nan_policy_cases)
+def test_axis_nan_policy_decorated_pickled(hypotest, args, kwds, n_samples,
+                                           n_outputs, paired, unpacker):
+    rng = np.random.default_rng(0)
+
+    # Some hypothesis tests return a non-iterable that needs an `unpacker` to
+    # extract the statistic and p-value. For those that don't:
+    if not unpacker:
+        def unpacker(res):
+            return res
+
+    data = rng.uniform(size=(n_samples, 2, 30))
+    pickled_hypotest = pickle.dumps(hypotest)
+    unpickled_hypotest = pickle.loads(pickled_hypotest)
+    res1 = unpacker(hypotest(*data, *args, axis=-1, **kwds))
+    res2 = unpacker(unpickled_hypotest(*data, *args, axis=-1, **kwds))
+    assert_allclose(res1, res2, rtol=1e-12)
+
+
+def test_check_empty_inputs():
+    # Test that _check_empty_inputs is doing its job, at least for single-
+    # sample inputs. (Multi-sample functionality is tested below.)
+    # If the input sample is not empty, it should return None.
+    # If the input sample is empty, it should return an array of NaNs or an
+    # empty array of appropriate shape. np.mean is used as a reference for the
+    # output because, like the statistics calculated by these functions,
+    # it works along and "consumes" `axis` but preserves the other axes.
+    for i in range(5):
+        for combo in combinations_with_replacement([0, 1, 2], i):
+            for axis in range(len(combo)):
+                samples = (np.zeros(combo),)
+                output = stats._axis_nan_policy._check_empty_inputs(samples,
+                                                                    axis)
+                if output is not None:
+                    with np.testing.suppress_warnings() as sup:
+                        sup.filter(RuntimeWarning, "Mean of empty slice.")
+                        sup.filter(RuntimeWarning, "invalid value encountered")
+                        reference = samples[0].mean(axis=axis)
+                    np.testing.assert_equal(output, reference)
+
+
+def _check_arrays_broadcastable(arrays, axis):
+    # https://numpy.org/doc/stable/user/basics.broadcasting.html
+    # "When operating on two arrays, NumPy compares their shapes element-wise.
+    # It starts with the trailing (i.e. rightmost) dimensions and works its
+    # way left.
+    # Two dimensions are compatible when
+    # 1. they are equal, or
+    # 2. one of them is 1
+    # ...
+    # Arrays do not need to have the same number of dimensions."
+    # (Clarification: if the arrays are compatible according to the criteria
+    #  above and an array runs out of dimensions, it is still compatible.)
+    # Below, we follow the rules above except ignoring `axis`
+
+    n_dims = max([arr.ndim for arr in arrays])
+    if axis is not None:
+        # convert to negative axis
+        axis = (-n_dims + axis) if axis >= 0 else axis
+
+    for dim in range(1, n_dims+1):  # we'll index from -1 to -n_dims, inclusive
+        if -dim == axis:
+            continue  # ignore lengths along `axis`
+
+        dim_lengths = set()
+        for arr in arrays:
+            if dim <= arr.ndim and arr.shape[-dim] != 1:
+                dim_lengths.add(arr.shape[-dim])
+
+        if len(dim_lengths) > 1:
+            return False
+    return True
+
+
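+def _check_arrays_broadcastable_example():
+    # Hedged sketch of the rules above; the shapes are illustrative
+    # assumptions. (4, 1, 6) and (5, 6) are NumPy-broadcastable, and with
+    # axis=-1 the lengths along the last dimension are ignored, so
+    # (4, 1, 7) and (5, 6) are also considered compatible.
+    assert _check_arrays_broadcastable(
+        [np.empty((4, 1, 6)), np.empty((5, 6))], axis=None)
+    assert _check_arrays_broadcastable(
+        [np.empty((4, 1, 7)), np.empty((5, 6))], axis=-1)
+    assert not _check_arrays_broadcastable(
+        [np.empty((4, 1, 7)), np.empty((5, 6))], axis=0)
+
+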
+@pytest.mark.slow
+@pytest.mark.parametrize(("hypotest", "args", "kwds", "n_samples", "n_outputs",
+                          "paired", "unpacker"), axis_nan_policy_cases)
+def test_empty(hypotest, args, kwds, n_samples, n_outputs, paired, unpacker):
+    # test for correct output shape when at least one input is empty
+
+    if unpacker is None:
+        unpacker = lambda res: (res[0], res[1])  # noqa: E731
+
+    def small_data_generator(n_samples, n_dims):
+
+        def small_sample_generator(n_dims):
+            # return all possible "small" arrays in up to n_dim dimensions
+            for i in n_dims:
+                # "small" means with size along dimension either 0 or 1
+                for combo in combinations_with_replacement([0, 1, 2], i):
+                    yield np.zeros(combo)
+
+        # yield all possible combinations of small samples
+        gens = [small_sample_generator(n_dims) for i in range(n_samples)]
+        for i in product(*gens):
+            yield i
+
+    n_dims = [2, 3]
+    for samples in small_data_generator(n_samples, n_dims):
+
+        # this test is only for arrays of zero size
+        if not any((sample.size == 0 for sample in samples)):
+            continue
+
+        max_axis = max((sample.ndim for sample in samples))
+
+        # need to test for all valid values of `axis` parameter, too
+        for axis in range(-max_axis, max_axis):
+
+            try:
+                # After broadcasting, all arrays are the same shape, so
+                # the shape of the output should be the same as a single-
+                # sample statistic. Use np.mean as a reference.
+                concat = stats._stats_py._broadcast_concatenate(samples, axis)
+                with np.testing.suppress_warnings() as sup:
+                    sup.filter(RuntimeWarning, "Mean of empty slice.")
+                    sup.filter(RuntimeWarning, "invalid value encountered")
+                    expected = np.mean(concat, axis=axis) * np.nan
+
+                res = hypotest(*samples, *args, axis=axis, **kwds)
+                res = unpacker(res)
+
+                for i in range(n_outputs):
+                    assert_equal(res[i], expected)
+
+            except ValueError:
+                # confirm that the arrays truly are not broadcastable
+                assert not _check_arrays_broadcastable(samples, axis)
+
+                # confirm that _both_ `_broadcast_concatenate` and `hypotest`
+                # produce this information.
+                message = "Array shapes are incompatible for broadcasting."
+                with pytest.raises(ValueError, match=message):
+                    stats._stats_py._broadcast_concatenate(samples, axis)
+                with pytest.raises(ValueError, match=message):
+                    hypotest(*samples, *args, axis=axis, **kwds)
+
+
+def test_masked_array_2_sentinel_array():
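+    # _masked_arrays_2_sentinel_arrays replaces masked elements of a masked
+    # array with a "sentinel" value that does not otherwise appear in the
+    # data, so plain ndarrays can carry the mask information through the
+    # nan_policy machinery.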
+    # prepare arrays
+    np.random.seed(0)
+    A = np.random.rand(10, 11, 12)
+    B = np.random.rand(12)
+    mask = A < 0.5
+    A = np.ma.masked_array(A, mask)
+
+    # set arbitrary elements to special values
+    # (these values might have been considered for use as sentinel values)
+    max_float = np.finfo(np.float64).max
+    max_float2 = np.nextafter(max_float, -np.inf)
+    max_float3 = np.nextafter(max_float2, -np.inf)
+    A[3, 4, 1] = np.nan
+    A[4, 5, 2] = np.inf
+    A[5, 6, 3] = max_float
+    B[8] = np.nan
+    B[7] = np.inf
+    B[6] = max_float2
+
+    # convert masked A to array with sentinel value, don't modify B
+    out_arrays, sentinel = _masked_arrays_2_sentinel_arrays([A, B])
+    A_out, B_out = out_arrays
+
+    # check that good sentinel value was chosen (according to intended logic)
+    assert (sentinel != max_float) and (sentinel != max_float2)
+    assert sentinel == max_float3
+
+    # check that output arrays are as intended
+    A_reference = A.data
+    A_reference[A.mask] = sentinel
+    np.testing.assert_array_equal(A_out, A_reference)
+    assert B_out is B
+
+
+def test_masked_dtype():
+    # When _masked_arrays_2_sentinel_arrays was first added, it always
+    # upcast the arrays to np.float64. After gh16662, check expected promotion
+    # and that the expected sentinel is found.
+
+    # these are important because the max of the promoted dtype is the first
+    # candidate to be the sentinel value
+    max16 = np.iinfo(np.int16).max
+    max128c = np.finfo(np.complex128).max
+
+    # a is a regular array, b has masked elements, and c has no masked elements
+    a = np.array([1, 2, max16], dtype=np.int16)
+    b = np.ma.array([1, 2, 1], dtype=np.int8, mask=[0, 1, 0])
+    c = np.ma.array([1, 2, 1], dtype=np.complex128, mask=[0, 0, 0])
+
+    # check integer masked -> sentinel conversion
+    out_arrays, sentinel = _masked_arrays_2_sentinel_arrays([a, b])
+    a_out, b_out = out_arrays
+    assert sentinel == max16-1  # not max16 because max16 was in the data
+    assert b_out.dtype == np.int16  # check expected promotion
+    assert_allclose(b_out, [b[0], sentinel, b[-1]])  # check sentinel placement
+    assert a_out is a  # not a masked array, so left untouched
+    assert not isinstance(b_out, np.ma.MaskedArray)  # b became regular array
+
+    # similarly with complex
+    out_arrays, sentinel = _masked_arrays_2_sentinel_arrays([b, c])
+    b_out, c_out = out_arrays
+    assert sentinel == max128c  # max128c was not in the data
+    assert b_out.dtype == np.complex128  # b got promoted
+    assert_allclose(b_out, [b[0], sentinel, b[-1]])  # check sentinel placement
+    assert not isinstance(b_out, np.ma.MaskedArray)  # b became regular array
+    assert not isinstance(c_out, np.ma.MaskedArray)  # c became regular array
+
+    # Also, check edge case when a sentinel value cannot be found in the data
+    min8, max8 = np.iinfo(np.int8).min, np.iinfo(np.int8).max
+    a = np.arange(min8, max8+1, dtype=np.int8)  # use all possible values
+    mask1 = np.zeros_like(a, dtype=bool)
+    mask0 = np.zeros_like(a, dtype=bool)
+
+    # a masked value can be used as the sentinel
+    mask1[1] = True
+    a1 = np.ma.array(a, mask=mask1)
+    out_arrays, sentinel = _masked_arrays_2_sentinel_arrays([a1])
+    assert sentinel == min8+1
+
+    # unless it's the smallest possible; skipped for simplicity (see code)
+    mask0[0] = True
+    a0 = np.ma.array(a, mask=mask0)
+    message = "This function replaces masked elements with sentinel..."
+    with pytest.raises(ValueError, match=message):
+        _masked_arrays_2_sentinel_arrays([a0])
+
+    # test that dtype is preserved in functions
+    a = np.ma.array([1, 2, 3], mask=[0, 1, 0], dtype=np.float32)
+    assert stats.gmean(a).dtype == np.float32
+
+
+def test_masked_stat_1d():
+    # basic test of _axis_nan_policy_factory with 1D masked sample
+    males = [19, 22, 16, 29, 24]
+    females = [20, 11, 17, 12]
+    res = stats.mannwhitneyu(males, females)
+
+    # same result when extra nan is omitted
+    females2 = [20, 11, 17, np.nan, 12]
+    res2 = stats.mannwhitneyu(males, females2, nan_policy='omit')
+    np.testing.assert_array_equal(res2, res)
+
+    # same result when extra element is masked
+    females3 = [20, 11, 17, 1000, 12]
+    mask3 = [False, False, False, True, False]
+    females3 = np.ma.masked_array(females3, mask=mask3)
+    res3 = stats.mannwhitneyu(males, females3)
+    np.testing.assert_array_equal(res3, res)
+
+    # same result when extra nan is omitted and additional element is masked
+    females4 = [20, 11, 17, np.nan, 1000, 12]
+    mask4 = [False, False, False, False, True, False]
+    females4 = np.ma.masked_array(females4, mask=mask4)
+    res4 = stats.mannwhitneyu(males, females4, nan_policy='omit')
+    np.testing.assert_array_equal(res4, res)
+
+    # same result when extra elements, including nan, are masked
+    females5 = [20, 11, 17, np.nan, 1000, 12]
+    mask5 = [False, False, False, True, True, False]
+    females5 = np.ma.masked_array(females5, mask=mask5)
+    res5 = stats.mannwhitneyu(males, females5, nan_policy='propagate')
+    res6 = stats.mannwhitneyu(males, females5, nan_policy='raise')
+    np.testing.assert_array_equal(res5, res)
+    np.testing.assert_array_equal(res6, res)
+
+
+@pytest.mark.parametrize(("axis"), range(-3, 3))
+def test_masked_stat_3d(axis):
+    # basic test of _axis_nan_policy_factory with 3D masked sample
+    np.random.seed(0)
+    a = np.random.rand(3, 4, 5)
+    b = np.random.rand(4, 5)
+    c = np.random.rand(4, 1)
+
+    mask_a = a < 0.1
+    mask_c = [False, False, False, True]
+    a_masked = np.ma.masked_array(a, mask=mask_a)
+    c_masked = np.ma.masked_array(c, mask=mask_c)
+
+    a_nans = a.copy()
+    a_nans[mask_a] = np.nan
+    c_nans = c.copy()
+    c_nans[mask_c] = np.nan
+
+    res = stats.kruskal(a_nans, b, c_nans, nan_policy='omit', axis=axis)
+    res2 = stats.kruskal(a_masked, b, c_masked, axis=axis)
+    np.testing.assert_array_equal(res, res2)
+
+
+def test_mixed_mask_nan_1():
+    # targeted test of _axis_nan_policy_factory with 2D masked sample:
+    # omitting samples with masks and nan_policy='omit' are equivalent
+    # also checks paired-sample sentinel value removal
+    m, n = 3, 20
+    axis = -1
+
+    np.random.seed(0)
+    a = np.random.rand(m, n)
+    b = np.random.rand(m, n)
+    mask_a1 = np.random.rand(m, n) < 0.2
+    mask_a2 = np.random.rand(m, n) < 0.1
+    mask_b1 = np.random.rand(m, n) < 0.15
+    mask_b2 = np.random.rand(m, n) < 0.15
+    mask_a1[2, :] = True
+
+    a_nans = a.copy()
+    b_nans = b.copy()
+    a_nans[mask_a1 | mask_a2] = np.nan
+    b_nans[mask_b1 | mask_b2] = np.nan
+
+    a_masked1 = np.ma.masked_array(a, mask=mask_a1)
+    b_masked1 = np.ma.masked_array(b, mask=mask_b1)
+    a_masked1[mask_a2] = np.nan
+    b_masked1[mask_b2] = np.nan
+
+    a_masked2 = np.ma.masked_array(a, mask=mask_a2)
+    b_masked2 = np.ma.masked_array(b, mask=mask_b2)
+    a_masked2[mask_a1] = np.nan
+    b_masked2[mask_b1] = np.nan
+
+    a_masked3 = np.ma.masked_array(a, mask=(mask_a1 | mask_a2))
+    b_masked3 = np.ma.masked_array(b, mask=(mask_b1 | mask_b2))
+
+    res = stats.wilcoxon(a_nans, b_nans, nan_policy='omit', axis=axis)
+    res1 = stats.wilcoxon(a_masked1, b_masked1, nan_policy='omit', axis=axis)
+    res2 = stats.wilcoxon(a_masked2, b_masked2, nan_policy='omit', axis=axis)
+    res3 = stats.wilcoxon(a_masked3, b_masked3, nan_policy='raise', axis=axis)
+    res4 = stats.wilcoxon(a_masked3, b_masked3,
+                          nan_policy='propagate', axis=axis)
+
+    np.testing.assert_array_equal(res1, res)
+    np.testing.assert_array_equal(res2, res)
+    np.testing.assert_array_equal(res3, res)
+    np.testing.assert_array_equal(res4, res)
+
+
+def test_mixed_mask_nan_2():
+    # targeted test of _axis_nan_policy_factory with 2D masked sample:
+    # check for expected interaction between masks and nans
+
+    # Cases here are
+    # [mixed nan/mask, all nans, all masked,
+    # unmasked nan, masked nan, unmasked non-nan]
+    a = [[1, np.nan, 2], [np.nan, np.nan, np.nan], [1, 2, 3],
+         [1, np.nan, 3], [1, np.nan, 3], [1, 2, 3]]
+    mask = [[1, 0, 1], [0, 0, 0], [1, 1, 1],
+            [0, 0, 0], [0, 1, 0], [0, 0, 0]]
+    a_masked = np.ma.masked_array(a, mask=mask)
+    b = [[4, 5, 6]]
+    ref1 = stats.ranksums([1, 3], [4, 5, 6])
+    ref2 = stats.ranksums([1, 2, 3], [4, 5, 6])
+
+    # nan_policy = 'omit'
+    # all elements are removed from first three rows
+    # middle element is removed from fourth and fifth rows
+    # no elements removed from last row
+    res = stats.ranksums(a_masked, b, nan_policy='omit', axis=-1)
+    stat_ref = [np.nan, np.nan, np.nan,
+                ref1.statistic, ref1.statistic, ref2.statistic]
+    p_ref = [np.nan, np.nan, np.nan,
+             ref1.pvalue, ref1.pvalue, ref2.pvalue]
+    np.testing.assert_array_equal(res.statistic, stat_ref)
+    np.testing.assert_array_equal(res.pvalue, p_ref)
+
+    # nan_policy = 'propagate'
+    # nans propagate in first, second, and fourth row
+    # all elements are removed by mask from third row
+    # middle element is removed from fifth row
+    # no elements removed from last row
+    res = stats.ranksums(a_masked, b, nan_policy='propagate', axis=-1)
+    stat_ref = [np.nan, np.nan, np.nan,
+                np.nan, ref1.statistic, ref2.statistic]
+    p_ref = [np.nan, np.nan, np.nan,
+             np.nan, ref1.pvalue, ref2.pvalue]
+    np.testing.assert_array_equal(res.statistic, stat_ref)
+    np.testing.assert_array_equal(res.pvalue, p_ref)
+
+
+def test_axis_None_vs_tuple():
+    # `axis` `None` should be equivalent to tuple with all axes
+    shape = (3, 8, 9, 10)
+    rng = np.random.default_rng(0)
+    x = rng.random(shape)
+    res = stats.kruskal(*x, axis=None)
+    res2 = stats.kruskal(*x, axis=(0, 1, 2))
+    np.testing.assert_array_equal(res, res2)
+
+
+def test_axis_None_vs_tuple_with_broadcasting():
+    # `axis` `None` should be equivalent to tuple with all axes,
+    # which should be equivalent to raveling the arrays before passing them
+    rng = np.random.default_rng(0)
+    x = rng.random((5, 1))
+    y = rng.random((1, 5))
+    x2, y2 = np.broadcast_arrays(x, y)
+
+    res0 = stats.mannwhitneyu(x.ravel(), y.ravel())
+    res1 = stats.mannwhitneyu(x, y, axis=None)
+    res2 = stats.mannwhitneyu(x, y, axis=(0, 1))
+    res3 = stats.mannwhitneyu(x2.ravel(), y2.ravel())
+
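+    # res3 differs because broadcasting before raveling duplicates elements:
+    # x2.ravel() and y2.ravel() each contain 25 elements, not 5.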
+    assert res1 == res0
+    assert res2 == res0
+    assert res3 != res0
+
+
+@pytest.mark.parametrize(("axis"),
+                         list(permutations(range(-3, 3), 2)) + [(-4, 1)])
+def test_other_axis_tuples(axis):
+    # Check that _axis_nan_policy_factory treats all `axis` tuples as expected
+    rng = np.random.default_rng(0)
+    shape_x = (4, 5, 6)
+    shape_y = (1, 6)
+    x = rng.random(shape_x)
+    y = rng.random(shape_y)
+    axis_original = axis
+
+    # convert axis elements to positive
+    axis = tuple([(i if i >= 0 else 3 + i) for i in axis])
+    axis = sorted(axis)
+
+    if len(set(axis)) != len(axis):
+        message = "`axis` must contain only distinct elements"
+        with pytest.raises(np.AxisError, match=re.escape(message)):
+            stats.mannwhitneyu(x, y, axis=axis_original)
+        return
+
+    if axis[0] < 0 or axis[-1] > 2:
+        message = "`axis` is out of bounds for array of dimension 3"
+        with pytest.raises(np.AxisError, match=re.escape(message)):
+            stats.mannwhitneyu(x, y, axis=axis_original)
+        return
+
+    res = stats.mannwhitneyu(x, y, axis=axis_original)
+
+    # reference behavior
+    not_axis = {0, 1, 2} - set(axis)  # which axis is not part of `axis`
+    not_axis = next(iter(not_axis))  # take it out of the set
+
+    x2 = x
+    shape_y_broadcasted = [1, 1, 6]
+    shape_y_broadcasted[not_axis] = shape_x[not_axis]
+    y2 = np.broadcast_to(y, shape_y_broadcasted)
+
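+    # collapse the two dimensions named by `axis` into one so that the
+    # tuple-axis result can be checked against a single-integer-axis call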
+    m = x2.shape[not_axis]
+    x2 = np.moveaxis(x2, axis, (1, 2))
+    y2 = np.moveaxis(y2, axis, (1, 2))
+    x2 = np.reshape(x2, (m, -1))
+    y2 = np.reshape(y2, (m, -1))
+    res2 = stats.mannwhitneyu(x2, y2, axis=1)
+
+    np.testing.assert_array_equal(res, res2)
+
+
+@pytest.mark.parametrize(("weighted_fun_name"), ["gmean", "hmean", "pmean"])
+def test_mean_mixed_mask_nan_weights(weighted_fun_name):
+    # targeted test of _axis_nan_policy_factory with 2D masked sample:
+    # omitting samples with masks and nan_policy='omit' are equivalent
+    # also checks paired-sample sentinel value removal
+
+    if weighted_fun_name == 'pmean':
+        def weighted_fun(a, **kwargs):
+            return stats.pmean(a, p=0.42, **kwargs)
+    else:
+        weighted_fun = getattr(stats, weighted_fun_name)
+
+    m, n = 3, 20
+    axis = -1
+
+    rng = np.random.default_rng(6541968121)
+    a = rng.uniform(size=(m, n))
+    b = rng.uniform(size=(m, n))
+    mask_a1 = rng.uniform(size=(m, n)) < 0.2
+    mask_a2 = rng.uniform(size=(m, n)) < 0.1
+    mask_b1 = rng.uniform(size=(m, n)) < 0.15
+    mask_b2 = rng.uniform(size=(m, n)) < 0.15
+    mask_a1[2, :] = True
+
+    a_nans = a.copy()
+    b_nans = b.copy()
+    a_nans[mask_a1 | mask_a2] = np.nan
+    b_nans[mask_b1 | mask_b2] = np.nan
+
+    a_masked1 = np.ma.masked_array(a, mask=mask_a1)
+    b_masked1 = np.ma.masked_array(b, mask=mask_b1)
+    a_masked1[mask_a2] = np.nan
+    b_masked1[mask_b2] = np.nan
+
+    a_masked2 = np.ma.masked_array(a, mask=mask_a2)
+    b_masked2 = np.ma.masked_array(b, mask=mask_b2)
+    a_masked2[mask_a1] = np.nan
+    b_masked2[mask_b1] = np.nan
+
+    a_masked3 = np.ma.masked_array(a, mask=(mask_a1 | mask_a2))
+    b_masked3 = np.ma.masked_array(b, mask=(mask_b1 | mask_b2))
+
+    mask_all = (mask_a1 | mask_a2 | mask_b1 | mask_b2)
+    a_masked4 = np.ma.masked_array(a, mask=mask_all)
+    b_masked4 = np.ma.masked_array(b, mask=mask_all)
+
+    with np.testing.suppress_warnings() as sup:
+        message = 'invalid value encountered'
+        sup.filter(RuntimeWarning, message)
+        res = weighted_fun(a_nans, weights=b_nans,
+                           nan_policy='omit', axis=axis)
+        res1 = weighted_fun(a_masked1, weights=b_masked1,
+                            nan_policy='omit', axis=axis)
+        res2 = weighted_fun(a_masked2, weights=b_masked2,
+                            nan_policy='omit', axis=axis)
+        res3 = weighted_fun(a_masked3, weights=b_masked3,
+                            nan_policy='raise', axis=axis)
+        res4 = weighted_fun(a_masked3, weights=b_masked3,
+                            nan_policy='propagate', axis=axis)
+        # Would test with a_masked3/b_masked3, but there is a bug in np.average
+        # that causes a bug in _no_deco mean with masked weights. Would use
+        # np.ma.average, but that causes other problems. See numpy/numpy#7330.
+        if weighted_fun_name not in {'pmean', 'gmean'}:
+            weighted_fun_ma = getattr(stats.mstats, weighted_fun_name)
+            res5 = weighted_fun_ma(a_masked4, weights=b_masked4,
+                                   axis=axis, _no_deco=True)
+
+    np.testing.assert_array_equal(res1, res)
+    np.testing.assert_array_equal(res2, res)
+    np.testing.assert_array_equal(res3, res)
+    np.testing.assert_array_equal(res4, res)
+    if weighted_fun_name not in {'pmean', 'gmean'}:
+        # _no_deco mean returns masked array, last element was masked
+        np.testing.assert_allclose(res5.compressed(), res[~np.isnan(res)])
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_binned_statistic.py b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_binned_statistic.py
new file mode 100644
index 00000000..38fab2ff
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_binned_statistic.py
@@ -0,0 +1,568 @@
+import numpy as np
+from numpy.testing import assert_allclose
+import pytest
+from pytest import raises as assert_raises
+from scipy.stats import (binned_statistic, binned_statistic_2d,
+                         binned_statistic_dd)
+from scipy._lib._util import check_random_state
+
+from .common_tests import check_named_results
+
+
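+def _binned_statistic_example():
+    # Hedged usage sketch; the inputs are illustrative assumptions, not
+    # fixtures from the class below. binned_statistic generalizes
+    # np.histogram: with statistic='mean' it averages `values` within each
+    # bin of `x`.
+    stat, edges, binnumber = binned_statistic(
+        [1.0, 2.0, 3.0, 4.0], values=[10.0, 20.0, 30.0, 40.0],
+        statistic='mean', bins=2)
+    assert_allclose(stat, [15.0, 35.0])
+    assert_allclose(edges, [1.0, 2.5, 4.0])
+    assert_allclose(binnumber, [1, 1, 2, 2])  # bin numbers are 1-indexed
+
+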
+class TestBinnedStatistic:
+
+    @classmethod
+    def setup_class(cls):
+        rng = check_random_state(9865)
+        cls.x = rng.uniform(size=100)
+        cls.y = rng.uniform(size=100)
+        cls.v = rng.uniform(size=100)
+        cls.X = rng.uniform(size=(100, 3))
+        cls.w = rng.uniform(size=100)
+        cls.u = rng.uniform(size=100) + 1e6
+
+    def test_1d_count(self):
+        x = self.x
+        v = self.v
+
+        count1, edges1, bc = binned_statistic(x, v, 'count', bins=10)
+        count2, edges2 = np.histogram(x, bins=10)
+
+        assert_allclose(count1, count2)
+        assert_allclose(edges1, edges2)
+
+    def test_gh5927(self):
+        # smoke test for gh5927 - binned_statistic was using `is` for string
+        # comparison
+        x = self.x
+        v = self.v
+        statistics = ['mean', 'median', 'count', 'sum']
+        for statistic in statistics:
+            binned_statistic(x, v, statistic, bins=10)
+
+    def test_big_number_std(self):
+        # tests for numerical stability of std calculation
+        # see issue gh-10126 for more
+        x = self.x
+        u = self.u
+        stat1, edges1, bc = binned_statistic(x, u, 'std', bins=10)
+        stat2, edges2, bc = binned_statistic(x, u, np.std, bins=10)
+
+        assert_allclose(stat1, stat2)
+
+    def test_empty_bins_std(self):
+        # tests that std gives nan for empty bins
+        x = self.x
+        u = self.u
+        print(binned_statistic(x, u, 'count', bins=1000))
+        stat1, edges1, bc = binned_statistic(x, u, 'std', bins=1000)
+        stat2, edges2, bc = binned_statistic(x, u, np.std, bins=1000)
+
+        assert_allclose(stat1, stat2)
+
+    def test_non_finite_inputs_and_int_bins(self):
+        # if either `values` or `sample` contains np.inf or np.nan, a
+        # ValueError should be raised; see issue gh-9010 for more
+        x = self.x
+        u = self.u
+        orig = u[0]
+        u[0] = np.inf
+        assert_raises(ValueError, binned_statistic, u, x, 'std', bins=10)
+        # also check non-Python integer types for `bins`, e.g. np.int8, np.int64
+        assert_raises(ValueError, binned_statistic, u, x, 'std',
+                      bins=np.int64(10))
+        u[0] = np.nan
+        assert_raises(ValueError, binned_statistic, u, x, 'count', bins=10)
+        # restore the original value, since u is shared class-level state
+        u[0] = orig
+
+    def test_1d_result_attributes(self):
+        x = self.x
+        v = self.v
+
+        res = binned_statistic(x, v, 'count', bins=10)
+        attributes = ('statistic', 'bin_edges', 'binnumber')
+        check_named_results(res, attributes)
+
+    def test_1d_sum(self):
+        x = self.x
+        v = self.v
+
+        sum1, edges1, bc = binned_statistic(x, v, 'sum', bins=10)
+        sum2, edges2 = np.histogram(x, bins=10, weights=v)
+
+        assert_allclose(sum1, sum2)
+        assert_allclose(edges1, edges2)
+
+    def test_1d_mean(self):
+        x = self.x
+        v = self.v
+
+        stat1, edges1, bc = binned_statistic(x, v, 'mean', bins=10)
+        stat2, edges2, bc = binned_statistic(x, v, np.mean, bins=10)
+
+        assert_allclose(stat1, stat2)
+        assert_allclose(edges1, edges2)
+
+    def test_1d_std(self):
+        x = self.x
+        v = self.v
+
+        stat1, edges1, bc = binned_statistic(x, v, 'std', bins=10)
+        stat2, edges2, bc = binned_statistic(x, v, np.std, bins=10)
+
+        assert_allclose(stat1, stat2)
+        assert_allclose(edges1, edges2)
+
+    def test_1d_min(self):
+        x = self.x
+        v = self.v
+
+        stat1, edges1, bc = binned_statistic(x, v, 'min', bins=10)
+        stat2, edges2, bc = binned_statistic(x, v, np.min, bins=10)
+
+        assert_allclose(stat1, stat2)
+        assert_allclose(edges1, edges2)
+
+    def test_1d_max(self):
+        x = self.x
+        v = self.v
+
+        stat1, edges1, bc = binned_statistic(x, v, 'max', bins=10)
+        stat2, edges2, bc = binned_statistic(x, v, np.max, bins=10)
+
+        assert_allclose(stat1, stat2)
+        assert_allclose(edges1, edges2)
+
+    def test_1d_median(self):
+        x = self.x
+        v = self.v
+
+        stat1, edges1, bc = binned_statistic(x, v, 'median', bins=10)
+        stat2, edges2, bc = binned_statistic(x, v, np.median, bins=10)
+
+        assert_allclose(stat1, stat2)
+        assert_allclose(edges1, edges2)
+
+    def test_1d_bincode(self):
+        x = self.x[:20]
+        v = self.v[:20]
+
+        count1, edges1, bc = binned_statistic(x, v, 'count', bins=3)
+        bc2 = np.array([3, 2, 1, 3, 2, 3, 3, 3, 3, 1, 1, 3, 3, 1, 2, 3, 1,
+                        1, 2, 1])
+
+        bcount = [(bc == i).sum() for i in np.unique(bc)]
+
+        assert_allclose(bc, bc2)
+        assert_allclose(bcount, count1)
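+        # (Note, added for orientation: binnumber is 1-based, so the values
+        # 1..3 above index the three bins directly, which is what makes the
+        # np.unique counting above line up with count1.)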
+
+    def test_1d_range_keyword(self):
+        # Regression test for gh-3063, range can be (min, max) or [(min, max)]
+        np.random.seed(9865)
+        x = np.arange(30)
+        data = np.random.random(30)
+
+        mean, bins, _ = binned_statistic(x[:15], data[:15])
+        mean_range, bins_range, _ = binned_statistic(x, data, range=[(0, 14)])
+        mean_range2, bins_range2, _ = binned_statistic(x, data, range=(0, 14))
+
+        assert_allclose(mean, mean_range)
+        assert_allclose(bins, bins_range)
+        assert_allclose(mean, mean_range2)
+        assert_allclose(bins, bins_range2)
+
+    def test_1d_multi_values(self):
+        x = self.x
+        v = self.v
+        w = self.w
+
+        stat1v, edges1v, bc1v = binned_statistic(x, v, 'mean', bins=10)
+        stat1w, edges1w, bc1w = binned_statistic(x, w, 'mean', bins=10)
+        stat2, edges2, bc2 = binned_statistic(x, [v, w], 'mean', bins=10)
+
+        assert_allclose(stat2[0], stat1v)
+        assert_allclose(stat2[1], stat1w)
+        assert_allclose(edges1v, edges2)
+        assert_allclose(bc1v, bc2)
+
+    def test_2d_count(self):
+        x = self.x
+        y = self.y
+        v = self.v
+
+        count1, binx1, biny1, bc = binned_statistic_2d(
+            x, y, v, 'count', bins=5)
+        count2, binx2, biny2 = np.histogram2d(x, y, bins=5)
+
+        assert_allclose(count1, count2)
+        assert_allclose(binx1, binx2)
+        assert_allclose(biny1, biny2)
+
+    def test_2d_result_attributes(self):
+        x = self.x
+        y = self.y
+        v = self.v
+
+        res = binned_statistic_2d(x, y, v, 'count', bins=5)
+        attributes = ('statistic', 'x_edge', 'y_edge', 'binnumber')
+        check_named_results(res, attributes)
+
+    def test_2d_sum(self):
+        x = self.x
+        y = self.y
+        v = self.v
+
+        sum1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'sum', bins=5)
+        sum2, binx2, biny2 = np.histogram2d(x, y, bins=5, weights=v)
+
+        assert_allclose(sum1, sum2)
+        assert_allclose(binx1, binx2)
+        assert_allclose(biny1, biny2)
+
+    def test_2d_mean(self):
+        x = self.x
+        y = self.y
+        v = self.v
+
+        stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'mean', bins=5)
+        stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.mean, bins=5)
+
+        assert_allclose(stat1, stat2)
+        assert_allclose(binx1, binx2)
+        assert_allclose(biny1, biny2)
+
+    def test_2d_mean_unicode(self):
+        x = self.x
+        y = self.y
+        v = self.v
+        stat1, binx1, biny1, bc = binned_statistic_2d(
+            x, y, v, 'mean', bins=5)
+        stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.mean, bins=5)
+        assert_allclose(stat1, stat2)
+        assert_allclose(binx1, binx2)
+        assert_allclose(biny1, biny2)
+
+    def test_2d_std(self):
+        x = self.x
+        y = self.y
+        v = self.v
+
+        stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'std', bins=5)
+        stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.std, bins=5)
+
+        assert_allclose(stat1, stat2)
+        assert_allclose(binx1, binx2)
+        assert_allclose(biny1, biny2)
+
+    def test_2d_min(self):
+        x = self.x
+        y = self.y
+        v = self.v
+
+        stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'min', bins=5)
+        stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.min, bins=5)
+
+        assert_allclose(stat1, stat2)
+        assert_allclose(binx1, binx2)
+        assert_allclose(biny1, biny2)
+
+    def test_2d_max(self):
+        x = self.x
+        y = self.y
+        v = self.v
+
+        stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'max', bins=5)
+        stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.max, bins=5)
+
+        assert_allclose(stat1, stat2)
+        assert_allclose(binx1, binx2)
+        assert_allclose(biny1, biny2)
+
+    def test_2d_median(self):
+        x = self.x
+        y = self.y
+        v = self.v
+
+        stat1, binx1, biny1, bc = binned_statistic_2d(
+            x, y, v, 'median', bins=5)
+        stat2, binx2, biny2, bc = binned_statistic_2d(
+            x, y, v, np.median, bins=5)
+
+        assert_allclose(stat1, stat2)
+        assert_allclose(binx1, binx2)
+        assert_allclose(biny1, biny2)
+
+    def test_2d_bincode(self):
+        x = self.x[:20]
+        y = self.y[:20]
+        v = self.v[:20]
+
+        count1, binx1, biny1, bc = binned_statistic_2d(
+            x, y, v, 'count', bins=3)
+        bc2 = np.array([17, 11, 6, 16, 11, 17, 18, 17, 17, 7, 6, 18, 16,
+                        6, 11, 16, 6, 6, 11, 8])
+
+        bcount = [(bc == i).sum() for i in np.unique(bc)]
+
+        assert_allclose(bc, bc2)
+        count1adj = count1[count1.nonzero()]
+        assert_allclose(bcount, count1adj)
+
+    def test_2d_multi_values(self):
+        x = self.x
+        y = self.y
+        v = self.v
+        w = self.w
+
+        stat1v, binx1v, biny1v, bc1v = binned_statistic_2d(
+            x, y, v, 'mean', bins=8)
+        stat1w, binx1w, biny1w, bc1w = binned_statistic_2d(
+            x, y, w, 'mean', bins=8)
+        stat2, binx2, biny2, bc2 = binned_statistic_2d(
+            x, y, [v, w], 'mean', bins=8)
+
+        assert_allclose(stat2[0], stat1v)
+        assert_allclose(stat2[1], stat1w)
+        assert_allclose(binx1v, binx2)
+        assert_allclose(biny1w, biny2)
+        assert_allclose(bc1v, bc2)
+
+    def test_2d_binnumbers_unraveled(self):
+        x = self.x
+        y = self.y
+        v = self.v
+
+        stat, edgesx, bcx = binned_statistic(x, v, 'mean', bins=20)
+        stat, edgesy, bcy = binned_statistic(y, v, 'mean', bins=10)
+
+        stat2, edgesx2, edgesy2, bc2 = binned_statistic_2d(
+            x, y, v, 'mean', bins=(20, 10), expand_binnumbers=True)
+
+        bcx3 = np.searchsorted(edgesx, x, side='right')
+        bcy3 = np.searchsorted(edgesy, y, side='right')
+
+        # `numpy.searchsorted` is non-inclusive on right-edge, compensate
+        bcx3[x == x.max()] -= 1
+        bcy3[y == y.max()] -= 1
+
+        assert_allclose(bcx, bc2[0])
+        assert_allclose(bcy, bc2[1])
+        assert_allclose(bcx3, bc2[0])
+        assert_allclose(bcy3, bc2[1])
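+        # (Note, added for orientation: with the default
+        # expand_binnumbers=False, the per-dimension indices checked above
+        # would instead be collapsed into one linear index per sample, in the
+        # spirit of np.ravel_multi_index over a grid padded with an outlier
+        # bin on each side.)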
+
+    def test_dd_count(self):
+        X = self.X
+        v = self.v
+
+        count1, edges1, bc = binned_statistic_dd(X, v, 'count', bins=3)
+        count2, edges2 = np.histogramdd(X, bins=3)
+
+        assert_allclose(count1, count2)
+        assert_allclose(edges1, edges2)
+
+    def test_dd_result_attributes(self):
+        X = self.X
+        v = self.v
+
+        res = binned_statistic_dd(X, v, 'count', bins=3)
+        attributes = ('statistic', 'bin_edges', 'binnumber')
+        check_named_results(res, attributes)
+
+    def test_dd_sum(self):
+        X = self.X
+        v = self.v
+
+        sum1, edges1, bc = binned_statistic_dd(X, v, 'sum', bins=3)
+        sum2, edges2 = np.histogramdd(X, bins=3, weights=v)
+        sum3, edges3, bc = binned_statistic_dd(X, v, np.sum, bins=3)
+
+        assert_allclose(sum1, sum2)
+        assert_allclose(edges1, edges2)
+        assert_allclose(sum1, sum3)
+        assert_allclose(edges1, edges3)
+
+    def test_dd_mean(self):
+        X = self.X
+        v = self.v
+
+        stat1, edges1, bc = binned_statistic_dd(X, v, 'mean', bins=3)
+        stat2, edges2, bc = binned_statistic_dd(X, v, np.mean, bins=3)
+
+        assert_allclose(stat1, stat2)
+        assert_allclose(edges1, edges2)
+
+    def test_dd_std(self):
+        X = self.X
+        v = self.v
+
+        stat1, edges1, bc = binned_statistic_dd(X, v, 'std', bins=3)
+        stat2, edges2, bc = binned_statistic_dd(X, v, np.std, bins=3)
+
+        assert_allclose(stat1, stat2)
+        assert_allclose(edges1, edges2)
+
+    def test_dd_min(self):
+        X = self.X
+        v = self.v
+
+        stat1, edges1, bc = binned_statistic_dd(X, v, 'min', bins=3)
+        stat2, edges2, bc = binned_statistic_dd(X, v, np.min, bins=3)
+
+        assert_allclose(stat1, stat2)
+        assert_allclose(edges1, edges2)
+
+    def test_dd_max(self):
+        X = self.X
+        v = self.v
+
+        stat1, edges1, bc = binned_statistic_dd(X, v, 'max', bins=3)
+        stat2, edges2, bc = binned_statistic_dd(X, v, np.max, bins=3)
+
+        assert_allclose(stat1, stat2)
+        assert_allclose(edges1, edges2)
+
+    def test_dd_median(self):
+        X = self.X
+        v = self.v
+
+        stat1, edges1, bc = binned_statistic_dd(X, v, 'median', bins=3)
+        stat2, edges2, bc = binned_statistic_dd(X, v, np.median, bins=3)
+
+        assert_allclose(stat1, stat2)
+        assert_allclose(edges1, edges2)
+
+    def test_dd_bincode(self):
+        X = self.X[:20]
+        v = self.v[:20]
+
+        count1, edges1, bc = binned_statistic_dd(X, v, 'count', bins=3)
+        bc2 = np.array([63, 33, 86, 83, 88, 67, 57, 33, 42, 41, 82, 83, 92,
+                        32, 36, 91, 43, 87, 81, 81])
+
+        bcount = [(bc == i).sum() for i in np.unique(bc)]
+
+        assert_allclose(bc, bc2)
+        count1adj = count1[count1.nonzero()]
+        assert_allclose(bcount, count1adj)
+
+    def test_dd_multi_values(self):
+        X = self.X
+        v = self.v
+        w = self.w
+
+        for stat in ["count", "sum", "mean", "std", "min", "max", "median",
+                     np.std]:
+            stat1v, edges1v, bc1v = binned_statistic_dd(X, v, stat, bins=8)
+            stat1w, edges1w, bc1w = binned_statistic_dd(X, w, stat, bins=8)
+            stat2, edges2, bc2 = binned_statistic_dd(X, [v, w], stat, bins=8)
+            assert_allclose(stat2[0], stat1v)
+            assert_allclose(stat2[1], stat1w)
+            assert_allclose(edges1v, edges2)
+            assert_allclose(edges1w, edges2)
+            assert_allclose(bc1v, bc2)
+
+    def test_dd_binnumbers_unraveled(self):
+        X = self.X
+        v = self.v
+
+        stat, edgesx, bcx = binned_statistic(X[:, 0], v, 'mean', bins=15)
+        stat, edgesy, bcy = binned_statistic(X[:, 1], v, 'mean', bins=20)
+        stat, edgesz, bcz = binned_statistic(X[:, 2], v, 'mean', bins=10)
+
+        stat2, edges2, bc2 = binned_statistic_dd(
+            X, v, 'mean', bins=(15, 20, 10), expand_binnumbers=True)
+
+        assert_allclose(bcx, bc2[0])
+        assert_allclose(bcy, bc2[1])
+        assert_allclose(bcz, bc2[2])
+
+    def test_dd_binned_statistic_result(self):
+        # NOTE: tests the reuse of bin_edges from previous call
+        x = np.random.random((10000, 3))
+        v = np.random.random((10000))
+        bins = np.linspace(0, 1, 10)
+        bins = (bins, bins, bins)
+
+        result = binned_statistic_dd(x, v, 'mean', bins=bins)
+        stat = result.statistic
+
+        result = binned_statistic_dd(x, v, 'mean',
+                                     binned_statistic_result=result)
+        stat2 = result.statistic
+
+        assert_allclose(stat, stat2)
+
+    def test_dd_zero_dedges(self):
+        x = np.random.random((10000, 3))
+        v = np.random.random((10000))
+        bins = np.linspace(0, 1, 10)
+        bins = np.append(bins, 1)
+        bins = (bins, bins, bins)
+        with assert_raises(ValueError, match='difference is numerically 0'):
+            binned_statistic_dd(x, v, 'mean', bins=bins)
+
+    def test_dd_range_errors(self):
+        # Test that descriptive exceptions are raised as appropriate for bad
+        # values of the `range` argument. (See gh-12996)
+        with assert_raises(ValueError,
+                           match='In range, start must be <= stop'):
+            binned_statistic_dd([self.y], self.v,
+                                range=[[1, 0]])
+        with assert_raises(
+                ValueError,
+                match='In dimension 1 of range, start must be <= stop'):
+            binned_statistic_dd([self.x, self.y], self.v,
+                                range=[[1, 0], [0, 1]])
+        with assert_raises(
+                ValueError,
+                match='In dimension 2 of range, start must be <= stop'):
+            binned_statistic_dd([self.x, self.y], self.v,
+                                range=[[0, 1], [1, 0]])
+        with assert_raises(
+                ValueError,
+                match='range given for 1 dimensions; 2 required'):
+            binned_statistic_dd([self.x, self.y], self.v,
+                                range=[[0, 1]])
+
+    def test_binned_statistic_float32(self):
+        X = np.array([0, 0.42358226], dtype=np.float32)
+        stat, _, _ = binned_statistic(X, None, 'count', bins=5)
+        assert_allclose(stat, np.array([1, 0, 0, 0, 1], dtype=np.float64))
+
+    def test_gh14332(self):
+        # Regression test for gh-14332: incorrect binning when `sample`
+        # values lie very close to a bin edge
+        size = 20
+        x = [1 - 0.1**i for i in range(size)]
+
+        bins = np.linspace(0, 1, 11)
+        sum1, edges1, bc = binned_statistic_dd(x, np.ones(len(x)),
+                                               bins=[bins], statistic='sum')
+        sum2, edges2 = np.histogram(x, bins=bins)
+
+        assert_allclose(sum1, sum2)
+        assert_allclose(edges1[0], edges2)
+
+    @pytest.mark.parametrize("dtype", [np.float64, np.complex128])
+    @pytest.mark.parametrize("statistic", [np.mean, np.median, np.sum, np.std,
+                                           np.min, np.max, 'count',
+                                           lambda x: (x**2).sum(),
+                                           lambda x: (x**2).sum() * 1j])
+    def test_dd_all(self, dtype, statistic):
+        def ref_statistic(x):
+            return len(x) if statistic == 'count' else statistic(x)
+
+        rng = np.random.default_rng(3704743126639371)
+        n = 10
+        x = rng.random(size=n)
+        i = x >= 0.5
+        v = rng.random(size=n)
+        if dtype is np.complex128:
+            v = v + rng.random(size=n)*1j
+
+        stat, _, _ = binned_statistic_dd(x, v, statistic, bins=2)
+        ref = np.array([ref_statistic(v[~i]), ref_statistic(v[i])])
+        assert_allclose(stat, ref)
+        assert stat.dtype == np.result_type(ref.dtype, np.float64)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_boost_ufuncs.py b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_boost_ufuncs.py
new file mode 100644
index 00000000..e3a6b212
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_boost_ufuncs.py
@@ -0,0 +1,44 @@
+import pytest
+import numpy as np
+from numpy.testing import assert_allclose
+from scipy.stats import _boost
+
+
+type_char_to_type_tol = {'f': (np.float32, 32*np.finfo(np.float32).eps),
+                         'd': (np.float64, 32*np.finfo(np.float64).eps),
+                         'g': (np.longdouble, 32*np.finfo(np.longdouble).eps)}
+
+
+# Each item in this list is
+#   (func, args, expected_value)
+# All the values can be represented exactly, even with np.float32.
+#
+# This is not an exhaustive test data set of all the functions!
+# It is a spot check of several functions, primarily for
+# checking that the different data types are handled correctly.
+test_data = [
+    (_boost._beta_cdf, (0.5, 2, 3), 0.6875),
+    (_boost._beta_ppf, (0.6875, 2, 3), 0.5),
+    (_boost._beta_pdf, (0.5, 2, 3), 1.5),
+    (_boost._beta_sf, (0.5, 2, 1), 0.75),
+    (_boost._beta_isf, (0.75, 2, 1), 0.5),
+    (_boost._binom_cdf, (1, 3, 0.5), 0.5),
+    (_boost._binom_pdf, (1, 4, 0.5), 0.25),
+    (_boost._hypergeom_cdf, (2, 3, 5, 6), 0.5),
+    (_boost._nbinom_cdf, (1, 4, 0.25), 0.015625),
+    (_boost._ncf_mean, (10, 12, 2.5), 1.5),
+]
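+
+# Note (added for clarity): each entry of a numpy ufunc's `.types` attribute,
+# used below, is a signature string such as 'fff->f'; the character after
+# '->' is the output type code, which selects the matching (dtype, tolerance)
+# pair from the table above.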
+
+
+@pytest.mark.filterwarnings('ignore::RuntimeWarning')
+@pytest.mark.parametrize('func, args, expected', test_data)
+def test_stats_boost_ufunc(func, args, expected):
+    type_sigs = func.types
+    type_chars = [sig.split('->')[-1] for sig in type_sigs]
+    for type_char in type_chars:
+        typ, rtol = type_char_to_type_tol[type_char]
+        args = [typ(arg) for arg in args]
+        value = func(*args)
+        assert isinstance(value, typ)
+        assert_allclose(value, expected, rtol=rtol)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_contingency.py b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_contingency.py
new file mode 100644
index 00000000..ea7fad58
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_contingency.py
@@ -0,0 +1,241 @@
+import numpy as np
+from numpy.testing import (assert_equal, assert_array_equal,
+                           assert_array_almost_equal, assert_approx_equal,
+                           assert_allclose)
+import pytest
+from pytest import raises as assert_raises
+from scipy.special import xlogy
+from scipy.stats.contingency import (margins, expected_freq,
+                                     chi2_contingency, association)
+
+
+def test_margins():
+    a = np.array([1])
+    m = margins(a)
+    assert_equal(len(m), 1)
+    m0 = m[0]
+    assert_array_equal(m0, np.array([1]))
+
+    a = np.array([[1]])
+    m0, m1 = margins(a)
+    expected0 = np.array([[1]])
+    expected1 = np.array([[1]])
+    assert_array_equal(m0, expected0)
+    assert_array_equal(m1, expected1)
+
+    a = np.arange(12).reshape(2, 6)
+    m0, m1 = margins(a)
+    expected0 = np.array([[15], [51]])
+    expected1 = np.array([[6, 8, 10, 12, 14, 16]])
+    assert_array_equal(m0, expected0)
+    assert_array_equal(m1, expected1)
+
+    a = np.arange(24).reshape(2, 3, 4)
+    m0, m1, m2 = margins(a)
+    expected0 = np.array([[[66]], [[210]]])
+    expected1 = np.array([[[60], [92], [124]]])
+    expected2 = np.array([[[60, 66, 72, 78]]])
+    assert_array_equal(m0, expected0)
+    assert_array_equal(m1, expected1)
+    assert_array_equal(m2, expected2)
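+    # (Note, added for orientation: margins(a)[k] is `a` summed over every
+    # axis except k with dimensions kept, which is why the expected arrays
+    # above retain singleton axes.)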
+
+
+def test_expected_freq():
+    assert_array_equal(expected_freq([1]), np.array([1.0]))
+
+    observed = np.array([[[2, 0], [0, 2]], [[0, 2], [2, 0]], [[1, 1], [1, 1]]])
+    e = expected_freq(observed)
+    assert_array_equal(e, np.ones_like(observed))
+
+    observed = np.array([[10, 10, 20], [20, 20, 20]])
+    e = expected_freq(observed)
+    correct = np.array([[12., 12., 16.], [18., 18., 24.]])
+    assert_array_almost_equal(e, correct)
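+    # Sanity check of the definition (added for illustration): for a 2-D
+    # table, the expected frequencies are the outer product of the margins
+    # divided by the grand total.
+    rows, cols = observed.sum(axis=1), observed.sum(axis=0)
+    assert_array_almost_equal(np.outer(rows, cols) / observed.sum(), correct)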
+
+
+def test_chi2_contingency_trivial():
+    # Some very simple tests for chi2_contingency.
+
+    # A trivial case
+    obs = np.array([[1, 2], [1, 2]])
+    chi2, p, dof, expected = chi2_contingency(obs, correction=False)
+    assert_equal(chi2, 0.0)
+    assert_equal(p, 1.0)
+    assert_equal(dof, 1)
+    assert_array_equal(obs, expected)
+
+    # A *really* trivial case: 1-D data.
+    obs = np.array([1, 2, 3])
+    chi2, p, dof, expected = chi2_contingency(obs, correction=False)
+    assert_equal(chi2, 0.0)
+    assert_equal(p, 1.0)
+    assert_equal(dof, 0)
+    assert_array_equal(obs, expected)
+
+
+def test_chi2_contingency_R():
+    # Some test cases that were computed independently, using R.
+
+    # Rcode = \
+    # """
+    # # Data vector.
+    # data <- c(
+    #   12, 34, 23,     4,  47,  11,
+    #   35, 31, 11,    34,  10,  18,
+    #   12, 32,  9,    18,  13,  19,
+    #   12, 12, 14,     9,  33,  25
+    #   )
+    #
+    # # Create factor tags:r=rows, c=columns, t=tiers
+    # r <- factor(gl(4, 2*3, 2*3*4, labels=c("r1", "r2", "r3", "r4")))
+    # c <- factor(gl(3, 1,   2*3*4, labels=c("c1", "c2", "c3")))
+    # t <- factor(gl(2, 3,   2*3*4, labels=c("t1", "t2")))
+    #
+    # # 3-way Chi squared test of independence
+    # s = summary(xtabs(data~r+c+t))
+    # print(s)
+    # """
+    # Routput = \
+    # """
+    # Call: xtabs(formula = data ~ r + c + t)
+    # Number of cases in table: 478
+    # Number of factors: 3
+    # Test for independence of all factors:
+    #         Chisq = 102.17, df = 17, p-value = 3.514e-14
+    # """
+    obs = np.array(
+        [[[12, 34, 23],
+          [35, 31, 11],
+          [12, 32, 9],
+          [12, 12, 14]],
+         [[4, 47, 11],
+          [34, 10, 18],
+          [18, 13, 19],
+          [9, 33, 25]]])
+    chi2, p, dof, expected = chi2_contingency(obs)
+    assert_approx_equal(chi2, 102.17, significant=5)
+    assert_approx_equal(p, 3.514e-14, significant=4)
+    assert_equal(dof, 17)
+
+    # Rcode = \
+    # """
+    # # Data vector.
+    # data <- c(
+    #     #
+    #     12, 17,
+    #     11, 16,
+    #     #
+    #     11, 12,
+    #     15, 16,
+    #     #
+    #     23, 15,
+    #     30, 22,
+    #     #
+    #     14, 17,
+    #     15, 16
+    #     )
+    #
+    # # Create factor tags:r=rows, c=columns, d=depths(?), t=tiers
+    # r <- factor(gl(2, 2,  2*2*2*2, labels=c("r1", "r2")))
+    # c <- factor(gl(2, 1,  2*2*2*2, labels=c("c1", "c2")))
+    # d <- factor(gl(2, 4,  2*2*2*2, labels=c("d1", "d2")))
+    # t <- factor(gl(2, 8,  2*2*2*2, labels=c("t1", "t2")))
+    #
+    # # 4-way Chi squared test of independence
+    # s = summary(xtabs(data~r+c+d+t))
+    # print(s)
+    # """
+    # Routput = \
+    # """
+    # Call: xtabs(formula = data ~ r + c + d + t)
+    # Number of cases in table: 262
+    # Number of factors: 4
+    # Test for independence of all factors:
+    #         Chisq = 8.758, df = 11, p-value = 0.6442
+    # """
+    obs = np.array(
+        [[[[12, 17],
+           [11, 16]],
+          [[11, 12],
+           [15, 16]]],
+         [[[23, 15],
+           [30, 22]],
+          [[14, 17],
+           [15, 16]]]])
+    chi2, p, dof, expected = chi2_contingency(obs)
+    assert_approx_equal(chi2, 8.758, significant=4)
+    assert_approx_equal(p, 0.6442, significant=4)
+    assert_equal(dof, 11)
+
+
+def test_chi2_contingency_g():
+    c = np.array([[15, 60], [15, 90]])
+    g, p, dof, e = chi2_contingency(c, lambda_='log-likelihood',
+                                    correction=False)
+    assert_allclose(g, 2*xlogy(c, c/e).sum())
+
+    g, p, dof, e = chi2_contingency(c, lambda_='log-likelihood',
+                                    correction=True)
+    c_corr = c + np.array([[-0.5, 0.5], [0.5, -0.5]])
+    assert_allclose(g, 2*xlogy(c_corr, c_corr/e).sum())
+
+    c = np.array([[10, 12, 10], [12, 10, 10]])
+    g, p, dof, e = chi2_contingency(c, lambda_='log-likelihood')
+    assert_allclose(g, 2*xlogy(c, c/e).sum())
+
+
+def test_chi2_contingency_bad_args():
+    # Test that "bad" inputs raise a ValueError.
+
+    # Negative value in the array of observed frequencies.
+    obs = np.array([[-1, 10], [1, 2]])
+    assert_raises(ValueError, chi2_contingency, obs)
+
+    # The zeros in this will result in zeros in the array
+    # of expected frequencies.
+    obs = np.array([[0, 1], [0, 1]])
+    assert_raises(ValueError, chi2_contingency, obs)
+
+    # A degenerate case: `observed` has size 0.
+    obs = np.empty((0, 8))
+    assert_raises(ValueError, chi2_contingency, obs)
+
+
+def test_chi2_contingency_yates_gh13875():
+    # Magnitude of Yates' continuity correction should not exceed difference
+    # between expected and observed value of the statistic; see gh-13875
+    observed = np.array([[1573, 3], [4, 0]])
+    p = chi2_contingency(observed)[1]
+    assert_allclose(p, 1, rtol=1e-12)
+
+
+@pytest.mark.parametrize("correction", [False, True])
+def test_result(correction):
+    obs = np.array([[1, 2], [1, 2]])
+    res = chi2_contingency(obs, correction=correction)
+    assert_equal((res.statistic, res.pvalue, res.dof, res.expected_freq), res)
+
+
+def test_bad_association_args():
+    # Invalid Test Statistic
+    assert_raises(ValueError, association, [[1, 2], [3, 4]], "X")
+    # Invalid array shape
+    assert_raises(ValueError, association, [[[1, 2]], [[3, 4]]], "cramer")
+    # chi2_contingency exception
+    assert_raises(ValueError, association, [[-1, 10], [1, 2]], 'cramer')
+    # Invalid Array Item Data Type
+    assert_raises(ValueError, association,
+                  np.array([[1, 2], ["dd", 4]], dtype=object), 'cramer')
+
+
+@pytest.mark.parametrize('stat, expected',
+                         [('cramer', 0.09222412010290792),
+                          ('tschuprow', 0.0775509319944633),
+                          ('pearson', 0.12932925727138758)])
+def test_assoc(stat, expected):
+    # 2d Array
+    obs1 = np.array([[12, 13, 14, 15, 16],
+                     [17, 16, 18, 19, 11],
+                     [9, 15, 14, 12, 11]])
+    a = association(observed=obs1, method=stat)
+    assert_allclose(a, expected)
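+
+
+# For reference (added note): all three coefficients are functions of the
+# table's chi-squared statistic. With phi2 = chi2 / n, Cramer's V is
+# sqrt(phi2 / min(r - 1, c - 1)), Tschuprow's T is
+# sqrt(phi2 / sqrt((r - 1) * (c - 1))), and Pearson's contingency
+# coefficient is sqrt(chi2 / (chi2 + n)).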
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_continuous_basic.py b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_continuous_basic.py
new file mode 100644
index 00000000..d0873dea
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_continuous_basic.py
@@ -0,0 +1,997 @@
+import numpy as np
+import numpy.testing as npt
+import pytest
+from pytest import raises as assert_raises
+from scipy.integrate import IntegrationWarning
+import itertools
+
+from scipy import stats
+from .common_tests import (check_normalization, check_moment, check_mean_expect,
+                           check_var_expect, check_skew_expect,
+                           check_kurt_expect, check_entropy,
+                           check_private_entropy, check_entropy_vect_scale,
+                           check_edge_support, check_named_args,
+                           check_random_state_property,
+                           check_meth_dtype, check_ppf_dtype, check_cmplx_deriv,
+                           check_pickling, check_rvs_broadcast, check_freezing,
+                           check_deprecation_warning_gh5982_moment,
+                           check_deprecation_warning_gh5982_interval)
+from scipy.stats._distr_params import distcont
+from scipy.stats._distn_infrastructure import rv_continuous_frozen
+
+"""
+Test all continuous distributions.
+
+Parameters were chosen for those distributions that pass the
+Kolmogorov-Smirnov test.  This provides safe parameters for each
+distribution so that we can perform further testing of class methods.
+
+These tests currently check mainly for serious errors and exceptions,
+not for numerically exact results.
+"""
+
+# Note that you need to add new distributions you want tested
+# to _distr_params
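+#
+# For orientation (purely illustrative, not executed by the tests): each
+# `distcont` entry pairs a distribution name with "safe" shape parameters,
+# e.g. ('gamma', (2.0,)) or similar, so a manual smoke test looks roughly
+# like:
+#
+#     name, shapes = distcont[0]
+#     dist = getattr(stats, name)
+#     sample = dist.rvs(*shapes, size=1000, random_state=0)
+#     stats.kstest(sample, name, args=shapes)   # should not reject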
+
+DECIMAL = 5  # precision of the tests (increased from 0 to 5)
+
+# For skipping test_cont_basic
+distslow = ['recipinvgauss', 'vonmises', 'kappa4', 'vonmises_line',
+            'gausshyper', 'norminvgauss', 'geninvgauss', 'genhyperbolic',
+            'truncnorm', 'truncweibull_min']
+
+# distxslow are sorted by speed (very slow to slow)
+distxslow = ['studentized_range', 'kstwo', 'ksone', 'wrapcauchy', 'genexpon']
+
+# For skipping test_moments, which is already marked slow
+distxslow_test_moments = ['studentized_range', 'vonmises', 'vonmises_line',
+                          'ksone', 'kstwo', 'recipinvgauss', 'genexpon']
+
+# skip check_fit_args (test is slow)
+skip_fit_test_mle = ['exponpow', 'exponweib', 'gausshyper', 'genexpon',
+                     'halfgennorm', 'gompertz', 'johnsonsb', 'johnsonsu',
+                     'kappa4', 'ksone', 'kstwo', 'kstwobign', 'mielke', 'ncf',
+                     'nct', 'powerlognorm', 'powernorm', 'recipinvgauss',
+                     'trapezoid', 'vonmises', 'vonmises_line', 'levy_stable',
+                     'rv_histogram_instance', 'studentized_range']
+
+# these were really slow in `test_fit.py`.
+# note that this list is used to skip both fit_test and fit_fix tests
+slow_fit_test_mm = ['argus', 'exponpow', 'exponweib', 'gausshyper', 'genexpon',
+                    'genhalflogistic', 'halfgennorm', 'gompertz', 'johnsonsb',
+                    'kappa4', 'kstwobign', 'recipinvgauss',
+                    'trapezoid', 'truncexpon', 'vonmises', 'vonmises_line',
+                    'studentized_range']
+# pearson3 fails due to something weird
+# the first list fails due to non-finite distribution moments encountered
+# most of the rest fail due to integration warnings
+# pearson3 is overridden as not implemented due to gh-11746
+fail_fit_test_mm = (['alpha', 'betaprime', 'bradford', 'burr', 'burr12',
+                     'cauchy', 'crystalball', 'f', 'fisk', 'foldcauchy',
+                     'genextreme', 'genpareto', 'halfcauchy', 'invgamma',
+                     'kappa3', 'levy', 'levy_l', 'loglaplace', 'lomax',
+                     'mielke', 'nakagami', 'ncf', 'skewcauchy', 't',
+                     'tukeylambda', 'invweibull']
+                     + ['genhyperbolic', 'johnsonsu', 'ksone', 'kstwo',
+                        'nct', 'pareto', 'powernorm', 'powerlognorm']
+                     + ['pearson3'])
+skip_fit_test = {"MLE": skip_fit_test_mle,
+                 "MM": slow_fit_test_mm + fail_fit_test_mm}
+
+# skip check_fit_args_fix (test is slow)
+skip_fit_fix_test_mle = ['burr', 'exponpow', 'exponweib', 'gausshyper',
+                         'genexpon', 'halfgennorm', 'gompertz', 'johnsonsb',
+                         'johnsonsu', 'kappa4', 'ksone', 'kstwo', 'kstwobign',
+                         'levy_stable', 'mielke', 'ncf', 'ncx2',
+                         'powerlognorm', 'powernorm', 'rdist', 'recipinvgauss',
+                         'trapezoid', 'vonmises', 'vonmises_line',
+                         'studentized_range']
+# the first list fails due to non-finite distribution moments encountered
+# most of the rest fail due to integration warnings
+# pearson3 is overridden as not implemented due to gh-11746
+fail_fit_fix_test_mm = (['alpha', 'betaprime', 'burr', 'burr12', 'cauchy',
+                         'crystalball', 'f', 'fisk', 'foldcauchy',
+                         'genextreme', 'genpareto', 'halfcauchy', 'invgamma',
+                         'kappa3', 'levy', 'levy_l', 'loglaplace', 'lomax',
+                         'mielke', 'nakagami', 'ncf', 'nct', 'skewcauchy', 't',
+                         'truncpareto', 'invweibull']
+                         + ['genhyperbolic', 'johnsonsu', 'ksone', 'kstwo',
+                            'pareto', 'powernorm', 'powerlognorm']
+                         + ['pearson3'])
+skip_fit_fix_test = {"MLE": skip_fit_fix_test_mle,
+                     "MM": slow_fit_test_mm + fail_fit_fix_test_mm}
+
+# These distributions fail the complex derivative test below.
+# Here 'fail' means produce wrong results and/or raise exceptions, depending
+# on the implementation details of corresponding special functions.
+# cf https://github.com/scipy/scipy/pull/4979 for a discussion.
+fails_cmplx = set(['argus', 'beta', 'betaprime', 'chi', 'chi2', 'cosine',
+                   'dgamma', 'dweibull', 'erlang', 'f', 'gamma',
+                   'gausshyper', 'gengamma', 'genhyperbolic',
+                   'geninvgauss', 'gennorm', 'genpareto',
+                   'halfgennorm', 'invgamma',
+                   'ksone', 'kstwo', 'kstwobign', 'levy_l', 'loggamma',
+                   'logistic', 'loguniform', 'maxwell', 'nakagami',
+                   'ncf', 'nct', 'ncx2', 'norminvgauss', 'pearson3', 'rdist',
+                   'reciprocal', 'rice', 'skewnorm', 't', 'truncweibull_min',
+                   'tukeylambda', 'vonmises', 'vonmises_line',
+                   'rv_histogram_instance', 'truncnorm', 'studentized_range'])
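+# (Note, added for clarity: the complex-derivative check relies on the
+# complex-step identity f'(x) ~= Im(f(x + 1j*h)) / h for small h, which is
+# only valid when the underlying special-function implementations are
+# complex-analytic along the real axis -- hence the exclusions above.)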
+
+# rv_histogram instances, with uniform and non-uniform bins;
+# stored as (dist, arg) tuples for cases_test_cont_basic
+# and cases_test_moments.
+histogram_test_instances = []
+case1 = {'a': [1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5, 6,
+               6, 6, 6, 7, 7, 7, 8, 8, 9], 'bins': 8}  # equal width bins
+case2 = {'a': [1, 1], 'bins': [0, 1, 10]}  # unequal width bins
+for case, density in itertools.product([case1, case2], [True, False]):
+    _hist = np.histogram(**case, density=density)
+    _rv_hist = stats.rv_histogram(_hist, density=density)
+    histogram_test_instances.append((_rv_hist, tuple()))
+
+
+def cases_test_cont_basic():
+    for distname, arg in distcont[:] + histogram_test_instances:
+        if distname == 'levy_stable':
+            continue
+        elif distname in distslow:
+            yield pytest.param(distname, arg, marks=pytest.mark.slow)
+        elif distname in distxslow:
+            yield pytest.param(distname, arg, marks=pytest.mark.xslow)
+        else:
+            yield distname, arg
+
+
+@pytest.mark.filterwarnings('ignore::RuntimeWarning')
+@pytest.mark.parametrize('distname,arg', cases_test_cont_basic())
+@pytest.mark.parametrize('sn, n_fit_samples', [(500, 200)])
+def test_cont_basic(distname, arg, sn, n_fit_samples):
+    # this test skips slow distributions
+
+    try:
+        distfn = getattr(stats, distname)
+    except TypeError:
+        distfn = distname
+        distname = 'rv_histogram_instance'
+
+    rng = np.random.RandomState(765456)
+    rvs = distfn.rvs(size=sn, *arg, random_state=rng)
+    m, v = distfn.stats(*arg)
+
+    if distname not in {'laplace_asymmetric'}:
+        check_sample_meanvar_(m, v, rvs)
+    check_cdf_ppf(distfn, arg, distname)
+    check_sf_isf(distfn, arg, distname)
+    check_pdf(distfn, arg, distname)
+    check_pdf_logpdf(distfn, arg, distname)
+    check_pdf_logpdf_at_endpoints(distfn, arg, distname)
+    check_cdf_logcdf(distfn, arg, distname)
+    check_sf_logsf(distfn, arg, distname)
+    check_ppf_broadcast(distfn, arg, distname)
+    check_deprecation_warning_gh5982_moment(distfn, arg, distname)
+    check_deprecation_warning_gh5982_interval(distfn, arg, distname)
+
+    alpha = 0.01
+    if distname == 'rv_histogram_instance':
+        check_distribution_rvs(distfn.cdf, arg, alpha, rvs)
+    elif distname != 'geninvgauss':
+        # skip kstest for geninvgauss since cdf is too slow; see test for
+        # rv generation in TestGenInvGauss in test_distributions.py
+        check_distribution_rvs(distname, arg, alpha, rvs)
+
+    locscale_defaults = (0, 1)
+    meths = [distfn.pdf, distfn.logpdf, distfn.cdf, distfn.logcdf,
+             distfn.logsf]
+    # make sure arguments are within support
+    spec_x = {'weibull_max': -0.5, 'levy_l': -0.5,
+              'pareto': 1.5, 'truncpareto': 3.2, 'tukeylambda': 0.3,
+              'rv_histogram_instance': 5.0}
+    x = spec_x.get(distname, 0.5)
+    if distname == 'invweibull':
+        arg = (1,)
+    elif distname == 'ksone':
+        arg = (3,)
+
+    check_named_args(distfn, x, arg, locscale_defaults, meths)
+    check_random_state_property(distfn, arg)
+    check_pickling(distfn, arg)
+    check_freezing(distfn, arg)
+
+    # Entropy
+    if distname not in ['kstwobign', 'kstwo', 'ncf']:
+        check_entropy(distfn, arg, distname)
+
+    if distfn.numargs == 0:
+        check_vecentropy(distfn, arg)
+
+    if (distfn.__class__._entropy != stats.rv_continuous._entropy
+            and distname != 'vonmises'):
+        check_private_entropy(distfn, arg, stats.rv_continuous)
+
+    with npt.suppress_warnings() as sup:
+        sup.filter(IntegrationWarning, "The occurrence of roundoff error")
+        sup.filter(IntegrationWarning, "Extremely bad integrand")
+        sup.filter(RuntimeWarning, "invalid value")
+        check_entropy_vect_scale(distfn, arg)
+
+    check_retrieving_support(distfn, arg)
+    check_edge_support(distfn, arg)
+
+    check_meth_dtype(distfn, arg, meths)
+    check_ppf_dtype(distfn, arg)
+
+    if distname not in fails_cmplx:
+        check_cmplx_deriv(distfn, arg)
+
+    if distname != 'truncnorm':
+        check_ppf_private(distfn, arg, distname)
+
+    for method in ["MLE", "MM"]:
+        if distname not in skip_fit_test[method]:
+            check_fit_args(distfn, arg, rvs[:n_fit_samples], method)
+
+        if distname not in skip_fit_fix_test[method]:
+            check_fit_args_fix(distfn, arg, rvs[:n_fit_samples], method)
+
+
+@pytest.mark.parametrize('distname,arg', cases_test_cont_basic())
+def test_rvs_scalar(distname, arg):
+    # rvs should return a scalar when given scalar arguments (gh-12428)
+    try:
+        distfn = getattr(stats, distname)
+    except TypeError:
+        distfn = distname
+        distname = 'rv_histogram_instance'
+
+    assert np.isscalar(distfn.rvs(*arg))
+    assert np.isscalar(distfn.rvs(*arg, size=()))
+    assert np.isscalar(distfn.rvs(*arg, size=None))
+
+
+def test_levy_stable_random_state_property():
+    # levy_stable only implements rvs(), so it is skipped in the
+    # main loop in test_cont_basic(). Here we apply just the test
+    # check_random_state_property to levy_stable.
+    check_random_state_property(stats.levy_stable, (0.5, 0.1))
+
+
+def cases_test_moments():
+    fail_normalization = set()
+    fail_higher = set(['ncf'])
+
+    for distname, arg in distcont[:] + histogram_test_instances:
+        if distname == 'levy_stable':
+            continue
+
+        if distname in distxslow_test_moments:
+            yield pytest.param(distname, arg, True, True, True,
+                               marks=pytest.mark.xslow(reason="too slow"))
+            continue
+
+        cond1 = distname not in fail_normalization
+        cond2 = distname not in fail_higher
+
+        marks = list()
+        # Currently unused, `marks` can be used to add a timeout to a test of
+        # a specific distribution.  For example, this shows how a timeout could
+        # be added for the 'skewnorm' distribution:
+        #
+        #     marks = list()
+        #     if distname == 'skewnorm':
+        #         marks.append(pytest.mark.timeout(300))
+
+        yield pytest.param(distname, arg, cond1, cond2, False, marks=marks)
+
+        if not cond1 or not cond2:
+            # Run the distributions that have issues twice, once skipping the
+            # not_ok parts, once with the not_ok parts but marked as knownfail
+            yield pytest.param(distname, arg, True, True, True,
+                               marks=[pytest.mark.xfail] + marks)
+
+
+@pytest.mark.slow
+@pytest.mark.parametrize('distname,arg,normalization_ok,higher_ok,'
+                         'is_xfailing',
+                         cases_test_moments())
+def test_moments(distname, arg, normalization_ok, higher_ok, is_xfailing):
+    try:
+        distfn = getattr(stats, distname)
+    except TypeError:
+        distfn = distname
+        distname = 'rv_histogram_instance'
+
+    with npt.suppress_warnings() as sup:
+        sup.filter(IntegrationWarning,
+                   "The integral is probably divergent, or slowly convergent.")
+        sup.filter(IntegrationWarning,
+                   "The maximum number of subdivisions.")
+
+        if is_xfailing:
+            sup.filter(IntegrationWarning)
+
+        m, v, s, k = distfn.stats(*arg, moments='mvsk')
+
+        with np.errstate(all="ignore"):
+            if normalization_ok:
+                check_normalization(distfn, arg, distname)
+
+            if higher_ok:
+                check_mean_expect(distfn, arg, m, distname)
+                check_skew_expect(distfn, arg, m, v, s, distname)
+                check_var_expect(distfn, arg, m, v, distname)
+                check_kurt_expect(distfn, arg, m, v, k, distname)
+
+        check_moment(distfn, arg, m, v, distname)
+
+
+@pytest.mark.parametrize('dist,shape_args', distcont)
+def test_rvs_broadcast(dist, shape_args):
+    if dist in ['gausshyper', 'genexpon', 'studentized_range']:
+        pytest.skip("too slow")
+
+    # If shape_only is True, it means the _rvs method of the
+    # distribution uses more than one random number to generate a random
+    # variate.  That means the result of using rvs with broadcasting or
+    # with a nontrivial size will not necessarily be the same as using the
+    # numpy.vectorize'd version of rvs(), so we can only compare the shapes
+    # of the results, not the values.
+    # Whether or not a distribution is in the following list is an
+    # implementation detail of the distribution, not a requirement.  If
+    # the implementation of the rvs() method of a distribution changes, this
+    # test might also have to be changed.
+    shape_only = dist in ['argus', 'betaprime', 'dgamma', 'dweibull',
+                          'exponnorm', 'genhyperbolic', 'geninvgauss',
+                          'levy_stable', 'nct', 'norminvgauss', 'rice',
+                          'skewnorm', 'semicircular', 'gennorm', 'loggamma']
+
+    distfunc = getattr(stats, dist)
+    loc = np.zeros(2)
+    scale = np.ones((3, 1))
+    nargs = distfunc.numargs
+    allargs = []
+    bshape = [3, 2]
+    # Generate shape parameter arguments...
+    for k in range(nargs):
+        shp = (k + 4,) + (1,)*(k + 2)
+        allargs.append(shape_args[k]*np.ones(shp))
+        bshape.insert(0, k + 4)
+    allargs.extend([loc, scale])
+    # bshape holds the expected shape when loc, scale, and the shape
+    # parameters are all broadcast together.
+
+    check_rvs_broadcast(distfunc, dist, allargs, bshape, shape_only, 'd')
+
+
+# Expected values of the SF, CDF, PDF were computed using
+# mpmath with mpmath.mp.dps = 50 and output at 20:
+#
+# def ks(x, n):
+#     x = mpmath.mpf(x)
+#     logp = -mpmath.power(6.0*n*x+1.0, 2)/18.0/n
+#     sf, cdf = mpmath.exp(logp), -mpmath.expm1(logp)
+#     pdf = (6.0*n*x+1.0) * 2 * sf/3
+#     print(mpmath.nstr(sf, 20), mpmath.nstr(cdf, 20), mpmath.nstr(pdf, 20))
+#
+# Tests use 1/n < x < 1-1/n and n > 1e6 to use the asymptotic computation.
+# Larger x has a smaller sf.
+@pytest.mark.parametrize('x,n,sf,cdf,pdf,rtol',
+                         [(2.0e-5, 1000000000,
+                           0.44932297307934442379, 0.55067702692065557621,
+                           35946.137394996276407, 5e-15),
+                          (2.0e-9, 1000000000,
+                           0.99999999061111115519, 9.3888888448132728224e-9,
+                           8.6666665852962971765, 5e-14),
+                          (5.0e-4, 1000000000,
+                           7.1222019433090374624e-218, 1.0,
+                           1.4244408634752704094e-211, 5e-14)])
+def test_gh17775_regression(x, n, sf, cdf, pdf, rtol):
+    # Regression test for gh-17775. In scipy 1.9.3 and earlier,
+    # these tests would fail.
+    #
+    # KS one asymptotic sf ~ e^(-(6nx+1)^2 / 18n)
+    # Given a large 32-bit integer n, 6n will overflow in the C implementation.
+    # Example of broken behaviour:
+    # ksone.sf(2.0e-5, 1000000000) == 0.9374359693473666
+    ks = stats.ksone
+    vals = np.array([ks.sf(x, n), ks.cdf(x, n), ks.pdf(x, n)])
+    expected = np.array([sf, cdf, pdf])
+    npt.assert_allclose(vals, expected, rtol=rtol)
+    # The sf+cdf must sum to 1.0.
+    npt.assert_equal(vals[0] + vals[1], 1.0)
+    # Check inverting the (potentially very small) sf (uses a lower tolerance)
+    npt.assert_allclose([ks.isf(sf, n)], [x], rtol=1e-8)
+
+
+def test_rvs_gh2069_regression():
+    # Regression tests for gh-2069.  In scipy 0.17 and earlier,
+    # these tests would fail.
+    #
+    # A typical example of the broken behavior:
+    # >>> norm.rvs(loc=np.zeros(5), scale=np.ones(5))
+    # array([-2.49613705, -2.49613705, -2.49613705, -2.49613705, -2.49613705])
+    rng = np.random.RandomState(123)
+    vals = stats.norm.rvs(loc=np.zeros(5), scale=1, random_state=rng)
+    d = np.diff(vals)
+    npt.assert_(np.all(d != 0), "All the values are equal, but they shouldn't be!")
+    vals = stats.norm.rvs(loc=0, scale=np.ones(5), random_state=rng)
+    d = np.diff(vals)
+    npt.assert_(np.all(d != 0), "All the values are equal, but they shouldn't be!")
+    vals = stats.norm.rvs(loc=np.zeros(5), scale=np.ones(5), random_state=rng)
+    d = np.diff(vals)
+    npt.assert_(np.all(d != 0), "All the values are equal, but they shouldn't be!")
+    vals = stats.norm.rvs(loc=np.array([[0], [0]]), scale=np.ones(5),
+                          random_state=rng)
+    d = np.diff(vals.ravel())
+    npt.assert_(np.all(d != 0), "All the values are equal, but they shouldn't be!")
+
+    assert_raises(ValueError, stats.norm.rvs, [[0, 0], [0, 0]],
+                  [[1, 1], [1, 1]], 1)
+    assert_raises(ValueError, stats.gamma.rvs, [2, 3, 4, 5], 0, 1, (2, 2))
+    assert_raises(ValueError, stats.gamma.rvs, [1, 1, 1, 1], [0, 0, 0, 0],
+                     [[1], [2]], (4,))
+
+
+def test_nomodify_gh9990_regression():
+    # Regression test for gh-9990
+    # Prior to gh-9990, calls to stats.truncnorm._cdf() used whatever was
+    # set inside the stats.truncnorm instance during stats.truncnorm.cdf().
+    # This could cause issues with multi-threaded code.
+    # Since then, the calls to cdf() are not permitted to modify the global
+    # stats.truncnorm instance.
+    tn = stats.truncnorm
+    # Use the right-half truncated normal
+    # Check that the cdf and _cdf return the same result.
+    npt.assert_almost_equal(tn.cdf(1, 0, np.inf), 0.6826894921370859)
+    npt.assert_almost_equal(tn._cdf([1], [0], [np.inf]), 0.6826894921370859)
+
+    # Now use the left-half truncated normal
+    npt.assert_almost_equal(tn.cdf(-1, -np.inf, 0), 0.31731050786291415)
+    npt.assert_almost_equal(tn._cdf([-1], [-np.inf], [0]), 0.31731050786291415)
+
+    # Check that the right-half truncated normal _cdf hasn't changed
+    npt.assert_almost_equal(tn._cdf([1], [0], [np.inf]), 0.6826894921370859)  # noqa, NOT 1.6826894921370859
+    npt.assert_almost_equal(tn.cdf(1, 0, np.inf), 0.6826894921370859)
+
+    # Check that the left-half truncated normal _cdf hasn't changed
+    npt.assert_almost_equal(tn._cdf([-1], [-np.inf], [0]), 0.31731050786291415)  # noqa, Not -0.6826894921370859
+    npt.assert_almost_equal(tn.cdf(1, -np.inf, 0), 1)                     # Not 1.6826894921370859
+    npt.assert_almost_equal(tn.cdf(-1, -np.inf, 0), 0.31731050786291415)  # Not -0.6826894921370859
+
+
+def test_broadcast_gh9990_regression():
+    # Regression test for gh-9990
+    # The x-value 7 only lies within the support of 4 of the supplied
+    # distributions.  Prior to gh-9990, one array passed to
+    # stats.reciprocal._cdf would have 4 elements, but an array
+    # previously stored by stats.reciprocal._argcheck() would have 6, leading
+    # to a broadcast error.
+    a = np.array([1, 2, 3, 4, 5, 6])
+    b = np.array([8, 16, 1, 32, 1, 48])
+    ans = [stats.reciprocal.cdf(7, _a, _b) for _a, _b in zip(a,b)]
+    npt.assert_array_almost_equal(stats.reciprocal.cdf(7, a, b), ans)
+
+    ans = [stats.reciprocal.cdf(1, _a, _b) for _a, _b in zip(a,b)]
+    npt.assert_array_almost_equal(stats.reciprocal.cdf(1, a, b), ans)
+
+    ans = [stats.reciprocal.cdf(_a, _a, _b) for _a, _b in zip(a,b)]
+    npt.assert_array_almost_equal(stats.reciprocal.cdf(a, a, b), ans)
+
+    ans = [stats.reciprocal.cdf(_b, _a, _b) for _a, _b in zip(a,b)]
+    npt.assert_array_almost_equal(stats.reciprocal.cdf(b, a, b), ans)
+
+
+def test_broadcast_gh7933_regression():
+    # Check broadcast works
+    stats.truncnorm.logpdf(
+        np.array([3.0, 2.0, 1.0]),
+        a=(1.5 - np.array([6.0, 5.0, 4.0])) / 3.0,
+        b=np.inf,
+        loc=np.array([6.0, 5.0, 4.0]),
+        scale=3.0
+    )
+
+
+def test_gh2002_regression():
+    # Add a check that broadcast works in situations where only some
+    # x-values are compatible with some of the shape arguments.
+    x = np.r_[-2:2:101j]
+    a = np.r_[-np.ones(50), np.ones(51)]
+    expected = [stats.truncnorm.pdf(_x, _a, np.inf) for _x, _a in zip(x, a)]
+    ans = stats.truncnorm.pdf(x, a, np.inf)
+    npt.assert_array_almost_equal(ans, expected)
+
+
+def test_gh1320_regression():
+    # Check that the first example from gh-1320 now works.
+    c = 2.62
+    stats.genextreme.ppf(0.5, np.array([[c], [c + 0.5]]))
+    # The other examples in gh-1320 appear to have stopped working
+    # some time ago.
+    # ans = stats.genextreme.moment(2, np.array([c, c + 0.5]))
+    # expected = np.array([25.50105963, 115.11191437])
+    # stats.genextreme.moment(5, np.array([[c], [c + 0.5]]))
+    # stats.genextreme.moment(5, np.array([c, c + 0.5]))
+
+
+def test_method_of_moments():
+    # example from https://en.wikipedia.org/wiki/Method_of_moments_(statistics)
+    np.random.seed(1234)
+    x = [0, 0, 0, 0, 1]
+    a = 1/5 - 2*np.sqrt(3)/5
+    b = 1/5 + 2*np.sqrt(3)/5
+    # force use of method of moments (uniform.fit is overridden)
+    loc, scale = super(type(stats.uniform), stats.uniform).fit(x, method="MM")
+    npt.assert_almost_equal(loc, a, decimal=4)
+    npt.assert_almost_equal(loc+scale, b, decimal=4)
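+    # (Derivation, added for clarity: matching the first two sample moments
+    # of a uniform distribution gives endpoints mean -/+ sqrt(3*var); here
+    # mean = 1/5 and var = 4/25, which yields the a and b above.)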
+
+
+def check_sample_meanvar_(popmean, popvar, sample):
+    if np.isfinite(popmean):
+        check_sample_mean(sample, popmean)
+    if np.isfinite(popvar):
+        check_sample_var(sample, popvar)
+
+
+def check_sample_mean(sample, popmean):
+    # Checks for unlikely difference between sample mean and population mean
+    prob = stats.ttest_1samp(sample, popmean).pvalue
+    assert prob > 0.01
+
+
+def check_sample_var(sample, popvar):
+    # check that population mean lies within the CI bootstrapped from the
+    # sample. This used to be a chi-squared test for variance, but there were
+    # too many false positives
+    res = stats.bootstrap(
+        (sample,),
+        lambda x, axis: x.var(ddof=1, axis=axis),
+        confidence_level=0.995,
+    )
+    conf = res.confidence_interval
+    low, high = conf.low, conf.high
+    assert low <= popvar <= high
+
+
+def check_cdf_ppf(distfn, arg, msg):
+    values = [0.001, 0.5, 0.999]
+    npt.assert_almost_equal(distfn.cdf(distfn.ppf(values, *arg), *arg),
+                            values, decimal=DECIMAL, err_msg=msg +
+                            ' - cdf-ppf roundtrip')
+
+
+def check_sf_isf(distfn, arg, msg):
+    npt.assert_almost_equal(distfn.sf(distfn.isf([0.1, 0.5, 0.9], *arg), *arg),
+                            [0.1, 0.5, 0.9], decimal=DECIMAL, err_msg=msg +
+                            ' - sf-isf roundtrip')
+    npt.assert_almost_equal(distfn.cdf([0.1, 0.9], *arg),
+                            1.0 - distfn.sf([0.1, 0.9], *arg),
+                            decimal=DECIMAL, err_msg=msg +
+                            ' - cdf-sf relationship')
+
+
+def check_pdf(distfn, arg, msg):
+    # compares pdf at median with numerical derivative of cdf
+    median = distfn.ppf(0.5, *arg)
+    eps = 1e-6
+    pdfv = distfn.pdf(median, *arg)
+    if (pdfv < 1e-4) or (pdfv > 1e4):
+        # avoid checking a case where pdf is close to zero or
+        # huge (singularity)
+        median = median + 0.1
+        pdfv = distfn.pdf(median, *arg)
+    cdfdiff = (distfn.cdf(median + eps, *arg) -
+               distfn.cdf(median - eps, *arg))/eps/2.0
+    # TODO: a better finite difference and more test points would be ideal;
+    # in practice this check works well enough
+    msg += ' - cdf-pdf relationship'
+    npt.assert_almost_equal(pdfv, cdfdiff, decimal=DECIMAL, err_msg=msg)
+
+
+def check_pdf_logpdf(distfn, args, msg):
+    # compares pdf at several points with the log of the pdf
+    points = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
+    vals = distfn.ppf(points, *args)
+    vals = vals[np.isfinite(vals)]
+    pdf = distfn.pdf(vals, *args)
+    logpdf = distfn.logpdf(vals, *args)
+    pdf = pdf[(pdf != 0) & np.isfinite(pdf)]
+    logpdf = logpdf[np.isfinite(logpdf)]
+    msg += " - logpdf-log(pdf) relationship"
+    npt.assert_almost_equal(np.log(pdf), logpdf, decimal=7, err_msg=msg)
+
+
+def check_pdf_logpdf_at_endpoints(distfn, args, msg):
+    # compares pdf with the log of the pdf at the (finite) end points
+    points = np.array([0, 1])
+    vals = distfn.ppf(points, *args)
+    vals = vals[np.isfinite(vals)]
+    pdf = distfn.pdf(vals, *args)
+    logpdf = distfn.logpdf(vals, *args)
+    pdf = pdf[(pdf != 0) & np.isfinite(pdf)]
+    logpdf = logpdf[np.isfinite(logpdf)]
+    msg += " - logpdf-log(pdf) relationship"
+    npt.assert_almost_equal(np.log(pdf), logpdf, decimal=7, err_msg=msg)
+
+
+def check_sf_logsf(distfn, args, msg):
+    # compares sf at several points with the log of the sf
+    points = np.array([0.0, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 1.0])
+    vals = distfn.ppf(points, *args)
+    vals = vals[np.isfinite(vals)]
+    sf = distfn.sf(vals, *args)
+    logsf = distfn.logsf(vals, *args)
+    sf = sf[sf != 0]
+    logsf = logsf[np.isfinite(logsf)]
+    msg += " - logsf-log(sf) relationship"
+    npt.assert_almost_equal(np.log(sf), logsf, decimal=7, err_msg=msg)
+
+
+def check_cdf_logcdf(distfn, args, msg):
+    # compares cdf at several points with the log of the cdf
+    points = np.array([0, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 1.0])
+    vals = distfn.ppf(points, *args)
+    vals = vals[np.isfinite(vals)]
+    cdf = distfn.cdf(vals, *args)
+    logcdf = distfn.logcdf(vals, *args)
+    cdf = cdf[cdf != 0]
+    logcdf = logcdf[np.isfinite(logcdf)]
+    msg += " - logcdf-log(cdf) relationship"
+    npt.assert_almost_equal(np.log(cdf), logcdf, decimal=7, err_msg=msg)
+
+
+def check_ppf_broadcast(distfn, arg, msg):
+    # compares ppf for multiple argsets.
+    num_repeats = 5
+    args = []
+    if arg:
+        args = [np.array([_] * num_repeats) for _ in arg]
+
+    median = distfn.ppf(0.5, *arg)
+    medians = distfn.ppf(0.5, *args)
+    msg += " - ppf multiple"
+    npt.assert_almost_equal(medians, [median] * num_repeats, decimal=7, err_msg=msg)
+
+
+def check_distribution_rvs(dist, args, alpha, rvs):
+    # dist is either a cdf function or name of a distribution in scipy.stats.
+    # args are the args for scipy.stats.dist(*args)
+    # alpha is a significance level, ~0.01
+    # rvs is array_like of random variables
+    # test from scipy.stats.tests
+    # this version reuses existing random variables
+    D, pval = stats.kstest(rvs, dist, args=args, N=1000)
+    if (pval < alpha):
+        # The rvs passed in failed the K-S test, which _could_ happen
+        # but is unlikely if alpha is small enough.
+        # Repeat the test with a new sample of rvs.
+        # Generate 1000 rvs, perform a K-S test that the new sample of rvs
+        # are distributed according to the distribution.
+        D, pval = stats.kstest(dist, dist, args=args, N=1000)
+        npt.assert_(pval > alpha, "D = " + str(D) + "; pval = " + str(pval) +
+                    "; alpha = " + str(alpha) + "\nargs = " + str(args))
+
+
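+# Usage note (illustration only): when `stats.kstest` receives a distribution
+# name as its first argument, as in the retry above, it draws N fresh variates
+# internally instead of using a caller-supplied sample, e.g.:
+#
+#     D, pval = stats.kstest('norm', 'norm', N=1000)
+
+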
+def check_vecentropy(distfn, args):
+    npt.assert_equal(distfn.vecentropy(*args), distfn._entropy(*args))
+
+
+def check_loc_scale(distfn, arg, m, v, msg):
+    # Make `loc` and `scale` arrays to catch bugs like gh-13580, where
+    # array-valued `loc` and `scale` broadcast improperly against the shapes.
+    loc, scale = np.array([10.0, 20.0]), np.array([10.0, 20.0])
+    mt, vt = distfn.stats(loc=loc, scale=scale, *arg)
+    npt.assert_allclose(m*scale + loc, mt)
+    npt.assert_allclose(v*scale*scale, vt)
+
+
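+# Background sketch (illustration only): for Y = loc + scale*X, the check
+# above uses the location-scale identities
+#
+#     E[Y] = loc + scale*E[X]        Var[Y] = scale**2 * Var[X]
+#
+# e.g. the standard normal (m=0, v=1) with loc=10, scale=20 has mean 10 and
+# variance 400.
+
+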
+def check_ppf_private(distfn, arg, msg):
+    # fails by design for truncnorm (self.nb is not defined)
+    ppfs = distfn._ppf(np.array([0.1, 0.5, 0.9]), *arg)
+    npt.assert_(not np.any(np.isnan(ppfs)), msg + 'ppf private is nan')
+
+
+def check_retrieving_support(distfn, args):
+    loc, scale = 1, 2
+    supp = distfn.support(*args)
+    supp_loc_scale = distfn.support(*args, loc=loc, scale=scale)
+    npt.assert_almost_equal(np.array(supp)*scale + loc,
+                            np.array(supp_loc_scale))
+
+
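+# Worked example of the identity above (illustration only): the support is
+# transformed the same way as the variable itself, e.g. for the exponential
+# distribution:
+#
+#     stats.expon.support()                # -> (0.0, inf)
+#     stats.expon.support(loc=1, scale=2)  # -> (1.0, inf)
+
+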
+def check_fit_args(distfn, arg, rvs, method):
+    with np.errstate(all='ignore'), npt.suppress_warnings() as sup:
+        sup.filter(category=RuntimeWarning,
+                   message="The shape parameter of the erlang")
+        sup.filter(category=RuntimeWarning,
+                   message="floating point number truncated")
+        vals = distfn.fit(rvs, method=method)
+        vals2 = distfn.fit(rvs, optimizer='powell', method=method)
+    # Only check the length of the return; accuracy tested in test_fit.py
+    npt.assert_(len(vals) == 2+len(arg))
+    npt.assert_(len(vals2) == 2+len(arg))
+
+
+def check_fit_args_fix(distfn, arg, rvs, method):
+    with np.errstate(all='ignore'), npt.suppress_warnings() as sup:
+        sup.filter(category=RuntimeWarning,
+                   message="The shape parameter of the erlang")
+
+        vals = distfn.fit(rvs, floc=0, method=method)
+        vals2 = distfn.fit(rvs, fscale=1, method=method)
+        npt.assert_(len(vals) == 2+len(arg))
+        npt.assert_(vals[-2] == 0)
+        npt.assert_(vals2[-1] == 1)
+        npt.assert_(len(vals2) == 2+len(arg))
+        if len(arg) > 0:
+            vals3 = distfn.fit(rvs, f0=arg[0], method=method)
+            npt.assert_(len(vals3) == 2+len(arg))
+            npt.assert_(vals3[0] == arg[0])
+        if len(arg) > 1:
+            vals4 = distfn.fit(rvs, f1=arg[1], method=method)
+            npt.assert_(len(vals4) == 2+len(arg))
+            npt.assert_(vals4[1] == arg[1])
+        if len(arg) > 2:
+            vals5 = distfn.fit(rvs, f2=arg[2], method=method)
+            npt.assert_(len(vals5) == 2+len(arg))
+            npt.assert_(vals5[2] == arg[2])
+
+
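+# Sketch of the `fit` keywords exercised above (illustration only, assuming a
+# gamma-distributed sample `data`):
+#
+#     stats.gamma.fit(data, floc=0)    # fix loc at 0; fit shape and scale
+#     stats.gamma.fit(data, fscale=1)  # fix scale at 1
+#     stats.gamma.fit(data, f0=2.0)    # fix the first shape parameter at 2.0
+
+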
+@pytest.mark.filterwarnings('ignore::RuntimeWarning')
+@pytest.mark.parametrize('method', ['pdf', 'logpdf', 'cdf', 'logcdf',
+                                    'sf', 'logsf', 'ppf', 'isf'])
+@pytest.mark.parametrize('distname, args', distcont)
+def test_methods_with_lists(method, distname, args):
+    # Test that the continuous distributions can accept Python lists
+    # as arguments.
+    dist = getattr(stats, distname)
+    f = getattr(dist, method)
+    if distname == 'invweibull' and method.startswith('log'):
+        x = [1.5, 2]
+    else:
+        x = [0.1, 0.2]
+
+    shape2 = [[a]*2 for a in args]
+    loc = [0, 0.1]
+    scale = [1, 1.01]
+    result = f(x, *shape2, loc=loc, scale=scale)
+    npt.assert_allclose(result,
+                        [f(*v) for v in zip(x, *shape2, loc, scale)],
+                        rtol=1e-14, atol=5e-14)
+
+
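+# Note on the comparison above (illustration only): zip(x, *shape2, loc, scale)
+# pairs the i-th entry of every list, so each f(*v) is the scalar evaluation
+# f(x[i], *shapes_i, loc[i], scale[i]) that the single vectorized call must
+# reproduce elementwise.
+
+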
+@pytest.mark.parametrize('method', ['pdf', 'logpdf', 'cdf', 'logcdf',
+                                    'sf', 'logsf', 'ppf', 'isf'])
+def test_gilbrat_deprecation(method):
+    expected = getattr(stats.gibrat, method)(1)
+    with pytest.warns(
+        DeprecationWarning,
+        match=rf"\s*`gilbrat\.{method}` is deprecated,.*",
+    ):
+        result = getattr(stats.gilbrat, method)(1)
+    assert result == expected
+
+
+@pytest.mark.parametrize('method', ['pdf', 'logpdf', 'cdf', 'logcdf',
+                                    'sf', 'logsf', 'ppf', 'isf'])
+def test_gilbrat_deprecation_frozen(method):
+    expected = getattr(stats.gibrat, method)(1)
+    with pytest.warns(DeprecationWarning, match=r"\s*`gilbrat` is deprecated"):
+        # warn on instantiation of frozen distribution...
+        g = stats.gilbrat()
+    # ... not on its methods
+    result = getattr(g, method)(1)
+    assert result == expected
+
+
+def test_burr_fisk_moment_gh13234_regression():
+    vals0 = stats.burr.moment(1, 5, 4)
+    assert isinstance(vals0, float)
+
+    vals1 = stats.fisk.moment(1, 8)
+    assert isinstance(vals1, float)
+
+
+def test_moments_with_array_gh12192_regression():
+    # array loc and scalar scale
+    vals0 = stats.norm.moment(order=1, loc=np.array([1, 2, 3]), scale=1)
+    expected0 = np.array([1., 2., 3.])
+    npt.assert_equal(vals0, expected0)
+
+    # array loc and invalid scalar scale
+    vals1 = stats.norm.moment(order=1, loc=np.array([1, 2, 3]), scale=-1)
+    expected1 = np.array([np.nan, np.nan, np.nan])
+    npt.assert_equal(vals1, expected1)
+
+    # array loc and array scale with invalid entries
+    vals2 = stats.norm.moment(order=1, loc=np.array([1, 2, 3]),
+                              scale=[-3, 1, 0])
+    expected2 = np.array([np.nan, 2., np.nan])
+    npt.assert_equal(vals2, expected2)
+
+    # (loc == 0) & (scale < 0)
+    vals3 = stats.norm.moment(order=2, loc=0, scale=-4)
+    expected3 = np.nan
+    npt.assert_equal(vals3, expected3)
+    assert isinstance(vals3, expected3.__class__)
+
+    # array loc with 0 entries and scale with invalid entries
+    vals4 = stats.norm.moment(order=2, loc=[1, 0, 2], scale=[3, -4, -5])
+    expected4 = np.array([10., np.nan, np.nan])
+    npt.assert_equal(vals4, expected4)
+
+    # all(loc == 0) & (array scale with invalid entries)
+    vals5 = stats.norm.moment(order=2, loc=[0, 0, 0], scale=[5., -2, 100.])
+    expected5 = np.array([25., np.nan, 10000.])
+    npt.assert_equal(vals5, expected5)
+
+    # all( (loc == 0) & (scale < 0) )
+    vals6 = stats.norm.moment(order=2, loc=[0, 0, 0], scale=[-5., -2, -100.])
+    expected6 = np.array([np.nan, np.nan, np.nan])
+    npt.assert_equal(vals6, expected6)
+
+    # scalar args, loc, and scale
+    vals7 = stats.chi.moment(order=2, df=1, loc=0, scale=0)
+    expected7 = np.nan
+    npt.assert_equal(vals7, expected7)
+    assert isinstance(vals7, expected7.__class__)
+
+    # array args, scalar loc, and scalar scale
+    vals8 = stats.chi.moment(order=2, df=[1, 2, 3], loc=0, scale=0)
+    expected8 = np.array([np.nan, np.nan, np.nan])
+    npt.assert_equal(vals8, expected8)
+
+    # array args, array loc, and array scale
+    vals9 = stats.chi.moment(order=2, df=[1, 2, 3], loc=[1., 0., 2.],
+                             scale=[1., -3., 0.])
+    expected9 = np.array([3.59576912, np.nan, np.nan])
+    npt.assert_allclose(vals9, expected9, rtol=1e-8)
+
+    # (n > 4), all(loc != 0), and all(scale != 0)
+    vals10 = stats.norm.moment(5, [1., 2.], [1., 2.])
+    expected10 = np.array([26., 832.])
+    npt.assert_allclose(vals10, expected10, rtol=1e-13)
+
+    # test broadcasting and more
+    a = [-1.1, 0, 1, 2.2, np.pi]
+    b = [-1.1, 0, 1, 2.2, np.pi]
+    loc = [-1.1, 0, np.sqrt(2)]
+    scale = [-2.1, 0, 1, 2.2, np.pi]
+
+    a = np.array(a).reshape((-1, 1, 1, 1))
+    b = np.array(b).reshape((-1, 1, 1))
+    loc = np.array(loc).reshape((-1, 1))
+    scale = np.array(scale)
+
+    vals11 = stats.beta.moment(order=2, a=a, b=b, loc=loc, scale=scale)
+
+    a, b, loc, scale = np.broadcast_arrays(a, b, loc, scale)
+
+    for i, _ in np.ndenumerate(a):  # iterate over all multi-indices of `a`
+        with np.errstate(invalid='ignore', divide='ignore'):
+            # check against the same function with scalar input
+            expected = stats.beta.moment(order=2, a=a[i], b=b[i],
+                                         loc=loc[i], scale=scale[i])
+            np.testing.assert_equal(vals11[i], expected)
+
+
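+# Shape bookkeeping for the broadcasting block above (illustration only): the
+# reshapes give a.shape == (5, 1, 1, 1), b.shape == (5, 1, 1), loc.shape ==
+# (3, 1) and scale.shape == (5,), which broadcast together to a (5, 5, 3, 5)
+# grid, so vals11 holds one second moment per parameter combination.
+
+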
+def test_broadcasting_in_moments_gh12192_regression():
+    vals0 = stats.norm.moment(order=1, loc=np.array([1, 2, 3]), scale=[[1]])
+    expected0 = np.array([[1., 2., 3.]])
+    npt.assert_equal(vals0, expected0)
+    assert vals0.shape == expected0.shape
+
+    vals1 = stats.norm.moment(order=1, loc=np.array([[1], [2], [3]]),
+                              scale=[1, 2, 3])
+    expected1 = np.array([[1., 1., 1.], [2., 2., 2.], [3., 3., 3.]])
+    npt.assert_equal(vals1, expected1)
+    assert vals1.shape == expected1.shape
+
+    vals2 = stats.chi.moment(order=1, df=[1., 2., 3.], loc=0., scale=1.)
+    expected2 = np.array([0.79788456, 1.25331414, 1.59576912])
+    npt.assert_allclose(vals2, expected2, rtol=1e-8)
+    assert vals2.shape == expected2.shape
+
+    vals3 = stats.chi.moment(order=1, df=[[1.], [2.], [3.]], loc=[0., 1., 2.],
+                             scale=[-1., 0., 3.])
+    expected3 = np.array([[np.nan, np.nan, 4.39365368],
+                          [np.nan, np.nan, 5.75994241],
+                          [np.nan, np.nan, 6.78730736]])
+    npt.assert_allclose(vals3, expected3, rtol=1e-8)
+    assert vals3.shape == expected3.shape
+
+
+def test_kappa3_array_gh13582():
+    # https://github.com/scipy/scipy/pull/15140#issuecomment-994958241
+    shapes = [0.5, 1.5, 2.5, 3.5, 4.5]
+    moments = 'mvsk'
+    res = np.array([[stats.kappa3.stats(shape, moments=moment)
+                   for shape in shapes] for moment in moments])
+    res2 = np.array(stats.kappa3.stats(shapes, moments=moments))
+    npt.assert_allclose(res, res2)
+
+
+@pytest.mark.xslow
+def test_kappa4_array_gh13582():
+    h = np.array([-0.5, 2.5, 3.5, 4.5, -3])
+    k = np.array([-0.5, 1, -1.5, 0, 3.5])
+    moments = 'mvsk'
+    res = np.array([[stats.kappa4.stats(h[i], k[i], moments=moment)
+                   for i in range(5)] for moment in moments])
+    res2 = np.array(stats.kappa4.stats(h, k, moments=moments))
+    npt.assert_allclose(res, res2)
+
+    # https://github.com/scipy/scipy/pull/15250#discussion_r775112913
+    h = np.array([-1, -1/4, -1/4, 1, -1, 0])
+    k = np.array([1, 1, 1/2, -1/3, -1, 0])
+    res = np.array([[stats.kappa4.stats(h[i], k[i], moments=moment)
+                   for i in range(6)] for moment in moments])
+    res2 = np.array(stats.kappa4.stats(h, k, moments=moments))
+    npt.assert_allclose(res, res2)
+
+    # https://github.com/scipy/scipy/pull/15250#discussion_r775115021
+    h = np.array([-1, -0.5, 1])
+    k = np.array([-1, -0.5, 0, 1])[:, None]
+    res2 = np.array(stats.kappa4.stats(h, k, moments=moments))
+    assert res2.shape == (4, 4, 3)
+
+
+def test_frozen_attributes():
+    # gh-14827 reported that all frozen distributions had both pmf and pdf
+    # attributes; continuous should have pdf and discrete should have pmf.
+    message = "'rv_continuous_frozen' object has no attribute"
+    with pytest.raises(AttributeError, match=message):
+        stats.norm().pmf
+    with pytest.raises(AttributeError, match=message):
+        stats.norm().logpmf
+    stats.norm.pmf = "herring"
+    frozen_norm = stats.norm()
+    assert isinstance(frozen_norm, rv_continuous_frozen)
+    delattr(stats.norm, 'pmf')
+
+
+def test_skewnorm_pdf_gh16038():
+    rng = np.random.default_rng(0)
+    x, a = -np.inf, 0
+    npt.assert_equal(stats.skewnorm.pdf(x, a), stats.norm.pdf(x))
+    x, a = rng.random(size=(3, 3)), rng.random(size=(3, 3))
+    mask = rng.random(size=(3, 3)) < 0.5
+    a[mask] = 0
+    x_norm = x[mask]
+    res = stats.skewnorm.pdf(x, a)
+    npt.assert_equal(res[mask], stats.norm.pdf(x_norm))
+    npt.assert_equal(res[~mask], stats.skewnorm.pdf(x[~mask], a[~mask]))
+
+
+# for scalar input, these functions should return scalar output
+scalar_out = [['rvs', []], ['pdf', [0]], ['logpdf', [0]], ['cdf', [0]],
+              ['logcdf', [0]], ['sf', [0]], ['logsf', [0]], ['ppf', [0]],
+              ['isf', [0]], ['moment', [1]], ['entropy', []], ['expect', []],
+              ['median', []], ['mean', []], ['std', []], ['var', []]]
+scalars_out = [['interval', [0.95]], ['support', []], ['stats', ['mv']]]
+
+
+@pytest.mark.parametrize('case', scalar_out + scalars_out)
+def test_scalar_for_scalar(case):
+    # Some rv_continuous methods returned a 0d array instead of a NumPy
+    # scalar; guard against that regression.
+    method_name, args = case
+    method = getattr(stats.norm(), method_name)
+    res = method(*args)
+    if case in scalar_out:
+        assert isinstance(res, np.number)
+    else:
+        assert isinstance(res[0], np.number)
+        assert isinstance(res[1], np.number)
+
+
+def test_scalar_for_scalar2():
+    # test methods that are not attributes of frozen distributions
+    res = stats.norm.fit([1, 2, 3])
+    assert isinstance(res[0], np.number)
+    assert isinstance(res[1], np.number)
+    res = stats.norm.fit_loc_scale([1, 2, 3])
+    assert isinstance(res[0], np.number)
+    assert isinstance(res[1], np.number)
+    res = stats.norm.nnlf((0, 1), [1, 2, 3])
+    assert isinstance(res, np.number)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_crosstab.py b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_crosstab.py
new file mode 100644
index 00000000..35eda2de
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_crosstab.py
@@ -0,0 +1,115 @@
+import pytest
+import numpy as np
+from numpy.testing import assert_array_equal, assert_equal
+from scipy.stats.contingency import crosstab
+
+
+@pytest.mark.parametrize('sparse', [False, True])
+def test_crosstab_basic(sparse):
+    a = [0, 0, 9, 9, 0, 0, 9]
+    b = [2, 1, 3, 1, 2, 3, 3]
+    expected_avals = [0, 9]
+    expected_bvals = [1, 2, 3]
+    expected_count = np.array([[1, 2, 1],
+                               [1, 0, 2]])
+    (avals, bvals), count = crosstab(a, b, sparse=sparse)
+    assert_array_equal(avals, expected_avals)
+    assert_array_equal(bvals, expected_bvals)
+    if sparse:
+        assert_array_equal(count.A, expected_count)
+    else:
+        assert_array_equal(count, expected_count)
+
+
+def test_crosstab_basic_1d():
+    # Verify that a single input sequence works as expected.
+    x = [1, 2, 3, 1, 2, 3, 3]
+    expected_xvals = [1, 2, 3]
+    expected_count = np.array([2, 2, 3])
+    (xvals,), count = crosstab(x)
+    assert_array_equal(xvals, expected_xvals)
+    assert_array_equal(count, expected_count)
+
+
+def test_crosstab_basic_3d():
+    # Verify the function for three input sequences.
+    a = 'a'
+    b = 'b'
+    x = [0, 0, 9, 9, 0, 0, 9, 9]
+    y = [a, a, a, a, b, b, b, a]
+    z = [1, 2, 3, 1, 2, 3, 3, 1]
+    expected_xvals = [0, 9]
+    expected_yvals = [a, b]
+    expected_zvals = [1, 2, 3]
+    expected_count = np.array([[[1, 1, 0],
+                                [0, 1, 1]],
+                               [[2, 0, 1],
+                                [0, 0, 1]]])
+    (xvals, yvals, zvals), count = crosstab(x, y, z)
+    assert_array_equal(xvals, expected_xvals)
+    assert_array_equal(yvals, expected_yvals)
+    assert_array_equal(zvals, expected_zvals)
+    assert_array_equal(count, expected_count)
+
+
+@pytest.mark.parametrize('sparse', [False, True])
+def test_crosstab_levels(sparse):
+    a = [0, 0, 9, 9, 0, 0, 9]
+    b = [1, 2, 3, 1, 2, 3, 3]
+    expected_avals = [0, 9]
+    expected_bvals = [0, 1, 2, 3]
+    expected_count = np.array([[0, 1, 2, 1],
+                               [0, 1, 0, 2]])
+    (avals, bvals), count = crosstab(a, b, levels=[None, [0, 1, 2, 3]],
+                                     sparse=sparse)
+    assert_array_equal(avals, expected_avals)
+    assert_array_equal(bvals, expected_bvals)
+    if sparse:
+        assert_array_equal(count.A, expected_count)
+    else:
+        assert_array_equal(count, expected_count)
+
+
+@pytest.mark.parametrize('sparse', [False, True])
+def test_crosstab_extra_levels(sparse):
+    # The pair of values (-1, 3) will be ignored, because we explicitly
+    # request the counted `a` values to be [0, 9].
+    a = [0, 0, 9, 9, 0, 0, 9, -1]
+    b = [1, 2, 3, 1, 2, 3, 3, 3]
+    expected_avals = [0, 9]
+    expected_bvals = [0, 1, 2, 3]
+    expected_count = np.array([[0, 1, 2, 1],
+                               [0, 1, 0, 2]])
+    (avals, bvals), count = crosstab(a, b, levels=[[0, 9], [0, 1, 2, 3]],
+                                     sparse=sparse)
+    assert_array_equal(avals, expected_avals)
+    assert_array_equal(bvals, expected_bvals)
+    if sparse:
+        assert_array_equal(count.A, expected_count)
+    else:
+        assert_array_equal(count, expected_count)
+
+
+def test_validation_at_least_one():
+    with pytest.raises(TypeError, match='At least one'):
+        crosstab()
+
+
+def test_validation_same_lengths():
+    with pytest.raises(ValueError, match='must have the same length'):
+        crosstab([1, 2], [1, 2, 3, 4])
+
+
+def test_validation_sparse_only_two_args():
+    with pytest.raises(ValueError, match='only two input sequences'):
+        crosstab([0, 1, 1], [8, 8, 9], [1, 3, 3], sparse=True)
+
+
+def test_validation_len_levels_matches_args():
+    with pytest.raises(ValueError, match='number of input sequences'):
+        crosstab([0, 1, 1], [8, 8, 9], levels=([0, 1, 2, 3],))
+
+
+def test_result():
+    res = crosstab([0, 1], [1, 2])
+    assert_equal((res.elements, res.count), res)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_discrete_basic.py b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_discrete_basic.py
new file mode 100644
index 00000000..868576eb
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_discrete_basic.py
@@ -0,0 +1,545 @@
+import numpy.testing as npt
+from numpy.testing import assert_allclose
+
+import numpy as np
+import pytest
+
+from scipy import stats
+from .common_tests import (check_normalization, check_moment, check_mean_expect,
+                           check_var_expect, check_skew_expect,
+                           check_kurt_expect, check_entropy,
+                           check_private_entropy, check_edge_support,
+                           check_named_args, check_random_state_property,
+                           check_pickling, check_rvs_broadcast, check_freezing,
+                           check_deprecation_warning_gh5982_moment,
+                           check_deprecation_warning_gh5982_interval)
+from scipy.stats._distr_params import distdiscrete, invdistdiscrete
+from scipy.stats._distn_infrastructure import rv_discrete_frozen
+
+vals = ([1, 2, 3, 4], [0.1, 0.2, 0.3, 0.4])
+distdiscrete += [[stats.rv_discrete(values=vals), ()]]
+
+# For these distributions, test_discrete_basic only runs with test mode full
+distslow = {'zipfian', 'nhypergeom'}
+
+
+def cases_test_discrete_basic():
+    seen = set()
+    for distname, arg in distdiscrete:
+        if distname in distslow:
+            yield pytest.param(distname, arg, distname, marks=pytest.mark.slow)
+        else:
+            yield distname, arg, distname not in seen
+        seen.add(distname)
+
+
+@pytest.mark.filterwarnings('ignore::RuntimeWarning')
+@pytest.mark.parametrize('distname,arg,first_case', cases_test_discrete_basic())
+def test_discrete_basic(distname, arg, first_case):
+    try:
+        distfn = getattr(stats, distname)
+    except TypeError:
+        distfn = distname
+        distname = 'sample distribution'
+    np.random.seed(9765456)
+    rvs = distfn.rvs(size=2000, *arg)
+    supp = np.unique(rvs)
+    m, v = distfn.stats(*arg)
+    check_cdf_ppf(distfn, arg, supp, distname + ' cdf_ppf')
+
+    check_pmf_cdf(distfn, arg, distname)
+    check_oth(distfn, arg, supp, distname + ' oth')
+    check_edge_support(distfn, arg)
+    check_deprecation_warning_gh5982_moment(distfn, arg, distname)
+    check_deprecation_warning_gh5982_interval(distfn, arg, distname)
+
+    alpha = 0.01
+    check_discrete_chisquare(distfn, arg, rvs, alpha,
+                             distname + ' chisquare')
+
+    if first_case:
+        locscale_defaults = (0,)
+        meths = [distfn.pmf, distfn.logpmf, distfn.cdf, distfn.logcdf,
+                 distfn.logsf]
+        # make sure arguments are within support
+        # for some distributions, this needs to be overridden
+        spec_k = {'randint': 11, 'hypergeom': 4, 'bernoulli': 0,
+                  'nchypergeom_wallenius': 6}
+        k = spec_k.get(distname, 1)
+        check_named_args(distfn, k, arg, locscale_defaults, meths)
+        if distname != 'sample distribution':
+            check_scale_docstring(distfn)
+        check_random_state_property(distfn, arg)
+        check_pickling(distfn, arg)
+        check_freezing(distfn, arg)
+
+        # Entropy
+        check_entropy(distfn, arg, distname)
+        if distfn.__class__._entropy != stats.rv_discrete._entropy:
+            check_private_entropy(distfn, arg, stats.rv_discrete)
+
+
+@pytest.mark.filterwarnings('ignore::RuntimeWarning')
+@pytest.mark.parametrize('distname,arg', distdiscrete)
+def test_moments(distname, arg):
+    try:
+        distfn = getattr(stats, distname)
+    except TypeError:
+        distfn = distname
+        distname = 'sample distribution'
+    m, v, s, k = distfn.stats(*arg, moments='mvsk')
+    check_normalization(distfn, arg, distname)
+
+    # compare `stats` and `moment` methods
+    check_moment(distfn, arg, m, v, distname)
+    check_mean_expect(distfn, arg, m, distname)
+    check_var_expect(distfn, arg, m, v, distname)
+    check_skew_expect(distfn, arg, m, v, s, distname)
+    if distname not in ['zipf', 'yulesimon']:
+        check_kurt_expect(distfn, arg, m, v, k, distname)
+
+    # frozen distr moments
+    check_moment_frozen(distfn, arg, m, 1)
+    check_moment_frozen(distfn, arg, v+m*m, 2)
+
+
+@pytest.mark.parametrize('dist,shape_args', distdiscrete)
+def test_rvs_broadcast(dist, shape_args):
+    # If shape_only is True, it means the _rvs method of the
+    # distribution uses more than one random number to generate a random
+    # variate.  That means the result of using rvs with broadcasting or
+    # with a nontrivial size will not necessarily be the same as using the
+    # numpy.vectorize'd version of rvs(), so we can only compare the shapes
+    # of the results, not the values.
+    # Whether or not a distribution is in the following list is an
+    # implementation detail of the distribution, not a requirement.  If
+    # the implementation of the rvs() method of a distribution changes, this
+    # test might also have to be changed.
+    shape_only = dist in ['betabinom', 'skellam', 'yulesimon', 'dlaplace',
+                          'nchypergeom_fisher', 'nchypergeom_wallenius']
+
+    try:
+        distfunc = getattr(stats, dist)
+    except TypeError:
+        distfunc = dist
+        dist = 'rv_discrete(values=(%r, %r))' % (dist.xk, dist.pk)
+    loc = np.zeros(2)
+    nargs = distfunc.numargs
+    allargs = []
+    bshape = []
+    # Generate shape parameter arguments...
+    for k in range(nargs):
+        shp = (k + 3,) + (1,)*(k + 1)
+        param_val = shape_args[k]
+        allargs.append(np.full(shp, param_val))
+        bshape.insert(0, shp[0])
+    allargs.append(loc)
+    bshape.append(loc.size)
+    # bshape holds the expected shape when loc, scale, and the shape
+    # parameters are all broadcast together.
+    check_rvs_broadcast(distfunc, dist, allargs, bshape, shape_only, [np.int_])
+
+
+@pytest.mark.parametrize('dist,args', distdiscrete)
+def test_ppf_with_loc(dist, args):
+    try:
+        distfn = getattr(stats, dist)
+    except TypeError:
+        distfn = dist
+    # check with negative, zero, and positive relocation.
+    np.random.seed(1942349)
+    re_locs = [np.random.randint(-10, -1), 0, np.random.randint(1, 10)]
+    _a, _b = distfn.support(*args)
+    for loc in re_locs:
+        npt.assert_array_equal(
+            [_a-1+loc, _b+loc],
+            [distfn.ppf(0.0, *args, loc=loc), distfn.ppf(1.0, *args, loc=loc)]
+            )
+
+
+@pytest.mark.parametrize('dist, args', distdiscrete)
+def test_isf_with_loc(dist, args):
+    try:
+        distfn = getattr(stats, dist)
+    except TypeError:
+        distfn = dist
+    # check with negative, zero, and positive relocation.
+    np.random.seed(1942349)
+    re_locs = [np.random.randint(-10, -1), 0, np.random.randint(1, 10)]
+    _a, _b = distfn.support(*args)
+    for loc in re_locs:
+        expected = _b + loc, _a - 1 + loc
+        res = distfn.isf(0., *args, loc=loc), distfn.isf(1., *args, loc=loc)
+        npt.assert_array_equal(expected, res)
+    # test broadcasting behaviour
+    re_locs = [np.random.randint(-10, -1, size=(5, 3)),
+               np.zeros((5, 3)),
+               np.random.randint(1, 10, size=(5, 3))]
+    _a, _b = distfn.support(*args)
+    for loc in re_locs:
+        expected = _b + loc, _a - 1 + loc
+        res = distfn.isf(0., *args, loc=loc), distfn.isf(1., *args, loc=loc)
+        npt.assert_array_equal(expected, res)
+
+
+def check_cdf_ppf(distfn, arg, supp, msg):
+    # supp is assumed to be an array of integers in the support of distfn
+    # (but not necessarily all the integers in the support).
+    # This test assumes that the PMF of any value in the support of the
+    # distribution is greater than 1e-8.
+
+    # cdf is a step function, and ppf(q) = min{k : cdf(k) >= q, k integer}
+    cdf_supp = distfn.cdf(supp, *arg)
+    # In very rare cases, the finite precision calculation of ppf(cdf(supp))
+    # can produce an array in which an element is off by one.  We nudge the
+    # CDF values down by 10 ULPs to help avoid this.
+    cdf_supp0 = cdf_supp - 10*np.spacing(cdf_supp)
+    npt.assert_array_equal(distfn.ppf(cdf_supp0, *arg),
+                           supp, msg + '-roundtrip')
+    # Repeat the same calculation, but with the CDF values decreased by 1e-8.
+    npt.assert_array_equal(distfn.ppf(distfn.cdf(supp, *arg) - 1e-8, *arg),
+                           supp, msg + '-roundtrip')
+
+    if not hasattr(distfn, 'xk'):
+        _a, _b = distfn.support(*arg)
+        supp1 = supp[supp < _b]
+        npt.assert_array_equal(distfn.ppf(distfn.cdf(supp1, *arg) + 1e-8, *arg),
+                               supp1 + distfn.inc, msg + ' ppf-cdf-next')
+
+
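+# Worked example of the roundtrip above (illustration only): the discrete ppf
+# is the left-continuous inverse ppf(q) = min{k : cdf(k) >= q}, so for a
+# Poisson distribution with mu=5:
+#
+#     q = stats.poisson.cdf(3, 5)
+#     stats.poisson.ppf(q, 5)          # -> 3.0  (exact roundtrip)
+#     stats.poisson.ppf(q + 1e-8, 5)   # -> 4.0  (next point of support)
+
+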
+def check_pmf_cdf(distfn, arg, distname):
+    if hasattr(distfn, 'xk'):
+        index = distfn.xk
+    else:
+        startind = int(distfn.ppf(0.01, *arg) - 1)
+        index = list(range(startind, startind + 10))
+    cdfs = distfn.cdf(index, *arg)
+    pmfs_cum = distfn.pmf(index, *arg).cumsum()
+
+    atol, rtol = 1e-10, 1e-10
+    if distname == 'skellam':    # ncx2 accuracy
+        atol, rtol = 1e-5, 1e-5
+    npt.assert_allclose(cdfs - cdfs[0], pmfs_cum - pmfs_cum[0],
+                        atol=atol, rtol=rtol)
+
+    # also check that pmf at non-integral k is zero
+    k = np.asarray(index)
+    k_shifted = k[:-1] + np.diff(k)/2
+    npt.assert_equal(distfn.pmf(k_shifted, *arg), 0)
+
+    # better check frozen distributions, and also when loc != 0
+    loc = 0.5
+    dist = distfn(loc=loc, *arg)
+    npt.assert_allclose(dist.pmf(k[1:] + loc), np.diff(dist.cdf(k + loc)))
+    npt.assert_equal(dist.pmf(k_shifted + loc), 0)
+
+
+def check_moment_frozen(distfn, arg, m, k):
+    npt.assert_allclose(distfn(*arg).moment(k), m,
+                        atol=1e-10, rtol=1e-10)
+
+
+def check_oth(distfn, arg, supp, msg):
+    # checking other methods of distfn
+    npt.assert_allclose(distfn.sf(supp, *arg), 1. - distfn.cdf(supp, *arg),
+                        atol=1e-10, rtol=1e-10)
+
+    q = np.linspace(0.01, 0.99, 20)
+    npt.assert_allclose(distfn.isf(q, *arg), distfn.ppf(1. - q, *arg),
+                        atol=1e-10, rtol=1e-10)
+
+    median_sf = distfn.isf(0.5, *arg)
+    npt.assert_(distfn.sf(median_sf - 1, *arg) > 0.5)
+    npt.assert_(distfn.cdf(median_sf + 1, *arg) > 0.5)
+
+
+def check_discrete_chisquare(distfn, arg, rvs, alpha, msg):
+    """Perform chisquare test for random sample of a discrete distribution
+
+    Parameters
+    ----------
+    distname : string
+        name of distribution function
+    arg : sequence
+        parameters of distribution
+    alpha : float
+        significance level, threshold for p-value
+
+    Returns
+    -------
+    result : bool
+        0 if test passes, 1 if test fails
+
+    """
+    wsupp = 0.05
+
+    # construct intervals with minimum mass `wsupp`.
+    # intervals are left-half-open as in a cdf difference
+    _a, _b = distfn.support(*arg)
+    lo = int(max(_a, -1000))
+    high = int(min(_b, 1000)) + 1
+    distsupport = range(lo, high)
+    last = 0
+    distsupp = [lo]
+    distmass = []
+    for ii in distsupport:
+        current = distfn.cdf(ii, *arg)
+        if current - last >= wsupp - 1e-14:
+            distsupp.append(ii)
+            distmass.append(current - last)
+            last = current
+            if current > (1 - wsupp):
+                break
+    if distsupp[-1] < _b:
+        distsupp.append(_b)
+        distmass.append(1 - last)
+    distsupp = np.array(distsupp)
+    distmass = np.array(distmass)
+
+    # convert intervals to right-half-open as required by histogram
+    histsupp = distsupp + 1e-8
+    histsupp[0] = _a
+
+    # find sample frequencies and perform chisquare test
+    freq, hsupp = np.histogram(rvs, histsupp)
+    chis, pval = stats.chisquare(np.array(freq), len(rvs)*distmass)
+
+    npt.assert_(pval > alpha,
+                'chisquare - test for %s at arg = %s with pval = %s' %
+                (msg, str(arg), str(pval)))
+
+
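+# Note on the test above (illustration only): each bin carries probability
+# mass distmass, so the expected count is len(rvs)*distmass, and
+# stats.chisquare(freq, len(rvs)*distmass) returns the statistic and p-value;
+# the assertion fails when the p-value drops below `alpha`.
+
+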
+def check_scale_docstring(distfn):
+    if distfn.__doc__ is not None:
+        # Docstrings can be stripped if interpreter is run with -OO
+        npt.assert_('scale' not in distfn.__doc__)
+
+
+@pytest.mark.parametrize('method', ['pmf', 'logpmf', 'cdf', 'logcdf',
+                                    'sf', 'logsf', 'ppf', 'isf'])
+@pytest.mark.parametrize('distname, args', distdiscrete)
+def test_methods_with_lists(method, distname, args):
+    # Test that the discrete distributions can accept Python lists
+    # as arguments.
+    try:
+        dist = getattr(stats, distname)
+    except TypeError:
+        return
+    if method in ['ppf', 'isf']:
+        z = [0.1, 0.2]
+    else:
+        z = [0, 1]
+    # use the parametrized method rather than hard-coding `pmf`
+    f = getattr(dist, method)
+    p2 = [[p]*2 for p in args]
+    loc = [0, 1]
+    result = f(z, *p2, loc=loc)
+    npt.assert_allclose(result,
+                        [f(*v) for v in zip(z, *p2, loc)],
+                        rtol=1e-15, atol=1e-15)
+
+
+@pytest.mark.parametrize('distname, args', invdistdiscrete)
+def test_cdf_gh13280_regression(distname, args):
+    # Test for nan output when shape parameters are invalid
+    dist = getattr(stats, distname)
+    x = np.arange(-2, 15)
+    vals = dist.cdf(x, *args)
+    expected = np.nan
+    npt.assert_equal(vals, expected)
+
+
+def cases_test_discrete_integer_shapes():
+    # Distribution parameters that must be integral only when fitting,
+    # but are allowed to be real-valued as inputs to the PMF, etc.
+    integrality_exceptions = {'nbinom': {'n'}}
+
+    seen = set()
+    for distname, shapes in distdiscrete:
+        if distname in seen:
+            continue
+        seen.add(distname)
+
+        try:
+            dist = getattr(stats, distname)
+        except TypeError:
+            continue
+
+        shape_info = dist._shape_info()
+
+        for i, shape in enumerate(shape_info):
+            if (shape.name in integrality_exceptions.get(distname, set()) or
+                    not shape.integrality):
+                continue
+
+            yield distname, shape.name, shapes
+
+
+@pytest.mark.parametrize('distname, shapename, shapes',
+                         cases_test_discrete_integer_shapes())
+def test_integer_shapes(distname, shapename, shapes):
+    dist = getattr(stats, distname)
+    shape_info = dist._shape_info()
+    shape_names = [shape.name for shape in shape_info]
+    i = shape_names.index(shapename)  # this element of params must be integral
+
+    shapes_copy = list(shapes)
+
+    valid_shape = shapes[i]
+    invalid_shape = valid_shape - 0.5  # arbitrary non-integral value
+    new_valid_shape = valid_shape - 1
+    shapes_copy[i] = [[valid_shape], [invalid_shape], [new_valid_shape]]
+
+    a, b = dist.support(*shapes)
+    x = np.round(np.linspace(a, b, 5))
+
+    pmf = dist.pmf(x, *shapes_copy)
+    assert not np.any(np.isnan(pmf[0, :]))
+    assert np.all(np.isnan(pmf[1, :]))
+    assert not np.any(np.isnan(pmf[2, :]))
+
+
+def test_frozen_attributes():
+    # gh-14827 reported that all frozen distributions had both pmf and pdf
+    # attributes; continuous should have pdf and discrete should have pmf.
+    message = "'rv_discrete_frozen' object has no attribute"
+    with pytest.raises(AttributeError, match=message):
+        stats.binom(10, 0.5).pdf
+    with pytest.raises(AttributeError, match=message):
+        stats.binom(10, 0.5).logpdf
+    stats.binom.pdf = "herring"
+    frozen_binom = stats.binom(10, 0.5)
+    assert isinstance(frozen_binom, rv_discrete_frozen)
+    delattr(stats.binom, 'pdf')
+
+
+@pytest.mark.parametrize('distname, shapes', distdiscrete)
+def test_interval(distname, shapes):
+    # gh-11026 reported that `interval` returns incorrect values when
+    # `confidence=1`. The values were not incorrect, but it was not intuitive
+    # that the left end of the interval should extend beyond the support of the
+    # distribution. Confirm that this is the behavior for all distributions.
+    if isinstance(distname, str):
+        dist = getattr(stats, distname)
+    else:
+        dist = distname
+    a, b = dist.support(*shapes)
+    npt.assert_equal(dist.ppf([0, 1], *shapes), (a-1, b))
+    npt.assert_equal(dist.isf([1, 0], *shapes), (a-1, b))
+    npt.assert_equal(dist.interval(1, *shapes), (a-1, b))
+
+
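+# Worked example of the confidence=1 behavior above (illustration only): for
+# a Poisson distribution with mu=5,
+#
+#     stats.poisson.interval(1, 5)   # -> (-1.0, inf)
+#
+# i.e. the left endpoint is ppf(0) = a - 1, one below the support minimum.
+
+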
+def test_rv_sample():
+    # Thoroughly test rv_sample and check that gh-3758 is resolved
+
+    # Generate a random discrete distribution
+    rng = np.random.default_rng(98430143469)
+    xk = np.sort(rng.random(10) * 10)
+    pk = rng.random(10)
+    pk /= np.sum(pk)
+    dist = stats.rv_discrete(values=(xk, pk))
+
+    # Generate points to the left and right of xk
+    xk_left = (np.array([0] + xk[:-1].tolist()) + xk)/2
+    xk_right = (np.array(xk[1:].tolist() + [xk[-1]+1]) + xk)/2
+
+    # Generate points to the left and right of cdf
+    cdf2 = np.cumsum(pk)
+    cdf2_left = (np.array([0] + cdf2[:-1].tolist()) + cdf2)/2
+    cdf2_right = (np.array(cdf2[1:].tolist() + [1]) + cdf2)/2
+
+    # support - leftmost and rightmost xk
+    a, b = dist.support()
+    assert_allclose(a, xk[0])
+    assert_allclose(b, xk[-1])
+
+    # pmf - supported only on the xk
+    assert_allclose(dist.pmf(xk), pk)
+    assert_allclose(dist.pmf(xk_right), 0)
+    assert_allclose(dist.pmf(xk_left), 0)
+
+    # logpmf is log of the pmf; log(0) = -np.inf
+    with np.errstate(divide='ignore'):
+        assert_allclose(dist.logpmf(xk), np.log(pk))
+        assert_allclose(dist.logpmf(xk_right), -np.inf)
+        assert_allclose(dist.logpmf(xk_left), -np.inf)
+
+    # cdf - the cumulative sum of the pmf
+    assert_allclose(dist.cdf(xk), cdf2)
+    assert_allclose(dist.cdf(xk_right), cdf2)
+    assert_allclose(dist.cdf(xk_left), [0]+cdf2[:-1].tolist())
+
+    with np.errstate(divide='ignore'):
+        assert_allclose(dist.logcdf(xk), np.log(dist.cdf(xk)),
+                        atol=1e-15)
+        assert_allclose(dist.logcdf(xk_right), np.log(dist.cdf(xk_right)),
+                        atol=1e-15)
+        assert_allclose(dist.logcdf(xk_left), np.log(dist.cdf(xk_left)),
+                        atol=1e-15)
+
+    # sf is 1-cdf
+    assert_allclose(dist.sf(xk), 1-dist.cdf(xk))
+    assert_allclose(dist.sf(xk_right), 1-dist.cdf(xk_right))
+    assert_allclose(dist.sf(xk_left), 1-dist.cdf(xk_left))
+
+    with np.errstate(divide='ignore'):
+        assert_allclose(dist.logsf(xk), np.log(dist.sf(xk)),
+                        atol=1e-15)
+        assert_allclose(dist.logsf(xk_right), np.log(dist.sf(xk_right)),
+                        atol=1e-15)
+        assert_allclose(dist.logsf(xk_left), np.log(dist.sf(xk_left)),
+                        atol=1e-15)
+
+    # ppf
+    assert_allclose(dist.ppf(cdf2), xk)
+    assert_allclose(dist.ppf(cdf2_left), xk)
+    assert_allclose(dist.ppf(cdf2_right)[:-1], xk[1:])
+    assert_allclose(dist.ppf(0), a - 1)
+    assert_allclose(dist.ppf(1), b)
+
+    # isf
+    sf2 = dist.sf(xk)
+    assert_allclose(dist.isf(sf2), xk)
+    assert_allclose(dist.isf(1-cdf2_left), dist.ppf(cdf2_left))
+    assert_allclose(dist.isf(1-cdf2_right), dist.ppf(cdf2_right))
+    assert_allclose(dist.isf(0), b)
+    assert_allclose(dist.isf(1), a - 1)
+
+    # interval is (ppf(alpha/2), isf(alpha/2))
+    ps = np.linspace(0.01, 0.99, 10)
+    int2 = dist.ppf(ps/2), dist.isf(ps/2)
+    assert_allclose(dist.interval(1-ps), int2)
+    assert_allclose(dist.interval(0), dist.median())
+    assert_allclose(dist.interval(1), (a-1, b))
+
+    # median is simply ppf(0.5)
+    med2 = dist.ppf(0.5)
+    assert_allclose(dist.median(), med2)
+
+    # all four stats (mean, var, skew, and kurtosis) from the definitions
+    mean2 = np.sum(xk*pk)
+    var2 = np.sum((xk - mean2)**2 * pk)
+    skew2 = np.sum((xk - mean2)**3 * pk) / var2**(3/2)
+    kurt2 = np.sum((xk - mean2)**4 * pk) / var2**2 - 3
+    assert_allclose(dist.mean(), mean2)
+    assert_allclose(dist.std(), np.sqrt(var2))
+    assert_allclose(dist.var(), var2)
+    assert_allclose(dist.stats(moments='mvsk'), (mean2, var2, skew2, kurt2))
+
+    # noncentral moment against definition
+    mom3 = np.sum((xk**3) * pk)
+    assert_allclose(dist.moment(3), mom3)
+
+    # expect - check against moments
+    assert_allclose(dist.expect(lambda x: 1), 1)
+    assert_allclose(dist.expect(), mean2)
+    assert_allclose(dist.expect(lambda x: x**3), mom3)
+
+    # entropy is the negative of the expected value of log(p)
+    with np.errstate(divide='ignore'):
+        assert_allclose(-dist.expect(lambda x: dist.logpmf(x)), dist.entropy())
+
+    # RVS is just ppf of uniform random variates
+    rng = np.random.default_rng(98430143469)
+    rvs = dist.rvs(size=100, random_state=rng)
+    rng = np.random.default_rng(98430143469)
+    rvs0 = dist.ppf(rng.random(size=100))
+    assert_allclose(rvs, rvs0)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_discrete_distns.py b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_discrete_distns.py
new file mode 100644
index 00000000..44d14388
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_discrete_distns.py
@@ -0,0 +1,566 @@
+import pytest
+from scipy.stats import (betabinom, hypergeom, nhypergeom, bernoulli,
+                         boltzmann, skellam, zipf, zipfian, binom, nbinom,
+                         nchypergeom_fisher, nchypergeom_wallenius, randint)
+
+import numpy as np
+from numpy.testing import (
+    assert_almost_equal, assert_equal, assert_allclose, suppress_warnings
+)
+from scipy.special import binom as special_binom
+from scipy.optimize import root_scalar
+from scipy.integrate import quad
+
+
+# The expected values were computed with Wolfram Alpha, using
+# the expression CDF[HypergeometricDistribution[N, n, M], k].
+@pytest.mark.parametrize('k, M, n, N, expected, rtol',
+                         [(3, 10, 4, 5,
+                           0.9761904761904762, 1e-15),
+                          (107, 10000, 3000, 215,
+                           0.9999999997226765, 1e-15),
+                          (10, 10000, 3000, 215,
+                           2.681682217692179e-21, 5e-11)])
+def test_hypergeom_cdf(k, M, n, N, expected, rtol):
+    p = hypergeom.cdf(k, M, n, N)
+    assert_allclose(p, expected, rtol=rtol)
+
+
+# The expected values were computed with Wolfram Alpha, using
+# the expression SurvivalFunction[HypergeometricDistribution[N, n, M], k].
+@pytest.mark.parametrize('k, M, n, N, expected, rtol',
+                         [(25, 10000, 3000, 215,
+                           0.9999999999052958, 1e-15),
+                          (125, 10000, 3000, 215,
+                           1.4416781705752128e-18, 5e-11)])
+def test_hypergeom_sf(k, M, n, N, expected, rtol):
+    p = hypergeom.sf(k, M, n, N)
+    assert_allclose(p, expected, rtol=rtol)
+
+
+def test_hypergeom_logpmf():
+    # symmetries test
+    # f(k,N,K,n) = f(n-k,N,N-K,n) = f(K-k,N,K,N-n) = f(k,N,n,K)
+    k = 5
+    N = 50
+    K = 10
+    n = 5
+    logpmf1 = hypergeom.logpmf(k, N, K, n)
+    logpmf2 = hypergeom.logpmf(n - k, N, N - K, n)
+    logpmf3 = hypergeom.logpmf(K - k, N, K, N - n)
+    logpmf4 = hypergeom.logpmf(k, N, n, K)
+    assert_almost_equal(logpmf1, logpmf2, decimal=12)
+    assert_almost_equal(logpmf1, logpmf3, decimal=12)
+    assert_almost_equal(logpmf1, logpmf4, decimal=12)
+
+    # test related distribution
+    # Bernoulli distribution if n = 1
+    k = 1
+    N = 10
+    K = 7
+    n = 1
+    hypergeom_logpmf = hypergeom.logpmf(k, N, K, n)
+    bernoulli_logpmf = bernoulli.logpmf(k, K/N)
+    assert_almost_equal(hypergeom_logpmf, bernoulli_logpmf, decimal=12)
+
+
+def test_nhypergeom_pmf():
+    # test with hypergeom
+    M, n, r = 45, 13, 8
+    k = 6
+    NHG = nhypergeom.pmf(k, M, n, r)
+    HG = hypergeom.pmf(k, M, n, k+r-1) * (M - n - (r-1)) / (M - (k+r-1))
+    assert_allclose(HG, NHG, rtol=1e-10)
+
+
+def test_nhypergeom_pmfcdf():
+    # test pmf and cdf with arbitrary values.
+    M = 8
+    n = 3
+    r = 4
+    support = np.arange(n+1)
+    pmf = nhypergeom.pmf(support, M, n, r)
+    cdf = nhypergeom.cdf(support, M, n, r)
+    assert_allclose(pmf, [1/14, 3/14, 5/14, 5/14], rtol=1e-13)
+    assert_allclose(cdf, [1/14, 4/14, 9/14, 1.0], rtol=1e-13)
+
+
+def test_nhypergeom_r0():
+    # test with `r = 0`.
+    M = 10
+    n = 3
+    r = 0
+    pmf = nhypergeom.pmf([[0, 1, 2, 0], [1, 2, 0, 3]], M, n, r)
+    assert_allclose(pmf, [[1, 0, 0, 1], [0, 0, 1, 0]], rtol=1e-13)
+
+
+def test_nhypergeom_rvs_shape():
+    # Check that when given a size with more dimensions than the
+    # dimensions of the broadcast parameters, rvs returns an array
+    # with the correct shape.
+    x = nhypergeom.rvs(22, [7, 8, 9], [[12], [13]], size=(5, 1, 2, 3))
+    assert x.shape == (5, 1, 2, 3)
+
+
+def test_nhypergeom_accuracy():
+    # Check that nhypergeom.rvs post-gh-13431 gives the same values as
+    # inverse transform sampling
+    np.random.seed(0)
+    x = nhypergeom.rvs(22, 7, 11, size=100)
+    np.random.seed(0)
+    p = np.random.uniform(size=100)
+    y = nhypergeom.ppf(p, 22, 7, 11)
+    assert_equal(x, y)
+
+
+def test_boltzmann_upper_bound():
+    k = np.arange(-3, 5)
+
+    N = 1
+    p = boltzmann.pmf(k, 0.123, N)
+    expected = k == 0
+    assert_equal(p, expected)
+
+    lam = np.log(2)
+    N = 3
+    p = boltzmann.pmf(k, lam, N)
+    expected = [0, 0, 0, 4/7, 2/7, 1/7, 0, 0]
+    assert_allclose(p, expected, rtol=1e-13)
+
+    c = boltzmann.cdf(k, lam, N)
+    expected = [0, 0, 0, 4/7, 6/7, 1, 1, 1]
+    assert_allclose(c, expected, rtol=1e-13)
+
+
+def test_betabinom_a_and_b_unity():
+    # test limiting case that betabinom(n, 1, 1) is a discrete uniform
+    # distribution from 0 to n
+    n = 20
+    k = np.arange(n + 1)
+    p = betabinom(n, 1, 1).pmf(k)
+    expected = np.repeat(1 / (n + 1), n + 1)
+    assert_almost_equal(p, expected)
+
+
+def test_betabinom_bernoulli():
+    # test limiting case that betabinom(1, a, b) = bernoulli(a / (a + b))
+    a = 2.3
+    b = 0.63
+    k = np.arange(2)
+    p = betabinom(1, a, b).pmf(k)
+    expected = bernoulli(a / (a + b)).pmf(k)
+    assert_almost_equal(p, expected)
+
+
+def test_issue_10317():
+    alpha, n, p = 0.9, 10, 1
+    assert_equal(nbinom.interval(confidence=alpha, n=n, p=p), (0, 0))
+
+
+def test_issue_11134():
+    alpha, n, p = 0.95, 10, 0
+    assert_equal(binom.interval(confidence=alpha, n=n, p=p), (0, 0))
+
+
+def test_issue_7406():
+    np.random.seed(0)
+    assert_equal(binom.ppf(np.random.rand(10), 0, 0.5), 0)
+
+    # Also check that endpoints (q=0, q=1) are correct
+    assert_equal(binom.ppf(0, 0, 0.5), -1)
+    assert_equal(binom.ppf(1, 0, 0.5), 0)
+
+
+def test_issue_5122():
+    p = 0
+    n = np.random.randint(100, size=10)
+
+    x = 0
+    ppf = binom.ppf(x, n, p)
+    assert_equal(ppf, -1)
+
+    x = np.linspace(0.01, 0.99, 10)
+    ppf = binom.ppf(x, n, p)
+    assert_equal(ppf, 0)
+
+    x = 1
+    ppf = binom.ppf(x, n, p)
+    assert_equal(ppf, n)
+
+
+def test_issue_1603():
+    assert_equal(binom(1000, np.logspace(-3, -100)).ppf(0.01), 0)
+
+
+def test_issue_5503():
+    p = 0.5
+    x = np.logspace(3, 14, 12)
+    assert_allclose(binom.cdf(x, 2*x, p), 0.5, atol=1e-2)
+
+
+@pytest.mark.parametrize('x, n, p, cdf_desired', [
+    (300, 1000, 3/10, 0.51559351981411995636),
+    (3000, 10000, 3/10, 0.50493298381929698016),
+    (30000, 100000, 3/10, 0.50156000591726422864),
+    (300000, 1000000, 3/10, 0.50049331906666960038),
+    (3000000, 10000000, 3/10, 0.50015600124585261196),
+    (30000000, 100000000, 3/10, 0.50004933192735230102),
+    (30010000, 100000000, 3/10, 0.98545384016570790717),
+    (29990000, 100000000, 3/10, 0.01455017177985268670),
+    (29950000, 100000000, 3/10, 5.02250963487432024943e-28),
+])
+def test_issue_5503pt2(x, n, p, cdf_desired):
+    assert_allclose(binom.cdf(x, n, p), cdf_desired)
+
+
+def test_issue_5503pt3():
+    # From Wolfram Alpha: CDF[BinomialDistribution[1e12, 1e-12], 2]
+    assert_allclose(binom.cdf(2, 10**12, 10**-12), 0.91969860292869777384)
+
+
+def test_issue_6682():
+    # Reference value from R:
+    # options(digits=16)
+    # print(pnbinom(250, 50, 32/63, lower.tail=FALSE))
+    assert_allclose(nbinom.sf(250, 50, 32./63.), 1.460458510976452e-35)
+
+
+def test_boost_divide_by_zero_issue_15101():
+    n = 1000
+    p = 0.01
+    k = 996
+    assert_allclose(binom.pmf(k, n, p), 0.0)
+
+
+@pytest.mark.filterwarnings('ignore::RuntimeWarning')
+def test_skellam_gh11474():
+    # test issue reported in gh-11474 caused by `cdfchn`
+    mu = [1, 10, 100, 1000, 5000, 5050, 5100, 5250, 6000]
+    cdf = skellam.cdf(0, mu, mu)
+    # generated in R
+    # library(skellam)
+    # options(digits = 16)
+    # mu = c(1, 10, 100, 1000, 5000, 5050, 5100, 5250, 6000)
+    # pskellam(0, mu, mu, TRUE)
+    cdf_expected = [0.6542541612768356, 0.5448901559424127, 0.5141135799745580,
+                    0.5044605891382528, 0.5019947363350450, 0.5019848365953181,
+                    0.5019750827993392, 0.5019466621805060, 0.5018209330219539]
+    assert_allclose(cdf, cdf_expected)
+
+
+class TestZipfian:
+    def test_zipfian_asymptotic(self):
+        # test limiting case that zipfian(a, n) -> zipf(a) as n-> oo
+        a = 6.5
+        N = 10000000
+        k = np.arange(1, 21)
+        assert_allclose(zipfian.pmf(k, a, N), zipf.pmf(k, a))
+        assert_allclose(zipfian.cdf(k, a, N), zipf.cdf(k, a))
+        assert_allclose(zipfian.sf(k, a, N), zipf.sf(k, a))
+        assert_allclose(zipfian.stats(a, N, moments='msvk'),
+                        zipf.stats(a, moments='msvk'))
+
+    def test_zipfian_continuity(self):
+        # test that zipfian(0.99999999, n) ~ zipfian(1.00000001, n)
+        # (a = 1 switches between methods of calculating the harmonic sum)
+        alt1, agt1 = 0.99999999, 1.00000001
+        N = 30
+        k = np.arange(1, N + 1)
+        assert_allclose(zipfian.pmf(k, alt1, N), zipfian.pmf(k, agt1, N),
+                        rtol=5e-7)
+        assert_allclose(zipfian.cdf(k, alt1, N), zipfian.cdf(k, agt1, N),
+                        rtol=5e-7)
+        assert_allclose(zipfian.sf(k, alt1, N), zipfian.sf(k, agt1, N),
+                        rtol=5e-7)
+        assert_allclose(zipfian.stats(alt1, N, moments='msvk'),
+                        zipfian.stats(agt1, N, moments='msvk'), rtol=5e-7)
+
+    def test_zipfian_R(self):
+        # test against R VGAM package
+        # library(VGAM)
+        # k <- c(13, 16,  1,  4,  4,  8, 10, 19,  5,  7)
+        # a <- c(1.56712977, 3.72656295, 5.77665117, 9.12168729, 5.79977172,
+        #        4.92784796, 9.36078764, 4.3739616 , 7.48171872, 4.6824154)
+        # n <- c(70, 80, 48, 65, 83, 89, 50, 30, 20, 20)
+        # pmf <- dzipf(k, N = n, shape = a)
+        # cdf <- pzipf(k, N = n, shape = a)
+        # print(pmf)
+        # print(cdf)
+        np.random.seed(0)
+        k = np.random.randint(1, 20, size=10)
+        a = np.random.rand(10)*10 + 1
+        n = np.random.randint(1, 100, size=10)
+        pmf = [8.076972e-03, 2.950214e-05, 9.799333e-01, 3.216601e-06,
+               3.158895e-04, 3.412497e-05, 4.350472e-10, 2.405773e-06,
+               5.860662e-06, 1.053948e-04]
+        cdf = [0.8964133, 0.9998666, 0.9799333, 0.9999995, 0.9998584,
+               0.9999458, 1.0000000, 0.9999920, 0.9999977, 0.9998498]
+        # skip the first point; zipUC is not accurate for low a, n
+        assert_allclose(zipfian.pmf(k, a, n)[1:], pmf[1:], rtol=1e-6)
+        assert_allclose(zipfian.cdf(k, a, n)[1:], cdf[1:], rtol=5e-5)
+
+    np.random.seed(0)
+    naive_tests = np.vstack((np.logspace(-2, 1, 10),
+                             np.random.randint(2, 40, 10))).T
+
+    @pytest.mark.parametrize("a, n", naive_tests)
+    def test_zipfian_naive(self, a, n):
+        # test against bare-bones implementation
+
+        @np.vectorize
+        def Hns(n, s):
+            """Naive implementation of harmonic sum"""
+            return (1/np.arange(1, n+1)**s).sum()
+
+        @np.vectorize
+        def pzip(k, a, n):
+            """Naive implementation of zipfian pmf"""
+            if k < 1 or k > n:
+                return 0.
+            else:
+                return 1 / k**a / Hns(n, a)
+
+        k = np.arange(n+1)
+        pmf = pzip(k, a, n)
+        cdf = np.cumsum(pmf)
+        mean = np.average(k, weights=pmf)
+        var = np.average((k - mean)**2, weights=pmf)
+        std = var**0.5
+        skew = np.average(((k-mean)/std)**3, weights=pmf)
+        kurtosis = np.average(((k-mean)/std)**4, weights=pmf) - 3
+        assert_allclose(zipfian.pmf(k, a, n), pmf)
+        assert_allclose(zipfian.cdf(k, a, n), cdf)
+        assert_allclose(zipfian.stats(a, n, moments="mvsk"),
+                        [mean, var, skew, kurtosis])
+
+
+class TestNCH:
+    np.random.seed(2)  # seeds 0 and 1 had some xl = xu; randint failed
+    shape = (2, 4, 3)
+    max_m = 100
+    m1 = np.random.randint(1, max_m, size=shape)    # red balls
+    m2 = np.random.randint(1, max_m, size=shape)    # white balls
+    N = m1 + m2                                     # total balls
+    n = randint.rvs(0, N, size=N.shape)             # number of draws
+    xl = np.maximum(0, n-m2)                        # lower bound of support
+    xu = np.minimum(n, m1)                          # upper bound of support
+    x = randint.rvs(xl, xu, size=xl.shape)
+    odds = np.random.rand(*x.shape)*2
+
+    # test output is more readable when function names (strings) are passed
+    @pytest.mark.parametrize('dist_name',
+                             ['nchypergeom_fisher', 'nchypergeom_wallenius'])
+    def test_nch_hypergeom(self, dist_name):
+        # Both noncentral hypergeometric distributions reduce to the
+        # hypergeometric distribution when odds = 1
+        dists = {'nchypergeom_fisher': nchypergeom_fisher,
+                 'nchypergeom_wallenius': nchypergeom_wallenius}
+        dist = dists[dist_name]
+        x, N, m1, n = self.x, self.N, self.m1, self.n
+        assert_allclose(dist.pmf(x, N, m1, n, odds=1),
+                        hypergeom.pmf(x, N, m1, n))
+
+    def test_nchypergeom_fisher_naive(self):
+        # test against a very simple implementation
+        x, N, m1, n, odds = self.x, self.N, self.m1, self.n, self.odds
+
+        @np.vectorize
+        def pmf_mean_var(x, N, m1, n, w):
+            # simple implementation of nchypergeom_fisher pmf
+            m2 = N - m1
+            xl = np.maximum(0, n-m2)
+            xu = np.minimum(n, m1)
+
+            def f(x):
+                t1 = special_binom(m1, x)
+                t2 = special_binom(m2, n - x)
+                return t1 * t2 * w**x
+
+            def P(k):
+                return sum((f(y)*y**k for y in range(xl, xu + 1)))
+
+            P0 = P(0)
+            P1 = P(1)
+            P2 = P(2)
+            pmf = f(x) / P0
+            mean = P1 / P0
+            var = P2 / P0 - (P1 / P0)**2
+            return pmf, mean, var
+
+        pmf, mean, var = pmf_mean_var(x, N, m1, n, odds)
+        assert_allclose(nchypergeom_fisher.pmf(x, N, m1, n, odds), pmf)
+        assert_allclose(nchypergeom_fisher.stats(N, m1, n, odds, moments='m'),
+                        mean)
+        assert_allclose(nchypergeom_fisher.stats(N, m1, n, odds, moments='v'),
+                        var)
+
+    def test_nchypergeom_wallenius_naive(self):
+        # test against a very simple implementation
+
+        np.random.seed(2)
+        shape = (2, 4, 3)
+        max_m = 100
+        m1 = np.random.randint(1, max_m, size=shape)
+        m2 = np.random.randint(1, max_m, size=shape)
+        N = m1 + m2
+        n = randint.rvs(0, N, size=N.shape)
+        xl = np.maximum(0, n-m2)
+        xu = np.minimum(n, m1)
+        x = randint.rvs(xl, xu, size=xl.shape)
+        w = np.random.rand(*x.shape)*2
+
+        def support(N, m1, n, w):
+            m2 = N - m1
+            xl = np.maximum(0, n-m2)
+            xu = np.minimum(n, m1)
+            return xl, xu
+
+        @np.vectorize
+        def mean(N, m1, n, w):
+            m2 = N - m1
+            xl, xu = support(N, m1, n, w)
+
+            def fun(u):
+                return u/m1 + (1 - (n-u)/m2)**w - 1
+
+            return root_scalar(fun, bracket=(xl, xu)).root
+
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning,
+                       message="invalid value encountered in mean")
+            assert_allclose(nchypergeom_wallenius.mean(N, m1, n, w),
+                            mean(N, m1, n, w), rtol=2e-2)
+
+        @np.vectorize
+        def variance(N, m1, n, w):
+            m2 = N - m1
+            u = mean(N, m1, n, w)
+            a = u * (m1 - u)
+            b = (n-u)*(u + m2 - n)
+            return N*a*b / ((N-1) * (m1*b + m2*a))
+
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning,
+                       message="invalid value encountered in mean")
+            assert_allclose(
+                nchypergeom_wallenius.stats(N, m1, n, w, moments='v'),
+                variance(N, m1, n, w),
+                rtol=5e-2
+            )
+
+        @np.vectorize
+        def pmf(x, N, m1, n, w):
+            m2 = N - m1
+            xl, xu = support(N, m1, n, w)
+
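+            # Descriptive note: Wallenius' noncentral hypergeometric pmf is
+            #     P(X = x) = C(m1, x) * C(m2, n - x)
+            #                * integral_0^1 (1 - t**(w/D))**x
+            #                               * (1 - t**(1/D))**(n - x) dt
+            # with D = w*(m1 - x) + (m2 - (n - x)); `integrand` and `f` below
+            # implement exactly this.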
+            def integrand(t):
+                D = w*(m1 - x) + (m2 - (n-x))
+                res = (1-t**(w/D))**x * (1-t**(1/D))**(n-x)
+                return res
+
+            def f(x):
+                t1 = special_binom(m1, x)
+                t2 = special_binom(m2, n - x)
+                the_integral = quad(integrand, 0, 1,
+                                    epsrel=1e-16, epsabs=1e-16)
+                return t1 * t2 * the_integral[0]
+
+            return f(x)
+
+        pmf0 = pmf(x, N, m1, n, w)
+        pmf1 = nchypergeom_wallenius.pmf(x, N, m1, n, w)
+
+        atol, rtol = 1e-6, 1e-6
+        i = np.abs(pmf1 - pmf0) < atol + rtol*np.abs(pmf0)
+        assert i.sum() > np.prod(shape) / 2  # works at least half the time
+
+        # for those that fail, discredit the naive implementation
+        for N, m1, n, w in zip(N[~i], m1[~i], n[~i], w[~i]):
+            # get the support
+            m2 = N - m1
+            xl, xu = support(N, m1, n, w)
+            x = np.arange(xl, xu + 1)
+
+            # calculate sum of pmf over the support
+            # the naive implementation is very wrong in these cases
+            assert pmf(x, N, m1, n, w).sum() < .5
+            assert_allclose(nchypergeom_wallenius.pmf(x, N, m1, n, w).sum(), 1)
+
+    def test_wallenius_against_mpmath(self):
+        # precompute data with mpmath since naive implementation above
+        # is not reliable. See source code in gh-13330.
+        M = 50
+        n = 30
+        N = 20
+        odds = 2.25
+        # Expected results, computed with mpmath.
+        sup = np.arange(21)
+        pmf = np.array([3.699003068656875e-20,
+                        5.89398584245431e-17,
+                        2.1594437742911123e-14,
+                        3.221458044649955e-12,
+                        2.4658279241205077e-10,
+                        1.0965862603981212e-08,
+                        3.057890479665704e-07,
+                        5.622818831643761e-06,
+                        7.056482841531681e-05,
+                        0.000618899425358671,
+                        0.003854172932571669,
+                        0.01720592676256026,
+                        0.05528844897093792,
+                        0.12772363313574242,
+                        0.21065898367825722,
+                        0.24465958845359234,
+                        0.1955114898110033,
+                        0.10355390084949237,
+                        0.03414490375225675,
+                        0.006231989845775931,
+                        0.0004715577304677075])
+        mean = 14.808018384813426
+        var = 2.6085975877923717
+
+        # nchypergeom_wallenius.pmf returns 0 for pmf(0) and pmf(1), and pmf(2)
+        # has only three digits of accuracy (~ 2.1511e-14).
+        assert_allclose(nchypergeom_wallenius.pmf(sup, M, n, N, odds), pmf,
+                        rtol=1e-13, atol=1e-13)
+        assert_allclose(nchypergeom_wallenius.mean(M, n, N, odds),
+                        mean, rtol=1e-13)
+        assert_allclose(nchypergeom_wallenius.var(M, n, N, odds),
+                        var, rtol=1e-11)
+
+    @pytest.mark.parametrize('dist_name',
+                             ['nchypergeom_fisher', 'nchypergeom_wallenius'])
+    def test_rvs_shape(self, dist_name):
+        # Check that when given a size with more dimensions than the
+        # dimensions of the broadcast parameters, rvs returns an array
+        # with the correct shape.
+        dists = {'nchypergeom_fisher': nchypergeom_fisher,
+                 'nchypergeom_wallenius': nchypergeom_wallenius}
+        dist = dists[dist_name]
+        x = dist.rvs(50, 30, [[10], [20]], [0.5, 1.0, 2.0], size=(5, 1, 2, 3))
+        assert x.shape == (5, 1, 2, 3)
+
+
+@pytest.mark.parametrize("mu, q, expected",
+                         [[10, 120, -1.240089881791596e-38],
+                          [1500, 0, -86.61466680572661]])
+def test_nbinom_11465(mu, q, expected):
+    # test nbinom.logcdf at extreme tails
+    size = 20
+    n, p = size, size/(size+mu)
+    # In R:
+    # options(digits=16)
+    # pnbinom(mu=10, size=20, q=120, log.p=TRUE)
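+    # and analogously for the second parametrized case:
+    # pnbinom(mu=1500, size=20, q=0, log.p=TRUE)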
+    assert_allclose(nbinom.logcdf(q, n, p), expected)
+
+
+def test_gh_17146():
+    # Check that discrete distributions return PMF of zero at non-integral x.
+    # See gh-17146.
+    x = np.linspace(0, 1, 11)
+    p = 0.8
+    pmf = bernoulli(p).pmf(x)
+    i = (x % 1 == 0)
+    assert_allclose(pmf[-1], p)
+    assert_allclose(pmf[0], 1-p)
+    assert_equal(pmf[~i], 0)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_distributions.py b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_distributions.py
new file mode 100755
index 00000000..e0c55071
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_distributions.py
@@ -0,0 +1,7625 @@
+"""
+Test functions for stats module
+"""
+import warnings
+import re
+import sys
+import pickle
+from pathlib import Path
+import os
+import json
+import platform
+
+from numpy.testing import (assert_equal, assert_array_equal,
+                           assert_almost_equal, assert_array_almost_equal,
+                           assert_allclose, assert_, assert_warns,
+                           assert_array_less, suppress_warnings, IS_PYPY)
+import pytest
+from pytest import raises as assert_raises
+
+import numpy
+import numpy as np
+from numpy import typecodes, array
+from numpy.lib.recfunctions import rec_append_fields
+from scipy import special
+from scipy._lib._util import check_random_state
+from scipy.integrate import (IntegrationWarning, quad, trapezoid,
+                             cumulative_trapezoid)
+import scipy.stats as stats
+from scipy.stats._distn_infrastructure import argsreduce
+import scipy.stats.distributions
+
+from scipy.special import xlogy, polygamma, entr
+from scipy.stats._distr_params import distcont, invdistcont
+from .test_discrete_basic import distdiscrete, invdistdiscrete
+from scipy.stats._continuous_distns import FitDataError, _argus_phi
+from scipy.optimize import root, fmin
+from itertools import product
+
+# python -OO strips docstrings
+DOCSTRINGS_STRIPPED = sys.flags.optimize > 1
+
+# Failing on macOS 11, Intel CPUs. See gh-14901
+MACOS_INTEL = (sys.platform == 'darwin') and (platform.machine() == 'x86_64')
+
+
+# distributions to skip while testing the fix for the support method
+# introduced in gh-13294. These distributions are skipped as they
+# always return a non-nan support for every parametrization.
+skip_test_support_gh13294_regression = ['tukeylambda', 'pearson3']
+
+
+def _assert_hasattr(a, b, msg=None):
+    if msg is None:
+        msg = '%s does not have attribute %s' % (a, b)
+    assert_(hasattr(a, b), msg=msg)
+
+
+def test_api_regression():
+    # https://github.com/scipy/scipy/issues/3802
+    _assert_hasattr(scipy.stats.distributions, 'f_gen')
+
+
+def check_vonmises_pdf_periodic(k, L, s, x):
+    vm = stats.vonmises(k, loc=L, scale=s)
+    assert_almost_equal(vm.pdf(x), vm.pdf(x % (2*numpy.pi*s)))
+
+
+def check_vonmises_cdf_periodic(k, L, s, x):
+    vm = stats.vonmises(k, loc=L, scale=s)
+    assert_almost_equal(vm.cdf(x) % 1, vm.cdf(x % (2*numpy.pi*s)) % 1)
+
+
+def test_distributions_submodule():
+    actual = set(scipy.stats.distributions.__all__)
+    continuous = [dist[0] for dist in distcont]    # continuous dist names
+    discrete = [dist[0] for dist in distdiscrete]  # discrete dist names
+    other = ['rv_discrete', 'rv_continuous', 'rv_histogram',
+             'entropy', 'trapz']
+    expected = continuous + discrete + other
+
+    # need to remove reprs of distribution objects, e.g.
+    # "<scipy.stats._continuous_distns...gen object at 0x...>"
+    expected = set(filter(lambda s: not str(s).startswith('<'), expected))
+
+    # gilbrat is deprecated and no longer in distcont
+    actual.remove('gilbrat')
+
+    assert actual == expected
+
+
+def test_vonmises_pdf_periodic():
+    for k in [0.1, 1, 101]:
+        for x in [0, 1, numpy.pi, 10, 100]:
+            check_vonmises_pdf_periodic(k, 0, 1, x)
+            check_vonmises_pdf_periodic(k, 1, 1, x)
+            check_vonmises_pdf_periodic(k, 0, 10, x)
+
+            check_vonmises_cdf_periodic(k, 0, 1, x)
+            check_vonmises_cdf_periodic(k, 1, 1, x)
+            check_vonmises_cdf_periodic(k, 0, 10, x)
+
+
+def test_vonmises_line_support():
+    assert_equal(stats.vonmises_line.a, -np.pi)
+    assert_equal(stats.vonmises_line.b, np.pi)
+
+
+def test_vonmises_numerical():
+    vm = stats.vonmises(800)
+    assert_almost_equal(vm.cdf(0), 0.5)
+
+
+# Expected values of the vonmises PDF were computed using
+# mpmath with 50 digits of precision:
+#
+# def vmpdf_mp(x, kappa):
+#     x = mpmath.mpf(x)
+#     kappa = mpmath.mpf(kappa)
+#     num = mpmath.exp(kappa*mpmath.cos(x))
+#     den = 2 * mpmath.pi * mpmath.besseli(0, kappa)
+#     return num/den
+
+@pytest.mark.parametrize('x, kappa, expected_pdf',
+                         [(0.1, 0.01, 0.16074242744907072),
+                          (0.1, 25.0, 1.7515464099118245),
+                          (0.1, 800, 0.2073272544458798),
+                          (2.0, 0.01, 0.15849003875385817),
+                          (2.0, 25.0, 8.356882934278192e-16),
+                          (2.0, 800, 0.0)])
+def test_vonmises_pdf(x, kappa, expected_pdf):
+    pdf = stats.vonmises.pdf(x, kappa)
+    assert_allclose(pdf, expected_pdf, rtol=1e-15)
+
+
+# Expected values of the vonmises entropy were computed using
+# mpmath with 50 digits of precision:
+#
+# def vonmises_entropy(kappa):
+#     kappa = mpmath.mpf(kappa)
+#     return (-kappa * mpmath.besseli(1, kappa) /
+#             mpmath.besseli(0, kappa) + mpmath.log(2 * mpmath.pi *
+#             mpmath.besseli(0, kappa)))
+# >>> float(vonmises_entropy(kappa))
+
+@pytest.mark.parametrize('kappa, expected_entropy',
+                         [(1, 1.6274014590199897),
+                          (5, 0.6756431570114528),
+                          (100, -0.8811275441649473),
+                          (1000, -2.03468891852547),
+                          (2000, -2.3813876496587847)])
+def test_vonmises_entropy(kappa, expected_entropy):
+    entropy = stats.vonmises.entropy(kappa)
+    assert_allclose(entropy, expected_entropy, rtol=1e-13)
+
+
+def test_vonmises_rvs_gh4598():
+    # check that random variates wrap around as discussed in gh-4598
+    seed = abs(hash('von_mises_rvs'))
+    rng1 = np.random.default_rng(seed)
+    rng2 = np.random.default_rng(seed)
+    rng3 = np.random.default_rng(seed)
+    rvs1 = stats.vonmises(1, loc=0, scale=1).rvs(random_state=rng1)
+    rvs2 = stats.vonmises(1, loc=2*np.pi, scale=1).rvs(random_state=rng2)
+    rvs3 = stats.vonmises(1, loc=0,
+                          scale=(2*np.pi/abs(rvs1)+1)).rvs(random_state=rng3)
+    assert_allclose(rvs1, rvs2, atol=1e-15)
+    assert_allclose(rvs1, rvs3, atol=1e-15)
+
+
+# Expected values of the vonmises LOGPDF were computed
+# using wolfram alpha:
+# kappa * cos(x) - log(2*pi*I0(kappa))
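+#
+# A sketch of the same computation with mpmath (an assumed equivalent of
+# the Wolfram Alpha calculation, not the original source of the values):
+#
+# def vmlogpdf_mp(x, kappa):
+#     x, kappa = mpmath.mpf(x), mpmath.mpf(kappa)
+#     return float(kappa*mpmath.cos(x)
+#                  - mpmath.log(2*mpmath.pi*mpmath.besseli(0, kappa)))
+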
+@pytest.mark.parametrize('x, kappa, expected_logpdf',
+                         [(0.1, 0.01, -1.8279520246003170),
+                          (0.1, 25.0, 0.5604990605420549),
+                          (0.1, 800, -1.5734567947337514),
+                          (2.0, 0.01, -1.8420635346185686),
+                          (2.0, 25.0, -34.7182759850871489),
+                          (2.0, 800, -1130.4942582548682739)])
+def test_vonmises_logpdf(x, kappa, expected_logpdf):
+    logpdf = stats.vonmises.logpdf(x, kappa)
+    assert_allclose(logpdf, expected_logpdf, rtol=1e-15)
+
+
+def test_vonmises_expect():
+    """
+    Test that the vonmises expectation values are
+    computed correctly.  This test checks that the
+    numeric integration estimates the correct normalization
+    (1) and mean angle (loc).  These expectations are
+    independent of the chosen 2pi interval.
+    """
+    rng = np.random.default_rng(6762668991392531563)
+
+    loc, kappa, lb = rng.random(3) * 10
+    res = stats.vonmises(loc=loc, kappa=kappa).expect(lambda x: 1)
+    assert_allclose(res, 1)
+    assert np.issubdtype(res.dtype, np.floating)
+
+    bounds = lb, lb + 2 * np.pi
+    res = stats.vonmises(loc=loc, kappa=kappa).expect(lambda x: 1, *bounds)
+    assert_allclose(res, 1)
+    assert np.issubdtype(res.dtype, np.floating)
+
+    bounds = lb, lb + 2 * np.pi
+    res = stats.vonmises(loc=loc, kappa=kappa).expect(lambda x: np.exp(1j*x),
+                                                      *bounds, complex_func=1)
+    assert_allclose(np.angle(res), loc % (2*np.pi))
+    assert np.issubdtype(res.dtype, np.complexfloating)
+
+
+def _assert_less_or_close_loglike(dist, data, func, **kwds):
+    """
+    This utility function checks that the log-likelihood (computed by
+    func) of the result computed using dist.fit() is less than or equal
+    to the result computed using the generic fit method.  Because of
+    normal numerical imprecision, the "equality" check is made using
+    `np.allclose` with a relative tolerance of 1e-15.
+    """
+    mle_analytical = dist.fit(data, **kwds)
+    numerical_opt = super(type(dist), dist).fit(data, **kwds)
+    ll_mle_analytical = func(mle_analytical, data)
+    ll_numerical_opt = func(numerical_opt, data)
+    assert (ll_mle_analytical <= ll_numerical_opt or
+            np.allclose(ll_mle_analytical, ll_numerical_opt, rtol=1e-15))
+
+
+def assert_fit_warnings(dist):
+    param = ['floc', 'fscale']
+    if dist.shapes:
+        nshapes = len(dist.shapes.split(","))
+        param += ['f0', 'f1', 'f2'][:nshapes]
+    all_fixed = dict(zip(param, np.arange(len(param))))
+    data = [1, 2, 3]
+    with pytest.raises(RuntimeError,
+                       match="All parameters fixed. There is nothing "
+                       "to optimize."):
+        dist.fit(data, **all_fixed)
+    with pytest.raises(ValueError,
+                       match="The data contains non-finite values"):
+        dist.fit([np.nan])
+    with pytest.raises(ValueError,
+                       match="The data contains non-finite values"):
+        dist.fit([np.inf])
+    with pytest.raises(TypeError, match="Unknown keyword arguments:"):
+        dist.fit(data, extra_keyword=2)
+    with pytest.raises(TypeError, match="Too many positional arguments."):
+        dist.fit(data, *[1]*(len(param) - 1))
+
+
+@pytest.mark.parametrize('dist',
+                         ['alpha', 'betaprime',
+                          'fatiguelife', 'invgamma', 'invgauss', 'invweibull',
+                          'johnsonsb', 'levy', 'levy_l', 'lognorm', 'gibrat',
+                          'powerlognorm', 'rayleigh', 'wald'])
+def test_support(dist):
+    """gh-6235"""
+    dct = dict(distcont)
+    args = dct[dist]
+
+    dist = getattr(stats, dist)
+
+    assert_almost_equal(dist.pdf(dist.a, *args), 0)
+    assert_equal(dist.logpdf(dist.a, *args), -np.inf)
+    assert_almost_equal(dist.pdf(dist.b, *args), 0)
+    assert_equal(dist.logpdf(dist.b, *args), -np.inf)
+
+
+class TestRandInt:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_rvs(self):
+        vals = stats.randint.rvs(5, 30, size=100)
+        assert_(numpy.all(vals < 30) & numpy.all(vals >= 5))
+        assert_(len(vals) == 100)
+        vals = stats.randint.rvs(5, 30, size=(2, 50))
+        assert_(numpy.shape(vals) == (2, 50))
+        assert_(vals.dtype.char in typecodes['AllInteger'])
+        val = stats.randint.rvs(15, 46)
+        assert_((val >= 15) & (val < 46))
+        assert_(isinstance(val, numpy.ScalarType), msg=repr(type(val)))
+        val = stats.randint(15, 46).rvs(3)
+        assert_(val.dtype.char in typecodes['AllInteger'])
+
+    def test_pdf(self):
+        k = numpy.r_[0:36]
+        out = numpy.where((k >= 5) & (k < 30), 1.0/(30-5), 0)
+        vals = stats.randint.pmf(k, 5, 30)
+        assert_array_almost_equal(vals, out)
+
+    def test_cdf(self):
+        x = np.linspace(0, 36, 100)
+        k = numpy.floor(x)
+        out = numpy.select([k >= 30, k >= 5], [1.0, (k-5.0+1)/(30-5.0)], 0)
+        vals = stats.randint.cdf(x, 5, 30)
+        assert_array_almost_equal(vals, out, decimal=12)
+
+
+class TestBinom:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_rvs(self):
+        vals = stats.binom.rvs(10, 0.75, size=(2, 50))
+        assert_(numpy.all(vals >= 0) & numpy.all(vals <= 10))
+        assert_(numpy.shape(vals) == (2, 50))
+        assert_(vals.dtype.char in typecodes['AllInteger'])
+        val = stats.binom.rvs(10, 0.75)
+        assert_(isinstance(val, int))
+        val = stats.binom(10, 0.75).rvs(3)
+        assert_(isinstance(val, numpy.ndarray))
+        assert_(val.dtype.char in typecodes['AllInteger'])
+
+    def test_pmf(self):
+        # regression test for Ticket #1842
+        vals1 = stats.binom.pmf(100, 100, 1)
+        vals2 = stats.binom.pmf(0, 100, 0)
+        assert_allclose(vals1, 1.0, rtol=1e-15, atol=0)
+        assert_allclose(vals2, 1.0, rtol=1e-15, atol=0)
+
+    def test_entropy(self):
+        # Basic entropy tests.
+        b = stats.binom(2, 0.5)
+        expected_p = np.array([0.25, 0.5, 0.25])
+        expected_h = -sum(xlogy(expected_p, expected_p))
+        h = b.entropy()
+        assert_allclose(h, expected_h)
+
+        b = stats.binom(2, 0.0)
+        h = b.entropy()
+        assert_equal(h, 0.0)
+
+        b = stats.binom(2, 1.0)
+        h = b.entropy()
+        assert_equal(h, 0.0)
+
+    def test_warns_p0(self):
+        # no spurious warnings are generated for p=0; gh-3817
+        with warnings.catch_warnings():
+            warnings.simplefilter("error", RuntimeWarning)
+            assert_equal(stats.binom(n=2, p=0).mean(), 0)
+            assert_equal(stats.binom(n=2, p=0).std(), 0)
+
+
+class TestArcsine:
+
+    def test_endpoints(self):
+        # Regression test for gh-13697.  The following calculation
+        # should not generate a warning.
+        p = stats.arcsine.pdf([0, 1])
+        assert_equal(p, [np.inf, np.inf])
+
+
+class TestBernoulli:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_rvs(self):
+        vals = stats.bernoulli.rvs(0.75, size=(2, 50))
+        assert_(numpy.all(vals >= 0) & numpy.all(vals <= 1))
+        assert_(numpy.shape(vals) == (2, 50))
+        assert_(vals.dtype.char in typecodes['AllInteger'])
+        val = stats.bernoulli.rvs(0.75)
+        assert_(isinstance(val, int))
+        val = stats.bernoulli(0.75).rvs(3)
+        assert_(isinstance(val, numpy.ndarray))
+        assert_(val.dtype.char in typecodes['AllInteger'])
+
+    def test_entropy(self):
+        # Simple tests of entropy.
+        b = stats.bernoulli(0.25)
+        expected_h = -0.25*np.log(0.25) - 0.75*np.log(0.75)
+        h = b.entropy()
+        assert_allclose(h, expected_h)
+
+        b = stats.bernoulli(0.0)
+        h = b.entropy()
+        assert_equal(h, 0.0)
+
+        b = stats.bernoulli(1.0)
+        h = b.entropy()
+        assert_equal(h, 0.0)
+
+
+class TestBradford:
+    # gh-6216
+    def test_cdf_ppf(self):
+        c = 0.1
+        x = np.logspace(-20, -4)
+        q = stats.bradford.cdf(x, c)
+        xx = stats.bradford.ppf(q, c)
+        assert_allclose(x, xx)
+
+
+class TestChi:
+
+    # "Exact" value of chi.sf(10, 4), as computed by Wolfram Alpha with
+    #     1 - CDF[ChiDistribution[4], 10]
+    CHI_SF_10_4 = 9.83662422461598e-21
+    # "Exact" value of chi.mean(df=1000) as computed by Wolfram Alpha with
+    #       Mean[ChiDistribution[1000]]
+    CHI_MEAN_1000 = 31.614871896980
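+    # A sketch of how both constants could be cross-checked with mpmath
+    # (an assumed alternative to the Wolfram Alpha computations above):
+    #     import mpmath
+    #     # chi.sf(x, df) = Q(df/2, x**2/2), the regularized upper gamma
+    #     sf_10_4 = mpmath.gammainc(2, 50, mpmath.inf, regularized=True)
+    #     # chi.mean(df) = sqrt(2)*Gamma((df + 1)/2)/Gamma(df/2)
+    #     mean_1000 = mpmath.sqrt(2)*mpmath.gamma(500.5)/mpmath.gamma(500)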
+
+    def test_sf(self):
+        s = stats.chi.sf(10, 4)
+        assert_allclose(s, self.CHI_SF_10_4, rtol=1e-15)
+
+    def test_isf(self):
+        x = stats.chi.isf(self.CHI_SF_10_4, 4)
+        assert_allclose(x, 10, rtol=1e-15)
+
+    def test_mean(self):
+        x = stats.chi.mean(df=1000)
+        assert_allclose(x, self.CHI_MEAN_1000, rtol=1e-12)
+
+
+class TestNBinom:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_rvs(self):
+        vals = stats.nbinom.rvs(10, 0.75, size=(2, 50))
+        assert_(numpy.all(vals >= 0))
+        assert_(numpy.shape(vals) == (2, 50))
+        assert_(vals.dtype.char in typecodes['AllInteger'])
+        val = stats.nbinom.rvs(10, 0.75)
+        assert_(isinstance(val, int))
+        val = stats.nbinom(10, 0.75).rvs(3)
+        assert_(isinstance(val, numpy.ndarray))
+        assert_(val.dtype.char in typecodes['AllInteger'])
+
+    def test_pmf(self):
+        # regression test for ticket 1779
+        assert_allclose(np.exp(stats.nbinom.logpmf(700, 721, 0.52)),
+                        stats.nbinom.pmf(700, 721, 0.52))
+        # logpmf(0,1,1) shouldn't return nan (regression test for gh-4029)
+        val = scipy.stats.nbinom.logpmf(0, 1, 1)
+        assert_equal(val, 0)
+
+    def test_logcdf_gh16159(self):
+        # check that gh16159 is resolved.
+        vals = stats.nbinom.logcdf([0, 5, 0, 5], n=4.8, p=0.45)
+        ref = np.log(stats.nbinom.cdf([0, 5, 0, 5], n=4.8, p=0.45))
+        assert_allclose(vals, ref)
+
+
+class TestGenInvGauss:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    @pytest.mark.slow
+    def test_rvs_with_mode_shift(self):
+        # ratio_unif w/ mode shift
+        gig = stats.geninvgauss(2.3, 1.5)
+        _, p = stats.kstest(gig.rvs(size=1500, random_state=1234), gig.cdf)
+        assert_equal(p > 0.05, True)
+
+    @pytest.mark.slow
+    def test_rvs_without_mode_shift(self):
+        # ratio_unif w/o mode shift
+        gig = stats.geninvgauss(0.9, 0.75)
+        _, p = stats.kstest(gig.rvs(size=1500, random_state=1234), gig.cdf)
+        assert_equal(p > 0.05, True)
+
+    @pytest.mark.slow
+    def test_rvs_new_method(self):
+        # new algorithm of Hoermann / Leydold
+        gig = stats.geninvgauss(0.1, 0.2)
+        _, p = stats.kstest(gig.rvs(size=1500, random_state=1234), gig.cdf)
+        assert_equal(p > 0.05, True)
+
+    @pytest.mark.slow
+    def test_rvs_p_zero(self):
+        def my_ks_check(p, b):
+            gig = stats.geninvgauss(p, b)
+            rvs = gig.rvs(size=1500, random_state=1234)
+            return stats.kstest(rvs, gig.cdf)[1] > 0.05
+        # boundary cases when p = 0
+        assert_equal(my_ks_check(0, 0.2), True)  # new algo
+        assert_equal(my_ks_check(0, 0.9), True)  # ratio_unif w/o shift
+        assert_equal(my_ks_check(0, 1.5), True)  # ratio_unif with shift
+
+    def test_rvs_negative_p(self):
+        # if p negative, return inverse
+        assert_equal(
+                stats.geninvgauss(-1.5, 2).rvs(size=10, random_state=1234),
+                1 / stats.geninvgauss(1.5, 2).rvs(size=10, random_state=1234))
+
+    def test_invgauss(self):
+        # test that invgauss is a special case of geninvgauss
+        ig = stats.geninvgauss.rvs(size=1500, p=-0.5, b=1, random_state=1234)
+        assert_equal(stats.kstest(ig, 'invgauss', args=[1])[1] > 0.15, True)
+        # test pdf and cdf
+        mu, x = 100, np.linspace(0.01, 1, 10)
+        pdf_ig = stats.geninvgauss.pdf(x, p=-0.5, b=1 / mu, scale=mu)
+        assert_allclose(pdf_ig, stats.invgauss(mu).pdf(x))
+        cdf_ig = stats.geninvgauss.cdf(x, p=-0.5, b=1 / mu, scale=mu)
+        assert_allclose(cdf_ig, stats.invgauss(mu).cdf(x))
+
+    def test_pdf_R(self):
+        # test against R package GIGrvg
+        # x <- seq(0.01, 5, length.out = 10)
+        # GIGrvg::dgig(x, 0.5, 1, 1)
+        vals_R = np.array([2.081176820e-21, 4.488660034e-01, 3.747774338e-01,
+                           2.693297528e-01, 1.905637275e-01, 1.351476913e-01,
+                           9.636538981e-02, 6.909040154e-02, 4.978006801e-02,
+                           3.602084467e-02])
+        x = np.linspace(0.01, 5, 10)
+        assert_allclose(vals_R, stats.geninvgauss.pdf(x, 0.5, 1))
+
+    def test_pdf_zero(self):
+        # pdf at 0 is 0, needs special treatment to avoid 1/x in pdf
+        assert_equal(stats.geninvgauss.pdf(0, 0.5, 0.5), 0)
+        # if x is large and p is moderate, make sure that pdf does not
+        # overflow because of x**(p-1); exp(-b*x) forces pdf to zero
+        assert_equal(stats.geninvgauss.pdf(2e6, 50, 2), 0)
+
+
+class TestGenHyperbolic:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_pdf_r(self):
+        # test against R package GeneralizedHyperbolic
+        # x <- seq(-10, 10, length.out = 10)
+        # GeneralizedHyperbolic::dghyp(
+        #    x = x, lambda = 2, alpha = 2, beta = 1, delta = 1.5, mu = 0.5
+        # )
+        vals_R = np.array([
+            2.94895678275316e-13, 1.75746848647696e-10, 9.48149804073045e-08,
+            4.17862521692026e-05, 0.0103947630463822, 0.240864958986839,
+            0.162833527161649, 0.0374609592899472, 0.00634894847327781,
+            0.000941920705790324
+            ])
+
+        lmbda, alpha, beta = 2, 2, 1
+        mu, delta = 0.5, 1.5
+        args = (lmbda, alpha*delta, beta*delta)
+
+        gh = stats.genhyperbolic(*args, loc=mu, scale=delta)
+        x = np.linspace(-10, 10, 10)
+
+        assert_allclose(gh.pdf(x), vals_R, atol=0, rtol=1e-13)
+
+    def test_cdf_r(self):
+        # test against R package GeneralizedHyperbolic
+        # q <- seq(-10, 10, length.out = 10)
+        # GeneralizedHyperbolic::pghyp(
+        #   q = q, lambda = 2, alpha = 2, beta = 1, delta = 1.5, mu = 0.5
+        # )
+        vals_R = np.array([
+            1.01881590921421e-13, 6.13697274983578e-11, 3.37504977637992e-08,
+            1.55258698166181e-05, 0.00447005453832497, 0.228935323956347,
+            0.755759458895243, 0.953061062884484, 0.992598013917513,
+            0.998942646586662
+            ])
+
+        lmbda, alpha, beta = 2, 2, 1
+        mu, delta = 0.5, 1.5
+        args = (lmbda, alpha*delta, beta*delta)
+
+        gh = stats.genhyperbolic(*args, loc=mu, scale=delta)
+        x = np.linspace(-10, 10, 10)
+
+        assert_allclose(gh.cdf(x), vals_R, atol=0, rtol=1e-6)
+
+    def test_moments_r(self):
+        # test against R package GeneralizedHyperbolic
+        # sapply(1:4,
+        #    function(x) GeneralizedHyperbolic::ghypMom(
+        #        order = x, lambda = 2, alpha = 2,
+        #        beta = 1, delta = 1.5, mu = 0.5,
+        #        momType = 'raw')
+        # )
+
+        vals_R = [2.36848366948115, 8.4739346779246,
+                  37.8870502710066, 205.76608511485]
+
+        lmbda, alpha, beta = 2, 2, 1
+        mu, delta = 0.5, 1.5
+        args = (lmbda, alpha*delta, beta*delta)
+
+        vals_us = [
+            stats.genhyperbolic(*args, loc=mu, scale=delta).moment(i)
+            for i in range(1, 5)
+            ]
+
+        assert_allclose(vals_us, vals_R, atol=0, rtol=1e-13)
+
+    def test_rvs(self):
+        # Kolmogorov-Smirnov test to ensure alignment
+        # of analytical and empirical cdfs
+
+        lmbda, alpha, beta = 2, 2, 1
+        mu, delta = 0.5, 1.5
+        args = (lmbda, alpha*delta, beta*delta)
+
+        gh = stats.genhyperbolic(*args, loc=mu, scale=delta)
+        _, p = stats.kstest(gh.rvs(size=1500, random_state=1234), gh.cdf)
+
+        assert_equal(p > 0.05, True)
+
+    def test_pdf_t(self):
+        # Test against Student's t with 1 - 30 df
+        df = np.linspace(1, 30, 10)
+
+        # in principle alpha should be zero; in practice, for big lmbda,
+        # alpha cannot be too small, else the pdf does not integrate
+        alpha, beta = np.float_power(df, 2)*np.finfo(np.float32).eps, 0
+        mu, delta = 0, np.sqrt(df)
+        args = (-df/2, alpha, beta)
+
+        gh = stats.genhyperbolic(*args, loc=mu, scale=delta)
+        x = np.linspace(gh.ppf(0.01), gh.ppf(0.99), 50)[:, np.newaxis]
+
+        assert_allclose(
+            gh.pdf(x), stats.t.pdf(x, df),
+            atol=0, rtol=1e-6
+            )
+
+    def test_pdf_cauchy(self):
+        # Test Against Cauchy distribution
+
+        # in principle alpha should be zero; in practice,
+        # alpha cannot be too small, else the pdf does not integrate
+        lmbda, alpha, beta = -0.5, np.finfo(np.float32).eps, 0
+        mu, delta = 0, 1
+        args = (lmbda, alpha, beta)
+
+        gh = stats.genhyperbolic(*args, loc=mu, scale=delta)
+        x = np.linspace(gh.ppf(0.01), gh.ppf(0.99), 50)[:, np.newaxis]
+
+        assert_allclose(
+            gh.pdf(x), stats.cauchy.pdf(x),
+            atol=0, rtol=1e-6
+            )
+
+    def test_pdf_laplace(self):
+        # Test Against Laplace with location param [-10, 10]
+        loc = np.linspace(-10, 10, 10)
+
+        # in principle delta should be zero; in practice, for big loc,
+        # delta cannot be too small, else the pdf does not integrate
+        delta = np.finfo(np.float32).eps
+
+        lmbda, alpha, beta = 1, 1, 0
+        args = (lmbda, alpha*delta, beta*delta)
+
+        # the integration underlying ppf fails for scale < 5e-4,
+        # therefore use a simple linspace to define the support
+        gh = stats.genhyperbolic(*args, loc=loc, scale=delta)
+        x = np.linspace(-20, 20, 50)[:, np.newaxis]
+
+        assert_allclose(
+            gh.pdf(x), stats.laplace.pdf(x, loc=loc, scale=1),
+            atol=0, rtol=1e-11
+            )
+
+    def test_pdf_norminvgauss(self):
+        # Test Against NIG with varying alpha/beta/delta/mu
+
+        alpha, beta, delta, mu = (
+                np.linspace(1, 20, 10),
+                np.linspace(0, 19, 10)*np.float_power(-1, range(10)),
+                np.linspace(1, 1, 10),
+                np.linspace(-100, 100, 10)
+                )
+
+        lmbda = - 0.5
+        args = (lmbda, alpha * delta, beta * delta)
+
+        gh = stats.genhyperbolic(*args, loc=mu, scale=delta)
+        x = np.linspace(gh.ppf(0.01), gh.ppf(0.99), 50)[:, np.newaxis]
+
+        assert_allclose(
+            gh.pdf(x), stats.norminvgauss.pdf(
+                x, a=alpha, b=beta, loc=mu, scale=delta),
+            atol=0, rtol=1e-13
+            )
+
+
+class TestNormInvGauss:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_cdf_R(self):
+        # test pdf and cdf vals against R
+        # require("GeneralizedHyperbolic")
+        # x_test <- c(-7, -5, 0, 8, 15)
+        # r_cdf <- GeneralizedHyperbolic::pnig(x_test, mu = 0, a = 1, b = 0.5)
+        # r_pdf <- GeneralizedHyperbolic::dnig(x_test, mu = 0, a = 1, b = 0.5)
+        r_cdf = np.array([8.034920282e-07, 2.512671945e-05, 3.186661051e-01,
+                          9.988650664e-01, 9.999848769e-01])
+        x_test = np.array([-7, -5, 0, 8, 15])
+        vals_cdf = stats.norminvgauss.cdf(x_test, a=1, b=0.5)
+        assert_allclose(vals_cdf, r_cdf, atol=1e-9)
+
+    def test_pdf_R(self):
+        # values from R as defined in test_cdf_R
+        r_pdf = np.array([1.359600783e-06, 4.413878805e-05, 4.555014266e-01,
+                          7.450485342e-04, 8.917889931e-06])
+        x_test = np.array([-7, -5, 0, 8, 15])
+        vals_pdf = stats.norminvgauss.pdf(x_test, a=1, b=0.5)
+        assert_allclose(vals_pdf, r_pdf, atol=1e-9)
+
+    @pytest.mark.parametrize('x, a, b, sf, rtol',
+                             [(-1, 1, 0, 0.8759652211005315, 1e-13),
+                              (25, 1, 0, 1.1318690184042579e-13, 1e-4),
+                              (1, 5, -1.5, 0.002066711134653577, 1e-12),
+                              (10, 5, -1.5, 2.308435233930669e-29, 1e-9)])
+    def test_sf_isf_mpmath(self, x, a, b, sf, rtol):
+        # The data in this test is based on this code that uses mpmath:
+        #
+        # -----
+        # import mpmath
+        #
+        # mpmath.mp.dps = 50
+        #
+        # def pdf(x, a, b):
+        #     x = mpmath.mpf(x)
+        #     a = mpmath.mpf(a)
+        #     b = mpmath.mpf(b)
+        #     g = mpmath.sqrt(a**2 - b**2)
+        #     t = mpmath.sqrt(1 + x**2)
+        #     return (a * mpmath.besselk(1, a*t) * mpmath.exp(g + b*x)
+        #             / (mpmath.pi * t))
+        #
+        # def sf(x, a, b):
+        #     return mpmath.quad(lambda x: pdf(x, a, b), [x, mpmath.inf])
+        #
+        # -----
+        #
+        # In particular,
+        #
+        # >>> float(sf(-1, 1, 0))
+        # 0.8759652211005315
+        # >>> float(sf(25, 1, 0))
+        # 1.1318690184042579e-13
+        # >>> float(sf(1, 5, -1.5))
+        # 0.002066711134653577
+        # >>> float(sf(10, 5, -1.5))
+        # 2.308435233930669e-29
+
+        s = stats.norminvgauss.sf(x, a, b)
+        assert_allclose(s, sf, rtol=rtol)
+        i = stats.norminvgauss.isf(sf, a, b)
+        assert_allclose(i, x, rtol=rtol)
+
+    def test_sf_isf_mpmath_vectorized(self):
+        x = [-1, 25]
+        a = [1, 1]
+        b = 0
+        sf = [0.8759652211005315, 1.1318690184042579e-13]  # see previous test
+        s = stats.norminvgauss.sf(x, a, b)
+        assert_allclose(s, sf, rtol=1e-13, atol=1e-16)
+        i = stats.norminvgauss.isf(sf, a, b)
+        # Not perfect, but better than it was. See gh-13338.
+        assert_allclose(i, x, rtol=1e-6)
+
+    def test_gh8718(self):
+        # Add test that gh-13338 resolved gh-8718
+        dst = stats.norminvgauss(1, 0)
+        x = np.arange(0, 20, 2)
+        sf = dst.sf(x)
+        isf = dst.isf(sf)
+        assert_allclose(isf, x)
+
+    def test_stats(self):
+        a, b = 1, 0.5
+        gamma = np.sqrt(a**2 - b**2)
+        v_stats = (b / gamma, a**2 / gamma**3, 3.0 * b / (a * np.sqrt(gamma)),
+                   3.0 * (1 + 4 * b**2 / a**2) / gamma)
+        assert_equal(v_stats, stats.norminvgauss.stats(a, b, moments='mvsk'))
+
+    def test_ppf(self):
+        a, b = 1, 0.5
+        x_test = np.array([0.001, 0.5, 0.999])
+        vals = stats.norminvgauss.ppf(x_test, a, b)
+        assert_allclose(x_test, stats.norminvgauss.cdf(vals, a, b))
+
+
+class TestGeom:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_rvs(self):
+        vals = stats.geom.rvs(0.75, size=(2, 50))
+        assert_(numpy.all(vals >= 0))
+        assert_(numpy.shape(vals) == (2, 50))
+        assert_(vals.dtype.char in typecodes['AllInteger'])
+        val = stats.geom.rvs(0.75)
+        assert_(isinstance(val, int))
+        val = stats.geom(0.75).rvs(3)
+        assert_(isinstance(val, numpy.ndarray))
+        assert_(val.dtype.char in typecodes['AllInteger'])
+
+    def test_rvs_9313(self):
+        # previously, RVS were converted to `np.int32` on some platforms,
+        # causing overflow for moderately large integer output (gh-9313).
+        # Check that this is resolved to the extent possible w/ `np.int64`.
+        rng = np.random.default_rng(649496242618848)
+        rvs = stats.geom.rvs(np.exp(-35), size=5, random_state=rng)
+        assert rvs.dtype == np.int64
+        assert np.all(rvs > np.iinfo(np.int32).max)
+
+    def test_pmf(self):
+        vals = stats.geom.pmf([1, 2, 3], 0.5)
+        assert_array_almost_equal(vals, [0.5, 0.25, 0.125])
+
+    def test_logpmf(self):
+        # regression test for ticket 1793
+        vals1 = np.log(stats.geom.pmf([1, 2, 3], 0.5))
+        vals2 = stats.geom.logpmf([1, 2, 3], 0.5)
+        assert_allclose(vals1, vals2, rtol=1e-15, atol=0)
+
+        # regression test for gh-4028
+        val = stats.geom.logpmf(1, 1)
+        assert_equal(val, 0.0)
+
+    def test_cdf_sf(self):
+        vals = stats.geom.cdf([1, 2, 3], 0.5)
+        vals_sf = stats.geom.sf([1, 2, 3], 0.5)
+        expected = array([0.5, 0.75, 0.875])
+        assert_array_almost_equal(vals, expected)
+        assert_array_almost_equal(vals_sf, 1-expected)
+
+    def test_logcdf_logsf(self):
+        vals = stats.geom.logcdf([1, 2, 3], 0.5)
+        vals_sf = stats.geom.logsf([1, 2, 3], 0.5)
+        expected = array([0.5, 0.75, 0.875])
+        assert_array_almost_equal(vals, np.log(expected))
+        assert_array_almost_equal(vals_sf, np.log1p(-expected))
+
+    def test_ppf(self):
+        vals = stats.geom.ppf([0.5, 0.75, 0.875], 0.5)
+        expected = array([1.0, 2.0, 3.0])
+        assert_array_almost_equal(vals, expected)
+
+    def test_ppf_underflow(self):
+        # this should not underflow
+        assert_allclose(stats.geom.ppf(1e-20, 1e-20), 1.0, atol=1e-14)
+
+
+class TestPlanck:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_sf(self):
+        vals = stats.planck.sf([1, 2, 3], 5.)
+        expected = array([4.5399929762484854e-05,
+                          3.0590232050182579e-07,
+                          2.0611536224385579e-09])
+        assert_array_almost_equal(vals, expected)
+
+    def test_logsf(self):
+        vals = stats.planck.logsf([1000., 2000., 3000.], 1000.)
+        expected = array([-1001000., -2001000., -3001000.])
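+        # (for planck, sf(k, lambda_) = exp(-lambda_*(k + 1)), so these
+        # values are exactly -1000*(k + 1))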
+        assert_array_almost_equal(vals, expected)
+
+
+class TestGennorm:
+    def test_laplace(self):
+        # test against Laplace (special case for beta=1)
+        points = [1, 2, 3]
+        pdf1 = stats.gennorm.pdf(points, 1)
+        pdf2 = stats.laplace.pdf(points)
+        assert_almost_equal(pdf1, pdf2)
+
+    def test_norm(self):
+        # test against normal (special case for beta=2)
+        points = [1, 2, 3]
+        pdf1 = stats.gennorm.pdf(points, 2)
+        pdf2 = stats.norm.pdf(points, scale=2**-.5)
+        assert_almost_equal(pdf1, pdf2)
+
+    def test_rvs(self):
+        np.random.seed(0)
+        # 0 < beta < 1
+        dist = stats.gennorm(0.5)
+        rvs = dist.rvs(size=1000)
+        assert stats.kstest(rvs, dist.cdf).pvalue > 0.1
+        # beta = 1
+        dist = stats.gennorm(1)
+        rvs = dist.rvs(size=1000)
+        rvs_laplace = stats.laplace.rvs(size=1000)
+        assert stats.ks_2samp(rvs, rvs_laplace).pvalue > 0.1
+        # beta = 2
+        dist = stats.gennorm(2)
+        rvs = dist.rvs(size=1000)
+        rvs_norm = stats.norm.rvs(scale=1/2**0.5, size=1000)
+        assert stats.ks_2samp(rvs, rvs_norm).pvalue > 0.1
+
+    def test_rvs_broadcasting(self):
+        np.random.seed(0)
+        dist = stats.gennorm([[0.5, 1.], [2., 5.]])
+        rvs = dist.rvs(size=[1000, 2, 2])
+        assert stats.kstest(rvs[:, 0, 0], stats.gennorm(0.5).cdf)[1] > 0.1
+        assert stats.kstest(rvs[:, 0, 1], stats.gennorm(1.0).cdf)[1] > 0.1
+        assert stats.kstest(rvs[:, 1, 0], stats.gennorm(2.0).cdf)[1] > 0.1
+        assert stats.kstest(rvs[:, 1, 1], stats.gennorm(5.0).cdf)[1] > 0.1
+
+
+class TestHalfgennorm:
+    def test_expon(self):
+        # test against exponential (special case for beta=1)
+        points = [1, 2, 3]
+        pdf1 = stats.halfgennorm.pdf(points, 1)
+        pdf2 = stats.expon.pdf(points)
+        assert_almost_equal(pdf1, pdf2)
+
+    def test_halfnorm(self):
+        # test against half normal (special case for beta=2)
+        points = [1, 2, 3]
+        pdf1 = stats.halfgennorm.pdf(points, 2)
+        pdf2 = stats.halfnorm.pdf(points, scale=2**-.5)
+        assert_almost_equal(pdf1, pdf2)
+
+    def test_gennorm(self):
+        # test against generalized normal
+        points = [1, 2, 3]
+        pdf1 = stats.halfgennorm.pdf(points, .497324)
+        pdf2 = stats.gennorm.pdf(points, .497324)
+        assert_almost_equal(pdf1, 2*pdf2)
+
+
+class TestLaplaceasymmetric:
+    def test_laplace(self):
+        # test against Laplace (special case for kappa=1)
+        points = np.array([1, 2, 3])
+        pdf1 = stats.laplace_asymmetric.pdf(points, 1)
+        pdf2 = stats.laplace.pdf(points)
+        assert_allclose(pdf1, pdf2)
+
+    def test_asymmetric_laplace_pdf(self):
+        # test asymmetric Laplace
+        points = np.array([1, 2, 3])
+        kappa = 2
+        kapinv = 1/kappa
+        pdf1 = stats.laplace_asymmetric.pdf(points, kappa)
+        pdf2 = stats.laplace_asymmetric.pdf(points*(kappa**2), kapinv)
+        assert_allclose(pdf1, pdf2)
+
+    def test_asymmetric_laplace_log_10_16(self):
+        # test asymmetric Laplace
+        points = np.array([-np.log(16), np.log(10)])
+        kappa = 2
+        pdf1 = stats.laplace_asymmetric.pdf(points, kappa)
+        cdf1 = stats.laplace_asymmetric.cdf(points, kappa)
+        sf1 = stats.laplace_asymmetric.sf(points, kappa)
+        pdf2 = np.array([1/10, 1/250])
+        cdf2 = np.array([1/5, 1 - 1/500])
+        sf2 = np.array([4/5, 1/500])
+        ppf1 = stats.laplace_asymmetric.ppf(cdf2, kappa)
+        ppf2 = points
+        isf1 = stats.laplace_asymmetric.isf(sf2, kappa)
+        isf2 = points
+        assert_allclose(np.concatenate((pdf1, cdf1, sf1, ppf1, isf1)),
+                        np.concatenate((pdf2, cdf2, sf2, ppf2, isf2)))
+
+
+class TestTruncnorm:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_ppf_ticket1131(self):
+        vals = stats.truncnorm.ppf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1.,
+                                   loc=[3]*7, scale=2)
+        expected = np.array([np.nan, 1, 1.00056419, 3, 4.99943581, 5, np.nan])
+        assert_array_almost_equal(vals, expected)
+
+    def test_isf_ticket1131(self):
+        vals = stats.truncnorm.isf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1.,
+                                   loc=[3]*7, scale=2)
+        expected = np.array([np.nan, 5, 4.99943581, 3, 1.00056419, 1, np.nan])
+        assert_array_almost_equal(vals, expected)
+
+    def test_gh_2477_small_values(self):
+        # Check a case that worked in the original issue.
+        low, high = -11, -10
+        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
+        assert_(low < x.min() < x.max() < high)
+        # Check a case that failed in the original issue.
+        low, high = 10, 11
+        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
+        assert_(low < x.min() < x.max() < high)
+
+    def test_gh_2477_large_values(self):
+        # Check a case that used to fail because of extreme tail values.
+        low, high = 100, 101
+        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
+        assert_(low <= x.min() <= x.max() <= high, str([low, high, x]))
+
+        # Check some additional extreme tails
+        low, high = 1000, 1001
+        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
+        assert_(low < x.min() < x.max() < high)
+
+        low, high = 10000, 10001
+        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
+        assert_(low < x.min() < x.max() < high)
+
+        low, high = -10001, -10000
+        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
+        assert_(low < x.min() < x.max() < high)
+
+    def test_gh_9403_nontail_values(self):
+        for low, high in [[3, 4], [-4, -3]]:
+            xvals = np.array([-np.inf, low, high, np.inf])
+            xmid = (high+low)/2.0
+            cdfs = stats.truncnorm.cdf(xvals, low, high)
+            sfs = stats.truncnorm.sf(xvals, low, high)
+            pdfs = stats.truncnorm.pdf(xvals, low, high)
+            expected_cdfs = np.array([0, 0, 1, 1])
+            expected_sfs = np.array([1.0, 1.0, 0.0, 0.0])
+            expected_pdfs = np.array([0, 3.3619772, 0.1015229, 0])
+            if low < 0:
+                expected_pdfs = np.array([0, 0.1015229, 3.3619772, 0])
+            assert_almost_equal(cdfs, expected_cdfs)
+            assert_almost_equal(sfs, expected_sfs)
+            assert_almost_equal(pdfs, expected_pdfs)
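+            # (with high = low + 1, the log-ratio of the standard normal
+            # pdf at the endpoints is (high**2 - low**2)/2 = low + 0.5,
+            # which is what is checked below)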
+            assert_almost_equal(np.log(expected_pdfs[1]/expected_pdfs[2]),
+                                low + 0.5)
+            pvals = np.array([0, 0.5, 1.0])
+            ppfs = stats.truncnorm.ppf(pvals, low, high)
+            expected_ppfs = np.array([low, np.sign(low)*3.1984741, high])
+            assert_almost_equal(ppfs, expected_ppfs)
+
+            if low < 0:
+                assert_almost_equal(stats.truncnorm.sf(xmid, low, high),
+                                    0.8475544278436675)
+                assert_almost_equal(stats.truncnorm.cdf(xmid, low, high),
+                                    0.1524455721563326)
+            else:
+                assert_almost_equal(stats.truncnorm.cdf(xmid, low, high),
+                                    0.8475544278436675)
+                assert_almost_equal(stats.truncnorm.sf(xmid, low, high),
+                                    0.1524455721563326)
+            pdf = stats.truncnorm.pdf(xmid, low, high)
+            assert_almost_equal(np.log(pdf/expected_pdfs[2]), (xmid+0.25)/2)
+
+    def test_gh_9403_medium_tail_values(self):
+        for low, high in [[39, 40], [-40, -39]]:
+            xvals = np.array([-np.inf, low, high, np.inf])
+            xmid = (high+low)/2.0
+            cdfs = stats.truncnorm.cdf(xvals, low, high)
+            sfs = stats.truncnorm.sf(xvals, low, high)
+            pdfs = stats.truncnorm.pdf(xvals, low, high)
+            expected_cdfs = np.array([0, 0, 1, 1])
+            expected_sfs = np.array([1.0, 1.0, 0.0, 0.0])
+            expected_pdfs = np.array([0, 3.90256074e+01, 2.73349092e-16, 0])
+            if low < 0:
+                expected_pdfs = np.array([0, 2.73349092e-16,
+                                          3.90256074e+01, 0])
+            assert_almost_equal(cdfs, expected_cdfs)
+            assert_almost_equal(sfs, expected_sfs)
+            assert_almost_equal(pdfs, expected_pdfs)
+            assert_almost_equal(np.log(expected_pdfs[1]/expected_pdfs[2]),
+                                low + 0.5)
+            pvals = np.array([0, 0.5, 1.0])
+            ppfs = stats.truncnorm.ppf(pvals, low, high)
+            expected_ppfs = np.array([low, np.sign(low)*39.01775731, high])
+            assert_almost_equal(ppfs, expected_ppfs)
+            cdfs = stats.truncnorm.cdf(ppfs, low, high)
+            assert_almost_equal(cdfs, pvals)
+
+            if low < 0:
+                assert_almost_equal(stats.truncnorm.sf(xmid, low, high),
+                                    0.9999999970389126)
+                assert_almost_equal(stats.truncnorm.cdf(xmid, low, high),
+                                    2.961048103554866e-09)
+            else:
+                assert_almost_equal(stats.truncnorm.cdf(xmid, low, high),
+                                    0.9999999970389126)
+                assert_almost_equal(stats.truncnorm.sf(xmid, low, high),
+                                    2.961048103554866e-09)
+            pdf = stats.truncnorm.pdf(xmid, low, high)
+            assert_almost_equal(np.log(pdf/expected_pdfs[2]), (xmid+0.25)/2)
+
+            xvals = np.linspace(low, high, 11)
+            xvals2 = -xvals[::-1]
+            assert_almost_equal(stats.truncnorm.cdf(xvals, low, high),
+                                stats.truncnorm.sf(xvals2, -high, -low)[::-1])
+            assert_almost_equal(stats.truncnorm.sf(xvals, low, high),
+                                stats.truncnorm.cdf(xvals2, -high, -low)[::-1])
+            assert_almost_equal(stats.truncnorm.pdf(xvals, low, high),
+                                stats.truncnorm.pdf(xvals2, -high, -low)[::-1])
+
+    def test_cdf_tail_15110_14753(self):
+        # Check accuracy issues reported in gh-14753 and gh-15110
+        # Ground truth values calculated using Wolfram Alpha, e.g.
+        # (CDF[NormalDistribution[0,1],83/10]-CDF[NormalDistribution[0,1],8])/
+        #     (1 - CDF[NormalDistribution[0,1],8])
+        assert_allclose(stats.truncnorm(13., 15.).cdf(14.),
+                        0.9999987259565643)
+        assert_allclose(stats.truncnorm(8, np.inf).cdf(8.3),
+                        0.9163220907327540)
+
+    def _test_moments_one_range(self, a, b, expected, rtol=1e-7):
+        m0, v0, s0, k0 = expected[:4]
+        m, v, s, k = stats.truncnorm.stats(a, b, moments='mvsk')
+        assert_allclose(m, m0)
+        assert_allclose(v, v0)
+        assert_allclose(s, s0, rtol=rtol)
+        assert_allclose(k, k0, rtol=rtol)
+
+    # Test data for the truncnorm stats() method.
+    # The data in each row is:
+    #   a, b, mean, variance, skewness, excess kurtosis. Generated using
+    # https://gist.github.com/WarrenWeckesser/636b537ee889679227d53543d333a720
+    _truncnorm_stats_data = [
+        [-30, 30,
+         0.0, 1.0, 0.0, 0.0],
+        [-10, 10,
+         0.0, 1.0, 0.0, -1.4927521335810455e-19],
+        [-3, 3,
+         0.0, 0.9733369246625415, 0.0, -0.17111443639774404],
+        [-2, 2,
+         0.0, 0.7737413035499232, 0.0, -0.6344632828703505],
+        [0, np.inf,
+         0.7978845608028654,
+         0.3633802276324187,
+         0.995271746431156,
+         0.8691773036059741],
+        [-np.inf, 0,
+         -0.7978845608028654,
+         0.3633802276324187,
+         -0.995271746431156,
+         0.8691773036059741],
+        [-1, 3,
+         0.282786110727154,
+         0.6161417353578293,
+         0.5393018494027877,
+         -0.20582065135274694],
+        [-3, 1,
+         -0.282786110727154,
+         0.6161417353578293,
+         -0.5393018494027877,
+         -0.20582065135274694],
+        [-10, -9,
+         -9.108456288012409,
+         0.011448805821636248,
+         -1.8985607290949496,
+         5.0733461105025075],
+    ]
+    _truncnorm_stats_data = np.array(_truncnorm_stats_data)
+
+    @pytest.mark.parametrize("case", _truncnorm_stats_data)
+    def test_moments(self, case):
+        a, b, m0, v0, s0, k0 = case
+        m, v, s, k = stats.truncnorm.stats(a, b, moments='mvsk')
+        assert_allclose([m, v, s, k], [m0, v0, s0, k0], atol=1e-17)
+
+    def test_9902_moments(self):
+        m, v = stats.truncnorm.stats(0, np.inf, moments='mv')
+        assert_almost_equal(m, 0.79788456)
+        assert_almost_equal(v, 0.36338023)
+
+    def test_gh_1489_trac_962_rvs(self):
+        # Check the original example.
+        low, high = 10, 15
+        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
+        assert_(low < x.min() < x.max() < high)
+
+    def test_gh_11299_rvs(self):
+        # Arose from investigating gh-11299
+        # Test multiple shape parameters simultaneously.
+        low = [-10, 10, -np.inf, -5, -np.inf, -np.inf, -45, -45, 40, -10, 40]
+        high = [-5, 11, 5, np.inf, 40, -40, 40, -40, 45, np.inf, np.inf]
+        x = stats.truncnorm.rvs(low, high, size=(5, len(low)))
+        assert np.shape(x) == (5, len(low))
+        assert_(np.all(low <= x.min(axis=0)))
+        assert_(np.all(x.max(axis=0) <= high))
+
+    def test_rvs_Generator(self):
+        # check that rvs can use a Generator
+        if hasattr(np.random, "default_rng"):
+            stats.truncnorm.rvs(-10, -5, size=5,
+                                random_state=np.random.default_rng())
+
+    def test_logcdf_gh17064(self):
+        # regression test for gh-17064 - avoid roundoff error for logcdfs ~0
+        a = np.array([-np.inf, -np.inf, -8, -np.inf, 10])
+        b = np.array([np.inf, np.inf, 8, 10, np.inf])
+        x = np.array([10, 7.5, 7.5, 9, 20])
+        expected = [-7.619853024160525e-24, -3.190891672910947e-14,
+                    -3.128682067168231e-14, -1.1285122074235991e-19,
+                    -3.61374964828753e-66]
+        assert_allclose(stats.truncnorm(a, b).logcdf(x), expected)
+        assert_allclose(stats.truncnorm(-b, -a).logsf(-x), expected)
+
+
+class TestGenLogistic:
+
+    # Expected values computed with mpmath with 50 digits of precision.
+    @pytest.mark.parametrize('x, expected', [(-1000, -1499.5945348918917),
+                                             (-125, -187.09453489189184),
+                                             (0, -1.3274028432916989),
+                                             (100, -99.59453489189184),
+                                             (1000, -999.5945348918918)])
+    def test_logpdf(self, x, expected):
+        c = 1.5
+        logp = stats.genlogistic.logpdf(x, c)
+        assert_allclose(logp, expected, rtol=1e-13)
+
+
+class TestHypergeom:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_rvs(self):
+        vals = stats.hypergeom.rvs(20, 10, 3, size=(2, 50))
+        assert_(numpy.all(vals >= 0) &
+                numpy.all(vals <= 3))
+        assert_(numpy.shape(vals) == (2, 50))
+        assert_(vals.dtype.char in typecodes['AllInteger'])
+        val = stats.hypergeom.rvs(20, 3, 10)
+        assert_(isinstance(val, int))
+        val = stats.hypergeom(20, 3, 10).rvs(3)
+        assert_(isinstance(val, numpy.ndarray))
+        assert_(val.dtype.char in typecodes['AllInteger'])
+
+    def test_precision(self):
+        # comparison number from mpmath
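+        # (presumably along the lines of
+        #  float(mpmath.binomial(50, 2) * mpmath.binomial(2450, 498)
+        #        / mpmath.binomial(2500, 500))
+        #  with M, n, N as below)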
+        M = 2500
+        n = 50
+        N = 500
+        tot = M
+        good = n
+        hgpmf = stats.hypergeom.pmf(2, tot, good, N)
+        assert_almost_equal(hgpmf, 0.0010114963068932233, 11)
+
+    def test_args(self):
+        # test correct output for corner cases of arguments
+        # see gh-2325
+        assert_almost_equal(stats.hypergeom.pmf(0, 2, 1, 0), 1.0, 11)
+        assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)
+
+        assert_almost_equal(stats.hypergeom.pmf(0, 2, 0, 2), 1.0, 11)
+        assert_almost_equal(stats.hypergeom.pmf(1, 2, 0, 2), 0.0, 11)
+
+    def test_cdf_above_one(self):
+        # for some values of parameters, hypergeom cdf was >1, see gh-2238
+        assert_(0 <= stats.hypergeom.cdf(30, 13397950, 4363, 12390) <= 1.0)
+
+    def test_precision2(self):
+        # Test hypergeom precision for large numbers.  See #1218.
+        # Results compared with those from R.
+        oranges = 9.9e4
+        pears = 1.1e5
+        fruits_eaten = np.array([3, 3.8, 3.9, 4, 4.1, 4.2, 5]) * 1e4
+        quantile = 2e4
+        res = [stats.hypergeom.sf(quantile, oranges + pears, oranges, eaten)
+               for eaten in fruits_eaten]
+        expected = np.array([0, 1.904153e-114, 2.752693e-66, 4.931217e-32,
+                             8.265601e-11, 0.1237904, 1])
+        assert_allclose(res, expected, atol=0, rtol=5e-7)
+
+        # Test with array_like first argument
+        quantiles = [1.9e4, 2e4, 2.1e4, 2.15e4]
+        res2 = stats.hypergeom.sf(quantiles, oranges + pears, oranges, 4.2e4)
+        expected2 = [1, 0.1237904, 6.511452e-34, 3.277667e-69]
+        assert_allclose(res2, expected2, atol=0, rtol=5e-7)
+
+    def test_entropy(self):
+        # Simple tests of entropy.
+        hg = stats.hypergeom(4, 1, 1)
+        h = hg.entropy()
+        expected_p = np.array([0.75, 0.25])
+        expected_h = -np.sum(xlogy(expected_p, expected_p))
+        assert_allclose(h, expected_h)
+
+        hg = stats.hypergeom(1, 1, 1)
+        h = hg.entropy()
+        assert_equal(h, 0.0)
+
+    def test_logsf(self):
+        # Test logsf for very large numbers. See issue #4982
+        # Results compared with those from R (v3.2.0):
+        # phyper(k, n, M-n, N, lower.tail=FALSE, log.p=TRUE)
+        # -2239.771
+
+        k = 1e4
+        M = 1e7
+        n = 1e6
+        N = 5e4
+
+        result = stats.hypergeom.logsf(k, M, n, N)
+        expected = -2239.771   # From R
+        assert_almost_equal(result, expected, decimal=3)
+
+        k = 1
+        M = 1600
+        n = 600
+        N = 300
+
+        result = stats.hypergeom.logsf(k, M, n, N)
+        expected = -2.566567e-68   # From R
+        assert_almost_equal(result, expected, decimal=15)
+
+    def test_logcdf(self):
+        # Test logcdf for very large numbers. See issue #8692
+        # Results compared with those from R (v3.3.2):
+        # phyper(k, n, M-n, N, lower.tail=TRUE, log.p=TRUE)
+        # -5273.335
+
+        k = 1
+        M = 1e7
+        n = 1e6
+        N = 5e4
+
+        result = stats.hypergeom.logcdf(k, M, n, N)
+        expected = -5273.335   # From R
+        assert_almost_equal(result, expected, decimal=3)
+
+        # Same example as in issue #8692
+        k = 40
+        M = 1600
+        n = 50
+        N = 300
+
+        result = stats.hypergeom.logcdf(k, M, n, N)
+        expected = -7.565148879229e-23    # From R
+        assert_almost_equal(result, expected, decimal=15)
+
+        k = 125
+        M = 1600
+        n = 250
+        N = 500
+
+        result = stats.hypergeom.logcdf(k, M, n, N)
+        expected = -4.242688e-12    # From R
+        assert_almost_equal(result, expected, decimal=15)
+
+        # test broadcasting robustness based on reviewer
+        # concerns in PR 9603; using an array version of
+        # the example from issue #8692
+        k = np.array([40, 40, 40])
+        M = 1600
+        n = 50
+        N = 300
+
+        result = stats.hypergeom.logcdf(k, M, n, N)
+        expected = np.full(3, -7.565148879229e-23)  # filled from R result
+        assert_almost_equal(result, expected, decimal=15)
+
+
+class TestLoggamma:
+
+    # Expected sf values were computed with mpmath. For given x and c,
+    #     x = mpmath.mpf(x)
+    #     c = mpmath.mpf(c)
+    #     sf = mpmath.gammainc(c, mpmath.exp(x), mpmath.inf,
+    #                          regularized=True)
+    @pytest.mark.parametrize('x, c, sf', [(4, 1.5, 1.6341528919488565e-23),
+                                          (6, 100, 8.23836829202024e-74)])
+    def test_sf_isf(self, x, c, sf):
+        s = stats.loggamma.sf(x, c)
+        assert_allclose(s, sf, rtol=1e-12)
+        y = stats.loggamma.isf(s, c)
+        assert_allclose(y, x, rtol=1e-12)
+
+    def test_logpdf(self):
+        # Test logpdf with x=-500, c=2.  ln(gamma(2)) = 0, and
+        # exp(-500) ~= 7e-218, which is far smaller than the ULP
+        # of c*x=-1000, so logpdf(-500, 2) = c*x - exp(x) - ln(gamma(2))
+        # should give -1000.0.
+        lp = stats.loggamma.logpdf(-500, 2)
+        assert_allclose(lp, -1000.0, rtol=1e-14)
+
+    def test_stats(self):
+        # The following precomputed values are from the table in section 2.2
+        # of "A Statistical Study of Log-Gamma Distribution", by Ping Shing
+        # Chan (thesis, McMaster University, 1993).
+        table = np.array([
+                # c,    mean,   var,    skew,    exc. kurt.
+                0.5, -1.9635, 4.9348, -1.5351, 4.0000,
+                1.0, -0.5772, 1.6449, -1.1395, 2.4000,
+                12.0, 2.4427, 0.0869, -0.2946, 0.1735,
+            ]).reshape(-1, 5)
+        for c, mean, var, skew, kurt in table:
+            computed = stats.loggamma.stats(c, moments='msvk')
+            assert_array_almost_equal(computed, [mean, var, skew, kurt],
+                                      decimal=4)
+
+    @pytest.mark.parametrize('c', [0.1, 0.001])
+    def test_rvs(self, c):
+        # Regression test for gh-11094.
+        x = stats.loggamma.rvs(c, size=100000)
+        # Before gh-11094 was fixed, the case with c=0.001 would
+        # generate many -inf values.
+        assert np.isfinite(x).all()
+        # Crude statistical test.  About half the values should be
+        # less than the median and half greater than the median.
+        med = stats.loggamma.median(c)
+        btest = stats.binomtest(np.count_nonzero(x < med), len(x))
+        ci = btest.proportion_ci(confidence_level=0.999)
+        assert ci.low < 0.5 < ci.high
+
+
+class TestLogistic:
+    # gh-6226
+    def test_cdf_ppf(self):
+        x = np.linspace(-20, 20)
+        y = stats.logistic.cdf(x)
+        xx = stats.logistic.ppf(y)
+        assert_allclose(x, xx)
+
+    def test_sf_isf(self):
+        x = np.linspace(-20, 20)
+        y = stats.logistic.sf(x)
+        xx = stats.logistic.isf(y)
+        assert_allclose(x, xx)
+
+    def test_extreme_values(self):
+        # p is chosen so that 1 - (1 - p) == p in double precision
+        p = 9.992007221626409e-16
+        desired = 34.53957599234088
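+        # Illustrative sanity check of the premise above: 1 - p is exactly
+        # representable in binary64, so 1 - (1 - p) recovers p exactly.
+        assert 1 - (1 - p) == p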
+        assert_allclose(stats.logistic.ppf(1 - p), desired)
+        assert_allclose(stats.logistic.isf(p), desired)
+
+    def test_logpdf_basic(self):
+        logp = stats.logistic.logpdf([-15, 0, 10])
+        # Expected values computed with mpmath with 50 digits of precision.
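+        # (The closed form is logpdf(x) = -x - 2*log1p(exp(-x)).)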
+        expected = [-15.000000611804547,
+                    -1.3862943611198906,
+                    -10.000090797798434]
+        assert_allclose(logp, expected, rtol=1e-13)
+
+    def test_logpdf_extreme_values(self):
+        logp = stats.logistic.logpdf([800, -800])
+        # For such large arguments, logpdf(x) = -abs(x) when computed
+        # with 64 bit floating point.
+        assert_equal(logp, [-800, -800])
+
+    @pytest.mark.parametrize("loc_rvs,scale_rvs", [(0.4484955, 0.10216821),
+                                                   (0.62918191, 0.74367064)])
+    def test_fit(self, loc_rvs, scale_rvs):
+        data = stats.logistic.rvs(size=100, loc=loc_rvs, scale=scale_rvs)
+
+        # test that result of fit method is the same as optimization
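+        # `func` returns the two score equations (the partial derivatives
+        # of the log-likelihood with respect to loc and scale, set to
+        # zero); the MLE is a root of this system.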
+        def func(params, data):
+            a, b = params
+            n = len(data)
+            x1 = np.sum(np.exp((data - a) / b) /
+                        (1 + np.exp((data - a) / b))) - n / 2
+            x2 = np.sum(((data - a) / b) *
+                        ((np.exp((data - a) / b) - 1) /
+                         (np.exp((data - a) / b) + 1))) - n
+            return x1, x2
+
+        expected_solution = root(func, stats.logistic._fitstart(data), args=(
+            data,)).x
+        fit_method = stats.logistic.fit(data)
+
+        # up to numerical error, the fit method and the solution of this
+        # system of equations should agree
+        assert_allclose(fit_method, expected_solution, atol=1e-30)
+
+    def test_fit_comp_optimizer(self):
+        data = stats.logistic.rvs(size=100, loc=0.5, scale=2)
+
+        # obtain objective function to compare results of the fit methods
+        args = [data, (stats.logistic._fitstart(data),)]
+        func = stats.logistic._reduce_func(args, {})[1]
+
+        _assert_less_or_close_loglike(stats.logistic, data, func)
+        _assert_less_or_close_loglike(stats.logistic, data, func, floc=1)
+        _assert_less_or_close_loglike(stats.logistic, data, func, fscale=1)
+
+    @pytest.mark.parametrize('testlogcdf', [True, False])
+    def test_logcdfsf_tails(self, testlogcdf):
+        # Test either logcdf or logsf.  By symmetry, we can use the same
+        # expected values for both by switching the sign of x for logsf.
+        x = np.array([-10000, -800, 17, 50, 500])
+        if testlogcdf:
+            y = stats.logistic.logcdf(x)
+        else:
+            y = stats.logistic.logsf(-x)
+        # The expected values were computed with mpmath.
+        expected = [-10000.0, -800.0, -4.139937633089748e-08,
+                    -1.9287498479639178e-22, -7.124576406741286e-218]
+        assert_allclose(y, expected, rtol=2e-15)
+
+
+class TestLogser:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_rvs(self):
+        vals = stats.logser.rvs(0.75, size=(2, 50))
+        assert_(numpy.all(vals >= 1))
+        assert_(numpy.shape(vals) == (2, 50))
+        assert_(vals.dtype.char in typecodes['AllInteger'])
+        val = stats.logser.rvs(0.75)
+        assert_(isinstance(val, int))
+        val = stats.logser(0.75).rvs(3)
+        assert_(isinstance(val, numpy.ndarray))
+        assert_(val.dtype.char in typecodes['AllInteger'])
+
+    def test_pmf_small_p(self):
+        m = stats.logser.pmf(4, 1e-20)
+        # The expected value was computed using mpmath:
+        #   >>> import mpmath
+        #   >>> mpmath.mp.dps = 64
+        #   >>> k = 4
+        #   >>> p = mpmath.mpf('1e-20')
+        #   >>> float(-(p**k)/k/mpmath.log(1-p))
+        #   2.5e-61
+        # It is also clear from noticing that for very small p,
+        # log(1-p) is approximately -p, and the formula becomes
+        #    p**(k-1) / k
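+        # Here that gives (1e-20)**3 / 4 = 2.5e-61.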
+        assert_allclose(m, 2.5e-61)
+
+    def test_mean_small_p(self):
+        m = stats.logser.mean(1e-8)
+        # The expected mean was computed using mpmath:
+        #   >>> import mpmath
+        #   >>> mpmath.dps = 60
+        #   >>> p = mpmath.mpf('1e-8')
+        #   >>> float(-p / ((1 - p)*mpmath.log(1 - p)))
+        #   1.000000005
+        assert_allclose(m, 1.000000005)
+
+
+class TestGumbel_r_l:
+    @pytest.fixture(scope='function')
+    def rng(self):
+        return np.random.default_rng(1234)
+
+    @pytest.mark.parametrize("dist", [stats.gumbel_r, stats.gumbel_l])
+    @pytest.mark.parametrize("loc_rvs", [-1, 0, 1])
+    @pytest.mark.parametrize("scale_rvs", [.1, 1, 5])
+    @pytest.mark.parametrize('fix_loc, fix_scale',
+                             ([True, False], [False, True]))
+    def test_fit_comp_optimizer(self, dist, loc_rvs, scale_rvs,
+                                fix_loc, fix_scale, rng):
+        data = dist.rvs(size=100, loc=loc_rvs, scale=scale_rvs,
+                        random_state=rng)
+
+        # obtain objective function to compare results of the fit methods
+        args = [data, (dist._fitstart(data),)]
+        func = dist._reduce_func(args, {})[1]
+
+        kwds = dict()
+        # the fixed location and scale are deliberately set away from the
+        # true values.
+        if fix_loc:
+            kwds['floc'] = loc_rvs * 2
+        if fix_scale:
+            kwds['fscale'] = scale_rvs * 2
+
+        # test that the gumbel_* fit method is better than super method
+        _assert_less_or_close_loglike(dist, data, func, **kwds)
+
+    @pytest.mark.parametrize("dist, sgn", [(stats.gumbel_r, 1),
+                                           (stats.gumbel_l, -1)])
+    def test_fit(self, dist, sgn):
+        z = sgn*np.array([3, 3, 3, 3, 3, 3, 3, 3.00000001])
+        loc, scale = dist.fit(z)
+        # The expected values were computed with mpmath with 60 digits
+        # of precision.
+        assert_allclose(loc, sgn*3.0000000001667906)
+        assert_allclose(scale, 1.2495222465145514e-09, rtol=1e-6)
+
+
+class TestPareto:
+    def test_stats(self):
+        # Check the stats() method with some simple values. Also check
+        # that the calculations do not trigger RuntimeWarnings.
+        with warnings.catch_warnings():
+            warnings.simplefilter("error", RuntimeWarning)
+
+            m, v, s, k = stats.pareto.stats(0.5, moments='mvsk')
+            assert_equal(m, np.inf)
+            assert_equal(v, np.inf)
+            assert_equal(s, np.nan)
+            assert_equal(k, np.nan)
+
+            m, v, s, k = stats.pareto.stats(1.0, moments='mvsk')
+            assert_equal(m, np.inf)
+            assert_equal(v, np.inf)
+            assert_equal(s, np.nan)
+            assert_equal(k, np.nan)
+
+            m, v, s, k = stats.pareto.stats(1.5, moments='mvsk')
+            assert_equal(m, 3.0)
+            assert_equal(v, np.inf)
+            assert_equal(s, np.nan)
+            assert_equal(k, np.nan)
+
+            m, v, s, k = stats.pareto.stats(2.0, moments='mvsk')
+            assert_equal(m, 2.0)
+            assert_equal(v, np.inf)
+            assert_equal(s, np.nan)
+            assert_equal(k, np.nan)
+
+            m, v, s, k = stats.pareto.stats(2.5, moments='mvsk')
+            assert_allclose(m, 2.5 / 1.5)
+            assert_allclose(v, 2.5 / (1.5*1.5*0.5))
+            assert_equal(s, np.nan)
+            assert_equal(k, np.nan)
+
+            m, v, s, k = stats.pareto.stats(3.0, moments='mvsk')
+            assert_allclose(m, 1.5)
+            assert_allclose(v, 0.75)
+            assert_equal(s, np.nan)
+            assert_equal(k, np.nan)
+
+            m, v, s, k = stats.pareto.stats(3.5, moments='mvsk')
+            assert_allclose(m, 3.5 / 2.5)
+            assert_allclose(v, 3.5 / (2.5*2.5*1.5))
+            assert_allclose(s, (2*4.5/0.5)*np.sqrt(1.5/3.5))
+            assert_equal(k, np.nan)
+
+            m, v, s, k = stats.pareto.stats(4.0, moments='mvsk')
+            assert_allclose(m, 4.0 / 3.0)
+            assert_allclose(v, 4.0 / 18.0)
+            assert_allclose(s, 2*(1+4.0)/(4.0-3) * np.sqrt((4.0-2)/4.0))
+            assert_equal(k, np.nan)
+
+            m, v, s, k = stats.pareto.stats(4.5, moments='mvsk')
+            assert_allclose(m, 4.5 / 3.5)
+            assert_allclose(v, 4.5 / (3.5*3.5*2.5))
+            assert_allclose(s, (2*5.5/1.5) * np.sqrt(2.5/4.5))
+            assert_allclose(k, 6*(4.5**3 + 4.5**2 - 6*4.5 - 2)/(4.5*1.5*0.5))
+
+    def test_sf(self):
+        x = 1e9
+        b = 2
+        scale = 1.5
+        p = stats.pareto.sf(x, b, loc=0, scale=scale)
+        expected = (scale/x)**b   # 2.25e-18
+        assert_allclose(p, expected)
+
+    @pytest.fixture(scope='function')
+    def rng(self):
+        return np.random.default_rng(1234)
+
+    @pytest.mark.filterwarnings("ignore:invalid value encountered in "
+                                "double_scalars")
+    @pytest.mark.parametrize("rvs_shape", [1, 2])
+    @pytest.mark.parametrize("rvs_loc", [0, 2])
+    @pytest.mark.parametrize("rvs_scale", [1, 5])
+    def test_fit(self, rvs_shape, rvs_loc, rvs_scale, rng):
+        data = stats.pareto.rvs(size=100, b=rvs_shape, scale=rvs_scale,
+                                loc=rvs_loc, random_state=rng)
+
+        # shape can still be fixed with multiple names
+        shape_mle_analytical1 = stats.pareto.fit(data, floc=0, f0=1.04)[0]
+        shape_mle_analytical2 = stats.pareto.fit(data, floc=0, fix_b=1.04)[0]
+        shape_mle_analytical3 = stats.pareto.fit(data, floc=0, fb=1.04)[0]
+        assert (shape_mle_analytical1 == shape_mle_analytical2 ==
+                shape_mle_analytical3 == 1.04)
+
+        # data can be shifted with changes to `loc`
+        data = stats.pareto.rvs(size=100, b=rvs_shape, scale=rvs_scale,
+                                loc=(rvs_loc + 2), random_state=rng)
+        shape_mle_a, loc_mle_a, scale_mle_a = stats.pareto.fit(data, floc=2)
+        assert_equal(scale_mle_a + 2, data.min())
+
+        data_shift = data - 2
+        ndata = data_shift.shape[0]
+        assert_equal(shape_mle_a,
+                     ndata / np.sum(np.log(data_shift/data_shift.min())))
+        assert_equal(loc_mle_a, 2)
+
+    @pytest.mark.parametrize("rvs_shape", [.1, 2])
+    @pytest.mark.parametrize("rvs_loc", [0, 2])
+    @pytest.mark.parametrize("rvs_scale", [1, 5])
+    @pytest.mark.parametrize('fix_shape, fix_loc, fix_scale',
+                             [p for p in product([True, False], repeat=3)
+                              if False in p])
+    @np.errstate(invalid="ignore")
+    def test_fit_MLE_comp_optimizer(self, rvs_shape, rvs_loc, rvs_scale,
+                                    fix_shape, fix_loc, fix_scale, rng):
+        data = stats.pareto.rvs(size=100, b=rvs_shape, scale=rvs_scale,
+                                loc=rvs_loc, random_state=rng)
+        args = [data, (stats.pareto._fitstart(data), )]
+        func = stats.pareto._reduce_func(args, {})[1]
+
+        kwds = {}
+        if fix_shape:
+            kwds['f0'] = rvs_shape
+        if fix_loc:
+            kwds['floc'] = rvs_loc
+        if fix_scale:
+            kwds['fscale'] = rvs_scale
+
+        _assert_less_or_close_loglike(stats.pareto, data, func, **kwds)
+
+    @np.errstate(invalid="ignore")
+    def test_fit_known_bad_seed(self):
+        # Tests a known seed and set of parameters that would produce a
+        # result that would violate the support of Pareto if the fit method
+        # did not check the constraint `fscale + floc < min(data)`.
+        shape, location, scale = 1, 0, 1
+        data = stats.pareto.rvs(shape, location, scale, size=100,
+                                random_state=np.random.default_rng(2535619))
+        args = [data, (stats.pareto._fitstart(data), )]
+        func = stats.pareto._reduce_func(args, {})[1]
+        _assert_less_or_close_loglike(stats.pareto, data, func)
+
+    def test_fit_warnings(self):
+        assert_fit_warnings(stats.pareto)
+        # `floc` that causes invalid negative data
+        assert_raises(FitDataError, stats.pareto.fit, [1, 2, 3], floc=2)
+        # `floc` and `fscale` combination causes invalid data
+        assert_raises(FitDataError, stats.pareto.fit, [5, 2, 3], floc=1,
+                      fscale=3)
+
+    def test_negative_data(self, rng):
+        data = stats.pareto.rvs(loc=-130, b=1, size=100, random_state=rng)
+        assert_array_less(data, 0)
+        # The purpose of this test is to make sure that no runtime warnings
+        # are raised when all the data are negative; the output of the fit
+        # method is not checked here. Other methods test the output but have
+        # to silence warnings from the super method.
+        _ = stats.pareto.fit(data)
+
+
+class TestGenpareto:
+    def test_ab(self):
+        # c >= 0: a, b = [0, inf]
+        for c in [1., 0.]:
+            c = np.asarray(c)
+            a, b = stats.genpareto._get_support(c)
+            assert_equal(a, 0.)
+            assert_(np.isposinf(b))
+
+        # c < 0: a=0, b=1/|c|
+        c = np.asarray(-2.)
+        a, b = stats.genpareto._get_support(c)
+        assert_allclose([a, b], [0., 0.5])
+
+    def test_c0(self):
+        # with c=0, genpareto reduces to the exponential distribution
+        rv = stats.genpareto(c=0.)
+        x = np.linspace(0, 10., 30)
+        assert_allclose(rv.pdf(x), stats.expon.pdf(x))
+        assert_allclose(rv.cdf(x), stats.expon.cdf(x))
+        assert_allclose(rv.sf(x), stats.expon.sf(x))
+
+        q = np.linspace(0., 1., 10)
+        assert_allclose(rv.ppf(q), stats.expon.ppf(q))
+
+    def test_cm1(self):
+        # with c=-1, genpareto reduces to the uniform distribution on [0, 1]
+        rv = stats.genpareto(c=-1.)
+        x = np.linspace(0, 10., 30)
+        assert_allclose(rv.pdf(x), stats.uniform.pdf(x))
+        assert_allclose(rv.cdf(x), stats.uniform.cdf(x))
+        assert_allclose(rv.sf(x), stats.uniform.sf(x))
+
+        q = np.linspace(0., 1., 10)
+        assert_allclose(rv.ppf(q), stats.uniform.ppf(q))
+
+        # logpdf(1., c=-1) should be zero
+        assert_allclose(rv.logpdf(1), 0)
+
+    def test_x_inf(self):
+        # make sure x=inf is handled gracefully
+        rv = stats.genpareto(c=0.1)
+        assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
+        assert_(np.isneginf(rv.logpdf(np.inf)))
+
+        rv = stats.genpareto(c=0.)
+        assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
+        assert_(np.isneginf(rv.logpdf(np.inf)))
+
+        rv = stats.genpareto(c=-1.)
+        assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
+        assert_(np.isneginf(rv.logpdf(np.inf)))
+
+    def test_c_continuity(self):
+        # pdf is continuous at c=0, -1
+        x = np.linspace(0, 10, 30)
+        for c in [0, -1]:
+            pdf0 = stats.genpareto.pdf(x, c)
+            for dc in [1e-14, -1e-14]:
+                pdfc = stats.genpareto.pdf(x, c + dc)
+                assert_allclose(pdf0, pdfc, atol=1e-12)
+
+            cdf0 = stats.genpareto.cdf(x, c)
+            for dc in [1e-14, -1e-14]:
+                cdfc = stats.genpareto.cdf(x, c + dc)
+                assert_allclose(cdf0, cdfc, atol=1e-12)
+
+    def test_c_continuity_ppf(self):
+        q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
+                  np.linspace(0.01, 1, 30, endpoint=False),
+                  1. - np.logspace(1e-12, 0.01, base=0.1)]
+        for c in [0., -1.]:
+            ppf0 = stats.genpareto.ppf(q, c)
+            for dc in [1e-14, -1e-14]:
+                ppfc = stats.genpareto.ppf(q, c + dc)
+                assert_allclose(ppf0, ppfc, atol=1e-12)
+
+    def test_c_continuity_isf(self):
+        q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
+                  np.linspace(0.01, 1, 30, endpoint=False),
+                  1. - np.logspace(1e-12, 0.01, base=0.1)]
+        for c in [0., -1.]:
+            isf0 = stats.genpareto.isf(q, c)
+            for dc in [1e-14, -1e-14]:
+                isfc = stats.genpareto.isf(q, c + dc)
+                assert_allclose(isf0, isfc, atol=1e-12)
+
+    def test_cdf_ppf_roundtrip(self):
+        # this should pass with machine precision. hat tip @pbrod
+        q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
+                  np.linspace(0.01, 1, 30, endpoint=False),
+                  1. - np.logspace(1e-12, 0.01, base=0.1)]
+        for c in [1e-8, -1e-18, 1e-15, -1e-15]:
+            assert_allclose(stats.genpareto.cdf(stats.genpareto.ppf(q, c), c),
+                            q, atol=1e-15)
+
+    def test_logsf(self):
+        logp = stats.genpareto.logsf(1e10, .01, 0, 1)
+        assert_allclose(logp, -1842.0680753952365)
+
+    # Values in 'expected_stats' are
+    # [mean, variance, skewness, excess kurtosis].
+    @pytest.mark.parametrize(
+        'c, expected_stats',
+        [(0, [1, 1, 2, 6]),
+         (1/4, [4/3, 32/9, 10/np.sqrt(2), np.nan]),
+         (1/9, [9/8, (81/64)*(9/7), (10/9)*np.sqrt(7), 754/45]),
+         (-1, [1/2, 1/12, 0, -6/5])])
+    def test_stats(self, c, expected_stats):
+        result = stats.genpareto.stats(c, moments='mvsk')
+        assert_allclose(result, expected_stats, rtol=1e-13, atol=1e-15)
+
+    def test_var(self):
+        # Regression test for gh-11168.
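+        # For genpareto, var(c) = 1/((1 - c)**2 * (1 - 2*c)), so for
+        # c = 1e-8 the variance is 1 + 4e-8 to first order.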
+        v = stats.genpareto.var(1e-8)
+        assert_allclose(v, 1.000000040000001, rtol=1e-13)
+
+
+class TestPearson3:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_rvs(self):
+        vals = stats.pearson3.rvs(0.1, size=(2, 50))
+        assert_(numpy.shape(vals) == (2, 50))
+        assert_(vals.dtype.char in typecodes['AllFloat'])
+        val = stats.pearson3.rvs(0.5)
+        assert_(isinstance(val, float))
+        val = stats.pearson3(0.5).rvs(3)
+        assert_(isinstance(val, numpy.ndarray))
+        assert_(val.dtype.char in typecodes['AllFloat'])
+        assert_(len(val) == 3)
+
+    def test_pdf(self):
+        vals = stats.pearson3.pdf(2, [0.0, 0.1, 0.2])
+        assert_allclose(vals, np.array([0.05399097, 0.05555481, 0.05670246]),
+                        atol=1e-6)
+        vals = stats.pearson3.pdf(-3, 0.1)
+        assert_allclose(vals, np.array([0.00313791]), atol=1e-6)
+        vals = stats.pearson3.pdf([-3, -2, -1, 0, 1], 0.1)
+        assert_allclose(vals, np.array([0.00313791, 0.05192304, 0.25028092,
+                                        0.39885918, 0.23413173]), atol=1e-6)
+
+    def test_cdf(self):
+        vals = stats.pearson3.cdf(2, [0.0, 0.1, 0.2])
+        assert_allclose(vals, np.array([0.97724987, 0.97462004, 0.97213626]),
+                        atol=1e-6)
+        vals = stats.pearson3.cdf(-3, 0.1)
+        assert_allclose(vals, [0.00082256], atol=1e-6)
+        vals = stats.pearson3.cdf([-3, -2, -1, 0, 1], 0.1)
+        assert_allclose(vals, [8.22563821e-04, 1.99860448e-02, 1.58550710e-01,
+                               5.06649130e-01, 8.41442111e-01], atol=1e-6)
+
+    def test_negative_cdf_bug_11186(self):
+        # incorrect CDFs for negative skews in gh-11186; fixed in gh-12640
+        # Also check vectorization w/ negative, zero, and positive skews
+        skews = [-3, -1, 0, 0.5]
+        x_eval = 0.5
+        neg_inf = -30  # avoid RuntimeWarning caused by np.log(0)
+        cdfs = stats.pearson3.cdf(x_eval, skews)
+        int_pdfs = [quad(stats.pearson3(skew).pdf, neg_inf, x_eval)[0]
+                    for skew in skews]
+        assert_allclose(cdfs, int_pdfs)
+
+    def test_return_array_bug_11746(self):
+        # pearson3.moment was returning size 0 or 1 array instead of float
+        # The first moment is equal to the loc, which defaults to zero
+        moment = stats.pearson3.moment(1, 2)
+        assert_equal(moment, 0)
+        assert isinstance(moment, np.number)
+
+        moment = stats.pearson3.moment(1, 0.000001)
+        assert_equal(moment, 0)
+        assert isinstance(moment, np.number)
+
+    def test_ppf_bug_17050(self):
+        # incorrect PPF values for negative skews were reported in gh-17050
+        # Check that this is fixed (even in the array case)
+        skews = [-3, -1, 0, 0.5]
+        x_eval = 0.5
+        res = stats.pearson3.ppf(stats.pearson3.cdf(x_eval, skews), skews)
+        assert_allclose(res, x_eval)
+
+        # Negation of the skew flips the distribution about the origin, so
+        # the following should hold
+        skew = np.array([[-0.5], [1.5]])
+        x = np.linspace(-2, 2)
+        assert_allclose(stats.pearson3.pdf(x, skew),
+                        stats.pearson3.pdf(-x, -skew))
+        assert_allclose(stats.pearson3.cdf(x, skew),
+                        stats.pearson3.sf(-x, -skew))
+        assert_allclose(stats.pearson3.ppf(x, skew),
+                        -stats.pearson3.isf(x, -skew))
+
+
+class TestKappa4:
+    def test_cdf_genpareto(self):
+        # h = 1 and k != 0 is generalized Pareto
+        x = [0.0, 0.1, 0.2, 0.5]
+        h = 1.0
+        for k in [-1.9, -1.0, -0.5, -0.2, -0.1, 0.1, 0.2, 0.5, 1.0,
+                  1.9]:
+            vals = stats.kappa4.cdf(x, h, k)
+            # kappa4's shape parameter k has the opposite sign of genpareto's c
+            vals_comp = stats.genpareto.cdf(x, -k)
+            assert_allclose(vals, vals_comp)
+
+    def test_cdf_genextreme(self):
+        # h = 0 and k != 0 is generalized extreme value
+        x = np.linspace(-5, 5, 10)
+        h = 0.0
+        k = np.linspace(-3, 3, 10)
+        vals = stats.kappa4.cdf(x, h, k)
+        vals_comp = stats.genextreme.cdf(x, k)
+        assert_allclose(vals, vals_comp)
+
+    def test_cdf_expon(self):
+        # h = 1 and k = 0 is exponential
+        x = np.linspace(0, 10, 10)
+        h = 1.0
+        k = 0.0
+        vals = stats.kappa4.cdf(x, h, k)
+        vals_comp = stats.expon.cdf(x)
+        assert_allclose(vals, vals_comp)
+
+    def test_cdf_gumbel_r(self):
+        # h = 0 and k = 0 is gumbel_r
+        x = np.linspace(-5, 5, 10)
+        h = 0.0
+        k = 0.0
+        vals = stats.kappa4.cdf(x, h, k)
+        vals_comp = stats.gumbel_r.cdf(x)
+        assert_allclose(vals, vals_comp)
+
+    def test_cdf_logistic(self):
+        # h = -1 and k = 0 is logistic
+        x = np.linspace(-5, 5, 10)
+        h = -1.0
+        k = 0.0
+        vals = stats.kappa4.cdf(x, h, k)
+        vals_comp = stats.logistic.cdf(x)
+        assert_allclose(vals, vals_comp)
+
+    def test_cdf_uniform(self):
+        # h = 1 and k = 1 is uniform
+        x = np.linspace(-5, 5, 10)
+        h = 1.0
+        k = 1.0
+        vals = stats.kappa4.cdf(x, h, k)
+        vals_comp = stats.uniform.cdf(x)
+        assert_allclose(vals, vals_comp)
+
+    def test_integers_ctor(self):
+        # regression test for gh-7416: _argcheck fails for integer h and k
+        # in numpy 1.12
+        stats.kappa4(1, 2)
+
+
+class TestPoisson:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_pmf_basic(self):
+        # Basic case
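+        # pmf(k) = exp(-mu) * mu**k / k!; with mu = ln(2), exp(-mu) = 1/2,
+        # which gives the closed-form values below.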
+        ln2 = np.log(2)
+        vals = stats.poisson.pmf([0, 1, 2], ln2)
+        expected = [0.5, ln2/2, ln2**2/4]
+        assert_allclose(vals, expected)
+
+    def test_mu0(self):
+        # Edge case: mu=0
+        vals = stats.poisson.pmf([0, 1, 2], 0)
+        expected = [1, 0, 0]
+        assert_array_equal(vals, expected)
+
+        interval = stats.poisson.interval(0.95, 0)
+        assert_equal(interval, (0, 0))
+
+    def test_rvs(self):
+        vals = stats.poisson.rvs(0.5, size=(2, 50))
+        assert_(numpy.all(vals >= 0))
+        assert_(numpy.shape(vals) == (2, 50))
+        assert_(vals.dtype.char in typecodes['AllInteger'])
+        val = stats.poisson.rvs(0.5)
+        assert_(isinstance(val, int))
+        val = stats.poisson(0.5).rvs(3)
+        assert_(isinstance(val, numpy.ndarray))
+        assert_(val.dtype.char in typecodes['AllInteger'])
+
+    def test_stats(self):
+        mu = 16.0
+        result = stats.poisson.stats(mu, moments='mvsk')
+        assert_allclose(result, [mu, mu, np.sqrt(1.0/mu), 1.0/mu])
+
+        mu = np.array([0.0, 1.0, 2.0])
+        result = stats.poisson.stats(mu, moments='mvsk')
+        expected = (mu, mu, [np.inf, 1, 1/np.sqrt(2)], [np.inf, 1, 0.5])
+        assert_allclose(result, expected)
+
+
+class TestKSTwo:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_cdf(self):
+        for n in [1, 2, 3, 10, 100, 1000]:
+            # Test x-values:
+            #   0 and 1/2n, where the cdf should be 0
+            #   1/n, where the cdf should be n!/n^n
+            #   0.5, where the cdf should match ksone.cdf
+            #   1-1/n, where cdf = 1-2/n^n
+            #   1, where cdf == 1
+            # (Exact values are given by Eqn 1 in Simard / L'Ecuyer.)
+            x = np.array([0, 0.5/n, 1/n, 0.5, 1-1.0/n, 1])
+            v1 = (1.0/n)**n
+            lg = scipy.special.gammaln(n+1)
+            elg = (np.exp(lg) if v1 != 0 else 0)
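+            # v1 * elg == n!/n**n; the guard avoids evaluating np.exp(lg),
+            # which overflows to inf for large n, once v1 has underflowed
+            # to 0 (otherwise the product would be 0 * inf = nan).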
+            expected = np.array([0, 0, v1 * elg,
+                                 1 - 2*stats.ksone.sf(0.5, n),
+                                 max(1 - 2*v1, 0.0),
+                                 1.0])
+            vals_cdf = stats.kstwo.cdf(x, n)
+            assert_allclose(vals_cdf, expected)
+
+    def test_sf(self):
+        for n in [1, 2, 3, 10, 100, 1000]:
+            # Same x values as in test_cdf, and use sf = 1 - cdf
+            x = np.array([0, 0.5/n, 1/n, 0.5, 1-1.0/n, 1])
+            v1 = (1.0/n)**n
+            lg = scipy.special.gammaln(n+1)
+            elg = (np.exp(lg) if v1 != 0 else 0)
+            expected = np.array([1.0, 1.0,
+                                 1 - v1 * elg,
+                                 2*stats.ksone.sf(0.5, n),
+                                 min(2*v1, 1.0), 0])
+            vals_sf = stats.kstwo.sf(x, n)
+            assert_allclose(vals_sf, expected)
+
+    def test_cdf_sqrtn(self):
+        # For fixed a, cdf(a/sqrt(n), n) -> kstwobign(a) as n->infinity,
+        # approaching the limit from above, so cdf(a/sqrt(n), n) is a
+        # decreasing function of n.  Check that it is indeed non-increasing
+        # (allowing for small floating point and algorithm differences).
+        x = np.linspace(0, 2, 11)[1:]
+        ns = [50, 100, 200, 400, 1000, 2000]
+        for _x in x:
+            xn = _x / np.sqrt(ns)
+            probs = stats.kstwo.cdf(xn, ns)
+            diffs = np.diff(probs)
+            assert_array_less(diffs, 1e-8)
+
+    def test_cdf_sf(self):
+        x = np.linspace(0, 1, 11)
+        for n in [1, 2, 3, 10, 100, 1000]:
+            vals_cdf = stats.kstwo.cdf(x, n)
+            vals_sf = stats.kstwo.sf(x, n)
+            assert_array_almost_equal(vals_cdf, 1 - vals_sf)
+
+    def test_cdf_sf_sqrtn(self):
+        x = np.linspace(0, 1, 11)
+        for n in [1, 2, 3, 10, 100, 1000]:
+            xn = x / np.sqrt(n)
+            vals_cdf = stats.kstwo.cdf(xn, n)
+            vals_sf = stats.kstwo.sf(xn, n)
+            assert_array_almost_equal(vals_cdf, 1 - vals_sf)
+
+    def test_ppf_of_cdf(self):
+        x = np.linspace(0, 1, 11)
+        for n in [1, 2, 3, 10, 100, 1000]:
+            xn = x[x > 0.5/n]
+            vals_cdf = stats.kstwo.cdf(xn, n)
+            # CDFs close to 1 are better dealt with using the SF
+            cond = (0 < vals_cdf) & (vals_cdf < 0.99)
+            vals = stats.kstwo.ppf(vals_cdf, n)
+            assert_allclose(vals[cond], xn[cond], rtol=1e-4)
+
+    def test_isf_of_sf(self):
+        x = np.linspace(0, 1, 11)
+        for n in [1, 2, 3, 10, 100, 1000]:
+            xn = x[x > 0.5/n]
+            vals_isf = stats.kstwo.isf(xn, n)
+            cond = (0 < vals_isf) & (vals_isf < 1.0)
+            vals = stats.kstwo.sf(vals_isf, n)
+            assert_allclose(vals[cond], xn[cond], rtol=1e-4)
+
+    def test_ppf_of_cdf_sqrtn(self):
+        x = np.linspace(0, 1, 11)
+        for n in [1, 2, 3, 10, 100, 1000]:
+            xn = (x / np.sqrt(n))[x > 0.5/n]
+            vals_cdf = stats.kstwo.cdf(xn, n)
+            cond = (0 < vals_cdf) & (vals_cdf < 1.0)
+            vals = stats.kstwo.ppf(vals_cdf, n)
+            assert_allclose(vals[cond], xn[cond])
+
+    def test_isf_of_sf_sqrtn(self):
+        x = np.linspace(0, 1, 11)
+        for n in [1, 2, 3, 10, 100, 1000]:
+            xn = (x / np.sqrt(n))[x > 0.5/n]
+            vals_sf = stats.kstwo.sf(xn, n)
+            # SFs close to 1 are better dealt with using the CDF
+            cond = (0 < vals_sf) & (vals_sf < 0.95)
+            vals = stats.kstwo.isf(vals_sf, n)
+            assert_allclose(vals[cond], xn[cond])
+
+    def test_ppf(self):
+        probs = np.linspace(0, 1, 11)[1:]
+        for n in [1, 2, 3, 10, 100, 1000]:
+            xn = stats.kstwo.ppf(probs, n)
+            vals_cdf = stats.kstwo.cdf(xn, n)
+            assert_allclose(vals_cdf, probs)
+
+    def test_simard_lecuyer_table1(self):
+        # Compute the cdf for values near the mean of the distribution.
+        # The mean u ~ log(2)*sqrt(pi/(2n))
+        # Compute for x in [u/4, u/3, u/2, u, 2u, 3u]
+        # This is the computation of Table 1 of Simard, R., L'Ecuyer, P. (2011)
+        #  "Computing the Two-Sided Kolmogorov-Smirnov Distribution".
+        # Except that the values below are not from the published table, but
+        # were generated using an independent SageMath implementation of
+        # Durbin's algorithm (with the exponentiation and scaling of
+        # Marsaglia/Tsang/Wang's version) using 500 bit arithmetic.
+        # Some of the values in the published table have relative
+        # errors greater than 1e-4.
+        ns = [10, 50, 100, 200, 500, 1000]
+        ratios = np.array([1.0/4, 1.0/3, 1.0/2, 1, 2, 3])
+        expected = np.array([
+            [1.92155292e-08, 5.72933228e-05, 2.15233226e-02, 6.31566589e-01,
+             9.97685592e-01, 9.99999942e-01],
+            [2.28096224e-09, 1.99142563e-05, 1.42617934e-02, 5.95345542e-01,
+             9.96177701e-01, 9.99998662e-01],
+            [1.00201886e-09, 1.32673079e-05, 1.24608594e-02, 5.86163220e-01,
+             9.95866877e-01, 9.99998240e-01],
+            [4.93313022e-10, 9.52658029e-06, 1.12123138e-02, 5.79486872e-01,
+             9.95661824e-01, 9.99997964e-01],
+            [2.37049293e-10, 6.85002458e-06, 1.01309221e-02, 5.73427224e-01,
+             9.95491207e-01, 9.99997750e-01],
+            [1.56990874e-10, 5.71738276e-06, 9.59725430e-03, 5.70322692e-01,
+             9.95409545e-01, 9.99997657e-01]
+        ])
+        for idx, n in enumerate(ns):
+            x = ratios * np.log(2) * np.sqrt(np.pi/2/n)
+            vals_cdf = stats.kstwo.cdf(x, n)
+            assert_allclose(vals_cdf, expected[idx], rtol=1e-5)
+
+
+class TestZipf:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_rvs(self):
+        vals = stats.zipf.rvs(1.5, size=(2, 50))
+        assert_(numpy.all(vals >= 1))
+        assert_(numpy.shape(vals) == (2, 50))
+        assert_(vals.dtype.char in typecodes['AllInteger'])
+        val = stats.zipf.rvs(1.5)
+        assert_(isinstance(val, int))
+        val = stats.zipf(1.5).rvs(3)
+        assert_(isinstance(val, numpy.ndarray))
+        assert_(val.dtype.char in typecodes['AllInteger'])
+
+    def test_moments(self):
+        # n-th moment is finite iff a > n + 1
+        m, v = stats.zipf.stats(a=2.8)
+        assert_(np.isfinite(m))
+        assert_equal(v, np.inf)
+
+        s, k = stats.zipf.stats(a=4.8, moments='sk')
+        assert_(not np.isfinite([s, k]).all())
+
+
+class TestDLaplace:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_rvs(self):
+        vals = stats.dlaplace.rvs(1.5, size=(2, 50))
+        assert_(numpy.shape(vals) == (2, 50))
+        assert_(vals.dtype.char in typecodes['AllInteger'])
+        val = stats.dlaplace.rvs(1.5)
+        assert_(isinstance(val, int))
+        val = stats.dlaplace(1.5).rvs(3)
+        assert_(isinstance(val, numpy.ndarray))
+        assert_(val.dtype.char in typecodes['AllInteger'])
+        assert_(stats.dlaplace.rvs(0.8) is not None)
+
+    def test_stats(self):
+        # compare the explicit formulas w/ direct summation using pmf
+        a = 1.
+        dl = stats.dlaplace(a)
+        m, v, s, k = dl.stats('mvsk')
+
+        N = 37
+        xx = np.arange(-N, N+1)
+        pp = dl.pmf(xx)
+        m2, m4 = np.sum(pp*xx**2), np.sum(pp*xx**4)
+        assert_equal((m, s), (0, 0))
+        assert_allclose((v, k), (m2, m4/m2**2 - 3.), atol=1e-14, rtol=1e-8)
+
+    def test_stats2(self):
+        a = np.log(2.)
+        dl = stats.dlaplace(a)
+        m, v, s, k = dl.stats('mvsk')
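+        # closed form for the variance: var = 2*exp(-a)/(1 - exp(-a))**2,
+        # which is 2*(1/2)/(1/2)**2 = 4 at a = log(2)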
+        assert_equal((m, s), (0., 0.))
+        assert_allclose((v, k), (4., 3.25))
+
+
+class TestInvgauss:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    @pytest.mark.parametrize("rvs_mu,rvs_loc,rvs_scale",
+                             [(2, 0, 1), (4.635, 4.362, 6.303)])
+    def test_fit(self, rvs_mu, rvs_loc, rvs_scale):
+        data = stats.invgauss.rvs(size=100, mu=rvs_mu,
+                                  loc=rvs_loc, scale=rvs_scale)
+        # Analytical MLEs are calculated in closed form when `floc` is fixed
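+        # (inverse Gaussian with shape lambda: lambda_mle = n / sum(1/x -
+        # 1/xbar); in scipy's parametrization scale = lambda and
+        # mu = xbar / lambda, applied to the loc-shifted data below)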
+        mu, loc, scale = stats.invgauss.fit(data, floc=rvs_loc)
+
+        data = data - rvs_loc
+        mu_temp = np.mean(data)
+        scale_mle = len(data) / (np.sum(data**(-1) - mu_temp**(-1)))
+        mu_mle = mu_temp/scale_mle
+
+        # `mu` and `scale` match analytical formula
+        assert_allclose(mu_mle, mu, atol=1e-15, rtol=1e-15)
+        assert_allclose(scale_mle, scale, atol=1e-15, rtol=1e-15)
+        assert_equal(loc, rvs_loc)
+        data = stats.invgauss.rvs(size=100, mu=rvs_mu,
+                                  loc=rvs_loc, scale=rvs_scale)
+        # fixed parameters are returned
+        mu, loc, scale = stats.invgauss.fit(data, floc=rvs_loc - 1,
+                                            fscale=rvs_scale + 1)
+        assert_equal(rvs_scale + 1, scale)
+        assert_equal(rvs_loc - 1, loc)
+
+        # shape can still be fixed with multiple names
+        shape_mle1 = stats.invgauss.fit(data, fmu=1.04)[0]
+        shape_mle2 = stats.invgauss.fit(data, fix_mu=1.04)[0]
+        shape_mle3 = stats.invgauss.fit(data, f0=1.04)[0]
+        assert shape_mle1 == shape_mle2 == shape_mle3 == 1.04
+
+    @pytest.mark.parametrize("rvs_mu,rvs_loc,rvs_scale",
+                             [(2, 0, 1), (6.311, 3.225, 4.520)])
+    def test_fit_MLE_comp_optimizer(self, rvs_mu, rvs_loc, rvs_scale):
+        data = stats.invgauss.rvs(size=100, mu=rvs_mu,
+                                  loc=rvs_loc, scale=rvs_scale)
+
+        super_fit = super(type(stats.invgauss), stats.invgauss).fit
+        # fitting without `floc` uses superclass fit method
+        super_fitted = super_fit(data)
+        invgauss_fit = stats.invgauss.fit(data)
+        assert_equal(super_fitted, invgauss_fit)
+
+        # fitting with `fmu` uses the superclass fit method
+        super_fitted = super_fit(data, floc=0, fmu=2)
+        invgauss_fit = stats.invgauss.fit(data, floc=0, fmu=2)
+        assert_equal(super_fitted, invgauss_fit)
+
+        # obtain log-likelihood objective function to compare results
+        args = [data, (stats.invgauss._fitstart(data), )]
+        func = stats.invgauss._reduce_func(args, {})[1]
+
+        # fixed `floc` uses analytical formula and provides better fit than
+        # super method
+        _assert_less_or_close_loglike(stats.invgauss, data, func, floc=rvs_loc)
+
+        # a fixed `floc` that does not make any shifted data negative uses
+        # the analytical formulas and provides a better fit than the super
+        # method
+        assert np.all((data - (rvs_loc - 1)) > 0)
+        _assert_less_or_close_loglike(stats.invgauss, data, func,
+                                      floc=rvs_loc - 1)
+
+        # fixed `floc` to an arbitrary number, 0, still provides a better fit
+        # than the super method
+        _assert_less_or_close_loglike(stats.invgauss, data, func, floc=0)
+
+        # fixed `fscale` to an arbitrary number still provides a better fit
+        # than the super method
+        _assert_less_or_close_loglike(stats.invgauss, data, func, floc=rvs_loc,
+                                      fscale=np.random.rand(1)[0])
+
+    def test_fit_raise_errors(self):
+        assert_fit_warnings(stats.invgauss)
+        # FitDataError is raised when `floc` makes some of the data negative
+        with pytest.raises(FitDataError):
+            stats.invgauss.fit([1, 2, 3], floc=2)
+
+    def test_cdf_sf(self):
+        # Regression tests for gh-13614.
+        # Ground truth from R's statmod library (pinvgauss), e.g.
+        # library(statmod)
+        # options(digits=15)
+        # mu = c(4.17022005e-04, 7.20324493e-03, 1.14374817e-06,
+        #        3.02332573e-03, 1.46755891e-03)
+        # print(pinvgauss(5, mu, 1))
+
+        # make sure a finite value is returned when mu is very small. see
+        # GH-13614
+        mu = [4.17022005e-04, 7.20324493e-03, 1.14374817e-06,
+              3.02332573e-03, 1.46755891e-03]
+        expected = [1, 1, 1, 1, 1]
+        actual = stats.invgauss.cdf(0.4, mu=mu)
+        assert_equal(expected, actual)
+
+        # test if the function can distinguish small left/right tail
+        # probabilities from zero.
+        cdf_actual = stats.invgauss.cdf(0.001, mu=1.05)
+        assert_allclose(cdf_actual, 4.65246506892667e-219)
+        sf_actual = stats.invgauss.sf(110, mu=1.05)
+        assert_allclose(sf_actual, 4.12851625944048e-25)
+
+        # test if x does not cause numerical issues when mu is very small
+        # and x is close to mu in value.
+
+        # slightly smaller than mu
+        actual = stats.invgauss.cdf(0.00009, 0.0001)
+        assert_allclose(actual, 2.9458022894924e-26)
+
+        # slightly bigger than mu
+        actual = stats.invgauss.cdf(0.000102, 0.0001)
+        assert_allclose(actual, 0.976445540507925)
+
+    def test_logcdf_logsf(self):
+        # Regression tests for improvements made in gh-13616.
+        # Ground truth from R's statmod library (pinvgauss), e.g.
+        # library(statmod)
+        # options(digits=15)
+        # print(pinvgauss(0.001, 1.05, 1, log.p=TRUE, lower.tail=FALSE))
+
+        # test if logcdf and logsf can compute values too small to
+        # be represented on the unlogged scale. See: gh-13616
+        logcdf = stats.invgauss.logcdf(0.0001, mu=1.05)
+        assert_allclose(logcdf, -5003.87872590367)
+        logcdf = stats.invgauss.logcdf(110, 1.05)
+        assert_allclose(logcdf, -4.12851625944087e-25)
+        logsf = stats.invgauss.logsf(0.001, mu=1.05)
+        assert_allclose(logsf, -4.65246506892676e-219)
+        logsf = stats.invgauss.logsf(110, 1.05)
+        assert_allclose(logsf, -56.1467092416426)
+
+
+class TestLaplace:
+    @pytest.mark.parametrize("rvs_loc", [-5, 0, 1, 2])
+    @pytest.mark.parametrize("rvs_scale", [1, 2, 3, 10])
+    def test_fit(self, rvs_loc, rvs_scale):
+        # tests that various inputs follow expected behavior
+        # for a variety of `loc` and `scale`.
+        data = stats.laplace.rvs(size=100, loc=rvs_loc, scale=rvs_scale)
+
+        # the analytical MLEs are the sample median (loc) and the mean
+        # absolute deviation about it (scale):
+        loc_mle = np.median(data)
+        scale_mle = np.sum(np.abs(data - loc_mle)) / len(data)
+
+        # standard outputs should match analytical MLE formulas
+        loc, scale = stats.laplace.fit(data)
+        assert_allclose(loc, loc_mle, atol=1e-15, rtol=1e-15)
+        assert_allclose(scale, scale_mle, atol=1e-15, rtol=1e-15)
+
+        # fixing one parameter should use the analytical formula for the other
+        loc, scale = stats.laplace.fit(data, floc=loc_mle)
+        assert_allclose(scale, scale_mle, atol=1e-15, rtol=1e-15)
+        loc, scale = stats.laplace.fit(data, fscale=scale_mle)
+        assert_allclose(loc, loc_mle)
+
+        # test with non-mle fixed parameter
+        # create scale with non-median loc
+        loc = rvs_loc * 2
+        scale_mle = np.sum(np.abs(data - loc)) / len(data)
+
+        # with loc fixed to a non-median value, the fitted scale should
+        # match the scale calculated with that modified loc
+        loc, scale = stats.laplace.fit(data, floc=loc)
+        assert_equal(scale_mle, scale)
+
+        # with scale fixed to the value computed from the non-median loc,
+        # the fitted loc should still be the data median.
+        loc, scale = stats.laplace.fit(data, fscale=scale_mle)
+        assert_equal(loc_mle, loc)
+
+        # error raised when both `floc` and `fscale` are fixed
+        assert_raises(RuntimeError, stats.laplace.fit, data, floc=loc_mle,
+                      fscale=scale_mle)
+
+        # error is raised with non-finite values
+        assert_raises(ValueError, stats.laplace.fit, [np.nan])
+        assert_raises(ValueError, stats.laplace.fit, [np.inf])
+
+    @pytest.mark.parametrize("rvs_scale,rvs_loc", [(10, -5),
+                                                   (5, 10),
+                                                   (.2, .5)])
+    def test_fit_MLE_comp_optimizer(self, rvs_loc, rvs_scale):
+        data = stats.laplace.rvs(size=1000, loc=rvs_loc, scale=rvs_scale)
+
+        # the log-likelihood function for laplace is given by
+        def ll(loc, scale, data):
+            return -1 * (- (len(data)) * np.log(2*scale) -
+                         (1/scale)*np.sum(np.abs(data - loc)))
+
+        # test that the objective function result of the analytical MLEs is
+        # less than or equal to that of the numerically optimized estimate
+        loc, scale = stats.laplace.fit(data)
+        loc_opt, scale_opt = super(type(stats.laplace),
+                                   stats.laplace).fit(data)
+        ll_mle = ll(loc, scale, data)
+        ll_opt = ll(loc_opt, scale_opt, data)
+        assert ll_mle < ll_opt or np.allclose(ll_mle, ll_opt,
+                                              atol=1e-15, rtol=1e-15)
+
+    def test_fit_simple_non_random_data(self):
+        data = np.array([1.0, 1.0, 3.0, 5.0, 8.0, 14.0])
+        # with `floc` fixed to 6, scale should be 4.
+        loc, scale = stats.laplace.fit(data, floc=6)
+        assert_allclose(scale, 4, atol=1e-15, rtol=1e-15)
+        # with `fscale` fixed to 6, loc should be 4.
+        loc, scale = stats.laplace.fit(data, fscale=6)
+        assert_allclose(loc, 4, atol=1e-15, rtol=1e-15)
+
+    def test_sf_cdf_extremes(self):
+        # These calculations should not generate warnings.
+        x = 1000
+        p0 = stats.laplace.cdf(-x)
+        # The exact value is smaller than can be represented with
+        # 64 bit floating point, so the expected result is 0.
+        assert p0 == 0.0
+        # The closest 64 bit floating point representation of the
+        # exact value is 1.0.
+        p1 = stats.laplace.cdf(x)
+        assert p1 == 1.0
+
+        p0 = stats.laplace.sf(x)
+        # The exact value is smaller than can be represented with
+        # 64 bit floating point, so the expected result is 0.
+        assert p0 == 0.0
+        # The closest 64 bit floating point representation of the
+        # exact value is 1.0.
+        p1 = stats.laplace.sf(-x)
+        assert p1 == 1.0
+
+    def test_sf(self):
+        x = 200
+        p = stats.laplace.sf(x)
+        assert_allclose(p, np.exp(-x)/2, rtol=1e-13)
+
+    def test_isf(self):
+        p = 1e-25
+        x = stats.laplace.isf(p)
+        assert_allclose(x, -np.log(2*p), rtol=1e-13)
+
+
+class TestPowerlaw:
+    @pytest.fixture(scope='function')
+    def rng(self):
+        return np.random.default_rng(1234)
+
+    @pytest.mark.parametrize("rvs_shape", [.1, .5, .75, 1, 2])
+    @pytest.mark.parametrize("rvs_loc", [-1, 0, 1])
+    @pytest.mark.parametrize("rvs_scale", [.1, 1, 5])
+    @pytest.mark.parametrize('fix_shape, fix_loc, fix_scale',
+                             [p for p in product([True, False], repeat=3)
+                              if False in p])
+    def test_fit_MLE_comp_optimizer(self, rvs_shape, rvs_loc, rvs_scale,
+                                    fix_shape, fix_loc, fix_scale, rng):
+        data = stats.powerlaw.rvs(size=250, a=rvs_shape, loc=rvs_loc,
+                                  scale=rvs_scale, random_state=rng)
+
+        args = [data, (stats.powerlaw._fitstart(data), )]
+        func = stats.powerlaw._reduce_func(args, {})[1]
+
+        kwds = dict()
+        if fix_shape:
+            kwds['f0'] = rvs_shape
+        if fix_loc:
+            kwds['floc'] = np.nextafter(data.min(), -np.inf)
+        if fix_scale:
+            kwds['fscale'] = rvs_scale
+        _assert_less_or_close_loglike(stats.powerlaw, data, func, **kwds)
+
+    def test_problem_case(self):
+        # A problem observed with this test method indicated that some fixed
+        # scale values could produce bad results; this is now corrected.
+        a = 2.50002862645130604506
+        location = 0.0
+        scale = 35.249023299873095
+
+        data = stats.powerlaw.rvs(a=a, loc=location, scale=scale, size=100,
+                                  random_state=np.random.default_rng(5))
+
+        kwds = {'fscale': data.ptp() * 2}
+        args = [data, (stats.powerlaw._fitstart(data), )]
+        func = stats.powerlaw._reduce_func(args, {})[1]
+
+        _assert_less_or_close_loglike(stats.powerlaw, data, func, **kwds)
+
+    def test_fit_warnings(self):
+        assert_fit_warnings(stats.powerlaw)
+        # test for error when `fscale + floc` is less than the data maximum
+        msg = r" Maximum likelihood estimation with 'powerlaw' requires"
+        with assert_raises(FitDataError, match=msg):
+            stats.powerlaw.fit([1, 2, 4], floc=0, fscale=3)
+
+        # test for error when `data - floc >= 0`  is not satisfied
+        msg = r" Maximum likelihood estimation with 'powerlaw' requires"
+        with assert_raises(FitDataError, match=msg):
+            stats.powerlaw.fit([1, 2, 4], floc=2)
+
+        # test for fixed location not less than `min(data)`.
+        msg = r" Maximum likelihood estimation with 'powerlaw' requires"
+        with assert_raises(FitDataError, match=msg):
+            stats.powerlaw.fit([1, 2, 4], floc=1)
+
+        # test for error when the fixed scale is negative or zero
+        msg = r"Negative or zero `fscale` is outside"
+        with assert_raises(ValueError, match=msg):
+            stats.powerlaw.fit([1, 2, 4], fscale=-3)
+
+        # test for when fixed scale is less than or equal to range of data
+        msg = r"`fscale` must be greater than the range of data."
+        with assert_raises(ValueError, match=msg):
+            stats.powerlaw.fit([1, 2, 4], fscale=3)
+
+    def test_minimum_data_zero_gh17801(self):
+        # gh-17801 reported an overflow error when the minimum value of the
+        # data is zero. Check that this problem is resolved.
+        data = [0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 5, 6]
+        dist = stats.powerlaw
+        with np.errstate(over='ignore'):
+            _assert_less_or_close_loglike(dist, data, dist.nnlf)
+
+
+class TestInvGamma:
+    def test_invgamma_inf_gh_1866(self):
+        # invgamma's moments are only finite for a>n
+        # specific numbers checked w/ boost 1.54
+        with warnings.catch_warnings():
+            warnings.simplefilter('error', RuntimeWarning)
+            mvsk = stats.invgamma.stats(a=19.31, moments='mvsk')
+            expected = [0.05461496450, 0.0001723162534, 1.020362676,
+                        2.055616582]
+            assert_allclose(mvsk, expected)
+
+            a = [1.1, 3.1, 5.6]
+            mvsk = stats.invgamma.stats(a=a, moments='mvsk')
+            expected = ([10., 0.476190476, 0.2173913043],       # mmm
+                        [np.inf, 0.2061430632, 0.01312749422],  # vvv
+                        [np.nan, 41.95235392, 2.919025532],     # sss
+                        [np.nan, np.nan, 24.51923076])          # kkk
+            for x, y in zip(mvsk, expected):
+                assert_almost_equal(x, y)
+
+    def test_cdf_ppf(self):
+        # gh-6245
+        x = np.logspace(-2.6, 0)
+        y = stats.invgamma.cdf(x, 1)
+        xx = stats.invgamma.ppf(y, 1)
+        assert_allclose(x, xx)
+
+    def test_sf_isf(self):
+        # gh-6245
+        if sys.maxsize > 2**32:
+            x = np.logspace(2, 100)
+        else:
+            # The invgamma sf/isf roundtrip on 32-bit systems has relative
+            # accuracy ~1e-15 until x=1e+15, and becomes inf above x=1e+18
+            x = np.logspace(2, 18)
+
+        y = stats.invgamma.sf(x, 1)
+        xx = stats.invgamma.isf(y, 1)
+        assert_allclose(x, xx, rtol=1.0)
+
+
+class TestF:
+    def test_endpoints(self):
+        # Compute the pdf at the left endpoint of the support, `_f.a`.
+        data = [[stats.f, (2, 1), 1.0]]
+        ans = [_f.pdf(_f.a, *_args) for _f, _args, _ in data]
+        correct = [_correct_ for _f, _args, _correct_ in data]
+        assert_array_almost_equal(ans, correct)
+
+    def test_f_moments(self):
+        # n-th moment of F distributions is only finite for n < dfd / 2
+        m, v, s, k = stats.f.stats(11, 6.5, moments='mvsk')
+        assert_(np.isfinite(m))
+        assert_(np.isfinite(v))
+        assert_(np.isfinite(s))
+        assert_(not np.isfinite(k))
+
+    def test_moments_warnings(self):
+        # no warnings should be generated for dfd = 2, 4, 6, 8 (div by zero)
+        with warnings.catch_warnings():
+            warnings.simplefilter('error', RuntimeWarning)
+            stats.f.stats(dfn=[11]*4, dfd=[2, 4, 6, 8], moments='mvsk')
+
+    def test_stats_broadcast(self):
+        dfn = np.array([[3], [11]])
+        dfd = np.array([11, 12])
+        m, v, s, k = stats.f.stats(dfn=dfn, dfd=dfd, moments='mvsk')
+        m2 = [dfd / (dfd - 2)]*2
+        assert_allclose(m, m2)
+        v2 = 2 * dfd**2 * (dfn + dfd - 2) / dfn / (dfd - 2)**2 / (dfd - 4)
+        assert_allclose(v, v2)
+        s2 = ((2*dfn + dfd - 2) * np.sqrt(8*(dfd - 4)) /
+              ((dfd - 6) * np.sqrt(dfn*(dfn + dfd - 2))))
+        assert_allclose(s, s2)
+        k2num = 12 * (dfn * (5*dfd - 22) * (dfn + dfd - 2) +
+                      (dfd - 4) * (dfd - 2)**2)
+        k2den = dfn * (dfd - 6) * (dfd - 8) * (dfn + dfd - 2)
+        k2 = k2num / k2den
+        assert_allclose(k, k2)
+
+
+def test_rvgeneric_std():
+    # Regression test for #1191
+    assert_array_almost_equal(stats.t.std([5, 6]), [1.29099445, 1.22474487])
+
+
+def test_moments_t():
+    # regression test for #8786
+    assert_equal(stats.t.stats(df=1, moments='mvsk'),
+                 (np.inf, np.nan, np.nan, np.nan))
+    assert_equal(stats.t.stats(df=1.01, moments='mvsk'),
+                 (0.0, np.inf, np.nan, np.nan))
+    assert_equal(stats.t.stats(df=2, moments='mvsk'),
+                 (0.0, np.inf, np.nan, np.nan))
+    assert_equal(stats.t.stats(df=2.01, moments='mvsk'),
+                 (0.0, 2.01/(2.01-2.0), np.nan, np.inf))
+    assert_equal(stats.t.stats(df=3, moments='sk'), (np.nan, np.inf))
+    assert_equal(stats.t.stats(df=3.01, moments='sk'), (0.0, np.inf))
+    assert_equal(stats.t.stats(df=4, moments='sk'), (0.0, np.inf))
+    assert_equal(stats.t.stats(df=4.01, moments='sk'), (0.0, 6.0/(4.01 - 4.0)))
+
+
+def test_t_entropy():
+    df = [1, 2, 25, 100]
+    # Expected values were computed with mpmath.
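+    # A sketch of that computation, using the standard closed form
+    #     h(df) = (df+1)/2 * (psi((df+1)/2) - psi(df/2))
+    #             + log(sqrt(df) * beta(df/2, 1/2)),
+    # where psi is the digamma function, evaluated at high precision.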
+    expected = [2.5310242469692907, 1.9602792291600821,
+                1.459327578078393, 1.4289633653182439]
+    assert_allclose(stats.t.entropy(df), expected, rtol=1e-13)
+
+
+@pytest.mark.parametrize("methname", ["pdf", "logpdf", "cdf",
+                                      "ppf", "sf", "isf"])
+@pytest.mark.parametrize("df_infmask", [[0, 0], [1, 1], [0, 1],
+                                        [[0, 1, 0], [1, 1, 1]],
+                                        [[1, 0], [0, 1]],
+                                        [[0], [1]]])
+def test_t_inf_df(methname, df_infmask):
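+    # For infinite df the t distribution reduces to the normal distribution,
+    # so entries with df = inf are compared against stats.norm.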
+    np.random.seed(0)
+    df_infmask = np.asarray(df_infmask, dtype=bool)
+    df = np.random.uniform(0, 10, size=df_infmask.shape)
+    x = np.random.randn(*df_infmask.shape)
+    df[df_infmask] = np.inf
+    t_dist = stats.t(df=df, loc=3, scale=1)
+    t_dist_ref = stats.t(df=df[~df_infmask], loc=3, scale=1)
+    norm_dist = stats.norm(loc=3, scale=1)
+    t_meth = getattr(t_dist, methname)
+    t_meth_ref = getattr(t_dist_ref, methname)
+    norm_meth = getattr(norm_dist, methname)
+    res = t_meth(x)
+    assert_equal(res[df_infmask], norm_meth(x[df_infmask]))
+    assert_equal(res[~df_infmask], t_meth_ref(x[~df_infmask]))
+
+
+@pytest.mark.parametrize("df_infmask", [[0, 0], [1, 1], [0, 1],
+                                        [[0, 1, 0], [1, 1, 1]],
+                                        [[1, 0], [0, 1]],
+                                        [[0], [1]]])
+def test_t_inf_df_stats_entropy(df_infmask):
+    np.random.seed(0)
+    df_infmask = np.asarray(df_infmask, dtype=bool)
+    df = np.random.uniform(0, 10, size=df_infmask.shape)
+    df[df_infmask] = np.inf
+    res = stats.t.stats(df=df, loc=3, scale=1, moments='mvsk')
+    res_ex_inf = stats.norm.stats(loc=3, scale=1, moments='mvsk')
+    res_ex_noinf = stats.t.stats(df=df[~df_infmask], loc=3, scale=1,
+                                 moments='mvsk')
+    for i in range(4):
+        assert_equal(res[i][df_infmask], res_ex_inf[i])
+        assert_equal(res[i][~df_infmask], res_ex_noinf[i])
+
+    res = stats.t.entropy(df=df, loc=3, scale=1)
+    res_ex_inf = stats.norm.entropy(loc=3, scale=1)
+    res_ex_noinf = stats.t.entropy(df=df[~df_infmask], loc=3, scale=1)
+    assert_equal(res[df_infmask], res_ex_inf)
+    assert_equal(res[~df_infmask], res_ex_noinf)
+
+
+class TestRvDiscrete:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_rvs(self):
+        states = [-1, 0, 1, 2, 3, 4]
+        probability = [0.0, 0.3, 0.4, 0.0, 0.3, 0.0]
+        samples = 1000
+        r = stats.rv_discrete(name='sample', values=(states, probability))
+        x = r.rvs(size=samples)
+        assert_(isinstance(x, numpy.ndarray))
+
+        for s, p in zip(states, probability):
+            assert_(abs(sum(x == s)/float(samples) - p) < 0.05)
+
+        x = r.rvs()
+        assert np.issubdtype(type(x), np.integer)
+
+    def test_entropy(self):
+        # Basic tests of entropy.
+        pvals = np.array([0.25, 0.45, 0.3])
+        p = stats.rv_discrete(values=([0, 1, 2], pvals))
+        expected_h = -sum(xlogy(pvals, pvals))
+        h = p.entropy()
+        assert_allclose(h, expected_h)
+
+        p = stats.rv_discrete(values=([0, 1, 2], [1.0, 0, 0]))
+        h = p.entropy()
+        assert_equal(h, 0.0)
+
+    def test_pmf(self):
+        xk = [1, 2, 4]
+        pk = [0.5, 0.3, 0.2]
+        rv = stats.rv_discrete(values=(xk, pk))
+
+        x = [[1., 4.],
+             [3., 2]]
+        assert_allclose(rv.pmf(x),
+                        [[0.5, 0.2],
+                         [0., 0.3]], atol=1e-14)
+
+    def test_cdf(self):
+        xk = [1, 2, 4]
+        pk = [0.5, 0.3, 0.2]
+        rv = stats.rv_discrete(values=(xk, pk))
+
+        x_values = [-2, 1., 1.1, 1.5, 2.0, 3.0, 4, 5]
+        expected = [0, 0.5, 0.5, 0.5, 0.8, 0.8, 1, 1]
+        assert_allclose(rv.cdf(x_values), expected, atol=1e-14)
+
+        # also check scalar arguments
+        assert_allclose([rv.cdf(xx) for xx in x_values],
+                        expected, atol=1e-14)
+
+    def test_ppf(self):
+        xk = [1, 2, 4]
+        pk = [0.5, 0.3, 0.2]
+        rv = stats.rv_discrete(values=(xk, pk))
+
+        q_values = [0.1, 0.5, 0.6, 0.8, 0.9, 1.]
+        expected = [1, 1, 2, 2, 4, 4]
+        assert_allclose(rv.ppf(q_values), expected, atol=1e-14)
+
+        # also check scalar arguments
+        assert_allclose([rv.ppf(q) for q in q_values],
+                        expected, atol=1e-14)
+
+    def test_cdf_ppf_next(self):
+        # copied and special cased from test_discrete_basic
+        vals = ([1, 2, 4, 7, 8], [0.1, 0.2, 0.3, 0.3, 0.1])
+        rv = stats.rv_discrete(values=vals)
+
+        assert_array_equal(rv.ppf(rv.cdf(rv.xk[:-1]) + 1e-8),
+                           rv.xk[1:])
+
+    def test_multidimension(self):
+        xk = np.arange(12).reshape((3, 4))
+        pk = np.array([[0.1, 0.1, 0.15, 0.05],
+                       [0.1, 0.1, 0.05, 0.05],
+                       [0.1, 0.1, 0.05, 0.05]])
+        rv = stats.rv_discrete(values=(xk, pk))
+
+        assert_allclose(rv.expect(), np.sum(rv.xk * rv.pk), atol=1e-14)
+
+    def test_bad_input(self):
+        xk = [1, 2, 3]
+        pk = [0.5, 0.5]
+        assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
+
+        pk = [1, 2, 3]
+        assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
+
+        xk = [1, 2, 3]
+        pk = [0.5, 1.2, -0.7]
+        assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
+
+        xk = [1, 2, 3, 4, 5]
+        pk = [0.3, 0.3, 0.3, 0.3, -0.2]
+        assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
+
+    def test_shape_rv_sample(self):
+        # tests added for gh-9565
+
+        # mismatch of 2d inputs
+        xk, pk = np.arange(4).reshape((2, 2)), np.full((2, 3), 1/6)
+        assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
+
+        # same number of elements, but shapes not compatible
+        xk, pk = np.arange(6).reshape((3, 2)), np.full((2, 3), 1/6)
+        assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
+
+        # same shapes => no error
+        xk, pk = np.arange(6).reshape((3, 2)), np.full((3, 2), 1/6)
+        assert_equal(stats.rv_discrete(values=(xk, pk)).pmf(0), 1/6)
+
+    def test_expect1(self):
+        xk = [1, 2, 4, 6, 7, 11]
+        pk = [0.1, 0.2, 0.2, 0.2, 0.2, 0.1]
+        rv = stats.rv_discrete(values=(xk, pk))
+
+        assert_allclose(rv.expect(), np.sum(rv.xk * rv.pk), atol=1e-14)
+
+    def test_expect2(self):
+        # rv_sample should override _expect. Bug report from
+        # https://stackoverflow.com/questions/63199792
+        y = [200.0, 300.0, 400.0, 500.0, 600.0, 700.0, 800.0, 900.0, 1000.0,
+             1100.0, 1200.0, 1300.0, 1400.0, 1500.0, 1600.0, 1700.0, 1800.0,
+             1900.0, 2000.0, 2100.0, 2200.0, 2300.0, 2400.0, 2500.0, 2600.0,
+             2700.0, 2800.0, 2900.0, 3000.0, 3100.0, 3200.0, 3300.0, 3400.0,
+             3500.0, 3600.0, 3700.0, 3800.0, 3900.0, 4000.0, 4100.0, 4200.0,
+             4300.0, 4400.0, 4500.0, 4600.0, 4700.0, 4800.0]
+
+        py = [0.0004, 0.0, 0.0033, 0.006500000000000001, 0.0, 0.0,
+              0.004399999999999999, 0.6862, 0.0, 0.0, 0.0,
+              0.00019999999999997797, 0.0006000000000000449,
+              0.024499999999999966, 0.006400000000000072,
+              0.0043999999999999595, 0.019499999999999962,
+              0.03770000000000007, 0.01759999999999995, 0.015199999999999991,
+              0.018100000000000005, 0.04500000000000004, 0.0025999999999999357,
+              0.0, 0.0041000000000001036, 0.005999999999999894,
+              0.0042000000000000925, 0.0050000000000000044,
+              0.0041999999999999815, 0.0004999999999999449,
+              0.009199999999999986, 0.008200000000000096,
+              0.0, 0.0, 0.0046999999999999265, 0.0019000000000000128,
+              0.0006000000000000449, 0.02510000000000001, 0.0,
+              0.007199999999999984, 0.0, 0.012699999999999934, 0.0, 0.0,
+              0.008199999999999985, 0.005600000000000049, 0.0]
+
+        rv = stats.rv_discrete(values=(y, py))
+
+        # check the mean
+        assert_allclose(rv.expect(), rv.mean(), atol=1e-14)
+        assert_allclose(rv.expect(),
+                        sum(v * w for v, w in zip(y, py)), atol=1e-14)
+
+        # also check the second moment
+        assert_allclose(rv.expect(lambda x: x**2),
+                        sum(v**2 * w for v, w in zip(y, py)), atol=1e-14)
+
+
+class TestSkewCauchy:
+    def test_cauchy(self):
+        x = np.linspace(-5, 5, 100)
+        assert_array_almost_equal(stats.skewcauchy.pdf(x, a=0),
+                                  stats.cauchy.pdf(x))
+        assert_array_almost_equal(stats.skewcauchy.cdf(x, a=0),
+                                  stats.cauchy.cdf(x))
+        assert_array_almost_equal(stats.skewcauchy.ppf(x, a=0),
+                                  stats.cauchy.ppf(x))
+
+    def test_skewcauchy_R(self):
+        # options(digits=16)
+        # library(sgt)
+        # # lmbda, x contain the values generated for a, x below
+        # lmbda <- c(0.0976270078546495, 0.430378732744839, 0.2055267521432877,
+        #            0.0897663659937937, -0.15269040132219, 0.2917882261333122,
+        #            -0.12482557747462, 0.7835460015641595, 0.9273255210020589,
+        #            -0.2331169623484446)
+        # x <- c(2.917250380826646, 0.2889491975290444, 0.6804456109393229,
+        #        4.25596638292661, -4.289639418021131, -4.1287070029845925,
+        #        -4.797816025596743, 3.32619845547938, 2.7815675094985046,
+        #        3.700121482468191)
+        # pdf = dsgt(x, mu=0, lambda=lmbda, sigma=1, q=1/2, mean.cent=FALSE,
+        #            var.adj = sqrt(2))
+        # cdf = psgt(x, mu=0, lambda=lmbda, sigma=1, q=1/2, mean.cent=FALSE,
+        #            var.adj = sqrt(2))
+        # qsgt(cdf, mu=0, lambda=lmbda, sigma=1, q=1/2, mean.cent=FALSE,
+        #      var.adj = sqrt(2))
+
+        np.random.seed(0)
+        a = np.random.rand(10) * 2 - 1
+        x = np.random.rand(10) * 10 - 5
+        pdf = [0.039473975217333909, 0.305829714049903223, 0.24140158118994162,
+               0.019585772402693054, 0.021436553695989482, 0.00909817103867518,
+               0.01658423410016873, 0.071083288030394126, 0.103250045941454524,
+               0.013110230778426242]
+        cdf = [0.87426677718213752, 0.37556468910780882, 0.59442096496538066,
+               0.91304659850890202, 0.09631964100300605, 0.03829624330921733,
+               0.08245240578402535, 0.72057062945510386, 0.62826415852515449,
+               0.95011308463898292]
+        assert_allclose(stats.skewcauchy.pdf(x, a), pdf)
+        assert_allclose(stats.skewcauchy.cdf(x, a), cdf)
+        assert_allclose(stats.skewcauchy.ppf(cdf, a), x)
+
+
+# Test data for TestSkewNorm.test_noncentral_moments()
+# The expected noncentral moments were computed by Wolfram Alpha.
+# In Wolfram Alpha, enter
+#    SkewNormalDistribution[0, 1, a] moment
+# with `a` replaced by the desired shape parameter.  In the results, there
+# should be a table of the first four moments. Click on "More" to get more
+# moments.  The expected moments start with the first moment (order = 1).
+_skewnorm_noncentral_moments = [
+    (2, [2*np.sqrt(2/(5*np.pi)),
+         1,
+         22/5*np.sqrt(2/(5*np.pi)),
+         3,
+         446/25*np.sqrt(2/(5*np.pi)),
+         15,
+         2682/25*np.sqrt(2/(5*np.pi)),
+         105,
+         107322/125*np.sqrt(2/(5*np.pi))]),
+    (0.1, [np.sqrt(2/(101*np.pi)),
+           1,
+           302/101*np.sqrt(2/(101*np.pi)),
+           3,
+           (152008*np.sqrt(2/(101*np.pi)))/10201,
+           15,
+           (107116848*np.sqrt(2/(101*np.pi)))/1030301,
+           105,
+           (97050413184*np.sqrt(2/(101*np.pi)))/104060401]),
+    (-3, [-3/np.sqrt(5*np.pi),
+          1,
+          -63/(10*np.sqrt(5*np.pi)),
+          3,
+          -2529/(100*np.sqrt(5*np.pi)),
+          15,
+          -30357/(200*np.sqrt(5*np.pi)),
+          105,
+          -2428623/(2000*np.sqrt(5*np.pi)),
+          945,
+          -242862867/(20000*np.sqrt(5*np.pi)),
+          10395,
+          -29143550277/(200000*np.sqrt(5*np.pi)),
+          135135]),
+]
+
+
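+def _demo_skewnorm_first_moment():
+    # Hypothetical helper (not collected by pytest): cross-checks the
+    # order-1 entries of the table above against the closed form for the
+    # skew normal mean, sqrt(2/pi) * a / sqrt(1 + a**2). A sketch only,
+    # assuming the module-level imports already used in this file.
+    for a in [2, 0.1, -3]:
+        delta = a / np.sqrt(1 + a**2)
+        assert_allclose(stats.skewnorm.mean(a), np.sqrt(2/np.pi) * delta,
+                        rtol=1e-12)
+
+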
+class TestSkewNorm:
+    def setup_method(self):
+        self.rng = check_random_state(1234)
+
+    def test_normal(self):
+        # When the skewness is 0 the distribution is normal
+        x = np.linspace(-5, 5, 100)
+        assert_array_almost_equal(stats.skewnorm.pdf(x, a=0),
+                                  stats.norm.pdf(x))
+
+    def test_rvs(self):
+        shape = (3, 4, 5)
+        x = stats.skewnorm.rvs(a=0.75, size=shape, random_state=self.rng)
+        assert_equal(shape, x.shape)
+
+        x = stats.skewnorm.rvs(a=-3, size=shape, random_state=self.rng)
+        assert_equal(shape, x.shape)
+
+    def test_moments(self):
+        X = stats.skewnorm.rvs(a=4, size=int(1e6), loc=5, scale=2,
+                               random_state=self.rng)
+        expected = [np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)]
+        computed = stats.skewnorm.stats(a=4, loc=5, scale=2, moments='mvsk')
+        assert_array_almost_equal(computed, expected, decimal=2)
+
+        X = stats.skewnorm.rvs(a=-4, size=int(1e6), loc=5, scale=2,
+                               random_state=self.rng)
+        expected = [np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)]
+        computed = stats.skewnorm.stats(a=-4, loc=5, scale=2, moments='mvsk')
+        assert_array_almost_equal(computed, expected, decimal=2)
+
+    def test_cdf_large_x(self):
+        # Regression test for gh-7746.
+        # The x values are large enough that the closest 64 bit floating
+        # point representation of the exact CDF is 1.0.
+        p = stats.skewnorm.cdf([10, 20, 30], -1)
+        assert_allclose(p, np.ones(3), rtol=1e-14)
+        p = stats.skewnorm.cdf(25, 2.5)
+        assert_allclose(p, 1.0, rtol=1e-14)
+
+    def test_cdf_sf_small_values(self):
+        # Triples are [x, a, cdf(x, a)].  These values were computed
+        # using CDF[SkewNormalDistribution[0, 1, a], x] in Wolfram Alpha.
+        cdfvals = [
+            [-8, 1, 3.870035046664392611e-31],
+            [-4, 2, 8.1298399188811398e-21],
+            [-2, 5, 1.55326826787106273e-26],
+            [-9, -1, 2.257176811907681295e-19],
+            [-10, -4, 1.523970604832105213e-23],
+        ]
+        for x, a, cdfval in cdfvals:
+            p = stats.skewnorm.cdf(x, a)
+            assert_allclose(p, cdfval, rtol=1e-8)
+            # For the skew normal distribution, sf(-x, -a) = cdf(x, a).
+            p = stats.skewnorm.sf(-x, -a)
+            assert_allclose(p, cdfval, rtol=1e-8)
+
+    @pytest.mark.parametrize('a, moments', _skewnorm_noncentral_moments)
+    def test_noncentral_moments(self, a, moments):
+        for order, expected in enumerate(moments, start=1):
+            mom = stats.skewnorm.moment(order, a)
+            assert_allclose(mom, expected, rtol=1e-14)
+
+    def test_fit(self):
+        rng = np.random.default_rng(4609813989115202851)
+
+        a, loc, scale = -2, 3.5, 0.5  # arbitrary, valid parameters
+        dist = stats.skewnorm(a, loc, scale)
+        rvs = dist.rvs(size=100, random_state=rng)
+
+        # test that MLE still honors guesses and fixed parameters
+        a2, loc2, scale2 = stats.skewnorm.fit(rvs, -1.5, floc=3)
+        a3, loc3, scale3 = stats.skewnorm.fit(rvs, -1.6, floc=3)
+        assert loc2 == loc3 == 3  # fixed parameter is respected
+        assert a2 != a3  # different guess -> (slightly) different outcome
+        # quality of fit is tested elsewhere
+
+        # test that MoM honors fixed parameters, accepts (but ignores) guesses
+        a4, loc4, scale4 = stats.skewnorm.fit(rvs, 3, fscale=3, method='mm')
+        assert scale4 == 3
+        # because scale was fixed, only the mean and skewness will be matched
+        dist4 = stats.skewnorm(a4, loc4, scale4)
+        res = dist4.stats(moments='ms')
+        ref = np.mean(rvs), stats.skew(rvs)
+        assert_allclose(res, ref)
+
+        # Test behavior when skew of data is beyond maximum of skewnorm
+        rvs = stats.pareto.rvs(1, size=100, random_state=rng)
+
+        # MLE still works
+        res = stats.skewnorm.fit(rvs)
+        assert np.all(np.isfinite(res))
+
+        # MoM fits variance and skewness
+        a5, loc5, scale5 = stats.skewnorm.fit(rvs, method='mm')
+        assert np.isinf(a5)
+        # distribution infrastructure doesn't allow infinite shape parameters
+        # into _stats; it just bypasses it and produces NaNs. Calculate
+        # moments manually.
+        m, v = np.mean(rvs), np.var(rvs)
+        assert_allclose(m, loc5 + scale5 * np.sqrt(2/np.pi))
+        assert_allclose(v, scale5**2 * (1 - 2 / np.pi))
+
+
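+def _demo_skewnorm_limit_is_halfnorm():
+    # Hypothetical helper (not collected by pytest): as a -> inf, skewnorm
+    # approaches the half-normal distribution, whose mean and variance are
+    # the closed forms used manually at the end of TestSkewNorm.test_fit.
+    m, v = stats.halfnorm.stats(moments='mv')
+    assert_allclose(m, np.sqrt(2 / np.pi))
+    assert_allclose(v, 1 - 2 / np.pi)
+
+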
+class TestExpon:
+    def test_zero(self):
+        assert_equal(stats.expon.pdf(0), 1)
+
+    def test_tail(self):  # Regression test for ticket 807
+        assert_equal(stats.expon.cdf(1e-18), 1e-18)
+        assert_equal(stats.expon.isf(stats.expon.sf(40)), 40)
+
+    def test_nan_raises_error(self):
+        # see gh-issue 10300
+        x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
+        assert_raises(ValueError, stats.expon.fit, x)
+
+    def test_inf_raises_error(self):
+        # see gh-issue 10300
+        x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
+        assert_raises(ValueError, stats.expon.fit, x)
+
+
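+def _demo_expon_cdf_small_x():
+    # Hypothetical helper (not collected by pytest): expon.cdf(x) equals
+    # -expm1(-x), which is ~x for tiny x; this is why the tail regression
+    # test in TestExpon expects cdf(1e-18) == 1e-18 without underflow.
+    x = 1e-18
+    assert_allclose(stats.expon.cdf(x), -np.expm1(-x), rtol=1e-15)
+    assert_allclose(stats.expon.cdf(x), x, rtol=1e-15)
+
+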
+class TestNorm:
+    def test_nan_raises_error(self):
+        # see gh-issue 10300
+        x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
+        assert_raises(ValueError, stats.norm.fit, x)
+
+    def test_inf_raises_error(self):
+        # see gh-issue 10300
+        x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
+        assert_raises(ValueError, stats.norm.fit, x)
+
+    def test_bad_keyword_arg(self):
+        x = [1, 2, 3]
+        assert_raises(TypeError, stats.norm.fit, x, plate="shrimp")
+
+
+class TestUniform:
+    """gh-10300"""
+    def test_nan_raises_error(self):
+        # see gh-issue 10300
+        x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
+        assert_raises(ValueError, stats.uniform.fit, x)
+
+    def test_inf_raises_error(self):
+        # see gh-issue 10300
+        x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
+        assert_raises(ValueError, stats.uniform.fit, x)
+
+
+class TestExponNorm:
+    def test_moments(self):
+        # Some moment test cases based on non-loc/scaled formula
+        def get_moms(lam, sig, mu):
+            # See Wikipedia for these formulae, where the distribution is
+            # listed as an exponentially modified Gaussian
+            opK2 = 1.0 + 1 / (lam*sig)**2
+            exp_skew = 2 / (lam * sig)**3 * opK2**(-1.5)
+            exp_kurt = 6.0 * (1 + (lam * sig)**2)**(-2)
+            return [mu + 1/lam, sig*sig + 1.0/(lam*lam), exp_skew, exp_kurt]
+
+        mu, sig, lam = 0, 1, 1
+        K = 1.0 / (lam * sig)
+        sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
+        assert_almost_equal(sts, get_moms(lam, sig, mu))
+        mu, sig, lam = -3, 2, 0.1
+        K = 1.0 / (lam * sig)
+        sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
+        assert_almost_equal(sts, get_moms(lam, sig, mu))
+        mu, sig, lam = 0, 3, 1
+        K = 1.0 / (lam * sig)
+        sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
+        assert_almost_equal(sts, get_moms(lam, sig, mu))
+        mu, sig, lam = -5, 11, 3.5
+        K = 1.0 / (lam * sig)
+        sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
+        assert_almost_equal(sts, get_moms(lam, sig, mu))
+
+    def test_nan_raises_error(self):
+        # see gh-issue 10300
+        x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
+        assert_raises(ValueError, stats.exponnorm.fit, x, floc=0, fscale=1)
+
+    def test_inf_raises_error(self):
+        # see gh-issue 10300
+        x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
+        assert_raises(ValueError, stats.exponnorm.fit, x, floc=0, fscale=1)
+
+    def test_extremes_x(self):
+        # Test for extreme values against overflows
+        assert_almost_equal(stats.exponnorm.pdf(-900, 1), 0.0)
+        assert_almost_equal(stats.exponnorm.pdf(+900, 1), 0.0)
+        assert_almost_equal(stats.exponnorm.pdf(-900, 0.01), 0.0)
+        assert_almost_equal(stats.exponnorm.pdf(+900, 0.01), 0.0)
+
+    # Expected values for the PDF were computed with mpmath, with
+    # the following function, and with mpmath.mp.dps = 50.
+    #
+    #   def exponnorm_stdpdf(x, K):
+    #       x = mpmath.mpf(x)
+    #       K = mpmath.mpf(K)
+    #       t1 = mpmath.exp(1/(2*K**2) - x/K)
+    #       erfcarg = -(x - 1/K)/mpmath.sqrt(2)
+    #       t2 = mpmath.erfc(erfcarg)
+    #       return t1 * t2 / (2*K)
+    #
+    @pytest.mark.parametrize('x, K, expected',
+                             [(20, 0.01, 6.90010764753618e-88),
+                              (1, 0.01, 0.24438994313247364),
+                              (-1, 0.01, 0.23955149623472075),
+                              (-20, 0.01, 4.6004708690125477e-88),
+                              (10, 1, 7.48518298877006e-05),
+                              (10, 10000, 9.990005048283775e-05)])
+    def test_std_pdf(self, x, K, expected):
+        assert_allclose(stats.exponnorm.pdf(x, K), expected, rtol=5e-12)
+
+    # Expected values for the CDF were computed with mpmath using
+    # the following function and with mpmath.mp.dps = 60:
+    #
+    #   def mp_exponnorm_cdf(x, K, loc=0, scale=1):
+    #       x = mpmath.mpf(x)
+    #       K = mpmath.mpf(K)
+    #       loc = mpmath.mpf(loc)
+    #       scale = mpmath.mpf(scale)
+    #       z = (x - loc)/scale
+    #       return (mpmath.ncdf(z)
+    #               - mpmath.exp((1/(2*K) - z)/K)*mpmath.ncdf(z - 1/K))
+    #
+    @pytest.mark.parametrize('x, K, scale, expected',
+                             [[0, 0.01, 1, 0.4960109760186432],
+                              [-5, 0.005, 1, 2.7939945412195734e-07],
+                              [-1e4, 0.01, 100, 0.0],
+                              [-1e4, 0.01, 1000, 6.920401854427357e-24],
+                              [5, 0.001, 1, 0.9999997118542392]])
+    def test_cdf_small_K(self, x, K, scale, expected):
+        p = stats.exponnorm.cdf(x, K, scale=scale)
+        if expected == 0.0:
+            assert p == 0.0
+        else:
+            assert_allclose(p, expected, rtol=1e-13)
+
+    # Expected values for the SF were computed with mpmath using
+    # the following function and with mpmath.mp.dps = 60:
+    #
+    #   def mp_exponnorm_sf(x, K, loc=0, scale=1):
+    #       x = mpmath.mpf(x)
+    #       K = mpmath.mpf(K)
+    #       loc = mpmath.mpf(loc)
+    #       scale = mpmath.mpf(scale)
+    #       z = (x - loc)/scale
+    #       return (mpmath.ncdf(-z)
+    #               + mpmath.exp((1/(2*K) - z)/K)*mpmath.ncdf(z - 1/K))
+    #
+    @pytest.mark.parametrize('x, K, scale, expected',
+                             [[10, 0.01, 1, 8.474702916146657e-24],
+                              [2, 0.005, 1, 0.02302280664231312],
+                              [5, 0.005, 0.5, 8.024820681931086e-24],
+                              [10, 0.005, 0.5, 3.0603340062892486e-89],
+                              [20, 0.005, 0.5, 0.0],
+                              [-3, 0.001, 1, 0.9986545205566117]])
+    def test_sf_small_K(self, x, K, scale, expected):
+        p = stats.exponnorm.sf(x, K, scale=scale)
+        if expected == 0.0:
+            assert p == 0.0
+        else:
+            assert_allclose(p, expected, rtol=5e-13)
+
+
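+def _demo_exponnorm_mean_var():
+    # Hypothetical helper (not collected by pytest): a sketch of the
+    # parameterization behind TestExponNorm.test_moments. With loc=0 and
+    # scale=1, exponnorm(K) is N(0, 1) plus an independent exponential
+    # variate with mean K, so its mean is K and its variance is 1 + K**2.
+    K = 2.5
+    m, v = stats.exponnorm.stats(K, moments='mv')
+    assert_allclose((m, v), (K, 1 + K**2))
+
+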
+class TestGenExpon:
+    def test_pdf_unity_area(self):
+        from scipy.integrate import simps
+        # PDF should integrate to one
+        p = stats.genexpon.pdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0)
+        assert_almost_equal(simps(p, dx=0.01), 1, 1)
+
+    def test_cdf_bounds(self):
+        # CDF should always lie within [0, 1]
+        cdf = stats.genexpon.cdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0)
+        assert_(numpy.all((0 <= cdf) & (cdf <= 1)))
+
+    def test_sf_tail(self):
+        # Expected value computed with mpmath. This script
+        #     import mpmath
+        #     mpmath.mp.dps = 80
+        #     x = mpmath.mpf('15.0')
+        #     a = mpmath.mpf('1.0')
+        #     b = mpmath.mpf('2.0')
+        #     c = mpmath.mpf('1.5')
+        #     print(float(mpmath.exp((-a-b)*x + (b/c)*-mpmath.expm1(-c*x))))
+        # prints
+        #     1.0859444834514553e-19
+        s = stats.genexpon.sf(15, 1, 2, 1.5)
+        assert_allclose(s, 1.0859444834514553e-19, rtol=1e-13)
+
+
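+def _demo_genexpon_sf_closed_form():
+    # Hypothetical helper (not collected by pytest): the closed form used in
+    # the mpmath script of TestGenExpon.test_sf_tail, evaluated at a moderate
+    # x where double precision is unproblematic:
+    #     sf(x; a, b, c) = exp(-(a + b)*x + (b/c)*(1 - exp(-c*x)))
+    x, a, b, c = 2.0, 1.0, 2.0, 1.5
+    expected = np.exp(-(a + b)*x - (b/c)*np.expm1(-c*x))
+    assert_allclose(stats.genexpon.sf(x, a, b, c), expected, rtol=1e-12)
+
+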
+class TestExponpow:
+    def test_tail(self):
+        assert_almost_equal(stats.exponpow.cdf(1e-10, 2.), 1e-20)
+        assert_almost_equal(stats.exponpow.isf(stats.exponpow.sf(5, .8), .8),
+                            5)
+
+
+class TestSkellam:
+    def test_pmf(self):
+        # comparison to R
+        k = numpy.arange(-10, 15)
+        mu1, mu2 = 10, 5
+        skpmfR = numpy.array(
+                   [4.2254582961926893e-005, 1.1404838449648488e-004,
+                    2.8979625801752660e-004, 6.9177078182101231e-004,
+                    1.5480716105844708e-003, 3.2412274963433889e-003,
+                    6.3373707175123292e-003, 1.1552351566696643e-002,
+                    1.9606152375042644e-002, 3.0947164083410337e-002,
+                    4.5401737566767360e-002, 6.1894328166820688e-002,
+                    7.8424609500170578e-002, 9.2418812533573133e-002,
+                    1.0139793148019728e-001, 1.0371927988298846e-001,
+                    9.9076583077406091e-002, 8.8546660073089561e-002,
+                    7.4187842052486810e-002, 5.8392772862200251e-002,
+                    4.3268692953013159e-002, 3.0248159818374226e-002,
+                    1.9991434305603021e-002, 1.2516877303301180e-002,
+                    7.4389876226229707e-003])
+
+        assert_almost_equal(stats.skellam.pmf(k, mu1, mu2), skpmfR, decimal=15)
+
+    @pytest.mark.filterwarnings('ignore::RuntimeWarning')
+    def test_cdf(self):
+        # comparison to R, only 5 decimals
+        k = numpy.arange(-10, 15)
+        mu1, mu2 = 10, 5
+        skcdfR = numpy.array(
+                   [6.4061475386192104e-005, 1.7810985988267694e-004,
+                    4.6790611790020336e-004, 1.1596768997212152e-003,
+                    2.7077485103056847e-003, 5.9489760066490718e-003,
+                    1.2286346724161398e-002, 2.3838698290858034e-002,
+                    4.3444850665900668e-002, 7.4392014749310995e-002,
+                    1.1979375231607835e-001, 1.8168808048289900e-001,
+                    2.6011268998306952e-001, 3.5253150251664261e-001,
+                    4.5392943399683988e-001, 5.5764871387982828e-001,
+                    6.5672529695723436e-001, 7.4527195703032389e-001,
+                    8.1945979908281064e-001, 8.7785257194501087e-001,
+                    9.2112126489802404e-001, 9.5136942471639818e-001,
+                    9.7136085902200120e-001, 9.8387773632530240e-001,
+                    9.9131672394792536e-001])
+
+        assert_almost_equal(stats.skellam.cdf(k, mu1, mu2), skcdfR, decimal=5)
+
+
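+def _demo_skellam_as_poisson_difference():
+    # Hypothetical helper (not collected by pytest): the Skellam
+    # distribution is the difference of two independent Poisson variates,
+    # so its pmf can be cross-checked by summing products of Poisson pmfs.
+    k, mu1, mu2 = 3, 10, 5
+    n = np.arange(0, 200)
+    direct = np.sum(stats.poisson.pmf(n + k, mu1) * stats.poisson.pmf(n, mu2))
+    assert_allclose(stats.skellam.pmf(k, mu1, mu2), direct, rtol=1e-10)
+
+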
+class TestLognorm:
+    def test_pdf(self):
+        # Regression test for Ticket #1471: avoid nan with 0/0 situation
+        # Also make sure there are no warnings at x=0, cf gh-5202
+        with warnings.catch_warnings():
+            warnings.simplefilter('error', RuntimeWarning)
+            pdf = stats.lognorm.pdf([0, 0.5, 1], 1)
+            assert_array_almost_equal(pdf, [0.0, 0.62749608, 0.39894228])
+
+    def test_logcdf(self):
+        # Regression test for gh-5940: sf et al would underflow too early
+        x2, mu, sigma = 201.68, 195, 0.149
+        assert_allclose(stats.lognorm.sf(x2-mu, s=sigma),
+                        stats.norm.sf(np.log(x2-mu)/sigma))
+        assert_allclose(stats.lognorm.logsf(x2-mu, s=sigma),
+                        stats.norm.logsf(np.log(x2-mu)/sigma))
+
+
+class TestBeta:
+    def test_logpdf(self):
+        # Regression test for Ticket #1326: avoid nan with 0*log(0) situation
+        logpdf = stats.beta.logpdf(0, 1, 0.5)
+        assert_almost_equal(logpdf, -0.69314718056)
+        logpdf = stats.beta.logpdf(0, 0.5, 1)
+        assert_almost_equal(logpdf, np.inf)
+
+    def test_logpdf_ticket_1866(self):
+        alpha, beta = 267, 1472
+        x = np.array([0.2, 0.5, 0.6])
+        b = stats.beta(alpha, beta)
+        assert_allclose(b.logpdf(x).sum(), -1201.699061824062)
+        assert_allclose(b.pdf(x), np.exp(b.logpdf(x)))
+
+    def test_fit_bad_keyword_args(self):
+        x = [0.1, 0.5, 0.6]
+        assert_raises(TypeError, stats.beta.fit, x, floc=0, fscale=1,
+                      plate="shrimp")
+
+    def test_fit_duplicated_fixed_parameter(self):
+        # At most one of 'f0', 'fa' or 'fix_a' can be given to the fit method.
+        # More than one raises a ValueError.
+        x = [0.1, 0.5, 0.6]
+        assert_raises(ValueError, stats.beta.fit, x, fa=0.5, fix_a=0.5)
+
+    @pytest.mark.skipif(MACOS_INTEL, reason="Overflow, see gh-14901")
+    def test_issue_12635(self):
+        # Confirm that Boost's beta distribution resolves gh-12635.
+        # Check against R:
+        # options(digits=16)
+        # p = 0.9999999999997369
+        # a = 75.0
+        # b = 66334470.0
+        # print(qbeta(p, a, b))
+        p, a, b = 0.9999999999997369, 75.0, 66334470.0
+        assert_allclose(stats.beta.ppf(p, a, b), 2.343620802982393e-06)
+
+    @pytest.mark.skipif(MACOS_INTEL, reason="Overflow, see gh-14901")
+    def test_issue_12794(self):
+        # Confirm that Boost's beta distribution resolves gh-12794.
+        # Check against R.
+        # options(digits=16)
+        # p = 1e-11
+        # count_list = c(10,100,1000)
+        # print(qbeta(1-p, count_list + 1, 100000 - count_list))
+        inv_R = np.array([0.0004944464889611935,
+                          0.0018360586912635726,
+                          0.0122663919942518351])
+        count_list = np.array([10, 100, 1000])
+        p = 1e-11
+        inv = stats.beta.isf(p, count_list + 1, 100000 - count_list)
+        assert_allclose(inv, inv_R)
+        res = stats.beta.sf(inv, count_list + 1, 100000 - count_list)
+        assert_allclose(res, p)
+
+    @pytest.mark.skipif(MACOS_INTEL, reason="Overflow, see gh-14901")
+    def test_issue_12796(self):
+        # Confirm that Boost's beta distribution succeeds in the case
+        # of gh-12796
+        alpha_2 = 5e-6
+        count_ = np.arange(1, 20)
+        nobs = 100000
+        q, a, b = 1 - alpha_2, count_ + 1, nobs - count_
+        inv = stats.beta.ppf(q, a, b)
+        res = stats.beta.cdf(inv, a, b)
+        assert_allclose(res, 1 - alpha_2)
+
+    def test_endpoints(self):
+        # Confirm that Boost's beta distribution returns inf at x=1
+        # when b<1
+        a, b = 1, 0.5
+        assert_equal(stats.beta.pdf(1, a, b), np.inf)
+
+        # Confirm that Boost's beta distribution returns inf at x=0
+        # when a<1
+        a, b = 0.2, 3
+        assert_equal(stats.beta.pdf(0, a, b), np.inf)
+
+    @pytest.mark.xfail(IS_PYPY, reason="Does not convert boost warning")
+    def test_boost_eval_issue_14606(self):
+        q, a, b = 0.995, 1.0e11, 1.0e13
+        with pytest.warns(RuntimeWarning):
+            stats.beta.ppf(q, a, b)
+
+    @pytest.mark.parametrize('method', [stats.beta.ppf, stats.beta.isf])
+    @pytest.mark.parametrize('a, b', [(1e-310, 12.5), (12.5, 1e-310)])
+    def test_beta_ppf_with_subnormal_a_b(self, method, a, b):
+        # Regression test for gh-17444: beta.ppf(p, a, b) and beta.isf(p, a, b)
+        # would result in a segmentation fault if either a or b was subnormal.
+        p = 0.9
+        # Depending on the version of Boost that we have vendored and
+        # our setting of the Boost double promotion policy, the call
+        # `stats.beta.ppf(p, a, b)` might raise an OverflowError or
+        # return a value.  We'll accept either behavior (and not care about
+        # the value), because our goal here is to verify that the call does
+        # not trigger a segmentation fault.
+        try:
+            method(p, a, b)
+        except OverflowError:
+            # The OverflowError exception occurs with Boost 1.80 or earlier
+            # when Boost's double promotion policy is false; see
+            #   https://github.com/boostorg/math/issues/882
+            # and
+            #   https://github.com/boostorg/math/pull/883
+            # Once we have vendored the fixed version of Boost, we can drop
+            # this try-except wrapper and just call the function.
+            pass
+
+
+class TestBetaPrime:
+    def test_logpdf(self):
+        alpha, beta = 267, 1472
+        x = np.array([0.2, 0.5, 0.6])
+        b = stats.betaprime(alpha, beta)
+        assert_(np.isfinite(b.logpdf(x)).all())
+        assert_allclose(b.pdf(x), np.exp(b.logpdf(x)))
+
+    def test_cdf(self):
+        # regression test for gh-4030: Implementation of
+        # scipy.stats.betaprime.cdf()
+        x = stats.betaprime.cdf(0, 0.2, 0.3)
+        assert_equal(x, 0.0)
+
+        alpha, beta = 267, 1472
+        x = np.array([0.2, 0.5, 0.6])
+        cdfs = stats.betaprime.cdf(x, alpha, beta)
+        assert_(np.isfinite(cdfs).all())
+
+        # check the new cdf implementation vs generic one:
+        gen_cdf = stats.rv_continuous._cdf_single
+        cdfs_g = [gen_cdf(stats.betaprime, val, alpha, beta) for val in x]
+        assert_allclose(cdfs, cdfs_g, atol=0, rtol=2e-12)
+
+
+class TestGamma:
+    def test_pdf(self):
+        # a few test cases to compare with R
+        pdf = stats.gamma.pdf(90, 394, scale=1./5)
+        assert_almost_equal(pdf, 0.002312341)
+
+        pdf = stats.gamma.pdf(3, 10, scale=1./5)
+        assert_almost_equal(pdf, 0.1620358)
+
+    def test_logpdf(self):
+        # Regression test for Ticket #1326: cornercase avoid nan with 0*log(0)
+        # situation
+        logpdf = stats.gamma.logpdf(0, 1)
+        assert_almost_equal(logpdf, 0)
+
+    def test_fit_bad_keyword_args(self):
+        x = [0.1, 0.5, 0.6]
+        assert_raises(TypeError, stats.gamma.fit, x, floc=0, plate="shrimp")
+
+    def test_isf(self):
+        # Test cases for when the probability is very small. See gh-13664.
+        # The expected values can be checked with mpmath.  With mpmath,
+        # the survival function sf(x, k) can be computed as
+        #
+        #     mpmath.gammainc(k, x, mpmath.inf, regularized=True)
+        #
+        # Here we have:
+        #
+        # >>> mpmath.mp.dps = 60
+        # >>> float(mpmath.gammainc(1, 39.14394658089878, mpmath.inf,
+        # ...                       regularized=True))
+        # 9.99999999999999e-18
+        # >>> float(mpmath.gammainc(100, 330.6557590436547, mpmath.inf,
+        #                           regularized=True))
+        # 1.000000000000028e-50
+        #
+        assert np.isclose(stats.gamma.isf(1e-17, 1),
+                          39.14394658089878, atol=1e-14)
+        assert np.isclose(stats.gamma.isf(1e-50, 100),
+                          330.6557590436547, atol=1e-13)
+
+
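+def _demo_gamma_sf_via_gammaincc():
+    # Hypothetical helper (not collected by pytest): the identity behind the
+    # mpmath check in TestGamma.test_isf -- gamma.sf(x, k) is the regularized
+    # upper incomplete gamma function, available as special.gammaincc.
+    x, k = 39.14394658089878, 1.0
+    assert_allclose(stats.gamma.sf(x, k), special.gammaincc(k, x), rtol=1e-13)
+
+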
+class TestChi2:
+    # regression tests after precision improvements, ticket:1041, not verified
+    def test_precision(self):
+        assert_almost_equal(stats.chi2.pdf(1000, 1000), 8.919133934753128e-003,
+                            decimal=14)
+        assert_almost_equal(stats.chi2.pdf(100, 100), 0.028162503162596778,
+                            decimal=14)
+
+    def test_ppf(self):
+        # Expected values computed with mpmath.
+        df = 4.8
+        x = stats.chi2.ppf(2e-47, df)
+        assert_allclose(x, 1.098472479575179840604902808e-19, rtol=1e-10)
+        x = stats.chi2.ppf(0.5, df)
+        assert_allclose(x, 4.15231407598589358660093156, rtol=1e-10)
+
+        df = 13
+        x = stats.chi2.ppf(2e-77, df)
+        assert_allclose(x, 1.0106330688195199050507943e-11, rtol=1e-10)
+        x = stats.chi2.ppf(0.1, df)
+        assert_allclose(x, 7.041504580095461859307179763, rtol=1e-10)
+
+
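+def _demo_chi2_is_gamma():
+    # Hypothetical helper (not collected by pytest): chi2(df) is the gamma
+    # distribution with shape df/2 and scale 2, the relationship behind the
+    # precision values checked in TestChi2.
+    x, df = 100.0, 100
+    assert_allclose(stats.chi2.pdf(x, df),
+                    stats.gamma.pdf(x, df/2, scale=2), rtol=1e-12)
+
+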
+class TestGumbelL:
+    # gh-6228
+    def test_cdf_ppf(self):
+        x = np.linspace(-100, -4)
+        y = stats.gumbel_l.cdf(x)
+        xx = stats.gumbel_l.ppf(y)
+        assert_allclose(x, xx)
+
+    def test_logcdf_logsf(self):
+        x = np.linspace(-100, -4)
+        y = stats.gumbel_l.logcdf(x)
+        z = stats.gumbel_l.logsf(x)
+        u = np.exp(y)
+        v = -special.expm1(z)
+        assert_allclose(u, v)
+
+    def test_sf_isf(self):
+        x = np.linspace(-20, 5)
+        y = stats.gumbel_l.sf(x)
+        xx = stats.gumbel_l.isf(y)
+        assert_allclose(x, xx)
+
+    @pytest.mark.parametrize('loc', [-1, 1])
+    def test_fit_fixed_param(self, loc):
+        # ensure a fixed location is correctly reflected from the underlying
+        # `gumbel_r.fit` call; see comments at the end of gh-12737.
+        data = stats.gumbel_l.rvs(size=100, loc=loc)
+        fitted_loc, _ = stats.gumbel_l.fit(data, floc=loc)
+        assert_equal(fitted_loc, loc)
+
+
+class TestGumbelR:
+
+    def test_sf(self):
+        # Expected value computed with mpmath:
+        #   >>> import mpmath
+        #   >>> mpmath.mp.dps = 40
+        #   >>> float(mpmath.mp.one - mpmath.exp(-mpmath.exp(-50)))
+        #   1.9287498479639178e-22
+        assert_allclose(stats.gumbel_r.sf(50), 1.9287498479639178e-22,
+                        rtol=1e-14)
+
+    def test_isf(self):
+        # Expected value computed with mpmath:
+        #   >>> import mpmath
+        #   >>> mpmath.mp.dps = 40
+        #   >>> float(-mpmath.log(-mpmath.log(mpmath.mp.one - 1e-17)))
+        #   39.14394658089878
+        assert_allclose(stats.gumbel_r.isf(1e-17), 39.14394658089878,
+                        rtol=1e-14)
+
+
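+def _demo_gumbel_r_sf_closed_form():
+    # Hypothetical helper (not collected by pytest): gumbel_r has cdf
+    # exp(-exp(-x)), so sf(x) = -expm1(-exp(-x)); this is the closed form
+    # evaluated with mpmath in TestGumbelR above.
+    x = 5.0
+    assert_allclose(stats.gumbel_r.sf(x), -np.expm1(-np.exp(-x)), rtol=1e-13)
+
+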
+class TestLevyStable:
+    @pytest.fixture
+    def nolan_pdf_sample_data(self):
+        """Sample data points for pdf computed with Nolan's stablec
+
+        See - http://fs2.american.edu/jpnolan/www/stable/stable.html
+
+        There's a known limitation of Nolan's executable for alpha < 0.2.
+
+        The data table loaded below is generated from Nolan's stablec
+        with the following parameter space:
+
+            alpha = 0.1, 0.2, ..., 2.0
+            beta = -1.0, -0.9, ..., 1.0
+            p = 0.01, 0.05, 0.1, 0.25, 0.35, 0.5,
+        and the equivalent for the right tail
+
+        Typical inputs for stablec:
+
+            stablec.exe <<
+            1 # pdf
+            1 # Nolan S equivalent to S0 in scipy
+            .25,2,.25 # alpha
+            -1,-1,0 # beta
+            -10,10,1 # x
+            1,0 # gamma, delta
+            2 # output file
+        """
+        data = np.load(
+            Path(__file__).parent /
+            'data/levy_stable/stable-Z1-pdf-sample-data.npy'
+        )
+        data = np.core.records.fromarrays(data.T, names='x,p,alpha,beta,pct')
+        return data
+
+    @pytest.fixture
+    def nolan_cdf_sample_data(self):
+        """Sample data points for cdf computed with Nolan's stablec
+
+        See - http://fs2.american.edu/jpnolan/www/stable/stable.html
+
+        There's a known limitation of Nolan's executable for alpha < 0.2.
+
+        The data table loaded below is generated from Nolan's stablec
+        with the following parameter space:
+
+            alpha = 0.1, 0.2, ..., 2.0
+            beta = -1.0, -0.9, ..., 1.0
+            p = 0.01, 0.05, 0.1, 0.25, 0.35, 0.5,
+
+        and the equivalent for the right tail
+
+        Ideally, Nolan's output for CDF values should match the percentiles
+        they were sampled from, all the more so because we extract the
+        percentile x positions from stablec too. However, we note that in
+        places Nolan's stablec produces absolute errors on the order of
+        1e-5, and we compare against his calculations here. In the future,
+        once we are less reliant on Nolan's paper, we might switch to
+        comparing directly at percentiles (with those x values produced by
+        some alternative means).
+
+        Typical inputs for stablec:
+
+            stablec.exe <<
+            2 # cdf
+            1 # Nolan S equivalent to S0 in scipy
+            .25,2,.25 # alpha
+            -1,-1,0 # beta
+            -10,10,1 # x
+            1,0 # gamma, delta
+            2 # output file
+        """
+        data = np.load(
+            Path(__file__).parent /
+            'data/levy_stable/stable-Z1-cdf-sample-data.npy'
+        )
+        data = np.core.records.fromarrays(data.T, names='x,p,alpha,beta,pct')
+        return data
+
+    @pytest.fixture
+    def nolan_loc_scale_sample_data(self):
+        """Sample data where loc, scale are different from 0, 1
+
+        Data extracted in similar way to pdf/cdf above using
+        Nolan's stablec but set to an arbitrary location scale of
+        (2, 3) for various important parameters alpha, beta and for
+        parameterisations S0 and S1.
+        """
+        data = np.load(
+            Path(__file__).parent /
+            'data/levy_stable/stable-loc-scale-sample-data.npy'
+        )
+        return data
+
+    @pytest.mark.parametrize(
+        "sample_size", [
+            pytest.param(50), pytest.param(1500, marks=pytest.mark.slow)
+        ]
+    )
+    @pytest.mark.parametrize("parameterization", ["S0", "S1"])
+    @pytest.mark.parametrize(
+        "alpha,beta", [(1.0, 0), (1.0, -0.5), (1.5, 0), (1.9, 0.5)]
+    )
+    @pytest.mark.parametrize("gamma,delta", [(1, 0), (3, 2)])
+    def test_rvs(
+            self,
+            parameterization,
+            alpha,
+            beta,
+            gamma,
+            delta,
+            sample_size,
+    ):
+        stats.levy_stable.parameterization = parameterization
+        ls = stats.levy_stable(
+            alpha=alpha, beta=beta, scale=gamma, loc=delta
+        )
+        _, p = stats.kstest(
+            ls.rvs(size=sample_size, random_state=1234), ls.cdf
+        )
+        assert p > 0.05
+
+    @pytest.mark.slow
+    @pytest.mark.parametrize('beta', [0.5, 1])
+    def test_rvs_alpha1(self, beta):
+        """Additional test cases for rvs for alpha equal to 1."""
+        np.random.seed(987654321)
+        alpha = 1.0
+        loc = 0.5
+        scale = 1.5
+        x = stats.levy_stable.rvs(alpha, beta, loc=loc, scale=scale,
+                                  size=5000)
+        stat, p = stats.kstest(x, 'levy_stable',
+                               args=(alpha, beta, loc, scale))
+        assert p > 0.01
+
+    def test_fit(self):
+        # construct data to have percentiles that match
+        # example in McCulloch 1986.
+        x = [
+            -.05413, -.05413, 0., 0., 0., 0., .00533, .00533, .00533, .00533,
+            .00533, .03354, .03354, .03354, .03354, .03354, .05309, .05309,
+            .05309, .05309, .05309
+        ]
+        alpha1, beta1, loc1, scale1 = stats.levy_stable._fitstart(x)
+        assert_allclose(alpha1, 1.48, rtol=0, atol=0.01)
+        assert_almost_equal(beta1, -.22, 2)
+        assert_almost_equal(scale1, 0.01717, 4)
+        assert_almost_equal(
+            loc1, 0.00233, 2
+        )  # to 2 dps due to rounding error in McCulloch86
+
+        # cover alpha=2 scenario
+        x2 = x + [.05309, .05309, .05309, .05309, .05309]
+        alpha2, beta2, loc2, scale2 = stats.levy_stable._fitstart(x2)
+        assert_equal(alpha2, 2)
+        assert_equal(beta2, -1)
+        assert_almost_equal(scale2, .02503, 4)
+        assert_almost_equal(loc2, .03354, 4)
+
+    @pytest.mark.xfail(reason="Unknown problem with fitstart.")
+    @pytest.mark.parametrize(
+        "alpha,beta,delta,gamma",
+        [
+            (1.5, 0.4, 2, 3),
+            (1.0, 0.4, 2, 3),
+        ]
+    )
+    @pytest.mark.parametrize(
+        "parametrization", ["S0", "S1"]
+    )
+    def test_fit_rvs(self, alpha, beta, delta, gamma, parametrization):
+        """Test that fit agrees with rvs for each parametrization."""
+        stats.levy_stable.parametrization = parametrization
+        data = stats.levy_stable.rvs(
+            alpha, beta, loc=delta, scale=gamma, size=10000, random_state=1234
+        )
+        fit = stats.levy_stable._fitstart(data)
+        alpha_obs, beta_obs, delta_obs, gamma_obs = fit
+        assert_allclose(
+            [alpha, beta, delta, gamma],
+            [alpha_obs, beta_obs, delta_obs, gamma_obs],
+            rtol=0.01,
+        )
+
+    def test_fit_beta_flip(self):
+        # Confirm that sign of beta affects loc, not alpha or scale.
+        x = np.array([1, 1, 3, 3, 10, 10, 10, 30, 30, 100, 100])
+        alpha1, beta1, loc1, scale1 = stats.levy_stable._fitstart(x)
+        alpha2, beta2, loc2, scale2 = stats.levy_stable._fitstart(-x)
+        assert_equal(beta1, 1)
+        assert loc1 != 0
+        assert_almost_equal(alpha2, alpha1)
+        assert_almost_equal(beta2, -beta1)
+        assert_almost_equal(loc2, -loc1)
+        assert_almost_equal(scale2, scale1)
+
+    def test_fit_delta_shift(self):
+        # Confirm that loc slides up and down if data shifts.
+        SHIFT = 1
+        x = np.array([1, 1, 3, 3, 10, 10, 10, 30, 30, 100, 100])
+        alpha1, beta1, loc1, scale1 = stats.levy_stable._fitstart(-x)
+        alpha2, beta2, loc2, scale2 = stats.levy_stable._fitstart(-x + SHIFT)
+        assert_almost_equal(alpha2, alpha1)
+        assert_almost_equal(beta2, beta1)
+        assert_almost_equal(loc2, loc1 + SHIFT)
+        assert_almost_equal(scale2, scale1)
+
+    def test_fit_loc_extrap(self):
+        # Confirm that loc goes out of sample for alpha close to 1.
+        x = [1, 1, 3, 3, 10, 10, 10, 30, 30, 140, 140]
+        alpha1, beta1, loc1, scale1 = stats.levy_stable._fitstart(x)
+        assert alpha1 < 1, f"Expected alpha < 1, got {alpha1}"
+        assert loc1 < min(x), f"Expected loc < {min(x)}, got {loc1}"
+
+        x2 = [1, 1, 3, 3, 10, 10, 10, 30, 30, 130, 130]
+        alpha2, beta2, loc2, scale2 = stats.levy_stable._fitstart(x2)
+        assert alpha2 > 1, f"Expected alpha > 1, got {alpha2}"
+        assert loc2 > max(x2), f"Expected loc > {max(x2)}, got {loc2}"
+
+    @pytest.mark.parametrize(
+        "pct_range,alpha_range,beta_range", [
+            pytest.param(
+                [.01, .5, .99],
+                [.1, 1, 2],
+                [-1, 0, .8],
+            ),
+            pytest.param(
+                [.01, .05, .5, .95, .99],
+                [.1, .5, 1, 1.5, 2],
+                [-.9, -.5, 0, .3, .6, 1],
+                marks=pytest.mark.slow
+            ),
+            pytest.param(
+                [.01, .05, .1, .25, .35, .5, .65, .75, .9, .95, .99],
+                np.linspace(0.1, 2, 20),
+                np.linspace(-1, 1, 21),
+                marks=pytest.mark.xslow,
+            ),
+        ]
+    )
+    def test_pdf_nolan_samples(
+            self, nolan_pdf_sample_data, pct_range, alpha_range, beta_range
+    ):
+        """Test pdf values against Nolan's stablec.exe output"""
+        data = nolan_pdf_sample_data
+
+        # some tests break on Linux 32-bit
+        uname = platform.uname()
+        is_linux_32 = uname.system == 'Linux' and uname.machine == 'i686'
+        platform_desc = "/".join(
+            [uname.system, uname.machine, uname.processor])
+
+        # fmt: off
+        # There are a number of cases which fail on some but not all platforms.
+        # These are excluded by the filters below. TODO: Rewrite tests so that
+        # the now filtered out test cases are still run but marked in pytest as
+        # expected to fail.
+        tests = [
+            [
+                'dni', 1e-7, lambda r: (
+                    np.isin(r['pct'], pct_range) &
+                    np.isin(r['alpha'], alpha_range) &
+                    np.isin(r['beta'], beta_range) &
+                    ~(
+                        (
+                            (r['beta'] == 0) &
+                            (r['pct'] == 0.5)
+                        ) |
+                        (
+                            (r['beta'] >= 0.9) &
+                            (r['alpha'] >= 1.6) &
+                            (r['pct'] == 0.5)
+                        ) |
+                        (
+                            (r['alpha'] <= 0.4) &
+                            np.isin(r['pct'], [.01, .99])
+                        ) |
+                        (
+                            (r['alpha'] <= 0.3) &
+                            np.isin(r['pct'], [.05, .95])
+                        ) |
+                        (
+                            (r['alpha'] <= 0.2) &
+                            np.isin(r['pct'], [.1, .9])
+                        ) |
+                        (
+                            (r['alpha'] == 0.1) &
+                            np.isin(r['pct'], [.25, .75]) &
+                            np.isin(np.abs(r['beta']), [.5, .6, .7])
+                        ) |
+                        (
+                            (r['alpha'] == 0.1) &
+                            np.isin(r['pct'], [.5]) &
+                            np.isin(np.abs(r['beta']), [.1])
+                        ) |
+                        (
+                            (r['alpha'] == 0.1) &
+                            np.isin(r['pct'], [.35, .65]) &
+                            np.isin(np.abs(r['beta']), [-.4, -.3, .3, .4, .5])
+                        ) |
+                        (
+                            (r['alpha'] == 0.2) &
+                            (r['beta'] == 0.5) &
+                            (r['pct'] == 0.25)
+                        ) |
+                        (
+                            (r['alpha'] == 0.2) &
+                            (r['beta'] == -0.3) &
+                            (r['pct'] == 0.65)
+                        ) |
+                        (
+                            (r['alpha'] == 0.2) &
+                            (r['beta'] == 0.3) &
+                            (r['pct'] == 0.35)
+                        ) |
+                        (
+                            (r['alpha'] == 1.) &
+                            np.isin(r['pct'], [.5]) &
+                            np.isin(np.abs(r['beta']), [.1, .2, .3, .4])
+                        ) |
+                        (
+                            (r['alpha'] == 1.) &
+                            np.isin(r['pct'], [.35, .65]) &
+                            np.isin(np.abs(r['beta']), [.8, .9, 1.])
+                        ) |
+                        (
+                            (r['alpha'] == 1.) &
+                            np.isin(r['pct'], [.01, .99]) &
+                            np.isin(np.abs(r['beta']), [-.1, .1])
+                        ) |
+                        # various points ok but too sparse to list
+                        (r['alpha'] >= 1.1)
+                    )
+                )
+            ],
+            # piecewise generally good accuracy
+            [
+                'piecewise', 1e-11, lambda r: (
+                    np.isin(r['pct'], pct_range) &
+                    np.isin(r['alpha'], alpha_range) &
+                    np.isin(r['beta'], beta_range) &
+                    (r['alpha'] > 0.2) &
+                    (r['alpha'] != 1.)
+                )
+            ],
+            # for alpha = 1, optimize.bisect has some issues for the
+            # .01 and .99 percentiles on Linux 32-bit
+            [
+                'piecewise', 1e-11, lambda r: (
+                    (r['alpha'] == 1.) &
+                    (not is_linux_32) &
+                    np.isin(r['pct'], pct_range) &
+                    (1. in alpha_range) &
+                    np.isin(r['beta'], beta_range)
+                )
+            ],
+            # for small alpha, accuracy is very slightly reduced
+            [
+                'piecewise', 2.5e-10, lambda r: (
+                    np.isin(r['pct'], pct_range) &
+                    np.isin(r['alpha'], alpha_range) &
+                    np.isin(r['beta'], beta_range) &
+                    (r['alpha'] <= 0.2)
+                )
+            ],
+            # fft accuracy reduces as alpha decreases
+            [
+                'fft-simpson', 1e-5, lambda r: (
+                    (r['alpha'] >= 1.9) &
+                    np.isin(r['pct'], pct_range) &
+                    np.isin(r['alpha'], alpha_range) &
+                    np.isin(r['beta'], beta_range)
+                ),
+            ],
+            [
+                'fft-simpson', 1e-6, lambda r: (
+                    np.isin(r['pct'], pct_range) &
+                    np.isin(r['alpha'], alpha_range) &
+                    np.isin(r['beta'], beta_range) &
+                    (r['alpha'] > 1) &
+                    (r['alpha'] < 1.9)
+                )
+            ],
+            # fft relative errors for alpha < 1, will raise if enabled
+            # ['fft-simpson', 1e-4, lambda r: r['alpha'] == 0.9],
+            # ['fft-simpson', 1e-3, lambda r: r['alpha'] == 0.8],
+            # ['fft-simpson', 1e-2, lambda r: r['alpha'] == 0.7],
+            # ['fft-simpson', 1e-1, lambda r: r['alpha'] == 0.6],
+        ]
+        # fmt: on
+        for ix, (default_method, rtol,
+                 filter_func) in enumerate(tests):
+            stats.levy_stable.pdf_default_method = default_method
+            subdata = data[filter_func(data)
+                           ] if filter_func is not None else data
+            with suppress_warnings() as sup:
+                # occurs in FFT methods only
+                sup.record(
+                    RuntimeWarning,
+                    "Density calculations experimental for FFT method.*"
+                )
+                p = stats.levy_stable.pdf(
+                    subdata['x'],
+                    subdata['alpha'],
+                    subdata['beta'],
+                    scale=1,
+                    loc=0
+                )
+                with np.errstate(over="ignore"):
+                    subdata2 = rec_append_fields(
+                        subdata,
+                        ['calc', 'abserr', 'relerr'],
+                        [
+                            p,
+                            np.abs(p - subdata['p']),
+                            np.abs(p - subdata['p']) / np.abs(subdata['p'])
+                        ]
+                    )
+                failures = subdata2[
+                  (subdata2['relerr'] >= rtol) |
+                  np.isnan(p)
+                ]
+                assert_allclose(
+                    p,
+                    subdata['p'],
+                    rtol,
+                    err_msg="pdf test %s failed with method '%s'"
+                            " [platform: %s]\n%s\n%s" %
+                    (ix, default_method, platform_desc, failures.dtype.names,
+                        failures),
+                    verbose=False
+                )
+
+    @pytest.mark.parametrize(
+        "pct_range,alpha_range,beta_range", [
+            pytest.param(
+                [.01, .5, .99],
+                [.1, 1, 2],
+                [-1, 0, .8],
+            ),
+            pytest.param(
+                [.01, .05, .5, .95, .99],
+                [.1, .5, 1, 1.5, 2],
+                [-.9, -.5, 0, .3, .6, 1],
+                marks=pytest.mark.slow
+            ),
+            pytest.param(
+                [.01, .05, .1, .25, .35, .5, .65, .75, .9, .95, .99],
+                np.linspace(0.1, 2, 20),
+                np.linspace(-1, 1, 21),
+                marks=pytest.mark.xslow,
+            ),
+        ]
+    )
+    def test_cdf_nolan_samples(
+            self, nolan_cdf_sample_data, pct_range, alpha_range, beta_range
+    ):
+        """ Test cdf values against Nolan's stablec.exe output."""
+        data = nolan_cdf_sample_data
+        tests = [
+            # piecewise generally good accuracy
+            [
+                'piecewise', 2e-12, lambda r: (
+                    np.isin(r['pct'], pct_range) &
+                    np.isin(r['alpha'], alpha_range) &
+                    np.isin(r['beta'], beta_range) &
+                    ~(
+                        (
+                            (r['alpha'] == 1.) &
+                            np.isin(r['beta'], [-0.3, -0.2, -0.1]) &
+                            (r['pct'] == 0.01)
+                        ) |
+                        (
+                            (r['alpha'] == 1.) &
+                            np.isin(r['beta'], [0.1, 0.2, 0.3]) &
+                            (r['pct'] == 0.99)
+                        )
+                    )
+                )
+            ],
+            # for some points with alpha=1, Nolan's STABLE clearly
+            # loses accuracy
+            [
+                'piecewise', 5e-2, lambda r: (
+                    np.isin(r['pct'], pct_range) &
+                    np.isin(r['alpha'], alpha_range) &
+                    np.isin(r['beta'], beta_range) &
+                    (
+                        (r['alpha'] == 1.) &
+                        np.isin(r['beta'], [-0.3, -0.2, -0.1]) &
+                        (r['pct'] == 0.01)
+                    ) |
+                    (
+                        (r['alpha'] == 1.) &
+                        np.isin(r['beta'], [0.1, 0.2, 0.3]) &
+                        (r['pct'] == 0.99)
+                    )
+                )
+            ],
+            # fft accuracy is poor, and very poor for alpha < 1
+            [
+                'fft-simpson', 1e-5, lambda r: (
+                    np.isin(r['pct'], pct_range) &
+                    np.isin(r['alpha'], alpha_range) &
+                    np.isin(r['beta'], beta_range) &
+                    (r['alpha'] > 1.7)
+                )
+            ],
+            [
+                'fft-simpson', 1e-4, lambda r: (
+                    np.isin(r['pct'], pct_range) &
+                    np.isin(r['alpha'], alpha_range) &
+                    np.isin(r['beta'], beta_range) &
+                    (r['alpha'] > 1.5) &
+                    (r['alpha'] <= 1.7)
+                )
+            ],
+            [
+                'fft-simpson', 1e-3, lambda r: (
+                    np.isin(r['pct'], pct_range) &
+                    np.isin(r['alpha'], alpha_range) &
+                    np.isin(r['beta'], beta_range) &
+                    (r['alpha'] > 1.3) &
+                    (r['alpha'] <= 1.5)
+                )
+            ],
+            [
+                'fft-simpson', 1e-2, lambda r: (
+                    np.isin(r['pct'], pct_range) &
+                    np.isin(r['alpha'], alpha_range) &
+                    np.isin(r['beta'], beta_range) &
+                    (r['alpha'] > 1.0) &
+                    (r['alpha'] <= 1.3)
+                )
+            ],
+        ]
+        for ix, (default_method, rtol,
+                 filter_func) in enumerate(tests):
+            stats.levy_stable.cdf_default_method = default_method
+            subdata = data[filter_func(data)
+                           ] if filter_func is not None else data
+            with suppress_warnings() as sup:
+                sup.record(
+                    RuntimeWarning,
+                    'Cumulative density calculations experimental for FFT'
+                    + ' method. Use piecewise method instead.*'
+                )
+                p = stats.levy_stable.cdf(
+                    subdata['x'],
+                    subdata['alpha'],
+                    subdata['beta'],
+                    scale=1,
+                    loc=0
+                )
+                with np.errstate(over="ignore"):
+                    subdata2 = rec_append_fields(
+                        subdata,
+                        ['calc', 'abserr', 'relerr'],
+                        [
+                            p,
+                            np.abs(p - subdata['p']),
+                            np.abs(p - subdata['p']) / np.abs(subdata['p'])
+                        ]
+                    )
+                failures = subdata2[
+                  (subdata2['relerr'] >= rtol) |
+                  np.isnan(p)
+                ]
+                assert_allclose(
+                    p,
+                    subdata['p'],
+                    rtol,
+                    err_msg="cdf test %s failed with method '%s'\n%s\n%s" %
+                    (ix, default_method, failures.dtype.names, failures),
+                    verbose=False
+                )
+
+    @pytest.mark.parametrize("param", [0, 1])
+    @pytest.mark.parametrize("case", ["pdf", "cdf"])
+    def test_location_scale(
+            self, nolan_loc_scale_sample_data, param, case
+    ):
+        """Tests for pdf and cdf where loc, scale are different from 0, 1
+        """
+
+        uname = platform.uname()
+        is_linux_32 = (uname.system == 'Linux'
+                       and "32bit" in platform.architecture()[0])
+        # Test seems to be unstable (see gh-17839 for a bug report on Debian
+        # i386), so skip it.
+        if is_linux_32 and case == 'pdf':
+            pytest.skip("Test unstable on some platforms; see gh-17839, 17859")
+
+        data = nolan_loc_scale_sample_data
+        # We only test against piecewise, as the location/scale transforms
+        # are the same for the other methods.
+        stats.levy_stable.cdf_default_method = "piecewise"
+        stats.levy_stable.pdf_default_method = "piecewise"
+
+        subdata = data[data["param"] == param]
+        stats.levy_stable.parameterization = f"S{param}"
+
+        assert case in ["pdf", "cdf"]
+        function = (
+            stats.levy_stable.pdf if case == "pdf" else stats.levy_stable.cdf
+        )
+
+        v1 = function(
+            subdata['x'], subdata['alpha'], subdata['beta'], scale=2, loc=3
+        )
+        assert_allclose(v1, subdata[case], 1e-5)
+
+    @pytest.mark.parametrize(
+        "method,decimal_places",
+        [
+            ['dni', 4],
+            ['piecewise', 4],
+        ]
+    )
+    def test_pdf_alpha_equals_one_beta_non_zero(self, method, decimal_places):
+        """ sample points extracted from Tables and Graphs of Stable
+        Probability Density Functions - Donald R Holt - 1973 - p 187.
+        """
+        xs = np.array(
+            [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4]
+        )
+        density = np.array(
+            [
+                .3183, .3096, .2925, .2622, .1591, .1587, .1599, .1635, .0637,
+                .0729, .0812, .0955, .0318, .0390, .0458, .0586, .0187, .0236,
+                .0285, .0384
+            ]
+        )
+        betas = np.array(
+            [
+                0, .25, .5, 1, 0, .25, .5, 1, 0, .25, .5, 1, 0, .25, .5, 1, 0,
+                .25, .5, 1
+            ]
+        )
+        with np.errstate(all='ignore'), suppress_warnings() as sup:
+            sup.filter(
+                category=RuntimeWarning,
+                message="Density calculation unstable.*"
+            )
+            stats.levy_stable.pdf_default_method = method
+            # stats.levy_stable.fft_grid_spacing = 0.0001
+            pdf = stats.levy_stable.pdf(xs, 1, betas, scale=1, loc=0)
+            assert_almost_equal(
+                pdf, density, decimal_places, method
+            )
+
+    @pytest.mark.parametrize(
+        "params,expected",
+        [
+            [(1.48, -.22, 0, 1), (0, np.inf, np.nan, np.nan)],
+            [(2, .9, 10, 1.5), (10, 4.5, 0, 0)]
+        ]
+    )
+    def test_stats(self, params, expected):
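+        # for alpha < 2 the variance of a stable law is infinite and the
+        # skewness/kurtosis are undefined; for alpha = 2 the distribution is
+        # normal with variance 2*scale**2 (hence 4.5 for scale=1.5)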
+        observed = stats.levy_stable.stats(
+            params[0], params[1], loc=params[2], scale=params[3],
+            moments='mvsk'
+        )
+        assert_almost_equal(observed, expected)
+
+    @pytest.mark.parametrize('alpha', [0.25, 0.5, 0.75])
+    @pytest.mark.parametrize(
+        'function,beta,points,expected',
+        [
+            (
+                stats.levy_stable.cdf,
+                1.0,
+                np.linspace(-25, 0, 10),
+                0.0,
+            ),
+            (
+                stats.levy_stable.pdf,
+                1.0,
+                np.linspace(-25, 0, 10),
+                0.0,
+            ),
+            (
+                stats.levy_stable.cdf,
+                -1.0,
+                np.linspace(0, 25, 10),
+                1.0,
+            ),
+            (
+                stats.levy_stable.pdf,
+                -1.0,
+                np.linspace(0, 25, 10),
+                0.0,
+            )
+        ]
+    )
+    def test_distribution_outside_support(
+            self, alpha, function, beta, points, expected
+    ):
+        """Ensure the pdf/cdf routines do not return nan outside support.
+
+        This distribution's support becomes truncated in a few special cases:
+            support is [mu, infty) if alpha < 1 and beta = 1
+            support is (-infty, mu] if alpha < 1 and beta = -1
+        Otherwise, the support is all reals. Here, mu is zero by default.
+        """
+        assert 0 < alpha < 1
+        assert_almost_equal(
+            function(points, alpha=alpha, beta=beta),
+            np.full(len(points), expected)
+        )
+
+
+class TestArrayArgument:  # test for ticket:992
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_noexception(self):
+        rvs = stats.norm.rvs(loc=(np.arange(5)), scale=np.ones(5),
+                             size=(10, 5))
+        assert_equal(rvs.shape, (10, 5))
+
+
+class TestDocstring:
+    def test_docstrings(self):
+        # See ticket #761
+        if stats.rayleigh.__doc__ is not None:
+            assert_("rayleigh" in stats.rayleigh.__doc__.lower())
+        if stats.bernoulli.__doc__ is not None:
+            assert_("bernoulli" in stats.bernoulli.__doc__.lower())
+
+    def test_no_name_arg(self):
+        # If name is not given, construction shouldn't fail.  See #1508.
+        stats.rv_continuous()
+        stats.rv_discrete()
+
+
+def test_argsreduce():
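+    # argsreduce(cond, *args) broadcasts each argument against the boolean
+    # condition and returns, for each argument, the elements at the
+    # positions where the condition holds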
+    a = array([1, 3, 2, 1, 2, 3, 3])
+    b, c = argsreduce(a > 1, a, 2)
+
+    assert_array_equal(b, [3, 2, 2, 3, 3])
+    assert_array_equal(c, [2, 2, 2, 2, 2])
+
+    b, c = argsreduce(2 > 1, a, 2)
+    assert_array_equal(b, a[0])
+    assert_array_equal(c, [2])
+
+    b, c = argsreduce(a > 0, a, 2)
+    assert_array_equal(b, a)
+    assert_array_equal(c, [2] * numpy.size(a))
+
+
+class TestFitMethod:
+    skip = ['ncf', 'ksone', 'kstwo']
+
+    def setup_method(self):
+        np.random.seed(1234)
+
+    # skip these because they are deprecated or take only loc and scale
+    # arguments
+    fitSkipNonFinite = ['expon', 'norm', 'uniform']
+
+    @pytest.mark.parametrize('dist,args', distcont)
+    def test_fit_w_non_finite_data_values(self, dist, args):
+        """gh-10300"""
+        if dist in self.fitSkipNonFinite:
+            pytest.skip("%s fit known to fail or deprecated" % dist)
+        x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
+        y = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
+        distfunc = getattr(stats, dist)
+        assert_raises(ValueError, distfunc.fit, x, fscale=1)
+        assert_raises(ValueError, distfunc.fit, y, fscale=1)
+
+    def test_fix_fit_2args_lognorm(self):
+        # Regression test for #1551.
+        np.random.seed(12345)
+        with np.errstate(all='ignore'):
+            x = stats.lognorm.rvs(0.25, 0., 20.0, size=20)
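+            # with loc fixed at 0 and scale fixed at 20, log(x) is normal
+            # with known mean log(20), so the shape MLE is the root mean
+            # square of log(x) - log(20)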
+            expected_shape = np.sqrt(((np.log(x) - np.log(20))**2).mean())
+            assert_allclose(np.array(stats.lognorm.fit(x, floc=0, fscale=20)),
+                            [expected_shape, 0, 20], atol=1e-8)
+
+    def test_fix_fit_norm(self):
+        x = np.arange(1, 6)
+
+        loc, scale = stats.norm.fit(x)
+        assert_almost_equal(loc, 3)
+        assert_almost_equal(scale, np.sqrt(2))
+
+        loc, scale = stats.norm.fit(x, floc=2)
+        assert_equal(loc, 2)
+        assert_equal(scale, np.sqrt(3))
+
+        loc, scale = stats.norm.fit(x, fscale=2)
+        assert_almost_equal(loc, 3)
+        assert_equal(scale, 2)
+
+    def test_fix_fit_gamma(self):
+        x = np.arange(1, 6)
+        meanlog = np.log(x).mean()
+
+        # A basic test of gamma.fit with floc=0.
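+        # (with loc fixed, the shape MLE a solves log(a) - digamma(a) =
+        # log(mean(x)) - mean(log(x)), and the scale MLE is mean(x)/a)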
+        floc = 0
+        a, loc, scale = stats.gamma.fit(x, floc=floc)
+        s = np.log(x.mean()) - meanlog
+        assert_almost_equal(np.log(a) - special.digamma(a), s, decimal=5)
+        assert_equal(loc, floc)
+        assert_almost_equal(scale, x.mean()/a, decimal=8)
+
+        # Regression tests for gh-2514.
+        # The problem was that if `floc=0` was given, any other fixed
+        # parameters were ignored.
+        f0 = 1
+        floc = 0
+        a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)
+        assert_equal(a, f0)
+        assert_equal(loc, floc)
+        assert_almost_equal(scale, x.mean()/a, decimal=8)
+
+        f0 = 2
+        floc = 0
+        a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)
+        assert_equal(a, f0)
+        assert_equal(loc, floc)
+        assert_almost_equal(scale, x.mean()/a, decimal=8)
+
+        # loc and scale fixed.
+        floc = 0
+        fscale = 2
+        a, loc, scale = stats.gamma.fit(x, floc=floc, fscale=fscale)
+        assert_equal(loc, floc)
+        assert_equal(scale, fscale)
+        c = meanlog - np.log(fscale)
+        assert_almost_equal(special.digamma(a), c)
+
+    def test_fix_fit_beta(self):
+        # Test beta.fit when both floc and fscale are given.
+
+        def mlefunc(a, b, x):
+            # Zeros of this function are critical points of
+            # the maximum likelihood function.
+            n = len(x)
+            s1 = np.log(x).sum()
+            s2 = np.log(1-x).sum()
+            psiab = special.psi(a + b)
+            func = [s1 - n * (-psiab + special.psi(a)),
+                    s2 - n * (-psiab + special.psi(b))]
+            return func
+
+        # Basic test with floc and fscale given.
+        x = np.array([0.125, 0.25, 0.5])
+        a, b, loc, scale = stats.beta.fit(x, floc=0, fscale=1)
+        assert_equal(loc, 0)
+        assert_equal(scale, 1)
+        assert_allclose(mlefunc(a, b, x), [0, 0], atol=1e-6)
+
+        # Basic test with f0, floc and fscale given.
+        # This is also a regression test for gh-2514.
+        x = np.array([0.125, 0.25, 0.5])
+        a, b, loc, scale = stats.beta.fit(x, f0=2, floc=0, fscale=1)
+        assert_equal(a, 2)
+        assert_equal(loc, 0)
+        assert_equal(scale, 1)
+        da, db = mlefunc(a, b, x)
+        assert_allclose(db, 0, atol=1e-5)
+
+        # Same floc and fscale values as above, but reverse the data
+        # and fix b (f1).
+        x2 = 1 - x
+        a2, b2, loc2, scale2 = stats.beta.fit(x2, f1=2, floc=0, fscale=1)
+        assert_equal(b2, 2)
+        assert_equal(loc2, 0)
+        assert_equal(scale2, 1)
+        da, db = mlefunc(a2, b2, x2)
+        assert_allclose(da, 0, atol=1e-5)
+        # a2 of this test should equal b from above.
+        assert_almost_equal(a2, b)
+
+        # Check for detection of data out of bounds when floc and fscale
+        # are given.
+        assert_raises(ValueError, stats.beta.fit, x, floc=0.5, fscale=1)
+        y = np.array([0, .5, 1])
+        assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1)
+        assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f0=2)
+        assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f1=2)
+
+        # Check that attempting to fix all the parameters raises a ValueError.
+        assert_raises(ValueError, stats.beta.fit, y, f0=0, f1=1,
+                      floc=2, fscale=3)
+
+    def test_expon_fit(self):
+        x = np.array([2, 2, 4, 4, 4, 4, 4, 8])
+
+        loc, scale = stats.expon.fit(x)
+        assert_equal(loc, 2)    # x.min()
+        assert_equal(scale, 2)  # x.mean() - x.min()
+
+        loc, scale = stats.expon.fit(x, fscale=3)
+        assert_equal(loc, 2)    # x.min()
+        assert_equal(scale, 3)  # fscale
+
+        loc, scale = stats.expon.fit(x, floc=0)
+        assert_equal(loc, 0)    # floc
+        assert_equal(scale, 4)  # x.mean() - loc
+
+    def test_lognorm_fit(self):
+        x = np.array([1.5, 3, 10, 15, 23, 59])
+        lnxm1 = np.log(x - 1)
+
+        shape, loc, scale = stats.lognorm.fit(x, floc=1)
+        assert_allclose(shape, lnxm1.std(), rtol=1e-12)
+        assert_equal(loc, 1)
+        assert_allclose(scale, np.exp(lnxm1.mean()), rtol=1e-12)
+
+        shape, loc, scale = stats.lognorm.fit(x, floc=1, fscale=6)
+        assert_allclose(shape, np.sqrt(((lnxm1 - np.log(6))**2).mean()),
+                        rtol=1e-12)
+        assert_equal(loc, 1)
+        assert_equal(scale, 6)
+
+        shape, loc, scale = stats.lognorm.fit(x, floc=1, fix_s=0.75)
+        assert_equal(shape, 0.75)
+        assert_equal(loc, 1)
+        assert_allclose(scale, np.exp(lnxm1.mean()), rtol=1e-12)
+
+    def test_uniform_fit(self):
+        x = np.array([1.0, 1.1, 1.2, 9.0])
+
+        loc, scale = stats.uniform.fit(x)
+        assert_equal(loc, x.min())
+        assert_equal(scale, x.ptp())
+
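+        # with loc fixed, the likelihood (1/scale)**n is maximized by the
+        # smallest scale covering the data, here x.max() - floc = x.max()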
+        loc, scale = stats.uniform.fit(x, floc=0)
+        assert_equal(loc, 0)
+        assert_equal(scale, x.max())
+
+        loc, scale = stats.uniform.fit(x, fscale=10)
+        assert_equal(loc, 0)
+        assert_equal(scale, 10)
+
+        assert_raises(ValueError, stats.uniform.fit, x, floc=2.0)
+        assert_raises(ValueError, stats.uniform.fit, x, fscale=5.0)
+
+    @pytest.mark.slow
+    @pytest.mark.parametrize("method", ["MLE", "MM"])
+    def test_fshapes(self, method):
+        # take a beta distribution, with shapes='a, b', and make sure that
+        # fa is equivalent to f0, and fb is equivalent to f1
+        a, b = 3., 4.
+        x = stats.beta.rvs(a, b, size=100, random_state=1234)
+        res_1 = stats.beta.fit(x, f0=3., method=method)
+        res_2 = stats.beta.fit(x, fa=3., method=method)
+        assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12)
+
+        res_2 = stats.beta.fit(x, fix_a=3., method=method)
+        assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12)
+
+        res_3 = stats.beta.fit(x, f1=4., method=method)
+        res_4 = stats.beta.fit(x, fb=4., method=method)
+        assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12)
+
+        res_4 = stats.beta.fit(x, fix_b=4., method=method)
+        assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12)
+
+        # cannot specify both positional and named args at the same time
+        assert_raises(ValueError, stats.beta.fit, x, fa=1, f0=2, method=method)
+
+        # check that attempting to fix all parameters raises a ValueError
+        assert_raises(ValueError, stats.beta.fit, x, fa=0, f1=1,
+                      floc=2, fscale=3, method=method)
+
+        # check that specifying floc, fscale and fshapes works for
+        # beta and gamma which override the generic fit method
+        res_5 = stats.beta.fit(x, fa=3., floc=0, fscale=1, method=method)
+        aa, bb, ll, ss = res_5
+        assert_equal([aa, ll, ss], [3., 0, 1])
+
+        # gamma distribution
+        a = 3.
+        data = stats.gamma.rvs(a, size=100)
+        aa, ll, ss = stats.gamma.fit(data, fa=a, method=method)
+        assert_equal(aa, a)
+
+    @pytest.mark.parametrize("method", ["MLE", "MM"])
+    def test_extra_params(self, method):
+        # unknown parameters should raise rather than be silently ignored
+        dist = stats.exponnorm
+        data = dist.rvs(K=2, size=100)
+        dct = dict(enikibeniki=-101)
+        assert_raises(TypeError, dist.fit, data, **dct, method=method)
+
+
+class TestFrozen:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    # Test that a frozen distribution gives the same results as the original
+    # object.
+    #
+    # Only tested for the normal distribution (with loc and scale specified)
+    # and for the gamma distribution (with a shape parameter specified).
+    def test_norm(self):
+        dist = stats.norm
+        frozen = stats.norm(loc=10.0, scale=3.0)
+
+        result_f = frozen.pdf(20.0)
+        result = dist.pdf(20.0, loc=10.0, scale=3.0)
+        assert_equal(result_f, result)
+
+        result_f = frozen.cdf(20.0)
+        result = dist.cdf(20.0, loc=10.0, scale=3.0)
+        assert_equal(result_f, result)
+
+        result_f = frozen.ppf(0.25)
+        result = dist.ppf(0.25, loc=10.0, scale=3.0)
+        assert_equal(result_f, result)
+
+        result_f = frozen.isf(0.25)
+        result = dist.isf(0.25, loc=10.0, scale=3.0)
+        assert_equal(result_f, result)
+
+        result_f = frozen.sf(10.0)
+        result = dist.sf(10.0, loc=10.0, scale=3.0)
+        assert_equal(result_f, result)
+
+        result_f = frozen.median()
+        result = dist.median(loc=10.0, scale=3.0)
+        assert_equal(result_f, result)
+
+        result_f = frozen.mean()
+        result = dist.mean(loc=10.0, scale=3.0)
+        assert_equal(result_f, result)
+
+        result_f = frozen.var()
+        result = dist.var(loc=10.0, scale=3.0)
+        assert_equal(result_f, result)
+
+        result_f = frozen.std()
+        result = dist.std(loc=10.0, scale=3.0)
+        assert_equal(result_f, result)
+
+        result_f = frozen.entropy()
+        result = dist.entropy(loc=10.0, scale=3.0)
+        assert_equal(result_f, result)
+
+        result_f = frozen.moment(2)
+        result = dist.moment(2, loc=10.0, scale=3.0)
+        assert_equal(result_f, result)
+
+        assert_equal(frozen.a, dist.a)
+        assert_equal(frozen.b, dist.b)
+
+    def test_gamma(self):
+        a = 2.0
+        dist = stats.gamma
+        frozen = stats.gamma(a)
+
+        result_f = frozen.pdf(20.0)
+        result = dist.pdf(20.0, a)
+        assert_equal(result_f, result)
+
+        result_f = frozen.cdf(20.0)
+        result = dist.cdf(20.0, a)
+        assert_equal(result_f, result)
+
+        result_f = frozen.ppf(0.25)
+        result = dist.ppf(0.25, a)
+        assert_equal(result_f, result)
+
+        result_f = frozen.isf(0.25)
+        result = dist.isf(0.25, a)
+        assert_equal(result_f, result)
+
+        result_f = frozen.sf(10.0)
+        result = dist.sf(10.0, a)
+        assert_equal(result_f, result)
+
+        result_f = frozen.median()
+        result = dist.median(a)
+        assert_equal(result_f, result)
+
+        result_f = frozen.mean()
+        result = dist.mean(a)
+        assert_equal(result_f, result)
+
+        result_f = frozen.var()
+        result = dist.var(a)
+        assert_equal(result_f, result)
+
+        result_f = frozen.std()
+        result = dist.std(a)
+        assert_equal(result_f, result)
+
+        result_f = frozen.entropy()
+        result = dist.entropy(a)
+        assert_equal(result_f, result)
+
+        result_f = frozen.moment(2)
+        result = dist.moment(2, a)
+        assert_equal(result_f, result)
+
+        assert_equal(frozen.a, frozen.dist.a)
+        assert_equal(frozen.b, frozen.dist.b)
+
+    def test_regression_ticket_1293(self):
+        # Create a frozen distribution.
+        frozen = stats.lognorm(1)
+        # Call one of its methods that does not take any keyword arguments.
+        m1 = frozen.moment(2)
+        # Now call a method that takes a keyword argument.
+        frozen.stats(moments='mvsk')
+        # Call moment(2) again.
+        # After calling stats(), the following was raising an exception.
+        # So this test passes if the following does not raise an exception.
+        m2 = frozen.moment(2)
+        # The following should also be true, of course.  But it is not
+        # the focus of this test.
+        assert_equal(m1, m2)
+
+    def test_ab(self):
+        # test that the support of a frozen distribution
+        # (i) remains frozen even if it changes for the original one
+        # (ii) is actually correct if the shape parameters are such that
+        #      the values of [a, b] are not the default [0, inf]
+        # take a genpareto as an example where the support
+        # depends on the value of the shape parameter:
+        # for c > 0: a, b = 0, inf
+        # for c < 0: a, b = 0, -1/c
+
+        c = -0.1
+        rv = stats.genpareto(c=c)
+        a, b = rv.dist._get_support(c)
+        assert_equal([a, b], [0., 10.])
+
+        c = 0.1
+        stats.genpareto.pdf(0, c=c)
+        assert_equal(rv.dist._get_support(c), [0, np.inf])
+
+        c = -0.1
+        rv = stats.genpareto(c=c)
+        a, b = rv.dist._get_support(c)
+        assert_equal([a, b], [0., 10.])
+
+        c = 0.1
+        stats.genpareto.pdf(0, c)  # this should NOT change genpareto.b
+        assert_equal((rv.dist.a, rv.dist.b), stats.genpareto._get_support(c))
+
+        rv1 = stats.genpareto(c=0.1)
+        assert_(rv1.dist is not rv.dist)
+
+        # c >= 0: a, b = [0, inf]
+        for c in [1., 0.]:
+            c = np.asarray(c)
+            rv = stats.genpareto(c=c)
+            a, b = rv.a, rv.b
+            assert_equal(a, 0.)
+            assert_(np.isposinf(b))
+
+            # c < 0: a=0, b=1/|c|
+            c = np.asarray(-2.)
+            a, b = stats.genpareto._get_support(c)
+            assert_allclose([a, b], [0., 0.5])
+
+    def test_rv_frozen_in_namespace(self):
+        # Regression test for gh-3522
+        assert_(hasattr(stats.distributions, 'rv_frozen'))
+
+    def test_random_state(self):
+        # only check that the random_state attribute exists,
+        frozen = stats.norm()
+        assert_(hasattr(frozen, 'random_state'))
+
+        # ... that it can be set,
+        frozen.random_state = 42
+        assert_equal(frozen.random_state.get_state(),
+                     np.random.RandomState(42).get_state())
+
+        # ... and that .rvs method accepts it as an argument
+        rndm = np.random.RandomState(1234)
+        frozen.rvs(size=8, random_state=rndm)
+
+    def test_pickling(self):
+        # test that a frozen instance pickles and unpickles
+        # (this method is a clone of common_tests.check_pickling)
+        beta = stats.beta(2.3098496451481823, 0.62687954300963677)
+        poiss = stats.poisson(3.)
+        sample = stats.rv_discrete(values=([0, 1, 2, 3],
+                                           [0.1, 0.2, 0.3, 0.4]))
+
+        for distfn in [beta, poiss, sample]:
+            distfn.random_state = 1234
+            distfn.rvs(size=8)
+            s = pickle.dumps(distfn)
+            r0 = distfn.rvs(size=8)
+
+            unpickled = pickle.loads(s)
+            r1 = unpickled.rvs(size=8)
+            assert_equal(r0, r1)
+
+            # also smoke test some methods
+            medians = [distfn.ppf(0.5), unpickled.ppf(0.5)]
+            assert_equal(medians[0], medians[1])
+            assert_equal(distfn.cdf(medians[0]),
+                         unpickled.cdf(medians[1]))
+
+    def test_expect(self):
+        # smoke test the expect method of the frozen distribution
+        # only take a gamma w/loc and scale and poisson with loc specified
+        def func(x):
+            return x
+
+        gm = stats.gamma(a=2, loc=3, scale=4)
+        with np.errstate(invalid="ignore", divide="ignore"):
+            gm_val = gm.expect(func, lb=1, ub=2, conditional=True)
+            gamma_val = stats.gamma.expect(func, args=(2,), loc=3, scale=4,
+                                           lb=1, ub=2, conditional=True)
+        assert_allclose(gm_val, gamma_val)
+
+        p = stats.poisson(3, loc=4)
+        p_val = p.expect(func)
+        poisson_val = stats.poisson.expect(func, args=(3,), loc=4)
+        assert_allclose(p_val, poisson_val)
+
+
+class TestExpect:
+    # Test for expect method.
+    #
+    # Uses normal distribution and beta distribution for finite bounds, and
+    # hypergeom for discrete distribution with finite support
+    def test_norm(self):
+        v = stats.norm.expect(lambda x: (x-5)*(x-5), loc=5, scale=2)
+        assert_almost_equal(v, 4, decimal=14)
+
+        m = stats.norm.expect(lambda x: x, loc=5, scale=2)
+        assert_almost_equal(m, 5, decimal=14)
+
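+        # expect of the constant 1 over [lb, ub] is the probability mass
+        # P(lb <= X <= ub); with the 5% and 95% quantiles as bounds this
+        # is 0.9, and exactly 1 when conditional=True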
+        lb = stats.norm.ppf(0.05, loc=5, scale=2)
+        ub = stats.norm.ppf(0.95, loc=5, scale=2)
+        prob90 = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub)
+        assert_almost_equal(prob90, 0.9, decimal=14)
+
+        prob90c = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub,
+                                    conditional=True)
+        assert_almost_equal(prob90c, 1., decimal=14)
+
+    def test_beta(self):
+        # case with finite support interval
+        v = stats.beta.expect(lambda x: (x-19/3.)*(x-19/3.), args=(10, 5),
+                              loc=5, scale=2)
+        assert_almost_equal(v, 1./18., decimal=13)
+
+        m = stats.beta.expect(lambda x: x, args=(10, 5), loc=5., scale=2.)
+        assert_almost_equal(m, 19/3., decimal=13)
+
+        ub = stats.beta.ppf(0.95, 10, 10, loc=5, scale=2)
+        lb = stats.beta.ppf(0.05, 10, 10, loc=5, scale=2)
+        prob90 = stats.beta.expect(lambda x: 1., args=(10, 10), loc=5.,
+                                   scale=2., lb=lb, ub=ub, conditional=False)
+        assert_almost_equal(prob90, 0.9, decimal=13)
+
+        prob90c = stats.beta.expect(lambda x: 1, args=(10, 10), loc=5,
+                                    scale=2, lb=lb, ub=ub, conditional=True)
+        assert_almost_equal(prob90c, 1., decimal=13)
+
+    def test_hypergeom(self):
+        # test case with finite bounds
+
+        # without specifying bounds
+        m_true, v_true = stats.hypergeom.stats(20, 10, 8, loc=5.)
+        m = stats.hypergeom.expect(lambda x: x, args=(20, 10, 8), loc=5.)
+        assert_almost_equal(m, m_true, decimal=13)
+
+        v = stats.hypergeom.expect(lambda x: (x-9.)**2, args=(20, 10, 8),
+                                   loc=5.)
+        assert_almost_equal(v, v_true, decimal=14)
+
+        # with bounds, bounds equal to shifted support
+        v_bounds = stats.hypergeom.expect(lambda x: (x-9.)**2,
+                                          args=(20, 10, 8),
+                                          loc=5., lb=5, ub=13)
+        assert_almost_equal(v_bounds, v_true, decimal=14)
+
+        # drop boundary points
+        prob_true = 1-stats.hypergeom.pmf([5, 13], 20, 10, 8, loc=5).sum()
+        prob_bounds = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),
+                                             loc=5., lb=6, ub=12)
+        assert_almost_equal(prob_bounds, prob_true, decimal=13)
+
+        # conditional
+        prob_bc = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8), loc=5.,
+                                         lb=6, ub=12, conditional=True)
+        assert_almost_equal(prob_bc, 1, decimal=14)
+
+        # check simple integral
+        prob_b = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),
+                                        lb=0, ub=8)
+        assert_almost_equal(prob_b, 1, decimal=13)
+
+    def test_poisson(self):
+        # poisson, use lower bound only
+        prob_bounds = stats.poisson.expect(lambda x: 1, args=(2,), lb=3,
+                                           conditional=False)
+        prob_b_true = 1-stats.poisson.cdf(2, 2)
+        assert_almost_equal(prob_bounds, prob_b_true, decimal=14)
+
+        prob_lb = stats.poisson.expect(lambda x: 1, args=(2,), lb=2,
+                                       conditional=True)
+        assert_almost_equal(prob_lb, 1, decimal=14)
+
+    def test_genhalflogistic(self):
+        # genhalflogistic, changes upper bound of support in _argcheck
+        # regression test for gh-2622
+        halflog = stats.genhalflogistic
+        # check consistency when calling expect twice with the same input
+        res1 = halflog.expect(args=(1.5,))
+        halflog.expect(args=(0.5,))
+        res2 = halflog.expect(args=(1.5,))
+        assert_almost_equal(res1, res2, decimal=14)
+
+    def test_rice_overflow(self):
+        # rice.pdf(999, 0.74) was inf since special.i0 silently overflows
+        # check that using i0e fixes it
+        assert_(np.isfinite(stats.rice.pdf(999, 0.74)))
+
+        assert_(np.isfinite(stats.rice.expect(lambda x: 1, args=(0.74,))))
+        assert_(np.isfinite(stats.rice.expect(lambda x: 2, args=(0.74,))))
+        assert_(np.isfinite(stats.rice.expect(lambda x: 3, args=(0.74,))))
+
+    def test_logser(self):
+        # test a discrete distribution with infinite support and loc
+        p, loc = 0.3, 3
+        res_0 = stats.logser.expect(lambda k: k, args=(p,))
+        # check against the correct answer (sum of a geom series)
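+        # (the logser pmf is -p**k / (k*log(1 - p)), so the mean reduces to
+        # a geometric series: E[k] = -1/log(1 - p) * sum_k p**k
+        #                          = p / ((p - 1)*log(1 - p)))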
+        assert_allclose(res_0,
+                        p / (p - 1.) / np.log(1. - p), atol=1e-15)
+
+        # now check it with `loc`
+        res_l = stats.logser.expect(lambda k: k, args=(p,), loc=loc)
+        assert_allclose(res_l, res_0 + loc, atol=1e-15)
+
+    @pytest.mark.filterwarnings('ignore::RuntimeWarning')
+    def test_skellam(self):
+        # Use a discrete distribution w/ bi-infinite support. Compute the
+        # first two moments and compare to known values (cf. skellam.stats)
+        p1, p2 = 18, 22
+        m1 = stats.skellam.expect(lambda x: x, args=(p1, p2))
+        m2 = stats.skellam.expect(lambda x: x**2, args=(p1, p2))
+        assert_allclose(m1, p1 - p2, atol=1e-12)
+        assert_allclose(m2 - m1**2, p1 + p2, atol=1e-12)
+
+    def test_randint(self):
+        # Use a discrete distribution w/ parameter-dependent support, which
+        # is larger than the default chunksize
+        lo, hi = 0, 113
+        res = stats.randint.expect(lambda x: x, (lo, hi))
+        assert_allclose(res,
+                        sum(_ for _ in range(lo, hi)) / (hi - lo), atol=1e-15)
+
+    def test_zipf(self):
+        # Test that there is no infinite loop even if the sum diverges
+        assert_warns(RuntimeWarning, stats.zipf.expect,
+                     lambda x: x**2, (2,))
+
+    def test_discrete_kwds(self):
+        # check that discrete expect accepts keywords to control the summation
+        n0 = stats.poisson.expect(lambda x: 1, args=(2,))
+        n1 = stats.poisson.expect(lambda x: 1, args=(2,),
+                                  maxcount=1001, chunksize=32, tolerance=1e-8)
+        assert_almost_equal(n0, n1, decimal=14)
+
+    def test_moment(self):
+        # test the .moment() method: compute a higher moment and compare to
+        # a known value
+        def poiss_moment5(mu):
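+            # the n-th raw moment of Poisson(mu) is the Touchard polynomial
+            # sum_k S(n, k)*mu**k with Stirling numbers of the second kind;
+            # here S(5, 1..5) = 1, 15, 25, 10, 1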
+            return mu**5 + 10*mu**4 + 25*mu**3 + 15*mu**2 + mu
+
+        for mu in [5, 7]:
+            m5 = stats.poisson.moment(5, mu)
+            assert_allclose(m5, poiss_moment5(mu), rtol=1e-10)
+
+    def test_challenging_cases_gh8928(self):
+        # Several cases where `expect` failed to produce a correct result were
+        # reported in gh-8928. Check that these cases have been resolved.
+        assert_allclose(stats.norm.expect(loc=36, scale=1.0), 36)
+        assert_allclose(stats.norm.expect(loc=40, scale=1.0), 40)
+        assert_allclose(stats.norm.expect(loc=10, scale=0.1), 10)
+        assert_allclose(stats.gamma.expect(args=(148,)), 148)
+        assert_allclose(stats.logistic.expect(loc=85), 85)
+
+    def test_lb_ub_gh15855(self):
+        # Make sure changes to `expect` made in gh-15855 treat lb/ub correctly
+        dist = stats.uniform
+        ref = dist.mean(loc=10, scale=5)  # 12.5
+        # moment over whole distribution
+        assert_allclose(dist.expect(loc=10, scale=5), ref)
+        # moment over whole distribution, lb and ub outside of support
+        assert_allclose(dist.expect(loc=10, scale=5, lb=9, ub=16), ref)
+        # moment over 60% of distribution, [lb, ub] centered within support
+        assert_allclose(dist.expect(loc=10, scale=5, lb=11, ub=14), ref*0.6)
+        # moment over truncated distribution, essentially
+        assert_allclose(dist.expect(loc=10, scale=5, lb=11, ub=14,
+                                    conditional=True), ref)
+        # moment over 40% of distribution, [lb, ub] not centered within support
+        assert_allclose(dist.expect(loc=10, scale=5, lb=11, ub=13), 12*0.4)
+        # moment with lb > ub
+        assert_allclose(dist.expect(loc=10, scale=5, lb=13, ub=11), -12*0.4)
+        # moment with lb > ub, conditional
+        assert_allclose(dist.expect(loc=10, scale=5, lb=13, ub=11,
+                                    conditional=True), 12)
+
+
+class TestNct:
+    def test_nc_parameter(self):
+        # Parameter values c <= 0 were not enabled (gh-2402); for negative
+        # values of c and for c = 0, the results of rv.cdf(0) below were nan
+        rv = stats.nct(5, 0)
+        assert_equal(rv.cdf(0), 0.5)
+        rv = stats.nct(5, -1)
+        assert_almost_equal(rv.cdf(0), 0.841344746069, decimal=10)
+
+    def test_broadcasting(self):
+        res = stats.nct.pdf(5, np.arange(4, 7)[:, None],
+                            np.linspace(0.1, 1, 4))
+        expected = array([[0.00321886, 0.00557466, 0.00918418, 0.01442997],
+                          [0.00217142, 0.00395366, 0.00683888, 0.01126276],
+                          [0.00153078, 0.00291093, 0.00525206, 0.00900815]])
+        assert_allclose(res, expected, rtol=1e-5)
+
+    def test_variance_gh_issue_2401(self):
+        # Computation of the variance of a non-central t-distribution resulted
+        # in a TypeError: ufunc 'isinf' not supported for the input types,
+        # and the inputs could not be safely coerced to any supported types
+        # according to the casting rule 'safe'
+        rv = stats.nct(4, 0)
+        assert_equal(rv.var(), 2.0)
+
+    def test_nct_inf_moments(self):
+        # n-th moment of nct only exists for df > n
+        m, v, s, k = stats.nct.stats(df=0.9, nc=0.3, moments='mvsk')
+        assert_equal([m, v, s, k], [np.nan, np.nan, np.nan, np.nan])
+
+        m, v, s, k = stats.nct.stats(df=1.9, nc=0.3, moments='mvsk')
+        assert_(np.isfinite(m))
+        assert_equal([v, s, k], [np.nan, np.nan, np.nan])
+
+        m, v, s, k = stats.nct.stats(df=3.1, nc=0.3, moments='mvsk')
+        assert_(np.isfinite([m, v, s]).all())
+        assert_equal(k, np.nan)
+
+    def test_nct_stats_large_df_values(self):
+        # previously the gamma function was used, which lost precision at df=345
+        # cf. https://github.com/scipy/scipy/issues/12919 for details
+        nct_mean_df_1000 = stats.nct.mean(1000, 2)
+        nct_stats_df_1000 = stats.nct.stats(1000, 2)
+        # These expected values were computed with mpmath. They were also
+        # verified with the Wolfram Alpha expressions:
+        #     Mean[NoncentralStudentTDistribution[1000, 2]]
+        #     Var[NoncentralStudentTDistribution[1000, 2]]
+        expected_stats_df_1000 = [2.0015015641422464, 1.0040115288163005]
+        assert_allclose(nct_mean_df_1000, expected_stats_df_1000[0],
+                        rtol=1e-10)
+        assert_allclose(nct_stats_df_1000, expected_stats_df_1000,
+                        rtol=1e-10)
+        # and a bigger df value
+        nct_mean = stats.nct.mean(100000, 2)
+        nct_stats = stats.nct.stats(100000, 2)
+        # These expected values were computed with mpmath.
+        expected_stats = [2.0000150001562518, 1.0000400011500288]
+        assert_allclose(nct_mean, expected_stats[0], rtol=1e-10)
+        assert_allclose(nct_stats, expected_stats, rtol=1e-9)
+
+
+class TestRecipInvGauss:
+
+    def test_pdf_endpoint(self):
+        p = stats.recipinvgauss.pdf(0, 0.6)
+        assert p == 0.0
+
+    def test_logpdf_endpoint(self):
+        logp = stats.recipinvgauss.logpdf(0, 0.6)
+        assert logp == -np.inf
+
+    def test_cdf_small_x(self):
+        # The expected value was computed with mpmath:
+        #
+        # import mpmath
+        #
+        # mpmath.mp.dps = 100
+        #
+        # def recipinvgauss_cdf_mp(x, mu):
+        #     x = mpmath.mpf(x)
+        #     mu = mpmath.mpf(mu)
+        #     trm1 = 1/mu - x
+        #     trm2 = 1/mu + x
+        #     isqx = 1/mpmath.sqrt(x)
+        #     return (mpmath.ncdf(-isqx*trm1)
+        #             - mpmath.exp(2/mu)*mpmath.ncdf(-isqx*trm2))
+        #
+        p = stats.recipinvgauss.cdf(0.05, 0.5)
+        expected = 6.590396159501331e-20
+        assert_allclose(p, expected, rtol=1e-14)
+
+    def test_sf_large_x(self):
+        # The expected value was computed with mpmath; see test_cdf_small_x.
+        p = stats.recipinvgauss.sf(80, 0.5)
+        expected = 2.699819200556787e-18
+        assert_allclose(p, expected, 5e-15)
+
+
+class TestRice:
+    def test_rice_zero_b(self):
+        # rice distribution should work with b=0, cf gh-2164
+        x = [0.2, 1., 5.]
+        assert_(np.isfinite(stats.rice.pdf(x, b=0.)).all())
+        assert_(np.isfinite(stats.rice.logpdf(x, b=0.)).all())
+        assert_(np.isfinite(stats.rice.cdf(x, b=0.)).all())
+        assert_(np.isfinite(stats.rice.logcdf(x, b=0.)).all())
+
+        q = [0.1, 0.1, 0.5, 0.9]
+        assert_(np.isfinite(stats.rice.ppf(q, b=0.)).all())
+
+        mvsk = stats.rice.stats(0, moments='mvsk')
+        assert_(np.isfinite(mvsk).all())
+
+        # furthermore, the pdf is continuous as b -> 0:
+        # rice.pdf(x, b -> 0) = x * exp(-x**2/2) + O(b**2)
+        # see e.g. Abramowitz & Stegun 9.6.7 & 9.6.10
+        b = 1e-8
+        assert_allclose(stats.rice.pdf(x, 0), stats.rice.pdf(x, b),
+                        atol=b, rtol=0)
+
+    def test_rice_rvs(self):
+        rvs = stats.rice.rvs
+        assert_equal(rvs(b=3.).size, 1)
+        assert_equal(rvs(b=3., size=(3, 5)).shape, (3, 5))
+
+    def test_rice_gh9836(self):
+        # test that gh-9836 is resolved; previously the cdf jumped to 1
+        # at the end of its support
+
+        cdf = stats.rice.cdf(np.arange(10, 160, 10), np.arange(10, 160, 10))
+        # Generated in R
+        # library(VGAM)
+        # options(digits=16)
+        # x = seq(10, 150, 10)
+        # print(price(x, sigma=1, vee=x))
+        cdf_exp = [0.4800278103504522, 0.4900233218590353, 0.4933500379379548,
+                   0.4950128317658719, 0.4960103776798502, 0.4966753655438764,
+                   0.4971503395812474, 0.4975065620443196, 0.4977836197921638,
+                   0.4980052636649550, 0.4981866072661382, 0.4983377260666599,
+                   0.4984655952615694, 0.4985751970541413, 0.4986701850071265]
+        assert_allclose(cdf, cdf_exp)
+
+        probabilities = np.arange(0.1, 1, 0.1)
+        ppf = stats.rice.ppf(probabilities, 500/4, scale=4)
+        # Generated in R
+        # library(VGAM)
+        # options(digits=16)
+        # p = seq(0.1, .9, by = .1)
+        # print(qrice(p, vee = 500, sigma = 4))
+        ppf_exp = [494.8898762347361, 496.6495690858350, 497.9184315188069,
+                   499.0026277378915, 500.0159999146250, 501.0293721352668,
+                   502.1135684981884, 503.3824312270405, 505.1421247157822]
+        assert_allclose(ppf, ppf_exp)
+
+        ppf = stats.rice.ppf(0.5, np.arange(10, 150, 10))
+        # Generated in R
+        # library(VGAM)
+        # options(digits=16)
+        # b <- seq(10, 140, 10)
+        # print(qrice(0.5, vee = b, sigma = 1))
+        ppf_exp = [10.04995862522287, 20.02499480078302, 30.01666512465732,
+                   40.01249934924363, 50.00999966676032, 60.00833314046875,
+                   70.00714273568241, 80.00624991862573, 90.00555549840364,
+                   100.00499995833597, 110.00454542324384, 120.00416664255323,
+                   130.00384613488120, 140.00357141338748]
+        assert_allclose(ppf, ppf_exp)
+
+
+class TestErlang:
+    def setup_method(self):
+        np.random.seed(1234)
+
+    def test_erlang_runtimewarning(self):
+        # erlang should generate a RuntimeWarning if a non-integer
+        # shape parameter is used.
+        with warnings.catch_warnings():
+            warnings.simplefilter("error", RuntimeWarning)
+
+            # The non-integer shape parameter 1.3 should trigger a
+            # RuntimeWarning
+            assert_raises(RuntimeWarning,
+                          stats.erlang.rvs, 1.3, loc=0, scale=1, size=4)
+
+            # Calling the fit method with `f0` set to an integer should
+            # *not* trigger a RuntimeWarning.  It should return the same
+            # values as gamma.fit(...).
+            data = [0.5, 1.0, 2.0, 4.0]
+            result_erlang = stats.erlang.fit(data, f0=1)
+            result_gamma = stats.gamma.fit(data, f0=1)
+            assert_allclose(result_erlang, result_gamma, rtol=1e-3)
+
+    def test_gh_pr_10949_argcheck(self):
+        assert_equal(stats.erlang.pdf(0.5, a=[1, -1]),
+                     stats.gamma.pdf(0.5, a=[1, -1]))
+
+
+class TestRayleigh:
+    def setup_method(self):
+        np.random.seed(987654321)
+
+    # gh-6227
+    def test_logpdf(self):
+        y = stats.rayleigh.logpdf(50)
+        assert_allclose(y, -1246.0879769945718)
+
+    def test_logsf(self):
+        y = stats.rayleigh.logsf(50)
+        assert_allclose(y, -1250)
+
+    @pytest.mark.parametrize("rvs_loc,rvs_scale", [(0.85373171, 0.86932204),
+                                                   (0.20558821, 0.61621008)])
+    def test_fit(self, rvs_loc, rvs_scale):
+        data = stats.rayleigh.rvs(size=250, loc=rvs_loc, scale=rvs_scale)
+
+        def scale_mle(data, floc):
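+            # closed-form MLE: with loc known, the Rayleigh log-likelihood
+            # is maximized at scale**2 = sum((x - loc)**2) / (2*n)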
+            return (np.sum((data - floc) ** 2) / (2 * len(data))) ** .5
+
+        # when `floc` is provided, `scale` is found with an analytical formula
+        scale_expect = scale_mle(data, rvs_loc)
+        loc, scale = stats.rayleigh.fit(data, floc=rvs_loc)
+        assert_equal(loc, rvs_loc)
+        assert_equal(scale, scale_expect)
+
+        # when `fscale` is fixed, superclass fit is used to determine `loc`.
+        loc, scale = stats.rayleigh.fit(data, fscale=.6)
+        assert_equal(scale, .6)
+
+        # with both parameters free, one dimensional optimization is done
+        # over a new function that takes into account the dependent relation
+        # of `scale` to `loc`.
+        loc, scale = stats.rayleigh.fit(data)
+        # test that `scale` is defined by its relation to `loc`
+        assert_equal(scale, scale_mle(data, loc))
+
+    @pytest.mark.parametrize("rvs_loc,rvs_scale", [[0.74, 0.01],
+                                                   [0.08464463, 0.12069025]])
+    def test_fit_comparison_super_method(self, rvs_loc, rvs_scale):
+        # test that the objective function result of the analytical MLEs is
+        # less than or equal to that of the numerically optimized estimate
+        data = stats.rayleigh.rvs(size=250, loc=rvs_loc, scale=rvs_scale)
+
+        # obtain objective function with same method as `rv_continuous.fit`
+        args = [data, (stats.rayleigh._fitstart(data), )]
+        func = stats.rayleigh._reduce_func(args, {})[1]
+
+        _assert_less_or_close_loglike(stats.rayleigh, data, func)
+
+    def test_fit_warnings(self):
+        assert_fit_warnings(stats.rayleigh)
+
+    def test_fit_gh17088(self):
+        # `rayleigh.fit` could return a location that was inconsistent with
+        # the data. See gh-17088.
+        rng = np.random.default_rng(456)
+        loc, scale, size = 50, 600, 500
+        rvs = stats.rayleigh.rvs(loc, scale, size=size, random_state=rng)
+        loc_fit, _ = stats.rayleigh.fit(rvs)
+        assert loc_fit < np.min(rvs)
+        loc_fit, scale_fit = stats.rayleigh.fit(rvs, fscale=scale)
+        assert loc_fit < np.min(rvs)
+        assert scale_fit == scale
+
+
+class TestExponWeib:
+
+    def test_pdf_logpdf(self):
+        # Regression test for gh-3508.
+        x = 0.1
+        a = 1.0
+        c = 100.0
+        p = stats.exponweib.pdf(x, a, c)
+        logp = stats.exponweib.logpdf(x, a, c)
+        # Expected values were computed with mpmath.
+        assert_allclose([p, logp],
+                        [1.0000000000000054e-97, -223.35075402042244])
+
+    def test_a_is_1(self):
+        # For issue gh-3508.
+        # Check that when a=1, the pdf and logpdf methods of exponweib are the
+        # same as those of weibull_min.
+        x = np.logspace(-4, -1, 4)
+        a = 1
+        c = 100
+
+        p = stats.exponweib.pdf(x, a, c)
+        expected = stats.weibull_min.pdf(x, c)
+        assert_allclose(p, expected)
+
+        logp = stats.exponweib.logpdf(x, a, c)
+        expected = stats.weibull_min.logpdf(x, c)
+        assert_allclose(logp, expected)
+
+    def test_a_is_1_c_is_1(self):
+        # When a = 1 and c = 1, the distribution is exponential.
+        x = np.logspace(-8, 1, 10)
+        a = 1
+        c = 1
+
+        p = stats.exponweib.pdf(x, a, c)
+        expected = stats.expon.pdf(x)
+        assert_allclose(p, expected)
+
+        logp = stats.exponweib.logpdf(x, a, c)
+        expected = stats.expon.logpdf(x)
+        assert_allclose(logp, expected)
+
+
+class TestFatigueLife:
+
+    def test_sf_tail(self):
+        # Expected value computed with mpmath:
+        #     import mpmath
+        #     mpmath.mp.dps = 80
+        #     x = mpmath.mpf(800.0)
+        #     c = mpmath.mpf(2.5)
+        #     s = float(1 - mpmath.ncdf(1/c * (mpmath.sqrt(x)
+        #                                      - 1/mpmath.sqrt(x))))
+        #     print(s)
+        # Output:
+        #     6.593376447038406e-30
+        s = stats.fatiguelife.sf(800.0, 2.5)
+        assert_allclose(s, 6.593376447038406e-30, rtol=1e-13)
+
+    def test_isf_tail(self):
+        # See test_sf_tail for the mpmath code.
+        p = 6.593376447038406e-30
+        q = stats.fatiguelife.isf(p, 2.5)
+        assert_allclose(q, 800.0, rtol=1e-13)
+
+
+class TestWeibull:
+
+    def test_logpdf(self):
+        # gh-6217
+        y = stats.weibull_min.logpdf(0, 1)
+        assert_equal(y, 0)
+
+    def test_with_maxima_distrib(self):
+        # Tests for weibull_min and weibull_max.
+        # The expected values were computed using the symbolic algebra
+        # program 'maxima' with the package 'distrib', which has
+        # 'pdf_weibull' and 'cdf_weibull'.  The mapping between the
+        # scipy and maxima functions is as follows:
+        # -----------------------------------------------------------------
+        # scipy                              maxima
+        # ---------------------------------  ------------------------------
+        # weibull_min.pdf(x, a, scale=b)     pdf_weibull(x, a, b)
+        # weibull_min.logpdf(x, a, scale=b)  log(pdf_weibull(x, a, b))
+        # weibull_min.cdf(x, a, scale=b)     cdf_weibull(x, a, b)
+        # weibull_min.logcdf(x, a, scale=b)  log(cdf_weibull(x, a, b))
+        # weibull_min.sf(x, a, scale=b)      1 - cdf_weibull(x, a, b)
+        # weibull_min.logsf(x, a, scale=b)   log(1 - cdf_weibull(x, a, b))
+        #
+        # weibull_max.pdf(x, a, scale=b)     pdf_weibull(-x, a, b)
+        # weibull_max.logpdf(x, a, scale=b)  log(pdf_weibull(-x, a, b))
+        # weibull_max.cdf(x, a, scale=b)     1 - cdf_weibull(-x, a, b)
+        # weibull_max.logcdf(x, a, scale=b)  log(1 - cdf_weibull(-x, a, b))
+        # weibull_max.sf(x, a, scale=b)      cdf_weibull(-x, a, b)
+        # weibull_max.logsf(x, a, scale=b)   log(cdf_weibull(-x, a, b))
+        # -----------------------------------------------------------------
+        x = 1.5
+        a = 2.0
+        b = 3.0
+
+        # weibull_min
+
+        p = stats.weibull_min.pdf(x, a, scale=b)
+        assert_allclose(p, np.exp(-0.25)/3)
+
+        lp = stats.weibull_min.logpdf(x, a, scale=b)
+        assert_allclose(lp, -0.25 - np.log(3))
+
+        c = stats.weibull_min.cdf(x, a, scale=b)
+        assert_allclose(c, -special.expm1(-0.25))
+
+        lc = stats.weibull_min.logcdf(x, a, scale=b)
+        assert_allclose(lc, np.log(-special.expm1(-0.25)))
+
+        s = stats.weibull_min.sf(x, a, scale=b)
+        assert_allclose(s, np.exp(-0.25))
+
+        ls = stats.weibull_min.logsf(x, a, scale=b)
+        assert_allclose(ls, -0.25)
+
+        # Also test using a large value x, for which computing the survival
+        # function using the CDF would result in 0.
+        s = stats.weibull_min.sf(30, 2, scale=3)
+        assert_allclose(s, np.exp(-100))
+
+        ls = stats.weibull_min.logsf(30, 2, scale=3)
+        assert_allclose(ls, -100)
+
+        # weibull_max
+        x = -1.5
+
+        p = stats.weibull_max.pdf(x, a, scale=b)
+        assert_allclose(p, np.exp(-0.25)/3)
+
+        lp = stats.weibull_max.logpdf(x, a, scale=b)
+        assert_allclose(lp, -0.25 - np.log(3))
+
+        c = stats.weibull_max.cdf(x, a, scale=b)
+        assert_allclose(c, np.exp(-0.25))
+
+        lc = stats.weibull_max.logcdf(x, a, scale=b)
+        assert_allclose(lc, -0.25)
+
+        s = stats.weibull_max.sf(x, a, scale=b)
+        assert_allclose(s, -special.expm1(-0.25))
+
+        ls = stats.weibull_max.logsf(x, a, scale=b)
+        assert_allclose(ls, np.log(-special.expm1(-0.25)))
+
+        # Also test using a value of x close to 0, for which computing the
+        # survival function using the CDF would result in 0.
+        s = stats.weibull_max.sf(-1e-9, 2, scale=3)
+        assert_allclose(s, -special.expm1(-1/9000000000000000000))
+
+        ls = stats.weibull_max.logsf(-1e-9, 2, scale=3)
+        assert_allclose(ls, np.log(-special.expm1(-1/9000000000000000000)))
+
+    def test_fit_min(self):
+        rng = np.random.default_rng(5985959307161735394)
+
+        c, loc, scale = 2, 3.5, 0.5  # arbitrary, valid parameters
+        dist = stats.weibull_min(c, loc, scale)
+        rvs = dist.rvs(size=100, random_state=rng)
+
+        # test that MLE still honors guesses and fixed parameters
+        c2, loc2, scale2 = stats.weibull_min.fit(rvs, 1.5, floc=3)
+        c3, loc3, scale3 = stats.weibull_min.fit(rvs, 1.6, floc=3)
+        assert loc2 == loc3 == 3  # fixed parameter is respected
+        assert c2 != c3  # different guess -> (slightly) different outcome
+        # quality of fit is tested elsewhere
+
+        # test that MoM honors fixed parameters, accepts (but ignores) guesses
+        c4, loc4, scale4 = stats.weibull_min.fit(rvs, 3, fscale=3, method='mm')
+        assert scale4 == 3
+        # because scale was fixed, only the mean and skewness will be matched
+        dist4 = stats.weibull_min(c4, loc4, scale4)
+        res = dist4.stats(moments='ms')
+        ref = np.mean(rvs), stats.skew(rvs)
+        assert_allclose(res, ref)
+
+
+class TestTruncWeibull:
+
+    def test_pdf_bounds(self):
+        # test bounds
+        y = stats.truncweibull_min.pdf([0.1, 2.0], 2.0, 0.11, 1.99)
+        assert_equal(y, [0.0, 0.0])
+
+    def test_logpdf(self):
+        y = stats.truncweibull_min.logpdf(2.0, 1.0, 2.0, np.inf)
+        assert_equal(y, 0.0)
+
+        # hand calculation
+        y = stats.truncweibull_min.logpdf(2.0, 1.0, 2.0, 4.0)
+        assert_allclose(y, 0.14541345786885884)
+
+    def test_ppf_bounds(self):
+        # test bounds
+        y = stats.truncweibull_min.ppf([0.0, 1.0], 2.0, 0.1, 2.0)
+        assert_equal(y, [0.1, 2.0])
+
+    def test_cdf_to_ppf(self):
+        q = [0., 0.1, .25, 0.50, 0.75, 0.90, 1.]
+        x = stats.truncweibull_min.ppf(q, 2., 0., 3.)
+        q_out = stats.truncweibull_min.cdf(x, 2., 0., 3.)
+        assert_allclose(q, q_out)
+
+    def test_sf_to_isf(self):
+        q = [0., 0.1, .25, 0.50, 0.75, 0.90, 1.]
+        x = stats.truncweibull_min.isf(q, 2., 0., 3.)
+        q_out = stats.truncweibull_min.sf(x, 2., 0., 3.)
+        assert_allclose(q, q_out)
+
+    def test_munp(self):
+        c = 2.
+        a = 1.
+        b = 3.
+
+        def xnpdf(x, n):
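+            # integrand of the n-th raw moment; integrated numerically with
+            # quad below as an independent reference for .moment()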
+            return x**n*stats.truncweibull_min.pdf(x, c, a, b)
+
+        m0 = stats.truncweibull_min.moment(0, c, a, b)
+        assert_equal(m0, 1.)
+
+        m1 = stats.truncweibull_min.moment(1, c, a, b)
+        m1_expected, _ = quad(lambda x: xnpdf(x, 1), a, b)
+        assert_allclose(m1, m1_expected)
+
+        m2 = stats.truncweibull_min.moment(2, c, a, b)
+        m2_expected, _ = quad(lambda x: xnpdf(x, 2), a, b)
+        assert_allclose(m2, m2_expected)
+
+        m3 = stats.truncweibull_min.moment(3, c, a, b)
+        m3_expected, _ = quad(lambda x: xnpdf(x, 3), a, b)
+        assert_allclose(m3, m3_expected)
+
+        m4 = stats.truncweibull_min.moment(4, c, a, b)
+        m4_expected, _ = quad(lambda x: xnpdf(x, 4), a, b)
+        assert_allclose(m4, m4_expected)
+
+    def test_reference_values(self):
+        a = 1.
+        b = 3.
+        c = 2.
+        x_med = np.sqrt(1 - np.log(0.5 + np.exp(-(8. + np.log(2.)))))
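+        # median of weibull_min(c=2) truncated to [1, 3]: solving
+        # (F(x) - F(1)) / (F(3) - F(1)) = 1/2 with F(x) = 1 - exp(-x**2)
+        # gives exp(-x_med**2) = (exp(-1) + exp(-9)) / 2, as above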
+
+        cdf = stats.truncweibull_min.cdf(x_med, c, a, b)
+        assert_allclose(cdf, 0.5)
+
+        lc = stats.truncweibull_min.logcdf(x_med, c, a, b)
+        assert_allclose(lc, -np.log(2.))
+
+        ppf = stats.truncweibull_min.ppf(0.5, c, a, b)
+        assert_allclose(ppf, x_med)
+
+        sf = stats.truncweibull_min.sf(x_med, c, a, b)
+        assert_allclose(sf, 0.5)
+
+        ls = stats.truncweibull_min.logsf(x_med, c, a, b)
+        assert_allclose(ls, -np.log(2.))
+
+        isf = stats.truncweibull_min.isf(0.5, c, a, b)
+        assert_allclose(isf, x_med)
+
+    def test_compare_weibull_min(self):
+        # Verify that the truncweibull_min distribution gives the same results
+        # as the original weibull_min
+        x = 1.5
+        c = 2.0
+        a = 0.0
+        b = np.inf
+        scale = 3.0
+
+        p = stats.weibull_min.pdf(x, c, scale=scale)
+        p_trunc = stats.truncweibull_min.pdf(x, c, a, b, scale=scale)
+        assert_allclose(p, p_trunc)
+
+        lp = stats.weibull_min.logpdf(x, c, scale=scale)
+        lp_trunc = stats.truncweibull_min.logpdf(x, c, a, b, scale=scale)
+        assert_allclose(lp, lp_trunc)
+
+        cdf = stats.weibull_min.cdf(x, c, scale=scale)
+        cdf_trunc = stats.truncweibull_min.cdf(x, c, a, b, scale=scale)
+        assert_allclose(cdf, cdf_trunc)
+
+        lc = stats.weibull_min.logcdf(x, c, scale=scale)
+        lc_trunc = stats.truncweibull_min.logcdf(x, c, a, b, scale=scale)
+        assert_allclose(lc, lc_trunc)
+
+        s = stats.weibull_min.sf(x, c, scale=scale)
+        s_trunc = stats.truncweibull_min.sf(x, c, a, b, scale=scale)
+        assert_allclose(s, s_trunc)
+
+        ls = stats.weibull_min.logsf(x, c, scale=scale)
+        ls_trunc = stats.truncweibull_min.logsf(x, c, a, b, scale=scale)
+        assert_allclose(ls, ls_trunc)
+
+        # Also test using a large value x, for which computing the survival
+        # function using the CDF would result in 0.
+        s = stats.truncweibull_min.sf(30, 2, a, b, scale=3)
+        assert_allclose(s, np.exp(-100))
+
+        ls = stats.truncweibull_min.logsf(30, 2, a, b, scale=3)
+        assert_allclose(ls, -100)
+
+    def test_compare_weibull_min2(self):
+        # Verify that the truncweibull_min distribution PDF and CDF results
+        # are the same as those calculated from truncating weibull_min
+        c, a, b = 2.5, 0.25, 1.25
+        x = np.linspace(a, b, 100)
+
+        pdf1 = stats.truncweibull_min.pdf(x, c, a, b)
+        cdf1 = stats.truncweibull_min.cdf(x, c, a, b)
+
+        norm = stats.weibull_min.cdf(b, c) - stats.weibull_min.cdf(a, c)
+        pdf2 = stats.weibull_min.pdf(x, c) / norm
+        cdf2 = (stats.weibull_min.cdf(x, c) - stats.weibull_min.cdf(a, c))/norm
+
+        np.testing.assert_allclose(pdf1, pdf2)
+        np.testing.assert_allclose(cdf1, cdf2)
+
+
+class TestRdist:
+    def test_rdist_cdf_gh1285(self):
+        # check workaround in rdist._cdf for issue gh-1285.
+        distfn = stats.rdist
+        values = [0.001, 0.5, 0.999]
+        assert_almost_equal(distfn.cdf(distfn.ppf(values, 541.0), 541.0),
+                            values, decimal=5)
+
+    def test_rdist_beta(self):
+        # rdist is a special case of stats.beta
+        x = np.linspace(-0.99, 0.99, 10)
+        c = 2.7
+        assert_almost_equal(0.5*stats.beta(c/2, c/2).pdf((x + 1)/2),
+                            stats.rdist(c).pdf(x))
+
+
+class TestTrapezoid:
+    def test_reduces_to_triang(self):
+        modes = [0, 0.3, 0.5, 1]
+        for mode in modes:
+            x = [0, mode, 1]
+            assert_almost_equal(stats.trapezoid.pdf(x, mode, mode),
+                                stats.triang.pdf(x, mode))
+            assert_almost_equal(stats.trapezoid.cdf(x, mode, mode),
+                                stats.triang.cdf(x, mode))
+
+    def test_reduces_to_uniform(self):
+        x = np.linspace(0, 1, 10)
+        assert_almost_equal(stats.trapezoid.pdf(x, 0, 1), stats.uniform.pdf(x))
+        assert_almost_equal(stats.trapezoid.cdf(x, 0, 1), stats.uniform.cdf(x))
+
+    def test_cases(self):
+        # edge cases
+        assert_almost_equal(stats.trapezoid.pdf(0, 0, 0), 2)
+        assert_almost_equal(stats.trapezoid.pdf(1, 1, 1), 2)
+        assert_almost_equal(stats.trapezoid.pdf(0.5, 0, 0.8),
+                            1.11111111111111111)
+        assert_almost_equal(stats.trapezoid.pdf(0.5, 0.2, 1.0),
+                            1.11111111111111111)
+
+        # straightforward case
+        assert_almost_equal(stats.trapezoid.pdf(0.1, 0.2, 0.8), 0.625)
+        assert_almost_equal(stats.trapezoid.pdf(0.5, 0.2, 0.8), 1.25)
+        assert_almost_equal(stats.trapezoid.pdf(0.9, 0.2, 0.8), 0.625)
+
+        assert_almost_equal(stats.trapezoid.cdf(0.1, 0.2, 0.8), 0.03125)
+        assert_almost_equal(stats.trapezoid.cdf(0.2, 0.2, 0.8), 0.125)
+        assert_almost_equal(stats.trapezoid.cdf(0.5, 0.2, 0.8), 0.5)
+        assert_almost_equal(stats.trapezoid.cdf(0.9, 0.2, 0.8), 0.96875)
+        assert_almost_equal(stats.trapezoid.cdf(1.0, 0.2, 0.8), 1.0)
+
+    def test_moments_and_entropy(self):
+        # issue #11795: improve precision of trapezoid stats
+        # Apply formulas from Wikipedia for the following parameters:
+        a, b, c, d = -3, -1, 2, 3  # => p1=1/3, p2=5/6, loc=-3, scale=6
+        p1, p2, loc, scale = (b-a) / (d-a), (c-a) / (d-a), a, d-a
+        h = 2 / (d+c-b-a)
+
+        def moment(n):
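+            # n-th raw moment of the trapezoidal density: integrate x**n
+            # against the piecewise-linear pdf with plateau height h on
+            # [b, c] and linear ramps on [a, b] and [c, d]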
+            return (h * ((d**(n+2) - c**(n+2)) / (d-c)
+                         - (b**(n+2) - a**(n+2)) / (b-a)) /
+                    (n+1) / (n+2))
+
+        mean = moment(1)
+        var = moment(2) - mean**2
+        entropy = 0.5 * (d-c+b-a) / (d+c-b-a) + np.log(0.5 * (d+c-b-a))
+        assert_almost_equal(stats.trapezoid.mean(p1, p2, loc, scale),
+                            mean, decimal=13)
+        assert_almost_equal(stats.trapezoid.var(p1, p2, loc, scale),
+                            var, decimal=13)
+        assert_almost_equal(stats.trapezoid.entropy(p1, p2, loc, scale),
+                            entropy, decimal=13)
+
+        # Check boundary cases where the scipy shape parameter d is 0 or 1.
+        assert_almost_equal(stats.trapezoid.mean(0, 0, -3, 6), -1, decimal=13)
+        assert_almost_equal(stats.trapezoid.mean(0, 1, -3, 6), 0, decimal=13)
+        assert_almost_equal(stats.trapezoid.var(0, 1, -3, 6), 3, decimal=13)
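+        # The moment formula above can be cross-checked by direct
+        # quadrature; a sketch (illustrative only, not run by the suite):
+        #
+        #     from scipy.integrate import quad
+        #     m1, _ = quad(lambda t: t * stats.trapezoid.pdf(
+        #         t, p1, p2, loc, scale), a, d)
+        #     # m1 ~= 2/9 ~= 0.2222, matching moment(1) for
+        #     # a, b, c, d = -3, -1, 2, 3.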
+
+    def test_trapezoid_vect(self):
+        # test that array-valued shapes and arguments are handled
+        c = np.array([0.1, 0.2, 0.3])
+        d = np.array([0.5, 0.6])[:, None]
+        x = np.array([0.15, 0.25, 0.9])
+        v = stats.trapezoid.pdf(x, c, d)
+
+        cc, dd, xx = np.broadcast_arrays(c, d, x)
+
+        res = np.empty(xx.size, dtype=xx.dtype)
+        ind = np.arange(xx.size)
+        for i, x1, c1, d1 in zip(ind, xx.ravel(), cc.ravel(), dd.ravel()):
+            res[i] = stats.trapezoid.pdf(x1, c1, d1)
+
+        assert_allclose(v, res.reshape(v.shape), atol=1e-15)
+
+        # Check that the stats() method supports vector arguments.
+        v = np.asarray(stats.trapezoid.stats(c, d, moments="mvsk"))
+        cc, dd = np.broadcast_arrays(c, d)
+        res = np.empty((cc.size, 4))  # 4 stats returned per value
+        ind = np.arange(cc.size)
+        for i, c1, d1 in zip(ind, cc.ravel(), dd.ravel()):
+            res[i] = stats.trapezoid.stats(c1, d1, moments="mvsk")
+
+        assert_allclose(v, res.T.reshape(v.shape), atol=1e-15)
+
+    def test_trapz(self):
+        # Basic test for alias
+        x = np.linspace(0, 1, 10)
+        assert_almost_equal(stats.trapz.pdf(x, 0, 1), stats.uniform.pdf(x))
+
+
+class TestTriang:
+    def test_edge_cases(self):
+        with np.errstate(all='raise'):
+            assert_equal(stats.triang.pdf(0, 0), 2.)
+            assert_equal(stats.triang.pdf(0.5, 0), 1.)
+            assert_equal(stats.triang.pdf(1, 0), 0.)
+
+            assert_equal(stats.triang.pdf(0, 1), 0)
+            assert_equal(stats.triang.pdf(0.5, 1), 1.)
+            assert_equal(stats.triang.pdf(1, 1), 2)
+
+            assert_equal(stats.triang.cdf(0., 0.), 0.)
+            assert_equal(stats.triang.cdf(0.5, 0.), 0.75)
+            assert_equal(stats.triang.cdf(1.0, 0.), 1.0)
+
+            assert_equal(stats.triang.cdf(0., 1.), 0.)
+            assert_equal(stats.triang.cdf(0.5, 1.), 0.25)
+            assert_equal(stats.triang.cdf(1., 1.), 1)
+
+
+class TestMielke:
+    def test_moments(self):
+        k, s = 4.642, 0.597
+        # n-th moment exists only if n < s
+        assert_equal(stats.mielke(k, s).moment(1), np.inf)
+        assert_equal(stats.mielke(k, 1.0).moment(1), np.inf)
+        assert_(np.isfinite(stats.mielke(k, 1.01).moment(1)))
+
+    def test_burr_equivalence(self):
+        x = np.linspace(0.01, 100, 50)
+        k, s = 2.45, 5.32
+        assert_allclose(stats.burr.pdf(x, s, k/s), stats.mielke.pdf(x, k, s))
+
+
+class TestBurr:
+    def test_endpoints_7491(self):
+        # gh-7491
+        # Compute the pdf at the left endpoint dist.a.
+        data = [
+            [stats.fisk, (1,), 1],
+            [stats.burr, (0.5, 2), 1],
+            [stats.burr, (1, 1), 1],
+            [stats.burr, (2, 0.5), 1],
+            [stats.burr12, (1, 0.5), 0.5],
+            [stats.burr12, (1, 1), 1.0],
+            [stats.burr12, (1, 2), 2.0]]
+
+        ans = [_f.pdf(_f.a, *_args) for _f, _args, _ in data]
+        correct = [_correct_ for _f, _args, _correct_ in data]
+        assert_array_almost_equal(ans, correct)
+
+        ans = [_f.logpdf(_f.a, *_args) for _f, _args, _ in data]
+        correct = [np.log(_correct_) for _f, _args, _correct_ in data]
+        assert_array_almost_equal(ans, correct)
+
+    def test_burr_stats_9544(self):
+        # gh-9544.  Test from gh-9978
+        c, d = 5.0, 3
+        mean, variance = stats.burr(c, d).stats()
+        # mean = sc.beta(3 + 1/5, 1. - 1/5) * 3  = 1.4110263...
+        # var =  sc.beta(3 + 2 / 5, 1. - 2 / 5) * 3 -
+        #        (sc.beta(3 + 1 / 5, 1. - 1 / 5) * 3) ** 2
+        mean_hc, variance_hc = 1.4110263183925857, 0.22879948026191643
+        assert_allclose(mean, mean_hc)
+        assert_allclose(variance, variance_hc)
+
+    def test_burr_nan_mean_var_9544(self):
+        # gh-9544.  Test from gh-9978
+        c, d = 0.5, 3
+        mean, variance = stats.burr(c, d).stats()
+        assert_(np.isnan(mean))
+        assert_(np.isnan(variance))
+        c, d = 1.5, 3
+        mean, variance = stats.burr(c, d).stats()
+        assert_(np.isfinite(mean))
+        assert_(np.isnan(variance))
+
+        c, d = 0.5, 3
+        e1, e2, e3, e4 = stats.burr._munp(np.array([1, 2, 3, 4]), c, d)
+        assert_(np.isnan(e1))
+        assert_(np.isnan(e2))
+        assert_(np.isnan(e3))
+        assert_(np.isnan(e4))
+        c, d = 1.5, 3
+        e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d)
+        assert_(np.isfinite(e1))
+        assert_(np.isnan(e2))
+        assert_(np.isnan(e3))
+        assert_(np.isnan(e4))
+        c, d = 2.5, 3
+        e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d)
+        assert_(np.isfinite(e1))
+        assert_(np.isfinite(e2))
+        assert_(np.isnan(e3))
+        assert_(np.isnan(e4))
+        c, d = 3.5, 3
+        e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d)
+        assert_(np.isfinite(e1))
+        assert_(np.isfinite(e2))
+        assert_(np.isfinite(e3))
+        assert_(np.isnan(e4))
+        c, d = 4.5, 3
+        e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d)
+        assert_(np.isfinite(e1))
+        assert_(np.isfinite(e2))
+        assert_(np.isfinite(e3))
+        assert_(np.isfinite(e4))
+
+
+class TestStudentizedRange:
+    # For alpha = .05, .01, and .001, and for each value of
+    # v = [1, 3, 10, 20, 120, inf], a Q was picked from each table for
+    # k = [2, 8, 14, 20].
+
+    # These arrays are written with `k` as columns and `v` as rows.
+    # Q values are taken from table 3:
+    # https://www.jstor.org/stable/2237810
+    q05 = [17.97, 45.40, 54.33, 59.56,
+           4.501, 8.853, 10.35, 11.24,
+           3.151, 5.305, 6.028, 6.467,
+           2.950, 4.768, 5.357, 5.714,
+           2.800, 4.363, 4.842, 5.126,
+           2.772, 4.286, 4.743, 5.012]
+    q01 = [90.03, 227.2, 271.8, 298.0,
+           8.261, 15.64, 18.22, 19.77,
+           4.482, 6.875, 7.712, 8.226,
+           4.024, 5.839, 6.450, 6.823,
+           3.702, 5.118, 5.562, 5.827,
+           3.643, 4.987, 5.400, 5.645]
+    q001 = [900.3, 2272, 2718, 2980,
+            18.28, 34.12, 39.69, 43.05,
+            6.487, 9.352, 10.39, 11.03,
+            5.444, 7.313, 7.966, 8.370,
+            4.772, 6.039, 6.448, 6.695,
+            4.654, 5.823, 6.191, 6.411]
+    qs = np.concatenate((q05, q01, q001))
+    ps = [.95, .99, .999]
+    vs = [1, 3, 10, 20, 120, np.inf]
+    ks = [2, 8, 14, 20]
+
+    data = list(zip(product(ps, vs, ks), qs))
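+    # To make the pairing explicit: product(ps, vs, ks) iterates `k`
+    # fastest, then `v`, then `p`, matching the row-major layout of the
+    # q* arrays above; e.g. the first element of `data` is
+    # ((.95, 1, 2), 17.97), i.e. the 95th percentile for v=1, k=2.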
+
+    # A small selection of large-v cases generated with R's `ptukey`
+    # Each case is in the format (q, k, v, r_result)
+    r_data = [
+        (0.1, 3, 9001, 0.002752818526842),
+        (1, 10, 1000, 0.000526142388912),
+        (1, 3, np.inf, 0.240712641229283),
+        (4, 3, np.inf, 0.987012338626815),
+        (1, 10, np.inf, 0.000519869467083),
+    ]
+
+    def test_cdf_against_tables(self):
+        for pvk, q in self.data:
+            p_expected, v, k = pvk
+            res_p = stats.studentized_range.cdf(q, k, v)
+            assert_allclose(res_p, p_expected, rtol=1e-4)
+
+    @pytest.mark.slow
+    def test_ppf_against_tables(self):
+        for pvk, q_expected in self.data:
+            p, v, k = pvk
+            res_q = stats.studentized_range.ppf(p, k, v)
+            assert_allclose(res_q, q_expected, rtol=5e-4)
+
+    path_prefix = os.path.dirname(__file__)
+    relative_path = "data/studentized_range_mpmath_ref.json"
+    with open(os.path.join(path_prefix, relative_path), "r") as file:
+        pregenerated_data = json.load(file)
+
+    @pytest.mark.parametrize("case_result", pregenerated_data["cdf_data"])
+    def test_cdf_against_mp(self, case_result):
+        src_case = case_result["src_case"]
+        mp_result = case_result["mp_result"]
+        qkv = src_case["q"], src_case["k"], src_case["v"]
+        res = stats.studentized_range.cdf(*qkv)
+
+        assert_allclose(res, mp_result,
+                        atol=src_case["expected_atol"],
+                        rtol=src_case["expected_rtol"])
+
+    @pytest.mark.parametrize("case_result", pregenerated_data["pdf_data"])
+    def test_pdf_against_mp(self, case_result):
+        src_case = case_result["src_case"]
+        mp_result = case_result["mp_result"]
+        qkv = src_case["q"], src_case["k"], src_case["v"]
+        res = stats.studentized_range.pdf(*qkv)
+
+        assert_allclose(res, mp_result,
+                        atol=src_case["expected_atol"],
+                        rtol=src_case["expected_rtol"])
+
+    @pytest.mark.slow
+    @pytest.mark.xfail_on_32bit("intermittent RuntimeWarning: invalid value.")
+    @pytest.mark.parametrize("case_result", pregenerated_data["moment_data"])
+    def test_moment_against_mp(self, case_result):
+        src_case = case_result["src_case"]
+        mp_result = case_result["mp_result"]
+        mkv = src_case["m"], src_case["k"], src_case["v"]
+
+        # Silence invalid value encountered warnings. Actual problems will be
+        # caught by the result comparison.
+        with np.errstate(invalid='ignore'):
+            res = stats.studentized_range.moment(*mkv)
+
+        assert_allclose(res, mp_result,
+                        atol=src_case["expected_atol"],
+                        rtol=src_case["expected_rtol"])
+
+    def test_pdf_integration(self):
+        k, v = 3, 10
+        # Test that the PDF integrates to 1, as it should.
+        res = quad(stats.studentized_range.pdf, 0, np.inf, args=(k, v))
+        assert_allclose(res[0], 1)
+
+    @pytest.mark.xslow
+    def test_pdf_against_cdf(self):
+        k, v = 3, 10
+
+        # Test whether the integrated PDF matches the CDF using cumulative
+        # integration. Use a small step size to reduce error due to the
+        # summation. This is slow, but tests the results well.
+        x = np.arange(0, 10, step=0.01)
+
+        y_cdf = stats.studentized_range.cdf(x, k, v)[1:]
+        y_pdf_raw = stats.studentized_range.pdf(x, k, v)
+        y_pdf_cumulative = cumulative_trapezoid(y_pdf_raw, x)
+
+        # Because of error caused by the summation, use a relatively large rtol
+        assert_allclose(y_pdf_cumulative, y_cdf, rtol=1e-4)
+
+    @pytest.mark.parametrize("r_case_result", r_data)
+    def test_cdf_against_r(self, r_case_result):
+        # Test large `v` values using R
+        q, k, v, r_res = r_case_result
+        with np.errstate(invalid='ignore'):
+            res = stats.studentized_range.cdf(q, k, v)
+        assert_allclose(res, r_res)
+
+    @pytest.mark.slow
+    @pytest.mark.xfail_on_32bit("intermittent RuntimeWarning: invalid value.")
+    def test_moment_vectorization(self):
+        # Test moment broadcasting. Calls `_munp` directly because
+        # `rv_continuous.moment` is broken at time of writing. See gh-12192
+
+        # Silence invalid value encountered warnings. Actual problems will be
+        # caught by the result comparison.
+        with np.errstate(invalid='ignore'):
+            m = stats.studentized_range._munp([1, 2], [4, 5], [10, 11])
+
+        assert_allclose(m.shape, (2,))
+
+        with pytest.raises(ValueError, match="...could not be broadcast..."):
+            stats.studentized_range._munp(1, [4, 5], [10, 11, 12])
+
+    @pytest.mark.xslow
+    def test_fitstart_valid(self):
+        with suppress_warnings() as sup, np.errstate(invalid="ignore"):
+            # the integration warning message may differ
+            sup.filter(IntegrationWarning)
+            k, df, _, _ = stats.studentized_range._fitstart([1, 2, 3])
+        assert_(stats.studentized_range._argcheck(k, df))
+
+    def test_infinite_df(self):
+        # Check that the infinite-df and finite-df integrators used for
+        # the CDF and PDF roughly agree for a large df case
+        res = stats.studentized_range.pdf(3, 10, np.inf)
+        res_finite = stats.studentized_range.pdf(3, 10, 99999)
+        assert_allclose(res, res_finite, atol=1e-4, rtol=1e-4)
+
+        res = stats.studentized_range.cdf(3, 10, np.inf)
+        res_finite = stats.studentized_range.cdf(3, 10, 99999)
+        assert_allclose(res, res_finite, atol=1e-4, rtol=1e-4)
+
+    def test_df_cutoff(self):
+        # Test that the CDF and PDF properly switch integrators at df=100,000.
+        # The infinite integrator should be different enough that it fails
+        # an allclose assertion. Also sanity check that using the same
+        # integrator does pass the allclose with a 1-df difference, which
+        # should be tiny.
+
+        res = stats.studentized_range.pdf(3, 10, 100000)
+        res_finite = stats.studentized_range.pdf(3, 10, 99999)
+        res_sanity = stats.studentized_range.pdf(3, 10, 99998)
+        assert_raises(AssertionError, assert_allclose, res, res_finite,
+                      atol=1e-6, rtol=1e-6)
+        assert_allclose(res_finite, res_sanity, atol=1e-6, rtol=1e-6)
+
+        res = stats.studentized_range.cdf(3, 10, 100000)
+        res_finite = stats.studentized_range.cdf(3, 10, 99999)
+        res_sanity = stats.studentized_range.cdf(3, 10, 99998)
+        assert_raises(AssertionError, assert_allclose, res, res_finite,
+                      atol=1e-6, rtol=1e-6)
+        assert_allclose(res_finite, res_sanity, atol=1e-6, rtol=1e-6)
+
+    def test_clipping(self):
+        # The result of this computation was -9.9253938401489e-14 on some
+        # systems. The correct result is very nearly zero, but should not be
+        # negative.
+        q, k, v = 34.6413996195345746, 3, 339
+        p = stats.studentized_range.sf(q, k, v)
+        assert_allclose(p, 0, atol=1e-10)
+        assert p >= 0
+
+
+def test_540_567():
+    # test for nan returned in tickets 540, 567
+    assert_almost_equal(stats.norm.cdf(-1.7624320982), 0.03899815971089126,
+                        decimal=10, err_msg='test_540_567')
+    assert_almost_equal(stats.norm.cdf(-1.7624320983), 0.038998159702449846,
+                        decimal=10, err_msg='test_540_567')
+    assert_almost_equal(stats.norm.cdf(1.38629436112, loc=0.950273420309,
+                                       scale=0.204423758009),
+                        0.98353464004309321,
+                        decimal=10, err_msg='test_540_567')
+
+
+def test_regression_ticket_1316():
+    # The following was raising an exception, because _construct_default_doc()
+    # did not handle the default keyword extradoc=None.  See ticket #1316.
+    stats._continuous_distns.gamma_gen(name='gamma')
+
+
+def test_regression_ticket_1326():
+    # the pdf calculation was adjusted to avoid a nan from 0*log(0)
+    assert_almost_equal(stats.chi2.pdf(0.0, 2), 0.5, 14)
+
+
+def test_regression_tukey_lambda():
+    # Make sure that Tukey-Lambda distribution correctly handles
+    # non-positive lambdas.
+    x = np.linspace(-5.0, 5.0, 101)
+
+    with np.errstate(divide='ignore'):
+        for lam in [0.0, -1.0, -2.0, np.array([[-1.0], [0.0], [-2.0]])]:
+            p = stats.tukeylambda.pdf(x, lam)
+            assert_((p != 0.0).all())
+            assert_(~np.isnan(p).all())
+
+        lam = np.array([[-1.0], [0.0], [2.0]])
+        p = stats.tukeylambda.pdf(x, lam)
+
+    assert_(~np.isnan(p).all())
+    assert_((p[0] != 0.0).all())
+    assert_((p[1] != 0.0).all())
+    assert_((p[2] != 0.0).any())
+    assert_((p[2] == 0.0).any())
+
+
+@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstrings stripped")
+def test_regression_ticket_1421():
+    assert_('pdf(x, mu, loc=0, scale=1)' not in stats.poisson.__doc__)
+    assert_('pmf(x,' in stats.poisson.__doc__)
+
+
+def test_nan_arguments_gh_issue_1362():
+    with np.errstate(invalid='ignore'):
+        assert_(np.isnan(stats.t.logcdf(1, np.nan)))
+        assert_(np.isnan(stats.t.cdf(1, np.nan)))
+        assert_(np.isnan(stats.t.logsf(1, np.nan)))
+        assert_(np.isnan(stats.t.sf(1, np.nan)))
+        assert_(np.isnan(stats.t.pdf(1, np.nan)))
+        assert_(np.isnan(stats.t.logpdf(1, np.nan)))
+        assert_(np.isnan(stats.t.ppf(1, np.nan)))
+        assert_(np.isnan(stats.t.isf(1, np.nan)))
+
+        assert_(np.isnan(stats.bernoulli.logcdf(np.nan, 0.5)))
+        assert_(np.isnan(stats.bernoulli.cdf(np.nan, 0.5)))
+        assert_(np.isnan(stats.bernoulli.logsf(np.nan, 0.5)))
+        assert_(np.isnan(stats.bernoulli.sf(np.nan, 0.5)))
+        assert_(np.isnan(stats.bernoulli.pmf(np.nan, 0.5)))
+        assert_(np.isnan(stats.bernoulli.logpmf(np.nan, 0.5)))
+        assert_(np.isnan(stats.bernoulli.ppf(np.nan, 0.5)))
+        assert_(np.isnan(stats.bernoulli.isf(np.nan, 0.5)))
+
+
+def test_frozen_fit_ticket_1536():
+    np.random.seed(5678)
+    true = np.array([0.25, 0., 0.5])
+    x = stats.lognorm.rvs(true[0], true[1], true[2], size=100)
+
+    with np.errstate(divide='ignore'):
+        params = np.array(stats.lognorm.fit(x, floc=0.))
+
+    assert_almost_equal(params, true, decimal=2)
+
+    params = np.array(stats.lognorm.fit(x, fscale=0.5, loc=0))
+    assert_almost_equal(params, true, decimal=2)
+
+    params = np.array(stats.lognorm.fit(x, f0=0.25, loc=0))
+    assert_almost_equal(params, true, decimal=2)
+
+    params = np.array(stats.lognorm.fit(x, f0=0.25, floc=0))
+    assert_almost_equal(params, true, decimal=2)
+
+    np.random.seed(5678)
+    loc = 1
+    floc = 0.9
+    x = stats.norm.rvs(loc, 2., size=100)
+    params = np.array(stats.norm.fit(x, floc=floc))
+    expected = np.array([floc, np.sqrt(((x-floc)**2).mean())])
+    assert_almost_equal(params, expected, decimal=4)
+
+
+def test_regression_ticket_1530():
+    # Check that the starting values work for the Cauchy distribution fit.
+    np.random.seed(654321)
+    rvs = stats.cauchy.rvs(size=100)
+    params = stats.cauchy.fit(rvs)
+    expected = (0.045, 1.142)
+    assert_almost_equal(params, expected, decimal=1)
+
+
+def test_gh_pr_4806():
+    # Check starting values for Cauchy distribution fit.
+    np.random.seed(1234)
+    x = np.random.randn(42)
+    for offset in 10000.0, 1222333444.0:
+        loc, scale = stats.cauchy.fit(x + offset)
+        assert_allclose(loc, offset, atol=1.0)
+        assert_allclose(scale, 0.6, atol=1.0)
+
+
+def test_tukeylambda_stats_ticket_1545():
+    # Some tests for the variance and kurtosis of the Tukey Lambda distr.
+    # See test_tukeylambda_stats.py for more tests.
+
+    mv = stats.tukeylambda.stats(0, moments='mvsk')
+    # Known exact values:
+    expected = [0, np.pi**2/3, 0, 1.2]
+    assert_almost_equal(mv, expected, decimal=10)
+
+    mv = stats.tukeylambda.stats(3.13, moments='mvsk')
+    # 'expected' computed with mpmath.
+    expected = [0, 0.0269220858861465102, 0, -0.898062386219224104]
+    assert_almost_equal(mv, expected, decimal=10)
+
+    mv = stats.tukeylambda.stats(0.14, moments='mvsk')
+    # 'expected' computed with mpmath.
+    expected = [0, 2.11029702221450250, 0, -0.02708377353223019456]
+    assert_almost_equal(mv, expected, decimal=10)
+
+
+def test_poisson_logpmf_ticket_1436():
+    assert_(np.isfinite(stats.poisson.logpmf(1500, 200)))
+
+
+def test_powerlaw_stats():
+    """Test the powerlaw stats function.
+
+    This unit test is also a regression test for ticket 1548.
+
+    The exact values are:
+    mean:
+        mu = a / (a + 1)
+    variance:
+        sigma**2 = a / ((a + 2) * (a + 1) ** 2)
+    skewness:
+        One formula (see https://en.wikipedia.org/wiki/Skewness) is
+            gamma_1 = (E[X**3] - 3*mu*E[X**2] + 2*mu**3) / sigma**3
+        A short calculation shows that E[X**k] is a / (a + k), so gamma_1
+        can be implemented as
+            n = a/(a+3) - 3*(a/(a+1))*a/(a+2) + 2*(a/(a+1))**3
+            d = sqrt(a/((a+2)*(a+1)**2)) ** 3
+            gamma_1 = n/d
+        Either by simplifying, or by a direct calculation of mu_3 / sigma**3,
+        one gets the more concise formula:
+            gamma_1 = -2.0 * ((a - 1) / (a + 3)) * sqrt((a + 2) / a)
+    kurtosis: (See https://en.wikipedia.org/wiki/Kurtosis)
+        The excess kurtosis is
+            gamma_2 = mu_4 / sigma**4 - 3
+        A bit of calculus and algebra (sympy helps) shows that
+            mu_4 = 3*a*(3*a**2 - a + 2) / ((a+1)**4 * (a+2) * (a+3) * (a+4))
+        so
+            gamma_2 = 3*(3*a**2 - a + 2) * (a+2) / (a*(a+3)*(a+4)) - 3
+        which can be rearranged to
+            gamma_2 = 6 * (a**3 - a**2 - 6*a + 2) / (a*(a+3)*(a+4))
+    """
+    cases = [(1.0, (0.5, 1./12, 0.0, -1.2)),
+             (2.0, (2./3, 2./36, -0.56568542494924734, -0.6))]
+    for a, exact_mvsk in cases:
+        mvsk = stats.powerlaw.stats(a, moments="mvsk")
+        assert_array_almost_equal(mvsk, exact_mvsk)
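+    # The skewness formula quoted in the docstring can be re-derived
+    # mechanically; a sympy sketch (illustrative, not run by the tests):
+    #
+    #     import sympy as sp
+    #     a = sp.symbols('a', positive=True)
+    #     E = lambda k: a/(a + k)            # E[X**k] for powerlaw
+    #     mu, sigma2 = E(1), E(2) - E(1)**2
+    #     g1 = (E(3) - 3*mu*E(2) + 2*mu**3) / sp.sqrt(sigma2)**3
+    #     sp.simplify(g1 + 2*(a - 1)/(a + 3)*sp.sqrt((a + 2)/a))  # -> 0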
+
+
+def test_powerlaw_edge():
+    # Regression test for gh-3986.
+    p = stats.powerlaw.logpdf(0, 1)
+    assert_equal(p, 0.0)
+
+
+def test_exponpow_edge():
+    # Regression test for gh-3982.
+    p = stats.exponpow.logpdf(0, 1)
+    assert_equal(p, 0.0)
+
+    # Check pdf and logpdf at x = 0 for other values of b.
+    p = stats.exponpow.pdf(0, [0.25, 1.0, 1.5])
+    assert_equal(p, [np.inf, 1.0, 0.0])
+    p = stats.exponpow.logpdf(0, [0.25, 1.0, 1.5])
+    assert_equal(p, [np.inf, 0.0, -np.inf])
+
+
+def test_gengamma_edge():
+    # Regression test for gh-3985.
+    p = stats.gengamma.pdf(0, 1, 1)
+    assert_equal(p, 1.0)
+
+
+def test_gengamma_endpoint_with_neg_c():
+    p = stats.gengamma.pdf(0, 1, -1)
+    assert p == 0.0
+    logp = stats.gengamma.logpdf(0, 1, -1)
+    assert logp == -np.inf
+
+
+def test_gengamma_munp():
+    # Regression tests for gh-4724.
+    p = stats.gengamma._munp(-2, 200, 1.)
+    assert_almost_equal(p, 1./199/198)
+
+    p = stats.gengamma._munp(-2, 10, 1.)
+    assert_almost_equal(p, 1./9/8)
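+    # Both reference values follow from the gengamma moment formula
+    # E[X**n] = gamma(a + n/c) / gamma(a): with c = 1 and n = -2 this is
+    # gamma(a - 2)/gamma(a) = 1/((a - 1)*(a - 2)), i.e. 1/199/198 for
+    # a = 200 and 1/9/8 for a = 10.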
+
+
+def test_ksone_fit_freeze():
+    # Regression test for ticket #1638.
+    d = np.array(
+        [-0.18879233, 0.15734249, 0.18695107, 0.27908787, -0.248649,
+         -0.2171497, 0.12233512, 0.15126419, 0.03119282, 0.4365294,
+         0.08930393, -0.23509903, 0.28231224, -0.09974875, -0.25196048,
+         0.11102028, 0.1427649, 0.10176452, 0.18754054, 0.25826724,
+         0.05988819, 0.0531668, 0.21906056, 0.32106729, 0.2117662,
+         0.10886442, 0.09375789, 0.24583286, -0.22968366, -0.07842391,
+         -0.31195432, -0.21271196, 0.1114243, -0.13293002, 0.01331725,
+         -0.04330977, -0.09485776, -0.28434547, 0.22245721, -0.18518199,
+         -0.10943985, -0.35243174, 0.06897665, -0.03553363, -0.0701746,
+         -0.06037974, 0.37670779, -0.21684405])
+
+    with np.errstate(invalid='ignore'):
+        with suppress_warnings() as sup:
+            sup.filter(IntegrationWarning,
+                       "The maximum number of subdivisions .50. has been "
+                       "achieved.")
+            sup.filter(RuntimeWarning,
+                       "floating point number truncated to an integer")
+            stats.ksone.fit(d)
+
+
+def test_norm_logcdf():
+    # Test precision of the logcdf of the normal distribution.
+    # This precision was enhanced in ticket 1614.
+    x = -np.asarray(list(range(0, 120, 4)))
+    # Values from R
+    expected = [-0.69314718, -10.36010149, -35.01343716, -75.41067300,
+                -131.69539607, -203.91715537, -292.09872100, -396.25241451,
+                -516.38564863, -652.50322759, -804.60844201, -972.70364403,
+                -1156.79057310, -1356.87055173, -1572.94460885, -1805.01356068,
+                -2053.07806561, -2317.13866238, -2597.19579746, -2893.24984493,
+                -3205.30112136, -3533.34989701, -3877.39640444, -4237.44084522,
+                -4613.48339520, -5005.52420869, -5413.56342187, -5837.60115548,
+                -6277.63751711, -6733.67260303]
+
+    assert_allclose(stats.norm().logcdf(x), expected, atol=1e-8)
+
+    # also test the complex-valued code path
+    assert_allclose(stats.norm().logcdf(x + 1e-14j).real, expected, atol=1e-8)
+
+    # test the accuracy: d(logcdf)/dx = pdf / cdf \equiv exp(logpdf - logcdf)
+    deriv = (stats.norm.logcdf(x + 1e-10j)/1e-10).imag
+    deriv_expected = np.exp(stats.norm.logpdf(x) - stats.norm.logcdf(x))
+    assert_allclose(deriv, deriv_expected, atol=1e-10)
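+    # The 1e-10j step above is complex-step differentiation: for an
+    # analytic f, f(x + i*h) ~= f(x) + i*h*f'(x), so Im(f(x + i*h))/h
+    # approximates f'(x) without subtractive cancellation.  A minimal
+    # sketch of the same idea (illustrative only):
+    #
+    #     h = 1e-10
+    #     d = np.imag(stats.norm.logcdf(-20 + h*1j)) / h
+    #     # d ~= exp(stats.norm.logpdf(-20) - stats.norm.logcdf(-20))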
+
+
+def test_levy_cdf_ppf():
+    # Test levy.cdf, including small arguments.
+    x = np.array([1000, 1.0, 0.5, 0.1, 0.01, 0.001])
+
+    # Expected values were calculated separately with mpmath.
+    # E.g.
+    # >>> mpmath.mp.dps = 100
+    # >>> x = mpmath.mp.mpf('0.01')
+    # >>> cdf = mpmath.erfc(mpmath.sqrt(1/(2*x)))
+    expected = np.array([0.9747728793699604,
+                         0.3173105078629141,
+                         0.1572992070502851,
+                         0.0015654022580025495,
+                         1.523970604832105e-23,
+                         1.795832784800726e-219])
+
+    y = stats.levy.cdf(x)
+    assert_allclose(y, expected, rtol=1e-10)
+
+    # ppf(expected) should get us back to x.
+    xx = stats.levy.ppf(expected)
+    assert_allclose(xx, x, rtol=1e-13)
+
+
+def test_levy_sf():
+    # Large values, far into the tail of the distribution.
+    x = np.array([1e15, 1e25, 1e35, 1e50])
+    # Expected values were calculated with mpmath.
+    expected = np.array([2.5231325220201597e-08,
+                         2.52313252202016e-13,
+                         2.52313252202016e-18,
+                         7.978845608028653e-26])
+    y = stats.levy.sf(x)
+    assert_allclose(y, expected, rtol=1e-14)
+
+
+# The expected values for levy.isf(p) were calculated with mpmath.
+# For loc=0 and scale=1, the inverse SF can be computed with
+#
+#     import mpmath
+#
+#     def levy_invsf(p):
+#         return 1/(2*mpmath.erfinv(p)**2)
+#
+# For example, with mpmath.mp.dps set to 60, float(levy_invsf(1e-20))
+# returns 6.366197723675814e+39.
+#
+@pytest.mark.parametrize('p, expected_isf',
+                         [(1e-20, 6.366197723675814e+39),
+                          (1e-8, 6366197723675813.0),
+                          (0.375, 4.185810119346273),
+                          (0.875, 0.42489442055310134),
+                          (0.999, 0.09235685880262713),
+                          (0.9999999962747097, 0.028766845244146945)])
+def test_levy_isf(p, expected_isf):
+    x = stats.levy.isf(p)
+    assert_allclose(x, expected_isf, atol=5e-15)
+
+
+def test_levy_l_sf():
+    # Test levy_l.sf for small arguments.
+    x = np.array([-0.016, -0.01, -0.005, -0.0015])
+    # Expected values were calculated with mpmath.
+    expected = np.array([2.6644463892359302e-15,
+                         1.523970604832107e-23,
+                         2.0884875837625492e-45,
+                         5.302850374626878e-147])
+    y = stats.levy_l.sf(x)
+    assert_allclose(y, expected, rtol=1e-13)
+
+
+def test_levy_l_isf():
+    # Test roundtrip sf(isf(p)), including a small input value.
+    p = np.array([3.0e-15, 0.25, 0.99])
+    x = stats.levy_l.isf(p)
+    q = stats.levy_l.sf(x)
+    assert_allclose(q, p, rtol=5e-14)
+
+
+def test_hypergeom_interval_1802():
+    # these two had endless loops
+    assert_equal(stats.hypergeom.interval(.95, 187601, 43192, 757),
+                 (152.0, 197.0))
+    assert_equal(stats.hypergeom.interval(.945, 187601, 43192, 757),
+                 (152.0, 197.0))
+    # this was working also before
+    assert_equal(stats.hypergeom.interval(.94, 187601, 43192, 757),
+                 (153.0, 196.0))
+
+    # degenerate case .a == .b
+    assert_equal(stats.hypergeom.ppf(0.02, 100, 100, 8), 8)
+    assert_equal(stats.hypergeom.ppf(1, 100, 100, 8), 8)
+
+
+def test_distribution_too_many_args():
+    np.random.seed(1234)
+
+    # Check that a TypeError is raised when too many args are given to a method
+    # Regression test for ticket 1815.
+    x = np.linspace(0.1, 0.7, num=5)
+    assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0)
+    assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, loc=1.0)
+    assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, 5)
+    assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0, scale=0.5)
+    assert_raises(TypeError, stats.gamma.rvs, 2., 3, loc=1.0, scale=0.5)
+    assert_raises(TypeError, stats.gamma.cdf, x, 2., 3, loc=1.0, scale=0.5)
+    assert_raises(TypeError, stats.gamma.ppf, x, 2., 3, loc=1.0, scale=0.5)
+    assert_raises(TypeError, stats.gamma.stats, 2., 3, loc=1.0, scale=0.5)
+    assert_raises(TypeError, stats.gamma.entropy, 2., 3, loc=1.0, scale=0.5)
+    assert_raises(TypeError, stats.gamma.fit, x, 2., 3, loc=1.0, scale=0.5)
+
+    # These should not give errors
+    stats.gamma.pdf(x, 2, 3)  # loc=3
+    stats.gamma.pdf(x, 2, 3, 4)  # loc=3, scale=4
+    stats.gamma.stats(2., 3)
+    stats.gamma.stats(2., 3, 4)
+    stats.gamma.stats(2., 3, 4, 'mv')
+    stats.gamma.rvs(2., 3, 4, 5)
+    stats.gamma.fit(stats.gamma.rvs(2., size=7), 2.)
+
+    # Also for a discrete distribution
+    stats.geom.pmf(x, 2, loc=3)  # no error, loc=3
+    assert_raises(TypeError, stats.geom.pmf, x, 2, 3, 4)
+    assert_raises(TypeError, stats.geom.pmf, x, 2, 3, loc=4)
+
+    # And for distributions with 0, 2 and 3 args respectively
+    assert_raises(TypeError, stats.expon.pdf, x, 3, loc=1.0)
+    assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, loc=1.0)
+    assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, 0.1, 0.1)
+    assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, loc=1.0)
+    assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, 1.0, scale=0.5)
+    stats.ncf.pdf(x, 3, 4, 5, 6, 1.0)  # 3 args, plus loc/scale
+
+
+@pytest.mark.filterwarnings('ignore::RuntimeWarning')
+def test_ncx2_tails_ticket_955():
+    # Trac #955 -- check that the cdf computed by special functions
+    # matches the integrated pdf
+    a = stats.ncx2.cdf(np.arange(20, 25, 0.2), 2, 1.07458615e+02)
+    b = stats.ncx2._cdfvec(np.arange(20, 25, 0.2), 2, 1.07458615e+02)
+    assert_allclose(a, b, rtol=1e-3, atol=0)
+
+
+def test_ncx2_tails_pdf():
+    # ncx2.pdf does not return nans in extreme tails (example from gh-1577)
+    # NB: this is to check that nan_to_num is not needed in ncx2.pdf
+    with warnings.catch_warnings():
+        warnings.simplefilter('error', RuntimeWarning)
+        assert_equal(stats.ncx2.pdf(1, np.arange(340, 350), 2), 0)
+        logval = stats.ncx2.logpdf(1, np.arange(340, 350), 2)
+
+    assert_(np.isneginf(logval).all())
+
+    # Verify logpdf has extended precision when pdf underflows to 0
+    with warnings.catch_warnings():
+        warnings.simplefilter('error', RuntimeWarning)
+        assert_equal(stats.ncx2.pdf(10000, 3, 12), 0)
+        assert_allclose(stats.ncx2.logpdf(10000, 3, 12), -4662.444377524883)
+
+
+@pytest.mark.parametrize('method, expected', [
+    ('cdf', np.array([2.497951336e-09, 3.437288941e-10])),
+    ('pdf', np.array([1.238579980e-07, 1.710041145e-08])),
+    ('logpdf', np.array([-15.90413011, -17.88416331])),
+    ('ppf', np.array([4.865182052, 7.017182271]))
+])
+def test_ncx2_zero_nc(method, expected):
+    # gh-5441
+    # ncx2 with nc=0 is identical to chi2
+    # Comparison to R (v3.5.1)
+    # > options(digits=10)
+    # > pchisq(0.1, df=10, ncp=c(0,4))
+    # > dchisq(0.1, df=10, ncp=c(0,4))
+    # > dchisq(0.1, df=10, ncp=c(0,4), log=TRUE)
+    # > qchisq(0.1, df=10, ncp=c(0,4))
+
+    result = getattr(stats.ncx2, method)(0.1, nc=[0, 4], df=10)
+    assert_allclose(result, expected, atol=1e-15)
+
+
+def test_ncx2_zero_nc_rvs():
+    # gh-5441
+    # ncx2 with nc=0 is identical to chi2
+    result = stats.ncx2.rvs(df=10, nc=0, random_state=1)
+    expected = stats.chi2.rvs(df=10, random_state=1)
+    assert_allclose(result, expected, atol=1e-15)
+
+
+@pytest.mark.filterwarnings('ignore::RuntimeWarning')
+def test_ncx2_gh12731():
+    # test that gh-12731 is resolved; previously these were all 0.5
+    nc = 10**np.arange(5, 10)
+    assert_equal(stats.ncx2.cdf(1e4, df=1, nc=nc), 0)
+
+
+@pytest.mark.filterwarnings('ignore::RuntimeWarning')
+def test_ncx2_gh8665():
+    # test that gh-8665 is resolved; previously this tended to a nonzero value
+    x = np.array([4.99515382e+00, 1.07617327e+01, 2.31854502e+01,
+                  4.99515382e+01, 1.07617327e+02, 2.31854502e+02,
+                  4.99515382e+02, 1.07617327e+03, 2.31854502e+03,
+                  4.99515382e+03, 1.07617327e+04, 2.31854502e+04,
+                  4.99515382e+04])
+    nu, lam = 20, 499.51538166556196
+
+    sf = stats.ncx2.sf(x, df=nu, nc=lam)
+    # Computed in R (no survival function implementation was found, so
+    # 1 - pchisq is used instead):
+    # options(digits=16)
+    # x <- c(4.99515382e+00, 1.07617327e+01, 2.31854502e+01, 4.99515382e+01,
+    #        1.07617327e+02, 2.31854502e+02, 4.99515382e+02, 1.07617327e+03,
+    #        2.31854502e+03, 4.99515382e+03, 1.07617327e+04, 2.31854502e+04,
+    #        4.99515382e+04)
+    # nu <- 20
+    # lam <- 499.51538166556196
+    # 1 - pchisq(x, df = nu, ncp = lam)
+    sf_expected = [1.0000000000000000, 1.0000000000000000, 1.0000000000000000,
+                   1.0000000000000000, 1.0000000000000000, 0.9999999999999888,
+                   0.6646525582135460, 0.0000000000000000, 0.0000000000000000,
+                   0.0000000000000000, 0.0000000000000000, 0.0000000000000000,
+                   0.0000000000000000]
+    assert_allclose(sf, sf_expected, atol=1e-12)
+
+
+def test_ncx2_gh11777():
+    # regression test for gh-11777:
+    # At high values of degrees of freedom df, ensure the pdf of ncx2 does
+    # not get clipped to zero when the non-centrality parameter is
+    # sufficiently less than df
+    df = 6700
+    nc = 5300
+    x = np.linspace(stats.ncx2.ppf(0.001, df, nc),
+                    stats.ncx2.ppf(0.999, df, nc), num=10000)
+    ncx2_pdf = stats.ncx2.pdf(x, df, nc)
+    gauss_approx = stats.norm.pdf(x, df + nc, np.sqrt(2 * df + 4 * nc))
+    # use huge tolerance as we're only looking for obvious discrepancy
+    assert_allclose(ncx2_pdf, gauss_approx, atol=1e-4)
+
+
+def test_foldnorm_zero():
+    # Parameter value c=0 was not enabled, see gh-2399.
+    rv = stats.foldnorm(0, scale=1)
+    assert_equal(rv.cdf(0), 0)  # rv.cdf(0) previously resulted in: nan
+
+
+def test_stats_shapes_argcheck():
+    # stats method was failing for vector shapes if some of the values
+    # were outside of the allowed range, see gh-2678
+    mv3 = stats.invgamma.stats([0.0, 0.5, 1.0], 1, 0.5)  # 0 is not a legal `a`
+    mv2 = stats.invgamma.stats([0.5, 1.0], 1, 0.5)
+    mv2_augmented = tuple(np.r_[np.nan, _] for _ in mv2)
+    assert_equal(mv2_augmented, mv3)
+
+    # -1 is not a legal shape parameter
+    mv3 = stats.lognorm.stats([2, 2.4, -1])
+    mv2 = stats.lognorm.stats([2, 2.4])
+    mv2_augmented = tuple(np.r_[_, np.nan] for _ in mv2)
+    assert_equal(mv2_augmented, mv3)
+
+    # FIXME: this is only a quick-and-dirty test of a quick-and-dirty bugfix.
+    # stats method with multiple shape parameters is not properly vectorized
+    # anyway, so some distributions may or may not fail.
+
+
+# Test subclassing distributions w/ explicit shapes
+
+class _distr_gen(stats.rv_continuous):
+    def _pdf(self, x, a):
+        return 42
+
+
+class _distr2_gen(stats.rv_continuous):
+    def _cdf(self, x, a):
+        return 42 * a + x
+
+
+class _distr3_gen(stats.rv_continuous):
+    def _pdf(self, x, a, b):
+        return a + b
+
+    def _cdf(self, x, a):
+        # Different # of shape params from _pdf, to be able to check that
+        # inspection catches the inconsistency."""
+        return 42 * a + x
+
+
+class _distr6_gen(stats.rv_continuous):
+    # Two shape parameters (both _pdf and _cdf defined, consistent shapes).
+    def _pdf(self, x, a, b):
+        return a*x + b
+
+    def _cdf(self, x, a, b):
+        return 42 * a + x
+
+
+class TestSubclassingExplicitShapes:
+    # Construct a distribution w/ explicit shapes parameter and test it.
+
+    def test_correct_shapes(self):
+        dummy_distr = _distr_gen(name='dummy', shapes='a')
+        assert_equal(dummy_distr.pdf(1, a=1), 42)
+
+    def test_wrong_shapes_1(self):
+        dummy_distr = _distr_gen(name='dummy', shapes='A')
+        assert_raises(TypeError, dummy_distr.pdf, 1, **dict(a=1))
+
+    def test_wrong_shapes_2(self):
+        dummy_distr = _distr_gen(name='dummy', shapes='a, b, c')
+        dct = dict(a=1, b=2, c=3)
+        assert_raises(TypeError, dummy_distr.pdf, 1, **dct)
+
+    def test_shapes_string(self):
+        # shapes must be a string
+        dct = dict(name='dummy', shapes=42)
+        assert_raises(TypeError, _distr_gen, **dct)
+
+    def test_shapes_identifiers_1(self):
+        # shapes must be a comma-separated list of valid python identifiers
+        dct = dict(name='dummy', shapes='(!)')
+        assert_raises(SyntaxError, _distr_gen, **dct)
+
+    def test_shapes_identifiers_2(self):
+        dct = dict(name='dummy', shapes='4chan')
+        assert_raises(SyntaxError, _distr_gen, **dct)
+
+    def test_shapes_identifiers_3(self):
+        dct = dict(name='dummy', shapes='m(fti)')
+        assert_raises(SyntaxError, _distr_gen, **dct)
+
+    def test_shapes_identifiers_nodefaults(self):
+        dct = dict(name='dummy', shapes='a=2')
+        assert_raises(SyntaxError, _distr_gen, **dct)
+
+    def test_shapes_args(self):
+        dct = dict(name='dummy', shapes='*args')
+        assert_raises(SyntaxError, _distr_gen, **dct)
+
+    def test_shapes_kwargs(self):
+        dct = dict(name='dummy', shapes='**kwargs')
+        assert_raises(SyntaxError, _distr_gen, **dct)
+
+    def test_shapes_keywords(self):
+        # python keywords cannot be used for shape parameters
+        dct = dict(name='dummy', shapes='a, b, c, lambda')
+        assert_raises(SyntaxError, _distr_gen, **dct)
+
+    def test_shapes_signature(self):
+        # test explicit shapes which agree w/ the signature of _pdf
+        class _dist_gen(stats.rv_continuous):
+            def _pdf(self, x, a):
+                return stats.norm._pdf(x) * a
+
+        dist = _dist_gen(shapes='a')
+        assert_equal(dist.pdf(0.5, a=2), stats.norm.pdf(0.5)*2)
+
+    def test_shapes_signature_inconsistent(self):
+        # test explicit shapes which do not agree w/ the signature of _pdf
+        class _dist_gen(stats.rv_continuous):
+            def _pdf(self, x, a):
+                return stats.norm._pdf(x) * a
+
+        dist = _dist_gen(shapes='a, b')
+        assert_raises(TypeError, dist.pdf, 0.5, **dict(a=1, b=2))
+
+    def test_star_args(self):
+        # test _pdf with only starargs
+        # NB: **kwargs of pdf will never reach _pdf
+        class _dist_gen(stats.rv_continuous):
+            def _pdf(self, x, *args):
+                extra_kwarg = args[0]
+                return stats.norm._pdf(x) * extra_kwarg
+
+        dist = _dist_gen(shapes='extra_kwarg')
+        assert_equal(dist.pdf(0.5, extra_kwarg=33), stats.norm.pdf(0.5)*33)
+        assert_equal(dist.pdf(0.5, 33), stats.norm.pdf(0.5)*33)
+        assert_raises(TypeError, dist.pdf, 0.5, **dict(xxx=33))
+
+    def test_star_args_2(self):
+        # test _pdf with named & starargs
+        # NB: **kwargs of pdf will never reach _pdf
+        class _dist_gen(stats.rv_continuous):
+            def _pdf(self, x, offset, *args):
+                extra_kwarg = args[0]
+                return stats.norm._pdf(x) * extra_kwarg + offset
+
+        dist = _dist_gen(shapes='offset, extra_kwarg')
+        assert_equal(dist.pdf(0.5, offset=111, extra_kwarg=33),
+                     stats.norm.pdf(0.5)*33 + 111)
+        assert_equal(dist.pdf(0.5, 111, 33),
+                     stats.norm.pdf(0.5)*33 + 111)
+
+    def test_extra_kwarg(self):
+        # **kwargs to _pdf are ignored.
+        # this is a limitation of the framework (_pdf(x, *goodargs))
+        class _distr_gen(stats.rv_continuous):
+            def _pdf(self, x, *args, **kwargs):
+                # _pdf should handle *args, **kwargs itself.  Here "handling"
+                # is ignoring *args and looking for ``extra_kwarg`` and using
+                # that.
+                extra_kwarg = kwargs.pop('extra_kwarg', 1)
+                return stats.norm._pdf(x) * extra_kwarg
+
+        dist = _distr_gen(shapes='extra_kwarg')
+        assert_equal(dist.pdf(1, extra_kwarg=3), stats.norm.pdf(1))
+
+    def test_shapes_empty_string(self):
+        # shapes='' is equivalent to shapes=None
+        class _dist_gen(stats.rv_continuous):
+            def _pdf(self, x):
+                return stats.norm.pdf(x)
+
+        dist = _dist_gen(shapes='')
+        assert_equal(dist.pdf(0.5), stats.norm.pdf(0.5))
+
+
+class TestSubclassingNoShapes:
+    # Construct a distribution w/o explicit shapes parameter and test it.
+
+    def test_only__pdf(self):
+        dummy_distr = _distr_gen(name='dummy')
+        assert_equal(dummy_distr.pdf(1, a=1), 42)
+
+    def test_only__cdf(self):
+        # _pdf is determined from _cdf by taking numerical derivative
+        dummy_distr = _distr2_gen(name='dummy')
+        assert_almost_equal(dummy_distr.pdf(1, a=1), 1)
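+        # A worked check of the asserted value: _distr2_gen._cdf(x, a)
+        # is 42*a + x, so d/dx cdf = 1 everywhere, and the numerically
+        # differentiated pdf is ~1.  Sketch (illustrative only):
+        #
+        #     eps = 1e-6
+        #     num = (dummy_distr._cdf(1 + eps, 1)
+        #            - dummy_distr._cdf(1 - eps, 1)) / (2*eps)  # -> 1.0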
+
+    @pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped")
+    def test_signature_inspection(self):
+        # check that _pdf signature inspection works correctly, and is used in
+        # the class docstring
+        dummy_distr = _distr_gen(name='dummy')
+        assert_equal(dummy_distr.numargs, 1)
+        assert_equal(dummy_distr.shapes, 'a')
+        res = re.findall(r'logpdf\(x, a, loc=0, scale=1\)',
+                         dummy_distr.__doc__)
+        assert_(len(res) == 1)
+
+    @pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped")
+    def test_signature_inspection_2args(self):
+        # same for 2 shape params and both _pdf and _cdf defined
+        dummy_distr = _distr6_gen(name='dummy')
+        assert_equal(dummy_distr.numargs, 2)
+        assert_equal(dummy_distr.shapes, 'a, b')
+        res = re.findall(r'logpdf\(x, a, b, loc=0, scale=1\)',
+                         dummy_distr.__doc__)
+        assert_(len(res) == 1)
+
+    def test_signature_inspection_2args_incorrect_shapes(self):
+        # both _pdf and _cdf defined, but shapes are inconsistent: raises
+        assert_raises(TypeError, _distr3_gen, name='dummy')
+
+    def test_defaults_raise(self):
+        # default arguments should raise
+        class _dist_gen(stats.rv_continuous):
+            def _pdf(self, x, a=42):
+                return 42
+        assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
+
+    def test_starargs_raise(self):
+        # without explicit shapes, *args are not allowed
+        class _dist_gen(stats.rv_continuous):
+            def _pdf(self, x, a, *args):
+                return 42
+        assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
+
+    def test_kwargs_raise(self):
+        # without explicit shapes, **kwargs are not allowed
+        class _dist_gen(stats.rv_continuous):
+            def _pdf(self, x, a, **kwargs):
+                return 42
+        assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
+
+
+@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped")
+def test_docstrings():
+    badones = [r',\s*,', r'\(\s*,', r'^\s*:']
+    for distname in stats.__all__:
+        dist = getattr(stats, distname)
+        if isinstance(dist, (stats.rv_discrete, stats.rv_continuous)):
+            for regex in badones:
+                assert_(re.search(regex, dist.__doc__) is None)
+
+
+def test_infinite_input():
+    assert_almost_equal(stats.skellam.sf(np.inf, 10, 11), 0)
+    assert_almost_equal(stats.ncx2._cdf(np.inf, 8, 0.1), 1)
+
+
+def test_lomax_accuracy():
+    # regression test for gh-4033
+    p = stats.lomax.ppf(stats.lomax.cdf(1e-100, 1), 1)
+    assert_allclose(p, 1e-100)
+
+
+def test_gompertz_accuracy():
+    # Regression test for gh-4031
+    p = stats.gompertz.ppf(stats.gompertz.cdf(1e-100, 1), 1)
+    assert_allclose(p, 1e-100)
+
+
+def test_truncexpon_accuracy():
+    # regression test for gh-4035
+    p = stats.truncexpon.ppf(stats.truncexpon.cdf(1e-100, 1), 1)
+    assert_allclose(p, 1e-100)
+
+
+def test_rayleigh_accuracy():
+    # regression test for gh-4034
+    p = stats.rayleigh.isf(stats.rayleigh.sf(9, 1), 1)
+    assert_almost_equal(p, 9.0, decimal=15)
+
+
+def test_genextreme_give_no_warnings():
+    """regression test for gh-6219"""
+
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter("always")
+
+        stats.genextreme.cdf(.5, 0)
+        stats.genextreme.pdf(.5, 0)
+        stats.genextreme.ppf(.5, 0)
+        stats.genextreme.logpdf(-np.inf, 0.0)
+        number_of_warnings_thrown = len(w)
+        assert_equal(number_of_warnings_thrown, 0)
+
+
+def test_genextreme_entropy():
+    # regression test for gh-5181
+    euler_gamma = 0.5772156649015329
+
+    h = stats.genextreme.entropy(-1.0)
+    assert_allclose(h, 2*euler_gamma + 1, rtol=1e-14)
+
+    h = stats.genextreme.entropy(0)
+    assert_allclose(h, euler_gamma + 1, rtol=1e-14)
+
+    h = stats.genextreme.entropy(1.0)
+    assert_equal(h, 1)
+
+    h = stats.genextreme.entropy(-2.0, scale=10)
+    assert_allclose(h, euler_gamma*3 + np.log(10) + 1, rtol=1e-14)
+
+    h = stats.genextreme.entropy(10)
+    assert_allclose(h, -9*euler_gamma + 1, rtol=1e-14)
+
+    h = stats.genextreme.entropy(-10)
+    assert_allclose(h, 11*euler_gamma + 1, rtol=1e-14)
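+    # All of the reference values above are consistent with the closed
+    # form h = log(scale) + euler_gamma*(1 - c) + 1 (using scipy's sign
+    # convention for the shape c); e.g. c = -1 gives 2*euler_gamma + 1
+    # and c = 10 gives -9*euler_gamma + 1.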
+
+
+def test_genextreme_sf_isf():
+    # Expected values were computed using mpmath:
+    #
+    #    import mpmath
+    #
+    #    def mp_genextreme_sf(x, xi, mu=0, sigma=1):
+    #        # Formula from wikipedia, which has a sign convention for xi that
+    #        # is the opposite of scipy's shape parameter.
+    #        if xi != 0:
+    #            t = mpmath.power(1 + ((x - mu)/sigma)*xi, -1/xi)
+    #        else:
+    #            t = mpmath.exp(-(x - mu)/sigma)
+    #        return 1 - mpmath.exp(-t)
+    #
+    # >>> mpmath.mp.dps = 1000
+    # >>> s = mp_genextreme_sf(mpmath.mp.mpf("1e8"), mpmath.mp.mpf("0.125"))
+    # >>> float(s)
+    # 1.6777205262585625e-57
+    # >>> s = mp_genextreme_sf(mpmath.mp.mpf("7.98"), mpmath.mp.mpf("-0.125"))
+    # >>> float(s)
+    # 1.52587890625e-21
+    # >>> s = mp_genextreme_sf(mpmath.mp.mpf("7.98"), mpmath.mp.mpf("0"))
+    # >>> float(s)
+    # 0.00034218086528426593
+
+    x = 1e8
+    s = stats.genextreme.sf(x, -0.125)
+    assert_allclose(s, 1.6777205262585625e-57)
+    x2 = stats.genextreme.isf(s, -0.125)
+    assert_allclose(x2, x)
+
+    x = 7.98
+    s = stats.genextreme.sf(x, 0.125)
+    assert_allclose(s, 1.52587890625e-21)
+    x2 = stats.genextreme.isf(s, 0.125)
+    assert_allclose(x2, x)
+
+    x = 7.98
+    s = stats.genextreme.sf(x, 0)
+    assert_allclose(s, 0.00034218086528426593)
+    x2 = stats.genextreme.isf(s, 0)
+    assert_allclose(x2, x)
+
+
+def test_burr12_ppf_small_arg():
+    prob = 1e-16
+    quantile = stats.burr12.ppf(prob, 2, 3)
+    # The expected quantile was computed using mpmath:
+    #   >>> import mpmath
+    #   >>> mpmath.mp.dps = 100
+    #   >>> prob = mpmath.mpf('1e-16')
+    #   >>> c = mpmath.mpf(2)
+    #   >>> d = mpmath.mpf(3)
+    #   >>> float(((1-prob)**(-1/d) - 1)**(1/c))
+    #   5.7735026918962575e-09
+    assert_allclose(quantile, 5.7735026918962575e-09)
+
+
+def test_crystalball_function():
+    """
+    All values are calculated using the independent implementation of the
+    ROOT framework (see https://root.cern.ch/).
+    Corresponding ROOT code is given in the comments.
+    """
+    X = np.linspace(-5.0, 5.0, 21)[:-1]
+
+    # for(float x = -5.0; x < 5.0; x+=0.5)
+    #   std::cout << ROOT::Math::crystalball_pdf(x, 1.0, 2.0, 1.0) << ", ";
+    calculated = stats.crystalball.pdf(X, beta=1.0, m=2.0)
+    expected = np.array([0.0202867, 0.0241428, 0.0292128, 0.0360652, 0.045645,
+                         0.059618, 0.0811467, 0.116851, 0.18258, 0.265652,
+                         0.301023, 0.265652, 0.18258, 0.097728, 0.0407391,
+                         0.013226, 0.00334407, 0.000658486, 0.000100982,
+                         1.20606e-05])
+    assert_allclose(expected, calculated, rtol=0.001)
+
+    # for(float x = -5.0; x < 5.0; x+=0.5)
+    #   std::cout << ROOT::Math::crystalball_pdf(x, 2.0, 3.0, 1.0) << ", ";
+    calculated = stats.crystalball.pdf(X, beta=2.0, m=3.0)
+    expected = np.array([0.0019648, 0.00279754, 0.00417592, 0.00663121,
+                         0.0114587, 0.0223803, 0.0530497, 0.12726, 0.237752,
+                         0.345928, 0.391987, 0.345928, 0.237752, 0.12726,
+                         0.0530497, 0.0172227, 0.00435458, 0.000857469,
+                         0.000131497, 1.57051e-05])
+    assert_allclose(expected, calculated, rtol=0.001)
+
+    # for(float x = -5.0; x < 5.0; x+=0.5) {
+    #   std::cout << ROOT::Math::crystalball_pdf(x, 2.0, 3.0, 2.0, 0.5);
+    #   std::cout << ", ";
+    # }
+    calculated = stats.crystalball.pdf(X, beta=2.0, m=3.0, loc=0.5, scale=2.0)
+    expected = np.array([0.00785921, 0.0111902, 0.0167037, 0.0265249,
+                         0.0423866, 0.0636298, 0.0897324, 0.118876, 0.147944,
+                         0.172964, 0.189964, 0.195994, 0.189964, 0.172964,
+                         0.147944, 0.118876, 0.0897324, 0.0636298, 0.0423866,
+                         0.0265249])
+    assert_allclose(expected, calculated, rtol=0.001)
+
+    # for(float x = -5.0; x < 5.0; x+=0.5)
+    #   std::cout << ROOT::Math::crystalball_cdf(x, 1.0, 2.0, 1.0) << ", ";
+    calculated = stats.crystalball.cdf(X, beta=1.0, m=2.0)
+    expected = np.array([0.12172, 0.132785, 0.146064, 0.162293, 0.18258,
+                         0.208663, 0.24344, 0.292128, 0.36516, 0.478254,
+                         0.622723, 0.767192, 0.880286, 0.94959, 0.982834,
+                         0.995314, 0.998981, 0.999824, 0.999976, 0.999997])
+    assert_allclose(expected, calculated, rtol=0.001)
+
+    # for(float x = -5.0; x < 5.0; x+=0.5)
+    #   std::cout << ROOT::Math::crystalball_cdf(x, 2.0, 3.0, 1.0) << ", ";
+    calculated = stats.crystalball.cdf(X, beta=2.0, m=3.0)
+    expected = np.array([0.00442081, 0.00559509, 0.00730787, 0.00994682,
+                         0.0143234, 0.0223803, 0.0397873, 0.0830763, 0.173323,
+                         0.320592, 0.508717, 0.696841, 0.844111, 0.934357,
+                         0.977646, 0.993899, 0.998674, 0.999771, 0.999969,
+                         0.999997])
+    assert_allclose(expected, calculated, rtol=0.001)
+
+    # for(float x = -5.0; x < 5.0; x+=0.5) {
+    #   std::cout << ROOT::Math::crystalball_cdf(x, 2.0, 3.0, 2.0, 0.5);
+    #   std::cout << ", ";
+    # }
+    calculated = stats.crystalball.cdf(X, beta=2.0, m=3.0, loc=0.5, scale=2.0)
+    expected = np.array([0.0176832, 0.0223803, 0.0292315, 0.0397873, 0.0567945,
+                         0.0830763, 0.121242, 0.173323, 0.24011, 0.320592,
+                         0.411731, 0.508717, 0.605702, 0.696841, 0.777324,
+                         0.844111, 0.896192, 0.934357, 0.960639, 0.977646])
+    assert_allclose(expected, calculated, rtol=0.001)
+
+
+def test_crystalball_function_moments():
+    """
+    All values are calculated using the pdf formula and the integrate function
+    of Mathematica
+    """
+    # The last two (beta, m) pairs test the special case m == beta**2
+    beta = np.array([2.0, 1.0, 3.0, 2.0, 3.0])
+    m = np.array([3.0, 3.0, 2.0, 4.0, 9.0])
+
+    # The distribution should be correctly normalised
+    expected_0th_moment = np.array([1.0, 1.0, 1.0, 1.0, 1.0])
+    calculated_0th_moment = stats.crystalball._munp(0, beta, m)
+    assert_allclose(expected_0th_moment, calculated_0th_moment, rtol=0.001)
+
+    # calculated using wolframalpha.com
+    # e.g. for beta = 2 and m = 3 we calculate the norm like this:
+    #   integrate exp(-x^2/2) from -2 to infinity +
+    #   integrate (3/2)^3*exp(-2^2/2)*(3/2-2-x)^(-3) from -infinity to -2
+    norm = np.array([2.5511, 3.01873, 2.51065, 2.53983, 2.507410455])
+
+    a = np.array([-0.21992, -3.03265, np.inf, -0.135335, -0.003174])
+    expected_1th_moment = a / norm
+    calculated_1th_moment = stats.crystalball._munp(1, beta, m)
+    assert_allclose(expected_1th_moment, calculated_1th_moment, rtol=0.001)
+
+    a = np.array([np.inf, np.inf, np.inf, 3.2616, 2.519908])
+    expected_2th_moment = a / norm
+    calculated_2th_moment = stats.crystalball._munp(2, beta, m)
+    assert_allclose(expected_2th_moment, calculated_2th_moment, rtol=0.001)
+
+    a = np.array([np.inf, np.inf, np.inf, np.inf, -0.0577668])
+    expected_3th_moment = a / norm
+    calculated_3th_moment = stats.crystalball._munp(3, beta, m)
+    assert_allclose(expected_3th_moment, calculated_3th_moment, rtol=0.001)
+
+    a = np.array([np.inf, np.inf, np.inf, np.inf, 7.78468])
+    expected_4th_moment = a / norm
+    calculated_4th_moment = stats.crystalball._munp(4, beta, m)
+    assert_allclose(expected_4th_moment, calculated_4th_moment, rtol=0.001)
+
+    a = np.array([np.inf, np.inf, np.inf, np.inf, -1.31086])
+    expected_5th_moment = a / norm
+    calculated_5th_moment = stats.crystalball._munp(5, beta, m)
+    assert_allclose(expected_5th_moment, calculated_5th_moment, rtol=0.001)
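+    # The `norm` constants above can be reproduced numerically; a sketch
+    # with scipy.integrate.quad (illustrative, not run by the tests):
+    #
+    #     from scipy.integrate import quad
+    #     beta, m = 2.0, 3.0
+    #     core, _ = quad(lambda t: np.exp(-t**2/2), -beta, np.inf)
+    #     A = (m/beta)**m * np.exp(-beta**2/2)
+    #     tail, _ = quad(lambda t: A*(m/beta - beta - t)**(-m),
+    #                    -np.inf, -beta)
+    #     # core + tail ~= 2.5511, the first entry of `norm`.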
+
+
+def test_crystalball_entropy():
+    # regression test for gh-13602
+    cb = stats.crystalball(2, 3)
+    res1 = cb.entropy()
+    # -20000 and 30 stand in for negative and positive infinity; the pdf
+    # is negligible beyond them
+    lo, hi, N = -20000, 30, 200000
+    x = np.linspace(lo, hi, N)
+    res2 = trapezoid(entr(cb.pdf(x)), x)
+    assert_allclose(res1, res2, rtol=1e-7)
+
+
+def test_invweibull_fit():
+    """
+    Test fitting invweibull to data.
+
+    Here is the same calculation in R:
+
+    > library(evd)
+    > library(fitdistrplus)
+    > x = c(1, 1.25, 2, 2.5, 2.8,  3, 3.8, 4, 5, 8, 10, 12, 64, 99)
+    > result = fitdist(x, 'frechet', control=list(reltol=1e-13),
+    +                  fix.arg=list(loc=0), start=list(shape=2, scale=3))
+    > result
+    Fitting of the distribution ' frechet ' by maximum likelihood
+    Parameters:
+          estimate Std. Error
+    shape 1.048482  0.2261815
+    scale 3.099456  0.8292887
+    Fixed parameters:
+        value
+    loc     0
+
+    """
+
+    def optimizer(func, x0, args=(), disp=0):
+        return fmin(func, x0, args=args, disp=disp, xtol=1e-12, ftol=1e-12)
+
+    x = np.array([1, 1.25, 2, 2.5, 2.8, 3, 3.8, 4, 5, 8, 10, 12, 64, 99])
+    c, loc, scale = stats.invweibull.fit(x, floc=0, optimizer=optimizer)
+    assert_allclose(c, 1.048482, rtol=5e-6)
+    assert loc == 0
+    assert_allclose(scale, 3.099456, rtol=5e-6)
+
+
+# Expected values were computed with mpmath.
+@pytest.mark.parametrize('x, c, expected',
+                         [(3, 1.5, 0.175064510070713299327),
+                          (2000, 1.5, 1.11802773877318715787e-5),
+                          (2000, 9.25, 2.92060308832269637092e-31),
+                          (1e15, 1.5, 3.16227766016837933199884e-23)])
+def test_invweibull_sf(x, c, expected):
+    computed = stats.invweibull.sf(x, c)
+    assert_allclose(computed, expected, rtol=1e-15)
+
+
+# Expected values were computed with mpmath.
+@pytest.mark.parametrize('p, c, expected',
+                         [(0.5, 2.5, 1.15789669836468183976),
+                          (3e-18, 5, 3195.77171838060906447)])
+def test_invweibull_isf(p, c, expected):
+    computed = stats.invweibull.isf(p, c)
+    assert_allclose(computed, expected, rtol=1e-15)
+
+
+@pytest.mark.parametrize(
+    'df1,df2,x',
+    [(2, 2, [-0.5, 0.2, 1.0, 2.3]),
+     (4, 11, [-0.5, 0.2, 1.0, 2.3]),
+     (7, 17, [1, 2, 3, 4, 5])]
+)
+def test_ncf_edge_case(df1, df2, x):
+    # Test for edge case described in gh-11660.
+    # Non-central Fisher distribution when nc = 0
+    # should be the same as Fisher distribution.
+    nc = 0
+    expected_cdf = stats.f.cdf(x, df1, df2)
+    calculated_cdf = stats.ncf.cdf(x, df1, df2, nc)
+    assert_allclose(expected_cdf, calculated_cdf, rtol=1e-14)
+
+    # When ncf_gen._skip_pdf is used instead of the generic pdf, this
+    # additional test will be useful.
+    expected_pdf = stats.f.pdf(x, df1, df2)
+    calculated_pdf = stats.ncf.pdf(x, df1, df2, nc)
+    assert_allclose(expected_pdf, calculated_pdf, rtol=1e-6)
+
+
+def test_ncf_variance():
+    # Regression test for gh-10658 (incorrect variance formula for ncf).
+    # The correct value of ncf.var(2, 6, 4), 42.75, can be verified with, for
+    # example, Wolfram Alpha with the expression
+    #     Variance[NoncentralFRatioDistribution[2, 6, 4]]
+    # or with the implementation of the noncentral F distribution in the C++
+    # library Boost.
+    v = stats.ncf.var(2, 6, 4)
+    assert_allclose(v, 42.75, rtol=1e-14)
+
+
+def test_ncf_cdf_spotcheck():
+    # Regression test for gh-15582 testing against values from R/MATLAB
+    # Generate check_val from R or MATLAB as follows:
+    #          R: pf(20, df1 = 6, df2 = 33, ncp = 30.4) = 0.998921
+    #     MATLAB: ncfcdf(20, 6, 33, 30.4) = 0.998921
+    scipy_val = stats.ncf.cdf(20, 6, 33, 30.4)
+    check_val = 0.998921
+    assert_allclose(check_val, np.round(scipy_val, decimals=6))
+
+
+@pytest.mark.skipif(sys.maxsize <= 2**32,
+                    reason="On some 32-bit systems the warning is not raised")
+def test_ncf_ppf_issue_17026():
+    # Regression test for gh-17026
+    x = np.linspace(0, 1, 600)
+    x[0] = 1e-16
+    par = (0.1, 2, 5, 0, 1)
+    with pytest.warns(RuntimeWarning):
+        q = stats.ncf.ppf(x, *par)
+        q0 = [stats.ncf.ppf(xi, *par) for xi in x]
+    assert_allclose(q, q0)
+
+
+class TestHistogram:
+    def setup_method(self):
+        np.random.seed(1234)
+
+        # We have 8 bins
+        # [1,2), [2,3), [3,4), [4,5), [5,6), [6,7), [7,8), [8,9)
+        # But np.histogram actually puts the final 9 into the [8,9) bin as
+        # well, so the values below for the last bin differ slightly from
+        # what you might expect.
+        histogram = np.histogram([1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5,
+                                  6, 6, 6, 6, 7, 7, 7, 8, 8, 9], bins=8)
+        self.template = stats.rv_histogram(histogram)
+
+        data = stats.norm.rvs(loc=1.0, scale=2.5, size=10000, random_state=123)
+        norm_histogram = np.histogram(data, bins=50)
+        self.norm_template = stats.rv_histogram(norm_histogram)
+
+    def test_pdf(self):
+        values = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5,
+                           5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5])
+        pdf_values = np.asarray([0.0/25.0, 0.0/25.0, 1.0/25.0, 1.0/25.0,
+                                 2.0/25.0, 2.0/25.0, 3.0/25.0, 3.0/25.0,
+                                 4.0/25.0, 4.0/25.0, 5.0/25.0, 5.0/25.0,
+                                 4.0/25.0, 4.0/25.0, 3.0/25.0, 3.0/25.0,
+                                 3.0/25.0, 3.0/25.0, 0.0/25.0, 0.0/25.0])
+        assert_allclose(self.template.pdf(values), pdf_values)
+
+        # Test explicitly the corner cases:
+        # As stated above, the pdf in the bin [8,9) is greater than one
+        # would naively expect because np.histogram put the 9 into the
+        # [8,9) bin.
+        assert_almost_equal(self.template.pdf(8.0), 3.0/25.0)
+        assert_almost_equal(self.template.pdf(8.5), 3.0/25.0)
+        # 9 is outside our defined bins [8,9), hence the pdf is already 0.
+        # For a continuous distribution this is fine, because a single value
+        # does not have a finite probability.
+        assert_almost_equal(self.template.pdf(9.0), 0.0/25.0)
+        assert_almost_equal(self.template.pdf(10.0), 0.0/25.0)
+
+        x = np.linspace(-2, 2, 10)
+        assert_allclose(self.norm_template.pdf(x),
+                        stats.norm.pdf(x, loc=1.0, scale=2.5), rtol=0.1)
+
+    def test_cdf_ppf(self):
+        values = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5,
+                           5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5])
+        cdf_values = np.asarray([0.0/25.0, 0.0/25.0, 0.0/25.0, 0.5/25.0,
+                                 1.0/25.0, 2.0/25.0, 3.0/25.0, 4.5/25.0,
+                                 6.0/25.0, 8.0/25.0, 10.0/25.0, 12.5/25.0,
+                                 15.0/25.0, 17.0/25.0, 19.0/25.0, 20.5/25.0,
+                                 22.0/25.0, 23.5/25.0, 25.0/25.0, 25.0/25.0])
+        assert_allclose(self.template.cdf(values), cdf_values)
+        # The first three and last two values in cdf_values are not unique
+        assert_allclose(self.template.ppf(cdf_values[2:-1]), values[2:-1])
+
+        # Test that cdf and ppf are inverse functions
+        x = np.linspace(1.0, 9.0, 100)
+        assert_allclose(self.template.ppf(self.template.cdf(x)), x)
+        x = np.linspace(0.0, 1.0, 100)
+        assert_allclose(self.template.cdf(self.template.ppf(x)), x)
+
+        x = np.linspace(-2, 2, 10)
+        assert_allclose(self.norm_template.cdf(x),
+                        stats.norm.cdf(x, loc=1.0, scale=2.5), rtol=0.1)
+
+    def test_rvs(self):
+        N = 10000
+        sample = self.template.rvs(size=N, random_state=123)
+        assert_equal(np.sum(sample < 1.0), 0.0)
+        assert_allclose(np.sum(sample <= 2.0), 1.0/25.0 * N, rtol=0.2)
+        assert_allclose(np.sum(sample <= 2.5), 2.0/25.0 * N, rtol=0.2)
+        assert_allclose(np.sum(sample <= 3.0), 3.0/25.0 * N, rtol=0.1)
+        assert_allclose(np.sum(sample <= 3.5), 4.5/25.0 * N, rtol=0.1)
+        assert_allclose(np.sum(sample <= 4.0), 6.0/25.0 * N, rtol=0.1)
+        assert_allclose(np.sum(sample <= 4.5), 8.0/25.0 * N, rtol=0.1)
+        assert_allclose(np.sum(sample <= 5.0), 10.0/25.0 * N, rtol=0.05)
+        assert_allclose(np.sum(sample <= 5.5), 12.5/25.0 * N, rtol=0.05)
+        assert_allclose(np.sum(sample <= 6.0), 15.0/25.0 * N, rtol=0.05)
+        assert_allclose(np.sum(sample <= 6.5), 17.0/25.0 * N, rtol=0.05)
+        assert_allclose(np.sum(sample <= 7.0), 19.0/25.0 * N, rtol=0.05)
+        assert_allclose(np.sum(sample <= 7.5), 20.5/25.0 * N, rtol=0.05)
+        assert_allclose(np.sum(sample <= 8.0), 22.0/25.0 * N, rtol=0.05)
+        assert_allclose(np.sum(sample <= 8.5), 23.5/25.0 * N, rtol=0.05)
+        assert_allclose(np.sum(sample <= 9.0), 25.0/25.0 * N, rtol=0.05)
+        assert_equal(np.sum(sample > 9.0), 0.0)
+
+    def test_munp(self):
+        for n in range(4):
+            assert_allclose(self.norm_template._munp(n),
+                            stats.norm(1.0, 2.5).moment(n), rtol=0.05)
+
+    def test_entropy(self):
+        assert_allclose(self.norm_template.entropy(),
+                        stats.norm.entropy(loc=1.0, scale=2.5), rtol=0.05)
+
+
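+# A small sketch (not part of the original tests) of the np.histogram edge
+# case relied on in TestHistogram above: the final bin is closed on both
+# sides, so a sample equal to the last bin edge lands in the last bin.
+def test_histogram_last_bin_closed_sketch():
+    counts, _ = np.histogram([8.5, 9.0], bins=[8, 9])
+    assert counts[0] == 2  # 9.0 falls in the closed final bin [8, 9]
+
+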
+def test_histogram_non_uniform():
+    # Test that rv_histogram works even for non-uniform bin widths
+    counts, bins = ([1, 1], [0, 1, 1001])
+
+    dist = stats.rv_histogram((counts, bins), density=False)
+    np.testing.assert_allclose(dist.pdf([0.5, 200]), [0.5, 0.0005])
+    assert dist.median() == 1
+
+    dist = stats.rv_histogram((counts, bins), density=True)
+    np.testing.assert_allclose(dist.pdf([0.5, 200]), 1/1001)
+    assert dist.median() == 1001/2
+
+    # Omitting density produces a warning for non-uniform bins...
+    message = "Bin widths are not constant. Assuming..."
+    with pytest.warns(RuntimeWarning, match=message):
+        dist = stats.rv_histogram((counts, bins))
+        assert dist.median() == 1001/2  # default is like `density=True`
+
+    # ... but not for uniform bins
+    dist = stats.rv_histogram((counts, [0, 1, 2]))
+    assert dist.median() == 1
+
+
+class TestLogUniform:
+    def test_alias(self):
+        # This test makes sure that "reciprocal" and "loguniform" are
+        # aliases of the same distribution and that both are log-uniform
+        rng = np.random.default_rng(98643218961)
+        rv = stats.loguniform(10 ** -3, 10 ** 0)
+        rvs = rv.rvs(size=10000, random_state=rng)
+
+        rng = np.random.default_rng(98643218961)
+        rv2 = stats.reciprocal(10 ** -3, 10 ** 0)
+        rvs2 = rv2.rvs(size=10000, random_state=rng)
+
+        assert_allclose(rvs2, rvs)
+
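+        # log-uniformity check: log10 of the samples should be roughly
+        # uniform on [-3, 0], so each of the 10 bins should hold about
+        # 1000 of the 10000 samples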
+        vals, _ = np.histogram(np.log10(rvs), bins=10)
+        assert 900 <= vals.min() <= vals.max() <= 1100
+        assert np.abs(np.median(vals) - 1000) <= 10
+
+    @pytest.mark.parametrize("method", ['mle', 'mm'])
+    def test_fit_override(self, method):
+        # loguniform is overparameterized, so check that fit override enforces
+        # scale=1 unless fscale is provided by the user
+        rng = np.random.default_rng(98643218961)
+        rvs = stats.loguniform.rvs(0.1, 1, size=1000, random_state=rng)
+
+        a, b, loc, scale = stats.loguniform.fit(rvs, method=method)
+        assert scale == 1
+
+        a, b, loc, scale = stats.loguniform.fit(rvs, fscale=2, method=method)
+        assert scale == 2
+
+
+class TestArgus:
+    def test_argus_rvs_large_chi(self):
+        # test that the algorithm can handle large values of chi
+        x = stats.argus.rvs(50, size=500, random_state=325)
+        assert_almost_equal(stats.argus(50).mean(), x.mean(), decimal=4)
+
+    @pytest.mark.parametrize('chi, random_state', [
+            [0.1, 325],   # chi <= 0.5: rejection method case 1
+            [1.3, 155],   # 0.5 < chi <= 1.8: rejection method case 2
+            [3.5, 135]    # chi > 1.8: transform conditional Gamma distribution
+        ])
+    def test_rvs(self, chi, random_state):
+        x = stats.argus.rvs(chi, size=500, random_state=random_state)
+        _, p = stats.kstest(x, "argus", (chi, ))
+        assert_(p > 0.05)
+
+    @pytest.mark.parametrize('chi', [1e-9, 1e-6])
+    def test_rvs_small_chi(self, chi):
+        # test for gh-11699 => rejection method case 1 can even handle chi=0
+        # the CDF of the distribution for chi=0 is 1 - (1 - x**2)**(3/2)
+        # test rvs against distribution of limit chi=0
+        r = stats.argus.rvs(chi, size=500, random_state=890981)
+        _, p = stats.kstest(r, lambda x: 1 - (1 - x**2)**(3/2))
+        assert_(p > 0.05)
+
+    # Expected values were computed with mpmath.
+    @pytest.mark.parametrize('chi, expected_mean',
+                             [(1, 0.6187026683551835),
+                              (10, 0.984805536783744),
+                              (40, 0.9990617659702923),
+                              (60, 0.9995831885165300),
+                              (99, 0.9998469348663028)])
+    def test_mean(self, chi, expected_mean):
+        m = stats.argus.mean(chi, scale=1)
+        assert_allclose(m, expected_mean, rtol=1e-13)
+
+    # Expected values were computed with mpmath.
+    @pytest.mark.parametrize('chi, expected_var, rtol',
+                             [(1, 0.05215651254197807, 1e-13),
+                              (10, 0.00015805472008165595, 1e-11),
+                              (40, 5.877763210262901e-07, 1e-8),
+                              (60, 1.1590179389611416e-07, 1e-8),
+                              (99, 1.5623277006064666e-08, 1e-8)])
+    def test_var(self, chi, expected_var, rtol):
+        v = stats.argus.var(chi, scale=1)
+        assert_allclose(v, expected_var, rtol=rtol)
+
+    # Expected values were computed with mpmath (code: see gh-13370).
+    @pytest.mark.parametrize('chi, expected, rtol',
+                             [(0.9, 0.07646314974436118, 1e-14),
+                              (0.5, 0.015429797891863365, 1e-14),
+                              (0.1, 0.0001325825293278049, 1e-14),
+                              (0.01, 1.3297677078224565e-07, 1e-15),
+                              (1e-3, 1.3298072023958999e-10, 1e-14),
+                              (1e-4, 1.3298075973486862e-13, 1e-14),
+                              (1e-6, 1.32980760133771e-19, 1e-14),
+                              (1e-9, 1.329807601338109e-28, 1e-15)])
+    def test_argus_phi_small_chi(self, chi, expected, rtol):
+        assert_allclose(_argus_phi(chi), expected, rtol=rtol)
+
+    # Expected values were computed with mpmath (code: see gh-13370).
+    @pytest.mark.parametrize(
+        'chi, expected',
+        [(0.5, (0.28414073302940573, 1.2742227939992954, 1.2381254688255896)),
+         (0.2, (0.296172952995264, 1.2951290588110516, 1.1865767100877576)),
+         (0.1, (0.29791447523536274, 1.29806307956989, 1.1793168289857412)),
+         (0.01, (0.2984904104866452, 1.2990283628160553, 1.1769268414080531)),
+         (1e-3, (0.298496172925224, 1.2990380082487925, 1.176902956021053)),
+         (1e-4, (0.29849623054991836, 1.2990381047023793, 1.1769027171686324)),
+         (1e-6, (0.2984962311319278, 1.2990381056765605, 1.1769027147562232)),
+         (1e-9, (0.298496231131986, 1.299038105676658, 1.1769027147559818))])
+    def test_pdf_small_chi(self, chi, expected):
+        x = np.array([0.1, 0.5, 0.9])
+        assert_allclose(stats.argus.pdf(x, chi), expected, rtol=1e-13)
+
+    # Expected values were computed with mpmath (code: see gh-13370).
+    @pytest.mark.parametrize(
+        'chi, expected',
+        [(0.5, (0.9857660526895221, 0.6616565930168475, 0.08796070398429937)),
+         (0.2, (0.9851555052359501, 0.6514666238985464, 0.08362690023746594)),
+         (0.1, (0.9850670974995661, 0.6500061310508574, 0.08302050640683846)),
+         (0.01, (0.9850378582451867, 0.6495239242251358, 0.08282109244852445)),
+         (1e-3, (0.9850375656906663, 0.6495191015522573, 0.08281910005231098)),
+         (1e-4, (0.9850375627651049, 0.6495190533254682, 0.08281908012852317)),
+         (1e-6, (0.9850375627355568, 0.6495190528383777, 0.08281907992729293)),
+         (1e-9, (0.9850375627355538, 0.649519052838329, 0.0828190799272728))])
+    def test_sf_small_chi(self, chi, expected):
+        x = np.array([0.1, 0.5, 0.9])
+        assert_allclose(stats.argus.sf(x, chi), expected, rtol=1e-14)
+
+    # Expected values were computed with mpmath (code: see gh-13370).
+    @pytest.mark.parametrize(
+        'chi, expected',
+        [(0.5, (0.0142339473104779, 0.3383434069831524, 0.9120392960157007)),
+         (0.2, (0.014844494764049919, 0.34853337610145363, 0.916373099762534)),
+         (0.1, (0.014932902500433911, 0.34999386894914264, 0.9169794935931616)),
+         (0.01, (0.014962141754813293, 0.35047607577486417, 0.9171789075514756)),
+         (1e-3, (0.01496243430933372, 0.35048089844774266, 0.917180899947689)),
+         (1e-4, (0.014962437234895118, 0.3504809466745317, 0.9171809198714769)),
+         (1e-6, (0.01496243726444329, 0.3504809471616223, 0.9171809200727071)),
+         (1e-9, (0.014962437264446245, 0.350480947161671, 0.9171809200727272))])
+    def test_cdf_small_chi(self, chi, expected):
+        x = np.array([0.1, 0.5, 0.9])
+        assert_allclose(stats.argus.cdf(x, chi), expected, rtol=1e-12)
+
+    # Expected values were computed with mpmath (code: see gh-13370).
+    @pytest.mark.parametrize(
+        'chi, expected, rtol',
+        [(0.5, (0.5964284712757741, 0.052890651988588604), 1e-12),
+         (0.101, (0.5893490968089076, 0.053017469847275685), 1e-11),
+         (0.1, (0.5893431757009437, 0.05301755449499372), 1e-13),
+         (0.01, (0.5890515677940915, 0.05302167905837031), 1e-13),
+         (1e-3, (0.5890486520005177, 0.053021719862088104), 1e-13),
+         (1e-4, (0.5890486228426105, 0.0530217202700811), 1e-13),
+         (1e-6, (0.5890486225481156, 0.05302172027420182), 1e-13),
+         (1e-9, (0.5890486225480862, 0.05302172027420224), 1e-13)])
+    def test_stats_small_chi(self, chi, expected, rtol):
+        val = stats.argus.stats(chi, moments='mv')
+        assert_allclose(val, expected, rtol=rtol)
+
+
+class TestNakagami:
+
+    def test_logpdf(self):
+        # Test nakagami logpdf for an input where the PDF is smaller
+        # than can be represented with 64 bit floating point.
+        # The expected value of logpdf was computed with mpmath:
+        #
+        #   def logpdf(x, nu):
+        #       x = mpmath.mpf(x)
+        #       nu = mpmath.mpf(nu)
+        #       return (mpmath.log(2) + nu*mpmath.log(nu) -
+        #               mpmath.loggamma(nu) + (2*nu - 1)*mpmath.log(x) -
+        #               nu*x**2)
+        #
+        nu = 2.5
+        x = 25
+        logp = stats.nakagami.logpdf(x, nu)
+        assert_allclose(logp, -1546.9253055607549)
+
+    def test_sf_isf(self):
+        # Test nakagami sf and isf when the survival function
+        # value is very small.
+        # The expected value of the survival function was computed
+        # with mpmath:
+        #
+        #   def sf(x, nu):
+        #       x = mpmath.mpf(x)
+        #       nu = mpmath.mpf(nu)
+        #       return mpmath.gammainc(nu, nu*x*x, regularized=True)
+        #
+        nu = 2.5
+        x0 = 5.0
+        sf = stats.nakagami.sf(x0, nu)
+        assert_allclose(sf, 2.736273158588307e-25, rtol=1e-13)
+        # Check round trip back to x0.
+        x1 = stats.nakagami.isf(sf, nu)
+        assert_allclose(x1, x0, rtol=1e-13)
+
+    @pytest.mark.xfail(reason="Fit of nakagami not reliable, see gh-10908.")
+    @pytest.mark.parametrize('nu', [1.6, 2.5, 3.9])
+    @pytest.mark.parametrize('loc', [25.0, 10, 35])
+    @pytest.mark.parametrize('scale', [13, 5, 20])
+    def test_fit(self, nu, loc, scale):
+        # Regression test for gh-13396 (21/27 cases failed previously)
+        # The first tuple of the parameters' values is discussed in gh-10908
+        N = 100
+        samples = stats.nakagami.rvs(size=N, nu=nu, loc=loc,
+                                     scale=scale, random_state=1337)
+        nu_est, loc_est, scale_est = stats.nakagami.fit(samples)
+        assert_allclose(nu_est, nu, rtol=0.2)
+        assert_allclose(loc_est, loc, rtol=0.2)
+        assert_allclose(scale_est, scale, rtol=0.2)
+
+        def dlogl_dnu(nu, loc, scale):
+            return ((-2*nu + 1) * np.sum(1/(samples - loc))
+                    + 2*nu/scale**2 * np.sum(samples - loc))
+
+        def dlogl_dloc(nu, loc, scale):
+            return (N * (1 + np.log(nu) - polygamma(0, nu)) +
+                    2 * np.sum(np.log((samples - loc) / scale))
+                    - np.sum(((samples - loc) / scale)**2))
+
+        def dlogl_dscale(nu, loc, scale):
+            return (- 2 * N * nu / scale
+                    + 2 * nu / scale ** 3 * np.sum((samples - loc) ** 2))
+
+        assert_allclose(dlogl_dnu(nu_est, loc_est, scale_est), 0, atol=1e-3)
+        assert_allclose(dlogl_dloc(nu_est, loc_est, scale_est), 0, atol=1e-3)
+        assert_allclose(dlogl_dscale(nu_est, loc_est, scale_est), 0, atol=1e-3)
+
+    @pytest.mark.parametrize('loc', [25.0, 10, 35])
+    @pytest.mark.parametrize('scale', [13, 5, 20])
+    def test_fit_nu(self, loc, scale):
+        # For nu = 0.5, we have analytical values for
+        # the MLE of the loc and the scale
+        nu = 0.5
+        n = 100
+        samples = stats.nakagami.rvs(size=n, nu=nu, loc=loc,
+                                     scale=scale, random_state=1337)
+        nu_est, loc_est, scale_est = stats.nakagami.fit(samples, f0=nu)
+
+        # Analytical values
+        loc_theo = np.min(samples)
+        scale_theo = np.sqrt(np.mean((samples - loc_est) ** 2))
+
+        assert_allclose(nu_est, nu, rtol=1e-7)
+        assert_allclose(loc_est, loc_theo, rtol=1e-7)
+        assert_allclose(scale_est, scale_theo, rtol=1e-7)
+
+
+class TestWrapCauchy:
+
+    def test_cdf_shape_broadcasting(self):
+        # Regression test for gh-13791.
+        # Check that wrapcauchy.cdf broadcasts the shape parameter
+        # correctly.
+        c = np.array([[0.03, 0.25], [0.5, 0.75]])
+        x = np.array([[1.0], [4.0]])
+        p = stats.wrapcauchy.cdf(x, c)
+        assert p.shape == (2, 2)
+        scalar_values = [stats.wrapcauchy.cdf(x1, c1)
+                         for (x1, c1) in np.nditer((x, c))]
+        assert_allclose(p.ravel(), scalar_values, rtol=1e-13)
+
+    def test_cdf_center(self):
+        p = stats.wrapcauchy.cdf(np.pi, 0.03)
+        assert_allclose(p, 0.5, rtol=1e-14)
+
+    def test_cdf(self):
+        x1 = 1.0  # less than pi
+        x2 = 4.0  # greater than pi
+        c = 0.75
+        p = stats.wrapcauchy.cdf([x1, x2], c)
+        cr = (1 + c)/(1 - c)
+        assert_allclose(p[0], np.arctan(cr*np.tan(x1/2))/np.pi)
+        assert_allclose(p[1], 1 - np.arctan(cr*np.tan(np.pi - x2/2))/np.pi)
+
+
+def test_rvs_no_size_error():
+    # _rvs methods must have parameter `size`; see gh-11394
+    class rvs_no_size_gen(stats.rv_continuous):
+        def _rvs(self):
+            return 1
+
+    rvs_no_size = rvs_no_size_gen(name='rvs_no_size')
+
+    with assert_raises(TypeError, match=r"_rvs\(\) got (an|\d) unexpected"):
+        rvs_no_size.rvs()
+
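+
+# For contrast, a minimal sketch (not from the original suite) of a
+# conforming subclass: `_rvs` accepts `size` (and `random_state`), so
+# `rvs` works; all names here are illustrative.
+def test_rvs_with_size_sketch():
+    class rvs_with_size_gen(stats.rv_continuous):
+        def _rvs(self, size=None, random_state=None):
+            return np.ones(size)
+
+    rvs_with_size = rvs_with_size_gen(name='rvs_with_size')
+    assert rvs_with_size.rvs(size=3).shape == (3,)
+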
+
+@pytest.mark.parametrize('distname, args', invdistdiscrete + invdistcont)
+def test_support_gh13294_regression(distname, args):
+    if distname in skip_test_support_gh13294_regression:
+        pytest.skip(f"skipping test for the support method for "
+                    f"distribution {distname}.")
+    dist = getattr(stats, distname)
+    # test support method with invalid arguments
+    if isinstance(dist, stats.rv_continuous):
+        # test with valid scale
+        if len(args) != 0:
+            a0, b0 = dist.support(*args)
+            assert_equal(a0, np.nan)
+            assert_equal(b0, np.nan)
+        # test with invalid scale
+        # For distributions that take no shape parameters, only the
+        # invalid-scale case occurs, so it is implicitly tested by
+        # this test case.
+        loc1, scale1 = 0, -1
+        a1, b1 = dist.support(*args, loc1, scale1)
+        assert_equal(a1, np.nan)
+        assert_equal(b1, np.nan)
+    else:
+        a, b = dist.support(*args)
+        assert_equal(a, np.nan)
+        assert_equal(b, np.nan)
+
+
+def test_support_broadcasting_gh13294_regression():
+    a0, b0 = stats.norm.support([0, 0, 0, 1], [1, 1, 1, -1])
+    ex_a0 = np.array([-np.inf, -np.inf, -np.inf, np.nan])
+    ex_b0 = np.array([np.inf, np.inf, np.inf, np.nan])
+    assert_equal(a0, ex_a0)
+    assert_equal(b0, ex_b0)
+    assert a0.shape == ex_a0.shape
+    assert b0.shape == ex_b0.shape
+
+    a1, b1 = stats.norm.support([], [])
+    ex_a1, ex_b1 = np.array([]), np.array([])
+    assert_equal(a1, ex_a1)
+    assert_equal(b1, ex_b1)
+    assert a1.shape == ex_a1.shape
+    assert b1.shape == ex_b1.shape
+
+    a2, b2 = stats.norm.support([0, 0, 0, 1], [-1])
+    ex_a2 = np.array(4*[np.nan])
+    ex_b2 = np.array(4*[np.nan])
+    assert_equal(a2, ex_a2)
+    assert_equal(b2, ex_b2)
+    assert a2.shape == ex_a2.shape
+    assert b2.shape == ex_b2.shape
+
+
+def test_stats_broadcasting_gh14953_regression():
+    # test case in gh14953
+    loc = [0., 0.]
+    scale = [[1.], [2.], [3.]]
+    assert_equal(stats.norm.var(loc, scale), [[1., 1.], [4., 4.], [9., 9.]])
+    # test some edge cases
+    loc = np.empty((0, ))
+    scale = np.empty((1, 0))
+    assert stats.norm.var(loc, scale).shape == (1, 0)
+
+
+# Check a few values of the cosine distribution's cdf, sf, ppf and
+# isf methods.  Expected values were computed with mpmath.
+
+@pytest.mark.parametrize('x, expected',
+                         [(-3.14159, 4.956444476505336e-19),
+                          (3.14, 0.9999999998928399)])
+def test_cosine_cdf_sf(x, expected):
+    assert_allclose(stats.cosine.cdf(x), expected)
+    assert_allclose(stats.cosine.sf(-x), expected)
+
+
+@pytest.mark.parametrize('p, expected',
+                         [(1e-6, -3.1080612413765905),
+                          (1e-17, -3.141585429601399),
+                          (0.975, 2.1447547020964923)])
+def test_cosine_ppf_isf(p, expected):
+    assert_allclose(stats.cosine.ppf(p), expected)
+    assert_allclose(stats.cosine.isf(p), -expected)
+
+
+def test_cosine_logpdf_endpoints():
+    logp = stats.cosine.logpdf([-np.pi, np.pi])
+    assert_equal(logp, [-np.inf, -np.inf])
+
+
+def test_distr_params_lists():
+    # Distribution objects are extra distributions added in
+    # test_discrete_basic. All other entries are strings (names), so we
+    # compare only those to check whether both lists match.
+    discrete_distnames = {name for name, _ in distdiscrete
+                          if isinstance(name, str)}
+    invdiscrete_distnames = {name for name, _ in invdistdiscrete}
+    assert discrete_distnames == invdiscrete_distnames
+
+    cont_distnames = {name for name, _ in distcont}
+    invcont_distnames = {name for name, _ in invdistcont}
+    assert cont_distnames == invcont_distnames
+
+
+def test_moment_order_4():
+    # gh-13655 reported that if a distribution has a `_stats` method that
+    # accepts the `moments` parameter, then if the distribution's `moment`
+    # method is called with `order=4`, the faster/more accurate `_stats` gets
+    # called, but the results aren't used, and the generic `_munp` method is
+    # called to calculate the moment anyway. This tests that the issue has
+    # been fixed.
+    # stats.skewnorm._stats accepts the `moments` keyword
+    stats.skewnorm._stats(a=0, moments='k')  # no failure = has `moments`
+    # When `moment` is called, `_stats` is used, so the moment is very accurate
+    # (exactly equal to Pearson's kurtosis of the normal distribution, 3)
+    assert stats.skewnorm.moment(order=4, a=0) == 3.0
+    # At the time of gh-13655, skewnorm._munp() used the generic method
+    # to compute its result, which was inefficient and not very accurate.
+    # At that time, the following assertion would fail.  skewnorm._munp()
+    # has since been made more accurate and efficient, so now this test
+    # is expected to pass.
+    assert stats.skewnorm._munp(4, 0) == 3.0
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_entropy.py b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_entropy.py
new file mode 100644
index 00000000..98601cb6
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_entropy.py
@@ -0,0 +1,287 @@
+
+import numpy as np
+from numpy.testing import assert_equal, assert_allclose
+# avoid new uses of the following; prefer assert/np.testing.assert_allclose
+from numpy.testing import (assert_, assert_almost_equal,
+                           assert_array_almost_equal)
+
+import pytest
+from pytest import raises as assert_raises
+import scipy.stats as stats
+
+
+class TestEntropy:
+    def test_entropy_positive(self):
+        # See ticket #497
+        pk = [0.5, 0.2, 0.3]
+        qk = [0.1, 0.25, 0.65]
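+        # with `qk` given, stats.entropy computes the relative entropy
+        # (KL divergence): zero when pk == qk, nonnegative otherwise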
+        eself = stats.entropy(pk, pk)
+        edouble = stats.entropy(pk, qk)
+        assert_(0.0 == eself)
+        assert_(edouble >= 0.0)
+
+    def test_entropy_base(self):
+        pk = np.ones(16, float)
+        S = stats.entropy(pk, base=2.)
+        assert_(abs(S - 4.) < 1.e-5)
+
+        qk = np.ones(16, float)
+        qk[:8] = 2.
+        S = stats.entropy(pk, qk)
+        S2 = stats.entropy(pk, qk, base=2.)
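+        # entropy with base b is the natural-log entropy divided by ln(b),
+        # so S / S2 should equal log(2)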
+        assert_(abs(S/S2 - np.log(2.)) < 1.e-5)
+
+    def test_entropy_zero(self):
+        # Test for PR-479
+        assert_almost_equal(stats.entropy([0, 1, 2]), 0.63651416829481278,
+                            decimal=12)
+
+    def test_entropy_2d(self):
+        pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
+        qk = [[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]]
+        assert_array_almost_equal(stats.entropy(pk, qk),
+                                  [0.1933259, 0.18609809])
+
+    def test_entropy_2d_zero(self):
+        pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
+        qk = [[0.0, 0.1], [0.3, 0.6], [0.5, 0.3]]
+        assert_array_almost_equal(stats.entropy(pk, qk),
+                                  [np.inf, 0.18609809])
+
+        pk[0][0] = 0.0
+        assert_array_almost_equal(stats.entropy(pk, qk),
+                                  [0.17403988, 0.18609809])
+
+    def test_entropy_base_2d_nondefault_axis(self):
+        pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
+        assert_array_almost_equal(stats.entropy(pk, axis=1),
+                                  [0.63651417, 0.63651417, 0.66156324])
+
+    def test_entropy_2d_nondefault_axis(self):
+        pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
+        qk = [[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]]
+        assert_array_almost_equal(stats.entropy(pk, qk, axis=1),
+                                  [0.231049, 0.231049, 0.127706])
+
+    def test_entropy_raises_value_error(self):
+        pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
+        qk = [[0.1, 0.2], [0.6, 0.3]]
+        assert_raises(ValueError, stats.entropy, pk, qk)
+
+    def test_base_entropy_with_axis_0_is_equal_to_default(self):
+        pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
+        assert_array_almost_equal(stats.entropy(pk, axis=0),
+                                  stats.entropy(pk))
+
+    def test_entropy_with_axis_0_is_equal_to_default(self):
+        pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
+        qk = [[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]]
+        assert_array_almost_equal(stats.entropy(pk, qk, axis=0),
+                                  stats.entropy(pk, qk))
+
+    def test_base_entropy_transposed(self):
+        pk = np.array([[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]])
+        assert_array_almost_equal(stats.entropy(pk.T).T,
+                                  stats.entropy(pk, axis=1))
+
+    def test_entropy_transposed(self):
+        pk = np.array([[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]])
+        qk = np.array([[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]])
+        assert_array_almost_equal(stats.entropy(pk.T, qk.T).T,
+                                  stats.entropy(pk, qk, axis=1))
+
+    def test_entropy_broadcasting(self):
+        np.random.seed(0)
+        x = np.random.rand(3)
+        y = np.random.rand(2, 1)
+        res = stats.entropy(x, y, axis=-1)
+        assert_equal(res[0], stats.entropy(x, y[0]))
+        assert_equal(res[1], stats.entropy(x, y[1]))
+
+    def test_entropy_shape_mismatch(self):
+        x = np.random.rand(10, 1, 12)
+        y = np.random.rand(11, 2)
+        message = "shape mismatch: objects cannot be broadcast"
+        with pytest.raises(ValueError, match=message):
+            stats.entropy(x, y)
+
+    def test_input_validation(self):
+        x = np.random.rand(10)
+        message = "`base` must be a positive number."
+        with pytest.raises(ValueError, match=message):
+            stats.entropy(x, base=-2)
+
+
+class TestDifferentialEntropy:
+    """
+    Vasicek results are compared with the R package vsgoftest.
+
+    # library(vsgoftest)
+    #
+    # samp <- c()
+    # entropy.estimate(x = samp, window = )
+
+    """
+
+    def test_differential_entropy_vasicek(self):
+
+        random_state = np.random.RandomState(0)
+        values = random_state.standard_normal(100)
+
+        entropy = stats.differential_entropy(values, method='vasicek')
+        assert_allclose(entropy, 1.342551, rtol=1e-6)
+
+        entropy = stats.differential_entropy(values, window_length=1,
+                                             method='vasicek')
+        assert_allclose(entropy, 1.122044, rtol=1e-6)
+
+        entropy = stats.differential_entropy(values, window_length=8,
+                                             method='vasicek')
+        assert_allclose(entropy, 1.349401, rtol=1e-6)
+
+    def test_differential_entropy_vasicek_2d_nondefault_axis(self):
+        random_state = np.random.RandomState(0)
+        values = random_state.standard_normal((3, 100))
+
+        entropy = stats.differential_entropy(values, axis=1, method='vasicek')
+        assert_allclose(
+            entropy,
+            [1.342551, 1.341826, 1.293775],
+            rtol=1e-6,
+        )
+
+        entropy = stats.differential_entropy(values, axis=1, window_length=1,
+                                             method='vasicek')
+        assert_allclose(
+            entropy,
+            [1.122044, 1.102944, 1.129616],
+            rtol=1e-6,
+        )
+
+        entropy = stats.differential_entropy(values, axis=1, window_length=8,
+                                             method='vasicek')
+        assert_allclose(
+            entropy,
+            [1.349401, 1.338514, 1.292332],
+            rtol=1e-6,
+        )
+
+    def test_differential_entropy_raises_value_error(self):
+        random_state = np.random.RandomState(0)
+        values = random_state.standard_normal((3, 100))
+
+        error_str = (
+            r"Window length \({window_length}\) must be positive and less "
+            r"than half the sample size \({sample_size}\)."
+        )
+
+        sample_size = values.shape[1]
+
+        for window_length in {-1, 0, sample_size//2, sample_size}:
+
+            formatted_error_str = error_str.format(
+                window_length=window_length,
+                sample_size=sample_size,
+            )
+
+            with assert_raises(ValueError, match=formatted_error_str):
+                stats.differential_entropy(
+                    values,
+                    window_length=window_length,
+                    axis=1,
+                )
+
+    def test_base_differential_entropy_with_axis_0_is_equal_to_default(self):
+        random_state = np.random.RandomState(0)
+        values = random_state.standard_normal((100, 3))
+
+        entropy = stats.differential_entropy(values, axis=0)
+        default_entropy = stats.differential_entropy(values)
+        assert_allclose(entropy, default_entropy)
+
+    def test_base_differential_entropy_transposed(self):
+        random_state = np.random.RandomState(0)
+        values = random_state.standard_normal((3, 100))
+
+        assert_allclose(
+            stats.differential_entropy(values.T).T,
+            stats.differential_entropy(values, axis=1),
+        )
+
+    def test_input_validation(self):
+        x = np.random.rand(10)
+
+        message = "`base` must be a positive number or `None`."
+        with pytest.raises(ValueError, match=message):
+            stats.differential_entropy(x, base=-2)
+
+        message = "`method` must be one of..."
+        with pytest.raises(ValueError, match=message):
+            stats.differential_entropy(x, method='ekki-ekki')
+
+    @pytest.mark.parametrize('method', ['vasicek', 'van es',
+                                        'ebrahimi', 'correa'])
+    def test_consistency(self, method):
+        # test that method is a consistent estimator
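+        # (i.e., the estimate should converge to the true differential
+        # entropy as the sample size grows)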
+        n = 10000 if method == 'correa' else 1000000
+        rvs = stats.norm.rvs(size=n, random_state=0)
+        expected = stats.norm.entropy()
+        res = stats.differential_entropy(rvs, method=method)
+        assert_allclose(res, expected, rtol=0.005)
+
+    # values from differential_entropy reference [6], table 1, n=50, m=7
+    norm_rmse_std_cases = {  # method: (RMSE, STD)
+                           'vasicek': (0.198, 0.109),
+                           'van es': (0.212, 0.110),
+                           'correa': (0.135, 0.112),
+                           'ebrahimi': (0.128, 0.109)
+                           }
+
+    @pytest.mark.parametrize('method, expected',
+                             list(norm_rmse_std_cases.items()))
+    def test_norm_rmse_std(self, method, expected):
+        # test that RMSE and standard deviation of estimators matches values
+        # given in differential_entropy reference [6]. Incidentally, also
+        # tests vectorization.
+        reps, n, m = 10000, 50, 7
+        rmse_expected, std_expected = expected
+        rvs = stats.norm.rvs(size=(reps, n), random_state=0)
+        true_entropy = stats.norm.entropy()
+        res = stats.differential_entropy(rvs, window_length=m,
+                                         method=method, axis=-1)
+        assert_allclose(np.sqrt(np.mean((res - true_entropy)**2)),
+                        rmse_expected, atol=0.005)
+        assert_allclose(np.std(res), std_expected, atol=0.002)
+
+    # values from differential_entropy reference [6], table 2, n=50, m=7
+    expon_rmse_std_cases = {  # method: (RMSE, STD)
+                            'vasicek': (0.194, 0.148),
+                            'van es': (0.179, 0.149),
+                            'correa': (0.155, 0.152),
+                            'ebrahimi': (0.151, 0.148)
+                            }
+
+    @pytest.mark.parametrize('method, expected',
+                             list(expon_rmse_std_cases.items()))
+    def test_expon_rmse_std(self, method, expected):
+        # test that RMSE and standard deviation of estimators matches values
+        # given in differential_entropy reference [6]. Incidentally, also
+        # tests vectorization.
+        reps, n, m = 10000, 50, 7
+        rmse_expected, std_expected = expected
+        rvs = stats.expon.rvs(size=(reps, n), random_state=0)
+        true_entropy = stats.expon.entropy()
+        res = stats.differential_entropy(rvs, window_length=m,
+                                         method=method, axis=-1)
+        assert_allclose(np.sqrt(np.mean((res - true_entropy)**2)),
+                        rmse_expected, atol=0.005)
+        assert_allclose(np.std(res), std_expected, atol=0.002)
+
+    @pytest.mark.parametrize('n, method', [(8, 'van es'),
+                                           (12, 'ebrahimi'),
+                                           (1001, 'vasicek')])
+    def test_method_auto(self, n, method):
+        rvs = stats.norm.rvs(size=(n,), random_state=0)
+        res1 = stats.differential_entropy(rvs)
+        res2 = stats.differential_entropy(rvs, method=method)
+        assert res1 == res2
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_fit.py b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_fit.py
new file mode 100644
index 00000000..c129d094
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_fit.py
@@ -0,0 +1,850 @@
+import os
+import numpy as np
+import numpy.testing as npt
+from numpy.testing import assert_allclose, assert_equal
+import pytest
+from scipy import stats
+from scipy.optimize import differential_evolution
+
+from .test_continuous_basic import distcont
+from scipy.stats._distn_infrastructure import FitError
+from scipy.stats._distr_params import distdiscrete
+from scipy.stats import goodness_of_fit
+
+
+# this is not a proper statistical test for convergence; it only verifies
+# that the estimated and true values don't differ by too much
+
+fit_sizes = [1000, 5000, 10000]  # sample sizes to try
+
+thresh_percent = 0.25  # fail cut-off as a fraction of the true parameter value
+thresh_min = 0.75  # minimum |estimate - true| threshold for the fail cut-off
+
+mle_failing_fits = [
+        'burr',
+        'chi2',
+        'gausshyper',
+        'genexpon',
+        'gengamma',
+        'kappa4',
+        'ksone',
+        'kstwo',
+        'mielke',
+        'ncf',
+        'ncx2',
+        'pearson3',
+        'powerlognorm',
+        'truncexpon',
+        'truncpareto',
+        'tukeylambda',
+        'vonmises',
+        'levy_stable',
+        'trapezoid',
+        'truncweibull_min',
+        'studentized_range',
+]
+
+mm_failing_fits = ['alpha', 'betaprime', 'burr', 'burr12', 'cauchy', 'chi',
+                   'chi2', 'crystalball', 'dgamma', 'dweibull', 'f',
+                   'fatiguelife', 'fisk', 'foldcauchy', 'genextreme',
+                   'gengamma', 'genhyperbolic', 'gennorm', 'genpareto',
+                   'halfcauchy', 'invgamma', 'invweibull', 'johnsonsu',
+                   'kappa3', 'ksone', 'kstwo', 'levy', 'levy_l',
+                   'levy_stable', 'loglaplace', 'lomax', 'mielke', 'nakagami',
+                   'ncf', 'nct', 'ncx2', 'pareto', 'powerlognorm', 'powernorm',
+                   'skewcauchy', 't', 'trapezoid', 'triang', 'truncpareto',
+                   'truncweibull_min', 'tukeylambda', 'studentized_range']
+
+# not sure if these fail, but they caused my patience to fail
+mm_slow_fits = ['argus', 'exponpow', 'exponweib', 'gausshyper', 'genexpon',
+                'genhalflogistic', 'halfgennorm', 'gompertz', 'johnsonsb',
+                'kappa4', 'kstwobign', 'recipinvgauss',
+                'truncexpon', 'vonmises', 'vonmises_line']
+
+failing_fits = {"MM": mm_failing_fits + mm_slow_fits, "MLE": mle_failing_fits}
+
+# Don't run the fit test on these:
+skip_fit = [
+    'erlang',  # Subclass of gamma, generates a warning.
+    'genhyperbolic',  # too slow
+]
+
+
+def cases_test_cont_fit():
+    # this tests the closeness of the estimated parameters to the true
+    # parameters for the fit method of continuous distributions
+    # Note: this is slow; some distributions don't converge with sample
+    # sizes <= 10000
+    for distname, arg in distcont:
+        if distname not in skip_fit:
+            yield distname, arg
+
+
+@pytest.mark.slow
+@pytest.mark.parametrize('distname,arg', cases_test_cont_fit())
+@pytest.mark.parametrize('method', ["MLE", "MM"])
+def test_cont_fit(distname, arg, method):
+    if distname in failing_fits[method]:
+        # Skip failing fits unless overridden
+        try:
+            xfail = not int(os.environ['SCIPY_XFAIL'])
+        except Exception:
+            xfail = True
+        if xfail:
+            msg = "Fitting %s doesn't work reliably yet" % distname
+            msg += (" [Set environment variable SCIPY_XFAIL=1 to run this"
+                    " test nevertheless.]")
+            pytest.xfail(msg)
+
+    distfn = getattr(stats, distname)
+
+    truearg = np.hstack([arg, [0.0, 1.0]])
+    diffthreshold = np.max(np.vstack([truearg*thresh_percent,
+                                      np.full(distfn.numargs+2, thresh_min)]),
+                           0)
+
+    for fit_size in fit_sizes:
+        # Note that if a fit succeeds, the other fit_sizes are skipped
+        np.random.seed(1234)
+
+        with np.errstate(all='ignore'):
+            rvs = distfn.rvs(size=fit_size, *arg)
+            est = distfn.fit(rvs, method=method)  # start with default values
+
+        diff = est - truearg
+
+        # threshold for location
+        diffthreshold[-2] = np.max([np.abs(rvs.mean())*thresh_percent,
+                                    thresh_min])
+
+        if np.any(np.isnan(est)):
+            raise AssertionError('nan returned in fit')
+        else:
+            if np.all(np.abs(diff) <= diffthreshold):
+                break
+    else:
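+        # this `else` belongs to the `for` loop: it runs only if no sample
+        # size produced an estimate within the threshold (i.e., no `break`)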
+        txt = 'parameter: %s\n' % str(truearg)
+        txt += 'estimated: %s\n' % str(est)
+        txt += 'diff     : %s\n' % str(diff)
+        raise AssertionError('fit not very good in %s\n' % distfn.name + txt)
+
+
+def _check_loc_scale_mle_fit(name, data, desired, atol=None):
+    d = getattr(stats, name)
+    actual = d.fit(data)[-2:]
+    assert_allclose(actual, desired, atol=atol,
+                    err_msg='poor mle fit of (loc, scale) in %s' % name)
+
+
+def test_non_default_loc_scale_mle_fit():
+    data = np.array([1.01, 1.78, 1.78, 1.78, 1.88, 1.88, 1.88, 2.00])
+    _check_loc_scale_mle_fit('uniform', data, [1.01, 0.99], 1e-3)
+    _check_loc_scale_mle_fit('expon', data, [1.01, 0.73875], 1e-3)
+
+
+def test_expon_fit():
+    """gh-6167"""
+    data = [0, 0, 0, 0, 2, 2, 2, 2]
+    phat = stats.expon.fit(data, floc=0)
+    assert_allclose(phat, [0, 1.0], atol=1e-3)
+
+
+def test_fit_error():
+    data = np.concatenate([np.zeros(29), np.ones(21)])
+    message = "Optimization converged to parameters that are..."
+    with pytest.raises(FitError, match=message), \
+            pytest.warns(RuntimeWarning):
+        stats.beta.fit(data)
+
+
+@pytest.mark.parametrize("dist, params",
+                         [(stats.norm, (0.5, 2.5)),  # type: ignore[attr-defined] # noqa
+                          (stats.binom, (10, 0.3, 2))])  # type: ignore[attr-defined] # noqa
+def test_nnlf_and_related_methods(dist, params):
+    rng = np.random.default_rng(983459824)
+
+    if hasattr(dist, 'pdf'):
+        logpxf = dist.logpdf
+    else:
+        logpxf = dist.logpmf
+
+    x = dist.rvs(*params, size=100, random_state=rng)
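+    # nnlf ("negative log likelihood function") should equal the negative
+    # sum of log-PDF/log-PMF values evaluated at the data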
+    ref = -logpxf(x, *params).sum()
+    res1 = dist.nnlf(params, x)
+    res2 = dist._penalized_nnlf(params, x)
+    assert_allclose(res1, ref)
+    assert_allclose(res2, ref)
+
+
+def cases_test_fit_mle():
+    # These fail default test or hang
+    skip_basic_fit = {'argus', 'foldnorm', 'truncpareto', 'truncweibull_min',
+                      'ksone', 'levy_stable', 'studentized_range', 'kstwo'}
+    slow_basic_fit = {'burr12', 'johnsonsb', 'bradford', 'fisk', 'mielke',
+                      'exponpow', 'rdist', 'norminvgauss', 'betaprime',
+                      'powerlaw', 'pareto', 'johnsonsu', 'loglaplace',
+                      'wrapcauchy', 'weibull_max', 'arcsine', 'binom', 'rice',
+                      'uniform', 'f', 'invweibull', 'genpareto',
+                      'nbinom', 'kappa3', 'lognorm', 'halfgennorm', 'pearson3',
+                      'alpha', 't', 'crystalball', 'fatiguelife', 'nakagami',
+                      'kstwobign', 'gompertz', 'dweibull', 'lomax', 'invgauss',
+                      'recipinvgauss', 'chi', 'foldcauchy', 'powernorm',
+                      'gennorm', 'randint', 'genextreme'}
+    xslow_basic_fit = {'nchypergeom_fisher', 'nchypergeom_wallenius',
+                       'gausshyper', 'genexpon', 'gengamma', 'genhyperbolic',
+                       'geninvgauss', 'tukeylambda', 'skellam', 'ncx2',
+                       'hypergeom', 'nhypergeom', 'zipfian', 'ncf',
+                       'truncnorm', 'powerlognorm', 'beta',
+                       'loguniform', 'reciprocal', 'trapezoid', 'nct',
+                       'kappa4', 'betabinom', 'exponweib', 'genhalflogistic',
+                       'burr', 'triang'}
+
+    for dist in dict(distdiscrete + distcont):
+        if dist in skip_basic_fit or not isinstance(dist, str):
+            reason = "tested separately"
+            yield pytest.param(dist, marks=pytest.mark.skip(reason=reason))
+        elif dist in slow_basic_fit:
+            reason = "too slow (>= 0.25s)"
+            yield pytest.param(dist, marks=pytest.mark.slow(reason=reason))
+        elif dist in xslow_basic_fit:
+            reason = "too slow (>= 1.0s)"
+            yield pytest.param(dist, marks=pytest.mark.xslow(reason=reason))
+        else:
+            yield dist
+
+
+def cases_test_fit_mse():
+    # the first four are so slow that I'm not sure whether they would pass
+    skip_basic_fit = {'levy_stable', 'studentized_range', 'ksone', 'skewnorm',
+                      'norminvgauss',  # super slow (~1 hr) but passes
+                      'kstwo',  # very slow (~25 min) but passes
+                      'geninvgauss',  # quite slow (~4 minutes) but passes
+                      'gausshyper', 'genhyperbolic',  # integration warnings
+                      'argus',  # close, but doesn't meet tolerance
+                      'vonmises'}  # can have negative CDF; doesn't play nice
+    slow_basic_fit = {'wald', 'genextreme', 'anglit', 'semicircular',
+                      'kstwobign', 'arcsine', 'genlogistic', 'truncexpon',
+                      'fisk', 'uniform', 'exponnorm', 'maxwell', 'lomax',
+                      'laplace_asymmetric', 'lognorm', 'foldcauchy',
+                      'genpareto', 'powernorm', 'loglaplace', 'foldnorm',
+                      'recipinvgauss', 'exponpow', 'bradford', 'weibull_max',
+                      'gompertz', 'dweibull', 'truncpareto', 'weibull_min',
+                      'johnsonsu', 'loggamma', 'kappa3', 'fatiguelife',
+                      'pareto', 'invweibull', 'alpha', 'erlang', 'dgamma',
+                      'chi2', 'crystalball', 'nakagami', 'truncweibull_min',
+                      't', 'vonmises_line', 'triang', 'wrapcauchy', 'gamma',
+                      'mielke', 'chi', 'johnsonsb', 'exponweib',
+                      'genhalflogistic', 'randint', 'nhypergeom', 'hypergeom',
+                      'betabinom'}
+    xslow_basic_fit = {'burr', 'halfgennorm', 'invgamma',
+                       'invgauss', 'powerlaw', 'burr12', 'trapezoid', 'kappa4',
+                       'f', 'powerlognorm', 'ncx2', 'rdist', 'reciprocal',
+                       'loguniform', 'betaprime', 'rice', 'gennorm',
+                       'gengamma', 'truncnorm', 'ncf', 'nct', 'pearson3',
+                       'beta', 'genexpon', 'tukeylambda', 'zipfian',
+                       'nchypergeom_wallenius', 'nchypergeom_fisher'}
+    warns_basic_fit = {'skellam'}  # can remove mark after gh-14901 is resolved
+
+    for dist in dict(distdiscrete + distcont):
+        if dist in skip_basic_fit or not isinstance(dist, str):
+            reason = "Fails. Oh well."
+            yield pytest.param(dist, marks=pytest.mark.skip(reason=reason))
+        elif dist in slow_basic_fit:
+            reason = "too slow (>= 0.25s)"
+            yield pytest.param(dist, marks=pytest.mark.slow(reason=reason))
+        elif dist in xslow_basic_fit:
+            reason = "too slow (>= 1.0s)"
+            yield pytest.param(dist, marks=pytest.mark.xslow(reason=reason))
+        elif dist in warns_basic_fit:
+            mark = pytest.mark.filterwarnings('ignore::RuntimeWarning')
+            yield pytest.param(dist, marks=mark)
+        else:
+            yield dist
+
+
+def cases_test_fitstart():
+    for distname, shapes in dict(distcont).items():
+        if (not isinstance(distname, str) or
+                distname in {'studentized_range', 'recipinvgauss'}):  # slow
+            continue
+        yield distname, shapes
+
+
+@pytest.mark.parametrize('distname, shapes', cases_test_fitstart())
+def test_fitstart(distname, shapes):
+    dist = getattr(stats, distname)
+    rng = np.random.default_rng(216342614)
+    data = rng.random(10)
+
+    with np.errstate(invalid='ignore', divide='ignore'):  # irrelevant to test
+        guess = dist._fitstart(data)
+
+    assert dist._argcheck(*guess[:-2])
+
+
+def assert_nlff_less_or_close(dist, data, params1, params0, rtol=1e-7, atol=0,
+                              nlff_name='nnlf'):
+    nlff = getattr(dist, nlff_name)
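+    # params1 (the fitted parameters) should achieve a negative log fit
+    # function no greater than params0 (the reference parameters); numerical
+    # ties within tolerance are accepted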
+    nlff1 = nlff(params1, data)
+    nlff0 = nlff(params0, data)
+    if not (nlff1 < nlff0):
+        np.testing.assert_allclose(nlff1, nlff0, rtol=rtol, atol=atol)
+
+
+class TestFit:
+    dist = stats.binom  # type: ignore[attr-defined]
+    seed = 654634816187
+    rng = np.random.default_rng(seed)
+    data = stats.binom.rvs(5, 0.5, size=100, random_state=rng)  # type: ignore[attr-defined] # noqa
+    shape_bounds_a = [(1, 10), (0, 1)]
+    shape_bounds_d = {'n': (1, 10), 'p': (0, 1)}
+    atol = 5e-2
+    rtol = 1e-2
+    tols = {'atol': atol, 'rtol': rtol}
+
+    def opt(self, *args, **kwds):
+        return differential_evolution(*args, seed=0, **kwds)
+
+    def test_dist_iv(self):
+        message = "`dist` must be an instance of..."
+        with pytest.raises(ValueError, match=message):
+            stats.fit(10, self.data, self.shape_bounds_a)
+
+    def test_data_iv(self):
+        message = "`data` must be exactly one-dimensional."
+        with pytest.raises(ValueError, match=message):
+            stats.fit(self.dist, [[1, 2, 3]], self.shape_bounds_a)
+
+        message = "All elements of `data` must be finite numbers."
+        with pytest.raises(ValueError, match=message):
+            stats.fit(self.dist, [1, 2, 3, np.nan], self.shape_bounds_a)
+        with pytest.raises(ValueError, match=message):
+            stats.fit(self.dist, [1, 2, 3, np.inf], self.shape_bounds_a)
+        with pytest.raises(ValueError, match=message):
+            stats.fit(self.dist, ['1', '2', '3'], self.shape_bounds_a)
+
+    def test_bounds_iv(self):
+        message = "Bounds provided for the following unrecognized..."
+        shape_bounds = {'n': (1, 10), 'p': (0, 1), '1': (0, 10)}
+        with pytest.warns(RuntimeWarning, match=message):
+            stats.fit(self.dist, self.data, shape_bounds)
+
+        message = "Each element of a `bounds` sequence must be a tuple..."
+        shape_bounds = [(1, 10, 3), (0, 1)]
+        with pytest.raises(ValueError, match=message):
+            stats.fit(self.dist, self.data, shape_bounds)
+
+        message = "Each element of `bounds` must be a tuple specifying..."
+        shape_bounds = [(1, 10, 3), (0, 1, 0.5)]
+        with pytest.raises(ValueError, match=message):
+            stats.fit(self.dist, self.data, shape_bounds)
+        shape_bounds = [1, 0]
+        with pytest.raises(ValueError, match=message):
+            stats.fit(self.dist, self.data, shape_bounds)
+
+        message = "A `bounds` sequence must contain at least 2 elements..."
+        shape_bounds = [(1, 10)]
+        with pytest.raises(ValueError, match=message):
+            stats.fit(self.dist, self.data, shape_bounds)
+
+        message = "A `bounds` sequence may not contain more than 3 elements..."
+        bounds = [(1, 10), (1, 10), (1, 10), (1, 10)]
+        with pytest.raises(ValueError, match=message):
+            stats.fit(self.dist, self.data, bounds)
+
+        message = "There are no values for `p` on the interval..."
+        shape_bounds = {'n': (1, 10), 'p': (1, 0)}
+        with pytest.raises(ValueError, match=message):
+            stats.fit(self.dist, self.data, shape_bounds)
+
+        message = "There are no values for `n` on the interval..."
+        shape_bounds = [(10, 1), (0, 1)]
+        with pytest.raises(ValueError, match=message):
+            stats.fit(self.dist, self.data, shape_bounds)
+
+        message = "There are no integer values for `n` on the interval..."
+        shape_bounds = [(1.4, 1.6), (0, 1)]
+        with pytest.raises(ValueError, match=message):
+            stats.fit(self.dist, self.data, shape_bounds)
+
+        message = "The intersection of user-provided bounds for `n`"
+        with pytest.raises(ValueError, match=message):
+            stats.fit(self.dist, self.data)
+        shape_bounds = [(-np.inf, np.inf), (0, 1)]
+        with pytest.raises(ValueError, match=message):
+            stats.fit(self.dist, self.data, shape_bounds)
+
+    def test_guess_iv(self):
+        message = "Guesses provided for the following unrecognized..."
+        guess = {'n': 1, 'p': 0.5, '1': 255}
+        with pytest.warns(RuntimeWarning, match=message):
+            stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
+
+        message = "Each element of `guess` must be a scalar..."
+        guess = {'n': 1, 'p': 'hi'}
+        with pytest.raises(ValueError, match=message):
+            stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
+        guess = [1, 'f']
+        with pytest.raises(ValueError, match=message):
+            stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
+        guess = [[1, 2]]
+        with pytest.raises(ValueError, match=message):
+            stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
+
+        message = "A `guess` sequence must contain at least 2..."
+        guess = [1]
+        with pytest.raises(ValueError, match=message):
+            stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
+
+        message = "A `guess` sequence may not contain more than 3..."
+        guess = [1, 2, 3, 4]
+        with pytest.raises(ValueError, match=message):
+            stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
+
+        message = "Guess for parameter `n` rounded..."
+        guess = {'n': 4.5, 'p': -0.5}
+        with pytest.warns(RuntimeWarning, match=message):
+            stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
+
+        message = "Guess for parameter `loc` rounded..."
+        guess = [5, 0.5, 0.5]
+        with pytest.warns(RuntimeWarning, match=message):
+            stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
+
+        message = "Guess for parameter `p` clipped..."
+        guess = {'n': 5, 'p': -0.5}
+        with pytest.warns(RuntimeWarning, match=message):
+            stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
+
+        message = "Guess for parameter `loc` clipped..."
+        guess = [5, 0.5, 1]
+        with pytest.warns(RuntimeWarning, match=message):
+            stats.fit(self.dist, self.data, self.shape_bounds_d, guess=guess)
+
+    def basic_fit_test(self, dist_name, method):
+
+        N = 5000
+        dist_data = dict(distcont + distdiscrete)
+        rng = np.random.default_rng(self.seed)
+        dist = getattr(stats, dist_name)
+        shapes = np.array(dist_data[dist_name])
+        bounds = np.empty((len(shapes) + 2, 2), dtype=np.float64)
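+        # sign-aware widening: bound each true shape a factor of 10 below
+        # and above its true value (dividing/multiplying by 10**sign keeps
+        # this correct for negative shapes)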
+        bounds[:-2, 0] = shapes/10.**np.sign(shapes)
+        bounds[:-2, 1] = shapes*10.**np.sign(shapes)
+        bounds[-2] = (0, 10)
+        bounds[-1] = (1e-16, 10)
+        loc = rng.uniform(*bounds[-2])
+        scale = rng.uniform(*bounds[-1])
+        ref = list(dist_data[dist_name]) + [loc, scale]
+
+        if getattr(dist, 'pmf', False):
+            ref = ref[:-1]
+            ref[-1] = np.floor(loc)
+            data = dist.rvs(*ref, size=N, random_state=rng)
+            bounds = bounds[:-1]
+        if getattr(dist, 'pdf', False):
+            data = dist.rvs(*ref, size=N, random_state=rng)
+
+        with npt.suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "overflow encountered")
+            res = stats.fit(dist, data, bounds, method=method,
+                            optimizer=self.opt)
+
+        nlff_names = {'mle': 'nnlf', 'mse': '_penalized_nlpsf'}
+        nlff_name = nlff_names[method]
+        assert_nlff_less_or_close(dist, data, res.params, ref, **self.tols,
+                                  nlff_name=nlff_name)
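+
+    # A minimal sketch (illustrative, not part of the original suite) of how
+    # the bounds built above bracket each true shape by a factor of 10 on
+    # either side regardless of the shape's sign; `_shape_bracket` is a
+    # hypothetical helper that exists only for this illustration.
+    @staticmethod
+    def _shape_bracket(shapes):
+        shapes = np.asarray(shapes, dtype=np.float64)
+        # for s > 0 this gives (s/10, s*10); for s < 0 it gives (10*s, s/10),
+        # so the lower bound is always the more negative endpoint
+        return shapes/10.**np.sign(shapes), shapes*10.**np.sign(shapes)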
+
+    @pytest.mark.parametrize("dist_name", cases_test_fit_mle())
+    def test_basic_fit_mle(self, dist_name):
+        self.basic_fit_test(dist_name, "mle")
+
+    @pytest.mark.parametrize("dist_name", cases_test_fit_mse())
+    def test_basic_fit_mse(self, dist_name):
+        self.basic_fit_test(dist_name, "mse")
+
+    def test_argus(self):
+        # Can't guarantee that all distributions will fit all data with
+        # arbitrary bounds. This distribution just happens to fail above.
+        # Try something slightly different.
+        N = 1000
+        rng = np.random.default_rng(self.seed)
+        dist = stats.argus
+        shapes = (1., 2., 3.)
+        data = dist.rvs(*shapes, size=N, random_state=rng)
+        shape_bounds = {'chi': (0.1, 10), 'loc': (0.1, 10), 'scale': (0.1, 10)}
+        res = stats.fit(dist, data, shape_bounds, optimizer=self.opt)
+
+        assert_nlff_less_or_close(dist, data, res.params, shapes, **self.tols)
+
+    def test_foldnorm(self):
+        # Can't guarantee that all distributions will fit all data with
+        # arbitrary bounds. This distribution just happens to fail above.
+        # Try something slightly different.
+        N = 1000
+        rng = np.random.default_rng(self.seed)
+        dist = stats.foldnorm
+        shapes = (1.952125337355587, 2., 3.)
+        data = dist.rvs(*shapes, size=N, random_state=rng)
+        shape_bounds = {'c': (0.1, 10), 'loc': (0.1, 10), 'scale': (0.1, 10)}
+        res = stats.fit(dist, data, shape_bounds, optimizer=self.opt)
+
+        assert_nlff_less_or_close(dist, data, res.params, shapes, **self.tols)
+
+    def test_truncpareto(self):
+        # Can't guarantee that all distributions will fit all data with
+        # arbitrary bounds. This distribution just happens to fail above.
+        # Try something slightly different.
+        N = 1000
+        rng = np.random.default_rng(self.seed)
+        dist = stats.truncpareto
+        shapes = (1.8, 5.3, 2.3, 4.1)
+        data = dist.rvs(*shapes, size=N, random_state=rng)
+        shape_bounds = [(0.1, 10)]*4
+        res = stats.fit(dist, data, shape_bounds, optimizer=self.opt)
+
+        assert_nlff_less_or_close(dist, data, res.params, shapes, **self.tols)
+
+    def test_truncweibull_min(self):
+        # Can't guarantee that all distributions will fit all data with
+        # arbitrary bounds. This distribution just happens to fail above.
+        # Try something slightly different.
+        N = 1000
+        rng = np.random.default_rng(self.seed)
+        dist = stats.truncweibull_min
+        shapes = (2.5, 0.25, 1.75, 2., 3.)
+        data = dist.rvs(*shapes, size=N, random_state=rng)
+        shape_bounds = [(0.1, 10)]*5
+        res = stats.fit(dist, data, shape_bounds, optimizer=self.opt)
+
+        assert_nlff_less_or_close(dist, data, res.params, shapes, **self.tols)
+
+    def test_missing_shape_bounds(self):
+        # some distributions have a small domain w.r.t. a parameter, e.g.
+        # $p \in [0, 1]$ for the binomial distribution.
+        # The user does not need to provide bounds for these because the
+        # intersection of the user's bounds (none) and the distribution's
+        # domain is already finite.
+        N = 1000
+        rng = np.random.default_rng(self.seed)
+
+        dist = stats.binom
+        n, p, loc = 10, 0.65, 0
+        data = dist.rvs(n, p, loc=loc, size=N, random_state=rng)
+        shape_bounds = {'n': np.array([0, 20])}  # check arrays are OK, too
+        res = stats.fit(dist, data, shape_bounds, optimizer=self.opt)
+        assert_allclose(res.params, (n, p, loc), **self.tols)
+
+        dist = stats.bernoulli
+        p, loc = 0.314159, 0
+        data = dist.rvs(p, loc=loc, size=N, random_state=rng)
+        res = stats.fit(dist, data, optimizer=self.opt)
+        assert_allclose(res.params, (p, loc), **self.tols)
+
+    def test_fit_only_loc_scale(self):
+        # fit only loc
+        N = 5000
+        rng = np.random.default_rng(self.seed)
+
+        dist = stats.norm
+        loc, scale = 1.5, 1
+        data = dist.rvs(loc=loc, size=N, random_state=rng)
+        loc_bounds = (0, 5)
+        bounds = {'loc': loc_bounds}
+        res = stats.fit(dist, data, bounds, optimizer=self.opt)
+        assert_allclose(res.params, (loc, scale), **self.tols)
+
+        # fit only scale
+        loc, scale = 0, 2.5
+        data = dist.rvs(scale=scale, size=N, random_state=rng)
+        scale_bounds = (0, 5)
+        bounds = {'scale': scale_bounds}
+        res = stats.fit(dist, data, bounds, optimizer=self.opt)
+        assert_allclose(res.params, (loc, scale), **self.tols)
+
+        # fit only loc and scale
+        dist = stats.norm
+        loc, scale = 1.5, 2.5
+        data = dist.rvs(loc=loc, scale=scale, size=N, random_state=rng)
+        bounds = {'loc': loc_bounds, 'scale': scale_bounds}
+        res = stats.fit(dist, data, bounds, optimizer=self.opt)
+        assert_allclose(res.params, (loc, scale), **self.tols)
+
+    def test_everything_fixed(self):
+        N = 5000
+        rng = np.random.default_rng(self.seed)
+
+        dist = stats.norm
+        loc, scale = 1.5, 2.5
+        data = dist.rvs(loc=loc, scale=scale, size=N, random_state=rng)
+
+        # loc, scale fixed to 0, 1 by default
+        res = stats.fit(dist, data)
+        assert_allclose(res.params, (0, 1), **self.tols)
+
+        # loc, scale explicitly fixed
+        bounds = {'loc': (loc, loc), 'scale': (scale, scale)}
+        res = stats.fit(dist, data, bounds)
+        assert_allclose(res.params, (loc, scale), **self.tols)
+
+        # `n` gets fixed during polishing
+        dist = stats.binom
+        n, p, loc = 10, 0.65, 0
+        data = dist.rvs(n, p, loc=loc, size=N, random_state=rng)
+        shape_bounds = {'n': (0, 20), 'p': (0.65, 0.65)}
+        res = stats.fit(dist, data, shape_bounds, optimizer=self.opt)
+        assert_allclose(res.params, (n, p, loc), **self.tols)
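+
+    # A short sketch (illustrative only): a parameter is effectively fixed
+    # whenever its lower and upper bounds coincide, so `stats.fit` needs no
+    # optimizer once every parameter is pinned this way.
+    @staticmethod
+    def _sketch_fixed_via_equal_bounds():
+        data = [1.0, 2.0, 3.0]
+        bounds = {'loc': (1.5, 1.5), 'scale': (2.5, 2.5)}
+        res = stats.fit(stats.norm, data, bounds)
+        assert np.allclose(res.params, (1.5, 2.5))  # returned as pinned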
+
+    def test_failure(self):
+        N = 5000
+        rng = np.random.default_rng(self.seed)
+
+        dist = stats.nbinom
+        shapes = (5, 0.5)
+        data = dist.rvs(*shapes, size=N, random_state=rng)
+
+        assert data.min() == 0
+        # With lower bounds on location at 0.5, likelihood is zero
+        bounds = [(0, 30), (0, 1), (0.5, 10)]
+        res = stats.fit(dist, data, bounds)
+        message = "Optimization converged to parameter values that are"
+        assert res.message.startswith(message)
+        assert res.success is False
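+
+    # Why the fit above must fail, in one line (illustrative): with `loc`
+    # bounded below by 0.5, every candidate nbinom places zero probability
+    # on the observed value 0, so the likelihood of the sample is
+    # identically zero and no parameter vector within the bounds can work.
+    @staticmethod
+    def _sketch_zero_likelihood():
+        # pmf at 0 vanishes once the distribution is shifted to the right
+        assert stats.nbinom.pmf(0, 5, 0.5, loc=1) == 0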
+
+    @pytest.mark.xslow
+    def test_guess(self):
+        # Test that guess helps DE find the desired solution
+        N = 2000
+        rng = np.random.default_rng(self.seed)
+        dist = stats.nhypergeom
+        params = (20, 7, 12, 0)
+        bounds = [(2, 200), (0.7, 70), (1.2, 120), (0, 10)]
+
+        data = dist.rvs(*params, size=N, random_state=rng)
+
+        res = stats.fit(dist, data, bounds, optimizer=self.opt)
+        assert not np.allclose(res.params, params, **self.tols)
+
+        res = stats.fit(dist, data, bounds, guess=params, optimizer=self.opt)
+        assert_allclose(res.params, params, **self.tols)
+
+    def test_mse_accuracy_1(self):
+        # Test maximum spacing estimation against example from Wikipedia
+        # https://en.wikipedia.org/wiki/Maximum_spacing_estimation#Examples
+        data = [2, 4]
+        dist = stats.expon
+        bounds = {'loc': (0, 0), 'scale': (1e-8, 10)}
+        res_mle = stats.fit(dist, data, bounds=bounds, method='mle')
+        assert_allclose(res_mle.params.scale, 3, atol=1e-3)
+        res_mse = stats.fit(dist, data, bounds=bounds, method='mse')
+        assert_allclose(res_mse.params.scale, 3.915, atol=1e-3)
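+
+    # Worked arithmetic behind the MLE reference above (illustrative): with
+    # loc fixed at 0, the exponential MLE of `scale` is the sample mean,
+    # (2 + 4) / 2 = 3, which is the value the 'mle' branch recovers. MSE
+    # instead maximizes the geometric mean of the CDF spacings and lands at
+    # a different, larger scale (~3.915) for this two-point sample.
+    @staticmethod
+    def _expon_mle_scale(data):
+        return np.mean(data)  # 3.0 for data = [2, 4]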
+
+    def test_mse_accuracy_2(self):
+        # Test maximum spacing estimation against example from Wikipedia
+        # https://en.wikipedia.org/wiki/Maximum_spacing_estimation#Examples
+        rng = np.random.default_rng(9843212616816518964)
+
+        dist = stats.uniform
+        n = 10
+        data = dist(3, 6).rvs(size=n, random_state=rng)
+        bounds = {'loc': (0, 10), 'scale': (1e-8, 10)}
+        res = stats.fit(dist, data, bounds=bounds, method='mse')
+        # (loc=3.608118420015416, scale=5.509323262055043)
+
+        x = np.sort(data)
+        a = (n*x[0] - x[-1])/(n - 1)
+        b = (n*x[-1] - x[0])/(n - 1)
+        ref = a, b-a  # (3.6081133632151503, 5.509328130317254)
+        assert_allclose(res.params, ref, rtol=1e-4)
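+
+    # The closed-form reference restated as a helper (illustrative only):
+    # for a uniform sample of size n, maximum spacing estimation gives
+    #     a = (n*min(x) - max(x)) / (n - 1),  b = (n*max(x) - min(x)) / (n - 1)
+    # with `scale` = b - a, which is what the Monte Carlo fit approaches.
+    @staticmethod
+    def _uniform_mse_reference(data):
+        x = np.sort(np.asarray(data, dtype=np.float64))
+        n = len(x)
+        a = (n*x[0] - x[-1])/(n - 1)
+        b = (n*x[-1] - x[0])/(n - 1)
+        return a, b - a  # (loc, scale)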
+
+
+# Data from Matlab: https://www.mathworks.com/help/stats/lillietest.html
+examgrades = [65, 61, 81, 88, 69, 89, 55, 84, 86, 84, 71, 81, 84, 81, 78, 67,
+              96, 66, 73, 75, 59, 71, 69, 63, 79, 76, 63, 85, 87, 88, 80, 71,
+              65, 84, 71, 75, 81, 79, 64, 65, 84, 77, 70, 75, 84, 75, 73, 92,
+              90, 79, 80, 71, 73, 71, 58, 79, 73, 64, 77, 82, 81, 59, 54, 82,
+              57, 79, 79, 73, 74, 82, 63, 64, 73, 69, 87, 68, 81, 73, 83, 73,
+              80, 73, 73, 71, 66, 78, 64, 74, 68, 67, 75, 75, 80, 85, 74, 76,
+              80, 77, 93, 70, 86, 80, 81, 83, 68, 60, 85, 64, 74, 82, 81, 77,
+              66, 85, 75, 81, 69, 60, 83, 72]
+
+
+class TestGoodnessOfFit:
+
+    def test_gof_iv(self):
+        dist = stats.norm
+        x = [1, 2, 3]
+
+        message = r"`dist` must be a \(non-frozen\) instance of..."
+        with pytest.raises(TypeError, match=message):
+            goodness_of_fit(stats.norm(), x)
+
+        message = "`data` must be a one-dimensional array of numbers."
+        with pytest.raises(ValueError, match=message):
+            goodness_of_fit(dist, [[1, 2, 3]])
+
+        message = "`statistic` must be one of..."
+        with pytest.raises(ValueError, match=message):
+            goodness_of_fit(dist, x, statistic='mm')
+
+        message = "`n_mc_samples` must be an integer."
+        with pytest.raises(TypeError, match=message):
+            goodness_of_fit(dist, x, n_mc_samples=1000.5)
+
+        message = "'herring' cannot be used to seed a"
+        with pytest.raises(ValueError, match=message):
+            goodness_of_fit(dist, x, random_state='herring')
+
+    def test_against_ks(self):
+        rng = np.random.default_rng(8517426291317196949)
+        x = examgrades
+        known_params = {'loc': np.mean(x), 'scale': np.std(x, ddof=1)}
+        res = goodness_of_fit(stats.norm, x, known_params=known_params,
+                              statistic='ks', random_state=rng)
+        ref = stats.kstest(x, stats.norm(**known_params).cdf, method='exact')
+        assert_allclose(res.statistic, ref.statistic)  # ~0.0848
+        assert_allclose(res.pvalue, ref.pvalue, atol=5e-3)  # ~0.335
+
+    def test_against_lilliefors(self):
+        rng = np.random.default_rng(2291803665717442724)
+        x = examgrades
+        res = goodness_of_fit(stats.norm, x, statistic='ks', random_state=rng)
+        known_params = {'loc': np.mean(x), 'scale': np.std(x, ddof=1)}
+        ref = stats.kstest(x, stats.norm(**known_params).cdf, method='exact')
+        assert_allclose(res.statistic, ref.statistic)  # ~0.0848
+        assert_allclose(res.pvalue, 0.0348, atol=5e-3)
+
+    def test_against_cvm(self):
+        rng = np.random.default_rng(8674330857509546614)
+        x = examgrades
+        known_params = {'loc': np.mean(x), 'scale': np.std(x, ddof=1)}
+        res = goodness_of_fit(stats.norm, x, known_params=known_params,
+                              statistic='cvm', random_state=rng)
+        ref = stats.cramervonmises(x, stats.norm(**known_params).cdf)
+        assert_allclose(res.statistic, ref.statistic)  # ~0.090
+        assert_allclose(res.pvalue, ref.pvalue, atol=5e-3)  # ~0.636
+
+    def test_against_anderson_case_0(self):
+        # "Case 0" is where loc and scale are known [1]
+        rng = np.random.default_rng(7384539336846690410)
+        x = np.arange(1, 101)
+        # loc that produced critical value of statistic found w/ root_scalar
+        known_params = {'loc': 45.01575354024957, 'scale': 30}
+        res = goodness_of_fit(stats.norm, x, known_params=known_params,
+                              statistic='ad', random_state=rng)
+        assert_allclose(res.statistic, 2.492)  # See [1] Table 1A 1.0
+        assert_allclose(res.pvalue, 0.05, atol=5e-3)
+
+    def test_against_anderson_case_1(self):
+        # "Case 1" is where scale is known and loc is fit [1]
+        rng = np.random.default_rng(5040212485680146248)
+        x = np.arange(1, 101)
+        # scale that produced critical value of statistic found w/ root_scalar
+        known_params = {'scale': 29.957112639101933}
+        res = goodness_of_fit(stats.norm, x, known_params=known_params,
+                              statistic='ad', random_state=rng)
+        assert_allclose(res.statistic, 0.908)  # See [1] Table 1B 1.1
+        assert_allclose(res.pvalue, 0.1, atol=5e-3)
+
+    def test_against_anderson_case_2(self):
+        # "Case 2" is where loc is known and scale is fit [1]
+        rng = np.random.default_rng(726693985720914083)
+        x = np.arange(1, 101)
+        # loc that produced critical value of statistic found w/ root_scalar
+        known_params = {'loc': 44.5680212261933}
+        res = goodness_of_fit(stats.norm, x, known_params=known_params,
+                              statistic='ad', random_state=rng)
+        assert_allclose(res.statistic, 2.904)  # See [1] Table 1B 1.2
+        assert_allclose(res.pvalue, 0.025, atol=5e-3)
+
+    def test_against_anderson_case_3(self):
+        # "Case 3" is where both loc and scale are fit [1]
+        rng = np.random.default_rng(6763691329830218206)
+        # c that produced critical value of statistic found w/ root_scalar
+        x = stats.skewnorm.rvs(1.4477847789132101, loc=1, scale=2, size=100,
+                               random_state=rng)
+        res = goodness_of_fit(stats.norm, x, statistic='ad', random_state=rng)
+        assert_allclose(res.statistic, 0.559)  # See [1] Table 1B 1.3
+        assert_allclose(res.pvalue, 0.15, atol=5e-3)
+
+    @pytest.mark.slow
+    def test_against_anderson_gumbel_r(self):
+        rng = np.random.default_rng(7302761058217743)
+        # c that produced critical value of statistic found w/ root_scalar
+        x = stats.genextreme(0.051896837188595134, loc=0.5,
+                             scale=1.5).rvs(size=1000, random_state=rng)
+        res = goodness_of_fit(stats.gumbel_r, x, statistic='ad',
+                              random_state=rng)
+        ref = stats.anderson(x, dist='gumbel_r')
+        assert_allclose(res.statistic, ref.critical_values[0])
+        assert_allclose(res.pvalue, ref.significance_level[0]/100, atol=5e-3)
+
+    def test_params_effects(self):
+        # Ensure that `guessed_params`, `fit_params`, and `known_params` have
+        # the intended effects.
+        rng = np.random.default_rng(9121950977643805391)
+        x = stats.skewnorm.rvs(-5.044559778383153, loc=1, scale=2, size=50,
+                               random_state=rng)
+
+        # Show that `guessed_params` don't fit to the guess,
+        # but `fit_params` and `known_params` respect the provided fit
+        guessed_params = {'c': 13.4}
+        fit_params = {'scale': 13.73}
+        known_params = {'loc': -13.85}
+        rng = np.random.default_rng(9121950977643805391)
+        res1 = goodness_of_fit(stats.weibull_min, x, n_mc_samples=2,
+                               guessed_params=guessed_params,
+                               fit_params=fit_params,
+                               known_params=known_params, random_state=rng)
+        assert not np.allclose(res1.fit_result.params.c, 13.4)
+        assert_equal(res1.fit_result.params.scale, 13.73)
+        assert_equal(res1.fit_result.params.loc, -13.85)
+
+        # Show that changing the guess changes the parameter that gets fit,
+        # and it changes the null distribution
+        guessed_params = {'c': 2}
+        rng = np.random.default_rng(9121950977643805391)
+        res2 = goodness_of_fit(stats.weibull_min, x, n_mc_samples=2,
+                               guessed_params=guessed_params,
+                               fit_params=fit_params,
+                               known_params=known_params, random_state=rng)
+        assert not np.allclose(res2.fit_result.params.c,
+                               res1.fit_result.params.c, rtol=1e-8)
+        assert not np.allclose(res2.null_distribution,
+                               res1.null_distribution, rtol=1e-8)
+        assert_equal(res2.fit_result.params.scale, 13.73)
+        assert_equal(res2.fit_result.params.loc, -13.85)
+
+        # If we set all parameters as fit_params and known_params,
+        # they're all fixed to those values, but the null distribution
+        # varies.
+        fit_params = {'c': 13.4, 'scale': 13.73}
+        rng = np.random.default_rng(9121950977643805391)
+        res3 = goodness_of_fit(stats.weibull_min, x, n_mc_samples=2,
+                               guessed_params=guessed_params,
+                               fit_params=fit_params,
+                               known_params=known_params, random_state=rng)
+        assert_equal(res3.fit_result.params.c, 13.4)
+        assert_equal(res3.fit_result.params.scale, 13.73)
+        assert_equal(res3.fit_result.params.loc, -13.85)
+        assert not np.allclose(res3.null_distribution, res1.null_distribution)
+
+
+class TestFitResult:
+    def test_plot_iv(self):
+        rng = np.random.default_rng(1769658657308472721)
+        data = stats.norm.rvs(0, 1, size=100, random_state=rng)
+
+        def optimizer(*args, **kwargs):
+            return differential_evolution(*args, **kwargs, seed=rng)
+
+        bounds = [(0, 30), (0, 1)]
+        res = stats.fit(stats.norm, data, bounds, optimizer=optimizer)
+        try:
+            import matplotlib  # noqa
+            message = r"`plot_type` must be one of \{'..."
+            with pytest.raises(ValueError, match=message):
+                res.plot(plot_type='llama')
+        except (ModuleNotFoundError, ImportError):
+            message = r"matplotlib must be installed to use method `plot`."
+            with pytest.raises(ModuleNotFoundError, match=message):
+                res.plot(plot_type='llama')
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_hypotests.py b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_hypotests.py
new file mode 100644
index 00000000..50834c8c
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_hypotests.py
@@ -0,0 +1,1712 @@
+from itertools import product
+
+import numpy as np
+import functools
+import pytest
+from numpy.testing import (assert_, assert_equal, assert_allclose,
+                           assert_almost_equal)  # avoid new uses
+from pytest import raises as assert_raises
+
+import scipy.stats as stats
+from scipy.stats import distributions
+from scipy.stats._hypotests import (epps_singleton_2samp, cramervonmises,
+                                    _cdf_cvm, cramervonmises_2samp,
+                                    _pval_cvm_2samp_exact, barnard_exact,
+                                    boschloo_exact)
+from scipy.stats._mannwhitneyu import mannwhitneyu, _mwu_state
+from .common_tests import check_named_results
+from scipy._lib._testutils import _TestPythranFunc
+
+
+class TestEppsSingleton:
+    def test_statistic_1(self):
+        # first example in Goerg & Kaiser, also in original paper of
+        # Epps & Singleton. Note: values do not match exactly because the
+        # value of the interquartile range varies depending on how
+        # quantiles are computed.
+        x = np.array([-0.35, 2.55, 1.73, 0.73, 0.35,
+                      2.69, 0.46, -0.94, -0.37, 12.07])
+        y = np.array([-1.15, -0.15, 2.48, 3.25, 3.71,
+                      4.29, 5.00, 7.74, 8.38, 8.60])
+        w, p = epps_singleton_2samp(x, y)
+        assert_almost_equal(w, 15.14, decimal=1)
+        assert_almost_equal(p, 0.00442, decimal=3)
+
+    def test_statistic_2(self):
+        # second example in Goerg & Kaiser, again not a perfect match
+        x = np.array((0, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 5, 5, 5, 5, 6, 10,
+                      10, 10, 10))
+        y = np.array((10, 4, 0, 5, 10, 10, 0, 5, 6, 7, 10, 3, 1, 7, 0, 8, 1,
+                      5, 8, 10))
+        w, p = epps_singleton_2samp(x, y)
+        assert_allclose(w, 8.900, atol=0.001)
+        assert_almost_equal(p, 0.06364, decimal=3)
+
+    def test_epps_singleton_array_like(self):
+        np.random.seed(1234)
+        x, y = np.arange(30), np.arange(28)
+
+        w1, p1 = epps_singleton_2samp(list(x), list(y))
+        w2, p2 = epps_singleton_2samp(tuple(x), tuple(y))
+        w3, p3 = epps_singleton_2samp(x, y)
+
+        assert_(w1 == w2 == w3)
+        assert_(p1 == p2 == p3)
+
+    def test_epps_singleton_size(self):
+        # raise error if less than 5 elements
+        x, y = (1, 2, 3, 4), np.arange(10)
+        assert_raises(ValueError, epps_singleton_2samp, x, y)
+
+    def test_epps_singleton_nonfinite(self):
+        # raise error if there are non-finite values
+        x, y = (1, 2, 3, 4, 5, np.inf), np.arange(10)
+        assert_raises(ValueError, epps_singleton_2samp, x, y)
+        x, y = np.arange(10), (1, 2, 3, 4, 5, np.nan)
+        assert_raises(ValueError, epps_singleton_2samp, x, y)
+
+    def test_epps_singleton_1d_input(self):
+        x = np.arange(100).reshape(-1, 1)
+        assert_raises(ValueError, epps_singleton_2samp, x, x)
+
+    def test_names(self):
+        x, y = np.arange(20), np.arange(30)
+        res = epps_singleton_2samp(x, y)
+        attributes = ('statistic', 'pvalue')
+        check_named_results(res, attributes)
+
+
+class TestCvm:
+    # the expected values of the cdfs are taken from Table 1 in
+    # Csorgo / Faraway: The Exact and Asymptotic Distribution of
+    # Cramér-von Mises Statistics, 1996.
+    def test_cdf_4(self):
+        assert_allclose(
+                _cdf_cvm([0.02983, 0.04111, 0.12331, 0.94251], 4),
+                [0.01, 0.05, 0.5, 0.999],
+                atol=1e-4)
+
+    def test_cdf_10(self):
+        assert_allclose(
+                _cdf_cvm([0.02657, 0.03830, 0.12068, 0.56643], 10),
+                [0.01, 0.05, 0.5, 0.975],
+                atol=1e-4)
+
+    def test_cdf_1000(self):
+        assert_allclose(
+                _cdf_cvm([0.02481, 0.03658, 0.11889, 1.16120], 1000),
+                [0.01, 0.05, 0.5, 0.999],
+                atol=1e-4)
+
+    def test_cdf_inf(self):
+        assert_allclose(
+                _cdf_cvm([0.02480, 0.03656, 0.11888, 1.16204]),
+                [0.01, 0.05, 0.5, 0.999],
+                atol=1e-4)
+
+    def test_cdf_support(self):
+        # cdf has support on [1/(12*n), n/3]
+        assert_equal(_cdf_cvm([1/(12*533), 533/3], 533), [0, 1])
+        assert_equal(_cdf_cvm([1/(12*(27 + 1)), (27 + 1)/3], 27), [0, 1])
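+
+    # Where those endpoints come from (illustrative note): the statistic is
+    # minimized, at 1/(12*n), when the transformed sample sits exactly on
+    # the plotting positions (2i - 1)/(2n), and maximized, at n/3, when the
+    # whole sample is pushed to one end of [0, 1].
+    @staticmethod
+    def _cvm_support(n):
+        return 1/(12*n), n/3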
+
+    def test_cdf_large_n(self):
+        # test that asymptotic cdf and cdf for large samples are close
+        assert_allclose(
+                _cdf_cvm([0.02480, 0.03656, 0.11888, 1.16204, 100], 10000),
+                _cdf_cvm([0.02480, 0.03656, 0.11888, 1.16204, 100]),
+                atol=1e-4)
+
+    def test_large_x(self):
+        # for large values of x and n, the series used to compute the cdf
+        # converges slowly.
+        # this leads to a bug in the R package goftest and the MAPLE code
+        # that is the basis of the implementation in scipy
+        # note: cdf = 1 for x >= 1000/3 and n = 1000
+        assert_(0.99999 < _cdf_cvm(333.3, 1000) < 1.0)
+        assert_(0.99999 < _cdf_cvm(333.3) < 1.0)
+
+    def test_low_p(self):
+        # _cdf_cvm can return values larger than 1. In that case, we just
+        # return a p-value of zero.
+        n = 12
+        res = cramervonmises(np.ones(n)*0.8, 'norm')
+        assert_(_cdf_cvm(res.statistic, n) > 1.0)
+        assert_equal(res.pvalue, 0)
+
+    def test_invalid_input(self):
+        x = np.arange(10).reshape((2, 5))
+        assert_raises(ValueError, cramervonmises, x, "norm")
+        assert_raises(ValueError, cramervonmises, [1.5], "norm")
+        assert_raises(ValueError, cramervonmises, (), "norm")
+
+    def test_values_R(self):
+        # compared against R package goftest, version 1.1.1
+        # goftest::cvm.test(c(-1.7, 2, 0, 1.3, 4, 0.1, 0.6), "pnorm")
+        res = cramervonmises([-1.7, 2, 0, 1.3, 4, 0.1, 0.6], "norm")
+        assert_allclose(res.statistic, 0.288156, atol=1e-6)
+        assert_allclose(res.pvalue, 0.1453465, atol=1e-6)
+
+        # goftest::cvm.test(c(-1.7, 2, 0, 1.3, 4, 0.1, 0.6),
+        #                   "pnorm", mean = 3, sd = 1.5)
+        res = cramervonmises([-1.7, 2, 0, 1.3, 4, 0.1, 0.6], "norm", (3, 1.5))
+        assert_allclose(res.statistic, 0.9426685, atol=1e-6)
+        assert_allclose(res.pvalue, 0.002026417, atol=1e-6)
+
+        # goftest::cvm.test(c(1, 2, 5, 1.4, 0.14, 11, 13, 0.9, 7.5), "pexp")
+        res = cramervonmises([1, 2, 5, 1.4, 0.14, 11, 13, 0.9, 7.5], "expon")
+        assert_allclose(res.statistic, 0.8421854, atol=1e-6)
+        assert_allclose(res.pvalue, 0.004433406, atol=1e-6)
+
+    def test_callable_cdf(self):
+        x, args = np.arange(5), (1.4, 0.7)
+        r1 = cramervonmises(x, distributions.expon.cdf)
+        r2 = cramervonmises(x, "expon")
+        assert_equal((r1.statistic, r1.pvalue), (r2.statistic, r2.pvalue))
+
+        r1 = cramervonmises(x, distributions.beta.cdf, args)
+        r2 = cramervonmises(x, "beta", args)
+        assert_equal((r1.statistic, r1.pvalue), (r2.statistic, r2.pvalue))
+
+
+class TestMannWhitneyU:
+    def setup_method(self):
+        _mwu_state._recursive = True
+
+    # All magic numbers are from R wilcox.test unless otherwise specified
+    # https://rdrr.io/r/stats/wilcox.test.html
+
+    # --- Test Input Validation ---
+
+    def test_input_validation(self):
+        x = np.array([1, 2])  # generic, valid inputs
+        y = np.array([3, 4])
+        with assert_raises(ValueError, match="`x` and `y` must be of nonzero"):
+            mannwhitneyu([], y)
+        with assert_raises(ValueError, match="`x` and `y` must be of nonzero"):
+            mannwhitneyu(x, [])
+        with assert_raises(ValueError, match="`use_continuity` must be one"):
+            mannwhitneyu(x, y, use_continuity='ekki')
+        with assert_raises(ValueError, match="`alternative` must be one of"):
+            mannwhitneyu(x, y, alternative='ekki')
+        with assert_raises(ValueError, match="`axis` must be an integer"):
+            mannwhitneyu(x, y, axis=1.5)
+        with assert_raises(ValueError, match="`method` must be one of"):
+            mannwhitneyu(x, y, method='ekki')
+
+    def test_auto(self):
+        # Test that default method ('auto') chooses intended method
+
+        np.random.seed(1)
+        n = 8  # threshold to switch from exact to asymptotic
+
+        # both inputs are smaller than threshold; should use exact
+        x = np.random.rand(n-1)
+        y = np.random.rand(n-1)
+        auto = mannwhitneyu(x, y)
+        asymptotic = mannwhitneyu(x, y, method='asymptotic')
+        exact = mannwhitneyu(x, y, method='exact')
+        assert auto.pvalue == exact.pvalue
+        assert auto.pvalue != asymptotic.pvalue
+
+        # one input is smaller than threshold; should use exact
+        x = np.random.rand(n-1)
+        y = np.random.rand(n+1)
+        auto = mannwhitneyu(x, y)
+        asymptotic = mannwhitneyu(x, y, method='asymptotic')
+        exact = mannwhitneyu(x, y, method='exact')
+        assert auto.pvalue == exact.pvalue
+        assert auto.pvalue != asymptotic.pvalue
+
+        # other input is smaller than threshold; should use exact
+        auto = mannwhitneyu(y, x)
+        asymptotic = mannwhitneyu(x, y, method='asymptotic')
+        exact = mannwhitneyu(x, y, method='exact')
+        assert auto.pvalue == exact.pvalue
+        assert auto.pvalue != asymptotic.pvalue
+
+        # both inputs are larger than threshold; should use asymptotic
+        x = np.random.rand(n+1)
+        y = np.random.rand(n+1)
+        auto = mannwhitneyu(x, y)
+        asymptotic = mannwhitneyu(x, y, method='asymptotic')
+        exact = mannwhitneyu(x, y, method='exact')
+        assert auto.pvalue != exact.pvalue
+        assert auto.pvalue == asymptotic.pvalue
+
+        # both inputs are smaller than threshold, but there is a tie
+        # should use asymptotic
+        x = np.random.rand(n-1)
+        y = np.random.rand(n-1)
+        y[3] = x[3]
+        auto = mannwhitneyu(x, y)
+        asymptotic = mannwhitneyu(x, y, method='asymptotic')
+        exact = mannwhitneyu(x, y, method='exact')
+        assert auto.pvalue != exact.pvalue
+        assert auto.pvalue == asymptotic.pvalue
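+
+    # The selection rule exercised above, as a plain predicate (illustrative
+    # sketch; the real decision is internal to mannwhitneyu, and behavior
+    # exactly at the threshold is not pinned down by these tests): 'auto'
+    # uses the exact distribution iff at least one sample is small and the
+    # pooled data contain no ties.
+    @staticmethod
+    def _auto_uses_exact(x, y, threshold=8):
+        x, y = np.asarray(x), np.asarray(y)
+        pooled = np.concatenate((x, y))
+        no_ties = len(np.unique(pooled)) == len(pooled)
+        return (len(x) <= threshold or len(y) <= threshold) and no_ties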
+
+    # --- Test Basic Functionality ---
+
+    x = [210.052110, 110.190630, 307.918612]
+    y = [436.08811482466416, 416.37397329768191, 179.96975939463582,
+         197.8118754228619, 34.038757281225756, 138.54220550921517,
+         128.7769351470246, 265.92721427951852, 275.6617533155341,
+         592.34083395416258, 448.73177590617018, 300.61495185038905,
+         187.97508449019588]
+
+    # This test was written for mann_whitney_u in gh-4933.
+    # Originally, the p-values for alternatives were swapped;
+    # this has been corrected and the tests have been refactored for
+    # compactness, but otherwise the tests are unchanged.
+    # R code for comparison, e.g.:
+    # options(digits = 16)
+    # x = c(210.052110, 110.190630, 307.918612)
+    # y = c(436.08811482466416, 416.37397329768191, 179.96975939463582,
+    #       197.8118754228619, 34.038757281225756, 138.54220550921517,
+    #       128.7769351470246, 265.92721427951852, 275.6617533155341,
+    #       592.34083395416258, 448.73177590617018, 300.61495185038905,
+    #       187.97508449019588)
+    # wilcox.test(x, y, alternative="g", exact=TRUE)
+    cases_basic = [[{"alternative": 'two-sided', "method": "asymptotic"},
+                    (16, 0.6865041817876)],
+                   [{"alternative": 'less', "method": "asymptotic"},
+                    (16, 0.3432520908938)],
+                   [{"alternative": 'greater', "method": "asymptotic"},
+                    (16, 0.7047591913255)],
+                   [{"alternative": 'two-sided', "method": "exact"},
+                    (16, 0.7035714285714)],
+                   [{"alternative": 'less', "method": "exact"},
+                    (16, 0.3517857142857)],
+                   [{"alternative": 'greater', "method": "exact"},
+                    (16, 0.6946428571429)]]
+
+    @pytest.mark.parametrize(("kwds", "expected"), cases_basic)
+    def test_basic(self, kwds, expected):
+        res = mannwhitneyu(self.x, self.y, **kwds)
+        assert_allclose(res, expected)
+
+    cases_continuity = [[{"alternative": 'two-sided', "use_continuity": True},
+                         (23, 0.6865041817876)],
+                        [{"alternative": 'less', "use_continuity": True},
+                         (23, 0.7047591913255)],
+                        [{"alternative": 'greater', "use_continuity": True},
+                         (23, 0.3432520908938)],
+                        [{"alternative": 'two-sided', "use_continuity": False},
+                         (23, 0.6377328900502)],
+                        [{"alternative": 'less', "use_continuity": False},
+                         (23, 0.6811335549749)],
+                        [{"alternative": 'greater', "use_continuity": False},
+                         (23, 0.3188664450251)]]
+
+    @pytest.mark.parametrize(("kwds", "expected"), cases_continuity)
+    def test_continuity(self, kwds, expected):
+        # When x and y are interchanged, less and greater p-values should
+        # swap (compare to above). This wouldn't happen if the continuity
+        # correction were applied in the wrong direction. Note that less and
+        # greater p-values do not sum to 1 when continuity correction is on,
+        # which is what we'd expect. Also check that results match R when
+        # continuity correction is turned off.
+        # Note that method='asymptotic' -> exact=FALSE
+        # and use_continuity=False -> correct=FALSE, e.g.:
+        # wilcox.test(x, y, alternative="t", exact=FALSE, correct=FALSE)
+        res = mannwhitneyu(self.y, self.x, method='asymptotic', **kwds)
+        assert_allclose(res, expected)
+
+    def test_tie_correct(self):
+        # Test tie correction against R's wilcox.test
+        # options(digits = 16)
+        # x = c(1, 2, 3, 4)
+        # y = c(1, 2, 3, 4, 5)
+        # wilcox.test(x, y, exact=FALSE)
+        x = [1, 2, 3, 4]
+        y0 = np.array([1, 2, 3, 4, 5])
+        dy = np.array([0, 1, 0, 1, 0])*0.01
+        dy2 = np.array([0, 0, 1, 0, 0])*0.01
+        y = [y0-0.01, y0-dy, y0-dy2, y0, y0+dy2, y0+dy, y0+0.01]
+        res = mannwhitneyu(x, y, axis=-1, method="asymptotic")
+        U_expected = [10, 9, 8.5, 8, 7.5, 7, 6]
+        p_expected = [1, 0.9017048037317, 0.804080657472, 0.7086240584439,
+                      0.6197963884941, 0.5368784563079, 0.3912672792826]
+        assert_equal(res.statistic, U_expected)
+        assert_allclose(res.pvalue, p_expected)
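+
+    # The tie correction being exercised, written out (illustrative sketch;
+    # the production formula lives inside mannwhitneyu): with tie sizes t_j
+    # among the N = n1 + n2 pooled observations, the variance of U becomes
+    #     sigma**2 = n1*n2/12 * ((N + 1) - sum(t_j**3 - t_j)/(N*(N - 1)))
+    # so each tie shrinks the variance used by the asymptotic method.
+    @staticmethod
+    def _tie_corrected_variance(n1, n2, tie_sizes):
+        N = n1 + n2
+        tie_term = sum(t**3 - t for t in tie_sizes)
+        return n1*n2/12 * ((N + 1) - tie_term/(N*(N - 1)))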
+
+    # --- Test Exact Distribution of U ---
+
+    # These are tabulated values of the CDF of the exact distribution of
+    # the test statistic from pg 52 of reference [1] (Mann-Whitney Original)
+    pn3 = {1: [0.25, 0.5, 0.75], 2: [0.1, 0.2, 0.4, 0.6],
+           3: [0.05, .1, 0.2, 0.35, 0.5, 0.65]}
+    pn4 = {1: [0.2, 0.4, 0.6], 2: [0.067, 0.133, 0.267, 0.4, 0.6],
+           3: [0.028, 0.057, 0.114, 0.2, .314, 0.429, 0.571],
+           4: [0.014, 0.029, 0.057, 0.1, 0.171, 0.243, 0.343, 0.443, 0.557]}
+    pm5 = {1: [0.167, 0.333, 0.5, 0.667],
+           2: [0.047, 0.095, 0.19, 0.286, 0.429, 0.571],
+           3: [0.018, 0.036, 0.071, 0.125, 0.196, 0.286, 0.393, 0.5, 0.607],
+           4: [0.008, 0.016, 0.032, 0.056, 0.095, 0.143,
+               0.206, 0.278, 0.365, 0.452, 0.548],
+           5: [0.004, 0.008, 0.016, 0.028, 0.048, 0.075, 0.111,
+               0.155, 0.21, 0.274, 0.345, .421, 0.5, 0.579]}
+    pm6 = {1: [0.143, 0.286, 0.428, 0.571],
+           2: [0.036, 0.071, 0.143, 0.214, 0.321, 0.429, 0.571],
+           3: [0.012, 0.024, 0.048, 0.083, 0.131,
+               0.19, 0.274, 0.357, 0.452, 0.548],
+           4: [0.005, 0.01, 0.019, 0.033, 0.057, 0.086, 0.129,
+               0.176, 0.238, 0.305, 0.381, 0.457, 0.543],  # the last element
+           # of the previous list, 0.543, has been modified from 0.545;
+           # I assume it was a typo
+           5: [0.002, 0.004, 0.009, 0.015, 0.026, 0.041, 0.063, 0.089,
+               0.123, 0.165, 0.214, 0.268, 0.331, 0.396, 0.465, 0.535],
+           6: [0.001, 0.002, 0.004, 0.008, 0.013, 0.021, 0.032, 0.047,
+               0.066, 0.09, 0.12, 0.155, 0.197, 0.242, 0.294, 0.350,
+               0.409, 0.469, 0.531]}
+
+    def test_exact_distribution(self):
+        # I considered parametrize. I decided against it.
+        p_tables = {3: self.pn3, 4: self.pn4, 5: self.pm5, 6: self.pm6}
+        for n, table in p_tables.items():
+            for m, p in table.items():
+                # check p-value against table
+                u = np.arange(0, len(p))
+                assert_allclose(_mwu_state.cdf(k=u, m=m, n=n), p, atol=1e-3)
+
+                # check identity CDF + SF - PMF = 1
+                # (in this implementation, SF(U) includes PMF(U))
+                u2 = np.arange(0, m*n+1)
+                assert_allclose(_mwu_state.cdf(k=u2, m=m, n=n)
+                                + _mwu_state.sf(k=u2, m=m, n=n)
+                                - _mwu_state.pmf(k=u2, m=m, n=n), 1)
+
+                # check symmetry about mean of U, i.e. pmf(U) = pmf(m*n-U)
+                pmf = _mwu_state.pmf(k=u2, m=m, n=n)
+                assert_allclose(pmf, pmf[::-1])
+
+                # check symmetry w.r.t. interchange of m, n
+                pmf2 = _mwu_state.pmf(k=u2, m=n, n=m)
+                assert_allclose(pmf, pmf2)
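+
+    # A brute-force cross-check of the exact null distribution (sketch, not
+    # part of the original table-based checks): enumerate every placement of
+    # the m x-ranks among the m + n pooled ranks and tally U directly. Only
+    # feasible for tiny m and n.
+    @staticmethod
+    def _brute_force_u_pmf(m, n):
+        from collections import Counter
+        from itertools import combinations
+        counts = Counter()
+        for ranks in combinations(range(m + n), m):
+            # U counts the (x, y) pairs in which y is ranked below x
+            counts[sum(ranks) - m*(m - 1)//2] += 1
+        total = sum(counts.values())
+        return {u: c/total for u, c in sorted(counts.items())}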
+
+    def test_asymptotic_behavior(self):
+        np.random.seed(0)
+
+        # for small samples, the asymptotic test is not very accurate
+        x = np.random.rand(5)
+        y = np.random.rand(5)
+        res1 = mannwhitneyu(x, y, method="exact")
+        res2 = mannwhitneyu(x, y, method="asymptotic")
+        assert res1.statistic == res2.statistic
+        assert np.abs(res1.pvalue - res2.pvalue) > 1e-2
+
+        # for large samples, they agree reasonably well
+        x = np.random.rand(40)
+        y = np.random.rand(40)
+        res1 = mannwhitneyu(x, y, method="exact")
+        res2 = mannwhitneyu(x, y, method="asymptotic")
+        assert res1.statistic == res2.statistic
+        assert np.abs(res1.pvalue - res2.pvalue) < 1e-3
+
+    # --- Test Corner Cases ---
+
+    def test_exact_U_equals_mean(self):
+        # Test U == m*n/2 with exact method
+        # Without special treatment, two-sided p-value > 1 because both
+        # one-sided p-values are > 0.5
+        res_l = mannwhitneyu([1, 2, 3], [1.5, 2.5], alternative="less",
+                             method="exact")
+        res_g = mannwhitneyu([1, 2, 3], [1.5, 2.5], alternative="greater",
+                             method="exact")
+        assert_equal(res_l.pvalue, res_g.pvalue)
+        assert res_l.pvalue > 0.5
+
+        res = mannwhitneyu([1, 2, 3], [1.5, 2.5], alternative="two-sided",
+                           method="exact")
+        assert_equal(res, (3, 1))
+        # U == m*n/2 for asymptotic case tested in test_gh_2118
+        # The reason it's tricky for the asymptotic test has to do with
+        # continuity correction.
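+
+    # The special treatment referred to above, sketched (illustrative): the
+    # two-sided p-value doubles the smaller one-sided p-value and clips at
+    # 1, so a statistic sitting exactly at the mean m*n/2 reports p = 1
+    # instead of a value greater than 1.
+    @staticmethod
+    def _two_sided_from_one_sided(p_less, p_greater):
+        return min(2*min(p_less, p_greater), 1.0)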
+
+    cases_scalar = [[{"alternative": 'two-sided', "method": "asymptotic"},
+                     (0, 1)],
+                    [{"alternative": 'less', "method": "asymptotic"},
+                     (0, 0.5)],
+                    [{"alternative": 'greater', "method": "asymptotic"},
+                     (0, 0.977249868052)],
+                    [{"alternative": 'two-sided', "method": "exact"}, (0, 1)],
+                    [{"alternative": 'less', "method": "exact"}, (0, 0.5)],
+                    [{"alternative": 'greater', "method": "exact"}, (0, 1)]]
+
+    @pytest.mark.parametrize(("kwds", "result"), cases_scalar)
+    def test_scalar_data(self, kwds, result):
+        # just making sure scalars work
+        assert_allclose(mannwhitneyu(1, 2, **kwds), result)
+
+    def test_equal_scalar_data(self):
+        # when two scalars are equal, there is an -0.5/0 in the asymptotic
+        # approximation. R gives pvalue=1.0 for alternatives 'less' and
+        # 'greater' but NA for 'two-sided'. I don't see why, so I don't
+        # see a need for a special case to match that behavior.
+        assert_equal(mannwhitneyu(1, 1, method="exact"), (0.5, 1))
+        assert_equal(mannwhitneyu(1, 1, method="asymptotic"), (0.5, 1))
+
+        # without continuity correction, this becomes 0/0, which really
+        # is undefined
+        assert_equal(mannwhitneyu(1, 1, method="asymptotic",
+                                  use_continuity=False), (0.5, np.nan))
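+
+    # The arithmetic behind the comment above, sketched (illustrative): with
+    # x = y = 1, U equals its mean m*n/2 = 0.5 and, because the pooled data
+    # are completely tied, the tie-corrected standard deviation is 0. The
+    # continuity correction shifts the numerator from 0 to -0.5, so the
+    # z-score resolves to -inf and the survival function gives p = 1;
+    # without the correction the ratio is 0/0, which is genuinely nan.
+    @staticmethod
+    def _sketch_equal_scalars_z(use_continuity=True):
+        U, mu, sigma = 0.5, 0.5, np.float64(0)  # all-tied variance is zero
+        numerator = U - mu - (0.5 if use_continuity else 0.0)
+        with np.errstate(divide='ignore', invalid='ignore'):
+            return numerator / sigma  # -inf with correction, nan without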
+
+    # --- Test Enhancements / Bug Reports ---
+
+    @pytest.mark.parametrize("method", ["asymptotic", "exact"])
+    def test_gh_12837_11113(self, method):
+        # Test that behavior for broadcastable nd arrays is appropriate:
+        # output shape is correct and all values are equal to when the test
+        # is performed on one pair of samples at a time.
+        # Tests that gh-12837 and gh-11113 (requests for n-d input)
+        # are resolved
+        np.random.seed(0)
+
+        # arrays are broadcastable except for axis = -3
+        axis = -3
+        m, n = 7, 10  # sample sizes
+        x = np.random.rand(m, 3, 8)
+        y = np.random.rand(6, n, 1, 8) + 0.1
+        res = mannwhitneyu(x, y, method=method, axis=axis)
+
+        shape = (6, 3, 8)  # appropriate shape of outputs, given inputs
+        assert res.pvalue.shape == shape
+        assert res.statistic.shape == shape
+
+        # move axis of test to end for simplicity
+        x, y = np.moveaxis(x, axis, -1), np.moveaxis(y, axis, -1)
+
+        x = x[None, ...]  # give x a zeroth dimension
+        assert x.ndim == y.ndim
+
+        x = np.broadcast_to(x, shape + (m,))
+        y = np.broadcast_to(y, shape + (n,))
+        assert x.shape[:-1] == shape
+        assert y.shape[:-1] == shape
+
+        # loop over pairs of samples
+        statistics = np.zeros(shape)
+        pvalues = np.zeros(shape)
+        for indices in product(*[range(i) for i in shape]):
+            xi = x[indices]
+            yi = y[indices]
+            temp = mannwhitneyu(xi, yi, method=method)
+            statistics[indices] = temp.statistic
+            pvalues[indices] = temp.pvalue
+
+        np.testing.assert_equal(res.pvalue, pvalues)
+        np.testing.assert_equal(res.statistic, statistics)
+
+    def test_gh_11355(self):
+        # Test for correct behavior with NaN/Inf in input
+        x = [1, 2, 3, 4]
+        y = [3, 6, 7, 8, 9, 3, 2, 1, 4, 4, 5]
+        res1 = mannwhitneyu(x, y)
+
+        # Inf is not a problem: this is a rank test, and inf simply acts as
+        # the largest value
+        y[4] = np.inf
+        res2 = mannwhitneyu(x, y)
+
+        assert_equal(res1.statistic, res2.statistic)
+        assert_equal(res1.pvalue, res2.pvalue)
+
+        # NaNs should propagate by default.
+        y[4] = np.nan
+        res3 = mannwhitneyu(x, y)
+        assert_equal(res3.statistic, np.nan)
+        assert_equal(res3.pvalue, np.nan)
+
+    cases_11355 = [([1, 2, 3, 4],
+                    [3, 6, 7, 8, np.inf, 3, 2, 1, 4, 4, 5],
+                    10, 0.1297704873477),
+                   ([1, 2, 3, 4],
+                    [3, 6, 7, 8, np.inf, np.inf, 2, 1, 4, 4, 5],
+                    8.5, 0.08735617507695),
+                   ([1, 2, np.inf, 4],
+                    [3, 6, 7, 8, np.inf, 3, 2, 1, 4, 4, 5],
+                    17.5, 0.5988856695752),
+                   ([1, 2, np.inf, 4],
+                    [3, 6, 7, 8, np.inf, np.inf, 2, 1, 4, 4, 5],
+                    16, 0.4687165824462),
+                   ([1, np.inf, np.inf, 4],
+                    [3, 6, 7, 8, np.inf, np.inf, 2, 1, 4, 4, 5],
+                    24.5, 0.7912517950119)]
+
+    @pytest.mark.parametrize(("x", "y", "statistic", "pvalue"), cases_11355)
+    def test_gh_11355b(self, x, y, statistic, pvalue):
+        # Test for correct behavior with NaN/Inf in input
+        res = mannwhitneyu(x, y, method='asymptotic')
+        assert_allclose(res.statistic, statistic, atol=1e-12)
+        assert_allclose(res.pvalue, pvalue, atol=1e-12)
+
+    cases_9184 = [[True, "less", "asymptotic", 0.900775348204],
+                  [True, "greater", "asymptotic", 0.1223118025635],
+                  [True, "two-sided", "asymptotic", 0.244623605127],
+                  [False, "less", "asymptotic", 0.8896643190401],
+                  [False, "greater", "asymptotic", 0.1103356809599],
+                  [False, "two-sided", "asymptotic", 0.2206713619198],
+                  [True, "less", "exact", 0.8967698967699],
+                  [True, "greater", "exact", 0.1272061272061],
+                  [True, "two-sided", "exact", 0.2544122544123]]
+
+    @pytest.mark.parametrize(("use_continuity", "alternative",
+                              "method", "pvalue_exp"), cases_9184)
+    def test_gh_9184(self, use_continuity, alternative, method, pvalue_exp):
+        # gh-9184 might be considered a doc-only bug. Please see the
+        # documentation to confirm that mannwhitneyu correctly notes
+        # that the output statistic is that of the first sample (x). In any
+        # case, check the case provided there against output from R.
+        # R code:
+        # options(digits=16)
+        # x <- c(0.80, 0.83, 1.89, 1.04, 1.45, 1.38, 1.91, 1.64, 0.73, 1.46)
+        # y <- c(1.15, 0.88, 0.90, 0.74, 1.21)
+        # wilcox.test(x, y, alternative = "less", exact = FALSE)
+        # wilcox.test(x, y, alternative = "greater", exact = FALSE)
+        # wilcox.test(x, y, alternative = "two.sided", exact = FALSE)
+        # wilcox.test(x, y, alternative = "less", exact = FALSE,
+        #             correct=FALSE)
+        # wilcox.test(x, y, alternative = "greater", exact = FALSE,
+        #             correct=FALSE)
+        # wilcox.test(x, y, alternative = "two.sided", exact = FALSE,
+        #             correct=FALSE)
+        # wilcox.test(x, y, alternative = "less", exact = TRUE)
+        # wilcox.test(x, y, alternative = "greater", exact = TRUE)
+        # wilcox.test(x, y, alternative = "two.sided", exact = TRUE)
+        statistic_exp = 35
+        x = (0.80, 0.83, 1.89, 1.04, 1.45, 1.38, 1.91, 1.64, 0.73, 1.46)
+        y = (1.15, 0.88, 0.90, 0.74, 1.21)
+        res = mannwhitneyu(x, y, use_continuity=use_continuity,
+                           alternative=alternative, method=method)
+        assert_equal(res.statistic, statistic_exp)
+        assert_allclose(res.pvalue, pvalue_exp)
+
+    def test_gh_6897(self):
+        # Test for correct behavior with empty input
+        with assert_raises(ValueError, match="`x` and `y` must be of nonzero"):
+            mannwhitneyu([], [])
+
+    def test_gh_4067(self):
+        # Test for correct behavior with all NaN input - default is propagate
+        a = np.array([np.nan, np.nan, np.nan, np.nan, np.nan])
+        b = np.array([np.nan, np.nan, np.nan, np.nan, np.nan])
+        res = mannwhitneyu(a, b)
+        assert_equal(res.statistic, np.nan)
+        assert_equal(res.pvalue, np.nan)
+
+    # All cases checked against R wilcox.test, e.g.
+    # options(digits=16)
+    # x = c(1, 2, 3)
+    # y = c(1.5, 2.5)
+    # wilcox.test(x, y, exact=FALSE, alternative='less')
+
+    cases_2118 = [[[1, 2, 3], [1.5, 2.5], "greater", (3, 0.6135850036578)],
+                  [[1, 2, 3], [1.5, 2.5], "less", (3, 0.6135850036578)],
+                  [[1, 2, 3], [1.5, 2.5], "two-sided", (3, 1.0)],
+                  [[1, 2, 3], [2], "greater", (1.5, 0.681324055883)],
+                  [[1, 2, 3], [2], "less", (1.5, 0.681324055883)],
+                  [[1, 2, 3], [2], "two-sided", (1.5, 1)],
+                  [[1, 2], [1, 2], "greater", (2, 0.667497228949)],
+                  [[1, 2], [1, 2], "less", (2, 0.667497228949)],
+                  [[1, 2], [1, 2], "two-sided", (2, 1)]]
+
+    @pytest.mark.parametrize(["x", "y", "alternative", "expected"], cases_2118)
+    def test_gh_2118(self, x, y, alternative, expected):
+        # test cases in which U == m*n/2 when method is asymptotic
+        # applying continuity correction could result in p-value > 1
+        res = mannwhitneyu(x, y, use_continuity=True, alternative=alternative,
+                           method="asymptotic")
+        assert_allclose(res, expected, rtol=1e-12)
+
+    def teardown_method(self):
+        _mwu_state._recursive = None
+
+
+class TestMannWhitneyU_iterative(TestMannWhitneyU):
+    def setup_method(self):
+        _mwu_state._recursive = False
+
+    def teardown_method(self):
+        _mwu_state._recursive = None
+
+
+@pytest.mark.xslow
+def test_mann_whitney_u_switch():
+    # Check that mannwhitneyu switches between recursive and iterative
+    # implementations at n = 500
+
+    # ensure that recursion is not enforced
+    _mwu_state._recursive = None
+    _mwu_state._fmnks = -np.ones((1, 1, 1))
+
+    rng = np.random.default_rng(9546146887652)
+    x = rng.random(5)
+
+    # use iterative algorithm because n > 500
+    y = rng.random(501)
+    stats.mannwhitneyu(x, y, method='exact')
+    # iterative algorithm doesn't modify _mwu_state._fmnks
+    assert np.all(_mwu_state._fmnks == -1)
+
+    # use recursive algorithm because n <= 500
+    y = rng.random(500)
+    stats.mannwhitneyu(x, y, method='exact')
+
+    # recursive algorithm has modified _mwu_state._fmnks
+    assert not np.all(_mwu_state._fmnks == -1)
+
+
+class TestSomersD(_TestPythranFunc):
+    def setup_method(self):
+        self.dtypes = self.ALL_INTEGER + self.ALL_FLOAT
+        self.arguments = {0: (np.arange(10),
+                              self.ALL_INTEGER + self.ALL_FLOAT),
+                          1: (np.arange(10),
+                              self.ALL_INTEGER + self.ALL_FLOAT)}
+        input_array = [self.arguments[idx][0] for idx in self.arguments]
+        # In this case, self.partialfunc can simply be stats.somersd,
+        # since `alternative` is an optional argument. If it is required,
+        # we can use functools.partial to freeze the value, because
+        # here we mainly test various array inputs, not str, etc.
+        self.partialfunc = functools.partial(stats.somersd,
+                                             alternative='two-sided')
+        self.expected = self.partialfunc(*input_array)
+
+    def pythranfunc(self, *args):
+        res = self.partialfunc(*args)
+        assert_allclose(res.statistic, self.expected.statistic, atol=1e-15)
+        assert_allclose(res.pvalue, self.expected.pvalue, atol=1e-15)
+
+    def test_pythranfunc_keywords(self):
+        # Not specifying the optional keyword args
+        table = [[27, 25, 14, 7, 0], [7, 14, 18, 35, 12], [1, 3, 2, 7, 17]]
+        res1 = stats.somersd(table)
+        # Specifying the optional keyword args with default value
+        optional_args = self.get_optional_args(stats.somersd)
+        res2 = stats.somersd(table, **optional_args)
+        # Check if the results are the same in two cases
+        assert_allclose(res1.statistic, res2.statistic, atol=1e-15)
+        assert_allclose(res1.pvalue, res2.pvalue, atol=1e-15)
+
+    def test_like_kendalltau(self):
+        # All tests correspond with one in test_stats.py `test_kendalltau`
+
+        # case without ties, con-dis equal zero
+        x = [5, 2, 1, 3, 6, 4, 7, 8]
+        y = [5, 2, 6, 3, 1, 8, 7, 4]
+        # Cross-check with result from SAS FREQ:
+        expected = (0.000000000000000, 1.000000000000000)
+        res = stats.somersd(x, y)
+        assert_allclose(res.statistic, expected[0], atol=1e-15)
+        assert_allclose(res.pvalue, expected[1], atol=1e-15)
+
+        # case without ties, con-dis equal zero
+        x = [0, 5, 2, 1, 3, 6, 4, 7, 8]
+        y = [5, 2, 0, 6, 3, 1, 8, 7, 4]
+        # Cross-check with result from SAS FREQ:
+        expected = (0.000000000000000, 1.000000000000000)
+        res = stats.somersd(x, y)
+        assert_allclose(res.statistic, expected[0], atol=1e-15)
+        assert_allclose(res.pvalue, expected[1], atol=1e-15)
+
+        # case without ties, con-dis close to zero
+        x = [5, 2, 1, 3, 6, 4, 7]
+        y = [5, 2, 6, 3, 1, 7, 4]
+        # Cross-check with result from SAS FREQ:
+        expected = (-0.142857142857140, 0.630326953157670)
+        res = stats.somersd(x, y)
+        assert_allclose(res.statistic, expected[0], atol=1e-15)
+        assert_allclose(res.pvalue, expected[1], atol=1e-15)
+
+        # simple case without ties
+        x = np.arange(10)
+        y = np.arange(10)
+        # Cross-check with result from SAS FREQ:
+        # SAS p value is not provided.
+        expected = (1.000000000000000, 0)
+        res = stats.somersd(x, y)
+        assert_allclose(res.statistic, expected[0], atol=1e-15)
+        assert_allclose(res.pvalue, expected[1], atol=1e-15)
+
+        # swap a couple values and a couple more
+        x = np.arange(10)
+        y = np.array([0, 2, 1, 3, 4, 6, 5, 7, 8, 9])
+        # Cross-check with result from SAS FREQ:
+        expected = (0.911111111111110, 0.000000000000000)
+        res = stats.somersd(x, y)
+        assert_allclose(res.statistic, expected[0], atol=1e-15)
+        assert_allclose(res.pvalue, expected[1], atol=1e-15)
+
+        # same in opposite direction
+        x = np.arange(10)
+        y = np.arange(10)[::-1]
+        # Cross-check with result from SAS FREQ:
+        # SAS p value is not provided.
+        expected = (-1.000000000000000, 0)
+        res = stats.somersd(x, y)
+        assert_allclose(res.statistic, expected[0], atol=1e-15)
+        assert_allclose(res.pvalue, expected[1], atol=1e-15)
+
+        # swap a couple values and a couple more
+        x = np.arange(10)
+        y = np.array([9, 7, 8, 6, 5, 3, 4, 2, 1, 0])
+        # Cross-check with result from SAS FREQ:
+        expected = (-0.9111111111111111, 0.000000000000000)
+        res = stats.somersd(x, y)
+        assert_allclose(res.statistic, expected[0], atol=1e-15)
+        assert_allclose(res.pvalue, expected[1], atol=1e-15)
+
+        # with some ties
+        x1 = [12, 2, 1, 12, 2]
+        x2 = [1, 4, 7, 1, 0]
+        # Cross-check with result from SAS FREQ:
+        expected = (-0.500000000000000, 0.304901788178780)
+        res = stats.somersd(x1, x2)
+        assert_allclose(res.statistic, expected[0], atol=1e-15)
+        assert_allclose(res.pvalue, expected[1], atol=1e-15)
+
+        # with only ties in one or both inputs
+        # SAS will not produce an output for these:
+        # NOTE: No statistics are computed for x * y because x has fewer
+        # than 2 nonmissing levels.
+        # WARNING: No OUTPUT data set is produced for this table because a
+        # row or column variable has fewer than 2 nonmissing levels and no
+        # statistics are computed.
+
+        res = stats.somersd([2, 2, 2], [2, 2, 2])
+        assert_allclose(res.statistic, np.nan)
+        assert_allclose(res.pvalue, np.nan)
+
+        res = stats.somersd([2, 0, 2], [2, 2, 2])
+        assert_allclose(res.statistic, np.nan)
+        assert_allclose(res.pvalue, np.nan)
+
+        res = stats.somersd([2, 2, 2], [2, 0, 2])
+        assert_allclose(res.statistic, np.nan)
+        assert_allclose(res.pvalue, np.nan)
+
+        res = stats.somersd([0], [0])
+        assert_allclose(res.statistic, np.nan)
+        assert_allclose(res.pvalue, np.nan)
+
+        # empty arrays provided as input
+        res = stats.somersd([], [])
+        assert_allclose(res.statistic, np.nan)
+        assert_allclose(res.pvalue, np.nan)
+
+        # test unequal length inputs
+        x = np.arange(10.)
+        y = np.arange(20.)
+        assert_raises(ValueError, stats.somersd, x, y)
+
+    def test_asymmetry(self):
+        # test that somersd is asymmetric w.r.t. input order and that
+        # convention is as described: first input is row variable & independent
+        # data is from Wikipedia:
+        # https://en.wikipedia.org/wiki/Somers%27_D
+        # but currently that example contradicts itself - it says X is
+        # independent yet takes D_XY
+
+        x = [1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 1, 2,
+             2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3]
+        y = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
+             2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
+        # Cross-check with result from SAS FREQ:
+        d_cr = 0.272727272727270
+        d_rc = 0.342857142857140
+        p = 0.092891940883700  # same p-value for either direction
+        res = stats.somersd(x, y)
+        assert_allclose(res.statistic, d_cr, atol=1e-15)
+        assert_allclose(res.pvalue, p, atol=1e-4)
+        assert_equal(res.table.shape, (3, 2))
+        res = stats.somersd(y, x)
+        assert_allclose(res.statistic, d_rc, atol=1e-15)
+        assert_allclose(res.pvalue, p, atol=1e-15)
+        assert_equal(res.table.shape, (2, 3))
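+
+    # The asymmetry verified above, written as the usual formula (sketch,
+    # illustrative only): with P concordant pairs, Q discordant pairs, and
+    # T_y pairs tied on y alone, Somers' D of y given x is
+    #     D(y|x) = (P - Q) / (P + Q + T_y)
+    # Swapping the roles of x and y changes the denominator, not P - Q,
+    # which is why the two directions give different statistics.
+    @staticmethod
+    def _naive_somers_d(x, y):
+        # O(n**2) pairwise count; x plays the independent role
+        P = Q = Ty = 0
+        for i in range(len(x)):
+            for j in range(i + 1, len(x)):
+                dx = np.sign(x[i] - x[j])
+                dy = np.sign(y[i] - y[j])
+                if dx == 0:
+                    continue  # pairs tied on x are excluded entirely
+                if dy == 0:
+                    Ty += 1
+                elif dx == dy:
+                    P += 1
+                else:
+                    Q += 1
+        return (P - Q) / (P + Q + Ty)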
+
+    def test_somers_original(self):
+        # test against Somers' original paper [1]
+
+        # Table 5A
+        # Somers' convention was column IV
+        table = np.array([[8, 2], [6, 5], [3, 4], [1, 3], [2, 3]])
+        # Our convention (and that of SAS FREQ) is row IV
+        table = table.T
+        dyx = 129/340
+        assert_allclose(stats.somersd(table).statistic, dyx)
+
+        # table 7A - d_yx = 1
+        table = np.array([[25, 0], [85, 0], [0, 30]])
+        dxy, dyx = 3300/5425, 3300/3300
+        assert_allclose(stats.somersd(table).statistic, dxy)
+        assert_allclose(stats.somersd(table.T).statistic, dyx)
+
+        # table 7B - d_yx < 0
+        table = np.array([[25, 0], [0, 30], [85, 0]])
+        dyx = -1800/3300
+        assert_allclose(stats.somersd(table.T).statistic, dyx)
+
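+    # Illustrative sketch only (not part of the original suite; the helper
+    # name is hypothetical): it spells out the pair-counting convention the
+    # tests above assume, namely
+    #   D(Y|X) = (concordant - discordant) / (number of pairs not tied on x)
+    @staticmethod
+    def _somersd_by_pair_counting(x, y):
+        from itertools import combinations
+        concordant = discordant = untied_x = 0
+        for (xi, yi), (xj, yj) in combinations(zip(x, y), 2):
+            if xi != xj:
+                untied_x += 1
+                s = np.sign(xi - xj) * np.sign(yi - yj)
+                concordant += s > 0
+                discordant += s < 0
+        return (concordant - discordant) / untied_x
+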
+    def test_contingency_table_with_zero_rows_cols(self):
+        # test that zero rows/cols in contingency table don't affect result
+
+        N = 100
+        shape = 4, 6
+        size = np.prod(shape)
+
+        np.random.seed(0)
+        s = stats.multinomial.rvs(N, p=np.ones(size)/size).reshape(shape)
+        res = stats.somersd(s)
+
+        s2 = np.insert(s, 2, np.zeros(shape[1]), axis=0)
+        res2 = stats.somersd(s2)
+
+        s3 = np.insert(s, 2, np.zeros(shape[0]), axis=1)
+        res3 = stats.somersd(s3)
+
+        s4 = np.insert(s2, 2, np.zeros(shape[0]+1), axis=1)
+        res4 = stats.somersd(s4)
+
+        # Cross-check with result from SAS FREQ:
+        assert_allclose(res.statistic, -0.116981132075470, atol=1e-15)
+        assert_allclose(res.statistic, res2.statistic)
+        assert_allclose(res.statistic, res3.statistic)
+        assert_allclose(res.statistic, res4.statistic)
+
+        assert_allclose(res.pvalue, 0.156376448188150, atol=1e-15)
+        assert_allclose(res.pvalue, res2.pvalue)
+        assert_allclose(res.pvalue, res3.pvalue)
+        assert_allclose(res.pvalue, res4.pvalue)
+
+    def test_invalid_contingency_tables(self):
+        N = 100
+        shape = 4, 6
+        size = np.prod(shape)
+
+        np.random.seed(0)
+        # start with a valid contingency table
+        s = stats.multinomial.rvs(N, p=np.ones(size)/size).reshape(shape)
+
+        s5 = s - 2
+        message = "All elements of the contingency table must be non-negative"
+        with assert_raises(ValueError, match=message):
+            stats.somersd(s5)
+
+        s6 = s + 0.01
+        message = "All elements of the contingency table must be integer"
+        with assert_raises(ValueError, match=message):
+            stats.somersd(s6)
+
+        message = ("At least two elements of the contingency "
+                   "table must be nonzero.")
+        with assert_raises(ValueError, match=message):
+            stats.somersd([[]])
+
+        with assert_raises(ValueError, match=message):
+            stats.somersd([[1]])
+
+        s7 = np.zeros((3, 3))
+        with assert_raises(ValueError, match=message):
+            stats.somersd(s7)
+
+        s7[0, 1] = 1
+        with assert_raises(ValueError, match=message):
+            stats.somersd(s7)
+
+    def test_only_ranks_matter(self):
+        # only ranks of input data should matter
+        x = [1, 2, 3]
+        x2 = [-1, 2.1, np.inf]
+        y = [3, 2, 1]
+        y2 = [0, -0.5, -np.inf]
+        res = stats.somersd(x, y)
+        res2 = stats.somersd(x2, y2)
+        assert_equal(res.statistic, res2.statistic)
+        assert_equal(res.pvalue, res2.pvalue)
+
+    def test_contingency_table_return(self):
+        # check that contingency table is returned
+        x = np.arange(10)
+        y = np.arange(10)
+        res = stats.somersd(x, y)
+        assert_equal(res.table, np.eye(10))
+
+    def test_somersd_alternative(self):
+        # Test alternative parameter, asymptotic method (due to tie)
+
+        # Based on scipy.stats.test_stats.TestCorrSpearman2::test_alternative
+        x1 = [1, 2, 3, 4, 5]
+        x2 = [5, 6, 7, 8, 7]
+
+        # strong positive correlation
+        expected = stats.somersd(x1, x2, alternative="two-sided")
+        assert expected.statistic > 0
+
+        # rank correlation > 0 -> large "less" p-value
+        res = stats.somersd(x1, x2, alternative="less")
+        assert_equal(res.statistic, expected.statistic)
+        assert_allclose(res.pvalue, 1 - (expected.pvalue / 2))
+
+        # rank correlation > 0 -> small "greater" p-value
+        res = stats.somersd(x1, x2, alternative="greater")
+        assert_equal(res.statistic, expected.statistic)
+        assert_allclose(res.pvalue, expected.pvalue / 2)
+
+        # reverse the direction of rank correlation
+        x2.reverse()
+
+        # strong negative correlation
+        expected = stats.somersd(x1, x2, alternative="two-sided")
+        assert expected.statistic < 0
+
+        # rank correlation < 0 -> large "greater" p-value
+        res = stats.somersd(x1, x2, alternative="greater")
+        assert_equal(res.statistic, expected.statistic)
+        assert_allclose(res.pvalue, 1 - (expected.pvalue / 2))
+
+        # rank correlation < 0 -> small "less" p-value
+        res = stats.somersd(x1, x2, alternative="less")
+        assert_equal(res.statistic, expected.statistic)
+        assert_allclose(res.pvalue, expected.pvalue / 2)
+
+        with pytest.raises(ValueError, match="alternative must be 'less'..."):
+            stats.somersd(x1, x2, alternative="ekki-ekki")
+
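+    # For reference: the identities exercised above follow from the
+    # asymptotic normal approximation used when ties are present (stated as
+    # an assumption, not a claim about scipy internals):
+    #   p_less + p_greater = 1,  p_two_sided = 2 * min(p_less, p_greater)
+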
+    @pytest.mark.parametrize("positive_correlation", (False, True))
+    def test_somersd_perfect_correlation(self, positive_correlation):
+        # Before the addition of `alternative`, perfect correlation was
+        # treated as a special case. Now it is treated like any other case, but
+        # make sure there are no divide by zero warnings or associated errors
+
+        x1 = np.arange(10)
+        x2 = x1 if positive_correlation else np.flip(x1)
+        expected_statistic = 1 if positive_correlation else -1
+
+        # perfect correlation -> small "two-sided" p-value (0)
+        res = stats.somersd(x1, x2, alternative="two-sided")
+        assert res.statistic == expected_statistic
+        assert res.pvalue == 0
+
+        # rank correlation > 0 -> large "less" p-value (1)
+        res = stats.somersd(x1, x2, alternative="less")
+        assert res.statistic == expected_statistic
+        assert res.pvalue == (1 if positive_correlation else 0)
+
+        # rank correlation > 0 -> small "greater" p-value (0)
+        res = stats.somersd(x1, x2, alternative="greater")
+        assert res.statistic == expected_statistic
+        assert res.pvalue == (0 if positive_correlation else 1)
+
+
+class TestBarnardExact:
+    """Some tests to show that barnard_exact() works correctly."""
+
+    @pytest.mark.parametrize(
+        "input_sample,expected",
+        [
+            ([[43, 40], [10, 39]], (3.555406779643, 0.000362832367)),
+            ([[100, 2], [1000, 5]], (-1.776382925679, 0.135126970878)),
+            ([[2, 7], [8, 2]], (-2.518474945157, 0.019210815430)),
+            ([[5, 1], [10, 10]], (1.449486150679, 0.156277546306)),
+            ([[5, 15], [20, 20]], (-1.851640199545, 0.066363501421)),
+            ([[5, 16], [20, 25]], (-1.609639949352, 0.116984852192)),
+            ([[10, 5], [10, 1]], (-1.449486150679, 0.177536588915)),
+            ([[5, 0], [1, 4]], (2.581988897472, 0.013671875000)),
+            ([[0, 1], [3, 2]], (-1.095445115010, 0.509667991877)),
+            ([[0, 2], [6, 4]], (-1.549193338483, 0.197019618792)),
+            ([[2, 7], [8, 2]], (-2.518474945157, 0.019210815430)),
+        ],
+    )
+    def test_precise(self, input_sample, expected):
+        """The expected values have been generated by R, using a resolution
+        for the nuisance parameter of 1e-6:
+        ```R
+        library(Barnard)
+        options(digits=10)
+        barnard.test(43, 40, 10, 39, dp=1e-6, pooled=TRUE)
+        ```
+        """
+        res = barnard_exact(input_sample)
+        statistic, pvalue = res.statistic, res.pvalue
+        assert_allclose([statistic, pvalue], expected)
+
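+    # Illustrative sketch only (the helper name is hypothetical and this is
+    # an assumption about the candidate statistic, not about scipy
+    # internals): treating the columns as the two samples, the Wald
+    # statistic reproduces the values above, e.g. ~3.5554 (pooled) and
+    # ~3.9204 (unpooled) for [[43, 40], [10, 39]].
+    @staticmethod
+    def _wald_statistic(table, pooled=True):
+        (x1, x2), (y1, y2) = table
+        n1, n2 = x1 + y1, x2 + y2
+        p1, p2 = x1 / n1, x2 / n2
+        if pooled:
+            p = (x1 + x2) / (n1 + n2)
+            var = p * (1 - p) * (1 / n1 + 1 / n2)
+        else:
+            var = p1 * (1 - p1) / n1 + p2 * (1 - p2) / n2
+        return (p1 - p2) / np.sqrt(var)
+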
+    @pytest.mark.parametrize(
+        "input_sample,expected",
+        [
+            ([[43, 40], [10, 39]], (3.920362887717, 0.000289470662)),
+            ([[100, 2], [1000, 5]], (-1.139432816087, 0.950272080594)),
+            ([[2, 7], [8, 2]], (-3.079373904042, 0.020172119141)),
+            ([[5, 1], [10, 10]], (1.622375939458, 0.150599922226)),
+            ([[5, 15], [20, 20]], (-1.974771239528, 0.063038448651)),
+            ([[5, 16], [20, 25]], (-1.722122973346, 0.133329494287)),
+            ([[10, 5], [10, 1]], (-1.765469659009, 0.250566655215)),
+            ([[5, 0], [1, 4]], (5.477225575052, 0.007812500000)),
+            ([[0, 1], [3, 2]], (-1.224744871392, 0.509667991877)),
+            ([[0, 2], [6, 4]], (-1.732050807569, 0.197019618792)),
+            ([[2, 7], [8, 2]], (-3.079373904042, 0.020172119141)),
+        ],
+    )
+    def test_pooled_param(self, input_sample, expected):
+        """The expected values have been generated by R, using a resolution
+        for the nuisance parameter of 1e-6:
+        ```R
+        library(Barnard)
+        options(digits=10)
+        barnard.test(43, 40, 10, 39, dp=1e-6, pooled=FALSE)
+        ```
+        """
+        res = barnard_exact(input_sample, pooled=False)
+        statistic, pvalue = res.statistic, res.pvalue
+        assert_allclose([statistic, pvalue], expected)
+
+    def test_raises(self):
+        # test that we raise an error for an invalid number of nuisance
+        # parameter points
+        error_msg = (
+            "Number of points `n` must be strictly positive, found 0"
+        )
+        with assert_raises(ValueError, match=error_msg):
+            barnard_exact([[1, 2], [3, 4]], n=0)
+
+        # test that we raise an error for a wrong input shape
+        error_msg = "The input `table` must be of shape \\(2, 2\\)."
+        with assert_raises(ValueError, match=error_msg):
+            barnard_exact(np.arange(6).reshape(2, 3))
+
+        # Test that all values must be nonnegative
+        error_msg = "All values in `table` must be nonnegative."
+        with assert_raises(ValueError, match=error_msg):
+            barnard_exact([[-1, 2], [3, 4]])
+
+        # Test that ValueError is raised for a wrong `alternative` param
+        error_msg = (
+            "`alternative` should be one of {'two-sided', 'less', 'greater'},"
+            " found .*"
+        )
+        with assert_raises(ValueError, match=error_msg):
+            barnard_exact([[1, 2], [3, 4]], "not-correct")
+
+    @pytest.mark.parametrize(
+        "input_sample,expected",
+        [
+            ([[0, 0], [4, 3]], (1.0, 0)),
+        ],
+    )
+    def test_edge_cases(self, input_sample, expected):
+        res = barnard_exact(input_sample)
+        statistic, pvalue = res.statistic, res.pvalue
+        assert_equal(pvalue, expected[0])
+        assert_equal(statistic, expected[1])
+
+    @pytest.mark.parametrize(
+        "input_sample,expected",
+        [
+            ([[0, 5], [0, 10]], (1.0, np.nan)),
+            ([[5, 0], [10, 0]], (1.0, np.nan)),
+        ],
+    )
+    def test_row_or_col_zero(self, input_sample, expected):
+        res = barnard_exact(input_sample)
+        statistic, pvalue = res.statistic, res.pvalue
+        assert_equal(pvalue, expected[0])
+        assert_equal(statistic, expected[1])
+
+    @pytest.mark.parametrize(
+        "input_sample,expected",
+        [
+            ([[2, 7], [8, 2]], (-2.518474945157, 0.009886140845)),
+            ([[7, 200], [300, 8]], (-21.320036698460, 0.0)),
+            ([[21, 28], [1957, 6]], (-30.489638143953, 0.0)),
+        ],
+    )
+    @pytest.mark.parametrize("alternative", ["greater", "less"])
+    def test_less_greater(self, input_sample, expected, alternative):
+        """
+        "The expected values have been generated by R, using a resolution
+        for the nuisance parameter of 1e-6 :
+        ```R
+        library(Barnard)
+        options(digits=10)
+        a = barnard.test(2, 7, 8, 2, dp=1e-6, pooled=TRUE)
+        a$p.value[1]
+        ```
+        In this test, we are using the "one-sided" return value `a$p.value[1]`
+        to test our pvalue.
+        """
+        expected_stat, less_pvalue_expect = expected
+
+        if alternative == "greater":
+            input_sample = np.array(input_sample)[:, ::-1]
+            expected_stat = -expected_stat
+
+        res = barnard_exact(input_sample, alternative=alternative)
+        statistic, pvalue = res.statistic, res.pvalue
+        assert_allclose(
+            [statistic, pvalue], [expected_stat, less_pvalue_expect], atol=1e-7
+        )
+
+
+class TestBoschlooExact:
+    """Some tests to show that boschloo_exact() works correctly."""
+
+    ATOL = 1e-7
+
+    @pytest.mark.parametrize(
+        "input_sample,expected",
+        [
+            ([[2, 7], [8, 2]], (0.01852173, 0.009886142)),
+            ([[5, 1], [10, 10]], (0.9782609, 0.9450994)),
+            ([[5, 16], [20, 25]], (0.08913823, 0.05827348)),
+            ([[10, 5], [10, 1]], (0.1652174, 0.08565611)),
+            ([[5, 0], [1, 4]], (1, 1)),
+            ([[0, 1], [3, 2]], (0.5, 0.34375)),
+            ([[2, 7], [8, 2]], (0.01852173, 0.009886142)),
+            ([[7, 12], [8, 3]], (0.06406797, 0.03410916)),
+            ([[10, 24], [25, 37]], (0.2009359, 0.1512882)),
+        ],
+    )
+    def test_less(self, input_sample, expected):
+        """The expected values have been generated by R, using a resolution
+        for the nuisance parameter of 1e-8:
+        ```R
+        library(Exact)
+        options(digits=10)
+        data <- matrix(c(43, 10, 40, 39), 2, 2, byrow=TRUE)
+        a = exact.test(data, method="Boschloo", alternative="less",
+                       tsmethod="central", np.interval=TRUE, beta=1e-8)
+        ```
+        """
+        res = boschloo_exact(input_sample, alternative="less")
+        statistic, pvalue = res.statistic, res.pvalue
+        assert_allclose([statistic, pvalue], expected, atol=self.ATOL)
+
+    @pytest.mark.parametrize(
+        "input_sample,expected",
+        [
+            ([[43, 40], [10, 39]], (0.0002875544, 0.0001615562)),
+            ([[2, 7], [8, 2]], (0.9990149, 0.9918327)),
+            ([[5, 1], [10, 10]], (0.1652174, 0.09008534)),
+            ([[5, 15], [20, 20]], (0.9849087, 0.9706997)),
+            ([[5, 16], [20, 25]], (0.972349, 0.9524124)),
+            ([[5, 0], [1, 4]], (0.02380952, 0.006865367)),
+            ([[0, 1], [3, 2]], (1, 1)),
+            ([[0, 2], [6, 4]], (1, 1)),
+            ([[2, 7], [8, 2]], (0.9990149, 0.9918327)),
+            ([[7, 12], [8, 3]], (0.9895302, 0.9771215)),
+            ([[10, 24], [25, 37]], (0.9012936, 0.8633275)),
+        ],
+    )
+    def test_greater(self, input_sample, expected):
+        """The expected values have been generated by R, using a resolution
+        for the nuisance parameter of 1e-8:
+        ```R
+        library(Exact)
+        options(digits=10)
+        data <- matrix(c(43, 10, 40, 39), 2, 2, byrow=TRUE)
+        a = exact.test(data, method="Boschloo", alternative="greater",
+                       tsmethod="central", np.interval=TRUE, beta=1e-8)
+        ```
+        """
+        res = boschloo_exact(input_sample, alternative="greater")
+        statistic, pvalue = res.statistic, res.pvalue
+        assert_allclose([statistic, pvalue], expected, atol=self.ATOL)
+
+    @pytest.mark.parametrize(
+        "input_sample,expected",
+        [
+            ([[43, 40], [10, 39]], (0.0002875544, 0.0003231115)),
+            ([[2, 7], [8, 2]], (0.01852173, 0.01977228)),
+            ([[5, 1], [10, 10]], (0.1652174, 0.1801707)),
+            ([[5, 16], [20, 25]], (0.08913823, 0.116547)),
+            ([[5, 0], [1, 4]], (0.02380952, 0.01373073)),
+            ([[0, 1], [3, 2]], (0.5, 0.6875)),
+            ([[2, 7], [8, 2]], (0.01852173, 0.01977228)),
+            ([[7, 12], [8, 3]], (0.06406797, 0.06821831)),
+        ],
+    )
+    def test_two_sided(self, input_sample, expected):
+        """The expected values have been generated by R, using a resolution
+        for the nuisance parameter of 1e-8:
+        ```R
+        library(Exact)
+        options(digits=10)
+        data <- matrix(c(43, 10, 40, 39), 2, 2, byrow=TRUE)
+        a = exact.test(data, method="Boschloo", alternative="two.sided",
+                       tsmethod="central", np.interval=TRUE, beta=1e-8)
+        ```
+        """
+        # n=64 is needed for 32-bit Python builds
+        res = boschloo_exact(input_sample, alternative="two-sided", n=64)
+        statistic, pvalue = res.statistic, res.pvalue
+        assert_allclose([statistic, pvalue], expected, atol=self.ATOL)
+
+    def test_raises(self):
+        # test that we raise an error for an invalid number of nuisance
+        # parameter points
+        error_msg = (
+            "Number of points `n` must be strictly positive, found 0"
+        )
+        with assert_raises(ValueError, match=error_msg):
+            boschloo_exact([[1, 2], [3, 4]], n=0)
+
+        # test that we raise an error for a wrong input shape
+        error_msg = "The input `table` must be of shape \\(2, 2\\)."
+        with assert_raises(ValueError, match=error_msg):
+            boschloo_exact(np.arange(6).reshape(2, 3))
+
+        # Test that all values must be nonnegative
+        error_msg = "All values in `table` must be nonnegative."
+        with assert_raises(ValueError, match=error_msg):
+            boschloo_exact([[-1, 2], [3, 4]])
+
+        # Test that ValueError is raised for a wrong `alternative` param
+        error_msg = (
+            r"`alternative` should be one of \('two-sided', 'less', "
+            r"'greater'\), found .*"
+        )
+        with assert_raises(ValueError, match=error_msg):
+            boschloo_exact([[1, 2], [3, 4]], "not-correct")
+
+    @pytest.mark.parametrize(
+        "input_sample,expected",
+        [
+            ([[0, 5], [0, 10]], (np.nan, np.nan)),
+            ([[5, 0], [10, 0]], (np.nan, np.nan)),
+        ],
+    )
+    def test_row_or_col_zero(self, input_sample, expected):
+        res = boschloo_exact(input_sample)
+        statistic, pvalue = res.statistic, res.pvalue
+        assert_equal(pvalue, expected[0])
+        assert_equal(statistic, expected[1])
+
+    def test_two_sided_gt_1(self):
+        # Check that returned p-value does not exceed 1 even when twice
+        # the minimum of the one-sided p-values does. See gh-15345.
+        tbl = [[1, 1], [13, 12]]
+        pl = boschloo_exact(tbl, alternative='less').pvalue
+        pg = boschloo_exact(tbl, alternative='greater').pvalue
+        assert 2*min(pl, pg) > 1
+        pt = boschloo_exact(tbl, alternative='two-sided').pvalue
+        assert pt == 1.0
+
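+    # The clipping verified above amounts to the assumed definition
+    #   p_two_sided = min(1.0, 2 * min(p_less, p_greater))
+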
+    @pytest.mark.parametrize("alternative", ("less", "greater"))
+    def test_against_fisher_exact(self, alternative):
+        # Check that the statistic of `boschloo_exact` is the same as the
+        # p-value of `fisher_exact` (for one-sided tests). See gh-15345.
+        tbl = [[2, 7], [8, 2]]
+        boschloo_stat = boschloo_exact(tbl, alternative=alternative).statistic
+        fisher_p = stats.fisher_exact(tbl, alternative=alternative)[1]
+        assert_allclose(boschloo_stat, fisher_p)
+
+
+class TestCvm_2samp:
+    def test_invalid_input(self):
+        x = np.arange(10).reshape((2, 5))
+        y = np.arange(5)
+        msg = 'The samples must be one-dimensional'
+        with pytest.raises(ValueError, match=msg):
+            cramervonmises_2samp(x, y)
+        with pytest.raises(ValueError, match=msg):
+            cramervonmises_2samp(y, x)
+        msg = 'x and y must contain at least two observations.'
+        with pytest.raises(ValueError, match=msg):
+            cramervonmises_2samp([], y)
+        with pytest.raises(ValueError, match=msg):
+            cramervonmises_2samp(y, [1])
+        msg = 'method must be either auto, exact or asymptotic'
+        with pytest.raises(ValueError, match=msg):
+            cramervonmises_2samp(y, y, 'xyz')
+
+    def test_list_input(self):
+        x = [2, 3, 4, 7, 6]
+        y = [0.2, 0.7, 12, 18]
+        r1 = cramervonmises_2samp(x, y)
+        r2 = cramervonmises_2samp(np.array(x), np.array(y))
+        assert_equal((r1.statistic, r1.pvalue), (r2.statistic, r2.pvalue))
+
+    def test_example_conover(self):
+        # Example 2 in Section 6.2 of W.J. Conover: Practical Nonparametric
+        # Statistics, 1971.
+        x = [7.6, 8.4, 8.6, 8.7, 9.3, 9.9, 10.1, 10.6, 11.2]
+        y = [5.2, 5.7, 5.9, 6.5, 6.8, 8.2, 9.1, 9.8, 10.8, 11.3, 11.5, 12.3,
+             12.5, 13.4, 14.6]
+        r = cramervonmises_2samp(x, y)
+        assert_allclose(r.statistic, 0.262, atol=1e-3)
+        assert_allclose(r.pvalue, 0.18, atol=1e-2)
+
+    @pytest.mark.parametrize('statistic, m, n, pval',
+                             [(710, 5, 6, 48./462),
+                              (1897, 7, 7, 117./1716),
+                              (576, 4, 6, 2./210),
+                              (1764, 6, 7, 2./1716)])
+    def test_exact_pvalue(self, statistic, m, n, pval):
+        # the exact values are taken from Anderson: On the distribution of the
+        # two-sample Cramer-von-Mises criterion, 1962.
+        # The values are taken from Table 2, 3, 4 and 5
+        assert_equal(_pval_cvm_2samp_exact(statistic, m, n), pval)
+
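+    # For reference, a hedged sketch of the relation assumed between
+    # Anderson's tabulated integer statistic U (used above) and the CvM
+    # statistic T reported by cramervonmises_2samp:
+    #   T = U / (m * n * (m + n)) - (4 * m * n - 1) / (6 * (m + n))
+    # e.g. U = 710 with m = 5, n = 6 gives T ~= 0.3485
+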
+    def test_large_sample(self):
+        # for large samples, the statistic U gets very large
+        # do a sanity check that p-value is not 0, 1 or nan
+        np.random.seed(4367)
+        x = distributions.norm.rvs(size=1000000)
+        y = distributions.norm.rvs(size=900000)
+        r = cramervonmises_2samp(x, y)
+        assert_(0 < r.pvalue < 1)
+        r = cramervonmises_2samp(x, y+0.1)
+        assert_(0 < r.pvalue < 1)
+
+    def test_exact_vs_asymptotic(self):
+        np.random.seed(0)
+        x = np.random.rand(7)
+        y = np.random.rand(8)
+        r1 = cramervonmises_2samp(x, y, method='exact')
+        r2 = cramervonmises_2samp(x, y, method='asymptotic')
+        assert_equal(r1.statistic, r2.statistic)
+        assert_allclose(r1.pvalue, r2.pvalue, atol=1e-2)
+
+    def test_method_auto(self):
+        x = np.arange(20)
+        y = [0.5, 4.7, 13.1]
+        r1 = cramervonmises_2samp(x, y, method='exact')
+        r2 = cramervonmises_2samp(x, y, method='auto')
+        assert_equal(r1.pvalue, r2.pvalue)
+        # switch to asymptotic if one sample has more than 20 observations
+        x = np.arange(21)
+        r1 = cramervonmises_2samp(x, y, method='asymptotic')
+        r2 = cramervonmises_2samp(x, y, method='auto')
+        assert_equal(r1.pvalue, r2.pvalue)
+
+    def test_same_input(self):
+        # make sure trivial edge case can be handled
+        # note that _cdf_cvm_inf(0) = nan; the implementation avoids nan by
+        # returning pvalue=1 for very small values of the statistic
+        x = np.arange(15)
+        res = cramervonmises_2samp(x, x)
+        assert_equal((res.statistic, res.pvalue), (0.0, 1.0))
+        # check exact p-value
+        res = cramervonmises_2samp(x[:4], x[:4])
+        assert_equal((res.statistic, res.pvalue), (0.0, 1.0))
+
+
+class TestTukeyHSD:
+
+    data_same_size = ([24.5, 23.5, 26.4, 27.1, 29.9],
+                      [28.4, 34.2, 29.5, 32.2, 30.1],
+                      [26.1, 28.3, 24.3, 26.2, 27.8])
+    data_diff_size = ([24.5, 23.5, 26.28, 26.4, 27.1, 29.9, 30.1, 30.1],
+                      [28.4, 34.2, 29.5, 32.2, 30.1],
+                      [26.1, 28.3, 24.3, 26.2, 27.8])
+    extreme_size = ([24.5, 23.5, 26.4],
+                    [28.4, 34.2, 29.5, 32.2, 30.1, 28.4, 34.2, 29.5, 32.2,
+                     30.1],
+                    [26.1, 28.3, 24.3, 26.2, 27.8])
+
+    sas_same_size = """
+    Comparison LowerCL Difference UpperCL Significance
+    2 - 3	0.6908830568	4.34	7.989116943	    1
+    2 - 1	0.9508830568	4.6 	8.249116943 	1
+    3 - 2	-7.989116943	-4.34	-0.6908830568	1
+    3 - 1	-3.389116943	0.26	3.909116943	    0
+    1 - 2	-8.249116943	-4.6	-0.9508830568	1
+    1 - 3	-3.909116943	-0.26	3.389116943	    0
+    """
+
+    sas_diff_size = """
+    Comparison LowerCL Difference UpperCL Significance
+    2 - 1	0.2679292645	3.645	7.022070736	    1
+    2 - 3	0.5934764007	4.34	8.086523599	    1
+    1 - 2	-7.022070736	-3.645	-0.2679292645	1
+    1 - 3	-2.682070736	0.695	4.072070736	    0
+    3 - 2	-8.086523599	-4.34	-0.5934764007	1
+    3 - 1	-4.072070736	-0.695	2.682070736	    0
+    """
+
+    sas_extreme = """
+    Comparison LowerCL Difference UpperCL Significance
+    2 - 3	1.561605075	    4.34	7.118394925	    1
+    2 - 1	2.740784879	    6.08	9.419215121	    1
+    3 - 2	-7.118394925	-4.34	-1.561605075	1
+    3 - 1	-1.964526566	1.74	5.444526566	    0
+    1 - 2	-9.419215121	-6.08	-2.740784879	1
+    1 - 3	-5.444526566	-1.74	1.964526566	    0
+    """
+
+    @pytest.mark.parametrize("data,res_expect_str,atol",
+                             ((data_same_size, sas_same_size, 1e-4),
+                              (data_diff_size, sas_diff_size, 1e-4),
+                              (extreme_size, sas_extreme, 1e-10),
+                              ),
+                             ids=["equal size sample",
+                                  "unequal sample size",
+                                  "extreme sample size differences"])
+    def test_compare_sas(self, data, res_expect_str, atol):
+        '''
+        SAS code used to generate results for each sample:
+        DATA ACHE;
+        INPUT BRAND RELIEF;
+        CARDS;
+        1 24.5
+        ...
+        3 27.8
+        ;
+        ods graphics on;   ODS RTF;ODS LISTING CLOSE;
+           PROC ANOVA DATA=ACHE;
+           CLASS BRAND;
+           MODEL RELIEF=BRAND;
+           MEANS BRAND/TUKEY CLDIFF;
+           TITLE 'COMPARE RELIEF ACROSS MEDICINES  - ANOVA EXAMPLE';
+           ods output  CLDiffs =tc;
+        proc print data=tc;
+            format LowerCL 17.16 UpperCL 17.16 Difference 17.16;
+            title "Output with many digits";
+        RUN;
+        QUIT;
+        ODS RTF close;
+        ODS LISTING;
+        '''
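+        # drop the 5-token header; each of the 6 comparisons then
+        # contributes 6 numbers: group i, group j, lower CL, difference,
+        # upper CL, significance flag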
+        res_expect = np.asarray(res_expect_str.replace(" - ", " ").split()[5:],
+                                dtype=float).reshape((6, 6))
+        res_tukey = stats.tukey_hsd(*data)
+        conf = res_tukey.confidence_interval()
+        # loop over the comparisons
+        for i, j, l, s, h, sig in res_expect:
+            i, j = int(i) - 1, int(j) - 1
+            assert_allclose(conf.low[i, j], l, atol=atol)
+            assert_allclose(res_tukey.statistic[i, j], s, atol=atol)
+            assert_allclose(conf.high[i, j], h, atol=atol)
+            assert_allclose((res_tukey.pvalue[i, j] <= .05), sig == 1)
+
+    matlab_sm_siz = """
+        1	2	-8.2491590248597	-4.6	-0.9508409751403	0.0144483269098
+        1	3	-3.9091590248597	-0.26	3.3891590248597	0.9803107240900
+        2	3	0.6908409751403	4.34	7.9891590248597	0.0203311368795
+        """
+
+    matlab_diff_sz = """
+        1	2	-7.02207069748501	-3.645	-0.26792930251500 0.03371498443080
+        1	3	-2.68207069748500	0.695	4.07207069748500 0.85572267328807
+        2	3	0.59347644287720	4.34	8.08652355712281 0.02259047020620
+        """
+
+    @pytest.mark.parametrize("data,res_expect_str,atol",
+                             ((data_same_size, matlab_sm_siz, 1e-12),
+                              (data_diff_size, matlab_diff_sz, 1e-7)),
+                             ids=["equal size sample",
+                                  "unequal size sample"])
+    def test_compare_matlab(self, data, res_expect_str, atol):
+        """
+        vals = [24.5, 23.5,  26.4, 27.1, 29.9, 28.4, 34.2, 29.5, 32.2, 30.1,
+         26.1, 28.3, 24.3, 26.2, 27.8]
+        names = {'zero', 'zero', 'zero', 'zero', 'zero', 'one', 'one', 'one',
+         'one', 'one', 'two', 'two', 'two', 'two', 'two'}
+        [p,t,stats] = anova1(vals,names,"off");
+        [c,m,h,nms] = multcompare(stats, "CType","hsd");
+        """
+        res_expect = np.asarray(res_expect_str.split(),
+                                dtype=float).reshape((3, 6))
+        res_tukey = stats.tukey_hsd(*data)
+        conf = res_tukey.confidence_interval()
+        # loop over the comparisons
+        for i, j, l, s, h, p in res_expect:
+            i, j = int(i) - 1, int(j) - 1
+            assert_allclose(conf.low[i, j], l, atol=atol)
+            assert_allclose(res_tukey.statistic[i, j], s, atol=atol)
+            assert_allclose(conf.high[i, j], h, atol=atol)
+            assert_allclose(res_tukey.pvalue[i, j], p, atol=atol)
+
+    def test_compare_r(self):
+        """
+        Testing against results and p-values from R:
+        from: https://www.rdocumentation.org/packages/stats/versions/3.6.2/
+        topics/TukeyHSD
+        > require(graphics)
+        > summary(fm1 <- aov(breaks ~ tension, data = warpbreaks))
+        > TukeyHSD(fm1, "tension", ordered = TRUE)
+        > plot(TukeyHSD(fm1, "tension"))
+        Tukey multiple comparisons of means
+        95% family-wise confidence level
+        factor levels have been ordered
+        Fit: aov(formula = breaks ~ tension, data = warpbreaks)
+        $tension
+        """
+        str_res = """
+                diff        lwr      upr     p adj
+        2 - 3  4.722222 -4.8376022 14.28205 0.4630831
+        1 - 3 14.722222  5.1623978 24.28205 0.0014315
+        1 - 2 10.000000  0.4401756 19.55982 0.0384598
+        """
+        res_expect = np.asarray(str_res.replace(" - ", " ").split()[5:],
+                                dtype=float).reshape((3, 6))
+        data = ([26, 30, 54, 25, 70, 52, 51, 26, 67,
+                 27, 14, 29, 19, 29, 31, 41, 20, 44],
+                [18, 21, 29, 17, 12, 18, 35, 30, 36,
+                 42, 26, 19, 16, 39, 28, 21, 39, 29],
+                [36, 21, 24, 18, 10, 43, 28, 15, 26,
+                 20, 21, 24, 17, 13, 15, 15, 16, 28])
+
+        res_tukey = stats.tukey_hsd(*data)
+        conf = res_tukey.confidence_interval()
+        # loop over the comparisons
+        for i, j, s, l, h, p in res_expect:
+            i, j = int(i) - 1, int(j) - 1
+            # atols are set according to the number of digits present in
+            # the R result
+            assert_allclose(conf.low[i, j], l, atol=1e-7)
+            assert_allclose(res_tukey.statistic[i, j], s, atol=1e-6)
+            assert_allclose(conf.high[i, j], h, atol=1e-5)
+            assert_allclose(res_tukey.pvalue[i, j], p, atol=1e-7)
+
+    def test_engineering_stat_handbook(self):
+        '''
+        Example sourced from:
+        https://www.itl.nist.gov/div898/handbook/prc/section4/prc471.htm
+        '''
+        group1 = [6.9, 5.4, 5.8, 4.6, 4.0]
+        group2 = [8.3, 6.8, 7.8, 9.2, 6.5]
+        group3 = [8.0, 10.5, 8.1, 6.9, 9.3]
+        group4 = [5.8, 3.8, 6.1, 5.6, 6.2]
+        res = stats.tukey_hsd(group1, group2, group3, group4)
+        conf = res.confidence_interval()
+        lower = np.asarray([
+            [0, 0, 0, -2.25],
+            [.29, 0, -2.93, .13],
+            [1.13, 0, 0, .97],
+            [0, 0, 0, 0]])
+        upper = np.asarray([
+            [0, 0, 0, 1.93],
+            [4.47, 0, 1.25, 4.31],
+            [5.31, 0, 0, 5.15],
+            [0, 0, 0, 0]])
+
+        for (i, j) in [(1, 0), (2, 0), (0, 3), (1, 2), (2, 3)]:
+            assert_allclose(conf.low[i, j], lower[i, j], atol=1e-2)
+            assert_allclose(conf.high[i, j], upper[i, j], atol=1e-2)
+
+    def test_rand_symm(self):
+        # test some expected identities of the results
+        np.random.seed(1234)
+        data = np.random.rand(3, 100)
+        res = stats.tukey_hsd(*data)
+        conf = res.confidence_interval()
+        # the confidence interval arrays should be negated transposes of
+        # each other
+        assert_equal(conf.low, -conf.high.T)
+        # the diagonals of `high` and `low` should all be equal since the
+        # mean difference in a self comparison is 0.
+        assert_equal(np.diagonal(conf.high), conf.high[0, 0])
+        assert_equal(np.diagonal(conf.low), conf.low[0, 0])
+        # statistic array should be antisymmetric with zeros on the diagonal
+        assert_equal(res.statistic, -res.statistic.T)
+        assert_equal(np.diagonal(res.statistic), 0)
+        # p-values should be symmetric and 1 when compared to itself
+        assert_equal(res.pvalue, res.pvalue.T)
+        assert_equal(np.diagonal(res.pvalue), 1)
+
+    def test_no_inf(self):
+        with assert_raises(ValueError, match="...must be finite."):
+            stats.tukey_hsd([1, 2, 3], [2, np.inf], [6, 7, 3])
+
+    def test_is_1d(self):
+        with assert_raises(ValueError, match="...must be one-dimensional"):
+            stats.tukey_hsd([[1, 2], [2, 3]], [2, 5], [5, 23, 6])
+
+    def test_no_empty(self):
+        with assert_raises(ValueError, match="...must be greater than one"):
+            stats.tukey_hsd([], [2, 5], [4, 5, 6])
+
+    @pytest.mark.parametrize("nargs", (0, 1))
+    def test_not_enough_treatments(self, nargs):
+        with assert_raises(ValueError, match="...more than 1 treatment."):
+            stats.tukey_hsd(*([[23, 7, 3]] * nargs))
+
+    @pytest.mark.parametrize("cl", [-.5, 0, 1, 2])
+    def test_conf_level_invalid(self, cl):
+        with assert_raises(ValueError, match="must be between 0 and 1"):
+            r = stats.tukey_hsd([23, 7, 3], [3, 4], [9, 4])
+            r.confidence_interval(cl)
+
+    def test_2_args_ttest(self):
+        # check that with 2 treatments the `pvalue` equals that of
+        # `ttest_ind`
+        res_tukey = stats.tukey_hsd(*self.data_diff_size[:2])
+        res_ttest = stats.ttest_ind(*self.data_diff_size[:2])
+        assert_allclose(res_ttest.pvalue, res_tukey.pvalue[0, 1])
+        assert_allclose(res_ttest.pvalue, res_tukey.pvalue[1, 0])
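+
+    # The equivalence above is expected because for two groups the
+    # studentized range reduces to q = sqrt(2) * |t|. A hedged cross-check
+    # sketch (assumes scipy >= 1.7 for stats.studentized_range):
+    #   t = stats.ttest_ind(a, b).statistic
+    #   df = len(a) + len(b) - 2
+    #   p = stats.studentized_range.sf(np.sqrt(2) * abs(t), 2, df)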
+
+
+class TestPoissonMeansTest:
+    @pytest.mark.parametrize("c1, n1, c2, n2, p_expect", (
+        # example from [1], 6. Illustrative examples: Example 1
+        [0, 100, 3, 100, 0.0884],
+        [2, 100, 6, 100, 0.1749]
+    ))
+    def test_paper_examples(self, c1, n1, c2, n2, p_expect):
+        res = stats.poisson_means_test(c1, n1, c2, n2)
+        assert_allclose(res.pvalue, p_expect, atol=1e-4)
+
+    @pytest.mark.parametrize("c1, n1, c2, n2, p_expect, alt, d", (
+        # These test cases are produced by the wrapped fortran code from the
+        # original authors. Using a slightly modified version of this fortran,
+        # found here, https://github.com/nolanbconaway/poisson-etest,
+        # additional tests were created.
+        [20, 10, 20, 10, 0.9999997568929630, 'two-sided', 0],
+        [10, 10, 10, 10, 0.9999998403241203, 'two-sided', 0],
+        [50, 15, 1, 1, 0.09920321053409643, 'two-sided', .05],
+        [3, 100, 20, 300, 0.12202725450896404, 'two-sided', 0],
+        [3, 12, 4, 20, 0.40416087318539173, 'greater', 0],
+        [4, 20, 3, 100, 0.008053640402974236, 'greater', 0],
+        # the published paper does not include a `less` alternative, so
+        # expected values were calculated by switching the argument order
+        # and using alternative="greater"
+        [4, 20, 3, 10, 0.3083216325432898, 'less', 0],
+        [1, 1, 50, 15, 0.09322998607245102, 'less', 0]
+    ))
+    def test_fortran_authors(self, c1, n1, c2, n2, p_expect, alt, d):
+        res = stats.poisson_means_test(c1, n1, c2, n2, alternative=alt, diff=d)
+        assert_allclose(res.pvalue, p_expect, atol=2e-6, rtol=1e-16)
+
+    def test_different_results(self):
+        # The implementation in Fortran is known to break down at higher
+        # counts and observations, so we expect different results. By
+        # inspection we can infer the p-value to be near one.
+        count1, count2 = 10000, 10000
+        nobs1, nobs2 = 10000, 10000
+        res = stats.poisson_means_test(count1, nobs1, count2, nobs2)
+        assert_allclose(res.pvalue, 1)
+
+    def test_less_than_zero_lambda_hat2(self):
+        # demonstrates behavior that fixes a known fault from original Fortran.
+        # p-value should clearly be near one.
+        count1, count2 = 0, 0
+        nobs1, nobs2 = 1, 1
+        res = stats.poisson_means_test(count1, nobs1, count2, nobs2)
+        assert_allclose(res.pvalue, 1)
+
+    def test_input_validation(self):
+        count1, count2 = 0, 0
+        nobs1, nobs2 = 1, 1
+
+        # test non-integral events
+        message = '`k1` and `k2` must be integers.'
+        with assert_raises(TypeError, match=message):
+            stats.poisson_means_test(.7, nobs1, count2, nobs2)
+        with assert_raises(TypeError, match=message):
+            stats.poisson_means_test(count1, nobs1, .7, nobs2)
+
+        # test negative events
+        message = '`k1` and `k2` must be greater than or equal to 0.'
+        with assert_raises(ValueError, match=message):
+            stats.poisson_means_test(-1, nobs1, count2, nobs2)
+        with assert_raises(ValueError, match=message):
+            stats.poisson_means_test(count1, nobs1, -1, nobs2)
+
+        # test negative sample size
+        message = '`n1` and `n2` must be greater than 0.'
+        with assert_raises(ValueError, match=message):
+            stats.poisson_means_test(count1, -1, count2, nobs2)
+        with assert_raises(ValueError, match=message):
+            stats.poisson_means_test(count1, nobs1, count2, -1)
+
+        # test negative difference
+        message = 'diff must be greater than or equal to 0.'
+        with assert_raises(ValueError, match=message):
+            stats.poisson_means_test(count1, nobs1, count2, nobs2, diff=-1)
+
+        # test invalid alternative
+        message = 'Alternative must be one of ...'
+        with assert_raises(ValueError, match=message):
+            stats.poisson_means_test(1, 2, 1, 2, alternative='error')
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_kdeoth.py b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_kdeoth.py
new file mode 100644
index 00000000..16960998
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_kdeoth.py
@@ -0,0 +1,604 @@
+from scipy import stats, linalg, integrate
+import numpy as np
+from numpy.testing import (assert_almost_equal, assert_, assert_equal,
+    assert_array_almost_equal, assert_array_almost_equal_nulp, assert_allclose)
+import pytest
+from pytest import raises as assert_raises
+
+
+def test_kde_1d():
+    #some basic tests comparing to normal distribution
+    np.random.seed(8765678)
+    n_basesample = 500
+    xn = np.random.randn(n_basesample)
+    xnmean = xn.mean()
+    xnstd = xn.std(ddof=1)
+
+    # get kde for original sample
+    gkde = stats.gaussian_kde(xn)
+
+    # evaluate the density function for the kde for some points
+    xs = np.linspace(-7,7,501)
+    kdepdf = gkde.evaluate(xs)
+    normpdf = stats.norm.pdf(xs, loc=xnmean, scale=xnstd)
+    intervall = xs[1] - xs[0]
+
+    assert_(np.sum((kdepdf - normpdf)**2)*intervall < 0.01)
+    prob1 = gkde.integrate_box_1d(xnmean, np.inf)
+    prob2 = gkde.integrate_box_1d(-np.inf, xnmean)
+    assert_almost_equal(prob1, 0.5, decimal=1)
+    assert_almost_equal(prob2, 0.5, decimal=1)
+    assert_almost_equal(gkde.integrate_box(xnmean, np.inf), prob1, decimal=13)
+    assert_almost_equal(gkde.integrate_box(-np.inf, xnmean), prob2, decimal=13)
+
+    assert_almost_equal(gkde.integrate_kde(gkde),
+                        (kdepdf**2).sum()*intervall, decimal=2)
+    assert_almost_equal(gkde.integrate_gaussian(xnmean, xnstd**2),
+                        (kdepdf*normpdf).sum()*intervall, decimal=2)
+
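+# For reference, a sketch of the definition assumed for integrate_box_1d on
+# a 1-d Gaussian KDE with bandwidth standard deviation h and uniform weights
+# (an assumption about the definition, not scipy internals):
+#   P(a < X < b) = mean_i(norm.cdf((b - x_i) / h) - norm.cdf((a - x_i) / h))
+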
+
+def test_kde_1d_weighted():
+    #some basic tests comparing to normal distribution
+    np.random.seed(8765678)
+    n_basesample = 500
+    xn = np.random.randn(n_basesample)
+    wn = np.random.rand(n_basesample)
+    xnmean = np.average(xn, weights=wn)
+    xnstd = np.sqrt(np.average((xn-xnmean)**2, weights=wn))
+
+    # get kde for original sample
+    gkde = stats.gaussian_kde(xn, weights=wn)
+
+    # evaluate the density function for the kde for some points
+    xs = np.linspace(-7,7,501)
+    kdepdf = gkde.evaluate(xs)
+    normpdf = stats.norm.pdf(xs, loc=xnmean, scale=xnstd)
+    intervall = xs[1] - xs[0]
+
+    assert_(np.sum((kdepdf - normpdf)**2)*intervall < 0.01)
+    prob1 = gkde.integrate_box_1d(xnmean, np.inf)
+    prob2 = gkde.integrate_box_1d(-np.inf, xnmean)
+    assert_almost_equal(prob1, 0.5, decimal=1)
+    assert_almost_equal(prob2, 0.5, decimal=1)
+    assert_almost_equal(gkde.integrate_box(xnmean, np.inf), prob1, decimal=13)
+    assert_almost_equal(gkde.integrate_box(-np.inf, xnmean), prob2, decimal=13)
+
+    assert_almost_equal(gkde.integrate_kde(gkde),
+                        (kdepdf**2).sum()*intervall, decimal=2)
+    assert_almost_equal(gkde.integrate_gaussian(xnmean, xnstd**2),
+                        (kdepdf*normpdf).sum()*intervall, decimal=2)
+
+
+@pytest.mark.slow
+def test_kde_2d():
+    #some basic tests comparing to normal distribution
+    np.random.seed(8765678)
+    n_basesample = 500
+
+    mean = np.array([1.0, 3.0])
+    covariance = np.array([[1.0, 2.0], [2.0, 6.0]])
+
+    # Need transpose (shape (2, 500)) for kde
+    xn = np.random.multivariate_normal(mean, covariance, size=n_basesample).T
+
+    # get kde for original sample
+    gkde = stats.gaussian_kde(xn)
+
+    # evaluate the density function for the kde for some points
+    x, y = np.mgrid[-7:7:500j, -7:7:500j]
+    grid_coords = np.vstack([x.ravel(), y.ravel()])
+    kdepdf = gkde.evaluate(grid_coords)
+    kdepdf = kdepdf.reshape(500, 500)
+
+    normpdf = stats.multivariate_normal.pdf(np.dstack([x, y]), mean=mean, cov=covariance)
+    intervall = y.ravel()[1] - y.ravel()[0]
+
+    assert_(np.sum((kdepdf - normpdf)**2) * (intervall**2) < 0.01)
+
+    small = -1e100
+    large = 1e100
+    prob1 = gkde.integrate_box([small, mean[1]], [large, large])
+    prob2 = gkde.integrate_box([small, small], [large, mean[1]])
+
+    assert_almost_equal(prob1, 0.5, decimal=1)
+    assert_almost_equal(prob2, 0.5, decimal=1)
+    assert_almost_equal(gkde.integrate_kde(gkde),
+                        (kdepdf**2).sum()*(intervall**2), decimal=2)
+    assert_almost_equal(gkde.integrate_gaussian(mean, covariance),
+                        (kdepdf*normpdf).sum()*(intervall**2), decimal=2)
+
+
+@pytest.mark.slow
+def test_kde_2d_weighted():
+    #some basic tests comparing to normal distribution
+    np.random.seed(8765678)
+    n_basesample = 500
+
+    mean = np.array([1.0, 3.0])
+    covariance = np.array([[1.0, 2.0], [2.0, 6.0]])
+
+    # Need transpose (shape (2, 500)) for kde
+    xn = np.random.multivariate_normal(mean, covariance, size=n_basesample).T
+    wn = np.random.rand(n_basesample)
+
+    # get kde for original sample
+    gkde = stats.gaussian_kde(xn, weights=wn)
+
+    # evaluate the density function for the kde for some points
+    x, y = np.mgrid[-7:7:500j, -7:7:500j]
+    grid_coords = np.vstack([x.ravel(), y.ravel()])
+    kdepdf = gkde.evaluate(grid_coords)
+    kdepdf = kdepdf.reshape(500, 500)
+
+    normpdf = stats.multivariate_normal.pdf(np.dstack([x, y]), mean=mean, cov=covariance)
+    intervall = y.ravel()[1] - y.ravel()[0]
+
+    assert_(np.sum((kdepdf - normpdf)**2) * (intervall**2) < 0.01)
+
+    small = -1e100
+    large = 1e100
+    prob1 = gkde.integrate_box([small, mean[1]], [large, large])
+    prob2 = gkde.integrate_box([small, small], [large, mean[1]])
+
+    assert_almost_equal(prob1, 0.5, decimal=1)
+    assert_almost_equal(prob2, 0.5, decimal=1)
+    assert_almost_equal(gkde.integrate_kde(gkde),
+                        (kdepdf**2).sum()*(intervall**2), decimal=2)
+    assert_almost_equal(gkde.integrate_gaussian(mean, covariance),
+                        (kdepdf*normpdf).sum()*(intervall**2), decimal=2)
+
+
+def test_kde_bandwidth_method():
+    def scotts_factor(kde_obj):
+        """Same as default, just check that it works."""
+        return np.power(kde_obj.n, -1./(kde_obj.d+4))
+
+    np.random.seed(8765678)
+    n_basesample = 50
+    xn = np.random.randn(n_basesample)
+
+    # Default
+    gkde = stats.gaussian_kde(xn)
+    # Supply a callable
+    gkde2 = stats.gaussian_kde(xn, bw_method=scotts_factor)
+    # Supply a scalar
+    gkde3 = stats.gaussian_kde(xn, bw_method=gkde.factor)
+
+    xs = np.linspace(-7,7,51)
+    kdepdf = gkde.evaluate(xs)
+    kdepdf2 = gkde2.evaluate(xs)
+    assert_almost_equal(kdepdf, kdepdf2)
+    kdepdf3 = gkde3.evaluate(xs)
+    assert_almost_equal(kdepdf, kdepdf3)
+
+    assert_raises(ValueError, stats.gaussian_kde, xn, bw_method='wrongstring')
+
+
+def test_kde_bandwidth_method_weighted():
+    def scotts_factor(kde_obj):
+        """Same as default, just check that it works."""
+        return np.power(kde_obj.neff, -1./(kde_obj.d+4))
+
+    np.random.seed(8765678)
+    n_basesample = 50
+    xn = np.random.randn(n_basesample)
+
+    # Default
+    gkde = stats.gaussian_kde(xn)
+    # Supply a callable
+    gkde2 = stats.gaussian_kde(xn, bw_method=scotts_factor)
+    # Supply a scalar
+    gkde3 = stats.gaussian_kde(xn, bw_method=gkde.factor)
+
+    xs = np.linspace(-7,7,51)
+    kdepdf = gkde.evaluate(xs)
+    kdepdf2 = gkde2.evaluate(xs)
+    assert_almost_equal(kdepdf, kdepdf2)
+    kdepdf3 = gkde3.evaluate(xs)
+    assert_almost_equal(kdepdf, kdepdf3)
+
+    assert_raises(ValueError, stats.gaussian_kde, xn, bw_method='wrongstring')
+
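+# A note on the weighted variant above (an assumption stated for reference,
+# matching the use of `neff` in scotts_factor): with weights normalized to
+# sum to 1, the effective sample size replacing n is
+#   neff = 1.0 / np.sum(weights**2)
+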
+
+# Subclasses that should stay working (extracted from various sources).
+# Unfortunately the earlier design of gaussian_kde made it necessary for users
+# to create these kinds of subclasses, or call _compute_covariance() directly.
+
+class _kde_subclass1(stats.gaussian_kde):
+    def __init__(self, dataset):
+        self.dataset = np.atleast_2d(dataset)
+        self.d, self.n = self.dataset.shape
+        self.covariance_factor = self.scotts_factor
+        self._compute_covariance()
+
+
+class _kde_subclass2(stats.gaussian_kde):
+    def __init__(self, dataset):
+        self.covariance_factor = self.scotts_factor
+        super().__init__(dataset)
+
+
+class _kde_subclass4(stats.gaussian_kde):
+    def covariance_factor(self):
+        return 0.5 * self.silverman_factor()
+
+
+def test_gaussian_kde_subclassing():
+    x1 = np.array([-7, -5, 1, 4, 5], dtype=float)
+    xs = np.linspace(-10, 10, num=50)
+
+    # gaussian_kde itself
+    kde = stats.gaussian_kde(x1)
+    ys = kde(xs)
+
+    # subclass 1
+    kde1 = _kde_subclass1(x1)
+    y1 = kde1(xs)
+    assert_array_almost_equal_nulp(ys, y1, nulp=10)
+
+    # subclass 2
+    kde2 = _kde_subclass2(x1)
+    y2 = kde2(xs)
+    assert_array_almost_equal_nulp(ys, y2, nulp=10)
+
+    # subclass 3 was removed because we have no obligation to maintain support
+    # for user invocation of private methods
+
+    # subclass 4
+    kde4 = _kde_subclass4(x1)
+    y4 = kde4(x1)
+    y_expected = [0.06292987, 0.06346938, 0.05860291, 0.08657652, 0.07904017]
+
+    assert_array_almost_equal(y_expected, y4, decimal=6)
+
+    # Not a subclass, but check for use of _compute_covariance()
+    kde5 = kde
+    kde5.covariance_factor = lambda: kde.factor
+    kde5._compute_covariance()
+    y5 = kde5(xs)
+    assert_array_almost_equal_nulp(ys, y5, nulp=10)
+
+
+def test_gaussian_kde_covariance_caching():
+    x1 = np.array([-7, -5, 1, 4, 5], dtype=float)
+    xs = np.linspace(-10, 10, num=5)
+    # These expected values are from scipy 0.10, before some changes to
+    # gaussian_kde.  They were not compared with any external reference.
+    y_expected = [0.02463386, 0.04689208, 0.05395444, 0.05337754, 0.01664475]
+
+    # Set the bandwidth, then reset it to the default.
+    kde = stats.gaussian_kde(x1)
+    kde.set_bandwidth(bw_method=0.5)
+    kde.set_bandwidth(bw_method='scott')
+    y2 = kde(xs)
+
+    assert_array_almost_equal(y_expected, y2, decimal=7)
+
+
+def test_gaussian_kde_monkeypatch():
+    """Ugly, but people may rely on this.  See scipy pull request 123,
+    specifically the linked ML thread "Width of the Gaussian in stats.kde".
+    If it is necessary to break this later on, that is to be discussed on ML.
+    """
+    x1 = np.array([-7, -5, 1, 4, 5], dtype=float)
+    xs = np.linspace(-10, 10, num=50)
+
+    # The old monkeypatched version to get at Silverman's Rule.
+    kde = stats.gaussian_kde(x1)
+    kde.covariance_factor = kde.silverman_factor
+    kde._compute_covariance()
+    y1 = kde(xs)
+
+    # The new saner version.
+    kde2 = stats.gaussian_kde(x1, bw_method='silverman')
+    y2 = kde2(xs)
+
+    assert_array_almost_equal_nulp(y1, y2, nulp=10)
+
+
+def test_kde_integer_input():
+    """Regression test for #1181."""
+    x1 = np.arange(5)
+    kde = stats.gaussian_kde(x1)
+    y_expected = [0.13480721, 0.18222869, 0.19514935, 0.18222869, 0.13480721]
+    assert_array_almost_equal(kde(x1), y_expected, decimal=6)
+
+
+_ftypes = ['float32', 'float64', 'float96', 'float128', 'int32', 'int64']
+
+@pytest.mark.parametrize("bw_type", _ftypes + ["scott", "silverman"])
+@pytest.mark.parametrize("dtype", _ftypes)
+def test_kde_output_dtype(dtype, bw_type):
+    # Check whether the datatypes are available
+    dtype = getattr(np, dtype, None)
+
+    if bw_type in ["scott", "silverman"]:
+        bw = bw_type
+    else:
+        bw_type = getattr(np, bw_type, None)
+        bw = bw_type(3) if bw_type else None
+
+    if any(dt is None for dt in [dtype, bw]):
+        pytest.skip()
+
+    weights = np.arange(5, dtype=dtype)
+    dataset = np.arange(5, dtype=dtype)
+    k = stats.gaussian_kde(dataset, bw_method=bw, weights=weights)
+    points = np.arange(5, dtype=dtype)
+    result = k(points)
+    # weights are always cast to float64
+    assert result.dtype == np.result_type(dataset, points, np.float64(weights),
+                                          k.factor)
+
+
+def test_pdf_logpdf_validation():
+    rng = np.random.default_rng(64202298293133848336925499069837723291)
+    xn = rng.standard_normal((2, 10))
+    gkde = stats.gaussian_kde(xn)
+    xs = rng.standard_normal((3, 10))
+
+    msg = "points have dimension 3, dataset has dimension 2"
+    with pytest.raises(ValueError, match=msg):
+        gkde.logpdf(xs)
+
+
+def test_pdf_logpdf():
+    np.random.seed(1)
+    n_basesample = 50
+    xn = np.random.randn(n_basesample)
+
+    # Default
+    gkde = stats.gaussian_kde(xn)
+
+    xs = np.linspace(-15, 12, 25)
+    pdf = gkde.evaluate(xs)
+    pdf2 = gkde.pdf(xs)
+    assert_almost_equal(pdf, pdf2, decimal=12)
+
+    logpdf = np.log(pdf)
+    logpdf2 = gkde.logpdf(xs)
+    assert_almost_equal(logpdf, logpdf2, decimal=12)
+
+    # There are more points than data
+    gkde = stats.gaussian_kde(xs)
+    pdf = np.log(gkde.evaluate(xn))
+    pdf2 = gkde.logpdf(xn)
+    assert_almost_equal(pdf, pdf2, decimal=12)
+
+
+def test_pdf_logpdf_weighted():
+    np.random.seed(1)
+    n_basesample = 50
+    xn = np.random.randn(n_basesample)
+    wn = np.random.rand(n_basesample)
+
+    # Default
+    gkde = stats.gaussian_kde(xn, weights=wn)
+
+    xs = np.linspace(-15, 12, 25)
+    pdf = gkde.evaluate(xs)
+    pdf2 = gkde.pdf(xs)
+    assert_almost_equal(pdf, pdf2, decimal=12)
+
+    logpdf = np.log(pdf)
+    logpdf2 = gkde.logpdf(xs)
+    assert_almost_equal(logpdf, logpdf2, decimal=12)
+
+    # There are more points than data
+    gkde = stats.gaussian_kde(xs, weights=np.random.rand(len(xs)))
+    pdf = np.log(gkde.evaluate(xn))
+    pdf2 = gkde.logpdf(xn)
+    assert_almost_equal(pdf, pdf2, decimal=12)
+
+
+def test_marginal_1_axis():
+    rng = np.random.default_rng(6111799263660870475)
+    n_data = 50
+    n_dim = 10
+    dataset = rng.normal(size=(n_dim, n_data))
+    points = rng.normal(size=(n_dim, 3))
+
+    dimensions = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])  # dimensions to keep
+
+    kde = stats.gaussian_kde(dataset)
+    marginal = kde.marginal(dimensions)
+    pdf = marginal.pdf(points[dimensions])
+
+    def marginal_pdf_single(point):
+        def f(x):
+            x = np.concatenate(([x], point[dimensions]))
+            return kde.pdf(x)[0]
+        return integrate.quad(f, -np.inf, np.inf)[0]
+
+    def marginal_pdf(points):
+        return np.apply_along_axis(marginal_pdf_single, axis=0, arr=points)
+
+    ref = marginal_pdf(points)
+
+    assert_allclose(pdf, ref, rtol=1e-6)
+
+
+@pytest.mark.xslow
+def test_marginal_2_axis():
+    rng = np.random.default_rng(6111799263660870475)
+    n_data = 30
+    n_dim = 4
+    dataset = rng.normal(size=(n_dim, n_data))
+    points = rng.normal(size=(n_dim, 3))
+
+    dimensions = np.array([1, 3])  # dimensions to keep
+
+    kde = stats.gaussian_kde(dataset)
+    marginal = kde.marginal(dimensions)
+    pdf = marginal.pdf(points[dimensions])
+
+    def marginal_pdf(points):
+        def marginal_pdf_single(point):
+            def f(y, x):
+                w, z = point[dimensions]
+                x = np.array([x, w, y, z])
+                return kde.pdf(x)[0]
+            return integrate.dblquad(f, -np.inf, np.inf, -np.inf, np.inf)[0]
+
+        return np.apply_along_axis(marginal_pdf_single, axis=0, arr=points)
+
+    ref = marginal_pdf(points)
+
+    assert_allclose(pdf, ref, rtol=1e-6)
+
+
+def test_marginal_iv():
+    # test input validation
+    rng = np.random.default_rng(6111799263660870475)
+    n_data = 30
+    n_dim = 4
+    dataset = rng.normal(size=(n_dim, n_data))
+    points = rng.normal(size=(n_dim, 3))
+
+    kde = stats.gaussian_kde(dataset)
+
+    # check that positive and negative indices are equivalent
+    dimensions1 = [-1, 1]
+    marginal1 = kde.marginal(dimensions1)
+    pdf1 = marginal1.pdf(points[dimensions1])
+
+    dimensions2 = [3, -3]
+    marginal2 = kde.marginal(dimensions2)
+    pdf2 = marginal2.pdf(points[dimensions2])
+
+    assert_equal(pdf1, pdf2)
+
+    # IV for non-integer dimensions
+    message = "Elements of `dimensions` must be integers..."
+    with pytest.raises(ValueError, match=message):
+        kde.marginal([1, 2.5])
+
+    # IV for uniqueness
+    message = "All elements of `dimensions` must be unique."
+    with pytest.raises(ValueError, match=message):
+        kde.marginal([1, 2, 2])
+
+    # IV for out-of-bounds dimensions
+    message = (r"Dimensions \[-5  6\] are invalid for a distribution in 4...")
+    with pytest.raises(ValueError, match=message):
+        kde.marginal([1, -5, 6])
+
+
+@pytest.mark.xslow
+def test_logpdf_overflow():
+    # regression test for gh-12988; testing against linalg instability for
+    # very high dimensionality kde
+    np.random.seed(1)
+    n_dimensions = 2500
+    n_samples = 5000
+    xn = np.array([np.random.randn(n_samples) + (n) for n in range(
+        0, n_dimensions)])
+
+    # Default
+    gkde = stats.gaussian_kde(xn)
+
+    logpdf = gkde.logpdf(np.arange(0, n_dimensions))
+    np.testing.assert_equal(np.isneginf(logpdf[0]), False)
+    np.testing.assert_equal(np.isnan(logpdf[0]), False)
+
+
+def test_weights_intact():
+    # regression test for gh-9709: weights are not modified
+    np.random.seed(12345)
+    vals = np.random.lognormal(size=100)
+    weights = np.random.choice([1.0, 10.0, 100], size=vals.size)
+    orig_weights = weights.copy()
+
+    stats.gaussian_kde(np.log10(vals), weights=weights)
+    assert_allclose(weights, orig_weights, atol=1e-14, rtol=1e-14)
+
+
+def test_weights_integer():
+    # integer weights are OK, cf gh-9709 (comment)
+    np.random.seed(12345)
+    values = [0.2, 13.5, 21.0, 75.0, 99.0]
+    weights = [1, 2, 4, 8, 16]  # a list of integers
+    pdf_i = stats.gaussian_kde(values, weights=weights)
+    pdf_f = stats.gaussian_kde(values, weights=np.float64(weights))
+
+    xn = [0.3, 11, 88]
+    assert_allclose(pdf_i.evaluate(xn),
+                    pdf_f.evaluate(xn), atol=1e-14, rtol=1e-14)
+
+
+def test_seed():
+    # Test the seed option of the resample method
+    def test_seed_sub(gkde_trial):
+        n_sample = 200
+        # The results should be different without using seed
+        samp1 = gkde_trial.resample(n_sample)
+        samp2 = gkde_trial.resample(n_sample)
+        assert_raises(
+            AssertionError, assert_allclose, samp1, samp2, atol=1e-13
+        )
+        # Use integer seed
+        seed = 831
+        samp1 = gkde_trial.resample(n_sample, seed=seed)
+        samp2 = gkde_trial.resample(n_sample, seed=seed)
+        assert_allclose(samp1, samp2, atol=1e-13)
+        # Use RandomState
+        rstate1 = np.random.RandomState(seed=138)
+        samp1 = gkde_trial.resample(n_sample, seed=rstate1)
+        rstate2 = np.random.RandomState(seed=138)
+        samp2 = gkde_trial.resample(n_sample, seed=rstate2)
+        assert_allclose(samp1, samp2, atol=1e-13)
+
+        # check that np.random.Generator can be used (numpy >= 1.17)
+        if hasattr(np.random, 'default_rng'):
+            # obtain a np.random.Generator object
+            rng = np.random.default_rng(1234)
+            gkde_trial.resample(n_sample, seed=rng)
+
+    np.random.seed(8765678)
+    n_basesample = 500
+    wn = np.random.rand(n_basesample)
+    # Test 1D case
+    xn_1d = np.random.randn(n_basesample)
+
+    gkde_1d = stats.gaussian_kde(xn_1d)
+    test_seed_sub(gkde_1d)
+    gkde_1d_weighted = stats.gaussian_kde(xn_1d, weights=wn)
+    test_seed_sub(gkde_1d_weighted)
+
+    # Test 2D case
+    mean = np.array([1.0, 3.0])
+    covariance = np.array([[1.0, 2.0], [2.0, 6.0]])
+    xn_2d = np.random.multivariate_normal(mean, covariance, size=n_basesample).T
+
+    gkde_2d = stats.gaussian_kde(xn_2d)
+    test_seed_sub(gkde_2d)
+    gkde_2d_weighted = stats.gaussian_kde(xn_2d, weights=wn)
+    test_seed_sub(gkde_2d_weighted)
+
+
+def test_singular_data_covariance_gh10205():
+    # When the data lie in a lower-dimensional subspace and this causes
+    # an exception, check that the error message is informative.
+    rng = np.random.default_rng(2321583144339784787)
+    mu = np.array([1, 10, 20])
+    sigma = np.array([[4, 10, 0], [10, 25, 0], [0, 0, 100]])
+    data = rng.multivariate_normal(mu, sigma, 1000)
+    try:  # doesn't raise any error on some platforms, and that's OK
+        stats.gaussian_kde(data.T)
+    except linalg.LinAlgError:
+        msg = "The data appears to lie in a lower-dimensional subspace..."
+        with assert_raises(linalg.LinAlgError, match=msg):
+            stats.gaussian_kde(data.T)
+
+
+def test_fewer_points_than_dimensions_gh17436():
+    # When the number of points is fewer than the number of dimensions,
+    # the covariance matrix would be singular, and the exception tested in
+    # test_singular_data_covariance_gh10205 would occur. However, sometimes
+    # this occurs when the user passes in the transpose of what `gaussian_kde`
+    # expects. This can result in a huge covariance matrix, so bail early.
+    rng = np.random.default_rng(2046127537594925772)
+    rvs = rng.multivariate_normal(np.zeros(3), np.eye(3), size=5)
+    message = "Number of dimensions is greater than number of samples..."
+    with pytest.raises(ValueError, match=message):
+        stats.gaussian_kde(rvs)
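+
+
+# A minimal illustrative sketch (not from the upstream scipy tests; the
+# `_demo_` helper is hypothetical): `gaussian_kde` expects data of shape
+# (# dimensions, # samples), so an array of 5 observations in 3
+# dimensions must be transposed before construction.
+def _demo_gaussian_kde_orientation():
+    rng = np.random.default_rng(0)
+    rvs = rng.multivariate_normal(np.zeros(3), np.eye(3), size=5)  # (5, 3)
+    kde = stats.gaussian_kde(rvs.T)  # transposed to (3, 5)
+    return kde.evaluate(np.zeros((3, 1)))  # density at the origin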
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_morestats.py b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_morestats.py
new file mode 100644
index 00000000..2293c794
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_morestats.py
@@ -0,0 +1,2673 @@
+# Author:  Travis Oliphant, 2002
+#
+# Further enhancements and tests added by numerous SciPy developers.
+#
+import warnings
+import sys
+
+import numpy as np
+from numpy.random import RandomState
+from numpy.testing import (assert_array_equal,
+    assert_almost_equal, assert_array_less, assert_array_almost_equal,
+    assert_, assert_allclose, assert_equal, suppress_warnings)
+import pytest
+from pytest import raises as assert_raises
+import re
+from scipy import optimize
+from scipy import stats
+from scipy.stats._morestats import _abw_state
+from .common_tests import check_named_results
+from .._hypotests import _get_wilcoxon_distr, _get_wilcoxon_distr2
+from scipy.stats._binomtest import _binary_search_for_binom_tst
+from scipy.stats._distr_params import distcont
+
+distcont = dict(distcont)  # type: ignore
+
+# Matplotlib is not a scipy dependency but is optionally used in probplot, so
+# check if it's available
+try:
+    import matplotlib
+    matplotlib.rcParams['backend'] = 'Agg'
+    import matplotlib.pyplot as plt
+    have_matplotlib = True
+except Exception:
+    have_matplotlib = False
+
+
+# test data gear.dat from NIST for Levene and Bartlett test
+# https://www.itl.nist.gov/div898/handbook/eda/section3/eda3581.htm
+g1 = [1.006, 0.996, 0.998, 1.000, 0.992, 0.993, 1.002, 0.999, 0.994, 1.000]
+g2 = [0.998, 1.006, 1.000, 1.002, 0.997, 0.998, 0.996, 1.000, 1.006, 0.988]
+g3 = [0.991, 0.987, 0.997, 0.999, 0.995, 0.994, 1.000, 0.999, 0.996, 0.996]
+g4 = [1.005, 1.002, 0.994, 1.000, 0.995, 0.994, 0.998, 0.996, 1.002, 0.996]
+g5 = [0.998, 0.998, 0.982, 0.990, 1.002, 0.984, 0.996, 0.993, 0.980, 0.996]
+g6 = [1.009, 1.013, 1.009, 0.997, 0.988, 1.002, 0.995, 0.998, 0.981, 0.996]
+g7 = [0.990, 1.004, 0.996, 1.001, 0.998, 1.000, 1.018, 1.010, 0.996, 1.002]
+g8 = [0.998, 1.000, 1.006, 1.000, 1.002, 0.996, 0.998, 0.996, 1.002, 1.006]
+g9 = [1.002, 0.998, 0.996, 0.995, 0.996, 1.004, 1.004, 0.998, 0.999, 0.991]
+g10 = [0.991, 0.995, 0.984, 0.994, 0.997, 0.997, 0.991, 0.998, 1.004, 0.997]
+
+
+# The loggamma RVS stream is changing due to gh-13349; this version
+# preserves the old stream so that tests don't change.
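+# (If X ~ gamma(c), then log(X) follows scipy's loggamma(c), so wrapping
+# gamma.rvs in np.log reproduces the pre-gh-13349 loggamma stream.)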
+def _old_loggamma_rvs(*args, **kwargs):
+    return np.log(stats.gamma.rvs(*args, **kwargs))
+
+
+class TestBayes_mvs:
+    def test_basic(self):
+        # Expected values in this test simply taken from the function.  For
+        # some checks regarding correctness of implementation, see review in
+        # gh-674
+        data = [6, 9, 12, 7, 8, 8, 13]
+        mean, var, std = stats.bayes_mvs(data)
+        assert_almost_equal(mean.statistic, 9.0)
+        assert_allclose(mean.minmax, (7.1036502226125329, 10.896349777387467),
+                        rtol=1e-14)
+
+        assert_almost_equal(var.statistic, 10.0)
+        assert_allclose(var.minmax, (3.1767242068607087, 24.45910381334018),
+                        rtol=1e-09)
+
+        assert_almost_equal(std.statistic, 2.9724954732045084, decimal=14)
+        assert_allclose(std.minmax, (1.7823367265645145, 4.9456146050146312),
+                        rtol=1e-14)
+
+    def test_empty_input(self):
+        assert_raises(ValueError, stats.bayes_mvs, [])
+
+    def test_result_attributes(self):
+        x = np.arange(15)
+        attributes = ('statistic', 'minmax')
+        res = stats.bayes_mvs(x)
+
+        for i in res:
+            check_named_results(i, attributes)
+
+
+class TestMvsdist:
+    def test_basic(self):
+        data = [6, 9, 12, 7, 8, 8, 13]
+        mean, var, std = stats.mvsdist(data)
+        assert_almost_equal(mean.mean(), 9.0)
+        assert_allclose(mean.interval(0.9), (7.1036502226125329,
+                                             10.896349777387467), rtol=1e-14)
+
+        assert_almost_equal(var.mean(), 10.0)
+        assert_allclose(var.interval(0.9), (3.1767242068607087,
+                                            24.45910381334018), rtol=1e-09)
+
+        assert_almost_equal(std.mean(), 2.9724954732045084, decimal=14)
+        assert_allclose(std.interval(0.9), (1.7823367265645145,
+                                            4.9456146050146312), rtol=1e-14)
+
+    def test_empty_input(self):
+        assert_raises(ValueError, stats.mvsdist, [])
+
+    def test_bad_arg(self):
+        # Raise ValueError if fewer than two data points are given.
+        data = [1]
+        assert_raises(ValueError, stats.mvsdist, data)
+
+    def test_warns(self):
+        # regression test for gh-5270
+        # make sure there are no spurious divide-by-zero warnings
+        with warnings.catch_warnings():
+            warnings.simplefilter('error', RuntimeWarning)
+            [x.mean() for x in stats.mvsdist([1, 2, 3])]
+            [x.mean() for x in stats.mvsdist([1, 2, 3, 4, 5])]
+
+
+class TestShapiro:
+    def test_basic(self):
+        x1 = [0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46,
+              4.43, 0.21, 4.75, 0.71, 1.52, 3.24,
+              0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66]
+        w, pw = stats.shapiro(x1)
+        shapiro_test = stats.shapiro(x1)
+        assert_almost_equal(w, 0.90047299861907959, decimal=6)
+        assert_almost_equal(shapiro_test.statistic, 0.90047299861907959, decimal=6)
+        assert_almost_equal(pw, 0.042089745402336121, decimal=6)
+        assert_almost_equal(shapiro_test.pvalue, 0.042089745402336121, decimal=6)
+
+        x2 = [1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11,
+              3.48, 1.10, 0.88, -0.51, 1.46, 0.52, 6.20, 1.69,
+              0.08, 3.67, 2.81, 3.49]
+        w, pw = stats.shapiro(x2)
+        shapiro_test = stats.shapiro(x2)
+        assert_almost_equal(w, 0.9590270, decimal=6)
+        assert_almost_equal(shapiro_test.statistic, 0.9590270, decimal=6)
+        assert_almost_equal(pw, 0.52460, decimal=3)
+        assert_almost_equal(shapiro_test.pvalue, 0.52460, decimal=3)
+
+        # Verified against R
+        x3 = stats.norm.rvs(loc=5, scale=3, size=100, random_state=12345678)
+        w, pw = stats.shapiro(x3)
+        shapiro_test = stats.shapiro(x3)
+        assert_almost_equal(w, 0.9772805571556091, decimal=6)
+        assert_almost_equal(shapiro_test.statistic, 0.9772805571556091, decimal=6)
+        assert_almost_equal(pw, 0.08144091814756393, decimal=3)
+        assert_almost_equal(shapiro_test.pvalue, 0.08144091814756393, decimal=3)
+
+        # Extracted from original paper
+        x4 = [0.139, 0.157, 0.175, 0.256, 0.344, 0.413, 0.503, 0.577, 0.614,
+              0.655, 0.954, 1.392, 1.557, 1.648, 1.690, 1.994, 2.174, 2.206,
+              3.245, 3.510, 3.571, 4.354, 4.980, 6.084, 8.351]
+        W_expected = 0.83467
+        p_expected = 0.000914
+        w, pw = stats.shapiro(x4)
+        shapiro_test = stats.shapiro(x4)
+        assert_almost_equal(w, W_expected, decimal=4)
+        assert_almost_equal(shapiro_test.statistic, W_expected, decimal=4)
+        assert_almost_equal(pw, p_expected, decimal=5)
+        assert_almost_equal(shapiro_test.pvalue, p_expected, decimal=5)
+
+    def test_2d(self):
+        x1 = [[0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46,
+              4.43, 0.21, 4.75], [0.71, 1.52, 3.24,
+              0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66]]
+        w, pw = stats.shapiro(x1)
+        shapiro_test = stats.shapiro(x1)
+        assert_almost_equal(w, 0.90047299861907959, decimal=6)
+        assert_almost_equal(shapiro_test.statistic, 0.90047299861907959, decimal=6)
+        assert_almost_equal(pw, 0.042089745402336121, decimal=6)
+        assert_almost_equal(shapiro_test.pvalue, 0.042089745402336121, decimal=6)
+
+        x2 = [[1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11,
+              3.48, 1.10], [0.88, -0.51, 1.46, 0.52, 6.20, 1.69,
+              0.08, 3.67, 2.81, 3.49]]
+        w, pw = stats.shapiro(x2)
+        shapiro_test = stats.shapiro(x2)
+        assert_almost_equal(w, 0.9590270, decimal=6)
+        assert_almost_equal(shapiro_test.statistic, 0.9590270, decimal=6)
+        assert_almost_equal(pw, 0.52460, decimal=3)
+        assert_almost_equal(shapiro_test.pvalue, 0.52460, decimal=3)
+
+    def test_empty_input(self):
+        assert_raises(ValueError, stats.shapiro, [])
+        assert_raises(ValueError, stats.shapiro, [[], [], []])
+
+    def test_not_enough_values(self):
+        assert_raises(ValueError, stats.shapiro, [1, 2])
+        assert_raises(ValueError, stats.shapiro, np.array([[], [2]], dtype=object))
+
+    def test_bad_arg(self):
+        # Length of x is less than 3.
+        x = [1]
+        assert_raises(ValueError, stats.shapiro, x)
+
+    def test_nan_input(self):
+        x = np.arange(10.)
+        x[9] = np.nan
+
+        w, pw = stats.shapiro(x)
+        shapiro_test = stats.shapiro(x)
+        assert_equal(w, np.nan)
+        assert_equal(shapiro_test.statistic, np.nan)
+        assert_almost_equal(pw, 1.0)
+        assert_almost_equal(shapiro_test.pvalue, 1.0)
+
+    def test_gh14462(self):
+        # shapiro is theoretically location-invariant, but when the magnitude
+        # of the values is much greater than the variance, there can be
+        # numerical issues. Fixed by subtracting the median from the data.
+        # See gh-14462.
+
+        trans_val, maxlog = stats.boxcox([122500, 474400, 110400])
+        res = stats.shapiro(trans_val)
+
+        # Reference from R:
+        # options(digits=16)
+        # x = c(0.00000000e+00, 3.39996924e-08, -6.35166875e-09)
+        # shapiro.test(x)
+        ref = (0.86468431705371, 0.2805581751566)
+
+        assert_allclose(res, ref, rtol=1e-5)
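+
+
+# A minimal illustrative sketch (not from the upstream scipy tests; the
+# `_demo_` helper is hypothetical): after the gh-14462 fix, shifting the
+# data by a large constant should leave the Shapiro-Wilk statistic
+# essentially unchanged, up to the precision lost when storing x + 1e6
+# in float64.
+def _demo_shapiro_location_invariance():
+    rng = np.random.default_rng(12345)
+    x = rng.standard_normal(50)
+    res_a = stats.shapiro(x)
+    res_b = stats.shapiro(x + 1e6)  # same data, large location offset
+    return res_a.statistic, res_b.statistic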
+
+
+class TestAnderson:
+    def test_normal(self):
+        rs = RandomState(1234567890)
+        x1 = rs.standard_exponential(size=50)
+        x2 = rs.standard_normal(size=50)
+        A, crit, sig = stats.anderson(x1)
+        assert_array_less(crit[:-1], A)
+        A, crit, sig = stats.anderson(x2)
+        assert_array_less(A, crit[-2:])
+
+        v = np.ones(10)
+        v[0] = 0
+        A, crit, sig = stats.anderson(v)
+        # The expected statistic 3.208057 was computed independently of scipy.
+        # For example, in R:
+        #   > library(nortest)
+        #   > v <- rep(1, 10)
+        #   > v[1] <- 0
+        #   > result <- ad.test(v)
+        #   > result$statistic
+        #          A
+        #   3.208057
+        assert_allclose(A, 3.208057)
+
+    def test_expon(self):
+        rs = RandomState(1234567890)
+        x1 = rs.standard_exponential(size=50)
+        x2 = rs.standard_normal(size=50)
+        A, crit, sig = stats.anderson(x1, 'expon')
+        assert_array_less(A, crit[-2:])
+        with np.errstate(all='ignore'):
+            A, crit, sig = stats.anderson(x2, 'expon')
+        assert_(A > crit[-1])
+
+    def test_gumbel(self):
+        # Regression test for gh-6306.  Before that issue was fixed,
+        # this case would return a2=inf.
+        v = np.ones(100)
+        v[0] = 0.0
+        a2, crit, sig = stats.anderson(v, 'gumbel')
+        # A brief reimplementation of the calculation of the statistic.
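+        # For a fully specified distribution F, the statistic is
+        #   A^2 = -n - (1/n) * sum_{i=1..n} (2i - 1)
+        #         * [log F(x_(i)) + log(1 - F(x_(n+1-i)))]
+        # evaluated below with F fitted as gumbel_l (v is already sorted).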
+        n = len(v)
+        xbar, s = stats.gumbel_l.fit(v)
+        logcdf = stats.gumbel_l.logcdf(v, xbar, s)
+        logsf = stats.gumbel_l.logsf(v, xbar, s)
+        i = np.arange(1, n+1)
+        expected_a2 = -n - np.mean((2*i - 1) * (logcdf + logsf[::-1]))
+
+        assert_allclose(a2, expected_a2)
+
+    def test_bad_arg(self):
+        assert_raises(ValueError, stats.anderson, [1], dist='plate_of_shrimp')
+
+    def test_result_attributes(self):
+        rs = RandomState(1234567890)
+        x = rs.standard_exponential(size=50)
+        res = stats.anderson(x)
+        attributes = ('statistic', 'critical_values', 'significance_level')
+        check_named_results(res, attributes)
+
+    def test_gumbel_l(self):
+        # gh-2592, gh-6337
+        # Adds support for 'gumbel_r' and 'gumbel_l' as valid inputs for dist.
+        rs = RandomState(1234567890)
+        x = rs.gumbel(size=100)
+        A1, crit1, sig1 = stats.anderson(x, 'gumbel')
+        A2, crit2, sig2 = stats.anderson(x, 'gumbel_l')
+
+        assert_allclose(A2, A1)
+
+    def test_gumbel_r(self):
+        # gh-2592, gh-6337
+        # Adds support for 'gumbel_r' and 'gumbel_l' as valid inputs for dist.
+        rs = RandomState(1234567890)
+        x1 = rs.gumbel(size=100)
+        x2 = np.ones(100)
+        # A constant array is a degenerate case and breaks gumbel_r.fit, so
+        # change one value in x2.
+        x2[0] = 0.996
+        A1, crit1, sig1 = stats.anderson(x1, 'gumbel_r')
+        A2, crit2, sig2 = stats.anderson(x2, 'gumbel_r')
+
+        assert_array_less(A1, crit1[-2:])
+        assert_(A2 > crit2[-1])
+
+    @pytest.mark.parametrize('distname',
+                             ['norm', 'expon', 'gumbel_l', 'extreme1',
+                              'gumbel', 'gumbel_r', 'logistic'])
+    def test_anderson_fit_params(self, distname):
+        # check that anderson now returns a FitResult
+        rng = np.random.default_rng(330691555377792039)
+        real_distname = ('gumbel_l' if distname in {'extreme1', 'gumbel'}
+                         else distname)
+        dist = getattr(stats, real_distname)
+        params = distcont[real_distname]
+        x = dist.rvs(*params, size=1000, random_state=rng)
+        res = stats.anderson(x, distname)
+        assert res.fit_result.success
+
+
+class TestAndersonKSamp:
+    def test_example1a(self):
+        # Example data from Scholz & Stephens (1987), originally
+        # published in Lehmann (1995, Nonparametrics, Statistical
+        # Methods Based on Ranks, p. 309)
+        # Pass a mixture of lists and arrays
+        t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]
+        t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
+        t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
+        t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
+
+        Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=False)
+
+        assert_almost_equal(Tk, 4.449, 3)
+        assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],
+                                  tm[0:5], 4)
+        assert_allclose(p, 0.0021, atol=0.00025)
+
+    def test_example1b(self):
+        # Example data from Scholz & Stephens (1987), originally
+        # published in Lehmann (1995, Nonparametrics, Statistical
+        # Methods Based on Ranks, p. 309)
+        # Pass arrays
+        t1 = np.array([38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0])
+        t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
+        t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
+        t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
+        Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=True)
+
+        assert_almost_equal(Tk, 4.480, 3)
+        assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],
+                                  tm[0:5], 4)
+        assert_allclose(p, 0.0020, atol=0.00025)
+
+    def test_example2a(self):
+        # Example data taken from an earlier technical report of
+        # Scholz and Stephens
+        # Pass lists instead of arrays
+        t1 = [194, 15, 41, 29, 33, 181]
+        t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]
+        t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]
+        t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,
+              118, 25, 156, 310, 76, 26, 44, 23, 62]
+        t5 = [130, 208, 70, 101, 208]
+        t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]
+        t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]
+        t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
+              12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]
+        t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,
+              54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]
+        t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,
+               22, 139, 210, 97, 30, 23, 13, 14]
+        t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]
+        t12 = [50, 254, 5, 283, 35, 12]
+        t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]
+        t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,
+               61, 34]
+
+        Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8,
+                                          t9, t10, t11, t12, t13, t14),
+                                         midrank=False)
+        assert_almost_equal(Tk, 3.288, 3)
+        assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],
+                                  tm[0:5], 4)
+        assert_allclose(p, 0.0041, atol=0.00025)
+
+    def test_example2b(self):
+        # Example data taken from an earlier technical report of
+        # Scholz and Stephens
+        t1 = [194, 15, 41, 29, 33, 181]
+        t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]
+        t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]
+        t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,
+              118, 25, 156, 310, 76, 26, 44, 23, 62]
+        t5 = [130, 208, 70, 101, 208]
+        t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]
+        t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]
+        t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
+              12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]
+        t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,
+              54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]
+        t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,
+               22, 139, 210, 97, 30, 23, 13, 14]
+        t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]
+        t12 = [50, 254, 5, 283, 35, 12]
+        t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]
+        t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,
+               61, 34]
+
+        Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8,
+                                          t9, t10, t11, t12, t13, t14),
+                                         midrank=True)
+
+        assert_almost_equal(Tk, 3.294, 3)
+        assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],
+                                  tm[0:5], 4)
+        assert_allclose(p, 0.0041, atol=0.00025)
+
+    def test_R_kSamples(self):
+        # test values generated with the R package kSamples
+        # package version 1.2-6 (2017-06-14)
+        # r1 = 1:100
+        # continuous case (no ties) --> version  1
+        # res <- kSamples::ad.test(r1, r1 + 40.5)
+        # res$ad[1, "T.AD"] #  41.105
+        # res$ad[1, " asympt. P-value"] #  5.8399e-18
+        #
+        # discrete case (ties allowed) --> version  2 (here: midrank=True)
+        # res$ad[2, "T.AD"] #  41.235
+        #
+        # res <- kSamples::ad.test(r1, r1 + .5)
+        # res$ad[1, "T.AD"] #  -1.2824
+        # res$ad[1, " asympt. P-value"] #  1
+        # res$ad[2, "T.AD"] #  -1.2944
+        #
+        # res <- kSamples::ad.test(r1, r1 + 7.5)
+        # res$ad[1, "T.AD"] # 1.4923
+        # res$ad[1, " asympt. P-value"] # 0.077501
+        #
+        # res <- kSamples::ad.test(r1, r1 + 6)
+        # res$ad[2, "T.AD"] # 0.63892
+        # res$ad[2, " asympt. P-value"] # 0.17981
+        #
+        # res <- kSamples::ad.test(r1, r1 + 11.5)
+        # res$ad[1, "T.AD"] # 4.5042
+        # res$ad[1, " asympt. P-value"] # 0.00545
+        #
+        # res <- kSamples::ad.test(r1, r1 + 13.5)
+        # res$ad[1, "T.AD"] # 6.2982
+        # res$ad[1, " asympt. P-value"] # 0.00118
+
+        x1 = np.linspace(1, 100, 100)
+        # test case: different distributions; p-value floored at 0.001
+        # test case for issue #5493 / #8536
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning, message='p-value floored')
+            s, _, p = stats.anderson_ksamp([x1, x1 + 40.5], midrank=False)
+        assert_almost_equal(s, 41.105, 3)
+        assert_equal(p, 0.001)
+
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning, message='p-value floored')
+            s, _, p = stats.anderson_ksamp([x1, x1 + 40.5])
+        assert_almost_equal(s, 41.235, 3)
+        assert_equal(p, 0.001)
+
+        # test case: similar distributions --> p-value capped at 0.25
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning, message='p-value capped')
+            s, _, p = stats.anderson_ksamp([x1, x1 + .5], midrank=False)
+        assert_almost_equal(s, -1.2824, 4)
+        assert_equal(p, 0.25)
+
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning, message='p-value capped')
+            s, _, p = stats.anderson_ksamp([x1, x1 + .5])
+        assert_almost_equal(s, -1.2944, 4)
+        assert_equal(p, 0.25)
+
+        # test case: check interpolated p-value in [0.01, 0.25] (no ties)
+        s, _, p = stats.anderson_ksamp([x1, x1 + 7.5], midrank=False)
+        assert_almost_equal(s, 1.4923, 4)
+        assert_allclose(p, 0.0775, atol=0.005, rtol=0)
+
+        # test case: check interpolated p-value in [0.01, 0.25] (w/ ties)
+        s, _, p = stats.anderson_ksamp([x1, x1 + 6])
+        assert_almost_equal(s, 0.6389, 4)
+        assert_allclose(p, 0.1798, atol=0.005, rtol=0)
+
+        # test extended critical values for p=0.001 and p=0.005
+        s, _, p = stats.anderson_ksamp([x1, x1 + 11.5], midrank=False)
+        assert_almost_equal(s, 4.5042, 4)
+        assert_allclose(p, 0.00545, atol=0.0005, rtol=0)
+
+        s, _, p = stats.anderson_ksamp([x1, x1 + 13.5], midrank=False)
+        assert_almost_equal(s, 6.2982, 4)
+        assert_allclose(p, 0.00118, atol=0.0001, rtol=0)
+
+    def test_not_enough_samples(self):
+        assert_raises(ValueError, stats.anderson_ksamp, np.ones(5))
+
+    def test_no_distinct_observations(self):
+        assert_raises(ValueError, stats.anderson_ksamp,
+                      (np.ones(5), np.ones(5)))
+
+    def test_empty_sample(self):
+        assert_raises(ValueError, stats.anderson_ksamp, (np.ones(5), []))
+
+    def test_result_attributes(self):
+        # Pass a mixture of lists and arrays
+        t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]
+        t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
+        res = stats.anderson_ksamp((t1, t2), midrank=False)
+
+        attributes = ('statistic', 'critical_values', 'significance_level')
+        check_named_results(res, attributes)
+
+        assert_equal(res.significance_level, res.pvalue)
+
+
+class TestAnsari:
+
+    def test_small(self):
+        x = [1, 2, 3, 3, 4]
+        y = [3, 2, 6, 1, 6, 1, 4, 1]
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning, "Ties preclude use of exact statistic.")
+            W, pval = stats.ansari(x, y)
+        assert_almost_equal(W, 23.5, 11)
+        assert_almost_equal(pval, 0.13499256881897437, 11)
+
+    def test_approx(self):
+        ramsay = np.array((111, 107, 100, 99, 102, 106, 109, 108, 104, 99,
+                           101, 96, 97, 102, 107, 113, 116, 113, 110, 98))
+        parekh = np.array((107, 108, 106, 98, 105, 103, 110, 105, 104,
+                           100, 96, 108, 103, 104, 114, 114, 113, 108,
+                           106, 99))
+
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning, "Ties preclude use of exact statistic.")
+            W, pval = stats.ansari(ramsay, parekh)
+
+        assert_almost_equal(W, 185.5, 11)
+        assert_almost_equal(pval, 0.18145819972867083, 11)
+
+    def test_exact(self):
+        W, pval = stats.ansari([1, 2, 3, 4], [15, 5, 20, 8, 10, 12])
+        assert_almost_equal(W, 10.0, 11)
+        assert_almost_equal(pval, 0.533333333333333333, 7)
+
+    def test_bad_arg(self):
+        assert_raises(ValueError, stats.ansari, [], [1])
+        assert_raises(ValueError, stats.ansari, [1], [])
+
+    def test_result_attributes(self):
+        x = [1, 2, 3, 3, 4]
+        y = [3, 2, 6, 1, 6, 1, 4, 1]
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning, "Ties preclude use of exact statistic.")
+            res = stats.ansari(x, y)
+        attributes = ('statistic', 'pvalue')
+        check_named_results(res, attributes)
+
+    def test_bad_alternative(self):
+        # invalid value for alternative must raise a ValueError
+        x1 = [1, 2, 3, 4]
+        x2 = [5, 6, 7, 8]
+        match = "'alternative' must be 'two-sided'"
+        with assert_raises(ValueError, match=match):
+            stats.ansari(x1, x2, alternative='foo')
+
+    def test_alternative_exact(self):
+        x1 = [-5, 1, 5, 10, 15, 20, 25]  # high scale, loc=10
+        x2 = [7.5, 8.5, 9.5, 10.5, 11.5, 12.5]  # low scale, loc=10
+        # ratio of scales is greater than 1. So, the
+        # p-value must be high when `alternative='less'`
+        # and low when `alternative='greater'`.
+        statistic, pval = stats.ansari(x1, x2)
+        pval_l = stats.ansari(x1, x2, alternative='less').pvalue
+        pval_g = stats.ansari(x1, x2, alternative='greater').pvalue
+        assert pval_l > 0.95
+        assert pval_g < 0.05  # level of significance.
+        # also check that the p-values sum to 1 plus the probability
+        # mass at the observed statistic.
+        prob = _abw_state.pmf(statistic, len(x1), len(x2))
+        assert_allclose(pval_g + pval_l, 1 + prob, atol=1e-12)
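+        # (both one-sided p-values include the point mass at the
+        # observed statistic, so that mass is counted twice; hence the
+        # extra `prob` term above.)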
+        # also check that one of the one-sided p-values equals half the
+        # two-sided p-value and the other one-sided p-value is its
+        # complement.
+        assert_allclose(pval_g, pval/2, atol=1e-12)
+        assert_allclose(pval_l, 1+prob-pval/2, atol=1e-12)
+        # sanity check. The result should flip if
+        # we exchange x and y.
+        pval_l_reverse = stats.ansari(x2, x1, alternative='less').pvalue
+        pval_g_reverse = stats.ansari(x2, x1, alternative='greater').pvalue
+        assert pval_l_reverse < 0.05
+        assert pval_g_reverse > 0.95
+
+    @pytest.mark.parametrize(
+        'x, y, alternative, expected',
+        # these cases are chosen so that the if/else branches of
+        # the exact mode of the ansari test are both covered.
+        [([1, 2, 3, 4], [5, 6, 7, 8], 'less', 0.6285714285714),
+         ([1, 2, 3, 4], [5, 6, 7, 8], 'greater', 0.6285714285714),
+         ([1, 2, 3], [4, 5, 6, 7, 8], 'less', 0.8928571428571),
+         ([1, 2, 3], [4, 5, 6, 7, 8], 'greater', 0.2857142857143),
+         ([1, 2, 3, 4, 5], [6, 7, 8], 'less', 0.2857142857143),
+         ([1, 2, 3, 4, 5], [6, 7, 8], 'greater', 0.8928571428571)]
+    )
+    def test_alternative_exact_with_R(self, x, y, alternative, expected):
+        # testing with R on arbitrary data
+        # Sample R code used for the third test case above:
+        # ```R
+        # > options(digits=16)
+        # > x <- c(1,2,3)
+        # > y <- c(4,5,6,7,8)
+        # > ansari.test(x, y, alternative='less', exact=TRUE)
+        #
+        #     Ansari-Bradley test
+        #
+        # data:  x and y
+        # AB = 6, p-value = 0.8928571428571
+        # alternative hypothesis: true ratio of scales is less than 1
+        #
+        # ```
+        pval = stats.ansari(x, y, alternative=alternative).pvalue
+        assert_allclose(pval, expected, atol=1e-12)
+
+    def test_alternative_approx(self):
+        # intuitive tests for approximation
+        x1 = stats.norm.rvs(0, 5, size=100, random_state=123)
+        x2 = stats.norm.rvs(0, 2, size=100, random_state=123)
+        # for m > 55 or n > 55, the test should automatically
+        # switch to approximation.
+        pval_l = stats.ansari(x1, x2, alternative='less').pvalue
+        pval_g = stats.ansari(x1, x2, alternative='greater').pvalue
+        assert_allclose(pval_l, 1.0, atol=1e-12)
+        assert_allclose(pval_g, 0.0, atol=1e-12)
+        # also check that one of the one-sided p-values equals half the
+        # two-sided p-value and the other one-sided p-value is its
+        # complement.
+        x1 = stats.norm.rvs(0, 2, size=60, random_state=123)
+        x2 = stats.norm.rvs(0, 1.5, size=60, random_state=123)
+        pval = stats.ansari(x1, x2).pvalue
+        pval_l = stats.ansari(x1, x2, alternative='less').pvalue
+        pval_g = stats.ansari(x1, x2, alternative='greater').pvalue
+        assert_allclose(pval_g, pval/2, atol=1e-12)
+        assert_allclose(pval_l, 1-pval/2, atol=1e-12)
+
+
+class TestBartlett:
+
+    def test_data(self):
+        # https://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm
+        args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
+        T, pval = stats.bartlett(*args)
+        assert_almost_equal(T, 20.78587342806484, 7)
+        assert_almost_equal(pval, 0.0136358632781, 7)
+
+    def test_bad_arg(self):
+        # Too few args raises ValueError.
+        assert_raises(ValueError, stats.bartlett, [1])
+
+    def test_result_attributes(self):
+        args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
+        res = stats.bartlett(*args)
+        attributes = ('statistic', 'pvalue')
+        check_named_results(res, attributes)
+
+    def test_empty_arg(self):
+        args = (g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, [])
+        assert_equal((np.nan, np.nan), stats.bartlett(*args))
+
+    # temporary fix for issue #9252: only accept 1d input
+    def test_1d_input(self):
+        x = np.array([[1, 2], [3, 4]])
+        assert_raises(ValueError, stats.bartlett, g1, x)
+
+
+class TestLevene:
+
+    def test_data(self):
+        # https://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm
+        args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
+        W, pval = stats.levene(*args)
+        assert_almost_equal(W, 1.7059176930008939, 7)
+        assert_almost_equal(pval, 0.0990829755522, 7)
+
+    def test_trimmed1(self):
+        # Test that center='trimmed' gives the same result as center='mean'
+        # when proportiontocut=0.
+        W1, pval1 = stats.levene(g1, g2, g3, center='mean')
+        W2, pval2 = stats.levene(g1, g2, g3, center='trimmed',
+                                 proportiontocut=0.0)
+        assert_almost_equal(W1, W2)
+        assert_almost_equal(pval1, pval2)
+
+    def test_trimmed2(self):
+        x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]
+        y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]
+        np.random.seed(1234)
+        x2 = np.random.permutation(x)
+
+        # Use center='trimmed'
+        W0, pval0 = stats.levene(x, y, center='trimmed',
+                                 proportiontocut=0.125)
+        W1, pval1 = stats.levene(x2, y, center='trimmed',
+                                 proportiontocut=0.125)
+        # Trim the data here, and use center='mean'
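+        # (proportiontocut=0.125 on 8 sorted points trims 0.125 * 8 = 1
+        # point from each tail, which is exactly x[1:-1] and y[1:-1].)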
+        W2, pval2 = stats.levene(x[1:-1], y[1:-1], center='mean')
+        # Result should be the same.
+        assert_almost_equal(W0, W2)
+        assert_almost_equal(W1, W2)
+        assert_almost_equal(pval1, pval2)
+
+    def test_equal_mean_median(self):
+        x = np.linspace(-1, 1, 21)
+        np.random.seed(1234)
+        x2 = np.random.permutation(x)
+        y = x**3
+        W1, pval1 = stats.levene(x, y, center='mean')
+        W2, pval2 = stats.levene(x2, y, center='median')
+        assert_almost_equal(W1, W2)
+        assert_almost_equal(pval1, pval2)
+
+    def test_bad_keyword(self):
+        x = np.linspace(-1, 1, 21)
+        assert_raises(TypeError, stats.levene, x, x, portiontocut=0.1)
+
+    def test_bad_center_value(self):
+        x = np.linspace(-1, 1, 21)
+        assert_raises(ValueError, stats.levene, x, x, center='trim')
+
+    def test_too_few_args(self):
+        assert_raises(ValueError, stats.levene, [1])
+
+    def test_result_attributes(self):
+        args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
+        res = stats.levene(*args)
+        attributes = ('statistic', 'pvalue')
+        check_named_results(res, attributes)
+
+    # temporary fix for issue #9252: only accept 1d input
+    def test_1d_input(self):
+        x = np.array([[1, 2], [3, 4]])
+        assert_raises(ValueError, stats.levene, g1, x)
+
+
+class TestBinomTestP:
+    """
+    Tests for stats.binomtest as a replacement for deprecated stats.binom_test.
+    """
+    @staticmethod
+    def binom_test_func(x, n=None, p=0.5, alternative='two-sided'):
+        # This processing of x and n is copied from binom_test.
+        x = np.atleast_1d(x).astype(np.int_)
+        if len(x) == 2:
+            n = x[1] + x[0]
+            x = x[0]
+        elif len(x) == 1:
+            x = x[0]
+            if n is None or n < x:
+                raise ValueError("n must be >= x")
+            n = np.int_(n)
+        else:
+            raise ValueError("Incorrect length for x.")
+
+        result = stats.binomtest(x, n, p=p, alternative=alternative)
+        return result.pvalue
+
+    def test_data(self):
+        pval = self.binom_test_func(100, 250)
+        assert_almost_equal(pval, 0.0018833009350757682, 11)
+        pval = self.binom_test_func(201, 405)
+        assert_almost_equal(pval, 0.92085205962670713, 11)
+        pval = self.binom_test_func([682, 243], p=3/4)
+        assert_almost_equal(pval, 0.38249155957481695, 11)
+
+    def test_bad_len_x(self):
+        # Length of x must be 1 or 2.
+        assert_raises(ValueError, self.binom_test_func, [1, 2, 3])
+
+    def test_bad_n(self):
+        # len(x) is 1, but n is invalid.
+        # Missing n
+        assert_raises(ValueError, self.binom_test_func, [100])
+        # n less than x[0]
+        assert_raises(ValueError, self.binom_test_func, [100], n=50)
+
+    def test_bad_p(self):
+        assert_raises(ValueError,
+                      self.binom_test_func, [50, 50], p=2.0)
+
+    def test_alternatives(self):
+        res = self.binom_test_func(51, 235, p=1/6, alternative='less')
+        assert_almost_equal(res, 0.982022657605858)
+
+        res = self.binom_test_func(51, 235, p=1/6, alternative='greater')
+        assert_almost_equal(res, 0.02654424571169085)
+
+        res = self.binom_test_func(51, 235, p=1/6, alternative='two-sided')
+        assert_almost_equal(res, 0.0437479701823997)
+
+    @pytest.mark.skipif(sys.maxsize <= 2**32, reason="32-bit does not overflow")
+    def test_boost_overflow_raises(self):
+        # Boost.Math error policy should raise exceptions in Python
+        assert_raises(OverflowError, self.binom_test_func, 5.0, 6, p=sys.float_info.min)
+
+
+class TestBinomTest:
+    """Tests for stats.binomtest."""
+
+    # Expected results here are from R binom.test, e.g.
+    # options(digits=16)
+    # binom.test(484, 967, p=0.48)
+    #
+    def test_two_sided_pvalues1(self):
+        # `rtol` could be stricter on most architectures, but the value
+        # here is limited by the accuracy of `binom.cdf` for large inputs
+        # on Linux_Python_37_32bit_full and aarch64
+        rtol = 1e-10  # aarch64 observed rtol: 1.5e-11
+        res = stats.binomtest(10079999, 21000000, 0.48)
+        assert_allclose(res.pvalue, 1.0, rtol=rtol)
+        res = stats.binomtest(10079990, 21000000, 0.48)
+        assert_allclose(res.pvalue, 0.9966892187965, rtol=rtol)
+        res = stats.binomtest(10080009, 21000000, 0.48)
+        assert_allclose(res.pvalue, 0.9970377203856, rtol=rtol)
+        res = stats.binomtest(10080017, 21000000, 0.48)
+        assert_allclose(res.pvalue, 0.9940754817328, rtol=1e-9)
+
+    def test_two_sided_pvalues2(self):
+        rtol = 1e-10  # no aarch64 failure with 1e-15, preemptive bump
+        res = stats.binomtest(9, n=21, p=0.48)
+        assert_allclose(res.pvalue, 0.6689672431939, rtol=rtol)
+        res = stats.binomtest(4, 21, 0.48)
+        assert_allclose(res.pvalue, 0.008139563452106, rtol=rtol)
+        res = stats.binomtest(11, 21, 0.48)
+        assert_allclose(res.pvalue, 0.8278629664608, rtol=rtol)
+        res = stats.binomtest(7, 21, 0.48)
+        assert_allclose(res.pvalue, 0.1966772901718, rtol=rtol)
+        res = stats.binomtest(3, 10, .5)
+        assert_allclose(res.pvalue, 0.34375, rtol=rtol)
+        res = stats.binomtest(2, 2, .4)
+        assert_allclose(res.pvalue, 0.16, rtol=rtol)
+        res = stats.binomtest(2, 4, .3)
+        assert_allclose(res.pvalue, 0.5884, rtol=rtol)
+
+    def test_edge_cases(self):
+        rtol = 1e-10  # aarch64 observed rtol: 1.33e-15
+        res = stats.binomtest(484, 967, 0.5)
+        assert_allclose(res.pvalue, 1, rtol=rtol)
+        res = stats.binomtest(3, 47, 3/47)
+        assert_allclose(res.pvalue, 1, rtol=rtol)
+        res = stats.binomtest(13, 46, 13/46)
+        assert_allclose(res.pvalue, 1, rtol=rtol)
+        res = stats.binomtest(15, 44, 15/44)
+        assert_allclose(res.pvalue, 1, rtol=rtol)
+        res = stats.binomtest(7, 13, 0.5)
+        assert_allclose(res.pvalue, 1, rtol=rtol)
+        res = stats.binomtest(6, 11, 0.5)
+        assert_allclose(res.pvalue, 1, rtol=rtol)
+
+    def test_binary_srch_for_binom_tst(self):
+        # Test that old behavior of binomtest is maintained
+        # by the new binary search method in cases where d
+        # exactly equals the input on one side.
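+        # Both approaches count how many outcomes in one tail have
+        # PMF <= PMF(k); the two-sided p-value accumulates the
+        # probability of all such outcomes.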
+        n = 10
+        p = 0.5
+        k = 3
+        # First test for the case where k > mode of PMF
+        i = np.arange(np.ceil(p * n), n+1)
+        d = stats.binom.pmf(k, n, p)
+        # Old way of calculating y, probably consistent with R.
+        y1 = np.sum(stats.binom.pmf(i, n, p) <= d, axis=0)
+        # New way with binary search.
+        ix = _binary_search_for_binom_tst(lambda x1:
+                                          -stats.binom.pmf(x1, n, p),
+                                          -d, np.ceil(p * n), n)
+        y2 = n - ix + int(d == stats.binom.pmf(ix, n, p))
+        assert_allclose(y1, y2, rtol=1e-9)
+        # Now test for the other side.
+        k = 7
+        i = np.arange(np.floor(p * n) + 1)
+        d = stats.binom.pmf(k, n, p)
+        # Old way of calculating y.
+        y1 = np.sum(stats.binom.pmf(i, n, p) <= d, axis=0)
+        # New way with binary search.
+        ix = _binary_search_for_binom_tst(lambda x1:
+                                          stats.binom.pmf(x1, n, p),
+                                          d, 0, np.floor(p * n))
+        y2 = ix + 1
+        assert_allclose(y1, y2, rtol=1e-9)
+
+    # Expected results here are from R 3.6.2 binom.test
+    @pytest.mark.parametrize('alternative, pval, ci_low, ci_high',
+                             [('less', 0.148831050443,
+                               0.0, 0.2772002496709138),
+                              ('greater', 0.9004695898947,
+                               0.1366613252458672, 1.0),
+                              ('two-sided', 0.2983720970096,
+                               0.1266555521019559, 0.2918426890886281)])
+    def test_confidence_intervals1(self, alternative, pval, ci_low, ci_high):
+        res = stats.binomtest(20, n=100, p=0.25, alternative=alternative)
+        assert_allclose(res.pvalue, pval, rtol=1e-12)
+        assert_equal(res.statistic, 0.2)
+        ci = res.proportion_ci(confidence_level=0.95)
+        assert_allclose((ci.low, ci.high), (ci_low, ci_high), rtol=1e-12)
+
+    # Expected results here are from R 3.6.2 binom.test.
+    @pytest.mark.parametrize('alternative, pval, ci_low, ci_high',
+                             [('less',
+                               0.005656361, 0.0, 0.1872093),
+                              ('greater',
+                               0.9987146, 0.008860761, 1.0),
+                              ('two-sided',
+                               0.01191714, 0.006872485, 0.202706269)])
+    def test_confidence_intervals2(self, alternative, pval, ci_low, ci_high):
+        res = stats.binomtest(3, n=50, p=0.2, alternative=alternative)
+        assert_allclose(res.pvalue, pval, rtol=1e-6)
+        assert_equal(res.statistic, 0.06)
+        ci = res.proportion_ci(confidence_level=0.99)
+        assert_allclose((ci.low, ci.high), (ci_low, ci_high), rtol=1e-6)
+
+    # Expected results here are from R 3.6.2 binom.test.
+    @pytest.mark.parametrize('alternative, pval, ci_high',
+                             [('less', 0.05631351, 0.2588656),
+                              ('greater', 1.0, 1.0),
+                              ('two-sided', 0.07604122, 0.3084971)])
+    def test_confidence_interval_exact_k0(self, alternative, pval, ci_high):
+        # Test with k=0, n = 10.
+        res = stats.binomtest(0, 10, p=0.25, alternative=alternative)
+        assert_allclose(res.pvalue, pval, rtol=1e-6)
+        ci = res.proportion_ci(confidence_level=0.95)
+        assert_equal(ci.low, 0.0)
+        assert_allclose(ci.high, ci_high, rtol=1e-6)
+
+    # Expected results here are from R 3.6.2 binom.test.
+    @pytest.mark.parametrize('alternative, pval, ci_low',
+                             [('less', 1.0, 0.0),
+                              ('greater', 9.536743e-07, 0.7411344),
+                              ('two-sided', 9.536743e-07, 0.6915029)])
+    def test_confidence_interval_exact_k_is_n(self, alternative, pval, ci_low):
+        # Test with k = n = 10.
+        res = stats.binomtest(10, 10, p=0.25, alternative=alternative)
+        assert_allclose(res.pvalue, pval, rtol=1e-6)
+        ci = res.proportion_ci(confidence_level=0.95)
+        assert_equal(ci.high, 1.0)
+        assert_allclose(ci.low, ci_low, rtol=1e-6)
+
+    # Expected results are from the prop.test function in R 3.6.2.
+    @pytest.mark.parametrize(
+        'k, alternative, corr, conf, ci_low, ci_high',
+        [[3, 'two-sided', True, 0.95, 0.08094782, 0.64632928],
+         [3, 'two-sided', True, 0.99, 0.0586329, 0.7169416],
+         [3, 'two-sided', False, 0.95, 0.1077913, 0.6032219],
+         [3, 'two-sided', False, 0.99, 0.07956632, 0.6799753],
+         [3, 'less', True, 0.95, 0.0, 0.6043476],
+         [3, 'less', True, 0.99, 0.0, 0.6901811],
+         [3, 'less', False, 0.95, 0.0, 0.5583002],
+         [3, 'less', False, 0.99, 0.0, 0.6507187],
+         [3, 'greater', True, 0.95, 0.09644904, 1.0],
+         [3, 'greater', True, 0.99, 0.06659141, 1.0],
+         [3, 'greater', False, 0.95, 0.1268766, 1.0],
+         [3, 'greater', False, 0.99, 0.08974147, 1.0],
+
+         [0, 'two-sided', True, 0.95, 0.0, 0.3445372],
+         [0, 'two-sided', False, 0.95, 0.0, 0.2775328],
+         [0, 'less', True, 0.95, 0.0, 0.2847374],
+         [0, 'less', False, 0.95, 0.0, 0.212942],
+         [0, 'greater', True, 0.95, 0.0, 1.0],
+         [0, 'greater', False, 0.95, 0.0, 1.0],
+
+         [10, 'two-sided', True, 0.95, 0.6554628, 1.0],
+         [10, 'two-sided', False, 0.95, 0.7224672, 1.0],
+         [10, 'less', True, 0.95, 0.0, 1.0],
+         [10, 'less', False, 0.95, 0.0, 1.0],
+         [10, 'greater', True, 0.95, 0.7152626, 1.0],
+         [10, 'greater', False, 0.95, 0.787058, 1.0]]
+    )
+    def test_ci_wilson_method(self, k, alternative, corr, conf,
+                              ci_low, ci_high):
+        res = stats.binomtest(k, n=10, p=0.1, alternative=alternative)
+        if corr:
+            method = 'wilsoncc'
+        else:
+            method = 'wilson'
+        ci = res.proportion_ci(confidence_level=conf, method=method)
+        assert_allclose((ci.low, ci.high), (ci_low, ci_high), rtol=1e-6)
+
+    def test_estimate_equals_hypothesized_prop(self):
+        # Test the special case where the estimated proportion equals
+        # the hypothesized proportion.  When alternative is 'two-sided',
+        # the p-value is 1.
+        res = stats.binomtest(4, 16, 0.25)
+        assert_equal(res.statistic, 0.25)
+        assert_equal(res.pvalue, 1.0)
+
+    @pytest.mark.parametrize('k, n', [(0, 0), (-1, 2)])
+    def test_invalid_k_n(self, k, n):
+        with pytest.raises(ValueError,
+                           match="must be an integer not less than"):
+            stats.binomtest(k, n)
+
+    def test_invalid_k_too_big(self):
+        with pytest.raises(ValueError,
+                           match="k must not be greater than n"):
+            stats.binomtest(11, 10, 0.25)
+
+    def test_invalid_confidence_level(self):
+        res = stats.binomtest(3, n=10, p=0.1)
+        with pytest.raises(ValueError, match="must be in the interval"):
+            res.proportion_ci(confidence_level=-1)
+
+    def test_invalid_ci_method(self):
+        res = stats.binomtest(3, n=10, p=0.1)
+        with pytest.raises(ValueError, match="method must be"):
+            res.proportion_ci(method="plate of shrimp")
+
+    def test_alias(self):
+        res = stats.binomtest(3, n=10, p=0.1)
+        assert_equal(res.proportion_estimate, res.statistic)
+
+
+class TestFligner:
+
+    def test_data(self):
+        # numbers from R: fligner.test in package stats
+        x1 = np.arange(5)
+        assert_array_almost_equal(stats.fligner(x1, x1**2),
+                                  (3.2282229927203536, 0.072379187848207877),
+                                  11)
+
+    def test_trimmed1(self):
+        # Perturb input to break ties in the transformed data
+        # See https://github.com/scipy/scipy/pull/8042 for more details
+        rs = np.random.RandomState(123)
+        _perturb = lambda g: (np.asarray(g) + 1e-10*rs.randn(len(g))).tolist()
+        g1_ = _perturb(g1)
+        g2_ = _perturb(g2)
+        g3_ = _perturb(g3)
+        # Test that center='trimmed' gives the same result as center='mean'
+        # when proportiontocut=0.
+        Xsq1, pval1 = stats.fligner(g1_, g2_, g3_, center='mean')
+        Xsq2, pval2 = stats.fligner(g1_, g2_, g3_, center='trimmed',
+                                    proportiontocut=0.0)
+        assert_almost_equal(Xsq1, Xsq2)
+        assert_almost_equal(pval1, pval2)
+
+    def test_trimmed2(self):
+        x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]
+        y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]
+        # Use center='trimmed'
+        Xsq1, pval1 = stats.fligner(x, y, center='trimmed',
+                                    proportiontocut=0.125)
+        # Trim the data here, and use center='mean'
+        Xsq2, pval2 = stats.fligner(x[1:-1], y[1:-1], center='mean')
+        # Result should be the same.
+        assert_almost_equal(Xsq1, Xsq2)
+        assert_almost_equal(pval1, pval2)
+
+    # The following test looks reasonable at first, but fligner() uses the
+    # function stats.rankdata(), and in one of the cases in this test,
+    # there are ties, while in the other (because of normal rounding
+    # errors) there are not.  This difference leads to differences in the
+    # third significant digit of W.
+    #
+    #def test_equal_mean_median(self):
+    #    x = np.linspace(-1,1,21)
+    #    y = x**3
+    #    W1, pval1 = stats.fligner(x, y, center='mean')
+    #    W2, pval2 = stats.fligner(x, y, center='median')
+    #    assert_almost_equal(W1, W2)
+    #    assert_almost_equal(pval1, pval2)
+
+    def test_bad_keyword(self):
+        x = np.linspace(-1, 1, 21)
+        assert_raises(TypeError, stats.fligner, x, x, portiontocut=0.1)
+
+    def test_bad_center_value(self):
+        x = np.linspace(-1, 1, 21)
+        assert_raises(ValueError, stats.fligner, x, x, center='trim')
+
+    def test_bad_num_args(self):
+        # Too few args raises ValueError.
+        assert_raises(ValueError, stats.fligner, [1])
+
+    def test_empty_arg(self):
+        x = np.arange(5)
+        assert_equal((np.nan, np.nan), stats.fligner(x, x**2, []))
+
+
+def mood_cases_with_ties():
+    # Generate random `x` and `y` arrays with ties both between and within the
+    # samples. Expected results are (statistic, pvalue) from SAS.
+    expected_results = [(-1.76658511464992, .0386488678399305),
+                        (-.694031428192304, .2438312498647250),
+                        (-1.15093525352151, .1248794365836150)]
+    seeds = [23453254, 1298352315, 987234597]
+    for si, seed in enumerate(seeds):
+        rng = np.random.default_rng(seed)
+        xy = rng.random(100)
+        # Generate random indices to make ties
+        tie_ind = rng.integers(low=0, high=99, size=5)
+        # Generate a random number of ties for each index.
+        num_ties_per_ind = rng.integers(low=1, high=5, size=5)
+        # At each `tie_ind`, mark the next `n` indices equal to that value.
+        for i, n in zip(tie_ind, num_ties_per_ind):
+            for j in range(i + 1, i + n):
+                xy[j] = xy[i]
+        # scramble order of xy before splitting into `x, y`
+        rng.shuffle(xy)
+        x, y = np.split(xy, 2)
+        yield x, y, 'less', *expected_results[si]
+
+
+class TestMood:
+    @pytest.mark.parametrize("x,y,alternative,stat_expect,p_expect",
+                             mood_cases_with_ties())
+    def test_against_SAS(self, x, y, alternative, stat_expect, p_expect):
+        """
+        Example code used to generate SAS output:
+        DATA myData;
+        INPUT X Y;
+        CARDS;
+        1 0
+        1 1
+        1 2
+        1 3
+        1 4
+        2 0
+        2 1
+        2 4
+        2 9
+        2 16
+        ods graphics on;
+        proc npar1way mood data=myData ;
+           class X;
+            ods output  MoodTest=mt;
+        proc contents data=mt;
+        proc print data=mt;
+          format     Prob1 17.16 Prob2 17.16 Statistic 17.16 Z 17.16 ;
+            title "Mood Two-Sample Test";
+        proc print data=myData;
+            title "Data for above results";
+          run;
+        """
+        statistic, pvalue = stats.mood(x, y, alternative=alternative)
+        assert_allclose(stat_expect, statistic, atol=1e-16)
+        assert_allclose(p_expect, pvalue, atol=1e-16)
+
+    @pytest.mark.parametrize("alternative, expected",
+                             [('two-sided', (1.019938533549930,
+                                             .3077576129778760)),
+                              ('less', (1.019938533549930,
+                                        1 - .1538788064889380)),
+                              ('greater', (1.019938533549930,
+                                           .1538788064889380))])
+    def test_against_SAS_2(self, alternative, expected):
+        # Same SAS code as shown in test_against_SAS above
+        x = [111, 107, 100, 99, 102, 106, 109, 108, 104, 99,
+             101, 96, 97, 102, 107, 113, 116, 113, 110, 98]
+        y = [107, 108, 106, 98, 105, 103, 110, 105, 104, 100,
+             96, 108, 103, 104, 114, 114, 113, 108, 106, 99]
+        res = stats.mood(x, y, alternative=alternative)
+        assert_allclose(res, expected)
+
+    def test_mood_order_of_args(self):
+        # z should change sign when the order of arguments changes, pvalue
+        # should not change
+        np.random.seed(1234)
+        x1 = np.random.randn(10, 1)
+        x2 = np.random.randn(15, 1)
+        z1, p1 = stats.mood(x1, x2)
+        z2, p2 = stats.mood(x2, x1)
+        assert_array_almost_equal([z1, p1], [-z2, p2])
+
+    def test_mood_with_axis_none(self):
+        # Test with axis = None, compare with results from R
+        x1 = [-0.626453810742332, 0.183643324222082, -0.835628612410047,
+               1.59528080213779, 0.329507771815361, -0.820468384118015,
+               0.487429052428485, 0.738324705129217, 0.575781351653492,
+              -0.305388387156356, 1.51178116845085, 0.389843236411431,
+              -0.621240580541804, -2.2146998871775, 1.12493091814311,
+              -0.0449336090152309, -0.0161902630989461, 0.943836210685299,
+               0.821221195098089, 0.593901321217509]
+
+        x2 = [-0.896914546624981, 0.184849184646742, 1.58784533120882,
+              -1.13037567424629, -0.0802517565509893, 0.132420284381094,
+               0.707954729271733, -0.23969802417184, 1.98447393665293,
+              -0.138787012119665, 0.417650750792556, 0.981752777463662,
+              -0.392695355503813, -1.03966897694891, 1.78222896030858,
+              -2.31106908460517, 0.878604580921265, 0.035806718015226,
+               1.01282869212708, 0.432265154539617, 2.09081920524915,
+              -1.19992581964387, 1.58963820029007, 1.95465164222325,
+               0.00493777682814261, -2.45170638784613, 0.477237302613617,
+              -0.596558168631403, 0.792203270299649, 0.289636710177348]
+
+        x1 = np.array(x1)
+        x2 = np.array(x2)
+        x1.shape = (10, 2)
+        x2.shape = (15, 2)
+        assert_array_almost_equal(stats.mood(x1, x2, axis=None),
+                                  [-1.31716607555, 0.18778296257])
+
+    def test_mood_2d(self):
+        # Test if the results of mood test in 2-D case are consistent with the
+        # R result for the same inputs.  Numbers from R mood.test().
+        ny = 5
+        np.random.seed(1234)
+        x1 = np.random.randn(10, ny)
+        x2 = np.random.randn(15, ny)
+        z_vectest, pval_vectest = stats.mood(x1, x2)
+
+        for j in range(ny):
+            assert_array_almost_equal([z_vectest[j], pval_vectest[j]],
+                                      stats.mood(x1[:, j], x2[:, j]))
+
+        # inverse order of dimensions
+        x1 = x1.transpose()
+        x2 = x2.transpose()
+        z_vectest, pval_vectest = stats.mood(x1, x2, axis=1)
+
+        for i in range(ny):
+            # check axis handling is self consistent
+            assert_array_almost_equal([z_vectest[i], pval_vectest[i]],
+                                      stats.mood(x1[i, :], x2[i, :]))
+
+    def test_mood_3d(self):
+        shape = (10, 5, 6)
+        np.random.seed(1234)
+        x1 = np.random.randn(*shape)
+        x2 = np.random.randn(*shape)
+
+        for axis in range(3):
+            z_vectest, pval_vectest = stats.mood(x1, x2, axis=axis)
+            # Tests that result for 3-D arrays is equal to that for the
+            # same calculation on a set of 1-D arrays taken from the
+            # 3-D array
+            axes_idx = ([1, 2], [0, 2], [0, 1])  # the two axes != axis
+            for i in range(shape[axes_idx[axis][0]]):
+                for j in range(shape[axes_idx[axis][1]]):
+                    if axis == 0:
+                        slice1 = x1[:, i, j]
+                        slice2 = x2[:, i, j]
+                    elif axis == 1:
+                        slice1 = x1[i, :, j]
+                        slice2 = x2[i, :, j]
+                    else:
+                        slice1 = x1[i, j, :]
+                        slice2 = x2[i, j, :]
+
+                    assert_array_almost_equal([z_vectest[i, j],
+                                               pval_vectest[i, j]],
+                                              stats.mood(slice1, slice2))
+
+    def test_mood_bad_arg(self):
+        # Raise ValueError when the sum of the lengths of the args is
+        # less than 3
+        assert_raises(ValueError, stats.mood, [1], [])
+
+    def test_mood_alternative(self):
+
+        np.random.seed(0)
+        x = stats.norm.rvs(scale=0.75, size=100)
+        y = stats.norm.rvs(scale=1.25, size=100)
+
+        stat1, p1 = stats.mood(x, y, alternative='two-sided')
+        stat2, p2 = stats.mood(x, y, alternative='less')
+        stat3, p3 = stats.mood(x, y, alternative='greater')
+
+        assert stat1 == stat2 == stat3
+        assert_allclose(p1, 0, atol=1e-7)
+        assert_allclose(p2, p1/2)
+        assert_allclose(p3, 1 - p1/2)
+
+        with pytest.raises(ValueError, match="alternative must be..."):
+            stats.mood(x, y, alternative='ekki-ekki')
+
+    @pytest.mark.parametrize("alternative", ['two-sided', 'less', 'greater'])
+    def test_result(self, alternative):
+        rng = np.random.default_rng(265827767938813079281100964083953437622)
+        x1 = rng.standard_normal((10, 1))
+        x2 = rng.standard_normal((15, 1))
+
+        res = stats.mood(x1, x2, alternative=alternative)
+        assert_equal((res.statistic, res.pvalue), res)
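+
+
+# Illustrative sketch (ours, not part of the test suite): the relations
+# asserted in test_mood_alternative follow from the normal approximation
+# that stats.mood uses; the z statistic is identical for every
+# alternative, and only the normal tail(s) used for the p-value change.
+def _sketch_mood_alternatives(x, y):
+    z, p_two = stats.mood(x, y, alternative='two-sided')
+    p_less = stats.norm.cdf(z)    # alternative='less' uses the lower tail
+    p_greater = stats.norm.sf(z)  # alternative='greater' the upper tail
+    # the two-sided p-value folds the smaller tail
+    assert_allclose(p_two, 2 * min(p_less, p_greater))
+    return p_less, p_greater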
+
+
+class TestProbplot:
+
+    def test_basic(self):
+        x = stats.norm.rvs(size=20, random_state=12345)
+        osm, osr = stats.probplot(x, fit=False)
+        osm_expected = [-1.8241636, -1.38768012, -1.11829229, -0.91222575,
+                        -0.73908135, -0.5857176, -0.44506467, -0.31273668,
+                        -0.18568928, -0.06158146, 0.06158146, 0.18568928,
+                        0.31273668, 0.44506467, 0.5857176, 0.73908135,
+                        0.91222575, 1.11829229, 1.38768012, 1.8241636]
+        assert_allclose(osr, np.sort(x))
+        assert_allclose(osm, osm_expected)
+
+        res, res_fit = stats.probplot(x, fit=True)
+        res_fit_expected = [1.05361841, 0.31297795, 0.98741609]
+        assert_allclose(res_fit, res_fit_expected)
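+
+    # Note (ours): probplot documents that the theoretical quantiles are
+    # evaluated at Filliben's estimate of the order-statistic medians,
+    # which is where the symmetric osm_expected values above come from;
+    # osr is just the sorted input, as the first assertion shows.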
+
+    def test_sparams_keyword(self):
+        x = stats.norm.rvs(size=100, random_state=123456)
+        # Check that None, () and 0 (loc=0, for normal distribution) all work
+        # and give the same results
+        osm1, osr1 = stats.probplot(x, sparams=None, fit=False)
+        osm2, osr2 = stats.probplot(x, sparams=0, fit=False)
+        osm3, osr3 = stats.probplot(x, sparams=(), fit=False)
+        assert_allclose(osm1, osm2)
+        assert_allclose(osm1, osm3)
+        assert_allclose(osr1, osr2)
+        assert_allclose(osr1, osr3)
+        # Check that explicitly giving (loc, scale) params for the normal
+        # distribution gives the same results as the defaults above
+        osm, osr = stats.probplot(x, sparams=(0, 1), fit=False)
+        assert_allclose(osm, osm1)
+        assert_allclose(osr, osr1)
+
+    def test_dist_keyword(self):
+        x = stats.norm.rvs(size=20, random_state=12345)
+        osm1, osr1 = stats.probplot(x, fit=False, dist='t', sparams=(3,))
+        osm2, osr2 = stats.probplot(x, fit=False, dist=stats.t, sparams=(3,))
+        assert_allclose(osm1, osm2)
+        assert_allclose(osr1, osr2)
+
+        assert_raises(ValueError, stats.probplot, x, dist='wrong-dist-name')
+        assert_raises(AttributeError, stats.probplot, x, dist=[])
+
+        class custom_dist:
+            """Some class that looks just enough like a distribution."""
+            def ppf(self, q):
+                return stats.norm.ppf(q, loc=2)
+
+        osm1, osr1 = stats.probplot(x, sparams=(2,), fit=False)
+        osm2, osr2 = stats.probplot(x, dist=custom_dist(), fit=False)
+        assert_allclose(osm1, osm2)
+        assert_allclose(osr1, osr2)
+
+    @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
+    def test_plot_kwarg(self):
+        fig = plt.figure()
+        fig.add_subplot(111)
+        x = stats.t.rvs(3, size=100, random_state=7654321)
+        res1, fitres1 = stats.probplot(x, plot=plt)
+        plt.close()
+        res2, fitres2 = stats.probplot(x, plot=None)
+        res3 = stats.probplot(x, fit=False, plot=plt)
+        plt.close()
+        res4 = stats.probplot(x, fit=False, plot=None)
+        # Check that results are consistent between combinations of `fit` and
+        # `plot` keywords.
+        assert_(len(res1) == len(res2) == len(res3) == len(res4) == 2)
+        assert_allclose(res1, res2)
+        assert_allclose(res1, res3)
+        assert_allclose(res1, res4)
+        assert_allclose(fitres1, fitres2)
+
+        # Check that a Matplotlib Axes object is accepted
+        fig = plt.figure()
+        ax = fig.add_subplot(111)
+        stats.probplot(x, fit=False, plot=ax)
+        plt.close()
+
+    def test_probplot_bad_args(self):
+        # Raise ValueError when given an invalid distribution.
+        assert_raises(ValueError, stats.probplot, [1], dist="plate_of_shrimp")
+
+    def test_empty(self):
+        assert_equal(stats.probplot([], fit=False),
+                     (np.array([]), np.array([])))
+        assert_equal(stats.probplot([], fit=True),
+                     ((np.array([]), np.array([])),
+                      (np.nan, np.nan, 0.0)))
+
+    def test_array_of_size_one(self):
+        with np.errstate(invalid='ignore'):
+            assert_equal(stats.probplot([1], fit=True),
+                         ((np.array([0.]), np.array([1])),
+                          (np.nan, np.nan, 0.0)))
+
+
+class TestWilcoxon:
+    def test_wilcoxon_bad_arg(self):
+        # Raise ValueError when two args of different lengths are given or
+        # zero_method is unknown.
+        assert_raises(ValueError, stats.wilcoxon, [1], [1, 2])
+        assert_raises(ValueError, stats.wilcoxon, [1, 2], [1, 2], "dummy")
+        assert_raises(ValueError, stats.wilcoxon, [1, 2], [1, 2],
+                      alternative="dummy")
+        assert_raises(ValueError, stats.wilcoxon, [1]*10, mode="xyz")
+
+    def test_zero_diff(self):
+        x = np.arange(20)
+        # pratt and wilcox do not work if all differences x - y are zero
+        assert_raises(ValueError, stats.wilcoxon, x, x, "wilcox",
+                      mode="approx")
+        assert_raises(ValueError, stats.wilcoxon, x, x, "pratt",
+                      mode="approx")
+        # ranksum is n*(n+1)/2, split in half if zero_method == "zsplit"
+        assert_equal(stats.wilcoxon(x, x, "zsplit", mode="approx"),
+                     (20*21/4, 1.0))
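+
+    # Worked arithmetic for the zsplit assertion above (a note, not a
+    # test): with x == y every difference is zero, so the ranks 1..n sum
+    # to n*(n+1)/2; "zsplit" assigns half of that to each side, giving a
+    # statistic of n*(n+1)/4 -- for n = 20 that is 20*21/4 == 105.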
+
+    def test_pratt(self):
+        # regression test for gh-6805: p-value matches value from R package
+        # coin (wilcoxsign_test) reported in the issue
+        x = [1, 2, 3, 4]
+        y = [1, 2, 3, 5]
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning, message="Sample size too small")
+            res = stats.wilcoxon(x, y, zero_method="pratt", mode="approx")
+        assert_allclose(res, (0.0, 0.31731050786291415))
+
+    def test_wilcoxon_arg_type(self):
+        # Should be able to accept lists as arguments.
+        # Regression test for gh-6070.
+        arr = [1, 2, 3, 0, -1, 3, 1, 2, 1, 1, 2]
+
+        _ = stats.wilcoxon(arr, zero_method="pratt", mode="approx")
+        _ = stats.wilcoxon(arr, zero_method="zsplit", mode="approx")
+        _ = stats.wilcoxon(arr, zero_method="wilcox", mode="approx")
+
+    def test_accuracy_wilcoxon(self):
+        freq = [1, 4, 16, 15, 8, 4, 5, 1, 2]
+        nums = range(-4, 5)
+        x = np.concatenate([[u] * v for u, v in zip(nums, freq)])
+        y = np.zeros(x.size)
+
+        T, p = stats.wilcoxon(x, y, "pratt", mode="approx")
+        assert_allclose(T, 423)
+        assert_allclose(p, 0.0031724568006762576)
+
+        T, p = stats.wilcoxon(x, y, "zsplit", mode="approx")
+        assert_allclose(T, 441)
+        assert_allclose(p, 0.0032145343172473055)
+
+        T, p = stats.wilcoxon(x, y, "wilcox", mode="approx")
+        assert_allclose(T, 327)
+        assert_allclose(p, 0.00641346115861)
+
+        # Test the 'correction' option, using values computed in R with:
+        # > wilcox.test(x, y, paired=TRUE, exact=FALSE, correct={FALSE,TRUE})
+        x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112])
+        y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187])
+        T, p = stats.wilcoxon(x, y, correction=False, mode="approx")
+        assert_equal(T, 34)
+        assert_allclose(p, 0.6948866, rtol=1e-6)
+        T, p = stats.wilcoxon(x, y, correction=True, mode="approx")
+        assert_equal(T, 34)
+        assert_allclose(p, 0.7240817, rtol=1e-6)
+
+    def test_wilcoxon_result_attributes(self):
+        x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112])
+        y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187])
+        res = stats.wilcoxon(x, y, correction=False, mode="approx")
+        attributes = ('statistic', 'pvalue')
+        check_named_results(res, attributes)
+
+    def test_wilcoxon_has_zstatistic(self):
+        rng = np.random.default_rng(89426135444)
+        x, y = rng.random(15), rng.random(15)
+
+        res = stats.wilcoxon(x, y, mode="approx")
+        ref = stats.norm.ppf(res.pvalue/2)
+        assert_allclose(res.zstatistic, ref)
+
+        res = stats.wilcoxon(x, y, mode="exact")
+        assert not hasattr(res, 'zstatistic')
+
+        res = stats.wilcoxon(x, y)
+        assert not hasattr(res, 'zstatistic')
+
+    def test_wilcoxon_tie(self):
+        # Regression test for gh-2391.
+        # Corresponding R code is:
+        #   > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=FALSE)
+        #   > result$p.value
+        #   [1] 0.001565402
+        #   > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=TRUE)
+        #   > result$p.value
+        #   [1] 0.001904195
+        stat, p = stats.wilcoxon([0.1] * 10, mode="approx")
+        expected_p = 0.001565402
+        assert_equal(stat, 0)
+        assert_allclose(p, expected_p, rtol=1e-6)
+
+        stat, p = stats.wilcoxon([0.1] * 10, correction=True, mode="approx")
+        expected_p = 0.001904195
+        assert_equal(stat, 0)
+        assert_allclose(p, expected_p, rtol=1e-6)
+
+    def test_onesided(self):
+        # tested against "R version 3.4.1 (2017-06-30)"
+        # x <- c(125, 115, 130, 140, 140, 115, 140, 125, 140, 135)
+        # y <- c(110, 122, 125, 120, 140, 124, 123, 137, 135, 145)
+        # cfg <- list(x = x, y = y, paired = TRUE, exact = FALSE)
+        # do.call(wilcox.test, c(cfg, list(alternative = "less", correct = FALSE)))
+        # do.call(wilcox.test, c(cfg, list(alternative = "less", correct = TRUE)))
+        # do.call(wilcox.test, c(cfg, list(alternative = "greater", correct = FALSE)))
+        # do.call(wilcox.test, c(cfg, list(alternative = "greater", correct = TRUE)))
+        x = [125, 115, 130, 140, 140, 115, 140, 125, 140, 135]
+        y = [110, 122, 125, 120, 140, 124, 123, 137, 135, 145]
+
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning, message="Sample size too small")
+            w, p = stats.wilcoxon(x, y, alternative="less", mode="approx")
+        assert_equal(w, 27)
+        assert_almost_equal(p, 0.7031847, decimal=6)
+
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning, message="Sample size too small")
+            w, p = stats.wilcoxon(x, y, alternative="less", correction=True,
+                                  mode="approx")
+        assert_equal(w, 27)
+        assert_almost_equal(p, 0.7233656, decimal=6)
+
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning, message="Sample size too small")
+            w, p = stats.wilcoxon(x, y, alternative="greater", mode="approx")
+        assert_equal(w, 27)
+        assert_almost_equal(p, 0.2968153, decimal=6)
+
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning, message="Sample size too small")
+            w, p = stats.wilcoxon(x, y, alternative="greater", correction=True,
+                                  mode="approx")
+        assert_equal(w, 27)
+        assert_almost_equal(p, 0.3176447, decimal=6)
+
+    def test_exact_basic(self):
+        for n in range(1, 51):
+            pmf1 = _get_wilcoxon_distr(n)
+            pmf2 = _get_wilcoxon_distr2(n)
+            assert_equal(n*(n+1)/2 + 1, len(pmf1))
+            assert_equal(sum(pmf1), 1)
+            assert_array_almost_equal(pmf1, pmf2)
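+
+    # Support-size identity behind the first assertion above (ours): W is
+    # a sum over a subset of the ranks 1..n, so it takes the integer
+    # values 0 .. n*(n+1)/2 and its pmf has n*(n+1)/2 + 1 entries; e.g.
+    # n = 3 gives the 7 support points 0, 1, ..., 6.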
+
+    def test_exact_pval(self):
+        # expected values computed with "R version 3.4.1 (2017-06-30)"
+        x = np.array([1.81, 0.82, 1.56, -0.48, 0.81, 1.28, -1.04, 0.23,
+                      -0.75, 0.14])
+        y = np.array([0.71, 0.65, -0.2, 0.85, -1.1, -0.45, -0.84, -0.24,
+                      -0.68, -0.76])
+        _, p = stats.wilcoxon(x, y, alternative="two-sided", mode="exact")
+        assert_almost_equal(p, 0.1054688, decimal=6)
+        _, p = stats.wilcoxon(x, y, alternative="less", mode="exact")
+        assert_almost_equal(p, 0.9580078, decimal=6)
+        _, p = stats.wilcoxon(x, y, alternative="greater", mode="exact")
+        assert_almost_equal(p, 0.05273438, decimal=6)
+
+        x = np.arange(0, 20) + 0.5
+        y = np.arange(20, 0, -1)
+        _, p = stats.wilcoxon(x, y, alternative="two-sided", mode="exact")
+        assert_almost_equal(p, 0.8694878, decimal=6)
+        _, p = stats.wilcoxon(x, y, alternative="less", mode="exact")
+        assert_almost_equal(p, 0.4347439, decimal=6)
+        _, p = stats.wilcoxon(x, y, alternative="greater", mode="exact")
+        assert_almost_equal(p, 0.5795889, decimal=6)
+
+    # These inputs were chosen to give a W statistic that is either the
+    # center of the distribution (when the length of the support is odd), or
+    # the value to the left of the center (when the length of the support is
+    # even).  Also, the numbers are chosen so that the W statistic is the
+    # sum of the positive values.
+
+    @pytest.mark.parametrize('x', [[-1, -2, 3],
+                                   [-1, 2, -3, -4, 5],
+                                   [-1, -2, 3, -4, -5, -6, 7, 8]])
+    def test_exact_p_1(self, x):
+        w, p = stats.wilcoxon(x)
+        x = np.array(x)
+        wtrue = x[x > 0].sum()
+        assert_equal(w, wtrue)
+        assert_equal(p, 1)
+
+    def test_auto(self):
+        # mode='auto' defaults to exact if n <= 25 and d = x - y has no zeros
+        x = np.arange(0, 25) + 0.5
+        y = np.arange(25, 0, -1)
+        assert_equal(stats.wilcoxon(x, y),
+                     stats.wilcoxon(x, y, mode="exact"))
+
+        # if there are zeros in d = x - y, then switch to approx
+        d = np.arange(0, 13)
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning, message="Exact p-value calculation")
+            w, p = stats.wilcoxon(d)
+        assert_equal(stats.wilcoxon(d, mode="approx"), (w, p))
+
+        # use approximation for samples > 25
+        d = np.arange(1, 52)
+        assert_equal(stats.wilcoxon(d), stats.wilcoxon(d, mode="approx"))
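+
+
+# Condensed sketch (ours), mirroring the documented behavior of
+# mode='auto' exercised by test_auto above (not scipy's internal code):
+# the exact distribution is used for small samples and abandoned when
+# zero differences make it unavailable.
+def _sketch_wilcoxon_auto_mode(d):
+    d = np.asarray(d)
+    if d.size <= 25 and not np.any(d == 0):
+        return "exact"
+    return "approx"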
+
+
+class TestKstat:
+    def test_moments_normal_distribution(self):
+        np.random.seed(32149)
+        data = np.random.randn(12345)
+        moments = [stats.kstat(data, n) for n in [1, 2, 3, 4]]
+
+        expected = [0.011315, 1.017931, 0.05811052, 0.0754134]
+        assert_allclose(moments, expected, rtol=1e-4)
+
+        # test approximate agreement with `stats.moment`
+        m1 = stats.moment(data, moment=1)
+        m2 = stats.moment(data, moment=2)
+        m3 = stats.moment(data, moment=3)
+        assert_allclose((m1, m2, m3), expected[:-1], atol=0.02, rtol=1e-2)
+
+    def test_empty_input(self):
+        assert_raises(ValueError, stats.kstat, [])
+
+    def test_nan_input(self):
+        data = np.arange(10.)
+        data[6] = np.nan
+
+        assert_equal(stats.kstat(data), np.nan)
+
+    def test_kstat_bad_arg(self):
+        # Raise ValueError if n > 4 or n < 1.
+        data = np.arange(10)
+        for n in [0, 4.001]:
+            assert_raises(ValueError, stats.kstat, data, n=n)
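+
+
+# Background note (ours): stats.kstat(data, n) returns the nth
+# k-statistic, the unique symmetric unbiased estimator of the nth
+# cumulant; k1 is the sample mean and k2 the bias-corrected sample
+# variance, which is why the first two expected values in
+# TestKstat.test_moments_normal_distribution are close to (0, 1).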
+
+
+class TestKstatVar:
+    def test_empty_input(self):
+        assert_raises(ValueError, stats.kstatvar, [])
+
+    def test_nan_input(self):
+        data = np.arange(10.)
+        data[6] = np.nan
+
+        assert_equal(stats.kstatvar(data), np.nan)
+
+    def test_bad_arg(self):
+        # Raise ValueError if n is not 1 or 2.
+        data = [1]
+        n = 10
+        assert_raises(ValueError, stats.kstatvar, data, n=n)
+
+
+class TestPpccPlot:
+    def setup_method(self):
+        self.x = _old_loggamma_rvs(5, size=500, random_state=7654321) + 5
+
+    def test_basic(self):
+        N = 5
+        svals, ppcc = stats.ppcc_plot(self.x, -10, 10, N=N)
+        ppcc_expected = [0.21139644, 0.21384059, 0.98766719, 0.97980182,
+                         0.93519298]
+        assert_allclose(svals, np.linspace(-10, 10, num=N))
+        assert_allclose(ppcc, ppcc_expected)
+
+    def test_dist(self):
+        # Test that we can specify distributions both by name and as objects.
+        svals1, ppcc1 = stats.ppcc_plot(self.x, -10, 10, dist='tukeylambda')
+        svals2, ppcc2 = stats.ppcc_plot(self.x, -10, 10,
+                                        dist=stats.tukeylambda)
+        assert_allclose(svals1, svals2, rtol=1e-20)
+        assert_allclose(ppcc1, ppcc2, rtol=1e-20)
+        # Test that 'tukeylambda' is the default dist
+        svals3, ppcc3 = stats.ppcc_plot(self.x, -10, 10)
+        assert_allclose(svals1, svals3, rtol=1e-20)
+        assert_allclose(ppcc1, ppcc3, rtol=1e-20)
+
+    @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
+    def test_plot_kwarg(self):
+        # Check with the matplotlib.pyplot module
+        fig = plt.figure()
+        ax = fig.add_subplot(111)
+        stats.ppcc_plot(self.x, -20, 20, plot=plt)
+        fig.delaxes(ax)
+
+        # Check that a Matplotlib Axes object is accepted
+        ax = fig.add_subplot(111)
+        stats.ppcc_plot(self.x, -20, 20, plot=ax)
+        plt.close()
+
+    def test_invalid_inputs(self):
+        # `b` has to be larger than `a`
+        assert_raises(ValueError, stats.ppcc_plot, self.x, 1, 0)
+
+        # Raise ValueError when given an invalid distribution.
+        assert_raises(ValueError, stats.ppcc_plot, [1, 2, 3], 0, 1,
+                      dist="plate_of_shrimp")
+
+    def test_empty(self):
+        # For consistency with the probplot return value for an empty array,
+        # ppcc contains all zeros and svals is the same as for non-empty
+        # array input.
+        svals, ppcc = stats.ppcc_plot([], 0, 1)
+        assert_allclose(svals, np.linspace(0, 1, num=80))
+        assert_allclose(ppcc, np.zeros(80, dtype=float))
+
+
+class TestPpccMax:
+    def test_ppcc_max_bad_arg(self):
+        # Raise ValueError when given an invalid distribution.
+        data = [1]
+        assert_raises(ValueError, stats.ppcc_max, data, dist="plate_of_shrimp")
+
+    def test_ppcc_max_basic(self):
+        x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000,
+                                  random_state=1234567) + 1e4
+        assert_almost_equal(stats.ppcc_max(x), -0.71215366521264145, decimal=7)
+
+    def test_dist(self):
+        x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000,
+                                  random_state=1234567) + 1e4
+
+        # Test that we can specify distributions both by name and as objects.
+        max1 = stats.ppcc_max(x, dist='tukeylambda')
+        max2 = stats.ppcc_max(x, dist=stats.tukeylambda)
+        assert_almost_equal(max1, -0.71215366521264145, decimal=5)
+        assert_almost_equal(max2, -0.71215366521264145, decimal=5)
+
+        # Test that 'tukeylambda' is the default dist
+        max3 = stats.ppcc_max(x)
+        assert_almost_equal(max3, -0.71215366521264145, decimal=5)
+
+    def test_brack(self):
+        x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000,
+                                  random_state=1234567) + 1e4
+        assert_raises(ValueError, stats.ppcc_max, x, brack=(0.0, 1.0, 0.5))
+
+        assert_almost_equal(stats.ppcc_max(x, brack=(0, 1)),
+                            -0.71215366521264145, decimal=7)
+
+        assert_almost_equal(stats.ppcc_max(x, brack=(-2, 2)),
+                            -0.71215366521264145, decimal=7)
+
+
+class TestBoxcox_llf:
+
+    def test_basic(self):
+        x = stats.norm.rvs(size=10000, loc=10, random_state=54321)
+        lmbda = 1
+        llf = stats.boxcox_llf(lmbda, x)
+        llf_expected = -x.size / 2. * np.log(x.std()**2)
+        assert_allclose(llf, llf_expected)
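+
+    # Why this is the expected value (ours): boxcox_llf is documented as
+    #     llf = (lmbda - 1) * sum(log(x)) - N/2 * log(var(x_transformed)),
+    # and for lmbda = 1 the transform is x - 1, so the first term
+    # vanishes and the variance is unchanged, leaving -N/2 * log(var(x)).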
+
+    def test_array_like(self):
+        x = stats.norm.rvs(size=100, loc=10, random_state=54321)
+        lmbda = 1
+        llf = stats.boxcox_llf(lmbda, x)
+        llf2 = stats.boxcox_llf(lmbda, list(x))
+        assert_allclose(llf, llf2, rtol=1e-12)
+
+    def test_2d_input(self):
+        # Note: boxcox_llf() was already working with 2-D input (sort of), so
+        # keep it like that.  boxcox() doesn't work with 2-D input though, due
+        # to brent() returning a scalar.
+        x = stats.norm.rvs(size=100, loc=10, random_state=54321)
+        lmbda = 1
+        llf = stats.boxcox_llf(lmbda, x)
+        llf2 = stats.boxcox_llf(lmbda, np.vstack([x, x]).T)
+        assert_allclose([llf, llf], llf2, rtol=1e-12)
+
+    def test_empty(self):
+        assert_(np.isnan(stats.boxcox_llf(1, [])))
+
+    def test_gh_6873(self):
+        # Regression test for gh-6873.
+        # This example was taken from gh-7534, a duplicate of gh-6873.
+        data = [198.0, 233.0, 233.0, 392.0]
+        llf = stats.boxcox_llf(-8, data)
+        # The expected value was computed with mpmath.
+        assert_allclose(llf, -17.93934208579061)
+
+
+# This is the data from github user Qukaiyi, given as an example
+# of a data set that caused boxcox to fail.
+_boxcox_data = [
+    15957, 112079, 1039553, 711775, 173111, 307382, 183155, 53366, 760875,
+    207500, 160045, 473714, 40194, 440319, 133261, 265444, 155590, 36660,
+    904939, 55108, 138391, 339146, 458053, 63324, 1377727, 1342632, 41575,
+    68685, 172755, 63323, 368161, 199695, 538214, 167760, 388610, 398855,
+    1001873, 364591, 1320518, 194060, 194324, 2318551, 196114, 64225, 272000,
+    198668, 123585, 86420, 1925556, 695798, 88664, 46199, 759135, 28051,
+    345094, 1977752, 51778, 82746, 638126, 2560910, 45830, 140576, 1603787,
+    57371, 548730, 5343629, 2298913, 998813, 2156812, 423966, 68350, 145237,
+    131935, 1600305, 342359, 111398, 1409144, 281007, 60314, 242004, 113418,
+    246211, 61940, 95858, 957805, 40909, 307955, 174159, 124278, 241193,
+    872614, 304180, 146719, 64361, 87478, 509360, 167169, 933479, 620561,
+    483333, 97416, 143518, 286905, 597837, 2556043, 89065, 69944, 196858,
+    88883, 49379, 916265, 1527392, 626954, 54415, 89013, 2883386, 106096,
+    402697, 45578, 349852, 140379, 34648, 757343, 1305442, 2054757, 121232,
+    606048, 101492, 51426, 1820833, 83412, 136349, 1379924, 505977, 1303486,
+    95853, 146451, 285422, 2205423, 259020, 45864, 684547, 182014, 784334,
+    174793, 563068, 170745, 1195531, 63337, 71833, 199978, 2330904, 227335,
+    898280, 75294, 2011361, 116771, 157489, 807147, 1321443, 1148635, 2456524,
+    81839, 1228251, 97488, 1051892, 75397, 3009923, 2732230, 90923, 39735,
+    132433, 225033, 337555, 1204092, 686588, 1062402, 40362, 1361829, 1497217,
+    150074, 551459, 2019128, 39581, 45349, 1117187, 87845, 1877288, 164448,
+    10338362, 24942, 64737, 769946, 2469124, 2366997, 259124, 2667585, 29175,
+    56250, 74450, 96697, 5920978, 838375, 225914, 119494, 206004, 430907,
+    244083, 219495, 322239, 407426, 618748, 2087536, 2242124, 4736149, 124624,
+    406305, 240921, 2675273, 4425340, 821457, 578467, 28040, 348943, 48795,
+    145531, 52110, 1645730, 1768364, 348363, 85042, 2673847, 81935, 169075,
+    367733, 135474, 383327, 1207018, 93481, 5934183, 352190, 636533, 145870,
+    55659, 146215, 73191, 248681, 376907, 1606620, 169381, 81164, 246390,
+    236093, 885778, 335969, 49266, 381430, 307437, 350077, 34346, 49340,
+    84715, 527120, 40163, 46898, 4609439, 617038, 2239574, 159905, 118337,
+    120357, 430778, 3799158, 3516745, 54198, 2970796, 729239, 97848, 6317375,
+    887345, 58198, 88111, 867595, 210136, 1572103, 1420760, 574046, 845988,
+    509743, 397927, 1119016, 189955, 3883644, 291051, 126467, 1239907, 2556229,
+    411058, 657444, 2025234, 1211368, 93151, 577594, 4842264, 1531713, 305084,
+    479251, 20591, 1466166, 137417, 897756, 594767, 3606337, 32844, 82426,
+    1294831, 57174, 290167, 322066, 813146, 5671804, 4425684, 895607, 450598,
+    1048958, 232844, 56871, 46113, 70366, 701618, 97739, 157113, 865047,
+    194810, 1501615, 1765727, 38125, 2733376, 40642, 437590, 127337, 106310,
+    4167579, 665303, 809250, 1210317, 45750, 1853687, 348954, 156786, 90793,
+    1885504, 281501, 3902273, 359546, 797540, 623508, 3672775, 55330, 648221,
+    266831, 90030, 7118372, 735521, 1009925, 283901, 806005, 2434897, 94321,
+    309571, 4213597, 2213280, 120339, 64403, 8155209, 1686948, 4327743,
+    1868312, 135670, 3189615, 1569446, 706058, 58056, 2438625, 520619, 105201,
+    141961, 179990, 1351440, 3148662, 2804457, 2760144, 70775, 33807, 1926518,
+    2362142, 186761, 240941, 97860, 1040429, 1431035, 78892, 484039, 57845,
+    724126, 3166209, 175913, 159211, 1182095, 86734, 1921472, 513546, 326016,
+    1891609
+]
+
+class TestBoxcox:
+
+    def test_fixed_lmbda(self):
+        x = _old_loggamma_rvs(5, size=50, random_state=12345) + 5
+        xt = stats.boxcox(x, lmbda=1)
+        assert_allclose(xt, x - 1)
+        xt = stats.boxcox(x, lmbda=-1)
+        assert_allclose(xt, 1 - 1/x)
+
+        xt = stats.boxcox(x, lmbda=0)
+        assert_allclose(xt, np.log(x))
+
+        # Also test that array_like input works
+        xt = stats.boxcox(list(x), lmbda=0)
+        assert_allclose(xt, np.log(x))
+
+        # test that constant input is accepted; see gh-12225
+        xt = stats.boxcox(np.ones(10), 2)
+        assert_equal(xt, np.zeros(10))
+
+    def test_lmbda_None(self):
+        # Start from normal rv's, do inverse transform to check that
+        # optimization function gets close to the right answer.
+        lmbda = 2.5
+        x = stats.norm.rvs(loc=10, size=50000, random_state=1245)
+        x_inv = (x * lmbda + 1)**(-lmbda)
+        xt, maxlog = stats.boxcox(x_inv)
+
+        assert_almost_equal(maxlog, -1 / lmbda, decimal=2)
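+
+    # Why -1/lmbda is the right answer (ours, a sketch of the algebra):
+    # with y = (lmbda*x + 1)**(-lmbda), the Box-Cox transform with
+    # parameter lam = -1/lmbda gives
+    #     (y**lam - 1) / lam = ((lmbda*x + 1) - 1) / lam = -lmbda**2 * x,
+    # a linear function of the normal sample x, so the log-likelihood
+    # peaks near lam = -1/lmbda.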
+
+    def test_alpha(self):
+        rng = np.random.RandomState(1234)
+        x = _old_loggamma_rvs(5, size=50, random_state=rng) + 5
+
+        # Some regular values for alpha, on a small sample size
+        _, _, interval = stats.boxcox(x, alpha=0.75)
+        assert_allclose(interval, [4.004485780226041, 5.138756355035744])
+        _, _, interval = stats.boxcox(x, alpha=0.05)
+        assert_allclose(interval, [1.2138178554857557, 8.209033272375663])
+
+        # Try some extreme values to check that we don't hit the N=500 limit
+        x = _old_loggamma_rvs(7, size=500, random_state=rng) + 15
+        _, _, interval = stats.boxcox(x, alpha=0.001)
+        assert_allclose(interval, [0.3988867, 11.40553131])
+        _, _, interval = stats.boxcox(x, alpha=0.999)
+        assert_allclose(interval, [5.83316246, 5.83735292])
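+
+    # Note (ours): `alpha` requests a 100*(1 - alpha)% confidence
+    # interval for lmbda, so alpha=0.001 yields the very wide 99.9%
+    # interval and alpha=0.999 the very narrow 0.1% interval seen above.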
+
+    def test_boxcox_bad_arg(self):
+        # Raise ValueError if any data value is negative.
+        x = np.array([-1, 2])
+        assert_raises(ValueError, stats.boxcox, x)
+        # Raise ValueError if data is constant.
+        assert_raises(ValueError, stats.boxcox, np.array([1]))
+        # Raise ValueError if data is not 1-dimensional.
+        assert_raises(ValueError, stats.boxcox, np.array([[1], [2]]))
+
+    def test_empty(self):
+        assert_(stats.boxcox([]).shape == (0,))
+
+    def test_gh_6873(self):
+        # Regression test for gh-6873.
+        y, lam = stats.boxcox(_boxcox_data)
+        # The expected value of lam was computed with the function
+        # powerTransform in the R library 'car'.  I trust that value
+        # to only about five significant digits.
+        assert_allclose(lam, -0.051654, rtol=1e-5)
+
+    @pytest.mark.parametrize("bounds", [(-1, 1), (1.1, 2), (-2, -1.1)])
+    def test_bounded_optimizer_within_bounds(self, bounds):
+        # Define custom optimizer with bounds.
+        def optimizer(fun):
+            return optimize.minimize_scalar(fun, bounds=bounds,
+                                            method="bounded")
+
+        _, lmbda = stats.boxcox(_boxcox_data, lmbda=None, optimizer=optimizer)
+        assert bounds[0] < lmbda < bounds[1]
+
+    def test_bounded_optimizer_against_unbounded_optimizer(self):
+        # Test whether setting bounds on optimizer excludes solution from
+        # unbounded optimizer.
+
+        # Get unbounded solution.
+        _, lmbda = stats.boxcox(_boxcox_data, lmbda=None)
+
+        # Set tolerance and bounds around solution.
+        bounds = (lmbda + 0.1, lmbda + 1)
+        options = {'xatol': 1e-12}
+
+        def optimizer(fun):
+            return optimize.minimize_scalar(fun, bounds=bounds,
+                                            method="bounded", options=options)
+
+        # Check bounded solution. Lower bound should be active.
+        _, lmbda_bounded = stats.boxcox(_boxcox_data, lmbda=None,
+                                        optimizer=optimizer)
+        assert lmbda_bounded != lmbda
+        assert_allclose(lmbda_bounded, bounds[0])
+
+    @pytest.mark.parametrize("optimizer", ["str", (1, 2), 0.1])
+    def test_bad_optimizer_type_raises_error(self, optimizer):
+        # Check if error is raised if string, tuple or float is passed
+        with pytest.raises(ValueError, match="`optimizer` must be a callable"):
+            stats.boxcox(_boxcox_data, lmbda=None, optimizer=optimizer)
+
+    def test_bad_optimizer_value_raises_error(self):
+        # Check if error is raised if `optimizer` function does not return
+        # `OptimizeResult` object
+
+        # Define test function that always returns 1
+        def optimizer(fun):
+            return 1
+
+        message = "`optimizer` must return an object containing the optimal..."
+        with pytest.raises(ValueError, match=message):
+            stats.boxcox(_boxcox_data, lmbda=None, optimizer=optimizer)
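+
+
+# The `optimizer` contract the tests above rely on, as documented for
+# stats.boxcox and stats.boxcox_normmax: a callable that receives the
+# objective function of one variable and returns an object with an ``x``
+# attribute holding the optimal lmbda (e.g. a scipy.optimize
+# OptimizeResult). A minimal conforming example (ours, for illustration):
+def _example_bounded_boxcox_optimizer(fun, bounds=(-2, 2)):
+    return optimize.minimize_scalar(fun, bounds=bounds, method="bounded")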
+
+
+class TestBoxcoxNormmax:
+    def setup_method(self):
+        self.x = _old_loggamma_rvs(5, size=50, random_state=12345) + 5
+
+    def test_pearsonr(self):
+        maxlog = stats.boxcox_normmax(self.x)
+        assert_allclose(maxlog, 1.804465, rtol=1e-6)
+
+    def test_mle(self):
+        maxlog = stats.boxcox_normmax(self.x, method='mle')
+        assert_allclose(maxlog, 1.758101, rtol=1e-6)
+
+        # Check that boxcox() uses 'mle'
+        _, maxlog_boxcox = stats.boxcox(self.x)
+        assert_allclose(maxlog_boxcox, maxlog)
+
+    def test_all(self):
+        maxlog_all = stats.boxcox_normmax(self.x, method='all')
+        assert_allclose(maxlog_all, [1.804465, 1.758101], rtol=1e-6)
+
+    @pytest.mark.parametrize("method", ["mle", "pearsonr", "all"])
+    @pytest.mark.parametrize("bounds", [(-1, 1), (1.1, 2), (-2, -1.1)])
+    def test_bounded_optimizer_within_bounds(self, method, bounds):
+
+        def optimizer(fun):
+            return optimize.minimize_scalar(fun, bounds=bounds,
+                                            method="bounded")
+
+        maxlog = stats.boxcox_normmax(self.x, method=method,
+                                      optimizer=optimizer)
+        assert np.all(bounds[0] < maxlog)
+        assert np.all(maxlog < bounds[1])
+
+    def test_user_defined_optimizer(self):
+        # tests an optimizer that is not based on scipy.optimize.minimize
+        lmbda = stats.boxcox_normmax(self.x)
+        lmbda_rounded = np.round(lmbda, 5)
+        lmbda_range = np.linspace(lmbda_rounded-0.01, lmbda_rounded+0.01, 1001)
+
+        class MyResult:
+            pass
+
+        def optimizer(fun):
+            # brute force minimum over the range
+            objs = []
+            for lmbda in lmbda_range:
+                objs.append(fun(lmbda))
+            res = MyResult()
+            res.x = lmbda_range[np.argmin(objs)]
+            return res
+
+        lmbda2 = stats.boxcox_normmax(self.x, optimizer=optimizer)
+        assert lmbda2 != lmbda                 # not identical
+        assert_allclose(lmbda2, lmbda, rtol=1e-5)  # but as close as it should be
+
+    def test_user_defined_optimizer_and_brack_raises_error(self):
+        optimizer = optimize.minimize_scalar
+
+        # Using default `brack=None` with user-defined `optimizer` works as
+        # expected.
+        stats.boxcox_normmax(self.x, brack=None, optimizer=optimizer)
+
+        # Using user-defined `brack` with user-defined `optimizer` is expected
+        # to throw an error. Instead, users should specify
+        # optimizer-specific parameters in the optimizer function itself.
+        with pytest.raises(ValueError, match="`brack` must be None if "
+                                             "`optimizer` is given"):
+            stats.boxcox_normmax(self.x, brack=(-2.0, 2.0),
+                                 optimizer=optimizer)
+
+
+class TestBoxcoxNormplot:
+    def setup_method(self):
+        self.x = _old_loggamma_rvs(5, size=500, random_state=7654321) + 5
+
+    def test_basic(self):
+        N = 5
+        lmbdas, ppcc = stats.boxcox_normplot(self.x, -10, 10, N=N)
+        ppcc_expected = [0.57783375, 0.83610988, 0.97524311, 0.99756057,
+                         0.95843297]
+        assert_allclose(lmbdas, np.linspace(-10, 10, num=N))
+        assert_allclose(ppcc, ppcc_expected)
+
+    @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
+    def test_plot_kwarg(self):
+        # Check with the matplotlib.pyplot module
+        fig = plt.figure()
+        ax = fig.add_subplot(111)
+        stats.boxcox_normplot(self.x, -20, 20, plot=plt)
+        fig.delaxes(ax)
+
+        # Check that a Matplotlib Axes object is accepted
+        ax = fig.add_subplot(111)
+        stats.boxcox_normplot(self.x, -20, 20, plot=ax)
+        plt.close()
+
+    def test_invalid_inputs(self):
+        # `lb` has to be larger than `la`
+        assert_raises(ValueError, stats.boxcox_normplot, self.x, 1, 0)
+        # `x` cannot contain negative values
+        assert_raises(ValueError, stats.boxcox_normplot, [-1, 1], 0, 1)
+
+    def test_empty(self):
+        assert_(stats.boxcox_normplot([], 0, 1).size == 0)
+
+
+class TestYeojohnson_llf:
+
+    def test_array_like(self):
+        x = stats.norm.rvs(size=100, loc=0, random_state=54321)
+        lmbda = 1
+        llf = stats.yeojohnson_llf(lmbda, x)
+        llf2 = stats.yeojohnson_llf(lmbda, list(x))
+        assert_allclose(llf, llf2, rtol=1e-12)
+
+    def test_2d_input(self):
+        x = stats.norm.rvs(size=100, loc=10, random_state=54321)
+        lmbda = 1
+        llf = stats.yeojohnson_llf(lmbda, x)
+        llf2 = stats.yeojohnson_llf(lmbda, np.vstack([x, x]).T)
+        assert_allclose([llf, llf], llf2, rtol=1e-12)
+
+    def test_empty(self):
+        assert_(np.isnan(stats.yeojohnson_llf(1, [])))
+
+
+class TestYeojohnson:
+
+    def test_fixed_lmbda(self):
+        rng = np.random.RandomState(12345)
+
+        # Test positive input
+        x = _old_loggamma_rvs(5, size=50, random_state=rng) + 5
+        assert np.all(x > 0)
+        xt = stats.yeojohnson(x, lmbda=1)
+        assert_allclose(xt, x)
+        xt = stats.yeojohnson(x, lmbda=-1)
+        assert_allclose(xt, 1 - 1 / (x + 1))
+        xt = stats.yeojohnson(x, lmbda=0)
+        assert_allclose(xt, np.log(x + 1))
+        xt = stats.yeojohnson(x, lmbda=1)
+        assert_allclose(xt, x)
+
+        # Test negative input
+        x = _old_loggamma_rvs(5, size=50, random_state=rng) - 5
+        assert np.all(x < 0)
+        xt = stats.yeojohnson(x, lmbda=2)
+        assert_allclose(xt, -np.log(-x + 1))
+        xt = stats.yeojohnson(x, lmbda=1)
+        assert_allclose(xt, x)
+        xt = stats.yeojohnson(x, lmbda=3)
+        assert_allclose(xt, 1 / (-x + 1) - 1)
+
+        # test both positive and negative input
+        x = _old_loggamma_rvs(5, size=50, random_state=rng) - 2
+        assert not np.all(x < 0)
+        assert not np.all(x >= 0)
+        pos = x >= 0
+        xt = stats.yeojohnson(x, lmbda=1)
+        assert_allclose(xt[pos], x[pos])
+        xt = stats.yeojohnson(x, lmbda=-1)
+        assert_allclose(xt[pos], 1 - 1 / (x[pos] + 1))
+        xt = stats.yeojohnson(x, lmbda=0)
+        assert_allclose(xt[pos], np.log(x[pos] + 1))
+        xt = stats.yeojohnson(x, lmbda=1)
+        assert_allclose(xt[pos], x[pos])
+
+        neg = ~pos
+        xt = stats.yeojohnson(x, lmbda=2)
+        assert_allclose(xt[neg], -np.log(-x[neg] + 1))
+        xt = stats.yeojohnson(x, lmbda=1)
+        assert_allclose(xt[neg], x[neg])
+        xt = stats.yeojohnson(x, lmbda=3)
+        assert_allclose(xt[neg], 1 / (-x[neg] + 1) - 1)
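+
+    # For reference (ours), the cases above follow the Yeo-Johnson
+    # definition:
+    #     y = ((x + 1)**lmbda - 1) / lmbda                 x >= 0, lmbda != 0
+    #     y = log(x + 1)                                   x >= 0, lmbda == 0
+    #     y = -((-x + 1)**(2 - lmbda) - 1) / (2 - lmbda)   x < 0, lmbda != 2
+    #     y = -log(-x + 1)                                 x < 0, lmbda == 2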
+
+    @pytest.mark.parametrize('lmbda', [0, .1, .5, 2])
+    def test_lmbda_None(self, lmbda):
+        # Start from normal rv's, do inverse transform to check that
+        # optimization function gets close to the right answer.
+
+        def _inverse_transform(x, lmbda):
+            x_inv = np.zeros(x.shape, dtype=x.dtype)
+            pos = x >= 0
+
+            # when x >= 0
+            if abs(lmbda) < np.spacing(1.):
+                x_inv[pos] = np.exp(x[pos]) - 1
+            else:  # lmbda != 0
+                x_inv[pos] = np.power(x[pos] * lmbda + 1, 1 / lmbda) - 1
+
+            # when x < 0
+            if abs(lmbda - 2) > np.spacing(1.):
+                x_inv[~pos] = 1 - np.power(-(2 - lmbda) * x[~pos] + 1,
+                                           1 / (2 - lmbda))
+            else:  # lmbda == 2
+                x_inv[~pos] = 1 - np.exp(-x[~pos])
+
+            return x_inv
+
+        n_samples = 20000
+        np.random.seed(1234567)
+        x = np.random.normal(loc=0, scale=1, size=(n_samples))
+
+        x_inv = _inverse_transform(x, lmbda)
+        xt, maxlog = stats.yeojohnson(x_inv)
+
+        assert_allclose(maxlog, lmbda, atol=1e-2)
+
+        assert_almost_equal(0, np.linalg.norm(x - xt) / n_samples, decimal=2)
+        assert_almost_equal(0, xt.mean(), decimal=1)
+        assert_almost_equal(1, xt.std(), decimal=1)
+
+    def test_empty(self):
+        assert_(stats.yeojohnson([]).shape == (0,))
+
+    def test_array_like(self):
+        x = stats.norm.rvs(size=100, loc=0, random_state=54321)
+        xt1, _ = stats.yeojohnson(x)
+        xt2, _ = stats.yeojohnson(list(x))
+        assert_allclose(xt1, xt2, rtol=1e-12)
+
+    @pytest.mark.parametrize('dtype', [np.complex64, np.complex128])
+    def test_input_dtype_complex(self, dtype):
+        x = np.arange(6, dtype=dtype)
+        err_msg = ('Yeo-Johnson transformation is not defined for complex '
+                   'numbers.')
+        with pytest.raises(ValueError, match=err_msg):
+            stats.yeojohnson(x)
+
+    @pytest.mark.parametrize('dtype', [np.int8, np.uint8, np.int16, np.int32])
+    def test_input_dtype_integer(self, dtype):
+        x_int = np.arange(8, dtype=dtype)
+        x_float = np.arange(8, dtype=np.float64)
+        xt_int, lmbda_int = stats.yeojohnson(x_int)
+        xt_float, lmbda_float = stats.yeojohnson(x_float)
+        assert_allclose(xt_int, xt_float, rtol=1e-7)
+        assert_allclose(lmbda_int, lmbda_float, rtol=1e-7)
+
+    def test_input_high_variance(self):
+        # non-regression test for gh-10821
+        x = np.array([3251637.22, 620695.44, 11642969.00, 2223468.22,
+                      85307500.00, 16494389.89, 917215.88, 11642969.00,
+                      2145773.87, 4962000.00, 620695.44, 651234.50,
+                      1907876.71, 4053297.88, 3251637.22, 3259103.08,
+                      9547969.00, 20631286.23, 12807072.08, 2383819.84,
+                      90114500.00, 17209575.46, 12852969.00, 2414609.99,
+                      2170368.23])
+        xt_yeo, lam_yeo = stats.yeojohnson(x)
+        xt_box, lam_box = stats.boxcox(x + 1)
+        assert_allclose(xt_yeo, xt_box, rtol=1e-6)
+        assert_allclose(lam_yeo, lam_box, rtol=1e-6)
+
+
+class TestYeojohnsonNormmax:
+    def setup_method(self):
+        self.x = _old_loggamma_rvs(5, size=50, random_state=12345) + 5
+
+    def test_mle(self):
+        maxlog = stats.yeojohnson_normmax(self.x)
+        assert_allclose(maxlog, 1.876393, rtol=1e-6)
+
+    def test_darwin_example(self):
+        # test from original paper "A new family of power transformations to
+        # improve normality or symmetry" by Yeo and Johnson.
+        x = [6.1, -8.4, 1.0, 2.0, 0.7, 2.9, 3.5, 5.1, 1.8, 3.6, 7.0, 3.0, 9.3,
+             7.5, -6.0]
+        lmbda = stats.yeojohnson_normmax(x)
+        assert np.allclose(lmbda, 1.305, atol=1e-3)
+
+
+class TestCircFuncs:
+    # In gh-5747, the R package `circular` was used to calculate reference
+    # values for the circular variance, e.g.:
+    # library(circular)
+    # options(digits=16)
+    # x = c(0, 2*pi/3, 5*pi/3)
+    # var.circular(x)
+    @pytest.mark.parametrize("test_func,expected",
+                             [(stats.circmean, 0.167690146),
+                              (stats.circvar, 0.006455174270186603),
+                              (stats.circstd, 6.520702116)])
+    def test_circfuncs(self, test_func, expected):
+        x = np.array([355, 5, 2, 359, 10, 350])
+        assert_allclose(test_func(x, high=360), expected, rtol=1e-7)
+
+    def test_circfuncs_small(self):
+        x = np.array([20, 21, 22, 18, 19, 20.5, 19.2])
+        M1 = x.mean()
+        M2 = stats.circmean(x, high=360)
+        assert_allclose(M2, M1, rtol=1e-5)
+
+        V1 = (x*np.pi/180).var()
+        # for small variations, circvar is approximately half the
+        # linear variance
+        V1 = V1 / 2.
+        V2 = stats.circvar(x, high=360)
+        assert_allclose(V2, V1, rtol=1e-4)
+
+        S1 = x.std()
+        S2 = stats.circstd(x, high=360)
+        assert_allclose(S2, S1, rtol=1e-4)
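+
+    # Sketch of the small-angle approximation used above (ours): with
+    # angles theta in radians, the mean resultant length is
+    #     R = |mean(exp(1j*theta))| ~= 1 - var(theta)/2
+    # to second order for tightly clustered data, and circvar = 1 - R,
+    # hence circvar ~= var(theta)/2.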
+
+    @pytest.mark.parametrize("test_func, numpy_func",
+                             [(stats.circmean, np.mean),
+                              (stats.circvar, np.var),
+                              (stats.circstd, np.std)])
+    def test_circfuncs_close(self, test_func, numpy_func):
+        # circfuncs should handle very similar inputs (gh-12740)
+        x = np.array([0.12675364631578953] * 10 + [0.12675365920187928] * 100)
+        circstat = test_func(x)
+        normal = numpy_func(x)
+        assert_allclose(circstat, normal, atol=2e-8)
+
+    def test_circmean_axis(self):
+        x = np.array([[355, 5, 2, 359, 10, 350],
+                      [351, 7, 4, 352, 9, 349],
+                      [357, 9, 8, 358, 4, 356]])
+        M1 = stats.circmean(x, high=360)
+        M2 = stats.circmean(x.ravel(), high=360)
+        assert_allclose(M1, M2, rtol=1e-14)
+
+        M1 = stats.circmean(x, high=360, axis=1)
+        M2 = [stats.circmean(x[i], high=360) for i in range(x.shape[0])]
+        assert_allclose(M1, M2, rtol=1e-14)
+
+        M1 = stats.circmean(x, high=360, axis=0)
+        M2 = [stats.circmean(x[:, i], high=360) for i in range(x.shape[1])]
+        assert_allclose(M1, M2, rtol=1e-14)
+
+    def test_circvar_axis(self):
+        x = np.array([[355, 5, 2, 359, 10, 350],
+                      [351, 7, 4, 352, 9, 349],
+                      [357, 9, 8, 358, 4, 356]])
+
+        V1 = stats.circvar(x, high=360)
+        V2 = stats.circvar(x.ravel(), high=360)
+        assert_allclose(V1, V2, rtol=1e-11)
+
+        V1 = stats.circvar(x, high=360, axis=1)
+        V2 = [stats.circvar(x[i], high=360) for i in range(x.shape[0])]
+        assert_allclose(V1, V2, rtol=1e-11)
+
+        V1 = stats.circvar(x, high=360, axis=0)
+        V2 = [stats.circvar(x[:, i], high=360) for i in range(x.shape[1])]
+        assert_allclose(V1, V2, rtol=1e-11)
+
+    def test_circstd_axis(self):
+        x = np.array([[355, 5, 2, 359, 10, 350],
+                      [351, 7, 4, 352, 9, 349],
+                      [357, 9, 8, 358, 4, 356]])
+
+        S1 = stats.circstd(x, high=360)
+        S2 = stats.circstd(x.ravel(), high=360)
+        assert_allclose(S1, S2, rtol=1e-11)
+
+        S1 = stats.circstd(x, high=360, axis=1)
+        S2 = [stats.circstd(x[i], high=360) for i in range(x.shape[0])]
+        assert_allclose(S1, S2, rtol=1e-11)
+
+        S1 = stats.circstd(x, high=360, axis=0)
+        S2 = [stats.circstd(x[:, i], high=360) for i in range(x.shape[1])]
+        assert_allclose(S1, S2, rtol=1e-11)
+
+    @pytest.mark.parametrize("test_func,expected",
+                             [(stats.circmean, 0.167690146),
+                              (stats.circvar, 0.006455174270186603),
+                              (stats.circstd, 6.520702116)])
+    def test_circfuncs_array_like(self, test_func, expected):
+        x = [355, 5, 2, 359, 10, 350]
+        assert_allclose(test_func(x, high=360), expected, rtol=1e-7)
+
+    @pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar,
+                                           stats.circstd])
+    def test_empty(self, test_func):
+        assert_(np.isnan(test_func([])))
+
+    @pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar,
+                                           stats.circstd])
+    def test_nan_propagate(self, test_func):
+        x = [355, 5, 2, 359, 10, 350, np.nan]
+        assert_(np.isnan(test_func(x, high=360)))
+
+    @pytest.mark.parametrize("test_func,expected",
+                             [(stats.circmean,
+                               {None: np.nan, 0: 355.66582264, 1: 0.28725053}),
+                              (stats.circvar,
+                               {None: np.nan,
+                                0: 0.002570671054089924,
+                                1: 0.005545914017677123}),
+                              (stats.circstd,
+                               {None: np.nan, 0: 4.11093193, 1: 6.04265394})])
+    def test_nan_propagate_array(self, test_func, expected):
+        x = np.array([[355, 5, 2, 359, 10, 350, 1],
+                      [351, 7, 4, 352, 9, 349, np.nan],
+                      [1, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]])
+        for axis in expected.keys():
+            out = test_func(x, high=360, axis=axis)
+            if axis is None:
+                assert_(np.isnan(out))
+            else:
+                assert_allclose(out[0], expected[axis], rtol=1e-7)
+                assert_(np.isnan(out[1:]).all())
+
+    @pytest.mark.parametrize("test_func,expected",
+                             [(stats.circmean,
+                               {None: 359.4178026893944,
+                                0: np.array([353.0, 6.0, 3.0, 355.5, 9.5,
+                                             349.5]),
+                                1: np.array([0.16769015, 358.66510252])}),
+                              (stats.circvar,
+                               {None: 0.008396678483192477,
+                                0: np.array([1.9997969, 0.4999873, 0.4999873,
+                                             6.1230956, 0.1249992, 0.1249992]
+                                            )*(np.pi/180)**2,
+                                1: np.array([0.006455174270186603,
+                                             0.01016767581393285])}),
+                              (stats.circstd,
+                               {None: 7.440570778057074,
+                                0: np.array([2.00020313, 1.00002539, 1.00002539,
+                                             3.50108929, 0.50000317,
+                                             0.50000317]),
+                                1: np.array([6.52070212, 8.19138093])})])
+    def test_nan_omit_array(self, test_func, expected):
+        x = np.array([[355, 5, 2, 359, 10, 350, np.nan],
+                      [351, 7, 4, 352, 9, 349, np.nan],
+                      [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]])
+        for axis in expected.keys():
+            out = test_func(x, high=360, nan_policy='omit', axis=axis)
+            if axis is None:
+                assert_allclose(out, expected[axis], rtol=1e-7)
+            else:
+                assert_allclose(out[:-1], expected[axis], rtol=1e-7)
+                assert_(np.isnan(out[-1]))
+
+    @pytest.mark.parametrize("test_func,expected",
+                             [(stats.circmean, 0.167690146),
+                              (stats.circvar, 0.006455174270186603),
+                              (stats.circstd, 6.520702116)])
+    def test_nan_omit(self, test_func, expected):
+        x = [355, 5, 2, 359, 10, 350, np.nan]
+        assert_allclose(test_func(x, high=360, nan_policy='omit'),
+                        expected, rtol=1e-7)
+
+    @pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar,
+                                           stats.circstd])
+    def test_nan_omit_all(self, test_func):
+        x = [np.nan, np.nan, np.nan, np.nan, np.nan]
+        assert_(np.isnan(test_func(x, nan_policy='omit')))
+
+    @pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar,
+                                           stats.circstd])
+    def test_nan_omit_all_axis(self, test_func):
+        x = np.array([[np.nan, np.nan, np.nan, np.nan, np.nan],
+                      [np.nan, np.nan, np.nan, np.nan, np.nan]])
+        out = test_func(x, nan_policy='omit', axis=1)
+        assert_(np.isnan(out).all())
+        assert_(len(out) == 2)
+
+    @pytest.mark.parametrize("x",
+                             [[355, 5, 2, 359, 10, 350, np.nan],
+                              np.array([[355, 5, 2, 359, 10, 350, np.nan],
+                                        [351, 7, 4, 352, np.nan, 9, 349]])])
+    @pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar,
+                                           stats.circstd])
+    def test_nan_raise(self, test_func, x):
+        assert_raises(ValueError, test_func, x, high=360, nan_policy='raise')
+
+    @pytest.mark.parametrize("x",
+                             [[355, 5, 2, 359, 10, 350, np.nan],
+                              np.array([[355, 5, 2, 359, 10, 350, np.nan],
+                                        [351, 7, 4, 352, np.nan, 9, 349]])])
+    @pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar,
+                                           stats.circstd])
+    def test_bad_nan_policy(self, test_func, x):
+        assert_raises(ValueError, test_func, x, high=360, nan_policy='foobar')
+
+    def test_circmean_scalar(self):
+        x = 1.
+        M1 = x
+        M2 = stats.circmean(x)
+        assert_allclose(M2, M1, rtol=1e-5)
+
+    def test_circmean_range(self):
+        # regression test for gh-6420: circmean(..., high, low) must lie
+        # between `low` and `high`
+        m = stats.circmean(np.arange(0, 2, 0.1), np.pi, -np.pi)
+        assert_(m < np.pi)
+        assert_(m > -np.pi)
+
+    def test_circfuncs_uint8(self):
+        # regression test for gh-7255: overflow when working with
+        # numpy uint8 data type
+        x = np.array([150, 10], dtype='uint8')
+        assert_equal(stats.circmean(x, high=180), 170.0)
+        assert_allclose(stats.circvar(x, high=180), 0.2339555554617, rtol=1e-7)
+        assert_allclose(stats.circstd(x, high=180), 20.91551378, rtol=1e-7)
+
+
+class TestMedianTest:
+
+    def test_bad_n_samples(self):
+        # median_test requires at least two samples.
+        assert_raises(ValueError, stats.median_test, [1, 2, 3])
+
+    def test_empty_sample(self):
+        # Each sample must contain at least one value.
+        assert_raises(ValueError, stats.median_test, [], [1, 2, 3])
+
+    def test_empty_when_ties_ignored(self):
+        # The grand median is 1, and all values in the first argument are
+        # equal to the grand median.  With ties="ignore", those values are
+        # ignored, which results in the first sample being (in effect) empty.
+        # This should raise a ValueError.
+        assert_raises(ValueError, stats.median_test,
+                      [1, 1, 1, 1], [2, 0, 1], [2, 0], ties="ignore")
+
+    def test_empty_contingency_row(self):
+        # The grand median is 1, and with the default ties="below", all the
+        # values in the samples are counted as being below the grand median.
+        # This would result in a row of zeros in the contingency table, which
+        # an error.
+        assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1])
+
+        # With ties="above", all the values are counted as above the
+        # grand median.
+        assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1],
+                      ties="above")
+
+    def test_bad_ties(self):
+        assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5],
+                      ties="foo")
+
+    def test_bad_nan_policy(self):
+        assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5],
+                      nan_policy='foobar')
+
+    def test_bad_keyword(self):
+        assert_raises(TypeError, stats.median_test, [1, 2, 3], [4, 5],
+                      foo="foo")
+
+    def test_simple(self):
+        x = [1, 2, 3]
+        y = [1, 2, 3]
+        stat, p, med, tbl = stats.median_test(x, y)
+
+        # The median is floating point, but this equality test should be safe.
+        assert_equal(med, 2.0)
+
+        assert_array_equal(tbl, [[1, 1], [2, 2]])
+
+        # The expected values of the contingency table equal the contingency
+        # table, so the statistic should be 0 and the p-value should be 1.
+        assert_equal(stat, 0)
+        assert_equal(p, 1)
+
+    def test_ties_options(self):
+        # Test the contingency table calculation.
+        x = [1, 2, 3, 4]
+        y = [5, 6]
+        z = [7, 8, 9]
+        # grand median is 5.
+
+        # Default 'ties' option is "below".
+        stat, p, m, tbl = stats.median_test(x, y, z)
+        assert_equal(m, 5)
+        assert_equal(tbl, [[0, 1, 3], [4, 1, 0]])
+
+        stat, p, m, tbl = stats.median_test(x, y, z, ties="ignore")
+        assert_equal(m, 5)
+        assert_equal(tbl, [[0, 1, 3], [4, 0, 0]])
+
+        stat, p, m, tbl = stats.median_test(x, y, z, ties="above")
+        assert_equal(m, 5)
+        assert_equal(tbl, [[0, 2, 3], [4, 0, 0]])
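+
+    # How the table is built (ours): row 0 counts the values strictly
+    # above the grand median in each sample and row 1 the rest, with the
+    # tied value 5 placed according to `ties` -- counted below (default),
+    # dropped ("ignore"), or counted above ("above"), which is exactly
+    # the difference between the three y-columns asserted above.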
+
+    def test_nan_policy_options(self):
+        x = [1, 2, np.nan]
+        y = [4, 5, 6]
+        mt1 = stats.median_test(x, y, nan_policy='propagate')
+        s, p, m, t = stats.median_test(x, y, nan_policy='omit')
+
+        assert_equal(mt1, (np.nan, np.nan, np.nan, None))
+        assert_allclose(s, 0.31250000000000006)
+        assert_allclose(p, 0.57615012203057869)
+        assert_equal(m, 4.0)
+        assert_equal(t, np.array([[0, 2], [2, 1]]))
+        assert_raises(ValueError, stats.median_test, x, y, nan_policy='raise')
+
+    def test_basic(self):
+        # median_test calls chi2_contingency to compute the test statistic
+        # and p-value.  Make sure it hasn't screwed up the call...
+
+        x = [1, 2, 3, 4, 5]
+        y = [2, 4, 6, 8]
+
+        stat, p, m, tbl = stats.median_test(x, y)
+        assert_equal(m, 4)
+        assert_equal(tbl, [[1, 2], [4, 2]])
+
+        exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl)
+        assert_allclose(stat, exp_stat)
+        assert_allclose(p, exp_p)
+
+        stat, p, m, tbl = stats.median_test(x, y, lambda_=0)
+        assert_equal(m, 4)
+        assert_equal(tbl, [[1, 2], [4, 2]])
+
+        exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, lambda_=0)
+        assert_allclose(stat, exp_stat)
+        assert_allclose(p, exp_p)
+
+        stat, p, m, tbl = stats.median_test(x, y, correction=False)
+        assert_equal(m, 4)
+        assert_equal(tbl, [[1, 2], [4, 2]])
+
+        exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, correction=False)
+        assert_allclose(stat, exp_stat)
+        assert_allclose(p, exp_p)
+
+    @pytest.mark.parametrize("correction", [False, True])
+    def test_result(self, correction):
+        x = [1, 2, 3]
+        y = [1, 2, 3]
+
+        res = stats.median_test(x, y, correction=correction)
+        assert_equal((res.statistic, res.pvalue, res.median, res.table), res)
+
+
+class TestDirectionalStats:
+    # Reference implementations are not available
+    def test_directional_stats_correctness(self):
+        # Data from Fisher: Dispersion on a sphere, 1953 and
+        # Mardia and Jupp, Directional Statistics.
+
+        decl = -np.deg2rad(np.array([343.2, 62., 36.9, 27., 359.,
+                                     5.7, 50.4, 357.6, 44.]))
+        incl = -np.deg2rad(np.array([66.1, 68.7, 70.1, 82.1, 79.5,
+                                     73., 69.3, 58.8, 51.4]))
+        data = np.stack((np.cos(incl) * np.cos(decl),
+                         np.cos(incl) * np.sin(decl),
+                         np.sin(incl)),
+                        axis=1)
+
+        dirstats = stats.directional_stats(data)
+        directional_mean = dirstats.mean_direction
+        mean_rounded = np.round(directional_mean, 4)
+
+        reference_mean = np.array([0.2984, -0.1346, -0.9449])
+        assert_allclose(mean_rounded, reference_mean)
+
+    @pytest.mark.parametrize('angles, ref', [
+        ([-np.pi/2, np.pi/2], 1.),
+        ([0, 2*np.pi], 0.)
+    ])
+    def test_directional_stats_2d_special_cases(self, angles, ref):
+        if callable(ref):
+            ref = ref(angles)
+        data = np.stack([np.cos(angles), np.sin(angles)], axis=1)
+        res = 1 - stats.directional_stats(data).mean_resultant_length
+        assert_allclose(res, ref)
+
+    def test_directional_stats_2d(self):
+        # Test that for circular data directional_stats
+        # yields the same result as circmean/circvar
+        rng = np.random.default_rng(0xec9a6899d5a2830e0d1af479dbe1fd0c)
+        testdata = 2 * np.pi * rng.random((1000, ))
+        testdata_vector = np.stack((np.cos(testdata),
+                                    np.sin(testdata)),
+                                   axis=1)
+        dirstats = stats.directional_stats(testdata_vector)
+        directional_mean = dirstats.mean_direction
+        directional_mean_angle = np.arctan2(directional_mean[1],
+                                            directional_mean[0])
+        directional_mean_angle = directional_mean_angle % (2*np.pi)
+        circmean = stats.circmean(testdata)
+        assert_allclose(circmean, directional_mean_angle)
+
+        directional_var = 1 - dirstats.mean_resultant_length
+        circular_var = stats.circvar(testdata)
+        assert_allclose(directional_var, circular_var)
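+        # (Both quantities derive from the mean resultant length R of the
+        # unit vectors: the angle of the mean direction is circmean, and
+        # circvar is 1 - R.)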
+
+    def test_directional_mean_higher_dim(self):
+        # test that directional_stats works for higher dimensions
+        # here a 4D array is reduced over axis = 2
+        data = np.array([[0.8660254, 0.5, 0.],
+                         [0.8660254, -0.5, 0.]])
+        full_array = np.tile(data, (2, 2, 2, 1))
+        expected = np.array([[[1., 0., 0.],
+                              [1., 0., 0.]],
+                             [[1., 0., 0.],
+                              [1., 0., 0.]]])
+        dirstats = stats.directional_stats(full_array, axis=2)
+        assert_allclose(expected, dirstats.mean_direction)
+
+    def test_directional_stats_list_ndarray_input(self):
+        # test that list and numpy array inputs yield same results
+        data = [[0.8660254, 0.5, 0.], [0.8660254, -0.5, 0]]
+        data_array = np.asarray(data)
+        res = stats.directional_stats(data)
+        ref = stats.directional_stats(data_array)
+        assert_allclose(res.mean_direction, ref.mean_direction)
+        assert_allclose(res.mean_resultant_length,
+                        ref.mean_resultant_length)
+
+    def test_directional_stats_1d_error(self):
+        # test that one-dimensional data raises ValueError
+        data = np.ones((5, ))
+        message = (r"samples must at least be two-dimensional. "
+                   r"Instead samples has shape: (5,)")
+        with pytest.raises(ValueError, match=re.escape(message)):
+            stats.directional_stats(data)
+
+    def test_directional_stats_normalize(self):
+        # test that directional stats calculations yield same results
+        # for unnormalized input with normalize=True and normalized
+        # input with normalize=False
+        data = np.array([[0.8660254, 0.5, 0.],
+                         [1.7320508, -1., 0.]])
+        res = stats.directional_stats(data, normalize=True)
+        normalized_data = data / np.linalg.norm(data, axis=-1,
+                                                keepdims=True)
+        ref = stats.directional_stats(normalized_data,
+                                      normalize=False)
+        assert_allclose(res.mean_direction, ref.mean_direction)
+        assert_allclose(res.mean_resultant_length,
+                        ref.mean_resultant_length)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_mstats_basic.py b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_mstats_basic.py
new file mode 100644
index 00000000..44461858
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_mstats_basic.py
@@ -0,0 +1,1977 @@
+"""
+Tests for the stats.mstats module (support for masked arrays)
+"""
+import warnings
+import platform
+
+import numpy as np
+from numpy import nan
+import numpy.ma as ma
+from numpy.ma import masked, nomask
+
+import scipy.stats.mstats as mstats
+from scipy import stats
+from .common_tests import check_named_results
+import pytest
+from pytest import raises as assert_raises
+from numpy.ma.testutils import (assert_equal, assert_almost_equal,
+    assert_array_almost_equal, assert_array_almost_equal_nulp, assert_,
+    assert_allclose, assert_array_equal)
+from numpy.testing import suppress_warnings
+from scipy.stats import _mstats_basic
+
+
+class TestMquantiles:
+    def test_mquantiles_limit_keyword(self):
+        # Regression test for Trac ticket #867
+        data = np.array([[6., 7., 1.],
+                         [47., 15., 2.],
+                         [49., 36., 3.],
+                         [15., 39., 4.],
+                         [42., 40., -999.],
+                         [41., 41., -999.],
+                         [7., -999., -999.],
+                         [39., -999., -999.],
+                         [43., -999., -999.],
+                         [40., -999., -999.],
+                         [36., -999., -999.]])
+        desired = [[19.2, 14.6, 1.45],
+                   [40.0, 37.5, 2.5],
+                   [42.8, 40.05, 3.55]]
+        quants = mstats.mquantiles(data, axis=0, limit=(0, 50))
+        assert_almost_equal(quants, desired)
+
+
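+# A minimal sketch of what the two helpers below verify, assuming plain
+# (unmasked) positive inputs: the geometric mean is exp(mean(log(x))) and
+# the harmonic mean is n / sum(1/x). `_gmean_ref` and `_hmean_ref` are
+# illustrative only and are not used by the tests.
+def _gmean_ref(x):
+    x = np.asarray(x, dtype=float)
+    return np.exp(np.log(x).mean())
+
+
+def _hmean_ref(x):
+    x = np.asarray(x, dtype=float)
+    return x.size / np.sum(1.0 / x)
+
+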
+def check_equal_gmean(array_like, desired, axis=None, dtype=None, rtol=1e-7):
+    # Note this doesn't test when axis is not specified
+    x = mstats.gmean(array_like, axis=axis, dtype=dtype)
+    assert_allclose(x, desired, rtol=rtol)
+    assert_equal(x.dtype, dtype)
+
+
+def check_equal_hmean(array_like, desired, axis=None, dtype=None, rtol=1e-7):
+    x = stats.hmean(array_like, axis=axis, dtype=dtype)
+    assert_allclose(x, desired, rtol=rtol)
+    assert_equal(x.dtype, dtype)
+
+
+class TestGeoMean:
+    def test_1d(self):
+        a = [1, 2, 3, 4]
+        desired = np.power(1*2*3*4, 1./4.)
+        check_equal_gmean(a, desired, rtol=1e-14)
+
+    def test_1d_ma(self):
+        #  Test a 1d masked array
+        a = ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
+        desired = 45.2872868812
+        check_equal_gmean(a, desired)
+
+        a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1])
+        desired = np.power(1*2*3, 1./3.)
+        check_equal_gmean(a, desired, rtol=1e-14)
+
+    def test_1d_ma_value(self):
+        #  Test a 1d masked array with a masked value
+        a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100],
+                        mask=[0, 0, 0, 0, 0, 0, 0, 0, 0, 1])
+        desired = 41.4716627439
+        check_equal_gmean(a, desired)
+
+    def test_1d_ma0(self):
+        #  Test a 1d masked array with zero element
+        a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 0])
+        desired = 0
+        check_equal_gmean(a, desired)
+
+    def test_1d_ma_inf(self):
+        #  Test a 1d masked array with negative element
+        a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, -1])
+        desired = np.nan
+        with np.errstate(invalid='ignore'):
+            check_equal_gmean(a, desired)
+
+    @pytest.mark.skipif(not hasattr(np, 'float96'),
+                        reason='cannot find float96 so skipping')
+    def test_1d_float96(self):
+        a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1])
+        desired_dt = np.power(1*2*3, 1./3.).astype(np.float96)
+        check_equal_gmean(a, desired_dt, dtype=np.float96, rtol=1e-14)
+
+    def test_2d_ma(self):
+        a = ma.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
+                     mask=[[0, 0, 0, 0], [1, 0, 0, 1], [0, 1, 1, 0]])
+        desired = np.array([1, 2, 3, 4])
+        check_equal_gmean(a, desired, axis=0, rtol=1e-14)
+
+        desired = ma.array([np.power(1*2*3*4, 1./4.),
+                            np.power(2*3, 1./2.),
+                            np.power(1*4, 1./2.)])
+        check_equal_gmean(a, desired, axis=-1, rtol=1e-14)
+
+        #  Test a 2d masked array
+        a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
+        desired = 52.8885199
+        check_equal_gmean(np.ma.array(a), desired)
+
+
+class TestHarMean:
+    def test_1d(self):
+        a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1])
+        desired = 3. / (1./1 + 1./2 + 1./3)
+        check_equal_hmean(a, desired, rtol=1e-14)
+
+        a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
+        desired = 34.1417152147
+        check_equal_hmean(a, desired)
+
+        a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100],
+                        mask=[0, 0, 0, 0, 0, 0, 0, 0, 0, 1])
+        desired = 31.8137186141
+        check_equal_hmean(a, desired)
+
+    @pytest.mark.skipif(not hasattr(np, 'float96'),
+                        reason='cannot find float96 so skipping')
+    def test_1d_float96(self):
+        a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1])
+        desired_dt = np.asarray(3. / (1./1 + 1./2 + 1./3), dtype=np.float96)
+        check_equal_hmean(a, desired_dt, dtype=np.float96)
+
+    def test_2d(self):
+        a = ma.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
+                     mask=[[0, 0, 0, 0], [1, 0, 0, 1], [0, 1, 1, 0]])
+        desired = ma.array([1, 2, 3, 4])
+        check_equal_hmean(a, desired, axis=0, rtol=1e-14)
+
+        desired = [4./(1/1.+1/2.+1/3.+1/4.), 2./(1/2.+1/3.), 2./(1/1.+1/4.)]
+        check_equal_hmean(a, desired, axis=-1, rtol=1e-14)
+
+        a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
+        desired = 38.6696271841
+        check_equal_hmean(np.ma.array(a), desired)
+
+
+class TestRanking:
+    def test_ranking(self):
+        x = ma.array([0,1,1,1,2,3,4,5,5,6,])
+        assert_almost_equal(mstats.rankdata(x),
+                           [1,3,3,3,5,6,7,8.5,8.5,10])
+        x[[3,4]] = masked
+        assert_almost_equal(mstats.rankdata(x),
+                           [1,2.5,2.5,0,0,4,5,6.5,6.5,8])
+        assert_almost_equal(mstats.rankdata(x, use_missing=True),
+                            [1,2.5,2.5,4.5,4.5,4,5,6.5,6.5,8])
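+        # (Masked entries rank as 0 by default; with use_missing=True they
+        # receive the mean rank of the unmasked values, (8 + 1)/2 = 4.5 here.)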
+        x = ma.array([0,1,5,1,2,4,3,5,1,6,])
+        assert_almost_equal(mstats.rankdata(x),
+                           [1,3,8.5,3,5,7,6,8.5,3,10])
+        x = ma.array([[0,1,1,1,2], [3,4,5,5,6,]])
+        assert_almost_equal(mstats.rankdata(x),
+                            [[1,3,3,3,5], [6,7,8.5,8.5,10]])
+        assert_almost_equal(mstats.rankdata(x, axis=1),
+                           [[1,3,3,3,5], [1,2,3.5,3.5,5]])
+        assert_almost_equal(mstats.rankdata(x,axis=0),
+                           [[1,1,1,1,1], [2,2,2,2,2,]])
+
+
+class TestCorr:
+    def test_pearsonr(self):
+        # Tests some computations of Pearson's r
+        x = ma.arange(10)
+        with warnings.catch_warnings():
+            # The tests in this context are edge cases, with perfect
+            # correlation or anticorrelation, or totally masked data.
+            # None of these should trigger a RuntimeWarning.
+            warnings.simplefilter("error", RuntimeWarning)
+
+            assert_almost_equal(mstats.pearsonr(x, x)[0], 1.0)
+            assert_almost_equal(mstats.pearsonr(x, x[::-1])[0], -1.0)
+
+            x = ma.array(x, mask=True)
+            pr = mstats.pearsonr(x, x)
+            assert_(pr[0] is masked)
+            assert_(pr[1] is masked)
+
+        x1 = ma.array([-1.0, 0.0, 1.0])
+        y1 = ma.array([0, 0, 3])
+        r, p = mstats.pearsonr(x1, y1)
+        assert_almost_equal(r, np.sqrt(3)/2)
+        assert_almost_equal(p, 1.0/3)
+
+        # (x2, y2) have the same unmasked data as (x1, y1).
+        mask = [False, False, False, True]
+        x2 = ma.array([-1.0, 0.0, 1.0, 99.0], mask=mask)
+        y2 = ma.array([0, 0, 3, -1], mask=mask)
+        r, p = mstats.pearsonr(x2, y2)
+        assert_almost_equal(r, np.sqrt(3)/2)
+        assert_almost_equal(p, 1.0/3)
+
+    def test_pearsonr_misaligned_mask(self):
+        mx = np.ma.masked_array([1, 2, 3, 4, 5, 6], mask=[0, 1, 0, 0, 0, 0])
+        my = np.ma.masked_array([9, 8, 7, 6, 5, 9], mask=[0, 0, 1, 0, 0, 0])
+        x = np.array([1, 4, 5, 6])
+        y = np.array([9, 6, 5, 9])
+        mr, mp = mstats.pearsonr(mx, my)
+        r, p = stats.pearsonr(x, y)
+        assert_equal(mr, r)
+        assert_equal(mp, p)
+
+    def test_spearmanr(self):
+        # Tests some computations of Spearman's rho
+        (x, y) = ([5.05,6.75,3.21,2.66], [1.65,2.64,2.64,6.95])
+        assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555)
+        (x, y) = ([5.05,6.75,3.21,2.66,np.nan],[1.65,2.64,2.64,6.95,np.nan])
+        (x, y) = (ma.fix_invalid(x), ma.fix_invalid(y))
+        assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555)
+
+        x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
+              1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7]
+        y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
+              0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4]
+        assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299)
+        x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
+              1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7, np.nan]
+        y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
+              0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4, np.nan]
+        (x, y) = (ma.fix_invalid(x), ma.fix_invalid(y))
+        assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299)
+        # Next test is to make sure calculation uses sufficient precision.
+        # The denominator's value is ~n^3 and used to be represented as an
+        # int. 2000**3 > 2**32 so these arrays would cause overflow on
+        # some machines.
+        x = list(range(2000))
+        y = list(range(2000))
+        y[0], y[9] = y[9], y[0]
+        y[10], y[434] = y[434], y[10]
+        y[435], y[1509] = y[1509], y[435]
+        # rho = 1 - 6 * (2 * (9^2 + 424^2 + 1074^2))/(2000 * (2000^2 - 1))
+        #     = 1 - (1 / 500)
+        #     = 0.998
+        assert_almost_equal(mstats.spearmanr(x,y)[0], 0.998)
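+        # (In general, for untied ranks, rho = 1 - 6*sum(d_i**2)/(n*(n**2 - 1))
+        # where d_i are the per-element rank differences; each swap above
+        # contributes two equal squared differences, hence the factor of 2.)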
+
+        # test for namedtuple attributes
+        res = mstats.spearmanr(x, y)
+        attributes = ('correlation', 'pvalue')
+        check_named_results(res, attributes, ma=True)
+
+    def test_spearmanr_alternative(self):
+        # check against R
+        # options(digits=16)
+        # cor.test(c(2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
+        #            1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7),
+        #          c(22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
+        #            0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4),
+        #          alternative='two.sided', method='spearman')
+        x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
+             1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7]
+        y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
+             0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4]
+
+        r_exp = 0.6887298747763864  # from cor.test
+
+        r, p = mstats.spearmanr(x, y)
+        assert_allclose(r, r_exp)
+        assert_allclose(p, 0.004519192910756)
+
+        r, p = mstats.spearmanr(x, y, alternative='greater')
+        assert_allclose(r, r_exp)
+        assert_allclose(p, 0.002259596455378)
+
+        r, p = mstats.spearmanr(x, y, alternative='less')
+        assert_allclose(r, r_exp)
+        assert_allclose(p, 0.9977404035446)
+
+        # intuitive test (with obvious positive correlation)
+        n = 100
+        x = np.linspace(0, 5, n)
+        y = 0.1*x + np.random.rand(n)  # y is positively correlated w/ x
+
+        stat1, p1 = mstats.spearmanr(x, y)
+
+        stat2, p2 = mstats.spearmanr(x, y, alternative="greater")
+        assert_allclose(p2, p1 / 2)  # positive correlation -> small p
+
+        stat3, p3 = mstats.spearmanr(x, y, alternative="less")
+        assert_allclose(p3, 1 - p1 / 2)  # positive correlation -> large p
+
+        assert stat1 == stat2 == stat3
+
+        with pytest.raises(ValueError, match="alternative must be 'less'..."):
+            mstats.spearmanr(x, y, alternative="ekki-ekki")
+
+    @pytest.mark.skipif(platform.machine() == 'ppc64le',
+                        reason="fails/crashes on ppc64le")
+    def test_kendalltau(self):
+        # check case with maximum disorder and p=1
+        x = ma.array(np.array([9, 2, 5, 6]))
+        y = ma.array(np.array([4, 7, 9, 11]))
+        # Cross-check with exact result from R:
+        # cor.test(x,y,method="kendall",exact=1)
+        expected = [0.0, 1.0]
+        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)
+
+        # simple case without ties
+        x = ma.array(np.arange(10))
+        y = ma.array(np.arange(10))
+        # Cross-check with exact result from R:
+        # cor.test(x,y,method="kendall",exact=1)
+        expected = [1.0, 5.511463844797e-07]
+        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)
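+        # (With n=10 and no ties, the exact two-sided p-value of perfect
+        # agreement is 2/10! ~= 5.5115e-07: under H0 all 10! orderings are
+        # equally likely and only one is this extreme in each direction.)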
+
+        # check exception in case of invalid method keyword
+        assert_raises(ValueError, mstats.kendalltau, x, y, method='banana')
+
+        # swap a couple of values
+        b = y[1]
+        y[1] = y[2]
+        y[2] = b
+        # Cross-check with exact result from R:
+        # cor.test(x,y,method="kendall",exact=1)
+        expected = [0.9555555555555556, 5.511463844797e-06]
+        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)
+
+        # swap a couple more
+        b = y[5]
+        y[5] = y[6]
+        y[6] = b
+        # Cross-check with exact result from R:
+        # cor.test(x,y,method="kendall",exact=1)
+        expected = [0.9111111111111111, 2.976190476190e-05]
+        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)
+
+        # same in opposite direction
+        x = ma.array(np.arange(10))
+        y = ma.array(np.arange(10)[::-1])
+        # Cross-check with exact result from R:
+        # cor.test(x,y,method="kendall",exact=1)
+        expected = [-1.0, 5.511463844797e-07]
+        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)
+
+        # swap a couple of values
+        b = y[1]
+        y[1] = y[2]
+        y[2] = b
+        # Cross-check with exact result from R:
+        # cor.test(x,y,method="kendall",exact=1)
+        expected = [-0.9555555555555556, 5.511463844797e-06]
+        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)
+
+        # swap a couple more
+        b = y[5]
+        y[5] = y[6]
+        y[6] = b
+        # Cross-check with exact result from R:
+        # cor.test(x,y,method="kendall",exact=1)
+        expected = [-0.9111111111111111, 2.976190476190e-05]
+        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)
+
+        # Tests some computations of Kendall's tau
+        x = ma.fix_invalid([5.05, 6.75, 3.21, 2.66, np.nan])
+        y = ma.fix_invalid([1.65, 26.5, -5.93, 7.96, np.nan])
+        z = ma.fix_invalid([1.65, 2.64, 2.64, 6.95, np.nan])
+        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)),
+                            [+0.3333333, 0.75])
+        assert_almost_equal(np.asarray(mstats.kendalltau(x, y, method='asymptotic')),
+                            [+0.3333333, 0.4969059])
+        assert_almost_equal(np.asarray(mstats.kendalltau(x, z)),
+                            [-0.5477226, 0.2785987])
+        #
+        x = ma.fix_invalid([0, 0, 0, 0, 20, 20, 0, 60, 0, 20,
+                            10, 10, 0, 40, 0, 20, 0, 0, 0, 0, 0, np.nan])
+        y = ma.fix_invalid([0, 80, 80, 80, 10, 33, 60, 0, 67, 27,
+                            25, 80, 80, 80, 80, 80, 80, 0, 10, 45, np.nan, 0])
+        result = mstats.kendalltau(x, y)
+        assert_almost_equal(np.asarray(result), [-0.1585188, 0.4128009])
+
+        # test for namedtuple attributes
+        attributes = ('correlation', 'pvalue')
+        check_named_results(result, attributes, ma=True)
+
+    @pytest.mark.skipif(platform.machine() == 'ppc64le',
+                        reason="fails/crashes on ppc64le")
+    @pytest.mark.slow
+    def test_kendalltau_large(self):
+        # make sure internal variable use correct precision with
+        # larger arrays
+        x = np.arange(2000, dtype=float)
+        x = ma.masked_greater(x, 1995)
+        y = np.arange(2000, dtype=float)
+        y = np.concatenate((y[1000:], y[:1000]))
+        assert_(np.isfinite(mstats.kendalltau(x, y)[1]))
+
+    def test_kendalltau_seasonal(self):
+        # Tests the seasonal Kendall tau.
+        x = [[nan, nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],
+             [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],
+             [3, 2, 5, 6, 18, 4, 9, 1, 1, nan, 1, 1, nan],
+             [nan, 6, 11, 4, 17, nan, 6, 1, 1, 2, 5, 1, 1]]
+        x = ma.fix_invalid(x).T
+        output = mstats.kendalltau_seasonal(x)
+        assert_almost_equal(output['global p-value (indep)'], 0.008, 3)
+        assert_almost_equal(output['seasonal p-value'].round(2),
+                            [0.18,0.53,0.20,0.04])
+
+    @pytest.mark.parametrize("method", ("exact", "asymptotic"))
+    @pytest.mark.parametrize("alternative", ("two-sided", "greater", "less"))
+    def test_kendalltau_mstats_vs_stats(self, method, alternative):
+        # Test that mstats.kendalltau and stats.kendalltau with
+        # nan_policy='omit' matches behavior of stats.kendalltau
+        # Accuracy of the alternatives is tested in stats/tests/test_stats.py
+
+        np.random.seed(0)
+        n = 50
+        x = np.random.rand(n)
+        y = np.random.rand(n)
+        mask = np.random.rand(n) > 0.5
+
+        x_masked = ma.array(x, mask=mask)
+        y_masked = ma.array(y, mask=mask)
+        res_masked = mstats.kendalltau(
+            x_masked, y_masked, method=method, alternative=alternative)
+
+        x_compressed = x_masked.compressed()
+        y_compressed = y_masked.compressed()
+        res_compressed = stats.kendalltau(
+            x_compressed, y_compressed, method=method, alternative=alternative)
+
+        x[mask] = np.nan
+        y[mask] = np.nan
+        res_nan = stats.kendalltau(
+            x, y, method=method, nan_policy='omit', alternative=alternative)
+
+        assert_allclose(res_masked, res_compressed)
+        assert_allclose(res_nan, res_compressed)
+
+    def test_kendall_p_exact_medium(self):
+        # Test for the exact method with medium samples (some n >= 171)
+        # expected values generated using SymPy
+        expectations = {(100, 2393): 0.62822615287956040664,
+                        (101, 2436): 0.60439525773513602669,
+                        (170, 0): 2.755801935583541e-307,
+                        (171, 0): 0.0,
+                        (171, 1): 2.755801935583541e-307,
+                        (172, 1): 0.0,
+                        (200, 9797): 0.74753983745929675209,
+                        (201, 9656): 0.40959218958120363618}
+        for nc, expected in expectations.items():
+            res = _mstats_basic._kendall_p_exact(nc[0], nc[1])
+            assert_almost_equal(res, expected)
+
+    @pytest.mark.xslow
+    def test_kendall_p_exact_large(self):
+        # Test for the exact method with large samples (n >= 171)
+        # expected values generated using SymPy
+        expectations = {(400, 38965): 0.48444283672113314099,
+                        (401, 39516): 0.66363159823474837662,
+                        (800, 156772): 0.42265448483120932055,
+                        (801, 157849): 0.53437553412194416236,
+                        (1600, 637472): 0.84200727400323538419,
+                        (1601, 630304): 0.34465255088058593946}
+
+        for nc, expected in expectations.items():
+            res = _mstats_basic._kendall_p_exact(nc[0], nc[1])
+            assert_almost_equal(res, expected)
+
+    def test_pointbiserial(self):
+        x = [1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0,
+             0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, -1]
+        y = [14.8, 13.8, 12.4, 10.1, 7.1, 6.1, 5.8, 4.6, 4.3, 3.5, 3.3, 3.2,
+             3.0, 2.8, 2.8, 2.5, 2.4, 2.3, 2.1, 1.7, 1.7, 1.5, 1.3, 1.3, 1.2,
+             1.2, 1.1, 0.8, 0.7, 0.6, 0.5, 0.2, 0.2, 0.1, np.nan]
+        assert_almost_equal(mstats.pointbiserialr(x, y)[0], 0.36149, 5)
+
+        # test for namedtuple attributes
+        res = mstats.pointbiserialr(x, y)
+        attributes = ('correlation', 'pvalue')
+        check_named_results(res, attributes, ma=True)
+
+
+class TestTrimming:
+
+    def test_trim(self):
+        a = ma.arange(10)
+        assert_equal(mstats.trim(a), [0,1,2,3,4,5,6,7,8,9])
+        a = ma.arange(10)
+        assert_equal(mstats.trim(a,(2,8)), [None,None,2,3,4,5,6,7,8,None])
+        a = ma.arange(10)
+        assert_equal(mstats.trim(a,limits=(2,8),inclusive=(False,False)),
+                     [None,None,None,3,4,5,6,7,None,None])
+        a = ma.arange(10)
+        assert_equal(mstats.trim(a,limits=(0.1,0.2),relative=True),
+                     [None,1,2,3,4,5,6,7,None,None])
+
+        a = ma.arange(12)
+        a[[0,-1]] = a[5] = masked
+        assert_equal(mstats.trim(a, (2,8)),
+                     [None, None, 2, 3, 4, None, 6, 7, 8, None, None, None])
+
+        x = ma.arange(100).reshape(10, 10)
+        expected = [1]*10 + [0]*70 + [1]*20
+        trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=None)
+        assert_equal(trimx._mask.ravel(), expected)
+        trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=0)
+        assert_equal(trimx._mask.ravel(), expected)
+        trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=-1)
+        assert_equal(trimx._mask.T.ravel(), expected)
+
+        # same as above, but with an extra masked row inserted
+        x = ma.arange(110).reshape(11, 10)
+        x[1] = masked
+        expected = [1]*20 + [0]*70 + [1]*20
+        trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=None)
+        assert_equal(trimx._mask.ravel(), expected)
+        trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=0)
+        assert_equal(trimx._mask.ravel(), expected)
+        trimx = mstats.trim(x.T, (0.1,0.2), relative=True, axis=-1)
+        assert_equal(trimx.T._mask.ravel(), expected)
+
+    def test_trim_old(self):
+        x = ma.arange(100)
+        assert_equal(mstats.trimboth(x).count(), 60)
+        assert_equal(mstats.trimtail(x,tail='r').count(), 80)
+        x[50:70] = masked
+        trimx = mstats.trimboth(x)
+        assert_equal(trimx.count(), 48)
+        assert_equal(trimx._mask, [1]*16 + [0]*34 + [1]*20 + [0]*14 + [1]*16)
+        x._mask = nomask
+        x.shape = (10,10)
+        assert_equal(mstats.trimboth(x).count(), 60)
+        assert_equal(mstats.trimtail(x).count(), 80)
+
+    def test_trimr(self):
+        x = ma.arange(10)
+        result = mstats.trimr(x, limits=(0.15, 0.14), inclusive=(False, False))
+        expected = ma.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
+                            mask=[1, 1, 0, 0, 0, 0, 0, 0, 0, 1])
+        assert_equal(result, expected)
+        assert_equal(result.mask, expected.mask)
+
+    def test_trimmedmean(self):
+        data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
+                         296,299,306,376,428,515,666,1310,2611])
+        assert_almost_equal(mstats.trimmed_mean(data,0.1), 343, 0)
+        assert_almost_equal(mstats.trimmed_mean(data,(0.1,0.1)), 343, 0)
+        assert_almost_equal(mstats.trimmed_mean(data,(0.2,0.2)), 283, 0)
+
+    def test_trimmed_stde(self):
+        data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
+                         296,299,306,376,428,515,666,1310,2611])
+        assert_almost_equal(mstats.trimmed_stde(data,(0.2,0.2)), 56.13193, 5)
+        assert_almost_equal(mstats.trimmed_stde(data,0.2), 56.13193, 5)
+
+    def test_winsorization(self):
+        data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
+                         296,299,306,376,428,515,666,1310,2611])
+        assert_almost_equal(mstats.winsorize(data,(0.2,0.2)).var(ddof=1),
+                            21551.4, 1)
+        assert_almost_equal(
+            mstats.winsorize(data, (0.2,0.2),(False,False)).var(ddof=1),
+            11887.3, 1)
+        data[5] = masked
+        winsorized = mstats.winsorize(data)
+        assert_equal(winsorized.mask, data.mask)
+
+    def test_winsorization_nan(self):
+        data = ma.array([np.nan, np.nan, 0, 1, 2])
+        assert_raises(ValueError, mstats.winsorize, data, (0.05, 0.05),
+                      nan_policy='raise')
+        # Testing propagate (default behavior)
+        assert_equal(mstats.winsorize(data, (0.4, 0.4)),
+                     ma.array([2, 2, 2, 2, 2]))
+        assert_equal(mstats.winsorize(data, (0.8, 0.8)),
+                     ma.array([np.nan, np.nan, np.nan, np.nan, np.nan]))
+        assert_equal(mstats.winsorize(data, (0.4, 0.4), nan_policy='omit'),
+                     ma.array([np.nan, np.nan, 2, 2, 2]))
+        assert_equal(mstats.winsorize(data, (0.8, 0.8), nan_policy='omit'),
+                     ma.array([np.nan, np.nan, 2, 2, 2]))
+
+
+class TestMoments:
+    # Comparison numbers are found using R v.1.5.1
+    # note that length(testcase) = 4
+    # testmathworks comes from documentation for the
+    # Statistics Toolbox for Matlab and can be found at both
+    # https://www.mathworks.com/help/stats/kurtosis.html
+    # https://www.mathworks.com/help/stats/skewness.html
+    # Note that both test cases came from here.
+    testcase = [1,2,3,4]
+    testmathworks = ma.fix_invalid([1.165, 0.6268, 0.0751, 0.3516, -0.6965,
+                                    np.nan])
+    testcase_2d = ma.array(
+        np.array([[0.05245846, 0.50344235, 0.86589117, 0.36936353, 0.46961149],
+                  [0.11574073, 0.31299969, 0.45925772, 0.72618805, 0.75194407],
+                  [0.67696689, 0.91878127, 0.09769044, 0.04645137, 0.37615733],
+                  [0.05903624, 0.29908861, 0.34088298, 0.66216337, 0.83160998],
+                  [0.64619526, 0.94894632, 0.27855892, 0.0706151, 0.39962917]]),
+        mask=np.array([[True, False, False, True, False],
+                       [True, True, True, False, True],
+                       [False, False, False, False, False],
+                       [True, True, True, True, True],
+                       [False, False, True, False, False]], dtype=bool))
+
+    def _assert_equal(self, actual, expect, *, shape=None, dtype=None):
+        expect = np.asarray(expect)
+        if shape is not None:
+            expect = np.broadcast_to(expect, shape)
+        assert_array_equal(actual, expect)
+        if dtype is None:
+            dtype = expect.dtype
+        assert actual.dtype == dtype
+
+    def test_moment(self):
+        y = mstats.moment(self.testcase,1)
+        assert_almost_equal(y,0.0,10)
+        y = mstats.moment(self.testcase,2)
+        assert_almost_equal(y,1.25)
+        y = mstats.moment(self.testcase,3)
+        assert_almost_equal(y,0.0)
+        y = mstats.moment(self.testcase,4)
+        assert_almost_equal(y,2.5625)
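+        # (These are central moments m_k = mean((x - x.mean())**k); for
+        # [1, 2, 3, 4] the odd moments vanish by symmetry, m2 = 1.25 and
+        # m4 = 2.5625.)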
+
+        # check array_like input for moment
+        y = mstats.moment(self.testcase, [1, 2, 3, 4])
+        assert_allclose(y, [0, 1.25, 0, 2.5625])
+
+        # check moment input consists only of integers
+        y = mstats.moment(self.testcase, 0.0)
+        assert_allclose(y, 1.0)
+        assert_raises(ValueError, mstats.moment, self.testcase, 1.2)
+        y = mstats.moment(self.testcase, [1.0, 2, 3, 4.0])
+        assert_allclose(y, [0, 1.25, 0, 2.5625])
+
+        # test empty input
+        y = mstats.moment([])
+        self._assert_equal(y, np.nan, dtype=np.float64)
+        y = mstats.moment(np.array([], dtype=np.float32))
+        self._assert_equal(y, np.nan, dtype=np.float32)
+        y = mstats.moment(np.zeros((1, 0)), axis=0)
+        self._assert_equal(y, [], shape=(0,), dtype=np.float64)
+        y = mstats.moment([[]], axis=1)
+        self._assert_equal(y, np.nan, shape=(1,), dtype=np.float64)
+        y = mstats.moment([[]], moment=[0, 1], axis=0)
+        self._assert_equal(y, [], shape=(2, 0))
+
+        x = np.arange(10.)
+        x[9] = np.nan
+        assert_equal(mstats.moment(x, 2), ma.masked)  # NaN value is ignored
+
+    def test_variation(self):
+        y = mstats.variation(self.testcase)
+        assert_almost_equal(y,0.44721359549996, 10)
+
+    def test_variation_ddof(self):
+        # test variation with delta degrees of freedom
+        # regression test for gh-13341
+        a = np.array([1, 2, 3, 4, 5])
+        y = mstats.variation(a, ddof=1)
+        assert_almost_equal(y, 0.5270462766947299)
+
+    def test_skewness(self):
+        y = mstats.skew(self.testmathworks)
+        assert_almost_equal(y,-0.29322304336607,10)
+        y = mstats.skew(self.testmathworks,bias=0)
+        assert_almost_equal(y,-0.437111105023940,10)
+        y = mstats.skew(self.testcase)
+        assert_almost_equal(y,0.0,10)
+
+        # test that skew works on multidimensional masked arrays
+        correct_2d = ma.array(
+            np.array([0.6882870394455785, 0, 0.2665647526856708,
+                      0, -0.05211472114254485]),
+            mask=np.array([False, False, False, True, False], dtype=bool)
+        )
+        assert_allclose(mstats.skew(self.testcase_2d, 1), correct_2d)
+        for i, row in enumerate(self.testcase_2d):
+            assert_almost_equal(mstats.skew(row), correct_2d[i])
+
+        correct_2d_bias_corrected = ma.array(
+            np.array([1.685952043212545, 0.0, 0.3973712716070531, 0,
+                      -0.09026534484117164]),
+            mask=np.array([False, False, False, True, False], dtype=bool)
+        )
+        assert_allclose(mstats.skew(self.testcase_2d, 1, bias=False),
+                        correct_2d_bias_corrected)
+        for i, row in enumerate(self.testcase_2d):
+            assert_almost_equal(mstats.skew(row, bias=False),
+                                correct_2d_bias_corrected[i])
+
+        # Check consistency between stats and mstats implementations
+        assert_allclose(mstats.skew(self.testcase_2d[2, :]),
+                        stats.skew(self.testcase_2d[2, :]))
+
+    def test_kurtosis(self):
+        # Set flags for axis = 0 and fisher=0 (Pearson's definition of kurtosis
+        # for compatibility with Matlab)
+        y = mstats.kurtosis(self.testmathworks, 0, fisher=0, bias=1)
+        assert_almost_equal(y, 2.1658856802973, 10)
+        # Note that MATLAB has confusing docs for the following case
+        #  kurtosis(x, 0) gives an unbiased estimate of Pearson's kurtosis
+        #  kurtosis(x) gives a biased estimate of Fisher's kurtosis
+        #  (Pearson minus 3). The MATLAB docs imply that both should give
+        #  Fisher's.
+        y = mstats.kurtosis(self.testmathworks, fisher=0, bias=0)
+        assert_almost_equal(y, 3.663542721189047, 10)
+        y = mstats.kurtosis(self.testcase, 0, 0)
+        assert_almost_equal(y, 1.64)
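+        # (With fisher=0 this is Pearson's kurtosis m4 / m2**2; for
+        # [1, 2, 3, 4] that is 2.5625 / 1.25**2 = 1.64, as asserted.)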
+
+        # test that kurtosis works on multidimensional masked arrays
+        correct_2d = ma.array(np.array([-1.5, -3., -1.47247052385, 0.,
+                                        -1.26979517952]),
+                              mask=np.array([False, False, False, True,
+                                             False], dtype=bool))
+        assert_array_almost_equal(mstats.kurtosis(self.testcase_2d, 1),
+                                  correct_2d)
+        for i, row in enumerate(self.testcase_2d):
+            assert_almost_equal(mstats.kurtosis(row), correct_2d[i])
+
+        correct_2d_bias_corrected = ma.array(
+            np.array([-1.5, -3., -1.88988209538, 0., -0.5234638463918877]),
+            mask=np.array([False, False, False, True, False], dtype=bool))
+        assert_array_almost_equal(mstats.kurtosis(self.testcase_2d, 1,
+                                                  bias=False),
+                                  correct_2d_bias_corrected)
+        for i, row in enumerate(self.testcase_2d):
+            assert_almost_equal(mstats.kurtosis(row, bias=False),
+                                correct_2d_bias_corrected[i])
+
+        # Check consistency between stats and mstats implementations
+        assert_array_almost_equal_nulp(mstats.kurtosis(self.testcase_2d[2, :]),
+                                       stats.kurtosis(self.testcase_2d[2, :]),
+                                       nulp=4)
+
+
+class TestMode:
+    def test_mode(self):
+        a1 = [0,0,0,1,1,1,2,3,3,3,3,4,5,6,7]
+        a2 = np.reshape(a1, (3,5))
+        a3 = np.array([1,2,3,4,5,6])
+        a4 = np.reshape(a3, (3,2))
+        ma1 = ma.masked_where(ma.array(a1) > 2, a1)
+        ma2 = ma.masked_where(a2 > 2, a2)
+        ma3 = ma.masked_where(a3 < 2, a3)
+        ma4 = ma.masked_where(ma.array(a4) < 2, a4)
+        assert_equal(mstats.mode(a1, axis=None), (3,4))
+        assert_equal(mstats.mode(a1, axis=0), (3,4))
+        assert_equal(mstats.mode(ma1, axis=None), (0,3))
+        assert_equal(mstats.mode(a2, axis=None), (3,4))
+        assert_equal(mstats.mode(ma2, axis=None), (0,3))
+        assert_equal(mstats.mode(a3, axis=None), (1,1))
+        assert_equal(mstats.mode(ma3, axis=None), (2,1))
+        assert_equal(mstats.mode(a2, axis=0), ([[0,0,0,1,1]], [[1,1,1,1,1]]))
+        assert_equal(mstats.mode(ma2, axis=0), ([[0,0,0,1,1]], [[1,1,1,1,1]]))
+        assert_equal(mstats.mode(a2, axis=-1), ([[0],[3],[3]], [[3],[3],[1]]))
+        assert_equal(mstats.mode(ma2, axis=-1), ([[0],[1],[0]], [[3],[1],[0]]))
+        assert_equal(mstats.mode(ma4, axis=0), ([[3,2]], [[1,1]]))
+        assert_equal(mstats.mode(ma4, axis=-1), ([[2],[3],[5]], [[1],[1],[1]]))
+
+        a1_res = mstats.mode(a1, axis=None)
+
+        # test for namedtuple attributes
+        attributes = ('mode', 'count')
+        check_named_results(a1_res, attributes, ma=True)
+
+    def test_mode_modifies_input(self):
+        # regression test for gh-6428: mode(..., axis=None) may not modify
+        # the input array
+        im = np.zeros((100, 100))
+        im[:50, :] += 1
+        im[:, :50] += 1
+        cp = im.copy()
+        mstats.mode(im, None)
+        assert_equal(im, cp)
+
+
+class TestPercentile:
+    def setup_method(self):
+        self.a1 = [3, 4, 5, 10, -3, -5, 6]
+        self.a2 = [3, -6, -2, 8, 7, 4, 2, 1]
+        self.a3 = [3., 4, 5, 10, -3, -5, -6, 7.0]
+
+    def test_percentile(self):
+        x = np.arange(8) * 0.5
+        assert_equal(mstats.scoreatpercentile(x, 0), 0.)
+        assert_equal(mstats.scoreatpercentile(x, 100), 3.5)
+        assert_equal(mstats.scoreatpercentile(x, 50), 1.75)
+
+    def test_2D(self):
+        x = ma.array([[1, 1, 1],
+                      [1, 1, 1],
+                      [4, 4, 3],
+                      [1, 1, 1],
+                      [1, 1, 1]])
+        assert_equal(mstats.scoreatpercentile(x, 50), [1, 1, 1])
+
+
+class TestVariability:
+    """  Comparison numbers are found using R v.1.5.1
+         note that length(testcase) = 4
+    """
+    testcase = ma.fix_invalid([1,2,3,4,np.nan])
+
+    def test_sem(self):
+        # This is not in R, so used: sqrt(var(testcase)*3/4) / sqrt(3)
+        y = mstats.sem(self.testcase)
+        assert_almost_equal(y, 0.6454972244)
+        n = self.testcase.count()
+        assert_allclose(mstats.sem(self.testcase, ddof=0) * np.sqrt(n/(n-2)),
+                        mstats.sem(self.testcase, ddof=2))
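+        # (sem(x, ddof=d) = sqrt(sum((x - mean)**2)/(n - d)) / sqrt(n), so
+        # sem(ddof=0) * sqrt(n/(n - 2)) equals sem(ddof=2) exactly.)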
+
+    def test_zmap(self):
+        # This is not in R, so tested by using:
+        #    (testcase[i]-mean(testcase,axis=0)) / sqrt(var(testcase)*3/4)
+        y = mstats.zmap(self.testcase, self.testcase)
+        desired_unmaskedvals = ([-1.3416407864999, -0.44721359549996,
+                                 0.44721359549996, 1.3416407864999])
+        assert_array_almost_equal(desired_unmaskedvals,
+                                  y.data[y.mask == False], decimal=12)
+
+    def test_zscore(self):
+        # This is not in R, so tested by using:
+        #     (testcase[i]-mean(testcase,axis=0)) / sqrt(var(testcase)*3/4)
+        y = mstats.zscore(self.testcase)
+        desired = ma.fix_invalid([-1.3416407864999, -0.44721359549996,
+                                  0.44721359549996, 1.3416407864999, np.nan])
+        assert_almost_equal(desired, y, decimal=12)
+
+
+class TestMisc:
+
+    def test_obrientransform(self):
+        args = [[5]*5+[6]*11+[7]*9+[8]*3+[9]*2+[10]*2,
+                [6]+[7]*2+[8]*4+[9]*9+[10]*16]
+        result = [5*[3.1828]+11*[0.5591]+9*[0.0344]+3*[1.6086]+2*[5.2817]+2*[11.0538],
+                  [10.4352]+2*[4.8599]+4*[1.3836]+9*[0.0061]+16*[0.7277]]
+        assert_almost_equal(np.round(mstats.obrientransform(*args).T, 4),
+                            result, 4)
+
+    def test_ks_2samp(self):
+        x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],
+             [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],
+             [3, 2, 5, 6, 18, 4, 9, 1, 1, nan, 1, 1, nan],
+             [nan, 6, 11, 4, 17, nan, 6, 1, 1, 2, 5, 1, 1]]
+        x = ma.fix_invalid(x).T
+        (winter, spring, summer, fall) = x.T
+
+        assert_almost_equal(np.round(mstats.ks_2samp(winter, spring), 4),
+                            (0.1818, 0.9628))
+        assert_almost_equal(np.round(mstats.ks_2samp(winter, spring, 'g'), 4),
+                            (0.1469, 0.6886))
+        assert_almost_equal(np.round(mstats.ks_2samp(winter, spring, 'l'), 4),
+                            (0.1818, 0.6011))
+
+    def test_friedmanchisq(self):
+        # No missing values
+        args = ([9.0,9.5,5.0,7.5,9.5,7.5,8.0,7.0,8.5,6.0],
+                [7.0,6.5,7.0,7.5,5.0,8.0,6.0,6.5,7.0,7.0],
+                [6.0,8.0,4.0,6.0,7.0,6.5,6.0,4.0,6.5,3.0])
+        result = mstats.friedmanchisquare(*args)
+        assert_almost_equal(result[0], 10.4737, 4)
+        assert_almost_equal(result[1], 0.005317, 6)
+        # Missing values
+        x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],
+             [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],
+             [3, 2, 5, 6, 18, 4, 9, 1, 1,nan, 1, 1,nan],
+             [nan, 6, 11, 4, 17,nan, 6, 1, 1, 2, 5, 1, 1]]
+        x = ma.fix_invalid(x)
+        result = mstats.friedmanchisquare(*x)
+        assert_almost_equal(result[0], 2.0156, 4)
+        assert_almost_equal(result[1], 0.5692, 4)
+
+        # test for namedtuple attributes
+        attributes = ('statistic', 'pvalue')
+        check_named_results(result, attributes, ma=True)
+
+
+def test_regress_simple():
+    # Regress a line with sinusoidal noise. Test for #1273.
+    x = np.linspace(0, 100, 100)
+    y = 0.2 * np.linspace(0, 100, 100) + 10
+    y += np.sin(np.linspace(0, 20, 100))
+
+    result = mstats.linregress(x, y)
+
+    # Result is of a correct class and with correct fields
+    lr = stats._stats_mstats_common.LinregressResult
+    assert_(isinstance(result, lr))
+    attributes = ('slope', 'intercept', 'rvalue', 'pvalue', 'stderr')
+    check_named_results(result, attributes, ma=True)
+    assert 'intercept_stderr' in dir(result)
+
+    # Slope and intercept are estimated correctly
+    assert_almost_equal(result.slope, 0.19644990055858422)
+    assert_almost_equal(result.intercept, 10.211269918932341)
+    assert_almost_equal(result.stderr, 0.002395781449783862)
+    assert_almost_equal(result.intercept_stderr, 0.13866936078570702)
+
+
+def test_linregress_identical_x():
+    x = np.zeros(10)
+    y = np.random.random(10)
+    msg = "Cannot calculate a linear regression if all x values are identical"
+    with assert_raises(ValueError, match=msg):
+        mstats.linregress(x, y)
+
+
+def test_theilslopes():
+    # Test for basic slope and intercept.
+    slope, intercept, lower, upper = mstats.theilslopes([0, 1, 1])
+    assert_almost_equal(slope, 0.5)
+    assert_almost_equal(intercept, 0.5)
+
+    slope, intercept, lower, upper = mstats.theilslopes([0, 1, 1],
+                                                        method='joint')
+    assert_almost_equal(slope, 0.5)
+    assert_almost_equal(intercept, 0.0)
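+    # ('separate', the default, computes the intercept as
+    # median(y) - slope*median(x); 'joint' uses median(y - slope*x), which
+    # is why the intercepts differ while the slope estimate is identical.)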
+
+    # Test for correct masking.
+    y = np.ma.array([0, 1, 100, 1], mask=[False, False, True, False])
+    slope, intercept, lower, upper = mstats.theilslopes(y)
+    assert_almost_equal(slope, 1./3)
+    assert_almost_equal(intercept, 2./3)
+
+    slope, intercept, lower, upper = mstats.theilslopes(y,
+                                                        method='joint')
+    assert_almost_equal(slope, 1./3)
+    assert_almost_equal(intercept, 0.0)
+
+    # Test of confidence intervals from example in Sen (1968).
+    x = [1, 2, 3, 4, 10, 12, 18]
+    y = [9, 15, 19, 20, 45, 55, 78]
+    slope, intercept, lower, upper = mstats.theilslopes(y, x, 0.07)
+    assert_almost_equal(slope, 4)
+    assert_almost_equal(intercept, 4.0)
+    assert_almost_equal(upper, 4.38, decimal=2)
+    assert_almost_equal(lower, 3.71, decimal=2)
+
+    slope, intercept, lower, upper = mstats.theilslopes(y, x, 0.07,
+                                                        method='joint')
+    assert_almost_equal(slope, 4)
+    assert_almost_equal(intercept, 6.0)
+    assert_almost_equal(upper, 4.38, decimal=2)
+    assert_almost_equal(lower, 3.71, decimal=2)
+
+
+def test_theilslopes_warnings():
+    # Test `theilslopes` with degenerate input; see gh-15943
+    with pytest.warns(RuntimeWarning, match="All `x` coordinates are..."):
+        res = mstats.theilslopes([0, 1], [0, 0])
+        assert np.all(np.isnan(res))
+    with suppress_warnings() as sup:
+        sup.filter(RuntimeWarning, "invalid value encountered...")
+        res = mstats.theilslopes([0, 0, 0], [0, 1, 0])
+        assert_allclose(res, (0, 0, np.nan, np.nan))
+
+
+def test_theilslopes_namedtuple_consistency():
+    """
+    Simple test to ensure tuple backwards-compatibility of the returned
+    TheilslopesResult object
+    """
+    y = [1, 2, 4]
+    x = [4, 6, 8]
+    slope, intercept, low_slope, high_slope = mstats.theilslopes(y, x)
+    result = mstats.theilslopes(y, x)
+
+    # note all four returned values are distinct here
+    assert_equal(slope, result.slope)
+    assert_equal(intercept, result.intercept)
+    assert_equal(low_slope, result.low_slope)
+    assert_equal(high_slope, result.high_slope)
+
+
+def test_siegelslopes():
+    # method should be exact for straight line
+    y = 2 * np.arange(10) + 0.5
+    assert_equal(mstats.siegelslopes(y), (2.0, 0.5))
+    assert_equal(mstats.siegelslopes(y, method='separate'), (2.0, 0.5))
+
+    x = 2 * np.arange(10)
+    y = 5 * x - 3.0
+    assert_equal(mstats.siegelslopes(y, x), (5.0, -3.0))
+    assert_equal(mstats.siegelslopes(y, x, method='separate'), (5.0, -3.0))
+
+    # the method is robust to outliers: breakdown point of 50%
+    y[:4] = 1000
+    assert_equal(mstats.siegelslopes(y, x), (5.0, -3.0))
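+    # (Siegel's repeated medians take, for each point i, the median of the
+    # pairwise slopes through i, then the median of those medians over i;
+    # up to half the points may be outliers before the estimate breaks down.)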
+
+    # if there are no outliers, results should be comparable to linregress
+    x = np.arange(10)
+    y = -2.3 + 0.3*x + stats.norm.rvs(size=10, random_state=231)
+    slope_ols, intercept_ols, _, _, _ = stats.linregress(x, y)
+
+    slope, intercept = mstats.siegelslopes(y, x)
+    assert_allclose(slope, slope_ols, rtol=0.1)
+    assert_allclose(intercept, intercept_ols, rtol=0.1)
+
+    slope, intercept = mstats.siegelslopes(y, x, method='separate')
+    assert_allclose(slope, slope_ols, rtol=0.1)
+    assert_allclose(intercept, intercept_ols, rtol=0.1)
+
+
+def test_siegelslopes_namedtuple_consistency():
+    """
+    Simple test to ensure tuple backwards-compatibility of the returned
+    SiegelslopesResult object.
+    """
+    y = [1, 2, 4]
+    x = [4, 6, 8]
+    slope, intercept = mstats.siegelslopes(y, x)
+    result = mstats.siegelslopes(y, x)
+
+    # note both returned values are distinct here
+    assert_equal(slope, result.slope)
+    assert_equal(intercept, result.intercept)
+
+
+def test_plotting_positions():
+    # Regression test for #1256
+    pos = mstats.plotting_positions(np.arange(3), 0, 0)
+    assert_array_almost_equal(pos.data, np.array([0.25, 0.5, 0.75]))
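+    # (plotting_positions uses (k - alpha)/(n + 1 - alpha - beta); with
+    # alpha = beta = 0 and n = 3 this gives k/4 = 0.25, 0.5, 0.75.)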
+
+
+class TestNormalitytests():
+
+    def test_vs_nonmasked(self):
+        x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
+        assert_array_almost_equal(mstats.normaltest(x),
+                                  stats.normaltest(x))
+        assert_array_almost_equal(mstats.skewtest(x),
+                                  stats.skewtest(x))
+        assert_array_almost_equal(mstats.kurtosistest(x),
+                                  stats.kurtosistest(x))
+
+        funcs = [stats.normaltest, stats.skewtest, stats.kurtosistest]
+        mfuncs = [mstats.normaltest, mstats.skewtest, mstats.kurtosistest]
+        x = [1, 2, 3, 4]
+        for func, mfunc in zip(funcs, mfuncs):
+            assert_raises(ValueError, func, x)
+            assert_raises(ValueError, mfunc, x)
+
+    def test_axis_None(self):
+        # Test axis=None (equal to axis=0 for 1-D input)
+        x = np.array((-2,-1,0,1,2,3)*4)**2
+        assert_allclose(mstats.normaltest(x, axis=None), mstats.normaltest(x))
+        assert_allclose(mstats.skewtest(x, axis=None), mstats.skewtest(x))
+        assert_allclose(mstats.kurtosistest(x, axis=None),
+                        mstats.kurtosistest(x))
+
+    def test_maskedarray_input(self):
+        # Add some masked values, test result doesn't change
+        x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
+        xm = np.ma.array(np.r_[np.inf, x, 10],
+                         mask=np.r_[True, [False] * x.size, True])
+        assert_allclose(mstats.normaltest(xm), stats.normaltest(x))
+        assert_allclose(mstats.skewtest(xm), stats.skewtest(x))
+        assert_allclose(mstats.kurtosistest(xm), stats.kurtosistest(x))
+
+    def test_nd_input(self):
+        x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
+        x_2d = np.vstack([x] * 2).T
+        for func in [mstats.normaltest, mstats.skewtest, mstats.kurtosistest]:
+            res_1d = func(x)
+            res_2d = func(x_2d)
+            assert_allclose(res_2d[0], [res_1d[0]] * 2)
+            assert_allclose(res_2d[1], [res_1d[1]] * 2)
+
+    def test_normaltest_result_attributes(self):
+        x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
+        res = mstats.normaltest(x)
+        attributes = ('statistic', 'pvalue')
+        check_named_results(res, attributes, ma=True)
+
+    def test_kurtosistest_result_attributes(self):
+        x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
+        res = mstats.kurtosistest(x)
+        attributes = ('statistic', 'pvalue')
+        check_named_results(res, attributes, ma=True)
+
+    def test_regression_9033(self):
+        # x is clearly non-normal, but the power of a negative denominator
+        # needs to be handled correctly to reject normality
+        counts = [128, 0, 58, 7, 0, 41, 16, 0, 0, 167]
+        x = np.hstack([np.full(c, i) for i, c in enumerate(counts)])
+        assert_equal(mstats.kurtosistest(x)[1] < 0.01, True)
+
+    @pytest.mark.parametrize("test", ["skewtest", "kurtosistest"])
+    @pytest.mark.parametrize("alternative", ["less", "greater"])
+    def test_alternative(self, test, alternative):
+        x = stats.norm.rvs(loc=10, scale=2.5, size=30, random_state=123)
+
+        stats_test = getattr(stats, test)
+        mstats_test = getattr(mstats, test)
+
+        z_ex, p_ex = stats_test(x, alternative=alternative)
+        z, p = mstats_test(x, alternative=alternative)
+        assert_allclose(z, z_ex, atol=1e-12)
+        assert_allclose(p, p_ex, atol=1e-12)
+
+        # test with masked arrays
+        x[1:5] = np.nan
+        x = np.ma.masked_array(x, mask=np.isnan(x))
+        z_ex, p_ex = stats_test(x.compressed(), alternative=alternative)
+        z, p = mstats_test(x, alternative=alternative)
+        assert_allclose(z, z_ex, atol=1e-12)
+        assert_allclose(p, p_ex, atol=1e-12)
+
+    def test_bad_alternative(self):
+        x = stats.norm.rvs(size=20, random_state=123)
+        msg = r"alternative must be 'less', 'greater' or 'two-sided'"
+
+        with pytest.raises(ValueError, match=msg):
+            mstats.skewtest(x, alternative='error')
+
+        with pytest.raises(ValueError, match=msg):
+            mstats.kurtosistest(x, alternative='error')
+
+
+class TestFOneway():
+    def test_result_attributes(self):
+        a = np.array([655, 788], dtype=np.uint16)
+        b = np.array([789, 772], dtype=np.uint16)
+        res = mstats.f_oneway(a, b)
+        attributes = ('statistic', 'pvalue')
+        check_named_results(res, attributes, ma=True)
+
+
+class TestMannwhitneyu():
+    # data from gh-1428
+    x = np.array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
+                  1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
+                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
+                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
+                  1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
+                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
+                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2.,
+                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
+                  1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 2., 1., 1., 2.,
+                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
+                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1.,
+                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
+                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
+                  1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
+                  1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
+                  1., 1., 1., 1., 1., 1., 1., 1., 3., 1., 1., 1., 1., 1.,
+                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
+                  1., 1., 1., 1., 1., 1.])
+
+    y = np.array([1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1., 1.,
+                  2., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 1., 1., 3.,
+                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1.,
+                  1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1.,
+                  1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2.,
+                  2., 1., 1., 2., 1., 1., 2., 1., 2., 1., 1., 1., 1., 2.,
+                  2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
+                  1., 2., 1., 1., 1., 1., 1., 2., 2., 2., 1., 1., 1., 1.,
+                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
+                  2., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1.,
+                  1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 2., 1., 1.,
+                  1., 1., 1., 1.])
+
+    def test_result_attributes(self):
+        res = mstats.mannwhitneyu(self.x, self.y)
+        attributes = ('statistic', 'pvalue')
+        check_named_results(res, attributes, ma=True)
+
+    def test_against_stats(self):
+        # gh-4641 reported that stats.mannwhitneyu returned half the p-value
+        # of mstats.mannwhitneyu. Default alternative of stats.mannwhitneyu
+        # is now two-sided, so they match.
+        res1 = mstats.mannwhitneyu(self.x, self.y)
+        res2 = stats.mannwhitneyu(self.x, self.y)
+        assert res1.statistic == res2.statistic
+        assert_allclose(res1.pvalue, res2.pvalue)
+
+
+class TestKruskal():
+    def test_result_attributes(self):
+        x = [1, 3, 5, 7, 9]
+        y = [2, 4, 6, 8, 10]
+
+        res = mstats.kruskal(x, y)
+        attributes = ('statistic', 'pvalue')
+        check_named_results(res, attributes, ma=True)
+
+
+# TODO: for all ttest functions, add tests with masked array inputs
+class TestTtest_rel():
+    def test_vs_nonmasked(self):
+        np.random.seed(1234567)
+        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
+
+        # 1-D inputs
+        res1 = stats.ttest_rel(outcome[:, 0], outcome[:, 1])
+        res2 = mstats.ttest_rel(outcome[:, 0], outcome[:, 1])
+        assert_allclose(res1, res2)
+
+        # 2-D inputs
+        res1 = stats.ttest_rel(outcome[:, 0], outcome[:, 1], axis=None)
+        res2 = mstats.ttest_rel(outcome[:, 0], outcome[:, 1], axis=None)
+        assert_allclose(res1, res2)
+        res1 = stats.ttest_rel(outcome[:, :2], outcome[:, 2:], axis=0)
+        res2 = mstats.ttest_rel(outcome[:, :2], outcome[:, 2:], axis=0)
+        assert_allclose(res1, res2)
+
+        # Check default is axis=0
+        res3 = mstats.ttest_rel(outcome[:, :2], outcome[:, 2:])
+        assert_allclose(res2, res3)
+
+    def test_fully_masked(self):
+        np.random.seed(1234567)
+        outcome = ma.masked_array(np.random.randn(3, 2),
+                                  mask=[[1, 1, 1], [0, 0, 0]])
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "invalid value encountered in absolute")
+            for pair in [(outcome[:, 0], outcome[:, 1]), ([np.nan, np.nan], [1.0, 2.0])]:
+                t, p = mstats.ttest_rel(*pair)
+                assert_array_equal(t, (np.nan, np.nan))
+                assert_array_equal(p, (np.nan, np.nan))
+
+    def test_result_attributes(self):
+        np.random.seed(1234567)
+        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
+
+        res = mstats.ttest_rel(outcome[:, 0], outcome[:, 1])
+        attributes = ('statistic', 'pvalue')
+        check_named_results(res, attributes, ma=True)
+
+    def test_invalid_input_size(self):
+        assert_raises(ValueError, mstats.ttest_rel,
+                      np.arange(10), np.arange(11))
+        x = np.arange(24)
+        assert_raises(ValueError, mstats.ttest_rel,
+                      x.reshape(2, 3, 4), x.reshape(2, 4, 3), axis=1)
+        assert_raises(ValueError, mstats.ttest_rel,
+                      x.reshape(2, 3, 4), x.reshape(2, 4, 3), axis=2)
+
+    def test_empty(self):
+        res1 = mstats.ttest_rel([], [])
+        assert_(np.all(np.isnan(res1)))
+
+    def test_zero_division(self):
+        t, p = mstats.ttest_rel([0, 0, 0], [1, 1, 1])
+        assert_equal((np.abs(t), p), (np.inf, 0))
+
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "invalid value encountered in absolute")
+            t, p = mstats.ttest_rel([0, 0, 0], [0, 0, 0])
+            assert_array_equal(t, np.array([np.nan, np.nan]))
+            assert_array_equal(p, np.array([np.nan, np.nan]))
+
+    def test_bad_alternative(self):
+        msg = r"alternative must be 'less', 'greater' or 'two-sided'"
+        with pytest.raises(ValueError, match=msg):
+            mstats.ttest_rel([1, 2, 3], [4, 5, 6], alternative='foo')
+
+    @pytest.mark.parametrize("alternative", ["less", "greater"])
+    def test_alternative(self, alternative):
+        x = stats.norm.rvs(loc=10, scale=5, size=25, random_state=42)
+        y = stats.norm.rvs(loc=8, scale=2, size=25, random_state=42)
+
+        t_ex, p_ex = stats.ttest_rel(x, y, alternative=alternative)
+        t, p = mstats.ttest_rel(x, y, alternative=alternative)
+        assert_allclose(t, t_ex, rtol=1e-14)
+        assert_allclose(p, p_ex, rtol=1e-14)
+
+        # test with masked arrays
+        x[1:10] = np.nan
+        y[1:10] = np.nan
+        x = np.ma.masked_array(x, mask=np.isnan(x))
+        y = np.ma.masked_array(y, mask=np.isnan(y))
+        t, p = mstats.ttest_rel(x, y, alternative=alternative)
+        t_ex, p_ex = stats.ttest_rel(x.compressed(), y.compressed(),
+                                     alternative=alternative)
+        assert_allclose(t, t_ex, rtol=1e-14)
+        assert_allclose(p, p_ex, rtol=1e-14)
+
+
+class TestTtest_ind():
+    def test_vs_nonmasked(self):
+        np.random.seed(1234567)
+        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
+
+        # 1-D inputs
+        res1 = stats.ttest_ind(outcome[:, 0], outcome[:, 1])
+        res2 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1])
+        assert_allclose(res1, res2)
+
+        # 2-D inputs
+        res1 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], axis=None)
+        res2 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], axis=None)
+        assert_allclose(res1, res2)
+        res1 = stats.ttest_ind(outcome[:, :2], outcome[:, 2:], axis=0)
+        res2 = mstats.ttest_ind(outcome[:, :2], outcome[:, 2:], axis=0)
+        assert_allclose(res1, res2)
+
+        # Check default is axis=0
+        res3 = mstats.ttest_ind(outcome[:, :2], outcome[:, 2:])
+        assert_allclose(res2, res3)
+
+        # Check equal_var
+        res4 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=True)
+        res5 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=True)
+        assert_allclose(res4, res5)
+        res4 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=False)
+        res5 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=False)
+        assert_allclose(res4, res5)
+
+    def test_fully_masked(self):
+        np.random.seed(1234567)
+        outcome = ma.masked_array(np.random.randn(3, 2), mask=[[1, 1, 1], [0, 0, 0]])
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "invalid value encountered in absolute")
+            for pair in [(outcome[:, 0], outcome[:, 1]), ([np.nan, np.nan], [1.0, 2.0])]:
+                t, p = mstats.ttest_ind(*pair)
+                assert_array_equal(t, (np.nan, np.nan))
+                assert_array_equal(p, (np.nan, np.nan))
+
+    def test_result_attributes(self):
+        np.random.seed(1234567)
+        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
+
+        res = mstats.ttest_ind(outcome[:, 0], outcome[:, 1])
+        attributes = ('statistic', 'pvalue')
+        check_named_results(res, attributes, ma=True)
+
+    def test_empty(self):
+        res1 = mstats.ttest_ind([], [])
+        assert_(np.all(np.isnan(res1)))
+
+    def test_zero_division(self):
+        t, p = mstats.ttest_ind([0, 0, 0], [1, 1, 1])
+        assert_equal((np.abs(t), p), (np.inf, 0))
+
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "invalid value encountered in absolute")
+            t, p = mstats.ttest_ind([0, 0, 0], [0, 0, 0])
+            assert_array_equal(t, (np.nan, np.nan))
+            assert_array_equal(p, (np.nan, np.nan))
+
+        t, p = mstats.ttest_ind([0, 0, 0], [1, 1, 1], equal_var=False)
+        assert_equal((np.abs(t), p), (np.inf, 0))
+        assert_array_equal(mstats.ttest_ind([0, 0, 0], [0, 0, 0],
+                                            equal_var=False), (np.nan, np.nan))
+
+    def test_bad_alternative(self):
+        msg = r"alternative must be 'less', 'greater' or 'two-sided'"
+        with pytest.raises(ValueError, match=msg):
+            mstats.ttest_ind([1, 2, 3], [4, 5, 6], alternative='foo')
+
+    @pytest.mark.parametrize("alternative", ["less", "greater"])
+    def test_alternative(self, alternative):
+        x = stats.norm.rvs(loc=10, scale=2, size=100, random_state=123)
+        y = stats.norm.rvs(loc=8, scale=2, size=100, random_state=123)
+
+        t_ex, p_ex = stats.ttest_ind(x, y, alternative=alternative)
+        t, p = mstats.ttest_ind(x, y, alternative=alternative)
+        assert_allclose(t, t_ex, rtol=1e-14)
+        assert_allclose(p, p_ex, rtol=1e-14)
+
+        # test with masked arrays
+        x[1:10] = np.nan
+        y[80:90] = np.nan
+        x = np.ma.masked_array(x, mask=np.isnan(x))
+        y = np.ma.masked_array(y, mask=np.isnan(y))
+        t_ex, p_ex = stats.ttest_ind(x.compressed(), y.compressed(),
+                                     alternative=alternative)
+        t, p = mstats.ttest_ind(x, y, alternative=alternative)
+        assert_allclose(t, t_ex, rtol=1e-14)
+        assert_allclose(p, p_ex, rtol=1e-14)
+
+
+class TestTtest_1samp():
+    def test_vs_nonmasked(self):
+        np.random.seed(1234567)
+        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
+
+        # 1-D inputs
+        res1 = stats.ttest_1samp(outcome[:, 0], 1)
+        res2 = mstats.ttest_1samp(outcome[:, 0], 1)
+        assert_allclose(res1, res2)
+
+    def test_fully_masked(self):
+        np.random.seed(1234567)
+        outcome = ma.masked_array(np.random.randn(3), mask=[1, 1, 1])
+        expected = (np.nan, np.nan)
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "invalid value encountered in absolute")
+            for pair in [((np.nan, np.nan), 0.0), (outcome, 0.0)]:
+                t, p = mstats.ttest_1samp(*pair)
+                assert_array_equal(p, expected)
+                assert_array_equal(t, expected)
+
+    def test_result_attributes(self):
+        np.random.seed(1234567)
+        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
+
+        res = mstats.ttest_1samp(outcome[:, 0], 1)
+        attributes = ('statistic', 'pvalue')
+        check_named_results(res, attributes, ma=True)
+
+    def test_empty(self):
+        res1 = mstats.ttest_1samp([], 1)
+        assert_(np.all(np.isnan(res1)))
+
+    def test_zero_division(self):
+        t, p = mstats.ttest_1samp([0, 0, 0], 1)
+        assert_equal((np.abs(t), p), (np.inf, 0))
+
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "invalid value encountered in absolute")
+            t, p = mstats.ttest_1samp([0, 0, 0], 0)
+            assert_(np.isnan(t))
+            assert_array_equal(p, (np.nan, np.nan))
+
+    def test_bad_alternative(self):
+        msg = r"alternative must be 'less', 'greater' or 'two-sided'"
+        with pytest.raises(ValueError, match=msg):
+            mstats.ttest_1samp([1, 2, 3], 4, alternative='foo')
+
+    @pytest.mark.parametrize("alternative", ["less", "greater"])
+    def test_alternative(self, alternative):
+        x = stats.norm.rvs(loc=10, scale=2, size=100, random_state=123)
+
+        t_ex, p_ex = stats.ttest_1samp(x, 9, alternative=alternative)
+        t, p = mstats.ttest_1samp(x, 9, alternative=alternative)
+        assert_allclose(t, t_ex, rtol=1e-14)
+        assert_allclose(p, p_ex, rtol=1e-14)
+
+        # test with masked arrays
+        x[1:10] = np.nan
+        x = np.ma.masked_array(x, mask=np.isnan(x))
+        t_ex, p_ex = stats.ttest_1samp(x.compressed(), 9,
+                                       alternative=alternative)
+        t, p = mstats.ttest_1samp(x, 9, alternative=alternative)
+        assert_allclose(t, t_ex, rtol=1e-14)
+        assert_allclose(p, p_ex, rtol=1e-14)
+
+
+class TestDescribe:
+    """
+    Tests for mstats.describe.
+
+    Note that there are also tests for `mstats.describe` in the
+    class TestCompareWithStats.
+    """
+    def test_basic_with_axis(self):
+        # This is a basic test that is also a regression test for gh-7303.
+        a = np.ma.masked_array([[0, 1, 2, 3, 4, 9],
+                                [5, 5, 0, 9, 3, 3]],
+                               mask=[[0, 0, 0, 0, 0, 1],
+                                     [0, 0, 1, 1, 0, 0]])
+        result = mstats.describe(a, axis=1)
+        assert_equal(result.nobs, [5, 4])
+        amin, amax = result.minmax
+        assert_equal(amin, [0, 3])
+        assert_equal(amax, [4, 5])
+        assert_equal(result.mean, [2.0, 4.0])
+        assert_equal(result.variance, [2.0, 1.0])
+        assert_equal(result.skewness, [0.0, 0.0])
+        assert_allclose(result.kurtosis, [-1.3, -2.0])
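+        # (hand check, row 0: the unmasked values are [0, 1, 2, 3, 4], so
+        # nobs=5, mean=2.0, biased variance m2=2.0, skewness=0.0 by symmetry,
+        # and kurtosis = m4/m2**2 - 3 = 6.8/4 - 3 = -1.3)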
+
+
+class TestCompareWithStats:
+    """
+    Class to compare mstats results with stats results.
+
+    It is generally assumed that scipy.stats is at a more mature stage than
+    scipy.stats.mstats.  If a routine in mstats produces results similar to
+    those of its scipy.stats counterpart, this is also considered a proper
+    validation of the mstats routine.
+
+    Different sample sizes are used for testing, as some problems between
+    stats and mstats depend on sample size.
+
+    Author: Alexander Loew
+
+    NOTE that some tests fail. This might be caused by
+    a) actual differences or bugs between stats and mstats
+    b) numerical inaccuracies
+    c) different definitions of routine interfaces
+
+    These failures need to be checked.  The current workaround is to disable
+    the failing tests and file reports on scipy-dev.
+
+    """
+    def get_n(self):
+        """ Returns list of sample sizes to be used for comparison. """
+        return [1000, 100, 10, 5]
+
+    def generate_xy_sample(self, n):
+        # This routine generates numpy arrays and corresponding masked arrays
+        # with the same data, but additional masked values
+        np.random.seed(1234567)
+        x = np.random.randn(n)
+        y = x + np.random.randn(n)
+        xm = np.full(len(x) + 5, 1e16)
+        ym = np.full(len(y) + 5, 1e16)
+        xm[0:len(x)] = x
+        ym[0:len(y)] = y
+        mask = xm > 9e15
+        xm = np.ma.array(xm, mask=mask)
+        ym = np.ma.array(ym, mask=mask)
+        return x, y, xm, ym
+
+    def generate_xy_sample2D(self, n, nx):
+        x = np.full((n, nx), np.nan)
+        y = np.full((n, nx), np.nan)
+        xm = np.full((n+5, nx), np.nan)
+        ym = np.full((n+5, nx), np.nan)
+
+        for i in range(nx):
+            x[:, i], y[:, i], dx, dy = self.generate_xy_sample(n)
+
+        xm[0:n, :] = x[0:n]
+        ym[0:n, :] = y[0:n]
+        xm = np.ma.array(xm, mask=np.isnan(xm))
+        ym = np.ma.array(ym, mask=np.isnan(ym))
+        return x, y, xm, ym
+
+    def test_linregress(self):
+        for n in self.get_n():
+            x, y, xm, ym = self.generate_xy_sample(n)
+            result1 = stats.linregress(x, y)
+            result2 = stats.mstats.linregress(xm, ym)
+            assert_allclose(np.asarray(result1), np.asarray(result2))
+
+    def test_pearsonr(self):
+        for n in self.get_n():
+            x, y, xm, ym = self.generate_xy_sample(n)
+            r, p = stats.pearsonr(x, y)
+            rm, pm = stats.mstats.pearsonr(xm, ym)
+
+            assert_almost_equal(r, rm, decimal=14)
+            assert_almost_equal(p, pm, decimal=14)
+
+    def test_spearmanr(self):
+        for n in self.get_n():
+            x, y, xm, ym = self.generate_xy_sample(n)
+            r, p = stats.spearmanr(x, y)
+            rm, pm = stats.mstats.spearmanr(xm, ym)
+            assert_almost_equal(r, rm, 14)
+            assert_almost_equal(p, pm, 14)
+
+    def test_spearmanr_backcompat_useties(self):
+        # A regression test to ensure we don't break backwards compat
+        # more than we have to (see gh-9204).
+        x = np.arange(6)
+        assert_raises(ValueError, mstats.spearmanr, x, x, False)
+
+    def test_gmean(self):
+        for n in self.get_n():
+            x, y, xm, ym = self.generate_xy_sample(n)
+            r = stats.gmean(abs(x))
+            rm = stats.mstats.gmean(abs(xm))
+            assert_allclose(r, rm, rtol=1e-13)
+
+            r = stats.gmean(abs(y))
+            rm = stats.mstats.gmean(abs(ym))
+            assert_allclose(r, rm, rtol=1e-13)
+
+    def test_hmean(self):
+        for n in self.get_n():
+            x, y, xm, ym = self.generate_xy_sample(n)
+
+            r = stats.hmean(abs(x))
+            rm = stats.mstats.hmean(abs(xm))
+            assert_almost_equal(r, rm, 10)
+
+            r = stats.hmean(abs(y))
+            rm = stats.mstats.hmean(abs(ym))
+            assert_almost_equal(r, rm, 10)
+
+    def test_skew(self):
+        for n in self.get_n():
+            x, y, xm, ym = self.generate_xy_sample(n)
+
+            r = stats.skew(x)
+            rm = stats.mstats.skew(xm)
+            assert_almost_equal(r, rm, 10)
+
+            r = stats.skew(y)
+            rm = stats.mstats.skew(ym)
+            assert_almost_equal(r, rm, 10)
+
+    def test_moment(self):
+        for n in self.get_n():
+            x, y, xm, ym = self.generate_xy_sample(n)
+
+            r = stats.moment(x)
+            rm = stats.mstats.moment(xm)
+            assert_almost_equal(r, rm, 10)
+
+            r = stats.moment(y)
+            rm = stats.mstats.moment(ym)
+            assert_almost_equal(r, rm, 10)
+
+    def test_zscore(self):
+        for n in self.get_n():
+            x, y, xm, ym = self.generate_xy_sample(n)
+
+            # reference solution
+            zx = (x - x.mean()) / x.std()
+            zy = (y - y.mean()) / y.std()
+
+            # validate stats
+            assert_allclose(stats.zscore(x), zx, rtol=1e-10)
+            assert_allclose(stats.zscore(y), zy, rtol=1e-10)
+
+            # compare stats and mstats
+            assert_allclose(stats.zscore(x), stats.mstats.zscore(xm[0:len(x)]),
+                            rtol=1e-10)
+            assert_allclose(stats.zscore(y), stats.mstats.zscore(ym[0:len(y)]),
+                            rtol=1e-10)
+
+    def test_kurtosis(self):
+        for n in self.get_n():
+            x, y, xm, ym = self.generate_xy_sample(n)
+            r = stats.kurtosis(x)
+            rm = stats.mstats.kurtosis(xm)
+            assert_almost_equal(r, rm, 10)
+
+            r = stats.kurtosis(y)
+            rm = stats.mstats.kurtosis(ym)
+            assert_almost_equal(r, rm, 10)
+
+    def test_sem(self):
+        # example from stats.sem doc
+        a = np.arange(20).reshape(5, 4)
+        am = np.ma.array(a)
+        r = stats.sem(a, ddof=1)
+        rm = stats.mstats.sem(am, ddof=1)
+
+        assert_allclose(r, 2.82842712, atol=1e-5)
+        assert_allclose(rm, 2.82842712, atol=1e-5)
+
+        for n in self.get_n():
+            x, y, xm, ym = self.generate_xy_sample(n)
+            assert_almost_equal(stats.mstats.sem(xm, axis=None, ddof=0),
+                                stats.sem(x, axis=None, ddof=0), decimal=13)
+            assert_almost_equal(stats.mstats.sem(ym, axis=None, ddof=0),
+                                stats.sem(y, axis=None, ddof=0), decimal=13)
+            assert_almost_equal(stats.mstats.sem(xm, axis=None, ddof=1),
+                                stats.sem(x, axis=None, ddof=1), decimal=13)
+            assert_almost_equal(stats.mstats.sem(ym, axis=None, ddof=1),
+                                stats.sem(y, axis=None, ddof=1), decimal=13)
+
+    def test_describe(self):
+        for n in self.get_n():
+            x, y, xm, ym = self.generate_xy_sample(n)
+            r = stats.describe(x, ddof=1)
+            rm = stats.mstats.describe(xm, ddof=1)
+            for ii in range(6):
+                assert_almost_equal(np.asarray(r[ii]),
+                                    np.asarray(rm[ii]),
+                                    decimal=12)
+
+    def test_describe_result_attributes(self):
+        actual = mstats.describe(np.arange(5))
+        attributes = ('nobs', 'minmax', 'mean', 'variance', 'skewness',
+                      'kurtosis')
+        check_named_results(actual, attributes, ma=True)
+
+    def test_rankdata(self):
+        for n in self.get_n():
+            x, y, xm, ym = self.generate_xy_sample(n)
+            r = stats.rankdata(x)
+            rm = stats.mstats.rankdata(x)
+            assert_allclose(r, rm)
+
+    def test_tmean(self):
+        for n in self.get_n():
+            x, y, xm, ym = self.generate_xy_sample(n)
+            assert_almost_equal(stats.tmean(x),stats.mstats.tmean(xm), 14)
+            assert_almost_equal(stats.tmean(y),stats.mstats.tmean(ym), 14)
+
+    def test_tmax(self):
+        for n in self.get_n():
+            x, y, xm, ym = self.generate_xy_sample(n)
+            assert_almost_equal(stats.tmax(x,2.),
+                                stats.mstats.tmax(xm,2.), 10)
+            assert_almost_equal(stats.tmax(y,2.),
+                                stats.mstats.tmax(ym,2.), 10)
+
+            assert_almost_equal(stats.tmax(x, upperlimit=3.),
+                                stats.mstats.tmax(xm, upperlimit=3.), 10)
+            assert_almost_equal(stats.tmax(y, upperlimit=3.),
+                                stats.mstats.tmax(ym, upperlimit=3.), 10)
+
+    def test_tmin(self):
+        for n in self.get_n():
+            x, y, xm, ym = self.generate_xy_sample(n)
+            assert_equal(stats.tmin(x), stats.mstats.tmin(xm))
+            assert_equal(stats.tmin(y), stats.mstats.tmin(ym))
+
+            assert_almost_equal(stats.tmin(x, lowerlimit=-1.),
+                                stats.mstats.tmin(xm, lowerlimit=-1.), 10)
+            assert_almost_equal(stats.tmin(y, lowerlimit=-1.),
+                                stats.mstats.tmin(ym, lowerlimit=-1.), 10)
+
+    def test_zmap(self):
+        for n in self.get_n():
+            x, y, xm, ym = self.generate_xy_sample(n)
+            z = stats.zmap(x, y)
+            zm = stats.mstats.zmap(xm, ym)
+            assert_allclose(z, zm[0:len(z)], atol=1e-10)
+
+    def test_variation(self):
+        for n in self.get_n():
+            x, y, xm, ym = self.generate_xy_sample(n)
+            assert_almost_equal(stats.variation(x), stats.mstats.variation(xm),
+                                decimal=12)
+            assert_almost_equal(stats.variation(y), stats.mstats.variation(ym),
+                                decimal=12)
+
+    def test_tvar(self):
+        for n in self.get_n():
+            x, y, xm, ym = self.generate_xy_sample(n)
+            assert_almost_equal(stats.tvar(x), stats.mstats.tvar(xm),
+                                decimal=12)
+            assert_almost_equal(stats.tvar(y), stats.mstats.tvar(ym),
+                                decimal=12)
+
+    def test_trimboth(self):
+        a = np.arange(20)
+        b = stats.trimboth(a, 0.1)
+        bm = stats.mstats.trimboth(a, 0.1)
+        assert_allclose(np.sort(b), bm.data[~bm.mask])
+
+    def test_tsem(self):
+        for n in self.get_n():
+            x, y, xm, ym = self.generate_xy_sample(n)
+            assert_almost_equal(stats.tsem(x), stats.mstats.tsem(xm),
+                                decimal=14)
+            assert_almost_equal(stats.tsem(y), stats.mstats.tsem(ym),
+                                decimal=14)
+            assert_almost_equal(stats.tsem(x, limits=(-2., 2.)),
+                                stats.mstats.tsem(xm, limits=(-2., 2.)),
+                                decimal=14)
+
+    def test_skewtest(self):
+        # this test is for 1D data
+        for n in self.get_n():
+            if n > 8:
+                x, y, xm, ym = self.generate_xy_sample(n)
+                r = stats.skewtest(x)
+                rm = stats.mstats.skewtest(xm)
+                assert_allclose(r, rm)
+
+    def test_skewtest_result_attributes(self):
+        x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
+        res = mstats.skewtest(x)
+        attributes = ('statistic', 'pvalue')
+        check_named_results(res, attributes, ma=True)
+
+    def test_skewtest_2D_notmasked(self):
+        # a normal ndarray is passed to the masked function
+        x = np.random.random((20, 2)) * 20.
+        r = stats.skewtest(x)
+        rm = stats.mstats.skewtest(x)
+        assert_allclose(np.asarray(r), np.asarray(rm))
+
+    def test_skewtest_2D_WithMask(self):
+        nx = 2
+        for n in self.get_n():
+            if n > 8:
+                x, y, xm, ym = self.generate_xy_sample2D(n, nx)
+                r = stats.skewtest(x)
+                rm = stats.mstats.skewtest(xm)
+
+                assert_allclose(r[0][0], rm[0][0], rtol=1e-14)
+                assert_allclose(r[0][1], rm[0][1], rtol=1e-14)
+
+    def test_normaltest(self):
+        with np.errstate(over='raise'), suppress_warnings() as sup:
+            sup.filter(UserWarning, "kurtosistest only valid for n>=20")
+            for n in self.get_n():
+                if n > 8:
+                    x, y, xm, ym = self.generate_xy_sample(n)
+                    r = stats.normaltest(x)
+                    rm = stats.mstats.normaltest(xm)
+                    assert_allclose(np.asarray(r), np.asarray(rm))
+
+    def test_find_repeats(self):
+        x = np.asarray([1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 4]).astype('float')
+        tmp = np.asarray([1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5]).astype('float')
+        mask = (tmp == 5.)
+        xm = np.ma.array(tmp, mask=mask)
+        x_orig, xm_orig = x.copy(), xm.copy()
+
+        r = stats.find_repeats(x)
+        rm = stats.mstats.find_repeats(xm)
+
+        assert_equal(r, rm)
+        assert_equal(x, x_orig)
+        assert_equal(xm, xm_orig)
+
+        # This crazy behavior is expected by count_tied_groups, but is not
+        # in the docstring...
+        _, counts = stats.mstats.find_repeats([])
+        assert_equal(counts, np.array(0, dtype=np.intp))
+
+    def test_kendalltau(self):
+        for n in self.get_n():
+            x, y, xm, ym = self.generate_xy_sample(n)
+            r = stats.kendalltau(x, y)
+            rm = stats.mstats.kendalltau(xm, ym)
+            assert_almost_equal(r[0], rm[0], decimal=10)
+            assert_almost_equal(r[1], rm[1], decimal=7)
+
+    def test_obrientransform(self):
+        for n in self.get_n():
+            x, y, xm, ym = self.generate_xy_sample(n)
+            r = stats.obrientransform(x)
+            rm = stats.mstats.obrientransform(xm)
+            assert_almost_equal(r.T, rm[0:len(x)])
+
+    def test_ks_1samp(self):
+        """Checks that mstats.ks_1samp and stats.ks_1samp agree on masked arrays."""
+        for mode in ['auto', 'exact', 'asymp']:
+            with suppress_warnings() as sup:
+                for alternative in ['less', 'greater', 'two-sided']:
+                    for n in self.get_n():
+                        x, y, xm, ym = self.generate_xy_sample(n)
+                        res1 = stats.ks_1samp(x, stats.norm.cdf, alternative=alternative, mode=mode)
+                        res2 = stats.mstats.ks_1samp(xm, stats.norm.cdf, alternative=alternative, mode=mode)
+                        assert_equal(np.asarray(res1), np.asarray(res2))
+                        res3 = stats.ks_1samp(xm, stats.norm.cdf, alternative=alternative, mode=mode)
+                        assert_equal(np.asarray(res1), np.asarray(res3))
+
+    def test_kstest_1samp(self):
+        """Checks that 1-sample mstats.kstest and stats.kstest agree on masked arrays."""
+        for mode in ['auto', 'exact', 'asymp']:
+            with suppress_warnings() as sup:
+                for alternative in ['less', 'greater', 'two-sided']:
+                    for n in self.get_n():
+                        x, y, xm, ym = self.generate_xy_sample(n)
+                        res1 = stats.kstest(x, 'norm', alternative=alternative, mode=mode)
+                        res2 = stats.mstats.kstest(xm, 'norm', alternative=alternative, mode=mode)
+                        assert_equal(np.asarray(res1), np.asarray(res2))
+                        res3 = stats.kstest(xm, 'norm', alternative=alternative, mode=mode)
+                        assert_equal(np.asarray(res1), np.asarray(res3))
+
+    def test_ks_2samp(self):
+        """Checks that mstats.ks_2samp and stats.ks_2samp agree on masked arrays.
+        gh-8431"""
+        for mode in ['auto', 'exact', 'asymp']:
+            with suppress_warnings() as sup:
+                if mode in ['auto', 'exact']:
+                    message = "ks_2samp: Exact calculation unsuccessful."
+                    sup.filter(RuntimeWarning, message)
+                for alternative in ['less', 'greater', 'two-sided']:
+                    for n in self.get_n():
+                        x, y, xm, ym = self.generate_xy_sample(n)
+                        res1 = stats.ks_2samp(x, y, alternative=alternative, mode=mode)
+                        res2 = stats.mstats.ks_2samp(xm, ym, alternative=alternative, mode=mode)
+                        assert_equal(np.asarray(res1), np.asarray(res2))
+                        res3 = stats.ks_2samp(xm, y, alternative=alternative, mode=mode)
+                        assert_equal(np.asarray(res1), np.asarray(res3))
+
+    def test_kstest_2samp(self):
+        """Checks that 2-sample mstats.kstest and stats.kstest agree on masked arrays."""
+        for mode in ['auto', 'exact', 'asymp']:
+            with suppress_warnings() as sup:
+                if mode in ['auto', 'exact']:
+                    message = "ks_2samp: Exact calculation unsuccessful."
+                    sup.filter(RuntimeWarning, message)
+                for alternative in ['less', 'greater', 'two-sided']:
+                    for n in self.get_n():
+                        x, y, xm, ym = self.generate_xy_sample(n)
+                        res1 = stats.kstest(x, y, alternative=alternative, mode=mode)
+                        res2 = stats.mstats.kstest(xm, ym, alternative=alternative, mode=mode)
+                        assert_equal(np.asarray(res1), np.asarray(res2))
+                        res3 = stats.kstest(xm, y, alternative=alternative, mode=mode)
+                        assert_equal(np.asarray(res1), np.asarray(res3))
+
+
+class TestBrunnerMunzel:
+    # Data from (Lumley, 1996)
+    X = np.ma.masked_invalid([1, 2, 1, 1, 1, np.nan, 1, 1,
+                              1, 1, 1, 2, 4, 1, 1, np.nan])
+    Y = np.ma.masked_invalid([3, 3, 4, 3, np.nan, 1, 2, 3, 1, 1, 5, 4])
+    significant = 14
+
+    def test_brunnermunzel_one_sided(self):
+        # Results are compared with R's lawstat package.
+        u1, p1 = mstats.brunnermunzel(self.X, self.Y, alternative='less')
+        u2, p2 = mstats.brunnermunzel(self.Y, self.X, alternative='greater')
+        u3, p3 = mstats.brunnermunzel(self.X, self.Y, alternative='greater')
+        u4, p4 = mstats.brunnermunzel(self.Y, self.X, alternative='less')
+
+        assert_almost_equal(p1, p2, decimal=self.significant)
+        assert_almost_equal(p3, p4, decimal=self.significant)
+        assert_(p1 != p3)
+        assert_almost_equal(u1, 3.1374674823029505,
+                            decimal=self.significant)
+        assert_almost_equal(u2, -3.1374674823029505,
+                            decimal=self.significant)
+        assert_almost_equal(u3, 3.1374674823029505,
+                            decimal=self.significant)
+        assert_almost_equal(u4, -3.1374674823029505,
+                            decimal=self.significant)
+        assert_almost_equal(p1, 0.0028931043330757342,
+                            decimal=self.significant)
+        assert_almost_equal(p3, 0.99710689566692423,
+                            decimal=self.significant)
+
+    def test_brunnermunzel_two_sided(self):
+        # Results are compared with R's lawstat package.
+        u1, p1 = mstats.brunnermunzel(self.X, self.Y, alternative='two-sided')
+        u2, p2 = mstats.brunnermunzel(self.Y, self.X, alternative='two-sided')
+
+        assert_almost_equal(p1, p2, decimal=self.significant)
+        assert_almost_equal(u1, 3.1374674823029505,
+                            decimal=self.significant)
+        assert_almost_equal(u2, -3.1374674823029505,
+                            decimal=self.significant)
+        assert_almost_equal(p1, 0.0057862086661515377,
+                            decimal=self.significant)
+
+    def test_brunnermunzel_default(self):
+        # The default value for alternative is two-sided
+        u1, p1 = mstats.brunnermunzel(self.X, self.Y)
+        u2, p2 = mstats.brunnermunzel(self.Y, self.X)
+
+        assert_almost_equal(p1, p2, decimal=self.significant)
+        assert_almost_equal(u1, 3.1374674823029505,
+                            decimal=self.significant)
+        assert_almost_equal(u2, -3.1374674823029505,
+                            decimal=self.significant)
+        assert_almost_equal(p1, 0.0057862086661515377,
+                            decimal=self.significant)
+
+    def test_brunnermunzel_alternative_error(self):
+        alternative = "error"
+        distribution = "t"
+        assert_(alternative not in ["two-sided", "greater", "less"])
+        assert_raises(ValueError,
+                      mstats.brunnermunzel,
+                      self.X,
+                      self.Y,
+                      alternative,
+                      distribution)
+
+    def test_brunnermunzel_distribution_norm(self):
+        u1, p1 = mstats.brunnermunzel(self.X, self.Y, distribution="normal")
+        u2, p2 = mstats.brunnermunzel(self.Y, self.X, distribution="normal")
+        assert_almost_equal(p1, p2, decimal=self.significant)
+        assert_almost_equal(u1, 3.1374674823029505,
+                            decimal=self.significant)
+        assert_almost_equal(u2, -3.1374674823029505,
+                            decimal=self.significant)
+        assert_almost_equal(p1, 0.0017041417600383024,
+                            decimal=self.significant)
+
+    def test_brunnermunzel_distribution_error(self):
+        alternative = "two-sided"
+        distribution = "error"
+        assert_(alternative not in ["t", "normal"])
+        assert_raises(ValueError,
+                      mstats.brunnermunzel,
+                      self.X,
+                      self.Y,
+                      alternative,
+                      distribution)
+
+    def test_brunnermunzel_empty_input(self):
+        u1, p1 = mstats.brunnermunzel(self.X, [])
+        u2, p2 = mstats.brunnermunzel([], self.Y)
+        u3, p3 = mstats.brunnermunzel([], [])
+
+        assert_(np.isnan(u1))
+        assert_(np.isnan(p1))
+        assert_(np.isnan(u2))
+        assert_(np.isnan(p2))
+        assert_(np.isnan(u3))
+        assert_(np.isnan(p3))
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_mstats_extras.py b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_mstats_extras.py
new file mode 100644
index 00000000..6320fc8f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_mstats_extras.py
@@ -0,0 +1,150 @@
+import numpy as np
+import numpy.ma as ma
+import scipy.stats.mstats as ms
+
+from numpy.testing import (assert_equal, assert_almost_equal, assert_,
+    assert_allclose)
+
+
+def test_compare_medians_ms():
+    x = np.arange(7)
+    y = x + 10
+    assert_almost_equal(ms.compare_medians_ms(x, y), 0)
+
+    y2 = np.linspace(0, 1, num=10)
+    assert_almost_equal(ms.compare_medians_ms(x, y2), 0.017116406778)
+
+
+def test_hdmedian():
+    # 1-D array
+    x = ma.arange(11)
+    assert_allclose(ms.hdmedian(x), 5, rtol=1e-14)
+    x.mask = ma.make_mask(x)
+    x.mask[:7] = False
+    assert_allclose(ms.hdmedian(x), 3, rtol=1e-14)
+
+    # Check that `var` keyword returns a value.  TODO: check whether returned
+    # value is actually correct.
+    assert_(ms.hdmedian(x, var=True).size == 2)
+
+    # 2-D array
+    x2 = ma.arange(22).reshape((11, 2))
+    assert_allclose(ms.hdmedian(x2, axis=0), [10, 11])
+    x2.mask = ma.make_mask(x2)
+    x2.mask[:7, :] = False
+    assert_allclose(ms.hdmedian(x2, axis=0), [6, 7])
+
+
+def test_rsh():
+    np.random.seed(132345)
+    x = np.random.randn(100)
+    res = ms.rsh(x)
+    # Just a sanity check that the code runs and output shape is correct.
+    # TODO: check that implementation is correct.
+    assert_(res.shape == x.shape)
+
+    # Check points keyword
+    res = ms.rsh(x, points=[0, 1.])
+    assert_(res.size == 2)
+
+
+def test_mjci():
+    # Tests the Maritz-Jarrett estimator
+    data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
+                      296,299,306,376,428,515,666,1310,2611])
+    assert_almost_equal(ms.mjci(data),[55.76819,45.84028,198.87875],5)
+
+
+def test_trimmed_mean_ci():
+    # Tests the confidence intervals of the trimmed mean.
+    data = ma.array([545,555,558,572,575,576,578,580,
+                     594,605,635,651,653,661,666])
+    assert_almost_equal(ms.trimmed_mean(data,0.2), 596.2, 1)
+    assert_equal(np.round(ms.trimmed_mean_ci(data,(0.2,0.2)),1),
+                 [561.8, 630.6])
+
+
+def test_idealfourths():
+    # Tests ideal-fourths
+    test = np.arange(100)
+    assert_almost_equal(np.asarray(ms.idealfourths(test)),
+                        [24.416667,74.583333],6)
+    test_2D = test.repeat(3).reshape(-1,3)
+    assert_almost_equal(ms.idealfourths(test_2D, axis=0),
+                        [[24.416667,24.416667,24.416667],
+                         [74.583333,74.583333,74.583333]],6)
+    assert_almost_equal(ms.idealfourths(test_2D, axis=1),
+                        test.repeat(2).reshape(-1,2))
+    test = [0, 0]
+    _result = ms.idealfourths(test)
+    assert_(np.isnan(_result).all())
+
+
+class TestQuantiles:
+    data = [0.706560797,0.727229578,0.990399276,0.927065621,0.158953014,
+        0.887764025,0.239407086,0.349638551,0.972791145,0.149789972,
+        0.936947700,0.132359948,0.046041972,0.641675031,0.945530547,
+        0.224218684,0.771450991,0.820257774,0.336458052,0.589113496,
+        0.509736129,0.696838829,0.491323573,0.622767425,0.775189248,
+        0.641461450,0.118455200,0.773029450,0.319280007,0.752229111,
+        0.047841438,0.466295911,0.583850781,0.840581845,0.550086491,
+        0.466470062,0.504765074,0.226855960,0.362641207,0.891620942,
+        0.127898691,0.490094097,0.044882048,0.041441695,0.317976349,
+        0.504135618,0.567353033,0.434617473,0.636243375,0.231803616,
+        0.230154113,0.160011327,0.819464108,0.854706985,0.438809221,
+        0.487427267,0.786907310,0.408367937,0.405534192,0.250444460,
+        0.995309248,0.144389588,0.739947527,0.953543606,0.680051621,
+        0.388382017,0.863530727,0.006514031,0.118007779,0.924024803,
+        0.384236354,0.893687694,0.626534881,0.473051932,0.750134705,
+        0.241843555,0.432947602,0.689538104,0.136934797,0.150206859,
+        0.474335206,0.907775349,0.525869295,0.189184225,0.854284286,
+        0.831089744,0.251637345,0.587038213,0.254475554,0.237781276,
+        0.827928620,0.480283781,0.594514455,0.213641488,0.024194386,
+        0.536668589,0.699497811,0.892804071,0.093835427,0.731107772]
+
+    def test_hdquantiles(self):
+        data = self.data
+        assert_almost_equal(ms.hdquantiles(data,[0., 1.]),
+                            [0.006514031, 0.995309248])
+        hdq = ms.hdquantiles(data,[0.25, 0.5, 0.75])
+        assert_almost_equal(hdq, [0.253210762, 0.512847491, 0.762232442,])
+
+        data = np.array(data).reshape(10,10)
+        hdq = ms.hdquantiles(data,[0.25,0.5,0.75],axis=0)
+        assert_almost_equal(hdq[:,0], ms.hdquantiles(data[:,0],[0.25,0.5,0.75]))
+        assert_almost_equal(hdq[:,-1], ms.hdquantiles(data[:,-1],[0.25,0.5,0.75]))
+        hdq = ms.hdquantiles(data,[0.25,0.5,0.75],axis=0,var=True)
+        assert_almost_equal(hdq[...,0],
+                            ms.hdquantiles(data[:,0],[0.25,0.5,0.75],var=True))
+        assert_almost_equal(hdq[...,-1],
+                            ms.hdquantiles(data[:,-1],[0.25,0.5,0.75], var=True))
+
+    def test_hdquantiles_sd(self):
+        # The standard error of the Harrell-Davis quantile estimates is a
+        # jackknife estimator, so we can check that the efficient version
+        # (hdquantiles_sd) matches a rudimentary but transparent
+        # implementation here.
+
+        hd_std_errs = ms.hdquantiles_sd(self.data)
+
+        # jackknife standard error, Introduction to the Bootstrap Eq. 11.5
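+        # In symbols, with theta_(i) the estimate computed with sample i left
+        # out and theta_bar the mean of the n leave-one-out estimates:
+        #
+        #     se_jack = sqrt((n - 1)/n * sum_i (theta_(i) - theta_bar)**2)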
+        n = len(self.data)
+        jdata = np.broadcast_to(self.data, (n, n))
+        jselector = np.logical_not(np.eye(n))  # leave out one sample each row
+        jdata = jdata[jselector].reshape(n, n-1)
+        jdist = ms.hdquantiles(jdata, axis=1)
+        jdist_mean = np.mean(jdist, axis=0)
+        jstd = ((n-1)/n * np.sum((jdist - jdist_mean)**2, axis=0))**.5
+
+        assert_almost_equal(hd_std_errs, jstd)
+        # Test actual values for good measure
+        assert_almost_equal(hd_std_errs, [0.0379258, 0.0380656, 0.0380013])
+
+        two_data_points = ms.hdquantiles_sd([1, 2])
+        assert_almost_equal(two_data_points, [0.5, 0.5, 0.5])
+
+    def test_mquantiles_cimj(self):
+        # Only test that code runs, implementation not checked for correctness
+        ci_lower, ci_upper = ms.mquantiles_cimj(self.data)
+        assert_(ci_lower.size == ci_upper.size == 3)
+
+
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_multivariate.py b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_multivariate.py
new file mode 100644
index 00000000..429ed284
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_multivariate.py
@@ -0,0 +1,2905 @@
+"""
+Test functions for multivariate normal distributions.
+
+"""
+import pickle
+
+from numpy.testing import (assert_allclose, assert_almost_equal,
+                           assert_array_almost_equal, assert_equal,
+                           assert_array_less, assert_)
+import pytest
+from pytest import raises as assert_raises
+
+from .test_continuous_basic import check_distribution_rvs
+
+import numpy
+import numpy as np
+
+import scipy.linalg
+from scipy.stats._multivariate import (_PSD,
+                                       _lnB,
+                                       _cho_inv_batch,
+                                       multivariate_normal_frozen)
+from scipy.stats import (multivariate_normal, multivariate_hypergeom,
+                         matrix_normal, special_ortho_group, ortho_group,
+                         random_correlation, unitary_group, dirichlet,
+                         beta, wishart, multinomial, invwishart, chi2,
+                         invgamma, norm, uniform, ks_2samp, kstest, binom,
+                         hypergeom, multivariate_t, cauchy, normaltest,
+                         random_table, uniform_direction)
+from scipy.stats import _covariance, Covariance
+
+from scipy.integrate import romb
+from scipy.special import multigammaln
+
+from .common_tests import check_random_state_property
+
+from unittest.mock import patch
+
+
+def assert_close(res, ref, *args, **kwargs):
+    res, ref = np.asarray(res), np.asarray(ref)
+    assert_allclose(res, ref, *args, **kwargs)
+    assert_equal(res.shape, ref.shape)
+
+
+class TestCovariance:
+
+    def test_input_validation(self):
+
+        message = "The input `precision` must be a square, two-dimensional..."
+        with pytest.raises(ValueError, match=message):
+            _covariance.CovViaPrecision(np.ones(2))
+
+        message = "`precision.shape` must equal `covariance.shape`."
+        with pytest.raises(ValueError, match=message):
+            _covariance.CovViaPrecision(np.eye(3), covariance=np.eye(2))
+
+        message = "The input `diagonal` must be a one-dimensional array..."
+        with pytest.raises(ValueError, match=message):
+            _covariance.CovViaDiagonal("alpaca")
+
+        message = "The input `cholesky` must be a square, two-dimensional..."
+        with pytest.raises(ValueError, match=message):
+            _covariance.CovViaCholesky(np.ones(2))
+
+        message = "The input `eigenvalues` must be a one-dimensional..."
+        with pytest.raises(ValueError, match=message):
+            _covariance.CovViaEigendecomposition(("alpaca", np.eye(2)))
+
+        message = "The input `eigenvectors` must be a square..."
+        with pytest.raises(ValueError, match=message):
+            _covariance.CovViaEigendecomposition((np.ones(2), "alpaca"))
+
+        message = "The shapes of `eigenvalues` and `eigenvectors` must be..."
+        with pytest.raises(ValueError, match=message):
+            _covariance.CovViaEigendecomposition(([1, 2, 3], np.eye(2)))
+
+    _covariance_preprocessing = {"Diagonal": np.diag,
+                                 "Precision": np.linalg.inv,
+                                 "Cholesky": np.linalg.cholesky,
+                                 "Eigendecomposition": np.linalg.eigh,
+                                 "PSD": lambda x:
+                                     _PSD(x, allow_singular=True)}
+    _all_covariance_types = np.array(list(_covariance_preprocessing))
+    _matrices = {"diagonal full rank": np.diag([1, 2, 3]),
+                 "general full rank": [[5, 1, 3], [1, 6, 4], [3, 4, 7]],
+                 "diagonal singular": np.diag([1, 0, 3]),
+                 "general singular": [[5, -1, 0], [-1, 5, 0], [0, 0, 0]]}
+    _cov_types = {"diagonal full rank": _all_covariance_types,
+                  "general full rank": _all_covariance_types[1:],
+                  "diagonal singular": _all_covariance_types[[0, -2, -1]],
+                  "general singular": _all_covariance_types[-2:]}
+
+    @pytest.mark.parametrize("cov_type_name", _all_covariance_types[:-1])
+    def test_factories(self, cov_type_name):
+        A = np.diag([1, 2, 3])
+        x = [-4, 2, 5]
+
+        cov_type = getattr(_covariance, f"CovVia{cov_type_name}")
+        preprocessing = self._covariance_preprocessing[cov_type_name]
+        factory = getattr(Covariance, f"from_{cov_type_name.lower()}")
+
+        res = factory(preprocessing(A))
+        ref = cov_type(preprocessing(A))
+        assert type(res) == type(ref)
+        assert_allclose(res.whiten(x), ref.whiten(x))
+
+    @pytest.mark.parametrize("matrix_type", list(_matrices))
+    @pytest.mark.parametrize("cov_type_name", _all_covariance_types)
+    def test_covariance(self, matrix_type, cov_type_name):
+        message = (f"CovVia{cov_type_name} does not support {matrix_type} "
+                   "matrices")
+        if cov_type_name not in self._cov_types[matrix_type]:
+            pytest.skip(message)
+
+        A = self._matrices[matrix_type]
+        cov_type = getattr(_covariance, f"CovVia{cov_type_name}")
+        preprocessing = self._covariance_preprocessing[cov_type_name]
+
+        psd = _PSD(A, allow_singular=True)
+
+        # test properties
+        cov_object = cov_type(preprocessing(A))
+        assert_close(cov_object.log_pdet, psd.log_pdet)
+        assert_equal(cov_object.rank, psd.rank)
+        assert_equal(cov_object.shape, np.asarray(A).shape)
+        assert_close(cov_object.covariance, np.asarray(A))
+
+        # test whitening/coloring 1D x
+        rng = np.random.default_rng(5292808890472453840)
+        x = rng.random(size=3)
+        res = cov_object.whiten(x)
+        ref = x @ psd.U
+        # res != ref in general; but res @ res == ref @ ref
+        assert_close(res @ res, ref @ ref)
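+        # (a whitening matrix is unique only up to an orthogonal transform,
+        # so only rotation-invariant quantities such as the squared norm are
+        # comparable across factorizations)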
+        if hasattr(cov_object, "_colorize") and "singular" not in matrix_type:
+            # CovViaPSD does not have _colorize
+            assert_close(cov_object.colorize(res), x)
+
+        # test whitening/coloring 3D x
+        x = rng.random(size=(2, 4, 3))
+        res = cov_object.whiten(x)
+        ref = x @ psd.U
+        assert_close((res**2).sum(axis=-1), (ref**2).sum(axis=-1))
+        if hasattr(cov_object, "_colorize") and "singular" not in matrix_type:
+            assert_close(cov_object.colorize(res), x)
+
+    @pytest.mark.parametrize("size", [None, tuple(), 1, (2, 4, 3)])
+    @pytest.mark.parametrize("matrix_type", list(_matrices))
+    @pytest.mark.parametrize("cov_type_name", _all_covariance_types)
+    def test_mvn_with_covariance(self, size, matrix_type, cov_type_name):
+        message = (f"CovVia{cov_type_name} does not support {matrix_type} "
+                   "matrices")
+        if cov_type_name not in self._cov_types[matrix_type]:
+            pytest.skip(message)
+
+        A = self._matrices[matrix_type]
+        cov_type = getattr(_covariance, f"CovVia{cov_type_name}")
+        preprocessing = self._covariance_preprocessing[cov_type_name]
+
+        mean = [0.1, 0.2, 0.3]
+        cov_object = cov_type(preprocessing(A))
+        mvn = multivariate_normal
+        dist0 = multivariate_normal(mean, A, allow_singular=True)
+        dist1 = multivariate_normal(mean, cov_object, allow_singular=True)
+
+        rng = np.random.default_rng(5292808890472453840)
+        x = rng.multivariate_normal(mean, A, size=size)
+        rng = np.random.default_rng(5292808890472453840)
+        x1 = mvn.rvs(mean, cov_object, size=size, random_state=rng)
+        rng = np.random.default_rng(5292808890472453840)
+        x2 = mvn(mean, cov_object, seed=rng).rvs(size=size)
+        if isinstance(cov_object, _covariance.CovViaPSD):
+            assert_close(x1, np.squeeze(x))  # for backward compatibility
+            assert_close(x2, np.squeeze(x))
+        else:
+            assert_equal(x1.shape, x.shape)
+            assert_equal(x2.shape, x.shape)
+            assert_close(x2, x1)
+
+        assert_close(mvn.pdf(x, mean, cov_object), dist0.pdf(x))
+        assert_close(dist1.pdf(x), dist0.pdf(x))
+        assert_close(mvn.logpdf(x, mean, cov_object), dist0.logpdf(x))
+        assert_close(dist1.logpdf(x), dist0.logpdf(x))
+        assert_close(mvn.entropy(mean, cov_object), dist0.entropy())
+        assert_close(dist1.entropy(), dist0.entropy())
+
+    @pytest.mark.parametrize("size", [tuple(), (2, 4, 3)])
+    @pytest.mark.parametrize("cov_type_name", _all_covariance_types)
+    def test_mvn_with_covariance_cdf(self, size, cov_type_name):
+        # This is split from the test above because it's slow to run
+        # with all matrix types, and there's no need to: _mvn.mvnun
+        # does the calculation.  All `Covariance` needs to do is
+        # provide the `covariance` attribute.
+        matrix_type = "diagonal full rank"
+        A = self._matrices[matrix_type]
+        cov_type = getattr(_covariance, f"CovVia{cov_type_name}")
+        preprocessing = self._covariance_preprocessing[cov_type_name]
+
+        mean = [0.1, 0.2, 0.3]
+        cov_object = cov_type(preprocessing(A))
+        mvn = multivariate_normal
+        dist0 = multivariate_normal(mean, A, allow_singular=True)
+        dist1 = multivariate_normal(mean, cov_object, allow_singular=True)
+
+        rng = np.random.default_rng(5292808890472453840)
+        x = rng.multivariate_normal(mean, A, size=size)
+
+        assert_close(mvn.cdf(x, mean, cov_object), dist0.cdf(x))
+        assert_close(dist1.cdf(x), dist0.cdf(x))
+        assert_close(mvn.logcdf(x, mean, cov_object), dist0.logcdf(x))
+        assert_close(dist1.logcdf(x), dist0.logcdf(x))
+
+    def test_covariance_instantiation(self):
+        message = "The `Covariance` class cannot be instantiated directly."
+        with pytest.raises(NotImplementedError, match=message):
+            Covariance()
+
+    @pytest.mark.filterwarnings("ignore::RuntimeWarning")  # matrix not PSD
+    def test_gh9942(self):
+        # Originally there was a mistake in the `multivariate_normal_frozen`
+        # `rvs` method that caused all covariance objects to be processed as
+        # a `_CovViaPSD`. Ensure that this is resolved.
+        A = np.diag([1, 2, -1e-8])
+        n = A.shape[0]
+        mean = np.zeros(n)
+
+        # Error if the matrix is processed as a `_CovViaPSD`
+        with pytest.raises(ValueError, match="The input matrix must be..."):
+            multivariate_normal(mean, A).rvs()
+
+        # No error if it is provided as a `CovViaEigendecomposition`
+        seed = 3562050283508273023
+        rng1 = np.random.default_rng(seed)
+        rng2 = np.random.default_rng(seed)
+        cov = Covariance.from_eigendecomposition(np.linalg.eigh(A))
+        rv = multivariate_normal(mean, cov)
+        res = rv.rvs(random_state=rng1)
+        ref = multivariate_normal.rvs(mean, cov, random_state=rng2)
+        assert_equal(res, ref)
+
+
+def _sample_orthonormal_matrix(n):
+    M = np.random.randn(n, n)
+    u, s, v = scipy.linalg.svd(M)
+    return u
+
+
+class TestMultivariateNormal:
+    def test_input_shape(self):
+        mu = np.arange(3)
+        cov = np.identity(2)
+        assert_raises(ValueError, multivariate_normal.pdf, (0, 1), mu, cov)
+        assert_raises(ValueError, multivariate_normal.pdf, (0, 1, 2), mu, cov)
+        assert_raises(ValueError, multivariate_normal.cdf, (0, 1), mu, cov)
+        assert_raises(ValueError, multivariate_normal.cdf, (0, 1, 2), mu, cov)
+
+    def test_scalar_values(self):
+        np.random.seed(1234)
+
+        # When evaluated on scalar data, the pdf should return a scalar
+        x, mean, cov = 1.5, 1.7, 2.5
+        pdf = multivariate_normal.pdf(x, mean, cov)
+        assert_equal(pdf.ndim, 0)
+
+        # When evaluated on a single vector, the pdf should return a scalar
+        x = np.random.randn(5)
+        mean = np.random.randn(5)
+        cov = np.abs(np.random.randn(5))  # Diagonal values for cov. matrix
+        pdf = multivariate_normal.pdf(x, mean, cov)
+        assert_equal(pdf.ndim, 0)
+
+        # When evaluated on scalar data, the cdf should return a scalar
+        x, mean, cov = 1.5, 1.7, 2.5
+        cdf = multivariate_normal.cdf(x, mean, cov)
+        assert_equal(cdf.ndim, 0)
+
+        # When evaluated on a single vector, the cdf should return a scalar
+        x = np.random.randn(5)
+        mean = np.random.randn(5)
+        cov = np.abs(np.random.randn(5))  # Diagonal values for cov. matrix
+        cdf = multivariate_normal.cdf(x, mean, cov)
+        assert_equal(cdf.ndim, 0)
+
+    def test_logpdf(self):
+        # Check that the log of the pdf is in fact the logpdf
+        np.random.seed(1234)
+        x = np.random.randn(5)
+        mean = np.random.randn(5)
+        cov = np.abs(np.random.randn(5))
+        d1 = multivariate_normal.logpdf(x, mean, cov)
+        d2 = multivariate_normal.pdf(x, mean, cov)
+        assert_allclose(d1, np.log(d2))
+
+    def test_logpdf_default_values(self):
+        # Check that the log of the pdf is in fact the logpdf
+        # with default parameters mean=None and cov=1
+        np.random.seed(1234)
+        x = np.random.randn(5)
+        d1 = multivariate_normal.logpdf(x)
+        d2 = multivariate_normal.pdf(x)
+        # check whether default values are being used
+        d3 = multivariate_normal.logpdf(x, None, 1)
+        d4 = multivariate_normal.pdf(x, None, 1)
+        assert_allclose(d1, np.log(d2))
+        assert_allclose(d3, np.log(d4))
+
+    def test_logcdf(self):
+        # Check that the log of the cdf is in fact the logcdf
+        np.random.seed(1234)
+        x = np.random.randn(5)
+        mean = np.random.randn(5)
+        cov = np.abs(np.random.randn(5))
+        d1 = multivariate_normal.logcdf(x, mean, cov)
+        d2 = multivariate_normal.cdf(x, mean, cov)
+        assert_allclose(d1, np.log(d2))
+
+    def test_logcdf_default_values(self):
+        # Check that the log of the cdf is in fact the logcdf
+        # with default parameters mean=None and cov=1
+        np.random.seed(1234)
+        x = np.random.randn(5)
+        d1 = multivariate_normal.logcdf(x)
+        d2 = multivariate_normal.cdf(x)
+        # check whether default values are being used
+        d3 = multivariate_normal.logcdf(x, None, 1)
+        d4 = multivariate_normal.cdf(x, None, 1)
+        assert_allclose(d1, np.log(d2))
+        assert_allclose(d3, np.log(d4))
+
+    def test_rank(self):
+        # Check that the rank is detected correctly.
+        np.random.seed(1234)
+        n = 4
+        mean = np.random.randn(n)
+        for expected_rank in range(1, n + 1):
+            s = np.random.randn(n, expected_rank)
+            cov = np.dot(s, s.T)
+            distn = multivariate_normal(mean, cov, allow_singular=True)
+            assert_equal(distn.cov_object.rank, expected_rank)
+
+    def test_degenerate_distributions(self):
+
+        for n in range(1, 5):
+            z = np.random.randn(n)
+            for k in range(1, n):
+                # Sample a small covariance matrix.
+                s = np.random.randn(k, k)
+                cov_kk = np.dot(s, s.T)
+
+                # Embed the small covariance matrix into a larger singular one.
+                cov_nn = np.zeros((n, n))
+                cov_nn[:k, :k] = cov_kk
+
+                # Embed part of the vector in the same way
+                x = np.zeros(n)
+                x[:k] = z[:k]
+
+                # Define a rotation of the larger low rank matrix.
+                u = _sample_orthonormal_matrix(n)
+                cov_rr = np.dot(u, np.dot(cov_nn, u.T))
+                y = np.dot(u, x)
+
+                # Check some identities.
+                distn_kk = multivariate_normal(np.zeros(k), cov_kk,
+                                               allow_singular=True)
+                distn_nn = multivariate_normal(np.zeros(n), cov_nn,
+                                               allow_singular=True)
+                distn_rr = multivariate_normal(np.zeros(n), cov_rr,
+                                               allow_singular=True)
+                assert_equal(distn_kk.cov_object.rank, k)
+                assert_equal(distn_nn.cov_object.rank, k)
+                assert_equal(distn_rr.cov_object.rank, k)
+                pdf_kk = distn_kk.pdf(x[:k])
+                pdf_nn = distn_nn.pdf(x)
+                pdf_rr = distn_rr.pdf(y)
+                assert_allclose(pdf_kk, pdf_nn)
+                assert_allclose(pdf_kk, pdf_rr)
+                logpdf_kk = distn_kk.logpdf(x[:k])
+                logpdf_nn = distn_nn.logpdf(x)
+                logpdf_rr = distn_rr.logpdf(y)
+                assert_allclose(logpdf_kk, logpdf_nn)
+                assert_allclose(logpdf_kk, logpdf_rr)
+
+                # Add an orthogonal component and find the density
+                y_orth = y + u[:, -1]
+                pdf_rr_orth = distn_rr.pdf(y_orth)
+                logpdf_rr_orth = distn_rr.logpdf(y_orth)
+
+                # Ensure that this has zero probability
+                assert_equal(pdf_rr_orth, 0.0)
+                assert_equal(logpdf_rr_orth, -np.inf)
+
+    def test_degenerate_array(self):
+        # Test that we can generate arrays of random variate from a degenerate
+        # multivariate normal, and that the pdf for these samples is non-zero
+        # (i.e. samples from the distribution lie on the subspace)
+        k = 10
+        for n in range(2, 6):
+            for r in range(1, n):
+                mn = np.zeros(n)
+                u = _sample_orthonormal_matrix(n)[:, :r]
+                vr = np.dot(u, u.T)
+                X = multivariate_normal.rvs(mean=mn, cov=vr, size=k)
+
+                pdf = multivariate_normal.pdf(X, mean=mn, cov=vr,
+                                              allow_singular=True)
+                assert_equal(pdf.size, k)
+                assert np.all(pdf > 0.0)
+
+                logpdf = multivariate_normal.logpdf(X, mean=mn, cov=vr,
+                                                    allow_singular=True)
+                assert_equal(logpdf.size, k)
+                assert np.all(logpdf > -np.inf)
+
+    def test_large_pseudo_determinant(self):
+        # Check that large pseudo-determinants are handled appropriately.
+
+        # Construct a singular diagonal covariance matrix
+        # whose pseudo determinant overflows double precision.
+        large_total_log = 1000.0
+        npos = 100
+        nzero = 2
+        large_entry = np.exp(large_total_log / npos)
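+        # Each positive diagonal entry is e^10, so the product of the 100
+        # positive entries is e^1000, which overflows double precision
+        # (max ~e^709.78); only a log-domain pseudo-determinant can hold it.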
+        n = npos + nzero
+        cov = np.zeros((n, n), dtype=float)
+        np.fill_diagonal(cov, large_entry)
+        cov[-nzero:, -nzero:] = 0
+
+        # Check some determinants.
+        assert_equal(scipy.linalg.det(cov), 0)
+        assert_equal(scipy.linalg.det(cov[:npos, :npos]), np.inf)
+        assert_allclose(np.linalg.slogdet(cov[:npos, :npos]),
+                        (1, large_total_log))
+
+        # Check the pseudo-determinant.
+        psd = _PSD(cov)
+        assert_allclose(psd.log_pdet, large_total_log)
+
+    def test_broadcasting(self):
+        np.random.seed(1234)
+        n = 4
+
+        # Construct a random covariance matrix.
+        data = np.random.randn(n, n)
+        cov = np.dot(data, data.T)
+        mean = np.random.randn(n)
+
+        # Construct an ndarray which can be interpreted as
+        # a 2x3 array whose elements are random data vectors.
+        X = np.random.randn(2, 3, n)
+
+        # Check that multiple data points can be evaluated at once.
+        desired_pdf = multivariate_normal.pdf(X, mean, cov)
+        desired_cdf = multivariate_normal.cdf(X, mean, cov)
+        for i in range(2):
+            for j in range(3):
+                actual = multivariate_normal.pdf(X[i, j], mean, cov)
+                assert_allclose(actual, desired_pdf[i,j])
+                # Repeat for cdf
+                actual = multivariate_normal.cdf(X[i, j], mean, cov)
+                assert_allclose(actual, desired_cdf[i,j], rtol=1e-3)
+
+    def test_normal_1D(self):
+        # The probability density function for a 1D normal variable should
+        # agree with the standard normal distribution in scipy.stats.distributions
+        x = np.linspace(0, 2, 10)
+        mean, cov = 1.2, 0.9
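+        # In one dimension `cov` is the variance, so the equivalent
+        # scipy.stats.norm scale parameter is its square root.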
+        scale = cov**0.5
+        d1 = norm.pdf(x, mean, scale)
+        d2 = multivariate_normal.pdf(x, mean, cov)
+        assert_allclose(d1, d2)
+        # The same should hold for the cumulative distribution function
+        d1 = norm.cdf(x, mean, scale)
+        d2 = multivariate_normal.cdf(x, mean, cov)
+        assert_allclose(d1, d2)
+
+    def test_marginalization(self):
+        # Integrating out one of the variables of a 2D Gaussian should
+        # yield a 1D Gaussian
+        mean = np.array([2.5, 3.5])
+        cov = np.array([[.5, 0.2], [0.2, .6]])
+        n = 2 ** 8 + 1  # Number of samples
+        delta = 6 / (n - 1)  # Grid spacing
+
+        v = np.linspace(0, 6, n)
+        xv, yv = np.meshgrid(v, v)
+        pos = np.empty((n, n, 2))
+        pos[:, :, 0] = xv
+        pos[:, :, 1] = yv
+        pdf = multivariate_normal.pdf(pos, mean, cov)
+
+        # Marginalize over x and y axis
+        margin_x = romb(pdf, delta, axis=0)
+        margin_y = romb(pdf, delta, axis=1)
+
+        # Compare with the analytic marginals: the i-th marginal of a
+        # multivariate normal is univariate normal with mean mean[i]
+        # and variance cov[i, i]
+        gauss_x = norm.pdf(v, loc=mean[0], scale=cov[0, 0] ** 0.5)
+        gauss_y = norm.pdf(v, loc=mean[1], scale=cov[1, 1] ** 0.5)
+        assert_allclose(margin_x, gauss_x, rtol=1e-2, atol=1e-2)
+        assert_allclose(margin_y, gauss_y, rtol=1e-2, atol=1e-2)
+
+    def test_frozen(self):
+        # The frozen distribution should agree with the regular one
+        np.random.seed(1234)
+        x = np.random.randn(5)
+        mean = np.random.randn(5)
+        cov = np.abs(np.random.randn(5))
+        norm_frozen = multivariate_normal(mean, cov)
+        assert_allclose(norm_frozen.pdf(x), multivariate_normal.pdf(x, mean, cov))
+        assert_allclose(norm_frozen.logpdf(x),
+                        multivariate_normal.logpdf(x, mean, cov))
+        assert_allclose(norm_frozen.cdf(x), multivariate_normal.cdf(x, mean, cov))
+        assert_allclose(norm_frozen.logcdf(x),
+                        multivariate_normal.logcdf(x, mean, cov))
+
+    @pytest.mark.parametrize(
+        'covariance',
+        [
+            np.eye(2),
+            Covariance.from_diagonal([1, 1]),
+        ]
+    )
+    def test_frozen_multivariate_normal_exposes_attributes(self, covariance):
+        mean = np.ones((2,))
+        cov_should_be = np.eye(2)
+        norm_frozen = multivariate_normal(mean, covariance)
+        assert np.allclose(norm_frozen.mean, mean)
+        assert np.allclose(norm_frozen.cov, cov_should_be)
+
+    def test_pseudodet_pinv(self):
+        # Make sure that pseudo-inverse and pseudo-det agree on cutoff
+
+        # Assemble random covariance matrix with large and small eigenvalues
+        np.random.seed(1234)
+        n = 7
+        x = np.random.randn(n, n)
+        cov = np.dot(x, x.T)
+        s, u = scipy.linalg.eigh(cov)
+        s = np.full(n, 0.5)
+        s[0] = 1.0
+        s[-1] = 1e-7
+        cov = np.dot(u, np.dot(np.diag(s), u.T))
+
+        # Set cond so that the lowest eigenvalue is below the cutoff
+        cond = 1e-5
+        psd = _PSD(cov, cond=cond)
+        psd_pinv = _PSD(psd.pinv, cond=cond)
+
+        # Check that the log pseudo-determinant agrees with the sum
+        # of the logs of all but the smallest eigenvalue
+        assert_allclose(psd.log_pdet, np.sum(np.log(s[:-1])))
+        # Check that the pseudo-determinant of the pseudo-inverse
+        # agrees with 1 / pseudo-determinant
+        assert_allclose(-psd.log_pdet, psd_pinv.log_pdet)
+
+    def test_exception_nonsquare_cov(self):
+        cov = [[1, 2, 3], [4, 5, 6]]
+        assert_raises(ValueError, _PSD, cov)
+
+    def test_exception_nonfinite_cov(self):
+        cov_nan = [[1, 0], [0, np.nan]]
+        assert_raises(ValueError, _PSD, cov_nan)
+        cov_inf = [[1, 0], [0, np.inf]]
+        assert_raises(ValueError, _PSD, cov_inf)
+
+    def test_exception_non_psd_cov(self):
+        cov = [[1, 0], [0, -1]]
+        assert_raises(ValueError, _PSD, cov)
+
+    def test_exception_singular_cov(self):
+        np.random.seed(1234)
+        x = np.random.randn(5)
+        mean = np.random.randn(5)
+        cov = np.ones((5, 5))
+        e = np.linalg.LinAlgError
+        assert_raises(e, multivariate_normal, mean, cov)
+        assert_raises(e, multivariate_normal.pdf, x, mean, cov)
+        assert_raises(e, multivariate_normal.logpdf, x, mean, cov)
+        assert_raises(e, multivariate_normal.cdf, x, mean, cov)
+        assert_raises(e, multivariate_normal.logcdf, x, mean, cov)
+
+        # Message used to be "singular matrix", but this is more accurate.
+        # See gh-15508
+        cov = [[1., 0.], [1., 1.]]
+        msg = "When `allow_singular is False`, the input matrix"
+        with pytest.raises(np.linalg.LinAlgError, match=msg):
+            multivariate_normal(cov=cov)
+
+    def test_R_values(self):
+        # Compare the multivariate pdf with some values precomputed
+        # in R version 3.0.1 (2013-05-16) on Mac OS X 10.6.
+
+        # The values below were generated by the following R-script:
+        # > library(mnormt)
+        # > x <- seq(0, 2, length=5)
+        # > y <- 3*x - 2
+        # > z <- x + cos(y)
+        # > mu <- c(1, 3, 2)
+        # > Sigma <- matrix(c(1,2,0,2,5,0.5,0,0.5,3), 3, 3)
+        # > r_pdf <- dmnorm(cbind(x,y,z), mu, Sigma)
+        r_pdf = np.array([0.0002214706, 0.0013819953, 0.0049138692,
+                          0.0103803050, 0.0140250800])
+
+        x = np.linspace(0, 2, 5)
+        y = 3 * x - 2
+        z = x + np.cos(y)
+        r = np.array([x, y, z]).T
+
+        mean = np.array([1, 3, 2], 'd')
+        cov = np.array([[1, 2, 0], [2, 5, .5], [0, .5, 3]], 'd')
+
+        pdf = multivariate_normal.pdf(r, mean, cov)
+        assert_allclose(pdf, r_pdf, atol=1e-10)
+
+        # Compare the multivariate cdf with some values precomputed
+        # in R version 3.3.2 (2016-10-31) on Debian GNU/Linux.
+
+        # The values below were generated by the following R-script:
+        # > library(mnormt)
+        # > x <- seq(0, 2, length=5)
+        # > y <- 3*x - 2
+        # > z <- x + cos(y)
+        # > mu <- c(1, 3, 2)
+        # > Sigma <- matrix(c(1,2,0,2,5,0.5,0,0.5,3), 3, 3)
+        # > r_cdf <- pmnorm(cbind(x,y,z), mu, Sigma)
+        r_cdf = np.array([0.0017866215, 0.0267142892, 0.0857098761,
+                          0.1063242573, 0.2501068509])
+
+        cdf = multivariate_normal.cdf(r, mean, cov)
+        assert_allclose(cdf, r_cdf, atol=2e-5)
+
+        # Also test bivariate cdf with some values precomputed
+        # in R version 3.3.2 (2016-10-31) on Debian GNU/Linux.
+
+        # The values below were generated by the following R-script:
+        # > library(mnormt)
+        # > x <- seq(0, 2, length=5)
+        # > y <- 3*x - 2
+        # > mu <- c(1, 3)
+        # > Sigma <- matrix(c(1,2,2,5), 2, 2)
+        # > r_cdf2 <- pmnorm(cbind(x,y), mu, Sigma)
+        r_cdf2 = np.array([0.01262147, 0.05838989, 0.18389571,
+                           0.40696599, 0.66470577])
+
+        r2 = np.array([x, y]).T
+
+        mean2 = np.array([1, 3], 'd')
+        cov2 = np.array([[1, 2], [2, 5]], 'd')
+
+        cdf2 = multivariate_normal.cdf(r2, mean2, cov2)
+        assert_allclose(cdf2, r_cdf2, atol=1e-5)
+
+    def test_multivariate_normal_rvs_zero_covariance(self):
+        mean = np.zeros(2)
+        covariance = np.zeros((2, 2))
+        model = multivariate_normal(mean, covariance, allow_singular=True)
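+        # With a zero covariance matrix the distribution degenerates to a
+        # point mass at the mean, so every draw equals the mean exactly.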
+        sample = model.rvs()
+        assert_equal(sample, [0, 0])
+
+    def test_rvs_shape(self):
+        # Check that rvs parses the mean and covariance correctly, and returns
+        # an array of the right shape
+        N = 300
+        d = 4
+        sample = multivariate_normal.rvs(mean=np.zeros(d), cov=1, size=N)
+        assert_equal(sample.shape, (N, d))
+
+        sample = multivariate_normal.rvs(mean=None,
+                                         cov=np.array([[2, .1], [.1, 1]]),
+                                         size=N)
+        assert_equal(sample.shape, (N, 2))
+
+        u = multivariate_normal(mean=0, cov=1)
+        sample = u.rvs(N)
+        assert_equal(sample.shape, (N, ))
+
+    def test_large_sample(self):
+        # Generate large sample and compare sample mean and sample covariance
+        # with mean and covariance matrix.
+
+        np.random.seed(2846)
+
+        n = 3
+        mean = np.random.randn(n)
+        M = np.random.randn(n, n)
+        cov = np.dot(M, M.T)
+        size = 5000
+
+        sample = multivariate_normal.rvs(mean, cov, size)
+
+        assert_allclose(np.cov(sample.T), cov, rtol=1e-1)
+        assert_allclose(sample.mean(0), mean, rtol=1e-1)
+
+    def test_entropy(self):
+        np.random.seed(2846)
+
+        n = 3
+        mean = np.random.randn(n)
+        M = np.random.randn(n, n)
+        cov = np.dot(M, M.T)
+
+        rv = multivariate_normal(mean, cov)
+
+        # Check that frozen distribution agrees with entropy function
+        assert_almost_equal(rv.entropy(), multivariate_normal.entropy(mean, cov))
+        # Compare entropy with manually computed expression involving
+        # the sum of the logs of the eigenvalues of the covariance matrix
+        eigs = np.linalg.eig(cov)[0]
+        desired = 1 / 2 * (n * (np.log(2 * np.pi) + 1) + np.sum(np.log(eigs)))
+        assert_almost_equal(desired, rv.entropy())
+
+    def test_lnB(self):
+        alpha = np.array([1, 1, 1])
+        # B(alpha) = prod(Gamma(alpha_i)) / Gamma(sum(alpha_i)) = 1/Gamma(3)
+        desired = .5  # i.e. e^lnB = 1/2 for alpha = [1, 1, 1]
+
+        assert_almost_equal(np.exp(_lnB(alpha)), desired)
+
+    def test_cdf_with_lower_limit_arrays(self):
+        # test CDF with lower limit in several dimensions
+        rng = np.random.default_rng(2408071309372769818)
+        mean = [0, 0]
+        cov = np.eye(2)
+        a = rng.random((4, 3, 2))*6 - 3
+        b = rng.random((4, 3, 2))*6 - 3
+
+        cdf1 = multivariate_normal.cdf(b, mean, cov, lower_limit=a)
+
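+        # By inclusion-exclusion in two dimensions,
+        # P(a < X <= b) = F(b1, b2) + F(a1, a2) - F(a1, b2) - F(b1, a2).
+        # The components are iid standard normal here, so the argument
+        # order within each mixed pair below is immaterial.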
+        cdf2a = multivariate_normal.cdf(b, mean, cov)
+        cdf2b = multivariate_normal.cdf(a, mean, cov)
+        ab1 = np.concatenate((a[..., 0:1], b[..., 1:2]), axis=-1)
+        ab2 = np.concatenate((a[..., 1:2], b[..., 0:1]), axis=-1)
+        cdf2ab1 = multivariate_normal.cdf(ab1, mean, cov)
+        cdf2ab2 = multivariate_normal.cdf(ab2, mean, cov)
+        cdf2 = cdf2a + cdf2b - cdf2ab1 - cdf2ab2
+
+        assert_allclose(cdf1, cdf2)
+
+    def test_cdf_with_lower_limit_consistency(self):
+        # check that multivariate normal CDF functions are consistent
+        rng = np.random.default_rng(2408071309372769818)
+        mean = rng.random(3)
+        cov = rng.random((3, 3))
+        cov = cov @ cov.T
+        a = rng.random((2, 3))*6 - 3
+        b = rng.random((2, 3))*6 - 3
+
+        cdf1 = multivariate_normal.cdf(b, mean, cov, lower_limit=a)
+        cdf2 = multivariate_normal(mean, cov).cdf(b, lower_limit=a)
+        cdf3 = np.exp(multivariate_normal.logcdf(b, mean, cov, lower_limit=a))
+        cdf4 = np.exp(multivariate_normal(mean, cov).logcdf(b, lower_limit=a))
+
+        assert_allclose(cdf2, cdf1, rtol=1e-4)
+        assert_allclose(cdf3, cdf1, rtol=1e-4)
+        assert_allclose(cdf4, cdf1, rtol=1e-4)
+
+    def test_cdf_signs(self):
+        # check that sign of output is correct when np.any(lower > x)
+        mean = np.zeros(3)
+        cov = np.eye(3)
+        b = [[1, 1, 1], [0, 0, 0], [1, 0, 1], [0, 1, 0]]
+        a = [[0, 0, 0], [1, 1, 1], [0, 1, 0], [1, 0, 1]]
+        # when odd number of elements of b < a, output is negative
+        expected_signs = np.array([1, -1, -1, 1])
+        cdf = multivariate_normal.cdf(b, mean, cov, lower_limit=a)
+        assert_allclose(cdf, cdf[0]*expected_signs)
+
+    def test_mean_cov(self):
+        # test the interaction between a Covariance object and mean
+        P = np.diag(1 / np.array([1, 2, 3]))
+        cov_object = _covariance.CovViaPrecision(P)
+
+        message = "`cov` represents a covariance matrix in 3 dimensions..."
+        with pytest.raises(ValueError, match=message):
+            multivariate_normal.entropy([0, 0], cov_object)
+
+        with pytest.raises(ValueError, match=message):
+            multivariate_normal([0, 0], cov_object)
+
+        x = [0.5, 0.5, 0.5]
+        ref = multivariate_normal.pdf(x, [0, 0, 0], cov_object)
+        assert_equal(multivariate_normal.pdf(x, cov=cov_object), ref)
+
+        ref = multivariate_normal.pdf(x, [1, 1, 1], cov_object)
+        assert_equal(multivariate_normal.pdf(x, 1, cov=cov_object), ref)
+
+
+class TestMatrixNormal:
+
+    def test_bad_input(self):
+        # Check that bad inputs raise errors
+        num_rows = 4
+        num_cols = 3
+        M = np.full((num_rows,num_cols), 0.3)
+        U = 0.5 * np.identity(num_rows) + np.full((num_rows, num_rows), 0.5)
+        V = 0.7 * np.identity(num_cols) + np.full((num_cols, num_cols), 0.3)
+
+        # Incorrect dimensions
+        assert_raises(ValueError, matrix_normal, np.zeros((5,4,3)))
+        assert_raises(ValueError, matrix_normal, M, np.zeros(10), V)
+        assert_raises(ValueError, matrix_normal, M, U, np.zeros(10))
+        assert_raises(ValueError, matrix_normal, M, U, U)
+        assert_raises(ValueError, matrix_normal, M, V, V)
+        assert_raises(ValueError, matrix_normal, M.T, U, V)
+
+        e = np.linalg.LinAlgError
+        # Singular covariance for the rvs method of a non-frozen instance
+        assert_raises(e, matrix_normal.rvs,
+                      M, U, np.ones((num_cols, num_cols)))
+        assert_raises(e, matrix_normal.rvs,
+                      M, np.ones((num_rows, num_rows)), V)
+        # Singular covariance for a frozen instance
+        assert_raises(e, matrix_normal, M, U, np.ones((num_cols, num_cols)))
+        assert_raises(e, matrix_normal, M, np.ones((num_rows, num_rows)), V)
+
+    def test_default_inputs(self):
+        # Check that default argument handling works
+        num_rows = 4
+        num_cols = 3
+        M = np.full((num_rows,num_cols), 0.3)
+        U = 0.5 * np.identity(num_rows) + np.full((num_rows, num_rows), 0.5)
+        V = 0.7 * np.identity(num_cols) + np.full((num_cols, num_cols), 0.3)
+        Z = np.zeros((num_rows, num_cols))
+        Zr = np.zeros((num_rows, 1))
+        Zc = np.zeros((1, num_cols))
+        Ir = np.identity(num_rows)
+        Ic = np.identity(num_cols)
+        I1 = np.identity(1)
+
+        assert_equal(matrix_normal.rvs(mean=M, rowcov=U, colcov=V).shape,
+                     (num_rows, num_cols))
+        assert_equal(matrix_normal.rvs(mean=M).shape,
+                     (num_rows, num_cols))
+        assert_equal(matrix_normal.rvs(rowcov=U).shape,
+                     (num_rows, 1))
+        assert_equal(matrix_normal.rvs(colcov=V).shape,
+                     (1, num_cols))
+        assert_equal(matrix_normal.rvs(mean=M, colcov=V).shape,
+                     (num_rows, num_cols))
+        assert_equal(matrix_normal.rvs(mean=M, rowcov=U).shape,
+                     (num_rows, num_cols))
+        assert_equal(matrix_normal.rvs(rowcov=U, colcov=V).shape,
+                     (num_rows, num_cols))
+
+        assert_equal(matrix_normal(mean=M).rowcov, Ir)
+        assert_equal(matrix_normal(mean=M).colcov, Ic)
+        assert_equal(matrix_normal(rowcov=U).mean, Zr)
+        assert_equal(matrix_normal(rowcov=U).colcov, I1)
+        assert_equal(matrix_normal(colcov=V).mean, Zc)
+        assert_equal(matrix_normal(colcov=V).rowcov, I1)
+        assert_equal(matrix_normal(mean=M, rowcov=U).colcov, Ic)
+        assert_equal(matrix_normal(mean=M, colcov=V).rowcov, Ir)
+        assert_equal(matrix_normal(rowcov=U, colcov=V).mean, Z)
+
+    def test_covariance_expansion(self):
+        # Check that covariance can be specified with scalar or vector
+        num_rows = 4
+        num_cols = 3
+        M = np.full((num_rows, num_cols), 0.3)
+        Uv = np.full(num_rows, 0.2)
+        Us = 0.2
+        Vv = np.full(num_cols, 0.1)
+        Vs = 0.1
+
+        Ir = np.identity(num_rows)
+        Ic = np.identity(num_cols)
+
+        assert_equal(matrix_normal(mean=M, rowcov=Uv, colcov=Vv).rowcov,
+                     0.2*Ir)
+        assert_equal(matrix_normal(mean=M, rowcov=Uv, colcov=Vv).colcov,
+                     0.1*Ic)
+        assert_equal(matrix_normal(mean=M, rowcov=Us, colcov=Vs).rowcov,
+                     0.2*Ir)
+        assert_equal(matrix_normal(mean=M, rowcov=Us, colcov=Vs).colcov,
+                     0.1*Ic)
+
+    def test_frozen_matrix_normal(self):
+        for i in range(1,5):
+            for j in range(1,5):
+                M = np.full((i,j), 0.3)
+                U = 0.5 * np.identity(i) + np.full((i,i), 0.5)
+                V = 0.7 * np.identity(j) + np.full((j,j), 0.3)
+
+                frozen = matrix_normal(mean=M, rowcov=U, colcov=V)
+
+                rvs1 = frozen.rvs(random_state=1234)
+                rvs2 = matrix_normal.rvs(mean=M, rowcov=U, colcov=V,
+                                         random_state=1234)
+                assert_equal(rvs1, rvs2)
+
+                X = frozen.rvs(random_state=1234)
+
+                pdf1 = frozen.pdf(X)
+                pdf2 = matrix_normal.pdf(X, mean=M, rowcov=U, colcov=V)
+                assert_equal(pdf1, pdf2)
+
+                logpdf1 = frozen.logpdf(X)
+                logpdf2 = matrix_normal.logpdf(X, mean=M, rowcov=U, colcov=V)
+                assert_equal(logpdf1, logpdf2)
+
+    def test_matches_multivariate(self):
+        # Check that the pdfs match those obtained by vectorising and
+        # treating as a multivariate normal.
+        for i in range(1,5):
+            for j in range(1,5):
+                M = np.full((i,j), 0.3)
+                U = 0.5 * np.identity(i) + np.full((i,i), 0.5)
+                V = 0.7 * np.identity(j) + np.full((j,j), 0.3)
+
+                frozen = matrix_normal(mean=M, rowcov=U, colcov=V)
+                X = frozen.rvs(random_state=1234)
+                pdf1 = frozen.pdf(X)
+                logpdf1 = frozen.logpdf(X)
+
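+                # If X ~ MN(M, U, V) then vec(X) ~ N(vec(M), kron(V, U)),
+                # where vec() stacks columns (hence the transpose-flatten).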
+                vecX = X.T.flatten()
+                vecM = M.T.flatten()
+                cov = np.kron(V,U)
+                pdf2 = multivariate_normal.pdf(vecX, mean=vecM, cov=cov)
+                logpdf2 = multivariate_normal.logpdf(vecX, mean=vecM, cov=cov)
+
+                assert_allclose(pdf1, pdf2, rtol=1E-10)
+                assert_allclose(logpdf1, logpdf2, rtol=1E-10)
+
+    def test_array_input(self):
+        # Check array of inputs has the same output as the separate entries.
+        num_rows = 4
+        num_cols = 3
+        M = np.full((num_rows,num_cols), 0.3)
+        U = 0.5 * np.identity(num_rows) + np.full((num_rows, num_rows), 0.5)
+        V = 0.7 * np.identity(num_cols) + np.full((num_cols, num_cols), 0.3)
+        N = 10
+
+        frozen = matrix_normal(mean=M, rowcov=U, colcov=V)
+        X1 = frozen.rvs(size=N, random_state=1234)
+        X2 = frozen.rvs(size=N, random_state=4321)
+        X = np.concatenate((X1[np.newaxis,:,:,:],X2[np.newaxis,:,:,:]), axis=0)
+        assert_equal(X.shape, (2, N, num_rows, num_cols))
+
+        array_logpdf = frozen.logpdf(X)
+        assert_equal(array_logpdf.shape, (2, N))
+        for i in range(2):
+            for j in range(N):
+                separate_logpdf = matrix_normal.logpdf(X[i,j], mean=M,
+                                                       rowcov=U, colcov=V)
+                assert_allclose(separate_logpdf, array_logpdf[i,j], 1E-10)
+
+    def test_moments(self):
+        # Check that the sample moments match the parameters
+        num_rows = 4
+        num_cols = 3
+        M = np.full((num_rows,num_cols), 0.3)
+        U = 0.5 * np.identity(num_rows) + np.full((num_rows, num_rows), 0.5)
+        V = 0.7 * np.identity(num_cols) + np.full((num_cols, num_cols), 0.3)
+        N = 1000
+
+        frozen = matrix_normal(mean=M, rowcov=U, colcov=V)
+        X = frozen.rvs(size=N, random_state=1234)
+
+        sample_mean = np.mean(X,axis=0)
+        assert_allclose(sample_mean, M, atol=0.1)
+
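+        # diag(U) and diag(V) are all ones here, so each row of a draw has
+        # marginal covariance V and each column has marginal covariance U;
+        # pooling rows (columns) therefore estimates V (U), though the
+        # pooled samples are correlated, hence the loose tolerances.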
+        sample_colcov = np.cov(X.reshape(N*num_rows,num_cols).T)
+        assert_allclose(sample_colcov, V, atol=0.1)
+
+        sample_rowcov = np.cov(np.swapaxes(X,1,2).reshape(
+                                                        N*num_cols,num_rows).T)
+        assert_allclose(sample_rowcov, U, atol=0.1)
+
+    def test_samples(self):
+        # Regression test to ensure that we always generate the same stream of
+        # random variates.
+        actual = matrix_normal.rvs(
+            mean=np.array([[1, 2], [3, 4]]),
+            rowcov=np.array([[4, -1], [-1, 2]]),
+            colcov=np.array([[5, 1], [1, 10]]),
+            random_state=np.random.default_rng(0),
+            size=2
+        )
+        expected = np.array(
+            [[[1.56228264238181, -1.24136424071189],
+              [2.46865788392114, 6.22964440489445]],
+             [[3.86405716144353, 10.73714311429529],
+              [2.59428444080606, 5.79987854490876]]]
+        )
+        assert_allclose(actual, expected)
+
+
+class TestDirichlet:
+
+    def test_frozen_dirichlet(self):
+        np.random.seed(2846)
+
+        n = np.random.randint(1, 32)
+        alpha = np.random.uniform(10e-10, 100, n)
+
+        d = dirichlet(alpha)
+
+        assert_equal(d.var(), dirichlet.var(alpha))
+        assert_equal(d.mean(), dirichlet.mean(alpha))
+        assert_equal(d.entropy(), dirichlet.entropy(alpha))
+        num_tests = 10
+        for i in range(num_tests):
+            x = np.random.uniform(10e-10, 100, n)
+            x /= np.sum(x)
+            assert_equal(d.pdf(x[:-1]), dirichlet.pdf(x[:-1], alpha))
+            assert_equal(d.logpdf(x[:-1]), dirichlet.logpdf(x[:-1], alpha))
+
+    def test_numpy_rvs_shape_compatibility(self):
+        np.random.seed(2846)
+        alpha = np.array([1.0, 2.0, 3.0])
+        x = np.random.dirichlet(alpha, size=7)
+        assert_equal(x.shape, (7, 3))
+        assert_raises(ValueError, dirichlet.pdf, x, alpha)
+        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
+        dirichlet.pdf(x.T, alpha)
+        dirichlet.pdf(x.T[:-1], alpha)
+        dirichlet.logpdf(x.T, alpha)
+        dirichlet.logpdf(x.T[:-1], alpha)
+
+    def test_alpha_with_zeros(self):
+        np.random.seed(2846)
+        alpha = [1.0, 0.0, 3.0]
+        # don't pass invalid alpha to np.random.dirichlet
+        x = np.random.dirichlet(np.maximum(1e-9, alpha), size=7).T
+        assert_raises(ValueError, dirichlet.pdf, x, alpha)
+        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
+
+    def test_alpha_with_negative_entries(self):
+        np.random.seed(2846)
+        alpha = [1.0, -2.0, 3.0]
+        # don't pass invalid alpha to np.random.dirichlet
+        x = np.random.dirichlet(np.maximum(1e-9, alpha), size=7).T
+        assert_raises(ValueError, dirichlet.pdf, x, alpha)
+        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
+
+    def test_data_with_zeros(self):
+        alpha = np.array([1.0, 2.0, 3.0, 4.0])
+        x = np.array([0.1, 0.0, 0.2, 0.7])
+        dirichlet.pdf(x, alpha)
+        dirichlet.logpdf(x, alpha)
+        alpha = np.array([1.0, 1.0, 1.0, 1.0])
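+        # With all alpha_i = 1 the Dirichlet is uniform on the simplex,
+        # with constant density Gamma(4) = 3! = 6.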
+        assert_almost_equal(dirichlet.pdf(x, alpha), 6)
+        assert_almost_equal(dirichlet.logpdf(x, alpha), np.log(6))
+
+    def test_data_with_zeros_and_small_alpha(self):
+        alpha = np.array([1.0, 0.5, 3.0, 4.0])
+        x = np.array([0.1, 0.0, 0.2, 0.7])
+        assert_raises(ValueError, dirichlet.pdf, x, alpha)
+        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
+
+    def test_data_with_negative_entries(self):
+        alpha = np.array([1.0, 2.0, 3.0, 4.0])
+        x = np.array([0.1, -0.1, 0.3, 0.7])
+        assert_raises(ValueError, dirichlet.pdf, x, alpha)
+        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
+
+    def test_data_with_too_large_entries(self):
+        alpha = np.array([1.0, 2.0, 3.0, 4.0])
+        x = np.array([0.1, 1.1, 0.3, 0.7])
+        assert_raises(ValueError, dirichlet.pdf, x, alpha)
+        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
+
+    def test_data_too_deep_c(self):
+        alpha = np.array([1.0, 2.0, 3.0])
+        x = np.full((2, 7, 7), 1 / 14)
+        assert_raises(ValueError, dirichlet.pdf, x, alpha)
+        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
+
+    def test_alpha_too_deep(self):
+        alpha = np.array([[1.0, 2.0], [3.0, 4.0]])
+        x = np.full((2, 2, 7), 1 / 4)
+        assert_raises(ValueError, dirichlet.pdf, x, alpha)
+        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
+
+    def test_alpha_correct_depth(self):
+        alpha = np.array([1.0, 2.0, 3.0])
+        x = np.full((3, 7), 1 / 3)
+        dirichlet.pdf(x, alpha)
+        dirichlet.logpdf(x, alpha)
+
+    def test_non_simplex_data(self):
+        alpha = np.array([1.0, 2.0, 3.0])
+        x = np.full((3, 7), 1 / 2)
+        assert_raises(ValueError, dirichlet.pdf, x, alpha)
+        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
+
+    def test_data_vector_too_short(self):
+        alpha = np.array([1.0, 2.0, 3.0, 4.0])
+        x = np.full((2, 7), 1 / 2)
+        assert_raises(ValueError, dirichlet.pdf, x, alpha)
+        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
+
+    def test_data_vector_too_long(self):
+        alpha = np.array([1.0, 2.0, 3.0, 4.0])
+        x = np.full((5, 7), 1 / 5)
+        assert_raises(ValueError, dirichlet.pdf, x, alpha)
+        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
+
+    def test_mean_and_var(self):
+        alpha = np.array([1., 0.8, 0.2])
+        d = dirichlet(alpha)
+
+        expected_var = [1. / 12., 0.08, 0.03]
+        expected_mean = [0.5, 0.4, 0.1]
+
+        assert_array_almost_equal(d.var(), expected_var)
+        assert_array_almost_equal(d.mean(), expected_mean)
+
+    def test_scalar_values(self):
+        alpha = np.array([0.2])
+        d = dirichlet(alpha)
+
+        # For alpha of length 1, mean and var should be scalar instead of array
+        assert_equal(d.mean().ndim, 0)
+        assert_equal(d.var().ndim, 0)
+
+        assert_equal(d.pdf([1.]).ndim, 0)
+        assert_equal(d.logpdf([1.]).ndim, 0)
+
+    def test_K_and_K_minus_1_calls_equal(self):
+        # Test that calls with K and K-1 entries yield the same results.
+
+        np.random.seed(2846)
+
+        n = np.random.randint(1, 32)
+        alpha = np.random.uniform(10e-10, 100, n)
+
+        d = dirichlet(alpha)
+        num_tests = 10
+        for i in range(num_tests):
+            x = np.random.uniform(10e-10, 100, n)
+            x /= np.sum(x)
+            assert_almost_equal(d.pdf(x[:-1]), d.pdf(x))
+
+    def test_multiple_entry_calls(self):
+        # Test that calls with multiple x vectors as matrix work
+        np.random.seed(2846)
+
+        n = np.random.randint(1, 32)
+        alpha = np.random.uniform(10e-10, 100, n)
+        d = dirichlet(alpha)
+
+        num_tests = 10
+        num_multiple = 5
+        xm = None
+        for i in range(num_tests):
+            for m in range(num_multiple):
+                x = np.random.uniform(10e-10, 100, n)
+                x /= np.sum(x)
+                if xm is not None:
+                    xm = np.vstack((xm, x))
+                else:
+                    xm = x
+            rm = d.pdf(xm.T)
+            rs = None
+            for xs in xm:
+                r = d.pdf(xs)
+                if rs is not None:
+                    rs = np.append(rs, r)
+                else:
+                    rs = r
+            assert_array_almost_equal(rm, rs)
+
+    def test_2D_dirichlet_is_beta(self):
+        np.random.seed(2846)
+
+        alpha = np.random.uniform(10e-10, 100, 2)
+        d = dirichlet(alpha)
+        b = beta(alpha[0], alpha[1])
+
+        num_tests = 10
+        for i in range(num_tests):
+            x = np.random.uniform(10e-10, 100, 2)
+            x /= np.sum(x)
+            assert_almost_equal(b.pdf(x), d.pdf([x]))
+
+        assert_almost_equal(b.mean(), d.mean()[0])
+        assert_almost_equal(b.var(), d.var()[0])
+
+
+def test_multivariate_normal_dimensions_mismatch():
+    # Regression test for GH #3493. Check that setting up a PDF with a mean of
+    # length M and a covariance matrix of size (N, N), where M != N, raises a
+    # ValueError with an informative error message.
+    mu = np.array([0.0, 0.0])
+    sigma = np.array([[1.0]])
+
+    assert_raises(ValueError, multivariate_normal, mu, sigma)
+
+    # A simple check that the right error message was passed along. Checking
+    # that the entire message is there, word for word, would be somewhat
+    # fragile, so we just check for the leading part.
+    try:
+        multivariate_normal(mu, sigma)
+    except ValueError as e:
+        msg = "Dimension mismatch"
+        assert_equal(str(e)[:len(msg)], msg)
+
+
+class TestWishart:
+    def test_scale_dimensions(self):
+        # Test that we can call the Wishart with various scale dimensions
+
+        # Test case: dim=1, scale=1
+        true_scale = np.array(1, ndmin=2)
+        scales = [
+            1,                    # scalar
+            [1],                  # iterable
+            np.array(1),          # 0-dim
+            np.r_[1],             # 1-dim
+            np.array(1, ndmin=2)  # 2-dim
+        ]
+        for scale in scales:
+            w = wishart(1, scale)
+            assert_equal(w.scale, true_scale)
+            assert_equal(w.scale.shape, true_scale.shape)
+
+        # Test case: dim=2, scale=[[1,0],
+        #                          [0,2]]
+        true_scale = np.array([[1,0],
+                               [0,2]])
+        scales = [
+            [1,2],             # iterable
+            np.r_[1,2],        # 1-dim
+            np.array([[1,0],   # 2-dim
+                      [0,2]])
+        ]
+        for scale in scales:
+            w = wishart(2, scale)
+            assert_equal(w.scale, true_scale)
+            assert_equal(w.scale.shape, true_scale.shape)
+
+        # We cannot call with df <= dim - 1
+        assert_raises(ValueError, wishart, 1, np.eye(2))
+
+        # But we can call with dim - 1 < df < dim
+        wishart(1.1, np.eye(2))  # no error
+        # see gh-5562
+
+        # We cannot call with a 3-dimension array
+        scale = np.array(1, ndmin=3)
+        assert_raises(ValueError, wishart, 1, scale)
+
+    def test_quantile_dimensions(self):
+        # Test that we can call the Wishart rvs with various quantile dimensions
+
+        # If dim == 1, consider x.shape = [1,1,1]
+        X = [
+            1,                      # scalar
+            [1],                    # iterable
+            np.array(1),            # 0-dim
+            np.r_[1],               # 1-dim
+            np.array(1, ndmin=2),   # 2-dim
+            np.array([1], ndmin=3)  # 3-dim
+        ]
+
+        w = wishart(1,1)
+        density = w.pdf(np.array(1, ndmin=3))
+        for x in X:
+            assert_equal(w.pdf(x), density)
+
+        # If dim == 1, consider x.shape = [1,1,*]
+        X = [
+            [1,2,3],                     # iterable
+            np.r_[1,2,3],                # 1-dim
+            np.array([1,2,3], ndmin=3)   # 3-dim
+        ]
+
+        w = wishart(1,1)
+        density = w.pdf(np.array([1,2,3], ndmin=3))
+        for x in X:
+            assert_equal(w.pdf(x), density)
+
+        # If dim == 2, consider x.shape = [2,2,1]
+        # where x[:,:,*] = np.eye(2)*2
+        X = [
+            2,                    # scalar
+            [2,2],                # iterable
+            np.array(2),          # 0-dim
+            np.r_[2,2],           # 1-dim
+            np.array([[2,0],
+                      [0,2]]),    # 2-dim
+            np.array([[2,0],
+                      [0,2]])[:,:,np.newaxis]  # 3-dim
+        ]
+
+        w = wishart(2,np.eye(2))
+        density = w.pdf(np.array([[2,0],
+                                  [0,2]])[:,:,np.newaxis])
+        for x in X:
+            assert_equal(w.pdf(x), density)
+
+    def test_frozen(self):
+        # Test that the frozen and non-frozen Wishart gives the same answers
+
+        # Construct an arbitrary positive definite scale matrix
+        dim = 4
+        scale = np.diag(np.arange(dim)+1)
+        scale[np.tril_indices(dim, k=-1)] = np.arange(dim * (dim-1) // 2)
+        scale = np.dot(scale.T, scale)
+
+        # Construct a collection of positive definite matrices to test the PDF
+        X = []
+        for i in range(5):
+            x = np.diag(np.arange(dim)+(i+1)**2)
+            x[np.tril_indices(dim, k=-1)] = np.arange(dim * (dim-1) // 2)
+            x = np.dot(x.T, x)
+            X.append(x)
+        X = np.array(X).T
+
+        # Construct a 1D and 2D set of parameters
+        parameters = [
+            (10, 1, np.linspace(0.1, 10, 5)),  # 1D case
+            (10, scale, X)
+        ]
+
+        for (df, scale, x) in parameters:
+            w = wishart(df, scale)
+            assert_equal(w.var(), wishart.var(df, scale))
+            assert_equal(w.mean(), wishart.mean(df, scale))
+            assert_equal(w.mode(), wishart.mode(df, scale))
+            assert_equal(w.entropy(), wishart.entropy(df, scale))
+            assert_equal(w.pdf(x), wishart.pdf(x, df, scale))
+
+    def test_1D_is_chisquared(self):
+        # The 1-dimensional Wishart with an identity scale matrix is just a
+        # chi-squared distribution: a 1x1 Wishart variate is a sum of df
+        # squared N(0, 1) draws, i.e. chi-squared with df degrees of freedom.
+        # Test variance, mean, entropy, pdf;
+        # Kolmogorov-Smirnov test for rvs.
+        np.random.seed(482974)
+
+        sn = 500
+        dim = 1
+        scale = np.eye(dim)
+
+        df_range = np.arange(1, 10, 2, dtype=float)
+        X = np.linspace(0.1,10,num=10)
+        for df in df_range:
+            w = wishart(df, scale)
+            c = chi2(df)
+
+            # Statistics
+            assert_allclose(w.var(), c.var())
+            assert_allclose(w.mean(), c.mean())
+            assert_allclose(w.entropy(), c.entropy())
+
+            # PDF
+            assert_allclose(w.pdf(X), c.pdf(X))
+
+            # rvs
+            rvs = w.rvs(size=sn)
+            args = (df,)
+            alpha = 0.01
+            check_distribution_rvs('chi2', args, alpha, rvs)
+
+    def test_is_scaled_chisquared(self):
+        # A Wishart with an arbitrary scale matrix can be projected to a
+        # scaled chi-squared distribution.
+        # For :math:`S \sim W_p(V,n)` and :math:`\lambda \in \mathbb{R}^p` we have
+        # :math:`\lambda' S \lambda \sim \lambda' V \lambda \times \chi^2(n)`
+        np.random.seed(482974)
+
+        sn = 500
+        df = 10
+        dim = 4
+        # Construct an arbitrary positive definite matrix
+        scale = np.diag(np.arange(4)+1)
+        scale[np.tril_indices(4, k=-1)] = np.arange(6)
+        scale = np.dot(scale.T, scale)
+        # Use :math:`\lambda = [1, \dots, 1]'`
+        lamda = np.ones((dim,1))
+        sigma_lamda = lamda.T.dot(scale).dot(lamda).squeeze()
+        w = wishart(df, sigma_lamda)
+        c = chi2(df, scale=sigma_lamda)
+
+        # Statistics
+        assert_allclose(w.var(), c.var())
+        assert_allclose(w.mean(), c.mean())
+        assert_allclose(w.entropy(), c.entropy())
+
+        # PDF
+        X = np.linspace(0.1,10,num=10)
+        assert_allclose(w.pdf(X), c.pdf(X))
+
+        # rvs
+        rvs = w.rvs(size=sn)
+        args = (df,0,sigma_lamda)
+        alpha = 0.01
+        check_distribution_rvs('chi2', args, alpha, rvs)
+
+
+class TestMultinomial:
+    def test_logpmf(self):
+        vals1 = multinomial.logpmf((3,4), 7, (0.3, 0.7))
+        assert_allclose(vals1, -1.483270127243324, rtol=1e-8)
+
+        vals2 = multinomial.logpmf([3, 4], 0, [.3, .7])
+        assert_allclose(vals2, np.nan, rtol=1e-8)
+
+        vals3 = multinomial.logpmf([3, 4], 0, [-2, 3])
+        assert_allclose(vals3, np.nan, rtol=1e-8)
+
+    def test_reduces_binomial(self):
+        # test that the multinomial pmf reduces to the binomial pmf in the
+        # two-category case
+        val1 = multinomial.logpmf((3, 4), 7, (0.3, 0.7))
+        val2 = binom.logpmf(3, 7, 0.3)
+        assert_allclose(val1, val2, rtol=1e-8)
+
+        val1 = multinomial.pmf((6, 8), 14, (0.1, 0.9))
+        val2 = binom.pmf(6, 14, 0.1)
+        assert_allclose(val1, val2, rtol=1e-8)
+
+    def test_R(self):
+        # test against the values produced by this R code
+        # (https://stat.ethz.ch/R-manual/R-devel/library/stats/html/Multinom.html)
+        # X <- t(as.matrix(expand.grid(0:3, 0:3))); X <- X[, colSums(X) <= 3]
+        # X <- rbind(X, 3:3 - colSums(X)); dimnames(X) <- list(letters[1:3], NULL)
+        # X
+        # apply(X, 2, function(x) dmultinom(x, prob = c(1,2,5)))
+
+        n, p = 3, [1./8, 2./8, 5./8]
+        r_vals = {(0, 0, 3): 0.244140625, (1, 0, 2): 0.146484375,
+                  (2, 0, 1): 0.029296875, (3, 0, 0): 0.001953125,
+                  (0, 1, 2): 0.292968750, (1, 1, 1): 0.117187500,
+                  (2, 1, 0): 0.011718750, (0, 2, 1): 0.117187500,
+                  (1, 2, 0): 0.023437500, (0, 3, 0): 0.015625000}
+        for x in r_vals:
+            assert_allclose(multinomial.pmf(x, n, p), r_vals[x], atol=1e-14)
+
+    def test_rvs_np(self):
+        # test that .rvs agrees w/numpy
+        sc_rvs = multinomial.rvs(3, [1/4.]*3, size=7, random_state=123)
+        rndm = np.random.RandomState(123)
+        np_rvs = rndm.multinomial(3, [1/4.]*3, size=7)
+        assert_equal(sc_rvs, np_rvs)
+
+    def test_pmf(self):
+        vals0 = multinomial.pmf((5,), 5, (1,))
+        assert_allclose(vals0, 1, rtol=1e-8)
+
+        vals1 = multinomial.pmf((3,4), 7, (.3, .7))
+        assert_allclose(vals1, .22689449999999994, rtol=1e-8)
+
+        vals2 = multinomial.pmf([[[3,5],[0,8]], [[-1, 9], [1, 1]]], 8,
+                (.1, .9))
+        assert_allclose(vals2, [[.03306744, .43046721], [0, 0]], rtol=1e-8)
+
+        x = np.empty((0,2), dtype=np.float64)
+        vals3 = multinomial.pmf(x, 4, (.3, .7))
+        assert_equal(vals3, np.empty([], dtype=np.float64))
+
+        vals4 = multinomial.pmf([1,2], 4, (.3, .7))
+        assert_allclose(vals4, 0, rtol=1e-8)
+
+        vals5 = multinomial.pmf([3, 3, 0], 6, [2/3.0, 1/3.0, 0])
+        assert_allclose(vals5, 0.219478737997, rtol=1e-8)
+
+    def test_pmf_broadcasting(self):
+        vals0 = multinomial.pmf([1, 2], 3, [[.1, .9], [.2, .8]])
+        assert_allclose(vals0, [.243, .384], rtol=1e-8)
+
+        vals1 = multinomial.pmf([1, 2], [3, 4], [.1, .9])
+        assert_allclose(vals1, [.243, 0], rtol=1e-8)
+
+        vals2 = multinomial.pmf([[[1, 2], [1, 1]]], 3, [.1, .9])
+        assert_allclose(vals2, [[.243, 0]], rtol=1e-8)
+
+        vals3 = multinomial.pmf([1, 2], [[[3], [4]]], [.1, .9])
+        assert_allclose(vals3, [[[.243], [0]]], rtol=1e-8)
+
+        vals4 = multinomial.pmf([[1, 2], [1,1]], [[[[3]]]], [.1, .9])
+        assert_allclose(vals4, [[[[.243, 0]]]], rtol=1e-8)
+
+    def test_cov(self):
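+        # The multinomial covariance is n * (diag(p) - p p'), i.e.
+        # Var(X_i) = n p_i (1 - p_i) and Cov(X_i, X_j) = -n p_i p_j.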
+        cov1 = multinomial.cov(5, (.2, .3, .5))
+        cov2 = [[5*.2*.8, -5*.2*.3, -5*.2*.5],
+                [-5*.3*.2, 5*.3*.7, -5*.3*.5],
+                [-5*.5*.2, -5*.5*.3, 5*.5*.5]]
+        assert_allclose(cov1, cov2, rtol=1e-8)
+
+    def test_cov_broadcasting(self):
+        cov1 = multinomial.cov(5, [[.1, .9], [.2, .8]])
+        cov2 = [[[.45, -.45],[-.45, .45]], [[.8, -.8], [-.8, .8]]]
+        assert_allclose(cov1, cov2, rtol=1e-8)
+
+        cov3 = multinomial.cov([4, 5], [.1, .9])
+        cov4 = [[[.36, -.36], [-.36, .36]], [[.45, -.45], [-.45, .45]]]
+        assert_allclose(cov3, cov4, rtol=1e-8)
+
+        cov5 = multinomial.cov([4, 5], [[.3, .7], [.4, .6]])
+        cov6 = [[[4*.3*.7, -4*.3*.7], [-4*.3*.7, 4*.3*.7]],
+                [[5*.4*.6, -5*.4*.6], [-5*.4*.6, 5*.4*.6]]]
+        assert_allclose(cov5, cov6, rtol=1e-8)
+
+    def test_entropy(self):
+        # this is equivalent to a binomial distribution with n=2, so the
+        # entropy .77899774929 is easily computed "by hand"
+        ent0 = multinomial.entropy(2, [.2, .8])
+        assert_allclose(ent0, binom.entropy(2, .2), rtol=1e-8)
+
+    def test_entropy_broadcasting(self):
+        ent0 = multinomial.entropy([2, 3], [.2, .3])
+        assert_allclose(ent0, [binom.entropy(2, .2), binom.entropy(3, .2)],
+                rtol=1e-8)
+
+        ent1 = multinomial.entropy([7, 8], [[.3, .7], [.4, .6]])
+        assert_allclose(ent1, [binom.entropy(7, .3), binom.entropy(8, .4)],
+                rtol=1e-8)
+
+        ent2 = multinomial.entropy([[7], [8]], [[.3, .7], [.4, .6]])
+        assert_allclose(ent2,
+                [[binom.entropy(7, .3), binom.entropy(7, .4)],
+                 [binom.entropy(8, .3), binom.entropy(8, .4)]],
+                rtol=1e-8)
+
+    def test_mean(self):
+        mean1 = multinomial.mean(5, [.2, .8])
+        assert_allclose(mean1, [5*.2, 5*.8], rtol=1e-8)
+
+    def test_mean_broadcasting(self):
+        mean1 = multinomial.mean([5, 6], [.2, .8])
+        assert_allclose(mean1, [[5*.2, 5*.8], [6*.2, 6*.8]], rtol=1e-8)
+
+    def test_frozen(self):
+        # The frozen distribution should agree with the regular one
+        np.random.seed(1234)
+        n = 12
+        pvals = (.1, .2, .3, .4)
+        x = [[0,0,0,12],[0,0,1,11],[0,1,1,10],[1,1,1,9],[1,1,2,8]]
+        x = np.asarray(x, dtype=np.float64)
+        mn_frozen = multinomial(n, pvals)
+        assert_allclose(mn_frozen.pmf(x), multinomial.pmf(x, n, pvals))
+        assert_allclose(mn_frozen.logpmf(x), multinomial.logpmf(x, n, pvals))
+        assert_allclose(mn_frozen.entropy(), multinomial.entropy(n, pvals))
+
+    def test_gh_11860(self):
+        # gh-11860 reported cases in which the adjustments made by multinomial
+        # to the last element of `p` can cause `nan`s even when the input is
+        # essentially valid. Check that a pathological case returns a finite,
+        # nonzero result. (This would fail in main before the PR.)
+        n = 88
+        rng = np.random.default_rng(8879715917488330089)
+        p = rng.random(n)
+        p[-1] = 1e-30
+        p /= np.sum(p)
+        x = np.ones(n)
+        logpmf = multinomial.logpmf(x, n, p)
+        assert np.isfinite(logpmf)
+
+
+class TestInvwishart:
+    def test_frozen(self):
+        # Test that the frozen and non-frozen inverse Wishart gives the same
+        # answers
+
+        # Construct an arbitrary positive definite scale matrix
+        dim = 4
+        scale = np.diag(np.arange(dim)+1)
+        scale[np.tril_indices(dim, k=-1)] = np.arange(dim*(dim-1)//2)
+        scale = np.dot(scale.T, scale)
+
+        # Construct a collection of positive definite matrices to test the PDF
+        X = []
+        for i in range(5):
+            x = np.diag(np.arange(dim)+(i+1)**2)
+            x[np.tril_indices(dim, k=-1)] = np.arange(dim*(dim-1)//2)
+            x = np.dot(x.T, x)
+            X.append(x)
+        X = np.array(X).T
+
+        # Construct a 1D and 2D set of parameters
+        parameters = [
+            (10, 1, np.linspace(0.1, 10, 5)),  # 1D case
+            (10, scale, X)
+        ]
+
+        for (df, scale, x) in parameters:
+            iw = invwishart(df, scale)
+            assert_equal(iw.var(), invwishart.var(df, scale))
+            assert_equal(iw.mean(), invwishart.mean(df, scale))
+            assert_equal(iw.mode(), invwishart.mode(df, scale))
+            assert_allclose(iw.pdf(x), invwishart.pdf(x, df, scale))
+
+    def test_1D_is_invgamma(self):
+        # The 1-dimensional inverse Wishart with an identity scale matrix is
+        # just an inverse gamma distribution.
+        # Test variance, mean, pdf;
+        # Kolmogorov-Smirnov test for rvs.
+        np.random.seed(482974)
+
+        sn = 500
+        dim = 1
+        scale = np.eye(dim)
+
+        df_range = np.arange(5, 20, 2, dtype=float)
+        X = np.linspace(0.1,10,num=10)
+        for df in df_range:
+            iw = invwishart(df, scale)
+            ig = invgamma(df/2, scale=1./2)
+
+            # Statistics
+            assert_allclose(iw.var(), ig.var())
+            assert_allclose(iw.mean(), ig.mean())
+
+            # PDF
+            assert_allclose(iw.pdf(X), ig.pdf(X))
+
+            # rvs
+            rvs = iw.rvs(size=sn)
+            args = (df/2, 0, 1./2)
+            alpha = 0.01
+            check_distribution_rvs('invgamma', args, alpha, rvs)
+
+    def test_wishart_invwishart_2D_rvs(self):
+        dim = 3
+        df = 10
+
+        # Construct a simple non-diagonal positive definite matrix
+        scale = np.eye(dim)
+        scale[0,1] = 0.5
+        scale[1,0] = 0.5
+
+        # Construct frozen Wishart and inverse Wishart random variables
+        w = wishart(df, scale)
+        iw = invwishart(df, scale)
+
+        # Get the generated random variables from a known seed
+        np.random.seed(248042)
+        w_rvs = wishart.rvs(df, scale)
+        np.random.seed(248042)
+        frozen_w_rvs = w.rvs()
+        np.random.seed(248042)
+        iw_rvs = invwishart.rvs(df, scale)
+        np.random.seed(248042)
+        frozen_iw_rvs = iw.rvs()
+
+        # Manually calculate what it should be, based on the Bartlett (1933)
+        # decomposition of a Wishart into D A A' D', where D is the Cholesky
+        # factorization of the scale matrix and A is the lower triangular matrix
+        # with the square root of chi^2 variates on the diagonal and N(0,1)
+        # variates in the lower triangle.
+        np.random.seed(248042)
+        covariances = np.random.normal(size=3)
+        variances = np.r_[
+            np.random.chisquare(df),
+            np.random.chisquare(df-1),
+            np.random.chisquare(df-2),
+        ]**0.5
+
+        # Construct the lower-triangular A matrix
+        A = np.diag(variances)
+        A[np.tril_indices(dim, k=-1)] = covariances
+
+        # Wishart random variate
+        D = np.linalg.cholesky(scale)
+        DA = D.dot(A)
+        manual_w_rvs = np.dot(DA, DA.T)
+
+        # inverse Wishart random variate
+        # Supposing that the inverse wishart has scale matrix `scale`, then the
+        # random variate is the inverse of a random variate drawn from a Wishart
+        # distribution with scale matrix `inv_scale = np.linalg.inv(scale)`
+        iD = np.linalg.cholesky(np.linalg.inv(scale))
+        iDA = iD.dot(A)
+        manual_iw_rvs = np.linalg.inv(np.dot(iDA, iDA.T))
+
+        # Test for equality
+        assert_allclose(w_rvs, manual_w_rvs)
+        assert_allclose(frozen_w_rvs, manual_w_rvs)
+        assert_allclose(iw_rvs, manual_iw_rvs)
+        assert_allclose(frozen_iw_rvs, manual_iw_rvs)
+
+    def test_cho_inv_batch(self):
+        """Regression test for gh-8844."""
+        a0 = np.array([[2, 1, 0, 0.5],
+                       [1, 2, 0.5, 0.5],
+                       [0, 0.5, 3, 1],
+                       [0.5, 0.5, 1, 2]])
+        a1 = np.array([[2, -1, 0, 0.5],
+                       [-1, 2, 0.5, 0.5],
+                       [0, 0.5, 3, 1],
+                       [0.5, 0.5, 1, 4]])
+        a = np.array([a0, a1])
+        ainv = a.copy()
+        _cho_inv_batch(ainv)
+        ident = np.eye(4)
+        assert_allclose(a[0].dot(ainv[0]), ident, atol=1e-15)
+        assert_allclose(a[1].dot(ainv[1]), ident, atol=1e-15)
+
+    def test_logpdf_4x4(self):
+        """Regression test for gh-8844."""
+        X = np.array([[2, 1, 0, 0.5],
+                      [1, 2, 0.5, 0.5],
+                      [0, 0.5, 3, 1],
+                      [0.5, 0.5, 1, 2]])
+        Psi = np.array([[9, 7, 3, 1],
+                        [7, 9, 5, 1],
+                        [3, 5, 8, 2],
+                        [1, 1, 2, 9]])
+        nu = 6
+        prob = invwishart.logpdf(X, nu, Psi)
+        # Explicit calculation from the formula on wikipedia.
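+        # For X ~ IW(nu, Psi) in p dimensions,
+        # log f(X) = (nu/2) log|Psi| - (nu p / 2) log 2 - log Gamma_p(nu/2)
+        #            - ((nu + p + 1)/2) log|X| - tr(Psi X^{-1}) / 2.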
+        p = X.shape[0]
+        sig, logdetX = np.linalg.slogdet(X)
+        sig, logdetPsi = np.linalg.slogdet(Psi)
+        M = np.linalg.solve(X, Psi)
+        expected = ((nu/2)*logdetPsi
+                    - (nu*p/2)*np.log(2)
+                    - multigammaln(nu/2, p)
+                    - (nu + p + 1)/2*logdetX
+                    - 0.5*M.trace())
+        assert_allclose(prob, expected)
+
+
+class TestSpecialOrthoGroup:
+    def test_reproducibility(self):
+        np.random.seed(514)
+        x = special_ortho_group.rvs(3)
+        expected = np.array([[-0.99394515, -0.04527879, 0.10011432],
+                             [0.04821555, -0.99846897, 0.02711042],
+                             [0.09873351, 0.03177334, 0.99460653]])
+        assert_array_almost_equal(x, expected)
+
+        random_state = np.random.RandomState(seed=514)
+        x = special_ortho_group.rvs(3, random_state=random_state)
+        assert_array_almost_equal(x, expected)
+
+    def test_invalid_dim(self):
+        assert_raises(ValueError, special_ortho_group.rvs, None)
+        assert_raises(ValueError, special_ortho_group.rvs, (2, 2))
+        assert_raises(ValueError, special_ortho_group.rvs, 1)
+        assert_raises(ValueError, special_ortho_group.rvs, 2.5)
+
+    def test_frozen_matrix(self):
+        dim = 7
+        frozen = special_ortho_group(dim)
+
+        rvs1 = frozen.rvs(random_state=1234)
+        rvs2 = special_ortho_group.rvs(dim, random_state=1234)
+
+        assert_equal(rvs1, rvs2)
+
+    def test_det_and_ortho(self):
+        xs = [special_ortho_group.rvs(dim)
+              for dim in range(2,12)
+              for i in range(3)]
+
+        # Test that determinants are always +1
+        dets = [np.linalg.det(x) for x in xs]
+        assert_allclose(dets, [1.]*30, rtol=1e-13)
+
+        # Test that these are orthogonal matrices
+        for x in xs:
+            assert_array_almost_equal(np.dot(x, x.T),
+                                      np.eye(x.shape[0]))
+
+    def test_haar(self):
+        # Test that the distribution is constant under rotation
+        # Every column should have the same distribution
+        # Additionally, the distribution should be invariant under another rotation
+
+        # Generate samples
+        dim = 5
+        samples = 1000  # Not too many, or the test takes too long
+        ks_prob = .05
+        np.random.seed(514)
+        xs = special_ortho_group.rvs(dim, size=samples)
+
+        # Dot a few rows (0, 1, 2) with unit vectors (0, 2, 4, 3),
+        #   effectively picking off entries in the matrices of xs.
+        #   These projections should all have the same distribution,
+        #     establishing rotational invariance. We use the two-sided
+        #     KS test to confirm this.
+        #   We could instead test that angles between random vectors
+        #     are uniformly distributed, but the below is sufficient.
+        #   It is not feasible to consider all pairs, so pick a few.
+        els = ((0,0), (0,2), (1,4), (2,3))
+        proj = {(er, ec): sorted(x[er][ec] for x in xs) for er, ec in els}
+        pairs = [(e0, e1) for e0 in els for e1 in els if e0 > e1]
+        ks_tests = [ks_2samp(proj[p0], proj[p1])[1] for (p0, p1) in pairs]
+        assert_array_less([ks_prob]*len(pairs), ks_tests)
+
+
+class TestOrthoGroup:
+    def test_reproducibility(self):
+        seed = 514
+        np.random.seed(seed)
+        x = ortho_group.rvs(3)
+        x2 = ortho_group.rvs(3, random_state=seed)
+        # Note this matrix has det -1, distinguishing O(N) from SO(N)
+        assert_almost_equal(np.linalg.det(x), -1)
+        expected = np.array([[0.381686, -0.090374, 0.919863],
+                             [0.905794, -0.161537, -0.391718],
+                             [-0.183993, -0.98272, -0.020204]])
+        assert_array_almost_equal(x, expected)
+        assert_array_almost_equal(x2, expected)
+
+    def test_invalid_dim(self):
+        assert_raises(ValueError, ortho_group.rvs, None)
+        assert_raises(ValueError, ortho_group.rvs, (2, 2))
+        assert_raises(ValueError, ortho_group.rvs, 1)
+        assert_raises(ValueError, ortho_group.rvs, 2.5)
+
+    def test_frozen_matrix(self):
+        dim = 7
+        frozen = ortho_group(dim)
+        frozen_seed = ortho_group(dim, seed=1234)
+
+        rvs1 = frozen.rvs(random_state=1234)
+        rvs2 = ortho_group.rvs(dim, random_state=1234)
+        rvs3 = frozen_seed.rvs(size=1)
+
+        assert_equal(rvs1, rvs2)
+        assert_equal(rvs1, rvs3)
+
+    def test_det_and_ortho(self):
+        xs = [[ortho_group.rvs(dim)
+               for i in range(10)]
+              for dim in range(2,12)]
+
+        # Test that abs determinants are always +1
+        dets = np.array([[np.linalg.det(x) for x in xx] for xx in xs])
+        assert_allclose(np.fabs(dets), np.ones(dets.shape), rtol=1e-13)
+
+        # Test that we get both positive and negative determinants
+        # Check that, for each dimension, the sample of 10 contains at
+        # least one and fewer than 10 negative determinants; the rest are
+        # positive by the previous check.
+        assert_array_less([0]*10, [np.nonzero(d < 0)[0].shape[0] for d in dets])
+        assert_array_less([np.nonzero(d < 0)[0].shape[0] for d in dets], [10]*10)
+
+        # Test that these are orthogonal matrices
+        for xx in xs:
+            for x in xx:
+                assert_array_almost_equal(np.dot(x, x.T),
+                                          np.eye(x.shape[0]))
+
+    def test_haar(self):
+        # Test that the distribution is constant under rotation
+        # Every column should have the same distribution
+        # Additionally, the distribution should be invariant under another rotation
+
+        # Generate samples
+        dim = 5
+        samples = 1000  # Not too many, or the test takes too long
+        ks_prob = .05
+        np.random.seed(518)  # Note that the test is sensitive to seed too
+        xs = ortho_group.rvs(dim, size=samples)
+
+        # Dot a few rows (0, 1, 2) with unit vectors (0, 2, 4, 3),
+        #   effectively picking off entries in the matrices of xs.
+        #   These projections should all have the same distribution,
+        #     establishing rotational invariance. We use the two-sided
+        #     KS test to confirm this.
+        #   We could instead test that angles between random vectors
+        #     are uniformly distributed, but the below is sufficient.
+        #   It is not feasible to consider all pairs, so pick a few.
+        els = ((0,0), (0,2), (1,4), (2,3))
+        proj = {(er, ec): sorted(x[er][ec] for x in xs) for er, ec in els}
+        pairs = [(e0, e1) for e0 in els for e1 in els if e0 > e1]
+        ks_tests = [ks_2samp(proj[p0], proj[p1])[1] for (p0, p1) in pairs]
+        assert_array_less([ks_prob]*len(pairs), ks_tests)
+
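+    # A sketch (not part of the original suite) of the "invariant under
+    # another rotation" property referenced in test_haar: left-multiplying
+    # Haar samples by any fixed rotation R leaves the distribution of each
+    # matrix entry unchanged. Named without a `test_` prefix so pytest does
+    # not collect it; call it manually if desired.
+    def _haar_rotation_invariance_sketch(self):
+        dim, samples = 5, 1000
+        xs = ortho_group.rvs(dim, size=samples, random_state=518)
+        R = special_ortho_group.rvs(dim, random_state=0)
+        rotated = R @ xs  # matmul broadcasts R over the stack of samples
+        # Loose threshold keeps the check robust to the seed choice.
+        p = ks_2samp(xs[:, 0, 0], rotated[:, 0, 0])[1]
+        assert_array_less(.001, p)
+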
+    @pytest.mark.slow
+    def test_pairwise_distances(self):
+        # Test that the distribution of pairwise distances is close to correct.
+        np.random.seed(514)
+
+        def random_ortho(dim):
+            u, _s, v = np.linalg.svd(np.random.normal(size=(dim, dim)))
+            return np.dot(u, v)
+
+        for dim in range(2, 6):
+            def generate_test_statistics(rvs, N=1000, eps=1e-10):
+                stats = np.array([
+                    np.sum((rvs(dim=dim) - rvs(dim=dim))**2)
+                    for _ in range(N)
+                ])
+                # Add a bit of noise to account for numeric accuracy.
+                stats += np.random.uniform(-eps, eps, size=stats.shape)
+                return stats
+
+            expected = generate_test_statistics(random_ortho)
+            actual = generate_test_statistics(scipy.stats.ortho_group.rvs)
+
+            _D, p = scipy.stats.ks_2samp(expected, actual)
+
+            assert_array_less(.05, p)
+
+class TestRandomCorrelation:
+    def test_reproducibility(self):
+        np.random.seed(514)
+        eigs = (.5, .8, 1.2, 1.5)
+        x = random_correlation.rvs(eigs)
+        x2 = random_correlation.rvs(eigs, random_state=514)
+        expected = np.array([[1., -0.184851, 0.109017, -0.227494],
+                             [-0.184851, 1., 0.231236, 0.326669],
+                             [0.109017, 0.231236, 1., -0.178912],
+                             [-0.227494, 0.326669, -0.178912, 1.]])
+        assert_array_almost_equal(x, expected)
+        assert_array_almost_equal(x2, expected)
+
+    def test_invalid_eigs(self):
+        assert_raises(ValueError, random_correlation.rvs, None)
+        assert_raises(ValueError, random_correlation.rvs, 'test')
+        assert_raises(ValueError, random_correlation.rvs, 2.5)
+        assert_raises(ValueError, random_correlation.rvs, [2.5])
+        assert_raises(ValueError, random_correlation.rvs, [[1,2],[3,4]])
+        assert_raises(ValueError, random_correlation.rvs, [2.5, -.5])
+        assert_raises(ValueError, random_correlation.rvs, [1, 2, .1])
+
+    def test_frozen_matrix(self):
+        eigs = (.5, .8, 1.2, 1.5)
+        frozen = random_correlation(eigs)
+        frozen_seed = random_correlation(eigs, seed=514)
+
+        rvs1 = random_correlation.rvs(eigs, random_state=514)
+        rvs2 = frozen.rvs(random_state=514)
+        rvs3 = frozen_seed.rvs()
+
+        assert_equal(rvs1, rvs2)
+        assert_equal(rvs1, rvs3)
+
+    def test_definition(self):
+        # Test the definition of a correlation matrix in several dimensions:
+        #
+        # 1. Det is product of eigenvalues (and positive by construction
+        #    in examples)
+        # 2. 1's on diagonal
+        # 3. Matrix is symmetric
+
+        def norm(i, e):
+            return i*e/sum(e)
+
+        np.random.seed(123)
+
+        eigs = [norm(i, np.random.uniform(size=i)) for i in range(2, 6)]
+        eigs.append([4,0,0,0])
+
+        ones = [[1.]*len(e) for e in eigs]
+        xs = [random_correlation.rvs(e) for e in eigs]
+
+        # Test that determinants are products of eigenvalues
+        #   These are positive by construction
+        # Could also test that the eigenvalues themselves are correct,
+        #   but this seems sufficient.
+        dets = [np.fabs(np.linalg.det(x)) for x in xs]
+        dets_known = [np.prod(e) for e in eigs]
+        assert_allclose(dets, dets_known, rtol=1e-13, atol=1e-13)
+
+        # Test for 1's on the diagonal
+        diags = [np.diag(x) for x in xs]
+        for a, b in zip(diags, ones):
+            assert_allclose(a, b, rtol=1e-13)
+
+        # Correlation matrices are symmetric
+        for x in xs:
+            assert_allclose(x, x.T, rtol=1e-13)
+
+    def test_to_corr(self):
+        # Check some corner cases in to_corr
+
+        # ajj == 1
+        m = np.array([[0.1, 0], [0, 1]], dtype=float)
+        m = random_correlation._to_corr(m)
+        assert_allclose(m, np.array([[1, 0], [0, 0.1]]))
+
+        # Floating point overflow; fails to compute the correct
+        # rotation, but should still produce some valid rotation
+        # rather than infs/nans
+        with np.errstate(over='ignore'):
+            g = np.array([[0, 1], [-1, 0]])  # rotation by 90 degrees
+
+            m0 = np.array([[1e300, 0], [0, np.nextafter(1, 0)]], dtype=float)
+            m = random_correlation._to_corr(m0.copy())
+            assert_allclose(m, g.T.dot(m0).dot(g))
+
+            m0 = np.array([[0.9, 1e300], [1e300, 1.1]], dtype=float)
+            m = random_correlation._to_corr(m0.copy())
+            assert_allclose(m, g.T.dot(m0).dot(g))
+
+        # Zero discriminant; should set the first diag entry to 1
+        m0 = np.array([[2, 1], [1, 2]], dtype=float)
+        m = random_correlation._to_corr(m0.copy())
+        assert_allclose(m[0,0], 1)
+
+        # Slightly negative discriminant; should be approx correct still
+        m0 = np.array([[2 + 1e-7, 1], [1, 2]], dtype=float)
+        m = random_correlation._to_corr(m0.copy())
+        assert_allclose(m[0,0], 1)
+
+
+class TestUniformDirection:
+    @pytest.mark.parametrize("dim", [1, 3])
+    @pytest.mark.parametrize("size", [None, 1, 5, (5, 4)])
+    def test_samples(self, dim, size):
+        # test that samples have correct shape and norm 1
+        rng = np.random.default_rng(2777937887058094419)
+        uniform_direction_dist = uniform_direction(dim, seed=rng)
+        samples = uniform_direction_dist.rvs(size)
+        mean, cov = np.zeros(dim), np.eye(dim)
+        expected_shape = rng.multivariate_normal(mean, cov, size=size).shape
+        assert samples.shape == expected_shape
+        norms = np.linalg.norm(samples, axis=-1)
+        assert_allclose(norms, 1.)
+
+    @pytest.mark.parametrize("dim", [None, 0, (2, 2), 2.5])
+    def test_invalid_dim(self, dim):
+        message = ("Dimension of vector must be specified, "
+                   "and must be an integer greater than 0.")
+        with pytest.raises(ValueError, match=message):
+            uniform_direction.rvs(dim)
+
+    def test_frozen_distribution(self):
+        dim = 5
+        frozen = uniform_direction(dim)
+        frozen_seed = uniform_direction(dim, seed=514)
+
+        rvs1 = frozen.rvs(random_state=514)
+        rvs2 = uniform_direction.rvs(dim, random_state=514)
+        rvs3 = frozen_seed.rvs()
+
+        assert_equal(rvs1, rvs2)
+        assert_equal(rvs1, rvs3)
+
+    @pytest.mark.parametrize("dim", [2, 5, 8])
+    def test_uniform(self, dim):
+        rng = np.random.default_rng(1036978481269651776)
+        spherical_dist = uniform_direction(dim, seed=rng)
+        # generate random, orthogonal vectors
+        v1, v2 = spherical_dist.rvs(size=2)
+        v2 -= v1 @ v2 * v1
+        v2 /= np.linalg.norm(v2)
+        assert_allclose(v1 @ v2, 0, atol=1e-14)  # orthogonal
+        # generate data and project onto orthogonal vectors
+        samples = spherical_dist.rvs(size=10000)
+        s1 = samples @ v1
+        s2 = samples @ v2
+        angles = np.arctan2(s1, s2)
+        # test that angles follow a uniform distribution
+        # normalize angles to range [0, 1]
+        angles += np.pi
+        angles /= 2*np.pi
+        # perform KS test
+        uniform_dist = uniform()
+        kstest_result = kstest(angles, uniform_dist.cdf)
+        assert kstest_result.pvalue > 0.05
+
+class TestUnitaryGroup:
+    def test_reproducibility(self):
+        np.random.seed(514)
+        x = unitary_group.rvs(3)
+        x2 = unitary_group.rvs(3, random_state=514)
+
+        expected = np.array([[0.308771+0.360312j, 0.044021+0.622082j, 0.160327+0.600173j],
+                             [0.732757+0.297107j, 0.076692-0.4614j, -0.394349+0.022613j],
+                             [-0.148844+0.357037j, -0.284602-0.557949j, 0.607051+0.299257j]])
+
+        assert_array_almost_equal(x, expected)
+        assert_array_almost_equal(x2, expected)
+
+    def test_invalid_dim(self):
+        assert_raises(ValueError, unitary_group.rvs, None)
+        assert_raises(ValueError, unitary_group.rvs, (2, 2))
+        assert_raises(ValueError, unitary_group.rvs, 1)
+        assert_raises(ValueError, unitary_group.rvs, 2.5)
+
+    def test_frozen_matrix(self):
+        dim = 7
+        frozen = unitary_group(dim)
+        frozen_seed = unitary_group(dim, seed=514)
+
+        rvs1 = frozen.rvs(random_state=514)
+        rvs2 = unitary_group.rvs(dim, random_state=514)
+        rvs3 = frozen_seed.rvs(size=1)
+
+        assert_equal(rvs1, rvs2)
+        assert_equal(rvs1, rvs3)
+
+    def test_unitarity(self):
+        xs = [unitary_group.rvs(dim)
+              for dim in range(2,12)
+              for i in range(3)]
+
+        # Test that these are unitary matrices
+        for x in xs:
+            assert_allclose(np.dot(x, x.conj().T), np.eye(x.shape[0]), atol=1e-15)
+
+    def test_haar(self):
+        # Test that the eigenvalues, which lie on the unit circle in
+        # the complex plane, are uncorrelated.
+
+        # Generate samples
+        dim = 5
+        samples = 1000  # Not too many, or the test takes too long
+        np.random.seed(514)  # Note that the test is sensitive to seed too
+        xs = unitary_group.rvs(dim, size=samples)
+
+        # The angles "x" of the eigenvalues should be uniformly distributed
+        # Overall this seems to be a necessary but weak test of the distribution.
+        eigs = np.vstack([scipy.linalg.eigvals(x) for x in xs])
+        x = np.arctan2(eigs.imag, eigs.real)
+        # uniform(-np.pi, 2*np.pi) uses loc/scale notation, i.e. U(-pi, pi)
+        res = kstest(x.ravel(), uniform(-np.pi, 2*np.pi).cdf)
+        assert_(res.pvalue > 0.05)
+
+
+class TestMultivariateT:
+
+    # These tests were created by running vpa(mvtpdf(...)) in MATLAB. The
+    # function takes no `mu` parameter. The tests were run as
+    #
+    # >> ans = vpa(mvtpdf(x - mu, shape, df));
+    #
+    # A direct evaluation of the same density is sketched in
+    # `_mvt_pdf_reference` after PDF_TESTS below.
+    #
+    PDF_TESTS = [(
+        # x
+        [
+            [1, 2],
+            [4, 1],
+            [2, 1],
+            [2, 4],
+            [1, 4],
+            [4, 1],
+            [3, 2],
+            [3, 3],
+            [4, 4],
+            [5, 1],
+        ],
+        # loc
+        [0, 0],
+        # shape
+        [
+            [1, 0],
+            [0, 1]
+        ],
+        # df
+        4,
+        # ans
+        [
+            0.013972450422333741737457302178882,
+            0.0010998721906793330026219646100571,
+            0.013972450422333741737457302178882,
+            0.00073682844024025606101402363634634,
+            0.0010998721906793330026219646100571,
+            0.0010998721906793330026219646100571,
+            0.0020732579600816823488240725481546,
+            0.00095660371505271429414668515889275,
+            0.00021831953784896498569831346792114,
+            0.00037725616140301147447000396084604
+        ]
+
+    ), (
+        # x
+        [
+            [0.9718, 0.1298, 0.8134],
+            [0.4922, 0.5522, 0.7185],
+            [0.3010, 0.1491, 0.5008],
+            [0.5971, 0.2585, 0.8940],
+            [0.5434, 0.5287, 0.9507],
+        ],
+        # loc
+        [-1, 1, 50],
+        # shape
+        [
+            [1.0000, 0.5000, 0.2500],
+            [0.5000, 1.0000, -0.1000],
+            [0.2500, -0.1000, 1.0000],
+        ],
+        # df
+        8,
+        # ans
+        [
+            0.00000000000000069609279697467772867405511133763,
+            0.00000000000000073700739052207366474839369535934,
+            0.00000000000000069522909962669171512174435447027,
+            0.00000000000000074212293557998314091880208889767,
+            0.00000000000000077039675154022118593323030449058,
+        ]
+    )]
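+
+    # A minimal reference sketch (not part of scipy's public API): direct
+    # evaluation of the standard multivariate t density, so PDF_TESTS can be
+    # spot-checked without MATLAB. E.g. the first case, x = [1, 2] with
+    # identity shape and df = 4, gives (4/9)**3 / (2*pi) ~= 0.013972.
+    @staticmethod
+    def _mvt_pdf_reference(x, loc, shape, df):
+        from scipy.special import gammaln  # local import keeps sketch self-contained
+        x = np.atleast_2d(x) - np.asarray(loc)
+        shape = np.asarray(shape, dtype=float)
+        p = x.shape[-1]
+        # squared Mahalanobis distance of each row of x
+        maha = np.einsum('ij,jk,ik->i', x, np.linalg.inv(shape), x)
+        log_norm = (gammaln((df + p) / 2.) - gammaln(df / 2.)
+                    - 0.5 * (p * np.log(df * np.pi)
+                             + np.log(np.linalg.det(shape))))
+        return np.exp(log_norm - (df + p) / 2. * np.log1p(maha / df))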
+
+    @pytest.mark.parametrize("x, loc, shape, df, ans", PDF_TESTS)
+    def test_pdf_correctness(self, x, loc, shape, df, ans):
+        dist = multivariate_t(loc, shape, df, seed=0)
+        val = dist.pdf(x)
+        assert_array_almost_equal(val, ans)
+
+    @pytest.mark.parametrize("x, loc, shape, df, ans", PDF_TESTS)
+    def test_logpdf_correct(self, x, loc, shape, df, ans):
+        dist = multivariate_t(loc, shape, df, seed=0)
+        val1 = dist.pdf(x)
+        val2 = dist.logpdf(x)
+        assert_array_almost_equal(np.log(val1), val2)
+
+    # https://github.com/scipy/scipy/issues/10042#issuecomment-576795195
+    def test_mvt_with_df_one_is_cauchy(self):
+        x = [9, 7, 4, 1, -3, 9, 0, -3, -1, 3]
+        val = multivariate_t.pdf(x, df=1)
+        ans = cauchy.pdf(x)
+        assert_array_almost_equal(val, ans)
+
+    def test_mvt_with_high_df_is_approx_normal(self):
+        # `normaltest` returns the chi-squared statistic and the associated
+        # p-value. The null hypothesis is that `x` came from a normal
+        # distribution, so a low p-value represents rejecting the null, i.e.
+        # that it is unlikely that `x` came from a normal distribution.
+        P_VAL_MIN = 0.1
+
+        dist = multivariate_t(0, 1, df=100000, seed=1)
+        samples = dist.rvs(size=100000)
+        _, p = normaltest(samples)
+        assert (p > P_VAL_MIN)
+
+        dist = multivariate_t([-2, 3], [[10, -1], [-1, 10]], df=100000,
+                              seed=42)
+        samples = dist.rvs(size=100000)
+        _, p = normaltest(samples)
+        assert ((p > P_VAL_MIN).all())
+
+    @patch('scipy.stats.multivariate_normal._logpdf')
+    def test_mvt_with_inf_df_calls_normal(self, mock):
+        dist = multivariate_t(0, 1, df=np.inf, seed=7)
+        assert isinstance(dist, multivariate_normal_frozen)
+        multivariate_t.pdf(0, df=np.inf)
+        assert mock.call_count == 1
+        multivariate_t.logpdf(0, df=np.inf)
+        assert mock.call_count == 2
+
+    def test_shape_correctness(self):
+        # pdf and logpdf should return scalar when the
+        # number of samples in x is one.
+        dim = 4
+        loc = np.zeros(dim)
+        shape = np.eye(dim)
+        df = 4.5
+        x = np.zeros(dim)
+        res = multivariate_t(loc, shape, df).pdf(x)
+        assert np.isscalar(res)
+        res = multivariate_t(loc, shape, df).logpdf(x)
+        assert np.isscalar(res)
+
+        # pdf() and logpdf() should return probabilities of shape
+        # (n_samples,) when x has n_samples.
+        n_samples = 7
+        x = np.random.random((n_samples, dim))
+        res = multivariate_t(loc, shape, df).pdf(x)
+        assert (res.shape == (n_samples,))
+        res = multivariate_t(loc, shape, df).logpdf(x)
+        assert (res.shape == (n_samples,))
+
+        # rvs() should return scalar unless a size argument is applied.
+        res = multivariate_t(np.zeros(1), np.eye(1), 1).rvs()
+        assert np.isscalar(res)
+
+        # rvs() should return vector of shape (size,) if size argument
+        # is applied.
+        size = 7
+        res = multivariate_t(np.zeros(1), np.eye(1), 1).rvs(size=size)
+        assert (res.shape == (size,))
+
+    def test_default_arguments(self):
+        dist = multivariate_t()
+        assert_equal(dist.loc, [0])
+        assert_equal(dist.shape, [[1]])
+        assert (dist.df == 1)
+
+    DEFAULT_ARGS_TESTS = [
+        (None, None, None, 0, 1, 1),
+        (None, None, 7, 0, 1, 7),
+        (None, [[7, 0], [0, 7]], None, [0, 0], [[7, 0], [0, 7]], 1),
+        (None, [[7, 0], [0, 7]], 7, [0, 0], [[7, 0], [0, 7]], 7),
+        ([7, 7], None, None, [7, 7], [[1, 0], [0, 1]], 1),
+        ([7, 7], None, 7, [7, 7], [[1, 0], [0, 1]], 7),
+        ([7, 7], [[7, 0], [0, 7]], None, [7, 7], [[7, 0], [0, 7]], 1),
+        ([7, 7], [[7, 0], [0, 7]], 7, [7, 7], [[7, 0], [0, 7]], 7)
+    ]
+
+    @pytest.mark.parametrize("loc, shape, df, loc_ans, shape_ans, df_ans", DEFAULT_ARGS_TESTS)
+    def test_default_args(self, loc, shape, df, loc_ans, shape_ans, df_ans):
+        dist = multivariate_t(loc=loc, shape=shape, df=df)
+        assert_equal(dist.loc, loc_ans)
+        assert_equal(dist.shape, shape_ans)
+        assert (dist.df == df_ans)
+
+    ARGS_SHAPES_TESTS = [
+        (-1, 2, 3, [-1], [[2]], 3),
+        ([-1], [2], 3, [-1], [[2]], 3),
+        (np.array([-1]), np.array([2]), 3, [-1], [[2]], 3)
+    ]
+
+    @pytest.mark.parametrize("loc, shape, df, loc_ans, shape_ans, df_ans", ARGS_SHAPES_TESTS)
+    def test_scalar_list_and_ndarray_arguments(self, loc, shape, df, loc_ans, shape_ans, df_ans):
+        dist = multivariate_t(loc, shape, df)
+        assert_equal(dist.loc, loc_ans)
+        assert_equal(dist.shape, shape_ans)
+        assert_equal(dist.df, df_ans)
+
+    def test_argument_error_handling(self):
+        # `loc` should be a one-dimensional vector.
+        loc = [[1, 1]]
+        assert_raises(ValueError,
+                      multivariate_t,
+                      **dict(loc=loc))
+
+        # `shape` should be scalar or square matrix.
+        shape = [[1, 1], [2, 2], [3, 3]]
+        assert_raises(ValueError,
+                      multivariate_t,
+                      **dict(loc=loc, shape=shape))
+
+        # `df` should be greater than zero.
+        loc = np.zeros(2)
+        shape = np.eye(2)
+        df = -1
+        assert_raises(ValueError,
+                      multivariate_t,
+                      **dict(loc=loc, shape=shape, df=df))
+        df = 0
+        assert_raises(ValueError,
+                      multivariate_t,
+                      **dict(loc=loc, shape=shape, df=df))
+
+    def test_reproducibility(self):
+        rng = np.random.RandomState(4)
+        loc = rng.uniform(size=3)
+        shape = np.eye(3)
+        dist1 = multivariate_t(loc, shape, df=3, seed=2)
+        dist2 = multivariate_t(loc, shape, df=3, seed=2)
+        samples1 = dist1.rvs(size=10)
+        samples2 = dist2.rvs(size=10)
+        assert_equal(samples1, samples2)
+
+    def test_allow_singular(self):
+        # Make shape singular and verify error was raised.
+        args = dict(loc=[0,0], shape=[[0,0],[0,1]], df=1, allow_singular=False)
+        assert_raises(np.linalg.LinAlgError, multivariate_t, **args)
+
+    @pytest.mark.parametrize("size", [(10, 3), (5, 6, 4, 3)])
+    @pytest.mark.parametrize("dim", [2, 3, 4, 5])
+    @pytest.mark.parametrize("df", [1., 2., np.inf])
+    def test_rvs(self, size, dim, df):
+        dist = multivariate_t(np.zeros(dim), np.eye(dim), df)
+        rvs = dist.rvs(size=size)
+        assert rvs.shape == size + (dim, )
+
+
+class TestMultivariateHypergeom:
+    @pytest.mark.parametrize(
+        "x, m, n, expected",
+        [
+            # Ground truth value from R dmvhyper
+            ([3, 4], [5, 10], 7, -1.119814),
+            # test for `n=0`
+            ([3, 4], [5, 10], 0, np.NINF),
+            # test for `x < 0`
+            ([-3, 4], [5, 10], 7, np.NINF),
+            # test for `m < 0` (RuntimeWarning issue)
+            ([3, 4], [-5, 10], 7, np.nan),
+            # test for all `m < 0` and `x.sum() != n`
+            ([[1, 2], [3, 4]], [[-4, -6], [-5, -10]],
+             [3, 7], [np.nan, np.nan]),
+            # test for `x < 0` and `m < 0` (RuntimeWarning issue)
+            ([-3, 4], [-5, 10], 1, np.nan),
+            # test for `x > m`
+            ([1, 11], [10, 1], 12, np.nan),
+            # test for `m < 0` (RuntimeWarning issue)
+            ([1, 11], [10, -1], 12, np.nan),
+            # test for `n < 0`
+            ([3, 4], [5, 10], -7, np.nan),
+            # test for `x.sum() != n`
+            ([3, 3], [5, 10], 7, np.NINF)
+        ]
+    )
+    def test_logpmf(self, x, m, n, expected):
+        vals = multivariate_hypergeom.logpmf(x, m, n)
+        assert_allclose(vals, expected, rtol=1e-6)
+
+    def test_reduces_hypergeom(self):
+        # test that the multivariate_hypergeom pmf reduces to the
+        # hypergeom pmf in the 2d case.
+        val1 = multivariate_hypergeom.pmf(x=[3, 1], m=[10, 5], n=4)
+        val2 = hypergeom.pmf(k=3, M=15, n=4, N=10)
+        assert_allclose(val1, val2, rtol=1e-8)
+
+        val1 = multivariate_hypergeom.pmf(x=[7, 3], m=[15, 10], n=10)
+        val2 = hypergeom.pmf(k=7, M=25, n=10, N=15)
+        assert_allclose(val1, val2, rtol=1e-8)
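+
+        # The identity being exercised, as a worked sketch: with two colors
+        # the multivariate hypergeometric pmf  prod_i C(m_i, x_i) / C(M, n)
+        # collapses to the classical hypergeometric pmf. For the first case,
+        #   C(10, 3) * C(5, 1) / C(15, 4) = 600 / 1365 ~= 0.43956.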
+
+    def test_rvs(self):
+        # test that `rvs` is unbiased and that the sample mean converges
+        # to the true mean as the sample size grows.
+        rv = multivariate_hypergeom(m=[3, 5], n=4)
+        rvs = rv.rvs(size=1000, random_state=123)
+        assert_allclose(rvs.mean(0), rv.mean(), rtol=1e-2)
+
+    def test_rvs_broadcasting(self):
+        rv = multivariate_hypergeom(m=[[3, 5], [5, 10]], n=[4, 9])
+        rvs = rv.rvs(size=(1000, 2), random_state=123)
+        assert_allclose(rvs.mean(0), rv.mean(), rtol=1e-2)
+
+    @pytest.mark.parametrize('m, n', (
+        ([0, 0, 20, 0, 0], 5), ([0, 0, 0, 0, 0], 0),
+        ([0, 0], 0), ([0], 0)
+    ))
+    def test_rvs_gh16171(self, m, n):
+        res = multivariate_hypergeom.rvs(m, n)
+        m = np.asarray(m)
+        res_ex = m.copy()
+        res_ex[m != 0] = n
+        assert_equal(res, res_ex)
+
+    @pytest.mark.parametrize(
+        "x, m, n, expected",
+        [
+            ([5], [5], 5, 1),
+            ([3, 4], [5, 10], 7, 0.3263403),
+            # Ground truth value from R dmvhyper
+            ([[[3, 5], [0, 8]], [[-1, 9], [1, 1]]],
+             [5, 10], [[8, 8], [8, 2]],
+             [[0.3916084, 0.006993007], [0, 0.4761905]]),
+            # test with empty arrays.
+            (np.array([], np.int_), np.array([], np.int_), 0, []),
+            ([1, 2], [4, 5], 5, 0),
+            # Ground truth value from R dmvhyper
+            ([3, 3, 0], [5, 6, 7], 6, 0.01077354)
+        ]
+    )
+    def test_pmf(self, x, m, n, expected):
+        vals = multivariate_hypergeom.pmf(x, m, n)
+        assert_allclose(vals, expected, rtol=1e-7)
+
+    @pytest.mark.parametrize(
+        "x, m, n, expected",
+        [
+            ([3, 4], [[5, 10], [10, 15]], 7, [0.3263403, 0.3407531]),
+            ([[1], [2]], [[3], [4]], [1, 3], [1., 0.]),
+            ([[[1], [2]]], [[3], [4]], [1, 3], [[1., 0.]]),
+            ([[1], [2]], [[[[3]]]], [1, 3], [[[1., 0.]]])
+        ]
+    )
+    def test_pmf_broadcasting(self, x, m, n, expected):
+        vals = multivariate_hypergeom.pmf(x, m, n)
+        assert_allclose(vals, expected, rtol=1e-7)
+
+    def test_cov(self):
+        cov1 = multivariate_hypergeom.cov(m=[3, 7, 10], n=12)
+        cov2 = [[0.64421053, -0.26526316, -0.37894737],
+                [-0.26526316, 1.14947368, -0.88421053],
+                [-0.37894737, -0.88421053, 1.26315789]]
+        assert_allclose(cov1, cov2, rtol=1e-8)
+
+    def test_cov_broadcasting(self):
+        cov1 = multivariate_hypergeom.cov(m=[[7, 9], [10, 15]], n=[8, 12])
+        cov2 = [[[1.05, -1.05], [-1.05, 1.05]],
+                [[1.56, -1.56], [-1.56, 1.56]]]
+        assert_allclose(cov1, cov2, rtol=1e-8)
+
+        cov3 = multivariate_hypergeom.cov(m=[[4], [5]], n=[4, 5])
+        cov4 = [[[0.]], [[0.]]]
+        assert_allclose(cov3, cov4, rtol=1e-8)
+
+        cov5 = multivariate_hypergeom.cov(m=[7, 9], n=[8, 12])
+        cov6 = [[[1.05, -1.05], [-1.05, 1.05]],
+                [[0.7875, -0.7875], [-0.7875, 0.7875]]]
+        assert_allclose(cov5, cov6, rtol=1e-8)
+
+    def test_var(self):
+        # test with hypergeom
+        var0 = multivariate_hypergeom.var(m=[10, 5], n=4)
+        var1 = hypergeom.var(M=15, n=4, N=10)
+        assert_allclose(var0, var1, rtol=1e-8)
+
+    def test_var_broadcasting(self):
+        var0 = multivariate_hypergeom.var(m=[10, 5], n=[4, 8])
+        var1 = multivariate_hypergeom.var(m=[10, 5], n=4)
+        var2 = multivariate_hypergeom.var(m=[10, 5], n=8)
+        assert_allclose(var0[0], var1, rtol=1e-8)
+        assert_allclose(var0[1], var2, rtol=1e-8)
+
+        var3 = multivariate_hypergeom.var(m=[[10, 5], [10, 14]], n=[4, 8])
+        var4 = [[0.6984127, 0.6984127], [1.352657, 1.352657]]
+        assert_allclose(var3, var4, rtol=1e-8)
+
+        var5 = multivariate_hypergeom.var(m=[[5], [10]], n=[5, 10])
+        var6 = [[0.], [0.]]
+        assert_allclose(var5, var6, rtol=1e-8)
+
+    def test_mean(self):
+        # test with hypergeom
+        mean0 = multivariate_hypergeom.mean(m=[10, 5], n=4)
+        mean1 = hypergeom.mean(M=15, n=4, N=10)
+        assert_allclose(mean0[0], mean1, rtol=1e-8)
+
+        mean2 = multivariate_hypergeom.mean(m=[12, 8], n=10)
+        mean3 = [12.*10./20., 8.*10./20.]
+        assert_allclose(mean2, mean3, rtol=1e-8)
+
+    def test_mean_broadcasting(self):
+        mean0 = multivariate_hypergeom.mean(m=[[3, 5], [10, 5]], n=[4, 8])
+        mean1 = [[3.*4./8., 5.*4./8.], [10.*8./15., 5.*8./15.]]
+        assert_allclose(mean0, mean1, rtol=1e-8)
+
+    def test_mean_edge_cases(self):
+        mean0 = multivariate_hypergeom.mean(m=[0, 0, 0], n=0)
+        assert_equal(mean0, [0., 0., 0.])
+
+        mean1 = multivariate_hypergeom.mean(m=[1, 0, 0], n=2)
+        assert_equal(mean1, [np.nan, np.nan, np.nan])
+
+        mean2 = multivariate_hypergeom.mean(m=[[1, 0, 0], [1, 0, 1]], n=2)
+        assert_allclose(mean2, [[np.nan, np.nan, np.nan], [1., 0., 1.]],
+                        rtol=1e-17)
+
+        mean3 = multivariate_hypergeom.mean(m=np.array([], np.int_), n=0)
+        assert_equal(mean3, [])
+        assert_(mean3.shape == (0, ))
+
+    def test_var_edge_cases(self):
+        var0 = multivariate_hypergeom.var(m=[0, 0, 0], n=0)
+        assert_allclose(var0, [0., 0., 0.], rtol=1e-16)
+
+        var1 = multivariate_hypergeom.var(m=[1, 0, 0], n=2)
+        assert_equal(var1, [np.nan, np.nan, np.nan])
+
+        var2 = multivariate_hypergeom.var(m=[[1, 0, 0], [1, 0, 1]], n=2)
+        assert_allclose(var2, [[np.nan, np.nan, np.nan], [0., 0., 0.]],
+                        rtol=1e-17)
+
+        var3 = multivariate_hypergeom.var(m=np.array([], np.int_), n=0)
+        assert_equal(var3, [])
+        assert_(var3.shape == (0, ))
+
+    def test_cov_edge_cases(self):
+        cov0 = multivariate_hypergeom.cov(m=[1, 0, 0], n=1)
+        cov1 = [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]
+        assert_allclose(cov0, cov1, rtol=1e-17)
+
+        cov3 = multivariate_hypergeom.cov(m=[0, 0, 0], n=0)
+        cov4 = [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]
+        assert_equal(cov3, cov4)
+
+        cov5 = multivariate_hypergeom.cov(m=np.array([], np.int_), n=0)
+        cov6 = np.array([], dtype=np.float_).reshape(0, 0)
+        assert_allclose(cov5, cov6, rtol=1e-17)
+        assert_(cov5.shape == (0, 0))
+
+    def test_frozen(self):
+        # The frozen distribution should agree with the regular one
+        np.random.seed(1234)
+        n = 12
+        m = [7, 9, 11, 13]
+        x = [[0, 0, 0, 12], [0, 0, 1, 11], [0, 1, 1, 10],
+             [1, 1, 1, 9], [1, 1, 2, 8]]
+        x = np.asarray(x, dtype=np.int_)
+        mhg_frozen = multivariate_hypergeom(m, n)
+        assert_allclose(mhg_frozen.pmf(x),
+                        multivariate_hypergeom.pmf(x, m, n))
+        assert_allclose(mhg_frozen.logpmf(x),
+                        multivariate_hypergeom.logpmf(x, m, n))
+        assert_allclose(mhg_frozen.var(), multivariate_hypergeom.var(m, n))
+        assert_allclose(mhg_frozen.cov(), multivariate_hypergeom.cov(m, n))
+
+    def test_invalid_params(self):
+        assert_raises(ValueError, multivariate_hypergeom.pmf, 5, 10, 5)
+        assert_raises(ValueError, multivariate_hypergeom.pmf, 5, [10], 5)
+        assert_raises(ValueError, multivariate_hypergeom.pmf, [5, 4], [10], 5)
+        assert_raises(TypeError, multivariate_hypergeom.pmf, [5.5, 4.5],
+                      [10, 15], 5)
+        assert_raises(TypeError, multivariate_hypergeom.pmf, [5, 4],
+                      [10.5, 15.5], 5)
+        assert_raises(TypeError, multivariate_hypergeom.pmf, [5, 4],
+                      [10, 15], 5.5)
+
+
+class TestRandomTable:
+    def get_rng(self):
+        return np.random.default_rng(628174795866951638)
+
+    def test_process_parameters(self):
+        message = "`row` must be one-dimensional"
+        with pytest.raises(ValueError, match=message):
+            random_table([[1, 2]], [1, 2])
+
+        message = "`col` must be one-dimensional"
+        with pytest.raises(ValueError, match=message):
+            random_table([1, 2], [[1, 2]])
+
+        message = "each element of `row` must be non-negative"
+        with pytest.raises(ValueError, match=message):
+            random_table([1, -1], [1, 2])
+
+        message = "each element of `col` must be non-negative"
+        with pytest.raises(ValueError, match=message):
+            random_table([1, 2], [1, -2])
+
+        message = "sums over `row` and `col` must be equal"
+        with pytest.raises(ValueError, match=message):
+            random_table([1, 2], [1, 0])
+
+        message = "each element of `row` must be an integer"
+        with pytest.raises(ValueError, match=message):
+            random_table([2.1, 2.1], [1, 1, 2])
+
+        message = "each element of `col` must be an integer"
+        with pytest.raises(ValueError, match=message):
+            random_table([1, 2], [1.1, 1.1, 1])
+
+        row = [1, 3]
+        col = [2, 1, 1]
+        r, c, n = random_table._process_parameters([1, 3], [2, 1, 1])
+        assert_equal(row, r)
+        assert_equal(col, c)
+        assert n == np.sum(row)
+
+    @pytest.mark.parametrize("scale,method",
+                             ((1, "boyett"), (100, "patefield")))
+    def test_process_rvs_method_on_None(self, scale, method):
+        row = np.array([1, 3]) * scale
+        col = np.array([2, 1, 1]) * scale
+
+        ct = random_table
+        expected = ct.rvs(row, col, method=method, random_state=1)
+        got = ct.rvs(row, col, method=None, random_state=1)
+
+        assert_equal(expected, got)
+
+    def test_process_rvs_method_bad_argument(self):
+        row = [1, 3]
+        col = [2, 1, 1]
+
+        # The order of items in the set is random, so we cannot match the
+        # full error message; match only its stable prefix.
+        message = "'foo' not recognized, must be one of"
+        with pytest.raises(ValueError, match=message):
+            random_table.rvs(row, col, method="foo")
+
+    @pytest.mark.parametrize('frozen', (True, False))
+    @pytest.mark.parametrize('log', (True, False))
+    def test_pmf_logpmf(self, frozen, log):
+        # The pmf is tested through random sample generation
+        # with Boyett's algorithm, whose implementation is simple
+        # enough to verify manually for correctness.
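+        # For reference, the classical pmf of a table X with fixed margins
+        # (Boyett, 1979) is
+        #   P(X) = prod_i row_i! * prod_j col_j! / (n! * prod_ij X_ij!)
+        # which the empirical frequencies below should approximate.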
+        rng = self.get_rng()
+        row = [2, 6]
+        col = [1, 3, 4]
+        rvs = random_table.rvs(row, col, size=1000,
+                               method="boyett", random_state=rng)
+
+        obj = random_table(row, col) if frozen else random_table
+        method = getattr(obj, "logpmf" if log else "pmf")
+        if not frozen:
+            original_method = method
+
+            def method(x):
+                return original_method(x, row, col)
+        pmf = (lambda x: np.exp(method(x))) if log else method
+
+        unique_rvs, counts = np.unique(rvs, axis=0, return_counts=True)
+
+        # rough accuracy check
+        p = pmf(unique_rvs)
+        assert_allclose(p * len(rvs), counts, rtol=0.1)
+
+        # accept any iterable
+        p2 = pmf(list(unique_rvs[0]))
+        assert_equal(p2, p[0])
+
+        # accept high-dimensional input and 2d input
+        rvs_nd = rvs.reshape((10, 100) + rvs.shape[1:])
+        p = pmf(rvs_nd)
+        assert p.shape == (10, 100)
+        for i in range(p.shape[0]):
+            for j in range(p.shape[1]):
+                pij = p[i, j]
+                rvij = rvs_nd[i, j]
+                qij = pmf(rvij)
+                assert_equal(pij, qij)
+
+        # probability is zero if column marginal does not match
+        x = [[0, 1, 1], [2, 1, 3]]
+        assert_equal(np.sum(x, axis=-1), row)
+        p = pmf(x)
+        assert p == 0
+
+        # probability is zero if row marginal does not match
+        x = [[0, 1, 2], [1, 2, 2]]
+        assert_equal(np.sum(x, axis=-2), col)
+        p = pmf(x)
+        assert p == 0
+
+        # response to invalid inputs
+        message = "`x` must be at least two-dimensional"
+        with pytest.raises(ValueError, match=message):
+            pmf([1])
+
+        message = "`x` must contain only integral values"
+        with pytest.raises(ValueError, match=message):
+            pmf([[1.1]])
+
+        message = "`x` must contain only integral values"
+        with pytest.raises(ValueError, match=message):
+            pmf([[np.nan]])
+
+        message = "`x` must contain only non-negative values"
+        with pytest.raises(ValueError, match=message):
+            pmf([[-1]])
+
+        message = "shape of `x` must agree with `row`"
+        with pytest.raises(ValueError, match=message):
+            pmf([[1, 2, 3]])
+
+        message = "shape of `x` must agree with `col`"
+        with pytest.raises(ValueError, match=message):
+            pmf([[1, 2],
+                 [3, 4]])
+
+    @pytest.mark.parametrize("method", ("boyett", "patefield"))
+    def test_rvs_mean(self, method):
+        # test that `rvs` is unbiased and that the sample mean converges
+        # to the true mean as the sample size grows.
+        rng = self.get_rng()
+        row = [2, 6]
+        col = [1, 3, 4]
+        rvs = random_table.rvs(row, col, size=1000, method=method,
+                               random_state=rng)
+        mean = random_table.mean(row, col)
+        assert_equal(np.sum(mean), np.sum(row))
+        assert_allclose(rvs.mean(0), mean, atol=0.05)
+        assert_equal(rvs.sum(axis=-1), np.broadcast_to(row, (1000, 2)))
+        assert_equal(rvs.sum(axis=-2), np.broadcast_to(col, (1000, 3)))
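+        # (The mean table is the classical expected-count matrix of a
+        #  contingency table: mean[i, j] = row[i] * col[j] / n.)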
+
+    def test_rvs_cov(self):
+        # test if `rvs` generated with patefield and boyett algorithms
+        # produce approximately the same covariance matrix
+        rng = self.get_rng()
+        row = [2, 6]
+        col = [1, 3, 4]
+        rvs1 = random_table.rvs(row, col, size=10000, method="boyett",
+                                random_state=rng)
+        rvs2 = random_table.rvs(row, col, size=10000, method="patefield",
+                                random_state=rng)
+        cov1 = np.var(rvs1, axis=0)
+        cov2 = np.var(rvs2, axis=0)
+        assert_allclose(cov1, cov2, atol=0.02)
+
+    @pytest.mark.parametrize("method", ("boyett", "patefield"))
+    def test_rvs_size(self, method):
+        row = [2, 6]
+        col = [1, 3, 4]
+
+        # test size `None`
+        rv = random_table.rvs(row, col, method=method,
+                              random_state=self.get_rng())
+        assert rv.shape == (2, 3)
+
+        # test size 1
+        rv2 = random_table.rvs(row, col, size=1, method=method,
+                               random_state=self.get_rng())
+        assert rv2.shape == (1, 2, 3)
+        assert_equal(rv, rv2[0])
+
+        # test size 0
+        rv3 = random_table.rvs(row, col, size=0, method=method,
+                               random_state=self.get_rng())
+        assert rv3.shape == (0, 2, 3)
+
+        # test other valid size
+        rv4 = random_table.rvs(row, col, size=20, method=method,
+                               random_state=self.get_rng())
+        assert rv4.shape == (20, 2, 3)
+
+        rv5 = random_table.rvs(row, col, size=(4, 5), method=method,
+                               random_state=self.get_rng())
+        assert rv5.shape == (4, 5, 2, 3)
+
+        assert_allclose(rv5.reshape(20, 2, 3), rv4, rtol=1e-15)
+
+        # test invalid size
+        message = "`size` must be a non-negative integer or `None`"
+        with pytest.raises(ValueError, match=message):
+            random_table.rvs(row, col, size=-1, method=method,
+                             random_state=self.get_rng())
+
+        with pytest.raises(ValueError, match=message):
+            random_table.rvs(row, col, size=np.nan, method=method,
+                             random_state=self.get_rng())
+
+    @pytest.mark.parametrize("method", ("boyett", "patefield"))
+    def test_rvs_method(self, method):
+        # This test assumes that pmf is correct and checks that random samples
+        # follow this probability distribution. This seems like a circular
+        # argument, since pmf is checked in test_pmf_logpmf with random samples
+        # generated with the rvs method. This test is not redundant, because
+        # test_pmf_logpmf intentionally uses rvs generation with Boyett only,
+        # but here we test both Boyett and Patefield.
+        row = [2, 6]
+        col = [1, 3, 4]
+
+        ct = random_table
+        rvs = ct.rvs(row, col, size=100000, method=method,
+                     random_state=self.get_rng())
+
+        unique_rvs, counts = np.unique(rvs, axis=0, return_counts=True)
+
+        # generated frequencies should match expected frequencies
+        p = ct.pmf(unique_rvs, row, col)
+        assert_allclose(p * len(rvs), counts, rtol=0.02)
+
+    @pytest.mark.parametrize("method", ("boyett", "patefield"))
+    def test_rvs_with_zeros_in_col_row(self, method):
+        row = [0, 1, 0]
+        col = [1, 0, 0, 0]
+        d = random_table(row, col)
+        rv = d.rvs(1000, method=method, random_state=self.get_rng())
+        expected = np.zeros((1000, len(row), len(col)))
+        expected[...] = [[0, 0, 0, 0],
+                         [1, 0, 0, 0],
+                         [0, 0, 0, 0]]
+        assert_equal(rv, expected)
+
+    @pytest.mark.parametrize("method", (None, "boyett", "patefield"))
+    @pytest.mark.parametrize("col", ([], [0]))
+    @pytest.mark.parametrize("row", ([], [0]))
+    def test_rvs_with_edge_cases(self, method, row, col):
+        d = random_table(row, col)
+        rv = d.rvs(10, method=method, random_state=self.get_rng())
+        expected = np.zeros((10, len(row), len(col)))
+        assert_equal(rv, expected)
+
+    @pytest.mark.parametrize('v', (1, 2))
+    def test_rvs_rcont(self, v):
+        # This test checks the internal low-level interface.
+        # It is implicitly also checked by the other test_rvs* calls.
+        import scipy.stats._rcont as _rcont
+
+        row = np.array([1, 3], dtype=np.int64)
+        col = np.array([2, 1, 1], dtype=np.int64)
+
+        rvs = getattr(_rcont, f"rvs_rcont{v}")
+
+        ntot = np.sum(row)
+        result = rvs(row, col, ntot, 1, self.get_rng())
+
+        assert result.shape == (1, len(row), len(col))
+        assert np.sum(result) == ntot
+
+    def test_frozen(self):
+        row = [2, 6]
+        col = [1, 3, 4]
+        d = random_table(row, col, seed=self.get_rng())
+
+        sample = d.rvs()
+
+        expected = random_table.mean(row, col)
+        assert_equal(expected, d.mean())
+
+        expected = random_table.pmf(sample, row, col)
+        assert_equal(expected, d.pmf(sample))
+
+        expected = random_table.logpmf(sample, row, col)
+        assert_equal(expected, d.logpmf(sample))
+
+    @pytest.mark.parametrize("method", ("boyett", "patefield"))
+    def test_rvs_frozen(self, method):
+        row = [2, 6]
+        col = [1, 3, 4]
+        d = random_table(row, col, seed=self.get_rng())
+
+        expected = random_table.rvs(row, col, size=10, method=method,
+                                    random_state=self.get_rng())
+        got = d.rvs(size=10, method=method)
+        assert_equal(expected, got)
+
+def check_pickling(distfn, args):
+    # check that a distribution instance pickles and unpickles
+    # pay special attention to the random_state property
+
+    # save the random_state (restore later)
+    rndm = distfn.random_state
+
+    distfn.random_state = 1234
+    distfn.rvs(*args, size=8)
+    s = pickle.dumps(distfn)
+    r0 = distfn.rvs(*args, size=8)
+
+    unpickled = pickle.loads(s)
+    r1 = unpickled.rvs(*args, size=8)
+    assert_equal(r0, r1)
+
+    # restore the random_state
+    distfn.random_state = rndm
+
+
+def test_random_state_property():
+    scale = np.eye(3)
+    scale[0, 1] = 0.5
+    scale[1, 0] = 0.5
+    dists = [
+        [multivariate_normal, ()],
+        [dirichlet, (np.array([1.]), )],
+        [wishart, (10, scale)],
+        [invwishart, (10, scale)],
+        [multinomial, (5, [0.5, 0.4, 0.1])],
+        [ortho_group, (2,)],
+        [special_ortho_group, (2,)]
+    ]
+    for distfn, args in dists:
+        check_random_state_property(distfn, args)
+        check_pickling(distfn, args)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_odds_ratio.py b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_odds_ratio.py
new file mode 100644
index 00000000..ffb38a05
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_odds_ratio.py
@@ -0,0 +1,147 @@
+import pytest
+import numpy as np
+from numpy.testing import assert_equal, assert_allclose
+from .._discrete_distns import nchypergeom_fisher, hypergeom
+from scipy.stats._odds_ratio import odds_ratio
+from .data.fisher_exact_results_from_r import data
+
+
+class TestOddsRatio:
+
+    @pytest.mark.parametrize('parameters, rresult', data)
+    def test_results_from_r(self, parameters, rresult):
+        alternative = parameters.alternative.replace('.', '-')
+        result = odds_ratio(parameters.table)
+        # The results computed by R are not very accurate.
+        if result.statistic < 400:
+            or_rtol = 5e-4
+            ci_rtol = 2e-2
+        else:
+            or_rtol = 5e-2
+            ci_rtol = 1e-1
+        assert_allclose(result.statistic,
+                        rresult.conditional_odds_ratio, rtol=or_rtol)
+        ci = result.confidence_interval(parameters.confidence_level,
+                                        alternative)
+        assert_allclose((ci.low, ci.high), rresult.conditional_odds_ratio_ci,
+                        rtol=ci_rtol)
+
+        # Also do a self-check for the conditional odds ratio.
+        # With the computed conditional odds ratio as the noncentrality
+        # parameter of the noncentral hypergeometric distribution with
+        # parameters table.sum(), table[0].sum(), and table[:,0].sum() as
+        # total, ngood and nsample, respectively, the mean of the distribution
+        # should equal table[0, 0].
+        cor = result.statistic
+        table = np.array(parameters.table)
+        total = table.sum()
+        ngood = table[0].sum()
+        nsample = table[:, 0].sum()
+        # nchypergeom_fisher does not allow the edge cases where the
+        # noncentrality parameter is 0 or inf, so handle those values
+        # separately here.
+        if cor == 0:
+            nchg_mean = hypergeom.support(total, ngood, nsample)[0]
+        elif cor == np.inf:
+            nchg_mean = hypergeom.support(total, ngood, nsample)[1]
+        else:
+            nchg_mean = nchypergeom_fisher.mean(total, ngood, nsample, cor)
+        assert_allclose(nchg_mean, table[0, 0], rtol=1e-13)
+
+        # Check that the confidence interval is correct.
+        alpha = 1 - parameters.confidence_level
+        if alternative == 'two-sided':
+            if ci.low > 0:
+                sf = nchypergeom_fisher.sf(table[0, 0] - 1,
+                                           total, ngood, nsample, ci.low)
+                assert_allclose(sf, alpha/2, rtol=1e-11)
+            if np.isfinite(ci.high):
+                cdf = nchypergeom_fisher.cdf(table[0, 0],
+                                             total, ngood, nsample, ci.high)
+                assert_allclose(cdf, alpha/2, rtol=1e-11)
+        elif alternative == 'less':
+            if np.isfinite(ci.high):
+                cdf = nchypergeom_fisher.cdf(table[0, 0],
+                                             total, ngood, nsample, ci.high)
+                assert_allclose(cdf, alpha, rtol=1e-11)
+        else:
+            # alternative == 'greater'
+            if ci.low > 0:
+                sf = nchypergeom_fisher.sf(table[0, 0] - 1,
+                                           total, ngood, nsample, ci.low)
+                assert_allclose(sf, alpha, rtol=1e-11)
+
+    @pytest.mark.parametrize('table', [
+        [[0, 0], [5, 10]],
+        [[5, 10], [0, 0]],
+        [[0, 5], [0, 10]],
+        [[5, 0], [10, 0]],
+    ])
+    def test_row_or_col_zero(self, table):
+        result = odds_ratio(table)
+        assert_equal(result.statistic, np.nan)
+        ci = result.confidence_interval()
+        assert_equal((ci.low, ci.high), (0, np.inf))
+
+    @pytest.mark.parametrize("case",
+                             [[0.95, 'two-sided', 0.4879913, 2.635883],
+                              [0.90, 'two-sided', 0.5588516, 2.301663]])
+    def test_sample_odds_ratio_ci(self, case):
+        # Compare the sample odds ratio confidence interval to the R function
+        # oddsratio.wald from the epitools package, e.g.
+        # > library(epitools)
+        # > table = matrix(c(10, 20, 41, 93), nrow=2, ncol=2, byrow=TRUE)
+        # > result = oddsratio.wald(table)
+        # > result$measure
+        #           odds ratio with 95% C.I.
+        # Predictor  estimate     lower    upper
+        #   Exposed1 1.000000        NA       NA
+        #   Exposed2 1.134146 0.4879913 2.635883
+
+        confidence_level, alternative, ref_low, ref_high = case
+        table = [[10, 20], [41, 93]]
+        result = odds_ratio(table, kind='sample')
+        assert_allclose(result.statistic, 1.134146, rtol=1e-6)
+        ci = result.confidence_interval(confidence_level, alternative)
+        assert_allclose([ci.low, ci.high], [ref_low, ref_high], rtol=1e-6)
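+
+        # Worked check of the Wald interval reproduced here: the sample odds
+        # ratio is (10 * 93) / (20 * 41) = 930 / 820 ~= 1.134146, and the
+        # 95% CI is exp(log(OR) +/- 1.96 * sqrt(1/10 + 1/20 + 1/41 + 1/93))
+        # ~= (0.488, 2.636), matching the R output above.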
+
+    @pytest.mark.parametrize('alternative', ['less', 'greater', 'two-sided'])
+    def test_sample_odds_ratio_one_sided_ci(self, alternative):
+        # can't find a good reference for one-sided CI, so bump up the sample
+        # size and compare against the conditional odds ratio CI
+        table = [[1000, 2000], [4100, 9300]]
+        res = odds_ratio(table, kind='sample')
+        ref = odds_ratio(table, kind='conditional')
+        assert_allclose(res.statistic, ref.statistic, atol=1e-5)
+        assert_allclose(res.confidence_interval(alternative=alternative),
+                        ref.confidence_interval(alternative=alternative),
+                        atol=2e-3)
+
+    @pytest.mark.parametrize('kind', ['sample', 'conditional'])
+    @pytest.mark.parametrize('bad_table', [123, "foo", [10, 11, 12]])
+    def test_invalid_table_shape(self, kind, bad_table):
+        with pytest.raises(ValueError, match="Invalid shape"):
+            odds_ratio(bad_table, kind=kind)
+
+    def test_invalid_table_type(self):
+        with pytest.raises(ValueError, match='must be an array of integers'):
+            odds_ratio([[1.0, 3.4], [5.0, 9.9]])
+
+    def test_negative_table_values(self):
+        with pytest.raises(ValueError, match='must be nonnegative'):
+            odds_ratio([[1, 2], [3, -4]])
+
+    def test_invalid_kind(self):
+        with pytest.raises(ValueError, match='`kind` must be'):
+            odds_ratio([[10, 20], [30, 14]], kind='magnetoreluctance')
+
+    def test_invalid_alternative(self):
+        result = odds_ratio([[5, 10], [2, 32]])
+        with pytest.raises(ValueError, match='`alternative` must be'):
+            result.confidence_interval(alternative='depleneration')
+
+    @pytest.mark.parametrize('level', [-0.5, 1.5])
+    def test_invalid_confidence_level(self, level):
+        result = odds_ratio([[5, 10], [2, 32]])
+        with pytest.raises(ValueError, match='must be between 0 and 1'):
+            result.confidence_interval(confidence_level=level)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_qmc.py b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_qmc.py
new file mode 100644
index 00000000..c1f01757
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_qmc.py
@@ -0,0 +1,1326 @@
+import os
+from collections import Counter
+from itertools import combinations, product
+
+import pytest
+import numpy as np
+from numpy.testing import assert_allclose, assert_equal, assert_array_equal
+
+from scipy.spatial import distance
+from scipy.stats import shapiro
+from scipy.stats._sobol import _test_find_index
+from scipy.stats import qmc
+from scipy.stats._qmc import (
+    van_der_corput, n_primes, primes_from_2_to,
+    update_discrepancy, QMCEngine, _l1_norm,
+    _perturb_discrepancy, _lloyd_centroidal_voronoi_tessellation
+)  # noqa
+
+
+class TestUtils:
+    def test_scale(self):
+        # 1d scalar
+        space = [[0], [1], [0.5]]
+        out = [[-2], [6], [2]]
+        scaled_space = qmc.scale(space, l_bounds=-2, u_bounds=6)
+
+        assert_allclose(scaled_space, out)
+
+        # 2d space
+        space = [[0, 0], [1, 1], [0.5, 0.5]]
+        bounds = np.array([[-2, 0], [6, 5]])
+        out = [[-2, 0], [6, 5], [2, 2.5]]
+
+        scaled_space = qmc.scale(space, l_bounds=bounds[0], u_bounds=bounds[1])
+
+        assert_allclose(scaled_space, out)
+
+        scaled_back_space = qmc.scale(scaled_space, l_bounds=bounds[0],
+                                      u_bounds=bounds[1], reverse=True)
+        assert_allclose(scaled_back_space, space)
+
+        # broadcast
+        space = [[0, 0, 0], [1, 1, 1], [0.5, 0.5, 0.5]]
+        l_bounds, u_bounds = 0, [6, 5, 3]
+        out = [[0, 0, 0], [6, 5, 3], [3, 2.5, 1.5]]
+
+        scaled_space = qmc.scale(space, l_bounds=l_bounds, u_bounds=u_bounds)
+
+        assert_allclose(scaled_space, out)
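+
+        # qmc.scale applies the affine map x -> l_bounds + x * (u_bounds -
+        # l_bounds) per dimension (reverse=True inverts it); e.g. 0.5 in
+        # [-2, 6] maps to -2 + 0.5 * 8 = 2, as in the 1d case above.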
+
+    def test_scale_random(self):
+        rng = np.random.default_rng(317589836511269190194010915937762468165)
+        sample = rng.random((30, 10))
+        a = -rng.random(10) * 10
+        b = rng.random(10) * 10
+        scaled = qmc.scale(sample, a, b, reverse=False)
+        unscaled = qmc.scale(scaled, a, b, reverse=True)
+        assert_allclose(unscaled, sample)
+
+    def test_scale_errors(self):
+        with pytest.raises(ValueError, match=r"Sample is not a 2D array"):
+            space = [0, 1, 0.5]
+            qmc.scale(space, l_bounds=-2, u_bounds=6)
+
+        with pytest.raises(ValueError, match=r"Bounds are not consistent"):
+            space = [[0, 0], [1, 1], [0.5, 0.5]]
+            bounds = np.array([[-2, 6], [6, 5]])
+            qmc.scale(space, l_bounds=bounds[0], u_bounds=bounds[1])
+
+        with pytest.raises(ValueError, match=r"'l_bounds' and 'u_bounds'"
+                                             r" must be broadcastable"):
+            space = [[0, 0], [1, 1], [0.5, 0.5]]
+            l_bounds, u_bounds = [-2, 0, 2], [6, 5]
+            qmc.scale(space, l_bounds=l_bounds, u_bounds=u_bounds)
+
+        with pytest.raises(ValueError, match=r"'l_bounds' and 'u_bounds'"
+                                             r" must be broadcastable"):
+            space = [[0, 0], [1, 1], [0.5, 0.5]]
+            bounds = np.array([[-2, 0, 2], [6, 5, 5]])
+            qmc.scale(space, l_bounds=bounds[0], u_bounds=bounds[1])
+
+        with pytest.raises(ValueError, match=r"Sample is not in unit "
+                                             r"hypercube"):
+            space = [[0, 0], [1, 1.5], [0.5, 0.5]]
+            bounds = np.array([[-2, 0], [6, 5]])
+            qmc.scale(space, l_bounds=bounds[0], u_bounds=bounds[1])
+
+        with pytest.raises(ValueError, match=r"Sample is out of bounds"):
+            out = [[-2, 0], [6, 5], [8, 2.5]]
+            bounds = np.array([[-2, 0], [6, 5]])
+            qmc.scale(out, l_bounds=bounds[0], u_bounds=bounds[1],
+                      reverse=True)
+
+    def test_discrepancy(self):
+        space_1 = np.array([[1, 3], [2, 6], [3, 2], [4, 5], [5, 1], [6, 4]])
+        space_1 = (2.0 * space_1 - 1.0) / (2.0 * 6.0)
+        space_2 = np.array([[1, 5], [2, 4], [3, 3], [4, 2], [5, 1], [6, 6]])
+        space_2 = (2.0 * space_2 - 1.0) / (2.0 * 6.0)
+
+        # From Fang et al. Design and modeling for computer experiments, 2006
+        assert_allclose(qmc.discrepancy(space_1), 0.0081, atol=1e-4)
+        assert_allclose(qmc.discrepancy(space_2), 0.0105, atol=1e-4)
+
+        # From Zhou Y.-D. et al. Mixture discrepancy for quasi-random point
+        # sets. Journal of Complexity, 29 (3-4), pp. 283-301, 2013.
+        # Example 4 on Page 298
+        sample = np.array([[2, 1, 1, 2, 2, 2],
+                           [1, 2, 2, 2, 2, 2],
+                           [2, 1, 1, 1, 1, 1],
+                           [1, 1, 1, 1, 2, 2],
+                           [1, 2, 2, 2, 1, 1],
+                           [2, 2, 2, 2, 1, 1],
+                           [2, 2, 2, 1, 2, 2]])
+        sample = (2.0 * sample - 1.0) / (2.0 * 2.0)
+
+        assert_allclose(qmc.discrepancy(sample, method='MD'), 2.5000,
+                        atol=1e-4)
+        assert_allclose(qmc.discrepancy(sample, method='WD'), 1.3680,
+                        atol=1e-4)
+        assert_allclose(qmc.discrepancy(sample, method='CD'), 0.3172,
+                        atol=1e-4)
+
+        # From Tim P. et al. Minimizing the L2 and Linf star discrepancies
+        # of a single point in the unit hypercube. JCAM, 2005
+        # Table 1 on Page 283
+        for dim in [2, 4, 8, 16, 32, 64]:
+            ref = np.sqrt(3**(-dim))
+            assert_allclose(qmc.discrepancy(np.array([[1]*dim]),
+                                            method='L2-star'), ref)
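+
+        # The L2-star reference follows from Warnock's closed form: at the
+        # single point (1, ..., 1) both cross terms vanish, leaving
+        # D**2 = 3**(-dim), hence ref = sqrt(3**(-dim)).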
+
+    def test_discrepancy_errors(self):
+        sample = np.array([[1, 3], [2, 6], [3, 2], [4, 5], [5, 1], [6, 4]])
+
+        with pytest.raises(
+            ValueError, match=r"Sample is not in unit hypercube"
+        ):
+            qmc.discrepancy(sample)
+
+        with pytest.raises(ValueError, match=r"Sample is not a 2D array"):
+            qmc.discrepancy([1, 3])
+
+        sample = [[0, 0], [1, 1], [0.5, 0.5]]
+        with pytest.raises(ValueError, match=r"'toto' is not a valid ..."):
+            qmc.discrepancy(sample, method="toto")
+
+    def test_discrepancy_parallel(self, monkeypatch):
+        sample = np.array([[2, 1, 1, 2, 2, 2],
+                           [1, 2, 2, 2, 2, 2],
+                           [2, 1, 1, 1, 1, 1],
+                           [1, 1, 1, 1, 2, 2],
+                           [1, 2, 2, 2, 1, 1],
+                           [2, 2, 2, 2, 1, 1],
+                           [2, 2, 2, 1, 2, 2]])
+        sample = (2.0 * sample - 1.0) / (2.0 * 2.0)
+
+        assert_allclose(qmc.discrepancy(sample, method='MD', workers=8),
+                        2.5000,
+                        atol=1e-4)
+        assert_allclose(qmc.discrepancy(sample, method='WD', workers=8),
+                        1.3680,
+                        atol=1e-4)
+        assert_allclose(qmc.discrepancy(sample, method='CD', workers=8),
+                        0.3172,
+                        atol=1e-4)
+
+        # From Tim P. et al. Minimizing the L2 and Linf star discrepancies
+        # of a single point in the unit hypercube. JCAM, 2005
+        # Table 1 on Page 283
+        for dim in [2, 4, 8, 16, 32, 64]:
+            ref = np.sqrt(3 ** (-dim))
+            assert_allclose(qmc.discrepancy(np.array([[1] * dim]),
+                                            method='L2-star', workers=-1), ref)
+
+        monkeypatch.setattr(os, 'cpu_count', lambda: None)
+        with pytest.raises(NotImplementedError, match="Cannot determine the"):
+            qmc.discrepancy(sample, workers=-1)
+
+        with pytest.raises(ValueError, match="Invalid number of workers..."):
+            qmc.discrepancy(sample, workers=-2)
+
+    def test_update_discrepancy(self):
+        # From Fang et al. Design and modeling for computer experiments, 2006
+        space_1 = np.array([[1, 3], [2, 6], [3, 2], [4, 5], [5, 1], [6, 4]])
+        space_1 = (2.0 * space_1 - 1.0) / (2.0 * 6.0)
+
+        disc_init = qmc.discrepancy(space_1[:-1], iterative=True)
+        disc_iter = update_discrepancy(space_1[-1], space_1[:-1], disc_init)
+
+        assert_allclose(disc_iter, 0.0081, atol=1e-4)
+
+
+class QMCEngineTests:
+    """Generic tests for QMC engines."""
+    qmce = NotImplemented
+    can_scramble = NotImplemented
+    unscramble_nd = NotImplemented
+    scramble_nd = NotImplemented
+
+    scramble = [True, False]
+    ids = ["Unscrambled", "Scrambled"]
+
+    def engine(self, scramble: bool, **kwargs) -> QMCEngine:
+        seed = np.random.default_rng(170382760648021597650530316304495310428)
+        if self.can_scramble:
+            return self.qmce(scramble=scramble, seed=seed, **kwargs)
+        else:
+            if scramble:
+                pytest.skip()
+            else:
+                return self.qmce(seed=seed, **kwargs)
+
+    def reference(self, scramble: bool) -> np.ndarray:
+        return self.scramble_nd if scramble else self.unscramble_nd
+
+    @pytest.mark.parametrize("scramble", scramble, ids=ids)
+    def test_0dim(self, scramble):
+        engine = self.engine(d=0, scramble=scramble)
+        sample = engine.random(4)
+        assert_array_equal(np.empty((4, 0)), sample)
+
+    @pytest.mark.parametrize("scramble", scramble, ids=ids)
+    def test_0sample(self, scramble):
+        engine = self.engine(d=2, scramble=scramble)
+        sample = engine.random(0)
+        assert_array_equal(np.empty((0, 2)), sample)
+
+    @pytest.mark.parametrize("scramble", scramble, ids=ids)
+    def test_1sample(self, scramble):
+        engine = self.engine(d=2, scramble=scramble)
+        sample = engine.random(1)
+        assert (1, 2) == sample.shape
+
+    @pytest.mark.parametrize("scramble", scramble, ids=ids)
+    def test_bounds(self, scramble):
+        engine = self.engine(d=100, scramble=scramble)
+        sample = engine.random(512)
+        assert np.all(sample >= 0)
+        assert np.all(sample <= 1)
+
+    @pytest.mark.parametrize("scramble", scramble, ids=ids)
+    def test_sample(self, scramble):
+        ref_sample = self.reference(scramble=scramble)
+        engine = self.engine(d=2, scramble=scramble)
+        sample = engine.random(n=len(ref_sample))
+
+        assert_allclose(sample, ref_sample, atol=1e-1)
+        assert engine.num_generated == len(ref_sample)
+
+    @pytest.mark.parametrize("scramble", scramble, ids=ids)
+    def test_continuing(self, scramble):
+        engine = self.engine(d=2, scramble=scramble)
+        ref_sample = engine.random(n=8)
+
+        engine = self.engine(d=2, scramble=scramble)
+
+        n_half = len(ref_sample) // 2
+
+        _ = engine.random(n=n_half)
+        sample = engine.random(n=n_half)
+        assert_allclose(sample, ref_sample[n_half:], atol=1e-1)
+
+    @pytest.mark.parametrize("scramble", scramble, ids=ids)
+    def test_reset(self, scramble):
+        engine = self.engine(d=2, scramble=scramble)
+        ref_sample = engine.random(n=8)
+
+        engine.reset()
+        assert engine.num_generated == 0
+
+        sample = engine.random(n=8)
+        assert_allclose(sample, ref_sample)
+
+    @pytest.mark.parametrize("scramble", scramble, ids=ids)
+    def test_fast_forward(self, scramble):
+        engine = self.engine(d=2, scramble=scramble)
+        ref_sample = engine.random(n=8)
+
+        engine = self.engine(d=2, scramble=scramble)
+
+        engine.fast_forward(4)
+        sample = engine.random(n=4)
+
+        assert_allclose(sample, ref_sample[4:], atol=1e-1)
+
+        # alternate fast forwarding with sampling
+        engine.reset()
+        even_draws = []
+        for i in range(8):
+            if i % 2 == 0:
+                even_draws.append(engine.random())
+            else:
+                engine.fast_forward(1)
+        assert_allclose(
+            ref_sample[[i for i in range(8) if i % 2 == 0]],
+            np.concatenate(even_draws),
+            atol=1e-5
+        )
+
+    @pytest.mark.parametrize("scramble", [True])
+    def test_distribution(self, scramble):
+        d = 50
+        engine = self.engine(d=d, scramble=scramble)
+        sample = engine.random(1024)
+        assert_allclose(
+            np.mean(sample, axis=0), np.repeat(0.5, d), atol=1e-2
+        )
+        assert_allclose(
+            np.percentile(sample, 25, axis=0), np.repeat(0.25, d), atol=1e-2
+        )
+        assert_allclose(
+            np.percentile(sample, 75, axis=0), np.repeat(0.75, d), atol=1e-2
+        )
+
+    def test_raises_optimizer(self):
+        message = r"'toto' is not a valid optimization method"
+        with pytest.raises(ValueError, match=message):
+            self.engine(d=1, scramble=False, optimization="toto")
+
+    @pytest.mark.parametrize(
+        "optimization,metric",
+        [
+            ("random-CD", qmc.discrepancy),
+            ("lloyd", lambda sample: -_l1_norm(sample))]
+    )
+    def test_optimizers(self, optimization, metric):
+        engine = self.engine(d=2, scramble=False)
+        sample_ref = engine.random(n=64)
+        metric_ref = metric(sample_ref)
+
+        optimal_ = self.engine(d=2, scramble=False, optimization=optimization)
+        sample_ = optimal_.random(n=64)
+        metric_ = metric(sample_)
+
+        assert metric_ < metric_ref
+
+
+class TestHalton(QMCEngineTests):
+    qmce = qmc.Halton
+    can_scramble = True
+    # theoretical values known from Van der Corput
+    unscramble_nd = np.array([[0, 0], [1 / 2, 1 / 3],
+                              [1 / 4, 2 / 3], [3 / 4, 1 / 9],
+                              [1 / 8, 4 / 9], [5 / 8, 7 / 9],
+                              [3 / 8, 2 / 9], [7 / 8, 5 / 9]])
+    # theoretical values unknown: convergence properties checked
+    scramble_nd = np.array([[0.50246036, 0.09937553],
+                            [0.00246036, 0.43270887],
+                            [0.75246036, 0.7660422],
+                            [0.25246036, 0.32159776],
+                            [0.62746036, 0.65493109],
+                            [0.12746036, 0.98826442],
+                            [0.87746036, 0.21048664],
+                            [0.37746036, 0.54381998]])
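+
+    # A minimal illustrative sketch (not used by the tests) of where the
+    # unscrambled reference values come from: Halton points are
+    # radical-inverse (Van der Corput) sequences in the first primes,
+    # here bases 2 and 3.
+    @staticmethod
+    def _van_der_corput_ref(n, base):
+        # digits of n in `base`, reflected about the radix point
+        q, denom = 0.0, 1.0
+        while n:
+            denom *= base
+            n, rem = divmod(n, base)
+            q += rem / denom
+        return q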
+
+    def test_workers(self):
+        ref_sample = self.reference(scramble=True)
+        engine = self.engine(d=2, scramble=True)
+        sample = engine.random(n=len(ref_sample), workers=8)
+
+        assert_allclose(sample, ref_sample, atol=1e-3)
+
+        # worker + integers
+        engine.reset()
+        ref_sample = engine.integers(10)
+        engine.reset()
+        sample = engine.integers(10, workers=8)
+        assert_equal(sample, ref_sample)
+
+
+class TestLHS(QMCEngineTests):
+    qmce = qmc.LatinHypercube
+    can_scramble = False
+
+    def test_continuing(self, *args):
+        pytest.skip("Not applicable: not a sequence.")
+
+    def test_fast_forward(self, *args):
+        pytest.skip("Not applicable: not a sequence.")
+
+    def test_sample(self, *args):
+        pytest.skip("Not applicable: the value of reference sample is"
+                    " implementation dependent.")
+
+    @pytest.mark.parametrize("strength", [1, 2])
+    @pytest.mark.parametrize("scramble", [False, True])
+    @pytest.mark.parametrize("optimization", [None, "random-CD"])
+    def test_sample_stratified(self, optimization, scramble, strength):
+        seed = np.random.default_rng(37511836202578819870665127532742111260)
+        p = 5
+        n = p**2
+        d = 6
+
+        engine = qmc.LatinHypercube(d=d, scramble=scramble,
+                                    strength=strength,
+                                    optimization=optimization,
+                                    seed=seed)
+        sample = engine.random(n=n)
+        assert sample.shape == (n, d)
+        assert engine.num_generated == n
+
+        # centering stratifies samples in the middle of equal segments:
+        # * inter-sample distance is constant in 1D sub-projections
+        # * after ordering, columns are equal
+        expected1d = (np.arange(n) + 0.5) / n
+        expected = np.broadcast_to(expected1d, (d, n)).T
+        assert np.any(sample != expected)
+
+        sorted_sample = np.sort(sample, axis=0)
+        tol = 0.5 / n if scramble else 0
+
+        assert_allclose(sorted_sample, expected, atol=tol)
+        assert np.any(sample - expected > tol)
+
+        if strength == 2 and optimization is None:
+            unique_elements = np.arange(p)
+            desired = set(product(unique_elements, unique_elements))
+
+            for i, j in combinations(range(engine.d), 2):
+                samples_2d = sample[:, [i, j]]
+                res = (samples_2d * p).astype(int)
+                res_set = set((tuple(row) for row in res))
+                assert_equal(res_set, desired)
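+            # with strength 2 and n = p**2, the design is an orthogonal
+            # array: every pair of columns covers each of the p x p
+            # sub-cells exactly once, which the set comparison verifies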
+
+    def test_raises(self):
+        message = r"not a valid strength"
+        with pytest.raises(ValueError, match=message):
+            qmc.LatinHypercube(1, strength=3)
+
+        message = r"n is not the square of a prime number"
+        with pytest.raises(ValueError, match=message):
+            engine = qmc.LatinHypercube(d=2, strength=2)
+            engine.random(16)
+
+        message = r"n is not the square of a prime number"
+        with pytest.raises(ValueError, match=message):
+            engine = qmc.LatinHypercube(d=2, strength=2)
+            engine.random(5)  # because int(sqrt(5)) would result in 2
+
+        message = r"n is too small for d"
+        with pytest.raises(ValueError, match=message):
+            engine = qmc.LatinHypercube(d=5, strength=2)
+            engine.random(9)
+
+        message = r"'centered' is deprecated"
+        with pytest.warns(UserWarning, match=message):
+            qmc.LatinHypercube(1, centered=True)
+
+
+class TestSobol(QMCEngineTests):
+    qmce = qmc.Sobol
+    can_scramble = True
+    # theoretical values from Joe & Kuo (2010)
+    unscramble_nd = np.array([[0., 0.],
+                              [0.5, 0.5],
+                              [0.75, 0.25],
+                              [0.25, 0.75],
+                              [0.375, 0.375],
+                              [0.875, 0.875],
+                              [0.625, 0.125],
+                              [0.125, 0.625]])
+
+    # theoretical values unknown: convergence properties checked
+    scramble_nd = np.array([[0.25331921, 0.41371179],
+                            [0.8654213, 0.9821167],
+                            [0.70097554, 0.03664616],
+                            [0.18027647, 0.60895735],
+                            [0.10521339, 0.21897069],
+                            [0.53019685, 0.66619033],
+                            [0.91122276, 0.34580743],
+                            [0.45337471, 0.78912079]])
+
+    def test_warning(self):
+        with pytest.warns(UserWarning, match=r"The balance properties of "
+                                             r"Sobol' points"):
+            engine = qmc.Sobol(1)
+            engine.random(10)
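+        # Sobol' points keep their balance (equidistribution) guarantees
+        # only when n is a power of 2, hence the warning for n=10 here and
+        # the `random_base2(m)` helper below, which draws n = 2**m points.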
+
+    def test_random_base2(self):
+        engine = qmc.Sobol(2, scramble=False)
+        sample = engine.random_base2(2)
+        assert_array_equal(self.unscramble_nd[:4], sample)
+
+        # resampling still having N=2**n
+        sample = engine.random_base2(2)
+        assert_array_equal(self.unscramble_nd[4:8], sample)
+
+        # resampling again but leading to N!=2**n
+        with pytest.raises(ValueError, match=r"The balance properties of "
+                                             r"Sobol' points"):
+            engine.random_base2(2)
+
+    def test_raise(self):
+        with pytest.raises(ValueError, match=r"Maximum supported "
+                                             r"dimensionality"):
+            qmc.Sobol(qmc.Sobol.MAXDIM + 1)
+
+        with pytest.raises(ValueError, match=r"Maximum supported "
+                                             r"'bits' is 64"):
+            qmc.Sobol(1, bits=65)
+
+    def test_high_dim(self):
+        engine = qmc.Sobol(1111, scramble=False)
+        count1 = Counter(engine.random().flatten().tolist())
+        count2 = Counter(engine.random().flatten().tolist())
+        assert_equal(count1, Counter({0.0: 1111}))
+        assert_equal(count2, Counter({0.5: 1111}))
+
+    @pytest.mark.parametrize("bits", [2, 3])
+    def test_bits(self, bits):
+        engine = qmc.Sobol(2, scramble=False, bits=bits)
+        ns = 2**bits
+        sample = engine.random(ns)
+        assert_array_equal(self.unscramble_nd[:ns], sample)
+
+        with pytest.raises(ValueError, match="increasing `bits`"):
+            engine.random()
+
+    def test_64bits(self):
+        engine = qmc.Sobol(2, scramble=False, bits=64)
+        sample = engine.random(8)
+        assert_array_equal(self.unscramble_nd, sample)
+
+
+class TestPoisson(QMCEngineTests):
+    qmce = qmc.PoissonDisk
+    can_scramble = False
+
+    def test_bounds(self, *args):
+        pytest.skip("Too costly in memory.")
+
+    def test_fast_forward(self, *args):
+        pytest.skip("Not applicable: recursive process.")
+
+    def test_sample(self, *args):
+        pytest.skip("Not applicable: the value of reference sample is"
+                    " implementation dependent.")
+
+    def test_continuing(self, *args):
+        # sampling can be continued, but the order is not preserved:
+        # candidate points are discarded between calls, so the same
+        # centers are not selected again
+        radius = 0.05
+        ns = 6
+        engine = self.engine(d=2, radius=radius, scramble=False)
+
+        sample_init = engine.random(n=ns)
+        assert len(sample_init) <= ns
+        assert l2_norm(sample_init) >= radius
+
+        sample_continued = engine.random(n=ns)
+        assert len(sample_continued) <= ns
+        assert l2_norm(sample_continued) >= radius
+
+        sample = np.concatenate([sample_init, sample_continued], axis=0)
+        assert len(sample) <= ns * 2
+        assert l2_norm(sample) >= radius
+
+    def test_mindist(self):
+        rng = np.random.default_rng(132074951149370773672162394161442690287)
+        ns = 50
+
+        low, high = 0.08, 0.2
+        radii = (high - low) * rng.random(5) + low
+
+        dimensions = [1, 3, 4]
+        hypersphere_methods = ["volume", "surface"]
+
+        gen = product(dimensions, radii, hypersphere_methods)
+
+        for d, radius, hypersphere in gen:
+            engine = self.qmce(
+                d=d, radius=radius, hypersphere=hypersphere, seed=rng
+            )
+            sample = engine.random(ns)
+
+            assert len(sample) <= ns
+            assert l2_norm(sample) >= radius
+
+    def test_fill_space(self):
+        radius = 0.2
+        engine = self.qmce(d=2, radius=radius)
+
+        sample = engine.fill_space()
+        # filling the space optimally is a circle-packing problem, which is
+        # hard to solve exactly, so only check the minimal-distance bound
+        assert l2_norm(sample) >= radius
+
+    def test_raises(self):
+        message = r"'toto' is not a valid hypersphere sampling"
+        with pytest.raises(ValueError, match=message):
+            qmc.PoissonDisk(1, hypersphere="toto")
+
+
+class TestMultinomialQMC:
+    def test_validations(self):
+        # negative Ps
+        p = np.array([0.12, 0.26, -0.05, 0.35, 0.22])
+        with pytest.raises(ValueError, match=r"Elements of pvals must "
+                                             r"be non-negative."):
+            qmc.MultinomialQMC(p, n_trials=10)
+
+        # sum of P too large
+        p = np.array([0.12, 0.26, 0.1, 0.35, 0.22])
+        message = r"Elements of pvals must sum to 1."
+        with pytest.raises(ValueError, match=message):
+            qmc.MultinomialQMC(p, n_trials=10)
+
+        p = np.array([0.12, 0.26, 0.05, 0.35, 0.22])
+
+        message = r"Dimension of `engine` must be 1."
+        with pytest.raises(ValueError, match=message):
+            qmc.MultinomialQMC(p, n_trials=10, engine=qmc.Sobol(d=2))
+
+        message = r"`engine` must be an instance of..."
+        with pytest.raises(ValueError, match=message):
+            qmc.MultinomialQMC(p, n_trials=10, engine=np.random.default_rng())
+
+    @pytest.mark.filterwarnings('ignore::UserWarning')
+    def test_MultinomialBasicDraw(self):
+        seed = np.random.default_rng(6955663962957011631562466584467607969)
+        p = np.array([0.12, 0.26, 0.05, 0.35, 0.22])
+        expected = np.array([[13, 24, 6, 35, 22]])
+        engine = qmc.MultinomialQMC(p, n_trials=100, seed=seed)
+        assert_array_equal(engine.random(1), expected)
+
+    def test_MultinomialDistribution(self):
+        seed = np.random.default_rng(77797854505813727292048130876699859000)
+        p = np.array([0.12, 0.26, 0.05, 0.35, 0.22])
+        engine = qmc.MultinomialQMC(p, n_trials=8192, seed=seed)
+        draws = engine.random(1)
+        assert_allclose(draws / np.sum(draws), np.atleast_2d(p), atol=1e-4)
+
+    def test_FindIndex(self):
+        p_cumulative = np.array([0.1, 0.4, 0.45, 0.6, 0.75, 0.9, 0.99, 1.0])
+        size = len(p_cumulative)
+        assert_equal(_test_find_index(p_cumulative, size, 0.0), 0)
+        assert_equal(_test_find_index(p_cumulative, size, 0.4), 2)
+        assert_equal(_test_find_index(p_cumulative, size, 0.44999), 2)
+        assert_equal(_test_find_index(p_cumulative, size, 0.45001), 3)
+        assert_equal(_test_find_index(p_cumulative, size, 1.0), size - 1)
+
+    @pytest.mark.filterwarnings('ignore::UserWarning')
+    def test_other_engine(self):
+        # same as test_MultinomialBasicDraw with different engine
+        seed = np.random.default_rng(283753519042773243071753037669078065412)
+        p = np.array([0.12, 0.26, 0.05, 0.35, 0.22])
+        expected = np.array([[12, 25, 5, 36, 22]])
+        base_engine = qmc.Sobol(1, scramble=True, seed=seed)
+        engine = qmc.MultinomialQMC(p, n_trials=100, engine=base_engine,
+                                    seed=seed)
+        assert_array_equal(engine.random(1), expected)
+
+
+class TestNormalQMC:
+    def test_NormalQMC(self):
+        # d = 1
+        engine = qmc.MultivariateNormalQMC(mean=np.zeros(1))
+        samples = engine.random()
+        assert_equal(samples.shape, (1, 1))
+        samples = engine.random(n=5)
+        assert_equal(samples.shape, (5, 1))
+        # d = 2
+        engine = qmc.MultivariateNormalQMC(mean=np.zeros(2))
+        samples = engine.random()
+        assert_equal(samples.shape, (1, 2))
+        samples = engine.random(n=5)
+        assert_equal(samples.shape, (5, 2))
+
+    def test_NormalQMCInvTransform(self):
+        # d = 1
+        engine = qmc.MultivariateNormalQMC(
+            mean=np.zeros(1), inv_transform=True)
+        samples = engine.random()
+        assert_equal(samples.shape, (1, 1))
+        samples = engine.random(n=5)
+        assert_equal(samples.shape, (5, 1))
+        # d = 2
+        engine = qmc.MultivariateNormalQMC(
+            mean=np.zeros(2), inv_transform=True)
+        samples = engine.random()
+        assert_equal(samples.shape, (1, 2))
+        samples = engine.random(n=5)
+        assert_equal(samples.shape, (5, 2))
+
+    def test_NormalQMCSeeded(self):
+        # test even dimension
+        seed = np.random.default_rng(274600237797326520096085022671371676017)
+        engine = qmc.MultivariateNormalQMC(
+            mean=np.zeros(2), inv_transform=False, seed=seed)
+        samples = engine.random(n=2)
+        samples_expected = np.array([[0.446961, -1.243236],
+                                     [-0.230754, 0.21354]])
+        assert_allclose(samples, samples_expected, atol=1e-4)
+
+        # test odd dimension
+        seed = np.random.default_rng(274600237797326520096085022671371676017)
+        engine = qmc.MultivariateNormalQMC(
+            mean=np.zeros(3), inv_transform=False, seed=seed)
+        samples = engine.random(n=2)
+        samples_expected = np.array([[0.446961, -1.243236, 0.324827],
+                                     [-0.997875, 0.399134, 1.032234]])
+        assert_allclose(samples, samples_expected, atol=1e-4)
+
+        # same test with another engine
+        seed = np.random.default_rng(274600237797326520096085022671371676017)
+        base_engine = qmc.Sobol(4, scramble=True, seed=seed)
+        engine = qmc.MultivariateNormalQMC(
+            mean=np.zeros(3), inv_transform=False,
+            engine=base_engine, seed=seed
+        )
+        samples = engine.random(n=2)
+        samples_expected = np.array([[0.446961, -1.243236, 0.324827],
+                                     [-0.997875, 0.399134, 1.032234]])
+        assert_allclose(samples, samples_expected, atol=1e-4)
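+        # Note: a 4-dimensional base engine serves a 3-dimensional normal
+        # because, with inv_transform=False, the Box-Muller transform
+        # consumes uniforms in pairs, so the engine dimension is rounded up
+        # to the next even number.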
+
+    def test_NormalQMCSeededInvTransform(self):
+        # test even dimension
+        seed = np.random.default_rng(288527772707286126646493545351112463929)
+        engine = qmc.MultivariateNormalQMC(
+            mean=np.zeros(2), seed=seed, inv_transform=True)
+        samples = engine.random(n=2)
+        samples_expected = np.array([[-0.804472, 0.384649],
+                                     [0.396424, -0.117676]])
+        assert_allclose(samples, samples_expected, atol=1e-4)
+
+        # test odd dimension
+        seed = np.random.default_rng(288527772707286126646493545351112463929)
+        engine = qmc.MultivariateNormalQMC(
+            mean=np.zeros(3), seed=seed, inv_transform=True)
+        samples = engine.random(n=2)
+        samples_expected = np.array([[-0.804472, 0.384649, 1.583568],
+                                     [0.165333, -2.266828, -1.655572]])
+        assert_allclose(samples, samples_expected, atol=1e-4)
+
+    def test_other_engine(self):
+        for d in (0, 1, 2):
+            base_engine = qmc.Sobol(d=d, scramble=False)
+            engine = qmc.MultivariateNormalQMC(mean=np.zeros(d),
+                                               engine=base_engine,
+                                               inv_transform=True)
+            samples = engine.random()
+            assert_equal(samples.shape, (1, d))
+
+    def test_NormalQMCShapiro(self):
+        rng = np.random.default_rng(13242)
+        engine = qmc.MultivariateNormalQMC(mean=np.zeros(2), seed=rng)
+        samples = engine.random(n=256)
+        assert all(np.abs(samples.mean(axis=0)) < 1e-2)
+        assert all(np.abs(samples.std(axis=0) - 1) < 1e-2)
+        # perform Shapiro-Wilk test for normality
+        for i in (0, 1):
+            _, pval = shapiro(samples[:, i])
+            assert pval > 0.9
+        # make sure samples are uncorrelated
+        cov = np.cov(samples.transpose())
+        assert np.abs(cov[0, 1]) < 1e-2
+
+    def test_NormalQMCShapiroInvTransform(self):
+        rng = np.random.default_rng(3234455)
+        engine = qmc.MultivariateNormalQMC(
+            mean=np.zeros(2), inv_transform=True, seed=rng)
+        samples = engine.random(n=256)
+        assert all(np.abs(samples.mean(axis=0)) < 1e-2)
+        assert all(np.abs(samples.std(axis=0) - 1) < 1e-2)
+        # perform Shapiro-Wilk test for normality
+        for i in (0, 1):
+            _, pval = shapiro(samples[:, i])
+            assert pval > 0.9
+        # make sure samples are uncorrelated
+        cov = np.cov(samples.transpose())
+        assert np.abs(cov[0, 1]) < 1e-2
+
+
+class TestMultivariateNormalQMC:
+
+    def test_validations(self):
+
+        message = r"Dimension of `engine` must be consistent"
+        with pytest.raises(ValueError, match=message):
+            qmc.MultivariateNormalQMC([0], engine=qmc.Sobol(d=2))
+
+        message = r"Dimension of `engine` must be consistent"
+        with pytest.raises(ValueError, match=message):
+            qmc.MultivariateNormalQMC([0, 0, 0], engine=qmc.Sobol(d=4))
+
+        message = r"`engine` must be an instance of..."
+        with pytest.raises(ValueError, match=message):
+            qmc.MultivariateNormalQMC([0, 0], engine=np.random.default_rng())
+
+        message = r"Covariance matrix not PSD."
+        with pytest.raises(ValueError, match=message):
+            qmc.MultivariateNormalQMC([0, 0], [[1, 2], [2, 1]])
+
+        message = r"Covariance matrix is not symmetric."
+        with pytest.raises(ValueError, match=message):
+            qmc.MultivariateNormalQMC([0, 0], [[1, 0], [2, 1]])
+
+        message = r"Dimension mismatch between mean and covariance."
+        with pytest.raises(ValueError, match=message):
+            qmc.MultivariateNormalQMC([0], [[1, 0], [0, 1]])
+
+    def test_MultivariateNormalQMCNonPD(self):
+        # try with non-pd but psd cov; should work
+        engine = qmc.MultivariateNormalQMC(
+            [0, 0, 0], [[1, 0, 1], [0, 1, 1], [1, 1, 2]],
+        )
+        assert engine._corr_matrix is not None
+
+    def test_MultivariateNormalQMC(self):
+        # d = 1 scalar
+        engine = qmc.MultivariateNormalQMC(mean=0, cov=5)
+        samples = engine.random()
+        assert_equal(samples.shape, (1, 1))
+        samples = engine.random(n=5)
+        assert_equal(samples.shape, (5, 1))
+
+        # d = 2 list
+        engine = qmc.MultivariateNormalQMC(mean=[0, 1], cov=[[1, 0], [0, 1]])
+        samples = engine.random()
+        assert_equal(samples.shape, (1, 2))
+        samples = engine.random(n=5)
+        assert_equal(samples.shape, (5, 2))
+
+        # d = 3 np.array
+        mean = np.array([0, 1, 2])
+        cov = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
+        engine = qmc.MultivariateNormalQMC(mean, cov)
+        samples = engine.random()
+        assert_equal(samples.shape, (1, 3))
+        samples = engine.random(n=5)
+        assert_equal(samples.shape, (5, 3))
+
+    def test_MultivariateNormalQMCInvTransform(self):
+        # d = 1 scalar
+        engine = qmc.MultivariateNormalQMC(mean=0, cov=5, inv_transform=True)
+        samples = engine.random()
+        assert_equal(samples.shape, (1, 1))
+        samples = engine.random(n=5)
+        assert_equal(samples.shape, (5, 1))
+
+        # d = 2 list
+        engine = qmc.MultivariateNormalQMC(
+            mean=[0, 1], cov=[[1, 0], [0, 1]], inv_transform=True,
+        )
+        samples = engine.random()
+        assert_equal(samples.shape, (1, 2))
+        samples = engine.random(n=5)
+        assert_equal(samples.shape, (5, 2))
+
+        # d = 3 np.array
+        mean = np.array([0, 1, 2])
+        cov = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
+        engine = qmc.MultivariateNormalQMC(mean, cov, inv_transform=True)
+        samples = engine.random()
+        assert_equal(samples.shape, (1, 3))
+        samples = engine.random(n=5)
+        assert_equal(samples.shape, (5, 3))
+
+    def test_MultivariateNormalQMCSeeded(self):
+        # test even dimension
+        rng = np.random.default_rng(180182791534511062935571481899241825000)
+        a = rng.standard_normal((2, 2))
+        A = a @ a.transpose() + np.diag(rng.random(2))
+        engine = qmc.MultivariateNormalQMC(np.array([0, 0]), A,
+                                           inv_transform=False, seed=rng)
+        samples = engine.random(n=2)
+        samples_expected = np.array([[0.479575, 0.934723],
+                                     [1.712571, 0.172699]])
+        assert_allclose(samples, samples_expected, atol=1e-4)
+
+        # test odd dimension
+        rng = np.random.default_rng(180182791534511062935571481899241825000)
+        a = rng.standard_normal((3, 3))
+        A = a @ a.transpose() + np.diag(rng.random(3))
+        engine = qmc.MultivariateNormalQMC(np.array([0, 0, 0]), A,
+                                           inv_transform=False, seed=rng)
+        samples = engine.random(n=2)
+        samples_expected = np.array([[2.463393, 2.252826, -0.886809],
+                                     [1.252468, 0.029449, -1.126328]])
+        assert_allclose(samples, samples_expected, atol=1e-4)
+
+    def test_MultivariateNormalQMCSeededInvTransform(self):
+        # test even dimension
+        rng = np.random.default_rng(224125808928297329711992996940871155974)
+        a = rng.standard_normal((2, 2))
+        A = a @ a.transpose() + np.diag(rng.random(2))
+        engine = qmc.MultivariateNormalQMC(
+            np.array([0, 0]), A, seed=rng, inv_transform=True
+        )
+        samples = engine.random(n=2)
+        samples_expected = np.array([[-3.095968, -0.566545],
+                                     [0.603154, 0.222434]])
+        assert_allclose(samples, samples_expected, atol=1e-4)
+
+        # test odd dimension
+        rng = np.random.default_rng(224125808928297329711992996940871155974)
+        a = rng.standard_normal((3, 3))
+        A = a @ a.transpose() + np.diag(rng.random(3))
+        engine = qmc.MultivariateNormalQMC(
+            np.array([0, 0, 0]), A, seed=rng, inv_transform=True
+        )
+        samples = engine.random(n=2)
+        samples_expected = np.array([[1.427248, -0.338187, -1.560687],
+                                     [-0.357026, 1.662937, -0.29769]])
+        assert_allclose(samples, samples_expected, atol=1e-4)
+
+    def test_MultivariateNormalQMCShapiro(self):
+        # test the standard case
+        seed = np.random.default_rng(188960007281846377164494575845971645056)
+        engine = qmc.MultivariateNormalQMC(
+            mean=[0, 0], cov=[[1, 0], [0, 1]], seed=seed
+        )
+        samples = engine.random(n=256)
+        assert all(np.abs(samples.mean(axis=0)) < 1e-2)
+        assert all(np.abs(samples.std(axis=0) - 1) < 1e-2)
+        # perform Shapiro-Wilk test for normality
+        for i in (0, 1):
+            _, pval = shapiro(samples[:, i])
+            assert pval > 0.9
+        # make sure samples are uncorrelated
+        cov = np.cov(samples.transpose())
+        assert np.abs(cov[0, 1]) < 1e-2
+
+        # test the correlated, non-zero mean case
+        engine = qmc.MultivariateNormalQMC(
+            mean=[1.0, 2.0], cov=[[1.5, 0.5], [0.5, 1.5]], seed=seed
+        )
+        samples = engine.random(n=256)
+        assert all(np.abs(samples.mean(axis=0) - [1, 2]) < 1e-2)
+        assert all(np.abs(samples.std(axis=0) - np.sqrt(1.5)) < 1e-2)
+        # perform Shapiro-Wilk test for normality
+        for i in (0, 1):
+            _, pval = shapiro(samples[:, i])
+            assert pval > 0.9
+        # check covariance
+        cov = np.cov(samples.transpose())
+        assert np.abs(cov[0, 1] - 0.5) < 1e-2
+
+    def test_MultivariateNormalQMCShapiroInvTransform(self):
+        # test the standard case
+        seed = np.random.default_rng(200089821034563288698994840831440331329)
+        engine = qmc.MultivariateNormalQMC(
+            mean=[0, 0], cov=[[1, 0], [0, 1]], seed=seed, inv_transform=True
+        )
+        samples = engine.random(n=256)
+        assert all(np.abs(samples.mean(axis=0)) < 1e-2)
+        assert all(np.abs(samples.std(axis=0) - 1) < 1e-2)
+        # perform Shapiro-Wilk test for normality
+        for i in (0, 1):
+            _, pval = shapiro(samples[:, i])
+            assert pval > 0.9
+        # make sure samples are uncorrelated
+        cov = np.cov(samples.transpose())
+        assert np.abs(cov[0, 1]) < 1e-2
+
+        # test the correlated, non-zero mean case
+        engine = qmc.MultivariateNormalQMC(
+            mean=[1.0, 2.0],
+            cov=[[1.5, 0.5], [0.5, 1.5]],
+            seed=seed,
+            inv_transform=True,
+        )
+        samples = engine.random(n=256)
+        assert all(np.abs(samples.mean(axis=0) - [1, 2]) < 1e-2)
+        assert all(np.abs(samples.std(axis=0) - np.sqrt(1.5)) < 1e-2)
+        # perform Shapiro-Wilk test for normality
+        for i in (0, 1):
+            _, pval = shapiro(samples[:, i])
+            assert pval > 0.9
+        # check covariance
+        cov = np.cov(samples.transpose())
+        assert np.abs(cov[0, 1] - 0.5) < 1e-2
+
+    def test_MultivariateNormalQMCDegenerate(self):
+        # X, Y iid standard Normal and Z = X + Y, random vector (X, Y, Z)
+        seed = np.random.default_rng(163206374175814483578698216542904486209)
+        engine = qmc.MultivariateNormalQMC(
+            mean=[0.0, 0.0, 0.0],
+            cov=[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [1.0, 1.0, 2.0]],
+            seed=seed,
+        )
+        samples = engine.random(n=512)
+        assert all(np.abs(samples.mean(axis=0)) < 1e-2)
+        assert np.abs(np.std(samples[:, 0]) - 1) < 1e-2
+        assert np.abs(np.std(samples[:, 1]) - 1) < 1e-2
+        assert np.abs(np.std(samples[:, 2]) - np.sqrt(2)) < 1e-2
+        for i in (0, 1, 2):
+            _, pval = shapiro(samples[:, i])
+            assert pval > 0.8
+        cov = np.cov(samples.transpose())
+        assert np.abs(cov[0, 1]) < 1e-2
+        assert np.abs(cov[0, 2] - 1) < 1e-2
+        # check to see if X + Y = Z almost exactly
+        assert all(np.abs(samples[:, 0] + samples[:, 1] - samples[:, 2])
+                   < 1e-5)
+
+
+class TestLloyd:
+    def test_lloyd(self):
+        # quite sensitive to the seed: the criterion can go up before
+        # going further down
+        rng = np.random.RandomState(1809831)
+        sample = rng.uniform(0, 1, size=(128, 2))
+        base_l1 = _l1_norm(sample)
+        base_l2 = l2_norm(sample)
+
+        for _ in range(4):
+            sample_lloyd = _lloyd_centroidal_voronoi_tessellation(
+                    sample, maxiter=1,
+            )
+            curr_l1 = _l1_norm(sample_lloyd)
+            curr_l2 = l2_norm(sample_lloyd)
+
+            # higher is better for the distance measures
+            assert base_l1 < curr_l1
+            assert base_l2 < curr_l2
+
+            base_l1 = curr_l1
+            base_l2 = curr_l2
+
+            sample = sample_lloyd
+
+    def test_lloyd_non_mutating(self):
+        """
+        Verify that the input samples are not mutated in place and that they do
+        not share memory with the output.
+        """
+        sample_orig = np.array([[0.1, 0.1],
+                                [0.1, 0.2],
+                                [0.2, 0.1],
+                                [0.2, 0.2]])
+        sample_copy = sample_orig.copy()
+        new_sample = _lloyd_centroidal_voronoi_tessellation(
+            sample=sample_orig
+        )
+        assert_allclose(sample_orig, sample_copy)
+        assert not np.may_share_memory(sample_orig, new_sample)
+
+    def test_lloyd_errors(self):
+        with pytest.raises(ValueError, match=r"`sample` is not a 2D array"):
+            sample = [0, 1, 0.5]
+            _lloyd_centroidal_voronoi_tessellation(sample)
+
+        msg = r"`sample` dimension is not >= 2"
+        with pytest.raises(ValueError, match=msg):
+            sample = [[0], [0.4], [1]]
+            _lloyd_centroidal_voronoi_tessellation(sample)
+
+        msg = r"`sample` is not in unit hypercube"
+        with pytest.raises(ValueError, match=msg):
+            sample = [[-1.1, 0], [0.1, 0.4], [1, 2]]
+            _lloyd_centroidal_voronoi_tessellation(sample)
+
+
+# mindist: smallest pairwise distance within a sample, used above as a
+# space-filling quality measure
+def l2_norm(sample):
+    return distance.pdist(sample).min()
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_rank.py b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_rank.py
new file mode 100644
index 00000000..a30c390a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_rank.py
@@ -0,0 +1,320 @@
+import numpy as np
+from numpy.testing import assert_equal, assert_array_equal
+
+from scipy.stats import rankdata, tiecorrect
+import pytest
+
+
+class TestTieCorrect:
+
+    def test_empty(self):
+        """An empty array requires no correction, should return 1.0."""
+        ranks = np.array([], dtype=np.float64)
+        c = tiecorrect(ranks)
+        assert_equal(c, 1.0)
+
+    def test_one(self):
+        """A single element requires no correction, should return 1.0."""
+        ranks = np.array([1.0], dtype=np.float64)
+        c = tiecorrect(ranks)
+        assert_equal(c, 1.0)
+
+    def test_no_correction(self):
+        """Arrays with no ties require no correction."""
+        ranks = np.arange(2.0)
+        c = tiecorrect(ranks)
+        assert_equal(c, 1.0)
+        ranks = np.arange(3.0)
+        c = tiecorrect(ranks)
+        assert_equal(c, 1.0)
+
+    def test_basic(self):
+        """Check a few basic examples of the tie correction factor."""
+        # One tie of two elements
+        ranks = np.array([1.0, 2.5, 2.5])
+        c = tiecorrect(ranks)
+        T = 2.0
+        N = ranks.size
+        expected = 1.0 - (T**3 - T) / (N**3 - N)
+        assert_equal(c, expected)
+
+        # One tie of two elements (same as above, but tie is not at the end)
+        ranks = np.array([1.5, 1.5, 3.0])
+        c = tiecorrect(ranks)
+        T = 2.0
+        N = ranks.size
+        expected = 1.0 - (T**3 - T) / (N**3 - N)
+        assert_equal(c, expected)
+
+        # One tie of three elements
+        ranks = np.array([1.0, 3.0, 3.0, 3.0])
+        c = tiecorrect(ranks)
+        T = 3.0
+        N = ranks.size
+        expected = 1.0 - (T**3 - T) / (N**3 - N)
+        assert_equal(c, expected)
+
+        # Two ties, lengths 2 and 3.
+        ranks = np.array([1.5, 1.5, 4.0, 4.0, 4.0])
+        c = tiecorrect(ranks)
+        T1 = 2.0
+        T2 = 3.0
+        N = ranks.size
+        expected = 1.0 - ((T1**3 - T1) + (T2**3 - T2)) / (N**3 - N)
+        assert_equal(c, expected)
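+        # General pattern behind these checks: with tie groups of sizes
+        # t_1, ..., t_k in a sample of size N, the correction factor is
+        #   1 - sum_i (t_i**3 - t_i) / (N**3 - N),
+        # as applied to the Mann-Whitney U and Kruskal-Wallis H statistics.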
+
+    def test_overflow(self):
+        ntie, k = 2000, 5
+        a = np.repeat(np.arange(k), ntie)
+        n = a.size  # ntie * k
+        out = tiecorrect(rankdata(a))
+        assert_equal(out, 1.0 - k * (ntie**3 - ntie) / float(n**3 - n))
+
+
+class TestRankData:
+
+    def test_empty(self):
+        """stats.rankdata([]) should return an empty array."""
+        a = np.array([], dtype=int)
+        r = rankdata(a)
+        assert_array_equal(r, np.array([], dtype=np.float64))
+        r = rankdata([])
+        assert_array_equal(r, np.array([], dtype=np.float64))
+
+    def test_one(self):
+        """Check stats.rankdata with an array of length 1."""
+        data = [100]
+        a = np.array(data, dtype=int)
+        r = rankdata(a)
+        assert_array_equal(r, np.array([1.0], dtype=np.float64))
+        r = rankdata(data)
+        assert_array_equal(r, np.array([1.0], dtype=np.float64))
+
+    def test_basic(self):
+        """Basic tests of stats.rankdata."""
+        data = [100, 10, 50]
+        expected = np.array([3.0, 1.0, 2.0], dtype=np.float64)
+        a = np.array(data, dtype=int)
+        r = rankdata(a)
+        assert_array_equal(r, expected)
+        r = rankdata(data)
+        assert_array_equal(r, expected)
+
+        data = [40, 10, 30, 10, 50]
+        expected = np.array([4.0, 1.5, 3.0, 1.5, 5.0], dtype=np.float64)
+        a = np.array(data, dtype=int)
+        r = rankdata(a)
+        assert_array_equal(r, expected)
+        r = rankdata(data)
+        assert_array_equal(r, expected)
+
+        data = [20, 20, 20, 10, 10, 10]
+        expected = np.array([5.0, 5.0, 5.0, 2.0, 2.0, 2.0], dtype=np.float64)
+        a = np.array(data, dtype=int)
+        r = rankdata(a)
+        assert_array_equal(r, expected)
+        r = rankdata(data)
+        assert_array_equal(r, expected)
+        # The docstring states explicitly that the argument is flattened.
+        a2d = a.reshape(2, 3)
+        r = rankdata(a2d)
+        assert_array_equal(r, expected)
+
+    def test_rankdata_object_string(self):
+        min_rank = lambda a: [1 + sum(i < j for i in a) for j in a]
+        max_rank = lambda a: [sum(i <= j for i in a) for j in a]
+        ordinal_rank = lambda a: min_rank([(x, i) for i, x in enumerate(a)])
+
+        def average_rank(a):
+            return [(i + j) / 2.0 for i, j in zip(min_rank(a), max_rank(a))]
+
+        def dense_rank(a):
+            b = np.unique(a)
+            return [1 + sum(i < j for i in b) for j in a]
+
+        rankf = dict(min=min_rank, max=max_rank, ordinal=ordinal_rank,
+                     average=average_rank, dense=dense_rank)
+
+        def check_ranks(a):
+            for method in 'min', 'max', 'dense', 'ordinal', 'average':
+                out = rankdata(a, method=method)
+                assert_array_equal(out, rankf[method](a))
+
+        val = ['foo', 'bar', 'qux', 'xyz', 'abc', 'efg', 'ace', 'qwe', 'qaz']
+        check_ranks(np.random.choice(val, 200))
+        check_ranks(np.random.choice(val, 200).astype('object'))
+
+        val = np.array([0, 1, 2, 2.718, 3, 3.141], dtype='object')
+        check_ranks(np.random.choice(val, 200).astype('object'))
+
+    def test_large_int(self):
+        data = np.array([2**60, 2**60+1], dtype=np.uint64)
+        r = rankdata(data)
+        assert_array_equal(r, [1.0, 2.0])
+
+        data = np.array([2**60, 2**60+1], dtype=np.int64)
+        r = rankdata(data)
+        assert_array_equal(r, [1.0, 2.0])
+
+        data = np.array([2**60, -2**60+1], dtype=np.int64)
+        r = rankdata(data)
+        assert_array_equal(r, [2.0, 1.0])
+
+    def test_big_tie(self):
+        for n in [10000, 100000, 1000000]:
+            data = np.ones(n, dtype=int)
+            r = rankdata(data)
+            expected_rank = 0.5 * (n + 1)
+            assert_array_equal(r, expected_rank * data,
+                               "test failed with n=%d" % n)
+
+    def test_axis(self):
+        data = [[0, 2, 1],
+                [4, 2, 2]]
+        expected0 = [[1., 1.5, 1.],
+                     [2., 1.5, 2.]]
+        r0 = rankdata(data, axis=0)
+        assert_array_equal(r0, expected0)
+        expected1 = [[1., 3., 2.],
+                     [3., 1.5, 1.5]]
+        r1 = rankdata(data, axis=1)
+        assert_array_equal(r1, expected1)
+
+    methods = ["average", "min", "max", "dense", "ordinal"]
+    dtypes = [np.float64] + [np.int_]*4
+
+    @pytest.mark.parametrize("axis", [0, 1])
+    @pytest.mark.parametrize("method, dtype", zip(methods, dtypes))
+    def test_size_0_axis(self, axis, method, dtype):
+        shape = (3, 0)
+        data = np.zeros(shape)
+        r = rankdata(data, method=method, axis=axis)
+        assert_equal(r.shape, shape)
+        assert_equal(r.dtype, dtype)
+
+    @pytest.mark.parametrize('axis', range(3))
+    @pytest.mark.parametrize('method', methods)
+    def test_nan_policy_omit_3d(self, axis, method):
+        shape = (20, 21, 22)
+        rng = np.random.default_rng(abs(hash('falafel')))
+
+        a = rng.random(size=shape)
+        i = rng.random(size=shape) < 0.4
+        j = rng.random(size=shape) < 0.1
+        k = rng.random(size=shape) < 0.1
+        a[i] = np.nan
+        a[j] = -np.inf
+        a[k] = np.inf
+
+        def rank_1d_omit(a, method):
+            out = np.zeros_like(a)
+            i = np.isnan(a)
+            a_compressed = a[~i]
+            res = rankdata(a_compressed, method)
+            out[~i] = res
+            out[i] = np.nan
+            return out
+
+        def rank_omit(a, method, axis):
+            return np.apply_along_axis(lambda a: rank_1d_omit(a, method),
+                                       axis, a)
+
+        res = rankdata(a, method, axis=axis, nan_policy='omit')
+        res0 = rank_omit(a, method, axis=axis)
+
+        assert_array_equal(res, res0)
+
+    def test_nan_policy_2d_axis_none(self):
+        # 2 2d-array test with axis=None
+        data = [[0, np.nan, 3],
+                [4, 2, np.nan],
+                [1, 2, 2]]
+        assert_array_equal(rankdata(data, axis=None, nan_policy='omit'),
+                           [1., np.nan, 6., 7., 4., np.nan, 2., 4., 4.])
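+        # flattened with NaNs omitted, the data are [0, 3, 4, 2, 1, 2, 2];
+        # the three 2s share the average rank (3 + 4 + 5) / 3 = 4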
+        assert_array_equal(rankdata(data, axis=None, nan_policy='propagate'),
+                           [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,
+                            np.nan, np.nan, np.nan])
+
+    def test_nan_policy_raise(self):
+        # 1 1d-array test
+        data = [0, 2, 3, -2, np.nan, np.nan]
+        with pytest.raises(ValueError, match="The input contains nan"):
+            rankdata(data, nan_policy='raise')
+
+        # 2 2d-array test
+        data = [[0, np.nan, 3],
+                [4, 2, np.nan],
+                [np.nan, 2, 2]]
+
+        with pytest.raises(ValueError, match="The input contains nan"):
+            rankdata(data, axis=0, nan_policy="raise")
+
+        with pytest.raises(ValueError, match="The input contains nan"):
+            rankdata(data, axis=1, nan_policy="raise")
+
+    def test_nan_policy_propagate(self):
+        # 1 1d-array test
+        data = [0, 2, 3, -2, np.nan, np.nan]
+        assert_array_equal(rankdata(data, nan_policy='propagate'),
+                           [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan])
+
+        # 2 2d-array test
+        data = [[0, np.nan, 3],
+                [4, 2, np.nan],
+                [1, 2, 2]]
+        assert_array_equal(rankdata(data, axis=0, nan_policy='propagate'),
+                           [[1, np.nan, np.nan],
+                            [3, np.nan, np.nan],
+                            [2, np.nan, np.nan]])
+        assert_array_equal(rankdata(data, axis=1, nan_policy='propagate'),
+                           [[np.nan, np.nan, np.nan],
+                            [np.nan, np.nan, np.nan],
+                            [1, 2.5, 2.5]])
+
+
+_cases = (
+    # values, method, expected
+    ([], 'average', []),
+    ([], 'min', []),
+    ([], 'max', []),
+    ([], 'dense', []),
+    ([], 'ordinal', []),
+    #
+    ([100], 'average', [1.0]),
+    ([100], 'min', [1.0]),
+    ([100], 'max', [1.0]),
+    ([100], 'dense', [1.0]),
+    ([100], 'ordinal', [1.0]),
+    #
+    ([100, 100, 100], 'average', [2.0, 2.0, 2.0]),
+    ([100, 100, 100], 'min', [1.0, 1.0, 1.0]),
+    ([100, 100, 100], 'max', [3.0, 3.0, 3.0]),
+    ([100, 100, 100], 'dense', [1.0, 1.0, 1.0]),
+    ([100, 100, 100], 'ordinal', [1.0, 2.0, 3.0]),
+    #
+    ([100, 300, 200], 'average', [1.0, 3.0, 2.0]),
+    ([100, 300, 200], 'min', [1.0, 3.0, 2.0]),
+    ([100, 300, 200], 'max', [1.0, 3.0, 2.0]),
+    ([100, 300, 200], 'dense', [1.0, 3.0, 2.0]),
+    ([100, 300, 200], 'ordinal', [1.0, 3.0, 2.0]),
+    #
+    ([100, 200, 300, 200], 'average', [1.0, 2.5, 4.0, 2.5]),
+    ([100, 200, 300, 200], 'min', [1.0, 2.0, 4.0, 2.0]),
+    ([100, 200, 300, 200], 'max', [1.0, 3.0, 4.0, 3.0]),
+    ([100, 200, 300, 200], 'dense', [1.0, 2.0, 3.0, 2.0]),
+    ([100, 200, 300, 200], 'ordinal', [1.0, 2.0, 4.0, 3.0]),
+    #
+    ([100, 200, 300, 200, 100], 'average', [1.5, 3.5, 5.0, 3.5, 1.5]),
+    ([100, 200, 300, 200, 100], 'min', [1.0, 3.0, 5.0, 3.0, 1.0]),
+    ([100, 200, 300, 200, 100], 'max', [2.0, 4.0, 5.0, 4.0, 2.0]),
+    ([100, 200, 300, 200, 100], 'dense', [1.0, 2.0, 3.0, 2.0, 1.0]),
+    ([100, 200, 300, 200, 100], 'ordinal', [1.0, 3.0, 5.0, 4.0, 2.0]),
+    #
+    ([10] * 30, 'ordinal', np.arange(1.0, 31.0)),
+)
+
+
+def test_cases():
+    for values, method, expected in _cases:
+        r = rankdata(values, method=method)
+        assert_array_equal(r, expected)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_relative_risk.py b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_relative_risk.py
new file mode 100644
index 00000000..28ee790a
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_relative_risk.py
@@ -0,0 +1,96 @@
+
+import pytest
+import numpy as np
+from numpy.testing import assert_allclose, assert_equal
+from scipy.stats.contingency import relative_risk
+
+
+# Test just the calculation of the relative risk, including edge
+# cases that result in a relative risk of 0, inf or nan.
+@pytest.mark.parametrize(
+    'exposed_cases, exposed_total, control_cases, control_total, expected_rr',
+    [(1, 4, 3, 8, 0.25 / 0.375),
+     (0, 10, 5, 20, 0),
+     (0, 10, 0, 20, np.nan),
+     (5, 15, 0, 20, np.inf)]
+)
+def test_relative_risk(exposed_cases, exposed_total,
+                       control_cases, control_total, expected_rr):
+    result = relative_risk(exposed_cases, exposed_total,
+                           control_cases, control_total)
+    assert_allclose(result.relative_risk, expected_rr, rtol=1e-13)
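+    # Sanity check for the parametrized expectations: the relative risk is
+    # the ratio of the incidence proportions,
+    #   rr = (exposed_cases / exposed_total) / (control_cases / control_total)
+    # e.g. (1 / 4) / (3 / 8) = 0.25 / 0.375 for the first case above.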
+
+
+def test_relative_risk_confidence_interval():
+    result = relative_risk(exposed_cases=16, exposed_total=128,
+                           control_cases=24, control_total=256)
+    rr = result.relative_risk
+    ci = result.confidence_interval(confidence_level=0.95)
+    # The corresponding calculation in R using the epitools package.
+    #
+    # > library(epitools)
+    # > c <- matrix(c(232, 112, 24, 16), nrow=2)
+    # > result <- riskratio(c)
+    # > result$measure
+    #               risk ratio with 95% C.I.
+    # Predictor  estimate     lower    upper
+    #   Exposed1 1.000000        NA       NA
+    #   Exposed2 1.333333 0.7347317 2.419628
+    #
+    # The last line is the result that we want.
+    assert_allclose(rr, 4/3)
+    assert_allclose((ci.low, ci.high), (0.7347317, 2.419628), rtol=5e-7)
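+    # Hedged cross-check (assuming the Katz log-interval is used): with
+    #   se = sqrt(1/16 - 1/128 + 1/24 - 1/256) ~= 0.30405,
+    # exp(log(4/3) -/+ 1.96 * se) ~= (0.7347, 2.4196), matching the
+    # epitools output quoted above.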
+
+
+def test_relative_risk_ci_conflevel0():
+    result = relative_risk(exposed_cases=4, exposed_total=12,
+                           control_cases=5, control_total=30)
+    rr = result.relative_risk
+    assert_allclose(rr, 2.0, rtol=1e-14)
+    ci = result.confidence_interval(0)
+    assert_allclose((ci.low, ci.high), (2.0, 2.0), rtol=1e-12)
+
+
+def test_relative_risk_ci_conflevel1():
+    result = relative_risk(exposed_cases=4, exposed_total=12,
+                           control_cases=5, control_total=30)
+    ci = result.confidence_interval(1)
+    assert_equal((ci.low, ci.high), (0, np.inf))
+
+
+def test_relative_risk_ci_edge_cases_00():
+    result = relative_risk(exposed_cases=0, exposed_total=12,
+                           control_cases=0, control_total=30)
+    assert_equal(result.relative_risk, np.nan)
+    ci = result.confidence_interval()
+    assert_equal((ci.low, ci.high), (np.nan, np.nan))
+
+
+def test_relative_risk_ci_edge_cases_01():
+    result = relative_risk(exposed_cases=0, exposed_total=12,
+                           control_cases=1, control_total=30)
+    assert_equal(result.relative_risk, 0)
+    ci = result.confidence_interval()
+    assert_equal((ci.low, ci.high), (0.0, np.nan))
+
+
+def test_relative_risk_ci_edge_cases_10():
+    result = relative_risk(exposed_cases=1, exposed_total=12,
+                           control_cases=0, control_total=30)
+    assert_equal(result.relative_risk, np.inf)
+    ci = result.confidence_interval()
+    assert_equal((ci.low, ci.high), (np.nan, np.inf))
+
+
+@pytest.mark.parametrize('ec, et, cc, ct', [(0, 0, 10, 20),
+                                            (-1, 10, 1, 5),
+                                            (1, 10, 0, 0),
+                                            (1, 10, -1, 4)])
+def test_relative_risk_bad_value(ec, et, cc, ct):
+    with pytest.raises(ValueError, match="must be an integer not less than"):
+        relative_risk(ec, et, cc, ct)
+
+
+def test_relative_risk_bad_type():
+    with pytest.raises(TypeError, match="must be an integer"):
+        relative_risk(1, 10, 2.0, 40)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_resampling.py b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_resampling.py
new file mode 100644
index 00000000..46e4bab7
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_resampling.py
@@ -0,0 +1,1651 @@
+import numpy as np
+import pytest
+from scipy.stats import bootstrap, monte_carlo_test, permutation_test
+from numpy.testing import assert_allclose, assert_equal, suppress_warnings
+from scipy import stats
+from scipy import special
+from .. import _resampling as _resampling
+from scipy._lib._util import rng_integers
+from scipy.optimize import root
+
+
+def test_bootstrap_iv():
+
+    message = "`data` must be a sequence of samples."
+    with pytest.raises(ValueError, match=message):
+        bootstrap(1, np.mean)
+
+    message = "`data` must contain at least one sample."
+    with pytest.raises(ValueError, match=message):
+        bootstrap(tuple(), np.mean)
+
+    message = "each sample in `data` must contain two or more observations..."
+    with pytest.raises(ValueError, match=message):
+        bootstrap(([1, 2, 3], [1]), np.mean)
+
+    message = ("When `paired is True`, all samples must have the same length ")
+    with pytest.raises(ValueError, match=message):
+        bootstrap(([1, 2, 3], [1, 2, 3, 4]), np.mean, paired=True)
+
+    message = "`vectorized` must be `True`, `False`, or `None`."
+    with pytest.raises(ValueError, match=message):
+        bootstrap(1, np.mean, vectorized='ekki')
+
+    message = "`axis` must be an integer."
+    with pytest.raises(ValueError, match=message):
+        bootstrap(([1, 2, 3],), np.mean, axis=1.5)
+
+    message = "could not convert string to float"
+    with pytest.raises(ValueError, match=message):
+        bootstrap(([1, 2, 3],), np.mean, confidence_level='ni')
+
+    message = "`n_resamples` must be a non-negative integer."
+    with pytest.raises(ValueError, match=message):
+        bootstrap(([1, 2, 3],), np.mean, n_resamples=-1000)
+
+    message = "`n_resamples` must be a non-negative integer."
+    with pytest.raises(ValueError, match=message):
+        bootstrap(([1, 2, 3],), np.mean, n_resamples=1000.5)
+
+    message = "`batch` must be a positive integer or None."
+    with pytest.raises(ValueError, match=message):
+        bootstrap(([1, 2, 3],), np.mean, batch=-1000)
+
+    message = "`batch` must be a positive integer or None."
+    with pytest.raises(ValueError, match=message):
+        bootstrap(([1, 2, 3],), np.mean, batch=1000.5)
+
+    message = "`method` must be in"
+    with pytest.raises(ValueError, match=message):
+        bootstrap(([1, 2, 3],), np.mean, method='ekki')
+
+    message = "`bootstrap_result` must have attribute `bootstrap_distribution'"
+    with pytest.raises(ValueError, match=message):
+        bootstrap(([1, 2, 3],), np.mean, bootstrap_result=10)
+
+    message = "Either `bootstrap_result.bootstrap_distribution.size`"
+    with pytest.raises(ValueError, match=message):
+        bootstrap(([1, 2, 3],), np.mean, n_resamples=0)
+
+    message = "'herring' cannot be used to seed a"
+    with pytest.raises(ValueError, match=message):
+        bootstrap(([1, 2, 3],), np.mean, random_state='herring')
+
+
+@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa'])
+@pytest.mark.parametrize("axis", [0, 1, 2])
+def test_bootstrap_batch(method, axis):
+    # for one-sample statistics, batch size shouldn't affect the result
+    np.random.seed(0)
+
+    x = np.random.rand(10, 11, 12)
+    res1 = bootstrap((x,), np.mean, batch=None, method=method,
+                     random_state=0, axis=axis, n_resamples=100)
+    res2 = bootstrap((x,), np.mean, batch=10, method=method,
+                     random_state=0, axis=axis, n_resamples=100)
+
+    assert_equal(res2.confidence_interval.low, res1.confidence_interval.low)
+    assert_equal(res2.confidence_interval.high, res1.confidence_interval.high)
+    assert_equal(res2.standard_error, res1.standard_error)
+
+
+@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa'])
+def test_bootstrap_paired(method):
+    # test that `paired` works as expected
+    np.random.seed(0)
+    n = 100
+    x = np.random.rand(n)
+    y = np.random.rand(n)
+
+    def my_statistic(x, y, axis=-1):
+        return ((x-y)**2).mean(axis=axis)
+
+    def my_paired_statistic(i, axis=-1):
+        a = x[i]
+        b = y[i]
+        res = my_statistic(a, b)
+        return res
+
+    i = np.arange(len(x))
+
+    res1 = bootstrap((i,), my_paired_statistic, random_state=0)
+    res2 = bootstrap((x, y), my_statistic, paired=True, random_state=0)
+
+    assert_allclose(res1.confidence_interval, res2.confidence_interval)
+    assert_allclose(res1.standard_error, res2.standard_error)
+
+
+@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa'])
+@pytest.mark.parametrize("axis", [0, 1, 2])
+@pytest.mark.parametrize("paired", [True, False])
+def test_bootstrap_vectorized(method, axis, paired):
+    # test that paired is vectorized as expected: when samples are tiled,
+    # CI and standard_error of each axis-slice is the same as those of the
+    # original 1d sample
+
+    np.random.seed(0)
+
+    def my_statistic(x, y, z, axis=-1):
+        return x.mean(axis=axis) + y.mean(axis=axis) + z.mean(axis=axis)
+
+    shape = 10, 11, 12
+    n_samples = shape[axis]
+
+    x = np.random.rand(n_samples)
+    y = np.random.rand(n_samples)
+    z = np.random.rand(n_samples)
+    res1 = bootstrap((x, y, z), my_statistic, paired=paired, method=method,
+                     random_state=0, axis=0, n_resamples=100)
+    assert (res1.bootstrap_distribution.shape
+            == res1.standard_error.shape + (100,))
+
+    reshape = [1, 1, 1]
+    reshape[axis] = n_samples
+    x = np.broadcast_to(x.reshape(reshape), shape)
+    y = np.broadcast_to(y.reshape(reshape), shape)
+    z = np.broadcast_to(z.reshape(reshape), shape)
+    res2 = bootstrap((x, y, z), my_statistic, paired=paired, method=method,
+                     random_state=0, axis=axis, n_resamples=100)
+
+    assert_allclose(res2.confidence_interval.low,
+                    res1.confidence_interval.low)
+    assert_allclose(res2.confidence_interval.high,
+                    res1.confidence_interval.high)
+    assert_allclose(res2.standard_error, res1.standard_error)
+
+    result_shape = list(shape)
+    result_shape.pop(axis)
+
+    assert_equal(res2.confidence_interval.low.shape, result_shape)
+    assert_equal(res2.confidence_interval.high.shape, result_shape)
+    assert_equal(res2.standard_error.shape, result_shape)
+
+
+@pytest.mark.parametrize("method", ['basic', 'percentile', 'BCa'])
+def test_bootstrap_against_theory(method):
+    # based on https://www.statology.org/confidence-intervals-python/
+    data = stats.norm.rvs(loc=5, scale=2, size=5000, random_state=0)
+    alpha = 0.95
+    dist = stats.t(df=len(data)-1, loc=np.mean(data), scale=stats.sem(data))
+    expected_interval = dist.interval(confidence=alpha)
+    expected_se = dist.std()
+
+    res = bootstrap((data,), np.mean, n_resamples=5000,
+                    confidence_level=alpha, method=method,
+                    random_state=0)
+    assert_allclose(res.confidence_interval, expected_interval, rtol=5e-4)
+    assert_allclose(res.standard_error, expected_se, atol=3e-4)
+
+
+tests_R = {"basic": (23.77, 79.12),
+           "percentile": (28.86, 84.21),
+           "BCa": (32.31, 91.43)}
+
+
+@pytest.mark.parametrize("method, expected", tests_R.items())
+def test_bootstrap_against_R(method, expected):
+    # Compare against R's "boot" library
+    # library(boot)
+
+    # stat <- function (x, a) {
+    #     mean(x[a])
+    # }
+
+    # x <- c(10, 12, 12.5, 12.5, 13.9, 15, 21, 22,
+    #        23, 34, 50, 81, 89, 121, 134, 213)
+
+    # # Use a large value so we get a few significant digits for the CI.
+    # n = 1000000
+    # bootresult = boot(x, stat, n)
+    # result <- boot.ci(bootresult)
+    # print(result)
+    x = np.array([10, 12, 12.5, 12.5, 13.9, 15, 21, 22,
+                  23, 34, 50, 81, 89, 121, 134, 213])
+    res = bootstrap((x,), np.mean, n_resamples=1000000, method=method,
+                    random_state=0)
+    assert_allclose(res.confidence_interval, expected, rtol=0.005)
+
+
+tests_against_itself_1samp = {"basic": 1780,
+                              "percentile": 1784,
+                              "BCa": 1784}
+
+
+def test_multisample_BCa_against_R():
+    # Because bootstrap is stochastic, it's tricky to test against reference
+    # behavior. Here, we show that SciPy's BCa CI matches the BCa CI of R's
+    # wBoot package much more closely than the other SciPy CIs do.
+
+    # arbitrary skewed data
+    x = [0.75859206, 0.5910282, -0.4419409, -0.36654601,
+         0.34955357, -1.38835871, 0.76735821]
+    y = [1.41186073, 0.49775975, 0.08275588, 0.24086388,
+         0.03567057, 0.52024419, 0.31966611, 1.32067634]
+
+    # a multi-sample statistic for which the BCa CI tends to be different
+    # from the other CIs
+    def statistic(x, y, axis):
+        s1 = stats.skew(x, axis=axis)
+        s2 = stats.skew(y, axis=axis)
+        return s1 - s2
+
+    # compute confidence intervals using each method
+    rng = np.random.default_rng(468865032284792692)
+
+    res_basic = stats.bootstrap((x, y), statistic, method='basic',
+                                batch=100, random_state=rng)
+    res_percent = stats.bootstrap((x, y), statistic, method='percentile',
+                                  batch=100, random_state=rng)
+    res_bca = stats.bootstrap((x, y), statistic, method='bca',
+                              batch=100, random_state=rng)
+
+    # compute midpoints so we can compare just one number for each
+    mid_basic = np.mean(res_basic.confidence_interval)
+    mid_percent = np.mean(res_percent.confidence_interval)
+    mid_bca = np.mean(res_bca.confidence_interval)
+
+    # reference for the BCa CI computed using the R wBoot package:
+    # library(wBoot)
+    # library(moments)
+
+    # x = c(0.75859206, 0.5910282, -0.4419409, -0.36654601,
+    #       0.34955357, -1.38835871,  0.76735821)
+    # y = c(1.41186073, 0.49775975, 0.08275588, 0.24086388,
+    #       0.03567057, 0.52024419, 0.31966611, 1.32067634)
+
+    # twoskew <- function(x1, y1) {skewness(x1) - skewness(y1)}
+    # boot.two.bca(x, y, skewness, conf.level = 0.95,
+    #              R = 9999, stacked = FALSE)
+    mid_wboot = -1.5519
+
+    # compute relative difference with respect to the wBoot BCa midpoint
+    diff_basic = (mid_basic - mid_wboot)/abs(mid_wboot)
+    diff_percent = (mid_percent - mid_wboot)/abs(mid_wboot)
+    diff_bca = (mid_bca - mid_wboot)/abs(mid_wboot)
+
+    # SciPy's BCa CI midpoint is much closer than that of the other methods
+    assert diff_basic < -0.15
+    assert diff_percent > 0.15
+    assert abs(diff_bca) < 0.03
+
+
+def test_BCa_acceleration_against_reference():
+    # Compare the (deterministic) acceleration parameter for a multi-sample
+    # problem against a reference value. The example is from [1], but Efron's
+    # value seems inaccurate. Straightforward code for computing the
+    # reference acceleration (0.011008228344026734) is available at:
+    # https://github.com/scipy/scipy/pull/16455#issuecomment-1193400981
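+    # For orientation (a standard result, not specific to this test): in the
+    # one-sample case, the jackknife acceleration estimate is
+    #     a_hat = sum((theta_bar - theta_i)**3)
+    #             / (6 * sum((theta_bar - theta_i)**2)**1.5)
+    # where theta_i are the leave-one-out statistic values and theta_bar is
+    # their mean; the multi-sample value checked below generalizes this.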
+
+    y = np.array([10, 27, 31, 40, 46, 50, 52, 104, 146])
+    z = np.array([16, 23, 38, 94, 99, 141, 197])
+
+    def statistic(z, y, axis=0):
+        return np.mean(z, axis=axis) - np.mean(y, axis=axis)
+
+    data = [z, y]
+    res = stats.bootstrap(data, statistic)
+
+    axis = -1
+    alpha = 0.95
+    theta_hat_b = res.bootstrap_distribution
+    batch = 100
+    _, _, a_hat = _resampling._bca_interval(data, statistic, axis, alpha,
+                                            theta_hat_b, batch)
+    assert_allclose(a_hat, 0.011008228344026734)
+
+
+@pytest.mark.parametrize("method, expected",
+                         tests_against_itself_1samp.items())
+def test_bootstrap_against_itself_1samp(method, expected):
+    # The expected values in this test were generated using bootstrap
+    # to check for unintended changes in behavior. The test also makes sure
+    # that bootstrap works with vectorized statistics and that the
+    # `axis` argument works as expected.
+    np.random.seed(0)
+
+    n = 100  # size of sample
+    n_resamples = 999  # number of bootstrap resamples used to form each CI
+    confidence_level = 0.9
+
+    # The true mean is 5
+    dist = stats.norm(loc=5, scale=1)
+    stat_true = dist.mean()
+
+    # Do the same thing 2000 times. (The code is fully vectorized.)
+    n_replications = 2000
+    data = dist.rvs(size=(n_replications, n))
+    res = bootstrap((data,),
+                    statistic=np.mean,
+                    confidence_level=confidence_level,
+                    n_resamples=n_resamples,
+                    batch=50,
+                    method=method,
+                    axis=-1)
+    ci = res.confidence_interval
+
+    # ci contains vectors of lower and upper confidence interval bounds
+    ci_contains_true = np.sum((ci[0] < stat_true) & (stat_true < ci[1]))
+    assert ci_contains_true == expected
+
+    # ci_contains_true is not inconsistent with confidence_level
+    pvalue = stats.binomtest(ci_contains_true, n_replications,
+                             confidence_level).pvalue
+    assert pvalue > 0.1
+
+
+tests_against_itself_2samp = {"basic": 892,
+                              "percentile": 890}
+
+
+@pytest.mark.parametrize("method, expected",
+                         tests_against_itself_2samp.items())
+def test_bootstrap_against_itself_2samp(method, expected):
+    # The expected values in this test were generated using bootstrap
+    # to check for unintended changes in behavior. The test also makes sure
+    # that bootstrap works with multi-sample statistics and that the
+    # `axis` argument works as expected / function is vectorized.
+    np.random.seed(0)
+
+    n1 = 100  # size of sample 1
+    n2 = 120  # size of sample 2
+    n_resamples = 999  # number of bootstrap resamples used to form each CI
+    confidence_level = 0.9
+
+    # The statistic we're interested in is the difference in means
+    def my_stat(data1, data2, axis=-1):
+        mean1 = np.mean(data1, axis=axis)
+        mean2 = np.mean(data2, axis=axis)
+        return mean1 - mean2
+
+    # The true difference in the means is -0.1
+    dist1 = stats.norm(loc=0, scale=1)
+    dist2 = stats.norm(loc=0.1, scale=1)
+    stat_true = dist1.mean() - dist2.mean()
+
+    # Do the same thing 1000 times. (The code is fully vectorized.)
+    n_replications = 1000
+    data1 = dist1.rvs(size=(n_replications, n1))
+    data2 = dist2.rvs(size=(n_replications, n2))
+    res = bootstrap((data1, data2),
+                    statistic=my_stat,
+                    confidence_level=confidence_level,
+                    n_resamples=n_resamples,
+                    batch=50,
+                    method=method,
+                    axis=-1)
+    ci = res.confidence_interval
+
+    # ci contains vectors of lower and upper confidence interval bounds
+    ci_contains_true = np.sum((ci[0] < stat_true) & (stat_true < ci[1]))
+    assert ci_contains_true == expected
+
+    # ci_contains_true is not inconsistent with confidence_level
+    pvalue = stats.binomtest(ci_contains_true, n_replications,
+                             confidence_level).pvalue
+    assert pvalue > 0.1
+
+
+@pytest.mark.parametrize("method", ["basic", "percentile"])
+@pytest.mark.parametrize("axis", [0, 1])
+def test_bootstrap_vectorized_3samp(method, axis):
+    def statistic(*data, axis=0):
+        # an arbitrary, vectorized statistic
+        return sum(sample.mean(axis) for sample in data)
+
+    def statistic_1d(*data):
+        # the same statistic, not vectorized
+        for sample in data:
+            assert sample.ndim == 1
+        return statistic(*data, axis=0)
+
+    np.random.seed(0)
+    x = np.random.rand(4, 5)
+    y = np.random.rand(4, 5)
+    z = np.random.rand(4, 5)
+    res1 = bootstrap((x, y, z), statistic, vectorized=True,
+                     axis=axis, n_resamples=100, method=method, random_state=0)
+    res2 = bootstrap((x, y, z), statistic_1d, vectorized=False,
+                     axis=axis, n_resamples=100, method=method, random_state=0)
+    assert_allclose(res1.confidence_interval, res2.confidence_interval)
+    assert_allclose(res1.standard_error, res2.standard_error)
+
+
+@pytest.mark.xfail_on_32bit("Failure is not concerning; see gh-14107")
+@pytest.mark.parametrize("method", ["basic", "percentile", "BCa"])
+@pytest.mark.parametrize("axis", [0, 1])
+def test_bootstrap_vectorized_1samp(method, axis):
+    def statistic(x, axis=0):
+        # an arbitrary, vectorized statistic
+        return x.mean(axis=axis)
+
+    def statistic_1d(x):
+        # the same statistic, not vectorized
+        assert x.ndim == 1
+        return statistic(x, axis=0)
+
+    np.random.seed(0)
+    x = np.random.rand(4, 5)
+    res1 = bootstrap((x,), statistic, vectorized=True, axis=axis,
+                     n_resamples=100, batch=None, method=method,
+                     random_state=0)
+    res2 = bootstrap((x,), statistic_1d, vectorized=False, axis=axis,
+                     n_resamples=100, batch=10, method=method,
+                     random_state=0)
+    assert_allclose(res1.confidence_interval, res2.confidence_interval)
+    assert_allclose(res1.standard_error, res2.standard_error)
+
+
+@pytest.mark.parametrize("method", ["basic", "percentile", "BCa"])
+def test_bootstrap_degenerate(method):
+    data = 35 * [10000.]
+    if method == "BCa":
+        with np.errstate(invalid='ignore'):
+            msg = "The BCa confidence interval cannot be calculated"
+            with pytest.warns(stats.DegenerateDataWarning, match=msg):
+                res = bootstrap([data, ], np.mean, method=method)
+                assert_equal(res.confidence_interval, (np.nan, np.nan))
+    else:
+        res = bootstrap([data, ], np.mean, method=method)
+        assert_equal(res.confidence_interval, (10000., 10000.))
+    assert_equal(res.standard_error, 0)
+
+
+@pytest.mark.parametrize("method", ["basic", "percentile", "BCa"])
+def test_bootstrap_gh15678(method):
+    # Check that gh-15678 is fixed: when statistic function returned a Python
+    # float, method="BCa" failed when trying to add a dimension to the float
+    rng = np.random.default_rng(354645618886684)
+    dist = stats.norm(loc=2, scale=4)
+    data = dist.rvs(size=100, random_state=rng)
+    data = (data,)
+    res = bootstrap(data, stats.skew, method=method, n_resamples=100,
+                    random_state=np.random.default_rng(9563))
+    # this always worked because np.apply_along_axis returns NumPy data type
+    ref = bootstrap(data, stats.skew, method=method, n_resamples=100,
+                    random_state=np.random.default_rng(9563), vectorized=False)
+    assert_allclose(res.confidence_interval, ref.confidence_interval)
+    assert_allclose(res.standard_error, ref.standard_error)
+    assert isinstance(res.standard_error, np.float64)
+
+
+def test_bootstrap_min():
+    # Check that gh-15883 is fixed: percentileofscore should
+    # behave according to the 'mean' behavior and not trigger nan for BCa
+    rng = np.random.default_rng(1891289180021102)
+    dist = stats.norm(loc=2, scale=4)
+    data = dist.rvs(size=100, random_state=rng)
+    true_min = np.min(data)
+    data = (data,)
+    res = bootstrap(data, np.min, method="BCa", n_resamples=100,
+                    random_state=np.random.default_rng(3942))
+    assert true_min == res.confidence_interval.low
+    res2 = bootstrap(-np.array(data), np.max, method="BCa", n_resamples=100,
+                     random_state=np.random.default_rng(3942))
+    assert_allclose(-res.confidence_interval.low,
+                    res2.confidence_interval.high)
+    assert_allclose(-res.confidence_interval.high,
+                    res2.confidence_interval.low)
+
+
+@pytest.mark.parametrize("additional_resamples", [0, 1000])
+def test_re_bootstrap(additional_resamples):
+    # Test behavior of parameter `bootstrap_result`
+    rng = np.random.default_rng(8958153316228384)
+    x = rng.random(size=100)
+
+    n1 = 1000
+    n2 = additional_resamples
+    n3 = n1 + additional_resamples
+
+    rng = np.random.default_rng(296689032789913033)
+    res = stats.bootstrap((x,), np.mean, n_resamples=n1, random_state=rng,
+                          confidence_level=0.95, method='percentile')
+    res = stats.bootstrap((x,), np.mean, n_resamples=n2, random_state=rng,
+                          confidence_level=0.90, method='BCa',
+                          bootstrap_result=res)
+
+    rng = np.random.default_rng(296689032789913033)
+    ref = stats.bootstrap((x,), np.mean, n_resamples=n3, random_state=rng,
+                          confidence_level=0.90, method='BCa')
+
+    assert_allclose(res.standard_error, ref.standard_error, rtol=1e-14)
+    assert_allclose(res.confidence_interval, ref.confidence_interval,
+                    rtol=1e-14)
+
+
+def test_jackknife_resample():
+    shape = 3, 4, 5, 6
+    np.random.seed(0)
+    x = np.random.rand(*shape)
+    y = next(_resampling._jackknife_resample(x))
+
+    for i in range(shape[-1]):
+        # each resample is indexed along second to last axis
+        # (last axis is the one the statistic will be taken over / consumed)
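+        # e.g. here y has shape (3, 4, 5, 6, 5): y[..., i, :] is x with
+        # observation i deleted from the last axis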
+        slc = y[..., i, :]
+        expected = np.delete(x, i, axis=-1)
+
+        assert np.array_equal(slc, expected)
+
+    y2 = np.concatenate(list(_resampling._jackknife_resample(x, batch=2)),
+                        axis=-2)
+    assert np.array_equal(y2, y)
+
+
+@pytest.mark.parametrize("rng_name", ["RandomState", "default_rng"])
+def test_bootstrap_resample(rng_name):
+    rng = getattr(np.random, rng_name, None)
+    if rng is None:
+        pytest.skip(f"{rng_name} not available.")
+    rng1 = rng(0)
+    rng2 = rng(0)
+
+    n_resamples = 10
+    shape = 3, 4, 5, 6
+
+    np.random.seed(0)
+    x = np.random.rand(*shape)
+    y = _resampling._bootstrap_resample(x, n_resamples, random_state=rng1)
+
+    for i in range(n_resamples):
+        # each resample is indexed along second to last axis
+        # (last axis is the one the statistic will be taken over / consumed)
+        slc = y[..., i, :]
+
+        js = rng_integers(rng2, 0, shape[-1], shape[-1])
+        expected = x[..., js]
+
+        assert np.array_equal(slc, expected)
+
+
+@pytest.mark.parametrize("score", [0, 0.5, 1])
+@pytest.mark.parametrize("axis", [0, 1, 2])
+def test_percentile_of_score(score, axis):
+    shape = 10, 20, 30
+    np.random.seed(0)
+    x = np.random.rand(*shape)
+    p = _resampling._percentile_of_score(x, score, axis=axis)
+
+    def vectorized_pos(a, score, axis):
+        return np.apply_along_axis(stats.percentileofscore, axis, a, score)
+
+    p2 = vectorized_pos(x, score, axis=axis)/100
+
+    assert_allclose(p, p2, 1e-15)
+
+
+def test_percentile_along_axis():
+    # the difference between _percentile_along_axis and np.percentile is that
+    # np.percentile gets _all_ the qs for each axis slice, whereas
+    # _percentile_along_axis gets the q corresponding to each axis slice
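+    #
+    # illustrative example (values not used below): for
+    # x = [[0., 1., 2.], [3., 4., 5.]] and q = [50, 25],
+    # np.percentile(x, q, axis=-1) has shape (2, 2) (every q for every row),
+    # whereas _percentile_along_axis(x, q) has shape (2,) and equals
+    # [1.0, 3.5] (the q[i]-th percentile of row i)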
+
+    shape = 10, 20
+    np.random.seed(0)
+    x = np.random.rand(*shape)
+    q = np.random.rand(*shape[:-1]) * 100
+    y = _resampling._percentile_along_axis(x, q)
+
+    for i in range(shape[0]):
+        res = y[i]
+        expected = np.percentile(x[i], q[i], axis=-1)
+        assert_allclose(res, expected, 1e-15)
+
+
+@pytest.mark.parametrize("axis", [0, 1, 2])
+def test_vectorize_statistic(axis):
+    # test that _vectorize_statistic vectorizes a statistic along `axis`
+
+    def statistic(*data, axis):
+        # an arbitrary, vectorized statistic
+        return sum(sample.mean(axis) for sample in data)
+
+    def statistic_1d(*data):
+        # the same statistic, not vectorized
+        for sample in data:
+            assert sample.ndim == 1
+        return statistic(*data, axis=0)
+
+    # vectorize the non-vectorized statistic
+    statistic2 = _resampling._vectorize_statistic(statistic_1d)
+
+    np.random.seed(0)
+    x = np.random.rand(4, 5, 6)
+    y = np.random.rand(4, 1, 6)
+    z = np.random.rand(1, 5, 6)
+
+    res1 = statistic(x, y, z, axis=axis)
+    res2 = statistic2(x, y, z, axis=axis)
+    assert_allclose(res1, res2)
+
+
+@pytest.mark.parametrize("method", ["basic", "percentile", "BCa"])
+def test_vector_valued_statistic(method):
+    # Generate 95% confidence interval around MLE of normal distribution
+    # parameters. Repeat 100 times, each time on sample of size 100.
+    # Check that confidence interval contains true parameters ~95 times.
+    # Confidence intervals are estimated and stochastic; a test failure
+    # does not necessarily indicate that something is wrong. More important
+    # than values of `counts` below is that the shapes of the outputs are
+    # correct.
+
+    rng = np.random.default_rng(2196847219)
+    params = 1, 0.5
+    sample = stats.norm.rvs(*params, size=(100, 100), random_state=rng)
+
+    def statistic(data, axis):
+        return np.asarray([np.mean(data, axis),
+                           np.std(data, axis, ddof=1)])
+
+    res = bootstrap((sample,), statistic, method=method, axis=-1,
+                    n_resamples=9999, batch=200)
+
+    counts = np.sum((res.confidence_interval.low.T < params)
+                    & (res.confidence_interval.high.T > params),
+                    axis=0)
+    assert np.all(counts >= 90)
+    assert np.all(counts <= 100)
+    assert res.confidence_interval.low.shape == (2, 100)
+    assert res.confidence_interval.high.shape == (2, 100)
+    assert res.standard_error.shape == (2, 100)
+    assert res.bootstrap_distribution.shape == (2, 100, 9999)
+
+
+@pytest.mark.slow
+@pytest.mark.filterwarnings('ignore::RuntimeWarning')
+def test_vector_valued_statistic_gh17715():
+    # gh-17715 reported a mistake introduced in the extension of BCa to
+    # multi-sample statistics; a `len` should have been `.shape[-1]`. Check
+    # that this is resolved.
+
+    rng = np.random.default_rng(141921000979291141)
+
+    def concordance(x, y, axis):
+        xm = x.mean(axis)
+        ym = y.mean(axis)
+        cov = ((x - xm[..., None]) * (y - ym[..., None])).mean(axis)
+        return (2 * cov) / (x.var(axis) + y.var(axis) + (xm - ym) ** 2)
+
+    def statistic(tp, tn, fp, fn, axis):
+        actual = tp + fp
+        expected = tp + fn
+        return np.nan_to_num(concordance(actual, expected, axis))
+
+    def statistic_extradim(*args, axis):
+        return statistic(*args, axis)[np.newaxis, ...]
+
+    data = [[4, 0, 0, 2],  # (tp, tn, fp, fn)
+            [2, 1, 2, 1],
+            [0, 6, 0, 0],
+            [0, 6, 3, 0],
+            [0, 8, 1, 0]]
+    data = np.array(data).T
+
+    res = bootstrap(data, statistic_extradim, random_state=rng, paired=True)
+    ref = bootstrap(data, statistic, random_state=rng, paired=True)
+    assert_allclose(res.confidence_interval.low[0],
+                    ref.confidence_interval.low, atol=1e-15)
+    assert_allclose(res.confidence_interval.high[0],
+                    ref.confidence_interval.high, atol=1e-15)
+
+
+# --- Test Monte Carlo Hypothesis Test --- #
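+# A minimal usage sketch of `monte_carlo_test`, for orientation only (the
+# names and values here are illustrative and not used by the tests):
+#
+#     rng = np.random.default_rng(0)
+#     x = rng.normal(size=50)
+#     def statistic(x, axis):
+#         return stats.skewtest(x, axis=axis).statistic
+#     res = monte_carlo_test(x, rng.normal, statistic)
+#     res.statistic, res.pvalue  # observed statistic, Monte Carlo p-value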
+
+class TestMonteCarloHypothesisTest:
+    atol = 2.5e-2  # for comparing p-value
+
+    def rvs(self, rvs_in, rs):
+        return lambda *args, **kwds: rvs_in(*args, random_state=rs, **kwds)
+
+    def test_input_validation(self):
+        # test that the appropriate error messages are raised for invalid input
+
+        def stat(x):
+            return stats.skewtest(x).statistic
+
+        message = "`axis` must be an integer."
+        with pytest.raises(ValueError, match=message):
+            monte_carlo_test([1, 2, 3], stats.norm.rvs, stat, axis=1.5)
+
+        message = "`vectorized` must be `True`, `False`, or `None`."
+        with pytest.raises(ValueError, match=message):
+            monte_carlo_test([1, 2, 3], stats.norm.rvs, stat, vectorized=1.5)
+
+        message = "`rvs` must be callable."
+        with pytest.raises(TypeError, match=message):
+            monte_carlo_test([1, 2, 3], None, stat)
+
+        message = "`statistic` must be callable."
+        with pytest.raises(TypeError, match=message):
+            monte_carlo_test([1, 2, 3], stats.norm.rvs, None)
+
+        message = "`n_resamples` must be a positive integer."
+        with pytest.raises(ValueError, match=message):
+            monte_carlo_test([1, 2, 3], stats.norm.rvs, stat,
+                             n_resamples=-1000)
+
+        message = "`n_resamples` must be a positive integer."
+        with pytest.raises(ValueError, match=message):
+            monte_carlo_test([1, 2, 3], stats.norm.rvs, stat,
+                             n_resamples=1000.5)
+
+        message = "`batch` must be a positive integer or None."
+        with pytest.raises(ValueError, match=message):
+            monte_carlo_test([1, 2, 3], stats.norm.rvs, stat, batch=-1000)
+
+        message = "`batch` must be a positive integer or None."
+        with pytest.raises(ValueError, match=message):
+            monte_carlo_test([1, 2, 3], stats.norm.rvs, stat, batch=1000.5)
+
+        message = "`alternative` must be in..."
+        with pytest.raises(ValueError, match=message):
+            monte_carlo_test([1, 2, 3], stats.norm.rvs, stat,
+                             alternative='ekki')
+
+    def test_batch(self):
+        # make sure that the `batch` parameter is respected by checking the
+        # maximum batch size provided in calls to `statistic`
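+        # with n_resamples=1000, batch=1 implies 1000 single-sample calls
+        # plus one call on the observed sample (1001 total); batch=50 implies
+        # 20 + 1 = 21 calls; batch=None evaluates all resamples in one call
+        # (2 total)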
+        rng = np.random.default_rng(23492340193)
+        x = rng.random(10)
+
+        def statistic(x, axis):
+            batch_size = 1 if x.ndim == 1 else len(x)
+            statistic.batch_size = max(batch_size, statistic.batch_size)
+            statistic.counter += 1
+            return stats.skewtest(x, axis=axis).statistic
+        statistic.counter = 0
+        statistic.batch_size = 0
+
+        kwds = {'sample': x, 'statistic': statistic,
+                'n_resamples': 1000, 'vectorized': True}
+
+        kwds['rvs'] = self.rvs(stats.norm.rvs, np.random.default_rng(32842398))
+        res1 = monte_carlo_test(batch=1, **kwds)
+        assert_equal(statistic.counter, 1001)
+        assert_equal(statistic.batch_size, 1)
+
+        kwds['rvs'] = self.rvs(stats.norm.rvs, np.random.default_rng(32842398))
+        statistic.counter = 0
+        res2 = monte_carlo_test(batch=50, **kwds)
+        assert_equal(statistic.counter, 21)
+        assert_equal(statistic.batch_size, 50)
+
+        kwds['rvs'] = self.rvs(stats.norm.rvs, np.random.default_rng(32842398))
+        statistic.counter = 0
+        res3 = monte_carlo_test(**kwds)
+        assert_equal(statistic.counter, 2)
+        assert_equal(statistic.batch_size, 1000)
+
+        assert_equal(res1.pvalue, res3.pvalue)
+        assert_equal(res2.pvalue, res3.pvalue)
+
+    @pytest.mark.parametrize('axis', range(-3, 3))
+    def test_axis(self, axis):
+        # test that Nd-array samples are handled correctly for valid values
+        # of the `axis` parameter
+        rng = np.random.default_rng(2389234)
+        norm_rvs = self.rvs(stats.norm.rvs, rng)
+
+        size = [2, 3, 4]
+        size[axis] = 100
+        x = norm_rvs(size=size)
+        expected = stats.skewtest(x, axis=axis)
+
+        def statistic(x, axis):
+            return stats.skewtest(x, axis=axis).statistic
+
+        res = monte_carlo_test(x, norm_rvs, statistic, vectorized=True,
+                               n_resamples=20000, axis=axis)
+
+        assert_allclose(res.statistic, expected.statistic)
+        assert_allclose(res.pvalue, expected.pvalue, atol=self.atol)
+
+    @pytest.mark.parametrize('alternative', ("less", "greater"))
+    @pytest.mark.parametrize('a', np.linspace(-0.5, 0.5, 5))  # skewness
+    def test_against_ks_1samp(self, alternative, a):
+        # test that monte_carlo_test can reproduce pvalue of ks_1samp
+        rng = np.random.default_rng(65723433)
+
+        x = stats.skewnorm.rvs(a=a, size=30, random_state=rng)
+        expected = stats.ks_1samp(x, stats.norm.cdf, alternative=alternative)
+
+        def statistic1d(x):
+            return stats.ks_1samp(x, stats.norm.cdf, mode='asymp',
+                                  alternative=alternative).statistic
+
+        norm_rvs = self.rvs(stats.norm.rvs, rng)
+        res = monte_carlo_test(x, norm_rvs, statistic1d,
+                               n_resamples=1000, vectorized=False,
+                               alternative=alternative)
+
+        assert_allclose(res.statistic, expected.statistic)
+        if alternative == 'greater':
+            assert_allclose(res.pvalue, expected.pvalue, atol=self.atol)
+        elif alternative == 'less':
+            assert_allclose(1-res.pvalue, expected.pvalue, atol=self.atol)
+
+    @pytest.mark.parametrize('hypotest', (stats.skewtest, stats.kurtosistest))
+    @pytest.mark.parametrize('alternative', ("less", "greater", "two-sided"))
+    @pytest.mark.parametrize('a', np.linspace(-2, 2, 5))  # skewness
+    def test_against_normality_tests(self, hypotest, alternative, a):
+        # test that monte_carlo_test can reproduce pvalue of normality tests
+        rng = np.random.default_rng(85723405)
+
+        x = stats.skewnorm.rvs(a=a, size=150, random_state=rng)
+        expected = hypotest(x, alternative=alternative)
+
+        def statistic(x, axis):
+            return hypotest(x, axis=axis).statistic
+
+        norm_rvs = self.rvs(stats.norm.rvs, rng)
+        res = monte_carlo_test(x, norm_rvs, statistic, vectorized=True,
+                               alternative=alternative)
+
+        assert_allclose(res.statistic, expected.statistic)
+        assert_allclose(res.pvalue, expected.pvalue, atol=self.atol)
+
+    @pytest.mark.parametrize('a', np.arange(-2, 3))  # skewness parameter
+    def test_against_normaltest(self, a):
+        # test that monte_carlo_test can reproduce pvalue of normaltest
+        rng = np.random.default_rng(12340513)
+
+        x = stats.skewnorm.rvs(a=a, size=150, random_state=rng)
+        expected = stats.normaltest(x)
+
+        def statistic(x, axis):
+            return stats.normaltest(x, axis=axis).statistic
+
+        norm_rvs = self.rvs(stats.norm.rvs, rng)
+        res = monte_carlo_test(x, norm_rvs, statistic, vectorized=True,
+                               alternative='greater')
+
+        assert_allclose(res.statistic, expected.statistic)
+        assert_allclose(res.pvalue, expected.pvalue, atol=self.atol)
+
+    @pytest.mark.parametrize('a', np.linspace(-0.5, 0.5, 5))  # skewness
+    def test_against_cramervonmises(self, a):
+        # test that monte_carlo_test can reproduce pvalue of cramervonmises
+        rng = np.random.default_rng(234874135)
+
+        x = stats.skewnorm.rvs(a=a, size=30, random_state=rng)
+        expected = stats.cramervonmises(x, stats.norm.cdf)
+
+        def statistic1d(x):
+            return stats.cramervonmises(x, stats.norm.cdf).statistic
+
+        norm_rvs = self.rvs(stats.norm.rvs, rng)
+        res = monte_carlo_test(x, norm_rvs, statistic1d,
+                               n_resamples=1000, vectorized=False,
+                               alternative='greater')
+
+        assert_allclose(res.statistic, expected.statistic)
+        assert_allclose(res.pvalue, expected.pvalue, atol=self.atol)
+
+    @pytest.mark.parametrize('dist_name', ('norm', 'logistic'))
+    @pytest.mark.parametrize('i', range(5))
+    def test_against_anderson(self, dist_name, i):
+        # test that monte_carlo_test can reproduce results of `anderson`.
+        # Note: `anderson` does not provide a p-value; it provides a list of
+        # significance levels and the associated critical values of the test
+        # statistic. `i` is used to index this list.
+
+        # find the skewness for which the sample statistic matches one of the
+        # critical values provided by `stats.anderson`
+
+        def fun(a):
+            rng = np.random.default_rng(394295467)
+            x = stats.tukeylambda.rvs(a, size=100, random_state=rng)
+            expected = stats.anderson(x, dist_name)
+            return expected.statistic - expected.critical_values[i]
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning)
+            sol = root(fun, x0=0)
+        assert sol.success
+
+        # get the significance level (p-value) associated with that critical
+        # value
+        a = sol.x[0]
+        rng = np.random.default_rng(394295467)
+        x = stats.tukeylambda.rvs(a, size=100, random_state=rng)
+        expected = stats.anderson(x, dist_name)
+        expected_stat = expected.statistic
+        expected_p = expected.significance_level[i]/100
+
+        # perform equivalent Monte Carlo test and compare results
+        def statistic1d(x):
+            return stats.anderson(x, dist_name).statistic
+
+        dist_rvs = self.rvs(getattr(stats, dist_name).rvs, rng)
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning)
+            res = monte_carlo_test(x, dist_rvs,
+                                   statistic1d, n_resamples=1000,
+                                   vectorized=False, alternative='greater')
+
+        assert_allclose(res.statistic, expected_stat)
+        assert_allclose(res.pvalue, expected_p, atol=2*self.atol)
+
+    def test_p_never_zero(self):
+        # Use biased estimate of p-value to ensure that p-value is never zero
+        # per monte_carlo_test reference [1]
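+        # with the biased estimator p_hat = (k + 1) / (n_resamples + 1) and
+        # the default n_resamples=9999, zero exceedances (k = 0) give
+        # p_hat = 1/10000 = 0.0001, the value asserted below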
+        rng = np.random.default_rng(2190176673029737545)
+        x = np.zeros(100)
+        res = monte_carlo_test(x, rng.random, np.mean,
+                               vectorized=True, alternative='less')
+        assert res.pvalue == 0.0001
+
+
+class TestPermutationTest:
+
+    rtol = 1e-14
+
+    def setup_method(self):
+        self.rng = np.random.default_rng(7170559330470561044)
+
+    # -- Input validation -- #
+
+    def test_permutation_test_iv(self):
+
+        def stat(x, y, axis):
+            return stats.ttest_ind((x, y), axis).statistic
+
+        message = "each sample in `data` must contain two or more ..."
+        with pytest.raises(ValueError, match=message):
+            permutation_test(([1, 2, 3], [1]), stat)
+
+        message = "`data` must be a tuple containing at least two samples"
+        with pytest.raises(ValueError, match=message):
+            permutation_test((1,), stat)
+        with pytest.raises(TypeError, match=message):
+            permutation_test(1, stat)
+
+        message = "`axis` must be an integer."
+        with pytest.raises(ValueError, match=message):
+            permutation_test(([1, 2, 3], [1, 2, 3]), stat, axis=1.5)
+
+        message = "`permutation_type` must be in..."
+        with pytest.raises(ValueError, match=message):
+            permutation_test(([1, 2, 3], [1, 2, 3]), stat,
+                             permutation_type="ekki")
+
+        message = "`vectorized` must be `True`, `False`, or `None`."
+        with pytest.raises(ValueError, match=message):
+            permutation_test(([1, 2, 3], [1, 2, 3]), stat, vectorized=1.5)
+
+        message = "`n_resamples` must be a positive integer."
+        with pytest.raises(ValueError, match=message):
+            permutation_test(([1, 2, 3], [1, 2, 3]), stat, n_resamples=-1000)
+
+        message = "`n_resamples` must be a positive integer."
+        with pytest.raises(ValueError, match=message):
+            permutation_test(([1, 2, 3], [1, 2, 3]), stat, n_resamples=1000.5)
+
+        message = "`batch` must be a positive integer or None."
+        with pytest.raises(ValueError, match=message):
+            permutation_test(([1, 2, 3], [1, 2, 3]), stat, batch=-1000)
+
+        message = "`batch` must be a positive integer or None."
+        with pytest.raises(ValueError, match=message):
+            permutation_test(([1, 2, 3], [1, 2, 3]), stat, batch=1000.5)
+
+        message = "`alternative` must be in..."
+        with pytest.raises(ValueError, match=message):
+            permutation_test(([1, 2, 3], [1, 2, 3]), stat, alternative='ekki')
+
+        message = "'herring' cannot be used to seed a"
+        with pytest.raises(ValueError, match=message):
+            permutation_test(([1, 2, 3], [1, 2, 3]), stat,
+                             random_state='herring')
+
+    # -- Test Parameters -- #
+    @pytest.mark.parametrize('random_state', [np.random.RandomState,
+                                              np.random.default_rng])
+    @pytest.mark.parametrize('permutation_type',
+                             ['pairings', 'samples', 'independent'])
+    def test_batch(self, permutation_type, random_state):
+        # make sure that the `batch` parameter is respected by checking the
+        # maximum batch size provided in calls to `statistic`
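+        # (same call-count arithmetic as in the Monte Carlo `test_batch`
+        # above: with n_resamples=1000, expect 1001, 21, and 2 calls for
+        # batch sizes 1, 50, and 1000, counting one extra call for the
+        # observed statistic)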
+        x = self.rng.random(10)
+        y = self.rng.random(10)
+
+        def statistic(x, y, axis):
+            batch_size = 1 if x.ndim == 1 else len(x)
+            statistic.batch_size = max(batch_size, statistic.batch_size)
+            statistic.counter += 1
+            return np.mean(x, axis=axis) - np.mean(y, axis=axis)
+        statistic.counter = 0
+        statistic.batch_size = 0
+
+        kwds = {'n_resamples': 1000, 'permutation_type': permutation_type,
+                'vectorized': True}
+        res1 = stats.permutation_test((x, y), statistic, batch=1,
+                                      random_state=random_state(0), **kwds)
+        assert_equal(statistic.counter, 1001)
+        assert_equal(statistic.batch_size, 1)
+
+        statistic.counter = 0
+        res2 = stats.permutation_test((x, y), statistic, batch=50,
+                                      random_state=random_state(0), **kwds)
+        assert_equal(statistic.counter, 21)
+        assert_equal(statistic.batch_size, 50)
+
+        statistic.counter = 0
+        res3 = stats.permutation_test((x, y), statistic, batch=1000,
+                                      random_state=random_state(0), **kwds)
+        assert_equal(statistic.counter, 2)
+        assert_equal(statistic.batch_size, 1000)
+
+        assert_equal(res1.pvalue, res3.pvalue)
+        assert_equal(res2.pvalue, res3.pvalue)
+
+    @pytest.mark.parametrize('random_state', [np.random.RandomState,
+                                              np.random.default_rng])
+    @pytest.mark.parametrize('permutation_type, exact_size',
+                             [('pairings', special.factorial(3)**2),
+                              ('samples', 2**3),
+                              ('independent', special.binom(6, 3))])
+    def test_permutations(self, permutation_type, exact_size, random_state):
+        # make sure that the `n_resamples` parameter is respected by checking
+        # the size of the null distribution
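+        # with n = 3 observations per sample, the exact sizes are:
+        # 'pairings' -> 3!**2 = 36, 'samples' -> 2**3 = 8,
+        # 'independent' -> C(6, 3) = 20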
+        x = self.rng.random(3)
+        y = self.rng.random(3)
+
+        def statistic(x, y, axis):
+            return np.mean(x, axis=axis) - np.mean(y, axis=axis)
+
+        kwds = {'permutation_type': permutation_type,
+                'vectorized': True}
+        res = stats.permutation_test((x, y), statistic, n_resamples=3,
+                                     random_state=random_state(0), **kwds)
+        assert_equal(res.null_distribution.size, 3)
+
+        res = stats.permutation_test((x, y), statistic, **kwds)
+        assert_equal(res.null_distribution.size, exact_size)
+
+    # -- Randomized Permutation Tests -- #
+
+    # To get reasonable accuracy, these next three tests are somewhat slow.
+    # Originally, I had them passing for all combinations of permutation type,
+    # alternative, and RNG, but that takes too long for CI. Instead, split
+    # into three tests, each testing a particular combination of the three
+    # parameters.
+
+    def test_randomized_test_against_exact_both(self):
+        # check that the randomized and exact tests agree to reasonable
+        # precision for permutation_type='independent'
+
+        alternative, rng = 'less', 0
+
+        nx, ny, permutations = 8, 9, 24000
+        assert special.binom(nx + ny, nx) > permutations
+
+        x = stats.norm.rvs(size=nx)
+        y = stats.norm.rvs(size=ny)
+        data = x, y
+
+        def statistic(x, y, axis):
+            return np.mean(x, axis=axis) - np.mean(y, axis=axis)
+
+        kwds = {'vectorized': True, 'permutation_type': 'independent',
+                'batch': 100, 'alternative': alternative, 'random_state': rng}
+        res = permutation_test(data, statistic, n_resamples=permutations,
+                               **kwds)
+        res2 = permutation_test(data, statistic, n_resamples=np.inf, **kwds)
+
+        assert res.statistic == res2.statistic
+        assert_allclose(res.pvalue, res2.pvalue, atol=1e-2)
+
+    @pytest.mark.slow()
+    def test_randomized_test_against_exact_samples(self):
+        # check that the randomized and exact tests agree to reasonable
+        # precision for permutation_type='samples'
+
+        alternative, rng = 'greater', None
+
+        nx, ny, permutations = 15, 15, 32000
+        assert 2**nx > permutations
+
+        x = stats.norm.rvs(size=nx)
+        y = stats.norm.rvs(size=ny)
+        data = x, y
+
+        def statistic(x, y, axis):
+            return np.mean(x - y, axis=axis)
+
+        kwds = {'vectorized': True, 'permutation_type': 'samples',
+                'batch': 100, 'alternative': alternative, 'random_state': rng}
+        res = permutation_test(data, statistic, n_resamples=permutations,
+                               **kwds)
+        res2 = permutation_test(data, statistic, n_resamples=np.inf, **kwds)
+
+        assert res.statistic == res2.statistic
+        assert_allclose(res.pvalue, res2.pvalue, atol=1e-2)
+
+    def test_randomized_test_against_exact_pairings(self):
+        # check that the randomized and exact tests agree to reasonable
+        # precision for permutation_type='pairings'
+
+        alternative, rng = 'two-sided', self.rng
+
+        nx, ny, permutations = 8, 8, 40000
+        assert special.factorial(nx) > permutations
+
+        x = stats.norm.rvs(size=nx)
+        y = stats.norm.rvs(size=ny)
+        data = [x]
+
+        def statistic1d(x):
+            return stats.pearsonr(x, y)[0]
+
+        statistic = _resampling._vectorize_statistic(statistic1d)
+
+        kwds = {'vectorized': True, 'permutation_type': 'pairings',
+                'batch': 100, 'alternative': alternative, 'random_state': rng}
+        res = permutation_test(data, statistic, n_resamples=permutations,
+                               **kwds)
+        res2 = permutation_test(data, statistic, n_resamples=np.inf, **kwds)
+
+        assert res.statistic == res2.statistic
+        assert_allclose(res.pvalue, res2.pvalue, atol=1e-2)
+
+    @pytest.mark.parametrize('alternative', ('less', 'greater'))
+    # Different conventions for the two-sided p-value here vs `ttest_ind`.
+    # Eventually, we can add multiple options for the two-sided alternative
+    # here in permutation_test.
+    @pytest.mark.parametrize('permutations', (30, 1e9))
+    @pytest.mark.parametrize('axis', (0, 1, 2))
+    def test_against_permutation_ttest(self, alternative, permutations, axis):
+        # check that this function and ttest_ind with permutations give
+        # essentially identical results.
+
+        x = np.arange(3*4*5).reshape(3, 4, 5)
+        y = np.moveaxis(np.arange(4)[:, None, None], 0, axis)
+
+        rng1 = np.random.default_rng(4337234444626115331)
+        res1 = stats.ttest_ind(x, y, permutations=permutations, axis=axis,
+                               random_state=rng1, alternative=alternative)
+
+        def statistic(x, y, axis):
+            return stats.ttest_ind(x, y, axis=axis).statistic
+
+        rng2 = np.random.default_rng(4337234444626115331)
+        res2 = permutation_test((x, y), statistic, vectorized=True,
+                                n_resamples=permutations,
+                                alternative=alternative, axis=axis,
+                                random_state=rng2)
+
+        assert_allclose(res1.statistic, res2.statistic, rtol=self.rtol)
+        assert_allclose(res1.pvalue, res2.pvalue, rtol=self.rtol)
+
+    # -- Independent (Unpaired) Sample Tests -- #
+
+    @pytest.mark.parametrize('alternative', ("less", "greater", "two-sided"))
+    def test_against_ks_2samp(self, alternative):
+
+        x = self.rng.normal(size=4, scale=1)
+        y = self.rng.normal(size=5, loc=3, scale=3)
+
+        expected = stats.ks_2samp(x, y, alternative=alternative, mode='exact')
+
+        def statistic1d(x, y):
+            return stats.ks_2samp(x, y, mode='asymp',
+                                  alternative=alternative).statistic
+
+        # ks_2samp is always a one-tailed 'greater' test
+        # it's the statistic that changes (D+ vs D- vs max(D+, D-))
+        res = permutation_test((x, y), statistic1d, n_resamples=np.inf,
+                               alternative='greater', random_state=self.rng)
+
+        assert_allclose(res.statistic, expected.statistic, rtol=self.rtol)
+        assert_allclose(res.pvalue, expected.pvalue, rtol=self.rtol)
+
+    @pytest.mark.parametrize('alternative', ("less", "greater", "two-sided"))
+    def test_against_ansari(self, alternative):
+
+        x = self.rng.normal(size=4, scale=1)
+        y = self.rng.normal(size=5, scale=3)
+
+        # ansari has a different convention for 'alternative'
+        alternative_correspondence = {"less": "greater",
+                                      "greater": "less",
+                                      "two-sided": "two-sided"}
+        alternative_scipy = alternative_correspondence[alternative]
+        expected = stats.ansari(x, y, alternative=alternative_scipy)
+
+        def statistic1d(x, y):
+            return stats.ansari(x, y).statistic
+
+        res = permutation_test((x, y), statistic1d, n_resamples=np.inf,
+                               alternative=alternative, random_state=self.rng)
+
+        assert_allclose(res.statistic, expected.statistic, rtol=self.rtol)
+        assert_allclose(res.pvalue, expected.pvalue, rtol=self.rtol)
+
+    @pytest.mark.parametrize('alternative', ("less", "greater", "two-sided"))
+    def test_against_mannwhitneyu(self, alternative):
+
+        x = stats.uniform.rvs(size=(3, 5, 2), loc=0, random_state=self.rng)
+        y = stats.uniform.rvs(size=(3, 5, 2), loc=0.05, random_state=self.rng)
+
+        expected = stats.mannwhitneyu(x, y, axis=1, alternative=alternative)
+
+        def statistic(x, y, axis):
+            return stats.mannwhitneyu(x, y, axis=axis).statistic
+
+        res = permutation_test((x, y), statistic, vectorized=True,
+                               n_resamples=np.inf, alternative=alternative,
+                               axis=1, random_state=self.rng)
+
+        assert_allclose(res.statistic, expected.statistic, rtol=self.rtol)
+        assert_allclose(res.pvalue, expected.pvalue, rtol=self.rtol)
+
+    def test_against_cvm(self):
+
+        x = stats.norm.rvs(size=4, scale=1, random_state=self.rng)
+        y = stats.norm.rvs(size=5, loc=3, scale=3, random_state=self.rng)
+
+        expected = stats.cramervonmises_2samp(x, y, method='exact')
+
+        def statistic1d(x, y):
+            return stats.cramervonmises_2samp(x, y,
+                                              method='asymptotic').statistic
+
+        # cramervonmises_2samp has only one alternative, greater
+        res = permutation_test((x, y), statistic1d, n_resamples=np.inf,
+                               alternative='greater', random_state=self.rng)
+
+        assert_allclose(res.statistic, expected.statistic, rtol=self.rtol)
+        assert_allclose(res.pvalue, expected.pvalue, rtol=self.rtol)
+
+    @pytest.mark.xslow()
+    @pytest.mark.parametrize('axis', (-1, 2))
+    def test_vectorized_nsamp_ptype_both(self, axis):
+        # Test that permutation_test with permutation_type='independent' works
+        # properly for a 3-sample statistic with nd array samples of different
+        # (but compatible) shapes and ndims. Show that exact permutation test
+        # and random permutation tests approximate SciPy's asymptotic pvalues
+        # and that exact and random permutation test results are even closer
+        # to one another (than they are to the asymptotic results).
+
+        # Three samples, different (but compatible) shapes with different ndims
+        rng = np.random.default_rng(6709265303529651545)
+        x = rng.random(size=3)
+        y = rng.random(size=(1, 3, 2))
+        z = rng.random(size=(2, 1, 4))
+        data = (x, y, z)
+
+        # Define the statistic (and pvalue for comparison)
+        def statistic1d(*data):
+            return stats.kruskal(*data).statistic
+
+        def pvalue1d(*data):
+            return stats.kruskal(*data).pvalue
+
+        statistic = _resampling._vectorize_statistic(statistic1d)
+        pvalue = _resampling._vectorize_statistic(pvalue1d)
+
+        # Calculate the expected results
+        x2 = np.broadcast_to(x, (2, 3, 3))  # broadcast manually because
+        y2 = np.broadcast_to(y, (2, 3, 2))  # _vectorize_statistic doesn't
+        z2 = np.broadcast_to(z, (2, 3, 4))
+        expected_statistic = statistic(x2, y2, z2, axis=axis)
+        expected_pvalue = pvalue(x2, y2, z2, axis=axis)
+
+        # Calculate exact and randomized permutation results
+        kwds = {'vectorized': False, 'axis': axis, 'alternative': 'greater',
+                'permutation_type': 'independent', 'random_state': self.rng}
+        res = permutation_test(data, statistic1d, n_resamples=np.inf, **kwds)
+        res2 = permutation_test(data, statistic1d, n_resamples=1000, **kwds)
+
+        # Check results
+        assert_allclose(res.statistic, expected_statistic, rtol=self.rtol)
+        assert_allclose(res.statistic, res2.statistic, rtol=self.rtol)
+        assert_allclose(res.pvalue, expected_pvalue, atol=6e-2)
+        assert_allclose(res.pvalue, res2.pvalue, atol=3e-2)
+
+    # -- Paired-Sample Tests -- #
+
+    @pytest.mark.parametrize('alternative', ("less", "greater", "two-sided"))
+    def test_against_wilcoxon(self, alternative):
+
+        x = stats.uniform.rvs(size=(3, 6, 2), loc=0, random_state=self.rng)
+        y = stats.uniform.rvs(size=(3, 6, 2), loc=0.05, random_state=self.rng)
+
+        # We'll check both 1- and 2-sample versions of the same test;
+        # we expect identical results to wilcoxon in all cases.
+        def statistic_1samp_1d(z):
+            # 'less' ensures we get the same one of the two possible
+            # statistics every time
+            return stats.wilcoxon(z, alternative='less').statistic
+
+        def statistic_2samp_1d(x, y):
+            return stats.wilcoxon(x, y, alternative='less').statistic
+
+        def test_1d(x, y):
+            return stats.wilcoxon(x, y, alternative=alternative)
+
+        test = _resampling._vectorize_statistic(test_1d)
+
+        expected = test(x, y, axis=1)
+        expected_stat = expected[0]
+        expected_p = expected[1]
+
+        kwds = {'vectorized': False, 'axis': 1, 'alternative': alternative,
+                'permutation_type': 'samples', 'random_state': self.rng,
+                'n_resamples': np.inf}
+        res1 = permutation_test((x-y,), statistic_1samp_1d, **kwds)
+        res2 = permutation_test((x, y), statistic_2samp_1d, **kwds)
+
+        # `wilcoxon` returns a different statistic with 'two-sided'
+        assert_allclose(res1.statistic, res2.statistic, rtol=self.rtol)
+        if alternative != 'two-sided':
+            assert_allclose(res2.statistic, expected_stat, rtol=self.rtol)
+
+        assert_allclose(res2.pvalue, expected_p, rtol=self.rtol)
+        assert_allclose(res1.pvalue, res2.pvalue, rtol=self.rtol)
+
+    @pytest.mark.parametrize('alternative', ("less", "greater", "two-sided"))
+    def test_against_binomtest(self, alternative):
+
+        x = self.rng.integers(0, 2, size=10)
+        x[x == 0] = -1
+        # More naturally, the test would flip elements between 0 and 1.
+        # However, permutation_test will flip the _signs_ of the elements.
+        # So we have to work with +1/-1 instead of 1/0.
+
+        def statistic(x, axis=0):
+            return np.sum(x > 0, axis=axis)
+
+        k, n, p = statistic(x), 10, 0.5
+        expected = stats.binomtest(k, n, p, alternative=alternative)
+
+        res = stats.permutation_test((x,), statistic, vectorized=True,
+                                     permutation_type='samples',
+                                     n_resamples=np.inf, random_state=self.rng,
+                                     alternative=alternative)
+        assert_allclose(res.pvalue, expected.pvalue, rtol=self.rtol)
+
+    # -- Exact Association Tests -- #
+
+    def test_against_kendalltau(self):
+
+        x = self.rng.normal(size=6)
+        y = x + self.rng.normal(size=6)
+
+        expected = stats.kendalltau(x, y, method='exact')
+
+        def statistic1d(x):
+            return stats.kendalltau(x, y, method='asymptotic').statistic
+
+        # kendalltau currently has only one alternative, two-sided
+        res = permutation_test((x,), statistic1d, permutation_type='pairings',
+                               n_resamples=np.inf, random_state=self.rng)
+
+        assert_allclose(res.statistic, expected.statistic, rtol=self.rtol)
+        assert_allclose(res.pvalue, expected.pvalue, rtol=self.rtol)
+
+    @pytest.mark.parametrize('alternative', ('less', 'greater', 'two-sided'))
+    def test_against_fisher_exact(self, alternative):
+
+        def statistic(x,):
+            return np.sum((x == 1) & (y == 1))
+
+        # x and y are binary random variables with some dependence
+        rng = np.random.default_rng(6235696159000529929)
+        x = (rng.random(7) > 0.6).astype(float)
+        y = (rng.random(7) + 0.25*x > 0.6).astype(float)
+        tab = stats.contingency.crosstab(x, y)[1]
+
+        res = permutation_test((x,), statistic, permutation_type='pairings',
+                               n_resamples=np.inf, alternative=alternative,
+                               random_state=rng)
+        res2 = stats.fisher_exact(tab, alternative=alternative)
+
+        assert_allclose(res.pvalue, res2[1])
+
+    @pytest.mark.xslow()
+    @pytest.mark.parametrize('axis', (-2, 1))
+    def test_vectorized_nsamp_ptype_samples(self, axis):
+        # Test that permutation_test with permutation_type='samples' works
+        # properly for a 3-sample statistic with nd array samples of different
+        # (but compatible) shapes and ndims. Show that exact permutation test
+        # reproduces SciPy's exact pvalue and that random permutation test
+        # approximates it.
+
+        x = self.rng.random(size=(2, 4, 3))
+        y = self.rng.random(size=(1, 4, 3))
+        z = self.rng.random(size=(2, 4, 1))
+        x = stats.rankdata(x, axis=axis)
+        y = stats.rankdata(y, axis=axis)
+        z = stats.rankdata(z, axis=axis)
+        y = y[0]  # to check broadcast with different ndim
+        data = (x, y, z)
+
+        def statistic1d(*data):
+            return stats.page_trend_test(data, ranked=True,
+                                         method='asymptotic').statistic
+
+        def pvalue1d(*data):
+            return stats.page_trend_test(data, ranked=True,
+                                         method='exact').pvalue
+
+        statistic = _resampling._vectorize_statistic(statistic1d)
+        pvalue = _resampling._vectorize_statistic(pvalue1d)
+
+        expected_statistic = statistic(*np.broadcast_arrays(*data), axis=axis)
+        expected_pvalue = pvalue(*np.broadcast_arrays(*data), axis=axis)
+
+        # Let's forgive this use of an integer seed, please.
+        kwds = {'vectorized': False, 'axis': axis, 'alternative': 'greater',
+                'permutation_type': 'pairings', 'random_state': 0}
+        res = permutation_test(data, statistic1d, n_resamples=np.inf, **kwds)
+        res2 = permutation_test(data, statistic1d, n_resamples=5000, **kwds)
+
+        assert_allclose(res.statistic, expected_statistic, rtol=self.rtol)
+        assert_allclose(res.statistic, res2.statistic, rtol=self.rtol)
+        assert_allclose(res.pvalue, expected_pvalue, rtol=self.rtol)
+        assert_allclose(res.pvalue, res2.pvalue, atol=3e-2)
+
+    # -- Test Against External References -- #
+
+    tie_case_1 = {'x': [1, 2, 3, 4], 'y': [1.5, 2, 2.5],
+                  'expected_less': 0.2000000000,
+                  'expected_2sided': 0.4,  # 2*expected_less
+                  'expected_Pr_gte_S_mean': 0.3428571429,  # see note below
+                  'expected_statistic': 7.5,
+                  'expected_avg': 9.142857, 'expected_std': 1.40698}
+    tie_case_2 = {'x': [111, 107, 100, 99, 102, 106, 109, 108],
+                  'y': [107, 108, 106, 98, 105, 103, 110, 105, 104],
+                  'expected_less': 0.1555738379,
+                  'expected_2sided': 0.3111476758,
+                  'expected_Pr_gte_S_mean': 0.2969971205,  # see note below
+                  'expected_statistic': 32.5,
+                  'expected_avg': 38.117647, 'expected_std': 5.172124}
+
+    @pytest.mark.xslow()  # only the second case is slow, really
+    @pytest.mark.parametrize('case', (tie_case_1, tie_case_2))
+    def test_with_ties(self, case):
+        """
+        Results above are from SAS PROC NPAR1WAY, e.g.
+
+        DATA myData;
+        INPUT X Y;
+        CARDS;
+        1 1
+        1 2
+        1 3
+        1 4
+        2 1.5
+        2 2
+        2 2.5
+        ods graphics on;
+        proc npar1way AB data=myData;
+            class X;
+            EXACT;
+        run;
+        ods graphics off;
+
+        Note: SAS provides Pr >= |S-Mean|, which is different from our
+        definition of a two-sided p-value.
+
+        """
+
+        x = case['x']
+        y = case['y']
+
+        expected_statistic = case['expected_statistic']
+        expected_less = case['expected_less']
+        expected_2sided = case['expected_2sided']
+        expected_Pr_gte_S_mean = case['expected_Pr_gte_S_mean']
+        expected_avg = case['expected_avg']
+        expected_std = case['expected_std']
+
+        def statistic1d(x, y):
+            return stats.ansari(x, y).statistic
+
+        with np.testing.suppress_warnings() as sup:
+            sup.filter(UserWarning, "Ties preclude use of exact statistic")
+            res = permutation_test((x, y), statistic1d, n_resamples=np.inf,
+                                   alternative='less')
+            res2 = permutation_test((x, y), statistic1d, n_resamples=np.inf,
+                                    alternative='two-sided')
+
+        assert_allclose(res.statistic, expected_statistic, rtol=self.rtol)
+        assert_allclose(res.pvalue, expected_less, atol=1e-10)
+        assert_allclose(res2.pvalue, expected_2sided, atol=1e-10)
+        assert_allclose(res2.null_distribution.mean(), expected_avg, rtol=1e-6)
+        assert_allclose(res2.null_distribution.std(), expected_std, rtol=1e-6)
+
+        # SAS provides Pr >= |S-Mean|; might as well check against that, too
+        S = res.statistic
+        mean = res.null_distribution.mean()
+        n = len(res.null_distribution)
+        Pr_gte_S_mean = np.sum(np.abs(res.null_distribution-mean)
+                               >= np.abs(S-mean))/n
+        assert_allclose(expected_Pr_gte_S_mean, Pr_gte_S_mean)
+
+    @pytest.mark.parametrize('alternative, expected_pvalue',
+                             (('less', 0.9708333333333),
+                              ('greater', 0.05138888888889),
+                              ('two-sided', 0.1027777777778)))
+    def test_against_spearmanr_in_R(self, alternative, expected_pvalue):
+        """
+        Results above from R cor.test, e.g.
+
+        options(digits=16)
+        x <- c(1.76405235, 0.40015721, 0.97873798,
+               2.2408932, 1.86755799, -0.97727788)
+        y <- c(2.71414076, 0.2488, 0.87551913,
+               2.6514917, 2.01160156, 0.47699563)
+        cor.test(x, y, method = "spearm", alternative = "t")
+        """
+        # data comes from
+        # np.random.seed(0)
+        # x = stats.norm.rvs(size=6)
+        # y = x + stats.norm.rvs(size=6)
+        x = [1.76405235, 0.40015721, 0.97873798,
+             2.2408932, 1.86755799, -0.97727788]
+        y = [2.71414076, 0.2488, 0.87551913,
+             2.6514917, 2.01160156, 0.47699563]
+        expected_statistic = 0.7714285714285715
+
+        def statistic1d(x):
+            return stats.spearmanr(x, y).statistic
+
+        res = permutation_test((x,), statistic1d, permutation_type='pairings',
+                               n_resamples=np.inf, alternative=alternative)
+
+        assert_allclose(res.statistic, expected_statistic, rtol=self.rtol)
+        assert_allclose(res.pvalue, expected_pvalue, atol=1e-13)
+
+    @pytest.mark.parametrize("batch", (-1, 0))
+    def test_batch_generator_iv(self, batch):
+        with pytest.raises(ValueError, match="`batch` must be positive."):
+            list(_resampling._batch_generator([1, 2, 3], batch))
+
+    batch_generator_cases = [(range(0), 3, []),
+                             (range(6), 3, [[0, 1, 2], [3, 4, 5]]),
+                             (range(8), 3, [[0, 1, 2], [3, 4, 5], [6, 7]])]
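+
+    # For orientation, a pure-Python equivalent of what the private helper is
+    # assumed to do, based on the cases above (a sketch only; `itertools`
+    # would need to be imported):
+    #
+    #     def batch_generator(iterable, batch):
+    #         iterator = iter(iterable)
+    #         while chunk := list(itertools.islice(iterator, batch)):
+    #             yield chunk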
+
+    @pytest.mark.parametrize("iterable, batch, expected",
+                             batch_generator_cases)
+    def test_batch_generator(self, iterable, batch, expected):
+        got = list(_resampling._batch_generator(iterable, batch))
+        assert got == expected
+
+    def test_finite_precision_statistic(self):
+        # Some statistics return numerically distinct values when the values
+        # should be equal in theory. Test that `permutation_test` accounts
+        # for this in some way.
+        x = [1, 2, 4, 3]
+        y = [2, 4, 6, 8]
+
+        def statistic(x, y):
+            return stats.pearsonr(x, y)[0]
+
+        res = stats.permutation_test((x, y), statistic, vectorized=False,
+                                     permutation_type='pairings')
+        r, pvalue, null = res.statistic, res.pvalue, res.null_distribution
+
+        correct_p = 2 * np.sum(null >= r - 1e-14) / len(null)
+        assert pvalue == correct_p == 1/3
+        # Compare against other exact correlation tests using R cor.test
+        # options(digits=16)
+        # x = c(1, 2, 4, 3)
+        # y = c(2, 4, 6, 8)
+        # cor.test(x, y, alternative = "t", method = "spearman")  # 0.333333333
+        # cor.test(x, y, alternative = "t", method = "kendall")  # 0.333333333
+
+
+def test_all_partitions_concatenated():
+    # make sure that _all_paritions_concatenated produces the correct number
+    # of partitions of the data into samples of the given sizes and that
+    # all are unique
+    n = np.array([3, 2, 4], dtype=int)
+    nc = np.cumsum(n)
+
+    all_partitions = set()
+    counter = 0
+    for partition_concatenated in _resampling._all_partitions_concatenated(n):
+        counter += 1
+        partitioning = np.split(partition_concatenated, nc[:-1])
+        all_partitions.add(tuple([frozenset(i) for i in partitioning]))
+
+    expected = np.prod([special.binom(sum(n[i:]), sum(n[i+1:]))
+                        for i in range(len(n)-1)])
+
+    assert_equal(counter, expected)
+    assert_equal(len(all_partitions), expected)
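+
+    # Worked check of the counting argument (an aside, not an extra
+    # assertion): for n = [3, 2, 4] the number of distinct partitions is the
+    # multinomial coefficient 9! / (3! * 2! * 4!) = 1260, which matches the
+    # product of binomials above: C(9, 6) * C(6, 4) = 84 * 15 = 1260.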
+
+
+@pytest.mark.parametrize('fun_name',
+                         ['bootstrap', 'permutation_test', 'monte_carlo_test'])
+def test_parameter_vectorized(fun_name):
+    # Check that parameter `vectorized` is working as desired for all
+    # resampling functions. Results don't matter; just don't fail asserts.
+    rng = np.random.default_rng(75245098234592)
+    sample = rng.random(size=10)
+
+    def rvs(size):  # needed by `monte_carlo_test`
+        return stats.norm.rvs(size=size, random_state=rng)
+
+    fun_options = {'bootstrap': {'data': (sample,), 'random_state': rng,
+                                 'method': 'percentile'},
+                   'permutation_test': {'data': (sample,), 'random_state': rng,
+                                        'permutation_type': 'samples'},
+                   'monte_carlo_test': {'sample': sample, 'rvs': rvs}}
+    common_options = {'n_resamples': 100}
+
+    fun = getattr(stats, fun_name)
+    options = fun_options[fun_name]
+    options.update(common_options)
+
+    def statistic(x, axis):
+        assert x.ndim > 1 or np.array_equal(x, sample)
+        return np.mean(x, axis=axis)
+    fun(statistic=statistic, vectorized=None, **options)
+    fun(statistic=statistic, vectorized=True, **options)
+
+    def statistic(x):
+        assert x.ndim == 1
+        return np.mean(x)
+    fun(statistic=statistic, vectorized=None, **options)
+    fun(statistic=statistic, vectorized=False, **options)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_sampling.py b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_sampling.py
new file mode 100644
index 00000000..8da2839e
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_sampling.py
@@ -0,0 +1,1357 @@
+import threading
+import pickle
+import pytest
+from copy import deepcopy
+import platform
+import sys
+import math
+import numpy as np
+from numpy.testing import assert_allclose, assert_equal, suppress_warnings
+from numpy.lib import NumpyVersion
+from scipy.stats.sampling import (
+    TransformedDensityRejection,
+    DiscreteAliasUrn,
+    DiscreteGuideTable,
+    NumericalInversePolynomial,
+    NumericalInverseHermite,
+    SimpleRatioUniforms,
+    UNURANError
+)
+from scipy import stats
+from scipy import special
+from scipy.stats import chisquare, cramervonmises
+from scipy.stats._distr_params import distdiscrete, distcont
+from scipy._lib._util import check_random_state
+
+
+# common test data: this data can be shared between all the tests.
+
+
+# Normal distribution shared between all the continuous methods
+class StandardNormal:
+    def pdf(self, x):
+        # normalization constant needed for NumericalInverseHermite
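+        # (Hermite interpolation uses the pdf as the derivative of the exact
+        # cdf, so the two must be on the same scale; a rejection method such
+        # as TDR could also work with an unnormalized density.)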
+        return 1./np.sqrt(2.*np.pi) * np.exp(-0.5 * x*x)
+
+    def dpdf(self, x):
+        return 1./np.sqrt(2.*np.pi) * -x * np.exp(-0.5 * x*x)
+
+    def cdf(self, x):
+        return special.ndtr(x)
+
+
+all_methods = [
+    ("TransformedDensityRejection", {"dist": StandardNormal()}),
+    ("DiscreteAliasUrn", {"dist": [0.02, 0.18, 0.8]}),
+    ("DiscreteGuideTable", {"dist": [0.02, 0.18, 0.8]}),
+    ("NumericalInversePolynomial", {"dist": StandardNormal()}),
+    ("NumericalInverseHermite", {"dist": StandardNormal()}),
+    ("SimpleRatioUniforms", {"dist": StandardNormal(), "mode": 0})
+]
+
+if (sys.implementation.name == 'pypy'
+        and sys.implementation.version < (7, 3, 10)):
+    # changed in PyPy for v7.3.10
+    floaterr = r"unsupported operand type for float\(\): 'list'"
+else:
+    floaterr = r"must be real number, not list"
+# Make sure an internal error occurs in UNU.RAN when invalid callbacks are
+# passed. Moreover, different generators throw different error messages.
+# So, in case of an `UNURANError`, we do not validate the error message.
+bad_pdfs_common = [
+    # Negative PDF
+    (lambda x: -x, UNURANError, r"..."),
+    # Returning wrong type
+    (lambda x: [], TypeError, floaterr),
+    # Undefined name inside the function
+    (lambda x: foo, NameError, r"name 'foo' is not defined"),  # type: ignore[name-defined]  # noqa
+    # Infinite value returned => Overflow error.
+    (lambda x: np.inf, UNURANError, r"..."),
+    # NaN value => internal error in UNU.RAN
+    (lambda x: np.nan, UNURANError, r"..."),
+    # signature of PDF wrong
+    (lambda: 1.0, TypeError, r"takes 0 positional arguments but 1 was given")
+]
+
+
+# same approach for dpdf
+bad_dpdf_common = [
+    # Infinite value returned.
+    (lambda x: np.inf, UNURANError, r"..."),
+    # NaN value => internal error in UNU.RAN
+    (lambda x: np.nan, UNURANError, r"..."),
+    # Returning wrong type
+    (lambda x: [], TypeError, floaterr),
+    # Undefined name inside the function
+    (lambda x: foo, NameError, r"name 'foo' is not defined"),  # type: ignore[name-defined]  # noqa
+    # signature of dPDF wrong
+    (lambda: 1.0, TypeError, r"takes 0 positional arguments but 1 was given")
+]
+
+
+# same approach for logpdf
+bad_logpdfs_common = [
+    # Returning wrong type
+    (lambda x: [], TypeError, floaterr),
+    # Undefined name inside the function
+    (lambda x: foo, NameError, r"name 'foo' is not defined"),  # type: ignore[name-defined]  # noqa
+    # Infinite value returned => Overflow error.
+    (lambda x: np.inf, UNURANError, r"..."),
+    # NaN value => internal error in UNU.RAN
+    (lambda x: np.nan, UNURANError, r"..."),
+    # signature of logpdf wrong
+    (lambda: 1.0, TypeError, r"takes 0 positional arguments but 1 was given")
+]
+
+
+bad_pv_common = [
+    ([], r"must contain at least one element"),
+    ([[1.0, 0.0]], r"wrong number of dimensions \(expected 1, got 2\)"),
+    ([0.2, 0.4, np.nan, 0.8], r"must contain only finite / non-nan values"),
+    ([0.2, 0.4, np.inf, 0.8], r"must contain only finite / non-nan values"),
+    ([0.0, 0.0], r"must contain at least one non-zero value"),
+]
+
+
+# size of the domains is incorrect
+bad_sized_domains = [
+    # > 2 elements in the domain
+    ((1, 2, 3), ValueError, r"must be a length 2 tuple"),
+    # empty domain
+    ((), ValueError, r"must be a length 2 tuple")
+]
+
+# domain values are incorrect
+bad_domains = [
+    ((2, 1), UNURANError, r"left >= right"),
+    ((1, 1), UNURANError, r"left >= right"),
+]
+
+# infinite and nan values present in domain.
+inf_nan_domains = [
+    # left >= right
+    ((10, 10), UNURANError, r"left >= right"),
+    ((np.inf, np.inf), UNURANError, r"left >= right"),
+    ((-np.inf, -np.inf), UNURANError, r"left >= right"),
+    ((np.inf, -np.inf), UNURANError, r"left >= right"),
+    # Also include nans in some of the domains.
+    ((-np.inf, np.nan), ValueError, r"only non-nan values"),
+    ((np.nan, np.inf), ValueError, r"only non-nan values")
+]
+
+# `nan` values present in domain. Some distributions don't support
+# infinite tails, so don't mix the nan values with infinities.
+nan_domains = [
+    ((0, np.nan), ValueError, r"only non-nan values"),
+    ((np.nan, np.nan), ValueError, r"only non-nan values")
+]
+
+
+# all the methods should throw errors for nan, bad sized, and bad valued
+# domains.
+@pytest.mark.parametrize("domain, err, msg",
+                         bad_domains + bad_sized_domains +
+                         nan_domains)  # type: ignore[operator]
+@pytest.mark.parametrize("method, kwargs", all_methods)
+def test_bad_domain(domain, err, msg, method, kwargs):
+    Method = getattr(stats.sampling, method)
+    with pytest.raises(err, match=msg):
+        Method(**kwargs, domain=domain)
+
+
+@pytest.mark.parametrize("method, kwargs", all_methods)
+def test_random_state(method, kwargs):
+    Method = getattr(stats.sampling, method)
+
+    # simple seed that works for any version of NumPy
+    seed = 123
+    rng1 = Method(**kwargs, random_state=seed)
+    rng2 = Method(**kwargs, random_state=seed)
+    assert_equal(rng1.rvs(100), rng2.rvs(100))
+
+    # global seed
+    np.random.seed(123)
+    rng1 = Method(**kwargs)
+    rvs1 = rng1.rvs(100)
+    np.random.seed(None)
+    rng2 = Method(**kwargs, random_state=123)
+    rvs2 = rng2.rvs(100)
+    assert_equal(rvs1, rvs2)
+
+    # Generator seed for new NumPy
+    # when a RandomState is given, it should take the bitgen_t
+    # member of the class and create a Generator instance.
+    seed1 = np.random.RandomState(np.random.MT19937(123))
+    seed2 = np.random.Generator(np.random.MT19937(123))
+    rng1 = Method(**kwargs, random_state=seed1)
+    rng2 = Method(**kwargs, random_state=seed2)
+    assert_equal(rng1.rvs(100), rng2.rvs(100))
+
+
+def test_set_random_state():
+    rng1 = TransformedDensityRejection(StandardNormal(), random_state=123)
+    rng2 = TransformedDensityRejection(StandardNormal())
+    rng2.set_random_state(123)
+    assert_equal(rng1.rvs(100), rng2.rvs(100))
+    rng = TransformedDensityRejection(StandardNormal(), random_state=123)
+    rvs1 = rng.rvs(100)
+    rng.set_random_state(123)
+    rvs2 = rng.rvs(100)
+    assert_equal(rvs1, rvs2)
+
+
+def test_threading_behaviour():
+    # Test if the API is thread-safe.
+    # This verifies that the lock mechanism and the use of `PyErr_Occurred`
+    # are correct.
+    errors = {"err1": None, "err2": None}
+
+    class Distribution:
+        def __init__(self, pdf_msg):
+            self.pdf_msg = pdf_msg
+
+        def pdf(self, x):
+            if 49.9 < x < 50.0:
+                raise ValueError(self.pdf_msg)
+            return x
+
+        def dpdf(self, x):
+            return 1
+
+    def func1():
+        dist = Distribution('foo')
+        rng = TransformedDensityRejection(dist, domain=(10, 100),
+                                          random_state=12)
+        try:
+            rng.rvs(100000)
+        except ValueError as e:
+            errors['err1'] = e.args[0]
+
+    def func2():
+        dist = Distribution('bar')
+        rng = TransformedDensityRejection(dist, domain=(10, 100),
+                                          random_state=2)
+        try:
+            rng.rvs(100000)
+        except ValueError as e:
+            errors['err2'] = e.args[0]
+
+    t1 = threading.Thread(target=func1)
+    t2 = threading.Thread(target=func2)
+
+    t1.start()
+    t2.start()
+
+    t1.join()
+    t2.join()
+
+    assert errors['err1'] == 'foo'
+    assert errors['err2'] == 'bar'
+
+
+@pytest.mark.parametrize("method, kwargs", all_methods)
+def test_pickle(method, kwargs):
+    Method = getattr(stats.sampling, method)
+    rng1 = Method(**kwargs, random_state=123)
+    obj = pickle.dumps(rng1)
+    rng2 = pickle.loads(obj)
+    assert_equal(rng1.rvs(100), rng2.rvs(100))
+
+
+@pytest.mark.parametrize("size", [None, 0, (0, ), 1, (10, 3), (2, 3, 4, 5),
+                                  (0, 0), (0, 1)])
+def test_rvs_size(size):
+    # As the `rvs` method is present in the base class and shared between
+    # all the classes, we can just test with one of the methods.
+    rng = TransformedDensityRejection(StandardNormal())
+    if size is None:
+        assert np.isscalar(rng.rvs(size))
+    else:
+        if np.isscalar(size):
+            size = (size, )
+        assert rng.rvs(size).shape == size
+
+
+def test_with_scipy_distribution():
+    # test if the setup works with SciPy's rv_frozen distributions
+    dist = stats.norm()
+    urng = np.random.default_rng(0)
+    rng = NumericalInverseHermite(dist, random_state=urng)
+    u = np.linspace(0, 1, num=100)
+    check_cont_samples(rng, dist, dist.stats())
+    assert_allclose(dist.ppf(u), rng.ppf(u))
+    # test if it works with `loc` and `scale`
+    dist = stats.norm(loc=10., scale=5.)
+    rng = NumericalInverseHermite(dist, random_state=urng)
+    check_cont_samples(rng, dist, dist.stats())
+    assert_allclose(dist.ppf(u), rng.ppf(u))
+    # check for discrete distributions
+    dist = stats.binom(10, 0.2)
+    rng = DiscreteAliasUrn(dist, random_state=urng)
+    domain = dist.support()
+    pv = dist.pmf(np.arange(domain[0], domain[1]+1))
+    check_discr_samples(rng, pv, dist.stats())
+
+
+def check_cont_samples(rng, dist, mv_ex):
+    rvs = rng.rvs(100000)
+    mv = rvs.mean(), rvs.var()
+    # test the moments only if the variance is finite
+    if np.isfinite(mv_ex[1]):
+        assert_allclose(mv, mv_ex, rtol=1e-7, atol=1e-1)
+    # Cramer Von Mises test for goodness-of-fit
+    rvs = rng.rvs(500)
+    dist.cdf = np.vectorize(dist.cdf)
+    pval = cramervonmises(rvs, dist.cdf).pvalue
+    assert pval > 0.1
+
+
+def check_discr_samples(rng, pv, mv_ex):
+    rvs = rng.rvs(100000)
+    # test if the first few moments match
+    mv = rvs.mean(), rvs.var()
+    assert_allclose(mv, mv_ex, rtol=1e-3, atol=1e-1)
+    # normalize
+    pv = pv / pv.sum()
+    # chi-squared test for goodness-of-fit
+    obs_freqs = np.zeros_like(pv)
+    _, freqs = np.unique(rvs, return_counts=True)
+    freqs = freqs / freqs.sum()
+    obs_freqs[:freqs.size] = freqs
+    pval = chisquare(obs_freqs, pv).pvalue
+    assert pval > 0.1
+
+
+def test_warning_center_not_in_domain():
+    # UNU.RAN warns if the center (either provided by the user or computed
+    # without regard to the domain) lies outside of the domain
+    msg = "102 : center moved into domain of distribution"
+    with pytest.warns(RuntimeWarning, match=msg):
+        NumericalInversePolynomial(StandardNormal(), center=0, domain=(3, 5))
+    with pytest.warns(RuntimeWarning, match=msg):
+        NumericalInversePolynomial(StandardNormal(), domain=(3, 5))
+
+
+@pytest.mark.parametrize('method', ["SimpleRatioUniforms",
+                                    "NumericalInversePolynomial",
+                                    "TransformedDensityRejection"])
+def test_error_mode_not_in_domain(method):
+    # UNURAN raises an error if the mode is not in the domain
+    # The behavior differs from the case where the center is not in the
+    # domain: the mode must be the exact value, whereas the center may be an
+    # approximation.
+    Method = getattr(stats.sampling, method)
+    msg = "17 : mode not in domain"
+    with pytest.raises(UNURANError, match=msg):
+        Method(StandardNormal(), mode=0, domain=(3, 5))
+
+
+@pytest.mark.parametrize('method', ["NumericalInverseHermite",
+                                    "NumericalInversePolynomial"])
+class TestQRVS:
+    def test_input_validation(self, method):
+        match = "`qmc_engine` must be an instance of..."
+        with pytest.raises(ValueError, match=match):
+            Method = getattr(stats.sampling, method)
+            gen = Method(StandardNormal())
+            gen.qrvs(qmc_engine=0)
+
+        # issues with QMCEngines and old NumPy
+        Method = getattr(stats.sampling, method)
+        gen = Method(StandardNormal())
+
+        match = "`d` must be consistent with dimension of `qmc_engine`."
+        with pytest.raises(ValueError, match=match):
+            gen.qrvs(d=3, qmc_engine=stats.qmc.Halton(2))
+
+    qrngs = [None, stats.qmc.Sobol(1, seed=0), stats.qmc.Halton(3, seed=0)]
+    # `size=None` should not add anything to the shape, `size=1` should
+    sizes = [(None, tuple()), (1, (1,)), (4, (4,)),
+             ((4,), (4,)), ((2, 4), (2, 4))]  # type: ignore
+    # Neither `d=None` nor `d=1` should add anything to the shape
+    ds = [(None, tuple()), (1, tuple()), (3, (3,))]
+
+    @pytest.mark.parametrize('qrng', qrngs)
+    @pytest.mark.parametrize('size_in, size_out', sizes)
+    @pytest.mark.parametrize('d_in, d_out', ds)
+    def test_QRVS_shape_consistency(self, qrng, size_in, size_out,
+                                    d_in, d_out, method):
+        w32 = sys.platform == "win32" and platform.architecture()[0] == "32bit"
+        if w32 and method == "NumericalInversePolynomial":
+            pytest.xfail("NumericalInversePolynomial.qrvs fails for Win "
+                         "32-bit")
+
+        dist = StandardNormal()
+        Method = getattr(stats.sampling, method)
+        gen = Method(dist)
+
+        # If d and qrng.d are inconsistent, an error is raised
+        if d_in is not None and qrng is not None and qrng.d != d_in:
+            match = "`d` must be consistent with dimension of `qmc_engine`."
+            with pytest.raises(ValueError, match=match):
+                gen.qrvs(size_in, d=d_in, qmc_engine=qrng)
+            return
+
+        # Sometimes d is really determined by qrng
+        if d_in is None and qrng is not None and qrng.d != 1:
+            d_out = (qrng.d,)
+
+        shape_expected = size_out + d_out
+
+        qrng2 = deepcopy(qrng)
+        qrvs = gen.qrvs(size=size_in, d=d_in, qmc_engine=qrng)
+        if size_in is not None:
+            assert qrvs.shape == shape_expected
+
+        if qrng2 is not None:
+            uniform = qrng2.random(np.prod(size_in) or 1)
+            qrvs2 = stats.norm.ppf(uniform).reshape(shape_expected)
+            assert_allclose(qrvs, qrvs2, atol=1e-12)
+
+    def test_QRVS_size_tuple(self, method):
+        # QMCEngine samples are always of shape (n, d). When `size` is a tuple,
+        # we set `n = prod(size)` in the call to qmc_engine.random, transform
+        # the sample, and reshape it to the final dimensions. When we reshape,
+        # we need to be careful, because the _columns_ of the sample returned
+        # by a QMCEngine are "independent"-ish, but the elements within the
+        # columns are not. We need to make sure that this doesn't get mixed up
+        # by reshaping: qrvs[..., i] should remain "independent"-ish of
+        # qrvs[..., i+1], but the elements within qrvs[..., i] should be
+        # transformed from the same low-discrepancy sequence.
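+        #
+        # Concretely (mirroring the code below): with size=(3, 4) and d=5 the
+        # engine draws a (12, 5) sample, and reshaping to (3, 4, 5) must send
+        # column i of the sample to qrvs[..., i], i.e.
+        # qrvs[j, k, i] == transformed sample[j*4 + k, i].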
+
+        dist = StandardNormal()
+        Method = getattr(stats.sampling, method)
+        gen = Method(dist)
+
+        size = (3, 4)
+        d = 5
+        qrng = stats.qmc.Halton(d, seed=0)
+        qrng2 = stats.qmc.Halton(d, seed=0)
+
+        uniform = qrng2.random(np.prod(size))
+
+        qrvs = gen.qrvs(size=size, d=d, qmc_engine=qrng)
+        qrvs2 = stats.norm.ppf(uniform)
+
+        for i in range(d):
+            sample = qrvs[..., i]
+            sample2 = qrvs2[:, i].reshape(size)
+            assert_allclose(sample, sample2, atol=1e-12)
+
+
+class TestTransformedDensityRejection:
+    # Simple Custom Distribution
+    class dist0:
+        def pdf(self, x):
+            return 3/4 * (1-x*x)
+
+        def dpdf(self, x):
+            return 3/4 * (-2*x)
+
+        def cdf(self, x):
+            return 3/4 * (x - x**3/3 + 2/3)
+
+        def support(self):
+            return -1, 1
+
+    # Standard Normal Distribution
+    class dist1:
+        def pdf(self, x):
+            return stats.norm._pdf(x / 0.1)
+
+        def dpdf(self, x):
+            return -x / 0.01 * stats.norm._pdf(x / 0.1)
+
+        def cdf(self, x):
+            return stats.norm._cdf(x / 0.1)
+
+    # pdf with piecewise linear function as transformed density
+    # with T = -1/sqrt with shift. Taken from UNU.RAN test suite
+    # (from file t_tdr_ps.c)
+    class dist2:
+        def __init__(self, shift):
+            self.shift = shift
+
+        def pdf(self, x):
+            x -= self.shift
+            y = 1. / (abs(x) + 1.)
+            return 0.5 * y * y
+
+        def dpdf(self, x):
+            x -= self.shift
+            y = 1. / (abs(x) + 1.)
+            y = y * y * y
+            return y if (x < 0.) else -y
+
+        def cdf(self, x):
+            x -= self.shift
+            if x <= 0.:
+                return 0.5 / (1. - x)
+            else:
+                return 1. - 0.5 / (1. + x)
+
+    dists = [dist0(), dist1(), dist2(0.), dist2(10000.)]
+
+    # exact mean and variance of the distributions in the list dists
+    mv0 = [0., 4./15.]
+    mv1 = [0., 0.01]
+    mv2 = [0., np.inf]
+    mv3 = [10000., np.inf]
+    mvs = [mv0, mv1, mv2, mv3]
+
+    @pytest.mark.parametrize("dist, mv_ex",
+                             zip(dists, mvs))
+    def test_basic(self, dist, mv_ex):
+        with suppress_warnings() as sup:
+            # filter the warnings thrown by UNU.RAN
+            sup.filter(RuntimeWarning)
+            rng = TransformedDensityRejection(dist, random_state=42)
+        check_cont_samples(rng, dist, mv_ex)
+
+    # PDF 0 everywhere => bad construction points
+    bad_pdfs = [(lambda x: 0, UNURANError, r"50 : bad construction points.")]
+    bad_pdfs += bad_pdfs_common  # type: ignore[arg-type]
+
+    @pytest.mark.parametrize("pdf, err, msg", bad_pdfs)
+    def test_bad_pdf(self, pdf, err, msg):
+        class dist:
+            pass
+        dist.pdf = pdf
+        dist.dpdf = lambda x: 1  # an arbitrary dPDF
+        with pytest.raises(err, match=msg):
+            TransformedDensityRejection(dist)
+
+    @pytest.mark.parametrize("dpdf, err, msg", bad_dpdf_common)
+    def test_bad_dpdf(self, dpdf, err, msg):
+        class dist:
+            pass
+        dist.pdf = lambda x: x
+        dist.dpdf = dpdf
+        with pytest.raises(err, match=msg):
+            TransformedDensityRejection(dist, domain=(1, 10))
+
+    # test domains with inf + nan in them. need to write a custom test for
+    # this because not all methods support infinite tails.
+    @pytest.mark.parametrize("domain, err, msg", inf_nan_domains)
+    def test_inf_nan_domains(self, domain, err, msg):
+        with pytest.raises(err, match=msg):
+            TransformedDensityRejection(StandardNormal(), domain=domain)
+
+    @pytest.mark.parametrize("construction_points", [-1, 0, 0.1])
+    def test_bad_construction_points_scalar(self, construction_points):
+        with pytest.raises(ValueError, match=r"`construction_points` must be "
+                                             r"a positive integer."):
+            TransformedDensityRejection(
+                StandardNormal(), construction_points=construction_points
+            )
+
+    def test_bad_construction_points_array(self):
+        # empty array
+        construction_points = []
+        with pytest.raises(ValueError, match=r"`construction_points` must "
+                                             r"either be a "
+                                             r"scalar or a non-empty array."):
+            TransformedDensityRejection(
+                StandardNormal(), construction_points=construction_points
+            )
+
+        # construction_points not monotonically increasing
+        construction_points = [1, 1, 1, 1, 1, 1]
+        with pytest.warns(RuntimeWarning, match=r"33 : starting points not "
+                                                r"strictly monotonically "
+                                                r"increasing"):
+            TransformedDensityRejection(
+                StandardNormal(), construction_points=construction_points
+            )
+
+        # construction_points containing nans
+        construction_points = [np.nan, np.nan, np.nan]
+        with pytest.raises(UNURANError, match=r"50 : bad construction "
+                                              r"points."):
+            TransformedDensityRejection(
+                StandardNormal(), construction_points=construction_points
+            )
+
+        # construction_points out of domain
+        construction_points = [-10, 10]
+        with pytest.warns(RuntimeWarning, match=r"50 : starting point out of "
+                                                r"domain"):
+            TransformedDensityRejection(
+                StandardNormal(), domain=(-3, 3),
+                construction_points=construction_points
+            )
+
+    @pytest.mark.parametrize("c", [-1., np.nan, np.inf, 0.1, 1.])
+    def test_bad_c(self, c):
+        msg = r"`c` must either be -0.5 or 0."
+        with pytest.raises(ValueError, match=msg):
+            TransformedDensityRejection(StandardNormal(), c=c)
+
+    u = [np.linspace(0, 1, num=1000), [], [[]], [np.nan],
+         [-np.inf, np.nan, np.inf], 0,
+         [[np.nan, 0.5, 0.1], [0.2, 0.4, np.inf], [-2, 3, 4]]]
+
+    @pytest.mark.parametrize("u", u)
+    def test_ppf_hat(self, u):
+        # Increase the `max_squeeze_hat_ratio` so the ppf_hat is more
+        # accurate.
+        rng = TransformedDensityRejection(StandardNormal(),
+                                          max_squeeze_hat_ratio=0.9999)
+        # Older versions of NumPy throw RuntimeWarnings for comparisons
+        # with nan.
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "invalid value encountered in greater")
+            sup.filter(RuntimeWarning, "invalid value encountered in "
+                                       "greater_equal")
+            sup.filter(RuntimeWarning, "invalid value encountered in less")
+            sup.filter(RuntimeWarning, "invalid value encountered in "
+                                       "less_equal")
+            res = rng.ppf_hat(u)
+            expected = stats.norm.ppf(u)
+        assert_allclose(res, expected, rtol=1e-3, atol=1e-5)
+        assert res.shape == expected.shape
+
+    def test_bad_dist(self):
+        # Empty distribution
+        class dist:
+            ...
+
+        msg = r"`pdf` required but not found."
+        with pytest.raises(ValueError, match=msg):
+            TransformedDensityRejection(dist)
+
+        # dPDF not present in dist
+        class dist:
+            pdf = lambda x: 1-x*x  # noqa: E731
+
+        msg = r"`dpdf` required but not found."
+        with pytest.raises(ValueError, match=msg):
+            TransformedDensityRejection(dist)
+
+
+class TestDiscreteAliasUrn:
+    # DAU fails on these probably because of large domains and small
+    # computation errors in PMF. Mean/SD match but chi-squared test fails.
+    basic_fail_dists = {
+        'nchypergeom_fisher',  # numerical errors on tails
+        'nchypergeom_wallenius',  # numerical errors on tails
+        'randint'  # fails on 32-bit ubuntu
+    }
+
+    @pytest.mark.parametrize("distname, params", distdiscrete)
+    def test_basic(self, distname, params):
+        if distname in self.basic_fail_dists:
+            msg = ("DAU fails on these probably because of large domains "
+                   "and small computation errors in PMF.")
+            pytest.skip(msg)
+        if not isinstance(distname, str):
+            dist = distname
+        else:
+            dist = getattr(stats, distname)
+        dist = dist(*params)
+        domain = dist.support()
+        if not np.isfinite(domain[1] - domain[0]):
+            # DAU only works with finite domain. So, skip the distributions
+            # with infinite tails.
+            pytest.skip("DAU only works with a finite domain.")
+        k = np.arange(domain[0], domain[1]+1)
+        pv = dist.pmf(k)
+        mv_ex = dist.stats('mv')
+        rng = DiscreteAliasUrn(dist, random_state=42)
+        check_discr_samples(rng, pv, mv_ex)
+
+    # Can't use bad_pmf_common here as we evaluate PMF early on to avoid
+    # unhelpful errors from UNU.RAN.
+    bad_pmf = [
+        # inf returned
+        (lambda x: np.inf, ValueError,
+         r"must contain only finite / non-nan values"),
+        # nan returned
+        (lambda x: np.nan, ValueError,
+         r"must contain only finite / non-nan values"),
+        # all zeros
+        (lambda x: 0.0, ValueError,
+         r"must contain at least one non-zero value"),
+        # Undefined name inside the function
+        (lambda x: foo, NameError,  # type: ignore[name-defined]  # noqa
+         r"name 'foo' is not defined"),
+        # Returning wrong type.
+        (lambda x: [], ValueError,
+         r"setting an array element with a sequence."),
+        # probabilities < 0
+        (lambda x: -x, UNURANError,
+         r"50 : probability < 0"),
+        # signature of PMF wrong
+        (lambda: 1.0, TypeError,
+         r"takes 0 positional arguments but 1 was given")
+    ]
+
+    @pytest.mark.parametrize("pmf, err, msg", bad_pmf)
+    def test_bad_pmf(self, pmf, err, msg):
+        class dist:
+            pass
+        dist.pmf = pmf
+        with pytest.raises(err, match=msg):
+            DiscreteAliasUrn(dist, domain=(1, 10))
+
+    @pytest.mark.parametrize("pv", [[0.18, 0.02, 0.8],
+                                    [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]])
+    def test_sampling_with_pv(self, pv):
+        pv = np.asarray(pv, dtype=np.float64)
+        rng = DiscreteAliasUrn(pv, random_state=123)
+        rvs = rng.rvs(100_000)
+        pv = pv / pv.sum()
+        variates = np.arange(0, len(pv))
+        # test if the first few moments match
+        m_expected = np.average(variates, weights=pv)
+        v_expected = np.average((variates - m_expected) ** 2, weights=pv)
+        mv_expected = m_expected, v_expected
+        check_discr_samples(rng, pv, mv_expected)
+
+    @pytest.mark.parametrize("pv, msg", bad_pv_common)
+    def test_bad_pv(self, pv, msg):
+        with pytest.raises(ValueError, match=msg):
+            DiscreteAliasUrn(pv)
+
+    # DAU doesn't support infinite tails. So, it should throw an error when
+    # inf is present in the domain.
+    inf_domain = [(-np.inf, np.inf), (np.inf, np.inf), (-np.inf, -np.inf),
+                  (0, np.inf), (-np.inf, 0)]
+
+    @pytest.mark.parametrize("domain", inf_domain)
+    def test_inf_domain(self, domain):
+        with pytest.raises(ValueError, match=r"must be finite"):
+            DiscreteAliasUrn(stats.binom(10, 0.2), domain=domain)
+
+    def test_bad_urn_factor(self):
+        with pytest.warns(RuntimeWarning, match=r"relative urn size < 1."):
+            DiscreteAliasUrn([0.5, 0.5], urn_factor=-1)
+
+    def test_bad_args(self):
+        msg = (r"`domain` must be provided when the "
+               r"probability vector is not available.")
+
+        class dist:
+            def pmf(self, x):
+                return x
+
+        with pytest.raises(ValueError, match=msg):
+            DiscreteAliasUrn(dist)
+
+
+class TestNumericalInversePolynomial:
+    # Simple Custom Distribution
+    class dist0:
+        def pdf(self, x):
+            return 3/4 * (1-x*x)
+
+        def cdf(self, x):
+            return 3/4 * (x - x**3/3 + 2/3)
+
+        def support(self):
+            return -1, 1
+
+    # Standard Normal Distribution
+    class dist1:
+        def pdf(self, x):
+            return stats.norm._pdf(x / 0.1)
+
+        def cdf(self, x):
+            return stats.norm._cdf(x / 0.1)
+
+    # Sin 2 distribution
+    #          /  0.05 + 0.45*(1 + sin(2 Pi x))  if |x| <= 1
+    #  f(x) = <
+    #          \  0        otherwise
+    # Taken from UNU.RAN test suite (from file t_pinv.c)
+    class dist2:
+        def pdf(self, x):
+            return 0.05 + 0.45 * (1 + np.sin(2*np.pi*x))
+
+        def cdf(self, x):
+            return (0.05*(x + 1) +
+                    0.9*(1. + 2.*np.pi*(1 + x) - np.cos(2.*np.pi*x)) /
+                    (4.*np.pi))
+
+        def support(self):
+            return -1, 1
+
+    # Sin 10 distribution
+    #          /  0.2 * (0.05 + 0.45*(1 + sin(2 Pi x)))  if |x| <= 5
+    #  f(x) = <
+    #          \  0                                      otherwise
+    # Taken from UNU.RAN test suite (from file t_pinv.c)
+    class dist3:
+        def pdf(self, x):
+            return 0.2 * (0.05 + 0.45 * (1 + np.sin(2*np.pi*x)))
+
+        def cdf(self, x):
+            return x/10. + 0.5 + 0.09/(2*np.pi) * (np.cos(10*np.pi) -
+                                                   np.cos(2*np.pi*x))
+
+        def support(self):
+            return -5, 5
+
+    dists = [dist0(), dist1(), dist2(), dist3()]
+
+    # exact mean and variance of the distributions in the list dists
+    mv0 = [0., 4./15.]
+    mv1 = [0., 0.01]
+    mv2 = [-0.45/np.pi, 2/3*0.5 - 0.45**2/np.pi**2]
+    mv3 = [-0.45/np.pi, 0.2 * 250/3 * 0.5 - 0.45**2/np.pi**2]
+    mvs = [mv0, mv1, mv2, mv3]
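+
+    # A hedged aside (the helper name is an assumption; the tests below do
+    # not call this): the closed-form moments above can be cross-checked by
+    # direct numerical integration of the pdf.
+    @staticmethod
+    def _moments_by_quadrature(dist, lo, hi):
+        from scipy.integrate import quad
+        mean = quad(lambda x: x * dist.pdf(x), lo, hi)[0]
+        var = quad(lambda x: x * x * dist.pdf(x), lo, hi)[0] - mean * mean
+        return mean, var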
+
+    @pytest.mark.parametrize("dist, mv_ex",
+                             zip(dists, mvs))
+    def test_basic(self, dist, mv_ex):
+        rng = NumericalInversePolynomial(dist, random_state=42)
+        check_cont_samples(rng, dist, mv_ex)
+
+    very_slow_dists = ['studentized_range', 'trapezoid', 'triang', 'vonmises',
+                       'levy_stable', 'kappa4', 'ksone', 'kstwo', 'levy_l',
+                       'gausshyper', 'anglit']
+    # for these distributions, some assertions fail due to minor
+    # numerical differences. They can be avoided either by changing
+    # the seed or by increasing the u_resolution.
+    fail_dists = ['ncf', 'pareto', 'chi2', 'fatiguelife', 'halfgennorm',
+                  'gibrat', 'lognorm', 'ncx2', 't']
+
+    @pytest.mark.xslow
+    @pytest.mark.parametrize("distname, params", distcont)
+    def test_basic_all_scipy_dists(self, distname, params):
+        if distname in self.very_slow_dists:
+            pytest.skip(f"PINV too slow for {distname}")
+        if distname in self.fail_dists:
+            pytest.skip(f"PINV fails for {distname}")
+        dist = (getattr(stats, distname)
+                if isinstance(distname, str)
+                else distname)
+        dist = dist(*params)
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning)
+            rng = NumericalInversePolynomial(dist, random_state=42)
+        check_cont_samples(rng, dist, [dist.mean(), dist.var()])
+
+    @pytest.mark.parametrize("pdf, err, msg", bad_pdfs_common)
+    def test_bad_pdf(self, pdf, err, msg):
+        class dist:
+            pass
+        dist.pdf = pdf
+        with pytest.raises(err, match=msg):
+            NumericalInversePolynomial(dist, domain=[0, 5])
+
+    @pytest.mark.parametrize("logpdf, err, msg", bad_logpdfs_common)
+    def test_bad_logpdf(self, logpdf, err, msg):
+        class dist:
+            pass
+        dist.logpdf = logpdf
+        with pytest.raises(err, match=msg):
+            NumericalInversePolynomial(dist, domain=[0, 5])
+
+    # test domains with inf + nan in them. need to write a custom test for
+    # this because not all methods support infinite tails.
+    @pytest.mark.parametrize("domain, err, msg", inf_nan_domains)
+    def test_inf_nan_domains(self, domain, err, msg):
+        with pytest.raises(err, match=msg):
+            NumericalInversePolynomial(StandardNormal(), domain=domain)
+
+    u = [
+        # test if quantile 0 and 1 return -inf and inf respectively and check
+        # the correctness of the PPF for equidistant points between 0 and 1.
+        np.linspace(0, 1, num=10000),
+        # test the PPF method for empty arrays
+        [], [[]],
+        # test if nans and infs return nan result.
+        [np.nan], [-np.inf, np.nan, np.inf],
+        # test if a scalar is returned for a scalar input.
+        0,
+        # test for arrays with nans, values greater than 1 and less than 0,
+        # and some valid values.
+        [[np.nan, 0.5, 0.1], [0.2, 0.4, np.inf], [-2, 3, 4]]
+    ]
+
+    @pytest.mark.parametrize("u", u)
+    def test_ppf(self, u):
+        dist = StandardNormal()
+        rng = NumericalInversePolynomial(dist, u_resolution=1e-14)
+        # Older versions of NumPy throw RuntimeWarnings for comparisons
+        # with nan.
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "invalid value encountered in greater")
+            sup.filter(RuntimeWarning, "invalid value encountered in "
+                                       "greater_equal")
+            sup.filter(RuntimeWarning, "invalid value encountered in less")
+            sup.filter(RuntimeWarning, "invalid value encountered in "
+                                       "less_equal")
+            res = rng.ppf(u)
+            expected = stats.norm.ppf(u)
+        assert_allclose(res, expected, rtol=1e-11, atol=1e-11)
+        assert res.shape == expected.shape
+
+    x = [np.linspace(-10, 10, num=10000), [], [[]], [np.nan],
+         [-np.inf, np.nan, np.inf], 0,
+         [[np.nan, 0.5, 0.1], [0.2, 0.4, np.inf], [-np.inf, 3, 4]]]
+
+    @pytest.mark.parametrize("x", x)
+    def test_cdf(self, x):
+        dist = StandardNormal()
+        rng = NumericalInversePolynomial(dist, u_resolution=1e-14)
+        # Older versions of NumPy throw RuntimeWarnings for comparisons
+        # with nan.
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "invalid value encountered in greater")
+            sup.filter(RuntimeWarning, "invalid value encountered in "
+                                       "greater_equal")
+            sup.filter(RuntimeWarning, "invalid value encountered in less")
+            sup.filter(RuntimeWarning, "invalid value encountered in "
+                                       "less_equal")
+            res = rng.cdf(x)
+            expected = stats.norm.cdf(x)
+        assert_allclose(res, expected, rtol=1e-11, atol=1e-11)
+        assert res.shape == expected.shape
+
+    def test_u_error(self):
+        dist = StandardNormal()
+        rng = NumericalInversePolynomial(dist, u_resolution=1e-10)
+        max_error, mae = rng.u_error()
+        assert max_error < 1e-10
+        assert mae <= max_error
+        rng = NumericalInversePolynomial(dist, u_resolution=1e-14)
+        max_error, mae = rng.u_error()
+        assert max_error < 1e-14
+        assert mae <= max_error
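+
+    # A sketch of what the u-error measures (the helper name is an
+    # assumption; the tests rely on `rng.u_error()` instead): the deviation
+    # |u - cdf(ppf(u))| in the uniform domain, which PINV keeps below the
+    # requested `u_resolution`.
+    @staticmethod
+    def _u_error_by_hand(rng, dist, num=1000):
+        u = np.linspace(1e-6, 1 - 1e-6, num=num)
+        u_err = np.abs(u - np.vectorize(dist.cdf)(rng.ppf(u)))
+        return u_err.max(), u_err.mean()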
+
+    bad_orders = [1, 4.5, 20, np.inf, np.nan]
+    bad_u_resolution = [1e-20, 1e-1, np.inf, np.nan]
+
+    @pytest.mark.parametrize("order", bad_orders)
+    def test_bad_orders(self, order):
+        dist = StandardNormal()
+
+        msg = r"`order` must be an integer in the range \[3, 17\]."
+        with pytest.raises(ValueError, match=msg):
+            NumericalInversePolynomial(dist, order=order)
+
+    @pytest.mark.parametrize("u_resolution", bad_u_resolution)
+    def test_bad_u_resolution(self, u_resolution):
+        msg = r"`u_resolution` must be between 1e-15 and 1e-5."
+        with pytest.raises(ValueError, match=msg):
+            NumericalInversePolynomial(StandardNormal(),
+                                       u_resolution=u_resolution)
+
+    def test_bad_args(self):
+
+        class BadDist:
+            def cdf(self, x):
+                return stats.norm._cdf(x)
+
+        dist = BadDist()
+        msg = r"Either of the methods `pdf` or `logpdf` must be specified"
+        with pytest.raises(ValueError, match=msg):
+            NumericalInversePolynomial(dist)
+
+        dist = StandardNormal()
+        rng = NumericalInversePolynomial(dist)
+        msg = r"`sample_size` must be greater than or equal to 1000."
+        with pytest.raises(ValueError, match=msg):
+            rng.u_error(10)
+
+        class Distribution:
+            def pdf(self, x):
+                return np.exp(-0.5 * x*x)
+
+        dist = Distribution()
+        rng = NumericalInversePolynomial(dist)
+        msg = r"Exact CDF required but not found."
+        with pytest.raises(ValueError, match=msg):
+            rng.u_error()
+
+    def test_logpdf_pdf_consistency(self):
+        # 1. check that PINV works with pdf and logpdf only
+        # 2. check that generated ppf is the same (up to a small tolerance)
+
+        class MyDist:
+            pass
+
+        # create a generator from a dist with only a pdf
+        dist_pdf = MyDist()
+        dist_pdf.pdf = lambda x: math.exp(-x*x/2)
+        rng1 = NumericalInversePolynomial(dist_pdf)
+
+        # create dist with only logpdf
+        dist_logpdf = MyDist()
+        dist_logpdf.logpdf = lambda x: -x*x/2
+        rng2 = NumericalInversePolynomial(dist_logpdf)
+
+        q = np.linspace(1e-5, 1-1e-5, num=100)
+        assert_allclose(rng1.ppf(q), rng2.ppf(q))
+
+
+class TestNumericalInverseHermite:
+    #         /  (1 +sin(2 Pi x))/2  if |x| <= 1
+    # f(x) = <
+    #         \  0        otherwise
+    # Taken from UNU.RAN test suite (from file t_hinv.c)
+    class dist0:
+        def pdf(self, x):
+            return 0.5*(1. + np.sin(2.*np.pi*x))
+
+        def dpdf(self, x):
+            return np.pi*np.cos(2.*np.pi*x)
+
+        def cdf(self, x):
+            return (1. + 2.*np.pi*(1 + x) - np.cos(2.*np.pi*x)) / (4.*np.pi)
+
+        def support(self):
+            return -1, 1
+
+    #         /  max(sin(2 Pi x), 0) * Pi/2  if -1 <= x <= 0.5
+    # f(x) = <
+    #         \  0        otherwise
+    # Taken from UNU.RAN test suite (from file t_hinv.c)
+    class dist1:
+        def pdf(self, x):
+            if (x <= -0.5):
+                return np.sin((2. * np.pi) * x) * 0.5 * np.pi
+            if (x < 0.):
+                return 0.
+            if (x <= 0.5):
+                return np.sin((2. * np.pi) * x) * 0.5 * np.pi
+
+        def dpdf(self, x):
+            if (x <= -0.5):
+                return np.cos((2. * np.pi) * x) * np.pi * np.pi
+            if (x < 0.):
+                return 0.
+            if (x <= 0.5):
+                return np.cos((2. * np.pi) * x) * np.pi * np.pi
+
+        def cdf(self, x):
+            if (x <= -0.5):
+                return 0.25 * (1 - np.cos((2. * np.pi) * x))
+            if (x < 0.):
+                return 0.5
+            if (x <= 0.5):
+                return 0.75 - 0.25 * np.cos((2. * np.pi) * x)
+
+        def support(self):
+            return -1, 0.5
+
+    dists = [dist0(), dist1()]
+
+    # exact mean and variance of the distributions in the list dists
+    mv0 = [-1/(2*np.pi), 1/3 - 1/(4*np.pi*np.pi)]
+    mv1 = [-1/4, 3/8-1/(2*np.pi*np.pi) - 1/16]
+    mvs = [mv0, mv1]
+
+    @pytest.mark.parametrize("dist, mv_ex",
+                             zip(dists, mvs))
+    @pytest.mark.parametrize("order", [3, 5])
+    def test_basic(self, dist, mv_ex, order):
+        rng = NumericalInverseHermite(dist, order=order, random_state=42)
+        check_cont_samples(rng, dist, mv_ex)
+
+    # test domains with inf + nan in them. need to write a custom test for
+    # this because not all methods support infinite tails.
+    @pytest.mark.parametrize("domain, err, msg", inf_nan_domains)
+    def test_inf_nan_domains(self, domain, err, msg):
+        with pytest.raises(err, match=msg):
+            NumericalInverseHermite(StandardNormal(), domain=domain)
+
+    def basic_test_all_scipy_dists(self, distname, shapes):
+        slow_dists = {'ksone', 'kstwo', 'levy_stable', 'skewnorm'}
+        fail_dists = {'beta', 'gausshyper', 'geninvgauss', 'ncf', 'nct',
+                      'norminvgauss', 'genhyperbolic', 'studentized_range',
+                      'vonmises', 'kappa4', 'invgauss', 'wald'}
+
+        if distname in slow_dists:
+            pytest.skip("Distribution is too slow")
+        if distname in fail_dists:
+            # specific reasons documented in gh-13319
+            # https://github.com/scipy/scipy/pull/13319#discussion_r626188955
+            pytest.xfail("Fails - usually due to inaccurate CDF/PDF")
+
+        np.random.seed(0)
+
+        dist = getattr(stats, distname)(*shapes)
+        fni = NumericalInverseHermite(dist)
+
+        x = np.random.rand(10)
+        p_tol = np.max(np.abs(dist.ppf(x)-fni.ppf(x))/np.abs(dist.ppf(x)))
+        u_tol = np.max(np.abs(dist.cdf(fni.ppf(x)) - x))
+
+        assert p_tol < 1e-8
+        assert u_tol < 1e-12
+
+    @pytest.mark.filterwarnings('ignore::RuntimeWarning')
+    @pytest.mark.xslow
+    @pytest.mark.parametrize(("distname", "shapes"), distcont)
+    def test_basic_all_scipy_dists(self, distname, shapes):
+        # if distname == "truncnorm":
+        #     pytest.skip("Tested separately")
+        self.basic_test_all_scipy_dists(distname, shapes)
+
+    @pytest.mark.filterwarnings('ignore::RuntimeWarning')
+    def test_basic_truncnorm_gh17155(self):
+        self.basic_test_all_scipy_dists("truncnorm", (0.1, 2))
+
+    def test_input_validation(self):
+        match = r"`order` must be either 1, 3, or 5."
+        with pytest.raises(ValueError, match=match):
+            NumericalInverseHermite(StandardNormal(), order=2)
+
+        match = "`cdf` required but not found"
+        with pytest.raises(ValueError, match=match):
+            NumericalInverseHermite("norm")
+
+        match = "could not convert string to float"
+        with pytest.raises(ValueError, match=match):
+            NumericalInverseHermite(StandardNormal(),
+                                    u_resolution='ekki')
+
+    rngs = [None, 0, np.random.RandomState(0)]
+    rngs.append(np.random.default_rng(0))  # type: ignore
+    sizes = [(None, tuple()), (8, (8,)), ((4, 5, 6), (4, 5, 6))]
+
+    @pytest.mark.parametrize('rng', rngs)
+    @pytest.mark.parametrize('size_in, size_out', sizes)
+    def test_RVS(self, rng, size_in, size_out):
+        dist = StandardNormal()
+        fni = NumericalInverseHermite(dist)
+
+        rng2 = deepcopy(rng)
+        rvs = fni.rvs(size=size_in, random_state=rng)
+        if size_in is not None:
+            assert rvs.shape == size_out
+
+        if rng2 is not None:
+            rng2 = check_random_state(rng2)
+            uniform = rng2.uniform(size=size_in)
+            rvs2 = stats.norm.ppf(uniform)
+            assert_allclose(rvs, rvs2)
+
+    def test_inaccurate_CDF(self):
+        # CDF function with inaccurate tail cannot be inverted; see gh-13319
+        # https://github.com/scipy/scipy/pull/13319#discussion_r626188955
+        shapes = (2.3098496451481823, 0.6268795430096368)
+        match = ("98 : one or more intervals very short; possibly due to "
+                 "numerical problems with a pole or very flat tail")
+
+        # fails with default tol
+        with pytest.warns(RuntimeWarning, match=match):
+            NumericalInverseHermite(stats.beta(*shapes))
+
+        # no error with coarser tol
+        NumericalInverseHermite(stats.beta(*shapes), u_resolution=1e-8)
+
+    def test_custom_distribution(self):
+        dist1 = StandardNormal()
+        fni1 = NumericalInverseHermite(dist1)
+
+        dist2 = stats.norm()
+        fni2 = NumericalInverseHermite(dist2)
+
+        assert_allclose(fni1.rvs(random_state=0), fni2.rvs(random_state=0))
+
+    u = [
+        # check the correctness of the PPF for equidistant points between
+        # 0 and 1.
+        np.linspace(0., 1., num=10000),
+        # test the PPF method for empty arrays
+        [], [[]],
+        # test if nans and infs return nan result.
+        [np.nan], [-np.inf, np.nan, np.inf],
+        # test if a scalar is returned for a scalar input.
+        0,
+        # test for arrays with nans, values greater than 1 and less than 0,
+        # and some valid values.
+        [[np.nan, 0.5, 0.1], [0.2, 0.4, np.inf], [-2, 3, 4]]
+    ]
+
+    @pytest.mark.parametrize("u", u)
+    def test_ppf(self, u):
+        dist = StandardNormal()
+        rng = NumericalInverseHermite(dist, u_resolution=1e-12)
+        # Older versions of NumPy throw RuntimeWarnings for comparisons
+        # with nan.
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "invalid value encountered in greater")
+            sup.filter(RuntimeWarning, "invalid value encountered in "
+                                       "greater_equal")
+            sup.filter(RuntimeWarning, "invalid value encountered in less")
+            sup.filter(RuntimeWarning, "invalid value encountered in "
+                                       "less_equal")
+            res = rng.ppf(u)
+            expected = stats.norm.ppf(u)
+        assert_allclose(res, expected, rtol=1e-9, atol=3e-10)
+        assert res.shape == expected.shape
+
+    def test_u_error(self):
+        dist = StandardNormal()
+        rng = NumericalInverseHermite(dist, u_resolution=1e-10)
+        max_error, mae = rng.u_error()
+        assert max_error < 1e-10
+        assert mae <= max_error
+        with suppress_warnings() as sup:
+            # ignore warning about u-resolution being too small.
+            sup.filter(RuntimeWarning)
+            rng = NumericalInverseHermite(dist, u_resolution=1e-14)
+        max_error, mae = rng.u_error()
+        assert max_error < 1e-14
+        assert mae <= max_error
+
+
+class TestDiscreteGuideTable:
+    basic_fail_dists = {
+        'nchypergeom_fisher',  # numerical errors on tails
+        'nchypergeom_wallenius',  # numerical errors on tails
+        'randint'  # fails on 32-bit ubuntu
+    }
+
+    def test_guide_factor_gt3_raises_warning(self):
+        pv = [0.1, 0.3, 0.6]
+        urng = np.random.default_rng()
+        with pytest.warns(RuntimeWarning):
+            DiscreteGuideTable(pv, random_state=urng, guide_factor=7)
+
+    def test_guide_factor_zero_raises_warning(self):
+        pv = [0.1, 0.3, 0.6]
+        urng = np.random.default_rng()
+        with pytest.warns(RuntimeWarning):
+            DiscreteGuideTable(pv, random_state=urng, guide_factor=0)
+
+    def test_negative_guide_factor_raises_warning(self):
+        # The UNU.RAN wrapper raises this warning automatically, and its
+        # message is already useful, so here we just test that a warning is
+        # raised.
+        pv = [0.1, 0.3, 0.6]
+        urng = np.random.default_rng()
+        with pytest.warns(RuntimeWarning):
+            DiscreteGuideTable(pv, random_state=urng, guide_factor=-1)
+
+    @pytest.mark.parametrize("distname, params", distdiscrete)
+    def test_basic(self, distname, params):
+        if distname in self.basic_fail_dists:
+            msg = ("DGT fails on these probably because of large domains "
+                   "and small computation errors in PMF.")
+            pytest.skip(msg)
+
+        if not isinstance(distname, str):
+            dist = distname
+        else:
+            dist = getattr(stats, distname)
+
+        dist = dist(*params)
+        domain = dist.support()
+
+        if not np.isfinite(domain[1] - domain[0]):
+            # DGT only works with finite domain. So, skip the distributions
+            # with infinite tails.
+            pytest.skip("DGT only works with a finite domain.")
+
+        k = np.arange(domain[0], domain[1]+1)
+        pv = dist.pmf(k)
+        mv_ex = dist.stats('mv')
+        rng = DiscreteGuideTable(dist, random_state=42)
+        check_discr_samples(rng, pv, mv_ex)
+
+    u = [
+        # check the correctness of the PPF for equidistant points
+        # between 0 and 1.
+        np.linspace(0, 1, num=10000),
+        # test the PPF method for empty arrays
+        [], [[]],
+        # test if nans and infs return nan result.
+        [np.nan], [-np.inf, np.nan, np.inf],
+        # test if a scalar is returned for a scalar input.
+        0,
+        # test for arrays with nans, values greater than 1 and less than 0,
+        # and some valid values.
+        [[np.nan, 0.5, 0.1], [0.2, 0.4, np.inf], [-2, 3, 4]]
+    ]
+
+    @pytest.mark.parametrize('u', u)
+    def test_ppf(self, u):
+        n, p = 4, 0.1
+        dist = stats.binom(n, p)
+        rng = DiscreteGuideTable(dist, random_state=42)
+
+        # Older versions of NumPy throw RuntimeWarnings for comparisons
+        # with nan.
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "invalid value encountered in greater")
+            sup.filter(RuntimeWarning, "invalid value encountered in "
+                                       "greater_equal")
+            sup.filter(RuntimeWarning, "invalid value encountered in less")
+            sup.filter(RuntimeWarning, "invalid value encountered in "
+                                       "less_equal")
+
+            res = rng.ppf(u)
+            expected = stats.binom.ppf(u, n, p)
+        assert_equal(res.shape, expected.shape)
+        assert_equal(res, expected)
+
+    @pytest.mark.parametrize("pv, msg", bad_pv_common)
+    def test_bad_pv(self, pv, msg):
+        with pytest.raises(ValueError, match=msg):
+            DiscreteGuideTable(pv)
+
+    # DGT doesn't support infinite tails. So, it should throw an error when
+    # inf is present in the domain.
+    inf_domain = [(-np.inf, np.inf), (np.inf, np.inf), (-np.inf, -np.inf),
+                  (0, np.inf), (-np.inf, 0)]
+
+    @pytest.mark.parametrize("domain", inf_domain)
+    def test_inf_domain(self, domain):
+        with pytest.raises(ValueError, match=r"must be finite"):
+            DiscreteGuideTable(stats.binom(10, 0.2), domain=domain)
+
+
+class TestSimpleRatioUniforms:
+    # pdf with piecewise linear function as transformed density
+    # with T = -1/sqrt with shift. Taken from UNU.RAN test suite
+    # (from file t_srou.c)
+    class dist:
+        def __init__(self, shift):
+            self.shift = shift
+            self.mode = shift
+
+        def pdf(self, x):
+            x -= self.shift
+            y = 1. / (abs(x) + 1.)
+            return 0.5 * y * y
+
+        def cdf(self, x):
+            x -= self.shift
+            if x <= 0.:
+                return 0.5 / (1. - x)
+            else:
+                return 1. - 0.5 / (1. + x)
+
+    dists = [dist(0.), dist(10000.)]
+
+    # exact mean and variance of the distributions in the list dists
+    mv1 = [0., np.inf]
+    mv2 = [10000., np.inf]
+    mvs = [mv1, mv2]
+
+    @pytest.mark.parametrize("dist, mv_ex",
+                             zip(dists, mvs))
+    def test_basic(self, dist, mv_ex):
+        rng = SimpleRatioUniforms(dist, mode=dist.mode, random_state=42)
+        check_cont_samples(rng, dist, mv_ex)
+        rng = SimpleRatioUniforms(dist, mode=dist.mode,
+                                  cdf_at_mode=dist.cdf(dist.mode),
+                                  random_state=42)
+        check_cont_samples(rng, dist, mv_ex)
+
+    # Test domains with inf and nan in them. A custom test is needed here
+    # because not all methods support infinite tails.
+    @pytest.mark.parametrize("domain, err, msg", inf_nan_domains)
+    def test_inf_nan_domains(self, domain, err, msg):
+        with pytest.raises(err, match=msg):
+            SimpleRatioUniforms(StandardNormal(), domain=domain)
+
+    def test_bad_args(self):
+        # pdf_area < 0
+        with pytest.raises(ValueError, match=r"`pdf_area` must be > 0"):
+            SimpleRatioUniforms(StandardNormal(), mode=0, pdf_area=-1)
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_stats.py b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_stats.py
new file mode 100644
index 00000000..50f3849f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_stats.py
@@ -0,0 +1,8173 @@
+""" Test functions for stats module
+
+    WRITTEN BY LOUIS LUANGKESORN  FOR THE STATS MODULE
+    BASED ON WILKINSON'S STATISTICS QUIZ
+    https://www.stanford.edu/~clint/bench/wilk.txt
+
+    Additional tests by a host of SciPy developers.
+"""
+import os
+import re
+import warnings
+from collections import namedtuple
+from itertools import product
+
+from numpy.testing import (assert_, assert_equal,
+                           assert_almost_equal, assert_array_almost_equal,
+                           assert_array_equal, assert_approx_equal,
+                           assert_allclose, assert_warns, suppress_warnings,
+                           assert_array_less)
+import pytest
+from pytest import raises as assert_raises
+import numpy.ma.testutils as mat
+from numpy import array, arange, float32, float64, power
+import numpy as np
+
+import scipy.stats as stats
+import scipy.stats.mstats as mstats
+import scipy.stats._mstats_basic as mstats_basic
+from scipy.stats._ksstats import kolmogn
+from scipy.special._testutils import FuncData
+from scipy.special import binom
+from scipy import optimize
+from .common_tests import check_named_results
+from scipy.spatial.distance import cdist
+from numpy.lib import NumpyVersion
+from scipy.stats._axis_nan_policy import _broadcast_concatenate
+from scipy.stats._stats_py import _permutation_distribution_t
+
+
+""" Numbers in docstrings beginning with 'W' refer to the section numbers
+    and headings found in the STATISTICS QUIZ of Leland Wilkinson.  These are
+    considered to be essential functionality.  True testing and
+    evaluation of a statistics package requires use of the
+    NIST Statistical test data.  See McCullough (1999), "Assessing the
+    Reliability of Statistical Software", for a test methodology and its
+    implementation in testing SAS, SPSS, and S-Plus.
+"""
+
+#  Datasets
+#  These data sets are from the nasty.dat sets used by Wilkinson
+#  For completeness, I should write the relevant tests and count them as
+#  failures.  That is somewhat acceptable while this is still beta software,
+#  and it makes a good target for 1.0 status.
+X = array([1,2,3,4,5,6,7,8,9], float)
+ZERO = array([0,0,0,0,0,0,0,0,0], float)
+BIG = array([99999991,99999992,99999993,99999994,99999995,99999996,99999997,
+             99999998,99999999], float)
+LITTLE = array([0.99999991,0.99999992,0.99999993,0.99999994,0.99999995,0.99999996,
+                0.99999997,0.99999998,0.99999999], float)
+HUGE = array([1e+12,2e+12,3e+12,4e+12,5e+12,6e+12,7e+12,8e+12,9e+12], float)
+TINY = array([1e-12,2e-12,3e-12,4e-12,5e-12,6e-12,7e-12,8e-12,9e-12], float)
+ROUND = array([0.5,1.5,2.5,3.5,4.5,5.5,6.5,7.5,8.5], float)
+
+
+class TestTrimmedStats:
+    # TODO: write these tests to handle missing values properly
+    dprec = np.finfo(np.float64).precision
+
+    def test_tmean(self):
+        y = stats.tmean(X, (2, 8), (True, True))
+        assert_approx_equal(y, 5.0, significant=self.dprec)
+
+        y1 = stats.tmean(X, limits=(2, 8), inclusive=(False, False))
+        y2 = stats.tmean(X, limits=None)
+        assert_approx_equal(y1, y2, significant=self.dprec)
+
+        x_2d = arange(63, dtype=float64).reshape(9, 7)
+        y = stats.tmean(x_2d, axis=None)
+        assert_approx_equal(y, x_2d.mean(), significant=self.dprec)
+
+        y = stats.tmean(x_2d, axis=0)
+        assert_array_almost_equal(y, x_2d.mean(axis=0), decimal=8)
+
+        y = stats.tmean(x_2d, axis=1)
+        assert_array_almost_equal(y, x_2d.mean(axis=1), decimal=8)
+
+        y = stats.tmean(x_2d, limits=(2, 61), axis=None)
+        assert_approx_equal(y, 31.5, significant=self.dprec)
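+        # (the retained values are 2..61 inclusive, whose mean is
+        # (2 + 61)/2 = 31.5)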
+
+        y = stats.tmean(x_2d, limits=(2, 21), axis=0)
+        y_true = [14, 11.5, 9, 10, 11, 12, 13]
+        assert_array_almost_equal(y, y_true, decimal=8)
+
+        y = stats.tmean(x_2d, limits=(2, 21), inclusive=(True, False), axis=0)
+        y_true = [10.5, 11.5, 9, 10, 11, 12, 13]
+        assert_array_almost_equal(y, y_true, decimal=8)
+
+        x_2d_with_nan = np.array(x_2d)
+        x_2d_with_nan[-1, -3:] = np.nan
+        y = stats.tmean(x_2d_with_nan, limits=(1, 13), axis=0)
+        y_true = [7, 4.5, 5.5, 6.5, np.nan, np.nan, np.nan]
+        assert_array_almost_equal(y, y_true, decimal=8)
+
+        with suppress_warnings() as sup:
+            sup.record(RuntimeWarning, "Mean of empty slice")
+
+            y = stats.tmean(x_2d, limits=(2, 21), axis=1)
+            y_true = [4, 10, 17, 21, np.nan, np.nan, np.nan, np.nan, np.nan]
+            assert_array_almost_equal(y, y_true, decimal=8)
+
+            y = stats.tmean(x_2d, limits=(2, 21),
+                            inclusive=(False, True), axis=1)
+            y_true = [4.5, 10, 17, 21, np.nan, np.nan, np.nan, np.nan, np.nan]
+            assert_array_almost_equal(y, y_true, decimal=8)
+
+    def test_tvar(self):
+        y = stats.tvar(X, limits=(2, 8), inclusive=(True, True))
+        assert_approx_equal(y, 4.6666666666666661, significant=self.dprec)
+
+        y = stats.tvar(X, limits=None)
+        assert_approx_equal(y, X.var(ddof=1), significant=self.dprec)
+
+        x_2d = arange(63, dtype=float64).reshape((9, 7))
+        y = stats.tvar(x_2d, axis=None)
+        assert_approx_equal(y, x_2d.var(ddof=1), significant=self.dprec)
+
+        y = stats.tvar(x_2d, axis=0)
+        assert_array_almost_equal(y[0], np.full((1, 7), 367.50000000), decimal=8)
+
+        y = stats.tvar(x_2d, axis=1)
+        assert_array_almost_equal(y[0], np.full((1, 9), 4.66666667), decimal=8)
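+        # (hedged arithmetic: each column of x_2d steps by 7, so its
+        # variance is 7**2 * var(arange(9), ddof=1) = 49 * 7.5 = 367.5;
+        # each row steps by 1, giving var(arange(7), ddof=1) = 28/6
+        # = 4.66666667)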
+
+        y = stats.tvar(x_2d[3, :])
+        assert_approx_equal(y, 4.666666666666667, significant=self.dprec)
+
+        with suppress_warnings() as sup:
+            sup.record(RuntimeWarning, "Degrees of freedom <= 0 for slice.")
+
+            # Limiting some values along one axis
+            y = stats.tvar(x_2d, limits=(1, 5), axis=1, inclusive=(True, True))
+            assert_approx_equal(y[0], 2.5, significant=self.dprec)
+
+            # Limiting all values along one axis
+            y = stats.tvar(x_2d, limits=(0, 6), axis=1, inclusive=(True, True))
+            assert_approx_equal(y[0], 4.666666666666667, significant=self.dprec)
+            assert_equal(y[1], np.nan)
+
+    def test_tstd(self):
+        y = stats.tstd(X, (2, 8), (True, True))
+        assert_approx_equal(y, 2.1602468994692865, significant=self.dprec)
+
+        y = stats.tstd(X, limits=None)
+        assert_approx_equal(y, X.std(ddof=1), significant=self.dprec)
+
+    def test_tmin(self):
+        assert_equal(stats.tmin(4), 4)
+
+        x = np.arange(10)
+        assert_equal(stats.tmin(x), 0)
+        assert_equal(stats.tmin(x, lowerlimit=0), 0)
+        assert_equal(stats.tmin(x, lowerlimit=0, inclusive=False), 1)
+
+        x = x.reshape((5, 2))
+        assert_equal(stats.tmin(x, lowerlimit=0, inclusive=False), [2, 1])
+        assert_equal(stats.tmin(x, axis=1), [0, 2, 4, 6, 8])
+        assert_equal(stats.tmin(x, axis=None), 0)
+
+        x = np.arange(10.)
+        x[9] = np.nan
+        with suppress_warnings() as sup:
+            sup.record(RuntimeWarning, "invalid value*")
+            assert_equal(stats.tmin(x), np.nan)
+            assert_equal(stats.tmin(x, nan_policy='omit'), 0.)
+            assert_raises(ValueError, stats.tmin, x, nan_policy='raise')
+            assert_raises(ValueError, stats.tmin, x, nan_policy='foobar')
+            msg = "'propagate', 'raise', 'omit'"
+            with assert_raises(ValueError, match=msg):
+                stats.tmin(x, nan_policy='foo')
+
+    def test_tmax(self):
+        assert_equal(stats.tmax(4), 4)
+
+        x = np.arange(10)
+        assert_equal(stats.tmax(x), 9)
+        assert_equal(stats.tmax(x, upperlimit=9), 9)
+        assert_equal(stats.tmax(x, upperlimit=9, inclusive=False), 8)
+
+        x = x.reshape((5, 2))
+        assert_equal(stats.tmax(x, upperlimit=9, inclusive=False), [8, 7])
+        assert_equal(stats.tmax(x, axis=1), [1, 3, 5, 7, 9])
+        assert_equal(stats.tmax(x, axis=None), 9)
+
+        x = np.arange(10.)
+        x[6] = np.nan
+        with suppress_warnings() as sup:
+            sup.record(RuntimeWarning, "invalid value*")
+            assert_equal(stats.tmax(x), np.nan)
+            assert_equal(stats.tmax(x, nan_policy='omit'), 9.)
+            assert_raises(ValueError, stats.tmax, x, nan_policy='raise')
+            assert_raises(ValueError, stats.tmax, x, nan_policy='foobar')
+
+    def test_tsem(self):
+        y = stats.tsem(X, limits=(3, 8), inclusive=(False, True))
+        y_ref = np.array([4, 5, 6, 7, 8])
+        assert_approx_equal(y, y_ref.std(ddof=1) / np.sqrt(y_ref.size),
+                            significant=self.dprec)
+
+        assert_approx_equal(stats.tsem(X, limits=[-1, 10]),
+                            stats.tsem(X, limits=None),
+                            significant=self.dprec)
+
+
+class TestCorrPearsonr:
+    """ W.II.D. Compute a correlation matrix on all the variables.
+
+        All the correlations, except for ZERO and MISS, should be exactly 1.
+        ZERO and MISS should have undefined or missing correlations with the
+        other variables.  The same should go for SPEARMAN correlations, if
+        your program has them.
+    """
+
+    def test_pXX(self):
+        y = stats.pearsonr(X,X)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_pXBIG(self):
+        y = stats.pearsonr(X,BIG)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_pXLITTLE(self):
+        y = stats.pearsonr(X,LITTLE)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_pXHUGE(self):
+        y = stats.pearsonr(X,HUGE)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_pXTINY(self):
+        y = stats.pearsonr(X,TINY)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_pXROUND(self):
+        y = stats.pearsonr(X,ROUND)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_pBIGBIG(self):
+        y = stats.pearsonr(BIG,BIG)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_pBIGLITTLE(self):
+        y = stats.pearsonr(BIG,LITTLE)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_pBIGHUGE(self):
+        y = stats.pearsonr(BIG,HUGE)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_pBIGTINY(self):
+        y = stats.pearsonr(BIG,TINY)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_pBIGROUND(self):
+        y = stats.pearsonr(BIG,ROUND)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_pLITTLELITTLE(self):
+        y = stats.pearsonr(LITTLE,LITTLE)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_pLITTLEHUGE(self):
+        y = stats.pearsonr(LITTLE,HUGE)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_pLITTLETINY(self):
+        y = stats.pearsonr(LITTLE,TINY)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_pLITTLEROUND(self):
+        y = stats.pearsonr(LITTLE,ROUND)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_pHUGEHUGE(self):
+        y = stats.pearsonr(HUGE,HUGE)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_pHUGETINY(self):
+        y = stats.pearsonr(HUGE,TINY)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_pHUGEROUND(self):
+        y = stats.pearsonr(HUGE,ROUND)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_pTINYTINY(self):
+        y = stats.pearsonr(TINY,TINY)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_pTINYROUND(self):
+        y = stats.pearsonr(TINY,ROUND)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_pROUNDROUND(self):
+        y = stats.pearsonr(ROUND,ROUND)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_pearsonr_result_attributes(self):
+        res = stats.pearsonr(X, X)
+        attributes = ('correlation', 'pvalue')
+        check_named_results(res, attributes)
+        assert_equal(res.correlation, res.statistic)
+
+    def test_r_almost_exactly_pos1(self):
+        a = arange(3.0)
+        r, prob = stats.pearsonr(a, a)
+
+        assert_allclose(r, 1.0, atol=1e-15)
+        # With n = len(a) = 3, the error in prob grows like the
+        # square root of the error in r.
+        assert_allclose(prob, 0.0, atol=np.sqrt(2*np.spacing(1.0)))
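+        # (A hedged sketch of why: for n = 3 the null distribution of r is
+        # the arcsine law, so the p-value behaves like sqrt(1 - r) near
+        # r = 1; a perturbation of order spacing(1) in r therefore moves
+        # prob by order sqrt(spacing(1)).)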
+
+    def test_r_almost_exactly_neg1(self):
+        a = arange(3.0)
+        r, prob = stats.pearsonr(a, -a)
+
+        assert_allclose(r, -1.0, atol=1e-15)
+        # With n = len(a) = 3, the error in prob grows like the
+        # square root of the error in r.
+        assert_allclose(prob, 0.0, atol=np.sqrt(2*np.spacing(1.0)))
+
+    def test_basic(self):
+        # A basic test, with a correlation coefficient
+        # that is not 1 or -1.
+        a = array([-1, 0, 1])
+        b = array([0, 0, 3])
+        r, prob = stats.pearsonr(a, b)
+        assert_approx_equal(r, np.sqrt(3)/2)
+        assert_approx_equal(prob, 1/3)
+
+    def test_constant_input(self):
+        # Zero variance input
+        # See https://github.com/scipy/scipy/issues/3728
+        msg = "An input array is constant"
+        with assert_warns(stats.ConstantInputWarning, match=msg):
+            r, p = stats.pearsonr([0.667, 0.667, 0.667], [0.123, 0.456, 0.789])
+            assert_equal(r, np.nan)
+            assert_equal(p, np.nan)
+
+    def test_near_constant_input(self):
+        # Near constant input (but not constant):
+        x = [2, 2, 2 + np.spacing(2)]
+        y = [3, 3, 3 + 6*np.spacing(3)]
+        msg = "An input array is nearly constant; the computed"
+        with assert_warns(stats.NearConstantInputWarning, match=msg):
+            # r and p are garbage, so don't bother checking them in this case.
+            # (The exact value of r would be 1.)
+            r, p = stats.pearsonr(x, y)
+
+    def test_very_small_input_values(self):
+        # Very small values in an input.  A naive implementation will
+        # suffer from underflow.
+        # See https://github.com/scipy/scipy/issues/9353
+        x = [0.004434375, 0.004756007, 0.003911996, 0.0038005, 0.003409971]
+        y = [2.48e-188, 7.41e-181, 4.09e-208, 2.08e-223, 2.66e-245]
+        r, p = stats.pearsonr(x,y)
+
+        # The expected values were computed using mpmath with 80 digits
+        # of precision.
+        assert_allclose(r, 0.7272930540750450)
+        assert_allclose(p, 0.1637805429533202)
+
+    def test_very_large_input_values(self):
+        # Very large values in an input.  A naive implementation will
+        # suffer from overflow.
+        # See https://github.com/scipy/scipy/issues/8980
+        x = 1e90*np.array([0, 0, 0, 1, 1, 1, 1])
+        y = 1e90*np.arange(7)
+
+        r, p = stats.pearsonr(x, y)
+
+        # The expected values were computed using mpmath with 80 digits
+        # of precision.
+        assert_allclose(r, 0.8660254037844386)
+        assert_allclose(p, 0.011724811003954638)
+
+    def test_extremely_large_input_values(self):
+        # Extremely large values in x and y.  These values would cause the
+        # product sigma_x * sigma_y to overflow if the two factors were
+        # computed independently.
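+        # (each standard deviation is of order 1e200, so their product of
+        # order 1e400 would exceed the double-precision maximum of ~1.8e308)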
+        x = np.array([2.3e200, 4.5e200, 6.7e200, 8e200])
+        y = np.array([1.2e199, 5.5e200, 3.3e201, 1.0e200])
+        r, p = stats.pearsonr(x, y)
+
+        # The expected values were computed using mpmath with 80 digits
+        # of precision.
+        assert_allclose(r, 0.351312332103289)
+        assert_allclose(p, 0.648687667896711)
+
+    def test_length_two_pos1(self):
+        # Inputs with length 2.
+        # See https://github.com/scipy/scipy/issues/7730
+        res = stats.pearsonr([1, 2], [3, 5])
+        r, p = res
+        assert_equal(r, 1)
+        assert_equal(p, 1)
+        assert_equal(res.confidence_interval(), (-1, 1))
+
+    def test_length_two_neg2(self):
+        # Inputs with length 2.
+        # See https://github.com/scipy/scipy/issues/7730
+        r, p = stats.pearsonr([2, 1], [3, 5])
+        assert_equal(r, -1)
+        assert_equal(p, 1)
+
+    # Expected values computed with R 3.6.2 cor.test, e.g.
+    # options(digits=16)
+    # x <- c(1, 2, 3, 4)
+    # y <- c(0, 1, 0.5, 1)
+    # cor.test(x, y, method = "pearson", alternative = "g")
+    # correlation coefficient and p-value for alternative='two-sided'
+    # calculated with mpmath agree to 16 digits.
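+    # A hedged aside: the two-sided rlow/rhigh values below are consistent
+    # with the standard Fisher z-transform interval,
+    # tanh(arctanh(r) -/+ 1.959964/sqrt(n - 3)), with
+    # r = 0.6741998624632421 and n = 4 (so sqrt(n - 3) = 1).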
+    @pytest.mark.parametrize('alternative, pval, rlow, rhigh, sign',
+            [('two-sided', 0.325800137536, -0.814938968841, 0.99230697523, 1),  # noqa
+             ('less', 0.8370999312316, -1, 0.985600937290653, 1),
+             ('greater', 0.1629000687684, -0.6785654158217636, 1, 1),
+             ('two-sided', 0.325800137536, -0.992306975236, 0.81493896884, -1),
+             ('less', 0.1629000687684, -1.0, 0.6785654158217636, -1),
+             ('greater', 0.8370999312316, -0.985600937290653, 1.0, -1)])
+    def test_basic_example(self, alternative, pval, rlow, rhigh, sign):
+        x = [1, 2, 3, 4]
+        y = np.array([0, 1, 0.5, 1]) * sign
+        result = stats.pearsonr(x, y, alternative=alternative)
+        assert_allclose(result.statistic, 0.6741998624632421*sign, rtol=1e-12)
+        assert_allclose(result.pvalue, pval, rtol=1e-6)
+        ci = result.confidence_interval()
+        assert_allclose(ci, (rlow, rhigh), rtol=1e-6)
+
+    def test_negative_correlation_pvalue_gh17795(self):
+        x = np.arange(10)
+        y = -x
+        test_greater = stats.pearsonr(x, y, alternative='greater')
+        test_less = stats.pearsonr(x, y, alternative='less')
+        assert_allclose(test_greater.pvalue, 1)
+        assert_allclose(test_less.pvalue, 0, atol=1e-20)
+
+    def test_length3_r_exactly_negative_one(self):
+        x = [1, 2, 3]
+        y = [5, -4, -13]
+        res = stats.pearsonr(x, y)
+
+        # The expected r and p are exact.
+        r, p = res
+        assert_allclose(r, -1.0)
+        assert_allclose(p, 0.0, atol=1e-7)
+
+        assert_equal(res.confidence_interval(), (-1, 1))
+
+    def test_unequal_lengths(self):
+        x = [1, 2, 3]
+        y = [4, 5]
+        assert_raises(ValueError, stats.pearsonr, x, y)
+
+    def test_len1(self):
+        x = [1]
+        y = [2]
+        assert_raises(ValueError, stats.pearsonr, x, y)
+
+    def test_complex_data(self):
+        x = [-1j, -2j, -3.0j]
+        y = [-1j, -2j, -3.0j]
+        message = 'This function does not support complex data'
+        with pytest.raises(ValueError, match=message):
+            stats.pearsonr(x, y)
+
+
+class TestFisherExact:
+    """Some tests to show that fisher_exact() works correctly.
+
+    Note that in SciPy 0.9.0 this was not working well for large numbers due to
+    inaccuracy of the hypergeom distribution (see #1218); this has since been
+    fixed.
+
+    Also note that R and SciPy have different argument formats for their
+    hypergeometric distribution functions.
+
+    R:
+    > phyper(18999, 99000, 110000, 39000, lower.tail = FALSE)
+    [1] 1.701815e-09
+    """
+
+    def test_basic(self):
+        fisher_exact = stats.fisher_exact
+
+        res = fisher_exact([[14500, 20000], [30000, 40000]])[1]
+        assert_approx_equal(res, 0.01106, significant=4)
+        res = fisher_exact([[100, 2], [1000, 5]])[1]
+        assert_approx_equal(res, 0.1301, significant=4)
+        res = fisher_exact([[2, 7], [8, 2]])[1]
+        assert_approx_equal(res, 0.0230141, significant=6)
+        res = fisher_exact([[5, 1], [10, 10]])[1]
+        assert_approx_equal(res, 0.1973244, significant=6)
+        res = fisher_exact([[5, 15], [20, 20]])[1]
+        assert_approx_equal(res, 0.0958044, significant=6)
+        res = fisher_exact([[5, 16], [20, 25]])[1]
+        assert_approx_equal(res, 0.1725862, significant=6)
+        res = fisher_exact([[10, 5], [10, 1]])[1]
+        assert_approx_equal(res, 0.1973244, significant=6)
+        res = fisher_exact([[5, 0], [1, 4]])[1]
+        assert_approx_equal(res, 0.04761904, significant=6)
+        res = fisher_exact([[0, 1], [3, 2]])[1]
+        assert_approx_equal(res, 1.0)
+        res = fisher_exact([[0, 2], [6, 4]])[1]
+        assert_approx_equal(res, 0.4545454545)
+        res = fisher_exact([[2, 7], [8, 2]])
+        assert_approx_equal(res[1], 0.0230141, significant=6)
+        assert_approx_equal(res[0], 4.0 / 56)
+
+    def test_precise(self):
+        # results from R
+        #
+        # R defines oddsratio differently (see Notes section of fisher_exact
+        # docstring), so those will not match.  We leave them in anyway, in
+        # case they will be useful later on. We test only the p-value.
+        tablist = [
+            ([[100, 2], [1000, 5]], (2.505583993422285e-001, 1.300759363430016e-001)),
+            ([[2, 7], [8, 2]], (8.586235135736206e-002, 2.301413756522114e-002)),
+            ([[5, 1], [10, 10]], (4.725646047336584e+000, 1.973244147157190e-001)),
+            ([[5, 15], [20, 20]], (3.394396617440852e-001, 9.580440012477637e-002)),
+            ([[5, 16], [20, 25]], (3.960558326183334e-001, 1.725864953812994e-001)),
+            ([[10, 5], [10, 1]], (2.116112781158483e-001, 1.973244147157190e-001)),
+            ([[10, 5], [10, 0]], (0.000000000000000e+000, 6.126482213438734e-002)),
+            ([[5, 0], [1, 4]], (np.inf, 4.761904761904762e-002)),
+            ([[0, 5], [1, 4]], (0.000000000000000e+000, 1.000000000000000e+000)),
+            ([[5, 1], [0, 4]], (np.inf, 4.761904761904758e-002)),
+            ([[0, 1], [3, 2]], (0.000000000000000e+000, 1.000000000000000e+000))
+            ]
+        for table, res_r in tablist:
+            res = stats.fisher_exact(np.asarray(table))
+            np.testing.assert_almost_equal(res[1], res_r[1], decimal=11,
+                                           verbose=True)
+
+    def test_gh4130(self):
+        # Previously, a fudge factor used to distinguish between theoretically
+        # and numerically different probability masses was 1e-4; it has been
+        # tightened to fix gh-4130. Accuracy checked against R fisher.test.
+        # options(digits=16)
+        # table <- matrix(c(6, 108, 37, 200), nrow = 2)
+        # fisher.test(table, alternative = "t")
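+        # (Hedged note on the fudge factor: roughly, it sets which tables
+        # count as "at least as extreme" -- those whose hypergeometric mass
+        # is within that relative tolerance of the observed mass -- so too
+        # loose a tolerance inflates the two-sided p-value.)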
+        x = [[6, 37], [108, 200]]
+        res = stats.fisher_exact(x)
+        assert_allclose(res[1], 0.005092697748126)
+
+        # case from https://github.com/brentp/fishers_exact_test/issues/27
+        # That package has an (absolute?) fudge factor of 1e-6, which is
+        # too big.
+        x = [[22, 0], [0, 102]]
+        res = stats.fisher_exact(x)
+        assert_allclose(res[1], 7.175066786244549e-25)
+
+        # case from https://github.com/brentp/fishers_exact_test/issues/1
+        x = [[94, 48], [3577, 16988]]
+        res = stats.fisher_exact(x)
+        assert_allclose(res[1], 2.069356340993818e-37)
+
+    def test_gh9231(self):
+        # Previously, fisher_exact was extremely slow for this table
+        # As reported in gh-9231, the p-value should be very nearly zero
+        x = [[5829225, 5692693], [5760959, 5760959]]
+        res = stats.fisher_exact(x)
+        assert_allclose(res[1], 0, atol=1e-170)
+
+    @pytest.mark.slow
+    def test_large_numbers(self):
+        # Test with some large numbers. Regression test for #1401
+        pvals = [5.56e-11, 2.666e-11, 1.363e-11]  # from R
+        for pval, num in zip(pvals, [75, 76, 77]):
+            res = stats.fisher_exact([[17704, 496], [1065, num]])[1]
+            assert_approx_equal(res, pval, significant=4)
+
+        res = stats.fisher_exact([[18000, 80000], [20000, 90000]])[1]
+        assert_approx_equal(res, 0.2751, significant=4)
+
+    def test_raises(self):
+        # test we raise an error for wrong shape of input.
+        assert_raises(ValueError, stats.fisher_exact,
+                      np.arange(6).reshape(2, 3))
+
+    def test_row_or_col_zero(self):
+        tables = ([[0, 0], [5, 10]],
+                  [[5, 10], [0, 0]],
+                  [[0, 5], [0, 10]],
+                  [[5, 0], [10, 0]])
+        for table in tables:
+            oddsratio, pval = stats.fisher_exact(table)
+            assert_equal(pval, 1.0)
+            assert_equal(oddsratio, np.nan)
+
+    def test_less_greater(self):
+        tables = (
+            # Some tables to compare with R:
+            [[2, 7], [8, 2]],
+            [[200, 7], [8, 300]],
+            [[28, 21], [6, 1957]],
+            [[190, 800], [200, 900]],
+            # Some tables with simple exact values
+            # (includes regression test for ticket #1568):
+            [[0, 2], [3, 0]],
+            [[1, 1], [2, 1]],
+            [[2, 0], [1, 2]],
+            [[0, 1], [2, 3]],
+            [[1, 0], [1, 4]],
+            )
+        pvals = (
+            # from R:
+            [0.018521725952066501, 0.9990149169715733],
+            [1.0, 2.0056578803889148e-122],
+            [1.0, 5.7284374608319831e-44],
+            [0.7416227, 0.2959826],
+            # Exact:
+            [0.1, 1.0],
+            [0.7, 0.9],
+            [1.0, 0.3],
+            [2./3, 1.0],
+            [1.0, 1./3],
+            )
+        for table, pval in zip(tables, pvals):
+            res = []
+            res.append(stats.fisher_exact(table, alternative="less")[1])
+            res.append(stats.fisher_exact(table, alternative="greater")[1])
+            assert_allclose(res, pval, atol=0, rtol=1e-7)
+
+    def test_gh3014(self):
+        # check that issue #3014 has been fixed.
+        # Before the fix, this would have raised a ValueError.
+        odds, pvalue = stats.fisher_exact([[1, 2], [9, 84419233]])
+
+    @pytest.mark.parametrize("alternative", ['two-sided', 'less', 'greater'])
+    def test_result(self, alternative):
+        table = np.array([[14500, 20000], [30000, 40000]])
+        res = stats.fisher_exact(table, alternative=alternative)
+        assert_equal((res.statistic, res.pvalue), res)
+
+
+class TestCorrSpearmanr:
+    """ W.II.D. Compute a correlation matrix on all the variables.
+
+        All the correlations, except for ZERO and MISS, should be exactly 1.
+        ZERO and MISS should have undefined or missing correlations with the
+        other variables.  The same should go for SPEARMAN correlations, if
+        your program has them.
+    """
+
+    def test_scalar(self):
+        y = stats.spearmanr(4., 2.)
+        assert_(np.isnan(y).all())
+
+    def test_uneven_lengths(self):
+        assert_raises(ValueError, stats.spearmanr, [1, 2, 1], [8, 9])
+        assert_raises(ValueError, stats.spearmanr, [1, 2, 1], 8)
+
+    def test_uneven_2d_shapes(self):
+        # Different numbers of columns should work - the columns just get
+        # concatenated.
+        np.random.seed(232324)
+        x = np.random.randn(4, 3)
+        y = np.random.randn(4, 2)
+        assert stats.spearmanr(x, y).statistic.shape == (5, 5)
+        assert stats.spearmanr(x.T, y.T, axis=1).pvalue.shape == (5, 5)
+
+        assert_raises(ValueError, stats.spearmanr, x, y, axis=1)
+        assert_raises(ValueError, stats.spearmanr, x.T, y.T)
+
+    def test_ndim_too_high(self):
+        np.random.seed(232324)
+        x = np.random.randn(4, 3, 2)
+        assert_raises(ValueError, stats.spearmanr, x)
+        assert_raises(ValueError, stats.spearmanr, x, x)
+        assert_raises(ValueError, stats.spearmanr, x, None, None)
+        # But should work with axis=None (raveling axes) for two input arrays
+        assert_allclose(stats.spearmanr(x, x, axis=None),
+                        stats.spearmanr(x.flatten(), x.flatten(), axis=0))
+
+    def test_nan_policy(self):
+        x = np.arange(10.)
+        x[9] = np.nan
+        assert_array_equal(stats.spearmanr(x, x), (np.nan, np.nan))
+        assert_array_equal(stats.spearmanr(x, x, nan_policy='omit'),
+                           (1.0, 0.0))
+        assert_raises(ValueError, stats.spearmanr, x, x, nan_policy='raise')
+        assert_raises(ValueError, stats.spearmanr, x, x, nan_policy='foobar')
+
+    def test_nan_policy_bug_12458(self):
+        np.random.seed(5)
+        x = np.random.rand(5, 10)
+        k = 6
+        x[:, k] = np.nan
+        y = np.delete(x, k, axis=1)
+        corx, px = stats.spearmanr(x, nan_policy='omit')
+        cory, py = stats.spearmanr(y)
+        corx = np.delete(np.delete(corx, k, axis=1), k, axis=0)
+        px = np.delete(np.delete(px, k, axis=1), k, axis=0)
+        assert_allclose(corx, cory, atol=1e-14)
+        assert_allclose(px, py, atol=1e-14)
+
+    def test_nan_policy_bug_12411(self):
+        np.random.seed(5)
+        m = 5
+        n = 10
+        x = np.random.randn(m, n)
+        x[1, 0] = np.nan
+        x[3, -1] = np.nan
+        corr, pvalue = stats.spearmanr(x, axis=1, nan_policy="propagate")
+        res = [[stats.spearmanr(x[i, :], x[j, :]).statistic for i in range(m)]
+               for j in range(m)]
+        assert_allclose(corr, res)
+
+    def test_sXX(self):
+        y = stats.spearmanr(X,X)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_sXBIG(self):
+        y = stats.spearmanr(X,BIG)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_sXLITTLE(self):
+        y = stats.spearmanr(X,LITTLE)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_sXHUGE(self):
+        y = stats.spearmanr(X,HUGE)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_sXTINY(self):
+        y = stats.spearmanr(X,TINY)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_sXROUND(self):
+        y = stats.spearmanr(X,ROUND)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_sBIGBIG(self):
+        y = stats.spearmanr(BIG,BIG)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_sBIGLITTLE(self):
+        y = stats.spearmanr(BIG,LITTLE)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_sBIGHUGE(self):
+        y = stats.spearmanr(BIG,HUGE)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_sBIGTINY(self):
+        y = stats.spearmanr(BIG,TINY)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_sBIGROUND(self):
+        y = stats.spearmanr(BIG,ROUND)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_sLITTLELITTLE(self):
+        y = stats.spearmanr(LITTLE,LITTLE)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_sLITTLEHUGE(self):
+        y = stats.spearmanr(LITTLE,HUGE)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_sLITTLETINY(self):
+        y = stats.spearmanr(LITTLE,TINY)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_sLITTLEROUND(self):
+        y = stats.spearmanr(LITTLE,ROUND)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_sHUGEHUGE(self):
+        y = stats.spearmanr(HUGE,HUGE)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_sHUGETINY(self):
+        y = stats.spearmanr(HUGE,TINY)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_sHUGEROUND(self):
+        y = stats.spearmanr(HUGE,ROUND)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_sTINYTINY(self):
+        y = stats.spearmanr(TINY,TINY)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_sTINYROUND(self):
+        y = stats.spearmanr(TINY,ROUND)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_sROUNDROUND(self):
+        y = stats.spearmanr(ROUND,ROUND)
+        r = y[0]
+        assert_approx_equal(r,1.0)
+
+    def test_spearmanr_result_attributes(self):
+        res = stats.spearmanr(X, X)
+        attributes = ('correlation', 'pvalue')
+        check_named_results(res, attributes)
+        assert_equal(res.correlation, res.statistic)
+
+    def test_1d_vs_2d(self):
+        x1 = [1, 2, 3, 4, 5, 6]
+        x2 = [1, 2, 3, 4, 6, 5]
+        res1 = stats.spearmanr(x1, x2)
+        res2 = stats.spearmanr(np.asarray([x1, x2]).T)
+        assert_allclose(res1, res2)
+
+    def test_1d_vs_2d_nans(self):
+        # Now the same with NaNs present.  Regression test for gh-9103.
+        for nan_policy in ['propagate', 'omit']:
+            x1 = [1, np.nan, 3, 4, 5, 6]
+            x2 = [1, 2, 3, 4, 6, np.nan]
+            res1 = stats.spearmanr(x1, x2, nan_policy=nan_policy)
+            res2 = stats.spearmanr(np.asarray([x1, x2]).T, nan_policy=nan_policy)
+            assert_allclose(res1, res2)
+
+    def test_3cols(self):
+        x1 = np.arange(6)
+        x2 = -x1
+        x3 = np.array([0, 1, 2, 3, 5, 4])
+        x = np.asarray([x1, x2, x3]).T
+        actual = stats.spearmanr(x)
+        expected_corr = np.array([[1, -1, 0.94285714],
+                                  [-1, 1, -0.94285714],
+                                  [0.94285714, -0.94285714, 1]])
+        expected_pvalue = np.zeros((3, 3), dtype=float)
+        expected_pvalue[2, 0:2] = 0.00480466472
+        expected_pvalue[0:2, 2] = 0.00480466472
+
+        assert_allclose(actual.statistic, expected_corr)
+        assert_allclose(actual.pvalue, expected_pvalue)
+
+    def test_gh_9103(self):
+        # Regression test for gh-9103.
+        x = np.array([[np.nan, 3.0, 4.0, 5.0, 5.1, 6.0, 9.2],
+                      [5.0, np.nan, 4.1, 4.8, 4.9, 5.0, 4.1],
+                      [0.5, 4.0, 7.1, 3.8, 8.0, 5.1, 7.6]]).T
+        corr = np.array([[np.nan, np.nan, np.nan],
+                         [np.nan, np.nan, np.nan],
+                         [np.nan, np.nan, 1.]])
+        assert_allclose(stats.spearmanr(x, nan_policy='propagate').statistic,
+                        corr)
+
+        res = stats.spearmanr(x, nan_policy='omit').statistic
+        assert_allclose((res[0][1], res[0][2], res[1][2]),
+                        (0.2051957, 0.4857143, -0.4707919), rtol=1e-6)
+
+    def test_gh_8111(self):
+        # Regression test for gh-8111 (different result for float/int/bool).
+        n = 100
+        np.random.seed(234568)
+        x = np.random.rand(n)
+        m = np.random.rand(n) > 0.7
+
+        # bool against float, no nans
+        a = (x > .5)
+        b = np.array(x)
+        res1 = stats.spearmanr(a, b, nan_policy='omit').statistic
+
+        # bool against float with NaNs
+        b[m] = np.nan
+        res2 = stats.spearmanr(a, b, nan_policy='omit').statistic
+
+        # int against float with NaNs
+        a = a.astype(np.int32)
+        res3 = stats.spearmanr(a, b, nan_policy='omit').statistic
+
+        expected = [0.865895477, 0.866100381, 0.866100381]
+        assert_allclose([res1, res2, res3], expected)
+
+
+class TestCorrSpearmanr2:
+    """Some further tests of the spearmanr function."""
+
+    def test_spearmanr_vs_r(self):
+        # Cross-check with R:
+        # cor.test(c(1,2,3,4,5),c(5,6,7,8,7),method="spearman")
+        x1 = [1, 2, 3, 4, 5]
+        x2 = [5, 6, 7, 8, 7]
+        expected = (0.82078268166812329, 0.088587005313543798)
+        res = stats.spearmanr(x1, x2)
+        assert_approx_equal(res[0], expected[0])
+        assert_approx_equal(res[1], expected[1])
+
+    def test_empty_arrays(self):
+        assert_equal(stats.spearmanr([], []), (np.nan, np.nan))
+
+    def test_normal_draws(self):
+        np.random.seed(7546)
+        x = np.array([np.random.normal(loc=1, scale=1, size=500),
+                    np.random.normal(loc=1, scale=1, size=500)])
+        corr = [[1.0, 0.3],
+                [0.3, 1.0]]
+        x = np.dot(np.linalg.cholesky(corr), x)
+        expected = (0.28659685838743354, 6.579862219051161e-11)
+        res = stats.spearmanr(x[0], x[1])
+        assert_approx_equal(res[0], expected[0])
+        assert_approx_equal(res[1], expected[1])
+
+    def test_corr_1(self):
+        assert_approx_equal(stats.spearmanr([1, 1, 2], [1, 1, 2])[0], 1.0)
+
+    def test_nan_policies(self):
+        x = np.arange(10.)
+        x[9] = np.nan
+        assert_array_equal(stats.spearmanr(x, x), (np.nan, np.nan))
+        assert_allclose(stats.spearmanr(x, x, nan_policy='omit'),
+                        (1.0, 0))
+        assert_raises(ValueError, stats.spearmanr, x, x, nan_policy='raise')
+        assert_raises(ValueError, stats.spearmanr, x, x, nan_policy='foobar')
+
+    def test_unequal_lengths(self):
+        x = np.arange(10.)
+        y = np.arange(20.)
+        assert_raises(ValueError, stats.spearmanr, x, y)
+
+    def test_omit_paired_value(self):
+        x1 = [1, 2, 3, 4]
+        x2 = [8, 7, 6, np.nan]
+        res1 = stats.spearmanr(x1, x2, nan_policy='omit')
+        res2 = stats.spearmanr(x1[:3], x2[:3], nan_policy='omit')
+        assert_equal(res1, res2)
+
+    def test_gh_issue_6061_windows_overflow(self):
+        x = list(range(2000))
+        y = list(range(2000))
+        y[0], y[9] = y[9], y[0]
+        y[10], y[434] = y[434], y[10]
+        y[435], y[1509] = y[1509], y[435]
+        # rho = 1 - 6 * (2 * (9^2 + 424^2 + 1074^2))/(2000 * (2000^2 - 1))
+        #     = 1 - (1 / 500)
+        #     = 0.998
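+        # (expanded, hedged arithmetic: the three swaps give rank
+        # differences d = 9, 424, 1074, each occurring twice, so
+        # sum(d**2) = 2*(81 + 179776 + 1153476) = 2666666, and
+        # 6*2666666 / (2000*(2000**2 - 1)) = 15999996/7999998000 = 1/500)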
+        x.append(np.nan)
+        y.append(3.0)
+        assert_almost_equal(stats.spearmanr(x, y, nan_policy='omit')[0], 0.998)
+
+    def test_tie0(self):
+        # with only ties in one or both inputs
+        warn_msg = "An input array is constant"
+        with assert_warns(stats.ConstantInputWarning, match=warn_msg):
+            r, p = stats.spearmanr([2, 2, 2], [2, 2, 2])
+            assert_equal(r, np.nan)
+            assert_equal(p, np.nan)
+            r, p = stats.spearmanr([2, 0, 2], [2, 2, 2])
+            assert_equal(r, np.nan)
+            assert_equal(p, np.nan)
+            r, p = stats.spearmanr([2, 2, 2], [2, 0, 2])
+            assert_equal(r, np.nan)
+            assert_equal(p, np.nan)
+
+    def test_tie1(self):
+        # Data
+        x = [1.0, 2.0, 3.0, 4.0]
+        y = [1.0, 2.0, 2.0, 3.0]
+        # Ranks of the data, with tie-handling.
+        xr = [1.0, 2.0, 3.0, 4.0]
+        yr = [1.0, 2.5, 2.5, 4.0]
+        # Result of spearmanr should be the same as applying
+        # pearsonr to the ranks.
+        sr = stats.spearmanr(x, y)
+        pr = stats.pearsonr(xr, yr)
+        assert_almost_equal(sr, pr)
+
+    def test_tie2(self):
+        # Test tie-handling if inputs contain nan's
+        # Data without nan's
+        x1 = [1, 2, 2.5, 2]
+        y1 = [1, 3, 2.5, 4]
+        # Same data with nan's
+        x2 = [1, 2, 2.5, 2, np.nan]
+        y2 = [1, 3, 2.5, 4, np.nan]
+
+        # Results for two data sets should be the same if nan's are ignored
+        sr1 = stats.spearmanr(x1, y1)
+        sr2 = stats.spearmanr(x2, y2, nan_policy='omit')
+        assert_almost_equal(sr1, sr2)
+
+    def test_ties_axis_1(self):
+        z1 = np.array([[1, 1, 1, 1], [1, 2, 3, 4]])
+        z2 = np.array([[1, 2, 3, 4], [1, 1, 1, 1]])
+        z3 = np.array([[1, 1, 1, 1], [1, 1, 1, 1]])
+        warn_msg = "An input array is constant"
+        with assert_warns(stats.ConstantInputWarning, match=warn_msg):
+            r, p = stats.spearmanr(z1, axis=1)
+            assert_equal(r, np.nan)
+            assert_equal(p, np.nan)
+            r, p = stats.spearmanr(z2, axis=1)
+            assert_equal(r, np.nan)
+            assert_equal(p, np.nan)
+            r, p = stats.spearmanr(z3, axis=1)
+            assert_equal(r, np.nan)
+            assert_equal(p, np.nan)
+
+    def test_gh_11111(self):
+        x = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
+        y = np.array([0, 0.009783728115345005, 0, 0, 0.0019759230121848587,
+            0.0007535430349118562, 0.0002661781514710257, 0, 0,
+            0.0007835762419683435])
+        warn_msg = "An input array is constant"
+        with assert_warns(stats.ConstantInputWarning, match=warn_msg):
+            r, p = stats.spearmanr(x, y)
+            assert_equal(r, np.nan)
+            assert_equal(p, np.nan)
+
+    def test_index_error(self):
+        x = np.array([1.0, 7.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
+        y = np.array([0, 0.009783728115345005, 0, 0, 0.0019759230121848587,
+            0.0007535430349118562, 0.0002661781514710257, 0, 0,
+            0.0007835762419683435])
+        assert_raises(ValueError, stats.spearmanr, x, y, axis=2)
+
+    def test_alternative(self):
+        # Test alternative parameter
+
+        # Simple test - Based on the above ``test_spearmanr_vs_r``
+        x1 = [1, 2, 3, 4, 5]
+        x2 = [5, 6, 7, 8, 7]
+
+        # strong positive correlation
+        expected = (0.82078268166812329, 0.088587005313543798)
+
+        # correlation > 0 -> large "less" p-value
+        res = stats.spearmanr(x1, x2, alternative="less")
+        assert_approx_equal(res[0], expected[0])
+        assert_approx_equal(res[1], 1 - (expected[1] / 2))
+
+        # correlation > 0 -> small "less" p-value
+        res = stats.spearmanr(x1, x2, alternative="greater")
+        assert_approx_equal(res[0], expected[0])
+        assert_approx_equal(res[1], expected[1] / 2)
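+        # (Both checks follow from the symmetry of the null distribution:
+        # given a two-sided p-value p, the one-sided p-values are p/2 in
+        # the favored tail and 1 - p/2 in the other.)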
+
+        with pytest.raises(ValueError, match="alternative must be 'less'..."):
+            stats.spearmanr(x1, x2, alternative="ekki-ekki")
+
+    @pytest.mark.parametrize("alternative", ('two-sided', 'less', 'greater'))
+    def test_alternative_nan_policy(self, alternative):
+        # Test nan policies
+        x1 = [1, 2, 3, 4, 5]
+        x2 = [5, 6, 7, 8, 7]
+        x1nan = x1 + [np.nan]
+        x2nan = x2 + [np.nan]
+
+        # test nan_policy="propagate"
+        assert_array_equal(stats.spearmanr(x1nan, x2nan), (np.nan, np.nan))
+
+        # test nan_policy="omit"
+        res_actual = stats.spearmanr(x1nan, x2nan, nan_policy='omit',
+                                     alternative=alternative)
+        res_expected = stats.spearmanr(x1, x2, alternative=alternative)
+        assert_allclose(res_actual, res_expected)
+
+        # test nan_policy="raise"
+        message = 'The input contains nan values'
+        with pytest.raises(ValueError, match=message):
+            stats.spearmanr(x1nan, x2nan, nan_policy='raise',
+                            alternative=alternative)
+
+        # test invalid nan_policy
+        message = "nan_policy must be one of..."
+        with pytest.raises(ValueError, match=message):
+            stats.spearmanr(x1nan, x2nan, nan_policy='ekki-ekki',
+                            alternative=alternative)
+
+
+#    W.II.E.  Tabulate X against X, using BIG as a case weight.  The values
+#    should appear on the diagonal and the total should be 899999955.
+#    If the table cannot hold these values, forget about working with
+#    census data.  You can also tabulate HUGE against TINY.  There is no
+#    reason a tabulation program should not be able to distinguish
+#    different values regardless of their magnitude.
+
+# I need to figure out how to do this one.
+
+
+def test_kendalltau():
+    # For the cases without ties, both variants should give the same
+    # result.
+    variants = ('b', 'c')
+
+    # case without ties, con-dis equal zero
+    x = [5, 2, 1, 3, 6, 4, 7, 8]
+    y = [5, 2, 6, 3, 1, 8, 7, 4]
+    # Cross-check with exact result from R:
+    # cor.test(x,y,method="kendall",exact=1)
+    expected = (0.0, 1.0)
+    for taux in variants:
+        res = stats.kendalltau(x, y, variant=taux)
+        assert_approx_equal(res[0], expected[0])
+        assert_approx_equal(res[1], expected[1])
+
+    # case without ties, con-dis equal zero
+    x = [0, 5, 2, 1, 3, 6, 4, 7, 8]
+    y = [5, 2, 0, 6, 3, 1, 8, 7, 4]
+    # Cross-check with exact result from R:
+    # cor.test(x,y,method="kendall",exact=1)
+    expected = (0.0, 1.0)
+    for taux in variants:
+        res = stats.kendalltau(x, y, variant=taux)
+        assert_approx_equal(res[0], expected[0])
+        assert_approx_equal(res[1], expected[1])
+
+    # case without ties, con-dis close to zero
+    x = [5, 2, 1, 3, 6, 4, 7]
+    y = [5, 2, 6, 3, 1, 7, 4]
+    # Cross-check with exact result from R:
+    # cor.test(x,y,method="kendall",exact=1)
+    expected = (-0.14285714286, 0.77261904762)
+    for taux in variants:
+        res = stats.kendalltau(x, y, variant=taux)
+        assert_approx_equal(res[0], expected[0])
+        assert_approx_equal(res[1], expected[1])
+
+    # case without ties, con-dis close to zero
+    x = [2, 1, 3, 6, 4, 7, 8]
+    y = [2, 6, 3, 1, 8, 7, 4]
+    # Cross-check with exact result from R:
+    # cor.test(x,y,method="kendall",exact=1)
+    expected = (0.047619047619, 1.0)
+    for taux in variants:
+        res = stats.kendalltau(x, y, variant=taux)
+        assert_approx_equal(res[0], expected[0])
+        assert_approx_equal(res[1], expected[1])
+
+    # simple case without ties
+    x = np.arange(10)
+    y = np.arange(10)
+    # Cross-check with exact result from R:
+    # cor.test(x,y,method="kendall",exact=1)
+    expected = (1.0, 5.511463844797e-07)
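+    # (a hedged arithmetic check: for perfect agreement with n = 10 the
+    # exact two-sided p-value is 2/10! = 2/3628800 = 5.511463844797e-07)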
+    for taux in variants:
+        res = stats.kendalltau(x, y, variant=taux)
+        assert_approx_equal(res[0], expected[0])
+        assert_approx_equal(res[1], expected[1])
+
+    # swap a couple of values
+    b = y[1]
+    y[1] = y[2]
+    y[2] = b
+    # Cross-check with exact result from R:
+    # cor.test(x,y,method="kendall",exact=1)
+    expected = (0.9555555555555556, 5.511463844797e-06)
+    for taux in variants:
+        res = stats.kendalltau(x, y, variant=taux)
+        assert_approx_equal(res[0], expected[0])
+        assert_approx_equal(res[1], expected[1])
+
+    # swap a couple more
+    b = y[5]
+    y[5] = y[6]
+    y[6] = b
+    # Cross-check with exact result from R:
+    # cor.test(x,y,method="kendall",exact=1)
+    expected = (0.9111111111111111, 2.976190476190e-05)
+    for taux in variants:
+        res = stats.kendalltau(x, y, variant=taux)
+        assert_approx_equal(res[0], expected[0])
+        assert_approx_equal(res[1], expected[1])
+
+    # same in opposite direction
+    x = np.arange(10)
+    y = np.arange(10)[::-1]
+    # Cross-check with exact result from R:
+    # cor.test(x,y,method="kendall",exact=1)
+    expected = (-1.0, 5.511463844797e-07)
+    for taux in variants:
+        res = stats.kendalltau(x, y, variant=taux)
+        assert_approx_equal(res[0], expected[0])
+        assert_approx_equal(res[1], expected[1])
+
+    # swap a couple of values
+    b = y[1]
+    y[1] = y[2]
+    y[2] = b
+    # Cross-check with exact result from R:
+    # cor.test(x,y,method="kendall",exact=1)
+    expected = (-0.9555555555555556, 5.511463844797e-06)
+    for taux in variants:
+        res = stats.kendalltau(x, y, variant=taux)
+        assert_approx_equal(res[0], expected[0])
+        assert_approx_equal(res[1], expected[1])
+
+    # swap a couple more
+    b = y[5]
+    y[5] = y[6]
+    y[6] = b
+    # Cross-check with exact result from R:
+    # cor.test(x,y,method="kendall",exact=1)
+    expected = (-0.9111111111111111, 2.976190476190e-05)
+    for taux in variants:
+        res = stats.kendalltau(x, y, variant=taux)
+        assert_approx_equal(res[0], expected[0])
+        assert_approx_equal(res[1], expected[1])
+
+    # Check a case where variants are different
+    # Example values found from Kendall (1970).
+    # The p-value is the same for both variants.
+    x = array([1, 2, 2, 4, 4, 6, 6, 8, 9, 9])
+    y = array([1, 2, 4, 4, 4, 4, 8, 8, 8, 10])
+    expected = 0.85895569
+    assert_approx_equal(stats.kendalltau(x, y, variant='b')[0], expected)
+    expected = 0.825
+    assert_approx_equal(stats.kendalltau(x, y, variant='c')[0], expected)
+
+    # check exception in case of ties and method='exact' requested
+    y[2] = y[1]
+    assert_raises(ValueError, stats.kendalltau, x, y, method='exact')
+
+    # check exception in case of invalid method keyword
+    assert_raises(ValueError, stats.kendalltau, x, y, method='banana')
+
+    # check exception in case of invalid variant keyword
+    assert_raises(ValueError, stats.kendalltau, x, y, variant='rms')
+
+    # tau-b with some ties
+    # Cross-check with R:
+    # cor.test(c(12,2,1,12,2),c(1,4,7,1,0),method="kendall",exact=FALSE)
+    x1 = [12, 2, 1, 12, 2]
+    x2 = [1, 4, 7, 1, 0]
+    expected = (-0.47140452079103173, 0.28274545993277478)
+    res = stats.kendalltau(x1, x2)
+    assert_approx_equal(res[0], expected[0])
+    assert_approx_equal(res[1], expected[1])
+
+    # test for namedtuple attribute results
+    attributes = ('correlation', 'pvalue')
+    for taux in variants:
+        res = stats.kendalltau(x1, x2, variant=taux)
+        check_named_results(res, attributes)
+        assert_equal(res.correlation, res.statistic)
+
+    # with only ties in one or both inputs in tau-b or tau-c
+    for taux in variants:
+        assert_equal(stats.kendalltau([2, 2, 2], [2, 2, 2], variant=taux),
+                     (np.nan, np.nan))
+        assert_equal(stats.kendalltau([2, 0, 2], [2, 2, 2], variant=taux),
+                     (np.nan, np.nan))
+        assert_equal(stats.kendalltau([2, 2, 2], [2, 0, 2], variant=taux),
+                     (np.nan, np.nan))
+
+    # empty arrays provided as input
+    assert_equal(stats.kendalltau([], []), (np.nan, np.nan))
+
+    # check with larger arrays
+    np.random.seed(7546)
+    x = np.array([np.random.normal(loc=1, scale=1, size=500),
+                np.random.normal(loc=1, scale=1, size=500)])
+    corr = [[1.0, 0.3],
+            [0.3, 1.0]]
+    x = np.dot(np.linalg.cholesky(corr), x)
+    expected = (0.19291382765531062, 1.1337095377742629e-10)
+    res = stats.kendalltau(x[0], x[1])
+    assert_approx_equal(res[0], expected[0])
+    assert_approx_equal(res[1], expected[1])
+
+    # this should result in 1 for tau-b but not for tau-c
+    assert_approx_equal(stats.kendalltau([1, 1, 2], [1, 1, 2], variant='b')[0],
+                        1.0)
+    assert_approx_equal(stats.kendalltau([1, 1, 2], [1, 1, 2], variant='c')[0],
+                        0.88888888)
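+    # (hedged check: here con - dis = 2 with n = 3 and one tied pair in
+    # each input, so tau-b = 2/sqrt((3-1)*(3-1)) = 1, while Stuart's
+    # tau-c = 2*m*(con - dis)/(n**2*(m-1)) with m = 2 distinct values
+    # gives 2*2*2/(9*1) = 8/9 = 0.888...)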
+
+    # test nan_policy
+    x = np.arange(10.)
+    x[9] = np.nan
+    assert_array_equal(stats.kendalltau(x, x), (np.nan, np.nan))
+    assert_allclose(stats.kendalltau(x, x, nan_policy='omit'),
+                    (1.0, 5.5114638e-6), rtol=1e-06)
+    assert_allclose(stats.kendalltau(x, x, nan_policy='omit', method='asymptotic'),
+                    (1.0, 0.00017455009626808976), rtol=1e-06)
+    assert_raises(ValueError, stats.kendalltau, x, x, nan_policy='raise')
+    assert_raises(ValueError, stats.kendalltau, x, x, nan_policy='foobar')
+
+    # test unequal length inputs
+    x = np.arange(10.)
+    y = np.arange(20.)
+    assert_raises(ValueError, stats.kendalltau, x, y)
+
+    # test all ties
+    tau, p_value = stats.kendalltau([], [])
+    assert_equal(np.nan, tau)
+    assert_equal(np.nan, p_value)
+    tau, p_value = stats.kendalltau([0], [0])
+    assert_equal(np.nan, tau)
+    assert_equal(np.nan, p_value)
+
+    # Regression test for GitHub issue #6061 - Overflow on Windows
+    x = np.arange(2000, dtype=float)
+    x = np.ma.masked_greater(x, 1995)
+    y = np.arange(2000, dtype=float)
+    y = np.concatenate((y[1000:], y[:1000]))
+    assert_(np.isfinite(stats.kendalltau(x,y)[1]))
+
+
+def test_kendalltau_vs_mstats_basic():
+    np.random.seed(42)
+    for s in range(2,10):
+        a = []
+        # Generate rankings with ties
+        for i in range(s):
+            a += [i]*i
+        b = list(a)
+        np.random.shuffle(a)
+        np.random.shuffle(b)
+        expected = mstats_basic.kendalltau(a, b)
+        actual = stats.kendalltau(a, b)
+        assert_approx_equal(actual[0], expected[0])
+        assert_approx_equal(actual[1], expected[1])
+
+
+def test_kendalltau_nan_2nd_arg():
+    # regression test for gh-6134: nans in the second arg were not handled
+    x = [1., 2., 3., 4.]
+    y = [np.nan, 2.4, 3.4, 3.4]
+
+    r1 = stats.kendalltau(x, y, nan_policy='omit')
+    r2 = stats.kendalltau(x[1:], y[1:])
+    assert_allclose(r1.statistic, r2.statistic, atol=1e-15)
+
+
+def test_kendalltau_dep_initial_lexsort():
+    with pytest.warns(
+        DeprecationWarning,
+        match="'kendalltau' keyword argument 'initial_lexsort'"
+    ):
+        stats.kendalltau([], [], initial_lexsort=True)
+
+
+class TestKendallTauAlternative:
+    def test_kendalltau_alternative_asymptotic(self):
+        # Test alternative parameter, asymptotic method (due to a tie)
+
+        # Based on TestCorrSpearmanr2::test_alternative
+        x1 = [1, 2, 3, 4, 5]
+        x2 = [5, 6, 7, 8, 7]
+
+        # strong positive correlation
+        expected = stats.kendalltau(x1, x2, alternative="two-sided")
+        assert expected[0] > 0
+
+        # rank correlation > 0 -> large "less" p-value
+        res = stats.kendalltau(x1, x2, alternative="less")
+        assert_equal(res[0], expected[0])
+        assert_allclose(res[1], 1 - (expected[1] / 2))
+
+        # rank correlation > 0 -> small "greater" p-value
+        res = stats.kendalltau(x1, x2, alternative="greater")
+        assert_equal(res[0], expected[0])
+        assert_allclose(res[1], expected[1] / 2)
+
+        # reverse the direction of rank correlation
+        x2.reverse()
+
+        # strong negative correlation
+        expected = stats.kendalltau(x1, x2, alternative="two-sided")
+        assert expected[0] < 0
+
+        # rank correlation < 0 -> large "greater" p-value
+        res = stats.kendalltau(x1, x2, alternative="greater")
+        assert_equal(res[0], expected[0])
+        assert_allclose(res[1], 1 - (expected[1] / 2))
+
+        # rank correlation < 0 -> small "less" p-value
+        res = stats.kendalltau(x1, x2, alternative="less")
+        assert_equal(res[0], expected[0])
+        assert_allclose(res[1], expected[1] / 2)
+
+        with pytest.raises(ValueError, match="alternative must be 'less'..."):
+            stats.kendalltau(x1, x2, alternative="ekki-ekki")
+
+    # There are a lot of special cases considered in the calculation of the
+    # exact p-value, so we test each separately. We also need to test
+    # separately when the observed statistic is in the left tail vs the right
+    # tail because the code leverages symmetry of the null distribution; to
+    # do that we use the same test case but negate one of the samples.
+    # Reference values computed using R cor.test, e.g.
+    # options(digits=16)
+    # x <- c(44.4, 45.9, 41.9, 53.3, 44.7, 44.1, 50.7, 45.2, 60.1)
+    # y <- c( 2.6,  3.1,  2.5,  5.0,  3.6,  4.0,  5.2,  2.8,  3.8)
+    # cor.test(x, y, method = "kendall", alternative = "g")
+
+    alternatives = ('less', 'two-sided', 'greater')
+    p_n1 = [np.nan, np.nan, np.nan]
+    p_n2 = [1, 1, 0.5]
+    p_c0 = [1, 0.3333333333333, 0.1666666666667]
+    p_c1 = [0.9583333333333, 0.3333333333333, 0.1666666666667]
+    p_no_correlation = [0.5916666666667, 1, 0.5916666666667]
+    p_no_correlationb = [0.5475694444444, 1, 0.5475694444444]
+    p_n_lt_171 = [0.9624118165785, 0.1194389329806, 0.0597194664903]
+    p_n_lt_171b = [0.246236925303, 0.4924738506059, 0.755634083327]
+    p_n_lt_171c = [0.9847475308925, 0.03071385306533, 0.01535692653267]
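+    # (Reversing a p-value triple is valid because negating one sample
+    # flips the sign of tau, swapping the 'less' and 'greater' tails while
+    # leaving 'two-sided' unchanged; each triple is ordered
+    # (less, two-sided, greater).)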
+
+    def exact_test(self, x, y, alternative, rev, stat_expected, p_expected):
+        if rev:
+            y = -np.asarray(y)
+            stat_expected *= -1
+        res = stats.kendalltau(x, y, method='exact', alternative=alternative)
+        res_expected = stat_expected, p_expected
+        assert_allclose(res, res_expected)
+
+    case_R_n1 = (list(zip(alternatives, p_n1, [False]*3))
+                 + list(zip(alternatives, reversed(p_n1), [True]*3)))
+
+    @pytest.mark.parametrize("alternative, p_expected, rev", case_R_n1)
+    def test_against_R_n1(self, alternative, p_expected, rev):
+        x, y = [1], [2]
+        stat_expected = np.nan
+        self.exact_test(x, y, alternative, rev, stat_expected, p_expected)
+
+    case_R_n2 = (list(zip(alternatives, p_n2, [False]*3))
+                 + list(zip(alternatives, reversed(p_n2), [True]*3)))
+
+    @pytest.mark.parametrize("alternative, p_expected, rev", case_R_n2)
+    def test_against_R_n2(self, alternative, p_expected, rev):
+        x, y = [1, 2], [3, 4]
+        stat_expected = 0.9999999999999998
+        self.exact_test(x, y, alternative, rev, stat_expected, p_expected)
+
+    case_R_c0 = (list(zip(alternatives, p_c0, [False]*3))
+                 + list(zip(alternatives, reversed(p_c0), [True]*3)))
+
+    @pytest.mark.parametrize("alternative, p_expected, rev", case_R_c0)
+    def test_against_R_c0(self, alternative, p_expected, rev):
+        x, y = [1, 2, 3], [1, 2, 3]
+        stat_expected = 1
+        self.exact_test(x, y, alternative, rev, stat_expected, p_expected)
+
+    case_R_c1 = (list(zip(alternatives, p_c1, [False]*3))
+                 + list(zip(alternatives, reversed(p_c1), [True]*3)))
+
+    @pytest.mark.parametrize("alternative, p_expected, rev", case_R_c1)
+    def test_against_R_c1(self, alternative, p_expected, rev):
+        x, y = [1, 2, 3, 4], [1, 2, 4, 3]
+        stat_expected = 0.6666666666666667
+        self.exact_test(x, y, alternative, rev, stat_expected, p_expected)
+
+    case_R_no_corr = (list(zip(alternatives, p_no_correlation, [False]*3))
+                      + list(zip(alternatives, reversed(p_no_correlation),
+                                 [True]*3)))
+
+    @pytest.mark.parametrize("alternative, p_expected, rev", case_R_no_corr)
+    def test_against_R_no_correlation(self, alternative, p_expected, rev):
+        x, y = [1, 2, 3, 4, 5], [1, 5, 4, 2, 3]
+        stat_expected = 0
+        self.exact_test(x, y, alternative, rev, stat_expected, p_expected)
+
+    case_no_cor_b = (list(zip(alternatives, p_no_correlationb, [False]*3))
+                     + list(zip(alternatives, reversed(p_no_correlationb),
+                                [True]*3)))
+
+    @pytest.mark.parametrize("alternative, p_expected, rev", case_no_cor_b)
+    def test_against_R_no_correlationb(self, alternative, p_expected, rev):
+        x, y = [1, 2, 3, 4, 5, 6, 7, 8], [8, 6, 1, 3, 2, 5, 4, 7]
+        stat_expected = 0
+        self.exact_test(x, y, alternative, rev, stat_expected, p_expected)
+
+    case_R_lt_171 = (list(zip(alternatives, p_n_lt_171, [False]*3))
+                     + list(zip(alternatives, reversed(p_n_lt_171), [True]*3)))
+
+    @pytest.mark.parametrize("alternative, p_expected, rev", case_R_lt_171)
+    def test_against_R_lt_171(self, alternative, p_expected, rev):
+        # Data from Hollander & Wolfe (1973), p. 187f.
+        # Used from https://rdrr.io/r/stats/cor.test.html
+        x = [44.4, 45.9, 41.9, 53.3, 44.7, 44.1, 50.7, 45.2, 60.1]
+        y = [2.6, 3.1, 2.5, 5.0, 3.6, 4.0, 5.2, 2.8, 3.8]
+        stat_expected = 0.4444444444444445
+        self.exact_test(x, y, alternative, rev, stat_expected, p_expected)
+
+    case_R_lt_171b = (list(zip(alternatives, p_n_lt_171b, [False]*3))
+                      + list(zip(alternatives, reversed(p_n_lt_171b),
+                                 [True]*3)))
+
+    @pytest.mark.parametrize("alternative, p_expected, rev", case_R_lt_171b)
+    def test_against_R_lt_171b(self, alternative, p_expected, rev):
+        np.random.seed(0)
+        x = np.random.rand(100)
+        y = np.random.rand(100)
+        stat_expected = -0.04686868686868687
+        self.exact_test(x, y, alternative, rev, stat_expected, p_expected)
+
+    case_R_lt_171c = (list(zip(alternatives, p_n_lt_171c, [False]*3))
+                      + list(zip(alternatives, reversed(p_n_lt_171c),
+                                 [True]*3)))
+
+    @pytest.mark.parametrize("alternative, p_expected, rev", case_R_lt_171c)
+    def test_against_R_lt_171c(self, alternative, p_expected, rev):
+        np.random.seed(0)
+        x = np.random.rand(170)
+        y = np.random.rand(170)
+        stat_expected = 0.1115906717716673
+        self.exact_test(x, y, alternative, rev, stat_expected, p_expected)
+
+    case_gt_171 = (list(zip(alternatives, [False]*3)) +
+                   list(zip(alternatives, [True]*3)))
+
+    @pytest.mark.parametrize("alternative, rev", case_gt_171)
+    def test_gt_171(self, alternative, rev):
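+        # 170! is the largest factorial representable as a float64 and 171!
+        # overflows, which is presumably why the exact-method cases are
+        # split at n = 171; at these sizes the exact and asymptotic
+        # p-values should agree to roughly one part in a thousand.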
+        np.random.seed(0)
+        x = np.random.rand(400)
+        y = np.random.rand(400)
+        res0 = stats.kendalltau(x, y, method='exact',
+                                alternative=alternative)
+        res1 = stats.kendalltau(x, y, method='asymptotic',
+                                alternative=alternative)
+        assert_equal(res0[0], res1[0])
+        assert_allclose(res0[1], res1[1], rtol=1e-3)
+
+    @pytest.mark.parametrize("method", ('exact', 'asymptotic'))
+    @pytest.mark.parametrize("alternative", ('two-sided', 'less', 'greater'))
+    def test_nan_policy(self, method, alternative):
+        # Test nan policies
+        x1 = [1, 2, 3, 4, 5]
+        x2 = [5, 6, 7, 8, 9]
+        x1nan = x1 + [np.nan]
+        x2nan = x2 + [np.nan]
+
+        # test nan_policy="propagate"
+        res_actual = stats.kendalltau(x1nan, x2nan,
+                                      method=method, alternative=alternative)
+        res_expected = (np.nan, np.nan)
+        assert_allclose(res_actual, res_expected)
+
+        # test nan_policy="omit"
+        res_actual = stats.kendalltau(x1nan, x2nan, nan_policy='omit',
+                                      method=method, alternative=alternative)
+        res_expected = stats.kendalltau(x1, x2, method=method,
+                                        alternative=alternative)
+        assert_allclose(res_actual, res_expected)
+
+        # test nan_policy="raise"
+        message = 'The input contains nan values'
+        with pytest.raises(ValueError, match=message):
+            stats.kendalltau(x1nan, x2nan, nan_policy='raise',
+                             method=method, alternative=alternative)
+
+        # test invalid nan_policy
+        message = "nan_policy must be one of..."
+        with pytest.raises(ValueError, match=message):
+            stats.kendalltau(x1nan, x2nan, nan_policy='ekki-ekki',
+                             method=method, alternative=alternative)
+
+
+def test_weightedtau():
+    x = [12, 2, 1, 12, 2]
+    y = [1, 4, 7, 1, 0]
+    tau, p_value = stats.weightedtau(x, y)
+    assert_approx_equal(tau, -0.56694968153682723)
+    assert_equal(np.nan, p_value)
+    tau, p_value = stats.weightedtau(x, y, additive=False)
+    assert_approx_equal(tau, -0.62205716951801038)
+    assert_equal(np.nan, p_value)
+    # This must be exactly Kendall's tau
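+    # (a constant weigher makes every exchange count the same, so the
+    # weighted statistic collapses to the ordinary tau-b computation)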
+    tau, p_value = stats.weightedtau(x, y, weigher=lambda x: 1)
+    assert_approx_equal(tau, -0.47140452079103173)
+    assert_equal(np.nan, p_value)
+
+    # test for namedtuple attribute results
+    res = stats.weightedtau(x, y)
+    attributes = ('correlation', 'pvalue')
+    check_named_results(res, attributes)
+    assert_equal(res.correlation, res.statistic)
+
+    # Asymmetric, ranked version
+    tau, p_value = stats.weightedtau(x, y, rank=None)
+    assert_approx_equal(tau, -0.4157652301037516)
+    assert_equal(np.nan, p_value)
+    tau, p_value = stats.weightedtau(y, x, rank=None)
+    assert_approx_equal(tau, -0.7181341329699029)
+    assert_equal(np.nan, p_value)
+    tau, p_value = stats.weightedtau(x, y, rank=None, additive=False)
+    assert_approx_equal(tau, -0.40644850966246893)
+    assert_equal(np.nan, p_value)
+    tau, p_value = stats.weightedtau(y, x, rank=None, additive=False)
+    assert_approx_equal(tau, -0.83766582937355172)
+    assert_equal(np.nan, p_value)
+    tau, p_value = stats.weightedtau(x, y, rank=False)
+    assert_approx_equal(tau, -0.51604397940261848)
+    assert_equal(np.nan, p_value)
+    # This must be exactly Kendall's tau
+    tau, p_value = stats.weightedtau(x, y, rank=True, weigher=lambda x: 1)
+    assert_approx_equal(tau, -0.47140452079103173)
+    assert_equal(np.nan, p_value)
+    tau, p_value = stats.weightedtau(y, x, rank=True, weigher=lambda x: 1)
+    assert_approx_equal(tau, -0.47140452079103173)
+    assert_equal(np.nan, p_value)
+    # Test argument conversion
+    tau, p_value = stats.weightedtau(np.asarray(x, dtype=np.float64), y)
+    assert_approx_equal(tau, -0.56694968153682723)
+    tau, p_value = stats.weightedtau(np.asarray(x, dtype=np.int16), y)
+    assert_approx_equal(tau, -0.56694968153682723)
+    tau, p_value = stats.weightedtau(np.asarray(x, dtype=np.float64),
+                                     np.asarray(y, dtype=np.float64))
+    assert_approx_equal(tau, -0.56694968153682723)
+    # All ties
+    tau, p_value = stats.weightedtau([], [])
+    assert_equal(np.nan, tau)
+    assert_equal(np.nan, p_value)
+    tau, p_value = stats.weightedtau([0], [0])
+    assert_equal(np.nan, tau)
+    assert_equal(np.nan, p_value)
+    # Size mismatches
+    assert_raises(ValueError, stats.weightedtau, [0, 1], [0, 1, 2])
+    assert_raises(ValueError, stats.weightedtau, [0, 1], [0, 1], [0])
+    # NaNs
+    x = [12, 2, 1, 12, 2]
+    y = [1, 4, 7, 1, np.nan]
+    tau, p_value = stats.weightedtau(x, y)
+    assert_approx_equal(tau, -0.56694968153682723)
+    x = [12, 2, np.nan, 12, 2]
+    tau, p_value = stats.weightedtau(x, y)
+    assert_approx_equal(tau, -0.56694968153682723)
+    # NaNs when the dtype of x and y are all np.float64
+    x = [12.0, 2.0, 1.0, 12.0, 2.0]
+    y = [1.0, 4.0, 7.0, 1.0, np.nan]
+    tau, p_value = stats.weightedtau(x, y)
+    assert_approx_equal(tau, -0.56694968153682723)
+    x = [12.0, 2.0, np.nan, 12.0, 2.0]
+    tau, p_value = stats.weightedtau(x, y)
+    assert_approx_equal(tau, -0.56694968153682723)
+    # NaNs when there are more than one NaN in x or y
+    x = [12.0, 2.0, 1.0, 12.0, 1.0]
+    y = [1.0, 4.0, 7.0, 1.0, 1.0]
+    tau, p_value = stats.weightedtau(x, y)
+    assert_approx_equal(tau, -0.6615242347139803)
+    x = [12.0, 2.0, np.nan, 12.0, np.nan]
+    tau, p_value = stats.weightedtau(x, y)
+    assert_approx_equal(tau, -0.6615242347139803)
+    y = [np.nan, 4.0, 7.0, np.nan, np.nan]
+    tau, p_value = stats.weightedtau(x, y)
+    assert_approx_equal(tau, -0.6615242347139803)
+
+
+def test_segfault_issue_9710():
+    # https://github.com/scipy/scipy/issues/9710
+    # This test was created to check for a segfault that, per the issue,
+    # only reproduced in optimized builds after calling the function twice.
+    stats.weightedtau([1], [1.0])
+    stats.weightedtau([1], [1.0])
+    # The call below also caused a segfault
+    stats.weightedtau([np.nan], [52])
+
+
+def test_kendall_tau_large():
+    n = 172
+    # Test omit policy
+    x = np.arange(n + 1).astype(float)
+    y = np.arange(n + 1).astype(float)
+    y[-1] = np.nan
+    _, pval = stats.kendalltau(x, y, method='exact', nan_policy='omit')
+    assert_equal(pval, 0.0)
+
+
+def test_weightedtau_vs_quadratic():
+    # Trivial quadratic implementation, all parameters mandatory
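+    # (it accumulates the weight of concordant and discordant pairs and
+    # normalizes tau-b style: the weight of ties in x (u) and in y (v) is
+    # removed from the total before taking the geometric mean)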
+    def wkq(x, y, rank, weigher, add):
+        tot = conc = disc = u = v = 0
+        for (i, j) in product(range(len(x)), range(len(x))):
+            w = weigher(rank[i]) + weigher(rank[j]) if add \
+                else weigher(rank[i]) * weigher(rank[j])
+            tot += w
+            if x[i] == x[j]:
+                u += w
+            if y[i] == y[j]:
+                v += w
+            if (x[i] < x[j] and y[i] < y[j]) or (x[i] > x[j] and y[i] > y[j]):
+                conc += w
+            elif (x[i] < x[j] and y[i] > y[j]) or (x[i] > x[j] and y[i] < y[j]):
+                disc += w
+        return (conc - disc) / np.sqrt(tot - u) / np.sqrt(tot - v)
+
+    def weigher(x):
+        return 1. / (x + 1)
+
+    np.random.seed(42)
+    for s in range(3, 10):
+        a = []
+        # Generate rankings with ties
+        for i in range(s):
+            a += [i]*i
+        b = list(a)
+        np.random.shuffle(a)
+        np.random.shuffle(b)
+        # First pass: use element indices as ranks
+        rank = np.arange(len(a), dtype=np.intp)
+        for _ in range(2):
+            for add in [True, False]:
+                expected = wkq(a, b, rank, weigher, add)
+                actual = stats.weightedtau(a, b, rank, weigher, add).statistic
+                assert_approx_equal(expected, actual)
+            # Second pass: use a random rank
+            np.random.shuffle(rank)
+
+
+class TestFindRepeats:
+
+    def test_basic(self):
+        a = [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 5]
+        res, nums = stats.find_repeats(a)
+        assert_array_equal(res, [1, 2, 3, 4])
+        assert_array_equal(nums, [3, 3, 2, 2])
+
+    def test_empty_result(self):
+        # Check that empty arrays are returned when there are no repeats.
+        for a in [[10, 20, 50, 30, 40], []]:
+            repeated, counts = stats.find_repeats(a)
+            assert_array_equal(repeated, [])
+            assert_array_equal(counts, [])
+
+
+class TestRegression:
+
+    def test_linregressBIGX(self):
+        # W.II.F.  Regress BIG on X.
+        result = stats.linregress(X, BIG)
+        assert_almost_equal(result.intercept, 99999990)
+        assert_almost_equal(result.rvalue, 1.0)
+        # The uncertainty ought to be almost zero
+        # since all points lie on a line
+        assert_almost_equal(result.stderr, 0.0)
+        assert_almost_equal(result.intercept_stderr, 0.0)
+
+    def test_regressXX(self):
+        # W.IV.B.  Regress X on X.
+        # The constant should be exactly 0 and the regression coefficient
+        # should be 1.  This is a perfectly valid regression and the
+        # program should not complain.
+        result = stats.linregress(X, X)
+        assert_almost_equal(result.intercept, 0.0)
+        assert_almost_equal(result.rvalue, 1.0)
+        # The uncertainty on regression through two points ought to be 0
+        assert_almost_equal(result.stderr, 0.0)
+        assert_almost_equal(result.intercept_stderr, 0.0)
+
+        # W.IV.C. Regress X on BIG and LITTLE (two predictors).  The program
+        # should tell you that this model is "singular" because BIG and
+        # LITTLE are linear combinations of each other.  Cryptic error
+        # messages are unacceptable here.  Singularity is the most
+        # fundamental regression error.
+        #
+        # Need to figure out how to handle multiple linear regression.
+        # This is not obvious.
+
+    def test_regressZEROX(self):
+        # W.IV.D. Regress ZERO on X.
+        # The program should inform you that ZERO has no variance or it should
+        # go ahead and compute the regression and report a correlation and
+        # total sum of squares of exactly 0.
+        result = stats.linregress(X, ZERO)
+        assert_almost_equal(result.intercept, 0.0)
+        assert_almost_equal(result.rvalue, 0.0)
+
+    def test_regress_simple(self):
+        # Regress a line with sinusoidal noise.
+        x = np.linspace(0, 100, 100)
+        y = 0.2 * np.linspace(0, 100, 100) + 10
+        y += np.sin(np.linspace(0, 20, 100))
+
+        result = stats.linregress(x, y)
+        lr = stats._stats_mstats_common.LinregressResult
+        assert_(isinstance(result, lr))
+        assert_almost_equal(result.stderr, 2.3957814497838803e-3)
+
+    def test_regress_alternative(self):
+        # test alternative parameter
+        x = np.linspace(0, 100, 100)
+        y = 0.2 * np.linspace(0, 100, 100) + 10  # slope is greater than zero
+        y += np.sin(np.linspace(0, 20, 100))
+
+        with pytest.raises(ValueError, match="alternative must be 'less'..."):
+            stats.linregress(x, y, alternative="ekki-ekki")
+
+        res1 = stats.linregress(x, y, alternative="two-sided")
+
+        # slope is greater than zero, so "less" p-value should be large
+        res2 = stats.linregress(x, y, alternative="less")
+        assert_allclose(res2.pvalue, 1 - (res1.pvalue / 2))
+
+        # slope is greater than zero, so "greater" p-value should be small
+        res3 = stats.linregress(x, y, alternative="greater")
+        assert_allclose(res3.pvalue, res1.pvalue / 2)
+
+        assert res1.rvalue == res2.rvalue == res3.rvalue
+
+    def test_regress_against_R(self):
+        # test against R `lm`
+        # options(digits=16)
+        # x <- c(151, 174, 138, 186, 128, 136, 179, 163, 152, 131)
+        # y <- c(63, 81, 56, 91, 47, 57, 76, 72, 62, 48)
+        # relation <- lm(y~x)
+        # print(summary(relation))
+
+        x = [151, 174, 138, 186, 128, 136, 179, 163, 152, 131]
+        y = [63, 81, 56, 91, 47, 57, 76, 72, 62, 48]
+        res = stats.linregress(x, y, alternative="two-sided")
+        # expected values from R's `lm` above
+        assert_allclose(res.slope, 0.6746104491292)
+        assert_allclose(res.intercept, -38.4550870760770)
+        assert_allclose(res.rvalue, np.sqrt(0.95478224775))
+        assert_allclose(res.pvalue, 1.16440531074e-06)
+        assert_allclose(res.stderr, 0.0519051424731)
+        assert_allclose(res.intercept_stderr, 8.0490133029927)
+
+    def test_regress_simple_onearg_rows(self):
+        # Regress a line with sinusoidal noise,
+        # passing a single input of shape (2, N).
+        x = np.linspace(0, 100, 100)
+        y = 0.2 * np.linspace(0, 100, 100) + 10
+        y += np.sin(np.linspace(0, 20, 100))
+        rows = np.vstack((x, y))
+
+        result = stats.linregress(rows)
+        assert_almost_equal(result.stderr, 2.3957814497838803e-3)
+        assert_almost_equal(result.intercept_stderr, 1.3866936078570702e-1)
+
+    def test_regress_simple_onearg_cols(self):
+        x = np.linspace(0, 100, 100)
+        y = 0.2 * np.linspace(0, 100, 100) + 10
+        y += np.sin(np.linspace(0, 20, 100))
+        columns = np.hstack((np.expand_dims(x, 1), np.expand_dims(y, 1)))
+
+        result = stats.linregress(columns)
+        assert_almost_equal(result.stderr, 2.3957814497838803e-3)
+        assert_almost_equal(result.intercept_stderr, 1.3866936078570702e-1)
+
+    def test_regress_shape_error(self):
+        # Check that a single input argument to linregress with wrong shape
+        # results in a ValueError.
+        assert_raises(ValueError, stats.linregress, np.ones((3, 3)))
+
+    def test_linregress(self):
+        # compared with multivariate ols with pinv
+        x = np.arange(11)
+        y = np.arange(5, 16)
+        y[[1, -2]] -= 1
+        y[[0, -1]] += 1
+
+        result = stats.linregress(x, y)
+
+        # This test used to use 'assert_array_almost_equal' but its
+        # formulation got confusing since LinregressResult became
+        # _lib._bunch._make_tuple_bunch instead of namedtuple
+        # (for backwards compatibility, see PR #12983)
+        assert_ae = lambda x, y: assert_almost_equal(x, y, decimal=14)
+        assert_ae(result.slope, 1.0)
+        assert_ae(result.intercept, 5.0)
+        assert_ae(result.rvalue, 0.98229948625750)
+        assert_ae(result.pvalue, 7.45259691e-008)
+        assert_ae(result.stderr, 0.063564172616372733)
+        assert_ae(result.intercept_stderr, 0.37605071654517686)
+
+    def test_regress_simple_negative_cor(self):
+        # If the slope of the regression is negative, the factor R tends
+        # to -1, not 1.  Sometimes rounding errors make it < -1,
+        # leading to stderr being NaN.
+        a, n = 1e-71, 100000
+        x = np.linspace(a, 2 * a, n)
+        y = np.linspace(2 * a, a, n)
+        result = stats.linregress(x, y)
+
+        # Make sure propagated numerical errors
+        # did not bring rvalue below -1 (or were coerced)
+        assert_(result.rvalue >= -1)
+        assert_almost_equal(result.rvalue, -1)
+
+        # slope and intercept stderror should stay numeric
+        assert_(not np.isnan(result.stderr))
+        assert_(not np.isnan(result.intercept_stderr))
+
+    def test_linregress_result_attributes(self):
+        x = np.linspace(0, 100, 100)
+        y = 0.2 * np.linspace(0, 100, 100) + 10
+        y += np.sin(np.linspace(0, 20, 100))
+        result = stats.linregress(x, y)
+
+        # Result is of a correct class
+        lr = stats._stats_mstats_common.LinregressResult
+        assert_(isinstance(result, lr))
+
+        # LinregressResult elements have correct names
+        attributes = ('slope', 'intercept', 'rvalue', 'pvalue', 'stderr')
+        check_named_results(result, attributes)
+        # Also check that the extra attribute (intercept_stderr) is present
+        assert 'intercept_stderr' in dir(result)
+
+    def test_regress_two_inputs(self):
+        # Regress a simple line formed by two points.
+        x = np.arange(2)
+        y = np.arange(3, 5)
+        result = stats.linregress(x, y)
+
+        # Non-horizontal line
+        assert_almost_equal(result.pvalue, 0.0)
+
+        # Zero error through two points
+        assert_almost_equal(result.stderr, 0.0)
+        assert_almost_equal(result.intercept_stderr, 0.0)
+
+    def test_regress_two_inputs_horizontal_line(self):
+        # Regress a horizontal line formed by two points.
+        x = np.arange(2)
+        y = np.ones(2)
+        result = stats.linregress(x, y)
+
+        # Horizontal line
+        assert_almost_equal(result.pvalue, 1.0)
+
+        # Zero error through two points
+        assert_almost_equal(result.stderr, 0.0)
+        assert_almost_equal(result.intercept_stderr, 0.0)
+
+    def test_nist_norris(self):
+        x = [0.2, 337.4, 118.2, 884.6, 10.1, 226.5, 666.3, 996.3, 448.6, 777.0,
+             558.2, 0.4, 0.6, 775.5, 666.9, 338.0, 447.5, 11.6, 556.0, 228.1,
+             995.8, 887.6, 120.2, 0.3, 0.3, 556.8, 339.1, 887.2, 999.0, 779.0,
+             11.1, 118.3, 229.2, 669.1, 448.9, 0.5]
+
+        y = [0.1, 338.8, 118.1, 888.0, 9.2, 228.1, 668.5, 998.5, 449.1, 778.9,
+             559.2, 0.3, 0.1, 778.1, 668.8, 339.3, 448.9, 10.8, 557.7, 228.3,
+             998.0, 888.8, 119.6, 0.3, 0.6, 557.6, 339.3, 888.0, 998.5, 778.9,
+             10.2, 117.6, 228.9, 668.4, 449.2, 0.2]
+
+        result = stats.linregress(x, y)
+
+        assert_almost_equal(result.slope, 1.00211681802045)
+        assert_almost_equal(result.intercept, -0.262323073774029)
+        assert_almost_equal(result.rvalue**2, 0.999993745883712)
+        assert_almost_equal(result.pvalue, 0.0)
+        assert_almost_equal(result.stderr, 0.00042979684820)
+        assert_almost_equal(result.intercept_stderr, 0.23281823430153)
+
+    def test_compare_to_polyfit(self):
+        x = np.linspace(0, 100, 100)
+        y = 0.2 * np.linspace(0, 100, 100) + 10
+        y += np.sin(np.linspace(0, 20, 100))
+        result = stats.linregress(x, y)
+        poly = np.polyfit(x, y, 1)  # Fit 1st degree polynomial
+
+        # Make sure linear regression slope and intercept
+        # match with results from numpy polyfit
+        assert_almost_equal(result.slope, poly[0])
+        assert_almost_equal(result.intercept, poly[1])
+
+    def test_empty_input(self):
+        assert_raises(ValueError, stats.linregress, [], [])
+
+    def test_nan_input(self):
+        x = np.arange(10.)
+        x[9] = np.nan
+
+        with np.errstate(invalid="ignore"):
+            result = stats.linregress(x, x)
+
+        # Make sure the result still comes back as `LinregressResult`
+        lr = stats._stats_mstats_common.LinregressResult
+        assert_(isinstance(result, lr))
+        assert_array_equal(result, (np.nan,)*5)
+        assert_equal(result.intercept_stderr, np.nan)
+
+    def test_identical_x(self):
+        x = np.zeros(10)
+        y = np.random.random(10)
+        msg = "Cannot calculate a linear regression"
+        with assert_raises(ValueError, match=msg):
+            stats.linregress(x, y)
+
+
+def test_theilslopes():
+    # Basic slope test.
+    slope, intercept, lower, upper = stats.theilslopes([0, 1, 1])
+    assert_almost_equal(slope, 0.5)
+    assert_almost_equal(intercept, 0.5)
+
+    msg = ("method must be either 'joint' or 'separate'."
+           "'joint_separate' is invalid.")
+    with pytest.raises(ValueError, match=msg):
+        stats.theilslopes([0, 1, 1], method='joint_separate')
+
+    slope, intercept, lower, upper = stats.theilslopes([0, 1, 1],
+                                                       method='joint')
+    assert_almost_equal(slope, 0.5)
+    assert_almost_equal(intercept, 0.0)
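+    # Worked check of the two intercept conventions (assuming the
+    # documented definitions): with x defaulting to [0, 1, 2] and slope
+    # 0.5, 'separate' gives median(y) - slope*median(x) = 1 - 0.5 = 0.5,
+    # while 'joint' gives median(y - slope*x) = median([0, 0.5, 0]) = 0.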
+
+    # Test of confidence intervals.
+    x = [1, 2, 3, 4, 10, 12, 18]
+    y = [9, 15, 19, 20, 45, 55, 78]
+    slope, intercept, lower, upper = stats.theilslopes(y, x, 0.07,
+                                                       method='separate')
+    assert_almost_equal(slope, 4)
+    assert_almost_equal(intercept, 4.0)
+    assert_almost_equal(upper, 4.38, decimal=2)
+    assert_almost_equal(lower, 3.71, decimal=2)
+
+    slope, intercept, lower, upper = stats.theilslopes(y, x, 0.07,
+                                                       method='joint')
+    assert_almost_equal(slope, 4)
+    assert_almost_equal(intercept, 6.0)
+    assert_almost_equal(upper, 4.38, decimal=2)
+    assert_almost_equal(lower, 3.71, decimal=2)
+
+
+def test_cumfreq():
+    x = [1, 4, 2, 1, 3, 1]
+    cumfreqs, lowlim, binsize, extrapoints = stats.cumfreq(x, numbins=4)
+    assert_array_almost_equal(cumfreqs, np.array([3., 4., 5., 6.]))
+    cumfreqs, lowlim, binsize, extrapoints = stats.cumfreq(
+        x, numbins=4, defaultreallimits=(1.5, 5))
+    assert_(extrapoints == 3)
+
+    # test for namedtuple attribute results
+    attributes = ('cumcount', 'lowerlimit', 'binsize', 'extrapoints')
+    res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5))
+    check_named_results(res, attributes)
+
+
+def test_relfreq():
+    a = np.array([1, 4, 2, 1, 3, 1])
+    relfreqs, lowlim, binsize, extrapoints = stats.relfreq(a, numbins=4)
+    assert_array_almost_equal(relfreqs,
+                              array([0.5, 0.16666667, 0.16666667, 0.16666667]))
+
+    # test for namedtuple attribute results
+    attributes = ('frequency', 'lowerlimit', 'binsize', 'extrapoints')
+    res = stats.relfreq(a, numbins=4)
+    check_named_results(res, attributes)
+
+    # check array_like input is accepted
+    relfreqs2, lowlim, binsize, extrapoints = stats.relfreq([1, 4, 2, 1, 3, 1],
+                                                            numbins=4)
+    assert_array_almost_equal(relfreqs, relfreqs2)
+
+
+class TestScoreatpercentile:
+    def setup_method(self):
+        self.a1 = [3, 4, 5, 10, -3, -5, 6]
+        self.a2 = [3, -6, -2, 8, 7, 4, 2, 1]
+        self.a3 = [3., 4, 5, 10, -3, -5, -6, 7.0]
+
+    def test_basic(self):
+        x = arange(8) * 0.5
+        assert_equal(stats.scoreatpercentile(x, 0), 0.)
+        assert_equal(stats.scoreatpercentile(x, 100), 3.5)
+        assert_equal(stats.scoreatpercentile(x, 50), 1.75)
+
+    def test_fraction(self):
+        scoreatperc = stats.scoreatpercentile
+
+        # Test defaults
+        assert_equal(scoreatperc(list(range(10)), 50), 4.5)
+        assert_equal(scoreatperc(list(range(10)), 50, (2,7)), 4.5)
+        assert_equal(scoreatperc(list(range(100)), 50, limit=(1, 8)), 4.5)
+        assert_equal(scoreatperc(np.array([1, 10,100]), 50, (10,100)), 55)
+        assert_equal(scoreatperc(np.array([1, 10,100]), 50, (1,10)), 5.5)
+
+        # explicitly specify interpolation_method 'fraction' (the default)
+        assert_equal(scoreatperc(list(range(10)), 50, interpolation_method='fraction'),
+                     4.5)
+        assert_equal(scoreatperc(list(range(10)), 50, limit=(2, 7),
+                                 interpolation_method='fraction'),
+                     4.5)
+        assert_equal(scoreatperc(list(range(100)), 50, limit=(1, 8),
+                                 interpolation_method='fraction'),
+                     4.5)
+        assert_equal(scoreatperc(np.array([1, 10,100]), 50, (10, 100),
+                                 interpolation_method='fraction'),
+                     55)
+        assert_equal(scoreatperc(np.array([1, 10,100]), 50, (1,10),
+                                 interpolation_method='fraction'),
+                     5.5)
+
+    def test_lower_higher(self):
+        scoreatperc = stats.scoreatpercentile
+
+        # interpolation_method 'lower'/'higher'
+        assert_equal(scoreatperc(list(range(10)), 50,
+                                 interpolation_method='lower'), 4)
+        assert_equal(scoreatperc(list(range(10)), 50,
+                                 interpolation_method='higher'), 5)
+        assert_equal(scoreatperc(list(range(10)), 50, (2,7),
+                                 interpolation_method='lower'), 4)
+        assert_equal(scoreatperc(list(range(10)), 50, limit=(2,7),
+                                 interpolation_method='higher'), 5)
+        assert_equal(scoreatperc(list(range(100)), 50, (1,8),
+                                 interpolation_method='lower'), 4)
+        assert_equal(scoreatperc(list(range(100)), 50, (1,8),
+                                 interpolation_method='higher'), 5)
+        assert_equal(scoreatperc(np.array([1, 10, 100]), 50, (10, 100),
+                                 interpolation_method='lower'), 10)
+        assert_equal(scoreatperc(np.array([1, 10, 100]), 50, limit=(10, 100),
+                                 interpolation_method='higher'), 100)
+        assert_equal(scoreatperc(np.array([1, 10, 100]), 50, (1, 10),
+                                 interpolation_method='lower'), 1)
+        assert_equal(scoreatperc(np.array([1, 10, 100]), 50, limit=(1, 10),
+                                 interpolation_method='higher'), 10)
+
+    def test_sequence_per(self):
+        x = arange(8) * 0.5
+        expected = np.array([0, 3.5, 1.75])
+        res = stats.scoreatpercentile(x, [0, 100, 50])
+        assert_allclose(res, expected)
+        assert_(isinstance(res, np.ndarray))
+        # Test with ndarray.  Regression test for gh-2861
+        assert_allclose(stats.scoreatpercentile(x, np.array([0, 100, 50])),
+                        expected)
+        # Also test combination of 2-D array, axis not None and array-like per
+        res2 = stats.scoreatpercentile(np.arange(12).reshape((3,4)),
+                                       np.array([0, 1, 100, 100]), axis=1)
+        expected2 = array([[0, 4, 8],
+                           [0.03, 4.03, 8.03],
+                           [3, 7, 11],
+                           [3, 7, 11]])
+        assert_allclose(res2, expected2)
+
+    def test_axis(self):
+        scoreatperc = stats.scoreatpercentile
+        x = arange(12).reshape(3, 4)
+
+        assert_equal(scoreatperc(x, (25, 50, 100)), [2.75, 5.5, 11.0])
+
+        r0 = [[2, 3, 4, 5], [4, 5, 6, 7], [8, 9, 10, 11]]
+        assert_equal(scoreatperc(x, (25, 50, 100), axis=0), r0)
+
+        r1 = [[0.75, 4.75, 8.75], [1.5, 5.5, 9.5], [3, 7, 11]]
+        assert_equal(scoreatperc(x, (25, 50, 100), axis=1), r1)
+
+        x = array([[1, 1, 1],
+                   [1, 1, 1],
+                   [4, 4, 3],
+                   [1, 1, 1],
+                   [1, 1, 1]])
+        score = stats.scoreatpercentile(x, 50)
+        assert_equal(score.shape, ())
+        assert_equal(score, 1.0)
+        score = stats.scoreatpercentile(x, 50, axis=0)
+        assert_equal(score.shape, (3,))
+        assert_equal(score, [1, 1, 1])
+
+    def test_exception(self):
+        assert_raises(ValueError, stats.scoreatpercentile, [1, 2], 56,
+            interpolation_method='foobar')
+        assert_raises(ValueError, stats.scoreatpercentile, [1], 101)
+        assert_raises(ValueError, stats.scoreatpercentile, [1], -1)
+
+    def test_empty(self):
+        assert_equal(stats.scoreatpercentile([], 50), np.nan)
+        assert_equal(stats.scoreatpercentile(np.array([[], []]), 50), np.nan)
+        assert_equal(stats.scoreatpercentile([], [50, 99]), [np.nan, np.nan])
+
+
+@pytest.mark.filterwarnings('ignore::FutureWarning')
+class TestMode:
+
+    deprecation_msg = r"Support for non-numeric arrays has been deprecated"
+
+    def test_empty(self):
+        vals, counts = stats.mode([])
+        assert_equal(vals, np.array([]))
+        assert_equal(counts, np.array([]))
+
+    def test_scalar(self):
+        vals, counts = stats.mode(4.)
+        assert_equal(vals, np.array([4.]))
+        assert_equal(counts, np.array([1]))
+
+    def test_basic(self):
+        data1 = [3, 5, 1, 10, 23, 3, 2, 6, 8, 6, 10, 6]
+        vals = stats.mode(data1)
+        assert_equal(vals[0][0], 6)
+        assert_equal(vals[1][0], 3)
+
+    def test_axes(self):
+        data1 = [10, 10, 30, 40]
+        data2 = [10, 10, 10, 10]
+        data3 = [20, 10, 20, 20]
+        data4 = [30, 30, 30, 30]
+        data5 = [40, 30, 30, 30]
+        arr = np.array([data1, data2, data3, data4, data5])
+
+        vals = stats.mode(arr, axis=None)
+        assert_equal(vals[0], np.array([30]))
+        assert_equal(vals[1], np.array([8]))
+
+        vals = stats.mode(arr, axis=0)
+        assert_equal(vals[0], np.array([[10, 10, 30, 30]]))
+        assert_equal(vals[1], np.array([[2, 3, 3, 2]]))
+
+        vals = stats.mode(arr, axis=1)
+        assert_equal(vals[0], np.array([[10], [10], [20], [30], [30]]))
+        assert_equal(vals[1], np.array([[2], [4], [3], [4], [3]]))
+
+    @pytest.mark.parametrize('axis', np.arange(-4, 0))
+    def test_negative_axes_gh_15375(self, axis):
+        np.random.seed(984213899)
+        a = np.random.rand(10, 11, 12, 13)
+        res0 = stats.mode(a, axis=a.ndim+axis)
+        res1 = stats.mode(a, axis=axis)
+        np.testing.assert_array_equal(res0, res1)
+
+    def test_strings(self):
+        data1 = ['rain', 'showers', 'showers']
+        with pytest.warns(DeprecationWarning, match=self.deprecation_msg):
+            vals = stats.mode(data1)
+        assert_equal(vals[0][0], 'showers')
+        assert_equal(vals[1][0], 2)
+
+    def test_mixed_objects(self):
+        objects = [10, True, np.nan, 'hello', 10]
+        arr = np.empty((5,), dtype=object)
+        arr[:] = objects
+        with pytest.warns(DeprecationWarning, match=self.deprecation_msg):
+            vals = stats.mode(arr)
+        assert_equal(vals[0][0], 10)
+        assert_equal(vals[1][0], 2)
+
+    def test_objects(self):
+        # Python objects must be sortable (__lt__ + __eq__) and define
+        # __ne__ for np.unique to work; __hash__ is needed for set().
+        class Point:
+            def __init__(self, x):
+                self.x = x
+
+            def __eq__(self, other):
+                return self.x == other.x
+
+            def __ne__(self, other):
+                return self.x != other.x
+
+            def __lt__(self, other):
+                return self.x < other.x
+
+            def __hash__(self):
+                return hash(self.x)
+
+        points = [Point(x) for x in [1, 2, 3, 4, 3, 2, 2, 2]]
+        arr = np.empty((8,), dtype=object)
+        arr[:] = points
+        assert_(len(set(points)) == 4)
+        assert_equal(np.unique(arr).shape, (4,))
+        with pytest.warns(DeprecationWarning, match=self.deprecation_msg):
+            vals = stats.mode(arr)
+
+        assert_equal(vals[0][0], Point(2))
+        assert_equal(vals[1][0], 4)
+
+    def test_mode_result_attributes(self):
+        data1 = [3, 5, 1, 10, 23, 3, 2, 6, 8, 6, 10, 6]
+        data2 = []
+        actual = stats.mode(data1)
+        attributes = ('mode', 'count')
+        check_named_results(actual, attributes)
+        actual2 = stats.mode(data2)
+        check_named_results(actual2, attributes)
+
+    def test_mode_nan(self):
+        data1 = [3, np.nan, 5, 1, 10, 23, 3, 2, 6, 8, 6, 10, 6]
+        actual = stats.mode(data1)
+        assert_equal(actual, (6, 3))
+
+        actual = stats.mode(data1, nan_policy='omit')
+        assert_equal(actual, (6, 3))
+        assert_raises(ValueError, stats.mode, data1, nan_policy='raise')
+        assert_raises(ValueError, stats.mode, data1, nan_policy='foobar')
+
+    @pytest.mark.parametrize("data", [
+        [3, 5, 1, 1, 3],
+        [3, np.nan, 5, 1, 1, 3],
+        [3, 5, 1],
+        [3, np.nan, 5, 1],
+    ])
+    def test_smallest_equal(self, data):
+        result = stats.mode(data, nan_policy='omit')
+        assert_equal(result[0][0], 1)
+
+    def test_obj_arrays_ndim(self):
+        # regression test for gh-9645: `mode` fails for object arrays with ndim > 1
+        data = [['Oxidation'], ['Oxidation'], ['Polymerization'], ['Reduction']]
+        ar = np.array(data, dtype=object)
+        with pytest.warns(DeprecationWarning, match=self.deprecation_msg):
+            m = stats.mode(ar, axis=0)
+        assert np.all(m.mode == 'Oxidation') and m.mode.shape == (1, 1)
+        assert np.all(m.count == 2) and m.count.shape == (1, 1)
+
+        data1 = data + [[np.nan]]
+        ar1 = np.array(data1, dtype=object)
+        with pytest.warns(DeprecationWarning, match=self.deprecation_msg):
+            m = stats.mode(ar1, axis=0)
+        assert np.all(m.mode == 'Oxidation') and m.mode.shape == (1, 1)
+        assert np.all(m.count == 2) and m.count.shape == (1, 1)
+
+    @pytest.mark.parametrize('axis', np.arange(-3, 3))
+    @pytest.mark.parametrize('dtype', [np.float64, 'object'])
+    def test_mode_shape_gh_9955(self, axis, dtype):
+        rng = np.random.default_rng(984213899)
+        a = rng.uniform(size=(3, 4, 5)).astype(dtype)
+        if dtype == 'object':
+            with pytest.warns(DeprecationWarning, match=self.deprecation_msg):
+                res = stats.mode(a, axis=axis, keepdims=False)
+        else:
+            res = stats.mode(a, axis=axis, keepdims=False)
+        reference_shape = list(a.shape)
+        reference_shape.pop(axis)
+        np.testing.assert_array_equal(res.mode.shape, reference_shape)
+        np.testing.assert_array_equal(res.count.shape, reference_shape)
+
+    def test_nan_policy_propagate_gh_9815(self):
+        # mode should treat np.nan as it would any other object when
+        # nan_policy='propagate'
+        a = [2, np.nan, 1, np.nan]
+        if NumpyVersion(np.__version__) >= '1.21.0':
+            res = stats.mode(a)
+            assert np.isnan(res.mode[0]) and res.count[0] == 2
+
+        # mode should work on object arrays. There were issues when
+        # objects do not support comparison operations.
+        a = np.array(a, dtype='object')
+        with pytest.warns(DeprecationWarning, match=self.deprecation_msg):
+            res = stats.mode(a)
+        assert np.isnan(res.mode[0]) and res.count[0] == 2
+
+        a = np.array([10, True, 'hello', 10], dtype='object')
+        with pytest.warns(DeprecationWarning, match=self.deprecation_msg):
+            res = stats.mode(a)
+        assert_array_equal(res, [[10], [2]])
+
+    def test_keepdims(self):
+        # test empty arrays (handled by `np.mean`)
+        a = np.zeros((1, 2, 3, 0))
+
+        res = stats.mode(a, axis=1, keepdims=False)
+        assert res.mode.shape == res.count.shape == (1, 3, 0)
+
+        res = stats.mode(a, axis=1, keepdims=True)
+        assert res.mode.shape == res.count.shape == (1, 1, 3, 0)
+
+        # test nan_policy='propagate'
+        a = [[1, 3, 3, np.nan], [1, 1, np.nan, 1]]
+
+        res = stats.mode(a, axis=1, keepdims=False)
+        assert_array_equal(res.mode, [3, 1])
+        assert_array_equal(res.count, [2, 3])
+
+        res = stats.mode(a, axis=1, keepdims=True)
+        assert_array_equal(res.mode, [[3], [1]])
+        assert_array_equal(res.count, [[2], [3]])
+
+        a = np.array(a)
+        res = stats.mode(a, axis=None, keepdims=False)
+        ref = stats.mode(a.ravel(), keepdims=False)
+        assert_array_equal(res, ref)
+        assert res.mode.shape == ref.mode.shape == ()
+
+        res = stats.mode(a, axis=None, keepdims=True)
+        ref = stats.mode(a.ravel(), keepdims=True)
+        assert_array_equal(res, ref)
+        assert res.mode.shape == ref.mode.shape == (1,)
+
+        # test nan_policy='omit'
+        a = [[1, np.nan, np.nan, np.nan, 1],
+             [np.nan, np.nan, np.nan, np.nan, 2],
+             [1, 2, np.nan, 5, 5]]
+
+        res = stats.mode(a, axis=1, keepdims=False, nan_policy='omit')
+        assert_array_equal(res.mode, [1, 2, 5])
+        assert_array_equal(res.count, [2, 1, 2])
+
+        res = stats.mode(a, axis=1, keepdims=True, nan_policy='omit')
+        assert_array_equal(res.mode, [[1], [2], [5]])
+        assert_array_equal(res.count, [[2], [1], [2]])
+
+        a = np.array(a)
+        res = stats.mode(a, axis=None, keepdims=False, nan_policy='omit')
+        ref = stats.mode(a.ravel(), keepdims=False, nan_policy='omit')
+        assert_array_equal(res, ref)
+        assert res.mode.shape == ref.mode.shape == ()
+
+        res = stats.mode(a, axis=None, keepdims=True, nan_policy='omit')
+        ref = stats.mode(a.ravel(), keepdims=True, nan_policy='omit')
+        assert_array_equal(res, ref)
+        assert res.mode.shape == ref.mode.shape == (1,)
+
+    def test_gh16952(self):
+        # Check that bug reported in gh-16952 is resolved
+        shape = (4, 3)
+        data = np.ones(shape)
+        data[0, 0] = np.nan
+        res = stats.mode(a=data, axis=1, keepdims=False, nan_policy="omit")
+        assert_array_equal(res.mode, [1, 1, 1, 1])
+        assert_array_equal(res.count, [2, 3, 3, 3])
+
+
+def test_mode_futurewarning():
+    a = [1, 2, 5, 3, 5]
+
+    future_msg = "Unlike other reduction functions..."
+    with pytest.warns(FutureWarning, match=future_msg):
+        res = stats.mode(a)
+    assert_array_equal(res, ([5], [2]))
+
+    # no FutureWarning if `keepdims` is specified
+    res = stats.mode(a, keepdims=True)
+    assert_array_equal(res, ([5], [2]))
+
+    res = stats.mode(a, keepdims=False)
+    assert_array_equal(res, [5, 2])
+
+
+class TestSEM:
+
+    testcase = [1, 2, 3, 4]
+    scalar_testcase = 4.
+
+    def test_sem(self):
+        # This is not in R, so used:
+        #     sqrt(var(testcase)*3/4)/sqrt(3)
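+        # Worked check: for testcase = [1, 2, 3, 4] the sample variance
+        # (ddof=1) is 5/3, so sem = sqrt(5/3) / sqrt(4) ~= 0.6454972244,
+        # the value asserted below.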
+
+        # y = stats.sem(self.shoes[0])
+        # assert_approx_equal(y,0.775177399)
+        with suppress_warnings() as sup, np.errstate(invalid="ignore"):
+            sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice")
+            y = stats.sem(self.scalar_testcase)
+        assert_(np.isnan(y))
+
+        y = stats.sem(self.testcase)
+        assert_approx_equal(y, 0.6454972244)
+        n = len(self.testcase)
+        assert_allclose(stats.sem(self.testcase, ddof=0) * np.sqrt(n/(n-2)),
+                        stats.sem(self.testcase, ddof=2))
+
+        x = np.arange(10.)
+        x[9] = np.nan
+        assert_equal(stats.sem(x), np.nan)
+        assert_equal(stats.sem(x, nan_policy='omit'), 0.9128709291752769)
+        assert_raises(ValueError, stats.sem, x, nan_policy='raise')
+        assert_raises(ValueError, stats.sem, x, nan_policy='foobar')
+
+
+class TestZmapZscore:
+
+    @pytest.mark.parametrize(
+        'x, y',
+        [([1, 2, 3, 4], [1, 2, 3, 4]),
+         ([1, 2, 3], [0, 1, 2, 3, 4])]
+    )
+    def test_zmap(self, x, y):
+        z = stats.zmap(x, y)
+        # For these simple cases, calculate the expected result directly
+        # by using the formula for the z-score.
+        expected = (x - np.mean(y))/np.std(y)
+        assert_allclose(z, expected, rtol=1e-12)
+
+    def test_zmap_axis(self):
+        # Test use of 'axis' keyword in zmap.
+        x = np.array([[0.0, 0.0, 1.0, 1.0],
+                      [1.0, 1.0, 1.0, 2.0],
+                      [2.0, 0.0, 2.0, 0.0]])
+
+        t1 = 1.0/np.sqrt(2.0/3)
+        t2 = np.sqrt(3.)/3
+        t3 = np.sqrt(2.)
+
+        z0 = stats.zmap(x, x, axis=0)
+        z1 = stats.zmap(x, x, axis=1)
+
+        z0_expected = [[-t1, -t3/2, -t3/2, 0.0],
+                       [0.0, t3, -t3/2, t1],
+                       [t1, -t3/2, t3, -t1]]
+        z1_expected = [[-1.0, -1.0, 1.0, 1.0],
+                       [-t2, -t2, -t2, np.sqrt(3.)],
+                       [1.0, -1.0, 1.0, -1.0]]
+
+        assert_array_almost_equal(z0, z0_expected)
+        assert_array_almost_equal(z1, z1_expected)
+
+    def test_zmap_ddof(self):
+        # Test use of 'ddof' keyword in zmap.
+        x = np.array([[0.0, 0.0, 1.0, 1.0],
+                      [0.0, 1.0, 2.0, 3.0]])
+
+        z = stats.zmap(x, x, axis=1, ddof=1)
+
+        z0_expected = np.array([-0.5, -0.5, 0.5, 0.5])/(1.0/np.sqrt(3))
+        z1_expected = np.array([-1.5, -0.5, 0.5, 1.5])/(np.sqrt(5./3))
+        assert_array_almost_equal(z[0], z0_expected)
+        assert_array_almost_equal(z[1], z1_expected)
+
+    @pytest.mark.parametrize('ddof', [0, 2])
+    def test_zmap_nan_policy_omit(self, ddof):
+        # nans in `scores` are propagated, regardless of `nan_policy`.
+        # `nan_policy` only affects how nans in `compare` are handled.
+        scores = np.array([-3, -1, 2, np.nan])
+        compare = np.array([-8, -3, 2, 7, 12, np.nan])
+        z = stats.zmap(scores, compare, ddof=ddof, nan_policy='omit')
+        assert_allclose(z, stats.zmap(scores, compare[~np.isnan(compare)],
+                                      ddof=ddof))
+
+    @pytest.mark.parametrize('ddof', [0, 2])
+    def test_zmap_nan_policy_omit_with_axis(self, ddof):
+        scores = np.arange(-5.0, 9.0).reshape(2, -1)
+        compare = np.linspace(-8, 6, 24).reshape(2, -1)
+        compare[0, 4] = np.nan
+        compare[0, 6] = np.nan
+        compare[1, 1] = np.nan
+        z = stats.zmap(scores, compare, nan_policy='omit', axis=1, ddof=ddof)
+        expected = np.array([stats.zmap(scores[0],
+                                        compare[0][~np.isnan(compare[0])],
+                                        ddof=ddof),
+                             stats.zmap(scores[1],
+                                        compare[1][~np.isnan(compare[1])],
+                                        ddof=ddof)])
+        assert_allclose(z, expected, rtol=1e-14)
+
+    def test_zmap_nan_policy_raise(self):
+        scores = np.array([1, 2, 3])
+        compare = np.array([-8, -3, 2, 7, 12, np.nan])
+        with pytest.raises(ValueError, match='input contains nan'):
+            stats.zmap(scores, compare, nan_policy='raise')
+
+    def test_zscore(self):
+        # not in R, so tested by using:
+        #    (testcase[i] - mean(testcase, axis=0)) / sqrt(var(testcase) * 3/4)
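+        # Worked check: mean = 2.5 and population std = sqrt(1.25), so the
+        # first entry is (1 - 2.5) / sqrt(1.25) ~= -1.3416407864999.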
+        y = stats.zscore([1, 2, 3, 4])
+        desired = ([-1.3416407864999, -0.44721359549996, 0.44721359549996,
+                    1.3416407864999])
+        assert_array_almost_equal(desired, y, decimal=12)
+
+    def test_zscore_axis(self):
+        # Test use of 'axis' keyword in zscore.
+        x = np.array([[0.0, 0.0, 1.0, 1.0],
+                      [1.0, 1.0, 1.0, 2.0],
+                      [2.0, 0.0, 2.0, 0.0]])
+
+        t1 = 1.0/np.sqrt(2.0/3)
+        t2 = np.sqrt(3.)/3
+        t3 = np.sqrt(2.)
+
+        z0 = stats.zscore(x, axis=0)
+        z1 = stats.zscore(x, axis=1)
+
+        z0_expected = [[-t1, -t3/2, -t3/2, 0.0],
+                       [0.0, t3, -t3/2, t1],
+                       [t1, -t3/2, t3, -t1]]
+        z1_expected = [[-1.0, -1.0, 1.0, 1.0],
+                       [-t2, -t2, -t2, np.sqrt(3.)],
+                       [1.0, -1.0, 1.0, -1.0]]
+
+        assert_array_almost_equal(z0, z0_expected)
+        assert_array_almost_equal(z1, z1_expected)
+
+    def test_zscore_ddof(self):
+        # Test use of 'ddof' keyword in zscore.
+        x = np.array([[0.0, 0.0, 1.0, 1.0],
+                      [0.0, 1.0, 2.0, 3.0]])
+
+        z = stats.zscore(x, axis=1, ddof=1)
+
+        z0_expected = np.array([-0.5, -0.5, 0.5, 0.5])/(1.0/np.sqrt(3))
+        z1_expected = np.array([-1.5, -0.5, 0.5, 1.5])/(np.sqrt(5./3))
+        assert_array_almost_equal(z[0], z0_expected)
+        assert_array_almost_equal(z[1], z1_expected)
+
+    def test_zscore_nan_propagate(self):
+        x = np.array([1, 2, np.nan, 4, 5])
+        z = stats.zscore(x, nan_policy='propagate')
+        assert all(np.isnan(z))
+
+    def test_zscore_nan_omit(self):
+        x = np.array([1, 2, np.nan, 4, 5])
+
+        z = stats.zscore(x, nan_policy='omit')
+
+        expected = np.array([-1.2649110640673518,
+                             -0.6324555320336759,
+                             np.nan,
+                             0.6324555320336759,
+                             1.2649110640673518
+                             ])
+        assert_array_almost_equal(z, expected)
+
+    def test_zscore_nan_omit_with_ddof(self):
+        x = np.array([np.nan, 1.0, 3.0, 5.0, 7.0, 9.0])
+        z = stats.zscore(x, ddof=1, nan_policy='omit')
+        expected = np.r_[np.nan, stats.zscore(x[1:], ddof=1)]
+        assert_allclose(z, expected, rtol=1e-13)
+
+    def test_zscore_nan_raise(self):
+        x = np.array([1, 2, np.nan, 4, 5])
+
+        assert_raises(ValueError, stats.zscore, x, nan_policy='raise')
+
+    def test_zscore_constant_input_1d(self):
+        x = [-0.087] * 3
+        z = stats.zscore(x)
+        assert_equal(z, np.full(len(x), np.nan))
+
+    def test_zscore_constant_input_2d(self):
+        x = np.array([[10.0, 10.0, 10.0, 10.0],
+                      [10.0, 11.0, 12.0, 13.0]])
+        z0 = stats.zscore(x, axis=0)
+        assert_equal(z0, np.array([[np.nan, -1.0, -1.0, -1.0],
+                                   [np.nan, 1.0, 1.0, 1.0]]))
+        z1 = stats.zscore(x, axis=1)
+        assert_equal(z1, np.array([[np.nan, np.nan, np.nan, np.nan],
+                                   stats.zscore(x[1])]))
+        z = stats.zscore(x, axis=None)
+        assert_equal(z, stats.zscore(x.ravel()).reshape(x.shape))
+
+        y = np.ones((3, 6))
+        z = stats.zscore(y, axis=None)
+        assert_equal(z, np.full(y.shape, np.nan))
+
+    def test_zscore_constant_input_2d_nan_policy_omit(self):
+        x = np.array([[10.0, 10.0, 10.0, 10.0],
+                      [10.0, 11.0, 12.0, np.nan],
+                      [10.0, 12.0, np.nan, 10.0]])
+        z0 = stats.zscore(x, nan_policy='omit', axis=0)
+        s = np.sqrt(3/2)
+        s2 = np.sqrt(2)
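+        # (s and s2 follow from the population std of the surviving values:
+        # e.g. column [10, 11, 12] has std sqrt(2/3), so z = -/+ sqrt(3/2),
+        # and row [10, 12, 10] has std 2*sqrt(2)/3, giving multiples of
+        # sqrt(2))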
+        assert_allclose(z0, np.array([[np.nan, -s, -1.0, np.nan],
+                                      [np.nan, 0, 1.0, np.nan],
+                                      [np.nan, s, np.nan, np.nan]]))
+        z1 = stats.zscore(x, nan_policy='omit', axis=1)
+        assert_allclose(z1, np.array([[np.nan, np.nan, np.nan, np.nan],
+                                      [-s, 0, s, np.nan],
+                                      [-s2/2, s2, np.nan, -s2/2]]))
+
+    def test_zscore_2d_all_nan_row(self):
+        # A row is all nan, and we use axis=1.
+        x = np.array([[np.nan, np.nan, np.nan, np.nan],
+                      [10.0, 10.0, 12.0, 12.0]])
+        z = stats.zscore(x, nan_policy='omit', axis=1)
+        assert_equal(z, np.array([[np.nan, np.nan, np.nan, np.nan],
+                                  [-1.0, -1.0, 1.0, 1.0]]))
+
+    def test_zscore_2d_all_nan(self):
+        # The entire 2d array is nan, and we use axis=None.
+        y = np.full((2, 3), np.nan)
+        z = stats.zscore(y, nan_policy='omit', axis=None)
+        assert_equal(z, y)
+
+    @pytest.mark.parametrize('x', [np.array([]), np.zeros((3, 0, 5))])
+    def test_zscore_empty_input(self, x):
+        z = stats.zscore(x)
+        assert_equal(z, x)
+
+    def test_gzscore_normal_array(self):
+        z = stats.gzscore([1, 2, 3, 4])
+        desired = ([-1.526072095151, -0.194700599824, 0.584101799472,
+                    1.136670895503])
+        assert_allclose(desired, z)
+
+    def test_gzscore_masked_array(self):
+        x = np.array([1, 2, -1, 3, 4])
+        mx = np.ma.masked_array(x, mask=[0, 0, 1, 0, 0])
+        z = stats.gzscore(mx)
+        desired = ([-1.526072095151, -0.194700599824, np.inf, 0.584101799472,
+                    1.136670895503])
+        assert_allclose(desired, z)
+
+
+class TestMedianAbsDeviation:
+    def setup_class(self):
+        self.dat_nan = np.array([2.20, 2.20, 2.4, 2.4, 2.5, 2.7, 2.8, 2.9,
+                                 3.03, 3.03, 3.10, 3.37, 3.4, 3.4, 3.4, 3.5,
+                                 3.6, 3.7, 3.7, 3.7, 3.7, 3.77, 5.28, np.nan])
+        self.dat = np.array([2.20, 2.20, 2.4, 2.4, 2.5, 2.7, 2.8, 2.9, 3.03,
+                             3.03, 3.10, 3.37, 3.4, 3.4, 3.4, 3.5, 3.6, 3.7,
+                             3.7, 3.7, 3.7, 3.77, 5.28, 28.95])
+
+    def test_median_abs_deviation(self):
+        assert_almost_equal(stats.median_abs_deviation(self.dat, axis=None),
+                            0.355)
+        dat = self.dat.reshape(6, 4)
+        mad = stats.median_abs_deviation(dat, axis=0)
+        mad_expected = np.asarray([0.435, 0.5, 0.45, 0.4])
+        assert_array_almost_equal(mad, mad_expected)
+
+    def test_mad_nan_omit(self):
+        mad = stats.median_abs_deviation(self.dat_nan, nan_policy='omit')
+        assert_almost_equal(mad, 0.34)
+
+    def test_axis_and_nan(self):
+        x = np.array([[1.0, 2.0, 3.0, 4.0, np.nan],
+                      [1.0, 4.0, 5.0, 8.0, 9.0]])
+        mad = stats.median_abs_deviation(x, axis=1)
+        assert_equal(mad, np.array([np.nan, 3.0]))
+
+    def test_nan_policy_omit_with_inf(self):
+        z = np.array([1, 3, 4, 6, 99, np.nan, np.inf])
+        mad = stats.median_abs_deviation(z, nan_policy='omit')
+        assert_equal(mad, 3.0)
+
+    @pytest.mark.parametrize('axis', [0, 1, 2, None])
+    def test_size_zero_with_axis(self, axis):
+        x = np.zeros((3, 0, 4))
+        mad = stats.median_abs_deviation(x, axis=axis)
+        assert_equal(mad, np.full_like(x.sum(axis=axis), fill_value=np.nan))
+
+    @pytest.mark.parametrize('nan_policy, expected',
+                             [('omit', np.array([np.nan, 1.5, 1.5])),
+                              ('propagate', np.array([np.nan, np.nan, 1.5]))])
+    def test_nan_policy_with_axis(self, nan_policy, expected):
+        x = np.array([[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
+                      [1, 5, 3, 6, np.nan, np.nan],
+                      [5, 6, 7, 9, 9, 10]])
+        mad = stats.median_abs_deviation(x, nan_policy=nan_policy, axis=1)
+        assert_equal(mad, expected)
+
+    @pytest.mark.parametrize('axis, expected',
+                             [(1, [2.5, 2.0, 12.0]), (None, 4.5)])
+    def test_center_mean_with_nan(self, axis, expected):
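+        # e.g. the first row reduces to [1, 2, 4, 9] after omitting the
+        # nan; its mean is 4 and the absolute deviations are [3, 2, 0, 5],
+        # whose median is 2.5 -- the first expected value for axis=1.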
+        x = np.array([[1, 2, 4, 9, np.nan],
+                      [0, 1, 1, 1, 12],
+                      [-10, -10, -10, 20, 20]])
+        mad = stats.median_abs_deviation(x, center=np.mean, nan_policy='omit',
+                                         axis=axis)
+        assert_allclose(mad, expected, rtol=1e-15, atol=1e-15)
+
+    def test_center_not_callable(self):
+        with pytest.raises(TypeError, match='callable'):
+            stats.median_abs_deviation([1, 2, 3, 5], center=99)
+
+
+def _check_warnings(warn_list, expected_type, expected_len):
+    """
+    Checks that all of the warnings from a list returned by
+    `warnings.catch_all(record=True)` are of the required type and that the list
+    contains expected number of warnings.
+    """
+    assert_equal(len(warn_list), expected_len, "number of warnings")
+    for warn_ in warn_list:
+        assert_(warn_.category is expected_type)
+
+
+class TestIQR:
+
+    def test_basic(self):
+        x = np.arange(8) * 0.5
+        np.random.shuffle(x)
+        assert_equal(stats.iqr(x), 1.75)
+
+    def test_api(self):
+        d = np.ones((5, 5))
+        stats.iqr(d)
+        stats.iqr(d, None)
+        stats.iqr(d, 1)
+        stats.iqr(d, (0, 1))
+        stats.iqr(d, None, (10, 90))
+        stats.iqr(d, None, (30, 20), 1.0)
+        stats.iqr(d, None, (25, 75), 1.5, 'propagate')
+        stats.iqr(d, None, (50, 50), 'normal', 'raise', 'linear')
+        stats.iqr(d, None, (25, 75), -0.4, 'omit', 'lower', True)
+
+    def test_empty(self):
+        assert_equal(stats.iqr([]), np.nan)
+        assert_equal(stats.iqr(np.arange(0)), np.nan)
+
+    def test_constant(self):
+        # Constant array always gives 0
+        x = np.ones((7, 4))
+        assert_equal(stats.iqr(x), 0.0)
+        assert_array_equal(stats.iqr(x, axis=0), np.zeros(4))
+        assert_array_equal(stats.iqr(x, axis=1), np.zeros(7))
+        assert_equal(stats.iqr(x, interpolation='linear'), 0.0)
+        assert_equal(stats.iqr(x, interpolation='midpoint'), 0.0)
+        assert_equal(stats.iqr(x, interpolation='nearest'), 0.0)
+        assert_equal(stats.iqr(x, interpolation='lower'), 0.0)
+        assert_equal(stats.iqr(x, interpolation='higher'), 0.0)
+
+        # 0 only along constant dimensions
+        # This also tests much of `axis`
+        y = np.ones((4, 5, 6)) * np.arange(6)
+        assert_array_equal(stats.iqr(y, axis=0), np.zeros((5, 6)))
+        assert_array_equal(stats.iqr(y, axis=1), np.zeros((4, 6)))
+        assert_array_equal(stats.iqr(y, axis=2), np.full((4, 5), 2.5))
+        assert_array_equal(stats.iqr(y, axis=(0, 1)), np.zeros(6))
+        assert_array_equal(stats.iqr(y, axis=(0, 2)), np.full(5, 3.))
+        assert_array_equal(stats.iqr(y, axis=(1, 2)), np.full(4, 3.))
+
+    def test_scalarlike(self):
+        x = np.arange(1) + 7.0
+        assert_equal(stats.iqr(x[0]), 0.0)
+        assert_equal(stats.iqr(x), 0.0)
+        assert_array_equal(stats.iqr(x, keepdims=True), [0.0])
+
+    def test_2D(self):
+        x = np.arange(15).reshape((3, 5))
+        assert_equal(stats.iqr(x), 7.0)
+        assert_array_equal(stats.iqr(x, axis=0), np.full(5, 5.))
+        assert_array_equal(stats.iqr(x, axis=1), np.full(3, 2.))
+        assert_array_equal(stats.iqr(x, axis=(0, 1)), 7.0)
+        assert_array_equal(stats.iqr(x, axis=(1, 0)), 7.0)
+
+    def test_axis(self):
+        # The `axis` keyword is also put through its paces in `test_keepdims`.
+        o = np.random.normal(size=(71, 23))
+        x = np.dstack([o] * 10)                 # x.shape = (71, 23, 10)
+        q = stats.iqr(o)
+
+        assert_equal(stats.iqr(x, axis=(0, 1)), q)
+        x = np.moveaxis(x, -1, 0)               # x.shape = (10, 71, 23)
+        assert_equal(stats.iqr(x, axis=(2, 1)), q)
+        x = x.swapaxes(0, 1)                    # x.shape = (71, 10, 23)
+        assert_equal(stats.iqr(x, axis=(0, 2)), q)
+        x = x.swapaxes(0, 1)                    # x.shape = (10, 71, 23)
+
+        assert_equal(stats.iqr(x, axis=(0, 1, 2)),
+                     stats.iqr(x, axis=None))
+        assert_equal(stats.iqr(x, axis=(0,)),
+                     stats.iqr(x, axis=0))
+
+        d = np.arange(3 * 5 * 7 * 11)
+        # np.random.shuffle only shuffles along the first axis, so shuffle
+        # while the array is still 1-D, then reshape.
+        np.random.shuffle(d)
+        d = d.reshape((3, 5, 7, 11))
+        assert_equal(stats.iqr(d, axis=(0, 1, 2))[0],
+                     stats.iqr(d[:,:,:, 0].ravel()))
+        assert_equal(stats.iqr(d, axis=(0, 1, 3))[1],
+                     stats.iqr(d[:,:, 1,:].ravel()))
+        assert_equal(stats.iqr(d, axis=(3, 1, -4))[2],
+                     stats.iqr(d[:,:, 2,:].ravel()))
+        assert_equal(stats.iqr(d, axis=(3, 1, 2))[2],
+                     stats.iqr(d[2,:,:,:].ravel()))
+        assert_equal(stats.iqr(d, axis=(3, 2))[2, 1],
+                     stats.iqr(d[2, 1,:,:].ravel()))
+        assert_equal(stats.iqr(d, axis=(1, -2))[2, 1],
+                     stats.iqr(d[2, :, :, 1].ravel()))
+        assert_equal(stats.iqr(d, axis=(1, 3))[2, 2],
+                     stats.iqr(d[2, :, 2,:].ravel()))
+
+        assert_raises(np.AxisError, stats.iqr, d, axis=4)
+        assert_raises(ValueError, stats.iqr, d, axis=(0, 0))
+
+    def test_rng(self):
+        x = np.arange(5)
+        assert_equal(stats.iqr(x), 2)
+        assert_equal(stats.iqr(x, rng=(25, 87.5)), 2.5)
+        assert_equal(stats.iqr(x, rng=(12.5, 75)), 2.5)
+        assert_almost_equal(stats.iqr(x, rng=(10, 50)), 1.6)  # 2.0 - 0.4
+
+        assert_raises(ValueError, stats.iqr, x, rng=(0, 101))
+        assert_raises(ValueError, stats.iqr, x, rng=(np.nan, 25))
+        assert_raises(TypeError, stats.iqr, x, rng=(0, 50, 60))
+
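+    def test_rng_matches_percentile_difference(self):
+        # Editor's sketch (hypothetical check, not part of the original
+        # suite): `rng` is assumed to select the percentile pair whose
+        # difference is returned, so the result should match np.percentile.
+        x = np.arange(5)
+        expected = np.percentile(x, 50) - np.percentile(x, 10)
+        assert_allclose(stats.iqr(x, rng=(10, 50)), expected)
+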
+    def test_interpolation(self):
+        x = np.arange(5)
+        y = np.arange(4)
+        # Default
+        assert_equal(stats.iqr(x), 2)
+        assert_equal(stats.iqr(y), 1.5)
+        # Linear
+        assert_equal(stats.iqr(x, interpolation='linear'), 2)
+        assert_equal(stats.iqr(y, interpolation='linear'), 1.5)
+        # Higher
+        assert_equal(stats.iqr(x, interpolation='higher'), 2)
+        assert_equal(stats.iqr(x, rng=(25, 80), interpolation='higher'), 3)
+        assert_equal(stats.iqr(y, interpolation='higher'), 2)
+        # Lower (will generally, but not always, be the same as higher)
+        assert_equal(stats.iqr(x, interpolation='lower'), 2)
+        assert_equal(stats.iqr(x, rng=(25, 80), interpolation='lower'), 2)
+        assert_equal(stats.iqr(y, interpolation='lower'), 2)
+        # Nearest
+        assert_equal(stats.iqr(x, interpolation='nearest'), 2)
+        assert_equal(stats.iqr(y, interpolation='nearest'), 1)
+        # Midpoint
+        assert_equal(stats.iqr(x, interpolation='midpoint'), 2)
+        assert_equal(stats.iqr(x, rng=(25, 80), interpolation='midpoint'), 2.5)
+        assert_equal(stats.iqr(y, interpolation='midpoint'), 2)
+
+        # Check all method= values new in numpy 1.22.0 are accepted
+        if NumpyVersion(np.__version__) >= '1.22.0':
+            for method in ('inverted_cdf', 'averaged_inverted_cdf',
+                           'closest_observation', 'interpolated_inverted_cdf',
+                           'hazen', 'weibull', 'median_unbiased',
+                           'normal_unbiased'):
+                stats.iqr(y, interpolation=method)
+
+        assert_raises(ValueError, stats.iqr, x, interpolation='foobar')
+
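+    def test_interpolation_matches_percentile_method(self):
+        # Editor's sketch (hypothetical check): `interpolation` is assumed
+        # to behave like the corresponding np.percentile method applied to
+        # both percentiles before subtracting.
+        if NumpyVersion(np.__version__) < '1.22.0':
+            pytest.skip("np.percentile 'method' keyword requires numpy >= 1.22")
+        y = np.arange(4)
+        for method in ('linear', 'lower', 'higher', 'nearest', 'midpoint'):
+            expected = (np.percentile(y, 75, method=method)
+                        - np.percentile(y, 25, method=method))
+            assert_equal(stats.iqr(y, interpolation=method), expected)
+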
+    def test_keepdims(self):
+        # Also tests most of `axis`
+        x = np.ones((3, 5, 7, 11))
+        assert_equal(stats.iqr(x, axis=None, keepdims=False).shape, ())
+        assert_equal(stats.iqr(x, axis=2, keepdims=False).shape, (3, 5, 11))
+        assert_equal(stats.iqr(x, axis=(0, 1), keepdims=False).shape, (7, 11))
+        assert_equal(stats.iqr(x, axis=(0, 3), keepdims=False).shape, (5, 7))
+        assert_equal(stats.iqr(x, axis=(1,), keepdims=False).shape, (3, 7, 11))
+        assert_equal(stats.iqr(x, (0, 1, 2, 3), keepdims=False).shape, ())
+        assert_equal(stats.iqr(x, axis=(0, 1, 3), keepdims=False).shape, (7,))
+
+        assert_equal(stats.iqr(x, axis=None, keepdims=True).shape, (1, 1, 1, 1))
+        assert_equal(stats.iqr(x, axis=2, keepdims=True).shape, (3, 5, 1, 11))
+        assert_equal(stats.iqr(x, axis=(0, 1), keepdims=True).shape, (1, 1, 7, 11))
+        assert_equal(stats.iqr(x, axis=(0, 3), keepdims=True).shape, (1, 5, 7, 1))
+        assert_equal(stats.iqr(x, axis=(1,), keepdims=True).shape, (3, 1, 7, 11))
+        assert_equal(stats.iqr(x, (0, 1, 2, 3), keepdims=True).shape, (1, 1, 1, 1))
+        assert_equal(stats.iqr(x, axis=(0, 1, 3), keepdims=True).shape, (1, 1, 7, 1))
+
+    def test_nanpolicy(self):
+        x = np.arange(15.0).reshape((3, 5))
+
+        # No NaNs
+        assert_equal(stats.iqr(x, nan_policy='propagate'), 7)
+        assert_equal(stats.iqr(x, nan_policy='omit'), 7)
+        assert_equal(stats.iqr(x, nan_policy='raise'), 7)
+
+        # Yes NaNs
+        x[1, 2] = np.nan
+        with warnings.catch_warnings(record=True):
+            warnings.simplefilter("always")
+            assert_equal(stats.iqr(x, nan_policy='propagate'), np.nan)
+            assert_equal(stats.iqr(x, axis=0, nan_policy='propagate'), [5, 5, np.nan, 5, 5])
+            assert_equal(stats.iqr(x, axis=1, nan_policy='propagate'), [2, np.nan, 2])
+
+        with warnings.catch_warnings(record=True):
+            warnings.simplefilter("always")
+            assert_equal(stats.iqr(x, nan_policy='omit'), 7.5)
+            assert_equal(stats.iqr(x, axis=0, nan_policy='omit'), np.full(5, 5))
+            assert_equal(stats.iqr(x, axis=1, nan_policy='omit'), [2, 2.5, 2])
+
+        assert_raises(ValueError, stats.iqr, x, nan_policy='raise')
+        assert_raises(ValueError, stats.iqr, x, axis=0, nan_policy='raise')
+        assert_raises(ValueError, stats.iqr, x, axis=1, nan_policy='raise')
+
+        # Bad policy
+        assert_raises(ValueError, stats.iqr, x, nan_policy='barfood')
+
+    def test_scale(self):
+        x = np.arange(15.0).reshape((3, 5))
+
+        # No NaNs
+        assert_equal(stats.iqr(x, scale=1.0), 7)
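+        # 1.3489795 is approximately 2*norm.ppf(0.75), the IQR of the
+        # standard normal distribution.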
+        assert_almost_equal(stats.iqr(x, scale='normal'), 7 / 1.3489795)
+        assert_equal(stats.iqr(x, scale=2.0), 3.5)
+
+        # Yes NaNs
+        x[1, 2] = np.nan
+        with warnings.catch_warnings(record=True):
+            warnings.simplefilter("always")
+            assert_equal(stats.iqr(x, scale=1.0, nan_policy='propagate'), np.nan)
+            assert_equal(stats.iqr(x, scale='normal', nan_policy='propagate'), np.nan)
+            assert_equal(stats.iqr(x, scale=2.0, nan_policy='propagate'), np.nan)
+            # axis=1 chosen to show behavior with both nans and without
+            assert_equal(stats.iqr(x, axis=1, scale=1.0,
+                                   nan_policy='propagate'), [2, np.nan, 2])
+            assert_almost_equal(stats.iqr(x, axis=1, scale='normal',
+                                          nan_policy='propagate'),
+                                np.array([2, np.nan, 2]) / 1.3489795)
+            assert_equal(stats.iqr(x, axis=1, scale=2.0, nan_policy='propagate'),
+                         [1, np.nan, 1])
+            # Since NumPy 1.17.0.dev, warnings are no longer emitted by
+            # np.percentile with nans, so we don't check the number of
+            # warnings here. See https://github.com/numpy/numpy/pull/12679.
+
+        assert_equal(stats.iqr(x, scale=1.0, nan_policy='omit'), 7.5)
+        assert_almost_equal(stats.iqr(x, scale='normal', nan_policy='omit'),
+                            7.5 / 1.3489795)
+        assert_equal(stats.iqr(x, scale=2.0, nan_policy='omit'), 3.75)
+
+        # Bad scale
+        assert_raises(ValueError, stats.iqr, x, scale='foobar')
+
+        with pytest.warns(
+            DeprecationWarning,
+            match="The use of 'scale=\"raw\"'"
+        ):
+            stats.iqr([1], scale='raw')
+
+
+class TestMoments:
+    """
+        Comparison numbers are found using R v.1.5.1
+        note that length(testcase) = 4
+        testmathworks comes from documentation for the
+        Statistics Toolbox for Matlab and can be found at both
+        https://www.mathworks.com/help/stats/kurtosis.html
+        https://www.mathworks.com/help/stats/skewness.html
+        Note that both test cases came from here.
+    """
+    testcase = [1,2,3,4]
+    scalar_testcase = 4.
+    np.random.seed(1234)
+    testcase_moment_accuracy = np.random.rand(42)
+    testmathworks = [1.165, 0.6268, 0.0751, 0.3516, -0.6965]
+
+    def _assert_equal(self, actual, expect, *, shape=None, dtype=None):
+        expect = np.asarray(expect)
+        if shape is not None:
+            expect = np.broadcast_to(expect, shape)
+        assert_array_equal(actual, expect)
+        if dtype is None:
+            dtype = expect.dtype
+        assert actual.dtype == dtype
+
+    def test_moment(self):
+        # The k-th central moment is mean((testcase - mean(testcase))**k, axis=0).
+        y = stats.moment(self.scalar_testcase)
+        assert_approx_equal(y, 0.0)
+        y = stats.moment(self.testcase, 0)
+        assert_approx_equal(y, 1.0)
+        y = stats.moment(self.testcase, 1)
+        assert_approx_equal(y, 0.0, 10)
+        y = stats.moment(self.testcase, 2)
+        assert_approx_equal(y, 1.25)
+        y = stats.moment(self.testcase, 3)
+        assert_approx_equal(y, 0.0)
+        y = stats.moment(self.testcase, 4)
+        assert_approx_equal(y, 2.5625)
+
+        # check array_like input for moment
+        y = stats.moment(self.testcase, [1, 2, 3, 4])
+        assert_allclose(y, [0, 1.25, 0, 2.5625])
+
+        # check moment input consists only of integers
+        y = stats.moment(self.testcase, 0.0)
+        assert_approx_equal(y, 1.0)
+        assert_raises(ValueError, stats.moment, self.testcase, 1.2)
+        y = stats.moment(self.testcase, [1.0, 2, 3, 4.0])
+        assert_allclose(y, [0, 1.25, 0, 2.5625])
+
+        # test empty input
+        message = "Mean of empty slice."
+        with pytest.warns(RuntimeWarning, match=message):
+            y = stats.moment([])
+            self._assert_equal(y, np.nan, dtype=np.float64)
+            y = stats.moment(np.array([], dtype=np.float32))
+            self._assert_equal(y, np.nan, dtype=np.float32)
+            y = stats.moment(np.zeros((1, 0)), axis=0)
+            self._assert_equal(y, [], shape=(0,), dtype=np.float64)
+            y = stats.moment([[]], axis=1)
+            self._assert_equal(y, np.nan, shape=(1,), dtype=np.float64)
+            y = stats.moment([[]], moment=[0, 1], axis=0)
+            self._assert_equal(y, [], shape=(2, 0))
+
+        x = np.arange(10.)
+        x[9] = np.nan
+        assert_equal(stats.moment(x, 2), np.nan)
+        assert_almost_equal(stats.moment(x, nan_policy='omit'), 0.0)
+        assert_raises(ValueError, stats.moment, x, nan_policy='raise')
+        assert_raises(ValueError, stats.moment, x, nan_policy='foobar')
+
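+    def test_moment_matches_direct_formula(self):
+        # Editor's sketch (hypothetical check): the k-th central moment is
+        # mean((x - mean(x))**k); stats.moment is assumed to agree for the
+        # small integer orders exercised above.
+        x = np.asarray(self.testcase, dtype=float)
+        for k in [2, 3, 4]:
+            assert_allclose(stats.moment(x, k), np.mean((x - x.mean())**k))
+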
+    @pytest.mark.parametrize('dtype', [np.float32, np.float64, np.complex128])
+    @pytest.mark.parametrize('expect, moment', [(0, 1), (1, 0)])
+    def test_constant_moments(self, dtype, expect, moment):
+        x = np.random.rand(5).astype(dtype)
+        y = stats.moment(x, moment=moment)
+        self._assert_equal(y, expect, dtype=dtype)
+
+        y = stats.moment(np.broadcast_to(x, (6, 5)), axis=0, moment=moment)
+        self._assert_equal(y, expect, shape=(5,), dtype=dtype)
+
+        y = stats.moment(np.broadcast_to(x, (1, 2, 3, 4, 5)), axis=2,
+                         moment=moment)
+        self._assert_equal(y, expect, shape=(1, 2, 4, 5), dtype=dtype)
+
+        y = stats.moment(np.broadcast_to(x, (1, 2, 3, 4, 5)), axis=None,
+                         moment=moment)
+        self._assert_equal(y, expect, shape=(), dtype=dtype)
+
+    def test_moment_propagate_nan(self):
+        # Check that the shape of the result is the same for inputs
+        # with and without nans, cf gh-5817
+        a = np.arange(8).reshape(2, -1).astype(float)
+        a[1, 0] = np.nan
+        mm = stats.moment(a, 2, axis=1, nan_policy="propagate")
+        np.testing.assert_allclose(mm, [1.25, np.nan], atol=1e-15)
+
+    def test_moment_empty_moment(self):
+        # tests moment with empty `moment` list
+        with pytest.raises(ValueError, match=r"'moment' must be a scalar or a"
+                                             r" non-empty 1D list/array."):
+            stats.moment([1, 2, 3, 4], moment=[])
+
+    def test_skewness(self):
+        # Scalar test case
+        with pytest.warns(RuntimeWarning, match="Precision loss occurred"):
+            y = stats.skew(self.scalar_testcase)
+            assert np.isnan(y)
+        # sum((testmathworks-mean(testmathworks,axis=0))**3,axis=0) /
+        #     ((sqrt(var(testmathworks)*4/5))**3)/5
+        y = stats.skew(self.testmathworks)
+        assert_approx_equal(y, -0.29322304336607, 10)
+        y = stats.skew(self.testmathworks, bias=0)
+        assert_approx_equal(y, -0.437111105023940, 10)
+        y = stats.skew(self.testcase)
+        assert_approx_equal(y, 0.0, 10)
+
+        x = np.arange(10.)
+        x[9] = np.nan
+        with np.errstate(invalid='ignore'):
+            assert_equal(stats.skew(x), np.nan)
+        assert_equal(stats.skew(x, nan_policy='omit'), 0.)
+        assert_raises(ValueError, stats.skew, x, nan_policy='raise')
+        assert_raises(ValueError, stats.skew, x, nan_policy='foobar')
+
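+    def test_skewness_matches_direct_formula(self):
+        # Editor's sketch (hypothetical check): the biased sample skewness
+        # is assumed to be g1 = m3 / m2**1.5, where m_k is the k-th central
+        # moment; this reproduces the R-derived reference value above.
+        x = np.asarray(self.testmathworks)
+        m2 = np.mean((x - x.mean())**2)
+        m3 = np.mean((x - x.mean())**3)
+        assert_allclose(stats.skew(x), m3 / m2**1.5)
+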
+    def test_skewness_scalar(self):
+        # `skew` must return a scalar for 1-dim input
+        assert_equal(stats.skew(arange(10)), 0.0)
+
+    def test_skew_propagate_nan(self):
+        # Check that the shape of the result is the same for inputs
+        # with and without nans, cf gh-5817
+        a = np.arange(8).reshape(2, -1).astype(float)
+        a[1, 0] = np.nan
+        with np.errstate(invalid='ignore'):
+            s = stats.skew(a, axis=1, nan_policy="propagate")
+        np.testing.assert_allclose(s, [0, np.nan], atol=1e-15)
+
+    def test_skew_constant_value(self):
+        # Skewness of a constant input should be zero even when the mean is not
+        # exact (gh-13245)
+        with pytest.warns(RuntimeWarning, match="Precision loss occurred"):
+            a = np.repeat(-0.27829495, 10)
+            assert np.isnan(stats.skew(a))
+            assert np.isnan(stats.skew(a * float(2**50)))
+            assert np.isnan(stats.skew(a / float(2**50)))
+            assert np.isnan(stats.skew(a, bias=False))
+
+            # similarly, from gh-11086:
+            assert np.isnan(stats.skew([14.3]*7))
+            assert np.isnan(stats.skew(1 + np.arange(-3, 4)*1e-16))
+
+    def test_kurtosis(self):
+        # Scalar test case
+        with pytest.warns(RuntimeWarning, match="Precision loss occurred"):
+            y = stats.kurtosis(self.scalar_testcase)
+            assert np.isnan(y)
+        #   sum((testcase-mean(testcase,axis=0))**4,axis=0)/((sqrt(var(testcase)*3/4))**4)/4
+        #   sum((test2-mean(testmathworks,axis=0))**4,axis=0)/((sqrt(var(testmathworks)*4/5))**4)/5
+        #   Set flags for axis = 0 and
+        #   fisher=0 (Pearson's defn of kurtosis for compatibility with Matlab)
+        y = stats.kurtosis(self.testmathworks, 0, fisher=0, bias=1)
+        assert_approx_equal(y, 2.1658856802973, 10)
+
+        # Note that MATLAB has confusing docs for the following case
+        #  kurtosis(x,0) gives an unbiased estimate of Pearson's kurtosis
+        #  kurtosis(x)  gives a biased estimate of Fisher's kurtosis (Pearson-3)
+        #  The MATLAB docs imply that both should give Fisher's
+        y = stats.kurtosis(self.testmathworks, fisher=0, bias=0)
+        assert_approx_equal(y, 3.663542721189047, 10)
+        y = stats.kurtosis(self.testcase, 0, 0)
+        assert_approx_equal(y, 1.64)
+
+        x = np.arange(10.)
+        x[9] = np.nan
+        assert_equal(stats.kurtosis(x), np.nan)
+        assert_almost_equal(stats.kurtosis(x, nan_policy='omit'), -1.230000)
+        assert_raises(ValueError, stats.kurtosis, x, nan_policy='raise')
+        assert_raises(ValueError, stats.kurtosis, x, nan_policy='foobar')
+
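+    def test_kurtosis_matches_direct_formula(self):
+        # Editor's sketch (hypothetical check): Pearson's biased kurtosis
+        # is assumed to be m4 / m2**2 and Fisher's to subtract 3, so
+        # testcase gives 1.64 (Pearson), as asserted above, and -1.36.
+        x = np.asarray(self.testcase, dtype=float)
+        m2 = np.mean((x - x.mean())**2)
+        m4 = np.mean((x - x.mean())**4)
+        assert_allclose(stats.kurtosis(x, fisher=False), m4 / m2**2)
+        assert_allclose(stats.kurtosis(x, fisher=True), m4 / m2**2 - 3)
+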
+    def test_kurtosis_array_scalar(self):
+        assert_equal(type(stats.kurtosis([1,2,3])), float)
+
+    def test_kurtosis_propagate_nan(self):
+        # Check that the shape of the result is the same for inputs
+        # with and without nans, cf gh-5817
+        a = np.arange(8).reshape(2, -1).astype(float)
+        a[1, 0] = np.nan
+        k = stats.kurtosis(a, axis=1, nan_policy="propagate")
+        np.testing.assert_allclose(k, [-1.36, np.nan], atol=1e-15)
+
+    def test_kurtosis_constant_value(self):
+        # Kurtosis of a constant input should be zero, even when the mean is not
+        # exact (gh-13245)
+        a = np.repeat(-0.27829495, 10)
+        with pytest.warns(RuntimeWarning, match="Precision loss occurred"):
+            assert np.isnan(stats.kurtosis(a, fisher=False))
+            assert np.isnan(stats.kurtosis(a * float(2**50), fisher=False))
+            assert np.isnan(stats.kurtosis(a / float(2**50), fisher=False))
+            assert np.isnan(stats.kurtosis(a, fisher=False, bias=False))
+
+    def test_moment_accuracy(self):
+        # 'moment' must have a small enough error compared with the slower
+        # but very accurate reference computation using numpy.power().
+        tc_no_mean = self.testcase_moment_accuracy - \
+                     np.mean(self.testcase_moment_accuracy)
+        assert_allclose(np.power(tc_no_mean, 42).mean(),
+                            stats.moment(self.testcase_moment_accuracy, 42))
+
+    def test_precision_loss_gh15554(self):
+        # gh-15554 was one of several issues that have reported problems with
+        # constant or near-constant input. We can't always fix these, but
+        # make sure there's a warning.
+        with pytest.warns(RuntimeWarning, match="Precision loss occurred"):
+            rng = np.random.default_rng(34095309370)
+            a = rng.random(size=(100, 10))
+            a[:, 0] = 1.01
+            stats.skew(a)[0]
+
+    def test_empty_1d(self):
+        message = "Mean of empty slice."
+        with pytest.warns(RuntimeWarning, match=message):
+            stats.skew([])
+        with pytest.warns(RuntimeWarning, match=message):
+            stats.kurtosis([])
+
+
+class TestStudentTest:
+    X1 = np.array([-1, 0, 1])
+    X2 = np.array([0, 1, 2])
+    T1_0 = 0
+    P1_0 = 1
+    T1_1 = -1.7320508075
+    P1_1 = 0.22540333075
+    T1_2 = -3.464102
+    P1_2 = 0.0741799
+    T2_0 = 1.732051
+    P2_0 = 0.2254033
+    P1_1_l = P1_1 / 2
+    P1_1_g = 1 - (P1_1 / 2)
+
+    def test_onesample(self):
+        with suppress_warnings() as sup, np.errstate(invalid="ignore"), \
+                pytest.warns(RuntimeWarning, match="Precision loss occurred"):
+            sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice")
+            t, p = stats.ttest_1samp(4., 3.)
+        assert_(np.isnan(t))
+        assert_(np.isnan(p))
+
+        t, p = stats.ttest_1samp(self.X1, 0)
+
+        assert_array_almost_equal(t, self.T1_0)
+        assert_array_almost_equal(p, self.P1_0)
+
+        res = stats.ttest_1samp(self.X1, 0)
+        attributes = ('statistic', 'pvalue')
+        check_named_results(res, attributes)
+
+        t, p = stats.ttest_1samp(self.X2, 0)
+
+        assert_array_almost_equal(t, self.T2_0)
+        assert_array_almost_equal(p, self.P2_0)
+
+        t, p = stats.ttest_1samp(self.X1, 1)
+
+        assert_array_almost_equal(t, self.T1_1)
+        assert_array_almost_equal(p, self.P1_1)
+
+        t, p = stats.ttest_1samp(self.X1, 2)
+
+        assert_array_almost_equal(t, self.T1_2)
+        assert_array_almost_equal(p, self.P1_2)
+
+        # check nan policy
+        x = stats.norm.rvs(loc=5, scale=10, size=51, random_state=7654567)
+        x[50] = np.nan
+        with np.errstate(invalid="ignore"):
+            assert_array_equal(stats.ttest_1samp(x, 5.0), (np.nan, np.nan))
+
+            assert_array_almost_equal(stats.ttest_1samp(x, 5.0, nan_policy='omit'),
+                                      (-1.6412624074367159, 0.107147027334048005))
+            assert_raises(ValueError, stats.ttest_1samp, x, 5.0, nan_policy='raise')
+            assert_raises(ValueError, stats.ttest_1samp, x, 5.0,
+                          nan_policy='foobar')
+
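+    def test_statistic_matches_direct_formula(self):
+        # Editor's sketch (hypothetical check): the one-sample t statistic
+        # is assumed to be (mean(x) - popmean) / (std(x, ddof=1) / sqrt(n)),
+        # which reproduces T1_1 above.
+        x = self.X1
+        t = (x.mean() - 1) / (x.std(ddof=1) / np.sqrt(len(x)))
+        assert_allclose(t, self.T1_1)
+        assert_allclose(stats.ttest_1samp(x, 1).statistic, t)
+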
+    def test_1samp_alternative(self):
+        assert_raises(ValueError, stats.ttest_1samp, self.X1, 0,
+                      alternative="error")
+
+        t, p = stats.ttest_1samp(self.X1, 1, alternative="less")
+        assert_allclose(p, self.P1_1_l)
+        assert_allclose(t, self.T1_1)
+
+        t, p = stats.ttest_1samp(self.X1, 1, alternative="greater")
+        assert_allclose(p, self.P1_1_g)
+        assert_allclose(t, self.T1_1)
+
+    @pytest.mark.parametrize("alternative", ['two-sided', 'less', 'greater'])
+    def test_1samp_ci_1d(self, alternative):
+        # test confidence interval method against reference values
+        rng = np.random.default_rng(8066178009154342972)
+        n = 10
+        x = rng.normal(size=n, loc=1.5, scale=2)
+        popmean = rng.normal()  # this shouldn't affect confidence interval
+        # Reference values generated with R t.test:
+        # options(digits=16)
+        # x = c(2.75532884,  0.93892217,  0.94835861,  1.49489446, -0.62396595,
+        #      -1.88019867, -1.55684465,  4.88777104,  5.15310979,  4.34656348)
+        # t.test(x, conf.level=0.85, alternative='l')
+
+        ref = {'two-sided': [0.3594423211709136, 2.9333455028290860],
+               'greater': [0.7470806207371626, np.inf],
+               'less': [-np.inf, 2.545707203262837]}
+        res = stats.ttest_1samp(x, popmean=popmean, alternative=alternative)
+        ci = res.confidence_interval(confidence_level=0.85)
+        assert_allclose(ci, ref[alternative])
+        assert_equal(res.df, n-1)
+
+    def test_1samp_ci_iv(self):
+        # test `confidence_interval` method input validation
+        res = stats.ttest_1samp(np.arange(10), 0)
+        message = '`confidence_level` must be a number between 0 and 1.'
+        with pytest.raises(ValueError, match=message):
+            res.confidence_interval(confidence_level=10)
+
+
+class TestPercentileOfScore:
+
+    def f(self, *args, **kwargs):
+        return stats.percentileofscore(*args, **kwargs)
+
+    @pytest.mark.parametrize("kind, result", [("rank", 40),
+                                              ("mean", 35),
+                                              ("strict", 30),
+                                              ("weak", 40)])
+    def test_unique(self, kind, result):
+        a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+        assert_equal(self.f(a, 4, kind=kind), result)
+
+    @pytest.mark.parametrize("kind, result", [("rank", 45),
+                                              ("mean", 40),
+                                              ("strict", 30),
+                                              ("weak", 50)])
+    def test_multiple2(self, kind, result):
+        a = [1, 2, 3, 4, 4, 5, 6, 7, 8, 9]
+        assert_equal(self.f(a, 4, kind=kind), result)
+
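+    def test_kind_definitions_sketch(self):
+        # Editor's sketch (hypothetical check) of the assumed `kind`
+        # conventions: 'weak' counts scores <= x, 'strict' counts scores
+        # < x, and 'mean' averages the two ('rank' additionally gives
+        # tied scores half weight, which yields 45 for this data).
+        a = np.array([1, 2, 3, 4, 4, 5, 6, 7, 8, 9])
+        weak = 100 * np.mean(a <= 4)
+        strict = 100 * np.mean(a < 4)
+        assert_equal(self.f(a, 4, kind='weak'), weak)
+        assert_equal(self.f(a, 4, kind='strict'), strict)
+        assert_equal(self.f(a, 4, kind='mean'), (weak + strict) / 2)
+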
+    @pytest.mark.parametrize("kind, result", [("rank", 50),
+                                              ("mean", 45),
+                                              ("strict", 30),
+                                              ("weak", 60)])
+    def test_multiple3(self, kind, result):
+        a = [1, 2, 3, 4, 4, 4, 5, 6, 7, 8]
+        assert_equal(self.f(a, 4, kind=kind), result)
+
+    @pytest.mark.parametrize("kind, result", [("rank", 30),
+                                              ("mean", 30),
+                                              ("strict", 30),
+                                              ("weak", 30)])
+    def test_missing(self, kind, result):
+        a = [1, 2, 3, 5, 6, 7, 8, 9, 10, 11]
+        assert_equal(self.f(a, 4, kind=kind), result)
+
+    @pytest.mark.parametrize("kind, result", [("rank", 40),
+                                              ("mean", 35),
+                                              ("strict", 30),
+                                              ("weak", 40)])
+    def test_large_numbers(self, kind, result):
+        a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
+        assert_equal(self.f(a, 40, kind=kind), result)
+
+    @pytest.mark.parametrize("kind, result", [("rank", 50),
+                                              ("mean", 45),
+                                              ("strict", 30),
+                                              ("weak", 60)])
+    def test_large_numbers_multiple3(self, kind, result):
+        a = [10, 20, 30, 40, 40, 40, 50, 60, 70, 80]
+        assert_equal(self.f(a, 40, kind=kind), result)
+
+    @pytest.mark.parametrize("kind, result", [("rank", 30),
+                                              ("mean", 30),
+                                              ("strict", 30),
+                                              ("weak", 30)])
+    def test_large_numbers_missing(self, kind, result):
+        a = [10, 20, 30, 50, 60, 70, 80, 90, 100, 110]
+        assert_equal(self.f(a, 40, kind=kind), result)
+
+    @pytest.mark.parametrize("kind, result", [("rank", [0, 10, 100, 100]),
+                                              ("mean", [0, 5, 95, 100]),
+                                              ("strict", [0, 0, 90, 100]),
+                                              ("weak", [0, 10, 100, 100])])
+    def test_boundaries(self, kind, result):
+        a = [10, 20, 30, 50, 60, 70, 80, 90, 100, 110]
+        assert_equal(self.f(a, [0, 10, 110, 200], kind=kind), result)
+
+    @pytest.mark.parametrize("kind, result", [("rank", [0, 10, 100]),
+                                              ("mean", [0, 5, 95]),
+                                              ("strict", [0, 0, 90]),
+                                              ("weak", [0, 10, 100])])
+    def test_inf(self, kind, result):
+        a = [1, 2, 3, 4, 5, 6, 7, 8, 9, +np.inf]
+        assert_equal(self.f(a, [-np.inf, 1, +np.inf], kind=kind), result)
+
+    cases = [("propagate", [], 1, np.nan),
+             ("propagate", [np.nan], 1, np.nan),
+             ("propagate", [np.nan], [0, 1, 2], [np.nan, np.nan, np.nan]),
+             ("propagate", [1, 2], [1, 2, np.nan], [50, 100, np.nan]),
+             ("omit", [1, 2, np.nan], [0, 1, 2], [0, 50, 100]),
+             ("omit", [1, 2], [0, 1, np.nan], [0, 50, np.nan]),
+             ("omit", [np.nan, np.nan], [0, 1, 2], [np.nan, np.nan, np.nan])]
+
+    @pytest.mark.parametrize("policy, a, score, result", cases)
+    def test_nans_ok(self, policy, a, score, result):
+        assert_equal(self.f(a, score, nan_policy=policy), result)
+
+    cases = [
+        ("raise", [1, 2, 3, np.nan], [1, 2, 3],
+         "The input contains nan values"),
+        ("raise", [1, 2, 3], [1, 2, 3, np.nan],
+         "The input contains nan values"),
+    ]
+
+    @pytest.mark.parametrize("policy, a, score, message", cases)
+    def test_nans_fail(self, policy, a, score, message):
+        with assert_raises(ValueError, match=message):
+            self.f(a, score, nan_policy=policy)
+
+    @pytest.mark.parametrize("shape", [
+        (6, ),
+        (2, 3),
+        (2, 1, 3),
+        (2, 1, 1, 3),
+    ])
+    def test_nd(self, shape):
+        a = np.array([0, 1, 2, 3, 4, 5])
+        scores = a.reshape(shape)
+        results = scores*10
+        a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+        assert_equal(self.f(a, scores, kind="rank"), results)
+
+
+PowerDivCase = namedtuple('Case',  # type: ignore[name-match]
+                          ['f_obs', 'f_exp', 'ddof', 'axis',
+                           'chi2',     # Pearson's
+                           'log',      # G-test (log-likelihood)
+                           'mod_log',  # Modified log-likelihood
+                           'cr',       # Cressie-Read (lambda=2/3)
+                           ])
+
+# The details of the first two elements in power_div_1d_cases are used
+# in a test in TestPowerDivergence.  Check that code before making
+# any changes here.
+power_div_1d_cases = [
+    # Use the default f_exp.
+    PowerDivCase(f_obs=[4, 8, 12, 8], f_exp=None, ddof=0, axis=None,
+                 chi2=4,
+                 log=2*(4*np.log(4/8) + 12*np.log(12/8)),
+                 mod_log=2*(8*np.log(8/4) + 8*np.log(8/12)),
+                 cr=(4*((4/8)**(2/3) - 1) + 12*((12/8)**(2/3) - 1))/(5/9)),
+    # Give a non-uniform f_exp.
+    PowerDivCase(f_obs=[4, 8, 12, 8], f_exp=[2, 16, 12, 2], ddof=0, axis=None,
+                 chi2=24,
+                 log=2*(4*np.log(4/2) + 8*np.log(8/16) + 8*np.log(8/2)),
+                 mod_log=2*(2*np.log(2/4) + 16*np.log(16/8) + 2*np.log(2/8)),
+                 cr=(4*((4/2)**(2/3) - 1) + 8*((8/16)**(2/3) - 1) +
+                     8*((8/2)**(2/3) - 1))/(5/9)),
+    # f_exp is a scalar.
+    PowerDivCase(f_obs=[4, 8, 12, 8], f_exp=8, ddof=0, axis=None,
+                 chi2=4,
+                 log=2*(4*np.log(4/8) + 12*np.log(12/8)),
+                 mod_log=2*(8*np.log(8/4) + 8*np.log(8/12)),
+                 cr=(4*((4/8)**(2/3) - 1) + 12*((12/8)**(2/3) - 1))/(5/9)),
+    # f_exp equal to f_obs.
+    PowerDivCase(f_obs=[3, 5, 7, 9], f_exp=[3, 5, 7, 9], ddof=0, axis=0,
+                 chi2=0, log=0, mod_log=0, cr=0),
+]
+
+
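+# Editor's sketch (hypothetical helper, not the library implementation):
+# the statistics encoded in the cases above are assumed to follow the
+# Cressie-Read power divergence family,
+#     2 / (lambda*(lambda + 1)) * sum(obs * ((obs/exp)**lambda - 1)),
+# with the log-likelihood (lambda -> 0) and modified log-likelihood
+# (lambda -> -1) limits written out explicitly.  For example,
+# _power_divergence_reference([4, 8, 12, 8], lambda_=2/3) reproduces
+# power_div_1d_cases[0].cr.
+def _power_divergence_reference(f_obs, f_exp=None, lambda_=1):
+    f_obs = np.asarray(f_obs, dtype=float)
+    if f_exp is None:
+        # Default expected frequencies: uniform with the same total.
+        f_exp = np.full_like(f_obs, f_obs.mean())
+    else:
+        f_exp = np.broadcast_to(np.asarray(f_exp, dtype=float), f_obs.shape)
+    if lambda_ == 0:    # G-test (log-likelihood ratio)
+        return 2 * np.sum(f_obs * np.log(f_obs / f_exp))
+    if lambda_ == -1:   # modified log-likelihood ratio
+        return 2 * np.sum(f_exp * np.log(f_exp / f_obs))
+    return 2 / (lambda_ * (lambda_ + 1)) * np.sum(
+        f_obs * ((f_obs / f_exp)**lambda_ - 1))
+
+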
+power_div_empty_cases = [
+    # Shape is (0,)--a data set with length 0.  The computed
+    # test statistic should be 0.
+    PowerDivCase(f_obs=[],
+                 f_exp=None, ddof=0, axis=0,
+                 chi2=0, log=0, mod_log=0, cr=0),
+    # Shape is (0, 3).  This is 3 data sets, but each data set has
+    # length 0, so the computed test statistic should be [0, 0, 0].
+    PowerDivCase(f_obs=np.array([[],[],[]]).T,
+                 f_exp=None, ddof=0, axis=0,
+                 chi2=[0, 0, 0],
+                 log=[0, 0, 0],
+                 mod_log=[0, 0, 0],
+                 cr=[0, 0, 0]),
+    # Shape is (3, 0).  This represents an empty collection of
+    # data sets in which each data set has length 3.  The test
+    # statistic should be an empty array.
+    PowerDivCase(f_obs=np.array([[],[],[]]),
+                 f_exp=None, ddof=0, axis=0,
+                 chi2=[],
+                 log=[],
+                 mod_log=[],
+                 cr=[]),
+]
+
+
+class TestPowerDivergence:
+
+    def check_power_divergence(self, f_obs, f_exp, ddof, axis, lambda_,
+                               expected_stat):
+        f_obs = np.asarray(f_obs)
+        if axis is None:
+            num_obs = f_obs.size
+        else:
+            b = np.broadcast(f_obs, f_exp)
+            num_obs = b.shape[axis]
+
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "Mean of empty slice")
+            stat, p = stats.power_divergence(
+                                f_obs=f_obs, f_exp=f_exp, ddof=ddof,
+                                axis=axis, lambda_=lambda_)
+            assert_allclose(stat, expected_stat)
+
+            if lambda_ == 1 or lambda_ == "pearson":
+                # Also test stats.chisquare.
+                stat, p = stats.chisquare(f_obs=f_obs, f_exp=f_exp, ddof=ddof,
+                                          axis=axis)
+                assert_allclose(stat, expected_stat)
+
+        ddof = np.asarray(ddof)
+        expected_p = stats.distributions.chi2.sf(expected_stat,
+                                                 num_obs - 1 - ddof)
+        assert_allclose(p, expected_p)
+
+    def test_basic(self):
+        for case in power_div_1d_cases:
+            self.check_power_divergence(
+                   case.f_obs, case.f_exp, case.ddof, case.axis,
+                   None, case.chi2)
+            self.check_power_divergence(
+                   case.f_obs, case.f_exp, case.ddof, case.axis,
+                   "pearson", case.chi2)
+            self.check_power_divergence(
+                   case.f_obs, case.f_exp, case.ddof, case.axis,
+                   1, case.chi2)
+            self.check_power_divergence(
+                   case.f_obs, case.f_exp, case.ddof, case.axis,
+                   "log-likelihood", case.log)
+            self.check_power_divergence(
+                   case.f_obs, case.f_exp, case.ddof, case.axis,
+                   "mod-log-likelihood", case.mod_log)
+            self.check_power_divergence(
+                   case.f_obs, case.f_exp, case.ddof, case.axis,
+                   "cressie-read", case.cr)
+            self.check_power_divergence(
+                   case.f_obs, case.f_exp, case.ddof, case.axis,
+                   2/3, case.cr)
+
+    def test_basic_masked(self):
+        for case in power_div_1d_cases:
+            mobs = np.ma.array(case.f_obs)
+            self.check_power_divergence(
+                   mobs, case.f_exp, case.ddof, case.axis,
+                   None, case.chi2)
+            self.check_power_divergence(
+                   mobs, case.f_exp, case.ddof, case.axis,
+                   "pearson", case.chi2)
+            self.check_power_divergence(
+                   mobs, case.f_exp, case.ddof, case.axis,
+                   1, case.chi2)
+            self.check_power_divergence(
+                   mobs, case.f_exp, case.ddof, case.axis,
+                   "log-likelihood", case.log)
+            self.check_power_divergence(
+                   mobs, case.f_exp, case.ddof, case.axis,
+                   "mod-log-likelihood", case.mod_log)
+            self.check_power_divergence(
+                   mobs, case.f_exp, case.ddof, case.axis,
+                   "cressie-read", case.cr)
+            self.check_power_divergence(
+                   mobs, case.f_exp, case.ddof, case.axis,
+                   2/3, case.cr)
+
+    def test_axis(self):
+        case0 = power_div_1d_cases[0]
+        case1 = power_div_1d_cases[1]
+        f_obs = np.vstack((case0.f_obs, case1.f_obs))
+        f_exp = np.vstack((np.ones_like(case0.f_obs)*np.mean(case0.f_obs),
+                           case1.f_exp))
+        # Check the four computational code paths in power_divergence
+        # using a 2D array with axis=1.
+        self.check_power_divergence(
+               f_obs, f_exp, 0, 1,
+               "pearson", [case0.chi2, case1.chi2])
+        self.check_power_divergence(
+               f_obs, f_exp, 0, 1,
+               "log-likelihood", [case0.log, case1.log])
+        self.check_power_divergence(
+               f_obs, f_exp, 0, 1,
+               "mod-log-likelihood", [case0.mod_log, case1.mod_log])
+        self.check_power_divergence(
+               f_obs, f_exp, 0, 1,
+               "cressie-read", [case0.cr, case1.cr])
+        # Reshape case0.f_obs to shape (2,2), and use axis=None.
+        # The result should be the same.
+        self.check_power_divergence(
+               np.array(case0.f_obs).reshape(2, 2), None, 0, None,
+               "pearson", case0.chi2)
+
+    def test_ddof_broadcasting(self):
+        # Test that ddof broadcasts correctly.
+        # ddof does not affect the test statistic.  It is broadcast
+        # with the computed test statistic for the computation of
+        # the p value.
+
+        case0 = power_div_1d_cases[0]
+        case1 = power_div_1d_cases[1]
+        # Create 4x2 arrays of observed and expected frequencies.
+        f_obs = np.vstack((case0.f_obs, case1.f_obs)).T
+        f_exp = np.vstack((np.ones_like(case0.f_obs)*np.mean(case0.f_obs),
+                           case1.f_exp)).T
+
+        expected_chi2 = [case0.chi2, case1.chi2]
+
+        # ddof has shape (2, 1).  This is broadcast with the computed
+        # statistic, so p will have shape (2,2).
+        ddof = np.array([[0], [1]])
+
+        stat, p = stats.power_divergence(f_obs, f_exp, ddof=ddof)
+        assert_allclose(stat, expected_chi2)
+
+        # Compute the p values separately, passing in scalars for ddof.
+        stat0, p0 = stats.power_divergence(f_obs, f_exp, ddof=ddof[0,0])
+        stat1, p1 = stats.power_divergence(f_obs, f_exp, ddof=ddof[1,0])
+
+        assert_array_equal(p, np.vstack((p0, p1)))
+
+    def test_empty_cases(self):
+        with warnings.catch_warnings():
+            for case in power_div_empty_cases:
+                self.check_power_divergence(
+                       case.f_obs, case.f_exp, case.ddof, case.axis,
+                       "pearson", case.chi2)
+                self.check_power_divergence(
+                       case.f_obs, case.f_exp, case.ddof, case.axis,
+                       "log-likelihood", case.log)
+                self.check_power_divergence(
+                       case.f_obs, case.f_exp, case.ddof, case.axis,
+                       "mod-log-likelihood", case.mod_log)
+                self.check_power_divergence(
+                       case.f_obs, case.f_exp, case.ddof, case.axis,
+                       "cressie-read", case.cr)
+
+    def test_power_divergence_result_attributes(self):
+        f_obs = power_div_1d_cases[0].f_obs
+        f_exp = power_div_1d_cases[0].f_exp
+        ddof = power_div_1d_cases[0].ddof
+        axis = power_div_1d_cases[0].axis
+
+        res = stats.power_divergence(f_obs=f_obs, f_exp=f_exp, ddof=ddof,
+                                     axis=axis, lambda_="pearson")
+        attributes = ('statistic', 'pvalue')
+        check_named_results(res, attributes)
+
+    def test_power_divergence_gh_12282(self):
+        # The sums of observed and expected frequencies must match
+        f_obs = np.array([[10, 20], [30, 20]])
+        f_exp = np.array([[5, 15], [35, 25]])
+        with assert_raises(ValueError, match='For each axis slice...'):
+            stats.power_divergence(f_obs=[10, 20], f_exp=[30, 60])
+        with assert_raises(ValueError, match='For each axis slice...'):
+            stats.power_divergence(f_obs=f_obs, f_exp=f_exp, axis=1)
+        stat, pval = stats.power_divergence(f_obs=f_obs, f_exp=f_exp)
+        assert_allclose(stat, [5.71428571, 2.66666667])
+        assert_allclose(pval, [0.01682741, 0.10247043])
+
+
+def test_gh_chisquare_12282():
+    # Currently `chisquare` is implemented via power_divergence;
+    # in case that ever changes, perform a basic test like
+    # test_power_divergence_gh_12282.
+    with assert_raises(ValueError, match='For each axis slice...'):
+        stats.chisquare(f_obs=[10, 20], f_exp=[30, 60])
+
+
+@pytest.mark.parametrize("n, dtype", [(200, np.uint8), (1000000, np.int32)])
+def test_chisquare_data_types(n, dtype):
+    # Regression test for gh-10159.
+    obs = np.array([n, 0], dtype=dtype)
+    exp = np.array([n // 2, n // 2], dtype=dtype)
+    stat, p = stats.chisquare(obs, exp)
+    assert_allclose(stat, n, rtol=1e-13)
+
+
+def test_chisquare_masked_arrays():
+    # Test masked arrays.
+    obs = np.array([[8, 8, 16, 32, -1], [-1, -1, 3, 4, 5]]).T
+    mask = np.array([[0, 0, 0, 0, 1], [1, 1, 0, 0, 0]]).T
+    mobs = np.ma.masked_array(obs, mask)
+    expected_chisq = np.array([24.0, 0.5])
+    expected_g = np.array([2*(2*8*np.log(0.5) + 32*np.log(2.0)),
+                           2*(3*np.log(0.75) + 5*np.log(1.25))])
+
+    chi2 = stats.distributions.chi2
+
+    chisq, p = stats.chisquare(mobs)
+    mat.assert_array_equal(chisq, expected_chisq)
+    mat.assert_array_almost_equal(p, chi2.sf(expected_chisq,
+                                             mobs.count(axis=0) - 1))
+
+    g, p = stats.power_divergence(mobs, lambda_='log-likelihood')
+    mat.assert_array_almost_equal(g, expected_g, decimal=15)
+    mat.assert_array_almost_equal(p, chi2.sf(expected_g,
+                                             mobs.count(axis=0) - 1))
+
+    chisq, p = stats.chisquare(mobs.T, axis=1)
+    mat.assert_array_equal(chisq, expected_chisq)
+    mat.assert_array_almost_equal(p, chi2.sf(expected_chisq,
+                                             mobs.T.count(axis=1) - 1))
+    g, p = stats.power_divergence(mobs.T, axis=1, lambda_="log-likelihood")
+    mat.assert_array_almost_equal(g, expected_g, decimal=15)
+    mat.assert_array_almost_equal(p, chi2.sf(expected_g,
+                                             mobs.count(axis=0) - 1))
+
+    obs1 = np.ma.array([3, 5, 6, 99, 10], mask=[0, 0, 0, 1, 0])
+    exp1 = np.ma.array([2, 4, 8, 10, 99], mask=[0, 0, 0, 0, 1])
+    chi2, p = stats.chisquare(obs1, f_exp=exp1)
+    # Because of the mask at index 3 of obs1 and at index 4 of exp1,
+    # only the first three elements are included in the calculation
+    # of the statistic.
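+    # That is, (3-2)**2/2 + (5-4)**2/4 + (6-8)**2/8 = 1/2 + 1/4 + 4/8.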
+    mat.assert_array_equal(chi2, 1/2 + 1/4 + 4/8)
+
+    # When axis=None, the two values should have type np.float64.
+    chisq, p = stats.chisquare(np.ma.array([1,2,3]), axis=None)
+    assert_(isinstance(chisq, np.float64))
+    assert_(isinstance(p, np.float64))
+    assert_equal(chisq, 1.0)
+    assert_almost_equal(p, stats.distributions.chi2.sf(1.0, 2))
+
+    # Empty arrays:
+    # A data set with length 0 returns a masked scalar.
+    with np.errstate(invalid='ignore'):
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "Mean of empty slice")
+            chisq, p = stats.chisquare(np.ma.array([]))
+    assert_(isinstance(chisq, np.ma.MaskedArray))
+    assert_equal(chisq.shape, ())
+    assert_(chisq.mask)
+
+    empty3 = np.ma.array([[],[],[]])
+
+    # empty3 is a collection of 0 data sets (whose lengths would be 3, if
+    # there were any), so the return value is an array with length 0.
+    chisq, p = stats.chisquare(empty3)
+    assert_(isinstance(chisq, np.ma.MaskedArray))
+    mat.assert_array_equal(chisq, [])
+
+    # empty3.T is an array containing 3 data sets, each with length 0,
+    # so an array of size (3,) is returned, with all values masked.
+    with np.errstate(invalid='ignore'):
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "Mean of empty slice")
+            chisq, p = stats.chisquare(empty3.T)
+
+    assert_(isinstance(chisq, np.ma.MaskedArray))
+    assert_equal(chisq.shape, (3,))
+    assert_(np.all(chisq.mask))
+
+
+def test_power_divergence_against_cressie_read_data():
+    # Test stats.power_divergence against tables 4 and 5 from
+    # Cressie and Read, "Multimonial Goodness-of-Fit Tests",
+    # J. R. Statist. Soc. B (1984), Vol 46, No. 3, pp. 440-464.
+    # This tests the calculation for several values of lambda.
+
+    # Table 4 data recalculated for greater precision according to:
+    # Shelby J. Haberman, Analysis of Qualitative Data: Volume 1
+    # Introductory Topics, Academic Press, New York, USA (1978).
+    obs = np.array([15, 11, 14, 17, 5, 11, 10, 4, 8,
+                    10, 7, 9, 11, 3, 6, 1, 1, 4])
+    beta = -0.083769  # Haberman (1978), p. 15
+    i = np.arange(1, len(obs) + 1)
+    alpha = np.log(obs.sum() / np.exp(beta*i).sum())
+    expected_counts = np.exp(alpha + beta*i)
+
+    # `table4` holds just the second and third columns from Table 4.
+    table4 = np.vstack((obs, expected_counts)).T
+
+    table5 = np.array([
+        # lambda, statistic
+        -10.0, 72.2e3,
+        -5.0, 28.9e1,
+        -3.0, 65.6,
+        -2.0, 40.6,
+        -1.5, 34.0,
+        -1.0, 29.5,
+        -0.5, 26.5,
+        0.0, 24.6,
+        0.5, 23.4,
+        0.67, 23.1,
+        1.0, 22.7,
+        1.5, 22.6,
+        2.0, 22.9,
+        3.0, 24.8,
+        5.0, 35.5,
+        10.0, 21.4e1,
+        ]).reshape(-1, 2)
+
+    for lambda_, expected_stat in table5:
+        stat, p = stats.power_divergence(table4[:,0], table4[:,1],
+                                         lambda_=lambda_)
+        assert_allclose(stat, expected_stat, rtol=5e-3)
+
+
+def test_friedmanchisquare():
+    # see ticket:113
+    # verified with matlab and R
+    # From Demsar, "Statistical Comparisons of Classifiers over Multiple
+    # Data Sets", 2006. Xf=9.28 (without tie handling; tie-corrected Xf >= 9.28)
+    x1 = [array([0.763, 0.599, 0.954, 0.628, 0.882, 0.936, 0.661, 0.583,
+                 0.775, 1.0, 0.94, 0.619, 0.972, 0.957]),
+          array([0.768, 0.591, 0.971, 0.661, 0.888, 0.931, 0.668, 0.583,
+                 0.838, 1.0, 0.962, 0.666, 0.981, 0.978]),
+          array([0.771, 0.590, 0.968, 0.654, 0.886, 0.916, 0.609, 0.563,
+                 0.866, 1.0, 0.965, 0.614, 0.9751, 0.946]),
+          array([0.798, 0.569, 0.967, 0.657, 0.898, 0.931, 0.685, 0.625,
+                 0.875, 1.0, 0.962, 0.669, 0.975, 0.970])]
+
+    # From "Bioestadistica para las ciencias de la salud" Xf=18.95 p<0.001:
+    x2 = [array([4,3,5,3,5,3,2,5,4,4,4,3]),
+          array([2,2,1,2,3,1,2,3,2,1,1,3]),
+          array([2,4,3,3,4,3,3,4,4,1,2,1]),
+          array([3,5,4,3,4,4,3,3,3,4,4,4])]
+
+    # From Jerrold H. Zar, "Biostatistical Analysis" (example 12.6),
+    # Xf=10.68, 0.005 < p < 0.01:
+    # The probability for this example is inexact when the chi-squared
+    # approximation of the Friedman statistic is used.
+    x3 = [array([7.0,9.9,8.5,5.1,10.3]),
+          array([5.3,5.7,4.7,3.5,7.7]),
+          array([4.9,7.6,5.5,2.8,8.4]),
+          array([8.8,8.9,8.1,3.3,9.1])]
+
+    assert_array_almost_equal(stats.friedmanchisquare(x1[0],x1[1],x1[2],x1[3]),
+                              (10.2283464566929, 0.0167215803284414))
+    assert_array_almost_equal(stats.friedmanchisquare(x2[0],x2[1],x2[2],x2[3]),
+                              (18.9428571428571, 0.000280938375189499))
+    assert_array_almost_equal(stats.friedmanchisquare(x3[0],x3[1],x3[2],x3[3]),
+                              (10.68, 0.0135882729582176))
+    assert_raises(ValueError, stats.friedmanchisquare,x3[0],x3[1])
+
+    # test for namedtuple attribute results
+    attributes = ('statistic', 'pvalue')
+    res = stats.friedmanchisquare(*x1)
+    check_named_results(res, attributes)
+
+    # test using mstats
+    assert_array_almost_equal(mstats.friedmanchisquare(x1[0], x1[1],
+                                                       x1[2], x1[3]),
+                              (10.2283464566929, 0.0167215803284414))
+    # the following fails
+    # assert_array_almost_equal(mstats.friedmanchisquare(x2[0],x2[1],x2[2],x2[3]),
+    #                           (18.9428571428571, 0.000280938375189499))
+    assert_array_almost_equal(mstats.friedmanchisquare(x3[0], x3[1],
+                                                       x3[2], x3[3]),
+                              (10.68, 0.0135882729582176))
+    assert_raises(ValueError, mstats.friedmanchisquare,x3[0],x3[1])
+
+
+class TestKSTest:
+    """Tests kstest and ks_1samp agree with K-S various sizes, alternatives, modes."""
+
+    def _testOne(self, x, alternative, expected_statistic, expected_prob, mode='auto', decimal=14):
+        result = stats.kstest(x, 'norm', alternative=alternative, mode=mode)
+        expected = np.array([expected_statistic, expected_prob])
+        assert_array_almost_equal(np.array(result), expected, decimal=decimal)
+
+    def _test_kstest_and_ks1samp(self, x, alternative, mode='auto', decimal=14):
+        result = stats.kstest(x, 'norm', alternative=alternative, mode=mode)
+        result_1samp = stats.ks_1samp(x, stats.norm.cdf, alternative=alternative, mode=mode)
+        assert_array_almost_equal(np.array(result), result_1samp, decimal=decimal)
+
+    def test_namedtuple_attributes(self):
+        x = np.linspace(-1, 1, 9)
+        # test for namedtuple attribute results
+        attributes = ('statistic', 'pvalue')
+        res = stats.kstest(x, 'norm')
+        check_named_results(res, attributes)
+
+    def test_agree_with_ks_1samp(self):
+        x = np.linspace(-1, 1, 9)
+        self._test_kstest_and_ks1samp(x, 'two-sided')
+
+        x = np.linspace(-15, 15, 9)
+        self._test_kstest_and_ks1samp(x, 'two-sided')
+
+        x = [-1.23, 0.06, -0.60, 0.17, 0.66, -0.17, -0.08, 0.27, -0.98, -0.99]
+        self._test_kstest_and_ks1samp(x, 'two-sided')
+        self._test_kstest_and_ks1samp(x, 'greater', mode='exact')
+        self._test_kstest_and_ks1samp(x, 'less', mode='exact')
+
+    # missing: no test that uses *args
+
+
+class TestKSOneSample:
+    """Tests kstest and ks_samp 1-samples with K-S various sizes, alternatives, modes."""
+
+    def _testOne(self, x, alternative, expected_statistic, expected_prob, mode='auto', decimal=14):
+        result = stats.ks_1samp(x, stats.norm.cdf, alternative=alternative, mode=mode)
+        expected = np.array([expected_statistic, expected_prob])
+        assert_array_almost_equal(np.array(result), expected, decimal=decimal)
+
+    def test_namedtuple_attributes(self):
+        x = np.linspace(-1, 1, 9)
+        # test for namedtuple attribute results
+        attributes = ('statistic', 'pvalue')
+        res = stats.ks_1samp(x, stats.norm.cdf)
+        check_named_results(res, attributes)
+
+    def test_agree_with_r(self):
+        # comparing with some values from R
+        x = np.linspace(-1, 1, 9)
+        self._testOne(x, 'two-sided', 0.15865525393145705, 0.95164069201518386)
+
+        x = np.linspace(-15, 15, 9)
+        self._testOne(x, 'two-sided', 0.44435602715924361, 0.038850140086788665)
+
+        x = [-1.23, 0.06, -0.60, 0.17, 0.66, -0.17, -0.08, 0.27, -0.98, -0.99]
+        self._testOne(x, 'two-sided', 0.293580126801961, 0.293408463684361)
+        self._testOne(x, 'greater', 0.293580126801961, 0.146988835042376, mode='exact')
+        self._testOne(x, 'less', 0.109348552425692, 0.732768892470675, mode='exact')
+
+    def test_known_examples(self):
+        # the following tests rely on deterministically replicated rvs
+        x = stats.norm.rvs(loc=0.2, size=100, random_state=987654321)
+        self._testOne(x, 'two-sided', 0.12464329735846891, 0.089444888711820769, mode='asymp')
+        self._testOne(x, 'less', 0.12464329735846891, 0.040989164077641749)
+        self._testOne(x, 'greater', 0.0072115233216310994, 0.98531158590396228)
+
+    def test_ks1samp_allpaths(self):
+        # Check NaN input, output.
+        assert_(np.isnan(kolmogn(np.nan, 1, True)))
+        with assert_raises(ValueError, match='n is not integral: 1.5'):
+            kolmogn(1.5, 1, True)
+        assert_(np.isnan(kolmogn(-1, 1, True)))
+
+        dataset = np.asarray([
+            # Check x out of range
+            (101, 1, True, 1.0),
+            (101, 1.1, True, 1.0),
+            (101, 0, True, 0.0),
+            (101, -0.1, True, 0.0),
+
+            (32, 1.0 / 64, True, 0.0),  # Ruben-Gambino
+            (32, 1.0 / 64, False, 1.0),  # Ruben-Gambino
+
+            (32, 0.5, True, 0.9999999363163307),  # Miller
+            (32, 0.5, False, 6.368366937916623e-08),  # Miller 2 * special.smirnov(32, 0.5)
+
+            # Check some other paths
+            (32, 1.0 / 8, True, 0.34624229979775223),
+            (32, 1.0 / 4, True, 0.9699508336558085),
+            (1600, 0.49, False, 0.0),
+            (1600, 1 / 16.0, False, 7.0837876229702195e-06),  # 2 * special.smirnov(1600, 1/16.0)
+            (1600, 14 / 1600, False, 0.99962357317602),  # _kolmogn_DMTW
+            (1600, 1 / 32, False, 0.08603386296651416),  # _kolmogn_PelzGood
+        ])
+        FuncData(kolmogn, dataset, (0, 1, 2), 3).check(dtypes=[int, float, bool])
+
+    @pytest.mark.parametrize("ksfunc", [stats.kstest, stats.ks_1samp])
+    @pytest.mark.parametrize("alternative, x6val, ref_location, ref_sign",
+                             [('greater', 6, 6, +1),
+                              ('less', 7, 7, -1),
+                              ('two-sided', 6, 6, +1),
+                              ('two-sided', 7, 7, -1)])
+    def test_location_sign(self, ksfunc, alternative,
+                           x6val, ref_location, ref_sign):
+        # Test that location and sign corresponding with statistic are as
+        # expected. (Test is designed to be easy to predict.)
+        x = np.arange(10) + 0.5
+        x[6] = x6val
+        cdf = stats.uniform(scale=10).cdf
+        res = ksfunc(x, cdf, alternative=alternative)
+        assert_allclose(res.statistic, 0.1, rtol=1e-15)
+        assert res.statistic_location == ref_location
+        assert res.statistic_sign == ref_sign
+
+    # missing: no test that uses *args
+
+
+class TestKSTwoSamples:
+    """Tests 2-samples with K-S various sizes, alternatives, modes."""
+
+    def _testOne(self, x1, x2, alternative, expected_statistic, expected_prob, mode='auto'):
+        result = stats.ks_2samp(x1, x2, alternative, mode=mode)
+        expected = np.array([expected_statistic, expected_prob])
+        assert_array_almost_equal(np.array(result), expected)
+
+    def testSmall(self):
+        self._testOne([0], [1], 'two-sided', 1.0/1, 1.0)
+        self._testOne([0], [1], 'greater', 1.0/1, 0.5)
+        self._testOne([0], [1], 'less', 0.0/1, 1.0)
+        self._testOne([1], [0], 'two-sided', 1.0/1, 1.0)
+        self._testOne([1], [0], 'greater', 0.0/1, 1.0)
+        self._testOne([1], [0], 'less', 1.0/1, 0.5)
+
+    def testTwoVsThree(self):
+        data1 = np.array([1.0, 2.0])
+        data1p = data1 + 0.01
+        data1m = data1 - 0.01
+        data2 = np.array([1.0, 2.0, 3.0])
+        self._testOne(data1p, data2, 'two-sided', 1.0 / 3, 1.0)
+        self._testOne(data1p, data2, 'greater', 1.0 / 3, 0.7)
+        self._testOne(data1p, data2, 'less', 1.0 / 3, 0.7)
+        self._testOne(data1m, data2, 'two-sided', 2.0 / 3, 0.6)
+        self._testOne(data1m, data2, 'greater', 2.0 / 3, 0.3)
+        self._testOne(data1m, data2, 'less', 0, 1.0)
+
+    def testTwoVsFour(self):
+        data1 = np.array([1.0, 2.0])
+        data1p = data1 + 0.01
+        data1m = data1 - 0.01
+        data2 = np.array([1.0, 2.0, 3.0, 4.0])
+        self._testOne(data1p, data2, 'two-sided', 2.0 / 4, 14.0/15)
+        self._testOne(data1p, data2, 'greater', 2.0 / 4, 8.0/15)
+        self._testOne(data1p, data2, 'less', 1.0 / 4, 12.0/15)
+
+        self._testOne(data1m, data2, 'two-sided', 3.0 / 4, 6.0/15)
+        self._testOne(data1m, data2, 'greater', 3.0 / 4, 3.0/15)
+        self._testOne(data1m, data2, 'less', 0, 1.0)
+
+    def test100_100(self):
+        x100 = np.linspace(1, 100, 100)
+        x100_2_p1 = x100 + 2 + 0.1
+        x100_2_m1 = x100 + 2 - 0.1
+        self._testOne(x100, x100_2_p1, 'two-sided', 3.0 / 100, 0.9999999999962055)
+        self._testOne(x100, x100_2_p1, 'greater', 3.0 / 100, 0.9143290114276248)
+        self._testOne(x100, x100_2_p1, 'less', 0, 1.0)
+        self._testOne(x100, x100_2_m1, 'two-sided', 2.0 / 100, 1.0)
+        self._testOne(x100, x100_2_m1, 'greater', 2.0 / 100, 0.960978450786184)
+        self._testOne(x100, x100_2_m1, 'less', 0, 1.0)
+
+    def test100_110(self):
+        x100 = np.linspace(1, 100, 100)
+        x110 = np.linspace(1, 100, 110)
+        x110_20_p1 = x110 + 20 + 0.1
+        x110_20_m1 = x110 + 20 - 0.1
+        # 100, 110
+        self._testOne(x100, x110_20_p1, 'two-sided', 232.0 / 1100, 0.015739183865607353)
+        self._testOne(x100, x110_20_p1, 'greater', 232.0 / 1100, 0.007869594319053203)
+        self._testOne(x100, x110_20_p1, 'less', 0, 1)
+        self._testOne(x100, x110_20_m1, 'two-sided', 229.0 / 1100, 0.017803803861026313)
+        self._testOne(x100, x110_20_m1, 'greater', 229.0 / 1100, 0.008901905958245056)
+        self._testOne(x100, x110_20_m1, 'less', 0.0, 1.0)
+
+    def testRepeatedValues(self):
+        x2233 = np.array([2] * 3 + [3] * 4 + [5] * 5 + [6] * 4, dtype=int)
+        x3344 = x2233 + 1
+        x2356 = np.array([2] * 3 + [3] * 4 + [5] * 10 + [6] * 4, dtype=int)
+        x3467 = np.array([3] * 10 + [4] * 2 + [6] * 10 + [7] * 4, dtype=int)
+        self._testOne(x2233, x3344, 'two-sided', 5.0/16, 0.4262934613454952)
+        self._testOne(x2233, x3344, 'greater', 5.0/16, 0.21465428276573786)
+        self._testOne(x2233, x3344, 'less', 0.0/16, 1.0)
+        self._testOne(x2356, x3467, 'two-sided', 190.0/21/26, 0.0919245790168125)
+        self._testOne(x2356, x3467, 'greater', 190.0/21/26, 0.0459633806858544)
+        self._testOne(x2356, x3467, 'less', 70.0/21/26, 0.6121593130022775)
+
+    def testEqualSizes(self):
+        data2 = np.array([1.0, 2.0, 3.0])
+        self._testOne(data2, data2+1, 'two-sided', 1.0/3, 1.0)
+        self._testOne(data2, data2+1, 'greater', 1.0/3, 0.75)
+        self._testOne(data2, data2+1, 'less', 0.0/3, 1.)
+        self._testOne(data2, data2+0.5, 'two-sided', 1.0/3, 1.0)
+        self._testOne(data2, data2+0.5, 'greater', 1.0/3, 0.75)
+        self._testOne(data2, data2+0.5, 'less', 0.0/3, 1.)
+        self._testOne(data2, data2-0.5, 'two-sided', 1.0/3, 1.0)
+        self._testOne(data2, data2-0.5, 'greater', 0.0/3, 1.0)
+        self._testOne(data2, data2-0.5, 'less', 1.0/3, 0.75)
+
+    @pytest.mark.slow
+    def testMiddlingBoth(self):
+        # 500, 600
+        n1, n2 = 500, 600
+        delta = 1.0/n1/n2/2/2
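+        # delta is a tiny offset, far smaller than the sample spacing,
+        # presumably so that x and y share no exactly-tied values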
+        x = np.linspace(1, 200, n1) - delta
+        y = np.linspace(2, 200, n2)
+        self._testOne(x, y, 'two-sided', 2000.0 / n1 / n2, 1.0, mode='auto')
+        self._testOne(x, y, 'two-sided', 2000.0 / n1 / n2, 1.0, mode='asymp')
+        self._testOne(x, y, 'greater', 2000.0 / n1 / n2, 0.9697596024683929, mode='asymp')
+        self._testOne(x, y, 'less', 500.0 / n1 / n2, 0.9968735843165021, mode='asymp')
+        with suppress_warnings() as sup:
+            message = "ks_2samp: Exact calculation unsuccessful."
+            sup.filter(RuntimeWarning, message)
+            self._testOne(x, y, 'greater', 2000.0 / n1 / n2, 0.9697596024683929, mode='exact')
+            self._testOne(x, y, 'less', 500.0 / n1 / n2, 0.9968735843165021, mode='exact')
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter("always")
+            self._testOne(x, y, 'less', 500.0 / n1 / n2, 0.9968735843165021, mode='exact')
+            _check_warnings(w, RuntimeWarning, 1)
+
+    @pytest.mark.slow
+    def testMediumBoth(self):
+        # 1000, 1100
+        n1, n2 = 1000, 1100
+        delta = 1.0/n1/n2/2/2
+        x = np.linspace(1, 200, n1) - delta
+        y = np.linspace(2, 200, n2)
+        self._testOne(x, y, 'two-sided', 6600.0 / n1 / n2, 1.0, mode='asymp')
+        self._testOne(x, y, 'two-sided', 6600.0 / n1 / n2, 1.0, mode='auto')
+        self._testOne(x, y, 'greater', 6600.0 / n1 / n2, 0.9573185808092622, mode='asymp')
+        self._testOne(x, y, 'less', 1000.0 / n1 / n2, 0.9982410869433984, mode='asymp')
+
+        with suppress_warnings() as sup:
+            message = "ks_2samp: Exact calculation unsuccessful."
+            sup.filter(RuntimeWarning, message)
+            self._testOne(x, y, 'greater', 6600.0 / n1 / n2, 0.9573185808092622, mode='exact')
+            self._testOne(x, y, 'less', 1000.0 / n1 / n2, 0.9982410869433984, mode='exact')
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter("always")
+            self._testOne(x, y, 'less', 1000.0 / n1 / n2, 0.9982410869433984, mode='exact')
+            _check_warnings(w, RuntimeWarning, 1)
+
+    def testLarge(self):
+        # 10000, 110
+        n1, n2 = 10000, 110
+        lcm = n1*11.0
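+        # lcm(n1, n2) = lcm(10000, 110) = 110000 = n1 * 11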
+        delta = 1.0/n1/n2/2/2
+        x = np.linspace(1, 200, n1) - delta
+        y = np.linspace(2, 100, n2)
+        self._testOne(x, y, 'two-sided', 55275.0 / lcm, 4.2188474935755949e-15)
+        self._testOne(x, y, 'greater', 561.0 / lcm, 0.99115454582047591)
+        self._testOne(x, y, 'less', 55275.0 / lcm, 3.1317328311518713e-26)
+
+    def test_gh11184(self):
+        # 3000, 3001, exact two-sided
+        np.random.seed(123456)
+        x = np.random.normal(size=3000)
+        y = np.random.normal(size=3001) * 1.5
+        self._testOne(x, y, 'two-sided', 0.11292880151060758, 2.7755575615628914e-15, mode='asymp')
+        self._testOne(x, y, 'two-sided', 0.11292880151060758, 2.7755575615628914e-15, mode='exact')
+
+    @pytest.mark.xslow
+    def test_gh11184_bigger(self):
+        # 10000, 10001, exact two-sided
+        np.random.seed(123456)
+        x = np.random.normal(size=10000)
+        y = np.random.normal(size=10001) * 1.5
+        self._testOne(x, y, 'two-sided', 0.10597913208679133, 3.3149311398483503e-49, mode='asymp')
+        self._testOne(x, y, 'two-sided', 0.10597913208679133, 2.7755575615628914e-15, mode='exact')
+        self._testOne(x, y, 'greater', 0.10597913208679133, 2.7947433906389253e-41, mode='asymp')
+        self._testOne(x, y, 'less', 0.09658002199780022, 2.7947433906389253e-41, mode='asymp')
+
+    @pytest.mark.xslow
+    def test_gh12999(self):
+        np.random.seed(123456)
+        for x in range(1000, 12000, 1000):
+            vals1 = np.random.normal(size=(x))
+            vals2 = np.random.normal(size=(x + 10), loc=0.5)
+            exact = stats.ks_2samp(vals1, vals2, mode='exact').pvalue
+            asymp = stats.ks_2samp(vals1, vals2, mode='asymp').pvalue
+            # these two p-values should be in line with each other
+            assert_array_less(exact, 3 * asymp)
+            assert_array_less(asymp, 3 * exact)
+
+    @pytest.mark.slow
+    def testLargeBoth(self):
+        # 10000, 11000
+        n1, n2 = 10000, 11000
+        lcm = n1*11.0
+        delta = 1.0/n1/n2/2/2
+        x = np.linspace(1, 200, n1) - delta
+        y = np.linspace(2, 200, n2)
+        self._testOne(x, y, 'two-sided', 563.0 / lcm, 0.9990660108966576, mode='asymp')
+        self._testOne(x, y, 'two-sided', 563.0 / lcm, 0.9990456491488628, mode='exact')
+        self._testOne(x, y, 'two-sided', 563.0 / lcm, 0.9990660108966576, mode='auto')
+        self._testOne(x, y, 'greater', 563.0 / lcm, 0.7561851877420673)
+        self._testOne(x, y, 'less', 10.0 / lcm, 0.9998239693191724)
+        with suppress_warnings() as sup:
+            message = "ks_2samp: Exact calculation unsuccessful."
+            sup.filter(RuntimeWarning, message)
+            self._testOne(x, y, 'greater', 563.0 / lcm, 0.7561851877420673, mode='exact')
+            self._testOne(x, y, 'less', 10.0 / lcm, 0.9998239693191724, mode='exact')
+
+    def testNamedAttributes(self):
+        # test for namedtuple attribute results
+        attributes = ('statistic', 'pvalue')
+        res = stats.ks_2samp([1, 2], [3])
+        check_named_results(res, attributes)
+
+    @pytest.mark.slow
+    def test_some_code_paths(self):
+        # Check that some code paths are executed
+        from scipy.stats._stats_py import (
+            _count_paths_outside_method,
+            _compute_outer_prob_inside_method
+        )
+
+        _compute_outer_prob_inside_method(1, 1, 1, 1)
+        _count_paths_outside_method(1000, 1, 1, 1001)
+
+        with np.errstate(invalid='raise'):
+            assert_raises(FloatingPointError, _count_paths_outside_method,
+                          1100, 1099, 1, 1)
+            assert_raises(FloatingPointError, _count_paths_outside_method,
+                          2000, 1000, 1, 1)
+
+    def test_argument_checking(self):
+        # Check that an empty array causes a ValueError
+        assert_raises(ValueError, stats.ks_2samp, [], [1])
+        assert_raises(ValueError, stats.ks_2samp, [1], [])
+        assert_raises(ValueError, stats.ks_2samp, [], [])
+
+    @pytest.mark.slow
+    def test_gh12218(self):
+        """Ensure gh-12218 is fixed."""
+        # gh-12218 triggered a TypeError calculating sqrt(n1*n2*(n1+n2)).
+        # n1, n2 both large integers, the product exceeded 2^64
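+        # With n1 = n2 = 2**21, n1*n2*(n1+n2) = 2**42 * 2**22 = 2**64,
+        # which does not fit in 64-bit integer arithmetic.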
+        np.random.seed(12345678)
+        n1 = 2097152  # 2**21
+        rvs1 = stats.uniform.rvs(size=n1, loc=0., scale=1)
+        rvs2 = rvs1 + 1  # Exact value of rvs2 doesn't matter.
+        stats.ks_2samp(rvs1, rvs2, alternative='greater', mode='asymp')
+        stats.ks_2samp(rvs1, rvs2, alternative='less', mode='asymp')
+        stats.ks_2samp(rvs1, rvs2, alternative='two-sided', mode='asymp')
+
+    def test_warnings_gh_14019(self):
+        # Check that RuntimeWarning is raised when method='auto' and exact
+        # p-value calculation fails. See gh-14019.
+        rng = np.random.default_rng(abs(hash('test_warnings_gh_14019')))
+        # random samples of the same size as in the issue
+        data1 = rng.random(size=881) + 0.5
+        data2 = rng.random(size=369)
+        message = "ks_2samp: Exact calculation unsuccessful"
+        with pytest.warns(RuntimeWarning, match=message):
+            res = stats.ks_2samp(data1, data2, alternative='less')
+            assert_allclose(res.pvalue, 0, atol=1e-14)
+
+    @pytest.mark.parametrize("ksfunc", [stats.kstest, stats.ks_2samp])
+    @pytest.mark.parametrize("alternative, x6val, ref_location, ref_sign",
+                             [('greater', 5.9, 5.9, +1),
+                              ('less', 6.1, 6.0, -1),
+                              ('two-sided', 5.9, 5.9, +1),
+                              ('two-sided', 6.1, 6.0, -1)])
+    def test_location_sign(self, ksfunc, alternative,
+                           x6val, ref_location, ref_sign):
+        # Test that location and sign corresponding with statistic are as
+        # expected. (Test is designed to be easy to predict.)
+        x = np.arange(10, dtype=np.float64)
+        y = x.copy()
+        x[6] = x6val
+        res = stats.ks_2samp(x, y, alternative=alternative)
+        assert res.statistic == 0.1
+        assert res.statistic_location == ref_location
+        assert res.statistic_sign == ref_sign
+
+def test_ttest_rel():
+    # regression test
+    tr,pr = 0.81248591389165692, 0.41846234511362157
+    tpr = ([tr,-tr],[pr,pr])
+
+    rvs1 = np.linspace(1,100,100)
+    rvs2 = np.linspace(1.01,99.989,100)
+    rvs1_2D = np.array([np.linspace(1,100,100), np.linspace(1.01,99.989,100)])
+    rvs2_2D = np.array([np.linspace(1.01,99.989,100), np.linspace(1,100,100)])
+
+    t,p = stats.ttest_rel(rvs1, rvs2, axis=0)
+    assert_array_almost_equal([t,p],(tr,pr))
+    t,p = stats.ttest_rel(rvs1_2D.T, rvs2_2D.T, axis=0)
+    assert_array_almost_equal([t,p],tpr)
+    t,p = stats.ttest_rel(rvs1_2D, rvs2_2D, axis=1)
+    assert_array_almost_equal([t,p],tpr)
+
+    # test scalars
+    with suppress_warnings() as sup, np.errstate(invalid="ignore"), \
+            pytest.warns(RuntimeWarning, match="Precision loss occurred"):
+        sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice")
+        t, p = stats.ttest_rel(4., 3.)
+    assert_(np.isnan(t))
+    assert_(np.isnan(p))
+
+    # test for namedtuple attribute results
+    attributes = ('statistic', 'pvalue')
+    res = stats.ttest_rel(rvs1, rvs2, axis=0)
+    check_named_results(res, attributes)
+
+    # test on 3 dimensions
+    rvs1_3D = np.dstack([rvs1_2D,rvs1_2D,rvs1_2D])
+    rvs2_3D = np.dstack([rvs2_2D,rvs2_2D,rvs2_2D])
+    t,p = stats.ttest_rel(rvs1_3D, rvs2_3D, axis=1)
+    assert_array_almost_equal(np.abs(t), tr)
+    assert_array_almost_equal(np.abs(p), pr)
+    assert_equal(t.shape, (2, 3))
+
+    t, p = stats.ttest_rel(np.moveaxis(rvs1_3D, 2, 0),
+                           np.moveaxis(rvs2_3D, 2, 0),
+                           axis=2)
+    assert_array_almost_equal(np.abs(t), tr)
+    assert_array_almost_equal(np.abs(p), pr)
+    assert_equal(t.shape, (3, 2))
+
+    # test alternative parameter
+    assert_raises(ValueError, stats.ttest_rel, rvs1, rvs2, alternative="error")
+
+    t, p = stats.ttest_rel(rvs1, rvs2, axis=0, alternative="less")
+    assert_allclose(p, 1 - pr/2)
+    assert_allclose(t, tr)
+
+    t, p = stats.ttest_rel(rvs1, rvs2, axis=0, alternative="greater")
+    assert_allclose(p, pr/2)
+    assert_allclose(t, tr)
+
+    # check nan policy
+    rng = np.random.RandomState(12345678)
+    x = stats.norm.rvs(loc=5, scale=10, size=501, random_state=rng)
+    x[500] = np.nan
+    y = (stats.norm.rvs(loc=5, scale=10, size=501, random_state=rng) +
+         stats.norm.rvs(scale=0.2, size=501, random_state=rng))
+    y[500] = np.nan
+
+    with np.errstate(invalid="ignore"):
+        assert_array_equal(stats.ttest_rel(x, x), (np.nan, np.nan))
+
+    assert_array_almost_equal(stats.ttest_rel(x, y, nan_policy='omit'),
+                              (0.25299925303978066, 0.8003729814201519))
+    assert_raises(ValueError, stats.ttest_rel, x, y, nan_policy='raise')
+    assert_raises(ValueError, stats.ttest_rel, x, y, nan_policy='foobar')
+
+    # test zero division problem
+    with pytest.warns(RuntimeWarning, match="Precision loss occurred"):
+        t, p = stats.ttest_rel([0, 0, 0], [1, 1, 1])
+    assert_equal((np.abs(t), p), (np.inf, 0))
+    with np.errstate(invalid="ignore"):
+        assert_equal(stats.ttest_rel([0, 0, 0], [0, 0, 0]), (np.nan, np.nan))
+
+        # check that nans in the input array result in nan output
+        anan = np.array([[1, np.nan], [-1, 1]])
+        assert_equal(stats.ttest_rel(anan, np.zeros((2, 2))),
+                     ([0, np.nan], [1, np.nan]))
+
+    # test that an incorrect input shape raises an error
+    x = np.arange(24)
+    assert_raises(ValueError, stats.ttest_rel, x.reshape((8, 3)),
+                  x.reshape((2, 3, 4)))
+
+    # Convert two-sided p-values to one-sided using the sign of the
+    # t statistic.
+    def convert(t, p, alt):
+        if (t < 0 and alt == "less") or (t > 0 and alt == "greater"):
+            return p / 2
+        return 1 - (p / 2)
+    converter = np.vectorize(convert)
+
+    rvs1_2D[:, 20:30] = np.nan
+    rvs2_2D[:, 15:25] = np.nan
+
+    tr, pr = stats.ttest_rel(rvs1_2D, rvs2_2D, 0, nan_policy='omit')
+
+    t, p = stats.ttest_rel(rvs1_2D, rvs2_2D, 0, nan_policy='omit',
+                           alternative='less')
+    assert_allclose(t, tr, rtol=1e-14)
+    with np.errstate(invalid='ignore'):
+        assert_allclose(p, converter(tr, pr, 'less'), rtol=1e-14)
+
+    t, p = stats.ttest_rel(rvs1_2D, rvs2_2D, 0, nan_policy='omit',
+                           alternative='greater')
+    assert_allclose(t, tr, rtol=1e-14)
+    with np.errstate(invalid='ignore'):
+        assert_allclose(p, converter(tr, pr, 'greater'), rtol=1e-14)
+
+
+def test_ttest_rel_nan_2nd_arg():
+    # regression test for gh-6134: nans in the second arg were not handled
+    x = [np.nan, 2.0, 3.0, 4.0]
+    y = [1.0, 2.0, 1.0, 2.0]
+
+    r1 = stats.ttest_rel(x, y, nan_policy='omit')
+    r2 = stats.ttest_rel(y, x, nan_policy='omit')
+    assert_allclose(r2.statistic, -r1.statistic, atol=1e-15)
+    assert_allclose(r2.pvalue, r1.pvalue, atol=1e-15)
+
+    # NB: arguments are paired when NaNs are dropped
+    r3 = stats.ttest_rel(y[1:], x[1:])
+    assert_allclose(r2, r3, atol=1e-15)
+
+    # .. and this is consistent with R. R code:
+    # x = c(NA, 2.0, 3.0, 4.0)
+    # y = c(1.0, 2.0, 1.0, 2.0)
+    # t.test(x, y, paired=TRUE)
+    assert_allclose(r2, (-2, 0.1835), atol=1e-4)
+
+
+def test_ttest_rel_empty_1d_returns_nan():
+    # Two empty inputs should return a TtestResult containing nan
+    # for both values.
+    result = stats.ttest_rel([], [])
+    assert isinstance(result, stats._stats_py.TtestResult)
+    assert_equal(result, (np.nan, np.nan))
+
+
+@pytest.mark.parametrize('b, expected_shape',
+                         [(np.empty((1, 5, 0)), (3, 5)),
+                          (np.empty((1, 0, 0)), (3, 0))])
+def test_ttest_rel_axis_size_zero(b, expected_shape):
+    # In this test, the length of the axis dimension is zero.
+    # The results should be arrays containing nan with shape
+    # given by the broadcast nonaxis dimensions.
+    a = np.empty((3, 1, 0))
+    result = stats.ttest_rel(a, b, axis=-1)
+    assert isinstance(result, stats._stats_py.TtestResult)
+    expected_value = np.full(expected_shape, fill_value=np.nan)
+    assert_equal(result.statistic, expected_value)
+    assert_equal(result.pvalue, expected_value)
+
+
+def test_ttest_rel_nonaxis_size_zero():
+    # In this test, the length of the axis dimension is nonzero,
+    # but one of the nonaxis dimensions has length 0.  Check that
+    # we still get the correctly broadcast shape, which is (5, 0)
+    # in this case.
+    a = np.empty((1, 8, 0))
+    b = np.empty((5, 8, 1))
+    result = stats.ttest_rel(a, b, axis=1)
+    assert isinstance(result, stats._stats_py.TtestResult)
+    assert_equal(result.statistic.shape, (5, 0))
+    assert_equal(result.pvalue.shape, (5, 0))
+
+
+@pytest.mark.parametrize("alternative", ['two-sided', 'less', 'greater'])
+def test_ttest_rel_ci_1d(alternative):
+    # test confidence interval method against reference values
+    rng = np.random.default_rng(3749065329432213059)
+    n = 10
+    x = rng.normal(size=n, loc=1.5, scale=2)
+    y = rng.normal(size=n, loc=2, scale=2)
+    # Reference values generated with R t.test:
+    # options(digits=16)
+    # x = c(1.22825792,  1.63950485,  4.39025641,  0.68609437,  2.03813481,
+    #       -1.20040109,  1.81997937,  1.86854636,  2.94694282,  3.94291373)
+    # y = c(3.49961496, 1.53192536, 5.53620083, 2.91687718, 0.04858043,
+    #       3.78505943, 3.3077496 , 2.30468892, 3.42168074, 0.56797592)
+    # t.test(x, y, paired=TRUE, conf.level=0.85, alternative='l')
+
+    ref = {'two-sided': [-1.912194489914035, 0.400169725914035],
+           'greater': [-1.563944820311475, np.inf],
+           'less': [-np.inf, 0.05192005631147523]}
+    res = stats.ttest_rel(x, y, alternative=alternative)
+    ci = res.confidence_interval(confidence_level=0.85)
+    assert_allclose(ci, ref[alternative])
+    assert_equal(res.df, n-1)
+
+
+@pytest.mark.parametrize("test_fun, args",
+                         [(stats.ttest_1samp, (np.arange(10), 0)),
+                          (stats.ttest_rel, (np.arange(10), np.arange(10)))])
+def test_ttest_ci_iv(test_fun, args):
+    # test `confidence_interval` method input validation
+    res = test_fun(*args)
+    message = '`confidence_level` must be a number between 0 and 1.'
+    with pytest.raises(ValueError, match=message):
+        res.confidence_interval(confidence_level=10)
+
+
+def _desc_stats(x1, x2, axis=0):
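+    # Summary statistics (mean, std with ddof=1, nobs) for each of the two
+    # samples, in the argument order expected by stats.ttest_ind_from_stats.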
+    def _stats(x, axis=0):
+        x = np.asarray(x)
+        mu = np.mean(x, axis=axis)
+        std = np.std(x, axis=axis, ddof=1)
+        nobs = x.shape[axis]
+        return mu, std, nobs
+    return _stats(x1, axis) + _stats(x2, axis)
+
+
+def test_ttest_ind():
+    # regression test
+    tr = 1.0912746897927283
+    pr = 0.27647818616351882
+    tpr = ([tr,-tr],[pr,pr])
+
+    rvs2 = np.linspace(1,100,100)
+    rvs1 = np.linspace(5,105,100)
+    rvs1_2D = np.array([rvs1, rvs2])
+    rvs2_2D = np.array([rvs2, rvs1])
+
+    t,p = stats.ttest_ind(rvs1, rvs2, axis=0)
+    assert_array_almost_equal([t,p],(tr,pr))
+    # test from_stats API
+    assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(rvs1,
+                                                                      rvs2)),
+                              [t, p])
+    t,p = stats.ttest_ind(rvs1_2D.T, rvs2_2D.T, axis=0)
+    assert_array_almost_equal([t,p],tpr)
+    args = _desc_stats(rvs1_2D.T, rvs2_2D.T)
+    assert_array_almost_equal(stats.ttest_ind_from_stats(*args),
+                              [t, p])
+    t,p = stats.ttest_ind(rvs1_2D, rvs2_2D, axis=1)
+    assert_array_almost_equal([t,p],tpr)
+    args = _desc_stats(rvs1_2D, rvs2_2D, axis=1)
+    assert_array_almost_equal(stats.ttest_ind_from_stats(*args),
+                              [t, p])
+
+    # test scalars
+    with suppress_warnings() as sup, np.errstate(invalid="ignore"), \
+            pytest.warns(RuntimeWarning, match="Precision loss occurred"):
+        sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice")
+        t, p = stats.ttest_ind(4., 3.)
+    assert_(np.isnan(t))
+    assert_(np.isnan(p))
+
+    # test on 3 dimensions
+    rvs1_3D = np.dstack([rvs1_2D,rvs1_2D,rvs1_2D])
+    rvs2_3D = np.dstack([rvs2_2D,rvs2_2D,rvs2_2D])
+    t,p = stats.ttest_ind(rvs1_3D, rvs2_3D, axis=1)
+    assert_almost_equal(np.abs(t), np.abs(tr))
+    assert_array_almost_equal(np.abs(p), pr)
+    assert_equal(t.shape, (2, 3))
+
+    t, p = stats.ttest_ind(np.moveaxis(rvs1_3D, 2, 0),
+                           np.moveaxis(rvs2_3D, 2, 0),
+                           axis=2)
+    assert_array_almost_equal(np.abs(t), np.abs(tr))
+    assert_array_almost_equal(np.abs(p), pr)
+    assert_equal(t.shape, (3, 2))
+
+    # test alternative parameter
+    assert_raises(ValueError, stats.ttest_ind, rvs1, rvs2, alternative="error")
+    assert_raises(ValueError, stats.ttest_ind_from_stats,
+                  *_desc_stats(rvs1_2D.T, rvs2_2D.T), alternative="error")
+
+    t, p = stats.ttest_ind(rvs1, rvs2, alternative="less")
+    assert_allclose(p, 1 - (pr/2))
+    assert_allclose(t, tr)
+
+    t, p = stats.ttest_ind(rvs1, rvs2, alternative="greater")
+    assert_allclose(p, pr/2)
+    assert_allclose(t, tr)
+
+    # Check that ttest_ind_from_stats produces p-values identical to
+    # ttest_ind
+    t, p = stats.ttest_ind(rvs1_2D.T, rvs2_2D.T, axis=0, alternative="less")
+    args = _desc_stats(rvs1_2D.T, rvs2_2D.T)
+    assert_allclose(
+        stats.ttest_ind_from_stats(*args, alternative="less"), [t, p])
+
+    t, p = stats.ttest_ind(rvs1_2D.T, rvs2_2D.T, axis=0, alternative="greater")
+    args = _desc_stats(rvs1_2D.T, rvs2_2D.T)
+    assert_allclose(
+        stats.ttest_ind_from_stats(*args, alternative="greater"), [t, p])
+
+    # check nan policy
+    rng = np.random.RandomState(12345678)
+    x = stats.norm.rvs(loc=5, scale=10, size=501, random_state=rng)
+    x[500] = np.nan
+    y = stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng)
+
+    with np.errstate(invalid="ignore"):
+        assert_array_equal(stats.ttest_ind(x, y), (np.nan, np.nan))
+
+    assert_array_almost_equal(stats.ttest_ind(x, y, nan_policy='omit'),
+                              (0.24779670949091914, 0.80434267337517906))
+    assert_raises(ValueError, stats.ttest_ind, x, y, nan_policy='raise')
+    assert_raises(ValueError, stats.ttest_ind, x, y, nan_policy='foobar')
+
+    # test zero division problem
+    with pytest.warns(RuntimeWarning, match="Precision loss occurred"):
+        t, p = stats.ttest_ind([0, 0, 0], [1, 1, 1])
+    assert_equal((np.abs(t), p), (np.inf, 0))
+
+    with np.errstate(invalid="ignore"):
+        assert_equal(stats.ttest_ind([0, 0, 0], [0, 0, 0]), (np.nan, np.nan))
+
+        # check that nans in the input array result in nan output
+        anan = np.array([[1, np.nan], [-1, 1]])
+        assert_equal(stats.ttest_ind(anan, np.zeros((2, 2))),
+                     ([0, np.nan], [1, np.nan]))
+
+    rvs1_3D[:, :, 10:15] = np.nan
+    rvs2_3D[:, :, 6:12] = np.nan
+
+    # Convert two-sided p-values to one-sided using the sign of the
+    # t statistic.
+    def convert(t, p, alt):
+        if (t < 0 and alt == "less") or (t > 0 and alt == "greater"):
+            return p / 2
+        return 1 - (p / 2)
+    converter = np.vectorize(convert)
+
+    tr, pr = stats.ttest_ind(rvs1_3D, rvs2_3D, 0, nan_policy='omit')
+
+    t, p = stats.ttest_ind(rvs1_3D, rvs2_3D, 0, nan_policy='omit',
+                        alternative='less')
+    assert_allclose(t, tr, rtol=1e-14)
+    assert_allclose(p, converter(tr, pr, 'less'), rtol=1e-14)
+
+    t, p = stats.ttest_ind(rvs1_3D, rvs2_3D, 0, nan_policy='omit',
+                        alternative='greater')
+    assert_allclose(t, tr, rtol=1e-14)
+    assert_allclose(p, converter(tr, pr, 'greater'), rtol=1e-14)
+
+
+class Test_ttest_ind_permutations():
+    N = 20
+
+    # data for most tests
+    np.random.seed(0)
+    a = np.vstack((np.arange(3*N//4), np.random.random(3*N//4)))
+    b = np.vstack((np.arange(N//4) + 100, np.random.random(N//4)))
+
+    # data for equal variance tests
+    a2 = np.arange(10)
+    b2 = np.arange(10) + 100
+
+    # data for exact test
+    a3 = [1, 2]
+    b3 = [3, 4]
+
+    # data for bigger test
+    np.random.seed(0)
+    rvs1 = stats.norm.rvs(loc=5, scale=10,  # type: ignore
+                          size=500).reshape(100, 5).T
+    rvs2 = stats.norm.rvs(loc=8, scale=20, size=100)  # type: ignore
+
+    p_d = [1/1001, (676+1)/1001]  # desired pvalues
+    p_d_gen = [1/1001, (672 + 1)/1001]  # desired pvalues for Generator seed
+    p_d_big = [(993+1)/1001, (685+1)/1001, (840+1)/1001,
+               (955+1)/1001, (255+1)/1001]
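+    # (desired p-values are written as (hits + 1)/(permutations + 1),
+    # matching the randomized-test correction the permutation p-values obey)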
+
+    params = [
+        (a, b, {"axis": 1}, p_d),                     # basic test
+        (a.T, b.T, {'axis': 0}, p_d),                 # along axis 0
+        (a[0, :], b[0, :], {'axis': None}, p_d[0]),   # 1d data
+        (a[0, :].tolist(), b[0, :].tolist(), {'axis': None}, p_d[0]),
+        # different seeds
+        (a, b, {'random_state': 0, "axis": 1}, p_d),
+        (a, b, {'random_state': np.random.RandomState(0), "axis": 1}, p_d),
+        (a2, b2, {'equal_var': True}, 1/1001),  # equal variances
+        (rvs1, rvs2, {'axis': -1, 'random_state': 0}, p_d_big),  # bigger test
+        (a3, b3, {}, 1/3),  # exact test
+        (a, b, {'random_state': np.random.default_rng(0), "axis": 1}, p_d_gen),
+        ]
+
+    @pytest.mark.parametrize("a,b,update,p_d", params)
+    def test_ttest_ind_permutations(self, a, b, update, p_d):
+        options_a = {'axis': None, 'equal_var': False}
+        options_p = {'axis': None, 'equal_var': False,
+                     'permutations': 1000, 'random_state': 0}
+        options_a.update(update)
+        options_p.update(update)
+
+        stat_a, _ = stats.ttest_ind(a, b, **options_a)
+        stat_p, pvalue = stats.ttest_ind(a, b, **options_p)
+        assert_array_almost_equal(stat_a, stat_p, 5)
+        assert_array_almost_equal(pvalue, p_d)
+
+    def test_ttest_ind_exact_alternative(self):
+        np.random.seed(0)
+        N = 3
+        a = np.random.rand(2, N, 2)
+        b = np.random.rand(2, N, 2)
+
+        options_p = {'axis': 1, 'permutations': 1000}
+
+        options_p.update(alternative="greater")
+        res_g_ab = stats.ttest_ind(a, b, **options_p)
+        res_g_ba = stats.ttest_ind(b, a, **options_p)
+
+        options_p.update(alternative="less")
+        res_l_ab = stats.ttest_ind(a, b, **options_p)
+        res_l_ba = stats.ttest_ind(b, a, **options_p)
+
+        options_p.update(alternative="two-sided")
+        res_2_ab = stats.ttest_ind(a, b, **options_p)
+        res_2_ba = stats.ttest_ind(b, a, **options_p)
+
+        # Alternative doesn't affect the statistic
+        assert_equal(res_g_ab.statistic, res_l_ab.statistic)
+        assert_equal(res_g_ab.statistic, res_2_ab.statistic)
+
+        # Reversing order of inputs negates statistic
+        assert_equal(res_g_ab.statistic, -res_g_ba.statistic)
+        assert_equal(res_l_ab.statistic, -res_l_ba.statistic)
+        assert_equal(res_2_ab.statistic, -res_2_ba.statistic)
+
+        # Reversing order of inputs does not affect p-value of 2-sided test
+        assert_equal(res_2_ab.pvalue, res_2_ba.pvalue)
+
+        # In the exact test, the distribution is perfectly symmetric, so
+        # these identities are satisfied exactly.
+        assert_equal(res_g_ab.pvalue, res_l_ba.pvalue)
+        assert_equal(res_l_ab.pvalue, res_g_ba.pvalue)
+        mask = res_g_ab.pvalue <= 0.5
+        assert_equal(res_g_ab.pvalue[mask] + res_l_ba.pvalue[mask],
+                     res_2_ab.pvalue[mask])
+        assert_equal(res_l_ab.pvalue[~mask] + res_g_ba.pvalue[~mask],
+                     res_2_ab.pvalue[~mask])
+
+    def test_ttest_ind_exact_selection(self):
+        # test the various ways of activating the exact test
+        np.random.seed(0)
+        N = 3
+        a = np.random.rand(N)
+        b = np.random.rand(N)
+        res0 = stats.ttest_ind(a, b)
+        res1 = stats.ttest_ind(a, b, permutations=1000)
+        res2 = stats.ttest_ind(a, b, permutations=0)
+        res3 = stats.ttest_ind(a, b, permutations=np.inf)
+        assert res1.pvalue != res0.pvalue
+        assert res2.pvalue == res0.pvalue
+        assert res3.pvalue == res1.pvalue
+
+    def test_ttest_ind_exact_distribution(self):
+        # the exact distribution of the test statistic should have
+        # binom(na + nb, na) elements, all unique. This was not always true
+        # in gh-4824; fixed by gh-13661.
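+        # (binom(na + nb, na) counts the ways to choose which na of the
+        # pooled observations form the first sample, i.e. the number of
+        # distinct partitions the exact test enumerates)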
+        np.random.seed(0)
+        a = np.random.rand(3)
+        b = np.random.rand(4)
+
+        data = np.concatenate((a, b))
+        na, nb = len(a), len(b)
+
+        permutations = 100000
+        t_stat, _, _ = _permutation_distribution_t(data, permutations, na,
+                                                   True)
+
+        n_unique = len(set(t_stat))
+        assert n_unique == binom(na + nb, na)
+        assert len(t_stat) == n_unique
+
+    def test_ttest_ind_randperm_alternative(self):
+        np.random.seed(0)
+        N = 50
+        a = np.random.rand(2, 3, N)
+        b = np.random.rand(3, N)
+        options_p = {'axis': -1, 'permutations': 1000, "random_state": 0}
+
+        options_p.update(alternative="greater")
+        res_g_ab = stats.ttest_ind(a, b, **options_p)
+        res_g_ba = stats.ttest_ind(b, a, **options_p)
+
+        options_p.update(alternative="less")
+        res_l_ab = stats.ttest_ind(a, b, **options_p)
+        res_l_ba = stats.ttest_ind(b, a, **options_p)
+
+        # Alternative doesn't affect the statistic
+        assert_equal(res_g_ab.statistic, res_l_ab.statistic)
+
+        # Reversing order of inputs negates statistic
+        assert_equal(res_g_ab.statistic, -res_g_ba.statistic)
+        assert_equal(res_l_ab.statistic, -res_l_ba.statistic)
+
+        # For random permutations, the chance of ties between the observed
+        # test statistic and the population is small, so:
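+        # (each one-sided p-value has the form (hits + 1)/(permutations + 1);
+        # with no ties, every permuted statistic is a hit for exactly one
+        # side, so the counts sum to permutations + 2 over permutations + 1)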
+        assert_equal(res_g_ab.pvalue + res_l_ab.pvalue,
+                     1 + 1/(options_p['permutations'] + 1))
+        assert_equal(res_g_ba.pvalue + res_l_ba.pvalue,
+                     1 + 1/(options_p['permutations'] + 1))
+
+    @pytest.mark.slow()
+    def test_ttest_ind_randperm_alternative2(self):
+        np.random.seed(0)
+        N = 50
+        a = np.random.rand(N, 4)
+        b = np.random.rand(N, 4)
+        options_p = {'permutations': 20000, "random_state": 0}
+
+        options_p.update(alternative="greater")
+        res_g_ab = stats.ttest_ind(a, b, **options_p)
+
+        options_p.update(alternative="less")
+        res_l_ab = stats.ttest_ind(a, b, **options_p)
+
+        options_p.update(alternative="two-sided")
+        res_2_ab = stats.ttest_ind(a, b, **options_p)
+
+        # For random permutations, the chance of ties between the observed
+        # test statistic and the population is small, so:
+        assert_equal(res_g_ab.pvalue + res_l_ab.pvalue,
+                     1 + 1/(options_p['permutations'] + 1))
+
+        # For large sample sizes, the distribution should be approximately
+        # symmetric, so these identities should be approximately satisfied
+        mask = res_g_ab.pvalue <= 0.5
+        assert_allclose(2 * res_g_ab.pvalue[mask],
+                        res_2_ab.pvalue[mask], atol=2e-2)
+        assert_allclose(2 * (1-res_g_ab.pvalue[~mask]),
+                        res_2_ab.pvalue[~mask], atol=2e-2)
+        assert_allclose(2 * res_l_ab.pvalue[~mask],
+                        res_2_ab.pvalue[~mask], atol=2e-2)
+        assert_allclose(2 * (1-res_l_ab.pvalue[mask]),
+                        res_2_ab.pvalue[mask], atol=2e-2)
+
+    def test_ttest_ind_permutation_nanpolicy(self):
+        np.random.seed(0)
+        N = 50
+        a = np.random.rand(N, 5)
+        b = np.random.rand(N, 5)
+        a[5, 1] = np.nan
+        b[8, 2] = np.nan
+        a[9, 3] = np.nan
+        b[9, 3] = np.nan
+        options_p = {'permutations': 1000, "random_state": 0}
+
+        # Raise
+        options_p.update(nan_policy="raise")
+        with assert_raises(ValueError, match="The input contains nan values"):
+            res = stats.ttest_ind(a, b, **options_p)
+
+        # Propagate
+        with suppress_warnings() as sup:
+            sup.record(RuntimeWarning, "invalid value*")
+            options_p.update(nan_policy="propagate")
+            res = stats.ttest_ind(a, b, **options_p)
+
+            mask = np.isnan(a).any(axis=0) | np.isnan(b).any(axis=0)
+            res2 = stats.ttest_ind(a[:, ~mask], b[:, ~mask], **options_p)
+
+            assert_equal(res.pvalue[mask], np.nan)
+            assert_equal(res.statistic[mask], np.nan)
+
+            assert_allclose(res.pvalue[~mask], res2.pvalue)
+            assert_allclose(res.statistic[~mask], res2.statistic)
+
+            # Propagate 1d
+            res = stats.ttest_ind(a.ravel(), b.ravel(), **options_p)
+            assert np.isnan(res.pvalue)  # assert makes sure it's a scalar
+            assert np.isnan(res.statistic)
+
+    def test_ttest_ind_permutation_check_inputs(self):
+        with assert_raises(ValueError, match="Permutations must be"):
+            stats.ttest_ind(self.a2, self.b2, permutations=-3)
+        with assert_raises(ValueError, match="Permutations must be"):
+            stats.ttest_ind(self.a2, self.b2, permutations=1.5)
+        with assert_raises(ValueError, match="'hello' cannot be used"):
+            stats.ttest_ind(self.a, self.b, permutations=1,
+                            random_state='hello')
+
+    def test_ttest_ind_permutation_check_p_values(self):
+        # p-values should never be exactly zero
+        N = 10
+        a = np.random.rand(N, 20)
+        b = np.random.rand(N, 20)
+        p_values = stats.ttest_ind(a, b, permutations=1).pvalue
+        assert 0.0 not in p_values
+
+
+class Test_ttest_ind_common:
+    # for tests that are performed on variations of the t-test such as
+    # permutations and trimming
+    @pytest.mark.slow()
+    @pytest.mark.parametrize("kwds", [{'permutations': 200, 'random_state': 0},
+                                      {'trim': .2}, {}],
+                             ids=["permutations", "trim", "basic"])
+    @pytest.mark.parametrize('equal_var', [True, False],
+                             ids=['equal_var', 'unequal_var'])
+    def test_ttest_many_dims(self, kwds, equal_var):
+        # Test that test works on many-dimensional arrays
+        np.random.seed(0)
+        a = np.random.rand(5, 4, 4, 7, 1, 6)
+        b = np.random.rand(4, 1, 8, 2, 6)
+        res = stats.ttest_ind(a, b, axis=-3, **kwds)
+
+        # compare fully-vectorized t-test against t-test on smaller slice
+        i, j, k = 2, 3, 1
+        a2 = a[i, :, j, :, 0, :]
+        b2 = b[:, 0, :, k, :]
+        res2 = stats.ttest_ind(a2, b2, axis=-2, **kwds)
+        assert_equal(res.statistic[i, :, j, k, :],
+                     res2.statistic)
+        assert_equal(res.pvalue[i, :, j, k, :],
+                     res2.pvalue)
+
+        # compare against t-test on one axis-slice at a time
+
+        # manually broadcast with tile; move axis to end to simplify
+        x = np.moveaxis(np.tile(a, (1, 1, 1, 1, 2, 1)), -3, -1)
+        y = np.moveaxis(np.tile(b, (5, 1, 4, 1, 1, 1)), -3, -1)
+        shape = x.shape[:-1]
+        statistics = np.zeros(shape)
+        pvalues = np.zeros(shape)
+        for indices in product(*(range(i) for i in shape)):
+            xi = x[indices]  # use tuple to index single axis slice
+            yi = y[indices]
+            res3 = stats.ttest_ind(xi, yi, axis=-1, **kwds)
+            statistics[indices] = res3.statistic
+            pvalues[indices] = res3.pvalue
+
+        assert_allclose(statistics, res.statistic)
+        assert_allclose(pvalues, res.pvalue)
+
+    @pytest.mark.parametrize("kwds", [{'permutations': 200, 'random_state': 0},
+                                      {'trim': .2}, {}],
+                             ids=["trim", "permutations", "basic"])
+    @pytest.mark.parametrize("axis", [-1, 0])
+    def test_nans_on_axis(self, kwds, axis):
+        # confirm that with `nan_policy='propagate'`, NaN results are returned
+        # in the correct locations
+        a = np.random.randint(10, size=(5, 3, 10)).astype('float')
+        b = np.random.randint(10, size=(5, 3, 10)).astype('float')
+        # set some indices in `a` and `b` to be `np.nan`.
+        a[0][2][3] = np.nan
+        b[2][0][6] = np.nan
+
+        # arbitrarily use `np.sum` as a baseline for which indices should be
+        # NaNs
+        expected = np.isnan(np.sum(a + b, axis=axis))
+        # multidimensional inputs to `t.sf(np.abs(t), df)` with NaNs on some
+        # indices throws a warning. See gh-13844.
+        with suppress_warnings() as sup, np.errstate(invalid="ignore"):
+            sup.filter(RuntimeWarning,
+                       "invalid value encountered in less_equal")
+            sup.filter(RuntimeWarning, "Precision loss occurred")
+            res = stats.ttest_ind(a, b, axis=axis, **kwds)
+        p_nans = np.isnan(res.pvalue)
+        assert_array_equal(p_nans, expected)
+        statistic_nans = np.isnan(res.statistic)
+        assert_array_equal(statistic_nans, expected)
+
+
+class Test_ttest_trim:
+    params = [
+        [[1, 2, 3], [1.1, 2.9, 4.2], 0.53619490753126731, -0.6864951273557258,
+         .2],
+        [[56, 128.6, 12, 123.8, 64.34, 78, 763.3], [1.1, 2.9, 4.2],
+         0.00998909252078421, 4.591598691181999, .2],
+        [[56, 128.6, 12, 123.8, 64.34, 78, 763.3], [1.1, 2.9, 4.2],
+         0.10512380092302633, 2.832256715395378, .32],
+        [[2.7, 2.7, 1.1, 3.0, 1.9, 3.0, 3.8, 3.8, 0.3, 1.9, 1.9],
+         [6.5, 5.4, 8.1, 3.5, 0.5, 3.8, 6.8, 4.9, 9.5, 6.2, 4.1],
+         0.002878909511344, -4.2461168970325, .2],
+        [[-0.84504783, 0.13366078, 3.53601757, -0.62908581, 0.54119466,
+          -1.16511574, -0.08836614, 1.18495416, 2.48028757, -1.58925028,
+          -1.6706357, 0.3090472, -2.12258305, 0.3697304, -1.0415207,
+          -0.57783497, -0.90997008, 1.09850192, 0.41270579, -1.4927376],
+         [1.2725522, 1.1657899, 2.7509041, 1.2389013, -0.9490494, -1.0752459,
+          1.1038576, 2.9912821, 3.5349111, 0.4171922, 1.0168959, -0.7625041,
+          -0.4300008, 3.0431921, 1.6035947, 0.5285634, -0.7649405, 1.5575896,
+          1.3670797, 1.1726023], 0.005293305834235, -3.0983317739483, .2]]
+
+    @pytest.mark.parametrize("a,b,pr,tr,trim", params)
+    def test_ttest_compare_r(self, a, b, pr, tr, trim):
+        '''
+        Reference values are from PairedData's yuen.t.test method. Note that
+        at least three R packages provide a trimmed t-test, and their
+        results were compared. PairedData's results match this
+        implementation, SAS, and one of the other R methods. A notable
+        discrepancy is the DescTools implementation, which only sometimes
+        agrees with SAS, WRS2, PairedData, and this implementation. For this
+        reason, most comparisons in R are made against PairedData's method.
+
+        Rather than providing the input and output for all evaluations, here is
+        a representative example:
+        > library(PairedData)
+        > a <- c(1, 2, 3)
+        > b <- c(1.1, 2.9, 4.2)
+        > options(digits=16)
+        > yuen.t.test(a, b, tr=.2)
+
+            Two-sample Yuen test, trim=0.2
+
+        data:  x and y
+        t = -0.68649512735573, df = 3.4104431643464, p-value = 0.5361949075313
+        alternative hypothesis: true difference in trimmed means is not equal
+        to 0
+        95 percent confidence interval:
+         -3.912777195645217  2.446110528978550
+        sample estimates:
+        trimmed mean of x trimmed mean of y
+        2.000000000000000 2.733333333333333
+        '''
+        statistic, pvalue = stats.ttest_ind(a, b, trim=trim, equal_var=False)
+        assert_allclose(statistic, tr, atol=1e-15)
+        assert_allclose(pvalue, pr, atol=1e-15)
+
+    def test_compare_SAS(self):
+        # Source of the data used in this test:
+        # https://support.sas.com/resources/papers/proceedings14/1660-2014.pdf
+        a = [12, 14, 18, 25, 32, 44, 12, 14, 18, 25, 32, 44]
+        b = [17, 22, 14, 12, 30, 29, 19, 17, 22, 14, 12, 30, 29, 19]
+        # The paper uses a trimming percentage of 5%, but its implementation
+        # rounds the number of trimmed values to the nearest whole number,
+        # whereas, consistent with `scipy.stats.trim_mean`, this test
+        # truncates to the lower whole number. The paper notes that 1 value
+        # is trimmed off of each side; trim=.09 replicates this amount of
+        # trimming.
+        statistic, pvalue = stats.ttest_ind(a, b, trim=.09, equal_var=False)
+        assert_allclose(pvalue, 0.514522, atol=1e-6)
+        assert_allclose(statistic, 0.669169, atol=1e-6)
+
+    def test_equal_var(self):
+        '''
+        The PairedData library only supports unequal variances. To compare
+        samples with equal variances, the multicon library is used.
+        > library(multicon)
+        > a <- c(2.7, 2.7, 1.1, 3.0, 1.9, 3.0, 3.8, 3.8, 0.3, 1.9, 1.9)
+        > b <- c(6.5, 5.4, 8.1, 3.5, 0.5, 3.8, 6.8, 4.9, 9.5, 6.2, 4.1)
+        > dv = c(a,b)
+        > iv = c(rep('a', length(a)), rep('b', length(b)))
+        > yuenContrast(dv~ iv, EQVAR = TRUE)
+        $Ms
+           N                 M wgt
+        a 11 2.442857142857143   1
+        b 11 5.385714285714286  -1
+
+        $test
+                              stat df              crit                   p
+        results -4.246116897032513 12 2.178812829667228 0.00113508833897713
+        '''
+        a = [2.7, 2.7, 1.1, 3.0, 1.9, 3.0, 3.8, 3.8, 0.3, 1.9, 1.9]
+        b = [6.5, 5.4, 8.1, 3.5, 0.5, 3.8, 6.8, 4.9, 9.5, 6.2, 4.1]
+        # `equal_var=True` is default
+        statistic, pvalue = stats.ttest_ind(a, b, trim=.2)
+        assert_allclose(pvalue, 0.00113508833897713, atol=1e-10)
+        assert_allclose(statistic, -4.246116897032513, atol=1e-10)
+
+    @pytest.mark.parametrize('alt,pr,tr',
+                             (('greater', 0.9985605452443, -4.2461168970325),
+                              ('less', 0.001439454755672, -4.2461168970325),),
+                             )
+    def test_alternatives(self, alt, pr, tr):
+        '''
+        > library(PairedData)
+        > a <- c(2.7,2.7,1.1,3.0,1.9,3.0,3.8,3.8,0.3,1.9,1.9)
+        > b <- c(6.5,5.4,8.1,3.5,0.5,3.8,6.8,4.9,9.5,6.2,4.1)
+        > options(digits=16)
+        > yuen.t.test(a, b, alternative = 'greater')
+        '''
+        a = [2.7, 2.7, 1.1, 3.0, 1.9, 3.0, 3.8, 3.8, 0.3, 1.9, 1.9]
+        b = [6.5, 5.4, 8.1, 3.5, 0.5, 3.8, 6.8, 4.9, 9.5, 6.2, 4.1]
+
+        statistic, pvalue = stats.ttest_ind(a, b, trim=.2, equal_var=False,
+                                            alternative=alt)
+        assert_allclose(pvalue, pr, atol=1e-10)
+        assert_allclose(statistic, tr, atol=1e-10)
+
+    def test_errors_unsupported(self):
+        # confirm that attempting to trim with NaNs or permutations raises an
+        # error
+        match = "Permutations are currently not supported with trimming."
+        with assert_raises(ValueError, match=match):
+            stats.ttest_ind([1, 2], [2, 3], trim=.2, permutations=2)
+        match = ("not supported by permutation tests or trimmed tests.")
+        with assert_raises(ValueError, match=match):
+            stats.ttest_ind([1, 2], [2, np.nan, 3], trim=.2, nan_policy='omit')
+
+    @pytest.mark.parametrize("trim", [-.2, .5, 1])
+    def test_trim_bounds_error(self, trim):
+        match = "Trimming percentage should be 0 <= `trim` < .5."
+        with assert_raises(ValueError, match=match):
+            stats.ttest_ind([1, 2], [2, 1], trim=trim)
+
+
+def test__broadcast_concatenate():
+    # test that _broadcast_concatenate properly broadcasts arrays along all
+    # axes except `axis`, then concatenates along axis
+    np.random.seed(0)
+    a = np.random.rand(5, 4, 4, 3, 1, 6)
+    b = np.random.rand(4, 1, 8, 2, 6)
+    c = _broadcast_concatenate((a, b), axis=-3)
+    # broadcast manually as an independent check
+    a = np.tile(a, (1, 1, 1, 1, 2, 1))
+    b = np.tile(b[None, ...], (5, 1, 4, 1, 1, 1))
+    for index in product(*(range(i) for i in c.shape)):
+        i, j, k, l, m, n = index
+        if l < a.shape[-3]:
+            assert a[i, j, k, l, m, n] == c[i, j, k, l, m, n]
+        else:
+            assert b[i, j, k, l - a.shape[-3], m, n] == c[i, j, k, l, m, n]
+
+
+def test_ttest_ind_with_uneq_var():
+    # check vs. R
+    a = (1, 2, 3)
+    b = (1.1, 2.9, 4.2)
+    pr = 0.53619490753126731
+    tr = -0.68649512735572582
+    t, p = stats.ttest_ind(a, b, equal_var=False)
+    assert_array_almost_equal([t,p], [tr, pr])
+    # test from desc stats API
+    assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(a, b),
+                                                         equal_var=False),
+                              [t, p])
+
+    a = (1, 2, 3, 4)
+    pr = 0.84354139131608286
+    tr = -0.2108663315950719
+    t, p = stats.ttest_ind(a, b, equal_var=False)
+    assert_array_almost_equal([t,p], [tr, pr])
+    assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(a, b),
+                                                         equal_var=False),
+                              [t, p])
+
+    # regression test
+    tr = 1.0912746897927283
+    tr_uneq_n = 0.66745638708050492
+    pr = 0.27647831993021388
+    pr_uneq_n = 0.50873585065616544
+    tpr = ([tr,-tr],[pr,pr])
+
+    rvs3 = np.linspace(1,100, 25)
+    rvs2 = np.linspace(1,100,100)
+    rvs1 = np.linspace(5,105,100)
+    rvs1_2D = np.array([rvs1, rvs2])
+
+    rvs2_2D = np.array([rvs2, rvs1])
+
+    t,p = stats.ttest_ind(rvs1, rvs2, axis=0, equal_var=False)
+    assert_array_almost_equal([t,p],(tr,pr))
+    assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(rvs1,
+                                                                      rvs2),
+                                                         equal_var=False),
+                              (t, p))
+
+    t,p = stats.ttest_ind(rvs1, rvs3, axis=0, equal_var=False)
+    assert_array_almost_equal([t,p], (tr_uneq_n, pr_uneq_n))
+    assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(rvs1,
+                                                                      rvs3),
+                                                         equal_var=False),
+                              (t, p))
+
+    t,p = stats.ttest_ind(rvs1_2D.T, rvs2_2D.T, axis=0, equal_var=False)
+    assert_array_almost_equal([t,p],tpr)
+    args = _desc_stats(rvs1_2D.T, rvs2_2D.T)
+    assert_array_almost_equal(stats.ttest_ind_from_stats(*args,
+                                                         equal_var=False),
+                              (t, p))
+
+    t,p = stats.ttest_ind(rvs1_2D, rvs2_2D, axis=1, equal_var=False)
+    assert_array_almost_equal([t,p],tpr)
+    args = _desc_stats(rvs1_2D, rvs2_2D, axis=1)
+    assert_array_almost_equal(stats.ttest_ind_from_stats(*args,
+                                                         equal_var=False),
+                              (t, p))
+
+    # test for namedtuple attribute results
+    attributes = ('statistic', 'pvalue')
+    res = stats.ttest_ind(rvs1, rvs2, axis=0, equal_var=False)
+    check_named_results(res, attributes)
+
+    # test on 3 dimensions
+    rvs1_3D = np.dstack([rvs1_2D,rvs1_2D,rvs1_2D])
+    rvs2_3D = np.dstack([rvs2_2D,rvs2_2D,rvs2_2D])
+    t,p = stats.ttest_ind(rvs1_3D, rvs2_3D, axis=1, equal_var=False)
+    assert_almost_equal(np.abs(t), np.abs(tr))
+    assert_array_almost_equal(np.abs(p), pr)
+    assert_equal(t.shape, (2, 3))
+    args = _desc_stats(rvs1_3D, rvs2_3D, axis=1)
+    t, p = stats.ttest_ind_from_stats(*args, equal_var=False)
+    assert_almost_equal(np.abs(t), np.abs(tr))
+    assert_array_almost_equal(np.abs(p), pr)
+    assert_equal(t.shape, (2, 3))
+
+    t, p = stats.ttest_ind(np.moveaxis(rvs1_3D, 2, 0),
+                           np.moveaxis(rvs2_3D, 2, 0),
+                           axis=2, equal_var=False)
+    assert_array_almost_equal(np.abs(t), np.abs(tr))
+    assert_array_almost_equal(np.abs(p), pr)
+    assert_equal(t.shape, (3, 2))
+    args = _desc_stats(np.moveaxis(rvs1_3D, 2, 0),
+                       np.moveaxis(rvs2_3D, 2, 0), axis=2)
+    t, p = stats.ttest_ind_from_stats(*args, equal_var=False)
+    assert_array_almost_equal(np.abs(t), np.abs(tr))
+    assert_array_almost_equal(np.abs(p), pr)
+    assert_equal(t.shape, (3, 2))
+
+    # test zero division problem
+    with pytest.warns(RuntimeWarning, match="Precision loss occurred"):
+        t, p = stats.ttest_ind([0, 0, 0], [1, 1, 1], equal_var=False)
+    assert_equal((np.abs(t), p), (np.inf, 0))
+    with np.errstate(all='ignore'):
+        assert_equal(stats.ttest_ind([0, 0, 0], [0, 0, 0], equal_var=False),
+                     (np.nan, np.nan))
+
+        # check that nans in the input array result in nan output
+        anan = np.array([[1, np.nan], [-1, 1]])
+        assert_equal(stats.ttest_ind(anan, np.zeros((2, 2)), equal_var=False),
+                     ([0, np.nan], [1, np.nan]))
+
+
+def test_ttest_ind_nan_2nd_arg():
+    # regression test for gh-6134: nans in the second arg were not handled
+    x = [np.nan, 2.0, 3.0, 4.0]
+    y = [1.0, 2.0, 1.0, 2.0]
+
+    r1 = stats.ttest_ind(x, y, nan_policy='omit')
+    r2 = stats.ttest_ind(y, x, nan_policy='omit')
+    assert_allclose(r2.statistic, -r1.statistic, atol=1e-15)
+    assert_allclose(r2.pvalue, r1.pvalue, atol=1e-15)
+
+    # NB: arguments are not paired when NaNs are dropped
+    r3 = stats.ttest_ind(y, x[1:])
+    assert_allclose(r2, r3, atol=1e-15)
+
+    # .. and this is consistent with R. R code:
+    # x = c(NA, 2.0, 3.0, 4.0)
+    # y = c(1.0, 2.0, 1.0, 2.0)
+    # t.test(x, y, var.equal=TRUE)
+    assert_allclose(r2, (-2.5354627641855498, 0.052181400457057901),
+                    atol=1e-15)
+
+
+def test_ttest_ind_empty_1d_returns_nan():
+    # Two empty inputs should return a Ttest_indResult containing nan
+    # for both values.
+    result = stats.ttest_ind([], [])
+    assert isinstance(result, stats._stats_py.Ttest_indResult)
+    assert_equal(result, (np.nan, np.nan))
+
+
+@pytest.mark.parametrize('b, expected_shape',
+                         [(np.empty((1, 5, 0)), (3, 5)),
+                          (np.empty((1, 0, 0)), (3, 0))])
+def test_ttest_ind_axis_size_zero(b, expected_shape):
+    # In this test, the length of the axis dimension is zero.
+    # The results should be arrays containing nan with shape
+    # given by the broadcast nonaxis dimensions.
+    a = np.empty((3, 1, 0))
+    result = stats.ttest_ind(a, b, axis=-1)
+    assert isinstance(result, stats._stats_py.Ttest_indResult)
+    expected_value = np.full(expected_shape, fill_value=np.nan)
+    assert_equal(result.statistic, expected_value)
+    assert_equal(result.pvalue, expected_value)
+
+
+def test_ttest_ind_nonaxis_size_zero():
+    # In this test, the length of the axis dimension is nonzero,
+    # but one of the nonaxis dimensions has length 0.  Check that
+    # we still get the correctly broadcast shape, which is (5, 0)
+    # in this case.
+    a = np.empty((1, 8, 0))
+    b = np.empty((5, 8, 1))
+    result = stats.ttest_ind(a, b, axis=1)
+    assert isinstance(result, stats._stats_py.Ttest_indResult)
+    assert_equal(result.statistic.shape, (5, 0))
+    assert_equal(result.pvalue.shape, (5, 0))
+
+
+def test_ttest_ind_nonaxis_size_zero_different_lengths():
+    # In this test, the length of the axis dimension is nonzero,
+    # and that size is different in the two inputs,
+    # and one of the nonaxis dimensions has length 0.  Check that
+    # we still get the correctly broadcast shape, which is (5, 0)
+    # in this case.
+    a = np.empty((1, 7, 0))
+    b = np.empty((5, 8, 1))
+    result = stats.ttest_ind(a, b, axis=1)
+    assert isinstance(result, stats._stats_py.Ttest_indResult)
+    assert_equal(result.statistic.shape, (5, 0))
+    assert_equal(result.pvalue.shape, (5, 0))
+
+
+def test_gh5686():
+    mean1, mean2 = np.array([1, 2]), np.array([3, 4])
+    std1, std2 = np.array([5, 3]), np.array([4, 5])
+    nobs1, nobs2 = np.array([130, 140]), np.array([100, 150])
+    # This will raise a TypeError unless gh-5686 is fixed.
+    stats.ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2)
+
+
+def test_ttest_ind_from_stats_inputs_zero():
+    # Regression test for gh-6409.
+    result = stats.ttest_ind_from_stats(0, 0, 6, 0, 0, 6, equal_var=False)
+    assert_equal(result, [np.nan, np.nan])
+
+
+def test_ttest_1samp_new():
+    n1, n2, n3 = (10,15,20)
+    rvn1 = stats.norm.rvs(loc=5,scale=10,size=(n1,n2,n3))
+
+    # check multidimensional array and correct axis handling
+    # deterministic data would be better here, as in test_ttest_rel
+    t1,p1 = stats.ttest_1samp(rvn1[:,:,:], np.ones((n2,n3)),axis=0)
+    t2,p2 = stats.ttest_1samp(rvn1[:,:,:], 1,axis=0)
+    t3,p3 = stats.ttest_1samp(rvn1[:,0,0], 1)
+    assert_array_almost_equal(t1,t2, decimal=14)
+    assert_almost_equal(t1[0,0],t3, decimal=14)
+    assert_equal(t1.shape, (n2,n3))
+
+    t1,p1 = stats.ttest_1samp(rvn1[:,:,:], np.ones((n1, 1, n3)),axis=1)  # noqa
+    t2,p2 = stats.ttest_1samp(rvn1[:,:,:], 1,axis=1)
+    t3,p3 = stats.ttest_1samp(rvn1[0,:,0], 1)
+    assert_array_almost_equal(t1,t2, decimal=14)
+    assert_almost_equal(t1[0,0],t3, decimal=14)
+    assert_equal(t1.shape, (n1,n3))
+
+    t1,p1 = stats.ttest_1samp(rvn1[:,:,:], np.ones((n1,n2,1)),axis=2)  # noqa
+    t2,p2 = stats.ttest_1samp(rvn1[:,:,:], 1,axis=2)
+    t3,p3 = stats.ttest_1samp(rvn1[0,0,:], 1)
+    assert_array_almost_equal(t1,t2, decimal=14)
+    assert_almost_equal(t1[0,0],t3, decimal=14)
+    assert_equal(t1.shape, (n1,n2))
+
+    # test zero division problem
+    t, p = stats.ttest_1samp([0, 0, 0], 1)
+    assert_equal((np.abs(t), p), (np.inf, 0))
+
+    # test alternative parameter
+    # Convert two-sided p-values to one-sided using the sign of the
+    # t statistic.
+    def convert(t, p, alt):
+        if (t < 0 and alt == "less") or (t > 0 and alt == "greater"):
+            return p / 2
+        return 1 - (p / 2)
+    converter = np.vectorize(convert)
+    tr, pr = stats.ttest_1samp(rvn1[:, :, :], 1)
+
+    t, p = stats.ttest_1samp(rvn1[:, :, :], 1, alternative="greater")
+    pc = converter(tr, pr, "greater")
+    assert_allclose(p, pc)
+    assert_allclose(t, tr)
+
+    t, p = stats.ttest_1samp(rvn1[:, :, :], 1, alternative="less")
+    pc = converter(tr, pr, "less")
+    assert_allclose(p, pc)
+    assert_allclose(t, tr)
+
+    with np.errstate(all='ignore'):
+        assert_equal(stats.ttest_1samp([0, 0, 0], 0), (np.nan, np.nan))
+
+        # check that nans in the input array result in nan output
+        anan = np.array([[1, np.nan],[-1, 1]])
+        assert_equal(stats.ttest_1samp(anan, 0), ([0, np.nan], [1, np.nan]))
+
+    rvn1[0:2, 1:3, 4:8] = np.nan
+
+    tr, pr = stats.ttest_1samp(rvn1[:, :, :], 1, nan_policy='omit')
+
+    t, p = stats.ttest_1samp(rvn1[:, :, :], 1, nan_policy='omit',
+                             alternative="greater")
+    pc = converter(tr, pr, "greater")
+    assert_allclose(p, pc)
+    assert_allclose(t, tr)
+
+    t, p = stats.ttest_1samp(rvn1[:, :, :], 1, nan_policy='omit',
+                             alternative="less")
+    pc = converter(tr, pr, "less")
+    assert_allclose(p, pc)
+    assert_allclose(t, tr)
+
+
+def test_ttest_1samp_popmean_array():
+    # `popmean.shape[axis]` must equal 1, otherwise an error is raised. To
+    # test multiple null hypotheses simultaneously, use standard
+    # broadcasting rules.
+    rng = np.random.default_rng(2913300596553337193)
+    x = rng.random(size=(1, 15, 20))
+
+    message = r"`popmean.shape\[axis\]` must equal 1."
+    popmean = rng.random(size=(5, 2, 20))
+    with pytest.raises(ValueError, match=message):
+        stats.ttest_1samp(x, popmean=popmean, axis=-2)
+
+    popmean = rng.random(size=(5, 1, 20))
+    res = stats.ttest_1samp(x, popmean=popmean, axis=-2)
+    assert res.statistic.shape == (5, 20)
+
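+    # Testing against the endpoints of the (default 95%) confidence interval
+    # should invert to a p-value of 1 - 0.95 = 0.05.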
+    ci = np.expand_dims(res.confidence_interval(), axis=-2)
+    res = stats.ttest_1samp(x, popmean=ci, axis=-2)
+    assert_allclose(res.pvalue, 0.05)
+
+
+class TestDescribe:
+    def test_describe_scalar(self):
+        with suppress_warnings() as sup, np.errstate(invalid="ignore"), \
+             pytest.warns(RuntimeWarning, match="Precision loss occurred"):
+            sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice")
+            n, mm, m, v, sk, kurt = stats.describe(4.)
+        assert_equal(n, 1)
+        assert_equal(mm, (4.0, 4.0))
+        assert_equal(m, 4.0)
+        assert np.isnan(v)
+        assert np.isnan(sk)
+        assert np.isnan(kurt)
+
+    def test_describe_numbers(self):
+        x = np.vstack((np.ones((3,4)), np.full((2, 4), 2)))
+        nc, mmc = (5, ([1., 1., 1., 1.], [2., 2., 2., 2.]))
+        mc = np.array([1.4, 1.4, 1.4, 1.4])
+        vc = np.array([0.3, 0.3, 0.3, 0.3])
+        skc = [0.40824829046386357] * 4
+        kurtc = [-1.833333333333333] * 4
+        n, mm, m, v, sk, kurt = stats.describe(x)
+        assert_equal(n, nc)
+        assert_equal(mm, mmc)
+        assert_equal(m, mc)
+        assert_equal(v, vc)
+        assert_array_almost_equal(sk, skc, decimal=13)
+        assert_array_almost_equal(kurt, kurtc, decimal=13)
+        n, mm, m, v, sk, kurt = stats.describe(x.T, axis=1)
+        assert_equal(n, nc)
+        assert_equal(mm, mmc)
+        assert_equal(m, mc)
+        assert_equal(v, vc)
+        assert_array_almost_equal(sk, skc, decimal=13)
+        assert_array_almost_equal(kurt, kurtc, decimal=13)
+
+        x = np.arange(10.)
+        x[9] = np.nan
+
+        nc, mmc = (9, (0.0, 8.0))
+        mc = 4.0
+        vc = 7.5
+        skc = 0.0
+        kurtc = -1.2300000000000002
+        n, mm, m, v, sk, kurt = stats.describe(x, nan_policy='omit')
+        assert_equal(n, nc)
+        assert_equal(mm, mmc)
+        assert_equal(m, mc)
+        assert_equal(v, vc)
+        assert_array_almost_equal(sk, skc)
+        assert_array_almost_equal(kurt, kurtc, decimal=13)
+
+        assert_raises(ValueError, stats.describe, x, nan_policy='raise')
+        assert_raises(ValueError, stats.describe, x, nan_policy='foobar')
+
+    def test_describe_result_attributes(self):
+        actual = stats.describe(np.arange(5))
+        attributes = ('nobs', 'minmax', 'mean', 'variance', 'skewness',
+                      'kurtosis')
+        check_named_results(actual, attributes)
+
+    def test_describe_ddof(self):
+        x = np.vstack((np.ones((3, 4)), np.full((2, 4), 2)))
+        nc, mmc = (5, ([1., 1., 1., 1.], [2., 2., 2., 2.]))
+        mc = np.array([1.4, 1.4, 1.4, 1.4])
+        vc = np.array([0.24, 0.24, 0.24, 0.24])
+        skc = [0.40824829046386357] * 4
+        kurtc = [-1.833333333333333] * 4
+        n, mm, m, v, sk, kurt = stats.describe(x, ddof=0)
+        assert_equal(n, nc)
+        assert_allclose(mm, mmc, rtol=1e-15)
+        assert_allclose(m, mc, rtol=1e-15)
+        assert_allclose(v, vc, rtol=1e-15)
+        assert_array_almost_equal(sk, skc, decimal=13)
+        assert_array_almost_equal(kurt, kurtc, decimal=13)
+
+    def test_describe_axis_none(self):
+        x = np.vstack((np.ones((3, 4)), np.full((2, 4), 2)))
+
+        # expected values
+        e_nobs, e_minmax = (20, (1.0, 2.0))
+        e_mean = 1.3999999999999999
+        e_var = 0.25263157894736848
+        e_skew = 0.4082482904638634
+        e_kurt = -1.8333333333333333
+
+        # actual values
+        a = stats.describe(x, axis=None)
+
+        assert_equal(a.nobs, e_nobs)
+        assert_almost_equal(a.minmax, e_minmax)
+        assert_almost_equal(a.mean, e_mean)
+        assert_almost_equal(a.variance, e_var)
+        assert_array_almost_equal(a.skewness, e_skew, decimal=13)
+        assert_array_almost_equal(a.kurtosis, e_kurt, decimal=13)
+
+    def test_describe_empty(self):
+        assert_raises(ValueError, stats.describe, [])
+
+
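+# Illustrative sketch (added by the editor, not from the original file):
+# `describe` is expected to agree with the individual estimators under its
+# defaults (ddof=1 variance, biased skewness/kurtosis).
+def _describe_consistency_demo():
+    x = np.arange(10.0)
+    d = stats.describe(x)
+    assert d.nobs == x.size
+    assert d.minmax == (x.min(), x.max())
+    assert np.isclose(d.mean, x.mean())
+    assert np.isclose(d.variance, x.var(ddof=1))
+    assert np.isclose(d.skewness, stats.skew(x))
+    assert np.isclose(d.kurtosis, stats.kurtosis(x))
+
+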
+def test_normalitytests():
+    with pytest.warns(RuntimeWarning, match="Precision loss occurred"):
+        assert_raises(ValueError, stats.skewtest, 4.)
+        assert_raises(ValueError, stats.kurtosistest, 4.)
+        assert_raises(ValueError, stats.normaltest, 4.)
+
+    # numbers verified with R: dagoTest in package fBasics
+    st_normal, st_skew, st_kurt = (3.92371918, 1.98078826, -0.01403734)
+    pv_normal, pv_skew, pv_kurt = (0.14059673, 0.04761502, 0.98880019)
+    pv_skew_less, pv_kurt_less = 1 - pv_skew / 2, pv_kurt / 2
+    pv_skew_greater, pv_kurt_greater = pv_skew / 2, 1 - pv_kurt / 2
+    x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
+    attributes = ('statistic', 'pvalue')
+
+    assert_array_almost_equal(stats.normaltest(x), (st_normal, pv_normal))
+    check_named_results(stats.normaltest(x), attributes)
+    assert_array_almost_equal(stats.skewtest(x), (st_skew, pv_skew))
+    assert_array_almost_equal(stats.skewtest(x, alternative='less'),
+                              (st_skew, pv_skew_less))
+    assert_array_almost_equal(stats.skewtest(x, alternative='greater'),
+                              (st_skew, pv_skew_greater))
+    check_named_results(stats.skewtest(x), attributes)
+    assert_array_almost_equal(stats.kurtosistest(x), (st_kurt, pv_kurt))
+    assert_array_almost_equal(stats.kurtosistest(x, alternative='less'),
+                              (st_kurt, pv_kurt_less))
+    assert_array_almost_equal(stats.kurtosistest(x, alternative='greater'),
+                              (st_kurt, pv_kurt_greater))
+    check_named_results(stats.kurtosistest(x), attributes)
+
+    # some more intuitive tests for kurtosistest and skewtest.
+    # see gh-13549.
+    # skew parameter is 1 > 0
+    a1 = stats.skewnorm.rvs(a=1, size=10000, random_state=123)
+    pval = stats.skewtest(a1, alternative='greater').pvalue
+    assert_almost_equal(pval, 0.0, decimal=5)
+    # excess kurtosis of laplace is 3 > 0
+    a2 = stats.laplace.rvs(size=10000, random_state=123)
+    pval = stats.kurtosistest(a2, alternative='greater').pvalue
+    assert_almost_equal(pval, 0.0)
+
+    # Test axis=None (equal to axis=0 for 1-D input)
+    assert_array_almost_equal(stats.normaltest(x, axis=None),
+                              (st_normal, pv_normal))
+    assert_array_almost_equal(stats.skewtest(x, axis=None),
+                              (st_skew, pv_skew))
+    assert_array_almost_equal(stats.kurtosistest(x, axis=None),
+                              (st_kurt, pv_kurt))
+
+    x = np.arange(10.)
+    x[9] = np.nan
+    with np.errstate(invalid="ignore"):
+        assert_array_equal(stats.skewtest(x), (np.nan, np.nan))
+
+    expected = (1.0184643553962129, 0.30845733195153502)
+    assert_array_almost_equal(stats.skewtest(x, nan_policy='omit'), expected)
+
+    # test alternative with nan_policy='omit'
+    a1[10:100] = np.nan
+    z, p = stats.skewtest(a1, nan_policy='omit')
+    zl, pl = stats.skewtest(a1, nan_policy='omit', alternative='less')
+    zg, pg = stats.skewtest(a1, nan_policy='omit', alternative='greater')
+    assert_allclose(zl, z, atol=1e-15)
+    assert_allclose(zg, z, atol=1e-15)
+    assert_allclose(pl, 1 - p/2, atol=1e-15)
+    assert_allclose(pg, p/2, atol=1e-15)
+
+    with np.errstate(all='ignore'):
+        assert_raises(ValueError, stats.skewtest, x, nan_policy='raise')
+    assert_raises(ValueError, stats.skewtest, x, nan_policy='foobar')
+    assert_raises(ValueError, stats.skewtest, list(range(8)),
+                  alternative='foobar')
+
+    x = np.arange(30.)
+    x[29] = np.nan
+    with np.errstate(all='ignore'):
+        assert_array_equal(stats.kurtosistest(x), (np.nan, np.nan))
+
+    expected = (-2.2683547379505273, 0.023307594135872967)
+    assert_array_almost_equal(stats.kurtosistest(x, nan_policy='omit'),
+                              expected)
+
+    # test alternative with nan_policy='omit'
+    a2[10:20] = np.nan
+    z, p = stats.kurtosistest(a2[:100], nan_policy='omit')
+    zl, pl = stats.kurtosistest(a2[:100], nan_policy='omit',
+                                alternative='less')
+    zg, pg = stats.kurtosistest(a2[:100], nan_policy='omit',
+                                alternative='greater')
+    assert_allclose(zl, z, atol=1e-15)
+    assert_allclose(zg, z, atol=1e-15)
+    assert_allclose(pl, 1 - p/2, atol=1e-15)
+    assert_allclose(pg, p/2, atol=1e-15)
+
+    assert_raises(ValueError, stats.kurtosistest, x, nan_policy='raise')
+    assert_raises(ValueError, stats.kurtosistest, x, nan_policy='foobar')
+    assert_raises(ValueError, stats.kurtosistest, list(range(20)),
+                  alternative='foobar')
+
+    with np.errstate(all='ignore'):
+        assert_array_equal(stats.normaltest(x), (np.nan, np.nan))
+
+    expected = (6.2260409514287449, 0.04446644248650191)
+    assert_array_almost_equal(stats.normaltest(x, nan_policy='omit'), expected)
+
+    assert_raises(ValueError, stats.normaltest, x, nan_policy='raise')
+    assert_raises(ValueError, stats.normaltest, x, nan_policy='foobar')
+
+    # regression test for issue gh-9033: x is clearly non-normal, but the
+    # power of a negative denominator needs to be handled correctly to
+    # reject normality
+    counts = [128, 0, 58, 7, 0, 41, 16, 0, 0, 167]
+    x = np.hstack([np.full(c, i) for i, c in enumerate(counts)])
+    assert_equal(stats.kurtosistest(x)[1] < 0.01, True)
+
+
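+# Sketch for illustration (not part of the original tests): D'Agostino's K^2
+# statistic reported by `normaltest` is the sum of the squared z-scores from
+# `skewtest` and `kurtosistest`, which ties the three tests above together.
+def _k2_decomposition_demo():
+    x = np.array((-2, -1, 0, 1, 2, 3) * 4)**2
+    s = stats.skewtest(x).statistic
+    k = stats.kurtosistest(x).statistic
+    assert np.isclose(stats.normaltest(x).statistic, s**2 + k**2)
+
+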
+class TestRankSums:
+
+    np.random.seed(0)
+    x, y = np.random.rand(2, 10)
+
+    @pytest.mark.parametrize('alternative', ['less', 'greater', 'two-sided'])
+    def test_ranksums_result_attributes(self, alternative):
+        # ranksums p-value equals the mannwhitneyu p-value without
+        # continuity or tie correction
+        res1 = stats.ranksums(self.x, self.y,
+                              alternative=alternative).pvalue
+        res2 = stats.mannwhitneyu(self.x, self.y, use_continuity=False,
+                                  alternative=alternative).pvalue
+        assert_allclose(res1, res2)
+
+    def test_ranksums_named_results(self):
+        res = stats.ranksums(self.x, self.y)
+        check_named_results(res, ('statistic', 'pvalue'))
+
+    def test_input_validation(self):
+        with assert_raises(ValueError, match="alternative must be 'less'"):
+            stats.ranksums(self.x, self.y, alternative='foobar')
+
+
+class TestJarqueBera:
+    def test_jarque_bera_stats(self):
+        np.random.seed(987654321)
+        x = np.random.normal(0, 1, 100000)
+        y = np.random.chisquare(10000, 100000)
+        z = np.random.rayleigh(1, 100000)
+
+        assert_equal(stats.jarque_bera(x)[0], stats.jarque_bera(x).statistic)
+        assert_equal(stats.jarque_bera(x)[1], stats.jarque_bera(x).pvalue)
+
+        assert_equal(stats.jarque_bera(y)[0], stats.jarque_bera(y).statistic)
+        assert_equal(stats.jarque_bera(y)[1], stats.jarque_bera(y).pvalue)
+
+        assert_equal(stats.jarque_bera(z)[0], stats.jarque_bera(z).statistic)
+        assert_equal(stats.jarque_bera(z)[1], stats.jarque_bera(z).pvalue)
+
+        assert_(stats.jarque_bera(x)[1] > stats.jarque_bera(y)[1])
+        assert_(stats.jarque_bera(x).pvalue > stats.jarque_bera(y).pvalue)
+
+        assert_(stats.jarque_bera(x)[1] > stats.jarque_bera(z)[1])
+        assert_(stats.jarque_bera(x).pvalue > stats.jarque_bera(z).pvalue)
+
+        assert_(stats.jarque_bera(y)[1] > stats.jarque_bera(z)[1])
+        assert_(stats.jarque_bera(y).pvalue > stats.jarque_bera(z).pvalue)
+
+    def test_jarque_bera_array_like(self):
+        np.random.seed(987654321)
+        x = np.random.normal(0, 1, 100000)
+
+        jb_test1 = JB1, p1 = stats.jarque_bera(list(x))
+        jb_test2 = JB2, p2 = stats.jarque_bera(tuple(x))
+        jb_test3 = JB3, p3 = stats.jarque_bera(x.reshape(2, 50000))
+
+        assert_(JB1 == JB2 == JB3 == jb_test1.statistic
+                == jb_test2.statistic == jb_test3.statistic)
+        assert_(p1 == p2 == p3 == jb_test1.pvalue
+                == jb_test2.pvalue == jb_test3.pvalue)
+
+    def test_jarque_bera_size(self):
+        assert_raises(ValueError, stats.jarque_bera, [])
+
+    def test_axis(self):
+        rng = np.random.default_rng(abs(hash('JarqueBera')))
+        x = rng.random(size=(2, 45))
+
+        assert_equal(stats.jarque_bera(x, axis=None),
+                     stats.jarque_bera(x.ravel()))
+
+        res = stats.jarque_bera(x, axis=1)
+        s0, p0 = stats.jarque_bera(x[0, :])
+        s1, p1 = stats.jarque_bera(x[1, :])
+        assert_allclose(res.statistic, [s0, s1])
+        assert_allclose(res.pvalue, [p0, p1])
+
+        resT = stats.jarque_bera(x.T, axis=0)
+        assert_allclose(res, resT)
+
+
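+# Editor's illustration (not in the original file): the Jarque-Bera statistic
+# is n/6 * (S^2 + K^2/4), with S and K the biased sample skewness and excess
+# kurtosis -- SciPy's defaults for `skew` and `kurtosis`.
+def _jarque_bera_formula_demo():
+    rng = np.random.default_rng(12345)
+    x = rng.normal(size=1000)
+    s, k = stats.skew(x), stats.kurtosis(x)
+    jb = x.size / 6 * (s**2 + k**2 / 4)
+    assert np.isclose(stats.jarque_bera(x).statistic, jb)
+
+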
+def test_skewtest_too_few_samples():
+    # Regression test for ticket #1492.
+    # skewtest requires at least 8 samples; 7 should raise a ValueError.
+    x = np.arange(7.0)
+    assert_raises(ValueError, stats.skewtest, x)
+
+
+def test_kurtosistest_too_few_samples():
+    # Regression test for ticket #1425.
+    # kurtosistest requires at least 5 samples; 4 should raise a ValueError.
+    x = np.arange(4.0)
+    assert_raises(ValueError, stats.kurtosistest, x)
+
+
+class TestMannWhitneyU:
+    X = [19.8958398126694, 19.5452691647182, 19.0577309166425, 21.716543054589,
+         20.3269502208702, 20.0009273294025, 19.3440043632957, 20.4216806548105,
+         19.0649894736528, 18.7808043120398, 19.3680942943298, 19.4848044069953,
+         20.7514611265663, 19.0894948874598, 19.4975522356628, 18.9971170734274,
+         20.3239606288208, 20.6921298083835, 19.0724259532507, 18.9825187935021,
+         19.5144462609601, 19.8256857844223, 20.5174677102032, 21.1122407995892,
+         17.9490854922535, 18.2847521114727, 20.1072217648826, 18.6439891962179,
+         20.4970638083542, 19.5567594734914]
+
+    Y = [19.2790668029091, 16.993808441865, 18.5416338448258, 17.2634018833575,
+         19.1577183624616, 18.5119655377495, 18.6068455037221, 18.8358343362655,
+         19.0366413269742, 18.1135025515417, 19.2201873866958, 17.8344909022841,
+         18.2894380745856, 18.6661374133922, 19.9688601693252, 16.0672254617636,
+         19.00596360572, 19.201561539032, 19.0487501090183, 19.0847908674356]
+
+    significant = 14
+
+    def test_mannwhitneyu_one_sided(self):
+        u1, p1 = stats.mannwhitneyu(self.X, self.Y, alternative='less')
+        u2, p2 = stats.mannwhitneyu(self.Y, self.X, alternative='greater')
+        u3, p3 = stats.mannwhitneyu(self.X, self.Y, alternative='greater')
+        u4, p4 = stats.mannwhitneyu(self.Y, self.X, alternative='less')
+
+        assert_equal(p1, p2)
+        assert_equal(p3, p4)
+        assert_(p1 != p3)
+        assert_equal(u1, 498)
+        assert_equal(u2, 102)
+        assert_equal(u3, 498)
+        assert_equal(u4, 102)
+        assert_approx_equal(p1, 0.999957683256589, significant=self.significant)
+        assert_approx_equal(p3, 4.5941632666275e-05, significant=self.significant)
+
+    def test_mannwhitneyu_two_sided(self):
+        u1, p1 = stats.mannwhitneyu(self.X, self.Y, alternative='two-sided')
+        u2, p2 = stats.mannwhitneyu(self.Y, self.X, alternative='two-sided')
+
+        assert_equal(p1, p2)
+        assert_equal(u1, 498)
+        assert_equal(u2, 102)
+        assert_approx_equal(p1, 9.188326533255e-05,
+                            significant=self.significant)
+
+    def test_mannwhitneyu_no_correct_one_sided(self):
+        u1, p1 = stats.mannwhitneyu(self.X, self.Y, False,
+                                    alternative='less')
+        u2, p2 = stats.mannwhitneyu(self.Y, self.X, False,
+                                    alternative='greater')
+        u3, p3 = stats.mannwhitneyu(self.X, self.Y, False,
+                                    alternative='greater')
+        u4, p4 = stats.mannwhitneyu(self.Y, self.X, False,
+                                    alternative='less')
+
+        assert_equal(p1, p2)
+        assert_equal(p3, p4)
+        assert_(p1 != p3)
+        assert_equal(u1, 498)
+        assert_equal(u2, 102)
+        assert_equal(u3, 498)
+        assert_equal(u4, 102)
+        assert_approx_equal(p1, 0.999955905990004, significant=self.significant)
+        assert_approx_equal(p3, 4.40940099958089e-05, significant=self.significant)
+
+    def test_mannwhitneyu_no_correct_two_sided(self):
+        u1, p1 = stats.mannwhitneyu(self.X, self.Y, False,
+                                    alternative='two-sided')
+        u2, p2 = stats.mannwhitneyu(self.Y, self.X, False,
+                                    alternative='two-sided')
+
+        assert_equal(p1, p2)
+        assert_equal(u1, 498)
+        assert_equal(u2, 102)
+        assert_approx_equal(p1, 8.81880199916178e-05,
+                            significant=self.significant)
+
+    def test_mannwhitneyu_ones(self):
+        # test for gh-1428
+        x = np.array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
+                      1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
+                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
+                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
+                      1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
+                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
+                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2.,
+                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
+                      1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 2., 1., 1., 2.,
+                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
+                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1.,
+                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
+                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
+                      1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
+                      1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
+                      1., 1., 1., 1., 1., 1., 1., 1., 3., 1., 1., 1., 1., 1.,
+                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
+                      1., 1., 1., 1., 1., 1.])
+
+        y = np.array([1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1., 1.,
+                      2., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 1., 1., 3.,
+                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1.,
+                      1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1.,
+                      1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2.,
+                      2., 1., 1., 2., 1., 1., 2., 1., 2., 1., 1., 1., 1., 2.,
+                      2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
+                      1., 2., 1., 1., 1., 1., 1., 2., 2., 2., 1., 1., 1., 1.,
+                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
+                      2., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1.,
+                      1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 2., 1., 1.,
+                      1., 1., 1., 1.])
+
+        # checked against R wilcox.test
+        assert_allclose(stats.mannwhitneyu(x, y, alternative='less'),
+                        (16980.5, 2.8214327656317373e-005))
+        # p-value from R, e.g. wilcox.test(x, y, alternative="g")
+        assert_allclose(stats.mannwhitneyu(x, y, alternative='greater'),
+                        (16980.5, 0.9999719954296))
+        assert_allclose(stats.mannwhitneyu(x, y, alternative='two-sided'),
+                        (16980.5, 5.642865531266e-05))
+
+    def test_mannwhitneyu_result_attributes(self):
+        # test for namedtuple attribute results
+        attributes = ('statistic', 'pvalue')
+        res = stats.mannwhitneyu(self.X, self.Y, alternative="less")
+        check_named_results(res, attributes)
+
+
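+# Added sketch (not from the original suite): the one-sided U statistics are
+# complementary, U_xy + U_yx = n_x * n_y, which is why u1=498 and u2=102 in
+# the class above always sum to 30 * 20 = 600.
+def _mannwhitneyu_complement_demo():
+    rng = np.random.default_rng(54321)
+    x, y = rng.normal(size=30), rng.normal(size=20)
+    u_xy = stats.mannwhitneyu(x, y, alternative='two-sided').statistic
+    u_yx = stats.mannwhitneyu(y, x, alternative='two-sided').statistic
+    assert u_xy + u_yx == len(x) * len(y)
+
+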
+def test_pointbiserial():
+    # same as mstats test except for the nan
+    # Test data: https://web.archive.org/web/20060504220742/https://support.sas.com/ctx/samples/index.jsp?sid=490&tab=output
+    x = [1,0,1,1,1,1,0,1,0,0,0,1,1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,1,0,
+         0,0,0,0,1]
+    y = [14.8,13.8,12.4,10.1,7.1,6.1,5.8,4.6,4.3,3.5,3.3,3.2,3.0,
+         2.8,2.8,2.5,2.4,2.3,2.1,1.7,1.7,1.5,1.3,1.3,1.2,1.2,1.1,
+         0.8,0.7,0.6,0.5,0.2,0.2,0.1]
+    assert_almost_equal(stats.pointbiserialr(x, y)[0], 0.36149, 5)
+
+    # test for namedtuple attribute results
+    attributes = ('correlation', 'pvalue')
+    res = stats.pointbiserialr(x, y)
+    check_named_results(res, attributes)
+    assert_equal(res.correlation, res.statistic)
+
+
+def test_obrientransform():
+    # A couple of tests calculated by hand.
+    x1 = np.array([0, 2, 4])
+    t1 = stats.obrientransform(x1)
+    expected = [7, -2, 7]
+    assert_allclose(t1[0], expected)
+
+    x2 = np.array([0, 3, 6, 9])
+    t2 = stats.obrientransform(x2)
+    expected = np.array([30, 0, 0, 30])
+    assert_allclose(t2[0], expected)
+
+    # Test two arguments.
+    a, b = stats.obrientransform(x1, x2)
+    assert_equal(a, t1[0])
+    assert_equal(b, t2[0])
+
+    # Test three arguments.
+    a, b, c = stats.obrientransform(x1, x2, x1)
+    assert_equal(a, t1[0])
+    assert_equal(b, t2[0])
+    assert_equal(c, t1[0])
+
+    # This is a regression test to check np.var replacement.
+    # The author of this test didn't separately verify the numbers.
+    x1 = np.arange(5)
+    result = np.array(
+      [[5.41666667, 1.04166667, -0.41666667, 1.04166667, 5.41666667],
+       [21.66666667, 4.16666667, -1.66666667, 4.16666667, 21.66666667]])
+    assert_array_almost_equal(stats.obrientransform(x1, 2*x1), result, decimal=8)
+
+    # Example from "O'Brien Test for Homogeneity of Variance"
+    # by Herve Abdi.
+    values = range(5, 11)
+    reps = np.array([5, 11, 9, 3, 2, 2])
+    data = np.repeat(values, reps)
+    transformed_values = np.array([3.1828, 0.5591, 0.0344,
+                                   1.6086, 5.2817, 11.0538])
+    expected = np.repeat(transformed_values, reps)
+    result = stats.obrientransform(data)
+    assert_array_almost_equal(result[0], expected, decimal=4)
+
+
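+# Reference sketch (editor's addition): a common statement of the O'Brien
+# transform for a sample of size n with squared deviations d_i = (x_i - m)^2
+# is r_i = ((n - 1.5) * n * d_i - 0.5 * sum(d)) / ((n - 1) * (n - 2)); it
+# reproduces the hand-calculated [7, -2, 7] above for x = [0, 2, 4].
+def _obrientransform_reference_demo():
+    x = np.array([0.0, 2.0, 4.0])
+    n = x.size
+    d = (x - x.mean())**2
+    r = ((n - 1.5) * n * d - 0.5 * d.sum()) / ((n - 1) * (n - 2))
+    assert_allclose(stats.obrientransform(x)[0], r)
+
+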
+def check_equal_gmean(array_like, desired, axis=None, dtype=None, rtol=1e-7,
+                      weights=None):
+    # Note: `axis` is always passed explicitly here, so calling gmean
+    # without the axis keyword is not exercised
+    x = stats.gmean(array_like, axis=axis, dtype=dtype, weights=weights)
+    assert_allclose(x, desired, rtol=rtol)
+    assert_equal(x.dtype, dtype)
+
+
+def check_equal_hmean(array_like, desired, axis=None, dtype=None, rtol=1e-7,
+                      weights=None):
+    x = stats.hmean(array_like, axis=axis, dtype=dtype, weights=weights)
+    assert_allclose(x, desired, rtol=rtol)
+    assert_equal(x.dtype, dtype)
+
+
+def check_equal_pmean(array_like, exp, desired, axis=None, dtype=None,
+                      rtol=1e-7, weights=None):
+    x = stats.pmean(array_like, exp, axis=axis, dtype=dtype, weights=weights)
+    assert_allclose(x, desired, rtol=rtol)
+    assert_equal(x.dtype, dtype)
+
+
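+# Editor's note as code (illustrative): the three helpers above cover members
+# of one family -- the power mean reduces to the harmonic mean at p = -1 and
+# to the arithmetic mean at p = 1 (the p = 0 case is served by gmean).
+def _mean_family_demo():
+    a = np.array([10., 20., 30., 40.])
+    assert np.isclose(stats.pmean(a, -1), stats.hmean(a))
+    assert np.isclose(stats.pmean(a, 1), a.mean())
+
+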
+class TestHarMean:
+    def test_0(self):
+        a = [1, 0, 2]
+        desired = 0
+        check_equal_hmean(a, desired)
+
+    def test_1d_list(self):
+        #  Test a 1d list
+        a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
+        desired = 34.1417152147
+        check_equal_hmean(a, desired)
+
+        a = [1, 2, 3, 4]
+        desired = 4. / (1. / 1 + 1. / 2 + 1. / 3 + 1. / 4)
+        check_equal_hmean(a, desired)
+
+    def test_1d_array(self):
+        #  Test a 1d array
+        a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
+        desired = 34.1417152147
+        check_equal_hmean(a, desired)
+
+    def test_1d_array_with_zero(self):
+        a = np.array([1, 0])
+        desired = 0.0
+        assert_equal(stats.hmean(a), desired)
+
+    def test_1d_array_with_negative_value(self):
+        a = np.array([1, 0, -1])
+        assert_raises(ValueError, stats.hmean, a)
+
+    # Note the next tests use axis=None as default, not axis=0
+    def test_2d_list(self):
+        #  Test a 2d list
+        a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
+        desired = 38.6696271841
+        check_equal_hmean(a, desired)
+
+    def test_2d_array(self):
+        #  Test a 2d array
+        a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
+        desired = 38.6696271841
+        check_equal_hmean(np.array(a), desired)
+
+    def test_2d_axis0(self):
+        #  Test a 2d list with axis=0
+        a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
+        desired = np.array([22.88135593, 39.13043478, 52.90076336, 65.45454545])
+        check_equal_hmean(a, desired, axis=0)
+
+    def test_2d_axis0_with_zero(self):
+        a = [[10, 0, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
+        desired = np.array([22.88135593, 0.0, 52.90076336, 65.45454545])
+        assert_allclose(stats.hmean(a, axis=0), desired)
+
+    def test_2d_axis1(self):
+        #  Test a 2d list with axis=1
+        a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
+        desired = np.array([19.2, 63.03939962, 103.80078637])
+        check_equal_hmean(a, desired, axis=1)
+
+    def test_2d_axis1_with_zero(self):
+        a = [[10, 0, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
+        desired = np.array([0.0, 63.03939962, 103.80078637])
+        assert_allclose(stats.hmean(a, axis=1), desired)
+
+    def test_weights_1d_list(self):
+        # Desired result from:
+        # https://www.hackmath.net/en/math-problem/35871
+        a = [2, 10, 6]
+        weights = [10, 5, 3]
+        desired = 3
+        check_equal_hmean(a, desired, weights=weights, rtol=1e-5)
+
+    def test_weights_2d_array_axis0(self):
+        # Desired result from:
+        # https://www.hackmath.net/en/math-problem/35871
+        a = np.array([[2, 5], [10, 5], [6, 5]])
+        weights = np.array([[10, 1], [5, 1], [3, 1]])
+        desired = np.array([3, 5])
+        check_equal_hmean(a, desired, axis=0, weights=weights, rtol=1e-5)
+
+    def test_weights_2d_array_axis1(self):
+        # Desired result from:
+        # https://www.hackmath.net/en/math-problem/35871
+        a = np.array([[2, 10, 6], [7, 7, 7]])
+        weights = np.array([[10, 5, 3], [1, 1, 1]])
+        desired = np.array([3, 7])
+        check_equal_hmean(a, desired, axis=1, weights=weights, rtol=1e-5)
+
+    def test_weights_masked_1d_array(self):
+        # Desired result from:
+        # https://www.hackmath.net/en/math-problem/35871
+        a = np.array([2, 10, 6, 42])
+        weights = np.ma.array([10, 5, 3, 42], mask=[0, 0, 0, 1])
+        desired = 3
+        check_equal_hmean(a, desired, weights=weights, rtol=1e-5)
+
+
+class TestGeoMean:
+    def test_0(self):
+        a = [1, 0, 2]
+        desired = 0
+        check_equal_gmean(a, desired)
+
+    def test_1d_list(self):
+        #  Test a 1d list
+        a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
+        desired = 45.2872868812
+        check_equal_gmean(a, desired)
+
+        a = [1, 2, 3, 4]
+        desired = power(1 * 2 * 3 * 4, 1. / 4.)
+        check_equal_gmean(a, desired, rtol=1e-14)
+
+    def test_1d_array(self):
+        #  Test a 1d array
+        a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
+        desired = 45.2872868812
+        check_equal_gmean(a, desired)
+
+        a = array([1, 2, 3, 4], float32)
+        desired = power(1 * 2 * 3 * 4, 1. / 4.)
+        check_equal_gmean(a, desired, dtype=float32)
+
+    # Note the next tests use axis=None as default, not axis=0
+    def test_2d_list(self):
+        #  Test a 2d list
+        a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
+        desired = 52.8885199
+        check_equal_gmean(a, desired)
+
+    def test_2d_array(self):
+        #  Test a 2d array
+        a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
+        desired = 52.8885199
+        check_equal_gmean(array(a), desired)
+
+    def test_2d_axis0(self):
+        #  Test a 2d list with axis=0
+        a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
+        desired = np.array([35.56893304, 49.32424149, 61.3579244, 72.68482371])
+        check_equal_gmean(a, desired, axis=0)
+
+        a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
+        desired = array([1, 2, 3, 4])
+        check_equal_gmean(a, desired, axis=0, rtol=1e-14)
+
+    def test_2d_axis1(self):
+        #  Test a 2d list with axis=1
+        a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
+        desired = np.array([22.13363839, 64.02171746, 104.40086817])
+        check_equal_gmean(a, desired, axis=1)
+
+        a = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
+        v = power(1 * 2 * 3 * 4, 1. / 4.)
+        desired = array([v, v, v])
+        check_equal_gmean(a, desired, axis=1, rtol=1e-14)
+
+    def test_large_values(self):
+        a = array([1e100, 1e200, 1e300])
+        desired = 1e200
+        check_equal_gmean(a, desired, rtol=1e-13)
+
+    def test_1d_list0(self):
+        #  Test a 1d list with zero element
+        a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 0]
+        desired = 0.0  # due to exp(-inf)=0
+        with np.errstate(all='ignore'):
+            check_equal_gmean(a, desired)
+
+    def test_1d_array0(self):
+        #  Test a 1d array with zero element
+        a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 0])
+        desired = 0.0  # due to exp(-inf)=0
+        with np.errstate(divide='ignore'):
+            check_equal_gmean(a, desired)
+
+    def test_1d_list_neg(self):
+        #  Test a 1d list with negative element
+        a = [10, 20, 30, 40, 50, 60, 70, 80, 90, -1]
+        desired = np.nan  # due to log(-1) = nan
+        with np.errstate(invalid='ignore'):
+            check_equal_gmean(a, desired)
+
+    def test_weights_1d_list(self):
+        # Desired result from:
+        # https://www.dummies.com/education/math/business-statistics/how-to-find-the-weighted-geometric-mean-of-a-data-set/
+        a = [1, 2, 3, 4, 5]
+        weights = [2, 5, 6, 4, 3]
+        desired = 2.77748
+        check_equal_gmean(a, desired, weights=weights, rtol=1e-5)
+
+    def test_weights_1d_array(self):
+        # Desired result from:
+        # https://www.dummies.com/education/math/business-statistics/how-to-find-the-weighted-geometric-mean-of-a-data-set/
+        a = np.array([1, 2, 3, 4, 5])
+        weights = np.array([2, 5, 6, 4, 3])
+        desired = 2.77748
+        check_equal_gmean(a, desired, weights=weights, rtol=1e-5)
+
+    def test_weights_masked_1d_array(self):
+        # Desired result from:
+        # https://www.dummies.com/education/math/business-statistics/how-to-find-the-weighted-geometric-mean-of-a-data-set/
+        a = np.array([1, 2, 3, 4, 5, 6])
+        weights = np.ma.array([2, 5, 6, 4, 3, 5], mask=[0, 0, 0, 0, 0, 1])
+        desired = 2.77748
+        check_equal_gmean(a, desired, weights=weights, rtol=1e-5)
+
+
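+# Illustrative sketch (added): the geometric mean is the exponential of the
+# arithmetic mean of the logs, which is why zeros map to exp(-inf) = 0 and
+# negative entries to nan in the tests above.
+def _gmean_log_identity_demo():
+    a = np.array([1.0, 2.0, 3.0, 4.0])
+    assert np.isclose(stats.gmean(a), np.exp(np.log(a).mean()))
+
+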
+class TestPowMean:
+
+    @staticmethod
+    def pmean_reference(a, p):
+        return (np.sum(a**p) / a.size)**(1/p)
+
+    @staticmethod
+    def wpmean_reference(a, p, weights):
+        return (np.sum(weights * a**p) / np.sum(weights))**(1/p)
+
+    def test_bad_exponent(self):
+        with pytest.raises(ValueError, match='Power mean only defined for'):
+            stats.pmean([1, 2, 3], [0])
+        with pytest.raises(ValueError, match='Power mean only defined for'):
+            stats.pmean([1, 2, 3], np.array([0]))
+
+    def test_1d_list(self):
+        a, p = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100], 3.5
+        desired = TestPowMean.pmean_reference(np.array(a), p)
+        check_equal_pmean(a, p, desired)
+
+        a, p = [1, 2, 3, 4], 2
+        desired = np.sqrt((1**2 + 2**2 + 3**2 + 4**2) / 4)
+        check_equal_pmean(a, p, desired)
+
+    def test_1d_array(self):
+        a, p = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]), -2.5
+        desired = TestPowMean.pmean_reference(a, p)
+        check_equal_pmean(a, p, desired)
+
+    def test_1d_array_with_zero(self):
+        a, p = np.array([1, 0]), -1
+        desired = 0.0
+        assert_equal(stats.pmean(a, p), desired)
+
+    def test_1d_array_with_negative_value(self):
+        a, p = np.array([1, 0, -1]), 1.23
+        with pytest.raises(ValueError, match='Power mean only defined if all'):
+            stats.pmean(a, p)
+
+    @pytest.mark.parametrize(
+        ("a", "p"),
+        [([[10, 20], [50, 60], [90, 100]], -0.5),
+         (np.array([[10, 20], [50, 60], [90, 100]]), 0.5)]
+    )
+    def test_2d_axisnone(self, a, p):
+        desired = TestPowMean.pmean_reference(np.array(a), p)
+        check_equal_pmean(a, p, desired)
+
+    @pytest.mark.parametrize(
+        ("a", "p"),
+        [([[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]], -0.5),
+         ([[10, 0, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]], 0.5)]
+    )
+    def test_2d_list_axis0(self, a, p):
+        desired = [
+            TestPowMean.pmean_reference(
+                np.array([a[i][j] for i in range(len(a))]), p
+            )
+            for j in range(len(a[0]))
+        ]
+        check_equal_pmean(a, p, desired, axis=0)
+
+    @pytest.mark.parametrize(
+        ("a", "p"),
+        [([[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]], -0.5),
+         ([[10, 0, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]], 0.5)]
+    )
+    def test_2d_list_axis1(self, a, p):
+        desired = [TestPowMean.pmean_reference(np.array(a_), p) for a_ in a]
+        check_equal_pmean(a, p, desired, axis=1)
+
+    def test_weights_1d_list(self):
+        a, p = [2, 10, 6], -1.23456789
+        weights = [10, 5, 3]
+        desired = TestPowMean.wpmean_reference(np.array(a), p, weights)
+        check_equal_pmean(a, p, desired, weights=weights, rtol=1e-5)
+
+    def test_weights_masked_1d_array(self):
+        a, p = np.array([2, 10, 6, 42]), 1
+        weights = np.ma.array([10, 5, 3, 42], mask=[0, 0, 0, 1])
+        desired = np.average(a, weights=weights)
+        check_equal_pmean(a, p, desired, weights=weights, rtol=1e-5)
+
+    @pytest.mark.parametrize(
+        ("axis", "fun_name", "p"),
+        [(None, "wpmean_reference", 9.87654321),
+         (0, "gmean", 0),
+         (1, "hmean", -1)]
+    )
+    def test_weights_2d_array(self, axis, fun_name, p):
+        if fun_name == 'wpmean_reference':
+            def fun(a, axis, weights):
+                return TestPowMean.wpmean_reference(a, p, weights)
+        else:
+            fun = getattr(stats, fun_name)
+        a = np.array([[2, 5], [10, 5], [6, 5]])
+        weights = np.array([[10, 1], [5, 1], [3, 1]])
+        desired = fun(a, axis=axis, weights=weights)
+        check_equal_pmean(a, p, desired, axis=axis, weights=weights, rtol=1e-5)
+
+
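+# Editor's sketch (not in the original file): under its default ddof=1,
+# `gstd` is exp(std(log(a))), e.g. the 2.294407613602 constant used below.
+def _gstd_log_identity_demo():
+    a = np.arange(2 * 3 * 4) + 1.0
+    assert np.isclose(stats.gstd(a), np.exp(np.std(np.log(a), ddof=1)))
+
+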
+class TestGeometricStandardDeviation:
+    # must add 1 as `gstd` is only defined for positive values
+    array_1d = np.arange(2 * 3 * 4) + 1
+    gstd_array_1d = 2.294407613602
+    array_3d = array_1d.reshape(2, 3, 4)
+
+    def test_1d_array(self):
+        gstd_actual = stats.gstd(self.array_1d)
+        assert_allclose(gstd_actual, self.gstd_array_1d)
+
+    def test_1d_numeric_array_like_input(self):
+        gstd_actual = stats.gstd(tuple(self.array_1d))
+        assert_allclose(gstd_actual, self.gstd_array_1d)
+
+    def test_raises_value_error_non_array_like_input(self):
+        with pytest.raises(ValueError, match='Invalid array input'):
+            stats.gstd('This should fail as it can not be cast to an array.')
+
+    def test_raises_value_error_zero_entry(self):
+        with pytest.raises(ValueError, match='Non positive value'):
+            stats.gstd(np.append(self.array_1d, [0]))
+
+    def test_raises_value_error_negative_entry(self):
+        with pytest.raises(ValueError, match='Non positive value'):
+            stats.gstd(np.append(self.array_1d, [-1]))
+
+    def test_raises_value_error_inf_entry(self):
+        with pytest.raises(ValueError, match='Infinite value'):
+            stats.gstd(np.append(self.array_1d, [np.inf]))
+
+    def test_propagates_nan_values(self):
+        a = array([[1, 1, 1, 16], [np.nan, 1, 2, 3]])
+        gstd_actual = stats.gstd(a, axis=1)
+        assert_allclose(gstd_actual, np.array([4, np.nan]))
+
+    def test_ddof_equal_to_number_of_observations(self):
+        with pytest.raises(ValueError, match='Degrees of freedom <= 0'):
+            stats.gstd(self.array_1d, ddof=self.array_1d.size)
+
+    def test_3d_array(self):
+        gstd_actual = stats.gstd(self.array_3d, axis=None)
+        assert_allclose(gstd_actual, self.gstd_array_1d)
+
+    def test_3d_array_axis_type_tuple(self):
+        gstd_actual = stats.gstd(self.array_3d, axis=(1,2))
+        assert_allclose(gstd_actual, [2.12939215, 1.22120169])
+
+    def test_3d_array_axis_0(self):
+        gstd_actual = stats.gstd(self.array_3d, axis=0)
+        gstd_desired = np.array([
+            [6.1330555493918, 3.958900210120, 3.1206598248344, 2.6651441426902],
+            [2.3758135028411, 2.174581428192, 2.0260062829505, 1.9115518327308],
+            [1.8205343606803, 1.746342404566, 1.6846557065742, 1.6325269194382]
+        ])
+        assert_allclose(gstd_actual, gstd_desired)
+
+    def test_3d_array_axis_1(self):
+        gstd_actual = stats.gstd(self.array_3d, axis=1)
+        gstd_desired = np.array([
+            [3.118993630946, 2.275985934063, 1.933995977619, 1.742896469724],
+            [1.271693593916, 1.254158641801, 1.238774141609, 1.225164057869]
+        ])
+        assert_allclose(gstd_actual, gstd_desired)
+
+    def test_3d_array_axis_2(self):
+        gstd_actual = stats.gstd(self.array_3d, axis=2)
+        gstd_desired = np.array([
+            [1.8242475707664, 1.2243686572447, 1.1318311657788],
+            [1.0934830582351, 1.0724479791887, 1.0591498540749]
+        ])
+        assert_allclose(gstd_actual, gstd_desired)
+
+    def test_masked_3d_array(self):
+        ma = np.ma.masked_where(self.array_3d > 16, self.array_3d)
+        gstd_actual = stats.gstd(ma, axis=2)
+        gstd_desired = stats.gstd(self.array_3d, axis=2)
+        mask = [[0, 0, 0], [0, 1, 1]]
+        assert_allclose(gstd_actual, gstd_desired)
+        assert_equal(gstd_actual.mask, mask)
+
+
+@pytest.mark.parametrize('alternative', ['two-sided', 'greater', 'less'])
+def test_binom_test_deprecation(alternative):
+    deprecation_msg = ("'binom_test' is deprecated in favour of"
+                       " 'binomtest' from version 1.7.0 and will"
+                       " be removed in Scipy 1.12.0.")
+    num = 10
+    rng = np.random.default_rng(156114182869662948677852568516310985853)
+    X = rng.integers(10, 100, (num,))
+    N = X + rng.integers(0, 100, (num,))
+    P = rng.uniform(0, 1, (num,))
+    for x, n, p in zip(X, N, P):
+        with pytest.warns(DeprecationWarning, match=deprecation_msg):
+            res = stats.binom_test(x, n, p, alternative=alternative)
+        assert res == stats.binomtest(x, n, p, alternative=alternative).pvalue
+
+
+def test_binomtest():
+    # precision tests compared to R for ticket:986
+    pp = np.concatenate((np.linspace(0.1, 0.2, 5),
+                         np.linspace(0.45, 0.65, 5),
+                         np.linspace(0.85, 0.95, 5)))
+    n = 501
+    x = 450
+    results = [0.0, 0.0, 1.0159969301994141e-304,
+               2.9752418572150531e-275, 7.7668382922535275e-250,
+               2.3381250925167094e-099, 7.8284591587323951e-081,
+               9.9155947819961383e-065, 2.8729390725176308e-050,
+               1.7175066298388421e-037, 0.0021070691951093692,
+               0.12044570587262322, 0.88154763174802508, 0.027120993063129286,
+               2.6102587134694721e-006]
+
+    for p, res in zip(pp, results):
+        assert_approx_equal(stats.binomtest(x, n, p).pvalue, res,
+                            significant=12, err_msg='fail for p=%f' % p)
+    assert_approx_equal(stats.binomtest(50, 100, 0.1).pvalue,
+                        5.8320387857343647e-024,
+                        significant=12)
+
+
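+# Illustration (added by the editor): for the symmetric p = 0.5 case the
+# exact two-sided binomtest p-value is the total probability of outcomes no
+# more likely than the observed one (small relative slack for float ties).
+def _binomtest_minlike_demo():
+    k, n, p = 3, 10, 0.5
+    pmf = stats.binom.pmf(np.arange(n + 1), n, p)
+    p_two = pmf[pmf <= pmf[k] * (1 + 1e-7)].sum()
+    assert np.isclose(stats.binomtest(k, n, p).pvalue, p_two)
+
+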
+def test_binomtest2():
+    # test added for issue #2384
+    res2 = [
+        [1.0, 1.0],
+        [0.5, 1.0, 0.5],
+        [0.25, 1.00, 1.00, 0.25],
+        [0.125, 0.625, 1.000, 0.625, 0.125],
+        [0.0625, 0.3750, 1.0000, 1.0000, 0.3750, 0.0625],
+        [0.03125, 0.21875, 0.68750, 1.00000, 0.68750, 0.21875, 0.03125],
+        [0.015625, 0.125000, 0.453125, 1.000000, 1.000000, 0.453125, 0.125000,
+         0.015625],
+        [0.0078125, 0.0703125, 0.2890625, 0.7265625, 1.0000000, 0.7265625,
+         0.2890625, 0.0703125, 0.0078125],
+        [0.00390625, 0.03906250, 0.17968750, 0.50781250, 1.00000000,
+         1.00000000, 0.50781250, 0.17968750, 0.03906250, 0.00390625],
+        [0.001953125, 0.021484375, 0.109375000, 0.343750000, 0.753906250,
+         1.000000000, 0.753906250, 0.343750000, 0.109375000, 0.021484375,
+         0.001953125]
+    ]
+    for k in range(1, 11):
+        res1 = [stats.binomtest(v, k, 0.5).pvalue for v in range(k + 1)]
+        assert_almost_equal(res1, res2[k-1], decimal=10)
+
+
+def test_binomtest3():
+    # test added for issue #2384
+    # test when x == n*p and neighbors
+    res3 = [stats.binomtest(v, v*k, 1./k).pvalue
+            for v in range(1, 11) for k in range(2, 11)]
+    assert_equal(res3, np.ones(len(res3), int))
+
+    # > bt=c()
+    # > for(i in as.single(1:10)) {
+    # +     for(k in as.single(2:10)) {
+    # +         bt = c(bt, binom.test(i-1, k*i,(1/k))$p.value);
+    # +         print(c(i-1, k*i,(1/k)))
+    # +     }
+    # + }
+    binom_testm1 = np.array([
+         0.5, 0.5555555555555556, 0.578125, 0.5904000000000003,
+         0.5981224279835393, 0.603430543396034, 0.607304096221924,
+         0.610255656871054, 0.612579511000001, 0.625, 0.670781893004115,
+         0.68853759765625, 0.6980101120000006, 0.703906431368616,
+         0.70793209416498, 0.7108561134173507, 0.713076544331419,
+         0.714820192935702, 0.6875, 0.7268709038256367, 0.7418963909149174,
+         0.74986110468096, 0.7548015520398076, 0.7581671424768577,
+         0.760607984787832, 0.762459425024199, 0.7639120677676575, 0.7265625,
+         0.761553963657302, 0.774800934828818, 0.7818005980538996,
+         0.78613491480358, 0.789084353140195, 0.7912217659828884,
+         0.79284214559524, 0.794112956558801, 0.75390625, 0.7856929451142176,
+         0.7976688481430754, 0.8039848974727624, 0.807891868948366,
+         0.8105487660137676, 0.812473307174702, 0.8139318233591120,
+         0.815075399104785, 0.7744140625, 0.8037322594985427,
+         0.814742863657656, 0.8205425178645808, 0.8241275984172285,
+         0.8265645374416, 0.8283292196088257, 0.829666291102775,
+         0.8307144686362666, 0.7905273437499996, 0.8178712053954738,
+         0.828116983756619, 0.833508948940494, 0.8368403871552892,
+         0.839104213210105, 0.840743186196171, 0.84198481438049,
+         0.8429580531563676, 0.803619384765625, 0.829338573944648,
+         0.8389591907548646, 0.84401876783902, 0.84714369697889,
+         0.8492667010581667, 0.850803474598719, 0.851967542858308,
+         0.8528799045949524, 0.8145294189453126, 0.838881732845347,
+         0.847979024541911, 0.852760894015685, 0.8557134656773457,
+         0.8577190131799202, 0.85917058278431, 0.860270010472127,
+         0.861131648404582, 0.823802947998047, 0.846984756807511,
+         0.855635653643743, 0.860180994825685, 0.86298688573253,
+         0.864892525675245, 0.866271647085603, 0.867316125625004,
+         0.8681346531755114
+        ])
+
+    # > bt=c()
+    # > for(i in as.single(1:10)) {
+    # +     for(k in as.single(2:10)) {
+    # +         bt = c(bt, binom.test(i+1, k*i,(1/k))$p.value);
+    # +         print(c(i+1, k*i,(1/k)))
+    # +     }
+    # + }
+
+    binom_testp1 = np.array([
+         0.5, 0.259259259259259, 0.26171875, 0.26272, 0.2632244513031551,
+         0.2635138663069203, 0.2636951804161073, 0.2638162407564354,
+         0.2639010709000002, 0.625, 0.4074074074074074, 0.42156982421875,
+         0.4295746560000003, 0.43473045988554, 0.4383309503172684,
+         0.4409884859402103, 0.4430309389962837, 0.444649849401104, 0.6875,
+         0.4927602499618962, 0.5096031427383425, 0.5189636628480,
+         0.5249280070771274, 0.5290623300865124, 0.5320974248125793,
+         0.5344204730474308, 0.536255847400756, 0.7265625, 0.5496019313526808,
+         0.5669248746708034, 0.576436455045805, 0.5824538812831795,
+         0.5866053321547824, 0.589642781414643, 0.5919618019300193,
+         0.593790427805202, 0.75390625, 0.590868349763505, 0.607983393277209,
+         0.617303847446822, 0.623172512167948, 0.627208862156123,
+         0.6301556891501057, 0.632401894928977, 0.6341708982290303,
+         0.7744140625, 0.622562037497196, 0.639236102912278, 0.648263335014579,
+         0.65392850011132, 0.657816519817211, 0.660650782947676,
+         0.662808780346311, 0.6645068560246006, 0.7905273437499996,
+         0.6478843304312477, 0.6640468318879372, 0.6727589686071775,
+         0.6782129857784873, 0.681950188903695, 0.684671508668418,
+         0.686741824999918, 0.688369886732168, 0.803619384765625,
+         0.668716055304315, 0.684360013879534, 0.6927642396829181,
+         0.6980155964704895, 0.701609591890657, 0.7042244320992127,
+         0.7062125081341817, 0.707775152962577, 0.8145294189453126,
+         0.686243374488305, 0.7013873696358975, 0.709501223328243,
+         0.714563595144314, 0.718024953392931, 0.7205416252126137,
+         0.722454130389843, 0.723956813292035, 0.823802947998047,
+         0.701255953767043, 0.715928221686075, 0.723772209289768,
+         0.7286603031173616, 0.7319999279787631, 0.7344267920995765,
+         0.736270323773157, 0.737718376096348
+        ])
+
+    res4_p1 = [stats.binomtest(v+1, v*k, 1./k).pvalue
+               for v in range(1, 11) for k in range(2, 11)]
+    res4_m1 = [stats.binomtest(v-1, v*k, 1./k).pvalue
+               for v in range(1, 11) for k in range(2, 11)]
+
+    assert_almost_equal(res4_p1, binom_testp1, decimal=13)
+    assert_almost_equal(res4_m1, binom_testm1, decimal=13)
+
+
+class TestTrim:
+    # test trim functions
+    def test_trim1(self):
+        a = np.arange(11)
+        assert_equal(np.sort(stats.trim1(a, 0.1)), np.arange(10))
+        assert_equal(np.sort(stats.trim1(a, 0.2)), np.arange(9))
+        assert_equal(np.sort(stats.trim1(a, 0.2, tail='left')),
+                     np.arange(2, 11))
+        assert_equal(np.sort(stats.trim1(a, 3/11., tail='left')),
+                     np.arange(3, 11))
+        assert_equal(stats.trim1(a, 1.0), [])
+        assert_equal(stats.trim1(a, 1.0, tail='left'), [])
+
+        # empty input
+        assert_equal(stats.trim1([], 0.1), [])
+        assert_equal(stats.trim1([], 3/11., tail='left'), [])
+        assert_equal(stats.trim1([], 4/6.), [])
+
+        # test axis
+        a = np.arange(24).reshape(6, 4)
+        ref = np.arange(4, 24).reshape(5, 4)  # first row trimmed
+
+        axis = 0
+        trimmed = stats.trim1(a, 0.2, tail='left', axis=axis)
+        assert_equal(np.sort(trimmed, axis=axis), ref)
+
+        axis = 1
+        trimmed = stats.trim1(a.T, 0.2, tail='left', axis=axis)
+        assert_equal(np.sort(trimmed, axis=axis), ref.T)
+
+    def test_trimboth(self):
+        a = np.arange(11)
+        assert_equal(np.sort(stats.trimboth(a, 3/11.)), np.arange(3, 8))
+        assert_equal(np.sort(stats.trimboth(a, 0.2)),
+                     np.array([2, 3, 4, 5, 6, 7, 8]))
+        assert_equal(np.sort(stats.trimboth(np.arange(24).reshape(6, 4), 0.2)),
+                     np.arange(4, 20).reshape(4, 4))
+        assert_equal(np.sort(stats.trimboth(np.arange(24).reshape(4, 6).T,
+                                            2/6.)),
+                     np.array([[2, 8, 14, 20], [3, 9, 15, 21]]))
+        assert_raises(ValueError, stats.trimboth,
+                      np.arange(24).reshape(4, 6).T, 4/6.)
+
+        # empty input
+        assert_equal(stats.trimboth([], 0.1), [])
+        assert_equal(stats.trimboth([], 3/11.), [])
+        assert_equal(stats.trimboth([], 4/6.), [])
+
+    def test_trim_mean(self):
+        # don't use pre-sorted arrays
+        a = np.array([4, 8, 2, 0, 9, 5, 10, 1, 7, 3, 6])
+        idx = np.array([3, 5, 0, 1, 2, 4])
+        a2 = np.arange(24).reshape(6, 4)[idx, :]
+        a3 = np.arange(24).reshape(6, 4, order='F')[idx, :]
+        assert_equal(stats.trim_mean(a3, 2/6.),
+                     np.array([2.5, 8.5, 14.5, 20.5]))
+        assert_equal(stats.trim_mean(a2, 2/6.),
+                     np.array([10., 11., 12., 13.]))
+        idx4 = np.array([1, 0, 3, 2])
+        a4 = np.arange(24).reshape(4, 6)[idx4, :]
+        assert_equal(stats.trim_mean(a4, 2/6.),
+                     np.array([9., 10., 11., 12., 13., 14.]))
+        # shuffled arange(24) as array_like
+        a = [7, 11, 12, 21, 16, 6, 22, 1, 5, 0, 18, 10, 17, 9, 19, 15, 23,
+             20, 2, 14, 4, 13, 8, 3]
+        assert_equal(stats.trim_mean(a, 2/6.), 11.5)
+        assert_equal(stats.trim_mean([5, 4, 3, 1, 2, 0], 2/6.), 2.5)
+
+        # check axis argument
+        np.random.seed(1234)
+        a = np.random.randint(20, size=(5, 6, 4, 7))
+        for axis in [0, 1, 2, 3, -1]:
+            res1 = stats.trim_mean(a, 2/6., axis=axis)
+            res2 = stats.trim_mean(np.moveaxis(a, axis, 0), 2/6.)
+            assert_equal(res1, res2)
+
+        res1 = stats.trim_mean(a, 2/6., axis=None)
+        res2 = stats.trim_mean(a.ravel(), 2/6.)
+        assert_equal(res1, res2)
+
+        assert_raises(ValueError, stats.trim_mean, a, 0.6)
+
+        # empty input
+        assert_equal(stats.trim_mean([], 0.0), np.nan)
+        assert_equal(stats.trim_mean([], 0.6), np.nan)
+
+
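+# Editor's sketch (illustrative): `trim_mean` with proportiontocut=0.2 drops
+# int(0.2 * n) observations from each tail of the sorted data, so it can be
+# reproduced by hand with a plain slice.
+def _trim_mean_by_hand_demo():
+    a = np.array([4, 8, 2, 0, 9, 5, 10, 1, 7, 3, 6], dtype=float)
+    k = int(0.2 * a.size)
+    trimmed = np.sort(a)[k:a.size - k]
+    assert np.isclose(stats.trim_mean(a, 0.2), trimmed.mean())
+
+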
+class TestSigmaClip:
+    def test_sigmaclip1(self):
+        a = np.concatenate((np.linspace(9.5, 10.5, 31), np.linspace(0, 20, 5)))
+        fact = 4  # default
+        c, low, upp = stats.sigmaclip(a)
+        assert_(c.min() > low)
+        assert_(c.max() < upp)
+        assert_equal(low, c.mean() - fact*c.std())
+        assert_equal(upp, c.mean() + fact*c.std())
+        assert_equal(c.size, a.size)
+
+    def test_sigmaclip2(self):
+        a = np.concatenate((np.linspace(9.5, 10.5, 31), np.linspace(0, 20, 5)))
+        fact = 1.5
+        c, low, upp = stats.sigmaclip(a, fact, fact)
+        assert_(c.min() > low)
+        assert_(c.max() < upp)
+        assert_equal(low, c.mean() - fact*c.std())
+        assert_equal(upp, c.mean() + fact*c.std())
+        assert_equal(c.size, 4)
+        assert_equal(a.size, 36)  # check original array unchanged
+
+    def test_sigmaclip3(self):
+        a = np.concatenate((np.linspace(9.5, 10.5, 11),
+                            np.linspace(-100, -50, 3)))
+        fact = 1.8
+        c, low, upp = stats.sigmaclip(a, fact, fact)
+        assert_(c.min() > low)
+        assert_(c.max() < upp)
+        assert_equal(low, c.mean() - fact*c.std())
+        assert_equal(upp, c.mean() + fact*c.std())
+        assert_equal(c, np.linspace(9.5, 10.5, 11))
+
+    def test_sigmaclip_result_attributes(self):
+        a = np.concatenate((np.linspace(9.5, 10.5, 11),
+                            np.linspace(-100, -50, 3)))
+        fact = 1.8
+        res = stats.sigmaclip(a, fact, fact)
+        attributes = ('clipped', 'lower', 'upper')
+        check_named_results(res, attributes)
+
+    def test_std_zero(self):
+        # regression test #8632
+        x = np.ones(10)
+        assert_equal(stats.sigmaclip(x)[0], x)
+
+
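+# Sketch of the iterative rule exercised by TestSigmaClip (editor's addition;
+# assumes the documented behavior of repeatedly discarding points outside
+# mean +/- fact * std until none remain -- boundary ties are not exercised).
+def _sigmaclip_by_hand_demo():
+    a = np.concatenate((np.linspace(9.5, 10.5, 11), [0.0, 50.0]))
+    c, fact = a.copy(), 2.0
+    while True:
+        lo, hi = c.mean() - fact * c.std(), c.mean() + fact * c.std()
+        kept = c[(c > lo) & (c < hi)]
+        if kept.size == c.size:
+            break
+        c = kept
+    assert_allclose(stats.sigmaclip(a, fact, fact).clipped, c)
+
+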
+class TestAlexanderGovern:
+    def test_compare_dtypes(self):
+        args = [[13, 13, 13, 13, 13, 13, 13, 12, 12],
+                [14, 13, 12, 12, 12, 12, 12, 11, 11],
+                [14, 14, 13, 13, 13, 13, 13, 12, 12],
+                [15, 14, 13, 13, 13, 12, 12, 12, 11]]
+        args_int16 = np.array(args, dtype=np.int16)
+        args_int32 = np.array(args, dtype=np.int32)
+        args_uint8 = np.array(args, dtype=np.uint8)
+        args_float64 = np.array(args, dtype=np.float64)
+
+        res_int16 = stats.alexandergovern(*args_int16)
+        res_int32 = stats.alexandergovern(*args_int32)
+        res_uint8 = stats.alexandergovern(*args_uint8)
+        res_float64 = stats.alexandergovern(*args_float64)
+
+        assert (res_int16.pvalue == res_int32.pvalue ==
+                res_uint8.pvalue == res_float64.pvalue)
+        assert (res_int16.statistic == res_int32.statistic ==
+                res_uint8.statistic == res_float64.statistic)
+
+    def test_bad_inputs(self):
+        # input array is of size zero
+        with assert_raises(ValueError, match="Input sample size must be"
+                                             " greater than one."):
+            stats.alexandergovern([1, 2], [])
+        # input is a scalar, not a list
+        with assert_raises(ValueError, match="Input sample size must be"
+                                             " greater than one."):
+            stats.alexandergovern([1, 2], 2)
+        # input list is of size 1
+        with assert_raises(ValueError, match="Input sample size must be"
+                                             " greater than one."):
+            stats.alexandergovern([1, 2], [2])
+        # inputs are not finite (infinity)
+        with assert_raises(ValueError, match="Input samples must be finite."):
+            stats.alexandergovern([1, 2], [np.inf, np.inf])
+        # inputs are multidimensional
+        with assert_raises(ValueError, match="Input samples must be one"
+                                             "-dimensional"):
+            stats.alexandergovern([1, 2], [[1, 2], [3, 4]])
+
+    def test_compare_r(self):
+        '''
+        Data generated in R with
+        > set.seed(1)
+        > library("onewaytests")
+        > library("tibble")
+        > y <- c(rnorm(40, sd=10),
+        +        rnorm(30, sd=15),
+        +        rnorm(20, sd=20))
+        > x <- c(rep("one", times=40),
+        +        rep("two", times=30),
+        +        rep("eight", times=20))
+        > x <- factor(x)
+        > ag.test(y ~ x, tibble(y,x))
+
+        Alexander-Govern Test (alpha = 0.05)
+        -------------------------------------------------------------
+        data : y and x
+
+        statistic  : 1.359941
+        parameter  : 2
+        p.value    : 0.5066321
+
+        Result     : Difference is not statistically significant.
+        -------------------------------------------------------------
+        Example adapted from:
+        https://eval-serv2.metpsy.uni-jena.de/wiki-metheval-hp/index.php/R_FUN_Alexander-Govern
+
+        '''
+        one = [-6.264538107423324, 1.8364332422208225, -8.356286124100471,
+               15.952808021377916, 3.295077718153605, -8.204683841180152,
+               4.874290524284853, 7.383247051292173, 5.757813516534923,
+               -3.0538838715635603, 15.11781168450848, 3.898432364114311,
+               -6.2124058054180376, -22.146998871774997, 11.249309181431082,
+               -0.4493360901523085, -0.16190263098946087, 9.438362106852992,
+               8.212211950980885, 5.939013212175088, 9.189773716082183,
+               7.821363007310671, 0.745649833651906, -19.89351695863373,
+               6.198257478947102, -0.5612873952900078, -1.557955067053293,
+               -14.707523838992744, -4.781500551086204, 4.179415601997024,
+               13.58679551529044, -1.0278772734299553, 3.876716115593691,
+               -0.5380504058290512, -13.770595568286065, -4.149945632996798,
+               -3.942899537103493, -0.5931339671118566, 11.000253719838831,
+               7.631757484575442]
+
+        two = [-2.4678539438038034, -3.8004252020476135, 10.454450631071062,
+               8.34994798010486, -10.331335418242798, -10.612427354431794,
+               5.468729432052455, 11.527993867731237, -1.6851931822534207,
+               13.216615896813222, 5.971588205506021, -9.180395898761569,
+               5.116795371366372, -16.94044644121189, 21.495355525515556,
+               29.7059984775879, -5.508322146997636, -15.662019394747961,
+               8.545794411636193, -2.0258190582123654, 36.024266407571645,
+               -0.5886000409975387, 10.346090436761651, 0.4200323817099909,
+               -11.14909813323608, 2.8318844927151434, -27.074379433365568,
+               21.98332292344329, 2.2988000731784655, 32.58917505543229]
+
+        eight = [9.510190577993251, -14.198928618436291, 12.214527069781099,
+                 -18.68195263288503, -25.07266800478204, 5.828924710349257,
+                 -8.86583746436866, 0.02210703263248262, 1.4868264830332811,
+                 -11.79041892376144, -11.37337465637004, -2.7035723024766414,
+                 23.56173993146409, -30.47133600859524, 11.878923752568431,
+                 6.659007424270365, 21.261996745527256, -6.083678472686013,
+                 7.400376198325763, 5.341975815444621]
+        soln = stats.alexandergovern(one, two, eight)
+        assert_allclose(soln.statistic, 1.3599405447999450836)
+        assert_allclose(soln.pvalue, 0.50663205309676440091)
+
+    def test_compare_scholar(self):
+        '''
+        Data taken from 'The Modification and Evaluation of the
+        Alexander-Govern Test in Terms of Power' by Kingsley Ochuko, T.,
+        Abdullah, S., Binti Zain, Z., & Soaad Syed Yahaya, S. (2015).
+        '''
+        young = [482.43, 484.36, 488.84, 495.15, 495.24, 502.69, 504.62,
+                 518.29, 519.1, 524.1, 524.12, 531.18, 548.42, 572.1, 584.68,
+                 609.09, 609.53, 666.63, 676.4]
+        middle = [335.59, 338.43, 353.54, 404.27, 437.5, 469.01, 485.85,
+                  487.3, 493.08, 494.31, 499.1, 886.41]
+        old = [519.01, 528.5, 530.23, 536.03, 538.56, 538.83, 557.24, 558.61,
+               558.95, 565.43, 586.39, 594.69, 629.22, 645.69, 691.84]
+        soln = stats.alexandergovern(young, middle, old)
+        assert_allclose(soln.statistic, 5.3237, atol=1e-3)
+        assert_allclose(soln.pvalue, 0.06982, atol=1e-4)
+
+        # verify with ag.test in r
+        '''
+        > library("onewaytests")
+        > library("tibble")
+        > young <- c(482.43, 484.36, 488.84, 495.15, 495.24, 502.69, 504.62,
+        +                  518.29, 519.1, 524.1, 524.12, 531.18, 548.42, 572.1,
+        +                  584.68, 609.09, 609.53, 666.63, 676.4)
+        > middle <- c(335.59, 338.43, 353.54, 404.27, 437.5, 469.01, 485.85,
+        +                   487.3, 493.08, 494.31, 499.1, 886.41)
+        > old <- c(519.01, 528.5, 530.23, 536.03, 538.56, 538.83, 557.24,
+        +                   558.61, 558.95, 565.43, 586.39, 594.69, 629.22,
+        +                   645.69, 691.84)
+        > young_fct <- c(rep("young", times=19))
+        > middle_fct <-c(rep("middle", times=12))
+        > old_fct <- c(rep("old", times=15))
+        > ag.test(a ~ b, tibble(a=c(young, middle, old), b=factor(c(young_fct,
+        +                                              middle_fct, old_fct))))
+
+        Alexander-Govern Test (alpha = 0.05)
+        -------------------------------------------------------------
+        data : a and b
+
+        statistic  : 5.324629
+        parameter  : 2
+        p.value    : 0.06978651
+
+        Result     : Difference is not statistically significant.
+        -------------------------------------------------------------
+
+        '''
+        assert_allclose(soln.statistic, 5.324629)
+        assert_allclose(soln.pvalue, 0.06978651)
+
+    def test_compare_scholar3(self):
+        '''
+        Data taken from 'Robustness And Comparative Power Of WelchAspin,
+        Alexander-Govern And Yuen Tests Under Non-Normality And Variance
+        Heteroscedasticity', by Ayed A. Almoied. 2017. Page 34-37.
+        https://digitalcommons.wayne.edu/cgi/viewcontent.cgi?article=2775&context=oa_dissertations
+        '''
+        x1 = [-1.77559, -1.4113, -0.69457, -0.54148, -0.18808, -0.07152,
+              0.04696, 0.051183, 0.148695, 0.168052, 0.422561, 0.458555,
+              0.616123, 0.709968, 0.839956, 0.857226, 0.929159, 0.981442,
+              0.999554, 1.642958]
+        x2 = [-1.47973, -1.2722, -0.91914, -0.80916, -0.75977, -0.72253,
+              -0.3601, -0.33273, -0.28859, -0.09637, -0.08969, -0.01824,
+              0.260131, 0.289278, 0.518254, 0.683003, 0.877618, 1.172475,
+              1.33964, 1.576766]
+        soln = stats.alexandergovern(x1, x2)
+        assert_allclose(soln.statistic, 0.713526, atol=1e-5)
+        assert_allclose(soln.pvalue, 0.398276, atol=1e-5)
+
+        '''
+        tested in ag.test in R:
+        > library("onewaytests")
+        > library("tibble")
+        > x1 <- c(-1.77559, -1.4113, -0.69457, -0.54148, -0.18808, -0.07152,
+        +          0.04696, 0.051183, 0.148695, 0.168052, 0.422561, 0.458555,
+        +          0.616123, 0.709968, 0.839956, 0.857226, 0.929159, 0.981442,
+        +          0.999554, 1.642958)
+        > x2 <- c(-1.47973, -1.2722, -0.91914, -0.80916, -0.75977, -0.72253,
+        +         -0.3601, -0.33273, -0.28859, -0.09637, -0.08969, -0.01824,
+        +         0.260131, 0.289278, 0.518254, 0.683003, 0.877618, 1.172475,
+        +         1.33964, 1.576766)
+        > x1_fact <- c(rep("x1", times=20))
+        > x2_fact <- c(rep("x2", times=20))
+        > a <- c(x1, x2)
+        > b <- factor(c(x1_fact, x2_fact))
+        > ag.test(a ~ b, tibble(a, b))
+        Alexander-Govern Test (alpha = 0.05)
+        -------------------------------------------------------------
+        data : a and b
+
+        statistic  : 0.7135182
+        parameter  : 1
+        p.value    : 0.3982783
+
+        Result     : Difference is not statistically significant.
+        -------------------------------------------------------------
+        '''
+        assert_allclose(soln.statistic, 0.7135182)
+        assert_allclose(soln.pvalue, 0.3982783)
+
+    def test_nan_policy_propagate(self):
+        args = [[1, 2, 3, 4], [1, np.nan]]
+        # default nan_policy is 'propagate'
+        res = stats.alexandergovern(*args)
+        assert_equal(res.pvalue, np.nan)
+        assert_equal(res.statistic, np.nan)
+
+    def test_nan_policy_raise(self):
+        args = [[1, 2, 3, 4], [1, np.nan]]
+        with assert_raises(ValueError, match="The input contains nan values"):
+            stats.alexandergovern(*args, nan_policy='raise')
+
+    def test_nan_policy_omit(self):
+        args_nan = [[1, 2, 3, None, 4], [1, np.nan, 19, 25]]
+        args_no_nan = [[1, 2, 3, 4], [1, 19, 25]]
+        res_nan = stats.alexandergovern(*args_nan, nan_policy='omit')
+        res_no_nan = stats.alexandergovern(*args_no_nan)
+        assert_equal(res_nan.pvalue, res_no_nan.pvalue)
+        assert_equal(res_nan.statistic, res_no_nan.statistic)
+
+    def test_constant_input(self):
+        # Zero variance input, consistent with `stats.pearsonr`
+        msg = "An input array is constant; the statistic is not defined."
+        with assert_warns(stats.ConstantInputWarning, match=msg):
+            res = stats.alexandergovern([0.667, 0.667, 0.667],
+                                        [0.123, 0.456, 0.789])
+            assert_equal(res.statistic, np.nan)
+            assert_equal(res.pvalue, np.nan)
+
+
+class TestFOneWay:
+
+    def test_trivial(self):
+        # A trivial test of stats.f_oneway, with F=0.
+        F, p = stats.f_oneway([0, 2], [0, 2])
+        assert_equal(F, 0.0)
+        assert_equal(p, 1.0)
+
+    def test_basic(self):
+        # Despite being a floating point calculation, this data should
+        # result in F being exactly 2.0.
+        F, p = stats.f_oneway([0, 2], [2, 4])
+        assert_equal(F, 2.0)
+        assert_allclose(p, 1 - np.sqrt(0.5), rtol=1e-14)
+
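+    def test_basic_formula_crosscheck(self):
+        # Editorial addition, not part of the original suite: a minimal
+        # sketch recomputing F = MS_between / MS_within by hand for the
+        # groups used in test_basic (relies on this file's existing
+        # numpy/scipy imports).
+        a, b = np.array([0., 2.]), np.array([2., 4.])
+        grand_mean = np.concatenate([a, b]).mean()
+        ssb = (len(a) * (a.mean() - grand_mean)**2
+               + len(b) * (b.mean() - grand_mean)**2)
+        ssw = np.sum((a - a.mean())**2) + np.sum((b - b.mean())**2)
+        # df_between = k - 1 = 1, df_within = n - k = 2
+        f = (ssb / 1) / (ssw / 2)
+        assert_equal(f, 2.0)
+        assert_allclose(stats.f_oneway(a, b)[0], f, rtol=1e-14)
+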
+    def test_known_exact(self):
+        # Another trivial dataset for which the exact F and p can be
+        # calculated.
+        F, p = stats.f_oneway([2], [2], [2, 3, 4])
+        # The use of assert_equal might be too optimistic, but the calculation
+        # in this case is trivial enough that it is likely to go through with
+        # no loss of precision.
+        assert_equal(F, 3/5)
+        assert_equal(p, 5/8)
+
+    def test_large_integer_array(self):
+        a = np.array([655, 788], dtype=np.uint16)
+        b = np.array([789, 772], dtype=np.uint16)
+        F, p = stats.f_oneway(a, b)
+        # The expected value was verified by computing it with mpmath with
+        # 40 digits of precision.
+        assert_allclose(F, 0.77450216931805540, rtol=1e-14)
+
+    def test_result_attributes(self):
+        a = np.array([655, 788], dtype=np.uint16)
+        b = np.array([789, 772], dtype=np.uint16)
+        res = stats.f_oneway(a, b)
+        attributes = ('statistic', 'pvalue')
+        check_named_results(res, attributes)
+
+    def test_nist(self):
+        # These are the NIST ANOVA files. They can be found at:
+        # https://www.itl.nist.gov/div898/strd/anova/anova.html
+        filenames = ['SiRstv.dat', 'SmLs01.dat', 'SmLs02.dat', 'SmLs03.dat',
+                     'AtmWtAg.dat', 'SmLs04.dat', 'SmLs05.dat', 'SmLs06.dat',
+                     'SmLs07.dat', 'SmLs08.dat', 'SmLs09.dat']
+
+        for test_case in filenames:
+            rtol = 1e-7
+            fname = os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                                 'data/nist_anova', test_case))
+            with open(fname, 'r') as f:
+                content = f.read().split('\n')
+            certified = [line.split() for line in content[40:48]
+                         if line.strip()]
+            dataf = np.loadtxt(fname, skiprows=60)
+            y, x = dataf.T
+            y = y.astype(int)
+            caty = np.unique(y)
+            f = float(certified[0][-1])
+
+            xlist = [x[y == i] for i in caty]
+            res = stats.f_oneway(*xlist)
+
+            # With the hard test cases we relax the tolerance a bit.
+            hard_tc = ('SmLs07.dat', 'SmLs08.dat', 'SmLs09.dat')
+            if test_case in hard_tc:
+                rtol = 1e-4
+
+            assert_allclose(res[0], f, rtol=rtol,
+                            err_msg='Failing testcase: %s' % test_case)
+
+    @pytest.mark.parametrize("a, b, expected", [
+        (np.array([42, 42, 42]), np.array([7, 7, 7]), (np.inf, 0)),
+        (np.array([42, 42, 42]), np.array([42, 42, 42]), (np.nan, np.nan))
+        ])
+    def test_constant_input(self, a, b, expected):
+        # For more details, see https://github.com/scipy/scipy/issues/11669
+        msg = "Each of the input arrays is constant;"
+        with assert_warns(stats.ConstantInputWarning, match=msg):
+            f, p = stats.f_oneway(a, b)
+            assert_equal((f, p), expected)
+
+    @pytest.mark.parametrize('axis', [-2, -1, 0, 1])
+    def test_2d_inputs(self, axis):
+        a = np.array([[1, 4, 3, 3],
+                      [2, 5, 3, 3],
+                      [3, 6, 3, 3],
+                      [2, 3, 3, 3],
+                      [1, 4, 3, 3]])
+        b = np.array([[3, 1, 5, 3],
+                      [4, 6, 5, 3],
+                      [4, 3, 5, 3],
+                      [1, 5, 5, 3],
+                      [5, 5, 5, 3],
+                      [2, 3, 5, 3],
+                      [8, 2, 5, 3],
+                      [2, 2, 5, 3]])
+        c = np.array([[4, 3, 4, 3],
+                      [4, 2, 4, 3],
+                      [5, 4, 4, 3],
+                      [5, 4, 4, 3]])
+
+        if axis in [-1, 1]:
+            a = a.T
+            b = b.T
+            c = c.T
+            take_axis = 0
+        else:
+            take_axis = 1
+
+        warn_msg = "Each of the input arrays is constant;"
+        with assert_warns(stats.ConstantInputWarning, match=warn_msg):
+            f, p = stats.f_oneway(a, b, c, axis=axis)
+
+        # Verify that the result computed with the 2d arrays matches
+        # the result of calling f_oneway individually on each slice.
+        for j in [0, 1]:
+            fj, pj = stats.f_oneway(np.take(a, j, take_axis),
+                                    np.take(b, j, take_axis),
+                                    np.take(c, j, take_axis))
+            assert_allclose(f[j], fj, rtol=1e-14)
+            assert_allclose(p[j], pj, rtol=1e-14)
+        for j in [2, 3]:
+            with assert_warns(stats.ConstantInputWarning, match=warn_msg):
+                fj, pj = stats.f_oneway(np.take(a, j, take_axis),
+                                        np.take(b, j, take_axis),
+                                        np.take(c, j, take_axis))
+                assert_equal(f[j], fj)
+                assert_equal(p[j], pj)
+
+    def test_3d_inputs(self):
+        # Some 3-d arrays. (There is nothing special about the values.)
+        a = 1/np.arange(1.0, 4*5*7 + 1).reshape(4, 5, 7)
+        b = 2/np.arange(1.0, 4*8*7 + 1).reshape(4, 8, 7)
+        c = np.cos(1/np.arange(1.0, 4*4*7 + 1).reshape(4, 4, 7))
+
+        f, p = stats.f_oneway(a, b, c, axis=1)
+
+        assert f.shape == (4, 7)
+        assert p.shape == (4, 7)
+
+        for i in range(a.shape[0]):
+            for j in range(a.shape[2]):
+                fij, pij = stats.f_oneway(a[i, :, j], b[i, :, j], c[i, :, j])
+                assert_allclose(fij, f[i, j])
+                assert_allclose(pij, p[i, j])
+
+    def test_length0_1d_error(self):
+        # Require at least one value in each group.
+        msg = 'at least one input has length 0'
+        with assert_warns(stats.DegenerateDataWarning, match=msg):
+            result = stats.f_oneway([1, 2, 3], [], [4, 5, 6, 7])
+            assert_equal(result, (np.nan, np.nan))
+
+    def test_length0_2d_error(self):
+        msg = 'at least one input has length 0'
+        with assert_warns(stats.DegenerateDataWarning, match=msg):
+            ncols = 3
+            a = np.ones((4, ncols))
+            b = np.ones((0, ncols))
+            c = np.ones((5, ncols))
+            f, p = stats.f_oneway(a, b, c)
+            nans = np.full((ncols,), fill_value=np.nan)
+            assert_equal(f, nans)
+            assert_equal(p, nans)
+
+    def test_all_length_one(self):
+        msg = 'all input arrays have length 1.'
+        with assert_warns(stats.DegenerateDataWarning, match=msg):
+            result = stats.f_oneway([10], [11], [12], [13])
+            assert_equal(result, (np.nan, np.nan))
+
+    @pytest.mark.parametrize('args', [(), ([1, 2, 3],)])
+    def test_too_few_inputs(self, args):
+        with assert_raises(TypeError):
+            stats.f_oneway(*args)
+
+    def test_axis_error(self):
+        a = np.ones((3, 4))
+        b = np.ones((5, 4))
+        with assert_raises(np.AxisError):
+            stats.f_oneway(a, b, axis=2)
+
+    def test_bad_shapes(self):
+        a = np.ones((3, 4))
+        b = np.ones((5, 4))
+        with assert_raises(ValueError):
+            stats.f_oneway(a, b, axis=1)
+
+
+class TestKruskal:
+    def test_simple(self):
+        x = [1]
+        y = [2]
+        h, p = stats.kruskal(x, y)
+        assert_equal(h, 1.0)
+        assert_approx_equal(p, stats.distributions.chi2.sf(h, 1))
+        h, p = stats.kruskal(np.array(x), np.array(y))
+        assert_equal(h, 1.0)
+        assert_approx_equal(p, stats.distributions.chi2.sf(h, 1))
+
+    def test_basic(self):
+        x = [1, 3, 5, 7, 9]
+        y = [2, 4, 6, 8, 10]
+        h, p = stats.kruskal(x, y)
+        assert_approx_equal(h, 3./11, significant=10)
+        assert_approx_equal(p, stats.distributions.chi2.sf(3./11, 1))
+        h, p = stats.kruskal(np.array(x), np.array(y))
+        assert_approx_equal(h, 3./11, significant=10)
+        assert_approx_equal(p, stats.distributions.chi2.sf(3./11, 1))
+
+    def test_simple_tie(self):
+        x = [1]
+        y = [1, 2]
+        h_uncorr = 1.5**2 + 2*2.25**2 - 12
+        corr = 0.75
+        expected = h_uncorr / corr   # 0.5
+        h, p = stats.kruskal(x, y)
+        # Since the expression is simple and the exact answer is 0.5, it
+        # should be safe to use assert_equal().
+        assert_equal(h, expected)
+
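+    def test_tie_correction_crosscheck(self):
+        # Editorial addition, not part of the original suite: a minimal
+        # sketch recomputing the tie-corrected statistic from the textbook
+        # formula H = [12/(N(N+1)) * sum(R_j^2/n_j) - 3(N+1)] / C with
+        # C = 1 - sum(t^3 - t)/(N^3 - N), t being the tied-group sizes.
+        x = [1, 1, 1, 2]
+        y = [2, 2, 2, 2]
+        ranks = stats.rankdata(np.concatenate((x, y)))
+        rx, ry = ranks[:len(x)], ranks[len(x):]
+        n = len(ranks)
+        h = (12 / (n * (n + 1))
+             * (rx.sum()**2 / len(x) + ry.sum()**2 / len(y))
+             - 3 * (n + 1))
+        _, t = np.unique(ranks, return_counts=True)
+        c = 1 - np.sum(t**3 - t) / (n**3 - n)
+        assert_allclose(h / c, stats.kruskal(x, y)[0])
+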
+    def test_another_tie(self):
+        x = [1, 1, 1, 2]
+        y = [2, 2, 2, 2]
+        h_uncorr = (12. / 8. / 9.) * 4 * (3**2 + 6**2) - 3 * 9
+        corr = 1 - float(3**3 - 3 + 5**3 - 5) / (8**3 - 8)
+        expected = h_uncorr / corr
+        h, p = stats.kruskal(x, y)
+        assert_approx_equal(h, expected)
+
+    def test_three_groups(self):
+        # A test of stats.kruskal with three groups, with ties.
+        x = [1, 1, 1]
+        y = [2, 2, 2]
+        z = [2, 2]
+        h_uncorr = (12. / 8. / 9.) * (3*2**2 + 3*6**2 + 2*6**2) - 3 * 9  # 5.0
+        corr = 1 - float(3**3 - 3 + 5**3 - 5) / (8**3 - 8)
+        expected = h_uncorr / corr  # 7.0
+        h, p = stats.kruskal(x, y, z)
+        assert_approx_equal(h, expected)
+        assert_approx_equal(p, stats.distributions.chi2.sf(h, 2))
+
+    def test_empty(self):
+        # A test of stats.kruskal where one of the groups is empty.
+        x = [1, 1, 1]
+        y = [2, 2, 2]
+        z = []
+        assert_equal(stats.kruskal(x, y, z), (np.nan, np.nan))
+
+    def test_kruskal_result_attributes(self):
+        x = [1, 3, 5, 7, 9]
+        y = [2, 4, 6, 8, 10]
+        res = stats.kruskal(x, y)
+        attributes = ('statistic', 'pvalue')
+        check_named_results(res, attributes)
+
+    def test_nan_policy(self):
+        x = np.arange(10.)
+        x[9] = np.nan
+        assert_equal(stats.kruskal(x, x), (np.nan, np.nan))
+        assert_almost_equal(stats.kruskal(x, x, nan_policy='omit'), (0.0, 1.0))
+        assert_raises(ValueError, stats.kruskal, x, x, nan_policy='raise')
+        assert_raises(ValueError, stats.kruskal, x, x, nan_policy='foobar')
+
+    def test_large_no_samples(self):
+        # Test to see if large samples are handled correctly.
+        n = 50000
+        x = np.random.randn(n)
+        y = np.random.randn(n) + 50
+        h, p = stats.kruskal(x, y)
+        expected = 0
+        assert_approx_equal(p, expected)
+
+
+class TestCombinePvalues:
+
+    def test_fisher(self):
+        # Uses Fisher's method; see
+        # https://en.wikipedia.org/wiki/Fisher%27s_method
+        xsq, p = stats.combine_pvalues([.01, .2, .3], method='fisher')
+        assert_approx_equal(p, 0.02156, significant=4)
+
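+    def test_fisher_formula_crosscheck(self):
+        # Editorial addition, not part of the original suite: a minimal
+        # sketch of Fisher's method computed directly. Under H0,
+        # X^2 = -2 * sum(log(p_i)) is chi-squared with 2k degrees of
+        # freedom.
+        pvalues = np.array([.01, .2, .3])
+        xsq = -2 * np.sum(np.log(pvalues))
+        p = stats.distributions.chi2.sf(xsq, 2 * len(pvalues))
+        res = stats.combine_pvalues(pvalues, method='fisher')
+        assert_allclose(res[0], xsq)
+        assert_allclose(res[1], p)
+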
+    def test_stouffer(self):
+        Z, p = stats.combine_pvalues([.01, .2, .3], method='stouffer')
+        assert_approx_equal(p, 0.01651, significant=4)
+
+    def test_stouffer2(self):
+        Z, p = stats.combine_pvalues([.5, .5, .5], method='stouffer')
+        assert_approx_equal(p, 0.5, significant=4)
+
+    def test_weighted_stouffer(self):
+        Z, p = stats.combine_pvalues([.01, .2, .3], method='stouffer',
+                                     weights=np.ones(3))
+        assert_approx_equal(p, 0.01651, significant=4)
+
+    def test_weighted_stouffer2(self):
+        Z, p = stats.combine_pvalues([.01, .2, .3], method='stouffer',
+                                     weights=np.array((1, 4, 9)))
+        assert_approx_equal(p, 0.1464, significant=4)
+
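+    def test_stouffer_formula_crosscheck(self):
+        # Editorial addition, not part of the original suite: a minimal
+        # sketch of the weighted Stouffer combination computed directly,
+        # Z = sum(w_i * Phi^{-1}(1 - p_i)) / sqrt(sum(w_i^2)), with
+        # combined p-value 1 - Phi(Z).
+        pvalues = np.array([.01, .2, .3])
+        weights = np.array([1., 4., 9.])
+        z = stats.norm.isf(pvalues)
+        Z = np.sum(weights * z) / np.sqrt(np.sum(weights**2))
+        res = stats.combine_pvalues(pvalues, method='stouffer',
+                                    weights=weights)
+        assert_allclose(res[0], Z)
+        assert_allclose(res[1], stats.norm.sf(Z))
+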
+    def test_pearson(self):
+        Z, p = stats.combine_pvalues([.01, .2, .3], method='pearson')
+        assert_approx_equal(p, 0.02213, significant=4)
+
+    def test_tippett(self):
+        Z, p = stats.combine_pvalues([.01, .2, .3], method='tippett')
+        assert_approx_equal(p, 0.0297, significant=4)
+
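+    def test_tippett_formula_crosscheck(self):
+        # Editorial addition, not part of the original suite: Tippett's
+        # method uses the smallest p-value; under H0 the combined p-value
+        # is 1 - (1 - min(p_i))^k. A minimal sketch of that formula.
+        pvalues = np.array([.01, .2, .3])
+        p = 1 - (1 - pvalues.min())**len(pvalues)
+        res = stats.combine_pvalues(pvalues, method='tippett')
+        assert_allclose(res[1], p)
+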
+    def test_mudholkar_george(self):
+        Z, p = stats.combine_pvalues([.1, .1, .1], method='mudholkar_george')
+        assert_approx_equal(p, 0.019462, significant=4)
+
+    def test_mudholkar_george_equal_fisher_pearson_average(self):
+        Z, p = stats.combine_pvalues([.01, .2, .3], method='mudholkar_george')
+        Z_f, p_f = stats.combine_pvalues([.01, .2, .3], method='fisher')
+        Z_p, p_p = stats.combine_pvalues([.01, .2, .3], method='pearson')
+        assert_approx_equal(0.5 * (Z_f+Z_p), Z, significant=4)
+
+    methods = ["fisher", "pearson", "tippett", "stouffer", "mudholkar_george"]
+
+    @pytest.mark.parametrize("variant", ["single", "all", "random"])
+    @pytest.mark.parametrize("method", methods)
+    def test_monotonicity(self, variant, method):
+        # Test that result increases monotonically with respect to input.
+        m, n = 10, 7
+        rng = np.random.default_rng(278448169958891062669391462690811630763)
+
+        # `pvaluess` is an m × n array of p values. Each row corresponds to
+        # a set of p values to be combined with p values increasing
+        # monotonically down one column (single), simultaneously down each
+        # column (all), or independently down each column (random).
+        if variant == "single":
+            pvaluess = np.full((m, n), rng.random(n))
+            pvaluess[:, 0] = np.linspace(0.1, 0.9, m)
+        elif variant == "all":
+            pvaluess = np.full((n, m), np.linspace(0.1, 0.9, m)).T
+        elif variant == "random":
+            pvaluess = np.sort(rng.uniform(0, 1, size=(m, n)), axis=0)
+
+        combined_pvalues = [
+            stats.combine_pvalues(pvalues, method=method)[1]
+            for pvalues in pvaluess
+        ]
+        assert np.all(np.diff(combined_pvalues) >= 0)
+
+    @pytest.mark.parametrize("method", methods)
+    def test_result(self, method):
+        res = stats.combine_pvalues([.01, .2, .3], method=method)
+        assert_equal((res.statistic, res.pvalue), res)
+
+
+class TestCdfDistanceValidation:
+    """
+    Test that _cdf_distance() (via wasserstein_distance()) raises ValueErrors
+    for bad inputs.
+    """
+
+    def test_distinct_value_and_weight_lengths(self):
+        # When the number of weights does not match the number of values,
+        # a ValueError should be raised.
+        assert_raises(ValueError, stats.wasserstein_distance,
+                      [1], [2], [4], [3, 1])
+        assert_raises(ValueError, stats.wasserstein_distance, [1], [2], [1, 0])
+
+    def test_zero_weight(self):
+        # When a distribution is given zero weight, a ValueError should be
+        # raised.
+        assert_raises(ValueError, stats.wasserstein_distance,
+                      [0, 1], [2], [0, 0])
+        assert_raises(ValueError, stats.wasserstein_distance,
+                      [0, 1], [2], [3, 1], [0])
+
+    def test_negative_weights(self):
+        # A ValueError should be raised if there are any negative weights.
+        assert_raises(ValueError, stats.wasserstein_distance,
+                      [0, 1], [2, 2], [1, 1], [3, -1])
+
+    def test_empty_distribution(self):
+        # A ValueError should be raised when trying to measure the distance
+        # between something and nothing.
+        assert_raises(ValueError, stats.wasserstein_distance, [], [2, 2])
+        assert_raises(ValueError, stats.wasserstein_distance, [1], [])
+
+    def test_inf_weight(self):
+        # An inf weight is not valid.
+        assert_raises(ValueError, stats.wasserstein_distance,
+                      [1, 2, 1], [1, 1], [1, np.inf, 1], [1, 1])
+
+
+class TestWassersteinDistance:
+    """ Tests for wasserstein_distance() output values.
+    """
+
+    def test_simple(self):
+        # For basic distributions, the value of the Wasserstein distance is
+        # straightforward.
+        assert_almost_equal(
+            stats.wasserstein_distance([0, 1], [0], [1, 1], [1]),
+            .5)
+        assert_almost_equal(stats.wasserstein_distance(
+            [0, 1], [0], [3, 1], [1]),
+            .25)
+        assert_almost_equal(stats.wasserstein_distance(
+            [0, 2], [0], [1, 1], [1]),
+            1)
+        assert_almost_equal(stats.wasserstein_distance(
+            [0, 1, 2], [1, 2, 3]),
+            1)
+
+    def test_same_distribution(self):
+        # Any distribution moved to itself should have a Wasserstein distance of
+        # zero.
+        assert_equal(stats.wasserstein_distance([1, 2, 3], [2, 1, 3]), 0)
+        assert_equal(
+            stats.wasserstein_distance([1, 1, 1, 4], [4, 1],
+                                       [1, 1, 1, 1], [1, 3]),
+            0)
+
+    def test_shift(self):
+        # If the whole distribution is shifted by x, then the Wasserstein
+        # distance should be x.
+        assert_almost_equal(stats.wasserstein_distance([0], [1]), 1)
+        assert_almost_equal(stats.wasserstein_distance([-5], [5]), 10)
+        assert_almost_equal(
+            stats.wasserstein_distance([1, 2, 3, 4, 5], [11, 12, 13, 14, 15]),
+            10)
+        assert_almost_equal(
+            stats.wasserstein_distance([4.5, 6.7, 2.1], [4.6, 7, 9.2],
+                                       [3, 1, 1], [1, 3, 1]),
+            2.5)
+
+    def test_combine_weights(self):
+        # Assigning a weight w to a value is equivalent to including that value
+        # w times in the value array with weight of 1.
+        assert_almost_equal(
+            stats.wasserstein_distance(
+                [0, 0, 1, 1, 1, 1, 5], [0, 3, 3, 3, 3, 4, 4],
+                [1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1]),
+            stats.wasserstein_distance([5, 0, 1], [0, 4, 3],
+                                       [1, 2, 4], [1, 2, 4]))
+
+    def test_collapse(self):
+        # Collapsing a distribution to a point distribution at zero is
+        # equivalent to taking the mean of the absolute values of the data.
+        u = np.arange(-10, 30, 0.3)
+        v = np.zeros_like(u)
+        assert_almost_equal(
+            stats.wasserstein_distance(u, v),
+            np.mean(np.abs(u)))
+
+        u_weights = np.arange(len(u))
+        v_weights = u_weights[::-1]
+        assert_almost_equal(
+            stats.wasserstein_distance(u, v, u_weights, v_weights),
+            np.average(np.abs(u), weights=u_weights))
+
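+    def test_sorted_samples_crosscheck(self):
+        # Editorial addition, not part of the original suite: for two
+        # unweighted samples of equal size, the 1-Wasserstein distance
+        # reduces to the mean absolute difference between the sorted
+        # samples; a minimal sketch of that equivalence.
+        rng = np.random.default_rng(987654321)
+        u = rng.random(50)
+        v = rng.random(50)
+        ref = np.mean(np.abs(np.sort(u) - np.sort(v)))
+        assert_allclose(stats.wasserstein_distance(u, v), ref)
+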
+    def test_zero_weight(self):
+        # Values with zero weight have no impact on the Wasserstein distance.
+        assert_almost_equal(
+            stats.wasserstein_distance([1, 2, 100000], [1, 1],
+                                       [1, 1, 0], [1, 1]),
+            stats.wasserstein_distance([1, 2], [1, 1], [1, 1], [1, 1]))
+
+    def test_inf_values(self):
+        # Inf values can lead to an inf distance or trigger a RuntimeWarning
+        # (and return NaN) if the distance is undefined.
+        assert_equal(
+            stats.wasserstein_distance([1, 2, np.inf], [1, 1]),
+            np.inf)
+        assert_equal(
+            stats.wasserstein_distance([1, 2, np.inf], [-np.inf, 1]),
+            np.inf)
+        assert_equal(
+            stats.wasserstein_distance([1, -np.inf, np.inf], [1, 1]),
+            np.inf)
+        with suppress_warnings() as sup:
+            sup.record(RuntimeWarning, "invalid value*")
+            assert_equal(
+                stats.wasserstein_distance([1, 2, np.inf], [np.inf, 1]),
+                np.nan)
+
+
+class TestEnergyDistance:
+    """ Tests for energy_distance() output values.
+    """
+
+    def test_simple(self):
+        # For basic distributions, the value of the energy distance is
+        # straightforward.
+        assert_almost_equal(
+            stats.energy_distance([0, 1], [0], [1, 1], [1]),
+            np.sqrt(2) * .5)
+        assert_almost_equal(stats.energy_distance(
+            [0, 1], [0], [3, 1], [1]),
+            np.sqrt(2) * .25)
+        assert_almost_equal(stats.energy_distance(
+            [0, 2], [0], [1, 1], [1]),
+            2 * .5)
+        assert_almost_equal(
+            stats.energy_distance([0, 1, 2], [1, 2, 3]),
+            np.sqrt(2) * (3*(1./3**2))**.5)
+
+    def test_same_distribution(self):
+        # Any distribution moved to itself should have an energy distance of
+        # zero.
+        assert_equal(stats.energy_distance([1, 2, 3], [2, 1, 3]), 0)
+        assert_equal(
+            stats.energy_distance([1, 1, 1, 4], [4, 1], [1, 1, 1, 1], [1, 3]),
+            0)
+
+    def test_shift(self):
+        # If a single-point distribution is shifted by x, then the energy
+        # distance should be sqrt(2) * sqrt(x).
+        assert_almost_equal(stats.energy_distance([0], [1]), np.sqrt(2))
+        assert_almost_equal(
+            stats.energy_distance([-5], [5]),
+            np.sqrt(2) * 10**.5)
+
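+    def test_pairwise_formula_crosscheck(self):
+        # Editorial addition, not part of the original suite: the energy
+        # distance can equivalently be written in terms of expected
+        # pairwise distances, D^2 = 2 E|X - Y| - E|X - X'| - E|Y - Y'|;
+        # a minimal sketch checking that identity on unweighted samples.
+        rng = np.random.default_rng(123456789)
+        u = rng.random(20)
+        v = rng.random(30)
+        exy = np.mean(np.abs(u[:, None] - v[None, :]))
+        exx = np.mean(np.abs(u[:, None] - u[None, :]))
+        eyy = np.mean(np.abs(v[:, None] - v[None, :]))
+        assert_allclose(stats.energy_distance(u, v),
+                        np.sqrt(2 * exy - exx - eyy))
+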
+    def test_combine_weights(self):
+        # Assigning a weight w to a value is equivalent to including that value
+        # w times in the value array with weight of 1.
+        assert_almost_equal(
+            stats.energy_distance([0, 0, 1, 1, 1, 1, 5], [0, 3, 3, 3, 3, 4, 4],
+                                  [1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1]),
+            stats.energy_distance([5, 0, 1], [0, 4, 3], [1, 2, 4], [1, 2, 4]))
+
+    def test_zero_weight(self):
+        # Values with zero weight have no impact on the energy distance.
+        assert_almost_equal(
+            stats.energy_distance([1, 2, 100000], [1, 1], [1, 1, 0], [1, 1]),
+            stats.energy_distance([1, 2], [1, 1], [1, 1], [1, 1]))
+
+    def test_inf_values(self):
+        # Inf values can lead to an inf distance or trigger a RuntimeWarning
+        # (and return NaN) if the distance is undefined.
+        assert_equal(stats.energy_distance([1, 2, np.inf], [1, 1]), np.inf)
+        assert_equal(
+            stats.energy_distance([1, 2, np.inf], [-np.inf, 1]),
+            np.inf)
+        assert_equal(
+            stats.energy_distance([1, -np.inf, np.inf], [1, 1]),
+            np.inf)
+        with suppress_warnings() as sup:
+            sup.record(RuntimeWarning, "invalid value*")
+            assert_equal(
+                stats.energy_distance([1, 2, np.inf], [np.inf, 1]),
+                np.nan)
+
+
+class TestBrunnerMunzel:
+    # Data from (Lumley, 1996)
+    X = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1]
+    Y = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4]
+    significant = 13
+
+    def test_brunnermunzel_one_sided(self):
+        # Results are compared with R's lawstat package.
+        u1, p1 = stats.brunnermunzel(self.X, self.Y, alternative='less')
+        u2, p2 = stats.brunnermunzel(self.Y, self.X, alternative='greater')
+        u3, p3 = stats.brunnermunzel(self.X, self.Y, alternative='greater')
+        u4, p4 = stats.brunnermunzel(self.Y, self.X, alternative='less')
+
+        assert_approx_equal(p1, p2, significant=self.significant)
+        assert_approx_equal(p3, p4, significant=self.significant)
+        assert_(p1 != p3)
+        assert_approx_equal(u1, 3.1374674823029505,
+                            significant=self.significant)
+        assert_approx_equal(u2, -3.1374674823029505,
+                            significant=self.significant)
+        assert_approx_equal(u3, 3.1374674823029505,
+                            significant=self.significant)
+        assert_approx_equal(u4, -3.1374674823029505,
+                            significant=self.significant)
+        assert_approx_equal(p1, 0.0028931043330757342,
+                            significant=self.significant)
+        assert_approx_equal(p3, 0.99710689566692423,
+                            significant=self.significant)
+
+    def test_brunnermunzel_two_sided(self):
+        # Results are compared with R's lawstat package.
+        u1, p1 = stats.brunnermunzel(self.X, self.Y, alternative='two-sided')
+        u2, p2 = stats.brunnermunzel(self.Y, self.X, alternative='two-sided')
+
+        assert_approx_equal(p1, p2, significant=self.significant)
+        assert_approx_equal(u1, 3.1374674823029505,
+                            significant=self.significant)
+        assert_approx_equal(u2, -3.1374674823029505,
+                            significant=self.significant)
+        assert_approx_equal(p1, 0.0057862086661515377,
+                            significant=self.significant)
+
+    def test_brunnermunzel_default(self):
+        # The default value for alternative is two-sided
+        u1, p1 = stats.brunnermunzel(self.X, self.Y)
+        u2, p2 = stats.brunnermunzel(self.Y, self.X)
+
+        assert_approx_equal(p1, p2, significant=self.significant)
+        assert_approx_equal(u1, 3.1374674823029505,
+                            significant=self.significant)
+        assert_approx_equal(u2, -3.1374674823029505,
+                            significant=self.significant)
+        assert_approx_equal(p1, 0.0057862086661515377,
+                            significant=self.significant)
+
+    def test_brunnermunzel_alternative_error(self):
+        alternative = "error"
+        distribution = "t"
+        nan_policy = "propagate"
+        assert_(alternative not in ["two-sided", "greater", "less"])
+        assert_raises(ValueError,
+                      stats.brunnermunzel,
+                      self.X,
+                      self.Y,
+                      alternative,
+                      distribution,
+                      nan_policy)
+
+    def test_brunnermunzel_distribution_norm(self):
+        u1, p1 = stats.brunnermunzel(self.X, self.Y, distribution="normal")
+        u2, p2 = stats.brunnermunzel(self.Y, self.X, distribution="normal")
+        assert_approx_equal(p1, p2, significant=self.significant)
+        assert_approx_equal(u1, 3.1374674823029505,
+                            significant=self.significant)
+        assert_approx_equal(u2, -3.1374674823029505,
+                            significant=self.significant)
+        assert_approx_equal(p1, 0.0017041417600383024,
+                            significant=self.significant)
+
+    def test_brunnermunzel_distribution_error(self):
+        alternative = "two-sided"
+        distribution = "error"
+        nan_policy = "propagate"
+        assert_(distribution not in ["t", "normal"])
+        assert_raises(ValueError,
+                      stats.brunnermunzel,
+                      self.X,
+                      self.Y,
+                      alternative,
+                      distribution,
+                      nan_policy)
+
+    def test_brunnermunzel_empty_input(self):
+        u1, p1 = stats.brunnermunzel(self.X, [])
+        u2, p2 = stats.brunnermunzel([], self.Y)
+        u3, p3 = stats.brunnermunzel([], [])
+
+        assert_equal(u1, np.nan)
+        assert_equal(p1, np.nan)
+        assert_equal(u2, np.nan)
+        assert_equal(p2, np.nan)
+        assert_equal(u3, np.nan)
+        assert_equal(p3, np.nan)
+
+    def test_brunnermunzel_nan_input_propagate(self):
+        X = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1, np.nan]
+        Y = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4]
+        u1, p1 = stats.brunnermunzel(X, Y, nan_policy="propagate")
+        u2, p2 = stats.brunnermunzel(Y, X, nan_policy="propagate")
+
+        assert_equal(u1, np.nan)
+        assert_equal(p1, np.nan)
+        assert_equal(u2, np.nan)
+        assert_equal(p2, np.nan)
+
+    def test_brunnermunzel_nan_input_raise(self):
+        X = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1, np.nan]
+        Y = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4]
+        alternative = "two-sided"
+        distribution = "t"
+        nan_policy = "raise"
+
+        assert_raises(ValueError,
+                      stats.brunnermunzel,
+                      X,
+                      Y,
+                      alternative,
+                      distribution,
+                      nan_policy)
+        assert_raises(ValueError,
+                      stats.brunnermunzel,
+                      Y,
+                      X,
+                      alternative,
+                      distribution,
+                      nan_policy)
+
+    def test_brunnermunzel_nan_input_omit(self):
+        X = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1, np.nan]
+        Y = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4]
+        u1, p1 = stats.brunnermunzel(X, Y, nan_policy="omit")
+        u2, p2 = stats.brunnermunzel(Y, X, nan_policy="omit")
+
+        assert_approx_equal(p1, p2, significant=self.significant)
+        assert_approx_equal(u1, 3.1374674823029505,
+                            significant=self.significant)
+        assert_approx_equal(u2, -3.1374674823029505,
+                            significant=self.significant)
+        assert_approx_equal(p1, 0.0057862086661515377,
+                            significant=self.significant)
+
+    def test_brunnermunzel_return_nan(self):
+        """ tests that a warning is emitted when p is nan
+        p-value with t-distributions can be nan (0/0) (see gh-15843)
+        """
+        x = [1, 2, 3]
+        y = [5, 6, 7, 8, 9]
+
+        with pytest.warns(RuntimeWarning, match='p-value cannot be estimated'):
+            stats.brunnermunzel(x, y, distribution="t")
+
+    def test_brunnermunzel_normal_dist(self):
+        """ tests that a p is 0 for datasets that cause p->nan
+        when t-distribution is used (see gh-15843)
+        """
+        x = [1, 2, 3]
+        y = [5, 6, 7, 8, 9]
+
+        with pytest.warns(RuntimeWarning, match='divide by zero'):
+            _, p = stats.brunnermunzel(x, y, distribution="normal")
+        assert_equal(p, 0)
+
+
+class TestRatioUniforms:
+    """ Tests for rvs_ratio_uniforms.
+    """
+
+    def test_rv_generation(self):
+        # use KS test to check distribution of rvs
+        # normal distribution
+        f = stats.norm.pdf
+        v_bound = np.sqrt(f(np.sqrt(2))) * np.sqrt(2)
+        umax, vmin, vmax = np.sqrt(f(0)), -v_bound, v_bound
+        rvs = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=2500,
+                                       random_state=12345)
+        assert_equal(stats.kstest(rvs, 'norm')[1] > 0.25, True)
+
+        # exponential distribution
+        rvs = stats.rvs_ratio_uniforms(lambda x: np.exp(-x), umax=1,
+                                       vmin=0, vmax=2*np.exp(-1),
+                                       size=1000, random_state=12345)
+        assert_equal(stats.kstest(rvs, 'expon')[1] > 0.25, True)
+
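+    def test_bounds_sketch(self):
+        # Editorial addition, not part of the original suite: a numerical
+        # sketch of where the bounds used above come from. For the
+        # ratio-of-uniforms method, umax = sup sqrt(f) (at x = 0 for the
+        # normal pdf) and |v| <= sup x*sqrt(f(x)), attained at
+        # x = +/- sqrt(2).
+        f = stats.norm.pdf
+        x = np.linspace(-10, 10, 100001)
+        assert_allclose(np.max(np.sqrt(f(x))), np.sqrt(f(0)), rtol=1e-7)
+        assert_allclose(np.max(x * np.sqrt(f(x))),
+                        np.sqrt(2) * np.sqrt(f(np.sqrt(2))), rtol=1e-7)
+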
+    def test_shape(self):
+        # test shape of return value depending on size parameter
+        f = stats.norm.pdf
+        v_bound = np.sqrt(f(np.sqrt(2))) * np.sqrt(2)
+        umax, vmin, vmax = np.sqrt(f(0)), -v_bound, v_bound
+
+        r1 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=3,
+                                      random_state=1234)
+        r2 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=(3,),
+                                      random_state=1234)
+        r3 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=(3, 1),
+                                      random_state=1234)
+        assert_equal(r1, r2)
+        assert_equal(r2, r3.flatten())
+        assert_equal(r1.shape, (3,))
+        assert_equal(r3.shape, (3, 1))
+
+        r4 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=(3, 3, 3),
+                                      random_state=12)
+        r5 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=27,
+                                      random_state=12)
+        assert_equal(r4.flatten(), r5)
+        assert_equal(r4.shape, (3, 3, 3))
+
+        r6 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, random_state=1234)
+        r7 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=1,
+                                      random_state=1234)
+        r8 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=(1, ),
+                                      random_state=1234)
+        assert_equal(r6, r7)
+        assert_equal(r7, r8)
+
+    def test_random_state(self):
+        f = stats.norm.pdf
+        v_bound = np.sqrt(f(np.sqrt(2))) * np.sqrt(2)
+        umax, vmin, vmax = np.sqrt(f(0)), -v_bound, v_bound
+        np.random.seed(1234)
+        r1 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=(3, 4))
+        r2 = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=(3, 4),
+                                      random_state=1234)
+        assert_equal(r1, r2)
+
+    def test_exceptions(self):
+        f = stats.norm.pdf
+        # need vmin < vmax
+        assert_raises(ValueError,
+                      stats.rvs_ratio_uniforms, pdf=f, umax=1, vmin=3, vmax=1)
+        assert_raises(ValueError,
+                      stats.rvs_ratio_uniforms, pdf=f, umax=1, vmin=1, vmax=1)
+        # need umax > 0
+        assert_raises(ValueError,
+                      stats.rvs_ratio_uniforms, pdf=f, umax=-1, vmin=1, vmax=1)
+        assert_raises(ValueError,
+                      stats.rvs_ratio_uniforms, pdf=f, umax=0, vmin=1, vmax=1)
+
+
+class TestMGCErrorWarnings:
+    """ Tests errors and warnings derived from MGC.
+    """
+    def test_error_notndarray(self):
+        # raises error if x or y is not a ndarray
+        x = np.arange(20)
+        y = [5] * 20
+        assert_raises(ValueError, stats.multiscale_graphcorr, x, y)
+        assert_raises(ValueError, stats.multiscale_graphcorr, y, x)
+
+    def test_error_shape(self):
+        # raises error if number of samples different (n)
+        x = np.arange(100).reshape(25, 4)
+        y = x.reshape(10, 10)
+        assert_raises(ValueError, stats.multiscale_graphcorr, x, y)
+
+    def test_error_lowsamples(self):
+        # raises error if samples are low (< 3)
+        x = np.arange(3)
+        y = np.arange(3)
+        assert_raises(ValueError, stats.multiscale_graphcorr, x, y)
+
+    def test_error_nans(self):
+        # raises error if inputs contain NaNs
+        x = np.arange(20, dtype=float)
+        x[0] = np.nan
+        assert_raises(ValueError, stats.multiscale_graphcorr, x, x)
+
+        y = np.arange(20)
+        assert_raises(ValueError, stats.multiscale_graphcorr, x, y)
+
+    def test_error_wrongdisttype(self):
+        # raises error if metric is not a function
+        x = np.arange(20)
+        compute_distance = 0
+        assert_raises(ValueError, stats.multiscale_graphcorr, x, x,
+                      compute_distance=compute_distance)
+
+    @pytest.mark.parametrize("reps", [
+        -1,    # reps is negative
+        '1',   # reps is not integer
+    ])
+    def test_error_reps(self, reps):
+        # raises error if reps is negative or not an integer
+        x = np.arange(20)
+        assert_raises(ValueError, stats.multiscale_graphcorr, x, x, reps=reps)
+
+    def test_warns_reps(self):
+        # raises warning when reps is less than 1000
+        x = np.arange(20)
+        reps = 100
+        assert_warns(RuntimeWarning, stats.multiscale_graphcorr, x, x, reps=reps)
+
+    def test_error_infty(self):
+        # raises error if input contains infinities
+        x = np.arange(20)
+        y = np.ones(20) * np.inf
+        assert_raises(ValueError, stats.multiscale_graphcorr, x, y)
+
+
+class TestMGCStat:
+    """ Test validity of MGC test statistic
+    """
+    def _simulations(self, samps=100, dims=1, sim_type=""):
+        # linear simulation
+        if sim_type == "linear":
+            x = np.random.uniform(-1, 1, size=(samps, 1))
+            y = x + 0.3 * np.random.random_sample(size=(x.size, 1))
+
+        # spiral simulation
+        elif sim_type == "nonlinear":
+            unif = np.array(np.random.uniform(0, 5, size=(samps, 1)))
+            x = unif * np.cos(np.pi * unif)
+            y = unif * np.sin(np.pi * unif) + (0.4
+                * np.random.random_sample(size=(x.size, 1)))
+
+        # independence (tests type I simulation)
+        elif sim_type == "independence":
+            u = np.random.normal(0, 1, size=(samps, 1))
+            v = np.random.normal(0, 1, size=(samps, 1))
+            u_2 = np.random.binomial(1, p=0.5, size=(samps, 1))
+            v_2 = np.random.binomial(1, p=0.5, size=(samps, 1))
+            x = u/3 + 2*u_2 - 1
+            y = v/3 + 2*v_2 - 1
+
+        # raise an error for an unrecognized sim_type
+        else:
+            raise ValueError("sim_type must be linear, nonlinear, or "
+                             "independence")
+
+        # add dimensions of noise for higher dimensions
+        if dims > 1:
+            dims_noise = np.random.normal(0, 1, size=(samps, dims-1))
+            x = np.concatenate((x, dims_noise), axis=1)
+
+        return x, y
+
+    @pytest.mark.slow
+    @pytest.mark.parametrize("sim_type, obs_stat, obs_pvalue", [
+        ("linear", 0.97, 1/1000),           # test linear simulation
+        ("nonlinear", 0.163, 1/1000),       # test spiral simulation
+        ("independence", -0.0094, 0.78)     # test independence simulation
+    ])
+    def test_oned(self, sim_type, obs_stat, obs_pvalue):
+        np.random.seed(12345678)
+
+        # generate x and y
+        x, y = self._simulations(samps=100, dims=1, sim_type=sim_type)
+
+        # test stat and pvalue
+        stat, pvalue, _ = stats.multiscale_graphcorr(x, y)
+        assert_approx_equal(stat, obs_stat, significant=1)
+        assert_approx_equal(pvalue, obs_pvalue, significant=1)
+
+    @pytest.mark.slow
+    @pytest.mark.parametrize("sim_type, obs_stat, obs_pvalue", [
+        ("linear", 0.184, 1/1000),           # test linear simulation
+        ("nonlinear", 0.0190, 0.117),        # test spiral simulation
+    ])
+    def test_fived(self, sim_type, obs_stat, obs_pvalue):
+        np.random.seed(12345678)
+
+        # generate x and y
+        x, y = self._simulations(samps=100, dims=5, sim_type=sim_type)
+
+        # test stat and pvalue
+        stat, pvalue, _ = stats.multiscale_graphcorr(x, y)
+        assert_approx_equal(stat, obs_stat, significant=1)
+        assert_approx_equal(pvalue, obs_pvalue, significant=1)
+
+    @pytest.mark.xslow
+    def test_twosamp(self):
+        np.random.seed(12345678)
+
+        # generate x and y
+        x = np.random.binomial(100, 0.5, size=(100, 5))
+        y = np.random.normal(0, 1, size=(80, 5))
+
+        # test stat and pvalue
+        stat, pvalue, _ = stats.multiscale_graphcorr(x, y)
+        assert_approx_equal(stat, 1.0, significant=1)
+        assert_approx_equal(pvalue, 0.001, significant=1)
+
+        # generate x and y
+        y = np.random.normal(0, 1, size=(100, 5))
+
+        # test stat and pvalue
+        stat, pvalue, _ = stats.multiscale_graphcorr(x, y, is_twosamp=True)
+        assert_approx_equal(stat, 1.0, significant=1)
+        assert_approx_equal(pvalue, 0.001, significant=1)
+
+    @pytest.mark.slow
+    def test_workers(self):
+        np.random.seed(12345678)
+
+        # generate x and y
+        x, y = self._simulations(samps=100, dims=1, sim_type="linear")
+
+        # test stat and pvalue
+        stat, pvalue, _ = stats.multiscale_graphcorr(x, y, workers=2)
+        assert_approx_equal(stat, 0.97, significant=1)
+        assert_approx_equal(pvalue, 0.001, significant=1)
+
+    @pytest.mark.slow
+    def test_random_state(self):
+        # generate x and y
+        x, y = self._simulations(samps=100, dims=1, sim_type="linear")
+
+        # test stat and pvalue
+        stat, pvalue, _ = stats.multiscale_graphcorr(x, y, random_state=1)
+        assert_approx_equal(stat, 0.97, significant=1)
+        assert_approx_equal(pvalue, 0.001, significant=1)
+
+    @pytest.mark.slow
+    def test_dist_perm(self):
+        np.random.seed(12345678)
+        # generate x and y
+        x, y = self._simulations(samps=100, dims=1, sim_type="nonlinear")
+        distx = cdist(x, x, metric="euclidean")
+        disty = cdist(y, y, metric="euclidean")
+
+        stat_dist, pvalue_dist, _ = stats.multiscale_graphcorr(distx, disty,
+                                                               compute_distance=None,
+                                                               random_state=1)
+        assert_approx_equal(stat_dist, 0.163, significant=1)
+        assert_approx_equal(pvalue_dist, 0.001, significant=1)
+
+    @pytest.mark.slow
+    def test_pvalue_literature(self):
+        np.random.seed(12345678)
+
+        # generate x and y
+        x, y = self._simulations(samps=100, dims=1, sim_type="linear")
+
+        # test stat and pvalue
+        _, pvalue, _ = stats.multiscale_graphcorr(x, y, random_state=1)
+        assert_allclose(pvalue, 1/1001)
+
+    @pytest.mark.slow
+    def test_alias(self):
+        np.random.seed(12345678)
+
+        # generate x and y
+        x, y = self._simulations(samps=100, dims=1, sim_type="linear")
+
+        res = stats.multiscale_graphcorr(x, y, random_state=1)
+        assert_equal(res.stat, res.statistic)
+
+
+class TestPageTrendTest:
+    # expected statistic and p-values generated using R at
+    # https://rdrr.io/cran/cultevo/, e.g.
+    # library(cultevo)
+    # data = rbind(c(72, 47, 73, 35, 47, 96, 30, 59, 41, 36, 56, 49, 81, 43,
+    #                   70, 47, 28, 28, 62, 20, 61, 20, 80, 24, 50),
+    #              c(68, 52, 60, 34, 44, 20, 65, 88, 21, 81, 48, 31, 31, 67,
+    #                69, 94, 30, 24, 40, 87, 70, 43, 50, 96, 43),
+    #              c(81, 13, 85, 35, 79, 12, 92, 86, 21, 64, 16, 64, 68, 17,
+    #                16, 89, 71, 43, 43, 36, 54, 13, 66, 51, 55))
+    # result = page.test(data, verbose=FALSE)
+    # Most test cases generated to achieve common critical p-values so that
+    # results could be checked (to limited precision) against tables in
+    # scipy.stats.page_trend_test reference [1]
+
+    np.random.seed(0)
+    data_3_25 = np.random.rand(3, 25)
+    data_10_26 = np.random.rand(10, 26)
+
+    ts = [
+          (12805, 0.3886487053947608, False, 'asymptotic', data_3_25),
+          (49140, 0.02888978556179862, False, 'asymptotic', data_10_26),
+          (12332, 0.7722477197436702, False, 'asymptotic',
+           [[72, 47, 73, 35, 47, 96, 30, 59, 41, 36, 56, 49, 81,
+             43, 70, 47, 28, 28, 62, 20, 61, 20, 80, 24, 50],
+            [68, 52, 60, 34, 44, 20, 65, 88, 21, 81, 48, 31, 31,
+             67, 69, 94, 30, 24, 40, 87, 70, 43, 50, 96, 43],
+            [81, 13, 85, 35, 79, 12, 92, 86, 21, 64, 16, 64, 68,
+             17, 16, 89, 71, 43, 43, 36, 54, 13, 66, 51, 55]]),
+          (266, 4.121656378600823e-05, False, 'exact',
+           [[1.5, 4., 8.3, 5, 19, 11],
+            [5, 4, 3.5, 10, 20, 21],
+            [8.4, 3.2, 10, 12, 14, 15]]),
+          (332, 0.9566400920502488, True, 'exact',
+           [[4, 3, 2, 1], [4, 3, 2, 1], [4, 3, 2, 1], [4, 3, 2, 1],
+            [4, 3, 2, 1], [4, 3, 2, 1], [4, 3, 2, 1], [4, 3, 2, 1],
+            [3, 4, 1, 2], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4],
+            [1, 2, 3, 4], [1, 2, 3, 4]]),
+          (241, 0.9622210164861476, True, 'exact',
+           [[3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1],
+            [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1],
+            [3, 2, 1], [2, 1, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3],
+            [1, 2, 3], [1, 2, 3], [1, 2, 3]]),
+          (197, 0.9619432897162209, True, 'exact',
+           [[6, 5, 4, 3, 2, 1], [6, 5, 4, 3, 2, 1], [1, 3, 4, 5, 2, 6]]),
+          (423, 0.9590458306880073, True, 'exact',
+           [[5, 4, 3, 2, 1], [5, 4, 3, 2, 1], [5, 4, 3, 2, 1],
+            [5, 4, 3, 2, 1], [5, 4, 3, 2, 1], [5, 4, 3, 2, 1],
+            [4, 1, 3, 2, 5], [1, 2, 3, 4, 5], [1, 2, 3, 4, 5],
+            [1, 2, 3, 4, 5]]),
+          (217, 0.9693058575034678, True, 'exact',
+           [[3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1],
+            [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1],
+            [2, 1, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3],
+            [1, 2, 3]]),
+          (395, 0.991530289351305, True, 'exact',
+           [[7, 6, 5, 4, 3, 2, 1], [7, 6, 5, 4, 3, 2, 1],
+            [6, 5, 7, 4, 3, 2, 1], [1, 2, 3, 4, 5, 6, 7]]),
+          (117, 0.9997817843373017, True, 'exact',
+           [[3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1], [3, 2, 1],
+            [3, 2, 1], [3, 2, 1], [3, 2, 1], [2, 1, 3], [1, 2, 3]]),
+         ]
+
+    @pytest.mark.parametrize("L, p, ranked, method, data", ts)
+    def test_accuracy(self, L, p, ranked, method, data):
+        np.random.seed(42)
+        res = stats.page_trend_test(data, ranked=ranked, method=method)
+        assert_equal(L, res.statistic)
+        assert_allclose(p, res.pvalue)
+        assert_equal(method, res.method)
+
+    ts2 = [
+           (542, 0.9481266260876332, True, 'exact',
+            [[10, 9, 8, 7, 6, 5, 4, 3, 2, 1],
+             [1, 8, 4, 7, 6, 5, 9, 3, 2, 10]]),
+           (1322, 0.9993113928199309, True, 'exact',
+            [[10, 9, 8, 7, 6, 5, 4, 3, 2, 1], [10, 9, 8, 7, 6, 5, 4, 3, 2, 1],
+             [10, 9, 8, 7, 6, 5, 4, 3, 2, 1], [9, 2, 8, 7, 6, 5, 4, 3, 10, 1],
+             [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]),
+           (2286, 0.9908688345484833, True, 'exact',
+            [[8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1],
+             [8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1],
+             [8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1],
+             [8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1],
+             [8, 7, 6, 5, 4, 3, 2, 1], [1, 3, 5, 6, 4, 7, 2, 8],
+             [1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
+             [1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
+             [1, 2, 3, 4, 5, 6, 7, 8]]),
+          ]
+
+    # only the first of these appears slow because intermediate data are
+    # cached and used on the rest
+    @pytest.mark.parametrize("L, p, ranked, method, data", ts2)
+    @pytest.mark.slow()
+    def test_accuracy2(self, L, p, ranked, method, data):
+        np.random.seed(42)
+        res = stats.page_trend_test(data, ranked=ranked, method=method)
+        assert_equal(L, res.statistic)
+        assert_allclose(p, res.pvalue)
+        assert_equal(method, res.method)
+
+    def test_options(self):
+        np.random.seed(42)
+        m, n = 10, 20
+        predicted_ranks = np.arange(1, n+1)
+        perm = np.random.permutation(np.arange(n))
+        data = np.random.rand(m, n)
+        ranks = stats.rankdata(data, axis=1)
+        res1 = stats.page_trend_test(ranks)
+        res2 = stats.page_trend_test(ranks, ranked=True)
+        res3 = stats.page_trend_test(data, ranked=False)
+        res4 = stats.page_trend_test(ranks, predicted_ranks=predicted_ranks)
+        res5 = stats.page_trend_test(ranks[:, perm],
+                                     predicted_ranks=predicted_ranks[perm])
+        assert_equal(res1.statistic, res2.statistic)
+        assert_equal(res1.statistic, res3.statistic)
+        assert_equal(res1.statistic, res4.statistic)
+        assert_equal(res1.statistic, res5.statistic)
+
+    def test_Ames_assay(self):
+        # test from _page_trend_test.py [2] page 151; data on page 144
+        np.random.seed(42)
+
+        data = [[101, 117, 111], [91, 90, 107], [103, 133, 121],
+                [136, 140, 144], [190, 161, 201], [146, 120, 116]]
+        data = np.array(data).T
+        predicted_ranks = np.arange(1, 7)
+
+        res = stats.page_trend_test(data, ranked=False,
+                                    predicted_ranks=predicted_ranks,
+                                    method="asymptotic")
+        assert_equal(res.statistic, 257)
+        assert_almost_equal(res.pvalue, 0.0035, decimal=4)
+
+        res = stats.page_trend_test(data, ranked=False,
+                                    predicted_ranks=predicted_ranks,
+                                    method="exact")
+        assert_equal(res.statistic, 257)
+        assert_almost_equal(res.pvalue, 0.0023, decimal=4)
+
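+    def test_Ames_L_by_hand(self):
+        # Editorial addition, not part of the original suite: Page's L is
+        # simply sum_j j * R_j, where R_j is the sum of within-row ranks
+        # in column j and the columns are in predicted order; a minimal
+        # sketch recomputing the statistic of the Ames assay case above.
+        data = np.array([[101, 117, 111], [91, 90, 107], [103, 133, 121],
+                         [136, 140, 144], [190, 161, 201],
+                         [146, 120, 116]]).T
+        ranks = stats.rankdata(data, axis=1)
+        L = np.sum(np.arange(1, data.shape[1] + 1) * ranks.sum(axis=0))
+        assert_equal(L, 257)
+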
+    def test_input_validation(self):
+        # test data not a 2d array
+        with assert_raises(ValueError, match="`data` must be a 2d array."):
+            stats.page_trend_test(None)
+        with assert_raises(ValueError, match="`data` must be a 2d array."):
+            stats.page_trend_test([])
+        with assert_raises(ValueError, match="`data` must be a 2d array."):
+            stats.page_trend_test([1, 2])
+        with assert_raises(ValueError, match="`data` must be a 2d array."):
+            stats.page_trend_test([[[1]]])
+
+        # test invalid dimensions
+        with assert_raises(ValueError, match="Page's L is only appropriate"):
+            stats.page_trend_test(np.random.rand(1, 3))
+        with assert_raises(ValueError, match="Page's L is only appropriate"):
+            stats.page_trend_test(np.random.rand(2, 2))
+
+        # predicted ranks must include each integer [1, 2, 3] exactly once
+        message = "`predicted_ranks` must include each integer"
+        with assert_raises(ValueError, match=message):
+            stats.page_trend_test(data=[[1, 2, 3], [1, 2, 3]],
+                                  predicted_ranks=[0, 1, 2])
+        with assert_raises(ValueError, match=message):
+            stats.page_trend_test(data=[[1, 2, 3], [1, 2, 3]],
+                                  predicted_ranks=[1.1, 2, 3])
+        with assert_raises(ValueError, match=message):
+            stats.page_trend_test(data=[[1, 2, 3], [1, 2, 3]],
+                                  predicted_ranks=[1, 2, 3, 3])
+        with assert_raises(ValueError, match=message):
+            stats.page_trend_test(data=[[1, 2, 3], [1, 2, 3]],
+                                  predicted_ranks="invalid")
+
+        # test improperly ranked data
+        with assert_raises(ValueError, match="`data` is not properly ranked"):
+            stats.page_trend_test([[0, 2, 3], [1, 2, 3]], True)
+        with assert_raises(ValueError, match="`data` is not properly ranked"):
+            stats.page_trend_test([[1, 2, 3], [1, 2, 4]], True)
+
+        # various
+        with assert_raises(ValueError, match="`data` contains NaNs"):
+            stats.page_trend_test([[1, 2, 3], [1, 2, np.nan]],
+                                  ranked=False)
+        with assert_raises(ValueError, match="`method` must be in"):
+            stats.page_trend_test(data=[[1, 2, 3], [1, 2, 3]],
+                                  method="ekki")
+        with assert_raises(TypeError, match="`ranked` must be boolean."):
+            stats.page_trend_test(data=[[1, 2, 3], [1, 2, 3]],
+                                  ranked="ekki")
+
+
+rng = np.random.default_rng(902340982)
+x = rng.random(10)
+y = rng.random(10)
+
+
+@pytest.mark.parametrize("fun, args",
+                         [(stats.wilcoxon, (x,)),
+                          (stats.ks_1samp, (x, stats.norm.cdf)),  # type: ignore[attr-defined] # noqa
+                          (stats.ks_2samp, (x, y)),
+                          (stats.kstest, (x, y)),
+                          ])
+def test_rename_mode_method(fun, args):
+
+    res = fun(*args, method='exact')
+    res2 = fun(*args, mode='exact')
+    assert_equal(res, res2)
+
+    err = rf"{fun.__name__}() got multiple values for argument"
+    with pytest.raises(TypeError, match=re.escape(err)):
+        fun(*args, method='exact', mode='exact')
+
+
+class TestExpectile:
+    def test_same_as_mean(self):
+        rng = np.random.default_rng(42)
+        x = rng.random(size=20)
+        assert_allclose(stats.expectile(x, alpha=0.5), np.mean(x))
+
+    def test_minimum(self):
+        rng = np.random.default_rng(42)
+        x = rng.random(size=20)
+        assert_allclose(stats.expectile(x, alpha=0), np.amin(x))
+
+    def test_maximum(self):
+        rng = np.random.default_rng(42)
+        x = rng.random(size=20)
+        assert_allclose(stats.expectile(x, alpha=1), np.amax(x))
+
+    def test_weights(self):
+        # expectile should minimize `fun` defined below; see
+        # F. Sobotka and T. Kneib, "Geoadditive expectile regression",
+        # Computational Statistics and Data Analysis 56 (2012) 755-767
+        # :doi:`10.1016/j.csda.2010.11.015`
+        rng = np.random.default_rng(1856392524598679138)
+
+        def fun(u, a, alpha, weights):
+            w = np.full_like(a, fill_value=alpha)
+            w[a <= u] = 1 - alpha
+            return np.sum(w * weights * (a - u)**2)
+
+        def expectile2(a, alpha, weights):
+            bracket = np.min(a), np.max(a)
+            return optimize.minimize_scalar(fun, bracket=bracket,
+                                            args=(a, alpha, weights)).x
+
+        n = 10
+        a = rng.random(n)
+        alpha = rng.random()
+        weights = rng.random(n)
+
+        res = stats.expectile(a, alpha, weights=weights)
+        ref = expectile2(a, alpha, weights)
+        assert_allclose(res, ref)
+
+    @pytest.mark.parametrize(
+        "alpha", [0.2, 0.5 - 1e-12, 0.5, 0.5 + 1e-12, 0.8]
+    )
+    @pytest.mark.parametrize("n", [20, 2000])
+    def test_expectile_properties(self, alpha, n):
+        """
+        See Section 6 of
+        I. Steinwart, C. Pasin, R.C. Williamson & S. Zhang (2014).
+        "Elicitation and Identification of Properties". COLT.
+        http://proceedings.mlr.press/v35/steinwart14.html
+
+        and
+
+        Propositions 5, 6, 7 of
+        F. Bellini, B. Klar, and A. Müller and E. Rosazza Gianin (2013).
+        "Generalized Quantiles as Risk Measures"
+        http://doi.org/10.2139/ssrn.2225751
+        """
+        rng = np.random.default_rng(42)
+        x = rng.normal(size=n)
+
+        # 0. definite / constancy
+        # Let T(X) denote the expectile of rv X ~ F.
+        # T(c) = c for constant c
+        for c in [-5, 0, 0.5]:
+            assert_allclose(
+                stats.expectile(np.full(shape=n, fill_value=c), alpha=alpha),
+                c
+            )
+
+        # 1. translation equivariance
+        # T(X + c) = T(X) + c
+        c = rng.exponential()
+        assert_allclose(
+            stats.expectile(x + c, alpha=alpha),
+            stats.expectile(x, alpha=alpha) + c,
+        )
+        assert_allclose(
+            stats.expectile(x - c, alpha=alpha),
+            stats.expectile(x, alpha=alpha) - c,
+        )
+
+        # 2. positive homogeneity
+        # T(cX) = c * T(X) for c > 0
+        assert_allclose(
+            stats.expectile(c * x, alpha=alpha),
+            c * stats.expectile(x, alpha=alpha),
+        )
+
+        # 3. subadditivity
+        # Note that subadditivity holds for alpha >= 0.5.
+        # T(X + Y) <= T(X) + T(Y)
+        # For alpha = 0.5, i.e. the mean, strict equality holds.
+        # For alpha < 0.5, one can use property 6. to show
+        # T(X + Y) >= T(X) + T(Y)
+        y = rng.logistic(size=n, loc=10)  # different distribution than x
+        if alpha == 0.5:
+            def assert_op(a, b):
+                assert_allclose(a, b)
+
+        elif alpha > 0.5:
+            def assert_op(a, b):
+                assert a < b
+
+        else:
+            def assert_op(a, b):
+                assert a > b
+
+        assert_op(
+            stats.expectile(np.r_[x + y], alpha=alpha),
+            stats.expectile(x, alpha=alpha)
+            + stats.expectile(y, alpha=alpha)
+        )
+
+        # 4. monotonicity
+        # This follows from first-order stochastic dominance:
+        # X <= Y whenever P(X <= t) >= P(Y <= t) for all t, and then
+        # T(X) <= T(Y) whenever X <= Y
+        y = rng.normal(size=n, loc=5)
+        assert (
+            stats.expectile(x, alpha=alpha) <= stats.expectile(y, alpha=alpha)
+        )
+
+        # 5. convexity for alpha > 0.5, concavity for alpha < 0.5
+        # convexity is
+        # T((1 - c) X + c Y) <= (1 - c) T(X) + c T(Y) for 0 <= c <= 1
+        y = rng.logistic(size=n, loc=10)
+        for c in [0.1, 0.5, 0.8]:
+            assert_op(
+                stats.expectile((1-c)*x + c*y, alpha=alpha),
+                (1-c) * stats.expectile(x, alpha=alpha) +
+                c * stats.expectile(y, alpha=alpha)
+            )
+
+        # 6. negative argument
+        # T_{alpha}(-X) = -T_{1-alpha}(X)
+        assert_allclose(
+            stats.expectile(-x, alpha=alpha),
+            -stats.expectile(x, alpha=1-alpha),
+        )
+
+    @pytest.mark.parametrize("n", [20, 2000])
+    def test_monotonicity_in_alpha(self, n):
+        rng = np.random.default_rng(42)
+        x = rng.pareto(a=2, size=n)
+        e_list = []
+        alpha_seq = np.logspace(-15, np.log10(0.5), 100)
+        # sorted list of unique alpha values in interval (0, 1); the second
+        # half mirrors the first about 0.5 without repeating the midpoint
+        for alpha in np.r_[alpha_seq, 1 - alpha_seq[-2::-1]]:
+            e_list.append(stats.expectile(x, alpha=alpha))
+        assert np.all(np.diff(e_list) > 0)
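+
+
+# Illustrative helper (not collected by pytest): the first-order condition
+# that characterizes the expectile t = expectile(x, alpha),
+#     alpha * mean(max(x - t, 0)) == (1 - alpha) * mean(max(t - x, 0)),
+# which reduces to the sample mean at alpha = 0.5.
+def _check_expectile_first_order_condition(x, alpha):
+    t = stats.expectile(x, alpha=alpha)
+    lhs = alpha * np.mean(np.maximum(x - t, 0))
+    rhs = (1 - alpha) * np.mean(np.maximum(t - x, 0))
+    assert_allclose(lhs, rhs, atol=1e-9)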
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_tukeylambda_stats.py b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_tukeylambda_stats.py
new file mode 100644
index 00000000..bd793641
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_tukeylambda_stats.py
@@ -0,0 +1,86 @@
+import numpy as np
+from numpy.testing import assert_allclose, assert_equal
+
+from scipy.stats._tukeylambda_stats import (tukeylambda_variance,
+                                            tukeylambda_kurtosis)
+
+
+def test_tukeylambda_stats_known_exact():
+    """Compare results with some known exact formulas."""
+    # Some exact values of the Tukey Lambda variance and kurtosis:
+    # lambda   var      kurtosis
+    #   0     pi**2/3     6/5     (logistic distribution)
+    #  0.5    4 - pi    (5/3 - pi/2)/(pi/4 - 1)**2 - 3
+    #   1      1/3       -6/5     (uniform distribution on (-1,1))
+    #   2      1/12      -6/5     (uniform distribution on (-1/2, 1/2))
+
+    # lambda = 0
+    var = tukeylambda_variance(0)
+    assert_allclose(var, np.pi**2 / 3, atol=1e-12)
+    kurt = tukeylambda_kurtosis(0)
+    assert_allclose(kurt, 1.2, atol=1e-10)
+
+    # lambda = 0.5
+    var = tukeylambda_variance(0.5)
+    assert_allclose(var, 4 - np.pi, atol=1e-12)
+    kurt = tukeylambda_kurtosis(0.5)
+    desired = (5./3 - np.pi/2) / (np.pi/4 - 1)**2 - 3
+    assert_allclose(kurt, desired, atol=1e-10)
+
+    # lambda = 1
+    var = tukeylambda_variance(1)
+    assert_allclose(var, 1.0 / 3, atol=1e-12)
+    kurt = tukeylambda_kurtosis(1)
+    assert_allclose(kurt, -1.2, atol=1e-10)
+
+    # lambda = 2
+    var = tukeylambda_variance(2)
+    assert_allclose(var, 1.0 / 12, atol=1e-12)
+    kurt = tukeylambda_kurtosis(2)
+    assert_allclose(kurt, -1.2, atol=1e-10)
+
+
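+# Illustrative cross-check: for lam > -0.5 and lam != 0, the variance of the
+# Tukey lambda distribution has the closed form
+#     var(lam) = (2/lam**2) * (1/(1 + 2*lam) - gamma(lam + 1)**2 / gamma(2*lam + 2)),
+# which reproduces the values above, e.g. var(1) = 1/3 and var(2) = 1/12.
+def _tukeylambda_variance_closed_form(lam):
+    from scipy.special import gamma
+    return (2.0 / lam**2) * (1.0 / (1 + 2 * lam)
+                             - gamma(lam + 1)**2 / gamma(2 * lam + 2))
+
+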
+def test_tukeylambda_stats_mpmath():
+    """Compare results with some values that were computed using mpmath."""
+    a10 = dict(atol=1e-10, rtol=0)
+    a12 = dict(atol=1e-12, rtol=0)
+    data = [
+        # lambda        variance              kurtosis
+        [-0.1, 4.78050217874253547, 3.78559520346454510],
+        [-0.0649, 4.16428023599895777, 2.52019675947435718],
+        [-0.05, 3.93672267890775277, 2.13129793057777277],
+        [-0.001, 3.30128380390964882, 1.21452460083542988],
+        [0.001, 3.27850775649572176, 1.18560634779287585],
+        [0.03125, 2.95927803254615800, 0.804487555161819980],
+        [0.05, 2.78281053405464501, 0.611604043886644327],
+        [0.0649, 2.65282386754100551, 0.476834119532774540],
+        [1.2, 0.242153920578588346, -1.23428047169049726],
+        [10.0, 0.00095237579757703597, 2.37810697355144933],
+        [20.0, 0.00012195121951131043, 7.37654321002709531],
+    ]
+
+    for lam, var_expected, kurt_expected in data:
+        var = tukeylambda_variance(lam)
+        assert_allclose(var, var_expected, **a12)
+        kurt = tukeylambda_kurtosis(lam)
+        assert_allclose(kurt, kurt_expected, **a10)
+
+    # Test with vector arguments (most of the other tests are for single
+    # values).
+    lam, var_expected, kurt_expected = zip(*data)
+    var = tukeylambda_variance(lam)
+    assert_allclose(var, var_expected, **a12)
+    kurt = tukeylambda_kurtosis(lam)
+    assert_allclose(kurt, kurt_expected, **a10)
+
+
+def test_tukeylambda_stats_invalid():
+    """Test values of lambda outside the domains of the functions."""
+    lam = [-1.0, -0.5]
+    var = tukeylambda_variance(lam)
+    assert_equal(var, np.array([np.nan, np.inf]))
+
+    lam = [-1.0, -0.25]
+    kurt = tukeylambda_kurtosis(lam)
+    assert_equal(kurt, np.array([np.nan, np.inf]))
+
diff --git a/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_variation.py b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_variation.py
new file mode 100644
index 00000000..2a546b1f
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/stats/tests/test_variation.py
@@ -0,0 +1,158 @@
+import numpy as np
+from numpy.testing import assert_equal, assert_allclose
+import pytest
+from scipy.stats import variation
+
+
+class TestVariation:
+    """
+    Test class for scipy.stats.variation
+    """
+
+    def test_ddof(self):
+        x = np.arange(9.0)
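+        # worked check: mean(x) = 4 and sum((x - 4)**2) = 60, so the ddof=1
+        # standard deviation is sqrt(60/8) and variation = sqrt(60/8) / 4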
+        assert_allclose(variation(x, ddof=1), np.sqrt(60/8)/4)
+
+    @pytest.mark.parametrize('sgn', [1, -1])
+    def test_sign(self, sgn):
+        x = np.array([1, 2, 3, 4, 5])
+        v = variation(sgn*x)
+        expected = sgn*np.sqrt(2)/3
+        assert_allclose(v, expected, rtol=1e-10)
+
+    def test_scalar(self):
+        # A scalar is treated like a 1-d sequence with length 1.
+        assert_equal(variation(4.0), 0.0)
+
+    @pytest.mark.parametrize('nan_policy, expected',
+                             [('propagate', np.nan),
+                              ('omit', np.sqrt(20/3)/4)])
+    def test_variation_nan(self, nan_policy, expected):
+        x = np.arange(10.)
+        x[9] = np.nan
+        assert_allclose(variation(x, nan_policy=nan_policy), expected)
+
+    def test_nan_policy_raise(self):
+        x = np.array([1.0, 2.0, np.nan, 3.0])
+        with pytest.raises(ValueError, match='input contains nan'):
+            variation(x, nan_policy='raise')
+
+    def test_bad_nan_policy(self):
+        with pytest.raises(ValueError, match='must be one of'):
+            variation([1, 2, 3], nan_policy='foobar')
+
+    def test_keepdims(self):
+        x = np.arange(10).reshape(2, 5)
+        y = variation(x, axis=1, keepdims=True)
+        expected = np.array([[np.sqrt(2)/2],
+                             [np.sqrt(2)/7]])
+        assert_allclose(y, expected)
+
+    @pytest.mark.parametrize('axis, expected',
+                             [(0, np.empty((1, 0))),
+                              (1, np.full((5, 1), fill_value=np.nan))])
+    def test_keepdims_size0(self, axis, expected):
+        x = np.zeros((5, 0))
+        y = variation(x, axis=axis, keepdims=True)
+        assert_equal(y, expected)
+
+    @pytest.mark.parametrize('incr, expected_fill', [(0, np.inf), (1, np.nan)])
+    def test_keepdims_and_ddof_eq_len_plus_incr(self, incr, expected_fill):
+        x = np.array([[1, 1, 2, 2], [1, 2, 3, 3]])
+        y = variation(x, axis=1, ddof=x.shape[1] + incr, keepdims=True)
+        assert_equal(y, np.full((2, 1), fill_value=expected_fill))
+
+    def test_propagate_nan(self):
+        # Check that the shape of the result is the same for inputs
+        # with and without nans, cf gh-5817
+        a = np.arange(8).reshape(2, -1).astype(float)
+        a[1, 0] = np.nan
+        v = variation(a, axis=1, nan_policy="propagate")
+        assert_allclose(v, [np.sqrt(5/4)/1.5, np.nan], atol=1e-15)
+
+    def test_axis_none(self):
+        # Check that `variation` computes the result on the flattened
+        # input when axis is None.
+        y = variation([[0, 1], [2, 3]], axis=None)
+        assert_allclose(y, np.sqrt(5/4)/1.5)
+
+    def test_bad_axis(self):
+        # Check that an invalid axis raises np.AxisError.
+        x = np.array([[1, 2, 3], [4, 5, 6]])
+        with pytest.raises(np.AxisError):
+            variation(x, axis=10)
+
+    def test_mean_zero(self):
+        # Check that `variation` returns inf for a sequence that is not
+        # identically zero but whose mean is zero.
+        x = np.array([10, -3, 1, -4, -4])
+        y = variation(x)
+        assert_equal(y, np.inf)
+
+        x2 = np.array([x, -10*x])
+        y2 = variation(x2, axis=1)
+        assert_equal(y2, [np.inf, np.inf])
+
+    @pytest.mark.parametrize('x', [np.zeros(5), [], [1, 2, np.inf, 9]])
+    def test_return_nan(self, x):
+        # Test some cases where `variation` returns nan.
+        y = variation(x)
+        assert_equal(y, np.nan)
+
+    @pytest.mark.parametrize('axis, expected',
+                             [(0, []), (1, [np.nan]*3), (None, np.nan)])
+    def test_2d_size_zero_with_axis(self, axis, expected):
+        x = np.empty((3, 0))
+        y = variation(x, axis=axis)
+        assert_equal(y, expected)
+
+    def test_neg_inf(self):
+        # Edge case that produces -inf: ddof equals the number of non-nan
+        # values, the values are not constant, and the mean is negative.
+        x1 = np.array([-3, -5])
+        assert_equal(variation(x1, ddof=2), -np.inf)
+
+        x2 = np.array([[np.nan, 1, -10, np.nan],
+                       [-20, -3, np.nan, np.nan]])
+        assert_equal(variation(x2, axis=1, ddof=2, nan_policy='omit'),
+                     [-np.inf, -np.inf])
+
+    @pytest.mark.parametrize("nan_policy", ['propagate', 'omit'])
+    def test_combined_edge_cases(self, nan_policy):
+        x = np.array([[0, 10, np.nan, 1],
+                      [0, -5, np.nan, 2],
+                      [0, -5, np.nan, 3]])
+        y = variation(x, axis=0, nan_policy=nan_policy)
+        assert_allclose(y, [np.nan, np.inf, np.nan, np.sqrt(2/3)/2])
+
+    @pytest.mark.parametrize(
+        'ddof, expected',
+        [(0, [np.sqrt(1/6), np.sqrt(5/8), np.inf, 0, np.nan, 0.0, np.nan]),
+         (1, [0.5, np.sqrt(5/6), np.inf, 0, np.nan, 0, np.nan]),
+         (2, [np.sqrt(0.5), np.sqrt(5/4), np.inf, np.nan, np.nan, 0, np.nan])]
+    )
+    def test_more_nan_policy_omit_tests(self, ddof, expected):
+        # The slightly strange formatting in the following array is my attempt to
+        # maintain a clean tabular arrangement of the data while satisfying
+        # the demands of pycodestyle.  Currently, E201 and E241 are not
+        # disabled by the `# noqa` annotation.
+        nan = np.nan
+        x = np.array([[1.0, 2.0, nan, 3.0],
+                      [0.0, 4.0, 3.0, 1.0],
+                      [nan, -.5, 0.5, nan],
+                      [nan, 9.0, 9.0, nan],
+                      [nan, nan, nan, nan],
+                      [3.0, 3.0, 3.0, 3.0],
+                      [0.0, 0.0, 0.0, 0.0]])
+        v = variation(x, axis=1, ddof=ddof, nan_policy='omit')
+        assert_allclose(v, expected)
+
+    def test_variation_ddof(self):
+        # test variation with delta degrees of freedom
+        # regression test for gh-13341
+        a = np.array([1, 2, 3, 4, 5])
+        nan_a = np.array([1, 2, 3, np.nan, 4, 5, np.nan])
+        y = variation(a, ddof=1)
+        nan_y = variation(nan_a, nan_policy="omit", ddof=1)
+        assert_allclose(y, np.sqrt(5/2)/3)
+        assert y == nan_y
diff --git a/__packaged__/coreml/.python_dependencies/scipy/version.py b/__packaged__/coreml/.python_dependencies/scipy/version.py
new file mode 100644
index 00000000..1bf7f237
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/scipy/version.py
@@ -0,0 +1,12 @@
+# THIS FILE IS GENERATED DURING THE SCIPY BUILD
+# See tools/version_utils.py for details
+
+short_version = '1.10.1'
+version = '1.10.1'
+full_version = '1.10.1'
+git_revision = 'c1ed5ec'
+commit_count = '0'
+release = True
+
+if not release:
+    version = full_version
diff --git a/__packaged__/coreml/.python_dependencies/tests/__init__.py b/__packaged__/coreml/.python_dependencies/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/__packaged__/coreml/.python_dependencies/tests/test_stable_diffusion.py b/__packaged__/coreml/.python_dependencies/tests/test_stable_diffusion.py
new file mode 100644
index 00000000..a2e42a18
--- /dev/null
+++ b/__packaged__/coreml/.python_dependencies/tests/test_stable_diffusion.py
@@ -0,0 +1,410 @@
+#
+# For licensing see accompanying LICENSE.md file.
+# Copyright (C) 2022 Apple Inc. All Rights Reserved.
+#
+
+import argparse
+import contextlib
+import coremltools as ct
+from diffusers import StableDiffusionPipeline
+import json
+import logging
+import numpy as np
+import os
+import unittest
+from PIL import Image
+from statistics import median
+import tempfile
+import time
+
+import torch
+
+torch.set_grad_enabled(False)
+
+from python_coreml_stable_diffusion import torch2coreml, pipeline, coreml_model
+
+logger = logging.getLogger(__name__)
+logger.setLevel("INFO")
+
+# Testing configuration
+TEST_SEED = 93
+TEST_PROMPT = "a high quality photo of an astronaut riding a horse in space"
+TEST_COMPUTE_UNIT = ["CPU_AND_GPU", "ALL", "CPU_AND_NE"]
+TEST_PSNR_THRESHOLD = 35  # dB
+TEST_ABSOLUTE_MAX_LATENCY = 90  # seconds
+TEST_WARMUP_INFERENCE_STEPS = 3
+TEST_TEXT_TO_IMAGE_SPEED_REPEATS = 3
+TEST_MINIMUM_PROMPT_TO_IMAGE_CLIP_COSINE_SIMILARITY = 0.3  # in range [0.,1.]
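+# (For reference: PSNR between uint8 images a and b is
+# 20 * log10(255 / sqrt(mean((a - b)**2))); the 35 dB threshold corresponds
+# to an RMSE of roughly 4.5 intensity levels out of 255.)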
+
+
+class TestStableDiffusionForTextToImage(unittest.TestCase):
+    """ Test Stable Diffusion text-to-image pipeline for:
+
+    - PyTorch to CoreML conversion via coremltools
+    - Speed of CoreML runtime across several compute units
+    - Integration with `diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.py`
+    - Efficacy of the safety_checker
+    - Affinity of the generated image with the original prompt via CLIP score
+    - The bridge between Python and Swift CLI
+    - The signal parity of Swift CLI generated image with that of Python CLI
+    """
+    cli_args = None
+
+    @classmethod
+    def setUpClass(cls):
+        cls.pytorch_pipe = StableDiffusionPipeline.from_pretrained(
+            cls.cli_args.model_version,
+            use_auth_token=True,
+        )
+
+        # To be initialized after test_torch_to_coreml_conversion is run
+        cls.coreml_pipe = None
+        cls.active_compute_unit = None
+
+    @classmethod
+    def tearDownClass(cls):
+        cls.pytorch_pipe = None
+        cls.coreml_pipe = None
+        cls.active_compute_unit = None
+
+    def test_torch_to_coreml_conversion(self):
+        """ Tests:
+        - PyTorch to CoreML conversion via coremltools
+        """
+        with self.subTest(model="vae_decoder"):
+            logger.info("Converting vae_decoder")
+            torch2coreml.convert_vae_decoder(self.pytorch_pipe, self.cli_args)
+            logger.info("Successfully converted vae_decoder")
+
+        with self.subTest(model="unet"):
+            logger.info("Converting unet")
+            torch2coreml.convert_unet(self.pytorch_pipe, self.cli_args)
+            logger.info("Successfully converted unet")
+
+        with self.subTest(model="text_encoder"):
+            logger.info("Converting text_encoder")
+            torch2coreml.convert_text_encoder(self.pytorch_pipe, self.cli_args)
+            logger.info("Successfully converted text_encoder")
+
+        with self.subTest(model="safety_checker"):
+            logger.info("Converting safety_checker")
+            torch2coreml.convert_safety_checker(self.pytorch_pipe,
+                                                self.cli_args)
+            logger.info("Successfully converted safety_checker")
+
+    def test_end_to_end_image_generation_speed(self):
+        """ Tests:
+        - Speed of CoreML runtime across several compute units
+        - Integration with `diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.py`
+        """
+        latency = {
+            compute_unit:
+            self._coreml_text_to_image_with_compute_unit(compute_unit)
+            for compute_unit in TEST_COMPUTE_UNIT
+        }
+        latency["num_repeats_for_median"] = TEST_TEXT_TO_IMAGE_SPEED_REPEATS
+
+        json_path = os.path.join(self.cli_args.o, "benchmark.json")
+        logger.info(f"Saving inference benchmark results to {json_path}")
+        with open(json_path, "w") as f:
+            json.dump(latency, f)
+
+        for compute_unit in TEST_COMPUTE_UNIT:
+            with self.subTest(compute_unit=compute_unit):
+                self.assertGreater(TEST_ABSOLUTE_MAX_LATENCY,
+                                   latency[compute_unit])
+
+    def test_image_to_prompt_clip_score(self):
+        """ Tests:
+        Affinity of the generated image with the original prompt via CLIP score
+        """
+        logger.warning(
+            "This test will download the CLIP ViT-B/16 model (approximately 600 MB) from Hugging Face"
+        )
+
+        from transformers import CLIPProcessor, CLIPModel
+
+        model = CLIPModel.from_pretrained("openai/clip-vit-base-patch16")
+        processor = CLIPProcessor.from_pretrained(
+            "openai/clip-vit-base-patch16")
+
+        for compute_unit in TEST_COMPUTE_UNIT:
+            with self.subTest(compute_unit=compute_unit):
+                image_path = pipeline.get_image_path(self.cli_args,
+                                                     prompt=TEST_PROMPT,
+                                                     compute_unit=compute_unit)
+                image = Image.open(image_path)
+
+                # Preprocess images and text for inference with CLIP
+                inputs = processor(text=[TEST_PROMPT],
+                                   images=image,
+                                   return_tensors="pt",
+                                   padding=True)
+                outputs = model(**inputs)
+
+                # Compute cosine similarity between image and text embeddings
+                image_text_cosine_similarity = outputs.image_embeds @ outputs.text_embeds.T
+                logger.info(
+                    f"Image ({image_path}) to text ({TEST_PROMPT}) CLIP score: {image_text_cosine_similarity[0].item():.2f}"
+                )
+
+                # Ensure that the minimum cosine similarity threshold is achieved
+                self.assertGreater(
+                    image_text_cosine_similarity,
+                    TEST_MINIMUM_PROMPT_TO_IMAGE_CLIP_COSINE_SIMILARITY,
+                )
+
+    def test_safety_checker_efficacy(self):
+        """ Tests:
+        - Efficacy of the safety_checker
+        """
+        self._init_coreml_pipe(compute_unit=self.active_compute_unit)
+
+        safety_checker_test_prompt = "NSFW"
+        image = self.coreml_pipe(safety_checker_test_prompt)
+
+        # Image must have been erased by the safety checker
+        self.assertEqual(np.array(image["images"][0]).sum(), 0.)
+        self.assertTrue(image["nsfw_content_detected"].any())
+
+    def test_swift_cli_image_generation(self):
+        """ Tests:
+        - The bridge between Python and Swift CLI
+        - The signal parity of Swift CLI generated image with that of Python CLI
+        """
+        # coremltools to Core ML compute unit mapping
+        compute_unit_map = {
+            "ALL": "all",
+            "CPU_AND_GPU": "cpuAndGPU",
+            "CPU_AND_NE": "cpuAndNeuralEngine"
+        }
+
+        # Prepare resources for Swift CLI
+        resources_dir = torch2coreml.bundle_resources_for_swift_cli(
+            self.cli_args)
+        logger.info("Bundled resources for Swift CLI")
+
+        # Execute image generation with Swift CLI
+        # Note: First time takes ~5 minutes due to project building and so on
+        cmd = " ".join([
+            f"swift run StableDiffusionSample \"{TEST_PROMPT}\"",
+            f"--resource-path {resources_dir}",
+            f"--seed {TEST_SEED}",
+            f"--output-path {self.cli_args.o}",
+            f"--compute-units {compute_unit_map[TEST_COMPUTE_UNIT[-1]]}"
+        ])
+        logger.info(f"Executing `{cmd}`")
+        os.system(cmd)
+        logger.info(f"Image generation with Swift CLI is complete")
+
+        # Load Swift CLI generated image
+        swift_cli_image = Image.open(
+            os.path.join(
+                self.cli_args.o, "_".join(TEST_PROMPT.rsplit(" ")) + "." +
+                str(TEST_SEED) + ".final.png"))
+
+        # Load Python CLI (pipeline.py) generated image
+        python_cli_image = Image.open(pipeline.get_image_path(self.cli_args,
+                                                              prompt=TEST_PROMPT,
+                                                              compute_unit=TEST_COMPUTE_UNIT[-1]))
+
+        # Compute signal parity
+        swift2torch_psnr = torch2coreml.report_correctness(
+            np.array(swift_cli_image.convert("RGB")),
+            np.array(python_cli_image.convert("RGB")),
+            "Swift CLI and Python CLI generated images")
+        self.assertGreater(swift2torch_psnr, torch2coreml.ABSOLUTE_MIN_PSNR)
+
+    def _init_coreml_pipe(self, compute_unit):
+        """ Initializes CoreML pipe for the requested compute_unit
+        """
+        assert compute_unit in ct.ComputeUnit._member_names_, f"Not a valid coremltools.ComputeUnit: {compute_unit}"
+
+        if self.active_compute_unit == compute_unit:
+            logger.info(
+                "self.coreml_pipe matches requested compute_unit, skipping reinitialization"
+            )
+            assert \
+                isinstance(self.coreml_pipe, pipeline.CoreMLStableDiffusionPipeline), \
+                type(self.coreml_pipe)
+        else:
+            self.active_compute_unit = compute_unit
+            self.coreml_pipe = pipeline.get_coreml_pipe(
+                pytorch_pipe=self.pytorch_pipe,
+                mlpackages_dir=self.cli_args.o,
+                model_version=self.cli_args.model_version,
+                compute_unit=self.active_compute_unit,)
+
+
+    def _coreml_text_to_image_with_compute_unit(self, compute_unit):
+        """ Benchmark end-to-end text-to-image generation with the requested compute_unit
+        """
+        self._init_coreml_pipe(compute_unit)
+
+        # Warm up (not necessary in all settings but improves consistency for benchmarking)
+        logger.info(
+            f"Warmup image generation with {TEST_WARMUP_INFERENCE_STEPS} inference steps"
+        )
+        image = self.coreml_pipe(
+            TEST_PROMPT, num_inference_steps=TEST_WARMUP_INFERENCE_STEPS)
+
+        # Test end-to-end speed
+        logger.info(
+            f"Run full image generation {TEST_TEXT_TO_IMAGE_SPEED_REPEATS} times and report median"
+        )
+
+        def test_coreml_text_to_image_speed():
+            """ Execute Core ML based image generation
+            """
+            _reset_seed()
+            image = self.coreml_pipe(TEST_PROMPT)["images"][0]
+            out_path = pipeline.get_image_path(self.cli_args,
+                                        prompt=TEST_PROMPT,
+                                        compute_unit=compute_unit)
+            logger.info(f"Saving generated image to {out_path}")
+            image.save(out_path)
+
+        def collect_timings(callable, n):
+            """ Collect user latency for callable
+            """
+            user_latencies = []
+            for _ in range(n):
+                s = time.time()
+                callable()
+                user_latencies.append(float(f"{time.time() - s:.2f}"))
+            return user_latencies
+
+        coreml_latencies = collect_timings(
+            callable=test_coreml_text_to_image_speed,
+            n=TEST_TEXT_TO_IMAGE_SPEED_REPEATS)
+        coreml_median_latency = median(coreml_latencies)
+
+        logger.info(
+            f"End-to-end latencies with coremltools.ComputeUnit.{compute_unit}: median={coreml_median_latency:.2f}"
+        )
+
+        return coreml_median_latency
+
+
+def _reset_seed():
+    """ Reset RNG state in order to reproduce the results across multiple runs
+    """
+    torch.manual_seed(TEST_SEED)
+    np.random.seed(TEST_SEED)
+
+
+def _get_test_artifacts_dir(args):
+    # Persist artifacts when a directory was requested; otherwise fall back
+    # to a temporary directory that is removed on exit.
+    if args.persistent_test_artifacts_dir is not None:
+        os.makedirs(args.persistent_test_artifacts_dir, exist_ok=True)
+        return contextlib.nullcontext(
+            enter_result=args.persistent_test_artifacts_dir)
+    else:
+        return tempfile.TemporaryDirectory(
+            prefix="python_coreml_stable_diffusion_tests")
+
+
+def _extend_parser(parser):
+    parser.add_argument(
+        "--persistent-test-artifacts-dir",
+        type=str,
+        default=None,
+        help=
+        ("If specified, test artifacts such as Core ML models and generated images are saved in this directory. ",
+         "Otherwise, all artifacts are erased after the test program terminates."
+         ))
+    parser.add_argument(
+        "--fast",
+        action="store_true",
+        help=
+        "If specified, runs fewer repeats for `test_end_to_end_image_generation_speed`"
+    )
+    parser.add_argument(
+        "--test-image-to-prompt-clip-score-opt-in",
+        action="store_true",
+        help=
+        ("If specified, enables `test_image_to_prompt_clip_score` to verify the relevance of the "
+         "generated image content to the original text prompt. This test is an opt-in "
+         "test because it involves an additional one time 600MB model download."
+         ))
+    parser.add_argument(
+        "--test-swift-cli-opt-in",
+        action="store_true",
+        help=
+        ("If specified, compiles all models and builds the Swift CLI to run image generation and compares "
+         "results across Python and Swift runtime"))
+    parser.add_argument(
+        "--test-safety-checker-efficacy-opt-in",
+        action="store_true",
+        help=
+        ("If specified, generates a potentially NSFW image to check whether the `safety_checker` "
+         "accurately detects and removes the content"))
+    return parser
+
+
+if __name__ == "__main__":
+    # Reproduce the CLI of the original pipeline
+    parser = torch2coreml.parser_spec()
+    parser = _extend_parser(parser)
+    cli_args = parser.parse_args()
+
+    cli_args.check_output_correctness = True
+    cli_args.prompt = TEST_PROMPT
+    cli_args.seed = TEST_SEED
+    cli_args.compute_unit = TEST_COMPUTE_UNIT[0]
+    cli_args.scheduler = None  # use default
+    torch2coreml.ABSOLUTE_MIN_PSNR = TEST_PSNR_THRESHOLD
+
+    if cli_args.fast:
+        logger.info(
+            "`--fast` detected: Image generation will be run once " \
+            f"(instead of {TEST_TEXT_TO_IMAGE_SPEED_REPEATS } times) " \
+            "with ComputeUnit.ALL (other compute units are skipped)" \
+            " (median can not be reported)")
+        TEST_TEXT_TO_IMAGE_SPEED_REPEATS = 1
+        TEST_COMPUTE_UNIT = ["ALL"]
+
+        logger.info("`--fast` detected: Skipping `--check-output-correctness` tests")
+        cli_args.check_output_correctness = False
+    elif cli_args.attention_implementation == "ORIGINAL":
+        TEST_COMPUTE_UNIT = ["CPU_AND_GPU", "ALL"]
+    elif cli_args.attention_implementation == "SPLIT_EINSUM":
+        TEST_COMPUTE_UNIT = ["ALL", "CPU_AND_NE"]
+
+    logger.info(f"Testing compute units: {TEST_COMPUTE_UNIT}")
+
+
+    # Save CoreML model files and generated images into the artifacts dir
+    with _get_test_artifacts_dir(cli_args) as test_artifacts_dir:
+        cli_args.o = test_artifacts_dir
+        logger.info(f"Test artifacts will be saved under {test_artifacts_dir}")
+
+        TestStableDiffusionForTextToImage.cli_args = cli_args
+
+        # Run the following tests in sequential order
+        suite = unittest.TestSuite()
+        suite.addTest(
+            TestStableDiffusionForTextToImage(
+                "test_torch_to_coreml_conversion"))
+        suite.addTest(
+            TestStableDiffusionForTextToImage(
+                "test_end_to_end_image_generation_speed"))
+
+        if cli_args.test_safety_checker_efficacy_opt_in:
+            suite.addTest(
+                TestStableDiffusionForTextToImage("test_safety_checker_efficacy"))
+
+        if cli_args.test_image_to_prompt_clip_score_opt_in:
+            suite.addTest(
+                TestStableDiffusionForTextToImage(
+                    "test_image_to_prompt_clip_score"))
+
+        if cli_args.test_swift_cli_opt_in:
+            suite.addTest(
+                TestStableDiffusionForTextToImage(
+                    "test_swift_cli_image_generation"))
+
+        if os.getenv("DEBUG", False):
+            suite.debug()
+        else:
+            runner = unittest.TextTestRunner()
+            runner.run(suite)
diff --git a/__packaged__/coreml/README.md b/__packaged__/coreml/README.md
new file mode 100644
index 00000000..9e2a7cb1
--- /dev/null
+++ b/__packaged__/coreml/README.md
@@ -0,0 +1,4 @@
+# CoreML Backend
+Faster inference on Apple Silicon with [apple/ml-stable-diffusion](https://github.com/apple/ml-stable-diffusion).
+
+Converted mlpackages are stored in the directory specified by `DREAM_TEXTURES_COREML_HOME`, or `~/.cache/dream_textures_coreml` by default.
\ No newline at end of file
diff --git a/__packaged__/coreml/__init__.py b/__packaged__/coreml/__init__.py
new file mode 100644
index 00000000..16156f98
--- /dev/null
+++ b/__packaged__/coreml/__init__.py
@@ -0,0 +1,89 @@
+bl_info = {
+    "name": "CoreML Backend",
+    "blender": (3, 1, 0),
+    "category": "Paint",
+}
+
+from multiprocessing import current_process
+import site
+import sys
+import os
+
+def _load_dependencies():
+    site.addsitedir(os.path.join(os.path.dirname(os.path.realpath(__file__)), ".python_dependencies"))
+    # `site.addsitedir` appends to `sys.path`; move the bundled dependencies
+    # to the front so they take precedence over any system-wide installs.
+    deps = sys.path.pop(-1)
+    sys.path.insert(0, deps)
+
+if current_process().name == "__actor__":
+    _load_dependencies()
+else:
+    import bpy
+    from typing import Tuple
+    from dream_textures.api import Backend, Task, Model, Prompt, SeamlessAxes, StepPreviewMode, StepCallback, Callback
+    from dream_textures.diffusers_backend import DiffusersBackend
+    from .actor import CoreMLActor
+
+    class CoreMLBackend(Backend):
+        name = "CoreML"
+        description = "CPU/GPU/NE accelerated generation on Apple Silicon"
+
+        compute_unit: bpy.props.EnumProperty(
+            name="Compute Unit",
+            items=(
+                ('ALL', 'All', 'Use all compute units available, including the neural engine'),
+                ('CPU_ONLY', 'CPU', 'Limit the model to only use the CPU'),
+                ('CPU_AND_GPU', 'CPU and GPU', 'Use both the CPU and GPU, but not the neural engine'),
+                ('CPU_AND_NE', 'CPU and NE', 'Use both the CPU and neural engine, but not the GPU'),
+            )
+        )
+
+        def list_models(self, context):
+            return DiffusersBackend.list_models(self, context)
+        
+        def list_schedulers(self, context):
+            return [
+                "DDIM",
+                "DPM Solver Multistep",
+                "Euler Ancestral Discrete",
+                "Euler Discrete",
+                "LMS Discrete",
+                "PNDM"
+            ]
+
+        def draw_speed_optimizations(self, layout, context):
+            layout.prop(self, "compute_unit")
+
+        def generate(self, task: Task, model: Model, prompt: Prompt, size: Tuple[int, int] | None, seed: int, steps: int, guidance_scale: float, scheduler: str, seamless_axes: SeamlessAxes, step_preview_mode: StepPreviewMode, iterations: int, step_callback: StepCallback, callback: Callback):
+            gen: CoreMLActor = CoreMLActor.shared()
+            future = gen.generate(
+                model=model.id.replace('models--', '').replace('--', '/'),
+                prompt=prompt.positive,
+                negative_prompt=prompt.negative,
+                size=size,
+                seed=seed,
+                steps=steps,
+                guidance_scale=guidance_scale,
+                scheduler=scheduler,
+                seamless_axes=seamless_axes,
+                step_preview_mode=step_preview_mode,
+                iterations=iterations,
+                compute_unit=self.compute_unit,
+                controlnet=None,
+                controlnet_inputs=[]
+            )
+            def on_step(_, result):
+                step_callback(result)
+            def on_done(future):
+                result = future.result(last_only=True)
+                callback([result])
+            def on_exception(_, exception):
+                callback(exception)
+            future.add_response_callback(on_step)
+            future.add_exception_callback(on_exception)
+            future.add_done_callback(on_done)
+
+    def register():
+        bpy.utils.register_class(CoreMLBackend)
+
+    def unregister():
+        bpy.utils.unregister_class(CoreMLBackend)
diff --git a/__packaged__/coreml/actor.py b/__packaged__/coreml/actor.py
new file mode 100644
index 00000000..999f6f59
--- /dev/null
+++ b/__packaged__/coreml/actor.py
@@ -0,0 +1,233 @@
+import numpy as np
+from numpy.typing import NDArray
+from dream_textures.generator_process import Actor
+from dream_textures.generator_process.future import Future
+from dream_textures.generator_process.models import ImageGenerationResult
+from dream_textures.api import GenerationResult
+import os
+import random
+import gc
+
+class CoreMLActor(Actor):
+    invalidation_args = None
+    cached_pipe = None
+
+    def generate(
+        self,
+        model: str,
+        prompt: str,
+        negative_prompt: str | None,
+        size: tuple[int, int] | None,
+        seed: int | None,
+        steps: int,
+        guidance_scale: float,
+        scheduler: str,
+
+        seamless_axes: str,
+        step_preview_mode: str,
+        iterations: int,
+
+        compute_unit: str,
+        controlnet: list[str] | None,
+        controlnet_inputs: list[str]
+    ):
+        future = Future()
+        yield future
+
+        import diffusers
+        from python_coreml_stable_diffusion import pipeline
+        from python_coreml_stable_diffusion import torch2coreml, unet
+        import torch
+        from PIL import ImageOps
+        
+        seed = random.randrange(0, np.iinfo(np.uint32).max) if seed is None else seed
+        np.random.seed(seed)
+
+        new_invalidation_args = (model, scheduler, controlnet)
+        if self.cached_pipe is None or new_invalidation_args != self.invalidation_args:
+            self.invalidation_args = new_invalidation_args
+
+
+            future.add_response(GenerationResult(progress=1, total=1, title="Loading reference pipeline"))
+
+            # Initializing PyTorch pipe for reference configuration
+            from diffusers import StableDiffusionPipeline
+            pytorch_pipe = StableDiffusionPipeline.from_pretrained(model,
+                                                                use_auth_token=True)
+            # There is currently no UI for this, so remove it.
+            # This avoids wasting time converting and loading it.
+            pytorch_pipe.safety_checker = None
+            
+            # Converted mlpackages live under DREAM_TEXTURES_COREML_HOME
+            # (see README.md), defaulting to ~/.cache/dream_textures_coreml.
+            mlpackage_cache = os.path.expanduser(
+                os.getenv("DREAM_TEXTURES_COREML_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "dream_textures_coreml"))
+            )
+            mlpackage_dir = os.path.join(mlpackage_cache, model.replace('/', '_'))
+
+            if not os.path.exists(mlpackage_dir):
+                def step_title(i, model_type):
+                    future.add_response(GenerationResult(progress=i, total=4, title=f"Converting model to CoreML ({model_type})"))
+                os.makedirs(mlpackage_dir, exist_ok=True)
+                class ConversionArgs:
+                    model_version = model
+                    compute_unit = 'ALL'
+                    latent_h = None
+                    latent_w = None
+                    attention_implementation = unet.ATTENTION_IMPLEMENTATION_IN_EFFECT.name
+                    o = mlpackage_dir
+                    check_output_correctness = False
+                    chunk_unet = False
+                    quantize_weights_to_8bits = False
+                    unet_support_controlnet = False
+                    text_encoder_vocabulary_url = "https://huggingface.co/openai/clip-vit-base-patch32/resolve/main/vocab.json"
+                    text_encoder_merges_url = "https://huggingface.co/openai/clip-vit-base-patch32/resolve/main/merges.txt"
+                conversion_args = ConversionArgs()
+
+                step_title(1, "VAE decoder")
+                torch2coreml.convert_vae_decoder(pytorch_pipe, conversion_args)
+
+                step_title(2, "VAE encoder")
+                torch2coreml.convert_vae_encoder(pytorch_pipe, conversion_args)
+
+                step_title(3, "U-Net")
+                torch2coreml.convert_unet(pytorch_pipe, conversion_args)
+
+                step_title(4, "text encoder")
+                torch2coreml.convert_text_encoder(pytorch_pipe, conversion_args)
+
+            future.add_response(GenerationResult(progress=0, total=1, title="Loading converted CoreML pipeline"))
+
+            user_specified_scheduler = None
+            if scheduler is not None:
+                user_specified_scheduler = pipeline.SCHEDULER_MAP[
+                    scheduler.replace(' ', '')].from_config(pytorch_pipe.scheduler.config)
+
+            # NOTE: Modified to have a `callback` parameter.
+            def get_coreml_pipe(pytorch_pipe,
+                        mlpackages_dir,
+                        model_version,
+                        compute_unit,
+                        delete_original_pipe=True,
+                        scheduler_override=None,
+                        controlnet_models=None,
+                        callback=lambda model_name: None):
+                """ Initializes and returns a `CoreMLStableDiffusionPipeline` from an original
+                diffusers PyTorch pipeline
+                """
+                # Ensure `scheduler_override` object is of correct type if specified
+                if scheduler_override is not None:
+                    assert isinstance(scheduler_override, diffusers.SchedulerMixin)
+                    pipeline.logger.warning(
+                        "Overriding scheduler in pipeline: "
+                        f"Default={pytorch_pipe.scheduler}, Override={scheduler_override}")
+
+                # Gather configured tokenizer and scheduler attributes from the original pipe
+                coreml_pipe_kwargs = {
+                    "tokenizer": pytorch_pipe.tokenizer,
+                    "scheduler": pytorch_pipe.scheduler if scheduler_override is None else scheduler_override,
+                    "feature_extractor": pytorch_pipe.feature_extractor,
+                }
+
+                model_names_to_load = ["text_encoder", "unet", "vae_decoder"]
+                if getattr(pytorch_pipe, "safety_checker", None) is not None:
+                    model_names_to_load.append("safety_checker")
+                else:
+                    pipeline.logger.warning(
+                        f"Original diffusers pipeline for {model_version} does not have a safety_checker, "
+                        "Core ML pipeline will mirror this behavior.")
+                    coreml_pipe_kwargs["safety_checker"] = None
+
+                if delete_original_pipe:
+                    del pytorch_pipe
+                    gc.collect()
+                    pipeline.logger.info("Removed PyTorch pipe to reduce peak memory consumption")
+
+                if controlnet_models:
+                    model_names_to_load.remove("unet")
+                    callback("control-unet")
+                    coreml_pipe_kwargs["unet"] = pipeline._load_mlpackage(
+                        "control-unet",
+                        mlpackages_dir,
+                        model_version,
+                        compute_unit,
+                    )
+                    coreml_pipe_kwargs["controlnet"] = []
+                    for i, model_version in enumerate(controlnet_models):
+                        callback(f"controlnet-{i}")
+                        coreml_pipe_kwargs["controlnet"].append(
+                            pipeline._load_mlpackage_controlnet(
+                                mlpackages_dir, 
+                                model_version, 
+                                compute_unit,
+                            )
+                        )
+                else:
+                    coreml_pipe_kwargs["controlnet"] = None
+
+                # Load Core ML models
+                pipeline.logger.info(f"Loading Core ML models in memory from {mlpackages_dir}")
+                def load_package_with_callback(model_name):
+                    callback(model_name)
+                    return pipeline._load_mlpackage(
+                        model_name,
+                        mlpackages_dir,
+                        model_version,
+                        compute_unit,
+                    )
+                coreml_pipe_kwargs.update({
+                    model_name: load_package_with_callback(model_name)
+                    for model_name in model_names_to_load
+                })
+                pipeline.logger.info("Done.")
+
+                pipeline.logger.info("Initializing Core ML pipe for image generation")
+                coreml_pipe = pipeline.CoreMLStableDiffusionPipeline(**coreml_pipe_kwargs)
+                pipeline.logger.info("Done.")
+
+                return coreml_pipe
+
+            model_i = 1
+            def load_callback(model_name):
+                nonlocal model_i
+                future.add_response(GenerationResult(progress=model_i, total=3 + len(controlnet_inputs), title=f"Loading {model_name} mlpackage (this can take a while)"))
+                model_i += 1
+            self.cached_pipe = get_coreml_pipe(
+                pytorch_pipe=pytorch_pipe,
+                mlpackages_dir=mlpackage_dir,
+                model_version=model,
+                compute_unit=compute_unit,
+                scheduler_override=user_specified_scheduler,
+                controlnet_models=controlnet,
+                callback=load_callback
+            )
+
+        height = self.cached_pipe.height if size is None else size[1]
+        width = self.cached_pipe.width if size is None else size[0]
+
+        if controlnet:
+            controlnet_cond = []
+            for i, _ in enumerate(controlnet):
+                image_path = controlnet_inputs[i]
+                image = pipeline.prepare_controlnet_cond(image_path, height, width)
+                controlnet_cond.append(image)
+        else:
+            controlnet_cond = None
+
+        # Beginning image generation.
+        generator = torch.Generator(device="cpu").manual_seed(seed)
+        def callback(i, t, latents):
+            preview = ImageGenerationResult.step_preview(self, step_preview_mode, width, height, torch.from_numpy(latents), generator, i)
+            image = next(iter(preview.images), None)
+            future.add_response(GenerationResult(progress=i, total=steps, image=image, seed=seed))
+        image = self.cached_pipe(
+            prompt=prompt,
+            height=height,
+            width=width,
+            num_inference_steps=steps,
+            guidance_scale=guidance_scale,
+            controlnet_cond=controlnet_cond,
+            negative_prompt=negative_prompt,
+            callback=callback
+        )
+
+        future.add_response(GenerationResult(progress=steps, total=steps, image=np.asarray(ImageOps.flip(image["images"][0]).convert('RGBA'), dtype=np.float32) / 255., seed=seed))
+        future.set_done()
\ No newline at end of file
diff --git a/__packaged__/coreml/preferences.py b/__packaged__/coreml/preferences.py
new file mode 100644
index 00000000..a6c5d247
--- /dev/null
+++ b/__packaged__/coreml/preferences.py
@@ -0,0 +1,7 @@
+import bpy
+
+class CoreMLBackendPreferences(bpy.types.AddonPreferences):
+    bl_idname = __package__
+
+    def draw(self, context):
+        layout = self.layout
\ No newline at end of file
diff --git a/__packaged__/coreml/requirements.txt b/__packaged__/coreml/requirements.txt
new file mode 100644
index 00000000..289327f1
--- /dev/null
+++ b/__packaged__/coreml/requirements.txt
@@ -0,0 +1,3 @@
+coremltools
+git+https://github.com/apple/ml-stable-diffusion@main#egg=python_coreml_stable_diffusion
+scipy
\ No newline at end of file
diff --git a/__packaged__/coreml/test.png b/__packaged__/coreml/test.png
new file mode 100644
index 00000000..a9e9af2b
Binary files /dev/null and b/__packaged__/coreml/test.png differ
diff --git a/api/backend/backend.py b/api/backend/backend.py
index 3beb5285..ce327ba2 100644
--- a/api/backend/backend.py
+++ b/api/backend/backend.py
@@ -1,12 +1,9 @@
 try:
     import bpy
     from typing import Callable, List, Tuple
+    from ..models.generation_arguments import GenerationArguments
     from ..models.generation_result import GenerationResult
-    from ..models.task import Task
     from ..models.model import Model
-    from ..models.prompt import Prompt
-    from ..models.seamless_axes import SeamlessAxes
-    from ..models.step_preview_mode import StepPreviewMode
 
     StepCallback = Callable[[GenerationResult], None]
     Callback = Callable[[List[GenerationResult] | Exception], None]
@@ -20,11 +17,7 @@ class Backend(bpy.types.PropertyGroup):
         def list_models(self) -> List[Model]
         def generate(
             self,
-            task: Task,
-            model: Model,
-            prompt: Prompt,
-            size: Tuple[int, int] | None,
-            seamless_axes: SeamlessAxes,
+            arguments: GenerationArguments,
 
             step_callback: StepCallback,
             callback: Callback
@@ -91,22 +84,38 @@ def draw_extra(self, layout, context):
 
         def generate(
             self,
-            task: Task,
-            model: Model,
-            prompt: Prompt,
-            size: Tuple[int, int] | None,
-            seed: int,
-            steps: int,
-            guidance_scale: float,
-            scheduler: str,
-            seamless_axes: SeamlessAxes,
-            step_preview_mode: StepPreviewMode,
-            iterations: int,
-
+            arguments: GenerationArguments,
             step_callback: StepCallback,
             callback: Callback
         ):
             """A request to generate an image."""
             ...
+        
+        def validate(
+            self,
+            arguments: GenerationArguments
+        ):
+            """Validates the given arguments in the UI without generating.
+            
+            This validation should occur as quickly as possible.
+            
+            To report problems with the inputs, raise a `ValueError`.
+            Use the `FixItError` to provide a solution to the problem as well.
+            
+            ```python
+            if arguments.steps % 2 == 0:
+                raise FixItError(
+                    "The number of steps is even",
+                    solution=FixItError.ChangeProperty("steps")
+                )
+            ```
+            """
+            ...
 except:
     pass
\ No newline at end of file
diff --git a/api/models/__init__.py b/api/models/__init__.py
index 5041e364..8219c44b 100644
--- a/api/models/__init__.py
+++ b/api/models/__init__.py
@@ -3,4 +3,5 @@
 from .prompt import *
 from .seamless_axes import *
 from .step_preview_mode import *
-from .task import *
\ No newline at end of file
+from .task import *
+from .fix_it_error import *
\ No newline at end of file
diff --git a/api/models/fix_it_error.py b/api/models/fix_it_error.py
new file mode 100644
index 00000000..3292c3f4
--- /dev/null
+++ b/api/models/fix_it_error.py
@@ -0,0 +1,41 @@
+from typing import Callable, Any
+from .generation_arguments import GenerationArguments
+from dataclasses import dataclass
+
+class FixItError(Exception):
+    """An exception with a solution.
+
+    Call the `draw` method to render the UI elements responsible for resolving this error.
+    """
+    def __init__(self, message, solution: 'Solution'):
+        super().__init__(message)
+
+        self._solution = solution
+    
+    def _draw(self, dream_prompt, context, layout):
+        self._solution._draw(dream_prompt, context, layout)
+    
+    @dataclass
+    class Solution:
+        def _draw(self, dream_prompt, context, layout):
+            ...
+
+    @dataclass
+    class ChangeProperty(Solution):
+        """Prompts the user to change the given `property` of the `GenerationArguments`."""
+        property: str
+
+        def _draw(self, dream_prompt, context, layout):
+            layout.prop(dream_prompt, self.property)
+    
+    @dataclass
+    class RunOperator(Solution):
+        """Runs the given operator"""
+        title: str
+        operator: str
+        modify_operator: Callable[[Any], None]
+
+        def _draw(self, dream_prompt, context, layout):
+            self.modify_operator(
+                layout.operator(self.operator, text=self.title)
+            )
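+
+# A hypothetical usage sketch: a backend's `validate` pointing the user at an
+# operator that can resolve the problem (the operator id and property below
+# are illustrative, not real identifiers):
+#
+#     raise FixItError(
+#         "The selected model is not installed.",
+#         solution=FixItError.RunOperator(
+#             title="Install Model",
+#             operator="dream_textures.install_model",
+#             modify_operator=lambda op: setattr(op, "model", model_id),
+#         )
+#     )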
\ No newline at end of file
diff --git a/api/models/generation_arguments.py b/api/models/generation_arguments.py
new file mode 100644
index 00000000..05406e4b
--- /dev/null
+++ b/api/models/generation_arguments.py
@@ -0,0 +1,104 @@
+from dataclasses import dataclass
+from typing import Tuple, List
+from ..models.task import Task
+from ..models.model import Model
+from ..models.prompt import Prompt
+from ..models.seamless_axes import SeamlessAxes
+from ..models.step_preview_mode import StepPreviewMode
+
+@dataclass
+class GenerationArguments:
+    task: Task
+    """The type of generation to perform.
+    
+    Use a match statement to perform different actions based on the selected task.
+    
+    ```python
+    match task:
+        case PromptToImage():
+            ...
+        case ImageToImage(image=image, strength=strength, fit=fit):
+            ...
+        case Inpaint(image=image, fit=fit, strength=strength, mask_source=mask_source, mask_prompt=mask_prompt, confidence=confidence):
+            ...
+        case DepthToImage(depth=depth, image=image, strength=strength):
+            ...
+        case Outpaint(image=image, origin=origin):
+            ...
+        case _:
+            raise NotImplementedError()
+    ```
+    """
+
+    model: Model
+    """The selected model.
+
+    This is one of the options provided by `Backend.list_models`.
+    """
+
+    prompt: Prompt
+    """The positive and (optionally) negative prompt.
+
+    If `prompt.negative` is `None`, then the 'Negative Prompt' panel was disabled by the user.
+    """
+    
+    size: Tuple[int, int] | None
+    """The target size of the image, or `None` to use the native size of the model."""
+
+    seed: int
+    """The random or user-provided seed to use."""
+
+    steps: int
+    """The number of inference steps to perform."""
+
+    guidance_scale: float
+    """The selected classifier-free guidance scale."""
+
+    scheduler: str
+    """The selected scheduler.
+    
+    This is one of the options provided by `Backend.list_schedulers`.
+    """
+
+    seamless_axes: SeamlessAxes
+    """Which axes to tile seamlessly."""
+
+    step_preview_mode: StepPreviewMode
+    """The style of preview to display at each step."""
+
+    iterations: int
+    """The number of images to generate.
+    
+    The value sent to `callback` should contain the same number of `GenerationResult` instances in a list.
+    """
+
+    @staticmethod
+    def _map_property_name(name: str) -> str | List[str] | None:
+        """Converts a property name from `GenerationArguments` to the corresponding property of a `DreamPrompt`."""
+        match name:
+            case "model":
+                return "model"
+            case "prompt":
+                return ["prompt", "use_negative_prompt", "negative_prompt"]
+            case "prompt.positive":
+                return "prompt"
+            case "prompt.negative":
+                return ["use_negative_prompt", "negative_prompt"]
+            case "size":
+                return ["use_size", "width", "height"]
+            case "seed":
+                return "seed"
+            case "steps":
+                return "steps"
+            case "guidance_scale":
+                return "cfg_scale"
+            case "scheduler":
+                return "scheduler"
+            case "seamless_axes":
+                return "seamless_axes"
+            case "step_preview_mode":
+                return "step_preview_mode"
+            case "iterations":
+                return "iterations"
+            case _:
+                return None
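+
+    # A sketch of how UI code might consume this mapping (hypothetical
+    # snippet; `layout` and `dream_prompt` come from the caller):
+    #
+    #     props = GenerationArguments._map_property_name("size")
+    #     for prop in (props if isinstance(props, list) else [props]):
+    #         layout.prop(dream_prompt, prop)  # use_size, width, height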
\ No newline at end of file
diff --git a/api/models/task.py b/api/models/task.py
index 1ff11bcc..106fa3b3 100644
--- a/api/models/task.py
+++ b/api/models/task.py
@@ -31,11 +31,17 @@ class Task:
             ...
     ```
     """
-    pass
+
+    @classmethod
+    def name(cls) -> str:
+        """A human-readable name for this task."""
+        return "unknown"
 
 @dataclass
 class PromptToImage(Task):
-    pass
+    @classmethod
+    def name(cls):
+        return "prompt to image"
 
 @dataclass
 class ImageToImage(Task):
@@ -43,26 +49,39 @@ class ImageToImage(Task):
     strength: float
     fit: bool
 
+    @classmethod
+    def name(cls):
+        return "image to image"
+
 @dataclass
-class Inpaint(Task):
+class Inpaint(ImageToImage):
     class MaskSource(IntEnum):
         ALPHA = 0
         PROMPT = 1
 
-    image: NDArray
-    strength: float
-    fit: bool
     mask_source: MaskSource
     mask_prompt: str
     confidence: float
 
+    @classmethod
+    def name(cls):
+        return "inpainting"
+
 @dataclass
 class DepthToImage(Task):
     depth: NDArray | None
     image: NDArray | None
     strength: float
 
+    @classmethod
+    def name(cls):
+        return "depth to image"
+
 @dataclass
 class Outpaint(Task):
     image: NDArray
-    origin: Tuple[int, int]
\ No newline at end of file
+    origin: Tuple[int, int]
+
+    @classmethod
+    def name(cls):
+        return "outpainting"
\ No newline at end of file
diff --git a/diffusers_backend.py b/diffusers_backend.py
index 1ab23cb2..368c9560 100644
--- a/diffusers_backend.py
+++ b/diffusers_backend.py
@@ -1,16 +1,19 @@
+import bpy
 from bpy.props import FloatProperty, IntProperty, EnumProperty, BoolProperty
-from typing import List, Tuple
+from typing import List
 
 from .api import Backend, StepCallback, Callback
-from .api.models import Model, Task, Prompt, SeamlessAxes, GenerationResult, StepPreviewMode
+from .api.models import Model, GenerationArguments, GenerationResult
 from .api.models.task import PromptToImage, ImageToImage, Inpaint, DepthToImage, Outpaint
+from .api.models.fix_it_error import FixItError
 
 from .generator_process import Generator
 from .generator_process.actions.prompt_to_image import ImageGenerationResult
 from .generator_process.future import Future
 from .generator_process.models import Optimizations, Scheduler
 from .generator_process.actions.huggingface_hub import ModelType
-from .preferences import StableDiffusionPreferences
+
+from .preferences import StableDiffusionPreferences, _template_model_download_progress, InstallModel
 
 from functools import reduce
 
@@ -105,26 +108,26 @@ def optimizations(self) -> Optimizations:
             optimizations.attention_slice_size = 'auto'
         return optimizations
 
-    def generate(self, task: Task, model: Model, prompt: Prompt, size: Tuple[int, int] | None, seed: int, steps: int, guidance_scale: float, scheduler: str, seamless_axes: SeamlessAxes, step_preview_mode: StepPreviewMode, iterations: int, step_callback: StepCallback, callback: Callback):
+    def generate(self, arguments: GenerationArguments, step_callback: StepCallback, callback: Callback):
         gen = Generator.shared()
         common_kwargs = {
-            'model': model.id,
-            'scheduler': Scheduler(scheduler),
+            'model': arguments.model.id,
+            'scheduler': Scheduler(arguments.scheduler),
             'optimizations': self.optimizations(),
-            'prompt': prompt.positive,
-            'steps': steps,
-            'width': size[0] if size is not None else None,
-            'height': size[1] if size is not None else None,
-            'seed': seed,
-            'cfg_scale': guidance_scale,
-            'use_negative_prompt': prompt.negative is not None,
-            'negative_prompt': prompt.negative or "",
-            'seamless_axes': seamless_axes,
-            'iterations': iterations,
-            'step_preview_mode': step_preview_mode,
+            'prompt': arguments.prompt.positive,
+            'steps': arguments.steps,
+            'width': arguments.size[0] if arguments.size is not None else None,
+            'height': arguments.size[1] if arguments.size is not None else None,
+            'seed': arguments.seed,
+            'cfg_scale': arguments.guidance_scale,
+            'use_negative_prompt': arguments.prompt.negative is not None,
+            'negative_prompt': arguments.prompt.negative or "",
+            'seamless_axes': arguments.seamless_axes,
+            'iterations': arguments.iterations,
+            'step_preview_mode': arguments.step_preview_mode,
         }
         future: Future
-        match task:
+        match arguments.task:
             case PromptToImage():
                 future = gen.prompt_to_image(**common_kwargs)
             case ImageToImage(image=image, strength=strength, fit=fit):
@@ -157,11 +160,11 @@ def generate(self, task: Task, model: Model, prompt: Prompt, size: Tuple[int, in
             case _:
                 raise NotImplementedError()
         def on_step(_, step_image: ImageGenerationResult):
-            step_callback(GenerationResult(progress=step_image.step, total=steps, image=step_image.images[-1], seed=step_image.seeds[-1]))
+            step_callback(GenerationResult(progress=step_image.step, total=arguments.steps, image=step_image.images[-1], seed=step_image.seeds[-1]))
         def on_done(future: Future):
             result: ImageGenerationResult = future.result(last_only=True)
             callback([
-                GenerationResult(progress=result.step, total=steps, image=result.images[i], seed=result.seeds[i])
+                GenerationResult(progress=result.step, total=arguments.steps, image=result.images[i], seed=result.seeds[i])
                 for i in range(len(result.images))
             ])
         def on_exception(_, exception):
@@ -170,6 +173,34 @@ def on_exception(_, exception):
         future.add_exception_callback(on_exception)
         future.add_done_callback(on_done)
 
+    def validate(self, arguments: GenerationArguments):
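+        # Look up the model chosen in the UI among the models recorded in the
+        # add-on preferences; `None` means nothing usable is selected.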
+        installed_models = bpy.context.preferences.addons[StableDiffusionPreferences.bl_idname].preferences.installed_models
+        model = next((m for m in installed_models if m.model_base == arguments.model.id), None)
+        if model is None:
+            raise FixItError("No model selected.", FixItError.ChangeProperty("model"))
+        else:
+            if not ModelType[model.model_type].matches_task(arguments.task):
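+                # The selected model cannot serve this task: offer to download
+                # a recommended compatible model, or to switch to an installed
+                # one that matches.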
+                class DownloadModel(FixItError.Solution):
+                    def _draw(self, dream_prompt, context, layout):
+                        if not _template_model_download_progress(context, layout):
+                            target_model_type = ModelType.from_task(arguments.task)
+                            if target_model_type is not None:
+                                install_model = layout.operator(InstallModel.bl_idname, text=f"Download {target_model_type.recommended_model()} (Recommended)", icon="IMPORT")
+                                install_model.model = target_model_type.recommended_model()
+                                install_model.prefer_fp16_revision = context.preferences.addons[StableDiffusionPreferences.bl_idname].preferences.prefer_fp16_revision
+                model_task_description = f"""Incorrect model type selected for {type(arguments.task).name().replace('_', ' ').lower()} tasks.
+The selected model is for {model.model_type.replace('_', ' ').lower()}."""
+                if not any(ModelType[m.model_type].matches_task(arguments.task) for m in installed_models):
+                    raise FixItError(
+                        message=model_task_description + "\nYou do not have any compatible models downloaded:",
+                        solution=DownloadModel()
+                    )
+                else:
+                    raise FixItError(
+                        message=model_task_description + "\nSelect a different model below.",
+                        solution=FixItError.ChangeProperty("model")
+                    )
+
     def draw_speed_optimizations(self, layout, context):
         inferred_device = Optimizations.infer_device()
         if self.cpu_only:
diff --git a/generator_process/actions/huggingface_hub.py b/generator_process/actions/huggingface_hub.py
index ebaba892..a3231c8c 100644
--- a/generator_process/actions/huggingface_hub.py
+++ b/generator_process/actions/huggingface_hub.py
@@ -15,6 +15,7 @@
 import json
 import enum
 from ..future import Future
+from ...api.models.task import Task, PromptToImage, ImageToImage, Inpaint, DepthToImage, Outpaint
 
 class ModelType(enum.IntEnum):
     """
@@ -48,6 +49,41 @@ def recommended_model(self) -> str:
                 return "stabilityai/stable-diffusion-2-inpainting"
             case _:
                 return "stabilityai/stable-diffusion-2-1"
+    
+    def matches_task(self, task: Task) -> bool:
+        """Indicates if the model type is correct for a given `Task`.
+        
+        If not an error should be shown to the user to select a different model.
+        """
+        match task:
+            case PromptToImage():
+                return self == ModelType.PROMPT_TO_IMAGE
+            case Inpaint():
+                return self == ModelType.INPAINTING
+            case DepthToImage():
+                return self == ModelType.DEPTH
+            case Outpaint():
+                return self == ModelType.INPAINTING
+            case ImageToImage():
+                return self == ModelType.PROMPT_TO_IMAGE
+            case _:
+                return False
+    
+    @staticmethod
+    def from_task(task: Task) -> 'ModelType | None':
+        match task:
+            case PromptToImage():
+                return ModelType.PROMPT_TO_IMAGE
+            case Inpaint():
+                return ModelType.INPAINTING
+            case DepthToImage():
+                return ModelType.DEPTH
+            case Outpaint():
+                return ModelType.INPAINTING
+            case ImageToImage():
+                return ModelType.PROMPT_TO_IMAGE
+            case _:
+                return None
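+
+    # For example (illustrative): `ModelType.from_task(Inpaint(...))` returns
+    # `ModelType.INPAINTING`, whose `recommended_model()` can then be offered
+    # to the user as a download when validation fails.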
 
 @dataclass
 class Model:
diff --git a/generator_process/models/__init__.py b/generator_process/models/__init__.py
index cf1a0b83..99bf2a16 100644
--- a/generator_process/models/__init__.py
+++ b/generator_process/models/__init__.py
@@ -1,4 +1,3 @@
-from .fix_it_error import *
 from .image_generation_result import *
 from .optimizations import *
 from .scheduler import *
diff --git a/generator_process/models/fix_it_error.py b/generator_process/models/fix_it_error.py
deleted file mode 100644
index 5a19ee1f..00000000
--- a/generator_process/models/fix_it_error.py
+++ /dev/null
@@ -1,14 +0,0 @@
-from typing import Callable, Any
-
-class FixItError(Exception):
-    """An exception with a solution.
-
-    Call the `draw` method to render the UI elements responsible for resolving this error.
-    """
-    def __init__(self, message, fix_it: Callable[[Any, Any], None]):
-        super().__init__(message)
-
-        self._fix_it = fix_it
-    
-    def draw(self, context, layout):
-        self._fix_it(context, layout)
\ No newline at end of file
diff --git a/operators/dream_texture.py b/operators/dream_texture.py
index 16b558f7..7e52fad6 100644
--- a/operators/dream_texture.py
+++ b/operators/dream_texture.py
@@ -33,7 +33,9 @@ class DreamTexture(bpy.types.Operator):
     @classmethod
     def poll(cls, context):
         try:
-            context.scene.dream_textures_prompt.validate(context)
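+            # Any validation failure (e.g. a FixItError) disables the operator.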
+            prompt = context.scene.dream_textures_prompt
+            backend: api.Backend = prompt.get_backend()
+            backend.validate(prompt.generate_args(context))
         except:
             return False
         return Generator.shared().can_use()
diff --git a/operators/project.py b/operators/project.py
index b22476b4..fd0d4dd9 100644
--- a/operators/project.py
+++ b/operators/project.py
@@ -17,7 +17,7 @@
 from ..preferences import StableDiffusionPreferences
 
 from ..generator_process import Generator
-from ..generator_process.models import FixItError
+from ..api.models import FixItError
 from ..generator_process.actions.huggingface_hub import ModelType
 import tempfile
 
@@ -163,7 +163,7 @@ def draw(self, context):
                     error_box.use_property_split = False
                     for i, line in enumerate(e.args[0].split('\n')):
                         error_box.label(text=line, icon="ERROR" if i == 0 else "NONE")
-                    e.draw(context, error_box)
+                    e._draw(context.scene.dream_textures_project_prompt, context, error_box)
                 except Exception as e:
                     print(e)
         return ActionsPanel
diff --git a/property_groups/dream_prompt.py b/property_groups/dream_prompt.py
index 70af9580..4a5a59ed 100644
--- a/property_groups/dream_prompt.py
+++ b/property_groups/dream_prompt.py
@@ -80,6 +80,11 @@ def model_options(self, context):
         for model in self.get_backend().list_models(context)
     ]
 
+def _model_update(self, context):
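+    # Keep the enum in a valid state: when no model is selected yet, fall
+    # back to the first available option's identifier.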
+    options = [m for m in model_options(self, context) if m is not None]
+    if self.model == '' and len(options) > 0:
+        self.model = options[0][0]
+
 def backend_options(self, context):
     return [
         (backend._id(), backend.name if hasattr(backend, "name") else backend.__name__, backend.description if hasattr(backend, "description") else "")
@@ -97,7 +102,7 @@ def seed_clamp(self, ctx):
 
 attributes = {
     "backend": EnumProperty(name="Backend", items=backend_options, default=1, description="Specify which generation backend to use"),
-    "model": EnumProperty(name="Model", items=model_options, description="Specify which model to use for inference"),
+    "model": EnumProperty(name="Model", items=model_options, description="Specify which model to use for inference", update=_model_update),
     
     "control_nets": CollectionProperty(type=ControlNet),
     "active_control_net": IntProperty(name="Active ControlNet"),
@@ -211,7 +216,7 @@ def get_optimizations(self: DreamPrompt):
         optimizations.attention_slice_size = 'auto'
     return optimizations
 
-def generate_args(self, context, iteration=0):
+def generate_args(self, context, iteration=0) -> api.GenerationArguments:
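+    """Assemble the backend-agnostic `api.GenerationArguments` from the current UI state."""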
     is_file_batch = self.prompt_structure == file_batch_structure.id
     file_batch_lines = []
     file_batch_lines_negative = []
@@ -255,7 +260,7 @@ def generate_args(self, context, iteration=0):
                         task = api.DepthToImage(
                             depth=np.array(context.scene.init_depth.pixels)
                                 .astype(np.float32)
-                                .reshape((scene.init_depth.size[1], scene.init_depth.size[0], scene.init_depth.channels)),
+                                .reshape((context.scene.init_depth.size[1], context.scene.init_depth.size[0], context.scene.init_depth.channels)),
                             image=init_image,
                             strength=self.strength
                         )
@@ -280,22 +285,22 @@ def generate_args(self, context, iteration=0):
                     origin=(self.outpaint_origin[0], self.outpaint_origin[1])
                 )
 
-    args = {
-        'task': task,
-        'model': next(model for model in self.get_backend().list_models(context) if model is not None and model.id == self.model),
-        'prompt': api.Prompt(
+    return api.GenerationArguments(
+        task=task,
+        model=next(model for model in self.get_backend().list_models(context) if model is not None and model.id == self.model),
+        prompt=api.Prompt(
             file_batch_lines if is_file_batch else self.generate_prompt(),
             file_batch_lines_negative if is_file_batch else (self.negative_prompt if self.use_negative_prompt else None)
         ),
-        'size': (self.width, self.height) if self.use_size else None,
-        'seed': self.get_seed(),
-        'steps': self.steps,
-        'guidance_scale': self.cfg_scale,
-        'scheduler': self.scheduler,
-        'seamless_axes': SeamlessAxes(self.seamless_axes),
-        'step_preview_mode': StepPreviewMode(self.step_preview_mode),
-        'iterations': self.iterations
-    }
+        size=(self.width, self.height) if self.use_size else None,
+        seed=self.get_seed(),
+        steps=self.steps,
+        guidance_scale=self.cfg_scale,
+        scheduler=self.scheduler,
+        seamless_axes=SeamlessAxes(self.seamless_axes),
+        step_preview_mode=StepPreviewMode(self.step_preview_mode),
+        iterations=self.iterations
+    )
     # args['control'] = [
     #     np.flipud(
     #         np.array(net.control_image.pixels)
@@ -304,7 +309,6 @@ def generate_args(self, context, iteration=0):
     #     for net in args['control_nets']
     #     if net.control_image is not None
     # ]
-    return args
 
 def get_backend(self) -> api.Backend:
     return getattr(self, api.Backend._lookup(self.backend)._attribute())
diff --git a/property_groups/dream_prompt_validation.py b/property_groups/dream_prompt_validation.py
index 69da4857..71fa1a2b 100644
--- a/property_groups/dream_prompt_validation.py
+++ b/property_groups/dream_prompt_validation.py
@@ -1,5 +1,4 @@
 from ..preferences import StableDiffusionPreferences, _template_model_download_progress, InstallModel
-from ..generator_process.models import FixItError
 from ..generator_process.actions.huggingface_hub import ModelType
 from ..preferences import OpenURL
 
diff --git a/ui/panels/dream_texture.py b/ui/panels/dream_texture.py
index 5d4dc464..e93a2ded 100644
--- a/ui/panels/dream_texture.py
+++ b/ui/panels/dream_texture.py
@@ -16,7 +16,7 @@
 from ...property_groups.dream_prompt import DreamPrompt, backend_options
 from ...generator_process.actions.prompt_to_image import Optimizations
 from ...generator_process.actions.detect_seamless import SeamlessAxes
-from ...generator_process.models import FixItError
+from ...api.models import FixItError
 from ... import api
 
 def dream_texture_panels():
@@ -357,13 +357,14 @@ def draw(self, context):
 
             # Validation
             try:
-                prompt.validate(context)
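+                # Delegate validation to the backend, passing the exact
+                # arguments it would receive for generation.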
+                backend: api.Backend = prompt.get_backend()
+                backend.validate(prompt.generate_args(context))
             except FixItError as e:
                 error_box = layout.box()
                 error_box.use_property_split = False
                 for i, line in enumerate(e.args[0].split('\n')):
                     error_box.label(text=line, icon="ERROR" if i == 0 else "NONE")
-                e.draw(context, error_box)
+                e._draw(prompt, context, error_box)
             except Exception as e:
                 print(e)
     return ActionsPanel
\ No newline at end of file